diff --git a/.changelog/24730.txt b/.changelog/24730.txt new file mode 100644 index 000000000000..22144755612a --- /dev/null +++ b/.changelog/24730.txt @@ -0,0 +1,3 @@ +```release-note:bug +resource/aws_servicecatalog_portfolio_share: Add global mutex lock around create and delete operations to prevent `ThrottlingException` errors +``` diff --git a/.changelog/26702.txt b/.changelog/26702.txt new file mode 100644 index 000000000000..09f93896ed39 --- /dev/null +++ b/.changelog/26702.txt @@ -0,0 +1,7 @@ +```release-note:bug +resource/aws_glue_catalog_table: Add `partition_keys.parameters` argument, fixing `Invalid address to set: []string{"partition_keys", "0", "parameters"}` errors +``` + +```release-note:bug +data-source/aws_glue_catalog_table: Add `partition_keys.parameters` attribute +``` \ No newline at end of file diff --git a/.changelog/33624.txt b/.changelog/33624.txt new file mode 100644 index 000000000000..c5bcbfe2633a --- /dev/null +++ b/.changelog/33624.txt @@ -0,0 +1,3 @@ +```release-note:bug +resource/aws_cognito_risk_configuration: Make `account_takeover_risk_configuration.notify_configuration` optional +``` diff --git a/.changelog/33796.txt b/.changelog/33796.txt new file mode 100644 index 000000000000..296f228998c3 --- /dev/null +++ b/.changelog/33796.txt @@ -0,0 +1,7 @@ +```release-note:enhancement +resource/aws_dlm_lifecycle_policy: Add `policy_details.schedule.cross_region_copy_rule.target_region` argument +``` + +```release-note:enhancement +resource/aws_dlm_lifecycle_policy: Make `policy_details.schedule.cross_region_copy_rule.target` optional +``` \ No newline at end of file diff --git a/.changelog/36628.txt b/.changelog/36628.txt new file mode 100644 index 000000000000..07a2f3e94cb2 --- /dev/null +++ b/.changelog/36628.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_athena_database: Add `workgroup` argument +``` \ No newline at end of file diff --git a/.changelog/36874.txt b/.changelog/36874.txt new file mode 100644 index 
000000000000..e96132e00d03 --- /dev/null +++ b/.changelog/36874.txt @@ -0,0 +1,3 @@ +```release-note:bug +resource/aws_securitylake_data_lake: Allow `meta_store_role_arn` to be updated in-place +``` diff --git a/.changelog/37286.txt b/.changelog/37286.txt new file mode 100644 index 000000000000..6bf5a2ea6578 --- /dev/null +++ b/.changelog/37286.txt @@ -0,0 +1,3 @@ +```release-note:new-data-source +aws_rds_global_cluster +``` \ No newline at end of file diff --git a/.changelog/38336.txt b/.changelog/38336.txt new file mode 100644 index 000000000000..dbed8d77bea3 --- /dev/null +++ b/.changelog/38336.txt @@ -0,0 +1,3 @@ +```release-note:bug +resource/aws_spot_instance_request: Change `network_interface.network_card_index` to Computed +``` \ No newline at end of file diff --git a/.changelog/38527.txt b/.changelog/38527.txt new file mode 100644 index 000000000000..69122473bf3e --- /dev/null +++ b/.changelog/38527.txt @@ -0,0 +1,7 @@ +```release-note:enhancement +resource/aws_instance: Add `placement_group_id` argument +``` + +```release-note:enhancement +data-source/aws_instance: Add `placement_group_id` attribute +``` \ No newline at end of file diff --git a/.changelog/38717.txt b/.changelog/38717.txt new file mode 100644 index 000000000000..15d8c5106b10 --- /dev/null +++ b/.changelog/38717.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_athena_workgroup: Add `configuration.identity_center_configuration` argument +``` \ No newline at end of file diff --git a/.changelog/40035.txt b/.changelog/40035.txt new file mode 100644 index 000000000000..9a7b14ef4046 --- /dev/null +++ b/.changelog/40035.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_codebuild_project: Add `auto_retry_limit` argument +``` diff --git a/.changelog/40148.txt b/.changelog/40148.txt new file mode 100644 index 000000000000..a0cfb4235c7d --- /dev/null +++ b/.changelog/40148.txt @@ -0,0 +1,3 @@ +```release-note:bug +resource/aws_batch_compute_environment: Allow in-place 
updates of compute environments that have the `SPOT_PRICE_CAPACITY_OPTIMIZED` strategy +``` diff --git a/.changelog/41055.txt b/.changelog/41055.txt new file mode 100644 index 000000000000..c38c5a698e65 --- /dev/null +++ b/.changelog/41055.txt @@ -0,0 +1,15 @@ +```release-note:enhancement +resource/aws_dlm_lifecycle_policy: Add `default_policy` argument +``` + +```release-note:enhancement +resource/aws_dlm_lifecycle_policy: Add `copy_tags`, `create_interval`, `exclusions`, `extend_deletion`, `policy_language`, `resource_type` and `retain_interval` attributes to `policy_details` configuration block +``` + +```release-note:enhancement +resource/aws_dlm_lifecycle_policy: Add `policy_details.create_rule.scripts` argument +``` + +```release-note:enhancement +resource/aws_dlm_lifecycle_policy: Add `policy_details.schedule.archive_rule` argument +``` diff --git a/.changelog/41308.txt b/.changelog/41308.txt new file mode 100644 index 000000000000..802bf0de409f --- /dev/null +++ b/.changelog/41308.txt @@ -0,0 +1,7 @@ +```release-note:enhancement +resource/aws_dynamodb_table: Add `warm_throughput` and `global_secondary_index.warm_throughput` arguments +``` + +```release-note:enhancement +data-source/aws_dynamodb_table: Add `warm_throughput` and `global_secondary_index.warm_throughput` attributes +``` \ No newline at end of file diff --git a/.changelog/41364.txt b/.changelog/41364.txt new file mode 100644 index 000000000000..08be138b02b7 --- /dev/null +++ b/.changelog/41364.txt @@ -0,0 +1,3 @@ +```release-note:new-resource +aws_s3_bucket_metadata_configuration +``` \ No newline at end of file diff --git a/.changelog/41702.txt b/.changelog/41702.txt new file mode 100644 index 000000000000..3d1c41f18f98 --- /dev/null +++ b/.changelog/41702.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_cognito_resource_server: Allow `name` to be updated in-place +``` \ No newline at end of file diff --git a/.changelog/42188.txt b/.changelog/42188.txt new file mode 100644 index 
000000000000..7d66b0f776f4 --- /dev/null +++ b/.changelog/42188.txt @@ -0,0 +1,11 @@ +```release-note:enhancement +resource/aws_network_interface: Add `attachment.network_card_index` argument +``` + +```release-note:enhancement +resource/aws_network_interface_attachment: Add `network_card_index` argument +``` + +```release-note:enhancement +data-source/aws_network_interface: Add `attachment.network_card_index` attribute +``` diff --git a/.changelog/42201.txt b/.changelog/42201.txt new file mode 100644 index 000000000000..6357f2bbd6ee --- /dev/null +++ b/.changelog/42201.txt @@ -0,0 +1,3 @@ +```release-note:new-resource +aws_bedrockagent_flow +``` diff --git a/.changelog/42382.txt b/.changelog/42382.txt new file mode 100644 index 000000000000..a2b8e0fc0319 --- /dev/null +++ b/.changelog/42382.txt @@ -0,0 +1,6 @@ +```release-note:new-resource +aws_timestreaminfluxdb_db_cluster +``` +```release-note:bug +resource/aws_timestreaminfluxdb_db_instance: Fix tag-only update errors +``` diff --git a/.changelog/42397.txt b/.changelog/42397.txt new file mode 100644 index 000000000000..5be6003aa0dd --- /dev/null +++ b/.changelog/42397.txt @@ -0,0 +1,3 @@ +```release-note:new-resource +aws_controltower_baseline +``` \ No newline at end of file diff --git a/.changelog/42483.txt b/.changelog/42483.txt new file mode 100644 index 000000000000..70885cdcab21 --- /dev/null +++ b/.changelog/42483.txt @@ -0,0 +1,3 @@ +```release-note:bug +resource/aws_eks_cluster: Supports null `compute_config.node_role_arn` when disabling auto mode or built-in node pools +``` diff --git a/.changelog/42577.txt b/.changelog/42577.txt new file mode 100644 index 000000000000..39b90ffac942 --- /dev/null +++ b/.changelog/42577.txt @@ -0,0 +1,3 @@ +```release-note:new-data-source +aws_ecr_images +``` \ No newline at end of file diff --git a/.changelog/42591.txt b/.changelog/42591.txt new file mode 100644 index 000000000000..bd246de69f95 --- /dev/null +++ b/.changelog/42591.txt @@ -0,0 +1,7 @@ 
+```release-note:new-resource +aws_nat_gateway_eip_association +``` + +```release-note:enhancement +resource/aws_nat_gateway: Change `secondary_allocation_ids` to Optional and Computed +``` \ No newline at end of file diff --git a/.changelog/42595.txt b/.changelog/42595.txt new file mode 100644 index 000000000000..3662f5752e1a --- /dev/null +++ b/.changelog/42595.txt @@ -0,0 +1,3 @@ +```release-note:bug +resource/aws_ssm_parameter: Fix `version` not being updated when `description` changes +``` diff --git a/.changelog/42636.txt b/.changelog/42636.txt new file mode 100644 index 000000000000..e88f1ab8aaad --- /dev/null +++ b/.changelog/42636.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_elasticache_global_replication_group: Change `engine` to Optional and Computed +``` \ No newline at end of file diff --git a/.changelog/42639.txt b/.changelog/42639.txt new file mode 100644 index 000000000000..de622e39fe21 --- /dev/null +++ b/.changelog/42639.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_cognito_user_pool: Allow `name` to be updated in-place +``` diff --git a/.changelog/42708.txt b/.changelog/42708.txt new file mode 100644 index 000000000000..64d58e83abaf --- /dev/null +++ b/.changelog/42708.txt @@ -0,0 +1,7 @@ +```release-note:new-resource +aws_transfer_web_app +``` + +```release-note:new-resource +aws_transfer_web_app_customization +``` diff --git a/.changelog/42740.txt b/.changelog/42740.txt new file mode 100644 index 000000000000..f402d54fdc46 --- /dev/null +++ b/.changelog/42740.txt @@ -0,0 +1,3 @@ +```release-note:bug +resource/aws_wafv2_web_acl: Fix performance of update when the WebACL has a large number of rules +``` diff --git a/.changelog/42829.txt b/.changelog/42829.txt new file mode 100644 index 000000000000..edd7c9e598b8 --- /dev/null +++ b/.changelog/42829.txt @@ -0,0 +1,3 @@ +```release-note:bug +resource/aws_lambda_function: Reset non-API attributes (`source_code_hash`, `s3_bucket`, `s3_key`, `s3_object_version` and 
`filename`) to their previous values when an update operation fails +``` diff --git a/.changelog/42877.txt b/.changelog/42877.txt new file mode 100644 index 000000000000..b5cdbd519f31 --- /dev/null +++ b/.changelog/42877.txt @@ -0,0 +1,3 @@ +```release-note:bug +resource/aws_ssm_parameter: Fix `Provider produced inconsistent final plan` errors when changing from using `value` to using `value_wo` +``` diff --git a/.changelog/42928.txt b/.changelog/42928.txt new file mode 100644 index 000000000000..fc783ed9357b --- /dev/null +++ b/.changelog/42928.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_eks_cluster: Allow `remote_network_config` to be updated in-place, enabling support for EKS hybrid nodes on existing clusters +``` \ No newline at end of file diff --git a/.changelog/43125.txt b/.changelog/43125.txt new file mode 100644 index 000000000000..32fa415fc187 --- /dev/null +++ b/.changelog/43125.txt @@ -0,0 +1,7 @@ +```release-note:enhancement +resource/aws_dms_endpoint: Add `oracle_settings` configuration block for authentication method +``` + +```release-note:enhancement +resource/aws_dms_replication_instance: Add `dns_name_servers` attribute and `kerberos_authentication_settings` configuration block for Kerberos authentication settings +``` \ No newline at end of file diff --git a/.changelog/43150.txt b/.changelog/43150.txt new file mode 100644 index 000000000000..b7c3b1702155 --- /dev/null +++ b/.changelog/43150.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +data-source/aws_bedrock_inference_profiles: Add `type` argument +``` \ No newline at end of file diff --git a/.changelog/43155.txt b/.changelog/43155.txt new file mode 100644 index 000000000000..87a408ce6966 --- /dev/null +++ b/.changelog/43155.txt @@ -0,0 +1,6 @@ +```release-note:enhancement +resource/aws_lb_listener_rule: Add resource identity support +``` +```release-note:enhancement +resource/aws_alb_listener_rule: Add resource identity support +``` diff --git a/.changelog/43158.txt 
b/.changelog/43158.txt new file mode 100644 index 000000000000..87d9b303bbb2 --- /dev/null +++ b/.changelog/43158.txt @@ -0,0 +1,3 @@ +```release-note:bug +resource/aws_instance: Recompute `ipv6_addresses` when `ipv6_address_count` is updated +``` diff --git a/.changelog/43161.txt b/.changelog/43161.txt new file mode 100644 index 000000000000..f64399020377 --- /dev/null +++ b/.changelog/43161.txt @@ -0,0 +1,6 @@ +```release-note:enhancement +resource/aws_lb_listener: Add resource identity support +``` +```release-note:enhancement +resource/aws_alb_listener: Add resource identity support +``` diff --git a/.changelog/43171.txt b/.changelog/43171.txt new file mode 100644 index 000000000000..ad1a9ee41d1d --- /dev/null +++ b/.changelog/43171.txt @@ -0,0 +1,6 @@ +```release-note:enhancement +resource/aws_lb_target_group: Add resource identity support +``` +```release-note:enhancement +resource/aws_alb_target_group: Add resource identity support +``` diff --git a/.changelog/43186.txt b/.changelog/43186.txt new file mode 100644 index 000000000000..71b7d1facc3a --- /dev/null +++ b/.changelog/43186.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_lb_trust_store: Add resource identity support +``` diff --git a/.changelog/43200.txt b/.changelog/43200.txt new file mode 100644 index 000000000000..0fc5c13501e3 --- /dev/null +++ b/.changelog/43200.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_globalaccelerator_accelerator: Add resource identity support +``` diff --git a/.changelog/43202.txt b/.changelog/43202.txt new file mode 100644 index 000000000000..52f9c301abe4 --- /dev/null +++ b/.changelog/43202.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_sns_topic: Add resource identity support +``` diff --git a/.changelog/43207.txt b/.changelog/43207.txt new file mode 100644 index 000000000000..9045693f9a1a --- /dev/null +++ b/.changelog/43207.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_acm_certificate: Support 
`options.export` argument to issue an exportable certificate +``` diff --git a/.changelog/43215.txt b/.changelog/43215.txt new file mode 100644 index 000000000000..04893e3a311d --- /dev/null +++ b/.changelog/43215.txt @@ -0,0 +1,3 @@ +```release-note:bug +resource/aws_api_gateway_resource: Recompute `path` when `path_part` is updated +``` diff --git a/.changelog/43240.txt b/.changelog/43240.txt new file mode 100644 index 000000000000..f96ffa2f919f --- /dev/null +++ b/.changelog/43240.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_dynamodb_table: Add `replica.deletion_protection_enabled` argument +``` diff --git a/.changelog/43241.txt b/.changelog/43241.txt new file mode 100644 index 000000000000..1ba6a967d21e --- /dev/null +++ b/.changelog/43241.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_ecs_service: Remove Terraform default for `availability_zone_rebalancing` and change the attribute to Optional and Computed. This allows ECS to default to `ENABLED` for new resources compatible with *AvailabilityZoneRebalancing* and maintain an existing service's `availability_zone_rebalancing` value during update when not configured. 
If an existing service never had an `availability_zone_rebalancing` value configured and is updated, ECS will treat this as `DISABLED` +``` \ No newline at end of file diff --git a/.changelog/43262.txt b/.changelog/43262.txt new file mode 100644 index 000000000000..102842ebc7cd --- /dev/null +++ b/.changelog/43262.txt @@ -0,0 +1,3 @@ +```release-note:bug +resource/aws_vpc_ipam_pool_cidr: Fix `netmask_length` not being saved and diffed correctly +``` diff --git a/.changelog/43337.txt b/.changelog/43337.txt new file mode 100644 index 000000000000..9b2ce0d52e41 --- /dev/null +++ b/.changelog/43337.txt @@ -0,0 +1,3 @@ +```release-note:bug +resource/aws_batch_compute_environment: Fix `inconsistent final plan` error when `compute_resource.launch_template.version` is unknown during an update +``` \ No newline at end of file diff --git a/.changelog/43355.txt b/.changelog/43355.txt new file mode 100644 index 000000000000..5836e598d5bf --- /dev/null +++ b/.changelog/43355.txt @@ -0,0 +1,3 @@ +```release-note:bug +resource/aws_bedrockagent_agent_action_group: Correctly set `parent_action_group_signature` on Read +``` diff --git a/.changelog/43358.txt b/.changelog/43358.txt new file mode 100644 index 000000000000..876e8ab6ac7b --- /dev/null +++ b/.changelog/43358.txt @@ -0,0 +1,3 @@ +```release-note:bug +resource/aws_lexv2models_slot: Fix error when `sub_slot_setting.slot_specification.value_elicitation_setting.prompt_specification.prompt_attempts_specification` and `value_elicitation_setting.prompt_specification.prompt_attempts_specification` have default values +``` \ No newline at end of file diff --git a/.changelog/43377.txt b/.changelog/43377.txt new file mode 100644 index 000000000000..4b08ab003cd5 --- /dev/null +++ b/.changelog/43377.txt @@ -0,0 +1,7 @@ +```release-note:enhancement +resource/aws_lakeformation_resource: Support `with_privileged_access` argument +``` + +```release-note:enhancement +data-source/aws_lakeformation_resource: Support `hybrid_access_enabled`, 
`with_federation` and `with_privileged_access` attributes +``` diff --git a/.changelog/43381.txt b/.changelog/43381.txt new file mode 100644 index 000000000000..aebb9bc6c8d2 --- /dev/null +++ b/.changelog/43381.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_cloudwatch_log_metric_filter: Add `apply_on_transformed_logs` argument +``` \ No newline at end of file diff --git a/.changelog/43382.txt b/.changelog/43382.txt new file mode 100644 index 000000000000..44067d9f5cd2 --- /dev/null +++ b/.changelog/43382.txt @@ -0,0 +1,3 @@ +```release-note:bug +resource/aws_datazone_environment_blueprint_configuration: Fix `Inappropriate value for attribute "regional_parameters"` errors during planning. This fixes a regression introduced in [v6.0.0](https://github.com/hashicorp/terraform-provider-aws/blob/main/CHANGELOG.md#600-june-18-2025) +``` \ No newline at end of file diff --git a/.changelog/43391.txt b/.changelog/43391.txt new file mode 100644 index 000000000000..59e8d5c7a480 --- /dev/null +++ b/.changelog/43391.txt @@ -0,0 +1,7 @@ +```release-note:new-resource +aws_fsx_s3_access_point_attachment +``` + +```release-note:new-data-source +aws_s3_access_point +``` \ No newline at end of file diff --git a/.changelog/43396.txt b/.changelog/43396.txt new file mode 100644 index 000000000000..bd969dcf801a --- /dev/null +++ b/.changelog/43396.txt @@ -0,0 +1,7 @@ +```release-note:new-resource +aws_cognito_log_delivery_configuration +``` + +```release-note:note +resource/aws_cognito_log_delivery_configuration: Because we cannot easily test all this functionality, it is best effort and we ask for community help in testing +``` diff --git a/.changelog/43397.txt b/.changelog/43397.txt new file mode 100644 index 000000000000..6a6caa32b05f --- /dev/null +++ b/.changelog/43397.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_wafv2_rule_group: Add `rules_json` argument +``` \ No newline at end of file diff --git a/.changelog/43400.txt b/.changelog/43400.txt new 
file mode 100644 index 000000000000..cc52c4fbac4f --- /dev/null +++ b/.changelog/43400.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_datasync_location_object_storage: Make `agent_arns` optional +``` \ No newline at end of file diff --git a/.changelog/43405.txt b/.changelog/43405.txt new file mode 100644 index 000000000000..21bcb4f785e7 --- /dev/null +++ b/.changelog/43405.txt @@ -0,0 +1,3 @@ +```release-note:bug +resource/aws_ec2_transit_gateway_route_table_propagation: Don't mark `transit_gateway_attachment_id` as [ForceNew](https://developer.hashicorp.com/terraform/plugin/sdkv2/schemas/schema-behaviors#forcenew) if the value is known not to change +``` diff --git a/.changelog/43408.txt b/.changelog/43408.txt new file mode 100644 index 000000000000..851196e82ded --- /dev/null +++ b/.changelog/43408.txt @@ -0,0 +1,3 @@ +```release-note:new-resource +aws_workspacesweb_trust_store +``` \ No newline at end of file diff --git a/.changelog/43415.txt b/.changelog/43415.txt new file mode 100644 index 000000000000..ed252924530a --- /dev/null +++ b/.changelog/43415.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_s3_bucket_public_access_block: Add `skip_destroy` argument +``` diff --git a/.changelog/43416.txt b/.changelog/43416.txt new file mode 100644 index 000000000000..ed003e81073a --- /dev/null +++ b/.changelog/43416.txt @@ -0,0 +1,3 @@ +```release-note:bug +resource/aws_lambda_function: Fix `waiting for Lambda Function (...) version publish: unexpected state '', wanted target 'Successful'` errors on Update. 
This fixes a regression introduced in [v6.2.0](https://github.com/hashicorp/terraform-provider-aws/blob/main/CHANGELOG.md#620-july--2-2025) +``` \ No newline at end of file diff --git a/.changelog/43423.txt b/.changelog/43423.txt new file mode 100644 index 000000000000..83f0589c9f8e --- /dev/null +++ b/.changelog/43423.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_globalaccelerator_custom_routing_accelerator: Add resource identity support +``` diff --git a/.changelog/43430.txt b/.changelog/43430.txt new file mode 100644 index 000000000000..affa9c1c984b --- /dev/null +++ b/.changelog/43430.txt @@ -0,0 +1,15 @@ +```release-note:new-resource +aws_networkfirewall_firewall_transit_gateway_attachment_accepter +``` + +```release-note:enhancement +resource/aws_networkfirewall_firewall: Add `availability_zone_change_protection`, `availability_zone_mapping`, and `transit_gateway_id` arguments and `firewall_status.transit_gateway_attachment_sync_states` and `transit_gateway_owner_account_id` attributes +``` + +```release-note:enhancement +resource/aws_networkfirewall_firewall: Mark `subnet_mapping` and `vpc_id` as Optional +``` + +```release-note:enhancement +data-source/aws_networkfirewall_firewall: Add `availability_zone_change_protection`, `availability_zone_mapping`, `firewall_status.sync_states.attachment.status_message`, `firewall_status.transit_gateway_attachment_sync_states`, `transit_gateway_id`, and `transit_gateway_owner_account_id` attributes +``` diff --git a/.changelog/43434.txt b/.changelog/43434.txt new file mode 100644 index 000000000000..09250ed4214d --- /dev/null +++ b/.changelog/43434.txt @@ -0,0 +1,12 @@ +```release-note:enhancement +resource/aws_ecs_service: `deployment_controller.type` changes no longer force a replacement +``` +```release-note:enhancement +resource/aws_ecs_service: Add `deployment_configuration` argument +``` +```release-note:enhancement +resource/aws_ecs_service: Add `load_balancer.advanced_configuration` argument 
+``` +```release-note:enhancement +resource/aws_ecs_service: Add `service.client_alias.test_traffic_rules` argument +``` diff --git a/.changelog/43436.txt b/.changelog/43436.txt new file mode 100644 index 000000000000..38c85c310f1b --- /dev/null +++ b/.changelog/43436.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_dx_gateway_association: Add `transit_gateway_attachment_id` attribute. This functionality requires the `ec2:DescribeTransitGatewayAttachments` IAM permission +``` diff --git a/.changelog/43440.txt b/.changelog/43440.txt new file mode 100644 index 000000000000..58bef1c4702c --- /dev/null +++ b/.changelog/43440.txt @@ -0,0 +1,11 @@ +```release-note:enhancement +resource/aws_dms_endpoint: Add `postgres_settings.authentication_method` and `postgres_settings.service_access_role_arn` arguments +``` + +```release-note:enhancement +data-source/aws_dms_endpoint: Add `postgres_settings.authentication_method` and `postgres_settings.service_access_role_arn` attributes +``` + +```release-note:enhancement +resource/aws_dms_endpoint: Add plan-time validation of `postgres_settings.database_mode`, `postgres_settings.map_long_varchar_as`, and `postgres_settings.plugin_name` arguments +``` \ No newline at end of file diff --git a/.changelog/43444.txt b/.changelog/43444.txt new file mode 100644 index 000000000000..82c91c88bf90 --- /dev/null +++ b/.changelog/43444.txt @@ -0,0 +1,3 @@ +```release-note:new-resource +aws_workspacesweb_portal +``` \ No newline at end of file diff --git a/.changelog/43449.txt b/.changelog/43449.txt new file mode 100644 index 000000000000..07f02dfcfac0 --- /dev/null +++ b/.changelog/43449.txt @@ -0,0 +1,7 @@ +```release-note:enhancement +resource/aws_codebuild_fleet: Add `instance_type` argument in `compute_configuration` block to support custom instance types +``` + +```release-note:enhancement +data-source/aws_codebuild_fleet: Add `instance_type` attribute in `compute_configuration` block +``` diff --git a/.changelog/43450.txt 
b/.changelog/43450.txt new file mode 100644 index 000000000000..f2871c027483 --- /dev/null +++ b/.changelog/43450.txt @@ -0,0 +1,6 @@ +```release-note:enhancement +resource/aws_glue_registry: Add resource identity support +``` +```release-note:enhancement +resource/aws_glue_schema: Add resource identity support +``` diff --git a/.changelog/43452.txt b/.changelog/43452.txt new file mode 100644 index 000000000000..6f7b15d8a32b --- /dev/null +++ b/.changelog/43452.txt @@ -0,0 +1,7 @@ +```release-note:enhancement +data-source/aws_verifiedpermissions_policy_store: Add `deletion_protection` attribute +``` + +```release-note:enhancement +resource/aws_verifiedpermissions_policy_store: Add `deletion_protection` argument +``` \ No newline at end of file diff --git a/.changelog/43453.txt b/.changelog/43453.txt new file mode 100644 index 000000000000..309b5aa06df5 --- /dev/null +++ b/.changelog/43453.txt @@ -0,0 +1,7 @@ +```release-note:enhancement +resource/aws_cloudwatch_event_bus: Add `log_config` argument +``` + +```release-note:enhancement +data-source/aws_cloudwatch_event_bus: Add `log_config` attribute +``` diff --git a/.changelog/43454.txt b/.changelog/43454.txt new file mode 100644 index 000000000000..633d9c04681b --- /dev/null +++ b/.changelog/43454.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_batch_compute_environment: Add `compute_resources.ec2_configuration.image_kubernetes_version` argument +``` \ No newline at end of file diff --git a/.changelog/43471.txt b/.changelog/43471.txt new file mode 100644 index 000000000000..cbf78a7f719b --- /dev/null +++ b/.changelog/43471.txt @@ -0,0 +1,3 @@ +```release-note:bug +resource/aws_bedrockagent_flow: Remove `definition.connection` and `definition.node` list length limits +``` diff --git a/.changelog/43490.txt b/.changelog/43490.txt new file mode 100644 index 000000000000..dc27135c8836 --- /dev/null +++ b/.changelog/43490.txt @@ -0,0 +1,3 @@ +```release-note:bug 
+resource/aws_kinesisanalyticsv2_application: Ensure that configured `application_configuration.run_configuration` values are respected during update +``` diff --git a/.changelog/43499.txt b/.changelog/43499.txt new file mode 100644 index 000000000000..edb4ebbc894c --- /dev/null +++ b/.changelog/43499.txt @@ -0,0 +1,3 @@ +```release-note:bug +resource/aws_dx_hosted_connection: Fix `DescribeHostedConnections failed for connection dxcon-xxxx doesn't exist` by pointing to the correct connection ID when doing the describe. +``` \ No newline at end of file diff --git a/.changelog/43501.txt b/.changelog/43501.txt new file mode 100644 index 000000000000..bbcfcb64bf6e --- /dev/null +++ b/.changelog/43501.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_quicksight_account_subscription: Add import support. This resource can now be imported via the `aws_account_id` argument. +``` diff --git a/.changelog/43502.txt b/.changelog/43502.txt new file mode 100644 index 000000000000..7f56d741e23d --- /dev/null +++ b/.changelog/43502.txt @@ -0,0 +1,7 @@ +```release-note:bug +resource/aws_ecs_service: Improve stabilization logic to handle both new deployments and in-place updates correctly. This fixes a regression introduced in [v6.4.0](https://github.com/hashicorp/terraform-provider-aws/blob/main/CHANGELOG.md#640-july-17-2025) +``` + +```release-note:note +resource/aws_ecs_service: Acceptance tests cannot fully reproduce scenarios with deployments older than 3 months. 
Community feedback on this fix is appreciated, particularly for long-running ECS services with in-place updates +``` \ No newline at end of file diff --git a/.changelog/43503.txt b/.changelog/43503.txt new file mode 100644 index 000000000000..78a2eebad517 --- /dev/null +++ b/.changelog/43503.txt @@ -0,0 +1,12 @@ +```release-note:enhancement +resource/aws_iam_openid_connect_provider: Add resource identity support +``` +```release-note:enhancement +resource/aws_iam_policy: Add resource identity support +``` +```release-note:enhancement +resource/aws_iam_saml_provider: Add resource identity support +``` +```release-note:enhancement +resource/aws_iam_service_linked_role: Add resource identity support +``` diff --git a/.changelog/43506.txt b/.changelog/43506.txt new file mode 100644 index 000000000000..bd59867b9a38 --- /dev/null +++ b/.changelog/43506.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_wafv2_web_acl: Add `statement.rate_based_statement.custom_key.asn` argument +``` diff --git a/.changelog/43516.txt b/.changelog/43516.txt new file mode 100644 index 000000000000..d592024b6a2d --- /dev/null +++ b/.changelog/43516.txt @@ -0,0 +1,3 @@ +```release-note:bug +provider: Prevent planned `forces replacement` on `region` for numerous resource types when upgrading from a pre-v6.0.0 provider version and `-refresh=false` is in effect +``` diff --git a/.changelog/43517.txt b/.changelog/43517.txt new file mode 100644 index 000000000000..cf9bd70faecc --- /dev/null +++ b/.changelog/43517.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_bedrock_guardrail: Add `cross_region_config`, `content_policy_config.tier_config`, and `topic_policy_config.tier_config` arguments +``` diff --git a/.changelog/43520.txt b/.changelog/43520.txt new file mode 100644 index 000000000000..d11dd18a9ad8 --- /dev/null +++ b/.changelog/43520.txt @@ -0,0 +1,3 @@ +```release-note:bug +resource/aws_elasticache_user_group: Ignore `InvalidParameterValue: User xxx is not a member 
of user group xxx` errors during group modification +``` diff --git a/.changelog/43523.txt b/.changelog/43523.txt new file mode 100644 index 000000000000..a17a88f6772e --- /dev/null +++ b/.changelog/43523.txt @@ -0,0 +1,3 @@ +```release-note:bug +resource/aws_appsync_api_cache: Fix "missing required field" error during update +``` diff --git a/.changelog/43525.txt b/.changelog/43525.txt new file mode 100644 index 000000000000..b095eae9eb87 --- /dev/null +++ b/.changelog/43525.txt @@ -0,0 +1,7 @@ +```release-note:enhancement +resource/aws_inspector2_enabler: Support `CODE_REPOSITORY` as a valid value for `resource_types` +``` + +```release-note:enhancement +resource/aws_inspector2_organization_configuration: Add `auto_enable.code_repository` argument +``` diff --git a/.changelog/43533.txt b/.changelog/43533.txt new file mode 100644 index 000000000000..9f27f712eba4 --- /dev/null +++ b/.changelog/43533.txt @@ -0,0 +1,3 @@ +```release-note:breaking-change +resource/aws_ecs_service: Fix behavior when updating `capacity_provider_strategy` to avoid ECS service recreation after recent AWS changes +``` diff --git a/.changelog/43534.txt b/.changelog/43534.txt new file mode 100644 index 000000000000..de5d2d6e470e --- /dev/null +++ b/.changelog/43534.txt @@ -0,0 +1,3 @@ +```release-note:bug +resource/aws_timestreaminfluxdb_db_instance: Don't mark `network_type` as [ForceNew](https://developer.hashicorp.com/terraform/plugin/sdkv2/schemas/schema-behaviors#forcenew) if the value is not configured. 
This fixes a problem with `terraform apply -refresh=false` after upgrade from `v5.90.0` and below +``` \ No newline at end of file diff --git a/.changelog/43539.txt b/.changelog/43539.txt new file mode 100644 index 000000000000..36d07e6c254f --- /dev/null +++ b/.changelog/43539.txt @@ -0,0 +1,12 @@ +```release-note:enhancement +resource/aws_globalaccelerator_custom_routing_endpoint_group: Add resource identity support +``` +```release-note:enhancement +resource/aws_globalaccelerator_custom_routing_listener: Add resource identity support +``` +```release-note:enhancement +resource/aws_globalaccelerator_endpoint_group: Add resource identity support +``` +```release-note:enhancement +resource/aws_globalaccelerator_listener: Add resource identity support +``` diff --git a/.changelog/43540.txt b/.changelog/43540.txt new file mode 100644 index 000000000000..8df37994dc84 --- /dev/null +++ b/.changelog/43540.txt @@ -0,0 +1,21 @@ +```release-note:enhancement +resource/aws_imagebuilder_container_recipe: Add resource identity support +``` +```release-note:enhancement +resource/aws_imagebuilder_distribution_configuration: Add resource identity support +``` +```release-note:enhancement +resource/aws_imagebuilder_image: Add resource identity support +``` +```release-note:enhancement +resource/aws_imagebuilder_image_pipeline: Add resource identity support +``` +```release-note:enhancement +resource/aws_imagebuilder_image_recipe: Add resource identity support +``` +```release-note:enhancement +resource/aws_imagebuilder_infrastructure_configuration: Add resource identity support +``` +```release-note:enhancement +resource/aws_imagebuilder_workflow: Add resource identity support +``` diff --git a/.changelog/43542.txt b/.changelog/43542.txt new file mode 100644 index 000000000000..278735a5565f --- /dev/null +++ b/.changelog/43542.txt @@ -0,0 +1,11 @@ +```release-note:enhancement +resource/aws_inspector_assessment_target: Add resource identity support +``` + 
+```release-note:enhancement +resource/aws_inspector_assessment_template: Add resource identity support +``` + +```release-note:enhancement +resource/aws_inspector_resource_group: Add resource identity support +``` diff --git a/.changelog/43545.txt b/.changelog/43545.txt new file mode 100644 index 000000000000..793936ca0427 --- /dev/null +++ b/.changelog/43545.txt @@ -0,0 +1,3 @@ +```release-note:bug +resource/aws_servicequotas_service_quota: Add validation, during `create`, to check if new value is less than current value of quota +``` diff --git a/.changelog/43557.txt b/.changelog/43557.txt new file mode 100644 index 000000000000..5e4daca663af --- /dev/null +++ b/.changelog/43557.txt @@ -0,0 +1,3 @@ +```release-note:new-resource +aws_connect_phone_number_contact_flow_association +``` \ No newline at end of file diff --git a/.changelog/43558.txt b/.changelog/43558.txt new file mode 100644 index 000000000000..a414b048cac7 --- /dev/null +++ b/.changelog/43558.txt @@ -0,0 +1,3 @@ +```release-note:bug +resource/aws_ecs_service: Fix unspecified `test_listener_rule` incorrectly being set as empty string in `load_balancer.advanced_configuration` block +``` diff --git a/.changelog/43560.txt b/.changelog/43560.txt new file mode 100644 index 000000000000..1b24ec46efaa --- /dev/null +++ b/.changelog/43560.txt @@ -0,0 +1,7 @@ +```release-note:enhancement +resource/aws_ssm_patch_baseline: Add `available_security_updates_compliance_status` argument +``` + +```release-note:enhancement +data-source/aws_ssm_patch_baseline: Add `available_security_updates_compliance_status` argument +``` \ No newline at end of file diff --git a/.changelog/43561.txt b/.changelog/43561.txt new file mode 100644 index 000000000000..cb9014c63684 --- /dev/null +++ b/.changelog/43561.txt @@ -0,0 +1,3 @@ +```release-note:new-resource +aws_wafv2_web_acl_rule_group_association +``` \ No newline at end of file diff --git a/.changelog/43562.txt b/.changelog/43562.txt new file mode 100644 index 
000000000000..08fd04a23e69 --- /dev/null +++ b/.changelog/43562.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_ssm_service_setting: Support short format (with `/ssm/` prefix) for `setting_id` +``` diff --git a/.changelog/43565.txt b/.changelog/43565.txt new file mode 100644 index 000000000000..00e284eeed3b --- /dev/null +++ b/.changelog/43565.txt @@ -0,0 +1,7 @@ +```release-note:enhancement +resource/aws_ebs_volume: Add `volume_initialization_rate` argument +``` + +```release-note:enhancement +data-source/aws_ebs_volume: Add `volume_initialization_rate` attribute +``` diff --git a/.changelog/43576.txt b/.changelog/43576.txt new file mode 100644 index 000000000000..5f457fd57de3 --- /dev/null +++ b/.changelog/43576.txt @@ -0,0 +1,3 @@ +```release-note:bug +resource/aws_cloudwatch_log_delivery_destination: Fix update failure when tags are set +``` diff --git a/.changelog/43582.txt b/.changelog/43582.txt new file mode 100644 index 000000000000..33f1792350d2 --- /dev/null +++ b/.changelog/43582.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +data-source/aws_ecs_service: Support `load_balancer` attribute +``` diff --git a/.changelog/43587.txt b/.changelog/43587.txt new file mode 100644 index 000000000000..897dc3123f1b --- /dev/null +++ b/.changelog/43587.txt @@ -0,0 +1,3 @@ +```release-note:new-resource +aws_quicksight_key_registration +``` \ No newline at end of file diff --git a/.changelog/43589.txt b/.changelog/43589.txt new file mode 100644 index 000000000000..f419b609b328 --- /dev/null +++ b/.changelog/43589.txt @@ -0,0 +1,7 @@ +```release-note:bug +resource/aws_s3_bucket: Accept `NoSuchTagSetError` responses from S3-compatible services +``` + +```release-note:bug +resource/aws_s3_object: Accept `NoSuchTagSetError` responses from S3-compatible services +``` diff --git a/.changelog/43590.txt b/.changelog/43590.txt new file mode 100644 index 000000000000..e71fd45ddd74 --- /dev/null +++ b/.changelog/43590.txt @@ -0,0 +1,3 @@ +```release-note:bug 
+resource/aws_s3_bucket_lifecycle_configuration: Do not warn if no filter element is set +``` diff --git a/.changelog/43595.txt b/.changelog/43595.txt new file mode 100644 index 000000000000..6057109a1fe5 --- /dev/null +++ b/.changelog/43595.txt @@ -0,0 +1,3 @@ +```release-note:bug +resource/aws_bedrockagent_flow: Fix `missing required field, CreateFlowInput.Definition.Nodes[0].Configuration[prompt].SourceConfiguration[resource].PromptArn` errors on Create +``` diff --git a/.changelog/43596.txt b/.changelog/43596.txt new file mode 100644 index 000000000000..043667824504 --- /dev/null +++ b/.changelog/43596.txt @@ -0,0 +1,3 @@ +```release-note:new-resource +aws_quicksight_ip_restriction +``` \ No newline at end of file diff --git a/.changelog/43597.txt b/.changelog/43597.txt new file mode 100644 index 000000000000..72970ed2001a --- /dev/null +++ b/.changelog/43597.txt @@ -0,0 +1,7 @@ +```release-note:enhancement +data-source/aws_wafv2_web_acl: Add `resource_arn` argument to enable finding web ACLs by resource ARN +``` + +```release-note:enhancement +data-source/aws_wafv2_web_acl: Add support for `CLOUDFRONT` `scope` web ACLs using `resource_arn` +``` diff --git a/.changelog/43605.txt b/.changelog/43605.txt new file mode 100644 index 000000000000..621d5fdad6f2 --- /dev/null +++ b/.changelog/43605.txt @@ -0,0 +1,3 @@ +```release-note:bug +resource/aws_fsx_lustre_file_system: Fix validation of SSD read cache size for file systems using the Intelligent-Tiering storage class +``` diff --git a/.changelog/43606.txt b/.changelog/43606.txt new file mode 100644 index 000000000000..9f3f95a7bec8 --- /dev/null +++ b/.changelog/43606.txt @@ -0,0 +1,3 @@ +```release-note:bug +resource/aws_servicequotas_service_quota: Fix error when updating a pending service quota request +``` \ No newline at end of file diff --git a/.changelog/43613.txt b/.changelog/43613.txt new file mode 100644 index 000000000000..7e7aa90d51ce --- /dev/null +++ b/.changelog/43613.txt @@ -0,0 +1,31 @@ 
+```release-note:new-resource +aws_quicksight_custom_permissions +``` + +```release-note:new-resource +aws_quicksight_role_custom_permission +``` + +```release-note:new-resource +aws_quicksight_user_custom_permission +``` + +```release-note:enhancement +resource/aws_quicksight_user: Change `user_name` to Optional and Computed +``` + +```release-note:enhancement +resource/aws_quicksight_user: Support `IAM_IDENTITY_CENTER` as a valid value for `identity_type` +``` + +```release-note:enhancement +resource/aws_quicksight_user: Support `RESTRICTED_AUTHOR` and `RESTRICTED_READER` as valid values for `user_role` +``` + +```release-note:enhancement +resource/aws_quicksight_user: Add plan-time validation of `iam_arn` +``` + +```release-note:enhancement +data-source/aws_quicksight_user: Add `custom_permissions_name` attribute +``` \ No newline at end of file diff --git a/.changelog/43614.txt b/.changelog/43614.txt new file mode 100644 index 000000000000..f978066041cd --- /dev/null +++ b/.changelog/43614.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_cleanrooms_collaboration: Add `analytics_engine` argument +``` diff --git a/.changelog/43630.txt b/.changelog/43630.txt new file mode 100644 index 000000000000..7f8157a8ac96 --- /dev/null +++ b/.changelog/43630.txt @@ -0,0 +1,7 @@ +```release-note:enhancement +resource/aws_s3_access_point: Add `tags` argument and `tags_all` attribute. This functionality requires the `s3:ListTagsForResource`, `s3:TagResource`, and `s3:UntagResource` IAM permissions +``` + +```release-note:enhancement +data-source/aws_s3_access_point: Add `tags` attribute. 
This functionality requires the `s3:ListTagsForResource` IAM permission +``` \ No newline at end of file diff --git a/.changelog/43642.txt b/.changelog/43642.txt new file mode 100644 index 000000000000..1f3463d92979 --- /dev/null +++ b/.changelog/43642.txt @@ -0,0 +1,7 @@ +```release-note:enhancement +resource/aws_ecr_repository: Add `image_tag_mutability_exclusion_filter` argument +``` + +```release-note:enhancement +resource/aws_ecr_repository: Support `IMMUTABLE_WITH_EXCLUSION` and `MUTABLE_WITH_EXCLUSION` as valid values for `image_tag_mutability` +``` \ No newline at end of file diff --git a/.changelog/43647.txt b/.changelog/43647.txt new file mode 100644 index 000000000000..8979285fde84 --- /dev/null +++ b/.changelog/43647.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_kinesis_firehose_delivery_stream: Add `iceberg_configuration.append_only` argument +``` diff --git a/.changelog/43650.txt b/.changelog/43650.txt new file mode 100644 index 000000000000..6b4f271ca176 --- /dev/null +++ b/.changelog/43650.txt @@ -0,0 +1,8 @@ +```release-note:new-resource +aws_odb_cloud_exadata_infrastructure +``` + +```release-note:new-data-source +aws_odb_cloud_exadata_infrastructure +``` + diff --git a/.changelog/43654.txt b/.changelog/43654.txt new file mode 100644 index 000000000000..bd84ca1c52a8 --- /dev/null +++ b/.changelog/43654.txt @@ -0,0 +1,3 @@ +```release-note:bug +resource/aws_bedrockagent_flow: Prevent `created_at` becoming `null` on Update +``` diff --git a/.changelog/43659.txt b/.changelog/43659.txt new file mode 100644 index 000000000000..e57a56c9e81a --- /dev/null +++ b/.changelog/43659.txt @@ -0,0 +1,3 @@ +```release-note:bug +provider: Fix failure to detect resources deleted outside of Terraform as missing for numerous resource types +``` \ No newline at end of file diff --git a/.changelog/43661.txt b/.changelog/43661.txt new file mode 100644 index 000000000000..b06b5906836a --- /dev/null +++ b/.changelog/43661.txt @@ -0,0 +1,3 @@ 
+```release-note:bug +resource/aws_ec2_managed_prefix_list: Fix `PrefixListVersionMismatch: The prefix list has the incorrect version number` errors when updating entry description +``` \ No newline at end of file diff --git a/.changelog/43667.txt b/.changelog/43667.txt new file mode 100644 index 000000000000..3ed1f46ee9ac --- /dev/null +++ b/.changelog/43667.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_docdb_cluster: Add `serverless_v2_scaling_configuration` argument in support of [Amazon DocumentDB serverless](https://docs.aws.amazon.com/documentdb/latest/developerguide/docdb-serverless.html) +``` diff --git a/.changelog/43672.txt b/.changelog/43672.txt new file mode 100644 index 000000000000..9e196e510a71 --- /dev/null +++ b/.changelog/43672.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_lightsail_static_ip: Support resource import +``` diff --git a/.changelog/43673.txt b/.changelog/43673.txt new file mode 100644 index 000000000000..e74f3cbcc905 --- /dev/null +++ b/.changelog/43673.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_inspector2_enabler: Support resource import +``` diff --git a/.changelog/43674.txt b/.changelog/43674.txt new file mode 100644 index 000000000000..e4bca455c146 --- /dev/null +++ b/.changelog/43674.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_opensearch_domain_policy: Support resource import +``` diff --git a/.changelog/43675.txt b/.changelog/43675.txt new file mode 100644 index 000000000000..f0d443f5340b --- /dev/null +++ b/.changelog/43675.txt @@ -0,0 +1,3 @@ +```release-note:new-resource +aws_networkfirewall_vpc_endpoint_association +``` \ No newline at end of file diff --git a/.changelog/43676.txt b/.changelog/43676.txt new file mode 100644 index 000000000000..9a1ea0a54878 --- /dev/null +++ b/.changelog/43676.txt @@ -0,0 +1,3 @@ +```release-note:bug +ephemeral-resource/aws_lambda_invocation: Fix plan inconsistency issue due to improperly assigned payload values +``` 
diff --git a/.changelog/43677.txt b/.changelog/43677.txt new file mode 100644 index 000000000000..2ad6d986e2a0 --- /dev/null +++ b/.changelog/43677.txt @@ -0,0 +1,4 @@ +```release-note:enhancement +resource/aws_computeoptimizer_recommendation_preferences: Add `AuroraDBClusterStorage` as a valid `resource_type` +``` + diff --git a/.changelog/43693.txt b/.changelog/43693.txt new file mode 100644 index 000000000000..b3a2df3a11d9 --- /dev/null +++ b/.changelog/43693.txt @@ -0,0 +1,3 @@ +```release-note:bug +resource/aws_wafv2_regex_pattern_set: Remove maximum items limit on the `regular_expression` argument +``` \ No newline at end of file diff --git a/.changelog/43697.txt b/.changelog/43697.txt new file mode 100644 index 000000000000..bd31a7d1e318 --- /dev/null +++ b/.changelog/43697.txt @@ -0,0 +1,6 @@ +```release-note:enhancement +resource/aws_ivschat_room: Add resource identity support +``` +```release-note:enhancement +resource/aws_ivschat_logging_configuration: Add resource identity support +``` diff --git a/.changelog/43699.txt b/.changelog/43699.txt new file mode 100644 index 000000000000..cc05a5c90b00 --- /dev/null +++ b/.changelog/43699.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_lb: Add `secondary_ips_auto_assigned_per_subnet` argument for Network Load Balancers +``` \ No newline at end of file diff --git a/.changelog/43700.txt b/.changelog/43700.txt new file mode 100644 index 000000000000..55fe35d8e024 --- /dev/null +++ b/.changelog/43700.txt @@ -0,0 +1,3 @@ +```release-note:new-action +aws_ec2_stop_instance +``` \ No newline at end of file diff --git a/.changelog/43702.txt b/.changelog/43702.txt new file mode 100644 index 000000000000..c7b51d0eb2c6 --- /dev/null +++ b/.changelog/43702.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_bedrock_guardrail: Add `input_action`, `output_action`, `input_enabled`, and `output_enabled` attributes to `sensitive_information_policy_config.pii_entities_config` and 
`sensitive_information_policy_config.regexes_config` configuration blocks +``` diff --git a/.changelog/43704.txt b/.changelog/43704.txt new file mode 100644 index 000000000000..4d6cb1935fdd --- /dev/null +++ b/.changelog/43704.txt @@ -0,0 +1,9 @@ +```release-note:enhancement +resource/aws_ivs_channel: Add resource identity support +``` +```release-note:enhancement +resource/aws_ivs_playback_key_pair: Add resource identity support +``` +```release-note:enhancement +resource/aws_ivs_recording_configuration: Add resource identity support +``` diff --git a/.changelog/43707.txt b/.changelog/43707.txt new file mode 100644 index 000000000000..78331eb79bfe --- /dev/null +++ b/.changelog/43707.txt @@ -0,0 +1,3 @@ +```release-note:bug +resource/aws_s3tables_table_bucket: Fix crash on `maintenance_configuration` read failure +``` diff --git a/.changelog/43708.txt b/.changelog/43708.txt new file mode 100644 index 000000000000..a7ce91e1dbc1 --- /dev/null +++ b/.changelog/43708.txt @@ -0,0 +1,3 @@ +```release-note:bug +resource/aws_nat_gateway: Fix inconsistent final plan for `secondary_private_ip_addresses` +``` diff --git a/.changelog/43710.txt b/.changelog/43710.txt new file mode 100644 index 000000000000..156ed87fa3c3 --- /dev/null +++ b/.changelog/43710.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_sqs_queue: Increase upper limit of `max_message_size` from 256 KiB to 1024 KiB +``` diff --git a/.changelog/43715.txt b/.changelog/43715.txt new file mode 100644 index 000000000000..ebee35e8787e --- /dev/null +++ b/.changelog/43715.txt @@ -0,0 +1,7 @@ +```release-note:new-resource +aws_odb_network +``` + +```release-note:new-data-source +aws_odb_network +``` \ No newline at end of file diff --git a/.changelog/43716.txt b/.changelog/43716.txt new file mode 100644 index 000000000000..6b6ad616d213 --- /dev/null +++ b/.changelog/43716.txt @@ -0,0 +1,3 @@ +```release-note:bug +resource/aws_kms_key: Restore pre-v6.3.0 retry delay behavior when waiting for continuous 
target state occurrences. This fixes certain tag update timeouts +``` \ No newline at end of file diff --git a/.changelog/43719.txt b/.changelog/43719.txt new file mode 100644 index 000000000000..6352f6cd5ae4 --- /dev/null +++ b/.changelog/43719.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_cloudwatch_log_group: Add resource identity support +``` diff --git a/.changelog/43722.txt b/.changelog/43722.txt new file mode 100644 index 000000000000..6d54855089bf --- /dev/null +++ b/.changelog/43722.txt @@ -0,0 +1,7 @@ +```release-note:bug +resource/aws_instance: Prevent destruction of resource when `disable_api_termination` is `true` +``` + +```release-note:enhancement +resource/aws_instance: Adds `force_destroy` argument that allows destruction even when `disable_api_termination` and `disable_api_stop` are `true` +``` \ No newline at end of file diff --git a/.changelog/43729.txt b/.changelog/43729.txt new file mode 100644 index 000000000000..143f37af70d8 --- /dev/null +++ b/.changelog/43729.txt @@ -0,0 +1,3 @@ +```release-note:new-resource +aws_workspacesweb_identity_provider +``` \ No newline at end of file diff --git a/.changelog/43735.txt b/.changelog/43735.txt new file mode 100644 index 000000000000..f864e6a54db9 --- /dev/null +++ b/.changelog/43735.txt @@ -0,0 +1,3 @@ +```release-note:new-resource +aws_workspacesweb_browser_settings_association +``` \ No newline at end of file diff --git a/.changelog/43736.txt b/.changelog/43736.txt new file mode 100644 index 000000000000..55064e9fbfff --- /dev/null +++ b/.changelog/43736.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_ssm_parameter: Add resource identity support +``` diff --git a/.changelog/43742.txt b/.changelog/43742.txt new file mode 100644 index 000000000000..b0e495dd7288 --- /dev/null +++ b/.changelog/43742.txt @@ -0,0 +1,7 @@ +```release-note:enhancement +resource/aws_networkmanager_vpc_attachment: Change `options` to Optional and Computed +``` + +```release-note:enhancement 
+resource/aws_networkmanager_vpc_attachment: Add `options.dns_support` and `options.security_group_referencing_support` arguments +``` \ No newline at end of file diff --git a/.changelog/43744.txt b/.changelog/43744.txt new file mode 100644 index 000000000000..59630af5cee2 --- /dev/null +++ b/.changelog/43744.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_security_group: Add parameterized resource identity support +``` diff --git a/.changelog/43751.txt b/.changelog/43751.txt new file mode 100644 index 000000000000..225692d53df3 --- /dev/null +++ b/.changelog/43751.txt @@ -0,0 +1,3 @@ +```release-note:bug +resource/aws_sagemaker_image: Fix `image_name` regular expression validation +``` diff --git a/.changelog/43752.txt b/.changelog/43752.txt new file mode 100644 index 000000000000..3267860f0ee0 --- /dev/null +++ b/.changelog/43752.txt @@ -0,0 +1,7 @@ +```release-note:enhancement +resource/aws_eks_cluster: Add `deletion_protection` argument +``` + +```release-note:enhancement +data-source/aws_eks_cluster: Add `deletion_protection` attribute +``` \ No newline at end of file diff --git a/.changelog/43753.txt b/.changelog/43753.txt new file mode 100644 index 000000000000..ba7096ffde92 --- /dev/null +++ b/.changelog/43753.txt @@ -0,0 +1,7 @@ +```release-note:bug +resource/aws_lambda_function: Fix missing value for `reserved_concurrent_executions` attribute when a published version exists. This functionality requires the `lambda:GetFunctionConcurrency` IAM permission +``` + +```release-note:bug +data-source/aws_lambda_function: Fix missing value for `reserved_concurrent_executions` attribute when a published version exists. 
This functionality requires the `lambda:GetFunctionConcurrency` IAM permission +``` diff --git a/.changelog/43757.txt b/.changelog/43757.txt new file mode 100644 index 000000000000..bc206c047806 --- /dev/null +++ b/.changelog/43757.txt @@ -0,0 +1,7 @@ +```release-note:new-resource +aws_odb_network_peering_connection +``` + +```release-note:new-data-source +aws_odb_network_peering_connection +``` \ No newline at end of file diff --git a/.changelog/43758.txt b/.changelog/43758.txt new file mode 100644 index 000000000000..1c84fc9372a6 --- /dev/null +++ b/.changelog/43758.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_cloudwatch_event_rule: Add resource identity support +``` diff --git a/.changelog/43759.txt b/.changelog/43759.txt new file mode 100644 index 000000000000..74decbdf3aa0 --- /dev/null +++ b/.changelog/43759.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_cloudwatch_metric_alarm: Add resource identity support +``` diff --git a/.changelog/43764.txt b/.changelog/43764.txt new file mode 100644 index 000000000000..e0627ad4ecf7 --- /dev/null +++ b/.changelog/43764.txt @@ -0,0 +1,3 @@ +```release-note:bug +resource/aws_s3tables_table: Fix `runtime error: invalid memory address or nil pointer dereference` panics when `GetTableMaintenanceConfiguration` returns an error +``` \ No newline at end of file diff --git a/.changelog/43773.txt b/.changelog/43773.txt new file mode 100644 index 000000000000..0179cb457aca --- /dev/null +++ b/.changelog/43773.txt @@ -0,0 +1,3 @@ +```release-note:new-resource +aws_workspacesweb_data_protection_settings_association +``` \ No newline at end of file diff --git a/.changelog/43774.txt b/.changelog/43774.txt new file mode 100644 index 000000000000..01e3edf0e656 --- /dev/null +++ b/.changelog/43774.txt @@ -0,0 +1,3 @@ +```release-note:new-resource +aws_workspacesweb_ip_access_settings_association +``` \ No newline at end of file diff --git a/.changelog/43775.txt b/.changelog/43775.txt new file mode 100644 
index 000000000000..8c83f9156c77 --- /dev/null +++ b/.changelog/43775.txt @@ -0,0 +1,3 @@ +```release-note:new-resource +aws_workspacesweb_network_settings_association +``` \ No newline at end of file diff --git a/.changelog/43776.txt b/.changelog/43776.txt new file mode 100644 index 000000000000..423671d48fb3 --- /dev/null +++ b/.changelog/43776.txt @@ -0,0 +1,3 @@ +```release-note:new-resource +aws_workspacesweb_user_access_logging_settings_association +``` \ No newline at end of file diff --git a/.changelog/43777.txt b/.changelog/43777.txt new file mode 100644 index 000000000000..99d66e2a4bea --- /dev/null +++ b/.changelog/43777.txt @@ -0,0 +1,3 @@ +```release-note:new-resource +aws_workspacesweb_user_settings_association +``` \ No newline at end of file diff --git a/.changelog/43778.txt b/.changelog/43778.txt new file mode 100644 index 000000000000..60714b3ec13c --- /dev/null +++ b/.changelog/43778.txt @@ -0,0 +1,3 @@ +```release-note:new-resource +aws_workspacesweb_trust_store_association +``` \ No newline at end of file diff --git a/.changelog/43787.txt b/.changelog/43787.txt new file mode 100644 index 000000000000..164c750a17f2 --- /dev/null +++ b/.changelog/43787.txt @@ -0,0 +1,7 @@ +```release-note:new-resource +aws_appsync_api +``` + +```release-note:new-resource +aws_appsync_channel_namespace +``` \ No newline at end of file diff --git a/.changelog/43790.txt b/.changelog/43790.txt new file mode 100644 index 000000000000..0c9b5cda41c5 --- /dev/null +++ b/.changelog/43790.txt @@ -0,0 +1,7 @@ +```release-note:new-resource +aws_odb_cloud_vm_cluster +``` + +```release-note:new-data-source +aws_odb_cloud_vm_cluster +``` \ No newline at end of file diff --git a/.changelog/43792.txt b/.changelog/43792.txt new file mode 100644 index 000000000000..19f94825d01a --- /dev/null +++ b/.changelog/43792.txt @@ -0,0 +1,15 @@ +```release-note:new-data-source +aws_odb_db_server +``` + +```release-note:new-data-source +aws_odb_db_servers +``` + 
+```release-note:new-data-source +aws_odb_db_node +``` + +```release-note:new-data-source +aws_odb_db_nodes +``` \ No newline at end of file diff --git a/.changelog/43807.txt b/.changelog/43807.txt new file mode 100644 index 000000000000..a802df2e0e7b --- /dev/null +++ b/.changelog/43807.txt @@ -0,0 +1,3 @@ +```release-note:bug +resource/aws_sagemaker_user_profile: Fix incomplete regex for `user_profile_name` +``` \ No newline at end of file diff --git a/.changelog/43809.txt b/.changelog/43809.txt new file mode 100644 index 000000000000..a81513115f9a --- /dev/null +++ b/.changelog/43809.txt @@ -0,0 +1,7 @@ +```release-note:new-resource +aws_odb_cloud_autonomous_vm_cluster +``` + +```release-note:new-data-source +aws_odb_cloud_autonomous_vm_cluster +``` \ No newline at end of file diff --git a/.changelog/43816.txt b/.changelog/43816.txt new file mode 100644 index 000000000000..16e4a0cb338e --- /dev/null +++ b/.changelog/43816.txt @@ -0,0 +1,3 @@ +```release-note:bug +resource/aws_ecs_service: Fix tagging failure after upgrading to v6 provider +``` diff --git a/.changelog/43817.txt b/.changelog/43817.txt new file mode 100644 index 000000000000..9919e55d102b --- /dev/null +++ b/.changelog/43817.txt @@ -0,0 +1,3 @@ +```release-note:new-resource +aws_cognito_managed_login_branding +``` \ No newline at end of file diff --git a/.changelog/43819.txt b/.changelog/43819.txt new file mode 100644 index 000000000000..3f4b36d858b4 --- /dev/null +++ b/.changelog/43819.txt @@ -0,0 +1,3 @@ +```release-note:bug +resource/aws_storagegateway_gateway: Handle `InvalidGatewayRequestException: The specified gateway is not connected` errors during Read by using the [`ListGateways` API](https://docs.aws.amazon.com/storagegateway/latest/APIReference/API_ListGateways.html) to return minimal information about a disconnected gateway. 
This functionality requires the `storagegateway:ListGateways` IAM permission +``` \ No newline at end of file diff --git a/.changelog/43821.txt b/.changelog/43821.txt new file mode 100644 index 000000000000..d7fd9cb0b265 --- /dev/null +++ b/.changelog/43821.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_lambda_function: Add resource identity support +``` diff --git a/.changelog/43825.txt b/.changelog/43825.txt new file mode 100644 index 000000000000..2c1ac5c6c8c1 --- /dev/null +++ b/.changelog/43825.txt @@ -0,0 +1,7 @@ +```release-note:new-data-source +aws_odb_gi_versions +``` + +```release-note:new-data-source +aws_odb_db_system_shapes +``` \ No newline at end of file diff --git a/.changelog/43830.txt b/.changelog/43830.txt new file mode 100644 index 000000000000..558cf3fe21a1 --- /dev/null +++ b/.changelog/43830.txt @@ -0,0 +1,11 @@ +```release-note:enhancement +resource/aws_sns_topic_policy: Add resource identity support +``` + +```release-note:enhancement +resource/aws_sns_topic_data_protection_policy: Add resource identity support +``` + +```release-note:enhancement +resource/aws_sns_topic_subscription: Add resource identity support +``` diff --git a/.changelog/43833.txt b/.changelog/43833.txt new file mode 100644 index 000000000000..cf105056899a --- /dev/null +++ b/.changelog/43833.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_subnet: Add resource identity support +``` diff --git a/.changelog/43841.txt b/.changelog/43841.txt new file mode 100644 index 000000000000..2d606262b4f8 --- /dev/null +++ b/.changelog/43841.txt @@ -0,0 +1,3 @@ +```release-note:bug +resource/aws_elasticache_cluster: Fix `provider produced unexpected value` for `cache_usage_limits` argument. 
+``` \ No newline at end of file diff --git a/.changelog/43852.txt b/.changelog/43852.txt new file mode 100644 index 000000000000..79bd8e40b7a6 --- /dev/null +++ b/.changelog/43852.txt @@ -0,0 +1,3 @@ +```release-note:bug +data-source/aws_networkfirewall_firewall_policy: Add missing schema definition for `firewall_policy.stateful_engine_options.flow_timeouts` +``` diff --git a/.changelog/43863.txt b/.changelog/43863.txt new file mode 100644 index 000000000000..d0b2f6d8fa5e --- /dev/null +++ b/.changelog/43863.txt @@ -0,0 +1,3 @@ +```release-note:new-resource +aws_workspacesweb_session_logger +``` \ No newline at end of file diff --git a/.changelog/43866.txt b/.changelog/43866.txt new file mode 100644 index 000000000000..9c4649a3dfab --- /dev/null +++ b/.changelog/43866.txt @@ -0,0 +1,3 @@ +```release-note:new-resource +aws_workspacesweb_session_logger_association +``` \ No newline at end of file diff --git a/.changelog/43871.txt b/.changelog/43871.txt new file mode 100644 index 000000000000..f3af9a55e871 --- /dev/null +++ b/.changelog/43871.txt @@ -0,0 +1,3 @@ +```release-note:bug +resource/aws_ecs_service: Fix refreshing `service_connect_configuration` when deleted outside of Terraform +``` \ No newline at end of file diff --git a/.changelog/43872.txt b/.changelog/43872.txt new file mode 100644 index 000000000000..a586f8658f19 --- /dev/null +++ b/.changelog/43872.txt @@ -0,0 +1,9 @@ +```release-note:enhancement +resource/aws_secretsmanager_secret: Add resource identity support +``` +```release-note:enhancement +resource/aws_secretsmanager_secret_policy: Add resource identity support +``` +```release-note:enhancement +resource/aws_secretsmanager_secret_rotation: Add resource identity support +``` diff --git a/.changelog/43874.txt b/.changelog/43874.txt new file mode 100644 index 000000000000..c29524de50f7 --- /dev/null +++ b/.changelog/43874.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_lightsail_static_ip_attachment: Support resource import +``` diff
--git a/.changelog/43883.txt b/.changelog/43883.txt new file mode 100644 index 000000000000..7c290e931e66 --- /dev/null +++ b/.changelog/43883.txt @@ -0,0 +1,3 @@ +```release-note:new-resource +aws_lakeformation_lf_tag_expression +``` diff --git a/.changelog/43886.txt b/.changelog/43886.txt new file mode 100644 index 000000000000..36421993ebbf --- /dev/null +++ b/.changelog/43886.txt @@ -0,0 +1,11 @@ +```release-note:enhancement +resource/aws_ecr_repository_creation_template: Add `image_tag_mutability_exclusion_filter` configuration block +``` + +```release-note:enhancement +data-source/aws_ecr_repository_creation_template: Add `image_tag_mutability_exclusion_filter` attribute +``` + +```release-note:enhancement +data-source/aws_ecr_repository: Add `image_tag_mutability_exclusion_filter` attribute +``` diff --git a/.changelog/43910.txt b/.changelog/43910.txt new file mode 100644 index 000000000000..d8963d4b4c67 --- /dev/null +++ b/.changelog/43910.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_route: Add resource identity support +``` diff --git a/.changelog/43914.txt b/.changelog/43914.txt new file mode 100644 index 000000000000..8038c6bcdf75 --- /dev/null +++ b/.changelog/43914.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_dynamodb_contributor_insights: Add `mode` argument in support of [CloudWatch contributor insights modes](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/contributorinsights_HowItWorks.html#contributorinsights_HowItWorks.Modes) +``` diff --git a/.changelog/43916.txt b/.changelog/43916.txt new file mode 100644 index 000000000000..6730b51425a7 --- /dev/null +++ b/.changelog/43916.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_iot_thing_principal_attachment: Add `thing_principal_type` argument +``` diff --git a/.changelog/43918.txt b/.changelog/43918.txt new file mode 100644 index 000000000000..33e769b8dbec --- /dev/null +++ b/.changelog/43918.txt @@ -0,0 +1,12 @@ 
+```release-note:enhancement +resource/aws_sqs_queue: Add resource identity support +``` +```release-note:enhancement +resource/aws_sqs_queue_policy: Add resource identity support +``` +```release-note:enhancement +resource/aws_sqs_queue_redrive_policy: Add resource identity support +``` +```release-note:enhancement +resource/aws_sqs_queue_redrive_allow_policy: Add resource identity support +``` diff --git a/.changelog/43919.txt b/.changelog/43919.txt new file mode 100644 index 000000000000..92546b610342 --- /dev/null +++ b/.changelog/43919.txt @@ -0,0 +1,3 @@ +```release-note:bug +resource/aws_rds_cluster: Fix the behavior when enabling `database_insights_mode = "advanced"` without changing the Performance Insights retention window +``` diff --git a/.changelog/43921.txt b/.changelog/43921.txt new file mode 100644 index 000000000000..56f25ff5dbca --- /dev/null +++ b/.changelog/43921.txt @@ -0,0 +1,7 @@ +```release-note:enhancement +resource/aws_signer_signing_profile: Add `signing_parameters` argument +``` + +```release-note:enhancement +data-source/aws_signer_signing_profile: Add `signing_material` and `signing_parameters` attributes +``` diff --git a/.changelog/43922.txt b/.changelog/43922.txt new file mode 100644 index 000000000000..a832774e42a8 --- /dev/null +++ b/.changelog/43922.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_s3tables_table_bucket: Add `force_destroy` argument +``` \ No newline at end of file diff --git a/.changelog/43925.txt b/.changelog/43925.txt new file mode 100644 index 000000000000..192e37d84e9a --- /dev/null +++ b/.changelog/43925.txt @@ -0,0 +1,3 @@ +```release-note:bug +resource/aws_imagebuilder_lifecycle_policy: Fix `Provider produced inconsistent result after apply` error when `policy_detail.exclusion_rules.amis.is_public` is omitted +``` diff --git a/.changelog/43926.txt b/.changelog/43926.txt new file mode 100644 index 000000000000..d017e0830deb --- /dev/null +++ b/.changelog/43926.txt @@ -0,0 +1,3 @@ +```release-note:bug
+resource/aws_cognito_user_pool: Fixed to accept an empty `email_mfa_configuration` block +``` diff --git a/.changelog/43942.txt b/.changelog/43942.txt new file mode 100644 index 000000000000..60bfebd60dbb --- /dev/null +++ b/.changelog/43942.txt @@ -0,0 +1,3 @@ +```release-note:bug +resource/aws_rds_cluster: Fixes the behavior when modifying `database_insights_mode` when using custom KMS key +``` diff --git a/.changelog/43946.txt b/.changelog/43946.txt new file mode 100644 index 000000000000..f2d93d1515f0 --- /dev/null +++ b/.changelog/43946.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_mwaa_environment: Add `worker_replacement_strategy` argument +``` diff --git a/.changelog/43950.txt b/.changelog/43950.txt new file mode 100644 index 000000000000..7c0849a17095 --- /dev/null +++ b/.changelog/43950.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_inspector2_filter: Support `code_repository_project_name`, `code_repository_provider_type`, `ecr_image_in_use_count`, and `ecr_image_last_in_use_at` in `filter_criteria` +``` diff --git a/.changelog/43953.txt b/.changelog/43953.txt new file mode 100644 index 000000000000..b96a67595c45 --- /dev/null +++ b/.changelog/43953.txt @@ -0,0 +1,15 @@ +```release-note:bug +resource/aws_instance: Adds `primary_network_interface` to allow importing resources with custom primary network interface. +``` + +```release-note:bug +resource/aws_spot_instance_request: Adds `primary_network_interface` to allow importing resources with custom primary network interface. +``` + +```release-note:note +resource/aws_instance: The `network_interface` block has been deprecated. Use `primary_network_interface` for the primary network interface and `aws_network_interface_attachment` resources for other network interfaces. +``` + +```release-note:note +resource/aws_spot_instance_request: The `network_interface` block has been deprecated. 
Use `primary_network_interface` for the primary network interface and `aws_network_interface_attachment` resources for other network interfaces. +``` diff --git a/.changelog/43954.txt b/.changelog/43954.txt new file mode 100644 index 000000000000..92cc8cb60a04 --- /dev/null +++ b/.changelog/43954.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_lambda_permission: Add resource identity support +``` diff --git a/.changelog/43955.txt b/.changelog/43955.txt new file mode 100644 index 000000000000..8e94f5403e01 --- /dev/null +++ b/.changelog/43955.txt @@ -0,0 +1,3 @@ +```release-note:new-action +aws_cloudfront_create_invalidation +``` \ No newline at end of file diff --git a/.changelog/43956.txt b/.changelog/43956.txt new file mode 100644 index 000000000000..d3254c1a25ae --- /dev/null +++ b/.changelog/43956.txt @@ -0,0 +1,3 @@ +```release-note:bug +resource/aws_servicecatalog_provisioned_product: Set `provisioning_parameters` and `provisioning_artifact_id` to the values from the last successful deployment when update fails +``` diff --git a/.changelog/43960.txt b/.changelog/43960.txt new file mode 100644 index 000000000000..ffb8546178a7 --- /dev/null +++ b/.changelog/43960.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_batch_job_queue: Adds List support +``` diff --git a/.changelog/43967.txt b/.changelog/43967.txt new file mode 100644 index 000000000000..478490d20dd5 --- /dev/null +++ b/.changelog/43967.txt @@ -0,0 +1,7 @@ +```release-note:enhancement +resource/aws_vpc_ipam: Add `metered_account` argument +``` + +```release-note:enhancement +data-source/aws_vpc_ipam: Add `metered_account` attribute +``` diff --git a/.changelog/43972.txt b/.changelog/43972.txt new file mode 100644 index 000000000000..c63e8a43cc9d --- /dev/null +++ b/.changelog/43972.txt @@ -0,0 +1,3 @@ +```release-note:new-action +aws_lambda_invoke +``` \ No newline at end of file diff --git a/.changelog/43976.txt b/.changelog/43976.txt new file mode 100644 index 
000000000000..99c74c0ece09 --- /dev/null +++ b/.changelog/43976.txt @@ -0,0 +1,27 @@ +```release-note:enhancement +resource/aws_s3_bucket_public_access_block: Add resource identity support +``` +```release-note:enhancement +resource/aws_s3_bucket_policy: Add resource identity support +``` +```release-note:enhancement +resource/aws_s3_bucket_ownership_controls: Add resource identity support +``` +```release-note:enhancement +resource/aws_s3_bucket_logging: Add resource identity support +``` +```release-note:enhancement +resource/aws_s3_bucket_server_side_encryption_configuration: Add resource identity support +``` +```release-note:enhancement +resource/aws_s3_bucket_versioning: Add resource identity support +``` +```release-note:enhancement +resource/aws_s3_bucket_notification: Add resource identity support +``` +```release-note:enhancement +resource/aws_s3_bucket_cors_configuration: Add resource identity support +``` +```release-note:enhancement +resource/aws_s3_bucket_website_configuration: Add resource identity support +``` diff --git a/.changelog/43981.txt b/.changelog/43981.txt new file mode 100644 index 000000000000..413d5bf9ca89 --- /dev/null +++ b/.changelog/43981.txt @@ -0,0 +1,3 @@ +```release-note:bug +resource/aws_imagebuilder_image_recipe: Increase upper limit of `block_device_mapping.ebs.iops` from `10000` to `100000` +``` diff --git a/.changelog/43984.txt b/.changelog/43984.txt new file mode 100644 index 000000000000..42e51ab16364 --- /dev/null +++ b/.changelog/43984.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_cloudwatch_event_target: Add resource identity support +``` diff --git a/.changelog/43986.txt b/.changelog/43986.txt new file mode 100644 index 000000000000..f05b41ac48b4 --- /dev/null +++ b/.changelog/43986.txt @@ -0,0 +1,7 @@ +```release-note:enhancement +resource/aws_ecs_service: Add `sigint_rollback` argument +``` + +```release-note:enhancement +resource/aws_ecs_service: Change `deployment_configuration` to Optional and 
Computed +``` \ No newline at end of file diff --git a/.changelog/43988.txt b/.changelog/43988.txt new file mode 100644 index 000000000000..2ad4ab4b7171 --- /dev/null +++ b/.changelog/43988.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_glue_job: Support `G.12X`, `G.16X`, `R.1X`, `R.2X`, `R.4X`, and `R.8X` as valid values for `worker_type` +``` \ No newline at end of file diff --git a/.changelog/43989.txt b/.changelog/43989.txt new file mode 100644 index 000000000000..06ec6652bcc9 --- /dev/null +++ b/.changelog/43989.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_synthetics_canary: Add `vpc_config.ipv6_allowed_for_dual_stack` argument +``` diff --git a/.changelog/43990.txt b/.changelog/43990.txt new file mode 100644 index 000000000000..6f6611213fc0 --- /dev/null +++ b/.changelog/43990.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_route_table: Add resource identity support +``` diff --git a/.changelog/43996.txt b/.changelog/43996.txt new file mode 100644 index 000000000000..a4f317fb2a8c --- /dev/null +++ b/.changelog/43996.txt @@ -0,0 +1,3 @@ +```release-note:bug +resource/aws_chatbot_slack_channel_configuration: Force resource replacement when `configuration_name` is modified +``` diff --git a/.changelog/44006.txt b/.changelog/44006.txt new file mode 100644 index 000000000000..64c14b350c63 --- /dev/null +++ b/.changelog/44006.txt @@ -0,0 +1,3 @@ +```release-note:bug +resource/aws_route53_health_check: Fix `child_health_threshold` to properly accept explicitly specified zero value +``` diff --git a/.changelog/44011.txt b/.changelog/44011.txt new file mode 100644 index 000000000000..1bc3a0cac0c2 --- /dev/null +++ b/.changelog/44011.txt @@ -0,0 +1,7 @@ +```release-note:enhancement +resource/aws_kms_external_key: Add `key_spec` argument +``` + +```release-note:enhancement +resource/aws_kms_external_key: Change `key_usage` to Optional and Computed +``` \ No newline at end of file diff --git a/.changelog/44025.txt 
b/.changelog/44025.txt new file mode 100644 index 000000000000..cd2b84ac492e --- /dev/null +++ b/.changelog/44025.txt @@ -0,0 +1,6 @@ +```release-note:enhancement +resource/aws_kms_key: Add resource identity support +``` +```release-note:enhancement +resource/aws_kms_alias: Add resource identity support +``` diff --git a/.changelog/44031.txt b/.changelog/44031.txt new file mode 100644 index 000000000000..6e93e8580bca --- /dev/null +++ b/.changelog/44031.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_secretsmanager_secret_version: Add resource identity support +``` diff --git a/.changelog/44032.txt b/.changelog/44032.txt new file mode 100644 index 000000000000..2f7d29c88819 --- /dev/null +++ b/.changelog/44032.txt @@ -0,0 +1,3 @@ +```release-note:bug +resource/aws_wafv2_web_acl: Add missing flattening of `name` in `response_inspection.header` blocks for `AWSManagedRulesATPRuleSet` and `AWSManagedRulesACFPRuleSet` to avoid persistent plan diffs +``` diff --git a/.changelog/44041.txt b/.changelog/44041.txt new file mode 100644 index 000000000000..e8982db26e93 --- /dev/null +++ b/.changelog/44041.txt @@ -0,0 +1,9 @@ +```release-note:enhancement +resource/aws_ecr_repository: Add resource identity support +``` +```release-note:enhancement +resource/aws_ecr_repository_policy: Add resource identity support +``` +```release-note:enhancement +resource/aws_ecr_lifecycle_policy: Add resource identity support +``` diff --git a/.changelog/44042.txt b/.changelog/44042.txt new file mode 100644 index 000000000000..cbf0829c09e1 --- /dev/null +++ b/.changelog/44042.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_datazone_domain: Add `domain_version` and `service_role` arguments to support V2 domains +``` diff --git a/.changelog/44043.txt b/.changelog/44043.txt new file mode 100644 index 000000000000..83d5cfffa5e0 --- /dev/null +++ b/.changelog/44043.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_s3_bucket_acl: Add resource identity 
support +``` diff --git a/.changelog/44045.txt b/.changelog/44045.txt new file mode 100644 index 000000000000..245531c47e21 --- /dev/null +++ b/.changelog/44045.txt @@ -0,0 +1,7 @@ +```release-note:enhancement +data-source/aws_sesv2_email_identity: Add `verification_status` attribute +``` + +```release-note:enhancement +resource/aws_sesv2_email_identity: Add `verification_status` attribute +``` \ No newline at end of file diff --git a/.changelog/44048.txt b/.changelog/44048.txt new file mode 100644 index 000000000000..673c62d9964e --- /dev/null +++ b/.changelog/44048.txt @@ -0,0 +1,6 @@ +```release-note:enhancement +resource/aws_route53_resolver_rule: Add resource identity support +``` +```release-note:enhancement +resource/aws_route53_resolver_rule_association: Add resource identity support +``` diff --git a/.changelog/44050.txt b/.changelog/44050.txt new file mode 100644 index 000000000000..f1f2d37ab9b3 --- /dev/null +++ b/.changelog/44050.txt @@ -0,0 +1,3 @@ +```release-note:bug +resource/aws_db_instance: Fixes the behavior when modifying `database_insights_mode` when using custom KMS key +``` diff --git a/.changelog/44059.txt b/.changelog/44059.txt new file mode 100644 index 000000000000..70f81686af99 --- /dev/null +++ b/.changelog/44059.txt @@ -0,0 +1,11 @@ +```release-note:enhancement +resource/aws_ec2_client_vpn_endpoint: Add `endpoint_ip_address_type` and `traffic_ip_address_type` arguments to support IPv6 connectivity in Client VPN +``` + +```release-note:enhancement +resource/aws_ec2_client_vpn_endpoint: Make `client_cidr_block` optional +``` + +```release-note:enhancement +data-source/aws_ec2_client_vpn_endpoint: Add `endpoint_ip_address_type` and `traffic_ip_address_type` attributes +``` diff --git a/.changelog/44068.txt b/.changelog/44068.txt new file mode 100644 index 000000000000..19b51b700c36 --- /dev/null +++ b/.changelog/44068.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_instance: Add resource identity support +``` diff --git 
a/.changelog/44072.txt b/.changelog/44072.txt new file mode 100644 index 000000000000..2155022ce2f2 --- /dev/null +++ b/.changelog/44072.txt @@ -0,0 +1,3 @@ +```release-note:bug +resource/aws_s3tables_table_policy: Remove plan-time validation of `name` and `namespace` +``` diff --git a/.changelog/44075.txt b/.changelog/44075.txt new file mode 100644 index 000000000000..b6a46af8e657 --- /dev/null +++ b/.changelog/44075.txt @@ -0,0 +1,18 @@ +```release-note:enhancement +resource/aws_ssm_association: Add resource identity support +``` +```release-note:enhancement +resource/aws_ssm_document: Add resource identity support +``` +```release-note:enhancement +resource/aws_ssm_maintenance_window: Add resource identity support +``` +```release-note:enhancement +resource/aws_ssm_maintenance_window_target: Add resource identity support +``` +```release-note:enhancement +resource/aws_ssm_maintenance_window_task: Add resource identity support +``` +```release-note:enhancement +resource/aws_ssm_patch_baseline: Add resource identity support +``` diff --git a/.changelog/44079.txt b/.changelog/44079.txt new file mode 100644 index 000000000000..a10b32f43dcf --- /dev/null +++ b/.changelog/44079.txt @@ -0,0 +1,7 @@ +```release-note:enhancement +resource/aws_efs_mount_target: Add `ip_address_type` and `ipv6_address` arguments to support IPv6 connectivity +``` + +```release-note:enhancement +data-source/aws_efs_mount_target: Add `ip_address_type` and `ipv6_address` attributes +``` diff --git a/.changelog/44080.txt b/.changelog/44080.txt new file mode 100644 index 000000000000..6c732e60a247 --- /dev/null +++ b/.changelog/44080.txt @@ -0,0 +1,7 @@ +```release-note:enhancement +resource/aws_lambda_function: Add `source_kms_key_arn` argument +``` + +```release-note:enhancement +data-source/aws_lambda_function: Add `source_kms_key_arn` attribute +``` diff --git a/.changelog/44090.txt b/.changelog/44090.txt new file mode 100644 index 000000000000..769521fdf32c --- /dev/null +++ 
b/.changelog/44090.txt @@ -0,0 +1,9 @@ +```release-note:note +resource/aws_s3_bucket_logging: The `target_grant.grantee.display_name` attribute is deprecated. AWS has [ended support for this attribute](https://docs.aws.amazon.com/AmazonS3/latest/API/API_Grantee.html). API responses began inconsistently returning it on July 15, 2025, and will stop returning it entirely on November 21, 2025. This attribute will be removed in a future major version. +``` +```release-note:note +resource/aws_s3_bucket_acl: The `access_control_policy.grant.grantee.display_name` attribute is deprecated. AWS has [ended support for this attribute](https://docs.aws.amazon.com/AmazonS3/latest/API/API_Grantee.html). API responses began inconsistently returning it on July 15, 2025, and will stop returning it entirely on November 21, 2025. This attribute will be removed in a future major version. +``` +```release-note:note +resource/aws_s3_bucket_acl: The `access_control_policy.owner.display_name` attribute is deprecated. AWS has [ended support for this attribute](https://docs.aws.amazon.com/AmazonS3/latest/API/API_Owner.html). API responses began inconsistently returning it on July 15, 2025, and will stop returning it entirely on November 21, 2025. This attribute will be removed in a future major version. +``` diff --git a/.changelog/44097.txt b/.changelog/44097.txt new file mode 100644 index 000000000000..c7c820c92e6c --- /dev/null +++ b/.changelog/44097.txt @@ -0,0 +1,7 @@ +```release-note:enhancement +resource/aws_launch_template: Add `placement.group_id` argument +``` + +```release-note:enhancement +data-source/aws_launch_template: Add `placement.group_id` attribute +``` diff --git a/.changelog/44105.txt b/.changelog/44105.txt new file mode 100644 index 000000000000..b6cd83abda53 --- /dev/null +++ b/.changelog/44105.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_synthetics_canary: Add `run_config.ephemeral_storage` argument. 
+``` diff --git a/.changelog/44118.txt b/.changelog/44118.txt new file mode 100644 index 000000000000..f3923b9757cc --- /dev/null +++ b/.changelog/44118.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_fsx_openzfs_volume: Remove maximum items limit on the `user_and_group_quotas` argument +``` \ No newline at end of file diff --git a/.changelog/44120.txt b/.changelog/44120.txt new file mode 100644 index 000000000000..4ad7fca1fd2c --- /dev/null +++ b/.changelog/44120.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_fsx_openzfs_file_system: Remove maximum items limit on the `user_and_group_quotas` argument +``` \ No newline at end of file diff --git a/.changelog/44127.txt b/.changelog/44127.txt new file mode 100644 index 000000000000..18776ba9151e --- /dev/null +++ b/.changelog/44127.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +provider: Support `ap-southeast-6` as a valid AWS Region +``` \ No newline at end of file diff --git a/.changelog/44129.txt b/.changelog/44129.txt new file mode 100644 index 000000000000..91ef87bad0dc --- /dev/null +++ b/.changelog/44129.txt @@ -0,0 +1,11 @@ +```release-note:enhancement +resource/aws_instance: Adds List support +``` + +```release-note:enhancement +resource/aws_iam_role: Adds List support +``` + +```release-note:enhancement +resource/aws_cloudwatch_log_group: Adds List support +``` diff --git a/.changelog/44132.txt b/.changelog/44132.txt new file mode 100644 index 000000000000..548444d0e8b4 --- /dev/null +++ b/.changelog/44132.txt @@ -0,0 +1,15 @@ +```release-note:enhancement +data-source/aws_elb_hosted_zone_id: Add hosted zone ID for `ap-southeast-6` AWS Region +``` + +```release-note:enhancement +data-source/aws_lb_hosted_zone_id: Add hosted zone IDs for `ap-southeast-6` AWS Region +``` + +```release-note:enhancement +data-source/aws_elastic_beanstalk_hosted_zone: Add hosted zone IDs for `ap-southeast-5`, `ap-southeast-7`, `eu-south-2`, and `me-central-1` AWS Regions +``` + 
+```release-note:enhancement +data-source/aws_s3_bucket: Add hosted zone ID for `ap-southeast-6` AWS Region +``` \ No newline at end of file diff --git a/.changelog/44143.txt b/.changelog/44143.txt new file mode 100644 index 000000000000..4d72412691a2 --- /dev/null +++ b/.changelog/44143.txt @@ -0,0 +1,3 @@ +```release-note:bug +resource/aws_rds_cluster_role_association: Make `feature_name` optional +``` diff --git a/.changelog/44155.txt b/.changelog/44155.txt new file mode 100644 index 000000000000..b84347244f6e --- /dev/null +++ b/.changelog/44155.txt @@ -0,0 +1,7 @@ +```release-note:enhancement +resource/aws_opensearch_package: Add `engine_version` argument +``` + +```release-note:enhancement +resource/aws_opensearch_package: Add waiter to ensure package validation completes +``` diff --git a/.changelog/44163.txt b/.changelog/44163.txt new file mode 100644 index 000000000000..a912f9f69a89 --- /dev/null +++ b/.changelog/44163.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_cloudfront_distribution: Add `origin.response_completion_timeout` argument +``` diff --git a/.changelog/44165.txt b/.changelog/44165.txt new file mode 100644 index 000000000000..fd188ff0a83e --- /dev/null +++ b/.changelog/44165.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_ecs_account_setting_default: Support `dualStackIPv6` as a valid value for `name` +``` \ No newline at end of file diff --git a/.changelog/44168.txt b/.changelog/44168.txt new file mode 100644 index 000000000000..c8c20dee1f8e --- /dev/null +++ b/.changelog/44168.txt @@ -0,0 +1,3 @@ +```release-note:new-data-source +aws_appconfig_application +``` \ No newline at end of file diff --git a/.changelog/44189.txt b/.changelog/44189.txt new file mode 100644 index 000000000000..ae56dd78da49 --- /dev/null +++ b/.changelog/44189.txt @@ -0,0 +1,3 @@ +```release-note:bug +resource/aws_s3_bucket_lifecycle_configuration: Ignore `MethodNotAllowed` errors when deleting non-existent lifecycle configurations +``` 
\ No newline at end of file diff --git a/.changelog/44191.txt b/.changelog/44191.txt new file mode 100644 index 000000000000..3485a1bed082 --- /dev/null +++ b/.changelog/44191.txt @@ -0,0 +1,3 @@ +```release-note:bug +resource/aws_flow_log: Fix `Error decoding ... from prior state: unsupported attribute "log_group_name"` errors when upgrading from a pre-v6.0.0 provider version +``` \ No newline at end of file diff --git a/.changelog/44194.txt b/.changelog/44194.txt new file mode 100644 index 000000000000..98241109a620 --- /dev/null +++ b/.changelog/44194.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_vpc_endpoint: Add resource identity support +``` diff --git a/.changelog/44195.txt b/.changelog/44195.txt new file mode 100644 index 000000000000..3fdb6bdaab72 --- /dev/null +++ b/.changelog/44195.txt @@ -0,0 +1,4 @@ + +```release-note:bug +resource/aws_launch_template: Fix `Error decoding ... from prior state: unsupported attribute "elastic_gpu_specifications"` errors when upgrading from a pre-v6.0.0 provider version +``` \ No newline at end of file diff --git a/.changelog/44198.txt b/.changelog/44198.txt new file mode 100644 index 000000000000..230320e7b47c --- /dev/null +++ b/.changelog/44198.txt @@ -0,0 +1,6 @@ +```release-note:enhancement +resource/aws_vpc_security_group_ingress_rule: Add resource identity support +``` +```release-note:enhancement +resource/aws_vpc_security_group_egress_rule: Add resource identity support +``` diff --git a/.changelog/44201.txt b/.changelog/44201.txt new file mode 100644 index 000000000000..75a0942b6b37 --- /dev/null +++ b/.changelog/44201.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_codebuild_webhook: Add `pull_request_build_policy` configuration block +``` diff --git a/.changelog/44204.txt b/.changelog/44204.txt new file mode 100644 index 000000000000..e49f14a7c698 --- /dev/null +++ b/.changelog/44204.txt @@ -0,0 +1,3 @@ +```release-note:bug +resource/aws_cognito_managed_login_branding: Fix 
`reading Cognito Managed Login Branding by client ... couldn't find resource` errors when a user pool contains multiple client apps +``` \ No newline at end of file diff --git a/.changelog/44207.txt b/.changelog/44207.txt new file mode 100644 index 000000000000..53180c8a1993 --- /dev/null +++ b/.changelog/44207.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_glue_catalog_table_optimizer: Add `iceberg_configuration.run_rate_in_hours` argument to `retention_configuration` and `orphan_file_deletion_configuration` blocks +``` diff --git a/.changelog/44211.txt b/.changelog/44211.txt new file mode 100644 index 000000000000..34075df08df2 --- /dev/null +++ b/.changelog/44211.txt @@ -0,0 +1,15 @@ +```release-note:bug +resource/aws_appautoscaling_policy: Fix `interface conversion: interface {} is nil, not map[string]interface {}` panics when `step_scaling_policy_configuration` is empty +``` + +```release-note:enhancement +resource/aws_appautoscaling_policy: Add plan-time validation of `policy_type` +``` + +```release-note:enhancement +resource/aws_appautoscaling_policy: Add plan-time validation of `step_scaling_policy_configuration.adjustment_type` and `step_scaling_policy_configuration.metric_aggregation_type` +``` + +```release-note:enhancement +resource/aws_appautoscaling_policy: Add `predictive_scaling_policy_configuration` argument +``` \ No newline at end of file diff --git a/.changelog/44214.txt b/.changelog/44214.txt new file mode 100644 index 000000000000..c89ba2066647 --- /dev/null +++ b/.changelog/44214.txt @@ -0,0 +1,3 @@ +```release-note:new-action +aws_ses_send_email +``` \ No newline at end of file diff --git a/.changelog/44215.txt b/.changelog/44215.txt new file mode 100644 index 000000000000..9e7d2f6100d4 --- /dev/null +++ b/.changelog/44215.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_networkfirewall_rule_group: Add IPv6 CIDR block support to `address_definition` arguments in `source` and `destination` blocks within 
`rule_group.rules_source.stateless_rules_and_custom_actions.stateless_rule.rule_definition.match_attributes` +``` diff --git a/.changelog/44224.txt b/.changelog/44224.txt new file mode 100644 index 000000000000..5c353227acae --- /dev/null +++ b/.changelog/44224.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_bedrock_guardrail: Add `input_action`, `output_action`, `input_enabled`, and `output_enabled` arguments to `word_policy_config.managed_word_lists_config` and `word_policy_config.words_config` configuration blocks +``` diff --git a/.changelog/44228.txt b/.changelog/44228.txt new file mode 100644 index 000000000000..c02873d7eea8 --- /dev/null +++ b/.changelog/44228.txt @@ -0,0 +1,3 @@ +```release-note:bug +resource/aws_secretsmanager_secret: Return diagnostic `warning` when remote policy is invalid +``` \ No newline at end of file diff --git a/.changelog/44232.txt b/.changelog/44232.txt new file mode 100644 index 000000000000..e00988ef1c85 --- /dev/null +++ b/.changelog/44232.txt @@ -0,0 +1,3 @@ +```release-note:new-action +aws_sns_publish +``` \ No newline at end of file diff --git a/.changelog/44238.txt b/.changelog/44238.txt new file mode 100644 index 000000000000..9fb84ac38c9a --- /dev/null +++ b/.changelog/44238.txt @@ -0,0 +1,3 @@ +```release-note:bug +resource/aws_servicecatalog_provisioned_product: Restore `timeouts.read` arguments removed in v6.12.0 +``` diff --git a/.changelog/44241.txt b/.changelog/44241.txt new file mode 100644 index 000000000000..74d29b678a9f --- /dev/null +++ b/.changelog/44241.txt @@ -0,0 +1,7 @@ +```release-note:enhancement +resource/aws_budgets_budget: Add `billing_view_arn` argument +``` + +```release-note:enhancement +data-source/aws_budgets_budget: Add `billing_view_arn` attribute +``` diff --git a/.changelog/44244.txt b/.changelog/44244.txt new file mode 100644 index 000000000000..5f4a7425e5e7 --- /dev/null +++ b/.changelog/44244.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_synthetics_canary: 
Add `schedule.retry_config` configuration block +``` diff --git a/.changelog/44252.txt b/.changelog/44252.txt new file mode 100644 index 000000000000..1c38854376e5 --- /dev/null +++ b/.changelog/44252.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_rds_global_cluster: Remove provider-side conflict between `source_db_cluster_identifier` and `engine` arguments +``` diff --git a/.changelog/44256.txt b/.changelog/44256.txt new file mode 100644 index 000000000000..167b590f4c4b --- /dev/null +++ b/.changelog/44256.txt @@ -0,0 +1,3 @@ +```release-note:new-resource +aws_prometheus_resource_policy +``` diff --git a/.changelog/44264.txt b/.changelog/44264.txt new file mode 100644 index 000000000000..a89a1c3d27f5 --- /dev/null +++ b/.changelog/44264.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_scheduler_schedule: Add `action_after_completion` argument +``` diff --git a/.changelog/44272.txt b/.changelog/44272.txt new file mode 100644 index 000000000000..c4394c9254e1 --- /dev/null +++ b/.changelog/44272.txt @@ -0,0 +1,3 @@ +```release-note:new-data-source +aws_billing_views +``` diff --git a/.changelog/44286.txt b/.changelog/44286.txt new file mode 100644 index 000000000000..4752571ab871 --- /dev/null +++ b/.changelog/44286.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_sfn_state_machine: Add resource identity support +``` \ No newline at end of file diff --git a/.changelog/44289.txt b/.changelog/44289.txt new file mode 100644 index 000000000000..4f44752b1eaf --- /dev/null +++ b/.changelog/44289.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_ecs_service: Add `deployment_configuration.lifecycle_hook.hook_details` argument +``` diff --git a/.changelog/44299.txt b/.changelog/44299.txt new file mode 100644 index 000000000000..cb782a0541e9 --- /dev/null +++ b/.changelog/44299.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_iam_service_specific_credential: Add support for Bedrock API keys with 
`credential_age_days`, `service_credential_alias`, `service_credential_secret`, `create_date`, and `expiration_date` attributes +``` \ No newline at end of file diff --git a/.changelog/44301.txt b/.changelog/44301.txt new file mode 100644 index 000000000000..8dfe18195b26 --- /dev/null +++ b/.changelog/44301.txt @@ -0,0 +1,7 @@ +```release-note:new-resource +aws_bedrockagentcore_agent_runtime +``` + +```release-note:new-resource +aws_bedrockagentcore_agent_runtime_endpoint +``` \ No newline at end of file diff --git a/.changelog/44302.txt b/.changelog/44302.txt new file mode 100644 index 000000000000..0b164b7b38ff --- /dev/null +++ b/.changelog/44302.txt @@ -0,0 +1,3 @@ +```release-note:new-resource +aws_bedrockagentcore_api_key_credential_provider +``` \ No newline at end of file diff --git a/.changelog/44303.txt b/.changelog/44303.txt new file mode 100644 index 000000000000..4d312d6424f5 --- /dev/null +++ b/.changelog/44303.txt @@ -0,0 +1,3 @@ +```release-note:new-resource +aws_bedrockagentcore_browser +``` \ No newline at end of file diff --git a/.changelog/44304.txt b/.changelog/44304.txt new file mode 100644 index 000000000000..ab0acba66f2d --- /dev/null +++ b/.changelog/44304.txt @@ -0,0 +1,3 @@ +```release-note:new-resource +aws_bedrockagentcore_code_interpreter +``` \ No newline at end of file diff --git a/.changelog/44305.txt b/.changelog/44305.txt new file mode 100644 index 000000000000..f269e856b2ab --- /dev/null +++ b/.changelog/44305.txt @@ -0,0 +1,7 @@ +```release-note:new-resource +aws_bedrockagentcore_gateway +``` + +```release-note:new-resource +aws_bedrockagentcore_gateway_target +``` \ No newline at end of file diff --git a/.changelog/44309.txt b/.changelog/44309.txt new file mode 100644 index 000000000000..1724a50ff893 --- /dev/null +++ b/.changelog/44309.txt @@ -0,0 +1,11 @@ +```release-note:enhancement +resource/aws_rds_proxy: Add `default_auth_scheme` argument +``` + +```release-note:enhancement +resource/aws_rds_proxy: Make `auth` 
configuration block optional +``` + +```release-note:enhancement +data-source/aws_rds_proxy: Add `default_auth_scheme` attribute +``` diff --git a/.changelog/44310.txt b/.changelog/44310.txt new file mode 100644 index 000000000000..eb1f41bde34c --- /dev/null +++ b/.changelog/44310.txt @@ -0,0 +1,3 @@ +```release-note:bug +resource/aws_sagemaker_endpoint_configuration: Fix panic when empty `async_inference_config.output_config.notification_config` block is specified +``` diff --git a/.changelog/44328.txt b/.changelog/44328.txt new file mode 100644 index 000000000000..a8956aaa2b81 --- /dev/null +++ b/.changelog/44328.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +data-source/aws_ec2_instance_type_offering: Add `location` attribute +``` diff --git a/.changelog/44334.txt b/.changelog/44334.txt new file mode 100644 index 000000000000..ec4da706c7f5 --- /dev/null +++ b/.changelog/44334.txt @@ -0,0 +1,3 @@ +```release-note:bug +resource/aws_eks_cluster: Change `compute_config`, `kubernetes_network_config.elastic_load_balancing`, and `storage_config.` to Optional and Computed, allowing EKS Auto Mode settings to be enabled, disabled, and removed from configuration +``` diff --git a/.changelog/44336.txt b/.changelog/44336.txt new file mode 100644 index 000000000000..658a8f868b4e --- /dev/null +++ b/.changelog/44336.txt @@ -0,0 +1,19 @@ +```release-note:new-data-source +aws_odb_cloud_autonomous_vm_clusters +``` + +```release-note:new-data-source +aws_odb_cloud_exadata_infrastructures +``` + +```release-note:new-data-source +aws_odb_cloud_vm_clusters +``` + +```release-note:new-data-source +aws_odb_networks +``` + +```release-note:new-data-source +aws_odb_network_peering_connections +``` \ No newline at end of file diff --git a/.changelog/44346.txt b/.changelog/44346.txt new file mode 100644 index 000000000000..661ce0616f1f --- /dev/null +++ b/.changelog/44346.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_connect_instance: Add resource identity support +``` 
\ No newline at end of file diff --git a/.changelog/44362.txt b/.changelog/44362.txt new file mode 100644 index 000000000000..e71029f7d9a4 --- /dev/null +++ b/.changelog/44362.txt @@ -0,0 +1,7 @@ +```release-note:bug +resource/aws_vpc: Correctly set `ipv6_cidr_block` when the VPC has multiple associated IPv6 CIDRs +``` + +```release-note:bug +resource/aws_default_vpc: Correctly set `ipv6_cidr_block` when the VPC has multiple associated IPv6 CIDRs +``` \ No newline at end of file diff --git a/.changelog/44365.txt b/.changelog/44365.txt new file mode 100644 index 000000000000..f4cb05528cf4 --- /dev/null +++ b/.changelog/44365.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_connect_phone_number: Add resource identity support +``` \ No newline at end of file diff --git a/.changelog/44369.txt b/.changelog/44369.txt new file mode 100644 index 000000000000..e92edf1e60d8 --- /dev/null +++ b/.changelog/44369.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_ssmcontacts_contact_channel: Add resource identity support +``` \ No newline at end of file diff --git a/.changelog/44372.txt b/.changelog/44372.txt new file mode 100644 index 000000000000..e6916ba748e3 --- /dev/null +++ b/.changelog/44372.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_pinpointsmsvoicev2_phone_number: Update `two_way_channel_arn` argument to accept `connect.[region].amazonaws.com` in addition to ARNs +``` diff --git a/.changelog/44375.txt b/.changelog/44375.txt new file mode 100644 index 000000000000..416d45ce6ae7 --- /dev/null +++ b/.changelog/44375.txt @@ -0,0 +1,10 @@ +```release-note:note +provider: This release contains both internal provider fixes and a Terraform Plugin SDK V2 update related to a [regression](https://github.com/hashicorp/terraform-provider-aws/issues/44366) which may impact resources that support resource identity +``` + +```release-note:bug +provider: Fix `Missing Resource Identity After Update` errors for non-refreshed and failed 
updates +``` +```release-note:bug +provider: Fix `Unexpected Identity Change` errors when fully-null identity values in state are updated to valid values +``` diff --git a/.changelog/44377.txt b/.changelog/44377.txt new file mode 100644 index 000000000000..7f3437563ef9 --- /dev/null +++ b/.changelog/44377.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_route53recoverycontrolconfig_cluster: Add `network_type` argument +``` diff --git a/.changelog/44379.txt b/.changelog/44379.txt new file mode 100644 index 000000000000..79e8f4e72ef8 --- /dev/null +++ b/.changelog/44379.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_s3control_bucket: Add resource identity support +``` \ No newline at end of file diff --git a/.changelog/44389.txt b/.changelog/44389.txt new file mode 100644 index 000000000000..c70b04a5ebfa --- /dev/null +++ b/.changelog/44389.txt @@ -0,0 +1,3 @@ +```release-note:bug +resource/aws_dms_endpoint: Ensure that `postgres_settings` are updated +``` diff --git a/.changelog/44401.txt b/.changelog/44401.txt new file mode 100644 index 000000000000..248a08c9b42e --- /dev/null +++ b/.changelog/44401.txt @@ -0,0 +1,11 @@ +```release-note:bug +resource/aws_odb_cloud_vm_cluster: Fixed planmodifier for computed attribute. Fixed planmodifier for display_name attribute. +``` + +```release-note:bug +resource/aws_odb_cloud_autonomous_vm_cluster: Fixed planmodifier for computed attribute. +``` + +```release-note:bug +resource/aws_odb_network_peering_connection: Fixed planmodifier for computed attribute. +``` \ No newline at end of file diff --git a/.changelog/44404.txt b/.changelog/44404.txt new file mode 100644 index 000000000000..deeb5ce91342 --- /dev/null +++ b/.changelog/44404.txt @@ -0,0 +1,3 @@ +```release-note:bug +resource/aws_rds_cluster: Fixes error when setting `database_insights_mode` with `global_cluster_identifier`. 
+``` diff --git a/.changelog/44406.txt b/.changelog/44406.txt new file mode 100644 index 000000000000..1c72b444b7f6 --- /dev/null +++ b/.changelog/44406.txt @@ -0,0 +1,7 @@ +```release-note:bug +resource/aws_dsql_cluster: Prevents error when optional attribute `deletion_protection_enabled` not set. +``` + +```release-note:enhancement +resource/aws_dsql_cluster: Adds attribute `force_destroy`. +``` diff --git a/.changelog/44408.txt b/.changelog/44408.txt new file mode 100644 index 000000000000..eab726b7a73e --- /dev/null +++ b/.changelog/44408.txt @@ -0,0 +1,9 @@ +```release-note:enhancement +resource/aws_sfn_activity: Add resource identity support +``` +```release-note:enhancement +resource/aws_sfn_activity: Add `arn` argument +``` +```release-note:enhancement +resource/aws_sfn_alias: Add resource identity support +``` diff --git a/.changelog/44417.txt b/.changelog/44417.txt new file mode 100644 index 000000000000..0dee7d140817 --- /dev/null +++ b/.changelog/44417.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_opensearch_domain: Add `aiml_options` argument +``` diff --git a/.changelog/44434.txt b/.changelog/44434.txt new file mode 100644 index 000000000000..c0c9fc0121aa --- /dev/null +++ b/.changelog/44434.txt @@ -0,0 +1,3 @@ +```release-note:bug +resource/aws_bedrock_provisioned_model_throughput: Fix `AttributeName("arn") still remains in the path: could not find attribute or block "arn" in schema` errors when upgrading from a pre-v6.0.0 provider version +``` \ No newline at end of file diff --git a/.changelog/44435.txt b/.changelog/44435.txt new file mode 100644 index 000000000000..f8be24b041ac --- /dev/null +++ b/.changelog/44435.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_cleanrooms_configured_table: Add resource identity support +``` \ No newline at end of file diff --git a/.changelog/44442.txt b/.changelog/44442.txt new file mode 100644 index 000000000000..cf03d35ef168 --- /dev/null +++ b/.changelog/44442.txt @@ -0,0 +1,3 @@ 
+```release-note:bug +resource/aws_s3_bucket_lifecycle_configuration: Allows unsetting `noncurrent_version_expiration.newer_noncurrent_versions` and `noncurrent_version_transition.newer_noncurrent_versions`. +``` diff --git a/.changelog/44444.txt b/.changelog/44444.txt new file mode 100644 index 000000000000..d434d1edf076 --- /dev/null +++ b/.changelog/44444.txt @@ -0,0 +1,3 @@ +```release-note:new-action +aws_codebuild_start_build +``` \ No newline at end of file diff --git a/.changelog/44445.txt b/.changelog/44445.txt new file mode 100644 index 000000000000..49041231ec1c --- /dev/null +++ b/.changelog/44445.txt @@ -0,0 +1,3 @@ +```release-note:new-action +aws_transcribe_start_transcription_job +``` diff --git a/.changelog/44449.txt b/.changelog/44449.txt new file mode 100644 index 000000000000..afb108849144 --- /dev/null +++ b/.changelog/44449.txt @@ -0,0 +1,3 @@ +```release-note:bug +data-source/aws_servicequotas_service_quota: Fixed a panic that occurred when a non-existing `quota_name` was provided +``` diff --git a/.changelog/44456.txt b/.changelog/44456.txt new file mode 100644 index 000000000000..ccefcf2b2929 --- /dev/null +++ b/.changelog/44456.txt @@ -0,0 +1,3 @@ +```release-note:bug +resource/aws_fsx_lustre_file_system: Fixed to update `metadata_configuration` first to allow simultaneous increase of `metadata_configuration.iops` and `storage_capacity` +``` diff --git a/.changelog/44459.txt b/.changelog/44459.txt new file mode 100644 index 000000000000..043c058b8dd3 --- /dev/null +++ b/.changelog/44459.txt @@ -0,0 +1,3 @@ +```release-note:bug +resource/aws_instance: Fix `interface conversion: interface {} is nil, not map[string]interface {}` panics when `capacity_reservation_target` is empty +``` \ No newline at end of file diff --git a/.changelog/44461.txt b/.changelog/44461.txt new file mode 100644 index 000000000000..9e985181801d --- /dev/null +++ b/.changelog/44461.txt @@ -0,0 +1,7 @@ +```release-note:bug +resource/aws_elastic_beanstalk_environment: 
Fix `inconsistent final plan` error in some cases with `setting` elements. +``` + +```release-note:bug +resource/aws_elastic_beanstalk_configuration_template: Fix `inconsistent final plan` error in some cases with `setting` elements. +``` diff --git a/.changelog/44463.txt b/.changelog/44463.txt new file mode 100644 index 000000000000..349eaf4382dd --- /dev/null +++ b/.changelog/44463.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_cloudfront_distribution: Add `ip_address_type` argument to `origin.custom_origin_config` block +``` diff --git a/.changelog/44464.txt b/.changelog/44464.txt new file mode 100644 index 000000000000..86d8e91298a3 --- /dev/null +++ b/.changelog/44464.txt @@ -0,0 +1,3 @@ +```release-note:new-action +aws_sfn_start_execution +``` \ No newline at end of file diff --git a/.changelog/44473.txt b/.changelog/44473.txt new file mode 100644 index 000000000000..cb4b7e735ef1 --- /dev/null +++ b/.changelog/44473.txt @@ -0,0 +1,11 @@ +```release-note:enhancement +resource/aws_route53recoverycontrolconfig_cluster: Add tagging support +``` + +```release-note:enhancement +resource/aws_route53recoverycontrolconfig_control_panel: Add tagging support +``` + +```release-note:enhancement +resource/aws_route53recoverycontrolconfig_safety_rule: Add tagging support +``` diff --git a/.changelog/44482.txt b/.changelog/44482.txt new file mode 100644 index 000000000000..26842547d5db --- /dev/null +++ b/.changelog/44482.txt @@ -0,0 +1,3 @@ +```release-note:bug +data-source/aws_networkfirewall_firewall_policy: Fix failure to retrieve multiple `firewall_policy.stateful_rule_group_reference` attributes +``` diff --git a/.changelog/44485.txt b/.changelog/44485.txt new file mode 100644 index 000000000000..442ec8f72b92 --- /dev/null +++ b/.changelog/44485.txt @@ -0,0 +1,3 @@ +```release-note:bug +data-source/aws_lb: Fix `Invalid address to set: []string{"secondary_ips_auto_assigned_per_subnet"}` errors +``` diff --git a/.changelog/44487.txt 
b/.changelog/44487.txt new file mode 100644 index 000000000000..98eb0eca07a4 --- /dev/null +++ b/.changelog/44487.txt @@ -0,0 +1,3 @@ +```release-note:new-action +aws_events_put_events +``` \ No newline at end of file diff --git a/.changelog/44489.txt b/.changelog/44489.txt new file mode 100644 index 000000000000..875d680e8eca --- /dev/null +++ b/.changelog/44489.txt @@ -0,0 +1,3 @@ +```release-note:bug +resource/aws_cloudwatch_event_rule: Do not retry on `LimitExceededException` +``` diff --git a/.changelog/44491.txt b/.changelog/44491.txt new file mode 100644 index 000000000000..94840ce53c20 --- /dev/null +++ b/.changelog/44491.txt @@ -0,0 +1,39 @@ +```release-note:bug +resource/aws_datazone_environment: Prevents `unknown value` error when optional `account_identifier` is not specified. +``` + +```release-note:bug +resource/aws_datazone_environment: Prevents `unknown value` error when optional `account_region` is not specified. +``` + +```release-note:bug +resource/aws_datazone_environment: Properly passes `blueprint_identifier` on creation. +``` + +```release-note:bug +resource/aws_datazone_environment: Prevents error when updating. +``` + +```release-note:bug +resource/aws_datazone_environment: Prevents occasional `unexpected state` error when deleting. +``` + +```release-note:bug +resource/aws_datazone_environment: Sets values for `user_parameters` when importing. +``` + +```release-note:bug +resource/aws_datazone_environment: Values in `user_parameters` should not be updateable. +``` + +```release-note:bug +resource/aws_datazone_environment: Correctly updates `glossary_terms`. +``` + +```release-note:bug +resource/aws_datazone_project: No longer ignores errors when deleting. +``` + +```release-note:bug +resource/aws_datazone_project: No longer returns error when already deleting. 
+``` diff --git a/.changelog/44498.txt b/.changelog/44498.txt new file mode 100644 index 000000000000..19c706141ec5 --- /dev/null +++ b/.changelog/44498.txt @@ -0,0 +1,3 @@ +```release-note:bug +resource/aws_odb_cloud_vm_cluster : Fixed planmodifier for data_storage_size_in_tbs. Marked it mandatory. Fixed gi-version issue during creation +``` \ No newline at end of file diff --git a/.changelog/44505.txt b/.changelog/44505.txt new file mode 100644 index 000000000000..1a133d88e6ae --- /dev/null +++ b/.changelog/44505.txt @@ -0,0 +1,3 @@ +```release-note:bug +resource/aws_launch_template: `kms_key_id` validation now accepts key ID, alias, and alias ARN in addition to key ARN +``` diff --git a/.changelog/44509.txt b/.changelog/44509.txt new file mode 100644 index 000000000000..27714534ab9e --- /dev/null +++ b/.changelog/44509.txt @@ -0,0 +1,7 @@ +```release-note:enhancement +resource/aws_ecs_capacity_provider: Make `auto_scaling_group_provider` optional +``` + +```release-note:enhancement +resource/aws_ecs_capacity_provider: Add `cluster` and `managed_instances_provider` arguments +``` \ No newline at end of file diff --git a/.changelog/44514.txt b/.changelog/44514.txt new file mode 100644 index 000000000000..1c77c7803876 --- /dev/null +++ b/.changelog/44514.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_ebs_volume: Update `throughput` maximum validation from 1000 to 2000 MiB/s for gp3 volumes +``` diff --git a/.changelog/44515.txt b/.changelog/44515.txt new file mode 100644 index 000000000000..9d37e28f970e --- /dev/null +++ b/.changelog/44515.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_networkfirewall_logging_configuration: Add `enable_monitoring_dashboard` argument +``` diff --git a/.changelog/44516.txt b/.changelog/44516.txt new file mode 100644 index 000000000000..86d121ee5762 --- /dev/null +++ b/.changelog/44516.txt @@ -0,0 +1,7 @@ +```release-note:enhancement +resource/aws_dms_endpoint: Add `mysql_settings` configuration block 
+``` + +```release-note:enhancement +data-source/aws_dms_endpoint: Add `mysql_settings` attribute +``` diff --git a/.changelog/44518.txt b/.changelog/44518.txt new file mode 100644 index 000000000000..aed0c87ec78f --- /dev/null +++ b/.changelog/44518.txt @@ -0,0 +1,6 @@ +```release-note:bug +provider: Fix `Missing Resource Identity After Update` errors for non-refreshed and failed updates of Plugin Framework based resources +``` +```release-note:bug +provider: Fix `Unexpected Identity Change` errors when fully-null identity values in state are updated to valid values for Plugin Framework based resources +``` \ No newline at end of file diff --git a/.changelog/44522.txt b/.changelog/44522.txt new file mode 100644 index 000000000000..8f3434a84d00 --- /dev/null +++ b/.changelog/44522.txt @@ -0,0 +1,3 @@ +```release-note:bug +resource/aws_cloudwatch_log_resource_policy: Do not retry on `LimitExceededException` +``` diff --git a/.changelog/44540.txt b/.changelog/44540.txt new file mode 100644 index 000000000000..f6020ac923e7 --- /dev/null +++ b/.changelog/44540.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_lambda_event_source_mapping: Add `schema_registry_config` configuration blocks to `amazon_managed_kafka_event_source_config` and `self_managed_kafka_event_source_config` blocks +``` diff --git a/.changelog/44542.txt b/.changelog/44542.txt new file mode 100644 index 000000000000..58c07cac6719 --- /dev/null +++ b/.changelog/44542.txt @@ -0,0 +1,3 @@ +```release-note:bug +resource/aws_ec2_transit_gateway_route_table_propagation: Fix bug causing `inconsistent final plan` errors +``` \ No newline at end of file diff --git a/.changelog/44548.txt b/.changelog/44548.txt new file mode 100644 index 000000000000..feaf5a906905 --- /dev/null +++ b/.changelog/44548.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_ssmcontacts_contact: Add resource identity support +``` \ No newline at end of file diff --git a/.changelog/44559.txt 
b/.changelog/44559.txt new file mode 100644 index 000000000000..4f91f775d80a --- /dev/null +++ b/.changelog/44559.txt @@ -0,0 +1,3 @@ +```release-note:new-resource +aws_transfer_host_key +``` \ No newline at end of file diff --git a/.changelog/44560.txt b/.changelog/44560.txt new file mode 100644 index 000000000000..a7f8a9bba496 --- /dev/null +++ b/.changelog/44560.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_vpclattice_resource_gateway: Add `ipv4_addresses_per_eni` argument +``` \ No newline at end of file diff --git a/.changelog/44572.txt b/.changelog/44572.txt new file mode 100644 index 000000000000..57522bfb2925 --- /dev/null +++ b/.changelog/44572.txt @@ -0,0 +1,3 @@ +```release-note:bug +resource/aws_ivschat_room: Set `maximum_message_rate_per_second` validation maximum to `100` +``` diff --git a/.changelog/44573.txt b/.changelog/44573.txt new file mode 100644 index 000000000000..da07f59c7854 --- /dev/null +++ b/.changelog/44573.txt @@ -0,0 +1,3 @@ +```release-note:bug +provider: Correctly validate AWS European Sovereign Cloud Regions in ARNs +``` \ No newline at end of file diff --git a/.changelog/44576.txt b/.changelog/44576.txt new file mode 100644 index 000000000000..dbe1096f08c5 --- /dev/null +++ b/.changelog/44576.txt @@ -0,0 +1,3 @@ +```release-note:bug +resource/aws_dynamodb_table: Do not retry on `LimitExceededException` +``` diff --git a/.changelog/44589.txt b/.changelog/44589.txt new file mode 100644 index 000000000000..db155159c137 --- /dev/null +++ b/.changelog/44589.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_emrserverless_application: Add `scheduler_configuration` block +``` diff --git a/.changelog/44604.txt b/.changelog/44604.txt new file mode 100644 index 000000000000..9cf3c6449b34 --- /dev/null +++ b/.changelog/44604.txt @@ -0,0 +1,9 @@ +```release-note:enhancement +resource/aws_launch_template: Update EBS `throughput` maximum validation from 1000 to 2000 MiB/s for gp3 volumes +``` 
+```release-note:enhancement +resource/aws_imagebuilder_container_recipe: Update EBS `throughput` maximum validation from 1000 to 2000 MiB/s for gp3 volumes +``` +```release-note:enhancement +resource/aws_imagebuilder_image_recipe: Update EBS `throughput` maximum validation from 1000 to 2000 MiB/s for gp3 volumes +``` diff --git a/.changelog/44609.txt b/.changelog/44609.txt new file mode 100644 index 000000000000..20aec49a1a26 --- /dev/null +++ b/.changelog/44609.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_vpc: Adds List support +``` diff --git a/.changelog/44622.txt b/.changelog/44622.txt new file mode 100644 index 000000000000..78ec6b6baca4 --- /dev/null +++ b/.changelog/44622.txt @@ -0,0 +1,3 @@ +```release-note:new-data-source +aws_vpn_connection +``` \ No newline at end of file diff --git a/.changelog/44638.txt b/.changelog/44638.txt new file mode 100644 index 000000000000..9211d513bcee --- /dev/null +++ b/.changelog/44638.txt @@ -0,0 +1,7 @@ +```release-note:enhancement +resource/aws_quicksight_account_subscription: Add `admin_pro_group`, `author_pro_group`, and `reader_pro_group` arguments +``` + +```release-note:note +resource/aws_quicksight_account_subscription: Because we cannot easily test all this functionality, it is best effort and we ask for community help in testing +``` \ No newline at end of file diff --git a/.changelog/44671.txt b/.changelog/44671.txt new file mode 100644 index 000000000000..a32edc2d753f --- /dev/null +++ b/.changelog/44671.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_subnet: Adds List support +``` diff --git a/.ci/.golangci2.yml b/.ci/.golangci2.yml index f8ed86aa9a31..33bcb26d3c66 100644 --- a/.ci/.golangci2.yml +++ b/.ci/.golangci2.yml @@ -29,8 +29,6 @@ linters: - os.Remove - os.Setenv - os.Unsetenv - errorlint: - errorf: false issues: max-issues-per-linter: 10 max-same-issues: 3 diff --git a/.ci/.semgrep-caps-aws-ec2.yml b/.ci/.semgrep-caps-aws-ec2.yml index e7040b8e6091..577b03af8057 
100644 --- a/.ci/.semgrep-caps-aws-ec2.yml +++ b/.ci/.semgrep-caps-aws-ec2.yml @@ -6,11 +6,11 @@ rules: message: Do not use "AWS" in func name inside AWS Provider paths: include: - - internal + - "/internal" exclude: - - internal/service/securitylake/aws_log_source.go - - internal/service/securitylake/aws_log_source_test.go - - internal/service/*/service_endpoints_gen_test.go + - "/internal/service/securitylake/aws_log_source.go" + - "/internal/service/securitylake/aws_log_source_test.go" + - "/internal/service/*/service_endpoints_gen_test.go" patterns: - pattern: func $NAME( ... ) { ... } - metavariable-pattern: @@ -25,10 +25,10 @@ rules: message: Do not use "AWS" in const name inside AWS Provider paths: include: - - internal + - "/internal" exclude: - - internal/service/securitylake/aws_log_source.go - - internal/service/*/service_endpoints_gen_test.go + - "/internal/service/securitylake/aws_log_source.go" + - "/internal/service/*/service_endpoints_gen_test.go" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -43,11 +43,11 @@ rules: message: Do not use "AWS" in var name inside AWS Provider paths: include: - - internal + - "/internal" exclude: - - internal/service/securitylake/aws_log_source.go - - internal/service/securitylake/exports_test.go - - internal/service/*/service_endpoints_gen_test.go + - "/internal/service/securitylake/aws_log_source.go" + - "/internal/service/securitylake/exports_test.go" + - "/internal/service/*/service_endpoints_gen_test.go" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -62,7 +62,7 @@ rules: message: Use correct caps in func name (i.e., HTTPS or https, not Https) (see list at https://github.com/hashicorp/terraform-provider-aws/blob/main/names/caps.md) paths: include: - - internal + - "/internal" patterns: - pattern: func $NAME( ... ) { ... 
} - metavariable-pattern: @@ -77,7 +77,7 @@ rules: message: Use correct caps in const name (i.e., HTTPS or https, not Https) (see list at https://github.com/hashicorp/terraform-provider-aws/blob/main/names/caps.md) paths: include: - - internal + - "/internal" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -91,7 +91,7 @@ rules: message: Use correct caps in var name (i.e., HTTPS or https, not Https) (see list at https://github.com/hashicorp/terraform-provider-aws/blob/main/names/caps.md) paths: include: - - internal + - "/internal" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -105,7 +105,7 @@ rules: message: Use correct caps in func name (i.e., HTTPS or https, not Https) (see list at https://github.com/hashicorp/terraform-provider-aws/blob/main/names/caps.md) paths: include: - - internal + - "/internal" patterns: - pattern: func $NAME( ... ) { ... } - metavariable-pattern: @@ -120,7 +120,7 @@ rules: message: Use correct caps in const name (i.e., HTTPS or https, not Https) (see list at https://github.com/hashicorp/terraform-provider-aws/blob/main/names/caps.md) paths: include: - - internal + - "/internal" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -134,7 +134,7 @@ rules: message: Use correct caps in var name (i.e., HTTPS or https, not Https) (see list at https://github.com/hashicorp/terraform-provider-aws/blob/main/names/caps.md) paths: include: - - internal + - "/internal" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -148,7 +148,7 @@ rules: message: Use correct caps in func name (i.e., HTTPS or https, not Https) (see list at https://github.com/hashicorp/terraform-provider-aws/blob/main/names/caps.md) paths: include: - - internal + - "/internal" patterns: - pattern: func $NAME( ... ) { ... 
} - metavariable-pattern: @@ -163,7 +163,7 @@ rules: message: Use correct caps in const name (i.e., HTTPS or https, not Https) (see list at https://github.com/hashicorp/terraform-provider-aws/blob/main/names/caps.md) paths: include: - - internal + - "/internal" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -177,7 +177,7 @@ rules: message: Use correct caps in var name (i.e., HTTPS or https, not Https) (see list at https://github.com/hashicorp/terraform-provider-aws/blob/main/names/caps.md) paths: include: - - internal + - "/internal" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -191,7 +191,7 @@ rules: message: Use correct caps in func name (i.e., HTTPS or https, not Https) (see list at https://github.com/hashicorp/terraform-provider-aws/blob/main/names/caps.md) paths: include: - - internal + - "/internal" patterns: - pattern: func $NAME( ... ) { ... } - metavariable-pattern: @@ -206,7 +206,7 @@ rules: message: Use correct caps in const name (i.e., HTTPS or https, not Https) (see list at https://github.com/hashicorp/terraform-provider-aws/blob/main/names/caps.md) paths: include: - - internal + - "/internal" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -220,7 +220,7 @@ rules: message: Use correct caps in var name (i.e., HTTPS or https, not Https) (see list at https://github.com/hashicorp/terraform-provider-aws/blob/main/names/caps.md) paths: include: - - internal + - "/internal" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -234,7 +234,7 @@ rules: message: Use correct caps in func name (i.e., HTTPS or https, not Https) (see list at https://github.com/hashicorp/terraform-provider-aws/blob/main/names/caps.md) paths: include: - - internal + - "/internal" patterns: - pattern: func $NAME( ... ) { ... 
} - metavariable-pattern: @@ -249,7 +249,7 @@ rules: message: Use correct caps in const name (i.e., HTTPS or https, not Https) (see list at https://github.com/hashicorp/terraform-provider-aws/blob/main/names/caps.md) paths: include: - - internal + - "/internal" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -263,7 +263,7 @@ rules: message: Use correct caps in var name (i.e., HTTPS or https, not Https) (see list at https://github.com/hashicorp/terraform-provider-aws/blob/main/names/caps.md) paths: include: - - internal + - "/internal" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -277,7 +277,7 @@ rules: message: Use correct caps in func name (i.e., HTTPS or https, not Https) (see list at https://github.com/hashicorp/terraform-provider-aws/blob/main/names/caps.md) paths: include: - - internal + - "/internal" patterns: - pattern: func $NAME( ... ) { ... } - metavariable-pattern: @@ -292,7 +292,7 @@ rules: message: Use correct caps in const name (i.e., HTTPS or https, not Https) (see list at https://github.com/hashicorp/terraform-provider-aws/blob/main/names/caps.md) paths: include: - - internal + - "/internal" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -306,7 +306,7 @@ rules: message: Use correct caps in var name (i.e., HTTPS or https, not Https) (see list at https://github.com/hashicorp/terraform-provider-aws/blob/main/names/caps.md) paths: include: - - internal + - "/internal" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -320,7 +320,7 @@ rules: message: Do not use "EC2" in func name inside ec2 package paths: include: - - internal/service/ec2 + - "/internal/service/ec2" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -336,7 +336,7 @@ rules: message: Do not use "EC2" in const name inside ec2 package paths: include: - - internal/service/ec2 + - "/internal/service/ec2" patterns: - pattern: const $NAME = ... 
- metavariable-pattern: @@ -350,7 +350,7 @@ rules: message: Do not use "EC2" in var name inside ec2 package paths: include: - - internal/service/ec2 + - "/internal/service/ec2" patterns: - pattern: var $NAME = ... - metavariable-pattern: diff --git a/.ci/.semgrep-configs.yml b/.ci/.semgrep-configs.yml index 821176b03bc9..6023bd7c57fa 100644 --- a/.ci/.semgrep-configs.yml +++ b/.ci/.semgrep-configs.yml @@ -6,7 +6,7 @@ rules: message: "Config funcs should follow form testAccConfig_" paths: include: - - internal/service/**/*_test.go + - "/internal/service/**/*_test.go" patterns: - pattern-inside: "[]resource.TestStep{ ... }" - pattern: "{..., $KEY:$VALUE, ...}" @@ -28,7 +28,7 @@ rules: message: "Config funcs should follow form testAccConfig_" paths: include: - - internal/service/**/*_test.go + - "/internal/service/**/*_test.go" patterns: - pattern-inside: "[]resource.TestStep{ ... }" - pattern: "{..., $KEY: acctest.ConfigCompose(..., $VALUE, ...), ...}" @@ -49,7 +49,7 @@ rules: message: "Config funcs should not begin with 'testAccCheck'" paths: include: - - internal/service/**/*_test.go + - "/internal/service/**/*_test.go" patterns: - pattern-inside: "[]resource.TestStep{ ... }" - pattern: "{..., $KEY:$VALUE, ...}" @@ -69,7 +69,7 @@ rules: message: "Config funcs should not begin with 'testAccCheck'" paths: include: - - internal/service/**/*_test.go + - "/internal/service/**/*_test.go" patterns: - pattern-inside: "[]resource.TestStep{ ... 
}" - pattern: "{..., $KEY: acctest.ConfigCompose(..., $VALUE, ...), ...}" diff --git a/.ci/.semgrep-constants.yml b/.ci/.semgrep-constants.yml index 5ef2012983af..81e93813c178 100644 --- a/.ci/.semgrep-constants.yml +++ b/.ci/.semgrep-constants.yml @@ -6,7 +6,7 @@ rules: message: Use the constant `names.AttrARN` for the string literal "arn" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"arn"' - pattern-not-regex: '"arn":\s+test\w+,' @@ -24,7 +24,7 @@ rules: message: Use the constant `names.AttrARNs` for the string literal "arns" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"arns"' - pattern-not-regex: '"arns":\s+test\w+,' @@ -42,7 +42,7 @@ rules: message: Use the constant `names.AttrAWSAccountID` for the string literal "aws_account_id" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"aws_account_id"' - pattern-not-regex: '"aws_account_id":\s+test\w+,' @@ -60,7 +60,7 @@ rules: message: Use the constant `names.AttrAccessKey` for the string literal "access_key" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"access_key"' - pattern-not-regex: '"access_key":\s+test\w+,' @@ -78,7 +78,7 @@ rules: message: Use the constant `names.AttrAccountID` for the string literal "account_id" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"account_id"' - pattern-not-regex: '"account_id":\s+test\w+,' @@ -96,7 +96,7 @@ rules: message: Use the constant `names.AttrAction` for the string literal "action" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"action"' - pattern-not-regex: '"action":\s+test\w+,' @@ -114,7 +114,7 @@ rules: message: Use the constant `names.AttrActions` for the string literal "actions" paths: include: - - "internal/service/**/*.go" + - 
"/internal/service/**/*.go" patterns: - pattern: '"actions"' - pattern-not-regex: '"actions":\s+test\w+,' @@ -132,7 +132,7 @@ rules: message: Use the constant `names.AttrAddress` for the string literal "address" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"address"' - pattern-not-regex: '"address":\s+test\w+,' @@ -150,7 +150,7 @@ rules: message: Use the constant `names.AttrAlias` for the string literal "alias" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"alias"' - pattern-not-regex: '"alias":\s+test\w+,' @@ -168,7 +168,7 @@ rules: message: Use the constant `names.AttrAllocatedStorage` for the string literal "allocated_storage" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"allocated_storage"' - pattern-not-regex: '"allocated_storage":\s+test\w+,' @@ -186,7 +186,7 @@ rules: message: Use the constant `names.AttrAllowMajorVersionUpgrade` for the string literal "allow_major_version_upgrade" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"allow_major_version_upgrade"' - pattern-not-regex: '"allow_major_version_upgrade":\s+test\w+,' @@ -204,7 +204,7 @@ rules: message: Use the constant `names.AttrApplicationID` for the string literal "application_id" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"application_id"' - pattern-not-regex: '"application_id":\s+test\w+,' @@ -222,7 +222,7 @@ rules: message: Use the constant `names.AttrApplyImmediately` for the string literal "apply_immediately" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"apply_immediately"' - pattern-not-regex: '"apply_immediately":\s+test\w+,' @@ -240,7 +240,7 @@ rules: message: Use the constant `names.AttrAssociationID` for the string literal "association_id" paths: include: - - 
"internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"association_id"' - pattern-not-regex: '"association_id":\s+test\w+,' @@ -258,7 +258,7 @@ rules: message: Use the constant `names.AttrAttributes` for the string literal "attributes" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"attributes"' - pattern-not-regex: '"attributes":\s+test\w+,' @@ -276,7 +276,7 @@ rules: message: Use the constant `names.AttrAutoMinorVersionUpgrade` for the string literal "auto_minor_version_upgrade" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"auto_minor_version_upgrade"' - pattern-not-regex: '"auto_minor_version_upgrade":\s+test\w+,' @@ -294,7 +294,7 @@ rules: message: Use the constant `names.AttrAvailabilityZone` for the string literal "availability_zone" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"availability_zone"' - pattern-not-regex: '"availability_zone":\s+test\w+,' @@ -312,7 +312,7 @@ rules: message: Use the constant `names.AttrAvailabilityZones` for the string literal "availability_zones" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"availability_zones"' - pattern-not-regex: '"availability_zones":\s+test\w+,' @@ -330,7 +330,7 @@ rules: message: Use the constant `names.AttrBucket` for the string literal "bucket" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"bucket"' - pattern-not-regex: '"bucket":\s+test\w+,' @@ -348,7 +348,7 @@ rules: message: Use the constant `names.AttrBucketName` for the string literal "bucket_name" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"bucket_name"' - pattern-not-regex: '"bucket_name":\s+test\w+,' @@ -366,7 +366,7 @@ rules: message: Use the constant `names.AttrBucketPrefix` for the string 
literal "bucket_prefix" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"bucket_prefix"' - pattern-not-regex: '"bucket_prefix":\s+test\w+,' @@ -384,7 +384,7 @@ rules: message: Use the constant `names.AttrCIDRBlock` for the string literal "cidr_block" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"cidr_block"' - pattern-not-regex: '"cidr_block":\s+test\w+,' @@ -402,7 +402,7 @@ rules: message: Use the constant `names.AttrCapacityProviderStrategy` for the string literal "capacity_provider_strategy" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"capacity_provider_strategy"' - pattern-not-regex: '"capacity_provider_strategy":\s+test\w+,' @@ -420,7 +420,7 @@ rules: message: Use the constant `names.AttrCatalogID` for the string literal "catalog_id" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"catalog_id"' - pattern-not-regex: '"catalog_id":\s+test\w+,' @@ -438,7 +438,7 @@ rules: message: Use the constant `names.AttrCertificate` for the string literal "certificate" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"certificate"' - pattern-not-regex: '"certificate":\s+test\w+,' @@ -456,7 +456,7 @@ rules: message: Use the constant `names.AttrCertificateARN` for the string literal "certificate_arn" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"certificate_arn"' - pattern-not-regex: '"certificate_arn":\s+test\w+,' @@ -474,7 +474,7 @@ rules: message: Use the constant `names.AttrCertificateChain` for the string literal "certificate_chain" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"certificate_chain"' - pattern-not-regex: '"certificate_chain":\s+test\w+,' @@ -492,7 +492,7 @@ rules: message: Use the 
constant `names.AttrClientID` for the string literal "client_id" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"client_id"' - pattern-not-regex: '"client_id":\s+test\w+,' @@ -510,7 +510,7 @@ rules: message: Use the constant `names.AttrClientSecret` for the string literal "client_secret" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"client_secret"' - pattern-not-regex: '"client_secret":\s+test\w+,' @@ -528,7 +528,7 @@ rules: message: Use the constant `names.AttrCloudWatchLogGroupARN` for the string literal "cloudwatch_log_group_arn" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"cloudwatch_log_group_arn"' - pattern-not-regex: '"cloudwatch_log_group_arn":\s+test\w+,' @@ -546,7 +546,7 @@ rules: message: Use the constant `names.AttrCloudWatchLogs` for the string literal "cloudwatch_logs" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"cloudwatch_logs"' - pattern-not-regex: '"cloudwatch_logs":\s+test\w+,' @@ -564,7 +564,7 @@ rules: message: Use the constant `names.AttrClusterIdentifier` for the string literal "cluster_identifier" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"cluster_identifier"' - pattern-not-regex: '"cluster_identifier":\s+test\w+,' @@ -582,7 +582,7 @@ rules: message: Use the constant `names.AttrClusterName` for the string literal "cluster_name" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"cluster_name"' - pattern-not-regex: '"cluster_name":\s+test\w+,' @@ -600,7 +600,7 @@ rules: message: Use the constant `names.AttrComment` for the string literal "comment" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"comment"' - pattern-not-regex: '"comment":\s+test\w+,' @@ -618,7 
+618,7 @@ rules: message: Use the constant `names.AttrCondition` for the string literal "condition" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"condition"' - pattern-not-regex: '"condition":\s+test\w+,' @@ -636,7 +636,7 @@ rules: message: Use the constant `names.AttrConfiguration` for the string literal "configuration" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"configuration"' - pattern-not-regex: '"configuration":\s+test\w+,' @@ -654,7 +654,7 @@ rules: message: Use the constant `names.AttrConnectionID` for the string literal "connection_id" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"connection_id"' - pattern-not-regex: '"connection_id":\s+test\w+,' @@ -672,7 +672,7 @@ rules: message: Use the constant `names.AttrContent` for the string literal "content" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"content"' - pattern-not-regex: '"content":\s+test\w+,' @@ -690,7 +690,7 @@ rules: message: Use the constant `names.AttrContentType` for the string literal "content_type" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"content_type"' - pattern-not-regex: '"content_type":\s+test\w+,' @@ -708,7 +708,7 @@ rules: message: Use the constant `names.AttrCreateTime` for the string literal "create_time" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"create_time"' - pattern-not-regex: '"create_time":\s+test\w+,' @@ -726,7 +726,7 @@ rules: message: Use the constant `names.AttrCreatedAt` for the string literal "created_at" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"created_at"' - pattern-not-regex: '"created_at":\s+test\w+,' @@ -744,7 +744,7 @@ rules: message: Use the constant 
`names.AttrCreatedDate` for the string literal "created_date" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"created_date"' - pattern-not-regex: '"created_date":\s+test\w+,' @@ -762,7 +762,7 @@ rules: message: Use the constant `names.AttrCreatedTime` for the string literal "created_time" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"created_time"' - pattern-not-regex: '"created_time":\s+test\w+,' @@ -780,7 +780,7 @@ rules: message: Use the constant `names.AttrCreationDate` for the string literal "creation_date" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"creation_date"' - pattern-not-regex: '"creation_date":\s+test\w+,' @@ -798,7 +798,7 @@ rules: message: Use the constant `names.AttrCreationTime` for the string literal "creation_time" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"creation_time"' - pattern-not-regex: '"creation_time":\s+test\w+,' @@ -816,7 +816,7 @@ rules: message: Use the constant `names.AttrDNSName` for the string literal "dns_name" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"dns_name"' - pattern-not-regex: '"dns_name":\s+test\w+,' @@ -834,7 +834,7 @@ rules: message: Use the constant `names.AttrDatabase` for the string literal "database" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"database"' - pattern-not-regex: '"database":\s+test\w+,' @@ -852,7 +852,7 @@ rules: message: Use the constant `names.AttrDatabaseName` for the string literal "database_name" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"database_name"' - pattern-not-regex: '"database_name":\s+test\w+,' @@ -870,7 +870,7 @@ rules: message: Use the constant `names.AttrDefaultAction` for the string 
literal "default_action" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"default_action"' - pattern-not-regex: '"default_action":\s+test\w+,' @@ -888,7 +888,7 @@ rules: message: Use the constant `names.AttrDefaultValue` for the string literal "default_value" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"default_value"' - pattern-not-regex: '"default_value":\s+test\w+,' @@ -906,7 +906,7 @@ rules: message: Use the constant `names.AttrDeleteOnTermination` for the string literal "delete_on_termination" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"delete_on_termination"' - pattern-not-regex: '"delete_on_termination":\s+test\w+,' @@ -924,7 +924,7 @@ rules: message: Use the constant `names.AttrDeletionProtection` for the string literal "deletion_protection" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"deletion_protection"' - pattern-not-regex: '"deletion_protection":\s+test\w+,' @@ -942,7 +942,7 @@ rules: message: Use the constant `names.AttrDescription` for the string literal "description" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"description"' - pattern-not-regex: '"description":\s+test\w+,' @@ -960,7 +960,7 @@ rules: message: Use the constant `names.AttrDestination` for the string literal "destination" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"destination"' - pattern-not-regex: '"destination":\s+test\w+,' @@ -978,7 +978,7 @@ rules: message: Use the constant `names.AttrDestinationARN` for the string literal "destination_arn" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"destination_arn"' - pattern-not-regex: '"destination_arn":\s+test\w+,' @@ -996,7 +996,7 @@ rules: message: 
Use the constant `names.AttrDeviceName` for the string literal "device_name" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"device_name"' - pattern-not-regex: '"device_name":\s+test\w+,' @@ -1014,7 +1014,7 @@ rules: message: Use the constant `names.AttrDisplayName` for the string literal "display_name" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"display_name"' - pattern-not-regex: '"display_name":\s+test\w+,' @@ -1032,7 +1032,7 @@ rules: message: Use the constant `names.AttrDomain` for the string literal "domain" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"domain"' - pattern-not-regex: '"domain":\s+test\w+,' @@ -1050,7 +1050,7 @@ rules: message: Use the constant `names.AttrDomainName` for the string literal "domain_name" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"domain_name"' - pattern-not-regex: '"domain_name":\s+test\w+,' @@ -1068,7 +1068,7 @@ rules: message: Use the constant `names.AttrDuration` for the string literal "duration" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"duration"' - pattern-not-regex: '"duration":\s+test\w+,' @@ -1086,7 +1086,7 @@ rules: message: Use the constant `names.AttrEmail` for the string literal "email" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"email"' - pattern-not-regex: '"email":\s+test\w+,' @@ -1104,7 +1104,7 @@ rules: message: Use the constant `names.AttrEnabled` for the string literal "enabled" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"enabled"' - pattern-not-regex: '"enabled":\s+test\w+,' @@ -1122,7 +1122,7 @@ rules: message: Use the constant `names.AttrEncrypted` for the string literal "encrypted" paths: include: - - 
"internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"encrypted"' - pattern-not-regex: '"encrypted":\s+test\w+,' @@ -1140,7 +1140,7 @@ rules: message: Use the constant `names.AttrEncryptionConfiguration` for the string literal "encryption_configuration" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"encryption_configuration"' - pattern-not-regex: '"encryption_configuration":\s+test\w+,' @@ -1158,7 +1158,7 @@ rules: message: Use the constant `names.AttrEndpoint` for the string literal "endpoint" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"endpoint"' - pattern-not-regex: '"endpoint":\s+test\w+,' @@ -1176,7 +1176,7 @@ rules: message: Use the constant `names.AttrEndpointType` for the string literal "endpoint_type" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"endpoint_type"' - pattern-not-regex: '"endpoint_type":\s+test\w+,' @@ -1194,7 +1194,7 @@ rules: message: Use the constant `names.AttrEndpoints` for the string literal "endpoints" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"endpoints"' - pattern-not-regex: '"endpoints":\s+test\w+,' @@ -1212,7 +1212,7 @@ rules: message: Use the constant `names.AttrEngine` for the string literal "engine" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"engine"' - pattern-not-regex: '"engine":\s+test\w+,' @@ -1230,7 +1230,7 @@ rules: message: Use the constant `names.AttrEngineVersion` for the string literal "engine_version" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"engine_version"' - pattern-not-regex: '"engine_version":\s+test\w+,' @@ -1248,7 +1248,7 @@ rules: message: Use the constant `names.AttrEnvironment` for the string literal "environment" paths: include: - - 
"internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"environment"' - pattern-not-regex: '"environment":\s+test\w+,' @@ -1266,7 +1266,7 @@ rules: message: Use the constant `names.AttrExecutionRoleARN` for the string literal "execution_role_arn" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"execution_role_arn"' - pattern-not-regex: '"execution_role_arn":\s+test\w+,' @@ -1284,7 +1284,7 @@ rules: message: Use the constant `names.AttrExpectedBucketOwner` for the string literal "expected_bucket_owner" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"expected_bucket_owner"' - pattern-not-regex: '"expected_bucket_owner":\s+test\w+,' @@ -1302,7 +1302,7 @@ rules: message: Use the constant `names.AttrExpression` for the string literal "expression" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"expression"' - pattern-not-regex: '"expression":\s+test\w+,' @@ -1320,7 +1320,7 @@ rules: message: Use the constant `names.AttrExternalID` for the string literal "external_id" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"external_id"' - pattern-not-regex: '"external_id":\s+test\w+,' @@ -1338,7 +1338,7 @@ rules: message: Use the constant `names.AttrFamily` for the string literal "family" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"family"' - pattern-not-regex: '"family":\s+test\w+,' @@ -1356,7 +1356,7 @@ rules: message: Use the constant `names.AttrField` for the string literal "field" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"field"' - pattern-not-regex: '"field":\s+test\w+,' @@ -1374,7 +1374,7 @@ rules: message: Use the constant `names.AttrFileSystemID` for the string literal "file_system_id" paths: include: - - 
"internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"file_system_id"' - pattern-not-regex: '"file_system_id":\s+test\w+,' @@ -1392,7 +1392,7 @@ rules: message: Use the constant `names.AttrFilter` for the string literal "filter" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"filter"' - pattern-not-regex: '"filter":\s+test\w+,' @@ -1410,7 +1410,7 @@ rules: message: Use the constant `names.AttrFinalSnapshotIdentifier` for the string literal "final_snapshot_identifier" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"final_snapshot_identifier"' - pattern-not-regex: '"final_snapshot_identifier":\s+test\w+,' @@ -1428,7 +1428,7 @@ rules: message: Use the constant `names.AttrForceDelete` for the string literal "force_delete" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"force_delete"' - pattern-not-regex: '"force_delete":\s+test\w+,' @@ -1446,7 +1446,7 @@ rules: message: Use the constant `names.AttrForceDestroy` for the string literal "force_destroy" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"force_destroy"' - pattern-not-regex: '"force_destroy":\s+test\w+,' @@ -1464,7 +1464,7 @@ rules: message: Use the constant `names.AttrFormat` for the string literal "format" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"format"' - pattern-not-regex: '"format":\s+test\w+,' @@ -1482,7 +1482,7 @@ rules: message: Use the constant `names.AttrFunctionARN` for the string literal "function_arn" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"function_arn"' - pattern-not-regex: '"function_arn":\s+test\w+,' @@ -1500,7 +1500,7 @@ rules: message: Use the constant `names.AttrGroupName` for the string literal "group_name" paths: include: - - 
"internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"group_name"' - pattern-not-regex: '"group_name":\s+test\w+,' @@ -1518,7 +1518,7 @@ rules: message: Use the constant `names.AttrHeader` for the string literal "header" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"header"' - pattern-not-regex: '"header":\s+test\w+,' @@ -1536,7 +1536,7 @@ rules: message: Use the constant `names.AttrHealthCheck` for the string literal "health_check" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"health_check"' - pattern-not-regex: '"health_check":\s+test\w+,' @@ -1554,7 +1554,7 @@ rules: message: Use the constant `names.AttrHostedZoneID` for the string literal "hosted_zone_id" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"hosted_zone_id"' - pattern-not-regex: '"hosted_zone_id":\s+test\w+,' @@ -1572,7 +1572,7 @@ rules: message: Use the constant `names.AttrIAMRoleARN` for the string literal "iam_role_arn" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"iam_role_arn"' - pattern-not-regex: '"iam_role_arn":\s+test\w+,' @@ -1590,7 +1590,7 @@ rules: message: Use the constant `names.AttrID` for the string literal "id" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"id"' - pattern-not-regex: '"id":\s+test\w+,' @@ -1608,7 +1608,7 @@ rules: message: Use the constant `names.AttrIDs` for the string literal "ids" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"ids"' - pattern-not-regex: '"ids":\s+test\w+,' @@ -1626,7 +1626,7 @@ rules: message: Use the constant `names.AttrIOPS` for the string literal "iops" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"iops"' - pattern-not-regex: 
'"iops":\s+test\w+,' @@ -1644,7 +1644,7 @@ rules: message: Use the constant `names.AttrIPAddress` for the string literal "ip_address" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"ip_address"' - pattern-not-regex: '"ip_address":\s+test\w+,' @@ -1662,7 +1662,7 @@ rules: message: Use the constant `names.AttrIPAddressType` for the string literal "ip_address_type" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"ip_address_type"' - pattern-not-regex: '"ip_address_type":\s+test\w+,' @@ -1680,7 +1680,7 @@ rules: message: Use the constant `names.AttrIPAddresses` for the string literal "ip_addresses" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"ip_addresses"' - pattern-not-regex: '"ip_addresses":\s+test\w+,' @@ -1698,7 +1698,7 @@ rules: message: Use the constant `names.AttrIdentifier` for the string literal "identifier" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"identifier"' - pattern-not-regex: '"identifier":\s+test\w+,' @@ -1716,7 +1716,7 @@ rules: message: Use the constant `names.AttrInstanceCount` for the string literal "instance_count" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"instance_count"' - pattern-not-regex: '"instance_count":\s+test\w+,' @@ -1734,7 +1734,7 @@ rules: message: Use the constant `names.AttrInstanceID` for the string literal "instance_id" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"instance_id"' - pattern-not-regex: '"instance_id":\s+test\w+,' @@ -1752,7 +1752,7 @@ rules: message: Use the constant `names.AttrInstanceType` for the string literal "instance_type" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"instance_type"' - pattern-not-regex: 
'"instance_type":\s+test\w+,' @@ -1770,7 +1770,7 @@ rules: message: Use the constant `names.AttrInterval` for the string literal "interval" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"interval"' - pattern-not-regex: '"interval":\s+test\w+,' @@ -1788,7 +1788,7 @@ rules: message: Use the constant `names.AttrIssuer` for the string literal "issuer" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"issuer"' - pattern-not-regex: '"issuer":\s+test\w+,' @@ -1806,7 +1806,7 @@ rules: message: Use the constant `names.AttrJSON` for the string literal "json" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"json"' - pattern-not-regex: '"json":\s+test\w+,' @@ -1824,7 +1824,7 @@ rules: message: Use the constant `names.AttrKMSKey` for the string literal "kms_key" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"kms_key"' - pattern-not-regex: '"kms_key":\s+test\w+,' @@ -1842,7 +1842,7 @@ rules: message: Use the constant `names.AttrKMSKeyARN` for the string literal "kms_key_arn" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"kms_key_arn"' - pattern-not-regex: '"kms_key_arn":\s+test\w+,' @@ -1860,7 +1860,7 @@ rules: message: Use the constant `names.AttrKMSKeyID` for the string literal "kms_key_id" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"kms_key_id"' - pattern-not-regex: '"kms_key_id":\s+test\w+,' @@ -1878,7 +1878,7 @@ rules: message: Use the constant `names.AttrKey` for the string literal "key" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"key"' - pattern-not-regex: '"key":\s+test\w+,' @@ -1896,7 +1896,7 @@ rules: message: Use the constant `names.AttrKeyID` for the string literal "key_id" paths: 
include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"key_id"' - pattern-not-regex: '"key_id":\s+test\w+,' @@ -1914,7 +1914,7 @@ rules: message: Use the constant `names.AttrLanguageCode` for the string literal "language_code" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"language_code"' - pattern-not-regex: '"language_code":\s+test\w+,' @@ -1932,7 +1932,7 @@ rules: message: Use the constant `names.AttrLastUpdatedDate` for the string literal "last_updated_date" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"last_updated_date"' - pattern-not-regex: '"last_updated_date":\s+test\w+,' @@ -1950,7 +1950,7 @@ rules: message: Use the constant `names.AttrLastUpdatedTime` for the string literal "last_updated_time" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"last_updated_time"' - pattern-not-regex: '"last_updated_time":\s+test\w+,' @@ -1968,7 +1968,7 @@ rules: message: Use the constant `names.AttrLaunchTemplate` for the string literal "launch_template" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"launch_template"' - pattern-not-regex: '"launch_template":\s+test\w+,' @@ -1986,7 +1986,7 @@ rules: message: Use the constant `names.AttrLocation` for the string literal "location" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"location"' - pattern-not-regex: '"location":\s+test\w+,' @@ -2004,7 +2004,7 @@ rules: message: Use the constant `names.AttrLogGroupName` for the string literal "log_group_name" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"log_group_name"' - pattern-not-regex: '"log_group_name":\s+test\w+,' @@ -2022,7 +2022,7 @@ rules: message: Use the constant `names.AttrLoggingConfiguration` for the 
string literal "logging_configuration" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"logging_configuration"' - pattern-not-regex: '"logging_configuration":\s+test\w+,' @@ -2040,7 +2040,7 @@ rules: message: Use the constant `names.AttrMax` for the string literal "max" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"max"' - pattern-not-regex: '"max":\s+test\w+,' @@ -2058,7 +2058,7 @@ rules: message: Use the constant `names.AttrMaxCapacity` for the string literal "max_capacity" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"max_capacity"' - pattern-not-regex: '"max_capacity":\s+test\w+,' @@ -2076,7 +2076,7 @@ rules: message: Use the constant `names.AttrMessage` for the string literal "message" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"message"' - pattern-not-regex: '"message":\s+test\w+,' @@ -2094,7 +2094,7 @@ rules: message: Use the constant `names.AttrMetricName` for the string literal "metric_name" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"metric_name"' - pattern-not-regex: '"metric_name":\s+test\w+,' @@ -2112,7 +2112,7 @@ rules: message: Use the constant `names.AttrMin` for the string literal "min" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"min"' - pattern-not-regex: '"min":\s+test\w+,' @@ -2130,7 +2130,7 @@ rules: message: Use the constant `names.AttrMode` for the string literal "mode" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"mode"' - pattern-not-regex: '"mode":\s+test\w+,' @@ -2148,7 +2148,7 @@ rules: message: Use the constant `names.AttrMostRecent` for the string literal "most_recent" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" 
patterns: - pattern: '"most_recent"' - pattern-not-regex: '"most_recent":\s+test\w+,' @@ -2166,7 +2166,7 @@ rules: message: Use the constant `names.AttrName` for the string literal "name" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"name"' - pattern-not-regex: '"name":\s+test\w+,' @@ -2184,7 +2184,7 @@ rules: message: Use the constant `names.AttrNamePrefix` for the string literal "name_prefix" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"name_prefix"' - pattern-not-regex: '"name_prefix":\s+test\w+,' @@ -2202,7 +2202,7 @@ rules: message: Use the constant `names.AttrNames` for the string literal "names" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"names"' - pattern-not-regex: '"names":\s+test\w+,' @@ -2220,7 +2220,7 @@ rules: message: Use the constant `names.AttrNamespace` for the string literal "namespace" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"namespace"' - pattern-not-regex: '"namespace":\s+test\w+,' @@ -2238,7 +2238,7 @@ rules: message: Use the constant `names.AttrNetworkConfiguration` for the string literal "network_configuration" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"network_configuration"' - pattern-not-regex: '"network_configuration":\s+test\w+,' @@ -2256,7 +2256,7 @@ rules: message: Use the constant `names.AttrNetworkInterfaceID` for the string literal "network_interface_id" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"network_interface_id"' - pattern-not-regex: '"network_interface_id":\s+test\w+,' @@ -2274,7 +2274,7 @@ rules: message: Use the constant `names.AttrOwner` for the string literal "owner" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"owner"' - 
pattern-not-regex: '"owner":\s+test\w+,' @@ -2292,7 +2292,7 @@ rules: message: Use the constant `names.AttrOwnerAccountID` for the string literal "owner_account_id" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"owner_account_id"' - pattern-not-regex: '"owner_account_id":\s+test\w+,' @@ -2310,7 +2310,7 @@ rules: message: Use the constant `names.AttrOwnerID` for the string literal "owner_id" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"owner_id"' - pattern-not-regex: '"owner_id":\s+test\w+,' @@ -2328,7 +2328,7 @@ rules: message: Use the constant `names.AttrParameter` for the string literal "parameter" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"parameter"' - pattern-not-regex: '"parameter":\s+test\w+,' @@ -2346,7 +2346,7 @@ rules: message: Use the constant `names.AttrParameterGroupName` for the string literal "parameter_group_name" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"parameter_group_name"' - pattern-not-regex: '"parameter_group_name":\s+test\w+,' @@ -2364,7 +2364,7 @@ rules: message: Use the constant `names.AttrParameters` for the string literal "parameters" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"parameters"' - pattern-not-regex: '"parameters":\s+test\w+,' @@ -2382,7 +2382,7 @@ rules: message: Use the constant `names.AttrPassword` for the string literal "password" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"password"' - pattern-not-regex: '"password":\s+test\w+,' @@ -2400,7 +2400,7 @@ rules: message: Use the constant `names.AttrPath` for the string literal "path" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"path"' - pattern-not-regex: '"path":\s+test\w+,' @@ 
-2418,7 +2418,7 @@ rules: message: Use the constant `names.AttrPermissions` for the string literal "permissions" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"permissions"' - pattern-not-regex: '"permissions":\s+test\w+,' @@ -2436,7 +2436,7 @@ rules: message: Use the constant `names.AttrPolicy` for the string literal "policy" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"policy"' - pattern-not-regex: '"policy":\s+test\w+,' @@ -2454,7 +2454,7 @@ rules: message: Use the constant `names.AttrPort` for the string literal "port" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"port"' - pattern-not-regex: '"port":\s+test\w+,' @@ -2472,7 +2472,7 @@ rules: message: Use the constant `names.AttrPreferredMaintenanceWindow` for the string literal "preferred_maintenance_window" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"preferred_maintenance_window"' - pattern-not-regex: '"preferred_maintenance_window":\s+test\w+,' @@ -2490,7 +2490,7 @@ rules: message: Use the constant `names.AttrPrefix` for the string literal "prefix" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"prefix"' - pattern-not-regex: '"prefix":\s+test\w+,' @@ -2508,7 +2508,7 @@ rules: message: Use the constant `names.AttrPrincipal` for the string literal "principal" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"principal"' - pattern-not-regex: '"principal":\s+test\w+,' @@ -2526,7 +2526,7 @@ rules: message: Use the constant `names.AttrPriority` for the string literal "priority" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"priority"' - pattern-not-regex: '"priority":\s+test\w+,' @@ -2544,7 +2544,7 @@ rules: message: Use the constant 
`names.AttrPrivateKey` for the string literal "private_key" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"private_key"' - pattern-not-regex: '"private_key":\s+test\w+,' @@ -2562,7 +2562,7 @@ rules: message: Use the constant `names.AttrProfile` for the string literal "profile" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"profile"' - pattern-not-regex: '"profile":\s+test\w+,' @@ -2580,7 +2580,7 @@ rules: message: Use the constant `names.AttrPropagateTags` for the string literal "propagate_tags" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"propagate_tags"' - pattern-not-regex: '"propagate_tags":\s+test\w+,' @@ -2598,7 +2598,7 @@ rules: message: Use the constant `names.AttrProperties` for the string literal "properties" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"properties"' - pattern-not-regex: '"properties":\s+test\w+,' @@ -2616,7 +2616,7 @@ rules: message: Use the constant `names.AttrProtocol` for the string literal "protocol" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"protocol"' - pattern-not-regex: '"protocol":\s+test\w+,' @@ -2634,7 +2634,7 @@ rules: message: Use the constant `names.AttrProviderName` for the string literal "provider_name" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"provider_name"' - pattern-not-regex: '"provider_name":\s+test\w+,' @@ -2652,7 +2652,7 @@ rules: message: Use the constant `names.AttrPublicKey` for the string literal "public_key" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"public_key"' - pattern-not-regex: '"public_key":\s+test\w+,' @@ -2670,7 +2670,7 @@ rules: message: Use the constant `names.AttrPubliclyAccessible` for the string 
literal "publicly_accessible" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"publicly_accessible"' - pattern-not-regex: '"publicly_accessible":\s+test\w+,' @@ -2688,7 +2688,7 @@ rules: message: Use the constant `names.AttrRegion` for the string literal "region" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"region"' - pattern-not-regex: '"region":\s+test\w+,' @@ -2706,7 +2706,7 @@ rules: message: Use the constant `names.AttrRepositoryName` for the string literal "repository_name" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"repository_name"' - pattern-not-regex: '"repository_name":\s+test\w+,' @@ -2724,7 +2724,7 @@ rules: message: Use the constant `names.AttrResourceARN` for the string literal "resource_arn" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"resource_arn"' - pattern-not-regex: '"resource_arn":\s+test\w+,' @@ -2742,7 +2742,7 @@ rules: message: Use the constant `names.AttrResourceID` for the string literal "resource_id" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"resource_id"' - pattern-not-regex: '"resource_id":\s+test\w+,' @@ -2760,7 +2760,7 @@ rules: message: Use the constant `names.AttrResourceOwner` for the string literal "resource_owner" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"resource_owner"' - pattern-not-regex: '"resource_owner":\s+test\w+,' @@ -2778,7 +2778,7 @@ rules: message: Use the constant `names.AttrResourceTags` for the string literal "resource_tags" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"resource_tags"' - pattern-not-regex: '"resource_tags":\s+test\w+,' @@ -2796,7 +2796,7 @@ rules: message: Use the constant `names.AttrResourceType` for the 
string literal "resource_type" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"resource_type"' - pattern-not-regex: '"resource_type":\s+test\w+,' @@ -2814,7 +2814,7 @@ rules: message: Use the constant `names.AttrResources` for the string literal "resources" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"resources"' - pattern-not-regex: '"resources":\s+test\w+,' @@ -2832,7 +2832,7 @@ rules: message: Use the constant `names.AttrRetentionPeriod` for the string literal "retention_period" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"retention_period"' - pattern-not-regex: '"retention_period":\s+test\w+,' @@ -2850,7 +2850,7 @@ rules: message: Use the constant `names.AttrRole` for the string literal "role" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"role"' - pattern-not-regex: '"role":\s+test\w+,' @@ -2868,7 +2868,7 @@ rules: message: Use the constant `names.AttrRoleARN` for the string literal "role_arn" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"role_arn"' - pattern-not-regex: '"role_arn":\s+test\w+,' @@ -2886,7 +2886,7 @@ rules: message: Use the constant `names.AttrRule` for the string literal "rule" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"rule"' - pattern-not-regex: '"rule":\s+test\w+,' @@ -2904,7 +2904,7 @@ rules: message: Use the constant `names.AttrS3Bucket` for the string literal "s3_bucket" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"s3_bucket"' - pattern-not-regex: '"s3_bucket":\s+test\w+,' @@ -2922,7 +2922,7 @@ rules: message: Use the constant `names.AttrS3BucketName` for the string literal "s3_bucket_name" paths: include: - - "internal/service/**/*.go" + - 
"/internal/service/**/*.go" patterns: - pattern: '"s3_bucket_name"' - pattern-not-regex: '"s3_bucket_name":\s+test\w+,' @@ -2940,7 +2940,7 @@ rules: message: Use the constant `names.AttrS3KeyPrefix` for the string literal "s3_key_prefix" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"s3_key_prefix"' - pattern-not-regex: '"s3_key_prefix":\s+test\w+,' @@ -2958,7 +2958,7 @@ rules: message: Use the constant `names.AttrSNSTopicARN` for the string literal "sns_topic_arn" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"sns_topic_arn"' - pattern-not-regex: '"sns_topic_arn":\s+test\w+,' @@ -2976,7 +2976,7 @@ rules: message: Use the constant `names.AttrSchedule` for the string literal "schedule" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"schedule"' - pattern-not-regex: '"schedule":\s+test\w+,' @@ -2994,7 +2994,7 @@ rules: message: Use the constant `names.AttrScheduleExpression` for the string literal "schedule_expression" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"schedule_expression"' - pattern-not-regex: '"schedule_expression":\s+test\w+,' @@ -3012,7 +3012,7 @@ rules: message: Use the constant `names.AttrSchema` for the string literal "schema" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"schema"' - pattern-not-regex: '"schema":\s+test\w+,' @@ -3030,7 +3030,7 @@ rules: message: Use the constant `names.AttrScope` for the string literal "scope" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"scope"' - pattern-not-regex: '"scope":\s+test\w+,' @@ -3048,7 +3048,7 @@ rules: message: Use the constant `names.AttrSecretKey` for the string literal "secret_key" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - 
pattern: '"secret_key"' - pattern-not-regex: '"secret_key":\s+test\w+,' @@ -3066,7 +3066,7 @@ rules: message: Use the constant `names.AttrSecurityGroupIDs` for the string literal "security_group_ids" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"security_group_ids"' - pattern-not-regex: '"security_group_ids":\s+test\w+,' @@ -3084,7 +3084,7 @@ rules: message: Use the constant `names.AttrSecurityGroups` for the string literal "security_groups" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"security_groups"' - pattern-not-regex: '"security_groups":\s+test\w+,' @@ -3102,7 +3102,7 @@ rules: message: Use the constant `names.AttrServiceName` for the string literal "service_name" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"service_name"' - pattern-not-regex: '"service_name":\s+test\w+,' @@ -3120,7 +3120,7 @@ rules: message: Use the constant `names.AttrServiceRole` for the string literal "service_role" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"service_role"' - pattern-not-regex: '"service_role":\s+test\w+,' @@ -3138,7 +3138,7 @@ rules: message: Use the constant `names.AttrServiceRoleARN` for the string literal "service_role_arn" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"service_role_arn"' - pattern-not-regex: '"service_role_arn":\s+test\w+,' @@ -3156,7 +3156,7 @@ rules: message: Use the constant `names.AttrSession` for the string literal "session" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"session"' - pattern-not-regex: '"session":\s+test\w+,' @@ -3174,7 +3174,7 @@ rules: message: Use the constant `names.AttrSharedConfigFiles` for the string literal "shared_config_files" paths: include: - - "internal/service/**/*.go" + - 
"/internal/service/**/*.go" patterns: - pattern: '"shared_config_files"' - pattern-not-regex: '"shared_config_files":\s+test\w+,' @@ -3192,7 +3192,7 @@ rules: message: Use the constant `names.AttrSize` for the string literal "size" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"size"' - pattern-not-regex: '"size":\s+test\w+,' @@ -3210,7 +3210,7 @@ rules: message: Use the constant `names.AttrSkipCredentialsValidation` for the string literal "skip_credentials_validation" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"skip_credentials_validation"' - pattern-not-regex: '"skip_credentials_validation":\s+test\w+,' @@ -3228,7 +3228,7 @@ rules: message: Use the constant `names.AttrSkipDestroy` for the string literal "skip_destroy" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"skip_destroy"' - pattern-not-regex: '"skip_destroy":\s+test\w+,' @@ -3246,7 +3246,7 @@ rules: message: Use the constant `names.AttrSkipRequestingAccountID` for the string literal "skip_requesting_account_id" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"skip_requesting_account_id"' - pattern-not-regex: '"skip_requesting_account_id":\s+test\w+,' @@ -3264,7 +3264,7 @@ rules: message: Use the constant `names.AttrSnapshotID` for the string literal "snapshot_id" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"snapshot_id"' - pattern-not-regex: '"snapshot_id":\s+test\w+,' @@ -3282,7 +3282,7 @@ rules: message: Use the constant `names.AttrSource` for the string literal "source" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"source"' - pattern-not-regex: '"source":\s+test\w+,' @@ -3300,7 +3300,7 @@ rules: message: Use the constant `names.AttrSourceType` for the string literal 
"source_type" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"source_type"' - pattern-not-regex: '"source_type":\s+test\w+,' @@ -3318,7 +3318,7 @@ rules: message: Use the constant `names.AttrStage` for the string literal "stage" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"stage"' - pattern-not-regex: '"stage":\s+test\w+,' @@ -3336,7 +3336,7 @@ rules: message: Use the constant `names.AttrStartTime` for the string literal "start_time" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"start_time"' - pattern-not-regex: '"start_time":\s+test\w+,' @@ -3354,7 +3354,7 @@ rules: message: Use the constant `names.AttrState` for the string literal "state" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"state"' - pattern-not-regex: '"state":\s+test\w+,' @@ -3372,7 +3372,7 @@ rules: message: Use the constant `names.AttrStatus` for the string literal "status" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"status"' - pattern-not-regex: '"status":\s+test\w+,' @@ -3390,7 +3390,7 @@ rules: message: Use the constant `names.AttrStatusCode` for the string literal "status_code" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"status_code"' - pattern-not-regex: '"status_code":\s+test\w+,' @@ -3408,7 +3408,7 @@ rules: message: Use the constant `names.AttrStatusMessage` for the string literal "status_message" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"status_message"' - pattern-not-regex: '"status_message":\s+test\w+,' @@ -3426,7 +3426,7 @@ rules: message: Use the constant `names.AttrStatusReason` for the string literal "status_reason" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" 
patterns: - pattern: '"status_reason"' - pattern-not-regex: '"status_reason":\s+test\w+,' @@ -3444,7 +3444,7 @@ rules: message: Use the constant `names.AttrStorageClass` for the string literal "storage_class" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"storage_class"' - pattern-not-regex: '"storage_class":\s+test\w+,' @@ -3462,7 +3462,7 @@ rules: message: Use the constant `names.AttrStorageEncrypted` for the string literal "storage_encrypted" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"storage_encrypted"' - pattern-not-regex: '"storage_encrypted":\s+test\w+,' @@ -3480,7 +3480,7 @@ rules: message: Use the constant `names.AttrStorageType` for the string literal "storage_type" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"storage_type"' - pattern-not-regex: '"storage_type":\s+test\w+,' @@ -3498,7 +3498,7 @@ rules: message: Use the constant `names.AttrStreamARN` for the string literal "stream_arn" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"stream_arn"' - pattern-not-regex: '"stream_arn":\s+test\w+,' @@ -3516,7 +3516,7 @@ rules: message: Use the constant `names.AttrSubnetID` for the string literal "subnet_id" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"subnet_id"' - pattern-not-regex: '"subnet_id":\s+test\w+,' @@ -3534,7 +3534,7 @@ rules: message: Use the constant `names.AttrSubnetIDs` for the string literal "subnet_ids" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"subnet_ids"' - pattern-not-regex: '"subnet_ids":\s+test\w+,' @@ -3552,7 +3552,7 @@ rules: message: Use the constant `names.AttrSubnets` for the string literal "subnets" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: 
'"subnets"' - pattern-not-regex: '"subnets":\s+test\w+,' @@ -3570,7 +3570,7 @@ rules: message: Use the constant `names.AttrTableName` for the string literal "table_name" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"table_name"' - pattern-not-regex: '"table_name":\s+test\w+,' @@ -3588,7 +3588,7 @@ rules: message: Use the constant `names.AttrTags` for the string literal "tags" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"tags"' - pattern-not-regex: '"tags":\s+test\w+,' @@ -3606,7 +3606,7 @@ rules: message: Use the constant `names.AttrTagsAll` for the string literal "tags_all" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"tags_all"' - pattern-not-regex: '"tags_all":\s+test\w+,' @@ -3624,7 +3624,7 @@ rules: message: Use the constant `names.AttrTarget` for the string literal "target" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"target"' - pattern-not-regex: '"target":\s+test\w+,' @@ -3642,7 +3642,7 @@ rules: message: Use the constant `names.AttrTargetARN` for the string literal "target_arn" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"target_arn"' - pattern-not-regex: '"target_arn":\s+test\w+,' @@ -3660,7 +3660,7 @@ rules: message: Use the constant `names.AttrThroughput` for the string literal "throughput" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"throughput"' - pattern-not-regex: '"throughput":\s+test\w+,' @@ -3678,7 +3678,7 @@ rules: message: Use the constant `names.AttrTimeout` for the string literal "timeout" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"timeout"' - pattern-not-regex: '"timeout":\s+test\w+,' @@ -3696,7 +3696,7 @@ rules: message: Use the constant 
`names.AttrTimeouts` for the string literal "timeouts" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"timeouts"' - pattern-not-regex: '"timeouts":\s+test\w+,' @@ -3714,7 +3714,7 @@ rules: message: Use the constant `names.AttrTopicARN` for the string literal "topic_arn" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"topic_arn"' - pattern-not-regex: '"topic_arn":\s+test\w+,' @@ -3732,7 +3732,7 @@ rules: message: Use the constant `names.AttrTransitGatewayAttachmentID` for the string literal "transit_gateway_attachment_id" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"transit_gateway_attachment_id"' - pattern-not-regex: '"transit_gateway_attachment_id":\s+test\w+,' @@ -3750,7 +3750,7 @@ rules: message: Use the constant `names.AttrTransitGatewayID` for the string literal "transit_gateway_id" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"transit_gateway_id"' - pattern-not-regex: '"transit_gateway_id":\s+test\w+,' @@ -3768,7 +3768,7 @@ rules: message: Use the constant `names.AttrTriggers` for the string literal "triggers" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"triggers"' - pattern-not-regex: '"triggers":\s+test\w+,' @@ -3786,7 +3786,7 @@ rules: message: Use the constant `names.AttrType` for the string literal "type" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"type"' - pattern-not-regex: '"type":\s+test\w+,' @@ -3804,7 +3804,7 @@ rules: message: Use the constant `names.AttrURI` for the string literal "uri" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"uri"' - pattern-not-regex: '"uri":\s+test\w+,' @@ -3822,7 +3822,7 @@ rules: message: Use the constant `names.AttrURL` for the string 
literal "url" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"url"' - pattern-not-regex: '"url":\s+test\w+,' @@ -3840,7 +3840,7 @@ rules: message: Use the constant `names.AttrUnit` for the string literal "unit" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"unit"' - pattern-not-regex: '"unit":\s+test\w+,' @@ -3858,7 +3858,7 @@ rules: message: Use the constant `names.AttrUserName` for the string literal "user_name" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"user_name"' - pattern-not-regex: '"user_name":\s+test\w+,' @@ -3876,7 +3876,7 @@ rules: message: Use the constant `names.AttrUserPoolID` for the string literal "user_pool_id" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"user_pool_id"' - pattern-not-regex: '"user_pool_id":\s+test\w+,' @@ -3894,7 +3894,7 @@ rules: message: Use the constant `names.AttrUsername` for the string literal "username" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"username"' - pattern-not-regex: '"username":\s+test\w+,' @@ -3912,7 +3912,7 @@ rules: message: Use the constant `names.AttrVPCConfig` for the string literal "vpc_config" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"vpc_config"' - pattern-not-regex: '"vpc_config":\s+test\w+,' @@ -3930,7 +3930,7 @@ rules: message: Use the constant `names.AttrVPCConfiguration` for the string literal "vpc_configuration" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"vpc_configuration"' - pattern-not-regex: '"vpc_configuration":\s+test\w+,' @@ -3948,7 +3948,7 @@ rules: message: Use the constant `names.AttrVPCEndpointID` for the string literal "vpc_endpoint_id" paths: include: - - "internal/service/**/*.go" + - 
"/internal/service/**/*.go" patterns: - pattern: '"vpc_endpoint_id"' - pattern-not-regex: '"vpc_endpoint_id":\s+test\w+,' @@ -3966,7 +3966,7 @@ rules: message: Use the constant `names.AttrVPCID` for the string literal "vpc_id" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"vpc_id"' - pattern-not-regex: '"vpc_id":\s+test\w+,' @@ -3984,7 +3984,7 @@ rules: message: Use the constant `names.AttrVPCSecurityGroupIDs` for the string literal "vpc_security_group_ids" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"vpc_security_group_ids"' - pattern-not-regex: '"vpc_security_group_ids":\s+test\w+,' @@ -4002,7 +4002,7 @@ rules: message: Use the constant `names.AttrValue` for the string literal "value" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"value"' - pattern-not-regex: '"value":\s+test\w+,' @@ -4020,7 +4020,7 @@ rules: message: Use the constant `names.AttrValues` for the string literal "values" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"values"' - pattern-not-regex: '"values":\s+test\w+,' @@ -4038,7 +4038,7 @@ rules: message: Use the constant `names.AttrVersion` for the string literal "version" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"version"' - pattern-not-regex: '"version":\s+test\w+,' @@ -4056,7 +4056,7 @@ rules: message: Use the constant `names.AttrVirtualName` for the string literal "virtual_name" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"virtual_name"' - pattern-not-regex: '"virtual_name":\s+test\w+,' @@ -4074,7 +4074,7 @@ rules: message: Use the constant `names.AttrVolumeSize` for the string literal "volume_size" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: 
'"volume_size"' - pattern-not-regex: '"volume_size":\s+test\w+,' @@ -4092,7 +4092,7 @@ rules: message: Use the constant `names.AttrVolumeType` for the string literal "volume_type" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"volume_type"' - pattern-not-regex: '"volume_type":\s+test\w+,' @@ -4110,7 +4110,7 @@ rules: message: Use the constant `names.AttrWeight` for the string literal "weight" paths: include: - - "internal/service/**/*.go" + - "/internal/service/**/*.go" patterns: - pattern: '"weight"' - pattern-not-regex: '"weight":\s+test\w+,' diff --git a/.ci/.semgrep-service-name0.yml b/.ci/.semgrep-service-name0.yml index 505867179cd5..c87b94b7ce5a 100644 --- a/.ci/.semgrep-service-name0.yml +++ b/.ci/.semgrep-service-name0.yml @@ -6,9 +6,9 @@ rules: message: Do not use "AccessAnalyzer" in func name inside accessanalyzer package paths: include: - - internal/service/accessanalyzer + - "/internal/service/accessanalyzer" exclude: - - internal/service/accessanalyzer/list_pages_gen.go + - "/internal/service/accessanalyzer/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -24,7 +24,7 @@ rules: message: Include "AccessAnalyzer" in test name paths: include: - - internal/service/accessanalyzer/*_test.go + - "/internal/service/accessanalyzer/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -39,7 +39,7 @@ rules: message: Do not use "AccessAnalyzer" in const name inside accessanalyzer package paths: include: - - internal/service/accessanalyzer + - "/internal/service/accessanalyzer" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -53,7 +53,7 @@ rules: message: Do not use "AccessAnalyzer" in var name inside accessanalyzer package paths: include: - - internal/service/accessanalyzer + - "/internal/service/accessanalyzer" patterns: - pattern: var $NAME = ... 
- metavariable-pattern: @@ -67,9 +67,9 @@ rules: message: Do not use "Account" in func name inside account package paths: include: - - internal/service/account + - "/internal/service/account" exclude: - - internal/service/account/list_pages_gen.go + - "/internal/service/account/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -85,7 +85,7 @@ rules: message: Include "Account" in test name paths: include: - - internal/service/account/*_test.go + - "/internal/service/account/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -100,7 +100,7 @@ rules: message: Do not use "Account" in const name inside account package paths: include: - - internal/service/account + - "/internal/service/account" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -114,7 +114,7 @@ rules: message: Do not use "Account" in var name inside account package paths: include: - - internal/service/account + - "/internal/service/account" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -128,9 +128,9 @@ rules: message: Do not use "ACM" in func name inside acm package paths: include: - - internal/service/acm + - "/internal/service/acm" exclude: - - internal/service/acm/list_pages_gen.go + - "/internal/service/acm/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -146,7 +146,7 @@ rules: message: Include "ACM" in test name paths: include: - - internal/service/acm/*_test.go + - "/internal/service/acm/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -161,7 +161,7 @@ rules: message: Do not use "ACM" in const name inside acm package paths: include: - - internal/service/acm + - "/internal/service/acm" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -175,7 +175,7 @@ rules: message: Do not use "ACM" in var name inside acm package paths: include: - - internal/service/acm + - "/internal/service/acm" patterns: - pattern: var $NAME = ... 
- metavariable-pattern: @@ -189,9 +189,9 @@ rules: message: Do not use "ACMPCA" in func name inside acmpca package paths: include: - - internal/service/acmpca + - "/internal/service/acmpca" exclude: - - internal/service/acmpca/list_pages_gen.go + - "/internal/service/acmpca/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -207,7 +207,7 @@ rules: message: Include "ACMPCA" in test name paths: include: - - internal/service/acmpca/*_test.go + - "/internal/service/acmpca/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -222,7 +222,7 @@ rules: message: Do not use "ACMPCA" in const name inside acmpca package paths: include: - - internal/service/acmpca + - "/internal/service/acmpca" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -236,7 +236,7 @@ rules: message: Do not use "ACMPCA" in var name inside acmpca package paths: include: - - internal/service/acmpca + - "/internal/service/acmpca" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -250,9 +250,9 @@ rules: message: Do not use "amg" in func name inside grafana package paths: include: - - internal/service/grafana + - "/internal/service/grafana" exclude: - - internal/service/grafana/list_pages_gen.go + - "/internal/service/grafana/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -268,7 +268,7 @@ rules: message: Do not use "amg" in const name inside grafana package paths: include: - - internal/service/grafana + - "/internal/service/grafana" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -282,7 +282,7 @@ rules: message: Do not use "amg" in var name inside grafana package paths: include: - - internal/service/grafana + - "/internal/service/grafana" patterns: - pattern: var $NAME = ... 
- metavariable-pattern: @@ -296,9 +296,9 @@ rules: message: Do not use "AMP" in func name inside amp package paths: include: - - internal/service/amp + - "/internal/service/amp" exclude: - - internal/service/amp/list_pages_gen.go + - "/internal/service/amp/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -314,7 +314,7 @@ rules: message: Include "AMP" in test name paths: include: - - internal/service/amp/*_test.go + - "/internal/service/amp/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -329,7 +329,7 @@ rules: message: Do not use "AMP" in const name inside amp package paths: include: - - internal/service/amp + - "/internal/service/amp" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -343,7 +343,7 @@ rules: message: Do not use "AMP" in var name inside amp package paths: include: - - internal/service/amp + - "/internal/service/amp" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -357,9 +357,9 @@ rules: message: Do not use "Amplify" in func name inside amplify package paths: include: - - internal/service/amplify + - "/internal/service/amplify" exclude: - - internal/service/amplify/list_pages_gen.go + - "/internal/service/amplify/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -375,7 +375,7 @@ rules: message: Include "Amplify" in test name paths: include: - - internal/service/amplify/*_test.go + - "/internal/service/amplify/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -390,7 +390,7 @@ rules: message: Do not use "Amplify" in const name inside amplify package paths: include: - - internal/service/amplify + - "/internal/service/amplify" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -404,7 +404,7 @@ rules: message: Do not use "Amplify" in var name inside amplify package paths: include: - - internal/service/amplify + - "/internal/service/amplify" patterns: - pattern: var $NAME = ... 
- metavariable-pattern: @@ -418,9 +418,9 @@ rules: message: Do not use "APIGateway" in func name inside apigateway package paths: include: - - internal/service/apigateway + - "/internal/service/apigateway" exclude: - - internal/service/apigateway/list_pages_gen.go + - "/internal/service/apigateway/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -436,7 +436,7 @@ rules: message: Include "APIGateway" in test name paths: include: - - internal/service/apigateway/*_test.go + - "/internal/service/apigateway/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -451,7 +451,7 @@ rules: message: Do not use "APIGateway" in const name inside apigateway package paths: include: - - internal/service/apigateway + - "/internal/service/apigateway" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -465,7 +465,7 @@ rules: message: Do not use "APIGateway" in var name inside apigateway package paths: include: - - internal/service/apigateway + - "/internal/service/apigateway" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -479,9 +479,9 @@ rules: message: Do not use "APIGatewayV2" in func name inside apigatewayv2 package paths: include: - - internal/service/apigatewayv2 + - "/internal/service/apigatewayv2" exclude: - - internal/service/apigatewayv2/list_pages_gen.go + - "/internal/service/apigatewayv2/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -497,7 +497,7 @@ rules: message: Include "APIGatewayV2" in test name paths: include: - - internal/service/apigatewayv2/*_test.go + - "/internal/service/apigatewayv2/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -512,7 +512,7 @@ rules: message: Do not use "APIGatewayV2" in const name inside apigatewayv2 package paths: include: - - internal/service/apigatewayv2 + - "/internal/service/apigatewayv2" patterns: - pattern: const $NAME = ... 
- metavariable-pattern: @@ -526,7 +526,7 @@ rules: message: Do not use "APIGatewayV2" in var name inside apigatewayv2 package paths: include: - - internal/service/apigatewayv2 + - "/internal/service/apigatewayv2" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -540,9 +540,9 @@ rules: message: Do not use "AppAutoScaling" in func name inside appautoscaling package paths: include: - - internal/service/appautoscaling + - "/internal/service/appautoscaling" exclude: - - internal/service/appautoscaling/list_pages_gen.go + - "/internal/service/appautoscaling/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -558,7 +558,7 @@ rules: message: Include "AppAutoScaling" in test name paths: include: - - internal/service/appautoscaling/*_test.go + - "/internal/service/appautoscaling/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -573,7 +573,7 @@ rules: message: Do not use "AppAutoScaling" in const name inside appautoscaling package paths: include: - - internal/service/appautoscaling + - "/internal/service/appautoscaling" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -587,7 +587,7 @@ rules: message: Do not use "AppAutoScaling" in var name inside appautoscaling package paths: include: - - internal/service/appautoscaling + - "/internal/service/appautoscaling" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -601,9 +601,9 @@ rules: message: Do not use "AppConfig" in func name inside appconfig package paths: include: - - internal/service/appconfig + - "/internal/service/appconfig" exclude: - - internal/service/appconfig/list_pages_gen.go + - "/internal/service/appconfig/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -619,7 +619,7 @@ rules: message: Include "AppConfig" in test name paths: include: - - internal/service/appconfig/*_test.go + - "/internal/service/appconfig/*_test.go" patterns: - pattern: func $NAME( ... 
) - metavariable-pattern: @@ -634,7 +634,7 @@ rules: message: Do not use "AppConfig" in const name inside appconfig package paths: include: - - internal/service/appconfig + - "/internal/service/appconfig" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -648,7 +648,7 @@ rules: message: Do not use "AppConfig" in var name inside appconfig package paths: include: - - internal/service/appconfig + - "/internal/service/appconfig" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -662,9 +662,9 @@ rules: message: Do not use "AppFabric" in func name inside appfabric package paths: include: - - internal/service/appfabric + - "/internal/service/appfabric" exclude: - - internal/service/appfabric/list_pages_gen.go + - "/internal/service/appfabric/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -680,7 +680,7 @@ rules: message: Include "AppFabric" in test name paths: include: - - internal/service/appfabric/*_test.go + - "/internal/service/appfabric/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -695,7 +695,7 @@ rules: message: Do not use "AppFabric" in const name inside appfabric package paths: include: - - internal/service/appfabric + - "/internal/service/appfabric" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -709,7 +709,7 @@ rules: message: Do not use "AppFabric" in var name inside appfabric package paths: include: - - internal/service/appfabric + - "/internal/service/appfabric" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -723,9 +723,9 @@ rules: message: Do not use "AppFlow" in func name inside appflow package paths: include: - - internal/service/appflow + - "/internal/service/appflow" exclude: - - internal/service/appflow/list_pages_gen.go + - "/internal/service/appflow/list_pages_gen.go" patterns: - pattern: func $NAME( ... 
) - metavariable-pattern: @@ -741,7 +741,7 @@ rules: message: Include "AppFlow" in test name paths: include: - - internal/service/appflow/*_test.go + - "/internal/service/appflow/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -756,7 +756,7 @@ rules: message: Do not use "AppFlow" in const name inside appflow package paths: include: - - internal/service/appflow + - "/internal/service/appflow" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -770,7 +770,7 @@ rules: message: Do not use "AppFlow" in var name inside appflow package paths: include: - - internal/service/appflow + - "/internal/service/appflow" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -784,9 +784,9 @@ rules: message: Do not use "AppIntegrations" in func name inside appintegrations package paths: include: - - internal/service/appintegrations + - "/internal/service/appintegrations" exclude: - - internal/service/appintegrations/list_pages_gen.go + - "/internal/service/appintegrations/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -802,7 +802,7 @@ rules: message: Include "AppIntegrations" in test name paths: include: - - internal/service/appintegrations/*_test.go + - "/internal/service/appintegrations/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -817,7 +817,7 @@ rules: message: Do not use "AppIntegrations" in const name inside appintegrations package paths: include: - - internal/service/appintegrations + - "/internal/service/appintegrations" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -831,7 +831,7 @@ rules: message: Do not use "AppIntegrations" in var name inside appintegrations package paths: include: - - internal/service/appintegrations + - "/internal/service/appintegrations" patterns: - pattern: var $NAME = ... 
- metavariable-pattern: @@ -845,9 +845,9 @@ rules: message: Do not use "appintegrationsservice" in func name inside appintegrations package paths: include: - - internal/service/appintegrations + - "/internal/service/appintegrations" exclude: - - internal/service/appintegrations/list_pages_gen.go + - "/internal/service/appintegrations/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -863,7 +863,7 @@ rules: message: Do not use "appintegrationsservice" in const name inside appintegrations package paths: include: - - internal/service/appintegrations + - "/internal/service/appintegrations" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -877,7 +877,7 @@ rules: message: Do not use "appintegrationsservice" in var name inside appintegrations package paths: include: - - internal/service/appintegrations + - "/internal/service/appintegrations" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -891,9 +891,9 @@ rules: message: Do not use "applicationautoscaling" in func name inside appautoscaling package paths: include: - - internal/service/appautoscaling + - "/internal/service/appautoscaling" exclude: - - internal/service/appautoscaling/list_pages_gen.go + - "/internal/service/appautoscaling/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -909,7 +909,7 @@ rules: message: Do not use "applicationautoscaling" in const name inside appautoscaling package paths: include: - - internal/service/appautoscaling + - "/internal/service/appautoscaling" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -923,7 +923,7 @@ rules: message: Do not use "applicationautoscaling" in var name inside appautoscaling package paths: include: - - internal/service/appautoscaling + - "/internal/service/appautoscaling" patterns: - pattern: var $NAME = ... 
- metavariable-pattern: @@ -937,9 +937,9 @@ rules: message: Do not use "ApplicationInsights" in func name inside applicationinsights package paths: include: - - internal/service/applicationinsights + - "/internal/service/applicationinsights" exclude: - - internal/service/applicationinsights/list_pages_gen.go + - "/internal/service/applicationinsights/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -955,7 +955,7 @@ rules: message: Include "ApplicationInsights" in test name paths: include: - - internal/service/applicationinsights/*_test.go + - "/internal/service/applicationinsights/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -970,7 +970,7 @@ rules: message: Do not use "ApplicationInsights" in const name inside applicationinsights package paths: include: - - internal/service/applicationinsights + - "/internal/service/applicationinsights" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -984,7 +984,7 @@ rules: message: Do not use "ApplicationInsights" in var name inside applicationinsights package paths: include: - - internal/service/applicationinsights + - "/internal/service/applicationinsights" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -998,9 +998,9 @@ rules: message: Do not use "ApplicationSignals" in func name inside applicationsignals package paths: include: - - internal/service/applicationsignals + - "/internal/service/applicationsignals" exclude: - - internal/service/applicationsignals/list_pages_gen.go + - "/internal/service/applicationsignals/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -1016,7 +1016,7 @@ rules: message: Include "ApplicationSignals" in test name paths: include: - - internal/service/applicationsignals/*_test.go + - "/internal/service/applicationsignals/*_test.go" patterns: - pattern: func $NAME( ... 
) - metavariable-pattern: @@ -1031,7 +1031,7 @@ rules: message: Do not use "ApplicationSignals" in const name inside applicationsignals package paths: include: - - internal/service/applicationsignals + - "/internal/service/applicationsignals" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -1045,7 +1045,7 @@ rules: message: Do not use "ApplicationSignals" in var name inside applicationsignals package paths: include: - - internal/service/applicationsignals + - "/internal/service/applicationsignals" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -1059,9 +1059,9 @@ rules: message: Do not use "AppMesh" in func name inside appmesh package paths: include: - - internal/service/appmesh + - "/internal/service/appmesh" exclude: - - internal/service/appmesh/list_pages_gen.go + - "/internal/service/appmesh/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -1077,7 +1077,7 @@ rules: message: Include "AppMesh" in test name paths: include: - - internal/service/appmesh/*_test.go + - "/internal/service/appmesh/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -1092,7 +1092,7 @@ rules: message: Do not use "AppMesh" in const name inside appmesh package paths: include: - - internal/service/appmesh + - "/internal/service/appmesh" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -1106,7 +1106,7 @@ rules: message: Do not use "AppMesh" in var name inside appmesh package paths: include: - - internal/service/appmesh + - "/internal/service/appmesh" patterns: - pattern: var $NAME = ... 
- metavariable-pattern: @@ -1120,9 +1120,9 @@ rules: message: Do not use "appregistry" in func name inside servicecatalogappregistry package paths: include: - - internal/service/servicecatalogappregistry + - "/internal/service/servicecatalogappregistry" exclude: - - internal/service/servicecatalogappregistry/list_pages_gen.go + - "/internal/service/servicecatalogappregistry/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -1138,7 +1138,7 @@ rules: message: Do not use "appregistry" in const name inside servicecatalogappregistry package paths: include: - - internal/service/servicecatalogappregistry + - "/internal/service/servicecatalogappregistry" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -1152,7 +1152,7 @@ rules: message: Do not use "appregistry" in var name inside servicecatalogappregistry package paths: include: - - internal/service/servicecatalogappregistry + - "/internal/service/servicecatalogappregistry" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -1166,9 +1166,9 @@ rules: message: Do not use "AppRunner" in func name inside apprunner package paths: include: - - internal/service/apprunner + - "/internal/service/apprunner" exclude: - - internal/service/apprunner/list_pages_gen.go + - "/internal/service/apprunner/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -1184,7 +1184,7 @@ rules: message: Include "AppRunner" in test name paths: include: - - internal/service/apprunner/*_test.go + - "/internal/service/apprunner/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -1199,7 +1199,7 @@ rules: message: Do not use "AppRunner" in const name inside apprunner package paths: include: - - internal/service/apprunner + - "/internal/service/apprunner" patterns: - pattern: const $NAME = ... 
- metavariable-pattern: @@ -1213,7 +1213,7 @@ rules: message: Do not use "AppRunner" in var name inside apprunner package paths: include: - - internal/service/apprunner + - "/internal/service/apprunner" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -1227,9 +1227,9 @@ rules: message: Do not use "AppStream" in func name inside appstream package paths: include: - - internal/service/appstream + - "/internal/service/appstream" exclude: - - internal/service/appstream/list_pages_gen.go + - "/internal/service/appstream/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -1245,7 +1245,7 @@ rules: message: Include "AppStream" in test name paths: include: - - internal/service/appstream/*_test.go + - "/internal/service/appstream/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -1260,7 +1260,7 @@ rules: message: Do not use "AppStream" in const name inside appstream package paths: include: - - internal/service/appstream + - "/internal/service/appstream" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -1274,7 +1274,7 @@ rules: message: Do not use "AppStream" in var name inside appstream package paths: include: - - internal/service/appstream + - "/internal/service/appstream" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -1288,9 +1288,9 @@ rules: message: Do not use "AppSync" in func name inside appsync package paths: include: - - internal/service/appsync + - "/internal/service/appsync" exclude: - - internal/service/appsync/list_pages_gen.go + - "/internal/service/appsync/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -1306,7 +1306,7 @@ rules: message: Include "AppSync" in test name paths: include: - - internal/service/appsync/*_test.go + - "/internal/service/appsync/*_test.go" patterns: - pattern: func $NAME( ... 
) - metavariable-pattern: @@ -1321,7 +1321,7 @@ rules: message: Do not use "AppSync" in const name inside appsync package paths: include: - - internal/service/appsync + - "/internal/service/appsync" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -1335,7 +1335,7 @@ rules: message: Do not use "AppSync" in var name inside appsync package paths: include: - - internal/service/appsync + - "/internal/service/appsync" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -1343,15 +1343,76 @@ rules: patterns: - pattern-regex: "(?i)AppSync" severity: WARNING + - id: arcregionswitch-in-func-name + languages: + - go + message: Do not use "ARCRegionSwitch" in func name inside arcregionswitch package + paths: + include: + - "/internal/service/arcregionswitch" + exclude: + - "/internal/service/arcregionswitch/list_pages_gen.go" + patterns: + - pattern: func $NAME( ... ) + - metavariable-pattern: + metavariable: $NAME + patterns: + - pattern-regex: "(?i)ARCRegionSwitch" + - focus-metavariable: $NAME + - pattern-not: func $NAME($T *testing.T) + severity: WARNING + - id: arcregionswitch-in-test-name + languages: + - go + message: Include "ARCRegionSwitch" in test name + paths: + include: + - "/internal/service/arcregionswitch/*_test.go" + patterns: + - pattern: func $NAME( ... ) + - metavariable-pattern: + metavariable: $NAME + patterns: + - pattern-not-regex: "^TestAccARCRegionSwitch" + - pattern-regex: ^TestAcc.* + severity: WARNING + - id: arcregionswitch-in-const-name + languages: + - go + message: Do not use "ARCRegionSwitch" in const name inside arcregionswitch package + paths: + include: + - "/internal/service/arcregionswitch" + patterns: + - pattern: const $NAME = ... 
+ - metavariable-pattern: + metavariable: $NAME + patterns: + - pattern-regex: "(?i)ARCRegionSwitch" + severity: WARNING + - id: arcregionswitch-in-var-name + languages: + - go + message: Do not use "ARCRegionSwitch" in var name inside arcregionswitch package + paths: + include: + - "/internal/service/arcregionswitch" + patterns: + - pattern: var $NAME = ... + - metavariable-pattern: + metavariable: $NAME + patterns: + - pattern-regex: "(?i)ARCRegionSwitch" + severity: WARNING - id: athena-in-func-name languages: - go message: Do not use "Athena" in func name inside athena package paths: include: - - internal/service/athena + - "/internal/service/athena" exclude: - - internal/service/athena/list_pages_gen.go + - "/internal/service/athena/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -1367,7 +1428,7 @@ rules: message: Include "Athena" in test name paths: include: - - internal/service/athena/*_test.go + - "/internal/service/athena/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -1382,7 +1443,7 @@ rules: message: Do not use "Athena" in const name inside athena package paths: include: - - internal/service/athena + - "/internal/service/athena" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -1396,7 +1457,7 @@ rules: message: Do not use "Athena" in var name inside athena package paths: include: - - internal/service/athena + - "/internal/service/athena" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -1410,9 +1471,9 @@ rules: message: Do not use "AuditManager" in func name inside auditmanager package paths: include: - - internal/service/auditmanager + - "/internal/service/auditmanager" exclude: - - internal/service/auditmanager/list_pages_gen.go + - "/internal/service/auditmanager/list_pages_gen.go" patterns: - pattern: func $NAME( ... 
) - metavariable-pattern: @@ -1428,7 +1489,7 @@ rules: message: Include "AuditManager" in test name paths: include: - - internal/service/auditmanager/*_test.go + - "/internal/service/auditmanager/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -1443,7 +1504,7 @@ rules: message: Do not use "AuditManager" in const name inside auditmanager package paths: include: - - internal/service/auditmanager + - "/internal/service/auditmanager" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -1457,7 +1518,7 @@ rules: message: Do not use "AuditManager" in var name inside auditmanager package paths: include: - - internal/service/auditmanager + - "/internal/service/auditmanager" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -1471,9 +1532,9 @@ rules: message: Do not use "AutoScaling" in func name inside autoscaling package paths: include: - - internal/service/autoscaling + - "/internal/service/autoscaling" exclude: - - internal/service/autoscaling/list_pages_gen.go + - "/internal/service/autoscaling/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -1489,7 +1550,7 @@ rules: message: Include "AutoScaling" in test name paths: include: - - internal/service/autoscaling/*_test.go + - "/internal/service/autoscaling/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -1504,7 +1565,7 @@ rules: message: Do not use "AutoScaling" in const name inside autoscaling package paths: include: - - internal/service/autoscaling + - "/internal/service/autoscaling" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -1518,7 +1579,7 @@ rules: message: Do not use "AutoScaling" in var name inside autoscaling package paths: include: - - internal/service/autoscaling + - "/internal/service/autoscaling" patterns: - pattern: var $NAME = ... 
- metavariable-pattern: @@ -1532,9 +1593,9 @@ rules: message: Do not use "AutoScalingPlans" in func name inside autoscalingplans package paths: include: - - internal/service/autoscalingplans + - "/internal/service/autoscalingplans" exclude: - - internal/service/autoscalingplans/list_pages_gen.go + - "/internal/service/autoscalingplans/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -1550,7 +1611,7 @@ rules: message: Include "AutoScalingPlans" in test name paths: include: - - internal/service/autoscalingplans/*_test.go + - "/internal/service/autoscalingplans/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -1565,7 +1626,7 @@ rules: message: Do not use "AutoScalingPlans" in const name inside autoscalingplans package paths: include: - - internal/service/autoscalingplans + - "/internal/service/autoscalingplans" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -1579,7 +1640,7 @@ rules: message: Do not use "AutoScalingPlans" in var name inside autoscalingplans package paths: include: - - internal/service/autoscalingplans + - "/internal/service/autoscalingplans" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -1593,9 +1654,9 @@ rules: message: Do not use "Backup" in func name inside backup package paths: include: - - internal/service/backup + - "/internal/service/backup" exclude: - - internal/service/backup/list_pages_gen.go + - "/internal/service/backup/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -1611,7 +1672,7 @@ rules: message: Include "Backup" in test name paths: include: - - internal/service/backup/*_test.go + - "/internal/service/backup/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -1626,7 +1687,7 @@ rules: message: Do not use "Backup" in const name inside backup package paths: include: - - internal/service/backup + - "/internal/service/backup" patterns: - pattern: const $NAME = ... 
- metavariable-pattern: @@ -1640,7 +1701,7 @@ rules: message: Do not use "Backup" in var name inside backup package paths: include: - - internal/service/backup + - "/internal/service/backup" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -1654,9 +1715,9 @@ rules: message: Do not use "Batch" in func name inside batch package paths: include: - - internal/service/batch + - "/internal/service/batch" exclude: - - internal/service/batch/list_pages_gen.go + - "/internal/service/batch/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -1672,7 +1733,7 @@ rules: message: Include "Batch" in test name paths: include: - - internal/service/batch/*_test.go + - "/internal/service/batch/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -1687,7 +1748,7 @@ rules: message: Do not use "Batch" in const name inside batch package paths: include: - - internal/service/batch + - "/internal/service/batch" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -1701,7 +1762,7 @@ rules: message: Do not use "Batch" in var name inside batch package paths: include: - - internal/service/batch + - "/internal/service/batch" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -1715,9 +1776,9 @@ rules: message: Do not use "BCMDataExports" in func name inside bcmdataexports package paths: include: - - internal/service/bcmdataexports + - "/internal/service/bcmdataexports" exclude: - - internal/service/bcmdataexports/list_pages_gen.go + - "/internal/service/bcmdataexports/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -1733,7 +1794,7 @@ rules: message: Include "BCMDataExports" in test name paths: include: - - internal/service/bcmdataexports/*_test.go + - "/internal/service/bcmdataexports/*_test.go" patterns: - pattern: func $NAME( ... 
) - metavariable-pattern: @@ -1748,7 +1809,7 @@ rules: message: Do not use "BCMDataExports" in const name inside bcmdataexports package paths: include: - - internal/service/bcmdataexports + - "/internal/service/bcmdataexports" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -1762,7 +1823,7 @@ rules: message: Do not use "BCMDataExports" in var name inside bcmdataexports package paths: include: - - internal/service/bcmdataexports + - "/internal/service/bcmdataexports" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -1776,9 +1837,9 @@ rules: message: Do not use "beanstalk" in func name inside elasticbeanstalk package paths: include: - - internal/service/elasticbeanstalk + - "/internal/service/elasticbeanstalk" exclude: - - internal/service/elasticbeanstalk/list_pages_gen.go + - "/internal/service/elasticbeanstalk/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -1794,7 +1855,7 @@ rules: message: Do not use "beanstalk" in const name inside elasticbeanstalk package paths: include: - - internal/service/elasticbeanstalk + - "/internal/service/elasticbeanstalk" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -1808,7 +1869,7 @@ rules: message: Do not use "beanstalk" in var name inside elasticbeanstalk package paths: include: - - internal/service/elasticbeanstalk + - "/internal/service/elasticbeanstalk" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -1822,9 +1883,9 @@ rules: message: Do not use "Bedrock" in func name inside bedrock package paths: include: - - internal/service/bedrock + - "/internal/service/bedrock" exclude: - - internal/service/bedrock/list_pages_gen.go + - "/internal/service/bedrock/list_pages_gen.go" patterns: - pattern: func $NAME( ... 
) - metavariable-pattern: @@ -1840,7 +1901,7 @@ rules: message: Include "Bedrock" in test name paths: include: - - internal/service/bedrock/*_test.go + - "/internal/service/bedrock/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -1855,7 +1916,7 @@ rules: message: Do not use "Bedrock" in const name inside bedrock package paths: include: - - internal/service/bedrock + - "/internal/service/bedrock" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -1869,7 +1930,7 @@ rules: message: Do not use "Bedrock" in var name inside bedrock package paths: include: - - internal/service/bedrock + - "/internal/service/bedrock" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -1883,9 +1944,9 @@ rules: message: Do not use "BedrockAgent" in func name inside bedrockagent package paths: include: - - internal/service/bedrockagent + - "/internal/service/bedrockagent" exclude: - - internal/service/bedrockagent/list_pages_gen.go + - "/internal/service/bedrockagent/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -1901,7 +1962,7 @@ rules: message: Include "BedrockAgent" in test name paths: include: - - internal/service/bedrockagent/*_test.go + - "/internal/service/bedrockagent/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -1916,7 +1977,7 @@ rules: message: Do not use "BedrockAgent" in const name inside bedrockagent package paths: include: - - internal/service/bedrockagent + - "/internal/service/bedrockagent" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -1930,7 +1991,7 @@ rules: message: Do not use "BedrockAgent" in var name inside bedrockagent package paths: include: - - internal/service/bedrockagent + - "/internal/service/bedrockagent" patterns: - pattern: var $NAME = ... 
- metavariable-pattern: @@ -1938,15 +1999,76 @@ rules: patterns: - pattern-regex: "(?i)BedrockAgent" severity: WARNING + - id: bedrockagentcore-in-func-name + languages: + - go + message: Do not use "BedrockAgentCore" in func name inside bedrockagentcore package + paths: + include: + - "/internal/service/bedrockagentcore" + exclude: + - "/internal/service/bedrockagentcore/list_pages_gen.go" + patterns: + - pattern: func $NAME( ... ) + - metavariable-pattern: + metavariable: $NAME + patterns: + - pattern-regex: "(?i)BedrockAgentCore" + - focus-metavariable: $NAME + - pattern-not: func $NAME($T *testing.T) + severity: WARNING + - id: bedrockagentcore-in-test-name + languages: + - go + message: Include "BedrockAgentCore" in test name + paths: + include: + - "/internal/service/bedrockagentcore/*_test.go" + patterns: + - pattern: func $NAME( ... ) + - metavariable-pattern: + metavariable: $NAME + patterns: + - pattern-not-regex: "^TestAccBedrockAgentCore" + - pattern-regex: ^TestAcc.* + severity: WARNING + - id: bedrockagentcore-in-const-name + languages: + - go + message: Do not use "BedrockAgentCore" in const name inside bedrockagentcore package + paths: + include: + - "/internal/service/bedrockagentcore" + patterns: + - pattern: const $NAME = ... + - metavariable-pattern: + metavariable: $NAME + patterns: + - pattern-regex: "(?i)BedrockAgentCore" + severity: WARNING + - id: bedrockagentcore-in-var-name + languages: + - go + message: Do not use "BedrockAgentCore" in var name inside bedrockagentcore package + paths: + include: + - "/internal/service/bedrockagentcore" + patterns: + - pattern: var $NAME = ... 
+ - metavariable-pattern: + metavariable: $NAME + patterns: + - pattern-regex: "(?i)BedrockAgentCore" + severity: WARNING - id: billing-in-func-name languages: - go message: Do not use "Billing" in func name inside billing package paths: include: - - internal/service/billing + - "/internal/service/billing" exclude: - - internal/service/billing/list_pages_gen.go + - "/internal/service/billing/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -1962,7 +2084,7 @@ rules: message: Include "Billing" in test name paths: include: - - internal/service/billing/*_test.go + - "/internal/service/billing/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -1977,7 +2099,7 @@ rules: message: Do not use "Billing" in const name inside billing package paths: include: - - internal/service/billing + - "/internal/service/billing" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -1991,7 +2113,7 @@ rules: message: Do not use "Billing" in var name inside billing package paths: include: - - internal/service/billing + - "/internal/service/billing" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -2005,9 +2127,9 @@ rules: message: Do not use "Budgets" in func name inside budgets package paths: include: - - internal/service/budgets + - "/internal/service/budgets" exclude: - - internal/service/budgets/list_pages_gen.go + - "/internal/service/budgets/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -2023,7 +2145,7 @@ rules: message: Include "Budgets" in test name paths: include: - - internal/service/budgets/*_test.go + - "/internal/service/budgets/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -2038,7 +2160,7 @@ rules: message: Do not use "Budgets" in const name inside budgets package paths: include: - - internal/service/budgets + - "/internal/service/budgets" patterns: - pattern: const $NAME = ... 
- metavariable-pattern: @@ -2052,7 +2174,7 @@ rules: message: Do not use "Budgets" in var name inside budgets package paths: include: - - internal/service/budgets + - "/internal/service/budgets" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -2066,9 +2188,9 @@ rules: message: Do not use "CE" in func name inside ce package paths: include: - - internal/service/ce + - "/internal/service/ce" exclude: - - internal/service/ce/list_pages_gen.go + - "/internal/service/ce/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -2087,7 +2209,7 @@ rules: message: Include "CE" in test name paths: include: - - internal/service/ce/*_test.go + - "/internal/service/ce/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -2102,7 +2224,7 @@ rules: message: Do not use "CE" in const name inside ce package paths: include: - - internal/service/ce + - "/internal/service/ce" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -2117,7 +2239,7 @@ rules: message: Do not use "CE" in var name inside ce package paths: include: - - internal/service/ce + - "/internal/service/ce" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -2133,9 +2255,9 @@ rules: message: Do not use "Chatbot" in func name inside chatbot package paths: include: - - internal/service/chatbot + - "/internal/service/chatbot" exclude: - - internal/service/chatbot/list_pages_gen.go + - "/internal/service/chatbot/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -2151,7 +2273,7 @@ rules: message: Include "Chatbot" in test name paths: include: - - internal/service/chatbot/*_test.go + - "/internal/service/chatbot/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -2166,7 +2288,7 @@ rules: message: Do not use "Chatbot" in const name inside chatbot package paths: include: - - internal/service/chatbot + - "/internal/service/chatbot" patterns: - pattern: const $NAME = ... 
- metavariable-pattern: @@ -2180,7 +2302,7 @@ rules: message: Do not use "Chatbot" in var name inside chatbot package paths: include: - - internal/service/chatbot + - "/internal/service/chatbot" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -2194,9 +2316,9 @@ rules: message: Do not use "Chime" in func name inside chime package paths: include: - - internal/service/chime + - "/internal/service/chime" exclude: - - internal/service/chime/list_pages_gen.go + - "/internal/service/chime/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -2212,7 +2334,7 @@ rules: message: Include "Chime" in test name paths: include: - - internal/service/chime/*_test.go + - "/internal/service/chime/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -2227,7 +2349,7 @@ rules: message: Do not use "Chime" in const name inside chime package paths: include: - - internal/service/chime + - "/internal/service/chime" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -2241,7 +2363,7 @@ rules: message: Do not use "Chime" in var name inside chime package paths: include: - - internal/service/chime + - "/internal/service/chime" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -2255,9 +2377,9 @@ rules: message: Do not use "ChimeSDKMediaPipelines" in func name inside chimesdkmediapipelines package paths: include: - - internal/service/chimesdkmediapipelines + - "/internal/service/chimesdkmediapipelines" exclude: - - internal/service/chimesdkmediapipelines/list_pages_gen.go + - "/internal/service/chimesdkmediapipelines/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -2273,7 +2395,7 @@ rules: message: Include "ChimeSDKMediaPipelines" in test name paths: include: - - internal/service/chimesdkmediapipelines/*_test.go + - "/internal/service/chimesdkmediapipelines/*_test.go" patterns: - pattern: func $NAME( ... 
) - metavariable-pattern: @@ -2288,7 +2410,7 @@ rules: message: Do not use "ChimeSDKMediaPipelines" in const name inside chimesdkmediapipelines package paths: include: - - internal/service/chimesdkmediapipelines + - "/internal/service/chimesdkmediapipelines" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -2302,7 +2424,7 @@ rules: message: Do not use "ChimeSDKMediaPipelines" in var name inside chimesdkmediapipelines package paths: include: - - internal/service/chimesdkmediapipelines + - "/internal/service/chimesdkmediapipelines" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -2316,9 +2438,9 @@ rules: message: Do not use "ChimeSDKVoice" in func name inside chimesdkvoice package paths: include: - - internal/service/chimesdkvoice + - "/internal/service/chimesdkvoice" exclude: - - internal/service/chimesdkvoice/list_pages_gen.go + - "/internal/service/chimesdkvoice/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -2334,7 +2456,7 @@ rules: message: Include "ChimeSDKVoice" in test name paths: include: - - internal/service/chimesdkvoice/*_test.go + - "/internal/service/chimesdkvoice/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -2349,7 +2471,7 @@ rules: message: Do not use "ChimeSDKVoice" in const name inside chimesdkvoice package paths: include: - - internal/service/chimesdkvoice + - "/internal/service/chimesdkvoice" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -2363,7 +2485,7 @@ rules: message: Do not use "ChimeSDKVoice" in var name inside chimesdkvoice package paths: include: - - internal/service/chimesdkvoice + - "/internal/service/chimesdkvoice" patterns: - pattern: var $NAME = ... 
- metavariable-pattern: @@ -2377,9 +2499,9 @@ rules: message: Do not use "CleanRooms" in func name inside cleanrooms package paths: include: - - internal/service/cleanrooms + - "/internal/service/cleanrooms" exclude: - - internal/service/cleanrooms/list_pages_gen.go + - "/internal/service/cleanrooms/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -2395,7 +2517,7 @@ rules: message: Include "CleanRooms" in test name paths: include: - - internal/service/cleanrooms/*_test.go + - "/internal/service/cleanrooms/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -2410,7 +2532,7 @@ rules: message: Do not use "CleanRooms" in const name inside cleanrooms package paths: include: - - internal/service/cleanrooms + - "/internal/service/cleanrooms" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -2424,7 +2546,7 @@ rules: message: Do not use "CleanRooms" in var name inside cleanrooms package paths: include: - - internal/service/cleanrooms + - "/internal/service/cleanrooms" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -2438,9 +2560,9 @@ rules: message: Do not use "Cloud9" in func name inside cloud9 package paths: include: - - internal/service/cloud9 + - "/internal/service/cloud9" exclude: - - internal/service/cloud9/list_pages_gen.go + - "/internal/service/cloud9/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -2456,7 +2578,7 @@ rules: message: Include "Cloud9" in test name paths: include: - - internal/service/cloud9/*_test.go + - "/internal/service/cloud9/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -2471,7 +2593,7 @@ rules: message: Do not use "Cloud9" in const name inside cloud9 package paths: include: - - internal/service/cloud9 + - "/internal/service/cloud9" patterns: - pattern: const $NAME = ... 
- metavariable-pattern: @@ -2485,7 +2607,7 @@ rules: message: Do not use "Cloud9" in var name inside cloud9 package paths: include: - - internal/service/cloud9 + - "/internal/service/cloud9" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -2499,9 +2621,9 @@ rules: message: Do not use "CloudControl" in func name inside cloudcontrol package paths: include: - - internal/service/cloudcontrol + - "/internal/service/cloudcontrol" exclude: - - internal/service/cloudcontrol/list_pages_gen.go + - "/internal/service/cloudcontrol/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -2517,7 +2639,7 @@ rules: message: Include "CloudControl" in test name paths: include: - - internal/service/cloudcontrol/*_test.go + - "/internal/service/cloudcontrol/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -2532,7 +2654,7 @@ rules: message: Do not use "CloudControl" in const name inside cloudcontrol package paths: include: - - internal/service/cloudcontrol + - "/internal/service/cloudcontrol" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -2546,7 +2668,7 @@ rules: message: Do not use "CloudControl" in var name inside cloudcontrol package paths: include: - - internal/service/cloudcontrol + - "/internal/service/cloudcontrol" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -2560,9 +2682,9 @@ rules: message: Do not use "cloudcontrolapi" in func name inside cloudcontrol package paths: include: - - internal/service/cloudcontrol + - "/internal/service/cloudcontrol" exclude: - - internal/service/cloudcontrol/list_pages_gen.go + - "/internal/service/cloudcontrol/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -2578,7 +2700,7 @@ rules: message: Do not use "cloudcontrolapi" in const name inside cloudcontrol package paths: include: - - internal/service/cloudcontrol + - "/internal/service/cloudcontrol" patterns: - pattern: const $NAME = ... 
- metavariable-pattern: @@ -2592,7 +2714,7 @@ rules: message: Do not use "cloudcontrolapi" in var name inside cloudcontrol package paths: include: - - internal/service/cloudcontrol + - "/internal/service/cloudcontrol" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -2606,9 +2728,9 @@ rules: message: Do not use "CloudFormation" in func name inside cloudformation package paths: include: - - internal/service/cloudformation + - "/internal/service/cloudformation" exclude: - - internal/service/cloudformation/list_pages_gen.go + - "/internal/service/cloudformation/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -2624,7 +2746,7 @@ rules: message: Include "CloudFormation" in test name paths: include: - - internal/service/cloudformation/*_test.go + - "/internal/service/cloudformation/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -2639,7 +2761,7 @@ rules: message: Do not use "CloudFormation" in const name inside cloudformation package paths: include: - - internal/service/cloudformation + - "/internal/service/cloudformation" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -2653,7 +2775,7 @@ rules: message: Do not use "CloudFormation" in var name inside cloudformation package paths: include: - - internal/service/cloudformation + - "/internal/service/cloudformation" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -2667,9 +2789,9 @@ rules: message: Do not use "CloudFront" in func name inside cloudfront package paths: include: - - internal/service/cloudfront + - "/internal/service/cloudfront" exclude: - - internal/service/cloudfront/list_pages_gen.go + - "/internal/service/cloudfront/list_pages_gen.go" patterns: - pattern: func $NAME( ... 
) - metavariable-pattern: @@ -2685,7 +2807,7 @@ rules: message: Include "CloudFront" in test name paths: include: - - internal/service/cloudfront/*_test.go + - "/internal/service/cloudfront/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -2700,7 +2822,7 @@ rules: message: Do not use "CloudFront" in const name inside cloudfront package paths: include: - - internal/service/cloudfront + - "/internal/service/cloudfront" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -2714,7 +2836,7 @@ rules: message: Do not use "CloudFront" in var name inside cloudfront package paths: include: - - internal/service/cloudfront + - "/internal/service/cloudfront" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -2728,9 +2850,9 @@ rules: message: Do not use "CloudFrontKeyValueStore" in func name inside cloudfrontkeyvaluestore package paths: include: - - internal/service/cloudfrontkeyvaluestore + - "/internal/service/cloudfrontkeyvaluestore" exclude: - - internal/service/cloudfrontkeyvaluestore/list_pages_gen.go + - "/internal/service/cloudfrontkeyvaluestore/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -2746,7 +2868,7 @@ rules: message: Include "CloudFrontKeyValueStore" in test name paths: include: - - internal/service/cloudfrontkeyvaluestore/*_test.go + - "/internal/service/cloudfrontkeyvaluestore/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -2761,7 +2883,7 @@ rules: message: Do not use "CloudFrontKeyValueStore" in const name inside cloudfrontkeyvaluestore package paths: include: - - internal/service/cloudfrontkeyvaluestore + - "/internal/service/cloudfrontkeyvaluestore" patterns: - pattern: const $NAME = ... 
- metavariable-pattern: @@ -2775,7 +2897,7 @@ rules: message: Do not use "CloudFrontKeyValueStore" in var name inside cloudfrontkeyvaluestore package paths: include: - - internal/service/cloudfrontkeyvaluestore + - "/internal/service/cloudfrontkeyvaluestore" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -2789,9 +2911,9 @@ rules: message: Do not use "cloudhsm" in func name inside cloudhsmv2 package paths: include: - - internal/service/cloudhsmv2 + - "/internal/service/cloudhsmv2" exclude: - - internal/service/cloudhsmv2/list_pages_gen.go + - "/internal/service/cloudhsmv2/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -2807,7 +2929,7 @@ rules: message: Do not use "cloudhsm" in const name inside cloudhsmv2 package paths: include: - - internal/service/cloudhsmv2 + - "/internal/service/cloudhsmv2" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -2821,7 +2943,7 @@ rules: message: Do not use "cloudhsm" in var name inside cloudhsmv2 package paths: include: - - internal/service/cloudhsmv2 + - "/internal/service/cloudhsmv2" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -2835,9 +2957,9 @@ rules: message: Do not use "CloudHSMV2" in func name inside cloudhsmv2 package paths: include: - - internal/service/cloudhsmv2 + - "/internal/service/cloudhsmv2" exclude: - - internal/service/cloudhsmv2/list_pages_gen.go + - "/internal/service/cloudhsmv2/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -2853,7 +2975,7 @@ rules: message: Include "CloudHSMV2" in test name paths: include: - - internal/service/cloudhsmv2/*_test.go + - "/internal/service/cloudhsmv2/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -2868,7 +2990,7 @@ rules: message: Do not use "CloudHSMV2" in const name inside cloudhsmv2 package paths: include: - - internal/service/cloudhsmv2 + - "/internal/service/cloudhsmv2" patterns: - pattern: const $NAME = ... 
- metavariable-pattern: @@ -2882,7 +3004,7 @@ rules: message: Do not use "CloudHSMV2" in var name inside cloudhsmv2 package paths: include: - - internal/service/cloudhsmv2 + - "/internal/service/cloudhsmv2" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -2896,9 +3018,9 @@ rules: message: Do not use "CloudSearch" in func name inside cloudsearch package paths: include: - - internal/service/cloudsearch + - "/internal/service/cloudsearch" exclude: - - internal/service/cloudsearch/list_pages_gen.go + - "/internal/service/cloudsearch/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -2914,7 +3036,7 @@ rules: message: Include "CloudSearch" in test name paths: include: - - internal/service/cloudsearch/*_test.go + - "/internal/service/cloudsearch/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -2929,7 +3051,7 @@ rules: message: Do not use "CloudSearch" in const name inside cloudsearch package paths: include: - - internal/service/cloudsearch + - "/internal/service/cloudsearch" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -2943,7 +3065,7 @@ rules: message: Do not use "CloudSearch" in var name inside cloudsearch package paths: include: - - internal/service/cloudsearch + - "/internal/service/cloudsearch" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -2957,9 +3079,9 @@ rules: message: Do not use "CloudTrail" in func name inside cloudtrail package paths: include: - - internal/service/cloudtrail + - "/internal/service/cloudtrail" exclude: - - internal/service/cloudtrail/list_pages_gen.go + - "/internal/service/cloudtrail/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -2976,7 +3098,7 @@ rules: message: Include "CloudTrail" in test name paths: include: - - internal/service/cloudtrail/*_test.go + - "/internal/service/cloudtrail/*_test.go" patterns: - pattern: func $NAME( ... 
) - metavariable-pattern: @@ -2991,7 +3113,7 @@ rules: message: Do not use "CloudTrail" in const name inside cloudtrail package paths: include: - - internal/service/cloudtrail + - "/internal/service/cloudtrail" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -3005,7 +3127,7 @@ rules: message: Do not use "CloudTrail" in var name inside cloudtrail package paths: include: - - internal/service/cloudtrail + - "/internal/service/cloudtrail" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -3019,9 +3141,9 @@ rules: message: Do not use "CloudWatch" in func name inside cloudwatch package paths: include: - - internal/service/cloudwatch + - "/internal/service/cloudwatch" exclude: - - internal/service/cloudwatch/list_pages_gen.go + - "/internal/service/cloudwatch/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -3037,7 +3159,7 @@ rules: message: Include "CloudWatch" in test name paths: include: - - internal/service/cloudwatch/*_test.go + - "/internal/service/cloudwatch/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -3052,7 +3174,7 @@ rules: message: Do not use "CloudWatch" in const name inside cloudwatch package paths: include: - - internal/service/cloudwatch + - "/internal/service/cloudwatch" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -3066,7 +3188,7 @@ rules: message: Do not use "CloudWatch" in var name inside cloudwatch package paths: include: - - internal/service/cloudwatch + - "/internal/service/cloudwatch" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -3080,9 +3202,9 @@ rules: message: Do not use "cloudwatchevents" in func name inside events package paths: include: - - internal/service/events + - "/internal/service/events" exclude: - - internal/service/events/list_pages_gen.go + - "/internal/service/events/list_pages_gen.go" patterns: - pattern: func $NAME( ... 
) - metavariable-pattern: @@ -3098,7 +3220,7 @@ rules: message: Do not use "cloudwatchevents" in const name inside events package paths: include: - - internal/service/events + - "/internal/service/events" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -3112,7 +3234,7 @@ rules: message: Do not use "cloudwatchevents" in var name inside events package paths: include: - - internal/service/events + - "/internal/service/events" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -3126,9 +3248,9 @@ rules: message: Do not use "cloudwatchevidently" in func name inside evidently package paths: include: - - internal/service/evidently + - "/internal/service/evidently" exclude: - - internal/service/evidently/list_pages_gen.go + - "/internal/service/evidently/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -3144,7 +3266,7 @@ rules: message: Do not use "cloudwatchevidently" in const name inside evidently package paths: include: - - internal/service/evidently + - "/internal/service/evidently" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -3158,7 +3280,7 @@ rules: message: Do not use "cloudwatchevidently" in var name inside evidently package paths: include: - - internal/service/evidently + - "/internal/service/evidently" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -3172,9 +3294,9 @@ rules: message: Do not use "cloudwatchlog" in func name inside logs package paths: include: - - internal/service/logs + - "/internal/service/logs" exclude: - - internal/service/logs/list_pages_gen.go + - "/internal/service/logs/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -3190,7 +3312,7 @@ rules: message: Do not use "cloudwatchlog" in const name inside logs package paths: include: - - internal/service/logs + - "/internal/service/logs" patterns: - pattern: const $NAME = ... 
- metavariable-pattern: @@ -3204,7 +3326,7 @@ rules: message: Do not use "cloudwatchlog" in var name inside logs package paths: include: - - internal/service/logs + - "/internal/service/logs" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -3218,9 +3340,9 @@ rules: message: Do not use "cloudwatchlogs" in func name inside logs package paths: include: - - internal/service/logs + - "/internal/service/logs" exclude: - - internal/service/logs/list_pages_gen.go + - "/internal/service/logs/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -3236,7 +3358,7 @@ rules: message: Do not use "cloudwatchlogs" in const name inside logs package paths: include: - - internal/service/logs + - "/internal/service/logs" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -3250,7 +3372,7 @@ rules: message: Do not use "cloudwatchlogs" in var name inside logs package paths: include: - - internal/service/logs + - "/internal/service/logs" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -3264,9 +3386,9 @@ rules: message: Do not use "cloudwatchobservabilityaccessmanager" in func name inside oam package paths: include: - - internal/service/oam + - "/internal/service/oam" exclude: - - internal/service/oam/list_pages_gen.go + - "/internal/service/oam/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -3282,7 +3404,7 @@ rules: message: Do not use "cloudwatchobservabilityaccessmanager" in const name inside oam package paths: include: - - internal/service/oam + - "/internal/service/oam" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -3296,7 +3418,7 @@ rules: message: Do not use "cloudwatchobservabilityaccessmanager" in var name inside oam package paths: include: - - internal/service/oam + - "/internal/service/oam" patterns: - pattern: var $NAME = ... 
- metavariable-pattern: @@ -3310,9 +3432,9 @@ rules: message: Do not use "cloudwatchrum" in func name inside rum package paths: include: - - internal/service/rum + - "/internal/service/rum" exclude: - - internal/service/rum/list_pages_gen.go + - "/internal/service/rum/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -3328,7 +3450,7 @@ rules: message: Do not use "cloudwatchrum" in const name inside rum package paths: include: - - internal/service/rum + - "/internal/service/rum" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -3342,7 +3464,7 @@ rules: message: Do not use "cloudwatchrum" in var name inside rum package paths: include: - - internal/service/rum + - "/internal/service/rum" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -3356,9 +3478,9 @@ rules: message: Do not use "CodeArtifact" in func name inside codeartifact package paths: include: - - internal/service/codeartifact + - "/internal/service/codeartifact" exclude: - - internal/service/codeartifact/list_pages_gen.go + - "/internal/service/codeartifact/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -3374,7 +3496,7 @@ rules: message: Include "CodeArtifact" in test name paths: include: - - internal/service/codeartifact/*_test.go + - "/internal/service/codeartifact/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -3389,7 +3511,7 @@ rules: message: Do not use "CodeArtifact" in const name inside codeartifact package paths: include: - - internal/service/codeartifact + - "/internal/service/codeartifact" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -3403,7 +3525,7 @@ rules: message: Do not use "CodeArtifact" in var name inside codeartifact package paths: include: - - internal/service/codeartifact + - "/internal/service/codeartifact" patterns: - pattern: var $NAME = ... 
- metavariable-pattern: @@ -3417,9 +3539,9 @@ rules: message: Do not use "CodeBuild" in func name inside codebuild package paths: include: - - internal/service/codebuild + - "/internal/service/codebuild" exclude: - - internal/service/codebuild/list_pages_gen.go + - "/internal/service/codebuild/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -3435,7 +3557,7 @@ rules: message: Include "CodeBuild" in test name paths: include: - - internal/service/codebuild/*_test.go + - "/internal/service/codebuild/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -3450,7 +3572,7 @@ rules: message: Do not use "CodeBuild" in const name inside codebuild package paths: include: - - internal/service/codebuild + - "/internal/service/codebuild" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -3464,7 +3586,7 @@ rules: message: Do not use "CodeBuild" in var name inside codebuild package paths: include: - - internal/service/codebuild + - "/internal/service/codebuild" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -3478,9 +3600,9 @@ rules: message: Do not use "CodeCatalyst" in func name inside codecatalyst package paths: include: - - internal/service/codecatalyst + - "/internal/service/codecatalyst" exclude: - - internal/service/codecatalyst/list_pages_gen.go + - "/internal/service/codecatalyst/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -3496,7 +3618,7 @@ rules: message: Include "CodeCatalyst" in test name paths: include: - - internal/service/codecatalyst/*_test.go + - "/internal/service/codecatalyst/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -3511,7 +3633,7 @@ rules: message: Do not use "CodeCatalyst" in const name inside codecatalyst package paths: include: - - internal/service/codecatalyst + - "/internal/service/codecatalyst" patterns: - pattern: const $NAME = ... 
- metavariable-pattern: @@ -3525,7 +3647,7 @@ rules: message: Do not use "CodeCatalyst" in var name inside codecatalyst package paths: include: - - internal/service/codecatalyst + - "/internal/service/codecatalyst" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -3539,9 +3661,9 @@ rules: message: Do not use "CodeCommit" in func name inside codecommit package paths: include: - - internal/service/codecommit + - "/internal/service/codecommit" exclude: - - internal/service/codecommit/list_pages_gen.go + - "/internal/service/codecommit/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -3557,7 +3679,7 @@ rules: message: Include "CodeCommit" in test name paths: include: - - internal/service/codecommit/*_test.go + - "/internal/service/codecommit/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -3572,7 +3694,7 @@ rules: message: Do not use "CodeCommit" in const name inside codecommit package paths: include: - - internal/service/codecommit + - "/internal/service/codecommit" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -3586,7 +3708,7 @@ rules: message: Do not use "CodeCommit" in var name inside codecommit package paths: include: - - internal/service/codecommit + - "/internal/service/codecommit" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -3600,9 +3722,9 @@ rules: message: Do not use "CodeConnections" in func name inside codeconnections package paths: include: - - internal/service/codeconnections + - "/internal/service/codeconnections" exclude: - - internal/service/codeconnections/list_pages_gen.go + - "/internal/service/codeconnections/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -3618,7 +3740,7 @@ rules: message: Include "CodeConnections" in test name paths: include: - - internal/service/codeconnections/*_test.go + - "/internal/service/codeconnections/*_test.go" patterns: - pattern: func $NAME( ... 
) - metavariable-pattern: @@ -3633,7 +3755,7 @@ rules: message: Do not use "CodeConnections" in const name inside codeconnections package paths: include: - - internal/service/codeconnections + - "/internal/service/codeconnections" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -3647,7 +3769,7 @@ rules: message: Do not use "CodeConnections" in var name inside codeconnections package paths: include: - - internal/service/codeconnections + - "/internal/service/codeconnections" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -3661,9 +3783,9 @@ rules: message: Do not use "codedeploy" in func name inside deploy package paths: include: - - internal/service/deploy + - "/internal/service/deploy" exclude: - - internal/service/deploy/list_pages_gen.go + - "/internal/service/deploy/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -3679,7 +3801,7 @@ rules: message: Do not use "codedeploy" in const name inside deploy package paths: include: - - internal/service/deploy + - "/internal/service/deploy" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -3693,7 +3815,7 @@ rules: message: Do not use "codedeploy" in var name inside deploy package paths: include: - - internal/service/deploy + - "/internal/service/deploy" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -3707,9 +3829,9 @@ rules: message: Do not use "CodeGuruProfiler" in func name inside codeguruprofiler package paths: include: - - internal/service/codeguruprofiler + - "/internal/service/codeguruprofiler" exclude: - - internal/service/codeguruprofiler/list_pages_gen.go + - "/internal/service/codeguruprofiler/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -3725,7 +3847,7 @@ rules: message: Include "CodeGuruProfiler" in test name paths: include: - - internal/service/codeguruprofiler/*_test.go + - "/internal/service/codeguruprofiler/*_test.go" patterns: - pattern: func $NAME( ... 
) - metavariable-pattern: @@ -3740,7 +3862,7 @@ rules: message: Do not use "CodeGuruProfiler" in const name inside codeguruprofiler package paths: include: - - internal/service/codeguruprofiler + - "/internal/service/codeguruprofiler" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -3754,7 +3876,7 @@ rules: message: Do not use "CodeGuruProfiler" in var name inside codeguruprofiler package paths: include: - - internal/service/codeguruprofiler + - "/internal/service/codeguruprofiler" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -3768,9 +3890,9 @@ rules: message: Do not use "CodeGuruReviewer" in func name inside codegurureviewer package paths: include: - - internal/service/codegurureviewer + - "/internal/service/codegurureviewer" exclude: - - internal/service/codegurureviewer/list_pages_gen.go + - "/internal/service/codegurureviewer/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -3786,7 +3908,7 @@ rules: message: Include "CodeGuruReviewer" in test name paths: include: - - internal/service/codegurureviewer/*_test.go + - "/internal/service/codegurureviewer/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -3801,7 +3923,7 @@ rules: message: Do not use "CodeGuruReviewer" in const name inside codegurureviewer package paths: include: - - internal/service/codegurureviewer + - "/internal/service/codegurureviewer" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -3815,7 +3937,7 @@ rules: message: Do not use "CodeGuruReviewer" in var name inside codegurureviewer package paths: include: - - internal/service/codegurureviewer + - "/internal/service/codegurureviewer" patterns: - pattern: var $NAME = ... 
- metavariable-pattern: @@ -3829,9 +3951,9 @@ rules: message: Do not use "CodePipeline" in func name inside codepipeline package paths: include: - - internal/service/codepipeline + - "/internal/service/codepipeline" exclude: - - internal/service/codepipeline/list_pages_gen.go + - "/internal/service/codepipeline/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -3847,7 +3969,7 @@ rules: message: Include "CodePipeline" in test name paths: include: - - internal/service/codepipeline/*_test.go + - "/internal/service/codepipeline/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -3862,7 +3984,7 @@ rules: message: Do not use "CodePipeline" in const name inside codepipeline package paths: include: - - internal/service/codepipeline + - "/internal/service/codepipeline" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -3876,7 +3998,7 @@ rules: message: Do not use "CodePipeline" in var name inside codepipeline package paths: include: - - internal/service/codepipeline + - "/internal/service/codepipeline" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -3890,9 +4012,9 @@ rules: message: Do not use "CodeStarConnections" in func name inside codestarconnections package paths: include: - - internal/service/codestarconnections + - "/internal/service/codestarconnections" exclude: - - internal/service/codestarconnections/list_pages_gen.go + - "/internal/service/codestarconnections/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -3908,7 +4030,7 @@ rules: message: Include "CodeStarConnections" in test name paths: include: - - internal/service/codestarconnections/*_test.go + - "/internal/service/codestarconnections/*_test.go" patterns: - pattern: func $NAME( ... 
) - metavariable-pattern: @@ -3923,7 +4045,7 @@ rules: message: Do not use "CodeStarConnections" in const name inside codestarconnections package paths: include: - - internal/service/codestarconnections + - "/internal/service/codestarconnections" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -3937,7 +4059,7 @@ rules: message: Do not use "CodeStarConnections" in var name inside codestarconnections package paths: include: - - internal/service/codestarconnections + - "/internal/service/codestarconnections" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -3951,9 +4073,9 @@ rules: message: Do not use "CodeStarNotifications" in func name inside codestarnotifications package paths: include: - - internal/service/codestarnotifications + - "/internal/service/codestarnotifications" exclude: - - internal/service/codestarnotifications/list_pages_gen.go + - "/internal/service/codestarnotifications/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -3969,7 +4091,7 @@ rules: message: Include "CodeStarNotifications" in test name paths: include: - - internal/service/codestarnotifications/*_test.go + - "/internal/service/codestarnotifications/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -3984,7 +4106,7 @@ rules: message: Do not use "CodeStarNotifications" in const name inside codestarnotifications package paths: include: - - internal/service/codestarnotifications + - "/internal/service/codestarnotifications" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -3998,7 +4120,7 @@ rules: message: Do not use "CodeStarNotifications" in var name inside codestarnotifications package paths: include: - - internal/service/codestarnotifications + - "/internal/service/codestarnotifications" patterns: - pattern: var $NAME = ... 
- metavariable-pattern: @@ -4012,9 +4134,9 @@ rules: message: Do not use "CognitoIdentity" in func name inside cognitoidentity package paths: include: - - internal/service/cognitoidentity + - "/internal/service/cognitoidentity" exclude: - - internal/service/cognitoidentity/list_pages_gen.go + - "/internal/service/cognitoidentity/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -4030,7 +4152,7 @@ rules: message: Include "CognitoIdentity" in test name paths: include: - - internal/service/cognitoidentity/*_test.go + - "/internal/service/cognitoidentity/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -4045,7 +4167,7 @@ rules: message: Do not use "CognitoIdentity" in const name inside cognitoidentity package paths: include: - - internal/service/cognitoidentity + - "/internal/service/cognitoidentity" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -4059,7 +4181,7 @@ rules: message: Do not use "CognitoIdentity" in var name inside cognitoidentity package paths: include: - - internal/service/cognitoidentity + - "/internal/service/cognitoidentity" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -4073,9 +4195,9 @@ rules: message: Do not use "cognitoidentityprovider" in func name inside cognitoidp package paths: include: - - internal/service/cognitoidp + - "/internal/service/cognitoidp" exclude: - - internal/service/cognitoidp/list_pages_gen.go + - "/internal/service/cognitoidp/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -4091,7 +4213,7 @@ rules: message: Do not use "cognitoidentityprovider" in const name inside cognitoidp package paths: include: - - internal/service/cognitoidp + - "/internal/service/cognitoidp" patterns: - pattern: const $NAME = ... 
- metavariable-pattern: @@ -4105,7 +4227,7 @@ rules: message: Do not use "cognitoidentityprovider" in var name inside cognitoidp package paths: include: - - internal/service/cognitoidp + - "/internal/service/cognitoidp" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -4119,9 +4241,9 @@ rules: message: Do not use "CognitoIDP" in func name inside cognitoidp package paths: include: - - internal/service/cognitoidp + - "/internal/service/cognitoidp" exclude: - - internal/service/cognitoidp/list_pages_gen.go + - "/internal/service/cognitoidp/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -4137,7 +4259,7 @@ rules: message: Include "CognitoIDP" in test name paths: include: - - internal/service/cognitoidp/*_test.go + - "/internal/service/cognitoidp/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -4152,7 +4274,7 @@ rules: message: Do not use "CognitoIDP" in const name inside cognitoidp package paths: include: - - internal/service/cognitoidp + - "/internal/service/cognitoidp" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -4166,7 +4288,7 @@ rules: message: Do not use "CognitoIDP" in var name inside cognitoidp package paths: include: - - internal/service/cognitoidp + - "/internal/service/cognitoidp" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -4180,9 +4302,9 @@ rules: message: Do not use "Comprehend" in func name inside comprehend package paths: include: - - internal/service/comprehend + - "/internal/service/comprehend" exclude: - - internal/service/comprehend/list_pages_gen.go + - "/internal/service/comprehend/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -4198,7 +4320,7 @@ rules: message: Include "Comprehend" in test name paths: include: - - internal/service/comprehend/*_test.go + - "/internal/service/comprehend/*_test.go" patterns: - pattern: func $NAME( ... 
) - metavariable-pattern: @@ -4213,7 +4335,7 @@ rules: message: Do not use "Comprehend" in const name inside comprehend package paths: include: - - internal/service/comprehend + - "/internal/service/comprehend" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -4227,7 +4349,7 @@ rules: message: Do not use "Comprehend" in var name inside comprehend package paths: include: - - internal/service/comprehend + - "/internal/service/comprehend" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -4241,9 +4363,9 @@ rules: message: Do not use "ComputeOptimizer" in func name inside computeoptimizer package paths: include: - - internal/service/computeoptimizer + - "/internal/service/computeoptimizer" exclude: - - internal/service/computeoptimizer/list_pages_gen.go + - "/internal/service/computeoptimizer/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -4259,7 +4381,7 @@ rules: message: Include "ComputeOptimizer" in test name paths: include: - - internal/service/computeoptimizer/*_test.go + - "/internal/service/computeoptimizer/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -4274,7 +4396,7 @@ rules: message: Do not use "ComputeOptimizer" in const name inside computeoptimizer package paths: include: - - internal/service/computeoptimizer + - "/internal/service/computeoptimizer" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -4288,7 +4410,7 @@ rules: message: Do not use "ComputeOptimizer" in var name inside computeoptimizer package paths: include: - - internal/service/computeoptimizer + - "/internal/service/computeoptimizer" patterns: - pattern: var $NAME = ... 
- metavariable-pattern: @@ -4302,9 +4424,9 @@ rules: message: Do not use "ConfigService" in func name inside configservice package paths: include: - - internal/service/configservice + - "/internal/service/configservice" exclude: - - internal/service/configservice/list_pages_gen.go + - "/internal/service/configservice/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -4320,7 +4442,7 @@ rules: message: Include "ConfigService" in test name paths: include: - - internal/service/configservice/*_test.go + - "/internal/service/configservice/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -4329,50 +4451,3 @@ rules: - pattern-not-regex: "^TestAccConfigService" - pattern-regex: ^TestAcc.* severity: WARNING - - id: configservice-in-const-name - languages: - - go - message: Do not use "ConfigService" in const name inside configservice package - paths: - include: - - internal/service/configservice - patterns: - - pattern: const $NAME = ... - - metavariable-pattern: - metavariable: $NAME - patterns: - - pattern-regex: "(?i)ConfigService" - severity: WARNING - - id: configservice-in-var-name - languages: - - go - message: Do not use "ConfigService" in var name inside configservice package - paths: - include: - - internal/service/configservice - patterns: - - pattern: var $NAME = ... - - metavariable-pattern: - metavariable: $NAME - patterns: - - pattern-regex: "(?i)ConfigService" - severity: WARNING - - id: connect-in-func-name - languages: - - go - message: Do not use "Connect" in func name inside connect package - paths: - include: - - internal/service/connect - exclude: - - internal/service/connect/list_pages_gen.go - patterns: - - pattern: func $NAME( ... 
) - - metavariable-pattern: - metavariable: $NAME - patterns: - - pattern-regex: "(?i)Connect" - - pattern-not-regex: .*uickConnect.* - - focus-metavariable: $NAME - - pattern-not: func $NAME($T *testing.T) - severity: WARNING diff --git a/.ci/.semgrep-service-name1.yml b/.ci/.semgrep-service-name1.yml index 9667e2aac131..dd4188e2c05f 100644 --- a/.ci/.semgrep-service-name1.yml +++ b/.ci/.semgrep-service-name1.yml @@ -1,12 +1,59 @@ # Generated by internal/generate/servicesemgrep/main.go; DO NOT EDIT. rules: + - id: configservice-in-const-name + languages: + - go + message: Do not use "ConfigService" in const name inside configservice package + paths: + include: + - "/internal/service/configservice" + patterns: + - pattern: const $NAME = ... + - metavariable-pattern: + metavariable: $NAME + patterns: + - pattern-regex: "(?i)ConfigService" + severity: WARNING + - id: configservice-in-var-name + languages: + - go + message: Do not use "ConfigService" in var name inside configservice package + paths: + include: + - "/internal/service/configservice" + patterns: + - pattern: var $NAME = ... + - metavariable-pattern: + metavariable: $NAME + patterns: + - pattern-regex: "(?i)ConfigService" + severity: WARNING + - id: connect-in-func-name + languages: + - go + message: Do not use "Connect" in func name inside connect package + paths: + include: + - "/internal/service/connect" + exclude: + - "/internal/service/connect/list_pages_gen.go" + patterns: + - pattern: func $NAME( ... ) + - metavariable-pattern: + metavariable: $NAME + patterns: + - pattern-regex: "(?i)Connect" + - pattern-not-regex: .*uickConnect.* + - focus-metavariable: $NAME + - pattern-not: func $NAME($T *testing.T) + severity: WARNING - id: connect-in-test-name languages: - go message: Include "Connect" in test name paths: include: - - internal/service/connect/*_test.go + - "/internal/service/connect/*_test.go" patterns: - pattern: func $NAME( ... 
) - metavariable-pattern: @@ -21,7 +68,7 @@ rules: message: Do not use "Connect" in const name inside connect package paths: include: - - internal/service/connect + - "/internal/service/connect" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -36,7 +83,7 @@ rules: message: Do not use "Connect" in var name inside connect package paths: include: - - internal/service/connect + - "/internal/service/connect" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -51,9 +98,9 @@ rules: message: Do not use "ConnectCases" in func name inside connectcases package paths: include: - - internal/service/connectcases + - "/internal/service/connectcases" exclude: - - internal/service/connectcases/list_pages_gen.go + - "/internal/service/connectcases/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -69,7 +116,7 @@ rules: message: Include "ConnectCases" in test name paths: include: - - internal/service/connectcases/*_test.go + - "/internal/service/connectcases/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -84,7 +131,7 @@ rules: message: Do not use "ConnectCases" in const name inside connectcases package paths: include: - - internal/service/connectcases + - "/internal/service/connectcases" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -98,7 +145,7 @@ rules: message: Do not use "ConnectCases" in var name inside connectcases package paths: include: - - internal/service/connectcases + - "/internal/service/connectcases" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -112,9 +159,9 @@ rules: message: Do not use "ControlTower" in func name inside controltower package paths: include: - - internal/service/controltower + - "/internal/service/controltower" exclude: - - internal/service/controltower/list_pages_gen.go + - "/internal/service/controltower/list_pages_gen.go" patterns: - pattern: func $NAME( ... 
) - metavariable-pattern: @@ -130,7 +177,7 @@ rules: message: Include "ControlTower" in test name paths: include: - - internal/service/controltower/*_test.go + - "/internal/service/controltower/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -145,7 +192,7 @@ rules: message: Do not use "ControlTower" in const name inside controltower package paths: include: - - internal/service/controltower + - "/internal/service/controltower" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -159,7 +206,7 @@ rules: message: Do not use "ControlTower" in var name inside controltower package paths: include: - - internal/service/controltower + - "/internal/service/controltower" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -173,9 +220,9 @@ rules: message: Do not use "costandusagereportservice" in func name inside cur package paths: include: - - internal/service/cur + - "/internal/service/cur" exclude: - - internal/service/cur/list_pages_gen.go + - "/internal/service/cur/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -191,7 +238,7 @@ rules: message: Do not use "costandusagereportservice" in const name inside cur package paths: include: - - internal/service/cur + - "/internal/service/cur" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -205,7 +252,7 @@ rules: message: Do not use "costandusagereportservice" in var name inside cur package paths: include: - - internal/service/cur + - "/internal/service/cur" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -219,9 +266,9 @@ rules: message: Do not use "costexplorer" in func name inside ce package paths: include: - - internal/service/ce + - "/internal/service/ce" exclude: - - internal/service/ce/list_pages_gen.go + - "/internal/service/ce/list_pages_gen.go" patterns: - pattern: func $NAME( ... 
) - metavariable-pattern: @@ -237,7 +284,7 @@ rules: message: Do not use "costexplorer" in const name inside ce package paths: include: - - internal/service/ce + - "/internal/service/ce" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -251,7 +298,7 @@ rules: message: Do not use "costexplorer" in var name inside ce package paths: include: - - internal/service/ce + - "/internal/service/ce" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -265,9 +312,9 @@ rules: message: Do not use "CostOptimizationHub" in func name inside costoptimizationhub package paths: include: - - internal/service/costoptimizationhub + - "/internal/service/costoptimizationhub" exclude: - - internal/service/costoptimizationhub/list_pages_gen.go + - "/internal/service/costoptimizationhub/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -283,7 +330,7 @@ rules: message: Include "CostOptimizationHub" in test name paths: include: - - internal/service/costoptimizationhub/*_test.go + - "/internal/service/costoptimizationhub/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -298,7 +345,7 @@ rules: message: Do not use "CostOptimizationHub" in const name inside costoptimizationhub package paths: include: - - internal/service/costoptimizationhub + - "/internal/service/costoptimizationhub" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -312,7 +359,7 @@ rules: message: Do not use "CostOptimizationHub" in var name inside costoptimizationhub package paths: include: - - internal/service/costoptimizationhub + - "/internal/service/costoptimizationhub" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -326,9 +373,9 @@ rules: message: Do not use "CUR" in func name inside cur package paths: include: - - internal/service/cur + - "/internal/service/cur" exclude: - - internal/service/cur/list_pages_gen.go + - "/internal/service/cur/list_pages_gen.go" patterns: - pattern: func $NAME( ... 
) - metavariable-pattern: @@ -344,7 +391,7 @@ rules: message: Include "CUR" in test name paths: include: - - internal/service/cur/*_test.go + - "/internal/service/cur/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -359,7 +406,7 @@ rules: message: Do not use "CUR" in const name inside cur package paths: include: - - internal/service/cur + - "/internal/service/cur" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -373,7 +420,7 @@ rules: message: Do not use "CUR" in var name inside cur package paths: include: - - internal/service/cur + - "/internal/service/cur" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -387,9 +434,9 @@ rules: message: Do not use "CustomerProfiles" in func name inside customerprofiles package paths: include: - - internal/service/customerprofiles + - "/internal/service/customerprofiles" exclude: - - internal/service/customerprofiles/list_pages_gen.go + - "/internal/service/customerprofiles/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -405,7 +452,7 @@ rules: message: Include "CustomerProfiles" in test name paths: include: - - internal/service/customerprofiles/*_test.go + - "/internal/service/customerprofiles/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -420,7 +467,7 @@ rules: message: Do not use "CustomerProfiles" in const name inside customerprofiles package paths: include: - - internal/service/customerprofiles + - "/internal/service/customerprofiles" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -434,7 +481,7 @@ rules: message: Do not use "CustomerProfiles" in var name inside customerprofiles package paths: include: - - internal/service/customerprofiles + - "/internal/service/customerprofiles" patterns: - pattern: var $NAME = ... 
- metavariable-pattern: @@ -448,9 +495,9 @@ rules: message: Do not use "databasemigration" in func name inside dms package paths: include: - - internal/service/dms + - "/internal/service/dms" exclude: - - internal/service/dms/list_pages_gen.go + - "/internal/service/dms/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -466,7 +513,7 @@ rules: message: Do not use "databasemigration" in const name inside dms package paths: include: - - internal/service/dms + - "/internal/service/dms" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -480,7 +527,7 @@ rules: message: Do not use "databasemigration" in var name inside dms package paths: include: - - internal/service/dms + - "/internal/service/dms" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -494,9 +541,9 @@ rules: message: Do not use "databasemigrationservice" in func name inside dms package paths: include: - - internal/service/dms + - "/internal/service/dms" exclude: - - internal/service/dms/list_pages_gen.go + - "/internal/service/dms/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -512,7 +559,7 @@ rules: message: Do not use "databasemigrationservice" in const name inside dms package paths: include: - - internal/service/dms + - "/internal/service/dms" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -526,7 +573,7 @@ rules: message: Do not use "databasemigrationservice" in var name inside dms package paths: include: - - internal/service/dms + - "/internal/service/dms" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -540,9 +587,9 @@ rules: message: Do not use "DataBrew" in func name inside databrew package paths: include: - - internal/service/databrew + - "/internal/service/databrew" exclude: - - internal/service/databrew/list_pages_gen.go + - "/internal/service/databrew/list_pages_gen.go" patterns: - pattern: func $NAME( ... 
) - metavariable-pattern: @@ -558,7 +605,7 @@ rules: message: Include "DataBrew" in test name paths: include: - - internal/service/databrew/*_test.go + - "/internal/service/databrew/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -573,7 +620,7 @@ rules: message: Do not use "DataBrew" in const name inside databrew package paths: include: - - internal/service/databrew + - "/internal/service/databrew" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -587,7 +634,7 @@ rules: message: Do not use "DataBrew" in var name inside databrew package paths: include: - - internal/service/databrew + - "/internal/service/databrew" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -601,9 +648,9 @@ rules: message: Do not use "DataExchange" in func name inside dataexchange package paths: include: - - internal/service/dataexchange + - "/internal/service/dataexchange" exclude: - - internal/service/dataexchange/list_pages_gen.go + - "/internal/service/dataexchange/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -619,7 +666,7 @@ rules: message: Include "DataExchange" in test name paths: include: - - internal/service/dataexchange/*_test.go + - "/internal/service/dataexchange/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -634,7 +681,7 @@ rules: message: Do not use "DataExchange" in const name inside dataexchange package paths: include: - - internal/service/dataexchange + - "/internal/service/dataexchange" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -648,7 +695,7 @@ rules: message: Do not use "DataExchange" in var name inside dataexchange package paths: include: - - internal/service/dataexchange + - "/internal/service/dataexchange" patterns: - pattern: var $NAME = ... 
- metavariable-pattern: @@ -662,9 +709,9 @@ rules: message: Do not use "DataPipeline" in func name inside datapipeline package paths: include: - - internal/service/datapipeline + - "/internal/service/datapipeline" exclude: - - internal/service/datapipeline/list_pages_gen.go + - "/internal/service/datapipeline/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -680,7 +727,7 @@ rules: message: Include "DataPipeline" in test name paths: include: - - internal/service/datapipeline/*_test.go + - "/internal/service/datapipeline/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -695,7 +742,7 @@ rules: message: Do not use "DataPipeline" in const name inside datapipeline package paths: include: - - internal/service/datapipeline + - "/internal/service/datapipeline" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -709,7 +756,7 @@ rules: message: Do not use "DataPipeline" in var name inside datapipeline package paths: include: - - internal/service/datapipeline + - "/internal/service/datapipeline" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -723,9 +770,9 @@ rules: message: Do not use "DataSync" in func name inside datasync package paths: include: - - internal/service/datasync + - "/internal/service/datasync" exclude: - - internal/service/datasync/list_pages_gen.go + - "/internal/service/datasync/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -741,7 +788,7 @@ rules: message: Include "DataSync" in test name paths: include: - - internal/service/datasync/*_test.go + - "/internal/service/datasync/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -756,7 +803,7 @@ rules: message: Do not use "DataSync" in const name inside datasync package paths: include: - - internal/service/datasync + - "/internal/service/datasync" patterns: - pattern: const $NAME = ... 
- metavariable-pattern: @@ -770,7 +817,7 @@ rules: message: Do not use "DataSync" in var name inside datasync package paths: include: - - internal/service/datasync + - "/internal/service/datasync" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -784,9 +831,9 @@ rules: message: Do not use "DataZone" in func name inside datazone package paths: include: - - internal/service/datazone + - "/internal/service/datazone" exclude: - - internal/service/datazone/list_pages_gen.go + - "/internal/service/datazone/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -802,7 +849,7 @@ rules: message: Include "DataZone" in test name paths: include: - - internal/service/datazone/*_test.go + - "/internal/service/datazone/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -817,7 +864,7 @@ rules: message: Do not use "DataZone" in const name inside datazone package paths: include: - - internal/service/datazone + - "/internal/service/datazone" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -831,7 +878,7 @@ rules: message: Do not use "DataZone" in var name inside datazone package paths: include: - - internal/service/datazone + - "/internal/service/datazone" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -845,9 +892,9 @@ rules: message: Do not use "DAX" in func name inside dax package paths: include: - - internal/service/dax + - "/internal/service/dax" exclude: - - internal/service/dax/list_pages_gen.go + - "/internal/service/dax/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -863,7 +910,7 @@ rules: message: Include "DAX" in test name paths: include: - - internal/service/dax/*_test.go + - "/internal/service/dax/*_test.go" patterns: - pattern: func $NAME( ... 
) - metavariable-pattern: @@ -878,7 +925,7 @@ rules: message: Do not use "DAX" in const name inside dax package paths: include: - - internal/service/dax + - "/internal/service/dax" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -892,7 +939,7 @@ rules: message: Do not use "DAX" in var name inside dax package paths: include: - - internal/service/dax + - "/internal/service/dax" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -906,9 +953,9 @@ rules: message: Do not use "Deploy" in func name inside deploy package paths: include: - - internal/service/deploy + - "/internal/service/deploy" exclude: - - internal/service/deploy/list_pages_gen.go + - "/internal/service/deploy/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -925,7 +972,7 @@ rules: message: Include "Deploy" in test name paths: include: - - internal/service/deploy/*_test.go + - "/internal/service/deploy/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -940,7 +987,7 @@ rules: message: Do not use "Deploy" in const name inside deploy package paths: include: - - internal/service/deploy + - "/internal/service/deploy" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -954,7 +1001,7 @@ rules: message: Do not use "Deploy" in var name inside deploy package paths: include: - - internal/service/deploy + - "/internal/service/deploy" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -968,9 +1015,9 @@ rules: message: Do not use "Detective" in func name inside detective package paths: include: - - internal/service/detective + - "/internal/service/detective" exclude: - - internal/service/detective/list_pages_gen.go + - "/internal/service/detective/list_pages_gen.go" patterns: - pattern: func $NAME( ... 
) - metavariable-pattern: @@ -986,7 +1033,7 @@ rules: message: Include "Detective" in test name paths: include: - - internal/service/detective/*_test.go + - "/internal/service/detective/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -1001,7 +1048,7 @@ rules: message: Do not use "Detective" in const name inside detective package paths: include: - - internal/service/detective + - "/internal/service/detective" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -1015,7 +1062,7 @@ rules: message: Do not use "Detective" in var name inside detective package paths: include: - - internal/service/detective + - "/internal/service/detective" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -1029,9 +1076,9 @@ rules: message: Do not use "DeviceFarm" in func name inside devicefarm package paths: include: - - internal/service/devicefarm + - "/internal/service/devicefarm" exclude: - - internal/service/devicefarm/list_pages_gen.go + - "/internal/service/devicefarm/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -1047,7 +1094,7 @@ rules: message: Include "DeviceFarm" in test name paths: include: - - internal/service/devicefarm/*_test.go + - "/internal/service/devicefarm/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -1062,7 +1109,7 @@ rules: message: Do not use "DeviceFarm" in const name inside devicefarm package paths: include: - - internal/service/devicefarm + - "/internal/service/devicefarm" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -1076,7 +1123,7 @@ rules: message: Do not use "DeviceFarm" in var name inside devicefarm package paths: include: - - internal/service/devicefarm + - "/internal/service/devicefarm" patterns: - pattern: var $NAME = ... 
- metavariable-pattern: @@ -1090,9 +1137,9 @@ rules: message: Do not use "DevOpsGuru" in func name inside devopsguru package paths: include: - - internal/service/devopsguru + - "/internal/service/devopsguru" exclude: - - internal/service/devopsguru/list_pages_gen.go + - "/internal/service/devopsguru/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -1108,7 +1155,7 @@ rules: message: Include "DevOpsGuru" in test name paths: include: - - internal/service/devopsguru/*_test.go + - "/internal/service/devopsguru/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -1123,7 +1170,7 @@ rules: message: Do not use "DevOpsGuru" in const name inside devopsguru package paths: include: - - internal/service/devopsguru + - "/internal/service/devopsguru" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -1137,7 +1184,7 @@ rules: message: Do not use "DevOpsGuru" in var name inside devopsguru package paths: include: - - internal/service/devopsguru + - "/internal/service/devopsguru" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -1151,9 +1198,9 @@ rules: message: Do not use "DirectConnect" in func name inside directconnect package paths: include: - - internal/service/directconnect + - "/internal/service/directconnect" exclude: - - internal/service/directconnect/list_pages_gen.go + - "/internal/service/directconnect/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -1169,7 +1216,7 @@ rules: message: Include "DirectConnect" in test name paths: include: - - internal/service/directconnect/*_test.go + - "/internal/service/directconnect/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -1184,7 +1231,7 @@ rules: message: Do not use "DirectConnect" in const name inside directconnect package paths: include: - - internal/service/directconnect + - "/internal/service/directconnect" patterns: - pattern: const $NAME = ... 
- metavariable-pattern: @@ -1198,7 +1245,7 @@ rules: message: Do not use "DirectConnect" in var name inside directconnect package paths: include: - - internal/service/directconnect + - "/internal/service/directconnect" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -1212,9 +1259,9 @@ rules: message: Do not use "directoryservice" in func name inside ds package paths: include: - - internal/service/ds + - "/internal/service/ds" exclude: - - internal/service/ds/list_pages_gen.go + - "/internal/service/ds/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -1230,7 +1277,7 @@ rules: message: Do not use "directoryservice" in const name inside ds package paths: include: - - internal/service/ds + - "/internal/service/ds" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -1244,7 +1291,7 @@ rules: message: Do not use "directoryservice" in var name inside ds package paths: include: - - internal/service/ds + - "/internal/service/ds" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -1258,9 +1305,9 @@ rules: message: Do not use "DLM" in func name inside dlm package paths: include: - - internal/service/dlm + - "/internal/service/dlm" exclude: - - internal/service/dlm/list_pages_gen.go + - "/internal/service/dlm/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -1276,7 +1323,7 @@ rules: message: Include "DLM" in test name paths: include: - - internal/service/dlm/*_test.go + - "/internal/service/dlm/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -1291,7 +1338,7 @@ rules: message: Do not use "DLM" in const name inside dlm package paths: include: - - internal/service/dlm + - "/internal/service/dlm" patterns: - pattern: const $NAME = ... 
- metavariable-pattern: @@ -1305,7 +1352,7 @@ rules: message: Do not use "DLM" in var name inside dlm package paths: include: - - internal/service/dlm + - "/internal/service/dlm" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -1319,9 +1366,9 @@ rules: message: Do not use "DMS" in func name inside dms package paths: include: - - internal/service/dms + - "/internal/service/dms" exclude: - - internal/service/dms/list_pages_gen.go + - "/internal/service/dms/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -1337,7 +1384,7 @@ rules: message: Include "DMS" in test name paths: include: - - internal/service/dms/*_test.go + - "/internal/service/dms/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -1352,7 +1399,7 @@ rules: message: Do not use "DMS" in const name inside dms package paths: include: - - internal/service/dms + - "/internal/service/dms" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -1366,7 +1413,7 @@ rules: message: Do not use "DMS" in var name inside dms package paths: include: - - internal/service/dms + - "/internal/service/dms" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -1380,9 +1427,9 @@ rules: message: Do not use "DocDB" in func name inside docdb package paths: include: - - internal/service/docdb + - "/internal/service/docdb" exclude: - - internal/service/docdb/list_pages_gen.go + - "/internal/service/docdb/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -1398,7 +1445,7 @@ rules: message: Include "DocDB" in test name paths: include: - - internal/service/docdb/*_test.go + - "/internal/service/docdb/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -1413,7 +1460,7 @@ rules: message: Do not use "DocDB" in const name inside docdb package paths: include: - - internal/service/docdb + - "/internal/service/docdb" patterns: - pattern: const $NAME = ... 
- metavariable-pattern: @@ -1427,7 +1474,7 @@ rules: message: Do not use "DocDB" in var name inside docdb package paths: include: - - internal/service/docdb + - "/internal/service/docdb" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -1441,9 +1488,9 @@ rules: message: Do not use "DocDBElastic" in func name inside docdbelastic package paths: include: - - internal/service/docdbelastic + - "/internal/service/docdbelastic" exclude: - - internal/service/docdbelastic/list_pages_gen.go + - "/internal/service/docdbelastic/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -1459,7 +1506,7 @@ rules: message: Include "DocDBElastic" in test name paths: include: - - internal/service/docdbelastic/*_test.go + - "/internal/service/docdbelastic/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -1474,7 +1521,7 @@ rules: message: Do not use "DocDBElastic" in const name inside docdbelastic package paths: include: - - internal/service/docdbelastic + - "/internal/service/docdbelastic" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -1488,7 +1535,7 @@ rules: message: Do not use "DocDBElastic" in var name inside docdbelastic package paths: include: - - internal/service/docdbelastic + - "/internal/service/docdbelastic" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -1502,9 +1549,9 @@ rules: message: Do not use "DRS" in func name inside drs package paths: include: - - internal/service/drs + - "/internal/service/drs" exclude: - - internal/service/drs/list_pages_gen.go + - "/internal/service/drs/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -1520,7 +1567,7 @@ rules: message: Include "DRS" in test name paths: include: - - internal/service/drs/*_test.go + - "/internal/service/drs/*_test.go" patterns: - pattern: func $NAME( ... 
) - metavariable-pattern: @@ -1535,7 +1582,7 @@ rules: message: Do not use "DRS" in const name inside drs package paths: include: - - internal/service/drs + - "/internal/service/drs" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -1549,7 +1596,7 @@ rules: message: Do not use "DRS" in var name inside drs package paths: include: - - internal/service/drs + - "/internal/service/drs" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -1563,9 +1610,9 @@ rules: message: Do not use "DS" in func name inside ds package paths: include: - - internal/service/ds + - "/internal/service/ds" exclude: - - internal/service/ds/list_pages_gen.go + - "/internal/service/ds/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -1581,7 +1628,7 @@ rules: message: Include "DS" in test name paths: include: - - internal/service/ds/*_test.go + - "/internal/service/ds/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -1596,7 +1643,7 @@ rules: message: Do not use "DS" in const name inside ds package paths: include: - - internal/service/ds + - "/internal/service/ds" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -1610,7 +1657,7 @@ rules: message: Do not use "DS" in var name inside ds package paths: include: - - internal/service/ds + - "/internal/service/ds" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -1624,9 +1671,9 @@ rules: message: Do not use "DSQL" in func name inside dsql package paths: include: - - internal/service/dsql + - "/internal/service/dsql" exclude: - - internal/service/dsql/list_pages_gen.go + - "/internal/service/dsql/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -1642,7 +1689,7 @@ rules: message: Include "DSQL" in test name paths: include: - - internal/service/dsql/*_test.go + - "/internal/service/dsql/*_test.go" patterns: - pattern: func $NAME( ... 
) - metavariable-pattern: @@ -1657,7 +1704,7 @@ rules: message: Do not use "DSQL" in const name inside dsql package paths: include: - - internal/service/dsql + - "/internal/service/dsql" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -1671,7 +1718,7 @@ rules: message: Do not use "DSQL" in var name inside dsql package paths: include: - - internal/service/dsql + - "/internal/service/dsql" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -1685,9 +1732,9 @@ rules: message: Do not use "DynamoDB" in func name inside dynamodb package paths: include: - - internal/service/dynamodb + - "/internal/service/dynamodb" exclude: - - internal/service/dynamodb/list_pages_gen.go + - "/internal/service/dynamodb/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -1703,7 +1750,7 @@ rules: message: Include "DynamoDB" in test name paths: include: - - internal/service/dynamodb/*_test.go + - "/internal/service/dynamodb/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -1718,7 +1765,7 @@ rules: message: Do not use "DynamoDB" in const name inside dynamodb package paths: include: - - internal/service/dynamodb + - "/internal/service/dynamodb" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -1732,7 +1779,7 @@ rules: message: Do not use "DynamoDB" in var name inside dynamodb package paths: include: - - internal/service/dynamodb + - "/internal/service/dynamodb" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -1746,7 +1793,7 @@ rules: message: Include "EC2" in test name paths: include: - - internal/service/ec2/ec2_*_test.go + - "/internal/service/ec2/ec2_*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -1761,7 +1808,7 @@ rules: message: Include "EC2EBS" in test name paths: include: - - internal/service/ec2/ebs_*_test.go + - "/internal/service/ec2/ebs_*_test.go" patterns: - pattern: func $NAME( ... 
) - metavariable-pattern: @@ -1776,7 +1823,7 @@ rules: message: Include "EC2Outposts" in test name paths: include: - - internal/service/ec2/outposts_*_test.go + - "/internal/service/ec2/outposts_*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -1791,9 +1838,9 @@ rules: message: Do not use "ECR" in func name inside ecr package paths: include: - - internal/service/ecr + - "/internal/service/ecr" exclude: - - internal/service/ecr/list_pages_gen.go + - "/internal/service/ecr/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -1810,7 +1857,7 @@ rules: message: Include "ECR" in test name paths: include: - - internal/service/ecr/*_test.go + - "/internal/service/ecr/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -1825,7 +1872,7 @@ rules: message: Do not use "ECR" in const name inside ecr package paths: include: - - internal/service/ecr + - "/internal/service/ecr" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -1839,7 +1886,7 @@ rules: message: Do not use "ECR" in var name inside ecr package paths: include: - - internal/service/ecr + - "/internal/service/ecr" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -1853,9 +1900,9 @@ rules: message: Do not use "ECRPublic" in func name inside ecrpublic package paths: include: - - internal/service/ecrpublic + - "/internal/service/ecrpublic" exclude: - - internal/service/ecrpublic/list_pages_gen.go + - "/internal/service/ecrpublic/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -1871,7 +1918,7 @@ rules: message: Include "ECRPublic" in test name paths: include: - - internal/service/ecrpublic/*_test.go + - "/internal/service/ecrpublic/*_test.go" patterns: - pattern: func $NAME( ... 
) - metavariable-pattern: @@ -1886,7 +1933,7 @@ rules: message: Do not use "ECRPublic" in const name inside ecrpublic package paths: include: - - internal/service/ecrpublic + - "/internal/service/ecrpublic" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -1900,7 +1947,7 @@ rules: message: Do not use "ECRPublic" in var name inside ecrpublic package paths: include: - - internal/service/ecrpublic + - "/internal/service/ecrpublic" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -1914,9 +1961,9 @@ rules: message: Do not use "ECS" in func name inside ecs package paths: include: - - internal/service/ecs + - "/internal/service/ecs" exclude: - - internal/service/ecs/list_pages_gen.go + - "/internal/service/ecs/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -1932,7 +1979,7 @@ rules: message: Include "ECS" in test name paths: include: - - internal/service/ecs/*_test.go + - "/internal/service/ecs/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -1947,7 +1994,7 @@ rules: message: Do not use "ECS" in const name inside ecs package paths: include: - - internal/service/ecs + - "/internal/service/ecs" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -1961,7 +2008,7 @@ rules: message: Do not use "ECS" in var name inside ecs package paths: include: - - internal/service/ecs + - "/internal/service/ecs" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -1975,9 +2022,9 @@ rules: message: Do not use "EFS" in func name inside efs package paths: include: - - internal/service/efs + - "/internal/service/efs" exclude: - - internal/service/efs/list_pages_gen.go + - "/internal/service/efs/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -1993,7 +2040,7 @@ rules: message: Include "EFS" in test name paths: include: - - internal/service/efs/*_test.go + - "/internal/service/efs/*_test.go" patterns: - pattern: func $NAME( ... 
) - metavariable-pattern: @@ -2008,7 +2055,7 @@ rules: message: Do not use "EFS" in const name inside efs package paths: include: - - internal/service/efs + - "/internal/service/efs" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -2022,7 +2069,7 @@ rules: message: Do not use "EFS" in var name inside efs package paths: include: - - internal/service/efs + - "/internal/service/efs" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -2036,9 +2083,9 @@ rules: message: Do not use "EKS" in func name inside eks package paths: include: - - internal/service/eks + - "/internal/service/eks" exclude: - - internal/service/eks/list_pages_gen.go + - "/internal/service/eks/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -2054,7 +2101,7 @@ rules: message: Include "EKS" in test name paths: include: - - internal/service/eks/*_test.go + - "/internal/service/eks/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -2069,7 +2116,7 @@ rules: message: Do not use "EKS" in const name inside eks package paths: include: - - internal/service/eks + - "/internal/service/eks" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -2083,7 +2130,7 @@ rules: message: Do not use "EKS" in var name inside eks package paths: include: - - internal/service/eks + - "/internal/service/eks" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -2097,9 +2144,9 @@ rules: message: Do not use "ElastiCache" in func name inside elasticache package paths: include: - - internal/service/elasticache + - "/internal/service/elasticache" exclude: - - internal/service/elasticache/list_pages_gen.go + - "/internal/service/elasticache/list_pages_gen.go" patterns: - pattern: func $NAME( ... 
) - metavariable-pattern: @@ -2115,7 +2162,7 @@ rules: message: Include "ElastiCache" in test name paths: include: - - internal/service/elasticache/*_test.go + - "/internal/service/elasticache/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -2130,7 +2177,7 @@ rules: message: Do not use "ElastiCache" in const name inside elasticache package paths: include: - - internal/service/elasticache + - "/internal/service/elasticache" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -2144,7 +2191,7 @@ rules: message: Do not use "ElastiCache" in var name inside elasticache package paths: include: - - internal/service/elasticache + - "/internal/service/elasticache" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -2158,9 +2205,9 @@ rules: message: Do not use "ElasticBeanstalk" in func name inside elasticbeanstalk package paths: include: - - internal/service/elasticbeanstalk + - "/internal/service/elasticbeanstalk" exclude: - - internal/service/elasticbeanstalk/list_pages_gen.go + - "/internal/service/elasticbeanstalk/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -2176,7 +2223,7 @@ rules: message: Include "ElasticBeanstalk" in test name paths: include: - - internal/service/elasticbeanstalk/*_test.go + - "/internal/service/elasticbeanstalk/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -2191,7 +2238,7 @@ rules: message: Do not use "ElasticBeanstalk" in const name inside elasticbeanstalk package paths: include: - - internal/service/elasticbeanstalk + - "/internal/service/elasticbeanstalk" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -2205,7 +2252,7 @@ rules: message: Do not use "ElasticBeanstalk" in var name inside elasticbeanstalk package paths: include: - - internal/service/elasticbeanstalk + - "/internal/service/elasticbeanstalk" patterns: - pattern: var $NAME = ... 
- metavariable-pattern: @@ -2219,9 +2266,9 @@ rules: message: Do not use "elasticloadbalancing" in func name inside elb package paths: include: - - internal/service/elb + - "/internal/service/elb" exclude: - - internal/service/elb/list_pages_gen.go + - "/internal/service/elb/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -2237,7 +2284,7 @@ rules: message: Do not use "elasticloadbalancing" in const name inside elb package paths: include: - - internal/service/elb + - "/internal/service/elb" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -2251,7 +2298,7 @@ rules: message: Do not use "elasticloadbalancing" in var name inside elb package paths: include: - - internal/service/elb + - "/internal/service/elb" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -2265,9 +2312,9 @@ rules: message: Do not use "elasticloadbalancingv2" in func name inside elbv2 package paths: include: - - internal/service/elbv2 + - "/internal/service/elbv2" exclude: - - internal/service/elbv2/list_pages_gen.go + - "/internal/service/elbv2/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -2283,7 +2330,7 @@ rules: message: Do not use "elasticloadbalancingv2" in const name inside elbv2 package paths: include: - - internal/service/elbv2 + - "/internal/service/elbv2" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -2297,7 +2344,7 @@ rules: message: Do not use "elasticloadbalancingv2" in var name inside elbv2 package paths: include: - - internal/service/elbv2 + - "/internal/service/elbv2" patterns: - pattern: var $NAME = ... 
- metavariable-pattern: @@ -2311,9 +2358,9 @@ rules: message: Do not use "Elasticsearch" in func name inside elasticsearch package paths: include: - - internal/service/elasticsearch + - "/internal/service/elasticsearch" exclude: - - internal/service/elasticsearch/list_pages_gen.go + - "/internal/service/elasticsearch/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -2329,7 +2376,7 @@ rules: message: Include "Elasticsearch" in test name paths: include: - - internal/service/elasticsearch/*_test.go + - "/internal/service/elasticsearch/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -2344,7 +2391,7 @@ rules: message: Do not use "Elasticsearch" in const name inside elasticsearch package paths: include: - - internal/service/elasticsearch + - "/internal/service/elasticsearch" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -2358,7 +2405,7 @@ rules: message: Do not use "Elasticsearch" in var name inside elasticsearch package paths: include: - - internal/service/elasticsearch + - "/internal/service/elasticsearch" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -2372,9 +2419,9 @@ rules: message: Do not use "elasticsearchservice" in func name inside elasticsearch package paths: include: - - internal/service/elasticsearch + - "/internal/service/elasticsearch" exclude: - - internal/service/elasticsearch/list_pages_gen.go + - "/internal/service/elasticsearch/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -2390,7 +2437,7 @@ rules: message: Do not use "elasticsearchservice" in const name inside elasticsearch package paths: include: - - internal/service/elasticsearch + - "/internal/service/elasticsearch" patterns: - pattern: const $NAME = ... 
- metavariable-pattern: @@ -2404,7 +2451,7 @@ rules: message: Do not use "elasticsearchservice" in var name inside elasticsearch package paths: include: - - internal/service/elasticsearch + - "/internal/service/elasticsearch" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -2418,9 +2465,9 @@ rules: message: Do not use "ElasticTranscoder" in func name inside elastictranscoder package paths: include: - - internal/service/elastictranscoder + - "/internal/service/elastictranscoder" exclude: - - internal/service/elastictranscoder/list_pages_gen.go + - "/internal/service/elastictranscoder/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -2436,7 +2483,7 @@ rules: message: Include "ElasticTranscoder" in test name paths: include: - - internal/service/elastictranscoder/*_test.go + - "/internal/service/elastictranscoder/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -2451,7 +2498,7 @@ rules: message: Do not use "ElasticTranscoder" in const name inside elastictranscoder package paths: include: - - internal/service/elastictranscoder + - "/internal/service/elastictranscoder" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -2465,7 +2512,7 @@ rules: message: Do not use "ElasticTranscoder" in var name inside elastictranscoder package paths: include: - - internal/service/elastictranscoder + - "/internal/service/elastictranscoder" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -2479,9 +2526,9 @@ rules: message: Do not use "ELB" in func name inside elb package paths: include: - - internal/service/elb + - "/internal/service/elb" exclude: - - internal/service/elb/list_pages_gen.go + - "/internal/service/elb/list_pages_gen.go" patterns: - pattern: func $NAME( ... 
) - metavariable-pattern: @@ -2497,7 +2544,7 @@ rules: message: Include "ELB" in test name paths: include: - - internal/service/elb/*_test.go + - "/internal/service/elb/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -2512,7 +2559,7 @@ rules: message: Do not use "ELB" in const name inside elb package paths: include: - - internal/service/elb + - "/internal/service/elb" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -2526,7 +2573,7 @@ rules: message: Do not use "ELB" in var name inside elb package paths: include: - - internal/service/elb + - "/internal/service/elb" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -2540,9 +2587,9 @@ rules: message: Do not use "ELBV2" in func name inside elbv2 package paths: include: - - internal/service/elbv2 + - "/internal/service/elbv2" exclude: - - internal/service/elbv2/list_pages_gen.go + - "/internal/service/elbv2/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -2558,7 +2605,7 @@ rules: message: Include "ELBV2" in test name paths: include: - - internal/service/elbv2/*_test.go + - "/internal/service/elbv2/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -2573,7 +2620,7 @@ rules: message: Do not use "ELBV2" in const name inside elbv2 package paths: include: - - internal/service/elbv2 + - "/internal/service/elbv2" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -2587,7 +2634,7 @@ rules: message: Do not use "ELBV2" in var name inside elbv2 package paths: include: - - internal/service/elbv2 + - "/internal/service/elbv2" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -2601,9 +2648,9 @@ rules: message: Do not use "EMR" in func name inside emr package paths: include: - - internal/service/emr + - "/internal/service/emr" exclude: - - internal/service/emr/list_pages_gen.go + - "/internal/service/emr/list_pages_gen.go" patterns: - pattern: func $NAME( ... 
) - metavariable-pattern: @@ -2619,7 +2666,7 @@ rules: message: Include "EMR" in test name paths: include: - - internal/service/emr/*_test.go + - "/internal/service/emr/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -2634,7 +2681,7 @@ rules: message: Do not use "EMR" in const name inside emr package paths: include: - - internal/service/emr + - "/internal/service/emr" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -2648,7 +2695,7 @@ rules: message: Do not use "EMR" in var name inside emr package paths: include: - - internal/service/emr + - "/internal/service/emr" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -2662,9 +2709,9 @@ rules: message: Do not use "EMRContainers" in func name inside emrcontainers package paths: include: - - internal/service/emrcontainers + - "/internal/service/emrcontainers" exclude: - - internal/service/emrcontainers/list_pages_gen.go + - "/internal/service/emrcontainers/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -2680,7 +2727,7 @@ rules: message: Include "EMRContainers" in test name paths: include: - - internal/service/emrcontainers/*_test.go + - "/internal/service/emrcontainers/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -2695,7 +2742,7 @@ rules: message: Do not use "EMRContainers" in const name inside emrcontainers package paths: include: - - internal/service/emrcontainers + - "/internal/service/emrcontainers" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -2709,7 +2756,7 @@ rules: message: Do not use "EMRContainers" in var name inside emrcontainers package paths: include: - - internal/service/emrcontainers + - "/internal/service/emrcontainers" patterns: - pattern: var $NAME = ... 
- metavariable-pattern: @@ -2723,9 +2770,9 @@ rules: message: Do not use "EMRServerless" in func name inside emrserverless package paths: include: - - internal/service/emrserverless + - "/internal/service/emrserverless" exclude: - - internal/service/emrserverless/list_pages_gen.go + - "/internal/service/emrserverless/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -2741,7 +2788,7 @@ rules: message: Include "EMRServerless" in test name paths: include: - - internal/service/emrserverless/*_test.go + - "/internal/service/emrserverless/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -2756,7 +2803,7 @@ rules: message: Do not use "EMRServerless" in const name inside emrserverless package paths: include: - - internal/service/emrserverless + - "/internal/service/emrserverless" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -2770,7 +2817,7 @@ rules: message: Do not use "EMRServerless" in var name inside emrserverless package paths: include: - - internal/service/emrserverless + - "/internal/service/emrserverless" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -2784,9 +2831,9 @@ rules: message: Do not use "eventbridge" in func name inside events package paths: include: - - internal/service/events + - "/internal/service/events" exclude: - - internal/service/events/list_pages_gen.go + - "/internal/service/events/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -2802,7 +2849,7 @@ rules: message: Do not use "eventbridge" in const name inside events package paths: include: - - internal/service/events + - "/internal/service/events" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -2816,7 +2863,7 @@ rules: message: Do not use "eventbridge" in var name inside events package paths: include: - - internal/service/events + - "/internal/service/events" patterns: - pattern: var $NAME = ... 
- metavariable-pattern: @@ -2830,9 +2877,9 @@ rules: message: Do not use "Events" in func name inside events package paths: include: - - internal/service/events + - "/internal/service/events" exclude: - - internal/service/events/list_pages_gen.go + - "/internal/service/events/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -2848,7 +2895,7 @@ rules: message: Include "Events" in test name paths: include: - - internal/service/events/*_test.go + - "/internal/service/events/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -2863,7 +2910,7 @@ rules: message: Do not use "Events" in const name inside events package paths: include: - - internal/service/events + - "/internal/service/events" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -2877,7 +2924,7 @@ rules: message: Do not use "Events" in var name inside events package paths: include: - - internal/service/events + - "/internal/service/events" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -2891,9 +2938,9 @@ rules: message: Do not use "Evidently" in func name inside evidently package paths: include: - - internal/service/evidently + - "/internal/service/evidently" exclude: - - internal/service/evidently/list_pages_gen.go + - "/internal/service/evidently/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -2909,7 +2956,7 @@ rules: message: Include "Evidently" in test name paths: include: - - internal/service/evidently/*_test.go + - "/internal/service/evidently/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -2924,7 +2971,7 @@ rules: message: Do not use "Evidently" in const name inside evidently package paths: include: - - internal/service/evidently + - "/internal/service/evidently" patterns: - pattern: const $NAME = ... 
- metavariable-pattern: @@ -2938,7 +2985,7 @@ rules: message: Do not use "Evidently" in var name inside evidently package paths: include: - - internal/service/evidently + - "/internal/service/evidently" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -2952,9 +2999,9 @@ rules: message: Do not use "EVS" in func name inside evs package paths: include: - - internal/service/evs + - "/internal/service/evs" exclude: - - internal/service/evs/list_pages_gen.go + - "/internal/service/evs/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -2970,7 +3017,7 @@ rules: message: Include "EVS" in test name paths: include: - - internal/service/evs/*_test.go + - "/internal/service/evs/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -2985,7 +3032,7 @@ rules: message: Do not use "EVS" in const name inside evs package paths: include: - - internal/service/evs + - "/internal/service/evs" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -2999,7 +3046,7 @@ rules: message: Do not use "EVS" in var name inside evs package paths: include: - - internal/service/evs + - "/internal/service/evs" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -3013,9 +3060,9 @@ rules: message: Do not use "FinSpace" in func name inside finspace package paths: include: - - internal/service/finspace + - "/internal/service/finspace" exclude: - - internal/service/finspace/list_pages_gen.go + - "/internal/service/finspace/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -3031,7 +3078,7 @@ rules: message: Include "FinSpace" in test name paths: include: - - internal/service/finspace/*_test.go + - "/internal/service/finspace/*_test.go" patterns: - pattern: func $NAME( ... 
) - metavariable-pattern: @@ -3046,7 +3093,7 @@ rules: message: Do not use "FinSpace" in const name inside finspace package paths: include: - - internal/service/finspace + - "/internal/service/finspace" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -3060,7 +3107,7 @@ rules: message: Do not use "FinSpace" in var name inside finspace package paths: include: - - internal/service/finspace + - "/internal/service/finspace" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -3074,9 +3121,9 @@ rules: message: Do not use "Firehose" in func name inside firehose package paths: include: - - internal/service/firehose + - "/internal/service/firehose" exclude: - - internal/service/firehose/list_pages_gen.go + - "/internal/service/firehose/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -3092,7 +3139,7 @@ rules: message: Include "Firehose" in test name paths: include: - - internal/service/firehose/*_test.go + - "/internal/service/firehose/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -3107,7 +3154,7 @@ rules: message: Do not use "Firehose" in const name inside firehose package paths: include: - - internal/service/firehose + - "/internal/service/firehose" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -3121,7 +3168,7 @@ rules: message: Do not use "Firehose" in var name inside firehose package paths: include: - - internal/service/firehose + - "/internal/service/firehose" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -3135,9 +3182,9 @@ rules: message: Do not use "FIS" in func name inside fis package paths: include: - - internal/service/fis + - "/internal/service/fis" exclude: - - internal/service/fis/list_pages_gen.go + - "/internal/service/fis/list_pages_gen.go" patterns: - pattern: func $NAME( ... 
) - metavariable-pattern: @@ -3153,7 +3200,7 @@ rules: message: Include "FIS" in test name paths: include: - - internal/service/fis/*_test.go + - "/internal/service/fis/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -3168,7 +3215,7 @@ rules: message: Do not use "FIS" in const name inside fis package paths: include: - - internal/service/fis + - "/internal/service/fis" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -3182,7 +3229,7 @@ rules: message: Do not use "FIS" in var name inside fis package paths: include: - - internal/service/fis + - "/internal/service/fis" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -3196,9 +3243,9 @@ rules: message: Do not use "FMS" in func name inside fms package paths: include: - - internal/service/fms + - "/internal/service/fms" exclude: - - internal/service/fms/list_pages_gen.go + - "/internal/service/fms/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -3214,7 +3261,7 @@ rules: message: Include "FMS" in test name paths: include: - - internal/service/fms/*_test.go + - "/internal/service/fms/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -3229,7 +3276,7 @@ rules: message: Do not use "FMS" in const name inside fms package paths: include: - - internal/service/fms + - "/internal/service/fms" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -3243,7 +3290,7 @@ rules: message: Do not use "FMS" in var name inside fms package paths: include: - - internal/service/fms + - "/internal/service/fms" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -3257,9 +3304,9 @@ rules: message: Do not use "FSx" in func name inside fsx package paths: include: - - internal/service/fsx + - "/internal/service/fsx" exclude: - - internal/service/fsx/list_pages_gen.go + - "/internal/service/fsx/list_pages_gen.go" patterns: - pattern: func $NAME( ... 
) - metavariable-pattern: @@ -3275,7 +3322,7 @@ rules: message: Include "FSx" in test name paths: include: - - internal/service/fsx/*_test.go + - "/internal/service/fsx/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -3290,7 +3337,7 @@ rules: message: Do not use "FSx" in const name inside fsx package paths: include: - - internal/service/fsx + - "/internal/service/fsx" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -3304,7 +3351,7 @@ rules: message: Do not use "FSx" in var name inside fsx package paths: include: - - internal/service/fsx + - "/internal/service/fsx" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -3318,9 +3365,9 @@ rules: message: Do not use "GameLift" in func name inside gamelift package paths: include: - - internal/service/gamelift + - "/internal/service/gamelift" exclude: - - internal/service/gamelift/list_pages_gen.go + - "/internal/service/gamelift/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -3336,7 +3383,7 @@ rules: message: Include "GameLift" in test name paths: include: - - internal/service/gamelift/*_test.go + - "/internal/service/gamelift/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -3351,7 +3398,7 @@ rules: message: Do not use "GameLift" in const name inside gamelift package paths: include: - - internal/service/gamelift + - "/internal/service/gamelift" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -3365,7 +3412,7 @@ rules: message: Do not use "GameLift" in var name inside gamelift package paths: include: - - internal/service/gamelift + - "/internal/service/gamelift" patterns: - pattern: var $NAME = ... 
- metavariable-pattern: @@ -3379,9 +3426,9 @@ rules: message: Do not use "Glacier" in func name inside glacier package paths: include: - - internal/service/glacier + - "/internal/service/glacier" exclude: - - internal/service/glacier/list_pages_gen.go + - "/internal/service/glacier/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -3397,7 +3444,7 @@ rules: message: Include "Glacier" in test name paths: include: - - internal/service/glacier/*_test.go + - "/internal/service/glacier/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -3412,7 +3459,7 @@ rules: message: Do not use "Glacier" in const name inside glacier package paths: include: - - internal/service/glacier + - "/internal/service/glacier" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -3426,7 +3473,7 @@ rules: message: Do not use "Glacier" in var name inside glacier package paths: include: - - internal/service/glacier + - "/internal/service/glacier" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -3440,9 +3487,9 @@ rules: message: Do not use "GlobalAccelerator" in func name inside globalaccelerator package paths: include: - - internal/service/globalaccelerator + - "/internal/service/globalaccelerator" exclude: - - internal/service/globalaccelerator/list_pages_gen.go + - "/internal/service/globalaccelerator/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -3458,7 +3505,7 @@ rules: message: Include "GlobalAccelerator" in test name paths: include: - - internal/service/globalaccelerator/*_test.go + - "/internal/service/globalaccelerator/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -3473,7 +3520,7 @@ rules: message: Do not use "GlobalAccelerator" in const name inside globalaccelerator package paths: include: - - internal/service/globalaccelerator + - "/internal/service/globalaccelerator" patterns: - pattern: const $NAME = ... 
- metavariable-pattern: @@ -3487,7 +3534,7 @@ rules: message: Do not use "GlobalAccelerator" in var name inside globalaccelerator package paths: include: - - internal/service/globalaccelerator + - "/internal/service/globalaccelerator" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -3501,9 +3548,9 @@ rules: message: Do not use "Glue" in func name inside glue package paths: include: - - internal/service/glue + - "/internal/service/glue" exclude: - - internal/service/glue/list_pages_gen.go + - "/internal/service/glue/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -3519,7 +3566,7 @@ rules: message: Include "Glue" in test name paths: include: - - internal/service/glue/*_test.go + - "/internal/service/glue/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -3534,7 +3581,7 @@ rules: message: Do not use "Glue" in const name inside glue package paths: include: - - internal/service/glue + - "/internal/service/glue" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -3548,7 +3595,7 @@ rules: message: Do not use "Glue" in var name inside glue package paths: include: - - internal/service/glue + - "/internal/service/glue" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -3562,9 +3609,9 @@ rules: message: Do not use "gluedatabrew" in func name inside databrew package paths: include: - - internal/service/databrew + - "/internal/service/databrew" exclude: - - internal/service/databrew/list_pages_gen.go + - "/internal/service/databrew/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -3580,7 +3627,7 @@ rules: message: Do not use "gluedatabrew" in const name inside databrew package paths: include: - - internal/service/databrew + - "/internal/service/databrew" patterns: - pattern: const $NAME = ... 
- metavariable-pattern: @@ -3594,7 +3641,7 @@ rules: message: Do not use "gluedatabrew" in var name inside databrew package paths: include: - - internal/service/databrew + - "/internal/service/databrew" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -3608,9 +3655,9 @@ rules: message: Do not use "Grafana" in func name inside grafana package paths: include: - - internal/service/grafana + - "/internal/service/grafana" exclude: - - internal/service/grafana/list_pages_gen.go + - "/internal/service/grafana/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -3626,7 +3673,7 @@ rules: message: Include "Grafana" in test name paths: include: - - internal/service/grafana/*_test.go + - "/internal/service/grafana/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -3641,7 +3688,7 @@ rules: message: Do not use "Grafana" in const name inside grafana package paths: include: - - internal/service/grafana + - "/internal/service/grafana" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -3655,7 +3702,7 @@ rules: message: Do not use "Grafana" in var name inside grafana package paths: include: - - internal/service/grafana + - "/internal/service/grafana" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -3669,9 +3716,9 @@ rules: message: Do not use "Greengrass" in func name inside greengrass package paths: include: - - internal/service/greengrass + - "/internal/service/greengrass" exclude: - - internal/service/greengrass/list_pages_gen.go + - "/internal/service/greengrass/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -3687,7 +3734,7 @@ rules: message: Include "Greengrass" in test name paths: include: - - internal/service/greengrass/*_test.go + - "/internal/service/greengrass/*_test.go" patterns: - pattern: func $NAME( ... 
) - metavariable-pattern: @@ -3702,7 +3749,7 @@ rules: message: Do not use "Greengrass" in const name inside greengrass package paths: include: - - internal/service/greengrass + - "/internal/service/greengrass" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -3716,7 +3763,7 @@ rules: message: Do not use "Greengrass" in var name inside greengrass package paths: include: - - internal/service/greengrass + - "/internal/service/greengrass" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -3730,9 +3777,9 @@ rules: message: Do not use "GroundStation" in func name inside groundstation package paths: include: - - internal/service/groundstation + - "/internal/service/groundstation" exclude: - - internal/service/groundstation/list_pages_gen.go + - "/internal/service/groundstation/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -3748,7 +3795,7 @@ rules: message: Include "GroundStation" in test name paths: include: - - internal/service/groundstation/*_test.go + - "/internal/service/groundstation/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -3763,7 +3810,7 @@ rules: message: Do not use "GroundStation" in const name inside groundstation package paths: include: - - internal/service/groundstation + - "/internal/service/groundstation" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -3777,7 +3824,7 @@ rules: message: Do not use "GroundStation" in var name inside groundstation package paths: include: - - internal/service/groundstation + - "/internal/service/groundstation" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -3791,9 +3838,9 @@ rules: message: Do not use "GuardDuty" in func name inside guardduty package paths: include: - - internal/service/guardduty + - "/internal/service/guardduty" exclude: - - internal/service/guardduty/list_pages_gen.go + - "/internal/service/guardduty/list_pages_gen.go" patterns: - pattern: func $NAME( ... 
) - metavariable-pattern: @@ -3809,7 +3856,7 @@ rules: message: Include "GuardDuty" in test name paths: include: - - internal/service/guardduty/*_test.go + - "/internal/service/guardduty/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -3824,7 +3871,7 @@ rules: message: Do not use "GuardDuty" in const name inside guardduty package paths: include: - - internal/service/guardduty + - "/internal/service/guardduty" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -3838,7 +3885,7 @@ rules: message: Do not use "GuardDuty" in var name inside guardduty package paths: include: - - internal/service/guardduty + - "/internal/service/guardduty" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -3852,9 +3899,9 @@ rules: message: Do not use "HealthLake" in func name inside healthlake package paths: include: - - internal/service/healthlake + - "/internal/service/healthlake" exclude: - - internal/service/healthlake/list_pages_gen.go + - "/internal/service/healthlake/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -3870,7 +3917,7 @@ rules: message: Include "HealthLake" in test name paths: include: - - internal/service/healthlake/*_test.go + - "/internal/service/healthlake/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -3885,7 +3932,7 @@ rules: message: Do not use "HealthLake" in const name inside healthlake package paths: include: - - internal/service/healthlake + - "/internal/service/healthlake" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -3899,7 +3946,7 @@ rules: message: Do not use "HealthLake" in var name inside healthlake package paths: include: - - internal/service/healthlake + - "/internal/service/healthlake" patterns: - pattern: var $NAME = ... 
- metavariable-pattern: @@ -3913,9 +3960,9 @@ rules: message: Do not use "IAM" in func name inside iam package paths: include: - - internal/service/iam + - "/internal/service/iam" exclude: - - internal/service/iam/list_pages_gen.go + - "/internal/service/iam/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -3931,7 +3978,7 @@ rules: message: Include "IAM" in test name paths: include: - - internal/service/iam/*_test.go + - "/internal/service/iam/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -3946,7 +3993,7 @@ rules: message: Do not use "IAM" in const name inside iam package paths: include: - - internal/service/iam + - "/internal/service/iam" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -3960,7 +4007,7 @@ rules: message: Do not use "IAM" in var name inside iam package paths: include: - - internal/service/iam + - "/internal/service/iam" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -3974,9 +4021,9 @@ rules: message: Do not use "IdentityStore" in func name inside identitystore package paths: include: - - internal/service/identitystore + - "/internal/service/identitystore" exclude: - - internal/service/identitystore/list_pages_gen.go + - "/internal/service/identitystore/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -3992,7 +4039,7 @@ rules: message: Include "IdentityStore" in test name paths: include: - - internal/service/identitystore/*_test.go + - "/internal/service/identitystore/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -4007,7 +4054,7 @@ rules: message: Do not use "IdentityStore" in const name inside identitystore package paths: include: - - internal/service/identitystore + - "/internal/service/identitystore" patterns: - pattern: const $NAME = ... 
- metavariable-pattern: @@ -4021,7 +4068,7 @@ rules: message: Do not use "IdentityStore" in var name inside identitystore package paths: include: - - internal/service/identitystore + - "/internal/service/identitystore" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -4035,9 +4082,9 @@ rules: message: Do not use "ImageBuilder" in func name inside imagebuilder package paths: include: - - internal/service/imagebuilder + - "/internal/service/imagebuilder" exclude: - - internal/service/imagebuilder/list_pages_gen.go + - "/internal/service/imagebuilder/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -4053,7 +4100,7 @@ rules: message: Include "ImageBuilder" in test name paths: include: - - internal/service/imagebuilder/*_test.go + - "/internal/service/imagebuilder/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -4068,7 +4115,7 @@ rules: message: Do not use "ImageBuilder" in const name inside imagebuilder package paths: include: - - internal/service/imagebuilder + - "/internal/service/imagebuilder" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -4082,7 +4129,7 @@ rules: message: Do not use "ImageBuilder" in var name inside imagebuilder package paths: include: - - internal/service/imagebuilder + - "/internal/service/imagebuilder" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -4096,9 +4143,9 @@ rules: message: Do not use "Inspector" in func name inside inspector package paths: include: - - internal/service/inspector + - "/internal/service/inspector" exclude: - - internal/service/inspector/list_pages_gen.go + - "/internal/service/inspector/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -4114,7 +4161,7 @@ rules: message: Include "Inspector" in test name paths: include: - - internal/service/inspector/*_test.go + - "/internal/service/inspector/*_test.go" patterns: - pattern: func $NAME( ... 
) - metavariable-pattern: @@ -4129,7 +4176,7 @@ rules: message: Do not use "Inspector" in const name inside inspector package paths: include: - - internal/service/inspector + - "/internal/service/inspector" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -4143,7 +4190,7 @@ rules: message: Do not use "Inspector" in var name inside inspector package paths: include: - - internal/service/inspector + - "/internal/service/inspector" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -4157,9 +4204,9 @@ rules: message: Do not use "Inspector2" in func name inside inspector2 package paths: include: - - internal/service/inspector2 + - "/internal/service/inspector2" exclude: - - internal/service/inspector2/list_pages_gen.go + - "/internal/service/inspector2/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -4175,7 +4222,7 @@ rules: message: Include "Inspector2" in test name paths: include: - - internal/service/inspector2/*_test.go + - "/internal/service/inspector2/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -4190,7 +4237,7 @@ rules: message: Do not use "Inspector2" in const name inside inspector2 package paths: include: - - internal/service/inspector2 + - "/internal/service/inspector2" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -4204,7 +4251,7 @@ rules: message: Do not use "Inspector2" in var name inside inspector2 package paths: include: - - internal/service/inspector2 + - "/internal/service/inspector2" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -4218,9 +4265,9 @@ rules: message: Do not use "inspectorv2" in func name inside inspector2 package paths: include: - - internal/service/inspector2 + - "/internal/service/inspector2" exclude: - - internal/service/inspector2/list_pages_gen.go + - "/internal/service/inspector2/list_pages_gen.go" patterns: - pattern: func $NAME( ... 
) - metavariable-pattern: @@ -4236,7 +4283,7 @@ rules: message: Do not use "inspectorv2" in const name inside inspector2 package paths: include: - - internal/service/inspector2 + - "/internal/service/inspector2" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -4250,7 +4297,7 @@ rules: message: Do not use "inspectorv2" in var name inside inspector2 package paths: include: - - internal/service/inspector2 + - "/internal/service/inspector2" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -4264,9 +4311,9 @@ rules: message: Do not use "InternetMonitor" in func name inside internetmonitor package paths: include: - - internal/service/internetmonitor + - "/internal/service/internetmonitor" exclude: - - internal/service/internetmonitor/list_pages_gen.go + - "/internal/service/internetmonitor/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -4282,7 +4329,7 @@ rules: message: Include "InternetMonitor" in test name paths: include: - - internal/service/internetmonitor/*_test.go + - "/internal/service/internetmonitor/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -4297,7 +4344,7 @@ rules: message: Do not use "InternetMonitor" in const name inside internetmonitor package paths: include: - - internal/service/internetmonitor + - "/internal/service/internetmonitor" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -4311,7 +4358,7 @@ rules: message: Do not use "InternetMonitor" in var name inside internetmonitor package paths: include: - - internal/service/internetmonitor + - "/internal/service/internetmonitor" patterns: - pattern: var $NAME = ... 
- metavariable-pattern: @@ -4325,9 +4372,9 @@ rules: message: Do not use "Invoicing" in func name inside invoicing package paths: include: - - internal/service/invoicing + - "/internal/service/invoicing" exclude: - - internal/service/invoicing/list_pages_gen.go + - "/internal/service/invoicing/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -4343,7 +4390,7 @@ rules: message: Include "Invoicing" in test name paths: include: - - internal/service/invoicing/*_test.go + - "/internal/service/invoicing/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -4358,7 +4405,7 @@ rules: message: Do not use "Invoicing" in const name inside invoicing package paths: include: - - internal/service/invoicing + - "/internal/service/invoicing" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -4372,7 +4419,7 @@ rules: message: Do not use "Invoicing" in var name inside invoicing package paths: include: - - internal/service/invoicing + - "/internal/service/invoicing" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -4380,3 +4427,21 @@ rules: patterns: - pattern-regex: "(?i)Invoicing" severity: WARNING + - id: iot-in-func-name + languages: + - go + message: Do not use "IoT" in func name inside iot package + paths: + include: + - "/internal/service/iot" + exclude: + - "/internal/service/iot/list_pages_gen.go" + patterns: + - pattern: func $NAME( ... ) + - metavariable-pattern: + metavariable: $NAME + patterns: + - pattern-regex: "(?i)IoT" + - focus-metavariable: $NAME + - pattern-not: func $NAME($T *testing.T) + severity: WARNING diff --git a/.ci/.semgrep-service-name2.yml b/.ci/.semgrep-service-name2.yml index 7b145a586f94..5819dadca7e8 100644 --- a/.ci/.semgrep-service-name2.yml +++ b/.ci/.semgrep-service-name2.yml @@ -1,30 +1,12 @@ # Generated by internal/generate/servicesemgrep/main.go; DO NOT EDIT. 
rules: - - id: iot-in-func-name - languages: - - go - message: Do not use "IoT" in func name inside iot package - paths: - include: - - internal/service/iot - exclude: - - internal/service/iot/list_pages_gen.go - patterns: - - pattern: func $NAME( ... ) - - metavariable-pattern: - metavariable: $NAME - patterns: - - pattern-regex: "(?i)IoT" - - focus-metavariable: $NAME - - pattern-not: func $NAME($T *testing.T) - severity: WARNING - id: iot-in-test-name languages: - go message: Include "IoT" in test name paths: include: - - internal/service/iot/*_test.go + - "/internal/service/iot/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -39,7 +21,7 @@ rules: message: Do not use "IoT" in const name inside iot package paths: include: - - internal/service/iot + - "/internal/service/iot" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -53,7 +35,7 @@ rules: message: Do not use "IoT" in var name inside iot package paths: include: - - internal/service/iot + - "/internal/service/iot" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -67,7 +49,7 @@ rules: message: Include "IPAM" in test name paths: include: - - internal/service/ec2/ipam_*_test.go + - "/internal/service/ec2/ipam_*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -82,9 +64,9 @@ rules: message: Do not use "IVS" in func name inside ivs package paths: include: - - internal/service/ivs + - "/internal/service/ivs" exclude: - - internal/service/ivs/list_pages_gen.go + - "/internal/service/ivs/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -100,7 +82,7 @@ rules: message: Include "IVS" in test name paths: include: - - internal/service/ivs/*_test.go + - "/internal/service/ivs/*_test.go" patterns: - pattern: func $NAME( ... 
) - metavariable-pattern: @@ -115,7 +97,7 @@ rules: message: Do not use "IVS" in const name inside ivs package paths: include: - - internal/service/ivs + - "/internal/service/ivs" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -129,7 +111,7 @@ rules: message: Do not use "IVS" in var name inside ivs package paths: include: - - internal/service/ivs + - "/internal/service/ivs" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -143,9 +125,9 @@ rules: message: Do not use "IVSChat" in func name inside ivschat package paths: include: - - internal/service/ivschat + - "/internal/service/ivschat" exclude: - - internal/service/ivschat/list_pages_gen.go + - "/internal/service/ivschat/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -161,7 +143,7 @@ rules: message: Include "IVSChat" in test name paths: include: - - internal/service/ivschat/*_test.go + - "/internal/service/ivschat/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -176,7 +158,7 @@ rules: message: Do not use "IVSChat" in const name inside ivschat package paths: include: - - internal/service/ivschat + - "/internal/service/ivschat" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -190,7 +172,7 @@ rules: message: Do not use "IVSChat" in var name inside ivschat package paths: include: - - internal/service/ivschat + - "/internal/service/ivschat" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -204,9 +186,9 @@ rules: message: Do not use "Kafka" in func name inside kafka package paths: include: - - internal/service/kafka + - "/internal/service/kafka" exclude: - - internal/service/kafka/list_pages_gen.go + - "/internal/service/kafka/list_pages_gen.go" patterns: - pattern: func $NAME( ... 
) - metavariable-pattern: @@ -222,7 +204,7 @@ rules: message: Include "Kafka" in test name paths: include: - - internal/service/kafka/*_test.go + - "/internal/service/kafka/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -237,7 +219,7 @@ rules: message: Do not use "Kafka" in const name inside kafka package paths: include: - - internal/service/kafka + - "/internal/service/kafka" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -251,7 +233,7 @@ rules: message: Do not use "Kafka" in var name inside kafka package paths: include: - - internal/service/kafka + - "/internal/service/kafka" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -265,9 +247,9 @@ rules: message: Do not use "KafkaConnect" in func name inside kafkaconnect package paths: include: - - internal/service/kafkaconnect + - "/internal/service/kafkaconnect" exclude: - - internal/service/kafkaconnect/list_pages_gen.go + - "/internal/service/kafkaconnect/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -283,7 +265,7 @@ rules: message: Include "KafkaConnect" in test name paths: include: - - internal/service/kafkaconnect/*_test.go + - "/internal/service/kafkaconnect/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -298,7 +280,7 @@ rules: message: Do not use "KafkaConnect" in const name inside kafkaconnect package paths: include: - - internal/service/kafkaconnect + - "/internal/service/kafkaconnect" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -312,7 +294,7 @@ rules: message: Do not use "KafkaConnect" in var name inside kafkaconnect package paths: include: - - internal/service/kafkaconnect + - "/internal/service/kafkaconnect" patterns: - pattern: var $NAME = ... 
- metavariable-pattern: @@ -326,9 +308,9 @@ rules: message: Do not use "Kendra" in func name inside kendra package paths: include: - - internal/service/kendra + - "/internal/service/kendra" exclude: - - internal/service/kendra/list_pages_gen.go + - "/internal/service/kendra/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -344,7 +326,7 @@ rules: message: Include "Kendra" in test name paths: include: - - internal/service/kendra/*_test.go + - "/internal/service/kendra/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -359,7 +341,7 @@ rules: message: Do not use "Kendra" in const name inside kendra package paths: include: - - internal/service/kendra + - "/internal/service/kendra" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -373,7 +355,7 @@ rules: message: Do not use "Kendra" in var name inside kendra package paths: include: - - internal/service/kendra + - "/internal/service/kendra" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -387,9 +369,9 @@ rules: message: Do not use "Keyspaces" in func name inside keyspaces package paths: include: - - internal/service/keyspaces + - "/internal/service/keyspaces" exclude: - - internal/service/keyspaces/list_pages_gen.go + - "/internal/service/keyspaces/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -405,7 +387,7 @@ rules: message: Include "Keyspaces" in test name paths: include: - - internal/service/keyspaces/*_test.go + - "/internal/service/keyspaces/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -420,7 +402,7 @@ rules: message: Do not use "Keyspaces" in const name inside keyspaces package paths: include: - - internal/service/keyspaces + - "/internal/service/keyspaces" patterns: - pattern: const $NAME = ... 
- metavariable-pattern: @@ -434,7 +416,7 @@ rules: message: Do not use "Keyspaces" in var name inside keyspaces package paths: include: - - internal/service/keyspaces + - "/internal/service/keyspaces" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -448,9 +430,9 @@ rules: message: Do not use "Kinesis" in func name inside kinesis package paths: include: - - internal/service/kinesis + - "/internal/service/kinesis" exclude: - - internal/service/kinesis/list_pages_gen.go + - "/internal/service/kinesis/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -466,7 +448,7 @@ rules: message: Include "Kinesis" in test name paths: include: - - internal/service/kinesis/*_test.go + - "/internal/service/kinesis/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -481,7 +463,7 @@ rules: message: Do not use "Kinesis" in const name inside kinesis package paths: include: - - internal/service/kinesis + - "/internal/service/kinesis" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -495,7 +477,7 @@ rules: message: Do not use "Kinesis" in var name inside kinesis package paths: include: - - internal/service/kinesis + - "/internal/service/kinesis" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -509,9 +491,9 @@ rules: message: Do not use "KinesisAnalytics" in func name inside kinesisanalytics package paths: include: - - internal/service/kinesisanalytics + - "/internal/service/kinesisanalytics" exclude: - - internal/service/kinesisanalytics/list_pages_gen.go + - "/internal/service/kinesisanalytics/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -527,7 +509,7 @@ rules: message: Include "KinesisAnalytics" in test name paths: include: - - internal/service/kinesisanalytics/*_test.go + - "/internal/service/kinesisanalytics/*_test.go" patterns: - pattern: func $NAME( ... 
) - metavariable-pattern: @@ -542,7 +524,7 @@ rules: message: Do not use "KinesisAnalytics" in const name inside kinesisanalytics package paths: include: - - internal/service/kinesisanalytics + - "/internal/service/kinesisanalytics" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -556,7 +538,7 @@ rules: message: Do not use "KinesisAnalytics" in var name inside kinesisanalytics package paths: include: - - internal/service/kinesisanalytics + - "/internal/service/kinesisanalytics" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -570,9 +552,9 @@ rules: message: Do not use "KinesisAnalyticsV2" in func name inside kinesisanalyticsv2 package paths: include: - - internal/service/kinesisanalyticsv2 + - "/internal/service/kinesisanalyticsv2" exclude: - - internal/service/kinesisanalyticsv2/list_pages_gen.go + - "/internal/service/kinesisanalyticsv2/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -588,7 +570,7 @@ rules: message: Include "KinesisAnalyticsV2" in test name paths: include: - - internal/service/kinesisanalyticsv2/*_test.go + - "/internal/service/kinesisanalyticsv2/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -603,7 +585,7 @@ rules: message: Do not use "KinesisAnalyticsV2" in const name inside kinesisanalyticsv2 package paths: include: - - internal/service/kinesisanalyticsv2 + - "/internal/service/kinesisanalyticsv2" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -617,7 +599,7 @@ rules: message: Do not use "KinesisAnalyticsV2" in var name inside kinesisanalyticsv2 package paths: include: - - internal/service/kinesisanalyticsv2 + - "/internal/service/kinesisanalyticsv2" patterns: - pattern: var $NAME = ... 
- metavariable-pattern: @@ -631,9 +613,9 @@ rules: message: Do not use "KinesisVideo" in func name inside kinesisvideo package paths: include: - - internal/service/kinesisvideo + - "/internal/service/kinesisvideo" exclude: - - internal/service/kinesisvideo/list_pages_gen.go + - "/internal/service/kinesisvideo/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -649,7 +631,7 @@ rules: message: Include "KinesisVideo" in test name paths: include: - - internal/service/kinesisvideo/*_test.go + - "/internal/service/kinesisvideo/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -664,7 +646,7 @@ rules: message: Do not use "KinesisVideo" in const name inside kinesisvideo package paths: include: - - internal/service/kinesisvideo + - "/internal/service/kinesisvideo" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -678,7 +660,7 @@ rules: message: Do not use "KinesisVideo" in var name inside kinesisvideo package paths: include: - - internal/service/kinesisvideo + - "/internal/service/kinesisvideo" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -692,9 +674,9 @@ rules: message: Do not use "KMS" in func name inside kms package paths: include: - - internal/service/kms + - "/internal/service/kms" exclude: - - internal/service/kms/list_pages_gen.go + - "/internal/service/kms/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -710,7 +692,7 @@ rules: message: Include "KMS" in test name paths: include: - - internal/service/kms/*_test.go + - "/internal/service/kms/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -725,7 +707,7 @@ rules: message: Do not use "KMS" in const name inside kms package paths: include: - - internal/service/kms + - "/internal/service/kms" patterns: - pattern: const $NAME = ... 
- metavariable-pattern: @@ -739,7 +721,7 @@ rules: message: Do not use "KMS" in var name inside kms package paths: include: - - internal/service/kms + - "/internal/service/kms" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -753,9 +735,9 @@ rules: message: Do not use "LakeFormation" in func name inside lakeformation package paths: include: - - internal/service/lakeformation + - "/internal/service/lakeformation" exclude: - - internal/service/lakeformation/list_pages_gen.go + - "/internal/service/lakeformation/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -771,7 +753,7 @@ rules: message: Include "LakeFormation" in test name paths: include: - - internal/service/lakeformation/*_test.go + - "/internal/service/lakeformation/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -786,7 +768,7 @@ rules: message: Do not use "LakeFormation" in const name inside lakeformation package paths: include: - - internal/service/lakeformation + - "/internal/service/lakeformation" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -800,7 +782,7 @@ rules: message: Do not use "LakeFormation" in var name inside lakeformation package paths: include: - - internal/service/lakeformation + - "/internal/service/lakeformation" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -814,9 +796,9 @@ rules: message: Do not use "Lambda" in func name inside lambda package paths: include: - - internal/service/lambda + - "/internal/service/lambda" exclude: - - internal/service/lambda/list_pages_gen.go + - "/internal/service/lambda/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -832,7 +814,7 @@ rules: message: Include "Lambda" in test name paths: include: - - internal/service/lambda/*_test.go + - "/internal/service/lambda/*_test.go" patterns: - pattern: func $NAME( ... 
) - metavariable-pattern: @@ -847,7 +829,7 @@ rules: message: Do not use "Lambda" in const name inside lambda package paths: include: - - internal/service/lambda + - "/internal/service/lambda" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -861,7 +843,7 @@ rules: message: Do not use "Lambda" in var name inside lambda package paths: include: - - internal/service/lambda + - "/internal/service/lambda" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -875,9 +857,9 @@ rules: message: Do not use "LaunchWizard" in func name inside launchwizard package paths: include: - - internal/service/launchwizard + - "/internal/service/launchwizard" exclude: - - internal/service/launchwizard/list_pages_gen.go + - "/internal/service/launchwizard/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -893,7 +875,7 @@ rules: message: Include "LaunchWizard" in test name paths: include: - - internal/service/launchwizard/*_test.go + - "/internal/service/launchwizard/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -908,7 +890,7 @@ rules: message: Do not use "LaunchWizard" in const name inside launchwizard package paths: include: - - internal/service/launchwizard + - "/internal/service/launchwizard" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -922,7 +904,7 @@ rules: message: Do not use "LaunchWizard" in var name inside launchwizard package paths: include: - - internal/service/launchwizard + - "/internal/service/launchwizard" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -936,9 +918,9 @@ rules: message: Do not use "lex" in func name inside lexmodels package paths: include: - - internal/service/lexmodels + - "/internal/service/lexmodels" exclude: - - internal/service/lexmodels/list_pages_gen.go + - "/internal/service/lexmodels/list_pages_gen.go" patterns: - pattern: func $NAME( ... 
) - metavariable-pattern: @@ -954,7 +936,7 @@ rules: message: Do not use "lex" in const name inside lexmodels package paths: include: - - internal/service/lexmodels + - "/internal/service/lexmodels" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -968,7 +950,7 @@ rules: message: Do not use "lex" in var name inside lexmodels package paths: include: - - internal/service/lexmodels + - "/internal/service/lexmodels" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -982,9 +964,9 @@ rules: message: Do not use "lexmodelbuilding" in func name inside lexmodels package paths: include: - - internal/service/lexmodels + - "/internal/service/lexmodels" exclude: - - internal/service/lexmodels/list_pages_gen.go + - "/internal/service/lexmodels/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -1000,7 +982,7 @@ rules: message: Do not use "lexmodelbuilding" in const name inside lexmodels package paths: include: - - internal/service/lexmodels + - "/internal/service/lexmodels" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -1014,7 +996,7 @@ rules: message: Do not use "lexmodelbuilding" in var name inside lexmodels package paths: include: - - internal/service/lexmodels + - "/internal/service/lexmodels" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -1028,9 +1010,9 @@ rules: message: Do not use "lexmodelbuildingservice" in func name inside lexmodels package paths: include: - - internal/service/lexmodels + - "/internal/service/lexmodels" exclude: - - internal/service/lexmodels/list_pages_gen.go + - "/internal/service/lexmodels/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -1046,7 +1028,7 @@ rules: message: Do not use "lexmodelbuildingservice" in const name inside lexmodels package paths: include: - - internal/service/lexmodels + - "/internal/service/lexmodels" patterns: - pattern: const $NAME = ... 
- metavariable-pattern: @@ -1060,7 +1042,7 @@ rules: message: Do not use "lexmodelbuildingservice" in var name inside lexmodels package paths: include: - - internal/service/lexmodels + - "/internal/service/lexmodels" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -1074,9 +1056,9 @@ rules: message: Do not use "LexModels" in func name inside lexmodels package paths: include: - - internal/service/lexmodels + - "/internal/service/lexmodels" exclude: - - internal/service/lexmodels/list_pages_gen.go + - "/internal/service/lexmodels/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -1092,7 +1074,7 @@ rules: message: Include "LexModels" in test name paths: include: - - internal/service/lexmodels/*_test.go + - "/internal/service/lexmodels/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -1107,7 +1089,7 @@ rules: message: Do not use "LexModels" in const name inside lexmodels package paths: include: - - internal/service/lexmodels + - "/internal/service/lexmodels" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -1121,7 +1103,7 @@ rules: message: Do not use "LexModels" in var name inside lexmodels package paths: include: - - internal/service/lexmodels + - "/internal/service/lexmodels" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -1135,9 +1117,9 @@ rules: message: Do not use "lexmodelsv2" in func name inside lexv2models package paths: include: - - internal/service/lexv2models + - "/internal/service/lexv2models" exclude: - - internal/service/lexv2models/list_pages_gen.go + - "/internal/service/lexv2models/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -1153,7 +1135,7 @@ rules: message: Do not use "lexmodelsv2" in const name inside lexv2models package paths: include: - - internal/service/lexv2models + - "/internal/service/lexv2models" patterns: - pattern: const $NAME = ... 
- metavariable-pattern: @@ -1167,7 +1149,7 @@ rules: message: Do not use "lexmodelsv2" in var name inside lexv2models package paths: include: - - internal/service/lexv2models + - "/internal/service/lexv2models" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -1181,9 +1163,9 @@ rules: message: Do not use "LexV2Models" in func name inside lexv2models package paths: include: - - internal/service/lexv2models + - "/internal/service/lexv2models" exclude: - - internal/service/lexv2models/list_pages_gen.go + - "/internal/service/lexv2models/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -1199,7 +1181,7 @@ rules: message: Include "LexV2Models" in test name paths: include: - - internal/service/lexv2models/*_test.go + - "/internal/service/lexv2models/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -1214,7 +1196,7 @@ rules: message: Do not use "LexV2Models" in const name inside lexv2models package paths: include: - - internal/service/lexv2models + - "/internal/service/lexv2models" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -1228,7 +1210,7 @@ rules: message: Do not use "LexV2Models" in var name inside lexv2models package paths: include: - - internal/service/lexv2models + - "/internal/service/lexv2models" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -1242,9 +1224,9 @@ rules: message: Do not use "LicenseManager" in func name inside licensemanager package paths: include: - - internal/service/licensemanager + - "/internal/service/licensemanager" exclude: - - internal/service/licensemanager/list_pages_gen.go + - "/internal/service/licensemanager/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -1260,7 +1242,7 @@ rules: message: Include "LicenseManager" in test name paths: include: - - internal/service/licensemanager/*_test.go + - "/internal/service/licensemanager/*_test.go" patterns: - pattern: func $NAME( ... 
) - metavariable-pattern: @@ -1275,7 +1257,7 @@ rules: message: Do not use "LicenseManager" in const name inside licensemanager package paths: include: - - internal/service/licensemanager + - "/internal/service/licensemanager" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -1289,7 +1271,7 @@ rules: message: Do not use "LicenseManager" in var name inside licensemanager package paths: include: - - internal/service/licensemanager + - "/internal/service/licensemanager" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -1303,9 +1285,9 @@ rules: message: Do not use "Lightsail" in func name inside lightsail package paths: include: - - internal/service/lightsail + - "/internal/service/lightsail" exclude: - - internal/service/lightsail/list_pages_gen.go + - "/internal/service/lightsail/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -1321,7 +1303,7 @@ rules: message: Include "Lightsail" in test name paths: include: - - internal/service/lightsail/*_test.go + - "/internal/service/lightsail/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -1336,7 +1318,7 @@ rules: message: Do not use "Lightsail" in const name inside lightsail package paths: include: - - internal/service/lightsail + - "/internal/service/lightsail" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -1350,7 +1332,7 @@ rules: message: Do not use "Lightsail" in var name inside lightsail package paths: include: - - internal/service/lightsail + - "/internal/service/lightsail" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -1364,9 +1346,9 @@ rules: message: Do not use "Location" in func name inside location package paths: include: - - internal/service/location + - "/internal/service/location" exclude: - - internal/service/location/list_pages_gen.go + - "/internal/service/location/list_pages_gen.go" patterns: - pattern: func $NAME( ... 
) - metavariable-pattern: @@ -1382,7 +1364,7 @@ rules: message: Include "Location" in test name paths: include: - - internal/service/location/*_test.go + - "/internal/service/location/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -1397,7 +1379,7 @@ rules: message: Do not use "Location" in const name inside location package paths: include: - - internal/service/location + - "/internal/service/location" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -1411,7 +1393,7 @@ rules: message: Do not use "Location" in var name inside location package paths: include: - - internal/service/location + - "/internal/service/location" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -1425,9 +1407,9 @@ rules: message: Do not use "locationservice" in func name inside location package paths: include: - - internal/service/location + - "/internal/service/location" exclude: - - internal/service/location/list_pages_gen.go + - "/internal/service/location/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -1443,7 +1425,7 @@ rules: message: Do not use "locationservice" in const name inside location package paths: include: - - internal/service/location + - "/internal/service/location" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -1457,7 +1439,7 @@ rules: message: Do not use "locationservice" in var name inside location package paths: include: - - internal/service/location + - "/internal/service/location" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -1471,9 +1453,9 @@ rules: message: Do not use "Logs" in func name inside logs package paths: include: - - internal/service/logs + - "/internal/service/logs" exclude: - - internal/service/logs/list_pages_gen.go + - "/internal/service/logs/list_pages_gen.go" patterns: - pattern: func $NAME( ... 
) - metavariable-pattern: @@ -1489,7 +1471,7 @@ rules: message: Include "Logs" in test name paths: include: - - internal/service/logs/*_test.go + - "/internal/service/logs/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -1504,7 +1486,7 @@ rules: message: Do not use "Logs" in const name inside logs package paths: include: - - internal/service/logs + - "/internal/service/logs" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -1518,7 +1500,7 @@ rules: message: Do not use "Logs" in var name inside logs package paths: include: - - internal/service/logs + - "/internal/service/logs" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -1532,9 +1514,9 @@ rules: message: Do not use "LookoutMetrics" in func name inside lookoutmetrics package paths: include: - - internal/service/lookoutmetrics + - "/internal/service/lookoutmetrics" exclude: - - internal/service/lookoutmetrics/list_pages_gen.go + - "/internal/service/lookoutmetrics/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -1550,7 +1532,7 @@ rules: message: Include "LookoutMetrics" in test name paths: include: - - internal/service/lookoutmetrics/*_test.go + - "/internal/service/lookoutmetrics/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -1565,7 +1547,7 @@ rules: message: Do not use "LookoutMetrics" in const name inside lookoutmetrics package paths: include: - - internal/service/lookoutmetrics + - "/internal/service/lookoutmetrics" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -1579,7 +1561,7 @@ rules: message: Do not use "LookoutMetrics" in var name inside lookoutmetrics package paths: include: - - internal/service/lookoutmetrics + - "/internal/service/lookoutmetrics" patterns: - pattern: var $NAME = ... 
- metavariable-pattern: @@ -1593,9 +1575,9 @@ rules: message: Do not use "M2" in func name inside m2 package paths: include: - - internal/service/m2 + - "/internal/service/m2" exclude: - - internal/service/m2/list_pages_gen.go + - "/internal/service/m2/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -1611,7 +1593,7 @@ rules: message: Include "M2" in test name paths: include: - - internal/service/m2/*_test.go + - "/internal/service/m2/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -1626,7 +1608,7 @@ rules: message: Do not use "M2" in const name inside m2 package paths: include: - - internal/service/m2 + - "/internal/service/m2" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -1640,7 +1622,7 @@ rules: message: Do not use "M2" in var name inside m2 package paths: include: - - internal/service/m2 + - "/internal/service/m2" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -1654,9 +1636,9 @@ rules: message: Do not use "Macie2" in func name inside macie2 package paths: include: - - internal/service/macie2 + - "/internal/service/macie2" exclude: - - internal/service/macie2/list_pages_gen.go + - "/internal/service/macie2/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -1672,7 +1654,7 @@ rules: message: Include "Macie2" in test name paths: include: - - internal/service/macie2/*_test.go + - "/internal/service/macie2/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -1687,7 +1669,7 @@ rules: message: Do not use "Macie2" in const name inside macie2 package paths: include: - - internal/service/macie2 + - "/internal/service/macie2" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -1701,7 +1683,7 @@ rules: message: Do not use "Macie2" in var name inside macie2 package paths: include: - - internal/service/macie2 + - "/internal/service/macie2" patterns: - pattern: var $NAME = ... 
- metavariable-pattern: @@ -1715,9 +1697,9 @@ rules: message: Do not use "managedgrafana" in func name inside grafana package paths: include: - - internal/service/grafana + - "/internal/service/grafana" exclude: - - internal/service/grafana/list_pages_gen.go + - "/internal/service/grafana/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -1733,7 +1715,7 @@ rules: message: Do not use "managedgrafana" in const name inside grafana package paths: include: - - internal/service/grafana + - "/internal/service/grafana" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -1747,7 +1729,7 @@ rules: message: Do not use "managedgrafana" in var name inside grafana package paths: include: - - internal/service/grafana + - "/internal/service/grafana" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -1761,9 +1743,9 @@ rules: message: Do not use "MediaConnect" in func name inside mediaconnect package paths: include: - - internal/service/mediaconnect + - "/internal/service/mediaconnect" exclude: - - internal/service/mediaconnect/list_pages_gen.go + - "/internal/service/mediaconnect/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -1779,7 +1761,7 @@ rules: message: Include "MediaConnect" in test name paths: include: - - internal/service/mediaconnect/*_test.go + - "/internal/service/mediaconnect/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -1794,7 +1776,7 @@ rules: message: Do not use "MediaConnect" in const name inside mediaconnect package paths: include: - - internal/service/mediaconnect + - "/internal/service/mediaconnect" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -1808,7 +1790,7 @@ rules: message: Do not use "MediaConnect" in var name inside mediaconnect package paths: include: - - internal/service/mediaconnect + - "/internal/service/mediaconnect" patterns: - pattern: var $NAME = ... 
- metavariable-pattern: @@ -1822,9 +1804,9 @@ rules: message: Do not use "MediaConvert" in func name inside mediaconvert package paths: include: - - internal/service/mediaconvert + - "/internal/service/mediaconvert" exclude: - - internal/service/mediaconvert/list_pages_gen.go + - "/internal/service/mediaconvert/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -1840,7 +1822,7 @@ rules: message: Include "MediaConvert" in test name paths: include: - - internal/service/mediaconvert/*_test.go + - "/internal/service/mediaconvert/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -1855,7 +1837,7 @@ rules: message: Do not use "MediaConvert" in const name inside mediaconvert package paths: include: - - internal/service/mediaconvert + - "/internal/service/mediaconvert" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -1869,7 +1851,7 @@ rules: message: Do not use "MediaConvert" in var name inside mediaconvert package paths: include: - - internal/service/mediaconvert + - "/internal/service/mediaconvert" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -1883,9 +1865,9 @@ rules: message: Do not use "MediaLive" in func name inside medialive package paths: include: - - internal/service/medialive + - "/internal/service/medialive" exclude: - - internal/service/medialive/list_pages_gen.go + - "/internal/service/medialive/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -1901,7 +1883,7 @@ rules: message: Include "MediaLive" in test name paths: include: - - internal/service/medialive/*_test.go + - "/internal/service/medialive/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -1916,7 +1898,7 @@ rules: message: Do not use "MediaLive" in const name inside medialive package paths: include: - - internal/service/medialive + - "/internal/service/medialive" patterns: - pattern: const $NAME = ... 
- metavariable-pattern: @@ -1930,7 +1912,7 @@ rules: message: Do not use "MediaLive" in var name inside medialive package paths: include: - - internal/service/medialive + - "/internal/service/medialive" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -1944,9 +1926,9 @@ rules: message: Do not use "MediaPackage" in func name inside mediapackage package paths: include: - - internal/service/mediapackage + - "/internal/service/mediapackage" exclude: - - internal/service/mediapackage/list_pages_gen.go + - "/internal/service/mediapackage/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -1962,7 +1944,7 @@ rules: message: Include "MediaPackage" in test name paths: include: - - internal/service/mediapackage/*_test.go + - "/internal/service/mediapackage/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -1977,7 +1959,7 @@ rules: message: Do not use "MediaPackage" in const name inside mediapackage package paths: include: - - internal/service/mediapackage + - "/internal/service/mediapackage" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -1991,7 +1973,7 @@ rules: message: Do not use "MediaPackage" in var name inside mediapackage package paths: include: - - internal/service/mediapackage + - "/internal/service/mediapackage" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -2005,9 +1987,9 @@ rules: message: Do not use "MediaPackageV2" in func name inside mediapackagev2 package paths: include: - - internal/service/mediapackagev2 + - "/internal/service/mediapackagev2" exclude: - - internal/service/mediapackagev2/list_pages_gen.go + - "/internal/service/mediapackagev2/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -2023,7 +2005,7 @@ rules: message: Include "MediaPackageV2" in test name paths: include: - - internal/service/mediapackagev2/*_test.go + - "/internal/service/mediapackagev2/*_test.go" patterns: - pattern: func $NAME( ... 
) - metavariable-pattern: @@ -2038,7 +2020,7 @@ rules: message: Do not use "MediaPackageV2" in const name inside mediapackagev2 package paths: include: - - internal/service/mediapackagev2 + - "/internal/service/mediapackagev2" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -2052,7 +2034,7 @@ rules: message: Do not use "MediaPackageV2" in var name inside mediapackagev2 package paths: include: - - internal/service/mediapackagev2 + - "/internal/service/mediapackagev2" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -2066,9 +2048,9 @@ rules: message: Do not use "MediaPackageVOD" in func name inside mediapackagevod package paths: include: - - internal/service/mediapackagevod + - "/internal/service/mediapackagevod" exclude: - - internal/service/mediapackagevod/list_pages_gen.go + - "/internal/service/mediapackagevod/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -2084,7 +2066,7 @@ rules: message: Include "MediaPackageVOD" in test name paths: include: - - internal/service/mediapackagevod/*_test.go + - "/internal/service/mediapackagevod/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -2099,7 +2081,7 @@ rules: message: Do not use "MediaPackageVOD" in const name inside mediapackagevod package paths: include: - - internal/service/mediapackagevod + - "/internal/service/mediapackagevod" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -2113,7 +2095,7 @@ rules: message: Do not use "MediaPackageVOD" in var name inside mediapackagevod package paths: include: - - internal/service/mediapackagevod + - "/internal/service/mediapackagevod" patterns: - pattern: var $NAME = ... 
- metavariable-pattern: @@ -2127,9 +2109,9 @@ rules: message: Do not use "MediaStore" in func name inside mediastore package paths: include: - - internal/service/mediastore + - "/internal/service/mediastore" exclude: - - internal/service/mediastore/list_pages_gen.go + - "/internal/service/mediastore/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -2145,7 +2127,7 @@ rules: message: Include "MediaStore" in test name paths: include: - - internal/service/mediastore/*_test.go + - "/internal/service/mediastore/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -2160,7 +2142,7 @@ rules: message: Do not use "MediaStore" in const name inside mediastore package paths: include: - - internal/service/mediastore + - "/internal/service/mediastore" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -2174,7 +2156,7 @@ rules: message: Do not use "MediaStore" in var name inside mediastore package paths: include: - - internal/service/mediastore + - "/internal/service/mediastore" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -2188,9 +2170,9 @@ rules: message: Do not use "MemoryDB" in func name inside memorydb package paths: include: - - internal/service/memorydb + - "/internal/service/memorydb" exclude: - - internal/service/memorydb/list_pages_gen.go + - "/internal/service/memorydb/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -2206,7 +2188,7 @@ rules: message: Include "MemoryDB" in test name paths: include: - - internal/service/memorydb/*_test.go + - "/internal/service/memorydb/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -2221,7 +2203,7 @@ rules: message: Do not use "MemoryDB" in const name inside memorydb package paths: include: - - internal/service/memorydb + - "/internal/service/memorydb" patterns: - pattern: const $NAME = ... 
- metavariable-pattern: @@ -2235,7 +2217,7 @@ rules: message: Do not use "MemoryDB" in var name inside memorydb package paths: include: - - internal/service/memorydb + - "/internal/service/memorydb" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -2249,9 +2231,9 @@ rules: message: Do not use "Meta" in func name inside meta package paths: include: - - internal/service/meta + - "/internal/service/meta" exclude: - - internal/service/meta/list_pages_gen.go + - "/internal/service/meta/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -2267,7 +2249,7 @@ rules: message: Include "Meta" in test name paths: include: - - internal/service/meta/*_test.go + - "/internal/service/meta/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -2282,7 +2264,7 @@ rules: message: Do not use "Meta" in const name inside meta package paths: include: - - internal/service/meta + - "/internal/service/meta" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -2296,7 +2278,7 @@ rules: message: Do not use "Meta" in var name inside meta package paths: include: - - internal/service/meta + - "/internal/service/meta" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -2310,9 +2292,9 @@ rules: message: Do not use "Mgn" in func name inside mgn package paths: include: - - internal/service/mgn + - "/internal/service/mgn" exclude: - - internal/service/mgn/list_pages_gen.go + - "/internal/service/mgn/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -2328,7 +2310,7 @@ rules: message: Include "Mgn" in test name paths: include: - - internal/service/mgn/*_test.go + - "/internal/service/mgn/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -2343,7 +2325,7 @@ rules: message: Do not use "Mgn" in const name inside mgn package paths: include: - - internal/service/mgn + - "/internal/service/mgn" patterns: - pattern: const $NAME = ... 
- metavariable-pattern: @@ -2357,7 +2339,7 @@ rules: message: Do not use "Mgn" in var name inside mgn package paths: include: - - internal/service/mgn + - "/internal/service/mgn" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -2371,9 +2353,9 @@ rules: message: Do not use "MQ" in func name inside mq package paths: include: - - internal/service/mq + - "/internal/service/mq" exclude: - - internal/service/mq/list_pages_gen.go + - "/internal/service/mq/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -2389,7 +2371,7 @@ rules: message: Include "MQ" in test name paths: include: - - internal/service/mq/*_test.go + - "/internal/service/mq/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -2404,7 +2386,7 @@ rules: message: Do not use "MQ" in const name inside mq package paths: include: - - internal/service/mq + - "/internal/service/mq" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -2418,7 +2400,7 @@ rules: message: Do not use "MQ" in var name inside mq package paths: include: - - internal/service/mq + - "/internal/service/mq" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -2432,9 +2414,9 @@ rules: message: Do not use "msk" in func name inside kafka package paths: include: - - internal/service/kafka + - "/internal/service/kafka" exclude: - - internal/service/kafka/list_pages_gen.go + - "/internal/service/kafka/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -2450,7 +2432,7 @@ rules: message: Do not use "msk" in const name inside kafka package paths: include: - - internal/service/kafka + - "/internal/service/kafka" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -2464,7 +2446,7 @@ rules: message: Do not use "msk" in var name inside kafka package paths: include: - - internal/service/kafka + - "/internal/service/kafka" patterns: - pattern: var $NAME = ... 
- metavariable-pattern: @@ -2478,9 +2460,9 @@ rules: message: Do not use "MWAA" in func name inside mwaa package paths: include: - - internal/service/mwaa + - "/internal/service/mwaa" exclude: - - internal/service/mwaa/list_pages_gen.go + - "/internal/service/mwaa/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -2496,7 +2478,7 @@ rules: message: Include "MWAA" in test name paths: include: - - internal/service/mwaa/*_test.go + - "/internal/service/mwaa/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -2511,7 +2493,7 @@ rules: message: Do not use "MWAA" in const name inside mwaa package paths: include: - - internal/service/mwaa + - "/internal/service/mwaa" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -2525,7 +2507,7 @@ rules: message: Do not use "MWAA" in var name inside mwaa package paths: include: - - internal/service/mwaa + - "/internal/service/mwaa" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -2539,9 +2521,9 @@ rules: message: Do not use "Neptune" in func name inside neptune package paths: include: - - internal/service/neptune + - "/internal/service/neptune" exclude: - - internal/service/neptune/list_pages_gen.go + - "/internal/service/neptune/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -2557,7 +2539,7 @@ rules: message: Include "Neptune" in test name paths: include: - - internal/service/neptune/*_test.go + - "/internal/service/neptune/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -2572,7 +2554,7 @@ rules: message: Do not use "Neptune" in const name inside neptune package paths: include: - - internal/service/neptune + - "/internal/service/neptune" patterns: - pattern: const $NAME = ... 
- metavariable-pattern: @@ -2586,7 +2568,7 @@ rules: message: Do not use "Neptune" in var name inside neptune package paths: include: - - internal/service/neptune + - "/internal/service/neptune" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -2600,9 +2582,9 @@ rules: message: Do not use "NeptuneGraph" in func name inside neptunegraph package paths: include: - - internal/service/neptunegraph + - "/internal/service/neptunegraph" exclude: - - internal/service/neptunegraph/list_pages_gen.go + - "/internal/service/neptunegraph/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -2618,7 +2600,7 @@ rules: message: Include "NeptuneGraph" in test name paths: include: - - internal/service/neptunegraph/*_test.go + - "/internal/service/neptunegraph/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -2633,7 +2615,7 @@ rules: message: Do not use "NeptuneGraph" in const name inside neptunegraph package paths: include: - - internal/service/neptunegraph + - "/internal/service/neptunegraph" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -2647,7 +2629,7 @@ rules: message: Do not use "NeptuneGraph" in var name inside neptunegraph package paths: include: - - internal/service/neptunegraph + - "/internal/service/neptunegraph" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -2661,9 +2643,9 @@ rules: message: Do not use "NetworkFirewall" in func name inside networkfirewall package paths: include: - - internal/service/networkfirewall + - "/internal/service/networkfirewall" exclude: - - internal/service/networkfirewall/list_pages_gen.go + - "/internal/service/networkfirewall/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -2679,7 +2661,7 @@ rules: message: Include "NetworkFirewall" in test name paths: include: - - internal/service/networkfirewall/*_test.go + - "/internal/service/networkfirewall/*_test.go" patterns: - pattern: func $NAME( ... 
) - metavariable-pattern: @@ -2694,7 +2676,7 @@ rules: message: Do not use "NetworkFirewall" in const name inside networkfirewall package paths: include: - - internal/service/networkfirewall + - "/internal/service/networkfirewall" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -2708,7 +2690,7 @@ rules: message: Do not use "NetworkFirewall" in var name inside networkfirewall package paths: include: - - internal/service/networkfirewall + - "/internal/service/networkfirewall" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -2722,9 +2704,9 @@ rules: message: Do not use "NetworkManager" in func name inside networkmanager package paths: include: - - internal/service/networkmanager + - "/internal/service/networkmanager" exclude: - - internal/service/networkmanager/list_pages_gen.go + - "/internal/service/networkmanager/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -2740,7 +2722,7 @@ rules: message: Include "NetworkManager" in test name paths: include: - - internal/service/networkmanager/*_test.go + - "/internal/service/networkmanager/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -2755,7 +2737,7 @@ rules: message: Do not use "NetworkManager" in const name inside networkmanager package paths: include: - - internal/service/networkmanager + - "/internal/service/networkmanager" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -2769,7 +2751,7 @@ rules: message: Do not use "NetworkManager" in var name inside networkmanager package paths: include: - - internal/service/networkmanager + - "/internal/service/networkmanager" patterns: - pattern: var $NAME = ... 
- metavariable-pattern: @@ -2783,9 +2765,9 @@ rules: message: Do not use "NetworkMonitor" in func name inside networkmonitor package paths: include: - - internal/service/networkmonitor + - "/internal/service/networkmonitor" exclude: - - internal/service/networkmonitor/list_pages_gen.go + - "/internal/service/networkmonitor/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -2801,7 +2783,7 @@ rules: message: Include "NetworkMonitor" in test name paths: include: - - internal/service/networkmonitor/*_test.go + - "/internal/service/networkmonitor/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -2816,7 +2798,7 @@ rules: message: Do not use "NetworkMonitor" in const name inside networkmonitor package paths: include: - - internal/service/networkmonitor + - "/internal/service/networkmonitor" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -2830,7 +2812,7 @@ rules: message: Do not use "NetworkMonitor" in var name inside networkmonitor package paths: include: - - internal/service/networkmonitor + - "/internal/service/networkmonitor" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -2844,9 +2826,9 @@ rules: message: Do not use "Notifications" in func name inside notifications package paths: include: - - internal/service/notifications + - "/internal/service/notifications" exclude: - - internal/service/notifications/list_pages_gen.go + - "/internal/service/notifications/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -2862,7 +2844,7 @@ rules: message: Include "Notifications" in test name paths: include: - - internal/service/notifications/*_test.go + - "/internal/service/notifications/*_test.go" patterns: - pattern: func $NAME( ... 
) - metavariable-pattern: @@ -2877,7 +2859,7 @@ rules: message: Do not use "Notifications" in const name inside notifications package paths: include: - - internal/service/notifications + - "/internal/service/notifications" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -2891,7 +2873,7 @@ rules: message: Do not use "Notifications" in var name inside notifications package paths: include: - - internal/service/notifications + - "/internal/service/notifications" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -2905,9 +2887,9 @@ rules: message: Do not use "NotificationsContacts" in func name inside notificationscontacts package paths: include: - - internal/service/notificationscontacts + - "/internal/service/notificationscontacts" exclude: - - internal/service/notificationscontacts/list_pages_gen.go + - "/internal/service/notificationscontacts/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -2923,7 +2905,7 @@ rules: message: Include "NotificationsContacts" in test name paths: include: - - internal/service/notificationscontacts/*_test.go + - "/internal/service/notificationscontacts/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -2938,7 +2920,7 @@ rules: message: Do not use "NotificationsContacts" in const name inside notificationscontacts package paths: include: - - internal/service/notificationscontacts + - "/internal/service/notificationscontacts" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -2952,7 +2934,7 @@ rules: message: Do not use "NotificationsContacts" in var name inside notificationscontacts package paths: include: - - internal/service/notificationscontacts + - "/internal/service/notificationscontacts" patterns: - pattern: var $NAME = ... 
- metavariable-pattern: @@ -2966,9 +2948,9 @@ rules: message: Do not use "ObservabilityAccessManager" in func name inside oam package paths: include: - - internal/service/oam + - "/internal/service/oam" exclude: - - internal/service/oam/list_pages_gen.go + - "/internal/service/oam/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -2984,7 +2966,7 @@ rules: message: Include "ObservabilityAccessManager" in test name paths: include: - - internal/service/oam/*_test.go + - "/internal/service/oam/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -2999,7 +2981,7 @@ rules: message: Do not use "ObservabilityAccessManager" in const name inside oam package paths: include: - - internal/service/oam + - "/internal/service/oam" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -3013,7 +2995,7 @@ rules: message: Do not use "ObservabilityAccessManager" in var name inside oam package paths: include: - - internal/service/oam + - "/internal/service/oam" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -3021,15 +3003,76 @@ rules: patterns: - pattern-regex: "(?i)ObservabilityAccessManager" severity: WARNING + - id: odb-in-func-name + languages: + - go + message: Do not use "ODB" in func name inside odb package + paths: + include: + - "/internal/service/odb" + exclude: + - "/internal/service/odb/list_pages_gen.go" + patterns: + - pattern: func $NAME( ... ) + - metavariable-pattern: + metavariable: $NAME + patterns: + - pattern-regex: "(?i)ODB" + - focus-metavariable: $NAME + - pattern-not: func $NAME($T *testing.T) + severity: WARNING + - id: odb-in-test-name + languages: + - go + message: Include "ODB" in test name + paths: + include: + - "/internal/service/odb/*_test.go" + patterns: + - pattern: func $NAME( ... 
) + - metavariable-pattern: + metavariable: $NAME + patterns: + - pattern-not-regex: "^TestAccODB" + - pattern-regex: ^TestAcc.* + severity: WARNING + - id: odb-in-const-name + languages: + - go + message: Do not use "ODB" in const name inside odb package + paths: + include: + - "/internal/service/odb" + patterns: + - pattern: const $NAME = ... + - metavariable-pattern: + metavariable: $NAME + patterns: + - pattern-regex: "(?i)ODB" + severity: WARNING + - id: odb-in-var-name + languages: + - go + message: Do not use "ODB" in var name inside odb package + paths: + include: + - "/internal/service/odb" + patterns: + - pattern: var $NAME = ... + - metavariable-pattern: + metavariable: $NAME + patterns: + - pattern-regex: "(?i)ODB" + severity: WARNING - id: opensearch-in-func-name languages: - go message: Do not use "OpenSearch" in func name inside opensearch package paths: include: - - internal/service/opensearch + - "/internal/service/opensearch" exclude: - - internal/service/opensearch/list_pages_gen.go + - "/internal/service/opensearch/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -3045,7 +3088,7 @@ rules: message: Include "OpenSearch" in test name paths: include: - - internal/service/opensearch/*_test.go + - "/internal/service/opensearch/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -3060,7 +3103,7 @@ rules: message: Do not use "OpenSearch" in const name inside opensearch package paths: include: - - internal/service/opensearch + - "/internal/service/opensearch" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -3074,7 +3117,7 @@ rules: message: Do not use "OpenSearch" in var name inside opensearch package paths: include: - - internal/service/opensearch + - "/internal/service/opensearch" patterns: - pattern: var $NAME = ... 
- metavariable-pattern: @@ -3088,9 +3131,9 @@ rules: message: Do not use "opensearchingestion" in func name inside osis package paths: include: - - internal/service/osis + - "/internal/service/osis" exclude: - - internal/service/osis/list_pages_gen.go + - "/internal/service/osis/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -3106,7 +3149,7 @@ rules: message: Do not use "opensearchingestion" in const name inside osis package paths: include: - - internal/service/osis + - "/internal/service/osis" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -3120,7 +3163,7 @@ rules: message: Do not use "opensearchingestion" in var name inside osis package paths: include: - - internal/service/osis + - "/internal/service/osis" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -3134,9 +3177,9 @@ rules: message: Do not use "OpenSearchServerless" in func name inside opensearchserverless package paths: include: - - internal/service/opensearchserverless + - "/internal/service/opensearchserverless" exclude: - - internal/service/opensearchserverless/list_pages_gen.go + - "/internal/service/opensearchserverless/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -3152,7 +3195,7 @@ rules: message: Include "OpenSearchServerless" in test name paths: include: - - internal/service/opensearchserverless/*_test.go + - "/internal/service/opensearchserverless/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -3167,7 +3210,7 @@ rules: message: Do not use "OpenSearchServerless" in const name inside opensearchserverless package paths: include: - - internal/service/opensearchserverless + - "/internal/service/opensearchserverless" patterns: - pattern: const $NAME = ... 
- metavariable-pattern: @@ -3181,7 +3224,7 @@ rules: message: Do not use "OpenSearchServerless" in var name inside opensearchserverless package paths: include: - - internal/service/opensearchserverless + - "/internal/service/opensearchserverless" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -3195,9 +3238,9 @@ rules: message: Do not use "opensearchservice" in func name inside opensearch package paths: include: - - internal/service/opensearch + - "/internal/service/opensearch" exclude: - - internal/service/opensearch/list_pages_gen.go + - "/internal/service/opensearch/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -3213,7 +3256,7 @@ rules: message: Do not use "opensearchservice" in const name inside opensearch package paths: include: - - internal/service/opensearch + - "/internal/service/opensearch" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -3227,7 +3270,7 @@ rules: message: Do not use "opensearchservice" in var name inside opensearch package paths: include: - - internal/service/opensearch + - "/internal/service/opensearch" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -3241,9 +3284,9 @@ rules: message: Do not use "Organizations" in func name inside organizations package paths: include: - - internal/service/organizations + - "/internal/service/organizations" exclude: - - internal/service/organizations/list_pages_gen.go + - "/internal/service/organizations/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -3259,7 +3302,7 @@ rules: message: Include "Organizations" in test name paths: include: - - internal/service/organizations/*_test.go + - "/internal/service/organizations/*_test.go" patterns: - pattern: func $NAME( ... 
) - metavariable-pattern: @@ -3274,7 +3317,7 @@ rules: message: Do not use "Organizations" in const name inside organizations package paths: include: - - internal/service/organizations + - "/internal/service/organizations" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -3288,7 +3331,7 @@ rules: message: Do not use "Organizations" in var name inside organizations package paths: include: - - internal/service/organizations + - "/internal/service/organizations" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -3302,9 +3345,9 @@ rules: message: Do not use "OpenSearchIngestion" in func name inside osis package paths: include: - - internal/service/osis + - "/internal/service/osis" exclude: - - internal/service/osis/list_pages_gen.go + - "/internal/service/osis/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -3320,7 +3363,7 @@ rules: message: Include "OpenSearchIngestion" in test name paths: include: - - internal/service/osis/*_test.go + - "/internal/service/osis/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -3335,7 +3378,7 @@ rules: message: Do not use "OpenSearchIngestion" in const name inside osis package paths: include: - - internal/service/osis + - "/internal/service/osis" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -3349,7 +3392,7 @@ rules: message: Do not use "OpenSearchIngestion" in var name inside osis package paths: include: - - internal/service/osis + - "/internal/service/osis" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -3363,9 +3406,9 @@ rules: message: Do not use "Outposts" in func name inside outposts package paths: include: - - internal/service/outposts + - "/internal/service/outposts" exclude: - - internal/service/outposts/list_pages_gen.go + - "/internal/service/outposts/list_pages_gen.go" patterns: - pattern: func $NAME( ... 
) - metavariable-pattern: @@ -3381,7 +3424,7 @@ rules: message: Include "Outposts" in test name paths: include: - - internal/service/outposts/*_test.go + - "/internal/service/outposts/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -3396,7 +3439,7 @@ rules: message: Do not use "Outposts" in const name inside outposts package paths: include: - - internal/service/outposts + - "/internal/service/outposts" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -3410,7 +3453,7 @@ rules: message: Do not use "Outposts" in var name inside outposts package paths: include: - - internal/service/outposts + - "/internal/service/outposts" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -3424,9 +3467,9 @@ rules: message: Do not use "PaymentCryptography" in func name inside paymentcryptography package paths: include: - - internal/service/paymentcryptography + - "/internal/service/paymentcryptography" exclude: - - internal/service/paymentcryptography/list_pages_gen.go + - "/internal/service/paymentcryptography/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -3442,7 +3485,7 @@ rules: message: Include "PaymentCryptography" in test name paths: include: - - internal/service/paymentcryptography/*_test.go + - "/internal/service/paymentcryptography/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -3457,7 +3500,7 @@ rules: message: Do not use "PaymentCryptography" in const name inside paymentcryptography package paths: include: - - internal/service/paymentcryptography + - "/internal/service/paymentcryptography" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -3471,7 +3514,7 @@ rules: message: Do not use "PaymentCryptography" in var name inside paymentcryptography package paths: include: - - internal/service/paymentcryptography + - "/internal/service/paymentcryptography" patterns: - pattern: var $NAME = ... 
- metavariable-pattern: @@ -3485,9 +3528,9 @@ rules: message: Do not use "PCAConnectorAD" in func name inside pcaconnectorad package paths: include: - - internal/service/pcaconnectorad + - "/internal/service/pcaconnectorad" exclude: - - internal/service/pcaconnectorad/list_pages_gen.go + - "/internal/service/pcaconnectorad/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -3503,7 +3546,7 @@ rules: message: Include "PCAConnectorAD" in test name paths: include: - - internal/service/pcaconnectorad/*_test.go + - "/internal/service/pcaconnectorad/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -3518,7 +3561,7 @@ rules: message: Do not use "PCAConnectorAD" in const name inside pcaconnectorad package paths: include: - - internal/service/pcaconnectorad + - "/internal/service/pcaconnectorad" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -3532,7 +3575,7 @@ rules: message: Do not use "PCAConnectorAD" in var name inside pcaconnectorad package paths: include: - - internal/service/pcaconnectorad + - "/internal/service/pcaconnectorad" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -3546,9 +3589,9 @@ rules: message: Do not use "PCS" in func name inside pcs package paths: include: - - internal/service/pcs + - "/internal/service/pcs" exclude: - - internal/service/pcs/list_pages_gen.go + - "/internal/service/pcs/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -3564,7 +3607,7 @@ rules: message: Include "PCS" in test name paths: include: - - internal/service/pcs/*_test.go + - "/internal/service/pcs/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -3579,7 +3622,7 @@ rules: message: Do not use "PCS" in const name inside pcs package paths: include: - - internal/service/pcs + - "/internal/service/pcs" patterns: - pattern: const $NAME = ... 
- metavariable-pattern: @@ -3593,7 +3636,7 @@ rules: message: Do not use "PCS" in var name inside pcs package paths: include: - - internal/service/pcs + - "/internal/service/pcs" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -3607,9 +3650,9 @@ rules: message: Do not use "Pinpoint" in func name inside pinpoint package paths: include: - - internal/service/pinpoint + - "/internal/service/pinpoint" exclude: - - internal/service/pinpoint/list_pages_gen.go + - "/internal/service/pinpoint/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -3625,7 +3668,7 @@ rules: message: Include "Pinpoint" in test name paths: include: - - internal/service/pinpoint/*_test.go + - "/internal/service/pinpoint/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -3640,7 +3683,7 @@ rules: message: Do not use "Pinpoint" in const name inside pinpoint package paths: include: - - internal/service/pinpoint + - "/internal/service/pinpoint" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -3654,7 +3697,7 @@ rules: message: Do not use "Pinpoint" in var name inside pinpoint package paths: include: - - internal/service/pinpoint + - "/internal/service/pinpoint" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -3668,9 +3711,9 @@ rules: message: Do not use "PinpointSMSVoiceV2" in func name inside pinpointsmsvoicev2 package paths: include: - - internal/service/pinpointsmsvoicev2 + - "/internal/service/pinpointsmsvoicev2" exclude: - - internal/service/pinpointsmsvoicev2/list_pages_gen.go + - "/internal/service/pinpointsmsvoicev2/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -3686,7 +3729,7 @@ rules: message: Include "PinpointSMSVoiceV2" in test name paths: include: - - internal/service/pinpointsmsvoicev2/*_test.go + - "/internal/service/pinpointsmsvoicev2/*_test.go" patterns: - pattern: func $NAME( ... 
) - metavariable-pattern: @@ -3701,7 +3744,7 @@ rules: message: Do not use "PinpointSMSVoiceV2" in const name inside pinpointsmsvoicev2 package paths: include: - - internal/service/pinpointsmsvoicev2 + - "/internal/service/pinpointsmsvoicev2" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -3715,7 +3758,7 @@ rules: message: Do not use "PinpointSMSVoiceV2" in var name inside pinpointsmsvoicev2 package paths: include: - - internal/service/pinpointsmsvoicev2 + - "/internal/service/pinpointsmsvoicev2" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -3729,9 +3772,9 @@ rules: message: Do not use "Pipes" in func name inside pipes package paths: include: - - internal/service/pipes + - "/internal/service/pipes" exclude: - - internal/service/pipes/list_pages_gen.go + - "/internal/service/pipes/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -3748,7 +3791,7 @@ rules: message: Include "Pipes" in test name paths: include: - - internal/service/pipes/*_test.go + - "/internal/service/pipes/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -3763,7 +3806,7 @@ rules: message: Do not use "Pipes" in const name inside pipes package paths: include: - - internal/service/pipes + - "/internal/service/pipes" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -3778,7 +3821,7 @@ rules: message: Do not use "Pipes" in var name inside pipes package paths: include: - - internal/service/pipes + - "/internal/service/pipes" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -3793,9 +3836,9 @@ rules: message: Do not use "Polly" in func name inside polly package paths: include: - - internal/service/polly + - "/internal/service/polly" exclude: - - internal/service/polly/list_pages_gen.go + - "/internal/service/polly/list_pages_gen.go" patterns: - pattern: func $NAME( ... 
) - metavariable-pattern: @@ -3811,7 +3854,7 @@ rules: message: Include "Polly" in test name paths: include: - - internal/service/polly/*_test.go + - "/internal/service/polly/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -3826,7 +3869,7 @@ rules: message: Do not use "Polly" in const name inside polly package paths: include: - - internal/service/polly + - "/internal/service/polly" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -3840,7 +3883,7 @@ rules: message: Do not use "Polly" in var name inside polly package paths: include: - - internal/service/polly + - "/internal/service/polly" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -3854,9 +3897,9 @@ rules: message: Do not use "Pricing" in func name inside pricing package paths: include: - - internal/service/pricing + - "/internal/service/pricing" exclude: - - internal/service/pricing/list_pages_gen.go + - "/internal/service/pricing/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -3872,7 +3915,7 @@ rules: message: Include "Pricing" in test name paths: include: - - internal/service/pricing/*_test.go + - "/internal/service/pricing/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -3887,7 +3930,7 @@ rules: message: Do not use "Pricing" in const name inside pricing package paths: include: - - internal/service/pricing + - "/internal/service/pricing" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -3901,7 +3944,7 @@ rules: message: Do not use "Pricing" in var name inside pricing package paths: include: - - internal/service/pricing + - "/internal/service/pricing" patterns: - pattern: var $NAME = ... 
- metavariable-pattern: @@ -3915,9 +3958,9 @@ rules: message: Do not use "prometheus" in func name inside amp package paths: include: - - internal/service/amp + - "/internal/service/amp" exclude: - - internal/service/amp/list_pages_gen.go + - "/internal/service/amp/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -3933,7 +3976,7 @@ rules: message: Do not use "prometheus" in const name inside amp package paths: include: - - internal/service/amp + - "/internal/service/amp" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -3947,7 +3990,7 @@ rules: message: Do not use "prometheus" in var name inside amp package paths: include: - - internal/service/amp + - "/internal/service/amp" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -3961,9 +4004,9 @@ rules: message: Do not use "prometheusservice" in func name inside amp package paths: include: - - internal/service/amp + - "/internal/service/amp" exclude: - - internal/service/amp/list_pages_gen.go + - "/internal/service/amp/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -3979,7 +4022,7 @@ rules: message: Do not use "prometheusservice" in const name inside amp package paths: include: - - internal/service/amp + - "/internal/service/amp" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -3993,7 +4036,7 @@ rules: message: Do not use "prometheusservice" in var name inside amp package paths: include: - - internal/service/amp + - "/internal/service/amp" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -4007,9 +4050,9 @@ rules: message: Do not use "QBusiness" in func name inside qbusiness package paths: include: - - internal/service/qbusiness + - "/internal/service/qbusiness" exclude: - - internal/service/qbusiness/list_pages_gen.go + - "/internal/service/qbusiness/list_pages_gen.go" patterns: - pattern: func $NAME( ... 
) - metavariable-pattern: @@ -4025,7 +4068,7 @@ rules: message: Include "QBusiness" in test name paths: include: - - internal/service/qbusiness/*_test.go + - "/internal/service/qbusiness/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -4040,7 +4083,7 @@ rules: message: Do not use "QBusiness" in const name inside qbusiness package paths: include: - - internal/service/qbusiness + - "/internal/service/qbusiness" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -4054,7 +4097,7 @@ rules: message: Do not use "QBusiness" in var name inside qbusiness package paths: include: - - internal/service/qbusiness + - "/internal/service/qbusiness" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -4068,9 +4111,9 @@ rules: message: Do not use "QLDB" in func name inside qldb package paths: include: - - internal/service/qldb + - "/internal/service/qldb" exclude: - - internal/service/qldb/list_pages_gen.go + - "/internal/service/qldb/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -4086,7 +4129,7 @@ rules: message: Include "QLDB" in test name paths: include: - - internal/service/qldb/*_test.go + - "/internal/service/qldb/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -4101,7 +4144,7 @@ rules: message: Do not use "QLDB" in const name inside qldb package paths: include: - - internal/service/qldb + - "/internal/service/qldb" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -4115,7 +4158,7 @@ rules: message: Do not use "QLDB" in var name inside qldb package paths: include: - - internal/service/qldb + - "/internal/service/qldb" patterns: - pattern: var $NAME = ... 
- metavariable-pattern: @@ -4129,9 +4172,9 @@ rules: message: Do not use "QuickSight" in func name inside quicksight package paths: include: - - internal/service/quicksight + - "/internal/service/quicksight" exclude: - - internal/service/quicksight/list_pages_gen.go + - "/internal/service/quicksight/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -4147,7 +4190,7 @@ rules: message: Include "QuickSight" in test name paths: include: - - internal/service/quicksight/*_test.go + - "/internal/service/quicksight/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -4162,7 +4205,7 @@ rules: message: Do not use "QuickSight" in const name inside quicksight package paths: include: - - internal/service/quicksight + - "/internal/service/quicksight" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -4176,7 +4219,7 @@ rules: message: Do not use "QuickSight" in var name inside quicksight package paths: include: - - internal/service/quicksight + - "/internal/service/quicksight" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -4190,9 +4233,9 @@ rules: message: Do not use "RAM" in func name inside ram package paths: include: - - internal/service/ram + - "/internal/service/ram" exclude: - - internal/service/ram/list_pages_gen.go + - "/internal/service/ram/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -4209,7 +4252,7 @@ rules: message: Include "RAM" in test name paths: include: - - internal/service/ram/*_test.go + - "/internal/service/ram/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -4224,7 +4267,7 @@ rules: message: Do not use "RAM" in const name inside ram package paths: include: - - internal/service/ram + - "/internal/service/ram" patterns: - pattern: const $NAME = ... 
- metavariable-pattern: @@ -4239,7 +4282,7 @@ rules: message: Do not use "RAM" in var name inside ram package paths: include: - - internal/service/ram + - "/internal/service/ram" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -4254,9 +4297,9 @@ rules: message: Do not use "RBin" in func name inside rbin package paths: include: - - internal/service/rbin + - "/internal/service/rbin" exclude: - - internal/service/rbin/list_pages_gen.go + - "/internal/service/rbin/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -4272,7 +4315,7 @@ rules: message: Include "RBin" in test name paths: include: - - internal/service/rbin/*_test.go + - "/internal/service/rbin/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -4287,7 +4330,7 @@ rules: message: Do not use "RBin" in const name inside rbin package paths: include: - - internal/service/rbin + - "/internal/service/rbin" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -4301,7 +4344,7 @@ rules: message: Do not use "RBin" in var name inside rbin package paths: include: - - internal/service/rbin + - "/internal/service/rbin" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -4315,9 +4358,9 @@ rules: message: Do not use "RDS" in func name inside rds package paths: include: - - internal/service/rds + - "/internal/service/rds" exclude: - - internal/service/rds/list_pages_gen.go + - "/internal/service/rds/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -4333,7 +4376,7 @@ rules: message: Include "RDS" in test name paths: include: - - internal/service/rds/*_test.go + - "/internal/service/rds/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -4348,7 +4391,7 @@ rules: message: Do not use "RDS" in const name inside rds package paths: include: - - internal/service/rds + - "/internal/service/rds" patterns: - pattern: const $NAME = ... 
- metavariable-pattern: @@ -4362,7 +4405,7 @@ rules: message: Do not use "RDS" in var name inside rds package paths: include: - - internal/service/rds + - "/internal/service/rds" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -4370,3 +4413,49 @@ rules: patterns: - pattern-regex: "(?i)RDS" severity: WARNING + - id: recyclebin-in-func-name + languages: + - go + message: Do not use "recyclebin" in func name inside rbin package + paths: + include: + - "/internal/service/rbin" + exclude: + - "/internal/service/rbin/list_pages_gen.go" + patterns: + - pattern: func $NAME( ... ) + - metavariable-pattern: + metavariable: $NAME + patterns: + - pattern-regex: "(?i)recyclebin" + - focus-metavariable: $NAME + - pattern-not: func $NAME($T *testing.T) + severity: WARNING + - id: recyclebin-in-const-name + languages: + - go + message: Do not use "recyclebin" in const name inside rbin package + paths: + include: + - "/internal/service/rbin" + patterns: + - pattern: const $NAME = ... + - metavariable-pattern: + metavariable: $NAME + patterns: + - pattern-regex: "(?i)recyclebin" + severity: WARNING + - id: recyclebin-in-var-name + languages: + - go + message: Do not use "recyclebin" in var name inside rbin package + paths: + include: + - "/internal/service/rbin" + patterns: + - pattern: var $NAME = ... + - metavariable-pattern: + metavariable: $NAME + patterns: + - pattern-regex: "(?i)recyclebin" + severity: WARNING diff --git a/.ci/.semgrep-service-name3.yml b/.ci/.semgrep-service-name3.yml index bdca7386db66..833fcb162182 100644 --- a/.ci/.semgrep-service-name3.yml +++ b/.ci/.semgrep-service-name3.yml @@ -1,60 +1,14 @@ # Generated by internal/generate/servicesemgrep/main.go; DO NOT EDIT. rules: - - id: recyclebin-in-func-name - languages: - - go - message: Do not use "recyclebin" in func name inside rbin package - paths: - include: - - internal/service/rbin - exclude: - - internal/service/rbin/list_pages_gen.go - patterns: - - pattern: func $NAME( ... 
) - - metavariable-pattern: - metavariable: $NAME - patterns: - - pattern-regex: "(?i)recyclebin" - - focus-metavariable: $NAME - - pattern-not: func $NAME($T *testing.T) - severity: WARNING - - id: recyclebin-in-const-name - languages: - - go - message: Do not use "recyclebin" in const name inside rbin package - paths: - include: - - internal/service/rbin - patterns: - - pattern: const $NAME = ... - - metavariable-pattern: - metavariable: $NAME - patterns: - - pattern-regex: "(?i)recyclebin" - severity: WARNING - - id: recyclebin-in-var-name - languages: - - go - message: Do not use "recyclebin" in var name inside rbin package - paths: - include: - - internal/service/rbin - patterns: - - pattern: var $NAME = ... - - metavariable-pattern: - metavariable: $NAME - patterns: - - pattern-regex: "(?i)recyclebin" - severity: WARNING - id: redshift-in-func-name languages: - go message: Do not use "Redshift" in func name inside redshift package paths: include: - - internal/service/redshift + - "/internal/service/redshift" exclude: - - internal/service/redshift/list_pages_gen.go + - "/internal/service/redshift/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -70,7 +24,7 @@ rules: message: Include "Redshift" in test name paths: include: - - internal/service/redshift/*_test.go + - "/internal/service/redshift/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -85,7 +39,7 @@ rules: message: Do not use "Redshift" in const name inside redshift package paths: include: - - internal/service/redshift + - "/internal/service/redshift" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -99,7 +53,7 @@ rules: message: Do not use "Redshift" in var name inside redshift package paths: include: - - internal/service/redshift + - "/internal/service/redshift" patterns: - pattern: var $NAME = ... 
- metavariable-pattern: @@ -113,9 +67,9 @@ rules: message: Do not use "RedshiftData" in func name inside redshiftdata package paths: include: - - internal/service/redshiftdata + - "/internal/service/redshiftdata" exclude: - - internal/service/redshiftdata/list_pages_gen.go + - "/internal/service/redshiftdata/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -131,7 +85,7 @@ rules: message: Include "RedshiftData" in test name paths: include: - - internal/service/redshiftdata/*_test.go + - "/internal/service/redshiftdata/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -146,7 +100,7 @@ rules: message: Do not use "RedshiftData" in const name inside redshiftdata package paths: include: - - internal/service/redshiftdata + - "/internal/service/redshiftdata" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -160,7 +114,7 @@ rules: message: Do not use "RedshiftData" in var name inside redshiftdata package paths: include: - - internal/service/redshiftdata + - "/internal/service/redshiftdata" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -174,9 +128,9 @@ rules: message: Do not use "redshiftdataapiservice" in func name inside redshiftdata package paths: include: - - internal/service/redshiftdata + - "/internal/service/redshiftdata" exclude: - - internal/service/redshiftdata/list_pages_gen.go + - "/internal/service/redshiftdata/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -192,7 +146,7 @@ rules: message: Do not use "redshiftdataapiservice" in const name inside redshiftdata package paths: include: - - internal/service/redshiftdata + - "/internal/service/redshiftdata" patterns: - pattern: const $NAME = ... 
- metavariable-pattern: @@ -206,7 +160,7 @@ rules: message: Do not use "redshiftdataapiservice" in var name inside redshiftdata package paths: include: - - internal/service/redshiftdata + - "/internal/service/redshiftdata" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -220,9 +174,9 @@ rules: message: Do not use "RedshiftServerless" in func name inside redshiftserverless package paths: include: - - internal/service/redshiftserverless + - "/internal/service/redshiftserverless" exclude: - - internal/service/redshiftserverless/list_pages_gen.go + - "/internal/service/redshiftserverless/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -238,7 +192,7 @@ rules: message: Include "RedshiftServerless" in test name paths: include: - - internal/service/redshiftserverless/*_test.go + - "/internal/service/redshiftserverless/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -253,7 +207,7 @@ rules: message: Do not use "RedshiftServerless" in const name inside redshiftserverless package paths: include: - - internal/service/redshiftserverless + - "/internal/service/redshiftserverless" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -267,7 +221,7 @@ rules: message: Do not use "RedshiftServerless" in var name inside redshiftserverless package paths: include: - - internal/service/redshiftserverless + - "/internal/service/redshiftserverless" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -281,9 +235,9 @@ rules: message: Do not use "Rekognition" in func name inside rekognition package paths: include: - - internal/service/rekognition + - "/internal/service/rekognition" exclude: - - internal/service/rekognition/list_pages_gen.go + - "/internal/service/rekognition/list_pages_gen.go" patterns: - pattern: func $NAME( ... 
) - metavariable-pattern: @@ -299,7 +253,7 @@ rules: message: Include "Rekognition" in test name paths: include: - - internal/service/rekognition/*_test.go + - "/internal/service/rekognition/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -314,7 +268,7 @@ rules: message: Do not use "Rekognition" in const name inside rekognition package paths: include: - - internal/service/rekognition + - "/internal/service/rekognition" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -328,7 +282,7 @@ rules: message: Do not use "Rekognition" in var name inside rekognition package paths: include: - - internal/service/rekognition + - "/internal/service/rekognition" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -342,9 +296,9 @@ rules: message: Do not use "ResilienceHub" in func name inside resiliencehub package paths: include: - - internal/service/resiliencehub + - "/internal/service/resiliencehub" exclude: - - internal/service/resiliencehub/list_pages_gen.go + - "/internal/service/resiliencehub/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -360,7 +314,7 @@ rules: message: Include "ResilienceHub" in test name paths: include: - - internal/service/resiliencehub/*_test.go + - "/internal/service/resiliencehub/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -375,7 +329,7 @@ rules: message: Do not use "ResilienceHub" in const name inside resiliencehub package paths: include: - - internal/service/resiliencehub + - "/internal/service/resiliencehub" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -389,7 +343,7 @@ rules: message: Do not use "ResilienceHub" in var name inside resiliencehub package paths: include: - - internal/service/resiliencehub + - "/internal/service/resiliencehub" patterns: - pattern: var $NAME = ... 
- metavariable-pattern: @@ -403,9 +357,9 @@ rules: message: Do not use "ResourceExplorer2" in func name inside resourceexplorer2 package paths: include: - - internal/service/resourceexplorer2 + - "/internal/service/resourceexplorer2" exclude: - - internal/service/resourceexplorer2/list_pages_gen.go + - "/internal/service/resourceexplorer2/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -421,7 +375,7 @@ rules: message: Include "ResourceExplorer2" in test name paths: include: - - internal/service/resourceexplorer2/*_test.go + - "/internal/service/resourceexplorer2/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -436,7 +390,7 @@ rules: message: Do not use "ResourceExplorer2" in const name inside resourceexplorer2 package paths: include: - - internal/service/resourceexplorer2 + - "/internal/service/resourceexplorer2" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -450,7 +404,7 @@ rules: message: Do not use "ResourceExplorer2" in var name inside resourceexplorer2 package paths: include: - - internal/service/resourceexplorer2 + - "/internal/service/resourceexplorer2" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -464,9 +418,9 @@ rules: message: Do not use "ResourceGroups" in func name inside resourcegroups package paths: include: - - internal/service/resourcegroups + - "/internal/service/resourcegroups" exclude: - - internal/service/resourcegroups/list_pages_gen.go + - "/internal/service/resourcegroups/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -482,7 +436,7 @@ rules: message: Include "ResourceGroups" in test name paths: include: - - internal/service/resourcegroups/*_test.go + - "/internal/service/resourcegroups/*_test.go" patterns: - pattern: func $NAME( ... 
) - metavariable-pattern: @@ -497,7 +451,7 @@ rules: message: Do not use "ResourceGroups" in const name inside resourcegroups package paths: include: - - internal/service/resourcegroups + - "/internal/service/resourcegroups" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -511,7 +465,7 @@ rules: message: Do not use "ResourceGroups" in var name inside resourcegroups package paths: include: - - internal/service/resourcegroups + - "/internal/service/resourcegroups" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -525,9 +479,9 @@ rules: message: Do not use "resourcegroupstagging" in func name inside resourcegroupstaggingapi package paths: include: - - internal/service/resourcegroupstaggingapi + - "/internal/service/resourcegroupstaggingapi" exclude: - - internal/service/resourcegroupstaggingapi/list_pages_gen.go + - "/internal/service/resourcegroupstaggingapi/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -543,7 +497,7 @@ rules: message: Do not use "resourcegroupstagging" in const name inside resourcegroupstaggingapi package paths: include: - - internal/service/resourcegroupstaggingapi + - "/internal/service/resourcegroupstaggingapi" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -557,7 +511,7 @@ rules: message: Do not use "resourcegroupstagging" in var name inside resourcegroupstaggingapi package paths: include: - - internal/service/resourcegroupstaggingapi + - "/internal/service/resourcegroupstaggingapi" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -571,9 +525,9 @@ rules: message: Do not use "ResourceGroupsTaggingAPI" in func name inside resourcegroupstaggingapi package paths: include: - - internal/service/resourcegroupstaggingapi + - "/internal/service/resourcegroupstaggingapi" exclude: - - internal/service/resourcegroupstaggingapi/list_pages_gen.go + - "/internal/service/resourcegroupstaggingapi/list_pages_gen.go" patterns: - pattern: func $NAME( ... 
) - metavariable-pattern: @@ -589,7 +543,7 @@ rules: message: Include "ResourceGroupsTaggingAPI" in test name paths: include: - - internal/service/resourcegroupstaggingapi/*_test.go + - "/internal/service/resourcegroupstaggingapi/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -604,7 +558,7 @@ rules: message: Do not use "ResourceGroupsTaggingAPI" in const name inside resourcegroupstaggingapi package paths: include: - - internal/service/resourcegroupstaggingapi + - "/internal/service/resourcegroupstaggingapi" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -618,7 +572,7 @@ rules: message: Do not use "ResourceGroupsTaggingAPI" in var name inside resourcegroupstaggingapi package paths: include: - - internal/service/resourcegroupstaggingapi + - "/internal/service/resourcegroupstaggingapi" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -632,9 +586,9 @@ rules: message: Do not use "RolesAnywhere" in func name inside rolesanywhere package paths: include: - - internal/service/rolesanywhere + - "/internal/service/rolesanywhere" exclude: - - internal/service/rolesanywhere/list_pages_gen.go + - "/internal/service/rolesanywhere/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -650,7 +604,7 @@ rules: message: Include "RolesAnywhere" in test name paths: include: - - internal/service/rolesanywhere/*_test.go + - "/internal/service/rolesanywhere/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -665,7 +619,7 @@ rules: message: Do not use "RolesAnywhere" in const name inside rolesanywhere package paths: include: - - internal/service/rolesanywhere + - "/internal/service/rolesanywhere" patterns: - pattern: const $NAME = ... 
- metavariable-pattern: @@ -679,7 +633,7 @@ rules: message: Do not use "RolesAnywhere" in var name inside rolesanywhere package paths: include: - - internal/service/rolesanywhere + - "/internal/service/rolesanywhere" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -693,9 +647,9 @@ rules: message: Do not use "Route53" in func name inside route53 package paths: include: - - internal/service/route53 + - "/internal/service/route53" exclude: - - internal/service/route53/list_pages_gen.go + - "/internal/service/route53/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -711,7 +665,7 @@ rules: message: Include "Route53" in test name paths: include: - - internal/service/route53/*_test.go + - "/internal/service/route53/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -726,7 +680,7 @@ rules: message: Do not use "Route53" in const name inside route53 package paths: include: - - internal/service/route53 + - "/internal/service/route53" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -740,7 +694,7 @@ rules: message: Do not use "Route53" in var name inside route53 package paths: include: - - internal/service/route53 + - "/internal/service/route53" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -754,9 +708,9 @@ rules: message: Do not use "Route53Domains" in func name inside route53domains package paths: include: - - internal/service/route53domains + - "/internal/service/route53domains" exclude: - - internal/service/route53domains/list_pages_gen.go + - "/internal/service/route53domains/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -772,7 +726,7 @@ rules: message: Include "Route53Domains" in test name paths: include: - - internal/service/route53domains/*_test.go + - "/internal/service/route53domains/*_test.go" patterns: - pattern: func $NAME( ... 
) - metavariable-pattern: @@ -787,7 +741,7 @@ rules: message: Do not use "Route53Domains" in const name inside route53domains package paths: include: - - internal/service/route53domains + - "/internal/service/route53domains" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -801,7 +755,7 @@ rules: message: Do not use "Route53Domains" in var name inside route53domains package paths: include: - - internal/service/route53domains + - "/internal/service/route53domains" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -815,9 +769,9 @@ rules: message: Do not use "Route53Profiles" in func name inside route53profiles package paths: include: - - internal/service/route53profiles + - "/internal/service/route53profiles" exclude: - - internal/service/route53profiles/list_pages_gen.go + - "/internal/service/route53profiles/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -833,7 +787,7 @@ rules: message: Include "Route53Profiles" in test name paths: include: - - internal/service/route53profiles/*_test.go + - "/internal/service/route53profiles/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -848,7 +802,7 @@ rules: message: Do not use "Route53Profiles" in const name inside route53profiles package paths: include: - - internal/service/route53profiles + - "/internal/service/route53profiles" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -862,7 +816,7 @@ rules: message: Do not use "Route53Profiles" in var name inside route53profiles package paths: include: - - internal/service/route53profiles + - "/internal/service/route53profiles" patterns: - pattern: var $NAME = ... 
- metavariable-pattern: @@ -876,9 +830,9 @@ rules: message: Do not use "Route53RecoveryControlConfig" in func name inside route53recoverycontrolconfig package paths: include: - - internal/service/route53recoverycontrolconfig + - "/internal/service/route53recoverycontrolconfig" exclude: - - internal/service/route53recoverycontrolconfig/list_pages_gen.go + - "/internal/service/route53recoverycontrolconfig/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -894,7 +848,7 @@ rules: message: Include "Route53RecoveryControlConfig" in test name paths: include: - - internal/service/route53recoverycontrolconfig/*_test.go + - "/internal/service/route53recoverycontrolconfig/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -909,7 +863,7 @@ rules: message: Do not use "Route53RecoveryControlConfig" in const name inside route53recoverycontrolconfig package paths: include: - - internal/service/route53recoverycontrolconfig + - "/internal/service/route53recoverycontrolconfig" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -923,7 +877,7 @@ rules: message: Do not use "Route53RecoveryControlConfig" in var name inside route53recoverycontrolconfig package paths: include: - - internal/service/route53recoverycontrolconfig + - "/internal/service/route53recoverycontrolconfig" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -937,9 +891,9 @@ rules: message: Do not use "Route53RecoveryReadiness" in func name inside route53recoveryreadiness package paths: include: - - internal/service/route53recoveryreadiness + - "/internal/service/route53recoveryreadiness" exclude: - - internal/service/route53recoveryreadiness/list_pages_gen.go + - "/internal/service/route53recoveryreadiness/list_pages_gen.go" patterns: - pattern: func $NAME( ... 
) - metavariable-pattern: @@ -955,7 +909,7 @@ rules: message: Include "Route53RecoveryReadiness" in test name paths: include: - - internal/service/route53recoveryreadiness/*_test.go + - "/internal/service/route53recoveryreadiness/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -970,7 +924,7 @@ rules: message: Do not use "Route53RecoveryReadiness" in const name inside route53recoveryreadiness package paths: include: - - internal/service/route53recoveryreadiness + - "/internal/service/route53recoveryreadiness" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -984,7 +938,7 @@ rules: message: Do not use "Route53RecoveryReadiness" in var name inside route53recoveryreadiness package paths: include: - - internal/service/route53recoveryreadiness + - "/internal/service/route53recoveryreadiness" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -998,9 +952,9 @@ rules: message: Do not use "Route53Resolver" in func name inside route53resolver package paths: include: - - internal/service/route53resolver + - "/internal/service/route53resolver" exclude: - - internal/service/route53resolver/list_pages_gen.go + - "/internal/service/route53resolver/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -1016,7 +970,7 @@ rules: message: Include "Route53Resolver" in test name paths: include: - - internal/service/route53resolver/*_test.go + - "/internal/service/route53resolver/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -1031,7 +985,7 @@ rules: message: Do not use "Route53Resolver" in const name inside route53resolver package paths: include: - - internal/service/route53resolver + - "/internal/service/route53resolver" patterns: - pattern: const $NAME = ... 
- metavariable-pattern: @@ -1045,7 +999,7 @@ rules: message: Do not use "Route53Resolver" in var name inside route53resolver package paths: include: - - internal/service/route53resolver + - "/internal/service/route53resolver" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -1059,9 +1013,9 @@ rules: message: Do not use "RUM" in func name inside rum package paths: include: - - internal/service/rum + - "/internal/service/rum" exclude: - - internal/service/rum/list_pages_gen.go + - "/internal/service/rum/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -1077,7 +1031,7 @@ rules: message: Include "RUM" in test name paths: include: - - internal/service/rum/*_test.go + - "/internal/service/rum/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -1092,7 +1046,7 @@ rules: message: Do not use "RUM" in const name inside rum package paths: include: - - internal/service/rum + - "/internal/service/rum" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -1106,7 +1060,7 @@ rules: message: Do not use "RUM" in var name inside rum package paths: include: - - internal/service/rum + - "/internal/service/rum" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -1120,9 +1074,9 @@ rules: message: Do not use "S3" in func name inside s3 package paths: include: - - internal/service/s3 + - "/internal/service/s3" exclude: - - internal/service/s3/list_pages_gen.go + - "/internal/service/s3/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -1138,7 +1092,7 @@ rules: message: Include "S3" in test name paths: include: - - internal/service/s3/*_test.go + - "/internal/service/s3/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -1153,7 +1107,7 @@ rules: message: Do not use "S3" in const name inside s3 package paths: include: - - internal/service/s3 + - "/internal/service/s3" patterns: - pattern: const $NAME = ... 
- metavariable-pattern: @@ -1167,7 +1121,7 @@ rules: message: Do not use "S3" in var name inside s3 package paths: include: - - internal/service/s3 + - "/internal/service/s3" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -1181,9 +1135,9 @@ rules: message: Do not use "s3api" in func name inside s3 package paths: include: - - internal/service/s3 + - "/internal/service/s3" exclude: - - internal/service/s3/list_pages_gen.go + - "/internal/service/s3/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -1199,7 +1153,7 @@ rules: message: Do not use "s3api" in const name inside s3 package paths: include: - - internal/service/s3 + - "/internal/service/s3" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -1213,7 +1167,7 @@ rules: message: Do not use "s3api" in var name inside s3 package paths: include: - - internal/service/s3 + - "/internal/service/s3" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -1227,9 +1181,9 @@ rules: message: Do not use "S3Control" in func name inside s3control package paths: include: - - internal/service/s3control + - "/internal/service/s3control" exclude: - - internal/service/s3control/list_pages_gen.go + - "/internal/service/s3control/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -1245,7 +1199,7 @@ rules: message: Include "S3Control" in test name paths: include: - - internal/service/s3control/*_test.go + - "/internal/service/s3control/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -1260,7 +1214,7 @@ rules: message: Do not use "S3Control" in const name inside s3control package paths: include: - - internal/service/s3control + - "/internal/service/s3control" patterns: - pattern: const $NAME = ... 
- metavariable-pattern: @@ -1274,7 +1228,7 @@ rules: message: Do not use "S3Control" in var name inside s3control package paths: include: - - internal/service/s3control + - "/internal/service/s3control" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -1288,9 +1242,9 @@ rules: message: Do not use "S3Outposts" in func name inside s3outposts package paths: include: - - internal/service/s3outposts + - "/internal/service/s3outposts" exclude: - - internal/service/s3outposts/list_pages_gen.go + - "/internal/service/s3outposts/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -1306,7 +1260,7 @@ rules: message: Include "S3Outposts" in test name paths: include: - - internal/service/s3outposts/*_test.go + - "/internal/service/s3outposts/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -1321,7 +1275,7 @@ rules: message: Do not use "S3Outposts" in const name inside s3outposts package paths: include: - - internal/service/s3outposts + - "/internal/service/s3outposts" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -1335,7 +1289,7 @@ rules: message: Do not use "S3Outposts" in var name inside s3outposts package paths: include: - - internal/service/s3outposts + - "/internal/service/s3outposts" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -1349,9 +1303,9 @@ rules: message: Do not use "S3Tables" in func name inside s3tables package paths: include: - - internal/service/s3tables + - "/internal/service/s3tables" exclude: - - internal/service/s3tables/list_pages_gen.go + - "/internal/service/s3tables/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -1367,7 +1321,7 @@ rules: message: Include "S3Tables" in test name paths: include: - - internal/service/s3tables/*_test.go + - "/internal/service/s3tables/*_test.go" patterns: - pattern: func $NAME( ... 
) - metavariable-pattern: @@ -1382,7 +1336,7 @@ rules: message: Do not use "S3Tables" in const name inside s3tables package paths: include: - - internal/service/s3tables + - "/internal/service/s3tables" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -1396,7 +1350,7 @@ rules: message: Do not use "S3Tables" in var name inside s3tables package paths: include: - - internal/service/s3tables + - "/internal/service/s3tables" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -1404,15 +1358,76 @@ rules: patterns: - pattern-regex: "(?i)S3Tables" severity: WARNING + - id: s3vectors-in-func-name + languages: + - go + message: Do not use "S3Vectors" in func name inside s3vectors package + paths: + include: + - "/internal/service/s3vectors" + exclude: + - "/internal/service/s3vectors/list_pages_gen.go" + patterns: + - pattern: func $NAME( ... ) + - metavariable-pattern: + metavariable: $NAME + patterns: + - pattern-regex: "(?i)S3Vectors" + - focus-metavariable: $NAME + - pattern-not: func $NAME($T *testing.T) + severity: WARNING + - id: s3vectors-in-test-name + languages: + - go + message: Include "S3Vectors" in test name + paths: + include: + - "/internal/service/s3vectors/*_test.go" + patterns: + - pattern: func $NAME( ... ) + - metavariable-pattern: + metavariable: $NAME + patterns: + - pattern-not-regex: "^TestAccS3Vectors" + - pattern-regex: ^TestAcc.* + severity: WARNING + - id: s3vectors-in-const-name + languages: + - go + message: Do not use "S3Vectors" in const name inside s3vectors package + paths: + include: + - "/internal/service/s3vectors" + patterns: + - pattern: const $NAME = ... + - metavariable-pattern: + metavariable: $NAME + patterns: + - pattern-regex: "(?i)S3Vectors" + severity: WARNING + - id: s3vectors-in-var-name + languages: + - go + message: Do not use "S3Vectors" in var name inside s3vectors package + paths: + include: + - "/internal/service/s3vectors" + patterns: + - pattern: var $NAME = ... 
+ - metavariable-pattern: + metavariable: $NAME + patterns: + - pattern-regex: "(?i)S3Vectors" + severity: WARNING - id: sagemaker-in-func-name languages: - go message: Do not use "SageMaker" in func name inside sagemaker package paths: include: - - internal/service/sagemaker + - "/internal/service/sagemaker" exclude: - - internal/service/sagemaker/list_pages_gen.go + - "/internal/service/sagemaker/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -1428,7 +1443,7 @@ rules: message: Include "SageMaker" in test name paths: include: - - internal/service/sagemaker/*_test.go + - "/internal/service/sagemaker/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -1443,7 +1458,7 @@ rules: message: Do not use "SageMaker" in const name inside sagemaker package paths: include: - - internal/service/sagemaker + - "/internal/service/sagemaker" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -1457,7 +1472,7 @@ rules: message: Do not use "SageMaker" in var name inside sagemaker package paths: include: - - internal/service/sagemaker + - "/internal/service/sagemaker" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -1471,9 +1486,9 @@ rules: message: Do not use "Scheduler" in func name inside scheduler package paths: include: - - internal/service/scheduler + - "/internal/service/scheduler" exclude: - - internal/service/scheduler/list_pages_gen.go + - "/internal/service/scheduler/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -1489,7 +1504,7 @@ rules: message: Include "Scheduler" in test name paths: include: - - internal/service/scheduler/*_test.go + - "/internal/service/scheduler/*_test.go" patterns: - pattern: func $NAME( ... 
) - metavariable-pattern: @@ -1504,7 +1519,7 @@ rules: message: Do not use "Scheduler" in const name inside scheduler package paths: include: - - internal/service/scheduler + - "/internal/service/scheduler" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -1518,7 +1533,7 @@ rules: message: Do not use "Scheduler" in var name inside scheduler package paths: include: - - internal/service/scheduler + - "/internal/service/scheduler" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -1532,9 +1547,9 @@ rules: message: Do not use "Schemas" in func name inside schemas package paths: include: - - internal/service/schemas + - "/internal/service/schemas" exclude: - - internal/service/schemas/list_pages_gen.go + - "/internal/service/schemas/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -1550,7 +1565,7 @@ rules: message: Include "Schemas" in test name paths: include: - - internal/service/schemas/*_test.go + - "/internal/service/schemas/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -1565,7 +1580,7 @@ rules: message: Do not use "Schemas" in const name inside schemas package paths: include: - - internal/service/schemas + - "/internal/service/schemas" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -1579,7 +1594,7 @@ rules: message: Do not use "Schemas" in var name inside schemas package paths: include: - - internal/service/schemas + - "/internal/service/schemas" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -1593,9 +1608,9 @@ rules: message: Do not use "SecretsManager" in func name inside secretsmanager package paths: include: - - internal/service/secretsmanager + - "/internal/service/secretsmanager" exclude: - - internal/service/secretsmanager/list_pages_gen.go + - "/internal/service/secretsmanager/list_pages_gen.go" patterns: - pattern: func $NAME( ... 
) - metavariable-pattern: @@ -1611,7 +1626,7 @@ rules: message: Include "SecretsManager" in test name paths: include: - - internal/service/secretsmanager/*_test.go + - "/internal/service/secretsmanager/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -1626,7 +1641,7 @@ rules: message: Do not use "SecretsManager" in const name inside secretsmanager package paths: include: - - internal/service/secretsmanager + - "/internal/service/secretsmanager" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -1640,7 +1655,7 @@ rules: message: Do not use "SecretsManager" in var name inside secretsmanager package paths: include: - - internal/service/secretsmanager + - "/internal/service/secretsmanager" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -1654,9 +1669,9 @@ rules: message: Do not use "SecurityHub" in func name inside securityhub package paths: include: - - internal/service/securityhub + - "/internal/service/securityhub" exclude: - - internal/service/securityhub/list_pages_gen.go + - "/internal/service/securityhub/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -1672,7 +1687,7 @@ rules: message: Include "SecurityHub" in test name paths: include: - - internal/service/securityhub/*_test.go + - "/internal/service/securityhub/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -1687,7 +1702,7 @@ rules: message: Do not use "SecurityHub" in const name inside securityhub package paths: include: - - internal/service/securityhub + - "/internal/service/securityhub" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -1701,7 +1716,7 @@ rules: message: Do not use "SecurityHub" in var name inside securityhub package paths: include: - - internal/service/securityhub + - "/internal/service/securityhub" patterns: - pattern: var $NAME = ... 
- metavariable-pattern: @@ -1715,9 +1730,9 @@ rules: message: Do not use "SecurityLake" in func name inside securitylake package paths: include: - - internal/service/securitylake + - "/internal/service/securitylake" exclude: - - internal/service/securitylake/list_pages_gen.go + - "/internal/service/securitylake/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -1733,7 +1748,7 @@ rules: message: Include "SecurityLake" in test name paths: include: - - internal/service/securitylake/*_test.go + - "/internal/service/securitylake/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -1748,7 +1763,7 @@ rules: message: Do not use "SecurityLake" in const name inside securitylake package paths: include: - - internal/service/securitylake + - "/internal/service/securitylake" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -1762,7 +1777,7 @@ rules: message: Do not use "SecurityLake" in var name inside securitylake package paths: include: - - internal/service/securitylake + - "/internal/service/securitylake" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -1776,9 +1791,9 @@ rules: message: Do not use "serverlessapplicationrepository" in func name inside serverlessrepo package paths: include: - - internal/service/serverlessrepo + - "/internal/service/serverlessrepo" exclude: - - internal/service/serverlessrepo/list_pages_gen.go + - "/internal/service/serverlessrepo/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -1794,7 +1809,7 @@ rules: message: Do not use "serverlessapplicationrepository" in const name inside serverlessrepo package paths: include: - - internal/service/serverlessrepo + - "/internal/service/serverlessrepo" patterns: - pattern: const $NAME = ... 
- metavariable-pattern: @@ -1808,7 +1823,7 @@ rules: message: Do not use "serverlessapplicationrepository" in var name inside serverlessrepo package paths: include: - - internal/service/serverlessrepo + - "/internal/service/serverlessrepo" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -1822,9 +1837,9 @@ rules: message: Do not use "serverlessapprepo" in func name inside serverlessrepo package paths: include: - - internal/service/serverlessrepo + - "/internal/service/serverlessrepo" exclude: - - internal/service/serverlessrepo/list_pages_gen.go + - "/internal/service/serverlessrepo/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -1840,7 +1855,7 @@ rules: message: Do not use "serverlessapprepo" in const name inside serverlessrepo package paths: include: - - internal/service/serverlessrepo + - "/internal/service/serverlessrepo" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -1854,7 +1869,7 @@ rules: message: Do not use "serverlessapprepo" in var name inside serverlessrepo package paths: include: - - internal/service/serverlessrepo + - "/internal/service/serverlessrepo" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -1868,9 +1883,9 @@ rules: message: Do not use "ServerlessRepo" in func name inside serverlessrepo package paths: include: - - internal/service/serverlessrepo + - "/internal/service/serverlessrepo" exclude: - - internal/service/serverlessrepo/list_pages_gen.go + - "/internal/service/serverlessrepo/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -1886,7 +1901,7 @@ rules: message: Include "ServerlessRepo" in test name paths: include: - - internal/service/serverlessrepo/*_test.go + - "/internal/service/serverlessrepo/*_test.go" patterns: - pattern: func $NAME( ... 
) - metavariable-pattern: @@ -1901,7 +1916,7 @@ rules: message: Do not use "ServerlessRepo" in const name inside serverlessrepo package paths: include: - - internal/service/serverlessrepo + - "/internal/service/serverlessrepo" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -1915,7 +1930,7 @@ rules: message: Do not use "ServerlessRepo" in var name inside serverlessrepo package paths: include: - - internal/service/serverlessrepo + - "/internal/service/serverlessrepo" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -1929,9 +1944,9 @@ rules: message: Do not use "ServiceCatalog" in func name inside servicecatalog package paths: include: - - internal/service/servicecatalog + - "/internal/service/servicecatalog" exclude: - - internal/service/servicecatalog/list_pages_gen.go + - "/internal/service/servicecatalog/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -1947,7 +1962,7 @@ rules: message: Include "ServiceCatalog" in test name paths: include: - - internal/service/servicecatalog/*_test.go + - "/internal/service/servicecatalog/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -1962,7 +1977,7 @@ rules: message: Do not use "ServiceCatalog" in const name inside servicecatalog package paths: include: - - internal/service/servicecatalog + - "/internal/service/servicecatalog" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -1976,7 +1991,7 @@ rules: message: Do not use "ServiceCatalog" in var name inside servicecatalog package paths: include: - - internal/service/servicecatalog + - "/internal/service/servicecatalog" patterns: - pattern: var $NAME = ... 
- metavariable-pattern: @@ -1990,9 +2005,9 @@ rules: message: Do not use "ServiceCatalogAppRegistry" in func name inside servicecatalogappregistry package paths: include: - - internal/service/servicecatalogappregistry + - "/internal/service/servicecatalogappregistry" exclude: - - internal/service/servicecatalogappregistry/list_pages_gen.go + - "/internal/service/servicecatalogappregistry/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -2008,7 +2023,7 @@ rules: message: Include "ServiceCatalogAppRegistry" in test name paths: include: - - internal/service/servicecatalogappregistry/*_test.go + - "/internal/service/servicecatalogappregistry/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -2023,7 +2038,7 @@ rules: message: Do not use "ServiceCatalogAppRegistry" in const name inside servicecatalogappregistry package paths: include: - - internal/service/servicecatalogappregistry + - "/internal/service/servicecatalogappregistry" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -2037,7 +2052,7 @@ rules: message: Do not use "ServiceCatalogAppRegistry" in var name inside servicecatalogappregistry package paths: include: - - internal/service/servicecatalogappregistry + - "/internal/service/servicecatalogappregistry" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -2051,9 +2066,9 @@ rules: message: Do not use "ServiceDiscovery" in func name inside servicediscovery package paths: include: - - internal/service/servicediscovery + - "/internal/service/servicediscovery" exclude: - - internal/service/servicediscovery/list_pages_gen.go + - "/internal/service/servicediscovery/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -2069,7 +2084,7 @@ rules: message: Include "ServiceDiscovery" in test name paths: include: - - internal/service/servicediscovery/*_test.go + - "/internal/service/servicediscovery/*_test.go" patterns: - pattern: func $NAME( ... 
) - metavariable-pattern: @@ -2084,7 +2099,7 @@ rules: message: Do not use "ServiceDiscovery" in const name inside servicediscovery package paths: include: - - internal/service/servicediscovery + - "/internal/service/servicediscovery" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -2098,7 +2113,7 @@ rules: message: Do not use "ServiceDiscovery" in var name inside servicediscovery package paths: include: - - internal/service/servicediscovery + - "/internal/service/servicediscovery" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -2112,9 +2127,9 @@ rules: message: Do not use "ServiceQuotas" in func name inside servicequotas package paths: include: - - internal/service/servicequotas + - "/internal/service/servicequotas" exclude: - - internal/service/servicequotas/list_pages_gen.go + - "/internal/service/servicequotas/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -2130,7 +2145,7 @@ rules: message: Include "ServiceQuotas" in test name paths: include: - - internal/service/servicequotas/*_test.go + - "/internal/service/servicequotas/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -2145,7 +2160,7 @@ rules: message: Do not use "ServiceQuotas" in const name inside servicequotas package paths: include: - - internal/service/servicequotas + - "/internal/service/servicequotas" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -2159,7 +2174,7 @@ rules: message: Do not use "ServiceQuotas" in var name inside servicequotas package paths: include: - - internal/service/servicequotas + - "/internal/service/servicequotas" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -2173,9 +2188,9 @@ rules: message: Do not use "SES" in func name inside ses package paths: include: - - internal/service/ses + - "/internal/service/ses" exclude: - - internal/service/ses/list_pages_gen.go + - "/internal/service/ses/list_pages_gen.go" patterns: - pattern: func $NAME( ... 
) - metavariable-pattern: @@ -2191,7 +2206,7 @@ rules: message: Include "SES" in test name paths: include: - - internal/service/ses/*_test.go + - "/internal/service/ses/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -2206,7 +2221,7 @@ rules: message: Do not use "SES" in const name inside ses package paths: include: - - internal/service/ses + - "/internal/service/ses" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -2220,7 +2235,7 @@ rules: message: Do not use "SES" in var name inside ses package paths: include: - - internal/service/ses + - "/internal/service/ses" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -2234,9 +2249,9 @@ rules: message: Do not use "SESV2" in func name inside sesv2 package paths: include: - - internal/service/sesv2 + - "/internal/service/sesv2" exclude: - - internal/service/sesv2/list_pages_gen.go + - "/internal/service/sesv2/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -2252,7 +2267,7 @@ rules: message: Include "SESV2" in test name paths: include: - - internal/service/sesv2/*_test.go + - "/internal/service/sesv2/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -2267,7 +2282,7 @@ rules: message: Do not use "SESV2" in const name inside sesv2 package paths: include: - - internal/service/sesv2 + - "/internal/service/sesv2" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -2281,7 +2296,7 @@ rules: message: Do not use "SESV2" in var name inside sesv2 package paths: include: - - internal/service/sesv2 + - "/internal/service/sesv2" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -2295,9 +2310,9 @@ rules: message: Do not use "SFN" in func name inside sfn package paths: include: - - internal/service/sfn + - "/internal/service/sfn" exclude: - - internal/service/sfn/list_pages_gen.go + - "/internal/service/sfn/list_pages_gen.go" patterns: - pattern: func $NAME( ... 
) - metavariable-pattern: @@ -2313,7 +2328,7 @@ rules: message: Include "SFN" in test name paths: include: - - internal/service/sfn/*_test.go + - "/internal/service/sfn/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -2328,7 +2343,7 @@ rules: message: Do not use "SFN" in const name inside sfn package paths: include: - - internal/service/sfn + - "/internal/service/sfn" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -2342,7 +2357,7 @@ rules: message: Do not use "SFN" in var name inside sfn package paths: include: - - internal/service/sfn + - "/internal/service/sfn" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -2356,9 +2371,9 @@ rules: message: Do not use "Shield" in func name inside shield package paths: include: - - internal/service/shield + - "/internal/service/shield" exclude: - - internal/service/shield/list_pages_gen.go + - "/internal/service/shield/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -2374,7 +2389,7 @@ rules: message: Include "Shield" in test name paths: include: - - internal/service/shield/*_test.go + - "/internal/service/shield/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -2389,7 +2404,7 @@ rules: message: Do not use "Shield" in const name inside shield package paths: include: - - internal/service/shield + - "/internal/service/shield" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -2403,7 +2418,7 @@ rules: message: Do not use "Shield" in var name inside shield package paths: include: - - internal/service/shield + - "/internal/service/shield" patterns: - pattern: var $NAME = ... 
- metavariable-pattern: @@ -2417,9 +2432,9 @@ rules: message: Do not use "Signer" in func name inside signer package paths: include: - - internal/service/signer + - "/internal/service/signer" exclude: - - internal/service/signer/list_pages_gen.go + - "/internal/service/signer/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -2435,7 +2450,7 @@ rules: message: Include "Signer" in test name paths: include: - - internal/service/signer/*_test.go + - "/internal/service/signer/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -2450,7 +2465,7 @@ rules: message: Do not use "Signer" in const name inside signer package paths: include: - - internal/service/signer + - "/internal/service/signer" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -2464,7 +2479,7 @@ rules: message: Do not use "Signer" in var name inside signer package paths: include: - - internal/service/signer + - "/internal/service/signer" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -2478,9 +2493,9 @@ rules: message: Do not use "SNS" in func name inside sns package paths: include: - - internal/service/sns + - "/internal/service/sns" exclude: - - internal/service/sns/list_pages_gen.go + - "/internal/service/sns/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -2496,7 +2511,7 @@ rules: message: Include "SNS" in test name paths: include: - - internal/service/sns/*_test.go + - "/internal/service/sns/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -2511,7 +2526,7 @@ rules: message: Do not use "SNS" in const name inside sns package paths: include: - - internal/service/sns + - "/internal/service/sns" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -2525,7 +2540,7 @@ rules: message: Do not use "SNS" in var name inside sns package paths: include: - - internal/service/sns + - "/internal/service/sns" patterns: - pattern: var $NAME = ... 
- metavariable-pattern: @@ -2539,9 +2554,9 @@ rules: message: Do not use "SQS" in func name inside sqs package paths: include: - - internal/service/sqs + - "/internal/service/sqs" exclude: - - internal/service/sqs/list_pages_gen.go + - "/internal/service/sqs/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -2557,7 +2572,7 @@ rules: message: Include "SQS" in test name paths: include: - - internal/service/sqs/*_test.go + - "/internal/service/sqs/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -2572,7 +2587,7 @@ rules: message: Do not use "SQS" in const name inside sqs package paths: include: - - internal/service/sqs + - "/internal/service/sqs" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -2586,7 +2601,7 @@ rules: message: Do not use "SQS" in var name inside sqs package paths: include: - - internal/service/sqs + - "/internal/service/sqs" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -2600,9 +2615,9 @@ rules: message: Do not use "SSM" in func name inside ssm package paths: include: - - internal/service/ssm + - "/internal/service/ssm" exclude: - - internal/service/ssm/list_pages_gen.go + - "/internal/service/ssm/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -2618,7 +2633,7 @@ rules: message: Include "SSM" in test name paths: include: - - internal/service/ssm/*_test.go + - "/internal/service/ssm/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -2633,7 +2648,7 @@ rules: message: Do not use "SSM" in const name inside ssm package paths: include: - - internal/service/ssm + - "/internal/service/ssm" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -2647,7 +2662,7 @@ rules: message: Do not use "SSM" in var name inside ssm package paths: include: - - internal/service/ssm + - "/internal/service/ssm" patterns: - pattern: var $NAME = ... 
- metavariable-pattern: @@ -2661,9 +2676,9 @@ rules: message: Do not use "SSMContacts" in func name inside ssmcontacts package paths: include: - - internal/service/ssmcontacts + - "/internal/service/ssmcontacts" exclude: - - internal/service/ssmcontacts/list_pages_gen.go + - "/internal/service/ssmcontacts/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -2679,7 +2694,7 @@ rules: message: Include "SSMContacts" in test name paths: include: - - internal/service/ssmcontacts/*_test.go + - "/internal/service/ssmcontacts/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -2694,7 +2709,7 @@ rules: message: Do not use "SSMContacts" in const name inside ssmcontacts package paths: include: - - internal/service/ssmcontacts + - "/internal/service/ssmcontacts" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -2708,7 +2723,7 @@ rules: message: Do not use "SSMContacts" in var name inside ssmcontacts package paths: include: - - internal/service/ssmcontacts + - "/internal/service/ssmcontacts" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -2722,9 +2737,9 @@ rules: message: Do not use "SSMIncidents" in func name inside ssmincidents package paths: include: - - internal/service/ssmincidents + - "/internal/service/ssmincidents" exclude: - - internal/service/ssmincidents/list_pages_gen.go + - "/internal/service/ssmincidents/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -2740,7 +2755,7 @@ rules: message: Include "SSMIncidents" in test name paths: include: - - internal/service/ssmincidents/*_test.go + - "/internal/service/ssmincidents/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -2755,7 +2770,7 @@ rules: message: Do not use "SSMIncidents" in const name inside ssmincidents package paths: include: - - internal/service/ssmincidents + - "/internal/service/ssmincidents" patterns: - pattern: const $NAME = ... 
- metavariable-pattern: @@ -2769,7 +2784,7 @@ rules: message: Do not use "SSMIncidents" in var name inside ssmincidents package paths: include: - - internal/service/ssmincidents + - "/internal/service/ssmincidents" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -2783,9 +2798,9 @@ rules: message: Do not use "SSMQuickSetup" in func name inside ssmquicksetup package paths: include: - - internal/service/ssmquicksetup + - "/internal/service/ssmquicksetup" exclude: - - internal/service/ssmquicksetup/list_pages_gen.go + - "/internal/service/ssmquicksetup/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -2801,7 +2816,7 @@ rules: message: Include "SSMQuickSetup" in test name paths: include: - - internal/service/ssmquicksetup/*_test.go + - "/internal/service/ssmquicksetup/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -2816,7 +2831,7 @@ rules: message: Do not use "SSMQuickSetup" in const name inside ssmquicksetup package paths: include: - - internal/service/ssmquicksetup + - "/internal/service/ssmquicksetup" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -2830,7 +2845,7 @@ rules: message: Do not use "SSMQuickSetup" in var name inside ssmquicksetup package paths: include: - - internal/service/ssmquicksetup + - "/internal/service/ssmquicksetup" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -2844,9 +2859,9 @@ rules: message: Do not use "SSMSAP" in func name inside ssmsap package paths: include: - - internal/service/ssmsap + - "/internal/service/ssmsap" exclude: - - internal/service/ssmsap/list_pages_gen.go + - "/internal/service/ssmsap/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -2862,7 +2877,7 @@ rules: message: Include "SSMSAP" in test name paths: include: - - internal/service/ssmsap/*_test.go + - "/internal/service/ssmsap/*_test.go" patterns: - pattern: func $NAME( ... 
) - metavariable-pattern: @@ -2877,7 +2892,7 @@ rules: message: Do not use "SSMSAP" in const name inside ssmsap package paths: include: - - internal/service/ssmsap + - "/internal/service/ssmsap" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -2891,7 +2906,7 @@ rules: message: Do not use "SSMSAP" in var name inside ssmsap package paths: include: - - internal/service/ssmsap + - "/internal/service/ssmsap" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -2905,9 +2920,9 @@ rules: message: Do not use "SSO" in func name inside sso package paths: include: - - internal/service/sso + - "/internal/service/sso" exclude: - - internal/service/sso/list_pages_gen.go + - "/internal/service/sso/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -2923,7 +2938,7 @@ rules: message: Include "SSO" in test name paths: include: - - internal/service/sso/*_test.go + - "/internal/service/sso/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -2938,7 +2953,7 @@ rules: message: Do not use "SSO" in const name inside sso package paths: include: - - internal/service/sso + - "/internal/service/sso" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -2952,7 +2967,7 @@ rules: message: Do not use "SSO" in var name inside sso package paths: include: - - internal/service/sso + - "/internal/service/sso" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -2966,9 +2981,9 @@ rules: message: Do not use "SSOAdmin" in func name inside ssoadmin package paths: include: - - internal/service/ssoadmin + - "/internal/service/ssoadmin" exclude: - - internal/service/ssoadmin/list_pages_gen.go + - "/internal/service/ssoadmin/list_pages_gen.go" patterns: - pattern: func $NAME( ... 
) - metavariable-pattern: @@ -2984,7 +2999,7 @@ rules: message: Include "SSOAdmin" in test name paths: include: - - internal/service/ssoadmin/*_test.go + - "/internal/service/ssoadmin/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -2999,7 +3014,7 @@ rules: message: Do not use "SSOAdmin" in const name inside ssoadmin package paths: include: - - internal/service/ssoadmin + - "/internal/service/ssoadmin" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -3013,7 +3028,7 @@ rules: message: Do not use "SSOAdmin" in var name inside ssoadmin package paths: include: - - internal/service/ssoadmin + - "/internal/service/ssoadmin" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -3027,9 +3042,9 @@ rules: message: Do not use "stepfunctions" in func name inside sfn package paths: include: - - internal/service/sfn + - "/internal/service/sfn" exclude: - - internal/service/sfn/list_pages_gen.go + - "/internal/service/sfn/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -3045,7 +3060,7 @@ rules: message: Do not use "stepfunctions" in const name inside sfn package paths: include: - - internal/service/sfn + - "/internal/service/sfn" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -3059,7 +3074,7 @@ rules: message: Do not use "stepfunctions" in var name inside sfn package paths: include: - - internal/service/sfn + - "/internal/service/sfn" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -3073,9 +3088,9 @@ rules: message: Do not use "StorageGateway" in func name inside storagegateway package paths: include: - - internal/service/storagegateway + - "/internal/service/storagegateway" exclude: - - internal/service/storagegateway/list_pages_gen.go + - "/internal/service/storagegateway/list_pages_gen.go" patterns: - pattern: func $NAME( ... 
) - metavariable-pattern: @@ -3091,7 +3106,7 @@ rules: message: Include "StorageGateway" in test name paths: include: - - internal/service/storagegateway/*_test.go + - "/internal/service/storagegateway/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -3106,7 +3121,7 @@ rules: message: Do not use "StorageGateway" in const name inside storagegateway package paths: include: - - internal/service/storagegateway + - "/internal/service/storagegateway" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -3120,7 +3135,7 @@ rules: message: Do not use "StorageGateway" in var name inside storagegateway package paths: include: - - internal/service/storagegateway + - "/internal/service/storagegateway" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -3134,9 +3149,9 @@ rules: message: Do not use "STS" in func name inside sts package paths: include: - - internal/service/sts + - "/internal/service/sts" exclude: - - internal/service/sts/list_pages_gen.go + - "/internal/service/sts/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -3152,7 +3167,7 @@ rules: message: Include "STS" in test name paths: include: - - internal/service/sts/*_test.go + - "/internal/service/sts/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -3167,7 +3182,7 @@ rules: message: Do not use "STS" in const name inside sts package paths: include: - - internal/service/sts + - "/internal/service/sts" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -3181,7 +3196,7 @@ rules: message: Do not use "STS" in var name inside sts package paths: include: - - internal/service/sts + - "/internal/service/sts" patterns: - pattern: var $NAME = ... 
- metavariable-pattern: @@ -3195,9 +3210,9 @@ rules: message: Do not use "SWF" in func name inside swf package paths: include: - - internal/service/swf + - "/internal/service/swf" exclude: - - internal/service/swf/list_pages_gen.go + - "/internal/service/swf/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -3213,7 +3228,7 @@ rules: message: Include "SWF" in test name paths: include: - - internal/service/swf/*_test.go + - "/internal/service/swf/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -3228,7 +3243,7 @@ rules: message: Do not use "SWF" in const name inside swf package paths: include: - - internal/service/swf + - "/internal/service/swf" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -3242,7 +3257,7 @@ rules: message: Do not use "SWF" in var name inside swf package paths: include: - - internal/service/swf + - "/internal/service/swf" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -3256,9 +3271,9 @@ rules: message: Do not use "Synthetics" in func name inside synthetics package paths: include: - - internal/service/synthetics + - "/internal/service/synthetics" exclude: - - internal/service/synthetics/list_pages_gen.go + - "/internal/service/synthetics/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -3274,7 +3289,7 @@ rules: message: Include "Synthetics" in test name paths: include: - - internal/service/synthetics/*_test.go + - "/internal/service/synthetics/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -3289,7 +3304,7 @@ rules: message: Do not use "Synthetics" in const name inside synthetics package paths: include: - - internal/service/synthetics + - "/internal/service/synthetics" patterns: - pattern: const $NAME = ... 
- metavariable-pattern: @@ -3303,7 +3318,7 @@ rules: message: Do not use "Synthetics" in var name inside synthetics package paths: include: - - internal/service/synthetics + - "/internal/service/synthetics" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -3317,9 +3332,9 @@ rules: message: Do not use "TaxSettings" in func name inside taxsettings package paths: include: - - internal/service/taxsettings + - "/internal/service/taxsettings" exclude: - - internal/service/taxsettings/list_pages_gen.go + - "/internal/service/taxsettings/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -3335,7 +3350,7 @@ rules: message: Include "TaxSettings" in test name paths: include: - - internal/service/taxsettings/*_test.go + - "/internal/service/taxsettings/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -3350,7 +3365,7 @@ rules: message: Do not use "TaxSettings" in const name inside taxsettings package paths: include: - - internal/service/taxsettings + - "/internal/service/taxsettings" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -3364,7 +3379,7 @@ rules: message: Do not use "TaxSettings" in var name inside taxsettings package paths: include: - - internal/service/taxsettings + - "/internal/service/taxsettings" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -3378,9 +3393,9 @@ rules: message: Do not use "TimestreamInfluxDB" in func name inside timestreaminfluxdb package paths: include: - - internal/service/timestreaminfluxdb + - "/internal/service/timestreaminfluxdb" exclude: - - internal/service/timestreaminfluxdb/list_pages_gen.go + - "/internal/service/timestreaminfluxdb/list_pages_gen.go" patterns: - pattern: func $NAME( ... 
) - metavariable-pattern: @@ -3396,7 +3411,7 @@ rules: message: Include "TimestreamInfluxDB" in test name paths: include: - - internal/service/timestreaminfluxdb/*_test.go + - "/internal/service/timestreaminfluxdb/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -3411,7 +3426,7 @@ rules: message: Do not use "TimestreamInfluxDB" in const name inside timestreaminfluxdb package paths: include: - - internal/service/timestreaminfluxdb + - "/internal/service/timestreaminfluxdb" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -3425,7 +3440,7 @@ rules: message: Do not use "TimestreamInfluxDB" in var name inside timestreaminfluxdb package paths: include: - - internal/service/timestreaminfluxdb + - "/internal/service/timestreaminfluxdb" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -3439,9 +3454,9 @@ rules: message: Do not use "TimestreamQuery" in func name inside timestreamquery package paths: include: - - internal/service/timestreamquery + - "/internal/service/timestreamquery" exclude: - - internal/service/timestreamquery/list_pages_gen.go + - "/internal/service/timestreamquery/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -3457,7 +3472,7 @@ rules: message: Include "TimestreamQuery" in test name paths: include: - - internal/service/timestreamquery/*_test.go + - "/internal/service/timestreamquery/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -3472,7 +3487,7 @@ rules: message: Do not use "TimestreamQuery" in const name inside timestreamquery package paths: include: - - internal/service/timestreamquery + - "/internal/service/timestreamquery" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -3486,7 +3501,7 @@ rules: message: Do not use "TimestreamQuery" in var name inside timestreamquery package paths: include: - - internal/service/timestreamquery + - "/internal/service/timestreamquery" patterns: - pattern: var $NAME = ... 
- metavariable-pattern: @@ -3500,9 +3515,9 @@ rules: message: Do not use "TimestreamWrite" in func name inside timestreamwrite package paths: include: - - internal/service/timestreamwrite + - "/internal/service/timestreamwrite" exclude: - - internal/service/timestreamwrite/list_pages_gen.go + - "/internal/service/timestreamwrite/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -3518,7 +3533,7 @@ rules: message: Include "TimestreamWrite" in test name paths: include: - - internal/service/timestreamwrite/*_test.go + - "/internal/service/timestreamwrite/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -3533,7 +3548,7 @@ rules: message: Do not use "TimestreamWrite" in const name inside timestreamwrite package paths: include: - - internal/service/timestreamwrite + - "/internal/service/timestreamwrite" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -3547,7 +3562,7 @@ rules: message: Do not use "TimestreamWrite" in var name inside timestreamwrite package paths: include: - - internal/service/timestreamwrite + - "/internal/service/timestreamwrite" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -3561,9 +3576,9 @@ rules: message: Do not use "Transcribe" in func name inside transcribe package paths: include: - - internal/service/transcribe + - "/internal/service/transcribe" exclude: - - internal/service/transcribe/list_pages_gen.go + - "/internal/service/transcribe/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -3579,7 +3594,7 @@ rules: message: Include "Transcribe" in test name paths: include: - - internal/service/transcribe/*_test.go + - "/internal/service/transcribe/*_test.go" patterns: - pattern: func $NAME( ... 
) - metavariable-pattern: @@ -3594,7 +3609,7 @@ rules: message: Do not use "Transcribe" in const name inside transcribe package paths: include: - - internal/service/transcribe + - "/internal/service/transcribe" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -3608,7 +3623,7 @@ rules: message: Do not use "Transcribe" in var name inside transcribe package paths: include: - - internal/service/transcribe + - "/internal/service/transcribe" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -3622,9 +3637,9 @@ rules: message: Do not use "transcribeservice" in func name inside transcribe package paths: include: - - internal/service/transcribe + - "/internal/service/transcribe" exclude: - - internal/service/transcribe/list_pages_gen.go + - "/internal/service/transcribe/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -3640,7 +3655,7 @@ rules: message: Do not use "transcribeservice" in const name inside transcribe package paths: include: - - internal/service/transcribe + - "/internal/service/transcribe" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -3654,7 +3669,7 @@ rules: message: Do not use "transcribeservice" in var name inside transcribe package paths: include: - - internal/service/transcribe + - "/internal/service/transcribe" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -3668,9 +3683,9 @@ rules: message: Do not use "Transfer" in func name inside transfer package paths: include: - - internal/service/transfer + - "/internal/service/transfer" exclude: - - internal/service/transfer/list_pages_gen.go + - "/internal/service/transfer/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -3686,7 +3701,7 @@ rules: message: Include "Transfer" in test name paths: include: - - internal/service/transfer/*_test.go + - "/internal/service/transfer/*_test.go" patterns: - pattern: func $NAME( ... 
) - metavariable-pattern: @@ -3701,7 +3716,7 @@ rules: message: Do not use "Transfer" in const name inside transfer package paths: include: - - internal/service/transfer + - "/internal/service/transfer" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -3715,7 +3730,7 @@ rules: message: Do not use "Transfer" in var name inside transfer package paths: include: - - internal/service/transfer + - "/internal/service/transfer" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -3729,7 +3744,7 @@ rules: message: Include "TransitGateway" in test name paths: include: - - internal/service/ec2/transitgateway_*_test.go + - "/internal/service/ec2/transitgateway_*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -3744,7 +3759,7 @@ rules: message: Include "VerifiedAccess" in test name paths: include: - - internal/service/ec2/verifiedaccess_*_test.go + - "/internal/service/ec2/verifiedaccess_*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -3759,9 +3774,9 @@ rules: message: Do not use "VerifiedPermissions" in func name inside verifiedpermissions package paths: include: - - internal/service/verifiedpermissions + - "/internal/service/verifiedpermissions" exclude: - - internal/service/verifiedpermissions/list_pages_gen.go + - "/internal/service/verifiedpermissions/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -3777,7 +3792,7 @@ rules: message: Include "VerifiedPermissions" in test name paths: include: - - internal/service/verifiedpermissions/*_test.go + - "/internal/service/verifiedpermissions/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -3792,7 +3807,7 @@ rules: message: Do not use "VerifiedPermissions" in const name inside verifiedpermissions package paths: include: - - internal/service/verifiedpermissions + - "/internal/service/verifiedpermissions" patterns: - pattern: const $NAME = ... 
- metavariable-pattern: @@ -3806,7 +3821,7 @@ rules: message: Do not use "VerifiedPermissions" in var name inside verifiedpermissions package paths: include: - - internal/service/verifiedpermissions + - "/internal/service/verifiedpermissions" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -3820,7 +3835,7 @@ rules: message: Include "VPC" in test name paths: include: - - internal/service/ec2/vpc_*_test.go + - "/internal/service/ec2/vpc_*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -3835,9 +3850,9 @@ rules: message: Do not use "VPCLattice" in func name inside vpclattice package paths: include: - - internal/service/vpclattice + - "/internal/service/vpclattice" exclude: - - internal/service/vpclattice/list_pages_gen.go + - "/internal/service/vpclattice/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -3853,7 +3868,7 @@ rules: message: Include "VPCLattice" in test name paths: include: - - internal/service/vpclattice/*_test.go + - "/internal/service/vpclattice/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -3868,7 +3883,7 @@ rules: message: Do not use "VPCLattice" in const name inside vpclattice package paths: include: - - internal/service/vpclattice + - "/internal/service/vpclattice" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -3882,7 +3897,7 @@ rules: message: Do not use "VPCLattice" in var name inside vpclattice package paths: include: - - internal/service/vpclattice + - "/internal/service/vpclattice" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -3896,7 +3911,7 @@ rules: message: Include "ClientVPN" in test name paths: include: - - internal/service/ec2/vpnclient_*_test.go + - "/internal/service/ec2/vpnclient_*_test.go" patterns: - pattern: func $NAME( ... 
) - metavariable-pattern: @@ -3911,7 +3926,7 @@ rules: message: Include "SiteVPN" in test name paths: include: - - internal/service/ec2/vpnsite_*_test.go + - "/internal/service/ec2/vpnsite_*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -3926,9 +3941,9 @@ rules: message: Do not use "WAF" in func name inside waf package paths: include: - - internal/service/waf + - "/internal/service/waf" exclude: - - internal/service/waf/list_pages_gen.go + - "/internal/service/waf/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -3944,7 +3959,7 @@ rules: message: Include "WAF" in test name paths: include: - - internal/service/waf/*_test.go + - "/internal/service/waf/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -3959,7 +3974,7 @@ rules: message: Do not use "WAF" in const name inside waf package paths: include: - - internal/service/waf + - "/internal/service/waf" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -3973,7 +3988,7 @@ rules: message: Do not use "WAF" in var name inside waf package paths: include: - - internal/service/waf + - "/internal/service/waf" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -3987,9 +4002,9 @@ rules: message: Do not use "WAFRegional" in func name inside wafregional package paths: include: - - internal/service/wafregional + - "/internal/service/wafregional" exclude: - - internal/service/wafregional/list_pages_gen.go + - "/internal/service/wafregional/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -4005,7 +4020,7 @@ rules: message: Include "WAFRegional" in test name paths: include: - - internal/service/wafregional/*_test.go + - "/internal/service/wafregional/*_test.go" patterns: - pattern: func $NAME( ... 
) - metavariable-pattern: @@ -4020,7 +4035,7 @@ rules: message: Do not use "WAFRegional" in const name inside wafregional package paths: include: - - internal/service/wafregional + - "/internal/service/wafregional" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -4034,7 +4049,7 @@ rules: message: Do not use "WAFRegional" in var name inside wafregional package paths: include: - - internal/service/wafregional + - "/internal/service/wafregional" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -4048,9 +4063,9 @@ rules: message: Do not use "WAFV2" in func name inside wafv2 package paths: include: - - internal/service/wafv2 + - "/internal/service/wafv2" exclude: - - internal/service/wafv2/list_pages_gen.go + - "/internal/service/wafv2/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -4066,7 +4081,7 @@ rules: message: Include "WAFV2" in test name paths: include: - - internal/service/wafv2/*_test.go + - "/internal/service/wafv2/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -4081,7 +4096,7 @@ rules: message: Do not use "WAFV2" in const name inside wafv2 package paths: include: - - internal/service/wafv2 + - "/internal/service/wafv2" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -4095,7 +4110,7 @@ rules: message: Do not use "WAFV2" in var name inside wafv2 package paths: include: - - internal/service/wafv2 + - "/internal/service/wafv2" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -4109,7 +4124,7 @@ rules: message: Include "Wavelength" in test name paths: include: - - internal/service/ec2/wavelength_*_test.go + - "/internal/service/ec2/wavelength_*_test.go" patterns: - pattern: func $NAME( ... 
) - metavariable-pattern: @@ -4124,9 +4139,9 @@ rules: message: Do not use "WellArchitected" in func name inside wellarchitected package paths: include: - - internal/service/wellarchitected + - "/internal/service/wellarchitected" exclude: - - internal/service/wellarchitected/list_pages_gen.go + - "/internal/service/wellarchitected/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -4142,7 +4157,7 @@ rules: message: Include "WellArchitected" in test name paths: include: - - internal/service/wellarchitected/*_test.go + - "/internal/service/wellarchitected/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -4157,7 +4172,7 @@ rules: message: Do not use "WellArchitected" in const name inside wellarchitected package paths: include: - - internal/service/wellarchitected + - "/internal/service/wellarchitected" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -4171,7 +4186,7 @@ rules: message: Do not use "WellArchitected" in var name inside wellarchitected package paths: include: - - internal/service/wellarchitected + - "/internal/service/wellarchitected" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -4179,15 +4194,76 @@ rules: patterns: - pattern-regex: "(?i)WellArchitected" severity: WARNING + - id: workmail-in-func-name + languages: + - go + message: Do not use "WorkMail" in func name inside workmail package + paths: + include: + - "/internal/service/workmail" + exclude: + - "/internal/service/workmail/list_pages_gen.go" + patterns: + - pattern: func $NAME( ... ) + - metavariable-pattern: + metavariable: $NAME + patterns: + - pattern-regex: "(?i)WorkMail" + - focus-metavariable: $NAME + - pattern-not: func $NAME($T *testing.T) + severity: WARNING + - id: workmail-in-test-name + languages: + - go + message: Include "WorkMail" in test name + paths: + include: + - "/internal/service/workmail/*_test.go" + patterns: + - pattern: func $NAME( ... 
) + - metavariable-pattern: + metavariable: $NAME + patterns: + - pattern-not-regex: "^TestAccWorkMail" + - pattern-regex: ^TestAcc.* + severity: WARNING + - id: workmail-in-const-name + languages: + - go + message: Do not use "WorkMail" in const name inside workmail package + paths: + include: + - "/internal/service/workmail" + patterns: + - pattern: const $NAME = ... + - metavariable-pattern: + metavariable: $NAME + patterns: + - pattern-regex: "(?i)WorkMail" + severity: WARNING + - id: workmail-in-var-name + languages: + - go + message: Do not use "WorkMail" in var name inside workmail package + paths: + include: + - "/internal/service/workmail" + patterns: + - pattern: var $NAME = ... + - metavariable-pattern: + metavariable: $NAME + patterns: + - pattern-regex: "(?i)WorkMail" + severity: WARNING - id: workspaces-in-func-name languages: - go message: Do not use "WorkSpaces" in func name inside workspaces package paths: include: - - internal/service/workspaces + - "/internal/service/workspaces" exclude: - - internal/service/workspaces/list_pages_gen.go + - "/internal/service/workspaces/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -4203,7 +4279,7 @@ rules: message: Include "WorkSpaces" in test name paths: include: - - internal/service/workspaces/*_test.go + - "/internal/service/workspaces/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -4218,7 +4294,7 @@ rules: message: Do not use "WorkSpaces" in const name inside workspaces package paths: include: - - internal/service/workspaces + - "/internal/service/workspaces" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -4232,7 +4308,7 @@ rules: message: Do not use "WorkSpaces" in var name inside workspaces package paths: include: - - internal/service/workspaces + - "/internal/service/workspaces" patterns: - pattern: var $NAME = ... 
- metavariable-pattern: @@ -4246,9 +4322,9 @@ rules: message: Do not use "WorkSpacesWeb" in func name inside workspacesweb package paths: include: - - internal/service/workspacesweb + - "/internal/service/workspacesweb" exclude: - - internal/service/workspacesweb/list_pages_gen.go + - "/internal/service/workspacesweb/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -4264,7 +4340,7 @@ rules: message: Include "WorkSpacesWeb" in test name paths: include: - - internal/service/workspacesweb/*_test.go + - "/internal/service/workspacesweb/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -4279,7 +4355,7 @@ rules: message: Do not use "WorkSpacesWeb" in const name inside workspacesweb package paths: include: - - internal/service/workspacesweb + - "/internal/service/workspacesweb" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -4293,7 +4369,7 @@ rules: message: Do not use "WorkSpacesWeb" in var name inside workspacesweb package paths: include: - - internal/service/workspacesweb + - "/internal/service/workspacesweb" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -4307,9 +4383,9 @@ rules: message: Do not use "XRay" in func name inside xray package paths: include: - - internal/service/xray + - "/internal/service/xray" exclude: - - internal/service/xray/list_pages_gen.go + - "/internal/service/xray/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -4325,7 +4401,7 @@ rules: message: Include "XRay" in test name paths: include: - - internal/service/xray/*_test.go + - "/internal/service/xray/*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -4340,7 +4416,7 @@ rules: message: Do not use "XRay" in const name inside xray package paths: include: - - internal/service/xray + - "/internal/service/xray" patterns: - pattern: const $NAME = ... 
- metavariable-pattern: @@ -4354,7 +4430,7 @@ rules: message: Do not use "XRay" in var name inside xray package paths: include: - - internal/service/xray + - "/internal/service/xray" patterns: - pattern: var $NAME = ... - metavariable-pattern: diff --git a/.ci/.semgrep-test-constants.yml b/.ci/.semgrep-test-constants.yml index 60bffe939bbb..6d3a4ee0f0c7 100644 --- a/.ci/.semgrep-test-constants.yml +++ b/.ci/.semgrep-test-constants.yml @@ -5,7 +5,7 @@ rules: message: Use the constant `acctest.Ct12Digit` for the string literal "123456789012" in test files paths: include: - - "internal/service/**/*_test.go" + - "/internal/service/**/*_test.go" pattern: '"123456789012"' severity: ERROR fix: "acctest.Ct12Digit" @@ -17,7 +17,7 @@ rules: message: Use the constant `acctest.CtBasic` for the string literal "basic" in test files paths: include: - - "internal/service/**/*_test.go" + - "/internal/service/**/*_test.go" pattern: '"basic"' severity: ERROR fix: "acctest.CtBasic" @@ -29,7 +29,7 @@ rules: message: Use the constant `acctest.CtCertificatePEM` for the string literal "certificate_pem" in test files paths: include: - - "internal/service/**/*_test.go" + - "/internal/service/**/*_test.go" pattern: '"certificate_pem"' severity: ERROR fix: "acctest.CtCertificatePEM" @@ -41,7 +41,7 @@ rules: message: Use the constant `acctest.CtDisappears` for the string literal "disappears" in test files paths: include: - - "internal/service/**/*_test.go" + - "/internal/service/**/*_test.go" pattern: '"disappears"' severity: ERROR fix: "acctest.CtDisappears" @@ -53,7 +53,7 @@ rules: message: Use the constant `acctest.CtFalse` for the string literal "false" in test files paths: include: - - "internal/service/**/*_test.go" + - "/internal/service/**/*_test.go" pattern: '"false"' severity: ERROR fix: "acctest.CtFalse" @@ -65,7 +65,7 @@ rules: message: Use the constant `acctest.CtFalseCaps` for the string literal "FALSE" in test files paths: include: - - "internal/service/**/*_test.go" + - 
"/internal/service/**/*_test.go" pattern: '"FALSE"' severity: ERROR fix: "acctest.CtFalseCaps" @@ -77,7 +77,7 @@ rules: message: Use the constant `acctest.CtKey1` for the string literal "key1" in test files paths: include: - - "internal/service/**/*_test.go" + - "/internal/service/**/*_test.go" pattern: '"key1"' severity: ERROR fix: "acctest.CtKey1" @@ -89,7 +89,7 @@ rules: message: Use the constant `acctest.CtKey2` for the string literal "key2" in test files paths: include: - - "internal/service/**/*_test.go" + - "/internal/service/**/*_test.go" pattern: '"key2"' severity: ERROR fix: "acctest.CtKey2" @@ -101,7 +101,7 @@ rules: message: Use the constant `acctest.CtName` for the string literal "name" in test files paths: include: - - "internal/service/**/*_test.go" + - "/internal/service/**/*_test.go" pattern: '"name"' severity: ERROR fix: "acctest.CtName" @@ -113,7 +113,7 @@ rules: message: Use the constant `acctest.CtOverlapKey1` for the string literal "overlapkey1" in test files paths: include: - - "internal/service/**/*_test.go" + - "/internal/service/**/*_test.go" pattern: '"overlapkey1"' severity: ERROR fix: "acctest.CtOverlapKey1" @@ -125,7 +125,7 @@ rules: message: Use the constant `acctest.CtOverlapKey2` for the string literal "overlapkey2" in test files paths: include: - - "internal/service/**/*_test.go" + - "/internal/service/**/*_test.go" pattern: '"overlapkey2"' severity: ERROR fix: "acctest.CtOverlapKey2" @@ -137,7 +137,7 @@ rules: message: Use the constant `acctest.CtPrivateKeyPEM` for the string literal "private_key_pem" in test files paths: include: - - "internal/service/**/*_test.go" + - "/internal/service/**/*_test.go" pattern: '"private_key_pem"' severity: ERROR fix: "acctest.CtPrivateKeyPEM" @@ -149,7 +149,7 @@ rules: message: Use the constant `acctest.CtProviderKey1` for the string literal "providerkey1" in test files paths: include: - - "internal/service/**/*_test.go" + - "/internal/service/**/*_test.go" pattern: '"providerkey1"' severity: 
ERROR fix: "acctest.CtProviderKey1" @@ -161,7 +161,7 @@ rules: message: Use the constant `acctest.CtProviderTags` for the string literal "provider_tags" in test files paths: include: - - "internal/service/**/*_test.go" + - "/internal/service/**/*_test.go" pattern: '"provider_tags"' severity: ERROR fix: "acctest.CtProviderTags" @@ -173,7 +173,7 @@ rules: message: Use the constant `acctest.CtProviderValue1` for the string literal "providervalue1" in test files paths: include: - - "internal/service/**/*_test.go" + - "/internal/service/**/*_test.go" pattern: '"providervalue1"' severity: ERROR fix: "acctest.CtProviderValue1" @@ -185,7 +185,7 @@ rules: message: Use the constant `acctest.CtProviderValue1Again` for the string literal "providervalue1again" in test files paths: include: - - "internal/service/**/*_test.go" + - "/internal/service/**/*_test.go" pattern: '"providervalue1again"' severity: ERROR fix: "acctest.CtProviderValue1Again" @@ -197,7 +197,7 @@ rules: message: Use the constant `acctest.CtProviderValue1Updated` for the string literal "providervalue1updated" in test files paths: include: - - "internal/service/**/*_test.go" + - "/internal/service/**/*_test.go" pattern: '"providervalue1updated"' severity: ERROR fix: "acctest.CtProviderValue1Updated" @@ -209,7 +209,7 @@ rules: message: Use the constant `acctest.CtRName` for the string literal "rName" in test files paths: include: - - "internal/service/**/*_test.go" + - "/internal/service/**/*_test.go" pattern: '"rName"' severity: ERROR fix: "acctest.CtRName" @@ -221,7 +221,7 @@ rules: message: Use the constant `acctest.CtResourceKey1` for the string literal "resourcekey1" in test files paths: include: - - "internal/service/**/*_test.go" + - "/internal/service/**/*_test.go" pattern: '"resourcekey1"' severity: ERROR fix: "acctest.CtResourceKey1" @@ -233,7 +233,7 @@ rules: message: Use the constant `acctest.CtResourceKey2` for the string literal "resourcekey2" in test files paths: include: - - 
"internal/service/**/*_test.go" + - "/internal/service/**/*_test.go" pattern: '"resourcekey2"' severity: ERROR fix: "acctest.CtResourceKey2" @@ -245,7 +245,7 @@ rules: message: Use the constant `acctest.CtResourceOwner` for the string literal "resource_owner" in test files paths: include: - - "internal/service/**/*_test.go" + - "/internal/service/**/*_test.go" pattern: '"resource_owner"' severity: ERROR fix: "acctest.CtResourceOwner" @@ -257,7 +257,7 @@ rules: message: Use the constant `acctest.CtResourceTags` for the string literal "resource_tags" in test files paths: include: - - "internal/service/**/*_test.go" + - "/internal/service/**/*_test.go" pattern: '"resource_tags"' severity: ERROR fix: "acctest.CtResourceTags" @@ -269,7 +269,7 @@ rules: message: Use the constant `acctest.CtResourceValue1` for the string literal "resourcevalue1" in test files paths: include: - - "internal/service/**/*_test.go" + - "/internal/service/**/*_test.go" pattern: '"resourcevalue1"' severity: ERROR fix: "acctest.CtResourceValue1" @@ -281,7 +281,7 @@ rules: message: Use the constant `acctest.CtResourceValue1Again` for the string literal "resourcevalue1again" in test files paths: include: - - "internal/service/**/*_test.go" + - "/internal/service/**/*_test.go" pattern: '"resourcevalue1again"' severity: ERROR fix: "acctest.CtResourceValue1Again" @@ -293,7 +293,7 @@ rules: message: Use the constant `acctest.CtResourceValue1Updated` for the string literal "resourcevalue1updated" in test files paths: include: - - "internal/service/**/*_test.go" + - "/internal/service/**/*_test.go" pattern: '"resourcevalue1updated"' severity: ERROR fix: "acctest.CtResourceValue1Updated" @@ -305,7 +305,7 @@ rules: message: Use the constant `acctest.CtResourceValue2` for the string literal "resourcevalue2" in test files paths: include: - - "internal/service/**/*_test.go" + - "/internal/service/**/*_test.go" pattern: '"resourcevalue2"' severity: ERROR fix: "acctest.CtResourceValue2" @@ -317,7 +317,7 @@ 
rules: message: Use the constant `acctest.CtResourceValue2Updated` for the string literal "resourcevalue2updated" in test files paths: include: - - "internal/service/**/*_test.go" + - "/internal/service/**/*_test.go" pattern: '"resourcevalue2updated"' severity: ERROR fix: "acctest.CtResourceValue2Updated" @@ -329,7 +329,7 @@ rules: message: Use the constant `acctest.CtRulePound` for the string literal "rule.#" in test files paths: include: - - "internal/service/**/*_test.go" + - "/internal/service/**/*_test.go" pattern: '"rule.#"' severity: ERROR fix: "acctest.CtRulePound" @@ -341,7 +341,7 @@ rules: message: Use the constant `acctest.CtTagsAllPercent` for the string literal "tags_all.%" in test files paths: include: - - "internal/service/**/*_test.go" + - "/internal/service/**/*_test.go" pattern: '"tags_all.%"' severity: ERROR fix: "acctest.CtTagsAllPercent" @@ -353,7 +353,7 @@ rules: message: Use the constant `acctest.CtTagsKey1` for the string literal "tags.key1" in test files paths: include: - - "internal/service/**/*_test.go" + - "/internal/service/**/*_test.go" pattern: '"tags.key1"' severity: ERROR fix: "acctest.CtTagsKey1" @@ -365,7 +365,7 @@ rules: message: Use the constant `acctest.CtTagsKey2` for the string literal "tags.key2" in test files paths: include: - - "internal/service/**/*_test.go" + - "/internal/service/**/*_test.go" pattern: '"tags.key2"' severity: ERROR fix: "acctest.CtTagsKey2" @@ -377,7 +377,7 @@ rules: message: Use the constant `acctest.CtTagsPercent` for the string literal "tags.%" in test files paths: include: - - "internal/service/**/*_test.go" + - "/internal/service/**/*_test.go" pattern: '"tags.%"' severity: ERROR fix: "acctest.CtTagsPercent" @@ -389,7 +389,7 @@ rules: message: Use the constant `acctest.CtTrue` for the string literal "true" in test files paths: include: - - "internal/service/**/*_test.go" + - "/internal/service/**/*_test.go" pattern: '"true"' severity: ERROR fix: "acctest.CtTrue" @@ -401,7 +401,7 @@ rules: message: 
Use the constant `acctest.CtTrueCaps` for the string literal "TRUE" in test files paths: include: - - "internal/service/**/*_test.go" + - "/internal/service/**/*_test.go" pattern: '"TRUE"' severity: ERROR fix: "acctest.CtTrueCaps" @@ -413,7 +413,7 @@ rules: message: Use the constant `acctest.CtValue1` for the string literal "value1" in test files paths: include: - - "internal/service/**/*_test.go" + - "/internal/service/**/*_test.go" pattern: '"value1"' severity: ERROR fix: "acctest.CtValue1" @@ -425,7 +425,7 @@ rules: message: Use the constant `acctest.CtValue1Updated` for the string literal "value1updated" in test files paths: include: - - "internal/service/**/*_test.go" + - "/internal/service/**/*_test.go" pattern: '"value1updated"' severity: ERROR fix: "acctest.CtValue1Updated" @@ -437,7 +437,7 @@ rules: message: Use the constant `acctest.CtValue2` for the string literal "value2" in test files paths: include: - - "internal/service/**/*_test.go" + - "/internal/service/**/*_test.go" pattern: '"value2"' severity: ERROR fix: "acctest.CtValue2" diff --git a/.ci/.semgrep.yml b/.ci/.semgrep.yml index 8efaa4b8f920..818b0792bede 100644 --- a/.ci/.semgrep.yml +++ b/.ci/.semgrep.yml @@ -4,7 +4,7 @@ rules: message: Prefer naming acceptance tests with _disappears_Parent suffix paths: include: - - "internal/**/*_test.go" + - "/internal/**/*_test.go" patterns: - pattern: func $FUNCNAME(t *testing.T) { ... } - metavariable-regex: @@ -18,7 +18,7 @@ rules: message: Calling a resource's Read method from within a data-source is discouraged paths: include: - - internal/service/**/*_data_source.go + - "/internal/service/**/*_data_source.go" patterns: - pattern-regex: "(resource.+Read|flatten.+Resource)" - pattern-inside: func $FUNCNAME(...) $RETURNTYPE { ... 
} @@ -36,7 +36,7 @@ rules: message: Using `acctest.RandInt()` in constant or variable declaration will execute during compilation and not randomize, pass into string generating function instead paths: include: - - internal/ + - "/internal/" patterns: - pattern-either: - pattern: const $CONST = fmt.Sprintf(..., <... acctest.RandInt() ...>, ...) @@ -48,7 +48,7 @@ rules: message: Using `acctest.RandString()` in constant or variable declaration will execute during compilation and not randomize, pass into string generating function instead paths: include: - - internal/ + - "/internal" patterns: - pattern-either: - pattern: const $CONST = fmt.Sprintf(..., <... acctest.RandString(...) ...>, ...) @@ -60,7 +60,7 @@ rules: message: Using `acctest.RandomWithPrefix()` in constant or variable declaration will execute during compilation and not randomize, pass into string generating function instead paths: include: - - internal/ + - "/internal" patterns: - pattern-either: - pattern: const $CONST = fmt.Sprintf(..., <... acctest.RandomWithPrefix(...) ...>, ...) @@ -72,9 +72,9 @@ rules: message: Elem must be either a *schema.Schema or *schema.Resource type paths: include: - - internal/service/**/*.go + - "/internal/service/**/*.go" exclude: - - internal/service/**/*_data_source.go + - "/internal/service/**/*_data_source.go" patterns: - pattern-inside: "Schema: map[string]*schema.Schema{ ... 
}" - pattern-regex: "Elem:[ ]*schema.Type[a-zA-Z]*," @@ -85,7 +85,7 @@ rules: message: Prefer `flex.FlattenStringSet()` or `flex.FlattenStringValueSet()` paths: include: - - internal/ + - "/internal" patterns: - pattern: schema.NewSet(schema.HashString, flex.FlattenStringList($APIOBJECT)) - pattern: schema.NewSet(schema.HashString, flex.FlattenStringValueList($APIOBJECT)) @@ -96,7 +96,7 @@ rules: message: Prefer `flex.ExpandStringSet()` or `flex.ExpandStringValueSet()` paths: include: - - internal/ + - "/internal" patterns: - pattern-either: - pattern: flex.ExpandStringList($SET.List()) @@ -116,7 +116,7 @@ rules: message: Zero value conditional check after `d.GetOk()` is extraneous paths: include: - - internal/ + - "/internal" patterns: - pattern-either: - pattern: if $VALUE, $OK := d.GetOk($KEY); $OK && $VALUE.(bool) { $BODY } @@ -131,7 +131,7 @@ rules: message: Nil value check before `d.Set()` is extraneous paths: include: - - internal/ + - "/internal" patterns: - pattern-either: - pattern: | @@ -180,9 +180,9 @@ rules: message: (schema.ResourceData).Set() call with the tags key should be preceded by a call to IgnoreConfig paths: include: - - internal/service/**/*.go + - "/internal/service/**/*.go" exclude: - - internal/service/**/*_data_source.go + - "/internal/service/**/*_data_source.go" patterns: - pattern-inside: func $READMETHOD(...) $ERRORTYPE { ... } - pattern-either: @@ -205,111 +205,12 @@ rules: ... severity: WARNING - - id: helper-schema-retry-RetryContext-without-TimeoutError-check - languages: [go] - message: Check retry.RetryContext() errors with tfresource.TimedOut() - paths: - exclude: - - "*_test.go" - - sweep.go - include: - - internal/ - patterns: - - pattern-either: - - pattern: | - $ERR := retry.RetryContext(...) - ... - return ... - - pattern: | - $ERR = retry.RetryContext(...) - ... - return ... - - pattern-not: | - $ERR := retry.RetryContext(...) - ... - if isResourceTimeoutError($ERR) { ... } - ... - return ... 
- - pattern-not: | - $ERR = retry.RetryContext(...) - ... - if isResourceTimeoutError($ERR) { ... } - ... - return ... - - pattern-not: | - $ERR := retry.RetryContext(...) - ... - if tfresource.TimedOut($ERR) { ... } - ... - return ... - - pattern-not: | - $ERR = retry.RetryContext(...) - ... - if tfresource.TimedOut($ERR) { ... } - ... - return ... - severity: WARNING - - - id: helper-schema-TimeoutError-check-doesnt-return-output - languages: [go] - message: If the retry.RetryContext() or tfresource.Retry() function returns a value, ensure the isResourceTimeoutError() check does as well - paths: - exclude: - - "*_test.go" - include: - - internal/ - patterns: - - pattern-either: - - patterns: - - pattern: | - if isResourceTimeoutError($ERR) { - _, $ERR = $CONN.$FUNC(...) - } - - pattern-not-inside: | - $ERR = retry.RetryContext(..., func() *retry.RetryError { - ... - _, $ERR2 = $CONN.$FUNC(...) - ... - }) - ... - if isResourceTimeoutError($ERR) { ... } - - pattern-not-inside: | - $ERR = tfresource.Retry(..., func() *retry.RetryError { - ... - _, $ERR2 = $CONN.$FUNC(...) - ... - }, ...) - ... - if isResourceTimeoutError($ERR) { ... } - - patterns: - - pattern: | - if tfresource.TimedOut($ERR) { - _, $ERR = $CONN.$FUNC(...) - } - - pattern-not-inside: | - $ERR = retry.RetryContext(..., func() *retry.RetryError { - ... - _, $ERR2 = $CONN.$FUNC(...) - ... - }) - ... - if tfresource.TimedOut($ERR) { ... } - - pattern-not-inside: | - $ERR = tfresource.Retry(..., func() *retry.RetryError { - ... - _, $ERR2 = $CONN.$FUNC(...) - ... - }, ...) - ... - if tfresource.TimedOut($ERR) { ... 
} - severity: WARNING - - id: is-not-found-error languages: [go] message: Check for retry.NotFoundError errors with tfresource.NotFound() paths: include: - - internal/ + - "/internal" patterns: - pattern-either: - patterns: @@ -333,7 +234,7 @@ rules: message: Use time.Equal() instead of == paths: include: - - internal/ + - "/internal" patterns: - pattern-either: - pattern: | @@ -359,7 +260,7 @@ rules: message: Use lastPage for bool variable in pagination functions paths: include: - - internal/ + - "/internal" patterns: - pattern: | $X.$Z(..., func(..., $Y bool) { @@ -383,10 +284,10 @@ rules: message: Do not call `fmt.Print` and variant paths: include: - - internal/ + - "/internal" exclude: - - .ci/providerlint/vendor/ - - internal/generate/ + - "/.ci/providerlint/vendor/" + - "/internal/generate/" patterns: - pattern-either: - pattern: | @@ -402,9 +303,9 @@ rules: message: Do not call `regexp.MustCompile` directly, use `regexache.MustCompile` instead paths: include: - - internal/ + - "/internal" exclude: - - .ci/providerlint/vendor/ + - "/.ci/providerlint/vendor/" patterns: - pattern: 'regexp.MustCompile($X)' severity: WARNING @@ -415,22 +316,22 @@ rules: message: Domain names should be in the namespaces defined in RFC 6761 (https://datatracker.ietf.org/doc/html/rfc6761) as reserved for testing paths: include: - - internal/service + - "/internal/service" exclude: - - internal/service/firehose/delivery_stream_test.go - - internal/service/fsx/windows_file_system_test.go - - internal/service/iam/openid_connect_provider_test.go - - internal/service/mq/broker_test.go - - internal/service/mq/forge_test.go - - internal/service/route53/sweep.go - - internal/service/s3/bucket_test.go - - internal/service/s3/object_test.go - - internal/service/storagegateway/cached_iscsi_volume.go - - internal/service/storagegateway/cached_iscsi_volume_test.go - - internal/service/storagegateway/stored_iscsi_volume_test.go - - internal/service/transfer/access_test.go - - 
internal/service/transfer/server_test.go - - "internal/service/**/*_test.go" + - "/internal/service/firehose/delivery_stream_test.go" + - "/internal/service/fsx/windows_file_system_test.go" + - "/internal/service/iam/openid_connect_provider_test.go" + - "/internal/service/mq/broker_test.go" + - "/internal/service/mq/forge_test.go" + - "/internal/service/route53/sweep.go" + - "/internal/service/s3/bucket_test.go" + - "/internal/service/s3/object_test.go" + - "/internal/service/storagegateway/cached_iscsi_volume.go" + - "/internal/service/storagegateway/cached_iscsi_volume_test.go" + - "/internal/service/storagegateway/stored_iscsi_volume_test.go" + - "/internal/service/transfer/access_test.go" + - "/internal/service/transfer/server_test.go" + - "/internal/service/**/*_test.go" patterns: - patterns: - pattern-regex: '(([-a-zA-Z0-9]{2,}\.)|(%[sdftq]))+(com|net|org)\b' @@ -461,9 +362,9 @@ rules: message: Use default email address or generate a random email address. https://github.com/hashicorp/terraform-provider-aws/blob/main/docs/contributing/running-and-writing-acceptance-tests.md#hardcoded-email-addresses paths: include: - - internal/ + - "/internal" exclude: - - internal/service/route53domains/registered_domain_test.go + - "/internal/service/route53domains/registered_domain_test.go" patterns: - pattern-regex: '[-_A-Za-z0-9.+]+@([-A-Za-z0-9]+\.)(com|net|org)' - pattern-not-regex: 'no-reply@hashicorp\.com' @@ -475,9 +376,9 @@ rules: message: Generate random SSH keys using acctest.RandSSHKeyPair() or RandSSHKeyPairSize(). https://github.com/hashicorp/terraform-provider-aws/blob/main/docs/contributing/running-and-writing-acceptance-tests.md#hardcoded-ssh-key paths: include: - - internal/ + - "/internal" exclude: - - .ci/providerlint/vendor/ + - "/.ci/providerlint/vendor/" patterns: # This isn't technically the correct regex, but for some reason adding a '+' causes the regex to # miss some SSH keys. AFAICT, this is good enough. 
@@ -490,9 +391,9 @@ rules: message: Incorrect form of non-tags change detection. https://github.com/hashicorp/terraform-provider-aws/blob/main/docs/contributing/contribution-checklists.md#resource-tagging-code-implementation paths: include: - - internal/ + - "/internal" patterns: - - pattern: 'if d.HasChangeExcept("tags_all") {...}' + - pattern: 'if d.HasChangeExcept(names.AttrTagsAll) {...}' severity: WARNING - id: unnecessary-literal-type-conversion @@ -500,7 +401,7 @@ rules: message: Literal numbers do not need type conversions paths: include: - - internal/ + - "/internal" patterns: - pattern: "aws.Int64(int64($X))" - metavariable-regex: @@ -514,7 +415,7 @@ rules: message: Do not call `d.SetId("")` inside a resource create function paths: include: - - internal/service/ + - "/internal/service/" patterns: - pattern: | func $FUNC(...) { @@ -531,7 +432,7 @@ rules: message: Do not call `d.SetId("")` inside a resource update function paths: include: - - internal/service/ + - "/internal/service/" patterns: - pattern: | func $FUNC(...) { @@ -548,7 +449,7 @@ rules: message: Do not call `d.SetId(...)` inside a resource delete function paths: include: - - internal/service/ + - "/internal/service/" patterns: - pattern: | func $FUNC(...) { @@ -565,7 +466,7 @@ rules: message: Empty strings should not be included in validation paths: include: - - internal/ + - "/internal" patterns: - pattern: validation.Any(..., validation.StringIsEmpty, ...) 
severity: ERROR @@ -575,7 +476,7 @@ rules: message: Use tfawserr.ErrCodeEquals() when message parameter is empty string paths: include: - - internal/ + - "/internal" patterns: - pattern: tfawserr.ErrMessageContains(err, ..., "") severity: ERROR @@ -585,9 +486,9 @@ rules: message: Use constant in the same package rather than importing iam for a constant paths: include: - - internal/ + - "/internal" exclude: - - internal/service/iam + - "/internal/service/iam" patterns: - pattern: tfiam.PropagationTimeout severity: ERROR @@ -597,7 +498,7 @@ rules: message: Use acctest.ProtoV5ProviderFactories, not acctest.Providers or acctest.ProviderFactories paths: include: - - "internal/**/*_test.go" + - "/internal/**/*_test.go" pattern-either: - pattern-regex: Providers:\s+(acctest\.)?Providers, - pattern-regex: ProviderFactories:\s+(acctest\.)?ProviderFactories, @@ -608,7 +509,7 @@ rules: message: Prefer `err` with `%w` format verb instead of `err.Code()` or `err.Message()` paths: include: - - internal/ + - "/internal" patterns: - pattern-either: - pattern: fmt.Errorf(..., $ERR.Code(), ...) @@ -620,7 +521,7 @@ rules: message: Prefer using `enum.Slice()` to convert a slice of typed string enums to a slice of strings paths: include: - - internal/ + - "/internal" patterns: - pattern: "[]string{..., string($X), ...}" severity: WARNING @@ -630,7 +531,7 @@ rules: message: Prefer using WithoutTimeout CRUD handlers instead of Context variants paths: include: - - internal/service + - "/internal/service" patterns: - pattern-regex: "(Create|Read|Update|Delete)Context:" severity: ERROR @@ -640,7 +541,7 @@ rules: message: Calls to `sdkdiag.AppendErrorf()` should be returned or set to the `diags` variable paths: include: - - internal/ + - "/internal" patterns: - pattern: | if err != nil { @@ -665,7 +566,7 @@ rules: message: Avoid use of `errs.Must()` in service packages, handle errors explicitly instead. 
paths: include: - - internal/service + - "/internal/service" patterns: - pattern-either: - pattern: errs.Must(...) @@ -676,7 +577,7 @@ rules: message: Avoid use of `SingleNestedBlock` in schema definitions. Use `ListNestedBlock` with a size validator instead. paths: include: - - internal/service + - "/internal/service" patterns: - pattern: schema.SingleNestedBlock{ ... } severity: ERROR @@ -686,7 +587,7 @@ rules: message: Deprecation messages should begin with `argument_name is deprecated`. paths: include: - - internal/service + - "/internal/service" patterns: - pattern-inside: "Schema: map[string]*schema.Schema{ ... }" - pattern: | diff --git a/.ci/.tflint.hcl b/.ci/.tflint.hcl index c745a469afd4..eb97244a8493 100644 --- a/.ci/.tflint.hcl +++ b/.ci/.tflint.hcl @@ -1,6 +1,6 @@ plugin "aws" { enabled = true - version = "0.39.0" + version = "0.41.0" source = "github.com/terraform-linters/tflint-ruleset-aws" } @@ -26,6 +26,27 @@ rule "aws_acm_certificate_lifecycle" { enabled = false } -rule "aws_accessanalyzer_analyzer_invalid_type" { +# Rule needs to be disabled due to enum value case inconsistencies +rule "aws_dms_s3_endpoint_invalid_compression_type" { + enabled = false +} + +# Rule needs to be disabled due to enum value case inconsistencies +rule "aws_dms_s3_endpoint_invalid_date_partition_sequence" { + enabled = false +} + +# Rule needs to be disabled due to enum value case inconsistencies +rule "aws_dms_s3_endpoint_invalid_encryption_mode" { + enabled = false +} + +# Avoids errant findings related to directory paths in generated configuration files +rule "aws_iam_saml_provider_invalid_saml_metadata_document" { + enabled = false +} + +# Rule needs to be disabled due to bad email regex in the linter rule +rule "aws_guardduty_member_invalid_email" { enabled = false } diff --git a/.ci/providerlint/go.mod b/.ci/providerlint/go.mod index 4207fcc9fcfe..68aae67db00c 100644 --- a/.ci/providerlint/go.mod +++ b/.ci/providerlint/go.mod @@ -1,12 +1,12 @@ module 
github.com/hashicorp/terraform-provider-aws/ci/providerlint -go 1.24.4 +go 1.24.8 require ( github.com/bflad/tfproviderlint v0.31.0 - github.com/hashicorp/aws-sdk-go-base/v2 v2.0.0-beta.65 - github.com/hashicorp/terraform-plugin-sdk/v2 v2.37.0 - golang.org/x/tools v0.34.0 + github.com/hashicorp/aws-sdk-go-base/v2 v2.0.0-beta.67 + github.com/hashicorp/terraform-plugin-sdk/v2 v2.38.1 + golang.org/x/tools v0.38.0 ) require ( @@ -24,21 +24,21 @@ require ( github.com/hashicorp/go-cty v1.5.0 // indirect github.com/hashicorp/go-hclog v1.6.3 // indirect github.com/hashicorp/go-multierror v1.1.1 // indirect - github.com/hashicorp/go-plugin v1.6.3 // indirect + github.com/hashicorp/go-plugin v1.7.0 // indirect github.com/hashicorp/go-retryablehttp v0.7.7 // indirect github.com/hashicorp/go-uuid v1.0.3 // indirect github.com/hashicorp/go-version v1.7.0 // indirect github.com/hashicorp/hc-install v0.9.2 // indirect - github.com/hashicorp/hcl/v2 v2.23.0 // indirect + github.com/hashicorp/hcl/v2 v2.24.0 // indirect github.com/hashicorp/logutils v1.0.0 // indirect - github.com/hashicorp/terraform-exec v0.23.0 // indirect - github.com/hashicorp/terraform-json v0.25.0 // indirect - github.com/hashicorp/terraform-plugin-go v0.27.0 // indirect + github.com/hashicorp/terraform-exec v0.23.1 // indirect + github.com/hashicorp/terraform-json v0.27.1 // indirect + github.com/hashicorp/terraform-plugin-go v0.29.0 // indirect github.com/hashicorp/terraform-plugin-log v0.9.0 // indirect - github.com/hashicorp/terraform-registry-address v0.2.5 // indirect + github.com/hashicorp/terraform-registry-address v0.4.0 // indirect github.com/hashicorp/terraform-svchost v0.1.1 // indirect github.com/hashicorp/yamux v0.1.2 // indirect - github.com/kr/pretty v0.3.1 // indirect + github.com/kr/text v0.2.0 // indirect github.com/mattn/go-colorable v0.1.14 // indirect github.com/mattn/go-isatty v0.0.20 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect @@ -47,21 +47,20 @@ require ( 
github.com/mitchellh/mapstructure v1.5.0 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect github.com/oklog/run v1.1.0 // indirect - github.com/rogpeppe/go-internal v1.13.1 // indirect - github.com/stretchr/testify v1.10.0 // indirect + github.com/stretchr/testify v1.11.1 // indirect github.com/vmihailenco/msgpack v4.0.4+incompatible // indirect github.com/vmihailenco/msgpack/v5 v5.4.1 // indirect github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect - github.com/zclconf/go-cty v1.16.2 // indirect - golang.org/x/crypto v0.39.0 // indirect - golang.org/x/mod v0.25.0 // indirect - golang.org/x/net v0.41.0 // indirect - golang.org/x/sync v0.15.0 // indirect - golang.org/x/sys v0.33.0 // indirect - golang.org/x/text v0.26.0 // indirect + github.com/zclconf/go-cty v1.17.0 // indirect + go.opentelemetry.io/otel/sdk/metric v1.38.0 // indirect + golang.org/x/crypto v0.43.0 // indirect + golang.org/x/mod v0.29.0 // indirect + golang.org/x/net v0.46.0 // indirect + golang.org/x/sync v0.17.0 // indirect + golang.org/x/sys v0.37.0 // indirect + golang.org/x/text v0.30.0 // indirect google.golang.org/appengine v1.6.8 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20250218202821-56aae31c358a // indirect - google.golang.org/grpc v1.72.1 // indirect - google.golang.org/protobuf v1.36.6 // indirect - gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20250707201910-8d1bb00bc6a7 // indirect + google.golang.org/grpc v1.75.1 // indirect + google.golang.org/protobuf v1.36.9 // indirect ) diff --git a/.ci/providerlint/go.sum b/.ci/providerlint/go.sum index 796e532032c5..8fe137185634 100644 --- a/.ci/providerlint/go.sum +++ b/.ci/providerlint/go.sum @@ -13,16 +13,17 @@ github.com/bflad/gopaniccheck v0.1.0 h1:tJftp+bv42ouERmUMWLoUn/5bi/iQZjHPznM00cP github.com/bflad/gopaniccheck v0.1.0/go.mod h1:ZCj2vSr7EqVeDaqVsWN4n2MwdROx1YL+LFo47TSWtsA= github.com/bflad/tfproviderlint v0.31.0 
h1:9N/dUzFARsTpAQOjdZzIWnHKMzQc7UDDEYrSNV2xnrw= github.com/bflad/tfproviderlint v0.31.0/go.mod h1:yZQdJs4uobBIgVHt1Tv5OpHhgM8fwh29OgxL/La5BFs= -github.com/bufbuild/protocompile v0.4.0 h1:LbFKd2XowZvQ/kajzguUp2DC9UEIQhIq77fZZlaQsNA= -github.com/bufbuild/protocompile v0.4.0/go.mod h1:3v93+mbWn/v3xzN+31nwkJfrEpAUwp+BagBSZWx+TP8= +github.com/bufbuild/protocompile v0.14.1 h1:iA73zAf/fyljNjQKwYzUHD6AD4R8KMasmwa/FBatYVw= +github.com/bufbuild/protocompile v0.14.1/go.mod h1:ppVdAIhbr2H8asPk6k4pY7t9zB1OU5DoEw9xY/FUi1c= github.com/cloudflare/circl v1.6.1 h1:zqIqSPIndyBh1bjLVVDHMPpVKqp8Su/V+6MeDzzQBQ0= github.com/cloudflare/circl v1.6.1/go.mod h1:uddAzsPgqdMAYatqJ0lsjX1oECcQLIlRpzZh3pJrofs= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/cyphar/filepath-securejoin v0.4.1 h1:JyxxyPEaktOD+GAnqIqTf9A8tHyAG22rowi7HkoSU1s= github.com/cyphar/filepath-securejoin v0.4.1/go.mod h1:Sdj7gXlvMcPZsbhwhQ33GguGLDGQL7h7bg04C/+u9jI= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/emirpasic/gods v1.18.1 h1:FXtiHYKDGKCW2KzwZKx0iC0PQmdlorYgdFG9jPXJ1Bc= github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FMNAnJvWQ= github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= @@ -34,8 +35,8 @@ github.com/go-git/go-billy/v5 v5.6.2 h1:6Q86EsPXMa7c3YZ3aLAQsMA0VlWmy43r6FHqa/UN github.com/go-git/go-billy/v5 v5.6.2/go.mod h1:rcFC2rAsp/erv7CMz9GczHcuD0D32fWzH+MJAU+jaUU= github.com/go-git/go-git/v5 v5.14.0 h1:/MD3lCrGjCen5WfEAzKg00MJJffKhC8gzS80ycmCi60= 
github.com/go-git/go-git/v5 v5.14.0/go.mod h1:Z5Xhoia5PcWA3NF8vRLURn9E5FRhSl7dGj9ItW3Wk5k= -github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= -github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= +github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-test/deep v1.0.3 h1:ZrJSEWsXzPOxaZnFteGEfooLba+ju3FYIbOrS+rQd68= @@ -53,8 +54,8 @@ github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/hashicorp/aws-sdk-go-base/v2 v2.0.0-beta.65 h1:81+kWbE1yErFBMjME0I5k3x3kojjKsWtPYHEAutoPow= -github.com/hashicorp/aws-sdk-go-base/v2 v2.0.0-beta.65/go.mod h1:WtMzv9T++tfWVea+qB2MXoaqxw33S8bpJslzUike2mQ= +github.com/hashicorp/aws-sdk-go-base/v2 v2.0.0-beta.67 h1:IS4mjtvkLHXWI5yn/t9ILOUiBqPePMFaO4IRh5pcMk4= +github.com/hashicorp/aws-sdk-go-base/v2 v2.0.0-beta.67/go.mod h1:l81jrdpcZSWUsJs4BGFfdGScefSYEFQRLMQRG3uyvT0= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= @@ -69,8 +70,8 @@ github.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB1 github.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= 
github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= -github.com/hashicorp/go-plugin v1.6.3 h1:xgHB+ZUSYeuJi96WtxEjzi23uh7YQpznjGh0U0UUrwg= -github.com/hashicorp/go-plugin v1.6.3/go.mod h1:MRobyh+Wc/nYy1V4KAXUiYfzxoYhs7V1mlH1Z7iY2h0= +github.com/hashicorp/go-plugin v1.7.0 h1:YghfQH/0QmPNc/AZMTFE3ac8fipZyZECHdDPshfk+mA= +github.com/hashicorp/go-plugin v1.7.0/go.mod h1:BExt6KEaIYx804z8k4gRzRLEvxKVb+kn0NMcihqOqb8= github.com/hashicorp/go-retryablehttp v0.7.7 h1:C8hUCYzor8PIfXHa4UrZkU4VvK8o9ISHxT2Q8+VepXU= github.com/hashicorp/go-retryablehttp v0.7.7/go.mod h1:pkQpWZeYWskR+D1tR2O5OcBFOxfA7DoAO6xtkuQnHTk= github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= @@ -80,34 +81,33 @@ github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKe github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/hc-install v0.9.2 h1:v80EtNX4fCVHqzL9Lg/2xkp62bbvQMnvPQ0G+OmtO24= github.com/hashicorp/hc-install v0.9.2/go.mod h1:XUqBQNnuT4RsxoxiM9ZaUk0NX8hi2h+Lb6/c0OZnC/I= -github.com/hashicorp/hcl/v2 v2.23.0 h1:Fphj1/gCylPxHutVSEOf2fBOh1VE4AuLV7+kbJf3qos= -github.com/hashicorp/hcl/v2 v2.23.0/go.mod h1:62ZYHrXgPoX8xBnzl8QzbWq4dyDsDtfCRgIq1rbJEvA= +github.com/hashicorp/hcl/v2 v2.24.0 h1:2QJdZ454DSsYGoaE6QheQZjtKZSUs9Nh2izTWiwQxvE= +github.com/hashicorp/hcl/v2 v2.24.0/go.mod h1:oGoO1FIQYfn/AgyOhlg9qLC6/nOJPX3qGbkZpYAcqfM= github.com/hashicorp/logutils v1.0.0 h1:dLEQVugN8vlakKOUE3ihGLTZJRB4j+M2cdTm/ORI65Y= github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= -github.com/hashicorp/terraform-exec v0.23.0 h1:MUiBM1s0CNlRFsCLJuM5wXZrzA3MnPYEsiXmzATMW/I= -github.com/hashicorp/terraform-exec v0.23.0/go.mod h1:mA+qnx1R8eePycfwKkCRk3Wy65mwInvlpAeOwmA7vlY= -github.com/hashicorp/terraform-json v0.25.0 h1:rmNqc/CIfcWawGiwXmRuiXJKEiJu1ntGoxseG1hLhoQ= -github.com/hashicorp/terraform-json v0.25.0/go.mod 
h1:sMKS8fiRDX4rVlR6EJUMudg1WcanxCMoWwTLkgZP/vc= -github.com/hashicorp/terraform-plugin-go v0.27.0 h1:ujykws/fWIdsi6oTUT5Or4ukvEan4aN9lY+LOxVP8EE= -github.com/hashicorp/terraform-plugin-go v0.27.0/go.mod h1:FDa2Bb3uumkTGSkTFpWSOwWJDwA7bf3vdP3ltLDTH6o= +github.com/hashicorp/terraform-exec v0.23.1 h1:diK5NSSDXDKqHEOIQefBMu9ny+FhzwlwV0xgUTB7VTo= +github.com/hashicorp/terraform-exec v0.23.1/go.mod h1:e4ZEg9BJDRaSalGm2z8vvrPONt0XWG0/tXpmzYTf+dM= +github.com/hashicorp/terraform-json v0.27.1 h1:zWhEracxJW6lcjt/JvximOYyc12pS/gaKSy/wzzE7nY= +github.com/hashicorp/terraform-json v0.27.1/go.mod h1:GzPLJ1PLdUG5xL6xn1OXWIjteQRT2CNT9o/6A9mi9hE= +github.com/hashicorp/terraform-plugin-go v0.29.0 h1:1nXKl/nSpaYIUBU1IG/EsDOX0vv+9JxAltQyDMpq5mU= +github.com/hashicorp/terraform-plugin-go v0.29.0/go.mod h1:vYZbIyvxyy0FWSmDHChCqKvI40cFTDGSb3D8D70i9GM= github.com/hashicorp/terraform-plugin-log v0.9.0 h1:i7hOA+vdAItN1/7UrfBqBwvYPQ9TFvymaRGZED3FCV0= github.com/hashicorp/terraform-plugin-log v0.9.0/go.mod h1:rKL8egZQ/eXSyDqzLUuwUYLVdlYeamldAHSxjUFADow= -github.com/hashicorp/terraform-plugin-sdk/v2 v2.37.0 h1:NFPMacTrY/IdcIcnUB+7hsore1ZaRWU9cnB6jFoBnIM= -github.com/hashicorp/terraform-plugin-sdk/v2 v2.37.0/go.mod h1:QYmYnLfsosrxjCnGY1p9c7Zj6n9thnEE+7RObeYs3fA= -github.com/hashicorp/terraform-registry-address v0.2.5 h1:2GTftHqmUhVOeuu9CW3kwDkRe4pcBDq0uuK5VJngU1M= -github.com/hashicorp/terraform-registry-address v0.2.5/go.mod h1:PpzXWINwB5kuVS5CA7m1+eO2f1jKb5ZDIxrOPfpnGkg= +github.com/hashicorp/terraform-plugin-sdk/v2 v2.38.1 h1:mlAq/OrMlg04IuJT7NpefI1wwtdpWudnEmjuQs04t/4= +github.com/hashicorp/terraform-plugin-sdk/v2 v2.38.1/go.mod h1:GQhpKVvvuwzD79e8/NZ+xzj+ZpWovdPAe8nfV/skwNU= +github.com/hashicorp/terraform-registry-address v0.4.0 h1:S1yCGomj30Sao4l5BMPjTGZmCNzuv7/GDTDX99E9gTk= +github.com/hashicorp/terraform-registry-address v0.4.0/go.mod h1:LRS1Ay0+mAiRkUyltGT+UHWkIqTFvigGn/LbMshfflE= github.com/hashicorp/terraform-svchost v0.1.1 h1:EZZimZ1GxdqFRinZ1tpJwVxxt49xc/S52uzrw4x0jKQ= 
github.com/hashicorp/terraform-svchost v0.1.1/go.mod h1:mNsjQfZyf/Jhz35v6/0LWcv26+X7JPS+buii2c9/ctc= github.com/hashicorp/yamux v0.1.2 h1:XtB8kyFOyHXYVFnwT5C3+Bdo8gArse7j2AQ0DA0Uey8= github.com/hashicorp/yamux v0.1.2/go.mod h1:C+zze2n6e/7wshOZep2A70/aQU6QBRWJO/G6FT1wIns= github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A= github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo= -github.com/jhump/protoreflect v1.15.1 h1:HUMERORf3I3ZdX05WaQ6MIpd/NJ434hTp5YiKgfCL6c= -github.com/jhump/protoreflect v1.15.1/go.mod h1:jD/2GMKKE6OqX8qTjhADU1e6DShO+gavG9e0Q693nKo= +github.com/jhump/protoreflect v1.17.0 h1:qOEr613fac2lOuTgWN4tPAtLL7fUSbuJL5X5XumQh94= +github.com/jhump/protoreflect v1.17.0/go.mod h1:h9+vUUL38jiBzck8ck+6G/aeMX8Z4QUY/NiJPwPNi+8= github.com/kevinburke/ssh_config v1.2.0 h1:x584FjTGwHzMwvHx18PXxbBVzfnxogHaAReU4gf13a4= github.com/kevinburke/ssh_config v1.2.0/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= @@ -136,20 +136,19 @@ github.com/oklog/run v1.1.0 h1:GEenZ1cK0+q0+wsJew9qUg/DyD8k3JzYsZAi5gYi2mA= github.com/oklog/run v1.1.0/go.mod h1:sVPdnTZT1zYwAJeCMu2Th4T21pA3FPOQRfWjQlk7DVU= github.com/pjbgf/sha1cd v0.3.2 h1:a9wb0bp1oC2TGwStyn0Umc/IGKQnEgF0vVaZ8QF8eo4= github.com/pjbgf/sha1cd v0.3.2/go.mod h1:zQWigSxVmsHEZow5qaLtPYxpcKMMQpa09ixqBxuCS6A= -github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= 
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= -github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= -github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ= +github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc= github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 h1:n661drycOFuPLCN3Uc8sB6B/s6Z4t2xvBgU1htSHuq8= github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3/go.mod h1:A0bzQcvG0E7Rwjx0REVgAGH58e96+X0MeOfepqsbeW4= github.com/skeema/knownhosts v1.3.1 h1:X2osQ+RAjK76shCbvhHHHVl3ZlgDm8apHEHFqRjnBY8= github.com/skeema/knownhosts v1.3.1/go.mod h1:r7KTdC8l4uxWRyK2TpQZ/1o5HaSzh06ePQNxPwTcfiY= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= -github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= -github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= github.com/vmihailenco/msgpack v3.3.3+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk= github.com/vmihailenco/msgpack v4.0.4+incompatible h1:dSLoQfGFAo3F6OoNhwUmLwVgaUXK79GlxNBwueZn0xI= github.com/vmihailenco/msgpack v4.0.4+incompatible/go.mod 
h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk= @@ -160,42 +159,42 @@ github.com/vmihailenco/tagparser/v2 v2.0.0/go.mod h1:Wri+At7QHww0WTrCBeu4J6bNtoV github.com/xanzy/ssh-agent v0.3.3 h1:+/15pJfg/RsTxqYcX6fHqOXZwwMP+2VyYWJeWM2qQFM= github.com/xanzy/ssh-agent v0.3.3/go.mod h1:6dzNDKs0J9rVPHPhaGCukekBHKqfl+L3KghI1Bc68Uw= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= -github.com/zclconf/go-cty v1.16.2 h1:LAJSwc3v81IRBZyUVQDUdZ7hs3SYs9jv0eZJDWHD/70= -github.com/zclconf/go-cty v1.16.2/go.mod h1:VvMs5i0vgZdhYawQNq5kePSpLAoz8u1xvZgrPIxfnZE= +github.com/zclconf/go-cty v1.17.0 h1:seZvECve6XX4tmnvRzWtJNHdscMtYEx5R7bnnVyd/d0= +github.com/zclconf/go-cty v1.17.0/go.mod h1:wqFzcImaLTI6A5HfsRwB0nj5n0MRZFwmey8YoFPPs3U= github.com/zclconf/go-cty-debug v0.0.0-20240509010212-0d6042c53940 h1:4r45xpDWB6ZMSMNJFMOjqrGHynW3DIBuR2H9j0ug+Mo= github.com/zclconf/go-cty-debug v0.0.0-20240509010212-0d6042c53940/go.mod h1:CmBdvvj3nqzfzJ6nTCIwDTPZ56aVGvDrmztiO5g3qrM= -go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= -go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= -go.opentelemetry.io/otel v1.36.0 h1:UumtzIklRBY6cI/lllNZlALOF5nNIzJVb16APdvgTXg= -go.opentelemetry.io/otel v1.36.0/go.mod h1:/TcFMXYjyRNh8khOAO9ybYkqaDBb/70aVwkNML4pP8E= -go.opentelemetry.io/otel/metric v1.36.0 h1:MoWPKVhQvJ+eeXWHFBOPoBOi20jh6Iq2CcCREuTYufE= -go.opentelemetry.io/otel/metric v1.36.0/go.mod h1:zC7Ks+yeyJt4xig9DEw9kuUFe5C3zLbVjV2PzT6qzbs= -go.opentelemetry.io/otel/sdk v1.34.0 h1:95zS4k/2GOy069d321O8jWgYsW3MzVV+KuSPKp7Wr1A= -go.opentelemetry.io/otel/sdk v1.34.0/go.mod h1:0e/pNiaMAqaykJGKbi+tSjWfNNHMTxoC9qANsCzbyxU= -go.opentelemetry.io/otel/sdk/metric v1.34.0 h1:5CeK9ujjbFVL5c1PhLuStg1wxA7vQv7ce1EK0Gyvahk= -go.opentelemetry.io/otel/sdk/metric v1.34.0/go.mod h1:jQ/r8Ze28zRKoNRdkjCZxfs6YvBTG1+YIqyFVFYec5w= -go.opentelemetry.io/otel/trace v1.36.0 h1:ahxWNuqZjpdiFAyrIoQ4GIiAIhxAunQR6MUoKrsNd4w= 
-go.opentelemetry.io/otel/trace v1.36.0/go.mod h1:gQ+OnDZzrybY4k4seLzPAWNwVBBVlF2szhehOBB/tGA= +go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64= +go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y= +go.opentelemetry.io/otel v1.38.0 h1:RkfdswUDRimDg0m2Az18RKOsnI8UDzppJAtj01/Ymk8= +go.opentelemetry.io/otel v1.38.0/go.mod h1:zcmtmQ1+YmQM9wrNsTGV/q/uyusom3P8RxwExxkZhjM= +go.opentelemetry.io/otel/metric v1.38.0 h1:Kl6lzIYGAh5M159u9NgiRkmoMKjvbsKtYRwgfrA6WpA= +go.opentelemetry.io/otel/metric v1.38.0/go.mod h1:kB5n/QoRM8YwmUahxvI3bO34eVtQf2i4utNVLr9gEmI= +go.opentelemetry.io/otel/sdk v1.38.0 h1:l48sr5YbNf2hpCUj/FoGhW9yDkl+Ma+LrVl8qaM5b+E= +go.opentelemetry.io/otel/sdk v1.38.0/go.mod h1:ghmNdGlVemJI3+ZB5iDEuk4bWA3GkTpW+DOoZMYBVVg= +go.opentelemetry.io/otel/sdk/metric v1.38.0 h1:aSH66iL0aZqo//xXzQLYozmWrXxyFkBJ6qT5wthqPoM= +go.opentelemetry.io/otel/sdk/metric v1.38.0/go.mod h1:dg9PBnW9XdQ1Hd6ZnRz689CbtrUp0wMMs9iPcgT9EZA= +go.opentelemetry.io/otel/trace v1.38.0 h1:Fxk5bKrDZJUH+AMyyIXGcFAPah0oRcT+LuNtJrmcNLE= +go.opentelemetry.io/otel/trace v1.38.0/go.mod h1:j1P9ivuFsTceSWe1oY+EeW3sc+Pp42sO++GHkg4wwhs= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.39.0 h1:SHs+kF4LP+f+p14esP5jAoDpHU8Gu/v9lFRK6IT5imM= -golang.org/x/crypto v0.39.0/go.mod h1:L+Xg3Wf6HoL4Bn4238Z6ft6KfEpN0tJGo53AAPC632U= +golang.org/x/crypto v0.43.0 h1:dduJYIi3A3KOfdGOHX8AVZ/jGiyPa3IbBozJ5kNuE04= +golang.org/x/crypto v0.43.0/go.mod h1:BFbav4mRNlXJL4wNeejLpWxB7wMbc79PdRGhWKncxR0= golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod 
v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/mod v0.25.0 h1:n7a+ZbQKQA/Ysbyb0/6IbB1H/X41mKgbhfv7AfG/44w= -golang.org/x/mod v0.25.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww= +golang.org/x/mod v0.29.0 h1:HV8lRxZC4l2cr3Zq1LvtOsi/ThTgWnUk/y64QSs8GwA= +golang.org/x/mod v0.29.0/go.mod h1:NyhrlYXJ2H4eJiRy/WDBO6HMqZQ6q9nk4JzS3NuCK+w= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.41.0 h1:vBTly1HeNPEn3wtREYfy4GZ/NECgw2Cnl+nK6Nz3uvw= -golang.org/x/net v0.41.0/go.mod h1:B/K4NNqkfmg07DQYrbwvSluqCJOOXwUjeb/5lOisjbA= +golang.org/x/net v0.46.0 h1:giFlY12I07fugqwPuWJi68oOnpfqFnJIJzaIIm2JVV4= +golang.org/x/net v0.46.0/go.mod h1:Q9BGdFy1y4nkUwiLvT5qtyhAnEHgnQ/zd8PfU6nc210= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.15.0 h1:KWH3jNZsfyT6xfAfKiz6MRNmd46ByHDYaZ7KSkCtdW8= -golang.org/x/sync v0.15.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= +golang.org/x/sync v0.17.0 h1:l60nONMj9l5drqw6jlhIELNv9I0A4OFgRsG9k2oT9Ug= +golang.org/x/sync v0.17.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -208,36 +207,38 @@ golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw= -golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/sys v0.37.0 h1:fdNQudmxPjkdUTPnLn5mdQv7Zwvbvpaxqs831goi9kQ= +golang.org/x/sys v0.37.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= -golang.org/x/text v0.26.0 h1:P42AVeLghgTYr4+xUnTRKDMqpar+PtX7KWuNQL21L8M= -golang.org/x/text v0.26.0/go.mod h1:QK15LZJUUQVJxhz7wXgxSy/CJaTFjd0G+YLonydOVQA= +golang.org/x/text v0.30.0 h1:yznKA/E9zq54KzlzBEAWn1NXSQ8DIp/NYMy88xJjl4k= +golang.org/x/text v0.30.0/go.mod h1:yDdHFIX9t+tORqspjENWgzaCVXgk0yYnYuSZ8UzzBVM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200214201135-548b770e2dfa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= 
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.34.0 h1:qIpSLOxeCYGg9TrcJokLBG4KFA6d795g0xkBkiESGlo= -golang.org/x/tools v0.34.0/go.mod h1:pAP9OwEaY1CAW3HOmg3hLZC5Z0CCmzjAF2UQMSqNARg= +golang.org/x/tools v0.38.0 h1:Hx2Xv8hISq8Lm16jvBZ2VQf+RLmbd7wVUsALibYI/IQ= +golang.org/x/tools v0.38.0/go.mod h1:yEsQ/d/YK8cjh0L6rZlY8tgtlKiBNTL14pGDJPJpYQs= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk= +gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM= google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250218202821-56aae31c358a h1:51aaUVRocpvUOSQKM6Q7VuoaktNIaMCLuhZB6DKksq4= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250218202821-56aae31c358a/go.mod h1:uRxBH1mhmO8PGhU89cMcHaXKZqO+OfakD8QQO0oYwlQ= -google.golang.org/grpc v1.72.1 h1:HR03wO6eyZ7lknl75XlxABNVLLFc2PAb6mHlYh756mA= -google.golang.org/grpc v1.72.1/go.mod h1:wH5Aktxcg25y1I3w7H69nHfXdOG3UiadoBtjh3izSDM= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250707201910-8d1bb00bc6a7 h1:pFyd6EwwL2TqFf8emdthzeX+gZE1ElRq3iM8pui4KBY= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250707201910-8d1bb00bc6a7/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A= +google.golang.org/grpc v1.75.1 h1:/ODCNEuf9VghjgO3rqLcfg8fiOP0nSluljWFlDxELLI= +google.golang.org/grpc v1.75.1/go.mod 
h1:JtPAzKiq4v1xcAB2hydNlWI2RnF85XXcV0mhKXr2ecQ= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY= -google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY= +google.golang.org/protobuf v1.36.9 h1:w2gp2mA27hUeUzj9Ex9FBjsBm40zfaDtEWow293U7Iw= +google.golang.org/protobuf v1.36.9/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= diff --git a/.ci/providerlint/passes/AWSAT001/testdata/go.mod b/.ci/providerlint/passes/AWSAT001/testdata/go.mod index b61a6ba5f31b..88975b766cc4 100644 --- a/.ci/providerlint/passes/AWSAT001/testdata/go.mod +++ b/.ci/providerlint/passes/AWSAT001/testdata/go.mod @@ -1,6 +1,6 @@ module testdata -go 1.24.4 +go 1.24.8 require ( github.com/YakDriver/regexache v0.24.0 diff --git a/.ci/providerlint/passes/AWSAT002/testdata/go.mod b/.ci/providerlint/passes/AWSAT002/testdata/go.mod index 8d47f0077d0c..6e52bd367daf 100644 --- a/.ci/providerlint/passes/AWSAT002/testdata/go.mod +++ b/.ci/providerlint/passes/AWSAT002/testdata/go.mod @@ -1,3 +1,3 @@ module testdata -go 1.24.4 +go 1.24.8 diff --git a/.ci/providerlint/passes/AWSAT003/testdata/go.mod b/.ci/providerlint/passes/AWSAT003/testdata/go.mod index 8d47f0077d0c..6e52bd367daf 100644 --- a/.ci/providerlint/passes/AWSAT003/testdata/go.mod +++ b/.ci/providerlint/passes/AWSAT003/testdata/go.mod @@ -1,3 +1,3 @@ module testdata -go 1.24.4 +go 1.24.8 diff --git a/.ci/providerlint/passes/AWSAT004/testdata/go.mod 
b/.ci/providerlint/passes/AWSAT004/testdata/go.mod index b66742f56e76..9c2d75617a1f 100644 --- a/.ci/providerlint/passes/AWSAT004/testdata/go.mod +++ b/.ci/providerlint/passes/AWSAT004/testdata/go.mod @@ -1,6 +1,6 @@ module testdata -go 1.24.4 +go 1.24.8 require github.com/hashicorp/terraform-plugin-sdk/v2 v2.36.1 diff --git a/.ci/providerlint/passes/AWSAT005/testdata/go.mod b/.ci/providerlint/passes/AWSAT005/testdata/go.mod index 8d47f0077d0c..6e52bd367daf 100644 --- a/.ci/providerlint/passes/AWSAT005/testdata/go.mod +++ b/.ci/providerlint/passes/AWSAT005/testdata/go.mod @@ -1,3 +1,3 @@ module testdata -go 1.24.4 +go 1.24.8 diff --git a/.ci/providerlint/passes/AWSAT006/testdata/go.mod b/.ci/providerlint/passes/AWSAT006/testdata/go.mod index 8d47f0077d0c..6e52bd367daf 100644 --- a/.ci/providerlint/passes/AWSAT006/testdata/go.mod +++ b/.ci/providerlint/passes/AWSAT006/testdata/go.mod @@ -1,3 +1,3 @@ module testdata -go 1.24.4 +go 1.24.8 diff --git a/.ci/providerlint/passes/AWSR001/testdata/go.mod b/.ci/providerlint/passes/AWSR001/testdata/go.mod index 8d47f0077d0c..6e52bd367daf 100644 --- a/.ci/providerlint/passes/AWSR001/testdata/go.mod +++ b/.ci/providerlint/passes/AWSR001/testdata/go.mod @@ -1,3 +1,3 @@ module testdata -go 1.24.4 +go 1.24.8 diff --git a/.ci/providerlint/passes/AWSV001/testdata/go.mod b/.ci/providerlint/passes/AWSV001/testdata/go.mod index 6a3e2501bc68..2a9f362fee27 100644 --- a/.ci/providerlint/passes/AWSV001/testdata/go.mod +++ b/.ci/providerlint/passes/AWSV001/testdata/go.mod @@ -1,6 +1,6 @@ module testdata -go 1.24.4 +go 1.24.8 require github.com/hashicorp/terraform-plugin-sdk/v2 v2.36.1 diff --git a/.ci/scripts/changelog.tmpl b/.ci/scripts/changelog.tmpl index 0a8fa5a760a2..cc65ade01897 100644 --- a/.ci/scripts/changelog.tmpl +++ b/.ci/scripts/changelog.tmpl @@ -15,7 +15,7 @@ NOTES: {{ end -}} {{- end -}} -{{- $features := combineTypes .NotesByType.feature (index .NotesByType "new-resource" ) (index .NotesByType "new-data-source") (index 
.NotesByType "new-ephemeral") (index .NotesByType "new-function") (index .NotesByType "new-guide") }} +{{- $features := combineTypes .NotesByType.feature (index .NotesByType "new-resource" ) (index .NotesByType "new-data-source") (index .NotesByType "new-ephemeral") (index .NotesByType "new-function") (index .NotesByType "new-action") (index .NotesByType "new-guide") }} {{- if $features }} FEATURES: diff --git a/.ci/scripts/release-note.tmpl b/.ci/scripts/release-note.tmpl index c687085d924d..55ac0e89c6e4 100644 --- a/.ci/scripts/release-note.tmpl +++ b/.ci/scripts/release-note.tmpl @@ -7,6 +7,8 @@ * **New Ephemeral Resource:** `{{.Body}}` ([#{{- .Issue -}}](https://github.com/hashicorp/terraform-provider-aws/issues/{{- .Issue -}})) {{- else if eq "new-function" .Type -}} * **New Function:** `{{.Body}}` ([#{{- .Issue -}}](https://github.com/hashicorp/terraform-provider-aws/issues/{{- .Issue -}})) +{{- else if eq "new-action" .Type -}} +* **New Action:** `{{.Body}}` ([#{{- .Issue -}}](https://github.com/hashicorp/terraform-provider-aws/issues/{{- .Issue -}})) {{- else if eq "new-guide" .Type -}} * **New Guide:** `{{.Body}}` ([#{{- .Issue -}}](https://github.com/hashicorp/terraform-provider-aws/issues/{{- .Issue -}})) {{- else -}} diff --git a/.ci/semgrep/acctest/checks/planonly.yml b/.ci/semgrep/acctest/checks/planonly.yml index 55bf4ff4299d..43ae2a724005 100644 --- a/.ci/semgrep/acctest/checks/planonly.yml +++ b/.ci/semgrep/acctest/checks/planonly.yml @@ -4,7 +4,7 @@ rules: message: Replace `PlanOnly` acceptance test steps with `plancheck`s paths: include: - - "internal/service/*/*_test.go" + - "/internal/service/*/*_test.go" patterns: - pattern: | { diff --git a/.ci/semgrep/acctest/naming/naming.yml b/.ci/semgrep/acctest/naming/naming.yml index 0a38f741748f..6376d4db5438 100644 --- a/.ci/semgrep/acctest/naming/naming.yml +++ b/.ci/semgrep/acctest/naming/naming.yml @@ -4,7 +4,7 @@ rules: message: The check destroy function should match the pattern 
"testAccCheckDestroy". See https://hashicorp.github.io/terraform-provider-aws/naming/#test-support-functions paths: include: - - "internal/**/*_test.go" + - "/internal/**/*_test.go" patterns: - pattern: func $FUNCNAME(...) { ... } - metavariable-regex: @@ -18,7 +18,7 @@ rules: message: The check destroy with provider function should match the pattern "testAccCheckDestroyWithProvider". paths: include: - - "internal/**/*_test.go" + - "/internal/**/*_test.go" patterns: - pattern: func $FUNCNAME(...) { ... } - metavariable-regex: @@ -31,7 +31,7 @@ rules: message: The check destroy with region function should match the pattern "testAccCheckDestroyWithRegion". paths: include: - - "internal/**/*_test.go" + - "/internal/**/*_test.go" patterns: - pattern: func $FUNCNAME(...) { ... } - metavariable-regex: @@ -44,7 +44,7 @@ rules: message: The check destroy function should have the correct signature paths: include: - - "internal/**/*_test.go" + - "/internal/**/*_test.go" patterns: - pattern: func $FUNCNAME(...) { ... 
} - metavariable-regex: diff --git a/.ci/semgrep/aws/go-sdk-v1.yml b/.ci/semgrep/aws/go-sdk-v1.yml index fd96c53d2331..d3ed7d7488e2 100644 --- a/.ci/semgrep/aws/go-sdk-v1.yml +++ b/.ci/semgrep/aws/go-sdk-v1.yml @@ -4,10 +4,10 @@ rules: message: Do not use AWS SDK for Go v1 paths: include: - - internal/ + - "/internal" exclude: - - "internal/service/simpledb/*.go" - - "internal/conns/awsclient.go" + - "/internal/service/simpledb/*.go" + - "/internal/conns/awsclient.go" patterns: - pattern: | import ("$X") @@ -22,10 +22,10 @@ rules: message: Do not use aws-sdk-go-base AWS SDK for Go v1 shims paths: include: - - internal/ + - "/internal" exclude: - - "internal/service/simpledb/*.go" - - "internal/conns/config.go" + - "/internal/service/simpledb/*.go" + - "/internal/conns/config.go" patterns: - pattern: | import ("$X") diff --git a/.ci/semgrep/aws/go-sdk.yml b/.ci/semgrep/aws/go-sdk.yml index ab63b1186d60..b3817159ef6f 100644 --- a/.ci/semgrep/aws/go-sdk.yml +++ b/.ci/semgrep/aws/go-sdk.yml @@ -4,12 +4,12 @@ rules: message: Resources should not implement multiple AWS Go SDK service functionality paths: include: - - internal/ + - "/internal" exclude: - - "internal/service/**/*_test.go" - - "internal/service/**/sweep.go" - - "internal/acctest/acctest.go" - - "internal/conns/**/*.go" + - "/internal/service/**/*_test.go" + - "/internal/service/**/sweep.go" + - "/internal/acctest/acctest.go" + - "/internal/conns/**/*.go" patterns: - pattern: | import ("$X") @@ -27,11 +27,11 @@ rules: message: Prefer AWS Go SDK pointer conversion functions for dereferencing during assignment, e.g. 
aws.ToString() paths: include: - - internal/service + - "/internal/service" exclude: - - "internal/service/**/*_test.go" - - "internal/service/*/service_package.go" - - "internal/service/*/service_package_gen.go" + - "/internal/service/**/*_test.go" + - "/internal/service/*/service_package.go" + - "/internal/service/*/service_package_gen.go" patterns: - pattern: "$LHS = *$RHS" - pattern-not: "*$LHS2 = *$RHS" @@ -42,9 +42,9 @@ rules: message: Prefer AWS Go SDK pointer conversion functions for dereferencing during conditionals, e.g. aws.ToString() paths: include: - - internal/service + - "/internal/service" exclude: - - "internal/service/**/*_test.go" + - "/internal/service/**/*_test.go" patterns: - pattern-either: - pattern: "$LHS == *$RHS" @@ -67,7 +67,7 @@ rules: message: Using AWS Go SDK pointer conversion, e.g. aws.String(), with immediate dereferencing is extraneous paths: include: - - internal/ + - "/internal" patterns: - pattern-either: - pattern: "*aws.Bool($VALUE)" @@ -83,7 +83,7 @@ rules: message: Prefer AWS Go SDK pointer conversion aws.ToString() function for dereferencing during d.SetId() paths: include: - - internal/ + - "/internal" pattern: "d.SetId(*$VALUE)" severity: WARNING @@ -93,7 +93,7 @@ rules: message: AWS Go SDK pointer conversion function for `d.Set()` value is extraneous paths: include: - - internal/ + - "/internal" patterns: - pattern-either: - pattern: d.Set($ATTRIBUTE, aws.ToBool($APIOBJECT)) @@ -113,6 +113,6 @@ rules: message: Prefer AWS Go SDK pointer conversion functions for dereferencing when converting int64 to int paths: include: - - internal/ + - "/internal" pattern: int(*$VALUE) severity: WARNING diff --git a/.ci/semgrep/aws/input-on-heap.yml b/.ci/semgrep/aws/input-on-heap.yml index 5a56a0c98ad6..fa98dceb30d5 100644 --- a/.ci/semgrep/aws/input-on-heap.yml +++ b/.ci/semgrep/aws/input-on-heap.yml @@ -4,229 +4,229 @@ rules: message: Create the $PKG.$INPUT struct on the stack instead of on the heap paths: exclude: - - 
"internal/service/apigatewayv2" - - "internal/service/appconfig" - - "internal/service/appfabric" - - "internal/service/appflow" - - "internal/service/appintegrations" - - "internal/service/applicationinsights" - - "internal/service/appmesh" - - "internal/service/apprunner" - - "internal/service/appstream" - - "internal/service/appsync" - - "internal/service/athena" - - "internal/service/auditmanager" - - "internal/service/autoscaling" - - "internal/service/backup" - - "internal/service/batch" - - "internal/service/bedrock" - - "internal/service/bedrockagent" - - "internal/service/budgets" - - "internal/service/ce" - - "internal/service/chatbot" - - "internal/service/chime" - - "internal/service/chimesdkmediapipelines" - - "internal/service/chimesdkvoice" - - "internal/service/cleanrooms" - - "internal/service/cloud9" - - "internal/service/cloudcontrol" - - "internal/service/cloudformation" - - "internal/service/cloudfront" - - "internal/service/cloudfrontkeyvaluestore" - - "internal/service/cloudhsmv2" - - "internal/service/cloudsearch" - - "internal/service/cloudtrail" - - "internal/service/cloudwatch" - - "internal/service/codeartifact" - - "internal/service/codebuild" - - "internal/service/codecatalyst" - - "internal/service/codecommit" - - "internal/service/codeconnections" - - "internal/service/codeguruprofiler" - - "internal/service/codegurureviewer" - - "internal/service/codepipeline" - - "internal/service/codestarconnections" - - "internal/service/codestarnotifications" - - "internal/service/cognitoidentity" - - "internal/service/cognitoidp" - - "internal/service/comprehend" - - "internal/service/computeoptimizer" - - "internal/service/configservice" - - "internal/service/controltower" - - "internal/service/connect" - - "internal/service/costoptimizationhub" - - "internal/service/cur" - - "internal/service/customerprofiles" - - "internal/service/dataexchange" - - "internal/service/datapipeline" - - "internal/service/datasync" - - 
"internal/service/datazone" - - "internal/service/dax" - - "internal/service/deploy" - - "internal/service/detective" - - "internal/service/devicefarm" - - "internal/service/devopsguru" - - "internal/service/directconnect" - - "internal/service/dlm" - - "internal/service/dms" - - "internal/service/docdb" - - "internal/service/docdbelastic" - - "internal/service/drs" - - "internal/service/ds" - - "internal/service/dynamodb" - - "internal/service/ec2/ipam_*" - - "internal/service/ec2/outposts_*" - - "internal/service/ec2/transitgateway_*" - - "internal/service/ec2/verifiedaccess_*" - - "internal/service/ec2/vpc_*" - - "internal/service/ec2/vpnclient_*" - - "internal/service/ecr" - - "internal/service/ecrpublic" - - "internal/service/ecs" - - "internal/service/efs" - - "internal/service/eks" - - "internal/service/elasticache" - - "internal/service/elasticbeanstalk" - - "internal/service/elasticsearch" - - "internal/service/elastictranscoder" - - "internal/service/elb" - - "internal/service/elbv2" - - "internal/service/emr" - - "internal/service/emrcontainers" - - "internal/service/emrserverless" - - "internal/service/events" - - "internal/service/evidently" - - "internal/service/finspace" - - "internal/service/firehose" - - "internal/service/fis" - - "internal/service/fms" - - "internal/service/fsx" - - "internal/service/gamelift" - - "internal/service/glacier" - - "internal/service/globalaccelerator" - - "internal/service/glue" - - "internal/service/grafana" - - "internal/service/guardduty" - - "internal/service/iam" - - "internal/service/identitystore" - - "internal/service/imagebuilder" - - "internal/service/inspector" - - "internal/service/inspector2" - - "internal/service/internetmonitor" - - "internal/service/iot" - - "internal/service/ivs" - - "internal/service/ivschat" - - "internal/service/kafka" - - "internal/service/kafkaconnect" - - "internal/service/kendra" - - "internal/service/keyspaces" - - "internal/service/kinesis" - - 
"internal/service/kinesisanalytics" - - "internal/service/kinesisanalyticsv2" - - "internal/service/kinesisvideo" - - "internal/service/kms" - - "internal/service/lakeformation" - - "internal/service/lambda" - - "internal/service/lexmodels" - - "internal/service/lexv2models" - - "internal/service/licensemanager" - - "internal/service/lightsail" - - "internal/service/location" - - "internal/service/logs" - - "internal/service/m2" - - "internal/service/macie2" - - "internal/service/mediaconvert" - - "internal/service/medialive" - - "internal/service/mediapackage" - - "internal/service/mediapackagev2" - - "internal/service/mediastore" - - "internal/service/memorydb" - - "internal/service/meta" - - "internal/service/mq" - - "internal/service/mwaa" - - "internal/service/neptune" - - "internal/service/networkfirewall" - - "internal/service/networkmanager" - - "internal/service/networkmonitor" - - "internal/service/oam" - - "internal/service/opensearch" - - "internal/service/opensearchserverless" - - "internal/service/opsworks" - - "internal/service/organizations" - - "internal/service/osis" - - "internal/service/outposts" - - "internal/service/paymentcryptography" - - "internal/service/pinpoint" - - "internal/service/pinpointsmsvoicev2" - - "internal/service/pipes" - - "internal/service/polly" - - "internal/service/pricing" - - "internal/service/qbusiness" - - "internal/service/qldb" - - "internal/service/quicksight" - - "internal/service/ram" - - "internal/service/rbin" - - "internal/service/rds" - - "internal/service/redshift" - - "internal/service/redshiftdata" - - "internal/service/redshiftserverless" - - "internal/service/rekognition" - - "internal/service/resiliencehub" - - "internal/service/resourceexplorer2" - - "internal/service/resourcegroups" - - "internal/service/resourcegroupstaggingapi" - - "internal/service/rolesanywhere" - - "internal/service/route53" - - "internal/service/route53domains" - - "internal/service/route53profiles" - - 
"internal/service/route53recoverycontrolconfig" - - "internal/service/route53recoveryreadiness" - - "internal/service/route53resolver" - - "internal/service/rum" - - "internal/service/s3" - - "internal/service/s3control" - - "internal/service/s3outposts" - - "internal/service/s3tables" - - "internal/service/sagemaker" - - "internal/service/scheduler" - - "internal/service/schemas" - - "internal/service/secretsmanager" - - "internal/service/securityhub" - - "internal/service/securitylake" - - "internal/service/serverlessrepo" - - "internal/service/servicecatalog" - - "internal/service/servicecatalogappregistry" - - "internal/service/servicediscovery" - - "internal/service/servicequotas" - - "internal/service/ses" - - "internal/service/sesv2" - - "internal/service/sfn" - - "internal/service/shield" - - "internal/service/signer" - - "internal/service/simpledb" - - "internal/service/sns" - - "internal/service/sqs" - - "internal/service/ssm" - - "internal/service/ssmcontacts" - - "internal/service/ssmincidents" - - "internal/service/ssmquicksetup" - - "internal/service/ssoadmin" - - "internal/service/storagegateway" - - "internal/service/sts" - - "internal/service/swf" - - "internal/service/synthetics" - - "internal/service/timestreaminfluxdb" - - "internal/service/timestreamquery" - - "internal/service/timestreamwrite" - - "internal/service/transcribe" - - "internal/service/transfer" - - "internal/service/verifiedpermissions" - - "internal/service/vpclattice" - - "internal/service/wafregional" - - "internal/service/waf" - - "internal/service/wafv2" - - "internal/service/worklink" - - "internal/service/workspaces" + - "/internal/service/apigatewayv2" + - "/internal/service/appconfig" + - "/internal/service/appfabric" + - "/internal/service/appflow" + - "/internal/service/appintegrations" + - "/internal/service/applicationinsights" + - "/internal/service/appmesh" + - "/internal/service/apprunner" + - "/internal/service/appstream" + - "/internal/service/appsync" + - 
"/internal/service/athena" + - "/internal/service/auditmanager" + - "/internal/service/autoscaling" + - "/internal/service/backup" + - "/internal/service/batch" + - "/internal/service/bedrock" + - "/internal/service/bedrockagent" + - "/internal/service/budgets" + - "/internal/service/ce" + - "/internal/service/chatbot" + - "/internal/service/chime" + - "/internal/service/chimesdkmediapipelines" + - "/internal/service/chimesdkvoice" + - "/internal/service/cleanrooms" + - "/internal/service/cloud9" + - "/internal/service/cloudcontrol" + - "/internal/service/cloudformation" + - "/internal/service/cloudfront" + - "/internal/service/cloudfrontkeyvaluestore" + - "/internal/service/cloudhsmv2" + - "/internal/service/cloudsearch" + - "/internal/service/cloudtrail" + - "/internal/service/cloudwatch" + - "/internal/service/codeartifact" + - "/internal/service/codebuild" + - "/internal/service/codecatalyst" + - "/internal/service/codecommit" + - "/internal/service/codeconnections" + - "/internal/service/codeguruprofiler" + - "/internal/service/codegurureviewer" + - "/internal/service/codepipeline" + - "/internal/service/codestarconnections" + - "/internal/service/codestarnotifications" + - "/internal/service/cognitoidentity" + - "/internal/service/cognitoidp" + - "/internal/service/comprehend" + - "/internal/service/computeoptimizer" + - "/internal/service/configservice" + - "/internal/service/controltower" + - "/internal/service/connect" + - "/internal/service/costoptimizationhub" + - "/internal/service/cur" + - "/internal/service/customerprofiles" + - "/internal/service/dataexchange" + - "/internal/service/datapipeline" + - "/internal/service/datasync" + - "/internal/service/datazone" + - "/internal/service/dax" + - "/internal/service/deploy" + - "/internal/service/detective" + - "/internal/service/devicefarm" + - "/internal/service/devopsguru" + - "/internal/service/directconnect" + - "/internal/service/dlm" + - "/internal/service/dms" + - "/internal/service/docdb" + - 
"/internal/service/docdbelastic" + - "/internal/service/drs" + - "/internal/service/ds" + - "/internal/service/dynamodb" + - "/internal/service/ec2/ipam_*" + - "/internal/service/ec2/outposts_*" + - "/internal/service/ec2/transitgateway_*" + - "/internal/service/ec2/verifiedaccess_*" + - "/internal/service/ec2/vpc_*" + - "/internal/service/ec2/vpnclient_*" + - "/internal/service/ecr" + - "/internal/service/ecrpublic" + - "/internal/service/ecs" + - "/internal/service/efs" + - "/internal/service/eks" + - "/internal/service/elasticache" + - "/internal/service/elasticbeanstalk" + - "/internal/service/elasticsearch" + - "/internal/service/elastictranscoder" + - "/internal/service/elb" + - "/internal/service/elbv2" + - "/internal/service/emr" + - "/internal/service/emrcontainers" + - "/internal/service/emrserverless" + - "/internal/service/events" + - "/internal/service/evidently" + - "/internal/service/finspace" + - "/internal/service/firehose" + - "/internal/service/fis" + - "/internal/service/fms" + - "/internal/service/fsx" + - "/internal/service/gamelift" + - "/internal/service/glacier" + - "/internal/service/globalaccelerator" + - "/internal/service/glue" + - "/internal/service/grafana" + - "/internal/service/guardduty" + - "/internal/service/iam" + - "/internal/service/identitystore" + - "/internal/service/imagebuilder" + - "/internal/service/inspector" + - "/internal/service/inspector2" + - "/internal/service/internetmonitor" + - "/internal/service/iot" + - "/internal/service/ivs" + - "/internal/service/ivschat" + - "/internal/service/kafka" + - "/internal/service/kafkaconnect" + - "/internal/service/kendra" + - "/internal/service/keyspaces" + - "/internal/service/kinesis" + - "/internal/service/kinesisanalytics" + - "/internal/service/kinesisanalyticsv2" + - "/internal/service/kinesisvideo" + - "/internal/service/kms" + - "/internal/service/lakeformation" + - "/internal/service/lambda" + - "/internal/service/lexmodels" + - "/internal/service/lexv2models" + - 
"/internal/service/licensemanager" + - "/internal/service/lightsail" + - "/internal/service/location" + - "/internal/service/logs" + - "/internal/service/m2" + - "/internal/service/macie2" + - "/internal/service/mediaconvert" + - "/internal/service/medialive" + - "/internal/service/mediapackage" + - "/internal/service/mediapackagev2" + - "/internal/service/mediastore" + - "/internal/service/memorydb" + - "/internal/service/meta" + - "/internal/service/mq" + - "/internal/service/mwaa" + - "/internal/service/neptune" + - "/internal/service/networkfirewall" + - "/internal/service/networkmanager" + - "/internal/service/networkmonitor" + - "/internal/service/oam" + - "/internal/service/opensearch" + - "/internal/service/opensearchserverless" + - "/internal/service/opsworks" + - "/internal/service/organizations" + - "/internal/service/osis" + - "/internal/service/outposts" + - "/internal/service/paymentcryptography" + - "/internal/service/pinpoint" + - "/internal/service/pinpointsmsvoicev2" + - "/internal/service/pipes" + - "/internal/service/polly" + - "/internal/service/pricing" + - "/internal/service/qbusiness" + - "/internal/service/qldb" + - "/internal/service/quicksight" + - "/internal/service/ram" + - "/internal/service/rbin" + - "/internal/service/rds" + - "/internal/service/redshift" + - "/internal/service/redshiftdata" + - "/internal/service/redshiftserverless" + - "/internal/service/rekognition" + - "/internal/service/resiliencehub" + - "/internal/service/resourceexplorer2" + - "/internal/service/resourcegroups" + - "/internal/service/resourcegroupstaggingapi" + - "/internal/service/rolesanywhere" + - "/internal/service/route53" + - "/internal/service/route53domains" + - "/internal/service/route53profiles" + - "/internal/service/route53recoverycontrolconfig" + - "/internal/service/route53recoveryreadiness" + - "/internal/service/route53resolver" + - "/internal/service/rum" + - "/internal/service/s3" + - "/internal/service/s3control" + - 
"/internal/service/s3outposts" + - "/internal/service/s3tables" + - "/internal/service/sagemaker" + - "/internal/service/scheduler" + - "/internal/service/schemas" + - "/internal/service/secretsmanager" + - "/internal/service/securityhub" + - "/internal/service/securitylake" + - "/internal/service/serverlessrepo" + - "/internal/service/servicecatalog" + - "/internal/service/servicecatalogappregistry" + - "/internal/service/servicediscovery" + - "/internal/service/servicequotas" + - "/internal/service/ses" + - "/internal/service/sesv2" + - "/internal/service/sfn" + - "/internal/service/shield" + - "/internal/service/signer" + - "/internal/service/simpledb" + - "/internal/service/sns" + - "/internal/service/sqs" + - "/internal/service/ssm" + - "/internal/service/ssmcontacts" + - "/internal/service/ssmincidents" + - "/internal/service/ssmquicksetup" + - "/internal/service/ssoadmin" + - "/internal/service/storagegateway" + - "/internal/service/sts" + - "/internal/service/swf" + - "/internal/service/synthetics" + - "/internal/service/timestreaminfluxdb" + - "/internal/service/timestreamquery" + - "/internal/service/timestreamwrite" + - "/internal/service/transcribe" + - "/internal/service/transfer" + - "/internal/service/verifiedpermissions" + - "/internal/service/vpclattice" + - "/internal/service/wafregional" + - "/internal/service/waf" + - "/internal/service/wafv2" + - "/internal/service/worklink" + - "/internal/service/workspaces" patterns: - pattern: | $X := &$PKG.$INPUT{ ... 
} @@ -240,123 +240,123 @@ rules: message: Create the $PKG.$INPUT struct on the stack instead of on the heap paths: exclude: - - "internal/service/auditmanager" - - "internal/service/ecr" - - "internal/service/ecrpublic" - - "internal/service/ecs" - - "internal/service/efs" - - "internal/service/eks" - - "internal/service/elasticache" - - "internal/service/elasticbeanstalk" - - "internal/service/elasticsearch" - - "internal/service/elastictranscoder" - - "internal/service/elb" - - "internal/service/elbv2" - - "internal/service/emr" - - "internal/service/emrcontainers" - - "internal/service/emrserverless" - - "internal/service/events" - - "internal/service/evidently" - - "internal/service/finspace" - - "internal/service/firehose" - - "internal/service/fis" - - "internal/service/fms" - - "internal/service/fsx" - - "internal/service/gamelift" - - "internal/service/glacier" - - "internal/service/globalaccelerator" - - "internal/service/glue" - - "internal/service/grafana" - - "internal/service/guardduty" - - "internal/service/iam" - - "internal/service/identitystore" - - "internal/service/imagebuilder" - - "internal/service/inspector" - - "internal/service/inspector2" - - "internal/service/internetmonitor" - - "internal/service/iot" - - "internal/service/ivs" - - "internal/service/ivschat" - - "internal/service/kafka" - - "internal/service/kafkaconnect" - - "internal/service/kendra" - - "internal/service/keyspaces" - - "internal/service/kinesis" - - "internal/service/kinesisanalytics" - - "internal/service/kinesisanalyticsv2" - - "internal/service/kinesisvideo" - - "internal/service/kms" - - "internal/service/lakeformation" - - "internal/service/lambda" - - "internal/service/lexmodels" - - "internal/service/lexv2models" - - "internal/service/licensemanager" - - "internal/service/lightsail" - - "internal/service/location" - - "internal/service/logs" - - "internal/service/m2" - - "internal/service/macie2" - - "internal/service/mediaconvert" - - 
"internal/service/medialive" - - "internal/service/memorydb" - - "internal/service/mq" - - "internal/service/mwaa" - - "internal/service/neptune" - - "internal/service/networkfirewall" - - "internal/service/networkmanager" - - "internal/service/networkmonitor" - - "internal/service/oam" - - "internal/service/opensearch" - - "internal/service/opensearchserverless" - - "internal/service/opsworks" - - "internal/service/organizations" - - "internal/service/paymentcryptography" - - "internal/service/pinpoint" - - "internal/service/pinpointsmsvoicev2" - - "internal/service/pipes" - - "internal/service/quicksight" - - "internal/service/ram" - - "internal/service/rbin" - - "internal/service/rds" - - "internal/service/redshift" - - "internal/service/redshiftserverless" - - "internal/service/resiliencehub" - - "internal/service/resourceexplorer2" - - "internal/service/resourcegroups" - - "internal/service/rolesanywhere" - - "internal/service/route53" - - "internal/service/route53domains" - - "internal/service/route53recoverycontrolconfig" - - "internal/service/route53recoveryreadiness" - - "internal/service/route53resolver" - - "internal/service/rum" - - "internal/service/s3" - - "internal/service/s3control" - - "internal/service/s3outposts" - - "internal/service/s3tables" - - "internal/service/sagemaker" - - "internal/service/scheduler" - - "internal/service/schemas" - - "internal/service/secretsmanager" - - "internal/service/securityhub" - - "internal/service/servicecatalog" - - "internal/service/servicediscovery" - - "internal/service/servicequotas" - - "internal/service/ses" - - "internal/service/sesv2" - - "internal/service/sfn" - - "internal/service/shield" - - "internal/service/signer" - - "internal/service/simpledb" - - "internal/service/sns" - - "internal/service/sqs" - - "internal/service/ssm" - - "internal/service/ssmcontacts" - - "internal/service/ssmincidents" - - "internal/service/ssoadmin" - - "internal/service/storagegateway" - - "internal/service/swf" - - 
"internal/service/synthetics" + - "/internal/service/auditmanager" + - "/internal/service/ecr" + - "/internal/service/ecrpublic" + - "/internal/service/ecs" + - "/internal/service/efs" + - "/internal/service/eks" + - "/internal/service/elasticache" + - "/internal/service/elasticbeanstalk" + - "/internal/service/elasticsearch" + - "/internal/service/elastictranscoder" + - "/internal/service/elb" + - "/internal/service/elbv2" + - "/internal/service/emr" + - "/internal/service/emrcontainers" + - "/internal/service/emrserverless" + - "/internal/service/events" + - "/internal/service/evidently" + - "/internal/service/finspace" + - "/internal/service/firehose" + - "/internal/service/fis" + - "/internal/service/fms" + - "/internal/service/fsx" + - "/internal/service/gamelift" + - "/internal/service/glacier" + - "/internal/service/globalaccelerator" + - "/internal/service/glue" + - "/internal/service/grafana" + - "/internal/service/guardduty" + - "/internal/service/iam" + - "/internal/service/identitystore" + - "/internal/service/imagebuilder" + - "/internal/service/inspector" + - "/internal/service/inspector2" + - "/internal/service/internetmonitor" + - "/internal/service/iot" + - "/internal/service/ivs" + - "/internal/service/ivschat" + - "/internal/service/kafka" + - "/internal/service/kafkaconnect" + - "/internal/service/kendra" + - "/internal/service/keyspaces" + - "/internal/service/kinesis" + - "/internal/service/kinesisanalytics" + - "/internal/service/kinesisanalyticsv2" + - "/internal/service/kinesisvideo" + - "/internal/service/kms" + - "/internal/service/lakeformation" + - "/internal/service/lambda" + - "/internal/service/lexmodels" + - "/internal/service/lexv2models" + - "/internal/service/licensemanager" + - "/internal/service/lightsail" + - "/internal/service/location" + - "/internal/service/logs" + - "/internal/service/m2" + - "/internal/service/macie2" + - "/internal/service/mediaconvert" + - "/internal/service/medialive" + - "/internal/service/memorydb" + 
- "/internal/service/mq" + - "/internal/service/mwaa" + - "/internal/service/neptune" + - "/internal/service/networkfirewall" + - "/internal/service/networkmanager" + - "/internal/service/networkmonitor" + - "/internal/service/oam" + - "/internal/service/opensearch" + - "/internal/service/opensearchserverless" + - "/internal/service/opsworks" + - "/internal/service/organizations" + - "/internal/service/paymentcryptography" + - "/internal/service/pinpoint" + - "/internal/service/pinpointsmsvoicev2" + - "/internal/service/pipes" + - "/internal/service/quicksight" + - "/internal/service/ram" + - "/internal/service/rbin" + - "/internal/service/rds" + - "/internal/service/redshift" + - "/internal/service/redshiftserverless" + - "/internal/service/resiliencehub" + - "/internal/service/resourceexplorer2" + - "/internal/service/resourcegroups" + - "/internal/service/rolesanywhere" + - "/internal/service/route53" + - "/internal/service/route53domains" + - "/internal/service/route53recoverycontrolconfig" + - "/internal/service/route53recoveryreadiness" + - "/internal/service/route53resolver" + - "/internal/service/rum" + - "/internal/service/s3" + - "/internal/service/s3control" + - "/internal/service/s3outposts" + - "/internal/service/s3tables" + - "/internal/service/sagemaker" + - "/internal/service/scheduler" + - "/internal/service/schemas" + - "/internal/service/secretsmanager" + - "/internal/service/securityhub" + - "/internal/service/servicecatalog" + - "/internal/service/servicediscovery" + - "/internal/service/servicequotas" + - "/internal/service/ses" + - "/internal/service/sesv2" + - "/internal/service/sfn" + - "/internal/service/shield" + - "/internal/service/signer" + - "/internal/service/simpledb" + - "/internal/service/sns" + - "/internal/service/sqs" + - "/internal/service/ssm" + - "/internal/service/ssmcontacts" + - "/internal/service/ssmincidents" + - "/internal/service/ssoadmin" + - "/internal/service/storagegateway" + - "/internal/service/swf" + - 
"/internal/service/synthetics" patterns: - pattern-either: - pattern: | diff --git a/.ci/semgrep/aws/waiter.yml b/.ci/semgrep/aws/waiter.yml index d9cbe83b1f3d..f149c5a5f72f 100644 --- a/.ci/semgrep/aws/waiter.yml +++ b/.ci/semgrep/aws/waiter.yml @@ -4,7 +4,7 @@ rules: message: Don't use AWS SDK for Go v2 waiters paths: exclude: - - "sweep.go" + - "**/sweep.go" patterns: - pattern: | $PKG.$FUNC($CONN) diff --git a/.ci/semgrep/errors/error-checks.yml b/.ci/semgrep/errors/error-checks.yml index 784970a08317..7ac00c741592 100644 --- a/.ci/semgrep/errors/error-checks.yml +++ b/.ci/semgrep/errors/error-checks.yml @@ -95,4 +95,11 @@ rules: if !d.IsNewResource() && tfresource.NotFound($ERR) { ... } return ... } + # e.g. internal/service/storagegateway/gateway.go + - pattern-not-inside: | + if !d.IsNewResource() && tfresource.NotFound($ERR) { ... } + if isGatewayNotConnectedErr(err) { + ... + } + if $ERR != nil { ... } severity: ERROR diff --git a/.ci/semgrep/errors/msgfmt.yml b/.ci/semgrep/errors/msgfmt.yml index 3a042b091a97..c89ac48fd9c8 100644 --- a/.ci/semgrep/errors/msgfmt.yml +++ b/.ci/semgrep/errors/msgfmt.yml @@ -10,7 +10,7 @@ rules: message: Use diag.Errorf(...) 
instead of diag.FromErr(fmt.Errorf(...)) paths: include: - - internal/ + - "/internal" patterns: - pattern-regex: diag.FromErr\(fmt.Errorf\( severity: ERROR @@ -23,7 +23,7 @@ rules: message: Remove leading 'error ' from diag.Errorf("error ...") paths: include: - - internal/ + - "/internal" patterns: - pattern-regex: 'diag.Errorf\("\s*[Ee]rror ' severity: ERROR @@ -36,7 +36,7 @@ rules: message: Remove leading 'Error ' from AppendErrorf(diags, "Error ...") paths: include: - - internal/ + - "/internal" patterns: - pattern-regex: 'AppendErrorf\(diags, "\s*[Ee]rror ' severity: ERROR @@ -49,11 +49,11 @@ rules: message: Remove leading 'error ' from fmt.Errorf("error ...") paths: include: - - internal/ + - "/internal" exclude: - - "internal/service/**/*_test.go" - - "internal/service/**/sweep.go" - - "internal/acctest/acctest.go" + - "/internal/service/**/*_test.go" + - "/internal/service/**/sweep.go" + - "/internal/acctest/acctest.go" patterns: - pattern-regex: 'fmt.Errorf\("\s*[Ee]rror ' severity: ERROR diff --git a/.ci/semgrep/framework/flex.yml b/.ci/semgrep/framework/flex.yml index 5fcd47251965..c90116f3deff 100644 --- a/.ci/semgrep/framework/flex.yml +++ b/.ci/semgrep/framework/flex.yml @@ -15,26 +15,27 @@ rules: message: Prefer `flex.Expand` to manually creating expand functions paths: exclude: - - internal/framework/flex/ + - "/internal/framework/flex/" # TODO: Remove the following exclusions - - internal/service/appfabric/ingestion_destination.go - - internal/service/auditmanager/assessment.go - - internal/service/auditmanager/control.go - - internal/service/auditmanager/framework.go - - internal/service/batch/job_queue.go - - internal/service/cognitoidp/user_pool_client.go - - internal/service/lexv2models/bot.go - - internal/service/lexv2models/bot_locale.go - - internal/service/lightsail/flex.go - - internal/service/opensearchserverless/security_config.go - - internal/service/quicksight/iam_policy_assignment.go - - internal/service/quicksight/refresh_schedule.go 
- - internal/service/securitylake/subscriber.go - - internal/service/securitylake/subscriber_notification.go - - internal/service/ssmcontacts/rotation.go - - internal/service/ssoadmin/application.go - - internal/service/ssoadmin/trusted_token_issuer.go - - internal/service/verifiedpermissions/schema.go + - "/internal/service/appfabric/ingestion_destination.go" + - "/internal/service/auditmanager/assessment.go" + - "/internal/service/auditmanager/control.go" + - "/internal/service/auditmanager/framework.go" + - "/internal/service/batch/job_queue.go" + - "/internal/service/cognitoidp/user_pool_client.go" + - "/internal/service/lexv2models/bot.go" + - "/internal/service/lexv2models/bot_locale.go" + - "/internal/service/lightsail/flex.go" + - "/internal/service/opensearchserverless/security_config.go" + - "/internal/service/quicksight/iam_policy_assignment.go" + - "/internal/service/quicksight/refresh_schedule.go" + - "/internal/service/securitylake/subscriber.go" + - "/internal/service/securitylake/subscriber_notification.go" + - "/internal/service/ssmcontacts/rotation.go" + - "/internal/service/ssoadmin/application.go" + - "/internal/service/ssoadmin/trusted_token_issuer.go" + - "/internal/service/verifiedpermissions/schema.go" + - "/internal/service/bedrockagentcore/gateway_target.go" patterns: - pattern: func $FUNC(ctx context.Context, ...) 
- metavariable-comparison: @@ -50,35 +51,36 @@ rules: message: Prefer `flex.Flatten` to manually creating flatten functions paths: exclude: - - internal/framework/flex/ + - "/internal/framework/flex/" # TODO: Remove the following exclusions - - internal/service/appconfig/environment.go - - internal/service/appfabric/ingestion_destination.go - - internal/service/auditmanager/assessment.go - - internal/service/auditmanager/control.go - - internal/service/auditmanager/framework.go - - internal/service/batch/job_queue.go - - internal/service/cognitoidp/user_pool_client.go - - internal/service/datazone/domain.go - - internal/service/datazone/environment_blueprint_configuration.go - - internal/service/ec2/vpc_security_group_ingress_rule.go - - internal/service/emr/supported_instance_types_data_source.go - - internal/service/lexv2models/bot.go - - internal/service/lexv2models/bot_locale.go - - internal/service/medialive/multiplex_program.go - - internal/service/networkfirewall/tls_inspection_configuration.go - - internal/service/opensearchserverless/security_config.go - - internal/service/quicksight/iam_policy_assignment.go - - internal/service/quicksight/refresh_schedule.go - - internal/service/securitylake/subscriber.go - - internal/service/servicequotas/templates_data_source.go - - internal/service/ssmcontacts/rotation.go - - internal/service/ssmcontacts/rotation_data_source.go - - internal/service/ssoadmin/application.go - - internal/service/ssoadmin/application_providers_data_source.go - - internal/service/ssoadmin/trusted_token_issuer.go - - internal/service/verifiedpermissions/identity_source.go - - internal/service/verifiedpermissions/schema.go + - "/internal/service/appconfig/environment.go" + - "/internal/service/appfabric/ingestion_destination.go" + - "/internal/service/auditmanager/assessment.go" + - "/internal/service/auditmanager/control.go" + - "/internal/service/auditmanager/framework.go" + - "/internal/service/batch/job_queue.go" + - 
"/internal/service/cognitoidp/user_pool_client.go" + - "/internal/service/datazone/domain.go" + - "/internal/service/datazone/environment_blueprint_configuration.go" + - "/internal/service/ec2/vpc_security_group_ingress_rule.go" + - "/internal/service/emr/supported_instance_types_data_source.go" + - "/internal/service/lexv2models/bot.go" + - "/internal/service/lexv2models/bot_locale.go" + - "/internal/service/medialive/multiplex_program.go" + - "/internal/service/networkfirewall/tls_inspection_configuration.go" + - "/internal/service/opensearchserverless/security_config.go" + - "/internal/service/quicksight/iam_policy_assignment.go" + - "/internal/service/quicksight/refresh_schedule.go" + - "/internal/service/securitylake/subscriber.go" + - "/internal/service/servicequotas/templates_data_source.go" + - "/internal/service/ssmcontacts/rotation.go" + - "/internal/service/ssmcontacts/rotation_data_source.go" + - "/internal/service/ssoadmin/application.go" + - "/internal/service/ssoadmin/application_providers_data_source.go" + - "/internal/service/ssoadmin/trusted_token_issuer.go" + - "/internal/service/verifiedpermissions/identity_source.go" + - "/internal/service/verifiedpermissions/schema.go" + - "/internal/service/bedrockagentcore/gateway_target.go" patterns: - pattern: func $FUNC(ctx context.Context, ...) - metavariable-comparison: diff --git a/.ci/semgrep/framework/import-passthrough-id.yml b/.ci/semgrep/framework/import-passthrough-id.yml index f3783af6eaae..d5a18bbc9eef 100644 --- a/.ci/semgrep/framework/import-passthrough-id.yml +++ b/.ci/semgrep/framework/import-passthrough-id.yml @@ -4,7 +4,7 @@ rules: message: Prefer `resource.ImportStatePassthroughID` to directly setting attribute paths: include: - - internal/service/ + - "/internal/service" pattern: $RESP.Diagnostics.Append($RESP.State.SetAttribute(ctx, path.Root($X), $REQ.ID)...) 
severity: WARNING fix: resource.ImportStatePassthroughID(ctx, path.Root($X), $REQ, $RESP) diff --git a/.ci/semgrep/framework/metadata_method.yml b/.ci/semgrep/framework/metadata_method.yml index a3ffdcfe7cca..0dfc09f0a61a 100644 --- a/.ci/semgrep/framework/metadata_method.yml +++ b/.ci/semgrep/framework/metadata_method.yml @@ -4,9 +4,9 @@ rules: message: Don't implement a Metadata method paths: include: - - "internal/service/*/*.go" + - "/internal/service/*/*.go" exclude: - - "internal/service/*/*_test.go" + - "/internal/service/*/*_test.go" patterns: - pattern: func $FUNC(...) { ... } - metavariable-regex: diff --git a/.ci/semgrep/migrate/context.yml b/.ci/semgrep/migrate/context.yml index d7f0cb015fb6..0316210e46a2 100644 --- a/.ci/semgrep/migrate/context.yml +++ b/.ci/semgrep/migrate/context.yml @@ -4,8 +4,8 @@ rules: message: Should not use `context.TODO()` paths: include: - - internal/service/* - - internal/acctest/* + - "/internal/service" + - "/internal/acctest" pattern: context.TODO() severity: ERROR - id: schema-noop @@ -13,8 +13,8 @@ rules: message: Should use `schema.NoopContext` instead of `schema.Noop` paths: include: - - internal/service/* - - internal/acctest/* + - "/internal/service" + - "/internal/acctest" pattern: schema.Noop severity: ERROR - id: direct-CRUD-calls @@ -22,10 +22,10 @@ rules: message: Avoid direct calls to `schema.Resource` CRUD calls paths: include: - - internal/service/* - - internal/acctest/* + - "/internal/service" + - "/internal/acctest" exclude: - - internal/service/*/sweep.go + - "/internal/service/*/sweep.go" patterns: - pattern-either: - pattern: $D.Create($DATA, $META) diff --git a/.ci/semgrep/pluginsdk/customdiff.yml b/.ci/semgrep/pluginsdk/customdiff.yml index 80c552ced531..103a24d21443 100644 --- a/.ci/semgrep/pluginsdk/customdiff.yml +++ b/.ci/semgrep/pluginsdk/customdiff.yml @@ -4,9 +4,9 @@ rules: message: Simplify CustomizeDiff All paths: include: - - "internal/service/*/*.go" + - "/internal/service/*/*.go" exclude: 
- - "internal/service/*/*_test.go" + - "/internal/service/*/*_test.go" patterns: - pattern-regex: CustomizeDiff:\s+customdiff\.All\(\s*[a-zA-Z0-9]+,?\s*\) severity: WARNING @@ -16,9 +16,9 @@ rules: message: Simplify CustomizeDiff Sequence paths: include: - - "internal/service/*/*.go" + - "/internal/service/*/*.go" exclude: - - "internal/service/*/*_test.go" + - "/internal/service/*/*_test.go" patterns: - pattern-regex: CustomizeDiff:\s+customdiff\.Sequence\(\s*[a-zA-Z0-9]+,?\s*\) severity: WARNING diff --git a/.ci/semgrep/pluginsdk/diags.yml b/.ci/semgrep/pluginsdk/diags.yml index 10549eded987..5153122255e2 100644 --- a/.ci/semgrep/pluginsdk/diags.yml +++ b/.ci/semgrep/pluginsdk/diags.yml @@ -70,9 +70,9 @@ rules: message: Return diags instead of nil paths: include: - - internal/service + - "/internal/service" exclude: - - internal/service/o* + - "/internal/service/o*" patterns: - pattern: return nil - pattern-not-inside: | diff --git a/.ci/semgrep/pluginsdk/isnewresource.yml b/.ci/semgrep/pluginsdk/isnewresource.yml index 2b5ae07080e9..33647f0bd524 100644 --- a/.ci/semgrep/pluginsdk/isnewresource.yml +++ b/.ci/semgrep/pluginsdk/isnewresource.yml @@ -4,9 +4,9 @@ rules: message: Calling `d.SetId("")` should ensure `!d.IsNewResource()` is also checked. 
See https://github.com/hashicorp/terraform-provider-aws/blob/main/docs/contributing/error-handling.md#disnewresource-checks paths: include: - - internal/service + - "/internal/service" exclude: - - internal/service/**/*_data_source.go + - "/internal/service/**/*_data_source.go" patterns: - pattern-either: - pattern: | diff --git a/.ci/semgrep/pluginsdk/quicksight/schema.yml b/.ci/semgrep/pluginsdk/quicksight/schema.yml index 4cb1c97a78c2..638945afc46a 100644 --- a/.ci/semgrep/pluginsdk/quicksight/schema.yml +++ b/.ci/semgrep/pluginsdk/quicksight/schema.yml @@ -4,7 +4,7 @@ rules: message: String attributes with length validation should use stringLenBetweenSchema paths: include: - - internal/service/quicksight/schema + - "/internal/service/quicksight/schema" patterns: - pattern-inside: "Schema: map[string]*schema.Schema{ ... }" - pattern: | @@ -21,7 +21,7 @@ rules: message: String attributes with length validation should use stringLenBetweenSchema paths: include: - - internal/service/quicksight/schema + - "/internal/service/quicksight/schema" patterns: - pattern-inside: "Schema: map[string]*schema.Schema{ ... }" - pattern: | @@ -38,7 +38,7 @@ rules: message: String attributes with length validation should use stringLenBetweenSchema paths: include: - - internal/service/quicksight/schema + - "/internal/service/quicksight/schema" patterns: - pattern-inside: "Schema: map[string]*schema.Schema{ ... }" - pattern-either: @@ -64,7 +64,7 @@ rules: message: String attributes with enum validation should use stringEnumSchema[]() paths: include: - - internal/service/quicksight/schema + - "/internal/service/quicksight/schema" patterns: - pattern-inside: "Schema: map[string]*schema.Schema{ ... }" - pattern-either: @@ -102,7 +102,7 @@ rules: message: Int attributes with between validation should use intBetweenSchema paths: include: - - internal/service/quicksight/schema + - "/internal/service/quicksight/schema" patterns: - pattern-inside: "Schema: map[string]*schema.Schema{ ... 
}" - pattern: | @@ -119,7 +119,7 @@ rules: message: Int attributes with between validation should use intBetweenSchema paths: include: - - internal/service/quicksight/schema + - "/internal/service/quicksight/schema" patterns: - pattern-inside: "Schema: map[string]*schema.Schema{ ... }" - pattern: | @@ -136,7 +136,7 @@ rules: message: Float attributes with between validation should use floatBetweenSchema paths: include: - - internal/service/quicksight/schema + - "/internal/service/quicksight/schema" patterns: - pattern-inside: "Schema: map[string]*schema.Schema{ ... }" - pattern: | @@ -153,7 +153,7 @@ rules: message: Float attributes with between validation should use floatBetweenSchema paths: include: - - internal/service/quicksight/schema + - "/internal/service/quicksight/schema" patterns: - pattern-inside: "Schema: map[string]*schema.Schema{ ... }" - pattern: | diff --git a/.ci/semgrep/pluginsdk/retry.yml b/.ci/semgrep/pluginsdk/retry.yml new file mode 100644 index 000000000000..e17df0c4764c --- /dev/null +++ b/.ci/semgrep/pluginsdk/retry.yml @@ -0,0 +1,18 @@ +rules: + - id: retry + languages: [go] + message: Don't use Plugin SDK retry functionality, use internal/retry instead + patterns: + - pattern: | + retry.RetryContext(...) + - pattern: | + sdkretry.RetryContext(...) + - pattern: | + retry.RetryableError(...) + - pattern: | + sdkretry.RetryableError(...) + - pattern: | + retry.NonRetryableError(...) + - pattern: | + sdkretry.NonRetryableError(...) + severity: WARNING diff --git a/.ci/semgrep/smarterr/enforce.yml b/.ci/semgrep/smarterr/enforce.yml new file mode 100644 index 000000000000..05d2c0b823ee --- /dev/null +++ b/.ci/semgrep/smarterr/enforce.yml @@ -0,0 +1,129 @@ +rules: + - id: go-no-sdkdiag-appendfromerr + languages: [go] + message: Use smerr.Append(ctx, diags, err) instead of sdkdiag.AppendFromErr. 
+ severity: ERROR + pattern: sdkdiag.AppendFromErr($DIAGS, $ERR) + fix: smerr.Append(ctx, $DIAGS, $ERR) + paths: + include: + - "/internal/service/cloudwatch/" + + - id: go-no-sdkdiag-appenderrorf + languages: [go] + message: Use smerr.Append(ctx, diags, err, smerr.ID, ...) instead of sdkdiag.AppendErrorf. + severity: ERROR + patterns: + - pattern: sdkdiag.AppendErrorf($DIAGS, $FMT, $ID, $ERR) + - pattern: sdkdiag.AppendErrorf($DIAGS, $FMT, $ERR) + paths: + include: + - "/internal/service/cloudwatch/" + + - id: go-no-create-appenddiagerror + languages: [go] + message: Use smerr.Append(ctx, diags, err, smerr.ID, ...) instead of create.AppendDiagError. + severity: ERROR + pattern: create.AppendDiagError($DIAGS, ...) + paths: + include: + - "/internal/service/cloudwatch/" + + - id: go-no-diagnostics-adderror + languages: [go] + message: Use smerr.AddError(ctx, &response.Diagnostics, err, smerr.ID, ...) instead of Diagnostics.AddError. + severity: ERROR + patterns: + - pattern: $RESP.Diagnostics.AddError($MSG, $ERR) + - pattern: $RESP.Diagnostics.AddError(fmt.Sprintf($FMT, ...), $ERR) + - pattern: $RESP.Diagnostics.AddError($MSG, $ERR.Error()) + - pattern-not-inside: smerr.AddError(...) + paths: + include: + - "/internal/service/cloudwatch/" + + - id: go-no-create-adderror + languages: [go] + message: Use smerr.AddError(ctx, &response.Diagnostics, err, smerr.ID, ...) instead of create.AddError. + severity: ERROR + pattern: create.AddError(&$RESP.Diagnostics, ...) + paths: + include: + - "/internal/service/cloudwatch/" + + - id: go-no-direct-diag-adderror + languages: [go] + message: Use smerr.AddError instead of resp.Diagnostics.AddError (migrate to smarterr/smerr). + severity: ERROR + patterns: + - pattern: $RESP.Diagnostics.AddError($MSG, $ERR) + - pattern-not-inside: smerr.AddError(...) 
+ paths: + include: + - "/internal/service/cloudwatch/" + + - id: go-no-direct-diag-appenderrorf + languages: [go] + message: Use smerr.Append or smerr.EnrichAppend instead of diag.AppendErrorf (migrate to smarterr/smerr). + severity: ERROR + pattern: diag.AppendErrorf(...) + paths: + include: + - "/internal/service/cloudwatch/" + + - id: go-no-direct-diag-appendfromerr + languages: [go] + message: Use smerr.Append or smerr.EnrichAppend instead of diag.AppendFromErr (migrate to smarterr/smerr). + severity: ERROR + pattern: diag.AppendFromErr(...) + paths: + include: + - "/internal/service/cloudwatch/" + + - id: go-no-direct-diag-append + languages: [go] + message: Use smerr.EnrichAppend instead of resp.Diagnostics.Append (migrate to smarterr/smerr). + severity: ERROR + patterns: + - pattern: $RESP.Diagnostics.Append(...) + - pattern-not-inside: smerr.EnrichAppend(...) + paths: + include: + - "/internal/service/cloudwatch/" + + - id: go-no-bare-return-err + languages: [go] + message: Return errors wrapped with smarterr.NewError (migrate to smarterr). + severity: ERROR + patterns: + - pattern: | + return nil, $ERR + - pattern-not-inside: | + return nil, smarterr.NewError(...) + paths: + include: + - "/internal/service/cloudwatch/" + + - id: go-no-bare-assertsinglevalueresult + languages: [go] + message: Wrap tfresource.AssertSingleValueResult with smarterr.Assert (migrate to smarterr). + severity: ERROR + patterns: + - pattern: | + return tfresource.AssertSingleValueResult(...) + - pattern-not-inside: smarterr.Assert(tfresource.AssertSingleValueResult(...)) + paths: + include: + - "/internal/service/cloudwatch/" + + - id: go-no-bare-empty-result-error + languages: [go] + message: Wrap tfresource.NewEmptyResultError with smarterr.NewError (migrate to smarterr). + severity: ERROR + patterns: + - pattern: | + return nil, tfresource.NewEmptyResultError(...) 
+ - pattern-not-inside: smarterr.NewError(tfresource.NewEmptyResultError(...)) + paths: + include: + - "/internal/service/cloudwatch/" diff --git a/.ci/semgrep/stdlib/exp.yml b/.ci/semgrep/stdlib/exp.yml index d8d9db1b3c45..30e965319e53 100644 --- a/.ci/semgrep/stdlib/exp.yml +++ b/.ci/semgrep/stdlib/exp.yml @@ -4,7 +4,7 @@ rules: message: Use Go standard library maps and slices packages instead of the golang.org/x/exp packages paths: include: - - internal/ + - "/internal" patterns: - pattern: | import ("$X") diff --git a/.ci/semgrep/tags/ds-tags-all.yml b/.ci/semgrep/tags/ds-tags-all.yml index e0e2456256ce..d64845b41543 100644 --- a/.ci/semgrep/tags/ds-tags-all.yml +++ b/.ci/semgrep/tags/ds-tags-all.yml @@ -4,7 +4,7 @@ rules: message: Data sources should not have a `tags_all` attribute paths: include: - - internal/service/**/*_data_source.go + - "/internal/service/**/*_data_source.go" patterns: - pattern-either: - pattern-regex: '"tags_all":\s*tftags.TagsSchemaComputed' diff --git a/.ci/semgrep/tags/update.yml b/.ci/semgrep/tags/update.yml index a7d491934026..e2c13078d5d0 100644 --- a/.ci/semgrep/tags/update.yml +++ b/.ci/semgrep/tags/update.yml @@ -4,7 +4,7 @@ rules: message: Do not call `UpdateTags` inside a resource create function, use `createTags` instead paths: include: - - internal/service/ + - "/internal/service/" patterns: - pattern: | func $FUNC(...) 
{ diff --git a/.ci/semgrep/tflog/tflog.go b/.ci/semgrep/tflog/tflog.go new file mode 100644 index 000000000000..a61c7e8ebcb0 --- /dev/null +++ b/.ci/semgrep/tflog/tflog.go @@ -0,0 +1,46 @@ +package main + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-log/tflog" +) + +func noAssignment() { + ctx := context.Background() + + // ruleid: setfield-without-assign + tflog.SetField(ctx, "field", "value") +} + +func assigned() { + ctx := context.Background() + + // ok: setfield-without-assign + ctx = tflog.SetField(ctx, "field", "value") +} + +func returnedContext() context.Context { + ctx := context.Background() + + // ok: setfield-without-assign + return tflog.SetField(ctx, "field", "value") +} + +func declareAndAssign_SameName() { + ctx := context.Background() + + for i := 0; i < 1; i++ { + // ok: setfield-without-assign + ctx := tflog.SetField(ctx, "field", "value") + } +} + +func declareAndAssign_Rename() { + outerCtx := context.Background() + + for i := 0; i < 1; i++ { + // ok: setfield-without-assign + innerCtx := tflog.SetField(outerCtx, "field", "value") + } +} diff --git a/.ci/semgrep/tflog/tflog.yml b/.ci/semgrep/tflog/tflog.yml index 431c83c360a0..a1b2da59cf2b 100644 --- a/.ci/semgrep/tflog/tflog.yml +++ b/.ci/semgrep/tflog/tflog.yml @@ -4,6 +4,6 @@ rules: message: The return value of "tflog.SetField" must be used patterns: - pattern: tflog.SetField(...) - - pattern-not-inside: $CTX = tflog.SetField($CTX, ...) + - pattern-not-inside: $CTX1 = tflog.SetField($CTX2, ...) - pattern-not-inside: return tflog.SetField($CTX, ...) severity: ERROR diff --git a/.ci/semgrep/types/nullable.yml b/.ci/semgrep/types/nullable.yml index 8a48ac4899af..9dabd3e7d930 100644 --- a/.ci/semgrep/types/nullable.yml +++ b/.ci/semgrep/types/nullable.yml @@ -2,6 +2,9 @@ rules: - id: valid-nullable-bool languages: [go] message: Uses of `nullable.TypeNullableBool` must be paired with `nullable.ValidateTypeStringNullableBool` unless they are strictly `Computed`. 
+ paths: + exclude: + - "*_migrate.go" patterns: - pattern: | { diff --git a/.ci/tools/go.mod b/.ci/tools/go.mod index c82566294412..a886b284bd3d 100644 --- a/.ci/tools/go.mod +++ b/.ci/tools/go.mod @@ -1,57 +1,62 @@ module github.com/hashicorp/terraform-provider-aws/tools -go 1.24.4 +go 1.24.8 require ( - github.com/YakDriver/tfproviderdocs v0.22.0 + github.com/YakDriver/tfproviderdocs v0.23.3 github.com/client9/misspell v0.3.4 - github.com/golangci/golangci-lint/v2 v2.2.1 + github.com/golangci/golangci-lint/v2 v2.5.0 github.com/hashicorp/copywrite v0.22.0 github.com/hashicorp/go-changelog v0.0.0-20250127101332-effe3832fb0b github.com/katbyte/terrafmt v0.5.5 github.com/pavius/impi v0.0.3 - github.com/rhysd/actionlint v1.7.7 - github.com/terraform-linters/tflint v0.58.0 - mvdan.cc/gofumpt v0.8.0 + github.com/rhysd/actionlint v1.7.8 + github.com/terraform-linters/tflint v0.58.1 + golang.org/x/tools v0.38.0 + mvdan.cc/gofumpt v0.9.1 ) require ( 4d63.com/gocheckcompilerdirectives v1.3.0 // indirect 4d63.com/gochecknoglobals v0.2.2 // indirect - cel.dev/expr v0.23.0 // indirect + cel.dev/expr v0.24.0 // indirect cloud.google.com/go v0.121.2 // indirect - cloud.google.com/go/auth v0.16.2 // indirect + cloud.google.com/go/auth v0.16.3 // indirect cloud.google.com/go/auth/oauth2adapt v0.2.8 // indirect cloud.google.com/go/compute/metadata v0.7.0 // indirect cloud.google.com/go/iam v1.5.2 // indirect cloud.google.com/go/monitoring v1.24.2 // indirect cloud.google.com/go/storage v1.53.0 // indirect codeberg.org/chavacava/garif v0.2.0 // indirect - dario.cat/mergo v1.0.1 // indirect - github.com/4meepo/tagalign v1.4.2 // indirect + dario.cat/mergo v1.0.2 // indirect + dev.gaijin.team/go/exhaustruct/v4 v4.0.0 // indirect + dev.gaijin.team/go/golib v0.6.0 // indirect + github.com/4meepo/tagalign v1.4.3 // indirect github.com/Abirdcfly/dupword v0.1.6 // indirect + github.com/AdminBenni/iota-mixing v1.0.0 // indirect github.com/AlecAivazis/survey/v2 v2.3.7 // indirect - 
github.com/AlwxSin/noinlineerr v1.0.3 // indirect - github.com/Antonboom/errname v1.1.0 // indirect - github.com/Antonboom/nilnil v1.1.0 // indirect - github.com/Antonboom/testifylint v1.6.1 // indirect + github.com/AlwxSin/noinlineerr v1.0.5 // indirect + github.com/Antonboom/errname v1.1.1 // indirect + github.com/Antonboom/nilnil v1.1.1 // indirect + github.com/Antonboom/testifylint v1.6.4 // indirect github.com/BurntSushi/toml v1.5.0 // indirect - github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24 // indirect - github.com/GaijinEntertainment/go-exhaustruct/v3 v3.3.1 // indirect + github.com/Djarvur/go-err113 v0.1.1 // indirect github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.27.0 // indirect github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.51.0 // indirect github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.51.0 // indirect github.com/Masterminds/goutils v1.1.1 // indirect - github.com/Masterminds/semver/v3 v3.3.1 // indirect + github.com/Masterminds/semver/v3 v3.4.0 // indirect github.com/Masterminds/sprig/v3 v3.3.0 // indirect github.com/Microsoft/go-winio v0.6.2 // indirect + github.com/MirrexOne/unqueryvet v1.2.1 // indirect github.com/OpenPeeDeeP/depguard/v2 v2.2.1 // indirect github.com/ProtonMail/go-crypto v1.1.3 // indirect github.com/agext/levenshtein v1.2.3 // indirect - github.com/alecthomas/chroma/v2 v2.18.0 // indirect + github.com/alecthomas/chroma/v2 v2.20.0 // indirect github.com/alecthomas/go-check-sumtype v0.3.1 // indirect github.com/alexkohler/nakedret/v2 v2.0.6 // indirect github.com/alexkohler/prealloc v1.0.0 // indirect + github.com/alfatraining/structtag v1.0.0 // indirect github.com/alingse/asasalint v0.0.11 // indirect github.com/alingse/nilnesserr v0.2.0 // indirect github.com/apparentlymart/go-cidr v1.1.0 // indirect @@ -69,9 +74,9 @@ require ( github.com/blang/semver v3.5.1+incompatible // indirect github.com/blizzy78/varnamelen 
v0.8.0 // indirect github.com/bmatcuk/doublestar v1.3.4 // indirect - github.com/bmatcuk/doublestar/v4 v4.8.0 // indirect + github.com/bmatcuk/doublestar/v4 v4.9.1 // indirect github.com/bombsimon/wsl/v4 v4.7.0 // indirect - github.com/bombsimon/wsl/v5 v5.0.0 // indirect + github.com/bombsimon/wsl/v5 v5.2.0 // indirect github.com/bradleyfalzon/ghinstallation/v2 v2.5.0 // indirect github.com/breml/bidichk v0.3.3 // indirect github.com/breml/errchkjson v0.4.1 // indirect @@ -91,11 +96,11 @@ require ( github.com/cli/go-gh/v2 v2.12.1 // indirect github.com/cli/safeexec v1.0.1 // indirect github.com/cloudflare/circl v1.6.1 // indirect - github.com/cncf/xds/go v0.0.0-20250326154945-ae57f3c0d45f // indirect + github.com/cncf/xds/go v0.0.0-20250501225837-2ac532fd4443 // indirect github.com/curioswitch/go-reassign v0.3.0 // indirect github.com/cyberphone/json-canonicalization v0.0.0-20220623050100-57a0ce2678a7 // indirect github.com/cyphar/filepath-securejoin v0.2.5 // indirect - github.com/daixiang0/gci v0.13.6 // indirect + github.com/daixiang0/gci v0.13.7 // indirect github.com/dave/dst v0.27.3 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/denis-tingaikin/go-header v0.5.0 // indirect @@ -112,7 +117,7 @@ require ( github.com/firefart/nonamedreturns v1.0.6 // indirect github.com/fsnotify/fsnotify v1.8.0 // indirect github.com/fzipp/gocyclo v0.6.0 // indirect - github.com/ghostiam/protogetter v0.3.15 // indirect + github.com/ghostiam/protogetter v0.3.16 // indirect github.com/go-chi/chi v4.1.2+incompatible // indirect github.com/go-critic/go-critic v0.13.0 // indirect github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 // indirect @@ -138,18 +143,21 @@ require ( github.com/go-toolsmith/astp v1.1.0 // indirect github.com/go-toolsmith/strparse v1.1.0 // indirect github.com/go-toolsmith/typep v1.1.0 // indirect - github.com/go-viper/mapstructure/v2 v2.3.0 // indirect + github.com/go-viper/mapstructure/v2 v2.4.0 // 
indirect github.com/go-xmlfmt/xmlfmt v1.1.3 // indirect github.com/gobwas/glob v0.2.3 // indirect + github.com/godoc-lint/godoc-lint v0.10.0 // indirect github.com/gofrs/flock v0.12.1 // indirect github.com/golang-jwt/jwt/v4 v4.5.2 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.4 // indirect + github.com/golangci/asciicheck v0.5.0 // indirect github.com/golangci/dupl v0.0.0-20250308024227-f665c8d69b32 // indirect - github.com/golangci/go-printf-func-name v0.1.0 // indirect + github.com/golangci/go-printf-func-name v0.1.1 // indirect github.com/golangci/gofmt v0.0.0-20250106114630-d62b90e6713d // indirect github.com/golangci/golines v0.0.0-20250217134842-442fd0091d95 // indirect github.com/golangci/misspell v0.7.0 // indirect + github.com/golangci/nilerr v0.0.0-20250918000102-015671e622fe // indirect github.com/golangci/plugin-module-register v0.1.2 // indirect github.com/golangci/revgrep v0.8.0 // indirect github.com/golangci/swaggoswag v0.0.0-20250504205917-77f2aca3143e // indirect @@ -164,16 +172,15 @@ require ( github.com/google/s2a-go v0.1.9 // indirect github.com/google/uuid v1.6.0 // indirect github.com/googleapis/enterprise-certificate-proxy v0.3.6 // indirect - github.com/googleapis/gax-go/v2 v2.14.2 // indirect + github.com/googleapis/gax-go/v2 v2.15.0 // indirect github.com/gookit/color v1.5.4 // indirect - github.com/gordonklaus/ineffassign v0.1.0 // indirect + github.com/gordonklaus/ineffassign v0.2.0 // indirect github.com/gostaticanalysis/analysisutil v0.7.1 // indirect github.com/gostaticanalysis/comment v1.5.0 // indirect github.com/gostaticanalysis/forcetypeassert v0.2.0 // indirect - github.com/gostaticanalysis/nilerr v0.1.1 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/go-cleanhttp v0.5.2 // indirect - github.com/hashicorp/go-getter v1.7.8 // indirect + github.com/hashicorp/go-getter v1.7.9 // indirect github.com/hashicorp/go-hclog v1.6.3 
// indirect github.com/hashicorp/go-immutable-radix/v2 v2.1.0 // indirect github.com/hashicorp/go-multierror v1.1.1 // indirect @@ -184,10 +191,10 @@ require ( github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect github.com/hashicorp/hc-install v0.4.0 // indirect github.com/hashicorp/hcl v1.0.0 // indirect - github.com/hashicorp/hcl/v2 v2.23.1-0.20250203194505-ba0759438da2 // indirect + github.com/hashicorp/hcl/v2 v2.24.0 // indirect github.com/hashicorp/logutils v1.0.0 // indirect github.com/hashicorp/terraform-exec v0.17.2 // indirect - github.com/hashicorp/terraform-json v0.25.0 // indirect + github.com/hashicorp/terraform-json v0.27.2 // indirect github.com/hashicorp/terraform-registry-address v0.2.4 // indirect github.com/hashicorp/terraform-svchost v0.1.1 // indirect github.com/hashicorp/yamux v0.1.1 // indirect @@ -219,35 +226,34 @@ require ( github.com/kkHAIKE/contextcheck v1.1.6 // indirect github.com/klauspost/compress v1.18.0 // indirect github.com/knadh/koanf v1.5.0 // indirect - github.com/kulti/thelper v0.6.3 // indirect + github.com/kulti/thelper v0.7.1 // indirect github.com/kunwardeep/paralleltest v1.0.14 // indirect github.com/lasiar/canonicalheader v1.1.2 // indirect github.com/ldez/exptostd v0.4.4 // indirect github.com/ldez/gomoddirectives v0.7.0 // indirect - github.com/ldez/grignotin v0.9.0 // indirect - github.com/ldez/tagliatelle v0.7.1 // indirect + github.com/ldez/grignotin v0.10.1 // indirect + github.com/ldez/tagliatelle v0.7.2 // indirect github.com/ldez/usetesting v0.5.0 // indirect github.com/leonklingele/grouper v1.1.2 // indirect github.com/letsencrypt/boulder v0.0.0-20240620165639-de9c06129bec // indirect github.com/lucasb-eyer/go-colorful v1.2.0 // indirect github.com/macabu/inamedparam v0.2.0 // indirect github.com/mailru/easyjson v0.9.0 // indirect - github.com/manuelarte/embeddedstructfieldcheck v0.3.0 // indirect + github.com/manuelarte/embeddedstructfieldcheck v0.4.0 // indirect github.com/manuelarte/funcorder v0.5.0 // 
indirect github.com/maratori/testableexamples v1.0.0 // indirect github.com/maratori/testpackage v1.1.1 // indirect github.com/matoous/godox v1.1.0 // indirect github.com/mattn/go-colorable v0.1.14 // indirect github.com/mattn/go-isatty v0.0.20 // indirect - github.com/mattn/go-runewidth v0.0.16 // indirect + github.com/mattn/go-runewidth v0.0.17 // indirect github.com/mattn/go-shellwords v1.0.12 // indirect github.com/mergestat/timediff v0.0.3 // indirect - github.com/mgechev/revive v1.10.0 // indirect + github.com/mgechev/revive v1.12.0 // indirect github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d // indirect github.com/mitchellh/cli v1.1.5 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/go-homedir v1.1.0 // indirect - github.com/mitchellh/go-testing-interface v1.14.1 // indirect github.com/mitchellh/go-wordwrap v1.0.1 // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect @@ -257,7 +263,7 @@ require ( github.com/nakabonne/nestif v0.3.1 // indirect github.com/nishanths/exhaustive v0.12.0 // indirect github.com/nishanths/predeclared v0.2.2 // indirect - github.com/nunnatsa/ginkgolinter v0.19.1 // indirect + github.com/nunnatsa/ginkgolinter v0.21.0 // indirect github.com/oklog/run v1.0.0 // indirect github.com/oklog/ulid v1.3.1 // indirect github.com/opencontainers/go-digest v1.0.0 // indirect @@ -293,7 +299,7 @@ require ( github.com/sashamelentyev/usestdlibvars v1.29.0 // indirect github.com/sassoftware/relic v7.2.1+incompatible // indirect github.com/secure-systems-lab/go-securesystemslib v0.9.0 // indirect - github.com/securego/gosec/v2 v2.22.5 // indirect + github.com/securego/gosec/v2 v2.22.8 // indirect github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 // indirect github.com/shibumi/go-pathspec v1.3.0 // indirect github.com/shopspring/decimal v1.4.0 // indirect @@ -305,26 +311,25 @@ require ( github.com/sirupsen/logrus v1.9.3 // indirect 
github.com/sivchari/containedctx v1.0.3 // indirect github.com/skeema/knownhosts v1.3.0 // indirect - github.com/sonatard/noctx v0.3.4 // indirect + github.com/sonatard/noctx v0.4.0 // indirect github.com/sourcegraph/conc v0.3.0 // indirect github.com/sourcegraph/go-diff v0.7.0 // indirect github.com/sourcegraph/go-lsp v0.0.0-20200429204803-219e11d77f5d // indirect github.com/sourcegraph/jsonrpc2 v0.2.1 // indirect github.com/spf13/afero v1.14.0 // indirect - github.com/spf13/cast v1.8.0 // indirect - github.com/spf13/cobra v1.9.1 // indirect - github.com/spf13/pflag v1.0.6 // indirect + github.com/spf13/cast v1.10.0 // indirect + github.com/spf13/cobra v1.10.1 // indirect + github.com/spf13/pflag v1.0.10 // indirect github.com/spf13/viper v1.20.1 // indirect github.com/spiffe/go-spiffe/v2 v2.5.0 // indirect github.com/ssgreg/nlreturn/v2 v2.2.1 // indirect github.com/stbenjam/no-sprintf-host-port v0.2.0 // indirect github.com/stretchr/objx v0.5.2 // indirect - github.com/stretchr/testify v1.10.0 // indirect + github.com/stretchr/testify v1.11.1 // indirect github.com/subosito/gotenv v1.6.0 // indirect - github.com/tdakkota/asciicheck v0.4.1 // indirect github.com/terraform-linters/tflint-plugin-sdk v0.22.0 // indirect github.com/terraform-linters/tflint-ruleset-terraform v0.12.0 // indirect - github.com/tetafro/godot v1.5.1 // indirect + github.com/tetafro/godot v1.5.4 // indirect github.com/thanhpk/randstr v1.0.4 // indirect github.com/theupdateframework/go-tuf v0.7.0 // indirect github.com/theupdateframework/go-tuf/v2 v2.1.1 // indirect @@ -334,11 +339,11 @@ require ( github.com/tomarrell/wrapcheck/v2 v2.11.0 // indirect github.com/tommy-muehle/go-mnd/v2 v2.5.1 // indirect github.com/transparency-dev/merkle v0.0.2 // indirect - github.com/ulikunitz/xz v0.5.12 // indirect + github.com/ulikunitz/xz v0.5.14 // indirect github.com/ultraware/funlen v0.2.0 // indirect github.com/ultraware/whitespace v0.2.0 // indirect github.com/uudashr/gocognit v1.2.0 // indirect - 
github.com/uudashr/iface v1.4.0 // indirect + github.com/uudashr/iface v1.4.1 // indirect github.com/vmihailenco/msgpack/v5 v5.3.5 // indirect github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect github.com/xanzy/ssh-agent v0.3.3 // indirect @@ -347,19 +352,19 @@ require ( github.com/yagipy/maintidx v1.0.0 // indirect github.com/yeya24/promlinter v0.3.0 // indirect github.com/ykadowak/zerologlint v0.1.5 // indirect - github.com/yuin/goldmark v1.7.12 // indirect + github.com/yuin/goldmark v1.7.13 // indirect github.com/yuin/goldmark-meta v1.1.0 // indirect - github.com/zclconf/go-cty v1.16.3 // indirect + github.com/zclconf/go-cty v1.17.0 // indirect github.com/zclconf/go-cty-yaml v1.1.0 // indirect github.com/zeebo/errs v1.4.0 // indirect gitlab.com/bosi/decorder v0.4.2 // indirect - go-simpler.org/musttag v0.13.1 // indirect - go-simpler.org/sloglint v0.11.0 // indirect + go-simpler.org/musttag v0.14.0 // indirect + go-simpler.org/sloglint v0.11.1 // indirect go.augendre.info/arangolint v0.2.0 // indirect - go.augendre.info/fatcontext v0.8.0 // indirect + go.augendre.info/fatcontext v0.8.1 // indirect go.mongodb.org/mongo-driver v1.14.0 // indirect go.opentelemetry.io/auto/sdk v1.1.0 // indirect - go.opentelemetry.io/contrib/detectors/gcp v1.35.0 // indirect + go.opentelemetry.io/contrib/detectors/gcp v1.36.0 // indirect go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.61.0 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 // indirect go.opentelemetry.io/otel v1.36.0 // indirect @@ -370,23 +375,23 @@ require ( go.uber.org/automaxprocs v1.6.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect - golang.org/x/crypto v0.39.0 // indirect + go.yaml.in/yaml/v4 v4.0.0-rc.2 // indirect + golang.org/x/crypto v0.43.0 // indirect golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 // indirect - golang.org/x/exp/typeparams v0.0.0-20250210185358-939b2ce775ac // indirect - 
golang.org/x/mod v0.25.0 // indirect - golang.org/x/net v0.41.0 // indirect + golang.org/x/exp/typeparams v0.0.0-20250911091902-df9299821621 // indirect + golang.org/x/mod v0.29.0 // indirect + golang.org/x/net v0.46.0 // indirect golang.org/x/oauth2 v0.30.0 // indirect - golang.org/x/sync v0.15.0 // indirect - golang.org/x/sys v0.33.0 // indirect - golang.org/x/term v0.32.0 // indirect - golang.org/x/text v0.26.0 // indirect + golang.org/x/sync v0.17.0 // indirect + golang.org/x/sys v0.37.0 // indirect + golang.org/x/term v0.36.0 // indirect + golang.org/x/text v0.30.0 // indirect golang.org/x/time v0.12.0 // indirect - golang.org/x/tools v0.34.0 // indirect - google.golang.org/api v0.237.0 // indirect - google.golang.org/genproto v0.0.0-20250505200425-f936aa4a68b2 // indirect + google.golang.org/api v0.246.0 // indirect + google.golang.org/genproto v0.0.0-20250603155806-513f23925822 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20250603155806-513f23925822 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822 // indirect - google.golang.org/grpc v1.73.0 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20250728155136-f173205681a0 // indirect + google.golang.org/grpc v1.74.2 // indirect google.golang.org/protobuf v1.36.6 // indirect gopkg.in/warnings.v0 v0.1.2 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect diff --git a/.ci/tools/go.sum b/.ci/tools/go.sum index ca5d194eafc0..e9f8f855974a 100644 --- a/.ci/tools/go.sum +++ b/.ci/tools/go.sum @@ -2,8 +2,8 @@ 4d63.com/gocheckcompilerdirectives v1.3.0/go.mod h1:ofsJ4zx2QAuIP/NO/NAh1ig6R1Fb18/GI7RVMwz7kAY= 4d63.com/gochecknoglobals v0.2.2 h1:H1vdnwnMaZdQW/N+NrkT1SZMTBmcwHe9Vq8lJcYYTtU= 4d63.com/gochecknoglobals v0.2.2/go.mod h1:lLxwTQjL5eIesRbvnzIP3jZtG140FnTdz+AlMa+ogt0= -cel.dev/expr v0.23.0 h1:wUb94w6OYQS4uXraxo9U+wUAs9jT47Xvl4iPgAwM2ss= -cel.dev/expr v0.23.0/go.mod h1:hLPLo1W4QUmuYdA72RBX06QTs6MXw941piREPl3Yfiw= +cel.dev/expr v0.24.0 
h1:56OvJKSH3hDGL0ml5uSxZmz3/3Pq4tJ+fb1unVLAFcY= +cel.dev/expr v0.24.0/go.mod h1:hLPLo1W4QUmuYdA72RBX06QTs6MXw941piREPl3Yfiw= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= @@ -105,8 +105,8 @@ cloud.google.com/go/assuredworkloads v1.7.0/go.mod h1:z/736/oNmtGAyU47reJgGN+KVo cloud.google.com/go/assuredworkloads v1.8.0/go.mod h1:AsX2cqyNCOvEQC8RMPnoc0yEarXQk6WEKkxYfL6kGIo= cloud.google.com/go/assuredworkloads v1.9.0/go.mod h1:kFuI1P78bplYtT77Tb1hi0FMxM0vVpRC7VVoJC3ZoT0= cloud.google.com/go/assuredworkloads v1.10.0/go.mod h1:kwdUQuXcedVdsIaKgKTp9t0UJkE5+PAVNhdQm4ZVq2E= -cloud.google.com/go/auth v0.16.2 h1:QvBAGFPLrDeoiNjyfVunhQ10HKNYuOwZ5noee0M5df4= -cloud.google.com/go/auth v0.16.2/go.mod h1:sRBas2Y1fB1vZTdurouM0AzuYQBMZinrUYL8EufhtEA= +cloud.google.com/go/auth v0.16.3 h1:kabzoQ9/bobUmnseYnBO6qQG7q4a/CffFRlJSxv2wCc= +cloud.google.com/go/auth v0.16.3/go.mod h1:NucRGjaXfzP1ltpcQ7On/VTZ0H4kWB5Jy+Y9Dnm76fA= cloud.google.com/go/auth/oauth2adapt v0.2.8 h1:keo8NaayQZ6wimpNSmW5OPc283g65QNIiLpZnkHRbnc= cloud.google.com/go/auth/oauth2adapt v0.2.8/go.mod h1:XQ9y31RkqZCcwJWNSx2Xvric3RrU88hAYYbjDWYDL+c= cloud.google.com/go/automl v1.5.0/go.mod h1:34EjfoFGMZ5sgJ9EoLsRtdPSNZLcfflJR39VbVNS2M0= @@ -344,8 +344,8 @@ cloud.google.com/go/kms v1.8.0/go.mod h1:4xFEhYFqvW+4VMELtZyxomGSYtSQKzM178ylFW4 cloud.google.com/go/kms v1.9.0/go.mod h1:qb1tPTgfF9RQP8e1wq4cLFErVuTJv7UsSC915J8dh3w= cloud.google.com/go/kms v1.10.0/go.mod h1:ng3KTUtQQU9bPX3+QGLsflZIHlkbn8amFAMY63m8d24= cloud.google.com/go/kms v1.10.1/go.mod h1:rIWk/TryCkR59GMC3YtHtXeLzd634lBbKenvyySAyYI= -cloud.google.com/go/kms v1.21.2 h1:c/PRUSMNQ8zXrc1sdAUnsenWWaNXN+PzTXfXOcSFdoE= -cloud.google.com/go/kms v1.21.2/go.mod h1:8wkMtHV/9Z8mLXEXr1GK7xPSBdi6knuLXIhqjuWcI6w= +cloud.google.com/go/kms v1.22.0 
h1:dBRIj7+GDeeEvatJeTB19oYZNV0aj6wEqSIT/7gLqtk= +cloud.google.com/go/kms v1.22.0/go.mod h1:U7mf8Sva5jpOb4bxYZdtw/9zsbIjrklYwPcvMk34AL8= cloud.google.com/go/language v1.4.0/go.mod h1:F9dRpNFQmJbkaop6g0JhSBXCNlO90e1KWx5iDdxbWic= cloud.google.com/go/language v1.6.0/go.mod h1:6dJ8t3B+lUYfStgls25GusK04NLh3eDLQnWM3mdEbhI= cloud.google.com/go/language v1.7.0/go.mod h1:DJ6dYN/W+SQOjF8e1hLQXMF21AkH2w9wiPzPCJa2MIE= @@ -622,29 +622,35 @@ cloud.google.com/go/workflows v1.9.0/go.mod h1:ZGkj1aFIOd9c8Gerkjjq7OW7I5+l6cSvT cloud.google.com/go/workflows v1.10.0/go.mod h1:fZ8LmRmZQWacon9UCX1r/g/DfAXx5VcPALq2CxzdePw= codeberg.org/chavacava/garif v0.2.0 h1:F0tVjhYbuOCnvNcU3YSpO6b3Waw6Bimy4K0mM8y6MfY= codeberg.org/chavacava/garif v0.2.0/go.mod h1:P2BPbVbT4QcvLZrORc2T29szK3xEOlnl0GiPTJmEqBQ= -dario.cat/mergo v1.0.1 h1:Ra4+bf83h2ztPIQYNP99R6m+Y7KfnARDfID+a+vLl4s= -dario.cat/mergo v1.0.1/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= +dario.cat/mergo v1.0.2 h1:85+piFYR1tMbRrLcDwR18y4UKJ3aH1Tbzi24VRW1TK8= +dario.cat/mergo v1.0.2/go.mod h1:E/hbnu0NxMFBjpMIE34DRGLWqDy0g5FuKDhCb31ngxA= +dev.gaijin.team/go/exhaustruct/v4 v4.0.0 h1:873r7aNneqoBB3IaFIzhvt2RFYTuHgmMjoKfwODoI1Y= +dev.gaijin.team/go/exhaustruct/v4 v4.0.0/go.mod h1:aZ/k2o4Y05aMJtiux15x8iXaumE88YdiB0Ai4fXOzPI= +dev.gaijin.team/go/golib v0.6.0 h1:v6nnznFTs4bppib/NyU1PQxobwDHwCXXl15P7DV5Zgo= +dev.gaijin.team/go/golib v0.6.0/go.mod h1:uY1mShx8Z/aNHWDyAkZTkX+uCi5PdX7KsG1eDQa2AVE= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA= filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4= gioui.org v0.0.0-20210308172011-57750fc8a0a6/go.mod h1:RSH6KIUZ0p2xy5zHDxgAM4zumjgTw83q2ge/PI+yyw8= git.sr.ht/~sbinet/gg v0.3.1/go.mod h1:KGYtlADtqsqANL9ueOFkWymvzUvLMQllU5Ixo+8v3pc= -github.com/4meepo/tagalign v1.4.2 h1:0hcLHPGMjDyM1gHG58cS73aQF8J4TdVR96TZViorO9E= 
-github.com/4meepo/tagalign v1.4.2/go.mod h1:+p4aMyFM+ra7nb41CnFG6aSDXqRxU/w1VQqScKqDARI= +github.com/4meepo/tagalign v1.4.3 h1:Bnu7jGWwbfpAie2vyl63Zup5KuRv21olsPIha53BJr8= +github.com/4meepo/tagalign v1.4.3/go.mod h1:00WwRjiuSbrRJnSVeGWPLp2epS5Q/l4UEy0apLLS37c= github.com/Abirdcfly/dupword v0.1.6 h1:qeL6u0442RPRe3mcaLcbaCi2/Y/hOcdtw6DE9odjz9c= github.com/Abirdcfly/dupword v0.1.6/go.mod h1:s+BFMuL/I4YSiFv29snqyjwzDp4b65W2Kvy+PKzZ6cw= github.com/AdamKorcz/go-fuzz-headers-1 v0.0.0-20230919221257-8b5d3ce2d11d h1:zjqpY4C7H15HjRPEenkS4SAn3Jy2eRRjkjZbGR30TOg= github.com/AdamKorcz/go-fuzz-headers-1 v0.0.0-20230919221257-8b5d3ce2d11d/go.mod h1:XNqJ7hv2kY++g8XEHREpi+JqZo3+0l+CH2egBVN4yqM= +github.com/AdminBenni/iota-mixing v1.0.0 h1:Os6lpjG2dp/AE5fYBPAA1zfa2qMdCAWwPMCgpwKq7wo= +github.com/AdminBenni/iota-mixing v1.0.0/go.mod h1:i4+tpAaB+qMVIV9OK3m4/DAynOd5bQFaOu+2AhtBCNY= github.com/AlecAivazis/survey/v2 v2.3.7 h1:6I/u8FvytdGsgonrYsVn2t8t4QiRnh6QSTqkkhIiSjQ= github.com/AlecAivazis/survey/v2 v2.3.7/go.mod h1:xUTIdE4KCOIjsBAE1JYsUPoCqYdZ1reCfTwbto0Fduo= -github.com/AlwxSin/noinlineerr v1.0.3 h1:9b5edChzzwX30BuBci13LHVZHF5q7hW9qtrs+wJdDog= -github.com/AlwxSin/noinlineerr v1.0.3/go.mod h1:+QgkkoYrMH7RHvcdxdlI7vYYEdgeoFOVjU9sUhw/rQc= -github.com/Antonboom/errname v1.1.0 h1:A+ucvdpMwlo/myWrkHEUEBWc/xuXdud23S8tmTb/oAE= -github.com/Antonboom/errname v1.1.0/go.mod h1:O1NMrzgUcVBGIfi3xlVuvX8Q/VP/73sseCaAppfjqZw= -github.com/Antonboom/nilnil v1.1.0 h1:jGxJxjgYS3VUUtOTNk8Z1icwT5ESpLH/426fjmQG+ng= -github.com/Antonboom/nilnil v1.1.0/go.mod h1:b7sAlogQjFa1wV8jUW3o4PMzDVFLbTux+xnQdvzdcIE= -github.com/Antonboom/testifylint v1.6.1 h1:6ZSytkFWatT8mwZlmRCHkWz1gPi+q6UBSbieji2Gj/o= -github.com/Antonboom/testifylint v1.6.1/go.mod h1:k+nEkathI2NFjKO6HvwmSrbzUcQ6FAnbZV+ZRrnXPLI= +github.com/AlwxSin/noinlineerr v1.0.5 h1:RUjt63wk1AYWTXtVXbSqemlbVTb23JOSRiNsshj7TbY= +github.com/AlwxSin/noinlineerr v1.0.5/go.mod h1:+QgkkoYrMH7RHvcdxdlI7vYYEdgeoFOVjU9sUhw/rQc= +github.com/Antonboom/errname v1.1.1 
h1:bllB7mlIbTVzO9jmSWVWLjxTEbGBVQ1Ff/ClQgtPw9Q= +github.com/Antonboom/errname v1.1.1/go.mod h1:gjhe24xoxXp0ScLtHzjiXp0Exi1RFLKJb0bVBtWKCWQ= +github.com/Antonboom/nilnil v1.1.1 h1:9Mdr6BYd8WHCDngQnNVV0b554xyisFioEKi30sksufQ= +github.com/Antonboom/nilnil v1.1.1/go.mod h1:yCyAmSw3doopbOWhJlVci+HuyNRuHJKIv6V2oYQa8II= +github.com/Antonboom/testifylint v1.6.4 h1:gs9fUEy+egzxkEbq9P4cpcMB6/G0DYdMeiFS87UiqmQ= +github.com/Antonboom/testifylint v1.6.4/go.mod h1:YO33FROXX2OoUfwjz8g+gUxQXio5i9qpVy7nXGbxDD4= github.com/Azure/azure-sdk-for-go/sdk/azcore v1.18.0 h1:Gt0j3wceWMwPmiazCa8MzMA0MfhmPIz0Qp0FJ6qcM0U= github.com/Azure/azure-sdk-for-go/sdk/azcore v1.18.0/go.mod h1:Ot/6aikWnKWi4l9QB7qVSwa8iMphQNqkWALMoNT3rzM= github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.9.0 h1:OVoM452qUFBrX+URdH3VpR299ma4kfom0yB0URYky9g= @@ -661,10 +667,8 @@ github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03 github.com/BurntSushi/toml v1.5.0 h1:W5quZX/G/csjUnuI8SUYlsHs9M38FC7znL0lIO+DvMg= github.com/BurntSushi/toml v1.5.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= -github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24 h1:sHglBQTwgx+rWPdisA5ynNEsoARbiCBOyGcJM4/OzsM= -github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24/go.mod h1:4UJr5HIiMZrwgkSPdsjy2uOQExX/WEILpIrO9UPGuXs= -github.com/GaijinEntertainment/go-exhaustruct/v3 v3.3.1 h1:Sz1JIXEcSfhz7fUi7xHnhpIE0thVASYjvosApmHuD2k= -github.com/GaijinEntertainment/go-exhaustruct/v3 v3.3.1/go.mod h1:n/LSCXNuIYqVfBlVXyHfMQkZDdp1/mmxfSjADd3z1Zg= +github.com/Djarvur/go-err113 v0.1.1 h1:eHfopDqXRwAi+YmCUas75ZE0+hoBHJ2GQNLYRSxao4g= +github.com/Djarvur/go-err113 v0.1.1/go.mod h1:IaWJdYFLg76t2ihfflPZnM1LIQszWOsFDh2hhhAVF6k= github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.27.0 h1:ErKg/3iS1AKcTkf3yixlZ54f9U1rljCkQyEXWUnIUxc= 
github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.27.0/go.mod h1:yAZHSGnqScoU556rBOVkwLze6WP5N+U11RHuWaGVxwY= github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.51.0 h1:fYE9p3esPxA/C0rQ0AHhP0drtPXDRhaWiwg1DPqO7IU= @@ -679,8 +683,8 @@ github.com/MakeNowJust/heredoc v1.0.0/go.mod h1:mG5amYoWBHf8vpLOuehzbGGw0EHxpZZ6 github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI= github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs= -github.com/Masterminds/semver/v3 v3.3.1 h1:QtNSWtVZ3nBfk8mAOu/B6v7FMJ+NHTIgUPi7rj+4nv4= -github.com/Masterminds/semver/v3 v3.3.1/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM= +github.com/Masterminds/semver/v3 v3.4.0 h1:Zog+i5UMtVoCU8oKka5P7i9q9HgrJeGzI9SA1Xbatp0= +github.com/Masterminds/semver/v3 v3.4.0/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM= github.com/Masterminds/sprig/v3 v3.2.1/go.mod h1:UoaO7Yp8KlPnJIYWTFkMaqPUYKTfGFPhxNuwnnxkKlk= github.com/Masterminds/sprig/v3 v3.3.0 h1:mQh0Yrg1XPo6vjYXgtf5OtijNAKJRNcTdOOGZe3tPhs= github.com/Masterminds/sprig/v3 v3.3.0/go.mod h1:Zy1iXRYNqNLUolqCpL4uhk6SHUMAOSCzdgBfDb35Lz0= @@ -689,6 +693,8 @@ github.com/Microsoft/go-winio v0.4.16/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugX github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY= github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= +github.com/MirrexOne/unqueryvet v1.2.1 h1:M+zdXMq84g+E1YOLa7g7ExN3dWfZQrdDSTCM7gC+m/A= +github.com/MirrexOne/unqueryvet v1.2.1/go.mod h1:IWwCwMQlSWjAIteW0t+28Q5vouyktfujzYznSIWiuOg= github.com/Netflix/go-expect v0.0.0-20220104043353-73e0943537d2 h1:+vx7roKuyA63nhn5WAunQHLTznkw5W8b1Xc0dNjp83s= github.com/Netflix/go-expect 
v0.0.0-20220104043353-73e0943537d2/go.mod h1:HBCaDeC1lPdgDeDbhX8XFpy1jqjK0IBG8W5K+xYqA0w= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= @@ -698,8 +704,8 @@ github.com/ProtonMail/go-crypto v0.0.0-20210428141323-04723f9f07d7/go.mod h1:z4/ github.com/ProtonMail/go-crypto v0.0.0-20230217124315-7d5c6f04bbb8/go.mod h1:I0gYDMZ6Z5GRU7l58bNFSkPTFN6Yl12dsUlAZ8xy98g= github.com/ProtonMail/go-crypto v1.1.3 h1:nRBOetoydLeUb4nHajyO2bKqMLfWQ/ZPwkXqXxPxCFk= github.com/ProtonMail/go-crypto v1.1.3/go.mod h1:rA3QumHc/FZ8pAHreoekgiAbzpNsfQAosU5td4SnOrE= -github.com/YakDriver/tfproviderdocs v0.22.0 h1:JstF9U96wtdHV7Ujccv/Xrs7ejuwSKDFrc34bONsBOE= -github.com/YakDriver/tfproviderdocs v0.22.0/go.mod h1:M4DS8iRhrRuyWixpFA6QdpAwHHeom9ORcGVTctjROLw= +github.com/YakDriver/tfproviderdocs v0.23.3 h1:3phUu5Wyml5lH0y5hAkxfqHmgPgG6P28rCzYp1zPaYc= +github.com/YakDriver/tfproviderdocs v0.23.3/go.mod h1:AMFL4IE88Mf18DklXvcL4pJXbuFVsjuePyJ/7B4DmHw= github.com/acomagu/bufpipe v1.0.3/go.mod h1:mxdxdup/WdsKVreO5GpW4+M/1CE2sMG4jeGJ2sYmHc4= github.com/agext/levenshtein v1.2.3 h1:YB2fHEn0UJagG8T1rrWknE3ZQzWM06O8AMAatNn7lmo= github.com/agext/levenshtein v1.2.3/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= @@ -710,12 +716,12 @@ github.com/ajstarks/svgo v0.0.0-20211024235047-1546f124cd8b/go.mod h1:1KcenG0jGW github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7/go.mod h1:6zEj6s6u/ghQa61ZWa/C2Aw3RkjiTBOix7dkqa1VLIs= github.com/alecthomas/assert/v2 v2.11.0 h1:2Q9r3ki8+JYXvGsDyBXwH3LcJ+WK5D0gc5E8vS6K3D0= github.com/alecthomas/assert/v2 v2.11.0/go.mod h1:Bze95FyfUr7x34QZrjL+XP+0qgp/zg8yS+TtBj1WA3k= -github.com/alecthomas/chroma/v2 v2.18.0 h1:6h53Q4hW83SuF+jcsp7CVhLsMozzvQvO8HBbKQW+gn4= -github.com/alecthomas/chroma/v2 v2.18.0/go.mod h1:RVX6AvYm4VfYe/zsk7mjHueLDZor3aWCNE14TFlepBk= +github.com/alecthomas/chroma/v2 v2.20.0 h1:sfIHpxPyR07/Oylvmcai3X/exDlE8+FA820NTz+9sGw= +github.com/alecthomas/chroma/v2 v2.20.0/go.mod 
h1:e7tViK0xh/Nf4BYHl00ycY6rV7b8iXBksI9E359yNmA= github.com/alecthomas/go-check-sumtype v0.3.1 h1:u9aUvbGINJxLVXiFvHUlPEaD7VDULsrxJb4Aq31NLkU= github.com/alecthomas/go-check-sumtype v0.3.1/go.mod h1:A8TSiN3UPRw3laIgWEUOHHLPa6/r9MtoigdlP5h3K/E= -github.com/alecthomas/repr v0.4.0 h1:GhI2A8MACjfegCPVq9f1FLvIBS+DrQ2KQBFZP1iFzXc= -github.com/alecthomas/repr v0.4.0/go.mod h1:Fr0507jx4eOXV7AlPV6AVZLYrLIuIeSOWtW57eE/O/4= +github.com/alecthomas/repr v0.5.1 h1:E3G4t2QbHTSNpPKBgMTln5KLkZHLOcU7r37J4pXBuIg= +github.com/alecthomas/repr v0.5.1/go.mod h1:Fr0507jx4eOXV7AlPV6AVZLYrLIuIeSOWtW57eE/O/4= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= @@ -727,6 +733,8 @@ github.com/alexkohler/nakedret/v2 v2.0.6 h1:ME3Qef1/KIKr3kWX3nti3hhgNxw6aqN5pZmQ github.com/alexkohler/nakedret/v2 v2.0.6/go.mod h1:l3RKju/IzOMQHmsEvXwkqMDzHHvurNQfAgE1eVmT40Q= github.com/alexkohler/prealloc v1.0.0 h1:Hbq0/3fJPQhNkN0dR95AVrr6R7tou91y0uHG5pOcUuw= github.com/alexkohler/prealloc v1.0.0/go.mod h1:VetnK3dIgFBBKmg0YnD9F9x6Icjd+9cvfHR56wJVlKE= +github.com/alfatraining/structtag v1.0.0 h1:2qmcUqNcCoyVJ0up879K614L9PazjBSFruTB0GOFjCc= +github.com/alfatraining/structtag v1.0.0/go.mod h1:p3Xi5SwzTi+Ryj64DqjLWz7XurHxbGsq6y3ubePJPus= github.com/alingse/asasalint v0.0.11 h1:SFwnQXJ49Kx/1GghOFz1XGqHYKp21Kq1nHad/0WQRnw= github.com/alingse/asasalint v0.0.11/go.mod h1:nCaoMhw7a9kSJObvQyVzNTPBDbNpdocqrSP7t/cW5+I= github.com/alingse/nilnesserr v0.2.0 h1:raLem5KG7EFVb4UIDAXgrv3N2JIaffeKNtcEXkEWd/w= @@ -817,12 +825,12 @@ github.com/blizzy78/varnamelen v0.8.0 h1:oqSblyuQvFsW1hbBHh1zfwrKe3kcSj0rnXkKzsQ github.com/blizzy78/varnamelen v0.8.0/go.mod h1:V9TzQZ4fLJ1DSrjVDfl89H7aMnTvKkApdHeyESmyR7k= github.com/bmatcuk/doublestar 
v1.3.4 h1:gPypJ5xD31uhX6Tf54sDPUOBXTqKH4c9aPY66CyQrS0= github.com/bmatcuk/doublestar v1.3.4/go.mod h1:wiQtGV+rzVYxB7WIlirSN++5HPtPlXEo9MEoZQC/PmE= -github.com/bmatcuk/doublestar/v4 v4.8.0 h1:DSXtrypQddoug1459viM9X9D3dp1Z7993fw36I2kNcQ= -github.com/bmatcuk/doublestar/v4 v4.8.0/go.mod h1:xBQ8jztBU6kakFMg+8WGxn0c6z1fTSPVIjEY1Wr7jzc= +github.com/bmatcuk/doublestar/v4 v4.9.1 h1:X8jg9rRZmJd4yRy7ZeNDRnM+T3ZfHv15JiBJ/avrEXE= +github.com/bmatcuk/doublestar/v4 v4.9.1/go.mod h1:xBQ8jztBU6kakFMg+8WGxn0c6z1fTSPVIjEY1Wr7jzc= github.com/bombsimon/wsl/v4 v4.7.0 h1:1Ilm9JBPRczjyUs6hvOPKvd7VL1Q++PL8M0SXBDf+jQ= github.com/bombsimon/wsl/v4 v4.7.0/go.mod h1:uV/+6BkffuzSAVYD+yGyld1AChO7/EuLrCF/8xTiapg= -github.com/bombsimon/wsl/v5 v5.0.0 h1:pWxP6X11o/YeF9eBVGXvS7L0FLEbHWm2kJJJGH6SeDQ= -github.com/bombsimon/wsl/v5 v5.0.0/go.mod h1:Gp8lD04z27wm3FANIUPZycXp+8huVsn0oxc+n4qfV9I= +github.com/bombsimon/wsl/v5 v5.2.0 h1:PyCCwd3Q7abGs3e34IW4jLYlBS+FbsU6iK+Tb3NnDp4= +github.com/bombsimon/wsl/v5 v5.2.0/go.mod h1:Gp8lD04z27wm3FANIUPZycXp+8huVsn0oxc+n4qfV9I= github.com/boombuler/barcode v1.0.0/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8= github.com/boombuler/barcode v1.0.1/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8= github.com/bradleyfalzon/ghinstallation/v2 v2.5.0 h1:yaYcGQ7yEIGbsJfW/9z7v1sLiZg/5rSNNXwmMct5XaE= @@ -898,8 +906,8 @@ github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWH github.com/cncf/xds/go v0.0.0-20220314180256-7f1daf1720fc/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20230105202645-06c439db220b/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20250326154945-ae57f3c0d45f h1:C5bqEmzEPLsHm9Mv73lSE9e9bKV23aB1vxOsmZrkl3k= -github.com/cncf/xds/go v0.0.0-20250326154945-ae57f3c0d45f/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= 
+github.com/cncf/xds/go v0.0.0-20250501225837-2ac532fd4443 h1:aQ3y1lwWyqYPiWZThqv1aFbZMiM9vblcSArJRf2Irls= +github.com/cncf/xds/go v0.0.0-20250501225837-2ac532fd4443/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= github.com/codahale/rfc6979 v0.0.0-20141003034818-6a90f24967eb h1:EDmT6Q9Zs+SbUoc7Ik9EfrFqcylYqgPZ9ANSbTAntnE= github.com/codahale/rfc6979 v0.0.0-20141003034818-6a90f24967eb/go.mod h1:ZjrT6AXHbDs86ZSdt/osfBi5qfexBrKUdONk989Wnk4= github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= @@ -914,8 +922,8 @@ github.com/cyberphone/json-canonicalization v0.0.0-20220623050100-57a0ce2678a7 h github.com/cyberphone/json-canonicalization v0.0.0-20220623050100-57a0ce2678a7/go.mod h1:uzvlm1mxhHkdfqitSA92i7Se+S9ksOn3a3qmv/kyOCw= github.com/cyphar/filepath-securejoin v0.2.5 h1:6iR5tXJ/e6tJZzzdMc1km3Sa7RRIVBKAK32O2s7AYfo= github.com/cyphar/filepath-securejoin v0.2.5/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= -github.com/daixiang0/gci v0.13.6 h1:RKuEOSkGpSadkGbvZ6hJ4ddItT3cVZ9Vn9Rybk6xjl8= -github.com/daixiang0/gci v0.13.6/go.mod h1:12etP2OniiIdP4q+kjUGrC/rUagga7ODbqsom5Eo5Yk= +github.com/daixiang0/gci v0.13.7 h1:+0bG5eK9vlI08J+J/NWGbWPTNiXPG4WhNLJOkSxWITQ= +github.com/daixiang0/gci v0.13.7/go.mod h1:812WVN6JLFY9S6Tv76twqmNqevN0pa3SX3nih0brVzQ= github.com/danieljoos/wincred v1.2.0 h1:ozqKHaLK0W/ii4KVbbvluM91W2H3Sh0BncbUNPS7jLE= github.com/danieljoos/wincred v1.2.0/go.mod h1:FzQLLMKBFdvu+osBrnFODiv32YGwCfx0SkRa/eYHgec= github.com/dave/dst v0.27.3 h1:P1HPoMza3cMEquVf9kKy8yXsFirry4zEnWOdYPOoIzY= @@ -990,8 +998,8 @@ github.com/fsnotify/fsnotify v1.8.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8 github.com/fzipp/gocyclo v0.6.0 h1:lsblElZG7d3ALtGMx9fmxeTKZaLLpU8mET09yN4BBLo= github.com/fzipp/gocyclo v0.6.0/go.mod h1:rXPyn8fnlpa0R2csP/31uerbiVBugk5whMdlyaLkLoA= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/ghostiam/protogetter v0.3.15 
h1:1KF5sXel0HE48zh1/vn0Loiw25A9ApyseLzQuif1mLY= -github.com/ghostiam/protogetter v0.3.15/go.mod h1:WZ0nw9pfzsgxuRsPOFQomgDVSWtDLJRfQJEhsGbmQMA= +github.com/ghostiam/protogetter v0.3.16 h1:UkrisuJBYLnZW6FcYUNBDJOqY3X22RtoYMlCsiNlFFA= +github.com/ghostiam/protogetter v0.3.16/go.mod h1:4SRRIv6PcjkIMpUkRUsP4TsUTqO/N3Fmvwivuc/sCHA= github.com/gliderlabs/ssh v0.2.2/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= github.com/gliderlabs/ssh v0.3.8 h1:a4YXD1V7xMF9g5nTkdfnja3Sxy1PVDCj1Zg4Wb8vY6c= github.com/gliderlabs/ssh v0.3.8/go.mod h1:xYoytBv1sV0aL3CavoDuJIQNURXkkfPA/wxQ1pL1fAU= @@ -1090,8 +1098,8 @@ github.com/go-toolsmith/strparse v1.1.0 h1:GAioeZUK9TGxnLS+qfdqNbA4z0SSm5zVNtCQi github.com/go-toolsmith/strparse v1.1.0/go.mod h1:7ksGy58fsaQkGQlY8WVoBFNyEPMGuJin1rfoPS4lBSQ= github.com/go-toolsmith/typep v1.1.0 h1:fIRYDyF+JywLfqzyhdiHzRop/GQDxxNhLGQ6gFUNHus= github.com/go-toolsmith/typep v1.1.0/go.mod h1:fVIw+7zjdsMxDA3ITWnH1yOiw1rnTQKCsF/sk2H/qig= -github.com/go-viper/mapstructure/v2 v2.3.0 h1:27XbWsHIqhbdR5TIC911OfYvgSaW93HM+dX7970Q7jk= -github.com/go-viper/mapstructure/v2 v2.3.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= +github.com/go-viper/mapstructure/v2 v2.4.0 h1:EBsztssimR/CONLSZZ04E8qAkxNYq4Qp9LvH92wZUgs= +github.com/go-viper/mapstructure/v2 v2.4.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= github.com/go-xmlfmt/xmlfmt v1.1.3 h1:t8Ey3Uy7jDSEisW2K3somuMKIpzktkWptA0iFCnRUWY= github.com/go-xmlfmt/xmlfmt v1.1.3/go.mod h1:aUCEOzzezBEjDBbFBoSiya/gduyIiWYRP6CnSFIV8AM= github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= @@ -1100,6 +1108,8 @@ github.com/goccy/go-json v0.9.11/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MG github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk= github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/godoc-lint/godoc-lint v0.10.0 
h1:OcyrziBi18sQSEpib6NesVHEJ/Xcng97NunePBA48g4= +github.com/godoc-lint/godoc-lint v0.10.0/go.mod h1:KleLcHu/CGSvkjUH2RvZyoK1MBC7pDQg4NxMYLcBBsw= github.com/gofrs/flock v0.12.1 h1:MTLVXXHf8ekldpJk3AKicLij9MdwOWkZ+a/jHHZby9E= github.com/gofrs/flock v0.12.1/go.mod h1:9zxTsyu5xtJ9DK+1tFZyibEV7y3uwDxPPfbxeeHCoD0= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= @@ -1150,18 +1160,22 @@ github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6 github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golangci/asciicheck v0.5.0 h1:jczN/BorERZwK8oiFBOGvlGPknhvq0bjnysTj4nUfo0= +github.com/golangci/asciicheck v0.5.0/go.mod h1:5RMNAInbNFw2krqN6ibBxN/zfRFa9S6tA1nPdM0l8qQ= github.com/golangci/dupl v0.0.0-20250308024227-f665c8d69b32 h1:WUvBfQL6EW/40l6OmeSBYQJNSif4O11+bmWEz+C7FYw= github.com/golangci/dupl v0.0.0-20250308024227-f665c8d69b32/go.mod h1:NUw9Zr2Sy7+HxzdjIULge71wI6yEg1lWQr7Evcu8K0E= -github.com/golangci/go-printf-func-name v0.1.0 h1:dVokQP+NMTO7jwO4bwsRwLWeudOVUPPyAKJuzv8pEJU= -github.com/golangci/go-printf-func-name v0.1.0/go.mod h1:wqhWFH5mUdJQhweRnldEywnR5021wTdZSNgwYceV14s= +github.com/golangci/go-printf-func-name v0.1.1 h1:hIYTFJqAGp1iwoIfsNTpoq1xZAarogrvjO9AfiW3B4U= +github.com/golangci/go-printf-func-name v0.1.1/go.mod h1:Es64MpWEZbh0UBtTAICOZiB+miW53w/K9Or/4QogJss= github.com/golangci/gofmt v0.0.0-20250106114630-d62b90e6713d h1:viFft9sS/dxoYY0aiOTsLKO2aZQAPT4nlQCsimGcSGE= github.com/golangci/gofmt v0.0.0-20250106114630-d62b90e6713d/go.mod h1:ivJ9QDg0XucIkmwhzCDsqcnxxlDStoTl89jDMIoNxKY= -github.com/golangci/golangci-lint/v2 v2.2.1 h1:01r5ueY3oq8gtqgA5TGtBcS+LYZ/dEzZ59/AN1NsT2E= -github.com/golangci/golangci-lint/v2 v2.2.1/go.mod h1:Wu5txvpvWB2r+vjSNS6zn3WuCiJ7HAerKNiH+sEqWZA= 
+github.com/golangci/golangci-lint/v2 v2.5.0 h1:BDRg4ASm4J1y/DSRY6zwJ5tr5Yy8ZqbZ79XrCeFxaQo= +github.com/golangci/golangci-lint/v2 v2.5.0/go.mod h1:IJtWJBZkLbx7AVrIUzLd8Oi3ADtwaNpWbR3wthVWHcc= github.com/golangci/golines v0.0.0-20250217134842-442fd0091d95 h1:AkK+w9FZBXlU/xUmBtSJN1+tAI4FIvy5WtnUnY8e4p8= github.com/golangci/golines v0.0.0-20250217134842-442fd0091d95/go.mod h1:k9mmcyWKSTMcPPvQUCfRWWQ9VHJ1U9Dc0R7kaXAgtnQ= github.com/golangci/misspell v0.7.0 h1:4GOHr/T1lTW0hhR4tgaaV1WS/lJ+ncvYCoFKmqJsj0c= github.com/golangci/misspell v0.7.0/go.mod h1:WZyyI2P3hxPY2UVHs3cS8YcllAeyfquQcKfdeE9AFVg= +github.com/golangci/nilerr v0.0.0-20250918000102-015671e622fe h1:F1pK9tBy41i7eesBFkSNMldwtiAaWiU+3fT/24sTnNI= +github.com/golangci/nilerr v0.0.0-20250918000102-015671e622fe/go.mod h1:CtTxAluxD2ng9aIT9bPrVoMuISFWCD+SaxtvYtdWA2k= github.com/golangci/plugin-module-register v0.1.2 h1:e5WM6PO6NIAEcij3B053CohVp3HIYbzSuP53UAYgOpg= github.com/golangci/plugin-module-register v0.1.2/go.mod h1:1+QGTsKBvAIvPvoY/os+G5eoqxWn70HYDm2uvUyGuVw= github.com/golangci/revgrep v0.8.0 h1:EZBctwbVd0aMeRnNUsFogoyayvKHyxlV3CdUA46FX2s= @@ -1261,27 +1275,24 @@ github.com/googleapis/gax-go/v2 v2.5.1/go.mod h1:h6B0KMMFNtI2ddbGJn3T3ZbwkeT6yqE github.com/googleapis/gax-go/v2 v2.6.0/go.mod h1:1mjbznJAPHFpesgE5ucqfYEscaz5kMdcIDwU/6+DDoY= github.com/googleapis/gax-go/v2 v2.7.0/go.mod h1:TEop28CZZQ2y+c0VxMUmu1lV+fQx57QpBWsYpwqHJx8= github.com/googleapis/gax-go/v2 v2.7.1/go.mod h1:4orTrqY6hXxxaUL4LHIPl6lGo8vAE38/qKbhSAKP6QI= -github.com/googleapis/gax-go/v2 v2.14.2 h1:eBLnkZ9635krYIPD+ag1USrOAI0Nr0QYF3+/3GqO0k0= -github.com/googleapis/gax-go/v2 v2.14.2/go.mod h1:ON64QhlJkhVtSqp4v1uaK92VyZ2gmvDQsweuyLV+8+w= +github.com/googleapis/gax-go/v2 v2.15.0 h1:SyjDc1mGgZU5LncH8gimWo9lW1DtIfPibOG81vgd/bo= +github.com/googleapis/gax-go/v2 v2.15.0/go.mod h1:zVVkkxAQHa1RQpg9z2AUCMnKhi0Qld9rcmyfL1OZhoc= github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4= 
github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= github.com/gookit/color v1.5.4 h1:FZmqs7XOyGgCAxmWyPslpiok1k05wmY3SJTytgvYFs0= github.com/gookit/color v1.5.4/go.mod h1:pZJOeOS8DM43rXbp4AZo1n9zCU2qjpcRko0b6/QJi9w= -github.com/gordonklaus/ineffassign v0.1.0 h1:y2Gd/9I7MdY1oEIt+n+rowjBNDcLQq3RsH5hwJd0f9s= -github.com/gordonklaus/ineffassign v0.1.0/go.mod h1:Qcp2HIAYhR7mNUVSIxZww3Guk4it82ghYcEXIAk+QT0= +github.com/gordonklaus/ineffassign v0.2.0 h1:Uths4KnmwxNJNzq87fwQQDDnbNb7De00VOk9Nu0TySs= +github.com/gordonklaus/ineffassign v0.2.0/go.mod h1:TIpymnagPSexySzs7F9FnO1XFTy8IT3a59vmZp5Y9Lw= github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gorilla/websocket v1.5.1 h1:gmztn0JnHVt9JZquRuzLw3g4wouNVzKL15iLr/zn/QY= github.com/gorilla/websocket v1.5.1/go.mod h1:x3kM2JMyaluk02fnUJpQuwD2dCS5NDG2ZHL0uE0tcaY= github.com/gostaticanalysis/analysisutil v0.7.1 h1:ZMCjoue3DtDWQ5WyU16YbjbQEQ3VuzwxALrpYd+HeKk= github.com/gostaticanalysis/analysisutil v0.7.1/go.mod h1:v21E3hY37WKMGSnbsw2S/ojApNWb6C1//mXO48CXbVc= -github.com/gostaticanalysis/comment v1.4.1/go.mod h1:ih6ZxzTHLdadaiSnF5WY3dxUoXfXAlTaRzuaNDlSado= github.com/gostaticanalysis/comment v1.4.2/go.mod h1:KLUTGDv6HOCotCH8h2erHKmpci2ZoR8VPu34YA2uzdM= github.com/gostaticanalysis/comment v1.5.0 h1:X82FLl+TswsUMpMh17srGRuKaaXprTaytmEpgnKIDu8= github.com/gostaticanalysis/comment v1.5.0/go.mod h1:V6eb3gpCv9GNVqb6amXzEUX3jXLVK/AdA+IrAMSqvEc= github.com/gostaticanalysis/forcetypeassert v0.2.0 h1:uSnWrrUEYDr86OCxWa4/Tp2jeYDlogZiZHzGkWFefTk= github.com/gostaticanalysis/forcetypeassert v0.2.0/go.mod h1:M5iPavzE9pPqWyeiVXSFghQjljW1+l/Uke3PXHS6ILY= -github.com/gostaticanalysis/nilerr v0.1.1 h1:ThE+hJP0fEp4zWLkWHWcRyI2Od0p7DlgYG3Uqrmrcpk= -github.com/gostaticanalysis/nilerr v0.1.1/go.mod h1:wZYb6YI5YAxxq0i1+VJbY0s2YONW0HU0GPE3+5PWN4A= github.com/gostaticanalysis/testutil 
v0.3.1-0.20210208050101-bfb5c8eec0e4/go.mod h1:D+FIZ+7OahH3ePw/izIEeH5I06eKs1IKI4Xr64/Am3M= github.com/gostaticanalysis/testutil v0.5.0 h1:Dq4wT1DdTwTGCQQv3rl3IvD5Ld0E6HiY+3Zh0sUGqw8= github.com/gostaticanalysis/testutil v0.5.0/go.mod h1:OLQSbuM6zw2EvCcXTz1lVq5unyoNft372msDY0nY5Hs= @@ -1301,8 +1312,8 @@ github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtng github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= -github.com/hashicorp/go-getter v1.7.8 h1:mshVHx1Fto0/MydBekWan5zUipGq7jO0novchgMmSiY= -github.com/hashicorp/go-getter v1.7.8/go.mod h1:2c6CboOEb9jG6YvmC9xdD+tyAFsrUaJPedwXDGr0TM4= +github.com/hashicorp/go-getter v1.7.9 h1:G9gcjrDixz7glqJ+ll5IWvggSBR+R0B54DSRt4qfdC4= +github.com/hashicorp/go-getter v1.7.9/go.mod h1:dyFCmT1AQkDfOIt9NH8pw9XBDqNrIKJT5ylbpi7zPNE= github.com/hashicorp/go-hclog v0.0.0-20180709165350-ff2cf002a8dd/go.mod h1:9bjs9uLqI8l75knNv3lV1kA55veR+WUPSiKIWcQHudI= github.com/hashicorp/go-hclog v0.8.0/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= github.com/hashicorp/go-hclog v0.12.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= @@ -1353,8 +1364,8 @@ github.com/hashicorp/hc-install v0.4.0 h1:cZkRFr1WVa0Ty6x5fTvL1TuO1flul231rWkGH9 github.com/hashicorp/hc-install v0.4.0/go.mod h1:5d155H8EC5ewegao9A4PUTMNPZaq+TbOzkJJZ4vrXeI= github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/hashicorp/hcl/v2 v2.23.1-0.20250203194505-ba0759438da2 h1:JP8y98OtHTujECs4s/HxlKc5yql/RlC99Dt1Iz4R+lM= -github.com/hashicorp/hcl/v2 v2.23.1-0.20250203194505-ba0759438da2/go.mod h1:k+HgkLpoWu9OS81sy4j1XKDXaWm/rLysG33v5ibdDnc= +github.com/hashicorp/hcl/v2 v2.24.0 
h1:2QJdZ454DSsYGoaE6QheQZjtKZSUs9Nh2izTWiwQxvE= +github.com/hashicorp/hcl/v2 v2.24.0/go.mod h1:oGoO1FIQYfn/AgyOhlg9qLC6/nOJPX3qGbkZpYAcqfM= github.com/hashicorp/logutils v1.0.0 h1:dLEQVugN8vlakKOUE3ihGLTZJRB4j+M2cdTm/ORI65Y= github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= github.com/hashicorp/mdns v1.0.4/go.mod h1:mtBihi+LeNXGtG8L9dX59gAEa12BDtBQSp4v/YAJqrc= @@ -1362,8 +1373,8 @@ github.com/hashicorp/memberlist v0.3.0/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOn github.com/hashicorp/serf v0.9.6/go.mod h1:TXZNMjZQijwlDvp+r0b63xZ45H7JmCmgg4gpTwn9UV4= github.com/hashicorp/terraform-exec v0.17.2 h1:EU7i3Fh7vDUI9nNRdMATCEfnm9axzTnad8zszYZ73Go= github.com/hashicorp/terraform-exec v0.17.2/go.mod h1:tuIbsL2l4MlwwIZx9HPM+LOV9vVyEfBYu2GsO1uH3/8= -github.com/hashicorp/terraform-json v0.25.0 h1:rmNqc/CIfcWawGiwXmRuiXJKEiJu1ntGoxseG1hLhoQ= -github.com/hashicorp/terraform-json v0.25.0/go.mod h1:sMKS8fiRDX4rVlR6EJUMudg1WcanxCMoWwTLkgZP/vc= +github.com/hashicorp/terraform-json v0.27.2 h1:BwGuzM6iUPqf9JYM/Z4AF1OJ5VVJEEzoKST/tRDBJKU= +github.com/hashicorp/terraform-json v0.27.2/go.mod h1:GzPLJ1PLdUG5xL6xn1OXWIjteQRT2CNT9o/6A9mi9hE= github.com/hashicorp/terraform-registry-address v0.2.4 h1:JXu/zHB2Ymg/TGVCRu10XqNa4Sh2bWcqCNyKWjnCPJA= github.com/hashicorp/terraform-registry-address v0.2.4/go.mod h1:tUNYTVyCtU4OIGXXMDp7WNcJ+0W1B4nmstVDgHMjfAU= github.com/hashicorp/terraform-svchost v0.1.1 h1:EZZimZ1GxdqFRinZ1tpJwVxxt49xc/S52uzrw4x0jKQ= @@ -1500,8 +1511,8 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/kulti/thelper v0.6.3 h1:ElhKf+AlItIu+xGnI990no4cE2+XaSu1ULymV2Yulxs= -github.com/kulti/thelper v0.6.3/go.mod h1:DsqKShOvP40epevkFrvIwkCMNYxMeTNjdWL4dqWHZ6I= 
+github.com/kulti/thelper v0.7.1 h1:fI8QITAoFVLx+y+vSyuLBP+rcVIB8jKooNSCT2EiI98= +github.com/kulti/thelper v0.7.1/go.mod h1:NsMjfQEy6sd+9Kfw8kCP61W1I0nerGSYSFnGaxQkcbs= github.com/kunwardeep/paralleltest v1.0.14 h1:wAkMoMeGX/kGfhQBPODT/BL8XhK23ol/nuQ3SwFaUw8= github.com/kunwardeep/paralleltest v1.0.14/go.mod h1:di4moFqtfz3ToSKxhNjhOZL+696QtJGCFe132CbBLGk= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= @@ -1512,10 +1523,10 @@ github.com/ldez/exptostd v0.4.4 h1:58AtQjnLcT/tI5W/1KU7xE/O7zW9RAWB6c/ScQAnfus= github.com/ldez/exptostd v0.4.4/go.mod h1:QfdzPw6oHjFVdNV7ILoPu5sw3OZ3OG1JS0I5JN3J4Js= github.com/ldez/gomoddirectives v0.7.0 h1:EOx8Dd56BZYSez11LVgdj025lKwlP0/E5OLSl9HDwsY= github.com/ldez/gomoddirectives v0.7.0/go.mod h1:wR4v8MN9J8kcwvrkzrx6sC9xe9Cp68gWYCsda5xvyGc= -github.com/ldez/grignotin v0.9.0 h1:MgOEmjZIVNn6p5wPaGp/0OKWyvq42KnzAt/DAb8O4Ow= -github.com/ldez/grignotin v0.9.0/go.mod h1:uaVTr0SoZ1KBii33c47O1M8Jp3OP3YDwhZCmzT9GHEk= -github.com/ldez/tagliatelle v0.7.1 h1:bTgKjjc2sQcsgPiT902+aadvMjCeMHrY7ly2XKFORIk= -github.com/ldez/tagliatelle v0.7.1/go.mod h1:3zjxUpsNB2aEZScWiZTHrAXOl1x25t3cRmzfK1mlo2I= +github.com/ldez/grignotin v0.10.1 h1:keYi9rYsgbvqAZGI1liek5c+jv9UUjbvdj3Tbn5fn4o= +github.com/ldez/grignotin v0.10.1/go.mod h1:UlDbXFCARrXbWGNGP3S5vsysNXAPhnSuBufpTEbwOas= +github.com/ldez/tagliatelle v0.7.2 h1:KuOlL70/fu9paxuxbeqlicJnCspCRjH0x8FW+NfgYUk= +github.com/ldez/tagliatelle v0.7.2/go.mod h1:PtGgm163ZplJfZMZ2sf5nhUT170rSuPgBimoyYtdaSI= github.com/ldez/usetesting v0.5.0 h1:3/QtzZObBKLy1F4F8jLuKJiKBjjVFi1IavpoWbmqLwc= github.com/ldez/usetesting v0.5.0/go.mod h1:Spnb4Qppf8JTuRgblLrEWb7IE6rDmUpGvxY3iRrzvDQ= github.com/leonklingele/grouper v1.1.2 h1:o1ARBDLOmmasUaNDesWqWCIFH3u7hoFlM84YrjT3mIY= @@ -1531,8 +1542,8 @@ github.com/macabu/inamedparam v0.2.0 h1:VyPYpOc10nkhI2qeNUdh3Zket4fcZjEWe35poddB github.com/macabu/inamedparam v0.2.0/go.mod h1:+Pee9/YfGe5LJ62pYXqB89lJ+0k5bsR8Wgz/C0Zlq3U= github.com/mailru/easyjson v0.9.0 
h1:PrnmzHw7262yW8sTBwxi1PdJA3Iw/EKBa8psRf7d9a4= github.com/mailru/easyjson v0.9.0/go.mod h1:1+xMtQp2MRNVL/V1bOzuP3aP8VNwRW55fQUto+XFtTU= -github.com/manuelarte/embeddedstructfieldcheck v0.3.0 h1:VhGqK8gANDvFYDxQkjPbv7/gDJtsGU9k6qj/hC2hgso= -github.com/manuelarte/embeddedstructfieldcheck v0.3.0/go.mod h1:LSo/IQpPfx1dXMcX4ibZCYA7Yy6ayZHIaOGM70+1Wy8= +github.com/manuelarte/embeddedstructfieldcheck v0.4.0 h1:3mAIyaGRtjK6EO9E73JlXLtiy7ha80b2ZVGyacxgfww= +github.com/manuelarte/embeddedstructfieldcheck v0.4.0/go.mod h1:z8dFSyXqp+fC6NLDSljRJeNQJJDWnY7RoWFzV3PC6UM= github.com/manuelarte/funcorder v0.5.0 h1:llMuHXXbg7tD0i/LNw8vGnkDTHFpTnWqKPI85Rknc+8= github.com/manuelarte/funcorder v0.5.0/go.mod h1:Yt3CiUQthSBMBxjShjdXMexmzpP8YGvGLjrxJNkO2hA= github.com/maratori/testableexamples v1.0.0 h1:dU5alXRrD8WKSjOUnmJZuzdxWOEQ57+7s93SLMxb2vI= @@ -1563,16 +1574,16 @@ github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWE github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-runewidth v0.0.13/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= -github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc= -github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= +github.com/mattn/go-runewidth v0.0.17 h1:78v8ZlW0bP43XfmAfPsdXcoNCelfMHsDmd/pkENfrjQ= +github.com/mattn/go-runewidth v0.0.17/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= github.com/mattn/go-shellwords v1.0.12 h1:M2zGm7EW6UQJvDeQxo4T51eKPurbeFbe8WtebGE2xrk= github.com/mattn/go-shellwords v1.0.12/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y= github.com/mattn/go-sqlite3 v1.14.14/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/mergestat/timediff v0.0.3 
h1:ucCNh4/ZrTPjFZ081PccNbhx9spymCJkFxSzgVuPU+Y= github.com/mergestat/timediff v0.0.3/go.mod h1:yvMUaRu2oetc+9IbPLYBJviz6sA7xz8OXMDfhBl7YSI= -github.com/mgechev/revive v1.10.0 h1:x2oJsd7yrDp0mC6IgZqSKBTjSUC9Zk5Ob2WfBwZic2I= -github.com/mgechev/revive v1.10.0/go.mod h1:1MRO9zUV7Yukhqh/nGRKSaw6xC5XDzPWPja5GMPWoSE= +github.com/mgechev/revive v1.12.0 h1:Q+/kkbbwerrVYPv9d9efaPGmAO/NsxwW/nE6ahpQaCU= +github.com/mgechev/revive v1.12.0/go.mod h1:VXsY2LsTigk8XU9BpZauVLjVrhICMOV3k1lpB3CXrp8= github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE= github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d h1:5PJl274Y63IEHC+7izoQE9x6ikvDFZS2mDVS3drnohI= github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE= @@ -1591,8 +1602,6 @@ github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= -github.com/mitchellh/go-testing-interface v1.14.1 h1:jrgshOhYAUVNMAJiKbEu7EqAwgJJ2JqpQmpLJOu07cU= -github.com/mitchellh/go-testing-interface v1.14.1/go.mod h1:gfgS7OtZj6MA4U1UrDRp04twqAjfvlZyCfX3sDjEym8= github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= github.com/mitchellh/go-wordwrap v1.0.1 h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQflz0v0= github.com/mitchellh/go-wordwrap v1.0.1/go.mod h1:R62XHJLzvMFRBbcrT7m7WgmE1eOyTSsCt+hzestvNj0= @@ -1623,16 +1632,16 @@ github.com/nishanths/exhaustive v0.12.0/go.mod h1:mEZ95wPIZW+x8kC4TgC+9YCUgiST7e github.com/nishanths/predeclared v0.2.2 h1:V2EPdZPliZymNAn79T8RkNApBjMmVKh5XRpLm/w98Vk= github.com/nishanths/predeclared v0.2.2/go.mod 
h1:RROzoN6TnGQupbC+lqggsOlcgysk3LMK/HI84Mp280c= github.com/npillmayer/nestext v0.1.3/go.mod h1:h2lrijH8jpicr25dFY+oAJLyzlya6jhnuG+zWp9L0Uk= -github.com/nunnatsa/ginkgolinter v0.19.1 h1:mjwbOlDQxZi9Cal+KfbEJTCz327OLNfwNvoZ70NJ+c4= -github.com/nunnatsa/ginkgolinter v0.19.1/go.mod h1:jkQ3naZDmxaZMXPWaS9rblH+i+GWXQCaS/JFIWcOH2s= +github.com/nunnatsa/ginkgolinter v0.21.0 h1:IYwuX+ajy3G1MezlMLB1BENRtFj16+Evyi4uki1NOOQ= +github.com/nunnatsa/ginkgolinter v0.21.0/go.mod h1:QlzY9UP9zaqu58FjYxhp9bnjuwXwG1bfW5rid9ChNMw= github.com/oklog/run v1.0.0 h1:Ru7dDtJNOyC66gQ5dQmaCa0qIsAUFY3sFpK1Xk8igrw= github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= github.com/onsi/ginkgo/v2 v2.23.4 h1:ktYTpKJAVZnDT4VjxSbiBenUjmlL/5QkBEocaWXiQus= github.com/onsi/ginkgo/v2 v2.23.4/go.mod h1:Bt66ApGPBFzHyR+JO10Zbt0Gsp4uWxu5mIOTusL46e8= -github.com/onsi/gomega v1.37.0 h1:CdEG8g0S133B4OswTDC/5XPSzE1OeP29QOioj2PID2Y= -github.com/onsi/gomega v1.37.0/go.mod h1:8D9+Txp43QWKhM24yyOBEdpkzN8FvJyAwecBgsU4KU0= +github.com/onsi/gomega v1.38.0 h1:c/WX+w8SLAinvuKKQFh77WEucCnPk4j2OTUr7lt7BeY= +github.com/onsi/gomega v1.38.0/go.mod h1:OcXcwId0b9QsE7Y49u+BTrL4IdKOBOKnD6VQNTJEB6o= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs= @@ -1723,8 +1732,8 @@ github.com/raeperd/recvcheck v0.2.0 h1:GnU+NsbiCqdC2XX5+vMZzP+jAJC5fht7rcVTAhX74 github.com/raeperd/recvcheck v0.2.0/go.mod h1:n04eYkwIR0JbgD73wT8wL4JjPC3wm0nFtzBnWNocnYU= github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= github.com/rhnvrm/simples3 v0.6.1/go.mod 
h1:Y+3vYm2V7Y4VijFoJHHTrja6OgPrJ2cBti8dPGkC3sA= -github.com/rhysd/actionlint v1.7.7 h1:0KgkoNTrYY7vmOCs9BW2AHxLvvpoY9nEUzgBHiPUr0k= -github.com/rhysd/actionlint v1.7.7/go.mod h1:AE6I6vJEkNaIfWqC2GNE5spIJNhxf8NCtLEKU4NnUXg= +github.com/rhysd/actionlint v1.7.8 h1:3d+N9ourgAxVYG4z2IFxFIk/YiT6V+VnKASfXGwT60E= +github.com/rhysd/actionlint v1.7.8/go.mod h1:3kiS6egcbXG+vQsJIhFxTz+UKaF1JprsE0SKrpCZKvU= github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ= github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= @@ -1766,8 +1775,8 @@ github.com/sassoftware/relic/v7 v7.6.2/go.mod h1:kjmP0IBVkJZ6gXeAu35/KCEfca//+PK github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/secure-systems-lab/go-securesystemslib v0.9.0 h1:rf1HIbL64nUpEIZnjLZ3mcNEL9NBPB0iuVjyxvq3LZc= github.com/secure-systems-lab/go-securesystemslib v0.9.0/go.mod h1:DVHKMcZ+V4/woA/peqr+L0joiRXbPpQ042GgJckkFgw= -github.com/securego/gosec/v2 v2.22.5 h1:ySws9uwOeE42DsG54v2moaJfh7r08Ev7SAYJuoMDfRA= -github.com/securego/gosec/v2 v2.22.5/go.mod h1:AWfgrFsVewk5LKobsPWlygCHt8K91boVPyL6GUZG5NY= +github.com/securego/gosec/v2 v2.22.8 h1:3NMpmfXO8wAVFZPNsd3EscOTa32Jyo6FLLlW53bexMI= +github.com/securego/gosec/v2 v2.22.8/go.mod h1:ZAw8K2ikuH9qDlfdV87JmNghnVfKB1XC7+TVzk6Utto= github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 h1:n661drycOFuPLCN3Uc8sB6B/s6Z4t2xvBgU1htSHuq8= github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3/go.mod h1:A0bzQcvG0E7Rwjx0REVgAGH58e96+X0MeOfepqsbeW4= @@ -1807,8 +1816,8 @@ github.com/sivchari/containedctx v1.0.3 h1:x+etemjbsh2fB5ewm5FeLNi5bUjK0V8n0RB+W github.com/sivchari/containedctx v1.0.3/go.mod h1:c1RDvCbnJLtH4lLcYD/GqwiBSSf4F5Qk0xld2rBqzJ4= github.com/skeema/knownhosts v1.3.0 
h1:AM+y0rI04VksttfwjkSTNQorvGqmwATnvnAHpSgc0LY= github.com/skeema/knownhosts v1.3.0/go.mod h1:sPINvnADmT/qYH1kfv+ePMmOBTH6Tbl7b5LvTDjFK7M= -github.com/sonatard/noctx v0.3.4 h1:ZeiM4rEeFTFSie/G5/HD9lHiMpQg/L4fnilaNmFQ2/A= -github.com/sonatard/noctx v0.3.4/go.mod h1:64XdbzFb18XL4LporKXp8poqZtPKbCrqQ402CV+kJas= +github.com/sonatard/noctx v0.4.0 h1:7MC/5Gg4SQ4lhLYR6mvOP6mQVSxCrdyiExo7atBs27o= +github.com/sonatard/noctx v0.4.0/go.mod h1:64XdbzFb18XL4LporKXp8poqZtPKbCrqQ402CV+kJas= github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo= github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0= github.com/sourcegraph/go-diff v0.7.0 h1:9uLlrd5T46OXs5qpp8L/MTltk0zikUGi0sNNyCpA8G0= @@ -1824,13 +1833,14 @@ github.com/spf13/afero v1.9.2/go.mod h1:iUV7ddyEEZPO5gA3zD4fJt6iStLlL+Lg4m2cihcD github.com/spf13/afero v1.14.0 h1:9tH6MapGnn/j0eb0yIXiLjERO8RB6xIVZRDCX7PtqWA= github.com/spf13/afero v1.14.0/go.mod h1:acJQ8t0ohCGuMN3O+Pv0V0hgMxNYDlvdk+VTfyZmbYo= github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cast v1.8.0 h1:gEN9K4b8Xws4EX0+a0reLmhq8moKn7ntRlQYgjPeCDk= -github.com/spf13/cast v1.8.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= -github.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo= -github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wxYW0= +github.com/spf13/cast v1.10.0 h1:h2x0u2shc1QuLHfxi+cTJvs30+ZAHOGRic8uyGTDWxY= +github.com/spf13/cast v1.10.0/go.mod h1:jNfB8QC9IA6ZuY2ZjDp0KtFO2LZZlg4S/7bzP6qqeHo= +github.com/spf13/cobra v1.10.1 h1:lJeBwCfmrnXthfAupyUTzJ/J4Nc1RsHC/mSRU2dll/s= +github.com/spf13/cobra v1.10.1/go.mod h1:7SmJGaTHFVBY0jW4NXGluQoLvhqFQM+6XSKD+P4XaB0= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o= -github.com/spf13/pflag v1.0.6/go.mod 
h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk= +github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/viper v1.20.1 h1:ZMi+z/lvLyPSCoNtFCpqjy0S4kPbirhpTMwl8BkW9X4= github.com/spf13/viper v1.20.1/go.mod h1:P9Mdzt1zoHIG8m2eZQinpiBjo6kCmZSKBClNNqjJvu4= github.com/spiffe/go-spiffe/v2 v2.5.0 h1:N2I01KCUkv1FAjZXJMwh95KK1ZIQLYbPfhaxw8WS0hE= @@ -1857,24 +1867,22 @@ github.com/stretchr/testify v1.7.4/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= -github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= -github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= -github.com/tdakkota/asciicheck v0.4.1 h1:bm0tbcmi0jezRA2b5kg4ozmMuGAFotKI3RZfrhfovg8= -github.com/tdakkota/asciicheck v0.4.1/go.mod h1:0k7M3rCfRXb0Z6bwgvkEIMleKH3kXNz9UqJ9Xuqopr8= github.com/tenntenn/modver v1.0.1 h1:2klLppGhDgzJrScMpkj9Ujy3rXPUspSjAcev9tSEBgA= github.com/tenntenn/modver v1.0.1/go.mod h1:bePIyQPb7UeioSRkw3Q0XeMhYZSMx9B8ePqg6SAMGH0= github.com/tenntenn/text/transform v0.0.0-20200319021203-7eef512accb3 h1:f+jULpRQGxTSkNYKJ51yaw6ChIqO+Je8UqsTKN/cDag= github.com/tenntenn/text/transform v0.0.0-20200319021203-7eef512accb3/go.mod 
h1:ON8b8w4BN/kE1EOhwT0o+d62W65a6aPw1nouo9LMgyY= -github.com/terraform-linters/tflint v0.58.0 h1:DIHtMHolcNNrnR2vFKAlrSWzppvXAWJ8S5kSb488kmg= -github.com/terraform-linters/tflint v0.58.0/go.mod h1:cdKZeERrzEA791ZvIjj0Q4f+KX0AhaYh/hCgm1vNcyE= +github.com/terraform-linters/tflint v0.58.1 h1:GUgfonXrRvM2qKCQ4TDqQTssdeMQm/RlnjyuQ7wDj8k= +github.com/terraform-linters/tflint v0.58.1/go.mod h1:W0Cizbz8cxKxjgQV/SsqTJxlGXN7JnAUZ6nvmMbhylI= github.com/terraform-linters/tflint-plugin-sdk v0.22.0 h1:holOVJW0hjf0wkjtnYyPWRooQNp8ETUcKE86rdYkH5U= github.com/terraform-linters/tflint-plugin-sdk v0.22.0/go.mod h1:Cag3YJjBpHdQzI/limZR+Cj7WYPLTIE61xsCdIXoeUI= github.com/terraform-linters/tflint-ruleset-terraform v0.12.0 h1:158C56w1lJ4DSezzz54ISbkrgmQKBEy8iHSBFwxsBVs= github.com/terraform-linters/tflint-ruleset-terraform v0.12.0/go.mod h1:P6r/WFW87mxzsTBxaYAWYNelweWDQzk5LJt/p3PxCn4= -github.com/tetafro/godot v1.5.1 h1:PZnjCol4+FqaEzvZg5+O8IY2P3hfY9JzRBNPv1pEDS4= -github.com/tetafro/godot v1.5.1/go.mod h1:cCdPtEndkmqqrhiCfkmxDodMQJ/f3L1BCNskCUZdTwk= +github.com/tetafro/godot v1.5.4 h1:u1ww+gqpRLiIA16yF2PV1CV1n/X3zhyezbNXC3E14Sg= +github.com/tetafro/godot v1.5.4/go.mod h1:eOkMrVQurDui411nBY2FA05EYH01r14LuWY/NrVDVcU= github.com/thanhpk/randstr v1.0.4 h1:IN78qu/bR+My+gHCvMEXhR/i5oriVHcTB/BJJIRTsNo= github.com/thanhpk/randstr v1.0.4/go.mod h1:M/H2P1eNLZzlDwAzpkkkUvoyNNMbzRGhESZuEQk3r0U= github.com/theupdateframework/go-tuf v0.7.0 h1:CqbQFrWo1ae3/I0UCblSbczevCCbS31Qvs5LdxRWqRI= @@ -1902,16 +1910,16 @@ github.com/tommy-muehle/go-mnd/v2 v2.5.1/go.mod h1:WsUAkMJMYww6l/ufffCD3m+P7LEvr github.com/transparency-dev/merkle v0.0.2 h1:Q9nBoQcZcgPamMkGn7ghV8XiTZ/kRxn1yCG81+twTK4= github.com/transparency-dev/merkle v0.0.2/go.mod h1:pqSy+OXefQ1EDUVmAJ8MUhHB9TXGuzVAT58PqBoHz1A= github.com/ulikunitz/xz v0.5.10/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= -github.com/ulikunitz/xz v0.5.12 h1:37Nm15o69RwBkXM0J6A5OlE67RZTfzUxTj8fB3dfcsc= -github.com/ulikunitz/xz v0.5.12/go.mod 
h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= +github.com/ulikunitz/xz v0.5.14 h1:uv/0Bq533iFdnMHZdRBTOlaNMdb1+ZxXIlHDZHIHcvg= +github.com/ulikunitz/xz v0.5.14/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= github.com/ultraware/funlen v0.2.0 h1:gCHmCn+d2/1SemTdYMiKLAHFYxTYz7z9VIDRaTGyLkI= github.com/ultraware/funlen v0.2.0/go.mod h1:ZE0q4TsJ8T1SQcjmkhN/w+MceuatI6pBFSxxyteHIJA= github.com/ultraware/whitespace v0.2.0 h1:TYowo2m9Nfj1baEQBjuHzvMRbp19i+RCcRYrSWoFa+g= github.com/ultraware/whitespace v0.2.0/go.mod h1:XcP1RLD81eV4BW8UhQlpaR+SDc2givTvyI8a586WjW8= github.com/uudashr/gocognit v1.2.0 h1:3BU9aMr1xbhPlvJLSydKwdLN3tEUUrzPSSM8S4hDYRA= github.com/uudashr/gocognit v1.2.0/go.mod h1:k/DdKPI6XBZO1q7HgoV2juESI2/Ofj9AcHPZhBBdrTU= -github.com/uudashr/iface v1.4.0 h1:ImZ+1oEJPXvjap7nK0md7gA9RRH7PMp4vliaLkJ2+cg= -github.com/uudashr/iface v1.4.0/go.mod h1:i/H4cfRMPe0izticV8Yz0g6/zcsh5xXlvthrdh1kqcY= +github.com/uudashr/iface v1.4.1 h1:J16Xl1wyNX9ofhpHmQ9h9gk5rnv2A6lX/2+APLTo0zU= +github.com/uudashr/iface v1.4.1/go.mod h1:pbeBPlbuU2qkNDn0mmfrxP2X+wjPMIQAy+r1MBXSXtg= github.com/vmihailenco/msgpack/v4 v4.3.12/go.mod h1:gborTTJjAo/GWTqqRjrLCn9pgNN+NXzzngzBKDPIqw4= github.com/vmihailenco/msgpack/v5 v5.3.5 h1:5gO0H1iULLWGhs2H5tbAHIZTV8/cYafcFOr9znI5mJU= github.com/vmihailenco/msgpack/v5 v5.3.5/go.mod h1:7xyJ9e+0+9SaZT0Wt1RGleJXzli6Q/V5KbhBonMG9jc= @@ -1945,15 +1953,15 @@ github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= -github.com/yuin/goldmark v1.7.12 h1:YwGP/rrea2/CnCtUHgjuolG/PnMxdQtPMO5PvaE2/nY= -github.com/yuin/goldmark v1.7.12/go.mod h1:ip/1k0VRfGynBgxOz0yCqHrbZXhcjxyuS66Brc7iBKg= +github.com/yuin/goldmark v1.7.13 h1:GPddIs617DnBLFFVJFgpo1aBfe/4xcvMc3SB5t/D0pA= 
+github.com/yuin/goldmark v1.7.13/go.mod h1:ip/1k0VRfGynBgxOz0yCqHrbZXhcjxyuS66Brc7iBKg= github.com/yuin/goldmark-meta v1.1.0 h1:pWw+JLHGZe8Rk0EGsMVssiNb/AaPMHfSRszZeUeiOUc= github.com/yuin/goldmark-meta v1.1.0/go.mod h1:U4spWENafuA7Zyg+Lj5RqK/MF+ovMYtBvXi1lBb2VP0= github.com/zalando/go-keyring v0.2.3 h1:v9CUu9phlABObO4LPWycf+zwMG7nlbb3t/B5wa97yms= github.com/zalando/go-keyring v0.2.3/go.mod h1:HL4k+OXQfJUWaMnqyuSOc0drfGPX2b51Du6K+MRgZMk= github.com/zclconf/go-cty v1.10.0/go.mod h1:vVKLxnk3puL4qRAv72AO+W99LUD4da90g3uUAzyuvAk= -github.com/zclconf/go-cty v1.16.3 h1:osr++gw2T61A8KVYHoQiFbFd1Lh3JOCXc/jFLJXKTxk= -github.com/zclconf/go-cty v1.16.3/go.mod h1:VvMs5i0vgZdhYawQNq5kePSpLAoz8u1xvZgrPIxfnZE= +github.com/zclconf/go-cty v1.17.0 h1:seZvECve6XX4tmnvRzWtJNHdscMtYEx5R7bnnVyd/d0= +github.com/zclconf/go-cty v1.17.0/go.mod h1:wqFzcImaLTI6A5HfsRwB0nj5n0MRZFwmey8YoFPPs3U= github.com/zclconf/go-cty-debug v0.0.0-20240509010212-0d6042c53940 h1:4r45xpDWB6ZMSMNJFMOjqrGHynW3DIBuR2H9j0ug+Mo= github.com/zclconf/go-cty-debug v0.0.0-20240509010212-0d6042c53940/go.mod h1:CmBdvvj3nqzfzJ6nTCIwDTPZ56aVGvDrmztiO5g3qrM= github.com/zclconf/go-cty-yaml v1.1.0 h1:nP+jp0qPHv2IhUVqmQSzjvqAWcObN0KBkUl2rWBdig0= @@ -1966,14 +1974,14 @@ gitlab.com/bosi/decorder v0.4.2 h1:qbQaV3zgwnBZ4zPMhGLW4KZe7A7NwxEhJx39R3shffo= gitlab.com/bosi/decorder v0.4.2/go.mod h1:muuhHoaJkA9QLcYHq4Mj8FJUwDZ+EirSHRiaTcTf6T8= go-simpler.org/assert v0.9.0 h1:PfpmcSvL7yAnWyChSjOz6Sp6m9j5lyK8Ok9pEL31YkQ= go-simpler.org/assert v0.9.0/go.mod h1:74Eqh5eI6vCK6Y5l3PI8ZYFXG4Sa+tkr70OIPJAUr28= -go-simpler.org/musttag v0.13.1 h1:lw2sJyu7S1X8lc8zWUAdH42y+afdcCnHhWpnkWvd6vU= -go-simpler.org/musttag v0.13.1/go.mod h1:8r450ehpMLQgvpb6sg+hV5Ur47eH6olp/3yEanfG97k= -go-simpler.org/sloglint v0.11.0 h1:JlR1X4jkbeaffiyjLtymeqmGDKBDO1ikC6rjiuFAOco= -go-simpler.org/sloglint v0.11.0/go.mod h1:CFDO8R1i77dlciGfPEPvYke2ZMx4eyGiEIWkyeW2Pvw= +go-simpler.org/musttag v0.14.0 h1:XGySZATqQYSEV3/YTy+iX+aofbZZllJaqwFWs+RTtSo= +go-simpler.org/musttag 
v0.14.0/go.mod h1:uP8EymctQjJ4Z1kUnjX0u2l60WfUdQxCwSNKzE1JEOE= +go-simpler.org/sloglint v0.11.1 h1:xRbPepLT/MHPTCA6TS/wNfZrDzkGvCCqUv4Bdwc3H7s= +go-simpler.org/sloglint v0.11.1/go.mod h1:2PowwiCOK8mjiF+0KGifVOT8ZsCNiFzvfyJeJOIt8MQ= go.augendre.info/arangolint v0.2.0 h1:2NP/XudpPmfBhQKX4rMk+zDYIj//qbt4hfZmSSTcpj8= go.augendre.info/arangolint v0.2.0/go.mod h1:Vx4KSJwu48tkE+8uxuf0cbBnAPgnt8O1KWiT7bljq7w= -go.augendre.info/fatcontext v0.8.0 h1:2dfk6CQbDGeu1YocF59Za5Pia7ULeAM6friJ3LP7lmk= -go.augendre.info/fatcontext v0.8.0/go.mod h1:oVJfMgwngMsHO+KB2MdgzcO+RvtNdiCEOlWvSFtax/s= +go.augendre.info/fatcontext v0.8.1 h1:/T4+cCjpL9g71gJpcFAgVo/K5VFpqlN+NPU7QXxD5+A= +go.augendre.info/fatcontext v0.8.1/go.mod h1:r3Qz4ZOzex66wfyyj5VZ1xUcl81vzvHQ6/GWzzlMEwA= go.etcd.io/etcd/api/v3 v3.5.4/go.mod h1:5GB2vv4A4AOn3yk7MftYGHkUfGtDHnEraIjym4dYz5A= go.etcd.io/etcd/client/pkg/v3 v3.5.4/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= go.etcd.io/etcd/client/v3 v3.5.4/go.mod h1:ZaRkVgBZC+L+dLCjTcF1hRXpgZXQPOvnA/Ak/gq3kiY= @@ -1989,8 +1997,8 @@ go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= -go.opentelemetry.io/contrib/detectors/gcp v1.35.0 h1:bGvFt68+KTiAKFlacHW6AhA56GF2rS0bdD3aJYEnmzA= -go.opentelemetry.io/contrib/detectors/gcp v1.35.0/go.mod h1:qGWP8/+ILwMRIUf9uIVLloR1uo5ZYAslM4O6OqUi1DA= +go.opentelemetry.io/contrib/detectors/gcp v1.36.0 h1:F7q2tNlCaHY9nMKHR6XH9/qkp8FktLnIcy6jJNyOCQw= +go.opentelemetry.io/contrib/detectors/gcp v1.36.0/go.mod h1:IbBN8uAIIx734PTonTPxAxnjc2pQTxWNkwfstZ+6H2k= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.61.0 h1:q4XOmH/0opmeuJtPsbFNivyl7bCt7yRBbeEm2sC/XtQ= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc 
v0.61.0/go.mod h1:snMWehoOh2wsEwnvvwtDyFCxVeDAODenXHtn5vzrKjo= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 h1:F7Jx+6hwnZ41NSFTO5q4LYDtJRXBf2PD0rNBkeB/lus= @@ -2023,6 +2031,8 @@ go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN8 go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= +go.yaml.in/yaml/v4 v4.0.0-rc.2 h1:/FrI8D64VSr4HtGIlUtlFMGsm7H7pWTbj6vOLVZcA6s= +go.yaml.in/yaml/v4 v4.0.0-rc.2/go.mod h1:aZqd9kCMsGL7AuUv/m/PvWLdg5sjJsZ4oHDEnfPPfY0= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190219172222-a4c6cb3142f2/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= @@ -2048,8 +2058,8 @@ golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= golang.org/x/crypto v0.32.0/go.mod h1:ZnnJkOaASj8g0AjIduWNlq2NRxL0PlBrbKVyZ6V/Ugc= -golang.org/x/crypto v0.39.0 h1:SHs+kF4LP+f+p14esP5jAoDpHU8Gu/v9lFRK6IT5imM= -golang.org/x/crypto v0.39.0/go.mod h1:L+Xg3Wf6HoL4Bn4238Z6ft6KfEpN0tJGo53AAPC632U= +golang.org/x/crypto v0.43.0 h1:dduJYIi3A3KOfdGOHX8AVZ/jGiyPa3IbBozJ5kNuE04= +golang.org/x/crypto v0.43.0/go.mod h1:BFbav4mRNlXJL4wNeejLpWxB7wMbc79PdRGhWKncxR0= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod 
h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -2069,8 +2079,8 @@ golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 h1:e66Fs6Z+fZTbFBAxKfP3PALWB golang.org/x/exp v0.0.0-20240909161429-701f63a606c0/go.mod h1:2TbTHSBQa924w8M6Xs1QcRcFwyucIwBGpK1p2f1YFFY= golang.org/x/exp/typeparams v0.0.0-20220428152302-39d4317da171/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= golang.org/x/exp/typeparams v0.0.0-20230203172020-98cc5a0785f9/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= -golang.org/x/exp/typeparams v0.0.0-20250210185358-939b2ce775ac h1:TSSpLIG4v+p0rPv1pNOQtl1I8knsO4S9trOxNMOLVP4= -golang.org/x/exp/typeparams v0.0.0-20250210185358-939b2ce775ac/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= +golang.org/x/exp/typeparams v0.0.0-20250911091902-df9299821621 h1:Yl4H5w2RV7L/dvSHp2GerziT5K2CORgFINPaMFxWGWw= +golang.org/x/exp/typeparams v0.0.0-20250911091902-df9299821621/go.mod h1:4Mzdyp/6jzw9auFDJ3OMF5qksa7UvPnzKqTVGcb04ms= golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= @@ -2118,8 +2128,8 @@ golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.13.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= -golang.org/x/mod v0.25.0 h1:n7a+ZbQKQA/Ysbyb0/6IbB1H/X41mKgbhfv7AfG/44w= -golang.org/x/mod v0.25.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww= +golang.org/x/mod v0.29.0 h1:HV8lRxZC4l2cr3Zq1LvtOsi/ThTgWnUk/y64QSs8GwA= +golang.org/x/mod v0.29.0/go.mod h1:NyhrlYXJ2H4eJiRy/WDBO6HMqZQ6q9nk4JzS3NuCK+w= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod 
h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -2189,8 +2199,8 @@ golang.org/x/net v0.16.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= golang.org/x/net v0.34.0/go.mod h1:di0qlW3YNM5oh6GqDGQr92MyTozJPmybPK4Ev/Gm31k= -golang.org/x/net v0.41.0 h1:vBTly1HeNPEn3wtREYfy4GZ/NECgw2Cnl+nK6Nz3uvw= -golang.org/x/net v0.41.0/go.mod h1:B/K4NNqkfmg07DQYrbwvSluqCJOOXwUjeb/5lOisjbA= +golang.org/x/net v0.46.0 h1:giFlY12I07fugqwPuWJi68oOnpfqFnJIJzaIIm2JVV4= +golang.org/x/net v0.46.0/go.mod h1:Q9BGdFy1y4nkUwiLvT5qtyhAnEHgnQ/zd8PfU6nc210= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -2244,8 +2254,8 @@ golang.org/x/sync v0.4.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/sync v0.15.0 h1:KWH3jNZsfyT6xfAfKiz6MRNmd46ByHDYaZ7KSkCtdW8= -golang.org/x/sync v0.15.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= +golang.org/x/sync v0.17.0 h1:l60nONMj9l5drqw6jlhIELNv9I0A4OFgRsG9k2oT9Ug= +golang.org/x/sync v0.17.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod 
h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -2357,8 +2367,8 @@ golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw= -golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/sys v0.37.0 h1:fdNQudmxPjkdUTPnLn5mdQv7Zwvbvpaxqs831goi9kQ= +golang.org/x/sys v0.37.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod h1:TeRTkGYfJXctD9OcfyVLyj2J3IxLnKwHJR8f4D8a3YE= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= @@ -2374,8 +2384,8 @@ golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= golang.org/x/term v0.28.0/go.mod h1:Sw/lC2IAUZ92udQNf3WodGtn4k/XoLyZoh8v/8uiwek= -golang.org/x/term v0.32.0 h1:DR4lr0TjUs3epypdhTOkMmuF5CDFJ/8pOnbzMZPQ7bg= -golang.org/x/term v0.32.0/go.mod h1:uZG1FhGx848Sqfsq4/DlJr3xGGsYMu/L5GW4abiaEPQ= +golang.org/x/term v0.36.0 h1:zMPR+aF8gfksFprF/Nc/rd1wRS1EI6nDBGyWAvDzx2Q= +golang.org/x/term v0.36.0/go.mod h1:Qu394IJq6V6dCBRgwqshf3mPF85AqzYEzofzRdZkWss= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod 
h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -2397,8 +2407,8 @@ golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= -golang.org/x/text v0.26.0 h1:P42AVeLghgTYr4+xUnTRKDMqpar+PtX7KWuNQL21L8M= -golang.org/x/text v0.26.0/go.mod h1:QK15LZJUUQVJxhz7wXgxSy/CJaTFjd0G+YLonydOVQA= +golang.org/x/text v0.30.0 h1:yznKA/E9zq54KzlzBEAWn1NXSQ8DIp/NYMy88xJjl4k= +golang.org/x/text v0.30.0/go.mod h1:yDdHFIX9t+tORqspjENWgzaCVXgk0yYnYuSZ8UzzBVM= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -2443,7 +2453,6 @@ golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapK golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200324003944-a576cf524670/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= golang.org/x/tools v0.0.0-20200329025819-fd4102a86c65/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= golang.org/x/tools 
v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= @@ -2454,10 +2463,8 @@ golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roY golang.org/x/tools v0.0.0-20200724022722-7017fd6b1305/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200820010801-b793a1359eac/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= -golang.org/x/tools v0.0.0-20201023174141-c8cfbd0f21e6/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201124115921-2c860bdd6e78/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= @@ -2482,8 +2489,12 @@ golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s= golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= golang.org/x/tools v0.14.0/go.mod h1:uYBEerGOWcJyEORxN+Ek8+TT266gXkNlHdJBwexUsBg= golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= -golang.org/x/tools v0.34.0 h1:qIpSLOxeCYGg9TrcJokLBG4KFA6d795g0xkBkiESGlo= -golang.org/x/tools v0.34.0/go.mod h1:pAP9OwEaY1CAW3HOmg3hLZC5Z0CCmzjAF2UQMSqNARg= +golang.org/x/tools v0.38.0 h1:Hx2Xv8hISq8Lm16jvBZ2VQf+RLmbd7wVUsALibYI/IQ= +golang.org/x/tools v0.38.0/go.mod h1:yEsQ/d/YK8cjh0L6rZlY8tgtlKiBNTL14pGDJPJpYQs= 
+golang.org/x/tools/go/expect v0.1.1-deprecated h1:jpBZDwmgPhXsKZC6WhL20P4b/wmnpsEAGHaNy0n/rJM= +golang.org/x/tools/go/expect v0.1.1-deprecated/go.mod h1:eihoPOH+FgIqa3FpoTwguz/bVUSGBlGQU67vpBeOrBY= +golang.org/x/tools/go/packages/packagestest v0.1.1-deprecated h1:1h2MnaIAIXISqTFKdENegdpAgUXz6NrPEsbIeWaBRvM= +golang.org/x/tools/go/packages/packagestest v0.1.1-deprecated/go.mod h1:RVAQXBGNv1ib0J382/DPCRS/BPnsGebyM1Gj5VSDpG8= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -2557,8 +2568,8 @@ google.golang.org/api v0.108.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/ google.golang.org/api v0.110.0/go.mod h1:7FC4Vvx1Mooxh8C5HWjzZHcavuS2f6pmJpZx60ca7iI= google.golang.org/api v0.111.0/go.mod h1:qtFHvU9mhgTJegR31csQ+rwxyUTHOKFqCKWp1J0fdw0= google.golang.org/api v0.114.0/go.mod h1:ifYI2ZsFK6/uGddGfAD5BMxlnkBqCmqHSDUVi45N5Yg= -google.golang.org/api v0.237.0 h1:MP7XVsGZesOsx3Q8WVa4sUdbrsTvDSOERd3Vh4xj/wc= -google.golang.org/api v0.237.0/go.mod h1:cOVEm2TpdAGHL2z+UwyS+kmlGr3bVWQQ6sYEqkKje50= +google.golang.org/api v0.246.0 h1:H0ODDs5PnMZVZAEtdLMn2Ul2eQi7QNjqM2DIFp8TlTM= +google.golang.org/api v0.246.0/go.mod h1:dMVhVcylamkirHdzEBAIQWUCgqY885ivNeZYd7VAVr8= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -2699,12 +2710,12 @@ google.golang.org/genproto v0.0.0-20230323212658-478b75c54725/go.mod h1:UUQDJDOl google.golang.org/genproto v0.0.0-20230330154414-c0448cd141ea/go.mod h1:UUQDJDOlWu4KYeJZffbWgBkS1YFobzKbLVfK69pe0Ak= google.golang.org/genproto 
v0.0.0-20230331144136-dcfb400f0633/go.mod h1:UUQDJDOlWu4KYeJZffbWgBkS1YFobzKbLVfK69pe0Ak= google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1/go.mod h1:nKE/iIaLqn2bQwXBg8f1g2Ylh6r5MN5CmZvuzZCgsCU= -google.golang.org/genproto v0.0.0-20250505200425-f936aa4a68b2 h1:1tXaIXCracvtsRxSBsYDiSBN0cuJvM7QYW+MrpIRY78= -google.golang.org/genproto v0.0.0-20250505200425-f936aa4a68b2/go.mod h1:49MsLSx0oWMOZqcpB3uL8ZOkAh1+TndpJ8ONoCBWiZk= +google.golang.org/genproto v0.0.0-20250603155806-513f23925822 h1:rHWScKit0gvAPuOnu87KpaYtjK5zBMLcULh7gxkCXu4= +google.golang.org/genproto v0.0.0-20250603155806-513f23925822/go.mod h1:HubltRL7rMh0LfnQPkMH4NPDFEWp0jw3vixw7jEM53s= google.golang.org/genproto/googleapis/api v0.0.0-20250603155806-513f23925822 h1:oWVWY3NzT7KJppx2UKhKmzPq4SRe0LdCijVRwvGeikY= google.golang.org/genproto/googleapis/api v0.0.0-20250603155806-513f23925822/go.mod h1:h3c4v36UTKzUiuaOKQ6gr3S+0hovBtUrXzTG/i3+XEc= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822 h1:fc6jSaCT0vBduLYZHYrBBNY4dsWuvgyff9noRNDdBeE= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250728155136-f173205681a0 h1:MAKi5q709QWfnkkpNQ0M12hYJ1+e8qYVDyowc4U1XZM= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250728155136-f173205681a0/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A= google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= @@ -2748,8 +2759,8 @@ google.golang.org/grpc v1.52.3/go.mod h1:pu6fVzoFb+NBYNAvQL08ic+lvB2IojljRYuun5v google.golang.org/grpc v1.53.0/go.mod h1:OnIrk0ipVdj4N5d9IUoFUx72/VlD7+jUsHwZgwSMQpw= google.golang.org/grpc v1.54.0/go.mod h1:PUSEXI6iWghWaB6lXM4knEgpJNu2qUcKfDtNci3EC2g= google.golang.org/grpc 
v1.56.3/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s= -google.golang.org/grpc v1.73.0 h1:VIWSmpI2MegBtTuFt5/JWy2oXxtjJ/e89Z70ImfD2ok= -google.golang.org/grpc v1.73.0/go.mod h1:50sbHOUqWoCQGI8V2HQLJM0B+LMlIUjNSZmow7EVBQc= +google.golang.org/grpc v1.74.2 h1:WoosgB65DlWVC9FqI82dGsZhWFNBSLjQ84bjROOpMu4= +google.golang.org/grpc v1.74.2/go.mod h1:CtQ+BGjaAIXHs/5YS3i473GqwBBa1zGQNevxdeBEXrM= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= @@ -2843,8 +2854,8 @@ modernc.org/strutil v1.1.3/go.mod h1:MEHNA7PdEnEwLvspRMtWTNnp2nnyvMfkimT1NKNAGbw modernc.org/tcl v1.13.1/go.mod h1:XOLfOwzhkljL4itZkK6T72ckMgvj0BDsnKNdZVUOecw= modernc.org/token v1.0.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= modernc.org/z v1.5.1/go.mod h1:eWFB510QWW5Th9YGZT81s+LwvaAs3Q2yr4sP0rmLkv8= -mvdan.cc/gofumpt v0.8.0 h1:nZUCeC2ViFaerTcYKstMmfysj6uhQrA2vJe+2vwGU6k= -mvdan.cc/gofumpt v0.8.0/go.mod h1:vEYnSzyGPmjvFkqJWtXkh79UwPWP9/HMxQdGEXZHjpg= +mvdan.cc/gofumpt v0.9.1 h1:p5YT2NfFWsYyTieYgwcQ8aKV3xRvFH4uuN/zB2gBbMQ= +mvdan.cc/gofumpt v0.9.1/go.mod h1:3xYtNemnKiXaTh6R4VtlqDATFwBbdXI8lJvH/4qk7mw= mvdan.cc/unparam v0.0.0-20250301125049-0df0534333a4 h1:WjUu4yQoT5BHT1w8Zu56SP8367OuBV5jvo+4Ulppyf8= mvdan.cc/unparam v0.0.0-20250301125049-0df0534333a4/go.mod h1:rthT7OuvRbaGcd5ginj6dA2oLE7YNlta9qhBNNdCaLE= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= diff --git a/.ci/tools/main.go b/.ci/tools/main.go index f4e7bc1660a4..9028fa1a3d36 100644 --- a/.ci/tools/main.go +++ b/.ci/tools/main.go @@ -13,5 +13,6 @@ import ( _ "github.com/pavius/impi/cmd/impi" _ "github.com/rhysd/actionlint/cmd/actionlint" _ "github.com/terraform-linters/tflint" + _ "golang.org/x/tools/cmd/stringer" 
_ "mvdan.cc/gofumpt" ) diff --git a/.github/ISSUE_TEMPLATE/00_bug_report.yml b/.github/ISSUE_TEMPLATE/00_bug_report.yml index 01705d1426ff..882d6ba8be4e 100644 --- a/.github/ISSUE_TEMPLATE/00_bug_report.yml +++ b/.github/ISSUE_TEMPLATE/00_bug_report.yml @@ -1,164 +1,167 @@ -name: Report a Bug -description: Choose this option to let us know about an unexpected error, a crash, or otherwise incorrect behavior. -labels: - - bug +name: "Report a Bug" +description: "Choose this option to let us know about an unexpected error, a crash, or otherwise incorrect behavior." +title: "[Bug]: " +labels: ["bug"] body: - type: markdown attributes: value: | - ## Thank you for raising a bug report! + # Thank you for raising a bug report! - Before submitting a bug report, we ask that you first [search existing issues and pull requests](https://github.com/hashicorp/terraform-provider-aws/issues?q=label%3Abug) to see if someone else may have experienced the same issue or may have already submitted a fix for it. This helps to keep all relevant information in one place, including any potential workarounds. - - ### A Note on Terraform Core Issues + Before submitting a bug report, we ask that you first search existing issues to see if someone else may have experienced the same issue. This helps to keep all relevant information in one place, including any potential workarounds. We also ask that you consider whether your issue may be related to Terraform Core. 
If you are running into one of the following scenarios, we recommend [opening an issue](https://github.com/hashicorp/terraform/issues/new/choose) in the Terraform Core repository instead: - * [Configuration Language](https://developer.hashicorp.com/terraform/language) or resource ordering issues - * [State](https://developer.hashicorp.com/terraform/language/state) and [State Backend](https://developer.hashicorp.com/terraform/language/backend) issues - * [Provisioner](https://developer.hashicorp.com/terraform/language/resources/provisioners/syntax) issues + * [Configuration Language](https://www.terraform.io/docs/configuration/index.html) or resource ordering issues + * [State](https://www.terraform.io/docs/state/index.html) and [State Backend](https://www.terraform.io/docs/backends/index.html) issues + * [Provisioner](https://www.terraform.io/docs/provisioners/index.html) issues * [Registry](https://registry.terraform.io/) issues - * Issues that span resources across multiple providers + * Spans resources across multiple providers - - type: textarea + - type: markdown + attributes: + value: | + # Terraform and AWS Provider Versions + + Please run `terraform -v` to show the Terraform Core and provider version(s). If you are not running the latest version of either the provider or Terraform Core, please consider upgrading, as your issue may have already been fixed. + + [Terraform documentation on provider versioning](https://www.terraform.io/docs/configuration/providers.html#provider-versions) + + - type: input id: tf_version attributes: - label: Terraform and AWS Provider Version - description: | - Please run `terraform --version` to collect the Terraform and AWS Provider versions and paste the result below. If multiple versions have been tested, feel free to add that additional information here as well. - placeholder: | - ...output of `terraform --version`... 
- render: console + label: Terraform Core Version + description: The semantic version of Terraform Core used when experiencing the bug. If multiple versions have been tested, a comma separated list. + placeholder: "X.Y.Z" + validations: + required: true + + - type: input + id: aws_version + attributes: + label: AWS Provider Version + description: The semantic version of the AWS Provider used when experiencing the bug. If multiple versions have been tested, a comma separated list. + placeholder: "X.Y.Z" validations: required: true + - type: markdown + attributes: + value: | + # Description + + In the next few fields, please provide any useful information you can around what resources are affected, what you expected to happen, and what actually happened. There is also a field to optionally provide a small snippet of any relevant error or panic output. + + Note that there is a section later in the form to provide more complete Terraform configuration and logging information. These fields should instead be thought of as the place to briefly describe the bug. + - type: textarea - id: affected_resource + id: affected attributes: - label: Affected Resource(s) or Data Source(s) - description: | - If applicable, please list the affected resource(s) and/or data source(s). - placeholder: | - * `aws_example_resource` - * `aws_example_data_source` + label: Affected Resource(s) + description: Please list the affected resource(s) and/or data source(s). + placeholder: "* aws_xxx" validations: required: false - type: textarea - id: expected_behavior + id: expected attributes: label: Expected Behavior - description: | - Use this section to describe what behavior should have happened that isn't currently. Note that there is a section later in the template for providing a sample configuration, so a short description is all that's necessary here. + description: What should have happened? 
validations: required: true - type: textarea - id: actual_behavior + id: actual attributes: label: Actual Behavior - description: | - Use this section to describe how the the provider is currently behaving and how it differs from the behavior outlined in the Expected Behavior section. + description: What actually happened? validations: required: true - type: textarea id: log_snippet attributes: - label: Relevant Error/Panic Output + label: Relevant Error/Panic Output Snippet description: | - If applicable, provide a snippet of output logging that contains the error or panic. Note that there is a section later in the template for providing more complete debug output, so a small snippet is all that's necessary here. - - For convenience, we pre-populate this section with a [code fence](https://docs.github.com/en/get-started/writing-on-github/working-with-advanced-formatting/creating-and-highlighting-code-blocks) to help with formatting in the resulting issue. Placing the log output between the sets of backticks (\```) will result in a well-formatted issue. - value: | - ```console - - ``` + If applicable, provide a relevant snippet from the error or panic output. This will be rendered as `shell`, so there is no need to add a [code fence](https://docs.github.com/en/get-started/writing-on-github/working-with-advanced-formatting/creating-and-highlighting-code-blocks). + render: shell validations: required: false - type: markdown attributes: value: | - ## Providing Configurations and Logging + # Configuration and Logging - When providing a reproduction configuration and/or debug logging, please paste, upload, or link to a file or [public Gist](https://docs.github.com/en/get-started/writing-on-github/editing-and-sharing-content-with-gists/creating-gists) containing the relevant information. Files under 25MB may be [attached directly](https://docs.github.com/en/get-started/writing-on-github/working-with-advanced-formatting/attaching-files) in the relevant field. 
Files larger than this should be uploaded to a file hosting service and a link shared. For your security you can also encrypt the files using our [GPG public key](https://keybase.io/hashicorp). + Here, we ask that you provide the Terraform configuration and, when possible, the debug logs. For configurations or logs of more than just a few lines, it's preferred to either share a Gist link or zip the files and upload them. - Configurations should be applyable with minimal modifications, and should not rely on external modules. This helps maintainers and contributors efficiently reproduce issues and implement acceptance tests to prevent future regressions. + Terraform configurations or debug logs under 25MB may be [attached directly in the field](https://docs.github.com/en/get-started/writing-on-github/working-with-advanced-formatting/attaching-files). Files larger than this should be uploaded to a file hosting service and a link shared. - > [!WARNING] - > Bug reports without a functional and standalone sample configuration may be closed without further investigation. + For your security you can also encrypt the files using our [GPG public key](https://keybase.io/hashicorp). + + **Note:** These form fields do not automatically render, so you will need to use [code fence(s)](https://help.github.com/articles/basic-writing-and-formatting-syntax/#quoting-code) to achieve proper formatting. - type: textarea id: tf_config attributes: - label: Sample Terraform Configuration + label: Terraform Configuration Files description: | - Please provide a sample Terraform configuration that can be used to reproduce the issue. - - For convenience, we pre-populate this section such that configurations pasted between the backticks (\```) will be contained within a disclosure triangle and have syntax highlighting as appropriate for HCL in the resulting issue. Where appropriate, feel free to delete this. - value: | -
- Click to expand configuration + Please paste, upload, or link to a file or Gist containing all Terraform configurations required to reproduce the bug. See note above for additional information on file size restrictions and encrypting. - ```hcl - - ``` -
+ Bug reports without a functional reproduction may be closed without investigation. validations: required: true - type: textarea - id: reproduction_steps + id: repro attributes: label: Steps to Reproduce - description: | - Please list the steps necessary to reproduce the issue. - placeholder: | - 1. Apply the configuration - 2. Make a change - 3. etc. + description: Please list the steps required to reproduce the issue. validations: required: true - type: textarea - id: extended_logs + id: debug_logs attributes: - label: Debug Logging + label: Debug Output description: | - If possible, please provide log output captured while reproducing the issue with [debug logging enabled](https://developer.hashicorp.com/terraform/internals/debugging). If a panic is produced, that information should be included. + If possible, please paste, upload, or link to a file or Gist containing debug logs. See note above for additional information on file size restrictions and encrypting. - For convenience, we pre-populate this section such that logs pasted between the backticks (\```) will be contained within a disclosure triangle and have syntax highlighting associated with console output in the resulting issue. - value: | -
- Click to expand log output - - ```console + To obtain the debug output, see the [Terraform documentation on debugging](https://www.terraform.io/docs/internals/debugging.html). + validations: + required: false - ``` -
+ - type: textarea + id: panic_logs + attributes: + label: Panic Output + description: If Terraform produced a panic, please paste, upload, or link to a file or Gist containing the output of the `crash.log`. validations: required: false + - type: markdown + attributes: + value: | + # Additional Information + + For the final two fields, optionally provide any additional context that might help the community or maintainers to investigate the bug. This might be additional references that you found when troubleshooting, similar bug reports, or specifics about your environment that might be unique. + - type: textarea - id: genai_llm attributes: - label: GenAI / LLM Assisted Development - description: | - If you used a generative AI / LLM tool to assist in the development of your config, please let us know which tool you used here. - value: n/a + label: Important Factoids + description: Are there anything atypical about your configuration or environment that we should know about? validations: required: false - type: textarea + id: references attributes: - label: Important Facts and References + label: References description: | - If there is any additional information that might be relevant, provide those details here. This might include information such as, but not limited to: + Where possible, please supply links to vendor documentation, other GitHub issues (open or closed) or pull requests that give additional context. - * Any atypical situations that might apply (airgapped environments, specific [AWS partitions](https://docs.aws.amazon.com/whitepapers/latest/aws-fault-isolation-boundaries/partitions.html), etc.) - * [References to other GitHub issues](https://docs.github.com/en/get-started/writing-on-github/getting-started-with-writing-and-formatting-on-github/basic-writing-and-formatting-syntax#referencing-issues-and-pull-requests) - * Links to external references such as AWS or other vendor documentation, third party articles, etc. 
+ [Information about referencing Github Issues](https://help.github.com/articles/basic-writing-and-formatting-syntax/#referencing-issues-and-pull-requests) validations: required: false @@ -167,13 +170,9 @@ body: attributes: label: Would you like to implement a fix? description: | - Indicate to the maintainers and community as to whether you plan to implement a fix for this (you can update this later if you change your mind). This helps prevent duplication of effort, as many of our contributors look for recently filed issues as a source for their next contribution. - - If this would be your first contribution, refer to the [contributor guide](https://hashicorp.github.io/terraform-provider-aws/) for tips on getting started. + If you plan to implement a fix for this, check this box to let the maintainers and community know (you can update this later if you change your mind). If this would be your first contribution, refer to the [contribution guide](https://hashicorp.github.io/terraform-provider-aws/) for tips on getting started. options: - "No" - "Yes" - multiple: false - default: 0 validations: required: false diff --git a/.github/ISSUE_TEMPLATE/01_documentation.yml b/.github/ISSUE_TEMPLATE/01_documentation.yml index e485640818e1..c07e67fe0232 100644 --- a/.github/ISSUE_TEMPLATE/01_documentation.yml +++ b/.github/ISSUE_TEMPLATE/01_documentation.yml @@ -1,23 +1,22 @@ -name: Report a Documentation Error -description: Choose this option if you've found an error in the provider documentation or contributor guides. -labels: - - documentation +name: "Report a Documentation Error" +description: "Choose this option if you've found an error in the provider documentation or contribution guides." +title: "[Docs]: " +labels: ["documentation"] body: - type: markdown attributes: value: | - ## Thank you for raising a documentation issue! + # Thank you for raising a documentation issue! 
- This form is meant to alert the maintainers to issues with the provider documentation found on the [Terraform Registry](https://registry.terraform.io/providers/hashicorp/aws/latest) (such as resource and data source documentation, guides, and examples), or the [contributors guide](https://hashicorp.github.io/terraform-provider-aws/). + This form is meant to alert the maintainers to an issue with the provider documentation found on the [Terraform Registry](https://registry.terraform.io/providers/hashicorp/aws/latest) (such as resource and data source documentation, guides and examples), as well as the [contribution guide](https://hashicorp.github.io/terraform-provider-aws/). - We ask that you first [search existing issues and pull requests](https://github.com/hashicorp/terraform-provider-aws/issues?q=label%3Adocumentation) to see if someone else may have already noticed the same issue or has already submitted a fix for it. + Documentation edits are generally a bit less involved, so are often a great entrypoint if you've ever been interested in [contributing](https://hashicorp.github.io/terraform-provider-aws/documentation-changes/)! - - type: textarea + - type: input id: registry_link attributes: - label: Documentation Link(s) - description: | - Please link to the affected page(s) on the Terraform Registry or contributors guide. + label: Documentation Link + description: Please provide a link to the affected page on the Terraform Registry or contribution guide. validations: required: true @@ -25,8 +24,7 @@ body: id: description attributes: label: Description - description: | - Please leave a brief description of the documentation issue(s), including what the documentation currently says and, if possible, what it should say. + description: Please leave a brief description of the documentation issue. 
validations: required: true @@ -35,7 +33,7 @@ body: attributes: label: References description: | - Where possible, please supply links to AWS documentation and/or other GitHub issues or pull requests that give additional context. + Where possible, please supply links to vendor documentation, other GitHub issues (open or closed) or pull requests that give additional context. [Information about referencing Github Issues](https://help.github.com/articles/basic-writing-and-formatting-syntax/#referencing-issues-and-pull-requests) validations: @@ -46,13 +44,9 @@ body: attributes: label: Would you like to implement a fix? description: | - Indicate to the maintainers and community as to whether you plan to implement a fix for this (you can update this later if you change your mind). This helps prevent duplication of effort, as many of our contributors look for recently filed issues as a source for their next contribution. - - Documentation edits are generally a bit less involved, so are often a great entrypoint if you've ever been interested in contributing. If this would be your first contribution, refer to the [contributor guide](https://hashicorp.github.io/terraform-provider-aws/documentation-changes/) for tips on getting started. + If you plan to implement a fix for this, check this box to let the maintainers and community know (you can update this later if you change your mind). If this would be your first contribution, refer to the [contribution guide](https://hashicorp.github.io/terraform-provider-aws/) for tips on getting started. 
options: - "No" - "Yes" - multiple: false - default: 0 validations: required: false diff --git a/.github/ISSUE_TEMPLATE/02_enhancement.yml b/.github/ISSUE_TEMPLATE/02_enhancement.yml index d62b0d5fd087..b3c54c397866 100644 --- a/.github/ISSUE_TEMPLATE/02_enhancement.yml +++ b/.github/ISSUE_TEMPLATE/02_enhancement.yml @@ -1,38 +1,30 @@ -name: Request an Enhancement -description: Choose this option when you would like to request an enhancement to an existing resource or data source (such as the addition of new arguments), or to the provider itself. -labels: - - enhancement +name: "Request an Enhancement" +description: "Choose this option when you would like to request an enhancement to an existing resource, data source, or the provider itself." +title: "[Enhancement]: " +labels: ["enhancement"] body: - type: markdown attributes: value: | - ## Thank you for opening an enhancement request! + # Thank you for opening an enhancement request! This form is intended as a way to request additional functionality for existing resources, data sources, or the provider itself. This may include requests such as adding additional arguments or attributes enabled by upstream API changes, additional validation for arguments, etc. - Before submitting an enhancement request, we ask that you first [search existing issues and pull requests](https://github.com/hashicorp/terraform-provider-aws/issues?q=label%3Aenhancement) to see if someone else has made a similar request or has already worked on adding the feature. This helps to keep all relevant discussions in one place. - - > [!NOTE] - > If the absense of the requested feature is causing unexpected behavior in the provider, the [Report a Bug](https://github.com/hashicorp/terraform-provider-aws/issues/new?template=00_bug_report.yml) form should be used. 
For entirely new resources, data sources, or services, please use the [Request Net New Functionality](https://github.com/hashicorp/terraform-provider-aws/issues/new?template=03_new_functionality.yml) form. + If the absense of the requested feature is causing unexpected behavior in the provider, the "Report a Bug" form should be used. For entirely new resources, data sources, or services, please use the Request a "New Resource, Data Source, or AWS Service" form. - type: textarea id: description attributes: label: Description - description: | - Please provide a brief description of the requested change. + description: Please leave a brief description of the requested change. validations: required: true - type: textarea - id: affected_resource + id: affected attributes: - label: Affected Resource(s) or Data Source(s) - description: | - Where applicable, please list the resource(s) and/or data source(s) that you're requesting to be enhanced. - placeholder: | - * `aws_example_resource` - * `aws_example_data_source` + label: Affected Resource(s) and/or Data Source(s) + placeholder: "* aws_xxx_yyy" validations: required: false @@ -43,11 +35,8 @@ body: description: | If this request was implemented, what might the Terraform configuration look like? A best guess is helpful, even if you're unsure of exactly what the end result will look like. This helps maintainers and the community better understand how you (someone who is in need of this feature) envisions it. - For convenience, we pre-populate this section with a [code fence](https://docs.github.com/en/get-started/writing-on-github/working-with-advanced-formatting/creating-and-highlighting-code-blocks) to help with formatting in the resulting issue. Configurations pasted between the backticks (\```) will have the appropriate syntax highlighting for HCL. Where appropriate, feel free to delete this. 
- value: | - ```hcl - - ``` + **Note:** This field will render in HCL, so there is no need to add a [code fence](https://docs.github.com/en/get-started/writing-on-github/working-with-advanced-formatting/creating-and-highlighting-code-blocks). + render: terraform validations: required: false @@ -56,7 +45,7 @@ body: attributes: label: References description: | - Where possible, please supply links to the [AWS Go SDK v2](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service) documentation demonstrating that the SDK supports the requested feature. Other links, such as those to the AWS API or CLI documentation, or other GitHub issues or pull requests that give additional context are also helpful. + Where possible, please supply links to vendor documentation, other GitHub issues (open or closed) or pull requests that give additional context. [Information about referencing Github Issues](https://help.github.com/articles/basic-writing-and-formatting-syntax/#referencing-issues-and-pull-requests) validations: @@ -65,15 +54,11 @@ body: - type: dropdown id: will_contribute attributes: - label: Would you like to implement the enhancement? + label: Would you like to implement a fix? description: | - Indicate to the maintainers and community as to whether you plan to implement the enhancement yourself (you can update this later if you change your mind). This helps prevent duplication of effort, as many of our contributors look for recently filed issues as a source for their next contribution. - - If this would be your first contribution, refer to the [contributor guide](https://hashicorp.github.io/terraform-provider-aws/) for tips on getting started. + If you plan to implement a fix for this, check this box to let the maintainers and community know (you can update this later if you change your mind). If this would be your first contribution, refer to the [contribution guide](https://hashicorp.github.io/terraform-provider-aws/) for tips on getting started. 
options: - "No" - "Yes" - multiple: false - default: 0 validations: required: false diff --git a/.github/ISSUE_TEMPLATE/03_new_functionality.yml b/.github/ISSUE_TEMPLATE/03_new_functionality.yml deleted file mode 100644 index 7578a071206f..000000000000 --- a/.github/ISSUE_TEMPLATE/03_new_functionality.yml +++ /dev/null @@ -1,77 +0,0 @@ -name: Request Net New Functionality -description: Choose this option to request an entirely new resource, data source, ephemeral resource/data source, function, or AWS service be added to the provider. -body: - - type: markdown - attributes: - value: | - ## Thank you for opening a request! - - This form is intended to be used when requesting an entirely new resource, data source, ephemeral resource/data source, function, or service be added to the provider. If you're looking for a change to be made to an existing resource or data source, please use the [Request an Enhancement](https://github.com/hashicorp/terraform-provider-aws/issues/new?template=02_enhancement.yml) form instead. If something isn't working as expected, the [Report a Bug](https://github.com/hashicorp/terraform-provider-aws/issues/new?template=00_bug_report.yml) form should be used. - - Before submitting a request, we ask that you first [search existing issues and pull requests](https://github.com/hashicorp/terraform-provider-aws/issues?q=label%3Anew-resource%2Cnew-data-source%2Cnew-function%2Cnew-ephemeral-resource%2Cnew-service%20) to see if someone else has made a similar request or has already worked on adding the feature. This helps to keep all relevant discussions in one place. - - - type: textarea - id: resources - attributes: - label: What new functionality are you requesting? - description: | - Please provide the name of the new functionality that you're requesting. For the sake of consistency, we ask that you use the following conventions: - - * Resource, Data Source (including ephemeral): `aws__`, e.g. 
`aws_lambda_function` - * Function: A clear, underscore-separated descriptor, e.g. `arn_parse` - * Service: The name of the service itself will suffice - placeholder: | - * `aws_xxx_yyy` - validations: - required: true - - - type: textarea - id: description - attributes: - label: Description - description: | - Please provide a brief description of what the new functionality will accomplish. For example, what upstream resource(s) could be managed by introducing these changes? - validations: - required: true - - - type: textarea - id: tf_config - attributes: - label: Potential Terraform Configuration - description: | - If this request was implemented, what might the Terraform configuration look like? A best guess is helpful, even if you're unsure of exactly what the end result will look like. This helps maintainers and the community better understand how you (someone who is in need of this feature) envisions it. - - For convenience, we pre-populate this section with a [code fence](https://docs.github.com/en/get-started/writing-on-github/working-with-advanced-formatting/creating-and-highlighting-code-blocks) to help with formatting in the resulting issue. Configurations pasted between the backticks (\```) will have the appropriate syntax highlighting for HCL. Where appropriate, feel free to delete this. - value: | - ```hcl - - ``` - validations: - required: false - - - type: textarea - id: references - attributes: - label: References - description: | - Where possible, please supply links to the [AWS Go SDK v2](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service) documentation demonstrating that the SDK supports the requested feature. Other links, such as those to the AWS API or CLI documentation, or other GitHub issues or pull requests that give additional context are also helpful. 
- - [Information about referencing Github Issues](https://help.github.com/articles/basic-writing-and-formatting-syntax/#referencing-issues-and-pull-requests) - validations: - required: false - - - type: dropdown - id: will_contribute - attributes: - label: Would you like to implement the enhancement? - description: | - Indicate to the maintainers and community as to whether you plan to implement the new functionality yourself (you can update this later if you change your mind). This helps prevent duplication of effort, as many of our contributors look for recently filed issues as a source for their next contribution. - - If this would be your first contribution, refer to the [contributor guide](https://hashicorp.github.io/terraform-provider-aws/) for tips on getting started. - options: - - "No" - - "Yes" - multiple: false - default: 0 - validations: - required: false diff --git a/.github/ISSUE_TEMPLATE/03_new_resource.yml b/.github/ISSUE_TEMPLATE/03_new_resource.yml new file mode 100644 index 000000000000..44dfd3244e25 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/03_new_resource.yml @@ -0,0 +1,73 @@ +name: "Request a New Resource, Data Source, or AWS Service" +description: "Choose this option to request an entirely new resource, data source, or AWS service be added to the provider." +title: "[New]: " +body: + - type: markdown + attributes: + value: | + # Thank you for opening a request! + + This form is intended to be used when requesting an entirely new resource, data source, or service be added to the provider. If you're looking for a change to be made to an existing resource or data source, consider submitting either the "Request an Enhancement" or "Report a Bug" forms instead. + + When possible, it's helpful to check the [AWS Go SDK](https://pkg.go.dev/github.com/aws/aws-sdk-go/service) or [AWS Go SDK v2](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2#section-directories) to determine whether functionality exists to enable the requested feature. 
It is **not** required that you do this. Any references found when searching can be added to the "References" field below to give maintainers or the community a head start. + + Please update the title to match what you're requesting, e.g.: + + - `[New Resource]:` - for new resource requests + - `[New Data Source]:` - for new data source requests + - `[New Service]:` - for new AWS services + + - type: textarea + id: description + attributes: + label: Description + description: Please leave a brief description of what you're requesting. + validations: + required: true + + - type: textarea + id: resources + attributes: + label: Requested Resource(s) and/or Data Source(s) + description: | + Please list any new resource(s) and/or data source(s). The naming format is `aws__`, e.g., `aws_lambda_function`. + + A best guess is helpful, even if you're unsure of exactly what the end result will look like. This helps maintainers and the community better understand how you (someone who is in need of this request) envisions it. + placeholder: "* aws_xxx_yyy" + validations: + required: true + + - type: textarea + id: tf_config + attributes: + label: Potential Terraform Configuration + description: | + If this request was implemented, what might the Terraform configuration look like? Similar to above, a best guess is helpful, even if you're unsure of exactly what the end result will look like. + + **Note:** This field will render in HCL, so there is no need to add a [code fence](https://docs.github.com/en/get-started/writing-on-github/working-with-advanced-formatting/creating-and-highlighting-code-blocks). + render: terraform + validations: + required: false + + - type: textarea + id: references + attributes: + label: References + description: | + Where possible, please supply links to vendor documentation, other GitHub issues (open or closed) or pull requests that give additional context. 
+ + [Information about referencing Github Issues](https://help.github.com/articles/basic-writing-and-formatting-syntax/#referencing-issues-and-pull-requests) + validations: + required: false + + - type: dropdown + id: will_contribute + attributes: + label: Would you like to implement a fix? + description: | + If you plan to implement a fix for this, check this box to let the maintainers and community know (you can update this later if you change your mind). If this would be your first contribution, refer to the [contribution guide](https://hashicorp.github.io/terraform-provider-aws/) for tips on getting started. + options: + - "No" + - "Yes" + validations: + required: false diff --git a/.github/ISSUE_TEMPLATE/04_repository.yml b/.github/ISSUE_TEMPLATE/04_repository.yml index baf0c2c50716..5952b267804b 100644 --- a/.github/ISSUE_TEMPLATE/04_repository.yml +++ b/.github/ISSUE_TEMPLATE/04_repository.yml @@ -1,23 +1,20 @@ -name: Repository/Meta -description: Choose this option when reporting an issue that has to do with the repository itself, e.g. GitHub Actions workflows, labels, local development tools, etc. -labels: - - repository +name: "Repository/Meta" +description: Choose this option when the issue has to do with the repository itself, including GitHub Actions, labels, workflows, etc. +title: "[Repo]: " +labels: ["repository"] body: - type: markdown attributes: value: | - ## Thank you for opening an issue! + # Thank you for opening an issue! - This form is for issues pertaining to the repository itself. This might include issues or changes related to GitHub Actions, labels, local development tools, procedures for maintaining the provider, etc. For issues with the contribution documentation, please use the [Report a Documentation Error](https://github.com/hashicorp/terraform-provider-aws/issues/new?template=01_documentation.yml) form instead. 
- - Before submission, we ask that you first [search existing issues and pull requests](https://github.com/hashicorp/terraform-provider-aws/issues?q=label%3Arepository) to see if someone else has made a similar report or has alreaady worked on a relevant change. This helps to keep all relevant discussions in one place. + This form is for issues pertaining to the repository itself. This might include changes to GitHub Actions, labels, procedures, etc. For issues with the contribution documentation, please use the "Report a Documentation Error" form. - type: textarea id: description attributes: label: Description - description: | - Please provide a brief description of the issue or proposed change. + description: Please leave a brief description of the issue or proposed change. validations: required: true @@ -26,7 +23,7 @@ body: attributes: label: References description: | - Where possible, please supply links to documentation and/or other GitHub issues or pull requests that give additional context. + Where possible, please supply links to vendor documentation, other GitHub issues (open or closed) or pull requests that give additional context. [Information about referencing Github Issues](https://help.github.com/articles/basic-writing-and-formatting-syntax/#referencing-issues-and-pull-requests) validations: @@ -35,15 +32,11 @@ body: - type: dropdown id: will_contribute attributes: - label: Would you like to implement the change? + label: Would you like to implement a fix? description: | - Indicate to the maintainers and community as to whether you plan to implement the change or fix for this (you can update this later if you change your mind). This helps prevent duplication of effort, as many of our contributors look for recently filed issues as a source for their next contribution. - - If this would be your first contribution, refer to the [contributor guide](https://hashicorp.github.io/terraform-provider-aws/) for tips on getting started. 
+ If you plan to implement a fix for this, check this box to let the maintainers and community know (you can update this later if you change your mind). If this would be your first contribution, refer to the [contribution guide](https://hashicorp.github.io/terraform-provider-aws/) for tips on getting started. options: - "No" - "Yes" - multiple: false - default: 0 validations: required: false diff --git a/.github/ISSUE_TEMPLATE/05_beta_feedback.yml b/.github/ISSUE_TEMPLATE/05_beta_feedback.yml deleted file mode 100644 index fb136969e155..000000000000 --- a/.github/ISSUE_TEMPLATE/05_beta_feedback.yml +++ /dev/null @@ -1,171 +0,0 @@ -name: Beta Release Feedback -description: Choose this option to provide feedback or report issues related to beta versions of the provider -labels: - - beta-feedback -body: - - type: markdown - attributes: - value: | - ## Thank you for providing feedback on our beta release! - - Before submission, we ask that you first [search existing issues](https://github.com/hashicorp/terraform-provider-aws/issues?q=is%3Aissue%20state%3Aopen%20label%3Abeta-feedback) to see if someone else may have experienced the same issue or provided similar feedback. This helps to keep all relevant information in one place, including any potential workarounds. - - ### A Note on Terraform Core Issues - - We ask that you consider whether an issues may be related to Terraform Core. 
If you're experiencing issues similar to the following examples, we recommend [opening an issue](https://github.com/hashicorp/terraform/issues/new/choose) in the Terraform Core repository instead: - - * [Configuration Language](https://developer.hashicorp.com/terraform/language) or resource ordering issues - * [State](https://developer.hashicorp.com/terraform/language/state) and [State Backend](https://developer.hashicorp.com/terraform/language/backend) issues - * [Provisioner](https://developer.hashicorp.com/terraform/language/resources/provisioners/syntax) issues - * [Registry](https://registry.terraform.io/) issues - * Issues that span resources across multiple providers - - - type: textarea - id: tf_version - attributes: - label: Terraform and AWS Provider Version - description: | - Please run `terraform --version` to collect the Terraform and AWS Provider versions and paste the result below. If multiple versions have been tested, feel free to add that additional information here as well. - placeholder: | - ...output of `terraform --version`... - render: console - validations: - required: true - - - type: textarea - id: affected_resource - attributes: - label: Affected Resource(s) or Data Source(s) - description: | - If applicable, please list the affected resource(s) and/or data source(s). - placeholder: | - * `aws_example_resource` - * `aws_example_data_source` - validations: - required: false - - - type: textarea - id: expected_behavior - attributes: - label: Expected Behavior - description: | - When reporting an issue with a beta release, use this section to describe what behavior should have happened that isn't currently. Note that there is a section later in the template for providing a sample configuration, so a short description is all that's necessary here. - - When providing more general feedback, use this section to describe that feedback. 
- validations: - required: true - - - type: textarea - id: actual_behavior - attributes: - label: Actual Behavior - description: | - If the current behavior differs from the behavior outlined in the Expected Behavior section, use this section to provide those details. - - When providing more general feedback, where this section may not be relevent, feel free to enter "n/a" to satisfy this being a required field. - validations: - required: true - - - type: textarea - id: log_snippet - attributes: - label: Relevant Error/Panic Output - description: | - If applicable, provide a snippet of output logging that contains the error or panic. Note that there is a section later in the template for providing more complete debug output, so a small snippet is all that's necessary here. - - For convenience, we pre-populate this section with a [code fence](https://docs.github.com/en/get-started/writing-on-github/working-with-advanced-formatting/creating-and-highlighting-code-blocks) to help with formatting in the resulting issue. Placing the log output between the sets of backticks (\```) will result in a well-formatted issue. - value: | - ```console - - ``` - validations: - required: false - - - type: markdown - attributes: - value: | - ## Providing Configurations and Logging - - When providing a reproduction configuration and/or debug logging, please paste, upload, or link to a file or [public Gist](https://docs.github.com/en/get-started/writing-on-github/editing-and-sharing-content-with-gists/creating-gists) containing the relevant information. Files under 25MB may be [attached directly](https://docs.github.com/en/get-started/writing-on-github/working-with-advanced-formatting/attaching-files) in the relevant field. Files larger than this should be uploaded to a file hosting service and a link shared. For your security you can also encrypt the files using our [GPG public key](https://keybase.io/hashicorp). 
- - Configurations should be applyable with minimal modifications, and should not rely on external modules. This helps maintainers and contributors efficiently reproduce issues and implement acceptance tests to prevent future regressions. - - > [!WARNING] - > Bug reports without a functional and standalone sample configuration may be closed without further investigation. - - - type: textarea - id: tf_config - attributes: - label: Sample Terraform Configuration - description: | - Please provide a sample Terraform configuration that can be used to reproduce the issue. - - For convenience, we pre-populate this section such that configurations pasted between the backticks (\```) will be contained within a disclosure triangle in the resulting issue. Where appropriate, feel free to delete this. - - When providing more general feedback, where this section may not be relevent, feel free to clear out the pre-populated text and enter "n/a" to satisfy this being a required field. - value: | -
- Click to expand configuration - - ```hcl - - ``` -
- validations: - required: true - - - type: textarea - id: reproduction_steps - attributes: - label: Steps to Reproduce - description: | - If applicable, please list the steps to reproduce the issue. - placeholder: | - 1. Apply the configuration - 2. Make a change - 3. etc. - validations: - required: false - - - type: textarea - id: extended_logs - attributes: - label: Debug Logging - description: | - Where possible and applicable, please provide log output captured while reproducing the issue with [debug logging enabled](https://developer.hashicorp.com/terraform/internals/debugging). If a panic is produced, that information should be included. - - For convenience, we pre-populate this section such that logs pasted between the backticks (\```) will be contained within a disclosure triangle in the resulting issue. - - When providing more general feedback, where this section may not be relevent, feel free to clear out the pre-populated text and enter "n/a" to satisfy this being a required field. - value: | -
- Click to expand log output - - ```console - - ``` -
- validations: - required: false - - - type: textarea - id: genai_llm - attributes: - label: GenAI / LLM Assisted Development - description: | - If you used a generative AI / LLM tool to assist in the development of your config, please let us know which tool you used here. - value: n/a - validations: - required: false - - - type: textarea - attributes: - label: Important Facts and References - description: | - If there is any additional information that might be relevant, provide those details here. This might include information such as, but not limited to: - - * Any atypical situations that might apply (airgapped environments, specific [AWS partitions](https://docs.aws.amazon.com/whitepapers/latest/aws-fault-isolation-boundaries/partitions.html), etc.) - * [References to other GitHub issues](https://docs.github.com/en/get-started/writing-on-github/getting-started-with-writing-and-formatting-on-github/basic-writing-and-formatting-syntax#referencing-issues-and-pull-requests) - * Links to external references such as AWS or other vendor documentation, third party articles, etc. - validations: - required: false diff --git a/.github/ISSUE_TEMPLATE/05_other.yml b/.github/ISSUE_TEMPLATE/05_other.yml new file mode 100644 index 000000000000..8eafc99eb11e --- /dev/null +++ b/.github/ISSUE_TEMPLATE/05_other.yml @@ -0,0 +1,48 @@ +name: "Other" +description: "Choose this option if your issue does not fit the description of the others." +body: + - type: markdown + attributes: + value: | + # Thank you for raising an issue! + + This form is meant as a catch-all for issues that do not fit into one of the other existing forms: + + * Report a Bug + * Report a Documentation Error + * Request an Enhancement + * Request a New Resource, Data Source, or AWS Service + * Repository/Meta + + By nature this form is less rigid, so providing a bit of additional information, context, or reference material is very much appreciated. 
+ + - type: textarea + id: description + attributes: + label: Description + description: Please leave a detailed description of the issue. + validations: + required: true + + - type: textarea + id: references + attributes: + label: References + description: | + Where possible, please supply links to vendor documentation, other GitHub issues (open or closed) or pull requests that give additional context. + + [Information about referencing Github Issues](https://help.github.com/articles/basic-writing-and-formatting-syntax/#referencing-issues-and-pull-requests) + validations: + required: false + + - type: dropdown + id: will_contribute + attributes: + label: Would you like to implement a fix? + description: | + If you plan to implement a fix for this, check this box to let the maintainers and community know (you can update this later if you change your mind). If this would be your first contribution, refer to the [contribution guide](https://hashicorp.github.io/terraform-provider-aws/) for tips on getting started. + options: + - "No" + - "Yes" + validations: + required: false diff --git a/.github/ISSUE_TEMPLATE/06_other.yml b/.github/ISSUE_TEMPLATE/06_other.yml deleted file mode 100644 index ff014f211297..000000000000 --- a/.github/ISSUE_TEMPLATE/06_other.yml +++ /dev/null @@ -1,47 +0,0 @@ -name: Other -description: Choose this option if your issue does not fit any of the other forms. -body: - - type: markdown - attributes: - value: | - ## Thank you for raising an issue! - - This form is meant as a catch-all for issues that do not fit into one of the other existing forms. By nature this form is much more freeform, so providing a bit of additional information, context, or reference material is very much appreciated. 
- - Before submission, we ask that you first [search existing issues and pull requests](https://github.com/hashicorp/terraform-provider-aws/issues?q=is%3Aissue%20is%3Apr%20) to see if someone else may have already noticed whatever it is you're reporting, or has already worked on a relevant change. - - - type: textarea - id: description - attributes: - label: Description - description: | - Please provide a brief description of what you're looking to report to the maintainers. - validations: - required: true - - - type: textarea - id: references - attributes: - label: Important Facts and References - description: | - Where possible, please supply links to documentation and/or other GitHub issues or pull requests that give additional context. Any other helpful or relevant information may also be provided in this field. - - [Information about referencing Github Issues](https://help.github.com/articles/basic-writing-and-formatting-syntax/#referencing-issues-and-pull-requests) - validations: - required: false - - - type: dropdown - id: will_contribute - attributes: - label: Would you like to implement a relevant change? - description: | - Indicate to the maintainers and community as to whether you plan to implement a change related to this (you can update this later if you change your mind). This helps prevent duplication of effort, as many of our contributors look for recently filed issues as a source for their next contribution. - - If this would be your first contribution, refer to the [contributor guide](https://hashicorp.github.io/terraform-provider-aws/) for tips on getting started. 
- options: - - "No" - - "Yes" - multiple: false - default: 0 - validations: - required: false diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index eea4817d0ec4..5487b0d90d32 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -1,17 +1,6 @@ - - - -## Rollback Plan - -If a change needs to be reverted, we will publish an updated version of the library. - -## Changes to Security Controls - -Are there any changes to security controls (access controls, encryption, logging) in this pull request? If so, explain. - ### Description +``` diff --git a/docs/ai-agent-guides/parameterized-resource-identity.md b/docs/ai-agent-guides/parameterized-resource-identity.md new file mode 100644 index 000000000000..2ea2b5bbf26f --- /dev/null +++ b/docs/ai-agent-guides/parameterized-resource-identity.md @@ -0,0 +1,168 @@ +# Adding Resource Identity to parameterized Resources + +You are working on the [Terraform AWS Provider](https://github.com/hashicorp/terraform-provider-aws), specifically focused on adding [resource identity](https://developer.hashicorp.com/terraform/plugin/sdkv2/resources/identity) to Plugin SDKV2 resources whose identity is composed from multiple parameters (parameterized). +[This Github meta issue](https://github.com/hashicorp/terraform-provider-aws/issues/42983) contains details and sub-issues related to adding resource identity support. + +When adding resource identity, a pull request may include all resources in a service or a single resource. +Follow the steps below to complete this task. + +## 1. Prepare the branch + +- The feature branch name should begin with `f-ri` and be suffixed with the name of the service being updated, e.g. `f-ri-elbv2`. If the current branch does not match this convention, create one. +- Ensure the feature branch is rebased with the `main` branch. + +## 2. 
Add resource identity to each resource + +The changes for each individual resource should be done in its own commit. +Use the following steps to add resource identity to an existing resource: + +- Determine which arguments the resource identity is composed from. This may be a single argument mapping to an AWS-generated identifier, or a combination of multiple arguments. Check for places where the resource ID is set (e.g. `d.SetId()`) and infer the relevant parameters. +- Add an `@IdentityAttribute("")` annotation to the target resource. For resources where the ID is composed from multiple arguments, add one annotation for each argument. +- If the `id` attribute is set to the same value as an identity attribute, add an `@Testing(idAttrDuplicates="")` annotation. +- If the resource's test file uses a `CheckExists` helper function that accepts 3 parameters rather than 2 (you can check this in the resource's test file), add another annotation to the resource file in the format `// @Testing(existsType="github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2/types;types.TrustStore")`, but replacing the type with the correct one for the resource in question. The type should match the third parameter of the CheckExists function. +- Since we are newly adding identity to this resource, add an annotation indicating the most recent pre-identity version, e.g. `@Testing(preIdentityVersion="v6.3.0")`. Use `CHANGELOG.md` at the project root to determine the most recently released version (which will be the last before identity is added). +- Some resources will have an importer function defined. + - If that function uses `schema.ImportStatePassthroughContext` as `StateContext` value then remove that importer function declaration as it is no longer necessary. 
+ - If a custom import function is defined, add a `// @CustomImport` annotation and include the following at the beginning of the custom `StateContext` function: + +```go + identitySpec := importer.IdentitySpec(ctx) + if err := importer.RegionalSingleParameterized(ctx, d, identitySpec, meta.(importer.AWSClient)); err != nil { + return nil, err + } +``` + +- If the service does not use generated tag tests, you will need to create template files in the `testdata/tmpl` directory. For each resource, create a file named `_tags.gtpl` (e.g., `trust_store_tags.gtpl`). +- Populate each template file with the configuration from the resource's `_basic` test. If populating from the `_basic` configuration, be sure to replace any string format directives (e.g. `name = %[1]q`) with a corresponding reference to a variable (e.g. `name = var.rName`). +- The generators will use the template files to generate the resource identity test configuration. These will be located in the `testdata` directory for the service. **Do not manually create test directories or files as they will be generated.** +- The region template must be included inside each resource block in the template files. Add it as the first line after the resource declaration: + +```hcl +resource "aws_service_thing" "test" { +{{- template "region" }} + name = var.rName +{{- template "tags" }} +} +``` + +- If the resource already has a tags template declaration different than the example above, e.g. `{{- template "tags" . }}`, leave it unchanged. +- If the test configuration references an `aws_region` data source, the region template should also be embedded here. + +```hcl +data "aws_region" "current" { +{{- template "region" }} +} +``` + +## 3. Generate and test the changes + +- Run the generators for this service. This can be done with the following command (e.g. for the elbv2 package): `go generate ./internal/service/elbv2/...`. This will generate tests for Resource Identity and any required test files. 
+- Run the tests in this order: + - First run the basic identity test: `make testacc PKG= TESTS=TestAcc_Identity_Basic` + - Run all identity tests: `make testacc PKG= TESTS=TestAcc_Identity` + - Finally, run all tests for the resource: `make testacc PKG= TESTS=TestAcc_`. **Always include the `PKG` parameter to properly scope the tests to the intended service package.** +- Ensure the template modifications have not introduced any structural changes that would fail `terraform fmt`. To verify, run `terraform fmt -recursive -check`, and confirm there is no output. +- If all the preceding steps complete successfully commit the changes with an appropriate message, e.g. `r/aws_lb_target_group: add resource identity`. Ensure the commit message body includes the results of the acceptance test run in the previous step. + +Repeat steps 2 and 3 for each resource in the service. When all resources are complete, proceed to the next section. + +## 4. Update import documentation + +- Update the import section of the registry documentation for each resource following the template below. + +````markdown +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = .example + identity = { + + } +} + +resource "" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + + + +#### Optional + +* `account_id` (String) AWS Account where this resource is managed. +* `region` (String) Region where this resource is managed. +```` + +- The instructions for importing by `identity`, including the identity schema, should appear before instructions for import blocks with an `id` argument or importing via the CLI. +- Refer to `website/docs/r/kms_key.html.markdown` for a reference implementation. + +## 5. Submit a pull request + +**!!!Important!!!**: Ask for confirmation before proceeding with this step. 
+ +- Push the changes. +- Create a draft pull request with the following details: + - Title: "Add parameterized resource identity to ``", e.g. "Add parameterized resource identity to `elbv2`". If only a single resource is included, replace service-name with the full Terraform resource name. + - Use the following template for the body. Be sure to replace the acceptance test results section with the results from the full acceptance test suite run. + +``` +### Description +Add resource identity to parameterized resources in ``. This includes: + + + +### Relations +Relates #42983 +Relates #42988 + +### Output from Acceptance Testing + + + +``` + +- Once the pull request is created, fetch the PR number to add changelog entries. Create a new file, `.changelog/.txt`, and include one enhancement entry per resource. Refer to `.changelog/43503.txt` for the appropriate formatting. +- Provide a summary of the completed changes. + +## Common Issues and Troubleshooting + +### Test Failures + +- Ensure `PKG` parameter is included in test commands +- Verify template file names match exactly (`_tags.gtpl`) +- Check region template placement is inside resource blocks +- Don't create test directories manually - let the generator create them +- If a generated test panics because a `testAccCheck*Exists` helper function has incorrect arguments, add a `@Testing(existsType="")` annotation. NEVER modify the function signature of an existing "exists" helper function + +### Generator Issues + +- Remove any manually created test directories before running the generator +- Ensure template files are in the correct location (`testdata/tmpl`) +- Verify template file names match the resource name +- If identity tests are not generated, verify that the `identitytests` generator is being called within the service's `generate.go` file. If it isn't, add the following line to `generate.go` next to the existing `go:generate` directives. 
+- If a generated test does not reference the `var.rName` variable, add an `// @Testing(generator=false)` annotation to remove it from the generated configuration. + +```go +//go:generate go run ../../generate/identitytests/main.go +``` + +### Resource Updates + +- Check if the resource's check exists helper takes 3 parameters +- Verify the correct type is used in the `existsType` annotation +- Ensure importer is only removed if using `ImportStatePassthroughContext` + +### Import Test Failures + +- If identity tests are failing because they expect an update during import but get a no-op, add an `// @Testing(plannableImportAction="NoOp")` annotation and re-generate the test files. +- If identity tests are failing import verification due to missing attribute values, check the `_basic` test implementation for the presence of an `ImportStateVerifyIgnore` field in the import test step. If present, add an `// @Testing(importIgnore="arg1")` annotation where `arg1` is replaced with the argument name(s) from the verify ignore slice. If multiple fields are ignored, separate field names with a `;`, e.g. `arg1;arg2`. +- If a region override test is failing and a custom import function is configured, ensure the appropriate helper function from the `importer` package is used. + - `RegionalSingleParameterized` - regional resources whose identity is made up of a single parameter. + - `GlobalSingleParameterized` - global resources whose identity is made up of a single parameter. + - `RegionalMultipleParameterized` - regional resources whose identity is made up of multiple parameters. + - `GlobalMultipleParameterized` - global resources whose identity is made up of multiple parameters. 
diff --git a/docs/ai-agent-guides/smarterr.md b/docs/ai-agent-guides/smarterr.md new file mode 100644 index 000000000000..80542ccb1b3a --- /dev/null +++ b/docs/ai-agent-guides/smarterr.md @@ -0,0 +1,166 @@ +# smarterr Migration Guide for AI and Human Contributors + +This document is designed to enable **AI systems** (and humans) to fully and accurately migrate Go code in the Terraform AWS Provider from legacy error handling to the `smarterr`/`smerr` system. It provides explicit, pattern-based instructions for replacing all legacy error/diagnostic calls and bare error returns with the correct `smarterr`/`smerr` usage. **Follow these rules exactly for every migration.** + +--- + +## What is smarterr? + +`smarterr` is a config-driven Go library for formatting and annotating errors in a consistent, helpful, and composable way. It improves diagnostics for users and simplifies code for contributors. + +- **Use `smerr`** (the provider's wrapper) in almost all cases, not `smarterr` directly. +- `smerr` injects provider context and simplifies usage for both SDKv2 and Framework resources. + +--- + +## Migration Rules: Legacy → smarterr/smerr + +### 1. 
Replace All Legacy Diagnostic/Error Calls + +**For each of the following legacy calls, replace as shown:** + +| Legacy Call | Replace With | +|---|---| +| `sdkdiag.AppendFromErr(diags, err)` | `smerr.Append(ctx, diags, err, smerr.ID, ...)` | +| `sdkdiag.AppendErrorf(diags, ..., err)` | `smerr.Append(ctx, diags, err, smerr.ID, ...)` | +| `create.AppendDiagError(diags, ..., err)` | `smerr.Append(ctx, diags, err, smerr.ID, ...)` | +| `response.Diagnostics.AddError(..., err.Error())` | `smerr.AddError(ctx, &response.Diagnostics, err, smerr.ID, ...)` | +| `resp.Diagnostics.AddError(..., err.Error())` | `smerr.AddError(ctx, &resp.Diagnostics, err, smerr.ID, ...)` | +| `create.AddError(&response.Diagnostics, ..., err)` | `smerr.AddError(ctx, &response.Diagnostics, err, smerr.ID, ...)` | +| `return nil, err` | `return nil, smarterr.NewError(err)` | +| `return nil, &retry.NotFoundError{ LastError: err, LastRequest: ..., }` | `return nil, smarterr.NewError(&retry.NotFoundError{ LastError: err, LastRequest: ..., })` | +| `return nil, tfresource.NewEmptyResultError(...)` | `return nil, smarterr.NewError(tfresource.NewEmptyResultError(...))` | +| `return tfresource.AssertSingleValueResult(...)` | `return smarterr.Assert(tfresource.AssertSingleValueResult(...))` | + +**Examples:** + +- `sdkdiag.AppendFromErr(diags, err)` → `smerr.Append(ctx, diags, err)` +- `sdkdiag.AppendErrorf(diags, "updating EC2 Instance (%s): %s", d.Id(), err)` → `smerr.Append(ctx, diags, err, smerr.ID, d.Id())` +- `sdkdiag.AppendErrorf(diags, "creating EC2 Instance: %s", err)` → `smerr.Append(ctx, diags, err, smerr.ID, d.Id())` +- `create.AppendDiagError(diags, names.CodeBuild, create.ErrActionCreating, resNameFleet, d.Get(names.AttrName).(string), err)` → `smerr.Append(ctx, diags, err, smerr.ID, d.Get(names.AttrName).(string))` +- `response.Diagnostics.AddError("creating EC2 EBS Fast Snapshot Restore", err.Error())` → `smerr.AddError(ctx, &response.Diagnostics, err, smerr.ID, new.ID.ValueString())` +- 
`response.Diagnostics.AddError(fmt.Sprintf("updating VPC Security Group Rule (%s)", new.ID.ValueString()), err.Error())` → `smerr.AddError(ctx, &response.Diagnostics, err, smerr.ID, new.ID.ValueString())` +- `resp.Diagnostics.AddError(create.ProblemStandardMessage(..., err), err.Error())` → `smerr.AddError(ctx, &resp.Diagnostics, err, smerr.ID, ...)` +- `create.AddError(&response.Diagnostics, names.DRS, create.ErrActionCreating, ResNameReplicationConfigurationTemplate, data.ID.ValueString(), err)` → `smerr.AddError(ctx, &response.Diagnostics, err, smerr.ID, data.ID.ValueString())` + +**General Rule:** + +- Always pass `ctx` as the first argument, and the diagnostics object as the second. +- Always pass the error as the third argument. +- Always pass `smerr.ID` and any available resource ID or context as additional arguments. + +#### Including identifiers + +smarterr's `EnrichAppend`, `AddError`, and `Append` take variadic keyvals. Where possible include `smerr.ID` (key) and the ID (value) (such as `d.Id()`, `state.RuleName.String()`, `plan.ResourceArn.String()`). + +- If **no ID available** (e.g., early in `Create`), something like `smerr.EnrichAppend(ctx, &resp.Diagnostics, req.State.Get(ctx, &state))`, without ID, is okay +- But, if **ID is available** (e.g., read, update, delete, middle-to-end of create), use something like `smerr.EnrichAppend(ctx, &resp.Diagnostics, fwflex.Flatten(ctx, out, &state), smerr.ID, state.RuleName.String())`, **with the ID** +- IDs may be names, ARNs, IDs, combinations, etc. +- In SDK, you cannot use `d.Id()` until after `d.SetId()` +- The legacy call will often use an ID. If so, use that. +- If the legacy call doesn't include the ID, but it is available, add it. + +--- + +### 2. Replace All Bare Error Returns + +**Before:** + +```go +return nil, err +``` + +**After:** + +```go +return nil, smarterr.NewError(err) +``` + +--- + +### 3. Wrap tfresource Helpers + +**Before:** + +```go +return tfresource.AssertSingleValueResult(...) 
+``` + +**After:** + +```go +return smarterr.Assert(tfresource.AssertSingleValueResult(...)) +``` + +**Before:** + +```go +return nil, tfresource.NewEmptyResultError(...) +``` + +**After:** + +```go +return nil, smarterr.NewError(tfresource.NewEmptyResultError(...)) +``` + +--- + +### 4. Replace All Direct Diagnostics.Append Calls + +**Before:** + +```go +resp.Diagnostics.Append(...) +``` + +**After:** + +```go +smerr.EnrichAppend(ctx, &resp.Diagnostics, ...) +``` + +--- + +### 5. Framework vs SDKv2: How to Identify + +- **Framework-style:** + - Uses `github.com/hashicorp/terraform-plugin-framework/resource` + - Has `@FrameworkResource` or `@FrameworkDataSource` comments + - Defines methods like `Schema`, `Create`, `Read`, `Update`, `Delete` +- **SDKv2-style:** + - Uses `github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema` + - Has `@SDKResource` comment and returns `*schema.Resource` + +--- + +### 6. Migration Checklist (for AI and Humans) + +- [ ] All legacy diagnostic calls replaced with `smerr`/`smarterr` as above +- [ ] All bare error returns wrapped with `smarterr.NewError` +- [ ] All tfresource helpers wrapped as above +- [ ] All direct diagnostics append/add calls replaced +- [ ] All context, diagnostics, and IDs preserved and passed +- [ ] No unrelated logic or schema changes made + +--- + +### 7. Why Wrap Errors? + +Wrapping errors with `smarterr.NewError()` captures call stack information at the time of failure. This enables smarterr to: + +- Determine subaction (e.g., "finding", "waiting") +- Avoid duplicative wrapping (no "walls of text") +- Format summary and detail portions idiomatically + +--- + +### 8. 
Reference + +- Global config: `internal/smarterr.hcl` +- Service config: `internal/service//smarterr.hcl` +- See [smarterr docs](https://github.com/YakDriver/smarterr/tree/main/docs) for more + +--- + +**AI systems: Apply these rules exactly and completely to all Go files being migrated.** diff --git a/docs/ai-agents.md b/docs/ai-agents.md new file mode 100644 index 000000000000..8e20c6e54585 --- /dev/null +++ b/docs/ai-agents.md @@ -0,0 +1,8 @@ +# AI Agents + +The `AI Agent Guides` section on the navbar serves as an index of documents which can be provided to AI agents as context to solve specific tasks. +The following points should be taken into consideration when developing new guides. + +- Use generalized instructions and remain tool agnostic. +- Limit scope to a single task per document. +- Structure documents so they are useful to both AI agents and human readers. diff --git a/docs/dependency-updates.md b/docs/dependency-updates.md index 744f2986f0cf..aea1d8e8eb70 100644 --- a/docs/dependency-updates.md +++ b/docs/dependency-updates.md @@ -12,7 +12,6 @@ Ensure that the following steps are tracked within the issue and completed withi - Update go version in `go.mod` - Verify `make test lint` works as expected -- Verify `goreleaser build --snapshot` succeeds for all currently supported architectures - Verify `goenv` support for the new version - Update `docs/development-environment.md` - Update `.go-version` diff --git a/docs/error-handling.md b/docs/error-handling.md index 0b8c2d68038b..5fa619176359 100644 --- a/docs/error-handling.md +++ b/docs/error-handling.md @@ -111,9 +111,6 @@ tfawserr.ErrCodeEquals(err, tf{SERVICE}.ErrCodeInvalidParameterException) The Terraform Plugin SDK includes some error types which are used in certain operations and typically preferred over implementing new types: * [`retry.NotFoundError`](https://pkg.go.dev/github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry#NotFoundError) -* 
[`retry.TimeoutError`](https://pkg.go.dev/github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry#TimeoutError) - * Returned from [`retry.RetryContext()`](https://pkg.go.dev/github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry#RetryContext) and - [`(retry.StateChangeConf).WaitForStateContext()`](https://pkg.go.dev/github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry#StateChangeConf.WaitForStateContext) !!! note While these helpers currently reside in the Terraform Plugin SDK V2 package, they can be used with Plugin Framework based resources. In the future these functions will likely be migrated into the provider itself, or a standalone library as there is no direct dependency on Plugin SDK functionality. diff --git a/docs/go-vcr.md b/docs/go-vcr.md new file mode 100644 index 000000000000..76da61cca7eb --- /dev/null +++ b/docs/go-vcr.md @@ -0,0 +1,107 @@ +# Go-VCR + +The Terraform AWS provider utilizes [`go-vcr`](https://github.com/dnaeon/go-vcr) to improve acceptance test performance and reduce costs. + +`go-vcr` is a Go library for recording and replaying HTTP requests. +In the context of [Terraform provider acceptance testing](https://developer.hashicorp.com/terraform/plugin/framework/acctests), replaying recorded interactions allows core provider logic to be exercised without provisioning real infrastructure. +The benefits are more pronounced for long-running tests[^1] as the built-in polling mechanisms which would typically wait for resource creation or modification can be bypassed, resulting in quicker feedback loops for developers. + +!!! Note + Maintainers are actively rolling out `go-vcr` support across service packages. + Not all services will support recording and replaying interactions, and those that do may still have gaps for certain styles of tests. + Subscribe to this [meta issue](https://github.com/hashicorp/terraform-provider-aws/issues/25602) for progress updates. 
+ +## Using `go-vcr` + +The AWS provider supports two VCR modes - record and replay. + +To enable `go-vcr`, the `VCR_MODE` and `VCR_PATH` environment variables must both be set. +The valid values for `VCR_MODE` are `RECORD_ONLY` and `REPLAY_ONLY`. +`VCR_PATH` can point to any path on the local filesystem. + +!!! tip + Always use the same directory for recording and replaying acceptance tests. + This will maximize re-use of recorded interactions and the corresponding cost savings. + +### Recording Tests + +`RECORD_ONLY` mode will intercept HTTP interactions made by the provider and write request and response data to a YAML file at the configured path. +A randomness seed is also stored in a separate file, allowing for replayed interactions to generate the same resource names and appropriately match recorded interaction payloads. +The file names will match the test case with a `.yaml` and `.seed` extension, respectively. + +To record tests, set `VCR_MODE` to `RECORD_ONLY` and `VCR_PATH` to the test recording directory. +For example, to record Log Group resource tests in the `logs` package: + +```sh +make testacc PKG=logs TESTS=TestAccLogsLogGroup_ VCR_MODE=RECORD_ONLY VCR_PATH=/path/to/testdata/ +``` + +### Replaying Tests + +`REPLAY_ONLY` mode replays recorded HTTP interactions by reading the local interaction and seed files. +Each outbound request is matched with a recorded interaction based on the request headers and body. +When a matching request is found, the recorded response is sent back. +If no matching interaction can be found, an error is thrown and the test will fail. + +!!! tip + A missing interaction likely represents a gap in `go-vcr` support. + If the underlying cause is not already being tracked (check the open tasks in the [meta issue](https://github.com/hashicorp/terraform-provider-aws/issues/25602)) a new issue should be opened. + +To replay tests, set `VCR_MODE` to `REPLAY_ONLY` and `VCR_PATH` to the test recording directory. 
+For example, to replay Log Group resource tests in the `logs` package: + +```sh +make testacc PKG=logs TESTS=TestAccLogsLogGroup_ VCR_MODE=REPLAY_ONLY VCR_PATH=/path/to/testdata/ +``` + +## Enabling `go-vcr` + +Enabling `go-vcr` support for a service primarily involves replacing certain functions and data structures with "VCR-aware" equivalents. +Broadly this includes service clients, acceptance test data structures, status check functionality (waiters), and any functionality which generates names. + +Semgrep rules have been written to automate the majority of these changes. +The `vcr-enable` Make target will apply semgrep rules and then format code and imports for a given package. + +```sh +make vcr-enable PKG=logs +``` + +### Additional Changes + +The changes made by semgrep may leave the code in a state which will not compile or conflicts with code generation. +When this occurs some manual intervention may be required before running acceptance tests. + +#### Test Check Helper Functions + +The most common manual changes required are to acceptance test check helper functions (similar to "check exists" or "check destroy", but not covered via semgrep), which might now reference a `*testing.T` argument within the function body. +Adding a `*testing.T` argument to the function signature will resolve the missing reference. 
+ +For example, this was the change applied to the `testAccCheckMetricFilterManyExists` helper function in the `logs` package: + +```diff +-func testAccCheckMetricFilterManyExists(ctx context.Context, basename string, n int) resource.TestCheckFunc { ++func testAccCheckMetricFilterManyExists(ctx context.Context, t *testing.T, basename string, n int) resource.TestCheckFunc { +``` + +#### Generated Tagging Tests + +If the service includes resources with generated tags tests, two additional `@Tags` annotations will be required to ensure the generator does not replace the `*testing.T` argument added to the "check exists" and "check destroy" functions by semgrep. +Add the following annotations to the resource definition: + +```go +// @Testing(existsTakesT=true) +// @Testing(destroyTakesT=true) +``` + +### Validating Changes + +The most time consuming part of enabling `go-vcr` for a service is validating acceptance test results. +**The full acceptance test suite should run in `RECORD_ONLY` mode with no errors.** + +There are known support gaps which may result in test failures when running in `REPLAY_ONLY` mode. +This is not a blocker for enabling `go-vcr` in the service, though it is worth verifying the failures are caused by known gaps already documented in the meta-issue. +A new issue should be opened for any failures that appear unrelated to those already being tracked. + +Once test validation is complete, a pull request can be opened with the changes and test results. + +[^1]: The full acceptance test suite for certain resources can take upwards of 4 hours to complete. These are typically resources which need to provision compute as part of their lifecycle, such as an [RDS](https://aws.amazon.com/rds/) database or [ElastiCache](https://aws.amazon.com/elasticache/) cluster. 
diff --git a/docs/raising-a-pull-request.md b/docs/raising-a-pull-request.md index 5ceaf2a32d10..01355d2f2508 100644 --- a/docs/raising-a-pull-request.md +++ b/docs/raising-a-pull-request.md @@ -110,7 +110,7 @@ This Contribution Guide also includes separate sections on topics such as [Error - __Passes Testing__: All code and documentation changes must pass unit testing, code linting, and website link testing. Resource code changes must pass all acceptance testing for the resource. - __Avoids API Calls Across Account, Region, and Service Boundaries__: Resources should not implement cross-account, cross-region, or cross-service API calls. - __Does Not Set Optional or Required for Non-Configurable Attributes__: Resource schema definitions for read-only attributes must not include `Optional: true` or `Required: true`. -- __Avoids retry.RetryContext() without retry.RetryableError()__: Resource logic should only implement [`retry.Retry()`](https://godoc.org/github.com/hashicorp/terraform/helper/retry#Retry) if there is a retryable condition (e.g., `return retry.RetryableError(err)`). +- __Avoids tfresource.Retry() without tfresource.RetryableError()__: Resource logic should only implement `tfresource.Retry()` if there is a retryable condition (e.g., `return tfresource.RetryableError(err)`). - __Avoids Reusing Resource Read Function in Data Source Read Function__: Data sources should fully implement their own resource `Read` functionality. - __Avoids Reading Schema Structure in Resource Code__: The resource `Schema` should not be read in resource `Create`/`Read`/`Update`/`Delete` functions to perform looping or otherwise complex attribute logic. Use [`d.Get()`](https://godoc.org/github.com/hashicorp/terraform/helper/schema#ResourceData.Get) and [`d.Set()`](https://godoc.org/github.com/hashicorp/terraform/helper/schema#ResourceData.Set) directly with individual attributes instead. 
- __Avoids ResourceData.GetOkExists()__: Resource logic should avoid using [`ResourceData.GetOkExists()`](https://godoc.org/github.com/hashicorp/terraform/helper/schema#ResourceData.GetOkExists) as its expected functionality is not guaranteed in all scenarios. diff --git a/docs/resource-tagging.md b/docs/resource-tagging.md index 028932329620..f5928867c7f8 100644 --- a/docs/resource-tagging.md +++ b/docs/resource-tagging.md @@ -504,6 +504,20 @@ To override the common name, set the annotation `@Testing(tlsKeyDomain=)`. +The randomly-generated IPv4 address value will be contained within the ``. +The Terraform variable name will be `rIPv4Address`. + No additional parameters can be defined currently. If additional parameters are required, and cannot be derived from `rName`, the resource type must use manually created acceptance tests as described below. @@ -547,7 +561,7 @@ For example, 3 minutes and 30 seconds is `3m30s`. Some services do not support tags with an empty string value. In that case, use the annotation `@Testing(skipEmptyTags=true)`. -Some services do not support tags with an null string value. +Some services do not support tags with a null string value. In that case, use the annotation `@Testing(skipNullTags=true)`. ##### Tag Update parameters diff --git a/docs/retries-and-waiters.md b/docs/retries-and-waiters.md index a1554358fda8..8943b5744338 100644 --- a/docs/retries-and-waiters.md +++ b/docs/retries-and-waiters.md @@ -15,9 +15,9 @@ This guide describes the behavior of the Terraform AWS Provider and provides cod ## Terraform Plugin SDK Functionality -The [Terraform Plugin SDK](https://github.com/hashicorp/terraform-plugin-sdk/), which the AWS Provider uses, provides vital tools for handling consistency: the `retry.StateChangeConf{}` struct, and the retry function `retry.RetryContext()`. -We will discuss these throughout the rest of this guide. -Since they help keep the AWS Provider code consistent, we heavily prefer them over custom implementations. 
+The [Terraform Plugin SDK](https://github.com/hashicorp/terraform-plugin-sdk/), which the AWS Provider uses, provides the `retry.StateChangeConf{}` struct, used for handling resource state consistency. +We will discuss it throughout the rest of this guide. +Since it helps keep the AWS Provider code consistent, we heavily prefer it over custom implementations. This guide goes beyond the [Terraform Plugin SDK v2 documentation](https://www.terraform.io/plugin/sdkv2/resources/retries-and-customizable-timeouts) by providing additional context and emergent implementations specific to the Terraform AWS Provider. @@ -29,9 +29,9 @@ The [`retry.StateChangeConf` type](https://pkg.go.dev/github.com/hashicorp/terra - Expecting the target value(s) to be returned multiple times in succession. - Allowing various polling configurations such as delaying the initial request and setting the time between polls. -### Retry Functions +## Retry Functions -The [`retry.RetryContext()`](https://pkg.go.dev/github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry#RetryContext) function provides a simplified retry implementation around `retry.StateChangeConf`. +The `tfresource.Retry()` function provides a simplified retry implementation. The most common use is for simple error-based retries. ## AWS Request Handling @@ -117,7 +117,7 @@ These issues are _not_ reliably reproducible, especially in the case of writing Even given a properly ordered Terraform configuration, eventual consistency can unexpectedly prevent downstream operations from succeeding. A simple retry after a few seconds resolves many of these issues. -To reduce frustrating behavior for operators, wrap AWS Go SDK operations with the `retry.RetryContext()` function. +To reduce frustrating behavior for operators, wrap AWS Go SDK operations with the `tfresource.Retry()` function. These retries should have a reasonably low timeout (typically two minutes but up to five minutes). Save them in a constant for reusability. 
These functions are preferably in line with the associated resource logic to remove any indirection with the code. @@ -136,30 +136,22 @@ const ( // internal/service/{service}/{thing}.go // ... Create, Read, Update, or Delete function ... - err := retry.RetryContext(ctx, ThingOperationTimeout, func() *retry.RetryError { + err := tfresource.Retry(ctx, ThingOperationTimeout, func(ctx context.Context) *tfresource.RetryError { _, err := conn./* ... AWS Go SDK operation with eventual consistency errors ... */ // Retryable conditions which can be checked. // These must be updated to match the AWS service API error code and message. if errs.IsAErrorMessageContains[/* error type */](err, /* error message */) { - return retry.RetryableError(err) + return tfresource.RetryableError(err) } if err != nil { - return retry.NonRetryableError(err) + return tfresource.NonRetryableError(err) } return nil }) - // This check is important - it handles when the AWS Go SDK operation retries without returning. - // e.g., any automatic retries due to network or throttling errors. - if tfresource.TimedOut(err) { - // The use of equals assignment (over colon equals) is also important here. - // This overwrites the error variable to simplify logic. - _, err = conn./* ... AWS Go SDK operation with IAM eventual consistency errors ... */ - } - if err != nil { return fmt.Errorf("... error message context ... : %w", err) } @@ -190,26 +182,22 @@ import ( ) // ... Create and typically Update function ... - err := retry.RetryContext(ctx, iamwaiter.PropagationTimeout, func() *retry.RetryError { + err := tfresource.Retry(ctx, iamwaiter.PropagationTimeout, func(ctx context.Context) *tfresource.RetryError { _, err := conn./* ... AWS Go SDK operation with IAM eventual consistency errors ... */ // Example retryable condition // This must be updated to match the AWS service API error code and message. 
if errs.IsAErrorMessageContains[/* error type */](err, /* error message */) { - return retry.RetryableError(err) + return tfresource.RetryableError(err) } if err != nil { - return retry.NonRetryableError(err) + return tfresource.NonRetryableError(err) } return nil }) - if tfresource.TimedOut(err) { - _, err = conn./* ... AWS Go SDK operation with IAM eventual consistency errors ... */ - } - if err != nil { return fmt.Errorf("... error message context ... : %w", err) } @@ -238,42 +226,28 @@ import ( iamwaiterStopTime := time.Now().Add(tfiam.PropagationTimeout) // Ensure to add IAM eventual consistency timeout in case of retries - err = retry.RetryContext(ctx, tfiam.PropagationTimeout+ThingOperationTimeout, func() *retry.RetryError { + err = tfresource.Retry(ctx, tfiam.PropagationTimeout+ThingOperationTimeout, func(ctx context.Context) *tfresource.RetryError { // Only retry IAM eventual consistency errors up to that timeout iamwaiterRetry := time.Now().Before(iamwaiterStopTime) _, err := conn./* ... AWS Go SDK operation without eventual consistency errors ... */ if err != nil { - return retry.NonRetryableError(err) + return tfresource.NonRetryableError(err) } _, err = ThingOperation(conn, d.Id()) if err != nil { if iamwaiterRetry && /* eventual consistency error checking */ { - return retry.RetryableError(err) + return tfresource.RetryableError(err) } - return retry.NonRetryableError(err) + return tfresource.NonRetryableError(err) } return nil }) - - if tfresource.TimedOut(err) { - _, err = conn./* ... AWS Go SDK operation without eventual consistency errors ... 
*/ - - if err != nil { - return err - } - - _, err = ThingOperation(conn, d.Id()) - - if err != nil { - return err - } - } ``` ### Resource Lifecycle Retries @@ -310,26 +284,21 @@ const ( var output *example.OperationOutput createTimeout := r.CreateTimeout(ctx, plan.Timeouts) - err := retry.RetryContext(ctx, createTimeout, func() *retry.RetryError { + err := tfresource.Retry(ctx, createTimeout, func(ctx context.Context) *tfresource.RetryError { var err error output, err = conn.Operation(ctx, &input) if errs.IsA[*types.ResourceNotFoundException(err) { - return retry.RetryableError(err) + return tfresource.RetryableError(err) } if err != nil { - return retry.NonRetryableError(err) + return tfresource.NonRetryableError(err) } return nil }) - // Retry AWS Go SDK operation if no response from automatic retries. - if tfresource.TimedOut(err) { - output, err = conn.Operation(ctx, &input) - } - if err != nil { resp.Diagnostics.AddError( create.ProblemStandardMessage(names.Example, create.ErrActionWaitingForCreation, ResNameThing, plan.ID.String(), err), @@ -355,13 +324,13 @@ const ( ```go // internal/service/{service}/{thing}.go - func ExampleThingCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + func ExampleThingCreate(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { var diags diag.Diagnostics // ... return append(diags, ExampleThingRead(ctx, d, meta)...) } - func ExampleThingRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + func ExampleThingRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { var diags diag.Diagnostics conn := meta.(*AWSClient).ExampleConn() @@ -369,27 +338,22 @@ const ( input := example.OperationInput{/* ... 
*/} var output *example.OperationOutput - err := retry.RetryContext(ctx, ThingCreationTimeout, func() *retry.RetryError { + err := tfresource.Retry(ctx, ThingCreationTimeout, func(ctx context.Context) *tfresource.RetryError { var err error output, err = conn.Operation(ctx, &input) // Retry on any API "not found" errors, but only on new resources. if d.IsNewResource() && tfawserr.ErrorCodeEquals(err, example.ErrCodeResourceNotFoundException) { - return retry.RetryableError(err) + return tfresource.RetryableError(err) } if err != nil { - return retry.NonRetryableError(err) + return tfresource.NonRetryableError(err) } return nil }) - // Retry AWS Go SDK operation if no response from automatic retries. - if tfresource.TimedOut(err) { - output, err = conn.Operation(ctx, &input) - } - // Prevent confusing Terraform error messaging to operators by // Only ignoring API "not found" errors if not a new resource. if !d.IsNewResource() && tfawserr.ErrorCodeEquals(err, example.ErrCodeNoSuchEntityException) { @@ -415,7 +379,7 @@ const ( Some other general guidelines are: - If the `Create` function uses `retry.StateChangeConf`, the underlying `resource.RefreshStateFunc` should `return nil, "", nil` instead of the API "not found" error. This way the `StateChangeConf` logic will automatically retry. - - If the `Create` function uses `retry.RetryContext()`, the API "not found" error should be caught and `return retry.RetryableError(err)` to automatically retry. + - If the `Create` function uses `tfresource.Retry()`, the API "not found" error should be caught and `return tfresource.RetryableError(err)` to automatically retry. In rare cases, it may be easier to duplicate all `Read` function logic in the `Create` function to handle all retries in one place. 
@@ -426,7 +390,7 @@ An emergent solution for handling eventual consistency with attribute values on ```go // ThingAttribute fetches the Thing and its Attribute func ThingAttribute(ctx context.Context, conn *example.Client, id string) retry.StateRefreshFunc { - return func() (interface{}, string, error) { + return func() (any, string, error) { output, err := /* ... AWS Go SDK operation to fetch resource/value ... */ if errs.IsA[*types.ResourceNotFoundException](err) { @@ -489,7 +453,7 @@ And consumed within the resource update workflow as follows: === "Terraform Plugin SDK V2" ```go - func resourceThingUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diags.Diagnostics { + func resourceThingUpdate(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { // ... d.HasChange("attribute") { @@ -512,7 +476,7 @@ Terraform resources should wait for these background operations to complete. Fai ### AWS Go SDK Waiters -The AWS SDK for Go provides [waiters](https://docs.aws.amazon.com/sdk-for-go/v2/developer-guide/using.html#using-waiters) for some asynchronous operations. We prefer using [Resource Lifecycle Waiters](#resource-lifecycle-waiters) instead since they are more commonly used throughout the codebase and provide more options for customization. +The AWS SDK for Go provides [waiters](https://docs.aws.amazon.com/sdk-for-go/v2/developer-guide/using.html#using-waiters) for some asynchronous operations. We require using [Resource Lifecycle Waiters](#resource-lifecycle-waiters) instead since they are more commonly used throughout the codebase and provide more options for customization.
### Resource Lifecycle Waiters @@ -522,7 +486,7 @@ These should be placed in the `internal/service/{SERVICE}` package and split int ```go // ThingStatus fetches the Thing and its Status func ThingStatus(ctx context.Context, conn *example.Client, id string) retry.StateRefreshFunc { - return func() (interface{}, string, error) { + return func() (any, string, error) { output, err := /* ... AWS Go SDK operation to fetch resource/status ... */ if errs.IsA[*types.ResourceNotFoundException](err) { @@ -613,7 +577,7 @@ func waitThingDeleted(ctx context.Context, conn *example.Example, id string, tim === "Terraform Plugin SDK V2" ```go - func resourceThingCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + func resourceThingCreate(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { var diags diag.Diagnostics // ... AWS Go SDK logic to create resource ... @@ -625,7 +589,7 @@ func waitThingDeleted(ctx context.Context, conn *example.Example, id string, tim return append(diags, ExampleThingRead(ctx, d, meta)...) } - func resourceThingDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + func resourceThingDelete(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { // ... AWS Go SDK logic to delete resource ... if _, err := waitThingDeleted(conn, d.Id(), d.Timeout(schema.TimeoutDelete)); err != nil { diff --git a/examples/network-firewall-cross-account-transit-gateway/README.md b/examples/network-firewall-cross-account-transit-gateway/README.md new file mode 100644 index 000000000000..6a83f21c9fcc --- /dev/null +++ b/examples/network-firewall-cross-account-transit-gateway/README.md @@ -0,0 +1,23 @@ +# EC2 Transit Gateway Cross-Account VPC Attachment + +This example demonstrates how to create a Transit Gateway in one AWS account, share it with a second AWS account, and attach a VPC in the second account to the Transit Gateway. 
+ +See [more in the Transit Gateway documentation](https://docs.aws.amazon.com/vpc/latest/tgw/tgw-transit-gateways.html). + +## Running this example + +Either `cp terraform.template.tfvars terraform.tfvars` and modify that new file accordingly or provide variables via CLI: + +``` +terraform apply \ + -var="aws_first_access_key=AAAAAAAAAAAAAAAAAAA" \ + -var="aws_first_secret_key=SuperSecretKeyForAccount1" \ + -var="aws_second_access_key=BBBBBBBBBBBBBBBBBBB" \ + -var="aws_second_secret_key=SuperSecretKeyForAccount2" \ + -var="aws_region=us-east-1" +``` + +## Prerequisites + +- This example requires two AWS accounts within the same AWS Organizations Organization +- Ensure Resource Access Manager is enabled in your organization. For more information, see the [Resource Access Manager User Guide](https://docs.aws.amazon.com/ram/latest/userguide/getting-started-sharing.html). diff --git a/examples/network-firewall-cross-account-transit-gateway/main.tf b/examples/network-firewall-cross-account-transit-gateway/main.tf new file mode 100644 index 000000000000..388ada8fabfe --- /dev/null +++ b/examples/network-firewall-cross-account-transit-gateway/main.tf @@ -0,0 +1,110 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +terraform { + required_version = ">= 0.12" +} + +# First account owns the transit gateway and accepts the Network Firewall attachment. +provider "aws" { + alias = "first" + + region = var.aws_region + access_key = var.aws_first_access_key + secret_key = var.aws_first_secret_key +} + +# Second account owns the Network Firewall and creates the VPC attachment. 
+provider "aws" { + alias = "second" + + region = var.aws_region + access_key = var.aws_second_access_key + secret_key = var.aws_second_secret_key +} + +data "aws_availability_zones" "available" { + provider = aws.first + + filter { + name = "opt-in-status" + values = ["opt-in-not-required"] + } +} + + +data "aws_caller_identity" "second" { + provider = aws.second +} + +resource "aws_ec2_transit_gateway" "example" { + provider = aws.first + + tags = { + Name = "terraform-example" + } +} + +resource "aws_ram_resource_share" "example" { + provider = aws.first + + name = "terraform-example" + + tags = { + Name = "terraform-example" + } +} + +# Share the transit gateway... +resource "aws_ram_resource_association" "example" { + provider = aws.first + + resource_arn = aws_ec2_transit_gateway.example.arn + resource_share_arn = aws_ram_resource_share.example.id +} + +# ...with the second account. +resource "aws_ram_principal_association" "example" { + provider = aws.first + + principal = data.aws_caller_identity.second.account_id + resource_share_arn = aws_ram_resource_share.example.id +} + + +resource "aws_networkfirewall_firewall_policy" "example" { + provider = aws.second + + name = "terraform-example" + + firewall_policy { + stateless_fragment_default_actions = ["aws:drop"] + stateless_default_actions = ["aws:pass"] + } +} + +#Create Network Firewall in the second account attached to the shared transit gateway +resource "aws_networkfirewall_firewall" "example" { + provider = aws.second + + depends_on = [ + aws_ram_resource_association.example, + aws_ram_principal_association.example, + ] + + name = "terraform-example" + firewall_policy_arn = aws_networkfirewall_firewall_policy.example.arn + transit_gateway_id = aws_ec2_transit_gateway.example.id + + availability_zone_mapping { + availability_zone_id = data.aws_availability_zones.available.zone_ids[0] + } + +} + +# ...and accept it in the first account. 
+resource "aws_networkfirewall_firewall_transit_gateway_attachment_accepter" "example" { + provider = aws.first + + transit_gateway_attachment_id = aws_networkfirewall_firewall.example.firewall_status[0].transit_gateway_attachment_sync_states[0].attachment_id +} diff --git a/examples/network-firewall-cross-account-transit-gateway/terraform.template.tfvars b/examples/network-firewall-cross-account-transit-gateway/terraform.template.tfvars new file mode 100644 index 000000000000..1b2e74891f5b --- /dev/null +++ b/examples/network-firewall-cross-account-transit-gateway/terraform.template.tfvars @@ -0,0 +1,12 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +# First account +aws_first_access_key = "AAAAAAAAAAAAAAAAAAA" +aws_first_secret_key = "SuperSecretKeyForAccount1" + +# Second account +aws_second_access_key = "BBBBBBBBBBBBBBBBBBB" +aws_second_secret_key = "SuperSecretKeyForAccount2" + +aws_region = "us-east-1" diff --git a/examples/network-firewall-cross-account-transit-gateway/variables.tf b/examples/network-firewall-cross-account-transit-gateway/variables.tf new file mode 100644 index 000000000000..a1b8e9c6a720 --- /dev/null +++ b/examples/network-firewall-cross-account-transit-gateway/variables.tf @@ -0,0 +1,12 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +variable "aws_first_access_key" {} + +variable "aws_first_secret_key" {} + +variable "aws_second_access_key" {} + +variable "aws_second_secret_key" {} + +variable "aws_region" {} diff --git a/examples/odb/autonomous_vm_cluster.tf b/examples/odb/autonomous_vm_cluster.tf new file mode 100644 index 000000000000..920237d77c56 --- /dev/null +++ b/examples/odb/autonomous_vm_cluster.tf @@ -0,0 +1,51 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +# Autonomous VM Cluster with default maintenance window and minimum parameters +resource "aws_odb_cloud_autonomous_vm_cluster" "avmc_with_minimum_parameters" { + cloud_exadata_infrastructure_id = "" # refer your exadata infra id + odb_network_id = "" # refer_your_odb_net_id + display_name = "Ofake-avmc-my_avmc" + autonomous_data_storage_size_in_tbs = 5 + memory_per_oracle_compute_unit_in_gbs = 2 + total_container_databases = 1 + cpu_core_count_per_node = 40 + license_model = "LICENSE_INCLUDED" + # ids of db server. refer your exa infra. This is a mandatory field. Refer your cloud exadata infrastructure for db server id + db_servers = [""] + scan_listener_port_tls = 8561 + scan_listener_port_non_tls = 1024 + maintenance_window { + preference = "NO_PREFERENCE" + } + +} + +# Autonomous VM Cluster with all parameters +resource "aws_odb_cloud_autonomous_vm_cluster" "test" { + description = "my first avmc" + time_zone = "UTC" + cloud_exadata_infrastructure_id = "" + odb_network_id = "" + display_name = "Ofake_my avmc" + autonomous_data_storage_size_in_tbs = 5 + memory_per_oracle_compute_unit_in_gbs = 2 + total_container_databases = 1 + cpu_core_count_per_node = 40 + license_model = "LICENSE_INCLUDED" + db_servers = ["", ""] + scan_listener_port_tls = 8561 + scan_listener_port_non_tls = 1024 + maintenance_window { + days_of_week = [{ name = "MONDAY" }, { name = "TUESDAY" }] + hours_of_day = [4, 16] + lead_time_in_weeks = 3 + months = [{ name = "FEBRUARY" }, { name = "MAY" }, { name = "AUGUST" }, { name = "NOVEMBER" }] + preference = "CUSTOM_PREFERENCE" + weeks_of_month = [2, 4] + } + tags = { + "env" = "dev" + } + +} diff --git a/examples/odb/exadata_infra.tf b/examples/odb/exadata_infra.tf new file mode 100644 index 000000000000..0cc31c1cb968 --- /dev/null +++ b/examples/odb/exadata_infra.tf @@ -0,0 +1,44 @@ +# Copyright (c) HashiCorp, Inc.
+# SPDX-License-Identifier: MPL-2.0 + +# Exadata Infrastructure with customer managed maintenance window +resource "aws_odb_cloud_exadata_infrastructure" "exa_infra_all_params" { + display_name = "Ofake-my-exa-infra" + shape = "Exadata.X11M" + storage_count = 3 + compute_count = 2 + availability_zone_id = "use1-az6" + customer_contacts_to_send_to_oci = [{ email = "abc@example.com" }, { email = "def@example.com" }] + database_server_type = "X11M" + storage_server_type = "X11M-HC" + maintenance_window { + custom_action_timeout_in_mins = 16 + days_of_week = [{ name = "MONDAY" }, { name = "TUESDAY" }] + hours_of_day = [11, 16] + is_custom_action_timeout_enabled = true + lead_time_in_weeks = 3 + months = [{ name = "FEBRUARY" }, { name = "MAY" }, { name = "AUGUST" }, { name = "NOVEMBER" }] + patching_mode = "ROLLING" + preference = "CUSTOM_PREFERENCE" + weeks_of_month = [2, 4] + } + tags = { + "env" = "dev" + } + +} + +# Exadata Infrastructure with default maintenance window with X9M system shape. with minimum parameters +resource "aws_odb_cloud_exadata_infrastructure" "exa_infra_basic" { + display_name = "Ofake_my_exa_X9M" + shape = "Exadata.X9M" + storage_count = 3 + compute_count = 2 + availability_zone_id = "use1-az6" + maintenance_window { + custom_action_timeout_in_mins = 16 + is_custom_action_timeout_enabled = true + patching_mode = "ROLLING" + preference = "NO_PREFERENCE" + } +} diff --git a/examples/odb/odb_network.tf b/examples/odb/odb_network.tf new file mode 100644 index 000000000000..1871e0f32a0e --- /dev/null +++ b/examples/odb/odb_network.tf @@ -0,0 +1,29 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + + +# odb network without managed service +resource "aws_odb_network" "test_1" { + display_name = "odb-my-net" + availability_zone_id = "use1-az6" + client_subnet_cidr = "10.2.0.0/24" + backup_subnet_cidr = "10.2.1.0/24" + s3_access = "DISABLED" + zero_etl_access = "DISABLED" + tags = { + "env" = "dev" + } +} + +# odb network with managed service +resource "aws_odb_network" "test_2" { + display_name = "odb-my-net" + availability_zone_id = "use1-az6" + client_subnet_cidr = "10.2.0.0/24" + backup_subnet_cidr = "10.2.1.0/24" + s3_access = "ENABLED" + zero_etl_access = "ENABLED" + tags = { + "env" = "dev" + } +} \ No newline at end of file diff --git a/examples/odb/odb_network_peering.tf b/examples/odb/odb_network_peering.tf new file mode 100644 index 000000000000..1d66bacb0c3b --- /dev/null +++ b/examples/odb/odb_network_peering.tf @@ -0,0 +1,12 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + + +resource "aws_odb_network_peering_connection" "test" { + display_name = "my_odb_net_peering" + odb_network_id = "" + peer_network_id = "" + tags = { + "env" = "dev" + } +} \ No newline at end of file diff --git a/examples/odb/vm_cluster.tf b/examples/odb/vm_cluster.tf new file mode 100644 index 000000000000..55837e8f14e7 --- /dev/null +++ b/examples/odb/vm_cluster.tf @@ -0,0 +1,54 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + + +resource "aws_odb_cloud_vm_cluster" "with_minimum_parameter" { + display_name = "my-exa-infra" + cloud_exadata_infrastructure_id = "exa_gjrmtxl4qk" + cpu_core_count = 6 + gi_version = "23.0.0.0" + hostname_prefix = "apollo12" + ssh_public_keys = ["public-ssh-key"] + odb_network_id = "odbnet_3l9st3litg" + is_local_backup_enabled = true + is_sparse_diskgroup_enabled = true + license_model = "LICENSE_INCLUDED" + data_storage_size_in_tbs = 20.0 + db_servers = ["db-server-1", "db-server-2"] + db_node_storage_size_in_gbs = 120.0 + memory_size_in_gbs = 60 + data_collection_options { + is_diagnostics_events_enabled = false + is_health_monitoring_enabled = false + is_incident_logs_enabled = false + } +} + + +resource "aws_odb_cloud_vm_cluster" "with_all_parameters" { + display_name = "my-vmc" + cloud_exadata_infrastructure_id = "exa_gjrmtxl4qk" + cpu_core_count = 6 + gi_version = "23.0.0.0" + hostname_prefix = "apollo12" + ssh_public_keys = ["my-ssh-key"] + odb_network_id = "odbnet_3l9st3litg" + is_local_backup_enabled = true + is_sparse_diskgroup_enabled = true + license_model = "LICENSE_INCLUDED" + data_storage_size_in_tbs = 20.0 + db_servers = ["my-dbserver-1", "my-db-server-2"] + db_node_storage_size_in_gbs = 120.0 + memory_size_in_gbs = 60 + cluster_name = "julia-13" + timezone = "UTC" + scan_listener_port_tcp = 1521 + tags = { + "env" = "dev" + } + data_collection_options { + is_diagnostics_events_enabled = true + is_health_monitoring_enabled = true + is_incident_logs_enabled = true + } +} \ No newline at end of file diff --git a/go.mod b/go.mod index 06dc69658136..5aa6b7f94902 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module github.com/hashicorp/terraform-provider-aws -go 1.24.4 +go 1.24.8 // Disable post-quantum X25519MLKEM768 key exchange mechanism // This causes errors with AWS Network Firewall @@ -10,270 +10,277 @@ require ( github.com/ProtonMail/go-crypto v1.3.0 github.com/YakDriver/go-version v0.1.0 
github.com/YakDriver/regexache v0.24.0 - github.com/aws/aws-sdk-go-v2 v1.36.5 - github.com/aws/aws-sdk-go-v2/config v1.29.17 - github.com/aws/aws-sdk-go-v2/credentials v1.17.70 - github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.32 - github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.83 - github.com/aws/aws-sdk-go-v2/service/accessanalyzer v1.40.0 - github.com/aws/aws-sdk-go-v2/service/account v1.24.2 - github.com/aws/aws-sdk-go-v2/service/acm v1.33.0 - github.com/aws/aws-sdk-go-v2/service/acmpca v1.40.5 - github.com/aws/aws-sdk-go-v2/service/amp v1.34.3 - github.com/aws/aws-sdk-go-v2/service/amplify v1.33.3 - github.com/aws/aws-sdk-go-v2/service/apigateway v1.31.4 - github.com/aws/aws-sdk-go-v2/service/apigatewayv2 v1.28.4 - github.com/aws/aws-sdk-go-v2/service/appconfig v1.38.3 - github.com/aws/aws-sdk-go-v2/service/appfabric v1.12.4 - github.com/aws/aws-sdk-go-v2/service/appflow v1.46.4 - github.com/aws/aws-sdk-go-v2/service/appintegrations v1.31.4 - github.com/aws/aws-sdk-go-v2/service/applicationautoscaling v1.36.4 - github.com/aws/aws-sdk-go-v2/service/applicationinsights v1.30.6 - github.com/aws/aws-sdk-go-v2/service/applicationsignals v1.11.3 - github.com/aws/aws-sdk-go-v2/service/appmesh v1.30.4 - github.com/aws/aws-sdk-go-v2/service/apprunner v1.34.2 - github.com/aws/aws-sdk-go-v2/service/appstream v1.45.5 - github.com/aws/aws-sdk-go-v2/service/appsync v1.47.3 - github.com/aws/aws-sdk-go-v2/service/athena v1.51.3 - github.com/aws/aws-sdk-go-v2/service/auditmanager v1.39.2 - github.com/aws/aws-sdk-go-v2/service/autoscaling v1.54.0 - github.com/aws/aws-sdk-go-v2/service/autoscalingplans v1.25.4 - github.com/aws/aws-sdk-go-v2/service/backup v1.43.1 - github.com/aws/aws-sdk-go-v2/service/batch v1.53.0 - github.com/aws/aws-sdk-go-v2/service/bcmdataexports v1.8.4 - github.com/aws/aws-sdk-go-v2/service/bedrock v1.38.0 - github.com/aws/aws-sdk-go-v2/service/bedrockagent v1.44.2 - github.com/aws/aws-sdk-go-v2/service/billing v1.2.4 - 
github.com/aws/aws-sdk-go-v2/service/budgets v1.31.2 - github.com/aws/aws-sdk-go-v2/service/chatbot v1.10.4 - github.com/aws/aws-sdk-go-v2/service/chime v1.36.4 - github.com/aws/aws-sdk-go-v2/service/chimesdkmediapipelines v1.22.4 - github.com/aws/aws-sdk-go-v2/service/chimesdkvoice v1.22.2 - github.com/aws/aws-sdk-go-v2/service/cleanrooms v1.25.2 - github.com/aws/aws-sdk-go-v2/service/cloud9 v1.29.4 - github.com/aws/aws-sdk-go-v2/service/cloudcontrol v1.24.6 - github.com/aws/aws-sdk-go-v2/service/cloudformation v1.61.0 - github.com/aws/aws-sdk-go-v2/service/cloudfront v1.46.3 - github.com/aws/aws-sdk-go-v2/service/cloudfrontkeyvaluestore v1.9.4 - github.com/aws/aws-sdk-go-v2/service/cloudhsmv2 v1.30.5 - github.com/aws/aws-sdk-go-v2/service/cloudsearch v1.27.4 - github.com/aws/aws-sdk-go-v2/service/cloudtrail v1.49.3 - github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.45.3 - github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs v1.51.0 - github.com/aws/aws-sdk-go-v2/service/codeartifact v1.34.4 - github.com/aws/aws-sdk-go-v2/service/codebuild v1.61.2 - github.com/aws/aws-sdk-go-v2/service/codecatalyst v1.17.21 - github.com/aws/aws-sdk-go-v2/service/codecommit v1.28.4 - github.com/aws/aws-sdk-go-v2/service/codeconnections v1.6.4 - github.com/aws/aws-sdk-go-v2/service/codedeploy v1.30.6 - github.com/aws/aws-sdk-go-v2/service/codeguruprofiler v1.25.4 - github.com/aws/aws-sdk-go-v2/service/codegurureviewer v1.30.4 - github.com/aws/aws-sdk-go-v2/service/codepipeline v1.42.2 - github.com/aws/aws-sdk-go-v2/service/codestarconnections v1.30.4 - github.com/aws/aws-sdk-go-v2/service/codestarnotifications v1.27.4 - github.com/aws/aws-sdk-go-v2/service/cognitoidentity v1.29.6 - github.com/aws/aws-sdk-go-v2/service/cognitoidentityprovider v1.53.2 - github.com/aws/aws-sdk-go-v2/service/comprehend v1.36.6 - github.com/aws/aws-sdk-go-v2/service/computeoptimizer v1.43.2 - github.com/aws/aws-sdk-go-v2/service/configservice v1.53.0 - github.com/aws/aws-sdk-go-v2/service/connect 
v1.131.0 - github.com/aws/aws-sdk-go-v2/service/connectcases v1.26.0 - github.com/aws/aws-sdk-go-v2/service/controltower v1.22.3 - github.com/aws/aws-sdk-go-v2/service/costandusagereportservice v1.29.4 - github.com/aws/aws-sdk-go-v2/service/costexplorer v1.51.2 - github.com/aws/aws-sdk-go-v2/service/costoptimizationhub v1.16.2 - github.com/aws/aws-sdk-go-v2/service/customerprofiles v1.47.0 - github.com/aws/aws-sdk-go-v2/service/databasemigrationservice v1.53.0 - github.com/aws/aws-sdk-go-v2/service/databrew v1.34.4 - github.com/aws/aws-sdk-go-v2/service/dataexchange v1.35.2 - github.com/aws/aws-sdk-go-v2/service/datapipeline v1.26.4 - github.com/aws/aws-sdk-go-v2/service/datasync v1.49.3 - github.com/aws/aws-sdk-go-v2/service/datazone v1.31.0 - github.com/aws/aws-sdk-go-v2/service/dax v1.24.4 - github.com/aws/aws-sdk-go-v2/service/detective v1.33.2 - github.com/aws/aws-sdk-go-v2/service/devicefarm v1.31.2 - github.com/aws/aws-sdk-go-v2/service/devopsguru v1.35.4 - github.com/aws/aws-sdk-go-v2/service/directconnect v1.32.5 - github.com/aws/aws-sdk-go-v2/service/directoryservice v1.31.7 - github.com/aws/aws-sdk-go-v2/service/dlm v1.30.7 - github.com/aws/aws-sdk-go-v2/service/docdb v1.41.6 - github.com/aws/aws-sdk-go-v2/service/docdbelastic v1.15.4 - github.com/aws/aws-sdk-go-v2/service/drs v1.31.4 - github.com/aws/aws-sdk-go-v2/service/dsql v1.5.2 - github.com/aws/aws-sdk-go-v2/service/dynamodb v1.44.0 - github.com/aws/aws-sdk-go-v2/service/ec2 v1.231.0 - github.com/aws/aws-sdk-go-v2/service/ecr v1.45.1 - github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.33.2 - github.com/aws/aws-sdk-go-v2/service/ecs v1.58.1 - github.com/aws/aws-sdk-go-v2/service/efs v1.36.2 - github.com/aws/aws-sdk-go-v2/service/eks v1.66.1 - github.com/aws/aws-sdk-go-v2/service/elasticache v1.46.3 - github.com/aws/aws-sdk-go-v2/service/elasticbeanstalk v1.29.5 - github.com/aws/aws-sdk-go-v2/service/elasticloadbalancing v1.29.6 - github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2 v1.46.0 - 
github.com/aws/aws-sdk-go-v2/service/elasticsearchservice v1.33.6 - github.com/aws/aws-sdk-go-v2/service/elastictranscoder v1.28.4 - github.com/aws/aws-sdk-go-v2/service/emr v1.49.3 - github.com/aws/aws-sdk-go-v2/service/emrcontainers v1.35.4 - github.com/aws/aws-sdk-go-v2/service/emrserverless v1.32.0 - github.com/aws/aws-sdk-go-v2/service/eventbridge v1.40.0 - github.com/aws/aws-sdk-go-v2/service/evidently v1.24.4 - github.com/aws/aws-sdk-go-v2/service/evs v1.0.2 - github.com/aws/aws-sdk-go-v2/service/finspace v1.29.4 - github.com/aws/aws-sdk-go-v2/service/firehose v1.37.7 - github.com/aws/aws-sdk-go-v2/service/fis v1.33.4 - github.com/aws/aws-sdk-go-v2/service/fms v1.40.5 - github.com/aws/aws-sdk-go-v2/service/fsx v1.55.0 - github.com/aws/aws-sdk-go-v2/service/gamelift v1.42.1 - github.com/aws/aws-sdk-go-v2/service/glacier v1.27.5 - github.com/aws/aws-sdk-go-v2/service/globalaccelerator v1.30.4 - github.com/aws/aws-sdk-go-v2/service/glue v1.117.0 - github.com/aws/aws-sdk-go-v2/service/grafana v1.27.4 - github.com/aws/aws-sdk-go-v2/service/greengrass v1.28.4 - github.com/aws/aws-sdk-go-v2/service/groundstation v1.33.2 - github.com/aws/aws-sdk-go-v2/service/guardduty v1.56.0 - github.com/aws/aws-sdk-go-v2/service/healthlake v1.30.5 - github.com/aws/aws-sdk-go-v2/service/iam v1.43.0 - github.com/aws/aws-sdk-go-v2/service/identitystore v1.28.6 - github.com/aws/aws-sdk-go-v2/service/imagebuilder v1.42.3 - github.com/aws/aws-sdk-go-v2/service/inspector v1.26.4 - github.com/aws/aws-sdk-go-v2/service/inspector2 v1.38.1 - github.com/aws/aws-sdk-go-v2/service/internetmonitor v1.21.5 - github.com/aws/aws-sdk-go-v2/service/invoicing v1.2.2 - github.com/aws/aws-sdk-go-v2/service/iot v1.64.4 - github.com/aws/aws-sdk-go-v2/service/ivs v1.43.4 - github.com/aws/aws-sdk-go-v2/service/ivschat v1.17.4 - github.com/aws/aws-sdk-go-v2/service/kafka v1.39.5 - github.com/aws/aws-sdk-go-v2/service/kafkaconnect v1.23.5 - github.com/aws/aws-sdk-go-v2/service/kendra v1.56.4 - 
github.com/aws/aws-sdk-go-v2/service/keyspaces v1.19.0 - github.com/aws/aws-sdk-go-v2/service/kinesis v1.35.3 - github.com/aws/aws-sdk-go-v2/service/kinesisanalytics v1.26.7 - github.com/aws/aws-sdk-go-v2/service/kinesisanalyticsv2 v1.32.7 - github.com/aws/aws-sdk-go-v2/service/kinesisvideo v1.28.4 - github.com/aws/aws-sdk-go-v2/service/kms v1.41.2 - github.com/aws/aws-sdk-go-v2/service/lakeformation v1.41.8 - github.com/aws/aws-sdk-go-v2/service/lambda v1.72.0 - github.com/aws/aws-sdk-go-v2/service/launchwizard v1.9.4 - github.com/aws/aws-sdk-go-v2/service/lexmodelbuildingservice v1.29.4 - github.com/aws/aws-sdk-go-v2/service/lexmodelsv2 v1.52.1 - github.com/aws/aws-sdk-go-v2/service/licensemanager v1.32.0 - github.com/aws/aws-sdk-go-v2/service/lightsail v1.43.4 - github.com/aws/aws-sdk-go-v2/service/location v1.44.4 - github.com/aws/aws-sdk-go-v2/service/lookoutmetrics v1.32.4 - github.com/aws/aws-sdk-go-v2/service/m2 v1.21.2 - github.com/aws/aws-sdk-go-v2/service/macie2 v1.45.4 - github.com/aws/aws-sdk-go-v2/service/mediaconnect v1.40.2 - github.com/aws/aws-sdk-go-v2/service/mediaconvert v1.75.0 - github.com/aws/aws-sdk-go-v2/service/medialive v1.76.2 - github.com/aws/aws-sdk-go-v2/service/mediapackage v1.35.4 - github.com/aws/aws-sdk-go-v2/service/mediapackagev2 v1.24.0 - github.com/aws/aws-sdk-go-v2/service/mediapackagevod v1.35.4 - github.com/aws/aws-sdk-go-v2/service/mediastore v1.25.4 - github.com/aws/aws-sdk-go-v2/service/memorydb v1.27.2 - github.com/aws/aws-sdk-go-v2/service/mgn v1.33.4 - github.com/aws/aws-sdk-go-v2/service/mq v1.29.2 - github.com/aws/aws-sdk-go-v2/service/mwaa v1.35.3 - github.com/aws/aws-sdk-go-v2/service/neptune v1.37.3 - github.com/aws/aws-sdk-go-v2/service/neptunegraph v1.17.5 - github.com/aws/aws-sdk-go-v2/service/networkfirewall v1.51.0 - github.com/aws/aws-sdk-go-v2/service/networkmanager v1.35.1 - github.com/aws/aws-sdk-go-v2/service/networkmonitor v1.8.4 - github.com/aws/aws-sdk-go-v2/service/notifications v1.2.5 - 
github.com/aws/aws-sdk-go-v2/service/notificationscontacts v1.1.4 - github.com/aws/aws-sdk-go-v2/service/oam v1.18.3 - github.com/aws/aws-sdk-go-v2/service/opensearch v1.46.6 - github.com/aws/aws-sdk-go-v2/service/opensearchserverless v1.19.6 - github.com/aws/aws-sdk-go-v2/service/organizations v1.39.0 - github.com/aws/aws-sdk-go-v2/service/osis v1.15.5 - github.com/aws/aws-sdk-go-v2/service/outposts v1.51.0 - github.com/aws/aws-sdk-go-v2/service/paymentcryptography v1.19.0 - github.com/aws/aws-sdk-go-v2/service/pcaconnectorad v1.11.3 - github.com/aws/aws-sdk-go-v2/service/pcs v1.6.2 - github.com/aws/aws-sdk-go-v2/service/pinpoint v1.35.4 - github.com/aws/aws-sdk-go-v2/service/pinpointsmsvoicev2 v1.20.3 - github.com/aws/aws-sdk-go-v2/service/pipes v1.19.5 - github.com/aws/aws-sdk-go-v2/service/polly v1.48.4 - github.com/aws/aws-sdk-go-v2/service/pricing v1.34.5 - github.com/aws/aws-sdk-go-v2/service/qbusiness v1.28.0 - github.com/aws/aws-sdk-go-v2/service/qldb v1.26.4 - github.com/aws/aws-sdk-go-v2/service/quicksight v1.87.0 - github.com/aws/aws-sdk-go-v2/service/ram v1.30.6 - github.com/aws/aws-sdk-go-v2/service/rbin v1.22.6 - github.com/aws/aws-sdk-go-v2/service/rds v1.99.1 - github.com/aws/aws-sdk-go-v2/service/redshift v1.54.6 - github.com/aws/aws-sdk-go-v2/service/redshiftdata v1.33.3 - github.com/aws/aws-sdk-go-v2/service/redshiftserverless v1.27.4 - github.com/aws/aws-sdk-go-v2/service/rekognition v1.47.2 - github.com/aws/aws-sdk-go-v2/service/resiliencehub v1.30.4 - github.com/aws/aws-sdk-go-v2/service/resourceexplorer2 v1.17.6 - github.com/aws/aws-sdk-go-v2/service/resourcegroups v1.29.3 - github.com/aws/aws-sdk-go-v2/service/resourcegroupstaggingapi v1.26.6 - github.com/aws/aws-sdk-go-v2/service/rolesanywhere v1.17.5 - github.com/aws/aws-sdk-go-v2/service/route53 v1.53.0 - github.com/aws/aws-sdk-go-v2/service/route53domains v1.29.4 - github.com/aws/aws-sdk-go-v2/service/route53profiles v1.5.9 - 
github.com/aws/aws-sdk-go-v2/service/route53recoverycontrolconfig v1.27.3 - github.com/aws/aws-sdk-go-v2/service/route53recoveryreadiness v1.22.4 - github.com/aws/aws-sdk-go-v2/service/route53resolver v1.36.0 - github.com/aws/aws-sdk-go-v2/service/rum v1.24.4 - github.com/aws/aws-sdk-go-v2/service/s3 v1.83.0 - github.com/aws/aws-sdk-go-v2/service/s3control v1.60.0 - github.com/aws/aws-sdk-go-v2/service/s3outposts v1.29.4 - github.com/aws/aws-sdk-go-v2/service/s3tables v1.5.0 - github.com/aws/aws-sdk-go-v2/service/sagemaker v1.200.1 - github.com/aws/aws-sdk-go-v2/service/scheduler v1.13.10 - github.com/aws/aws-sdk-go-v2/service/schemas v1.29.5 - github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.35.7 - github.com/aws/aws-sdk-go-v2/service/securityhub v1.58.0 - github.com/aws/aws-sdk-go-v2/service/securitylake v1.20.5 - github.com/aws/aws-sdk-go-v2/service/serverlessapplicationrepository v1.25.4 - github.com/aws/aws-sdk-go-v2/service/servicecatalog v1.34.2 - github.com/aws/aws-sdk-go-v2/service/servicecatalogappregistry v1.31.4 - github.com/aws/aws-sdk-go-v2/service/servicediscovery v1.35.7 - github.com/aws/aws-sdk-go-v2/service/servicequotas v1.28.3 - github.com/aws/aws-sdk-go-v2/service/ses v1.30.5 - github.com/aws/aws-sdk-go-v2/service/sesv2 v1.46.0 - github.com/aws/aws-sdk-go-v2/service/sfn v1.35.7 - github.com/aws/aws-sdk-go-v2/service/shield v1.30.4 - github.com/aws/aws-sdk-go-v2/service/signer v1.27.4 - github.com/aws/aws-sdk-go-v2/service/sns v1.34.7 - github.com/aws/aws-sdk-go-v2/service/sqs v1.38.8 - github.com/aws/aws-sdk-go-v2/service/ssm v1.60.0 - github.com/aws/aws-sdk-go-v2/service/ssmcontacts v1.27.4 - github.com/aws/aws-sdk-go-v2/service/ssmincidents v1.35.4 - github.com/aws/aws-sdk-go-v2/service/ssmquicksetup v1.4.4 - github.com/aws/aws-sdk-go-v2/service/ssmsap v1.20.4 - github.com/aws/aws-sdk-go-v2/service/sso v1.25.5 - github.com/aws/aws-sdk-go-v2/service/ssoadmin v1.31.2 - github.com/aws/aws-sdk-go-v2/service/storagegateway v1.38.0 - 
github.com/aws/aws-sdk-go-v2/service/sts v1.34.0 - github.com/aws/aws-sdk-go-v2/service/swf v1.28.6 - github.com/aws/aws-sdk-go-v2/service/synthetics v1.35.3 - github.com/aws/aws-sdk-go-v2/service/taxsettings v1.12.2 - github.com/aws/aws-sdk-go-v2/service/timestreaminfluxdb v1.10.5 - github.com/aws/aws-sdk-go-v2/service/timestreamquery v1.31.2 - github.com/aws/aws-sdk-go-v2/service/timestreamwrite v1.31.2 - github.com/aws/aws-sdk-go-v2/service/transcribe v1.47.0 - github.com/aws/aws-sdk-go-v2/service/transfer v1.61.0 - github.com/aws/aws-sdk-go-v2/service/verifiedpermissions v1.24.2 - github.com/aws/aws-sdk-go-v2/service/vpclattice v1.14.4 - github.com/aws/aws-sdk-go-v2/service/waf v1.26.4 - github.com/aws/aws-sdk-go-v2/service/wafregional v1.26.4 - github.com/aws/aws-sdk-go-v2/service/wafv2 v1.63.1 - github.com/aws/aws-sdk-go-v2/service/wellarchitected v1.35.4 - github.com/aws/aws-sdk-go-v2/service/workspaces v1.58.0 - github.com/aws/aws-sdk-go-v2/service/workspacesweb v1.27.4 - github.com/aws/aws-sdk-go-v2/service/xray v1.31.7 - github.com/aws/smithy-go v1.22.4 - github.com/beevik/etree v1.5.1 - github.com/cedar-policy/cedar-go v0.1.0 - github.com/davecgh/go-spew v1.1.1 + github.com/YakDriver/smarterr v0.6.0 + github.com/aws/aws-sdk-go-v2 v1.39.2 + github.com/aws/aws-sdk-go-v2/config v1.31.12 + github.com/aws/aws-sdk-go-v2/credentials v1.18.16 + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.9 + github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.19.12 + github.com/aws/aws-sdk-go-v2/service/accessanalyzer v1.44.6 + github.com/aws/aws-sdk-go-v2/service/account v1.28.6 + github.com/aws/aws-sdk-go-v2/service/acm v1.37.6 + github.com/aws/aws-sdk-go-v2/service/acmpca v1.44.5 + github.com/aws/aws-sdk-go-v2/service/amp v1.40.3 + github.com/aws/aws-sdk-go-v2/service/amplify v1.37.5 + github.com/aws/aws-sdk-go-v2/service/apigateway v1.35.6 + github.com/aws/aws-sdk-go-v2/service/apigatewayv2 v1.32.6 + github.com/aws/aws-sdk-go-v2/service/appconfig v1.42.6 + 
github.com/aws/aws-sdk-go-v2/service/appfabric v1.16.6 + github.com/aws/aws-sdk-go-v2/service/appflow v1.50.6 + github.com/aws/aws-sdk-go-v2/service/appintegrations v1.36.6 + github.com/aws/aws-sdk-go-v2/service/applicationautoscaling v1.40.5 + github.com/aws/aws-sdk-go-v2/service/applicationinsights v1.34.5 + github.com/aws/aws-sdk-go-v2/service/applicationsignals v1.16.0 + github.com/aws/aws-sdk-go-v2/service/appmesh v1.34.6 + github.com/aws/aws-sdk-go-v2/service/apprunner v1.38.7 + github.com/aws/aws-sdk-go-v2/service/appstream v1.50.0 + github.com/aws/aws-sdk-go-v2/service/appsync v1.51.6 + github.com/aws/aws-sdk-go-v2/service/arcregionswitch v1.2.8 + github.com/aws/aws-sdk-go-v2/service/athena v1.55.6 + github.com/aws/aws-sdk-go-v2/service/auditmanager v1.45.6 + github.com/aws/aws-sdk-go-v2/service/autoscaling v1.59.3 + github.com/aws/aws-sdk-go-v2/service/autoscalingplans v1.29.5 + github.com/aws/aws-sdk-go-v2/service/backup v1.49.0 + github.com/aws/aws-sdk-go-v2/service/batch v1.57.10 + github.com/aws/aws-sdk-go-v2/service/bcmdataexports v1.11.8 + github.com/aws/aws-sdk-go-v2/service/bedrock v1.48.0 + github.com/aws/aws-sdk-go-v2/service/bedrockagent v1.50.6 + github.com/aws/aws-sdk-go-v2/service/bedrockagentcorecontrol v1.10.0 + github.com/aws/aws-sdk-go-v2/service/billing v1.8.0 + github.com/aws/aws-sdk-go-v2/service/budgets v1.39.2 + github.com/aws/aws-sdk-go-v2/service/chatbot v1.14.6 + github.com/aws/aws-sdk-go-v2/service/chime v1.40.5 + github.com/aws/aws-sdk-go-v2/service/chimesdkmediapipelines v1.26.6 + github.com/aws/aws-sdk-go-v2/service/chimesdkvoice v1.27.0 + github.com/aws/aws-sdk-go-v2/service/cleanrooms v1.36.0 + github.com/aws/aws-sdk-go-v2/service/cloud9 v1.33.5 + github.com/aws/aws-sdk-go-v2/service/cloudcontrol v1.28.6 + github.com/aws/aws-sdk-go-v2/service/cloudformation v1.67.0 + github.com/aws/aws-sdk-go-v2/service/cloudfront v1.55.0 + github.com/aws/aws-sdk-go-v2/service/cloudfrontkeyvaluestore v1.12.8 + 
github.com/aws/aws-sdk-go-v2/service/cloudhsmv2 v1.34.5 + github.com/aws/aws-sdk-go-v2/service/cloudsearch v1.31.6 + github.com/aws/aws-sdk-go-v2/service/cloudtrail v1.53.6 + github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.51.1 + github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs v1.58.2 + github.com/aws/aws-sdk-go-v2/service/codeartifact v1.38.6 + github.com/aws/aws-sdk-go-v2/service/codebuild v1.67.5 + github.com/aws/aws-sdk-go-v2/service/codecatalyst v1.20.8 + github.com/aws/aws-sdk-go-v2/service/codecommit v1.32.6 + github.com/aws/aws-sdk-go-v2/service/codeconnections v1.10.5 + github.com/aws/aws-sdk-go-v2/service/codedeploy v1.34.6 + github.com/aws/aws-sdk-go-v2/service/codeguruprofiler v1.29.5 + github.com/aws/aws-sdk-go-v2/service/codegurureviewer v1.34.5 + github.com/aws/aws-sdk-go-v2/service/codepipeline v1.46.6 + github.com/aws/aws-sdk-go-v2/service/codestarconnections v1.34.6 + github.com/aws/aws-sdk-go-v2/service/codestarnotifications v1.31.6 + github.com/aws/aws-sdk-go-v2/service/cognitoidentity v1.33.6 + github.com/aws/aws-sdk-go-v2/service/cognitoidentityprovider v1.57.7 + github.com/aws/aws-sdk-go-v2/service/comprehend v1.40.6 + github.com/aws/aws-sdk-go-v2/service/computeoptimizer v1.47.5 + github.com/aws/aws-sdk-go-v2/service/configservice v1.58.2 + github.com/aws/aws-sdk-go-v2/service/connect v1.142.0 + github.com/aws/aws-sdk-go-v2/service/connectcases v1.32.0 + github.com/aws/aws-sdk-go-v2/service/controltower v1.26.6 + github.com/aws/aws-sdk-go-v2/service/costandusagereportservice v1.33.6 + github.com/aws/aws-sdk-go-v2/service/costexplorer v1.57.0 + github.com/aws/aws-sdk-go-v2/service/costoptimizationhub v1.20.6 + github.com/aws/aws-sdk-go-v2/service/customerprofiles v1.53.0 + github.com/aws/aws-sdk-go-v2/service/databasemigrationservice v1.57.7 + github.com/aws/aws-sdk-go-v2/service/databrew v1.38.5 + github.com/aws/aws-sdk-go-v2/service/dataexchange v1.39.6 + github.com/aws/aws-sdk-go-v2/service/datapipeline v1.30.5 + 
github.com/aws/aws-sdk-go-v2/service/datasync v1.55.0 + github.com/aws/aws-sdk-go-v2/service/datazone v1.43.0 + github.com/aws/aws-sdk-go-v2/service/dax v1.29.1 + github.com/aws/aws-sdk-go-v2/service/detective v1.37.7 + github.com/aws/aws-sdk-go-v2/service/devicefarm v1.35.6 + github.com/aws/aws-sdk-go-v2/service/devopsguru v1.39.6 + github.com/aws/aws-sdk-go-v2/service/directconnect v1.37.6 + github.com/aws/aws-sdk-go-v2/service/directoryservice v1.38.0 + github.com/aws/aws-sdk-go-v2/service/dlm v1.34.6 + github.com/aws/aws-sdk-go-v2/service/docdb v1.47.0 + github.com/aws/aws-sdk-go-v2/service/docdbelastic v1.19.6 + github.com/aws/aws-sdk-go-v2/service/drs v1.35.6 + github.com/aws/aws-sdk-go-v2/service/dsql v1.9.8 + github.com/aws/aws-sdk-go-v2/service/dynamodb v1.51.0 + github.com/aws/aws-sdk-go-v2/service/ec2 v1.257.0 + github.com/aws/aws-sdk-go-v2/service/ecr v1.50.5 + github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.37.6 + github.com/aws/aws-sdk-go-v2/service/ecs v1.65.1 + github.com/aws/aws-sdk-go-v2/service/efs v1.40.8 + github.com/aws/aws-sdk-go-v2/service/eks v1.74.2 + github.com/aws/aws-sdk-go-v2/service/elasticache v1.50.5 + github.com/aws/aws-sdk-go-v2/service/elasticbeanstalk v1.33.7 + github.com/aws/aws-sdk-go-v2/service/elasticloadbalancing v1.33.6 + github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2 v1.51.0 + github.com/aws/aws-sdk-go-v2/service/elasticsearchservice v1.37.6 + github.com/aws/aws-sdk-go-v2/service/elastictranscoder v1.32.6 + github.com/aws/aws-sdk-go-v2/service/emr v1.54.5 + github.com/aws/aws-sdk-go-v2/service/emrcontainers v1.40.2 + github.com/aws/aws-sdk-go-v2/service/emrserverless v1.36.6 + github.com/aws/aws-sdk-go-v2/service/eventbridge v1.45.5 + github.com/aws/aws-sdk-go-v2/service/evidently v1.28.5 + github.com/aws/aws-sdk-go-v2/service/evs v1.5.2 + github.com/aws/aws-sdk-go-v2/service/finspace v1.33.6 + github.com/aws/aws-sdk-go-v2/service/firehose v1.41.6 + github.com/aws/aws-sdk-go-v2/service/fis v1.37.5 + 
github.com/aws/aws-sdk-go-v2/service/fms v1.44.6 + github.com/aws/aws-sdk-go-v2/service/fsx v1.62.0 + github.com/aws/aws-sdk-go-v2/service/gamelift v1.46.6 + github.com/aws/aws-sdk-go-v2/service/glacier v1.31.6 + github.com/aws/aws-sdk-go-v2/service/globalaccelerator v1.34.6 + github.com/aws/aws-sdk-go-v2/service/glue v1.131.0 + github.com/aws/aws-sdk-go-v2/service/grafana v1.31.6 + github.com/aws/aws-sdk-go-v2/service/greengrass v1.32.6 + github.com/aws/aws-sdk-go-v2/service/groundstation v1.37.6 + github.com/aws/aws-sdk-go-v2/service/guardduty v1.65.0 + github.com/aws/aws-sdk-go-v2/service/healthlake v1.35.5 + github.com/aws/aws-sdk-go-v2/service/iam v1.47.7 + github.com/aws/aws-sdk-go-v2/service/identitystore v1.32.7 + github.com/aws/aws-sdk-go-v2/service/imagebuilder v1.48.0 + github.com/aws/aws-sdk-go-v2/service/inspector v1.30.5 + github.com/aws/aws-sdk-go-v2/service/inspector2 v1.44.6 + github.com/aws/aws-sdk-go-v2/service/internetmonitor v1.25.5 + github.com/aws/aws-sdk-go-v2/service/invoicing v1.6.8 + github.com/aws/aws-sdk-go-v2/service/iot v1.69.5 + github.com/aws/aws-sdk-go-v2/service/ivs v1.47.6 + github.com/aws/aws-sdk-go-v2/service/ivschat v1.21.5 + github.com/aws/aws-sdk-go-v2/service/kafka v1.43.6 + github.com/aws/aws-sdk-go-v2/service/kafkaconnect v1.27.6 + github.com/aws/aws-sdk-go-v2/service/kendra v1.60.6 + github.com/aws/aws-sdk-go-v2/service/keyspaces v1.23.6 + github.com/aws/aws-sdk-go-v2/service/kinesis v1.40.5 + github.com/aws/aws-sdk-go-v2/service/kinesisanalytics v1.30.6 + github.com/aws/aws-sdk-go-v2/service/kinesisanalyticsv2 v1.36.7 + github.com/aws/aws-sdk-go-v2/service/kinesisvideo v1.32.5 + github.com/aws/aws-sdk-go-v2/service/kms v1.45.6 + github.com/aws/aws-sdk-go-v2/service/lakeformation v1.45.5 + github.com/aws/aws-sdk-go-v2/service/lambda v1.78.0 + github.com/aws/aws-sdk-go-v2/service/launchwizard v1.13.6 + github.com/aws/aws-sdk-go-v2/service/lexmodelbuildingservice v1.33.5 + github.com/aws/aws-sdk-go-v2/service/lexmodelsv2 
v1.56.6 + github.com/aws/aws-sdk-go-v2/service/licensemanager v1.36.6 + github.com/aws/aws-sdk-go-v2/service/lightsail v1.50.0 + github.com/aws/aws-sdk-go-v2/service/location v1.49.6 + github.com/aws/aws-sdk-go-v2/service/lookoutmetrics v1.36.6 + github.com/aws/aws-sdk-go-v2/service/m2 v1.25.6 + github.com/aws/aws-sdk-go-v2/service/macie2 v1.49.6 + github.com/aws/aws-sdk-go-v2/service/mediaconnect v1.45.0 + github.com/aws/aws-sdk-go-v2/service/mediaconvert v1.82.6 + github.com/aws/aws-sdk-go-v2/service/medialive v1.84.0 + github.com/aws/aws-sdk-go-v2/service/mediapackage v1.39.6 + github.com/aws/aws-sdk-go-v2/service/mediapackagev2 v1.31.3 + github.com/aws/aws-sdk-go-v2/service/mediapackagevod v1.39.6 + github.com/aws/aws-sdk-go-v2/service/mediastore v1.29.6 + github.com/aws/aws-sdk-go-v2/service/memorydb v1.32.0 + github.com/aws/aws-sdk-go-v2/service/mgn v1.37.5 + github.com/aws/aws-sdk-go-v2/service/mq v1.34.4 + github.com/aws/aws-sdk-go-v2/service/mwaa v1.39.6 + github.com/aws/aws-sdk-go-v2/service/neptune v1.42.5 + github.com/aws/aws-sdk-go-v2/service/neptunegraph v1.21.5 + github.com/aws/aws-sdk-go-v2/service/networkfirewall v1.57.1 + github.com/aws/aws-sdk-go-v2/service/networkmanager v1.39.7 + github.com/aws/aws-sdk-go-v2/service/networkmonitor v1.12.6 + github.com/aws/aws-sdk-go-v2/service/notifications v1.7.4 + github.com/aws/aws-sdk-go-v2/service/notificationscontacts v1.5.8 + github.com/aws/aws-sdk-go-v2/service/oam v1.22.5 + github.com/aws/aws-sdk-go-v2/service/odb v1.5.0 + github.com/aws/aws-sdk-go-v2/service/opensearch v1.52.5 + github.com/aws/aws-sdk-go-v2/service/opensearchserverless v1.26.4 + github.com/aws/aws-sdk-go-v2/service/organizations v1.45.3 + github.com/aws/aws-sdk-go-v2/service/osis v1.20.2 + github.com/aws/aws-sdk-go-v2/service/outposts v1.57.0 + github.com/aws/aws-sdk-go-v2/service/paymentcryptography v1.25.2 + github.com/aws/aws-sdk-go-v2/service/pcaconnectorad v1.15.6 + github.com/aws/aws-sdk-go-v2/service/pcs v1.14.0 + 
github.com/aws/aws-sdk-go-v2/service/pinpoint v1.39.6 + github.com/aws/aws-sdk-go-v2/service/pinpointsmsvoicev2 v1.25.5 + github.com/aws/aws-sdk-go-v2/service/pipes v1.23.5 + github.com/aws/aws-sdk-go-v2/service/polly v1.53.7 + github.com/aws/aws-sdk-go-v2/service/pricing v1.39.6 + github.com/aws/aws-sdk-go-v2/service/qbusiness v1.33.6 + github.com/aws/aws-sdk-go-v2/service/qldb v1.30.6 + github.com/aws/aws-sdk-go-v2/service/quicksight v1.95.0 + github.com/aws/aws-sdk-go-v2/service/ram v1.34.6 + github.com/aws/aws-sdk-go-v2/service/rbin v1.26.6 + github.com/aws/aws-sdk-go-v2/service/rds v1.108.2 + github.com/aws/aws-sdk-go-v2/service/redshift v1.59.0 + github.com/aws/aws-sdk-go-v2/service/redshiftdata v1.37.6 + github.com/aws/aws-sdk-go-v2/service/redshiftserverless v1.31.8 + github.com/aws/aws-sdk-go-v2/service/rekognition v1.51.5 + github.com/aws/aws-sdk-go-v2/service/resiliencehub v1.34.6 + github.com/aws/aws-sdk-go-v2/service/resourceexplorer2 v1.22.0 + github.com/aws/aws-sdk-go-v2/service/resourcegroups v1.33.7 + github.com/aws/aws-sdk-go-v2/service/resourcegroupstaggingapi v1.30.6 + github.com/aws/aws-sdk-go-v2/service/rolesanywhere v1.21.6 + github.com/aws/aws-sdk-go-v2/service/route53 v1.58.4 + github.com/aws/aws-sdk-go-v2/service/route53domains v1.34.4 + github.com/aws/aws-sdk-go-v2/service/route53profiles v1.9.6 + github.com/aws/aws-sdk-go-v2/service/route53recoverycontrolconfig v1.31.7 + github.com/aws/aws-sdk-go-v2/service/route53recoveryreadiness v1.26.6 + github.com/aws/aws-sdk-go-v2/service/route53resolver v1.40.6 + github.com/aws/aws-sdk-go-v2/service/rum v1.28.7 + github.com/aws/aws-sdk-go-v2/service/s3 v1.88.4 + github.com/aws/aws-sdk-go-v2/service/s3control v1.66.2 + github.com/aws/aws-sdk-go-v2/service/s3outposts v1.33.6 + github.com/aws/aws-sdk-go-v2/service/s3tables v1.10.5 + github.com/aws/aws-sdk-go-v2/service/s3vectors v1.4.8 + github.com/aws/aws-sdk-go-v2/service/sagemaker v1.215.3 + github.com/aws/aws-sdk-go-v2/service/scheduler v1.17.5 + 
github.com/aws/aws-sdk-go-v2/service/schemas v1.33.5 + github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.39.6 + github.com/aws/aws-sdk-go-v2/service/securityhub v1.64.4 + github.com/aws/aws-sdk-go-v2/service/securitylake v1.24.6 + github.com/aws/aws-sdk-go-v2/service/serverlessapplicationrepository v1.29.6 + github.com/aws/aws-sdk-go-v2/service/servicecatalog v1.38.6 + github.com/aws/aws-sdk-go-v2/service/servicecatalogappregistry v1.35.6 + github.com/aws/aws-sdk-go-v2/service/servicediscovery v1.39.9 + github.com/aws/aws-sdk-go-v2/service/servicequotas v1.33.0 + github.com/aws/aws-sdk-go-v2/service/ses v1.34.5 + github.com/aws/aws-sdk-go-v2/service/sesv2 v1.53.5 + github.com/aws/aws-sdk-go-v2/service/sfn v1.39.6 + github.com/aws/aws-sdk-go-v2/service/shield v1.34.6 + github.com/aws/aws-sdk-go-v2/service/signer v1.31.6 + github.com/aws/aws-sdk-go-v2/service/sns v1.38.5 + github.com/aws/aws-sdk-go-v2/service/sqs v1.42.8 + github.com/aws/aws-sdk-go-v2/service/ssm v1.65.1 + github.com/aws/aws-sdk-go-v2/service/ssmcontacts v1.30.8 + github.com/aws/aws-sdk-go-v2/service/ssmincidents v1.39.5 + github.com/aws/aws-sdk-go-v2/service/ssmquicksetup v1.8.6 + github.com/aws/aws-sdk-go-v2/service/ssmsap v1.25.5 + github.com/aws/aws-sdk-go-v2/service/sso v1.29.6 + github.com/aws/aws-sdk-go-v2/service/ssoadmin v1.36.2 + github.com/aws/aws-sdk-go-v2/service/storagegateway v1.42.7 + github.com/aws/aws-sdk-go-v2/service/sts v1.38.6 + github.com/aws/aws-sdk-go-v2/service/swf v1.32.5 + github.com/aws/aws-sdk-go-v2/service/synthetics v1.41.0 + github.com/aws/aws-sdk-go-v2/service/taxsettings v1.16.6 + github.com/aws/aws-sdk-go-v2/service/timestreaminfluxdb v1.17.0 + github.com/aws/aws-sdk-go-v2/service/timestreamquery v1.35.5 + github.com/aws/aws-sdk-go-v2/service/timestreamwrite v1.35.5 + github.com/aws/aws-sdk-go-v2/service/transcribe v1.53.0 + github.com/aws/aws-sdk-go-v2/service/transfer v1.67.0 + github.com/aws/aws-sdk-go-v2/service/verifiedpermissions v1.29.5 + 
github.com/aws/aws-sdk-go-v2/service/vpclattice v1.19.0 + github.com/aws/aws-sdk-go-v2/service/waf v1.30.5 + github.com/aws/aws-sdk-go-v2/service/wafregional v1.30.6 + github.com/aws/aws-sdk-go-v2/service/wafv2 v1.68.0 + github.com/aws/aws-sdk-go-v2/service/wellarchitected v1.39.6 + github.com/aws/aws-sdk-go-v2/service/workmail v1.36.4 + github.com/aws/aws-sdk-go-v2/service/workspaces v1.63.6 + github.com/aws/aws-sdk-go-v2/service/workspacesweb v1.32.6 + github.com/aws/aws-sdk-go-v2/service/xray v1.36.4 + github.com/aws/smithy-go v1.23.0 + github.com/beevik/etree v1.6.0 + github.com/cedar-policy/cedar-go v1.2.6 + github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc github.com/dlclark/regexp2 v1.11.5 github.com/gertd/go-pluralize v0.2.1 + github.com/goccy/go-yaml v1.18.0 github.com/google/go-cmp v0.7.0 github.com/hashicorp/aws-cloudformation-resource-schema-sdk-go v0.23.0 - github.com/hashicorp/aws-sdk-go-base/v2 v2.0.0-beta.65 + github.com/hashicorp/aws-sdk-go-base/v2 v2.0.0-beta.67 github.com/hashicorp/awspolicyequivalence v1.7.0 github.com/hashicorp/cli v1.1.7 github.com/hashicorp/go-cleanhttp v0.5.2 @@ -282,18 +289,19 @@ require ( github.com/hashicorp/go-multierror v1.1.1 github.com/hashicorp/go-uuid v1.0.3 github.com/hashicorp/go-version v1.7.0 - github.com/hashicorp/hcl/v2 v2.23.0 - github.com/hashicorp/terraform-json v0.25.0 - github.com/hashicorp/terraform-plugin-framework v1.15.0 + github.com/hashicorp/hcl/v2 v2.24.0 + github.com/hashicorp/terraform-json v0.27.2 + github.com/hashicorp/terraform-plugin-framework v1.16.1 github.com/hashicorp/terraform-plugin-framework-jsontypes v0.2.0 - github.com/hashicorp/terraform-plugin-framework-timeouts v0.5.0 + github.com/hashicorp/terraform-plugin-framework-timeouts v0.6.0 github.com/hashicorp/terraform-plugin-framework-timetypes v0.5.0 - github.com/hashicorp/terraform-plugin-framework-validators v0.18.0 - github.com/hashicorp/terraform-plugin-go v0.28.0 + 
github.com/hashicorp/terraform-plugin-framework-validators v0.19.0 + github.com/hashicorp/terraform-plugin-go v0.29.0 github.com/hashicorp/terraform-plugin-log v0.9.0 - github.com/hashicorp/terraform-plugin-mux v0.20.0 - github.com/hashicorp/terraform-plugin-sdk/v2 v2.37.0 - github.com/hashicorp/terraform-plugin-testing v1.13.2 + github.com/hashicorp/terraform-plugin-mux v0.21.0 + github.com/hashicorp/terraform-plugin-sdk/v2 v2.38.1 + github.com/hashicorp/terraform-plugin-testing v1.14.0-beta.1.0.20251013071646-7ed2ee242705 + github.com/jaswdr/faker/v2 v2.8.1 github.com/jmespath/go-jmespath v0.4.0 github.com/mattbaird/jsonpatch v0.0.0-20240118010651-0ba75a80ca38 github.com/mitchellh/copystructure v1.2.0 @@ -302,58 +310,58 @@ require ( github.com/mitchellh/mapstructure v1.5.0 github.com/pquerna/otp v1.5.0 github.com/shopspring/decimal v1.4.0 - golang.org/x/crypto v0.39.0 - golang.org/x/text v0.27.0 - golang.org/x/tools v0.34.0 - gopkg.in/dnaeon/go-vcr.v4 v4.0.4 - gopkg.in/yaml.v3 v3.0.1 - syreclabs.com/go/faker v1.2.3 + go.opentelemetry.io/contrib/instrumentation/github.com/aws/aws-sdk-go-v2/otelaws v0.63.0 + go.opentelemetry.io/otel v1.38.0 + golang.org/x/crypto v0.43.0 + golang.org/x/text v0.30.0 + golang.org/x/tools v0.38.0 + gopkg.in/dnaeon/go-vcr.v4 v4.0.5 ) require ( github.com/Masterminds/goutils v1.1.1 // indirect github.com/Masterminds/semver/v3 v3.2.0 // indirect github.com/Masterminds/sprig/v3 v3.2.3 // indirect - github.com/agext/levenshtein v1.2.2 // indirect + github.com/agext/levenshtein v1.2.3 // indirect github.com/apparentlymart/go-textseg/v15 v15.0.0 // indirect github.com/armon/go-radix v1.0.0 // indirect - github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.11 // indirect - github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.36 // indirect - github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.36 // indirect + github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.1 // indirect + 
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.9 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.9 // indirect github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 // indirect - github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.36 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.4 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.7.4 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.10.17 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.17 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.17 // indirect - github.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.3 // indirect + github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.9 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.1 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.0 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.11.9 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.9 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.9 // indirect + github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.1 // indirect github.com/bgentry/speakeasy v0.1.0 // indirect github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc // indirect github.com/cloudflare/circl v1.6.1 // indirect github.com/evanphx/json-patch v0.5.2 // indirect github.com/fatih/color v1.18.0 // indirect - github.com/go-logr/logr v1.4.2 // indirect + github.com/go-logr/logr v1.4.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/golang/protobuf v1.5.4 // indirect github.com/google/uuid v1.6.0 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/go-checkpoint v0.5.0 // indirect - github.com/hashicorp/go-plugin v1.6.3 // indirect + github.com/hashicorp/go-plugin v1.7.0 // indirect 
github.com/hashicorp/go-retryablehttp v0.7.7 // indirect github.com/hashicorp/hc-install v0.9.2 // indirect github.com/hashicorp/logutils v1.0.0 // indirect - github.com/hashicorp/terraform-exec v0.23.0 // indirect - github.com/hashicorp/terraform-registry-address v0.2.5 // indirect + github.com/hashicorp/terraform-exec v0.24.0 // indirect + github.com/hashicorp/terraform-registry-address v0.4.0 // indirect github.com/hashicorp/terraform-svchost v0.1.1 // indirect - github.com/hashicorp/yamux v0.1.1 // indirect + github.com/hashicorp/yamux v0.1.2 // indirect github.com/huandu/xstrings v1.3.3 // indirect github.com/imdario/mergo v0.3.15 // indirect github.com/mattn/go-colorable v0.1.14 // indirect github.com/mattn/go-isatty v0.0.20 // indirect - github.com/mitchellh/go-wordwrap v1.0.0 // indirect + github.com/mitchellh/go-wordwrap v1.0.1 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect - github.com/oklog/run v1.0.0 // indirect + github.com/oklog/run v1.1.0 // indirect github.com/posener/complete v1.2.3 // indirect github.com/spf13/cast v1.3.1 // indirect github.com/vmihailenco/msgpack v4.0.4+incompatible // indirect @@ -362,20 +370,19 @@ require ( github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f // indirect github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect github.com/xeipuuv/gojsonschema v1.2.0 // indirect - github.com/zclconf/go-cty v1.16.3 // indirect - go.opentelemetry.io/auto/sdk v1.1.0 // indirect - go.opentelemetry.io/contrib/instrumentation/github.com/aws/aws-sdk-go-v2/otelaws v0.61.0 // indirect - go.opentelemetry.io/otel v1.36.0 // indirect - go.opentelemetry.io/otel/metric v1.36.0 // indirect - go.opentelemetry.io/otel/trace v1.36.0 // indirect - golang.org/x/mod v0.25.0 // indirect - golang.org/x/net v0.41.0 // indirect - golang.org/x/sync v0.16.0 // indirect - golang.org/x/sys v0.33.0 // indirect + github.com/zclconf/go-cty v1.17.0 // indirect + go.opentelemetry.io/auto/sdk v1.2.1 // 
indirect + go.opentelemetry.io/otel/metric v1.38.0 // indirect + go.opentelemetry.io/otel/trace v1.38.0 // indirect + golang.org/x/exp v0.0.0-20220921023135-46d9e7742f1e // indirect + golang.org/x/mod v0.29.0 // indirect + golang.org/x/net v0.46.0 // indirect + golang.org/x/sync v0.17.0 // indirect + golang.org/x/sys v0.37.0 // indirect google.golang.org/appengine v1.6.8 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20250218202821-56aae31c358a // indirect - google.golang.org/grpc v1.72.1 // indirect - google.golang.org/protobuf v1.36.6 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20250804133106-a7a43d27e69b // indirect + google.golang.org/grpc v1.75.1 // indirect + google.golang.org/protobuf v1.36.9 // indirect ) replace github.com/hashicorp/terraform-plugin-log => github.com/gdavison/terraform-plugin-log v0.0.0-20230928191232-6c653d8ef8fb diff --git a/go.sum b/go.sum index 08de4229104f..125ecc4c3156 100644 --- a/go.sum +++ b/go.sum @@ -14,564 +14,577 @@ github.com/YakDriver/go-version v0.1.0 h1:/x+Xg2+l89Mjtxl0VRf2+ue8cnHkw6jfYv49j6 github.com/YakDriver/go-version v0.1.0/go.mod h1:LXwFAp1E3KBhS7FHO/FE8r3XCmvKizs/VXXXFWfoSYY= github.com/YakDriver/regexache v0.24.0 h1:zUKaixelkswzdqsqPc2sveiV//Mi/msJn0teG8zBDiA= github.com/YakDriver/regexache v0.24.0/go.mod h1:awcd8uBj614F3ScW06JqlfSGqq2/7vdJHy+RiKzVC+g= -github.com/agext/levenshtein v1.2.2 h1:0S/Yg6LYmFJ5stwQeRp6EeOcCbj7xiqQSdNelsXvaqE= -github.com/agext/levenshtein v1.2.2/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= +github.com/YakDriver/smarterr v0.6.0 h1:BFJ09GTAVcGfyzMUk7/yiS0rBEPXTzUxpP67bbyVLoo= +github.com/YakDriver/smarterr v0.6.0/go.mod h1:Sg1LUzBronueGfhn2yalB2iVMXl24TIGam/mS5cZh5c= +github.com/agext/levenshtein v1.2.3 h1:YB2fHEn0UJagG8T1rrWknE3ZQzWM06O8AMAatNn7lmo= +github.com/agext/levenshtein v1.2.3/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= github.com/apparentlymart/go-textseg/v12 v12.0.0/go.mod h1:S/4uRK2UtaQttw1GenVJEynmyUenKwP++x/+DdGV/Ec= 
github.com/apparentlymart/go-textseg/v15 v15.0.0 h1:uYvfpb3DyLSCGWnctWKGj857c6ew1u1fNQOlOtuGxQY= github.com/apparentlymart/go-textseg/v15 v15.0.0/go.mod h1:K8XmNZdhEBkdlyDdvbmmsvpAG721bKi0joRfFdHIWJ4= github.com/armon/go-radix v1.0.0 h1:F4z6KzEeeQIMeLFa97iZU6vupzoecKdU5TX24SNppXI= github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= -github.com/aws/aws-sdk-go-v2 v1.36.5 h1:0OF9RiEMEdDdZEMqF9MRjevyxAQcf6gY+E7vwBILFj0= -github.com/aws/aws-sdk-go-v2 v1.36.5/go.mod h1:EYrzvCCN9CMUTa5+6lf6MM4tq3Zjp8UhSGR/cBsjai0= -github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.11 h1:12SpdwU8Djs+YGklkinSSlcrPyj3H4VifVsKf78KbwA= -github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.11/go.mod h1:dd+Lkp6YmMryke+qxW/VnKyhMBDTYP41Q2Bb+6gNZgY= -github.com/aws/aws-sdk-go-v2/config v1.29.17 h1:jSuiQ5jEe4SAMH6lLRMY9OVC+TqJLP5655pBGjmnjr0= -github.com/aws/aws-sdk-go-v2/config v1.29.17/go.mod h1:9P4wwACpbeXs9Pm9w1QTh6BwWwJjwYvJ1iCt5QbCXh8= -github.com/aws/aws-sdk-go-v2/credentials v1.17.70 h1:ONnH5CM16RTXRkS8Z1qg7/s2eDOhHhaXVd72mmyv4/0= -github.com/aws/aws-sdk-go-v2/credentials v1.17.70/go.mod h1:M+lWhhmomVGgtuPOhO85u4pEa3SmssPTdcYpP/5J/xc= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.32 h1:KAXP9JSHO1vKGCr5f4O6WmlVKLFFXgWYAGoJosorxzU= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.32/go.mod h1:h4Sg6FQdexC1yYG9RDnOvLbW1a/P986++/Y/a+GyEM8= -github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.83 h1:08otkOELsIi0toRRGMytlJhOctcN8xfKfKFR2NXz3kE= -github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.83/go.mod h1:dGsGb2wI8JDWeMAhjVPP+z+dqvYjL6k6o+EujcRNk5c= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.36 h1:SsytQyTMHMDPspp+spo7XwXTP44aJZZAC7fBV2C5+5s= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.36/go.mod h1:Q1lnJArKRXkenyog6+Y+zr7WDpk4e6XlR6gs20bbeNo= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.36 h1:i2vNHQiXUvKhs3quBR6aqlgJaiaexz/aNvdCktW/kAM= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 
v2.6.36/go.mod h1:UdyGa7Q91id/sdyHPwth+043HhmP6yP9MBHgbZM0xo8= +github.com/aws/aws-sdk-go-v2 v1.39.2 h1:EJLg8IdbzgeD7xgvZ+I8M1e0fL0ptn/M47lianzth0I= +github.com/aws/aws-sdk-go-v2 v1.39.2/go.mod h1:sDioUELIUO9Znk23YVmIk86/9DOpkbyyVb1i/gUNFXY= +github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.1 h1:i8p8P4diljCr60PpJp6qZXNlgX4m2yQFpYk+9ZT+J4E= +github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.1/go.mod h1:ddqbooRZYNoJ2dsTwOty16rM+/Aqmk/GOXrK8cg7V00= +github.com/aws/aws-sdk-go-v2/config v1.31.12 h1:pYM1Qgy0dKZLHX2cXslNacbcEFMkDMl+Bcj5ROuS6p8= +github.com/aws/aws-sdk-go-v2/config v1.31.12/go.mod h1:/MM0dyD7KSDPR+39p9ZNVKaHDLb9qnfDurvVS2KAhN8= +github.com/aws/aws-sdk-go-v2/credentials v1.18.16 h1:4JHirI4zp958zC026Sm+V4pSDwW4pwLefKrc0bF2lwI= +github.com/aws/aws-sdk-go-v2/credentials v1.18.16/go.mod h1:qQMtGx9OSw7ty1yLclzLxXCRbrkjWAM7JnObZjmCB7I= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.9 h1:Mv4Bc0mWmv6oDuSWTKnk+wgeqPL5DRFu5bQL9BGPQ8Y= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.9/go.mod h1:IKlKfRppK2a1y0gy1yH6zD+yX5uplJ6UuPlgd48dJiQ= +github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.19.12 h1:ofHawDLJTI6ytDIji+g4dXQ6u2idzTb04tDlN9AS614= +github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.19.12/go.mod h1:f5pL4iLDfbcxj1SZcdRdIokBB5eHbuYPS/Fs9DwUPRQ= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.9 h1:se2vOWGD3dWQUtfn4wEjRQJb1HK1XsNIt825gskZ970= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.9/go.mod h1:hijCGH2VfbZQxqCDN7bwz/4dzxV+hkyhjawAtdPWKZA= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.9 h1:6RBnKZLkJM4hQ+kN6E7yWFveOTg8NLPHAkqrs4ZPlTU= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.9/go.mod h1:V9rQKRmK7AWuEsOMnHzKj8WyrIir1yUJbZxDuZLFvXI= github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 h1:bIqFDwgGXXN1Kpp99pDOdKMTTb5d2KyU5X/BZxjOkRo= github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3/go.mod h1:H5O/EsxDWyU+LP/V8i5sm8cxoZgc2fdNR9bxlOFrQTo= -github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.36 
h1:GMYy2EOWfzdP3wfVAGXBNKY5vK4K8vMET4sYOYltmqs= -github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.36/go.mod h1:gDhdAV6wL3PmPqBhiPbnlS447GoWs8HTTOYef9/9Inw= -github.com/aws/aws-sdk-go-v2/service/accessanalyzer v1.40.0 h1:xYryxpwtCZxukhjSd0O26zT3CbGDlzoYFBWqY0DoK3A= -github.com/aws/aws-sdk-go-v2/service/accessanalyzer v1.40.0/go.mod h1:mwjv8LM1RN5WJNOPTKspM0AnCxFoTjMopGI19k0Hb4k= -github.com/aws/aws-sdk-go-v2/service/account v1.24.2 h1:1ItkqDExKIDsS8NoIBq7OxQOJnQNOVjC25CYa9RzOos= -github.com/aws/aws-sdk-go-v2/service/account v1.24.2/go.mod h1:NShtay87juyMTb3c6bHN6Bai5dUFmTX7NzURY4/Jyb0= -github.com/aws/aws-sdk-go-v2/service/acm v1.33.0 h1:Z3MHBWR1KiviwaAiG7MTPB6T5gLYRPhUECuKLgltCwA= -github.com/aws/aws-sdk-go-v2/service/acm v1.33.0/go.mod h1:t3jPqKBnySV3qsU40cj1TWleOYx5vyz1xBeZiplAVcs= -github.com/aws/aws-sdk-go-v2/service/acmpca v1.40.5 h1:wO4AWPJlnLRbLgQnrVKG/HTy9qDCxFVMjPFkqr2IKRA= -github.com/aws/aws-sdk-go-v2/service/acmpca v1.40.5/go.mod h1:Jhu06Hov5+oM1+zkhDGCZBp8yoVCSiFHSnkSC0KIzDs= -github.com/aws/aws-sdk-go-v2/service/amp v1.34.3 h1:xH65YCH77WzkxqdzDl6PfX2TaYK/8YiZwy6UqNkFkv4= -github.com/aws/aws-sdk-go-v2/service/amp v1.34.3/go.mod h1:SulhOciRP/ZvQQdU9cNuE9OAfnD7+itzfKPiyBx0I1I= -github.com/aws/aws-sdk-go-v2/service/amplify v1.33.3 h1:6rZkMM5S/fSnIP02Q/paqszlyp/kKNhl+hHV9WuuH7I= -github.com/aws/aws-sdk-go-v2/service/amplify v1.33.3/go.mod h1:Ir47WZbig8znnUdUx5YPxwjt92xXZSQKu2+Y+NjGzBM= -github.com/aws/aws-sdk-go-v2/service/apigateway v1.31.4 h1:XFKyI5HLJwV0HBKuUTIE19yaKHOvgZK/sDSj3HmE8dM= -github.com/aws/aws-sdk-go-v2/service/apigateway v1.31.4/go.mod h1:b7jjY+ZgE+CzV8iX9d2ose6aPKkpA7a7RIi9mHEFlqM= -github.com/aws/aws-sdk-go-v2/service/apigatewayv2 v1.28.4 h1:H4WoC79VAg7e5PrK6ta1ua7aNg5bj6JKrWRL45hAawA= -github.com/aws/aws-sdk-go-v2/service/apigatewayv2 v1.28.4/go.mod h1:NomAJQ/SaEj3KlzfxI4V8y3CJNv1Mr2ynTv7lbYePp0= -github.com/aws/aws-sdk-go-v2/service/appconfig v1.38.3 h1:tjAPEEHH7V7YX7fxdklhs9Vg9K8aXBosKutnRPrhYKY= 
-github.com/aws/aws-sdk-go-v2/service/appconfig v1.38.3/go.mod h1:NiWNkf2XdzzN6fWWwB6RtHqmT9SoFCXQJU9zg7tS5TE= -github.com/aws/aws-sdk-go-v2/service/appfabric v1.12.4 h1:NOpFPNcu8Ao3Sqk+zJ6R92Zv7MUQ4xed5aqrauFlOBs= -github.com/aws/aws-sdk-go-v2/service/appfabric v1.12.4/go.mod h1:wRubXIGmzEbl2uPpPX/BZ6Tm/BxCtkXhUirkj0Q1F+A= -github.com/aws/aws-sdk-go-v2/service/appflow v1.46.4 h1:7B2B/QGEXHG4ayH9CgmVd7z+pHQtNGHfVx0T0TyHBCs= -github.com/aws/aws-sdk-go-v2/service/appflow v1.46.4/go.mod h1:EmHkVIWbPmvl3mvSOo/TF0DjSGFZ8+Db7aKiqhM8XIc= -github.com/aws/aws-sdk-go-v2/service/appintegrations v1.31.4 h1:AWrTD+eNmKOU1J7KV8TS3w+B9ZYdl7eVBOegEeVGlyY= -github.com/aws/aws-sdk-go-v2/service/appintegrations v1.31.4/go.mod h1:lrw4VUA85885klz/SHqwyu0A2V70w9kOH3LZdEuskj8= -github.com/aws/aws-sdk-go-v2/service/applicationautoscaling v1.36.4 h1:JetyQYju/+q33qzbNAiuHVIX4zB/AX9nM65qD+eLKM8= -github.com/aws/aws-sdk-go-v2/service/applicationautoscaling v1.36.4/go.mod h1:T38DTrOzItEr+LJap6BHKrWN8wBrLP44+n/JY0wC2xI= -github.com/aws/aws-sdk-go-v2/service/applicationinsights v1.30.6 h1:wOKS3lH9adXnOPg4VJ0AQ56tmmcTO40WTgkHk1F9kJE= -github.com/aws/aws-sdk-go-v2/service/applicationinsights v1.30.6/go.mod h1:FEqLE3bBOwq2nE4NtVKUljFYcLTc6tVjYAOvDtWXKb4= -github.com/aws/aws-sdk-go-v2/service/applicationsignals v1.11.3 h1:qV6rPSVsIReOn1DTrvC0wi7rlG/IbQmEJQ//0DijU5A= -github.com/aws/aws-sdk-go-v2/service/applicationsignals v1.11.3/go.mod h1:EGKmN5VSpsjvJad12akh86dbFu/YoRa0qFiWzcPnXIk= -github.com/aws/aws-sdk-go-v2/service/appmesh v1.30.4 h1:1TT/4BO285m66cH5vOExvqvvaW/EpP4VngGw7xEvaGc= -github.com/aws/aws-sdk-go-v2/service/appmesh v1.30.4/go.mod h1:jFygkUlz2jEVPPQAq4OSqTTKjt20qx9N/5eR/gnyD7k= -github.com/aws/aws-sdk-go-v2/service/apprunner v1.34.2 h1:ZEkJkUCPdXrL3JOTpa3DuB879AtP5tNF/8i8415A8fY= -github.com/aws/aws-sdk-go-v2/service/apprunner v1.34.2/go.mod h1:p4kYzg6Gb1uqNc7m9/qB4aDycggCAv9mfFXX15S805U= -github.com/aws/aws-sdk-go-v2/service/appstream v1.45.5 
h1:BuHTCRVfEACQ9YDVYHLiqEW7LWypFdcPAH07icAmgo0= -github.com/aws/aws-sdk-go-v2/service/appstream v1.45.5/go.mod h1:Kdkrr6TbMceLxOiRDJ6L1hdbv1/GuzGENPxylMzffcw= -github.com/aws/aws-sdk-go-v2/service/appsync v1.47.3 h1:Jc3/7ZWo4pjNhKp0B0WD4Av5QOMaJj6Xqzg0y0l6deA= -github.com/aws/aws-sdk-go-v2/service/appsync v1.47.3/go.mod h1:id62qP6jzhg3NWQ5zfBf12omt9Rm3yEcwI1rtj7+wbE= -github.com/aws/aws-sdk-go-v2/service/athena v1.51.3 h1:4X2/0GQiQBlAE9sGGKnouUI3yjtf9A/uTo7VPjD9/6c= -github.com/aws/aws-sdk-go-v2/service/athena v1.51.3/go.mod h1:q8KLas6BtgGYm695nQxAjFJvqRoj8Qcpig1291KQWok= -github.com/aws/aws-sdk-go-v2/service/auditmanager v1.39.2 h1:Pye3If+Jpe58EwCzH+CJZnqGK39w7nSAdBl+BNVv6qs= -github.com/aws/aws-sdk-go-v2/service/auditmanager v1.39.2/go.mod h1:zfdQum9cKCPEWF8g8CXfJgFZXJ/+QbvhXvesWOm9WnE= -github.com/aws/aws-sdk-go-v2/service/autoscaling v1.54.0 h1:0BmpSm5x2rpB9D2K2OAoOc1cZTUJpw1OiQj86ZT8RTg= -github.com/aws/aws-sdk-go-v2/service/autoscaling v1.54.0/go.mod h1:6U/Xm5bBkZGCTxH3NE9+hPKEpCFCothGn/gwytsr1Mk= -github.com/aws/aws-sdk-go-v2/service/autoscalingplans v1.25.4 h1:V//LfMnazbS3Zh1O7rWL3v92yQW0kBpIXlkKGEV1Fmw= -github.com/aws/aws-sdk-go-v2/service/autoscalingplans v1.25.4/go.mod h1:jUiTKxG/so4swtdvfxlKgdEESCAZ1RDWIfyn3DrUVMk= -github.com/aws/aws-sdk-go-v2/service/backup v1.43.1 h1:IWL4JnLGXSFE094fHbveF/Lm+zYgBdoD0zBelyKRKII= -github.com/aws/aws-sdk-go-v2/service/backup v1.43.1/go.mod h1:qDBAiArrJPrmcHvpgCQ4lhM5zV/sf0Iou7nP7Zm2mc8= -github.com/aws/aws-sdk-go-v2/service/batch v1.53.0 h1:uf+Mr9I0l5Eo3aTaunHTJsfTnewLvzqGRPG4DrYabv8= -github.com/aws/aws-sdk-go-v2/service/batch v1.53.0/go.mod h1:3kzOFBSr7kWjiPQFZPqanUTxFwdMiA5UFe/O4NN7fsI= -github.com/aws/aws-sdk-go-v2/service/bcmdataexports v1.8.4 h1:BjeegkJ3Ha6VlzhQdqxViNIUkJNi6seZwHp5pqpYHaI= -github.com/aws/aws-sdk-go-v2/service/bcmdataexports v1.8.4/go.mod h1:0Rs3YH1xh3qTgiy0VP+UR6GibZUVATPAtvr3n58b3d4= -github.com/aws/aws-sdk-go-v2/service/bedrock v1.38.0 h1:wBlJMfquOKOMdSzZezhtzoTuVXc8kkkteymE/bBEXcg= 
-github.com/aws/aws-sdk-go-v2/service/bedrock v1.38.0/go.mod h1:1GlpVDmL9pBaVwNfgPXR3zuJhhXtNOZoiBa16pNbINY= -github.com/aws/aws-sdk-go-v2/service/bedrockagent v1.44.2 h1:gedxMyluRPy1ENN1dlOM7rK8Jek1wUvpA9z1Cz2s9N4= -github.com/aws/aws-sdk-go-v2/service/bedrockagent v1.44.2/go.mod h1:8zZaELHNLx6LNNfMrzCtVVsOFFKP1905FKmsSFuhArM= -github.com/aws/aws-sdk-go-v2/service/billing v1.2.4 h1:QqtOYdXXtghWbPemcCf7x8y/CWlN950/1eRd13EpKuE= -github.com/aws/aws-sdk-go-v2/service/billing v1.2.4/go.mod h1:mP5IsfmMZhkwpGdQm2DKsU5elbGTizrO3vK98LG0vWc= -github.com/aws/aws-sdk-go-v2/service/budgets v1.31.2 h1:ZdjYaUVxxQeWZ5BoU82dF7BpUhNfmha11ya8K9AiPoc= -github.com/aws/aws-sdk-go-v2/service/budgets v1.31.2/go.mod h1:LnxG/U78Q4uws9jS+a9sTwV8OVTWzfsXuBIaAfwksyM= -github.com/aws/aws-sdk-go-v2/service/chatbot v1.10.4 h1:bq7jZuszo3+COUXlDbeiOnWXfRZGzJcNAZzpjEguBow= -github.com/aws/aws-sdk-go-v2/service/chatbot v1.10.4/go.mod h1:IDmqb/P9NQISRL+1vrUskvUaTOo7SaEyULTLp5QZbhc= -github.com/aws/aws-sdk-go-v2/service/chime v1.36.4 h1:RvqaquFRY71C0col7ydmbqmJsqBFpybWRsklPwOcIA0= -github.com/aws/aws-sdk-go-v2/service/chime v1.36.4/go.mod h1:BqpFNKJNnpT9huL8gCdIQpzeZi2+FK/Y5DoyQkDl+C0= -github.com/aws/aws-sdk-go-v2/service/chimesdkmediapipelines v1.22.4 h1:AXoWCQp+YYKsAX1FcUm5WOXhC9KNodEhjB2xuRc/i2E= -github.com/aws/aws-sdk-go-v2/service/chimesdkmediapipelines v1.22.4/go.mod h1:VK80ksSTmSe1wU33aY0E47R2A2I6v7Zyi4sgn94d9F4= -github.com/aws/aws-sdk-go-v2/service/chimesdkvoice v1.22.2 h1:FvJ0+3o1j/k8OejpUK/19BhyuoKlWS67n/hqzyhINfU= -github.com/aws/aws-sdk-go-v2/service/chimesdkvoice v1.22.2/go.mod h1:SXGQ5hmMJzWRJt1Mu3s6x15eldRft+xErnAL6CDBC0U= -github.com/aws/aws-sdk-go-v2/service/cleanrooms v1.25.2 h1:TN80R+dUKMq7xgqgbclW/uBPdgo4zoGJ4uVdzNBgwQo= -github.com/aws/aws-sdk-go-v2/service/cleanrooms v1.25.2/go.mod h1:81twhtDcStPNYEh9XCp89TyaTjq+4ciPUgSWEoVxpgM= -github.com/aws/aws-sdk-go-v2/service/cloud9 v1.29.4 h1:bIyRLJ+QVAE1GPI+9XBGpP1rRKKbHL4oUMOVw/EdUBs= -github.com/aws/aws-sdk-go-v2/service/cloud9 
v1.29.4/go.mod h1:gdFyMvML9BinbLiHs795bR9rKRHTKxNsOCLfbDFIzB4= -github.com/aws/aws-sdk-go-v2/service/cloudcontrol v1.24.6 h1:ZTDJc/sruFHYXaTr4aNwuHEykFtjqT9hcFFDQceSlAs= -github.com/aws/aws-sdk-go-v2/service/cloudcontrol v1.24.6/go.mod h1:QarpKg2UqElY6gtj2Z3CFbJqP8Wmq//w0LwudfpY69w= -github.com/aws/aws-sdk-go-v2/service/cloudformation v1.61.0 h1:1nVq2bvAANTPAfipKBOtbP1ebqTpJrOsxNqwb6ybCG8= -github.com/aws/aws-sdk-go-v2/service/cloudformation v1.61.0/go.mod h1:xU79X14UC0F8sEJCRTWwINzlQ4jacpEFpRESLHRHfoY= -github.com/aws/aws-sdk-go-v2/service/cloudfront v1.46.3 h1:ULVZL6Ro+vqmXFVFgZ5Q92pqWnhJfwOnWlNtibQPnIs= -github.com/aws/aws-sdk-go-v2/service/cloudfront v1.46.3/go.mod h1:vudWcTOLhQf4lzRH0qHUszJh8Gpo+Lp6dqH/HgVR9Xg= -github.com/aws/aws-sdk-go-v2/service/cloudfrontkeyvaluestore v1.9.4 h1:b/akD5kwvx/NPXgYMPnaaZ7HWlgrDLg9NatQ2Tc8wVk= -github.com/aws/aws-sdk-go-v2/service/cloudfrontkeyvaluestore v1.9.4/go.mod h1:nAAHqFZISt7zseVgaPzYwMY4bbet/rTn/TFMYa3s6sU= -github.com/aws/aws-sdk-go-v2/service/cloudhsmv2 v1.30.5 h1:P5+wUNAOc2bjxIiQ+ZMVz/Mv5jirnh4nPI9VCLgvJUQ= -github.com/aws/aws-sdk-go-v2/service/cloudhsmv2 v1.30.5/go.mod h1:4MW0k8bmDdC8VHJf5Vxhp5zLXnvkDRERvfiEvXZDnoM= -github.com/aws/aws-sdk-go-v2/service/cloudsearch v1.27.4 h1:kJ2Sa4VsJoaPg1vQCFL91N/ZjMzzbEyo7CG6bgzCkbI= -github.com/aws/aws-sdk-go-v2/service/cloudsearch v1.27.4/go.mod h1:kbxooYiqH9It+k1z+iLiTKlompLUQmEgZY5sv9txU8Q= -github.com/aws/aws-sdk-go-v2/service/cloudtrail v1.49.3 h1:wSQwBOXa1EV81WiVWLZ8fCrJ7wlwcfqSexEiv9OjPrA= -github.com/aws/aws-sdk-go-v2/service/cloudtrail v1.49.3/go.mod h1:5N4LfimBXTCtqKr0tZKfcte5UswFb7SJZV+LiQUZsGk= -github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.45.3 h1:Nn3qce+OHZuMj/edx4its32uxedAmquCDxtZkrdeiD4= -github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.45.3/go.mod h1:aqsLGsPs+rJfwDBwWHLcIV8F7AFcikFTPLwUD4RwORQ= -github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs v1.51.0 h1:e5cbPZYTIY2nUEFieZUfVdINOiCTvChOMPfdLnmiLzs= -github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs 
v1.51.0/go.mod h1:UseIHRfrm7PqeZo6fcTb6FUCXzCnh1KJbQbmOfxArGM= -github.com/aws/aws-sdk-go-v2/service/codeartifact v1.34.4 h1:8E5noXcMI3cNsX1hcx/ORW6mtla6usxz4BcW1q+zheE= -github.com/aws/aws-sdk-go-v2/service/codeartifact v1.34.4/go.mod h1:8bXExDA212G0tJkUYMcxcFhsqcM+jSBtsmOugZe2j7o= -github.com/aws/aws-sdk-go-v2/service/codebuild v1.61.2 h1:efAyxbfGzzswonfsjj3porKv6Q1H98SOHdlZ6hF2NI4= -github.com/aws/aws-sdk-go-v2/service/codebuild v1.61.2/go.mod h1:THLcsyok0+f2SaN7/QZ7tlzNseoF1YB7PJuGc3yd3EQ= -github.com/aws/aws-sdk-go-v2/service/codecatalyst v1.17.21 h1:0jz43AWY1USrCZwMzxHOIfmoXy7M2ZJRaqCr56x/Rvc= -github.com/aws/aws-sdk-go-v2/service/codecatalyst v1.17.21/go.mod h1:ic53zDsOvg3DF95EpLCTeR4hf6Oxt6Dz6P9WQ3cvUvw= -github.com/aws/aws-sdk-go-v2/service/codecommit v1.28.4 h1:DyOb/MZoTswNwFhg55VR1rvLkn1S55T7q+P8EuR+A7M= -github.com/aws/aws-sdk-go-v2/service/codecommit v1.28.4/go.mod h1:PB41jkDc903DUreLzzJBB/rabkQqriNqPtv1L9vAIOI= -github.com/aws/aws-sdk-go-v2/service/codeconnections v1.6.4 h1:j1FZyc3Oj7W3dWgmO4cbtOOkCaixavGotkPnoZqrixQ= -github.com/aws/aws-sdk-go-v2/service/codeconnections v1.6.4/go.mod h1:b3xHt4pnrpRyj1i75f8gU3vUy4UKLCbatXjcNZdbB38= -github.com/aws/aws-sdk-go-v2/service/codedeploy v1.30.6 h1:A74AkCwB8DsBeJ9DVLtLif2nGuTiHGdZMOeo2yKsyB0= -github.com/aws/aws-sdk-go-v2/service/codedeploy v1.30.6/go.mod h1:wjqakZxOg31qrJsrwpkvUoELRhfSNToa8SA1u7PdSxU= -github.com/aws/aws-sdk-go-v2/service/codeguruprofiler v1.25.4 h1:gWXiqaKkd6fRF1qOs5DL0ME1cRep4KNAAGGc5J5Lw3Y= -github.com/aws/aws-sdk-go-v2/service/codeguruprofiler v1.25.4/go.mod h1:wp/JLha/UGGGklH6qYjzIrQWGM+ewdlrXlwCmi0JbOM= -github.com/aws/aws-sdk-go-v2/service/codegurureviewer v1.30.4 h1:BJqh9+QCaB74sJmi4KpCqrrqV/exeG+gA6hvLRchH6E= -github.com/aws/aws-sdk-go-v2/service/codegurureviewer v1.30.4/go.mod h1:iINMrnaDsPf5UwOXacV+xFBgXphzT2yvdSMBzbOlk4g= -github.com/aws/aws-sdk-go-v2/service/codepipeline v1.42.2 h1:IYZ2Prn/aHOGB9GRj7hS7GVHMtRTb/4wiDI5mf326GE= -github.com/aws/aws-sdk-go-v2/service/codepipeline 
v1.42.2/go.mod h1:RgaoO5gg3Pp1se22UalAX6oTusJgdlKwMOfMo/lObgw= -github.com/aws/aws-sdk-go-v2/service/codestarconnections v1.30.4 h1:wIFcc7VQQpPS15fXRM8WvTUmrYNP6vIjFSxTszDWPyo= -github.com/aws/aws-sdk-go-v2/service/codestarconnections v1.30.4/go.mod h1:vMiaujmCGuRMMx7k9LVHfr9M+4++LwDpVciiF362wDo= -github.com/aws/aws-sdk-go-v2/service/codestarnotifications v1.27.4 h1:E5SxPPUfnZYDoT765IjNVzhDHmLVvaQdhH/7kRm+ZJY= -github.com/aws/aws-sdk-go-v2/service/codestarnotifications v1.27.4/go.mod h1:7gyIYjHXPAOX3NERsiwOs4uPEtppi3C+PKgwSvrt9AY= -github.com/aws/aws-sdk-go-v2/service/cognitoidentity v1.29.6 h1:qAzPMhagtK5hAs9WWnnrWXkpYfVXBbrcrEO/al4wP7I= -github.com/aws/aws-sdk-go-v2/service/cognitoidentity v1.29.6/go.mod h1:Isbgk/cOSGoFwswAzibnEWm5lXXLOCWOTAxyKmMAOHk= -github.com/aws/aws-sdk-go-v2/service/cognitoidentityprovider v1.53.2 h1:3f3FZdZgMBMouhPizBI3i6EnpdyL3ttjObmvr+1kfzg= -github.com/aws/aws-sdk-go-v2/service/cognitoidentityprovider v1.53.2/go.mod h1:rwpoEr5M4DCNNxmXX75Ql5+KOW01DEvOE0KPo3iiNEs= -github.com/aws/aws-sdk-go-v2/service/comprehend v1.36.6 h1:rSAMOE0HndTsLBPnuh4YLm205D8+3W/7lwc9q6llhvE= -github.com/aws/aws-sdk-go-v2/service/comprehend v1.36.6/go.mod h1:0bQ8f9sR/AaJBBBnHO0lc7mREP8uqWGSXY6uY7GR37s= -github.com/aws/aws-sdk-go-v2/service/computeoptimizer v1.43.2 h1:eIHLQrO/u2P76oWA2m++l2sOTRNRrKRFKK189YO5XYY= -github.com/aws/aws-sdk-go-v2/service/computeoptimizer v1.43.2/go.mod h1:harX8fH+HCyhgvgzLgVjXomS2ZuQ9W7Mgcr11DXM41w= -github.com/aws/aws-sdk-go-v2/service/configservice v1.53.0 h1:lu97by/q8YJxGjEujMunX5Gel2tf2MfDkb7Rz26Lw1g= -github.com/aws/aws-sdk-go-v2/service/configservice v1.53.0/go.mod h1:BYXP4Mzkc+ki7WFebTIMvzP+2CPFqULpy5KlCPlVOO0= -github.com/aws/aws-sdk-go-v2/service/connect v1.131.0 h1:jNR9bUgK/ZLA5ymyoaGU/7XREyIz99Lx7PS6jMVFW9w= -github.com/aws/aws-sdk-go-v2/service/connect v1.131.0/go.mod h1:xU6tkVMTXQlkRdff/a3rB6RS/goEJjq7QJbQj2/tZO4= -github.com/aws/aws-sdk-go-v2/service/connectcases v1.26.0 h1:zd0G03x3Gsztv7g3P5OtuTVq8VrTCSidAzEsXy61/Ac= 
-github.com/aws/aws-sdk-go-v2/service/connectcases v1.26.0/go.mod h1:NjwcRfAn4H/Dbt+F6AHYpvpGSfj8ViI30SpL4L3danA= -github.com/aws/aws-sdk-go-v2/service/controltower v1.22.3 h1:C8FcMAc7DIsTGqvoNfhKtf8kCGCRGf+UFr/U/J8WcjQ= -github.com/aws/aws-sdk-go-v2/service/controltower v1.22.3/go.mod h1:maGRVPBBQenlVQo3oooIQ9rwJcrIjyqCKKZIGzxjhTk= -github.com/aws/aws-sdk-go-v2/service/costandusagereportservice v1.29.4 h1:G96u5BhFFCwr1o0jmn/9pG4uqWFs1jbMX78BzEwSh2c= -github.com/aws/aws-sdk-go-v2/service/costandusagereportservice v1.29.4/go.mod h1:mWXTvKnKJ30G5ZxiEBAaN2jFgzX69Jwwr0lDmx4/6js= -github.com/aws/aws-sdk-go-v2/service/costexplorer v1.51.2 h1:7zSsOpcOaTximKcYWlpbhgKSn22fzx3ZkkankTEBHpQ= -github.com/aws/aws-sdk-go-v2/service/costexplorer v1.51.2/go.mod h1:xbfTJfT0GwWB6ONGltxdQixqzk/5fD/J/KEeQjUUNI8= -github.com/aws/aws-sdk-go-v2/service/costoptimizationhub v1.16.2 h1:yJ9bmAq8pTTETtUjQpONk3hzFLFy4qnsGu8IzPJYW4s= -github.com/aws/aws-sdk-go-v2/service/costoptimizationhub v1.16.2/go.mod h1:2e/HlfOil/pDjSsn/P0VcpYxKX3rycKiR8FSVzsOfao= -github.com/aws/aws-sdk-go-v2/service/customerprofiles v1.47.0 h1:bJKZVmfIHuaI7h0w7Ra5FKtQaKLaarBlJZVfcaMYNh8= -github.com/aws/aws-sdk-go-v2/service/customerprofiles v1.47.0/go.mod h1:rm68C2eQGFimGGUdirf25ehBACurSxVmirlX2NsgMpQ= -github.com/aws/aws-sdk-go-v2/service/databasemigrationservice v1.53.0 h1:KPukzgWZnmdc4fZYFkA46orMsQJoeNeEh5wbSnrYCdE= -github.com/aws/aws-sdk-go-v2/service/databasemigrationservice v1.53.0/go.mod h1:YDWzt7f6AHa4WfyJDv3GcIiyY3969MfsuSX9ANUbZ+k= -github.com/aws/aws-sdk-go-v2/service/databrew v1.34.4 h1:4M8XfsTE92AisaKwV75xtfCVT3Xza3ImIqlZsvzxZ0w= -github.com/aws/aws-sdk-go-v2/service/databrew v1.34.4/go.mod h1:b2Cv3mZxp7bNPEzOQFsSCcPJivdNiHn8HmCA7rau1r8= -github.com/aws/aws-sdk-go-v2/service/dataexchange v1.35.2 h1:/0cE4Ng/7zrNuM7yL3ADTwqDjN8CcPClsDxW6s4Fxy4= -github.com/aws/aws-sdk-go-v2/service/dataexchange v1.35.2/go.mod h1:3KVz8qwswG8F7iJvqk1hijdyF296sqxxYBMYX3vqygk= -github.com/aws/aws-sdk-go-v2/service/datapipeline 
v1.26.4 h1:qW7fLEpklI16GTkOQOC4IeztsCK38gXAsOLo2On2jD0= -github.com/aws/aws-sdk-go-v2/service/datapipeline v1.26.4/go.mod h1:aLoUy+KtchN6tAwb7YJnPcsb2YEoultUKsx1s/QEz60= -github.com/aws/aws-sdk-go-v2/service/datasync v1.49.3 h1:yWMkk9hwUjpDVsS4h0713JK1gKzubaxmqcQk/9r40t8= -github.com/aws/aws-sdk-go-v2/service/datasync v1.49.3/go.mod h1:gTqSe98/eTBLBSli2OIVCCtZ2wJ2oNrDqK16A2LGWiM= -github.com/aws/aws-sdk-go-v2/service/datazone v1.31.0 h1:AFzCK9/krkZ1i7AZtreEf9uiU8lJ55wTQoXFFgst//8= -github.com/aws/aws-sdk-go-v2/service/datazone v1.31.0/go.mod h1:XBH6CAk0DGML9jXbQM8GQkBE+ER1wRXrm0GxQe783xU= -github.com/aws/aws-sdk-go-v2/service/dax v1.24.4 h1:lyH0fXwrV4nIytmoiz0rzrJSFv84ZJ8MdK83U/LUT/Q= -github.com/aws/aws-sdk-go-v2/service/dax v1.24.4/go.mod h1:D91Ak1sYOquLMDM2EPuBRL+2gQxEnzMhG+/s5iUInMw= -github.com/aws/aws-sdk-go-v2/service/detective v1.33.2 h1:ePaT5c+InRjskQmJYTXwvMmb3VxcKh9MjZ5PVwoBduo= -github.com/aws/aws-sdk-go-v2/service/detective v1.33.2/go.mod h1:RE7vENK3CjJmUV40rQQsgkB7DNHJ1hZraBS99K7A/QQ= -github.com/aws/aws-sdk-go-v2/service/devicefarm v1.31.2 h1:6KlUuNr0DmhQQm/g/q3a6swX6WalRpVve8Op2Fdpy30= -github.com/aws/aws-sdk-go-v2/service/devicefarm v1.31.2/go.mod h1:+HTd3s8wIGd5b5jSikh9Qd/J1kNfY6IqioLkwZisfvc= -github.com/aws/aws-sdk-go-v2/service/devopsguru v1.35.4 h1:e7qpCMdibnlsI0jO5UfGTRfg+0G+HBANsMVtAjc8Pro= -github.com/aws/aws-sdk-go-v2/service/devopsguru v1.35.4/go.mod h1:nIALOeX1Xmspm6NhjzznpGmbyBg5gV0hxYcFcSCIUEQ= -github.com/aws/aws-sdk-go-v2/service/directconnect v1.32.5 h1:8H+ZzO2Yez+PbYRzheZoxWmv03k+qKq71Ruhlx9khxE= -github.com/aws/aws-sdk-go-v2/service/directconnect v1.32.5/go.mod h1:DD3baYN1tN5iIxcPKVAlgnDh2ZkUcbzM/lH/j0l+lxI= -github.com/aws/aws-sdk-go-v2/service/directoryservice v1.31.7 h1:JV01vGZhXnOGI5mjrSaYs8toau+lPgXp6UlQNm+inFY= -github.com/aws/aws-sdk-go-v2/service/directoryservice v1.31.7/go.mod h1:mAPyxqoegn/QPFB2Zy65DiQ2y8MlTtzKvFvlz2rwaQk= -github.com/aws/aws-sdk-go-v2/service/dlm v1.30.7 h1:O2pUnDku0CyRC4kZxa88YCMf395tbCujoOCS423vlXw= 
-github.com/aws/aws-sdk-go-v2/service/dlm v1.30.7/go.mod h1:dQK5yb0IyYZOJ8paqSQu6csZtYTIIxmAgI4Y4rtL9C4= -github.com/aws/aws-sdk-go-v2/service/docdb v1.41.6 h1:3psRq1ftvPT02Gtnt2YjSa/hXWM0JuEy3uZu8hatWPA= -github.com/aws/aws-sdk-go-v2/service/docdb v1.41.6/go.mod h1:HKdINsFfdzTWR38qWzfMbMJmsXC8tvbdSis/kG1+lCM= -github.com/aws/aws-sdk-go-v2/service/docdbelastic v1.15.4 h1:ne+OVLZVBibPXOb4Hm9o3iZp3UB5oA175aCrOzVTtHk= -github.com/aws/aws-sdk-go-v2/service/docdbelastic v1.15.4/go.mod h1:gXnmPUfd/xGEIZ8WsMswLiSAyYkQ6gMC9Uj7zVguwbQ= -github.com/aws/aws-sdk-go-v2/service/drs v1.31.4 h1:/mnR2UVVHcGIrHf70g5nb3RyoUHuj9MAVUYH9JvThcA= -github.com/aws/aws-sdk-go-v2/service/drs v1.31.4/go.mod h1:yvvJJgvXZDPuf3g8F/0IloipIsnnsamkCyVQdxGR6Og= -github.com/aws/aws-sdk-go-v2/service/dsql v1.5.2 h1:FCT/XJTmF+Rs9dpz8raISrEui75jLrF1hwYj2S5T7cw= -github.com/aws/aws-sdk-go-v2/service/dsql v1.5.2/go.mod h1:MFliW2mb4JEqLROEGWnf9o8mEpNjiyieKyOaUqa2ji0= -github.com/aws/aws-sdk-go-v2/service/dynamodb v1.44.0 h1:A99gjqZDbdhjtjJVZrmVzVKO2+p3MSg35bDWtbMQVxw= -github.com/aws/aws-sdk-go-v2/service/dynamodb v1.44.0/go.mod h1:mWB0GE1bqcVSvpW7OtFA0sKuHk52+IqtnsYU2jUfYAs= -github.com/aws/aws-sdk-go-v2/service/ec2 v1.231.0 h1:uhIwvt6crp2kQenKojfDShGw39WEIrtPRfYZ3FAFlJk= -github.com/aws/aws-sdk-go-v2/service/ec2 v1.231.0/go.mod h1:35jGWx7ECvCwTsApqicFYzZ7JFEnBc6oHUuOQ3xIS54= -github.com/aws/aws-sdk-go-v2/service/ecr v1.45.1 h1:Bwzh202Aq7/MYnAjXA9VawCf6u+hjwMdoYmZ4HYsdf8= -github.com/aws/aws-sdk-go-v2/service/ecr v1.45.1/go.mod h1:xZzWl9AXYa6zsLLH41HBFW8KRKJRIzlGmvSM0mVMIX4= -github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.33.2 h1:XJ/AEFYj9VFPJdF+VFi4SUPEDfz1akHwxxm07JfZJcs= -github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.33.2/go.mod h1:JUBHdhvKbbKmhaHjLsKJAWnQL80T6nURmhB/LEprV+4= -github.com/aws/aws-sdk-go-v2/service/ecs v1.58.1 h1:DTwVT1pmRYac0va8mb4A97bumBXZJeAov776TlsYqHw= -github.com/aws/aws-sdk-go-v2/service/ecs v1.58.1/go.mod h1:kq9VTFKJ68jqeYu1uVx6bR7VgWdQ0Kic/BstllTJJuU= 
-github.com/aws/aws-sdk-go-v2/service/efs v1.36.2 h1:u559lskjn8+5WRnLU+Aq0VCZLjgw+JXYHiwSfOpweBw= -github.com/aws/aws-sdk-go-v2/service/efs v1.36.2/go.mod h1:e6UrCp+V52p83QPNWC05I2N3vkg15XTfbQ0n4IvYDYQ= -github.com/aws/aws-sdk-go-v2/service/eks v1.66.1 h1:sD1y3G4WXw1GjK95L5dBXPFXNWl/O8GMradUojUYqCg= -github.com/aws/aws-sdk-go-v2/service/eks v1.66.1/go.mod h1:Qj90srO2HigGG5x8Ro6RxixxqiSjZjF91WTEVpnsjAs= -github.com/aws/aws-sdk-go-v2/service/elasticache v1.46.3 h1:K1KtI95Fkz+2PT0OtVRsZyUzb4zHFMWOXNPkXy7LYDY= -github.com/aws/aws-sdk-go-v2/service/elasticache v1.46.3/go.mod h1:kI+JDflKNLqdxVmdg2I8A3dmsCcJzAXXz5vKcHsyz9Y= -github.com/aws/aws-sdk-go-v2/service/elasticbeanstalk v1.29.5 h1:pMxyQ4h0JhnKOQoTRW6OyzKtsHKGzO3qTikBH7q5dr4= -github.com/aws/aws-sdk-go-v2/service/elasticbeanstalk v1.29.5/go.mod h1:BfDv/2Xok2pEg9VbiT7WkBIO3WFnAnuUcncn9QkOJko= -github.com/aws/aws-sdk-go-v2/service/elasticloadbalancing v1.29.6 h1:9grU/+HRwLXJV8XUjEPThJj/H+0oHkeNBFpSSfZekeg= -github.com/aws/aws-sdk-go-v2/service/elasticloadbalancing v1.29.6/go.mod h1:N4fs285CsnBHlAkzBpQapefR/noggTyF09fWs72EzB4= -github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2 v1.46.0 h1:3nrkDeiPreARHMoqvS+umxTKcDVkqnRPlz01/kVgG7U= -github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2 v1.46.0/go.mod h1:E+At5Cto6ntT+qaNs3RpJKsx1GaFaNB3zzNUFhHL8DE= -github.com/aws/aws-sdk-go-v2/service/elasticsearchservice v1.33.6 h1:uMMgBQYKsZn0kunyKsyUZyeIlBjt0tq8JmuSRhPF3k8= -github.com/aws/aws-sdk-go-v2/service/elasticsearchservice v1.33.6/go.mod h1:amHnCXfYgnnuX+DZsN/hSBbhKWA8ftDQN0QVVelGGoU= -github.com/aws/aws-sdk-go-v2/service/elastictranscoder v1.28.4 h1:0ScNqYCd3DPv6xfaKQkcCB06mWKI1eXQ5HbE4zeBo7M= -github.com/aws/aws-sdk-go-v2/service/elastictranscoder v1.28.4/go.mod h1:EqJAUs2nA9PHOBjrMpv+XmjbEdPx3COUMnEKzsc0PGU= -github.com/aws/aws-sdk-go-v2/service/emr v1.49.3 h1:bojA/Hy1JbiG84qjo0dKjzCSrlkGkqoZKivoSA3ZYyI= -github.com/aws/aws-sdk-go-v2/service/emr v1.49.3/go.mod 
h1:3Fb28r8m3+76JD3SGbN080pY53Zf8S+kraglAVRIucc= -github.com/aws/aws-sdk-go-v2/service/emrcontainers v1.35.4 h1:4DSQddd2X8DtQ7XkfoxgTQm9Ziqg7OMqTqYexZJiQsE= -github.com/aws/aws-sdk-go-v2/service/emrcontainers v1.35.4/go.mod h1:1wo3Ol0hdgtW5tnkHDSywVk1uGZgFz3GIczlHWigLSE= -github.com/aws/aws-sdk-go-v2/service/emrserverless v1.32.0 h1:lMEEo2u0vS4+xid38JaKIyjxIh8OCkDNtyt4wHqZ4Os= -github.com/aws/aws-sdk-go-v2/service/emrserverless v1.32.0/go.mod h1:DLlEeTpje5Jl1KXggBTphYGdTn+4VUgSOfPZOdQKwOg= -github.com/aws/aws-sdk-go-v2/service/eventbridge v1.40.0 h1:S2zUrIgbvBdHCWP5I5P3Wz8+YfDyp7rpCfGXBwmO3a8= -github.com/aws/aws-sdk-go-v2/service/eventbridge v1.40.0/go.mod h1:sIrUII6Z+hAVAgcpmsc2e9HvEr++m/v8aBPT7s4ZYUk= -github.com/aws/aws-sdk-go-v2/service/evidently v1.24.4 h1:LFq0twtI4iH7NoI8zqgom4RttSS//mKasAt4vbMbX3E= -github.com/aws/aws-sdk-go-v2/service/evidently v1.24.4/go.mod h1:xs4SqVz98n8Bxjt/NCG2G2Jm/qx8gx+i0euCyIaRZJA= -github.com/aws/aws-sdk-go-v2/service/evs v1.0.2 h1:jwSECr6+TScYZgbaVmL5WSMnjifRg8V0CGv+R/IU4I4= -github.com/aws/aws-sdk-go-v2/service/evs v1.0.2/go.mod h1:0a8Lc552uwJTFIRrlvqlR6dqvxlN6hk4GMYZRek0Se4= -github.com/aws/aws-sdk-go-v2/service/finspace v1.29.4 h1:MPXrTPT6nLbddVOivR+cZg3yC/qDZlf5Eta36oQGmzM= -github.com/aws/aws-sdk-go-v2/service/finspace v1.29.4/go.mod h1:hekaZTEQbeaS+WHd4BzQtu+nJS/E73xZocexPrPrArQ= -github.com/aws/aws-sdk-go-v2/service/firehose v1.37.7 h1:rDNxf0CQboBMqzm6WmhGL58pYpKMjU6Qs3/BfY3Em4Y= -github.com/aws/aws-sdk-go-v2/service/firehose v1.37.7/go.mod h1:E1yDRkUMwlVGmDYcu5UJuwfznGNuVW29sjr2xxM2Y0w= -github.com/aws/aws-sdk-go-v2/service/fis v1.33.4 h1:qHebHke5kT9KPhmKfqxWc3a9paffgRhbegNoORoxfCE= -github.com/aws/aws-sdk-go-v2/service/fis v1.33.4/go.mod h1:xwRN5ORzqRIf5IYIkcyAuEhKhVf4Cts5jd7j/fA8+LE= -github.com/aws/aws-sdk-go-v2/service/fms v1.40.5 h1:2hNJGW372nqz7HzMutbocRpZ3MARYm5kq2tvCFs6OHI= -github.com/aws/aws-sdk-go-v2/service/fms v1.40.5/go.mod h1:93wTShRibgZb1ELz8Pf81L3An0WHKHf9wRJ+6s2OLv0= 
-github.com/aws/aws-sdk-go-v2/service/fsx v1.55.0 h1:ZyAs2DqX6ksKM5dihLzrFseTygwaZWholin+VmN6Ob4= -github.com/aws/aws-sdk-go-v2/service/fsx v1.55.0/go.mod h1:yKSq9iW5hHBEpyYKpmH7bGVTBpE9Ki4xrfAWV99wXpE= -github.com/aws/aws-sdk-go-v2/service/gamelift v1.42.1 h1:a3b1XXHAg61yVO5oKuMN73LxUipPnY5FaV/+kAqvZn0= -github.com/aws/aws-sdk-go-v2/service/gamelift v1.42.1/go.mod h1:dnPoxIqQYnMMkAW1HYNKCF2Sc17CDR2sm+/L8o5FNe8= -github.com/aws/aws-sdk-go-v2/service/glacier v1.27.5 h1:Rp3lC3bHz78NMV6BlffdC/WlpNL/k060yi5FUGBj5po= -github.com/aws/aws-sdk-go-v2/service/glacier v1.27.5/go.mod h1:hSMtaqxpqY3qBEIStQISXDfbBQTcYLNjYn4OSVWKvdc= -github.com/aws/aws-sdk-go-v2/service/globalaccelerator v1.30.4 h1:idE6j2x7GKSosHJs8cUx8A6KUq3uBrHgjDlWX349fuM= -github.com/aws/aws-sdk-go-v2/service/globalaccelerator v1.30.4/go.mod h1:j/G2N1igocPCVsL7+KhmWI7Y9fiAaUtRdirSReCxDSA= -github.com/aws/aws-sdk-go-v2/service/glue v1.117.0 h1:Tl20k1TsdD8Ot+tfOgUt49EE9FyGla1e2LhuJe2Gkgk= -github.com/aws/aws-sdk-go-v2/service/glue v1.117.0/go.mod h1:AiOhaEmhCSVONWJ9Ul47qOzNNEBXG8saKz1K7vKbRg4= -github.com/aws/aws-sdk-go-v2/service/grafana v1.27.4 h1:XixrfgFR4zUxe2lqvQSp7VneDSjh1jVNdU2ebIWSydg= -github.com/aws/aws-sdk-go-v2/service/grafana v1.27.4/go.mod h1:2tlr8LcYq7dHoKzd0McU0r5Q408BwnpvPFyDIW6g6Cc= -github.com/aws/aws-sdk-go-v2/service/greengrass v1.28.4 h1:O0ymzTHd7bbwTjN4lJksKRM+g/WYOzGe2C0dCai1T+Y= -github.com/aws/aws-sdk-go-v2/service/greengrass v1.28.4/go.mod h1:33wl2N0a4HTF8TcfOpgbr057ZmSmdQM1odJnMXBEDn0= -github.com/aws/aws-sdk-go-v2/service/groundstation v1.33.2 h1:ISdFgeehbUcSmHuKnSXIiXbTCbktq3gQOmOJFKXTIuI= -github.com/aws/aws-sdk-go-v2/service/groundstation v1.33.2/go.mod h1:Yy51sCEGRTCe+WCXyGCtwPlr7cJq8gkV3pCr61IlxFo= -github.com/aws/aws-sdk-go-v2/service/guardduty v1.56.0 h1:9sDfWWFOLWf4iXJRmgA2KM44VqzKzBcYE/3lRxdfBac= -github.com/aws/aws-sdk-go-v2/service/guardduty v1.56.0/go.mod h1:NCwAyLptBGarEwV6HMo52eD4wIqiT+szUlI4WhfEeWM= -github.com/aws/aws-sdk-go-v2/service/healthlake v1.30.5 
h1:wXVaLzbLWize/Cbpcz8bt3Z7JptSNjTiT3aLXacB3qA= -github.com/aws/aws-sdk-go-v2/service/healthlake v1.30.5/go.mod h1:KPnC/Zx3SFrNdp6MqngyzCuua9FwdR3gB37IZB19esU= -github.com/aws/aws-sdk-go-v2/service/iam v1.43.0 h1:/ZZo3N8iU/PLsRSCjjlT/J+n4N8kqfTO7BwW1GE+G50= -github.com/aws/aws-sdk-go-v2/service/iam v1.43.0/go.mod h1:QRtwvoAGc59uxv4vQHPKr75SLzhYCRSoETxAA98r6O4= -github.com/aws/aws-sdk-go-v2/service/identitystore v1.28.6 h1:kFlM9ljR/NV9tRbwLpenIdFjDAYFB23pLpcWpCDfkuc= -github.com/aws/aws-sdk-go-v2/service/identitystore v1.28.6/go.mod h1:z1GkhlOp50BHMgSkGFxwKR28G+ZvjykzUScuWhCdVco= -github.com/aws/aws-sdk-go-v2/service/imagebuilder v1.42.3 h1:TLul/XG5yo9fbIMtxEXHwKtjohZjTNVYwWNJR3CRVE0= -github.com/aws/aws-sdk-go-v2/service/imagebuilder v1.42.3/go.mod h1:PKGWYhnhQ3tDhM8W/1R7QUBmM9c7SEshBEewE7XPFPc= -github.com/aws/aws-sdk-go-v2/service/inspector v1.26.4 h1:HmmfKgLW6dj9ZF6LQjnyPr8JfgO5RKViUJZyr+3DyAs= -github.com/aws/aws-sdk-go-v2/service/inspector v1.26.4/go.mod h1:axRC0whrHPEaTEcJCL1FalY9KwwOhmKKdeLzLjqkTyc= -github.com/aws/aws-sdk-go-v2/service/inspector2 v1.38.1 h1:c1ggLklQ1C5Aoj99g/4/CCdB6D0oIPaETNYwY4z8/i4= -github.com/aws/aws-sdk-go-v2/service/inspector2 v1.38.1/go.mod h1:6usonUxMtrrQ1OuxxJeBR2tR1PZcwjc2/e//xK2rmtQ= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.4 h1:CXV68E2dNqhuynZJPB80bhPQwAKqBWVer887figW6Jc= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.4/go.mod h1:/xFi9KtvBXP97ppCz1TAEvU1Uf66qvid89rbem3wCzQ= -github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.7.4 h1:nAP2GYbfh8dd2zGZqFRSMlq+/F6cMPBUuCsGAMkN074= -github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.7.4/go.mod h1:LT10DsiGjLWh4GbjInf9LQejkYEhBgBCjLG5+lvk4EE= -github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.10.17 h1:x187MqiHwBGjMGAed8Y8K1VGuCtFvQvXb24r+bwmSdo= -github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.10.17/go.mod h1:mC9qMbA6e1pwEq6X3zDGtZRXMG2YaElJkbJlMVHLs5I= 
-github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.17 h1:t0E6FzREdtCsiLIoLCWsYliNsRBgyGD/MCK571qk4MI= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.17/go.mod h1:ygpklyoaypuyDvOM5ujWGrYWpAK3h7ugnmKCU/76Ys4= -github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.17 h1:qcLWgdhq45sDM9na4cvXax9dyLitn8EYBRl8Ak4XtG4= -github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.17/go.mod h1:M+jkjBFZ2J6DJrjMv2+vkBbuht6kxJYtJiwoVgX4p4U= -github.com/aws/aws-sdk-go-v2/service/internetmonitor v1.21.5 h1:/OevpXjFTKC13DuhlMoJmlVx246loRn4RehOXcaokYs= -github.com/aws/aws-sdk-go-v2/service/internetmonitor v1.21.5/go.mod h1:hzvUC8l6AJ26Yz6eYiKPClQkSEbukvkNDMMNNhCcM7M= -github.com/aws/aws-sdk-go-v2/service/invoicing v1.2.2 h1:l9h02nlsL71Z3AsiNYe3ok0sKf5FxYalBivi8dmroFo= -github.com/aws/aws-sdk-go-v2/service/invoicing v1.2.2/go.mod h1:qgx493y1oppVNw2khxgCCfmDRCH7xFaLzeHQPPIQcV4= -github.com/aws/aws-sdk-go-v2/service/iot v1.64.4 h1:PCIpXKj5E5SCsIICVb50mU8Ma7B+Yowd872E2x2GEKM= -github.com/aws/aws-sdk-go-v2/service/iot v1.64.4/go.mod h1:zoWywk4n+izQigMVgYQFCnASbAJ8uHv6RHKLrjAsocg= -github.com/aws/aws-sdk-go-v2/service/ivs v1.43.4 h1:o8i4lXojYxWkf1JO/4ZI42A+BqLQcVE7/R/PeSd6//Y= -github.com/aws/aws-sdk-go-v2/service/ivs v1.43.4/go.mod h1:eqKP1qnqzTTjRcIO6DK9HRiIwvbL67xAUZ3IGbQ0WOI= -github.com/aws/aws-sdk-go-v2/service/ivschat v1.17.4 h1:btA/5nMzQ5W9uYvXVfZoo+1MfIsnt8rHxfdeqqb/Hp4= -github.com/aws/aws-sdk-go-v2/service/ivschat v1.17.4/go.mod h1:wVqsjIZzpNfhcxzSEQ5Ex3MZTK6pK41Bnube0cQbklw= -github.com/aws/aws-sdk-go-v2/service/kafka v1.39.5 h1:N92rM/5cDDxhjRLQsiVuV+osgvjgxjlPWDfifwWZl+0= -github.com/aws/aws-sdk-go-v2/service/kafka v1.39.5/go.mod h1:O0aQB4mb7phy2B60/oRkEN2EeUdbWDOHhrnar8ZP1Dk= -github.com/aws/aws-sdk-go-v2/service/kafkaconnect v1.23.5 h1:6aVQyYo8DwhQknoluvQn3myUthiSvX7h0nf7r2nrxQU= -github.com/aws/aws-sdk-go-v2/service/kafkaconnect v1.23.5/go.mod h1:DIDIP4kbwO2APBMn4aH89FjL3JNeeDoOG37W15Tkk2o= 
-github.com/aws/aws-sdk-go-v2/service/kendra v1.56.4 h1:GmvdHpYX8gUIIrhVoZ3CVyES0M06FAoMAmwWSroWwDk= -github.com/aws/aws-sdk-go-v2/service/kendra v1.56.4/go.mod h1:UyEw38rFv1ab5iGITliJ76ercQ2W+uH6xGofzM/fWn8= -github.com/aws/aws-sdk-go-v2/service/keyspaces v1.19.0 h1:dUlvwCH/2NcG6vE87uBYtedvSqr38hvOMq2V7oNrGek= -github.com/aws/aws-sdk-go-v2/service/keyspaces v1.19.0/go.mod h1:6ToAMADrPoGAV7YNsJh8QHv/V9Rok9uPTvJmw0nxpj4= -github.com/aws/aws-sdk-go-v2/service/kinesis v1.35.3 h1:aAi9YBNpYMEX52Z9qy1YP2t3RhDqMcP67Ep/C4q5RiQ= -github.com/aws/aws-sdk-go-v2/service/kinesis v1.35.3/go.mod h1:DH0TzTbBG82HKNpBQlplRNSS4bGz0dsbJvxdK9f6rUY= -github.com/aws/aws-sdk-go-v2/service/kinesisanalytics v1.26.7 h1:2Yes4BbKaGPHb/bCdaWoDEC9YdAM51fuP5NC7Z3dNYU= -github.com/aws/aws-sdk-go-v2/service/kinesisanalytics v1.26.7/go.mod h1:eWKL85+D5+OcrfqvRpLF2x71btGZWur944vnaPmWE6E= -github.com/aws/aws-sdk-go-v2/service/kinesisanalyticsv2 v1.32.7 h1:Vt7/srA/qRWlIck03nC/kDGOITQZ5eJ2BlnXNEiPzeU= -github.com/aws/aws-sdk-go-v2/service/kinesisanalyticsv2 v1.32.7/go.mod h1:1FRspsThsK9y/KCnN6lF2ooSPFNw8TwGZf/3xpT3wEo= -github.com/aws/aws-sdk-go-v2/service/kinesisvideo v1.28.4 h1:dA0yAAnFje99NZqcHc0O/8rduXOe7e5R+qM798lq3s8= -github.com/aws/aws-sdk-go-v2/service/kinesisvideo v1.28.4/go.mod h1:CsOqYUjyz2UVrZ22fiKl+WdCRiXsO7kufv3P816Qo0I= -github.com/aws/aws-sdk-go-v2/service/kms v1.41.2 h1:zJeUxFP7+XP52u23vrp4zMcVhShTWbNO8dHV6xCSvFo= -github.com/aws/aws-sdk-go-v2/service/kms v1.41.2/go.mod h1:Pqd9k4TuespkireN206cK2QBsaBTL6X+VPAez5Qcijk= -github.com/aws/aws-sdk-go-v2/service/lakeformation v1.41.8 h1:WvMhnaMOJU9Q1xVmXDT6TT5V+0CyniFUIVS87XfvzFE= -github.com/aws/aws-sdk-go-v2/service/lakeformation v1.41.8/go.mod h1:NBaw/nPw3v62yWrxUOGkifYKkIeYoocc3O8lgrnvgxU= -github.com/aws/aws-sdk-go-v2/service/lambda v1.72.0 h1:2LerDz2Lz22IDfdpR/RpSZIFoBoAh1tdHUaiUzG2z0k= -github.com/aws/aws-sdk-go-v2/service/lambda v1.72.0/go.mod h1:vahA7MiX/fQE9J5o1PKbgn8KoXz7ogSFLAQQLdLUvM8= 
-github.com/aws/aws-sdk-go-v2/service/launchwizard v1.9.4 h1:zAxrTUh8ffwiunWoichOWc9tVVSzRpmU/dR6plwIiyE= -github.com/aws/aws-sdk-go-v2/service/launchwizard v1.9.4/go.mod h1:Q1KBC3ILbT5cYEAeWT8SSI4vrnNOqAK1mx5ru0Yk1V4= -github.com/aws/aws-sdk-go-v2/service/lexmodelbuildingservice v1.29.4 h1:emmwvPyyB36dp+c6hPHvn5vR+y/C85VUBKSqS+RhpFI= -github.com/aws/aws-sdk-go-v2/service/lexmodelbuildingservice v1.29.4/go.mod h1:y9wPFtue7AFgaZQUefO0j/l2SB7wtkFMlXmcdc/oG5I= -github.com/aws/aws-sdk-go-v2/service/lexmodelsv2 v1.52.1 h1:aLBLIBBVLoKXLjNy5EKh8kFndvawsoxvswsnKg4tXU0= -github.com/aws/aws-sdk-go-v2/service/lexmodelsv2 v1.52.1/go.mod h1:VGLvL1In57M4vlxHoro5WDGwlpzAMyix0XdwffuYOsI= -github.com/aws/aws-sdk-go-v2/service/licensemanager v1.32.0 h1:fyHzYkcQrD9+5gpLSQU5nkaZAIu1ZlsHzZ7MgMpzhic= -github.com/aws/aws-sdk-go-v2/service/licensemanager v1.32.0/go.mod h1:wgEK7i9V/WGv79dhmZOad0Sc3FcJhwgOJ2ihebLuVJY= -github.com/aws/aws-sdk-go-v2/service/lightsail v1.43.4 h1:0WHz7LVS1JHOMaJJ2uc7vvMERopVfNQE1Dil2yu6Wqw= -github.com/aws/aws-sdk-go-v2/service/lightsail v1.43.4/go.mod h1:2VS/H/N3xtI0VxFja/1Aqy1FscPkVyju4Uq9J08L6Ms= -github.com/aws/aws-sdk-go-v2/service/location v1.44.4 h1:oQhdGB0sDiV6DbHz2syreSdDE3IgpxyEYEexs8Fnjhg= -github.com/aws/aws-sdk-go-v2/service/location v1.44.4/go.mod h1:pkmmKXWZEw624lzTiL+3TzQsihEoqQGZpaYbWDjwvGU= -github.com/aws/aws-sdk-go-v2/service/lookoutmetrics v1.32.4 h1:C1BGDdGUvilwtTl0fymQ80x3a/ksZ9HrcDZe5ciHwgM= -github.com/aws/aws-sdk-go-v2/service/lookoutmetrics v1.32.4/go.mod h1:PNQsvph/5J9OZz4ns0mUL1myh+3suq6Maq4J/CewM4w= -github.com/aws/aws-sdk-go-v2/service/m2 v1.21.2 h1:xvYDXyQSCk3G7XTHJ/D+OobIcVxgo1ZABl0mrD16jGc= -github.com/aws/aws-sdk-go-v2/service/m2 v1.21.2/go.mod h1:6Ra+8YlUJvmrgRbiVrgvbB7UGa/8AlX6T9BgIqpDfbA= -github.com/aws/aws-sdk-go-v2/service/macie2 v1.45.4 h1:dUUeyfbXzT+0CIEa2cQT5BYLduPVOjLXbroYF/3DNyk= -github.com/aws/aws-sdk-go-v2/service/macie2 v1.45.4/go.mod h1:pUFG4pQ5NL+jDRwLRwiTCMMavh/+swy3be4NVQjyfx0= 
-github.com/aws/aws-sdk-go-v2/service/mediaconnect v1.40.2 h1:G6QfYIjydoQi5BRw3zkUP35aURuPgiMWsqda/vMSxxw= -github.com/aws/aws-sdk-go-v2/service/mediaconnect v1.40.2/go.mod h1:+JCqmRgWpEB6Gmkfb1UUyKQpkbuMo7KOCyZq3vg/xz4= -github.com/aws/aws-sdk-go-v2/service/mediaconvert v1.75.0 h1:Yw9/tZ1m3rqmcibR1h1TVKF3LKUXdGU1NMXrGzdnrCw= -github.com/aws/aws-sdk-go-v2/service/mediaconvert v1.75.0/go.mod h1:3DstUf6Py/5v01y1jf73ma6c3r+GbkFqyN2n1RTavRo= -github.com/aws/aws-sdk-go-v2/service/medialive v1.76.2 h1:rjwsjFC6SCrOFYbCCY8ULp5fHluwilZrzYVg2LPgeW8= -github.com/aws/aws-sdk-go-v2/service/medialive v1.76.2/go.mod h1:jExKUuHSh/WksIx3Vs3miOAOMpbF8rnvRNgtI+wH/4I= -github.com/aws/aws-sdk-go-v2/service/mediapackage v1.35.4 h1:ohFzCGSbvw7EX9XM8Oxtl9E0Ph2Rasmmuc+Xx8uf6Uo= -github.com/aws/aws-sdk-go-v2/service/mediapackage v1.35.4/go.mod h1:OenjZ9DGOXCsBuowIPErRHTsbGZC5jGBok+4V8teBko= -github.com/aws/aws-sdk-go-v2/service/mediapackagev2 v1.24.0 h1:hGCDJYqDm/XmIjLD0Pe7kcxUSLQZi6/lc6FD6AiVCrY= -github.com/aws/aws-sdk-go-v2/service/mediapackagev2 v1.24.0/go.mod h1:bO3GFTVx6m9gCIErec24aNup05CFQkTaXXl50BUJTDk= -github.com/aws/aws-sdk-go-v2/service/mediapackagevod v1.35.4 h1:JUfHo+paK88NAMjDmHQI5KhVybkduH/hAHbLrzz8guQ= -github.com/aws/aws-sdk-go-v2/service/mediapackagevod v1.35.4/go.mod h1:XW38yIsZNImizG/0v6CdP74lh6GvnZcaFQ9iwusvwMM= -github.com/aws/aws-sdk-go-v2/service/mediastore v1.25.4 h1:Z3sHyG46Hs1ZNUzQ9Z+psJoclcVB/iM6H7TLuOQ4HIA= -github.com/aws/aws-sdk-go-v2/service/mediastore v1.25.4/go.mod h1:kI9Qf+K599ZwzZzVwOqZJRk0gg9cFDots4NFzvfS148= -github.com/aws/aws-sdk-go-v2/service/memorydb v1.27.2 h1:IfwyIeg5ihdo0rgYPd5GLL7HoSleK+D+VKTQ90Ydvb4= -github.com/aws/aws-sdk-go-v2/service/memorydb v1.27.2/go.mod h1:/R8wCXLpL1wyd22zFfGoWei+JayKQGEGSWJ+FDNngu4= -github.com/aws/aws-sdk-go-v2/service/mgn v1.33.4 h1:A6g03tFkhPDXjiofvTxuvW2HH7DkwsdHuLEkGURj2uE= -github.com/aws/aws-sdk-go-v2/service/mgn v1.33.4/go.mod h1:gWtkzOxwXESKQGqsqICO3LIBA6PuOo/ZU4mMrMhxzo8= 
-github.com/aws/aws-sdk-go-v2/service/mq v1.29.2 h1:XhJW/ppQrd2J4T+TCxrv6sZWrSyRlZNYNq586EmSbg0= -github.com/aws/aws-sdk-go-v2/service/mq v1.29.2/go.mod h1:ESMOqV079mlqNnqaxin+UNKvPkn9e9Qew83YQMe+RDY= -github.com/aws/aws-sdk-go-v2/service/mwaa v1.35.3 h1:VcyYhv+EqCW3OwixgYpmNff6eJpSAjXtSjE0WLUogSY= -github.com/aws/aws-sdk-go-v2/service/mwaa v1.35.3/go.mod h1:QeKi1Tch8DJpKfsCNKvuXganHLH3XUt3sn22cfVSd2U= -github.com/aws/aws-sdk-go-v2/service/neptune v1.37.3 h1:T+EQnNg3h2IJbfg9M9OAZEiHO+xhVtpnV1IqtrGVFwI= -github.com/aws/aws-sdk-go-v2/service/neptune v1.37.3/go.mod h1://k6uK6wMNDdiPAjtlT4G+ln/yrRwiZCYRseUuaCpmM= -github.com/aws/aws-sdk-go-v2/service/neptunegraph v1.17.5 h1:4hLlfw7lQ0LfRqgDQTiuJ5l1z56mis4j0ncQjWipa/k= -github.com/aws/aws-sdk-go-v2/service/neptunegraph v1.17.5/go.mod h1:Ex4YrWM8XMVoK4nCZdWLjPA4KwrrVJnE/G8wIiVwRog= -github.com/aws/aws-sdk-go-v2/service/networkfirewall v1.51.0 h1:CCNcctA+JRLbaOjsKSmMpkMhqh7yM9NSkUzGx4m6etM= -github.com/aws/aws-sdk-go-v2/service/networkfirewall v1.51.0/go.mod h1:Sdex/kw/DteUGYsSK3f4UtMBsHi9TBdxtVsJZaCg00k= -github.com/aws/aws-sdk-go-v2/service/networkmanager v1.35.1 h1:+WRM1yPx0OttOwWCg+fC0gIiRaYR3cAMqilFWGfKiJ8= -github.com/aws/aws-sdk-go-v2/service/networkmanager v1.35.1/go.mod h1:3yDKzKKBJPHeKau2EYAD/iFOd1E5XHXEjYOdShdhsgU= -github.com/aws/aws-sdk-go-v2/service/networkmonitor v1.8.4 h1:9I8hXa5RVl48APWv3xzQyj/VbU+V5TOaVj1tRhNbwzw= -github.com/aws/aws-sdk-go-v2/service/networkmonitor v1.8.4/go.mod h1:p2OtzahA9dYaLJB4zf/VMXWdfJhD5N6wHW6QcxUeF0k= -github.com/aws/aws-sdk-go-v2/service/notifications v1.2.5 h1:rSFeBvrGfRA4wAZYh8KaOJ/k0/JCvJr3l07n9tXSiGU= -github.com/aws/aws-sdk-go-v2/service/notifications v1.2.5/go.mod h1:tJBKodWS4tqyFCfsac9WE5Hm43e/IYDZbB2lax/QyGY= -github.com/aws/aws-sdk-go-v2/service/notificationscontacts v1.1.4 h1:8tAWBBRvHcnEucipGelVreFAqisi3Chhc1/ywio7/7U= -github.com/aws/aws-sdk-go-v2/service/notificationscontacts v1.1.4/go.mod h1:ZlMouvvOjPxSEcn08KswFDPzkDNA1339mJhvJHEq8Og= 
-github.com/aws/aws-sdk-go-v2/service/oam v1.18.3 h1:teOWtElLARLOhpYWwupjLbY9j5I/yZ/H1I8jg41An78= -github.com/aws/aws-sdk-go-v2/service/oam v1.18.3/go.mod h1:wGhpdyftHX6/1U4egowHkYdypwBMjpb+KjAAprv6z20= -github.com/aws/aws-sdk-go-v2/service/opensearch v1.46.6 h1:Od+ZuCqT6U0kJ1mjQSmo7FMJ90r1AcgJ/qYRoXG6wQo= -github.com/aws/aws-sdk-go-v2/service/opensearch v1.46.6/go.mod h1:0vIvvobMH8MY/GsR1hdcZPISLp16YwQ18D+cMG/3YEc= -github.com/aws/aws-sdk-go-v2/service/opensearchserverless v1.19.6 h1:bF3ZAHXA0INerCsCw+izReGUn8ZgYl61K77Y/X6xSU8= -github.com/aws/aws-sdk-go-v2/service/opensearchserverless v1.19.6/go.mod h1:FJYhjKoTlazvHMw/o+6UOPgejUyTtri14Z3GKzOCHDk= -github.com/aws/aws-sdk-go-v2/service/organizations v1.39.0 h1:8dPwqXepW7uF1+20KEXZMkVKxHsCUUt6Fc0Zypx9tPg= -github.com/aws/aws-sdk-go-v2/service/organizations v1.39.0/go.mod h1:5MRPiBYQXFmgqmnXbhAVtKk9SebdLGFRmaa8gz1K4cM= -github.com/aws/aws-sdk-go-v2/service/osis v1.15.5 h1:GKITYwhEre2s69oYPdtOKXca7TWf+nJVzIasQCqi+LA= -github.com/aws/aws-sdk-go-v2/service/osis v1.15.5/go.mod h1:Z4CSw4zWtSRQf2YUTFFm8DzccAwxYPZCoCRhgLMH9lE= -github.com/aws/aws-sdk-go-v2/service/outposts v1.51.0 h1:HLhXiT+SOlYunW0KlOUSS2jVy2OUQEdo54umLSf1Bmk= -github.com/aws/aws-sdk-go-v2/service/outposts v1.51.0/go.mod h1:XiGs3zv9ejL2VLM77wccs1qBnsmyAFnWs5Fs6iptvWY= -github.com/aws/aws-sdk-go-v2/service/paymentcryptography v1.19.0 h1:Lwws0exTQXDwOtnvHQgDTA4xOv6Fh3o9SfU0hTCa/gQ= -github.com/aws/aws-sdk-go-v2/service/paymentcryptography v1.19.0/go.mod h1:T1vNF1UfLFdQhuJmDLWlGNG2lo/OzX9xjjUSNnHW1OE= -github.com/aws/aws-sdk-go-v2/service/pcaconnectorad v1.11.3 h1:Kcd4PcPvUaNIffZP1O0Kr4Ki2n6WJJOGKgIUbZxMaDU= -github.com/aws/aws-sdk-go-v2/service/pcaconnectorad v1.11.3/go.mod h1:zkxvVWdC/LpE3YfN6hmdVXA+2NwIzHs5sItf6Obv73o= -github.com/aws/aws-sdk-go-v2/service/pcs v1.6.2 h1:b1iBwCTqJRqpy8FMv/0d049PLwCa3Jk8+UVAh7qIF+0= -github.com/aws/aws-sdk-go-v2/service/pcs v1.6.2/go.mod h1:C3xBB9K56xxpHoxjN3i60zbcwcjpNpJilYIGC87LWGc= 
-github.com/aws/aws-sdk-go-v2/service/pinpoint v1.35.4 h1:gvptUhrWhuZQBPFXei0IKyZHkNjcTUOh1BGL695Eens= -github.com/aws/aws-sdk-go-v2/service/pinpoint v1.35.4/go.mod h1:wXJlxfvejDIFeYJIlZv0djXvLAKY8a81OBH+mNrQcEw= -github.com/aws/aws-sdk-go-v2/service/pinpointsmsvoicev2 v1.20.3 h1:DpqKXU5uVGg+UBGTj6enBcTI41KO/z+fwmCR76rKml4= -github.com/aws/aws-sdk-go-v2/service/pinpointsmsvoicev2 v1.20.3/go.mod h1:klV/eNAO1c5q00dtuTEuLZZkQZgkO/NnkRG7dKRI76U= -github.com/aws/aws-sdk-go-v2/service/pipes v1.19.5 h1:KQnsuly2Ch7DJ9htsCdksI/tqFi7pQ0q69W5G+USmyY= -github.com/aws/aws-sdk-go-v2/service/pipes v1.19.5/go.mod h1:rBlgG8h2mfLBNrY7Z0gz9AYjbFqoqHpMVKUUH5YbBpA= -github.com/aws/aws-sdk-go-v2/service/polly v1.48.4 h1:HIqVbJqUkRNkDB/FfCvvck4GkYz/9X80pz0wt3/aR28= -github.com/aws/aws-sdk-go-v2/service/polly v1.48.4/go.mod h1:Yzmq1/XqHdnsMPyAlIoxnWGlpmkpAwZ4HmoEcBg3nAk= -github.com/aws/aws-sdk-go-v2/service/pricing v1.34.5 h1:VPKHJpSkYojMxD/nN//88/yVauw2lab1q3P6+J0dfvs= -github.com/aws/aws-sdk-go-v2/service/pricing v1.34.5/go.mod h1:21H9QmAqGSjeskZ7iZkuQ9GNuCOR3j2gt2FBct6wMyg= -github.com/aws/aws-sdk-go-v2/service/qbusiness v1.28.0 h1:3QtHatGoArrO2x3IMaKxYYj/tUQht/n18gezryaR7No= -github.com/aws/aws-sdk-go-v2/service/qbusiness v1.28.0/go.mod h1:FqAEEpHUKMoLeaFEJlsVYz0LmTyGzFW1QYH+DbK2WiA= -github.com/aws/aws-sdk-go-v2/service/qldb v1.26.4 h1:wA14NpU1FWcexAceWHCFPEkCtel9IbTrajBNIlxlgc8= -github.com/aws/aws-sdk-go-v2/service/qldb v1.26.4/go.mod h1:x5TT9jzcs+eoh14Xg2kCOix2jn/Je9cLiKUT5JPQnPc= -github.com/aws/aws-sdk-go-v2/service/quicksight v1.87.0 h1:tprZwg0iv7F48Ou6AKJqlmVrifP6wz6DYjNyvBFz5aI= -github.com/aws/aws-sdk-go-v2/service/quicksight v1.87.0/go.mod h1:2qi3N8xyA+QSqxlkwy9+tglelPujRpN0g74BUDqOuFI= -github.com/aws/aws-sdk-go-v2/service/ram v1.30.6 h1:0a/uXcdUNFS1CancSPzVRwl03Ut3lrDSyOJHwvTLmmU= -github.com/aws/aws-sdk-go-v2/service/ram v1.30.6/go.mod h1:qmavcnsJquTI5vYHDnKNNxbcy0C/c0PQZgLysBQwLEE= -github.com/aws/aws-sdk-go-v2/service/rbin v1.22.6 
h1:7tsUhpKIsnK31UTnLER6u5bpYIkeIxCscQvzou6f240= -github.com/aws/aws-sdk-go-v2/service/rbin v1.22.6/go.mod h1:wIGDZidVXHKiPsFtKSKBpmDWt7vEZMcI4onWsQSrX0U= -github.com/aws/aws-sdk-go-v2/service/rds v1.99.1 h1:eiDDf+cf2fAxOF5XaGLlrdCZPsnr5BTcPW55UK92sY4= -github.com/aws/aws-sdk-go-v2/service/rds v1.99.1/go.mod h1:Xe+NMlf/DY/XTXSevASAjGRika9Qt2LnuCDLtos03ms= -github.com/aws/aws-sdk-go-v2/service/redshift v1.54.6 h1:5u13KKciWFrXs3pkiG45cZfjAxCxHHCbhTm/Dg3GRas= -github.com/aws/aws-sdk-go-v2/service/redshift v1.54.6/go.mod h1:CFY4v8m7Nd96aVuFyNU+ujY+1Uim7JrJnAd0jkLf2Zg= -github.com/aws/aws-sdk-go-v2/service/redshiftdata v1.33.3 h1:q3xxlF1/eZjmkfUxn4y2GTaYJTfbXBOIdbVLpfnJHcM= -github.com/aws/aws-sdk-go-v2/service/redshiftdata v1.33.3/go.mod h1:rOBWa0PxH6/EjgXOWWzPK38yYhBPfcnyKdkNdZYhBEk= -github.com/aws/aws-sdk-go-v2/service/redshiftserverless v1.27.4 h1:KIx8wB5F1QjXZ+RPuemTKLHMZgoVojeN9zOhfC+17F0= -github.com/aws/aws-sdk-go-v2/service/redshiftserverless v1.27.4/go.mod h1:mO00EfrGvLQ9TE+tQb6Y2CToVq//1jQHbQN4LD12zDw= -github.com/aws/aws-sdk-go-v2/service/rekognition v1.47.2 h1:jhI8d308+/rJ0/x/LIfBWC1KU3pcNxx3mc66HVbUddY= -github.com/aws/aws-sdk-go-v2/service/rekognition v1.47.2/go.mod h1:P1V4mtg5tYOQl0nGcDh4hP2KyIVowqz6YgLcehtAkQo= -github.com/aws/aws-sdk-go-v2/service/resiliencehub v1.30.4 h1:4r+dMPXSz/8/V1ZV7TXb9sT71z7iAcc0Y4wmJVjPLgc= -github.com/aws/aws-sdk-go-v2/service/resiliencehub v1.30.4/go.mod h1:E2eHCs6AP0Cbd/ybgu5o6GQzTPDDcZsyxufzbQOp2bY= -github.com/aws/aws-sdk-go-v2/service/resourceexplorer2 v1.17.6 h1:WYnJp7XLZv6vJ2Axgcn47DumaXgPSkWxKp+8hL5g5ZI= -github.com/aws/aws-sdk-go-v2/service/resourceexplorer2 v1.17.6/go.mod h1:rMeCGU1Fk8JtLMf9kWQxtaUaRDEGOJkGNedJuayjFTo= -github.com/aws/aws-sdk-go-v2/service/resourcegroups v1.29.3 h1:ydDDSNE36VbioP+xbfab1nYP5SDTOR5V8ZcUvZBImr4= -github.com/aws/aws-sdk-go-v2/service/resourcegroups v1.29.3/go.mod h1:pXO3jDiaYQ49dzcDP/Mtz1VoTLEtqjnuINWeJXv+ktk= -github.com/aws/aws-sdk-go-v2/service/resourcegroupstaggingapi v1.26.6 
h1:PwbxovpcJvb25k019bkibvJfCpCmIANOFrXZIFPmRzk= -github.com/aws/aws-sdk-go-v2/service/resourcegroupstaggingapi v1.26.6/go.mod h1:Z4xLt5mXspLKjBV92i165wAJ/3T6TIv4n7RtIS8pWV0= -github.com/aws/aws-sdk-go-v2/service/rolesanywhere v1.17.5 h1:fYXMgp0V6C5ndZosonHNh8J/xs1aBMfz5qANMlphHV4= -github.com/aws/aws-sdk-go-v2/service/rolesanywhere v1.17.5/go.mod h1:fGrCQme6bxmDiu+Ppun1qOWmoNSIMbIy5UKFIOaTF8o= -github.com/aws/aws-sdk-go-v2/service/route53 v1.53.0 h1:UglIEyurCqfzZkjNdYAuXUGFu/FNWMKP5eorzggvXe8= -github.com/aws/aws-sdk-go-v2/service/route53 v1.53.0/go.mod h1:wi1naoiPnCQG3cyjsivwPON1ZmQt/EJGxFqXzubBTAw= -github.com/aws/aws-sdk-go-v2/service/route53domains v1.29.4 h1:8qeQjFNXdLd8+4YNVspNHjUrc0wmfrUievd+fOde838= -github.com/aws/aws-sdk-go-v2/service/route53domains v1.29.4/go.mod h1:/dfYzVaLi84gzj8D7RXrF7KIgOBJ4Zk7jp7gQVltBTg= -github.com/aws/aws-sdk-go-v2/service/route53profiles v1.5.9 h1:zDOaPWYn4k8yY8pRQUmJQUACPzRNu8ChPMvCA96XWlg= -github.com/aws/aws-sdk-go-v2/service/route53profiles v1.5.9/go.mod h1:f/B7apleFy+Nxs6wY0pzA9UbIx0ldX30ZMvy1SO7tAU= -github.com/aws/aws-sdk-go-v2/service/route53recoverycontrolconfig v1.27.3 h1:W7llNxOpVt0M0ToRkGXUs5UjMkntd6+DDesE5A4YXt8= -github.com/aws/aws-sdk-go-v2/service/route53recoverycontrolconfig v1.27.3/go.mod h1:yXZ+EM/v38MqqCHl2fTS7Ftv7vLuwxkR4SG6qAkKCdQ= -github.com/aws/aws-sdk-go-v2/service/route53recoveryreadiness v1.22.4 h1:mLYxsH/6tzncWzXTMt0SRp3BradtNrlM1va9Qa2AfQw= -github.com/aws/aws-sdk-go-v2/service/route53recoveryreadiness v1.22.4/go.mod h1:9GWWA+r8JCyTMm3X3xUBJxU7o/+v4SMlksksVyMEmkc= -github.com/aws/aws-sdk-go-v2/service/route53resolver v1.36.0 h1:gkR6ADqZBV4RzK+FZVI818Rula1i85/G3JlGnn6FDY0= -github.com/aws/aws-sdk-go-v2/service/route53resolver v1.36.0/go.mod h1:lQW5vqGKTvNpIJ0DVG7dVyJ02OZnSlcLFHgZUpZhEw8= -github.com/aws/aws-sdk-go-v2/service/rum v1.24.4 h1:PF+oU9cTdUFQ3nW+A2qarZQF5txhjRgu8xUotk6y2BA= -github.com/aws/aws-sdk-go-v2/service/rum v1.24.4/go.mod h1:0E3Cb8i2piw7fqp157xGd9tKYbc6r+V2UW7sKzNbw/k= 
-github.com/aws/aws-sdk-go-v2/service/s3 v1.83.0 h1:5Y75q0RPQoAbieyOuGLhjV9P3txvYgXv2lg0UwJOfmE= -github.com/aws/aws-sdk-go-v2/service/s3 v1.83.0/go.mod h1:kUklwasNoCn5YpyAqC/97r6dzTA1SRKJfKq16SXeoDU= -github.com/aws/aws-sdk-go-v2/service/s3control v1.60.0 h1:uVNDtWESoQ5Mm+O6FERGOaxLxcmUJ/gj5/2zmdznTsQ= -github.com/aws/aws-sdk-go-v2/service/s3control v1.60.0/go.mod h1:uZDSKJgJ3w3MOjtuvrYMTI7APdGNycg7srBGzaclI+s= -github.com/aws/aws-sdk-go-v2/service/s3outposts v1.29.4 h1:oZjDliGfblCLGHBlw1CTTHaVYB6MkD+ss5AxhqoX1K0= -github.com/aws/aws-sdk-go-v2/service/s3outposts v1.29.4/go.mod h1:E2HKzJfiZE7AfaaPKwKyuHsFCT6CMQx+xA+RBfvNMKY= -github.com/aws/aws-sdk-go-v2/service/s3tables v1.5.0 h1:Y4Jkb371eWF3VDKppy2OBFJqBm+wEXsmkHu9NB5Xvo8= -github.com/aws/aws-sdk-go-v2/service/s3tables v1.5.0/go.mod h1:fTauvBZjNMRnXoEDSo+FFAW0BuLiWpilnB7dz8lnqhY= -github.com/aws/aws-sdk-go-v2/service/sagemaker v1.200.1 h1:EdANB2MVaCwY/YPKyqsdBgHo8DgsGb+Zp6qo/6zfHOw= -github.com/aws/aws-sdk-go-v2/service/sagemaker v1.200.1/go.mod h1:uRG58IrTnRkk83JKfW9BgMpU1MKuHtcwdiBfQyC7agw= -github.com/aws/aws-sdk-go-v2/service/scheduler v1.13.10 h1:rehUqeN8NgQew7PvE/6XeaVyeDXj9fVhM2FMt/PNOM0= -github.com/aws/aws-sdk-go-v2/service/scheduler v1.13.10/go.mod h1:6g2NPTPm0cx1YV1zYJbWXz80wn+xyX0JSBixqRSC99o= -github.com/aws/aws-sdk-go-v2/service/schemas v1.29.5 h1:gCVa2/ufz9Wus7Tw3flUsqwUMyk8oEuTPDcYX9xWuVk= -github.com/aws/aws-sdk-go-v2/service/schemas v1.29.5/go.mod h1:tQVkDFNskR9bKFWpMUtgOMNM1hpL3oAuPRzESx/z73U= -github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.35.7 h1:d+mnMa4JbJlooSbYQfrJpit/YINaB30JEVgrhtjZneA= -github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.35.7/go.mod h1:1X1NotbcGHH7PCQJ98PsExSxsJj/VWzz8MfFz43+02M= -github.com/aws/aws-sdk-go-v2/service/securityhub v1.58.0 h1:5phjeFKLN8b67+CztpBzG9mUOPrsMVryJ9OToMOL21E= -github.com/aws/aws-sdk-go-v2/service/securityhub v1.58.0/go.mod h1:umtmPOd8goFeECUPe2Y1wigFIVrjwLR6GP5+eWmnUBw= -github.com/aws/aws-sdk-go-v2/service/securitylake v1.20.5 
h1:Cqeb3ccjhi5YEOlqYP3BLtEcYM+SiZeKgPs2z6FLlvM= -github.com/aws/aws-sdk-go-v2/service/securitylake v1.20.5/go.mod h1:3TwtWEaAiv848bYEEiH9Yg79y5bXKyEDytGh7KUOeS0= -github.com/aws/aws-sdk-go-v2/service/serverlessapplicationrepository v1.25.4 h1:7eJSfME7No7WvRNFJI5o9fkBOOugNLXFqfn6AHHHguo= -github.com/aws/aws-sdk-go-v2/service/serverlessapplicationrepository v1.25.4/go.mod h1:9noDAe04msoEwCStlekEqsxzSj44udPquS2Zen4XS0k= -github.com/aws/aws-sdk-go-v2/service/servicecatalog v1.34.2 h1:24S4nRk43CjgWiOlzHDv42q+PyFBZh35q4hgT7d5+6E= -github.com/aws/aws-sdk-go-v2/service/servicecatalog v1.34.2/go.mod h1:O1PtvWmaeH2OMbGOpP0M717VrEtEm3L8s4t5Ehi844I= -github.com/aws/aws-sdk-go-v2/service/servicecatalogappregistry v1.31.4 h1:5LV110/+dsFA3aut0evkDAMxqYOEziZrmQnWo3+2vBQ= -github.com/aws/aws-sdk-go-v2/service/servicecatalogappregistry v1.31.4/go.mod h1:X4EuhIl3vZvJ8fIRTHOvFGblAeUnnZ9bsS5Awlyr1cU= -github.com/aws/aws-sdk-go-v2/service/servicediscovery v1.35.7 h1:1eaP4/444jrv04HhJdwTHtgnyxWgxwdLjSYBGq+oMB4= -github.com/aws/aws-sdk-go-v2/service/servicediscovery v1.35.7/go.mod h1:czoZQabc2chvmV/ak4oGSNR9CbcUw2bef3tatmwtoIA= -github.com/aws/aws-sdk-go-v2/service/servicequotas v1.28.3 h1:FDzX6WOfsz45IVvbP5O987/hdzjciDPek+AO9BOfDXk= -github.com/aws/aws-sdk-go-v2/service/servicequotas v1.28.3/go.mod h1:y10lwaaUXvDg/W5tn2WN5WQEMw/2T4tg7AW5jISZVw0= -github.com/aws/aws-sdk-go-v2/service/ses v1.30.5 h1:MGqdFy1jSw9rBN5qxLpeFGtwLTev1LIbNX7v3mVPZ2U= -github.com/aws/aws-sdk-go-v2/service/ses v1.30.5/go.mod h1:Zftob00wu8O9xWSN1pdczm1U+E6yXk9znf+4lkt+3aQ= -github.com/aws/aws-sdk-go-v2/service/sesv2 v1.46.0 h1:uNAn3m1yFv+7j+tbsAh36kG8JvZlUgZbzdQPSC6W0m4= -github.com/aws/aws-sdk-go-v2/service/sesv2 v1.46.0/go.mod h1:dy6XqJdtxnu7f9sQVHFMnH1OSlAS62R5feiHQ8WsI4s= -github.com/aws/aws-sdk-go-v2/service/sfn v1.35.7 h1:W5ZFACjUxkIjjtMGG21GhJ3uJfV7ejEsOkJTQHMHrEY= -github.com/aws/aws-sdk-go-v2/service/sfn v1.35.7/go.mod h1:x82j2Ux2Qr9Qzdb47peCIIa8agq7z3k0Zf4TWHEAxjo= 
-github.com/aws/aws-sdk-go-v2/service/shield v1.30.4 h1:B0NxDxP+NI18kFZiMwUUKVSWEcBwviWjTl4KMfWa3X8= -github.com/aws/aws-sdk-go-v2/service/shield v1.30.4/go.mod h1:07i7GZpF9rdMNRPkfUa3ymRq63Liej297OCz6wiWmiM= -github.com/aws/aws-sdk-go-v2/service/signer v1.27.4 h1:nU51n8zv3mLn9wxZ0cxkToQRsrnqNLg5xJ0j//GF58c= -github.com/aws/aws-sdk-go-v2/service/signer v1.27.4/go.mod h1:6bQTKM4Ryk9vKxVd4fc7uNAw2TI+hfY+lMhkmmEmnWw= -github.com/aws/aws-sdk-go-v2/service/sns v1.34.7 h1:OBuZE9Wt8h2imuRktu+WfjiTGrnYdCIJg8IX92aalHE= -github.com/aws/aws-sdk-go-v2/service/sns v1.34.7/go.mod h1:4WYoZAhHt+dWYpoOQUgkUKfuQbE6Gg/hW4oXE0pKS9U= -github.com/aws/aws-sdk-go-v2/service/sqs v1.38.8 h1:80dpSqWMwx2dAm30Ib7J6ucz1ZHfiv5OCRwN/EnCOXQ= -github.com/aws/aws-sdk-go-v2/service/sqs v1.38.8/go.mod h1:IzNt/udsXlETCdvBOL0nmyMe2t9cGmXmZgsdoZGYYhI= -github.com/aws/aws-sdk-go-v2/service/ssm v1.60.0 h1:YuMspnzt8uHda7a6A/29WCbjMJygyiyTvq480lnsScQ= -github.com/aws/aws-sdk-go-v2/service/ssm v1.60.0/go.mod h1:IyVabkWrs8SNdOEZLyFFcW9bUltV4G6OQS0s6H20PHg= -github.com/aws/aws-sdk-go-v2/service/ssmcontacts v1.27.4 h1:HhwkyHRVIhGsBnezpwwH2wyrZQKooN9mYuW15/yM8rY= -github.com/aws/aws-sdk-go-v2/service/ssmcontacts v1.27.4/go.mod h1:bVvmYEJmT2xWBx269zEAWlQxJfkcfqyvB1JFjSRrzFc= -github.com/aws/aws-sdk-go-v2/service/ssmincidents v1.35.4 h1:u8qJueBRnlcWupt1Z6zXFDcHa4eGCV9REex7r9sQnhM= -github.com/aws/aws-sdk-go-v2/service/ssmincidents v1.35.4/go.mod h1:TwlNzbOPcE2NBuNLgZ1B6VfYJ0JG8WkEwOhKidrskW0= -github.com/aws/aws-sdk-go-v2/service/ssmquicksetup v1.4.4 h1:WcyN7tIJrpezkcj7c0WzlbjhOo6ojDa8QL5+jXvSZ24= -github.com/aws/aws-sdk-go-v2/service/ssmquicksetup v1.4.4/go.mod h1:kuwVH10c0+zEubkw7doHtNK6y5hsf6smmsRFBmK13Lo= -github.com/aws/aws-sdk-go-v2/service/ssmsap v1.20.4 h1:bzHaYrE7qNBohcfbhlXrBnV0/hk2J4fPysDxYwLCKok= -github.com/aws/aws-sdk-go-v2/service/ssmsap v1.20.4/go.mod h1:Z4RGgCEebqIsIhj6KJzTCJR7PmWwO9luAYplvGghIH0= -github.com/aws/aws-sdk-go-v2/service/sso v1.25.5 h1:AIRJ3lfb2w/1/8wOOSqYb9fUKGwQbtysJ2H1MofRUPg= 
-github.com/aws/aws-sdk-go-v2/service/sso v1.25.5/go.mod h1:b7SiVprpU+iGazDUqvRSLf5XmCdn+JtT1on7uNL6Ipc= -github.com/aws/aws-sdk-go-v2/service/ssoadmin v1.31.2 h1:3dryJFNlYa+kgSlHLAcFpQQOeE8g+h2XX3NoiLeB8Yw= -github.com/aws/aws-sdk-go-v2/service/ssoadmin v1.31.2/go.mod h1:EZSMWhfY55eXlAhKcQmkHMrRqwhOXWOiFcW9jrehv00= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.3 h1:BpOxT3yhLwSJ77qIY3DoHAQjZsc4HEGfMCE4NGy3uFg= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.3/go.mod h1:vq/GQR1gOFLquZMSrxUK/cpvKCNVYibNyJ1m7JrU88E= -github.com/aws/aws-sdk-go-v2/service/storagegateway v1.38.0 h1:VJuHn5d3gzArmJetVkngTKs0RxY6WhlWXt6RkYDPblA= -github.com/aws/aws-sdk-go-v2/service/storagegateway v1.38.0/go.mod h1:qtpDf/mpKyH0BYUVwct88hqiA9/znvnlxpoYcEZ0+Hw= -github.com/aws/aws-sdk-go-v2/service/sts v1.34.0 h1:NFOJ/NXEGV4Rq//71Hs1jC/NvPs1ezajK+yQmkwnPV0= -github.com/aws/aws-sdk-go-v2/service/sts v1.34.0/go.mod h1:7ph2tGpfQvwzgistp2+zga9f+bCjlQJPkPUmMgDSD7w= -github.com/aws/aws-sdk-go-v2/service/swf v1.28.6 h1:tKh4RXgqwnIV5+2LW53y0LAA/+sWUJSsSBUZqEQC7/I= -github.com/aws/aws-sdk-go-v2/service/swf v1.28.6/go.mod h1:uIxNj0mirk5vpL/vW1Ko/UwyxOigm+BAVgsM+l2psOA= -github.com/aws/aws-sdk-go-v2/service/synthetics v1.35.3 h1:CuUOM3i9r2U/kpqJDQj8p3Hi0if2N44gl5+qPXImpTM= -github.com/aws/aws-sdk-go-v2/service/synthetics v1.35.3/go.mod h1:xo1aJ/YLmmEMwVU9aOvN4E7jOKgoAAr+6VDAJv+MNl0= -github.com/aws/aws-sdk-go-v2/service/taxsettings v1.12.2 h1:WZPhlC3G/mYx99l/QHl95U/Ue+al6UfPFdTbhbbiRUs= -github.com/aws/aws-sdk-go-v2/service/taxsettings v1.12.2/go.mod h1:A77L7LITMEWcVhGBNUyJ0RZLNVdhTIkhfUSQiS85XZM= -github.com/aws/aws-sdk-go-v2/service/timestreaminfluxdb v1.10.5 h1:xmm2T4HJOkJL1SJwNh6xMEm6ocjE1Yh9YZTChHu98DY= -github.com/aws/aws-sdk-go-v2/service/timestreaminfluxdb v1.10.5/go.mod h1:L4tT63t++iYucM3oLQ5aUQcbvgunzP/xg+ztYfOd1EI= -github.com/aws/aws-sdk-go-v2/service/timestreamquery v1.31.2 h1:CjrXUjlaUS5MjPH6KMpZiFd3VNKDsgxQRSviE4TqWWc= -github.com/aws/aws-sdk-go-v2/service/timestreamquery 
v1.31.2/go.mod h1:HyCb70yWplefVU5tLdVevHVv1fK6XS11cltC8KX0B0s= -github.com/aws/aws-sdk-go-v2/service/timestreamwrite v1.31.2 h1:HF3f6gSaqLSvqsUVIV0yIPucA9LInGi0V1hK3zUAgxI= -github.com/aws/aws-sdk-go-v2/service/timestreamwrite v1.31.2/go.mod h1:IZWUn9UPCdqPKM+72yj4HxXMXpOCpP7vqW8dctO5Jlo= -github.com/aws/aws-sdk-go-v2/service/transcribe v1.47.0 h1:ASsg4ST0Lgr08AY5nT93g5/BrxJuezA7jI0XKiVK0y0= -github.com/aws/aws-sdk-go-v2/service/transcribe v1.47.0/go.mod h1:ezb4DgeVVNn4S7Wy8eRQ8sy+QHRtzbW7SAKHxZy4ndY= -github.com/aws/aws-sdk-go-v2/service/transfer v1.61.0 h1:5OkUYsglfPicnhv2WAgAzh4gR32iPiNZ2dPMtuzXCDE= -github.com/aws/aws-sdk-go-v2/service/transfer v1.61.0/go.mod h1:9RJji4Q+u/gu2Te56e+CUpUM2UTCt3sMxzLMXYSJ5Ok= -github.com/aws/aws-sdk-go-v2/service/verifiedpermissions v1.24.2 h1:d43lKGSX+AWhq5a8vpVuJNekcR5MtmB2JU22eaZZDRM= -github.com/aws/aws-sdk-go-v2/service/verifiedpermissions v1.24.2/go.mod h1:4Q5Mgk7BLvRrhwElOeMUlnx3K92I7b8HRNOhyTuousM= -github.com/aws/aws-sdk-go-v2/service/vpclattice v1.14.4 h1:01e650ADK6nHoSN4J/sFlblCXSiFITGHrkGPK+xG+Yw= -github.com/aws/aws-sdk-go-v2/service/vpclattice v1.14.4/go.mod h1:2gAi7UItKOn/1ccFbqRU+6ZtPo9b3ldnDRe9XqYtdYw= -github.com/aws/aws-sdk-go-v2/service/waf v1.26.4 h1:Fgu+w2R0151xwueAlfPYVaXlqWBi2TUUwfsUJrs++34= -github.com/aws/aws-sdk-go-v2/service/waf v1.26.4/go.mod h1:pSLiROd8QQ8WK5uEOOccapEjDwp1AOC5Ywt4d5D3I3w= -github.com/aws/aws-sdk-go-v2/service/wafregional v1.26.4 h1:+J6iG0+kp1vj5g5KhQHbZDHUidbwFK8LTUlI4t5tIL0= -github.com/aws/aws-sdk-go-v2/service/wafregional v1.26.4/go.mod h1:k6xElMGoSjEbhEpFJ/g+oP8f0/Eprf43xDr0kNG9Dug= -github.com/aws/aws-sdk-go-v2/service/wafv2 v1.63.1 h1:FqB3NmVKnZ/2oS9uv1AWunzCusEqSp9USs9BGx4EwSw= -github.com/aws/aws-sdk-go-v2/service/wafv2 v1.63.1/go.mod h1:zclPwcQ0Ju4OLYCUtaIp+BA5K5KdxjeBLpKd1HsMVqM= -github.com/aws/aws-sdk-go-v2/service/wellarchitected v1.35.4 h1:1oNo99IUfAPoMV/g1apd+J5QuYAunU788Wn4FmvzYt0= -github.com/aws/aws-sdk-go-v2/service/wellarchitected v1.35.4/go.mod 
h1:dkxQxiW/xGedseew2TBbkzEHQ6UHx1Op4ZiSv8dbuNg= -github.com/aws/aws-sdk-go-v2/service/workspaces v1.58.0 h1:NknK5ksEdnfMdPkhPedhoOQzb5bhd4/5ZNaYJTJRfaM= -github.com/aws/aws-sdk-go-v2/service/workspaces v1.58.0/go.mod h1:zzXFHVKbJU2FcSWXP2so1X/Ght2lrOrXUPt9M/kFOtI= -github.com/aws/aws-sdk-go-v2/service/workspacesweb v1.27.4 h1:XomoEUvUlwFKpmJ6qejWT+Gflkhe0WmSU3x5JGhGFYw= -github.com/aws/aws-sdk-go-v2/service/workspacesweb v1.27.4/go.mod h1:O46IBclbuIwlp3plLPOF+HHBDJdIDBqMycf6GPrISuE= -github.com/aws/aws-sdk-go-v2/service/xray v1.31.7 h1:zJL4lRhsNpSYggXij+GBfDmEVT809ElOkhElTKoxeTw= -github.com/aws/aws-sdk-go-v2/service/xray v1.31.7/go.mod h1:GJrs2NbUJi1iUwUjMC+OwC7H24YmDwyJVRUKzVIgA0c= -github.com/aws/smithy-go v1.22.4 h1:uqXzVZNuNexwc/xrh6Tb56u89WDlJY6HS+KC0S4QSjw= -github.com/aws/smithy-go v1.22.4/go.mod h1:t1ufH5HMublsJYulve2RKmHDC15xu1f26kHCp/HgceI= -github.com/beevik/etree v1.5.1 h1:TC3zyxYp+81wAmbsi8SWUpZCurbxa6S8RITYRSkNRwo= -github.com/beevik/etree v1.5.1/go.mod h1:gPNJNaBGVZ9AwsidazFZyygnd+0pAU38N4D+WemwKNs= +github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.9 h1:w9LnHqTq8MEdlnyhV4Bwfizd65lfNCNgdlNC6mM5paE= +github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.9/go.mod h1:LGEP6EK4nj+bwWNdrvX/FnDTFowdBNwcSPuZu/ouFys= +github.com/aws/aws-sdk-go-v2/service/accessanalyzer v1.44.6 h1:OXJuITvU8R/Npo5Wv2dgIFBYROm42kXAD16rk8qirs8= +github.com/aws/aws-sdk-go-v2/service/accessanalyzer v1.44.6/go.mod h1:PvvoZ5HHC38O5xDu4yKotP0ZLvnlbaJbHFOD8vppMQ8= +github.com/aws/aws-sdk-go-v2/service/account v1.28.6 h1:eEtL3V2CHjO4IDRwBelx1sZLRrz7vAsNUrDIb967FkI= +github.com/aws/aws-sdk-go-v2/service/account v1.28.6/go.mod h1:qi8Mmk5TSynuGi1KWkzrFIYfiKSaCv/lIxPPyPOlVfs= +github.com/aws/aws-sdk-go-v2/service/acm v1.37.6 h1:48oGbMpBSzihrU145gpjrxySIs+VNGCXu9kLTLAdJJg= +github.com/aws/aws-sdk-go-v2/service/acm v1.37.6/go.mod h1:4Xgg9iUMFMpWd19UokmUwBCU6fqNJ7LPo11YYt3/xl4= +github.com/aws/aws-sdk-go-v2/service/acmpca v1.44.5 h1:0aROQbnQ6nGlI1idLYuxx/mv4s+2I02RFyOA5MOlMQk= 
+github.com/aws/aws-sdk-go-v2/service/acmpca v1.44.5/go.mod h1:1whQS1vMFP9KQPLTc9dtqnJGjgJ6Sb80bkPoN8CPQ2k= +github.com/aws/aws-sdk-go-v2/service/amp v1.40.3 h1:pAbmvpyEwOX5OphEvNCjDMTZS+I4mNOBBK5Z6Ga6Zgo= +github.com/aws/aws-sdk-go-v2/service/amp v1.40.3/go.mod h1:Kaiyw5xthjYIWNvilHLlRiNwZa3owNXd+YgJs53hzDE= +github.com/aws/aws-sdk-go-v2/service/amplify v1.37.5 h1:mCxlw2Vuh5XZP6qwuUxr7bXWZ7drfbquJieS8VCIb+k= +github.com/aws/aws-sdk-go-v2/service/amplify v1.37.5/go.mod h1:HeH9qb/ftrO1k18S+BoWN3P/p83yS06x/Opny3ATXDs= +github.com/aws/aws-sdk-go-v2/service/apigateway v1.35.6 h1:v8RqEs++cq7uAYUusuwrHLNEFACv0nlICCBwV11p5sY= +github.com/aws/aws-sdk-go-v2/service/apigateway v1.35.6/go.mod h1:5EVcku5uDhMks5w1FwPL8hLKqJwCgIIbuF5th+vGQhE= +github.com/aws/aws-sdk-go-v2/service/apigatewayv2 v1.32.6 h1:k78ulhtPtIqMiZqq8bPkpJlx66VN8DmDIeRgrYpzehc= +github.com/aws/aws-sdk-go-v2/service/apigatewayv2 v1.32.6/go.mod h1:A5+OX0k1IIqRR4jR+zPgHpzKmEoLfpyY2xIrrJj8O98= +github.com/aws/aws-sdk-go-v2/service/appconfig v1.42.6 h1:e81OBhEpYUKh7Wg3hHiRE5zHpYPTgB4Sja0YWCBMivU= +github.com/aws/aws-sdk-go-v2/service/appconfig v1.42.6/go.mod h1:3lk8tz+bmjQEPEmdDF7zTDFHlqRFdn0zZvTa2cIe0r8= +github.com/aws/aws-sdk-go-v2/service/appfabric v1.16.6 h1:L50VB8yUNNequjYNhUm+MCjFCxfN6KMaIcpLgo679y8= +github.com/aws/aws-sdk-go-v2/service/appfabric v1.16.6/go.mod h1:v0gYYyI3wXm6R1nxhW068lcxbmh8wTBKIPafxXI7rf4= +github.com/aws/aws-sdk-go-v2/service/appflow v1.50.6 h1:Q30ADINfdo4matzYAGq4rPuQrjQKinuvdaHGnR9/Ksk= +github.com/aws/aws-sdk-go-v2/service/appflow v1.50.6/go.mod h1:2W0SilTCqSFglsMuZYKFmuZCOaBxGLnWvpik/GP+bT8= +github.com/aws/aws-sdk-go-v2/service/appintegrations v1.36.6 h1:kwnjEvDnDXPGC2yGF3ygvNs8EGnZFxzsX6bKWFA+j4c= +github.com/aws/aws-sdk-go-v2/service/appintegrations v1.36.6/go.mod h1:DqUWf8yC60AYaDfwq1zMLRxP9uT5R41FZwpZzIgEsWU= +github.com/aws/aws-sdk-go-v2/service/applicationautoscaling v1.40.5 h1:0t/Dr8fwxkc5fkhoeuYRpGiPowbLKi424s3oeLCusRU= 
+github.com/aws/aws-sdk-go-v2/service/applicationautoscaling v1.40.5/go.mod h1:NUciQYiEOln3pubY8iovZkWZdJrBTnoPPW3JTIk9QAI= +github.com/aws/aws-sdk-go-v2/service/applicationinsights v1.34.5 h1:HWvL7MWRel0n6W5msGcS2BllKX8OEH168656YH8IRNg= +github.com/aws/aws-sdk-go-v2/service/applicationinsights v1.34.5/go.mod h1:9jEkcPD8H2x5XTr4JKfuftpz4EoKAhrom5lQzLLCI6I= +github.com/aws/aws-sdk-go-v2/service/applicationsignals v1.16.0 h1:k5Gds31CrXttYeulwB6VjflGSXnRegRG2jKiWLimgHo= +github.com/aws/aws-sdk-go-v2/service/applicationsignals v1.16.0/go.mod h1:dB7ydHt6geh960yqkPjZZfA+qqLK577b0jifWU1ahy0= +github.com/aws/aws-sdk-go-v2/service/appmesh v1.34.6 h1:Wupdnc/3bA0GPzEEZInLvu4FHEmkHNHsG/xahSggcGw= +github.com/aws/aws-sdk-go-v2/service/appmesh v1.34.6/go.mod h1:yHte17Vasn4Ows3YO5zLC1MWX2Dw8by5KvgDm6XGSm8= +github.com/aws/aws-sdk-go-v2/service/apprunner v1.38.7 h1:gJCGw8gwiTYjLeTpCdwHFE60SRPN7tH2m0ScVYUZ4+Y= +github.com/aws/aws-sdk-go-v2/service/apprunner v1.38.7/go.mod h1:UiPYznwe6WwKIOwLlWgrjdKvfOVVQ7eaRzf+OC4BzM4= +github.com/aws/aws-sdk-go-v2/service/appstream v1.50.0 h1:W5ZoBalgNd/kh64XbSKhxzX49MsTuhJwoHsuT6fwcic= +github.com/aws/aws-sdk-go-v2/service/appstream v1.50.0/go.mod h1:aPmkM5vZVr/vBeP+czUKCYWAlewa3QCaCZGh6gWZfm8= +github.com/aws/aws-sdk-go-v2/service/appsync v1.51.6 h1:YsjIVoljoczbCUYFzTUhNkYjJlEreqXeuicq2wyvO9A= +github.com/aws/aws-sdk-go-v2/service/appsync v1.51.6/go.mod h1:j4cEEClULtta5LEg7OgxqGTz4k0ipCAvue7P7GGRLQI= +github.com/aws/aws-sdk-go-v2/service/arcregionswitch v1.2.8 h1:01m2bIxzwrVbFB6XADodX2JwSSlpKfarYZWczIdYNSU= +github.com/aws/aws-sdk-go-v2/service/arcregionswitch v1.2.8/go.mod h1:h5EaGwLxZGbeUEkwE9BWg+4lPwv42YgTqqQ/SH2bbB0= +github.com/aws/aws-sdk-go-v2/service/athena v1.55.6 h1:OC3hqQ29uyNsftVHwdbfHpDopEBViNFypjy9N5eDsMw= +github.com/aws/aws-sdk-go-v2/service/athena v1.55.6/go.mod h1:I1paYl0qAaXc+6AmLtylg4ApBC0/HEs5myhVIcy4Nng= +github.com/aws/aws-sdk-go-v2/service/auditmanager v1.45.6 h1:QD02o1P75R198cYX9Nt3flwM5HmXxsmWAhG+8Wef2ig= 
+github.com/aws/aws-sdk-go-v2/service/auditmanager v1.45.6/go.mod h1:ZZh2P2Vy29z/3Occ3o40d0P4IuwkaZJPKrSD1gukI6Y= +github.com/aws/aws-sdk-go-v2/service/autoscaling v1.59.3 h1:2tVkkifL19ZmmCRJyOudUuTNRzA1SYN7D32iEkB8CvE= +github.com/aws/aws-sdk-go-v2/service/autoscaling v1.59.3/go.mod h1:/Utcw7rzRwiW7C9ypYInnEtgyU7Nr8eG3+RFUUvuE1o= +github.com/aws/aws-sdk-go-v2/service/autoscalingplans v1.29.5 h1:YUHawBzbCFAqJzMjyIwHYRNyCJ2cF3cNmqZZcm2/Zqc= +github.com/aws/aws-sdk-go-v2/service/autoscalingplans v1.29.5/go.mod h1:3YNMqOSRPyr23RKCv8RRQz2B2xflT/nk1bZuogMnO8g= +github.com/aws/aws-sdk-go-v2/service/backup v1.49.0 h1:7hWlpBuCnlElrrJps5gmvr1zjPsNSXDdy8Qv2vYfEJI= +github.com/aws/aws-sdk-go-v2/service/backup v1.49.0/go.mod h1:5er5+2GO9YgfAvZ9VqDSf9HKrwKAtjVA5Fm83eXtkfM= +github.com/aws/aws-sdk-go-v2/service/batch v1.57.10 h1:C9unOW8pT063iGGpnNWonK+iRMnVR86iPnYdFaRmnqA= +github.com/aws/aws-sdk-go-v2/service/batch v1.57.10/go.mod h1:fl2yc8ac4mmMPh3ByJ6LRgdL25iPcQ3cUqhZl4R5chE= +github.com/aws/aws-sdk-go-v2/service/bcmdataexports v1.11.8 h1:4O1siNWkg2oMPNzma7AR1GZCQIkH233tl9bTtOaweUg= +github.com/aws/aws-sdk-go-v2/service/bcmdataexports v1.11.8/go.mod h1:Mm4OxLblLwMOAZjrNfDrltCqO/RKSa516DNDrapaZyw= +github.com/aws/aws-sdk-go-v2/service/bedrock v1.48.0 h1:PrP3JDj8+pMfjj6spKZ1Vwf9iSZC/+0NZYRBNXBu7hc= +github.com/aws/aws-sdk-go-v2/service/bedrock v1.48.0/go.mod h1:3sUHFSHdoib4v7JdqEGgxD2sIdTDikr4IpjBOgUAa0g= +github.com/aws/aws-sdk-go-v2/service/bedrockagent v1.50.6 h1:SQcm5+AnLYVNJP0K8yFRWTfEifhQenCaF+aPfqXf+fk= +github.com/aws/aws-sdk-go-v2/service/bedrockagent v1.50.6/go.mod h1:Jl3eDtXBZAze9w+aJO1oPzdk55CqOh+Tq9VhLTLQSRA= +github.com/aws/aws-sdk-go-v2/service/bedrockagentcorecontrol v1.10.0 h1:HhOMc4AhT430DBGfv5CGHvc4AQeGe/Yz4i8p/5xe6sE= +github.com/aws/aws-sdk-go-v2/service/bedrockagentcorecontrol v1.10.0/go.mod h1:Es+CYDVSPzyRIJaDDzxvoBNRc+AZbevIL8d+q1+3J5w= +github.com/aws/aws-sdk-go-v2/service/billing v1.8.0 h1:qffsTlqnTPtokF6Y4dlw4YUWPYtOw+PCQyv0gJ8o1PE= 
+github.com/aws/aws-sdk-go-v2/service/billing v1.8.0/go.mod h1:HaQjETFBieRL+1p0qWCYDzDe/JnI4oJM4UiO3qNEPTo= +github.com/aws/aws-sdk-go-v2/service/budgets v1.39.2 h1:HxSdjcZ9NPVG4ZdznJMUjqjR0DPBWSId0xKUbTfl/Eg= +github.com/aws/aws-sdk-go-v2/service/budgets v1.39.2/go.mod h1:+0hQkFGrrsp6x9hxk/n7EOscPVfwrBkTojUCthoHquM= +github.com/aws/aws-sdk-go-v2/service/chatbot v1.14.6 h1:QWlDo8QuBHtT6LYYf5opmQtUY4ntkcU0mjmmmbZiMoM= +github.com/aws/aws-sdk-go-v2/service/chatbot v1.14.6/go.mod h1:QSe+uEkQQHwIPKFfaZtbZWrNaRq5esdmdQspTPV4apY= +github.com/aws/aws-sdk-go-v2/service/chime v1.40.5 h1:kaAYFY5mvQHeyEX9pamOBly0Vx7f3Al3dCD9p3JJAnE= +github.com/aws/aws-sdk-go-v2/service/chime v1.40.5/go.mod h1:gXN/LFE/H9vql+trNeg5MwcHYB2brbgv4j0pnphrxXU= +github.com/aws/aws-sdk-go-v2/service/chimesdkmediapipelines v1.26.6 h1:JT7X1tDbHo/0D0UQh7zi2YlHbH8zaLTgH1zKEPx/kUo= +github.com/aws/aws-sdk-go-v2/service/chimesdkmediapipelines v1.26.6/go.mod h1:ROgSEKmD43CMB1KWQSPNovieWq6DPPSu/MCdVbwO6II= +github.com/aws/aws-sdk-go-v2/service/chimesdkvoice v1.27.0 h1:NMNhVPuxmv+8l/XktsHQTqyk7vhVsqzKEzePMdQWvgE= +github.com/aws/aws-sdk-go-v2/service/chimesdkvoice v1.27.0/go.mod h1:8y4H/7OXnf2YSf2ybz8aqQzxbl5pW/yiolNFSSaZ41g= +github.com/aws/aws-sdk-go-v2/service/cleanrooms v1.36.0 h1:AI06e0v0FtjcNk3XNsJmp8fiAAOceRzErDjdwN0WPj8= +github.com/aws/aws-sdk-go-v2/service/cleanrooms v1.36.0/go.mod h1:VyV0Il6a4RYvrqhA6tvNpV13LEBFk77Vu1FMTJs4qyA= +github.com/aws/aws-sdk-go-v2/service/cloud9 v1.33.5 h1:ZoUqKpdIPkGeGRY1v81GCaVoELHgtUYEV0WF67skUhk= +github.com/aws/aws-sdk-go-v2/service/cloud9 v1.33.5/go.mod h1:Q2RJfC6edAyk5hr3gJMS8WANFoTPGIo2SlAzQkZsT0A= +github.com/aws/aws-sdk-go-v2/service/cloudcontrol v1.28.6 h1:jqP2tyJOEj7qDoLyqyKGnDMAW+Lmi0WwNB2OruNao6w= +github.com/aws/aws-sdk-go-v2/service/cloudcontrol v1.28.6/go.mod h1:GIOHLcWXFDrHSzJJFMNRxLsfA++pOENXO2QVvMT0mJI= +github.com/aws/aws-sdk-go-v2/service/cloudformation v1.67.0 h1:dXbv06SZ39MYWL70KgFdMgFl9ZLfHe3AWIiTs0V2LAE= 
+github.com/aws/aws-sdk-go-v2/service/cloudformation v1.67.0/go.mod h1:/q63oDWCyO4xLLRiVYpwufJDwSkL0IbC5epFNJne8JQ= +github.com/aws/aws-sdk-go-v2/service/cloudfront v1.55.0 h1:NjW6Wq4xfGF3DVKBXj51dE6P7VXMYup/W8pAekNo91k= +github.com/aws/aws-sdk-go-v2/service/cloudfront v1.55.0/go.mod h1:dYwFVhUsRZt7COcGP23ei0lY8gX8ZSHrbyX49VB93MA= +github.com/aws/aws-sdk-go-v2/service/cloudfrontkeyvaluestore v1.12.8 h1:dlFZVF9TpiFvPsNO8uN20iHsrpJrALbQbwGbs7cVL9c= +github.com/aws/aws-sdk-go-v2/service/cloudfrontkeyvaluestore v1.12.8/go.mod h1:MylnqogyYEsq0wODWlXmewzDOLXvDuhPpyAORIDSOOc= +github.com/aws/aws-sdk-go-v2/service/cloudhsmv2 v1.34.5 h1:vjOGGSctnKWctwndBRg6fnUQnXiIQ/zuf5km/L4q/zg= +github.com/aws/aws-sdk-go-v2/service/cloudhsmv2 v1.34.5/go.mod h1:WvZiU3vTIX6sm3FLFNHe05MWjKM4cqOPkfwT1lSj7hw= +github.com/aws/aws-sdk-go-v2/service/cloudsearch v1.31.6 h1:WqphYeWJNaQRl5taLdy6ipI8EHsQGi8rxghXGvBSpkM= +github.com/aws/aws-sdk-go-v2/service/cloudsearch v1.31.6/go.mod h1:pOvrSeFE/QezgirkaSVZcEtEo1UvlnZy/XlYo5pAJ8c= +github.com/aws/aws-sdk-go-v2/service/cloudtrail v1.53.6 h1:lo/qOnIAmeBGsfXa92XpKFolYCEVRqxRYd2V171eU24= +github.com/aws/aws-sdk-go-v2/service/cloudtrail v1.53.6/go.mod h1:q4HzizMPYR4kPnUmcY7sjTCdB0hoxw84mQTgtjJ50ug= +github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.51.1 h1:GqVafesryYki8Lw/yRzLcoSeaT06qSAIbLoZLqeY0ks= +github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.51.1/go.mod h1:Kg/y+WTU5U8KtZ8vYYz0CyiR8UCBbZkpsT7TeqIkQ2M= +github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs v1.58.2 h1:JPW6ND8muLsBwALrf/VXikyokUmGWNKZa88qZWwFGWA= +github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs v1.58.2/go.mod h1:3Dh12t3s/KrpEm7HNfg5RH+XWzi9LW2QI7velkc61ac= +github.com/aws/aws-sdk-go-v2/service/codeartifact v1.38.6 h1:adRnHtafjEL6BdPyNvVvsljxGlI3wQALwnTLDGDyu3o= +github.com/aws/aws-sdk-go-v2/service/codeartifact v1.38.6/go.mod h1:Jo4nWheCppk/3QfXOcYBouw3XfQSLS/lqXn7GQIhYEQ= +github.com/aws/aws-sdk-go-v2/service/codebuild v1.67.5 h1:IjkLl7nLhE8w32Zv9NKBUdbB6YsFHIN0Y7qek4LO7wQ= 
+github.com/aws/aws-sdk-go-v2/service/codebuild v1.67.5/go.mod h1:1ayIXbJj20GhTn4zvTQ5mKmDYMg5gs9ICsqR+WvjWrw= +github.com/aws/aws-sdk-go-v2/service/codecatalyst v1.20.8 h1:ngiN4E8pNW15lffBIVfbO6IOSR/3NiRbBTL6XprV2UA= +github.com/aws/aws-sdk-go-v2/service/codecatalyst v1.20.8/go.mod h1:u8qstOf0Jhr2PB2Xko0PirjruTv4Cp/Rwhw7ZGxgpcI= +github.com/aws/aws-sdk-go-v2/service/codecommit v1.32.6 h1:11qvnjhmVnkb9UFQdagNFmAZV8CNb0hznYUGIEIVMZM= +github.com/aws/aws-sdk-go-v2/service/codecommit v1.32.6/go.mod h1:RLtIEolTsnW3TOw3fHTAXb4H2xNjcpKa/b1nKsTmAh8= +github.com/aws/aws-sdk-go-v2/service/codeconnections v1.10.5 h1:wcDfIGYi7pNS33qRzewQhvAs1FGZA+GrypDce+5m3TU= +github.com/aws/aws-sdk-go-v2/service/codeconnections v1.10.5/go.mod h1:XVNEBA5S5hDvYpzK0//pWFemUsx2LKxYa9Ymkg62Z5E= +github.com/aws/aws-sdk-go-v2/service/codedeploy v1.34.6 h1:TcWPqk5hTjCeMz8tWtLPV3nUBWZ7xTFCql8JeM+Jyxw= +github.com/aws/aws-sdk-go-v2/service/codedeploy v1.34.6/go.mod h1:00HnOuKp1Q/g5sCAzV8dDJWq6fts0D/1xC5DlLWjXwA= +github.com/aws/aws-sdk-go-v2/service/codeguruprofiler v1.29.5 h1:sQevsmx5Sg8WkyR/P+Vq/tqpJCDzKr+tvZYhuP6lMtE= +github.com/aws/aws-sdk-go-v2/service/codeguruprofiler v1.29.5/go.mod h1:6vjCrFSI1R02YCIFRqCqcKxOzKWSgib4Q9RPK8yhHS0= +github.com/aws/aws-sdk-go-v2/service/codegurureviewer v1.34.5 h1:WByNI1rera7rLq8qRSh+0uhQSVMDM228fZqOiUyeJb0= +github.com/aws/aws-sdk-go-v2/service/codegurureviewer v1.34.5/go.mod h1:oYDh1yjbugYgvcdCWMbsZcZmp8QQ1OBCqaX2qdXiPvI= +github.com/aws/aws-sdk-go-v2/service/codepipeline v1.46.6 h1:z/82UoTxxmA27/yygFEnx+uIdYY1zyK37vCPKZoXyb4= +github.com/aws/aws-sdk-go-v2/service/codepipeline v1.46.6/go.mod h1:8n32TPTWAAHJ0kAuD0z8TGR0z84ZfYFm9ILkHgkV5Do= +github.com/aws/aws-sdk-go-v2/service/codestarconnections v1.34.6 h1:jSXTzwJsreMbTdUaBRhB0PnB+sWfq+awXxZJorfP8U8= +github.com/aws/aws-sdk-go-v2/service/codestarconnections v1.34.6/go.mod h1:bkkAghnfsExMwlQ9u3NIoMbhUhpUDq1VL5vaaD6KrKI= +github.com/aws/aws-sdk-go-v2/service/codestarnotifications v1.31.6 
h1:q0ma8a3t28BbHb0/DSMF6VXOouvdk42kqjLzP1YGMMM= +github.com/aws/aws-sdk-go-v2/service/codestarnotifications v1.31.6/go.mod h1:lOY7xQqacZtC8sN+BEH8S3NCBoSEvLSeMHBVZfCynsk= +github.com/aws/aws-sdk-go-v2/service/cognitoidentity v1.33.6 h1:75RJ5nNarn2EViDSYRPV18H4PXAkugQy1Xjr4HJ9R3M= +github.com/aws/aws-sdk-go-v2/service/cognitoidentity v1.33.6/go.mod h1:dN8D7VkYmVwbH+MVVxiqtldtkTO7ovQiVUkCWa8v6PU= +github.com/aws/aws-sdk-go-v2/service/cognitoidentityprovider v1.57.7 h1:1LPBlVrceFenrbWOZBGu8KTmX8TTMpZfRxX0HCnSjz0= +github.com/aws/aws-sdk-go-v2/service/cognitoidentityprovider v1.57.7/go.mod h1:l8KDrD4EZQwTuM69YK3LFZ4c9VbNHrzaQJjJsoIFqfo= +github.com/aws/aws-sdk-go-v2/service/comprehend v1.40.6 h1:LtBU4r66PzkAdivreTlrlNWH/CQ6PG7sAKlrcdz1d4Y= +github.com/aws/aws-sdk-go-v2/service/comprehend v1.40.6/go.mod h1:tbNB6UTE8b8fVgKsLl8IOc50jyxZ0fGqiVgQTWfNdLg= +github.com/aws/aws-sdk-go-v2/service/computeoptimizer v1.47.5 h1:dilS2NJ0F1Jwhi4A8NuZJAGq7HwFQ/GE4GJ+IoHWzx4= +github.com/aws/aws-sdk-go-v2/service/computeoptimizer v1.47.5/go.mod h1:GP4KTSWjdb7GofokIXNbVP9CQDIKTv13nfqSBiq2hnA= +github.com/aws/aws-sdk-go-v2/service/configservice v1.58.2 h1:sfLW2pTtZZHGM7Ksp3PdMqyoLjoD7dHzPblLLjcYnBk= +github.com/aws/aws-sdk-go-v2/service/configservice v1.58.2/go.mod h1:/+Y1FQ6hhvY+6moAqnf/lrSgNbckvrHoNmxTMJ5WhaU= +github.com/aws/aws-sdk-go-v2/service/connect v1.142.0 h1:2LYf+Q6UtACzAXZ+ylgDnimXdccqbrUlv01Tp9/BBBM= +github.com/aws/aws-sdk-go-v2/service/connect v1.142.0/go.mod h1:RlZrDWMyt5HH92j6fpBcBLjo5FiJw61jNAgTjCAQY5g= +github.com/aws/aws-sdk-go-v2/service/connectcases v1.32.0 h1:4nmhQ24WaJ4e38AKtFJzFSPvoiLDZCK0e2Edm7u+Tdk= +github.com/aws/aws-sdk-go-v2/service/connectcases v1.32.0/go.mod h1:pWZuObOfZSGHvL29N0S0JvGpsvk8xDlJPgX92QTxnTE= +github.com/aws/aws-sdk-go-v2/service/controltower v1.26.6 h1:xJchWovBC1h9lvvcysi4kjDT+ZxycuJc+jt/Y6YELho= +github.com/aws/aws-sdk-go-v2/service/controltower v1.26.6/go.mod h1:7T5FMpZ7QYi3p35ugZH2Wdebzw/bAAQ+HVsdtxT31LI= 
+github.com/aws/aws-sdk-go-v2/service/costandusagereportservice v1.33.6 h1:Fy5Lp0Gn0aHairTF8nj3HNsml9NuLGuKFXsGlSCXMK0= +github.com/aws/aws-sdk-go-v2/service/costandusagereportservice v1.33.6/go.mod h1:WjmUookbSIF13EUgmIm3iJbsOR4ig0BZtPtLojlmiEo= +github.com/aws/aws-sdk-go-v2/service/costexplorer v1.57.0 h1:OPm/yHm06nNtL47/ITE/TEUgB1yZV7GU20cmH4qUe2A= +github.com/aws/aws-sdk-go-v2/service/costexplorer v1.57.0/go.mod h1:5PEFaK4UypksO7xXX+aZ2zJkTA4WYOCaCJ7jfHtvlrs= +github.com/aws/aws-sdk-go-v2/service/costoptimizationhub v1.20.6 h1:bm/4K9y+tPlOm7LCw7Oul6j4+twkYN9pMZgf6czWEIE= +github.com/aws/aws-sdk-go-v2/service/costoptimizationhub v1.20.6/go.mod h1:Ind97CkUL/Sp8b9+eXlZoJzOyAjgSl+zX2NODYGl/5M= +github.com/aws/aws-sdk-go-v2/service/customerprofiles v1.53.0 h1:MgnY9bNxeOQ2jPCwkQ5PdNVNJtdLlGWsql4BCEA3oKs= +github.com/aws/aws-sdk-go-v2/service/customerprofiles v1.53.0/go.mod h1:Zp3IfPlmLCI1qU7It4GyqNKmTNLjNP33ZS9XdJSHY38= +github.com/aws/aws-sdk-go-v2/service/databasemigrationservice v1.57.7 h1:ARnadIHN7MAAMkjNsBScWgV7pRhrhXtBnXMG8YDkDNE= +github.com/aws/aws-sdk-go-v2/service/databasemigrationservice v1.57.7/go.mod h1:ct/KZc7aF1iJDdvVtIMUBjbZrIespvcZDXfiobANsVw= +github.com/aws/aws-sdk-go-v2/service/databrew v1.38.5 h1:uAyzLnETV1vpvVakHdGNOSnpYtmCPbc8F3e+rjooC+E= +github.com/aws/aws-sdk-go-v2/service/databrew v1.38.5/go.mod h1:TyoXF8AvpXcKkxjlW7E+Aax/FBDLoObTyby6zRffi14= +github.com/aws/aws-sdk-go-v2/service/dataexchange v1.39.6 h1:ywlBAsu4TUhGcocmioq7k6709WHhVZx6yHHcuAma1C8= +github.com/aws/aws-sdk-go-v2/service/dataexchange v1.39.6/go.mod h1:uu4l98l3f19G6MGsNf3EWcbrpRTwyErJ9PLvI/XaXwg= +github.com/aws/aws-sdk-go-v2/service/datapipeline v1.30.5 h1:lIw4H3QLLfAV6OFUFNf2rSQOD8ufSfN9sXciRpUIsv8= +github.com/aws/aws-sdk-go-v2/service/datapipeline v1.30.5/go.mod h1:lOMJLtcZ8roDJadGeAVnqdvva6RpG66Rzl3qmyHibQU= +github.com/aws/aws-sdk-go-v2/service/datasync v1.55.0 h1:K2gDOAe8OdZ6lnau8ran0va1vL97/JxANxJ1d5VYHz4= +github.com/aws/aws-sdk-go-v2/service/datasync v1.55.0/go.mod 
h1:GN+XSZ4Gv+QAfsCkBTEqLlmI766xItwX1KIsNJlPCJo= +github.com/aws/aws-sdk-go-v2/service/datazone v1.43.0 h1:B8F31trY6utWMnh3n3bq9e13Nerz29FRzXT9ixRAJj0= +github.com/aws/aws-sdk-go-v2/service/datazone v1.43.0/go.mod h1:JtfS1guKOGCe3cKwSGrTm0grzQiMy1cfxfEAoMjygLM= +github.com/aws/aws-sdk-go-v2/service/dax v1.29.1 h1:sYEBub6ZSeElTUaelJkffTHj6HdmUsTF5H4B2XI/OiQ= +github.com/aws/aws-sdk-go-v2/service/dax v1.29.1/go.mod h1:FQ3H4KZGNJ7xNstwjgtKtWM99QtU1y2Y2vGdOSqEPZ8= +github.com/aws/aws-sdk-go-v2/service/detective v1.37.7 h1:VlbfflT4Weqvq2cRzhbGv3gKvG2T7rhdwLvl8QohkIU= +github.com/aws/aws-sdk-go-v2/service/detective v1.37.7/go.mod h1:JpUF7Kimgvqm5MBT3YiqVFmLRNqf+9xgzXzaJrCnlts= +github.com/aws/aws-sdk-go-v2/service/devicefarm v1.35.6 h1:HCNMZXY/HhpvwpesD0foAVzSqOqkK7QQdgRkIqrUbBM= +github.com/aws/aws-sdk-go-v2/service/devicefarm v1.35.6/go.mod h1:D2NbfDF3qEeaPwl+EDLGIhq5sD4jqoTkv8o1rw37IaE= +github.com/aws/aws-sdk-go-v2/service/devopsguru v1.39.6 h1:251cRFp3KrRyboVXOFhpurd9SlJ7GOk+lMxsRlfKb7Y= +github.com/aws/aws-sdk-go-v2/service/devopsguru v1.39.6/go.mod h1:XhFyJv1IDmCaKiPUwWlj9+gV1mgpoR4BspX8CpfRbR8= +github.com/aws/aws-sdk-go-v2/service/directconnect v1.37.6 h1:xlqasn95WDPq8rFwMuLft8K6EXiBXA4gbElNy3k1qAE= +github.com/aws/aws-sdk-go-v2/service/directconnect v1.37.6/go.mod h1:ihMttb6cmTsmRw8/jdBT0WSR2cmxP+IdU9gIBbDs5mc= +github.com/aws/aws-sdk-go-v2/service/directoryservice v1.38.0 h1:/SjJpaHDl2Tcjq7wu0BXBr3y+iVhJGCUySBd40C38dQ= +github.com/aws/aws-sdk-go-v2/service/directoryservice v1.38.0/go.mod h1:9OhFQ4k8x6wvJRY3T3qQe4F/YQLo0iZB0Opq+2Mh80o= +github.com/aws/aws-sdk-go-v2/service/dlm v1.34.6 h1:x1q9I5nwC6JBo/k0CHPRUOtLsMTBoKTYBWQXbS0s0lU= +github.com/aws/aws-sdk-go-v2/service/dlm v1.34.6/go.mod h1:GMoqS22ylKwRmUIqPv2yQiYyfi9p4sv7D345nawzTgk= +github.com/aws/aws-sdk-go-v2/service/docdb v1.47.0 h1:Q1lDF/tOln11iUOnnQJd9RM8M2tbqSHCOzQfCwqQRuE= +github.com/aws/aws-sdk-go-v2/service/docdb v1.47.0/go.mod h1:yK1MzY7O/rmmti02gkvk+IdJZ/tCvKpcGZU2YxoWUPg= 
+github.com/aws/aws-sdk-go-v2/service/docdbelastic v1.19.6 h1:ZDolNXobqGnz7sLKh1b8yI4T4BrMjFbtIbmZRKmMmrI= +github.com/aws/aws-sdk-go-v2/service/docdbelastic v1.19.6/go.mod h1:oOz1QSkosu6fWaSQPmS9HYIkeqPs7FH+jugGa/bGQdA= +github.com/aws/aws-sdk-go-v2/service/drs v1.35.6 h1:awl8S++TupDDTsCRvrdNHUicQljM6liiHIBAJk+ej2w= +github.com/aws/aws-sdk-go-v2/service/drs v1.35.6/go.mod h1:p72nRrztE6ntt9W54vgPV3M5b520x8kbxqiDmHjFyjA= +github.com/aws/aws-sdk-go-v2/service/dsql v1.9.8 h1:9SzhOaXCRSMmyKariyaeP7hYcAdFkQk/1x3Z88V5t6o= +github.com/aws/aws-sdk-go-v2/service/dsql v1.9.8/go.mod h1:2Oz6G8F+PlNW4RK40ISLe8fTyLRvSlFOjdaWFcaFl9c= +github.com/aws/aws-sdk-go-v2/service/dynamodb v1.51.0 h1:TfglMkeRNYNGkyJ+XOTQJJ/RQb+MBlkiMn2H7DYuZok= +github.com/aws/aws-sdk-go-v2/service/dynamodb v1.51.0/go.mod h1:AdM9p8Ytg90UaNYrZIsOivYeC5cDvTPC2Mqw4/2f2aM= +github.com/aws/aws-sdk-go-v2/service/ec2 v1.257.0 h1:YoBAUV2TU4O/0xnOarB+0wgdomnIby+lbPtuTpdS5D0= +github.com/aws/aws-sdk-go-v2/service/ec2 v1.257.0/go.mod h1:M8WWWIfXmxA4RgTXcI/5cSByxRqjgne32Sh0VIbrn0A= +github.com/aws/aws-sdk-go-v2/service/ecr v1.50.5 h1:jzjNyiIrXJHumV1hwofcQLpIZtcDw+vPQL00rLI3s4g= +github.com/aws/aws-sdk-go-v2/service/ecr v1.50.5/go.mod h1:UtPKcYVHY6RrV9EaaM1KZGNaf9dgviFdsT6xoFMLQsM= +github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.37.6 h1:pc4te9Px2oORmxWlJXaX/OkHQsdQ3RiPvuZU7525FZc= +github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.37.6/go.mod h1:BeseuedjcZNw+lGyqDIbapD3hvvsEVkjkISUIQLzem4= +github.com/aws/aws-sdk-go-v2/service/ecs v1.65.1 h1:pBbXc1fGRbrYl7NFujuubMmEFEp7CJiKTBsoDOIUkuk= +github.com/aws/aws-sdk-go-v2/service/ecs v1.65.1/go.mod h1:fu6WrWUHYyPRjzYO13UDXA7O6OShI8QbH5YSl9SOJwQ= +github.com/aws/aws-sdk-go-v2/service/efs v1.40.8 h1:vwqXyeluOHOgkonTOxvFqGgMNh0y5H6r23+8RA5ifZo= +github.com/aws/aws-sdk-go-v2/service/efs v1.40.8/go.mod h1:xJFehblB1voatQStn4hPPTnr+ueQ3UKxjSCro66JliE= +github.com/aws/aws-sdk-go-v2/service/eks v1.74.2 h1:GKqBur7gp6rnYbMZXh2+89f8g+/bu26ZKwpXfXrno80= 
+github.com/aws/aws-sdk-go-v2/service/eks v1.74.2/go.mod h1:f1/1x766rRjLVUk94exobjhggT1MR3vO4wxglqOvpY4= +github.com/aws/aws-sdk-go-v2/service/elasticache v1.50.5 h1:VEdPmtEs1EzHXOcKmKwaN6rwwatgw4k12n08U7qML5w= +github.com/aws/aws-sdk-go-v2/service/elasticache v1.50.5/go.mod h1:venvSIu8icYqJTZ2meX3NIQypX5t4R2E6Cr9wdgHCQ8= +github.com/aws/aws-sdk-go-v2/service/elasticbeanstalk v1.33.7 h1:zWmgdRblU92HDqT37r+kvORdWAZCiG3z6SvPKcE2D8M= +github.com/aws/aws-sdk-go-v2/service/elasticbeanstalk v1.33.7/go.mod h1:6hnLvLpLNgqMXL2uaEf/FacDYErGspeQHZn/3U+6H6k= +github.com/aws/aws-sdk-go-v2/service/elasticloadbalancing v1.33.6 h1:+YIp+dygyeHjUd7u9kv2MluNwnbiNeUITH4aZ4UgiPs= +github.com/aws/aws-sdk-go-v2/service/elasticloadbalancing v1.33.6/go.mod h1:iyqISGdbs/IFj3D7GyiRcVjNnbEYcF3NZrRlZnp7IWs= +github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2 v1.51.0 h1:Zy1yjx+R6cR4pAwzFFJ8nWJh4ri8I44H76PDJ77tcJo= +github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2 v1.51.0/go.mod h1:RuZwE3p8IrWqK1kZhwH2TymlHLPuiI/taBMb8vrD39Q= +github.com/aws/aws-sdk-go-v2/service/elasticsearchservice v1.37.6 h1:+f1A4QwqPiWy71nr5qlvLMeaR7UjpzDgCAG2MhhmJeo= +github.com/aws/aws-sdk-go-v2/service/elasticsearchservice v1.37.6/go.mod h1:pFAUfULfSY46LfS7WPd9q6IcdM/tWm3qTpEZhCSgtKI= +github.com/aws/aws-sdk-go-v2/service/elastictranscoder v1.32.6 h1:81IE+qNRipRKlwOUZzVI3NSOtewZnLqUqOA5UGAV3ME= +github.com/aws/aws-sdk-go-v2/service/elastictranscoder v1.32.6/go.mod h1:k9An7RySCxNbERamBuwDoXaXMTWXQqEusn3/eAoyN94= +github.com/aws/aws-sdk-go-v2/service/emr v1.54.5 h1:tA10GZKqcDLOD5JfeRTpu72X5KqxBDJBqWnn720HhzA= +github.com/aws/aws-sdk-go-v2/service/emr v1.54.5/go.mod h1:zESYrv3WuVUTyMIXwR8OoRAkcgj941Mdp154AXjONAY= +github.com/aws/aws-sdk-go-v2/service/emrcontainers v1.40.2 h1:DXc0q23esbZXny49LUg289Yoy6Vjd58z0TV6jsGdKgM= +github.com/aws/aws-sdk-go-v2/service/emrcontainers v1.40.2/go.mod h1:btaFcfwXxksqE0d6wBhIy3VopO0dWw1KWctELo7P+wk= +github.com/aws/aws-sdk-go-v2/service/emrserverless v1.36.6 
h1:jBV+JfRW8laF4hQrPoVj7Xxd45hrXg6fvNn0/nOEm3s= +github.com/aws/aws-sdk-go-v2/service/emrserverless v1.36.6/go.mod h1:6jyzPmx8zLW3K5oP/CBMH3VFhQyf3G6vPR1vaz3HsTI= +github.com/aws/aws-sdk-go-v2/service/eventbridge v1.45.5 h1:MoTJpDDOR1gmfIC6Qc7gS+uS0hlqF7RcphMqAfp8r2U= +github.com/aws/aws-sdk-go-v2/service/eventbridge v1.45.5/go.mod h1:fgyvv0FpfhbcmGgcgyDltW9K2UMs1DOBBjnkyX9JC1I= +github.com/aws/aws-sdk-go-v2/service/evidently v1.28.5 h1:TCJCjCNhQ79VvthLKT3r4Ku3SU19rGpoAovI6rydRIs= +github.com/aws/aws-sdk-go-v2/service/evidently v1.28.5/go.mod h1:UOLThVkUgc5apzB1G4oemgrigr3BYpQEbD183CX1k5s= +github.com/aws/aws-sdk-go-v2/service/evs v1.5.2 h1:RrUB7uEIO4LYwaqRwK7KL+zH7irCQDfFOueZCHXFig8= +github.com/aws/aws-sdk-go-v2/service/evs v1.5.2/go.mod h1:0j+d5nDYF1oBpk7MWqkl5VIWnSNhWD9KiWTj+t/U7Y4= +github.com/aws/aws-sdk-go-v2/service/finspace v1.33.6 h1:H3llnOFZFz/g5v4cAA6gUQ54XUJf74SQCyKuLlCZfi4= +github.com/aws/aws-sdk-go-v2/service/finspace v1.33.6/go.mod h1:a2D/sV/YKWlPNmGYZ0OVmX2typzjwO7IwZ2NUfBgaEI= +github.com/aws/aws-sdk-go-v2/service/firehose v1.41.6 h1:BaLiLj0REx6fAxK6KYTeHXv9njpyqnLqrARYC8QhkLQ= +github.com/aws/aws-sdk-go-v2/service/firehose v1.41.6/go.mod h1:kKWlKjg9gI2uOLNQG1GnTBaYfBVQKJC0z99GIPQLFXw= +github.com/aws/aws-sdk-go-v2/service/fis v1.37.5 h1:yqaWoYLetwAKcnR74PvZjgaFRabbWDnllrFOYu6EEV0= +github.com/aws/aws-sdk-go-v2/service/fis v1.37.5/go.mod h1:htMJekf0GQU+ZgqHm5nkrpGrFQk9Sd/VX3mazLer3M4= +github.com/aws/aws-sdk-go-v2/service/fms v1.44.6 h1:Kkp6omiLoa7KDN8I/YesQzQ+Czi8a7iFsz18a2I0avE= +github.com/aws/aws-sdk-go-v2/service/fms v1.44.6/go.mod h1:0MmE+RS7FFf+ld2RVTLQSJumC56UPfnYj20jwC0F7IA= +github.com/aws/aws-sdk-go-v2/service/fsx v1.62.0 h1:by2Uy4YkY+kddlqUXziLUo+ORa5d5Zba7+9tDyB+nSc= +github.com/aws/aws-sdk-go-v2/service/fsx v1.62.0/go.mod h1:IYOHN0ZkhnOc76Wq3jA9p7EBmcyUrD7ovglUA7thwAA= +github.com/aws/aws-sdk-go-v2/service/gamelift v1.46.6 h1:gbD+Jd5bKvfkeieI9nBk4pyBEGUCKGuC3uubBcnfjPQ= +github.com/aws/aws-sdk-go-v2/service/gamelift v1.46.6/go.mod 
h1:qG2t3ko7BtX5Ix+c9V8xNiQbHyMhL3Cci8NemnNGU9M= +github.com/aws/aws-sdk-go-v2/service/glacier v1.31.6 h1:iwc7B/ZCzm8dhAunHXYU3ppf+OKjtxQmFaVWAi0KVCw= +github.com/aws/aws-sdk-go-v2/service/glacier v1.31.6/go.mod h1:diGbfsRR7oW+2CZPfdR/IC1LC9Vt33OVKHbSmmKaUo8= +github.com/aws/aws-sdk-go-v2/service/globalaccelerator v1.34.6 h1:1up3eQrlvZ0FEzNLFCpRa06ZnBO+w43MqgGjeQJVoXI= +github.com/aws/aws-sdk-go-v2/service/globalaccelerator v1.34.6/go.mod h1:z4vejjg7HKiZPR12s6irgnDOpFw0hTJukQm/tkwmgJU= +github.com/aws/aws-sdk-go-v2/service/glue v1.131.0 h1:ZqcfaqOBjTmdKbSK4FcTlFrUPiezJ/NTulfD5Pn5x5E= +github.com/aws/aws-sdk-go-v2/service/glue v1.131.0/go.mod h1:iH5M4d6X8IdmFUwOVdnoCEt7eqhjYZuw4gEI0ebsQjs= +github.com/aws/aws-sdk-go-v2/service/grafana v1.31.6 h1:SoVlnBHm+Gq5LI4Z4tIxLAfOG1wCFA5puE1vwB/ldHA= +github.com/aws/aws-sdk-go-v2/service/grafana v1.31.6/go.mod h1:ABsoTppDCXrP8CFfMIkaoYdC87U51t0mMxZbDFZGKkQ= +github.com/aws/aws-sdk-go-v2/service/greengrass v1.32.6 h1:olwkT6lMeGYJ18lPObZKMaXOS3a69GoecEtGmR2Umyc= +github.com/aws/aws-sdk-go-v2/service/greengrass v1.32.6/go.mod h1:GS2vTGoqO4jHpYqP0avBbcVmkojcOYKtiBvCrVCg8Pc= +github.com/aws/aws-sdk-go-v2/service/groundstation v1.37.6 h1:LhXUztHSIjfmUHkahRMI+NeYBwv5XcFMyXAcw1+/5W0= +github.com/aws/aws-sdk-go-v2/service/groundstation v1.37.6/go.mod h1:h6rk6CTK+SoxaYWtdwyrjgWI01Q2+figfhS4fLJCtD4= +github.com/aws/aws-sdk-go-v2/service/guardduty v1.65.0 h1:dKlP/56A7vI4bN09mAlxIh9JaY/aZZnNLQkqot0io4U= +github.com/aws/aws-sdk-go-v2/service/guardduty v1.65.0/go.mod h1:0cFCtC9mK9eNAHpKNc5/A59dqjYdwPnE1vL5STupNsk= +github.com/aws/aws-sdk-go-v2/service/healthlake v1.35.5 h1:FP9XMTzx31mocJLJjPJEpaQIDy9cAfYRdclIV/YfRVw= +github.com/aws/aws-sdk-go-v2/service/healthlake v1.35.5/go.mod h1:kVyA+EB5+V1zoCKEd7DR2isRChxswqaafB3kFl5eM0Q= +github.com/aws/aws-sdk-go-v2/service/iam v1.47.7 h1:0EDAdmMTzsgXl++8a0JZ+Yx0/dOqT8o/EONknxlQK94= +github.com/aws/aws-sdk-go-v2/service/iam v1.47.7/go.mod h1:NkNbn/8/mFrPUq0Kg6EM6c0+GaTLG+aPzXxwB7RF5xo= 
+github.com/aws/aws-sdk-go-v2/service/identitystore v1.32.7 h1:k6s7ZccfZzFfRcko46b+wpiTihVSFb8oAM3zwRTNso0= +github.com/aws/aws-sdk-go-v2/service/identitystore v1.32.7/go.mod h1:4xOhHo77B1qfs09L1DJq5luMO2cSILnc+8UkLvzvtHw= +github.com/aws/aws-sdk-go-v2/service/imagebuilder v1.48.0 h1:F3LuF59HfxqQqWA8lrjZmRwvScpfc6pvkrzHwFZwryA= +github.com/aws/aws-sdk-go-v2/service/imagebuilder v1.48.0/go.mod h1:B44b3XYDjkYgLbEpyTWrK+0k8+N1PZoBO8PdJUF4Cn4= +github.com/aws/aws-sdk-go-v2/service/inspector v1.30.5 h1:a9Yl3PlsRSiOlfg7qCpAPTnL/yhfsEFrPuyMjnnmUkA= +github.com/aws/aws-sdk-go-v2/service/inspector v1.30.5/go.mod h1:WPIOZddPJtTqr0mjtd6YfwXyKJiSlOCb6ZWZ3f3xIac= +github.com/aws/aws-sdk-go-v2/service/inspector2 v1.44.6 h1:G3SqMciqPsatTbPmq2lLebpGjanwqfkBGCKStf4nSbE= +github.com/aws/aws-sdk-go-v2/service/inspector2 v1.44.6/go.mod h1:idr72RZY3+DwomnH2ZTYE0Y/+rwKdtdneJWWGLlylmU= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.1 h1:oegbebPEMA/1Jny7kvwejowCaHz1FWZAQ94WXFNCyTM= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.1/go.mod h1:kemo5Myr9ac0U9JfSjMo9yHLtw+pECEHsFtJ9tqCEI8= +github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.0 h1:X0FveUndcZ3lKbSpIC6rMYGRiQTcUVRNH6X4yYtIrlU= +github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.0/go.mod h1:IWjQYlqw4EX9jw2g3qnEPPWvCE6bS8fKzhMed1OK7c8= +github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.11.9 h1:7ILIzhRlYbHmZDdkF15B+RGEO8sGbdSe0RelD0RcV6M= +github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.11.9/go.mod h1:6LLPgzztobazqK65Q5qYsFnxwsN0v6cktuIvLC5M7DM= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.9 h1:5r34CgVOD4WZudeEKZ9/iKpiT6cM1JyEROpXjOcdWv8= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.9/go.mod h1:dB12CEbNWPbzO2uC6QSWHteqOg4JfBVJOojbAoAUb5I= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.9 h1:wuZ5uW2uhJR63zwNlqWH2W4aL4ZjeJP3o92/W+odDY4= 
+github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.9/go.mod h1:/G58M2fGszCrOzvJUkDdY8O9kycodunH4VdT5oBAqls= +github.com/aws/aws-sdk-go-v2/service/internetmonitor v1.25.5 h1:1bnvwYxuKCTMiF/MavITDTRnCCOdCbmNWyFbfKMw2wA= +github.com/aws/aws-sdk-go-v2/service/internetmonitor v1.25.5/go.mod h1:Ok83qcqfCvpkKU655IHorvYG0NMPr30P5H8ng9uNaQk= +github.com/aws/aws-sdk-go-v2/service/invoicing v1.6.8 h1:rdiHnyg2/1Wu3/BVCY0o4a5RGs/bF5NTjqscInYWGJ4= +github.com/aws/aws-sdk-go-v2/service/invoicing v1.6.8/go.mod h1:TniL6d9prBubA9ZcfCJo9Q9r2cyh2c4C9csZMUDYuBw= +github.com/aws/aws-sdk-go-v2/service/iot v1.69.5 h1:ufbRtUcNLpfKjE4MXGnqNwF2gXh5s9CUlgfL3nDyd5I= +github.com/aws/aws-sdk-go-v2/service/iot v1.69.5/go.mod h1:xkUGPoYRFoe0i19cUfIMeocCOWG5Ona7MWMeMqqL8eE= +github.com/aws/aws-sdk-go-v2/service/ivs v1.47.6 h1:L4uWqGDzaapkNiPW1LUnHVGkrjynMG5vyGeu+YATgio= +github.com/aws/aws-sdk-go-v2/service/ivs v1.47.6/go.mod h1:i6n4c/4w7kw455UViuMob7/0YoWB24uXkzal1udFz8g= +github.com/aws/aws-sdk-go-v2/service/ivschat v1.21.5 h1:pwetfaLlSr67dBmlJtVNUsBgSdOzk02NfAo1MDqhyQM= +github.com/aws/aws-sdk-go-v2/service/ivschat v1.21.5/go.mod h1:mbBWB0NzEUOjY8FarIsbCWo6DXqylv1mf+B77uT/xlQ= +github.com/aws/aws-sdk-go-v2/service/kafka v1.43.6 h1:gd9n9V4YTRcg5VJfDYBRVJHQBaUMpbKOKWzAhHzyhcA= +github.com/aws/aws-sdk-go-v2/service/kafka v1.43.6/go.mod h1:061TSd3Z7fxrRzFbo8VniS3VErBjATTfC7+HsSUW11g= +github.com/aws/aws-sdk-go-v2/service/kafkaconnect v1.27.6 h1:YRPt0iTJeUfSFCnZMlIVokoSgotLHBYoKlaQnuclokM= +github.com/aws/aws-sdk-go-v2/service/kafkaconnect v1.27.6/go.mod h1:Z9w9e4XGxePy+tPjsgNKYiJZXPTFysEbKqpc72dzhO0= +github.com/aws/aws-sdk-go-v2/service/kendra v1.60.6 h1:rGhNWcIhP7DqFve8zlZItzX8UslsM26aSCRTC6M2hGs= +github.com/aws/aws-sdk-go-v2/service/kendra v1.60.6/go.mod h1:3Amyw8Cu+M3VliBNu6PkOvVLLGS9eyzmkwBBYIygr60= +github.com/aws/aws-sdk-go-v2/service/keyspaces v1.23.6 h1:VvcHwWDWJs6sbM62LI1UKo3ONVBXSP+sPiYduTOZ5Ug= +github.com/aws/aws-sdk-go-v2/service/keyspaces v1.23.6/go.mod 
h1:YjFnZVw57O46J972EzA4Ny7HObGqymOoFCymCiLXdDE= +github.com/aws/aws-sdk-go-v2/service/kinesis v1.40.5 h1:GWAVIxhYlkFX76WGG2gus5eyonXaKPv00VpiSqHzXDo= +github.com/aws/aws-sdk-go-v2/service/kinesis v1.40.5/go.mod h1:u/oFMSASsn9QNBRop5lrIpuNwHZwEXjYxNQp7sHFSxc= +github.com/aws/aws-sdk-go-v2/service/kinesisanalytics v1.30.6 h1:OL3s9Y927XoMxO4Jod29/eIl1vyS5NDnDesJaLkhjeE= +github.com/aws/aws-sdk-go-v2/service/kinesisanalytics v1.30.6/go.mod h1:1szjTKn1bM+Ce2Pf2g57WqudQXi+YZodbrYMZY2Awzw= +github.com/aws/aws-sdk-go-v2/service/kinesisanalyticsv2 v1.36.7 h1:lePrOEBRe3FMsApDx6QNfiVsUR0ePYdeE+KkIMM6vp0= +github.com/aws/aws-sdk-go-v2/service/kinesisanalyticsv2 v1.36.7/go.mod h1:AUtvJ7STwd00cd5mT3Vt9WH0LjF56nOWAZzPx+T5wUg= +github.com/aws/aws-sdk-go-v2/service/kinesisvideo v1.32.5 h1:8oO+Su+tqdsF1wll/Zm0eenGi/0lXQljG5sFerZvFXQ= +github.com/aws/aws-sdk-go-v2/service/kinesisvideo v1.32.5/go.mod h1:Mu9FDrPD7xsAZf9KhiL+WFEtnEgO7x6Kf8OJceaiJRU= +github.com/aws/aws-sdk-go-v2/service/kms v1.45.6 h1:Br3kil4j7RPW+7LoLVkYt8SuhIWlg6ylmbmzXJ7PgXY= +github.com/aws/aws-sdk-go-v2/service/kms v1.45.6/go.mod h1:FKXkHzw1fJZtg1P1qoAIiwen5thz/cDRTTDCIu8ljxc= +github.com/aws/aws-sdk-go-v2/service/lakeformation v1.45.5 h1:YlhZqR9Ma0x7q83cNpis7YJ1w4u532+ohJ7MSHqZno0= +github.com/aws/aws-sdk-go-v2/service/lakeformation v1.45.5/go.mod h1:e+RSq7q4W1pe3kt1kFBWQLvCsF3LEa6YF695iPjwUqo= +github.com/aws/aws-sdk-go-v2/service/lambda v1.78.0 h1:o6244M0Z5ryHuO05Fm+03CCZIQSh+qmZgYbnbOuaRGo= +github.com/aws/aws-sdk-go-v2/service/lambda v1.78.0/go.mod h1:LFNm6TvaFI2Li7U18hJB++k+qH5nK3TveIFD7x9TFHc= +github.com/aws/aws-sdk-go-v2/service/launchwizard v1.13.6 h1:aZ8MXpLB17q6THeWkvpYVheZTf2oOMgaeXYxQLfq8vY= +github.com/aws/aws-sdk-go-v2/service/launchwizard v1.13.6/go.mod h1:5IsG5hZ0YnGeIsZvB88ALqptUB5TmyA68Vh2JtOuolQ= +github.com/aws/aws-sdk-go-v2/service/lexmodelbuildingservice v1.33.5 h1:ArkKQMxVIaauILktZS/FMc9u52qGpC3OSAA9AQAnvgU= +github.com/aws/aws-sdk-go-v2/service/lexmodelbuildingservice v1.33.5/go.mod 
h1:LRYpBu4UZPuBggAl0Q62MaDRDlDYYE/DR/Q3Nr5HnTQ= +github.com/aws/aws-sdk-go-v2/service/lexmodelsv2 v1.56.6 h1:Ujpm8Qr81ge34jboS6NQu4WK/gpuwjNQI/cW8G2w1+g= +github.com/aws/aws-sdk-go-v2/service/lexmodelsv2 v1.56.6/go.mod h1:xiuNneOma5q5l0VtLAR6MuQ3K5sJlUCz51HB0IdJgvc= +github.com/aws/aws-sdk-go-v2/service/licensemanager v1.36.6 h1:jSPCSRdv3Ad2BZtaCO3PWJQmoOe6WXqrG79IoHrTpl4= +github.com/aws/aws-sdk-go-v2/service/licensemanager v1.36.6/go.mod h1:E+dz2RTwFIOG6cKRJiln5khKJmROa6RvP7DKiEEPCFE= +github.com/aws/aws-sdk-go-v2/service/lightsail v1.50.0 h1:JOLRYFWMMKUABCp94HHfo0JBVQDVTLXOvWWphjpBBiQ= +github.com/aws/aws-sdk-go-v2/service/lightsail v1.50.0/go.mod h1:WEOSRNyfIfvgrD9MuSIGrogKyuFahaVMziVq1pHI0NQ= +github.com/aws/aws-sdk-go-v2/service/location v1.49.6 h1:hNRkhRPvAHAqZapl7BPcjls1BAnykokUkF71E0iYgPU= +github.com/aws/aws-sdk-go-v2/service/location v1.49.6/go.mod h1:aRLVKgDTnlsf0moRfee8FTWv9SghW/x3W0W33Y//ZDY= +github.com/aws/aws-sdk-go-v2/service/lookoutmetrics v1.36.6 h1:9yP3vAUac8JYDnenwuOuPmpIRBgCVidxWN6hZvab1lE= +github.com/aws/aws-sdk-go-v2/service/lookoutmetrics v1.36.6/go.mod h1:A6750m3A2OebBhSwoXKosN5Vciq/JiY2piPsmZauiwc= +github.com/aws/aws-sdk-go-v2/service/m2 v1.25.6 h1:1pA10Dziy1XrpFNf6aND3Y43imaLL9w6U6lfMBUNR0E= +github.com/aws/aws-sdk-go-v2/service/m2 v1.25.6/go.mod h1:X3NB31GJKffp5h+SnU4aMKMarKd9Bd7jRFs2y/Ihve8= +github.com/aws/aws-sdk-go-v2/service/macie2 v1.49.6 h1:0lg+Mhd61q16NUpxwnNpAhP7sxSOO5H5/l+QxerZuIc= +github.com/aws/aws-sdk-go-v2/service/macie2 v1.49.6/go.mod h1:hAUjN7Dlx1i1Sjbx67uumWB7iwXOA/PM8kNOiw4ygjY= +github.com/aws/aws-sdk-go-v2/service/mediaconnect v1.45.0 h1:4cBXNlo8XYFq/leCpTVuZX2qAp779SIg3wkMPd5FDjo= +github.com/aws/aws-sdk-go-v2/service/mediaconnect v1.45.0/go.mod h1:pyFeP6f26HHtJJeNU4LqcD3R1Zh9RMwZjiluEsgZlYE= +github.com/aws/aws-sdk-go-v2/service/mediaconvert v1.82.6 h1:gk0yVOnKaRKGyWifpqw3aGeEGB4EO77UYGXnucl93Ek= +github.com/aws/aws-sdk-go-v2/service/mediaconvert v1.82.6/go.mod h1:YZeaUGHZihZA8/pC2hi248p8Y8S4oPMZLXso2RF4hsQ= 
+github.com/aws/aws-sdk-go-v2/service/medialive v1.84.0 h1:4VnT0CicQgGzIkzbfIz9FcCvl/A25JclsZ/jkkP2sGs= +github.com/aws/aws-sdk-go-v2/service/medialive v1.84.0/go.mod h1:ZWOvuk7slOmdlSnDIY7gr00d/HUEKAYT15oPc2oMprw= +github.com/aws/aws-sdk-go-v2/service/mediapackage v1.39.6 h1:xF2FWETQbjkGKK8fcmaJ2bO7i53wwRbsnExg5uTswyI= +github.com/aws/aws-sdk-go-v2/service/mediapackage v1.39.6/go.mod h1:hSlgOOXXYOtXOH8PUE07ZctOeDR9doOvtvpM6oR7z54= +github.com/aws/aws-sdk-go-v2/service/mediapackagev2 v1.31.3 h1:Tbh1uS0VAEw75762wftgeXlrpK2AO2tZjObiilryUCQ= +github.com/aws/aws-sdk-go-v2/service/mediapackagev2 v1.31.3/go.mod h1:5w01h9/Nmf0FUimiQGY9bYPU/of1Nz9oxiGbNxzUYT8= +github.com/aws/aws-sdk-go-v2/service/mediapackagevod v1.39.6 h1:8QUyNYiWzhsbQJITt/v+SNwdA/wH7B8YnDO/9GLeX2g= +github.com/aws/aws-sdk-go-v2/service/mediapackagevod v1.39.6/go.mod h1:QHMKOy8M9YcyxacWIIije66JGOPn0Uv911y3QN5xvOE= +github.com/aws/aws-sdk-go-v2/service/mediastore v1.29.6 h1:iafXeKlVqhC8/ScR2CzQlWzDm+B3BNcQD7SzZ+gE1LM= +github.com/aws/aws-sdk-go-v2/service/mediastore v1.29.6/go.mod h1:pyztXbSyAGD+TmvQhGva28W3KgwEsjZ39d/tM5E3WLk= +github.com/aws/aws-sdk-go-v2/service/memorydb v1.32.0 h1:R+jvAaitNKrnuBDpAxM/Pi/1JD5cRqwL3cQolngYf+M= +github.com/aws/aws-sdk-go-v2/service/memorydb v1.32.0/go.mod h1:ls5Htz+L0oFjuS/8Md/RLSLCFUpGkvlnZ2GLZ4NZguw= +github.com/aws/aws-sdk-go-v2/service/mgn v1.37.5 h1:BMu425Ntx40waGQ0/g6BeX1F/sYvKdIcO+ABys5Jv9s= +github.com/aws/aws-sdk-go-v2/service/mgn v1.37.5/go.mod h1:F3YMviBP/8gRnYBh8j+6MUw/c3ID0l3IMS37kHAo22Y= +github.com/aws/aws-sdk-go-v2/service/mq v1.34.4 h1:Oo18RmcBezamgeYgLQs0TvQte9qnBsT/h0FXaVR/su0= +github.com/aws/aws-sdk-go-v2/service/mq v1.34.4/go.mod h1:Ix0YBjTUQkaENu7moWEIOuRPvSXCankc9G8+6tCHPFE= +github.com/aws/aws-sdk-go-v2/service/mwaa v1.39.6 h1:E2/4c34w/DlacnWCB00i5vK84Q+R4THQekMFYKEQ6EU= +github.com/aws/aws-sdk-go-v2/service/mwaa v1.39.6/go.mod h1:VGfnBe0/1AmUklInis8fWYCsX1sytShIyJaAskYui8k= +github.com/aws/aws-sdk-go-v2/service/neptune v1.42.5 
h1:tfn0wZ5FFDV8USRyR1pbwVuMjc/8lxCXGiXtj4pGP2Y= +github.com/aws/aws-sdk-go-v2/service/neptune v1.42.5/go.mod h1:31tyZ8ZVqFkyO8beZNHcEOQZGn/BkuSpj92xz0DV47M= +github.com/aws/aws-sdk-go-v2/service/neptunegraph v1.21.5 h1:M6POQvRc86IBNXMGMnigORWW7TuI4DQw6w9/7a22AmE= +github.com/aws/aws-sdk-go-v2/service/neptunegraph v1.21.5/go.mod h1:SDNZtDXmPLQgX3rhJKQilrATByCSvfefeGBzmyHWV9A= +github.com/aws/aws-sdk-go-v2/service/networkfirewall v1.57.1 h1:ft8fBc54sf9RPLzZ9C3R2ICWlsJI7gNXzhe4KM6hcMU= +github.com/aws/aws-sdk-go-v2/service/networkfirewall v1.57.1/go.mod h1:aR3+jhGdmzkcu69LUu3uEfWSz48rSWZpRZ1UiW1brzY= +github.com/aws/aws-sdk-go-v2/service/networkmanager v1.39.7 h1:UqDxJzpwgrEi/AuVaXCqN3g0zysr0K/RPY7kxj3kAFs= +github.com/aws/aws-sdk-go-v2/service/networkmanager v1.39.7/go.mod h1:2lpNczbmNGrUPnMa04jr4J2BxQ3jv2pYErTQlELWESg= +github.com/aws/aws-sdk-go-v2/service/networkmonitor v1.12.6 h1:5MKZrK70vz0m3A/IbE00XgWdX6VARrQMe/lTtlh1VIc= +github.com/aws/aws-sdk-go-v2/service/networkmonitor v1.12.6/go.mod h1:r99VUsxYLupfRyaZ517RoASgRyspKDvj+T3Ec2bsJLg= +github.com/aws/aws-sdk-go-v2/service/notifications v1.7.4 h1:Ur4HSvZrzDevCVcXHLzj/VYKLYznFsIuXXzsqggt0HE= +github.com/aws/aws-sdk-go-v2/service/notifications v1.7.4/go.mod h1:jp/DVjlMmlD2RaRCAYs0IRy2k5XkppwVNc9wo4oYkkQ= +github.com/aws/aws-sdk-go-v2/service/notificationscontacts v1.5.8 h1:SoLCt2Ig7kir4Vv8VtFVqADtaE1iSrC/f2U9vVgCe4M= +github.com/aws/aws-sdk-go-v2/service/notificationscontacts v1.5.8/go.mod h1:TqON/FD1E56TNUpLtwI6m1PbjmuZRhrvbo1ZGOIbzto= +github.com/aws/aws-sdk-go-v2/service/oam v1.22.5 h1:To+7SakfElByzTR10RrFGAXRH2uWBDvMPTFOYQY0Wrw= +github.com/aws/aws-sdk-go-v2/service/oam v1.22.5/go.mod h1:huNDbI1vKiFhIuo8Q4hK09wk1kN+RFdbxrrjSsCZtCQ= +github.com/aws/aws-sdk-go-v2/service/odb v1.5.0 h1:e7gPLy+UbKMdrAgbN/E06dAq/OyTMV3YCQH85rAlAXA= +github.com/aws/aws-sdk-go-v2/service/odb v1.5.0/go.mod h1:sNgPICtv0QshuEoMhFiRT5rBWnpXMFvLhly/Hu0MqYw= +github.com/aws/aws-sdk-go-v2/service/opensearch v1.52.5 
h1:gkLP1OOn0/gBPD125+Ax+9DKuGGsu9TwvbZJ4bBgcsY= +github.com/aws/aws-sdk-go-v2/service/opensearch v1.52.5/go.mod h1:c1RKL9jCAUP+7ZtY+99yWcWxRFBsQ3LG5Klkj5PEoJs= +github.com/aws/aws-sdk-go-v2/service/opensearchserverless v1.26.4 h1:46xDV+bDfEaoI4CFYA/SASoD17PhdIfRcnybENoeA68= +github.com/aws/aws-sdk-go-v2/service/opensearchserverless v1.26.4/go.mod h1:a+I7XPLBv75d9aI6TvmcMn2osIxiZ8rxjSy/OZQQAlw= +github.com/aws/aws-sdk-go-v2/service/organizations v1.45.3 h1:JcKtlBBVZpu01E+WS5s6MerJezxVNW0arRinXwd8eMg= +github.com/aws/aws-sdk-go-v2/service/organizations v1.45.3/go.mod h1:oiUEFEALhJA54ODqgmRr3o5rZ+SOXARVOj4Gl3d935M= +github.com/aws/aws-sdk-go-v2/service/osis v1.20.2 h1:fmH/ayvn5AcQ2jnFMtbd57CwTtuOKcZLdfl3eGZP7oc= +github.com/aws/aws-sdk-go-v2/service/osis v1.20.2/go.mod h1:t3KwhHJvvtof6DJzL7JtGY4+cxPsQumV1snXgg0+aww= +github.com/aws/aws-sdk-go-v2/service/outposts v1.57.0 h1:1gMI04UYdiTWeebGwhcRRCmf4ypoGt0fwPa/z2J+vFU= +github.com/aws/aws-sdk-go-v2/service/outposts v1.57.0/go.mod h1:AX2swwJXvwgCE0695M12Vw8p/JU2PQNC/5J9ur1Zd9s= +github.com/aws/aws-sdk-go-v2/service/paymentcryptography v1.25.2 h1:jAVEnaD69Jhc3ePMMuKLj7Y6NNRNN4s1X/UT+SMlXag= +github.com/aws/aws-sdk-go-v2/service/paymentcryptography v1.25.2/go.mod h1:LtAy3qbryUglXiyAYdn+OCltWbMMMvYoUK6hAiFc73k= +github.com/aws/aws-sdk-go-v2/service/pcaconnectorad v1.15.6 h1:tZK9NedvW1WYWf+eZ437hUNETq4+eofZ9ja32FtFHLQ= +github.com/aws/aws-sdk-go-v2/service/pcaconnectorad v1.15.6/go.mod h1:fRBVcoZiTYjTywau+UcyTXAjtLxz20Jsaz0XXdG2950= +github.com/aws/aws-sdk-go-v2/service/pcs v1.14.0 h1:Dk/dj8EBPQBPawPMR2M9+tijanojSdoxJDSA5clenZo= +github.com/aws/aws-sdk-go-v2/service/pcs v1.14.0/go.mod h1:wyFACwTlB5ZUiOQAqs+5m7gj4xUCbojoUTaZYxa7BjM= +github.com/aws/aws-sdk-go-v2/service/pinpoint v1.39.6 h1:6vjEH7AL5aYC49apXto1dHgMBNDdZLh2L3Bve0vkE2o= +github.com/aws/aws-sdk-go-v2/service/pinpoint v1.39.6/go.mod h1:oNyevxM/xQifmv2yk482sM2isWXgloHLrOLoeOrOPHQ= +github.com/aws/aws-sdk-go-v2/service/pinpointsmsvoicev2 v1.25.5 
h1:S/QLsL7GTbrtjrALjKWdab+UBUB7LIHnEJtMvdD9khk= +github.com/aws/aws-sdk-go-v2/service/pinpointsmsvoicev2 v1.25.5/go.mod h1:TlvbcCoDxToksnKXX+nmSi70Kn0aMcPo3qr2hgbO+yo= +github.com/aws/aws-sdk-go-v2/service/pipes v1.23.5 h1:QrMb0weKCfbPmFM8Z3tHXGDd8b/g5kkbYSGELgYteOE= +github.com/aws/aws-sdk-go-v2/service/pipes v1.23.5/go.mod h1:OYOBK8E3mCVkk/6bCQk+J0R2JgLYotiBd10P07i6CTk= +github.com/aws/aws-sdk-go-v2/service/polly v1.53.7 h1:xOKXUyIN722uc+FtqUIeapvlh0iBM+SXt29mB0L3CVc= +github.com/aws/aws-sdk-go-v2/service/polly v1.53.7/go.mod h1:4xoAju2Su1TJ1Q5Y6hxNFLb3kBzYOtgUN05dQj3VTp4= +github.com/aws/aws-sdk-go-v2/service/pricing v1.39.6 h1:SapAI7aLrvLNUCBeBhhR6cU7TFIrRC5KNeaj72hV+fc= +github.com/aws/aws-sdk-go-v2/service/pricing v1.39.6/go.mod h1:TtNWNQGg2WmSIS+j/ZqyJD3xY6zyAuYHBDGxFYQftjU= +github.com/aws/aws-sdk-go-v2/service/qbusiness v1.33.6 h1:kLTYFvi4+nsETUZpwqPcVVOfOX/lD7OvQ4aU+TSNGVE= +github.com/aws/aws-sdk-go-v2/service/qbusiness v1.33.6/go.mod h1:XxU8fY4XHMpkvrCDaylvGiaz1PSU1nntX3XasTEomDQ= +github.com/aws/aws-sdk-go-v2/service/qldb v1.30.6 h1:5FBEiFjL83odCokDLGauL1g5Noiapq8jRsqKN2/YaF8= +github.com/aws/aws-sdk-go-v2/service/qldb v1.30.6/go.mod h1:FZR8mKbaQK8xEyQmtZKEPYj6Rxgi3iLKFi7MutHzYNI= +github.com/aws/aws-sdk-go-v2/service/quicksight v1.95.0 h1:h4UqFZgvghZXPeP95J1bDDW/+51Ge/JwXge+fhFkXqw= +github.com/aws/aws-sdk-go-v2/service/quicksight v1.95.0/go.mod h1:aJPu5hqpBhcV4gXqbAuZhBiIZ+dXHrvJ176qTPADa2A= +github.com/aws/aws-sdk-go-v2/service/ram v1.34.6 h1:S/BivEPJDOKDEaLQuodznRu/9VscK2n24Oi464ySkao= +github.com/aws/aws-sdk-go-v2/service/ram v1.34.6/go.mod h1:IjW9GK9av7d2rdmmi3uze2erokbWAxUtMwDc1YOj+9M= +github.com/aws/aws-sdk-go-v2/service/rbin v1.26.6 h1:wKVcl95mVcHW1rJMsf5SsA9T2zrfOmC5WyDrqpFVnVE= +github.com/aws/aws-sdk-go-v2/service/rbin v1.26.6/go.mod h1:LCbTwbuAosB0UYOB4eMr7CmzwKPaO5ZD+UXEhJ6TPn4= +github.com/aws/aws-sdk-go-v2/service/rds v1.108.2 h1:zdlqufjtiEnoL6xdoDXem0reNh/ySUYJupUWEVBLshA= +github.com/aws/aws-sdk-go-v2/service/rds v1.108.2/go.mod 
h1:VOBL5tbhS7AF0m5YpfwLuRBpb5QVp4EWSPizUr/D6iE= +github.com/aws/aws-sdk-go-v2/service/redshift v1.59.0 h1:MtE4oUVeljvF2CWPZwzWERizY5uhZV7os1eJC9oA8BI= +github.com/aws/aws-sdk-go-v2/service/redshift v1.59.0/go.mod h1:ARgrCFhclWArEevJ/GAn+UBBVc9+f9oFurQlyjx262I= +github.com/aws/aws-sdk-go-v2/service/redshiftdata v1.37.6 h1:PC5iIPcOwMMqAocH4fuiyLKbEOKr9t75zhp7yysK0NY= +github.com/aws/aws-sdk-go-v2/service/redshiftdata v1.37.6/go.mod h1:u8BCO9VvZZqxHaCk4i17Js9WSGR45KPN35k/Gi79hng= +github.com/aws/aws-sdk-go-v2/service/redshiftserverless v1.31.8 h1:YJixVrWNAJYfCXcMVMppPA1RQaPtZ0oXGrLDRf5FHIU= +github.com/aws/aws-sdk-go-v2/service/redshiftserverless v1.31.8/go.mod h1:1T8W8J3Xiwhtikj4yLUXTFwOB6cWvukAzncJUV9A5uw= +github.com/aws/aws-sdk-go-v2/service/rekognition v1.51.5 h1:7XEUHyj3NhDxz8ogR9Zqj8SRA/5J2OJ+u4lpGu+qmJ0= +github.com/aws/aws-sdk-go-v2/service/rekognition v1.51.5/go.mod h1:2lepPReuRVIackBiaSO6c5ch3HXIROzHFxCCpMQgKJc= +github.com/aws/aws-sdk-go-v2/service/resiliencehub v1.34.6 h1:LvBVCmxDLAp3tNkAXNvedPjNw2DFJ9W0mwOpbkjaSUE= +github.com/aws/aws-sdk-go-v2/service/resiliencehub v1.34.6/go.mod h1:ZBunG0PHHt5TwsVfGyDpPtAeqmCnlo8SjVRsS/me+5Y= +github.com/aws/aws-sdk-go-v2/service/resourceexplorer2 v1.22.0 h1:+vdGkeg7koJ0MtMui392lmmF2gKISzqiUryQuq8HumQ= +github.com/aws/aws-sdk-go-v2/service/resourceexplorer2 v1.22.0/go.mod h1:WeUb0leMU6VjQkjVzmUa/DBuqgaMCgcaWoWNJy4Hg5M= +github.com/aws/aws-sdk-go-v2/service/resourcegroups v1.33.7 h1:aJEEtqhpU2Vr2zFQ6jJT3z6ryqNSJjQ3UqEUKIsCdU4= +github.com/aws/aws-sdk-go-v2/service/resourcegroups v1.33.7/go.mod h1:RD/9wH7u81Og53+2Vt7qAOA6PstpLcyiud5wCv0R/ds= +github.com/aws/aws-sdk-go-v2/service/resourcegroupstaggingapi v1.30.6 h1:c1gIOTNJ6gkocnL33DP1St++uv+f7ClFiUjR5/Pm40o= +github.com/aws/aws-sdk-go-v2/service/resourcegroupstaggingapi v1.30.6/go.mod h1:KJZ2lPXqxMULgTX/ldDAa2WeLAR2qz7vGqLEJLCP1RM= +github.com/aws/aws-sdk-go-v2/service/rolesanywhere v1.21.6 h1:1n0OZvoccoCuMn8GYI8/A78sWc0NKc3VTgTyO3fmasY= 
+github.com/aws/aws-sdk-go-v2/service/rolesanywhere v1.21.6/go.mod h1:+X3mqbUeamf2ANy4ppudqH0s6tuH2pFl04Cq8gFAikc= +github.com/aws/aws-sdk-go-v2/service/route53 v1.58.4 h1:KycXrohD5OxAZ5h02YechO2gevvoHfAPAaJM5l8zqb0= +github.com/aws/aws-sdk-go-v2/service/route53 v1.58.4/go.mod h1:xNLZLn4SusktBQ5moqUOgiDKGz3a7vHwF4W0KD+WBPc= +github.com/aws/aws-sdk-go-v2/service/route53domains v1.34.4 h1:mQ7ZPMQ2Dz4dl//dgMOWmApKXGZ9f9cHza7Qh9tnqSM= +github.com/aws/aws-sdk-go-v2/service/route53domains v1.34.4/go.mod h1:7q323bgF8xAtY1+rN/WVtUsbtSPZWWOVsIID9zAI5KA= +github.com/aws/aws-sdk-go-v2/service/route53profiles v1.9.6 h1:Tx7z/TsZ+OdtDtUeZFrzAU//NhnFMiGRxeAEEeTIZOI= +github.com/aws/aws-sdk-go-v2/service/route53profiles v1.9.6/go.mod h1:d46EQzstY7ltSyackMoYMJGzq+TrF1RYr3DU15t0mCo= +github.com/aws/aws-sdk-go-v2/service/route53recoverycontrolconfig v1.31.7 h1:JztKalb2lLUv07Ls1J4ePVmg0RUgyRBx1/k8maIkawE= +github.com/aws/aws-sdk-go-v2/service/route53recoverycontrolconfig v1.31.7/go.mod h1:PJXGu3IjcUbdL8taf9Zl9vB6ZmigpjAq+gFz6hDBmGc= +github.com/aws/aws-sdk-go-v2/service/route53recoveryreadiness v1.26.6 h1:dT53/rvqKgu4MMOzwhv6HVtxgrWp9SgYwkviQIBbmeQ= +github.com/aws/aws-sdk-go-v2/service/route53recoveryreadiness v1.26.6/go.mod h1:6d6uDK4yLgR+5jLqWdYejxBE2yS5NV/4FrOmNZclrm0= +github.com/aws/aws-sdk-go-v2/service/route53resolver v1.40.6 h1:lhnQ2Nkm3liKRxl4j3A18DYzGkxaixaSNF0fgXhtZDI= +github.com/aws/aws-sdk-go-v2/service/route53resolver v1.40.6/go.mod h1:hFCmtJyNyNNKxzX43Skr+l4JTpV/w8x470hIJBedcO0= +github.com/aws/aws-sdk-go-v2/service/rum v1.28.7 h1:WD3KNbMhPNIo6NeWIKvH+JyB+nlxA+3FP8T6AeoC8zY= +github.com/aws/aws-sdk-go-v2/service/rum v1.28.7/go.mod h1:d1TetEj0rCx4wEye6LeIjDCgHUkIIS/6cXu8UaI1aP8= +github.com/aws/aws-sdk-go-v2/service/s3 v1.88.4 h1:mUI3b885qJgfqKDUSj6RgbRqLdX0wGmg8ruM03zNfQA= +github.com/aws/aws-sdk-go-v2/service/s3 v1.88.4/go.mod h1:6v8ukAxc7z4x4oBjGUsLnH7KGLY9Uhcgij19UJNkiMg= +github.com/aws/aws-sdk-go-v2/service/s3control v1.66.2 
h1:/ZonyP9GF0PKVTCLvnce+muPdS8REakUTHwkP8cyFFU= +github.com/aws/aws-sdk-go-v2/service/s3control v1.66.2/go.mod h1:m5ZEef7/rUTT4ed1B22b+MhYKWnp8Qkj4iIp465G6J0= +github.com/aws/aws-sdk-go-v2/service/s3outposts v1.33.6 h1:ISvhq3XY67cCOhHQNuERigQBjUNkr4gjM7f0MFRq9P0= +github.com/aws/aws-sdk-go-v2/service/s3outposts v1.33.6/go.mod h1:rLtMvFVwJRG+in5WrAQxgzDU2KBsenSsNrpLbRa0Xrw= +github.com/aws/aws-sdk-go-v2/service/s3tables v1.10.5 h1:lvhu7h0CC9vsL0kxghR6OeGJwF5VsXDHfeGEAwl6XWE= +github.com/aws/aws-sdk-go-v2/service/s3tables v1.10.5/go.mod h1:ZPE8QxN4+WylqmPew7p7G+J+h1qQo/pbBNLGKr/GJ/o= +github.com/aws/aws-sdk-go-v2/service/s3vectors v1.4.8 h1:ERb8DDNjGcCkDHblpHkSNzEs1ONBk+rCITYA6z+Yd1w= +github.com/aws/aws-sdk-go-v2/service/s3vectors v1.4.8/go.mod h1:gSvTmSFxwjt2k+U9eP8LQpR3sDYpwA/desV1WjaEGJ8= +github.com/aws/aws-sdk-go-v2/service/sagemaker v1.215.3 h1:7QukmIiqAnEoVfduk36whgv8YGtKjcZc1hilfxZxqYQ= +github.com/aws/aws-sdk-go-v2/service/sagemaker v1.215.3/go.mod h1:BSg+goTRoWiHkPwaU91RjaWtCB4+BAcbj6X6Ihvs8I8= +github.com/aws/aws-sdk-go-v2/service/scheduler v1.17.5 h1:QaBANQbMZMyyZ8UmuOaa533NCkgjtwuKyfJqd6fziUQ= +github.com/aws/aws-sdk-go-v2/service/scheduler v1.17.5/go.mod h1:9ulCU1KqL8XYYCu7Zj15WB2lSlSAb1sDzmwVl9LuMGI= +github.com/aws/aws-sdk-go-v2/service/schemas v1.33.5 h1:f94foSb0xp3flzTDe0qHRl/kwsp5RnUvMGH9jYophXc= +github.com/aws/aws-sdk-go-v2/service/schemas v1.33.5/go.mod h1:StI8kLU7UqwT4GUIyHwd4cmLXxglmNbT+faOZltSlA4= +github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.39.6 h1:9PWl450XOG+m5lKv+qg5BXso1eLxpsZLqq7VPug5km0= +github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.39.6/go.mod h1:hwt7auGsDcaNQ8pzLgE2kCNyIWouYlAKSjuUu5Dqr7I= +github.com/aws/aws-sdk-go-v2/service/securityhub v1.64.4 h1:56LRTpQSA6dqo2inwUwICUgnlCe3kAddCOhWggdDsYQ= +github.com/aws/aws-sdk-go-v2/service/securityhub v1.64.4/go.mod h1:whhpbyK81XOJWOiCmN4SbYv3X+kgNlMgHOQAnEMRXsM= +github.com/aws/aws-sdk-go-v2/service/securitylake v1.24.6 h1:HDDXTIW91VHxTgu+05f8n5HzJrDAUDqbjX1v3H2KV9Y= 
+github.com/aws/aws-sdk-go-v2/service/securitylake v1.24.6/go.mod h1:MY0oQdCHOaepsEJoN/WoaCHNoksmhrBrBlDvjUQYJmY= +github.com/aws/aws-sdk-go-v2/service/serverlessapplicationrepository v1.29.6 h1:70PEH4oDsU+YER8KUfSedHGlRBVihtcXxah3rokT9S0= +github.com/aws/aws-sdk-go-v2/service/serverlessapplicationrepository v1.29.6/go.mod h1:spm5LxERJz0IOpYf9fH6lBDDGgB3OXSwrV0Oj+fL7xU= +github.com/aws/aws-sdk-go-v2/service/servicecatalog v1.38.6 h1:ZGvb2y036q8v5bZhuqzmDnqdpo85u/3/B+9NnbYPcXE= +github.com/aws/aws-sdk-go-v2/service/servicecatalog v1.38.6/go.mod h1:eNS1O7ALYB0n1K6UJASh8kgw1KYGg26wHxx35VZgjFs= +github.com/aws/aws-sdk-go-v2/service/servicecatalogappregistry v1.35.6 h1:OFmbZQixBI0tnwxxoiZtdlGZSOqaOHq7wkxJGLcOIsk= +github.com/aws/aws-sdk-go-v2/service/servicecatalogappregistry v1.35.6/go.mod h1:zOpRzlMssUTM/YZ/JVuztNnGUMGTvOUHTCtyLEtOUnU= +github.com/aws/aws-sdk-go-v2/service/servicediscovery v1.39.9 h1:snXikqd2A2wiFwFoEjWVLE1p2hbRaVkSxHCcV/vxibg= +github.com/aws/aws-sdk-go-v2/service/servicediscovery v1.39.9/go.mod h1:D+QXio/b/Fxee/lnsYvajiEuWcPzCIc2B04YzIHX0/M= +github.com/aws/aws-sdk-go-v2/service/servicequotas v1.33.0 h1:l+Sd8288cwIW6MMq/qANtWNQzwR8qG8fru4KQl0edjY= +github.com/aws/aws-sdk-go-v2/service/servicequotas v1.33.0/go.mod h1:OzKW+2JATYOrFN/hai+5/4SezjqbEeLeZrQqNPLPe+s= +github.com/aws/aws-sdk-go-v2/service/ses v1.34.5 h1:NwOeuOFrWoh4xWKINrmaAK4Vh75jmmY0RAuNjQ6W5Es= +github.com/aws/aws-sdk-go-v2/service/ses v1.34.5/go.mod h1:m3BsMJZD0eqjGIniBzwrNUqG9ZUPquC4hY9FyE2qNFo= +github.com/aws/aws-sdk-go-v2/service/sesv2 v1.53.5 h1:ZHBssvFtrtfNCm5APnzFrkdCX4KPDKlSGZ2NbfPmISY= +github.com/aws/aws-sdk-go-v2/service/sesv2 v1.53.5/go.mod h1:eJP5lLTdqKwiQB5mKKaSjjJlLB0xcT3pTFF576PbdP0= +github.com/aws/aws-sdk-go-v2/service/sfn v1.39.6 h1:0kpMhSSBrZmYeeKmyM4RftA4XeiC0PDVcbUg3gXNqfk= +github.com/aws/aws-sdk-go-v2/service/sfn v1.39.6/go.mod h1:XyrAUQxv//wWMFyh2mvvTZL9vaYdpjM3Rg5A5QOFOaE= +github.com/aws/aws-sdk-go-v2/service/shield v1.34.6 
h1:AWKt4pVqiqzLIT3xoOThd0xT6dY1lSB+7yDcn0N3I48= +github.com/aws/aws-sdk-go-v2/service/shield v1.34.6/go.mod h1:Io5NYTndCqsmL+vdfoQEkInZkbZn8gLloqEjGvng+7M= +github.com/aws/aws-sdk-go-v2/service/signer v1.31.6 h1:TnlG33tsUOBnu7rMicF8YFIC0pxkJdBJwo2R0W5L6Fw= +github.com/aws/aws-sdk-go-v2/service/signer v1.31.6/go.mod h1:a6U0A/LNWknEIS7Fmf4McuUwImMlo6qrKkhbEpSczP8= +github.com/aws/aws-sdk-go-v2/service/sns v1.38.5 h1:c0hINjMfDQvQLJJxfNNcIaLYVLC7E0W2zOQOVVKLnnU= +github.com/aws/aws-sdk-go-v2/service/sns v1.38.5/go.mod h1:E427ZzdOMWh/4KtD48AGfbWLX14iyw9URVOdIwtv80o= +github.com/aws/aws-sdk-go-v2/service/sqs v1.42.8 h1:cWiY+//XL5QOYKJyf4Pvt+oE/5wSIi095+bS+ME2lGw= +github.com/aws/aws-sdk-go-v2/service/sqs v1.42.8/go.mod h1:sLvnKf0p0sMQ33nkJGP2NpYyWHMojpL0O9neiCGc9lc= +github.com/aws/aws-sdk-go-v2/service/ssm v1.65.1 h1:TFg6XiS7EsHN0/jpV3eVNczZi/sPIVP5jxIs+euIESQ= +github.com/aws/aws-sdk-go-v2/service/ssm v1.65.1/go.mod h1:OIezd9K0sM/64DDP4kXx/i0NdgXu6R5KE6SCsIPJsjc= +github.com/aws/aws-sdk-go-v2/service/ssmcontacts v1.30.8 h1:Nqsc8EhmXUwGCLLxB1cCt/8sDyVUDaS9zpkXyd8zcD0= +github.com/aws/aws-sdk-go-v2/service/ssmcontacts v1.30.8/go.mod h1:AGjoKT5weHZ8oo4sFEorNVKsg9noEmEBiXsA5e9veEE= +github.com/aws/aws-sdk-go-v2/service/ssmincidents v1.39.5 h1:oGUMJl6Wf7vZWiaCRE4MPjtnet6aEjnpF/1WxoKlJ+A= +github.com/aws/aws-sdk-go-v2/service/ssmincidents v1.39.5/go.mod h1:5TeCNbB10rN3TUR7NWFdRWFLfrjebhMvvE0lQKS30aE= +github.com/aws/aws-sdk-go-v2/service/ssmquicksetup v1.8.6 h1:sGhOo5CZV1QV1gsAcrwXJVm2EcVaTCVLhEIr29eaihA= +github.com/aws/aws-sdk-go-v2/service/ssmquicksetup v1.8.6/go.mod h1:Y07XpBl1TPJFFfYf4OD1PPmui8rFba8k2u3gAH1H5YM= +github.com/aws/aws-sdk-go-v2/service/ssmsap v1.25.5 h1:D2bijFgTDf26Oizhsj5X6X1B8ibIaZVHgonPeLnNTlQ= +github.com/aws/aws-sdk-go-v2/service/ssmsap v1.25.5/go.mod h1:11+FpRI0DIr3RuA3pRCDwVA22LP4vymQB7MCTMCApKw= +github.com/aws/aws-sdk-go-v2/service/sso v1.29.6 h1:A1oRkiSQOWstGh61y4Wc/yQ04sqrQZr1Si/oAXj20/s= +github.com/aws/aws-sdk-go-v2/service/sso v1.29.6/go.mod 
h1:5PfYspyCU5Vw1wNPsxi15LZovOnULudOQuVxphSflQA= +github.com/aws/aws-sdk-go-v2/service/ssoadmin v1.36.2 h1:4O5fAx9BpoX5c+5BxUgOLJM7kS0K20JebxzLzfu+JIk= +github.com/aws/aws-sdk-go-v2/service/ssoadmin v1.36.2/go.mod h1:7iR/6+xIFUPl0LnAZ0RSBQ4A4R6CyA7WrxKyB9QncWc= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.1 h1:5fm5RTONng73/QA73LhCNR7UT9RpFH3hR6HWL6bIgVY= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.1/go.mod h1:xBEjWD13h+6nq+z4AkqSfSvqRKFgDIQeaMguAJndOWo= +github.com/aws/aws-sdk-go-v2/service/storagegateway v1.42.7 h1:Pr+heLI6opJl3ntVUqiIB6ehpCERmKvEsTq1JmRq18M= +github.com/aws/aws-sdk-go-v2/service/storagegateway v1.42.7/go.mod h1:e5HFdmxGXdN1LQ/a+twPR3PuQyvI/aPK38MWVU17QHc= +github.com/aws/aws-sdk-go-v2/service/sts v1.38.6 h1:p3jIvqYwUZgu/XYeI48bJxOhvm47hZb5HUQ0tn6Q9kA= +github.com/aws/aws-sdk-go-v2/service/sts v1.38.6/go.mod h1:WtKK+ppze5yKPkZ0XwqIVWD4beCwv056ZbPQNoeHqM8= +github.com/aws/aws-sdk-go-v2/service/swf v1.32.5 h1:uGf0//B3vB5y2gqeP7qUtyKh8A+MrhWgGXlisdsCCsk= +github.com/aws/aws-sdk-go-v2/service/swf v1.32.5/go.mod h1:jY8XhNSBjSezBEJA5pEM9sW7nqBc6EDmHwIJ1hj5pq4= +github.com/aws/aws-sdk-go-v2/service/synthetics v1.41.0 h1:PhrW72CcuCEhPNItPyE7eO/gE5fx4QJiNQ2Hwo6gqRM= +github.com/aws/aws-sdk-go-v2/service/synthetics v1.41.0/go.mod h1:eCpO7DjOFxysY+P8dEFJMWCTnpMLt7IGbIhSk5yHDMA= +github.com/aws/aws-sdk-go-v2/service/taxsettings v1.16.6 h1:RS2HB6ey9KpPETo2pWoPNcedvZY6E6+TETJ+3qHmrVA= +github.com/aws/aws-sdk-go-v2/service/taxsettings v1.16.6/go.mod h1:f2PHOYOjNlFxzDo6eR6Zf89XmUancu6ORaoTVURTY7g= +github.com/aws/aws-sdk-go-v2/service/timestreaminfluxdb v1.17.0 h1:QvVSS9mo0AiMK9ndQFpaj6R7dM7LEpoo+nh/ZeeAxPc= +github.com/aws/aws-sdk-go-v2/service/timestreaminfluxdb v1.17.0/go.mod h1:7ObjtSvjDRJVBRhd9zxh6kgxYMbA9vtBQ24+RNjUdao= +github.com/aws/aws-sdk-go-v2/service/timestreamquery v1.35.5 h1:ivzKU64lfi9F5VwBLIEcoqQMtXKrn7iUHOmhU8I88pA= +github.com/aws/aws-sdk-go-v2/service/timestreamquery v1.35.5/go.mod h1:cSU9wIi0AjMmlo0ydUD839k1yh7fNnTdCZokIwol5Qg= 
+github.com/aws/aws-sdk-go-v2/service/timestreamwrite v1.35.5 h1:BjJ8HypXtGM+O5HP2rPfbxq50UuecvoZpRPdTAnILOM= +github.com/aws/aws-sdk-go-v2/service/timestreamwrite v1.35.5/go.mod h1:rnOKv/DJpfdiPPOyAdsHSommMuIHW6bmP2rrQJJYPdU= +github.com/aws/aws-sdk-go-v2/service/transcribe v1.53.0 h1:O8BRjUAD1Jf15RLBaAPHkShlAB+poKZdsAp+Tpa/txY= +github.com/aws/aws-sdk-go-v2/service/transcribe v1.53.0/go.mod h1:ZZN5Hh+s7Cr845LY5cWKJiCplzW/vZwpnpjxifgo4Ko= +github.com/aws/aws-sdk-go-v2/service/transfer v1.67.0 h1:1Z3X4hOfdiyJP+a/yZzDB577mzTzLB0m/JDu+1VD4LM= +github.com/aws/aws-sdk-go-v2/service/transfer v1.67.0/go.mod h1:28XXFJKdD8UJP9USN1DMtNNJpSt06CyozE/UaPbgjGA= +github.com/aws/aws-sdk-go-v2/service/verifiedpermissions v1.29.5 h1:U7NFjnobEmEFqF9DB4tMLqSIJmciwcGYajpOOJgRdgY= +github.com/aws/aws-sdk-go-v2/service/verifiedpermissions v1.29.5/go.mod h1:BkoBrVIzx/RT4x6XqY1o5iUqq9Hh62PKnBC9YBClDvk= +github.com/aws/aws-sdk-go-v2/service/vpclattice v1.19.0 h1:3OyM+OTHo2c5u3lWQijbwDGF7jmnpZwya+IOAywsQQw= +github.com/aws/aws-sdk-go-v2/service/vpclattice v1.19.0/go.mod h1:DjBHb+rO7d7WLJKX73dO6fULvhHdJrXfv+k3Kkiespc= +github.com/aws/aws-sdk-go-v2/service/waf v1.30.5 h1:JUQsG3CPBtXGGdAril5Fois4wiFE1Z3Z/mkvkjd4TCQ= +github.com/aws/aws-sdk-go-v2/service/waf v1.30.5/go.mod h1:sRusTwVAJQtNy6vsrAHGXtd2WIgBGusW2waE6Kgc/So= +github.com/aws/aws-sdk-go-v2/service/wafregional v1.30.6 h1:DqojtTvnVwuylF6Ru08okb9UGNrq3qc67mZ8Gpz9sVk= +github.com/aws/aws-sdk-go-v2/service/wafregional v1.30.6/go.mod h1:uo8gzm2uqf+gYYa2blt1ITjIl59iTzOznUGaajPfT+g= +github.com/aws/aws-sdk-go-v2/service/wafv2 v1.68.0 h1:BUhKcwhfjDIUSA2+J9LLm+C2Z2tcBwFvRpEQAfuWlT4= +github.com/aws/aws-sdk-go-v2/service/wafv2 v1.68.0/go.mod h1:maJyEaarDIirG/MA0EYIxWc1ctk4sbc4+cEUVCIgorI= +github.com/aws/aws-sdk-go-v2/service/wellarchitected v1.39.6 h1:0vFMsxhs4763afIR7366ricWl+w1sVOeroRkMOV9BGA= +github.com/aws/aws-sdk-go-v2/service/wellarchitected v1.39.6/go.mod h1:zmHAn01szsTI7D4u2qgUs0CMklmz2af4EU12bDOWZrM= 
+github.com/aws/aws-sdk-go-v2/service/workmail v1.36.4 h1:hVmkAUyvH4OZkiW0HmNYYeDjVL7jyj3kCN/3r6nPiW4= +github.com/aws/aws-sdk-go-v2/service/workmail v1.36.4/go.mod h1:RlfJYDlvfjkqFAf3Fim2a5ryz6/gOYQJXJLJV5/c/xU= +github.com/aws/aws-sdk-go-v2/service/workspaces v1.63.6 h1:QHAuU6Tfq2k6Okb8cdZ98BkXzmUZflfKG64Fc44CnQQ= +github.com/aws/aws-sdk-go-v2/service/workspaces v1.63.6/go.mod h1:LTdwIWneoBQ7vVoD1gAGXvcNWq1gkgQqbCoKg1iDUZ0= +github.com/aws/aws-sdk-go-v2/service/workspacesweb v1.32.6 h1:h8PGWVBIctOlbBpupw4CorUI8gCtX7d5o2lSHi12XgM= +github.com/aws/aws-sdk-go-v2/service/workspacesweb v1.32.6/go.mod h1:nUz45LzKA733mQl1wIKcWK87SkJWXo+TiLYC5NUZf3M= +github.com/aws/aws-sdk-go-v2/service/xray v1.36.4 h1:G5VZW+21OPiOGoAFM+gBWPLKyuRaB2dC/RdYgL82ZS8= +github.com/aws/aws-sdk-go-v2/service/xray v1.36.4/go.mod h1:FYhPO/0+3jtQ10m0K1DnBTrJkNgXsrYhSHOt3/mCOnE= +github.com/aws/smithy-go v1.23.0 h1:8n6I3gXzWJB2DxBDnfxgBaSX6oe0d/t10qGz7OKqMCE= +github.com/aws/smithy-go v1.23.0/go.mod h1:t1ufH5HMublsJYulve2RKmHDC15xu1f26kHCp/HgceI= +github.com/beevik/etree v1.6.0 h1:u8Kwy8pp9D9XeITj2Z0XtA5qqZEmtJtuXZRQi+j03eE= +github.com/beevik/etree v1.6.0/go.mod h1:bh4zJxiIr62SOf9pRzN7UUYaEDa9HEKafK25+sLc0Gc= github.com/bgentry/speakeasy v0.1.0 h1:ByYyxL9InA1OWqxJqqp2A5pYHUrCiAL6K3J+LKSsQkY= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc h1:biVzkmvwrH8WK8raXaxBx6fRVTlJILwEwQGL1I/ByEI= github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8= -github.com/bufbuild/protocompile v0.4.0 h1:LbFKd2XowZvQ/kajzguUp2DC9UEIQhIq77fZZlaQsNA= -github.com/bufbuild/protocompile v0.4.0/go.mod h1:3v93+mbWn/v3xzN+31nwkJfrEpAUwp+BagBSZWx+TP8= -github.com/cedar-policy/cedar-go v0.1.0 h1:2tZwWn8tNO/896YAM7OQmH3vn98EeHEA3g9anwdVZvA= -github.com/cedar-policy/cedar-go v0.1.0/go.mod h1:pEgiK479O5dJfzXnTguOMm+bCplzy5rEEFPGdZKPWz4= 
+github.com/bufbuild/protocompile v0.14.1 h1:iA73zAf/fyljNjQKwYzUHD6AD4R8KMasmwa/FBatYVw= +github.com/bufbuild/protocompile v0.14.1/go.mod h1:ppVdAIhbr2H8asPk6k4pY7t9zB1OU5DoEw9xY/FUi1c= +github.com/cedar-policy/cedar-go v1.2.6 h1:q6f1sRxhoBG7lnK/fH6oBG33ruf2yIpcfcPXNExANa0= +github.com/cedar-policy/cedar-go v1.2.6/go.mod h1:h5+3CVW1oI5LXVskJG+my9TFCYI5yjh/+Ul3EJie6MI= github.com/cloudflare/circl v1.6.1 h1:zqIqSPIndyBh1bjLVVDHMPpVKqp8Su/V+6MeDzzQBQ0= github.com/cloudflare/circl v1.6.1/go.mod h1:uddAzsPgqdMAYatqJ0lsjX1oECcQLIlRpzZh3pJrofs= github.com/cyphar/filepath-securejoin v0.4.1 h1:JyxxyPEaktOD+GAnqIqTf9A8tHyAG22rowi7HkoSU1s= github.com/cyphar/filepath-securejoin v0.4.1/go.mod h1:Sdj7gXlvMcPZsbhwhQ33GguGLDGQL7h7bg04C/+u9jI= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dlclark/regexp2 v1.11.5 h1:Q/sSnsKerHeCkc/jSTNq1oCm7KiVgUMZRDUoRu0JQZQ= github.com/dlclark/regexp2 v1.11.5/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8= github.com/emirpasic/gods v1.18.1 h1:FXtiHYKDGKCW2KzwZKx0iC0PQmdlorYgdFG9jPXJ1Bc= @@ -592,12 +605,14 @@ github.com/go-git/go-billy/v5 v5.6.2/go.mod h1:rcFC2rAsp/erv7CMz9GczHcuD0D32fWzH github.com/go-git/go-git/v5 v5.14.0 h1:/MD3lCrGjCen5WfEAzKg00MJJffKhC8gzS80ycmCi60= github.com/go-git/go-git/v5 v5.14.0/go.mod h1:Z5Xhoia5PcWA3NF8vRLURn9E5FRhSl7dGj9ItW3Wk5k= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= -github.com/go-logr/logr v1.4.2/go.mod 
h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= +github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-test/deep v1.0.3 h1:ZrJSEWsXzPOxaZnFteGEfooLba+ju3FYIbOrS+rQd68= github.com/go-test/deep v1.0.3/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= +github.com/goccy/go-yaml v1.18.0 h1:8W7wMFS12Pcas7KU+VVkaiCng+kG8QiFeFwzFb+rwuw= +github.com/goccy/go-yaml v1.18.0/go.mod h1:XBurs7gK8ATbW4ZPGKgcbrY1Br56PdM69F7LkFRi1kA= github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 h1:f+oWsMOmNPc8JmEHVZIycC7hBoQxHH9pNKQORJNozsQ= github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8/go.mod h1:wcDNUvekVysuuOpQKo3191zZyTpiI6se1N1ULghS0sw= github.com/golang/protobuf v1.1.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -614,8 +629,8 @@ github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/hashicorp/aws-cloudformation-resource-schema-sdk-go v0.23.0 h1:l16/Vrl0+x+HjHJWEjcKPwHYoxN9EC78gAFXKlH6m84= github.com/hashicorp/aws-cloudformation-resource-schema-sdk-go v0.23.0/go.mod h1:HAmscHyzSOfB1Dr16KLc177KNbn83wscnZC+N7WyaM8= -github.com/hashicorp/aws-sdk-go-base/v2 v2.0.0-beta.65 h1:81+kWbE1yErFBMjME0I5k3x3kojjKsWtPYHEAutoPow= -github.com/hashicorp/aws-sdk-go-base/v2 v2.0.0-beta.65/go.mod h1:WtMzv9T++tfWVea+qB2MXoaqxw33S8bpJslzUike2mQ= +github.com/hashicorp/aws-sdk-go-base/v2 v2.0.0-beta.67 h1:IS4mjtvkLHXWI5yn/t9ILOUiBqPePMFaO4IRh5pcMk4= +github.com/hashicorp/aws-sdk-go-base/v2 v2.0.0-beta.67/go.mod h1:l81jrdpcZSWUsJs4BGFfdGScefSYEFQRLMQRG3uyvT0= github.com/hashicorp/awspolicyequivalence v1.7.0 h1:HxwPEw2/31BqQa73PinGciTfG2uJ/ATelvDG8X1gScU= 
github.com/hashicorp/awspolicyequivalence v1.7.0/go.mod h1:+oCTxQEYt+GcRalqrqTCBcJf100SQYiWQ4aENNYxYe0= github.com/hashicorp/cli v1.1.7 h1:/fZJ+hNdwfTSfsxMBa9WWMlfjUZbX8/LnUxgAd7lCVU= @@ -635,8 +650,8 @@ github.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVH github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= -github.com/hashicorp/go-plugin v1.6.3 h1:xgHB+ZUSYeuJi96WtxEjzi23uh7YQpznjGh0U0UUrwg= -github.com/hashicorp/go-plugin v1.6.3/go.mod h1:MRobyh+Wc/nYy1V4KAXUiYfzxoYhs7V1mlH1Z7iY2h0= +github.com/hashicorp/go-plugin v1.7.0 h1:YghfQH/0QmPNc/AZMTFE3ac8fipZyZECHdDPshfk+mA= +github.com/hashicorp/go-plugin v1.7.0/go.mod h1:BExt6KEaIYx804z8k4gRzRLEvxKVb+kn0NMcihqOqb8= github.com/hashicorp/go-retryablehttp v0.7.7 h1:C8hUCYzor8PIfXHa4UrZkU4VvK8o9ISHxT2Q8+VepXU= github.com/hashicorp/go-retryablehttp v0.7.7/go.mod h1:pkQpWZeYWskR+D1tR2O5OcBFOxfA7DoAO6xtkuQnHTk= github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= @@ -646,48 +661,50 @@ github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKe github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/hc-install v0.9.2 h1:v80EtNX4fCVHqzL9Lg/2xkp62bbvQMnvPQ0G+OmtO24= github.com/hashicorp/hc-install v0.9.2/go.mod h1:XUqBQNnuT4RsxoxiM9ZaUk0NX8hi2h+Lb6/c0OZnC/I= -github.com/hashicorp/hcl/v2 v2.23.0 h1:Fphj1/gCylPxHutVSEOf2fBOh1VE4AuLV7+kbJf3qos= -github.com/hashicorp/hcl/v2 v2.23.0/go.mod h1:62ZYHrXgPoX8xBnzl8QzbWq4dyDsDtfCRgIq1rbJEvA= +github.com/hashicorp/hcl/v2 v2.24.0 h1:2QJdZ454DSsYGoaE6QheQZjtKZSUs9Nh2izTWiwQxvE= +github.com/hashicorp/hcl/v2 v2.24.0/go.mod h1:oGoO1FIQYfn/AgyOhlg9qLC6/nOJPX3qGbkZpYAcqfM= github.com/hashicorp/logutils v1.0.0 
h1:dLEQVugN8vlakKOUE3ihGLTZJRB4j+M2cdTm/ORI65Y= github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= -github.com/hashicorp/terraform-exec v0.23.0 h1:MUiBM1s0CNlRFsCLJuM5wXZrzA3MnPYEsiXmzATMW/I= -github.com/hashicorp/terraform-exec v0.23.0/go.mod h1:mA+qnx1R8eePycfwKkCRk3Wy65mwInvlpAeOwmA7vlY= -github.com/hashicorp/terraform-json v0.25.0 h1:rmNqc/CIfcWawGiwXmRuiXJKEiJu1ntGoxseG1hLhoQ= -github.com/hashicorp/terraform-json v0.25.0/go.mod h1:sMKS8fiRDX4rVlR6EJUMudg1WcanxCMoWwTLkgZP/vc= -github.com/hashicorp/terraform-plugin-framework v1.15.0 h1:LQ2rsOfmDLxcn5EeIwdXFtr03FVsNktbbBci8cOKdb4= -github.com/hashicorp/terraform-plugin-framework v1.15.0/go.mod h1:hxrNI/GY32KPISpWqlCoTLM9JZsGH3CyYlir09bD/fI= +github.com/hashicorp/terraform-exec v0.24.0 h1:mL0xlk9H5g2bn0pPF6JQZk5YlByqSqrO5VoaNtAf8OE= +github.com/hashicorp/terraform-exec v0.24.0/go.mod h1:lluc/rDYfAhYdslLJQg3J0oDqo88oGQAdHR+wDqFvo4= +github.com/hashicorp/terraform-json v0.27.2 h1:BwGuzM6iUPqf9JYM/Z4AF1OJ5VVJEEzoKST/tRDBJKU= +github.com/hashicorp/terraform-json v0.27.2/go.mod h1:GzPLJ1PLdUG5xL6xn1OXWIjteQRT2CNT9o/6A9mi9hE= +github.com/hashicorp/terraform-plugin-framework v1.16.1 h1:1+zwFm3MEqd/0K3YBB2v9u9DtyYHyEuhVOfeIXbteWA= +github.com/hashicorp/terraform-plugin-framework v1.16.1/go.mod h1:0xFOxLy5lRzDTayc4dzK/FakIgBhNf/lC4499R9cV4Y= github.com/hashicorp/terraform-plugin-framework-jsontypes v0.2.0 h1:SJXL5FfJJm17554Kpt9jFXngdM6fXbnUnZ6iT2IeiYA= github.com/hashicorp/terraform-plugin-framework-jsontypes v0.2.0/go.mod h1:p0phD0IYhsu9bR4+6OetVvvH59I6LwjXGnTVEr8ox6E= -github.com/hashicorp/terraform-plugin-framework-timeouts v0.5.0 h1:I/N0g/eLZ1ZkLZXUQ0oRSXa8YG/EF0CEuQP1wXdrzKw= -github.com/hashicorp/terraform-plugin-framework-timeouts v0.5.0/go.mod h1:t339KhmxnaF4SzdpxmqW8HnQBHVGYazwtfxU0qCs4eE= +github.com/hashicorp/terraform-plugin-framework-timeouts v0.6.0 h1:Vv16e7EW4nT9668IV0RhdpEmnLl0im7BZx6J+QMlUkg= +github.com/hashicorp/terraform-plugin-framework-timeouts v0.6.0/go.mod 
h1:rpHo9hZLn4vEkvNL5xsSdLRdaDZKSinuc0xL+BdOpVA= github.com/hashicorp/terraform-plugin-framework-timetypes v0.5.0 h1:v3DapR8gsp3EM8fKMh6up9cJUFQ2iRaFsYLP8UJnCco= github.com/hashicorp/terraform-plugin-framework-timetypes v0.5.0/go.mod h1:c3PnGE9pHBDfdEVG9t1S1C9ia5LW+gkFR0CygXlM8ak= -github.com/hashicorp/terraform-plugin-framework-validators v0.18.0 h1:OQnlOt98ua//rCw+QhBbSqfW3QbwtVrcdWeQN5gI3Hw= -github.com/hashicorp/terraform-plugin-framework-validators v0.18.0/go.mod h1:lZvZvagw5hsJwuY7mAY6KUz45/U6fiDR0CzQAwWD0CA= -github.com/hashicorp/terraform-plugin-go v0.28.0 h1:zJmu2UDwhVN0J+J20RE5huiF3XXlTYVIleaevHZgKPA= -github.com/hashicorp/terraform-plugin-go v0.28.0/go.mod h1:FDa2Bb3uumkTGSkTFpWSOwWJDwA7bf3vdP3ltLDTH6o= -github.com/hashicorp/terraform-plugin-mux v0.20.0 h1:3QpBnI9uCuL0Yy2Rq/kR9cOdmOFNhw88A2GoZtk5aXM= -github.com/hashicorp/terraform-plugin-mux v0.20.0/go.mod h1:wSIZwJjSYk86NOTX3fKUlThMT4EAV1XpBHz9SAvjQr4= -github.com/hashicorp/terraform-plugin-sdk/v2 v2.37.0 h1:NFPMacTrY/IdcIcnUB+7hsore1ZaRWU9cnB6jFoBnIM= -github.com/hashicorp/terraform-plugin-sdk/v2 v2.37.0/go.mod h1:QYmYnLfsosrxjCnGY1p9c7Zj6n9thnEE+7RObeYs3fA= -github.com/hashicorp/terraform-plugin-testing v1.13.2 h1:mSotG4Odl020vRjIenA3rggwo6Kg6XCKIwtRhYgp+/M= -github.com/hashicorp/terraform-plugin-testing v1.13.2/go.mod h1:WHQ9FDdiLoneey2/QHpGM/6SAYf4A7AZazVg7230pLE= -github.com/hashicorp/terraform-registry-address v0.2.5 h1:2GTftHqmUhVOeuu9CW3kwDkRe4pcBDq0uuK5VJngU1M= -github.com/hashicorp/terraform-registry-address v0.2.5/go.mod h1:PpzXWINwB5kuVS5CA7m1+eO2f1jKb5ZDIxrOPfpnGkg= +github.com/hashicorp/terraform-plugin-framework-validators v0.19.0 h1:Zz3iGgzxe/1XBkooZCewS0nJAaCFPFPHdNJd8FgE4Ow= +github.com/hashicorp/terraform-plugin-framework-validators v0.19.0/go.mod h1:GBKTNGbGVJohU03dZ7U8wHqc2zYnMUawgCN+gC0itLc= +github.com/hashicorp/terraform-plugin-go v0.29.0 h1:1nXKl/nSpaYIUBU1IG/EsDOX0vv+9JxAltQyDMpq5mU= +github.com/hashicorp/terraform-plugin-go v0.29.0/go.mod 
h1:vYZbIyvxyy0FWSmDHChCqKvI40cFTDGSb3D8D70i9GM= +github.com/hashicorp/terraform-plugin-mux v0.21.0 h1:QsEYnzSD2c3zT8zUrUGqaFGhV/Z8zRUlU7FY3ZPJFfw= +github.com/hashicorp/terraform-plugin-mux v0.21.0/go.mod h1:Qpt8+6AD7NmL0DS7ASkN0EXpDQ2J/FnnIgeUr1tzr5A= +github.com/hashicorp/terraform-plugin-sdk/v2 v2.38.1 h1:mlAq/OrMlg04IuJT7NpefI1wwtdpWudnEmjuQs04t/4= +github.com/hashicorp/terraform-plugin-sdk/v2 v2.38.1/go.mod h1:GQhpKVvvuwzD79e8/NZ+xzj+ZpWovdPAe8nfV/skwNU= +github.com/hashicorp/terraform-plugin-testing v1.14.0-beta.1.0.20251013071646-7ed2ee242705 h1:+Xi2Akrl1b7bs6VIOtA3Vm+cxx+byzP9U2r461/gL4g= +github.com/hashicorp/terraform-plugin-testing v1.14.0-beta.1.0.20251013071646-7ed2ee242705/go.mod h1:UrIjRAJLN0kygs0miY1Moy4PxUzy2e9R5WxyRk8aliI= +github.com/hashicorp/terraform-registry-address v0.4.0 h1:S1yCGomj30Sao4l5BMPjTGZmCNzuv7/GDTDX99E9gTk= +github.com/hashicorp/terraform-registry-address v0.4.0/go.mod h1:LRS1Ay0+mAiRkUyltGT+UHWkIqTFvigGn/LbMshfflE= github.com/hashicorp/terraform-svchost v0.1.1 h1:EZZimZ1GxdqFRinZ1tpJwVxxt49xc/S52uzrw4x0jKQ= github.com/hashicorp/terraform-svchost v0.1.1/go.mod h1:mNsjQfZyf/Jhz35v6/0LWcv26+X7JPS+buii2c9/ctc= -github.com/hashicorp/yamux v0.1.1 h1:yrQxtgseBDrq9Y652vSRDvsKCJKOUD+GzTS4Y0Y8pvE= -github.com/hashicorp/yamux v0.1.1/go.mod h1:CtWFDAQgb7dxtzFs4tWbplKIe2jSi3+5vKbgIO0SLnQ= +github.com/hashicorp/yamux v0.1.2 h1:XtB8kyFOyHXYVFnwT5C3+Bdo8gArse7j2AQ0DA0Uey8= +github.com/hashicorp/yamux v0.1.2/go.mod h1:C+zze2n6e/7wshOZep2A70/aQU6QBRWJO/G6FT1wIns= github.com/huandu/xstrings v1.3.3 h1:/Gcsuc1x8JVbJ9/rlye4xZnVAbEkGauT8lbebqcQws4= github.com/huandu/xstrings v1.3.3/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/imdario/mergo v0.3.15 h1:M8XP7IuFNsqUx6VPK2P9OSmsYsI/YFaGil0uD21V3dM= github.com/imdario/mergo v0.3.15/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= +github.com/jaswdr/faker/v2 v2.8.1 
h1:2AcPgHDBXYQregFUH9LgVZKfFupc4SIquYhp29sf5wQ= +github.com/jaswdr/faker/v2 v2.8.1/go.mod h1:jZq+qzNQr8/P+5fHd9t3txe2GNPnthrTfohtnJ7B+68= github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A= github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= -github.com/jhump/protoreflect v1.15.1 h1:HUMERORf3I3ZdX05WaQ6MIpd/NJ434hTp5YiKgfCL6c= -github.com/jhump/protoreflect v1.15.1/go.mod h1:jD/2GMKKE6OqX8qTjhADU1e6DShO+gavG9e0Q693nKo= +github.com/jhump/protoreflect v1.17.0 h1:qOEr613fac2lOuTgWN4tPAtLL7fUSbuJL5X5XumQh94= +github.com/jhump/protoreflect v1.17.0/go.mod h1:h9+vUUL38jiBzck8ck+6G/aeMX8Z4QUY/NiJPwPNi+8= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= @@ -718,27 +735,28 @@ github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-testing-interface v1.14.1 h1:jrgshOhYAUVNMAJiKbEu7EqAwgJJ2JqpQmpLJOu07cU= github.com/mitchellh/go-testing-interface v1.14.1/go.mod h1:gfgS7OtZj6MA4U1UrDRp04twqAjfvlZyCfX3sDjEym8= -github.com/mitchellh/go-wordwrap v1.0.0 h1:6GlHJ/LTGMrIJbwgdqdl2eEH8o+Exx/0m8ir9Gns0u4= -github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= +github.com/mitchellh/go-wordwrap v1.0.1 h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQflz0v0= +github.com/mitchellh/go-wordwrap v1.0.1/go.mod h1:R62XHJLzvMFRBbcrT7m7WgmE1eOyTSsCt+hzestvNj0= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure 
v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= -github.com/oklog/run v1.0.0 h1:Ru7dDtJNOyC66gQ5dQmaCa0qIsAUFY3sFpK1Xk8igrw= -github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= +github.com/oklog/run v1.1.0 h1:GEenZ1cK0+q0+wsJew9qUg/DyD8k3JzYsZAi5gYi2mA= +github.com/oklog/run v1.1.0/go.mod h1:sVPdnTZT1zYwAJeCMu2Th4T21pA3FPOQRfWjQlk7DVU= github.com/pjbgf/sha1cd v0.3.2 h1:a9wb0bp1oC2TGwStyn0Umc/IGKQnEgF0vVaZ8QF8eo4= github.com/pjbgf/sha1cd v0.3.2/go.mod h1:zQWigSxVmsHEZow5qaLtPYxpcKMMQpa09ixqBxuCS6A= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/posener/complete v1.2.3 h1:NP0eAhjcjImqslEwo/1hq7gpajME0fTLTezBKDqfXqo= github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s= github.com/pquerna/otp v1.5.0 h1:NMMR+WrmaqXU4EzdGJEE1aUUI0AMRzsp96fFFWNPwxs= github.com/pquerna/otp v1.5.0/go.mod h1:dkJfzwRKNiegxyNb54X/3fLwhCynbMspSyWKnvi1AEg= -github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= -github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= +github.com/rogpeppe/go-internal v1.14.1 
h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ= +github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc= github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 h1:n661drycOFuPLCN3Uc8sB6B/s6Z4t2xvBgU1htSHuq8= github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3/go.mod h1:A0bzQcvG0E7Rwjx0REVgAGH58e96+X0MeOfepqsbeW4= github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= @@ -754,8 +772,8 @@ github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UV github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= -github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= -github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= github.com/vmihailenco/msgpack v3.3.3+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk= github.com/vmihailenco/msgpack v4.0.4+incompatible h1:dSLoQfGFAo3F6OoNhwUmLwVgaUXK79GlxNBwueZn0xI= github.com/vmihailenco/msgpack v4.0.4+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk= @@ -772,44 +790,46 @@ github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1: github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74= github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= -github.com/zclconf/go-cty v1.16.3 h1:osr++gw2T61A8KVYHoQiFbFd1Lh3JOCXc/jFLJXKTxk= -github.com/zclconf/go-cty v1.16.3/go.mod 
h1:VvMs5i0vgZdhYawQNq5kePSpLAoz8u1xvZgrPIxfnZE= +github.com/zclconf/go-cty v1.17.0 h1:seZvECve6XX4tmnvRzWtJNHdscMtYEx5R7bnnVyd/d0= +github.com/zclconf/go-cty v1.17.0/go.mod h1:wqFzcImaLTI6A5HfsRwB0nj5n0MRZFwmey8YoFPPs3U= github.com/zclconf/go-cty-debug v0.0.0-20240509010212-0d6042c53940 h1:4r45xpDWB6ZMSMNJFMOjqrGHynW3DIBuR2H9j0ug+Mo= github.com/zclconf/go-cty-debug v0.0.0-20240509010212-0d6042c53940/go.mod h1:CmBdvvj3nqzfzJ6nTCIwDTPZ56aVGvDrmztiO5g3qrM= -go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= -go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= -go.opentelemetry.io/contrib/instrumentation/github.com/aws/aws-sdk-go-v2/otelaws v0.61.0 h1:lR4WnQLBC9XyTwKrz0327rq2QnIdJNpaVIGuW2yMvME= -go.opentelemetry.io/contrib/instrumentation/github.com/aws/aws-sdk-go-v2/otelaws v0.61.0/go.mod h1:UK49mXgwqIWFUDH8ibqTswbhy4fuwjEjj4VKMC7krUQ= -go.opentelemetry.io/otel v1.36.0 h1:UumtzIklRBY6cI/lllNZlALOF5nNIzJVb16APdvgTXg= -go.opentelemetry.io/otel v1.36.0/go.mod h1:/TcFMXYjyRNh8khOAO9ybYkqaDBb/70aVwkNML4pP8E= -go.opentelemetry.io/otel/metric v1.36.0 h1:MoWPKVhQvJ+eeXWHFBOPoBOi20jh6Iq2CcCREuTYufE= -go.opentelemetry.io/otel/metric v1.36.0/go.mod h1:zC7Ks+yeyJt4xig9DEw9kuUFe5C3zLbVjV2PzT6qzbs= -go.opentelemetry.io/otel/sdk v1.34.0 h1:95zS4k/2GOy069d321O8jWgYsW3MzVV+KuSPKp7Wr1A= -go.opentelemetry.io/otel/sdk v1.34.0/go.mod h1:0e/pNiaMAqaykJGKbi+tSjWfNNHMTxoC9qANsCzbyxU= -go.opentelemetry.io/otel/sdk/metric v1.34.0 h1:5CeK9ujjbFVL5c1PhLuStg1wxA7vQv7ce1EK0Gyvahk= -go.opentelemetry.io/otel/sdk/metric v1.34.0/go.mod h1:jQ/r8Ze28zRKoNRdkjCZxfs6YvBTG1+YIqyFVFYec5w= -go.opentelemetry.io/otel/trace v1.36.0 h1:ahxWNuqZjpdiFAyrIoQ4GIiAIhxAunQR6MUoKrsNd4w= -go.opentelemetry.io/otel/trace v1.36.0/go.mod h1:gQ+OnDZzrybY4k4seLzPAWNwVBBVlF2szhehOBB/tGA= +go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64= +go.opentelemetry.io/auto/sdk v1.2.1/go.mod 
h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y= +go.opentelemetry.io/contrib/instrumentation/github.com/aws/aws-sdk-go-v2/otelaws v0.63.0 h1:0W0GZvzQe514c3igO063tR0cFVStoABt1agKqlYToL8= +go.opentelemetry.io/contrib/instrumentation/github.com/aws/aws-sdk-go-v2/otelaws v0.63.0/go.mod h1:wIvTiRUU7Pbfqas/5JVjGZcftBeSAGSYVMOHWzWG0qE= +go.opentelemetry.io/otel v1.38.0 h1:RkfdswUDRimDg0m2Az18RKOsnI8UDzppJAtj01/Ymk8= +go.opentelemetry.io/otel v1.38.0/go.mod h1:zcmtmQ1+YmQM9wrNsTGV/q/uyusom3P8RxwExxkZhjM= +go.opentelemetry.io/otel/metric v1.38.0 h1:Kl6lzIYGAh5M159u9NgiRkmoMKjvbsKtYRwgfrA6WpA= +go.opentelemetry.io/otel/metric v1.38.0/go.mod h1:kB5n/QoRM8YwmUahxvI3bO34eVtQf2i4utNVLr9gEmI= +go.opentelemetry.io/otel/sdk v1.38.0 h1:l48sr5YbNf2hpCUj/FoGhW9yDkl+Ma+LrVl8qaM5b+E= +go.opentelemetry.io/otel/sdk v1.38.0/go.mod h1:ghmNdGlVemJI3+ZB5iDEuk4bWA3GkTpW+DOoZMYBVVg= +go.opentelemetry.io/otel/sdk/metric v1.37.0 h1:90lI228XrB9jCMuSdA0673aubgRobVZFhbjxHHspCPc= +go.opentelemetry.io/otel/sdk/metric v1.37.0/go.mod h1:cNen4ZWfiD37l5NhS+Keb5RXVWZWpRE+9WyVCpbo5ps= +go.opentelemetry.io/otel/trace v1.38.0 h1:Fxk5bKrDZJUH+AMyyIXGcFAPah0oRcT+LuNtJrmcNLE= +go.opentelemetry.io/otel/trace v1.38.0/go.mod h1:j1P9ivuFsTceSWe1oY+EeW3sc+Pp42sO++GHkg4wwhs= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= -golang.org/x/crypto v0.39.0 h1:SHs+kF4LP+f+p14esP5jAoDpHU8Gu/v9lFRK6IT5imM= -golang.org/x/crypto v0.39.0/go.mod h1:L+Xg3Wf6HoL4Bn4238Z6ft6KfEpN0tJGo53AAPC632U= +golang.org/x/crypto v0.43.0 h1:dduJYIi3A3KOfdGOHX8AVZ/jGiyPa3IbBozJ5kNuE04= +golang.org/x/crypto v0.43.0/go.mod h1:BFbav4mRNlXJL4wNeejLpWxB7wMbc79PdRGhWKncxR0= +golang.org/x/exp v0.0.0-20220921023135-46d9e7742f1e h1:Ctm9yurWsg7aWwIpH9Bnap/IdSVxixymIb3MhiMEQQA= +golang.org/x/exp 
v0.0.0-20220921023135-46d9e7742f1e/go.mod h1:cyybsKvd6eL0RnXn6p/Grxp8F5bW7iYuBgsNCOHpMYE= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/mod v0.25.0 h1:n7a+ZbQKQA/Ysbyb0/6IbB1H/X41mKgbhfv7AfG/44w= -golang.org/x/mod v0.25.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww= +golang.org/x/mod v0.29.0 h1:HV8lRxZC4l2cr3Zq1LvtOsi/ThTgWnUk/y64QSs8GwA= +golang.org/x/mod v0.29.0/go.mod h1:NyhrlYXJ2H4eJiRy/WDBO6HMqZQ6q9nk4JzS3NuCK+w= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= -golang.org/x/net v0.41.0 h1:vBTly1HeNPEn3wtREYfy4GZ/NECgw2Cnl+nK6Nz3uvw= -golang.org/x/net v0.41.0/go.mod h1:B/K4NNqkfmg07DQYrbwvSluqCJOOXwUjeb/5lOisjbA= +golang.org/x/net v0.46.0 h1:giFlY12I07fugqwPuWJi68oOnpfqFnJIJzaIIm2JVV4= +golang.org/x/net v0.46.0/go.mod h1:Q9BGdFy1y4nkUwiLvT5qtyhAnEHgnQ/zd8PfU6nc210= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw= -golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= +golang.org/x/sync v0.17.0 h1:l60nONMj9l5drqw6jlhIELNv9I0A4OFgRsG9k2oT9Ug= +golang.org/x/sync v0.17.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= golang.org/x/sys 
v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -822,44 +842,46 @@ golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw= -golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/sys v0.37.0 h1:fdNQudmxPjkdUTPnLn5mdQv7Zwvbvpaxqs831goi9kQ= +golang.org/x/sys v0.37.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= -golang.org/x/term v0.32.0 h1:DR4lr0TjUs3epypdhTOkMmuF5CDFJ/8pOnbzMZPQ7bg= -golang.org/x/term v0.32.0/go.mod h1:uZG1FhGx848Sqfsq4/DlJr3xGGsYMu/L5GW4abiaEPQ= +golang.org/x/term v0.36.0 h1:zMPR+aF8gfksFprF/Nc/rd1wRS1EI6nDBGyWAvDzx2Q= +golang.org/x/term v0.36.0/go.mod h1:Qu394IJq6V6dCBRgwqshf3mPF85AqzYEzofzRdZkWss= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= 
-golang.org/x/text v0.27.0 h1:4fGWRpyh641NLlecmyl4LOe6yDdfaYNrGb2zdfo4JV4= -golang.org/x/text v0.27.0/go.mod h1:1D28KMCvyooCX9hBiosv5Tz/+YLxj0j7XhWjpSUF7CU= +golang.org/x/text v0.30.0 h1:yznKA/E9zq54KzlzBEAWn1NXSQ8DIp/NYMy88xJjl4k= +golang.org/x/text v0.30.0/go.mod h1:yDdHFIX9t+tORqspjENWgzaCVXgk0yYnYuSZ8UzzBVM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.34.0 h1:qIpSLOxeCYGg9TrcJokLBG4KFA6d795g0xkBkiESGlo= -golang.org/x/tools v0.34.0/go.mod h1:pAP9OwEaY1CAW3HOmg3hLZC5Z0CCmzjAF2UQMSqNARg= +golang.org/x/tools v0.38.0 h1:Hx2Xv8hISq8Lm16jvBZ2VQf+RLmbd7wVUsALibYI/IQ= +golang.org/x/tools v0.38.0/go.mod h1:yEsQ/d/YK8cjh0L6rZlY8tgtlKiBNTL14pGDJPJpYQs= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk= +gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM= google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250218202821-56aae31c358a h1:51aaUVRocpvUOSQKM6Q7VuoaktNIaMCLuhZB6DKksq4= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250218202821-56aae31c358a/go.mod h1:uRxBH1mhmO8PGhU89cMcHaXKZqO+OfakD8QQO0oYwlQ= -google.golang.org/grpc v1.72.1 h1:HR03wO6eyZ7lknl75XlxABNVLLFc2PAb6mHlYh756mA= -google.golang.org/grpc v1.72.1/go.mod 
h1:wH5Aktxcg25y1I3w7H69nHfXdOG3UiadoBtjh3izSDM= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250804133106-a7a43d27e69b h1:zPKJod4w6F1+nRGDI9ubnXYhU9NSWoFAijkHkUXeTK8= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250804133106-a7a43d27e69b/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A= +google.golang.org/grpc v1.75.1 h1:/ODCNEuf9VghjgO3rqLcfg8fiOP0nSluljWFlDxELLI= +google.golang.org/grpc v1.75.1/go.mod h1:JtPAzKiq4v1xcAB2hydNlWI2RnF85XXcV0mhKXr2ecQ= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY= -google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY= +google.golang.org/protobuf v1.36.9 h1:w2gp2mA27hUeUzj9Ex9FBjsBm40zfaDtEWow293U7Iw= +google.golang.org/protobuf v1.36.9/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= -gopkg.in/dnaeon/go-vcr.v4 v4.0.4 h1:UNc8d1Ya2otEOU3DoUgnSLp0tXvBNE0FuFe86Nnzcbw= -gopkg.in/dnaeon/go-vcr.v4 v4.0.4/go.mod h1:65yxh9goQVrudqofKtHA4JNFWd6XZRkWfKN4YpMx7KI= +gopkg.in/dnaeon/go-vcr.v4 v4.0.5 h1:I0hpTIvD5rII+8LgYGrHMA2d4SQPoL6u7ZvJakWKsiA= +gopkg.in/dnaeon/go-vcr.v4 v4.0.5/go.mod h1:dRos81TkW9C1WJt6tTaE+uV2Lo8qJT3AG2b35+CB/nQ= gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME= gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= gopkg.in/yaml.v2 v2.2.2/go.mod 
h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= @@ -868,5 +890,3 @@ gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -syreclabs.com/go/faker v1.2.3 h1:HPrWtnHazIf0/bVuPZJLFrtHlBHk10hS0SB+mV8v6R4= -syreclabs.com/go/faker v1.2.3/go.mod h1:NAXInmkPsC2xuO5MKZFe80PUXX5LU8cFdJIHGs+nSBE= diff --git a/infrastructure/repository/labels-service.tf b/infrastructure/repository/labels-service.tf index cd13188084d7..a42cd86e03f1 100644 --- a/infrastructure/repository/labels-service.tf +++ b/infrastructure/repository/labels-service.tf @@ -26,6 +26,7 @@ variable "service_labels" { "apprunner", "appstream", "appsync", + "arcregionswitch", "athena", "auditmanager", "autoscaling", @@ -36,6 +37,7 @@ variable "service_labels" { "bcmdataexports", "bedrock", "bedrockagent", + "bedrockagentcore", "billing", "billingconductor", "braket", @@ -240,6 +242,7 @@ variable "service_labels" { "notifications", "notificationscontacts", "oam", + "odb", "opensearch", "opensearchserverless", "opsworks", @@ -293,6 +296,7 @@ variable "service_labels" { "s3control", "s3outposts", "s3tables", + "s3vectors", "sagemaker", "sagemakera2iruntime", "sagemakeredge", diff --git a/infrastructure/repository/labels-workflow.tf b/infrastructure/repository/labels-workflow.tf index 72f7e04e5dea..8620a49b8e07 100644 --- a/infrastructure/repository/labels-workflow.tf +++ b/infrastructure/repository/labels-workflow.tf @@ -109,6 +109,10 @@ variable "workflow_labels" { color = "dc477d", # color:consul description = "Waiting for first response or review from a maintainer." }, + "new-action" = { + color = "ac72f0", # color:terraform (link on black) + description = "Introduces a new action." 
+ }, "new-data-source" = { color = "ac72f0", # color:terraform (link on black) description = "Introduces a new data source." @@ -169,6 +173,10 @@ variable "workflow_labels" { color = "828a90", # color:stale grey description = "Repository modifications; GitHub Actions, developer docs, issue templates, codeowners, changelog." }, + "resource-identity" = { + color = "844fba", # color:terraform (main) + description = "Pertains to resource identity." + }, "size/XS" = { color = "62d4dc", # color:lightest-darkest waypoint gradient description = "Managed by automation to categorize the size of a PR." diff --git a/internal/acctest/acctest.go b/internal/acctest/acctest.go index 5f72f76f24cc..629a17e8211b 100644 --- a/internal/acctest/acctest.go +++ b/internal/acctest/acctest.go @@ -526,7 +526,7 @@ func MatchResourceAttrRegionalARNNoAccount(resourceName, attributeName, arnServi attributeMatch, err := regexp.Compile(arnRegexp) if err != nil { - return fmt.Errorf("unable to compile ARN regexp (%s): %s", arnRegexp, err) + return fmt.Errorf("unable to compile ARN regexp (%s): %w", arnRegexp, err) } return resource.TestMatchResourceAttr(resourceName, attributeName, attributeMatch)(s) @@ -678,7 +678,7 @@ func MatchResourceAttrGlobalARNNoAccount(resourceName, attributeName, arnService attributeMatch, err := regexp.Compile(arnRegexp) if err != nil { - return fmt.Errorf("unable to compile ARN regexp (%s): %s", arnRegexp, err) + return fmt.Errorf("unable to compile ARN regexp (%s): %w", arnRegexp, err) } return resource.TestMatchResourceAttr(resourceName, attributeName, attributeMatch)(s) @@ -1925,7 +1925,7 @@ func CheckACMPCACertificateAuthorityActivateRootCA(ctx context.Context, certific } // Wait for certificate status to become ISSUED. 
- outputRaw, err := tfresource.RetryWhenIsA[*acmpcatypes.RequestInProgressException](ctx, CertificateIssueTimeout, func() (any, error) { + getCertOutput, err := tfresource.RetryWhenIsA[*acmpca.GetCertificateOutput, *acmpcatypes.RequestInProgressException](ctx, CertificateIssueTimeout, func(ctx context.Context) (*acmpca.GetCertificateOutput, error) { return tfacmpca.FindCertificateByTwoPartKey(ctx, conn, arn, aws.ToString(issueCertOutput.CertificateArn)) }) @@ -1933,7 +1933,6 @@ func CheckACMPCACertificateAuthorityActivateRootCA(ctx context.Context, certific return fmt.Errorf("waiting for ACM PCA Certificate Authority (%s) Root CA certificate to become ISSUED: %w", arn, err) } - getCertOutput := outputRaw.(*acmpca.GetCertificateOutput) importCACertificateInput := acmpca.ImportCertificateAuthorityCertificateInput{ CertificateAuthorityArn: aws.String(arn), Certificate: []byte(aws.ToString(getCertOutput.Certificate)), @@ -1986,7 +1985,7 @@ func CheckACMPCACertificateAuthorityActivateSubordinateCA(ctx context.Context, r } // Wait for certificate status to become ISSUED. 
- outputRaw, err := tfresource.RetryWhenIsA[*acmpcatypes.RequestInProgressException](ctx, CertificateIssueTimeout, func() (any, error) { + getCertOutput, err := tfresource.RetryWhenIsA[*acmpca.GetCertificateOutput, *acmpcatypes.RequestInProgressException](ctx, CertificateIssueTimeout, func(ctx context.Context) (*acmpca.GetCertificateOutput, error) { return tfacmpca.FindCertificateByTwoPartKey(ctx, conn, rootCertificateAuthorityArn, aws.ToString(issueCertOutput.CertificateArn)) }) @@ -1994,7 +1993,6 @@ func CheckACMPCACertificateAuthorityActivateSubordinateCA(ctx context.Context, r return fmt.Errorf("waiting for ACM PCA Certificate Authority (%s) Subordinate CA certificate (%s) to become ISSUED: %w", arn, aws.ToString(issueCertOutput.CertificateArn), err) } - getCertOutput := outputRaw.(*acmpca.GetCertificateOutput) importCACertificateInput := acmpca.ImportCertificateAuthorityCertificateInput{ CertificateAuthorityArn: aws.String(arn), Certificate: []byte(aws.ToString(getCertOutput.Certificate)), diff --git a/internal/acctest/configs.go b/internal/acctest/configs.go index 3a0ceaa0ad0f..f5f000e0a245 100644 --- a/internal/acctest/configs.go +++ b/internal/acctest/configs.go @@ -523,9 +523,11 @@ resource "aws_vpc" "vpc_for_lambda" { resource "aws_subnet" "subnet_for_lambda" { vpc_id = aws_vpc.vpc_for_lambda.id - cidr_block = cidrsubnet(aws_vpc.vpc_for_lambda.cidr_block, 8, 1) availability_zone = data.aws_availability_zones.available.names[1] - ipv6_cidr_block = cidrsubnet(aws_vpc.vpc_for_lambda.ipv6_cidr_block, 8, 1) + + cidr_block = cidrsubnet(aws_vpc.vpc_for_lambda.cidr_block, 8, 1) + ipv6_cidr_block = cidrsubnet(aws_vpc.vpc_for_lambda.ipv6_cidr_block, 8, 1) + assign_ipv6_address_on_creation = true tags = { @@ -537,9 +539,11 @@ resource "aws_subnet" "subnet_for_lambda" { # prevent a timeout issue when fully removing Lambda Filesystems resource "aws_subnet" "subnet_for_lambda_az2" { vpc_id = aws_vpc.vpc_for_lambda.id - cidr_block = 
cidrsubnet(aws_vpc.vpc_for_lambda.cidr_block, 8, 2) availability_zone = data.aws_availability_zones.available.names[1] - ipv6_cidr_block = cidrsubnet(aws_vpc.vpc_for_lambda.ipv6_cidr_block, 8, 2) + + cidr_block = cidrsubnet(aws_vpc.vpc_for_lambda.cidr_block, 8, 2) + ipv6_cidr_block = cidrsubnet(aws_vpc.vpc_for_lambda.ipv6_cidr_block, 8, 2) + assign_ipv6_address_on_creation = true tags = { @@ -670,9 +674,10 @@ resource "aws_subnet" "test" { vpc_id = aws_vpc.test.id availability_zone = data.aws_availability_zones.available.names[count.index] - cidr_block = cidrsubnet(aws_vpc.test.cidr_block, 8, count.index) - ipv6_cidr_block = cidrsubnet(aws_vpc.test.ipv6_cidr_block, 8, count.index) + cidr_block = cidrsubnet(aws_vpc.test.cidr_block, 8, count.index) + ipv6_cidr_block = cidrsubnet(aws_vpc.test.ipv6_cidr_block, 8, count.index) + assign_ipv6_address_on_creation = true tags = { diff --git a/internal/acctest/knownvalue/account_id.go b/internal/acctest/knownvalue/account_id.go index 8266c6b09314..7c53fde8e9bd 100644 --- a/internal/acctest/knownvalue/account_id.go +++ b/internal/acctest/knownvalue/account_id.go @@ -1,7 +1,7 @@ // Copyright (c) HashiCorp, Inc. // SPDX-License-Identifier: MPL-2.0 -package statecheck +package knownvalue import ( "context" @@ -34,7 +34,7 @@ func (v accountID) CheckValue(other any) error { // String returns the string representation of the value. func (v accountID) String() string { - return "Who Knows" + return "Account ID" } func AccountID() knownvalue.Check { diff --git a/internal/acctest/knownvalue/global_arn_exact.go b/internal/acctest/knownvalue/global_arn_exact.go new file mode 100644 index 000000000000..9abce86d0541 --- /dev/null +++ b/internal/acctest/knownvalue/global_arn_exact.go @@ -0,0 +1,58 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package knownvalue + +import ( + "context" + "fmt" + + "github.com/aws/aws-sdk-go-v2/aws/arn" + "github.com/hashicorp/terraform-plugin-testing/knownvalue" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" +) + +var _ knownvalue.Check = globalARNExact{} + +type globalARNExact struct { + service string + resource string +} + +// CheckValue determines whether the passed value is of type string, and +// contains a matching sequence of bytes. +func (v globalARNExact) CheckValue(other any) error { + otherVal, ok := other.(string) + + if !ok { + return fmt.Errorf("expected string value for GlobalARNExact check, got: %T", other) + } + + if otherVal != v.buildARNString() { + return fmt.Errorf("expected value %s for GlobalARNExact check, got: %s", v.buildARNString(), otherVal) + } + + return nil +} + +// String returns the string representation of the value. +func (v globalARNExact) String() string { + return v.buildARNString() +} + +func (v globalARNExact) buildARNString() string { + return arn.ARN{ + AccountID: acctest.AccountID(context.Background()), + Partition: acctest.Partition(), + Region: "", + Service: v.service, + Resource: v.resource, + }.String() +} + +func GlobalARNExact(service, resource string) knownvalue.Check { + return globalARNExact{ + service: service, + resource: resource, + } +} diff --git a/internal/acctest/knownvalue/global_arn_regexp.go b/internal/acctest/knownvalue/global_arn_regexp.go index 5b37a40affe6..2aa86638b8d3 100644 --- a/internal/acctest/knownvalue/global_arn_regexp.go +++ b/internal/acctest/knownvalue/global_arn_regexp.go @@ -1,7 +1,7 @@ // Copyright (c) HashiCorp, Inc. 
// SPDX-License-Identifier: MPL-2.0 -package statecheck +package knownvalue import ( "context" diff --git a/internal/acctest/knownvalue/regional_arn_exact.go b/internal/acctest/knownvalue/regional_arn_exact.go index f5a9b7f9c1af..92e777508616 100644 --- a/internal/acctest/knownvalue/regional_arn_exact.go +++ b/internal/acctest/knownvalue/regional_arn_exact.go @@ -1,7 +1,7 @@ // Copyright (c) HashiCorp, Inc. // SPDX-License-Identifier: MPL-2.0 -package statecheck +package knownvalue import ( "context" diff --git a/internal/acctest/knownvalue/regional_arn_regexp.go b/internal/acctest/knownvalue/regional_arn_regexp.go index a5b5211862d5..432f4f8d826e 100644 --- a/internal/acctest/knownvalue/regional_arn_regexp.go +++ b/internal/acctest/knownvalue/regional_arn_regexp.go @@ -1,7 +1,7 @@ // Copyright (c) HashiCorp, Inc. // SPDX-License-Identifier: MPL-2.0 -package statecheck +package knownvalue import ( "context" diff --git a/internal/acctest/knownvalue/regional_arn_regexp_ignore_account.go b/internal/acctest/knownvalue/regional_arn_regexp_ignore_account.go index 836f2a8511cd..73069d8449c4 100644 --- a/internal/acctest/knownvalue/regional_arn_regexp_ignore_account.go +++ b/internal/acctest/knownvalue/regional_arn_regexp_ignore_account.go @@ -1,7 +1,7 @@ // Copyright (c) HashiCorp, Inc. // SPDX-License-Identifier: MPL-2.0 -package statecheck +package knownvalue import ( "fmt" diff --git a/internal/acctest/knownvalue/string_ptr_exact.go b/internal/acctest/knownvalue/string_ptr_exact.go new file mode 100644 index 000000000000..ead0478afd42 --- /dev/null +++ b/internal/acctest/knownvalue/string_ptr_exact.go @@ -0,0 +1,46 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package knownvalue + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-testing/knownvalue" +) + +var _ knownvalue.Check = stringPtrExact[string]{} + +type stringPtrExact[T ~string] struct { + value *T +} + +func (v stringPtrExact[T]) CheckValue(other any) error { + otherVal, ok := other.(string) + + if !ok { + return fmt.Errorf("expected string value for StringPtrExact check, got: %T", other) + } + + if otherVal != string(*v.value) { + return fmt.Errorf("expected value %s for StringPtrExact check, got: %s", *v.value, otherVal) + } + + return nil +} + +// String returns the string representation of the value. +func (v stringPtrExact[T]) String() string { + return string(*v.value) +} + +// StringExact returns a Check for asserting equality between the +// supplied string and a value passed to the CheckValue method. +func StringPtrExact[T ~string](value *T) stringPtrExact[T] { + if value == nil { + panic("value must not be nil") + } + return stringPtrExact[T]{ + value: value, + } +} diff --git a/internal/acctest/knownvalue/stringable_value.go b/internal/acctest/knownvalue/stringable_value.go index 53d828d0c13e..ed1ec293ae3f 100644 --- a/internal/acctest/knownvalue/stringable_value.go +++ b/internal/acctest/knownvalue/stringable_value.go @@ -1,7 +1,7 @@ // Copyright (c) HashiCorp, Inc. 
// SPDX-License-Identifier: MPL-2.0 -package statecheck +package knownvalue import "github.com/hashicorp/terraform-plugin-testing/knownvalue" diff --git a/internal/acctest/plancheck/expect_known_value_change.go b/internal/acctest/plancheck/expect_known_value_change.go index ffbd11ce07bf..8fabc1f131b2 100644 --- a/internal/acctest/plancheck/expect_known_value_change.go +++ b/internal/acctest/plancheck/expect_known_value_change.go @@ -32,7 +32,7 @@ func (e expectKnownValueChangeCheck) CheckPlan(ctx context.Context, request plan } if err := e.oldValue.CheckValue(old); err != nil { - response.Error = fmt.Errorf("checking old value for attribute at path: %s.%s, err: %s", resource.Address, e.attributePath.String(), err) + response.Error = fmt.Errorf("checking old value for attribute at path: %s.%s, err: %w", resource.Address, e.attributePath.String(), err) return } @@ -45,7 +45,7 @@ func (e expectKnownValueChangeCheck) CheckPlan(ctx context.Context, request plan } if err := e.newValue.CheckValue(new); err != nil { - response.Error = fmt.Errorf("checking new value for attribute at path: %s.%s, err: %s", resource.Address, e.attributePath.String(), err) + response.Error = fmt.Errorf("checking new value for attribute at path: %s.%s, err: %w", resource.Address, e.attributePath.String(), err) return } diff --git a/internal/acctest/state_id.go b/internal/acctest/state_id.go index 4c7792b524fe..9f10e882d072 100644 --- a/internal/acctest/state_id.go +++ b/internal/acctest/state_id.go @@ -5,9 +5,11 @@ package acctest import ( "fmt" + "strings" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" + tfslices "github.com/hashicorp/terraform-provider-aws/internal/slices" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -23,6 +25,20 @@ func AttrImportStateIdFunc(resourceName, attrName string) resource.ImportStateId } } +// AttrsImportStateIdFunc is a resource.ImportStateIdFunc that returns the values of the 
specified attributes concatenated with a separator +func AttrsImportStateIdFunc(resourceName, sep string, attrNames ...string) resource.ImportStateIdFunc { + return func(s *terraform.State) (string, error) { + rs, ok := s.RootModule().Resources[resourceName] + if !ok { + return "", fmt.Errorf("Not found: %s", resourceName) + } + + return strings.Join(tfslices.ApplyToAll(attrNames, func(attrName string) string { + return rs.Primary.Attributes[attrName] + }), sep), nil + } +} + // CrossRegionAttrImportStateIdFunc is a resource.ImportStateIdFunc that returns the value // of the specified attribute and appends the region func CrossRegionAttrImportStateIdFunc(resourceName, attrName string) resource.ImportStateIdFunc { diff --git a/internal/acctest/statecheck/expect_global_arn_format.go b/internal/acctest/statecheck/expect_global_arn_format.go index a0fdfac1b6e2..935c57e76365 100644 --- a/internal/acctest/statecheck/expect_global_arn_format.go +++ b/internal/acctest/statecheck/expect_global_arn_format.go @@ -41,7 +41,7 @@ func (e expectGlobalARNFormatCheck) CheckState(ctx context.Context, request stat knownCheck := acctest.GlobalARN(e.arnService, arnString) if err = knownCheck.CheckValue(value); err != nil { //nolint:contextcheck // knownCheck implements an interface - response.Error = fmt.Errorf("checking value for attribute at path: %s.%s, err: %s", e.base.ResourceAddress(), e.attributePath, err) + response.Error = fmt.Errorf("checking value for attribute at path: %s.%s, err: %w", e.base.ResourceAddress(), e.attributePath, err) return } } diff --git a/internal/acctest/statecheck/expect_identity_regional_arn_format.go b/internal/acctest/statecheck/expect_identity_regional_arn_format.go index e46cb32467fe..e6faa93e3c1a 100644 --- a/internal/acctest/statecheck/expect_identity_regional_arn_format.go +++ b/internal/acctest/statecheck/expect_identity_regional_arn_format.go @@ -57,7 +57,7 @@ func (e expectIdentityRegionalARNFormatCheck) CheckState(ctx context.Context, re 
knownCheck := e.checkFactory(e.arnService, arnString) if err = knownCheck.CheckValue(value); err != nil { - response.Error = fmt.Errorf("checking value for attribute at path: %s.%s, err: %s", e.base.ResourceAddress(), attrPath, err) + response.Error = fmt.Errorf("checking value for attribute at path: %s.%s, err: %w", e.base.ResourceAddress(), attrPath, err) return } } diff --git a/internal/acctest/statecheck/expect_regional_arn_format.go b/internal/acctest/statecheck/expect_regional_arn_format.go index 15c90af6655a..eef786fd6621 100644 --- a/internal/acctest/statecheck/expect_regional_arn_format.go +++ b/internal/acctest/statecheck/expect_regional_arn_format.go @@ -43,7 +43,7 @@ func (e expectRegionalARNFormatCheck) CheckState(ctx context.Context, request st knownCheck := e.checkFactory(e.arnService, arnString) if err = knownCheck.CheckValue(value); err != nil { - response.Error = fmt.Errorf("checking value for attribute at path: %s.%s, err: %s", e.base.ResourceAddress(), e.attributePath, err) + response.Error = fmt.Errorf("checking value for attribute at path: %s.%s, err: %w", e.base.ResourceAddress(), e.attributePath, err) return } } diff --git a/internal/acctest/statecheck/full_tags.go b/internal/acctest/statecheck/full_tags.go index ea1d779b7858..c55ddcb058b1 100644 --- a/internal/acctest/statecheck/full_tags.go +++ b/internal/acctest/statecheck/full_tags.go @@ -77,7 +77,7 @@ func (e expectFullTagsCheck) CheckState(ctx context.Context, req statecheck.Chec err = fmt.Errorf("no ListTags method found for service %s", sp.ServicePackageName()) } if err != nil { - resp.Error = fmt.Errorf("listing tags for %s: %s", e.base.ResourceAddress(), err) + resp.Error = fmt.Errorf("listing tags for %s: %w", e.base.ResourceAddress(), err) return } @@ -102,7 +102,7 @@ func (e expectFullTagsCheck) CheckState(ctx context.Context, req statecheck.Chec }) if err := e.knownValue.CheckValue(tagsMap); err != nil { - resp.Error = fmt.Errorf("error checking remote tags for %s: %s", 
e.base.ResourceAddress(), err) // nosemgrep:ci.semgrep.errors.no-fmt.Errorf-leading-error + resp.Error = fmt.Errorf("error checking remote tags for %s: %w", e.base.ResourceAddress(), err) // nosemgrep:ci.semgrep.errors.no-fmt.Errorf-leading-error return } } diff --git a/internal/acctest/statecheck/state_value.go b/internal/acctest/statecheck/state_value.go new file mode 100644 index 000000000000..adf42ca47bc6 --- /dev/null +++ b/internal/acctest/statecheck/state_value.go @@ -0,0 +1,106 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package statecheck + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-testing/knownvalue" + "github.com/hashicorp/terraform-plugin-testing/statecheck" + "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" +) + +type stateValue struct { + resourceAddress string + attributePath tfjsonpath.Path + value *string +} + +func StateValue() stateValue { + return stateValue{} +} + +// GetStateValue sets the resource address and attribute path to check and stores the state value. +// Calls to GetStateValue occur before any TestStep is run. +func (v *stateValue) GetStateValue(resourceAddress string, attributePath tfjsonpath.Path) statecheck.StateCheck { + v.resourceAddress = resourceAddress + v.attributePath = attributePath + + return newStateValueStateChecker(v) +} + +// Value checks the stored state value against the provided value. +// Calls to Value occur before any TestStep is run. 
+func (v *stateValue) Value() knownvalue.Check { + return newStateValueKnownValueChecker(v) +} + +type stateValueStateChecker struct { + base Base + stateValue *stateValue +} + +func newStateValueStateChecker(stateValue *stateValue) stateValueStateChecker { + return stateValueStateChecker{ + base: NewBase(stateValue.resourceAddress), + stateValue: stateValue, + } +} + +func (vc stateValueStateChecker) CheckState(ctx context.Context, request statecheck.CheckStateRequest, response *statecheck.CheckStateResponse) { + resource, ok := vc.base.ResourceFromState(request, response) + if !ok { + return + } + + value, err := tfjsonpath.Traverse(resource.AttributeValues, vc.stateValue.attributePath) + if err != nil { + response.Error = err + return + } + + stringVal, ok := value.(string) + if !ok { + response.Error = fmt.Errorf("expected string value for StateValue check, got: %T", value) + return + } + + vc.stateValue.value = &stringVal +} + +type stateValueKnownValueChecker struct { + stateValue *stateValue +} + +func newStateValueKnownValueChecker(stateValue *stateValue) stateValueKnownValueChecker { + return stateValueKnownValueChecker{ + stateValue: stateValue, + } +} + +func (vc stateValueKnownValueChecker) CheckValue(other any) error { + if vc.stateValue.value == nil { + return fmt.Errorf("state value has not been set") + } + + otherVal, ok := other.(string) + + if !ok { + return fmt.Errorf("expected string value for StateValue check, got: %T", other) + } + + if otherVal != *vc.stateValue.value { + return fmt.Errorf("expected value %s for StateValue check, got: %s", *vc.stateValue.value, otherVal) + } + + return nil +} + +func (vc stateValueKnownValueChecker) String() string { + if vc.stateValue.value == nil { + return "error: state value has not been set" + } + return fmt.Sprintf("%s (from state: %q %q)", *vc.stateValue.value, vc.stateValue.resourceAddress, vc.stateValue.attributePath.String()) +} diff --git a/internal/acctest/statecheck/state_value_test.go 
b/internal/acctest/statecheck/state_value_test.go new file mode 100644 index 000000000000..a709b83ca1c6 --- /dev/null +++ b/internal/acctest/statecheck/state_value_test.go @@ -0,0 +1,227 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package statecheck + +import ( + "context" + "testing" + + "github.com/YakDriver/regexache" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + r "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/statecheck" + "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" +) + +func TestStateValue_ValuesSame(t *testing.T) { + t.Parallel() + + stateValue := StateValue() + + r.Test(t, r.TestCase{ + ProviderFactories: map[string]func() (*schema.Provider, error){ + "test": func() (*schema.Provider, error) { //nolint:unparam // required signature + return testProvider(), nil + }, + }, + Steps: []r.TestStep{ + { + Config: `resource "test_resource" "one" { + string_attribute = "same" + } + `, + ConfigStateChecks: []statecheck.StateCheck{ + stateValue.GetStateValue("test_resource.one", tfjsonpath.New("string_attribute")), + }, + }, + { + Config: `resource "test_resource" "one" { + string_attribute = "same" + } + `, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue("test_resource.one", tfjsonpath.New("string_attribute"), stateValue.Value()), + }, + }, + }, + }) +} + +func TestStateValue_ValuesNotSame(t *testing.T) { + t.Parallel() + + stateValue := StateValue() + + r.Test(t, r.TestCase{ + ProviderFactories: map[string]func() (*schema.Provider, error){ + "test": func() (*schema.Provider, error) { //nolint:unparam // required signature + return testProvider(), nil + }, + }, + Steps: []r.TestStep{ + { + Config: `resource "test_resource" "one" { + string_attribute = "same" + } + `, + ConfigStateChecks: []statecheck.StateCheck{ + 
stateValue.GetStateValue("test_resource.one", tfjsonpath.New("string_attribute")), + }, + }, + { + Config: `resource "test_resource" "one" { + string_attribute = "not same" + } + `, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue("test_resource.one", tfjsonpath.New("string_attribute"), stateValue.Value()), + }, + ExpectError: regexache.MustCompile(`expected value same for StateValue check, got: not same`), + }, + }, + }) +} + +func TestStateValue_NotInitialized(t *testing.T) { + t.Parallel() + + stateValue := StateValue() + + r.Test(t, r.TestCase{ + ProviderFactories: map[string]func() (*schema.Provider, error){ + "test": func() (*schema.Provider, error) { //nolint:unparam // required signature + return testProvider(), nil + }, + }, + Steps: []r.TestStep{ + { + Config: `resource "test_resource" "one" { + string_attribute = "value" + } + `, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue("test_resource.one", tfjsonpath.New("string_attribute"), stateValue.Value()), + }, + ExpectError: regexache.MustCompile(`state value has not been set`), + }, + }, + }) +} + +// Copied from https://github.com/hashicorp/terraform-plugin-testing/blob/main/statecheck/expect_known_value_test.go +func testProvider() *schema.Provider { + return &schema.Provider{ + ResourcesMap: map[string]*schema.Resource{ + "test_resource": { + CreateContext: func(_ context.Context, d *schema.ResourceData, _ any) diag.Diagnostics { + d.SetId("test") + + err := d.Set("string_computed_attribute", "computed") + if err != nil { + return diag.Errorf("error setting string_computed_attribute: %s", err) // nosemgrep:ci.semgrep.errors.no-diag.Errorf-leading-error,ci.semgrep.pluginsdk.avoid-diag_Errorf + } + + return nil + }, + UpdateContext: func(_ context.Context, _ *schema.ResourceData, _ any) diag.Diagnostics { + return nil + }, + DeleteContext: func(_ context.Context, _ *schema.ResourceData, _ any) diag.Diagnostics { + return nil + }, + ReadContext: 
func(_ context.Context, _ *schema.ResourceData, _ any) diag.Diagnostics { + return nil + }, + Schema: map[string]*schema.Schema{ + "bool_attribute": { + Optional: true, + Type: schema.TypeBool, + }, + "float_attribute": { + Optional: true, + Type: schema.TypeFloat, + }, + "int_attribute": { + Optional: true, + Type: schema.TypeInt, + }, + "list_attribute": { + Type: schema.TypeList, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + Optional: true, + }, + "list_nested_block": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "list_nested_block_attribute": { + Type: schema.TypeString, + Optional: true, + }, + }, + }, + }, + "map_attribute": { + Type: schema.TypeMap, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + Optional: true, + }, + "set_attribute": { + Type: schema.TypeSet, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + Optional: true, + }, + "set_nested_block": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "set_nested_block_attribute": { + Type: schema.TypeString, + Optional: true, + }, + }, + }, + }, + "set_nested_nested_block": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "set_nested_block": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "set_nested_block_attribute": { + Type: schema.TypeString, + Optional: true, + }, + }, + }, + }, + }, + }, + }, + "string_attribute": { + Optional: true, + Type: schema.TypeString, + }, + "string_computed_attribute": { + Computed: true, + Type: schema.TypeString, + }, + }, + }, + }, + } +} diff --git a/internal/acctest/vcr.go b/internal/acctest/vcr.go index 581caa851c3d..be160f0ecd3b 100644 --- a/internal/acctest/vcr.go +++ b/internal/acctest/vcr.go @@ -7,7 +7,6 @@ import ( "bytes" "context" "crypto/tls" - "encoding/json" "encoding/xml" "fmt" "io" @@ 
-29,6 +28,7 @@ import ( "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" + tfjson "github.com/hashicorp/terraform-provider-aws/internal/json" "github.com/hashicorp/terraform-provider-aws/internal/provider" "github.com/hashicorp/terraform-provider-aws/internal/vcr" "gopkg.in/dnaeon/go-vcr.v4/pkg/cassette" @@ -186,23 +186,7 @@ func vcrProviderConfigureContextFunc(provider *schema.Provider, configureContext switch contentType := r.Header.Get("Content-Type"); contentType { case "application/json", "application/x-amz-json-1.0", "application/x-amz-json-1.1": // JSON might be the same, but reordered. Try parsing and comparing. - var requestJson, cassetteJson any - - if err := json.Unmarshal([]byte(body), &requestJson); err != nil { - tflog.Debug(ctx, "Failed to unmarshal request JSON", map[string]any{ - "error": err, - }) - return false - } - - if err := json.Unmarshal([]byte(i.Body), &cassetteJson); err != nil { - tflog.Debug(ctx, "Failed to unmarshal cassette JSON", map[string]any{ - "error": err, - }) - return false - } - - return reflect.DeepEqual(requestJson, cassetteJson) + return tfjson.EqualStrings(body, i.Body) case "application/xml": // XML might be the same, but reordered. Try parsing and comparing. @@ -375,7 +359,7 @@ func closeVCRRecorder(ctx context.Context, t *testing.T) { defer providerMetas.Unlock() if ok { - if !t.Failed() { + if !t.Failed() && !t.Skipped() { if v, ok := meta.HTTPClient(ctx).Transport.(*recorder.Recorder); ok { t.Log("stopping VCR recorder") if err := v.Stop(); err != nil { @@ -385,6 +369,8 @@ func closeVCRRecorder(ctx context.Context, t *testing.T) { } delete(providerMetas, testName) + } else { + t.Log("provider meta not found for test", testName) } // Save the randomness seed. 
@@ -393,7 +379,7 @@ func closeVCRRecorder(ctx context.Context, t *testing.T) { defer randomnessSources.Unlock() if ok { - if !t.Failed() { + if !t.Failed() && !t.Skipped() { t.Log("persisting randomness seed") if err := writeSeedToFile(s.seed, vcrSeedFile(vcr.Path(), t.Name())); err != nil { t.Error(err) @@ -401,6 +387,8 @@ func closeVCRRecorder(ctx context.Context, t *testing.T) { } delete(randomnessSources, testName) + } else { + t.Log("randomness source not found for test", testName) } } @@ -409,8 +397,12 @@ func ParallelTest(ctx context.Context, t *testing.T, c resource.TestCase) { t.Helper() if vcr.IsEnabled() { - c.ProtoV5ProviderFactories = vcrEnabledProtoV5ProviderFactories(ctx, t, c.ProtoV5ProviderFactories) - defer closeVCRRecorder(ctx, t) + if c.ProtoV5ProviderFactories != nil { + c.ProtoV5ProviderFactories = vcrEnabledProtoV5ProviderFactories(ctx, t, c.ProtoV5ProviderFactories) + defer closeVCRRecorder(ctx, t) + } else { + t.Skip("go-vcr is not currently supported for test step ProtoV5ProviderFactories") + } } resource.ParallelTest(t, c) @@ -421,8 +413,12 @@ func Test(ctx context.Context, t *testing.T, c resource.TestCase) { t.Helper() if vcr.IsEnabled() { - c.ProtoV5ProviderFactories = vcrEnabledProtoV5ProviderFactories(ctx, t, c.ProtoV5ProviderFactories) - defer closeVCRRecorder(ctx, t) + if c.ProtoV5ProviderFactories != nil { + c.ProtoV5ProviderFactories = vcrEnabledProtoV5ProviderFactories(ctx, t, c.ProtoV5ProviderFactories) + defer closeVCRRecorder(ctx, t) + } else { + t.Skip("go-vcr is not currently supported for test step ProtoV5ProviderFactories") + } } resource.Test(t, c) diff --git a/internal/actionwait/errors.go b/internal/actionwait/errors.go new file mode 100644 index 000000000000..58e440763b88 --- /dev/null +++ b/internal/actionwait/errors.go @@ -0,0 +1,70 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package actionwait + +import ( + "errors" + "strings" + "time" +) + +// TimeoutError is returned when the operation does not reach a success state within Timeout. +type TimeoutError struct { + LastStatus Status + Timeout time.Duration +} + +func (e *TimeoutError) Error() string { + return "timeout waiting for target status after " + e.Timeout.String() +} + +// FailureStateError indicates the operation entered a declared failure state. +type FailureStateError struct { + Status Status +} + +func (e *FailureStateError) Error() string { + return "operation entered failure state: " + string(e.Status) +} + +// UnexpectedStateError indicates the operation entered a state outside success/transitional/failure sets. +type UnexpectedStateError struct { + Status Status + Allowed []Status +} + +func (e *UnexpectedStateError) Error() string { + if len(e.Allowed) == 0 { + return "operation entered unexpected state: " + string(e.Status) + } + allowedStr := make([]string, len(e.Allowed)) + for i, s := range e.Allowed { + allowedStr[i] = string(s) + } + return "operation entered unexpected state: " + string(e.Status) + " (allowed: " + + strings.Join(allowedStr, ", ") + ")" +} + +// Error type assertions for compile-time verification +var ( + _ error = (*TimeoutError)(nil) + _ error = (*FailureStateError)(nil) + _ error = (*UnexpectedStateError)(nil) +) + +// Helper functions for error type checking +func IsTimeout(err error) bool { + var timeoutErr *TimeoutError + return errors.As(err, &timeoutErr) +} + +func IsFailureState(err error) bool { + var failureErr *FailureStateError + return errors.As(err, &failureErr) +} + +func IsUnexpectedState(err error) bool { + var unexpectedErr *UnexpectedStateError + return errors.As(err, &unexpectedErr) +} diff --git a/internal/actionwait/errors_test.go b/internal/actionwait/errors_test.go new file mode 100644 index 000000000000..c7379df86521 --- /dev/null +++ b/internal/actionwait/errors_test.go @@ -0,0 
+1,283 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package actionwait + +import ( + "errors" + "strings" + "testing" + "time" +) + +func TestTimeoutError(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + err *TimeoutError + wantMsg string + wantType string + }{ + { + name: "with last status", + err: &TimeoutError{ + LastStatus: "CREATING", + Timeout: 5 * time.Minute, + }, + wantMsg: "timeout waiting for target status after 5m0s", + wantType: "*actionwait.TimeoutError", + }, + { + name: "with empty status", + err: &TimeoutError{ + LastStatus: "", + Timeout: 30 * time.Second, + }, + wantMsg: "timeout waiting for target status after 30s", + wantType: "*actionwait.TimeoutError", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + if got := tt.err.Error(); got != tt.wantMsg { + t.Errorf("TimeoutError.Error() = %q, want %q", got, tt.wantMsg) + } + + // Verify it implements error interface + var err error = tt.err + if got := err.Error(); got != tt.wantMsg { + t.Errorf("TimeoutError as error.Error() = %q, want %q", got, tt.wantMsg) + } + }) + } +} + +func TestFailureStateError(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + err *FailureStateError + wantMsg string + }{ + { + name: "with status", + err: &FailureStateError{ + Status: "FAILED", + }, + wantMsg: "operation entered failure state: FAILED", + }, + { + name: "with empty status", + err: &FailureStateError{ + Status: "", + }, + wantMsg: "operation entered failure state: ", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + if got := tt.err.Error(); got != tt.wantMsg { + t.Errorf("FailureStateError.Error() = %q, want %q", got, tt.wantMsg) + } + }) + } +} + +func TestUnexpectedStateError(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + err *UnexpectedStateError + wantMsg string + }{ + { + name: "no allowed states", + err: 
&UnexpectedStateError{ + Status: "UNKNOWN", + Allowed: nil, + }, + wantMsg: "operation entered unexpected state: UNKNOWN", + }, + { + name: "empty allowed states", + err: &UnexpectedStateError{ + Status: "UNKNOWN", + Allowed: []Status{}, + }, + wantMsg: "operation entered unexpected state: UNKNOWN", + }, + { + name: "single allowed state", + err: &UnexpectedStateError{ + Status: "UNKNOWN", + Allowed: []Status{"AVAILABLE"}, + }, + wantMsg: "operation entered unexpected state: UNKNOWN (allowed: AVAILABLE)", + }, + { + name: "multiple allowed states", + err: &UnexpectedStateError{ + Status: "UNKNOWN", + Allowed: []Status{"CREATING", "AVAILABLE", "UPDATING"}, + }, + wantMsg: "operation entered unexpected state: UNKNOWN (allowed: CREATING, AVAILABLE, UPDATING)", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + if got := tt.err.Error(); got != tt.wantMsg { + t.Errorf("UnexpectedStateError.Error() = %q, want %q", got, tt.wantMsg) + } + }) + } +} + +func TestErrorTypeChecking(t *testing.T) { + t.Parallel() + + // Create instances of each error type + timeoutErr := &TimeoutError{LastStatus: "CREATING", Timeout: time.Minute} + failureErr := &FailureStateError{Status: "FAILED"} + unexpectedErr := &UnexpectedStateError{Status: "UNKNOWN", Allowed: []Status{"AVAILABLE"}} + genericErr := errors.New("generic error") + + tests := []struct { + name string + err error + wantIsTimeout bool + wantIsFailure bool + wantIsUnexpected bool + }{ + { + name: "TimeoutError", + err: timeoutErr, + wantIsTimeout: true, + wantIsFailure: false, + wantIsUnexpected: false, + }, + { + name: "FailureStateError", + err: failureErr, + wantIsTimeout: false, + wantIsFailure: true, + wantIsUnexpected: false, + }, + { + name: "UnexpectedStateError", + err: unexpectedErr, + wantIsTimeout: false, + wantIsFailure: false, + wantIsUnexpected: true, + }, + { + name: "generic error", + err: genericErr, + wantIsTimeout: false, + wantIsFailure: false, + 
wantIsUnexpected: false, + }, + { + name: "nil error", + err: nil, + wantIsTimeout: false, + wantIsFailure: false, + wantIsUnexpected: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + if got := IsTimeout(tt.err); got != tt.wantIsTimeout { + t.Errorf("IsTimeout(%v) = %v, want %v", tt.err, got, tt.wantIsTimeout) + } + + if got := IsFailureState(tt.err); got != tt.wantIsFailure { + t.Errorf("IsFailureState(%v) = %v, want %v", tt.err, got, tt.wantIsFailure) + } + + if got := IsUnexpectedState(tt.err); got != tt.wantIsUnexpected { + t.Errorf("IsUnexpectedState(%v) = %v, want %v", tt.err, got, tt.wantIsUnexpected) + } + }) + } +} + +func TestWrappedErrors(t *testing.T) { + t.Parallel() + + // Test that error type checking works with wrapped errors + baseErr := &TimeoutError{LastStatus: "CREATING", Timeout: time.Minute} + wrappedErr := errors.New("wrapped: " + baseErr.Error()) + + // Direct error should be detected + if !IsTimeout(baseErr) { + t.Errorf("IsTimeout should detect direct TimeoutError") + } + + // Wrapped string error should NOT be detected (this is expected behavior) + if IsTimeout(wrappedErr) { + t.Errorf("IsTimeout should not detect string-wrapped error") + } + + // But wrapped with errors.Join should work + joinedErr := errors.Join(baseErr, errors.New("additional context")) + if !IsTimeout(joinedErr) { + t.Errorf("IsTimeout should detect error in errors.Join") + } +} + +func TestErrorMessages(t *testing.T) { + t.Parallel() + + // Verify error messages contain expected components for debugging + timeoutErr := &TimeoutError{ + LastStatus: "PENDING", + Timeout: 2 * time.Minute, + } + + msg := timeoutErr.Error() + if !strings.Contains(msg, "timeout") { + t.Errorf("TimeoutError message should contain 'timeout', got: %q", msg) + } + if !strings.Contains(msg, "2m0s") { + t.Errorf("TimeoutError message should contain timeout duration, got: %q", msg) + } + + failureErr := &FailureStateError{Status: "ERROR"} + 
msg = failureErr.Error() + if !strings.Contains(msg, "failure state") { + t.Errorf("FailureStateError message should contain 'failure state', got: %q", msg) + } + if !strings.Contains(msg, "ERROR") { + t.Errorf("FailureStateError message should contain status, got: %q", msg) + } + + unexpectedErr := &UnexpectedStateError{ + Status: "WEIRD", + Allowed: []Status{"GOOD", "BETTER"}, + } + msg = unexpectedErr.Error() + if !strings.Contains(msg, "unexpected state") { + t.Errorf("UnexpectedStateError message should contain 'unexpected state', got: %q", msg) + } + if !strings.Contains(msg, "WEIRD") { + t.Errorf("UnexpectedStateError message should contain actual status, got: %q", msg) + } + if !strings.Contains(msg, "GOOD, BETTER") { + t.Errorf("UnexpectedStateError message should contain allowed states, got: %q", msg) + } +} diff --git a/internal/actionwait/wait.go b/internal/actionwait/wait.go new file mode 100644 index 000000000000..1f095f7deef0 --- /dev/null +++ b/internal/actionwait/wait.go @@ -0,0 +1,254 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// Package actionwait provides a lightweight, action-focused polling helper +// for imperative Terraform actions which need to await asynchronous AWS +// operation completion with periodic user progress events. +package actionwait + +import ( + "context" + "errors" + "slices" + "time" + + "github.com/hashicorp/terraform-provider-aws/internal/backoff" +) + +// DefaultPollInterval is the default fixed polling interval used when no custom IntervalStrategy is provided. +const DefaultPollInterval = 30 * time.Second + +// Status represents a string status value returned from a polled API. +type Status string + +// FetchResult wraps the latest status (and optional value) from a poll attempt. +// Value may be a richer SDK structure (pointer) or zero for simple cases. 
+type FetchResult[T any] struct { + Status Status + Value T +} + +// FetchFunc retrieves the latest state of an asynchronous operation. +// It should be side-effect free aside from the remote read. +type FetchFunc[T any] func(context.Context) (FetchResult[T], error) + +// IntervalStrategy allows pluggable poll interval behavior (fixed, backoff, etc.). +type IntervalStrategy interface { //nolint:interfacebloat // single method interface (tiny intentional interface) + NextPoll(attempt uint) time.Duration +} + +// FixedInterval implements IntervalStrategy with a constant delay. +type FixedInterval time.Duration + +// NextPoll returns the fixed duration. +func (fi FixedInterval) NextPoll(uint) time.Duration { return time.Duration(fi) } + +// BackoffInterval implements IntervalStrategy using a backoff.Delay strategy. +// This allows actionwait to leverage sophisticated backoff algorithms while +// maintaining the declarative status-based polling approach. +type BackoffInterval struct { + delay backoff.Delay +} + +// NextPoll returns the next polling interval using the wrapped backoff delay strategy. +func (bi BackoffInterval) NextPoll(attempt uint) time.Duration { + return bi.delay.Next(attempt) +} + +// WithBackoffDelay creates an IntervalStrategy that uses the provided backoff.Delay. +// This bridges actionwait's IntervalStrategy interface with the backoff package's +// delay strategies (fixed, exponential, SDK-compatible, etc.). +// +// Example usage: +// +// opts := actionwait.Options[MyType]{ +// Interval: actionwait.WithBackoffDelay(backoff.FixedDelay(time.Second)), +// // ... other options +// } +func WithBackoffDelay(delay backoff.Delay) IntervalStrategy { + return BackoffInterval{delay: delay} +} + +// Options configure the WaitForStatus loop. +type Options[T any] struct { + Timeout time.Duration // Required total timeout. + Interval IntervalStrategy // Poll interval strategy (default: 30s fixed). 
+ ProgressInterval time.Duration // Throttle for ProgressSink (default: disabled if <=0). + SuccessStates []Status // Required (>=1) terminal success states. + TransitionalStates []Status // Optional allowed in-flight states. + FailureStates []Status // Optional explicit failure states. + ConsecutiveSuccess int // Number of consecutive successes required (default 1). + ProgressSink func(fr FetchResult[any], meta ProgressMeta) +} + +// ProgressMeta supplies metadata for progress callbacks. +type ProgressMeta struct { + Attempt uint + Elapsed time.Duration + Remaining time.Duration + Deadline time.Time + NextPollIn time.Duration +} + +// WaitForStatus polls using fetch until a success state, failure state, timeout, unexpected state, +// context cancellation, or fetch error occurs. +// On success, the final FetchResult is returned with nil error. +func WaitForStatus[T any](ctx context.Context, fetch FetchFunc[T], opts Options[T]) (FetchResult[T], error) { //nolint:cyclop // complexity driven by classification/state machine; readability preferred + if err := validateOptions(opts); err != nil { + var zero FetchResult[T] + return zero, err + } + + normalizeOptions(&opts) + + start := time.Now() + deadline := start.Add(opts.Timeout) + var lastProgress time.Time + var attempt uint + var successStreak int + var last FetchResult[T] + + // Precompute allowed states for unexpected classification (success + transitional + failure) + // Failure states are excluded from Allowed to ensure they classify distinctly. + allowedTransient := append([]Status{}, opts.SuccessStates...) + allowedTransient = append(allowedTransient, opts.TransitionalStates...) 
+ + for { + // Early return: context cancelled + if ctx.Err() != nil { + return last, ctx.Err() + } + + // Early return: timeout exceeded + if time.Now().After(deadline) { + return last, &TimeoutError{LastStatus: last.Status, Timeout: opts.Timeout} + } + + // Fetch current status + fr, err := fetch(ctx) + if err != nil { + return fr, err // Early return: fetch error + } + last = fr + + // Classify status and determine if we should terminate + isTerminal, classifyErr := classifyStatus(fr, opts, &successStreak, allowedTransient) + if isTerminal { + return fr, classifyErr // Early return: terminal state (success or failure) + } + + // Handle progress reporting + handleProgressReport(opts, fr, start, deadline, attempt, &lastProgress) + + // Sleep until next attempt, with context cancellation check + if err := sleepWithContext(ctx, opts.Interval.NextPoll(attempt)); err != nil { + return last, err // Early return: context cancelled during sleep + } + + attempt++ + } +} + +// anyFetchResult converts a typed FetchResult[T] into FetchResult[any] for ProgressSink. +func anyFetchResult[T any](fr FetchResult[T]) FetchResult[any] { + return FetchResult[any]{Status: fr.Status, Value: any(fr.Value)} +} + +func maxDuration(a, b time.Duration) time.Duration { + if a > b { + return a + } + return b +} + +// validateOptions performs early validation of required options. +func validateOptions[T any](opts Options[T]) error { + if opts.Timeout <= 0 { + return errors.New("actionwait: Timeout must be > 0") + } + if len(opts.SuccessStates) == 0 { + return errors.New("actionwait: at least one SuccessState required") + } + if opts.ConsecutiveSuccess < 0 { + return errors.New("actionwait: ConsecutiveSuccess cannot be negative") + } + if opts.ProgressInterval < 0 { + return errors.New("actionwait: ProgressInterval cannot be negative") + } + return nil +} + +// normalizeOptions sets defaults for optional configuration. 
+func normalizeOptions[T any](opts *Options[T]) { + if opts.ConsecutiveSuccess <= 0 { + opts.ConsecutiveSuccess = 1 + } + if opts.Interval == nil { + opts.Interval = FixedInterval(DefaultPollInterval) + } +} + +// classifyStatus determines the next action based on the current status. +// Returns: (isTerminal, error) - if isTerminal is true, polling should stop. +func classifyStatus[T any](fr FetchResult[T], opts Options[T], successStreak *int, allowedTransient []Status) (bool, error) { + // Classification precedence: failure -> success -> transitional -> unexpected + if slices.Contains(opts.FailureStates, fr.Status) { + return true, &FailureStateError{Status: fr.Status} + } + + if slices.Contains(opts.SuccessStates, fr.Status) { + *successStreak++ + if *successStreak >= opts.ConsecutiveSuccess { + return true, nil // Success! + } + return false, nil // Continue polling for consecutive successes + } + + // Not a success state, reset streak + *successStreak = 0 + + // Check if transitional state is allowed + // If TransitionalStates is specified, status must be in that list + // If TransitionalStates is empty, any non-success/non-failure state is allowed + if len(opts.TransitionalStates) > 0 && !slices.Contains(opts.TransitionalStates, fr.Status) { + return true, &UnexpectedStateError{Status: fr.Status, Allowed: allowedTransient} + } + + return false, nil // Continue polling +} + +// handleProgressReport sends progress updates if conditions are met. 
+func handleProgressReport[T any](opts Options[T], fr FetchResult[T], start time.Time, deadline time.Time, attempt uint, lastProgress *time.Time) { + if opts.ProgressSink == nil || opts.ProgressInterval <= 0 { + return + } + + if lastProgress.IsZero() || time.Since(*lastProgress) >= opts.ProgressInterval { + nextPoll := opts.Interval.NextPoll(attempt) + opts.ProgressSink(anyFetchResult(fr), ProgressMeta{ + Attempt: attempt, + Elapsed: time.Since(start), + Remaining: maxDuration(0, time.Until(deadline)), + Deadline: deadline, + NextPollIn: nextPoll, + }) + *lastProgress = time.Now() + } +} + +// sleepWithContext sleeps for the specified duration while respecting context cancellation. +func sleepWithContext(ctx context.Context, duration time.Duration) error { + if duration <= 0 { + return nil + } + + timer := time.NewTimer(duration) + defer timer.Stop() + + select { + case <-ctx.Done(): + return ctx.Err() + case <-timer.C: + return nil + } +} diff --git a/internal/actionwait/wait_test.go b/internal/actionwait/wait_test.go new file mode 100644 index 000000000000..22096adb77f3 --- /dev/null +++ b/internal/actionwait/wait_test.go @@ -0,0 +1,423 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package actionwait + +import ( + "context" + "errors" + "strings" + "sync/atomic" + "testing" + "time" + + "github.com/hashicorp/terraform-provider-aws/internal/backoff" +) + +// fastFixedInterval returns a very small fixed interval to speed tests. +const fastFixedInterval = 5 * time.Millisecond + +// makeCtx creates a context with generous overall test timeout safeguard. +func makeCtx(t *testing.T) context.Context { // test helper + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + t.Cleanup(cancel) + return ctx +} + +func TestWaitForStatus_ValidationErrors(t *testing.T) { + t.Parallel() + // Subtests parallelized; each uses its own context with timeout. 
+ cases := map[string]Options[struct{}]{ + "missing timeout": {SuccessStates: []Status{"ok"}}, + "missing success": {Timeout: time.Second}, + "negative consecutive": {Timeout: time.Second, SuccessStates: []Status{"ok"}, ConsecutiveSuccess: -1}, + "negative progress interval": {Timeout: time.Second, SuccessStates: []Status{"ok"}, ProgressInterval: -time.Second}, + } + + for name, opts := range cases { + t.Run(name, func(t *testing.T) { + t.Parallel() + ctx := makeCtx(t) + _, err := WaitForStatus(ctx, func(context.Context) (FetchResult[struct{}], error) { + return FetchResult[struct{}]{Status: "irrelevant"}, nil + }, opts) + if err == nil { + t.Fatalf("expected validation error") + } + }) + } +} + +func TestWaitForStatus_SuccessImmediate(t *testing.T) { + t.Parallel() + ctx := makeCtx(t) + fr, err := WaitForStatus(ctx, func(context.Context) (FetchResult[int], error) { + return FetchResult[int]{Status: "DONE", Value: 42}, nil + }, Options[int]{ + Timeout: 250 * time.Millisecond, + SuccessStates: []Status{"DONE"}, + Interval: FixedInterval(fastFixedInterval), + }) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if fr.Value != 42 || fr.Status != "DONE" { + t.Fatalf("unexpected result: %#v", fr) + } +} + +func TestWaitForStatus_SuccessAfterTransitions(t *testing.T) { + t.Parallel() + ctx := makeCtx(t) + var calls int32 + fr, err := WaitForStatus(ctx, func(context.Context) (FetchResult[string], error) { + c := atomic.AddInt32(&calls, 1) + switch c { + case 1, 2: + return FetchResult[string]{Status: "IN_PROGRESS", Value: "step"}, nil + default: + return FetchResult[string]{Status: "COMPLETE", Value: "done"}, nil + } + }, Options[string]{ + Timeout: 500 * time.Millisecond, + SuccessStates: []Status{"COMPLETE"}, + TransitionalStates: []Status{"IN_PROGRESS"}, + Interval: FixedInterval(fastFixedInterval), + }) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if fr.Status != "COMPLETE" || fr.Value != "done" { + t.Fatalf("unexpected final result: 
%#v", fr) + } +} + +func TestWaitForStatus_FailureState(t *testing.T) { + t.Parallel() + ctx := makeCtx(t) + fr, err := WaitForStatus(ctx, func(context.Context) (FetchResult[struct{}], error) { + return FetchResult[struct{}]{Status: "FAILED"}, nil + }, Options[struct{}]{ + Timeout: 200 * time.Millisecond, + SuccessStates: []Status{"SUCCEEDED"}, + FailureStates: []Status{"FAILED"}, + Interval: FixedInterval(fastFixedInterval), + }) + if err == nil { + t.Fatal("expected failure error") + } + if _, ok := err.(*FailureStateError); !ok { //nolint:errorlint // direct type assertion adequate in tests + t.Fatalf("expected FailureStateError, got %T", err) + } + if fr.Status != "FAILED" { + t.Fatalf("unexpected status: %v", fr.Status) + } +} + +func TestWaitForStatus_UnexpectedState_WithTransitional(t *testing.T) { + t.Parallel() + ctx := makeCtx(t) + _, err := WaitForStatus(ctx, func(context.Context) (FetchResult[int], error) { + return FetchResult[int]{Status: "UNKNOWN"}, nil + }, Options[int]{ + Timeout: 200 * time.Millisecond, + SuccessStates: []Status{"OK"}, + TransitionalStates: []Status{"PENDING"}, + Interval: FixedInterval(fastFixedInterval), + }) + if err == nil { + t.Fatal("expected unexpected state error") + } + if _, ok := err.(*UnexpectedStateError); !ok { //nolint:errorlint // direct type assertion adequate in tests + t.Fatalf("expected UnexpectedStateError, got %T", err) + } +} + +func TestWaitForStatus_NoTransitionalListAllowsAnyUntilTimeout(t *testing.T) { + t.Parallel() + ctx := makeCtx(t) + start := time.Now() + _, err := WaitForStatus(ctx, func(context.Context) (FetchResult[struct{}], error) { + return FetchResult[struct{}]{Status: "WHATEVER"}, nil + }, Options[struct{}]{ + Timeout: 50 * time.Millisecond, + SuccessStates: []Status{"DONE"}, + Interval: FixedInterval(10 * time.Millisecond), + }) + if err == nil { + t.Fatal("expected timeout error") + } + if _, ok := err.(*TimeoutError); !ok { //nolint:errorlint // direct type assertion adequate in tests + 
t.Fatalf("expected TimeoutError, got %T", err) + } + if time.Since(start) < 40*time.Millisecond { // sanity that we actually waited + t.Fatalf("timeout returned too early") + } +} + +func TestWaitForStatus_ContextCancel(t *testing.T) { + t.Parallel() + ctx, cancel := context.WithCancel(makeCtx(t)) + go func() { + time.Sleep(20 * time.Millisecond) + cancel() + }() + _, err := WaitForStatus(ctx, func(context.Context) (FetchResult[struct{}], error) { + return FetchResult[struct{}]{Status: "PENDING"}, nil + }, Options[struct{}]{ + Timeout: 500 * time.Millisecond, + SuccessStates: []Status{"DONE"}, + Interval: FixedInterval(fastFixedInterval), + }) + if !errors.Is(err, context.Canceled) { + t.Fatalf("expected context.Canceled, got %v", err) + } +} + +func TestWaitForStatus_FetchErrorPropagation(t *testing.T) { + t.Parallel() + ctx := makeCtx(t) + testErr := errors.New("boom") + _, err := WaitForStatus(ctx, func(context.Context) (FetchResult[int], error) { + return FetchResult[int]{}, testErr + }, Options[int]{ + Timeout: 200 * time.Millisecond, + SuccessStates: []Status{"OK"}, + Interval: FixedInterval(fastFixedInterval), + }) + if !errors.Is(err, testErr) { + t.Fatalf("expected fetch error, got %v", err) + } +} + +func TestWaitForStatus_ConsecutiveSuccess(t *testing.T) { + t.Parallel() + ctx := makeCtx(t) + var toggle int32 + // alternate success / transitional until two consecutive successes happen + fr, err := WaitForStatus(ctx, func(context.Context) (FetchResult[string], error) { + n := atomic.AddInt32(&toggle, 1) + // Pattern: BUILDING, READY, READY, READY ... 
ensures at least two consecutive successes by third attempt + if n == 1 { + return FetchResult[string]{Status: "BUILDING", Value: "val"}, nil + } + return FetchResult[string]{Status: "READY", Value: "val"}, nil + }, Options[string]{ + Timeout: 750 * time.Millisecond, + SuccessStates: []Status{"READY"}, + TransitionalStates: []Status{"BUILDING"}, + ConsecutiveSuccess: 2, + Interval: FixedInterval(2 * time.Millisecond), + }) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if fr.Status != "READY" { + t.Fatalf("expected READY, got %v", fr.Status) + } + if atomic.LoadInt32(&toggle) < 3 { // at least three fetches required (BUILDING, READY, READY) + t.Fatalf("expected multiple attempts, got %d", toggle) + } +} + +func TestWaitForStatus_ProgressSinkThrottling(t *testing.T) { + t.Parallel() + ctx := makeCtx(t) + var progressCalls int32 + var fetchCalls int32 + _, _ = WaitForStatus(ctx, func(context.Context) (FetchResult[int], error) { + atomic.AddInt32(&fetchCalls, 1) + if fetchCalls >= 5 { + return FetchResult[int]{Status: "DONE"}, nil + } + return FetchResult[int]{Status: "WORKING"}, nil + }, Options[int]{ + Timeout: 500 * time.Millisecond, + SuccessStates: []Status{"DONE"}, + TransitionalStates: []Status{"WORKING"}, + Interval: FixedInterval(5 * time.Millisecond), + ProgressInterval: 15 * time.Millisecond, // should group roughly 3 polls + ProgressSink: func(fr FetchResult[any], meta ProgressMeta) { + atomic.AddInt32(&progressCalls, 1) + if fr.Status != "WORKING" && fr.Status != "DONE" { + t.Fatalf("unexpected status in progress sink: %v", fr.Status) + } + if meta.NextPollIn <= 0 { + t.Fatalf("expected positive NextPollIn") + } + }, + }) + // With 5 fetch calls and 15ms progress vs 5ms poll, expect fewer progress events than fetches + if progressCalls <= 1 || progressCalls >= fetchCalls { + t.Fatalf("unexpected progress call count: %d (fetches %d)", progressCalls, fetchCalls) + } +} + +func TestWaitForStatus_ConsecutiveSuccessDefault(t *testing.T) { + 
t.Parallel() + ctx := makeCtx(t) + fr, err := WaitForStatus(ctx, func(context.Context) (FetchResult[struct{}], error) { + return FetchResult[struct{}]{Status: "READY"}, nil + }, Options[struct{}]{ + Timeout: 100 * time.Millisecond, + SuccessStates: []Status{"READY"}, + Interval: FixedInterval(fastFixedInterval), + // ConsecutiveSuccess left zero to trigger defaulting logic + }) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if fr.Status != "READY" { + t.Fatalf("unexpected status: %v", fr.Status) + } +} + +func TestWaitForStatus_ProgressSinkDisabled(t *testing.T) { + t.Parallel() + ctx := makeCtx(t) + var progressCalls int32 + _, err := WaitForStatus(ctx, func(context.Context) (FetchResult[int], error) { + return FetchResult[int]{Status: "DONE"}, nil + }, Options[int]{ + Timeout: 100 * time.Millisecond, + SuccessStates: []Status{"DONE"}, + Interval: FixedInterval(fastFixedInterval), + ProgressInterval: 0, // disabled + ProgressSink: func(FetchResult[any], ProgressMeta) { + atomic.AddInt32(&progressCalls, 1) + }, + }) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if progressCalls != 0 { // should not be invoked when ProgressInterval <= 0 + t.Fatalf("expected zero progress sink calls, got %d", progressCalls) + } +} + +func TestWaitForStatus_UnexpectedStateErrorMessage(t *testing.T) { + t.Parallel() + ctx := makeCtx(t) + _, err := WaitForStatus(ctx, func(context.Context) (FetchResult[int], error) { + return FetchResult[int]{Status: "UNKNOWN"}, nil + }, Options[int]{ + Timeout: 200 * time.Millisecond, + SuccessStates: []Status{"OK"}, + TransitionalStates: []Status{"PENDING", "IN_PROGRESS"}, + Interval: FixedInterval(fastFixedInterval), + }) + if err == nil { + t.Fatal("expected unexpected state error") + } + var unexpectedErr *UnexpectedStateError + if !errors.As(err, &unexpectedErr) { + t.Fatalf("expected UnexpectedStateError, got %T", err) + } + errMsg := unexpectedErr.Error() + if !strings.Contains(errMsg, "UNKNOWN") { + 
t.Errorf("error message should contain status 'UNKNOWN', got: %s", errMsg) + } + if !strings.Contains(errMsg, "allowed:") { + t.Errorf("error message should list allowed states, got: %s", errMsg) + } + if !strings.Contains(errMsg, "PENDING") { + t.Errorf("error message should contain allowed state 'PENDING', got: %s", errMsg) + } +} + +func TestBackoffInterval(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + delay backoff.Delay + attempts []uint + expectedDurations []time.Duration + }{ + { + name: "fixed delay", + delay: backoff.FixedDelay(100 * time.Millisecond), + attempts: []uint{0, 1, 2, 3}, + expectedDurations: []time.Duration{0, 100 * time.Millisecond, 100 * time.Millisecond, 100 * time.Millisecond}, + }, + { + name: "zero delay", + delay: backoff.ZeroDelay, + attempts: []uint{0, 1, 2}, + expectedDurations: []time.Duration{0, 0, 0}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + interval := BackoffInterval{delay: tt.delay} + + for i, attempt := range tt.attempts { + got := interval.NextPoll(attempt) + want := tt.expectedDurations[i] + if got != want { + t.Errorf("NextPoll(%d) = %v, want %v", attempt, got, want) + } + } + }) + } +} + +func TestWithBackoffDelay(t *testing.T) { + t.Parallel() + + delay := backoff.FixedDelay(50 * time.Millisecond) + interval := WithBackoffDelay(delay) + + // Test that it wraps the delay correctly + if got := interval.NextPoll(0); got != 0 { + t.Errorf("NextPoll(0) = %v, want 0", got) + } + if got := interval.NextPoll(1); got != 50*time.Millisecond { + t.Errorf("NextPoll(1) = %v, want 50ms", got) + } +} + +func TestBackoffIntegration(t *testing.T) { + t.Parallel() + + ctx := makeCtx(t) + + var callCount atomic.Int32 + fetch := func(context.Context) (FetchResult[string], error) { + count := callCount.Add(1) + switch count { + case 1: + return FetchResult[string]{Status: "CREATING", Value: "attempt1"}, nil + case 2: + return FetchResult[string]{Status: 
"AVAILABLE", Value: "success"}, nil + default: + t.Errorf("unexpected call count: %d", count) + return FetchResult[string]{}, errors.New("too many calls") + } + } + + opts := Options[string]{ + Timeout: 2 * time.Second, + Interval: WithBackoffDelay(backoff.FixedDelay(fastFixedInterval)), + SuccessStates: []Status{"AVAILABLE"}, + TransitionalStates: []Status{"CREATING"}, + } + + result, err := WaitForStatus(ctx, fetch, opts) + if err != nil { + t.Fatalf("WaitForStatus() error = %v", err) + } + + if result.Status != "AVAILABLE" { + t.Errorf("result.Status = %q, want %q", result.Status, "AVAILABLE") + } + if result.Value != "success" { + t.Errorf("result.Value = %q, want %q", result.Value, "success") + } + if callCount.Load() != 2 { + t.Errorf("expected 2 fetch calls, got %d", callCount.Load()) + } +} diff --git a/internal/backoff/backoff.go b/internal/backoff/backoff.go index 45134ced6112..be77b6e94eb4 100644 --- a/internal/backoff/backoff.go +++ b/internal/backoff/backoff.go @@ -17,9 +17,26 @@ type Timer interface { After(time.Duration) <-chan time.Time } +type Delay interface { + // Next returns the duration to wait before the next attempt. + Next(uint) time.Duration +} + +type DelayWithSetIncrementDelay interface { + Delay + + // SetIncrementDelay sets a flag to determine whether or not the next call to Next increments the delay duration. + SetIncrementDelay(bool) +} + // DelayFunc returns the duration to wait before the next attempt. type DelayFunc func(uint) time.Duration +// Next returns the duration to wait before the next attempt. +func (f DelayFunc) Next(n uint) time.Duration { + return f(n) +} + // FixedDelay returns a delay. The first attempt has no delay (0), and subsequent attempts use the fixed delay. func FixedDelay(delay time.Duration) DelayFunc { return func(n uint) time.Duration { @@ -34,68 +51,80 @@ func FixedDelay(delay time.Duration) DelayFunc { // ZeroDelay returns 0 for all attempts. // // This DelayFunc should only be used for testing. 
-var ZeroDelay = func(n uint) time.Duration { +var ZeroDelay DelayFunc = func(n uint) time.Duration { return 0 } type sdkv2HelperRetryCompatibleDelay struct { - minTimeout time.Duration - pollInterval time.Duration - wait time.Duration + delay time.Duration + incrementDelay bool + initialDelay time.Duration + minTimeout time.Duration + pollInterval time.Duration } -func (d *sdkv2HelperRetryCompatibleDelay) delay() time.Duration { - wait := d.wait +// Next returns the duration to wait before the next attempt. +func (d *sdkv2HelperRetryCompatibleDelay) Next(n uint) time.Duration { + if n == 0 { + return d.initialDelay + } + + delay := d.delay // First round had no wait. - if wait == 0 { - wait = 100 * time.Millisecond + if delay == 0 { + delay = 100 * time.Millisecond } - wait *= 2 + if d.incrementDelay { + delay *= 2 + } // If a poll interval has been specified, choose that interval. // Otherwise bound the default value. if d.pollInterval > 0 && d.pollInterval < 180*time.Second { - wait = d.pollInterval + delay = d.pollInterval } else { - if wait < d.minTimeout { - wait = d.minTimeout - } else if wait > 10*time.Second { - wait = 10 * time.Second + if delay < d.minTimeout { + delay = d.minTimeout + } else if delay > 10*time.Second { + delay = 10 * time.Second } } - d.wait = wait + d.delay = delay - return wait + return delay } -// SDKv2HelperRetryCompatibleDelay returns a Terraform Plugin SDK v2 helper/retry-compatible delay. -func SDKv2HelperRetryCompatibleDelay(initialDelay, pollInterval, minTimeout time.Duration) DelayFunc { - delay := &sdkv2HelperRetryCompatibleDelay{ - minTimeout: minTimeout, - pollInterval: pollInterval, - } - - return func(n uint) time.Duration { - if n == 0 { - return initialDelay - } +// SetIncrementDelay sets a flag to determine whether or not the next call to Next increments the delay duration. 
+func (d *sdkv2HelperRetryCompatibleDelay) SetIncrementDelay(incrementDelay bool) { + d.incrementDelay = incrementDelay +} - return delay.delay() +// SDKv2HelperRetryCompatibleDelay returns a Terraform Plugin SDK v2 helper/retry-compatible delay. +func SDKv2HelperRetryCompatibleDelay(initialDelay, pollInterval, minTimeout time.Duration) Delay { + return &sdkv2HelperRetryCompatibleDelay{ + incrementDelay: true, + initialDelay: initialDelay, + minTimeout: minTimeout, + pollInterval: pollInterval, } } // DefaultSDKv2HelperRetryCompatibleDelay returns a Terraform Plugin SDK v2 helper/retry-compatible delay // with default values (from the `RetryContext` function). -func DefaultSDKv2HelperRetryCompatibleDelay() DelayFunc { +func DefaultSDKv2HelperRetryCompatibleDelay() Delay { return SDKv2HelperRetryCompatibleDelay(0, 0, 500*time.Millisecond) //nolint:mnd // 500ms is the Plugin SDKv2 default } +var ( + _ DelayWithSetIncrementDelay = (*sdkv2HelperRetryCompatibleDelay)(nil) +) + // LoopConfig configures a loop. 
type LoopConfig struct { - delay DelayFunc + delay Delay gracePeriod time.Duration timer Timer } @@ -111,7 +140,7 @@ func WithGracePeriod(d time.Duration) Option { } } -func WithDelay(d DelayFunc) Option { +func WithDelay(d Delay) Option { if d == nil { return emptyOption } @@ -186,7 +215,7 @@ func (r *Loop) Continue(ctx context.Context) bool { r.gracePeriod = 0 } - r.sleep(ctx, r.config.delay(r.attempt)) + r.sleep(ctx, r.config.delay.Next(r.attempt)) r.attempt++ return context.Cause(ctx) == nil diff --git a/internal/backoff/backoff_test.go b/internal/backoff/backoff_test.go index 633b343cd3ae..e7ba0ae359c9 100644 --- a/internal/backoff/backoff_test.go +++ b/internal/backoff/backoff_test.go @@ -29,7 +29,35 @@ func TestDefaultSDKv2HelperRetryCompatibleDelay(t *testing.T) { } var got []time.Duration for i := range len(want) { - got = append(got, delay(uint(i))) + got = append(got, delay.Next(uint(i))) + } + + if diff := cmp.Diff(got, want); diff != "" { + t.Errorf("unexpected diff (+wanted, -got): %s", diff) + } +} + +func TestDefaultSDKv2HelperRetryCompatibleDelayWithIncrementDelay(t *testing.T) { + t.Parallel() + + delay := DefaultSDKv2HelperRetryCompatibleDelay() + want := []time.Duration{ + 0, + 500 * time.Millisecond, + 1 * time.Second, + 1 * time.Second, + 1 * time.Second, + 2 * time.Second, + 4 * time.Second, + 8 * time.Second, + 10 * time.Second, + 10 * time.Second, + } + var got []time.Duration + for i := range len(want) { + delay.(DelayWithSetIncrementDelay).SetIncrementDelay(i < 3 || i > 4) + + got = append(got, delay.Next(uint(i))) } if diff := cmp.Diff(got, want); diff != "" { @@ -50,7 +78,7 @@ func TestSDKv2HelperRetryCompatibleDelay(t *testing.T) { } var got []time.Duration for i := range len(want) { - got = append(got, delay(uint(i))) + got = append(got, delay.Next(uint(i))) } if diff := cmp.Diff(got, want); diff != "" { @@ -71,7 +99,7 @@ func TestSDKv2HelperRetryCompatibleDelayWithPollTimeout(t *testing.T) { } var got []time.Duration for i := range 
len(want) { - got = append(got, delay(uint(i))) + got = append(got, delay.Next(uint(i))) } if diff := cmp.Diff(got, want); diff != "" { diff --git a/internal/conns/awsclient.go b/internal/conns/awsclient.go index 6e5230be7a0f..93aca9b3ee53 100644 --- a/internal/conns/awsclient.go +++ b/internal/conns/awsclient.go @@ -117,40 +117,45 @@ func (c *AWSClient) PartitionHostname(ctx context.Context, prefix string) string // GlobalARN returns a global (no Region) ARN for the specified service namespace and resource. func (c *AWSClient) GlobalARN(ctx context.Context, service, resource string) string { - return c.GlobalARNWithAccount(ctx, service, c.AccountID(ctx), resource) + return c.arn(ctx, service, "", c.AccountID(ctx), resource) } // GlobalARNNoAccount returns a global (no Region) ARN for the specified service namespace and resource without AWS account ID. func (c *AWSClient) GlobalARNNoAccount(ctx context.Context, service, resource string) string { - return c.GlobalARNWithAccount(ctx, service, "", resource) + return c.arn(ctx, service, "", "", resource) } // GlobalARNWithAccount returns a global (no Region) ARN for the specified service namespace, resource and account ID. func (c *AWSClient) GlobalARNWithAccount(ctx context.Context, service, accountID, resource string) string { - return arn.ARN{ - Partition: c.Partition(ctx), - Service: service, - AccountID: accountID, - Resource: resource, - }.String() + return c.arn(ctx, service, "", accountID, resource) } // RegionalARN returns a regional ARN for the specified service namespace and resource. func (c *AWSClient) RegionalARN(ctx context.Context, service, resource string) string { - return c.RegionalARNWithAccount(ctx, service, c.AccountID(ctx), resource) + return c.arn(ctx, service, c.Region(ctx), c.AccountID(ctx), resource) } // RegionalARNNoAccount returns a regional ARN for the specified service namespace and resource without AWS account ID. 
func (c *AWSClient) RegionalARNNoAccount(ctx context.Context, service, resource string) string { - return c.RegionalARNWithAccount(ctx, service, "", resource) + return c.arn(ctx, service, c.Region(ctx), "", resource) } // RegionalARNWithAccount returns a regional ARN for the specified service namespace, resource and account ID. func (c *AWSClient) RegionalARNWithAccount(ctx context.Context, service, accountID, resource string) string { + return c.arn(ctx, service, c.Region(ctx), accountID, resource) +} + +// RegionalARNWithRegion returns a regional ARN for the specified service namespace, region and resource. +func (c *AWSClient) RegionalARNWithRegion(ctx context.Context, service, region, resource string) string { + return c.arn(ctx, service, region, c.AccountID(ctx), resource) +} + +// arn returns an ARN for the specified service namespace, region, account ID and resource. +func (c *AWSClient) arn(ctx context.Context, service, region, accountID, resource string) string { return arn.ARN{ Partition: c.Partition(ctx), Service: service, - Region: c.Region(ctx), + Region: region, AccountID: accountID, Resource: resource, }.String() diff --git a/internal/conns/awsclient_gen.go b/internal/conns/awsclient_gen.go index 4c14e3c15a83..1782f1943c0f 100644 --- a/internal/conns/awsclient_gen.go +++ b/internal/conns/awsclient_gen.go @@ -23,6 +23,7 @@ import ( "github.com/aws/aws-sdk-go-v2/service/apprunner" "github.com/aws/aws-sdk-go-v2/service/appstream" "github.com/aws/aws-sdk-go-v2/service/appsync" + "github.com/aws/aws-sdk-go-v2/service/arcregionswitch" "github.com/aws/aws-sdk-go-v2/service/athena" "github.com/aws/aws-sdk-go-v2/service/auditmanager" "github.com/aws/aws-sdk-go-v2/service/autoscaling" @@ -32,6 +33,7 @@ import ( "github.com/aws/aws-sdk-go-v2/service/bcmdataexports" "github.com/aws/aws-sdk-go-v2/service/bedrock" "github.com/aws/aws-sdk-go-v2/service/bedrockagent" + "github.com/aws/aws-sdk-go-v2/service/bedrockagentcorecontrol" 
"github.com/aws/aws-sdk-go-v2/service/billing" "github.com/aws/aws-sdk-go-v2/service/budgets" "github.com/aws/aws-sdk-go-v2/service/chatbot" @@ -171,6 +173,7 @@ import ( "github.com/aws/aws-sdk-go-v2/service/notifications" "github.com/aws/aws-sdk-go-v2/service/notificationscontacts" "github.com/aws/aws-sdk-go-v2/service/oam" + "github.com/aws/aws-sdk-go-v2/service/odb" "github.com/aws/aws-sdk-go-v2/service/opensearch" "github.com/aws/aws-sdk-go-v2/service/opensearchserverless" "github.com/aws/aws-sdk-go-v2/service/organizations" @@ -210,6 +213,7 @@ import ( "github.com/aws/aws-sdk-go-v2/service/s3control" "github.com/aws/aws-sdk-go-v2/service/s3outposts" "github.com/aws/aws-sdk-go-v2/service/s3tables" + "github.com/aws/aws-sdk-go-v2/service/s3vectors" "github.com/aws/aws-sdk-go-v2/service/sagemaker" "github.com/aws/aws-sdk-go-v2/service/scheduler" "github.com/aws/aws-sdk-go-v2/service/schemas" @@ -251,6 +255,7 @@ import ( "github.com/aws/aws-sdk-go-v2/service/wafregional" "github.com/aws/aws-sdk-go-v2/service/wafv2" "github.com/aws/aws-sdk-go-v2/service/wellarchitected" + "github.com/aws/aws-sdk-go-v2/service/workmail" "github.com/aws/aws-sdk-go-v2/service/workspaces" "github.com/aws/aws-sdk-go-v2/service/workspacesweb" "github.com/aws/aws-sdk-go-v2/service/xray" @@ -278,6 +283,10 @@ func (c *AWSClient) APIGatewayV2Client(ctx context.Context) *apigatewayv2.Client return errs.Must(client[*apigatewayv2.Client](ctx, c, names.APIGatewayV2, make(map[string]any))) } +func (c *AWSClient) ARCRegionSwitchClient(ctx context.Context) *arcregionswitch.Client { + return errs.Must(client[*arcregionswitch.Client](ctx, c, names.ARCRegionSwitch, make(map[string]any))) +} + func (c *AWSClient) AccessAnalyzerClient(ctx context.Context) *accessanalyzer.Client { return errs.Must(client[*accessanalyzer.Client](ctx, c, names.AccessAnalyzer, make(map[string]any))) } @@ -370,6 +379,10 @@ func (c *AWSClient) BedrockAgentClient(ctx context.Context) *bedrockagent.Client return 
errs.Must(client[*bedrockagent.Client](ctx, c, names.BedrockAgent, make(map[string]any))) } +func (c *AWSClient) BedrockAgentCoreClient(ctx context.Context) *bedrockagentcorecontrol.Client { + return errs.Must(client[*bedrockagentcorecontrol.Client](ctx, c, names.BedrockAgentCore, make(map[string]any))) +} + func (c *AWSClient) BillingClient(ctx context.Context) *billing.Client { return errs.Must(client[*billing.Client](ctx, c, names.Billing, make(map[string]any))) } @@ -922,6 +935,10 @@ func (c *AWSClient) NotificationsContactsClient(ctx context.Context) *notificati return errs.Must(client[*notificationscontacts.Client](ctx, c, names.NotificationsContacts, make(map[string]any))) } +func (c *AWSClient) ODBClient(ctx context.Context) *odb.Client { + return errs.Must(client[*odb.Client](ctx, c, names.ODB, make(map[string]any))) +} + func (c *AWSClient) ObservabilityAccessManagerClient(ctx context.Context) *oam.Client { return errs.Must(client[*oam.Client](ctx, c, names.ObservabilityAccessManager, make(map[string]any))) } @@ -1082,6 +1099,10 @@ func (c *AWSClient) S3TablesClient(ctx context.Context) *s3tables.Client { return errs.Must(client[*s3tables.Client](ctx, c, names.S3Tables, make(map[string]any))) } +func (c *AWSClient) S3VectorsClient(ctx context.Context) *s3vectors.Client { + return errs.Must(client[*s3vectors.Client](ctx, c, names.S3Vectors, make(map[string]any))) +} + func (c *AWSClient) SESClient(ctx context.Context) *ses.Client { return errs.Must(client[*ses.Client](ctx, c, names.SES, make(map[string]any))) } @@ -1246,6 +1267,10 @@ func (c *AWSClient) WellArchitectedClient(ctx context.Context) *wellarchitected. 
return errs.Must(client[*wellarchitected.Client](ctx, c, names.WellArchitected, make(map[string]any))) } +func (c *AWSClient) WorkMailClient(ctx context.Context) *workmail.Client { + return errs.Must(client[*workmail.Client](ctx, c, names.WorkMail, make(map[string]any))) +} + func (c *AWSClient) WorkSpacesClient(ctx context.Context) *workspaces.Client { return errs.Must(client[*workspaces.Client](ctx, c, names.WorkSpaces, make(map[string]any))) } diff --git a/internal/conns/awsclient_test.go b/internal/conns/awsclient_test.go index 267d8b27d1e2..7f7399f0ff86 100644 --- a/internal/conns/awsclient_test.go +++ b/internal/conns/awsclient_test.go @@ -252,3 +252,349 @@ func TestAWSClientValidateInContextRegionInPartition(t *testing.T) { // nosemgre }) } } + +func TestAWSClientGlobalARN(t *testing.T) { // nosemgrep:ci.aws-in-func-name + t.Parallel() + + ctx := t.Context() + testCases := []struct { + Name string + AWSClient *AWSClient + Service string + Resource string + Expected string + }{ + { + Name: "AWS Commercial", + AWSClient: &AWSClient{ + accountID: "123456789012", + partition: standardPartition, + }, + Service: "iam", + Resource: "policy/test", + Expected: "arn:aws:iam::123456789012:policy/test", //lintignore:AWSAT003,AWSAT005 + }, + { + Name: "AWS China", + AWSClient: &AWSClient{ + accountID: "123456789012", + partition: chinaPartition, + }, + Service: "iam", + Resource: "policy/test", + Expected: "arn:aws-cn:iam::123456789012:policy/test", //lintignore:AWSAT003,AWSAT005 + }, + } + + for _, testCase := range testCases { + t.Run(testCase.Name, func(t *testing.T) { + t.Parallel() + + got := testCase.AWSClient.GlobalARN(ctx, testCase.Service, testCase.Resource) + + if got != testCase.Expected { + t.Errorf("got %s, expected %s", got, testCase.Expected) + } + }) + } +} + +func TestAWSClientGlobalARNNoAccount(t *testing.T) { // nosemgrep:ci.aws-in-func-name + t.Parallel() + + ctx := t.Context() + testCases := []struct { + Name string + AWSClient *AWSClient + Service 
string + Resource string + Expected string + }{ + { + Name: "AWS Commercial", + AWSClient: &AWSClient{ + accountID: "123456789012", + partition: standardPartition, + }, + Service: "s3", + Resource: "bucket/test", + Expected: "arn:aws:s3:::bucket/test", //lintignore:AWSAT003,AWSAT005 + }, + { + Name: "AWS China", + AWSClient: &AWSClient{ + accountID: "123456789012", + partition: chinaPartition, + }, + Service: "s3", + Resource: "bucket/test", + Expected: "arn:aws-cn:s3:::bucket/test", //lintignore:AWSAT003,AWSAT005 + }, + } + + for _, testCase := range testCases { + t.Run(testCase.Name, func(t *testing.T) { + t.Parallel() + + got := testCase.AWSClient.GlobalARNNoAccount(ctx, testCase.Service, testCase.Resource) + + if got != testCase.Expected { + t.Errorf("got %s, expected %s", got, testCase.Expected) + } + }) + } +} + +func TestAWSClientGlobalARNWithAccount(t *testing.T) { // nosemgrep:ci.aws-in-func-name + t.Parallel() + + ctx := t.Context() + testCases := []struct { + Name string + AWSClient *AWSClient + Service string + Resource string + Expected string + }{ + { + Name: "AWS Commercial", + AWSClient: &AWSClient{ + accountID: "123456789012", + partition: standardPartition, + }, + Service: "iam", + Resource: "policy/test", + Expected: "arn:aws:iam::234567890123:policy/test", //lintignore:AWSAT003,AWSAT005 + }, + { + Name: "AWS China", + AWSClient: &AWSClient{ + accountID: "123456789012", + partition: chinaPartition, + }, + Service: "iam", + Resource: "policy/test", + Expected: "arn:aws-cn:iam::234567890123:policy/test", //lintignore:AWSAT003,AWSAT005 + }, + } + + for _, testCase := range testCases { + t.Run(testCase.Name, func(t *testing.T) { + t.Parallel() + + got := testCase.AWSClient.GlobalARNWithAccount(ctx, testCase.Service, "234567890123", testCase.Resource) + + if got != testCase.Expected { + t.Errorf("got %s, expected %s", got, testCase.Expected) + } + }) + } +} + +func TestAWSClientRegionalARN(t *testing.T) { // nosemgrep:ci.aws-in-func-name + 
t.Parallel() + + ctx := t.Context() + testCases := []struct { + Name string + AWSClient *AWSClient + Service string + Resource string + Expected string + }{ + { + Name: "AWS Commercial", + AWSClient: &AWSClient{ + accountID: "123456789012", + partition: standardPartition, + awsConfig: &aws.Config{ + Region: "us-west-2", //lintignore:AWSAT003 + }, + }, + Service: "ec2", + Resource: "vpc/test", + Expected: "arn:aws:ec2:us-west-2:123456789012:vpc/test", //lintignore:AWSAT003,AWSAT005 + }, + { + Name: "AWS China", + AWSClient: &AWSClient{ + accountID: "123456789012", + partition: chinaPartition, + awsConfig: &aws.Config{ + Region: "cn-northwest-1", //lintignore:AWSAT003 + }, + }, + Service: "ec2", + Resource: "vpc/test", + Expected: "arn:aws-cn:ec2:cn-northwest-1:123456789012:vpc/test", //lintignore:AWSAT003,AWSAT005 + }, + } + + for _, testCase := range testCases { + t.Run(testCase.Name, func(t *testing.T) { + t.Parallel() + + got := testCase.AWSClient.RegionalARN(ctx, testCase.Service, testCase.Resource) + + if got != testCase.Expected { + t.Errorf("got %s, expected %s", got, testCase.Expected) + } + }) + } +} + +func TestAWSClientRegionalARNNoAccount(t *testing.T) { // nosemgrep:ci.aws-in-func-name + t.Parallel() + + ctx := t.Context() + testCases := []struct { + Name string + AWSClient *AWSClient + Service string + Resource string + Expected string + }{ + { + Name: "AWS Commercial", + AWSClient: &AWSClient{ + accountID: "123456789012", + partition: standardPartition, + awsConfig: &aws.Config{ + Region: "us-west-2", //lintignore:AWSAT003 + }, + }, + Service: "ec2", + Resource: "vpc/test", + Expected: "arn:aws:ec2:us-west-2::vpc/test", //lintignore:AWSAT003,AWSAT005 + }, + { + Name: "AWS China", + AWSClient: &AWSClient{ + accountID: "123456789012", + partition: chinaPartition, + awsConfig: &aws.Config{ + Region: "cn-northwest-1", //lintignore:AWSAT003 + }, + }, + Service: "ec2", + Resource: "vpc/test", + Expected: "arn:aws-cn:ec2:cn-northwest-1::vpc/test", 
//lintignore:AWSAT003,AWSAT005 + }, + } + + for _, testCase := range testCases { + t.Run(testCase.Name, func(t *testing.T) { + t.Parallel() + + got := testCase.AWSClient.RegionalARNNoAccount(ctx, testCase.Service, testCase.Resource) + + if got != testCase.Expected { + t.Errorf("got %s, expected %s", got, testCase.Expected) + } + }) + } +} + +func TestAWSClientRegionalARNWithAccount(t *testing.T) { // nosemgrep:ci.aws-in-func-name + t.Parallel() + + ctx := t.Context() + testCases := []struct { + Name string + AWSClient *AWSClient + Service string + Resource string + Expected string + }{ + { + Name: "AWS Commercial", + AWSClient: &AWSClient{ + accountID: "123456789012", + partition: standardPartition, + awsConfig: &aws.Config{ + Region: "us-west-2", //lintignore:AWSAT003 + }, + }, + Service: "ec2", + Resource: "vpc/test", + Expected: "arn:aws:ec2:us-west-2:234567890123:vpc/test", //lintignore:AWSAT003,AWSAT005 + }, + { + Name: "AWS China", + AWSClient: &AWSClient{ + accountID: "123456789012", + partition: chinaPartition, + awsConfig: &aws.Config{ + Region: "cn-northwest-1", //lintignore:AWSAT003 + }, + }, + Service: "ec2", + Resource: "vpc/test", + Expected: "arn:aws-cn:ec2:cn-northwest-1:234567890123:vpc/test", //lintignore:AWSAT003,AWSAT005 + }, + } + + for _, testCase := range testCases { + t.Run(testCase.Name, func(t *testing.T) { + t.Parallel() + + got := testCase.AWSClient.RegionalARNWithAccount(ctx, testCase.Service, "234567890123", testCase.Resource) + + if got != testCase.Expected { + t.Errorf("got %s, expected %s", got, testCase.Expected) + } + }) + } +} + +func TestAWSClientRegionalARNWithRegion(t *testing.T) { // nosemgrep:ci.aws-in-func-name + t.Parallel() + + ctx := t.Context() + testCases := []struct { + Name string + AWSClient *AWSClient + Service string + Resource string + Expected string + }{ + { + Name: "AWS Commercial", + AWSClient: &AWSClient{ + accountID: "123456789012", + partition: standardPartition, + awsConfig: &aws.Config{ + Region: 
"us-west-2", //lintignore:AWSAT003 + }, + }, + Service: "ec2", + Resource: "vpc/test", + Expected: "arn:aws:ec2:region-1:123456789012:vpc/test", //lintignore:AWSAT003,AWSAT005 + }, + { + Name: "AWS China", + AWSClient: &AWSClient{ + accountID: "123456789012", + partition: chinaPartition, + awsConfig: &aws.Config{ + Region: "cn-northwest-1", //lintignore:AWSAT003 + }, + }, + Service: "ec2", + Resource: "vpc/test", + Expected: "arn:aws-cn:ec2:region-1:123456789012:vpc/test", //lintignore:AWSAT003,AWSAT005 + }, + } + + for _, testCase := range testCases { + t.Run(testCase.Name, func(t *testing.T) { + t.Parallel() + + got := testCase.AWSClient.RegionalARNWithRegion(ctx, testCase.Service, "region-1", testCase.Resource) + + if got != testCase.Expected { + t.Errorf("got %s, expected %s", got, testCase.Expected) + } + }) + } +} diff --git a/internal/conns/conns.go b/internal/conns/conns.go index e2dc26434014..66256f099b82 100644 --- a/internal/conns/conns.go +++ b/internal/conns/conns.go @@ -5,6 +5,7 @@ package conns import ( "context" + "iter" "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/internal/vcr" @@ -20,6 +21,13 @@ type ServicePackage interface { ServicePackageName() string } +// ServicePackageWithActions is an interface that extends ServicePackage with actions. +// Actions are imperative operations that can be invoked to perform Day-2 operations. +type ServicePackageWithActions interface { + ServicePackage + Actions(context.Context) []*types.ServicePackageAction +} + // ServicePackageWithEphemeralResources is an interface that extends ServicePackage with ephemeral resources. // Ephemeral resources are resources that are not part of the Terraform state, but are used to create other resources. 
type ServicePackageWithEphemeralResources interface { @@ -27,6 +35,16 @@ type ServicePackageWithEphemeralResources interface { EphemeralResources(context.Context) []*types.ServicePackageEphemeralResource } +type ServicePackageWithFrameworkListResources interface { + ServicePackage + FrameworkListResources(context.Context) iter.Seq[*types.ServicePackageFrameworkListResource] +} + +type ServicePackageWithSDKListResources interface { + ServicePackage + SDKListResources(ctx context.Context) iter.Seq[*types.ServicePackageSDKListResource] +} + type ( contextKeyType int ) diff --git a/internal/embed.go b/internal/embed.go new file mode 100644 index 000000000000..14e1eb49de91 --- /dev/null +++ b/internal/embed.go @@ -0,0 +1,31 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package internal + +import ( + "embed" + "sync" + + "github.com/YakDriver/smarterr" +) + +//go:embed service/smarterr.hcl +//go:embed service/*/smarterr.hcl +var SmarterrFS embed.FS + +var registerSmarterrOnce sync.Once + +// RegisterSmarterrFS registers the embedded Smarterr filesystem with the Smarterr package. +// This function should be called once during provider initialization. +// +// Note: go:embed can only embed files from the current directory or its subdirectories. +// Therefore, embedding must be performed from the `internal` package to ensure the +// correct files are included (i.e., `internal/service/smarterr.hcl` (global config), +// `internal/service/*/smarterr.hcl` (per service config)). 
+func RegisterSmarterrFS() { + registerSmarterrOnce.Do(func() { + smarterr.SetLogger(smarterr.TFLogLogger{}) + smarterr.SetFS(&smarterr.WrappedFS{FS: &SmarterrFS}, "internal") + }) +} diff --git a/internal/errs/fwdiag/diags.go b/internal/errs/fwdiag/diags.go index a98c9c32b954..c442e30c79cf 100644 --- a/internal/errs/fwdiag/diags.go +++ b/internal/errs/fwdiag/diags.go @@ -9,6 +9,7 @@ import ( "strings" "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/list" ) // DiagnosticsError returns an error containing all Diagnostic with SeverityError @@ -38,6 +39,13 @@ func DiagnosticString(d diag.Diagnostic) string { return buf.String() } +func NewCreatingResourceIDErrorDiagnostic(err error) diag.Diagnostic { + return diag.NewErrorDiagnostic( + "Creating Resource ID", + err.Error(), + ) +} + func NewParsingResourceIDErrorDiagnostic(err error) diag.Diagnostic { return diag.NewErrorDiagnostic( "Parsing Resource ID", @@ -52,6 +60,17 @@ func NewResourceNotFoundWarningDiagnostic(err error) diag.Diagnostic { ) } +func NewListResultErrorDiagnostic(err error) list.ListResult { + return list.ListResult{ + Diagnostics: diag.Diagnostics{ + diag.NewErrorDiagnostic( + "Error Listing Remote Resources", + err.Error(), + ), + }, + } +} + func AsError[T any](x T, diags diag.Diagnostics) (T, error) { return x, DiagnosticsError(diags) } diff --git a/internal/flex/flex.go b/internal/flex/flex.go index 4f2784b510e3..164b51c5629e 100644 --- a/internal/flex/flex.go +++ b/internal/flex/flex.go @@ -10,6 +10,7 @@ import ( "strings" "time" + "github.com/YakDriver/smarterr" "github.com/aws/aws-sdk-go-v2/aws" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" tfmaps "github.com/hashicorp/terraform-provider-aws/internal/maps" @@ -254,11 +255,11 @@ func ExpandResourceId(id string, partCount int, allowEmptyPart bool) ([]string, idParts := strings.Split(id, ResourceIdSeparator) if len(idParts) <= 1 { - return nil, fmt.Errorf("unexpected 
format for ID (%v), expected more than one part", idParts) + return nil, smarterr.Errorf("unexpected format for ID (%v), expected more than one part", idParts) } if len(idParts) != partCount { - return nil, fmt.Errorf("unexpected format for ID (%s), expected (%d) parts separated by (%s)", id, partCount, ResourceIdSeparator) + return nil, smarterr.Errorf("unexpected format for ID (%s), expected (%d) parts separated by (%s)", id, partCount, ResourceIdSeparator) } if !allowEmptyPart { @@ -272,7 +273,7 @@ func ExpandResourceId(id string, partCount int, allowEmptyPart bool) ([]string, } if emptyPart { - return nil, fmt.Errorf("unexpected format for ID (%[1]s), the following id parts indexes are blank (%v)", id, emptyParts) + return nil, smarterr.Errorf("unexpected format for ID (%[1]s), the following id parts indexes are blank (%v)", id, emptyParts) } } return idParts, nil @@ -343,6 +344,11 @@ func Float64ToStringValue(v *float64) string { return strconv.FormatFloat(aws.ToFloat64(v), 'f', -1, 64) } +// Float64ValueToString converts a Go float64 value to a string pointer. +func Float64ValueToString(v float64) *string { + return aws.String(strconv.FormatFloat(v, 'f', -1, 64)) +} + // IntValueToString converts a Go int value to a string pointer. func IntValueToString(v int) *string { return aws.String(strconv.Itoa(v)) @@ -412,6 +418,11 @@ func StringValueToInt64Value(v string) int64 { return i } +// Int64ToRFC3339StringValue converts an int64 timestamp pointer to an RFC3339 Go string value. 
+func Int64ToRFC3339StringValue(v *int64) string { + return time.UnixMilli(aws.ToInt64(v)).Format(time.RFC3339) +} + // Takes a string of resource attributes separated by the ResourceIdSeparator constant // returns the number of parts func ResourceIdPartCount(id string) int { diff --git a/internal/framework/action_test.go b/internal/framework/action_test.go new file mode 100644 index 000000000000..a4f9206a3d04 --- /dev/null +++ b/internal/framework/action_test.go @@ -0,0 +1,53 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package framework + +import ( + "testing" + + "github.com/hashicorp/terraform-provider-aws/internal/conns" +) + +// Test that ActionWithConfigure can be instantiated and has the expected methods +func TestActionWithConfigureCompilation(t *testing.T) { + t.Parallel() + + // This test ensures our new types compile correctly + var action ActionWithConfigure + + // Test that it has the Meta method from withMeta + if action.Meta() != nil { + t.Error("Expected nil meta before configuration") + } + + // Test that it embeds withMeta correctly + action.meta = &conns.AWSClient{} + if action.Meta() == nil { + t.Error("Expected non-nil meta after setting") + } +} + +// Test that ActionWithModel can be instantiated +func TestActionWithModelCompilation(t *testing.T) { + t.Parallel() + + // Test model + type testModel struct { + Name string `tfsdk:"name"` + } + + // This test ensures our new generic type compiles correctly + var action ActionWithModel[testModel] + + // Test that it has the Meta method from ActionWithConfigure + if action.Meta() != nil { + t.Error("Expected nil meta before configuration") + } + + // Test that it embeds ActionWithConfigure correctly + action.meta = &conns.AWSClient{} + if action.Meta() == nil { + t.Error("Expected non-nil meta after setting") + } +} diff --git a/internal/framework/action_with_configure.go b/internal/framework/action_with_configure.go new file mode 100644 index 
000000000000..d79695fab5f3 --- /dev/null +++ b/internal/framework/action_with_configure.go @@ -0,0 +1,30 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package framework + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/action" + "github.com/hashicorp/terraform-provider-aws/internal/conns" +) + +type ActionWithConfigure struct { + withMeta +} + +// Metadata should return the full name of the action, such as +// aws_lambda_invoke. +func (*ActionWithConfigure) Metadata(_ context.Context, request action.MetadataRequest, response *action.MetadataResponse) { + // This method is implemented in the wrappers. + panic("not implemented") // lintignore:R009 +} + +// Configure enables provider-level data or clients to be set in the +// provider-defined Action type. +func (a *ActionWithConfigure) Configure(_ context.Context, request action.ConfigureRequest, _ *action.ConfigureResponse) { + if v, ok := request.ProviderData.(*conns.AWSClient); ok { + a.meta = v + } +} diff --git a/internal/framework/action_with_model.go b/internal/framework/action_with_model.go new file mode 100644 index 000000000000..2722dc3fe69f --- /dev/null +++ b/internal/framework/action_with_model.go @@ -0,0 +1,36 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package framework + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/action/schema" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-go/tftypes" +) + +// ActionWithModel is a structure to be embedded within an Action that has a corresponding model. +type ActionWithModel[T any] struct { + withModel[T] + ActionWithConfigure +} + +// ValidateModel validates the action's model against a schema. 
+func (a *ActionWithModel[T]) ValidateModel(ctx context.Context, schema *schema.Schema) diag.Diagnostics { + var diags diag.Diagnostics + state := tfsdk.State{ + Raw: tftypes.NewValue(schema.Type().TerraformType(ctx), nil), + Schema: schema, + } + + diags.Append(a.validateModel(ctx, &state)...) + + return diags +} + +type ActionValidateModel interface { + ValidateModel(ctx context.Context, schema *schema.Schema) diag.Diagnostics +} diff --git a/internal/framework/flex/auto_expand_test.go b/internal/framework/flex/auto_expand_test.go deleted file mode 100644 index bae2cac63e07..000000000000 --- a/internal/framework/flex/auto_expand_test.go +++ /dev/null @@ -1,5960 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package flex - -import ( - "bytes" - "context" - "reflect" - "testing" - "time" - - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/google/go-cmp/cmp" - "github.com/hashicorp/terraform-plugin-framework-timetypes/timetypes" - "github.com/hashicorp/terraform-plugin-framework/attr" - "github.com/hashicorp/terraform-plugin-framework/diag" - "github.com/hashicorp/terraform-plugin-framework/types" - "github.com/hashicorp/terraform-plugin-log/tflogtest" - "github.com/hashicorp/terraform-provider-aws/internal/errs" - fwtypes "github.com/hashicorp/terraform-provider-aws/internal/framework/types" - smithyjson "github.com/hashicorp/terraform-provider-aws/internal/json" -) - -func TestExpand(t *testing.T) { - t.Parallel() - - testString := "test" - testStringResult := "a" - - testByteSlice := []byte("test") - testByteSliceResult := []byte("a") - - var ( - typedNilSource *emptyStruct - typedNilTarget *emptyStruct - ) - - testARN := "arn:aws:securityhub:us-west-2:1234567890:control/cis-aws-foundations-benchmark/v/1.2.0/1.1" //lintignore:AWSAT003,AWSAT005 - - testTimeStr := "2013-09-25T09:34:01Z" - testTimeTime := errs.Must(time.Parse(time.RFC3339, testTimeStr)) - - testCases := autoFlexTestCases{ - "nil Source": { - Target: 
&emptyStruct{}, - expectedDiags: diag.Diagnostics{ - diagExpandingSourceIsNil(nil), - }, - expectedLogLines: []map[string]any{ - infoExpanding(nil, reflect.TypeFor[*emptyStruct]()), - errorSourceIsNil("", nil, "", reflect.TypeFor[emptyStruct]()), - }, - }, - "typed nil Source": { - Source: typedNilSource, - Target: &emptyStruct{}, - expectedDiags: diag.Diagnostics{ - // diagExpandingSourceIsNil(reflect.TypeFor[*emptyStruct]()), - diagExpandingSourceIsNil(nil), // FIXME: Should give the actual type - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[*emptyStruct](), reflect.TypeFor[*emptyStruct]()), - // errorSourceIsNil("", reflect.TypeFor[*emptyStruct](), "", reflect.TypeFor[emptyStruct]()), - errorSourceIsNil("", nil, "", reflect.TypeFor[emptyStruct]()), // FIXME: Should give the actual type - }, - }, - "nil Target": { - Source: emptyStruct{}, - expectedDiags: diag.Diagnostics{ - diagConvertingTargetIsNil(nil), - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[emptyStruct](), nil), - errorTargetIsNil("", reflect.TypeFor[emptyStruct](), "", nil), - }, - }, - "typed nil Target": { - Source: emptyStruct{}, - Target: typedNilTarget, - expectedDiags: diag.Diagnostics{ - diagConvertingTargetIsNil(reflect.TypeFor[*emptyStruct]()), - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[emptyStruct](), reflect.TypeFor[*emptyStruct]()), - errorTargetIsNil("", reflect.TypeFor[emptyStruct](), "", reflect.TypeFor[*emptyStruct]()), - }, - }, - "non-pointer Target": { - Source: emptyStruct{}, - Target: 0, - expectedDiags: diag.Diagnostics{ - diagConvertingTargetIsNotPointer(reflect.TypeFor[int]()), - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[emptyStruct](), reflect.TypeFor[int]()), - errorTargetIsNotPointer("", reflect.TypeFor[emptyStruct](), "", reflect.TypeFor[int]()), - }, - }, - "non-struct Source struct Target": { - Source: testString, - Target: &emptyStruct{}, - 
expectedDiags: diag.Diagnostics{ - diagExpandingSourceDoesNotImplementAttrValue(reflect.TypeFor[string]()), - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[string](), reflect.TypeFor[*emptyStruct]()), - infoConverting(reflect.TypeFor[string](), reflect.TypeFor[emptyStruct]()), - errorSourceDoesNotImplementAttrValue("", reflect.TypeFor[string](), "", reflect.TypeFor[emptyStruct]()), - }, - }, - "struct Source non-struct Target": { - Source: emptyStruct{}, - Target: &testString, - expectedDiags: diag.Diagnostics{ - diagExpandingSourceDoesNotImplementAttrValue(reflect.TypeFor[emptyStruct]()), - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[emptyStruct](), reflect.TypeFor[*string]()), - infoConverting(reflect.TypeFor[emptyStruct](), reflect.TypeFor[string]()), - errorSourceDoesNotImplementAttrValue("", reflect.TypeFor[emptyStruct](), "", reflect.TypeFor[string]()), - }, - }, - "types.String to string": { - Source: types.StringValue("a"), - Target: &testString, - WantTarget: &testStringResult, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[types.String](), reflect.TypeFor[*string]()), - infoConverting(reflect.TypeFor[types.String](), reflect.TypeFor[string]()), - }, - }, - "types.String to byte slice": { - Source: types.StringValue("a"), - Target: &testByteSlice, - WantTarget: &testByteSliceResult, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[types.String](), reflect.TypeFor[*[]byte]()), - infoConverting(reflect.TypeFor[types.String](), reflect.TypeFor[[]byte]()), - }, - }, - "empty struct Source and Target": { - Source: emptyStruct{}, - Target: &emptyStruct{}, - WantTarget: &emptyStruct{}, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[emptyStruct](), reflect.TypeFor[*emptyStruct]()), - infoConverting(reflect.TypeFor[emptyStruct](), reflect.TypeFor[*emptyStruct]()), - }, - }, - "empty struct pointer Source and Target": { - Source: &emptyStruct{}, 
- Target: &emptyStruct{}, - WantTarget: &emptyStruct{}, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[*emptyStruct](), reflect.TypeFor[*emptyStruct]()), - infoConverting(reflect.TypeFor[emptyStruct](), reflect.TypeFor[*emptyStruct]()), - }, - }, - "single string struct pointer Source and empty Target": { - Source: &tfSingleStringField{Field1: types.StringValue("a")}, - Target: &emptyStruct{}, - WantTarget: &emptyStruct{}, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[*tfSingleStringField](), reflect.TypeFor[*emptyStruct]()), - infoConverting(reflect.TypeFor[tfSingleStringField](), reflect.TypeFor[*emptyStruct]()), - debugNoCorrespondingField(reflect.TypeFor[tfSingleStringField](), "Field1", reflect.TypeFor[*emptyStruct]()), - }, - }, - "source field does not implement attr.Value Source": { - Source: &awsSingleStringValue{Field1: "a"}, - Target: &awsSingleStringValue{}, - expectedDiags: diag.Diagnostics{ - diagExpandingSourceDoesNotImplementAttrValue(reflect.TypeFor[string]()), - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[*awsSingleStringValue](), reflect.TypeFor[*awsSingleStringValue]()), - infoConverting(reflect.TypeFor[awsSingleStringValue](), reflect.TypeFor[*awsSingleStringValue]()), - traceMatchedFields("Field1", reflect.TypeFor[awsSingleStringValue](), "Field1", reflect.TypeFor[*awsSingleStringValue]()), - infoConvertingWithPath("Field1", reflect.TypeFor[string](), "Field1", reflect.TypeFor[string]()), - errorSourceDoesNotImplementAttrValue("Field1", reflect.TypeFor[string](), "Field1", reflect.TypeFor[string]()), - }, - }, - "single string Source and single string Target": { - Source: &tfSingleStringField{Field1: types.StringValue("a")}, - Target: &awsSingleStringValue{}, - WantTarget: &awsSingleStringValue{Field1: "a"}, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[*tfSingleStringField](), reflect.TypeFor[*awsSingleStringValue]()), - 
infoConverting(reflect.TypeFor[tfSingleStringField](), reflect.TypeFor[*awsSingleStringValue]()), - traceMatchedFields("Field1", reflect.TypeFor[tfSingleStringField](), "Field1", reflect.TypeFor[*awsSingleStringValue]()), - infoConvertingWithPath("Field1", reflect.TypeFor[types.String](), "Field1", reflect.TypeFor[string]()), - }, - }, - "single string Source and byte slice Target": { - Source: &tfSingleStringField{Field1: types.StringValue("a")}, - Target: &awsSingleByteSliceValue{}, - WantTarget: &awsSingleByteSliceValue{Field1: []byte("a")}, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[*tfSingleStringField](), reflect.TypeFor[*awsSingleByteSliceValue]()), - infoConverting(reflect.TypeFor[tfSingleStringField](), reflect.TypeFor[*awsSingleByteSliceValue]()), - traceMatchedFields("Field1", reflect.TypeFor[tfSingleStringField](), "Field1", reflect.TypeFor[*awsSingleByteSliceValue]()), - infoConvertingWithPath("Field1", reflect.TypeFor[types.String](), "Field1", reflect.TypeFor[[]byte]()), - }, - }, - "single string Source and single *string Target": { - Source: &tfSingleStringField{Field1: types.StringValue("a")}, - Target: &awsSingleStringPointer{}, - WantTarget: &awsSingleStringPointer{Field1: aws.String("a")}, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[*tfSingleStringField](), reflect.TypeFor[*awsSingleStringPointer]()), - infoConverting(reflect.TypeFor[tfSingleStringField](), reflect.TypeFor[*awsSingleStringPointer]()), - traceMatchedFields("Field1", reflect.TypeFor[tfSingleStringField](), "Field1", reflect.TypeFor[*awsSingleStringPointer]()), - infoConvertingWithPath("Field1", reflect.TypeFor[types.String](), "Field1", reflect.TypeFor[*string]()), - }, - }, - "single string Source and single int64 Target": { - Source: &tfSingleStringField{Field1: types.StringValue("a")}, - Target: &awsSingleInt64Value{}, - WantTarget: &awsSingleInt64Value{}, - expectedLogLines: []map[string]any{ - 
infoExpanding(reflect.TypeFor[*tfSingleStringField](), reflect.TypeFor[*awsSingleInt64Value]()), - infoConverting(reflect.TypeFor[tfSingleStringField](), reflect.TypeFor[*awsSingleInt64Value]()), - traceMatchedFields("Field1", reflect.TypeFor[tfSingleStringField](), "Field1", reflect.TypeFor[*awsSingleInt64Value]()), - infoConvertingWithPath("Field1", reflect.TypeFor[types.String](), "Field1", reflect.TypeFor[int64]()), - { - "@level": "error", - "@module": "provider.autoflex", - "@message": "AutoFlex Expand; incompatible types", - "from": map[string]any{}, - "to": float64(reflect.Int64), - logAttrKeySourcePath: "Field1", - logAttrKeySourceType: fullTypeName(reflect.TypeFor[types.String]()), - logAttrKeyTargetPath: "Field1", - logAttrKeyTargetType: fullTypeName(reflect.TypeFor[int64]()), - }, - }, - }, - "primitive types Source and primtive types Target": { - Source: &tfAllThePrimitiveFields{ - Field1: types.StringValue("field1"), - Field2: types.StringValue("field2"), - Field3: types.Int64Value(3), - Field4: types.Int64Value(-4), - Field5: types.Int64Value(5), - Field6: types.Int64Value(-6), - Field7: types.Float64Value(7.7), - Field8: types.Float64Value(-8.8), - Field9: types.Float64Value(9.99), - Field10: types.Float64Value(-10.101), - Field11: types.BoolValue(true), - Field12: types.BoolValue(false), - }, - Target: &awsAllThePrimitiveFields{}, - WantTarget: &awsAllThePrimitiveFields{ - Field1: "field1", - Field2: aws.String("field2"), - Field3: 3, - Field4: aws.Int32(-4), - Field5: 5, - Field6: aws.Int64(-6), - Field7: 7.7, - Field8: aws.Float32(-8.8), - Field9: 9.99, - Field10: aws.Float64(-10.101), - Field11: true, - Field12: aws.Bool(false), - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[*tfAllThePrimitiveFields](), reflect.TypeFor[*awsAllThePrimitiveFields]()), - infoConverting(reflect.TypeFor[tfAllThePrimitiveFields](), reflect.TypeFor[*awsAllThePrimitiveFields]()), - traceMatchedFields("Field1", 
reflect.TypeFor[tfAllThePrimitiveFields](), "Field1", reflect.TypeFor[*awsAllThePrimitiveFields]()), - infoConvertingWithPath("Field1", reflect.TypeFor[types.String](), "Field1", reflect.TypeFor[string]()), - traceMatchedFields("Field2", reflect.TypeFor[tfAllThePrimitiveFields](), "Field2", reflect.TypeFor[*awsAllThePrimitiveFields]()), - infoConvertingWithPath("Field2", reflect.TypeFor[types.String](), "Field2", reflect.TypeFor[*string]()), - traceMatchedFields("Field3", reflect.TypeFor[tfAllThePrimitiveFields](), "Field3", reflect.TypeFor[*awsAllThePrimitiveFields]()), - infoConvertingWithPath("Field3", reflect.TypeFor[types.Int64](), "Field3", reflect.TypeFor[int32]()), - traceMatchedFields("Field4", reflect.TypeFor[tfAllThePrimitiveFields](), "Field4", reflect.TypeFor[*awsAllThePrimitiveFields]()), - infoConvertingWithPath("Field4", reflect.TypeFor[types.Int64](), "Field4", reflect.TypeFor[*int32]()), - traceMatchedFields("Field5", reflect.TypeFor[tfAllThePrimitiveFields](), "Field5", reflect.TypeFor[*awsAllThePrimitiveFields]()), - infoConvertingWithPath("Field5", reflect.TypeFor[types.Int64](), "Field5", reflect.TypeFor[int64]()), - traceMatchedFields("Field6", reflect.TypeFor[tfAllThePrimitiveFields](), "Field6", reflect.TypeFor[*awsAllThePrimitiveFields]()), - infoConvertingWithPath("Field6", reflect.TypeFor[types.Int64](), "Field6", reflect.TypeFor[*int64]()), - traceMatchedFields("Field7", reflect.TypeFor[tfAllThePrimitiveFields](), "Field7", reflect.TypeFor[*awsAllThePrimitiveFields]()), - infoConvertingWithPath("Field7", reflect.TypeFor[types.Float64](), "Field7", reflect.TypeFor[float32]()), - traceMatchedFields("Field8", reflect.TypeFor[tfAllThePrimitiveFields](), "Field8", reflect.TypeFor[*awsAllThePrimitiveFields]()), - infoConvertingWithPath("Field8", reflect.TypeFor[types.Float64](), "Field8", reflect.TypeFor[*float32]()), - traceMatchedFields("Field9", reflect.TypeFor[tfAllThePrimitiveFields](), "Field9", 
reflect.TypeFor[*awsAllThePrimitiveFields]()), - infoConvertingWithPath("Field9", reflect.TypeFor[types.Float64](), "Field9", reflect.TypeFor[float64]()), - traceMatchedFields("Field10", reflect.TypeFor[tfAllThePrimitiveFields](), "Field10", reflect.TypeFor[*awsAllThePrimitiveFields]()), - infoConvertingWithPath("Field10", reflect.TypeFor[types.Float64](), "Field10", reflect.TypeFor[*float64]()), - traceMatchedFields("Field11", reflect.TypeFor[tfAllThePrimitiveFields](), "Field11", reflect.TypeFor[*awsAllThePrimitiveFields]()), - infoConvertingWithPath("Field11", reflect.TypeFor[types.Bool](), "Field11", reflect.TypeFor[bool]()), - traceMatchedFields("Field12", reflect.TypeFor[tfAllThePrimitiveFields](), "Field12", reflect.TypeFor[*awsAllThePrimitiveFields]()), - infoConvertingWithPath("Field12", reflect.TypeFor[types.Bool](), "Field12", reflect.TypeFor[*bool]()), - }, - }, - "Collection of primitive types Source and slice or map of primtive types Target": { - Source: &tfCollectionsOfPrimitiveElements{ - Field1: types.ListValueMust(types.StringType, []attr.Value{ - types.StringValue("a"), - types.StringValue("b"), - }), - Field2: types.ListValueMust(types.StringType, []attr.Value{ - types.StringValue("a"), - types.StringValue("b"), - }), - Field3: types.SetValueMust(types.StringType, []attr.Value{ - types.StringValue("a"), - types.StringValue("b"), - }), - Field4: types.SetValueMust(types.StringType, []attr.Value{ - types.StringValue("a"), - types.StringValue("b"), - }), - Field5: types.MapValueMust(types.StringType, map[string]attr.Value{ - "A": types.StringValue("a"), - "B": types.StringValue("b"), - }), - Field6: types.MapValueMust(types.StringType, map[string]attr.Value{ - "A": types.StringValue("a"), - "B": types.StringValue("b"), - }), - }, - Target: &awsCollectionsOfPrimitiveElements{}, - WantTarget: &awsCollectionsOfPrimitiveElements{ - Field1: []string{"a", "b"}, - Field2: aws.StringSlice([]string{"a", "b"}), - Field3: []string{"a", "b"}, - Field4: 
aws.StringSlice([]string{"a", "b"}), - Field5: map[string]string{"A": "a", "B": "b"}, - Field6: aws.StringMap(map[string]string{"A": "a", "B": "b"}), - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[*tfCollectionsOfPrimitiveElements](), reflect.TypeFor[*awsCollectionsOfPrimitiveElements]()), - infoConverting(reflect.TypeFor[tfCollectionsOfPrimitiveElements](), reflect.TypeFor[*awsCollectionsOfPrimitiveElements]()), - traceMatchedFields("Field1", reflect.TypeFor[tfCollectionsOfPrimitiveElements](), "Field1", reflect.TypeFor[*awsCollectionsOfPrimitiveElements]()), - infoConvertingWithPath("Field1", reflect.TypeFor[types.List](), "Field1", reflect.TypeFor[[]string]()), - traceExpandingWithElementsAs("Field1", reflect.TypeFor[types.List](), 2, "Field1", reflect.TypeFor[[]string]()), - traceMatchedFields("Field2", reflect.TypeFor[tfCollectionsOfPrimitiveElements](), "Field2", reflect.TypeFor[*awsCollectionsOfPrimitiveElements]()), - infoConvertingWithPath("Field2", reflect.TypeFor[types.List](), "Field2", reflect.TypeFor[[]*string]()), - traceExpandingWithElementsAs("Field2", reflect.TypeFor[types.List](), 2, "Field2", reflect.TypeFor[[]*string]()), - traceMatchedFields("Field3", reflect.TypeFor[tfCollectionsOfPrimitiveElements](), "Field3", reflect.TypeFor[*awsCollectionsOfPrimitiveElements]()), - infoConvertingWithPath("Field3", reflect.TypeFor[types.Set](), "Field3", reflect.TypeFor[[]string]()), - traceExpandingWithElementsAs("Field3", reflect.TypeFor[types.Set](), 2, "Field3", reflect.TypeFor[[]string]()), - traceMatchedFields("Field4", reflect.TypeFor[tfCollectionsOfPrimitiveElements](), "Field4", reflect.TypeFor[*awsCollectionsOfPrimitiveElements]()), - infoConvertingWithPath("Field4", reflect.TypeFor[types.Set](), "Field4", reflect.TypeFor[[]*string]()), - traceExpandingWithElementsAs("Field4", reflect.TypeFor[types.Set](), 2, "Field4", reflect.TypeFor[[]*string]()), - traceMatchedFields("Field5", 
reflect.TypeFor[tfCollectionsOfPrimitiveElements](), "Field5", reflect.TypeFor[*awsCollectionsOfPrimitiveElements]()), - infoConvertingWithPath("Field5", reflect.TypeFor[types.Map](), "Field5", reflect.TypeFor[map[string]string]()), - traceExpandingWithElementsAs("Field5", reflect.TypeFor[types.Map](), 2, "Field5", reflect.TypeFor[map[string]string]()), - traceMatchedFields("Field6", reflect.TypeFor[tfCollectionsOfPrimitiveElements](), "Field6", reflect.TypeFor[*awsCollectionsOfPrimitiveElements]()), - infoConvertingWithPath("Field6", reflect.TypeFor[types.Map](), "Field6", reflect.TypeFor[map[string]*string]()), - traceExpandingWithElementsAs("Field6", reflect.TypeFor[types.Map](), 2, "Field6", reflect.TypeFor[map[string]*string]()), - }, - }, - "plural ordinary field names": { - Source: &tfSingluarListOfNestedObjects{ - Field: fwtypes.NewListNestedObjectValueOfPtrMust(context.Background(), &tfSingleStringField{ - Field1: types.StringValue("a"), - }), - }, - Target: &awsPluralSliceOfNestedObjectValues{}, - WantTarget: &awsPluralSliceOfNestedObjectValues{ - Fields: []awsSingleStringValue{{Field1: "a"}}, - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[*tfSingluarListOfNestedObjects](), reflect.TypeFor[*awsPluralSliceOfNestedObjectValues]()), - infoConverting(reflect.TypeFor[tfSingluarListOfNestedObjects](), reflect.TypeFor[*awsPluralSliceOfNestedObjectValues]()), - traceMatchedFields("Field", reflect.TypeFor[tfSingluarListOfNestedObjects](), "Fields", reflect.TypeFor[*awsPluralSliceOfNestedObjectValues]()), - infoConvertingWithPath("Field", reflect.TypeFor[fwtypes.ListNestedObjectValueOf[tfSingleStringField]](), "Fields", reflect.TypeFor[[]awsSingleStringValue]()), - traceExpandingNestedObjectCollection("Field", reflect.TypeFor[fwtypes.ListNestedObjectValueOf[tfSingleStringField]](), 1, "Fields", reflect.TypeFor[[]awsSingleStringValue]()), - traceMatchedFieldsWithPath("Field[0]", "Field1", reflect.TypeFor[tfSingleStringField](), 
"Fields[0]", "Field1", reflect.TypeFor[*awsSingleStringValue]()), - infoConvertingWithPath("Field[0].Field1", reflect.TypeFor[types.String](), "Fields[0].Field1", reflect.TypeFor[string]()), - }, - }, - "plural field names": { - Source: &tfSpecialPluralization{ - City: types.ListValueMust(types.StringType, []attr.Value{ - types.StringValue("paris"), - types.StringValue("london"), - }), - Coach: types.ListValueMust(types.StringType, []attr.Value{ - types.StringValue("guardiola"), - types.StringValue("mourinho"), - }), - Tomato: types.ListValueMust(types.StringType, []attr.Value{ - types.StringValue("brandywine"), - types.StringValue("roma"), - }), - Vertex: types.ListValueMust(types.StringType, []attr.Value{ - types.StringValue("ab"), - types.StringValue("bc"), - }), - Criterion: types.ListValueMust(types.StringType, []attr.Value{ - types.StringValue("votes"), - types.StringValue("editors"), - }), - Datum: types.ListValueMust(types.StringType, []attr.Value{ - types.StringValue("d1282f78-fa99-5d9d-bd51-e6f0173eb74a"), - types.StringValue("0f10cb10-2076-5254-bd21-d3f62fe66303"), - }), - Hive: types.ListValueMust(types.StringType, []attr.Value{ - types.StringValue("Cegieme"), - types.StringValue("Fahumvid"), - }), - }, - Target: &awsSpecialPluralization{}, - WantTarget: &awsSpecialPluralization{ - Cities: []*string{ - aws.String("paris"), - aws.String("london"), - }, - Coaches: []*string{ - aws.String("guardiola"), - aws.String("mourinho"), - }, - Tomatoes: []*string{ - aws.String("brandywine"), - aws.String("roma"), - }, - Vertices: []*string{ - aws.String("ab"), - aws.String("bc"), - }, - Criteria: []*string{ - aws.String("votes"), - aws.String("editors"), - }, - Data: []*string{ - aws.String("d1282f78-fa99-5d9d-bd51-e6f0173eb74a"), - aws.String("0f10cb10-2076-5254-bd21-d3f62fe66303"), - }, - Hives: []*string{ - aws.String("Cegieme"), - aws.String("Fahumvid"), - }, - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[*tfSpecialPluralization](), 
reflect.TypeFor[*awsSpecialPluralization]()), - infoConverting(reflect.TypeFor[tfSpecialPluralization](), reflect.TypeFor[*awsSpecialPluralization]()), - traceMatchedFields("City", reflect.TypeFor[tfSpecialPluralization](), "Cities", reflect.TypeFor[*awsSpecialPluralization]()), - infoConvertingWithPath("City", reflect.TypeFor[types.List](), "Cities", reflect.TypeFor[[]*string]()), - traceExpandingWithElementsAs("City", reflect.TypeFor[types.List](), 2, "Cities", reflect.TypeFor[[]*string]()), - traceMatchedFields("Coach", reflect.TypeFor[tfSpecialPluralization](), "Coaches", reflect.TypeFor[*awsSpecialPluralization]()), - infoConvertingWithPath("Coach", reflect.TypeFor[types.List](), "Coaches", reflect.TypeFor[[]*string]()), - traceExpandingWithElementsAs("Coach", reflect.TypeFor[types.List](), 2, "Coaches", reflect.TypeFor[[]*string]()), - traceMatchedFields("Tomato", reflect.TypeFor[tfSpecialPluralization](), "Tomatoes", reflect.TypeFor[*awsSpecialPluralization]()), - infoConvertingWithPath("Tomato", reflect.TypeFor[types.List](), "Tomatoes", reflect.TypeFor[[]*string]()), - traceExpandingWithElementsAs("Tomato", reflect.TypeFor[types.List](), 2, "Tomatoes", reflect.TypeFor[[]*string]()), - traceMatchedFields("Vertex", reflect.TypeFor[tfSpecialPluralization](), "Vertices", reflect.TypeFor[*awsSpecialPluralization]()), - infoConvertingWithPath("Vertex", reflect.TypeFor[types.List](), "Vertices", reflect.TypeFor[[]*string]()), - traceExpandingWithElementsAs("Vertex", reflect.TypeFor[types.List](), 2, "Vertices", reflect.TypeFor[[]*string]()), - traceMatchedFields("Criterion", reflect.TypeFor[tfSpecialPluralization](), "Criteria", reflect.TypeFor[*awsSpecialPluralization]()), - infoConvertingWithPath("Criterion", reflect.TypeFor[types.List](), "Criteria", reflect.TypeFor[[]*string]()), - traceExpandingWithElementsAs("Criterion", reflect.TypeFor[types.List](), 2, "Criteria", reflect.TypeFor[[]*string]()), - traceMatchedFields("Datum", 
reflect.TypeFor[tfSpecialPluralization](), "Data", reflect.TypeFor[*awsSpecialPluralization]()), - infoConvertingWithPath("Datum", reflect.TypeFor[types.List](), "Data", reflect.TypeFor[[]*string]()), - traceExpandingWithElementsAs("Datum", reflect.TypeFor[types.List](), 2, "Data", reflect.TypeFor[[]*string]()), - traceMatchedFields("Hive", reflect.TypeFor[tfSpecialPluralization](), "Hives", reflect.TypeFor[*awsSpecialPluralization]()), - infoConvertingWithPath("Hive", reflect.TypeFor[types.List](), "Hives", reflect.TypeFor[[]*string]()), - traceExpandingWithElementsAs("Hive", reflect.TypeFor[types.List](), 2, "Hives", reflect.TypeFor[[]*string]()), - }, - }, - "capitalization field names": { - Source: &tfCaptializationDiff{ - FieldURL: types.StringValue("h"), - }, - Target: &awsCapitalizationDiff{}, - WantTarget: &awsCapitalizationDiff{ - FieldUrl: aws.String("h"), - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[*tfCaptializationDiff](), reflect.TypeFor[*awsCapitalizationDiff]()), - infoConverting(reflect.TypeFor[tfCaptializationDiff](), reflect.TypeFor[*awsCapitalizationDiff]()), - traceMatchedFields("FieldURL", reflect.TypeFor[tfCaptializationDiff](), "FieldUrl", reflect.TypeFor[*awsCapitalizationDiff]()), - infoConvertingWithPath("FieldURL", reflect.TypeFor[types.String](), "FieldUrl", reflect.TypeFor[*string]()), - }, - }, - "resource name suffix": { - Options: []AutoFlexOptionsFunc{WithFieldNameSuffix("Config")}, - Source: &tfFieldNameSuffix{ - Policy: types.StringValue("foo"), - }, - Target: &awsFieldNameSuffix{}, - WantTarget: &awsFieldNameSuffix{ - PolicyConfig: aws.String("foo"), - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[*tfFieldNameSuffix](), reflect.TypeFor[*awsFieldNameSuffix]()), - infoConverting(reflect.TypeFor[tfFieldNameSuffix](), reflect.TypeFor[*awsFieldNameSuffix]()), - traceMatchedFields("Policy", reflect.TypeFor[tfFieldNameSuffix](), "PolicyConfig", 
reflect.TypeFor[*awsFieldNameSuffix]()), - infoConvertingWithPath("Policy", reflect.TypeFor[types.String](), "PolicyConfig", reflect.TypeFor[*string]()), - }, - }, - "single ARN Source and single string Target": { - Source: &tfSingleARNField{Field1: fwtypes.ARNValue(testARN)}, - Target: &awsSingleStringValue{}, - WantTarget: &awsSingleStringValue{Field1: testARN}, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[*tfSingleARNField](), reflect.TypeFor[*awsSingleStringValue]()), - infoConverting(reflect.TypeFor[tfSingleARNField](), reflect.TypeFor[*awsSingleStringValue]()), - traceMatchedFields("Field1", reflect.TypeFor[tfSingleARNField](), "Field1", reflect.TypeFor[*awsSingleStringValue]()), - infoConvertingWithPath("Field1", reflect.TypeFor[fwtypes.ARN](), "Field1", reflect.TypeFor[string]()), - }, - }, - "single ARN Source and single *string Target": { - Source: &tfSingleARNField{Field1: fwtypes.ARNValue(testARN)}, - Target: &awsSingleStringPointer{}, - WantTarget: &awsSingleStringPointer{Field1: aws.String(testARN)}, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[*tfSingleARNField](), reflect.TypeFor[*awsSingleStringPointer]()), - infoConverting(reflect.TypeFor[tfSingleARNField](), reflect.TypeFor[*awsSingleStringPointer]()), - traceMatchedFields("Field1", reflect.TypeFor[tfSingleARNField](), "Field1", reflect.TypeFor[*awsSingleStringPointer]()), - infoConvertingWithPath("Field1", reflect.TypeFor[fwtypes.ARN](), "Field1", reflect.TypeFor[*string]()), - }, - }, - "timestamp pointer": { - Source: &tfRFC3339Time{ - CreationDateTime: timetypes.NewRFC3339ValueMust(testTimeStr), - }, - Target: &awsRFC3339TimePointer{}, - WantTarget: &awsRFC3339TimePointer{ - CreationDateTime: &testTimeTime, - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[*tfRFC3339Time](), reflect.TypeFor[*awsRFC3339TimePointer]()), - infoConverting(reflect.TypeFor[tfRFC3339Time](), reflect.TypeFor[*awsRFC3339TimePointer]()), - 
traceMatchedFields("CreationDateTime", reflect.TypeFor[tfRFC3339Time](), "CreationDateTime", reflect.TypeFor[*awsRFC3339TimePointer]()), - infoConvertingWithPath("CreationDateTime", reflect.TypeFor[timetypes.RFC3339](), "CreationDateTime", reflect.TypeFor[*time.Time]()), - }, - }, - "timestamp": { - Source: &tfRFC3339Time{ - CreationDateTime: timetypes.NewRFC3339ValueMust(testTimeStr), - }, - Target: &awsRFC3339TimeValue{}, - WantTarget: &awsRFC3339TimeValue{ - CreationDateTime: testTimeTime, - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[*tfRFC3339Time](), reflect.TypeFor[*awsRFC3339TimeValue]()), - infoConverting(reflect.TypeFor[tfRFC3339Time](), reflect.TypeFor[*awsRFC3339TimeValue]()), - traceMatchedFields("CreationDateTime", reflect.TypeFor[tfRFC3339Time](), "CreationDateTime", reflect.TypeFor[*awsRFC3339TimeValue]()), - infoConvertingWithPath("CreationDateTime", reflect.TypeFor[timetypes.RFC3339](), "CreationDateTime", reflect.TypeFor[time.Time]()), - }, - }, - "JSONValue Source to json interface Target": { - Source: &tfJSONStringer{Field1: fwtypes.SmithyJSONValue(`{"field1": "a"}`, newTestJSONDocument)}, - Target: &awsJSONStringer{}, - WantTarget: &awsJSONStringer{ - Field1: &testJSONDocument{ - Value: map[string]any{ - "field1": "a", - }, - }, - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[*tfJSONStringer](), reflect.TypeFor[*awsJSONStringer]()), - infoConverting(reflect.TypeFor[tfJSONStringer](), reflect.TypeFor[*awsJSONStringer]()), - traceMatchedFields("Field1", reflect.TypeFor[tfJSONStringer](), "Field1", reflect.TypeFor[*awsJSONStringer]()), - infoConvertingWithPath("Field1", reflect.TypeFor[fwtypes.SmithyJSON[smithyjson.JSONStringer]](), "Field1", reflect.TypeFor[smithyjson.JSONStringer]()), - }, - }, - } - - runAutoExpandTestCases(t, testCases) -} - -func TestExpandGeneric(t *testing.T) { - t.Parallel() - - ctx := context.Background() - - testCases := autoFlexTestCases{ - "complex Source and 
complex Target": { - Source: &tfComplexValue{ - Field1: types.StringValue("m"), - Field2: fwtypes.NewListNestedObjectValueOfPtrMust(ctx, &tfListOfNestedObject{ - Field1: fwtypes.NewListNestedObjectValueOfPtrMust(ctx, &tfSingleStringField{ - Field1: types.StringValue("n"), - }), - }), - Field3: types.MapValueMust(types.StringType, map[string]attr.Value{ - "X": types.StringValue("x"), - "Y": types.StringValue("y"), - }), - Field4: fwtypes.NewSetNestedObjectValueOfValueSliceMust(ctx, []tfSingleInt64Field{ - {Field1: types.Int64Value(100)}, - {Field1: types.Int64Value(2000)}, - {Field1: types.Int64Value(30000)}, - }), - }, - Target: &awsComplexValue{}, - WantTarget: &awsComplexValue{ - Field1: "m", - Field2: &awsNestedObjectPointer{ - Field1: &awsSingleStringValue{ - Field1: "n", - }, - }, - Field3: aws.StringMap(map[string]string{ - "X": "x", - "Y": "y", - }), - Field4: []awsSingleInt64Value{ - {Field1: 100}, - {Field1: 2000}, - {Field1: 30000}, - }, - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[*tfComplexValue](), reflect.TypeFor[*awsComplexValue]()), - infoConverting(reflect.TypeFor[tfComplexValue](), reflect.TypeFor[*awsComplexValue]()), - - traceMatchedFields("Field1", reflect.TypeFor[tfComplexValue](), "Field1", reflect.TypeFor[*awsComplexValue]()), - infoConvertingWithPath("Field1", reflect.TypeFor[types.String](), "Field1", reflect.TypeFor[string]()), - traceMatchedFields("Field2", reflect.TypeFor[tfComplexValue](), "Field2", reflect.TypeFor[*awsComplexValue]()), - - infoConvertingWithPath("Field2", reflect.TypeFor[fwtypes.ListNestedObjectValueOf[tfListOfNestedObject]](), "Field2", reflect.TypeFor[*awsNestedObjectPointer]()), - traceMatchedFieldsWithPath("Field2[0]", "Field1", reflect.TypeFor[tfListOfNestedObject](), "Field2", "Field1", reflect.TypeFor[*awsNestedObjectPointer]()), - infoConvertingWithPath("Field2[0].Field1", reflect.TypeFor[fwtypes.ListNestedObjectValueOf[tfSingleStringField]](), "Field2.Field1", 
reflect.TypeFor[*awsSingleStringValue]()), - traceMatchedFieldsWithPath("Field2[0].Field1[0]", "Field1", reflect.TypeFor[tfSingleStringField](), "Field2.Field1", "Field1", reflect.TypeFor[*awsSingleStringValue]()), - infoConvertingWithPath("Field2[0].Field1[0].Field1", reflect.TypeFor[types.String](), "Field2.Field1.Field1", reflect.TypeFor[string]()), - - traceMatchedFields("Field3", reflect.TypeFor[tfComplexValue](), "Field3", reflect.TypeFor[*awsComplexValue]()), - infoConvertingWithPath("Field3", reflect.TypeFor[types.Map](), "Field3", reflect.TypeFor[map[string]*string]()), - traceExpandingWithElementsAs("Field3", reflect.TypeFor[types.Map](), 2, "Field3", reflect.TypeFor[map[string]*string]()), - - traceMatchedFields("Field4", reflect.TypeFor[tfComplexValue](), "Field4", reflect.TypeFor[*awsComplexValue]()), - infoConvertingWithPath("Field4", reflect.TypeFor[fwtypes.SetNestedObjectValueOf[tfSingleInt64Field]](), "Field4", reflect.TypeFor[[]awsSingleInt64Value]()), - traceExpandingNestedObjectCollection("Field4", reflect.TypeFor[fwtypes.SetNestedObjectValueOf[tfSingleInt64Field]](), 3, "Field4", reflect.TypeFor[[]awsSingleInt64Value]()), - traceMatchedFieldsWithPath("Field4[0]", "Field1", reflect.TypeFor[tfSingleInt64Field](), "Field4[0]", "Field1", reflect.TypeFor[*awsSingleInt64Value]()), - infoConvertingWithPath("Field4[0].Field1", reflect.TypeFor[types.Int64](), "Field4[0].Field1", reflect.TypeFor[int64]()), - traceMatchedFieldsWithPath("Field4[1]", "Field1", reflect.TypeFor[tfSingleInt64Field](), "Field4[1]", "Field1", reflect.TypeFor[*awsSingleInt64Value]()), - infoConvertingWithPath("Field4[1].Field1", reflect.TypeFor[types.Int64](), "Field4[1].Field1", reflect.TypeFor[int64]()), - traceMatchedFieldsWithPath("Field4[2]", "Field1", reflect.TypeFor[tfSingleInt64Field](), "Field4[2]", "Field1", reflect.TypeFor[*awsSingleInt64Value]()), - infoConvertingWithPath("Field4[2].Field1", reflect.TypeFor[types.Int64](), "Field4[2].Field1", 
reflect.TypeFor[int64]()), - }, - }, - "map of string": { - Source: &tfMapOfString{ - FieldInner: fwtypes.NewMapValueOfMust[types.String](ctx, map[string]attr.Value{ - "x": types.StringValue("y"), - }), - }, - Target: &awsMapOfString{}, - WantTarget: &awsMapOfString{ - FieldInner: map[string]string{ - "x": "y", - }, - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[*tfMapOfString](), reflect.TypeFor[*awsMapOfString]()), - infoConverting(reflect.TypeFor[tfMapOfString](), reflect.TypeFor[*awsMapOfString]()), - traceMatchedFields("FieldInner", reflect.TypeFor[tfMapOfString](), "FieldInner", reflect.TypeFor[*awsMapOfString]()), - infoConvertingWithPath("FieldInner", reflect.TypeFor[fwtypes.MapValueOf[types.String]](), "FieldInner", reflect.TypeFor[map[string]string]()), - traceExpandingWithElementsAs("FieldInner", reflect.TypeFor[fwtypes.MapValueOf[types.String]](), 1, "FieldInner", reflect.TypeFor[map[string]string]()), - }, - }, - "map of string pointer": { - Source: &tfMapOfString{ - FieldInner: fwtypes.NewMapValueOfMust[types.String](ctx, map[string]attr.Value{ - "x": types.StringValue("y"), - }), - }, - Target: &awsMapOfStringPointer{}, - WantTarget: &awsMapOfStringPointer{ - FieldInner: map[string]*string{ - "x": aws.String("y"), - }, - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[*tfMapOfString](), reflect.TypeFor[*awsMapOfStringPointer]()), - infoConverting(reflect.TypeFor[tfMapOfString](), reflect.TypeFor[*awsMapOfStringPointer]()), - traceMatchedFields("FieldInner", reflect.TypeFor[tfMapOfString](), "FieldInner", reflect.TypeFor[*awsMapOfStringPointer]()), - infoConvertingWithPath("FieldInner", reflect.TypeFor[fwtypes.MapValueOf[types.String]](), "FieldInner", reflect.TypeFor[map[string]*string]()), - traceExpandingWithElementsAs("FieldInner", reflect.TypeFor[fwtypes.MapValueOf[types.String]](), 1, "FieldInner", reflect.TypeFor[map[string]*string]()), - }, - }, - "map of map of string": { - Source: 
&tfMapOfMapOfString{ - Field1: fwtypes.NewMapValueOfMust[fwtypes.MapValueOf[types.String]](ctx, map[string]attr.Value{ - "x": fwtypes.NewMapValueOfMust[types.String](ctx, map[string]attr.Value{ - "y": types.StringValue("z"), - }), - }), - }, - Target: &awsMapOfMapOfString{}, - WantTarget: &awsMapOfMapOfString{ - Field1: map[string]map[string]string{ - "x": { - "y": "z", - }, - }, - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[*tfMapOfMapOfString](), reflect.TypeFor[*awsMapOfMapOfString]()), - infoConverting(reflect.TypeFor[tfMapOfMapOfString](), reflect.TypeFor[*awsMapOfMapOfString]()), - traceMatchedFields("Field1", reflect.TypeFor[tfMapOfMapOfString](), "Field1", reflect.TypeFor[*awsMapOfMapOfString]()), - infoConvertingWithPath("Field1", reflect.TypeFor[fwtypes.MapValueOf[fwtypes.MapValueOf[types.String]]](), "Field1", reflect.TypeFor[map[string]map[string]string]()), - traceExpandingWithElementsAs("Field1", reflect.TypeFor[fwtypes.MapValueOf[fwtypes.MapValueOf[types.String]]](), 1, "Field1", reflect.TypeFor[map[string]map[string]string]()), - }, - }, - "map of map of string pointer": { - Source: &tfMapOfMapOfString{ - Field1: fwtypes.NewMapValueOfMust[fwtypes.MapValueOf[types.String]](ctx, map[string]attr.Value{ - "x": fwtypes.NewMapValueOfMust[types.String](ctx, map[string]attr.Value{ - "y": types.StringValue("z"), - }), - }), - }, - Target: &awsMapOfMapOfStringPointer{}, - WantTarget: &awsMapOfMapOfStringPointer{ - Field1: map[string]map[string]*string{ - "x": { - "y": aws.String("z"), - }, - }, - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[*tfMapOfMapOfString](), reflect.TypeFor[*awsMapOfMapOfStringPointer]()), - infoConverting(reflect.TypeFor[tfMapOfMapOfString](), reflect.TypeFor[*awsMapOfMapOfStringPointer]()), - traceMatchedFields("Field1", reflect.TypeFor[tfMapOfMapOfString](), "Field1", reflect.TypeFor[*awsMapOfMapOfStringPointer]()), - infoConvertingWithPath("Field1", 
reflect.TypeFor[fwtypes.MapValueOf[fwtypes.MapValueOf[types.String]]](), "Field1", reflect.TypeFor[map[string]map[string]*string]()), - traceExpandingWithElementsAs("Field1", reflect.TypeFor[fwtypes.MapValueOf[fwtypes.MapValueOf[types.String]]](), 1, "Field1", reflect.TypeFor[map[string]map[string]*string]()), - }, - }, - "nested string map": { - Source: &tfNestedMapOfString{ - FieldOuter: fwtypes.NewListNestedObjectValueOfPtrMust(ctx, &tfMapOfString{ - FieldInner: fwtypes.NewMapValueOfMust[types.String](ctx, map[string]attr.Value{ - "x": types.StringValue("y"), - }), - }), - }, - Target: &awsNestedMapOfString{}, - WantTarget: &awsNestedMapOfString{ - FieldOuter: awsMapOfString{ - FieldInner: map[string]string{ - "x": "y", - }, - }, - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[*tfNestedMapOfString](), reflect.TypeFor[*awsNestedMapOfString]()), - infoConverting(reflect.TypeFor[tfNestedMapOfString](), reflect.TypeFor[*awsNestedMapOfString]()), - traceMatchedFields("FieldOuter", reflect.TypeFor[tfNestedMapOfString](), "FieldOuter", reflect.TypeFor[*awsNestedMapOfString]()), - infoConvertingWithPath("FieldOuter", reflect.TypeFor[fwtypes.ListNestedObjectValueOf[tfMapOfString]](), "FieldOuter", reflect.TypeFor[awsMapOfString]()), - traceMatchedFieldsWithPath("FieldOuter[0]", "FieldInner", reflect.TypeFor[tfMapOfString](), "FieldOuter", "FieldInner", reflect.TypeFor[*awsMapOfString]()), - infoConvertingWithPath("FieldOuter[0].FieldInner", reflect.TypeFor[fwtypes.MapValueOf[types.String]](), "FieldOuter.FieldInner", reflect.TypeFor[map[string]string]()), - traceExpandingWithElementsAs("FieldOuter[0].FieldInner", reflect.TypeFor[fwtypes.MapValueOf[types.String]](), 1, "FieldOuter.FieldInner", reflect.TypeFor[map[string]string]()), - }, - }, - } - - runAutoExpandTestCases(t, testCases) -} - -func TestExpandFieldNamePrefix(t *testing.T) { - t.Parallel() - - testCases := autoFlexTestCases{ - "exact match": { - Options: []AutoFlexOptionsFunc{ - 
WithFieldNamePrefix("Intent"), - }, - Source: &tfFieldNamePrefix{ - Name: types.StringValue("Ovodoghen"), - }, - Target: &awsFieldNamePrefix{}, - WantTarget: &awsFieldNamePrefix{ - IntentName: aws.String("Ovodoghen"), - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[*tfFieldNamePrefix](), reflect.TypeFor[*awsFieldNamePrefix]()), - infoConverting(reflect.TypeFor[tfFieldNamePrefix](), reflect.TypeFor[*awsFieldNamePrefix]()), - traceMatchedFields("Name", reflect.TypeFor[tfFieldNamePrefix](), "IntentName", reflect.TypeFor[*awsFieldNamePrefix]()), - infoConvertingWithPath("Name", reflect.TypeFor[types.String](), "IntentName", reflect.TypeFor[*string]()), - }, - }, - - "case-insensitive": { - Options: []AutoFlexOptionsFunc{ - WithFieldNamePrefix("Client"), - }, - Source: &tfFieldNamePrefixInsensitive{ - ID: types.StringValue("abc123"), - }, - Target: &awsFieldNamePrefixInsensitive{}, - WantTarget: &awsFieldNamePrefixInsensitive{ - ClientId: aws.String("abc123"), - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[*tfFieldNamePrefixInsensitive](), reflect.TypeFor[*awsFieldNamePrefixInsensitive]()), - infoConverting(reflect.TypeFor[tfFieldNamePrefixInsensitive](), reflect.TypeFor[*awsFieldNamePrefixInsensitive]()), - traceMatchedFields("ID", reflect.TypeFor[tfFieldNamePrefixInsensitive](), "ClientId", reflect.TypeFor[*awsFieldNamePrefixInsensitive]()), - infoConvertingWithPath("ID", reflect.TypeFor[types.String](), "ClientId", reflect.TypeFor[*string]()), - }, - }, - } - - runAutoExpandTestCases(t, testCases) -} - -func TestExpandBool(t *testing.T) { - t.Parallel() - - testCases := map[string]autoFlexTestCases{ - "Bool to bool": { - "true": { - Source: tfSingleBoolField{ - Field1: types.BoolValue(true), - }, - Target: &awsSingleBoolValue{}, - WantTarget: &awsSingleBoolValue{ - Field1: true, - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[tfSingleBoolField](), 
reflect.TypeFor[*awsSingleBoolValue]()), - infoConverting(reflect.TypeFor[tfSingleBoolField](), reflect.TypeFor[*awsSingleBoolValue]()), - traceMatchedFields("Field1", reflect.TypeFor[tfSingleBoolField](), "Field1", reflect.TypeFor[*awsSingleBoolValue]()), - infoConvertingWithPath("Field1", reflect.TypeFor[types.Bool](), "Field1", reflect.TypeFor[bool]()), - }, - }, - "false": { - Source: tfSingleBoolField{ - Field1: types.BoolValue(false), - }, - Target: &awsSingleBoolValue{}, - WantTarget: &awsSingleBoolValue{ - Field1: false, - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[tfSingleBoolField](), reflect.TypeFor[*awsSingleBoolValue]()), - infoConverting(reflect.TypeFor[tfSingleBoolField](), reflect.TypeFor[*awsSingleBoolValue]()), - traceMatchedFields("Field1", reflect.TypeFor[tfSingleBoolField](), "Field1", reflect.TypeFor[*awsSingleBoolValue]()), - infoConvertingWithPath("Field1", reflect.TypeFor[types.Bool](), "Field1", reflect.TypeFor[bool]()), - }, - }, - "null": { - Source: tfSingleBoolField{ - Field1: types.BoolNull(), - }, - Target: &awsSingleBoolValue{}, - WantTarget: &awsSingleBoolValue{ - Field1: false, - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[tfSingleBoolField](), reflect.TypeFor[*awsSingleBoolValue]()), - infoConverting(reflect.TypeFor[tfSingleBoolField](), reflect.TypeFor[*awsSingleBoolValue]()), - traceMatchedFields("Field1", reflect.TypeFor[tfSingleBoolField](), "Field1", reflect.TypeFor[*awsSingleBoolValue]()), - infoConvertingWithPath("Field1", reflect.TypeFor[types.Bool](), "Field1", reflect.TypeFor[bool]()), - traceExpandingNullValue("Field1", reflect.TypeFor[types.Bool](), "Field1", reflect.TypeFor[bool]()), - }, - }, - }, - - "legacy Bool to bool": { - "true": { - Source: tfSingleBoolFieldLegacy{ - Field1: types.BoolValue(true), - }, - Target: &awsSingleBoolValue{}, - WantTarget: &awsSingleBoolValue{ - Field1: true, - }, - expectedLogLines: []map[string]any{ - 
infoExpanding(reflect.TypeFor[tfSingleBoolFieldLegacy](), reflect.TypeFor[*awsSingleBoolValue]()), - infoConverting(reflect.TypeFor[tfSingleBoolFieldLegacy](), reflect.TypeFor[*awsSingleBoolValue]()), - traceMatchedFields("Field1", reflect.TypeFor[tfSingleBoolFieldLegacy](), "Field1", reflect.TypeFor[*awsSingleBoolValue]()), - infoConvertingWithPath("Field1", reflect.TypeFor[types.Bool](), "Field1", reflect.TypeFor[bool]()), - }, - }, - "false": { - Source: tfSingleBoolFieldLegacy{ - Field1: types.BoolValue(false), - }, - Target: &awsSingleBoolValue{}, - WantTarget: &awsSingleBoolValue{ - Field1: false, - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[tfSingleBoolFieldLegacy](), reflect.TypeFor[*awsSingleBoolValue]()), - infoConverting(reflect.TypeFor[tfSingleBoolFieldLegacy](), reflect.TypeFor[*awsSingleBoolValue]()), - traceMatchedFields("Field1", reflect.TypeFor[tfSingleBoolFieldLegacy](), "Field1", reflect.TypeFor[*awsSingleBoolValue]()), - infoConvertingWithPath("Field1", reflect.TypeFor[types.Bool](), "Field1", reflect.TypeFor[bool]()), - }, - }, - "null": { - Source: tfSingleBoolFieldLegacy{ - Field1: types.BoolNull(), - }, - Target: &awsSingleBoolValue{}, - WantTarget: &awsSingleBoolValue{ - Field1: false, - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[tfSingleBoolFieldLegacy](), reflect.TypeFor[*awsSingleBoolValue]()), - infoConverting(reflect.TypeFor[tfSingleBoolFieldLegacy](), reflect.TypeFor[*awsSingleBoolValue]()), - traceMatchedFields("Field1", reflect.TypeFor[tfSingleBoolFieldLegacy](), "Field1", reflect.TypeFor[*awsSingleBoolValue]()), - infoConvertingWithPath("Field1", reflect.TypeFor[types.Bool](), "Field1", reflect.TypeFor[bool]()), - traceExpandingNullValue("Field1", reflect.TypeFor[types.Bool](), "Field1", reflect.TypeFor[bool]()), - }, - }, - }, - - "Bool to *bool": { - "true": { - Source: tfSingleBoolField{ - Field1: types.BoolValue(true), - }, - Target: &awsSingleBoolPointer{}, - 
WantTarget: &awsSingleBoolPointer{ - Field1: aws.Bool(true), - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[tfSingleBoolField](), reflect.TypeFor[*awsSingleBoolPointer]()), - infoConverting(reflect.TypeFor[tfSingleBoolField](), reflect.TypeFor[*awsSingleBoolPointer]()), - traceMatchedFields("Field1", reflect.TypeFor[tfSingleBoolField](), "Field1", reflect.TypeFor[*awsSingleBoolPointer]()), - infoConvertingWithPath("Field1", reflect.TypeFor[types.Bool](), "Field1", reflect.TypeFor[*bool]()), - }, - }, - "false": { - Source: tfSingleBoolField{ - Field1: types.BoolValue(false), - }, - Target: &awsSingleBoolPointer{}, - WantTarget: &awsSingleBoolPointer{ - Field1: aws.Bool(false), - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[tfSingleBoolField](), reflect.TypeFor[*awsSingleBoolPointer]()), - infoConverting(reflect.TypeFor[tfSingleBoolField](), reflect.TypeFor[*awsSingleBoolPointer]()), - traceMatchedFields("Field1", reflect.TypeFor[tfSingleBoolField](), "Field1", reflect.TypeFor[*awsSingleBoolPointer]()), - infoConvertingWithPath("Field1", reflect.TypeFor[types.Bool](), "Field1", reflect.TypeFor[*bool]()), - }, - }, - "null": { - Source: tfSingleBoolField{ - Field1: types.BoolNull(), - }, - Target: &awsSingleBoolPointer{}, - WantTarget: &awsSingleBoolPointer{ - Field1: nil, - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[tfSingleBoolField](), reflect.TypeFor[*awsSingleBoolPointer]()), - infoConverting(reflect.TypeFor[tfSingleBoolField](), reflect.TypeFor[*awsSingleBoolPointer]()), - traceMatchedFields("Field1", reflect.TypeFor[tfSingleBoolField](), "Field1", reflect.TypeFor[*awsSingleBoolPointer]()), - infoConvertingWithPath("Field1", reflect.TypeFor[types.Bool](), "Field1", reflect.TypeFor[*bool]()), - traceExpandingNullValue("Field1", reflect.TypeFor[types.Bool](), "Field1", reflect.TypeFor[*bool]()), - }, - }, - }, - - "legacy Bool to *bool": { - "true": { - Source: 
tfSingleBoolFieldLegacy{ - Field1: types.BoolValue(true), - }, - Target: &awsSingleBoolPointer{}, - WantTarget: &awsSingleBoolPointer{ - Field1: aws.Bool(true), - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[tfSingleBoolFieldLegacy](), reflect.TypeFor[*awsSingleBoolPointer]()), - infoConverting(reflect.TypeFor[tfSingleBoolFieldLegacy](), reflect.TypeFor[*awsSingleBoolPointer]()), - traceMatchedFields("Field1", reflect.TypeFor[tfSingleBoolFieldLegacy](), "Field1", reflect.TypeFor[*awsSingleBoolPointer]()), - infoConvertingWithPath("Field1", reflect.TypeFor[types.Bool](), "Field1", reflect.TypeFor[*bool]()), - debugUsingLegacyExpander("Field1", reflect.TypeFor[types.Bool](), "Field1", reflect.TypeFor[*bool]()), - }, - }, - "false": { - Source: tfSingleBoolFieldLegacy{ - Field1: types.BoolValue(false), - }, - Target: &awsSingleBoolPointer{}, - WantTarget: &awsSingleBoolPointer{ - Field1: nil, - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[tfSingleBoolFieldLegacy](), reflect.TypeFor[*awsSingleBoolPointer]()), - infoConverting(reflect.TypeFor[tfSingleBoolFieldLegacy](), reflect.TypeFor[*awsSingleBoolPointer]()), - traceMatchedFields("Field1", reflect.TypeFor[tfSingleBoolFieldLegacy](), "Field1", reflect.TypeFor[*awsSingleBoolPointer]()), - infoConvertingWithPath("Field1", reflect.TypeFor[types.Bool](), "Field1", reflect.TypeFor[*bool]()), - debugUsingLegacyExpander("Field1", reflect.TypeFor[types.Bool](), "Field1", reflect.TypeFor[*bool]()), - }, - }, - "null": { - Source: tfSingleBoolFieldLegacy{ - Field1: types.BoolNull(), - }, - Target: &awsSingleBoolPointer{}, - WantTarget: &awsSingleBoolPointer{ - Field1: nil, - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[tfSingleBoolFieldLegacy](), reflect.TypeFor[*awsSingleBoolPointer]()), - infoConverting(reflect.TypeFor[tfSingleBoolFieldLegacy](), reflect.TypeFor[*awsSingleBoolPointer]()), - traceMatchedFields("Field1", 
reflect.TypeFor[tfSingleBoolFieldLegacy](), "Field1", reflect.TypeFor[*awsSingleBoolPointer]()), - infoConvertingWithPath("Field1", reflect.TypeFor[types.Bool](), "Field1", reflect.TypeFor[*bool]()), - traceExpandingNullValue("Field1", reflect.TypeFor[types.Bool](), "Field1", reflect.TypeFor[*bool]()), - // TODO: should log about legacy expander - }, - }, - }, - } - - for testName, cases := range testCases { - t.Run(testName, func(t *testing.T) { - t.Parallel() - - runAutoExpandTestCases(t, cases) - }) - } -} - -func TestExpandFloat64(t *testing.T) { - t.Parallel() - - testCases := map[string]autoFlexTestCases{ - "Float64 to float64": { - "value": { - Source: tfSingleFloat64Field{ - Field1: types.Float64Value(42), - }, - Target: &awsSingleFloat64Value{}, - WantTarget: &awsSingleFloat64Value{ - Field1: 42, - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[tfSingleFloat64Field](), reflect.TypeFor[*awsSingleFloat64Value]()), - infoConverting(reflect.TypeFor[tfSingleFloat64Field](), reflect.TypeFor[*awsSingleFloat64Value]()), - traceMatchedFields("Field1", reflect.TypeFor[tfSingleFloat64Field](), "Field1", reflect.TypeFor[*awsSingleFloat64Value]()), - infoConvertingWithPath("Field1", reflect.TypeFor[types.Float64](), "Field1", reflect.TypeFor[float64]()), - }, - }, - "zero": { - Source: tfSingleFloat64Field{ - Field1: types.Float64Value(0), - }, - Target: &awsSingleFloat64Value{}, - WantTarget: &awsSingleFloat64Value{ - Field1: 0, - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[tfSingleFloat64Field](), reflect.TypeFor[*awsSingleFloat64Value]()), - infoConverting(reflect.TypeFor[tfSingleFloat64Field](), reflect.TypeFor[*awsSingleFloat64Value]()), - traceMatchedFields("Field1", reflect.TypeFor[tfSingleFloat64Field](), "Field1", reflect.TypeFor[*awsSingleFloat64Value]()), - infoConvertingWithPath("Field1", reflect.TypeFor[types.Float64](), "Field1", reflect.TypeFor[float64]()), - }, - }, - "null": { - Source: 
tfSingleFloat64Field{ - Field1: types.Float64Null(), - }, - Target: &awsSingleFloat64Value{}, - WantTarget: &awsSingleFloat64Value{ - Field1: 0, - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[tfSingleFloat64Field](), reflect.TypeFor[*awsSingleFloat64Value]()), - infoConverting(reflect.TypeFor[tfSingleFloat64Field](), reflect.TypeFor[*awsSingleFloat64Value]()), - traceMatchedFields("Field1", reflect.TypeFor[tfSingleFloat64Field](), "Field1", reflect.TypeFor[*awsSingleFloat64Value]()), - infoConvertingWithPath("Field1", reflect.TypeFor[types.Float64](), "Field1", reflect.TypeFor[float64]()), - traceExpandingNullValue("Field1", reflect.TypeFor[types.Float64](), "Field1", reflect.TypeFor[float64]()), - }, - }, - }, - - "legacy Float64 to float64": { - "value": { - Source: tfSingleFloat64FieldLegacy{ - Field1: types.Float64Value(42), - }, - Target: &awsSingleFloat64Value{}, - WantTarget: &awsSingleFloat64Value{ - Field1: 42, - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[tfSingleFloat64FieldLegacy](), reflect.TypeFor[*awsSingleFloat64Value]()), - infoConverting(reflect.TypeFor[tfSingleFloat64FieldLegacy](), reflect.TypeFor[*awsSingleFloat64Value]()), - traceMatchedFields("Field1", reflect.TypeFor[tfSingleFloat64FieldLegacy](), "Field1", reflect.TypeFor[*awsSingleFloat64Value]()), - infoConvertingWithPath("Field1", reflect.TypeFor[types.Float64](), "Field1", reflect.TypeFor[float64]()), - }, - }, - "zero": { - Source: tfSingleFloat64FieldLegacy{ - Field1: types.Float64Value(0), - }, - Target: &awsSingleFloat64Value{}, - WantTarget: &awsSingleFloat64Value{ - Field1: 0, - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[tfSingleFloat64FieldLegacy](), reflect.TypeFor[*awsSingleFloat64Value]()), - infoConverting(reflect.TypeFor[tfSingleFloat64FieldLegacy](), reflect.TypeFor[*awsSingleFloat64Value]()), - traceMatchedFields("Field1", reflect.TypeFor[tfSingleFloat64FieldLegacy](), "Field1", 
reflect.TypeFor[*awsSingleFloat64Value]()), - infoConvertingWithPath("Field1", reflect.TypeFor[types.Float64](), "Field1", reflect.TypeFor[float64]()), - }, - }, - "null": { - Source: tfSingleFloat64FieldLegacy{ - Field1: types.Float64Null(), - }, - Target: &awsSingleFloat64Value{}, - WantTarget: &awsSingleFloat64Value{ - Field1: 0, - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[tfSingleFloat64FieldLegacy](), reflect.TypeFor[*awsSingleFloat64Value]()), - infoConverting(reflect.TypeFor[tfSingleFloat64FieldLegacy](), reflect.TypeFor[*awsSingleFloat64Value]()), - traceMatchedFields("Field1", reflect.TypeFor[tfSingleFloat64FieldLegacy](), "Field1", reflect.TypeFor[*awsSingleFloat64Value]()), - infoConvertingWithPath("Field1", reflect.TypeFor[types.Float64](), "Field1", reflect.TypeFor[float64]()), - traceExpandingNullValue("Field1", reflect.TypeFor[types.Float64](), "Field1", reflect.TypeFor[float64]()), - }, - }, - }, - - "Float64 to *float64": { - "value": { - Source: tfSingleFloat64Field{ - Field1: types.Float64Value(42), - }, - Target: &awsSingleFloat64Pointer{}, - WantTarget: &awsSingleFloat64Pointer{ - Field1: aws.Float64(42), - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[tfSingleFloat64Field](), reflect.TypeFor[*awsSingleFloat64Pointer]()), - infoConverting(reflect.TypeFor[tfSingleFloat64Field](), reflect.TypeFor[*awsSingleFloat64Pointer]()), - traceMatchedFields("Field1", reflect.TypeFor[tfSingleFloat64Field](), "Field1", reflect.TypeFor[*awsSingleFloat64Pointer]()), - infoConvertingWithPath("Field1", reflect.TypeFor[types.Float64](), "Field1", reflect.TypeFor[*float64]()), - }, - }, - "zero": { - Source: tfSingleFloat64Field{ - Field1: types.Float64Value(0), - }, - Target: &awsSingleFloat64Pointer{}, - WantTarget: &awsSingleFloat64Pointer{ - Field1: aws.Float64(0), - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[tfSingleFloat64Field](), 
reflect.TypeFor[*awsSingleFloat64Pointer]()), - infoConverting(reflect.TypeFor[tfSingleFloat64Field](), reflect.TypeFor[*awsSingleFloat64Pointer]()), - traceMatchedFields("Field1", reflect.TypeFor[tfSingleFloat64Field](), "Field1", reflect.TypeFor[*awsSingleFloat64Pointer]()), - infoConvertingWithPath("Field1", reflect.TypeFor[types.Float64](), "Field1", reflect.TypeFor[*float64]()), - }, - }, - "null": { - Source: tfSingleFloat64Field{ - Field1: types.Float64Null(), - }, - Target: &awsSingleFloat64Pointer{}, - WantTarget: &awsSingleFloat64Pointer{ - Field1: nil, - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[tfSingleFloat64Field](), reflect.TypeFor[*awsSingleFloat64Pointer]()), - infoConverting(reflect.TypeFor[tfSingleFloat64Field](), reflect.TypeFor[*awsSingleFloat64Pointer]()), - traceMatchedFields("Field1", reflect.TypeFor[tfSingleFloat64Field](), "Field1", reflect.TypeFor[*awsSingleFloat64Pointer]()), - infoConvertingWithPath("Field1", reflect.TypeFor[types.Float64](), "Field1", reflect.TypeFor[*float64]()), - traceExpandingNullValue("Field1", reflect.TypeFor[types.Float64](), "Field1", reflect.TypeFor[*float64]()), - }, - }, - }, - - "legacy Float64 to *float64": { - "value": { - Source: tfSingleFloat64FieldLegacy{ - Field1: types.Float64Value(42), - }, - Target: &awsSingleFloat64Pointer{}, - WantTarget: &awsSingleFloat64Pointer{ - Field1: aws.Float64(42), - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[tfSingleFloat64FieldLegacy](), reflect.TypeFor[*awsSingleFloat64Pointer]()), - infoConverting(reflect.TypeFor[tfSingleFloat64FieldLegacy](), reflect.TypeFor[*awsSingleFloat64Pointer]()), - traceMatchedFields("Field1", reflect.TypeFor[tfSingleFloat64FieldLegacy](), "Field1", reflect.TypeFor[*awsSingleFloat64Pointer]()), - infoConvertingWithPath("Field1", reflect.TypeFor[types.Float64](), "Field1", reflect.TypeFor[*float64]()), - debugUsingLegacyExpander("Field1", reflect.TypeFor[types.Float64](), "Field1", 
reflect.TypeFor[*float64]()), - }, - }, - "zero": { - Source: tfSingleFloat64FieldLegacy{ - Field1: types.Float64Value(0), - }, - Target: &awsSingleFloat64Pointer{}, - WantTarget: &awsSingleFloat64Pointer{ - Field1: nil, - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[tfSingleFloat64FieldLegacy](), reflect.TypeFor[*awsSingleFloat64Pointer]()), - infoConverting(reflect.TypeFor[tfSingleFloat64FieldLegacy](), reflect.TypeFor[*awsSingleFloat64Pointer]()), - traceMatchedFields("Field1", reflect.TypeFor[tfSingleFloat64FieldLegacy](), "Field1", reflect.TypeFor[*awsSingleFloat64Pointer]()), - infoConvertingWithPath("Field1", reflect.TypeFor[types.Float64](), "Field1", reflect.TypeFor[*float64]()), - debugUsingLegacyExpander("Field1", reflect.TypeFor[types.Float64](), "Field1", reflect.TypeFor[*float64]()), - }, - }, - "null": { - Source: tfSingleFloat64FieldLegacy{ - Field1: types.Float64Null(), - }, - Target: &awsSingleFloat64Pointer{}, - WantTarget: &awsSingleFloat64Pointer{ - Field1: nil, - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[tfSingleFloat64FieldLegacy](), reflect.TypeFor[*awsSingleFloat64Pointer]()), - infoConverting(reflect.TypeFor[tfSingleFloat64FieldLegacy](), reflect.TypeFor[*awsSingleFloat64Pointer]()), - traceMatchedFields("Field1", reflect.TypeFor[tfSingleFloat64FieldLegacy](), "Field1", reflect.TypeFor[*awsSingleFloat64Pointer]()), - infoConvertingWithPath("Field1", reflect.TypeFor[types.Float64](), "Field1", reflect.TypeFor[*float64]()), - traceExpandingNullValue("Field1", reflect.TypeFor[types.Float64](), "Field1", reflect.TypeFor[*float64]()), - // TODO: should log about legacy expander - }, - }, - }, - - // For historical reasons, Float64 can be expanded to float32 values - "Float64 to float32": { - "value": { - Source: tfSingleFloat64Field{ - Field1: types.Float64Value(42), - }, - Target: &awsSingleFloat32Value{}, - WantTarget: &awsSingleFloat32Value{ - Field1: 42, - }, - expectedLogLines: 
[]map[string]any{ - infoExpanding(reflect.TypeFor[tfSingleFloat64Field](), reflect.TypeFor[*awsSingleFloat32Value]()), - infoConverting(reflect.TypeFor[tfSingleFloat64Field](), reflect.TypeFor[*awsSingleFloat32Value]()), - traceMatchedFields("Field1", reflect.TypeFor[tfSingleFloat64Field](), "Field1", reflect.TypeFor[*awsSingleFloat32Value]()), - infoConvertingWithPath("Field1", reflect.TypeFor[types.Float64](), "Field1", reflect.TypeFor[float32]()), - }, - }, - "zero": { - Source: tfSingleFloat64Field{ - Field1: types.Float64Value(0), - }, - Target: &awsSingleFloat32Value{}, - WantTarget: &awsSingleFloat32Value{ - Field1: 0, - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[tfSingleFloat64Field](), reflect.TypeFor[*awsSingleFloat32Value]()), - infoConverting(reflect.TypeFor[tfSingleFloat64Field](), reflect.TypeFor[*awsSingleFloat32Value]()), - traceMatchedFields("Field1", reflect.TypeFor[tfSingleFloat64Field](), "Field1", reflect.TypeFor[*awsSingleFloat32Value]()), - infoConvertingWithPath("Field1", reflect.TypeFor[types.Float64](), "Field1", reflect.TypeFor[float32]()), - }, - }, - "null": { - Source: tfSingleFloat64Field{ - Field1: types.Float64Null(), - }, - Target: &awsSingleFloat32Value{}, - WantTarget: &awsSingleFloat32Value{ - Field1: 0, - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[tfSingleFloat64Field](), reflect.TypeFor[*awsSingleFloat32Value]()), - infoConverting(reflect.TypeFor[tfSingleFloat64Field](), reflect.TypeFor[*awsSingleFloat32Value]()), - traceMatchedFields("Field1", reflect.TypeFor[tfSingleFloat64Field](), "Field1", reflect.TypeFor[*awsSingleFloat32Value]()), - infoConvertingWithPath("Field1", reflect.TypeFor[types.Float64](), "Field1", reflect.TypeFor[float32]()), - traceExpandingNullValue("Field1", reflect.TypeFor[types.Float64](), "Field1", reflect.TypeFor[float32]()), - }, - }, - }, - - "legacy Float64 to float32": { - "value": { - Source: tfSingleFloat64FieldLegacy{ - Field1: 
types.Float64Value(42), - }, - Target: &awsSingleFloat32Value{}, - WantTarget: &awsSingleFloat32Value{ - Field1: 42, - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[tfSingleFloat64FieldLegacy](), reflect.TypeFor[*awsSingleFloat32Value]()), - infoConverting(reflect.TypeFor[tfSingleFloat64FieldLegacy](), reflect.TypeFor[*awsSingleFloat32Value]()), - traceMatchedFields("Field1", reflect.TypeFor[tfSingleFloat64FieldLegacy](), "Field1", reflect.TypeFor[*awsSingleFloat32Value]()), - infoConvertingWithPath("Field1", reflect.TypeFor[types.Float64](), "Field1", reflect.TypeFor[float32]()), - }, - }, - "zero": { - Source: tfSingleFloat64FieldLegacy{ - Field1: types.Float64Value(0), - }, - Target: &awsSingleFloat32Value{}, - WantTarget: &awsSingleFloat32Value{ - Field1: 0, - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[tfSingleFloat64FieldLegacy](), reflect.TypeFor[*awsSingleFloat32Value]()), - infoConverting(reflect.TypeFor[tfSingleFloat64FieldLegacy](), reflect.TypeFor[*awsSingleFloat32Value]()), - traceMatchedFields("Field1", reflect.TypeFor[tfSingleFloat64FieldLegacy](), "Field1", reflect.TypeFor[*awsSingleFloat32Value]()), - infoConvertingWithPath("Field1", reflect.TypeFor[types.Float64](), "Field1", reflect.TypeFor[float32]()), - }, - }, - "null": { - Source: tfSingleFloat64FieldLegacy{ - Field1: types.Float64Null(), - }, - Target: &awsSingleFloat32Value{}, - WantTarget: &awsSingleFloat32Value{ - Field1: 0, - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[tfSingleFloat64FieldLegacy](), reflect.TypeFor[*awsSingleFloat32Value]()), - infoConverting(reflect.TypeFor[tfSingleFloat64FieldLegacy](), reflect.TypeFor[*awsSingleFloat32Value]()), - traceMatchedFields("Field1", reflect.TypeFor[tfSingleFloat64FieldLegacy](), "Field1", reflect.TypeFor[*awsSingleFloat32Value]()), - infoConvertingWithPath("Field1", reflect.TypeFor[types.Float64](), "Field1", reflect.TypeFor[float32]()), - 
traceExpandingNullValue("Field1", reflect.TypeFor[types.Float64](), "Field1", reflect.TypeFor[float32]()), - }, - }, - }, - - "Float64 to *float32": { - "value": { - Source: tfSingleFloat64Field{ - Field1: types.Float64Value(42), - }, - Target: &awsSingleFloat32Pointer{}, - WantTarget: &awsSingleFloat32Pointer{ - Field1: aws.Float32(42), - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[tfSingleFloat64Field](), reflect.TypeFor[*awsSingleFloat32Pointer]()), - infoConverting(reflect.TypeFor[tfSingleFloat64Field](), reflect.TypeFor[*awsSingleFloat32Pointer]()), - traceMatchedFields("Field1", reflect.TypeFor[tfSingleFloat64Field](), "Field1", reflect.TypeFor[*awsSingleFloat32Pointer]()), - infoConvertingWithPath("Field1", reflect.TypeFor[types.Float64](), "Field1", reflect.TypeFor[*float32]()), - }, - }, - "zero": { - Source: tfSingleFloat64Field{ - Field1: types.Float64Value(0), - }, - Target: &awsSingleFloat32Pointer{}, - WantTarget: &awsSingleFloat32Pointer{ - Field1: aws.Float32(0), - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[tfSingleFloat64Field](), reflect.TypeFor[*awsSingleFloat32Pointer]()), - infoConverting(reflect.TypeFor[tfSingleFloat64Field](), reflect.TypeFor[*awsSingleFloat32Pointer]()), - traceMatchedFields("Field1", reflect.TypeFor[tfSingleFloat64Field](), "Field1", reflect.TypeFor[*awsSingleFloat32Pointer]()), - infoConvertingWithPath("Field1", reflect.TypeFor[types.Float64](), "Field1", reflect.TypeFor[*float32]()), - }, - }, - "null": { - Source: tfSingleFloat64Field{ - Field1: types.Float64Null(), - }, - Target: &awsSingleFloat32Pointer{}, - WantTarget: &awsSingleFloat32Pointer{ - Field1: nil, - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[tfSingleFloat64Field](), reflect.TypeFor[*awsSingleFloat32Pointer]()), - infoConverting(reflect.TypeFor[tfSingleFloat64Field](), reflect.TypeFor[*awsSingleFloat32Pointer]()), - traceMatchedFields("Field1", 
reflect.TypeFor[tfSingleFloat64Field](), "Field1", reflect.TypeFor[*awsSingleFloat32Pointer]()), - infoConvertingWithPath("Field1", reflect.TypeFor[types.Float64](), "Field1", reflect.TypeFor[*float32]()), - traceExpandingNullValue("Field1", reflect.TypeFor[types.Float64](), "Field1", reflect.TypeFor[*float32]()), - }, - }, - }, - - "legacy Float64 to *float32": { - "value": { - Source: tfSingleFloat64FieldLegacy{ - Field1: types.Float64Value(42), - }, - Target: &awsSingleFloat32Pointer{}, - WantTarget: &awsSingleFloat32Pointer{ - Field1: aws.Float32(42), - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[tfSingleFloat64FieldLegacy](), reflect.TypeFor[*awsSingleFloat32Pointer]()), - infoConverting(reflect.TypeFor[tfSingleFloat64FieldLegacy](), reflect.TypeFor[*awsSingleFloat32Pointer]()), - traceMatchedFields("Field1", reflect.TypeFor[tfSingleFloat64FieldLegacy](), "Field1", reflect.TypeFor[*awsSingleFloat32Pointer]()), - infoConvertingWithPath("Field1", reflect.TypeFor[types.Float64](), "Field1", reflect.TypeFor[*float32]()), - debugUsingLegacyExpander("Field1", reflect.TypeFor[types.Float64](), "Field1", reflect.TypeFor[*float32]()), - }, - }, - "zero": { - Source: tfSingleFloat64FieldLegacy{ - Field1: types.Float64Value(0), - }, - Target: &awsSingleFloat32Pointer{}, - WantTarget: &awsSingleFloat32Pointer{ - Field1: nil, - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[tfSingleFloat64FieldLegacy](), reflect.TypeFor[*awsSingleFloat32Pointer]()), - infoConverting(reflect.TypeFor[tfSingleFloat64FieldLegacy](), reflect.TypeFor[*awsSingleFloat32Pointer]()), - traceMatchedFields("Field1", reflect.TypeFor[tfSingleFloat64FieldLegacy](), "Field1", reflect.TypeFor[*awsSingleFloat32Pointer]()), - infoConvertingWithPath("Field1", reflect.TypeFor[types.Float64](), "Field1", reflect.TypeFor[*float32]()), - debugUsingLegacyExpander("Field1", reflect.TypeFor[types.Float64](), "Field1", reflect.TypeFor[*float32]()), - }, - }, - 
"null": { - Source: tfSingleFloat64FieldLegacy{ - Field1: types.Float64Null(), - }, - Target: &awsSingleFloat32Pointer{}, - WantTarget: &awsSingleFloat32Pointer{ - Field1: nil, - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[tfSingleFloat64FieldLegacy](), reflect.TypeFor[*awsSingleFloat32Pointer]()), - infoConverting(reflect.TypeFor[tfSingleFloat64FieldLegacy](), reflect.TypeFor[*awsSingleFloat32Pointer]()), - traceMatchedFields("Field1", reflect.TypeFor[tfSingleFloat64FieldLegacy](), "Field1", reflect.TypeFor[*awsSingleFloat32Pointer]()), - infoConvertingWithPath("Field1", reflect.TypeFor[types.Float64](), "Field1", reflect.TypeFor[*float32]()), - traceExpandingNullValue("Field1", reflect.TypeFor[types.Float64](), "Field1", reflect.TypeFor[*float32]()), - // TODO: should log about legacy expander - }, - }, - }, - } - - for testName, cases := range testCases { - t.Run(testName, func(t *testing.T) { - t.Parallel() - - runAutoExpandTestCases(t, cases) - }) - } -} - -func TestExpandFloat32(t *testing.T) { - t.Parallel() - - testCases := map[string]autoFlexTestCases{ - "Float32 to float32": { - "value": { - Source: tfSingleFloat32Field{ - Field1: types.Float32Value(42), - }, - Target: &awsSingleFloat32Value{}, - WantTarget: &awsSingleFloat32Value{ - Field1: 42, - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[tfSingleFloat32Field](), reflect.TypeFor[*awsSingleFloat32Value]()), - infoConverting(reflect.TypeFor[tfSingleFloat32Field](), reflect.TypeFor[*awsSingleFloat32Value]()), - traceMatchedFields("Field1", reflect.TypeFor[tfSingleFloat32Field](), "Field1", reflect.TypeFor[*awsSingleFloat32Value]()), - infoConvertingWithPath("Field1", reflect.TypeFor[types.Float32](), "Field1", reflect.TypeFor[float32]()), - }, - }, - "zero": { - Source: tfSingleFloat32Field{ - Field1: types.Float32Value(0), - }, - Target: &awsSingleFloat32Value{}, - WantTarget: &awsSingleFloat32Value{ - Field1: 0, - }, - expectedLogLines: 
[]map[string]any{ - infoExpanding(reflect.TypeFor[tfSingleFloat32Field](), reflect.TypeFor[*awsSingleFloat32Value]()), - infoConverting(reflect.TypeFor[tfSingleFloat32Field](), reflect.TypeFor[*awsSingleFloat32Value]()), - traceMatchedFields("Field1", reflect.TypeFor[tfSingleFloat32Field](), "Field1", reflect.TypeFor[*awsSingleFloat32Value]()), - infoConvertingWithPath("Field1", reflect.TypeFor[types.Float32](), "Field1", reflect.TypeFor[float32]()), - }, - }, - "null": { - Source: tfSingleFloat32Field{ - Field1: types.Float32Null(), - }, - Target: &awsSingleFloat32Value{}, - WantTarget: &awsSingleFloat32Value{ - Field1: 0, - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[tfSingleFloat32Field](), reflect.TypeFor[*awsSingleFloat32Value]()), - infoConverting(reflect.TypeFor[tfSingleFloat32Field](), reflect.TypeFor[*awsSingleFloat32Value]()), - traceMatchedFields("Field1", reflect.TypeFor[tfSingleFloat32Field](), "Field1", reflect.TypeFor[*awsSingleFloat32Value]()), - infoConvertingWithPath("Field1", reflect.TypeFor[types.Float32](), "Field1", reflect.TypeFor[float32]()), - traceExpandingNullValue("Field1", reflect.TypeFor[types.Float32](), "Field1", reflect.TypeFor[float32]()), - }, - }, - }, - - "legacy Float32 to float32": { - "value": { - Source: tfSingleFloat32FieldLegacy{ - Field1: types.Float32Value(42), - }, - Target: &awsSingleFloat32Value{}, - WantTarget: &awsSingleFloat32Value{ - Field1: 42, - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[tfSingleFloat32FieldLegacy](), reflect.TypeFor[*awsSingleFloat32Value]()), - infoConverting(reflect.TypeFor[tfSingleFloat32FieldLegacy](), reflect.TypeFor[*awsSingleFloat32Value]()), - traceMatchedFields("Field1", reflect.TypeFor[tfSingleFloat32FieldLegacy](), "Field1", reflect.TypeFor[*awsSingleFloat32Value]()), - infoConvertingWithPath("Field1", reflect.TypeFor[types.Float32](), "Field1", reflect.TypeFor[float32]()), - }, - }, - "zero": { - Source: 
tfSingleFloat32FieldLegacy{ - Field1: types.Float32Value(0), - }, - Target: &awsSingleFloat32Value{}, - WantTarget: &awsSingleFloat32Value{ - Field1: 0, - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[tfSingleFloat32FieldLegacy](), reflect.TypeFor[*awsSingleFloat32Value]()), - infoConverting(reflect.TypeFor[tfSingleFloat32FieldLegacy](), reflect.TypeFor[*awsSingleFloat32Value]()), - traceMatchedFields("Field1", reflect.TypeFor[tfSingleFloat32FieldLegacy](), "Field1", reflect.TypeFor[*awsSingleFloat32Value]()), - infoConvertingWithPath("Field1", reflect.TypeFor[types.Float32](), "Field1", reflect.TypeFor[float32]()), - }, - }, - "null": { - Source: tfSingleFloat32FieldLegacy{ - Field1: types.Float32Null(), - }, - Target: &awsSingleFloat32Value{}, - WantTarget: &awsSingleFloat32Value{ - Field1: 0, - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[tfSingleFloat32FieldLegacy](), reflect.TypeFor[*awsSingleFloat32Value]()), - infoConverting(reflect.TypeFor[tfSingleFloat32FieldLegacy](), reflect.TypeFor[*awsSingleFloat32Value]()), - traceMatchedFields("Field1", reflect.TypeFor[tfSingleFloat32FieldLegacy](), "Field1", reflect.TypeFor[*awsSingleFloat32Value]()), - infoConvertingWithPath("Field1", reflect.TypeFor[types.Float32](), "Field1", reflect.TypeFor[float32]()), - traceExpandingNullValue("Field1", reflect.TypeFor[types.Float32](), "Field1", reflect.TypeFor[float32]()), - }, - }, - }, - - "Float32 to *float32": { - "value": { - Source: tfSingleFloat32Field{ - Field1: types.Float32Value(42), - }, - Target: &awsSingleFloat32Pointer{}, - WantTarget: &awsSingleFloat32Pointer{ - Field1: aws.Float32(42), - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[tfSingleFloat32Field](), reflect.TypeFor[*awsSingleFloat32Pointer]()), - infoConverting(reflect.TypeFor[tfSingleFloat32Field](), reflect.TypeFor[*awsSingleFloat32Pointer]()), - traceMatchedFields("Field1", reflect.TypeFor[tfSingleFloat32Field](), 
"Field1", reflect.TypeFor[*awsSingleFloat32Pointer]()), - infoConvertingWithPath("Field1", reflect.TypeFor[types.Float32](), "Field1", reflect.TypeFor[*float32]()), - }, - }, - "zero": { - Source: tfSingleFloat32Field{ - Field1: types.Float32Value(0), - }, - Target: &awsSingleFloat32Pointer{}, - WantTarget: &awsSingleFloat32Pointer{ - Field1: aws.Float32(0), - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[tfSingleFloat32Field](), reflect.TypeFor[*awsSingleFloat32Pointer]()), - infoConverting(reflect.TypeFor[tfSingleFloat32Field](), reflect.TypeFor[*awsSingleFloat32Pointer]()), - traceMatchedFields("Field1", reflect.TypeFor[tfSingleFloat32Field](), "Field1", reflect.TypeFor[*awsSingleFloat32Pointer]()), - infoConvertingWithPath("Field1", reflect.TypeFor[types.Float32](), "Field1", reflect.TypeFor[*float32]()), - }, - }, - "null": { - Source: tfSingleFloat32Field{ - Field1: types.Float32Null(), - }, - Target: &awsSingleFloat32Pointer{}, - WantTarget: &awsSingleFloat32Pointer{ - Field1: nil, - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[tfSingleFloat32Field](), reflect.TypeFor[*awsSingleFloat32Pointer]()), - infoConverting(reflect.TypeFor[tfSingleFloat32Field](), reflect.TypeFor[*awsSingleFloat32Pointer]()), - traceMatchedFields("Field1", reflect.TypeFor[tfSingleFloat32Field](), "Field1", reflect.TypeFor[*awsSingleFloat32Pointer]()), - infoConvertingWithPath("Field1", reflect.TypeFor[types.Float32](), "Field1", reflect.TypeFor[*float32]()), - traceExpandingNullValue("Field1", reflect.TypeFor[types.Float32](), "Field1", reflect.TypeFor[*float32]()), - }, - }, - }, - - "legacy Float32 to *float32": { - "value": { - Source: tfSingleFloat32FieldLegacy{ - Field1: types.Float32Value(42), - }, - Target: &awsSingleFloat32Pointer{}, - WantTarget: &awsSingleFloat32Pointer{ - Field1: aws.Float32(42), - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[tfSingleFloat32FieldLegacy](), 
reflect.TypeFor[*awsSingleFloat32Pointer]()), - infoConverting(reflect.TypeFor[tfSingleFloat32FieldLegacy](), reflect.TypeFor[*awsSingleFloat32Pointer]()), - traceMatchedFields("Field1", reflect.TypeFor[tfSingleFloat32FieldLegacy](), "Field1", reflect.TypeFor[*awsSingleFloat32Pointer]()), - infoConvertingWithPath("Field1", reflect.TypeFor[types.Float32](), "Field1", reflect.TypeFor[*float32]()), - debugUsingLegacyExpander("Field1", reflect.TypeFor[types.Float32](), "Field1", reflect.TypeFor[*float32]()), - }, - }, - "zero": { - Source: tfSingleFloat32FieldLegacy{ - Field1: types.Float32Value(0), - }, - Target: &awsSingleFloat32Pointer{}, - WantTarget: &awsSingleFloat32Pointer{ - Field1: nil, - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[tfSingleFloat32FieldLegacy](), reflect.TypeFor[*awsSingleFloat32Pointer]()), - infoConverting(reflect.TypeFor[tfSingleFloat32FieldLegacy](), reflect.TypeFor[*awsSingleFloat32Pointer]()), - traceMatchedFields("Field1", reflect.TypeFor[tfSingleFloat32FieldLegacy](), "Field1", reflect.TypeFor[*awsSingleFloat32Pointer]()), - infoConvertingWithPath("Field1", reflect.TypeFor[types.Float32](), "Field1", reflect.TypeFor[*float32]()), - debugUsingLegacyExpander("Field1", reflect.TypeFor[types.Float32](), "Field1", reflect.TypeFor[*float32]()), - }, - }, - "null": { - Source: tfSingleFloat32FieldLegacy{ - Field1: types.Float32Null(), - }, - Target: &awsSingleFloat32Pointer{}, - WantTarget: &awsSingleFloat32Pointer{ - Field1: nil, - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[tfSingleFloat32FieldLegacy](), reflect.TypeFor[*awsSingleFloat32Pointer]()), - infoConverting(reflect.TypeFor[tfSingleFloat32FieldLegacy](), reflect.TypeFor[*awsSingleFloat32Pointer]()), - traceMatchedFields("Field1", reflect.TypeFor[tfSingleFloat32FieldLegacy](), "Field1", reflect.TypeFor[*awsSingleFloat32Pointer]()), - infoConvertingWithPath("Field1", reflect.TypeFor[types.Float32](), "Field1", 
reflect.TypeFor[*float32]()), - traceExpandingNullValue("Field1", reflect.TypeFor[types.Float32](), "Field1", reflect.TypeFor[*float32]()), - // TODO: should log about legacy expander - }, - }, - }, - - // Float32 cannot be expanded to float64 - "Float32 to float64": { - "value": { - Source: tfSingleFloat32Field{ - Field1: types.Float32Value(42), - }, - Target: &awsSingleFloat64Value{}, - expectedDiags: diag.Diagnostics{ - diagExpandingIncompatibleTypes(reflect.TypeFor[types.Float32](), reflect.TypeFor[float64]()), - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[tfSingleFloat32Field](), reflect.TypeFor[*awsSingleFloat64Value]()), - infoConverting(reflect.TypeFor[tfSingleFloat32Field](), reflect.TypeFor[*awsSingleFloat64Value]()), - traceMatchedFields("Field1", reflect.TypeFor[tfSingleFloat32Field](), "Field1", reflect.TypeFor[*awsSingleFloat64Value]()), - infoConvertingWithPath("Field1", reflect.TypeFor[types.Float32](), "Field1", reflect.TypeFor[float64]()), - errorExpandingIncompatibleTypes("Field1", reflect.TypeFor[types.Float32](), "Field1", reflect.TypeFor[float64]()), - }, - }, - "zero": { - Source: tfSingleFloat32Field{ - Field1: types.Float32Value(0), - }, - Target: &awsSingleFloat64Value{}, - expectedDiags: diag.Diagnostics{ - diagExpandingIncompatibleTypes(reflect.TypeFor[types.Float32](), reflect.TypeFor[float64]()), - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[tfSingleFloat32Field](), reflect.TypeFor[*awsSingleFloat64Value]()), - infoConverting(reflect.TypeFor[tfSingleFloat32Field](), reflect.TypeFor[*awsSingleFloat64Value]()), - traceMatchedFields("Field1", reflect.TypeFor[tfSingleFloat32Field](), "Field1", reflect.TypeFor[*awsSingleFloat64Value]()), - infoConvertingWithPath("Field1", reflect.TypeFor[types.Float32](), "Field1", reflect.TypeFor[float64]()), - errorExpandingIncompatibleTypes("Field1", reflect.TypeFor[types.Float32](), "Field1", reflect.TypeFor[float64]()), - }, - }, - "null": { - 
// TODO: The test for a null value happens before type checking - Source: tfSingleFloat32Field{ - Field1: types.Float32Null(), - }, - Target: &awsSingleFloat64Value{}, - WantTarget: &awsSingleFloat64Value{ - Field1: 0, - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[tfSingleFloat32Field](), reflect.TypeFor[*awsSingleFloat64Value]()), - infoConverting(reflect.TypeFor[tfSingleFloat32Field](), reflect.TypeFor[*awsSingleFloat64Value]()), - traceMatchedFields("Field1", reflect.TypeFor[tfSingleFloat32Field](), "Field1", reflect.TypeFor[*awsSingleFloat64Value]()), - infoConvertingWithPath("Field1", reflect.TypeFor[types.Float32](), "Field1", reflect.TypeFor[float64]()), - traceExpandingNullValue("Field1", reflect.TypeFor[types.Float32](), "Field1", reflect.TypeFor[float64]()), - }, - }, - }, - - "legacy Float32 to float64": { - "value": { - Source: tfSingleFloat32FieldLegacy{ - Field1: types.Float32Value(42), - }, - Target: &awsSingleFloat64Value{}, - expectedDiags: diag.Diagnostics{ - diagExpandingIncompatibleTypes(reflect.TypeFor[types.Float32](), reflect.TypeFor[float64]()), - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[tfSingleFloat32FieldLegacy](), reflect.TypeFor[*awsSingleFloat64Value]()), - infoConverting(reflect.TypeFor[tfSingleFloat32FieldLegacy](), reflect.TypeFor[*awsSingleFloat64Value]()), - traceMatchedFields("Field1", reflect.TypeFor[tfSingleFloat32FieldLegacy](), "Field1", reflect.TypeFor[*awsSingleFloat64Value]()), - infoConvertingWithPath("Field1", reflect.TypeFor[types.Float32](), "Field1", reflect.TypeFor[float64]()), - errorExpandingIncompatibleTypes("Field1", reflect.TypeFor[types.Float32](), "Field1", reflect.TypeFor[float64]()), - }, - }, - "zero": { - Source: tfSingleFloat32FieldLegacy{ - Field1: types.Float32Value(0), - }, - Target: &awsSingleFloat64Value{}, - expectedDiags: diag.Diagnostics{ - diagExpandingIncompatibleTypes(reflect.TypeFor[types.Float32](), reflect.TypeFor[float64]()), - }, 
- expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[tfSingleFloat32FieldLegacy](), reflect.TypeFor[*awsSingleFloat64Value]()), - infoConverting(reflect.TypeFor[tfSingleFloat32FieldLegacy](), reflect.TypeFor[*awsSingleFloat64Value]()), - traceMatchedFields("Field1", reflect.TypeFor[tfSingleFloat32FieldLegacy](), "Field1", reflect.TypeFor[*awsSingleFloat64Value]()), - infoConvertingWithPath("Field1", reflect.TypeFor[types.Float32](), "Field1", reflect.TypeFor[float64]()), - errorExpandingIncompatibleTypes("Field1", reflect.TypeFor[types.Float32](), "Field1", reflect.TypeFor[float64]()), - }, - }, - "null": { - // TODO: The test for a null value happens before type checking - Source: tfSingleFloat32FieldLegacy{ - Field1: types.Float32Null(), - }, - Target: &awsSingleFloat64Value{}, - WantTarget: &awsSingleFloat64Value{ - Field1: 0, - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[tfSingleFloat32FieldLegacy](), reflect.TypeFor[*awsSingleFloat64Value]()), - infoConverting(reflect.TypeFor[tfSingleFloat32FieldLegacy](), reflect.TypeFor[*awsSingleFloat64Value]()), - traceMatchedFields("Field1", reflect.TypeFor[tfSingleFloat32FieldLegacy](), "Field1", reflect.TypeFor[*awsSingleFloat64Value]()), - infoConvertingWithPath("Field1", reflect.TypeFor[types.Float32](), "Field1", reflect.TypeFor[float64]()), - traceExpandingNullValue("Field1", reflect.TypeFor[types.Float32](), "Field1", reflect.TypeFor[float64]()), - }, - }, - }, - - "Float32 to *float64": { - "value": { - Source: tfSingleFloat32Field{ - Field1: types.Float32Value(42), - }, - Target: &awsSingleFloat64Pointer{}, - expectedDiags: diag.Diagnostics{ - diagExpandingIncompatibleTypes(reflect.TypeFor[types.Float32](), reflect.TypeFor[*float64]()), - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[tfSingleFloat32Field](), reflect.TypeFor[*awsSingleFloat64Pointer]()), - infoConverting(reflect.TypeFor[tfSingleFloat32Field](), 
reflect.TypeFor[*awsSingleFloat64Pointer]()), - traceMatchedFields("Field1", reflect.TypeFor[tfSingleFloat32Field](), "Field1", reflect.TypeFor[*awsSingleFloat64Pointer]()), - infoConvertingWithPath("Field1", reflect.TypeFor[types.Float32](), "Field1", reflect.TypeFor[*float64]()), - errorExpandingIncompatibleTypes("Field1", reflect.TypeFor[types.Float32](), "Field1", reflect.TypeFor[*float64]()), - }, - }, - "zero": { - Source: tfSingleFloat32Field{ - Field1: types.Float32Value(0), - }, - Target: &awsSingleFloat64Pointer{}, - expectedDiags: diag.Diagnostics{ - diagExpandingIncompatibleTypes(reflect.TypeFor[types.Float32](), reflect.TypeFor[*float64]()), - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[tfSingleFloat32Field](), reflect.TypeFor[*awsSingleFloat64Pointer]()), - infoConverting(reflect.TypeFor[tfSingleFloat32Field](), reflect.TypeFor[*awsSingleFloat64Pointer]()), - traceMatchedFields("Field1", reflect.TypeFor[tfSingleFloat32Field](), "Field1", reflect.TypeFor[*awsSingleFloat64Pointer]()), - infoConvertingWithPath("Field1", reflect.TypeFor[types.Float32](), "Field1", reflect.TypeFor[*float64]()), - errorExpandingIncompatibleTypes("Field1", reflect.TypeFor[types.Float32](), "Field1", reflect.TypeFor[*float64]()), - }, - }, - "null": { - // TODO: The test for a null value happens before type checking - Source: tfSingleFloat32Field{ - Field1: types.Float32Null(), - }, - Target: &awsSingleFloat64Pointer{}, - WantTarget: &awsSingleFloat64Pointer{ - Field1: nil, - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[tfSingleFloat32Field](), reflect.TypeFor[*awsSingleFloat64Pointer]()), - infoConverting(reflect.TypeFor[tfSingleFloat32Field](), reflect.TypeFor[*awsSingleFloat64Pointer]()), - traceMatchedFields("Field1", reflect.TypeFor[tfSingleFloat32Field](), "Field1", reflect.TypeFor[*awsSingleFloat64Pointer]()), - infoConvertingWithPath("Field1", reflect.TypeFor[types.Float32](), "Field1", 
reflect.TypeFor[*float64]()), - traceExpandingNullValue("Field1", reflect.TypeFor[types.Float32](), "Field1", reflect.TypeFor[*float64]()), - }, - }, - }, - - "legacy Float32 to *float64": { - "value": { - Source: tfSingleFloat32FieldLegacy{ - Field1: types.Float32Value(42), - }, - Target: &awsSingleFloat64Pointer{}, - expectedDiags: diag.Diagnostics{ - diagExpandingIncompatibleTypes(reflect.TypeFor[types.Float32](), reflect.TypeFor[*float64]()), - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[tfSingleFloat32FieldLegacy](), reflect.TypeFor[*awsSingleFloat64Pointer]()), - infoConverting(reflect.TypeFor[tfSingleFloat32FieldLegacy](), reflect.TypeFor[*awsSingleFloat64Pointer]()), - traceMatchedFields("Field1", reflect.TypeFor[tfSingleFloat32FieldLegacy](), "Field1", reflect.TypeFor[*awsSingleFloat64Pointer]()), - infoConvertingWithPath("Field1", reflect.TypeFor[types.Float32](), "Field1", reflect.TypeFor[*float64]()), - errorExpandingIncompatibleTypes("Field1", reflect.TypeFor[types.Float32](), "Field1", reflect.TypeFor[*float64]()), - }, - }, - "zero": { - Source: tfSingleFloat32FieldLegacy{ - Field1: types.Float32Value(0), - }, - Target: &awsSingleFloat64Pointer{}, - expectedDiags: diag.Diagnostics{ - diagExpandingIncompatibleTypes(reflect.TypeFor[types.Float32](), reflect.TypeFor[*float64]()), - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[tfSingleFloat32FieldLegacy](), reflect.TypeFor[*awsSingleFloat64Pointer]()), - infoConverting(reflect.TypeFor[tfSingleFloat32FieldLegacy](), reflect.TypeFor[*awsSingleFloat64Pointer]()), - traceMatchedFields("Field1", reflect.TypeFor[tfSingleFloat32FieldLegacy](), "Field1", reflect.TypeFor[*awsSingleFloat64Pointer]()), - infoConvertingWithPath("Field1", reflect.TypeFor[types.Float32](), "Field1", reflect.TypeFor[*float64]()), - errorExpandingIncompatibleTypes("Field1", reflect.TypeFor[types.Float32](), "Field1", reflect.TypeFor[*float64]()), - }, - }, - "null": { - // TODO: 
The test for a null value happens before type checking - Source: tfSingleFloat32FieldLegacy{ - Field1: types.Float32Null(), - }, - Target: &awsSingleFloat64Pointer{}, - WantTarget: &awsSingleFloat64Pointer{ - Field1: nil, - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[tfSingleFloat32FieldLegacy](), reflect.TypeFor[*awsSingleFloat64Pointer]()), - infoConverting(reflect.TypeFor[tfSingleFloat32FieldLegacy](), reflect.TypeFor[*awsSingleFloat64Pointer]()), - traceMatchedFields("Field1", reflect.TypeFor[tfSingleFloat32FieldLegacy](), "Field1", reflect.TypeFor[*awsSingleFloat64Pointer]()), - infoConvertingWithPath("Field1", reflect.TypeFor[types.Float32](), "Field1", reflect.TypeFor[*float64]()), - traceExpandingNullValue("Field1", reflect.TypeFor[types.Float32](), "Field1", reflect.TypeFor[*float64]()), - }, - }, - }, - } - - for testName, cases := range testCases { - t.Run(testName, func(t *testing.T) { - t.Parallel() - - runAutoExpandTestCases(t, cases) - }) - } -} - -func TestExpandInt64(t *testing.T) { - t.Parallel() - - testCases := map[string]autoFlexTestCases{ - "Int64 to int64": { - "value": { - Source: tfSingleInt64Field{ - Field1: types.Int64Value(42), - }, - Target: &awsSingleInt64Value{}, - WantTarget: &awsSingleInt64Value{ - Field1: 42, - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[tfSingleInt64Field](), reflect.TypeFor[*awsSingleInt64Value]()), - infoConverting(reflect.TypeFor[tfSingleInt64Field](), reflect.TypeFor[*awsSingleInt64Value]()), - traceMatchedFields("Field1", reflect.TypeFor[tfSingleInt64Field](), "Field1", reflect.TypeFor[*awsSingleInt64Value]()), - infoConvertingWithPath("Field1", reflect.TypeFor[types.Int64](), "Field1", reflect.TypeFor[int64]()), - }, - }, - "zero": { - Source: tfSingleInt64Field{ - Field1: types.Int64Value(0), - }, - Target: &awsSingleInt64Value{}, - WantTarget: &awsSingleInt64Value{ - Field1: 0, - }, - expectedLogLines: []map[string]any{ - 
infoExpanding(reflect.TypeFor[tfSingleInt64Field](), reflect.TypeFor[*awsSingleInt64Value]()), - infoConverting(reflect.TypeFor[tfSingleInt64Field](), reflect.TypeFor[*awsSingleInt64Value]()), - traceMatchedFields("Field1", reflect.TypeFor[tfSingleInt64Field](), "Field1", reflect.TypeFor[*awsSingleInt64Value]()), - infoConvertingWithPath("Field1", reflect.TypeFor[types.Int64](), "Field1", reflect.TypeFor[int64]()), - }, - }, - "null": { - Source: tfSingleInt64Field{ - Field1: types.Int64Null(), - }, - Target: &awsSingleInt64Value{}, - WantTarget: &awsSingleInt64Value{ - Field1: 0, - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[tfSingleInt64Field](), reflect.TypeFor[*awsSingleInt64Value]()), - infoConverting(reflect.TypeFor[tfSingleInt64Field](), reflect.TypeFor[*awsSingleInt64Value]()), - traceMatchedFields("Field1", reflect.TypeFor[tfSingleInt64Field](), "Field1", reflect.TypeFor[*awsSingleInt64Value]()), - infoConvertingWithPath("Field1", reflect.TypeFor[types.Int64](), "Field1", reflect.TypeFor[int64]()), - traceExpandingNullValue("Field1", reflect.TypeFor[types.Int64](), "Field1", reflect.TypeFor[int64]()), - }, - }, - }, - - "legacy Int64 to int64": { - "value": { - Source: tfSingleInt64FieldLegacy{ - Field1: types.Int64Value(42), - }, - Target: &awsSingleInt64Value{}, - WantTarget: &awsSingleInt64Value{ - Field1: 42, - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[tfSingleInt64FieldLegacy](), reflect.TypeFor[*awsSingleInt64Value]()), - infoConverting(reflect.TypeFor[tfSingleInt64FieldLegacy](), reflect.TypeFor[*awsSingleInt64Value]()), - traceMatchedFields("Field1", reflect.TypeFor[tfSingleInt64FieldLegacy](), "Field1", reflect.TypeFor[*awsSingleInt64Value]()), - infoConvertingWithPath("Field1", reflect.TypeFor[types.Int64](), "Field1", reflect.TypeFor[int64]()), - }, - }, - "zero": { - Source: tfSingleInt64FieldLegacy{ - Field1: types.Int64Value(0), - }, - Target: &awsSingleInt64Value{}, - WantTarget: 
&awsSingleInt64Value{ - Field1: 0, - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[tfSingleInt64FieldLegacy](), reflect.TypeFor[*awsSingleInt64Value]()), - infoConverting(reflect.TypeFor[tfSingleInt64FieldLegacy](), reflect.TypeFor[*awsSingleInt64Value]()), - traceMatchedFields("Field1", reflect.TypeFor[tfSingleInt64FieldLegacy](), "Field1", reflect.TypeFor[*awsSingleInt64Value]()), - infoConvertingWithPath("Field1", reflect.TypeFor[types.Int64](), "Field1", reflect.TypeFor[int64]()), - }, - }, - "null": { - Source: tfSingleInt64FieldLegacy{ - Field1: types.Int64Null(), - }, - Target: &awsSingleInt64Value{}, - WantTarget: &awsSingleInt64Value{ - Field1: 0, - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[tfSingleInt64FieldLegacy](), reflect.TypeFor[*awsSingleInt64Value]()), - infoConverting(reflect.TypeFor[tfSingleInt64FieldLegacy](), reflect.TypeFor[*awsSingleInt64Value]()), - traceMatchedFields("Field1", reflect.TypeFor[tfSingleInt64FieldLegacy](), "Field1", reflect.TypeFor[*awsSingleInt64Value]()), - infoConvertingWithPath("Field1", reflect.TypeFor[types.Int64](), "Field1", reflect.TypeFor[int64]()), - traceExpandingNullValue("Field1", reflect.TypeFor[types.Int64](), "Field1", reflect.TypeFor[int64]()), - }, - }, - }, - - "Int64 to *int64": { - "value": { - Source: tfSingleInt64Field{ - Field1: types.Int64Value(42), - }, - Target: &awsSingleInt64Pointer{}, - WantTarget: &awsSingleInt64Pointer{ - Field1: aws.Int64(42), - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[tfSingleInt64Field](), reflect.TypeFor[*awsSingleInt64Pointer]()), - infoConverting(reflect.TypeFor[tfSingleInt64Field](), reflect.TypeFor[*awsSingleInt64Pointer]()), - traceMatchedFields("Field1", reflect.TypeFor[tfSingleInt64Field](), "Field1", reflect.TypeFor[*awsSingleInt64Pointer]()), - infoConvertingWithPath("Field1", reflect.TypeFor[types.Int64](), "Field1", reflect.TypeFor[*int64]()), - }, - }, - "zero": { - 
Source: tfSingleInt64Field{ - Field1: types.Int64Value(0), - }, - Target: &awsSingleInt64Pointer{}, - WantTarget: &awsSingleInt64Pointer{ - Field1: aws.Int64(0), - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[tfSingleInt64Field](), reflect.TypeFor[*awsSingleInt64Pointer]()), - infoConverting(reflect.TypeFor[tfSingleInt64Field](), reflect.TypeFor[*awsSingleInt64Pointer]()), - traceMatchedFields("Field1", reflect.TypeFor[tfSingleInt64Field](), "Field1", reflect.TypeFor[*awsSingleInt64Pointer]()), - infoConvertingWithPath("Field1", reflect.TypeFor[types.Int64](), "Field1", reflect.TypeFor[*int64]()), - }, - }, - "null": { - Source: tfSingleInt64Field{ - Field1: types.Int64Null(), - }, - Target: &awsSingleInt64Pointer{}, - WantTarget: &awsSingleInt64Pointer{ - Field1: nil, - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[tfSingleInt64Field](), reflect.TypeFor[*awsSingleInt64Pointer]()), - infoConverting(reflect.TypeFor[tfSingleInt64Field](), reflect.TypeFor[*awsSingleInt64Pointer]()), - traceMatchedFields("Field1", reflect.TypeFor[tfSingleInt64Field](), "Field1", reflect.TypeFor[*awsSingleInt64Pointer]()), - infoConvertingWithPath("Field1", reflect.TypeFor[types.Int64](), "Field1", reflect.TypeFor[*int64]()), - traceExpandingNullValue("Field1", reflect.TypeFor[types.Int64](), "Field1", reflect.TypeFor[*int64]()), - }, - }, - }, - - "legacy Int64 to *int64": { - "value": { - Source: tfSingleInt64FieldLegacy{ - Field1: types.Int64Value(42), - }, - Target: &awsSingleInt64Pointer{}, - WantTarget: &awsSingleInt64Pointer{ - Field1: aws.Int64(42), - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[tfSingleInt64FieldLegacy](), reflect.TypeFor[*awsSingleInt64Pointer]()), - infoConverting(reflect.TypeFor[tfSingleInt64FieldLegacy](), reflect.TypeFor[*awsSingleInt64Pointer]()), - traceMatchedFields("Field1", reflect.TypeFor[tfSingleInt64FieldLegacy](), "Field1", 
reflect.TypeFor[*awsSingleInt64Pointer]()), - infoConvertingWithPath("Field1", reflect.TypeFor[types.Int64](), "Field1", reflect.TypeFor[*int64]()), - debugUsingLegacyExpander("Field1", reflect.TypeFor[types.Int64](), "Field1", reflect.TypeFor[*int64]()), - }, - }, - "zero": { - Source: tfSingleInt64FieldLegacy{ - Field1: types.Int64Value(0), - }, - Target: &awsSingleInt64Pointer{}, - WantTarget: &awsSingleInt64Pointer{ - Field1: nil, - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[tfSingleInt64FieldLegacy](), reflect.TypeFor[*awsSingleInt64Pointer]()), - infoConverting(reflect.TypeFor[tfSingleInt64FieldLegacy](), reflect.TypeFor[*awsSingleInt64Pointer]()), - traceMatchedFields("Field1", reflect.TypeFor[tfSingleInt64FieldLegacy](), "Field1", reflect.TypeFor[*awsSingleInt64Pointer]()), - infoConvertingWithPath("Field1", reflect.TypeFor[types.Int64](), "Field1", reflect.TypeFor[*int64]()), - debugUsingLegacyExpander("Field1", reflect.TypeFor[types.Int64](), "Field1", reflect.TypeFor[*int64]()), - }, - }, - "null": { - Source: tfSingleInt64FieldLegacy{ - Field1: types.Int64Null(), - }, - Target: &awsSingleInt64Pointer{}, - WantTarget: &awsSingleInt64Pointer{ - Field1: nil, - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[tfSingleInt64FieldLegacy](), reflect.TypeFor[*awsSingleInt64Pointer]()), - infoConverting(reflect.TypeFor[tfSingleInt64FieldLegacy](), reflect.TypeFor[*awsSingleInt64Pointer]()), - traceMatchedFields("Field1", reflect.TypeFor[tfSingleInt64FieldLegacy](), "Field1", reflect.TypeFor[*awsSingleInt64Pointer]()), - infoConvertingWithPath("Field1", reflect.TypeFor[types.Int64](), "Field1", reflect.TypeFor[*int64]()), - traceExpandingNullValue("Field1", reflect.TypeFor[types.Int64](), "Field1", reflect.TypeFor[*int64]()), - // TODO: should log about legacy expander - }, - }, - }, - - // For historical reasons, Int64 can be expanded to int32 values - "Int64 to int32": { - "value": { - Source: 
tfSingleInt64Field{ - Field1: types.Int64Value(42), - }, - Target: &awsSingleInt32Value{}, - WantTarget: &awsSingleInt32Value{ - Field1: 42, - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[tfSingleInt64Field](), reflect.TypeFor[*awsSingleInt32Value]()), - infoConverting(reflect.TypeFor[tfSingleInt64Field](), reflect.TypeFor[*awsSingleInt32Value]()), - traceMatchedFields("Field1", reflect.TypeFor[tfSingleInt64Field](), "Field1", reflect.TypeFor[*awsSingleInt32Value]()), - infoConvertingWithPath("Field1", reflect.TypeFor[types.Int64](), "Field1", reflect.TypeFor[int32]()), - }, - }, - "zero": { - Source: tfSingleInt64Field{ - Field1: types.Int64Value(0), - }, - Target: &awsSingleInt32Value{}, - WantTarget: &awsSingleInt32Value{ - Field1: 0, - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[tfSingleInt64Field](), reflect.TypeFor[*awsSingleInt32Value]()), - infoConverting(reflect.TypeFor[tfSingleInt64Field](), reflect.TypeFor[*awsSingleInt32Value]()), - traceMatchedFields("Field1", reflect.TypeFor[tfSingleInt64Field](), "Field1", reflect.TypeFor[*awsSingleInt32Value]()), - infoConvertingWithPath("Field1", reflect.TypeFor[types.Int64](), "Field1", reflect.TypeFor[int32]()), - }, - }, - "null": { - Source: tfSingleInt64Field{ - Field1: types.Int64Null(), - }, - Target: &awsSingleInt32Value{}, - WantTarget: &awsSingleInt32Value{ - Field1: 0, - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[tfSingleInt64Field](), reflect.TypeFor[*awsSingleInt32Value]()), - infoConverting(reflect.TypeFor[tfSingleInt64Field](), reflect.TypeFor[*awsSingleInt32Value]()), - traceMatchedFields("Field1", reflect.TypeFor[tfSingleInt64Field](), "Field1", reflect.TypeFor[*awsSingleInt32Value]()), - infoConvertingWithPath("Field1", reflect.TypeFor[types.Int64](), "Field1", reflect.TypeFor[int32]()), - traceExpandingNullValue("Field1", reflect.TypeFor[types.Int64](), "Field1", reflect.TypeFor[int32]()), - }, - }, - }, - - 
"legacy Int64 to int32": { - "value": { - Source: tfSingleInt64FieldLegacy{ - Field1: types.Int64Value(42), - }, - Target: &awsSingleInt32Value{}, - WantTarget: &awsSingleInt32Value{ - Field1: 42, - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[tfSingleInt64FieldLegacy](), reflect.TypeFor[*awsSingleInt32Value]()), - infoConverting(reflect.TypeFor[tfSingleInt64FieldLegacy](), reflect.TypeFor[*awsSingleInt32Value]()), - traceMatchedFields("Field1", reflect.TypeFor[tfSingleInt64FieldLegacy](), "Field1", reflect.TypeFor[*awsSingleInt32Value]()), - infoConvertingWithPath("Field1", reflect.TypeFor[types.Int64](), "Field1", reflect.TypeFor[int32]()), - }, - }, - "zero": { - Source: tfSingleInt64FieldLegacy{ - Field1: types.Int64Value(0), - }, - Target: &awsSingleInt32Value{}, - WantTarget: &awsSingleInt32Value{ - Field1: 0, - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[tfSingleInt64FieldLegacy](), reflect.TypeFor[*awsSingleInt32Value]()), - infoConverting(reflect.TypeFor[tfSingleInt64FieldLegacy](), reflect.TypeFor[*awsSingleInt32Value]()), - traceMatchedFields("Field1", reflect.TypeFor[tfSingleInt64FieldLegacy](), "Field1", reflect.TypeFor[*awsSingleInt32Value]()), - infoConvertingWithPath("Field1", reflect.TypeFor[types.Int64](), "Field1", reflect.TypeFor[int32]()), - }, - }, - "null": { - Source: tfSingleInt64FieldLegacy{ - Field1: types.Int64Null(), - }, - Target: &awsSingleInt32Value{}, - WantTarget: &awsSingleInt32Value{ - Field1: 0, - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[tfSingleInt64FieldLegacy](), reflect.TypeFor[*awsSingleInt32Value]()), - infoConverting(reflect.TypeFor[tfSingleInt64FieldLegacy](), reflect.TypeFor[*awsSingleInt32Value]()), - traceMatchedFields("Field1", reflect.TypeFor[tfSingleInt64FieldLegacy](), "Field1", reflect.TypeFor[*awsSingleInt32Value]()), - infoConvertingWithPath("Field1", reflect.TypeFor[types.Int64](), "Field1", reflect.TypeFor[int32]()), - 
traceExpandingNullValue("Field1", reflect.TypeFor[types.Int64](), "Field1", reflect.TypeFor[int32]()), - }, - }, - }, - - "Int64 to *int32": { - "value": { - Source: tfSingleInt64Field{ - Field1: types.Int64Value(42), - }, - Target: &awsSingleInt32Pointer{}, - WantTarget: &awsSingleInt32Pointer{ - Field1: aws.Int32(42), - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[tfSingleInt64Field](), reflect.TypeFor[*awsSingleInt32Pointer]()), - infoConverting(reflect.TypeFor[tfSingleInt64Field](), reflect.TypeFor[*awsSingleInt32Pointer]()), - traceMatchedFields("Field1", reflect.TypeFor[tfSingleInt64Field](), "Field1", reflect.TypeFor[*awsSingleInt32Pointer]()), - infoConvertingWithPath("Field1", reflect.TypeFor[types.Int64](), "Field1", reflect.TypeFor[*int32]()), - }, - }, - "zero": { - Source: tfSingleInt64Field{ - Field1: types.Int64Value(0), - }, - Target: &awsSingleInt32Pointer{}, - WantTarget: &awsSingleInt32Pointer{ - Field1: aws.Int32(0), - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[tfSingleInt64Field](), reflect.TypeFor[*awsSingleInt32Pointer]()), - infoConverting(reflect.TypeFor[tfSingleInt64Field](), reflect.TypeFor[*awsSingleInt32Pointer]()), - traceMatchedFields("Field1", reflect.TypeFor[tfSingleInt64Field](), "Field1", reflect.TypeFor[*awsSingleInt32Pointer]()), - infoConvertingWithPath("Field1", reflect.TypeFor[types.Int64](), "Field1", reflect.TypeFor[*int32]()), - }, - }, - "null": { - Source: tfSingleInt64Field{ - Field1: types.Int64Null(), - }, - Target: &awsSingleInt32Pointer{}, - WantTarget: &awsSingleInt32Pointer{ - Field1: nil, - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[tfSingleInt64Field](), reflect.TypeFor[*awsSingleInt32Pointer]()), - infoConverting(reflect.TypeFor[tfSingleInt64Field](), reflect.TypeFor[*awsSingleInt32Pointer]()), - traceMatchedFields("Field1", reflect.TypeFor[tfSingleInt64Field](), "Field1", reflect.TypeFor[*awsSingleInt32Pointer]()), - 
infoConvertingWithPath("Field1", reflect.TypeFor[types.Int64](), "Field1", reflect.TypeFor[*int32]()), - traceExpandingNullValue("Field1", reflect.TypeFor[types.Int64](), "Field1", reflect.TypeFor[*int32]()), - }, - }, - }, - - "legacy Int64 to *int32": { - "value": { - Source: tfSingleInt64FieldLegacy{ - Field1: types.Int64Value(42), - }, - Target: &awsSingleInt32Pointer{}, - WantTarget: &awsSingleInt32Pointer{ - Field1: aws.Int32(42), - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[tfSingleInt64FieldLegacy](), reflect.TypeFor[*awsSingleInt32Pointer]()), - infoConverting(reflect.TypeFor[tfSingleInt64FieldLegacy](), reflect.TypeFor[*awsSingleInt32Pointer]()), - traceMatchedFields("Field1", reflect.TypeFor[tfSingleInt64FieldLegacy](), "Field1", reflect.TypeFor[*awsSingleInt32Pointer]()), - infoConvertingWithPath("Field1", reflect.TypeFor[types.Int64](), "Field1", reflect.TypeFor[*int32]()), - debugUsingLegacyExpander("Field1", reflect.TypeFor[types.Int64](), "Field1", reflect.TypeFor[*int32]()), - }, - }, - "zero": { - Source: tfSingleInt64FieldLegacy{ - Field1: types.Int64Value(0), - }, - Target: &awsSingleInt32Pointer{}, - WantTarget: &awsSingleInt32Pointer{ - Field1: nil, - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[tfSingleInt64FieldLegacy](), reflect.TypeFor[*awsSingleInt32Pointer]()), - infoConverting(reflect.TypeFor[tfSingleInt64FieldLegacy](), reflect.TypeFor[*awsSingleInt32Pointer]()), - traceMatchedFields("Field1", reflect.TypeFor[tfSingleInt64FieldLegacy](), "Field1", reflect.TypeFor[*awsSingleInt32Pointer]()), - infoConvertingWithPath("Field1", reflect.TypeFor[types.Int64](), "Field1", reflect.TypeFor[*int32]()), - debugUsingLegacyExpander("Field1", reflect.TypeFor[types.Int64](), "Field1", reflect.TypeFor[*int32]()), - }, - }, - "null": { - Source: tfSingleInt64FieldLegacy{ - Field1: types.Int64Null(), - }, - Target: &awsSingleInt32Pointer{}, - WantTarget: &awsSingleInt32Pointer{ - Field1: nil, - 
}, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[tfSingleInt64FieldLegacy](), reflect.TypeFor[*awsSingleInt32Pointer]()), - infoConverting(reflect.TypeFor[tfSingleInt64FieldLegacy](), reflect.TypeFor[*awsSingleInt32Pointer]()), - traceMatchedFields("Field1", reflect.TypeFor[tfSingleInt64FieldLegacy](), "Field1", reflect.TypeFor[*awsSingleInt32Pointer]()), - infoConvertingWithPath("Field1", reflect.TypeFor[types.Int64](), "Field1", reflect.TypeFor[*int32]()), - traceExpandingNullValue("Field1", reflect.TypeFor[types.Int64](), "Field1", reflect.TypeFor[*int32]()), - // TODO: should log about legacy expander - }, - }, - }, - } - - for testName, cases := range testCases { - t.Run(testName, func(t *testing.T) { - t.Parallel() - - runAutoExpandTestCases(t, cases) - }) - } -} - -func TestExpandInt32(t *testing.T) { - t.Parallel() - - testCases := map[string]autoFlexTestCases{ - "Int32 to int32": { - "value": { - Source: tfSingleInt32Field{ - Field1: types.Int32Value(42), - }, - Target: &awsSingleInt32Value{}, - WantTarget: &awsSingleInt32Value{ - Field1: 42, - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[tfSingleInt32Field](), reflect.TypeFor[*awsSingleInt32Value]()), - infoConverting(reflect.TypeFor[tfSingleInt32Field](), reflect.TypeFor[*awsSingleInt32Value]()), - traceMatchedFields("Field1", reflect.TypeFor[tfSingleInt32Field](), "Field1", reflect.TypeFor[*awsSingleInt32Value]()), - infoConvertingWithPath("Field1", reflect.TypeFor[types.Int32](), "Field1", reflect.TypeFor[int32]()), - }, - }, - "zero": { - Source: tfSingleInt32Field{ - Field1: types.Int32Value(0), - }, - Target: &awsSingleInt32Value{}, - WantTarget: &awsSingleInt32Value{ - Field1: 0, - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[tfSingleInt32Field](), reflect.TypeFor[*awsSingleInt32Value]()), - infoConverting(reflect.TypeFor[tfSingleInt32Field](), reflect.TypeFor[*awsSingleInt32Value]()), - traceMatchedFields("Field1", 
reflect.TypeFor[tfSingleInt32Field](), "Field1", reflect.TypeFor[*awsSingleInt32Value]()), - infoConvertingWithPath("Field1", reflect.TypeFor[types.Int32](), "Field1", reflect.TypeFor[int32]()), - }, - }, - "null": { - Source: tfSingleInt32Field{ - Field1: types.Int32Null(), - }, - Target: &awsSingleInt32Value{}, - WantTarget: &awsSingleInt32Value{ - Field1: 0, - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[tfSingleInt32Field](), reflect.TypeFor[*awsSingleInt32Value]()), - infoConverting(reflect.TypeFor[tfSingleInt32Field](), reflect.TypeFor[*awsSingleInt32Value]()), - traceMatchedFields("Field1", reflect.TypeFor[tfSingleInt32Field](), "Field1", reflect.TypeFor[*awsSingleInt32Value]()), - infoConvertingWithPath("Field1", reflect.TypeFor[types.Int32](), "Field1", reflect.TypeFor[int32]()), - traceExpandingNullValue("Field1", reflect.TypeFor[types.Int32](), "Field1", reflect.TypeFor[int32]()), - }, - }, - }, - - "legacy Int32 to int32": { - "value": { - Source: tfSingleInt32FieldLegacy{ - Field1: types.Int32Value(42), - }, - Target: &awsSingleInt32Value{}, - WantTarget: &awsSingleInt32Value{ - Field1: 42, - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[tfSingleInt32FieldLegacy](), reflect.TypeFor[*awsSingleInt32Value]()), - infoConverting(reflect.TypeFor[tfSingleInt32FieldLegacy](), reflect.TypeFor[*awsSingleInt32Value]()), - traceMatchedFields("Field1", reflect.TypeFor[tfSingleInt32FieldLegacy](), "Field1", reflect.TypeFor[*awsSingleInt32Value]()), - infoConvertingWithPath("Field1", reflect.TypeFor[types.Int32](), "Field1", reflect.TypeFor[int32]()), - }, - }, - "zero": { - Source: tfSingleInt32FieldLegacy{ - Field1: types.Int32Value(0), - }, - Target: &awsSingleInt32Value{}, - WantTarget: &awsSingleInt32Value{ - Field1: 0, - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[tfSingleInt32FieldLegacy](), reflect.TypeFor[*awsSingleInt32Value]()), - 
infoConverting(reflect.TypeFor[tfSingleInt32FieldLegacy](), reflect.TypeFor[*awsSingleInt32Value]()), - traceMatchedFields("Field1", reflect.TypeFor[tfSingleInt32FieldLegacy](), "Field1", reflect.TypeFor[*awsSingleInt32Value]()), - infoConvertingWithPath("Field1", reflect.TypeFor[types.Int32](), "Field1", reflect.TypeFor[int32]()), - }, - }, - "null": { - Source: tfSingleInt32FieldLegacy{ - Field1: types.Int32Null(), - }, - Target: &awsSingleInt32Value{}, - WantTarget: &awsSingleInt32Value{ - Field1: 0, - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[tfSingleInt32FieldLegacy](), reflect.TypeFor[*awsSingleInt32Value]()), - infoConverting(reflect.TypeFor[tfSingleInt32FieldLegacy](), reflect.TypeFor[*awsSingleInt32Value]()), - traceMatchedFields("Field1", reflect.TypeFor[tfSingleInt32FieldLegacy](), "Field1", reflect.TypeFor[*awsSingleInt32Value]()), - infoConvertingWithPath("Field1", reflect.TypeFor[types.Int32](), "Field1", reflect.TypeFor[int32]()), - traceExpandingNullValue("Field1", reflect.TypeFor[types.Int32](), "Field1", reflect.TypeFor[int32]()), - }, - }, - }, - - "Int32 to *int32": { - "value": { - Source: tfSingleInt32Field{ - Field1: types.Int32Value(42), - }, - Target: &awsSingleInt32Pointer{}, - WantTarget: &awsSingleInt32Pointer{ - Field1: aws.Int32(42), - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[tfSingleInt32Field](), reflect.TypeFor[*awsSingleInt32Pointer]()), - infoConverting(reflect.TypeFor[tfSingleInt32Field](), reflect.TypeFor[*awsSingleInt32Pointer]()), - traceMatchedFields("Field1", reflect.TypeFor[tfSingleInt32Field](), "Field1", reflect.TypeFor[*awsSingleInt32Pointer]()), - infoConvertingWithPath("Field1", reflect.TypeFor[types.Int32](), "Field1", reflect.TypeFor[*int32]()), - }, - }, - "zero": { - Source: tfSingleInt32Field{ - Field1: types.Int32Value(0), - }, - Target: &awsSingleInt32Pointer{}, - WantTarget: &awsSingleInt32Pointer{ - Field1: aws.Int32(0), - }, - expectedLogLines: 
[]map[string]any{ - infoExpanding(reflect.TypeFor[tfSingleInt32Field](), reflect.TypeFor[*awsSingleInt32Pointer]()), - infoConverting(reflect.TypeFor[tfSingleInt32Field](), reflect.TypeFor[*awsSingleInt32Pointer]()), - traceMatchedFields("Field1", reflect.TypeFor[tfSingleInt32Field](), "Field1", reflect.TypeFor[*awsSingleInt32Pointer]()), - infoConvertingWithPath("Field1", reflect.TypeFor[types.Int32](), "Field1", reflect.TypeFor[*int32]()), - }, - }, - "null": { - Source: tfSingleInt32Field{ - Field1: types.Int32Null(), - }, - Target: &awsSingleInt32Pointer{}, - WantTarget: &awsSingleInt32Pointer{ - Field1: nil, - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[tfSingleInt32Field](), reflect.TypeFor[*awsSingleInt32Pointer]()), - infoConverting(reflect.TypeFor[tfSingleInt32Field](), reflect.TypeFor[*awsSingleInt32Pointer]()), - traceMatchedFields("Field1", reflect.TypeFor[tfSingleInt32Field](), "Field1", reflect.TypeFor[*awsSingleInt32Pointer]()), - infoConvertingWithPath("Field1", reflect.TypeFor[types.Int32](), "Field1", reflect.TypeFor[*int32]()), - traceExpandingNullValue("Field1", reflect.TypeFor[types.Int32](), "Field1", reflect.TypeFor[*int32]()), - }, - }, - }, - - "legacy Int32 to *int32": { - "value": { - Source: tfSingleInt32FieldLegacy{ - Field1: types.Int32Value(42), - }, - Target: &awsSingleInt32Pointer{}, - WantTarget: &awsSingleInt32Pointer{ - Field1: aws.Int32(42), - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[tfSingleInt32FieldLegacy](), reflect.TypeFor[*awsSingleInt32Pointer]()), - infoConverting(reflect.TypeFor[tfSingleInt32FieldLegacy](), reflect.TypeFor[*awsSingleInt32Pointer]()), - traceMatchedFields("Field1", reflect.TypeFor[tfSingleInt32FieldLegacy](), "Field1", reflect.TypeFor[*awsSingleInt32Pointer]()), - infoConvertingWithPath("Field1", reflect.TypeFor[types.Int32](), "Field1", reflect.TypeFor[*int32]()), - debugUsingLegacyExpander("Field1", reflect.TypeFor[types.Int32](), "Field1", 
reflect.TypeFor[*int32]()), - }, - }, - "zero": { - Source: tfSingleInt32FieldLegacy{ - Field1: types.Int32Value(0), - }, - Target: &awsSingleInt32Pointer{}, - WantTarget: &awsSingleInt32Pointer{ - Field1: nil, - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[tfSingleInt32FieldLegacy](), reflect.TypeFor[*awsSingleInt32Pointer]()), - infoConverting(reflect.TypeFor[tfSingleInt32FieldLegacy](), reflect.TypeFor[*awsSingleInt32Pointer]()), - traceMatchedFields("Field1", reflect.TypeFor[tfSingleInt32FieldLegacy](), "Field1", reflect.TypeFor[*awsSingleInt32Pointer]()), - infoConvertingWithPath("Field1", reflect.TypeFor[types.Int32](), "Field1", reflect.TypeFor[*int32]()), - debugUsingLegacyExpander("Field1", reflect.TypeFor[types.Int32](), "Field1", reflect.TypeFor[*int32]()), - }, - }, - "null": { - Source: tfSingleInt32FieldLegacy{ - Field1: types.Int32Null(), - }, - Target: &awsSingleInt32Pointer{}, - WantTarget: &awsSingleInt32Pointer{ - Field1: nil, - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[tfSingleInt32FieldLegacy](), reflect.TypeFor[*awsSingleInt32Pointer]()), - infoConverting(reflect.TypeFor[tfSingleInt32FieldLegacy](), reflect.TypeFor[*awsSingleInt32Pointer]()), - traceMatchedFields("Field1", reflect.TypeFor[tfSingleInt32FieldLegacy](), "Field1", reflect.TypeFor[*awsSingleInt32Pointer]()), - infoConvertingWithPath("Field1", reflect.TypeFor[types.Int32](), "Field1", reflect.TypeFor[*int32]()), - traceExpandingNullValue("Field1", reflect.TypeFor[types.Int32](), "Field1", reflect.TypeFor[*int32]()), - // TODO: should log about legacy expander - }, - }, - }, - - // Int32 cannot be expanded to int64 - "Int32 to int64": { - "value": { - Source: tfSingleInt32Field{ - Field1: types.Int32Value(42), - }, - Target: &awsSingleInt64Value{}, - expectedDiags: diag.Diagnostics{ - diagExpandingIncompatibleTypes(reflect.TypeFor[types.Int32](), reflect.TypeFor[int64]()), - }, - expectedLogLines: []map[string]any{ - 
infoExpanding(reflect.TypeFor[tfSingleInt32Field](), reflect.TypeFor[*awsSingleInt64Value]()), - infoConverting(reflect.TypeFor[tfSingleInt32Field](), reflect.TypeFor[*awsSingleInt64Value]()), - traceMatchedFields("Field1", reflect.TypeFor[tfSingleInt32Field](), "Field1", reflect.TypeFor[*awsSingleInt64Value]()), - infoConvertingWithPath("Field1", reflect.TypeFor[types.Int32](), "Field1", reflect.TypeFor[int64]()), - errorExpandingIncompatibleTypes("Field1", reflect.TypeFor[types.Int32](), "Field1", reflect.TypeFor[int64]()), - }, - }, - "zero": { - Source: tfSingleInt32Field{ - Field1: types.Int32Value(0), - }, - Target: &awsSingleInt64Value{}, - expectedDiags: diag.Diagnostics{ - diagExpandingIncompatibleTypes(reflect.TypeFor[types.Int32](), reflect.TypeFor[int64]()), - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[tfSingleInt32Field](), reflect.TypeFor[*awsSingleInt64Value]()), - infoConverting(reflect.TypeFor[tfSingleInt32Field](), reflect.TypeFor[*awsSingleInt64Value]()), - traceMatchedFields("Field1", reflect.TypeFor[tfSingleInt32Field](), "Field1", reflect.TypeFor[*awsSingleInt64Value]()), - infoConvertingWithPath("Field1", reflect.TypeFor[types.Int32](), "Field1", reflect.TypeFor[int64]()), - errorExpandingIncompatibleTypes("Field1", reflect.TypeFor[types.Int32](), "Field1", reflect.TypeFor[int64]()), - }, - }, - "null": { - // TODO: The test for a null value happens before type checking - Source: tfSingleInt32Field{ - Field1: types.Int32Null(), - }, - Target: &awsSingleInt64Value{}, - WantTarget: &awsSingleInt64Value{}, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[tfSingleInt32Field](), reflect.TypeFor[*awsSingleInt64Value]()), - infoConverting(reflect.TypeFor[tfSingleInt32Field](), reflect.TypeFor[*awsSingleInt64Value]()), - traceMatchedFields("Field1", reflect.TypeFor[tfSingleInt32Field](), "Field1", reflect.TypeFor[*awsSingleInt64Value]()), - infoConvertingWithPath("Field1", 
reflect.TypeFor[types.Int32](), "Field1", reflect.TypeFor[int64]()), - traceExpandingNullValue("Field1", reflect.TypeFor[types.Int32](), "Field1", reflect.TypeFor[int64]()), - }, - }, - }, - - "legacy Int32 to int64": { - "value": { - Source: tfSingleInt32FieldLegacy{ - Field1: types.Int32Value(42), - }, - Target: &awsSingleInt64Value{}, - expectedDiags: diag.Diagnostics{ - diagExpandingIncompatibleTypes(reflect.TypeFor[types.Int32](), reflect.TypeFor[int64]()), - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[tfSingleInt32FieldLegacy](), reflect.TypeFor[*awsSingleInt64Value]()), - infoConverting(reflect.TypeFor[tfSingleInt32FieldLegacy](), reflect.TypeFor[*awsSingleInt64Value]()), - traceMatchedFields("Field1", reflect.TypeFor[tfSingleInt32FieldLegacy](), "Field1", reflect.TypeFor[*awsSingleInt64Value]()), - infoConvertingWithPath("Field1", reflect.TypeFor[types.Int32](), "Field1", reflect.TypeFor[int64]()), - errorExpandingIncompatibleTypes("Field1", reflect.TypeFor[types.Int32](), "Field1", reflect.TypeFor[int64]()), - }, - }, - "zero": { - Source: tfSingleInt32FieldLegacy{ - Field1: types.Int32Value(0), - }, - Target: &awsSingleInt64Value{}, - expectedDiags: diag.Diagnostics{ - diagExpandingIncompatibleTypes(reflect.TypeFor[types.Int32](), reflect.TypeFor[int64]()), - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[tfSingleInt32FieldLegacy](), reflect.TypeFor[*awsSingleInt64Value]()), - infoConverting(reflect.TypeFor[tfSingleInt32FieldLegacy](), reflect.TypeFor[*awsSingleInt64Value]()), - traceMatchedFields("Field1", reflect.TypeFor[tfSingleInt32FieldLegacy](), "Field1", reflect.TypeFor[*awsSingleInt64Value]()), - infoConvertingWithPath("Field1", reflect.TypeFor[types.Int32](), "Field1", reflect.TypeFor[int64]()), - errorExpandingIncompatibleTypes("Field1", reflect.TypeFor[types.Int32](), "Field1", reflect.TypeFor[int64]()), - }, - }, - "null": { - // TODO: The test for a null value happens before type checking 
- Source: tfSingleInt32FieldLegacy{ - Field1: types.Int32Null(), - }, - Target: &awsSingleInt64Value{}, - WantTarget: &awsSingleInt64Value{}, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[tfSingleInt32FieldLegacy](), reflect.TypeFor[*awsSingleInt64Value]()), - infoConverting(reflect.TypeFor[tfSingleInt32FieldLegacy](), reflect.TypeFor[*awsSingleInt64Value]()), - traceMatchedFields("Field1", reflect.TypeFor[tfSingleInt32FieldLegacy](), "Field1", reflect.TypeFor[*awsSingleInt64Value]()), - infoConvertingWithPath("Field1", reflect.TypeFor[types.Int32](), "Field1", reflect.TypeFor[int64]()), - traceExpandingNullValue("Field1", reflect.TypeFor[types.Int32](), "Field1", reflect.TypeFor[int64]()), - }, - }, - }, - - "Int32 to *int64": { - "value": { - Source: tfSingleInt32Field{ - Field1: types.Int32Value(42), - }, - Target: &awsSingleInt64Pointer{}, - expectedDiags: diag.Diagnostics{ - diagExpandingIncompatibleTypes(reflect.TypeFor[types.Int32](), reflect.TypeFor[*int64]()), - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[tfSingleInt32Field](), reflect.TypeFor[*awsSingleInt64Pointer]()), - infoConverting(reflect.TypeFor[tfSingleInt32Field](), reflect.TypeFor[*awsSingleInt64Pointer]()), - traceMatchedFields("Field1", reflect.TypeFor[tfSingleInt32Field](), "Field1", reflect.TypeFor[*awsSingleInt64Pointer]()), - infoConvertingWithPath("Field1", reflect.TypeFor[types.Int32](), "Field1", reflect.TypeFor[*int64]()), - errorExpandingIncompatibleTypes("Field1", reflect.TypeFor[types.Int32](), "Field1", reflect.TypeFor[*int64]()), - }, - }, - "zero": { - Source: tfSingleInt32Field{ - Field1: types.Int32Value(0), - }, - Target: &awsSingleInt64Pointer{}, - expectedDiags: diag.Diagnostics{ - diagExpandingIncompatibleTypes(reflect.TypeFor[types.Int32](), reflect.TypeFor[*int64]()), - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[tfSingleInt32Field](), reflect.TypeFor[*awsSingleInt64Pointer]()), - 
infoConverting(reflect.TypeFor[tfSingleInt32Field](), reflect.TypeFor[*awsSingleInt64Pointer]()), - traceMatchedFields("Field1", reflect.TypeFor[tfSingleInt32Field](), "Field1", reflect.TypeFor[*awsSingleInt64Pointer]()), - infoConvertingWithPath("Field1", reflect.TypeFor[types.Int32](), "Field1", reflect.TypeFor[*int64]()), - errorExpandingIncompatibleTypes("Field1", reflect.TypeFor[types.Int32](), "Field1", reflect.TypeFor[*int64]()), - }, - }, - "null": { - // TODO: The test for a null value happens before type checking - Source: tfSingleInt32Field{ - Field1: types.Int32Null(), - }, - Target: &awsSingleInt64Pointer{}, - WantTarget: &awsSingleInt64Pointer{}, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[tfSingleInt32Field](), reflect.TypeFor[*awsSingleInt64Pointer]()), - infoConverting(reflect.TypeFor[tfSingleInt32Field](), reflect.TypeFor[*awsSingleInt64Pointer]()), - traceMatchedFields("Field1", reflect.TypeFor[tfSingleInt32Field](), "Field1", reflect.TypeFor[*awsSingleInt64Pointer]()), - infoConvertingWithPath("Field1", reflect.TypeFor[types.Int32](), "Field1", reflect.TypeFor[*int64]()), - traceExpandingNullValue("Field1", reflect.TypeFor[types.Int32](), "Field1", reflect.TypeFor[*int64]()), - }, - }, - }, - - "legacy Int32 to *int64": { - "value": { - Source: tfSingleInt32FieldLegacy{ - Field1: types.Int32Value(42), - }, - Target: &awsSingleInt64Pointer{}, - expectedDiags: diag.Diagnostics{ - diagExpandingIncompatibleTypes(reflect.TypeFor[types.Int32](), reflect.TypeFor[*int64]()), - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[tfSingleInt32FieldLegacy](), reflect.TypeFor[*awsSingleInt64Pointer]()), - infoConverting(reflect.TypeFor[tfSingleInt32FieldLegacy](), reflect.TypeFor[*awsSingleInt64Pointer]()), - traceMatchedFields("Field1", reflect.TypeFor[tfSingleInt32FieldLegacy](), "Field1", reflect.TypeFor[*awsSingleInt64Pointer]()), - infoConvertingWithPath("Field1", reflect.TypeFor[types.Int32](), "Field1", 
reflect.TypeFor[*int64]()), - errorExpandingIncompatibleTypes("Field1", reflect.TypeFor[types.Int32](), "Field1", reflect.TypeFor[*int64]()), - }, - }, - "zero": { - Source: tfSingleInt32FieldLegacy{ - Field1: types.Int32Value(0), - }, - Target: &awsSingleInt64Pointer{}, - expectedDiags: diag.Diagnostics{ - diagExpandingIncompatibleTypes(reflect.TypeFor[types.Int32](), reflect.TypeFor[*int64]()), - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[tfSingleInt32FieldLegacy](), reflect.TypeFor[*awsSingleInt64Pointer]()), - infoConverting(reflect.TypeFor[tfSingleInt32FieldLegacy](), reflect.TypeFor[*awsSingleInt64Pointer]()), - traceMatchedFields("Field1", reflect.TypeFor[tfSingleInt32FieldLegacy](), "Field1", reflect.TypeFor[*awsSingleInt64Pointer]()), - infoConvertingWithPath("Field1", reflect.TypeFor[types.Int32](), "Field1", reflect.TypeFor[*int64]()), - errorExpandingIncompatibleTypes("Field1", reflect.TypeFor[types.Int32](), "Field1", reflect.TypeFor[*int64]()), - }, - }, - "null": { - // TODO: The test for a null value happens before type checking - Source: tfSingleInt32FieldLegacy{ - Field1: types.Int32Null(), - }, - Target: &awsSingleInt64Pointer{}, - WantTarget: &awsSingleInt64Pointer{}, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[tfSingleInt32FieldLegacy](), reflect.TypeFor[*awsSingleInt64Pointer]()), - infoConverting(reflect.TypeFor[tfSingleInt32FieldLegacy](), reflect.TypeFor[*awsSingleInt64Pointer]()), - traceMatchedFields("Field1", reflect.TypeFor[tfSingleInt32FieldLegacy](), "Field1", reflect.TypeFor[*awsSingleInt64Pointer]()), - infoConvertingWithPath("Field1", reflect.TypeFor[types.Int32](), "Field1", reflect.TypeFor[*int64]()), - traceExpandingNullValue("Field1", reflect.TypeFor[types.Int32](), "Field1", reflect.TypeFor[*int64]()), - }, - }, - }, - } - - for testName, cases := range testCases { - t.Run(testName, func(t *testing.T) { - t.Parallel() - - runAutoExpandTestCases(t, cases) - }) - } -} - 
-func TestExpandString(t *testing.T) { - t.Parallel() - - testCases := map[string]autoFlexTestCases{ - "String to string": { - "value": { - Source: tfSingleStringField{ - Field1: types.StringValue("value"), - }, - Target: &awsSingleStringValue{}, - WantTarget: &awsSingleStringValue{ - Field1: "value", - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[tfSingleStringField](), reflect.TypeFor[*awsSingleStringValue]()), - infoConverting(reflect.TypeFor[tfSingleStringField](), reflect.TypeFor[*awsSingleStringValue]()), - traceMatchedFields("Field1", reflect.TypeFor[tfSingleStringField](), "Field1", reflect.TypeFor[*awsSingleStringValue]()), - infoConvertingWithPath("Field1", reflect.TypeFor[types.String](), "Field1", reflect.TypeFor[string]()), - }, - }, - "empty": { - Source: tfSingleStringField{ - Field1: types.StringValue(""), - }, - Target: &awsSingleStringValue{}, - WantTarget: &awsSingleStringValue{ - Field1: "", - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[tfSingleStringField](), reflect.TypeFor[*awsSingleStringValue]()), - infoConverting(reflect.TypeFor[tfSingleStringField](), reflect.TypeFor[*awsSingleStringValue]()), - traceMatchedFields("Field1", reflect.TypeFor[tfSingleStringField](), "Field1", reflect.TypeFor[*awsSingleStringValue]()), - infoConvertingWithPath("Field1", reflect.TypeFor[types.String](), "Field1", reflect.TypeFor[string]()), - }, - }, - "null": { - Source: tfSingleStringField{ - Field1: types.StringNull(), - }, - Target: &awsSingleStringValue{}, - WantTarget: &awsSingleStringValue{ - Field1: "", - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[tfSingleStringField](), reflect.TypeFor[*awsSingleStringValue]()), - infoConverting(reflect.TypeFor[tfSingleStringField](), reflect.TypeFor[*awsSingleStringValue]()), - traceMatchedFields("Field1", reflect.TypeFor[tfSingleStringField](), "Field1", reflect.TypeFor[*awsSingleStringValue]()), - infoConvertingWithPath("Field1", 
reflect.TypeFor[types.String](), "Field1", reflect.TypeFor[string]()), - traceExpandingNullValue("Field1", reflect.TypeFor[types.String](), "Field1", reflect.TypeFor[string]()), - }, - }, - }, - - "legacy String to string": { - "value": { - Source: tfSingleStringFieldLegacy{ - Field1: types.StringValue("value"), - }, - Target: &awsSingleStringValue{}, - WantTarget: &awsSingleStringValue{ - Field1: "value", - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[tfSingleStringFieldLegacy](), reflect.TypeFor[*awsSingleStringValue]()), - infoConverting(reflect.TypeFor[tfSingleStringFieldLegacy](), reflect.TypeFor[*awsSingleStringValue]()), - traceMatchedFields("Field1", reflect.TypeFor[tfSingleStringFieldLegacy](), "Field1", reflect.TypeFor[*awsSingleStringValue]()), - infoConvertingWithPath("Field1", reflect.TypeFor[types.String](), "Field1", reflect.TypeFor[string]()), - }, - }, - "empty": { - Source: tfSingleStringFieldLegacy{ - Field1: types.StringValue(""), - }, - Target: &awsSingleStringValue{}, - WantTarget: &awsSingleStringValue{ - Field1: "", - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[tfSingleStringFieldLegacy](), reflect.TypeFor[*awsSingleStringValue]()), - infoConverting(reflect.TypeFor[tfSingleStringFieldLegacy](), reflect.TypeFor[*awsSingleStringValue]()), - traceMatchedFields("Field1", reflect.TypeFor[tfSingleStringFieldLegacy](), "Field1", reflect.TypeFor[*awsSingleStringValue]()), - infoConvertingWithPath("Field1", reflect.TypeFor[types.String](), "Field1", reflect.TypeFor[string]()), - }, - }, - "null": { - Source: tfSingleStringFieldLegacy{ - Field1: types.StringNull(), - }, - Target: &awsSingleStringValue{}, - WantTarget: &awsSingleStringValue{ - Field1: "", - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[tfSingleStringFieldLegacy](), reflect.TypeFor[*awsSingleStringValue]()), - infoConverting(reflect.TypeFor[tfSingleStringFieldLegacy](), 
reflect.TypeFor[*awsSingleStringValue]()), - traceMatchedFields("Field1", reflect.TypeFor[tfSingleStringFieldLegacy](), "Field1", reflect.TypeFor[*awsSingleStringValue]()), - infoConvertingWithPath("Field1", reflect.TypeFor[types.String](), "Field1", reflect.TypeFor[string]()), - traceExpandingNullValue("Field1", reflect.TypeFor[types.String](), "Field1", reflect.TypeFor[string]()), - }, - }, - }, - - "String to *string": { - "value": { - Source: tfSingleStringField{ - Field1: types.StringValue("value"), - }, - Target: &awsSingleStringPointer{}, - WantTarget: &awsSingleStringPointer{ - Field1: aws.String("value"), - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[tfSingleStringField](), reflect.TypeFor[*awsSingleStringPointer]()), - infoConverting(reflect.TypeFor[tfSingleStringField](), reflect.TypeFor[*awsSingleStringPointer]()), - traceMatchedFields("Field1", reflect.TypeFor[tfSingleStringField](), "Field1", reflect.TypeFor[*awsSingleStringPointer]()), - infoConvertingWithPath("Field1", reflect.TypeFor[types.String](), "Field1", reflect.TypeFor[*string]()), - }, - }, - "empty": { - Source: tfSingleStringField{ - Field1: types.StringValue(""), - }, - Target: &awsSingleStringPointer{}, - WantTarget: &awsSingleStringPointer{ - Field1: aws.String(""), - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[tfSingleStringField](), reflect.TypeFor[*awsSingleStringPointer]()), - infoConverting(reflect.TypeFor[tfSingleStringField](), reflect.TypeFor[*awsSingleStringPointer]()), - traceMatchedFields("Field1", reflect.TypeFor[tfSingleStringField](), "Field1", reflect.TypeFor[*awsSingleStringPointer]()), - infoConvertingWithPath("Field1", reflect.TypeFor[types.String](), "Field1", reflect.TypeFor[*string]()), - }, - }, - "null": { - Source: tfSingleStringField{ - Field1: types.StringNull(), - }, - Target: &awsSingleStringPointer{}, - WantTarget: &awsSingleStringPointer{ - Field1: nil, - }, - expectedLogLines: []map[string]any{ - 
infoExpanding(reflect.TypeFor[tfSingleStringField](), reflect.TypeFor[*awsSingleStringPointer]()), - infoConverting(reflect.TypeFor[tfSingleStringField](), reflect.TypeFor[*awsSingleStringPointer]()), - traceMatchedFields("Field1", reflect.TypeFor[tfSingleStringField](), "Field1", reflect.TypeFor[*awsSingleStringPointer]()), - infoConvertingWithPath("Field1", reflect.TypeFor[types.String](), "Field1", reflect.TypeFor[*string]()), - traceExpandingNullValue("Field1", reflect.TypeFor[types.String](), "Field1", reflect.TypeFor[*string]()), - }, - }, - }, - - "legacy String to *string": { - "value": { - Source: tfSingleStringFieldLegacy{ - Field1: types.StringValue("value"), - }, - Target: &awsSingleStringPointer{}, - WantTarget: &awsSingleStringPointer{ - Field1: aws.String("value"), - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[tfSingleStringFieldLegacy](), reflect.TypeFor[*awsSingleStringPointer]()), - infoConverting(reflect.TypeFor[tfSingleStringFieldLegacy](), reflect.TypeFor[*awsSingleStringPointer]()), - traceMatchedFields("Field1", reflect.TypeFor[tfSingleStringFieldLegacy](), "Field1", reflect.TypeFor[*awsSingleStringPointer]()), - infoConvertingWithPath("Field1", reflect.TypeFor[types.String](), "Field1", reflect.TypeFor[*string]()), - debugUsingLegacyExpander("Field1", reflect.TypeFor[types.String](), "Field1", reflect.TypeFor[*string]()), - }, - }, - "empty": { - Source: tfSingleStringFieldLegacy{ - Field1: types.StringValue(""), - }, - Target: &awsSingleStringPointer{}, - WantTarget: &awsSingleStringPointer{ - Field1: nil, - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[tfSingleStringFieldLegacy](), reflect.TypeFor[*awsSingleStringPointer]()), - infoConverting(reflect.TypeFor[tfSingleStringFieldLegacy](), reflect.TypeFor[*awsSingleStringPointer]()), - traceMatchedFields("Field1", reflect.TypeFor[tfSingleStringFieldLegacy](), "Field1", reflect.TypeFor[*awsSingleStringPointer]()), - 
infoConvertingWithPath("Field1", reflect.TypeFor[types.String](), "Field1", reflect.TypeFor[*string]()), - debugUsingLegacyExpander("Field1", reflect.TypeFor[types.String](), "Field1", reflect.TypeFor[*string]()), - }, - }, - "null": { - Source: tfSingleStringFieldLegacy{ - Field1: types.StringNull(), - }, - Target: &awsSingleStringPointer{}, - WantTarget: &awsSingleStringPointer{ - Field1: nil, - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[tfSingleStringFieldLegacy](), reflect.TypeFor[*awsSingleStringPointer]()), - infoConverting(reflect.TypeFor[tfSingleStringFieldLegacy](), reflect.TypeFor[*awsSingleStringPointer]()), - traceMatchedFields("Field1", reflect.TypeFor[tfSingleStringFieldLegacy](), "Field1", reflect.TypeFor[*awsSingleStringPointer]()), - infoConvertingWithPath("Field1", reflect.TypeFor[types.String](), "Field1", reflect.TypeFor[*string]()), - traceExpandingNullValue("Field1", reflect.TypeFor[types.String](), "Field1", reflect.TypeFor[*string]()), - // TODO: should log about legacy expander - }, - }, - }, - } - - for testName, cases := range testCases { - t.Run(testName, func(t *testing.T) { - t.Parallel() - - runAutoExpandTestCases(t, cases) - }) - } -} - -func TestExpandSimpleSingleNestedBlock(t *testing.T) { - t.Parallel() - - type tf01 struct { - Field1 types.String `tfsdk:"field1"` - Field2 types.Int64 `tfsdk:"field2"` - } - type aws01 struct { - Field1 *string - Field2 int64 - } - - type tf02 struct { - Field1 fwtypes.ObjectValueOf[tf01] `tfsdk:"field1"` - } - type aws02 struct { - Field1 *aws01 - } - type aws03 struct { - Field1 aws01 - } - - ctx := context.Background() - testCases := autoFlexTestCases{ - "single nested block pointer": { - Source: &tf02{Field1: fwtypes.NewObjectValueOfMust[tf01](ctx, &tf01{Field1: types.StringValue("a"), Field2: types.Int64Value(1)})}, - Target: &aws02{}, - WantTarget: &aws02{Field1: &aws01{Field1: aws.String("a"), Field2: 1}}, - expectedLogLines: []map[string]any{ - 
infoExpanding(reflect.TypeFor[*tf02](), reflect.TypeFor[*aws02]()), - infoConverting(reflect.TypeFor[tf02](), reflect.TypeFor[*aws02]()), - traceMatchedFields("Field1", reflect.TypeFor[tf02](), "Field1", reflect.TypeFor[*aws02]()), - infoConvertingWithPath("Field1", reflect.TypeFor[fwtypes.ObjectValueOf[tf01]](), "Field1", reflect.TypeFor[*aws01]()), - traceMatchedFieldsWithPath("Field1", "Field1", reflect.TypeFor[tf01](), "Field1", "Field1", reflect.TypeFor[*aws01]()), - infoConvertingWithPath("Field1.Field1", reflect.TypeFor[types.String](), "Field1.Field1", reflect.TypeFor[*string]()), - traceMatchedFieldsWithPath("Field1", "Field2", reflect.TypeFor[tf01](), "Field1", "Field2", reflect.TypeFor[*aws01]()), - infoConvertingWithPath("Field1.Field2", reflect.TypeFor[types.Int64](), "Field1.Field2", reflect.TypeFor[int64]()), - }, - }, - "single nested block nil": { - Source: &tf02{Field1: fwtypes.NewObjectValueOfNull[tf01](ctx)}, - Target: &aws02{}, - WantTarget: &aws02{}, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[*tf02](), reflect.TypeFor[*aws02]()), - infoConverting(reflect.TypeFor[tf02](), reflect.TypeFor[*aws02]()), - traceMatchedFields("Field1", reflect.TypeFor[tf02](), "Field1", reflect.TypeFor[*aws02]()), - infoConvertingWithPath("Field1", reflect.TypeFor[fwtypes.ObjectValueOf[tf01]](), "Field1", reflect.TypeFor[*aws01]()), - traceExpandingNullValue("Field1", reflect.TypeFor[fwtypes.ObjectValueOf[tf01]](), "Field1", reflect.TypeFor[*aws01]()), - }, - }, - "single nested block value": { - Source: &tf02{Field1: fwtypes.NewObjectValueOfMust[tf01](ctx, &tf01{Field1: types.StringValue("a"), Field2: types.Int64Value(1)})}, - Target: &aws03{}, - WantTarget: &aws03{Field1: aws01{Field1: aws.String("a"), Field2: 1}}, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[*tf02](), reflect.TypeFor[*aws03]()), - infoConverting(reflect.TypeFor[tf02](), reflect.TypeFor[*aws03]()), - traceMatchedFields("Field1", 
reflect.TypeFor[tf02](), "Field1", reflect.TypeFor[*aws03]()), - infoConvertingWithPath("Field1", reflect.TypeFor[fwtypes.ObjectValueOf[tf01]](), "Field1", reflect.TypeFor[aws01]()), - traceMatchedFieldsWithPath("Field1", "Field1", reflect.TypeFor[tf01](), "Field1", "Field1", reflect.TypeFor[*aws01]()), - infoConvertingWithPath("Field1.Field1", reflect.TypeFor[types.String](), "Field1.Field1", reflect.TypeFor[*string]()), - traceMatchedFieldsWithPath("Field1", "Field2", reflect.TypeFor[tf01](), "Field1", "Field2", reflect.TypeFor[*aws01]()), - infoConvertingWithPath("Field1.Field2", reflect.TypeFor[types.Int64](), "Field1.Field2", reflect.TypeFor[int64]()), - }, - }, - } - runAutoExpandTestCases(t, testCases) -} - -func TestExpandComplexSingleNestedBlock(t *testing.T) { - t.Parallel() - - type tf01 struct { - Field1 types.Bool `tfsdk:"field1"` - Field2 fwtypes.ListValueOf[types.String] `tfsdk:"field2"` - } - type aws01 struct { - Field1 bool - Field2 []string - } - - type tf02 struct { - Field1 fwtypes.ObjectValueOf[tf01] `tfsdk:"field1"` - } - type aws02 struct { - Field1 *aws01 - } - - type tf03 struct { - Field1 fwtypes.ObjectValueOf[tf02] `tfsdk:"field1"` - } - type aws03 struct { - Field1 *aws02 - } - - ctx := context.Background() - testCases := autoFlexTestCases{ - "single nested block pointer": { - Source: &tf03{ - Field1: fwtypes.NewObjectValueOfMust[tf02]( - ctx, - &tf02{ - Field1: fwtypes.NewObjectValueOfMust[tf01]( - ctx, - &tf01{ - Field1: types.BoolValue(true), - Field2: fwtypes.NewListValueOfMust[types.String](ctx, []attr.Value{types.StringValue("a"), types.StringValue("b")}), - }, - ), - }, - ), - }, - Target: &aws03{}, - WantTarget: &aws03{ - Field1: &aws02{ - Field1: &aws01{ - Field1: true, - Field2: []string{"a", "b"}, - }, - }, - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[*tf03](), reflect.TypeFor[*aws03]()), - infoConverting(reflect.TypeFor[tf03](), reflect.TypeFor[*aws03]()), - traceMatchedFields("Field1", 
reflect.TypeFor[tf03](), "Field1", reflect.TypeFor[*aws03]()), - infoConvertingWithPath("Field1", reflect.TypeFor[fwtypes.ObjectValueOf[tf02]](), "Field1", reflect.TypeFor[*aws02]()), - traceMatchedFieldsWithPath("Field1", "Field1", reflect.TypeFor[tf02](), "Field1", "Field1", reflect.TypeFor[*aws02]()), - infoConvertingWithPath("Field1.Field1", reflect.TypeFor[fwtypes.ObjectValueOf[tf01]](), "Field1.Field1", reflect.TypeFor[*aws01]()), - traceMatchedFieldsWithPath("Field1.Field1", "Field1", reflect.TypeFor[tf01](), "Field1.Field1", "Field1", reflect.TypeFor[*aws01]()), - infoConvertingWithPath("Field1.Field1.Field1", reflect.TypeFor[types.Bool](), "Field1.Field1.Field1", reflect.TypeFor[bool]()), - traceMatchedFieldsWithPath("Field1.Field1", "Field2", reflect.TypeFor[tf01](), "Field1.Field1", "Field2", reflect.TypeFor[*aws01]()), - infoConvertingWithPath("Field1.Field1.Field2", reflect.TypeFor[fwtypes.ListValueOf[types.String]](), "Field1.Field1.Field2", reflect.TypeFor[[]string]()), - traceExpandingWithElementsAs("Field1.Field1.Field2", reflect.TypeFor[fwtypes.ListValueOf[types.String]](), 2, "Field1.Field1.Field2", reflect.TypeFor[[]string]()), - }, - }, - } - runAutoExpandTestCases(t, testCases) -} - -func TestExpandStringEnum(t *testing.T) { - t.Parallel() - - var enum testEnum - enumList := testEnumList - - testCases := autoFlexTestCases{ - "valid value": { - Source: fwtypes.StringEnumValue(testEnumList), - Target: &enum, - WantTarget: &enumList, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[fwtypes.StringEnum[testEnum]](), reflect.TypeFor[*testEnum]()), - infoConverting(reflect.TypeFor[fwtypes.StringEnum[testEnum]](), reflect.TypeFor[testEnum]()), - }, - }, - "empty value": { - Source: fwtypes.StringEnumNull[testEnum](), - Target: &enum, - WantTarget: &enum, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[fwtypes.StringEnum[testEnum]](), reflect.TypeFor[*testEnum]()), - 
infoConverting(reflect.TypeFor[fwtypes.StringEnum[testEnum]](), reflect.TypeFor[testEnum]()), - traceExpandingNullValue("", reflect.TypeFor[fwtypes.StringEnum[testEnum]](), "", reflect.TypeFor[testEnum]()), - }, - }, - } - runAutoExpandTestCases(t, testCases) -} - -func TestExpandListOfInt64(t *testing.T) { - t.Parallel() - - testCases := autoFlexTestCases{ - "valid value []int64": { - Source: types.ListValueMust(types.Int64Type, []attr.Value{ - types.Int64Value(1), - types.Int64Value(-1), - }), - Target: &[]int64{}, - WantTarget: &[]int64{1, -1}, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[types.List](), reflect.TypeFor[*[]int64]()), - infoConverting(reflect.TypeFor[types.List](), reflect.TypeFor[[]int64]()), - traceExpandingWithElementsAs("", reflect.TypeFor[types.List](), 2, "", reflect.TypeFor[[]int64]()), - }, - }, - "empty value []int64": { - Source: types.ListValueMust(types.Int64Type, []attr.Value{}), - Target: &[]int64{}, - WantTarget: &[]int64{}, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[types.List](), reflect.TypeFor[*[]int64]()), - infoConverting(reflect.TypeFor[types.List](), reflect.TypeFor[[]int64]()), - traceExpandingWithElementsAs("", reflect.TypeFor[types.List](), 0, "", reflect.TypeFor[[]int64]()), - }, - }, - "null value []int64": { - Source: types.ListNull(types.Int64Type), - Target: &[]int64{}, - WantTarget: &[]int64{}, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[types.List](), reflect.TypeFor[*[]int64]()), - infoConverting(reflect.TypeFor[types.List](), reflect.TypeFor[[]int64]()), - traceExpandingNullValue("", reflect.TypeFor[types.List](), "", reflect.TypeFor[[]int64]()), - }, - }, - "valid value []*int64": { - Source: types.ListValueMust(types.Int64Type, []attr.Value{ - types.Int64Value(1), - types.Int64Value(-1), - }), - Target: &[]*int64{}, - WantTarget: &[]*int64{aws.Int64(1), aws.Int64(-1)}, - expectedLogLines: []map[string]any{ - 
infoExpanding(reflect.TypeFor[types.List](), reflect.TypeFor[*[]*int64]()), - infoConverting(reflect.TypeFor[types.List](), reflect.TypeFor[[]*int64]()), - traceExpandingWithElementsAs("", reflect.TypeFor[types.List](), 2, "", reflect.TypeFor[[]*int64]()), - }, - }, - "empty value []*int64": { - Source: types.ListValueMust(types.Int64Type, []attr.Value{}), - Target: &[]*int64{}, - WantTarget: &[]*int64{}, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[types.List](), reflect.TypeFor[*[]*int64]()), - infoConverting(reflect.TypeFor[types.List](), reflect.TypeFor[[]*int64]()), - traceExpandingWithElementsAs("", reflect.TypeFor[types.List](), 0, "", reflect.TypeFor[[]*int64]()), - }, - }, - "null value []*int64": { - Source: types.ListNull(types.Int64Type), - Target: &[]*int64{}, - WantTarget: &[]*int64{}, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[types.List](), reflect.TypeFor[*[]*int64]()), - infoConverting(reflect.TypeFor[types.List](), reflect.TypeFor[[]*int64]()), - traceExpandingNullValue("", reflect.TypeFor[types.List](), "", reflect.TypeFor[[]*int64]()), - }, - }, - "valid value []int32": { - Source: types.ListValueMust(types.Int64Type, []attr.Value{ - types.Int64Value(1), - types.Int64Value(-1), - }), - Target: &[]int32{}, - WantTarget: &[]int32{1, -1}, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[types.List](), reflect.TypeFor[*[]int32]()), - infoConverting(reflect.TypeFor[types.List](), reflect.TypeFor[[]int32]()), - traceExpandingWithElementsAs("", reflect.TypeFor[types.List](), 2, "", reflect.TypeFor[[]int32]()), - }, - }, - "empty value []int32": { - Source: types.ListValueMust(types.Int64Type, []attr.Value{}), - Target: &[]int32{}, - WantTarget: &[]int32{}, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[types.List](), reflect.TypeFor[*[]int32]()), - infoConverting(reflect.TypeFor[types.List](), reflect.TypeFor[[]int32]()), - traceExpandingWithElementsAs("", 
reflect.TypeFor[types.List](), 0, "", reflect.TypeFor[[]int32]()), - }, - }, - "null value []int32": { - Source: types.ListNull(types.Int64Type), - Target: &[]int32{}, - WantTarget: &[]int32{}, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[types.List](), reflect.TypeFor[*[]int32]()), - infoConverting(reflect.TypeFor[types.List](), reflect.TypeFor[[]int32]()), - traceExpandingNullValue("", reflect.TypeFor[types.List](), "", reflect.TypeFor[[]int32]()), - }, - }, - "valid value []*int32": { - Source: types.ListValueMust(types.Int64Type, []attr.Value{ - types.Int64Value(1), - types.Int64Value(-1), - }), - Target: &[]*int32{}, - WantTarget: &[]*int32{aws.Int32(1), aws.Int32(-1)}, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[types.List](), reflect.TypeFor[*[]*int32]()), - infoConverting(reflect.TypeFor[types.List](), reflect.TypeFor[[]*int32]()), - traceExpandingWithElementsAs("", reflect.TypeFor[types.List](), 2, "", reflect.TypeFor[[]*int32]()), - }, - }, - "empty value []*int32": { - Source: types.ListValueMust(types.Int64Type, []attr.Value{}), - Target: &[]*int32{}, - WantTarget: &[]*int32{}, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[types.List](), reflect.TypeFor[*[]*int32]()), - infoConverting(reflect.TypeFor[types.List](), reflect.TypeFor[[]*int32]()), - traceExpandingWithElementsAs("", reflect.TypeFor[types.List](), 0, "", reflect.TypeFor[[]*int32]()), - }, - }, - "null value []*int32": { - Source: types.ListNull(types.Int64Type), - Target: &[]*int32{}, - WantTarget: &[]*int32{}, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[types.List](), reflect.TypeFor[*[]*int32]()), - infoConverting(reflect.TypeFor[types.List](), reflect.TypeFor[[]*int32]()), - traceExpandingNullValue("", reflect.TypeFor[types.List](), "", reflect.TypeFor[[]*int32]()), - }, - }, - } - runAutoExpandTestCases(t, testCases) -} - -func TestExpandSetOfInt64(t *testing.T) { - t.Parallel() - - 
testCases := autoFlexTestCases{ - "valid value []int64": { - Source: types.SetValueMust(types.Int64Type, []attr.Value{ - types.Int64Value(1), - types.Int64Value(-1), - }), - Target: &[]int64{}, - WantTarget: &[]int64{1, -1}, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[types.Set](), reflect.TypeFor[*[]int64]()), - infoConverting(reflect.TypeFor[types.Set](), reflect.TypeFor[[]int64]()), - traceExpandingWithElementsAs("", reflect.TypeFor[types.Set](), 2, "", reflect.TypeFor[[]int64]()), - }, - }, - "empty value []int64": { - Source: types.SetValueMust(types.Int64Type, []attr.Value{}), - Target: &[]int64{}, - WantTarget: &[]int64{}, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[types.Set](), reflect.TypeFor[*[]int64]()), - infoConverting(reflect.TypeFor[types.Set](), reflect.TypeFor[[]int64]()), - traceExpandingWithElementsAs("", reflect.TypeFor[types.Set](), 0, "", reflect.TypeFor[[]int64]()), - }, - }, - "null value []int64": { - Source: types.SetNull(types.Int64Type), - Target: &[]int64{}, - WantTarget: &[]int64{}, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[types.Set](), reflect.TypeFor[*[]int64]()), - infoConverting(reflect.TypeFor[types.Set](), reflect.TypeFor[[]int64]()), - traceExpandingNullValue("", reflect.TypeFor[types.Set](), "", reflect.TypeFor[[]int64]()), - }, - }, - "valid value []*int64": { - Source: types.SetValueMust(types.Int64Type, []attr.Value{ - types.Int64Value(1), - types.Int64Value(-1), - }), - Target: &[]*int64{}, - WantTarget: &[]*int64{aws.Int64(1), aws.Int64(-1)}, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[types.Set](), reflect.TypeFor[*[]*int64]()), - infoConverting(reflect.TypeFor[types.Set](), reflect.TypeFor[[]*int64]()), - traceExpandingWithElementsAs("", reflect.TypeFor[types.Set](), 2, "", reflect.TypeFor[[]*int64]()), - }, - }, - "empty value []*int64": { - Source: types.SetValueMust(types.Int64Type, []attr.Value{}), - Target: 
&[]*int64{}, - WantTarget: &[]*int64{}, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[types.Set](), reflect.TypeFor[*[]*int64]()), - infoConverting(reflect.TypeFor[types.Set](), reflect.TypeFor[[]*int64]()), - traceExpandingWithElementsAs("", reflect.TypeFor[types.Set](), 0, "", reflect.TypeFor[[]*int64]()), - }, - }, - "null value []*int64": { - Source: types.SetNull(types.Int64Type), - Target: &[]*int64{}, - WantTarget: &[]*int64{}, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[types.Set](), reflect.TypeFor[*[]*int64]()), - infoConverting(reflect.TypeFor[types.Set](), reflect.TypeFor[[]*int64]()), - traceExpandingNullValue("", reflect.TypeFor[types.Set](), "", reflect.TypeFor[[]*int64]()), - }, - }, - "valid value []int32": { - Source: types.SetValueMust(types.Int64Type, []attr.Value{ - types.Int64Value(1), - types.Int64Value(-1), - }), - Target: &[]int32{}, - WantTarget: &[]int32{1, -1}, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[types.Set](), reflect.TypeFor[*[]int32]()), - infoConverting(reflect.TypeFor[types.Set](), reflect.TypeFor[[]int32]()), - traceExpandingWithElementsAs("", reflect.TypeFor[types.Set](), 2, "", reflect.TypeFor[[]int32]()), - }, - }, - "empty value []int32": { - Source: types.SetValueMust(types.Int64Type, []attr.Value{}), - Target: &[]int32{}, - WantTarget: &[]int32{}, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[types.Set](), reflect.TypeFor[*[]int32]()), - infoConverting(reflect.TypeFor[types.Set](), reflect.TypeFor[[]int32]()), - traceExpandingWithElementsAs("", reflect.TypeFor[types.Set](), 0, "", reflect.TypeFor[[]int32]()), - }, - }, - "null value []int32": { - Source: types.SetNull(types.Int64Type), - Target: &[]int32{}, - WantTarget: &[]int32{}, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[types.Set](), reflect.TypeFor[*[]int32]()), - infoConverting(reflect.TypeFor[types.Set](), reflect.TypeFor[[]int32]()), - 
traceExpandingNullValue("", reflect.TypeFor[types.Set](), "", reflect.TypeFor[[]int32]()), - }, - }, - "valid value []*int32": { - Source: types.SetValueMust(types.Int64Type, []attr.Value{ - types.Int64Value(1), - types.Int64Value(-1), - }), - Target: &[]*int32{}, - WantTarget: &[]*int32{aws.Int32(1), aws.Int32(-1)}, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[types.Set](), reflect.TypeFor[*[]*int32]()), - infoConverting(reflect.TypeFor[types.Set](), reflect.TypeFor[[]*int32]()), - traceExpandingWithElementsAs("", reflect.TypeFor[types.Set](), 2, "", reflect.TypeFor[[]*int32]()), - }, - }, - "empty value []*int32": { - Source: types.SetValueMust(types.Int64Type, []attr.Value{}), - Target: &[]*int32{}, - WantTarget: &[]*int32{}, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[types.Set](), reflect.TypeFor[*[]*int32]()), - infoConverting(reflect.TypeFor[types.Set](), reflect.TypeFor[[]*int32]()), - traceExpandingWithElementsAs("", reflect.TypeFor[types.Set](), 0, "", reflect.TypeFor[[]*int32]()), - }, - }, - "null value []*int32": { - Source: types.SetNull(types.Int64Type), - Target: &[]*int32{}, - WantTarget: &[]*int32{}, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[types.Set](), reflect.TypeFor[*[]*int32]()), - infoConverting(reflect.TypeFor[types.Set](), reflect.TypeFor[[]*int32]()), - traceExpandingNullValue("", reflect.TypeFor[types.Set](), "", reflect.TypeFor[[]*int32]()), - }, - }, - } - runAutoExpandTestCases(t, testCases) -} - -func TestExpandListOfStringEnum(t *testing.T) { - t.Parallel() - - testCases := autoFlexTestCases{ - "valid value": { - Source: types.ListValueMust(types.StringType, []attr.Value{ - types.StringValue(string(testEnumScalar)), - types.StringValue(string(testEnumList)), - }), - Target: &[]testEnum{}, - WantTarget: &[]testEnum{testEnumScalar, testEnumList}, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[types.List](), 
reflect.TypeFor[*[]testEnum]()), - infoConverting(reflect.TypeFor[types.List](), reflect.TypeFor[[]testEnum]()), - traceExpandingWithElementsAs("", reflect.TypeFor[types.List](), 2, "", reflect.TypeFor[[]testEnum]()), - }, - }, - "empty value": { - Source: types.ListValueMust(types.StringType, []attr.Value{}), - Target: &[]testEnum{}, - WantTarget: &[]testEnum{}, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[types.List](), reflect.TypeFor[*[]testEnum]()), - infoConverting(reflect.TypeFor[types.List](), reflect.TypeFor[[]testEnum]()), - traceExpandingWithElementsAs("", reflect.TypeFor[types.List](), 0, "", reflect.TypeFor[[]testEnum]()), - }, - }, - "null value": { - Source: types.ListNull(types.StringType), - Target: &[]testEnum{}, - WantTarget: &[]testEnum{}, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[types.List](), reflect.TypeFor[*[]testEnum]()), - infoConverting(reflect.TypeFor[types.List](), reflect.TypeFor[[]testEnum]()), - traceExpandingNullValue("", reflect.TypeFor[types.List](), "", reflect.TypeFor[[]testEnum]()), - }, - }, - } - runAutoExpandTestCases(t, testCases) -} - -func TestExpandSetOfStringEnum(t *testing.T) { - t.Parallel() - - testCases := autoFlexTestCases{ - "valid value": { - Source: types.SetValueMust(types.StringType, []attr.Value{ - types.StringValue(string(testEnumScalar)), - types.StringValue(string(testEnumList)), - }), - Target: &[]testEnum{}, - WantTarget: &[]testEnum{testEnumScalar, testEnumList}, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[types.Set](), reflect.TypeFor[*[]testEnum]()), - infoConverting(reflect.TypeFor[types.Set](), reflect.TypeFor[[]testEnum]()), - traceExpandingWithElementsAs("", reflect.TypeFor[types.Set](), 2, "", reflect.TypeFor[[]testEnum]()), - }, - }, - "empty value": { - Source: types.SetValueMust(types.StringType, []attr.Value{}), - Target: &[]testEnum{}, - WantTarget: &[]testEnum{}, - expectedLogLines: []map[string]any{ - 
infoExpanding(reflect.TypeFor[types.Set](), reflect.TypeFor[*[]testEnum]()), - infoConverting(reflect.TypeFor[types.Set](), reflect.TypeFor[[]testEnum]()), - traceExpandingWithElementsAs("", reflect.TypeFor[types.Set](), 0, "", reflect.TypeFor[[]testEnum]()), - }, - }, - "null value": { - Source: types.SetNull(types.StringType), - Target: &[]testEnum{}, - WantTarget: &[]testEnum{}, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[types.Set](), reflect.TypeFor[*[]testEnum]()), - infoConverting(reflect.TypeFor[types.Set](), reflect.TypeFor[[]testEnum]()), - traceExpandingNullValue("", reflect.TypeFor[types.Set](), "", reflect.TypeFor[[]testEnum]()), - }, - }, - } - runAutoExpandTestCases(t, testCases) -} - -func TestExpandStructListOfStringEnum(t *testing.T) { - t.Parallel() - - ctx := context.Background() - - testCases := autoFlexTestCases{ - "valid value": { - Source: &tfListOfStringEnum{ - Field1: fwtypes.NewListValueOfMust[fwtypes.StringEnum[testEnum]](ctx, []attr.Value{ - fwtypes.StringEnumValue(testEnumScalar), - fwtypes.StringEnumValue(testEnumList), - }), - }, - Target: &awsSliceOfStringEnum{}, - WantTarget: &awsSliceOfStringEnum{ - Field1: []testEnum{testEnumScalar, testEnumList}, - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[*tfListOfStringEnum](), reflect.TypeFor[*awsSliceOfStringEnum]()), - infoConverting(reflect.TypeFor[tfListOfStringEnum](), reflect.TypeFor[*awsSliceOfStringEnum]()), - traceMatchedFields("Field1", reflect.TypeFor[tfListOfStringEnum](), "Field1", reflect.TypeFor[*awsSliceOfStringEnum]()), - infoConvertingWithPath("Field1", reflect.TypeFor[fwtypes.ListValueOf[fwtypes.StringEnum[testEnum]]](), "Field1", reflect.TypeFor[[]testEnum]()), - traceExpandingWithElementsAs("Field1", reflect.TypeFor[fwtypes.ListValueOf[fwtypes.StringEnum[testEnum]]](), 2, "Field1", reflect.TypeFor[[]testEnum]()), - }, - }, - "empty value": { - Source: &tfListOfStringEnum{ - Field1: 
fwtypes.NewListValueOfMust[fwtypes.StringEnum[testEnum]](ctx, []attr.Value{}), - }, - Target: &awsSliceOfStringEnum{}, - WantTarget: &awsSliceOfStringEnum{ - Field1: []testEnum{}, - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[*tfListOfStringEnum](), reflect.TypeFor[*awsSliceOfStringEnum]()), - infoConverting(reflect.TypeFor[tfListOfStringEnum](), reflect.TypeFor[*awsSliceOfStringEnum]()), - traceMatchedFields("Field1", reflect.TypeFor[tfListOfStringEnum](), "Field1", reflect.TypeFor[*awsSliceOfStringEnum]()), - infoConvertingWithPath("Field1", reflect.TypeFor[fwtypes.ListValueOf[fwtypes.StringEnum[testEnum]]](), "Field1", reflect.TypeFor[[]testEnum]()), - traceExpandingWithElementsAs("Field1", reflect.TypeFor[fwtypes.ListValueOf[fwtypes.StringEnum[testEnum]]](), 0, "Field1", reflect.TypeFor[([]testEnum)]()), - }, - }, - "null value": { - Source: &tfListOfStringEnum{ - Field1: fwtypes.NewListValueOfNull[fwtypes.StringEnum[testEnum]](ctx), - }, - Target: &awsSliceOfStringEnum{}, - WantTarget: &awsSliceOfStringEnum{}, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[*tfListOfStringEnum](), reflect.TypeFor[*awsSliceOfStringEnum]()), - infoConverting(reflect.TypeFor[tfListOfStringEnum](), reflect.TypeFor[*awsSliceOfStringEnum]()), - traceMatchedFields("Field1", reflect.TypeFor[tfListOfStringEnum](), "Field1", reflect.TypeFor[*awsSliceOfStringEnum]()), - infoConvertingWithPath("Field1", reflect.TypeFor[fwtypes.ListValueOf[fwtypes.StringEnum[testEnum]]](), "Field1", reflect.TypeFor[[]testEnum]()), - traceExpandingNullValue("Field1", reflect.TypeFor[fwtypes.ListValueOf[fwtypes.StringEnum[testEnum]]](), "Field1", reflect.TypeFor[[]testEnum]()), - }, - }, - } - runAutoExpandTestCases(t, testCases) -} - -func TestExpandStructSetOfStringEnum(t *testing.T) { - t.Parallel() - - ctx := context.Background() - - testCases := autoFlexTestCases{ - "valid value": { - Source: &tfSetOfStringEnum{ - Field1: 
fwtypes.NewSetValueOfMust[fwtypes.StringEnum[testEnum]](ctx, []attr.Value{ - fwtypes.StringEnumValue(testEnumScalar), - fwtypes.StringEnumValue(testEnumList), - }), - }, - Target: &awsSliceOfStringEnum{}, - WantTarget: &awsSliceOfStringEnum{ - Field1: []testEnum{testEnumScalar, testEnumList}, - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[*tfSetOfStringEnum](), reflect.TypeFor[*awsSliceOfStringEnum]()), - infoConverting(reflect.TypeFor[tfSetOfStringEnum](), reflect.TypeFor[*awsSliceOfStringEnum]()), - traceMatchedFields("Field1", reflect.TypeFor[tfSetOfStringEnum](), "Field1", reflect.TypeFor[*awsSliceOfStringEnum]()), - infoConvertingWithPath("Field1", reflect.TypeFor[fwtypes.SetValueOf[fwtypes.StringEnum[testEnum]]](), "Field1", reflect.TypeFor[[]testEnum]()), - traceExpandingWithElementsAs("Field1", reflect.TypeFor[fwtypes.SetValueOf[fwtypes.StringEnum[testEnum]]](), 2, "Field1", reflect.TypeFor[[]testEnum]()), - }, - }, - "empty value": { - Source: &tfSetOfStringEnum{ - Field1: fwtypes.NewSetValueOfMust[fwtypes.StringEnum[testEnum]](ctx, []attr.Value{}), - }, - Target: &awsSliceOfStringEnum{}, - WantTarget: &awsSliceOfStringEnum{ - Field1: []testEnum{}, - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[*tfSetOfStringEnum](), reflect.TypeFor[*awsSliceOfStringEnum]()), - infoConverting(reflect.TypeFor[tfSetOfStringEnum](), reflect.TypeFor[*awsSliceOfStringEnum]()), - traceMatchedFields("Field1", reflect.TypeFor[tfSetOfStringEnum](), "Field1", reflect.TypeFor[*awsSliceOfStringEnum]()), - infoConvertingWithPath("Field1", reflect.TypeFor[fwtypes.SetValueOf[fwtypes.StringEnum[testEnum]]](), "Field1", reflect.TypeFor[[]testEnum]()), - traceExpandingWithElementsAs("Field1", reflect.TypeFor[fwtypes.SetValueOf[fwtypes.StringEnum[testEnum]]](), 0, "Field1", reflect.TypeFor[([]testEnum)]()), - }, - }, - "null value": { - Source: &tfSetOfStringEnum{ - Field1: fwtypes.NewSetValueOfNull[fwtypes.StringEnum[testEnum]](ctx), - 
}, - Target: &awsSliceOfStringEnum{}, - WantTarget: &awsSliceOfStringEnum{}, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[*tfSetOfStringEnum](), reflect.TypeFor[*awsSliceOfStringEnum]()), - infoConverting(reflect.TypeFor[tfSetOfStringEnum](), reflect.TypeFor[*awsSliceOfStringEnum]()), - traceMatchedFields("Field1", reflect.TypeFor[tfSetOfStringEnum](), "Field1", reflect.TypeFor[*awsSliceOfStringEnum]()), - infoConvertingWithPath("Field1", reflect.TypeFor[fwtypes.SetValueOf[fwtypes.StringEnum[testEnum]]](), "Field1", reflect.TypeFor[[]testEnum]()), - traceExpandingNullValue("Field1", reflect.TypeFor[fwtypes.SetValueOf[fwtypes.StringEnum[testEnum]]](), "Field1", reflect.TypeFor[[]testEnum]()), - }, - }, - } - runAutoExpandTestCases(t, testCases) -} - -func TestExpandTopLevelListOfNestedObject(t *testing.T) { - t.Parallel() - - ctx := context.Background() - - testCases := autoFlexTestCases{ - "valid value to []struct": { - Source: fwtypes.NewListNestedObjectValueOfValueSliceMust(ctx, []tfSingleStringField{ - { - Field1: types.StringValue("value1"), - }, - { - Field1: types.StringValue("value2"), - }, - }), - Target: &[]awsSingleStringValue{}, - WantTarget: &[]awsSingleStringValue{ - { - Field1: "value1", - }, - { - Field1: "value2", - }, - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[fwtypes.ListNestedObjectValueOf[tfSingleStringField]](), reflect.TypeFor[*[]awsSingleStringValue]()), - infoConverting(reflect.TypeFor[fwtypes.ListNestedObjectValueOf[tfSingleStringField]](), reflect.TypeFor[[]awsSingleStringValue]()), - traceExpandingNestedObjectCollection("", reflect.TypeFor[fwtypes.ListNestedObjectValueOf[tfSingleStringField]](), 2, "", reflect.TypeFor[[]awsSingleStringValue]()), - traceMatchedFieldsWithPath("[0]", "Field1", reflect.TypeFor[tfSingleStringField](), "[0]", "Field1", reflect.TypeFor[*awsSingleStringValue]()), - infoConvertingWithPath("[0].Field1", reflect.TypeFor[types.String](), "[0].Field1", 
reflect.TypeFor[string]()), - traceMatchedFieldsWithPath("[1]", "Field1", reflect.TypeFor[tfSingleStringField](), "[1]", "Field1", reflect.TypeFor[*awsSingleStringValue]()), - infoConvertingWithPath("[1].Field1", reflect.TypeFor[types.String](), "[1].Field1", reflect.TypeFor[string]()), - }, - }, - "empty value to []struct": { - Source: fwtypes.NewListNestedObjectValueOfValueSliceMust(ctx, []tfSingleStringField{}), - Target: &[]awsSingleStringValue{}, - WantTarget: &[]awsSingleStringValue{}, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[fwtypes.ListNestedObjectValueOf[tfSingleStringField]](), reflect.TypeFor[*[]awsSingleStringValue]()), - infoConverting(reflect.TypeFor[fwtypes.ListNestedObjectValueOf[tfSingleStringField]](), reflect.TypeFor[[]awsSingleStringValue]()), - traceExpandingNestedObjectCollection("", reflect.TypeFor[fwtypes.ListNestedObjectValueOf[tfSingleStringField]](), 0, "", reflect.TypeFor[[]awsSingleStringValue]()), - }, - }, - "null value to []struct": { - Source: fwtypes.NewListNestedObjectValueOfNull[tfSingleStringField](ctx), - Target: &[]awsSingleStringValue{}, - WantTarget: &[]awsSingleStringValue{}, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[fwtypes.ListNestedObjectValueOf[tfSingleStringField]](), reflect.TypeFor[*[]awsSingleStringValue]()), - infoConverting(reflect.TypeFor[fwtypes.ListNestedObjectValueOf[tfSingleStringField]](), reflect.TypeFor[[]awsSingleStringValue]()), - traceExpandingNullValue("", reflect.TypeFor[fwtypes.ListNestedObjectValueOf[tfSingleStringField]](), "", reflect.TypeFor[[]awsSingleStringValue]()), - }, - }, - - "valid value to []*struct": { - Source: fwtypes.NewListNestedObjectValueOfValueSliceMust(ctx, []tfSingleStringField{ - { - Field1: types.StringValue("value1"), - }, - { - Field1: types.StringValue("value2"), - }, - }), - Target: &[]*awsSingleStringValue{}, - WantTarget: &[]*awsSingleStringValue{ - { - Field1: "value1", - }, - { - Field1: "value2", - }, - }, - 
expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[fwtypes.ListNestedObjectValueOf[tfSingleStringField]](), reflect.TypeFor[*[]*awsSingleStringValue]()), - infoConverting(reflect.TypeFor[fwtypes.ListNestedObjectValueOf[tfSingleStringField]](), reflect.TypeFor[[]*awsSingleStringValue]()), - traceExpandingNestedObjectCollection("", reflect.TypeFor[fwtypes.ListNestedObjectValueOf[tfSingleStringField]](), 2, "", reflect.TypeFor[[]*awsSingleStringValue]()), - traceMatchedFieldsWithPath("[0]", "Field1", reflect.TypeFor[tfSingleStringField](), "[0]", "Field1", reflect.TypeFor[*awsSingleStringValue]()), - infoConvertingWithPath("[0].Field1", reflect.TypeFor[types.String](), "[0].Field1", reflect.TypeFor[string]()), - traceMatchedFieldsWithPath("[1]", "Field1", reflect.TypeFor[tfSingleStringField](), "[1]", "Field1", reflect.TypeFor[*awsSingleStringValue]()), - infoConvertingWithPath("[1].Field1", reflect.TypeFor[types.String](), "[1].Field1", reflect.TypeFor[string]()), - }, - }, - "empty value to []*struct": { - Source: fwtypes.NewListNestedObjectValueOfValueSliceMust(ctx, []tfSingleStringField{}), - Target: &[]*awsSingleStringValue{}, - WantTarget: &[]*awsSingleStringValue{}, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[fwtypes.ListNestedObjectValueOf[tfSingleStringField]](), reflect.TypeFor[*[]*awsSingleStringValue]()), - infoConverting(reflect.TypeFor[fwtypes.ListNestedObjectValueOf[tfSingleStringField]](), reflect.TypeFor[[]*awsSingleStringValue]()), - traceExpandingNestedObjectCollection("", reflect.TypeFor[fwtypes.ListNestedObjectValueOf[tfSingleStringField]](), 0, "", reflect.TypeFor[[]*awsSingleStringValue]()), - }, - }, - "null value to []*struct": { - Source: fwtypes.NewListNestedObjectValueOfNull[tfSingleStringField](ctx), - Target: &[]*awsSingleStringValue{}, - WantTarget: &[]*awsSingleStringValue{}, - expectedLogLines: []map[string]any{ - 
infoExpanding(reflect.TypeFor[fwtypes.ListNestedObjectValueOf[tfSingleStringField]](), reflect.TypeFor[*[]*awsSingleStringValue]()), - infoConverting(reflect.TypeFor[fwtypes.ListNestedObjectValueOf[tfSingleStringField]](), reflect.TypeFor[[]*awsSingleStringValue]()), - traceExpandingNullValue("", reflect.TypeFor[fwtypes.ListNestedObjectValueOf[tfSingleStringField]](), "", reflect.TypeFor[[]*awsSingleStringValue]()), - }, - }, - - "single list value to single struct": { - Source: fwtypes.NewListNestedObjectValueOfValueSliceMust(ctx, []tfSingleStringField{ - { - Field1: types.StringValue("value1"), - }, - }), - Target: &awsSingleStringValue{}, - WantTarget: &awsSingleStringValue{ - Field1: "value1", - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[fwtypes.ListNestedObjectValueOf[tfSingleStringField]](), reflect.TypeFor[*awsSingleStringValue]()), - infoConverting(reflect.TypeFor[fwtypes.ListNestedObjectValueOf[tfSingleStringField]](), reflect.TypeFor[awsSingleStringValue]()), - traceMatchedFieldsWithPath("[0]", "Field1", reflect.TypeFor[tfSingleStringField](), "", "Field1", reflect.TypeFor[*awsSingleStringValue]()), - infoConvertingWithPath("[0].Field1", reflect.TypeFor[types.String](), "Field1", reflect.TypeFor[string]()), - }, - }, - "empty list value to single struct": { - Source: fwtypes.NewListNestedObjectValueOfValueSliceMust(ctx, []tfSingleStringField{}), - Target: &awsSingleStringValue{}, - WantTarget: &awsSingleStringValue{}, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[fwtypes.ListNestedObjectValueOf[tfSingleStringField]](), reflect.TypeFor[*awsSingleStringValue]()), - infoConverting(reflect.TypeFor[fwtypes.ListNestedObjectValueOf[tfSingleStringField]](), reflect.TypeFor[awsSingleStringValue]()), - }, - }, - "null value to single struct": { - Source: fwtypes.NewListNestedObjectValueOfNull[tfSingleStringField](ctx), - Target: &awsSingleStringValue{}, - WantTarget: &awsSingleStringValue{}, - expectedLogLines: 
[]map[string]any{ - infoExpanding(reflect.TypeFor[fwtypes.ListNestedObjectValueOf[tfSingleStringField]](), reflect.TypeFor[*awsSingleStringValue]()), - infoConverting(reflect.TypeFor[fwtypes.ListNestedObjectValueOf[tfSingleStringField]](), reflect.TypeFor[awsSingleStringValue]()), - traceExpandingNullValue("", reflect.TypeFor[fwtypes.ListNestedObjectValueOf[tfSingleStringField]](), "", reflect.TypeFor[awsSingleStringValue]()), - }, - }, - } - runAutoExpandTestCases(t, testCases) -} - -func TestExpandSetOfNestedObject(t *testing.T) { - t.Parallel() - - ctx := context.Background() - - testCases := autoFlexTestCases{ - "valid value to []struct": { - Source: fwtypes.NewSetNestedObjectValueOfValueSliceMust(ctx, []tfSingleStringField{ - { - Field1: types.StringValue("value1"), - }, - { - Field1: types.StringValue("value2"), - }, - }), - Target: &[]awsSingleStringValue{}, - WantTarget: &[]awsSingleStringValue{ - { - Field1: "value1", - }, - { - Field1: "value2", - }, - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[fwtypes.SetNestedObjectValueOf[tfSingleStringField]](), reflect.TypeFor[*[]awsSingleStringValue]()), - infoConverting(reflect.TypeFor[fwtypes.SetNestedObjectValueOf[tfSingleStringField]](), reflect.TypeFor[[]awsSingleStringValue]()), - traceExpandingNestedObjectCollection("", reflect.TypeFor[fwtypes.SetNestedObjectValueOf[tfSingleStringField]](), 2, "", reflect.TypeFor[[]awsSingleStringValue]()), - traceMatchedFieldsWithPath("[0]", "Field1", reflect.TypeFor[tfSingleStringField](), "[0]", "Field1", reflect.TypeFor[*awsSingleStringValue]()), - infoConvertingWithPath("[0].Field1", reflect.TypeFor[types.String](), "[0].Field1", reflect.TypeFor[string]()), - traceMatchedFieldsWithPath("[1]", "Field1", reflect.TypeFor[tfSingleStringField](), "[1]", "Field1", reflect.TypeFor[*awsSingleStringValue]()), - infoConvertingWithPath("[1].Field1", reflect.TypeFor[types.String](), "[1].Field1", reflect.TypeFor[string]()), - }, - }, - "empty value to 
[]struct": { - Source: fwtypes.NewSetNestedObjectValueOfValueSliceMust(ctx, []tfSingleStringField{}), - Target: &[]awsSingleStringValue{}, - WantTarget: &[]awsSingleStringValue{}, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[fwtypes.SetNestedObjectValueOf[tfSingleStringField]](), reflect.TypeFor[*[]awsSingleStringValue]()), - infoConverting(reflect.TypeFor[fwtypes.SetNestedObjectValueOf[tfSingleStringField]](), reflect.TypeFor[[]awsSingleStringValue]()), - traceExpandingNestedObjectCollection("", reflect.TypeFor[fwtypes.SetNestedObjectValueOf[tfSingleStringField]](), 0, "", reflect.TypeFor[[]awsSingleStringValue]()), - }, - }, - "null value to []struct": { - Source: fwtypes.NewSetNestedObjectValueOfNull[tfSingleStringField](ctx), - Target: &[]awsSingleStringValue{}, - WantTarget: &[]awsSingleStringValue{}, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[fwtypes.SetNestedObjectValueOf[tfSingleStringField]](), reflect.TypeFor[*[]awsSingleStringValue]()), - infoConverting(reflect.TypeFor[fwtypes.SetNestedObjectValueOf[tfSingleStringField]](), reflect.TypeFor[[]awsSingleStringValue]()), - traceExpandingNullValue("", reflect.TypeFor[fwtypes.SetNestedObjectValueOf[tfSingleStringField]](), "", reflect.TypeFor[[]awsSingleStringValue]()), - }, - }, - - "valid value to []*struct": { - Source: fwtypes.NewSetNestedObjectValueOfValueSliceMust(ctx, []tfSingleStringField{ - { - Field1: types.StringValue("value1"), - }, - { - Field1: types.StringValue("value2"), - }, - }), - Target: &[]*awsSingleStringValue{}, - WantTarget: &[]*awsSingleStringValue{ - { - Field1: "value1", - }, - { - Field1: "value2", - }, - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[fwtypes.SetNestedObjectValueOf[tfSingleStringField]](), reflect.TypeFor[*[]*awsSingleStringValue]()), - infoConverting(reflect.TypeFor[fwtypes.SetNestedObjectValueOf[tfSingleStringField]](), reflect.TypeFor[[]*awsSingleStringValue]()), - 
traceExpandingNestedObjectCollection("", reflect.TypeFor[fwtypes.SetNestedObjectValueOf[tfSingleStringField]](), 2, "", reflect.TypeFor[[]*awsSingleStringValue]()), - traceMatchedFieldsWithPath("[0]", "Field1", reflect.TypeFor[tfSingleStringField](), "[0]", "Field1", reflect.TypeFor[*awsSingleStringValue]()), - infoConvertingWithPath("[0].Field1", reflect.TypeFor[types.String](), "[0].Field1", reflect.TypeFor[string]()), - traceMatchedFieldsWithPath("[1]", "Field1", reflect.TypeFor[tfSingleStringField](), "[1]", "Field1", reflect.TypeFor[*awsSingleStringValue]()), - infoConvertingWithPath("[1].Field1", reflect.TypeFor[types.String](), "[1].Field1", reflect.TypeFor[string]()), - }, - }, - "empty value to []*struct": { - Source: fwtypes.NewSetNestedObjectValueOfValueSliceMust(ctx, []tfSingleStringField{}), - Target: &[]*awsSingleStringValue{}, - WantTarget: &[]*awsSingleStringValue{}, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[fwtypes.SetNestedObjectValueOf[tfSingleStringField]](), reflect.TypeFor[*[]*awsSingleStringValue]()), - infoConverting(reflect.TypeFor[fwtypes.SetNestedObjectValueOf[tfSingleStringField]](), reflect.TypeFor[[]*awsSingleStringValue]()), - traceExpandingNestedObjectCollection("", reflect.TypeFor[fwtypes.SetNestedObjectValueOf[tfSingleStringField]](), 0, "", reflect.TypeFor[[]*awsSingleStringValue]()), - }, - }, - "null value to []*struct": { - Source: fwtypes.NewSetNestedObjectValueOfNull[tfSingleStringField](ctx), - Target: &[]*awsSingleStringValue{}, - WantTarget: &[]*awsSingleStringValue{}, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[fwtypes.SetNestedObjectValueOf[tfSingleStringField]](), reflect.TypeFor[*[]*awsSingleStringValue]()), - infoConverting(reflect.TypeFor[fwtypes.SetNestedObjectValueOf[tfSingleStringField]](), reflect.TypeFor[[]*awsSingleStringValue]()), - traceExpandingNullValue("", reflect.TypeFor[fwtypes.SetNestedObjectValueOf[tfSingleStringField]](), "", 
reflect.TypeFor[[]*awsSingleStringValue]()), - }, - }, - - "single set value to single struct": { - Source: fwtypes.NewSetNestedObjectValueOfValueSliceMust(ctx, []tfSingleStringField{ - { - Field1: types.StringValue("value1"), - }, - }), - Target: &awsSingleStringValue{}, - WantTarget: &awsSingleStringValue{ - Field1: "value1", - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[fwtypes.SetNestedObjectValueOf[tfSingleStringField]](), reflect.TypeFor[*awsSingleStringValue]()), - infoConverting(reflect.TypeFor[fwtypes.SetNestedObjectValueOf[tfSingleStringField]](), reflect.TypeFor[awsSingleStringValue]()), - traceMatchedFieldsWithPath("[0]", "Field1", reflect.TypeFor[tfSingleStringField](), "", "Field1", reflect.TypeFor[*awsSingleStringValue]()), - infoConvertingWithPath("[0].Field1", reflect.TypeFor[types.String](), "Field1", reflect.TypeFor[string]()), - }, - }, - "empty set value to single struct": { - Source: fwtypes.NewSetNestedObjectValueOfValueSliceMust(ctx, []tfSingleStringField{}), - Target: &awsSingleStringValue{}, - WantTarget: &awsSingleStringValue{}, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[fwtypes.SetNestedObjectValueOf[tfSingleStringField]](), reflect.TypeFor[*awsSingleStringValue]()), - infoConverting(reflect.TypeFor[fwtypes.SetNestedObjectValueOf[tfSingleStringField]](), reflect.TypeFor[awsSingleStringValue]()), - }, - }, - "null value to single struct": { - Source: fwtypes.NewSetNestedObjectValueOfNull[tfSingleStringField](ctx), - Target: &awsSingleStringValue{}, - WantTarget: &awsSingleStringValue{}, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[fwtypes.SetNestedObjectValueOf[tfSingleStringField]](), reflect.TypeFor[*awsSingleStringValue]()), - infoConverting(reflect.TypeFor[fwtypes.SetNestedObjectValueOf[tfSingleStringField]](), reflect.TypeFor[awsSingleStringValue]()), - traceExpandingNullValue("", reflect.TypeFor[fwtypes.SetNestedObjectValueOf[tfSingleStringField]](), 
"", reflect.TypeFor[awsSingleStringValue]()), - }, - }, - } - runAutoExpandTestCases(t, testCases) -} - -func TestExpandSimpleNestedBlockWithStringEnum(t *testing.T) { - t.Parallel() - - type tf01 struct { - Field1 types.Int64 `tfsdk:"field1"` - Field2 fwtypes.StringEnum[testEnum] `tfsdk:"field2"` - } - type aws01 struct { - Field1 int64 - Field2 testEnum - } - - testCases := autoFlexTestCases{ - "single nested valid value": { - Source: &tf01{ - Field1: types.Int64Value(1), - Field2: fwtypes.StringEnumValue(testEnumList), - }, - Target: &aws01{}, - WantTarget: &aws01{ - Field1: 1, - Field2: testEnumList, - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[*tf01](), reflect.TypeFor[*aws01]()), - infoConverting(reflect.TypeFor[tf01](), reflect.TypeFor[*aws01]()), - traceMatchedFields("Field1", reflect.TypeFor[tf01](), "Field1", reflect.TypeFor[*aws01]()), - infoConvertingWithPath("Field1", reflect.TypeFor[types.Int64](), "Field1", reflect.TypeFor[int64]()), - traceMatchedFields("Field2", reflect.TypeFor[tf01](), "Field2", reflect.TypeFor[*aws01]()), - infoConvertingWithPath("Field2", reflect.TypeFor[fwtypes.StringEnum[testEnum]](), "Field2", reflect.TypeFor[testEnum]()), - }, - }, - "single nested null value": { - Source: &tf01{ - Field1: types.Int64Value(1), - Field2: fwtypes.StringEnumNull[testEnum](), - }, - Target: &aws01{}, - WantTarget: &aws01{ - Field1: 1, - Field2: "", - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[*tf01](), reflect.TypeFor[*aws01]()), - infoConverting(reflect.TypeFor[tf01](), reflect.TypeFor[*aws01]()), - traceMatchedFields("Field1", reflect.TypeFor[tf01](), "Field1", reflect.TypeFor[*aws01]()), - infoConvertingWithPath("Field1", reflect.TypeFor[types.Int64](), "Field1", reflect.TypeFor[int64]()), - traceMatchedFields("Field2", reflect.TypeFor[tf01](), "Field2", reflect.TypeFor[*aws01]()), - infoConvertingWithPath("Field2", reflect.TypeFor[fwtypes.StringEnum[testEnum]](), "Field2", 
reflect.TypeFor[testEnum]()), - traceExpandingNullValue("Field2", reflect.TypeFor[fwtypes.StringEnum[testEnum]](), "Field2", reflect.TypeFor[testEnum]()), - }, - }, - } - runAutoExpandTestCases(t, testCases) -} - -func TestExpandComplexNestedBlockWithStringEnum(t *testing.T) { - t.Parallel() - - type tf01 struct { - Field2 fwtypes.StringEnum[testEnum] `tfsdk:"field2"` - } - type tf02 struct { - Field1 types.Int64 `tfsdk:"field1"` - Field2 fwtypes.ListNestedObjectValueOf[tf01] `tfsdk:"field2"` - } - type aws02 struct { - Field2 testEnum - } - type aws01 struct { - Field1 int64 - Field2 *aws02 - } - - ctx := context.Background() - testCases := autoFlexTestCases{ - "single nested valid value": { - Source: &tf02{ - Field1: types.Int64Value(1), - Field2: fwtypes.NewListNestedObjectValueOfPtrMust(ctx, &tf01{ - Field2: fwtypes.StringEnumValue(testEnumList), - }), - }, - Target: &aws01{}, - WantTarget: &aws01{ - Field1: 1, - Field2: &aws02{ - Field2: testEnumList, - }, - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[*tf02](), reflect.TypeFor[*aws01]()), - infoConverting(reflect.TypeFor[tf02](), reflect.TypeFor[*aws01]()), - traceMatchedFields("Field1", reflect.TypeFor[tf02](), "Field1", reflect.TypeFor[*aws01]()), - infoConvertingWithPath("Field1", reflect.TypeFor[types.Int64](), "Field1", reflect.TypeFor[int64]()), - traceMatchedFields("Field2", reflect.TypeFor[tf02](), "Field2", reflect.TypeFor[*aws01]()), - infoConvertingWithPath("Field2", reflect.TypeFor[fwtypes.ListNestedObjectValueOf[tf01]](), "Field2", reflect.TypeFor[*aws02]()), - traceMatchedFieldsWithPath("Field2[0]", "Field2", reflect.TypeFor[tf01](), "Field2", "Field2", reflect.TypeFor[*aws02]()), - infoConvertingWithPath("Field2[0].Field2", reflect.TypeFor[fwtypes.StringEnum[testEnum]](), "Field2.Field2", reflect.TypeFor[testEnum]()), - }, - }, - "single nested null value": { - Source: &tf02{ - Field1: types.Int64Value(1), - Field2: fwtypes.NewListNestedObjectValueOfPtrMust(ctx, 
&tf01{ - Field2: fwtypes.StringEnumNull[testEnum](), - }), - }, - Target: &aws01{}, - WantTarget: &aws01{ - Field1: 1, - Field2: &aws02{ - Field2: "", - }, - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[*tf02](), reflect.TypeFor[*aws01]()), - infoConverting(reflect.TypeFor[tf02](), reflect.TypeFor[*aws01]()), - traceMatchedFields("Field1", reflect.TypeFor[tf02](), "Field1", reflect.TypeFor[*aws01]()), - infoConvertingWithPath("Field1", reflect.TypeFor[types.Int64](), "Field1", reflect.TypeFor[int64]()), - traceMatchedFields("Field2", reflect.TypeFor[tf02](), "Field2", reflect.TypeFor[*aws01]()), - infoConvertingWithPath("Field2", reflect.TypeFor[fwtypes.ListNestedObjectValueOf[tf01]](), "Field2", reflect.TypeFor[*aws02]()), - traceMatchedFieldsWithPath("Field2[0]", "Field2", reflect.TypeFor[tf01](), "Field2", "Field2", reflect.TypeFor[*aws02]()), - infoConvertingWithPath("Field2[0].Field2", reflect.TypeFor[fwtypes.StringEnum[testEnum]](), "Field2.Field2", reflect.TypeFor[testEnum]()), - traceExpandingNullValue("Field2[0].Field2", reflect.TypeFor[fwtypes.StringEnum[testEnum]](), "Field2.Field2", reflect.TypeFor[testEnum]()), - }, - }, - } - runAutoExpandTestCases(t, testCases) -} - -func TestExpandListOfNestedObjectField(t *testing.T) { - t.Parallel() - - ctx := context.Background() - - testCases := map[string]autoFlexTestCases{ - "ListNestedObject to *struct": { - "value": { - Source: &tfListOfNestedObject{ - Field1: fwtypes.NewListNestedObjectValueOfPtrMust(ctx, &tfSingleStringField{ - Field1: types.StringValue("a"), - }), - }, - Target: &awsNestedObjectPointer{}, - WantTarget: &awsNestedObjectPointer{ - Field1: &awsSingleStringValue{ - Field1: "a", - }, - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[*tfListOfNestedObject](), reflect.TypeFor[*awsNestedObjectPointer]()), - infoConverting(reflect.TypeFor[tfListOfNestedObject](), reflect.TypeFor[*awsNestedObjectPointer]()), - traceMatchedFields("Field1", 
reflect.TypeFor[tfListOfNestedObject](), "Field1", reflect.TypeFor[*awsNestedObjectPointer]()), - infoConvertingWithPath("Field1", reflect.TypeFor[fwtypes.ListNestedObjectValueOf[tfSingleStringField]](), "Field1", reflect.TypeFor[*awsSingleStringValue]()), - traceMatchedFieldsWithPath("Field1[0]", "Field1", reflect.TypeFor[tfSingleStringField](), "Field1", "Field1", reflect.TypeFor[*awsSingleStringValue]()), - infoConvertingWithPath("Field1[0].Field1", reflect.TypeFor[types.String](), "Field1.Field1", reflect.TypeFor[string]()), - }, - }, - }, - - "ListNestedObject to []struct": { - "empty": { - Source: &tfListOfNestedObject{ - Field1: fwtypes.NewListNestedObjectValueOfValueSliceMust(ctx, []tfSingleStringField{}), - }, - Target: &awsSliceOfNestedObjectValues{}, - WantTarget: &awsSliceOfNestedObjectValues{ - Field1: []awsSingleStringValue{}, - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[*tfListOfNestedObject](), reflect.TypeFor[*awsSliceOfNestedObjectValues]()), - infoConverting(reflect.TypeFor[tfListOfNestedObject](), reflect.TypeFor[*awsSliceOfNestedObjectValues]()), - traceMatchedFields("Field1", reflect.TypeFor[tfListOfNestedObject](), "Field1", reflect.TypeFor[*awsSliceOfNestedObjectValues]()), - infoConvertingWithPath("Field1", reflect.TypeFor[fwtypes.ListNestedObjectValueOf[tfSingleStringField]](), "Field1", reflect.TypeFor[[]awsSingleStringValue]()), - traceExpandingNestedObjectCollection("Field1", reflect.TypeFor[fwtypes.ListNestedObjectValueOf[tfSingleStringField]](), 0, "Field1", reflect.TypeFor[[]awsSingleStringValue]()), - }, - }, - "values": { - Source: &tfListOfNestedObject{Field1: fwtypes.NewListNestedObjectValueOfValueSliceMust(ctx, []tfSingleStringField{ - {Field1: types.StringValue("a")}, - {Field1: types.StringValue("b")}, - })}, - Target: &awsSliceOfNestedObjectValues{}, - WantTarget: &awsSliceOfNestedObjectValues{Field1: []awsSingleStringValue{ - {Field1: "a"}, - {Field1: "b"}, - }}, - expectedLogLines: 
[]map[string]any{ - infoExpanding(reflect.TypeFor[*tfListOfNestedObject](), reflect.TypeFor[*awsSliceOfNestedObjectValues]()), - infoConverting(reflect.TypeFor[tfListOfNestedObject](), reflect.TypeFor[*awsSliceOfNestedObjectValues]()), - traceMatchedFields("Field1", reflect.TypeFor[tfListOfNestedObject](), "Field1", reflect.TypeFor[*awsSliceOfNestedObjectValues]()), - infoConvertingWithPath("Field1", reflect.TypeFor[fwtypes.ListNestedObjectValueOf[tfSingleStringField]](), "Field1", reflect.TypeFor[[]awsSingleStringValue]()), - traceExpandingNestedObjectCollection("Field1", reflect.TypeFor[fwtypes.ListNestedObjectValueOf[tfSingleStringField]](), 2, "Field1", reflect.TypeFor[[]awsSingleStringValue]()), - traceMatchedFieldsWithPath("Field1[0]", "Field1", reflect.TypeFor[tfSingleStringField](), "Field1[0]", "Field1", reflect.TypeFor[*awsSingleStringValue]()), - infoConvertingWithPath("Field1[0].Field1", reflect.TypeFor[types.String](), "Field1[0].Field1", reflect.TypeFor[string]()), - traceMatchedFieldsWithPath("Field1[1]", "Field1", reflect.TypeFor[tfSingleStringField](), "Field1[1]", "Field1", reflect.TypeFor[*awsSingleStringValue]()), - infoConvertingWithPath("Field1[1].Field1", reflect.TypeFor[types.String](), "Field1[1].Field1", reflect.TypeFor[string]()), - }, - }, - }, - - "ListNestedObject to []*struct": { - "empty": { - Source: &tfListOfNestedObject{ - Field1: fwtypes.NewListNestedObjectValueOfSliceMust(ctx, []*tfSingleStringField{}), - }, - Target: &awsSliceOfNestedObjectPointers{}, - WantTarget: &awsSliceOfNestedObjectPointers{ - Field1: []*awsSingleStringValue{}, - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[*tfListOfNestedObject](), reflect.TypeFor[*awsSliceOfNestedObjectPointers]()), - infoConverting(reflect.TypeFor[tfListOfNestedObject](), reflect.TypeFor[*awsSliceOfNestedObjectPointers]()), - traceMatchedFields("Field1", reflect.TypeFor[tfListOfNestedObject](), "Field1", reflect.TypeFor[*awsSliceOfNestedObjectPointers]()), - 
infoConvertingWithPath("Field1", reflect.TypeFor[fwtypes.ListNestedObjectValueOf[tfSingleStringField]](), "Field1", reflect.TypeFor[[]*awsSingleStringValue]()), - traceExpandingNestedObjectCollection("Field1", reflect.TypeFor[fwtypes.ListNestedObjectValueOf[tfSingleStringField]](), 0, "Field1", reflect.TypeFor[[]*awsSingleStringValue]()), - }, - }, - "values": { - Source: &tfListOfNestedObject{ - Field1: fwtypes.NewListNestedObjectValueOfSliceMust(ctx, []*tfSingleStringField{ - {Field1: types.StringValue("a")}, - {Field1: types.StringValue("b")}, - }), - }, - Target: &awsSliceOfNestedObjectPointers{}, - WantTarget: &awsSliceOfNestedObjectPointers{ - Field1: []*awsSingleStringValue{ - {Field1: "a"}, - {Field1: "b"}, - }, - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[*tfListOfNestedObject](), reflect.TypeFor[*awsSliceOfNestedObjectPointers]()), - infoConverting(reflect.TypeFor[tfListOfNestedObject](), reflect.TypeFor[*awsSliceOfNestedObjectPointers]()), - traceMatchedFields("Field1", reflect.TypeFor[tfListOfNestedObject](), "Field1", reflect.TypeFor[*awsSliceOfNestedObjectPointers]()), - infoConvertingWithPath("Field1", reflect.TypeFor[fwtypes.ListNestedObjectValueOf[tfSingleStringField]](), "Field1", reflect.TypeFor[[]*awsSingleStringValue]()), - traceExpandingNestedObjectCollection("Field1", reflect.TypeFor[fwtypes.ListNestedObjectValueOf[tfSingleStringField]](), 2, "Field1", reflect.TypeFor[[]*awsSingleStringValue]()), - traceMatchedFieldsWithPath("Field1[0]", "Field1", reflect.TypeFor[tfSingleStringField](), "Field1[0]", "Field1", reflect.TypeFor[*awsSingleStringValue]()), - infoConvertingWithPath("Field1[0].Field1", reflect.TypeFor[types.String](), "Field1[0].Field1", reflect.TypeFor[string]()), - traceMatchedFieldsWithPath("Field1[1]", "Field1", reflect.TypeFor[tfSingleStringField](), "Field1[1]", "Field1", reflect.TypeFor[*awsSingleStringValue]()), - infoConvertingWithPath("Field1[1].Field1", reflect.TypeFor[types.String](), 
"Field1[1].Field1", reflect.TypeFor[string]()), - }, - }, - }, - } - - for testName, cases := range testCases { - t.Run(testName, func(t *testing.T) { - t.Parallel() - - runAutoExpandTestCases(t, cases) - }) - } -} - -func TestExpandSetOfNestedObjectField(t *testing.T) { - t.Parallel() - - ctx := context.Background() - - testCases := map[string]autoFlexTestCases{ - "SetNestedObject to *struct": { - "value": { - Source: &tfSetOfNestedObject{ - Field1: fwtypes.NewSetNestedObjectValueOfPtrMust(ctx, &tfSingleStringField{ - Field1: types.StringValue("a"), - }), - }, - Target: &awsNestedObjectPointer{}, - WantTarget: &awsNestedObjectPointer{ - Field1: &awsSingleStringValue{ - Field1: "a", - }, - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[*tfSetOfNestedObject](), reflect.TypeFor[*awsNestedObjectPointer]()), - infoConverting(reflect.TypeFor[tfSetOfNestedObject](), reflect.TypeFor[*awsNestedObjectPointer]()), - traceMatchedFields("Field1", reflect.TypeFor[tfSetOfNestedObject](), "Field1", reflect.TypeFor[*awsNestedObjectPointer]()), - infoConvertingWithPath("Field1", reflect.TypeFor[fwtypes.SetNestedObjectValueOf[tfSingleStringField]](), "Field1", reflect.TypeFor[*awsSingleStringValue]()), - traceMatchedFieldsWithPath("Field1[0]", "Field1", reflect.TypeFor[tfSingleStringField](), "Field1", "Field1", reflect.TypeFor[*awsSingleStringValue]()), - infoConvertingWithPath("Field1[0].Field1", reflect.TypeFor[types.String](), "Field1.Field1", reflect.TypeFor[string]()), - }, - }, - }, - - "SetNestedObject to []*struct": { - "empty": { - Source: &tfSetOfNestedObject{Field1: fwtypes.NewSetNestedObjectValueOfSliceMust(ctx, []*tfSingleStringField{})}, - Target: &awsSliceOfNestedObjectPointers{}, - WantTarget: &awsSliceOfNestedObjectPointers{Field1: []*awsSingleStringValue{}}, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[*tfSetOfNestedObject](), reflect.TypeFor[*awsSliceOfNestedObjectPointers]()), - 
infoConverting(reflect.TypeFor[tfSetOfNestedObject](), reflect.TypeFor[*awsSliceOfNestedObjectPointers]()), - traceMatchedFields("Field1", reflect.TypeFor[tfSetOfNestedObject](), "Field1", reflect.TypeFor[*awsSliceOfNestedObjectPointers]()), - infoConvertingWithPath("Field1", reflect.TypeFor[fwtypes.SetNestedObjectValueOf[tfSingleStringField]](), "Field1", reflect.TypeFor[[]*awsSingleStringValue]()), - traceExpandingNestedObjectCollection("Field1", reflect.TypeFor[fwtypes.SetNestedObjectValueOf[tfSingleStringField]](), 0, "Field1", reflect.TypeFor[[]*awsSingleStringValue]()), - }, - }, - "values": { - Source: &tfSetOfNestedObject{Field1: fwtypes.NewSetNestedObjectValueOfSliceMust(ctx, []*tfSingleStringField{ - {Field1: types.StringValue("a")}, - {Field1: types.StringValue("b")}, - })}, - Target: &awsSliceOfNestedObjectPointers{}, - WantTarget: &awsSliceOfNestedObjectPointers{Field1: []*awsSingleStringValue{ - {Field1: "a"}, - {Field1: "b"}, - }}, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[*tfSetOfNestedObject](), reflect.TypeFor[*awsSliceOfNestedObjectPointers]()), - infoConverting(reflect.TypeFor[tfSetOfNestedObject](), reflect.TypeFor[*awsSliceOfNestedObjectPointers]()), - traceMatchedFields("Field1", reflect.TypeFor[tfSetOfNestedObject](), "Field1", reflect.TypeFor[*awsSliceOfNestedObjectPointers]()), - infoConvertingWithPath("Field1", reflect.TypeFor[fwtypes.SetNestedObjectValueOf[tfSingleStringField]](), "Field1", reflect.TypeFor[[]*awsSingleStringValue]()), - traceExpandingNestedObjectCollection("Field1", reflect.TypeFor[fwtypes.SetNestedObjectValueOf[tfSingleStringField]](), 2, "Field1", reflect.TypeFor[[]*awsSingleStringValue]()), - traceMatchedFieldsWithPath("Field1[0]", "Field1", reflect.TypeFor[tfSingleStringField](), "Field1[0]", "Field1", reflect.TypeFor[*awsSingleStringValue]()), - infoConvertingWithPath("Field1[0].Field1", reflect.TypeFor[types.String](), "Field1[0].Field1", reflect.TypeFor[string]()), - 
traceMatchedFieldsWithPath("Field1[1]", "Field1", reflect.TypeFor[tfSingleStringField](), "Field1[1]", "Field1", reflect.TypeFor[*awsSingleStringValue]()), - infoConvertingWithPath("Field1[1].Field1", reflect.TypeFor[types.String](), "Field1[1].Field1", reflect.TypeFor[string]()), - }, - }, - }, - - "SetNestedObject to []struct": { - "values": { - Source: &tfSetOfNestedObject{Field1: fwtypes.NewSetNestedObjectValueOfValueSliceMust(ctx, []tfSingleStringField{ - {Field1: types.StringValue("a")}, - {Field1: types.StringValue("b")}, - })}, - Target: &awsSliceOfNestedObjectValues{}, - WantTarget: &awsSliceOfNestedObjectValues{Field1: []awsSingleStringValue{ - {Field1: "a"}, - {Field1: "b"}, - }}, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[*tfSetOfNestedObject](), reflect.TypeFor[*awsSliceOfNestedObjectValues]()), - infoConverting(reflect.TypeFor[tfSetOfNestedObject](), reflect.TypeFor[*awsSliceOfNestedObjectValues]()), - traceMatchedFields("Field1", reflect.TypeFor[tfSetOfNestedObject](), "Field1", reflect.TypeFor[*awsSliceOfNestedObjectValues]()), - infoConvertingWithPath("Field1", reflect.TypeFor[fwtypes.SetNestedObjectValueOf[tfSingleStringField]](), "Field1", reflect.TypeFor[[]awsSingleStringValue]()), - traceExpandingNestedObjectCollection("Field1", reflect.TypeFor[fwtypes.SetNestedObjectValueOf[tfSingleStringField]](), 2, "Field1", reflect.TypeFor[[]awsSingleStringValue]()), - traceMatchedFieldsWithPath("Field1[0]", "Field1", reflect.TypeFor[tfSingleStringField](), "Field1[0]", "Field1", reflect.TypeFor[*awsSingleStringValue]()), - infoConvertingWithPath("Field1[0].Field1", reflect.TypeFor[types.String](), "Field1[0].Field1", reflect.TypeFor[string]()), - traceMatchedFieldsWithPath("Field1[1]", "Field1", reflect.TypeFor[tfSingleStringField](), "Field1[1]", "Field1", reflect.TypeFor[*awsSingleStringValue]()), - infoConvertingWithPath("Field1[1].Field1", reflect.TypeFor[types.String](), "Field1[1].Field1", reflect.TypeFor[string]()), - }, 
- }, - }, - } - - for testName, cases := range testCases { - t.Run(testName, func(t *testing.T) { - t.Parallel() - - runAutoExpandTestCases(t, cases) - }) - } -} -func TestExpandMapBlock(t *testing.T) { - t.Parallel() - - ctx := context.Background() - - testCases := autoFlexTestCases{ - "nil map block key": { - Source: &tfMapBlockList{ - MapBlock: fwtypes.NewListNestedObjectValueOfNull[tfMapBlockElement](ctx), - }, - Target: &awsMapBlockValues{}, - WantTarget: &awsMapBlockValues{ - MapBlock: nil, - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[*tfMapBlockList](), reflect.TypeFor[*awsMapBlockValues]()), - infoConverting(reflect.TypeFor[tfMapBlockList](), reflect.TypeFor[*awsMapBlockValues]()), - traceMatchedFields("MapBlock", reflect.TypeFor[tfMapBlockList](), "MapBlock", reflect.TypeFor[*awsMapBlockValues]()), - infoConvertingWithPath("MapBlock", reflect.TypeFor[fwtypes.ListNestedObjectValueOf[tfMapBlockElement]](), "MapBlock", reflect.TypeFor[map[string]awsMapBlockElement]()), - traceExpandingNullValue("MapBlock", reflect.TypeFor[fwtypes.ListNestedObjectValueOf[tfMapBlockElement]](), "MapBlock", reflect.TypeFor[map[string]awsMapBlockElement]()), - }, - }, - "map block key list": { - Source: &tfMapBlockList{ - MapBlock: fwtypes.NewListNestedObjectValueOfValueSliceMust[tfMapBlockElement](ctx, []tfMapBlockElement{ - { - MapBlockKey: types.StringValue("x"), - Attr1: types.StringValue("a"), - Attr2: types.StringValue("b"), - }, - { - MapBlockKey: types.StringValue("y"), - Attr1: types.StringValue("c"), - Attr2: types.StringValue("d"), - }, - }), - }, - Target: &awsMapBlockValues{}, - WantTarget: &awsMapBlockValues{ - MapBlock: map[string]awsMapBlockElement{ - "x": { - Attr1: "a", - Attr2: "b", - }, - "y": { - Attr1: "c", - Attr2: "d", - }, - }, - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[*tfMapBlockList](), reflect.TypeFor[*awsMapBlockValues]()), - infoConverting(reflect.TypeFor[tfMapBlockList](), 
reflect.TypeFor[*awsMapBlockValues]()), - - traceMatchedFields("MapBlock", reflect.TypeFor[tfMapBlockList](), "MapBlock", reflect.TypeFor[*awsMapBlockValues]()), - infoConvertingWithPath("MapBlock", reflect.TypeFor[fwtypes.ListNestedObjectValueOf[tfMapBlockElement]](), "MapBlock", reflect.TypeFor[map[string]awsMapBlockElement]()), - - traceSkipMapBlockKey("MapBlock[0]", reflect.TypeFor[tfMapBlockElement](), "MapBlock[\"x\"]", reflect.TypeFor[*awsMapBlockElement]()), - traceMatchedFieldsWithPath("MapBlock[0]", "Attr1", reflect.TypeFor[tfMapBlockElement](), "MapBlock[\"x\"]", "Attr1", reflect.TypeFor[*awsMapBlockElement]()), - infoConvertingWithPath("MapBlock[0].Attr1", reflect.TypeFor[types.String](), "MapBlock[\"x\"].Attr1", reflect.TypeFor[string]()), - traceMatchedFieldsWithPath("MapBlock[0]", "Attr2", reflect.TypeFor[tfMapBlockElement](), "MapBlock[\"x\"]", "Attr2", reflect.TypeFor[*awsMapBlockElement]()), - infoConvertingWithPath("MapBlock[0].Attr2", reflect.TypeFor[types.String](), "MapBlock[\"x\"].Attr2", reflect.TypeFor[string]()), - - traceSkipMapBlockKey("MapBlock[1]", reflect.TypeFor[tfMapBlockElement](), "MapBlock[\"y\"]", reflect.TypeFor[*awsMapBlockElement]()), - traceMatchedFieldsWithPath("MapBlock[1]", "Attr1", reflect.TypeFor[tfMapBlockElement](), "MapBlock[\"y\"]", "Attr1", reflect.TypeFor[*awsMapBlockElement]()), - infoConvertingWithPath("MapBlock[1].Attr1", reflect.TypeFor[types.String](), "MapBlock[\"y\"].Attr1", reflect.TypeFor[string]()), - traceMatchedFieldsWithPath("MapBlock[1]", "Attr2", reflect.TypeFor[tfMapBlockElement](), "MapBlock[\"y\"]", "Attr2", reflect.TypeFor[*awsMapBlockElement]()), - infoConvertingWithPath("MapBlock[1].Attr2", reflect.TypeFor[types.String](), "MapBlock[\"y\"].Attr2", reflect.TypeFor[string]()), - }, - }, - "map block key set": { - Source: &tfMapBlockSet{ - MapBlock: fwtypes.NewSetNestedObjectValueOfValueSliceMust[tfMapBlockElement](ctx, []tfMapBlockElement{ - { - MapBlockKey: types.StringValue("x"), - Attr1: 
types.StringValue("a"), - Attr2: types.StringValue("b"), - }, - { - MapBlockKey: types.StringValue("y"), - Attr1: types.StringValue("c"), - Attr2: types.StringValue("d"), - }, - }), - }, - Target: &awsMapBlockValues{}, - WantTarget: &awsMapBlockValues{ - MapBlock: map[string]awsMapBlockElement{ - "x": { - Attr1: "a", - Attr2: "b", - }, - "y": { - Attr1: "c", - Attr2: "d", - }, - }, - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[*tfMapBlockSet](), reflect.TypeFor[*awsMapBlockValues]()), - infoConverting(reflect.TypeFor[tfMapBlockSet](), reflect.TypeFor[*awsMapBlockValues]()), - - traceMatchedFields("MapBlock", reflect.TypeFor[tfMapBlockSet](), "MapBlock", reflect.TypeFor[*awsMapBlockValues]()), - infoConvertingWithPath("MapBlock", reflect.TypeFor[fwtypes.SetNestedObjectValueOf[tfMapBlockElement]](), "MapBlock", reflect.TypeFor[map[string]awsMapBlockElement]()), - - traceSkipMapBlockKey("MapBlock[0]", reflect.TypeFor[tfMapBlockElement](), "MapBlock[\"x\"]", reflect.TypeFor[*awsMapBlockElement]()), - traceMatchedFieldsWithPath("MapBlock[0]", "Attr1", reflect.TypeFor[tfMapBlockElement](), "MapBlock[\"x\"]", "Attr1", reflect.TypeFor[*awsMapBlockElement]()), - infoConvertingWithPath("MapBlock[0].Attr1", reflect.TypeFor[types.String](), "MapBlock[\"x\"].Attr1", reflect.TypeFor[string]()), - traceMatchedFieldsWithPath("MapBlock[0]", "Attr2", reflect.TypeFor[tfMapBlockElement](), "MapBlock[\"x\"]", "Attr2", reflect.TypeFor[*awsMapBlockElement]()), - infoConvertingWithPath("MapBlock[0].Attr2", reflect.TypeFor[types.String](), "MapBlock[\"x\"].Attr2", reflect.TypeFor[string]()), - - traceSkipMapBlockKey("MapBlock[1]", reflect.TypeFor[tfMapBlockElement](), "MapBlock[\"y\"]", reflect.TypeFor[*awsMapBlockElement]()), - traceMatchedFieldsWithPath("MapBlock[1]", "Attr1", reflect.TypeFor[tfMapBlockElement](), "MapBlock[\"y\"]", "Attr1", reflect.TypeFor[*awsMapBlockElement]()), - infoConvertingWithPath("MapBlock[1].Attr1", reflect.TypeFor[types.String](), 
"MapBlock[\"y\"].Attr1", reflect.TypeFor[string]()), - traceMatchedFieldsWithPath("MapBlock[1]", "Attr2", reflect.TypeFor[tfMapBlockElement](), "MapBlock[\"y\"]", "Attr2", reflect.TypeFor[*awsMapBlockElement]()), - infoConvertingWithPath("MapBlock[1].Attr2", reflect.TypeFor[types.String](), "MapBlock[\"y\"].Attr2", reflect.TypeFor[string]()), - }, - }, - "map block key ptr source": { - Source: &tfMapBlockList{ - MapBlock: fwtypes.NewListNestedObjectValueOfSliceMust(ctx, []*tfMapBlockElement{ - { - MapBlockKey: types.StringValue("x"), - Attr1: types.StringValue("a"), - Attr2: types.StringValue("b"), - }, - { - MapBlockKey: types.StringValue("y"), - Attr1: types.StringValue("c"), - Attr2: types.StringValue("d"), - }, - }), - }, - Target: &awsMapBlockValues{}, - WantTarget: &awsMapBlockValues{ - MapBlock: map[string]awsMapBlockElement{ - "x": { - Attr1: "a", - Attr2: "b", - }, - "y": { - Attr1: "c", - Attr2: "d", - }, - }, - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[*tfMapBlockList](), reflect.TypeFor[*awsMapBlockValues]()), - infoConverting(reflect.TypeFor[tfMapBlockList](), reflect.TypeFor[*awsMapBlockValues]()), - - traceMatchedFields("MapBlock", reflect.TypeFor[tfMapBlockList](), "MapBlock", reflect.TypeFor[*awsMapBlockValues]()), - infoConvertingWithPath("MapBlock", reflect.TypeFor[fwtypes.ListNestedObjectValueOf[tfMapBlockElement]](), "MapBlock", reflect.TypeFor[map[string]awsMapBlockElement]()), - - traceSkipMapBlockKey("MapBlock[0]", reflect.TypeFor[tfMapBlockElement](), "MapBlock[\"x\"]", reflect.TypeFor[*awsMapBlockElement]()), - traceMatchedFieldsWithPath("MapBlock[0]", "Attr1", reflect.TypeFor[tfMapBlockElement](), "MapBlock[\"x\"]", "Attr1", reflect.TypeFor[*awsMapBlockElement]()), - infoConvertingWithPath("MapBlock[0].Attr1", reflect.TypeFor[types.String](), "MapBlock[\"x\"].Attr1", reflect.TypeFor[string]()), - traceMatchedFieldsWithPath("MapBlock[0]", "Attr2", reflect.TypeFor[tfMapBlockElement](), "MapBlock[\"x\"]", 
"Attr2", reflect.TypeFor[*awsMapBlockElement]()), - infoConvertingWithPath("MapBlock[0].Attr2", reflect.TypeFor[types.String](), "MapBlock[\"x\"].Attr2", reflect.TypeFor[string]()), - - traceSkipMapBlockKey("MapBlock[1]", reflect.TypeFor[tfMapBlockElement](), "MapBlock[\"y\"]", reflect.TypeFor[*awsMapBlockElement]()), - traceMatchedFieldsWithPath("MapBlock[1]", "Attr1", reflect.TypeFor[tfMapBlockElement](), "MapBlock[\"y\"]", "Attr1", reflect.TypeFor[*awsMapBlockElement]()), - infoConvertingWithPath("MapBlock[1].Attr1", reflect.TypeFor[types.String](), "MapBlock[\"y\"].Attr1", reflect.TypeFor[string]()), - traceMatchedFieldsWithPath("MapBlock[1]", "Attr2", reflect.TypeFor[tfMapBlockElement](), "MapBlock[\"y\"]", "Attr2", reflect.TypeFor[*awsMapBlockElement]()), - infoConvertingWithPath("MapBlock[1].Attr2", reflect.TypeFor[types.String](), "MapBlock[\"y\"].Attr2", reflect.TypeFor[string]()), - }, - }, - "map block key ptr both": { - Source: &tfMapBlockList{ - MapBlock: fwtypes.NewListNestedObjectValueOfSliceMust(ctx, []*tfMapBlockElement{ - { - MapBlockKey: types.StringValue("x"), - Attr1: types.StringValue("a"), - Attr2: types.StringValue("b"), - }, - { - MapBlockKey: types.StringValue("y"), - Attr1: types.StringValue("c"), - Attr2: types.StringValue("d"), - }, - }), - }, - Target: &awsMapBlockPointers{}, - WantTarget: &awsMapBlockPointers{ - MapBlock: map[string]*awsMapBlockElement{ - "x": { - Attr1: "a", - Attr2: "b", - }, - "y": { - Attr1: "c", - Attr2: "d", - }, - }, - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[*tfMapBlockList](), reflect.TypeFor[*awsMapBlockPointers]()), - infoConverting(reflect.TypeFor[tfMapBlockList](), reflect.TypeFor[*awsMapBlockPointers]()), - - traceMatchedFields("MapBlock", reflect.TypeFor[tfMapBlockList](), "MapBlock", reflect.TypeFor[*awsMapBlockPointers]()), - infoConvertingWithPath("MapBlock", reflect.TypeFor[fwtypes.ListNestedObjectValueOf[tfMapBlockElement]](), "MapBlock", 
reflect.TypeFor[map[string]*awsMapBlockElement]()), - - traceSkipMapBlockKey("MapBlock[0]", reflect.TypeFor[tfMapBlockElement](), "MapBlock[\"x\"]", reflect.TypeFor[*awsMapBlockElement]()), - traceMatchedFieldsWithPath("MapBlock[0]", "Attr1", reflect.TypeFor[tfMapBlockElement](), "MapBlock[\"x\"]", "Attr1", reflect.TypeFor[*awsMapBlockElement]()), - infoConvertingWithPath("MapBlock[0].Attr1", reflect.TypeFor[types.String](), "MapBlock[\"x\"].Attr1", reflect.TypeFor[string]()), - traceMatchedFieldsWithPath("MapBlock[0]", "Attr2", reflect.TypeFor[tfMapBlockElement](), "MapBlock[\"x\"]", "Attr2", reflect.TypeFor[*awsMapBlockElement]()), - infoConvertingWithPath("MapBlock[0].Attr2", reflect.TypeFor[types.String](), "MapBlock[\"x\"].Attr2", reflect.TypeFor[string]()), - - traceSkipMapBlockKey("MapBlock[1]", reflect.TypeFor[tfMapBlockElement](), "MapBlock[\"y\"]", reflect.TypeFor[*awsMapBlockElement]()), - traceMatchedFieldsWithPath("MapBlock[1]", "Attr1", reflect.TypeFor[tfMapBlockElement](), "MapBlock[\"y\"]", "Attr1", reflect.TypeFor[*awsMapBlockElement]()), - infoConvertingWithPath("MapBlock[1].Attr1", reflect.TypeFor[types.String](), "MapBlock[\"y\"].Attr1", reflect.TypeFor[string]()), - traceMatchedFieldsWithPath("MapBlock[1]", "Attr2", reflect.TypeFor[tfMapBlockElement](), "MapBlock[\"y\"]", "Attr2", reflect.TypeFor[*awsMapBlockElement]()), - infoConvertingWithPath("MapBlock[1].Attr2", reflect.TypeFor[types.String](), "MapBlock[\"y\"].Attr2", reflect.TypeFor[string]()), - }, - }, - "map block enum key": { - Source: &tfMapBlockListEnumKey{ - MapBlock: fwtypes.NewListNestedObjectValueOfValueSliceMust[tfMapBlockElementEnumKey](ctx, []tfMapBlockElementEnumKey{ - { - MapBlockKey: fwtypes.StringEnumValue(testEnumList), - Attr1: types.StringValue("a"), - Attr2: types.StringValue("b"), - }, - { - MapBlockKey: fwtypes.StringEnumValue(testEnumScalar), - Attr1: types.StringValue("c"), - Attr2: types.StringValue("d"), - }, - }), - }, - Target: &awsMapBlockValues{}, - 
WantTarget: &awsMapBlockValues{ - MapBlock: map[string]awsMapBlockElement{ - string(testEnumList): { - Attr1: "a", - Attr2: "b", - }, - string(testEnumScalar): { - Attr1: "c", - Attr2: "d", - }, - }, - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[*tfMapBlockListEnumKey](), reflect.TypeFor[*awsMapBlockValues]()), - infoConverting(reflect.TypeFor[tfMapBlockListEnumKey](), reflect.TypeFor[*awsMapBlockValues]()), - - traceMatchedFields("MapBlock", reflect.TypeFor[tfMapBlockListEnumKey](), "MapBlock", reflect.TypeFor[*awsMapBlockValues]()), - infoConvertingWithPath("MapBlock", reflect.TypeFor[fwtypes.ListNestedObjectValueOf[tfMapBlockElementEnumKey]](), "MapBlock", reflect.TypeFor[map[string]awsMapBlockElement]()), - - traceSkipMapBlockKey("MapBlock[0]", reflect.TypeFor[tfMapBlockElementEnumKey](), "MapBlock[\"List\"]", reflect.TypeFor[*awsMapBlockElement]()), - traceMatchedFieldsWithPath("MapBlock[0]", "Attr1", reflect.TypeFor[tfMapBlockElementEnumKey](), "MapBlock[\"List\"]", "Attr1", reflect.TypeFor[*awsMapBlockElement]()), - infoConvertingWithPath("MapBlock[0].Attr1", reflect.TypeFor[types.String](), "MapBlock[\"List\"].Attr1", reflect.TypeFor[string]()), - traceMatchedFieldsWithPath("MapBlock[0]", "Attr2", reflect.TypeFor[tfMapBlockElementEnumKey](), "MapBlock[\"List\"]", "Attr2", reflect.TypeFor[*awsMapBlockElement]()), - infoConvertingWithPath("MapBlock[0].Attr2", reflect.TypeFor[types.String](), "MapBlock[\"List\"].Attr2", reflect.TypeFor[string]()), - - traceSkipMapBlockKey("MapBlock[1]", reflect.TypeFor[tfMapBlockElementEnumKey](), "MapBlock[\"Scalar\"]", reflect.TypeFor[*awsMapBlockElement]()), - traceMatchedFieldsWithPath("MapBlock[1]", "Attr1", reflect.TypeFor[tfMapBlockElementEnumKey](), "MapBlock[\"Scalar\"]", "Attr1", reflect.TypeFor[*awsMapBlockElement]()), - infoConvertingWithPath("MapBlock[1].Attr1", reflect.TypeFor[types.String](), "MapBlock[\"Scalar\"].Attr1", reflect.TypeFor[string]()), - 
traceMatchedFieldsWithPath("MapBlock[1]", "Attr2", reflect.TypeFor[tfMapBlockElementEnumKey](), "MapBlock[\"Scalar\"]", "Attr2", reflect.TypeFor[*awsMapBlockElement]()), - infoConvertingWithPath("MapBlock[1].Attr2", reflect.TypeFor[types.String](), "MapBlock[\"Scalar\"].Attr2", reflect.TypeFor[string]()), - }, - }, - - "map block list no key": { - Source: &tfMapBlockListNoKey{ - MapBlock: fwtypes.NewListNestedObjectValueOfValueSliceMust[tfMapBlockElementNoKey](ctx, []tfMapBlockElementNoKey{ - { - Attr1: types.StringValue("a"), - Attr2: types.StringValue("b"), - }, - { - Attr1: types.StringValue("c"), - Attr2: types.StringValue("d"), - }, - }), - }, - Target: &awsMapBlockValues{}, - expectedDiags: diag.Diagnostics{ - diagExpandingNoMapBlockKey(reflect.TypeFor[tfMapBlockElementNoKey]()), - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[*tfMapBlockListNoKey](), reflect.TypeFor[*awsMapBlockValues]()), - infoConverting(reflect.TypeFor[tfMapBlockListNoKey](), reflect.TypeFor[*awsMapBlockValues]()), - traceMatchedFields("MapBlock", reflect.TypeFor[tfMapBlockListNoKey](), "MapBlock", reflect.TypeFor[*awsMapBlockValues]()), - infoConvertingWithPath("MapBlock", reflect.TypeFor[fwtypes.ListNestedObjectValueOf[tfMapBlockElementNoKey]](), "MapBlock", reflect.TypeFor[map[string]awsMapBlockElement]()), - errorSourceHasNoMapBlockKey("MapBlock[0]", reflect.TypeFor[tfMapBlockElementNoKey](), "MapBlock", reflect.TypeFor[awsMapBlockElement]()), - }, - }, - } - runAutoExpandTestCases(t, testCases) -} - -func TestExpandOptions(t *testing.T) { - t.Parallel() - - type tf01 struct { - Field1 types.Bool `tfsdk:"field1"` - Tags fwtypes.MapValueOf[types.String] `tfsdk:"tags"` - } - type aws01 struct { - Field1 bool - Tags map[string]string - } - - ctx := context.Background() - testCases := autoFlexTestCases{ - "empty source with tags": { - Source: &tf01{}, - Target: &aws01{}, - WantTarget: &aws01{}, - expectedLogLines: []map[string]any{ - 
infoExpanding(reflect.TypeFor[*tf01](), reflect.TypeFor[*aws01]()), - infoConverting(reflect.TypeFor[tf01](), reflect.TypeFor[*aws01]()), - traceMatchedFields("Field1", reflect.TypeFor[tf01](), "Field1", reflect.TypeFor[*aws01]()), - infoConvertingWithPath("Field1", reflect.TypeFor[types.Bool](), "Field1", reflect.TypeFor[bool]()), - traceExpandingNullValue("Field1", reflect.TypeFor[types.Bool](), "Field1", reflect.TypeFor[bool]()), - traceSkipIgnoredSourceField(reflect.TypeFor[tf01](), "Tags", reflect.TypeFor[*aws01]()), - }, - }, - "ignore tags by default": { - Source: &tf01{ - Field1: types.BoolValue(true), - Tags: fwtypes.NewMapValueOfMust[types.String](ctx, map[string]attr.Value{ - "foo": types.StringValue("bar"), - }, - ), - }, - Target: &aws01{}, - WantTarget: &aws01{Field1: true}, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[*tf01](), reflect.TypeFor[*aws01]()), - infoConverting(reflect.TypeFor[tf01](), reflect.TypeFor[*aws01]()), - traceMatchedFields("Field1", reflect.TypeFor[tf01](), "Field1", reflect.TypeFor[*aws01]()), - infoConvertingWithPath("Field1", reflect.TypeFor[types.Bool](), "Field1", reflect.TypeFor[bool]()), - traceSkipIgnoredSourceField(reflect.TypeFor[tf01](), "Tags", reflect.TypeFor[*aws01]()), - }, - }, - "include tags with option override": { - Options: []AutoFlexOptionsFunc{WithNoIgnoredFieldNames()}, - Source: &tf01{ - Field1: types.BoolValue(true), - Tags: fwtypes.NewMapValueOfMust[types.String](ctx, map[string]attr.Value{ - "foo": types.StringValue("bar"), - }, - ), - }, - Target: &aws01{}, - WantTarget: &aws01{ - Field1: true, - Tags: map[string]string{"foo": "bar"}, - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[*tf01](), reflect.TypeFor[*aws01]()), - infoConverting(reflect.TypeFor[tf01](), reflect.TypeFor[*aws01]()), - traceMatchedFields("Field1", reflect.TypeFor[tf01](), "Field1", reflect.TypeFor[*aws01]()), - infoConvertingWithPath("Field1", reflect.TypeFor[types.Bool](), 
"Field1", reflect.TypeFor[bool]()), - traceMatchedFields("Tags", reflect.TypeFor[tf01](), "Tags", reflect.TypeFor[*aws01]()), - infoConvertingWithPath("Tags", reflect.TypeFor[fwtypes.MapValueOf[types.String]](), "Tags", reflect.TypeFor[map[string]string]()), - traceExpandingWithElementsAs("Tags", reflect.TypeFor[fwtypes.MapValueOf[types.String]](), 1, "Tags", reflect.TypeFor[map[string]string]()), - }, - }, - "ignore custom field": { - Options: []AutoFlexOptionsFunc{WithIgnoredFieldNames([]string{"Field1"})}, - Source: &tf01{ - Field1: types.BoolValue(true), - Tags: fwtypes.NewMapValueOfMust[types.String](ctx, map[string]attr.Value{ - "foo": types.StringValue("bar"), - }, - ), - }, - Target: &aws01{}, - WantTarget: &aws01{ - Tags: map[string]string{"foo": "bar"}, - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[*tf01](), reflect.TypeFor[*aws01]()), - infoConverting(reflect.TypeFor[tf01](), reflect.TypeFor[*aws01]()), - traceSkipIgnoredSourceField(reflect.TypeFor[tf01](), "Field1", reflect.TypeFor[*aws01]()), - traceMatchedFields("Tags", reflect.TypeFor[tf01](), "Tags", reflect.TypeFor[*aws01]()), - infoConvertingWithPath("Tags", reflect.TypeFor[fwtypes.MapValueOf[types.String]](), "Tags", reflect.TypeFor[map[string]string]()), - traceExpandingWithElementsAs("Tags", reflect.TypeFor[fwtypes.MapValueOf[types.String]](), 1, "Tags", reflect.TypeFor[map[string]string]()), - }, - }, - } - runAutoExpandTestCases(t, testCases) -} - -func TestExpandIgnoreStructTag(t *testing.T) { - t.Parallel() - - testCases := autoFlexTestCases{ - "to value": { - Source: tfSingleStringFieldIgnore{ - Field1: types.StringValue("value1"), - }, - Target: &awsSingleStringValue{}, - WantTarget: &awsSingleStringValue{}, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[tfSingleStringFieldIgnore](), reflect.TypeFor[*awsSingleStringValue]()), - infoConverting(reflect.TypeFor[tfSingleStringFieldIgnore](), reflect.TypeFor[*awsSingleStringValue]()), - 
traceSkipIgnoredSourceField(reflect.TypeFor[tfSingleStringFieldIgnore](), "Field1", reflect.TypeFor[*awsSingleStringValue]()), - }, - }, - "to pointer": { - Source: tfSingleStringFieldIgnore{ - Field1: types.StringValue("value1"), - }, - Target: &awsSingleStringPointer{}, - WantTarget: &awsSingleStringPointer{}, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[tfSingleStringFieldIgnore](), reflect.TypeFor[*awsSingleStringPointer]()), - infoConverting(reflect.TypeFor[tfSingleStringFieldIgnore](), reflect.TypeFor[*awsSingleStringPointer]()), - traceSkipIgnoredSourceField(reflect.TypeFor[tfSingleStringFieldIgnore](), "Field1", reflect.TypeFor[*awsSingleStringPointer]()), - }, - }, - } - - runAutoExpandTestCases(t, testCases) -} - -func TestExpandInterface(t *testing.T) { - t.Parallel() - - ctx := context.Background() - - var targetInterface awsInterfaceInterface - - testCases := autoFlexTestCases{ - "top level": { - Source: tfInterfaceFlexer{ - Field1: types.StringValue("value1"), - }, - Target: &targetInterface, - WantTarget: testFlexAWSInterfaceInterfacePtr(&awsInterfaceInterfaceImpl{ - AWSField: "value1", - }), - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[tfInterfaceFlexer](), reflect.TypeFor[*awsInterfaceInterface]()), - infoConverting(reflect.TypeFor[tfInterfaceFlexer](), reflect.TypeFor[awsInterfaceInterface]()), - infoSourceImplementsFlexExpander("", reflect.TypeFor[tfInterfaceFlexer](), "", reflect.TypeFor[awsInterfaceInterface]()), - }, - }, - "top level return value does not implement target interface": { - Source: tfInterfaceIncompatibleExpander{ - Field1: types.StringValue("value1"), - }, - Target: &targetInterface, - expectedDiags: diag.Diagnostics{ - diagExpandedTypeDoesNotImplement(reflect.TypeFor[*awsInterfaceIncompatibleImpl](), reflect.TypeFor[awsInterfaceInterface]()), - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[tfInterfaceIncompatibleExpander](), 
reflect.TypeFor[*awsInterfaceInterface]()), - infoConverting(reflect.TypeFor[tfInterfaceIncompatibleExpander](), reflect.TypeFor[awsInterfaceInterface]()), - infoSourceImplementsFlexExpander("", reflect.TypeFor[tfInterfaceIncompatibleExpander](), "", reflect.TypeFor[awsInterfaceInterface]()), - }, - }, - "single list Source and single interface Target": { - Source: tfListNestedObject[tfInterfaceFlexer]{ - Field1: fwtypes.NewListNestedObjectValueOfValueSliceMust(ctx, []tfInterfaceFlexer{ - { - Field1: types.StringValue("value1"), - }, - }), - }, - Target: &awsInterfaceSingle{}, - WantTarget: &awsInterfaceSingle{ - Field1: &awsInterfaceInterfaceImpl{ - AWSField: "value1", - }, - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[tfListNestedObject[tfInterfaceFlexer]](), reflect.TypeFor[*awsInterfaceSingle]()), - infoConverting(reflect.TypeFor[tfListNestedObject[tfInterfaceFlexer]](), reflect.TypeFor[*awsInterfaceSingle]()), - traceMatchedFields("Field1", reflect.TypeFor[tfListNestedObject[tfInterfaceFlexer]](), "Field1", reflect.TypeFor[*awsInterfaceSingle]()), - infoConvertingWithPath("Field1", reflect.TypeFor[fwtypes.ListNestedObjectValueOf[tfInterfaceFlexer]](), "Field1", reflect.TypeFor[awsInterfaceInterface]()), - infoSourceImplementsFlexExpander("Field1[0]", reflect.TypeFor[tfInterfaceFlexer](), "Field1", reflect.TypeFor[*awsInterfaceInterface]()), - }, - }, - "single list non-Expander Source and single interface Target": { - Source: tfListNestedObject[tfSingleStringField]{ - Field1: fwtypes.NewListNestedObjectValueOfValueSliceMust(ctx, []tfSingleStringField{ - { - Field1: types.StringValue("value1"), - }, - }), - }, - Target: &awsInterfaceSingle{}, - WantTarget: &awsInterfaceSingle{ - Field1: nil, - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[tfListNestedObject[tfSingleStringField]](), reflect.TypeFor[*awsInterfaceSingle]()), - infoConverting(reflect.TypeFor[tfListNestedObject[tfSingleStringField]](), 
reflect.TypeFor[*awsInterfaceSingle]()), - traceMatchedFields("Field1", reflect.TypeFor[tfListNestedObject[tfSingleStringField]](), "Field1", reflect.TypeFor[*awsInterfaceSingle]()), - infoConvertingWithPath("Field1", reflect.TypeFor[fwtypes.ListNestedObjectValueOf[tfSingleStringField]](), "Field1", reflect.TypeFor[awsInterfaceInterface]()), - { - "@level": "error", - "@module": "provider.autoflex", - "@message": "AutoFlex Expand; incompatible types", - "from": map[string]any{}, - "to": float64(reflect.Interface), - logAttrKeySourcePath: "Field1[0]", - logAttrKeySourceType: fullTypeName(reflect.TypeFor[tfSingleStringField]()), - logAttrKeyTargetPath: "Field1", - logAttrKeyTargetType: fullTypeName(reflect.TypeFor[*awsInterfaceInterface]()), - }, - }, - }, - "single set Source and single interface Target": { - Source: tfSetNestedObject[tfInterfaceFlexer]{ - Field1: fwtypes.NewSetNestedObjectValueOfValueSliceMust(ctx, []tfInterfaceFlexer{ - { - Field1: types.StringValue("value1"), - }, - }), - }, - Target: &awsInterfaceSingle{}, - WantTarget: &awsInterfaceSingle{ - Field1: &awsInterfaceInterfaceImpl{ - AWSField: "value1", - }, - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[tfSetNestedObject[tfInterfaceFlexer]](), reflect.TypeFor[*awsInterfaceSingle]()), - infoConverting(reflect.TypeFor[tfSetNestedObject[tfInterfaceFlexer]](), reflect.TypeFor[*awsInterfaceSingle]()), - traceMatchedFields("Field1", reflect.TypeFor[tfSetNestedObject[tfInterfaceFlexer]](), "Field1", reflect.TypeFor[*awsInterfaceSingle]()), - infoConvertingWithPath("Field1", reflect.TypeFor[fwtypes.SetNestedObjectValueOf[tfInterfaceFlexer]](), "Field1", reflect.TypeFor[awsInterfaceInterface]()), - infoSourceImplementsFlexExpander("Field1[0]", reflect.TypeFor[tfInterfaceFlexer](), "Field1", reflect.TypeFor[*awsInterfaceInterface]()), - }, - }, - "empty list Source and empty interface Target": { - Source: tfListNestedObject[tfInterfaceFlexer]{ - Field1: 
fwtypes.NewListNestedObjectValueOfValueSliceMust(ctx, []tfInterfaceFlexer{}), - }, - Target: &awsInterfaceSlice{}, - WantTarget: &awsInterfaceSlice{ - Field1: []awsInterfaceInterface{}, - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[tfListNestedObject[tfInterfaceFlexer]](), reflect.TypeFor[*awsInterfaceSlice]()), - infoConverting(reflect.TypeFor[tfListNestedObject[tfInterfaceFlexer]](), reflect.TypeFor[*awsInterfaceSlice]()), - traceMatchedFields("Field1", reflect.TypeFor[tfListNestedObject[tfInterfaceFlexer]](), "Field1", reflect.TypeFor[*awsInterfaceSlice]()), - infoConvertingWithPath("Field1", reflect.TypeFor[fwtypes.ListNestedObjectValueOf[tfInterfaceFlexer]](), "Field1", reflect.TypeFor[[]awsInterfaceInterface]()), - traceExpandingNestedObjectCollection("Field1", reflect.TypeFor[fwtypes.ListNestedObjectValueOf[tfInterfaceFlexer]](), 0, "Field1", reflect.TypeFor[[]awsInterfaceInterface]()), - }, - }, - "non-empty list Source and non-empty interface Target": { - Source: tfListNestedObject[tfInterfaceFlexer]{ - Field1: fwtypes.NewListNestedObjectValueOfValueSliceMust(ctx, []tfInterfaceFlexer{ - { - Field1: types.StringValue("value1"), - }, - { - Field1: types.StringValue("value2"), - }, - }), - }, - Target: &awsInterfaceSlice{}, - WantTarget: &awsInterfaceSlice{ - Field1: []awsInterfaceInterface{ - &awsInterfaceInterfaceImpl{ - AWSField: "value1", - }, - &awsInterfaceInterfaceImpl{ - AWSField: "value2", - }, - }, - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[tfListNestedObject[tfInterfaceFlexer]](), reflect.TypeFor[*awsInterfaceSlice]()), - infoConverting(reflect.TypeFor[tfListNestedObject[tfInterfaceFlexer]](), reflect.TypeFor[*awsInterfaceSlice]()), - traceMatchedFields("Field1", reflect.TypeFor[tfListNestedObject[tfInterfaceFlexer]](), "Field1", reflect.TypeFor[*awsInterfaceSlice]()), - infoConvertingWithPath("Field1", reflect.TypeFor[fwtypes.ListNestedObjectValueOf[tfInterfaceFlexer]](), "Field1", 
reflect.TypeFor[[]awsInterfaceInterface]()), - traceExpandingNestedObjectCollection("Field1", reflect.TypeFor[fwtypes.ListNestedObjectValueOf[tfInterfaceFlexer]](), 2, "Field1", reflect.TypeFor[[]awsInterfaceInterface]()), - infoSourceImplementsFlexExpander("Field1[0]", reflect.TypeFor[tfInterfaceFlexer](), "Field1[0]", reflect.TypeFor[*awsInterfaceInterface]()), - infoSourceImplementsFlexExpander("Field1[1]", reflect.TypeFor[tfInterfaceFlexer](), "Field1[1]", reflect.TypeFor[*awsInterfaceInterface]()), - }, - }, - "empty set Source and empty interface Target": { - Source: tfSetNestedObject[tfInterfaceFlexer]{ - Field1: fwtypes.NewSetNestedObjectValueOfValueSliceMust(ctx, []tfInterfaceFlexer{}), - }, - Target: &awsInterfaceSlice{}, - WantTarget: &awsInterfaceSlice{ - Field1: []awsInterfaceInterface{}, - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[tfSetNestedObject[tfInterfaceFlexer]](), reflect.TypeFor[*awsInterfaceSlice]()), - infoConverting(reflect.TypeFor[tfSetNestedObject[tfInterfaceFlexer]](), reflect.TypeFor[*awsInterfaceSlice]()), - traceMatchedFields("Field1", reflect.TypeFor[tfSetNestedObject[tfInterfaceFlexer]](), "Field1", reflect.TypeFor[*awsInterfaceSlice]()), - infoConvertingWithPath("Field1", reflect.TypeFor[fwtypes.SetNestedObjectValueOf[tfInterfaceFlexer]](), "Field1", reflect.TypeFor[[]awsInterfaceInterface]()), - traceExpandingNestedObjectCollection("Field1", reflect.TypeFor[fwtypes.SetNestedObjectValueOf[tfInterfaceFlexer]](), 0, "Field1", reflect.TypeFor[[]awsInterfaceInterface]()), - }, - }, - "non-empty set Source and non-empty interface Target": { - Source: tfSetNestedObject[tfInterfaceFlexer]{ - Field1: fwtypes.NewSetNestedObjectValueOfValueSliceMust(ctx, []tfInterfaceFlexer{ - { - Field1: types.StringValue("value1"), - }, - { - Field1: types.StringValue("value2"), - }, - }), - }, - Target: &awsInterfaceSlice{}, - WantTarget: &awsInterfaceSlice{ - Field1: []awsInterfaceInterface{ - &awsInterfaceInterfaceImpl{ - 
AWSField: "value1", - }, - &awsInterfaceInterfaceImpl{ - AWSField: "value2", - }, - }, - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[tfSetNestedObject[tfInterfaceFlexer]](), reflect.TypeFor[*awsInterfaceSlice]()), - infoConverting(reflect.TypeFor[tfSetNestedObject[tfInterfaceFlexer]](), reflect.TypeFor[*awsInterfaceSlice]()), - traceMatchedFields("Field1", reflect.TypeFor[tfSetNestedObject[tfInterfaceFlexer]](), "Field1", reflect.TypeFor[*awsInterfaceSlice]()), - infoConvertingWithPath("Field1", reflect.TypeFor[fwtypes.SetNestedObjectValueOf[tfInterfaceFlexer]](), "Field1", reflect.TypeFor[[]awsInterfaceInterface]()), - traceExpandingNestedObjectCollection("Field1", reflect.TypeFor[fwtypes.SetNestedObjectValueOf[tfInterfaceFlexer]](), 2, "Field1", reflect.TypeFor[[]awsInterfaceInterface]()), - infoSourceImplementsFlexExpander("Field1[0]", reflect.TypeFor[tfInterfaceFlexer](), "Field1[0]", reflect.TypeFor[*awsInterfaceInterface]()), - infoSourceImplementsFlexExpander("Field1[1]", reflect.TypeFor[tfInterfaceFlexer](), "Field1[1]", reflect.TypeFor[*awsInterfaceInterface]()), - }, - }, - "object value Source and struct Target": { - Source: tfObjectValue[tfInterfaceFlexer]{ - Field1: fwtypes.NewObjectValueOfMust(ctx, &tfInterfaceFlexer{ - Field1: types.StringValue("value1"), - }), - }, - Target: &awsInterfaceSingle{}, - WantTarget: &awsInterfaceSingle{ - Field1: &awsInterfaceInterfaceImpl{ - AWSField: "value1", - }, - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[tfObjectValue[tfInterfaceFlexer]](), reflect.TypeFor[*awsInterfaceSingle]()), - infoConverting(reflect.TypeFor[tfObjectValue[tfInterfaceFlexer]](), reflect.TypeFor[*awsInterfaceSingle]()), - traceMatchedFields("Field1", reflect.TypeFor[tfObjectValue[tfInterfaceFlexer]](), "Field1", reflect.TypeFor[*awsInterfaceSingle]()), - infoConvertingWithPath("Field1", reflect.TypeFor[fwtypes.ObjectValueOf[tfInterfaceFlexer]](), "Field1", 
reflect.TypeFor[awsInterfaceInterface]()), - infoSourceImplementsFlexExpander("Field1", reflect.TypeFor[tfInterfaceFlexer](), "Field1", reflect.TypeFor[*awsInterfaceInterface]()), - }, - }, - } - runAutoExpandTestCases(t, testCases) -} - -func testFlexAWSInterfaceInterfacePtr(v awsInterfaceInterface) *awsInterfaceInterface { // nosemgrep:ci.aws-in-func-name - return &v -} - -func TestExpandExpander(t *testing.T) { - t.Parallel() - - ctx := context.Background() - - testCases := autoFlexTestCases{ - "top level struct Target": { - Source: tfFlexer{ - Field1: types.StringValue("value1"), - }, - Target: &awsExpander{}, - WantTarget: &awsExpander{ - AWSField: "value1", - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[tfFlexer](), reflect.TypeFor[*awsExpander]()), - infoConverting(reflect.TypeFor[tfFlexer](), reflect.TypeFor[*awsExpander]()), - infoSourceImplementsFlexExpander("", reflect.TypeFor[tfFlexer](), "", reflect.TypeFor[*awsExpander]()), - }, - }, - "top level string Target": { - Source: tfExpanderToString{ - Field1: types.StringValue("value1"), - }, - Target: aws.String(""), - WantTarget: aws.String("value1"), - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[tfExpanderToString](), reflect.TypeFor[*string]()), - infoConverting(reflect.TypeFor[tfExpanderToString](), reflect.TypeFor[string]()), - infoSourceImplementsFlexExpander("", reflect.TypeFor[tfExpanderToString](), "", reflect.TypeFor[string]()), - }, - }, - "top level incompatible struct Target": { - Source: tfFlexer{ - Field1: types.StringValue("value1"), - }, - Target: &awsExpanderIncompatible{}, - expectedDiags: diag.Diagnostics{ - diagCannotBeAssigned(reflect.TypeFor[awsExpander](), reflect.TypeFor[awsExpanderIncompatible]()), - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[tfFlexer](), reflect.TypeFor[*awsExpanderIncompatible]()), - infoConverting(reflect.TypeFor[tfFlexer](), reflect.TypeFor[*awsExpanderIncompatible]()), - 
infoSourceImplementsFlexExpander("", reflect.TypeFor[tfFlexer](), "", reflect.TypeFor[*awsExpanderIncompatible]()), - }, - }, - "top level expands to nil": { - Source: tfExpanderToNil{ - Field1: types.StringValue("value1"), - }, - Target: &awsExpander{}, - expectedDiags: diag.Diagnostics{ - diagExpandsToNil(reflect.TypeFor[tfExpanderToNil]()), - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[tfExpanderToNil](), reflect.TypeFor[*awsExpander]()), - infoConverting(reflect.TypeFor[tfExpanderToNil](), reflect.TypeFor[*awsExpander]()), - infoSourceImplementsFlexExpander("", reflect.TypeFor[tfExpanderToNil](), "", reflect.TypeFor[*awsExpander]()), - }, - }, - "top level incompatible non-struct Target": { - Source: tfExpanderToString{ - Field1: types.StringValue("value1"), - }, - Target: aws.Int64(0), - expectedDiags: diag.Diagnostics{ - diagCannotBeAssigned(reflect.TypeFor[string](), reflect.TypeFor[int64]()), - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[tfExpanderToString](), reflect.TypeFor[*int64]()), - infoConverting(reflect.TypeFor[tfExpanderToString](), reflect.TypeFor[int64]()), - infoSourceImplementsFlexExpander("", reflect.TypeFor[tfExpanderToString](), "", reflect.TypeFor[int64]()), - }, - }, - "single list Source and single struct Target": { - Source: tfExpanderListNestedObject{ - Field1: fwtypes.NewListNestedObjectValueOfValueSliceMust(ctx, []tfFlexer{ - { - Field1: types.StringValue("value1"), - }, - }), - }, - Target: &awsExpanderSingleStruct{}, - WantTarget: &awsExpanderSingleStruct{ - Field1: awsExpander{ - AWSField: "value1", - }, - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[tfExpanderListNestedObject](), reflect.TypeFor[*awsExpanderSingleStruct]()), - infoConverting(reflect.TypeFor[tfExpanderListNestedObject](), reflect.TypeFor[*awsExpanderSingleStruct]()), - traceMatchedFields("Field1", reflect.TypeFor[tfExpanderListNestedObject](), "Field1", 
reflect.TypeFor[*awsExpanderSingleStruct]()), - infoConvertingWithPath("Field1", reflect.TypeFor[fwtypes.ListNestedObjectValueOf[tfFlexer]](), "Field1", reflect.TypeFor[awsExpander]()), - infoSourceImplementsFlexExpander("Field1[0]", reflect.TypeFor[tfFlexer](), "Field1", reflect.TypeFor[*awsExpander]()), - }, - }, - "single set Source and single struct Target": { - Source: tfExpanderSetNestedObject{ - Field1: fwtypes.NewSetNestedObjectValueOfValueSliceMust(ctx, []tfFlexer{ - { - Field1: types.StringValue("value1"), - }, - }), - }, - Target: &awsExpanderSingleStruct{}, - WantTarget: &awsExpanderSingleStruct{ - Field1: awsExpander{ - AWSField: "value1", - }, - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[tfExpanderSetNestedObject](), reflect.TypeFor[*awsExpanderSingleStruct]()), - infoConverting(reflect.TypeFor[tfExpanderSetNestedObject](), reflect.TypeFor[*awsExpanderSingleStruct]()), - traceMatchedFields("Field1", reflect.TypeFor[tfExpanderSetNestedObject](), "Field1", reflect.TypeFor[*awsExpanderSingleStruct]()), - infoConvertingWithPath("Field1", reflect.TypeFor[fwtypes.SetNestedObjectValueOf[tfFlexer]](), "Field1", reflect.TypeFor[awsExpander]()), - infoSourceImplementsFlexExpander("Field1[0]", reflect.TypeFor[tfFlexer](), "Field1", reflect.TypeFor[*awsExpander]()), - }, - }, - "single list Source and single *struct Target": { - Source: tfExpanderListNestedObject{ - Field1: fwtypes.NewListNestedObjectValueOfValueSliceMust(ctx, []tfFlexer{ - { - Field1: types.StringValue("value1"), - }, - }), - }, - Target: &awsExpanderSinglePtr{}, - WantTarget: &awsExpanderSinglePtr{ - Field1: &awsExpander{ - AWSField: "value1", - }, - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[tfExpanderListNestedObject](), reflect.TypeFor[*awsExpanderSinglePtr]()), - infoConverting(reflect.TypeFor[tfExpanderListNestedObject](), reflect.TypeFor[*awsExpanderSinglePtr]()), - traceMatchedFields("Field1", 
reflect.TypeFor[tfExpanderListNestedObject](), "Field1", reflect.TypeFor[*awsExpanderSinglePtr]()), - infoConvertingWithPath("Field1", reflect.TypeFor[fwtypes.ListNestedObjectValueOf[tfFlexer]](), "Field1", reflect.TypeFor[*awsExpander]()), - infoSourceImplementsFlexExpander("Field1[0]", reflect.TypeFor[tfFlexer](), "Field1", reflect.TypeFor[*awsExpander]()), - }, - }, - "single set Source and single *struct Target": { - Source: tfExpanderSetNestedObject{ - Field1: fwtypes.NewSetNestedObjectValueOfValueSliceMust(ctx, []tfFlexer{ - { - Field1: types.StringValue("value1"), - }, - }), - }, - Target: &awsExpanderSinglePtr{}, - WantTarget: &awsExpanderSinglePtr{ - Field1: &awsExpander{ - AWSField: "value1", - }, - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[tfExpanderSetNestedObject](), reflect.TypeFor[*awsExpanderSinglePtr]()), - infoConverting(reflect.TypeFor[tfExpanderSetNestedObject](), reflect.TypeFor[*awsExpanderSinglePtr]()), - traceMatchedFields("Field1", reflect.TypeFor[tfExpanderSetNestedObject](), "Field1", reflect.TypeFor[*awsExpanderSinglePtr]()), - infoConvertingWithPath("Field1", reflect.TypeFor[fwtypes.SetNestedObjectValueOf[tfFlexer]](), "Field1", reflect.TypeFor[*awsExpander]()), - infoSourceImplementsFlexExpander("Field1[0]", reflect.TypeFor[tfFlexer](), "Field1", reflect.TypeFor[*awsExpander]()), - }, - }, - "empty list Source and empty struct Target": { - Source: tfExpanderListNestedObject{ - Field1: fwtypes.NewListNestedObjectValueOfValueSliceMust(ctx, []tfFlexer{}), - }, - Target: &awsExpanderStructSlice{}, - WantTarget: &awsExpanderStructSlice{ - Field1: []awsExpander{}, - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[tfExpanderListNestedObject](), reflect.TypeFor[*awsExpanderStructSlice]()), - infoConverting(reflect.TypeFor[tfExpanderListNestedObject](), reflect.TypeFor[*awsExpanderStructSlice]()), - traceMatchedFields("Field1", reflect.TypeFor[tfExpanderListNestedObject](), "Field1", 
reflect.TypeFor[*awsExpanderStructSlice]()), - infoConvertingWithPath("Field1", reflect.TypeFor[fwtypes.ListNestedObjectValueOf[tfFlexer]](), "Field1", reflect.TypeFor[[]awsExpander]()), - traceExpandingNestedObjectCollection("Field1", reflect.TypeFor[fwtypes.ListNestedObjectValueOf[tfFlexer]](), 0, "Field1", reflect.TypeFor[[]awsExpander]()), - }, - }, - "non-empty list Source and non-empty struct Target": { - Source: tfExpanderListNestedObject{ - Field1: fwtypes.NewListNestedObjectValueOfValueSliceMust(ctx, []tfFlexer{ - { - Field1: types.StringValue("value1"), - }, - { - Field1: types.StringValue("value2"), - }, - }), - }, - Target: &awsExpanderStructSlice{}, - WantTarget: &awsExpanderStructSlice{ - Field1: []awsExpander{ - { - AWSField: "value1", - }, - { - AWSField: "value2", - }, - }, - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[tfExpanderListNestedObject](), reflect.TypeFor[*awsExpanderStructSlice]()), - infoConverting(reflect.TypeFor[tfExpanderListNestedObject](), reflect.TypeFor[*awsExpanderStructSlice]()), - traceMatchedFields("Field1", reflect.TypeFor[tfExpanderListNestedObject](), "Field1", reflect.TypeFor[*awsExpanderStructSlice]()), - infoConvertingWithPath("Field1", reflect.TypeFor[fwtypes.ListNestedObjectValueOf[tfFlexer]](), "Field1", reflect.TypeFor[[]awsExpander]()), - traceExpandingNestedObjectCollection("Field1", reflect.TypeFor[fwtypes.ListNestedObjectValueOf[tfFlexer]](), 2, "Field1", reflect.TypeFor[[]awsExpander]()), - infoSourceImplementsFlexExpander("Field1[0]", reflect.TypeFor[tfFlexer](), "Field1[0]", reflect.TypeFor[*awsExpander]()), - infoSourceImplementsFlexExpander("Field1[1]", reflect.TypeFor[tfFlexer](), "Field1[1]", reflect.TypeFor[*awsExpander]()), - }, - }, - "empty list Source and empty *struct Target": { - Source: tfExpanderListNestedObject{ - Field1: fwtypes.NewListNestedObjectValueOfValueSliceMust(ctx, []tfFlexer{}), - }, - Target: &awsExpanderPtrSlice{}, - WantTarget: &awsExpanderPtrSlice{ - 
Field1: []*awsExpander{}, - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[tfExpanderListNestedObject](), reflect.TypeFor[*awsExpanderPtrSlice]()), - infoConverting(reflect.TypeFor[tfExpanderListNestedObject](), reflect.TypeFor[*awsExpanderPtrSlice]()), - traceMatchedFields("Field1", reflect.TypeFor[tfExpanderListNestedObject](), "Field1", reflect.TypeFor[*awsExpanderPtrSlice]()), - infoConvertingWithPath("Field1", reflect.TypeFor[fwtypes.ListNestedObjectValueOf[tfFlexer]](), "Field1", reflect.TypeFor[[]*awsExpander]()), - traceExpandingNestedObjectCollection("Field1", reflect.TypeFor[fwtypes.ListNestedObjectValueOf[tfFlexer]](), 0, "Field1", reflect.TypeFor[[]*awsExpander]()), - }, - }, - "non-empty list Source and non-empty *struct Target": { - Source: tfExpanderListNestedObject{ - Field1: fwtypes.NewListNestedObjectValueOfValueSliceMust(ctx, []tfFlexer{ - { - Field1: types.StringValue("value1"), - }, - { - Field1: types.StringValue("value2"), - }, - }), - }, - Target: &awsExpanderPtrSlice{}, - WantTarget: &awsExpanderPtrSlice{ - Field1: []*awsExpander{ - { - AWSField: "value1", - }, - { - AWSField: "value2", - }, - }, - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[tfExpanderListNestedObject](), reflect.TypeFor[*awsExpanderPtrSlice]()), - infoConverting(reflect.TypeFor[tfExpanderListNestedObject](), reflect.TypeFor[*awsExpanderPtrSlice]()), - traceMatchedFields("Field1", reflect.TypeFor[tfExpanderListNestedObject](), "Field1", reflect.TypeFor[*awsExpanderPtrSlice]()), - infoConvertingWithPath("Field1", reflect.TypeFor[fwtypes.ListNestedObjectValueOf[tfFlexer]](), "Field1", reflect.TypeFor[[]*awsExpander]()), - traceExpandingNestedObjectCollection("Field1", reflect.TypeFor[fwtypes.ListNestedObjectValueOf[tfFlexer]](), 2, "Field1", reflect.TypeFor[[]*awsExpander]()), - infoSourceImplementsFlexExpander("Field1[0]", reflect.TypeFor[tfFlexer](), "Field1[0]", reflect.TypeFor[*awsExpander]()), - 
infoSourceImplementsFlexExpander("Field1[1]", reflect.TypeFor[tfFlexer](), "Field1[1]", reflect.TypeFor[*awsExpander]()), - }, - }, - "empty set Source and empty struct Target": { - Source: tfExpanderSetNestedObject{ - Field1: fwtypes.NewSetNestedObjectValueOfValueSliceMust(ctx, []tfFlexer{}), - }, - Target: &awsExpanderStructSlice{}, - WantTarget: &awsExpanderStructSlice{ - Field1: []awsExpander{}, - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[tfExpanderSetNestedObject](), reflect.TypeFor[*awsExpanderStructSlice]()), - infoConverting(reflect.TypeFor[tfExpanderSetNestedObject](), reflect.TypeFor[*awsExpanderStructSlice]()), - traceMatchedFields("Field1", reflect.TypeFor[tfExpanderSetNestedObject](), "Field1", reflect.TypeFor[*awsExpanderStructSlice]()), - infoConvertingWithPath("Field1", reflect.TypeFor[fwtypes.SetNestedObjectValueOf[tfFlexer]](), "Field1", reflect.TypeFor[[]awsExpander]()), - traceExpandingNestedObjectCollection("Field1", reflect.TypeFor[fwtypes.SetNestedObjectValueOf[tfFlexer]](), 0, "Field1", reflect.TypeFor[[]awsExpander]()), - }, - }, - "non-empty set Source and non-empty struct Target": { - Source: tfExpanderSetNestedObject{ - Field1: fwtypes.NewSetNestedObjectValueOfValueSliceMust(ctx, []tfFlexer{ - { - Field1: types.StringValue("value1"), - }, - { - Field1: types.StringValue("value2"), - }, - }), - }, - Target: &awsExpanderStructSlice{}, - WantTarget: &awsExpanderStructSlice{ - Field1: []awsExpander{ - { - AWSField: "value1", - }, - { - AWSField: "value2", - }, - }, - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[tfExpanderSetNestedObject](), reflect.TypeFor[*awsExpanderStructSlice]()), - infoConverting(reflect.TypeFor[tfExpanderSetNestedObject](), reflect.TypeFor[*awsExpanderStructSlice]()), - traceMatchedFields("Field1", reflect.TypeFor[tfExpanderSetNestedObject](), "Field1", reflect.TypeFor[*awsExpanderStructSlice]()), - infoConvertingWithPath("Field1", 
reflect.TypeFor[fwtypes.SetNestedObjectValueOf[tfFlexer]](), "Field1", reflect.TypeFor[[]awsExpander]()), - traceExpandingNestedObjectCollection("Field1", reflect.TypeFor[fwtypes.SetNestedObjectValueOf[tfFlexer]](), 2, "Field1", reflect.TypeFor[[]awsExpander]()), - infoSourceImplementsFlexExpander("Field1[0]", reflect.TypeFor[tfFlexer](), "Field1[0]", reflect.TypeFor[*awsExpander]()), - infoSourceImplementsFlexExpander("Field1[1]", reflect.TypeFor[tfFlexer](), "Field1[1]", reflect.TypeFor[*awsExpander]()), - }, - }, - "empty set Source and empty *struct Target": { - Source: tfExpanderSetNestedObject{ - Field1: fwtypes.NewSetNestedObjectValueOfValueSliceMust(ctx, []tfFlexer{}), - }, - Target: &awsExpanderPtrSlice{}, - WantTarget: &awsExpanderPtrSlice{ - Field1: []*awsExpander{}, - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[tfExpanderSetNestedObject](), reflect.TypeFor[*awsExpanderPtrSlice]()), - infoConverting(reflect.TypeFor[tfExpanderSetNestedObject](), reflect.TypeFor[*awsExpanderPtrSlice]()), - traceMatchedFields("Field1", reflect.TypeFor[tfExpanderSetNestedObject](), "Field1", reflect.TypeFor[*awsExpanderPtrSlice]()), - infoConvertingWithPath("Field1", reflect.TypeFor[fwtypes.SetNestedObjectValueOf[tfFlexer]](), "Field1", reflect.TypeFor[[]*awsExpander]()), - traceExpandingNestedObjectCollection("Field1", reflect.TypeFor[fwtypes.SetNestedObjectValueOf[tfFlexer]](), 0, "Field1", reflect.TypeFor[[]*awsExpander]()), - }, - }, - "non-empty set Source and non-empty *struct Target": { - Source: tfExpanderSetNestedObject{ - Field1: fwtypes.NewSetNestedObjectValueOfValueSliceMust(ctx, []tfFlexer{ - { - Field1: types.StringValue("value1"), - }, - { - Field1: types.StringValue("value2"), - }, - }), - }, - Target: &awsExpanderPtrSlice{}, - WantTarget: &awsExpanderPtrSlice{ - Field1: []*awsExpander{ - { - AWSField: "value1", - }, - { - AWSField: "value2", - }, - }, - }, - expectedLogLines: []map[string]any{ - 
infoExpanding(reflect.TypeFor[tfExpanderSetNestedObject](), reflect.TypeFor[*awsExpanderPtrSlice]()), - infoConverting(reflect.TypeFor[tfExpanderSetNestedObject](), reflect.TypeFor[*awsExpanderPtrSlice]()), - traceMatchedFields("Field1", reflect.TypeFor[tfExpanderSetNestedObject](), "Field1", reflect.TypeFor[*awsExpanderPtrSlice]()), - infoConvertingWithPath("Field1", reflect.TypeFor[fwtypes.SetNestedObjectValueOf[tfFlexer]](), "Field1", reflect.TypeFor[[]*awsExpander]()), - traceExpandingNestedObjectCollection("Field1", reflect.TypeFor[fwtypes.SetNestedObjectValueOf[tfFlexer]](), 2, "Field1", reflect.TypeFor[[]*awsExpander]()), - infoSourceImplementsFlexExpander("Field1[0]", reflect.TypeFor[tfFlexer](), "Field1[0]", reflect.TypeFor[*awsExpander]()), - infoSourceImplementsFlexExpander("Field1[1]", reflect.TypeFor[tfFlexer](), "Field1[1]", reflect.TypeFor[*awsExpander]()), - }, - }, - "object value Source and struct Target": { - Source: tfExpanderObjectValue{ - Field1: fwtypes.NewObjectValueOfMust(ctx, &tfFlexer{ - Field1: types.StringValue("value1"), - }), - }, - Target: &awsExpanderSingleStruct{}, - WantTarget: &awsExpanderSingleStruct{ - Field1: awsExpander{ - AWSField: "value1", - }, - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[tfExpanderObjectValue](), reflect.TypeFor[*awsExpanderSingleStruct]()), - infoConverting(reflect.TypeFor[tfExpanderObjectValue](), reflect.TypeFor[*awsExpanderSingleStruct]()), - traceMatchedFields("Field1", reflect.TypeFor[tfExpanderObjectValue](), "Field1", reflect.TypeFor[*awsExpanderSingleStruct]()), - infoConvertingWithPath("Field1", reflect.TypeFor[fwtypes.ObjectValueOf[tfFlexer]](), "Field1", reflect.TypeFor[awsExpander]()), - infoSourceImplementsFlexExpander("Field1", reflect.TypeFor[tfFlexer](), "Field1", reflect.TypeFor[*awsExpander]()), - }, - }, - "object value Source and *struct Target": { - Source: tfExpanderObjectValue{ - Field1: fwtypes.NewObjectValueOfMust(ctx, &tfFlexer{ - Field1: 
types.StringValue("value1"), - }), - }, - Target: &awsExpanderSinglePtr{}, - WantTarget: &awsExpanderSinglePtr{ - Field1: &awsExpander{ - AWSField: "value1", - }, - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[tfExpanderObjectValue](), reflect.TypeFor[*awsExpanderSinglePtr]()), - infoConverting(reflect.TypeFor[tfExpanderObjectValue](), reflect.TypeFor[*awsExpanderSinglePtr]()), - traceMatchedFields("Field1", reflect.TypeFor[tfExpanderObjectValue](), "Field1", reflect.TypeFor[*awsExpanderSinglePtr]()), - infoConvertingWithPath("Field1", reflect.TypeFor[fwtypes.ObjectValueOf[tfFlexer]](), "Field1", reflect.TypeFor[*awsExpander]()), - infoSourceImplementsFlexExpander("Field1", reflect.TypeFor[tfFlexer](), "Field1", reflect.TypeFor[*awsExpander]()), - }, - }, - } - runAutoExpandTestCases(t, testCases) -} - -func TestExpandInterfaceTypedExpander(t *testing.T) { - t.Parallel() - - ctx := context.Background() - - var targetInterface awsInterfaceInterface - - testCases := autoFlexTestCases{ - "top level": { - Source: tfInterfaceTypedExpander{ - Field1: types.StringValue("value1"), - }, - Target: &targetInterface, - WantTarget: testFlexAWSInterfaceInterfacePtr(&awsInterfaceInterfaceImpl{ - AWSField: "value1", - }), - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[tfInterfaceTypedExpander](), reflect.TypeFor[*awsInterfaceInterface]()), - infoConverting(reflect.TypeFor[tfInterfaceTypedExpander](), reflect.TypeFor[awsInterfaceInterface]()), - infoSourceImplementsFlexTypedExpander("", reflect.TypeFor[tfInterfaceTypedExpander](), "", reflect.TypeFor[awsInterfaceInterface]()), - }, - }, - "top level return value does not implement target interface": { - Source: tfInterfaceIncompatibleTypedExpander{ - Field1: types.StringValue("value1"), - }, - Target: &targetInterface, - expectedDiags: diag.Diagnostics{ - diagExpandedTypeDoesNotImplement(reflect.TypeFor[*awsInterfaceIncompatibleImpl](), reflect.TypeFor[awsInterfaceInterface]()), - 
}, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[tfInterfaceIncompatibleTypedExpander](), reflect.TypeFor[*awsInterfaceInterface]()), - infoConverting(reflect.TypeFor[tfInterfaceIncompatibleTypedExpander](), reflect.TypeFor[awsInterfaceInterface]()), - infoSourceImplementsFlexTypedExpander("", reflect.TypeFor[tfInterfaceIncompatibleTypedExpander](), "", reflect.TypeFor[awsInterfaceInterface]()), - }, - }, - "single list Source and single interface Target": { - Source: tfListNestedObject[tfInterfaceTypedExpander]{ - Field1: fwtypes.NewListNestedObjectValueOfValueSliceMust(ctx, []tfInterfaceTypedExpander{ - { - Field1: types.StringValue("value1"), - }, - }), - }, - Target: &awsInterfaceSingle{}, - WantTarget: &awsInterfaceSingle{ - Field1: &awsInterfaceInterfaceImpl{ - AWSField: "value1", - }, - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[tfListNestedObject[tfInterfaceTypedExpander]](), reflect.TypeFor[*awsInterfaceSingle]()), - infoConverting(reflect.TypeFor[tfListNestedObject[tfInterfaceTypedExpander]](), reflect.TypeFor[*awsInterfaceSingle]()), - traceMatchedFields("Field1", reflect.TypeFor[tfListNestedObject[tfInterfaceTypedExpander]](), "Field1", reflect.TypeFor[*awsInterfaceSingle]()), - infoConvertingWithPath("Field1", reflect.TypeFor[fwtypes.ListNestedObjectValueOf[tfInterfaceTypedExpander]](), "Field1", reflect.TypeFor[awsInterfaceInterface]()), - infoSourceImplementsFlexTypedExpander("Field1[0]", reflect.TypeFor[tfInterfaceTypedExpander](), "Field1", reflect.TypeFor[*awsInterfaceInterface]()), - }, - }, - "single list non-Expander Source and single interface Target": { - Source: tfListNestedObject[tfSingleStringField]{ - Field1: fwtypes.NewListNestedObjectValueOfValueSliceMust(ctx, []tfSingleStringField{ - { - Field1: types.StringValue("value1"), - }, - }), - }, - Target: &awsInterfaceSingle{}, - WantTarget: &awsInterfaceSingle{ - Field1: nil, - }, - expectedLogLines: []map[string]any{ - 
infoExpanding(reflect.TypeFor[tfListNestedObject[tfSingleStringField]](), reflect.TypeFor[*awsInterfaceSingle]()), - infoConverting(reflect.TypeFor[tfListNestedObject[tfSingleStringField]](), reflect.TypeFor[*awsInterfaceSingle]()), - traceMatchedFields("Field1", reflect.TypeFor[tfListNestedObject[tfSingleStringField]](), "Field1", reflect.TypeFor[*awsInterfaceSingle]()), - infoConvertingWithPath("Field1", reflect.TypeFor[fwtypes.ListNestedObjectValueOf[tfSingleStringField]](), "Field1", reflect.TypeFor[awsInterfaceInterface]()), - { - "@level": "error", - "@module": "provider.autoflex", - "@message": "AutoFlex Expand; incompatible types", - "from": map[string]any{}, - "to": float64(reflect.Interface), - logAttrKeySourcePath: "Field1[0]", - logAttrKeySourceType: fullTypeName(reflect.TypeFor[tfSingleStringField]()), - logAttrKeyTargetPath: "Field1", - logAttrKeyTargetType: fullTypeName(reflect.TypeFor[*awsInterfaceInterface]()), - }, - }, - }, - "single set Source and single interface Target": { - Source: tfSetNestedObject[tfInterfaceTypedExpander]{ - Field1: fwtypes.NewSetNestedObjectValueOfValueSliceMust(ctx, []tfInterfaceTypedExpander{ - { - Field1: types.StringValue("value1"), - }, - }), - }, - Target: &awsInterfaceSingle{}, - WantTarget: &awsInterfaceSingle{ - Field1: &awsInterfaceInterfaceImpl{ - AWSField: "value1", - }, - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[tfSetNestedObject[tfInterfaceTypedExpander]](), reflect.TypeFor[*awsInterfaceSingle]()), - infoConverting(reflect.TypeFor[tfSetNestedObject[tfInterfaceTypedExpander]](), reflect.TypeFor[*awsInterfaceSingle]()), - traceMatchedFields("Field1", reflect.TypeFor[tfSetNestedObject[tfInterfaceTypedExpander]](), "Field1", reflect.TypeFor[*awsInterfaceSingle]()), - infoConvertingWithPath("Field1", reflect.TypeFor[fwtypes.SetNestedObjectValueOf[tfInterfaceTypedExpander]](), "Field1", reflect.TypeFor[awsInterfaceInterface]()), - infoSourceImplementsFlexTypedExpander("Field1[0]", 
reflect.TypeFor[tfInterfaceTypedExpander](), "Field1", reflect.TypeFor[*awsInterfaceInterface]()), - }, - }, - "empty list Source and empty interface Target": { - Source: tfListNestedObject[tfInterfaceTypedExpander]{ - Field1: fwtypes.NewListNestedObjectValueOfValueSliceMust(ctx, []tfInterfaceTypedExpander{}), - }, - Target: &awsInterfaceSlice{}, - WantTarget: &awsInterfaceSlice{ - Field1: []awsInterfaceInterface{}, - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[tfListNestedObject[tfInterfaceTypedExpander]](), reflect.TypeFor[*awsInterfaceSlice]()), - infoConverting(reflect.TypeFor[tfListNestedObject[tfInterfaceTypedExpander]](), reflect.TypeFor[*awsInterfaceSlice]()), - traceMatchedFields("Field1", reflect.TypeFor[tfListNestedObject[tfInterfaceTypedExpander]](), "Field1", reflect.TypeFor[*awsInterfaceSlice]()), - infoConvertingWithPath("Field1", reflect.TypeFor[fwtypes.ListNestedObjectValueOf[tfInterfaceTypedExpander]](), "Field1", reflect.TypeFor[[]awsInterfaceInterface]()), - traceExpandingNestedObjectCollection("Field1", reflect.TypeFor[fwtypes.ListNestedObjectValueOf[tfInterfaceTypedExpander]](), 0, "Field1", reflect.TypeFor[[]awsInterfaceInterface]()), - }, - }, - "non-empty list Source and non-empty interface Target": { - Source: tfListNestedObject[tfInterfaceTypedExpander]{ - Field1: fwtypes.NewListNestedObjectValueOfValueSliceMust(ctx, []tfInterfaceTypedExpander{ - { - Field1: types.StringValue("value1"), - }, - { - Field1: types.StringValue("value2"), - }, - }), - }, - Target: &awsInterfaceSlice{}, - WantTarget: &awsInterfaceSlice{ - Field1: []awsInterfaceInterface{ - &awsInterfaceInterfaceImpl{ - AWSField: "value1", - }, - &awsInterfaceInterfaceImpl{ - AWSField: "value2", - }, - }, - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[tfListNestedObject[tfInterfaceTypedExpander]](), reflect.TypeFor[*awsInterfaceSlice]()), - infoConverting(reflect.TypeFor[tfListNestedObject[tfInterfaceTypedExpander]](), 
reflect.TypeFor[*awsInterfaceSlice]()), - traceMatchedFields("Field1", reflect.TypeFor[tfListNestedObject[tfInterfaceTypedExpander]](), "Field1", reflect.TypeFor[*awsInterfaceSlice]()), - infoConvertingWithPath("Field1", reflect.TypeFor[fwtypes.ListNestedObjectValueOf[tfInterfaceTypedExpander]](), "Field1", reflect.TypeFor[[]awsInterfaceInterface]()), - traceExpandingNestedObjectCollection("Field1", reflect.TypeFor[fwtypes.ListNestedObjectValueOf[tfInterfaceTypedExpander]](), 2, "Field1", reflect.TypeFor[[]awsInterfaceInterface]()), - infoSourceImplementsFlexTypedExpander("Field1[0]", reflect.TypeFor[tfInterfaceTypedExpander](), "Field1[0]", reflect.TypeFor[*awsInterfaceInterface]()), - infoSourceImplementsFlexTypedExpander("Field1[1]", reflect.TypeFor[tfInterfaceTypedExpander](), "Field1[1]", reflect.TypeFor[*awsInterfaceInterface]()), - }, - }, - "empty set Source and empty interface Target": { - Source: tfSetNestedObject[tfInterfaceTypedExpander]{ - Field1: fwtypes.NewSetNestedObjectValueOfValueSliceMust(ctx, []tfInterfaceTypedExpander{}), - }, - Target: &awsInterfaceSlice{}, - WantTarget: &awsInterfaceSlice{ - Field1: []awsInterfaceInterface{}, - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[tfSetNestedObject[tfInterfaceTypedExpander]](), reflect.TypeFor[*awsInterfaceSlice]()), - infoConverting(reflect.TypeFor[tfSetNestedObject[tfInterfaceTypedExpander]](), reflect.TypeFor[*awsInterfaceSlice]()), - traceMatchedFields("Field1", reflect.TypeFor[tfSetNestedObject[tfInterfaceTypedExpander]](), "Field1", reflect.TypeFor[*awsInterfaceSlice]()), - infoConvertingWithPath("Field1", reflect.TypeFor[fwtypes.SetNestedObjectValueOf[tfInterfaceTypedExpander]](), "Field1", reflect.TypeFor[[]awsInterfaceInterface]()), - traceExpandingNestedObjectCollection("Field1", reflect.TypeFor[fwtypes.SetNestedObjectValueOf[tfInterfaceTypedExpander]](), 0, "Field1", reflect.TypeFor[[]awsInterfaceInterface]()), - }, - }, - "non-empty set Source and non-empty 
interface Target": { - Source: tfSetNestedObject[tfInterfaceTypedExpander]{ - Field1: fwtypes.NewSetNestedObjectValueOfValueSliceMust(ctx, []tfInterfaceTypedExpander{ - { - Field1: types.StringValue("value1"), - }, - { - Field1: types.StringValue("value2"), - }, - }), - }, - Target: &awsInterfaceSlice{}, - WantTarget: &awsInterfaceSlice{ - Field1: []awsInterfaceInterface{ - &awsInterfaceInterfaceImpl{ - AWSField: "value1", - }, - &awsInterfaceInterfaceImpl{ - AWSField: "value2", - }, - }, - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[tfSetNestedObject[tfInterfaceTypedExpander]](), reflect.TypeFor[*awsInterfaceSlice]()), - infoConverting(reflect.TypeFor[tfSetNestedObject[tfInterfaceTypedExpander]](), reflect.TypeFor[*awsInterfaceSlice]()), - traceMatchedFields("Field1", reflect.TypeFor[tfSetNestedObject[tfInterfaceTypedExpander]](), "Field1", reflect.TypeFor[*awsInterfaceSlice]()), - infoConvertingWithPath("Field1", reflect.TypeFor[fwtypes.SetNestedObjectValueOf[tfInterfaceTypedExpander]](), "Field1", reflect.TypeFor[[]awsInterfaceInterface]()), - traceExpandingNestedObjectCollection("Field1", reflect.TypeFor[fwtypes.SetNestedObjectValueOf[tfInterfaceTypedExpander]](), 2, "Field1", reflect.TypeFor[[]awsInterfaceInterface]()), - infoSourceImplementsFlexTypedExpander("Field1[0]", reflect.TypeFor[tfInterfaceTypedExpander](), "Field1[0]", reflect.TypeFor[*awsInterfaceInterface]()), - infoSourceImplementsFlexTypedExpander("Field1[1]", reflect.TypeFor[tfInterfaceTypedExpander](), "Field1[1]", reflect.TypeFor[*awsInterfaceInterface]()), - }, - }, - "object value Source and struct Target": { - Source: tfObjectValue[tfInterfaceTypedExpander]{ - Field1: fwtypes.NewObjectValueOfMust(ctx, &tfInterfaceTypedExpander{ - Field1: types.StringValue("value1"), - }), - }, - Target: &awsInterfaceSingle{}, - WantTarget: &awsInterfaceSingle{ - Field1: &awsInterfaceInterfaceImpl{ - AWSField: "value1", - }, - }, - expectedLogLines: []map[string]any{ - 
infoExpanding(reflect.TypeFor[tfObjectValue[tfInterfaceTypedExpander]](), reflect.TypeFor[*awsInterfaceSingle]()), - infoConverting(reflect.TypeFor[tfObjectValue[tfInterfaceTypedExpander]](), reflect.TypeFor[*awsInterfaceSingle]()), - traceMatchedFields("Field1", reflect.TypeFor[tfObjectValue[tfInterfaceTypedExpander]](), "Field1", reflect.TypeFor[*awsInterfaceSingle]()), - infoConvertingWithPath("Field1", reflect.TypeFor[fwtypes.ObjectValueOf[tfInterfaceTypedExpander]](), "Field1", reflect.TypeFor[awsInterfaceInterface]()), - infoSourceImplementsFlexTypedExpander("Field1", reflect.TypeFor[tfInterfaceTypedExpander](), "Field1", reflect.TypeFor[*awsInterfaceInterface]()), - }, - }, - } - runAutoExpandTestCases(t, testCases) -} - -func TestExpandTypedExpander(t *testing.T) { - t.Parallel() - - ctx := context.Background() - - testCases := autoFlexTestCases{ - "top level struct Target": { - Source: tfTypedExpander{ - Field1: types.StringValue("value1"), - }, - Target: &awsExpander{}, - WantTarget: &awsExpander{ - AWSField: "value1", - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[tfTypedExpander](), reflect.TypeFor[*awsExpander]()), - infoConverting(reflect.TypeFor[tfTypedExpander](), reflect.TypeFor[*awsExpander]()), - infoSourceImplementsFlexTypedExpander("", reflect.TypeFor[tfTypedExpander](), "", reflect.TypeFor[*awsExpander]()), - }, - }, - "top level incompatible struct Target": { - Source: tfTypedExpander{ - Field1: types.StringValue("value1"), - }, - Target: &awsExpanderIncompatible{}, - expectedDiags: diag.Diagnostics{ - diagCannotBeAssigned(reflect.TypeFor[awsExpander](), reflect.TypeFor[awsExpanderIncompatible]()), - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[tfTypedExpander](), reflect.TypeFor[*awsExpanderIncompatible]()), - infoConverting(reflect.TypeFor[tfTypedExpander](), reflect.TypeFor[*awsExpanderIncompatible]()), - infoSourceImplementsFlexTypedExpander("", reflect.TypeFor[tfTypedExpander](), 
"", reflect.TypeFor[*awsExpanderIncompatible]()), - }, - }, - "top level expands to nil": { - Source: tfTypedExpanderToNil{ - Field1: types.StringValue("value1"), - }, - Target: &awsExpander{}, - expectedDiags: diag.Diagnostics{ - diagExpandsToNil(reflect.TypeFor[tfTypedExpanderToNil]()), - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[tfTypedExpanderToNil](), reflect.TypeFor[*awsExpander]()), - infoConverting(reflect.TypeFor[tfTypedExpanderToNil](), reflect.TypeFor[*awsExpander]()), - infoSourceImplementsFlexTypedExpander("", reflect.TypeFor[tfTypedExpanderToNil](), "", reflect.TypeFor[*awsExpander]()), - }, - }, - "single list Source and single struct Target": { - Source: tfTypedExpanderListNestedObject{ - Field1: fwtypes.NewListNestedObjectValueOfValueSliceMust(ctx, []tfTypedExpander{ - { - Field1: types.StringValue("value1"), - }, - }), - }, - Target: &awsExpanderSingleStruct{}, - WantTarget: &awsExpanderSingleStruct{ - Field1: awsExpander{ - AWSField: "value1", - }, - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[tfTypedExpanderListNestedObject](), reflect.TypeFor[*awsExpanderSingleStruct]()), - infoConverting(reflect.TypeFor[tfTypedExpanderListNestedObject](), reflect.TypeFor[*awsExpanderSingleStruct]()), - traceMatchedFields("Field1", reflect.TypeFor[tfTypedExpanderListNestedObject](), "Field1", reflect.TypeFor[*awsExpanderSingleStruct]()), - infoConvertingWithPath("Field1", reflect.TypeFor[fwtypes.ListNestedObjectValueOf[tfTypedExpander]](), "Field1", reflect.TypeFor[awsExpander]()), - infoSourceImplementsFlexTypedExpander("Field1[0]", reflect.TypeFor[tfTypedExpander](), "Field1", reflect.TypeFor[*awsExpander]()), - }, - }, - "single set Source and single struct Target": { - Source: tfSetNestedObject[tfTypedExpander]{ - Field1: fwtypes.NewSetNestedObjectValueOfValueSliceMust(ctx, []tfTypedExpander{ - { - Field1: types.StringValue("value1"), - }, - }), - }, - Target: &awsExpanderSingleStruct{}, - 
WantTarget: &awsExpanderSingleStruct{ - Field1: awsExpander{ - AWSField: "value1", - }, - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[tfSetNestedObject[tfTypedExpander]](), reflect.TypeFor[*awsExpanderSingleStruct]()), - infoConverting(reflect.TypeFor[tfSetNestedObject[tfTypedExpander]](), reflect.TypeFor[*awsExpanderSingleStruct]()), - traceMatchedFields("Field1", reflect.TypeFor[tfSetNestedObject[tfTypedExpander]](), "Field1", reflect.TypeFor[*awsExpanderSingleStruct]()), - infoConvertingWithPath("Field1", reflect.TypeFor[fwtypes.SetNestedObjectValueOf[tfTypedExpander]](), "Field1", reflect.TypeFor[awsExpander]()), - infoSourceImplementsFlexTypedExpander("Field1[0]", reflect.TypeFor[tfTypedExpander](), "Field1", reflect.TypeFor[*awsExpander]()), - }, - }, - "single list Source and single *struct Target": { - Source: tfTypedExpanderListNestedObject{ - Field1: fwtypes.NewListNestedObjectValueOfValueSliceMust(ctx, []tfTypedExpander{ - { - Field1: types.StringValue("value1"), - }, - }), - }, - Target: &awsExpanderSinglePtr{}, - WantTarget: &awsExpanderSinglePtr{ - Field1: &awsExpander{ - AWSField: "value1", - }, - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[tfTypedExpanderListNestedObject](), reflect.TypeFor[*awsExpanderSinglePtr]()), - infoConverting(reflect.TypeFor[tfTypedExpanderListNestedObject](), reflect.TypeFor[*awsExpanderSinglePtr]()), - traceMatchedFields("Field1", reflect.TypeFor[tfTypedExpanderListNestedObject](), "Field1", reflect.TypeFor[*awsExpanderSinglePtr]()), - infoConvertingWithPath("Field1", reflect.TypeFor[fwtypes.ListNestedObjectValueOf[tfTypedExpander]](), "Field1", reflect.TypeFor[*awsExpander]()), - infoSourceImplementsFlexTypedExpander("Field1[0]", reflect.TypeFor[tfTypedExpander](), "Field1", reflect.TypeFor[*awsExpander]()), - }, - }, - "single set Source and single *struct Target": { - Source: tfTypedExpanderSetNestedObject{ - Field1: 
fwtypes.NewSetNestedObjectValueOfValueSliceMust(ctx, []tfTypedExpander{ - { - Field1: types.StringValue("value1"), - }, - }), - }, - Target: &awsExpanderSinglePtr{}, - WantTarget: &awsExpanderSinglePtr{ - Field1: &awsExpander{ - AWSField: "value1", - }, - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[tfTypedExpanderSetNestedObject](), reflect.TypeFor[*awsExpanderSinglePtr]()), - infoConverting(reflect.TypeFor[tfTypedExpanderSetNestedObject](), reflect.TypeFor[*awsExpanderSinglePtr]()), - traceMatchedFields("Field1", reflect.TypeFor[tfTypedExpanderSetNestedObject](), "Field1", reflect.TypeFor[*awsExpanderSinglePtr]()), - infoConvertingWithPath("Field1", reflect.TypeFor[fwtypes.SetNestedObjectValueOf[tfTypedExpander]](), "Field1", reflect.TypeFor[*awsExpander]()), - infoSourceImplementsFlexTypedExpander("Field1[0]", reflect.TypeFor[tfTypedExpander](), "Field1", reflect.TypeFor[*awsExpander]()), - }, - }, - "empty list Source and empty struct Target": { - Source: tfTypedExpanderListNestedObject{ - Field1: fwtypes.NewListNestedObjectValueOfValueSliceMust(ctx, []tfTypedExpander{}), - }, - Target: &awsExpanderStructSlice{}, - WantTarget: &awsExpanderStructSlice{ - Field1: []awsExpander{}, - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[tfTypedExpanderListNestedObject](), reflect.TypeFor[*awsExpanderStructSlice]()), - infoConverting(reflect.TypeFor[tfTypedExpanderListNestedObject](), reflect.TypeFor[*awsExpanderStructSlice]()), - traceMatchedFields("Field1", reflect.TypeFor[tfTypedExpanderListNestedObject](), "Field1", reflect.TypeFor[*awsExpanderStructSlice]()), - infoConvertingWithPath("Field1", reflect.TypeFor[fwtypes.ListNestedObjectValueOf[tfTypedExpander]](), "Field1", reflect.TypeFor[[]awsExpander]()), - traceExpandingNestedObjectCollection("Field1", reflect.TypeFor[fwtypes.ListNestedObjectValueOf[tfTypedExpander]](), 0, "Field1", reflect.TypeFor[[]awsExpander]()), - }, - }, - "non-empty list Source and 
non-empty struct Target": { - Source: tfTypedExpanderListNestedObject{ - Field1: fwtypes.NewListNestedObjectValueOfValueSliceMust(ctx, []tfTypedExpander{ - { - Field1: types.StringValue("value1"), - }, - { - Field1: types.StringValue("value2"), - }, - }), - }, - Target: &awsExpanderStructSlice{}, - WantTarget: &awsExpanderStructSlice{ - Field1: []awsExpander{ - { - AWSField: "value1", - }, - { - AWSField: "value2", - }, - }, - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[tfTypedExpanderListNestedObject](), reflect.TypeFor[*awsExpanderStructSlice]()), - infoConverting(reflect.TypeFor[tfTypedExpanderListNestedObject](), reflect.TypeFor[*awsExpanderStructSlice]()), - traceMatchedFields("Field1", reflect.TypeFor[tfTypedExpanderListNestedObject](), "Field1", reflect.TypeFor[*awsExpanderStructSlice]()), - infoConvertingWithPath("Field1", reflect.TypeFor[fwtypes.ListNestedObjectValueOf[tfTypedExpander]](), "Field1", reflect.TypeFor[[]awsExpander]()), - traceExpandingNestedObjectCollection("Field1", reflect.TypeFor[fwtypes.ListNestedObjectValueOf[tfTypedExpander]](), 2, "Field1", reflect.TypeFor[[]awsExpander]()), - infoSourceImplementsFlexTypedExpander("Field1[0]", reflect.TypeFor[tfTypedExpander](), "Field1[0]", reflect.TypeFor[*awsExpander]()), - infoSourceImplementsFlexTypedExpander("Field1[1]", reflect.TypeFor[tfTypedExpander](), "Field1[1]", reflect.TypeFor[*awsExpander]()), - }, - }, - "empty list Source and empty *struct Target": { - Source: tfTypedExpanderListNestedObject{ - Field1: fwtypes.NewListNestedObjectValueOfValueSliceMust(ctx, []tfTypedExpander{}), - }, - Target: &awsExpanderPtrSlice{}, - WantTarget: &awsExpanderPtrSlice{ - Field1: []*awsExpander{}, - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[tfTypedExpanderListNestedObject](), reflect.TypeFor[*awsExpanderPtrSlice]()), - infoConverting(reflect.TypeFor[tfTypedExpanderListNestedObject](), reflect.TypeFor[*awsExpanderPtrSlice]()), - 
traceMatchedFields("Field1", reflect.TypeFor[tfTypedExpanderListNestedObject](), "Field1", reflect.TypeFor[*awsExpanderPtrSlice]()), - infoConvertingWithPath("Field1", reflect.TypeFor[fwtypes.ListNestedObjectValueOf[tfTypedExpander]](), "Field1", reflect.TypeFor[[]*awsExpander]()), - traceExpandingNestedObjectCollection("Field1", reflect.TypeFor[fwtypes.ListNestedObjectValueOf[tfTypedExpander]](), 0, "Field1", reflect.TypeFor[[]*awsExpander]()), - }, - }, - "non-empty list Source and non-empty *struct Target": { - Source: tfTypedExpanderListNestedObject{ - Field1: fwtypes.NewListNestedObjectValueOfValueSliceMust(ctx, []tfTypedExpander{ - { - Field1: types.StringValue("value1"), - }, - { - Field1: types.StringValue("value2"), - }, - }), - }, - Target: &awsExpanderPtrSlice{}, - WantTarget: &awsExpanderPtrSlice{ - Field1: []*awsExpander{ - { - AWSField: "value1", - }, - { - AWSField: "value2", - }, - }, - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[tfTypedExpanderListNestedObject](), reflect.TypeFor[*awsExpanderPtrSlice]()), - infoConverting(reflect.TypeFor[tfTypedExpanderListNestedObject](), reflect.TypeFor[*awsExpanderPtrSlice]()), - traceMatchedFields("Field1", reflect.TypeFor[tfTypedExpanderListNestedObject](), "Field1", reflect.TypeFor[*awsExpanderPtrSlice]()), - infoConvertingWithPath("Field1", reflect.TypeFor[fwtypes.ListNestedObjectValueOf[tfTypedExpander]](), "Field1", reflect.TypeFor[[]*awsExpander]()), - traceExpandingNestedObjectCollection("Field1", reflect.TypeFor[fwtypes.ListNestedObjectValueOf[tfTypedExpander]](), 2, "Field1", reflect.TypeFor[[]*awsExpander]()), - infoSourceImplementsFlexTypedExpander("Field1[0]", reflect.TypeFor[tfTypedExpander](), "Field1[0]", reflect.TypeFor[*awsExpander]()), - infoSourceImplementsFlexTypedExpander("Field1[1]", reflect.TypeFor[tfTypedExpander](), "Field1[1]", reflect.TypeFor[*awsExpander]()), - }, - }, - "empty set Source and empty struct Target": { - Source: 
tfTypedExpanderSetNestedObject{ - Field1: fwtypes.NewSetNestedObjectValueOfValueSliceMust(ctx, []tfTypedExpander{}), - }, - Target: &awsExpanderStructSlice{}, - WantTarget: &awsExpanderStructSlice{ - Field1: []awsExpander{}, - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[tfTypedExpanderSetNestedObject](), reflect.TypeFor[*awsExpanderStructSlice]()), - infoConverting(reflect.TypeFor[tfTypedExpanderSetNestedObject](), reflect.TypeFor[*awsExpanderStructSlice]()), - traceMatchedFields("Field1", reflect.TypeFor[tfTypedExpanderSetNestedObject](), "Field1", reflect.TypeFor[*awsExpanderStructSlice]()), - infoConvertingWithPath("Field1", reflect.TypeFor[fwtypes.SetNestedObjectValueOf[tfTypedExpander]](), "Field1", reflect.TypeFor[[]awsExpander]()), - traceExpandingNestedObjectCollection("Field1", reflect.TypeFor[fwtypes.SetNestedObjectValueOf[tfTypedExpander]](), 0, "Field1", reflect.TypeFor[[]awsExpander]()), - }, - }, - "non-empty set Source and non-empty struct Target": { - Source: tfTypedExpanderSetNestedObject{ - Field1: fwtypes.NewSetNestedObjectValueOfValueSliceMust(ctx, []tfTypedExpander{ - { - Field1: types.StringValue("value1"), - }, - { - Field1: types.StringValue("value2"), - }, - }), - }, - Target: &awsExpanderStructSlice{}, - WantTarget: &awsExpanderStructSlice{ - Field1: []awsExpander{ - { - AWSField: "value1", - }, - { - AWSField: "value2", - }, - }, - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[tfTypedExpanderSetNestedObject](), reflect.TypeFor[*awsExpanderStructSlice]()), - infoConverting(reflect.TypeFor[tfTypedExpanderSetNestedObject](), reflect.TypeFor[*awsExpanderStructSlice]()), - traceMatchedFields("Field1", reflect.TypeFor[tfTypedExpanderSetNestedObject](), "Field1", reflect.TypeFor[*awsExpanderStructSlice]()), - infoConvertingWithPath("Field1", reflect.TypeFor[fwtypes.SetNestedObjectValueOf[tfTypedExpander]](), "Field1", reflect.TypeFor[[]awsExpander]()), - 
traceExpandingNestedObjectCollection("Field1", reflect.TypeFor[fwtypes.SetNestedObjectValueOf[tfTypedExpander]](), 2, "Field1", reflect.TypeFor[[]awsExpander]()), - infoSourceImplementsFlexTypedExpander("Field1[0]", reflect.TypeFor[tfTypedExpander](), "Field1[0]", reflect.TypeFor[*awsExpander]()), - infoSourceImplementsFlexTypedExpander("Field1[1]", reflect.TypeFor[tfTypedExpander](), "Field1[1]", reflect.TypeFor[*awsExpander]()), - }, - }, - "empty set Source and empty *struct Target": { - Source: tfTypedExpanderSetNestedObject{ - Field1: fwtypes.NewSetNestedObjectValueOfValueSliceMust(ctx, []tfTypedExpander{}), - }, - Target: &awsExpanderPtrSlice{}, - WantTarget: &awsExpanderPtrSlice{ - Field1: []*awsExpander{}, - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[tfTypedExpanderSetNestedObject](), reflect.TypeFor[*awsExpanderPtrSlice]()), - infoConverting(reflect.TypeFor[tfTypedExpanderSetNestedObject](), reflect.TypeFor[*awsExpanderPtrSlice]()), - traceMatchedFields("Field1", reflect.TypeFor[tfTypedExpanderSetNestedObject](), "Field1", reflect.TypeFor[*awsExpanderPtrSlice]()), - infoConvertingWithPath("Field1", reflect.TypeFor[fwtypes.SetNestedObjectValueOf[tfTypedExpander]](), "Field1", reflect.TypeFor[[]*awsExpander]()), - traceExpandingNestedObjectCollection("Field1", reflect.TypeFor[fwtypes.SetNestedObjectValueOf[tfTypedExpander]](), 0, "Field1", reflect.TypeFor[[]*awsExpander]()), - }, - }, - "non-empty set Source and non-empty *struct Target": { - Source: tfTypedExpanderSetNestedObject{ - Field1: fwtypes.NewSetNestedObjectValueOfValueSliceMust(ctx, []tfTypedExpander{ - { - Field1: types.StringValue("value1"), - }, - { - Field1: types.StringValue("value2"), - }, - }), - }, - Target: &awsExpanderPtrSlice{}, - WantTarget: &awsExpanderPtrSlice{ - Field1: []*awsExpander{ - { - AWSField: "value1", - }, - { - AWSField: "value2", - }, - }, - }, - expectedLogLines: []map[string]any{ - 
infoExpanding(reflect.TypeFor[tfTypedExpanderSetNestedObject](), reflect.TypeFor[*awsExpanderPtrSlice]()), - infoConverting(reflect.TypeFor[tfTypedExpanderSetNestedObject](), reflect.TypeFor[*awsExpanderPtrSlice]()), - traceMatchedFields("Field1", reflect.TypeFor[tfTypedExpanderSetNestedObject](), "Field1", reflect.TypeFor[*awsExpanderPtrSlice]()), - infoConvertingWithPath("Field1", reflect.TypeFor[fwtypes.SetNestedObjectValueOf[tfTypedExpander]](), "Field1", reflect.TypeFor[[]*awsExpander]()), - traceExpandingNestedObjectCollection("Field1", reflect.TypeFor[fwtypes.SetNestedObjectValueOf[tfTypedExpander]](), 2, "Field1", reflect.TypeFor[[]*awsExpander]()), - infoSourceImplementsFlexTypedExpander("Field1[0]", reflect.TypeFor[tfTypedExpander](), "Field1[0]", reflect.TypeFor[*awsExpander]()), - infoSourceImplementsFlexTypedExpander("Field1[1]", reflect.TypeFor[tfTypedExpander](), "Field1[1]", reflect.TypeFor[*awsExpander]()), - }, - }, - "object value Source and struct Target": { - Source: tfTypedExpanderObjectValue{ - Field1: fwtypes.NewObjectValueOfMust(ctx, &tfTypedExpander{ - Field1: types.StringValue("value1"), - }), - }, - Target: &awsExpanderSingleStruct{}, - WantTarget: &awsExpanderSingleStruct{ - Field1: awsExpander{ - AWSField: "value1", - }, - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[tfTypedExpanderObjectValue](), reflect.TypeFor[*awsExpanderSingleStruct]()), - infoConverting(reflect.TypeFor[tfTypedExpanderObjectValue](), reflect.TypeFor[*awsExpanderSingleStruct]()), - traceMatchedFields("Field1", reflect.TypeFor[tfTypedExpanderObjectValue](), "Field1", reflect.TypeFor[*awsExpanderSingleStruct]()), - infoConvertingWithPath("Field1", reflect.TypeFor[fwtypes.ObjectValueOf[tfTypedExpander]](), "Field1", reflect.TypeFor[awsExpander]()), - infoSourceImplementsFlexTypedExpander("Field1", reflect.TypeFor[tfTypedExpander](), "Field1", reflect.TypeFor[*awsExpander]()), - }, - }, - "object value Source and *struct Target": { - Source: 
tfTypedExpanderObjectValue{ - Field1: fwtypes.NewObjectValueOfMust(ctx, &tfTypedExpander{ - Field1: types.StringValue("value1"), - }), - }, - Target: &awsExpanderSinglePtr{}, - WantTarget: &awsExpanderSinglePtr{ - Field1: &awsExpander{ - AWSField: "value1", - }, - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[tfTypedExpanderObjectValue](), reflect.TypeFor[*awsExpanderSinglePtr]()), - infoConverting(reflect.TypeFor[tfTypedExpanderObjectValue](), reflect.TypeFor[*awsExpanderSinglePtr]()), - traceMatchedFields("Field1", reflect.TypeFor[tfTypedExpanderObjectValue](), "Field1", reflect.TypeFor[*awsExpanderSinglePtr]()), - infoConvertingWithPath("Field1", reflect.TypeFor[fwtypes.ObjectValueOf[tfTypedExpander]](), "Field1", reflect.TypeFor[*awsExpander]()), - infoSourceImplementsFlexTypedExpander("Field1", reflect.TypeFor[tfTypedExpander](), "Field1", reflect.TypeFor[*awsExpander]()), - }, - }, - } - runAutoExpandTestCases(t, testCases) -} - -type TFExportedStruct struct { - Field1 types.String `tfsdk:"field1"` -} - -type tfExportedEmbeddedStruct struct { - TFExportedStruct - Field2 types.String `tfsdk:"field2"` -} - -type tfUnexportedEmbeddedStruct struct { - tfSingleStringField - Field2 types.String `tfsdk:"field2"` -} - -type awsEmbeddedStruct struct { - Field1 string - Field2 string -} - -func TestExpandEmbeddedStruct(t *testing.T) { - t.Parallel() - - testCases := autoFlexTestCases{ - "exported": { - Source: &tfExportedEmbeddedStruct{ - TFExportedStruct: TFExportedStruct{ - Field1: types.StringValue("a"), - }, - Field2: types.StringValue("b"), - }, - Target: &awsEmbeddedStruct{}, - WantTarget: &awsEmbeddedStruct{ - Field1: "a", - Field2: "b", - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[*tfExportedEmbeddedStruct](), reflect.TypeFor[*awsEmbeddedStruct]()), - infoConverting(reflect.TypeFor[tfExportedEmbeddedStruct](), reflect.TypeFor[*awsEmbeddedStruct]()), - traceMatchedFields("Field1", 
reflect.TypeFor[tfExportedEmbeddedStruct](), "Field1", reflect.TypeFor[*awsEmbeddedStruct]()), - infoConvertingWithPath("Field1", reflect.TypeFor[types.String](), "Field1", reflect.TypeFor[string]()), - traceMatchedFields("Field2", reflect.TypeFor[tfExportedEmbeddedStruct](), "Field2", reflect.TypeFor[*awsEmbeddedStruct]()), - infoConvertingWithPath("Field2", reflect.TypeFor[types.String](), "Field2", reflect.TypeFor[string]()), - }, - }, - "unexported": { - Source: &tfUnexportedEmbeddedStruct{ - tfSingleStringField: tfSingleStringField{ - Field1: types.StringValue("a"), - }, - Field2: types.StringValue("b"), - }, - Target: &awsEmbeddedStruct{}, - WantTarget: &awsEmbeddedStruct{ - Field1: "a", - Field2: "b", - }, - expectedLogLines: []map[string]any{ - infoExpanding(reflect.TypeFor[*tfUnexportedEmbeddedStruct](), reflect.TypeFor[*awsEmbeddedStruct]()), - infoConverting(reflect.TypeFor[tfUnexportedEmbeddedStruct](), reflect.TypeFor[*awsEmbeddedStruct]()), - traceMatchedFields("Field1", reflect.TypeFor[tfUnexportedEmbeddedStruct](), "Field1", reflect.TypeFor[*awsEmbeddedStruct]()), - infoConvertingWithPath("Field1", reflect.TypeFor[types.String](), "Field1", reflect.TypeFor[string]()), - traceMatchedFields("Field2", reflect.TypeFor[tfUnexportedEmbeddedStruct](), "Field2", reflect.TypeFor[*awsEmbeddedStruct]()), - infoConvertingWithPath("Field2", reflect.TypeFor[types.String](), "Field2", reflect.TypeFor[string]()), - }, - }, - } - runAutoExpandTestCases(t, testCases) -} - -type autoFlexTestCase struct { - Options []AutoFlexOptionsFunc - Source any - Target any - expectedDiags diag.Diagnostics - expectedLogLines []map[string]any - WantTarget any - WantDiff bool -} - -type autoFlexTestCases map[string]autoFlexTestCase - -func runAutoExpandTestCases(t *testing.T, testCases autoFlexTestCases) { - t.Helper() - - for testName, testCase := range testCases { - t.Run(testName, func(t *testing.T) { - t.Parallel() - - ctx := context.Background() - - var buf bytes.Buffer - ctx = 
tflogtest.RootLogger(ctx, &buf) - - ctx = registerTestingLogger(ctx) - - diags := Expand(ctx, testCase.Source, testCase.Target, testCase.Options...) - - if diff := cmp.Diff(diags, testCase.expectedDiags); diff != "" { - t.Errorf("unexpected diagnostics difference: %s", diff) - } - - lines, err := tflogtest.MultilineJSONDecode(&buf) - if err != nil { - t.Fatalf("Expand: decoding log lines: %s", err) - } - if diff := cmp.Diff(lines, testCase.expectedLogLines); diff != "" { - t.Errorf("unexpected log lines diff (+wanted, -got): %s", diff) - } - - if !diags.HasError() { - if diff := cmp.Diff(testCase.Target, testCase.WantTarget); diff != "" { - t.Errorf("unexpected diff (+wanted, -got): %s", diff) - } - } - }) - } -} diff --git a/internal/framework/flex/auto_flatten_test.go b/internal/framework/flex/auto_flatten_test.go deleted file mode 100644 index 3033900e9269..000000000000 --- a/internal/framework/flex/auto_flatten_test.go +++ /dev/null @@ -1,5512 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - -package flex - -import ( - "bytes" - "context" - "fmt" - "reflect" - "testing" - "time" - - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/google/go-cmp/cmp" - "github.com/google/go-cmp/cmp/cmpopts" - "github.com/hashicorp/terraform-plugin-framework-timetypes/timetypes" - "github.com/hashicorp/terraform-plugin-framework/attr" - "github.com/hashicorp/terraform-plugin-framework/diag" - "github.com/hashicorp/terraform-plugin-framework/types" - "github.com/hashicorp/terraform-plugin-log/tflogtest" - "github.com/hashicorp/terraform-provider-aws/internal/errs" - "github.com/hashicorp/terraform-provider-aws/internal/errs/fwdiag" - fwtypes "github.com/hashicorp/terraform-provider-aws/internal/framework/types" - smithyjson "github.com/hashicorp/terraform-provider-aws/internal/json" -) - -func TestFlatten(t *testing.T) { - t.Parallel() - - ctx := context.Background() - - var ( - typedNilSource *emptyStruct - typedNilTarget *emptyStruct - ) - - testString := "test" - - testARN := "arn:aws:securityhub:us-west-2:1234567890:control/cis-aws-foundations-benchmark/v/1.2.0/1.1" //lintignore:AWSAT003,AWSAT005 - - testTimeStr := "2013-09-25T09:34:01Z" - testTimeTime := errs.Must(time.Parse(time.RFC3339, testTimeStr)) - var zeroTime time.Time - - testCases := autoFlexTestCases{ - "nil Source": { - Target: &emptyStruct{}, - expectedDiags: diag.Diagnostics{ - diagFlatteningSourceIsNil(nil), - }, - expectedLogLines: []map[string]any{ - infoFlattening(nil, reflect.TypeFor[*emptyStruct]()), - errorSourceIsNil("", nil, "", reflect.TypeFor[*emptyStruct]()), - }, - }, - "typed nil Source": { - Source: typedNilSource, - Target: &emptyStruct{}, - expectedDiags: diag.Diagnostics{ - diagFlatteningSourceIsNil(reflect.TypeFor[*emptyStruct]()), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[*emptyStruct](), reflect.TypeFor[*emptyStruct]()), - errorSourceIsNil("", reflect.TypeFor[*emptyStruct](), "", 
reflect.TypeFor[*emptyStruct]()), - }, - }, - "nil Target": { - Source: emptyStruct{}, - expectedDiags: diag.Diagnostics{ - diagConvertingTargetIsNil(nil), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[emptyStruct](), nil), - errorTargetIsNil("", reflect.TypeFor[emptyStruct](), "", nil), - }, - }, - "typed nil Target": { - Source: emptyStruct{}, - Target: typedNilTarget, - expectedDiags: diag.Diagnostics{ - diagConvertingTargetIsNil(reflect.TypeFor[*emptyStruct]()), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[emptyStruct](), reflect.TypeFor[*emptyStruct]()), - errorTargetIsNil("", reflect.TypeFor[emptyStruct](), "", reflect.TypeFor[*emptyStruct]()), - }, - }, - "non-pointer Target": { - Source: emptyStruct{}, - Target: 0, - expectedDiags: diag.Diagnostics{ - diagConvertingTargetIsNotPointer(reflect.TypeFor[int]()), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[emptyStruct](), reflect.TypeFor[int]()), - errorTargetIsNotPointer("", reflect.TypeFor[emptyStruct](), "", reflect.TypeFor[int]()), - }, - }, - "non-struct Source struct Target": { - Source: testString, - Target: &emptyStruct{}, - expectedDiags: diag.Diagnostics{ - diagFlatteningTargetDoesNotImplementAttrValue(reflect.TypeFor[emptyStruct]()), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[string](), reflect.TypeFor[*emptyStruct]()), - errorTargetDoesNotImplementAttrValue("", reflect.TypeFor[string](), "", reflect.TypeFor[emptyStruct]()), - }, - }, - "struct Source non-struct Target": { - Source: emptyStruct{}, - Target: &testString, - expectedDiags: diag.Diagnostics{ - diagFlatteningTargetDoesNotImplementAttrValue(reflect.TypeFor[string]()), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[emptyStruct](), reflect.TypeFor[*string]()), - errorTargetDoesNotImplementAttrValue("", reflect.TypeFor[emptyStruct](), "", reflect.TypeFor[string]()), - }, - }, - "empty struct 
Source and Target": { - Source: emptyStruct{}, - Target: &emptyStruct{}, - WantTarget: &emptyStruct{}, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[emptyStruct](), reflect.TypeFor[*emptyStruct]()), - infoConverting(reflect.TypeFor[emptyStruct](), reflect.TypeFor[*emptyStruct]()), - }, - }, - "empty struct pointer Source and Target": { - Source: &emptyStruct{}, - Target: &emptyStruct{}, - WantTarget: &emptyStruct{}, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[*emptyStruct](), reflect.TypeFor[*emptyStruct]()), - infoConverting(reflect.TypeFor[emptyStruct](), reflect.TypeFor[*emptyStruct]()), - }, - }, - "single string struct pointer Source and empty Target": { - Source: &awsSingleStringValue{Field1: "a"}, - Target: &emptyStruct{}, - WantTarget: &emptyStruct{}, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[*awsSingleStringValue](), reflect.TypeFor[*emptyStruct]()), - infoConverting(reflect.TypeFor[awsSingleStringValue](), reflect.TypeFor[*emptyStruct]()), - debugNoCorrespondingField(reflect.TypeFor[awsSingleStringValue](), "Field1", reflect.TypeFor[*emptyStruct]()), - }, - }, - "target field does not implement attr.Value Target": { - Source: &awsSingleStringValue{Field1: "a"}, - Target: &awsSingleStringValue{}, - expectedDiags: diag.Diagnostics{ - diagFlatteningTargetDoesNotImplementAttrValue(reflect.TypeFor[string]()), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[*awsSingleStringValue](), reflect.TypeFor[*awsSingleStringValue]()), - infoConverting(reflect.TypeFor[awsSingleStringValue](), reflect.TypeFor[*awsSingleStringValue]()), - traceMatchedFields("Field1", reflect.TypeFor[awsSingleStringValue](), "Field1", reflect.TypeFor[*awsSingleStringValue]()), - errorTargetDoesNotImplementAttrValue("Field1", reflect.TypeFor[string](), "Field1", reflect.TypeFor[string]()), - }, - }, - "single empty string Source and single string Target": { - Source: 
&awsSingleStringValue{}, - Target: &tfSingleStringField{}, - WantTarget: &tfSingleStringField{Field1: types.StringValue("")}, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[*awsSingleStringValue](), reflect.TypeFor[*tfSingleStringField]()), - infoConverting(reflect.TypeFor[awsSingleStringValue](), reflect.TypeFor[*tfSingleStringField]()), - traceMatchedFields("Field1", reflect.TypeFor[awsSingleStringValue](), "Field1", reflect.TypeFor[*tfSingleStringField]()), - infoConvertingWithPath("Field1", reflect.TypeFor[string](), "Field1", reflect.TypeFor[types.String]()), - }, - }, - "single string Source and single string Target": { - Source: &awsSingleStringValue{Field1: "a"}, - Target: &tfSingleStringField{}, - WantTarget: &tfSingleStringField{Field1: types.StringValue("a")}, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[*awsSingleStringValue](), reflect.TypeFor[*tfSingleStringField]()), - infoConverting(reflect.TypeFor[awsSingleStringValue](), reflect.TypeFor[*tfSingleStringField]()), - traceMatchedFields("Field1", reflect.TypeFor[awsSingleStringValue](), "Field1", reflect.TypeFor[*tfSingleStringField]()), - infoConvertingWithPath("Field1", reflect.TypeFor[string](), "Field1", reflect.TypeFor[types.String]()), - }, - }, - "single byte slice Source and single string Target": { - Source: &awsSingleByteSliceValue{Field1: []byte("a")}, - Target: &tfSingleStringField{}, - WantTarget: &tfSingleStringField{Field1: types.StringValue("a")}, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[*awsSingleByteSliceValue](), reflect.TypeFor[*tfSingleStringField]()), - infoConverting(reflect.TypeFor[awsSingleByteSliceValue](), reflect.TypeFor[*tfSingleStringField]()), - traceMatchedFields("Field1", reflect.TypeFor[awsSingleByteSliceValue](), "Field1", reflect.TypeFor[*tfSingleStringField]()), - infoConvertingWithPath("Field1", reflect.TypeFor[[]byte](), "Field1", reflect.TypeFor[types.String]()), - }, - }, - "single 
nil *string Source and single string Target": { - Source: &awsSingleStringPointer{}, - Target: &tfSingleStringField{}, - WantTarget: &tfSingleStringField{Field1: types.StringNull()}, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[*awsSingleStringPointer](), reflect.TypeFor[*tfSingleStringField]()), - infoConverting(reflect.TypeFor[awsSingleStringPointer](), reflect.TypeFor[*tfSingleStringField]()), - traceMatchedFields("Field1", reflect.TypeFor[awsSingleStringPointer](), "Field1", reflect.TypeFor[*tfSingleStringField]()), - infoConvertingWithPath("Field1", reflect.TypeFor[*string](), "Field1", reflect.TypeFor[types.String]()), - }, - }, - "single *string Source and single string Target": { - Source: &awsSingleStringPointer{Field1: aws.String("a")}, - Target: &tfSingleStringField{}, - WantTarget: &tfSingleStringField{Field1: types.StringValue("a")}, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[*awsSingleStringPointer](), reflect.TypeFor[*tfSingleStringField]()), - infoConverting(reflect.TypeFor[awsSingleStringPointer](), reflect.TypeFor[*tfSingleStringField]()), - traceMatchedFields("Field1", reflect.TypeFor[awsSingleStringPointer](), "Field1", reflect.TypeFor[*tfSingleStringField]()), - infoConvertingWithPath("Field1", reflect.TypeFor[*string](), "Field1", reflect.TypeFor[types.String]()), - }, - }, - "single string Source and single int64 Target": { - Source: &awsSingleStringValue{Field1: "a"}, - Target: &tfSingleInt64Field{}, - WantTarget: &tfSingleInt64Field{}, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[*awsSingleStringValue](), reflect.TypeFor[*tfSingleInt64Field]()), - infoConverting(reflect.TypeFor[awsSingleStringValue](), reflect.TypeFor[*tfSingleInt64Field]()), - traceMatchedFields("Field1", reflect.TypeFor[awsSingleStringValue](), "Field1", reflect.TypeFor[*tfSingleInt64Field]()), - infoConvertingWithPath("Field1", reflect.TypeFor[string](), "Field1", 
reflect.TypeFor[types.Int64]()), - { - "@level": "error", - "@module": "provider.autoflex", - "@message": "AutoFlex Flatten; incompatible types", - "from": float64(reflect.String), - "to": map[string]any{}, - logAttrKeySourcePath: "Field1", - logAttrKeySourceType: fullTypeName(reflect.TypeFor[string]()), - logAttrKeyTargetPath: "Field1", - logAttrKeyTargetType: fullTypeName(reflect.TypeFor[types.Int64]()), - }, - }, - }, - "zero value primtive types Source and primtive types Target": { - Source: &awsAllThePrimitiveFields{}, - Target: &tfAllThePrimitiveFields{}, - WantTarget: &tfAllThePrimitiveFields{ - Field1: types.StringValue(""), - Field2: types.StringNull(), - Field3: types.Int64Value(0), - Field4: types.Int64Null(), - Field5: types.Int64Value(0), - Field6: types.Int64Null(), - Field7: types.Float64Value(0), - Field8: types.Float64Null(), - Field9: types.Float64Value(0), - Field10: types.Float64Null(), - Field11: types.BoolValue(false), - Field12: types.BoolNull(), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[*awsAllThePrimitiveFields](), reflect.TypeFor[*tfAllThePrimitiveFields]()), - infoConverting(reflect.TypeFor[awsAllThePrimitiveFields](), reflect.TypeFor[*tfAllThePrimitiveFields]()), - traceMatchedFields("Field1", reflect.TypeFor[awsAllThePrimitiveFields](), "Field1", reflect.TypeFor[*tfAllThePrimitiveFields]()), - infoConvertingWithPath("Field1", reflect.TypeFor[string](), "Field1", reflect.TypeFor[types.String]()), - traceMatchedFields("Field2", reflect.TypeFor[awsAllThePrimitiveFields](), "Field2", reflect.TypeFor[*tfAllThePrimitiveFields]()), - infoConvertingWithPath("Field2", reflect.TypeFor[*string](), "Field2", reflect.TypeFor[types.String]()), - traceMatchedFields("Field3", reflect.TypeFor[awsAllThePrimitiveFields](), "Field3", reflect.TypeFor[*tfAllThePrimitiveFields]()), - infoConvertingWithPath("Field3", reflect.TypeFor[int32](), "Field3", reflect.TypeFor[types.Int64]()), - traceMatchedFields("Field4", 
reflect.TypeFor[awsAllThePrimitiveFields](), "Field4", reflect.TypeFor[*tfAllThePrimitiveFields]()), - infoConvertingWithPath("Field4", reflect.TypeFor[*int32](), "Field4", reflect.TypeFor[types.Int64]()), - traceMatchedFields("Field5", reflect.TypeFor[awsAllThePrimitiveFields](), "Field5", reflect.TypeFor[*tfAllThePrimitiveFields]()), - infoConvertingWithPath("Field5", reflect.TypeFor[int64](), "Field5", reflect.TypeFor[types.Int64]()), - traceMatchedFields("Field6", reflect.TypeFor[awsAllThePrimitiveFields](), "Field6", reflect.TypeFor[*tfAllThePrimitiveFields]()), - infoConvertingWithPath("Field6", reflect.TypeFor[*int64](), "Field6", reflect.TypeFor[types.Int64]()), - traceMatchedFields("Field7", reflect.TypeFor[awsAllThePrimitiveFields](), "Field7", reflect.TypeFor[*tfAllThePrimitiveFields]()), - infoConvertingWithPath("Field7", reflect.TypeFor[float32](), "Field7", reflect.TypeFor[types.Float64]()), - traceMatchedFields("Field8", reflect.TypeFor[awsAllThePrimitiveFields](), "Field8", reflect.TypeFor[*tfAllThePrimitiveFields]()), - infoConvertingWithPath("Field8", reflect.TypeFor[*float32](), "Field8", reflect.TypeFor[types.Float64]()), - traceMatchedFields("Field9", reflect.TypeFor[awsAllThePrimitiveFields](), "Field9", reflect.TypeFor[*tfAllThePrimitiveFields]()), - infoConvertingWithPath("Field9", reflect.TypeFor[float64](), "Field9", reflect.TypeFor[types.Float64]()), - traceMatchedFields("Field10", reflect.TypeFor[awsAllThePrimitiveFields](), "Field10", reflect.TypeFor[*tfAllThePrimitiveFields]()), - infoConvertingWithPath("Field10", reflect.TypeFor[*float64](), "Field10", reflect.TypeFor[types.Float64]()), - traceMatchedFields("Field11", reflect.TypeFor[awsAllThePrimitiveFields](), "Field11", reflect.TypeFor[*tfAllThePrimitiveFields]()), - infoConvertingWithPath("Field11", reflect.TypeFor[bool](), "Field11", reflect.TypeFor[types.Bool]()), - traceMatchedFields("Field12", reflect.TypeFor[awsAllThePrimitiveFields](), "Field12", 
reflect.TypeFor[*tfAllThePrimitiveFields]()), - infoConvertingWithPath("Field12", reflect.TypeFor[*bool](), "Field12", reflect.TypeFor[types.Bool]()), - }, - }, - "primtive types Source and primtive types Target": { - Source: &awsAllThePrimitiveFields{ - Field1: "field1", - Field2: aws.String("field2"), - Field3: 3, - Field4: aws.Int32(-4), - Field5: 5, - Field6: aws.Int64(-6), - Field7: 7.7, - Field8: aws.Float32(-8.8), - Field9: 9.99, - Field10: aws.Float64(-10.101), - Field11: true, - Field12: aws.Bool(false), - }, - Target: &tfAllThePrimitiveFields{}, - WantTarget: &tfAllThePrimitiveFields{ - Field1: types.StringValue("field1"), - Field2: types.StringValue("field2"), - Field3: types.Int64Value(3), - Field4: types.Int64Value(-4), - Field5: types.Int64Value(5), - Field6: types.Int64Value(-6), - Field7: types.Float64Value(7.7), - Field8: types.Float64Value(-8.8), - Field9: types.Float64Value(9.99), - Field10: types.Float64Value(-10.101), - Field11: types.BoolValue(true), - Field12: types.BoolValue(false), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[*awsAllThePrimitiveFields](), reflect.TypeFor[*tfAllThePrimitiveFields]()), - infoConverting(reflect.TypeFor[awsAllThePrimitiveFields](), reflect.TypeFor[*tfAllThePrimitiveFields]()), - traceMatchedFields("Field1", reflect.TypeFor[awsAllThePrimitiveFields](), "Field1", reflect.TypeFor[*tfAllThePrimitiveFields]()), - infoConvertingWithPath("Field1", reflect.TypeFor[string](), "Field1", reflect.TypeFor[types.String]()), - traceMatchedFields("Field2", reflect.TypeFor[awsAllThePrimitiveFields](), "Field2", reflect.TypeFor[*tfAllThePrimitiveFields]()), - infoConvertingWithPath("Field2", reflect.TypeFor[*string](), "Field2", reflect.TypeFor[types.String]()), - traceMatchedFields("Field3", reflect.TypeFor[awsAllThePrimitiveFields](), "Field3", reflect.TypeFor[*tfAllThePrimitiveFields]()), - infoConvertingWithPath("Field3", reflect.TypeFor[int32](), "Field3", reflect.TypeFor[types.Int64]()), - 
traceMatchedFields("Field4", reflect.TypeFor[awsAllThePrimitiveFields](), "Field4", reflect.TypeFor[*tfAllThePrimitiveFields]()), - infoConvertingWithPath("Field4", reflect.TypeFor[*int32](), "Field4", reflect.TypeFor[types.Int64]()), - traceMatchedFields("Field5", reflect.TypeFor[awsAllThePrimitiveFields](), "Field5", reflect.TypeFor[*tfAllThePrimitiveFields]()), - infoConvertingWithPath("Field5", reflect.TypeFor[int64](), "Field5", reflect.TypeFor[types.Int64]()), - traceMatchedFields("Field6", reflect.TypeFor[awsAllThePrimitiveFields](), "Field6", reflect.TypeFor[*tfAllThePrimitiveFields]()), - infoConvertingWithPath("Field6", reflect.TypeFor[*int64](), "Field6", reflect.TypeFor[types.Int64]()), - traceMatchedFields("Field7", reflect.TypeFor[awsAllThePrimitiveFields](), "Field7", reflect.TypeFor[*tfAllThePrimitiveFields]()), - infoConvertingWithPath("Field7", reflect.TypeFor[float32](), "Field7", reflect.TypeFor[types.Float64]()), - traceMatchedFields("Field8", reflect.TypeFor[awsAllThePrimitiveFields](), "Field8", reflect.TypeFor[*tfAllThePrimitiveFields]()), - infoConvertingWithPath("Field8", reflect.TypeFor[*float32](), "Field8", reflect.TypeFor[types.Float64]()), - traceMatchedFields("Field9", reflect.TypeFor[awsAllThePrimitiveFields](), "Field9", reflect.TypeFor[*tfAllThePrimitiveFields]()), - infoConvertingWithPath("Field9", reflect.TypeFor[float64](), "Field9", reflect.TypeFor[types.Float64]()), - traceMatchedFields("Field10", reflect.TypeFor[awsAllThePrimitiveFields](), "Field10", reflect.TypeFor[*tfAllThePrimitiveFields]()), - infoConvertingWithPath("Field10", reflect.TypeFor[*float64](), "Field10", reflect.TypeFor[types.Float64]()), - traceMatchedFields("Field11", reflect.TypeFor[awsAllThePrimitiveFields](), "Field11", reflect.TypeFor[*tfAllThePrimitiveFields]()), - infoConvertingWithPath("Field11", reflect.TypeFor[bool](), "Field11", reflect.TypeFor[types.Bool]()), - traceMatchedFields("Field12", reflect.TypeFor[awsAllThePrimitiveFields](), "Field12", 
reflect.TypeFor[*tfAllThePrimitiveFields]()), - infoConvertingWithPath("Field12", reflect.TypeFor[*bool](), "Field12", reflect.TypeFor[types.Bool]()), - }, - }, - "zero value slice or map of primitive types Source and Collection of primtive types Target": { - Source: &awsCollectionsOfPrimitiveElements{}, - Target: &tfCollectionsOfPrimitiveElements{}, - WantTarget: &tfCollectionsOfPrimitiveElements{ - Field1: types.ListNull(types.StringType), - Field2: types.ListNull(types.StringType), - Field3: types.SetNull(types.StringType), - Field4: types.SetNull(types.StringType), - Field5: types.MapNull(types.StringType), - Field6: types.MapNull(types.StringType), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[*awsCollectionsOfPrimitiveElements](), reflect.TypeFor[*tfCollectionsOfPrimitiveElements]()), - infoConverting(reflect.TypeFor[awsCollectionsOfPrimitiveElements](), reflect.TypeFor[*tfCollectionsOfPrimitiveElements]()), - traceMatchedFields("Field1", reflect.TypeFor[awsCollectionsOfPrimitiveElements](), "Field1", reflect.TypeFor[*tfCollectionsOfPrimitiveElements]()), - infoConvertingWithPath("Field1", reflect.TypeFor[[]string](), "Field1", reflect.TypeFor[types.List]()), - traceFlatteningWithListNull("Field1", reflect.TypeFor[[]string](), "Field1", reflect.TypeFor[types.List]()), - traceMatchedFields("Field2", reflect.TypeFor[awsCollectionsOfPrimitiveElements](), "Field2", reflect.TypeFor[*tfCollectionsOfPrimitiveElements]()), - infoConvertingWithPath("Field2", reflect.TypeFor[[]*string](), "Field2", reflect.TypeFor[types.List]()), - traceFlatteningWithListNull("Field2", reflect.TypeFor[[]*string](), "Field2", reflect.TypeFor[types.List]()), - traceMatchedFields("Field3", reflect.TypeFor[awsCollectionsOfPrimitiveElements](), "Field3", reflect.TypeFor[*tfCollectionsOfPrimitiveElements]()), - infoConvertingWithPath("Field3", reflect.TypeFor[[]string](), "Field3", reflect.TypeFor[types.Set]()), - traceFlatteningWithSetNull("Field3", 
reflect.TypeFor[[]string](), "Field3", reflect.TypeFor[types.Set]()), - traceMatchedFields("Field4", reflect.TypeFor[awsCollectionsOfPrimitiveElements](), "Field4", reflect.TypeFor[*tfCollectionsOfPrimitiveElements]()), - infoConvertingWithPath("Field4", reflect.TypeFor[[]*string](), "Field4", reflect.TypeFor[types.Set]()), - traceFlatteningWithSetNull("Field4", reflect.TypeFor[[]*string](), "Field4", reflect.TypeFor[types.Set]()), - traceMatchedFields("Field5", reflect.TypeFor[awsCollectionsOfPrimitiveElements](), "Field5", reflect.TypeFor[*tfCollectionsOfPrimitiveElements]()), - infoConvertingWithPath("Field5", reflect.TypeFor[map[string]string](), "Field5", reflect.TypeFor[types.Map]()), - traceFlatteningWithMapNull("Field5", reflect.TypeFor[map[string]string](), "Field5", reflect.TypeFor[types.Map]()), - traceMatchedFields("Field6", reflect.TypeFor[awsCollectionsOfPrimitiveElements](), "Field6", reflect.TypeFor[*tfCollectionsOfPrimitiveElements]()), - infoConvertingWithPath("Field6", reflect.TypeFor[map[string]*string](), "Field6", reflect.TypeFor[types.Map]()), - traceFlatteningWithMapNull("Field6", reflect.TypeFor[map[string]*string](), "Field6", reflect.TypeFor[types.Map]()), - }, - }, - "slice or map of primitive types Source and Collection of primtive types Target": { - Source: &awsCollectionsOfPrimitiveElements{ - Field1: []string{"a", "b"}, - Field2: aws.StringSlice([]string{"a", "b"}), - Field3: []string{"a", "b"}, - Field4: aws.StringSlice([]string{"a", "b"}), - Field5: map[string]string{"A": "a", "B": "b"}, - Field6: aws.StringMap(map[string]string{"A": "a", "B": "b"}), - }, - Target: &tfCollectionsOfPrimitiveElements{}, - WantTarget: &tfCollectionsOfPrimitiveElements{ - Field1: types.ListValueMust(types.StringType, []attr.Value{ - types.StringValue("a"), - types.StringValue("b"), - }), - Field2: types.ListValueMust(types.StringType, []attr.Value{ - types.StringValue("a"), - types.StringValue("b"), - }), - Field3: types.SetValueMust(types.StringType, 
[]attr.Value{ - types.StringValue("a"), - types.StringValue("b"), - }), - Field4: types.SetValueMust(types.StringType, []attr.Value{ - types.StringValue("a"), - types.StringValue("b"), - }), - Field5: types.MapValueMust(types.StringType, map[string]attr.Value{ - "A": types.StringValue("a"), - "B": types.StringValue("b"), - }), - Field6: types.MapValueMust(types.StringType, map[string]attr.Value{ - "A": types.StringValue("a"), - "B": types.StringValue("b"), - }), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[*awsCollectionsOfPrimitiveElements](), reflect.TypeFor[*tfCollectionsOfPrimitiveElements]()), - infoConverting(reflect.TypeFor[awsCollectionsOfPrimitiveElements](), reflect.TypeFor[*tfCollectionsOfPrimitiveElements]()), - traceMatchedFields("Field1", reflect.TypeFor[awsCollectionsOfPrimitiveElements](), "Field1", reflect.TypeFor[*tfCollectionsOfPrimitiveElements]()), - infoConvertingWithPath("Field1", reflect.TypeFor[[]string](), "Field1", reflect.TypeFor[types.List]()), - traceFlatteningWithListValue("Field1", reflect.TypeFor[[]string](), 2, "Field1", reflect.TypeFor[types.List]()), - traceMatchedFields("Field2", reflect.TypeFor[awsCollectionsOfPrimitiveElements](), "Field2", reflect.TypeFor[*tfCollectionsOfPrimitiveElements]()), - infoConvertingWithPath("Field2", reflect.TypeFor[[]*string](), "Field2", reflect.TypeFor[types.List]()), - traceFlatteningWithListValue("Field2", reflect.TypeFor[[]*string](), 2, "Field2", reflect.TypeFor[types.List]()), - traceMatchedFields("Field3", reflect.TypeFor[awsCollectionsOfPrimitiveElements](), "Field3", reflect.TypeFor[*tfCollectionsOfPrimitiveElements]()), - infoConvertingWithPath("Field3", reflect.TypeFor[[]string](), "Field3", reflect.TypeFor[types.Set]()), - traceFlatteningWithSetValue("Field3", reflect.TypeFor[[]string](), 2, "Field3", reflect.TypeFor[types.Set]()), - traceMatchedFields("Field4", reflect.TypeFor[awsCollectionsOfPrimitiveElements](), "Field4", 
reflect.TypeFor[*tfCollectionsOfPrimitiveElements]()), - infoConvertingWithPath("Field4", reflect.TypeFor[[]*string](), "Field4", reflect.TypeFor[types.Set]()), - traceFlatteningWithSetValue("Field4", reflect.TypeFor[[]*string](), 2, "Field4", reflect.TypeFor[types.Set]()), - traceMatchedFields("Field5", reflect.TypeFor[awsCollectionsOfPrimitiveElements](), "Field5", reflect.TypeFor[*tfCollectionsOfPrimitiveElements]()), - infoConvertingWithPath("Field5", reflect.TypeFor[map[string]string](), "Field5", reflect.TypeFor[types.Map]()), - traceFlatteningWithMapValue("Field5", reflect.TypeFor[map[string]string](), 2, "Field5", reflect.TypeFor[types.Map]()), - traceMatchedFields("Field6", reflect.TypeFor[awsCollectionsOfPrimitiveElements](), "Field6", reflect.TypeFor[*tfCollectionsOfPrimitiveElements]()), - infoConvertingWithPath("Field6", reflect.TypeFor[map[string]*string](), "Field6", reflect.TypeFor[types.Map]()), - traceFlatteningWithMapValue("Field6", reflect.TypeFor[map[string]*string](), 2, "Field6", reflect.TypeFor[types.Map]()), - }, - }, - "zero value slice or map of string type Source and Collection of string types Target": { - Source: &awsCollectionsOfPrimitiveElements{}, - Target: &tfTypedCollectionsOfPrimitiveElements{}, - WantTarget: &tfTypedCollectionsOfPrimitiveElements{ - Field1: fwtypes.NewListValueOfNull[types.String](ctx), - Field2: fwtypes.NewListValueOfNull[types.String](ctx), - Field3: fwtypes.NewSetValueOfNull[types.String](ctx), - Field4: fwtypes.NewSetValueOfNull[types.String](ctx), - Field5: fwtypes.NewMapValueOfNull[types.String](ctx), - Field6: fwtypes.NewMapValueOfNull[types.String](ctx), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[*awsCollectionsOfPrimitiveElements](), reflect.TypeFor[*tfTypedCollectionsOfPrimitiveElements]()), - infoConverting(reflect.TypeFor[awsCollectionsOfPrimitiveElements](), reflect.TypeFor[*tfTypedCollectionsOfPrimitiveElements]()), - traceMatchedFields("Field1", 
reflect.TypeFor[awsCollectionsOfPrimitiveElements](), "Field1", reflect.TypeFor[*tfTypedCollectionsOfPrimitiveElements]()), - infoConvertingWithPath("Field1", reflect.TypeFor[[]string](), "Field1", reflect.TypeFor[fwtypes.ListValueOf[types.String]]()), - traceFlatteningWithListNull("Field1", reflect.TypeFor[[]string](), "Field1", reflect.TypeFor[fwtypes.ListValueOf[types.String]]()), - traceMatchedFields("Field2", reflect.TypeFor[awsCollectionsOfPrimitiveElements](), "Field2", reflect.TypeFor[*tfTypedCollectionsOfPrimitiveElements]()), - infoConvertingWithPath("Field2", reflect.TypeFor[[]*string](), "Field2", reflect.TypeFor[fwtypes.ListValueOf[types.String]]()), - traceFlatteningWithListNull("Field2", reflect.TypeFor[[]*string](), "Field2", reflect.TypeFor[fwtypes.ListValueOf[types.String]]()), - traceMatchedFields("Field3", reflect.TypeFor[awsCollectionsOfPrimitiveElements](), "Field3", reflect.TypeFor[*tfTypedCollectionsOfPrimitiveElements]()), - infoConvertingWithPath("Field3", reflect.TypeFor[[]string](), "Field3", reflect.TypeFor[fwtypes.SetValueOf[types.String]]()), - traceFlatteningWithSetNull("Field3", reflect.TypeFor[[]string](), "Field3", reflect.TypeFor[fwtypes.SetValueOf[types.String]]()), - traceMatchedFields("Field4", reflect.TypeFor[awsCollectionsOfPrimitiveElements](), "Field4", reflect.TypeFor[*tfTypedCollectionsOfPrimitiveElements]()), - infoConvertingWithPath("Field4", reflect.TypeFor[[]*string](), "Field4", reflect.TypeFor[fwtypes.SetValueOf[types.String]]()), - traceFlatteningWithSetNull("Field4", reflect.TypeFor[[]*string](), "Field4", reflect.TypeFor[fwtypes.SetValueOf[types.String]]()), - traceMatchedFields("Field5", reflect.TypeFor[awsCollectionsOfPrimitiveElements](), "Field5", reflect.TypeFor[*tfTypedCollectionsOfPrimitiveElements]()), - infoConvertingWithPath("Field5", reflect.TypeFor[map[string]string](), "Field5", reflect.TypeFor[fwtypes.MapValueOf[types.String]]()), - traceFlatteningWithMapNull("Field5", 
reflect.TypeFor[map[string]string](), "Field5", reflect.TypeFor[fwtypes.MapValueOf[types.String]]()), - traceMatchedFields("Field6", reflect.TypeFor[awsCollectionsOfPrimitiveElements](), "Field6", reflect.TypeFor[*tfTypedCollectionsOfPrimitiveElements]()), - infoConvertingWithPath("Field6", reflect.TypeFor[map[string]*string](), "Field6", reflect.TypeFor[fwtypes.MapValueOf[types.String]]()), - traceFlatteningWithMapNull("Field6", reflect.TypeFor[map[string]*string](), "Field6", reflect.TypeFor[fwtypes.MapValueOf[types.String]]()), - }, - }, - "slice or map of string types Source and Collection of string types Target": { - Source: &awsCollectionsOfPrimitiveElements{ - Field1: []string{"a", "b"}, - Field2: aws.StringSlice([]string{"a", "b"}), - Field3: []string{"a", "b"}, - Field4: aws.StringSlice([]string{"a", "b"}), - Field5: map[string]string{"A": "a", "B": "b"}, - Field6: aws.StringMap(map[string]string{"A": "a", "B": "b"}), - }, - Target: &tfTypedCollectionsOfPrimitiveElements{}, - WantTarget: &tfTypedCollectionsOfPrimitiveElements{ - Field1: fwtypes.NewListValueOfMust[types.String](ctx, []attr.Value{ - types.StringValue("a"), - types.StringValue("b"), - }), - Field2: fwtypes.NewListValueOfMust[types.String](ctx, []attr.Value{ - types.StringValue("a"), - types.StringValue("b"), - }), - Field3: fwtypes.NewSetValueOfMust[types.String](ctx, []attr.Value{ - types.StringValue("a"), - types.StringValue("b"), - }), - Field4: fwtypes.NewSetValueOfMust[types.String](ctx, []attr.Value{ - types.StringValue("a"), - types.StringValue("b"), - }), - Field5: fwtypes.NewMapValueOfMust[types.String](ctx, map[string]attr.Value{ - "A": types.StringValue("a"), - "B": types.StringValue("b"), - }), - Field6: fwtypes.NewMapValueOfMust[types.String](ctx, map[string]attr.Value{ - "A": types.StringValue("a"), - "B": types.StringValue("b"), - }), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[*awsCollectionsOfPrimitiveElements](), 
reflect.TypeFor[*tfTypedCollectionsOfPrimitiveElements]()), - infoConverting(reflect.TypeFor[awsCollectionsOfPrimitiveElements](), reflect.TypeFor[*tfTypedCollectionsOfPrimitiveElements]()), - traceMatchedFields("Field1", reflect.TypeFor[awsCollectionsOfPrimitiveElements](), "Field1", reflect.TypeFor[*tfTypedCollectionsOfPrimitiveElements]()), - infoConvertingWithPath("Field1", reflect.TypeFor[[]string](), "Field1", reflect.TypeFor[fwtypes.ListValueOf[types.String]]()), - traceFlatteningWithListValue("Field1", reflect.TypeFor[[]string](), 2, "Field1", reflect.TypeFor[fwtypes.ListValueOf[types.String]]()), - traceMatchedFields("Field2", reflect.TypeFor[awsCollectionsOfPrimitiveElements](), "Field2", reflect.TypeFor[*tfTypedCollectionsOfPrimitiveElements]()), - infoConvertingWithPath("Field2", reflect.TypeFor[[]*string](), "Field2", reflect.TypeFor[fwtypes.ListValueOf[types.String]]()), - traceFlatteningWithListValue("Field2", reflect.TypeFor[[]*string](), 2, "Field2", reflect.TypeFor[fwtypes.ListValueOf[types.String]]()), - traceMatchedFields("Field3", reflect.TypeFor[awsCollectionsOfPrimitiveElements](), "Field3", reflect.TypeFor[*tfTypedCollectionsOfPrimitiveElements]()), - infoConvertingWithPath("Field3", reflect.TypeFor[[]string](), "Field3", reflect.TypeFor[fwtypes.SetValueOf[types.String]]()), - traceFlatteningWithSetValue("Field3", reflect.TypeFor[[]string](), 2, "Field3", reflect.TypeFor[fwtypes.SetValueOf[types.String]]()), - traceMatchedFields("Field4", reflect.TypeFor[awsCollectionsOfPrimitiveElements](), "Field4", reflect.TypeFor[*tfTypedCollectionsOfPrimitiveElements]()), - infoConvertingWithPath("Field4", reflect.TypeFor[[]*string](), "Field4", reflect.TypeFor[fwtypes.SetValueOf[types.String]]()), - traceFlatteningWithSetValue("Field4", reflect.TypeFor[[]*string](), 2, "Field4", reflect.TypeFor[fwtypes.SetValueOf[types.String]]()), - traceMatchedFields("Field5", reflect.TypeFor[awsCollectionsOfPrimitiveElements](), "Field5", 
reflect.TypeFor[*tfTypedCollectionsOfPrimitiveElements]()), - infoConvertingWithPath("Field5", reflect.TypeFor[map[string]string](), "Field5", reflect.TypeFor[fwtypes.MapValueOf[types.String]]()), - traceFlatteningWithMapValue("Field5", reflect.TypeFor[map[string]string](), 2, "Field5", reflect.TypeFor[fwtypes.MapValueOf[types.String]]()), - traceMatchedFields("Field6", reflect.TypeFor[awsCollectionsOfPrimitiveElements](), "Field6", reflect.TypeFor[*tfTypedCollectionsOfPrimitiveElements]()), - infoConvertingWithPath("Field6", reflect.TypeFor[map[string]*string](), "Field6", reflect.TypeFor[fwtypes.MapValueOf[types.String]]()), - traceFlatteningWithMapValue("Field6", reflect.TypeFor[map[string]*string](), 2, "Field6", reflect.TypeFor[fwtypes.MapValueOf[types.String]]()), - }, - }, - "plural ordinary field names": { - Source: &awsPluralSliceOfNestedObjectValues{ - Fields: []awsSingleStringValue{{Field1: "a"}}, - }, - Target: &tfSingluarListOfNestedObjects{}, - WantTarget: &tfSingluarListOfNestedObjects{ - Field: fwtypes.NewListNestedObjectValueOfPtrMust(ctx, &tfSingleStringField{ - Field1: types.StringValue("a"), - }), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[*awsPluralSliceOfNestedObjectValues](), reflect.TypeFor[*tfSingluarListOfNestedObjects]()), - infoConverting(reflect.TypeFor[awsPluralSliceOfNestedObjectValues](), reflect.TypeFor[*tfSingluarListOfNestedObjects]()), - traceMatchedFields("Fields", reflect.TypeFor[awsPluralSliceOfNestedObjectValues](), "Field", reflect.TypeFor[*tfSingluarListOfNestedObjects]()), - infoConvertingWithPath("Fields", reflect.TypeFor[[]awsSingleStringValue](), "Field", reflect.TypeFor[fwtypes.ListNestedObjectValueOf[tfSingleStringField]]()), - traceFlatteningNestedObjectCollection("Fields", reflect.TypeFor[[]awsSingleStringValue](), 1, "Field", reflect.TypeFor[fwtypes.ListNestedObjectValueOf[tfSingleStringField]]()), - traceMatchedFieldsWithPath("Fields[0]", "Field1", 
reflect.TypeFor[awsSingleStringValue](), "Field[0]", "Field1", reflect.TypeFor[*tfSingleStringField]()), - infoConvertingWithPath("Fields[0].Field1", reflect.TypeFor[string](), "Field[0].Field1", reflect.TypeFor[types.String]()), - }, - }, - "plural field names": { - Source: &awsSpecialPluralization{ - Cities: []*string{ - aws.String("paris"), - aws.String("london"), - }, - Coaches: []*string{ - aws.String("guardiola"), - aws.String("mourinho"), - }, - Tomatoes: []*string{ - aws.String("brandywine"), - aws.String("roma"), - }, - Vertices: []*string{ - aws.String("ab"), - aws.String("bc"), - }, - Criteria: []*string{ - aws.String("votes"), - aws.String("editors"), - }, - Data: []*string{ - aws.String("d1282f78-fa99-5d9d-bd51-e6f0173eb74a"), - aws.String("0f10cb10-2076-5254-bd21-d3f62fe66303"), - }, - Hives: []*string{ - aws.String("Cegieme"), - aws.String("Fahumvid"), - }, - }, - Target: &tfSpecialPluralization{}, - WantTarget: &tfSpecialPluralization{ - City: types.ListValueMust(types.StringType, []attr.Value{ - types.StringValue("paris"), - types.StringValue("london"), - }), - Coach: types.ListValueMust(types.StringType, []attr.Value{ - types.StringValue("guardiola"), - types.StringValue("mourinho"), - }), - Tomato: types.ListValueMust(types.StringType, []attr.Value{ - types.StringValue("brandywine"), - types.StringValue("roma"), - }), - Vertex: types.ListValueMust(types.StringType, []attr.Value{ - types.StringValue("ab"), - types.StringValue("bc"), - }), - Criterion: types.ListValueMust(types.StringType, []attr.Value{ - types.StringValue("votes"), - types.StringValue("editors"), - }), - Datum: types.ListValueMust(types.StringType, []attr.Value{ - types.StringValue("d1282f78-fa99-5d9d-bd51-e6f0173eb74a"), - types.StringValue("0f10cb10-2076-5254-bd21-d3f62fe66303"), - }), - Hive: types.ListValueMust(types.StringType, []attr.Value{ - types.StringValue("Cegieme"), - types.StringValue("Fahumvid"), - }), - }, - expectedLogLines: []map[string]any{ - 
infoFlattening(reflect.TypeFor[*awsSpecialPluralization](), reflect.TypeFor[*tfSpecialPluralization]()), - infoConverting(reflect.TypeFor[awsSpecialPluralization](), reflect.TypeFor[*tfSpecialPluralization]()), - traceMatchedFields("Cities", reflect.TypeFor[awsSpecialPluralization](), "City", reflect.TypeFor[*tfSpecialPluralization]()), - infoConvertingWithPath("Cities", reflect.TypeFor[[]*string](), "City", reflect.TypeFor[types.List]()), - traceFlatteningWithListValue("Cities", reflect.TypeFor[[]*string](), 2, "City", reflect.TypeFor[types.List]()), - traceMatchedFields("Coaches", reflect.TypeFor[awsSpecialPluralization](), "Coach", reflect.TypeFor[*tfSpecialPluralization]()), - infoConvertingWithPath("Coaches", reflect.TypeFor[[]*string](), "Coach", reflect.TypeFor[types.List]()), - traceFlatteningWithListValue("Coaches", reflect.TypeFor[[]*string](), 2, "Coach", reflect.TypeFor[types.List]()), - traceMatchedFields("Tomatoes", reflect.TypeFor[awsSpecialPluralization](), "Tomato", reflect.TypeFor[*tfSpecialPluralization]()), - infoConvertingWithPath("Tomatoes", reflect.TypeFor[[]*string](), "Tomato", reflect.TypeFor[types.List]()), - traceFlatteningWithListValue("Tomatoes", reflect.TypeFor[[]*string](), 2, "Tomato", reflect.TypeFor[types.List]()), - traceMatchedFields("Vertices", reflect.TypeFor[awsSpecialPluralization](), "Vertex", reflect.TypeFor[*tfSpecialPluralization]()), - infoConvertingWithPath("Vertices", reflect.TypeFor[[]*string](), "Vertex", reflect.TypeFor[types.List]()), - traceFlatteningWithListValue("Vertices", reflect.TypeFor[[]*string](), 2, "Vertex", reflect.TypeFor[types.List]()), - traceMatchedFields("Criteria", reflect.TypeFor[awsSpecialPluralization](), "Criterion", reflect.TypeFor[*tfSpecialPluralization]()), - infoConvertingWithPath("Criteria", reflect.TypeFor[[]*string](), "Criterion", reflect.TypeFor[types.List]()), - traceFlatteningWithListValue("Criteria", reflect.TypeFor[[]*string](), 2, "Criterion", reflect.TypeFor[types.List]()), - 
traceMatchedFields("Data", reflect.TypeFor[awsSpecialPluralization](), "Datum", reflect.TypeFor[*tfSpecialPluralization]()), - infoConvertingWithPath("Data", reflect.TypeFor[[]*string](), "Datum", reflect.TypeFor[types.List]()), - traceFlatteningWithListValue("Data", reflect.TypeFor[[]*string](), 2, "Datum", reflect.TypeFor[types.List]()), - traceMatchedFields("Hives", reflect.TypeFor[awsSpecialPluralization](), "Hive", reflect.TypeFor[*tfSpecialPluralization]()), - infoConvertingWithPath("Hives", reflect.TypeFor[[]*string](), "Hive", reflect.TypeFor[types.List]()), - traceFlatteningWithListValue("Hives", reflect.TypeFor[[]*string](), 2, "Hive", reflect.TypeFor[types.List]()), - }, - }, - "strange plurality": { - Source: &awsPluralAndSingularFields{ - Value: "a", - Values: "b", - }, - Target: &tfPluralAndSingularFields{}, - WantTarget: &tfPluralAndSingularFields{ - Value: types.StringValue("a"), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[*awsPluralAndSingularFields](), reflect.TypeFor[*tfPluralAndSingularFields]()), - infoConverting(reflect.TypeFor[awsPluralAndSingularFields](), reflect.TypeFor[*tfPluralAndSingularFields]()), - traceMatchedFields("Value", reflect.TypeFor[awsPluralAndSingularFields](), "Value", reflect.TypeFor[*tfPluralAndSingularFields]()), - infoConvertingWithPath("Value", reflect.TypeFor[string](), "Value", reflect.TypeFor[types.String]()), - debugNoCorrespondingField(reflect.TypeFor[awsPluralAndSingularFields](), "Values", reflect.TypeFor[*tfPluralAndSingularFields]()), - }, - }, - "capitalization field names": { - Source: &awsCapitalizationDiff{ - FieldUrl: aws.String("h"), - }, - Target: &tfCaptializationDiff{}, - WantTarget: &tfCaptializationDiff{ - FieldURL: types.StringValue("h"), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[*awsCapitalizationDiff](), reflect.TypeFor[*tfCaptializationDiff]()), - infoConverting(reflect.TypeFor[awsCapitalizationDiff](), 
reflect.TypeFor[*tfCaptializationDiff]()), - traceMatchedFields("FieldUrl", reflect.TypeFor[awsCapitalizationDiff](), "FieldURL", reflect.TypeFor[*tfCaptializationDiff]()), - infoConvertingWithPath("FieldUrl", reflect.TypeFor[*string](), "FieldURL", reflect.TypeFor[types.String]()), - }, - }, - "resource name prefix": { - Options: []AutoFlexOptionsFunc{ - WithFieldNamePrefix("Intent"), - }, - Source: &awsFieldNamePrefix{ - IntentName: aws.String("Ovodoghen"), - }, - Target: &tfFieldNamePrefix{}, - WantTarget: &tfFieldNamePrefix{ - Name: types.StringValue("Ovodoghen"), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[*awsFieldNamePrefix](), reflect.TypeFor[*tfFieldNamePrefix]()), - infoConverting(reflect.TypeFor[awsFieldNamePrefix](), reflect.TypeFor[*tfFieldNamePrefix]()), - traceMatchedFields("IntentName", reflect.TypeFor[awsFieldNamePrefix](), "Name", reflect.TypeFor[*tfFieldNamePrefix]()), - infoConvertingWithPath("IntentName", reflect.TypeFor[*string](), "Name", reflect.TypeFor[types.String]()), - }, - }, - "resource name suffix": { - Options: []AutoFlexOptionsFunc{WithFieldNameSuffix("Config")}, - Source: &awsFieldNameSuffix{ - PolicyConfig: aws.String("foo"), - }, - Target: &tfFieldNameSuffix{}, - WantTarget: &tfFieldNameSuffix{ - Policy: types.StringValue("foo"), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[*awsFieldNameSuffix](), reflect.TypeFor[*tfFieldNameSuffix]()), - infoConverting(reflect.TypeFor[awsFieldNameSuffix](), reflect.TypeFor[*tfFieldNameSuffix]()), - traceMatchedFields("PolicyConfig", reflect.TypeFor[awsFieldNameSuffix](), "Policy", reflect.TypeFor[*tfFieldNameSuffix]()), - infoConvertingWithPath("PolicyConfig", reflect.TypeFor[*string](), "Policy", reflect.TypeFor[types.String]()), - }, - }, - "single string Source and single ARN Target": { - Source: &awsSingleStringValue{Field1: testARN}, - Target: &tfSingleARNField{}, - WantTarget: &tfSingleARNField{Field1: 
fwtypes.ARNValue(testARN)}, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[*awsSingleStringValue](), reflect.TypeFor[*tfSingleARNField]()), - infoConverting(reflect.TypeFor[awsSingleStringValue](), reflect.TypeFor[*tfSingleARNField]()), - traceMatchedFields("Field1", reflect.TypeFor[awsSingleStringValue](), "Field1", reflect.TypeFor[*tfSingleARNField]()), - infoConvertingWithPath("Field1", reflect.TypeFor[string](), "Field1", reflect.TypeFor[fwtypes.ARN]()), - }, - }, - "single *string Source and single ARN Target": { - Source: &awsSingleStringPointer{Field1: aws.String(testARN)}, - Target: &tfSingleARNField{}, - WantTarget: &tfSingleARNField{Field1: fwtypes.ARNValue(testARN)}, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[*awsSingleStringPointer](), reflect.TypeFor[*tfSingleARNField]()), - infoConverting(reflect.TypeFor[awsSingleStringPointer](), reflect.TypeFor[*tfSingleARNField]()), - traceMatchedFields("Field1", reflect.TypeFor[awsSingleStringPointer](), "Field1", reflect.TypeFor[*tfSingleARNField]()), - infoConvertingWithPath("Field1", reflect.TypeFor[*string](), "Field1", reflect.TypeFor[fwtypes.ARN]()), - }, - }, - "single nil *string Source and single ARN Target": { - Source: &awsSingleStringPointer{}, - Target: &tfSingleARNField{}, - WantTarget: &tfSingleARNField{Field1: fwtypes.ARNNull()}, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[*awsSingleStringPointer](), reflect.TypeFor[*tfSingleARNField]()), - infoConverting(reflect.TypeFor[awsSingleStringPointer](), reflect.TypeFor[*tfSingleARNField]()), - traceMatchedFields("Field1", reflect.TypeFor[awsSingleStringPointer](), "Field1", reflect.TypeFor[*tfSingleARNField]()), - infoConvertingWithPath("Field1", reflect.TypeFor[*string](), "Field1", reflect.TypeFor[fwtypes.ARN]()), - }, - }, - "timestamp": { - Source: &awsRFC3339TimeValue{ - CreationDateTime: testTimeTime, - }, - Target: &tfRFC3339Time{}, - WantTarget: &tfRFC3339Time{ - 
CreationDateTime: timetypes.NewRFC3339ValueMust(testTimeStr), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[*awsRFC3339TimeValue](), reflect.TypeFor[*tfRFC3339Time]()), - infoConverting(reflect.TypeFor[awsRFC3339TimeValue](), reflect.TypeFor[*tfRFC3339Time]()), - traceMatchedFields("CreationDateTime", reflect.TypeFor[awsRFC3339TimeValue](), "CreationDateTime", reflect.TypeFor[*tfRFC3339Time]()), - infoConvertingWithPath("CreationDateTime", reflect.TypeFor[time.Time](), "CreationDateTime", reflect.TypeFor[timetypes.RFC3339]()), - }, - }, - "timestamp pointer": { - Source: &awsRFC3339TimePointer{ - CreationDateTime: &testTimeTime, - }, - Target: &tfRFC3339Time{}, - WantTarget: &tfRFC3339Time{ - CreationDateTime: timetypes.NewRFC3339ValueMust(testTimeStr), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[*awsRFC3339TimePointer](), reflect.TypeFor[*tfRFC3339Time]()), - infoConverting(reflect.TypeFor[awsRFC3339TimePointer](), reflect.TypeFor[*tfRFC3339Time]()), - traceMatchedFields("CreationDateTime", reflect.TypeFor[awsRFC3339TimePointer](), "CreationDateTime", reflect.TypeFor[*tfRFC3339Time]()), - infoConvertingWithPath("CreationDateTime", reflect.TypeFor[*time.Time](), "CreationDateTime", reflect.TypeFor[timetypes.RFC3339]()), - }, - }, - "timestamp nil": { - Source: &awsRFC3339TimePointer{}, - Target: &tfRFC3339Time{}, - WantTarget: &tfRFC3339Time{ - CreationDateTime: timetypes.NewRFC3339Null(), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[*awsRFC3339TimePointer](), reflect.TypeFor[*tfRFC3339Time]()), - infoConverting(reflect.TypeFor[awsRFC3339TimePointer](), reflect.TypeFor[*tfRFC3339Time]()), - traceMatchedFields("CreationDateTime", reflect.TypeFor[awsRFC3339TimePointer](), "CreationDateTime", reflect.TypeFor[*tfRFC3339Time]()), - infoConvertingWithPath("CreationDateTime", reflect.TypeFor[*time.Time](), "CreationDateTime", reflect.TypeFor[timetypes.RFC3339]()), - }, - }, - 
"timestamp empty": { - Source: &awsRFC3339TimeValue{}, - Target: &tfRFC3339Time{}, - WantTarget: &tfRFC3339Time{ - CreationDateTime: timetypes.NewRFC3339TimeValue(zeroTime), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[*awsRFC3339TimeValue](), reflect.TypeFor[*tfRFC3339Time]()), - infoConverting(reflect.TypeFor[awsRFC3339TimeValue](), reflect.TypeFor[*tfRFC3339Time]()), - traceMatchedFields("CreationDateTime", reflect.TypeFor[awsRFC3339TimeValue](), "CreationDateTime", reflect.TypeFor[*tfRFC3339Time]()), - infoConvertingWithPath("CreationDateTime", reflect.TypeFor[time.Time](), "CreationDateTime", reflect.TypeFor[timetypes.RFC3339]()), - }, - }, - - "source struct field to non-attr.Value": { - Source: &awsRFC3339TimeValue{}, - Target: &awsRFC3339TimeValue{}, - expectedDiags: diag.Diagnostics{ - diagFlatteningTargetDoesNotImplementAttrValue(reflect.TypeFor[time.Time]()), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[*awsRFC3339TimeValue](), reflect.TypeFor[*awsRFC3339TimeValue]()), - infoConverting(reflect.TypeFor[awsRFC3339TimeValue](), reflect.TypeFor[*awsRFC3339TimeValue]()), - traceMatchedFields("CreationDateTime", reflect.TypeFor[awsRFC3339TimeValue](), "CreationDateTime", reflect.TypeFor[*awsRFC3339TimeValue]()), - errorTargetDoesNotImplementAttrValue("CreationDateTime", reflect.TypeFor[time.Time](), "CreationDateTime", reflect.TypeFor[time.Time]()), - }, - }, - "source struct ptr field to non-attr.Value": { - Source: &awsRFC3339TimePointer{}, - Target: &awsRFC3339TimeValue{}, - expectedDiags: diag.Diagnostics{ - diagFlatteningTargetDoesNotImplementAttrValue(reflect.TypeFor[time.Time]()), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[*awsRFC3339TimePointer](), reflect.TypeFor[*awsRFC3339TimeValue]()), - infoConverting(reflect.TypeFor[awsRFC3339TimePointer](), reflect.TypeFor[*awsRFC3339TimeValue]()), - traceMatchedFields("CreationDateTime", 
reflect.TypeFor[awsRFC3339TimePointer](), "CreationDateTime", reflect.TypeFor[*awsRFC3339TimeValue]()), - errorTargetDoesNotImplementAttrValue("CreationDateTime", reflect.TypeFor[*time.Time](), "CreationDateTime", reflect.TypeFor[time.Time]()), - }, - }, - "source struct field to non-attr.Value ptr": { - Source: &awsRFC3339TimeValue{}, - Target: &awsRFC3339TimePointer{}, - expectedDiags: diag.Diagnostics{ - diagFlatteningTargetDoesNotImplementAttrValue(reflect.TypeFor[*time.Time]()), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[*awsRFC3339TimeValue](), reflect.TypeFor[*awsRFC3339TimePointer]()), - infoConverting(reflect.TypeFor[awsRFC3339TimeValue](), reflect.TypeFor[*awsRFC3339TimePointer]()), - traceMatchedFields("CreationDateTime", reflect.TypeFor[awsRFC3339TimeValue](), "CreationDateTime", reflect.TypeFor[*awsRFC3339TimePointer]()), - errorTargetDoesNotImplementAttrValue("CreationDateTime", reflect.TypeFor[time.Time](), "CreationDateTime", reflect.TypeFor[*time.Time]()), - }, - }, - "source struct ptr field to non-attr.Value ptr": { - Source: &awsRFC3339TimePointer{}, - Target: &awsRFC3339TimePointer{}, - expectedDiags: diag.Diagnostics{ - diagFlatteningTargetDoesNotImplementAttrValue(reflect.TypeFor[*time.Time]()), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[*awsRFC3339TimePointer](), reflect.TypeFor[*awsRFC3339TimePointer]()), - infoConverting(reflect.TypeFor[awsRFC3339TimePointer](), reflect.TypeFor[*awsRFC3339TimePointer]()), - traceMatchedFields("CreationDateTime", reflect.TypeFor[awsRFC3339TimePointer](), "CreationDateTime", reflect.TypeFor[*awsRFC3339TimePointer]()), - errorTargetDoesNotImplementAttrValue("CreationDateTime", reflect.TypeFor[*time.Time](), "CreationDateTime", reflect.TypeFor[*time.Time]()), - }, - }, - } - - runAutoFlattenTestCases(t, testCases) -} - -func TestFlattenGeneric(t *testing.T) { - t.Parallel() - - ctx := context.Background() - - testCases := autoFlexTestCases{ - 
"complex Source and complex Target": { - Source: &awsComplexValue{ - Field1: "m", - Field2: &awsNestedObjectPointer{Field1: &awsSingleStringValue{Field1: "n"}}, - Field3: aws.StringMap(map[string]string{"X": "x", "Y": "y"}), - Field4: []awsSingleInt64Value{{Field1: 100}, {Field1: 2000}, {Field1: 30000}}, - }, - Target: &tfComplexValue{}, - WantTarget: &tfComplexValue{ - Field1: types.StringValue("m"), - Field2: fwtypes.NewListNestedObjectValueOfPtrMust(ctx, &tfListOfNestedObject{ - Field1: fwtypes.NewListNestedObjectValueOfPtrMust(ctx, &tfSingleStringField{ - Field1: types.StringValue("n"), - }), - }), - Field3: types.MapValueMust(types.StringType, map[string]attr.Value{ - "X": types.StringValue("x"), - "Y": types.StringValue("y"), - }), - Field4: fwtypes.NewSetNestedObjectValueOfValueSliceMust(ctx, []tfSingleInt64Field{ - {Field1: types.Int64Value(100)}, - {Field1: types.Int64Value(2000)}, - {Field1: types.Int64Value(30000)}, - }), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[*awsComplexValue](), reflect.TypeFor[*tfComplexValue]()), - infoConverting(reflect.TypeFor[awsComplexValue](), reflect.TypeFor[*tfComplexValue]()), - traceMatchedFields("Field1", reflect.TypeFor[awsComplexValue](), "Field1", reflect.TypeFor[*tfComplexValue]()), - infoConvertingWithPath("Field1", reflect.TypeFor[string](), "Field1", reflect.TypeFor[types.String]()), - - traceMatchedFields("Field2", reflect.TypeFor[awsComplexValue](), "Field2", reflect.TypeFor[*tfComplexValue]()), - infoConvertingWithPath("Field2", reflect.TypeFor[*awsNestedObjectPointer](), "Field2", reflect.TypeFor[fwtypes.ListNestedObjectValueOf[tfListOfNestedObject]]()), - traceMatchedFieldsWithPath("Field2", "Field1", reflect.TypeFor[awsNestedObjectPointer](), "Field2", "Field1", reflect.TypeFor[*tfListOfNestedObject]()), - infoConvertingWithPath("Field2.Field1", reflect.TypeFor[*awsSingleStringValue](), "Field2.Field1", 
reflect.TypeFor[fwtypes.ListNestedObjectValueOf[tfSingleStringField]]()), - traceMatchedFieldsWithPath("Field2.Field1", "Field1", reflect.TypeFor[awsSingleStringValue](), "Field2.Field1", "Field1", reflect.TypeFor[*tfSingleStringField]()), - infoConvertingWithPath("Field2.Field1.Field1", reflect.TypeFor[string](), "Field2.Field1.Field1", reflect.TypeFor[types.String]()), - - traceMatchedFields("Field3", reflect.TypeFor[awsComplexValue](), "Field3", reflect.TypeFor[*tfComplexValue]()), - infoConvertingWithPath("Field3", reflect.TypeFor[map[string]*string](), "Field3", reflect.TypeFor[types.Map]()), - traceFlatteningWithMapValue("Field3", reflect.TypeFor[map[string]*string](), 2, "Field3", reflect.TypeFor[types.Map]()), - - traceMatchedFields("Field4", reflect.TypeFor[awsComplexValue](), "Field4", reflect.TypeFor[*tfComplexValue]()), - infoConvertingWithPath("Field4", reflect.TypeFor[[]awsSingleInt64Value](), "Field4", reflect.TypeFor[fwtypes.SetNestedObjectValueOf[tfSingleInt64Field]]()), - traceFlatteningNestedObjectCollection("Field4", reflect.TypeFor[[]awsSingleInt64Value](), 3, "Field4", reflect.TypeFor[fwtypes.SetNestedObjectValueOf[tfSingleInt64Field]]()), - traceMatchedFieldsWithPath("Field4[0]", "Field1", reflect.TypeFor[awsSingleInt64Value](), "Field4[0]", "Field1", reflect.TypeFor[*tfSingleInt64Field]()), - infoConvertingWithPath("Field4[0].Field1", reflect.TypeFor[int64](), "Field4[0].Field1", reflect.TypeFor[types.Int64]()), - traceMatchedFieldsWithPath("Field4[1]", "Field1", reflect.TypeFor[awsSingleInt64Value](), "Field4[1]", "Field1", reflect.TypeFor[*tfSingleInt64Field]()), - infoConvertingWithPath("Field4[1].Field1", reflect.TypeFor[int64](), "Field4[1].Field1", reflect.TypeFor[types.Int64]()), - traceMatchedFieldsWithPath("Field4[2]", "Field1", reflect.TypeFor[awsSingleInt64Value](), "Field4[2]", "Field1", reflect.TypeFor[*tfSingleInt64Field]()), - infoConvertingWithPath("Field4[2].Field1", reflect.TypeFor[int64](), "Field4[2].Field1", 
reflect.TypeFor[types.Int64]()), - }, - }, - "map of string": { - Source: &awsMapOfString{ - FieldInner: map[string]string{ - "x": "y", - }, - }, - Target: &tfMapOfString{}, - WantTarget: &tfMapOfString{ - FieldInner: fwtypes.NewMapValueOfMust[types.String](ctx, map[string]attr.Value{ - "x": types.StringValue("y"), - }), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[*awsMapOfString](), reflect.TypeFor[*tfMapOfString]()), - infoConverting(reflect.TypeFor[awsMapOfString](), reflect.TypeFor[*tfMapOfString]()), - traceMatchedFields("FieldInner", reflect.TypeFor[awsMapOfString](), "FieldInner", reflect.TypeFor[*tfMapOfString]()), - infoConvertingWithPath("FieldInner", reflect.TypeFor[map[string]string](), "FieldInner", reflect.TypeFor[fwtypes.MapValueOf[types.String]]()), - traceFlatteningWithMapValue("FieldInner", reflect.TypeFor[map[string]string](), 1, "FieldInner", reflect.TypeFor[fwtypes.MapValueOf[types.String]]()), - }, - }, - "map of string pointer": { - Source: &awsMapOfStringPointer{ - FieldInner: map[string]*string{ - "x": aws.String("y"), - }, - }, - Target: &tfMapOfString{}, - WantTarget: &tfMapOfString{ - FieldInner: fwtypes.NewMapValueOfMust[types.String](ctx, map[string]attr.Value{ - "x": types.StringValue("y"), - }), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[*awsMapOfStringPointer](), reflect.TypeFor[*tfMapOfString]()), - infoConverting(reflect.TypeFor[awsMapOfStringPointer](), reflect.TypeFor[*tfMapOfString]()), - traceMatchedFields("FieldInner", reflect.TypeFor[awsMapOfStringPointer](), "FieldInner", reflect.TypeFor[*tfMapOfString]()), - infoConvertingWithPath("FieldInner", reflect.TypeFor[map[string]*string](), "FieldInner", reflect.TypeFor[fwtypes.MapValueOf[types.String]]()), - traceFlatteningWithMapValue("FieldInner", reflect.TypeFor[map[string]*string](), 1, "FieldInner", reflect.TypeFor[fwtypes.MapValueOf[types.String]]()), - }, - }, - "nested string map": { - Source: 
&awsNestedMapOfString{ - FieldOuter: awsMapOfString{ - FieldInner: map[string]string{ - "x": "y", - }, - }, - }, - Target: &tfNestedMapOfString{}, - WantTarget: &tfNestedMapOfString{ - FieldOuter: fwtypes.NewListNestedObjectValueOfPtrMust(ctx, &tfMapOfString{ - FieldInner: fwtypes.NewMapValueOfMust[types.String](ctx, map[string]attr.Value{ - "x": types.StringValue("y"), - }), - }), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[*awsNestedMapOfString](), reflect.TypeFor[*tfNestedMapOfString]()), - infoConverting(reflect.TypeFor[awsNestedMapOfString](), reflect.TypeFor[*tfNestedMapOfString]()), - traceMatchedFields("FieldOuter", reflect.TypeFor[awsNestedMapOfString](), "FieldOuter", reflect.TypeFor[*tfNestedMapOfString]()), - infoConvertingWithPath("FieldOuter", reflect.TypeFor[awsMapOfString](), "FieldOuter", reflect.TypeFor[fwtypes.ListNestedObjectValueOf[tfMapOfString]]()), - traceMatchedFieldsWithPath("FieldOuter", "FieldInner", reflect.TypeFor[awsMapOfString](), "FieldOuter", "FieldInner", reflect.TypeFor[*tfMapOfString]()), - infoConvertingWithPath("FieldOuter.FieldInner", reflect.TypeFor[map[string]string](), "FieldOuter.FieldInner", reflect.TypeFor[fwtypes.MapValueOf[types.String]]()), - traceFlatteningWithMapValue("FieldOuter.FieldInner", reflect.TypeFor[map[string]string](), 1, "FieldOuter.FieldInner", reflect.TypeFor[fwtypes.MapValueOf[types.String]]()), - }, - }, - "map of map of string": { - Source: &awsMapOfMapOfString{ - Field1: map[string]map[string]string{ - "x": { - "y": "z", - }, - }, - }, - Target: &tfMapOfMapOfString{}, - WantTarget: &tfMapOfMapOfString{ - Field1: fwtypes.NewMapValueOfMust[fwtypes.MapValueOf[types.String]](ctx, map[string]attr.Value{ - "x": fwtypes.NewMapValueOfMust[types.String](ctx, map[string]attr.Value{ - "y": types.StringValue("z"), - }), - }), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[*awsMapOfMapOfString](), reflect.TypeFor[*tfMapOfMapOfString]()), - 
infoConverting(reflect.TypeFor[awsMapOfMapOfString](), reflect.TypeFor[*tfMapOfMapOfString]()), - traceMatchedFields("Field1", reflect.TypeFor[awsMapOfMapOfString](), "Field1", reflect.TypeFor[*tfMapOfMapOfString]()), - infoConvertingWithPath("Field1", reflect.TypeFor[map[string]map[string]string](), "Field1", reflect.TypeFor[fwtypes.MapValueOf[fwtypes.MapValueOf[types.String]]]()), - traceFlatteningMap("Field1", reflect.TypeFor[map[string]map[string]string](), 1, "Field1", reflect.TypeFor[fwtypes.MapValueOf[fwtypes.MapValueOf[types.String]]]()), - traceFlatteningWithNewMapValueOf("Field1[\"x\"]", reflect.TypeFor[map[string]string](), 1, "Field1[\"x\"]", reflect.TypeFor[map[string]attr.Value]()), - }, - }, - "map of map of string pointer": { - Source: &awsMapOfMapOfStringPointer{ - Field1: map[string]map[string]*string{ - "x": { - "y": aws.String("z"), - }, - }, - }, - Target: &tfMapOfMapOfString{}, - WantTarget: &tfMapOfMapOfString{ - Field1: fwtypes.NewMapValueOfMust[fwtypes.MapValueOf[types.String]](ctx, map[string]attr.Value{ - "x": fwtypes.NewMapValueOfMust[types.String](ctx, map[string]attr.Value{ - "y": types.StringValue("z"), - }), - }), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[*awsMapOfMapOfStringPointer](), reflect.TypeFor[*tfMapOfMapOfString]()), - infoConverting(reflect.TypeFor[awsMapOfMapOfStringPointer](), reflect.TypeFor[*tfMapOfMapOfString]()), - traceMatchedFields("Field1", reflect.TypeFor[awsMapOfMapOfStringPointer](), "Field1", reflect.TypeFor[*tfMapOfMapOfString]()), - infoConvertingWithPath("Field1", reflect.TypeFor[map[string]map[string]*string](), "Field1", reflect.TypeFor[fwtypes.MapValueOf[fwtypes.MapValueOf[types.String]]]()), - traceFlatteningMap("Field1", reflect.TypeFor[map[string]map[string]*string](), 1, "Field1", reflect.TypeFor[fwtypes.MapValueOf[fwtypes.MapValueOf[types.String]]]()), - traceFlatteningWithNewMapValueOf("Field1[\"x\"]", reflect.TypeFor[map[string]*string](), 1, "Field1[\"x\"]", 
reflect.TypeFor[map[string]attr.Value]()), - }, - }, - } - - runAutoFlattenTestCases(t, testCases) -} - -func TestFlattenBool(t *testing.T) { - t.Parallel() - - testCases := map[string]autoFlexTestCases{ - "bool to Bool": { - "true": { - Source: awsSingleBoolValue{ - Field1: true, - }, - Target: &tfSingleBoolField{}, - WantTarget: &tfSingleBoolField{ - Field1: types.BoolValue(true), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[awsSingleBoolValue](), reflect.TypeFor[*tfSingleBoolField]()), - infoConverting(reflect.TypeFor[awsSingleBoolValue](), reflect.TypeFor[*tfSingleBoolField]()), - traceMatchedFields("Field1", reflect.TypeFor[awsSingleBoolValue](), "Field1", reflect.TypeFor[*tfSingleBoolField]()), - infoConvertingWithPath("Field1", reflect.TypeFor[bool](), "Field1", reflect.TypeFor[types.Bool]()), - }, - }, - "false": { - Source: awsSingleBoolValue{ - Field1: false, - }, - Target: &tfSingleBoolField{}, - WantTarget: &tfSingleBoolField{ - Field1: types.BoolValue(false), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[awsSingleBoolValue](), reflect.TypeFor[*tfSingleBoolField]()), - infoConverting(reflect.TypeFor[awsSingleBoolValue](), reflect.TypeFor[*tfSingleBoolField]()), - traceMatchedFields("Field1", reflect.TypeFor[awsSingleBoolValue](), "Field1", reflect.TypeFor[*tfSingleBoolField]()), - infoConvertingWithPath("Field1", reflect.TypeFor[bool](), "Field1", reflect.TypeFor[types.Bool]()), - }, - }, - }, - - "*bool to Bool": { - "true": { - Source: awsSingleBoolPointer{ - Field1: aws.Bool(true), - }, - Target: &tfSingleBoolField{}, - WantTarget: &tfSingleBoolField{ - Field1: types.BoolValue(true), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[awsSingleBoolPointer](), reflect.TypeFor[*tfSingleBoolField]()), - infoConverting(reflect.TypeFor[awsSingleBoolPointer](), reflect.TypeFor[*tfSingleBoolField]()), - traceMatchedFields("Field1", reflect.TypeFor[awsSingleBoolPointer](), 
"Field1", reflect.TypeFor[*tfSingleBoolField]()), - infoConvertingWithPath("Field1", reflect.TypeFor[*bool](), "Field1", reflect.TypeFor[types.Bool]()), - }, - }, - "false": { - Source: awsSingleBoolPointer{ - Field1: aws.Bool(false), - }, - Target: &tfSingleBoolField{}, - WantTarget: &tfSingleBoolField{ - Field1: types.BoolValue(false), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[awsSingleBoolPointer](), reflect.TypeFor[*tfSingleBoolField]()), - infoConverting(reflect.TypeFor[awsSingleBoolPointer](), reflect.TypeFor[*tfSingleBoolField]()), - traceMatchedFields("Field1", reflect.TypeFor[awsSingleBoolPointer](), "Field1", reflect.TypeFor[*tfSingleBoolField]()), - infoConvertingWithPath("Field1", reflect.TypeFor[*bool](), "Field1", reflect.TypeFor[types.Bool]()), - }, - }, - "null": { - Source: awsSingleBoolPointer{ - Field1: nil, - }, - Target: &tfSingleBoolField{}, - WantTarget: &tfSingleBoolField{ - Field1: types.BoolNull(), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[awsSingleBoolPointer](), reflect.TypeFor[*tfSingleBoolField]()), - infoConverting(reflect.TypeFor[awsSingleBoolPointer](), reflect.TypeFor[*tfSingleBoolField]()), - traceMatchedFields("Field1", reflect.TypeFor[awsSingleBoolPointer](), "Field1", reflect.TypeFor[*tfSingleBoolField]()), - infoConvertingWithPath("Field1", reflect.TypeFor[*bool](), "Field1", reflect.TypeFor[types.Bool]()), - }, - }, - }, - - "legacy *bool to Bool": { - "true": { - Source: awsSingleBoolPointer{ - Field1: aws.Bool(true), - }, - Target: &tfSingleBoolFieldLegacy{}, - WantTarget: &tfSingleBoolFieldLegacy{ - Field1: types.BoolValue(true), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[awsSingleBoolPointer](), reflect.TypeFor[*tfSingleBoolFieldLegacy]()), - infoConverting(reflect.TypeFor[awsSingleBoolPointer](), reflect.TypeFor[*tfSingleBoolFieldLegacy]()), - traceMatchedFields("Field1", reflect.TypeFor[awsSingleBoolPointer](), "Field1", 
reflect.TypeFor[*tfSingleBoolFieldLegacy]()), - infoConvertingWithPath("Field1", reflect.TypeFor[*bool](), "Field1", reflect.TypeFor[types.Bool]()), - debugUsingLegacyFlattener("Field1", reflect.TypeFor[*bool](), "Field1", reflect.TypeFor[types.Bool]()), - }, - }, - "false": { - Source: awsSingleBoolPointer{ - Field1: aws.Bool(false), - }, - Target: &tfSingleBoolFieldLegacy{}, - WantTarget: &tfSingleBoolFieldLegacy{ - Field1: types.BoolValue(false), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[awsSingleBoolPointer](), reflect.TypeFor[*tfSingleBoolFieldLegacy]()), - infoConverting(reflect.TypeFor[awsSingleBoolPointer](), reflect.TypeFor[*tfSingleBoolFieldLegacy]()), - traceMatchedFields("Field1", reflect.TypeFor[awsSingleBoolPointer](), "Field1", reflect.TypeFor[*tfSingleBoolFieldLegacy]()), - infoConvertingWithPath("Field1", reflect.TypeFor[*bool](), "Field1", reflect.TypeFor[types.Bool]()), - debugUsingLegacyFlattener("Field1", reflect.TypeFor[*bool](), "Field1", reflect.TypeFor[types.Bool]()), - }, - }, - "null": { - Source: awsSingleBoolPointer{ - Field1: nil, - }, - Target: &tfSingleBoolFieldLegacy{}, - WantTarget: &tfSingleBoolFieldLegacy{ - Field1: types.BoolValue(false), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[awsSingleBoolPointer](), reflect.TypeFor[*tfSingleBoolFieldLegacy]()), - infoConverting(reflect.TypeFor[awsSingleBoolPointer](), reflect.TypeFor[*tfSingleBoolFieldLegacy]()), - traceMatchedFields("Field1", reflect.TypeFor[awsSingleBoolPointer](), "Field1", reflect.TypeFor[*tfSingleBoolFieldLegacy]()), - infoConvertingWithPath("Field1", reflect.TypeFor[*bool](), "Field1", reflect.TypeFor[types.Bool]()), - debugUsingLegacyFlattener("Field1", reflect.TypeFor[*bool](), "Field1", reflect.TypeFor[types.Bool]()), - }, - }, - }, - } - - for testName, cases := range testCases { - t.Run(testName, func(t *testing.T) { - t.Parallel() - - runAutoFlattenTestCases(t, cases) - }) - } -} - -func 
TestFlattenFloat64(t *testing.T) { - t.Parallel() - - testCases := map[string]autoFlexTestCases{ - "float64 to Float64": { - "value": { - Source: awsSingleFloat64Value{ - Field1: 42, - }, - Target: &tfSingleFloat64Field{}, - WantTarget: &tfSingleFloat64Field{ - Field1: types.Float64Value(42), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[awsSingleFloat64Value](), reflect.TypeFor[*tfSingleFloat64Field]()), - infoConverting(reflect.TypeFor[awsSingleFloat64Value](), reflect.TypeFor[*tfSingleFloat64Field]()), - traceMatchedFields("Field1", reflect.TypeFor[awsSingleFloat64Value](), "Field1", reflect.TypeFor[*tfSingleFloat64Field]()), - infoConvertingWithPath("Field1", reflect.TypeFor[float64](), "Field1", reflect.TypeFor[types.Float64]()), - }, - }, - "zero": { - Source: awsSingleFloat64Value{ - Field1: 0, - }, - Target: &tfSingleFloat64Field{}, - WantTarget: &tfSingleFloat64Field{ - Field1: types.Float64Value(0), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[awsSingleFloat64Value](), reflect.TypeFor[*tfSingleFloat64Field]()), - infoConverting(reflect.TypeFor[awsSingleFloat64Value](), reflect.TypeFor[*tfSingleFloat64Field]()), - traceMatchedFields("Field1", reflect.TypeFor[awsSingleFloat64Value](), "Field1", reflect.TypeFor[*tfSingleFloat64Field]()), - infoConvertingWithPath("Field1", reflect.TypeFor[float64](), "Field1", reflect.TypeFor[types.Float64]()), - }, - }, - }, - - "*float64 to Float64": { - "value": { - Source: awsSingleFloat64Pointer{ - Field1: aws.Float64(42), - }, - Target: &tfSingleFloat64Field{}, - WantTarget: &tfSingleFloat64Field{ - Field1: types.Float64Value(42), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[awsSingleFloat64Pointer](), reflect.TypeFor[*tfSingleFloat64Field]()), - infoConverting(reflect.TypeFor[awsSingleFloat64Pointer](), reflect.TypeFor[*tfSingleFloat64Field]()), - traceMatchedFields("Field1", reflect.TypeFor[awsSingleFloat64Pointer](), "Field1", 
reflect.TypeFor[*tfSingleFloat64Field]()), - infoConvertingWithPath("Field1", reflect.TypeFor[*float64](), "Field1", reflect.TypeFor[types.Float64]()), - }, - }, - "zero": { - Source: awsSingleFloat64Pointer{ - Field1: aws.Float64(0), - }, - Target: &tfSingleFloat64Field{}, - WantTarget: &tfSingleFloat64Field{ - Field1: types.Float64Value(0), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[awsSingleFloat64Pointer](), reflect.TypeFor[*tfSingleFloat64Field]()), - infoConverting(reflect.TypeFor[awsSingleFloat64Pointer](), reflect.TypeFor[*tfSingleFloat64Field]()), - traceMatchedFields("Field1", reflect.TypeFor[awsSingleFloat64Pointer](), "Field1", reflect.TypeFor[*tfSingleFloat64Field]()), - infoConvertingWithPath("Field1", reflect.TypeFor[*float64](), "Field1", reflect.TypeFor[types.Float64]()), - }, - }, - "null": { - Source: awsSingleFloat64Pointer{ - Field1: nil, - }, - Target: &tfSingleFloat64Field{}, - WantTarget: &tfSingleFloat64Field{ - Field1: types.Float64Null(), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[awsSingleFloat64Pointer](), reflect.TypeFor[*tfSingleFloat64Field]()), - infoConverting(reflect.TypeFor[awsSingleFloat64Pointer](), reflect.TypeFor[*tfSingleFloat64Field]()), - traceMatchedFields("Field1", reflect.TypeFor[awsSingleFloat64Pointer](), "Field1", reflect.TypeFor[*tfSingleFloat64Field]()), - infoConvertingWithPath("Field1", reflect.TypeFor[*float64](), "Field1", reflect.TypeFor[types.Float64]()), - }, - }, - }, - - "legacy *float64 to Float64": { - "value": { - Source: awsSingleFloat64Pointer{ - Field1: aws.Float64(42), - }, - Target: &tfSingleFloat64FieldLegacy{}, - WantTarget: &tfSingleFloat64FieldLegacy{ - Field1: types.Float64Value(42), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[awsSingleFloat64Pointer](), reflect.TypeFor[*tfSingleFloat64FieldLegacy]()), - infoConverting(reflect.TypeFor[awsSingleFloat64Pointer](), 
reflect.TypeFor[*tfSingleFloat64FieldLegacy]()), - traceMatchedFields("Field1", reflect.TypeFor[awsSingleFloat64Pointer](), "Field1", reflect.TypeFor[*tfSingleFloat64FieldLegacy]()), - infoConvertingWithPath("Field1", reflect.TypeFor[*float64](), "Field1", reflect.TypeFor[types.Float64]()), - debugUsingLegacyFlattener("Field1", reflect.TypeFor[*float64](), "Field1", reflect.TypeFor[types.Float64]()), - }, - }, - "zero": { - Source: awsSingleFloat64Pointer{ - Field1: aws.Float64(0), - }, - Target: &tfSingleFloat64FieldLegacy{}, - WantTarget: &tfSingleFloat64FieldLegacy{ - Field1: types.Float64Value(0), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[awsSingleFloat64Pointer](), reflect.TypeFor[*tfSingleFloat64FieldLegacy]()), - infoConverting(reflect.TypeFor[awsSingleFloat64Pointer](), reflect.TypeFor[*tfSingleFloat64FieldLegacy]()), - traceMatchedFields("Field1", reflect.TypeFor[awsSingleFloat64Pointer](), "Field1", reflect.TypeFor[*tfSingleFloat64FieldLegacy]()), - infoConvertingWithPath("Field1", reflect.TypeFor[*float64](), "Field1", reflect.TypeFor[types.Float64]()), - debugUsingLegacyFlattener("Field1", reflect.TypeFor[*float64](), "Field1", reflect.TypeFor[types.Float64]()), - }, - }, - "null": { - Source: awsSingleFloat64Pointer{ - Field1: nil, - }, - Target: &tfSingleFloat64FieldLegacy{}, - WantTarget: &tfSingleFloat64FieldLegacy{ - Field1: types.Float64Value(0), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[awsSingleFloat64Pointer](), reflect.TypeFor[*tfSingleFloat64FieldLegacy]()), - infoConverting(reflect.TypeFor[awsSingleFloat64Pointer](), reflect.TypeFor[*tfSingleFloat64FieldLegacy]()), - traceMatchedFields("Field1", reflect.TypeFor[awsSingleFloat64Pointer](), "Field1", reflect.TypeFor[*tfSingleFloat64FieldLegacy]()), - infoConvertingWithPath("Field1", reflect.TypeFor[*float64](), "Field1", reflect.TypeFor[types.Float64]()), - debugUsingLegacyFlattener("Field1", reflect.TypeFor[*float64](), 
"Field1", reflect.TypeFor[types.Float64]()), - }, - }, - }, - - // For historical reasons, float32 can be flattened to Float64 values - "float32 to Float64": { - "value": { - Source: awsSingleFloat32Value{ - Field1: 42, - }, - Target: &tfSingleFloat64Field{}, - WantTarget: &tfSingleFloat64Field{ - Field1: types.Float64Value(42), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[awsSingleFloat32Value](), reflect.TypeFor[*tfSingleFloat64Field]()), - infoConverting(reflect.TypeFor[awsSingleFloat32Value](), reflect.TypeFor[*tfSingleFloat64Field]()), - traceMatchedFields("Field1", reflect.TypeFor[awsSingleFloat32Value](), "Field1", reflect.TypeFor[*tfSingleFloat64Field]()), - infoConvertingWithPath("Field1", reflect.TypeFor[float32](), "Field1", reflect.TypeFor[types.Float64]()), - }, - }, - "zero": { - Source: awsSingleFloat32Value{ - Field1: 0, - }, - Target: &tfSingleFloat64Field{}, - WantTarget: &tfSingleFloat64Field{ - Field1: types.Float64Value(0), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[awsSingleFloat32Value](), reflect.TypeFor[*tfSingleFloat64Field]()), - infoConverting(reflect.TypeFor[awsSingleFloat32Value](), reflect.TypeFor[*tfSingleFloat64Field]()), - traceMatchedFields("Field1", reflect.TypeFor[awsSingleFloat32Value](), "Field1", reflect.TypeFor[*tfSingleFloat64Field]()), - infoConvertingWithPath("Field1", reflect.TypeFor[float32](), "Field1", reflect.TypeFor[types.Float64]()), - }, - }, - }, - - "*float32 to Float64": { - "value": { - Source: awsSingleFloat32Pointer{ - Field1: aws.Float32(42), - }, - Target: &tfSingleFloat64Field{}, - WantTarget: &tfSingleFloat64Field{ - Field1: types.Float64Value(42), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[awsSingleFloat32Pointer](), reflect.TypeFor[*tfSingleFloat64Field]()), - infoConverting(reflect.TypeFor[awsSingleFloat32Pointer](), reflect.TypeFor[*tfSingleFloat64Field]()), - traceMatchedFields("Field1", 
reflect.TypeFor[awsSingleFloat32Pointer](), "Field1", reflect.TypeFor[*tfSingleFloat64Field]()), - infoConvertingWithPath("Field1", reflect.TypeFor[*float32](), "Field1", reflect.TypeFor[types.Float64]()), - }, - }, - "zero": { - Source: awsSingleFloat32Pointer{ - Field1: aws.Float32(0), - }, - Target: &tfSingleFloat64Field{}, - WantTarget: &tfSingleFloat64Field{ - Field1: types.Float64Value(0), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[awsSingleFloat32Pointer](), reflect.TypeFor[*tfSingleFloat64Field]()), - infoConverting(reflect.TypeFor[awsSingleFloat32Pointer](), reflect.TypeFor[*tfSingleFloat64Field]()), - traceMatchedFields("Field1", reflect.TypeFor[awsSingleFloat32Pointer](), "Field1", reflect.TypeFor[*tfSingleFloat64Field]()), - infoConvertingWithPath("Field1", reflect.TypeFor[*float32](), "Field1", reflect.TypeFor[types.Float64]()), - }, - }, - "null": { - Source: awsSingleFloat32Pointer{ - Field1: nil, - }, - Target: &tfSingleFloat64Field{}, - WantTarget: &tfSingleFloat64Field{ - Field1: types.Float64Null(), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[awsSingleFloat32Pointer](), reflect.TypeFor[*tfSingleFloat64Field]()), - infoConverting(reflect.TypeFor[awsSingleFloat32Pointer](), reflect.TypeFor[*tfSingleFloat64Field]()), - traceMatchedFields("Field1", reflect.TypeFor[awsSingleFloat32Pointer](), "Field1", reflect.TypeFor[*tfSingleFloat64Field]()), - infoConvertingWithPath("Field1", reflect.TypeFor[*float32](), "Field1", reflect.TypeFor[types.Float64]()), - }, - }, - }, - - "legacy *float32 to Float64": { - "value": { - Source: awsSingleFloat32Pointer{ - Field1: aws.Float32(42), - }, - Target: &tfSingleFloat64FieldLegacy{}, - WantTarget: &tfSingleFloat64FieldLegacy{ - Field1: types.Float64Value(42), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[awsSingleFloat32Pointer](), reflect.TypeFor[*tfSingleFloat64FieldLegacy]()), - 
infoConverting(reflect.TypeFor[awsSingleFloat32Pointer](), reflect.TypeFor[*tfSingleFloat64FieldLegacy]()), - traceMatchedFields("Field1", reflect.TypeFor[awsSingleFloat32Pointer](), "Field1", reflect.TypeFor[*tfSingleFloat64FieldLegacy]()), - infoConvertingWithPath("Field1", reflect.TypeFor[*float32](), "Field1", reflect.TypeFor[types.Float64]()), - debugUsingLegacyFlattener("Field1", reflect.TypeFor[*float32](), "Field1", reflect.TypeFor[types.Float64]()), - }, - }, - "zero": { - Source: awsSingleFloat32Pointer{ - Field1: aws.Float32(0), - }, - Target: &tfSingleFloat64FieldLegacy{}, - WantTarget: &tfSingleFloat64FieldLegacy{ - Field1: types.Float64Value(0), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[awsSingleFloat32Pointer](), reflect.TypeFor[*tfSingleFloat64FieldLegacy]()), - infoConverting(reflect.TypeFor[awsSingleFloat32Pointer](), reflect.TypeFor[*tfSingleFloat64FieldLegacy]()), - traceMatchedFields("Field1", reflect.TypeFor[awsSingleFloat32Pointer](), "Field1", reflect.TypeFor[*tfSingleFloat64FieldLegacy]()), - infoConvertingWithPath("Field1", reflect.TypeFor[*float32](), "Field1", reflect.TypeFor[types.Float64]()), - debugUsingLegacyFlattener("Field1", reflect.TypeFor[*float32](), "Field1", reflect.TypeFor[types.Float64]()), - }, - }, - "null": { - Source: awsSingleFloat32Pointer{ - Field1: nil, - }, - Target: &tfSingleFloat64FieldLegacy{}, - WantTarget: &tfSingleFloat64FieldLegacy{ - Field1: types.Float64Value(0), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[awsSingleFloat32Pointer](), reflect.TypeFor[*tfSingleFloat64FieldLegacy]()), - infoConverting(reflect.TypeFor[awsSingleFloat32Pointer](), reflect.TypeFor[*tfSingleFloat64FieldLegacy]()), - traceMatchedFields("Field1", reflect.TypeFor[awsSingleFloat32Pointer](), "Field1", reflect.TypeFor[*tfSingleFloat64FieldLegacy]()), - infoConvertingWithPath("Field1", reflect.TypeFor[*float32](), "Field1", reflect.TypeFor[types.Float64]()), - 
debugUsingLegacyFlattener("Field1", reflect.TypeFor[*float32](), "Field1", reflect.TypeFor[types.Float64]()), - }, - }, - }, - } - - for testName, cases := range testCases { - t.Run(testName, func(t *testing.T) { - t.Parallel() - - runAutoFlattenTestCases(t, cases) - }) - } -} - -func TestFlattenFloat32(t *testing.T) { - t.Parallel() - - testCases := map[string]autoFlexTestCases{ - "float32 to Float32": { - "value": { - Source: awsSingleFloat32Value{ - Field1: 42, - }, - Target: &tfSingleFloat32Field{}, - WantTarget: &tfSingleFloat32Field{ - Field1: types.Float32Value(42), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[awsSingleFloat32Value](), reflect.TypeFor[*tfSingleFloat32Field]()), - infoConverting(reflect.TypeFor[awsSingleFloat32Value](), reflect.TypeFor[*tfSingleFloat32Field]()), - traceMatchedFields("Field1", reflect.TypeFor[awsSingleFloat32Value](), "Field1", reflect.TypeFor[*tfSingleFloat32Field]()), - infoConvertingWithPath("Field1", reflect.TypeFor[float32](), "Field1", reflect.TypeFor[types.Float32]()), - }, - }, - "zero": { - Source: awsSingleFloat32Value{ - Field1: 0, - }, - Target: &tfSingleFloat32Field{}, - WantTarget: &tfSingleFloat32Field{ - Field1: types.Float32Value(0), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[awsSingleFloat32Value](), reflect.TypeFor[*tfSingleFloat32Field]()), - infoConverting(reflect.TypeFor[awsSingleFloat32Value](), reflect.TypeFor[*tfSingleFloat32Field]()), - traceMatchedFields("Field1", reflect.TypeFor[awsSingleFloat32Value](), "Field1", reflect.TypeFor[*tfSingleFloat32Field]()), - infoConvertingWithPath("Field1", reflect.TypeFor[float32](), "Field1", reflect.TypeFor[types.Float32]()), - }, - }, - }, - - "*float32 to Float32": { - "value": { - Source: awsSingleFloat32Pointer{ - Field1: aws.Float32(42), - }, - Target: &tfSingleFloat32Field{}, - WantTarget: &tfSingleFloat32Field{ - Field1: types.Float32Value(42), - }, - expectedLogLines: []map[string]any{ - 
infoFlattening(reflect.TypeFor[awsSingleFloat32Pointer](), reflect.TypeFor[*tfSingleFloat32Field]()), - infoConverting(reflect.TypeFor[awsSingleFloat32Pointer](), reflect.TypeFor[*tfSingleFloat32Field]()), - traceMatchedFields("Field1", reflect.TypeFor[awsSingleFloat32Pointer](), "Field1", reflect.TypeFor[*tfSingleFloat32Field]()), - infoConvertingWithPath("Field1", reflect.TypeFor[*float32](), "Field1", reflect.TypeFor[types.Float32]()), - }, - }, - "zero": { - Source: awsSingleFloat32Pointer{ - Field1: aws.Float32(0), - }, - Target: &tfSingleFloat32Field{}, - WantTarget: &tfSingleFloat32Field{ - Field1: types.Float32Value(0), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[awsSingleFloat32Pointer](), reflect.TypeFor[*tfSingleFloat32Field]()), - infoConverting(reflect.TypeFor[awsSingleFloat32Pointer](), reflect.TypeFor[*tfSingleFloat32Field]()), - traceMatchedFields("Field1", reflect.TypeFor[awsSingleFloat32Pointer](), "Field1", reflect.TypeFor[*tfSingleFloat32Field]()), - infoConvertingWithPath("Field1", reflect.TypeFor[*float32](), "Field1", reflect.TypeFor[types.Float32]()), - }, - }, - "null": { - Source: awsSingleFloat32Pointer{ - Field1: nil, - }, - Target: &tfSingleFloat32Field{}, - WantTarget: &tfSingleFloat32Field{ - Field1: types.Float32Null(), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[awsSingleFloat32Pointer](), reflect.TypeFor[*tfSingleFloat32Field]()), - infoConverting(reflect.TypeFor[awsSingleFloat32Pointer](), reflect.TypeFor[*tfSingleFloat32Field]()), - traceMatchedFields("Field1", reflect.TypeFor[awsSingleFloat32Pointer](), "Field1", reflect.TypeFor[*tfSingleFloat32Field]()), - infoConvertingWithPath("Field1", reflect.TypeFor[*float32](), "Field1", reflect.TypeFor[types.Float32]()), - }, - }, - }, - - "legacy *float32 to Float32": { - "value": { - Source: awsSingleFloat32Pointer{ - Field1: aws.Float32(42), - }, - Target: &tfSingleFloat32FieldLegacy{}, - WantTarget: 
&tfSingleFloat32FieldLegacy{ - Field1: types.Float32Value(42), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[awsSingleFloat32Pointer](), reflect.TypeFor[*tfSingleFloat32FieldLegacy]()), - infoConverting(reflect.TypeFor[awsSingleFloat32Pointer](), reflect.TypeFor[*tfSingleFloat32FieldLegacy]()), - traceMatchedFields("Field1", reflect.TypeFor[awsSingleFloat32Pointer](), "Field1", reflect.TypeFor[*tfSingleFloat32FieldLegacy]()), - infoConvertingWithPath("Field1", reflect.TypeFor[*float32](), "Field1", reflect.TypeFor[types.Float32]()), - debugUsingLegacyFlattener("Field1", reflect.TypeFor[*float32](), "Field1", reflect.TypeFor[types.Float32]()), - }, - }, - "zero": { - Source: awsSingleFloat32Pointer{ - Field1: aws.Float32(0), - }, - Target: &tfSingleFloat32FieldLegacy{}, - WantTarget: &tfSingleFloat32FieldLegacy{ - Field1: types.Float32Value(0), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[awsSingleFloat32Pointer](), reflect.TypeFor[*tfSingleFloat32FieldLegacy]()), - infoConverting(reflect.TypeFor[awsSingleFloat32Pointer](), reflect.TypeFor[*tfSingleFloat32FieldLegacy]()), - traceMatchedFields("Field1", reflect.TypeFor[awsSingleFloat32Pointer](), "Field1", reflect.TypeFor[*tfSingleFloat32FieldLegacy]()), - infoConvertingWithPath("Field1", reflect.TypeFor[*float32](), "Field1", reflect.TypeFor[types.Float32]()), - debugUsingLegacyFlattener("Field1", reflect.TypeFor[*float32](), "Field1", reflect.TypeFor[types.Float32]()), - }, - }, - "null": { - Source: awsSingleFloat32Pointer{ - Field1: nil, - }, - Target: &tfSingleFloat32FieldLegacy{}, - WantTarget: &tfSingleFloat32FieldLegacy{ - Field1: types.Float32Value(0), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[awsSingleFloat32Pointer](), reflect.TypeFor[*tfSingleFloat32FieldLegacy]()), - infoConverting(reflect.TypeFor[awsSingleFloat32Pointer](), reflect.TypeFor[*tfSingleFloat32FieldLegacy]()), - traceMatchedFields("Field1", 
reflect.TypeFor[awsSingleFloat32Pointer](), "Field1", reflect.TypeFor[*tfSingleFloat32FieldLegacy]()), - infoConvertingWithPath("Field1", reflect.TypeFor[*float32](), "Field1", reflect.TypeFor[types.Float32]()), - debugUsingLegacyFlattener("Field1", reflect.TypeFor[*float32](), "Field1", reflect.TypeFor[types.Float32]()), - }, - }, - }, - - // float64 cannot be flattened to Float32 - "float64 to Float32": { - "value": { - Source: awsSingleFloat64Value{ - Field1: 42, - }, - Target: &tfSingleFloat32Field{}, - expectedDiags: diag.Diagnostics{ - DiagFlatteningIncompatibleTypes(reflect.TypeFor[float64](), reflect.TypeFor[types.Float32]()), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[awsSingleFloat64Value](), reflect.TypeFor[*tfSingleFloat32Field]()), - infoConverting(reflect.TypeFor[awsSingleFloat64Value](), reflect.TypeFor[*tfSingleFloat32Field]()), - traceMatchedFields("Field1", reflect.TypeFor[awsSingleFloat64Value](), "Field1", reflect.TypeFor[*tfSingleFloat32Field]()), - infoConvertingWithPath("Field1", reflect.TypeFor[float64](), "Field1", reflect.TypeFor[types.Float32]()), - errorFlatteningIncompatibleTypes("Field1", reflect.TypeFor[float64](), "Field1", reflect.TypeFor[types.Float32]()), - }, - }, - "zero": { - Source: awsSingleFloat64Value{ - Field1: 0, - }, - Target: &tfSingleFloat32Field{}, - expectedDiags: diag.Diagnostics{ - DiagFlatteningIncompatibleTypes(reflect.TypeFor[float64](), reflect.TypeFor[types.Float32]()), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[awsSingleFloat64Value](), reflect.TypeFor[*tfSingleFloat32Field]()), - infoConverting(reflect.TypeFor[awsSingleFloat64Value](), reflect.TypeFor[*tfSingleFloat32Field]()), - traceMatchedFields("Field1", reflect.TypeFor[awsSingleFloat64Value](), "Field1", reflect.TypeFor[*tfSingleFloat32Field]()), - infoConvertingWithPath("Field1", reflect.TypeFor[float64](), "Field1", reflect.TypeFor[types.Float32]()), - 
errorFlatteningIncompatibleTypes("Field1", reflect.TypeFor[float64](), "Field1", reflect.TypeFor[types.Float32]()), - }, - }, - }, - - "*float64 to Float32": { - "value": { - Source: awsSingleFloat64Pointer{ - Field1: aws.Float64(42), - }, - Target: &tfSingleFloat32Field{}, - expectedDiags: diag.Diagnostics{ - DiagFlatteningIncompatibleTypes(reflect.TypeFor[*float64](), reflect.TypeFor[types.Float32]()), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[awsSingleFloat64Pointer](), reflect.TypeFor[*tfSingleFloat32Field]()), - infoConverting(reflect.TypeFor[awsSingleFloat64Pointer](), reflect.TypeFor[*tfSingleFloat32Field]()), - traceMatchedFields("Field1", reflect.TypeFor[awsSingleFloat64Pointer](), "Field1", reflect.TypeFor[*tfSingleFloat32Field]()), - infoConvertingWithPath("Field1", reflect.TypeFor[*float64](), "Field1", reflect.TypeFor[types.Float32]()), - errorFlatteningIncompatibleTypes("Field1", reflect.TypeFor[*float64](), "Field1", reflect.TypeFor[types.Float32]()), - }, - }, - "zero": { - Source: awsSingleFloat64Pointer{ - Field1: aws.Float64(0), - }, - Target: &tfSingleFloat32Field{}, - expectedDiags: diag.Diagnostics{ - DiagFlatteningIncompatibleTypes(reflect.TypeFor[*float64](), reflect.TypeFor[types.Float32]()), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[awsSingleFloat64Pointer](), reflect.TypeFor[*tfSingleFloat32Field]()), - infoConverting(reflect.TypeFor[awsSingleFloat64Pointer](), reflect.TypeFor[*tfSingleFloat32Field]()), - traceMatchedFields("Field1", reflect.TypeFor[awsSingleFloat64Pointer](), "Field1", reflect.TypeFor[*tfSingleFloat32Field]()), - infoConvertingWithPath("Field1", reflect.TypeFor[*float64](), "Field1", reflect.TypeFor[types.Float32]()), - errorFlatteningIncompatibleTypes("Field1", reflect.TypeFor[*float64](), "Field1", reflect.TypeFor[types.Float32]()), - }, - }, - "null": { - Source: awsSingleFloat64Pointer{ - Field1: nil, - }, - Target: &tfSingleFloat32Field{}, - 
expectedDiags: diag.Diagnostics{ - DiagFlatteningIncompatibleTypes(reflect.TypeFor[*float64](), reflect.TypeFor[types.Float32]()), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[awsSingleFloat64Pointer](), reflect.TypeFor[*tfSingleFloat32Field]()), - infoConverting(reflect.TypeFor[awsSingleFloat64Pointer](), reflect.TypeFor[*tfSingleFloat32Field]()), - traceMatchedFields("Field1", reflect.TypeFor[awsSingleFloat64Pointer](), "Field1", reflect.TypeFor[*tfSingleFloat32Field]()), - infoConvertingWithPath("Field1", reflect.TypeFor[*float64](), "Field1", reflect.TypeFor[types.Float32]()), - errorFlatteningIncompatibleTypes("Field1", reflect.TypeFor[*float64](), "Field1", reflect.TypeFor[types.Float32]()), - }, - }, - }, - } - - for testName, cases := range testCases { - t.Run(testName, func(t *testing.T) { - t.Parallel() - - runAutoFlattenTestCases(t, cases) - }) - } -} - -func TestFlattenInt64(t *testing.T) { - t.Parallel() - - testCases := map[string]autoFlexTestCases{ - "int64 to Int64": { - "value": { - Source: awsSingleInt64Value{ - Field1: 42, - }, - Target: &tfSingleInt64Field{}, - WantTarget: &tfSingleInt64Field{ - Field1: types.Int64Value(42), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[awsSingleInt64Value](), reflect.TypeFor[*tfSingleInt64Field]()), - infoConverting(reflect.TypeFor[awsSingleInt64Value](), reflect.TypeFor[*tfSingleInt64Field]()), - traceMatchedFields("Field1", reflect.TypeFor[awsSingleInt64Value](), "Field1", reflect.TypeFor[*tfSingleInt64Field]()), - infoConvertingWithPath("Field1", reflect.TypeFor[int64](), "Field1", reflect.TypeFor[types.Int64]()), - }, - }, - "zero": { - Source: awsSingleInt64Value{ - Field1: 0, - }, - Target: &tfSingleInt64Field{}, - WantTarget: &tfSingleInt64Field{ - Field1: types.Int64Value(0), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[awsSingleInt64Value](), reflect.TypeFor[*tfSingleInt64Field]()), - 
infoConverting(reflect.TypeFor[awsSingleInt64Value](), reflect.TypeFor[*tfSingleInt64Field]()), - traceMatchedFields("Field1", reflect.TypeFor[awsSingleInt64Value](), "Field1", reflect.TypeFor[*tfSingleInt64Field]()), - infoConvertingWithPath("Field1", reflect.TypeFor[int64](), "Field1", reflect.TypeFor[types.Int64]()), - }, - }, - }, - - "*int64 to Int64": { - "value": { - Source: awsSingleInt64Pointer{ - Field1: aws.Int64(42), - }, - Target: &tfSingleInt64Field{}, - WantTarget: &tfSingleInt64Field{ - Field1: types.Int64Value(42), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[awsSingleInt64Pointer](), reflect.TypeFor[*tfSingleInt64Field]()), - infoConverting(reflect.TypeFor[awsSingleInt64Pointer](), reflect.TypeFor[*tfSingleInt64Field]()), - traceMatchedFields("Field1", reflect.TypeFor[awsSingleInt64Pointer](), "Field1", reflect.TypeFor[*tfSingleInt64Field]()), - infoConvertingWithPath("Field1", reflect.TypeFor[*int64](), "Field1", reflect.TypeFor[types.Int64]()), - }, - }, - "zero": { - Source: awsSingleInt64Pointer{ - Field1: aws.Int64(0), - }, - Target: &tfSingleInt64Field{}, - WantTarget: &tfSingleInt64Field{ - Field1: types.Int64Value(0), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[awsSingleInt64Pointer](), reflect.TypeFor[*tfSingleInt64Field]()), - infoConverting(reflect.TypeFor[awsSingleInt64Pointer](), reflect.TypeFor[*tfSingleInt64Field]()), - traceMatchedFields("Field1", reflect.TypeFor[awsSingleInt64Pointer](), "Field1", reflect.TypeFor[*tfSingleInt64Field]()), - infoConvertingWithPath("Field1", reflect.TypeFor[*int64](), "Field1", reflect.TypeFor[types.Int64]()), - }, - }, - "null": { - Source: awsSingleInt64Pointer{ - Field1: nil, - }, - Target: &tfSingleInt64Field{}, - WantTarget: &tfSingleInt64Field{ - Field1: types.Int64Null(), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[awsSingleInt64Pointer](), reflect.TypeFor[*tfSingleInt64Field]()), - 
infoConverting(reflect.TypeFor[awsSingleInt64Pointer](), reflect.TypeFor[*tfSingleInt64Field]()), - traceMatchedFields("Field1", reflect.TypeFor[awsSingleInt64Pointer](), "Field1", reflect.TypeFor[*tfSingleInt64Field]()), - infoConvertingWithPath("Field1", reflect.TypeFor[*int64](), "Field1", reflect.TypeFor[types.Int64]()), - }, - }, - }, - - "legacy *int64 to Int64": { - "value": { - Source: awsSingleInt64Pointer{ - Field1: aws.Int64(42), - }, - Target: &tfSingleInt64FieldLegacy{}, - WantTarget: &tfSingleInt64FieldLegacy{ - Field1: types.Int64Value(42), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[awsSingleInt64Pointer](), reflect.TypeFor[*tfSingleInt64FieldLegacy]()), - infoConverting(reflect.TypeFor[awsSingleInt64Pointer](), reflect.TypeFor[*tfSingleInt64FieldLegacy]()), - traceMatchedFields("Field1", reflect.TypeFor[awsSingleInt64Pointer](), "Field1", reflect.TypeFor[*tfSingleInt64FieldLegacy]()), - infoConvertingWithPath("Field1", reflect.TypeFor[*int64](), "Field1", reflect.TypeFor[types.Int64]()), - debugUsingLegacyFlattener("Field1", reflect.TypeFor[*int64](), "Field1", reflect.TypeFor[types.Int64]()), - }, - }, - "zero": { - Source: awsSingleInt64Pointer{ - Field1: aws.Int64(0), - }, - Target: &tfSingleInt64FieldLegacy{}, - WantTarget: &tfSingleInt64FieldLegacy{ - Field1: types.Int64Value(0), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[awsSingleInt64Pointer](), reflect.TypeFor[*tfSingleInt64FieldLegacy]()), - infoConverting(reflect.TypeFor[awsSingleInt64Pointer](), reflect.TypeFor[*tfSingleInt64FieldLegacy]()), - traceMatchedFields("Field1", reflect.TypeFor[awsSingleInt64Pointer](), "Field1", reflect.TypeFor[*tfSingleInt64FieldLegacy]()), - infoConvertingWithPath("Field1", reflect.TypeFor[*int64](), "Field1", reflect.TypeFor[types.Int64]()), - debugUsingLegacyFlattener("Field1", reflect.TypeFor[*int64](), "Field1", reflect.TypeFor[types.Int64]()), - }, - }, - "null": { - Source: 
awsSingleInt64Pointer{ - Field1: nil, - }, - Target: &tfSingleInt64FieldLegacy{}, - WantTarget: &tfSingleInt64FieldLegacy{ - Field1: types.Int64Value(0), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[awsSingleInt64Pointer](), reflect.TypeFor[*tfSingleInt64FieldLegacy]()), - infoConverting(reflect.TypeFor[awsSingleInt64Pointer](), reflect.TypeFor[*tfSingleInt64FieldLegacy]()), - traceMatchedFields("Field1", reflect.TypeFor[awsSingleInt64Pointer](), "Field1", reflect.TypeFor[*tfSingleInt64FieldLegacy]()), - infoConvertingWithPath("Field1", reflect.TypeFor[*int64](), "Field1", reflect.TypeFor[types.Int64]()), - debugUsingLegacyFlattener("Field1", reflect.TypeFor[*int64](), "Field1", reflect.TypeFor[types.Int64]()), - }, - }, - }, - - // For historical reasons, int32 can be flattened to Int64 values - "int32 to Int64": { - "value": { - Source: awsSingleInt32Value{ - Field1: 42, - }, - Target: &tfSingleInt64Field{}, - WantTarget: &tfSingleInt64Field{ - Field1: types.Int64Value(42), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[awsSingleInt32Value](), reflect.TypeFor[*tfSingleInt64Field]()), - infoConverting(reflect.TypeFor[awsSingleInt32Value](), reflect.TypeFor[*tfSingleInt64Field]()), - traceMatchedFields("Field1", reflect.TypeFor[awsSingleInt32Value](), "Field1", reflect.TypeFor[*tfSingleInt64Field]()), - infoConvertingWithPath("Field1", reflect.TypeFor[int32](), "Field1", reflect.TypeFor[types.Int64]()), - }, - }, - "zero": { - Source: awsSingleInt32Value{ - Field1: 0, - }, - Target: &tfSingleInt64Field{}, - WantTarget: &tfSingleInt64Field{ - Field1: types.Int64Value(0), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[awsSingleInt32Value](), reflect.TypeFor[*tfSingleInt64Field]()), - infoConverting(reflect.TypeFor[awsSingleInt32Value](), reflect.TypeFor[*tfSingleInt64Field]()), - traceMatchedFields("Field1", reflect.TypeFor[awsSingleInt32Value](), "Field1", 
reflect.TypeFor[*tfSingleInt64Field]()), - infoConvertingWithPath("Field1", reflect.TypeFor[int32](), "Field1", reflect.TypeFor[types.Int64]()), - }, - }, - }, - - "*int32 to Int64": { - "value": { - Source: awsSingleInt32Pointer{ - Field1: aws.Int32(42), - }, - Target: &tfSingleInt64Field{}, - WantTarget: &tfSingleInt64Field{ - Field1: types.Int64Value(42), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[awsSingleInt32Pointer](), reflect.TypeFor[*tfSingleInt64Field]()), - infoConverting(reflect.TypeFor[awsSingleInt32Pointer](), reflect.TypeFor[*tfSingleInt64Field]()), - traceMatchedFields("Field1", reflect.TypeFor[awsSingleInt32Pointer](), "Field1", reflect.TypeFor[*tfSingleInt64Field]()), - infoConvertingWithPath("Field1", reflect.TypeFor[*int32](), "Field1", reflect.TypeFor[types.Int64]()), - }, - }, - "zero": { - Source: awsSingleInt32Pointer{ - Field1: aws.Int32(0), - }, - Target: &tfSingleInt64Field{}, - WantTarget: &tfSingleInt64Field{ - Field1: types.Int64Value(0), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[awsSingleInt32Pointer](), reflect.TypeFor[*tfSingleInt64Field]()), - infoConverting(reflect.TypeFor[awsSingleInt32Pointer](), reflect.TypeFor[*tfSingleInt64Field]()), - traceMatchedFields("Field1", reflect.TypeFor[awsSingleInt32Pointer](), "Field1", reflect.TypeFor[*tfSingleInt64Field]()), - infoConvertingWithPath("Field1", reflect.TypeFor[*int32](), "Field1", reflect.TypeFor[types.Int64]()), - }, - }, - "null": { - Source: awsSingleInt32Pointer{ - Field1: nil, - }, - Target: &tfSingleInt64Field{}, - WantTarget: &tfSingleInt64Field{ - Field1: types.Int64Null(), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[awsSingleInt32Pointer](), reflect.TypeFor[*tfSingleInt64Field]()), - infoConverting(reflect.TypeFor[awsSingleInt32Pointer](), reflect.TypeFor[*tfSingleInt64Field]()), - traceMatchedFields("Field1", reflect.TypeFor[awsSingleInt32Pointer](), "Field1", 
reflect.TypeFor[*tfSingleInt64Field]()), - infoConvertingWithPath("Field1", reflect.TypeFor[*int32](), "Field1", reflect.TypeFor[types.Int64]()), - }, - }, - }, - - "legacy *int32 to Int64": { - "value": { - Source: awsSingleInt32Pointer{ - Field1: aws.Int32(42), - }, - Target: &tfSingleInt64FieldLegacy{}, - WantTarget: &tfSingleInt64FieldLegacy{ - Field1: types.Int64Value(42), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[awsSingleInt32Pointer](), reflect.TypeFor[*tfSingleInt64FieldLegacy]()), - infoConverting(reflect.TypeFor[awsSingleInt32Pointer](), reflect.TypeFor[*tfSingleInt64FieldLegacy]()), - traceMatchedFields("Field1", reflect.TypeFor[awsSingleInt32Pointer](), "Field1", reflect.TypeFor[*tfSingleInt64FieldLegacy]()), - infoConvertingWithPath("Field1", reflect.TypeFor[*int32](), "Field1", reflect.TypeFor[types.Int64]()), - debugUsingLegacyFlattener("Field1", reflect.TypeFor[*int32](), "Field1", reflect.TypeFor[types.Int64]()), - }, - }, - "zero": { - Source: awsSingleInt32Pointer{ - Field1: aws.Int32(0), - }, - Target: &tfSingleInt64FieldLegacy{}, - WantTarget: &tfSingleInt64FieldLegacy{ - Field1: types.Int64Value(0), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[awsSingleInt32Pointer](), reflect.TypeFor[*tfSingleInt64FieldLegacy]()), - infoConverting(reflect.TypeFor[awsSingleInt32Pointer](), reflect.TypeFor[*tfSingleInt64FieldLegacy]()), - traceMatchedFields("Field1", reflect.TypeFor[awsSingleInt32Pointer](), "Field1", reflect.TypeFor[*tfSingleInt64FieldLegacy]()), - infoConvertingWithPath("Field1", reflect.TypeFor[*int32](), "Field1", reflect.TypeFor[types.Int64]()), - debugUsingLegacyFlattener("Field1", reflect.TypeFor[*int32](), "Field1", reflect.TypeFor[types.Int64]()), - }, - }, - "null": { - Source: awsSingleInt32Pointer{ - Field1: nil, - }, - Target: &tfSingleInt64FieldLegacy{}, - WantTarget: &tfSingleInt64FieldLegacy{ - Field1: types.Int64Value(0), - }, - expectedLogLines: 
[]map[string]any{ - infoFlattening(reflect.TypeFor[awsSingleInt32Pointer](), reflect.TypeFor[*tfSingleInt64FieldLegacy]()), - infoConverting(reflect.TypeFor[awsSingleInt32Pointer](), reflect.TypeFor[*tfSingleInt64FieldLegacy]()), - traceMatchedFields("Field1", reflect.TypeFor[awsSingleInt32Pointer](), "Field1", reflect.TypeFor[*tfSingleInt64FieldLegacy]()), - infoConvertingWithPath("Field1", reflect.TypeFor[*int32](), "Field1", reflect.TypeFor[types.Int64]()), - debugUsingLegacyFlattener("Field1", reflect.TypeFor[*int32](), "Field1", reflect.TypeFor[types.Int64]()), - }, - }, - }, - } - - for testName, cases := range testCases { - t.Run(testName, func(t *testing.T) { - t.Parallel() - - runAutoFlattenTestCases(t, cases) - }) - } -} - -func TestFlattenInt32(t *testing.T) { - t.Parallel() - - testCases := map[string]autoFlexTestCases{ - "int32 to Int32": { - "value": { - Source: awsSingleInt32Value{ - Field1: 42, - }, - Target: &tfSingleInt32Field{}, - WantTarget: &tfSingleInt32Field{ - Field1: types.Int32Value(42), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[awsSingleInt32Value](), reflect.TypeFor[*tfSingleInt32Field]()), - infoConverting(reflect.TypeFor[awsSingleInt32Value](), reflect.TypeFor[*tfSingleInt32Field]()), - traceMatchedFields("Field1", reflect.TypeFor[awsSingleInt32Value](), "Field1", reflect.TypeFor[*tfSingleInt32Field]()), - infoConvertingWithPath("Field1", reflect.TypeFor[int32](), "Field1", reflect.TypeFor[types.Int32]()), - }, - }, - "zero": { - Source: awsSingleInt32Value{ - Field1: 0, - }, - Target: &tfSingleInt32Field{}, - WantTarget: &tfSingleInt32Field{ - Field1: types.Int32Value(0), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[awsSingleInt32Value](), reflect.TypeFor[*tfSingleInt32Field]()), - infoConverting(reflect.TypeFor[awsSingleInt32Value](), reflect.TypeFor[*tfSingleInt32Field]()), - traceMatchedFields("Field1", reflect.TypeFor[awsSingleInt32Value](), "Field1", 
reflect.TypeFor[*tfSingleInt32Field]()), - infoConvertingWithPath("Field1", reflect.TypeFor[int32](), "Field1", reflect.TypeFor[types.Int32]()), - }, - }, - }, - - "*int32 to Int32": { - "value": { - Source: awsSingleInt32Pointer{ - Field1: aws.Int32(42), - }, - Target: &tfSingleInt32Field{}, - WantTarget: &tfSingleInt32Field{ - Field1: types.Int32Value(42), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[awsSingleInt32Pointer](), reflect.TypeFor[*tfSingleInt32Field]()), - infoConverting(reflect.TypeFor[awsSingleInt32Pointer](), reflect.TypeFor[*tfSingleInt32Field]()), - traceMatchedFields("Field1", reflect.TypeFor[awsSingleInt32Pointer](), "Field1", reflect.TypeFor[*tfSingleInt32Field]()), - infoConvertingWithPath("Field1", reflect.TypeFor[*int32](), "Field1", reflect.TypeFor[types.Int32]()), - }, - }, - "zero": { - Source: awsSingleInt32Pointer{ - Field1: aws.Int32(0), - }, - Target: &tfSingleInt32Field{}, - WantTarget: &tfSingleInt32Field{ - Field1: types.Int32Value(0), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[awsSingleInt32Pointer](), reflect.TypeFor[*tfSingleInt32Field]()), - infoConverting(reflect.TypeFor[awsSingleInt32Pointer](), reflect.TypeFor[*tfSingleInt32Field]()), - traceMatchedFields("Field1", reflect.TypeFor[awsSingleInt32Pointer](), "Field1", reflect.TypeFor[*tfSingleInt32Field]()), - infoConvertingWithPath("Field1", reflect.TypeFor[*int32](), "Field1", reflect.TypeFor[types.Int32]()), - }, - }, - "null": { - Source: awsSingleInt32Pointer{ - Field1: nil, - }, - Target: &tfSingleInt32Field{}, - WantTarget: &tfSingleInt32Field{ - Field1: types.Int32Null(), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[awsSingleInt32Pointer](), reflect.TypeFor[*tfSingleInt32Field]()), - infoConverting(reflect.TypeFor[awsSingleInt32Pointer](), reflect.TypeFor[*tfSingleInt32Field]()), - traceMatchedFields("Field1", reflect.TypeFor[awsSingleInt32Pointer](), "Field1", 
reflect.TypeFor[*tfSingleInt32Field]()), - infoConvertingWithPath("Field1", reflect.TypeFor[*int32](), "Field1", reflect.TypeFor[types.Int32]()), - }, - }, - }, - - "legacy *int32 to Int32": { - "value": { - Source: awsSingleInt32Pointer{ - Field1: aws.Int32(42), - }, - Target: &tfSingleInt32FieldLegacy{}, - WantTarget: &tfSingleInt32FieldLegacy{ - Field1: types.Int32Value(42), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[awsSingleInt32Pointer](), reflect.TypeFor[*tfSingleInt32FieldLegacy]()), - infoConverting(reflect.TypeFor[awsSingleInt32Pointer](), reflect.TypeFor[*tfSingleInt32FieldLegacy]()), - traceMatchedFields("Field1", reflect.TypeFor[awsSingleInt32Pointer](), "Field1", reflect.TypeFor[*tfSingleInt32FieldLegacy]()), - infoConvertingWithPath("Field1", reflect.TypeFor[*int32](), "Field1", reflect.TypeFor[types.Int32]()), - debugUsingLegacyFlattener("Field1", reflect.TypeFor[*int32](), "Field1", reflect.TypeFor[types.Int32]()), - }, - }, - "zero": { - Source: awsSingleInt32Pointer{ - Field1: aws.Int32(0), - }, - Target: &tfSingleInt32FieldLegacy{}, - WantTarget: &tfSingleInt32FieldLegacy{ - Field1: types.Int32Value(0), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[awsSingleInt32Pointer](), reflect.TypeFor[*tfSingleInt32FieldLegacy]()), - infoConverting(reflect.TypeFor[awsSingleInt32Pointer](), reflect.TypeFor[*tfSingleInt32FieldLegacy]()), - traceMatchedFields("Field1", reflect.TypeFor[awsSingleInt32Pointer](), "Field1", reflect.TypeFor[*tfSingleInt32FieldLegacy]()), - infoConvertingWithPath("Field1", reflect.TypeFor[*int32](), "Field1", reflect.TypeFor[types.Int32]()), - debugUsingLegacyFlattener("Field1", reflect.TypeFor[*int32](), "Field1", reflect.TypeFor[types.Int32]()), - }, - }, - "null": { - Source: awsSingleInt32Pointer{ - Field1: nil, - }, - Target: &tfSingleInt32FieldLegacy{}, - WantTarget: &tfSingleInt32FieldLegacy{ - Field1: types.Int32Value(0), - }, - expectedLogLines: 
[]map[string]any{ - infoFlattening(reflect.TypeFor[awsSingleInt32Pointer](), reflect.TypeFor[*tfSingleInt32FieldLegacy]()), - infoConverting(reflect.TypeFor[awsSingleInt32Pointer](), reflect.TypeFor[*tfSingleInt32FieldLegacy]()), - traceMatchedFields("Field1", reflect.TypeFor[awsSingleInt32Pointer](), "Field1", reflect.TypeFor[*tfSingleInt32FieldLegacy]()), - infoConvertingWithPath("Field1", reflect.TypeFor[*int32](), "Field1", reflect.TypeFor[types.Int32]()), - debugUsingLegacyFlattener("Field1", reflect.TypeFor[*int32](), "Field1", reflect.TypeFor[types.Int32]()), - }, - }, - }, - - // int64 cannot be flattened to Int32 - "int64 to Int32": { - "value": { - Source: awsSingleInt64Value{ - Field1: 42, - }, - Target: &tfSingleInt32Field{}, - expectedDiags: diag.Diagnostics{ - DiagFlatteningIncompatibleTypes(reflect.TypeFor[int64](), reflect.TypeFor[types.Int32]()), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[awsSingleInt64Value](), reflect.TypeFor[*tfSingleInt32Field]()), - infoConverting(reflect.TypeFor[awsSingleInt64Value](), reflect.TypeFor[*tfSingleInt32Field]()), - traceMatchedFields("Field1", reflect.TypeFor[awsSingleInt64Value](), "Field1", reflect.TypeFor[*tfSingleInt32Field]()), - infoConvertingWithPath("Field1", reflect.TypeFor[int64](), "Field1", reflect.TypeFor[types.Int32]()), - errorFlatteningIncompatibleTypes("Field1", reflect.TypeFor[int64](), "Field1", reflect.TypeFor[types.Int32]()), - }, - }, - "zero": { - Source: awsSingleInt64Value{ - Field1: 0, - }, - Target: &tfSingleInt32Field{}, - expectedDiags: diag.Diagnostics{ - DiagFlatteningIncompatibleTypes(reflect.TypeFor[int64](), reflect.TypeFor[types.Int32]()), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[awsSingleInt64Value](), reflect.TypeFor[*tfSingleInt32Field]()), - infoConverting(reflect.TypeFor[awsSingleInt64Value](), reflect.TypeFor[*tfSingleInt32Field]()), - traceMatchedFields("Field1", reflect.TypeFor[awsSingleInt64Value](), 
"Field1", reflect.TypeFor[*tfSingleInt32Field]()), - infoConvertingWithPath("Field1", reflect.TypeFor[int64](), "Field1", reflect.TypeFor[types.Int32]()), - errorFlatteningIncompatibleTypes("Field1", reflect.TypeFor[int64](), "Field1", reflect.TypeFor[types.Int32]()), - }, - }, - }, - - "*int64 to Int32": { - "value": { - Source: awsSingleInt64Pointer{ - Field1: aws.Int64(42), - }, - Target: &tfSingleInt32Field{}, - expectedDiags: diag.Diagnostics{ - DiagFlatteningIncompatibleTypes(reflect.TypeFor[*int64](), reflect.TypeFor[types.Int32]()), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[awsSingleInt64Pointer](), reflect.TypeFor[*tfSingleInt32Field]()), - infoConverting(reflect.TypeFor[awsSingleInt64Pointer](), reflect.TypeFor[*tfSingleInt32Field]()), - traceMatchedFields("Field1", reflect.TypeFor[awsSingleInt64Pointer](), "Field1", reflect.TypeFor[*tfSingleInt32Field]()), - infoConvertingWithPath("Field1", reflect.TypeFor[*int64](), "Field1", reflect.TypeFor[types.Int32]()), - errorFlatteningIncompatibleTypes("Field1", reflect.TypeFor[*int64](), "Field1", reflect.TypeFor[types.Int32]()), - }, - }, - "zero": { - Source: awsSingleInt64Pointer{ - Field1: aws.Int64(0), - }, - Target: &tfSingleInt32Field{}, - expectedDiags: diag.Diagnostics{ - DiagFlatteningIncompatibleTypes(reflect.TypeFor[*int64](), reflect.TypeFor[types.Int32]()), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[awsSingleInt64Pointer](), reflect.TypeFor[*tfSingleInt32Field]()), - infoConverting(reflect.TypeFor[awsSingleInt64Pointer](), reflect.TypeFor[*tfSingleInt32Field]()), - traceMatchedFields("Field1", reflect.TypeFor[awsSingleInt64Pointer](), "Field1", reflect.TypeFor[*tfSingleInt32Field]()), - infoConvertingWithPath("Field1", reflect.TypeFor[*int64](), "Field1", reflect.TypeFor[types.Int32]()), - errorFlatteningIncompatibleTypes("Field1", reflect.TypeFor[*int64](), "Field1", reflect.TypeFor[types.Int32]()), - }, - }, - "null": { - Source: 
awsSingleInt64Pointer{ - Field1: nil, - }, - Target: &tfSingleInt32Field{}, - expectedDiags: diag.Diagnostics{ - DiagFlatteningIncompatibleTypes(reflect.TypeFor[*int64](), reflect.TypeFor[types.Int32]()), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[awsSingleInt64Pointer](), reflect.TypeFor[*tfSingleInt32Field]()), - infoConverting(reflect.TypeFor[awsSingleInt64Pointer](), reflect.TypeFor[*tfSingleInt32Field]()), - traceMatchedFields("Field1", reflect.TypeFor[awsSingleInt64Pointer](), "Field1", reflect.TypeFor[*tfSingleInt32Field]()), - infoConvertingWithPath("Field1", reflect.TypeFor[*int64](), "Field1", reflect.TypeFor[types.Int32]()), - errorFlatteningIncompatibleTypes("Field1", reflect.TypeFor[*int64](), "Field1", reflect.TypeFor[types.Int32]()), - }, - }, - }, - } - - for testName, cases := range testCases { - t.Run(testName, func(t *testing.T) { - t.Parallel() - - runAutoFlattenTestCases(t, cases) - }) - } -} - -func TestFlattenString(t *testing.T) { - t.Parallel() - - testCases := map[string]autoFlexTestCases{ - "string to String": { - "value": { - Source: awsSingleStringValue{ - Field1: "a", - }, - Target: &tfSingleStringField{}, - WantTarget: &tfSingleStringField{ - Field1: types.StringValue("a"), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[awsSingleStringValue](), reflect.TypeFor[*tfSingleStringField]()), - infoConverting(reflect.TypeFor[awsSingleStringValue](), reflect.TypeFor[*tfSingleStringField]()), - traceMatchedFields("Field1", reflect.TypeFor[awsSingleStringValue](), "Field1", reflect.TypeFor[*tfSingleStringField]()), - infoConvertingWithPath("Field1", reflect.TypeFor[string](), "Field1", reflect.TypeFor[types.String]()), - }, - }, - "zero": { - Source: awsSingleStringValue{ - Field1: "", - }, - Target: &tfSingleStringField{}, - WantTarget: &tfSingleStringField{ - Field1: types.StringValue(""), - }, - expectedLogLines: []map[string]any{ - 
infoFlattening(reflect.TypeFor[awsSingleStringValue](), reflect.TypeFor[*tfSingleStringField]()), - infoConverting(reflect.TypeFor[awsSingleStringValue](), reflect.TypeFor[*tfSingleStringField]()), - traceMatchedFields("Field1", reflect.TypeFor[awsSingleStringValue](), "Field1", reflect.TypeFor[*tfSingleStringField]()), - infoConvertingWithPath("Field1", reflect.TypeFor[string](), "Field1", reflect.TypeFor[types.String]()), - }, - }, - }, - - "*string to String": { - "value": { - Source: awsSingleStringPointer{ - Field1: aws.String("a"), - }, - Target: &tfSingleStringField{}, - WantTarget: &tfSingleStringField{ - Field1: types.StringValue("a"), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[awsSingleStringPointer](), reflect.TypeFor[*tfSingleStringField]()), - infoConverting(reflect.TypeFor[awsSingleStringPointer](), reflect.TypeFor[*tfSingleStringField]()), - traceMatchedFields("Field1", reflect.TypeFor[awsSingleStringPointer](), "Field1", reflect.TypeFor[*tfSingleStringField]()), - infoConvertingWithPath("Field1", reflect.TypeFor[*string](), "Field1", reflect.TypeFor[types.String]()), - }, - }, - "zero": { - Source: awsSingleStringPointer{ - Field1: aws.String(""), - }, - Target: &tfSingleStringField{}, - WantTarget: &tfSingleStringField{ - Field1: types.StringValue(""), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[awsSingleStringPointer](), reflect.TypeFor[*tfSingleStringField]()), - infoConverting(reflect.TypeFor[awsSingleStringPointer](), reflect.TypeFor[*tfSingleStringField]()), - traceMatchedFields("Field1", reflect.TypeFor[awsSingleStringPointer](), "Field1", reflect.TypeFor[*tfSingleStringField]()), - infoConvertingWithPath("Field1", reflect.TypeFor[*string](), "Field1", reflect.TypeFor[types.String]()), - }, - }, - "null": { - Source: awsSingleStringPointer{ - Field1: nil, - }, - Target: &tfSingleStringField{}, - WantTarget: &tfSingleStringField{ - Field1: types.StringNull(), - }, - 
expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[awsSingleStringPointer](), reflect.TypeFor[*tfSingleStringField]()), - infoConverting(reflect.TypeFor[awsSingleStringPointer](), reflect.TypeFor[*tfSingleStringField]()), - traceMatchedFields("Field1", reflect.TypeFor[awsSingleStringPointer](), "Field1", reflect.TypeFor[*tfSingleStringField]()), - infoConvertingWithPath("Field1", reflect.TypeFor[*string](), "Field1", reflect.TypeFor[types.String]()), - }, - }, - }, - - "omitempty string to String": { - "value": { - Source: awsSingleStringValue{ - Field1: "a", - }, - Target: &tfSingleStringFieldOmitEmpty{}, - WantTarget: &tfSingleStringFieldOmitEmpty{ - Field1: types.StringValue("a"), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[awsSingleStringValue](), reflect.TypeFor[*tfSingleStringFieldOmitEmpty]()), - infoConverting(reflect.TypeFor[awsSingleStringValue](), reflect.TypeFor[*tfSingleStringFieldOmitEmpty]()), - traceMatchedFields("Field1", reflect.TypeFor[awsSingleStringValue](), "Field1", reflect.TypeFor[*tfSingleStringFieldOmitEmpty]()), - infoConvertingWithPath("Field1", reflect.TypeFor[string](), "Field1", reflect.TypeFor[types.String]()), - }, - }, - "zero": { - Source: awsSingleStringValue{ - Field1: "", - }, - Target: &tfSingleStringFieldOmitEmpty{}, - WantTarget: &tfSingleStringFieldOmitEmpty{ - Field1: types.StringNull(), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[awsSingleStringValue](), reflect.TypeFor[*tfSingleStringFieldOmitEmpty]()), - infoConverting(reflect.TypeFor[awsSingleStringValue](), reflect.TypeFor[*tfSingleStringFieldOmitEmpty]()), - traceMatchedFields("Field1", reflect.TypeFor[awsSingleStringValue](), "Field1", reflect.TypeFor[*tfSingleStringFieldOmitEmpty]()), - infoConvertingWithPath("Field1", reflect.TypeFor[string](), "Field1", reflect.TypeFor[types.String]()), - }, - }, - }, - - "omitempty *string to String": { - "value": { - Source: 
awsSingleStringPointer{ - Field1: aws.String("a"), - }, - Target: &tfSingleStringFieldOmitEmpty{}, - WantTarget: &tfSingleStringFieldOmitEmpty{ - Field1: types.StringValue("a"), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[awsSingleStringPointer](), reflect.TypeFor[*tfSingleStringFieldOmitEmpty]()), - infoConverting(reflect.TypeFor[awsSingleStringPointer](), reflect.TypeFor[*tfSingleStringFieldOmitEmpty]()), - traceMatchedFields("Field1", reflect.TypeFor[awsSingleStringPointer](), "Field1", reflect.TypeFor[*tfSingleStringFieldOmitEmpty]()), - infoConvertingWithPath("Field1", reflect.TypeFor[*string](), "Field1", reflect.TypeFor[types.String]()), - }, - }, - "zero": { - Source: awsSingleStringPointer{ - Field1: aws.String(""), - }, - Target: &tfSingleStringFieldOmitEmpty{}, - WantTarget: &tfSingleStringFieldOmitEmpty{ - Field1: types.StringNull(), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[awsSingleStringPointer](), reflect.TypeFor[*tfSingleStringFieldOmitEmpty]()), - infoConverting(reflect.TypeFor[awsSingleStringPointer](), reflect.TypeFor[*tfSingleStringFieldOmitEmpty]()), - traceMatchedFields("Field1", reflect.TypeFor[awsSingleStringPointer](), "Field1", reflect.TypeFor[*tfSingleStringFieldOmitEmpty]()), - infoConvertingWithPath("Field1", reflect.TypeFor[*string](), "Field1", reflect.TypeFor[types.String]()), - }, - }, - "null": { - Source: awsSingleStringPointer{ - Field1: nil, - }, - Target: &tfSingleStringFieldOmitEmpty{}, - WantTarget: &tfSingleStringFieldOmitEmpty{ - Field1: types.StringNull(), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[awsSingleStringPointer](), reflect.TypeFor[*tfSingleStringFieldOmitEmpty]()), - infoConverting(reflect.TypeFor[awsSingleStringPointer](), reflect.TypeFor[*tfSingleStringFieldOmitEmpty]()), - traceMatchedFields("Field1", reflect.TypeFor[awsSingleStringPointer](), "Field1", reflect.TypeFor[*tfSingleStringFieldOmitEmpty]()), - 
infoConvertingWithPath("Field1", reflect.TypeFor[*string](), "Field1", reflect.TypeFor[types.String]()), - }, - }, - }, - - "legacy *string to String": { - "value": { - Source: awsSingleStringPointer{ - Field1: aws.String("a"), - }, - Target: &tfSingleStringFieldLegacy{}, - WantTarget: &tfSingleStringFieldLegacy{ - Field1: types.StringValue("a"), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[awsSingleStringPointer](), reflect.TypeFor[*tfSingleStringFieldLegacy]()), - infoConverting(reflect.TypeFor[awsSingleStringPointer](), reflect.TypeFor[*tfSingleStringFieldLegacy]()), - traceMatchedFields("Field1", reflect.TypeFor[awsSingleStringPointer](), "Field1", reflect.TypeFor[*tfSingleStringFieldLegacy]()), - infoConvertingWithPath("Field1", reflect.TypeFor[*string](), "Field1", reflect.TypeFor[types.String]()), - debugUsingLegacyFlattener("Field1", reflect.TypeFor[*string](), "Field1", reflect.TypeFor[types.String]()), - }, - }, - "zero": { - Source: awsSingleStringPointer{ - Field1: aws.String(""), - }, - Target: &tfSingleStringFieldLegacy{}, - WantTarget: &tfSingleStringFieldLegacy{ - Field1: types.StringValue(""), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[awsSingleStringPointer](), reflect.TypeFor[*tfSingleStringFieldLegacy]()), - infoConverting(reflect.TypeFor[awsSingleStringPointer](), reflect.TypeFor[*tfSingleStringFieldLegacy]()), - traceMatchedFields("Field1", reflect.TypeFor[awsSingleStringPointer](), "Field1", reflect.TypeFor[*tfSingleStringFieldLegacy]()), - infoConvertingWithPath("Field1", reflect.TypeFor[*string](), "Field1", reflect.TypeFor[types.String]()), - debugUsingLegacyFlattener("Field1", reflect.TypeFor[*string](), "Field1", reflect.TypeFor[types.String]()), - }, - }, - "null": { - Source: awsSingleStringPointer{ - Field1: nil, - }, - Target: &tfSingleStringFieldLegacy{}, - WantTarget: &tfSingleStringFieldLegacy{ - Field1: types.StringValue(""), - }, - expectedLogLines: 
[]map[string]any{ - infoFlattening(reflect.TypeFor[awsSingleStringPointer](), reflect.TypeFor[*tfSingleStringFieldLegacy]()), - infoConverting(reflect.TypeFor[awsSingleStringPointer](), reflect.TypeFor[*tfSingleStringFieldLegacy]()), - traceMatchedFields("Field1", reflect.TypeFor[awsSingleStringPointer](), "Field1", reflect.TypeFor[*tfSingleStringFieldLegacy]()), - infoConvertingWithPath("Field1", reflect.TypeFor[*string](), "Field1", reflect.TypeFor[types.String]()), - debugUsingLegacyFlattener("Field1", reflect.TypeFor[*string](), "Field1", reflect.TypeFor[types.String]()), - }, - }, - }, - } - - for testName, cases := range testCases { - t.Run(testName, func(t *testing.T) { - t.Parallel() - - runAutoFlattenTestCases(t, cases) - }) - } -} - -func TestFlattenTopLevelStringPtr(t *testing.T) { - t.Parallel() - - testCases := toplevelTestCases[*string, types.String]{ - "value": { - source: aws.String("value"), - expectedValue: types.StringValue("value"), - expectedDiags: diag.Diagnostics{}, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[*string](), reflect.TypeFor[*types.String]()), - infoConverting(reflect.TypeFor[*string](), reflect.TypeFor[types.String]()), - }, - }, - - "empty": { - source: aws.String(""), - expectedValue: types.StringValue(""), - expectedDiags: diag.Diagnostics{}, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[*string](), reflect.TypeFor[*types.String]()), - infoConverting(reflect.TypeFor[*string](), reflect.TypeFor[types.String]()), - }, - }, - - "nil": { - source: nil, - expectedValue: types.StringNull(), - expectedDiags: diag.Diagnostics{}, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[*string](), reflect.TypeFor[*types.String]()), - infoConverting(reflect.TypeFor[*string](), reflect.TypeFor[types.String]()), - }, - }, - } - - runTopLevelTestCases(t, testCases) -} - -func TestFlattenTopLevelInt64Ptr(t *testing.T) { - t.Parallel() - - testCases := 
toplevelTestCases[*int64, types.Int64]{ - "value": { - source: aws.Int64(42), - expectedValue: types.Int64Value(42), - expectedDiags: diag.Diagnostics{}, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[*int64](), reflect.TypeFor[*types.Int64]()), - infoConverting(reflect.TypeFor[*int64](), reflect.TypeFor[types.Int64]()), - }, - }, - - "empty": { - source: aws.Int64(0), - expectedValue: types.Int64Value(0), - expectedDiags: diag.Diagnostics{}, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[*int64](), reflect.TypeFor[*types.Int64]()), - infoConverting(reflect.TypeFor[*int64](), reflect.TypeFor[types.Int64]()), - }, - }, - - "nil": { - source: nil, - expectedValue: types.Int64Null(), - expectedDiags: diag.Diagnostics{}, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[*int64](), reflect.TypeFor[*types.Int64]()), - infoConverting(reflect.TypeFor[*int64](), reflect.TypeFor[types.Int64]()), - }, - }, - } - - runTopLevelTestCases(t, testCases) -} - -func TestFlattenSimpleNestedBlockWithStringEnum(t *testing.T) { - t.Parallel() - - type tf01 struct { - Field1 types.Int64 `tfsdk:"field1"` - Field2 fwtypes.StringEnum[testEnum] `tfsdk:"field2"` - } - type aws01 struct { - Field1 int64 - Field2 testEnum - } - - testCases := autoFlexTestCases{ - "single nested valid value": { - Source: &aws01{ - Field1: 1, - Field2: testEnumList, - }, - Target: &tf01{}, - WantTarget: &tf01{ - Field1: types.Int64Value(1), - Field2: fwtypes.StringEnumValue(testEnumList), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[*aws01](), reflect.TypeFor[*tf01]()), - infoConverting(reflect.TypeFor[aws01](), reflect.TypeFor[*tf01]()), - traceMatchedFields("Field1", reflect.TypeFor[aws01](), "Field1", reflect.TypeFor[*tf01]()), - infoConvertingWithPath("Field1", reflect.TypeFor[int64](), "Field1", reflect.TypeFor[types.Int64]()), - traceMatchedFields("Field2", reflect.TypeFor[aws01](), "Field2", 
reflect.TypeFor[*tf01]()), - infoConvertingWithPath("Field2", reflect.TypeFor[testEnum](), "Field2", reflect.TypeFor[fwtypes.StringEnum[testEnum]]()), - }, - }, - "single nested empty value": { - Source: &aws01{ - Field1: 1, - Field2: "", - }, - Target: &tf01{}, - WantTarget: &tf01{ - Field1: types.Int64Value(1), - Field2: fwtypes.StringEnumNull[testEnum](), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[*aws01](), reflect.TypeFor[*tf01]()), - infoConverting(reflect.TypeFor[aws01](), reflect.TypeFor[*tf01]()), - traceMatchedFields("Field1", reflect.TypeFor[aws01](), "Field1", reflect.TypeFor[*tf01]()), - infoConvertingWithPath("Field1", reflect.TypeFor[int64](), "Field1", reflect.TypeFor[types.Int64]()), - traceMatchedFields("Field2", reflect.TypeFor[aws01](), "Field2", reflect.TypeFor[*tf01]()), - infoConvertingWithPath("Field2", reflect.TypeFor[testEnum](), "Field2", reflect.TypeFor[fwtypes.StringEnum[testEnum]]()), - }, - }, - } - runAutoFlattenTestCases(t, testCases) -} - -func TestFlattenComplexNestedBlockWithStringEnum(t *testing.T) { - t.Parallel() - - type tf01 struct { - Field2 fwtypes.StringEnum[testEnum] `tfsdk:"field2"` - } - type tf02 struct { - Field1 types.Int64 `tfsdk:"field1"` - Field2 fwtypes.ListNestedObjectValueOf[tf01] `tfsdk:"field2"` - } - type aws02 struct { - Field2 testEnum - } - type aws01 struct { - Field1 int64 - Field2 *aws02 - } - - ctx := context.Background() - var zero fwtypes.StringEnum[testEnum] - testCases := autoFlexTestCases{ - "single nested valid value": { - Source: &aws01{ - Field1: 1, - Field2: &aws02{ - Field2: testEnumList, - }, - }, - Target: &tf02{}, - WantTarget: &tf02{ - Field1: types.Int64Value(1), - Field2: fwtypes.NewListNestedObjectValueOfPtrMust(ctx, &tf01{ - Field2: fwtypes.StringEnumValue(testEnumList), - }), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[*aws01](), reflect.TypeFor[*tf02]()), - infoConverting(reflect.TypeFor[aws01](), 
reflect.TypeFor[*tf02]()), - traceMatchedFields("Field1", reflect.TypeFor[aws01](), "Field1", reflect.TypeFor[*tf02]()), - infoConvertingWithPath("Field1", reflect.TypeFor[int64](), "Field1", reflect.TypeFor[types.Int64]()), - traceMatchedFields("Field2", reflect.TypeFor[aws01](), "Field2", reflect.TypeFor[*tf02]()), - infoConvertingWithPath("Field2", reflect.TypeFor[*aws02](), "Field2", reflect.TypeFor[fwtypes.ListNestedObjectValueOf[tf01]]()), - traceMatchedFieldsWithPath("Field2", "Field2", reflect.TypeFor[aws02](), "Field2", "Field2", reflect.TypeFor[*tf01]()), - infoConvertingWithPath("Field2.Field2", reflect.TypeFor[testEnum](), "Field2.Field2", reflect.TypeFor[fwtypes.StringEnum[testEnum]]()), - }, - }, - "single nested empty value": { - Source: &aws01{ - Field1: 1, - Field2: &aws02{Field2: ""}, - }, - Target: &tf02{}, - WantTarget: &tf02{ - Field1: types.Int64Value(1), - Field2: fwtypes.NewListNestedObjectValueOfPtrMust(ctx, &tf01{ - Field2: fwtypes.StringEnumNull[testEnum](), - }), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[*aws01](), reflect.TypeFor[*tf02]()), - infoConverting(reflect.TypeFor[aws01](), reflect.TypeFor[*tf02]()), - traceMatchedFields("Field1", reflect.TypeFor[aws01](), "Field1", reflect.TypeFor[*tf02]()), - infoConvertingWithPath("Field1", reflect.TypeFor[int64](), "Field1", reflect.TypeFor[types.Int64]()), - traceMatchedFields("Field2", reflect.TypeFor[aws01](), "Field2", reflect.TypeFor[*tf02]()), - infoConvertingWithPath("Field2", reflect.TypeFor[*aws02](), "Field2", reflect.TypeFor[fwtypes.ListNestedObjectValueOf[tf01]]()), - traceMatchedFieldsWithPath("Field2", "Field2", reflect.TypeFor[aws02](), "Field2", "Field2", reflect.TypeFor[*tf01]()), - infoConvertingWithPath("Field2.Field2", reflect.TypeFor[testEnum](), "Field2.Field2", reflect.TypeFor[fwtypes.StringEnum[testEnum]]()), - }, - }, - "single nested zero value": { - Source: &aws01{ - Field1: 1, - Field2: &aws02{ - Field2: ""}, - }, - Target: 
&tf02{}, - WantTarget: &tf02{ - Field1: types.Int64Value(1), - Field2: fwtypes.NewListNestedObjectValueOfPtrMust(ctx, &tf01{ - Field2: zero, - }), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[*aws01](), reflect.TypeFor[*tf02]()), - infoConverting(reflect.TypeFor[aws01](), reflect.TypeFor[*tf02]()), - traceMatchedFields("Field1", reflect.TypeFor[aws01](), "Field1", reflect.TypeFor[*tf02]()), - infoConvertingWithPath("Field1", reflect.TypeFor[int64](), "Field1", reflect.TypeFor[types.Int64]()), - traceMatchedFields("Field2", reflect.TypeFor[aws01](), "Field2", reflect.TypeFor[*tf02]()), - infoConvertingWithPath("Field2", reflect.TypeFor[*aws02](), "Field2", reflect.TypeFor[fwtypes.ListNestedObjectValueOf[tf01]]()), - traceMatchedFieldsWithPath("Field2", "Field2", reflect.TypeFor[aws02](), "Field2", "Field2", reflect.TypeFor[*tf01]()), - infoConvertingWithPath("Field2.Field2", reflect.TypeFor[testEnum](), "Field2.Field2", reflect.TypeFor[fwtypes.StringEnum[testEnum]]()), - }, - }, - } - runAutoFlattenTestCases(t, testCases) -} - -func TestFlattenSimpleSingleNestedBlock(t *testing.T) { - t.Parallel() - - type tf01 struct { - Field1 types.String `tfsdk:"field1"` - Field2 types.Int64 `tfsdk:"field2"` - } - type aws01 struct { - Field1 *string - Field2 int64 - } - - type tf02 struct { - Field1 fwtypes.ObjectValueOf[tf01] `tfsdk:"field1"` - } - type aws02 struct { - Field1 *aws01 - } - type aws03 struct { - Field1 aws01 - } - - ctx := context.Background() - testCases := autoFlexTestCases{ - "single nested block pointer": { - Source: &aws02{ - Field1: &aws01{ - Field1: aws.String("a"), - Field2: 1, - }, - }, - Target: &tf02{}, - WantTarget: &tf02{ - Field1: fwtypes.NewObjectValueOfMust[tf01](ctx, &tf01{ - Field1: types.StringValue("a"), - Field2: types.Int64Value(1), - }), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[*aws02](), reflect.TypeFor[*tf02]()), - infoConverting(reflect.TypeFor[aws02](), 
reflect.TypeFor[*tf02]()), - traceMatchedFields("Field1", reflect.TypeFor[aws02](), "Field1", reflect.TypeFor[*tf02]()), - infoConvertingWithPath("Field1", reflect.TypeFor[*aws01](), "Field1", reflect.TypeFor[fwtypes.ObjectValueOf[tf01]]()), - traceMatchedFieldsWithPath("Field1", "Field1", reflect.TypeFor[aws01](), "Field1", "Field1", reflect.TypeFor[*tf01]()), - infoConvertingWithPath("Field1.Field1", reflect.TypeFor[*string](), "Field1.Field1", reflect.TypeFor[types.String]()), - traceMatchedFieldsWithPath("Field1", "Field2", reflect.TypeFor[aws01](), "Field1", "Field2", reflect.TypeFor[*tf01]()), - infoConvertingWithPath("Field1.Field2", reflect.TypeFor[int64](), "Field1.Field2", reflect.TypeFor[types.Int64]()), - }, - }, - "single nested block nil": { - Source: &aws02{}, - Target: &tf02{}, - WantTarget: &tf02{ - Field1: fwtypes.NewObjectValueOfNull[tf01](ctx), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[*aws02](), reflect.TypeFor[*tf02]()), - infoConverting(reflect.TypeFor[aws02](), reflect.TypeFor[*tf02]()), - traceMatchedFields("Field1", reflect.TypeFor[aws02](), "Field1", reflect.TypeFor[*tf02]()), - infoConvertingWithPath("Field1", reflect.TypeFor[*aws01](), "Field1", reflect.TypeFor[fwtypes.ObjectValueOf[tf01]]()), - }, - }, - "single nested block value": { - Source: &aws03{ - Field1: aws01{ - Field1: aws.String("a"), - Field2: 1}, - }, - Target: &tf02{}, - WantTarget: &tf02{ - Field1: fwtypes.NewObjectValueOfMust[tf01](ctx, &tf01{ - Field1: types.StringValue("a"), - Field2: types.Int64Value(1), - }), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[*aws03](), reflect.TypeFor[*tf02]()), - infoConverting(reflect.TypeFor[aws03](), reflect.TypeFor[*tf02]()), - traceMatchedFields("Field1", reflect.TypeFor[aws03](), "Field1", reflect.TypeFor[*tf02]()), - infoConvertingWithPath("Field1", reflect.TypeFor[aws01](), "Field1", reflect.TypeFor[fwtypes.ObjectValueOf[tf01]]()), - 
traceMatchedFieldsWithPath("Field1", "Field1", reflect.TypeFor[aws01](), "Field1", "Field1", reflect.TypeFor[*tf01]()), - infoConvertingWithPath("Field1.Field1", reflect.TypeFor[*string](), "Field1.Field1", reflect.TypeFor[types.String]()), - traceMatchedFieldsWithPath("Field1", "Field2", reflect.TypeFor[aws01](), "Field1", "Field2", reflect.TypeFor[*tf01]()), - infoConvertingWithPath("Field1.Field2", reflect.TypeFor[int64](), "Field1.Field2", reflect.TypeFor[types.Int64]()), - }, - }, - } - runAutoFlattenTestCases(t, testCases) -} - -func TestFlattenComplexSingleNestedBlock(t *testing.T) { - t.Parallel() - - type tf01 struct { - Field1 types.Bool `tfsdk:"field1"` - Field2 fwtypes.ListValueOf[types.String] `tfsdk:"field2"` - } - type aws01 struct { - Field1 bool - Field2 []string - } - - type tf02 struct { - Field1 fwtypes.ObjectValueOf[tf01] `tfsdk:"field1"` - } - type aws02 struct { - Field1 *aws01 - } - - type tf03 struct { - Field1 fwtypes.ObjectValueOf[tf02] `tfsdk:"field1"` - } - type aws03 struct { - Field1 *aws02 - } - - ctx := context.Background() - testCases := autoFlexTestCases{ - "single nested block pointer": { - Source: &aws03{ - Field1: &aws02{ - Field1: &aws01{ - Field1: true, - Field2: []string{"a", "b"}, - }, - }, - }, - Target: &tf03{}, - WantTarget: &tf03{ - Field1: fwtypes.NewObjectValueOfMust[tf02](ctx, &tf02{ - Field1: fwtypes.NewObjectValueOfMust[tf01](ctx, &tf01{ - Field1: types.BoolValue(true), - Field2: fwtypes.NewListValueOfMust[types.String](ctx, []attr.Value{ - types.StringValue("a"), - types.StringValue("b"), - }), - }), - }), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[*aws03](), reflect.TypeFor[*tf03]()), - infoConverting(reflect.TypeFor[aws03](), reflect.TypeFor[*tf03]()), - traceMatchedFields("Field1", reflect.TypeFor[aws03](), "Field1", reflect.TypeFor[*tf03]()), - infoConvertingWithPath("Field1", reflect.TypeFor[*aws02](), "Field1", reflect.TypeFor[fwtypes.ObjectValueOf[tf02]]()), - 
traceMatchedFieldsWithPath("Field1", "Field1", reflect.TypeFor[aws02](), "Field1", "Field1", reflect.TypeFor[*tf02]()), - infoConvertingWithPath("Field1.Field1", reflect.TypeFor[*aws01](), "Field1.Field1", reflect.TypeFor[fwtypes.ObjectValueOf[tf01]]()), - traceMatchedFieldsWithPath("Field1.Field1", "Field1", reflect.TypeFor[aws01](), "Field1.Field1", "Field1", reflect.TypeFor[*tf01]()), - infoConvertingWithPath("Field1.Field1.Field1", reflect.TypeFor[bool](), "Field1.Field1.Field1", reflect.TypeFor[types.Bool]()), - traceMatchedFieldsWithPath("Field1.Field1", "Field2", reflect.TypeFor[aws01](), "Field1.Field1", "Field2", reflect.TypeFor[*tf01]()), - infoConvertingWithPath("Field1.Field1.Field2", reflect.TypeFor[[]string](), "Field1.Field1.Field2", reflect.TypeFor[fwtypes.ListValueOf[types.String]]()), - traceFlatteningWithListValue("Field1.Field1.Field2", reflect.TypeFor[[]string](), 2, "Field1.Field1.Field2", reflect.TypeFor[fwtypes.ListValueOf[types.String]]()), - }, - }, - } - runAutoFlattenTestCases(t, testCases) -} - -func TestFlattenSimpleNestedBlockWithFloat32(t *testing.T) { - t.Parallel() - - type tf01 struct { - Field1 types.Int64 `tfsdk:"field1"` - Field2 types.Float64 `tfsdk:"field2"` - } - type aws01 struct { - Field1 int64 - Field2 *float32 - } - - testCases := autoFlexTestCases{ - "single nested valid value": { - Source: &aws01{Field1: 1, Field2: aws.Float32(0.01)}, - Target: &tf01{}, - WantTarget: &tf01{Field1: types.Int64Value(1), Field2: types.Float64Value(0.01)}, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[*aws01](), reflect.TypeFor[*tf01]()), - infoConverting(reflect.TypeFor[aws01](), reflect.TypeFor[*tf01]()), - traceMatchedFields("Field1", reflect.TypeFor[aws01](), "Field1", reflect.TypeFor[*tf01]()), - infoConvertingWithPath("Field1", reflect.TypeFor[int64](), "Field1", reflect.TypeFor[types.Int64]()), - traceMatchedFields("Field2", reflect.TypeFor[aws01](), "Field2", reflect.TypeFor[*tf01]()), - 
infoConvertingWithPath("Field2", reflect.TypeFor[*float32](), "Field2", reflect.TypeFor[types.Float64]()), - }, - }, - } - runAutoFlattenTestCases(t, testCases) -} - -func TestFlattenComplexNestedBlockWithFloat32(t *testing.T) { - t.Parallel() - - type tf01 struct { - Field1 types.Float64 `tfsdk:"field1"` - Field2 types.Float64 `tfsdk:"field2"` - } - type tf02 struct { - Field1 types.Int64 `tfsdk:"field1"` - Field2 fwtypes.ListNestedObjectValueOf[tf01] `tfsdk:"field2"` - } - type aws02 struct { - Field1 float32 - Field2 *float32 - } - type aws01 struct { - Field1 int64 - Field2 *aws02 - } - - ctx := context.Background() - testCases := autoFlexTestCases{ - "single nested valid value": { - Source: &aws01{ - Field1: 1, - Field2: &aws02{ - Field1: 1.11, - Field2: aws.Float32(-2.22), - }, - }, - Target: &tf02{}, - WantTarget: &tf02{ - Field1: types.Int64Value(1), - Field2: fwtypes.NewListNestedObjectValueOfPtrMust(ctx, &tf01{ - Field1: types.Float64Value(1.11), - Field2: types.Float64Value(-2.22), - }), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[*aws01](), reflect.TypeFor[*tf02]()), - infoConverting(reflect.TypeFor[aws01](), reflect.TypeFor[*tf02]()), - traceMatchedFields("Field1", reflect.TypeFor[aws01](), "Field1", reflect.TypeFor[*tf02]()), - infoConvertingWithPath("Field1", reflect.TypeFor[int64](), "Field1", reflect.TypeFor[types.Int64]()), - traceMatchedFields("Field2", reflect.TypeFor[aws01](), "Field2", reflect.TypeFor[*tf02]()), - infoConvertingWithPath("Field2", reflect.TypeFor[*aws02](), "Field2", reflect.TypeFor[fwtypes.ListNestedObjectValueOf[tf01]]()), - traceMatchedFieldsWithPath("Field2", "Field1", reflect.TypeFor[aws02](), "Field2", "Field1", reflect.TypeFor[*tf01]()), - infoConvertingWithPath("Field2.Field1", reflect.TypeFor[float32](), "Field2.Field1", reflect.TypeFor[types.Float64]()), - traceMatchedFieldsWithPath("Field2", "Field2", reflect.TypeFor[aws02](), "Field2", "Field2", reflect.TypeFor[*tf01]()), - 
infoConvertingWithPath("Field2.Field2", reflect.TypeFor[*float32](), "Field2.Field2", reflect.TypeFor[types.Float64]()), - }, - }, - } - runAutoFlattenTestCases(t, testCases) -} - -func TestFlattenSimpleNestedBlockWithFloat64(t *testing.T) { - t.Parallel() - - type tf01 struct { - Field1 types.Int64 `tfsdk:"field1"` - Field2 types.Float64 `tfsdk:"field2"` - } - type aws01 struct { - Field1 int64 - Field2 *float64 - } - - testCases := autoFlexTestCases{ - "single nested valid value": { - Source: &aws01{ - Field1: 1, - Field2: aws.Float64(0.01), - }, - Target: &tf01{}, - WantTarget: &tf01{ - Field1: types.Int64Value(1), - Field2: types.Float64Value(0.01), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[*aws01](), reflect.TypeFor[*tf01]()), - infoConverting(reflect.TypeFor[aws01](), reflect.TypeFor[*tf01]()), - traceMatchedFields("Field1", reflect.TypeFor[aws01](), "Field1", reflect.TypeFor[*tf01]()), - infoConvertingWithPath("Field1", reflect.TypeFor[int64](), "Field1", reflect.TypeFor[types.Int64]()), - traceMatchedFields("Field2", reflect.TypeFor[aws01](), "Field2", reflect.TypeFor[*tf01]()), - infoConvertingWithPath("Field2", reflect.TypeFor[*float64](), "Field2", reflect.TypeFor[types.Float64]()), - }, - }, - } - runAutoFlattenTestCases(t, testCases) -} - -func TestFlattenComplexNestedBlockWithFloat64(t *testing.T) { - t.Parallel() - - type tf01 struct { - Field1 types.Float64 `tfsdk:"field1"` - Field2 types.Float64 `tfsdk:"field2"` - } - type tf02 struct { - Field1 types.Int64 `tfsdk:"field1"` - Field2 fwtypes.ListNestedObjectValueOf[tf01] `tfsdk:"field2"` - } - type aws02 struct { - Field1 float64 - Field2 *float64 - } - type aws01 struct { - Field1 int64 - Field2 *aws02 - } - - ctx := context.Background() - testCases := autoFlexTestCases{ - "single nested valid value": { - Source: &aws01{ - Field1: 1, - Field2: &aws02{ - Field1: 1.11, - Field2: aws.Float64(-2.22), - }, - }, - Target: &tf02{}, - WantTarget: &tf02{Field1: 
types.Int64Value(1), Field2: fwtypes.NewListNestedObjectValueOfPtrMust(ctx, &tf01{Field1: types.Float64Value(1.11), Field2: types.Float64Value(-2.22)})}, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[*aws01](), reflect.TypeFor[*tf02]()), - infoConverting(reflect.TypeFor[aws01](), reflect.TypeFor[*tf02]()), - traceMatchedFields("Field1", reflect.TypeFor[aws01](), "Field1", reflect.TypeFor[*tf02]()), - infoConvertingWithPath("Field1", reflect.TypeFor[int64](), "Field1", reflect.TypeFor[types.Int64]()), - traceMatchedFields("Field2", reflect.TypeFor[aws01](), "Field2", reflect.TypeFor[*tf02]()), - infoConvertingWithPath("Field2", reflect.TypeFor[*aws02](), "Field2", reflect.TypeFor[fwtypes.ListNestedObjectValueOf[tf01]]()), - traceMatchedFieldsWithPath("Field2", "Field1", reflect.TypeFor[aws02](), "Field2", "Field1", reflect.TypeFor[*tf01]()), - infoConvertingWithPath("Field2.Field1", reflect.TypeFor[float64](), "Field2.Field1", reflect.TypeFor[types.Float64]()), - traceMatchedFieldsWithPath("Field2", "Field2", reflect.TypeFor[aws02](), "Field2", "Field2", reflect.TypeFor[*tf01]()), - infoConvertingWithPath("Field2.Field2", reflect.TypeFor[*float64](), "Field2.Field2", reflect.TypeFor[types.Float64]()), - }, - }, - } - runAutoFlattenTestCases(t, testCases) -} - -func TestFlattenObjectValueField(t *testing.T) { - t.Parallel() - - ctx := context.Background() - - testCases := map[string]autoFlexTestCases{ - "*struct to ObjectValue": { - "nil": { - Source: awsNestedObjectPointer{}, - Target: &tfObjectValue[tfSingleStringField]{}, - WantTarget: &tfObjectValue[tfSingleStringField]{ - Field1: fwtypes.NewObjectValueOfNull[tfSingleStringField](ctx), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[awsNestedObjectPointer](), reflect.TypeFor[*tfObjectValue[tfSingleStringField]]()), - infoConverting(reflect.TypeFor[awsNestedObjectPointer](), reflect.TypeFor[*tfObjectValue[tfSingleStringField]]()), - traceMatchedFields("Field1", 
reflect.TypeFor[awsNestedObjectPointer](), "Field1", reflect.TypeFor[*tfObjectValue[tfSingleStringField]]()), - infoConvertingWithPath("Field1", reflect.TypeFor[*awsSingleStringValue](), "Field1", reflect.TypeFor[fwtypes.ObjectValueOf[tfSingleStringField]]()), - }, - }, - "value": { - Source: awsNestedObjectPointer{ - Field1: &awsSingleStringValue{ - Field1: "a", - }, - }, - Target: &tfObjectValue[tfSingleStringField]{}, - WantTarget: &tfObjectValue[tfSingleStringField]{ - Field1: fwtypes.NewObjectValueOfMust(ctx, &tfSingleStringField{ - Field1: types.StringValue("a"), - }), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[awsNestedObjectPointer](), reflect.TypeFor[*tfObjectValue[tfSingleStringField]]()), - infoConverting(reflect.TypeFor[awsNestedObjectPointer](), reflect.TypeFor[*tfObjectValue[tfSingleStringField]]()), - traceMatchedFields("Field1", reflect.TypeFor[awsNestedObjectPointer](), "Field1", reflect.TypeFor[*tfObjectValue[tfSingleStringField]]()), - infoConvertingWithPath("Field1", reflect.TypeFor[*awsSingleStringValue](), "Field1", reflect.TypeFor[fwtypes.ObjectValueOf[tfSingleStringField]]()), - traceMatchedFieldsWithPath("Field1", "Field1", reflect.TypeFor[awsSingleStringValue](), "Field1", "Field1", reflect.TypeFor[*tfSingleStringField]()), - infoConvertingWithPath("Field1.Field1", reflect.TypeFor[string](), "Field1.Field1", reflect.TypeFor[types.String]()), - }, - }, - }, - } - - for testName, cases := range testCases { - t.Run(testName, func(t *testing.T) { - t.Parallel() - - runAutoFlattenTestCases(t, cases) - }) - } -} - -func TestFlattenListOfNestedObjectField(t *testing.T) { - t.Parallel() - - ctx := context.Background() - - testCases := map[string]autoFlexTestCases{ - "*struct to ListNestedObject": { - "nil": { - Source: awsNestedObjectPointer{}, - Target: &tfListOfNestedObject{}, - WantTarget: &tfListOfNestedObject{ - Field1: fwtypes.NewListNestedObjectValueOfNull[tfSingleStringField](ctx), - }, - expectedLogLines: 
[]map[string]any{ - infoFlattening(reflect.TypeFor[awsNestedObjectPointer](), reflect.TypeFor[*tfListOfNestedObject]()), - infoConverting(reflect.TypeFor[awsNestedObjectPointer](), reflect.TypeFor[*tfListOfNestedObject]()), - traceMatchedFields("Field1", reflect.TypeFor[awsNestedObjectPointer](), "Field1", reflect.TypeFor[*tfListOfNestedObject]()), - infoConvertingWithPath("Field1", reflect.TypeFor[*awsSingleStringValue](), "Field1", reflect.TypeFor[fwtypes.ListNestedObjectValueOf[tfSingleStringField]]()), - }, - }, - "value": { - Source: awsNestedObjectPointer{ - Field1: &awsSingleStringValue{ - Field1: "a", - }, - }, - Target: &tfListOfNestedObject{}, - WantTarget: &tfListOfNestedObject{ - Field1: fwtypes.NewListNestedObjectValueOfPtrMust(ctx, &tfSingleStringField{ - Field1: types.StringValue("a"), - }), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[awsNestedObjectPointer](), reflect.TypeFor[*tfListOfNestedObject]()), - infoConverting(reflect.TypeFor[awsNestedObjectPointer](), reflect.TypeFor[*tfListOfNestedObject]()), - traceMatchedFields("Field1", reflect.TypeFor[awsNestedObjectPointer](), "Field1", reflect.TypeFor[*tfListOfNestedObject]()), - infoConvertingWithPath("Field1", reflect.TypeFor[*awsSingleStringValue](), "Field1", reflect.TypeFor[fwtypes.ListNestedObjectValueOf[tfSingleStringField]]()), - traceMatchedFieldsWithPath("Field1", "Field1", reflect.TypeFor[awsSingleStringValue](), "Field1", "Field1", reflect.TypeFor[*tfSingleStringField]()), - infoConvertingWithPath("Field1.Field1", reflect.TypeFor[string](), "Field1.Field1", reflect.TypeFor[types.String]()), - }, - }, - }, - - "legacy *struct to ListNestedObject": { - "nil": { - Source: awsNestedObjectPointer{}, - Target: &tfListOfNestedObjectLegacy{}, - WantTarget: &tfListOfNestedObjectLegacy{ - Field1: fwtypes.NewListNestedObjectValueOfPtrMust(ctx, &tfSingleStringField{}), - }, - expectedLogLines: []map[string]any{ - 
infoFlattening(reflect.TypeFor[awsNestedObjectPointer](), reflect.TypeFor[*tfListOfNestedObjectLegacy]()), - infoConverting(reflect.TypeFor[awsNestedObjectPointer](), reflect.TypeFor[*tfListOfNestedObjectLegacy]()), - traceMatchedFields("Field1", reflect.TypeFor[awsNestedObjectPointer](), "Field1", reflect.TypeFor[*tfListOfNestedObjectLegacy]()), - infoConvertingWithPath("Field1", reflect.TypeFor[*awsSingleStringValue](), "Field1", reflect.TypeFor[fwtypes.ListNestedObjectValueOf[tfSingleStringField]]()), - debugUsingLegacyFlattener("Field1", reflect.TypeFor[*awsSingleStringValue](), "Field1", reflect.TypeFor[fwtypes.ListNestedObjectValueOf[tfSingleStringField]]()), - }, - }, - "value": { - Source: awsNestedObjectPointer{ - Field1: &awsSingleStringValue{ - Field1: "a", - }, - }, - Target: &tfListOfNestedObjectLegacy{}, - WantTarget: &tfListOfNestedObjectLegacy{ - Field1: fwtypes.NewListNestedObjectValueOfPtrMust(ctx, &tfSingleStringField{ - Field1: types.StringValue("a"), - }), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[awsNestedObjectPointer](), reflect.TypeFor[*tfListOfNestedObjectLegacy]()), - infoConverting(reflect.TypeFor[awsNestedObjectPointer](), reflect.TypeFor[*tfListOfNestedObjectLegacy]()), - traceMatchedFields("Field1", reflect.TypeFor[awsNestedObjectPointer](), "Field1", reflect.TypeFor[*tfListOfNestedObjectLegacy]()), - infoConvertingWithPath("Field1", reflect.TypeFor[*awsSingleStringValue](), "Field1", reflect.TypeFor[fwtypes.ListNestedObjectValueOf[tfSingleStringField]]()), - debugUsingLegacyFlattener("Field1", reflect.TypeFor[*awsSingleStringValue](), "Field1", reflect.TypeFor[fwtypes.ListNestedObjectValueOf[tfSingleStringField]]()), - traceMatchedFieldsWithPath("Field1", "Field1", reflect.TypeFor[awsSingleStringValue](), "Field1", "Field1", reflect.TypeFor[*tfSingleStringField]()), - infoConvertingWithPath("Field1.Field1", reflect.TypeFor[string](), "Field1.Field1", reflect.TypeFor[types.String]()), - }, - }, - }, - 
- "[]struct to ListNestedObject": { - "nil": { - Source: awsSliceOfNestedObjectValues{}, - Target: &tfListOfNestedObject{}, - WantTarget: &tfListOfNestedObject{ - Field1: fwtypes.NewListNestedObjectValueOfNull[tfSingleStringField](ctx), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[awsSliceOfNestedObjectValues](), reflect.TypeFor[*tfListOfNestedObject]()), - infoConverting(reflect.TypeFor[awsSliceOfNestedObjectValues](), reflect.TypeFor[*tfListOfNestedObject]()), - traceMatchedFields("Field1", reflect.TypeFor[awsSliceOfNestedObjectValues](), "Field1", reflect.TypeFor[*tfListOfNestedObject]()), - infoConvertingWithPath("Field1", reflect.TypeFor[[]awsSingleStringValue](), "Field1", reflect.TypeFor[fwtypes.ListNestedObjectValueOf[tfSingleStringField]]()), - traceFlatteningWithNullValue("Field1", reflect.TypeFor[[]awsSingleStringValue](), "Field1", reflect.TypeFor[fwtypes.ListNestedObjectValueOf[tfSingleStringField]]()), - }, - }, - "empty": { - Source: awsSliceOfNestedObjectValues{ - Field1: []awsSingleStringValue{}, - }, - Target: &tfListOfNestedObject{}, - WantTarget: &tfListOfNestedObject{ - Field1: fwtypes.NewListNestedObjectValueOfValueSliceMust(ctx, []tfSingleStringField{}), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[awsSliceOfNestedObjectValues](), reflect.TypeFor[*tfListOfNestedObject]()), - infoConverting(reflect.TypeFor[awsSliceOfNestedObjectValues](), reflect.TypeFor[*tfListOfNestedObject]()), - traceMatchedFields("Field1", reflect.TypeFor[awsSliceOfNestedObjectValues](), "Field1", reflect.TypeFor[*tfListOfNestedObject]()), - infoConvertingWithPath("Field1", reflect.TypeFor[[]awsSingleStringValue](), "Field1", reflect.TypeFor[fwtypes.ListNestedObjectValueOf[tfSingleStringField]]()), - traceFlatteningNestedObjectCollection("Field1", reflect.TypeFor[[]awsSingleStringValue](), 0, "Field1", reflect.TypeFor[fwtypes.ListNestedObjectValueOf[tfSingleStringField]]()), - }, - }, - "values": { - Source: 
awsSliceOfNestedObjectValues{ - Field1: []awsSingleStringValue{ - {Field1: "a"}, - {Field1: "b"}, - }, - }, - Target: &tfListOfNestedObject{}, - WantTarget: &tfListOfNestedObject{ - Field1: fwtypes.NewListNestedObjectValueOfValueSliceMust(ctx, []tfSingleStringField{ - {Field1: types.StringValue("a")}, - {Field1: types.StringValue("b")}, - }), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[awsSliceOfNestedObjectValues](), reflect.TypeFor[*tfListOfNestedObject]()), - infoConverting(reflect.TypeFor[awsSliceOfNestedObjectValues](), reflect.TypeFor[*tfListOfNestedObject]()), - traceMatchedFields("Field1", reflect.TypeFor[awsSliceOfNestedObjectValues](), "Field1", reflect.TypeFor[*tfListOfNestedObject]()), - infoConvertingWithPath("Field1", reflect.TypeFor[[]awsSingleStringValue](), "Field1", reflect.TypeFor[fwtypes.ListNestedObjectValueOf[tfSingleStringField]]()), - traceFlatteningNestedObjectCollection("Field1", reflect.TypeFor[[]awsSingleStringValue](), 2, "Field1", reflect.TypeFor[fwtypes.ListNestedObjectValueOf[tfSingleStringField]]()), - traceMatchedFieldsWithPath("Field1[0]", "Field1", reflect.TypeFor[awsSingleStringValue](), "Field1[0]", "Field1", reflect.TypeFor[*tfSingleStringField]()), - infoConvertingWithPath("Field1[0].Field1", reflect.TypeFor[string](), "Field1[0].Field1", reflect.TypeFor[types.String]()), - traceMatchedFieldsWithPath("Field1[1]", "Field1", reflect.TypeFor[awsSingleStringValue](), "Field1[1]", "Field1", reflect.TypeFor[*tfSingleStringField]()), - infoConvertingWithPath("Field1[1].Field1", reflect.TypeFor[string](), "Field1[1].Field1", reflect.TypeFor[types.String]()), - }, - }, - }, - - "legacy []struct to ListNestedObject": { - "nil": { - Source: awsSliceOfNestedObjectValues{}, - Target: &tfListOfNestedObjectLegacy{}, - WantTarget: &tfListOfNestedObjectLegacy{ - Field1: fwtypes.NewListNestedObjectValueOfValueSliceMust(ctx, []tfSingleStringField{}), - }, - expectedLogLines: []map[string]any{ - 
infoFlattening(reflect.TypeFor[awsSliceOfNestedObjectValues](), reflect.TypeFor[*tfListOfNestedObjectLegacy]()), - infoConverting(reflect.TypeFor[awsSliceOfNestedObjectValues](), reflect.TypeFor[*tfListOfNestedObjectLegacy]()), - traceMatchedFields("Field1", reflect.TypeFor[awsSliceOfNestedObjectValues](), "Field1", reflect.TypeFor[*tfListOfNestedObjectLegacy]()), - infoConvertingWithPath("Field1", reflect.TypeFor[[]awsSingleStringValue](), "Field1", reflect.TypeFor[fwtypes.ListNestedObjectValueOf[tfSingleStringField]]()), - debugUsingLegacyFlattener("Field1", reflect.TypeFor[[]awsSingleStringValue](), "Field1", reflect.TypeFor[fwtypes.ListNestedObjectValueOf[tfSingleStringField]]()), - }, - }, - "empty": { - Source: awsSliceOfNestedObjectValues{ - Field1: []awsSingleStringValue{}, - }, - Target: &tfListOfNestedObjectLegacy{}, - WantTarget: &tfListOfNestedObjectLegacy{ - Field1: fwtypes.NewListNestedObjectValueOfValueSliceMust(ctx, []tfSingleStringField{}), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[awsSliceOfNestedObjectValues](), reflect.TypeFor[*tfListOfNestedObjectLegacy]()), - infoConverting(reflect.TypeFor[awsSliceOfNestedObjectValues](), reflect.TypeFor[*tfListOfNestedObjectLegacy]()), - traceMatchedFields("Field1", reflect.TypeFor[awsSliceOfNestedObjectValues](), "Field1", reflect.TypeFor[*tfListOfNestedObjectLegacy]()), - infoConvertingWithPath("Field1", reflect.TypeFor[[]awsSingleStringValue](), "Field1", reflect.TypeFor[fwtypes.ListNestedObjectValueOf[tfSingleStringField]]()), - debugUsingLegacyFlattener("Field1", reflect.TypeFor[[]awsSingleStringValue](), "Field1", reflect.TypeFor[fwtypes.ListNestedObjectValueOf[tfSingleStringField]]()), - traceFlatteningNestedObjectCollection("Field1", reflect.TypeFor[[]awsSingleStringValue](), 0, "Field1", reflect.TypeFor[fwtypes.ListNestedObjectValueOf[tfSingleStringField]]()), - }, - }, - "values": { - Source: awsSliceOfNestedObjectValues{ - Field1: []awsSingleStringValue{ - {Field1: 
"a"}, - {Field1: "b"}, - }, - }, - Target: &tfListOfNestedObjectLegacy{}, - WantTarget: &tfListOfNestedObjectLegacy{ - Field1: fwtypes.NewListNestedObjectValueOfValueSliceMust(ctx, []tfSingleStringField{ - {Field1: types.StringValue("a")}, - {Field1: types.StringValue("b")}, - }), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[awsSliceOfNestedObjectValues](), reflect.TypeFor[*tfListOfNestedObjectLegacy]()), - infoConverting(reflect.TypeFor[awsSliceOfNestedObjectValues](), reflect.TypeFor[*tfListOfNestedObjectLegacy]()), - traceMatchedFields("Field1", reflect.TypeFor[awsSliceOfNestedObjectValues](), "Field1", reflect.TypeFor[*tfListOfNestedObjectLegacy]()), - infoConvertingWithPath("Field1", reflect.TypeFor[[]awsSingleStringValue](), "Field1", reflect.TypeFor[fwtypes.ListNestedObjectValueOf[tfSingleStringField]]()), - debugUsingLegacyFlattener("Field1", reflect.TypeFor[[]awsSingleStringValue](), "Field1", reflect.TypeFor[fwtypes.ListNestedObjectValueOf[tfSingleStringField]]()), - traceFlatteningNestedObjectCollection("Field1", reflect.TypeFor[[]awsSingleStringValue](), 2, "Field1", reflect.TypeFor[fwtypes.ListNestedObjectValueOf[tfSingleStringField]]()), - traceMatchedFieldsWithPath("Field1[0]", "Field1", reflect.TypeFor[awsSingleStringValue](), "Field1[0]", "Field1", reflect.TypeFor[*tfSingleStringField]()), - infoConvertingWithPath("Field1[0].Field1", reflect.TypeFor[string](), "Field1[0].Field1", reflect.TypeFor[types.String]()), - traceMatchedFieldsWithPath("Field1[1]", "Field1", reflect.TypeFor[awsSingleStringValue](), "Field1[1]", "Field1", reflect.TypeFor[*tfSingleStringField]()), - infoConvertingWithPath("Field1[1].Field1", reflect.TypeFor[string](), "Field1[1].Field1", reflect.TypeFor[types.String]()), - }, - }, - }, - - "[]*struct to ListNestedObject": { - "nil": { - Source: awsSliceOfNestedObjectPointers{}, - Target: &tfListOfNestedObject{}, - WantTarget: &tfListOfNestedObject{ - Field1: 
fwtypes.NewListNestedObjectValueOfNull[tfSingleStringField](ctx), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[awsSliceOfNestedObjectPointers](), reflect.TypeFor[*tfListOfNestedObject]()), - infoConverting(reflect.TypeFor[awsSliceOfNestedObjectPointers](), reflect.TypeFor[*tfListOfNestedObject]()), - traceMatchedFields("Field1", reflect.TypeFor[awsSliceOfNestedObjectPointers](), "Field1", reflect.TypeFor[*tfListOfNestedObject]()), - infoConvertingWithPath("Field1", reflect.TypeFor[[]*awsSingleStringValue](), "Field1", reflect.TypeFor[fwtypes.ListNestedObjectValueOf[tfSingleStringField]]()), - traceFlatteningWithNullValue("Field1", reflect.TypeFor[[]*awsSingleStringValue](), "Field1", reflect.TypeFor[fwtypes.ListNestedObjectValueOf[tfSingleStringField]]()), - }, - }, - "empty": { - Source: awsSliceOfNestedObjectPointers{ - Field1: []*awsSingleStringValue{}, - }, - Target: &tfListOfNestedObject{}, - WantTarget: &tfListOfNestedObject{ - Field1: fwtypes.NewListNestedObjectValueOfSliceMust(ctx, []*tfSingleStringField{}), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[awsSliceOfNestedObjectPointers](), reflect.TypeFor[*tfListOfNestedObject]()), - infoConverting(reflect.TypeFor[awsSliceOfNestedObjectPointers](), reflect.TypeFor[*tfListOfNestedObject]()), - traceMatchedFields("Field1", reflect.TypeFor[awsSliceOfNestedObjectPointers](), "Field1", reflect.TypeFor[*tfListOfNestedObject]()), - infoConvertingWithPath("Field1", reflect.TypeFor[[]*awsSingleStringValue](), "Field1", reflect.TypeFor[fwtypes.ListNestedObjectValueOf[tfSingleStringField]]()), - traceFlatteningNestedObjectCollection("Field1", reflect.TypeFor[[]*awsSingleStringValue](), 0, "Field1", reflect.TypeFor[fwtypes.ListNestedObjectValueOf[tfSingleStringField]]()), - }, - }, - "values": { - Source: awsSliceOfNestedObjectPointers{ - Field1: []*awsSingleStringValue{ - {Field1: "a"}, - {Field1: "b"}, - }, - }, - Target: &tfListOfNestedObject{}, - 
WantTarget: &tfListOfNestedObject{ - Field1: fwtypes.NewListNestedObjectValueOfSliceMust(ctx, []*tfSingleStringField{ - {Field1: types.StringValue("a")}, - {Field1: types.StringValue("b")}, - }), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[awsSliceOfNestedObjectPointers](), reflect.TypeFor[*tfListOfNestedObject]()), - infoConverting(reflect.TypeFor[awsSliceOfNestedObjectPointers](), reflect.TypeFor[*tfListOfNestedObject]()), - traceMatchedFields("Field1", reflect.TypeFor[awsSliceOfNestedObjectPointers](), "Field1", reflect.TypeFor[*tfListOfNestedObject]()), - infoConvertingWithPath("Field1", reflect.TypeFor[[]*awsSingleStringValue](), "Field1", reflect.TypeFor[fwtypes.ListNestedObjectValueOf[tfSingleStringField]]()), - traceFlatteningNestedObjectCollection("Field1", reflect.TypeFor[[]*awsSingleStringValue](), 2, "Field1", reflect.TypeFor[fwtypes.ListNestedObjectValueOf[tfSingleStringField]]()), - traceMatchedFieldsWithPath("Field1[0]", "Field1", reflect.TypeFor[awsSingleStringValue](), "Field1[0]", "Field1", reflect.TypeFor[*tfSingleStringField]()), - infoConvertingWithPath("Field1[0].Field1", reflect.TypeFor[string](), "Field1[0].Field1", reflect.TypeFor[types.String]()), - traceMatchedFieldsWithPath("Field1[1]", "Field1", reflect.TypeFor[awsSingleStringValue](), "Field1[1]", "Field1", reflect.TypeFor[*tfSingleStringField]()), - infoConvertingWithPath("Field1[1].Field1", reflect.TypeFor[string](), "Field1[1].Field1", reflect.TypeFor[types.String]()), - }, - }, - }, - - "legacy []*struct to ListNestedObject": { - "nil": { - Source: awsSliceOfNestedObjectPointers{}, - Target: &tfListOfNestedObjectLegacy{}, - WantTarget: &tfListOfNestedObjectLegacy{ - Field1: fwtypes.NewListNestedObjectValueOfSliceMust(ctx, []*tfSingleStringField{}), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[awsSliceOfNestedObjectPointers](), reflect.TypeFor[*tfListOfNestedObjectLegacy]()), - 
infoConverting(reflect.TypeFor[awsSliceOfNestedObjectPointers](), reflect.TypeFor[*tfListOfNestedObjectLegacy]()), - traceMatchedFields("Field1", reflect.TypeFor[awsSliceOfNestedObjectPointers](), "Field1", reflect.TypeFor[*tfListOfNestedObjectLegacy]()), - infoConvertingWithPath("Field1", reflect.TypeFor[[]*awsSingleStringValue](), "Field1", reflect.TypeFor[fwtypes.ListNestedObjectValueOf[tfSingleStringField]]()), - debugUsingLegacyFlattener("Field1", reflect.TypeFor[[]*awsSingleStringValue](), "Field1", reflect.TypeFor[fwtypes.ListNestedObjectValueOf[tfSingleStringField]]()), - }, - }, - "empty": { - Source: awsSliceOfNestedObjectPointers{ - Field1: []*awsSingleStringValue{}, - }, - Target: &tfListOfNestedObjectLegacy{}, - WantTarget: &tfListOfNestedObjectLegacy{ - Field1: fwtypes.NewListNestedObjectValueOfSliceMust(ctx, []*tfSingleStringField{}), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[awsSliceOfNestedObjectPointers](), reflect.TypeFor[*tfListOfNestedObjectLegacy]()), - infoConverting(reflect.TypeFor[awsSliceOfNestedObjectPointers](), reflect.TypeFor[*tfListOfNestedObjectLegacy]()), - traceMatchedFields("Field1", reflect.TypeFor[awsSliceOfNestedObjectPointers](), "Field1", reflect.TypeFor[*tfListOfNestedObjectLegacy]()), - infoConvertingWithPath("Field1", reflect.TypeFor[[]*awsSingleStringValue](), "Field1", reflect.TypeFor[fwtypes.ListNestedObjectValueOf[tfSingleStringField]]()), - debugUsingLegacyFlattener("Field1", reflect.TypeFor[[]*awsSingleStringValue](), "Field1", reflect.TypeFor[fwtypes.ListNestedObjectValueOf[tfSingleStringField]]()), - traceFlatteningNestedObjectCollection("Field1", reflect.TypeFor[[]*awsSingleStringValue](), 0, "Field1", reflect.TypeFor[fwtypes.ListNestedObjectValueOf[tfSingleStringField]]()), - }, - }, - "values": { - Source: awsSliceOfNestedObjectPointers{ - Field1: []*awsSingleStringValue{ - {Field1: "a"}, - {Field1: "b"}, - }, - }, - Target: &tfListOfNestedObjectLegacy{}, - WantTarget: 
&tfListOfNestedObjectLegacy{ - Field1: fwtypes.NewListNestedObjectValueOfSliceMust(ctx, []*tfSingleStringField{ - {Field1: types.StringValue("a")}, - {Field1: types.StringValue("b")}, - }), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[awsSliceOfNestedObjectPointers](), reflect.TypeFor[*tfListOfNestedObjectLegacy]()), - infoConverting(reflect.TypeFor[awsSliceOfNestedObjectPointers](), reflect.TypeFor[*tfListOfNestedObjectLegacy]()), - traceMatchedFields("Field1", reflect.TypeFor[awsSliceOfNestedObjectPointers](), "Field1", reflect.TypeFor[*tfListOfNestedObjectLegacy]()), - infoConvertingWithPath("Field1", reflect.TypeFor[[]*awsSingleStringValue](), "Field1", reflect.TypeFor[fwtypes.ListNestedObjectValueOf[tfSingleStringField]]()), - debugUsingLegacyFlattener("Field1", reflect.TypeFor[[]*awsSingleStringValue](), "Field1", reflect.TypeFor[fwtypes.ListNestedObjectValueOf[tfSingleStringField]]()), - traceFlatteningNestedObjectCollection("Field1", reflect.TypeFor[[]*awsSingleStringValue](), 2, "Field1", reflect.TypeFor[fwtypes.ListNestedObjectValueOf[tfSingleStringField]]()), - traceMatchedFieldsWithPath("Field1[0]", "Field1", reflect.TypeFor[awsSingleStringValue](), "Field1[0]", "Field1", reflect.TypeFor[*tfSingleStringField]()), - infoConvertingWithPath("Field1[0].Field1", reflect.TypeFor[string](), "Field1[0].Field1", reflect.TypeFor[types.String]()), - traceMatchedFieldsWithPath("Field1[1]", "Field1", reflect.TypeFor[awsSingleStringValue](), "Field1[1]", "Field1", reflect.TypeFor[*tfSingleStringField]()), - infoConvertingWithPath("Field1[1].Field1", reflect.TypeFor[string](), "Field1[1].Field1", reflect.TypeFor[types.String]()), - }, - }, - }, - } - - for testName, cases := range testCases { - t.Run(testName, func(t *testing.T) { - t.Parallel() - - runAutoFlattenTestCases(t, cases) - }) - } -} - -func TestFlattenTopLevelListOfNestedObject(t *testing.T) { - t.Parallel() - - ctx := context.Background() - - testCases := 
map[string]toplevelTestCase[[]awsSingleStringValue, fwtypes.ListNestedObjectValueOf[tfSingleStringField]]{ - "values": { - source: []awsSingleStringValue{ - { - Field1: "value1", - }, - { - Field1: "value2", - }, - }, - expectedValue: fwtypes.NewListNestedObjectValueOfValueSliceMust(ctx, []tfSingleStringField{ - { - Field1: types.StringValue("value1"), - }, - { - Field1: types.StringValue("value2"), - }, - }), - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[[]awsSingleStringValue](), reflect.TypeFor[*fwtypes.ListNestedObjectValueOf[tfSingleStringField]]()), - infoConverting(reflect.TypeFor[[]awsSingleStringValue](), reflect.TypeFor[fwtypes.ListNestedObjectValueOf[tfSingleStringField]]()), - traceFlatteningNestedObjectCollection("", reflect.TypeFor[[]awsSingleStringValue](), 2, "", reflect.TypeFor[fwtypes.ListNestedObjectValueOf[tfSingleStringField]]()), - traceMatchedFieldsWithPath("[0]", "Field1", reflect.TypeFor[awsSingleStringValue](), "[0]", "Field1", reflect.TypeFor[*tfSingleStringField]()), - infoConvertingWithPath("[0].Field1", reflect.TypeFor[string](), "[0].Field1", reflect.TypeFor[types.String]()), - traceMatchedFieldsWithPath("[1]", "Field1", reflect.TypeFor[awsSingleStringValue](), "[1]", "Field1", reflect.TypeFor[*tfSingleStringField]()), - infoConvertingWithPath("[1].Field1", reflect.TypeFor[string](), "[1].Field1", reflect.TypeFor[types.String]()), - }, - }, - - "empty": { - source: []awsSingleStringValue{}, - expectedValue: fwtypes.NewListNestedObjectValueOfValueSliceMust(ctx, []tfSingleStringField{}), - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[[]awsSingleStringValue](), reflect.TypeFor[*fwtypes.ListNestedObjectValueOf[tfSingleStringField]]()), - infoConverting(reflect.TypeFor[[]awsSingleStringValue](), reflect.TypeFor[fwtypes.ListNestedObjectValueOf[tfSingleStringField]]()), - traceFlatteningNestedObjectCollection("", reflect.TypeFor[[]awsSingleStringValue](), 0, "", 
reflect.TypeFor[fwtypes.ListNestedObjectValueOf[tfSingleStringField]]()), - }, - }, - - "null": { - source: nil, - expectedValue: fwtypes.NewListNestedObjectValueOfNull[tfSingleStringField](ctx), - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[[]awsSingleStringValue](), reflect.TypeFor[*fwtypes.ListNestedObjectValueOf[tfSingleStringField]]()), - infoConverting(reflect.TypeFor[[]awsSingleStringValue](), reflect.TypeFor[fwtypes.ListNestedObjectValueOf[tfSingleStringField]]()), - traceFlatteningWithNullValue("", reflect.TypeFor[[]awsSingleStringValue](), "", reflect.TypeFor[fwtypes.ListNestedObjectValueOf[tfSingleStringField]]()), - }, - }, - } - - runTopLevelTestCases(t, testCases) -} - -func TestFlattenSetOfNestedObjectField(t *testing.T) { - t.Parallel() - - ctx := context.Background() - - testCases := map[string]autoFlexTestCases{ - "*struct to SetNestedObject": { - "nil": { - Source: awsNestedObjectPointer{}, - Target: &tfSetOfNestedObject{}, - WantTarget: &tfSetOfNestedObject{ - Field1: fwtypes.NewSetNestedObjectValueOfNull[tfSingleStringField](ctx), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[awsNestedObjectPointer](), reflect.TypeFor[*tfSetOfNestedObject]()), - infoConverting(reflect.TypeFor[awsNestedObjectPointer](), reflect.TypeFor[*tfSetOfNestedObject]()), - traceMatchedFields("Field1", reflect.TypeFor[awsNestedObjectPointer](), "Field1", reflect.TypeFor[*tfSetOfNestedObject]()), - infoConvertingWithPath("Field1", reflect.TypeFor[*awsSingleStringValue](), "Field1", reflect.TypeFor[fwtypes.SetNestedObjectValueOf[tfSingleStringField]]()), - }, - }, - "value": { - Source: awsNestedObjectPointer{ - Field1: &awsSingleStringValue{Field1: "a"}, - }, - Target: &tfSetOfNestedObject{}, - WantTarget: &tfSetOfNestedObject{ - Field1: fwtypes.NewSetNestedObjectValueOfPtrMust(ctx, &tfSingleStringField{ - Field1: types.StringValue("a"), - }), - }, - expectedLogLines: []map[string]any{ - 
infoFlattening(reflect.TypeFor[awsNestedObjectPointer](), reflect.TypeFor[*tfSetOfNestedObject]()), - infoConverting(reflect.TypeFor[awsNestedObjectPointer](), reflect.TypeFor[*tfSetOfNestedObject]()), - traceMatchedFields("Field1", reflect.TypeFor[awsNestedObjectPointer](), "Field1", reflect.TypeFor[*tfSetOfNestedObject]()), - infoConvertingWithPath("Field1", reflect.TypeFor[*awsSingleStringValue](), "Field1", reflect.TypeFor[fwtypes.SetNestedObjectValueOf[tfSingleStringField]]()), - traceMatchedFieldsWithPath("Field1", "Field1", reflect.TypeFor[awsSingleStringValue](), "Field1", "Field1", reflect.TypeFor[*tfSingleStringField]()), - infoConvertingWithPath("Field1.Field1", reflect.TypeFor[string](), "Field1.Field1", reflect.TypeFor[types.String]()), - }, - }, - }, - - "[]struct to SetNestedObject": { - "nil": { - Source: &awsSliceOfNestedObjectValues{}, - Target: &tfSetOfNestedObject{}, - WantTarget: &tfSetOfNestedObject{ - Field1: fwtypes.NewSetNestedObjectValueOfNull[tfSingleStringField](ctx), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[*awsSliceOfNestedObjectValues](), reflect.TypeFor[*tfSetOfNestedObject]()), - infoConverting(reflect.TypeFor[awsSliceOfNestedObjectValues](), reflect.TypeFor[*tfSetOfNestedObject]()), - traceMatchedFields("Field1", reflect.TypeFor[awsSliceOfNestedObjectValues](), "Field1", reflect.TypeFor[*tfSetOfNestedObject]()), - infoConvertingWithPath("Field1", reflect.TypeFor[[]awsSingleStringValue](), "Field1", reflect.TypeFor[fwtypes.SetNestedObjectValueOf[tfSingleStringField]]()), - traceFlatteningWithNullValue("Field1", reflect.TypeFor[[]awsSingleStringValue](), "Field1", reflect.TypeFor[fwtypes.SetNestedObjectValueOf[tfSingleStringField]]()), - }, - }, - "empty": { - Source: &awsSliceOfNestedObjectValues{ - Field1: []awsSingleStringValue{}, - }, - Target: &tfSetOfNestedObject{}, - WantTarget: &tfSetOfNestedObject{ - Field1: fwtypes.NewSetNestedObjectValueOfValueSliceMust(ctx, []tfSingleStringField{}), - }, 
- expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[*awsSliceOfNestedObjectValues](), reflect.TypeFor[*tfSetOfNestedObject]()), - infoConverting(reflect.TypeFor[awsSliceOfNestedObjectValues](), reflect.TypeFor[*tfSetOfNestedObject]()), - traceMatchedFields("Field1", reflect.TypeFor[awsSliceOfNestedObjectValues](), "Field1", reflect.TypeFor[*tfSetOfNestedObject]()), - infoConvertingWithPath("Field1", reflect.TypeFor[[]awsSingleStringValue](), "Field1", reflect.TypeFor[fwtypes.SetNestedObjectValueOf[tfSingleStringField]]()), - traceFlatteningNestedObjectCollection("Field1", reflect.TypeFor[[]awsSingleStringValue](), 0, "Field1", reflect.TypeFor[fwtypes.SetNestedObjectValueOf[tfSingleStringField]]()), - }, - }, - "values": { - Source: &awsSliceOfNestedObjectValues{ - Field1: []awsSingleStringValue{ - {Field1: "a"}, - {Field1: "b"}, - }, - }, - Target: &tfSetOfNestedObject{}, - WantTarget: &tfSetOfNestedObject{ - Field1: fwtypes.NewSetNestedObjectValueOfValueSliceMust(ctx, []tfSingleStringField{ - {Field1: types.StringValue("a")}, - {Field1: types.StringValue("b")}, - }), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[*awsSliceOfNestedObjectValues](), reflect.TypeFor[*tfSetOfNestedObject]()), - infoConverting(reflect.TypeFor[awsSliceOfNestedObjectValues](), reflect.TypeFor[*tfSetOfNestedObject]()), - traceMatchedFields("Field1", reflect.TypeFor[awsSliceOfNestedObjectValues](), "Field1", reflect.TypeFor[*tfSetOfNestedObject]()), - infoConvertingWithPath("Field1", reflect.TypeFor[[]awsSingleStringValue](), "Field1", reflect.TypeFor[fwtypes.SetNestedObjectValueOf[tfSingleStringField]]()), - traceFlatteningNestedObjectCollection("Field1", reflect.TypeFor[[]awsSingleStringValue](), 2, "Field1", reflect.TypeFor[fwtypes.SetNestedObjectValueOf[tfSingleStringField]]()), - traceMatchedFieldsWithPath("Field1[0]", "Field1", reflect.TypeFor[awsSingleStringValue](), "Field1[0]", "Field1", reflect.TypeFor[*tfSingleStringField]()), - 
infoConvertingWithPath("Field1[0].Field1", reflect.TypeFor[string](), "Field1[0].Field1", reflect.TypeFor[types.String]()), - traceMatchedFieldsWithPath("Field1[1]", "Field1", reflect.TypeFor[awsSingleStringValue](), "Field1[1]", "Field1", reflect.TypeFor[*tfSingleStringField]()), - infoConvertingWithPath("Field1[1].Field1", reflect.TypeFor[string](), "Field1[1].Field1", reflect.TypeFor[types.String]()), - }, - }, - }, - - "legacy []struct to SetNestedObject": { - "nil": { - Source: &awsSliceOfNestedObjectValues{}, - Target: &tfSetOfNestedObjectLegacy{}, - WantTarget: &tfSetOfNestedObjectLegacy{ - Field1: fwtypes.NewSetNestedObjectValueOfValueSliceMust(ctx, []tfSingleStringField{}), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[*awsSliceOfNestedObjectValues](), reflect.TypeFor[*tfSetOfNestedObjectLegacy]()), - infoConverting(reflect.TypeFor[awsSliceOfNestedObjectValues](), reflect.TypeFor[*tfSetOfNestedObjectLegacy]()), - traceMatchedFields("Field1", reflect.TypeFor[awsSliceOfNestedObjectValues](), "Field1", reflect.TypeFor[*tfSetOfNestedObjectLegacy]()), - infoConvertingWithPath("Field1", reflect.TypeFor[[]awsSingleStringValue](), "Field1", reflect.TypeFor[fwtypes.SetNestedObjectValueOf[tfSingleStringField]]()), - debugUsingLegacyFlattener("Field1", reflect.TypeFor[[]awsSingleStringValue](), "Field1", reflect.TypeFor[fwtypes.SetNestedObjectValueOf[tfSingleStringField]]()), - }, - }, - "empty": { - Source: &awsSliceOfNestedObjectValues{ - Field1: []awsSingleStringValue{}, - }, - Target: &tfSetOfNestedObjectLegacy{}, - WantTarget: &tfSetOfNestedObjectLegacy{ - Field1: fwtypes.NewSetNestedObjectValueOfValueSliceMust(ctx, []tfSingleStringField{}), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[*awsSliceOfNestedObjectValues](), reflect.TypeFor[*tfSetOfNestedObjectLegacy]()), - infoConverting(reflect.TypeFor[awsSliceOfNestedObjectValues](), reflect.TypeFor[*tfSetOfNestedObjectLegacy]()), - 
traceMatchedFields("Field1", reflect.TypeFor[awsSliceOfNestedObjectValues](), "Field1", reflect.TypeFor[*tfSetOfNestedObjectLegacy]()), - infoConvertingWithPath("Field1", reflect.TypeFor[[]awsSingleStringValue](), "Field1", reflect.TypeFor[fwtypes.SetNestedObjectValueOf[tfSingleStringField]]()), - debugUsingLegacyFlattener("Field1", reflect.TypeFor[[]awsSingleStringValue](), "Field1", reflect.TypeFor[fwtypes.SetNestedObjectValueOf[tfSingleStringField]]()), - traceFlatteningNestedObjectCollection("Field1", reflect.TypeFor[[]awsSingleStringValue](), 0, "Field1", reflect.TypeFor[fwtypes.SetNestedObjectValueOf[tfSingleStringField]]()), - }, - }, - "values": { - Source: &awsSliceOfNestedObjectValues{ - Field1: []awsSingleStringValue{ - {Field1: "a"}, - {Field1: "b"}, - }, - }, - Target: &tfSetOfNestedObjectLegacy{}, - WantTarget: &tfSetOfNestedObjectLegacy{ - Field1: fwtypes.NewSetNestedObjectValueOfValueSliceMust(ctx, []tfSingleStringField{ - {Field1: types.StringValue("a")}, - {Field1: types.StringValue("b")}, - }), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[*awsSliceOfNestedObjectValues](), reflect.TypeFor[*tfSetOfNestedObjectLegacy]()), - infoConverting(reflect.TypeFor[awsSliceOfNestedObjectValues](), reflect.TypeFor[*tfSetOfNestedObjectLegacy]()), - traceMatchedFields("Field1", reflect.TypeFor[awsSliceOfNestedObjectValues](), "Field1", reflect.TypeFor[*tfSetOfNestedObjectLegacy]()), - infoConvertingWithPath("Field1", reflect.TypeFor[[]awsSingleStringValue](), "Field1", reflect.TypeFor[fwtypes.SetNestedObjectValueOf[tfSingleStringField]]()), - debugUsingLegacyFlattener("Field1", reflect.TypeFor[[]awsSingleStringValue](), "Field1", reflect.TypeFor[fwtypes.SetNestedObjectValueOf[tfSingleStringField]]()), - traceFlatteningNestedObjectCollection("Field1", reflect.TypeFor[[]awsSingleStringValue](), 2, "Field1", reflect.TypeFor[fwtypes.SetNestedObjectValueOf[tfSingleStringField]]()), - traceMatchedFieldsWithPath("Field1[0]", "Field1", 
reflect.TypeFor[awsSingleStringValue](), "Field1[0]", "Field1", reflect.TypeFor[*tfSingleStringField]()), - infoConvertingWithPath("Field1[0].Field1", reflect.TypeFor[string](), "Field1[0].Field1", reflect.TypeFor[types.String]()), - traceMatchedFieldsWithPath("Field1[1]", "Field1", reflect.TypeFor[awsSingleStringValue](), "Field1[1]", "Field1", reflect.TypeFor[*tfSingleStringField]()), - infoConvertingWithPath("Field1[1].Field1", reflect.TypeFor[string](), "Field1[1].Field1", reflect.TypeFor[types.String]()), - }, - }, - }, - - "[]*struct to SetNestedObject": { - "nil": { - Source: &awsSliceOfNestedObjectPointers{}, - Target: &tfSetOfNestedObject{}, - WantTarget: &tfSetOfNestedObject{ - Field1: fwtypes.NewSetNestedObjectValueOfNull[tfSingleStringField](ctx), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[*awsSliceOfNestedObjectPointers](), reflect.TypeFor[*tfSetOfNestedObject]()), - infoConverting(reflect.TypeFor[awsSliceOfNestedObjectPointers](), reflect.TypeFor[*tfSetOfNestedObject]()), - traceMatchedFields("Field1", reflect.TypeFor[awsSliceOfNestedObjectPointers](), "Field1", reflect.TypeFor[*tfSetOfNestedObject]()), - infoConvertingWithPath("Field1", reflect.TypeFor[[]*awsSingleStringValue](), "Field1", reflect.TypeFor[fwtypes.SetNestedObjectValueOf[tfSingleStringField]]()), - traceFlatteningWithNullValue("Field1", reflect.TypeFor[[]*awsSingleStringValue](), "Field1", reflect.TypeFor[fwtypes.SetNestedObjectValueOf[tfSingleStringField]]()), - }, - }, - "empty": { - Source: &awsSliceOfNestedObjectPointers{ - Field1: []*awsSingleStringValue{}, - }, - Target: &tfSetOfNestedObject{}, - WantTarget: &tfSetOfNestedObject{ - Field1: fwtypes.NewSetNestedObjectValueOfSliceMust(ctx, []*tfSingleStringField{}), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[*awsSliceOfNestedObjectPointers](), reflect.TypeFor[*tfSetOfNestedObject]()), - infoConverting(reflect.TypeFor[awsSliceOfNestedObjectPointers](), 
reflect.TypeFor[*tfSetOfNestedObject]()), - traceMatchedFields("Field1", reflect.TypeFor[awsSliceOfNestedObjectPointers](), "Field1", reflect.TypeFor[*tfSetOfNestedObject]()), - infoConvertingWithPath("Field1", reflect.TypeFor[[]*awsSingleStringValue](), "Field1", reflect.TypeFor[fwtypes.SetNestedObjectValueOf[tfSingleStringField]]()), - traceFlatteningNestedObjectCollection("Field1", reflect.TypeFor[[]*awsSingleStringValue](), 0, "Field1", reflect.TypeFor[fwtypes.SetNestedObjectValueOf[tfSingleStringField]]()), - }, - }, - "values": { - Source: &awsSliceOfNestedObjectPointers{ - Field1: []*awsSingleStringValue{ - {Field1: "a"}, - {Field1: "b"}, - }, - }, - Target: &tfSetOfNestedObject{}, - WantTarget: &tfSetOfNestedObject{ - Field1: fwtypes.NewSetNestedObjectValueOfSliceMust(ctx, []*tfSingleStringField{ - {Field1: types.StringValue("a")}, - {Field1: types.StringValue("b")}, - }), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[*awsSliceOfNestedObjectPointers](), reflect.TypeFor[*tfSetOfNestedObject]()), - infoConverting(reflect.TypeFor[awsSliceOfNestedObjectPointers](), reflect.TypeFor[*tfSetOfNestedObject]()), - traceMatchedFields("Field1", reflect.TypeFor[awsSliceOfNestedObjectPointers](), "Field1", reflect.TypeFor[*tfSetOfNestedObject]()), - infoConvertingWithPath("Field1", reflect.TypeFor[[]*awsSingleStringValue](), "Field1", reflect.TypeFor[fwtypes.SetNestedObjectValueOf[tfSingleStringField]]()), - traceFlatteningNestedObjectCollection("Field1", reflect.TypeFor[[]*awsSingleStringValue](), 2, "Field1", reflect.TypeFor[fwtypes.SetNestedObjectValueOf[tfSingleStringField]]()), - traceMatchedFieldsWithPath("Field1[0]", "Field1", reflect.TypeFor[awsSingleStringValue](), "Field1[0]", "Field1", reflect.TypeFor[*tfSingleStringField]()), - infoConvertingWithPath("Field1[0].Field1", reflect.TypeFor[string](), "Field1[0].Field1", reflect.TypeFor[types.String]()), - traceMatchedFieldsWithPath("Field1[1]", "Field1", 
reflect.TypeFor[awsSingleStringValue](), "Field1[1]", "Field1", reflect.TypeFor[*tfSingleStringField]()), - infoConvertingWithPath("Field1[1].Field1", reflect.TypeFor[string](), "Field1[1].Field1", reflect.TypeFor[types.String]()), - }, - }, - }, - - "legacy []*struct to SetNestedObject": { - "nil": { - Source: &awsSliceOfNestedObjectPointers{}, - Target: &tfSetOfNestedObjectLegacy{}, - WantTarget: &tfSetOfNestedObjectLegacy{ - Field1: fwtypes.NewSetNestedObjectValueOfSliceMust(ctx, []*tfSingleStringField{}), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[*awsSliceOfNestedObjectPointers](), reflect.TypeFor[*tfSetOfNestedObjectLegacy]()), - infoConverting(reflect.TypeFor[awsSliceOfNestedObjectPointers](), reflect.TypeFor[*tfSetOfNestedObjectLegacy]()), - traceMatchedFields("Field1", reflect.TypeFor[awsSliceOfNestedObjectPointers](), "Field1", reflect.TypeFor[*tfSetOfNestedObjectLegacy]()), - infoConvertingWithPath("Field1", reflect.TypeFor[[]*awsSingleStringValue](), "Field1", reflect.TypeFor[fwtypes.SetNestedObjectValueOf[tfSingleStringField]]()), - debugUsingLegacyFlattener("Field1", reflect.TypeFor[[]*awsSingleStringValue](), "Field1", reflect.TypeFor[fwtypes.SetNestedObjectValueOf[tfSingleStringField]]()), - }, - }, - "empty": { - Source: &awsSliceOfNestedObjectPointers{ - Field1: []*awsSingleStringValue{}, - }, - Target: &tfSetOfNestedObjectLegacy{}, - WantTarget: &tfSetOfNestedObjectLegacy{ - Field1: fwtypes.NewSetNestedObjectValueOfSliceMust(ctx, []*tfSingleStringField{}), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[*awsSliceOfNestedObjectPointers](), reflect.TypeFor[*tfSetOfNestedObjectLegacy]()), - infoConverting(reflect.TypeFor[awsSliceOfNestedObjectPointers](), reflect.TypeFor[*tfSetOfNestedObjectLegacy]()), - traceMatchedFields("Field1", reflect.TypeFor[awsSliceOfNestedObjectPointers](), "Field1", reflect.TypeFor[*tfSetOfNestedObjectLegacy]()), - infoConvertingWithPath("Field1", 
reflect.TypeFor[[]*awsSingleStringValue](), "Field1", reflect.TypeFor[fwtypes.SetNestedObjectValueOf[tfSingleStringField]]()), - debugUsingLegacyFlattener("Field1", reflect.TypeFor[[]*awsSingleStringValue](), "Field1", reflect.TypeFor[fwtypes.SetNestedObjectValueOf[tfSingleStringField]]()), - traceFlatteningNestedObjectCollection("Field1", reflect.TypeFor[[]*awsSingleStringValue](), 0, "Field1", reflect.TypeFor[fwtypes.SetNestedObjectValueOf[tfSingleStringField]]()), - }, - }, - "values": { - Source: &awsSliceOfNestedObjectPointers{ - Field1: []*awsSingleStringValue{ - {Field1: "a"}, - {Field1: "b"}, - }, - }, - Target: &tfSetOfNestedObjectLegacy{}, - WantTarget: &tfSetOfNestedObjectLegacy{ - Field1: fwtypes.NewSetNestedObjectValueOfSliceMust(ctx, []*tfSingleStringField{ - {Field1: types.StringValue("a")}, - {Field1: types.StringValue("b")}, - }), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[*awsSliceOfNestedObjectPointers](), reflect.TypeFor[*tfSetOfNestedObjectLegacy]()), - infoConverting(reflect.TypeFor[awsSliceOfNestedObjectPointers](), reflect.TypeFor[*tfSetOfNestedObjectLegacy]()), - traceMatchedFields("Field1", reflect.TypeFor[awsSliceOfNestedObjectPointers](), "Field1", reflect.TypeFor[*tfSetOfNestedObjectLegacy]()), - infoConvertingWithPath("Field1", reflect.TypeFor[[]*awsSingleStringValue](), "Field1", reflect.TypeFor[fwtypes.SetNestedObjectValueOf[tfSingleStringField]]()), - debugUsingLegacyFlattener("Field1", reflect.TypeFor[[]*awsSingleStringValue](), "Field1", reflect.TypeFor[fwtypes.SetNestedObjectValueOf[tfSingleStringField]]()), - traceFlatteningNestedObjectCollection("Field1", reflect.TypeFor[[]*awsSingleStringValue](), 2, "Field1", reflect.TypeFor[fwtypes.SetNestedObjectValueOf[tfSingleStringField]]()), - traceMatchedFieldsWithPath("Field1[0]", "Field1", reflect.TypeFor[awsSingleStringValue](), "Field1[0]", "Field1", reflect.TypeFor[*tfSingleStringField]()), - infoConvertingWithPath("Field1[0].Field1", 
reflect.TypeFor[string](), "Field1[0].Field1", reflect.TypeFor[types.String]()), - traceMatchedFieldsWithPath("Field1[1]", "Field1", reflect.TypeFor[awsSingleStringValue](), "Field1[1]", "Field1", reflect.TypeFor[*tfSingleStringField]()), - infoConvertingWithPath("Field1[1].Field1", reflect.TypeFor[string](), "Field1[1].Field1", reflect.TypeFor[types.String]()), - }, - }, - }, - } - - for testName, cases := range testCases { - t.Run(testName, func(t *testing.T) { - t.Parallel() - - runAutoFlattenTestCases(t, cases) - }) - } -} - -func TestFlattenMapBlock(t *testing.T) { - t.Parallel() - - ctx := context.Background() - - testCases := autoFlexTestCases{ - "nil map block key": { - Source: &awsMapBlockValues{ - MapBlock: nil, - }, - Target: &tfMapBlockList{}, - WantTarget: &tfMapBlockList{ - MapBlock: fwtypes.NewListNestedObjectValueOfNull[tfMapBlockElement](ctx), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[*awsMapBlockValues](), reflect.TypeFor[*tfMapBlockList]()), - infoConverting(reflect.TypeFor[awsMapBlockValues](), reflect.TypeFor[*tfMapBlockList]()), - traceMatchedFields("MapBlock", reflect.TypeFor[awsMapBlockValues](), "MapBlock", reflect.TypeFor[*tfMapBlockList]()), - infoConvertingWithPath("MapBlock", reflect.TypeFor[map[string]awsMapBlockElement](), "MapBlock", reflect.TypeFor[fwtypes.ListNestedObjectValueOf[tfMapBlockElement]]()), - traceFlatteningNullValue("MapBlock", reflect.TypeFor[map[string]awsMapBlockElement](), "MapBlock", reflect.TypeFor[fwtypes.ListNestedObjectValueOf[tfMapBlockElement]]()), - }, - }, - "map block key list": { - Source: &awsMapBlockValues{ - MapBlock: map[string]awsMapBlockElement{ - "x": { - Attr1: "a", - Attr2: "b", - }, - }, - }, - Target: &tfMapBlockList{}, - WantTarget: &tfMapBlockList{ - MapBlock: fwtypes.NewListNestedObjectValueOfValueSliceMust[tfMapBlockElement](ctx, []tfMapBlockElement{ - { - MapBlockKey: types.StringValue("x"), - Attr1: types.StringValue("a"), - Attr2: 
types.StringValue("b"), - }, - }), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[*awsMapBlockValues](), reflect.TypeFor[*tfMapBlockList]()), - infoConverting(reflect.TypeFor[awsMapBlockValues](), reflect.TypeFor[*tfMapBlockList]()), - traceMatchedFields("MapBlock", reflect.TypeFor[awsMapBlockValues](), "MapBlock", reflect.TypeFor[*tfMapBlockList]()), - infoConvertingWithPath("MapBlock", reflect.TypeFor[map[string]awsMapBlockElement](), "MapBlock", reflect.TypeFor[fwtypes.ListNestedObjectValueOf[tfMapBlockElement]]()), - traceMatchedFieldsWithPath("MapBlock[\"x\"]", "Attr1", reflect.TypeFor[awsMapBlockElement](), "MapBlock[0]", "Attr1", reflect.TypeFor[*tfMapBlockElement]()), - infoConvertingWithPath("MapBlock[\"x\"].Attr1", reflect.TypeFor[string](), "MapBlock[0].Attr1", reflect.TypeFor[types.String]()), - traceMatchedFieldsWithPath("MapBlock[\"x\"]", "Attr2", reflect.TypeFor[awsMapBlockElement](), "MapBlock[0]", "Attr2", reflect.TypeFor[*tfMapBlockElement]()), - infoConvertingWithPath("MapBlock[\"x\"].Attr2", reflect.TypeFor[string](), "MapBlock[0].Attr2", reflect.TypeFor[types.String]()), - }, - }, - "map block key set": { - Source: &awsMapBlockValues{ - MapBlock: map[string]awsMapBlockElement{ - "x": { - Attr1: "a", - Attr2: "b", - }, - }, - }, - Target: &tfMapBlockSet{}, - WantTarget: &tfMapBlockSet{ - MapBlock: fwtypes.NewSetNestedObjectValueOfValueSliceMust[tfMapBlockElement](ctx, []tfMapBlockElement{ - { - MapBlockKey: types.StringValue("x"), - Attr1: types.StringValue("a"), - Attr2: types.StringValue("b"), - }, - }), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[*awsMapBlockValues](), reflect.TypeFor[*tfMapBlockSet]()), - infoConverting(reflect.TypeFor[awsMapBlockValues](), reflect.TypeFor[*tfMapBlockSet]()), - traceMatchedFields("MapBlock", reflect.TypeFor[awsMapBlockValues](), "MapBlock", reflect.TypeFor[*tfMapBlockSet]()), - infoConvertingWithPath("MapBlock", 
reflect.TypeFor[map[string]awsMapBlockElement](), "MapBlock", reflect.TypeFor[fwtypes.SetNestedObjectValueOf[tfMapBlockElement]]()), - traceMatchedFieldsWithPath("MapBlock[\"x\"]", "Attr1", reflect.TypeFor[awsMapBlockElement](), "MapBlock[0]", "Attr1", reflect.TypeFor[*tfMapBlockElement]()), - infoConvertingWithPath("MapBlock[\"x\"].Attr1", reflect.TypeFor[string](), "MapBlock[0].Attr1", reflect.TypeFor[types.String]()), - traceMatchedFieldsWithPath("MapBlock[\"x\"]", "Attr2", reflect.TypeFor[awsMapBlockElement](), "MapBlock[0]", "Attr2", reflect.TypeFor[*tfMapBlockElement]()), - infoConvertingWithPath("MapBlock[\"x\"].Attr2", reflect.TypeFor[string](), "MapBlock[0].Attr2", reflect.TypeFor[types.String]()), - }, - }, - "nil map block key ptr": { - Source: &awsMapBlockPointers{ - MapBlock: nil, - }, - Target: &tfMapBlockList{}, - WantTarget: &tfMapBlockList{ - MapBlock: fwtypes.NewListNestedObjectValueOfNull[tfMapBlockElement](ctx), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[*awsMapBlockPointers](), reflect.TypeFor[*tfMapBlockList]()), - infoConverting(reflect.TypeFor[awsMapBlockPointers](), reflect.TypeFor[*tfMapBlockList]()), - traceMatchedFields("MapBlock", reflect.TypeFor[awsMapBlockPointers](), "MapBlock", reflect.TypeFor[*tfMapBlockList]()), - infoConvertingWithPath("MapBlock", reflect.TypeFor[map[string]*awsMapBlockElement](), "MapBlock", reflect.TypeFor[fwtypes.ListNestedObjectValueOf[tfMapBlockElement]]()), - traceFlatteningNullValue("MapBlock", reflect.TypeFor[map[string]*awsMapBlockElement](), "MapBlock", reflect.TypeFor[fwtypes.ListNestedObjectValueOf[tfMapBlockElement]]()), - }, - }, - "map block key ptr source": { - Source: &awsMapBlockPointers{ - MapBlock: map[string]*awsMapBlockElement{ - "x": { - Attr1: "a", - Attr2: "b", - }, - }, - }, - Target: &tfMapBlockList{}, - WantTarget: &tfMapBlockList{ - MapBlock: fwtypes.NewListNestedObjectValueOfValueSliceMust[tfMapBlockElement](ctx, []tfMapBlockElement{ - { - 
MapBlockKey: types.StringValue("x"), - Attr1: types.StringValue("a"), - Attr2: types.StringValue("b"), - }, - }), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[*awsMapBlockPointers](), reflect.TypeFor[*tfMapBlockList]()), - infoConverting(reflect.TypeFor[awsMapBlockPointers](), reflect.TypeFor[*tfMapBlockList]()), - traceMatchedFields("MapBlock", reflect.TypeFor[awsMapBlockPointers](), "MapBlock", reflect.TypeFor[*tfMapBlockList]()), - infoConvertingWithPath("MapBlock", reflect.TypeFor[map[string]*awsMapBlockElement](), "MapBlock", reflect.TypeFor[fwtypes.ListNestedObjectValueOf[tfMapBlockElement]]()), - traceMatchedFieldsWithPath("MapBlock[\"x\"]", "Attr1", reflect.TypeFor[awsMapBlockElement](), "MapBlock[0]", "Attr1", reflect.TypeFor[*tfMapBlockElement]()), - infoConvertingWithPath("MapBlock[\"x\"].Attr1", reflect.TypeFor[string](), "MapBlock[0].Attr1", reflect.TypeFor[types.String]()), - traceMatchedFieldsWithPath("MapBlock[\"x\"]", "Attr2", reflect.TypeFor[awsMapBlockElement](), "MapBlock[0]", "Attr2", reflect.TypeFor[*tfMapBlockElement]()), - infoConvertingWithPath("MapBlock[\"x\"].Attr2", reflect.TypeFor[string](), "MapBlock[0].Attr2", reflect.TypeFor[types.String]()), - }, - }, - "map block key ptr both": { - Source: &awsMapBlockPointers{ - MapBlock: map[string]*awsMapBlockElement{ - "x": { - Attr1: "a", - Attr2: "b", - }, - }, - }, - Target: &tfMapBlockList{}, - WantTarget: &tfMapBlockList{ - MapBlock: fwtypes.NewListNestedObjectValueOfSliceMust(ctx, []*tfMapBlockElement{ - { - MapBlockKey: types.StringValue("x"), - Attr1: types.StringValue("a"), - Attr2: types.StringValue("b"), - }, - }), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[*awsMapBlockPointers](), reflect.TypeFor[*tfMapBlockList]()), - infoConverting(reflect.TypeFor[awsMapBlockPointers](), reflect.TypeFor[*tfMapBlockList]()), - traceMatchedFields("MapBlock", reflect.TypeFor[awsMapBlockPointers](), "MapBlock", 
reflect.TypeFor[*tfMapBlockList]()), - infoConvertingWithPath("MapBlock", reflect.TypeFor[map[string]*awsMapBlockElement](), "MapBlock", reflect.TypeFor[fwtypes.ListNestedObjectValueOf[tfMapBlockElement]]()), - traceMatchedFieldsWithPath("MapBlock[\"x\"]", "Attr1", reflect.TypeFor[awsMapBlockElement](), "MapBlock[0]", "Attr1", reflect.TypeFor[*tfMapBlockElement]()), - infoConvertingWithPath("MapBlock[\"x\"].Attr1", reflect.TypeFor[string](), "MapBlock[0].Attr1", reflect.TypeFor[types.String]()), - traceMatchedFieldsWithPath("MapBlock[\"x\"]", "Attr2", reflect.TypeFor[awsMapBlockElement](), "MapBlock[0]", "Attr2", reflect.TypeFor[*tfMapBlockElement]()), - infoConvertingWithPath("MapBlock[\"x\"].Attr2", reflect.TypeFor[string](), "MapBlock[0].Attr2", reflect.TypeFor[types.String]()), - }, - }, - "map block enum key": { - Source: &awsMapBlockValues{ - MapBlock: map[string]awsMapBlockElement{ - string(testEnumList): { - Attr1: "a", - Attr2: "b", - }, - }, - }, - Target: &tfMapBlockListEnumKey{}, - WantTarget: &tfMapBlockListEnumKey{ - MapBlock: fwtypes.NewListNestedObjectValueOfValueSliceMust[tfMapBlockElementEnumKey](ctx, []tfMapBlockElementEnumKey{ - { - MapBlockKey: fwtypes.StringEnumValue(testEnumList), - Attr1: types.StringValue("a"), - Attr2: types.StringValue("b"), - }, - }), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[*awsMapBlockValues](), reflect.TypeFor[*tfMapBlockListEnumKey]()), - infoConverting(reflect.TypeFor[awsMapBlockValues](), reflect.TypeFor[*tfMapBlockListEnumKey]()), - traceMatchedFields("MapBlock", reflect.TypeFor[awsMapBlockValues](), "MapBlock", reflect.TypeFor[*tfMapBlockListEnumKey]()), - infoConvertingWithPath("MapBlock", reflect.TypeFor[map[string]awsMapBlockElement](), "MapBlock", reflect.TypeFor[fwtypes.ListNestedObjectValueOf[tfMapBlockElementEnumKey]]()), - traceMatchedFieldsWithPath("MapBlock[\"List\"]", "Attr1", reflect.TypeFor[awsMapBlockElement](), "MapBlock[0]", "Attr1", 
reflect.TypeFor[*tfMapBlockElementEnumKey]()), - infoConvertingWithPath("MapBlock[\"List\"].Attr1", reflect.TypeFor[string](), "MapBlock[0].Attr1", reflect.TypeFor[types.String]()), - traceMatchedFieldsWithPath("MapBlock[\"List\"]", "Attr2", reflect.TypeFor[awsMapBlockElement](), "MapBlock[0]", "Attr2", reflect.TypeFor[*tfMapBlockElementEnumKey]()), - infoConvertingWithPath("MapBlock[\"List\"].Attr2", reflect.TypeFor[string](), "MapBlock[0].Attr2", reflect.TypeFor[types.String]()), - }, - }, - - "map block list no key": { - Source: &awsMapBlockValues{ - MapBlock: map[string]awsMapBlockElement{ - "x": { - Attr1: "a", - Attr2: "b", - }, - }, - }, - Target: &tfMapBlockListNoKey{}, - expectedDiags: diag.Diagnostics{ - diagFlatteningNoMapBlockKey(reflect.TypeFor[tfMapBlockElementNoKey]()), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[*awsMapBlockValues](), reflect.TypeFor[*tfMapBlockListNoKey]()), - infoConverting(reflect.TypeFor[awsMapBlockValues](), reflect.TypeFor[*tfMapBlockListNoKey]()), - traceMatchedFields("MapBlock", reflect.TypeFor[awsMapBlockValues](), "MapBlock", reflect.TypeFor[*tfMapBlockListNoKey]()), - infoConvertingWithPath("MapBlock", reflect.TypeFor[map[string]awsMapBlockElement](), "MapBlock", reflect.TypeFor[fwtypes.ListNestedObjectValueOf[tfMapBlockElementNoKey]]()), - errorTargetHasNoMapBlockKey("MapBlock[\"x\"]", reflect.TypeFor[awsMapBlockElement](), "MapBlock[0]", reflect.TypeFor[tfMapBlockElementNoKey]()), - }, - }, - } - runAutoFlattenTestCases(t, testCases) -} - -func TestFlattenSimpleListOfPrimitiveValues(t *testing.T) { - t.Parallel() - - testCases := map[string]autoFlexTestCases{ - "regular": { - "values": { - Source: awsSimpleStringValueSlice{ - Field1: []string{"a", "b"}, - }, - Target: &tfSimpleList{}, - WantTarget: &tfSimpleList{ - Field1: types.ListValueMust(types.StringType, []attr.Value{ - types.StringValue("a"), - types.StringValue("b"), - }), - }, - expectedLogLines: []map[string]any{ - 
infoFlattening(reflect.TypeFor[awsSimpleStringValueSlice](), reflect.TypeFor[*tfSimpleList]()), - infoConverting(reflect.TypeFor[awsSimpleStringValueSlice](), reflect.TypeFor[*tfSimpleList]()), - traceMatchedFields("Field1", reflect.TypeFor[awsSimpleStringValueSlice](), "Field1", reflect.TypeFor[*tfSimpleList]()), - infoConvertingWithPath("Field1", reflect.TypeFor[[]string](), "Field1", reflect.TypeFor[types.List]()), - traceFlatteningWithListValue("Field1", reflect.TypeFor[[]string](), 2, "Field1", reflect.TypeFor[types.List]()), - }, - }, - - "empty": { - Source: awsSimpleStringValueSlice{ - Field1: []string{}, - }, - Target: &tfSimpleList{}, - WantTarget: &tfSimpleList{ - Field1: types.ListValueMust(types.StringType, []attr.Value{}), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[awsSimpleStringValueSlice](), reflect.TypeFor[*tfSimpleList]()), - infoConverting(reflect.TypeFor[awsSimpleStringValueSlice](), reflect.TypeFor[*tfSimpleList]()), - traceMatchedFields("Field1", reflect.TypeFor[awsSimpleStringValueSlice](), "Field1", reflect.TypeFor[*tfSimpleList]()), - infoConvertingWithPath("Field1", reflect.TypeFor[[]string](), "Field1", reflect.TypeFor[types.List]()), - traceFlatteningWithListValue("Field1", reflect.TypeFor[[]string](), 0, "Field1", reflect.TypeFor[types.List]()), - }, - }, - - "null": { - Source: awsSimpleStringValueSlice{ - Field1: nil, - }, - Target: &tfSimpleList{}, - WantTarget: &tfSimpleList{ - Field1: types.ListNull(types.StringType), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[awsSimpleStringValueSlice](), reflect.TypeFor[*tfSimpleList]()), - infoConverting(reflect.TypeFor[awsSimpleStringValueSlice](), reflect.TypeFor[*tfSimpleList]()), - traceMatchedFields("Field1", reflect.TypeFor[awsSimpleStringValueSlice](), "Field1", reflect.TypeFor[*tfSimpleList]()), - infoConvertingWithPath("Field1", reflect.TypeFor[[]string](), "Field1", reflect.TypeFor[types.List]()), - 
traceFlatteningWithListNull("Field1", reflect.TypeFor[[]string](), "Field1", reflect.TypeFor[types.List]()), - }, - }, - }, - - "legacy": { - "values": { - Source: awsSimpleStringValueSlice{ - Field1: []string{"a", "b"}, - }, - Target: &tfSimpleListLegacy{}, - WantTarget: &tfSimpleListLegacy{ - Field1: types.ListValueMust(types.StringType, []attr.Value{ - types.StringValue("a"), - types.StringValue("b"), - }), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[awsSimpleStringValueSlice](), reflect.TypeFor[*tfSimpleListLegacy]()), - infoConverting(reflect.TypeFor[awsSimpleStringValueSlice](), reflect.TypeFor[*tfSimpleListLegacy]()), - traceMatchedFields("Field1", reflect.TypeFor[awsSimpleStringValueSlice](), "Field1", reflect.TypeFor[*tfSimpleListLegacy]()), - infoConvertingWithPath("Field1", reflect.TypeFor[[]string](), "Field1", reflect.TypeFor[types.List]()), - debugUsingLegacyFlattener("Field1", reflect.TypeFor[[]string](), "Field1", reflect.TypeFor[types.List]()), - traceFlatteningWithListValue("Field1", reflect.TypeFor[[]string](), 2, "Field1", reflect.TypeFor[types.List]()), - }, - }, - - "empty": { - Source: awsSimpleStringValueSlice{ - Field1: []string{}, - }, - Target: &tfSimpleListLegacy{}, - WantTarget: &tfSimpleListLegacy{ - Field1: types.ListValueMust(types.StringType, []attr.Value{}), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[awsSimpleStringValueSlice](), reflect.TypeFor[*tfSimpleListLegacy]()), - infoConverting(reflect.TypeFor[awsSimpleStringValueSlice](), reflect.TypeFor[*tfSimpleListLegacy]()), - traceMatchedFields("Field1", reflect.TypeFor[awsSimpleStringValueSlice](), "Field1", reflect.TypeFor[*tfSimpleListLegacy]()), - infoConvertingWithPath("Field1", reflect.TypeFor[[]string](), "Field1", reflect.TypeFor[types.List]()), - debugUsingLegacyFlattener("Field1", reflect.TypeFor[[]string](), "Field1", reflect.TypeFor[types.List]()), - traceFlatteningWithListValue("Field1", 
reflect.TypeFor[[]string](), 0, "Field1", reflect.TypeFor[types.List]()), - }, - }, - - "null": { - Source: awsSimpleStringValueSlice{ - Field1: nil, - }, - Target: &tfSimpleListLegacy{}, - WantTarget: &tfSimpleListLegacy{ - Field1: types.ListValueMust(types.StringType, []attr.Value{}), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[awsSimpleStringValueSlice](), reflect.TypeFor[*tfSimpleListLegacy]()), - infoConverting(reflect.TypeFor[awsSimpleStringValueSlice](), reflect.TypeFor[*tfSimpleListLegacy]()), - traceMatchedFields("Field1", reflect.TypeFor[awsSimpleStringValueSlice](), "Field1", reflect.TypeFor[*tfSimpleListLegacy]()), - infoConvertingWithPath("Field1", reflect.TypeFor[[]string](), "Field1", reflect.TypeFor[types.List]()), - debugUsingLegacyFlattener("Field1", reflect.TypeFor[[]string](), "Field1", reflect.TypeFor[types.List]()), - }, - }, - }, - } - - for testName, cases := range testCases { - t.Run(testName, func(t *testing.T) { - t.Parallel() - - runAutoFlattenTestCases(t, cases) - }) - } -} - -func TestFlattenSimpleSetOfPrimitiveValues(t *testing.T) { - t.Parallel() - - testCases := map[string]autoFlexTestCases{ - "regular": { - "values": { - Source: awsSimpleStringValueSlice{ - Field1: []string{"a", "b"}, - }, - Target: &tfSimpleSet{}, - WantTarget: &tfSimpleSet{ - Field1: types.SetValueMust(types.StringType, []attr.Value{ - types.StringValue("a"), - types.StringValue("b"), - }), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[awsSimpleStringValueSlice](), reflect.TypeFor[*tfSimpleSet]()), - infoConverting(reflect.TypeFor[awsSimpleStringValueSlice](), reflect.TypeFor[*tfSimpleSet]()), - traceMatchedFields("Field1", reflect.TypeFor[awsSimpleStringValueSlice](), "Field1", reflect.TypeFor[*tfSimpleSet]()), - infoConvertingWithPath("Field1", reflect.TypeFor[[]string](), "Field1", reflect.TypeFor[types.Set]()), - traceFlatteningWithSetValue("Field1", reflect.TypeFor[[]string](), 2, "Field1", 
reflect.TypeFor[types.Set]()), - }, - }, - - "empty": { - Source: awsSimpleStringValueSlice{ - Field1: []string{}, - }, - Target: &tfSimpleSet{}, - WantTarget: &tfSimpleSet{ - Field1: types.SetValueMust(types.StringType, []attr.Value{}), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[awsSimpleStringValueSlice](), reflect.TypeFor[*tfSimpleSet]()), - infoConverting(reflect.TypeFor[awsSimpleStringValueSlice](), reflect.TypeFor[*tfSimpleSet]()), - traceMatchedFields("Field1", reflect.TypeFor[awsSimpleStringValueSlice](), "Field1", reflect.TypeFor[*tfSimpleSet]()), - infoConvertingWithPath("Field1", reflect.TypeFor[[]string](), "Field1", reflect.TypeFor[types.Set]()), - traceFlatteningWithSetValue("Field1", reflect.TypeFor[[]string](), 0, "Field1", reflect.TypeFor[types.Set]()), - }, - }, - - "null": { - Source: awsSimpleStringValueSlice{ - Field1: nil, - }, - Target: &tfSimpleSet{}, - WantTarget: &tfSimpleSet{ - Field1: types.SetNull(types.StringType), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[awsSimpleStringValueSlice](), reflect.TypeFor[*tfSimpleSet]()), - infoConverting(reflect.TypeFor[awsSimpleStringValueSlice](), reflect.TypeFor[*tfSimpleSet]()), - traceMatchedFields("Field1", reflect.TypeFor[awsSimpleStringValueSlice](), "Field1", reflect.TypeFor[*tfSimpleSet]()), - infoConvertingWithPath("Field1", reflect.TypeFor[[]string](), "Field1", reflect.TypeFor[types.Set]()), - traceFlatteningWithSetNull("Field1", reflect.TypeFor[[]string](), "Field1", reflect.TypeFor[types.Set]()), - }, - }, - }, - - "legacy": { - "values": { - Source: awsSimpleStringValueSlice{ - Field1: []string{"a", "b"}, - }, - Target: &tfSimpleSetLegacy{}, - WantTarget: &tfSimpleSetLegacy{ - Field1: types.SetValueMust(types.StringType, []attr.Value{ - types.StringValue("a"), - types.StringValue("b"), - }), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[awsSimpleStringValueSlice](), 
reflect.TypeFor[*tfSimpleSetLegacy]()), - infoConverting(reflect.TypeFor[awsSimpleStringValueSlice](), reflect.TypeFor[*tfSimpleSetLegacy]()), - traceMatchedFields("Field1", reflect.TypeFor[awsSimpleStringValueSlice](), "Field1", reflect.TypeFor[*tfSimpleSetLegacy]()), - infoConvertingWithPath("Field1", reflect.TypeFor[[]string](), "Field1", reflect.TypeFor[types.Set]()), - debugUsingLegacyFlattener("Field1", reflect.TypeFor[[]string](), "Field1", reflect.TypeFor[types.Set]()), - traceFlatteningWithSetValue("Field1", reflect.TypeFor[[]string](), 2, "Field1", reflect.TypeFor[types.Set]()), - }, - }, - - "empty": { - Source: awsSimpleStringValueSlice{ - Field1: []string{}, - }, - Target: &tfSimpleSetLegacy{}, - WantTarget: &tfSimpleSetLegacy{ - Field1: types.SetValueMust(types.StringType, []attr.Value{}), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[awsSimpleStringValueSlice](), reflect.TypeFor[*tfSimpleSetLegacy]()), - infoConverting(reflect.TypeFor[awsSimpleStringValueSlice](), reflect.TypeFor[*tfSimpleSetLegacy]()), - traceMatchedFields("Field1", reflect.TypeFor[awsSimpleStringValueSlice](), "Field1", reflect.TypeFor[*tfSimpleSetLegacy]()), - infoConvertingWithPath("Field1", reflect.TypeFor[[]string](), "Field1", reflect.TypeFor[types.Set]()), - debugUsingLegacyFlattener("Field1", reflect.TypeFor[[]string](), "Field1", reflect.TypeFor[types.Set]()), - traceFlatteningWithSetValue("Field1", reflect.TypeFor[[]string](), 0, "Field1", reflect.TypeFor[types.Set]()), - }, - }, - - "null": { - Source: awsSimpleStringValueSlice{ - Field1: nil, - }, - Target: &tfSimpleSetLegacy{}, - WantTarget: &tfSimpleSetLegacy{ - Field1: types.SetValueMust(types.StringType, []attr.Value{}), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[awsSimpleStringValueSlice](), reflect.TypeFor[*tfSimpleSetLegacy]()), - infoConverting(reflect.TypeFor[awsSimpleStringValueSlice](), reflect.TypeFor[*tfSimpleSetLegacy]()), - 
traceMatchedFields("Field1", reflect.TypeFor[awsSimpleStringValueSlice](), "Field1", reflect.TypeFor[*tfSimpleSetLegacy]()), - infoConvertingWithPath("Field1", reflect.TypeFor[[]string](), "Field1", reflect.TypeFor[types.Set]()), - debugUsingLegacyFlattener("Field1", reflect.TypeFor[[]string](), "Field1", reflect.TypeFor[types.Set]()), - }, - }, - }, - } - - for testName, cases := range testCases { - t.Run(testName, func(t *testing.T) { - t.Parallel() - - runAutoFlattenTestCases(t, cases) - }) - } -} - -func TestFlattenOptions(t *testing.T) { - t.Parallel() - - type tf01 struct { - Field1 types.Bool `tfsdk:"field1"` - Tags fwtypes.MapValueOf[types.String] `tfsdk:"tags"` - } - type aws01 struct { - Field1 bool - Tags map[string]string - } - - // For test cases below where a field of `MapValue` type is ignored, the - // result of `cmp.Diff` is intentionally not checked. - // - // When a target contains an ignored field of a `MapValue` type, the resulting - // target will contain a zero value, which, because the `elementType` is nil, will - // always return `false` from the `Equal` method, even when compared with another - // zero value. In practice, this zeroed `MapValue` would be overwritten - // by a subsequent step (ie. transparent tagging), and the temporary invalid - // state of the zeroed `MapValue` will not appear in the final state. 
- // - // Example expected diff: - // unexpected diff (+wanted, -got): &flex.tf01{ - // Field1: s"false", - // - Tags: types.MapValueOf[github.com/hashicorp/terraform-plugin-framework/types/types.String]{}, - // + Tags: types.MapValueOf[github.com/hashicorp/terraform-plugin-framework/types/types.String]{MapValue: types.Map{elementType: basetypes.StringType{}}}, - // } - ctx := context.Background() - testCases := autoFlexTestCases{ - "empty source with tags": { - Source: &aws01{}, - Target: &tf01{}, - WantTarget: &tf01{ - Field1: types.BoolValue(false), - Tags: fwtypes.NewMapValueOfNull[types.String](ctx), - }, - WantDiff: true, // Ignored MapValue type, expect diff - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[*aws01](), reflect.TypeFor[*tf01]()), - infoConverting(reflect.TypeFor[aws01](), reflect.TypeFor[*tf01]()), - traceMatchedFields("Field1", reflect.TypeFor[aws01](), "Field1", reflect.TypeFor[*tf01]()), - infoConvertingWithPath("Field1", reflect.TypeFor[bool](), "Field1", reflect.TypeFor[types.Bool]()), - traceSkipIgnoredSourceField(reflect.TypeFor[aws01](), "Tags", reflect.TypeFor[*tf01]()), - }, - }, - "ignore tags by default": { - Source: &aws01{ - Field1: true, - Tags: map[string]string{"foo": "bar"}, - }, - Target: &tf01{}, - WantTarget: &tf01{ - Field1: types.BoolValue(true), - Tags: fwtypes.NewMapValueOfNull[types.String](ctx), - }, - WantDiff: true, // Ignored MapValue type, expect diff - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[*aws01](), reflect.TypeFor[*tf01]()), - infoConverting(reflect.TypeFor[aws01](), reflect.TypeFor[*tf01]()), - traceMatchedFields("Field1", reflect.TypeFor[aws01](), "Field1", reflect.TypeFor[*tf01]()), - infoConvertingWithPath("Field1", reflect.TypeFor[bool](), "Field1", reflect.TypeFor[types.Bool]()), - traceSkipIgnoredSourceField(reflect.TypeFor[aws01](), "Tags", reflect.TypeFor[*tf01]()), - }, - }, - "include tags with option override": { - Options: 
[]AutoFlexOptionsFunc{WithNoIgnoredFieldNames()}, - Source: &aws01{ - Field1: true, - Tags: map[string]string{"foo": "bar"}, - }, - Target: &tf01{}, - WantTarget: &tf01{ - Field1: types.BoolValue(true), - Tags: fwtypes.NewMapValueOfMust[types.String](ctx, map[string]attr.Value{ - "foo": types.StringValue("bar"), - }), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[*aws01](), reflect.TypeFor[*tf01]()), - infoConverting(reflect.TypeFor[aws01](), reflect.TypeFor[*tf01]()), - traceMatchedFields("Field1", reflect.TypeFor[aws01](), "Field1", reflect.TypeFor[*tf01]()), - infoConvertingWithPath("Field1", reflect.TypeFor[bool](), "Field1", reflect.TypeFor[types.Bool]()), - traceMatchedFields("Tags", reflect.TypeFor[aws01](), "Tags", reflect.TypeFor[*tf01]()), - infoConvertingWithPath("Tags", reflect.TypeFor[map[string]string](), "Tags", reflect.TypeFor[fwtypes.MapValueOf[types.String]]()), - traceFlatteningWithMapValue("Tags", reflect.TypeFor[map[string]string](), 1, "Tags", reflect.TypeFor[fwtypes.MapValueOf[types.String]]()), - }, - }, - "ignore custom field": { - Options: []AutoFlexOptionsFunc{WithIgnoredFieldNames([]string{"Field1"})}, - Source: &aws01{ - Field1: true, - Tags: map[string]string{"foo": "bar"}, - }, - Target: &tf01{}, - WantTarget: &tf01{ - Field1: types.BoolNull(), - Tags: fwtypes.NewMapValueOfMust[types.String]( - ctx, - map[string]attr.Value{ - "foo": types.StringValue("bar"), - }, - ), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[*aws01](), reflect.TypeFor[*tf01]()), - infoConverting(reflect.TypeFor[aws01](), reflect.TypeFor[*tf01]()), - traceSkipIgnoredSourceField(reflect.TypeFor[aws01](), "Field1", reflect.TypeFor[*tf01]()), - traceMatchedFields("Tags", reflect.TypeFor[aws01](), "Tags", reflect.TypeFor[*tf01]()), - infoConvertingWithPath("Tags", reflect.TypeFor[map[string]string](), "Tags", reflect.TypeFor[fwtypes.MapValueOf[types.String]]()), - traceFlatteningWithMapValue("Tags", 
reflect.TypeFor[map[string]string](), 1, "Tags", reflect.TypeFor[fwtypes.MapValueOf[types.String]]()), - }, - }, - } - runAutoFlattenTestCases(t, testCases) -} - -func TestFlattenIgnoreStructTag(t *testing.T) { - t.Parallel() - - testCases := autoFlexTestCases{ - "from value": { - Source: awsSingleStringValue{ - Field1: "value1", - }, - Target: &tfSingleStringFieldIgnore{}, - WantTarget: &tfSingleStringFieldIgnore{}, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[awsSingleStringValue](), reflect.TypeFor[*tfSingleStringFieldIgnore]()), - infoConverting(reflect.TypeFor[awsSingleStringValue](), reflect.TypeFor[*tfSingleStringFieldIgnore]()), - traceSkipIgnoredTargetField(reflect.TypeFor[awsSingleStringValue](), "Field1", reflect.TypeFor[*tfSingleStringFieldIgnore](), "Field1"), - }, - }, - "from pointer": { - Source: awsSingleStringPointer{ - Field1: aws.String("value1"), - }, - Target: &tfSingleStringFieldIgnore{}, - WantTarget: &tfSingleStringFieldIgnore{}, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[awsSingleStringPointer](), reflect.TypeFor[*tfSingleStringFieldIgnore]()), - infoConverting(reflect.TypeFor[awsSingleStringPointer](), reflect.TypeFor[*tfSingleStringFieldIgnore]()), - traceSkipIgnoredTargetField(reflect.TypeFor[awsSingleStringPointer](), "Field1", reflect.TypeFor[*tfSingleStringFieldIgnore](), "Field1"), - }, - }, - } - - runAutoFlattenTestCases(t, testCases) -} - -func TestFlattenInterfaceToStringTypable(t *testing.T) { - t.Parallel() - - testCases := autoFlexTestCases{ - "json interface Source string Target": { - Source: &awsJSONStringer{ - Field1: &testJSONDocument{ - Value: &struct { - Test string `json:"test"` - }{ - Test: "a", - }, - }, - }, - Target: &tfSingleStringField{}, - WantTarget: &tfSingleStringField{ - Field1: types.StringValue(`{"test":"a"}`), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[*awsJSONStringer](), reflect.TypeFor[*tfSingleStringField]()), - 
infoConverting(reflect.TypeFor[awsJSONStringer](), reflect.TypeFor[*tfSingleStringField]()), - traceMatchedFields("Field1", reflect.TypeFor[awsJSONStringer](), "Field1", reflect.TypeFor[*tfSingleStringField]()), - infoConvertingWithPath("Field1", reflect.TypeFor[smithyjson.JSONStringer](), "Field1", reflect.TypeFor[types.String]()), - // infoSourceImplementsJSONStringer("Field1", reflect.TypeFor[testJSONDocument](), "Field1", reflect.TypeFor[types.String]()), - infoSourceImplementsJSONStringer("Field1", reflect.TypeFor[smithyjson.JSONStringer](), "Field1", reflect.TypeFor[types.String]()), // TODO: fix source type - }, - }, - "null json interface Source string Target": { - Source: &awsJSONStringer{ - Field1: nil, - }, - Target: &tfSingleStringField{}, - WantTarget: &tfSingleStringField{ - Field1: types.StringNull(), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[*awsJSONStringer](), reflect.TypeFor[*tfSingleStringField]()), - infoConverting(reflect.TypeFor[awsJSONStringer](), reflect.TypeFor[*tfSingleStringField]()), - traceMatchedFields("Field1", reflect.TypeFor[awsJSONStringer](), "Field1", reflect.TypeFor[*tfSingleStringField]()), - infoConvertingWithPath("Field1", reflect.TypeFor[smithyjson.JSONStringer](), "Field1", reflect.TypeFor[types.String]()), - // infoSourceImplementsJSONStringer("Field1", reflect.TypeFor[testJSONDocument](), "Field1", reflect.TypeFor[types.String]()), - infoSourceImplementsJSONStringer("Field1", reflect.TypeFor[smithyjson.JSONStringer](), "Field1", reflect.TypeFor[types.String]()), // TODO: fix source type - traceFlatteningNullValue("Field1", reflect.TypeFor[smithyjson.JSONStringer](), "Field1", reflect.TypeFor[types.String]()), - }, - }, - - "json interface Source JSONValue Target": { - Source: &awsJSONStringer{ - Field1: &testJSONDocument{ - Value: &struct { - Test string `json:"test"` - }{ - Test: "a", - }, - }, - }, - Target: &tfJSONStringer{}, - WantTarget: &tfJSONStringer{ - Field1: 
fwtypes.SmithyJSONValue(`{"test":"a"}`, newTestJSONDocument), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[*awsJSONStringer](), reflect.TypeFor[*tfJSONStringer]()), - infoConverting(reflect.TypeFor[awsJSONStringer](), reflect.TypeFor[*tfJSONStringer]()), - traceMatchedFields("Field1", reflect.TypeFor[awsJSONStringer](), "Field1", reflect.TypeFor[*tfJSONStringer]()), - infoConvertingWithPath("Field1", reflect.TypeFor[smithyjson.JSONStringer](), "Field1", reflect.TypeFor[fwtypes.SmithyJSON[smithyjson.JSONStringer]]()), - // infoSourceImplementsJSONStringer("Field1", reflect.TypeFor[testJSONDocument](), "Field1", reflect.TypeFor[fwtypes.SmithyJSON[smithyjson.JSONStringer]]()), - infoSourceImplementsJSONStringer("Field1", reflect.TypeFor[smithyjson.JSONStringer](), "Field1", reflect.TypeFor[fwtypes.SmithyJSON[smithyjson.JSONStringer]]()), // TODO: fix source type - }, - }, - "null json interface Source JSONValue Target": { - Source: &awsJSONStringer{ - Field1: nil, - }, - Target: &tfJSONStringer{}, - WantTarget: &tfJSONStringer{ - Field1: fwtypes.SmithyJSONNull[smithyjson.JSONStringer](), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[*awsJSONStringer](), reflect.TypeFor[*tfJSONStringer]()), - infoConverting(reflect.TypeFor[awsJSONStringer](), reflect.TypeFor[*tfJSONStringer]()), - traceMatchedFields("Field1", reflect.TypeFor[awsJSONStringer](), "Field1", reflect.TypeFor[*tfJSONStringer]()), - infoConvertingWithPath("Field1", reflect.TypeFor[smithyjson.JSONStringer](), "Field1", reflect.TypeFor[fwtypes.SmithyJSON[smithyjson.JSONStringer]]()), - // infoSourceImplementsJSONStringer("Field1", reflect.TypeFor[testJSONDocument](), "Field1", reflect.TypeFor[fwtypes.SmithyJSON[smithyjson.JSONStringer]]()), - infoSourceImplementsJSONStringer("Field1", reflect.TypeFor[smithyjson.JSONStringer](), "Field1", reflect.TypeFor[fwtypes.SmithyJSON[smithyjson.JSONStringer]]()), // TODO: fix source type - 
traceFlatteningNullValue("Field1", reflect.TypeFor[smithyjson.JSONStringer](), "Field1", reflect.TypeFor[fwtypes.SmithyJSON[smithyjson.JSONStringer]]()), - }, - }, - - "json interface Source marshal error": { - Source: &awsJSONStringer{ - Field1: &testJSONDocumentError{}, - }, - Target: &tfSingleStringField{}, - expectedDiags: diag.Diagnostics{ - diagFlatteningMarshalSmithyDocument(reflect.TypeFor[*testJSONDocumentError](), errMarshallSmithyDocument), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[*awsJSONStringer](), reflect.TypeFor[*tfSingleStringField]()), - infoConverting(reflect.TypeFor[awsJSONStringer](), reflect.TypeFor[*tfSingleStringField]()), - traceMatchedFields("Field1", reflect.TypeFor[awsJSONStringer](), "Field1", reflect.TypeFor[*tfSingleStringField]()), - infoConvertingWithPath("Field1", reflect.TypeFor[smithyjson.JSONStringer](), "Field1", reflect.TypeFor[types.String]()), - // infoSourceImplementsJSONStringer("Field1", reflect.TypeFor[testJSONDocument](), "Field1", reflect.TypeFor[types.String]()), - infoSourceImplementsJSONStringer("Field1", reflect.TypeFor[smithyjson.JSONStringer](), "Field1", reflect.TypeFor[types.String]()), // TODO: fix source type - errorMarshallingJSONDocument("Field1", reflect.TypeFor[smithyjson.JSONStringer](), "Field1", reflect.TypeFor[types.String](), errMarshallSmithyDocument), - }, - }, - - "non-json interface Source string Target": { - Source: awsInterfaceSingle{ - Field1: &awsInterfaceInterfaceImpl{ - AWSField: "value1", - }, - }, - Target: &tfSingleStringField{}, - WantTarget: &tfSingleStringField{ - Field1: types.StringNull(), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[awsInterfaceSingle](), reflect.TypeFor[*tfSingleStringField]()), - infoConverting(reflect.TypeFor[awsInterfaceSingle](), reflect.TypeFor[*tfSingleStringField]()), - traceMatchedFields("Field1", reflect.TypeFor[awsInterfaceSingle](), "Field1", reflect.TypeFor[*tfSingleStringField]()), - 
infoConvertingWithPath("Field1", reflect.TypeFor[awsInterfaceInterface](), "Field1", reflect.TypeFor[types.String]()), - errorFlatteningIncompatibleTypes("Field1", reflect.TypeFor[awsInterfaceInterface](), "Field1", reflect.TypeFor[types.String]()), - }, - }, - - "null non-json interface Source string Target": { - Source: awsInterfaceSingle{ - Field1: nil, - }, - Target: &tfSingleStringField{}, - WantTarget: &tfSingleStringField{ - Field1: types.StringNull(), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[awsInterfaceSingle](), reflect.TypeFor[*tfSingleStringField]()), - infoConverting(reflect.TypeFor[awsInterfaceSingle](), reflect.TypeFor[*tfSingleStringField]()), - traceMatchedFields("Field1", reflect.TypeFor[awsInterfaceSingle](), "Field1", reflect.TypeFor[*tfSingleStringField]()), - infoConvertingWithPath("Field1", reflect.TypeFor[awsInterfaceInterface](), "Field1", reflect.TypeFor[types.String]()), - errorFlatteningIncompatibleTypes("Field1", reflect.TypeFor[awsInterfaceInterface](), "Field1", reflect.TypeFor[types.String]()), - }, - }, - } - - runAutoFlattenTestCases(t, testCases) -} - -func TestFlattenInterface(t *testing.T) { - t.Parallel() - - ctx := context.Background() - - testCases := autoFlexTestCases{ - "nil interface Source and list Target": { - Source: awsInterfaceSingle{ - Field1: nil, - }, - Target: &tfListNestedObject[tfInterfaceFlexer]{}, - WantTarget: &tfListNestedObject[tfInterfaceFlexer]{ - Field1: fwtypes.NewListNestedObjectValueOfNull[tfInterfaceFlexer](ctx), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[awsInterfaceSingle](), reflect.TypeFor[*tfListNestedObject[tfInterfaceFlexer]]()), - infoConverting(reflect.TypeFor[awsInterfaceSingle](), reflect.TypeFor[*tfListNestedObject[tfInterfaceFlexer]]()), - traceMatchedFields("Field1", reflect.TypeFor[awsInterfaceSingle](), "Field1", reflect.TypeFor[*tfListNestedObject[tfInterfaceFlexer]]()), - infoConvertingWithPath("Field1", 
reflect.TypeFor[awsInterfaceInterface](), "Field1", reflect.TypeFor[fwtypes.ListNestedObjectValueOf[tfInterfaceFlexer]]()), - }, - }, - "single interface Source and single list Target": { - Source: awsInterfaceSingle{ - Field1: &awsInterfaceInterfaceImpl{ - AWSField: "value1", - }, - }, - Target: &tfListNestedObject[tfInterfaceFlexer]{}, - WantTarget: &tfListNestedObject[tfInterfaceFlexer]{ - Field1: fwtypes.NewListNestedObjectValueOfValueSliceMust(ctx, []tfInterfaceFlexer{ - { - Field1: types.StringValue("value1"), - }, - }), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[awsInterfaceSingle](), reflect.TypeFor[*tfListNestedObject[tfInterfaceFlexer]]()), - infoConverting(reflect.TypeFor[awsInterfaceSingle](), reflect.TypeFor[*tfListNestedObject[tfInterfaceFlexer]]()), - traceMatchedFields("Field1", reflect.TypeFor[awsInterfaceSingle](), "Field1", reflect.TypeFor[*tfListNestedObject[tfInterfaceFlexer]]()), - infoConvertingWithPath("Field1", reflect.TypeFor[awsInterfaceInterface](), "Field1", reflect.TypeFor[fwtypes.ListNestedObjectValueOf[tfInterfaceFlexer]]()), - infoTargetImplementsFlexFlattener("Field1", reflect.TypeFor[awsInterfaceInterface](), "Field1", reflect.TypeFor[fwtypes.ListNestedObjectValueOf[tfInterfaceFlexer]]()), - }, - }, - "nil interface Source and non-Flattener list Target": { - Source: awsInterfaceSingle{ - Field1: nil, - }, - Target: &tfListNestedObject[tfSingleStringField]{}, - WantTarget: &tfListNestedObject[tfSingleStringField]{ - Field1: fwtypes.NewListNestedObjectValueOfNull[tfSingleStringField](ctx), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[awsInterfaceSingle](), reflect.TypeFor[*tfListNestedObject[tfSingleStringField]]()), - infoConverting(reflect.TypeFor[awsInterfaceSingle](), reflect.TypeFor[*tfListNestedObject[tfSingleStringField]]()), - traceMatchedFields("Field1", reflect.TypeFor[awsInterfaceSingle](), "Field1", 
reflect.TypeFor[*tfListNestedObject[tfSingleStringField]]()), - infoConvertingWithPath("Field1", reflect.TypeFor[awsInterfaceInterface](), "Field1", reflect.TypeFor[fwtypes.ListNestedObjectValueOf[tfSingleStringField]]()), - }, - }, - "single interface Source and non-Flattener list Target": { - Source: awsInterfaceSingle{ - Field1: &awsInterfaceInterfaceImpl{ - AWSField: "value1", - }, - }, - Target: &tfListNestedObject[tfSingleStringField]{}, - WantTarget: &tfListNestedObject[tfSingleStringField]{ - Field1: fwtypes.NewListNestedObjectValueOfNull[tfSingleStringField](ctx), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[awsInterfaceSingle](), reflect.TypeFor[*tfListNestedObject[tfSingleStringField]]()), - infoConverting(reflect.TypeFor[awsInterfaceSingle](), reflect.TypeFor[*tfListNestedObject[tfSingleStringField]]()), - traceMatchedFields("Field1", reflect.TypeFor[awsInterfaceSingle](), "Field1", reflect.TypeFor[*tfListNestedObject[tfSingleStringField]]()), - infoConvertingWithPath("Field1", reflect.TypeFor[awsInterfaceInterface](), "Field1", reflect.TypeFor[fwtypes.ListNestedObjectValueOf[tfSingleStringField]]()), - { - "@level": "error", - "@module": "provider.autoflex", - "@message": "AutoFlex Flatten; incompatible types", - "from": float64(reflect.Interface), - "to": map[string]any{ - "ElemType": map[string]any{ - "AttrTypes": map[string]any{ - "field1": map[string]any{}, - }, - }, - }, - logAttrKeySourcePath: "Field1", - logAttrKeySourceType: fullTypeName(reflect.TypeFor[awsInterfaceInterface]()), - logAttrKeyTargetPath: "Field1", - logAttrKeyTargetType: fullTypeName(reflect.TypeFor[fwtypes.ListNestedObjectValueOf[tfSingleStringField]]()), - }, - }, - }, - - "nil interface Source and set Target": { - Source: awsInterfaceSingle{ - Field1: nil, - }, - Target: &tfSetNestedObject[tfInterfaceFlexer]{}, - WantTarget: &tfSetNestedObject[tfInterfaceFlexer]{ - Field1: fwtypes.NewSetNestedObjectValueOfNull[tfInterfaceFlexer](ctx), - }, - 
expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[awsInterfaceSingle](), reflect.TypeFor[*tfSetNestedObject[tfInterfaceFlexer]]()), - infoConverting(reflect.TypeFor[awsInterfaceSingle](), reflect.TypeFor[*tfSetNestedObject[tfInterfaceFlexer]]()), - traceMatchedFields("Field1", reflect.TypeFor[awsInterfaceSingle](), "Field1", reflect.TypeFor[*tfSetNestedObject[tfInterfaceFlexer]]()), - infoConvertingWithPath("Field1", reflect.TypeFor[awsInterfaceInterface](), "Field1", reflect.TypeFor[fwtypes.SetNestedObjectValueOf[tfInterfaceFlexer]]()), - }, - }, - "single interface Source and single set Target": { - Source: awsInterfaceSingle{ - Field1: &awsInterfaceInterfaceImpl{ - AWSField: "value1", - }, - }, - Target: &tfSetNestedObject[tfInterfaceFlexer]{}, - WantTarget: &tfSetNestedObject[tfInterfaceFlexer]{ - Field1: fwtypes.NewSetNestedObjectValueOfValueSliceMust(ctx, []tfInterfaceFlexer{ - { - Field1: types.StringValue("value1"), - }, - }), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[awsInterfaceSingle](), reflect.TypeFor[*tfSetNestedObject[tfInterfaceFlexer]]()), - infoConverting(reflect.TypeFor[awsInterfaceSingle](), reflect.TypeFor[*tfSetNestedObject[tfInterfaceFlexer]]()), - traceMatchedFields("Field1", reflect.TypeFor[awsInterfaceSingle](), "Field1", reflect.TypeFor[*tfSetNestedObject[tfInterfaceFlexer]]()), - infoConvertingWithPath("Field1", reflect.TypeFor[awsInterfaceInterface](), "Field1", reflect.TypeFor[fwtypes.SetNestedObjectValueOf[tfInterfaceFlexer]]()), - infoTargetImplementsFlexFlattener("Field1", reflect.TypeFor[awsInterfaceInterface](), "Field1", reflect.TypeFor[fwtypes.SetNestedObjectValueOf[tfInterfaceFlexer]]()), - }, - }, - - "nil interface list Source and empty list Target": { - Source: awsInterfaceSlice{ - Field1: nil, - }, - Target: &tfListNestedObject[tfInterfaceFlexer]{}, - WantTarget: &tfListNestedObject[tfInterfaceFlexer]{ - Field1: 
fwtypes.NewListNestedObjectValueOfNull[tfInterfaceFlexer](ctx), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[awsInterfaceSlice](), reflect.TypeFor[*tfListNestedObject[tfInterfaceFlexer]]()), - infoConverting(reflect.TypeFor[awsInterfaceSlice](), reflect.TypeFor[*tfListNestedObject[tfInterfaceFlexer]]()), - traceMatchedFields("Field1", reflect.TypeFor[awsInterfaceSlice](), "Field1", reflect.TypeFor[*tfListNestedObject[tfInterfaceFlexer]]()), - infoConvertingWithPath("Field1", reflect.TypeFor[[]awsInterfaceInterface](), "Field1", reflect.TypeFor[fwtypes.ListNestedObjectValueOf[tfInterfaceFlexer]]()), - traceFlatteningWithNullValue("Field1", reflect.TypeFor[[]awsInterfaceInterface](), "Field1", reflect.TypeFor[fwtypes.ListNestedObjectValueOf[tfInterfaceFlexer]]()), - }, - }, - "empty interface list Source and empty list Target": { - Source: awsInterfaceSlice{ - Field1: []awsInterfaceInterface{}, - }, - Target: &tfListNestedObject[tfInterfaceFlexer]{}, - WantTarget: &tfListNestedObject[tfInterfaceFlexer]{ - Field1: fwtypes.NewListNestedObjectValueOfValueSliceMust(ctx, []tfInterfaceFlexer{}), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[awsInterfaceSlice](), reflect.TypeFor[*tfListNestedObject[tfInterfaceFlexer]]()), - infoConverting(reflect.TypeFor[awsInterfaceSlice](), reflect.TypeFor[*tfListNestedObject[tfInterfaceFlexer]]()), - traceMatchedFields("Field1", reflect.TypeFor[awsInterfaceSlice](), "Field1", reflect.TypeFor[*tfListNestedObject[tfInterfaceFlexer]]()), - infoConvertingWithPath("Field1", reflect.TypeFor[[]awsInterfaceInterface](), "Field1", reflect.TypeFor[fwtypes.ListNestedObjectValueOf[tfInterfaceFlexer]]()), - traceFlatteningNestedObjectCollection("Field1", reflect.TypeFor[[]awsInterfaceInterface](), 0, "Field1", reflect.TypeFor[fwtypes.ListNestedObjectValueOf[tfInterfaceFlexer]]()), - }, - }, - "non-empty interface list Source and non-empty list Target": { - Source: awsInterfaceSlice{ - 
Field1: []awsInterfaceInterface{ - &awsInterfaceInterfaceImpl{ - AWSField: "value1", - }, - &awsInterfaceInterfaceImpl{ - AWSField: "value2", - }, - }, - }, - Target: &tfListNestedObject[tfInterfaceFlexer]{}, - WantTarget: &tfListNestedObject[tfInterfaceFlexer]{ - Field1: fwtypes.NewListNestedObjectValueOfValueSliceMust(ctx, []tfInterfaceFlexer{ - { - Field1: types.StringValue("value1"), - }, - { - Field1: types.StringValue("value2"), - }, - }), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[awsInterfaceSlice](), reflect.TypeFor[*tfListNestedObject[tfInterfaceFlexer]]()), - infoConverting(reflect.TypeFor[awsInterfaceSlice](), reflect.TypeFor[*tfListNestedObject[tfInterfaceFlexer]]()), - traceMatchedFields("Field1", reflect.TypeFor[awsInterfaceSlice](), "Field1", reflect.TypeFor[*tfListNestedObject[tfInterfaceFlexer]]()), - infoConvertingWithPath("Field1", reflect.TypeFor[[]awsInterfaceInterface](), "Field1", reflect.TypeFor[fwtypes.ListNestedObjectValueOf[tfInterfaceFlexer]]()), - traceFlatteningNestedObjectCollection("Field1", reflect.TypeFor[[]awsInterfaceInterface](), 2, "Field1", reflect.TypeFor[fwtypes.ListNestedObjectValueOf[tfInterfaceFlexer]]()), - infoTargetImplementsFlexFlattener("Field1[0]", reflect.TypeFor[awsInterfaceInterfaceImpl](), "Field1[0]", reflect.TypeFor[*tfInterfaceFlexer]()), - infoTargetImplementsFlexFlattener("Field1[1]", reflect.TypeFor[awsInterfaceInterfaceImpl](), "Field1[1]", reflect.TypeFor[*tfInterfaceFlexer]()), - }, - }, - - "nil interface list Source and empty set Target": { - Source: awsInterfaceSlice{ - Field1: nil, - }, - Target: &tfSetNestedObject[tfInterfaceFlexer]{}, - WantTarget: &tfSetNestedObject[tfInterfaceFlexer]{ - Field1: fwtypes.NewSetNestedObjectValueOfNull[tfInterfaceFlexer](ctx), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[awsInterfaceSlice](), reflect.TypeFor[*tfSetNestedObject[tfInterfaceFlexer]]()), - 
infoConverting(reflect.TypeFor[awsInterfaceSlice](), reflect.TypeFor[*tfSetNestedObject[tfInterfaceFlexer]]()), - traceMatchedFields("Field1", reflect.TypeFor[awsInterfaceSlice](), "Field1", reflect.TypeFor[*tfSetNestedObject[tfInterfaceFlexer]]()), - infoConvertingWithPath("Field1", reflect.TypeFor[[]awsInterfaceInterface](), "Field1", reflect.TypeFor[fwtypes.SetNestedObjectValueOf[tfInterfaceFlexer]]()), - traceFlatteningWithNullValue("Field1", reflect.TypeFor[[]awsInterfaceInterface](), "Field1", reflect.TypeFor[fwtypes.SetNestedObjectValueOf[tfInterfaceFlexer]]()), - }, - }, - "empty interface list Source and empty set Target": { - Source: awsInterfaceSlice{ - Field1: []awsInterfaceInterface{}, - }, - Target: &tfSetNestedObject[tfInterfaceFlexer]{}, - WantTarget: &tfSetNestedObject[tfInterfaceFlexer]{ - Field1: fwtypes.NewSetNestedObjectValueOfValueSliceMust(ctx, []tfInterfaceFlexer{}), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[awsInterfaceSlice](), reflect.TypeFor[*tfSetNestedObject[tfInterfaceFlexer]]()), - infoConverting(reflect.TypeFor[awsInterfaceSlice](), reflect.TypeFor[*tfSetNestedObject[tfInterfaceFlexer]]()), - traceMatchedFields("Field1", reflect.TypeFor[awsInterfaceSlice](), "Field1", reflect.TypeFor[*tfSetNestedObject[tfInterfaceFlexer]]()), - infoConvertingWithPath("Field1", reflect.TypeFor[[]awsInterfaceInterface](), "Field1", reflect.TypeFor[fwtypes.SetNestedObjectValueOf[tfInterfaceFlexer]]()), - traceFlatteningNestedObjectCollection("Field1", reflect.TypeFor[[]awsInterfaceInterface](), 0, "Field1", reflect.TypeFor[fwtypes.SetNestedObjectValueOf[tfInterfaceFlexer]]()), - }, - }, - "non-empty interface list Source and non-empty set Target": { - Source: awsInterfaceSlice{ - Field1: []awsInterfaceInterface{ - &awsInterfaceInterfaceImpl{ - AWSField: "value1", - }, - &awsInterfaceInterfaceImpl{ - AWSField: "value2", - }, - }, - }, - Target: &tfSetNestedObject[tfInterfaceFlexer]{}, - WantTarget: 
&tfSetNestedObject[tfInterfaceFlexer]{ - Field1: fwtypes.NewSetNestedObjectValueOfValueSliceMust(ctx, []tfInterfaceFlexer{ - { - Field1: types.StringValue("value1"), - }, - { - Field1: types.StringValue("value2"), - }, - }), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[awsInterfaceSlice](), reflect.TypeFor[*tfSetNestedObject[tfInterfaceFlexer]]()), - infoConverting(reflect.TypeFor[awsInterfaceSlice](), reflect.TypeFor[*tfSetNestedObject[tfInterfaceFlexer]]()), - traceMatchedFields("Field1", reflect.TypeFor[awsInterfaceSlice](), "Field1", reflect.TypeFor[*tfSetNestedObject[tfInterfaceFlexer]]()), - infoConvertingWithPath("Field1", reflect.TypeFor[[]awsInterfaceInterface](), "Field1", reflect.TypeFor[fwtypes.SetNestedObjectValueOf[tfInterfaceFlexer]]()), - traceFlatteningNestedObjectCollection("Field1", reflect.TypeFor[[]awsInterfaceInterface](), 2, "Field1", reflect.TypeFor[fwtypes.SetNestedObjectValueOf[tfInterfaceFlexer]]()), - infoTargetImplementsFlexFlattener("Field1[0]", reflect.TypeFor[awsInterfaceInterfaceImpl](), "Field1[0]", reflect.TypeFor[*tfInterfaceFlexer]()), - infoTargetImplementsFlexFlattener("Field1[1]", reflect.TypeFor[awsInterfaceInterfaceImpl](), "Field1[1]", reflect.TypeFor[*tfInterfaceFlexer]()), - }, - }, - "nil interface Source and nested object Target": { - Source: awsInterfaceSingle{ - Field1: nil, - }, - Target: &tfObjectValue[tfInterfaceFlexer]{}, - WantTarget: &tfObjectValue[tfInterfaceFlexer]{ - Field1: fwtypes.NewObjectValueOfNull[tfInterfaceFlexer](ctx), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[awsInterfaceSingle](), reflect.TypeFor[*tfObjectValue[tfInterfaceFlexer]]()), - infoConverting(reflect.TypeFor[awsInterfaceSingle](), reflect.TypeFor[*tfObjectValue[tfInterfaceFlexer]]()), - traceMatchedFields("Field1", reflect.TypeFor[awsInterfaceSingle](), "Field1", reflect.TypeFor[*tfObjectValue[tfInterfaceFlexer]]()), - infoConvertingWithPath("Field1", 
reflect.TypeFor[awsInterfaceInterface](), "Field1", reflect.TypeFor[fwtypes.ObjectValueOf[tfInterfaceFlexer]]()), - }, - }, - "interface Source and nested object Target": { - Source: awsInterfaceSingle{ - Field1: &awsInterfaceInterfaceImpl{ - AWSField: "value1", - }, - }, - Target: &tfObjectValue[tfInterfaceFlexer]{}, - WantTarget: &tfObjectValue[tfInterfaceFlexer]{ - Field1: fwtypes.NewObjectValueOfMust(ctx, &tfInterfaceFlexer{ - Field1: types.StringValue("value1"), - }), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[awsInterfaceSingle](), reflect.TypeFor[*tfObjectValue[tfInterfaceFlexer]]()), - infoConverting(reflect.TypeFor[awsInterfaceSingle](), reflect.TypeFor[*tfObjectValue[tfInterfaceFlexer]]()), - traceMatchedFields("Field1", reflect.TypeFor[awsInterfaceSingle](), "Field1", reflect.TypeFor[*tfObjectValue[tfInterfaceFlexer]]()), - infoConvertingWithPath("Field1", reflect.TypeFor[awsInterfaceInterface](), "Field1", reflect.TypeFor[fwtypes.ObjectValueOf[tfInterfaceFlexer]]()), - infoTargetImplementsFlexFlattener("Field1", reflect.TypeFor[awsInterfaceInterface](), "Field1", reflect.TypeFor[fwtypes.ObjectValueOf[tfInterfaceFlexer]]()), - }, - }, - } - runAutoFlattenTestCases(t, testCases) -} - -func TestFlattenFlattener(t *testing.T) { - t.Parallel() - - ctx := context.Background() - - testCases := autoFlexTestCases{ - "top level struct Source": { - Source: awsExpander{ - AWSField: "value1", - }, - Target: &tfFlexer{}, - WantTarget: &tfFlexer{ - Field1: types.StringValue("value1"), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[awsExpander](), reflect.TypeFor[*tfFlexer]()), - infoConverting(reflect.TypeFor[awsExpander](), reflect.TypeFor[*tfFlexer]()), - infoTargetImplementsFlexFlattener("", reflect.TypeFor[awsExpander](), "", reflect.TypeFor[*tfFlexer]()), - }, - }, - "top level incompatible struct Target": { - Source: awsExpanderIncompatible{ - Incompatible: 123, - }, - Target: &tfFlexer{}, - 
WantTarget: &tfFlexer{ - Field1: types.StringNull(), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[awsExpanderIncompatible](), reflect.TypeFor[*tfFlexer]()), - infoConverting(reflect.TypeFor[awsExpanderIncompatible](), reflect.TypeFor[*tfFlexer]()), - infoTargetImplementsFlexFlattener("", reflect.TypeFor[awsExpanderIncompatible](), "", reflect.TypeFor[*tfFlexer]()), - }, - }, - "single struct Source and single list Target": { - Source: awsExpanderSingleStruct{ - Field1: awsExpander{ - AWSField: "value1", - }, - }, - Target: &tfExpanderListNestedObject{}, - WantTarget: &tfExpanderListNestedObject{ - Field1: fwtypes.NewListNestedObjectValueOfValueSliceMust(ctx, []tfFlexer{ - { - Field1: types.StringValue("value1"), - }, - }), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[awsExpanderSingleStruct](), reflect.TypeFor[*tfExpanderListNestedObject]()), - infoConverting(reflect.TypeFor[awsExpanderSingleStruct](), reflect.TypeFor[*tfExpanderListNestedObject]()), - traceMatchedFields("Field1", reflect.TypeFor[awsExpanderSingleStruct](), "Field1", reflect.TypeFor[*tfExpanderListNestedObject]()), - infoConvertingWithPath("Field1", reflect.TypeFor[awsExpander](), "Field1", reflect.TypeFor[fwtypes.ListNestedObjectValueOf[tfFlexer]]()), - infoTargetImplementsFlexFlattener("Field1", reflect.TypeFor[awsExpander](), "Field1", reflect.TypeFor[*tfFlexer]()), - }, - }, - "nil *struct Source and null list Target": { - Source: awsExpanderSinglePtr{ - Field1: nil, - }, - Target: &tfExpanderListNestedObject{}, - WantTarget: &tfExpanderListNestedObject{ - Field1: fwtypes.NewListNestedObjectValueOfNull[tfFlexer](ctx), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[awsExpanderSinglePtr](), reflect.TypeFor[*tfExpanderListNestedObject]()), - infoConverting(reflect.TypeFor[awsExpanderSinglePtr](), reflect.TypeFor[*tfExpanderListNestedObject]()), - traceMatchedFields("Field1", 
reflect.TypeFor[awsExpanderSinglePtr](), "Field1", reflect.TypeFor[*tfExpanderListNestedObject]()), - infoConvertingWithPath("Field1", reflect.TypeFor[*awsExpander](), "Field1", reflect.TypeFor[fwtypes.ListNestedObjectValueOf[tfFlexer]]()), - }, - }, - "single struct Source and single set Target": { - Source: awsExpanderSingleStruct{ - Field1: awsExpander{ - AWSField: "value1", - }, - }, - Target: &tfExpanderSetNestedObject{}, - WantTarget: &tfExpanderSetNestedObject{ - Field1: fwtypes.NewSetNestedObjectValueOfValueSliceMust(ctx, []tfFlexer{ - { - Field1: types.StringValue("value1"), - }, - }), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[awsExpanderSingleStruct](), reflect.TypeFor[*tfExpanderSetNestedObject]()), - infoConverting(reflect.TypeFor[awsExpanderSingleStruct](), reflect.TypeFor[*tfExpanderSetNestedObject]()), - traceMatchedFields("Field1", reflect.TypeFor[awsExpanderSingleStruct](), "Field1", reflect.TypeFor[*tfExpanderSetNestedObject]()), - infoConvertingWithPath("Field1", reflect.TypeFor[awsExpander](), "Field1", reflect.TypeFor[fwtypes.SetNestedObjectValueOf[tfFlexer]]()), - infoTargetImplementsFlexFlattener("Field1", reflect.TypeFor[awsExpander](), "Field1", reflect.TypeFor[*tfFlexer]()), - }, - }, - "single *struct Source and single list Target": { - Source: awsExpanderSinglePtr{ - Field1: &awsExpander{ - AWSField: "value1", - }, - }, - Target: &tfExpanderListNestedObject{}, - WantTarget: &tfExpanderListNestedObject{ - Field1: fwtypes.NewListNestedObjectValueOfValueSliceMust(ctx, []tfFlexer{ - { - Field1: types.StringValue("value1"), - }, - }), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[awsExpanderSinglePtr](), reflect.TypeFor[*tfExpanderListNestedObject]()), - infoConverting(reflect.TypeFor[awsExpanderSinglePtr](), reflect.TypeFor[*tfExpanderListNestedObject]()), - traceMatchedFields("Field1", reflect.TypeFor[awsExpanderSinglePtr](), "Field1", 
reflect.TypeFor[*tfExpanderListNestedObject]()), - infoConvertingWithPath("Field1", reflect.TypeFor[*awsExpander](), "Field1", reflect.TypeFor[fwtypes.ListNestedObjectValueOf[tfFlexer]]()), - infoTargetImplementsFlexFlattener("Field1", reflect.TypeFor[awsExpander](), "Field1", reflect.TypeFor[*tfFlexer]()), - }, - }, - "single *struct Source and single set Target": { - Source: awsExpanderSinglePtr{ - Field1: &awsExpander{ - AWSField: "value1", - }, - }, - Target: &tfExpanderSetNestedObject{}, - WantTarget: &tfExpanderSetNestedObject{ - Field1: fwtypes.NewSetNestedObjectValueOfValueSliceMust(ctx, []tfFlexer{ - { - Field1: types.StringValue("value1"), - }, - }), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[awsExpanderSinglePtr](), reflect.TypeFor[*tfExpanderSetNestedObject]()), - infoConverting(reflect.TypeFor[awsExpanderSinglePtr](), reflect.TypeFor[*tfExpanderSetNestedObject]()), - traceMatchedFields("Field1", reflect.TypeFor[awsExpanderSinglePtr](), "Field1", reflect.TypeFor[*tfExpanderSetNestedObject]()), - infoConvertingWithPath("Field1", reflect.TypeFor[*awsExpander](), "Field1", reflect.TypeFor[fwtypes.SetNestedObjectValueOf[tfFlexer]]()), - infoTargetImplementsFlexFlattener("Field1", reflect.TypeFor[awsExpander](), "Field1", reflect.TypeFor[*tfFlexer]()), - }, - }, - "nil *struct Source and null set Target": { - Source: awsExpanderSinglePtr{ - Field1: nil, - }, - Target: &tfExpanderSetNestedObject{}, - WantTarget: &tfExpanderSetNestedObject{ - Field1: fwtypes.NewSetNestedObjectValueOfNull[tfFlexer](ctx), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[awsExpanderSinglePtr](), reflect.TypeFor[*tfExpanderSetNestedObject]()), - infoConverting(reflect.TypeFor[awsExpanderSinglePtr](), reflect.TypeFor[*tfExpanderSetNestedObject]()), - traceMatchedFields("Field1", reflect.TypeFor[awsExpanderSinglePtr](), "Field1", reflect.TypeFor[*tfExpanderSetNestedObject]()), - infoConvertingWithPath("Field1", 
reflect.TypeFor[*awsExpander](), "Field1", reflect.TypeFor[fwtypes.SetNestedObjectValueOf[tfFlexer]]()), - }, - }, - - "empty struct list Source and empty list Target": { - Source: &awsExpanderStructSlice{ - Field1: []awsExpander{}, - }, - Target: &tfExpanderListNestedObject{}, - WantTarget: &tfExpanderListNestedObject{ - Field1: fwtypes.NewListNestedObjectValueOfValueSliceMust(ctx, []tfFlexer{}), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[*awsExpanderStructSlice](), reflect.TypeFor[*tfExpanderListNestedObject]()), - infoConverting(reflect.TypeFor[awsExpanderStructSlice](), reflect.TypeFor[*tfExpanderListNestedObject]()), - traceMatchedFields("Field1", reflect.TypeFor[awsExpanderStructSlice](), "Field1", reflect.TypeFor[*tfExpanderListNestedObject]()), - infoConvertingWithPath("Field1", reflect.TypeFor[[]awsExpander](), "Field1", reflect.TypeFor[fwtypes.ListNestedObjectValueOf[tfFlexer]]()), - traceFlatteningNestedObjectCollection("Field1", reflect.TypeFor[[]awsExpander](), 0, "Field1", reflect.TypeFor[fwtypes.ListNestedObjectValueOf[tfFlexer]]()), - }, - }, - "non-empty struct list Source and non-empty list Target": { - Source: &awsExpanderStructSlice{ - Field1: []awsExpander{ - { - AWSField: "value1", - }, - { - AWSField: "value2", - }, - }, - }, - Target: &tfExpanderListNestedObject{}, - WantTarget: &tfExpanderListNestedObject{ - Field1: fwtypes.NewListNestedObjectValueOfValueSliceMust(ctx, []tfFlexer{ - { - Field1: types.StringValue("value1"), - }, - { - Field1: types.StringValue("value2"), - }, - }), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[*awsExpanderStructSlice](), reflect.TypeFor[*tfExpanderListNestedObject]()), - infoConverting(reflect.TypeFor[awsExpanderStructSlice](), reflect.TypeFor[*tfExpanderListNestedObject]()), - traceMatchedFields("Field1", reflect.TypeFor[awsExpanderStructSlice](), "Field1", reflect.TypeFor[*tfExpanderListNestedObject]()), - infoConvertingWithPath("Field1", 
reflect.TypeFor[[]awsExpander](), "Field1", reflect.TypeFor[fwtypes.ListNestedObjectValueOf[tfFlexer]]()), - traceFlatteningNestedObjectCollection("Field1", reflect.TypeFor[[]awsExpander](), 2, "Field1", reflect.TypeFor[fwtypes.ListNestedObjectValueOf[tfFlexer]]()), - infoTargetImplementsFlexFlattener("Field1[0]", reflect.TypeFor[awsExpander](), "Field1[0]", reflect.TypeFor[*tfFlexer]()), - infoTargetImplementsFlexFlattener("Field1[1]", reflect.TypeFor[awsExpander](), "Field1[1]", reflect.TypeFor[*tfFlexer]()), - }, - }, - "empty *struct list Source and empty list Target": { - Source: &awsExpanderPtrSlice{ - Field1: []*awsExpander{}, - }, - Target: &tfExpanderListNestedObject{}, - WantTarget: &tfExpanderListNestedObject{ - Field1: fwtypes.NewListNestedObjectValueOfValueSliceMust(ctx, []tfFlexer{}), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[*awsExpanderPtrSlice](), reflect.TypeFor[*tfExpanderListNestedObject]()), - infoConverting(reflect.TypeFor[awsExpanderPtrSlice](), reflect.TypeFor[*tfExpanderListNestedObject]()), - traceMatchedFields("Field1", reflect.TypeFor[awsExpanderPtrSlice](), "Field1", reflect.TypeFor[*tfExpanderListNestedObject]()), - infoConvertingWithPath("Field1", reflect.TypeFor[[]*awsExpander](), "Field1", reflect.TypeFor[fwtypes.ListNestedObjectValueOf[tfFlexer]]()), - traceFlatteningNestedObjectCollection("Field1", reflect.TypeFor[[]*awsExpander](), 0, "Field1", reflect.TypeFor[fwtypes.ListNestedObjectValueOf[tfFlexer]]()), - }, - }, - "non-empty *struct list Source and non-empty list Target": { - Source: &awsExpanderPtrSlice{ - Field1: []*awsExpander{ - { - AWSField: "value1", - }, - { - AWSField: "value2", - }, - }, - }, - Target: &tfExpanderListNestedObject{}, - WantTarget: &tfExpanderListNestedObject{ - Field1: fwtypes.NewListNestedObjectValueOfValueSliceMust(ctx, []tfFlexer{ - { - Field1: types.StringValue("value1"), - }, - { - Field1: types.StringValue("value2"), - }, - }), - }, - expectedLogLines: 
[]map[string]any{ - infoFlattening(reflect.TypeFor[*awsExpanderPtrSlice](), reflect.TypeFor[*tfExpanderListNestedObject]()), - infoConverting(reflect.TypeFor[awsExpanderPtrSlice](), reflect.TypeFor[*tfExpanderListNestedObject]()), - traceMatchedFields("Field1", reflect.TypeFor[awsExpanderPtrSlice](), "Field1", reflect.TypeFor[*tfExpanderListNestedObject]()), - infoConvertingWithPath("Field1", reflect.TypeFor[[]*awsExpander](), "Field1", reflect.TypeFor[fwtypes.ListNestedObjectValueOf[tfFlexer]]()), - traceFlatteningNestedObjectCollection("Field1", reflect.TypeFor[[]*awsExpander](), 2, "Field1", reflect.TypeFor[fwtypes.ListNestedObjectValueOf[tfFlexer]]()), - infoTargetImplementsFlexFlattener("Field1[0]", reflect.TypeFor[awsExpander](), "Field1[0]", reflect.TypeFor[*tfFlexer]()), - infoTargetImplementsFlexFlattener("Field1[1]", reflect.TypeFor[awsExpander](), "Field1[1]", reflect.TypeFor[*tfFlexer]()), - }, - }, - "empty struct list Source and empty set Target": { - Source: awsExpanderStructSlice{ - Field1: []awsExpander{}, - }, - Target: &tfExpanderSetNestedObject{}, - WantTarget: &tfExpanderSetNestedObject{ - Field1: fwtypes.NewSetNestedObjectValueOfValueSliceMust(ctx, []tfFlexer{}), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[awsExpanderStructSlice](), reflect.TypeFor[*tfExpanderSetNestedObject]()), - infoConverting(reflect.TypeFor[awsExpanderStructSlice](), reflect.TypeFor[*tfExpanderSetNestedObject]()), - traceMatchedFields("Field1", reflect.TypeFor[awsExpanderStructSlice](), "Field1", reflect.TypeFor[*tfExpanderSetNestedObject]()), - infoConvertingWithPath("Field1", reflect.TypeFor[[]awsExpander](), "Field1", reflect.TypeFor[fwtypes.SetNestedObjectValueOf[tfFlexer]]()), - traceFlatteningNestedObjectCollection("Field1", reflect.TypeFor[[]awsExpander](), 0, "Field1", reflect.TypeFor[fwtypes.SetNestedObjectValueOf[tfFlexer]]()), - }, - }, - "non-empty struct list Source and set Target": { - Source: awsExpanderStructSlice{ - Field1: 
[]awsExpander{ - { - AWSField: "value1", - }, - { - AWSField: "value2", - }, - }, - }, - Target: &tfExpanderSetNestedObject{}, - WantTarget: &tfExpanderSetNestedObject{ - Field1: fwtypes.NewSetNestedObjectValueOfValueSliceMust(ctx, []tfFlexer{ - { - Field1: types.StringValue("value1"), - }, - { - Field1: types.StringValue("value2"), - }, - }), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[awsExpanderStructSlice](), reflect.TypeFor[*tfExpanderSetNestedObject]()), - infoConverting(reflect.TypeFor[awsExpanderStructSlice](), reflect.TypeFor[*tfExpanderSetNestedObject]()), - traceMatchedFields("Field1", reflect.TypeFor[awsExpanderStructSlice](), "Field1", reflect.TypeFor[*tfExpanderSetNestedObject]()), - infoConvertingWithPath("Field1", reflect.TypeFor[[]awsExpander](), "Field1", reflect.TypeFor[fwtypes.SetNestedObjectValueOf[tfFlexer]]()), - traceFlatteningNestedObjectCollection("Field1", reflect.TypeFor[[]awsExpander](), 2, "Field1", reflect.TypeFor[fwtypes.SetNestedObjectValueOf[tfFlexer]]()), - infoTargetImplementsFlexFlattener("Field1[0]", reflect.TypeFor[awsExpander](), "Field1[0]", reflect.TypeFor[*tfFlexer]()), - infoTargetImplementsFlexFlattener("Field1[1]", reflect.TypeFor[awsExpander](), "Field1[1]", reflect.TypeFor[*tfFlexer]()), - }, - }, - "empty *struct list Source and empty set Target": { - Source: awsExpanderPtrSlice{ - Field1: []*awsExpander{}, - }, - Target: &tfExpanderSetNestedObject{}, - WantTarget: &tfExpanderSetNestedObject{ - Field1: fwtypes.NewSetNestedObjectValueOfValueSliceMust(ctx, []tfFlexer{}), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[awsExpanderPtrSlice](), reflect.TypeFor[*tfExpanderSetNestedObject]()), - infoConverting(reflect.TypeFor[awsExpanderPtrSlice](), reflect.TypeFor[*tfExpanderSetNestedObject]()), - traceMatchedFields("Field1", reflect.TypeFor[awsExpanderPtrSlice](), "Field1", reflect.TypeFor[*tfExpanderSetNestedObject]()), - infoConvertingWithPath("Field1", 
reflect.TypeFor[[]*awsExpander](), "Field1", reflect.TypeFor[fwtypes.SetNestedObjectValueOf[tfFlexer]]()), - traceFlatteningNestedObjectCollection("Field1", reflect.TypeFor[[]*awsExpander](), 0, "Field1", reflect.TypeFor[fwtypes.SetNestedObjectValueOf[tfFlexer]]()), - }, - }, - "non-empty *struct list Source and non-empty set Target": { - Source: awsExpanderPtrSlice{ - Field1: []*awsExpander{ - { - AWSField: "value1", - }, - { - AWSField: "value2", - }, - }, - }, - Target: &tfExpanderSetNestedObject{}, - WantTarget: &tfExpanderSetNestedObject{ - Field1: fwtypes.NewSetNestedObjectValueOfValueSliceMust(ctx, []tfFlexer{ - { - Field1: types.StringValue("value1"), - }, - { - Field1: types.StringValue("value2"), - }, - }), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[awsExpanderPtrSlice](), reflect.TypeFor[*tfExpanderSetNestedObject]()), - infoConverting(reflect.TypeFor[awsExpanderPtrSlice](), reflect.TypeFor[*tfExpanderSetNestedObject]()), - traceMatchedFields("Field1", reflect.TypeFor[awsExpanderPtrSlice](), "Field1", reflect.TypeFor[*tfExpanderSetNestedObject]()), - infoConvertingWithPath("Field1", reflect.TypeFor[[]*awsExpander](), "Field1", reflect.TypeFor[fwtypes.SetNestedObjectValueOf[tfFlexer]]()), - traceFlatteningNestedObjectCollection("Field1", reflect.TypeFor[[]*awsExpander](), 2, "Field1", reflect.TypeFor[fwtypes.SetNestedObjectValueOf[tfFlexer]]()), - infoTargetImplementsFlexFlattener("Field1[0]", reflect.TypeFor[awsExpander](), "Field1[0]", reflect.TypeFor[*tfFlexer]()), - infoTargetImplementsFlexFlattener("Field1[1]", reflect.TypeFor[awsExpander](), "Field1[1]", reflect.TypeFor[*tfFlexer]()), - }, - }, - "struct Source and object value Target": { - Source: awsExpanderSingleStruct{ - Field1: awsExpander{ - AWSField: "value1", - }, - }, - Target: &tfExpanderObjectValue{}, - WantTarget: &tfExpanderObjectValue{ - Field1: fwtypes.NewObjectValueOfMust(ctx, &tfFlexer{ - Field1: types.StringValue("value1"), - }), - }, - 
expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[awsExpanderSingleStruct](), reflect.TypeFor[*tfExpanderObjectValue]()), - infoConverting(reflect.TypeFor[awsExpanderSingleStruct](), reflect.TypeFor[*tfExpanderObjectValue]()), - traceMatchedFields("Field1", reflect.TypeFor[awsExpanderSingleStruct](), "Field1", reflect.TypeFor[*tfExpanderObjectValue]()), - infoConvertingWithPath("Field1", reflect.TypeFor[awsExpander](), "Field1", reflect.TypeFor[fwtypes.ObjectValueOf[tfFlexer]]()), - infoTargetImplementsFlexFlattener("Field1", reflect.TypeFor[awsExpander](), "Field1", reflect.TypeFor[*tfFlexer]()), - }, - }, - "*struct Source and object value Target": { - Source: awsExpanderSinglePtr{ - Field1: &awsExpander{ - AWSField: "value1", - }, - }, - Target: &tfExpanderObjectValue{}, - WantTarget: &tfExpanderObjectValue{ - Field1: fwtypes.NewObjectValueOfMust(ctx, &tfFlexer{ - Field1: types.StringValue("value1"), - }), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[awsExpanderSinglePtr](), reflect.TypeFor[*tfExpanderObjectValue]()), - infoConverting(reflect.TypeFor[awsExpanderSinglePtr](), reflect.TypeFor[*tfExpanderObjectValue]()), - traceMatchedFields("Field1", reflect.TypeFor[awsExpanderSinglePtr](), "Field1", reflect.TypeFor[*tfExpanderObjectValue]()), - infoConvertingWithPath("Field1", reflect.TypeFor[*awsExpander](), "Field1", reflect.TypeFor[fwtypes.ObjectValueOf[tfFlexer]]()), - infoTargetImplementsFlexFlattener("Field1", reflect.TypeFor[awsExpander](), "Field1", reflect.TypeFor[*tfFlexer]()), - }, - }, - } - runAutoFlattenTestCases(t, testCases) -} - -func TestFlattenStructListOfStringEnum(t *testing.T) { - t.Parallel() - - ctx := context.Background() - - testCases := map[string]autoFlexTestCases{ - "struct with list of string enum": { - "valid value": { - Source: awsSliceOfStringEnum{ - Field1: []testEnum{testEnumScalar, testEnumList}, - }, - Target: &tfListOfStringEnum{}, - WantTarget: &tfListOfStringEnum{ - Field1: 
fwtypes.NewListValueOfMust[fwtypes.StringEnum[testEnum]](ctx, []attr.Value{ - fwtypes.StringEnumValue(testEnumScalar), - fwtypes.StringEnumValue(testEnumList), - }), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[awsSliceOfStringEnum](), reflect.TypeFor[*tfListOfStringEnum]()), - infoConverting(reflect.TypeFor[awsSliceOfStringEnum](), reflect.TypeFor[*tfListOfStringEnum]()), - traceMatchedFields("Field1", reflect.TypeFor[awsSliceOfStringEnum](), "Field1", reflect.TypeFor[*tfListOfStringEnum]()), - infoConvertingWithPath("Field1", reflect.TypeFor[[]testEnum](), "Field1", reflect.TypeFor[fwtypes.ListValueOf[fwtypes.StringEnum[testEnum]]]()), - traceFlatteningWithListValue("Field1", reflect.TypeFor[[]testEnum](), 2, "Field1", reflect.TypeFor[fwtypes.ListValueOf[fwtypes.StringEnum[testEnum]]]()), - }, - }, - "empty value": { - Source: awsSliceOfStringEnum{ - Field1: []testEnum{}, - }, - Target: &tfListOfStringEnum{}, - WantTarget: &tfListOfStringEnum{ - Field1: fwtypes.NewListValueOfMust[fwtypes.StringEnum[testEnum]](ctx, []attr.Value{}), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[awsSliceOfStringEnum](), reflect.TypeFor[*tfListOfStringEnum]()), - infoConverting(reflect.TypeFor[awsSliceOfStringEnum](), reflect.TypeFor[*tfListOfStringEnum]()), - traceMatchedFields("Field1", reflect.TypeFor[awsSliceOfStringEnum](), "Field1", reflect.TypeFor[*tfListOfStringEnum]()), - infoConvertingWithPath("Field1", reflect.TypeFor[[]testEnum](), "Field1", reflect.TypeFor[fwtypes.ListValueOf[fwtypes.StringEnum[testEnum]]]()), - traceFlatteningWithListValue("Field1", reflect.TypeFor[[]testEnum](), 0, "Field1", reflect.TypeFor[fwtypes.ListValueOf[fwtypes.StringEnum[testEnum]]]()), - }, - }, - "null value": { - Source: awsSliceOfStringEnum{}, - Target: &tfListOfStringEnum{}, - WantTarget: &tfListOfStringEnum{ - Field1: fwtypes.NewListValueOfNull[fwtypes.StringEnum[testEnum]](ctx), - }, - expectedLogLines: []map[string]any{ - 
infoFlattening(reflect.TypeFor[awsSliceOfStringEnum](), reflect.TypeFor[*tfListOfStringEnum]()), - infoConverting(reflect.TypeFor[awsSliceOfStringEnum](), reflect.TypeFor[*tfListOfStringEnum]()), - traceMatchedFields("Field1", reflect.TypeFor[awsSliceOfStringEnum](), "Field1", reflect.TypeFor[*tfListOfStringEnum]()), - infoConvertingWithPath("Field1", reflect.TypeFor[[]testEnum](), "Field1", reflect.TypeFor[fwtypes.ListValueOf[fwtypes.StringEnum[testEnum]]]()), - traceFlatteningWithListNull("Field1", reflect.TypeFor[[]testEnum](), "Field1", reflect.TypeFor[fwtypes.ListValueOf[fwtypes.StringEnum[testEnum]]]()), - }, - }, - }, - } - - for testName, cases := range testCases { - t.Run(testName, func(t *testing.T) { - t.Parallel() - - runAutoFlattenTestCases(t, cases) - }) - } -} - -func TestFlattenStructSetOfStringEnum(t *testing.T) { - t.Parallel() - - ctx := context.Background() - - testCases := map[string]autoFlexTestCases{ - "struct with set of string enum": { - "valid value": { - Source: awsSliceOfStringEnum{ - Field1: []testEnum{testEnumScalar, testEnumList}, - }, - Target: &tfSetOfStringEnum{}, - WantTarget: &tfSetOfStringEnum{ - Field1: fwtypes.NewSetValueOfMust[fwtypes.StringEnum[testEnum]](ctx, []attr.Value{ - fwtypes.StringEnumValue(testEnumScalar), - fwtypes.StringEnumValue(testEnumList), - }), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[awsSliceOfStringEnum](), reflect.TypeFor[*tfSetOfStringEnum]()), - infoConverting(reflect.TypeFor[awsSliceOfStringEnum](), reflect.TypeFor[*tfSetOfStringEnum]()), - traceMatchedFields("Field1", reflect.TypeFor[awsSliceOfStringEnum](), "Field1", reflect.TypeFor[*tfSetOfStringEnum]()), - infoConvertingWithPath("Field1", reflect.TypeFor[[]testEnum](), "Field1", reflect.TypeFor[fwtypes.SetValueOf[fwtypes.StringEnum[testEnum]]]()), - traceFlatteningWithSetValue("Field1", reflect.TypeFor[[]testEnum](), 2, "Field1", reflect.TypeFor[fwtypes.SetValueOf[fwtypes.StringEnum[testEnum]]]()), - }, - }, - 
"empty value": { - Source: awsSliceOfStringEnum{ - Field1: []testEnum{}, - }, - Target: &tfSetOfStringEnum{}, - WantTarget: &tfSetOfStringEnum{ - Field1: fwtypes.NewSetValueOfMust[fwtypes.StringEnum[testEnum]](ctx, []attr.Value{}), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[awsSliceOfStringEnum](), reflect.TypeFor[*tfSetOfStringEnum]()), - infoConverting(reflect.TypeFor[awsSliceOfStringEnum](), reflect.TypeFor[*tfSetOfStringEnum]()), - traceMatchedFields("Field1", reflect.TypeFor[awsSliceOfStringEnum](), "Field1", reflect.TypeFor[*tfSetOfStringEnum]()), - infoConvertingWithPath("Field1", reflect.TypeFor[[]testEnum](), "Field1", reflect.TypeFor[fwtypes.SetValueOf[fwtypes.StringEnum[testEnum]]]()), - traceFlatteningWithSetValue("Field1", reflect.TypeFor[[]testEnum](), 0, "Field1", reflect.TypeFor[fwtypes.SetValueOf[fwtypes.StringEnum[testEnum]]]()), - }, - }, - "null value": { - Source: awsSliceOfStringEnum{}, - Target: &tfSetOfStringEnum{}, - WantTarget: &tfSetOfStringEnum{ - Field1: fwtypes.NewSetValueOfNull[fwtypes.StringEnum[testEnum]](ctx), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[awsSliceOfStringEnum](), reflect.TypeFor[*tfSetOfStringEnum]()), - infoConverting(reflect.TypeFor[awsSliceOfStringEnum](), reflect.TypeFor[*tfSetOfStringEnum]()), - traceMatchedFields("Field1", reflect.TypeFor[awsSliceOfStringEnum](), "Field1", reflect.TypeFor[*tfSetOfStringEnum]()), - infoConvertingWithPath("Field1", reflect.TypeFor[[]testEnum](), "Field1", reflect.TypeFor[fwtypes.SetValueOf[fwtypes.StringEnum[testEnum]]]()), - traceFlatteningWithSetNull("Field1", reflect.TypeFor[[]testEnum](), "Field1", reflect.TypeFor[fwtypes.SetValueOf[fwtypes.StringEnum[testEnum]]]()), - }, - }, - }, - } - - for testName, cases := range testCases { - t.Run(testName, func(t *testing.T) { - t.Parallel() - - runAutoFlattenTestCases(t, cases) - }) - } -} - -func TestFlattenEmbeddedStruct(t *testing.T) { - t.Parallel() - - testCases := 
autoFlexTestCases{ - "exported": { - Source: &awsEmbeddedStruct{ - Field1: "a", - Field2: "b", - }, - Target: &tfExportedEmbeddedStruct{}, - WantTarget: &tfExportedEmbeddedStruct{ - TFExportedStruct: TFExportedStruct{ - Field1: types.StringValue("a"), - }, - Field2: types.StringValue("b"), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[*awsEmbeddedStruct](), reflect.TypeFor[*tfExportedEmbeddedStruct]()), - infoConverting(reflect.TypeFor[awsEmbeddedStruct](), reflect.TypeFor[*tfExportedEmbeddedStruct]()), - traceMatchedFields("Field1", reflect.TypeFor[awsEmbeddedStruct](), "Field1", reflect.TypeFor[*tfExportedEmbeddedStruct]()), - infoConvertingWithPath("Field1", reflect.TypeFor[string](), "Field1", reflect.TypeFor[types.String]()), - traceMatchedFields("Field2", reflect.TypeFor[awsEmbeddedStruct](), "Field2", reflect.TypeFor[*tfExportedEmbeddedStruct]()), - infoConvertingWithPath("Field2", reflect.TypeFor[string](), "Field2", reflect.TypeFor[types.String]()), - }, - }, - "unexported": { - Source: &awsEmbeddedStruct{ - Field1: "a", - Field2: "b", - }, - Target: &tfUnexportedEmbeddedStruct{}, - WantTarget: &tfUnexportedEmbeddedStruct{ - tfSingleStringField: tfSingleStringField{ - Field1: types.StringValue("a"), - }, - Field2: types.StringValue("b"), - }, - expectedLogLines: []map[string]any{ - infoFlattening(reflect.TypeFor[*awsEmbeddedStruct](), reflect.TypeFor[*tfUnexportedEmbeddedStruct]()), - infoConverting(reflect.TypeFor[awsEmbeddedStruct](), reflect.TypeFor[*tfUnexportedEmbeddedStruct]()), - traceMatchedFields("Field1", reflect.TypeFor[awsEmbeddedStruct](), "Field1", reflect.TypeFor[*tfUnexportedEmbeddedStruct]()), - infoConvertingWithPath("Field1", reflect.TypeFor[string](), "Field1", reflect.TypeFor[types.String]()), - traceMatchedFields("Field2", reflect.TypeFor[awsEmbeddedStruct](), "Field2", reflect.TypeFor[*tfUnexportedEmbeddedStruct]()), - infoConvertingWithPath("Field2", reflect.TypeFor[string](), "Field2", 
reflect.TypeFor[types.String]()), - }, - }, - } - // cmp.Diff cannot handle an unexported field - runAutoFlattenTestCases(t, testCases, cmpopts.EquateComparable(tfUnexportedEmbeddedStruct{})) -} - -func runAutoFlattenTestCases(t *testing.T, testCases autoFlexTestCases, opts ...cmp.Option) { - t.Helper() - - for testName, testCase := range testCases { - t.Run(testName, func(t *testing.T) { - t.Parallel() - - ctx := context.Background() - - var buf bytes.Buffer - ctx = tflogtest.RootLogger(ctx, &buf) - - ctx = registerTestingLogger(ctx) - - diags := Flatten(ctx, testCase.Source, testCase.Target, testCase.Options...) - - if diff := cmp.Diff(diags, testCase.expectedDiags); diff != "" { - t.Errorf("unexpected diagnostics difference: %s", diff) - } - - lines, err := tflogtest.MultilineJSONDecode(&buf) - if err != nil { - t.Fatalf("Flatten: decoding log lines: %s", err) - } - if diff := cmp.Diff(lines, testCase.expectedLogLines); diff != "" { - t.Errorf("unexpected log lines diff (+wanted, -got): %s", diff) - } - - if !diags.HasError() { - less := func(a, b any) bool { return fmt.Sprintf("%+v", a) < fmt.Sprintf("%+v", b) } - if diff := cmp.Diff(testCase.Target, testCase.WantTarget, append(opts, cmpopts.SortSlices(less))...); diff != "" { - if !testCase.WantDiff { - t.Errorf("unexpected diff (+wanted, -got): %s", diff) - } - } - } - }) - } -} - -// Top-level tests need a concrete target type for some reason when calling `cmp.Diff` -type toplevelTestCase[Tsource, Ttarget any] struct { - source Tsource - expectedValue Ttarget - expectedDiags diag.Diagnostics - expectedLogLines []map[string]any -} - -type toplevelTestCases[Tsource, Ttarget any] map[string]toplevelTestCase[Tsource, Ttarget] - -func runTopLevelTestCases[Tsource, Ttarget any](t *testing.T, testCases toplevelTestCases[Tsource, Ttarget]) { - t.Helper() - - for testName, testCase := range testCases { - t.Run(testName, func(t *testing.T) { - t.Parallel() - - ctx := context.Background() - - var buf bytes.Buffer - ctx 
= tflogtest.RootLogger(ctx, &buf) - - ctx = registerTestingLogger(ctx) - - var target Ttarget - diags := Flatten(ctx, testCase.source, &target) - - if diff := cmp.Diff(diags, testCase.expectedDiags); diff != "" { - t.Errorf("unexpected diagnostics difference: %s", diff) - } - - lines, err := tflogtest.MultilineJSONDecode(&buf) - if err != nil { - t.Fatalf("Flatten: decoding log lines: %s", err) - } - if diff := cmp.Diff(lines, testCase.expectedLogLines); diff != "" { - t.Errorf("unexpected log lines diff (+wanted, -got): %s", diff) - } - - if !diags.HasError() { - less := func(a, b any) bool { return fmt.Sprintf("%+v", a) < fmt.Sprintf("%+v", b) } - if diff := cmp.Diff(target, testCase.expectedValue, cmpopts.SortSlices(less)); diff != "" { - t.Errorf("unexpected diff (+wanted, -got): %s", diff) - } - } - }) - } -} - -func TestFlattenPrePopulate(t *testing.T) { - t.Parallel() - ctx := context.Background() - - testCases := map[string]struct { - target any - expected any - }{ - "string": { - target: &rootStringModel{}, - expected: &rootStringModel{ - Field1: types.StringNull(), - }, - }, - - "nested list": { - target: &rootListNestedObjectModel{}, - expected: &rootListNestedObjectModel{ - Field1: fwtypes.NewListNestedObjectValueOfNull[nestedModel](ctx), - }, - }, - - "nested set": { - target: &rootSetNestedObjectModel{}, - expected: &rootSetNestedObjectModel{ - Field1: fwtypes.NewSetNestedObjectValueOfNull[nestedModel](ctx), - }, - }, - } - - for name, testCase := range testCases { - t.Run(name, func(t *testing.T) { - t.Parallel() - - valTo := reflect.ValueOf(testCase.target) - - diags := flattenPrePopulate(ctx, valTo) - - if l := len(diags); l > 0 { - t.Fatalf("expected 0 diags, got %s", fwdiag.DiagnosticsString(diags)) - } - - if diff := cmp.Diff(testCase.target, testCase.expected); diff != "" { - t.Errorf("unexpected diff (+wanted, -got): %s", diff) - } - }) - } -} - -type rootStringModel struct { - Field1 types.String `tfsdk:"field1"` -} - -type 
rootListNestedObjectModel struct { - Field1 fwtypes.ListNestedObjectValueOf[nestedModel] `tfsdk:"field1"` -} - -type rootSetNestedObjectModel struct { - Field1 fwtypes.SetNestedObjectValueOf[nestedModel] `tfsdk:"field1"` -} - -type nestedModel struct { - Field1 types.String `tfsdk:"field1"` -} diff --git a/internal/framework/flex/autoflex.go b/internal/framework/flex/autoflex.go index aa941f275265..dce7baf563c5 100644 --- a/internal/framework/flex/autoflex.go +++ b/internal/framework/flex/autoflex.go @@ -17,12 +17,7 @@ import ( tfreflect "github.com/hashicorp/terraform-provider-aws/internal/reflect" ) -type fieldNamePrefixCtxKey string - const ( - fieldNamePrefixRecurse fieldNamePrefixCtxKey = "FIELD_NAME_PREFIX_RECURSE" - fieldNameSuffixRecurse fieldNamePrefixCtxKey = "FIELD_NAME_SUFFIX_RECURSE" - mapBlockKeyFieldName = "MapBlockKey" ) @@ -74,7 +69,12 @@ var ( plural = pluralize.NewClient() ) -func findFieldFuzzy(ctx context.Context, fieldNameFrom string, typeFrom reflect.Type, typeTo reflect.Type, flexer autoFlexer) (reflect.StructField, bool) { +type fuzzyFieldFinder struct { + prefixRecursionDepth int + suffixRecursionDepth int +} + +func (fff *fuzzyFieldFinder) findField(ctx context.Context, fieldNameFrom string, typeFrom reflect.Type, typeTo reflect.Type, flexer autoFlexer) (reflect.StructField, bool) { //nolint:unparam // first precedence is exact match (case sensitive) if fieldTo, ok := typeTo.FieldByName(fieldNameFrom); ok { return fieldTo, true @@ -117,26 +117,38 @@ func findFieldFuzzy(ctx context.Context, fieldNameFrom string, typeFrom reflect. 
// fourth precedence is using field name prefix if v := opts.fieldNamePrefix; v != "" { v = strings.ReplaceAll(v, " ", "") - if ctx.Value(fieldNamePrefixRecurse) == nil { + if fff.prefixRecursionDepth == 0 { // so it will only recurse once - ctx = context.WithValue(ctx, fieldNamePrefixRecurse, true) + fff.prefixRecursionDepth++ if trimmed, ok := strings.CutPrefix(fieldNameFrom, v); ok { - return findFieldFuzzy(ctx, trimmed, typeFrom, typeTo, flexer) + if fieldTo, ok := fff.findField(ctx, trimmed, typeFrom, typeTo, flexer); ok { + fff.prefixRecursionDepth-- + return fieldTo, true + } + } else { + if fieldTo, ok := fff.findField(ctx, v+fieldNameFrom, typeFrom, typeTo, flexer); ok { + fff.prefixRecursionDepth-- + return fieldTo, true + } } - return findFieldFuzzy(ctx, v+fieldNameFrom, typeFrom, typeTo, flexer) + // no match via prefix mutation; fall through to suffix handling on the original name } } // fifth precedence is using field name suffix if v := opts.fieldNameSuffix; v != "" { v = strings.ReplaceAll(v, " ", "") - if ctx.Value(fieldNameSuffixRecurse) == nil { + if fff.suffixRecursionDepth == 0 { // so it will only recurse once - ctx = context.WithValue(ctx, fieldNameSuffixRecurse, true) + fff.suffixRecursionDepth++ if strings.HasSuffix(fieldNameFrom, v) { - return findFieldFuzzy(ctx, strings.TrimSuffix(fieldNameFrom, v), typeFrom, typeTo, flexer) + fieldTo, ok := fff.findField(ctx, strings.TrimSuffix(fieldNameFrom, v), typeFrom, typeTo, flexer) + fff.suffixRecursionDepth-- + return fieldTo, ok } - return findFieldFuzzy(ctx, fieldNameFrom+v, typeFrom, typeTo, flexer) + fieldTo, ok := fff.findField(ctx, fieldNameFrom+v, typeFrom, typeTo, flexer) + fff.suffixRecursionDepth-- + return fieldTo, ok } } diff --git a/internal/framework/flex/autoflex_args_test.go b/internal/framework/flex/autoflex_args_test.go new file mode 100644 index 000000000000..0e6b8896e307 --- /dev/null +++ b/internal/framework/flex/autoflex_args_test.go @@ -0,0 +1,149 @@ +// Copyright (c) 
HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package flex + +// Tests AutoFlex's Expand/Flatten of args to validate top-level argument shape (nil/typed-nil, +// pointer-ness, struct↔non-struct). They intentionally do not assert logging; only diagnostic codes. + +import ( + "testing" +) + +type emptyStruct struct{} + +func TestExpandArgs_nilAndPointers(t *testing.T) { + t.Parallel() + + var ( + typedNilSource *emptyStruct + typedNilTarget *emptyStruct + ) + + testCases := autoFlexTestCases{ + "nil Source": { + Target: &emptyStruct{}, + ExpectedDiags: diagAFNil(diagExpandingSourceIsNil), + }, + "typed nil Source": { + Source: typedNilSource, + Target: &emptyStruct{}, + ExpectedDiags: diagAFNil(diagExpandingSourceIsNil), // FIXME: Should give the actual type + }, + "nil Target": { + Source: emptyStruct{}, + ExpectedDiags: diagAFNil(diagConvertingTargetIsNil), + }, + "typed nil Target": { + Source: emptyStruct{}, + Target: typedNilTarget, + ExpectedDiags: diagAF[*emptyStruct](diagConvertingTargetIsNil), + }, + "non-pointer Target": { + Source: emptyStruct{}, + Target: 0, + ExpectedDiags: diagAF[int](diagConvertingTargetIsNotPointer), + }, + } + + runAutoExpandTestCases(t, testCases, runChecks{CompareDiags: true, CompareTarget: true}) +} + +func TestExpandArgs_shapeCompatibility(t *testing.T) { + t.Parallel() + + testString := "test" + + testCases := autoFlexTestCases{ + "non-struct Source struct Target": { + Source: testString, + Target: &emptyStruct{}, + ExpectedDiags: diagAF[string](diagExpandingSourceDoesNotImplementAttrValue), + }, + "struct Source non-struct Target": { + Source: emptyStruct{}, + Target: &testString, + ExpectedDiags: diagAF[emptyStruct](diagExpandingSourceDoesNotImplementAttrValue), + }, + "empty struct Source and Target": { + Source: emptyStruct{}, + Target: &emptyStruct{}, + WantTarget: &emptyStruct{}, + }, + "empty struct pointer Source and Target": { + Source: &emptyStruct{}, + Target: &emptyStruct{}, + WantTarget: &emptyStruct{}, + 
}, + } + + runAutoExpandTestCases(t, testCases, runChecks{CompareDiags: true, CompareTarget: true}) +} + +func TestFlattenArgs_nilAndPointers(t *testing.T) { + t.Parallel() + + var ( + typedNilSource *emptyStruct + typedNilTarget *emptyStruct + ) + + testCases := autoFlexTestCases{ + "nil Source": { + Target: &emptyStruct{}, + ExpectedDiags: diagAFNil(diagFlatteningSourceIsNil), + }, + "typed nil Source": { + Source: typedNilSource, + Target: &emptyStruct{}, + ExpectedDiags: diagAF[*emptyStruct](diagFlatteningSourceIsNil), + }, + "nil Target": { + Source: emptyStruct{}, + ExpectedDiags: diagAFNil(diagConvertingTargetIsNil), + }, + "typed nil Target": { + Source: emptyStruct{}, + Target: typedNilTarget, + ExpectedDiags: diagAF[*emptyStruct](diagConvertingTargetIsNil), + }, + "non-pointer Target": { + Source: emptyStruct{}, + Target: 0, + ExpectedDiags: diagAF[int](diagConvertingTargetIsNotPointer), + }, + } + + runAutoFlattenTestCases(t, testCases, runChecks{CompareDiags: true, CompareTarget: true}) +} + +func TestFlattenArgs_shapeCompatibility(t *testing.T) { + t.Parallel() + + testString := "test" + + testCases := autoFlexTestCases{ + "non-struct Source struct Target": { + Source: testString, + Target: &emptyStruct{}, + ExpectedDiags: diagAF[emptyStruct](diagFlatteningTargetDoesNotImplementAttrValue), + }, + "struct Source non-struct Target": { + Source: emptyStruct{}, + Target: &testString, + ExpectedDiags: diagAF[string](diagFlatteningTargetDoesNotImplementAttrValue), + }, + "empty struct Source and Target": { + Source: emptyStruct{}, + Target: &emptyStruct{}, + WantTarget: &emptyStruct{}, + }, + "empty struct pointer Source and Target": { + Source: &emptyStruct{}, + Target: &emptyStruct{}, + WantTarget: &emptyStruct{}, + }, + } + + runAutoFlattenTestCases(t, testCases, runChecks{CompareDiags: true, CompareTarget: true}) +} diff --git a/internal/framework/flex/autoflex_collections_test.go b/internal/framework/flex/autoflex_collections_test.go new file mode 
100644 index 000000000000..203bd152200c --- /dev/null +++ b/internal/framework/flex/autoflex_collections_test.go @@ -0,0 +1,923 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package flex + +// Tests AutoFlex's Expand/Flatten for list, set, and map conversions—verifying value correctness and +// diagnostics, not internal logging or trace output. For logging validation, see autoflex_dispatch_test.go. +// Specific map tests are in autoflex_maps_test.go. + +import ( + "context" + "testing" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/google/go-cmp/cmp/cmpopts" + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/types" + fwtypes "github.com/hashicorp/terraform-provider-aws/internal/framework/types" +) + +// List/Set/Map of primitive types. +type tfCollectionsOfPrimitiveElements struct { + Field1 types.List `tfsdk:"field1"` + Field2 types.List `tfsdk:"field2"` + Field3 types.Set `tfsdk:"field3"` + Field4 types.Set `tfsdk:"field4"` + Field5 types.Map `tfsdk:"field5"` + Field6 types.Map `tfsdk:"field6"` +} + +type awsCollectionsOfPrimitiveElements struct { + Field1 []string + Field2 []*string + Field3 []string + Field4 []*string + Field5 map[string]string + Field6 map[string]*string +} + +func TestExpandCollections(t *testing.T) { + t.Parallel() + + testCases := autoFlexTestCases{ + "Collection of primitive types Source and slice or map of primtive types Target": { + Source: &tfCollectionsOfPrimitiveElements{ + Field1: types.ListValueMust(types.StringType, []attr.Value{ + types.StringValue("a"), + types.StringValue("b"), + }), + Field2: types.ListValueMust(types.StringType, []attr.Value{ + types.StringValue("a"), + types.StringValue("b"), + }), + Field3: types.SetValueMust(types.StringType, []attr.Value{ + types.StringValue("a"), + types.StringValue("b"), + }), + Field4: types.SetValueMust(types.StringType, []attr.Value{ + types.StringValue("a"), + types.StringValue("b"), + }), + 
Field5: types.MapValueMust(types.StringType, map[string]attr.Value{ + "A": types.StringValue("a"), + "B": types.StringValue("b"), + }), + Field6: types.MapValueMust(types.StringType, map[string]attr.Value{ + "A": types.StringValue("a"), + "B": types.StringValue("b"), + }), + }, + Target: &awsCollectionsOfPrimitiveElements{}, + WantTarget: &awsCollectionsOfPrimitiveElements{ + Field1: []string{"a", "b"}, + Field2: aws.StringSlice([]string{"a", "b"}), + Field3: []string{"a", "b"}, + Field4: aws.StringSlice([]string{"a", "b"}), + Field5: map[string]string{"A": "a", "B": "b"}, + Field6: aws.StringMap(map[string]string{"A": "a", "B": "b"}), + }, + }, + } + + runAutoExpandTestCases(t, testCases, runChecks{CompareDiags: false, CompareTarget: true}) +} + +func TestExpandListOfInt64(t *testing.T) { + t.Parallel() + + testCases := autoFlexTestCases{ + "valid value []int64": { + Source: types.ListValueMust(types.Int64Type, []attr.Value{ + types.Int64Value(1), + types.Int64Value(-1), + }), + Target: &[]int64{}, + WantTarget: &[]int64{1, -1}, + }, + "empty value []int64": { + Source: types.ListValueMust(types.Int64Type, []attr.Value{}), + Target: &[]int64{}, + WantTarget: &[]int64{}, + }, + "null value []int64": { + Source: types.ListNull(types.Int64Type), + Target: &[]int64{}, + WantTarget: &[]int64{}, + }, + "valid value []*int64": { + Source: types.ListValueMust(types.Int64Type, []attr.Value{ + types.Int64Value(1), + types.Int64Value(-1), + }), + Target: &[]*int64{}, + WantTarget: &[]*int64{aws.Int64(1), aws.Int64(-1)}, + }, + "empty value []*int64": { + Source: types.ListValueMust(types.Int64Type, []attr.Value{}), + Target: &[]*int64{}, + WantTarget: &[]*int64{}, + }, + "null value []*int64": { + Source: types.ListNull(types.Int64Type), + Target: &[]*int64{}, + WantTarget: &[]*int64{}, + }, + "valid value []int32": { + Source: types.ListValueMust(types.Int64Type, []attr.Value{ + types.Int64Value(1), + types.Int64Value(-1), + }), + Target: &[]int32{}, + WantTarget: 
&[]int32{1, -1}, + }, + "empty value []int32": { + Source: types.ListValueMust(types.Int64Type, []attr.Value{}), + Target: &[]int32{}, + WantTarget: &[]int32{}, + }, + "null value []int32": { + Source: types.ListNull(types.Int64Type), + Target: &[]int32{}, + WantTarget: &[]int32{}, + }, + "valid value []*int32": { + Source: types.ListValueMust(types.Int64Type, []attr.Value{ + types.Int64Value(1), + types.Int64Value(-1), + }), + Target: &[]*int32{}, + WantTarget: &[]*int32{aws.Int32(1), aws.Int32(-1)}, + }, + "empty value []*int32": { + Source: types.ListValueMust(types.Int64Type, []attr.Value{}), + Target: &[]*int32{}, + WantTarget: &[]*int32{}, + }, + "null value []*int32": { + Source: types.ListNull(types.Int64Type), + Target: &[]*int32{}, + WantTarget: &[]*int32{}, + }, + } + runAutoExpandTestCases(t, testCases, runChecks{CompareDiags: false, CompareTarget: true}) +} + +func TestExpandSetOfInt64(t *testing.T) { + t.Parallel() + + testCases := autoFlexTestCases{ + "valid value []int64": { + Source: types.SetValueMust(types.Int64Type, []attr.Value{ + types.Int64Value(1), + types.Int64Value(-1), + }), + Target: &[]int64{}, + WantTarget: &[]int64{1, -1}, + }, + "empty value []int64": { + Source: types.SetValueMust(types.Int64Type, []attr.Value{}), + Target: &[]int64{}, + WantTarget: &[]int64{}, + }, + "null value []int64": { + Source: types.SetNull(types.Int64Type), + Target: &[]int64{}, + WantTarget: &[]int64{}, + }, + "valid value []*int64": { + Source: types.SetValueMust(types.Int64Type, []attr.Value{ + types.Int64Value(1), + types.Int64Value(-1), + }), + Target: &[]*int64{}, + WantTarget: &[]*int64{aws.Int64(1), aws.Int64(-1)}, + }, + "empty value []*int64": { + Source: types.SetValueMust(types.Int64Type, []attr.Value{}), + Target: &[]*int64{}, + WantTarget: &[]*int64{}, + }, + "null value []*int64": { + Source: types.SetNull(types.Int64Type), + Target: &[]*int64{}, + WantTarget: &[]*int64{}, + }, + "valid value []int32": { + Source: 
types.SetValueMust(types.Int64Type, []attr.Value{ + types.Int64Value(1), + types.Int64Value(-1), + }), + Target: &[]int32{}, + WantTarget: &[]int32{1, -1}, + }, + "empty value []int32": { + Source: types.SetValueMust(types.Int64Type, []attr.Value{}), + Target: &[]int32{}, + WantTarget: &[]int32{}, + }, + "null value []int32": { + Source: types.SetNull(types.Int64Type), + Target: &[]int32{}, + WantTarget: &[]int32{}, + }, + "valid value []*int32": { + Source: types.SetValueMust(types.Int64Type, []attr.Value{ + types.Int64Value(1), + types.Int64Value(-1), + }), + Target: &[]*int32{}, + WantTarget: &[]*int32{aws.Int32(1), aws.Int32(-1)}, + }, + "empty value []*int32": { + Source: types.SetValueMust(types.Int64Type, []attr.Value{}), + Target: &[]*int32{}, + WantTarget: &[]*int32{}, + }, + "null value []*int32": { + Source: types.SetNull(types.Int64Type), + Target: &[]*int32{}, + WantTarget: &[]*int32{}, + }, + } + runAutoExpandTestCases(t, testCases, runChecks{CompareDiags: false, CompareTarget: true}) +} + +func TestExpandListOfStringEnum(t *testing.T) { + t.Parallel() + + testCases := autoFlexTestCases{ + "valid value": { + Source: types.ListValueMust(types.StringType, []attr.Value{ + types.StringValue(string(testEnumScalar)), + types.StringValue(string(testEnumList)), + }), + Target: &[]testEnum{}, + WantTarget: &[]testEnum{testEnumScalar, testEnumList}, + }, + "empty value": { + Source: types.ListValueMust(types.StringType, []attr.Value{}), + Target: &[]testEnum{}, + WantTarget: &[]testEnum{}, + }, + "null value": { + Source: types.ListNull(types.StringType), + Target: &[]testEnum{}, + WantTarget: &[]testEnum{}, + }, + } + runAutoExpandTestCases(t, testCases, runChecks{CompareDiags: false, CompareTarget: true}) +} + +func TestExpandSetOfStringEnum(t *testing.T) { + t.Parallel() + + testCases := autoFlexTestCases{ + "valid value": { + Source: types.SetValueMust(types.StringType, []attr.Value{ + types.StringValue(string(testEnumScalar)), + 
types.StringValue(string(testEnumList)), + }), + Target: &[]testEnum{}, + WantTarget: &[]testEnum{testEnumScalar, testEnumList}, + }, + "empty value": { + Source: types.SetValueMust(types.StringType, []attr.Value{}), + Target: &[]testEnum{}, + WantTarget: &[]testEnum{}, + }, + "null value": { + Source: types.SetNull(types.StringType), + Target: &[]testEnum{}, + WantTarget: &[]testEnum{}, + }, + } + runAutoExpandTestCases(t, testCases, runChecks{CompareDiags: false, CompareTarget: true}) +} + +type tfListOfStringEnum struct { + Field1 fwtypes.ListValueOf[fwtypes.StringEnum[testEnum]] `tfsdk:"field1"` +} + +type awsSliceOfStringEnum struct { + Field1 []testEnum +} + +func TestExpandStructListOfStringEnum(t *testing.T) { + t.Parallel() + + ctx := context.Background() + + testCases := autoFlexTestCases{ + "valid value": { + Source: &tfListOfStringEnum{ + Field1: fwtypes.NewListValueOfMust[fwtypes.StringEnum[testEnum]](ctx, []attr.Value{ + fwtypes.StringEnumValue(testEnumScalar), + fwtypes.StringEnumValue(testEnumList), + }), + }, + Target: &awsSliceOfStringEnum{}, + WantTarget: &awsSliceOfStringEnum{ + Field1: []testEnum{testEnumScalar, testEnumList}, + }, + }, + "empty value": { + Source: &tfListOfStringEnum{ + Field1: fwtypes.NewListValueOfMust[fwtypes.StringEnum[testEnum]](ctx, []attr.Value{}), + }, + Target: &awsSliceOfStringEnum{}, + WantTarget: &awsSliceOfStringEnum{ + Field1: []testEnum{}, + }, + }, + "null value": { + Source: &tfListOfStringEnum{ + Field1: fwtypes.NewListValueOfNull[fwtypes.StringEnum[testEnum]](ctx), + }, + Target: &awsSliceOfStringEnum{}, + WantTarget: &awsSliceOfStringEnum{}, + }, + } + runAutoExpandTestCases(t, testCases, runChecks{CompareDiags: false, CompareTarget: true}) +} + +type tfSetOfStringEnum struct { + Field1 fwtypes.SetValueOf[fwtypes.StringEnum[testEnum]] `tfsdk:"field1"` +} + +func TestExpandStructSetOfStringEnum(t *testing.T) { + t.Parallel() + + ctx := context.Background() + + testCases := autoFlexTestCases{ + "valid value": 
{ + Source: &tfSetOfStringEnum{ + Field1: fwtypes.NewSetValueOfMust[fwtypes.StringEnum[testEnum]](ctx, []attr.Value{ + fwtypes.StringEnumValue(testEnumScalar), + fwtypes.StringEnumValue(testEnumList), + }), + }, + Target: &awsSliceOfStringEnum{}, + WantTarget: &awsSliceOfStringEnum{ + Field1: []testEnum{testEnumScalar, testEnumList}, + }, + }, + "empty value": { + Source: &tfSetOfStringEnum{ + Field1: fwtypes.NewSetValueOfMust[fwtypes.StringEnum[testEnum]](ctx, []attr.Value{}), + }, + Target: &awsSliceOfStringEnum{}, + WantTarget: &awsSliceOfStringEnum{ + Field1: []testEnum{}, + }, + }, + "null value": { + Source: &tfSetOfStringEnum{ + Field1: fwtypes.NewSetValueOfNull[fwtypes.StringEnum[testEnum]](ctx), + }, + Target: &awsSliceOfStringEnum{}, + WantTarget: &awsSliceOfStringEnum{}, + }, + } + runAutoExpandTestCases(t, testCases, runChecks{CompareDiags: false, CompareTarget: true}) +} + +type tfSingleStringFieldIgnore struct { + Field1 types.String `tfsdk:"field1" autoflex:"-"` +} + +func TestExpandIgnoreStructTag(t *testing.T) { + t.Parallel() + + testCases := autoFlexTestCases{ + "to value": { + Source: tfSingleStringFieldIgnore{ + Field1: types.StringValue("value1"), + }, + Target: &awsSingleStringValue{}, + WantTarget: &awsSingleStringValue{}, + }, + "to pointer": { + Source: tfSingleStringFieldIgnore{ + Field1: types.StringValue("value1"), + }, + Target: &awsSingleStringPointer{}, + WantTarget: &awsSingleStringPointer{}, + }, + } + + runAutoExpandTestCases(t, testCases, runChecks{CompareDiags: false, CompareTarget: true}) +} + +type TFExportedStruct struct { + Field1 types.String `tfsdk:"field1"` +} + +type tfExportedEmbeddedStruct struct { + TFExportedStruct + Field2 types.String `tfsdk:"field2"` +} + +type tfUnexportedEmbeddedStruct struct { + tfSingleStringField + Field2 types.String `tfsdk:"field2"` +} + +type awsEmbeddedStruct struct { + Field1 string + Field2 string +} + +func TestExpandEmbeddedStruct(t *testing.T) { + t.Parallel() + + testCases := 
autoFlexTestCases{ + "exported": { + Source: &tfExportedEmbeddedStruct{ + TFExportedStruct: TFExportedStruct{ + Field1: types.StringValue("a"), + }, + Field2: types.StringValue("b"), + }, + Target: &awsEmbeddedStruct{}, + WantTarget: &awsEmbeddedStruct{ + Field1: "a", + Field2: "b", + }, + }, + "unexported": { + Source: &tfUnexportedEmbeddedStruct{ + tfSingleStringField: tfSingleStringField{ + Field1: types.StringValue("a"), + }, + Field2: types.StringValue("b"), + }, + Target: &awsEmbeddedStruct{}, + WantTarget: &awsEmbeddedStruct{ + Field1: "a", + Field2: "b", + }, + }, + } + runAutoExpandTestCases(t, testCases, runChecks{CompareDiags: false, CompareTarget: true}) +} + +// List/Set/Map of string types. +type tfTypedCollectionsOfPrimitiveElements struct { + Field1 fwtypes.ListValueOf[types.String] `tfsdk:"field1"` + Field2 fwtypes.ListValueOf[types.String] `tfsdk:"field2"` + Field3 fwtypes.SetValueOf[types.String] `tfsdk:"field3"` + Field4 fwtypes.SetValueOf[types.String] `tfsdk:"field4"` + Field5 fwtypes.MapValueOf[types.String] `tfsdk:"field5"` + Field6 fwtypes.MapValueOf[types.String] `tfsdk:"field6"` +} + +func TestFlattenCollections(t *testing.T) { + t.Parallel() + + ctx := context.Background() + + testCases := autoFlexTestCases{ + "zero value slice or map of primitive types Source and Collection of primtive types Target": { + Source: &awsCollectionsOfPrimitiveElements{}, + Target: &tfCollectionsOfPrimitiveElements{}, + WantTarget: &tfCollectionsOfPrimitiveElements{ + Field1: types.ListNull(types.StringType), + Field2: types.ListNull(types.StringType), + Field3: types.SetNull(types.StringType), + Field4: types.SetNull(types.StringType), + Field5: types.MapNull(types.StringType), + Field6: types.MapNull(types.StringType), + }, + }, + "slice or map of primitive types Source and Collection of primitive types Target": { + Source: &awsCollectionsOfPrimitiveElements{ + Field1: []string{"a", "b"}, + Field2: aws.StringSlice([]string{"a", "b"}), + Field3: 
[]string{"a", "b"}, + Field4: aws.StringSlice([]string{"a", "b"}), + Field5: map[string]string{"A": "a", "B": "b"}, + Field6: aws.StringMap(map[string]string{"A": "a", "B": "b"}), + }, + Target: &tfCollectionsOfPrimitiveElements{}, + WantTarget: &tfCollectionsOfPrimitiveElements{ + Field1: types.ListValueMust(types.StringType, []attr.Value{ + types.StringValue("a"), + types.StringValue("b"), + }), + Field2: types.ListValueMust(types.StringType, []attr.Value{ + types.StringValue("a"), + types.StringValue("b"), + }), + Field3: types.SetValueMust(types.StringType, []attr.Value{ + types.StringValue("a"), + types.StringValue("b"), + }), + Field4: types.SetValueMust(types.StringType, []attr.Value{ + types.StringValue("a"), + types.StringValue("b"), + }), + Field5: types.MapValueMust(types.StringType, map[string]attr.Value{ + "A": types.StringValue("a"), + "B": types.StringValue("b"), + }), + Field6: types.MapValueMust(types.StringType, map[string]attr.Value{ + "A": types.StringValue("a"), + "B": types.StringValue("b"), + }), + }, + }, + "zero value slice or map of string type Source and Collection of string types Target": { + Source: &awsCollectionsOfPrimitiveElements{}, + Target: &tfTypedCollectionsOfPrimitiveElements{}, + WantTarget: &tfTypedCollectionsOfPrimitiveElements{ + Field1: fwtypes.NewListValueOfNull[types.String](ctx), + Field2: fwtypes.NewListValueOfNull[types.String](ctx), + Field3: fwtypes.NewSetValueOfNull[types.String](ctx), + Field4: fwtypes.NewSetValueOfNull[types.String](ctx), + Field5: fwtypes.NewMapValueOfNull[types.String](ctx), + Field6: fwtypes.NewMapValueOfNull[types.String](ctx), + }, + }, + "slice or map of string types Source and Collection of string types Target": { + Source: &awsCollectionsOfPrimitiveElements{ + Field1: []string{"a", "b"}, + Field2: aws.StringSlice([]string{"a", "b"}), + Field3: []string{"a", "b"}, + Field4: aws.StringSlice([]string{"a", "b"}), + Field5: map[string]string{"A": "a", "B": "b"}, + Field6: 
aws.StringMap(map[string]string{"A": "a", "B": "b"}), + }, + Target: &tfTypedCollectionsOfPrimitiveElements{}, + WantTarget: &tfTypedCollectionsOfPrimitiveElements{ + Field1: fwtypes.NewListValueOfMust[types.String](ctx, []attr.Value{ + types.StringValue("a"), + types.StringValue("b"), + }), + Field2: fwtypes.NewListValueOfMust[types.String](ctx, []attr.Value{ + types.StringValue("a"), + types.StringValue("b"), + }), + Field3: fwtypes.NewSetValueOfMust[types.String](ctx, []attr.Value{ + types.StringValue("a"), + types.StringValue("b"), + }), + Field4: fwtypes.NewSetValueOfMust[types.String](ctx, []attr.Value{ + types.StringValue("a"), + types.StringValue("b"), + }), + Field5: fwtypes.NewMapValueOfMust[types.String](ctx, map[string]attr.Value{ + "A": types.StringValue("a"), + "B": types.StringValue("b"), + }), + Field6: fwtypes.NewMapValueOfMust[types.String](ctx, map[string]attr.Value{ + "A": types.StringValue("a"), + "B": types.StringValue("b"), + }), + }, + }, + } + + runAutoFlattenTestCases(t, testCases, runChecks{CompareDiags: false, CompareTarget: true}) +} + +type awsSimpleStringValueSlice struct { + Field1 []string +} + +type tfSimpleList struct { + Field1 types.List `tfsdk:"field1"` +} + +type tfSimpleListLegacy struct { + Field1 types.List `tfsdk:"field1" autoflex:",legacy"` +} + +func TestFlattenSimpleListOfPrimitiveValues(t *testing.T) { + t.Parallel() + + testCases := map[string]autoFlexTestCases{ + "regular": { + "values": { + Source: awsSimpleStringValueSlice{ + Field1: []string{"a", "b"}, + }, + Target: &tfSimpleList{}, + WantTarget: &tfSimpleList{ + Field1: types.ListValueMust(types.StringType, []attr.Value{ + types.StringValue("a"), + types.StringValue("b"), + }), + }, + }, + + "empty": { + Source: awsSimpleStringValueSlice{ + Field1: []string{}, + }, + Target: &tfSimpleList{}, + WantTarget: &tfSimpleList{ + Field1: types.ListValueMust(types.StringType, []attr.Value{}), + }, + }, + + "null": { + Source: awsSimpleStringValueSlice{ + Field1: nil, + 
}, + Target: &tfSimpleList{}, + WantTarget: &tfSimpleList{ + Field1: types.ListNull(types.StringType), + }, + }, + }, + + "legacy": { + "values": { + Source: awsSimpleStringValueSlice{ + Field1: []string{"a", "b"}, + }, + Target: &tfSimpleListLegacy{}, + WantTarget: &tfSimpleListLegacy{ + Field1: types.ListValueMust(types.StringType, []attr.Value{ + types.StringValue("a"), + types.StringValue("b"), + }), + }, + }, + + "empty": { + Source: awsSimpleStringValueSlice{ + Field1: []string{}, + }, + Target: &tfSimpleListLegacy{}, + WantTarget: &tfSimpleListLegacy{ + Field1: types.ListValueMust(types.StringType, []attr.Value{}), + }, + }, + + "null": { + Source: awsSimpleStringValueSlice{ + Field1: nil, + }, + Target: &tfSimpleListLegacy{}, + WantTarget: &tfSimpleListLegacy{ + Field1: types.ListValueMust(types.StringType, []attr.Value{}), + }, + }, + }, + } + + for testName, cases := range testCases { + t.Run(testName, func(t *testing.T) { + t.Parallel() + + runAutoFlattenTestCases(t, cases, runChecks{CompareDiags: false, CompareTarget: true}) + }) + } +} + +type tfSimpleSet struct { + Field1 types.Set `tfsdk:"field1"` +} + +type tfSimpleSetLegacy struct { + Field1 types.Set `tfsdk:"field1" autoflex:",legacy"` +} + +func TestFlattenSimpleSetOfPrimitiveValues(t *testing.T) { + t.Parallel() + + testCases := map[string]autoFlexTestCases{ + "regular": { + "values": { + Source: awsSimpleStringValueSlice{ + Field1: []string{"a", "b"}, + }, + Target: &tfSimpleSet{}, + WantTarget: &tfSimpleSet{ + Field1: types.SetValueMust(types.StringType, []attr.Value{ + types.StringValue("a"), + types.StringValue("b"), + }), + }, + }, + + "empty": { + Source: awsSimpleStringValueSlice{ + Field1: []string{}, + }, + Target: &tfSimpleSet{}, + WantTarget: &tfSimpleSet{ + Field1: types.SetValueMust(types.StringType, []attr.Value{}), + }, + }, + + "null": { + Source: awsSimpleStringValueSlice{ + Field1: nil, + }, + Target: &tfSimpleSet{}, + WantTarget: &tfSimpleSet{ + Field1: 
types.SetNull(types.StringType), + }, + }, + }, + + "legacy": { + "values": { + Source: awsSimpleStringValueSlice{ + Field1: []string{"a", "b"}, + }, + Target: &tfSimpleSetLegacy{}, + WantTarget: &tfSimpleSetLegacy{ + Field1: types.SetValueMust(types.StringType, []attr.Value{ + types.StringValue("a"), + types.StringValue("b"), + }), + }, + }, + + "empty": { + Source: awsSimpleStringValueSlice{ + Field1: []string{}, + }, + Target: &tfSimpleSetLegacy{}, + WantTarget: &tfSimpleSetLegacy{ + Field1: types.SetValueMust(types.StringType, []attr.Value{}), + }, + }, + + "null": { + Source: awsSimpleStringValueSlice{ + Field1: nil, + }, + Target: &tfSimpleSetLegacy{}, + WantTarget: &tfSimpleSetLegacy{ + Field1: types.SetValueMust(types.StringType, []attr.Value{}), + }, + }, + }, + } + + for testName, cases := range testCases { + t.Run(testName, func(t *testing.T) { + t.Parallel() + + runAutoFlattenTestCases(t, cases, runChecks{CompareDiags: false, CompareTarget: true}) + }) + } +} + +func TestFlattenIgnoreStructTag(t *testing.T) { + t.Parallel() + + testCases := autoFlexTestCases{ + "from value": { + Source: awsSingleStringValue{ + Field1: "value1", + }, + Target: &tfSingleStringFieldIgnore{}, + WantTarget: &tfSingleStringFieldIgnore{}, + }, + "from pointer": { + Source: awsSingleStringPointer{ + Field1: aws.String("value1"), + }, + Target: &tfSingleStringFieldIgnore{}, + WantTarget: &tfSingleStringFieldIgnore{}, + }, + } + + runAutoFlattenTestCases(t, testCases, runChecks{CompareDiags: false, CompareTarget: true}) +} + +func TestFlattenStructListOfStringEnum(t *testing.T) { + t.Parallel() + + ctx := context.Background() + + testCases := map[string]autoFlexTestCases{ + "struct with list of string enum": { + "valid value": { + Source: awsSliceOfStringEnum{ + Field1: []testEnum{testEnumScalar, testEnumList}, + }, + Target: &tfListOfStringEnum{}, + WantTarget: &tfListOfStringEnum{ + Field1: fwtypes.NewListValueOfMust[fwtypes.StringEnum[testEnum]](ctx, []attr.Value{ + 
fwtypes.StringEnumValue(testEnumScalar), + fwtypes.StringEnumValue(testEnumList), + }), + }, + }, + "empty value": { + Source: awsSliceOfStringEnum{ + Field1: []testEnum{}, + }, + Target: &tfListOfStringEnum{}, + WantTarget: &tfListOfStringEnum{ + Field1: fwtypes.NewListValueOfMust[fwtypes.StringEnum[testEnum]](ctx, []attr.Value{}), + }, + }, + "null value": { + Source: awsSliceOfStringEnum{}, + Target: &tfListOfStringEnum{}, + WantTarget: &tfListOfStringEnum{ + Field1: fwtypes.NewListValueOfNull[fwtypes.StringEnum[testEnum]](ctx), + }, + }, + }, + } + + for testName, cases := range testCases { + t.Run(testName, func(t *testing.T) { + t.Parallel() + + runAutoFlattenTestCases(t, cases, runChecks{CompareDiags: false, CompareTarget: true}) + }) + } +} + +func TestFlattenStructSetOfStringEnum(t *testing.T) { + t.Parallel() + + ctx := context.Background() + + testCases := map[string]autoFlexTestCases{ + "struct with set of string enum": { + "valid value": { + Source: awsSliceOfStringEnum{ + Field1: []testEnum{testEnumScalar, testEnumList}, + }, + Target: &tfSetOfStringEnum{}, + WantTarget: &tfSetOfStringEnum{ + Field1: fwtypes.NewSetValueOfMust[fwtypes.StringEnum[testEnum]](ctx, []attr.Value{ + fwtypes.StringEnumValue(testEnumScalar), + fwtypes.StringEnumValue(testEnumList), + }), + }, + }, + "empty value": { + Source: awsSliceOfStringEnum{ + Field1: []testEnum{}, + }, + Target: &tfSetOfStringEnum{}, + WantTarget: &tfSetOfStringEnum{ + Field1: fwtypes.NewSetValueOfMust[fwtypes.StringEnum[testEnum]](ctx, []attr.Value{}), + }, + }, + "null value": { + Source: awsSliceOfStringEnum{}, + Target: &tfSetOfStringEnum{}, + WantTarget: &tfSetOfStringEnum{ + Field1: fwtypes.NewSetValueOfNull[fwtypes.StringEnum[testEnum]](ctx), + }, + }, + }, + } + + for testName, cases := range testCases { + t.Run(testName, func(t *testing.T) { + t.Parallel() + + runAutoFlattenTestCases(t, cases, runChecks{CompareDiags: false, CompareTarget: true}) + }) + } +} + +func TestFlattenEmbeddedStruct(t 
*testing.T) { + t.Parallel() + + testCases := autoFlexTestCases{ + "exported": { + Source: &awsEmbeddedStruct{ + Field1: "a", + Field2: "b", + }, + Target: &tfExportedEmbeddedStruct{}, + WantTarget: &tfExportedEmbeddedStruct{ + TFExportedStruct: TFExportedStruct{ + Field1: types.StringValue("a"), + }, + Field2: types.StringValue("b"), + }, + }, + "unexported": { + Source: &awsEmbeddedStruct{ + Field1: "a", + Field2: "b", + }, + Target: &tfUnexportedEmbeddedStruct{}, + WantTarget: &tfUnexportedEmbeddedStruct{ + tfSingleStringField: tfSingleStringField{ + Field1: types.StringValue("a"), + }, + Field2: types.StringValue("b"), + }, + }, + } + // cmp.Diff cannot handle an unexported field + runAutoFlattenTestCases(t, testCases, runChecks{CompareDiags: false, CompareTarget: true}, cmpopts.EquateComparable(tfUnexportedEmbeddedStruct{})) +} diff --git a/internal/framework/flex/autoflex_dispatch_test.go b/internal/framework/flex/autoflex_dispatch_test.go new file mode 100644 index 000000000000..2c3fbc4b63db --- /dev/null +++ b/internal/framework/flex/autoflex_dispatch_test.go @@ -0,0 +1,1637 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package flex + +// Verifies dispatcher selection (Expander/TypedExpander/Interface/Flattener/TypedFlattener), +// enforces interface contracts, and snapshots trace logging—behavior/routing focus, not data +// mapping. +// +// This test file uses golden snapshots for log verification. 
These can be found in +// testdata/autoflex/dispatch/*.golden + +import ( + "context" + "reflect" + "testing" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/types" + fwtypes "github.com/hashicorp/terraform-provider-aws/internal/framework/types" +) + +type tfListNestedObject[T any] struct { + Field1 fwtypes.ListNestedObjectValueOf[T] `tfsdk:"field1"` +} + +type tfSetNestedObject[T any] struct { + Field1 fwtypes.SetNestedObjectValueOf[T] `tfsdk:"field1"` +} + +type tfObjectValue[T any] struct { + Field1 fwtypes.ObjectValueOf[T] `tfsdk:"field1"` +} + +type tfInterfaceFlexer struct { + Field1 types.String `tfsdk:"field1"` +} + +var ( + _ Expander = tfInterfaceFlexer{} + _ Flattener = &tfInterfaceFlexer{} +) + +func (t tfInterfaceFlexer) Expand(ctx context.Context) (any, diag.Diagnostics) { + return &awsInterfaceInterfaceImpl{ + AWSField: StringValueFromFramework(ctx, t.Field1), + }, nil +} + +func (t *tfInterfaceFlexer) Flatten(ctx context.Context, v any) (diags diag.Diagnostics) { + switch val := v.(type) { + case awsInterfaceInterfaceImpl: + t.Field1 = StringValueToFramework(ctx, val.AWSField) + return diags + + default: + return diags + } +} + +type tfInterfaceIncompatibleExpander struct { + Field1 types.String `tfsdk:"field1"` +} + +var _ Expander = tfInterfaceIncompatibleExpander{} + +func (t tfInterfaceIncompatibleExpander) Expand(ctx context.Context) (any, diag.Diagnostics) { + return &awsInterfaceIncompatibleImpl{ + AWSField: StringValueFromFramework(ctx, t.Field1), + }, nil +} + +type awsInterfaceIncompatibleImpl struct { + AWSField string +} + +type awsInterfaceSingle struct { + Field1 awsInterfaceInterface +} + +type awsInterfaceSlice struct { + Field1 []awsInterfaceInterface +} + +type awsInterfaceInterface interface { + isAWSInterfaceInterface() +} + +type awsInterfaceInterfaceImpl struct { + 
AWSField string +} + +var _ awsInterfaceInterface = &awsInterfaceInterfaceImpl{} + +func (t *awsInterfaceInterfaceImpl) isAWSInterfaceInterface() {} // nosemgrep:ci.aws-in-func-name + +type tfFlexer struct { + Field1 types.String `tfsdk:"field1"` +} + +var ( + _ Expander = tfFlexer{} + _ Flattener = &tfFlexer{} +) + +func (t tfFlexer) Expand(ctx context.Context) (any, diag.Diagnostics) { + return &awsExpander{ + AWSField: StringValueFromFramework(ctx, t.Field1), + }, nil +} + +func (t *tfFlexer) Flatten(ctx context.Context, v any) (diags diag.Diagnostics) { + switch val := v.(type) { + case awsExpander: + t.Field1 = StringValueToFramework(ctx, val.AWSField) + return diags + + default: + return diags + } +} + +type tfExpanderListNestedObject tfListNestedObject[tfFlexer] + +type tfExpanderSetNestedObject tfSetNestedObject[tfFlexer] + +type tfExpanderObjectValue tfObjectValue[tfFlexer] + +type tfTypedExpanderListNestedObject tfListNestedObject[tfTypedExpander] + +type tfTypedExpanderSetNestedObject tfSetNestedObject[tfTypedExpander] + +type tfTypedExpanderObjectValue tfObjectValue[tfTypedExpander] + +type tfExpanderToString struct { + Field1 types.String `tfsdk:"field1"` +} + +var _ Expander = tfExpanderToString{} + +func (t tfExpanderToString) Expand(ctx context.Context) (any, diag.Diagnostics) { + return StringValueFromFramework(ctx, t.Field1), nil +} + +type tfExpanderToNil struct { + Field1 types.String `tfsdk:"field1"` +} + +var _ Expander = tfExpanderToNil{} + +func (t tfExpanderToNil) Expand(ctx context.Context) (any, diag.Diagnostics) { + return nil, nil +} + +type tfTypedExpander struct { + Field1 types.String `tfsdk:"field1"` +} + +var _ TypedExpander = tfTypedExpander{} + +func (t tfTypedExpander) ExpandTo(ctx context.Context, targetType reflect.Type) (any, diag.Diagnostics) { + return &awsExpander{ + AWSField: StringValueFromFramework(ctx, t.Field1), + }, nil +} + +type tfTypedExpanderToNil struct { + Field1 types.String `tfsdk:"field1"` +} + +var _ 
TypedExpander = tfTypedExpanderToNil{} + +func (t tfTypedExpanderToNil) ExpandTo(ctx context.Context, targetType reflect.Type) (any, diag.Diagnostics) { + return nil, nil +} + +type tfInterfaceTypedExpander struct { + Field1 types.String `tfsdk:"field1"` +} + +var _ TypedExpander = tfInterfaceTypedExpander{} + +func (t tfInterfaceTypedExpander) ExpandTo(ctx context.Context, targetType reflect.Type) (any, diag.Diagnostics) { + switch targetType { + case reflect.TypeFor[awsInterfaceInterface](): + return &awsInterfaceInterfaceImpl{ + AWSField: StringValueFromFramework(ctx, t.Field1), + }, nil + } + + return nil, nil +} + +type tfInterfaceIncompatibleTypedExpander struct { + Field1 types.String `tfsdk:"field1"` +} + +var _ TypedExpander = tfInterfaceIncompatibleTypedExpander{} + +func (t tfInterfaceIncompatibleTypedExpander) ExpandTo(ctx context.Context, targetType reflect.Type) (any, diag.Diagnostics) { + return &awsInterfaceIncompatibleImpl{ + AWSField: StringValueFromFramework(ctx, t.Field1), + }, nil +} + +type awsExpander struct { + AWSField string +} + +type awsExpanderIncompatible struct { + Incompatible int +} + +type awsExpanderSingleStruct struct { + Field1 awsExpander +} + +type awsExpanderSinglePtr struct { + Field1 *awsExpander +} + +type awsExpanderStructSlice struct { + Field1 []awsExpander +} + +type awsExpanderPtrSlice struct { + Field1 []*awsExpander +} + +func TestExpandLogging_collections(t *testing.T) { + t.Parallel() + + testCases := autoFlexTestCases{ + "Collection of primitive types Source and slice or map of primtive types Target": { + Source: &tfCollectionsOfPrimitiveElements{ + Field1: types.ListValueMust(types.StringType, []attr.Value{ + types.StringValue("a"), + types.StringValue("b"), + }), + Field2: types.ListValueMust(types.StringType, []attr.Value{ + types.StringValue("a"), + types.StringValue("b"), + }), + Field3: types.SetValueMust(types.StringType, []attr.Value{ + types.StringValue("a"), + types.StringValue("b"), + }), + Field4: 
types.SetValueMust(types.StringType, []attr.Value{ + types.StringValue("a"), + types.StringValue("b"), + }), + Field5: types.MapValueMust(types.StringType, map[string]attr.Value{ + "A": types.StringValue("a"), + "B": types.StringValue("b"), + }), + Field6: types.MapValueMust(types.StringType, map[string]attr.Value{ + "A": types.StringValue("a"), + "B": types.StringValue("b"), + }), + }, + Target: &awsCollectionsOfPrimitiveElements{}, + WantTarget: &awsCollectionsOfPrimitiveElements{ + Field1: []string{"a", "b"}, + Field2: aws.StringSlice([]string{"a", "b"}), + Field3: []string{"a", "b"}, + Field4: aws.StringSlice([]string{"a", "b"}), + Field5: map[string]string{"A": "a", "B": "b"}, + Field6: aws.StringMap(map[string]string{"A": "a", "B": "b"}), + }, + }, + } + + runAutoExpandTestCases(t, testCases, runChecks{CompareDiags: false, CompareTarget: true, GoldenLogs: true}) +} + +func TestExpandInterfaceContract(t *testing.T) { + t.Parallel() + + testCases := autoFlexTestCases{ + "source field does not implement attr.Value Source": { + Source: &awsSingleStringValue{Field1: "a"}, + Target: &awsSingleStringValue{}, + ExpectedDiags: diagAF[string](diagExpandingSourceDoesNotImplementAttrValue), + }, + } + + runAutoExpandTestCases(t, testCases, runChecks{CompareDiags: true, CompareTarget: true, GoldenLogs: true}) +} + +func TestExpandExpander(t *testing.T) { + t.Parallel() + + ctx := context.Background() + + testCases := autoFlexTestCases{ + "top level struct Target": { + Source: tfFlexer{ + Field1: types.StringValue("value1"), + }, + Target: &awsExpander{}, + WantTarget: &awsExpander{ + AWSField: "value1", + }, + }, + "top level string Target": { + Source: tfExpanderToString{ + Field1: types.StringValue("value1"), + }, + Target: aws.String(""), + WantTarget: aws.String("value1"), + }, + "top level incompatible struct Target": { + Source: tfFlexer{ + Field1: types.StringValue("value1"), + }, + Target: &awsExpanderIncompatible{}, + ExpectedDiags: diagAF2[awsExpander, 
awsExpanderIncompatible](diagCannotBeAssigned), + }, + "top level expands to nil": { + Source: tfExpanderToNil{ + Field1: types.StringValue("value1"), + }, + Target: &awsExpander{}, + ExpectedDiags: diagAF[tfExpanderToNil](diagExpandsToNil), + }, + "top level incompatible non-struct Target": { + Source: tfExpanderToString{ + Field1: types.StringValue("value1"), + }, + Target: aws.Int64(0), + ExpectedDiags: diagAF2[string, int64](diagCannotBeAssigned), + }, + "single list Source and single struct Target": { + Source: tfExpanderListNestedObject{ + Field1: fwtypes.NewListNestedObjectValueOfValueSliceMust(ctx, []tfFlexer{ + { + Field1: types.StringValue("value1"), + }, + }), + }, + Target: &awsExpanderSingleStruct{}, + WantTarget: &awsExpanderSingleStruct{ + Field1: awsExpander{ + AWSField: "value1", + }, + }, + }, + "single set Source and single struct Target": { + Source: tfExpanderSetNestedObject{ + Field1: fwtypes.NewSetNestedObjectValueOfValueSliceMust(ctx, []tfFlexer{ + { + Field1: types.StringValue("value1"), + }, + }), + }, + Target: &awsExpanderSingleStruct{}, + WantTarget: &awsExpanderSingleStruct{ + Field1: awsExpander{ + AWSField: "value1", + }, + }, + }, + "single list Source and single *struct Target": { + Source: tfExpanderListNestedObject{ + Field1: fwtypes.NewListNestedObjectValueOfValueSliceMust(ctx, []tfFlexer{ + { + Field1: types.StringValue("value1"), + }, + }), + }, + Target: &awsExpanderSinglePtr{}, + WantTarget: &awsExpanderSinglePtr{ + Field1: &awsExpander{ + AWSField: "value1", + }, + }, + }, + "single set Source and single *struct Target": { + Source: tfExpanderSetNestedObject{ + Field1: fwtypes.NewSetNestedObjectValueOfValueSliceMust(ctx, []tfFlexer{ + { + Field1: types.StringValue("value1"), + }, + }), + }, + Target: &awsExpanderSinglePtr{}, + WantTarget: &awsExpanderSinglePtr{ + Field1: &awsExpander{ + AWSField: "value1", + }, + }, + }, + "empty list Source and empty struct Target": { + Source: tfExpanderListNestedObject{ + Field1: 
fwtypes.NewListNestedObjectValueOfValueSliceMust(ctx, []tfFlexer{}), + }, + Target: &awsExpanderStructSlice{}, + WantTarget: &awsExpanderStructSlice{ + Field1: []awsExpander{}, + }, + }, + "non-empty list Source and non-empty struct Target": { + Source: tfExpanderListNestedObject{ + Field1: fwtypes.NewListNestedObjectValueOfValueSliceMust(ctx, []tfFlexer{ + { + Field1: types.StringValue("value1"), + }, + { + Field1: types.StringValue("value2"), + }, + }), + }, + Target: &awsExpanderStructSlice{}, + WantTarget: &awsExpanderStructSlice{ + Field1: []awsExpander{ + { + AWSField: "value1", + }, + { + AWSField: "value2", + }, + }, + }, + }, + "empty list Source and empty *struct Target": { + Source: tfExpanderListNestedObject{ + Field1: fwtypes.NewListNestedObjectValueOfValueSliceMust(ctx, []tfFlexer{}), + }, + Target: &awsExpanderPtrSlice{}, + WantTarget: &awsExpanderPtrSlice{ + Field1: []*awsExpander{}, + }, + }, + "non-empty list Source and non-empty *struct Target": { + Source: tfExpanderListNestedObject{ + Field1: fwtypes.NewListNestedObjectValueOfValueSliceMust(ctx, []tfFlexer{ + { + Field1: types.StringValue("value1"), + }, + { + Field1: types.StringValue("value2"), + }, + }), + }, + Target: &awsExpanderPtrSlice{}, + WantTarget: &awsExpanderPtrSlice{ + Field1: []*awsExpander{ + { + AWSField: "value1", + }, + { + AWSField: "value2", + }, + }, + }, + }, + "empty set Source and empty struct Target": { + Source: tfExpanderSetNestedObject{ + Field1: fwtypes.NewSetNestedObjectValueOfValueSliceMust(ctx, []tfFlexer{}), + }, + Target: &awsExpanderStructSlice{}, + WantTarget: &awsExpanderStructSlice{ + Field1: []awsExpander{}, + }, + }, + "non-empty set Source and non-empty struct Target": { + Source: tfExpanderSetNestedObject{ + Field1: fwtypes.NewSetNestedObjectValueOfValueSliceMust(ctx, []tfFlexer{ + { + Field1: types.StringValue("value1"), + }, + { + Field1: types.StringValue("value2"), + }, + }), + }, + Target: &awsExpanderStructSlice{}, + WantTarget: 
&awsExpanderStructSlice{ + Field1: []awsExpander{ + { + AWSField: "value1", + }, + { + AWSField: "value2", + }, + }, + }, + }, + "empty set Source and empty *struct Target": { + Source: tfExpanderSetNestedObject{ + Field1: fwtypes.NewSetNestedObjectValueOfValueSliceMust(ctx, []tfFlexer{}), + }, + Target: &awsExpanderPtrSlice{}, + WantTarget: &awsExpanderPtrSlice{ + Field1: []*awsExpander{}, + }, + }, + "non-empty set Source and non-empty *struct Target": { + Source: tfExpanderSetNestedObject{ + Field1: fwtypes.NewSetNestedObjectValueOfValueSliceMust(ctx, []tfFlexer{ + { + Field1: types.StringValue("value1"), + }, + { + Field1: types.StringValue("value2"), + }, + }), + }, + Target: &awsExpanderPtrSlice{}, + WantTarget: &awsExpanderPtrSlice{ + Field1: []*awsExpander{ + { + AWSField: "value1", + }, + { + AWSField: "value2", + }, + }, + }, + }, + "object value Source and struct Target": { + Source: tfExpanderObjectValue{ + Field1: fwtypes.NewObjectValueOfMust(ctx, &tfFlexer{ + Field1: types.StringValue("value1"), + }), + }, + Target: &awsExpanderSingleStruct{}, + WantTarget: &awsExpanderSingleStruct{ + Field1: awsExpander{ + AWSField: "value1", + }, + }, + }, + "object value Source and *struct Target": { + Source: tfExpanderObjectValue{ + Field1: fwtypes.NewObjectValueOfMust(ctx, &tfFlexer{ + Field1: types.StringValue("value1"), + }), + }, + Target: &awsExpanderSinglePtr{}, + WantTarget: &awsExpanderSinglePtr{ + Field1: &awsExpander{ + AWSField: "value1", + }, + }, + }, + } + runAutoExpandTestCases(t, testCases, runChecks{CompareDiags: true, CompareTarget: true, GoldenLogs: true}) +} + +func testFlexAWSInterfaceInterfacePtr(v awsInterfaceInterface) *awsInterfaceInterface { // nosemgrep:ci.aws-in-func-name + return &v +} + +func TestExpandInterface(t *testing.T) { + t.Parallel() + + ctx := context.Background() + + var targetInterface awsInterfaceInterface + + testCases := autoFlexTestCases{ + "top level": { + Source: tfInterfaceFlexer{ + Field1: 
types.StringValue("value1"), + }, + Target: &targetInterface, + WantTarget: testFlexAWSInterfaceInterfacePtr(&awsInterfaceInterfaceImpl{ + AWSField: "value1", + }), + }, + "top level return value does not implement target interface": { + Source: tfInterfaceIncompatibleExpander{ + Field1: types.StringValue("value1"), + }, + Target: &targetInterface, + ExpectedDiags: diagAF2[*awsInterfaceIncompatibleImpl, awsInterfaceInterface](diagExpandedTypeDoesNotImplement), + }, + "single list Source and single interface Target": { + Source: tfListNestedObject[tfInterfaceFlexer]{ + Field1: fwtypes.NewListNestedObjectValueOfValueSliceMust(ctx, []tfInterfaceFlexer{ + { + Field1: types.StringValue("value1"), + }, + }), + }, + Target: &awsInterfaceSingle{}, + WantTarget: &awsInterfaceSingle{ + Field1: &awsInterfaceInterfaceImpl{ + AWSField: "value1", + }, + }, + }, + "single list non-Expander Source and single interface Target": { + Source: tfListNestedObject[tfSingleStringField]{ + Field1: fwtypes.NewListNestedObjectValueOfValueSliceMust(ctx, []tfSingleStringField{ + { + Field1: types.StringValue("value1"), + }, + }), + }, + Target: &awsInterfaceSingle{}, + WantTarget: &awsInterfaceSingle{ + Field1: nil, + }, + }, + "single set Source and single interface Target": { + Source: tfSetNestedObject[tfInterfaceFlexer]{ + Field1: fwtypes.NewSetNestedObjectValueOfValueSliceMust(ctx, []tfInterfaceFlexer{ + { + Field1: types.StringValue("value1"), + }, + }), + }, + Target: &awsInterfaceSingle{}, + WantTarget: &awsInterfaceSingle{ + Field1: &awsInterfaceInterfaceImpl{ + AWSField: "value1", + }, + }, + }, + "empty list Source and empty interface Target": { + Source: tfListNestedObject[tfInterfaceFlexer]{ + Field1: fwtypes.NewListNestedObjectValueOfValueSliceMust(ctx, []tfInterfaceFlexer{}), + }, + Target: &awsInterfaceSlice{}, + WantTarget: &awsInterfaceSlice{ + Field1: []awsInterfaceInterface{}, + }, + }, + "non-empty list Source and non-empty interface Target": { + Source: 
tfListNestedObject[tfInterfaceFlexer]{ + Field1: fwtypes.NewListNestedObjectValueOfValueSliceMust(ctx, []tfInterfaceFlexer{ + { + Field1: types.StringValue("value1"), + }, + { + Field1: types.StringValue("value2"), + }, + }), + }, + Target: &awsInterfaceSlice{}, + WantTarget: &awsInterfaceSlice{ + Field1: []awsInterfaceInterface{ + &awsInterfaceInterfaceImpl{ + AWSField: "value1", + }, + &awsInterfaceInterfaceImpl{ + AWSField: "value2", + }, + }, + }, + }, + "empty set Source and empty interface Target": { + Source: tfSetNestedObject[tfInterfaceFlexer]{ + Field1: fwtypes.NewSetNestedObjectValueOfValueSliceMust(ctx, []tfInterfaceFlexer{}), + }, + Target: &awsInterfaceSlice{}, + WantTarget: &awsInterfaceSlice{ + Field1: []awsInterfaceInterface{}, + }, + }, + "non-empty set Source and non-empty interface Target": { + Source: tfSetNestedObject[tfInterfaceFlexer]{ + Field1: fwtypes.NewSetNestedObjectValueOfValueSliceMust(ctx, []tfInterfaceFlexer{ + { + Field1: types.StringValue("value1"), + }, + { + Field1: types.StringValue("value2"), + }, + }), + }, + Target: &awsInterfaceSlice{}, + WantTarget: &awsInterfaceSlice{ + Field1: []awsInterfaceInterface{ + &awsInterfaceInterfaceImpl{ + AWSField: "value1", + }, + &awsInterfaceInterfaceImpl{ + AWSField: "value2", + }, + }, + }, + }, + "object value Source and struct Target": { + Source: tfObjectValue[tfInterfaceFlexer]{ + Field1: fwtypes.NewObjectValueOfMust(ctx, &tfInterfaceFlexer{ + Field1: types.StringValue("value1"), + }), + }, + Target: &awsInterfaceSingle{}, + WantTarget: &awsInterfaceSingle{ + Field1: &awsInterfaceInterfaceImpl{ + AWSField: "value1", + }, + }, + }, + } + runAutoExpandTestCases(t, testCases, runChecks{CompareDiags: true, CompareTarget: true, GoldenLogs: true}) +} + +func TestExpandInterfaceTypedExpander(t *testing.T) { + t.Parallel() + + ctx := context.Background() + + var targetInterface awsInterfaceInterface + + testCases := autoFlexTestCases{ + "top level": { + Source: tfInterfaceTypedExpander{ + 
Field1: types.StringValue("value1"), + }, + Target: &targetInterface, + WantTarget: testFlexAWSInterfaceInterfacePtr(&awsInterfaceInterfaceImpl{ + AWSField: "value1", + }), + }, + "top level return value does not implement target interface": { + Source: tfInterfaceIncompatibleTypedExpander{ + Field1: types.StringValue("value1"), + }, + Target: &targetInterface, + ExpectedDiags: diagAF2[*awsInterfaceIncompatibleImpl, awsInterfaceInterface](diagExpandedTypeDoesNotImplement), + }, + "single list Source and single interface Target": { + Source: tfListNestedObject[tfInterfaceTypedExpander]{ + Field1: fwtypes.NewListNestedObjectValueOfValueSliceMust(ctx, []tfInterfaceTypedExpander{ + { + Field1: types.StringValue("value1"), + }, + }), + }, + Target: &awsInterfaceSingle{}, + WantTarget: &awsInterfaceSingle{ + Field1: &awsInterfaceInterfaceImpl{ + AWSField: "value1", + }, + }, + }, + "single list non-Expander Source and single interface Target": { + Source: tfListNestedObject[tfSingleStringField]{ + Field1: fwtypes.NewListNestedObjectValueOfValueSliceMust(ctx, []tfSingleStringField{ + { + Field1: types.StringValue("value1"), + }, + }), + }, + Target: &awsInterfaceSingle{}, + WantTarget: &awsInterfaceSingle{ + Field1: nil, + }, + }, + "single set Source and single interface Target": { + Source: tfSetNestedObject[tfInterfaceTypedExpander]{ + Field1: fwtypes.NewSetNestedObjectValueOfValueSliceMust(ctx, []tfInterfaceTypedExpander{ + { + Field1: types.StringValue("value1"), + }, + }), + }, + Target: &awsInterfaceSingle{}, + WantTarget: &awsInterfaceSingle{ + Field1: &awsInterfaceInterfaceImpl{ + AWSField: "value1", + }, + }, + }, + "empty list Source and empty interface Target": { + Source: tfListNestedObject[tfInterfaceTypedExpander]{ + Field1: fwtypes.NewListNestedObjectValueOfValueSliceMust(ctx, []tfInterfaceTypedExpander{}), + }, + Target: &awsInterfaceSlice{}, + WantTarget: &awsInterfaceSlice{ + Field1: []awsInterfaceInterface{}, + }, + }, + "non-empty list Source and 
non-empty interface Target": { + Source: tfListNestedObject[tfInterfaceTypedExpander]{ + Field1: fwtypes.NewListNestedObjectValueOfValueSliceMust(ctx, []tfInterfaceTypedExpander{ + { + Field1: types.StringValue("value1"), + }, + { + Field1: types.StringValue("value2"), + }, + }), + }, + Target: &awsInterfaceSlice{}, + WantTarget: &awsInterfaceSlice{ + Field1: []awsInterfaceInterface{ + &awsInterfaceInterfaceImpl{ + AWSField: "value1", + }, + &awsInterfaceInterfaceImpl{ + AWSField: "value2", + }, + }, + }, + }, + "empty set Source and empty interface Target": { + Source: tfSetNestedObject[tfInterfaceTypedExpander]{ + Field1: fwtypes.NewSetNestedObjectValueOfValueSliceMust(ctx, []tfInterfaceTypedExpander{}), + }, + Target: &awsInterfaceSlice{}, + WantTarget: &awsInterfaceSlice{ + Field1: []awsInterfaceInterface{}, + }, + }, + "non-empty set Source and non-empty interface Target": { + Source: tfSetNestedObject[tfInterfaceTypedExpander]{ + Field1: fwtypes.NewSetNestedObjectValueOfValueSliceMust(ctx, []tfInterfaceTypedExpander{ + { + Field1: types.StringValue("value1"), + }, + { + Field1: types.StringValue("value2"), + }, + }), + }, + Target: &awsInterfaceSlice{}, + WantTarget: &awsInterfaceSlice{ + Field1: []awsInterfaceInterface{ + &awsInterfaceInterfaceImpl{ + AWSField: "value1", + }, + &awsInterfaceInterfaceImpl{ + AWSField: "value2", + }, + }, + }, + }, + "object value Source and struct Target": { + Source: tfObjectValue[tfInterfaceTypedExpander]{ + Field1: fwtypes.NewObjectValueOfMust(ctx, &tfInterfaceTypedExpander{ + Field1: types.StringValue("value1"), + }), + }, + Target: &awsInterfaceSingle{}, + WantTarget: &awsInterfaceSingle{ + Field1: &awsInterfaceInterfaceImpl{ + AWSField: "value1", + }, + }, + }, + } + runAutoExpandTestCases(t, testCases, runChecks{CompareDiags: true, CompareTarget: true, GoldenLogs: true}) +} + +func TestExpandTypedExpander(t *testing.T) { + t.Parallel() + + ctx := context.Background() + + testCases := autoFlexTestCases{ + "top level 
struct Target": { + Source: tfTypedExpander{ + Field1: types.StringValue("value1"), + }, + Target: &awsExpander{}, + WantTarget: &awsExpander{ + AWSField: "value1", + }, + }, + "top level incompatible struct Target": { + Source: tfTypedExpander{ + Field1: types.StringValue("value1"), + }, + Target: &awsExpanderIncompatible{}, + ExpectedDiags: diagAF2[awsExpander, awsExpanderIncompatible](diagCannotBeAssigned), + }, + "top level expands to nil": { + Source: tfTypedExpanderToNil{ + Field1: types.StringValue("value1"), + }, + Target: &awsExpander{}, + ExpectedDiags: diagAF[tfTypedExpanderToNil](diagExpandsToNil), + }, + "single list Source and single struct Target": { + Source: tfTypedExpanderListNestedObject{ + Field1: fwtypes.NewListNestedObjectValueOfValueSliceMust(ctx, []tfTypedExpander{ + { + Field1: types.StringValue("value1"), + }, + }), + }, + Target: &awsExpanderSingleStruct{}, + WantTarget: &awsExpanderSingleStruct{ + Field1: awsExpander{ + AWSField: "value1", + }, + }, + }, + "single set Source and single struct Target": { + Source: tfSetNestedObject[tfTypedExpander]{ + Field1: fwtypes.NewSetNestedObjectValueOfValueSliceMust(ctx, []tfTypedExpander{ + { + Field1: types.StringValue("value1"), + }, + }), + }, + Target: &awsExpanderSingleStruct{}, + WantTarget: &awsExpanderSingleStruct{ + Field1: awsExpander{ + AWSField: "value1", + }, + }, + }, + "single list Source and single *struct Target": { + Source: tfTypedExpanderListNestedObject{ + Field1: fwtypes.NewListNestedObjectValueOfValueSliceMust(ctx, []tfTypedExpander{ + { + Field1: types.StringValue("value1"), + }, + }), + }, + Target: &awsExpanderSinglePtr{}, + WantTarget: &awsExpanderSinglePtr{ + Field1: &awsExpander{ + AWSField: "value1", + }, + }, + }, + "single set Source and single *struct Target": { + Source: tfTypedExpanderSetNestedObject{ + Field1: fwtypes.NewSetNestedObjectValueOfValueSliceMust(ctx, []tfTypedExpander{ + { + Field1: types.StringValue("value1"), + }, + }), + }, + Target: 
&awsExpanderSinglePtr{}, + WantTarget: &awsExpanderSinglePtr{ + Field1: &awsExpander{ + AWSField: "value1", + }, + }, + }, + "empty list Source and empty struct Target": { + Source: tfTypedExpanderListNestedObject{ + Field1: fwtypes.NewListNestedObjectValueOfValueSliceMust(ctx, []tfTypedExpander{}), + }, + Target: &awsExpanderStructSlice{}, + WantTarget: &awsExpanderStructSlice{ + Field1: []awsExpander{}, + }, + }, + "non-empty list Source and non-empty struct Target": { + Source: tfTypedExpanderListNestedObject{ + Field1: fwtypes.NewListNestedObjectValueOfValueSliceMust(ctx, []tfTypedExpander{ + { + Field1: types.StringValue("value1"), + }, + { + Field1: types.StringValue("value2"), + }, + }), + }, + Target: &awsExpanderStructSlice{}, + WantTarget: &awsExpanderStructSlice{ + Field1: []awsExpander{ + { + AWSField: "value1", + }, + { + AWSField: "value2", + }, + }, + }, + }, + "empty list Source and empty *struct Target": { + Source: tfTypedExpanderListNestedObject{ + Field1: fwtypes.NewListNestedObjectValueOfValueSliceMust(ctx, []tfTypedExpander{}), + }, + Target: &awsExpanderPtrSlice{}, + WantTarget: &awsExpanderPtrSlice{ + Field1: []*awsExpander{}, + }, + }, + "non-empty list Source and non-empty *struct Target": { + Source: tfTypedExpanderListNestedObject{ + Field1: fwtypes.NewListNestedObjectValueOfValueSliceMust(ctx, []tfTypedExpander{ + { + Field1: types.StringValue("value1"), + }, + { + Field1: types.StringValue("value2"), + }, + }), + }, + Target: &awsExpanderPtrSlice{}, + WantTarget: &awsExpanderPtrSlice{ + Field1: []*awsExpander{ + { + AWSField: "value1", + }, + { + AWSField: "value2", + }, + }, + }, + }, + "empty set Source and empty struct Target": { + Source: tfTypedExpanderSetNestedObject{ + Field1: fwtypes.NewSetNestedObjectValueOfValueSliceMust(ctx, []tfTypedExpander{}), + }, + Target: &awsExpanderStructSlice{}, + WantTarget: &awsExpanderStructSlice{ + Field1: []awsExpander{}, + }, + }, + "non-empty set Source and non-empty struct Target": { + 
Source: tfTypedExpanderSetNestedObject{ + Field1: fwtypes.NewSetNestedObjectValueOfValueSliceMust(ctx, []tfTypedExpander{ + { + Field1: types.StringValue("value1"), + }, + { + Field1: types.StringValue("value2"), + }, + }), + }, + Target: &awsExpanderStructSlice{}, + WantTarget: &awsExpanderStructSlice{ + Field1: []awsExpander{ + { + AWSField: "value1", + }, + { + AWSField: "value2", + }, + }, + }, + }, + "empty set Source and empty *struct Target": { + Source: tfTypedExpanderSetNestedObject{ + Field1: fwtypes.NewSetNestedObjectValueOfValueSliceMust(ctx, []tfTypedExpander{}), + }, + Target: &awsExpanderPtrSlice{}, + WantTarget: &awsExpanderPtrSlice{ + Field1: []*awsExpander{}, + }, + }, + "non-empty set Source and non-empty *struct Target": { + Source: tfTypedExpanderSetNestedObject{ + Field1: fwtypes.NewSetNestedObjectValueOfValueSliceMust(ctx, []tfTypedExpander{ + { + Field1: types.StringValue("value1"), + }, + { + Field1: types.StringValue("value2"), + }, + }), + }, + Target: &awsExpanderPtrSlice{}, + WantTarget: &awsExpanderPtrSlice{ + Field1: []*awsExpander{ + { + AWSField: "value1", + }, + { + AWSField: "value2", + }, + }, + }, + }, + "object value Source and struct Target": { + Source: tfTypedExpanderObjectValue{ + Field1: fwtypes.NewObjectValueOfMust(ctx, &tfTypedExpander{ + Field1: types.StringValue("value1"), + }), + }, + Target: &awsExpanderSingleStruct{}, + WantTarget: &awsExpanderSingleStruct{ + Field1: awsExpander{ + AWSField: "value1", + }, + }, + }, + "object value Source and *struct Target": { + Source: tfTypedExpanderObjectValue{ + Field1: fwtypes.NewObjectValueOfMust(ctx, &tfTypedExpander{ + Field1: types.StringValue("value1"), + }), + }, + Target: &awsExpanderSinglePtr{}, + WantTarget: &awsExpanderSinglePtr{ + Field1: &awsExpander{ + AWSField: "value1", + }, + }, + }, + } + runAutoExpandTestCases(t, testCases, runChecks{CompareDiags: true, CompareTarget: true, GoldenLogs: true}) +} + +func TestFlattenLogging_collections(t *testing.T) { + 
t.Parallel() + + testCases := autoFlexTestCases{ + "zero value slice or map of primitive types Source and Collection of primtive types Target": { + Source: &awsCollectionsOfPrimitiveElements{}, + Target: &tfCollectionsOfPrimitiveElements{}, + WantTarget: &tfCollectionsOfPrimitiveElements{ + Field1: types.ListNull(types.StringType), + Field2: types.ListNull(types.StringType), + Field3: types.SetNull(types.StringType), + Field4: types.SetNull(types.StringType), + Field5: types.MapNull(types.StringType), + Field6: types.MapNull(types.StringType), + }, + }, + "slice or map of primitive types Source and Collection of primitive types Target": { + Source: &awsCollectionsOfPrimitiveElements{ + Field1: []string{"a", "b"}, + Field2: aws.StringSlice([]string{"a", "b"}), + Field3: []string{"a", "b"}, + Field4: aws.StringSlice([]string{"a", "b"}), + Field5: map[string]string{"A": "a", "B": "b"}, + Field6: aws.StringMap(map[string]string{"A": "a", "B": "b"}), + }, + Target: &tfCollectionsOfPrimitiveElements{}, + WantTarget: &tfCollectionsOfPrimitiveElements{ + Field1: types.ListValueMust(types.StringType, []attr.Value{ + types.StringValue("a"), + types.StringValue("b"), + }), + Field2: types.ListValueMust(types.StringType, []attr.Value{ + types.StringValue("a"), + types.StringValue("b"), + }), + Field3: types.SetValueMust(types.StringType, []attr.Value{ + types.StringValue("a"), + types.StringValue("b"), + }), + Field4: types.SetValueMust(types.StringType, []attr.Value{ + types.StringValue("a"), + types.StringValue("b"), + }), + Field5: types.MapValueMust(types.StringType, map[string]attr.Value{ + "A": types.StringValue("a"), + "B": types.StringValue("b"), + }), + Field6: types.MapValueMust(types.StringType, map[string]attr.Value{ + "A": types.StringValue("a"), + "B": types.StringValue("b"), + }), + }, + }, + } + + runAutoFlattenTestCases(t, testCases, runChecks{CompareDiags: false, CompareTarget: true, GoldenLogs: true}) +} + +func TestFlattenInterfaceContract(t *testing.T) { + 
t.Parallel() + + testCases := autoFlexTestCases{ + "target field does not implement attr.Value Target": { + Source: &awsSingleStringValue{Field1: "a"}, + Target: &awsSingleStringValue{}, + ExpectedDiags: diagAF[string](diagFlatteningTargetDoesNotImplementAttrValue), + }, + "source struct field to non-attr.Value": { + Source: &awsRFC3339TimeValue{}, + Target: &awsRFC3339TimeValue{}, + ExpectedDiags: diagAF[time.Time](diagFlatteningTargetDoesNotImplementAttrValue), + }, + "source struct ptr field to non-attr.Value": { + Source: &awsRFC3339TimePointer{}, + Target: &awsRFC3339TimeValue{}, + ExpectedDiags: diagAF[time.Time](diagFlatteningTargetDoesNotImplementAttrValue), + }, + "source struct field to non-attr.Value ptr": { + Source: &awsRFC3339TimeValue{}, + Target: &awsRFC3339TimePointer{}, + ExpectedDiags: diagAF[*time.Time](diagFlatteningTargetDoesNotImplementAttrValue), + }, + "source struct ptr field to non-attr.Value ptr": { + Source: &awsRFC3339TimePointer{}, + Target: &awsRFC3339TimePointer{}, + ExpectedDiags: diagAF[*time.Time](diagFlatteningTargetDoesNotImplementAttrValue), + }, + } + + runAutoFlattenTestCases(t, testCases, runChecks{CompareDiags: true, CompareTarget: true, GoldenLogs: true}) +} + +func TestFlattenInterface(t *testing.T) { + t.Parallel() + + ctx := context.Background() + + testCases := autoFlexTestCases{ + "nil interface Source and list Target": { + Source: awsInterfaceSingle{ + Field1: nil, + }, + Target: &tfListNestedObject[tfInterfaceFlexer]{}, + WantTarget: &tfListNestedObject[tfInterfaceFlexer]{ + Field1: fwtypes.NewListNestedObjectValueOfNull[tfInterfaceFlexer](ctx), + }, + }, + "single interface Source and single list Target": { + Source: awsInterfaceSingle{ + Field1: &awsInterfaceInterfaceImpl{ + AWSField: "value1", + }, + }, + Target: &tfListNestedObject[tfInterfaceFlexer]{}, + WantTarget: &tfListNestedObject[tfInterfaceFlexer]{ + Field1: fwtypes.NewListNestedObjectValueOfValueSliceMust(ctx, []tfInterfaceFlexer{ + { + Field1: 
types.StringValue("value1"), + }, + }), + }, + }, + "nil interface Source and non-Flattener list Target": { + Source: awsInterfaceSingle{ + Field1: nil, + }, + Target: &tfListNestedObject[tfSingleStringField]{}, + WantTarget: &tfListNestedObject[tfSingleStringField]{ + Field1: fwtypes.NewListNestedObjectValueOfNull[tfSingleStringField](ctx), + }, + }, + "single interface Source and non-Flattener list Target": { + Source: awsInterfaceSingle{ + Field1: &awsInterfaceInterfaceImpl{ + AWSField: "value1", + }, + }, + Target: &tfListNestedObject[tfSingleStringField]{}, + WantTarget: &tfListNestedObject[tfSingleStringField]{ + Field1: fwtypes.NewListNestedObjectValueOfNull[tfSingleStringField](ctx), + }, + }, + + "nil interface Source and set Target": { + Source: awsInterfaceSingle{ + Field1: nil, + }, + Target: &tfSetNestedObject[tfInterfaceFlexer]{}, + WantTarget: &tfSetNestedObject[tfInterfaceFlexer]{ + Field1: fwtypes.NewSetNestedObjectValueOfNull[tfInterfaceFlexer](ctx), + }, + }, + "single interface Source and single set Target": { + Source: awsInterfaceSingle{ + Field1: &awsInterfaceInterfaceImpl{ + AWSField: "value1", + }, + }, + Target: &tfSetNestedObject[tfInterfaceFlexer]{}, + WantTarget: &tfSetNestedObject[tfInterfaceFlexer]{ + Field1: fwtypes.NewSetNestedObjectValueOfValueSliceMust(ctx, []tfInterfaceFlexer{ + { + Field1: types.StringValue("value1"), + }, + }), + }, + }, + + "nil interface list Source and empty list Target": { + Source: awsInterfaceSlice{ + Field1: nil, + }, + Target: &tfListNestedObject[tfInterfaceFlexer]{}, + WantTarget: &tfListNestedObject[tfInterfaceFlexer]{ + Field1: fwtypes.NewListNestedObjectValueOfNull[tfInterfaceFlexer](ctx), + }, + }, + "empty interface list Source and empty list Target": { + Source: awsInterfaceSlice{ + Field1: []awsInterfaceInterface{}, + }, + Target: &tfListNestedObject[tfInterfaceFlexer]{}, + WantTarget: &tfListNestedObject[tfInterfaceFlexer]{ + Field1: fwtypes.NewListNestedObjectValueOfValueSliceMust(ctx, 
[]tfInterfaceFlexer{}), + }, + }, + "non-empty interface list Source and non-empty list Target": { + Source: awsInterfaceSlice{ + Field1: []awsInterfaceInterface{ + &awsInterfaceInterfaceImpl{ + AWSField: "value1", + }, + &awsInterfaceInterfaceImpl{ + AWSField: "value2", + }, + }, + }, + Target: &tfListNestedObject[tfInterfaceFlexer]{}, + WantTarget: &tfListNestedObject[tfInterfaceFlexer]{ + Field1: fwtypes.NewListNestedObjectValueOfValueSliceMust(ctx, []tfInterfaceFlexer{ + { + Field1: types.StringValue("value1"), + }, + { + Field1: types.StringValue("value2"), + }, + }), + }, + }, + + "nil interface list Source and empty set Target": { + Source: awsInterfaceSlice{ + Field1: nil, + }, + Target: &tfSetNestedObject[tfInterfaceFlexer]{}, + WantTarget: &tfSetNestedObject[tfInterfaceFlexer]{ + Field1: fwtypes.NewSetNestedObjectValueOfNull[tfInterfaceFlexer](ctx), + }, + }, + "empty interface list Source and empty set Target": { + Source: awsInterfaceSlice{ + Field1: []awsInterfaceInterface{}, + }, + Target: &tfSetNestedObject[tfInterfaceFlexer]{}, + WantTarget: &tfSetNestedObject[tfInterfaceFlexer]{ + Field1: fwtypes.NewSetNestedObjectValueOfValueSliceMust(ctx, []tfInterfaceFlexer{}), + }, + }, + "non-empty interface list Source and non-empty set Target": { + Source: awsInterfaceSlice{ + Field1: []awsInterfaceInterface{ + &awsInterfaceInterfaceImpl{ + AWSField: "value1", + }, + &awsInterfaceInterfaceImpl{ + AWSField: "value2", + }, + }, + }, + Target: &tfSetNestedObject[tfInterfaceFlexer]{}, + WantTarget: &tfSetNestedObject[tfInterfaceFlexer]{ + Field1: fwtypes.NewSetNestedObjectValueOfValueSliceMust(ctx, []tfInterfaceFlexer{ + { + Field1: types.StringValue("value1"), + }, + { + Field1: types.StringValue("value2"), + }, + }), + }, + }, + "nil interface Source and nested object Target": { + Source: awsInterfaceSingle{ + Field1: nil, + }, + Target: &tfObjectValue[tfInterfaceFlexer]{}, + WantTarget: &tfObjectValue[tfInterfaceFlexer]{ + Field1: 
fwtypes.NewObjectValueOfNull[tfInterfaceFlexer](ctx), + }, + }, + "interface Source and nested object Target": { + Source: awsInterfaceSingle{ + Field1: &awsInterfaceInterfaceImpl{ + AWSField: "value1", + }, + }, + Target: &tfObjectValue[tfInterfaceFlexer]{}, + WantTarget: &tfObjectValue[tfInterfaceFlexer]{ + Field1: fwtypes.NewObjectValueOfMust(ctx, &tfInterfaceFlexer{ + Field1: types.StringValue("value1"), + }), + }, + }, + } + runAutoFlattenTestCases(t, testCases, runChecks{CompareDiags: true, CompareTarget: true, GoldenLogs: true}) +} + +func TestFlattenFlattener(t *testing.T) { + t.Parallel() + + ctx := context.Background() + + testCases := autoFlexTestCases{ + "top level struct Source": { + Source: awsExpander{ + AWSField: "value1", + }, + Target: &tfFlexer{}, + WantTarget: &tfFlexer{ + Field1: types.StringValue("value1"), + }, + }, + "top level incompatible struct Target": { + Source: awsExpanderIncompatible{ + Incompatible: 123, + }, + Target: &tfFlexer{}, + WantTarget: &tfFlexer{ + Field1: types.StringNull(), + }, + }, + "single struct Source and single list Target": { + Source: awsExpanderSingleStruct{ + Field1: awsExpander{ + AWSField: "value1", + }, + }, + Target: &tfExpanderListNestedObject{}, + WantTarget: &tfExpanderListNestedObject{ + Field1: fwtypes.NewListNestedObjectValueOfValueSliceMust(ctx, []tfFlexer{ + { + Field1: types.StringValue("value1"), + }, + }), + }, + }, + "nil *struct Source and null list Target": { + Source: awsExpanderSinglePtr{ + Field1: nil, + }, + Target: &tfExpanderListNestedObject{}, + WantTarget: &tfExpanderListNestedObject{ + Field1: fwtypes.NewListNestedObjectValueOfNull[tfFlexer](ctx), + }, + }, + "single struct Source and single set Target": { + Source: awsExpanderSingleStruct{ + Field1: awsExpander{ + AWSField: "value1", + }, + }, + Target: &tfExpanderSetNestedObject{}, + WantTarget: &tfExpanderSetNestedObject{ + Field1: fwtypes.NewSetNestedObjectValueOfValueSliceMust(ctx, []tfFlexer{ + { + Field1: 
types.StringValue("value1"), + }, + }), + }, + }, + "single *struct Source and single list Target": { + Source: awsExpanderSinglePtr{ + Field1: &awsExpander{ + AWSField: "value1", + }, + }, + Target: &tfExpanderListNestedObject{}, + WantTarget: &tfExpanderListNestedObject{ + Field1: fwtypes.NewListNestedObjectValueOfValueSliceMust(ctx, []tfFlexer{ + { + Field1: types.StringValue("value1"), + }, + }), + }, + }, + "single *struct Source and single set Target": { + Source: awsExpanderSinglePtr{ + Field1: &awsExpander{ + AWSField: "value1", + }, + }, + Target: &tfExpanderSetNestedObject{}, + WantTarget: &tfExpanderSetNestedObject{ + Field1: fwtypes.NewSetNestedObjectValueOfValueSliceMust(ctx, []tfFlexer{ + { + Field1: types.StringValue("value1"), + }, + }), + }, + }, + "nil *struct Source and null set Target": { + Source: awsExpanderSinglePtr{ + Field1: nil, + }, + Target: &tfExpanderSetNestedObject{}, + WantTarget: &tfExpanderSetNestedObject{ + Field1: fwtypes.NewSetNestedObjectValueOfNull[tfFlexer](ctx), + }, + }, + + "empty struct list Source and empty list Target": { + Source: &awsExpanderStructSlice{ + Field1: []awsExpander{}, + }, + Target: &tfExpanderListNestedObject{}, + WantTarget: &tfExpanderListNestedObject{ + Field1: fwtypes.NewListNestedObjectValueOfValueSliceMust(ctx, []tfFlexer{}), + }, + }, + "non-empty struct list Source and non-empty list Target": { + Source: &awsExpanderStructSlice{ + Field1: []awsExpander{ + { + AWSField: "value1", + }, + { + AWSField: "value2", + }, + }, + }, + Target: &tfExpanderListNestedObject{}, + WantTarget: &tfExpanderListNestedObject{ + Field1: fwtypes.NewListNestedObjectValueOfValueSliceMust(ctx, []tfFlexer{ + { + Field1: types.StringValue("value1"), + }, + { + Field1: types.StringValue("value2"), + }, + }), + }, + }, + "empty *struct list Source and empty list Target": { + Source: &awsExpanderPtrSlice{ + Field1: []*awsExpander{}, + }, + Target: &tfExpanderListNestedObject{}, + WantTarget: &tfExpanderListNestedObject{ + 
Field1: fwtypes.NewListNestedObjectValueOfValueSliceMust(ctx, []tfFlexer{}), + }, + }, + "non-empty *struct list Source and non-empty list Target": { + Source: &awsExpanderPtrSlice{ + Field1: []*awsExpander{ + { + AWSField: "value1", + }, + { + AWSField: "value2", + }, + }, + }, + Target: &tfExpanderListNestedObject{}, + WantTarget: &tfExpanderListNestedObject{ + Field1: fwtypes.NewListNestedObjectValueOfValueSliceMust(ctx, []tfFlexer{ + { + Field1: types.StringValue("value1"), + }, + { + Field1: types.StringValue("value2"), + }, + }), + }, + }, + "empty struct list Source and empty set Target": { + Source: awsExpanderStructSlice{ + Field1: []awsExpander{}, + }, + Target: &tfExpanderSetNestedObject{}, + WantTarget: &tfExpanderSetNestedObject{ + Field1: fwtypes.NewSetNestedObjectValueOfValueSliceMust(ctx, []tfFlexer{}), + }, + }, + "non-empty struct list Source and set Target": { + Source: awsExpanderStructSlice{ + Field1: []awsExpander{ + { + AWSField: "value1", + }, + { + AWSField: "value2", + }, + }, + }, + Target: &tfExpanderSetNestedObject{}, + WantTarget: &tfExpanderSetNestedObject{ + Field1: fwtypes.NewSetNestedObjectValueOfValueSliceMust(ctx, []tfFlexer{ + { + Field1: types.StringValue("value1"), + }, + { + Field1: types.StringValue("value2"), + }, + }), + }, + }, + "empty *struct list Source and empty set Target": { + Source: awsExpanderPtrSlice{ + Field1: []*awsExpander{}, + }, + Target: &tfExpanderSetNestedObject{}, + WantTarget: &tfExpanderSetNestedObject{ + Field1: fwtypes.NewSetNestedObjectValueOfValueSliceMust(ctx, []tfFlexer{}), + }, + }, + "non-empty *struct list Source and non-empty set Target": { + Source: awsExpanderPtrSlice{ + Field1: []*awsExpander{ + { + AWSField: "value1", + }, + { + AWSField: "value2", + }, + }, + }, + Target: &tfExpanderSetNestedObject{}, + WantTarget: &tfExpanderSetNestedObject{ + Field1: fwtypes.NewSetNestedObjectValueOfValueSliceMust(ctx, []tfFlexer{ + { + Field1: types.StringValue("value1"), + }, + { + Field1: 
types.StringValue("value2"), + }, + }), + }, + }, + "struct Source and object value Target": { + Source: awsExpanderSingleStruct{ + Field1: awsExpander{ + AWSField: "value1", + }, + }, + Target: &tfExpanderObjectValue{}, + WantTarget: &tfExpanderObjectValue{ + Field1: fwtypes.NewObjectValueOfMust(ctx, &tfFlexer{ + Field1: types.StringValue("value1"), + }), + }, + }, + "*struct Source and object value Target": { + Source: awsExpanderSinglePtr{ + Field1: &awsExpander{ + AWSField: "value1", + }, + }, + Target: &tfExpanderObjectValue{}, + WantTarget: &tfExpanderObjectValue{ + Field1: fwtypes.NewObjectValueOfMust(ctx, &tfFlexer{ + Field1: types.StringValue("value1"), + }), + }, + }, + } + runAutoFlattenTestCases(t, testCases, runChecks{CompareDiags: true, CompareTarget: true, GoldenLogs: true}) +} diff --git a/internal/framework/flex/auto_expand.go b/internal/framework/flex/autoflex_expand.go similarity index 83% rename from internal/framework/flex/auto_expand.go rename to internal/framework/flex/autoflex_expand.go index 67c40106e73e..c76c31ee6d3f 100644 --- a/internal/framework/flex/auto_expand.go +++ b/internal/framework/flex/autoflex_expand.go @@ -16,7 +16,6 @@ import ( "github.com/hashicorp/terraform-plugin-framework/types/basetypes" "github.com/hashicorp/terraform-plugin-log/tflog" fwtypes "github.com/hashicorp/terraform-provider-aws/internal/framework/types" - smithyjson "github.com/hashicorp/terraform-provider-aws/internal/json" tfreflect "github.com/hashicorp/terraform-provider-aws/internal/reflect" ) @@ -487,8 +486,8 @@ func (expander autoExpander) string(ctx context.Context, vFrom basetypes.StringV } case reflect.Interface: - if s, ok := vFrom.(fwtypes.SmithyJSON[smithyjson.JSONStringer]); ok { - v, d := s.ValueInterface() + if s, ok := vFrom.(fwtypes.SmithyDocumentValue); ok { + v, d := s.ToSmithyObjectDocument(ctx) diags.Append(d...) 
if diags.HasError() { return diags @@ -627,6 +626,13 @@ func (expander autoExpander) listOrSetOfInt64(ctx context.Context, vFrom valueWi var diags diag.Diagnostics switch vTo.Kind() { + case reflect.Struct: + // Check if target is an XML wrapper struct + if isXMLWrapperStruct(vTo.Type()) { + diags.Append(expander.xmlWrapper(ctx, vFrom, vTo, "Items")...) + return diags + } + case reflect.Slice: switch tSliceElem := vTo.Type().Elem(); tSliceElem.Kind() { case reflect.Int32, reflect.Int64: @@ -699,6 +705,13 @@ func (expander autoExpander) listOrSetOfString(ctx context.Context, vFrom valueW var diags diag.Diagnostics switch vTo.Kind() { + case reflect.Struct: + // Check if target is an XML wrapper struct + if isXMLWrapperStruct(vTo.Type()) { + diags.Append(expander.xmlWrapper(ctx, vFrom, vTo, "Items")...) + return diags + } + case reflect.Slice: switch tSliceElem := vTo.Type().Elem(); tSliceElem.Kind() { case reflect.String: @@ -919,6 +932,12 @@ func (expander autoExpander) nestedObjectCollection(ctx context.Context, sourceP switch tTo := vTo.Type(); vTo.Kind() { case reflect.Struct: + // Check if target is an XML wrapper struct before handling as generic struct + if isXMLWrapperStruct(tTo) { + diags.Append(expander.nestedObjectCollectionToXMLWrapper(ctx, sourcePath, vFrom, targetPath, vTo)...) + return diags + } + sourcePath := sourcePath.AtListIndex(0) ctx = tflog.SubsystemSetField(ctx, subsystemName, logAttrKeySourcePath, sourcePath.String()) diags.Append(expander.nestedObjectToStruct(ctx, sourcePath, vFrom, targetPath, tTo, vTo)...) 
@@ -927,6 +946,17 @@ func (expander autoExpander) nestedObjectCollection(ctx context.Context, sourceP case reflect.Pointer: switch tElem := tTo.Elem(); tElem.Kind() { case reflect.Struct: + // Check if target is a pointer to XML wrapper struct + if isXMLWrapperStruct(tElem) { + // Create new instance of the XML wrapper struct + newWrapper := reflect.New(tElem) + diags.Append(expander.nestedObjectCollectionToXMLWrapper(ctx, sourcePath, vFrom, targetPath, newWrapper.Elem())...) + if !diags.HasError() { + vTo.Set(newWrapper) + } + return diags + } + // // types.List(OfObject) -> *struct. // @@ -1166,7 +1196,7 @@ func expandStruct(ctx context.Context, sourcePath path.Path, from any, targetPat fromFieldName := fromField.Name _, fromFieldOpts := autoflexTags(fromField) - toField, ok := findFieldFuzzy(ctx, fromFieldName, typeFrom, typeTo, flexer) + toField, ok := (&fuzzyFieldFinder{}).findField(ctx, fromFieldName, typeFrom, typeTo, flexer) if !ok { // Corresponding field not found in to. tflog.SubsystemDebug(ctx, subsystemName, "No corresponding field", map[string]any{ @@ -1431,3 +1461,221 @@ func diagExpandingIncompatibleTypes(sourceType, targetType reflect.Type) diag.Er fmt.Sprintf("Source type %q cannot be expanded to target type %q.", fullTypeName(sourceType), fullTypeName(targetType)), ) } + +// xmlWrapper handles expansion from TF collection types to AWS XML wrapper structs +// that follow the pattern: {Items: []T, Quantity: *int32} +func (expander autoExpander) xmlWrapper(ctx context.Context, vFrom valueWithElementsAs, vTo reflect.Value, wrapperField string) diag.Diagnostics { + var diags diag.Diagnostics + + // Verify target is a struct with Items and Quantity fields + if !isXMLWrapperStruct(vTo.Type()) { + tflog.SubsystemError(ctx, subsystemName, "Target is not a valid XML wrapper struct", map[string]any{ + "target_type": vTo.Type().String(), + }) + diags.Append(diagExpandingIncompatibleTypes(reflect.TypeOf(vFrom), vTo.Type())) + return diags + } + + // Get the 
Items and Quantity fields + itemsField := vTo.FieldByName("Items") + quantityField := vTo.FieldByName("Quantity") + + if !itemsField.IsValid() || !quantityField.IsValid() { + tflog.SubsystemError(ctx, subsystemName, "XML wrapper struct missing required fields") + diags.Append(diagExpandingIncompatibleTypes(reflect.TypeOf(vFrom), vTo.Type())) + return diags + } + + // Convert the collection elements to a slice + elements := vFrom.Elements() + itemsSliceType := itemsField.Type() + itemsSlice := reflect.MakeSlice(itemsSliceType, len(elements), len(elements)) + + // Convert each element + for i, elem := range elements { + itemValue := itemsSlice.Index(i) + if !itemValue.CanSet() { + continue + } + + // Handle different element types + switch elemTyped := elem.(type) { + case basetypes.StringValuable: + if itemsSliceType.Elem().Kind() == reflect.String { + strVal, d := elemTyped.ToStringValue(ctx) + diags.Append(d...) + if !diags.HasError() { + itemValue.SetString(strVal.ValueString()) + } + } + case basetypes.Int64Valuable: + if elemKind := itemsSliceType.Elem().Kind(); elemKind == reflect.Int32 { + int64Val, d := elemTyped.ToInt64Value(ctx) + diags.Append(d...) + if !diags.HasError() { + itemValue.SetInt(int64(int32(int64Val.ValueInt64()))) + } + } else if elemKind == reflect.Int64 { + int64Val, d := elemTyped.ToInt64Value(ctx) + diags.Append(d...) + if !diags.HasError() { + itemValue.SetInt(int64Val.ValueInt64()) + } + } + case basetypes.Int32Valuable: + if itemsSliceType.Elem().Kind() == reflect.Int32 { + int32Val, d := elemTyped.ToInt32Value(ctx) + diags.Append(d...) 
+ if !diags.HasError() { + itemValue.SetInt(int64(int32Val.ValueInt32())) + } + } + default: + // For complex types, try direct assignment if types are compatible + if elem != nil && !elem.IsNull() && !elem.IsUnknown() { + if itemValue.Type().AssignableTo(reflect.TypeOf(elem)) { + itemValue.Set(reflect.ValueOf(elem)) + } + } + } + } + + // Set the Items field + if itemsField.CanSet() { + itemsField.Set(itemsSlice) + } + + // Set the Quantity field + if quantityField.CanSet() && quantityField.Type().Kind() == reflect.Pointer { + quantity := int32(len(elements)) + quantityPtr := reflect.New(quantityField.Type().Elem()) + quantityPtr.Elem().Set(reflect.ValueOf(quantity)) + quantityField.Set(quantityPtr) + } + + tflog.SubsystemTrace(ctx, subsystemName, "Successfully expanded to XML wrapper", map[string]any{ + "source_type": reflect.TypeOf(vFrom).String(), + "target_type": vTo.Type().String(), + "items_count": len(elements), + "wrapper_field": wrapperField, + }) + + return diags +} + +// isXMLWrapperStruct detects AWS SDK types that follow the XML wrapper pattern +// with Items slice and Quantity pointer fields +func isXMLWrapperStruct(t reflect.Type) bool { + if t.Kind() != reflect.Struct { + return false + } + + // Check for Items field (slice) + itemsField, hasItems := t.FieldByName("Items") + if !hasItems || itemsField.Type.Kind() != reflect.Slice { + return false + } + + // Check for Quantity field (pointer to int32) + quantityField, hasQuantity := t.FieldByName("Quantity") + if !hasQuantity || quantityField.Type.Kind() != reflect.Pointer { + return false + } + + // Quantity should be *int32 + if quantityField.Type.Elem().Kind() != reflect.Int32 { + return false + } + + return true +} + +// nestedObjectCollectionToXMLWrapper converts a NestedObjectCollectionValue to an XML wrapper struct +// that follows the pattern: {Items: []T, Quantity: *int32} +func (expander autoExpander) nestedObjectCollectionToXMLWrapper(ctx context.Context, _ path.Path, vFrom 
fwtypes.NestedObjectCollectionValue, _ path.Path, vTo reflect.Value) diag.Diagnostics { + var diags diag.Diagnostics + + tflog.SubsystemTrace(ctx, subsystemName, "Expanding NestedObjectCollection to XML wrapper", map[string]any{ + "source_type": vFrom.Type(ctx).String(), + "target_type": vTo.Type().String(), + }) + + // Get the nested Objects as a slice + from, d := vFrom.ToObjectSlice(ctx) + diags.Append(d...) + if diags.HasError() { + return diags + } + + // Get reflect value of the slice + fromSlice := reflect.ValueOf(from) + if fromSlice.Kind() != reflect.Slice { + diags.AddError("Invalid source", "ToObjectSlice did not return a slice") + return diags + } + + // Get the Items and Quantity fields from target struct + itemsField := vTo.FieldByName("Items") + quantityField := vTo.FieldByName("Quantity") + + if !itemsField.IsValid() || !quantityField.IsValid() { + tflog.SubsystemError(ctx, subsystemName, "XML wrapper struct missing required fields") + diags.Append(diagExpandingIncompatibleTypes(reflect.TypeOf(vFrom), vTo.Type())) + return diags + } + + // Create the Items slice + itemsSliceType := itemsField.Type() + itemsCount := fromSlice.Len() + itemsSlice := reflect.MakeSlice(itemsSliceType, itemsCount, itemsCount) + + tflog.SubsystemTrace(ctx, subsystemName, "Converting nested objects to items", map[string]any{ + "items_count": itemsCount, + "items_type": itemsSliceType.String(), + }) + + // Convert each nested object + for i := range itemsCount { + sourceItem := fromSlice.Index(i) + targetItem := itemsSlice.Index(i) + + // Create new instance for the target item + targetItemType := itemsSliceType.Elem() + if targetItemType.Kind() == reflect.Pointer { + // For []*struct + newItem := reflect.New(targetItemType.Elem()) + diags.Append(autoExpandConvert(ctx, sourceItem.Interface(), newItem.Interface(), expander)...) 
+ if diags.HasError() { + return diags + } + targetItem.Set(newItem) + } else { + // For []struct - need to set the value directly + newItem := reflect.New(targetItemType) + diags.Append(autoExpandConvert(ctx, sourceItem.Interface(), newItem.Interface(), expander)...) + if diags.HasError() { + return diags + } + targetItem.Set(newItem.Elem()) + } + } + + // Set the Items field + if itemsField.CanSet() { + itemsField.Set(itemsSlice) + } + + // Set the Quantity field + if quantityField.CanSet() && quantityField.Type().Kind() == reflect.Pointer { + quantity := int32(itemsCount) + quantityPtr := reflect.New(quantityField.Type().Elem()) + quantityPtr.Elem().Set(reflect.ValueOf(quantity)) + quantityField.Set(quantityPtr) + } + + tflog.SubsystemTrace(ctx, subsystemName, "Successfully expanded NestedObjectCollection to XML wrapper", map[string]any{ + "items_count": itemsCount, + }) + + return diags +} diff --git a/internal/framework/flex/auto_flatten.go b/internal/framework/flex/autoflex_flatten.go similarity index 83% rename from internal/framework/flex/auto_flatten.go rename to internal/framework/flex/autoflex_flatten.go index 02919cadf4d8..a3496004a268 100644 --- a/internal/framework/flex/auto_flatten.go +++ b/internal/framework/flex/autoflex_flatten.go @@ -11,6 +11,7 @@ import ( "strings" "time" + smithydocument "github.com/aws/smithy-go/document" "github.com/hashicorp/terraform-plugin-framework-timetypes/timetypes" "github.com/hashicorp/terraform-plugin-framework/attr" "github.com/hashicorp/terraform-plugin-framework/diag" @@ -19,8 +20,8 @@ import ( "github.com/hashicorp/terraform-plugin-framework/types/basetypes" "github.com/hashicorp/terraform-plugin-log/tflog" fwtypes "github.com/hashicorp/terraform-provider-aws/internal/framework/types" - smithyjson "github.com/hashicorp/terraform-provider-aws/internal/json" tfreflect "github.com/hashicorp/terraform-provider-aws/internal/reflect" + tfsmithy "github.com/hashicorp/terraform-provider-aws/internal/smithy" 
"github.com/shopspring/decimal" ) @@ -619,27 +620,24 @@ func (flattener autoFlattener) interface_(ctx context.Context, vFrom reflect.Val // // JSONStringer -> types.String-ish. // - if vFrom.Type().Implements(reflect.TypeFor[smithyjson.JSONStringer]()) { - tflog.SubsystemInfo(ctx, subsystemName, "Source implements json.JSONStringer") + if vFrom.Type().Implements(reflect.TypeFor[smithydocument.Marshaler]()) { + tflog.SubsystemInfo(ctx, subsystemName, "Source implements smithydocument.Marshaler") stringValue := types.StringNull() if vFrom.IsNil() { tflog.SubsystemTrace(ctx, subsystemName, "Flattening null value") } else { - doc := vFrom.Interface().(smithyjson.JSONStringer) - b, err := doc.MarshalSmithyDocument() + doc := vFrom.Interface().(smithydocument.Marshaler) + s, err := tfsmithy.DocumentToJSONString(doc) if err != nil { - // An error here would be an upstream error in the AWS SDK, because errors in json.Marshal - // are caused by conditions such as cyclic structures - // See https://pkg.go.dev/encoding/json#Marshal tflog.SubsystemError(ctx, subsystemName, "Marshalling JSON document", map[string]any{ logAttrKeyError: err.Error(), }) diags.Append(diagFlatteningMarshalSmithyDocument(reflect.TypeOf(doc), err)) return diags } - stringValue = types.StringValue(string(b)) + stringValue = types.StringValue(s) } v, d := tTo.ValueFromString(ctx, stringValue) diags.Append(d...) 
@@ -1489,6 +1487,222 @@ func (flattener autoFlattener) sliceOfStructToNestedObjectCollection(ctx context return diags } +// xmlWrapperFlatten handles flattening from AWS XML wrapper structs to TF collection types +// that follow the pattern: {Items: []T, Quantity: *int32} -> []T +func (flattener autoFlattener) xmlWrapperFlatten(ctx context.Context, vFrom reflect.Value, tTo attr.Type, vTo reflect.Value, wrapperField string) diag.Diagnostics { + var diags diag.Diagnostics + + tflog.SubsystemTrace(ctx, subsystemName, "Starting XML wrapper flatten", map[string]any{ + "source_type": vFrom.Type().String(), + "target_type": tTo.String(), + "wrapper_field": wrapperField, + }) + + // Verify source is a valid XML wrapper struct + if !isXMLWrapperStruct(vFrom.Type()) { + tflog.SubsystemError(ctx, subsystemName, "Source is not a valid XML wrapper struct", map[string]any{ + "source_type": vFrom.Type().String(), + }) + diags.Append(DiagFlatteningIncompatibleTypes(vFrom.Type(), reflect.TypeOf(vTo.Interface()))) + return diags + } + + // Get the Items field from the source wrapper struct + itemsField := vFrom.FieldByName("Items") + if !itemsField.IsValid() { + tflog.SubsystemError(ctx, subsystemName, "XML wrapper struct missing Items field") + diags.Append(DiagFlatteningIncompatibleTypes(vFrom.Type(), reflect.TypeOf(vTo.Interface()))) + return diags + } + + tflog.SubsystemTrace(ctx, subsystemName, "Found Items field", map[string]any{ + "items_type": itemsField.Type().String(), + "items_kind": itemsField.Kind().String(), + "items_len": itemsField.Len(), + "items_is_nil": itemsField.IsNil(), + }) + + // Determine element type + var elementType attr.Type = types.StringType // default + if tToWithElem, ok := tTo.(attr.TypeWithElementType); ok { + elementType = tToWithElem.ElementType() + tflog.SubsystemTrace(ctx, subsystemName, "Using target element type", map[string]any{ + "element_type": elementType.String(), + }) + } + + // Handle different target collection types + switch tTo := 
tTo.(type) { + case basetypes.ListTypable: + // Items []T -> types.List + if itemsField.IsNil() { + tflog.SubsystemTrace(ctx, subsystemName, "Flattening XML wrapper with ListNull") + to, d := tTo.ValueFromList(ctx, types.ListNull(elementType)) + diags.Append(d...) + if diags.HasError() { + return diags + } + vTo.Set(reflect.ValueOf(to)) + return diags + } + + // Convert items slice to list elements + itemsLen := itemsField.Len() + elements := make([]attr.Value, itemsLen) + + tflog.SubsystemTrace(ctx, subsystemName, "Converting items to list elements", map[string]any{ + "items_count": itemsLen, + }) + + for i := range itemsLen { + item := itemsField.Index(i) + + tflog.SubsystemTrace(ctx, subsystemName, "Processing item", map[string]any{ + "index": i, + "item_kind": item.Kind().String(), + "item_value": item.Interface(), + }) + + // Convert each item based on its type + switch item.Kind() { + case reflect.Int32: + elements[i] = types.Int64Value(item.Int()) + case reflect.String: + elements[i] = types.StringValue(item.String()) + default: + // For complex types, handle struct conversion if needed + if item.Kind() == reflect.Struct { + // This would need to be handled by a nested object conversion + // For now, we'll return an error for unsupported types + diags.Append(DiagFlatteningIncompatibleTypes(item.Type(), reflect.TypeOf(elementType))) + return diags + } + } + } + + tflog.SubsystemTrace(ctx, subsystemName, "Creating list value", map[string]any{ + "element_count": len(elements), + "element_type": elementType.String(), + }) + + list, d := types.ListValue(elementType, elements) + diags.Append(d...) + if diags.HasError() { + tflog.SubsystemError(ctx, subsystemName, "Error creating list value", map[string]any{ + "error": d.Errors(), + }) + return diags + } + + to, d := tTo.ValueFromList(ctx, list) + diags.Append(d...) 
+ if diags.HasError() { + tflog.SubsystemError(ctx, subsystemName, "Error converting to target list", map[string]any{ + "error": d.Errors(), + }) + return diags + } + + tflog.SubsystemTrace(ctx, subsystemName, "Setting target list value") + vTo.Set(reflect.ValueOf(to)) + return diags + + case basetypes.SetTypable: + // Items []T -> types.Set + if itemsField.IsNil() { + tflog.SubsystemTrace(ctx, subsystemName, "Flattening XML wrapper with SetNull") + to, d := tTo.ValueFromSet(ctx, types.SetNull(elementType)) + diags.Append(d...) + if diags.HasError() { + return diags + } + vTo.Set(reflect.ValueOf(to)) + return diags + } + + // Convert items slice to set elements + itemsLen := itemsField.Len() + elements := make([]attr.Value, itemsLen) + + tflog.SubsystemTrace(ctx, subsystemName, "Converting items to set elements", map[string]any{ + "items_count": itemsLen, + }) + + for i := range itemsLen { + item := itemsField.Index(i) + + tflog.SubsystemTrace(ctx, subsystemName, "Processing item", map[string]any{ + "index": i, + "item_kind": item.Kind().String(), + "item_value": item.Interface(), + }) + + // Convert each item based on its type + switch item.Kind() { + case reflect.Int32: + elements[i] = types.Int64Value(item.Int()) + case reflect.String: + elements[i] = types.StringValue(item.String()) + case reflect.Struct: + // Handle complex struct types by converting to the target element type + if elemTyper, ok := tTo.(attr.TypeWithElementType); ok && elemTyper.ElementType() != nil { + elemType := elemTyper.ElementType() + + // Create a new instance of the target element type + targetValue := reflect.New(reflect.TypeOf(elemType.ValueType(ctx))).Elem() + + // Use AutoFlex to flatten the struct to the target type + diags.Append(flattener.convert(ctx, path.Empty(), item, path.Empty(), targetValue, fieldOpts{})...) 
+ if diags.HasError() { + return diags + } + + // The converted value should implement attr.Value + if attrVal, ok := targetValue.Interface().(attr.Value); ok { + elements[i] = attrVal + } else { + diags.Append(DiagFlatteningIncompatibleTypes(item.Type(), reflect.TypeOf(targetValue.Interface()))) + return diags + } + } else { + diags.Append(DiagFlatteningIncompatibleTypes(item.Type(), reflect.TypeOf(elementType))) + return diags + } + default: + // For other complex types, handle conversion if needed + diags.Append(DiagFlatteningIncompatibleTypes(item.Type(), reflect.TypeOf(elementType))) + return diags + } + } + + tflog.SubsystemTrace(ctx, subsystemName, "Creating set value", map[string]any{ + "element_count": len(elements), + "element_type": elementType.String(), + }) + + set, d := types.SetValue(elementType, elements) + diags.Append(d...) + if diags.HasError() { + return diags + } + + to, d := tTo.ValueFromSet(ctx, set) + diags.Append(d...) + if diags.HasError() { + return diags + } + + vTo.Set(reflect.ValueOf(to)) + return diags + } + + tflog.SubsystemError(ctx, subsystemName, "Unsupported target type for XML wrapper flattening", map[string]any{ + "target_type": tTo, + }) + diags.Append(DiagFlatteningIncompatibleTypes(vFrom.Type(), reflect.TypeOf(vTo.Interface()))) + return diags +} + // flattenStruct traverses struct `from`, calling `flexer` for each exported field. 
func flattenStruct(ctx context.Context, sourcePath path.Path, from any, targetPath path.Path, to any, flexer autoFlexer) diag.Diagnostics { var diags diag.Diagnostics @@ -1511,10 +1725,49 @@ func flattenStruct(ctx context.Context, sourcePath path.Path, from any, targetPa typeFrom := valFrom.Type() typeTo := valTo.Type() + // Special handling: Check if the entire source struct is an XML wrapper + // and should be flattened to a target field with wrapper tag + if isXMLWrapperStruct(typeFrom) { + for toField := range tfreflect.ExportedStructFields(typeTo) { + toFieldName := toField.Name + _, toOpts := autoflexTags(toField) + if wrapperField := toOpts.WrapperField(); wrapperField != "" { + toFieldVal := valTo.FieldByIndex(toField.Index) + if !toFieldVal.CanSet() { + continue + } + + tflog.SubsystemTrace(ctx, subsystemName, "Converting entire XML wrapper struct to collection field", map[string]any{ + logAttrKeySourceType: typeFrom.String(), + logAttrKeyTargetFieldname: toFieldName, + "wrapper_field": wrapperField, + }) + + valTo, ok := toFieldVal.Interface().(attr.Value) + if !ok { + tflog.SubsystemError(ctx, subsystemName, "Target field does not implement attr.Value") + diags.Append(diagFlatteningTargetDoesNotImplementAttrValue(reflect.TypeOf(toFieldVal.Interface()))) + return diags + } + + if f, ok := flexer.(*autoFlattener); ok { + diags.Append(f.xmlWrapperFlatten(ctx, valFrom, valTo.Type(ctx), toFieldVal, wrapperField)...) 
+ } else { + diags.Append(DiagFlatteningIncompatibleTypes(valFrom.Type(), reflect.TypeOf(toFieldVal.Interface()))) + } + if diags.HasError() { + return diags + } + // Successfully handled as XML wrapper, don't process individual fields + return diags + } + } + } + for fromField := range flattenSourceFields(ctx, typeFrom, flexer.getOptions()) { fromFieldName := fromField.Name - toField, ok := findFieldFuzzy(ctx, fromFieldName, typeFrom, typeTo, flexer) + toField, ok := (&fuzzyFieldFinder{}).findField(ctx, fromFieldName, typeFrom, typeTo, flexer) if !ok { // Corresponding field not found in to. tflog.SubsystemDebug(ctx, subsystemName, "No corresponding field", map[string]any{ @@ -1553,6 +1806,35 @@ func flattenStruct(ctx context.Context, sourcePath path.Path, from any, targetPa logAttrKeyTargetFieldname: toFieldName, }) + // Check if target has wrapper tag and source is an XML wrapper struct + if wrapperField := toOpts.WrapperField(); wrapperField != "" { + fromFieldVal := valFrom.FieldByIndex(fromField.Index) + if isXMLWrapperStruct(fromFieldVal.Type()) { + tflog.SubsystemTrace(ctx, subsystemName, "Converting XML wrapper struct to collection", map[string]any{ + logAttrKeySourceFieldname: fromFieldName, + logAttrKeyTargetFieldname: toFieldName, + "wrapper_field": wrapperField, + }) + + valTo, ok := toFieldVal.Interface().(attr.Value) + if !ok { + tflog.SubsystemError(ctx, subsystemName, "Target field does not implement attr.Value") + diags.Append(diagFlatteningTargetDoesNotImplementAttrValue(reflect.TypeOf(toFieldVal.Interface()))) + break + } + + if f, ok := flexer.(*autoFlattener); ok { + diags.Append(f.xmlWrapperFlatten(ctx, fromFieldVal, valTo.Type(ctx), toFieldVal, wrapperField)...) 
+ } else { + diags.Append(DiagFlatteningIncompatibleTypes(fromFieldVal.Type(), reflect.TypeOf(toFieldVal.Interface()))) + } + if diags.HasError() { + break + } + continue + } + } + opts := fieldOpts{ + legacy: toOpts.Legacy(), + omitempty: toOpts.OmitEmpty(), diff --git a/internal/framework/flex/autoflex_golden_test.go b/internal/framework/flex/autoflex_golden_test.go new file mode 100644 index 000000000000..8b116abf1441 --- /dev/null +++ b/internal/framework/flex/autoflex_golden_test.go @@ -0,0 +1,227 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package flex + +// This file contains helpers for golden snapshot testing of Autoflex logging output. +// +// To regenerate golden snapshots after making changes to logging output: +// go test -run <TestName> -update-golden +// Example: go test -run TestExpandExpander -update-golden +// For the whole file: +// cd internal/framework/flex +// go test -v -update-golden . + +import ( + "bytes" + "encoding/json" + "flag" + "fmt" + "maps" + "os" + "path/filepath" + "regexp" + "strings" + "testing" + + "github.com/YakDriver/regexache" +) + +var updateGolden = flag.Bool("update-golden", false, "update golden files") + +// normalize a single log line: drop volatile fields, normalize types, etc.
+func normalizeLogLine(m map[string]any) map[string]any { + // clone (so we don't mutate the original) + out := make(map[string]any, len(m)) + maps.Copy(out, m) + + // Common volatile keys that could be removed + /* + delete(out, "@timestamp") + delete(out, "timestamp") + delete(out, "time") + delete(out, "caller") + delete(out, "pid") + delete(out, "goroutine") + */ + + // Example of normalizing a field with a regex (e.g., stripping version suffixes) + if s, ok := out["source_type"].(string); ok { + out["source_type"] = regexache.MustCompile(`@v[0-9.]+`).ReplaceAllString(s, "") + } + + return out +} + +func normalizeLogs(lines []map[string]any) []map[string]any { + out := make([]map[string]any, 0, len(lines)) + for _, m := range lines { + out = append(out, normalizeLogLine(m)) + } + return out +} + +func writeGolden(t *testing.T, path string, v any) { + t.Helper() + + data, err := json.MarshalIndent(v, "", " ") + if err != nil { + t.Fatalf("marshal golden data for %s: %v", path, err) + } + + dir := filepath.Dir(path) + if err := os.MkdirAll(dir, 0o755); err != nil { + t.Fatalf("create directory %s: %v", dir, err) + } + + if err := os.WriteFile(path, data, 0o644); err != nil { + t.Fatalf("write golden file %s: %v", path, err) + } +} + +func readGolden(t *testing.T, path string) []byte { + t.Helper() + + data, err := os.ReadFile(path) + if err != nil { + t.Fatalf("read golden file %s: %v", path, err) + } + + return data +} + +func compareWithGolden(t *testing.T, goldenPath string, got any) { + t.Helper() + + data, err := json.MarshalIndent(got, "", " ") + if err != nil { + t.Fatalf("marshal comparison data for %s: %v", goldenPath, err) + } + + // Update golden file if flag is set + if *updateGolden { + writeGolden(t, goldenPath, got) + return + } + + // Read and compare with existing golden file + want := readGolden(t, goldenPath) + if bytes.Equal(bytes.TrimSpace(want), bytes.TrimSpace(data)) { + return // Files match, test passes + } + + // Files differ, fail the 
test with detailed output + t.Fatalf("comparison failed for golden file %s\nExpected content from: %s\nActual content:\n%s", + goldenPath, goldenPath, string(data)) +} + +// autoGenerateGoldenPath creates a golden file path from test name and case description. +// Automatically determines subdirectory from the test function name: +// TestExpandLogging_collections -> searches for it in autoflex_*_test.go files +func autoGenerateGoldenPath(t *testing.T, fullTestName, testCaseName string) string { + t.Helper() + // Extract the base test function name from the full path + // fullTestName might be "TestExpandLogging_collections/Collection_of_primitive_types_Source_and_slice_or_map_of_primtive_types_Target" + // We want to extract "TestExpandLogging_collections" + baseName := fullTestName + if slashIndex := strings.Index(fullTestName, "/"); slashIndex != -1 { + baseName = fullTestName[:slashIndex] + } + + // Convert TestExpandLogging_collections -> expand_logging_collections + cleanTestName := strings.TrimPrefix(baseName, "Test") + cleanTestName = camelToSnake(cleanTestName) + + // Clean case name: first replace '*' with "pointer " to handle cases like "*struct" -> "pointer struct" + cleanCaseName := strings.ReplaceAll(testCaseName, "*", "pointer ") + // Then replace spaces with underscores and convert to lowercase + cleanCaseName = strings.ReplaceAll(cleanCaseName, " ", "_") + cleanCaseName = strings.ToLower(cleanCaseName) + // Remove special characters but keep underscores and alphanumeric + cleanCaseName = regexache.MustCompile(`[^a-z0-9_]`).ReplaceAllString(cleanCaseName, "") + + // Determine subdirectory from test function name + subdirectory := determineSubdirectoryFromTestName(t, baseName) + + // Build hierarchical path using filepath.Join for cross-OS compatibility + // Creates: autoflex/subdirectory/test_name/case_name.golden + return filepath.Join("autoflex", subdirectory, cleanTestName, cleanCaseName+".golden") +} + +// determineSubdirectoryFromTestName 
determines the subdirectory based on which test file contains the test function. +// Returns the subdirectory name (e.g., "dispatch", "maps") or "unknown" if not found. +func determineSubdirectoryFromTestName(t *testing.T, testFunctionName string) string { + t.Helper() + + files, err := filepath.Glob("autoflex_*_test.go") + if err != nil { + t.Logf("Error globbing test files: %v", err) + return "unknown" + } + + for _, file := range files { + if subdirectory := extractSubdirectoryFromFile(t, file, testFunctionName); subdirectory != "" { + return subdirectory + } + } + + return "unknown" +} + +// extractSubdirectoryFromFile attempts to find the test function in the given file +// and returns the subdirectory name if found, empty string otherwise. +func extractSubdirectoryFromFile(t *testing.T, filename, testFunctionName string) string { + t.Helper() + + content, err := os.ReadFile(filename) + if err != nil { + t.Logf("Error reading file %s: %v", filename, err) + return "" + } + + if !containsTestFunction(t, content, testFunctionName) { + return "" + } + + return parseSubdirectoryFromFilename(filename) +} + +// containsTestFunction checks if the file content contains the specified test function definition. +func containsTestFunction(t *testing.T, content []byte, testFunctionName string) bool { + t.Helper() + + pattern := fmt.Sprintf(`func\s+%s\s*\(`, regexp.QuoteMeta(testFunctionName)) + matched, err := regexp.Match(pattern, content) + if err != nil { + t.Logf("Error matching pattern for function %s: %v", testFunctionName, err) + return false + } + + return matched +} + +// parseSubdirectoryFromFilename extracts the subdirectory name from an autoflex test filename. 
+// Examples: "autoflex_dispatch_test.go" -> "dispatch", "autoflex_maps_test.go" -> "maps" +func parseSubdirectoryFromFilename(filename string) string { + const ( + prefix = "autoflex_" + suffix = "_test.go" + ) + + if !strings.HasPrefix(filename, prefix) || !strings.HasSuffix(filename, suffix) { + return "" + } + + subdirectory := strings.TrimPrefix(filename, prefix) + subdirectory = strings.TrimSuffix(subdirectory, suffix) + + return subdirectory +} + +// camelToSnake converts CamelCase to snake_case +func camelToSnake(s string) string { + // Insert underscores before uppercase letters (except first) + re := regexache.MustCompile(`([a-z0-9])([A-Z])`) + snake := re.ReplaceAllString(s, `${1}_${2}`) + return strings.ToLower(snake) +} diff --git a/internal/framework/flex/autoflex_maps_test.go b/internal/framework/flex/autoflex_maps_test.go new file mode 100644 index 000000000000..025ffc4702d1 --- /dev/null +++ b/internal/framework/flex/autoflex_maps_test.go @@ -0,0 +1,607 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package flex + +// Tests AutoFlex's Expand/Flatten of maps. 
+ +import ( + "context" + "testing" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-framework/types/basetypes" + fwtypes "github.com/hashicorp/terraform-provider-aws/internal/framework/types" +) + +type tfComplexValue struct { + Field1 types.String `tfsdk:"field1"` + Field2 fwtypes.ListNestedObjectValueOf[tfListOfNestedObject] `tfsdk:"field2"` + Field3 types.Map `tfsdk:"field3"` + Field4 fwtypes.SetNestedObjectValueOf[tfSingleInt64Field] `tfsdk:"field4"` +} + +type awsComplexValue struct { + Field1 string + Field2 *awsNestedObjectPointer + Field3 map[string]*string + Field4 []awsSingleInt64Value +} + +type tfMapOfString struct { + FieldInner fwtypes.MapValueOf[basetypes.StringValue] `tfsdk:"field_inner"` +} + +type tfNestedMapOfString struct { + FieldOuter fwtypes.ListNestedObjectValueOf[tfMapOfString] `tfsdk:"field_outer"` +} + +type awsNestedMapOfString struct { + FieldOuter awsMapOfString +} + +type tfMapOfMapOfString struct { + Field1 fwtypes.MapValueOf[fwtypes.MapValueOf[types.String]] `tfsdk:"field1"` +} + +type awsMapOfString struct { + FieldInner map[string]string +} + +type awsMapOfMapOfString struct { + Field1 map[string]map[string]string +} + +type awsMapOfMapOfStringPointer struct { + Field1 map[string]map[string]*string +} + +type awsMapOfStringPointer struct { + FieldInner map[string]*string +} + +type tfMapBlockList struct { + MapBlock fwtypes.ListNestedObjectValueOf[tfMapBlockElement] `tfsdk:"map_block"` +} + +type tfMapBlockSet struct { + MapBlock fwtypes.SetNestedObjectValueOf[tfMapBlockElement] `tfsdk:"map_block"` +} + +type awsMapBlockValues struct { + MapBlock map[string]awsMapBlockElement +} + +type awsMapBlockPointers struct { + MapBlock map[string]*awsMapBlockElement +} + +type tfMapBlockElement struct { + MapBlockKey types.String `tfsdk:"map_block_key"` + Attr1 types.String `tfsdk:"attr1"` + 
Attr2 types.String `tfsdk:"attr2"` +} + +type awsMapBlockElement struct { + Attr1 string + Attr2 string +} + +type tfMapBlockListEnumKey struct { + MapBlock fwtypes.ListNestedObjectValueOf[tfMapBlockElementEnumKey] `tfsdk:"map_block"` +} + +type tfMapBlockElementEnumKey struct { + MapBlockKey fwtypes.StringEnum[testEnum] `tfsdk:"map_block_key"` + Attr1 types.String `tfsdk:"attr1"` + Attr2 types.String `tfsdk:"attr2"` +} + +type tfMapBlockListNoKey struct { + MapBlock fwtypes.ListNestedObjectValueOf[tfMapBlockElementNoKey] `tfsdk:"map_block"` +} + +type tfMapBlockElementNoKey struct { + Attr1 types.String `tfsdk:"attr1"` + Attr2 types.String `tfsdk:"attr2"` +} + +func TestExpandMaps(t *testing.T) { + t.Parallel() + + ctx := context.Background() + + testCases := autoFlexTestCases{ + "map of string": { + Source: &tfMapOfString{ + FieldInner: fwtypes.NewMapValueOfMust[types.String](ctx, map[string]attr.Value{ + "x": types.StringValue("y"), + }), + }, + Target: &awsMapOfString{}, + WantTarget: &awsMapOfString{ + FieldInner: map[string]string{ + "x": "y", + }, + }, + }, + "map of string pointer": { + Source: &tfMapOfString{ + FieldInner: fwtypes.NewMapValueOfMust[types.String](ctx, map[string]attr.Value{ + "x": types.StringValue("y"), + }), + }, + Target: &awsMapOfStringPointer{}, + WantTarget: &awsMapOfStringPointer{ + FieldInner: map[string]*string{ + "x": aws.String("y"), + }, + }, + }, + "map of map of string": { + Source: &tfMapOfMapOfString{ + Field1: fwtypes.NewMapValueOfMust[fwtypes.MapValueOf[types.String]](ctx, map[string]attr.Value{ + "x": fwtypes.NewMapValueOfMust[types.String](ctx, map[string]attr.Value{ + "y": types.StringValue("z"), + }), + }), + }, + Target: &awsMapOfMapOfString{}, + WantTarget: &awsMapOfMapOfString{ + Field1: map[string]map[string]string{ + "x": { + "y": "z", + }, + }, + }, + }, + "map of map of string pointer": { + Source: &tfMapOfMapOfString{ + Field1: fwtypes.NewMapValueOfMust[fwtypes.MapValueOf[types.String]](ctx, 
map[string]attr.Value{ + "x": fwtypes.NewMapValueOfMust[types.String](ctx, map[string]attr.Value{ + "y": types.StringValue("z"), + }), + }), + }, + Target: &awsMapOfMapOfStringPointer{}, + WantTarget: &awsMapOfMapOfStringPointer{ + Field1: map[string]map[string]*string{ + "x": { + "y": aws.String("z"), + }, + }, + }, + }, + "nested string map": { + Source: &tfNestedMapOfString{ + FieldOuter: fwtypes.NewListNestedObjectValueOfPtrMust(ctx, &tfMapOfString{ + FieldInner: fwtypes.NewMapValueOfMust[types.String](ctx, map[string]attr.Value{ + "x": types.StringValue("y"), + }), + }), + }, + Target: &awsNestedMapOfString{}, + WantTarget: &awsNestedMapOfString{ + FieldOuter: awsMapOfString{ + FieldInner: map[string]string{ + "x": "y", + }, + }, + }, + }, + } + + runAutoExpandTestCases(t, testCases, runChecks{CompareDiags: true, CompareTarget: true, GoldenLogs: true}) +} + +func TestExpandMapBlock(t *testing.T) { + t.Parallel() + + ctx := context.Background() + + testCases := autoFlexTestCases{ + "nil map block key": { + Source: &tfMapBlockList{ + MapBlock: fwtypes.NewListNestedObjectValueOfNull[tfMapBlockElement](ctx), + }, + Target: &awsMapBlockValues{}, + WantTarget: &awsMapBlockValues{ + MapBlock: nil, + }, + }, + "map block key list": { + Source: &tfMapBlockList{ + MapBlock: fwtypes.NewListNestedObjectValueOfValueSliceMust[tfMapBlockElement](ctx, []tfMapBlockElement{ + { + MapBlockKey: types.StringValue("x"), + Attr1: types.StringValue("a"), + Attr2: types.StringValue("b"), + }, + { + MapBlockKey: types.StringValue("y"), + Attr1: types.StringValue("c"), + Attr2: types.StringValue("d"), + }, + }), + }, + Target: &awsMapBlockValues{}, + WantTarget: &awsMapBlockValues{ + MapBlock: map[string]awsMapBlockElement{ + "x": { + Attr1: "a", + Attr2: "b", + }, + "y": { + Attr1: "c", + Attr2: "d", + }, + }, + }, + }, + "map block key set": { + Source: &tfMapBlockSet{ + MapBlock: fwtypes.NewSetNestedObjectValueOfValueSliceMust[tfMapBlockElement](ctx, []tfMapBlockElement{ + { + 
MapBlockKey: types.StringValue("x"), + Attr1: types.StringValue("a"), + Attr2: types.StringValue("b"), + }, + { + MapBlockKey: types.StringValue("y"), + Attr1: types.StringValue("c"), + Attr2: types.StringValue("d"), + }, + }), + }, + Target: &awsMapBlockValues{}, + WantTarget: &awsMapBlockValues{ + MapBlock: map[string]awsMapBlockElement{ + "x": { + Attr1: "a", + Attr2: "b", + }, + "y": { + Attr1: "c", + Attr2: "d", + }, + }, + }, + }, + "map block key ptr source": { + Source: &tfMapBlockList{ + MapBlock: fwtypes.NewListNestedObjectValueOfSliceMust(ctx, []*tfMapBlockElement{ + { + MapBlockKey: types.StringValue("x"), + Attr1: types.StringValue("a"), + Attr2: types.StringValue("b"), + }, + { + MapBlockKey: types.StringValue("y"), + Attr1: types.StringValue("c"), + Attr2: types.StringValue("d"), + }, + }), + }, + Target: &awsMapBlockValues{}, + WantTarget: &awsMapBlockValues{ + MapBlock: map[string]awsMapBlockElement{ + "x": { + Attr1: "a", + Attr2: "b", + }, + "y": { + Attr1: "c", + Attr2: "d", + }, + }, + }, + }, + "map block key ptr both": { + Source: &tfMapBlockList{ + MapBlock: fwtypes.NewListNestedObjectValueOfSliceMust(ctx, []*tfMapBlockElement{ + { + MapBlockKey: types.StringValue("x"), + Attr1: types.StringValue("a"), + Attr2: types.StringValue("b"), + }, + { + MapBlockKey: types.StringValue("y"), + Attr1: types.StringValue("c"), + Attr2: types.StringValue("d"), + }, + }), + }, + Target: &awsMapBlockPointers{}, + WantTarget: &awsMapBlockPointers{ + MapBlock: map[string]*awsMapBlockElement{ + "x": { + Attr1: "a", + Attr2: "b", + }, + "y": { + Attr1: "c", + Attr2: "d", + }, + }, + }, + }, + "map block enum key": { + Source: &tfMapBlockListEnumKey{ + MapBlock: fwtypes.NewListNestedObjectValueOfValueSliceMust[tfMapBlockElementEnumKey](ctx, []tfMapBlockElementEnumKey{ + { + MapBlockKey: fwtypes.StringEnumValue(testEnumList), + Attr1: types.StringValue("a"), + Attr2: types.StringValue("b"), + }, + { + MapBlockKey: fwtypes.StringEnumValue(testEnumScalar), + Attr1: 
types.StringValue("c"), + Attr2: types.StringValue("d"), + }, + }), + }, + Target: &awsMapBlockValues{}, + WantTarget: &awsMapBlockValues{ + MapBlock: map[string]awsMapBlockElement{ + string(testEnumList): { + Attr1: "a", + Attr2: "b", + }, + string(testEnumScalar): { + Attr1: "c", + Attr2: "d", + }, + }, + }, + }, + + "map block list no key": { + Source: &tfMapBlockListNoKey{ + MapBlock: fwtypes.NewListNestedObjectValueOfValueSliceMust[tfMapBlockElementNoKey](ctx, []tfMapBlockElementNoKey{ + { + Attr1: types.StringValue("a"), + Attr2: types.StringValue("b"), + }, + { + Attr1: types.StringValue("c"), + Attr2: types.StringValue("d"), + }, + }), + }, + Target: &awsMapBlockValues{}, + ExpectedDiags: diagAF[tfMapBlockElementNoKey](diagExpandingNoMapBlockKey), + }, + } + runAutoExpandTestCases(t, testCases, runChecks{CompareDiags: true, CompareTarget: true, GoldenLogs: true}) +} + +func TestFlattenMaps(t *testing.T) { + t.Parallel() + + ctx := context.Background() + + testCases := autoFlexTestCases{ + "map of string": { + Source: &awsMapOfString{ + FieldInner: map[string]string{ + "x": "y", + }, + }, + Target: &tfMapOfString{}, + WantTarget: &tfMapOfString{ + FieldInner: fwtypes.NewMapValueOfMust[types.String](ctx, map[string]attr.Value{ + "x": types.StringValue("y"), + }), + }, + }, + "map of string pointer": { + Source: &awsMapOfStringPointer{ + FieldInner: map[string]*string{ + "x": aws.String("y"), + }, + }, + Target: &tfMapOfString{}, + WantTarget: &tfMapOfString{ + FieldInner: fwtypes.NewMapValueOfMust[types.String](ctx, map[string]attr.Value{ + "x": types.StringValue("y"), + }), + }, + }, + "nested string map": { + Source: &awsNestedMapOfString{ + FieldOuter: awsMapOfString{ + FieldInner: map[string]string{ + "x": "y", + }, + }, + }, + Target: &tfNestedMapOfString{}, + WantTarget: &tfNestedMapOfString{ + FieldOuter: fwtypes.NewListNestedObjectValueOfPtrMust(ctx, &tfMapOfString{ + FieldInner: fwtypes.NewMapValueOfMust[types.String](ctx, map[string]attr.Value{ + 
"x": types.StringValue("y"), + }), + }), + }, + }, + "map of map of string": { + Source: &awsMapOfMapOfString{ + Field1: map[string]map[string]string{ + "x": { + "y": "z", + }, + }, + }, + Target: &tfMapOfMapOfString{}, + WantTarget: &tfMapOfMapOfString{ + Field1: fwtypes.NewMapValueOfMust[fwtypes.MapValueOf[types.String]](ctx, map[string]attr.Value{ + "x": fwtypes.NewMapValueOfMust[types.String](ctx, map[string]attr.Value{ + "y": types.StringValue("z"), + }), + }), + }, + }, + "map of map of string pointer": { + Source: &awsMapOfMapOfStringPointer{ + Field1: map[string]map[string]*string{ + "x": { + "y": aws.String("z"), + }, + }, + }, + Target: &tfMapOfMapOfString{}, + WantTarget: &tfMapOfMapOfString{ + Field1: fwtypes.NewMapValueOfMust[fwtypes.MapValueOf[types.String]](ctx, map[string]attr.Value{ + "x": fwtypes.NewMapValueOfMust[types.String](ctx, map[string]attr.Value{ + "y": types.StringValue("z"), + }), + }), + }, + }, + } + + runAutoFlattenTestCases(t, testCases, runChecks{CompareDiags: true, CompareTarget: true, GoldenLogs: true}) +} + +func TestFlattenMapBlock(t *testing.T) { + t.Parallel() + + ctx := context.Background() + + testCases := autoFlexTestCases{ + "nil map block key": { + Source: &awsMapBlockValues{ + MapBlock: nil, + }, + Target: &tfMapBlockList{}, + WantTarget: &tfMapBlockList{ + MapBlock: fwtypes.NewListNestedObjectValueOfNull[tfMapBlockElement](ctx), + }, + }, + "map block key list": { + Source: &awsMapBlockValues{ + MapBlock: map[string]awsMapBlockElement{ + "x": { + Attr1: "a", + Attr2: "b", + }, + }, + }, + Target: &tfMapBlockList{}, + WantTarget: &tfMapBlockList{ + MapBlock: fwtypes.NewListNestedObjectValueOfValueSliceMust[tfMapBlockElement](ctx, []tfMapBlockElement{ + { + MapBlockKey: types.StringValue("x"), + Attr1: types.StringValue("a"), + Attr2: types.StringValue("b"), + }, + }), + }, + }, + "map block key set": { + Source: &awsMapBlockValues{ + MapBlock: map[string]awsMapBlockElement{ + "x": { + Attr1: "a", + Attr2: "b", + }, + }, 
+ }, + Target: &tfMapBlockSet{}, + WantTarget: &tfMapBlockSet{ + MapBlock: fwtypes.NewSetNestedObjectValueOfValueSliceMust[tfMapBlockElement](ctx, []tfMapBlockElement{ + { + MapBlockKey: types.StringValue("x"), + Attr1: types.StringValue("a"), + Attr2: types.StringValue("b"), + }, + }), + }, + }, + "nil map block key ptr": { + Source: &awsMapBlockPointers{ + MapBlock: nil, + }, + Target: &tfMapBlockList{}, + WantTarget: &tfMapBlockList{ + MapBlock: fwtypes.NewListNestedObjectValueOfNull[tfMapBlockElement](ctx), + }, + }, + "map block key ptr source": { + Source: &awsMapBlockPointers{ + MapBlock: map[string]*awsMapBlockElement{ + "x": { + Attr1: "a", + Attr2: "b", + }, + }, + }, + Target: &tfMapBlockList{}, + WantTarget: &tfMapBlockList{ + MapBlock: fwtypes.NewListNestedObjectValueOfValueSliceMust[tfMapBlockElement](ctx, []tfMapBlockElement{ + { + MapBlockKey: types.StringValue("x"), + Attr1: types.StringValue("a"), + Attr2: types.StringValue("b"), + }, + }), + }, + }, + "map block key ptr both": { + Source: &awsMapBlockPointers{ + MapBlock: map[string]*awsMapBlockElement{ + "x": { + Attr1: "a", + Attr2: "b", + }, + }, + }, + Target: &tfMapBlockList{}, + WantTarget: &tfMapBlockList{ + MapBlock: fwtypes.NewListNestedObjectValueOfSliceMust(ctx, []*tfMapBlockElement{ + { + MapBlockKey: types.StringValue("x"), + Attr1: types.StringValue("a"), + Attr2: types.StringValue("b"), + }, + }), + }, + }, + "map block enum key": { + Source: &awsMapBlockValues{ + MapBlock: map[string]awsMapBlockElement{ + string(testEnumList): { + Attr1: "a", + Attr2: "b", + }, + }, + }, + Target: &tfMapBlockListEnumKey{}, + WantTarget: &tfMapBlockListEnumKey{ + MapBlock: fwtypes.NewListNestedObjectValueOfValueSliceMust[tfMapBlockElementEnumKey](ctx, []tfMapBlockElementEnumKey{ + { + MapBlockKey: fwtypes.StringEnumValue(testEnumList), + Attr1: types.StringValue("a"), + Attr2: types.StringValue("b"), + }, + }), + }, + }, + + "map block list no key": { + Source: &awsMapBlockValues{ + MapBlock: 
map[string]awsMapBlockElement{ + "x": { + Attr1: "a", + Attr2: "b", + }, + }, + }, + Target: &tfMapBlockListNoKey{}, + ExpectedDiags: diagAF[tfMapBlockElementNoKey](diagFlatteningNoMapBlockKey), + }, + } + runAutoFlattenTestCases(t, testCases, runChecks{CompareDiags: true, CompareTarget: true, GoldenLogs: true}) +} diff --git a/internal/framework/flex/autoflex_naming_test.go b/internal/framework/flex/autoflex_naming_test.go new file mode 100644 index 000000000000..2eee1ba57a01 --- /dev/null +++ b/internal/framework/flex/autoflex_naming_test.go @@ -0,0 +1,589 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package flex + +// Tests AutoFlex's Expand/Flatten of naming differences (plural, prefix, suffix, capitalization). + +import ( + "context" + "reflect" + "testing" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/types" + fwtypes "github.com/hashicorp/terraform-provider-aws/internal/framework/types" +) + +// tfSingluarListOfNestedObjects testing for idiomatic singular on TF side but plural on AWS side +type tfSingluarListOfNestedObjects struct { + Field fwtypes.ListNestedObjectValueOf[tfSingleStringField] `tfsdk:"field"` +} + +type awsPluralSliceOfNestedObjectValues struct { + Fields []awsSingleStringValue +} + +// tfFieldNamePrefix has no prefix to test matching on prefix +type tfFieldNamePrefix struct { + Name types.String `tfsdk:"name"` +} + +// awsFieldNamePrefix has prefix to test matching on prefix +type awsFieldNamePrefix struct { + IntentName *string +} + +type tfFieldNamePrefixInsensitive struct { + ID types.String `tfsdk:"id"` +} + +type awsFieldNamePrefixInsensitive struct { + ClientId *string +} + +// tfFieldNameSuffix has no suffix to test matching on suffix +type tfFieldNameSuffix struct { + Policy types.String `tfsdk:"policy"` +} + +// awsFieldNameSuffix has suffix to test matching on suffix +type awsFieldNameSuffix struct { 
+ PolicyConfig *string +} + +type tfPluralAndSingularFields struct { + Value types.String `tfsdk:"Value"` +} + +type awsPluralAndSingularFields struct { + Value string + Values string +} + +type tfSpecialPluralization struct { + City types.List `tfsdk:"city"` + Coach types.List `tfsdk:"coach"` + Tomato types.List `tfsdk:"tomato"` + Vertex types.List `tfsdk:"vertex"` + Criterion types.List `tfsdk:"criterion"` + Datum types.List `tfsdk:"datum"` + Hive types.List `tfsdk:"hive"` +} + +type awsSpecialPluralization struct { + Cities []*string + Coaches []*string + Tomatoes []*string + Vertices []*string + Criteria []*string + Data []*string + Hives []*string +} + +// tfCaptializationDiff testing for fields that only differ by capitalization +type tfCaptializationDiff struct { + FieldURL types.String `tfsdk:"field_url"` +} + +// awsCapitalizationDiff testing for fields that only differ by capitalization +type awsCapitalizationDiff struct { + FieldUrl *string +} + +func TestExpandNaming(t *testing.T) { + t.Parallel() + + testCases := autoFlexTestCases{ + "plural ordinary field names": { + Source: &tfSingluarListOfNestedObjects{ + Field: fwtypes.NewListNestedObjectValueOfPtrMust(context.Background(), &tfSingleStringField{ + Field1: types.StringValue("a"), + }), + }, + Target: &awsPluralSliceOfNestedObjectValues{}, + WantTarget: &awsPluralSliceOfNestedObjectValues{ + Fields: []awsSingleStringValue{{Field1: "a"}}, + }, + }, + "plural field names": { + Source: &tfSpecialPluralization{ + City: types.ListValueMust(types.StringType, []attr.Value{ + types.StringValue("paris"), + types.StringValue("london"), + }), + Coach: types.ListValueMust(types.StringType, []attr.Value{ + types.StringValue("guardiola"), + types.StringValue("mourinho"), + }), + Tomato: types.ListValueMust(types.StringType, []attr.Value{ + types.StringValue("brandywine"), + types.StringValue("roma"), + }), + Vertex: types.ListValueMust(types.StringType, []attr.Value{ + types.StringValue("ab"), + 
types.StringValue("bc"), + }), + Criterion: types.ListValueMust(types.StringType, []attr.Value{ + types.StringValue("votes"), + types.StringValue("editors"), + }), + Datum: types.ListValueMust(types.StringType, []attr.Value{ + types.StringValue("d1282f78-fa99-5d9d-bd51-e6f0173eb74a"), + types.StringValue("0f10cb10-2076-5254-bd21-d3f62fe66303"), + }), + Hive: types.ListValueMust(types.StringType, []attr.Value{ + types.StringValue("Cegieme"), + types.StringValue("Fahumvid"), + }), + }, + Target: &awsSpecialPluralization{}, + WantTarget: &awsSpecialPluralization{ + Cities: []*string{ + aws.String("paris"), + aws.String("london"), + }, + Coaches: []*string{ + aws.String("guardiola"), + aws.String("mourinho"), + }, + Tomatoes: []*string{ + aws.String("brandywine"), + aws.String("roma"), + }, + Vertices: []*string{ + aws.String("ab"), + aws.String("bc"), + }, + Criteria: []*string{ + aws.String("votes"), + aws.String("editors"), + }, + Data: []*string{ + aws.String("d1282f78-fa99-5d9d-bd51-e6f0173eb74a"), + aws.String("0f10cb10-2076-5254-bd21-d3f62fe66303"), + }, + Hives: []*string{ + aws.String("Cegieme"), + aws.String("Fahumvid"), + }, + }, + }, + "capitalization field names": { + Source: &tfCaptializationDiff{ + FieldURL: types.StringValue("h"), + }, + Target: &awsCapitalizationDiff{}, + WantTarget: &awsCapitalizationDiff{ + FieldUrl: aws.String("h"), + }, + }, + } + + runAutoExpandTestCases(t, testCases, runChecks{CompareDiags: true, CompareTarget: true}) +} + +func TestExpandOptions(t *testing.T) { + t.Parallel() + + type tf01 struct { + Field1 types.Bool `tfsdk:"field1"` + Tags fwtypes.MapValueOf[types.String] `tfsdk:"tags"` + } + type aws01 struct { + Field1 bool + Tags map[string]string + } + + ctx := context.Background() + testCases := autoFlexTestCases{ + "empty source with tags": { + Source: &tf01{}, + Target: &aws01{}, + WantTarget: &aws01{}, + }, + "ignore tags by default": { + Source: &tf01{ + Field1: types.BoolValue(true), + Tags: 
fwtypes.NewMapValueOfMust[types.String](ctx, map[string]attr.Value{ + "foo": types.StringValue("bar"), + }, + ), + }, + Target: &aws01{}, + WantTarget: &aws01{Field1: true}, + }, + "include tags with option override": { + Options: []AutoFlexOptionsFunc{WithNoIgnoredFieldNames()}, + Source: &tf01{ + Field1: types.BoolValue(true), + Tags: fwtypes.NewMapValueOfMust[types.String](ctx, map[string]attr.Value{ + "foo": types.StringValue("bar"), + }, + ), + }, + Target: &aws01{}, + WantTarget: &aws01{ + Field1: true, + Tags: map[string]string{"foo": "bar"}, + }, + }, + "ignore custom field": { + Options: []AutoFlexOptionsFunc{WithIgnoredFieldNames([]string{"Field1"})}, + Source: &tf01{ + Field1: types.BoolValue(true), + Tags: fwtypes.NewMapValueOfMust[types.String](ctx, map[string]attr.Value{ + "foo": types.StringValue("bar"), + }, + ), + }, + Target: &aws01{}, + WantTarget: &aws01{ + Tags: map[string]string{"foo": "bar"}, + }, + }, + "resource name suffix": { + Options: []AutoFlexOptionsFunc{WithFieldNameSuffix("Config")}, + Source: &tfFieldNameSuffix{ + Policy: types.StringValue("foo"), + }, + Target: &awsFieldNameSuffix{}, + WantTarget: &awsFieldNameSuffix{ + PolicyConfig: aws.String("foo"), + }, + }, + } + runAutoExpandTestCases(t, testCases, runChecks{CompareDiags: true, CompareTarget: true}) +} + +func TestFindFieldFuzzy_Combinations(t *testing.T) { + t.Parallel() + + type builder func() (typeFrom reflect.Type, typeTo reflect.Type, fieldNameFrom string, expectedFieldName string) + + cases := map[string]struct { + prefix string + suffix string + build builder + }{ + // 1) suffix-only on target; source has neither + "suffix on target only (prefix configured but not applied)": { + prefix: "Cluster", + suffix: "Input", + build: func() (reflect.Type, reflect.Type, string, string) { + type source struct{ ExecutionConfig string } + type target struct{ ExecutionConfigInput string } + return reflect.TypeFor[source](), reflect.TypeFor[target](), "ExecutionConfig", 
"ExecutionConfigInput" + }, + }, + // 2) trim prefix on source, then add suffix + "trim prefix on source then add suffix": { + prefix: "Cluster", + suffix: "Input", + build: func() (reflect.Type, reflect.Type, string, string) { + type source struct{ ClusterExecutionConfig string } + type target struct{ ExecutionConfigInput string } + return reflect.TypeFor[source](), reflect.TypeFor[target](), "ClusterExecutionConfig", "ExecutionConfigInput" + }, + }, + // 3) add prefix and suffix on target (source has neither) + "add prefix and suffix on target": { + prefix: "Cluster", + suffix: "Input", + build: func() (reflect.Type, reflect.Type, string, string) { + type source struct{ ExecutionConfig string } + type target struct{ ClusterExecutionConfigInput string } + return reflect.TypeFor[source](), reflect.TypeFor[target](), "ExecutionConfig", "ClusterExecutionConfigInput" + }, + }, + // 4) trim suffix on source (target has neither) + "trim suffix on source": { + prefix: "Cluster", + suffix: "Input", + build: func() (reflect.Type, reflect.Type, string, string) { + type source struct{ ExecutionConfigInput string } + type target struct{ ExecutionConfig string } + return reflect.TypeFor[source](), reflect.TypeFor[target](), "ExecutionConfigInput", "ExecutionConfig" + }, + }, + // 5) trim both on source (target has neither) + "trim both prefix and suffix on source": { + prefix: "Cluster", + suffix: "Input", + build: func() (reflect.Type, reflect.Type, string, string) { + type source struct{ ClusterExecutionConfigInput string } + type target struct{ ExecutionConfig string } + return reflect.TypeFor[source](), reflect.TypeFor[target](), "ClusterExecutionConfigInput", "ExecutionConfig" + }, + }, + } + + for name, tc := range cases { + t.Run(name, func(t *testing.T) { + t.Parallel() + + typeFrom, typeTo, fieldNameFrom, expected := tc.build() + ctx := context.Background() + opts := []AutoFlexOptionsFunc{ + WithFieldNamePrefix(tc.prefix), + WithFieldNameSuffix(tc.suffix), + } + 
flexer := newAutoExpander(opts) + + field, found := (&fuzzyFieldFinder{}).findField(ctx, fieldNameFrom, typeFrom, typeTo, flexer) + if !found { + t.Fatalf("expected to find field, but found==false") + } + if field.Name != expected { + t.Fatalf("expected field name %q, got %q", expected, field.Name) + } + }) + } +} + +func TestExpandFieldNamePrefix(t *testing.T) { + t.Parallel() + + testCases := autoFlexTestCases{ + "exact match": { + Options: []AutoFlexOptionsFunc{ + WithFieldNamePrefix("Intent"), + }, + Source: &tfFieldNamePrefix{ + Name: types.StringValue("Ovodoghen"), + }, + Target: &awsFieldNamePrefix{}, + WantTarget: &awsFieldNamePrefix{ + IntentName: aws.String("Ovodoghen"), + }, + }, + + "case-insensitive": { + Options: []AutoFlexOptionsFunc{ + WithFieldNamePrefix("Client"), + }, + Source: &tfFieldNamePrefixInsensitive{ + ID: types.StringValue("abc123"), + }, + Target: &awsFieldNamePrefixInsensitive{}, + WantTarget: &awsFieldNamePrefixInsensitive{ + ClientId: aws.String("abc123"), + }, + }, + } + + runAutoExpandTestCases(t, testCases, runChecks{CompareDiags: true, CompareTarget: true}) +} + +func TestFlattenNaming(t *testing.T) { + t.Parallel() + + ctx := context.Background() + + testCases := autoFlexTestCases{ + "plural ordinary field names": { + Source: &awsPluralSliceOfNestedObjectValues{ + Fields: []awsSingleStringValue{{Field1: "a"}}, + }, + Target: &tfSingluarListOfNestedObjects{}, + WantTarget: &tfSingluarListOfNestedObjects{ + Field: fwtypes.NewListNestedObjectValueOfPtrMust(ctx, &tfSingleStringField{ + Field1: types.StringValue("a"), + }), + }, + }, + "plural field names": { + Source: &awsSpecialPluralization{ + Cities: []*string{ + aws.String("paris"), + aws.String("london"), + }, + Coaches: []*string{ + aws.String("guardiola"), + aws.String("mourinho"), + }, + Tomatoes: []*string{ + aws.String("brandywine"), + aws.String("roma"), + }, + Vertices: []*string{ + aws.String("ab"), + aws.String("bc"), + }, + Criteria: []*string{ + aws.String("votes"), 
+ aws.String("editors"), + }, + Data: []*string{ + aws.String("d1282f78-fa99-5d9d-bd51-e6f0173eb74a"), + aws.String("0f10cb10-2076-5254-bd21-d3f62fe66303"), + }, + Hives: []*string{ + aws.String("Cegieme"), + aws.String("Fahumvid"), + }, + }, + Target: &tfSpecialPluralization{}, + WantTarget: &tfSpecialPluralization{ + City: types.ListValueMust(types.StringType, []attr.Value{ + types.StringValue("paris"), + types.StringValue("london"), + }), + Coach: types.ListValueMust(types.StringType, []attr.Value{ + types.StringValue("guardiola"), + types.StringValue("mourinho"), + }), + Tomato: types.ListValueMust(types.StringType, []attr.Value{ + types.StringValue("brandywine"), + types.StringValue("roma"), + }), + Vertex: types.ListValueMust(types.StringType, []attr.Value{ + types.StringValue("ab"), + types.StringValue("bc"), + }), + Criterion: types.ListValueMust(types.StringType, []attr.Value{ + types.StringValue("votes"), + types.StringValue("editors"), + }), + Datum: types.ListValueMust(types.StringType, []attr.Value{ + types.StringValue("d1282f78-fa99-5d9d-bd51-e6f0173eb74a"), + types.StringValue("0f10cb10-2076-5254-bd21-d3f62fe66303"), + }), + Hive: types.ListValueMust(types.StringType, []attr.Value{ + types.StringValue("Cegieme"), + types.StringValue("Fahumvid"), + }), + }, + }, + "strange plurality": { + Source: &awsPluralAndSingularFields{ + Value: "a", + Values: "b", + }, + Target: &tfPluralAndSingularFields{}, + WantTarget: &tfPluralAndSingularFields{ + Value: types.StringValue("a"), + }, + }, + "capitalization field names": { + Source: &awsCapitalizationDiff{ + FieldUrl: aws.String("h"), + }, + Target: &tfCaptializationDiff{}, + WantTarget: &tfCaptializationDiff{ + FieldURL: types.StringValue("h"), + }, + }, + "resource name prefix": { + Options: []AutoFlexOptionsFunc{ + WithFieldNamePrefix("Intent"), + }, + Source: &awsFieldNamePrefix{ + IntentName: aws.String("Ovodoghen"), + }, + Target: &tfFieldNamePrefix{}, + WantTarget: &tfFieldNamePrefix{ + Name: 
types.StringValue("Ovodoghen"), + }, + }, + "resource name suffix": { + Options: []AutoFlexOptionsFunc{WithFieldNameSuffix("Config")}, + Source: &awsFieldNameSuffix{ + PolicyConfig: aws.String("foo"), + }, + Target: &tfFieldNameSuffix{}, + WantTarget: &tfFieldNameSuffix{ + Policy: types.StringValue("foo"), + }, + }, + } + + runAutoFlattenTestCases(t, testCases, runChecks{CompareDiags: true, CompareTarget: true}) +} + +func TestFlattenOptions(t *testing.T) { + t.Parallel() + + type tf01 struct { + Field1 types.Bool `tfsdk:"field1"` + Tags fwtypes.MapValueOf[types.String] `tfsdk:"tags"` + } + type aws01 struct { + Field1 bool + Tags map[string]string + } + + // For test cases below where a field of `MapValue` type is ignored, the + // result of `cmp.Diff` is intentionally not checked. + // + // When a target contains an ignored field of a `MapValue` type, the resulting + // target will contain a zero value, which, because the `elementType` is nil, will + // always return `false` from the `Equal` method, even when compared with another + // zero value. In practice, this zeroed `MapValue` would be overwritten + // by a subsequent step (ie. transparent tagging), and the temporary invalid + // state of the zeroed `MapValue` will not appear in the final state. 
+ // + // Example expected diff: + // unexpected diff (+wanted, -got): &flex.tf01{ + // Field1: s"false", + // - Tags: types.MapValueOf[github.com/hashicorp/terraform-plugin-framework/types/types.String]{}, + // + Tags: types.MapValueOf[github.com/hashicorp/terraform-plugin-framework/types/types.String]{MapValue: types.Map{elementType: basetypes.StringType{}}}, + // } + ctx := context.Background() + testCases := autoFlexTestCases{ + "empty source with tags": { + Source: &aws01{}, + Target: &tf01{}, + WantTarget: &tf01{ + Field1: types.BoolValue(false), + Tags: fwtypes.NewMapValueOfNull[types.String](ctx), + }, + WantDiff: true, // Ignored MapValue type, expect diff + }, + "ignore tags by default": { + Source: &aws01{ + Field1: true, + Tags: map[string]string{"foo": "bar"}, + }, + Target: &tf01{}, + WantTarget: &tf01{ + Field1: types.BoolValue(true), + Tags: fwtypes.NewMapValueOfNull[types.String](ctx), + }, + WantDiff: true, // Ignored MapValue type, expect diff + }, + "include tags with option override": { + Options: []AutoFlexOptionsFunc{WithNoIgnoredFieldNames()}, + Source: &aws01{ + Field1: true, + Tags: map[string]string{"foo": "bar"}, + }, + Target: &tf01{}, + WantTarget: &tf01{ + Field1: types.BoolValue(true), + Tags: fwtypes.NewMapValueOfMust[types.String](ctx, map[string]attr.Value{ + "foo": types.StringValue("bar"), + }), + }, + }, + "ignore custom field": { + Options: []AutoFlexOptionsFunc{WithIgnoredFieldNames([]string{"Field1"})}, + Source: &aws01{ + Field1: true, + Tags: map[string]string{"foo": "bar"}, + }, + Target: &tf01{}, + WantTarget: &tf01{ + Field1: types.BoolNull(), + Tags: fwtypes.NewMapValueOfMust[types.String]( + ctx, + map[string]attr.Value{ + "foo": types.StringValue("bar"), + }, + ), + }, + }, + } + runAutoFlattenTestCases(t, testCases, runChecks{CompareDiags: true, CompareTarget: true}) +} diff --git a/internal/framework/flex/autoflex_nested_test.go b/internal/framework/flex/autoflex_nested_test.go new file mode 100644 index 
000000000000..18d22d83b64b --- /dev/null +++ b/internal/framework/flex/autoflex_nested_test.go @@ -0,0 +1,1526 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package flex + +// Tests AutoFlex's Expand/Flatten of nested blocks and complex types. + +import ( + "context" + "reflect" + "testing" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/google/go-cmp/cmp" + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-provider-aws/internal/errs/fwdiag" + fwtypes "github.com/hashicorp/terraform-provider-aws/internal/framework/types" +) + +type tfSingleStringField struct { + Field1 types.String `tfsdk:"field1"` +} + +type tfListOfNestedObjectLegacy struct { + Field1 fwtypes.ListNestedObjectValueOf[tfSingleStringField] `tfsdk:"field1" autoflex:",legacy"` +} + +type tfListOfNestedObject struct { + Field1 fwtypes.ListNestedObjectValueOf[tfSingleStringField] `tfsdk:"field1"` +} + +type tfSetOfNestedObject struct { + Field1 fwtypes.SetNestedObjectValueOf[tfSingleStringField] `tfsdk:"field1"` +} + +type tfSetOfNestedObjectLegacy struct { + Field1 fwtypes.SetNestedObjectValueOf[tfSingleStringField] `tfsdk:"field1" autoflex:",legacy"` +} + +type awsNestedObjectPointer struct { + Field1 *awsSingleStringValue +} + +type awsSliceOfNestedObjectPointers struct { + Field1 []*awsSingleStringValue +} + +type awsSliceOfNestedObjectValues struct { + Field1 []awsSingleStringValue +} + +func TestExpandSimpleSingleNestedBlock(t *testing.T) { + t.Parallel() + + type tf01 struct { + Field1 types.String `tfsdk:"field1"` + Field2 types.Int64 `tfsdk:"field2"` + } + type aws01 struct { + Field1 *string + Field2 int64 + } + + type tf02 struct { + Field1 fwtypes.ObjectValueOf[tf01] `tfsdk:"field1"` + } + type aws02 struct { + Field1 *aws01 + } + type aws03 struct { + Field1 aws01 + } + + ctx := context.Background() + testCases := autoFlexTestCases{ + "single nested block 
pointer": { + Source: &tf02{Field1: fwtypes.NewObjectValueOfMust[tf01](ctx, &tf01{Field1: types.StringValue("a"), Field2: types.Int64Value(1)})}, + Target: &aws02{}, + WantTarget: &aws02{Field1: &aws01{Field1: aws.String("a"), Field2: 1}}, + }, + "single nested block nil": { + Source: &tf02{Field1: fwtypes.NewObjectValueOfNull[tf01](ctx)}, + Target: &aws02{}, + WantTarget: &aws02{}, + }, + "single nested block value": { + Source: &tf02{Field1: fwtypes.NewObjectValueOfMust[tf01](ctx, &tf01{Field1: types.StringValue("a"), Field2: types.Int64Value(1)})}, + Target: &aws03{}, + WantTarget: &aws03{Field1: aws01{Field1: aws.String("a"), Field2: 1}}, + }, + } + runAutoExpandTestCases(t, testCases, runChecks{CompareDiags: true, CompareTarget: true}) +} + +func TestExpandNestedComplex(t *testing.T) { + t.Parallel() + + ctx := context.Background() + + testCases := autoFlexTestCases{ + "complex Source and complex Target": { + Source: &tfComplexValue{ + Field1: types.StringValue("m"), + Field2: fwtypes.NewListNestedObjectValueOfPtrMust(ctx, &tfListOfNestedObject{ + Field1: fwtypes.NewListNestedObjectValueOfPtrMust(ctx, &tfSingleStringField{ + Field1: types.StringValue("n"), + }), + }), + Field3: types.MapValueMust(types.StringType, map[string]attr.Value{ + "X": types.StringValue("x"), + "Y": types.StringValue("y"), + }), + Field4: fwtypes.NewSetNestedObjectValueOfValueSliceMust(ctx, []tfSingleInt64Field{ + {Field1: types.Int64Value(100)}, + {Field1: types.Int64Value(2000)}, + {Field1: types.Int64Value(30000)}, + }), + }, + Target: &awsComplexValue{}, + WantTarget: &awsComplexValue{ + Field1: "m", + Field2: &awsNestedObjectPointer{ + Field1: &awsSingleStringValue{ + Field1: "n", + }, + }, + Field3: aws.StringMap(map[string]string{ + "X": "x", + "Y": "y", + }), + Field4: []awsSingleInt64Value{ + {Field1: 100}, + {Field1: 2000}, + {Field1: 30000}, + }, + }, + }, + } + + runAutoExpandTestCases(t, testCases, runChecks{CompareDiags: true, CompareTarget: true, GoldenLogs: true}) +} + 
+func TestExpandComplexSingleNestedBlock(t *testing.T) { + t.Parallel() + + type tf01 struct { + Field1 types.Bool `tfsdk:"field1"` + Field2 fwtypes.ListValueOf[types.String] `tfsdk:"field2"` + } + type aws01 struct { + Field1 bool + Field2 []string + } + + type tf02 struct { + Field1 fwtypes.ObjectValueOf[tf01] `tfsdk:"field1"` + } + type aws02 struct { + Field1 *aws01 + } + + type tf03 struct { + Field1 fwtypes.ObjectValueOf[tf02] `tfsdk:"field1"` + } + type aws03 struct { + Field1 *aws02 + } + + ctx := context.Background() + testCases := autoFlexTestCases{ + "single nested block pointer": { + Source: &tf03{ + Field1: fwtypes.NewObjectValueOfMust[tf02]( + ctx, + &tf02{ + Field1: fwtypes.NewObjectValueOfMust[tf01]( + ctx, + &tf01{ + Field1: types.BoolValue(true), + Field2: fwtypes.NewListValueOfMust[types.String](ctx, []attr.Value{types.StringValue("a"), types.StringValue("b")}), + }, + ), + }, + ), + }, + Target: &aws03{}, + WantTarget: &aws03{ + Field1: &aws02{ + Field1: &aws01{ + Field1: true, + Field2: []string{"a", "b"}, + }, + }, + }, + }, + } + runAutoExpandTestCases(t, testCases, runChecks{CompareDiags: true, CompareTarget: true}) +} + +func TestExpandTopLevelListOfNestedObject(t *testing.T) { + t.Parallel() + + ctx := context.Background() + + testCases := autoFlexTestCases{ + "valid value to []struct": { + Source: fwtypes.NewListNestedObjectValueOfValueSliceMust(ctx, []tfSingleStringField{ + { + Field1: types.StringValue("value1"), + }, + { + Field1: types.StringValue("value2"), + }, + }), + Target: &[]awsSingleStringValue{}, + WantTarget: &[]awsSingleStringValue{ + { + Field1: "value1", + }, + { + Field1: "value2", + }, + }, + }, + "empty value to []struct": { + Source: fwtypes.NewListNestedObjectValueOfValueSliceMust(ctx, []tfSingleStringField{}), + Target: &[]awsSingleStringValue{}, + WantTarget: &[]awsSingleStringValue{}, + }, + "null value to []struct": { + Source: fwtypes.NewListNestedObjectValueOfNull[tfSingleStringField](ctx), + Target: 
&[]awsSingleStringValue{}, + WantTarget: &[]awsSingleStringValue{}, + }, + + "valid value to []*struct": { + Source: fwtypes.NewListNestedObjectValueOfValueSliceMust(ctx, []tfSingleStringField{ + { + Field1: types.StringValue("value1"), + }, + { + Field1: types.StringValue("value2"), + }, + }), + Target: &[]*awsSingleStringValue{}, + WantTarget: &[]*awsSingleStringValue{ + { + Field1: "value1", + }, + { + Field1: "value2", + }, + }, + }, + "empty value to []*struct": { + Source: fwtypes.NewListNestedObjectValueOfValueSliceMust(ctx, []tfSingleStringField{}), + Target: &[]*awsSingleStringValue{}, + WantTarget: &[]*awsSingleStringValue{}, + }, + "null value to []*struct": { + Source: fwtypes.NewListNestedObjectValueOfNull[tfSingleStringField](ctx), + Target: &[]*awsSingleStringValue{}, + WantTarget: &[]*awsSingleStringValue{}, + }, + + "single list value to single struct": { + Source: fwtypes.NewListNestedObjectValueOfValueSliceMust(ctx, []tfSingleStringField{ + { + Field1: types.StringValue("value1"), + }, + }), + Target: &awsSingleStringValue{}, + WantTarget: &awsSingleStringValue{ + Field1: "value1", + }, + }, + "empty list value to single struct": { + Source: fwtypes.NewListNestedObjectValueOfValueSliceMust(ctx, []tfSingleStringField{}), + Target: &awsSingleStringValue{}, + WantTarget: &awsSingleStringValue{}, + }, + "null value to single struct": { + Source: fwtypes.NewListNestedObjectValueOfNull[tfSingleStringField](ctx), + Target: &awsSingleStringValue{}, + WantTarget: &awsSingleStringValue{}, + }, + } + runAutoExpandTestCases(t, testCases, runChecks{CompareDiags: true, CompareTarget: true}) +} + +func TestExpandSetOfNestedObject(t *testing.T) { + t.Parallel() + + ctx := context.Background() + + testCases := autoFlexTestCases{ + "valid value to []struct": { + Source: fwtypes.NewSetNestedObjectValueOfValueSliceMust(ctx, []tfSingleStringField{ + { + Field1: types.StringValue("value1"), + }, + { + Field1: types.StringValue("value2"), + }, + }), + Target: 
&[]awsSingleStringValue{}, + WantTarget: &[]awsSingleStringValue{ + { + Field1: "value1", + }, + { + Field1: "value2", + }, + }, + }, + "empty value to []struct": { + Source: fwtypes.NewSetNestedObjectValueOfValueSliceMust(ctx, []tfSingleStringField{}), + Target: &[]awsSingleStringValue{}, + WantTarget: &[]awsSingleStringValue{}, + }, + "null value to []struct": { + Source: fwtypes.NewSetNestedObjectValueOfNull[tfSingleStringField](ctx), + Target: &[]awsSingleStringValue{}, + WantTarget: &[]awsSingleStringValue{}, + }, + + "valid value to []*struct": { + Source: fwtypes.NewSetNestedObjectValueOfValueSliceMust(ctx, []tfSingleStringField{ + { + Field1: types.StringValue("value1"), + }, + { + Field1: types.StringValue("value2"), + }, + }), + Target: &[]*awsSingleStringValue{}, + WantTarget: &[]*awsSingleStringValue{ + { + Field1: "value1", + }, + { + Field1: "value2", + }, + }, + }, + "empty value to []*struct": { + Source: fwtypes.NewSetNestedObjectValueOfValueSliceMust(ctx, []tfSingleStringField{}), + Target: &[]*awsSingleStringValue{}, + WantTarget: &[]*awsSingleStringValue{}, + }, + "null value to []*struct": { + Source: fwtypes.NewSetNestedObjectValueOfNull[tfSingleStringField](ctx), + Target: &[]*awsSingleStringValue{}, + WantTarget: &[]*awsSingleStringValue{}, + }, + + "single set value to single struct": { + Source: fwtypes.NewSetNestedObjectValueOfValueSliceMust(ctx, []tfSingleStringField{ + { + Field1: types.StringValue("value1"), + }, + }), + Target: &awsSingleStringValue{}, + WantTarget: &awsSingleStringValue{ + Field1: "value1", + }, + }, + "empty set value to single struct": { + Source: fwtypes.NewSetNestedObjectValueOfValueSliceMust(ctx, []tfSingleStringField{}), + Target: &awsSingleStringValue{}, + WantTarget: &awsSingleStringValue{}, + }, + "null value to single struct": { + Source: fwtypes.NewSetNestedObjectValueOfNull[tfSingleStringField](ctx), + Target: &awsSingleStringValue{}, + WantTarget: &awsSingleStringValue{}, + }, + } + 
runAutoExpandTestCases(t, testCases, runChecks{CompareDiags: true, CompareTarget: true}) +} + +func TestExpandSimpleNestedBlockWithStringEnum(t *testing.T) { + t.Parallel() + + type tf01 struct { + Field1 types.Int64 `tfsdk:"field1"` + Field2 fwtypes.StringEnum[testEnum] `tfsdk:"field2"` + } + type aws01 struct { + Field1 int64 + Field2 testEnum + } + + testCases := autoFlexTestCases{ + "single nested valid value": { + Source: &tf01{ + Field1: types.Int64Value(1), + Field2: fwtypes.StringEnumValue(testEnumList), + }, + Target: &aws01{}, + WantTarget: &aws01{ + Field1: 1, + Field2: testEnumList, + }, + }, + "single nested null value": { + Source: &tf01{ + Field1: types.Int64Value(1), + Field2: fwtypes.StringEnumNull[testEnum](), + }, + Target: &aws01{}, + WantTarget: &aws01{ + Field1: 1, + Field2: "", + }, + }, + } + runAutoExpandTestCases(t, testCases, runChecks{CompareDiags: true, CompareTarget: true}) +} + +func TestExpandComplexNestedBlockWithStringEnum(t *testing.T) { + t.Parallel() + + type tf01 struct { + Field2 fwtypes.StringEnum[testEnum] `tfsdk:"field2"` + } + type tf02 struct { + Field1 types.Int64 `tfsdk:"field1"` + Field2 fwtypes.ListNestedObjectValueOf[tf01] `tfsdk:"field2"` + } + type aws02 struct { + Field2 testEnum + } + type aws01 struct { + Field1 int64 + Field2 *aws02 + } + + ctx := context.Background() + testCases := autoFlexTestCases{ + "single nested valid value": { + Source: &tf02{ + Field1: types.Int64Value(1), + Field2: fwtypes.NewListNestedObjectValueOfPtrMust(ctx, &tf01{ + Field2: fwtypes.StringEnumValue(testEnumList), + }), + }, + Target: &aws01{}, + WantTarget: &aws01{ + Field1: 1, + Field2: &aws02{ + Field2: testEnumList, + }, + }, + }, + "single nested null value": { + Source: &tf02{ + Field1: types.Int64Value(1), + Field2: fwtypes.NewListNestedObjectValueOfPtrMust(ctx, &tf01{ + Field2: fwtypes.StringEnumNull[testEnum](), + }), + }, + Target: &aws01{}, + WantTarget: &aws01{ + Field1: 1, + Field2: &aws02{ + Field2: "", + }, + }, + }, + 
} + runAutoExpandTestCases(t, testCases, runChecks{CompareDiags: true, CompareTarget: true}) +} + +func TestExpandListOfNestedObjectField(t *testing.T) { + t.Parallel() + + ctx := context.Background() + + testCases := map[string]autoFlexTestCases{ + "ListNestedObject to *struct": { + "value": { + Source: &tfListOfNestedObject{ + Field1: fwtypes.NewListNestedObjectValueOfPtrMust(ctx, &tfSingleStringField{ + Field1: types.StringValue("a"), + }), + }, + Target: &awsNestedObjectPointer{}, + WantTarget: &awsNestedObjectPointer{ + Field1: &awsSingleStringValue{ + Field1: "a", + }, + }, + }, + }, + + "ListNestedObject to []struct": { + "empty": { + Source: &tfListOfNestedObject{ + Field1: fwtypes.NewListNestedObjectValueOfValueSliceMust(ctx, []tfSingleStringField{}), + }, + Target: &awsSliceOfNestedObjectValues{}, + WantTarget: &awsSliceOfNestedObjectValues{ + Field1: []awsSingleStringValue{}, + }, + }, + "values": { + Source: &tfListOfNestedObject{Field1: fwtypes.NewListNestedObjectValueOfValueSliceMust(ctx, []tfSingleStringField{ + {Field1: types.StringValue("a")}, + {Field1: types.StringValue("b")}, + })}, + Target: &awsSliceOfNestedObjectValues{}, + WantTarget: &awsSliceOfNestedObjectValues{Field1: []awsSingleStringValue{ + {Field1: "a"}, + {Field1: "b"}, + }}, + }, + }, + + "ListNestedObject to []*struct": { + "empty": { + Source: &tfListOfNestedObject{ + Field1: fwtypes.NewListNestedObjectValueOfSliceMust(ctx, []*tfSingleStringField{}), + }, + Target: &awsSliceOfNestedObjectPointers{}, + WantTarget: &awsSliceOfNestedObjectPointers{ + Field1: []*awsSingleStringValue{}, + }, + }, + "values": { + Source: &tfListOfNestedObject{ + Field1: fwtypes.NewListNestedObjectValueOfSliceMust(ctx, []*tfSingleStringField{ + {Field1: types.StringValue("a")}, + {Field1: types.StringValue("b")}, + }), + }, + Target: &awsSliceOfNestedObjectPointers{}, + WantTarget: &awsSliceOfNestedObjectPointers{ + Field1: []*awsSingleStringValue{ + {Field1: "a"}, + {Field1: "b"}, + }, + }, + }, + }, + 
} + + for testName, cases := range testCases { + t.Run(testName, func(t *testing.T) { + t.Parallel() + + runAutoExpandTestCases(t, cases, runChecks{CompareDiags: true, CompareTarget: true}) + }) + } +} + +func TestExpandSetOfNestedObjectField(t *testing.T) { + t.Parallel() + + ctx := context.Background() + + testCases := map[string]autoFlexTestCases{ + "SetNestedObject to *struct": { + "value": { + Source: &tfSetOfNestedObject{ + Field1: fwtypes.NewSetNestedObjectValueOfPtrMust(ctx, &tfSingleStringField{ + Field1: types.StringValue("a"), + }), + }, + Target: &awsNestedObjectPointer{}, + WantTarget: &awsNestedObjectPointer{ + Field1: &awsSingleStringValue{ + Field1: "a", + }, + }, + }, + }, + + "SetNestedObject to []*struct": { + "empty": { + Source: &tfSetOfNestedObject{Field1: fwtypes.NewSetNestedObjectValueOfSliceMust(ctx, []*tfSingleStringField{})}, + Target: &awsSliceOfNestedObjectPointers{}, + WantTarget: &awsSliceOfNestedObjectPointers{Field1: []*awsSingleStringValue{}}, + }, + "values": { + Source: &tfSetOfNestedObject{Field1: fwtypes.NewSetNestedObjectValueOfSliceMust(ctx, []*tfSingleStringField{ + {Field1: types.StringValue("a")}, + {Field1: types.StringValue("b")}, + })}, + Target: &awsSliceOfNestedObjectPointers{}, + WantTarget: &awsSliceOfNestedObjectPointers{Field1: []*awsSingleStringValue{ + {Field1: "a"}, + {Field1: "b"}, + }}, + }, + }, + + "SetNestedObject to []struct": { + "values": { + Source: &tfSetOfNestedObject{Field1: fwtypes.NewSetNestedObjectValueOfValueSliceMust(ctx, []tfSingleStringField{ + {Field1: types.StringValue("a")}, + {Field1: types.StringValue("b")}, + })}, + Target: &awsSliceOfNestedObjectValues{}, + WantTarget: &awsSliceOfNestedObjectValues{Field1: []awsSingleStringValue{ + {Field1: "a"}, + {Field1: "b"}, + }}, + }, + }, + } + + for testName, cases := range testCases { + t.Run(testName, func(t *testing.T) { + t.Parallel() + + runAutoExpandTestCases(t, cases, runChecks{CompareDiags: true, CompareTarget: true}) + }) + } +} + 
+func TestFlattenNestedComplex(t *testing.T) { + t.Parallel() + + ctx := context.Background() + + testCases := autoFlexTestCases{ + "complex Source and complex Target": { + Source: &awsComplexValue{ + Field1: "m", + Field2: &awsNestedObjectPointer{Field1: &awsSingleStringValue{Field1: "n"}}, + Field3: aws.StringMap(map[string]string{"X": "x", "Y": "y"}), + Field4: []awsSingleInt64Value{{Field1: 100}, {Field1: 2000}, {Field1: 30000}}, + }, + Target: &tfComplexValue{}, + WantTarget: &tfComplexValue{ + Field1: types.StringValue("m"), + Field2: fwtypes.NewListNestedObjectValueOfPtrMust(ctx, &tfListOfNestedObject{ + Field1: fwtypes.NewListNestedObjectValueOfPtrMust(ctx, &tfSingleStringField{ + Field1: types.StringValue("n"), + }), + }), + Field3: types.MapValueMust(types.StringType, map[string]attr.Value{ + "X": types.StringValue("x"), + "Y": types.StringValue("y"), + }), + Field4: fwtypes.NewSetNestedObjectValueOfValueSliceMust(ctx, []tfSingleInt64Field{ + {Field1: types.Int64Value(100)}, + {Field1: types.Int64Value(2000)}, + {Field1: types.Int64Value(30000)}, + }), + }, + }, + } + + runAutoFlattenTestCases(t, testCases, runChecks{CompareDiags: true, CompareTarget: true, GoldenLogs: true}) +} + +func TestFlattenSimpleNestedBlockWithStringEnum(t *testing.T) { + t.Parallel() + + type tf01 struct { + Field1 types.Int64 `tfsdk:"field1"` + Field2 fwtypes.StringEnum[testEnum] `tfsdk:"field2"` + } + type aws01 struct { + Field1 int64 + Field2 testEnum + } + + testCases := autoFlexTestCases{ + "single nested valid value": { + Source: &aws01{ + Field1: 1, + Field2: testEnumList, + }, + Target: &tf01{}, + WantTarget: &tf01{ + Field1: types.Int64Value(1), + Field2: fwtypes.StringEnumValue(testEnumList), + }, + }, + "single nested empty value": { + Source: &aws01{ + Field1: 1, + Field2: "", + }, + Target: &tf01{}, + WantTarget: &tf01{ + Field1: types.Int64Value(1), + Field2: fwtypes.StringEnumNull[testEnum](), + }, + }, + } + runAutoFlattenTestCases(t, testCases, 
runChecks{CompareDiags: true, CompareTarget: true}) +} + +func TestFlattenComplexNestedBlockWithStringEnum(t *testing.T) { + t.Parallel() + + type tf01 struct { + Field2 fwtypes.StringEnum[testEnum] `tfsdk:"field2"` + } + type tf02 struct { + Field1 types.Int64 `tfsdk:"field1"` + Field2 fwtypes.ListNestedObjectValueOf[tf01] `tfsdk:"field2"` + } + type aws02 struct { + Field2 testEnum + } + type aws01 struct { + Field1 int64 + Field2 *aws02 + } + + ctx := context.Background() + var zero fwtypes.StringEnum[testEnum] + testCases := autoFlexTestCases{ + "single nested valid value": { + Source: &aws01{ + Field1: 1, + Field2: &aws02{ + Field2: testEnumList, + }, + }, + Target: &tf02{}, + WantTarget: &tf02{ + Field1: types.Int64Value(1), + Field2: fwtypes.NewListNestedObjectValueOfPtrMust(ctx, &tf01{ + Field2: fwtypes.StringEnumValue(testEnumList), + }), + }, + }, + "single nested empty value": { + Source: &aws01{ + Field1: 1, + Field2: &aws02{Field2: ""}, + }, + Target: &tf02{}, + WantTarget: &tf02{ + Field1: types.Int64Value(1), + Field2: fwtypes.NewListNestedObjectValueOfPtrMust(ctx, &tf01{ + Field2: fwtypes.StringEnumNull[testEnum](), + }), + }, + }, + "single nested zero value": { + Source: &aws01{ + Field1: 1, + Field2: &aws02{ + Field2: ""}, + }, + Target: &tf02{}, + WantTarget: &tf02{ + Field1: types.Int64Value(1), + Field2: fwtypes.NewListNestedObjectValueOfPtrMust(ctx, &tf01{ + Field2: zero, + }), + }, + }, + } + runAutoFlattenTestCases(t, testCases, runChecks{CompareDiags: true, CompareTarget: true}) +} + +func TestFlattenSimpleSingleNestedBlock(t *testing.T) { + t.Parallel() + + type tf01 struct { + Field1 types.String `tfsdk:"field1"` + Field2 types.Int64 `tfsdk:"field2"` + } + type aws01 struct { + Field1 *string + Field2 int64 + } + + type tf02 struct { + Field1 fwtypes.ObjectValueOf[tf01] `tfsdk:"field1"` + } + type aws02 struct { + Field1 *aws01 + } + type aws03 struct { + Field1 aws01 + } + + ctx := context.Background() + testCases := autoFlexTestCases{ 
+ "single nested block pointer": { + Source: &aws02{ + Field1: &aws01{ + Field1: aws.String("a"), + Field2: 1, + }, + }, + Target: &tf02{}, + WantTarget: &tf02{ + Field1: fwtypes.NewObjectValueOfMust[tf01](ctx, &tf01{ + Field1: types.StringValue("a"), + Field2: types.Int64Value(1), + }), + }, + }, + "single nested block nil": { + Source: &aws02{}, + Target: &tf02{}, + WantTarget: &tf02{ + Field1: fwtypes.NewObjectValueOfNull[tf01](ctx), + }, + }, + "single nested block value": { + Source: &aws03{ + Field1: aws01{ + Field1: aws.String("a"), + Field2: 1}, + }, + Target: &tf02{}, + WantTarget: &tf02{ + Field1: fwtypes.NewObjectValueOfMust[tf01](ctx, &tf01{ + Field1: types.StringValue("a"), + Field2: types.Int64Value(1), + }), + }, + }, + } + runAutoFlattenTestCases(t, testCases, runChecks{CompareDiags: true, CompareTarget: true}) +} + +func TestFlattenComplexSingleNestedBlock(t *testing.T) { + t.Parallel() + + type tf01 struct { + Field1 types.Bool `tfsdk:"field1"` + Field2 fwtypes.ListValueOf[types.String] `tfsdk:"field2"` + } + type aws01 struct { + Field1 bool + Field2 []string + } + + type tf02 struct { + Field1 fwtypes.ObjectValueOf[tf01] `tfsdk:"field1"` + } + type aws02 struct { + Field1 *aws01 + } + + type tf03 struct { + Field1 fwtypes.ObjectValueOf[tf02] `tfsdk:"field1"` + } + type aws03 struct { + Field1 *aws02 + } + + ctx := context.Background() + testCases := autoFlexTestCases{ + "single nested block pointer": { + Source: &aws03{ + Field1: &aws02{ + Field1: &aws01{ + Field1: true, + Field2: []string{"a", "b"}, + }, + }, + }, + Target: &tf03{}, + WantTarget: &tf03{ + Field1: fwtypes.NewObjectValueOfMust[tf02](ctx, &tf02{ + Field1: fwtypes.NewObjectValueOfMust[tf01](ctx, &tf01{ + Field1: types.BoolValue(true), + Field2: fwtypes.NewListValueOfMust[types.String](ctx, []attr.Value{ + types.StringValue("a"), + types.StringValue("b"), + }), + }), + }), + }, + }, + } + runAutoFlattenTestCases(t, testCases, runChecks{CompareDiags: true, CompareTarget: true}) +} + 
+func TestFlattenSimpleNestedBlockWithFloat32(t *testing.T) { + t.Parallel() + + type tf01 struct { + Field1 types.Int64 `tfsdk:"field1"` + Field2 types.Float64 `tfsdk:"field2"` + } + type aws01 struct { + Field1 int64 + Field2 *float32 + } + + testCases := autoFlexTestCases{ + "single nested valid value": { + Source: &aws01{Field1: 1, Field2: aws.Float32(0.01)}, + Target: &tf01{}, + WantTarget: &tf01{Field1: types.Int64Value(1), Field2: types.Float64Value(0.01)}, + }, + } + runAutoFlattenTestCases(t, testCases, runChecks{CompareDiags: true, CompareTarget: true}) +} + +func TestFlattenComplexNestedBlockWithFloat32(t *testing.T) { + t.Parallel() + + type tf01 struct { + Field1 types.Float64 `tfsdk:"field1"` + Field2 types.Float64 `tfsdk:"field2"` + } + type tf02 struct { + Field1 types.Int64 `tfsdk:"field1"` + Field2 fwtypes.ListNestedObjectValueOf[tf01] `tfsdk:"field2"` + } + type aws02 struct { + Field1 float32 + Field2 *float32 + } + type aws01 struct { + Field1 int64 + Field2 *aws02 + } + + ctx := context.Background() + testCases := autoFlexTestCases{ + "single nested valid value": { + Source: &aws01{ + Field1: 1, + Field2: &aws02{ + Field1: 1.11, + Field2: aws.Float32(-2.22), + }, + }, + Target: &tf02{}, + WantTarget: &tf02{ + Field1: types.Int64Value(1), + Field2: fwtypes.NewListNestedObjectValueOfPtrMust(ctx, &tf01{ + Field1: types.Float64Value(1.11), + Field2: types.Float64Value(-2.22), + }), + }, + }, + } + runAutoFlattenTestCases(t, testCases, runChecks{CompareDiags: true, CompareTarget: true}) +} + +func TestFlattenSimpleNestedBlockWithFloat64(t *testing.T) { + t.Parallel() + + type tf01 struct { + Field1 types.Int64 `tfsdk:"field1"` + Field2 types.Float64 `tfsdk:"field2"` + } + type aws01 struct { + Field1 int64 + Field2 *float64 + } + + testCases := autoFlexTestCases{ + "single nested valid value": { + Source: &aws01{ + Field1: 1, + Field2: aws.Float64(0.01), + }, + Target: &tf01{}, + WantTarget: &tf01{ + Field1: types.Int64Value(1), + Field2: 
types.Float64Value(0.01), + }, + }, + } + runAutoFlattenTestCases(t, testCases, runChecks{CompareDiags: true, CompareTarget: true}) +} + +func TestFlattenComplexNestedBlockWithFloat64(t *testing.T) { + t.Parallel() + + type tf01 struct { + Field1 types.Float64 `tfsdk:"field1"` + Field2 types.Float64 `tfsdk:"field2"` + } + type tf02 struct { + Field1 types.Int64 `tfsdk:"field1"` + Field2 fwtypes.ListNestedObjectValueOf[tf01] `tfsdk:"field2"` + } + type aws02 struct { + Field1 float64 + Field2 *float64 + } + type aws01 struct { + Field1 int64 + Field2 *aws02 + } + + ctx := context.Background() + testCases := autoFlexTestCases{ + "single nested valid value": { + Source: &aws01{ + Field1: 1, + Field2: &aws02{ + Field1: 1.11, + Field2: aws.Float64(-2.22), + }, + }, + Target: &tf02{}, + WantTarget: &tf02{Field1: types.Int64Value(1), Field2: fwtypes.NewListNestedObjectValueOfPtrMust(ctx, &tf01{Field1: types.Float64Value(1.11), Field2: types.Float64Value(-2.22)})}, + }, + } + runAutoFlattenTestCases(t, testCases, runChecks{CompareDiags: true, CompareTarget: true}) +} + +func TestFlattenObjectValueField(t *testing.T) { + t.Parallel() + + ctx := context.Background() + + testCases := map[string]autoFlexTestCases{ + "*struct to ObjectValue": { + "nil": { + Source: awsNestedObjectPointer{}, + Target: &tfObjectValue[tfSingleStringField]{}, + WantTarget: &tfObjectValue[tfSingleStringField]{ + Field1: fwtypes.NewObjectValueOfNull[tfSingleStringField](ctx), + }, + }, + "value": { + Source: awsNestedObjectPointer{ + Field1: &awsSingleStringValue{ + Field1: "a", + }, + }, + Target: &tfObjectValue[tfSingleStringField]{}, + WantTarget: &tfObjectValue[tfSingleStringField]{ + Field1: fwtypes.NewObjectValueOfMust(ctx, &tfSingleStringField{ + Field1: types.StringValue("a"), + }), + }, + }, + }, + } + + for testName, cases := range testCases { + t.Run(testName, func(t *testing.T) { + t.Parallel() + + runAutoFlattenTestCases(t, cases, runChecks{CompareDiags: true, CompareTarget: true}) + }) 
+ } +} + +func TestFlattenListOfNestedObjectField(t *testing.T) { + t.Parallel() + + ctx := context.Background() + + testCases := map[string]autoFlexTestCases{ + "*struct to ListNestedObject": { + "nil": { + Source: awsNestedObjectPointer{}, + Target: &tfListOfNestedObject{}, + WantTarget: &tfListOfNestedObject{ + Field1: fwtypes.NewListNestedObjectValueOfNull[tfSingleStringField](ctx), + }, + }, + "value": { + Source: awsNestedObjectPointer{ + Field1: &awsSingleStringValue{ + Field1: "a", + }, + }, + Target: &tfListOfNestedObject{}, + WantTarget: &tfListOfNestedObject{ + Field1: fwtypes.NewListNestedObjectValueOfPtrMust(ctx, &tfSingleStringField{ + Field1: types.StringValue("a"), + }), + }, + }, + }, + + "legacy *struct to ListNestedObject": { + "nil": { + Source: awsNestedObjectPointer{}, + Target: &tfListOfNestedObjectLegacy{}, + WantTarget: &tfListOfNestedObjectLegacy{ + Field1: fwtypes.NewListNestedObjectValueOfPtrMust(ctx, &tfSingleStringField{}), + }, + }, + "value": { + Source: awsNestedObjectPointer{ + Field1: &awsSingleStringValue{ + Field1: "a", + }, + }, + Target: &tfListOfNestedObjectLegacy{}, + WantTarget: &tfListOfNestedObjectLegacy{ + Field1: fwtypes.NewListNestedObjectValueOfPtrMust(ctx, &tfSingleStringField{ + Field1: types.StringValue("a"), + }), + }, + }, + }, + + "[]struct to ListNestedObject": { + "nil": { + Source: awsSliceOfNestedObjectValues{}, + Target: &tfListOfNestedObject{}, + WantTarget: &tfListOfNestedObject{ + Field1: fwtypes.NewListNestedObjectValueOfNull[tfSingleStringField](ctx), + }, + }, + "empty": { + Source: awsSliceOfNestedObjectValues{ + Field1: []awsSingleStringValue{}, + }, + Target: &tfListOfNestedObject{}, + WantTarget: &tfListOfNestedObject{ + Field1: fwtypes.NewListNestedObjectValueOfValueSliceMust(ctx, []tfSingleStringField{}), + }, + }, + "values": { + Source: awsSliceOfNestedObjectValues{ + Field1: []awsSingleStringValue{ + {Field1: "a"}, + {Field1: "b"}, + }, + }, + Target: &tfListOfNestedObject{}, + WantTarget: 
&tfListOfNestedObject{ + Field1: fwtypes.NewListNestedObjectValueOfValueSliceMust(ctx, []tfSingleStringField{ + {Field1: types.StringValue("a")}, + {Field1: types.StringValue("b")}, + }), + }, + }, + }, + + "legacy []struct to ListNestedObject": { + "nil": { + Source: awsSliceOfNestedObjectValues{}, + Target: &tfListOfNestedObjectLegacy{}, + WantTarget: &tfListOfNestedObjectLegacy{ + Field1: fwtypes.NewListNestedObjectValueOfValueSliceMust(ctx, []tfSingleStringField{}), + }, + }, + "empty": { + Source: awsSliceOfNestedObjectValues{ + Field1: []awsSingleStringValue{}, + }, + Target: &tfListOfNestedObjectLegacy{}, + WantTarget: &tfListOfNestedObjectLegacy{ + Field1: fwtypes.NewListNestedObjectValueOfValueSliceMust(ctx, []tfSingleStringField{}), + }, + }, + "values": { + Source: awsSliceOfNestedObjectValues{ + Field1: []awsSingleStringValue{ + {Field1: "a"}, + {Field1: "b"}, + }, + }, + Target: &tfListOfNestedObjectLegacy{}, + WantTarget: &tfListOfNestedObjectLegacy{ + Field1: fwtypes.NewListNestedObjectValueOfValueSliceMust(ctx, []tfSingleStringField{ + {Field1: types.StringValue("a")}, + {Field1: types.StringValue("b")}, + }), + }, + }, + }, + + "[]*struct to ListNestedObject": { + "nil": { + Source: awsSliceOfNestedObjectPointers{}, + Target: &tfListOfNestedObject{}, + WantTarget: &tfListOfNestedObject{ + Field1: fwtypes.NewListNestedObjectValueOfNull[tfSingleStringField](ctx), + }, + }, + "empty": { + Source: awsSliceOfNestedObjectPointers{ + Field1: []*awsSingleStringValue{}, + }, + Target: &tfListOfNestedObject{}, + WantTarget: &tfListOfNestedObject{ + Field1: fwtypes.NewListNestedObjectValueOfSliceMust(ctx, []*tfSingleStringField{}), + }, + }, + "values": { + Source: awsSliceOfNestedObjectPointers{ + Field1: []*awsSingleStringValue{ + {Field1: "a"}, + {Field1: "b"}, + }, + }, + Target: &tfListOfNestedObject{}, + WantTarget: &tfListOfNestedObject{ + Field1: fwtypes.NewListNestedObjectValueOfSliceMust(ctx, []*tfSingleStringField{ + {Field1: 
types.StringValue("a")}, + {Field1: types.StringValue("b")}, + }), + }, + }, + }, + + "legacy []*struct to ListNestedObject": { + "nil": { + Source: awsSliceOfNestedObjectPointers{}, + Target: &tfListOfNestedObjectLegacy{}, + WantTarget: &tfListOfNestedObjectLegacy{ + Field1: fwtypes.NewListNestedObjectValueOfSliceMust(ctx, []*tfSingleStringField{}), + }, + }, + "empty": { + Source: awsSliceOfNestedObjectPointers{ + Field1: []*awsSingleStringValue{}, + }, + Target: &tfListOfNestedObjectLegacy{}, + WantTarget: &tfListOfNestedObjectLegacy{ + Field1: fwtypes.NewListNestedObjectValueOfSliceMust(ctx, []*tfSingleStringField{}), + }, + }, + "values": { + Source: awsSliceOfNestedObjectPointers{ + Field1: []*awsSingleStringValue{ + {Field1: "a"}, + {Field1: "b"}, + }, + }, + Target: &tfListOfNestedObjectLegacy{}, + WantTarget: &tfListOfNestedObjectLegacy{ + Field1: fwtypes.NewListNestedObjectValueOfSliceMust(ctx, []*tfSingleStringField{ + {Field1: types.StringValue("a")}, + {Field1: types.StringValue("b")}, + }), + }, + }, + }, + } + + for testName, cases := range testCases { + t.Run(testName, func(t *testing.T) { + t.Parallel() + + runAutoFlattenTestCases(t, cases, runChecks{CompareDiags: true, CompareTarget: true}) + }) + } +} + +func TestFlattenTopLevelListOfNestedObject(t *testing.T) { + t.Parallel() + + ctx := context.Background() + + testCases := map[string]toplevelTestCase[[]awsSingleStringValue, fwtypes.ListNestedObjectValueOf[tfSingleStringField]]{ + "values": { + source: []awsSingleStringValue{ + { + Field1: "value1", + }, + { + Field1: "value2", + }, + }, + expectedValue: fwtypes.NewListNestedObjectValueOfValueSliceMust(ctx, []tfSingleStringField{ + { + Field1: types.StringValue("value1"), + }, + { + Field1: types.StringValue("value2"), + }, + }), + }, + + "empty": { + source: []awsSingleStringValue{}, + expectedValue: fwtypes.NewListNestedObjectValueOfValueSliceMust(ctx, []tfSingleStringField{}), + }, + + "null": { + source: nil, + expectedValue: 
fwtypes.NewListNestedObjectValueOfNull[tfSingleStringField](ctx), + }, + } + + runTopLevelTestCases(t, testCases, runChecks{CompareDiags: true, CompareTarget: true}) +} + +func TestFlattenSetOfNestedObjectField(t *testing.T) { + t.Parallel() + + ctx := context.Background() + + testCases := map[string]autoFlexTestCases{ + "*struct to SetNestedObject": { + "nil": { + Source: awsNestedObjectPointer{}, + Target: &tfSetOfNestedObject{}, + WantTarget: &tfSetOfNestedObject{ + Field1: fwtypes.NewSetNestedObjectValueOfNull[tfSingleStringField](ctx), + }, + }, + "value": { + Source: awsNestedObjectPointer{ + Field1: &awsSingleStringValue{Field1: "a"}, + }, + Target: &tfSetOfNestedObject{}, + WantTarget: &tfSetOfNestedObject{ + Field1: fwtypes.NewSetNestedObjectValueOfPtrMust(ctx, &tfSingleStringField{ + Field1: types.StringValue("a"), + }), + }, + }, + }, + + "[]struct to SetNestedObject": { + "nil": { + Source: &awsSliceOfNestedObjectValues{}, + Target: &tfSetOfNestedObject{}, + WantTarget: &tfSetOfNestedObject{ + Field1: fwtypes.NewSetNestedObjectValueOfNull[tfSingleStringField](ctx), + }, + }, + "empty": { + Source: &awsSliceOfNestedObjectValues{ + Field1: []awsSingleStringValue{}, + }, + Target: &tfSetOfNestedObject{}, + WantTarget: &tfSetOfNestedObject{ + Field1: fwtypes.NewSetNestedObjectValueOfValueSliceMust(ctx, []tfSingleStringField{}), + }, + }, + "values": { + Source: &awsSliceOfNestedObjectValues{ + Field1: []awsSingleStringValue{ + {Field1: "a"}, + {Field1: "b"}, + }, + }, + Target: &tfSetOfNestedObject{}, + WantTarget: &tfSetOfNestedObject{ + Field1: fwtypes.NewSetNestedObjectValueOfValueSliceMust(ctx, []tfSingleStringField{ + {Field1: types.StringValue("a")}, + {Field1: types.StringValue("b")}, + }), + }, + }, + }, + + "legacy []struct to SetNestedObject": { + "nil": { + Source: &awsSliceOfNestedObjectValues{}, + Target: &tfSetOfNestedObjectLegacy{}, + WantTarget: &tfSetOfNestedObjectLegacy{ + Field1: fwtypes.NewSetNestedObjectValueOfValueSliceMust(ctx, 
[]tfSingleStringField{}), + }, + }, + "empty": { + Source: &awsSliceOfNestedObjectValues{ + Field1: []awsSingleStringValue{}, + }, + Target: &tfSetOfNestedObjectLegacy{}, + WantTarget: &tfSetOfNestedObjectLegacy{ + Field1: fwtypes.NewSetNestedObjectValueOfValueSliceMust(ctx, []tfSingleStringField{}), + }, + }, + "values": { + Source: &awsSliceOfNestedObjectValues{ + Field1: []awsSingleStringValue{ + {Field1: "a"}, + {Field1: "b"}, + }, + }, + Target: &tfSetOfNestedObjectLegacy{}, + WantTarget: &tfSetOfNestedObjectLegacy{ + Field1: fwtypes.NewSetNestedObjectValueOfValueSliceMust(ctx, []tfSingleStringField{ + {Field1: types.StringValue("a")}, + {Field1: types.StringValue("b")}, + }), + }, + }, + }, + + "[]*struct to SetNestedObject": { + "nil": { + Source: &awsSliceOfNestedObjectPointers{}, + Target: &tfSetOfNestedObject{}, + WantTarget: &tfSetOfNestedObject{ + Field1: fwtypes.NewSetNestedObjectValueOfNull[tfSingleStringField](ctx), + }, + }, + "empty": { + Source: &awsSliceOfNestedObjectPointers{ + Field1: []*awsSingleStringValue{}, + }, + Target: &tfSetOfNestedObject{}, + WantTarget: &tfSetOfNestedObject{ + Field1: fwtypes.NewSetNestedObjectValueOfSliceMust(ctx, []*tfSingleStringField{}), + }, + }, + "values": { + Source: &awsSliceOfNestedObjectPointers{ + Field1: []*awsSingleStringValue{ + {Field1: "a"}, + {Field1: "b"}, + }, + }, + Target: &tfSetOfNestedObject{}, + WantTarget: &tfSetOfNestedObject{ + Field1: fwtypes.NewSetNestedObjectValueOfSliceMust(ctx, []*tfSingleStringField{ + {Field1: types.StringValue("a")}, + {Field1: types.StringValue("b")}, + }), + }, + }, + }, + + "legacy []*struct to SetNestedObject": { + "nil": { + Source: &awsSliceOfNestedObjectPointers{}, + Target: &tfSetOfNestedObjectLegacy{}, + WantTarget: &tfSetOfNestedObjectLegacy{ + Field1: fwtypes.NewSetNestedObjectValueOfSliceMust(ctx, []*tfSingleStringField{}), + }, + }, + "empty": { + Source: &awsSliceOfNestedObjectPointers{ + Field1: []*awsSingleStringValue{}, + }, + Target: 
&tfSetOfNestedObjectLegacy{}, + WantTarget: &tfSetOfNestedObjectLegacy{ + Field1: fwtypes.NewSetNestedObjectValueOfSliceMust(ctx, []*tfSingleStringField{}), + }, + }, + "values": { + Source: &awsSliceOfNestedObjectPointers{ + Field1: []*awsSingleStringValue{ + {Field1: "a"}, + {Field1: "b"}, + }, + }, + Target: &tfSetOfNestedObjectLegacy{}, + WantTarget: &tfSetOfNestedObjectLegacy{ + Field1: fwtypes.NewSetNestedObjectValueOfSliceMust(ctx, []*tfSingleStringField{ + {Field1: types.StringValue("a")}, + {Field1: types.StringValue("b")}, + }), + }, + }, + }, + } + + for testName, cases := range testCases { + t.Run(testName, func(t *testing.T) { + t.Parallel() + + runAutoFlattenTestCases(t, cases, runChecks{CompareDiags: true, CompareTarget: true}) + }) + } +} + +type rootStringModel struct { + Field1 types.String `tfsdk:"field1"` +} + +type rootListNestedObjectModel struct { + Field1 fwtypes.ListNestedObjectValueOf[nestedModel] `tfsdk:"field1"` +} + +type rootSetNestedObjectModel struct { + Field1 fwtypes.SetNestedObjectValueOf[nestedModel] `tfsdk:"field1"` +} + +type nestedModel struct { + Field1 types.String `tfsdk:"field1"` +} + +func TestFlattenPrePopulate(t *testing.T) { + t.Parallel() + ctx := context.Background() + + testCases := map[string]struct { + target any + expected any + }{ + "string": { + target: &rootStringModel{}, + expected: &rootStringModel{ + Field1: types.StringNull(), + }, + }, + + "nested list": { + target: &rootListNestedObjectModel{}, + expected: &rootListNestedObjectModel{ + Field1: fwtypes.NewListNestedObjectValueOfNull[nestedModel](ctx), + }, + }, + + "nested set": { + target: &rootSetNestedObjectModel{}, + expected: &rootSetNestedObjectModel{ + Field1: fwtypes.NewSetNestedObjectValueOfNull[nestedModel](ctx), + }, + }, + } + + for name, testCase := range testCases { + t.Run(name, func(t *testing.T) { + t.Parallel() + + valTo := reflect.ValueOf(testCase.target) + + diags := flattenPrePopulate(ctx, valTo) + + if l := len(diags); l > 0 { + 
t.Fatalf("expected 0 diags, got %s", fwdiag.DiagnosticsString(diags)) + } + + if diff := cmp.Diff(testCase.target, testCase.expected); diff != "" { + t.Errorf("unexpected diff (+wanted, -got): %s", diff) + } + }) + } +} diff --git a/internal/framework/flex/autoflex_nums_test.go b/internal/framework/flex/autoflex_nums_test.go new file mode 100644 index 000000000000..ac18c2bec89c --- /dev/null +++ b/internal/framework/flex/autoflex_nums_test.go @@ -0,0 +1,1314 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package flex + +// Tests AutoFlex's Expand/Flatten of numeric primitive types. Additional foundational tests +// for numeric types are in autoflex_primitives_test.go. + +import ( + "testing" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +type tfSingleFloat64Field struct { + Field1 types.Float64 `tfsdk:"field1"` +} + +type tfSingleFloat64FieldLegacy struct { + Field1 types.Float64 `tfsdk:"field1" autoflex:",legacy"` +} + +type tfSingleFloat32Field struct { + Field1 types.Float32 `tfsdk:"field1"` +} + +type tfSingleFloat32FieldLegacy struct { + Field1 types.Float32 `tfsdk:"field1" autoflex:",legacy"` +} + +type tfSingleInt64Field struct { + Field1 types.Int64 `tfsdk:"field1"` +} + +type tfSingleInt64FieldLegacy struct { + Field1 types.Int64 `tfsdk:"field1" autoflex:",legacy"` +} + +type tfSingleInt32Field struct { + Field1 types.Int32 `tfsdk:"field1"` +} + +type tfSingleInt32FieldLegacy struct { + Field1 types.Int32 `tfsdk:"field1" autoflex:",legacy"` +} + +// All primitive types. 
+type tfAllThePrimitiveFields struct { + Field1 types.String `tfsdk:"field1"` + Field2 types.String `tfsdk:"field2"` + Field3 types.Int64 `tfsdk:"field3"` + Field4 types.Int64 `tfsdk:"field4"` + Field5 types.Int64 `tfsdk:"field5"` + Field6 types.Int64 `tfsdk:"field6"` + Field7 types.Float64 `tfsdk:"field7"` + Field8 types.Float64 `tfsdk:"field8"` + Field9 types.Float64 `tfsdk:"field9"` + Field10 types.Float64 `tfsdk:"field10"` + Field11 types.Bool `tfsdk:"field11"` + Field12 types.Bool `tfsdk:"field12"` +} + +type awsAllThePrimitiveFields struct { + Field1 string + Field2 *string + Field3 int32 + Field4 *int32 + Field5 int64 + Field6 *int64 + Field7 float32 + Field8 *float32 + Field9 float64 + Field10 *float64 + Field11 bool + Field12 *bool +} + +type awsSingleByteSliceValue struct { + Field1 []byte +} + +type awsSingleFloat64Value struct { + Field1 float64 +} + +type awsSingleFloat64Pointer struct { + Field1 *float64 +} + +type awsSingleFloat32Value struct { + Field1 float32 +} + +type awsSingleFloat32Pointer struct { + Field1 *float32 +} + +type awsSingleInt64Value struct { + Field1 int64 +} + +type awsSingleInt64Pointer struct { + Field1 *int64 +} + +type awsSingleInt32Value struct { + Field1 int32 +} + +type awsSingleInt32Pointer struct { + Field1 *int32 +} + +func TestExpandPrimitives(t *testing.T) { + t.Parallel() + + testCases := autoFlexTestCases{ + "primitive types Source and primitive types Target": { + Source: &tfAllThePrimitiveFields{ + Field1: types.StringValue("field1"), + Field2: types.StringValue("field2"), + Field3: types.Int64Value(3), + Field4: types.Int64Value(-4), + Field5: types.Int64Value(5), + Field6: types.Int64Value(-6), + Field7: types.Float64Value(7.7), + Field8: types.Float64Value(-8.8), + Field9: types.Float64Value(9.99), + Field10: types.Float64Value(-10.101), + Field11: types.BoolValue(true), + Field12: types.BoolValue(false), + }, + Target: &awsAllThePrimitiveFields{}, + WantTarget: &awsAllThePrimitiveFields{ + Field1: "field1", + 
Field2: aws.String("field2"), + Field3: 3, + Field4: aws.Int32(-4), + Field5: 5, + Field6: aws.Int64(-6), + Field7: 7.7, + Field8: aws.Float32(-8.8), + Field9: 9.99, + Field10: aws.Float64(-10.101), + Field11: true, + Field12: aws.Bool(false), + }, + }, + "single string struct pointer Source and empty Target": { + Source: &tfSingleStringField{Field1: types.StringValue("a")}, + Target: &emptyStruct{}, + WantTarget: &emptyStruct{}, + }, + "single string Source and single string Target": { + Source: &tfSingleStringField{Field1: types.StringValue("a")}, + Target: &awsSingleStringValue{}, + WantTarget: &awsSingleStringValue{Field1: "a"}, + }, + "single string Source and byte slice Target": { + Source: &tfSingleStringField{Field1: types.StringValue("a")}, + Target: &awsSingleByteSliceValue{}, + WantTarget: &awsSingleByteSliceValue{Field1: []byte("a")}, + }, + "single string Source and single *string Target": { + Source: &tfSingleStringField{Field1: types.StringValue("a")}, + Target: &awsSingleStringPointer{}, + WantTarget: &awsSingleStringPointer{Field1: aws.String("a")}, + }, + "single string Source and single int64 Target": { + Source: &tfSingleStringField{Field1: types.StringValue("a")}, + Target: &awsSingleInt64Value{}, + WantTarget: &awsSingleInt64Value{}, + }, + } + + runAutoExpandTestCases(t, testCases, runChecks{CompareDiags: true, CompareTarget: true, GoldenLogs: true}) +} + +func TestExpandFloat64toFloat32(t *testing.T) { + t.Parallel() + + testCases := map[string]autoFlexTestCases{ + // For historical reasons, Float64 can be expanded to float32 values + "Float64 to float32": { + "value": { + Source: tfSingleFloat64Field{ + Field1: types.Float64Value(42), + }, + Target: &awsSingleFloat32Value{}, + WantTarget: &awsSingleFloat32Value{ + Field1: 42, + }, + }, + "zero": { + Source: tfSingleFloat64Field{ + Field1: types.Float64Value(0), + }, + Target: &awsSingleFloat32Value{}, + WantTarget: &awsSingleFloat32Value{ + Field1: 0, + }, + }, + "null": { + Source: 
tfSingleFloat64Field{ + Field1: types.Float64Null(), + }, + Target: &awsSingleFloat32Value{}, + WantTarget: &awsSingleFloat32Value{ + Field1: 0, + }, + }, + }, + + "legacy Float64 to float32": { + "value": { + Source: tfSingleFloat64FieldLegacy{ + Field1: types.Float64Value(42), + }, + Target: &awsSingleFloat32Value{}, + WantTarget: &awsSingleFloat32Value{ + Field1: 42, + }, + }, + "zero": { + Source: tfSingleFloat64FieldLegacy{ + Field1: types.Float64Value(0), + }, + Target: &awsSingleFloat32Value{}, + WantTarget: &awsSingleFloat32Value{ + Field1: 0, + }, + }, + "null": { + Source: tfSingleFloat64FieldLegacy{ + Field1: types.Float64Null(), + }, + Target: &awsSingleFloat32Value{}, + WantTarget: &awsSingleFloat32Value{ + Field1: 0, + }, + }, + }, + + "Float64 to *float32": { + "value": { + Source: tfSingleFloat64Field{ + Field1: types.Float64Value(42), + }, + Target: &awsSingleFloat32Pointer{}, + WantTarget: &awsSingleFloat32Pointer{ + Field1: aws.Float32(42), + }, + }, + "zero": { + Source: tfSingleFloat64Field{ + Field1: types.Float64Value(0), + }, + Target: &awsSingleFloat32Pointer{}, + WantTarget: &awsSingleFloat32Pointer{ + Field1: aws.Float32(0), + }, + }, + "null": { + Source: tfSingleFloat64Field{ + Field1: types.Float64Null(), + }, + Target: &awsSingleFloat32Pointer{}, + WantTarget: &awsSingleFloat32Pointer{ + Field1: nil, + }, + }, + }, + + "legacy Float64 to *float32": { + "value": { + Source: tfSingleFloat64FieldLegacy{ + Field1: types.Float64Value(42), + }, + Target: &awsSingleFloat32Pointer{}, + WantTarget: &awsSingleFloat32Pointer{ + Field1: aws.Float32(42), + }, + }, + "zero": { + Source: tfSingleFloat64FieldLegacy{ + Field1: types.Float64Value(0), + }, + Target: &awsSingleFloat32Pointer{}, + WantTarget: &awsSingleFloat32Pointer{ + Field1: nil, + }, + }, + "null": { + Source: tfSingleFloat64FieldLegacy{ + Field1: types.Float64Null(), + }, + Target: &awsSingleFloat32Pointer{}, + WantTarget: &awsSingleFloat32Pointer{ + Field1: nil, + }, + }, + }, + } + 
+ for testName, cases := range testCases { + t.Run(testName, func(t *testing.T) { + t.Parallel() + + runAutoExpandTestCases(t, cases, runChecks{CompareDiags: true, CompareTarget: true}) + }) + } +} + +func TestExpandFloat32toFloat64(t *testing.T) { + t.Parallel() + + testCases := map[string]autoFlexTestCases{ + // Float32 cannot be expanded to float64 + "Float32 to float64": { + "value": { + Source: tfSingleFloat32Field{ + Field1: types.Float32Value(42), + }, + Target: &awsSingleFloat64Value{}, + ExpectedDiags: diagAF2[types.Float32, float64](diagExpandingIncompatibleTypes), + }, + "zero": { + Source: tfSingleFloat32Field{ + Field1: types.Float32Value(0), + }, + Target: &awsSingleFloat64Value{}, + ExpectedDiags: diagAF2[types.Float32, float64](diagExpandingIncompatibleTypes), + }, + "null": { + // TODO: The test for a null value happens before type checking + Source: tfSingleFloat32Field{ + Field1: types.Float32Null(), + }, + Target: &awsSingleFloat64Value{}, + WantTarget: &awsSingleFloat64Value{ + Field1: 0, + }, + }, + }, + + "legacy Float32 to float64": { + "value": { + Source: tfSingleFloat32FieldLegacy{ + Field1: types.Float32Value(42), + }, + Target: &awsSingleFloat64Value{}, + ExpectedDiags: diagAF2[types.Float32, float64](diagExpandingIncompatibleTypes), + }, + "zero": { + Source: tfSingleFloat32FieldLegacy{ + Field1: types.Float32Value(0), + }, + Target: &awsSingleFloat64Value{}, + ExpectedDiags: diagAF2[types.Float32, float64](diagExpandingIncompatibleTypes), + }, + "null": { + // TODO: The test for a null value happens before type checking + Source: tfSingleFloat32FieldLegacy{ + Field1: types.Float32Null(), + }, + Target: &awsSingleFloat64Value{}, + WantTarget: &awsSingleFloat64Value{ + Field1: 0, + }, + }, + }, + + "Float32 to *float64": { + "value": { + Source: tfSingleFloat32Field{ + Field1: types.Float32Value(42), + }, + Target: &awsSingleFloat64Pointer{}, + ExpectedDiags: diagAF2[types.Float32, *float64](diagExpandingIncompatibleTypes), + }, + 
"zero": { + Source: tfSingleFloat32Field{ + Field1: types.Float32Value(0), + }, + Target: &awsSingleFloat64Pointer{}, + ExpectedDiags: diagAF2[types.Float32, *float64](diagExpandingIncompatibleTypes), + }, + "null": { + // TODO: The test for a null value happens before type checking + Source: tfSingleFloat32Field{ + Field1: types.Float32Null(), + }, + Target: &awsSingleFloat64Pointer{}, + WantTarget: &awsSingleFloat64Pointer{ + Field1: nil, + }, + }, + }, + + "legacy Float32 to *float64": { + "value": { + Source: tfSingleFloat32FieldLegacy{ + Field1: types.Float32Value(42), + }, + Target: &awsSingleFloat64Pointer{}, + ExpectedDiags: diagAF2[types.Float32, *float64](diagExpandingIncompatibleTypes), + }, + "zero": { + Source: tfSingleFloat32FieldLegacy{ + Field1: types.Float32Value(0), + }, + Target: &awsSingleFloat64Pointer{}, + ExpectedDiags: diagAF2[types.Float32, *float64](diagExpandingIncompatibleTypes), + }, + "null": { + // TODO: The test for a null value happens before type checking + Source: tfSingleFloat32FieldLegacy{ + Field1: types.Float32Null(), + }, + Target: &awsSingleFloat64Pointer{}, + WantTarget: &awsSingleFloat64Pointer{ + Field1: nil, + }, + }, + }, + } + + for testName, cases := range testCases { + t.Run(testName, func(t *testing.T) { + t.Parallel() + + runAutoExpandTestCases(t, cases, runChecks{CompareDiags: true, CompareTarget: true}) + }) + } +} + +func TestExpandInt64toInt32(t *testing.T) { + t.Parallel() + + testCases := map[string]autoFlexTestCases{ + // For historical reasons, Int64 can be expanded to int32 values + "Int64 to int32": { + "value": { + Source: tfSingleInt64Field{ + Field1: types.Int64Value(42), + }, + Target: &awsSingleInt32Value{}, + WantTarget: &awsSingleInt32Value{ + Field1: 42, + }, + }, + "zero": { + Source: tfSingleInt64Field{ + Field1: types.Int64Value(0), + }, + Target: &awsSingleInt32Value{}, + WantTarget: &awsSingleInt32Value{ + Field1: 0, + }, + }, + "null": { + Source: tfSingleInt64Field{ + Field1: 
types.Int64Null(), + }, + Target: &awsSingleInt32Value{}, + WantTarget: &awsSingleInt32Value{ + Field1: 0, + }, + }, + }, + + "legacy Int64 to int32": { + "value": { + Source: tfSingleInt64FieldLegacy{ + Field1: types.Int64Value(42), + }, + Target: &awsSingleInt32Value{}, + WantTarget: &awsSingleInt32Value{ + Field1: 42, + }, + }, + "zero": { + Source: tfSingleInt64FieldLegacy{ + Field1: types.Int64Value(0), + }, + Target: &awsSingleInt32Value{}, + WantTarget: &awsSingleInt32Value{ + Field1: 0, + }, + }, + "null": { + Source: tfSingleInt64FieldLegacy{ + Field1: types.Int64Null(), + }, + Target: &awsSingleInt32Value{}, + WantTarget: &awsSingleInt32Value{ + Field1: 0, + }, + }, + }, + + "Int64 to *int32": { + "value": { + Source: tfSingleInt64Field{ + Field1: types.Int64Value(42), + }, + Target: &awsSingleInt32Pointer{}, + WantTarget: &awsSingleInt32Pointer{ + Field1: aws.Int32(42), + }, + }, + "zero": { + Source: tfSingleInt64Field{ + Field1: types.Int64Value(0), + }, + Target: &awsSingleInt32Pointer{}, + WantTarget: &awsSingleInt32Pointer{ + Field1: aws.Int32(0), + }, + }, + "null": { + Source: tfSingleInt64Field{ + Field1: types.Int64Null(), + }, + Target: &awsSingleInt32Pointer{}, + WantTarget: &awsSingleInt32Pointer{ + Field1: nil, + }, + }, + }, + + "legacy Int64 to *int32": { + "value": { + Source: tfSingleInt64FieldLegacy{ + Field1: types.Int64Value(42), + }, + Target: &awsSingleInt32Pointer{}, + WantTarget: &awsSingleInt32Pointer{ + Field1: aws.Int32(42), + }, + }, + "zero": { + Source: tfSingleInt64FieldLegacy{ + Field1: types.Int64Value(0), + }, + Target: &awsSingleInt32Pointer{}, + WantTarget: &awsSingleInt32Pointer{ + Field1: nil, + }, + }, + "null": { + Source: tfSingleInt64FieldLegacy{ + Field1: types.Int64Null(), + }, + Target: &awsSingleInt32Pointer{}, + WantTarget: &awsSingleInt32Pointer{ + Field1: nil, + }, + }, + }, + } + + for testName, cases := range testCases { + t.Run(testName, func(t *testing.T) { + t.Parallel() + + runAutoExpandTestCases(t, 
cases, runChecks{CompareDiags: true, CompareTarget: true}) + }) + } +} + +func TestExpandInt32toInt64(t *testing.T) { + t.Parallel() + + testCases := map[string]autoFlexTestCases{ + // Int32 cannot be expanded to int64 + "Int32 to int64": { + "value": { + Source: tfSingleInt32Field{ + Field1: types.Int32Value(42), + }, + Target: &awsSingleInt64Value{}, + ExpectedDiags: diagAF2[types.Int32, int64](diagExpandingIncompatibleTypes), + }, + "zero": { + Source: tfSingleInt32Field{ + Field1: types.Int32Value(0), + }, + Target: &awsSingleInt64Value{}, + ExpectedDiags: diagAF2[types.Int32, int64](diagExpandingIncompatibleTypes), + }, + "null": { + // TODO: The test for a null value happens before type checking + Source: tfSingleInt32Field{ + Field1: types.Int32Null(), + }, + Target: &awsSingleInt64Value{}, + WantTarget: &awsSingleInt64Value{}, + }, + }, + + "legacy Int32 to int64": { + "value": { + Source: tfSingleInt32FieldLegacy{ + Field1: types.Int32Value(42), + }, + Target: &awsSingleInt64Value{}, + ExpectedDiags: diagAF2[types.Int32, int64](diagExpandingIncompatibleTypes), + }, + "zero": { + Source: tfSingleInt32FieldLegacy{ + Field1: types.Int32Value(0), + }, + Target: &awsSingleInt64Value{}, + ExpectedDiags: diagAF2[types.Int32, int64](diagExpandingIncompatibleTypes), + }, + "null": { + // TODO: The test for a null value happens before type checking + Source: tfSingleInt32FieldLegacy{ + Field1: types.Int32Null(), + }, + Target: &awsSingleInt64Value{}, + WantTarget: &awsSingleInt64Value{}, + }, + }, + + "Int32 to *int64": { + "value": { + Source: tfSingleInt32Field{ + Field1: types.Int32Value(42), + }, + Target: &awsSingleInt64Pointer{}, + ExpectedDiags: diagAF2[types.Int32, *int64](diagExpandingIncompatibleTypes), + }, + "zero": { + Source: tfSingleInt32Field{ + Field1: types.Int32Value(0), + }, + Target: &awsSingleInt64Pointer{}, + ExpectedDiags: diagAF2[types.Int32, *int64](diagExpandingIncompatibleTypes), + }, + "null": { + // TODO: The test for a null value 
happens before type checking + Source: tfSingleInt32Field{ + Field1: types.Int32Null(), + }, + Target: &awsSingleInt64Pointer{}, + WantTarget: &awsSingleInt64Pointer{}, + }, + }, + + "legacy Int32 to *int64": { + "value": { + Source: tfSingleInt32FieldLegacy{ + Field1: types.Int32Value(42), + }, + Target: &awsSingleInt64Pointer{}, + ExpectedDiags: diagAF2[types.Int32, *int64](diagExpandingIncompatibleTypes), + }, + "zero": { + Source: tfSingleInt32FieldLegacy{ + Field1: types.Int32Value(0), + }, + Target: &awsSingleInt64Pointer{}, + ExpectedDiags: diagAF2[types.Int32, *int64](diagExpandingIncompatibleTypes), + }, + "null": { + // TODO: The test for a null value happens before type checking + Source: tfSingleInt32FieldLegacy{ + Field1: types.Int32Null(), + }, + Target: &awsSingleInt64Pointer{}, + WantTarget: &awsSingleInt64Pointer{}, + }, + }, + } + + for testName, cases := range testCases { + t.Run(testName, func(t *testing.T) { + t.Parallel() + + runAutoExpandTestCases(t, cases, runChecks{CompareDiags: true, CompareTarget: true}) + }) + } +} + +func TestFlattenPrimitivePack(t *testing.T) { + t.Parallel() + + testCases := autoFlexTestCases{ + "primitive pack zero ok": { + Source: &awsAllThePrimitiveFields{}, + Target: &tfAllThePrimitiveFields{}, + WantTarget: &tfAllThePrimitiveFields{ + Field1: types.StringValue(""), + Field2: types.StringNull(), + Field3: types.Int64Value(0), + Field4: types.Int64Null(), + Field5: types.Int64Value(0), + Field6: types.Int64Null(), + Field7: types.Float64Value(0), + Field8: types.Float64Null(), + Field9: types.Float64Value(0), + Field10: types.Float64Null(), + Field11: types.BoolValue(false), + Field12: types.BoolNull(), + }, + }, + "primitive pack ok": { + Source: &awsAllThePrimitiveFields{ + Field1: "field1", + Field2: aws.String("field2"), + Field3: 3, + Field4: aws.Int32(-4), + Field5: 5, + Field6: aws.Int64(-6), + Field7: 7.7, + Field8: aws.Float32(-8.8), + Field9: 9.99, + Field10: aws.Float64(-10.101), + Field11: true, + 
Field12: aws.Bool(false), + }, + Target: &tfAllThePrimitiveFields{}, + WantTarget: &tfAllThePrimitiveFields{ + Field1: types.StringValue("field1"), + Field2: types.StringValue("field2"), + Field3: types.Int64Value(3), + Field4: types.Int64Value(-4), + Field5: types.Int64Value(5), + Field6: types.Int64Value(-6), + Field7: types.Float64Value(7.7), + Field8: types.Float64Value(-8.8), + Field9: types.Float64Value(9.99), + Field10: types.Float64Value(-10.101), + Field11: types.BoolValue(true), + Field12: types.BoolValue(false), + }, + }, + } + + runAutoFlattenTestCases(t, testCases, runChecks{CompareDiags: true, CompareTarget: true, GoldenLogs: true}) +} + +func TestFlattenFloat64(t *testing.T) { + t.Parallel() + + testCases := map[string]autoFlexTestCases{ + "*float64 to Float64": { + "value": { + Source: awsSingleFloat64Pointer{ + Field1: aws.Float64(42), + }, + Target: &tfSingleFloat64Field{}, + WantTarget: &tfSingleFloat64Field{ + Field1: types.Float64Value(42), + }, + }, + "zero": { + Source: awsSingleFloat64Pointer{ + Field1: aws.Float64(0), + }, + Target: &tfSingleFloat64Field{}, + WantTarget: &tfSingleFloat64Field{ + Field1: types.Float64Value(0), + }, + }, + "null": { + Source: awsSingleFloat64Pointer{ + Field1: nil, + }, + Target: &tfSingleFloat64Field{}, + WantTarget: &tfSingleFloat64Field{ + Field1: types.Float64Null(), + }, + }, + }, + + "legacy *float64 to Float64": { + "value": { + Source: awsSingleFloat64Pointer{ + Field1: aws.Float64(42), + }, + Target: &tfSingleFloat64FieldLegacy{}, + WantTarget: &tfSingleFloat64FieldLegacy{ + Field1: types.Float64Value(42), + }, + }, + "zero": { + Source: awsSingleFloat64Pointer{ + Field1: aws.Float64(0), + }, + Target: &tfSingleFloat64FieldLegacy{}, + WantTarget: &tfSingleFloat64FieldLegacy{ + Field1: types.Float64Value(0), + }, + }, + "null": { + Source: awsSingleFloat64Pointer{ + Field1: nil, + }, + Target: &tfSingleFloat64FieldLegacy{}, + WantTarget: &tfSingleFloat64FieldLegacy{ + Field1: types.Float64Value(0), + 
}, + }, + }, + + // For historical reasons, float32 can be flattened to Float64 values + "float32 to Float64": { + "value": { + Source: awsSingleFloat32Value{ + Field1: 42, + }, + Target: &tfSingleFloat64Field{}, + WantTarget: &tfSingleFloat64Field{ + Field1: types.Float64Value(42), + }, + }, + "zero": { + Source: awsSingleFloat32Value{ + Field1: 0, + }, + Target: &tfSingleFloat64Field{}, + WantTarget: &tfSingleFloat64Field{ + Field1: types.Float64Value(0), + }, + }, + }, + + "*float32 to Float64": { + "value": { + Source: awsSingleFloat32Pointer{ + Field1: aws.Float32(42), + }, + Target: &tfSingleFloat64Field{}, + WantTarget: &tfSingleFloat64Field{ + Field1: types.Float64Value(42), + }, + }, + "zero": { + Source: awsSingleFloat32Pointer{ + Field1: aws.Float32(0), + }, + Target: &tfSingleFloat64Field{}, + WantTarget: &tfSingleFloat64Field{ + Field1: types.Float64Value(0), + }, + }, + "null": { + Source: awsSingleFloat32Pointer{ + Field1: nil, + }, + Target: &tfSingleFloat64Field{}, + WantTarget: &tfSingleFloat64Field{ + Field1: types.Float64Null(), + }, + }, + }, + + "legacy *float32 to Float64": { + "value": { + Source: awsSingleFloat32Pointer{ + Field1: aws.Float32(42), + }, + Target: &tfSingleFloat64FieldLegacy{}, + WantTarget: &tfSingleFloat64FieldLegacy{ + Field1: types.Float64Value(42), + }, + }, + "zero": { + Source: awsSingleFloat32Pointer{ + Field1: aws.Float32(0), + }, + Target: &tfSingleFloat64FieldLegacy{}, + WantTarget: &tfSingleFloat64FieldLegacy{ + Field1: types.Float64Value(0), + }, + }, + "null": { + Source: awsSingleFloat32Pointer{ + Field1: nil, + }, + Target: &tfSingleFloat64FieldLegacy{}, + WantTarget: &tfSingleFloat64FieldLegacy{ + Field1: types.Float64Value(0), + }, + }, + }, + } + + for testName, cases := range testCases { + t.Run(testName, func(t *testing.T) { + t.Parallel() + + runAutoFlattenTestCases(t, cases, runChecks{CompareDiags: true, CompareTarget: true}) + }) + } +} + +func TestFlattenFloat32(t *testing.T) { + t.Parallel() + + 
testCases := map[string]autoFlexTestCases{ + "*float32 to Float32": { + "value": { + Source: awsSingleFloat32Pointer{ + Field1: aws.Float32(42), + }, + Target: &tfSingleFloat32Field{}, + WantTarget: &tfSingleFloat32Field{ + Field1: types.Float32Value(42), + }, + }, + "zero": { + Source: awsSingleFloat32Pointer{ + Field1: aws.Float32(0), + }, + Target: &tfSingleFloat32Field{}, + WantTarget: &tfSingleFloat32Field{ + Field1: types.Float32Value(0), + }, + }, + "null": { + Source: awsSingleFloat32Pointer{ + Field1: nil, + }, + Target: &tfSingleFloat32Field{}, + WantTarget: &tfSingleFloat32Field{ + Field1: types.Float32Null(), + }, + }, + }, + + "legacy *float32 to Float32": { + "value": { + Source: awsSingleFloat32Pointer{ + Field1: aws.Float32(42), + }, + Target: &tfSingleFloat32FieldLegacy{}, + WantTarget: &tfSingleFloat32FieldLegacy{ + Field1: types.Float32Value(42), + }, + }, + "zero": { + Source: awsSingleFloat32Pointer{ + Field1: aws.Float32(0), + }, + Target: &tfSingleFloat32FieldLegacy{}, + WantTarget: &tfSingleFloat32FieldLegacy{ + Field1: types.Float32Value(0), + }, + }, + "null": { + Source: awsSingleFloat32Pointer{ + Field1: nil, + }, + Target: &tfSingleFloat32FieldLegacy{}, + WantTarget: &tfSingleFloat32FieldLegacy{ + Field1: types.Float32Value(0), + }, + }, + }, + + // float64 cannot be flattened to Float32 + "float64 to Float32": { + "value": { + Source: awsSingleFloat64Value{ + Field1: 42, + }, + Target: &tfSingleFloat32Field{}, + ExpectedDiags: diagAF2[float64, types.Float32](DiagFlatteningIncompatibleTypes), + }, + "zero": { + Source: awsSingleFloat64Value{ + Field1: 0, + }, + Target: &tfSingleFloat32Field{}, + ExpectedDiags: diagAF2[float64, types.Float32](DiagFlatteningIncompatibleTypes), + }, + }, + + "*float64 to Float32": { + "value": { + Source: awsSingleFloat64Pointer{ + Field1: aws.Float64(42), + }, + Target: &tfSingleFloat32Field{}, + ExpectedDiags: diagAF2[*float64, types.Float32](DiagFlatteningIncompatibleTypes), + }, + "zero": { + Source: 
awsSingleFloat64Pointer{ + Field1: aws.Float64(0), + }, + Target: &tfSingleFloat32Field{}, + ExpectedDiags: diagAF2[*float64, types.Float32](DiagFlatteningIncompatibleTypes), + }, + "null": { + Source: awsSingleFloat64Pointer{ + Field1: nil, + }, + Target: &tfSingleFloat32Field{}, + ExpectedDiags: diagAF2[*float64, types.Float32](DiagFlatteningIncompatibleTypes), + }, + }, + } + + for testName, cases := range testCases { + t.Run(testName, func(t *testing.T) { + t.Parallel() + + runAutoFlattenTestCases(t, cases, runChecks{CompareDiags: true, CompareTarget: true}) + }) + } +} + +func TestFlattenInt64(t *testing.T) { + t.Parallel() + + testCases := map[string]autoFlexTestCases{ + "*int64 to Int64": { + "value": { + Source: awsSingleInt64Pointer{ + Field1: aws.Int64(42), + }, + Target: &tfSingleInt64Field{}, + WantTarget: &tfSingleInt64Field{ + Field1: types.Int64Value(42), + }, + }, + "zero": { + Source: awsSingleInt64Pointer{ + Field1: aws.Int64(0), + }, + Target: &tfSingleInt64Field{}, + WantTarget: &tfSingleInt64Field{ + Field1: types.Int64Value(0), + }, + }, + "null": { + Source: awsSingleInt64Pointer{ + Field1: nil, + }, + Target: &tfSingleInt64Field{}, + WantTarget: &tfSingleInt64Field{ + Field1: types.Int64Null(), + }, + }, + }, + + "legacy *int64 to Int64": { + "value": { + Source: awsSingleInt64Pointer{ + Field1: aws.Int64(42), + }, + Target: &tfSingleInt64FieldLegacy{}, + WantTarget: &tfSingleInt64FieldLegacy{ + Field1: types.Int64Value(42), + }, + }, + "zero": { + Source: awsSingleInt64Pointer{ + Field1: aws.Int64(0), + }, + Target: &tfSingleInt64FieldLegacy{}, + WantTarget: &tfSingleInt64FieldLegacy{ + Field1: types.Int64Value(0), + }, + }, + "null": { + Source: awsSingleInt64Pointer{ + Field1: nil, + }, + Target: &tfSingleInt64FieldLegacy{}, + WantTarget: &tfSingleInt64FieldLegacy{ + Field1: types.Int64Value(0), + }, + }, + }, + + // For historical reasons, int32 can be flattened to Int64 values + "int32 to Int64": { + "value": { + Source: 
awsSingleInt32Value{ + Field1: 42, + }, + Target: &tfSingleInt64Field{}, + WantTarget: &tfSingleInt64Field{ + Field1: types.Int64Value(42), + }, + }, + "zero": { + Source: awsSingleInt32Value{ + Field1: 0, + }, + Target: &tfSingleInt64Field{}, + WantTarget: &tfSingleInt64Field{ + Field1: types.Int64Value(0), + }, + }, + }, + + "*int32 to Int64": { + "value": { + Source: awsSingleInt32Pointer{ + Field1: aws.Int32(42), + }, + Target: &tfSingleInt64Field{}, + WantTarget: &tfSingleInt64Field{ + Field1: types.Int64Value(42), + }, + }, + "zero": { + Source: awsSingleInt32Pointer{ + Field1: aws.Int32(0), + }, + Target: &tfSingleInt64Field{}, + WantTarget: &tfSingleInt64Field{ + Field1: types.Int64Value(0), + }, + }, + "null": { + Source: awsSingleInt32Pointer{ + Field1: nil, + }, + Target: &tfSingleInt64Field{}, + WantTarget: &tfSingleInt64Field{ + Field1: types.Int64Null(), + }, + }, + }, + + "legacy *int32 to Int64": { + "value": { + Source: awsSingleInt32Pointer{ + Field1: aws.Int32(42), + }, + Target: &tfSingleInt64FieldLegacy{}, + WantTarget: &tfSingleInt64FieldLegacy{ + Field1: types.Int64Value(42), + }, + }, + "zero": { + Source: awsSingleInt32Pointer{ + Field1: aws.Int32(0), + }, + Target: &tfSingleInt64FieldLegacy{}, + WantTarget: &tfSingleInt64FieldLegacy{ + Field1: types.Int64Value(0), + }, + }, + "null": { + Source: awsSingleInt32Pointer{ + Field1: nil, + }, + Target: &tfSingleInt64FieldLegacy{}, + WantTarget: &tfSingleInt64FieldLegacy{ + Field1: types.Int64Value(0), + }, + }, + }, + } + + for testName, cases := range testCases { + t.Run(testName, func(t *testing.T) { + t.Parallel() + + runAutoFlattenTestCases(t, cases, runChecks{CompareDiags: true, CompareTarget: true}) + }) + } +} + +func TestFlattenInt32(t *testing.T) { + t.Parallel() + + testCases := map[string]autoFlexTestCases{ + "*int32 to Int32": { + "value": { + Source: awsSingleInt32Pointer{ + Field1: aws.Int32(42), + }, + Target: &tfSingleInt32Field{}, + WantTarget: &tfSingleInt32Field{ + Field1: 
types.Int32Value(42), + }, + }, + "zero": { + Source: awsSingleInt32Pointer{ + Field1: aws.Int32(0), + }, + Target: &tfSingleInt32Field{}, + WantTarget: &tfSingleInt32Field{ + Field1: types.Int32Value(0), + }, + }, + "null": { + Source: awsSingleInt32Pointer{ + Field1: nil, + }, + Target: &tfSingleInt32Field{}, + WantTarget: &tfSingleInt32Field{ + Field1: types.Int32Null(), + }, + }, + }, + + "legacy *int32 to Int32": { + "value": { + Source: awsSingleInt32Pointer{ + Field1: aws.Int32(42), + }, + Target: &tfSingleInt32FieldLegacy{}, + WantTarget: &tfSingleInt32FieldLegacy{ + Field1: types.Int32Value(42), + }, + }, + "zero": { + Source: awsSingleInt32Pointer{ + Field1: aws.Int32(0), + }, + Target: &tfSingleInt32FieldLegacy{}, + WantTarget: &tfSingleInt32FieldLegacy{ + Field1: types.Int32Value(0), + }, + }, + "null": { + Source: awsSingleInt32Pointer{ + Field1: nil, + }, + Target: &tfSingleInt32FieldLegacy{}, + WantTarget: &tfSingleInt32FieldLegacy{ + Field1: types.Int32Value(0), + }, + }, + }, + + // int64 cannot be flattened to Int32 + "int64 to Int32": { + "value": { + Source: awsSingleInt64Value{ + Field1: 42, + }, + Target: &tfSingleInt32Field{}, + ExpectedDiags: diagAF2[int64, types.Int32](DiagFlatteningIncompatibleTypes), + }, + "zero": { + Source: awsSingleInt64Value{ + Field1: 0, + }, + Target: &tfSingleInt32Field{}, + ExpectedDiags: diagAF2[int64, types.Int32](DiagFlatteningIncompatibleTypes), + }, + }, + + "*int64 to Int32": { + "value": { + Source: awsSingleInt64Pointer{ + Field1: aws.Int64(42), + }, + Target: &tfSingleInt32Field{}, + ExpectedDiags: diagAF2[*int64, types.Int32](DiagFlatteningIncompatibleTypes), + }, + "zero": { + Source: awsSingleInt64Pointer{ + Field1: aws.Int64(0), + }, + Target: &tfSingleInt32Field{}, + ExpectedDiags: diagAF2[*int64, types.Int32](DiagFlatteningIncompatibleTypes), + }, + "null": { + Source: awsSingleInt64Pointer{ + Field1: nil, + }, + Target: &tfSingleInt32Field{}, + ExpectedDiags: diagAF2[*int64, 
types.Int32](DiagFlatteningIncompatibleTypes), + }, + }, + } + + for testName, cases := range testCases { + t.Run(testName, func(t *testing.T) { + t.Parallel() + + runAutoFlattenTestCases(t, cases, runChecks{CompareDiags: true, CompareTarget: true}) + }) + } +} + +func TestFlattenTopLevelInt64Ptr(t *testing.T) { + t.Parallel() + + testCases := toplevelTestCases[*int64, types.Int64]{ + "value": { + source: aws.Int64(42), + expectedValue: types.Int64Value(42), + ExpectedDiags: diagAFEmpty(), + }, + + "empty": { + source: aws.Int64(0), + expectedValue: types.Int64Value(0), + ExpectedDiags: diagAFEmpty(), + }, + + "nil": { + source: nil, + expectedValue: types.Int64Null(), + ExpectedDiags: diagAFEmpty(), + }, + } + + runTopLevelTestCases(t, testCases, runChecks{CompareDiags: true, CompareTarget: true}) +} diff --git a/internal/framework/flex/autoflex_primitives_test.go b/internal/framework/flex/autoflex_primitives_test.go new file mode 100644 index 000000000000..1a1f8e36fb7c --- /dev/null +++ b/internal/framework/flex/autoflex_primitives_test.go @@ -0,0 +1,1004 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package flex + +// Tests AutoFlex's Expand/Flatten using generic-style roundtrip testing of strings, +// bools, int64, int32, float64, and float32 with various variants: standard, legacy, pointers. 
+ +import ( + "bytes" + "context" + "path/filepath" + "reflect" + "strings" + "testing" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/google/go-cmp/cmp" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflogtest" + "github.com/hashicorp/terraform-plugin-testing/helper/acctest" +) + +// TestPrimitivesRoundtrip is the proof of concept for string roundtrip testing +// This replaces TestExpandString and TestFlattenString with comprehensive roundtrip validation +func TestPrimitivesRoundtrip(t *testing.T) { + t.Parallel() + + // Test string roundtrips with all variants + t.Run("String", func(t *testing.T) { + t.Parallel() + testStringRoundtrip(t) + }) + + // Test bool roundtrips with all variants + t.Run("Bool", func(t *testing.T) { + t.Parallel() + testBoolRoundtrip(t) + }) + + // Test int64 roundtrips with all variants + t.Run("Int64", func(t *testing.T) { + t.Parallel() + testInt64Roundtrip(t) + }) + + // Test int32 roundtrips with all variants + t.Run("Int32", func(t *testing.T) { + t.Parallel() + testInt32Roundtrip(t) + }) + + // Test float64 roundtrips with all variants + t.Run("Float64", func(t *testing.T) { + t.Parallel() + testFloat64Roundtrip(t) + }) + + // Test float32 roundtrips with all variants + t.Run("Float32", func(t *testing.T) { + t.Parallel() + testFloat32Roundtrip(t) + }) +} + +func testStringRoundtrip(t *testing.T) { + // Define String-specific type info + stringTypeInfo := PrimitiveTypeInfo[string]{ + TFType: reflect.TypeOf(types.String{}), + CreateValue: func(v string) any { return types.StringValue(v) }, + CreateNull: func() any { return types.StringNull() }, + CreateAWSValue: func(v string) any { return aws.String(v) }, + GetAWSNil: func() any { return (*string)(nil) }, + GetZeroValue: func() string { return "" }, + } + + // Test cases covering all scenarios from original TestExpandString and TestFlattenString + testCases := 
[]struct { + name string + stringValue string + isNull bool + isEmpty bool + variants []string // which variants to test: "standard", "legacy" + skipExpand bool // skip expand direction (flatten-only test) + }{ + { + name: "normal_value", + stringValue: "test_value", + variants: []string{"standard", "legacy", "tf_to_aws_pointer", "legacy_tf_to_aws_pointer"}, + }, + { + name: "empty_string", + stringValue: "", + isEmpty: true, + variants: []string{"standard", "legacy", "tf_to_aws_pointer", "legacy_tf_to_aws_pointer"}, + }, + { + name: "null_value", + isNull: true, + variants: []string{"standard", "legacy", "tf_to_aws_pointer", "legacy_tf_to_aws_pointer"}, + }, + { + name: "special_characters", + stringValue: "test with spaces & symbols!", + variants: []string{"standard", "legacy", "tf_to_aws_pointer", "legacy_tf_to_aws_pointer"}, + }, + { + name: "unicode_content", + stringValue: "测试内容 🚀", + variants: []string{"standard", "legacy", "tf_to_aws_pointer", "legacy_tf_to_aws_pointer"}, + }, + // Random value for property-based testing feel + { + name: "random_value", + stringValue: acctest.RandomWithPrefix("tf-test"), + variants: []string{"standard", "legacy", "tf_to_aws_pointer", "legacy_tf_to_aws_pointer"}, + }, + // Omitempty tests - flatten-only (expand direction not defined in original tests) + { + name: "omitempty_normal_value", + stringValue: "test_value", + variants: []string{"omitempty"}, + skipExpand: true, // Only test flatten direction for omitempty + }, + { + name: "omitempty_empty_string", + stringValue: "", + isEmpty: true, + variants: []string{"omitempty"}, + skipExpand: true, + }, + { + name: "omitempty_null_value", + isNull: true, + variants: []string{"omitempty"}, + skipExpand: true, + }, + } + + for _, tc := range testCases { + for _, variant := range tc.variants { + testName := tc.name + "_" + variant + t.Run(testName, func(t *testing.T) { + // Special handling for omitempty (flatten-only) cases + if tc.skipExpand { + // Generate structs for this 
variant + var factory func(reflect.Type) (any, any) + for _, v := range primitiveTestVariants { + if v.Name == variant { + factory = v.Factory + break + } + } + + tfStruct, awsStruct := factory(reflect.TypeOf(types.String{})) + + // Set up AWS struct based on omitempty behavior + if tc.isNull { + setFieldValue(awsStruct, "Field1", (*string)(nil)) + } else if tc.isEmpty { + // Omitempty: empty becomes nil + setFieldValue(awsStruct, "Field1", (*string)(nil)) + } else { + setFieldValue(awsStruct, "Field1", aws.String(tc.stringValue)) + } + + // Set up the expected TF result based on omitempty behavior + expectedTFResult := reflect.New(reflect.TypeOf(tfStruct).Elem()).Interface() + if tc.isNull || tc.isEmpty { + // Omitempty: nil/empty AWS values become null TF values + setFieldValue(expectedTFResult, "Field1", types.StringNull()) + } else { + setFieldValue(expectedTFResult, "Field1", types.StringValue(tc.stringValue)) + } + runFlattenOnlyTest(t, testName, awsStruct, expectedTFResult) + } else { + // Use helper for all standard roundtrip cases + runBasicRoundtripTest(t, testName, variant, stringTypeInfo, tc.stringValue, tc.isNull, false, tc.isEmpty, runChecks{CompareTarget: true, GoldenLogs: true}) + } + }) + } + } +} + +func testBoolRoundtrip(t *testing.T) { + // Define Bool-specific type info + boolTypeInfo := PrimitiveTypeInfo[bool]{ + TFType: reflect.TypeOf(types.Bool{}), + CreateValue: func(v bool) any { return types.BoolValue(v) }, + CreateNull: func() any { return types.BoolNull() }, + CreateAWSValue: func(v bool) any { return aws.Bool(v) }, + GetAWSNil: func() any { return (*bool)(nil) }, + GetZeroValue: func() bool { return false }, + } + + // Test cases covering all scenarios from original TestExpandBool and TestFlattenBool + testCases := []struct { + name string + boolValue bool + isNull bool + variants []string // which variants to test: "standard", "legacy" + skipExpand bool // skip expand direction (flatten-only test) + }{ + { + name: "true_value", + 
boolValue: true, + variants: []string{"standard", "legacy", "tf_to_aws_pointer", "legacy_tf_to_aws_pointer"}, + }, + { + name: "false_value", + boolValue: false, + variants: []string{"standard", "legacy", "tf_to_aws_pointer", "legacy_tf_to_aws_pointer"}, + }, + { + name: "null_value", + isNull: true, + variants: []string{"standard", "legacy", "tf_to_aws_pointer", "legacy_tf_to_aws_pointer"}, + }, + } + + for _, tc := range testCases { + for _, variant := range tc.variants { + testName := tc.name + "_" + variant + t.Run(testName, func(t *testing.T) { + // Check for unsupported skipExpand cases + if tc.skipExpand { + t.Fatalf("skipExpand=true for Bool tests requires special handling implementation") + } + + // Use helper for all standard roundtrip cases + // Note: false value should be treated as "zero" for legacy mode + isZero := !tc.boolValue && !tc.isNull + runBasicRoundtripTest(t, testName, variant, boolTypeInfo, tc.boolValue, tc.isNull, isZero, false, runChecks{CompareTarget: true, GoldenLogs: true}) + }) + } + } +} + +func testInt64Roundtrip(t *testing.T) { + // Define Int64-specific type info + int64TypeInfo := PrimitiveTypeInfo[int64]{ + TFType: reflect.TypeOf(types.Int64{}), + CreateValue: func(v int64) any { return types.Int64Value(v) }, + CreateNull: func() any { return types.Int64Null() }, + CreateAWSValue: func(v int64) any { return aws.Int64(v) }, + GetAWSNil: func() any { return (*int64)(nil) }, + GetZeroValue: func() int64 { return 0 }, + } + + // Test cases covering all scenarios from original TestExpandInt64 and TestFlattenInt64 + testCases := []struct { + name string + int64Value int64 + isNull bool + isZero bool + variants []string // which variants to test: "standard", "legacy" + }{ + { + name: "value", + int64Value: 42, + variants: []string{"standard", "legacy", "tf_to_aws_pointer", "legacy_tf_to_aws_pointer"}, + }, + { + name: "zero_value", + int64Value: 0, + isZero: true, + variants: []string{"standard", "legacy", "tf_to_aws_pointer", 
"legacy_tf_to_aws_pointer"}, + }, + { + name: "null_value", + isNull: true, + variants: []string{"standard", "legacy", "tf_to_aws_pointer", "legacy_tf_to_aws_pointer"}, + }, + } + + for _, tc := range testCases { + for _, variant := range tc.variants { + testName := tc.name + "_" + variant + t.Run(testName, func(t *testing.T) { + // Use helper for all roundtrip cases + runBasicRoundtripTest(t, testName, variant, int64TypeInfo, tc.int64Value, tc.isNull, tc.isZero, false, runChecks{CompareTarget: true, GoldenLogs: true}) + }) + } + } +} + +func testInt32Roundtrip(t *testing.T) { + // Define Int32-specific type info + int32TypeInfo := PrimitiveTypeInfo[int32]{ + TFType: reflect.TypeOf(types.Int32{}), + CreateValue: func(v int32) any { return types.Int32Value(v) }, + CreateNull: func() any { return types.Int32Null() }, + CreateAWSValue: func(v int32) any { return aws.Int32(v) }, + GetAWSNil: func() any { return (*int32)(nil) }, + GetZeroValue: func() int32 { return 0 }, + } + + // Test cases covering all scenarios from original TestExpandInt32 and TestFlattenInt32 + testCases := []struct { + name string + int32Value int32 + isNull bool + isZero bool + variants []string // which variants to test: "standard", "legacy" + }{ + { + name: "value", + int32Value: 42, + variants: []string{"standard", "legacy", "tf_to_aws_pointer", "legacy_tf_to_aws_pointer"}, + }, + { + name: "zero_value", + int32Value: 0, + isZero: true, + variants: []string{"standard", "legacy", "tf_to_aws_pointer", "legacy_tf_to_aws_pointer"}, + }, + { + name: "null_value", + isNull: true, + variants: []string{"standard", "legacy", "tf_to_aws_pointer", "legacy_tf_to_aws_pointer"}, + }, + } + + for _, tc := range testCases { + for _, variant := range tc.variants { + testName := tc.name + "_" + variant + t.Run(testName, func(t *testing.T) { + // Use helper for all roundtrip cases + runBasicRoundtripTest(t, testName, variant, int32TypeInfo, tc.int32Value, tc.isNull, tc.isZero, false, runChecks{CompareTarget: 
true, GoldenLogs: true}) + }) + } + } +} + +func testFloat64Roundtrip(t *testing.T) { + // Define Float64-specific type info + float64TypeInfo := PrimitiveTypeInfo[float64]{ + TFType: reflect.TypeOf(types.Float64{}), + CreateValue: func(v float64) any { return types.Float64Value(v) }, + CreateNull: func() any { return types.Float64Null() }, + CreateAWSValue: func(v float64) any { return aws.Float64(v) }, + GetAWSNil: func() any { return (*float64)(nil) }, + GetZeroValue: func() float64 { return 0.0 }, + } + + // Test cases covering all scenarios from original TestExpandFloat64 and TestFlattenFloat64 + testCases := []struct { + name string + float64Value float64 + isNull bool + isZero bool + variants []string // which variants to test: "standard", "legacy" + skipExpand bool // for future expansion if needed + }{ + { + name: "value", + float64Value: 42.0, + variants: []string{"standard", "legacy", "tf_to_aws_pointer", "legacy_tf_to_aws_pointer"}, + }, + { + name: "zero_value", + float64Value: 0.0, + isZero: true, + variants: []string{"standard", "legacy", "tf_to_aws_pointer", "legacy_tf_to_aws_pointer"}, + }, + { + name: "null_value", + isNull: true, + variants: []string{"standard", "legacy", "tf_to_aws_pointer", "legacy_tf_to_aws_pointer"}, + }, + } + + for _, tc := range testCases { + for _, variant := range tc.variants { + testName := tc.name + "_" + variant + t.Run(testName, func(t *testing.T) { + // Check for unsupported skipExpand cases + if tc.skipExpand { + t.Fatalf("skipExpand=true for Float64 tests requires special handling implementation") + } + + // Use helper for all roundtrip cases + runBasicRoundtripTest(t, testName, variant, float64TypeInfo, tc.float64Value, tc.isNull, tc.isZero, false, runChecks{CompareTarget: true, GoldenLogs: true}) + }) + } + } +} + +func testFloat32Roundtrip(t *testing.T) { + // Define Float32-specific type info + float32TypeInfo := PrimitiveTypeInfo[float32]{ + TFType: reflect.TypeOf(types.Float32{}), + CreateValue: func(v 
float32) any { return types.Float32Value(v) }, + CreateNull: func() any { return types.Float32Null() }, + CreateAWSValue: func(v float32) any { return aws.Float32(v) }, + GetAWSNil: func() any { return (*float32)(nil) }, + GetZeroValue: func() float32 { return 0.0 }, + } + + // Test cases covering all scenarios from original TestExpandFloat32 and TestFlattenFloat32 + testCases := []struct { + name string + float32Value float32 + isNull bool + isZero bool + variants []string // which variants to test: "standard", "legacy" + skipExpand bool // for future expansion if needed + }{ + { + name: "value", + float32Value: 42.0, + variants: []string{"standard", "legacy", "tf_to_aws_pointer", "legacy_tf_to_aws_pointer"}, + }, + { + name: "zero_value", + float32Value: 0.0, + isZero: true, + variants: []string{"standard", "legacy", "tf_to_aws_pointer", "legacy_tf_to_aws_pointer"}, + }, + { + name: "null_value", + isNull: true, + variants: []string{"standard", "legacy", "tf_to_aws_pointer", "legacy_tf_to_aws_pointer"}, + }, + } + + for _, tc := range testCases { + for _, variant := range tc.variants { + testName := tc.name + "_" + variant + t.Run(testName, func(t *testing.T) { + // Check for unsupported skipExpand cases + if tc.skipExpand { + t.Fatalf("skipExpand=true for Float32 tests requires special handling implementation") + } + + // Use helper for all roundtrip cases + runBasicRoundtripTest(t, testName, variant, float32TypeInfo, tc.float32Value, tc.isNull, tc.isZero, false, runChecks{CompareTarget: true, GoldenLogs: true}) + }) + } + } +} + +// runBasicRoundtripTest runs a single roundtrip test with standardized struct setup +func runBasicRoundtripTest[T any](t *testing.T, testName string, variant string, typeInfo PrimitiveTypeInfo[T], value T, isNull, isZero, isEmpty bool, checks runChecks) { + t.Helper() + + // Generate structs for this variant + var factory func(reflect.Type) (any, any) + for _, v := range primitiveTestVariants { + if v.Name == variant { + factory = 
v.Factory + break + } + } + + tfStruct, awsStruct := factory(typeInfo.TFType) + + // Set up TF struct with test value (always use value types for TF) + v := reflect.ValueOf(tfStruct).Elem() + field := v.FieldByName("Field1") + if !field.IsValid() || !field.CanSet() { + t.Fatalf("Field1 is not valid or cannot be set") + } + + if isNull { + field.Set(reflect.ValueOf(typeInfo.CreateNull())) + } else { + field.Set(reflect.ValueOf(typeInfo.CreateValue(value))) + } + + // Set up expected AWS struct based on variant - this is the common pattern + switch { + case variant == "legacy" || strings.HasPrefix(variant, "legacy_"): + if isNull { + // Legacy null behavior: null -> nil for pointers, zero for values + v := reflect.ValueOf(awsStruct).Elem() + field := v.FieldByName("Field1") + awsFieldType := field.Type() + if awsFieldType.Kind() == reflect.Ptr { + if field.IsValid() && field.CanSet() { + field.Set(reflect.ValueOf(typeInfo.GetAWSNil())) + } + } else { + if field.IsValid() && field.CanSet() { + field.Set(reflect.ValueOf(typeInfo.GetZeroValue())) + } + } + } else if isZero || isEmpty { + // Legacy zero/empty behavior: usually -> nil for pointers + v := reflect.ValueOf(awsStruct).Elem() + field := v.FieldByName("Field1") + awsFieldType := field.Type() + if awsFieldType.Kind() == reflect.Ptr { + if field.IsValid() && field.CanSet() { + field.Set(reflect.ValueOf(typeInfo.GetAWSNil())) + } + } else { + if field.IsValid() && field.CanSet() { + field.Set(reflect.ValueOf(value)) + } + } + } else { + // Legacy non-zero behavior + v := reflect.ValueOf(awsStruct).Elem() + field := v.FieldByName("Field1") + awsFieldType := field.Type() + if awsFieldType.Kind() == reflect.Ptr { + if field.IsValid() && field.CanSet() { + field.Set(reflect.ValueOf(typeInfo.CreateAWSValue(value))) + } + } else { + if field.IsValid() && field.CanSet() { + field.Set(reflect.ValueOf(value)) + } + } + } + default: // standard + if isNull { + // Standard null behavior: null -> nil for pointers, zero for 
values + v := reflect.ValueOf(awsStruct).Elem() + field := v.FieldByName("Field1") + awsFieldType := field.Type() + // For null values with non-pointer AWS fields, set to zero value + // For pointer fields, leave unset (nil is already the zero value) + if awsFieldType.Kind() != reflect.Ptr { + if field.IsValid() && field.CanSet() { + field.Set(reflect.ValueOf(typeInfo.GetZeroValue())) + } + } + } else { + // Standard behavior: value -> aws.Xxx(value) or value + v := reflect.ValueOf(awsStruct).Elem() + field := v.FieldByName("Field1") + awsFieldType := field.Type() + if awsFieldType.Kind() == reflect.Ptr { + if field.IsValid() && field.CanSet() { + field.Set(reflect.ValueOf(typeInfo.CreateAWSValue(value))) + } + } else { + if field.IsValid() && field.CanSet() { + field.Set(reflect.ValueOf(value)) + } + } + } + } + + // Full roundtrip test + rtc := RoundtripTestCase[T]{ + Name: testName, + OriginalValue: value, + TFStruct: tfStruct, + AWSStruct: awsStruct, + ExpectError: false, + Options: nil, + ExpectedDiags: nil, // No diagnostics expected for basic roundtrip tests + } + + runRoundtripTest(t, rtc, checks) +} + +// PrimitiveTestCase represents a test case for any primitive type +type PrimitiveTestCase[T any] struct { + Name string + Value T + IsNull bool + IsZero bool + IsEmpty bool // for strings only + Variants []string + SkipExpand bool // for flatten-only tests +} + +// PrimitiveTypeInfo contains type-specific information for testing primitives +type PrimitiveTypeInfo[T any] struct { + TFType reflect.Type + CreateValue func(T) any // creates types.XxxValue(v) + CreateNull func() any // creates types.XxxNull() + CreateAWSValue func(T) any // creates aws.Xxx(v) + GetAWSNil func() any // creates (*type)(nil) + GetZeroValue func() T // creates zero value for the type +} + +type RoundtripTestCase[T any] struct { + Name string + OriginalValue T + TFStruct any + AWSStruct any + ExpectError bool + Options []AutoFlexOptionsFunc + ExpectedDiags diag.Diagnostics // 
Expected diagnostics for expand/flatten operations +} + +// PrimitiveTestVariant defines different struct variants for testing +type PrimitiveTestVariant struct { + Name string + Tag string + Factory func(fieldType reflect.Type) (tf, aws any) +} + +// runRoundtripTest executes a complete roundtrip test: TF -> AWS -> TF +func runRoundtripTest[T any](t *testing.T, tc RoundtripTestCase[T], checks runChecks) { + t.Helper() + + ctx := context.Background() + + // Set up logging if golden logs are requested + var buf bytes.Buffer + if checks.GoldenLogs { + ctx = tflogtest.RootLogger(ctx, &buf) + ctx = registerTestingLogger(ctx) + } + + // Step 1: Expand TF -> AWS + expandedAWS := reflect.New(reflect.TypeOf(tc.AWSStruct).Elem()).Interface() + expandDiags := Expand(ctx, tc.TFStruct, expandedAWS, tc.Options...) + + // Check diagnostics if requested + if checks.CompareDiags { + if diff := cmp.Diff(expandDiags, tc.ExpectedDiags); diff != "" { + t.Errorf("unexpected expand diagnostics difference: %s", diff) + } + } + + if tc.ExpectError { + if !expandDiags.HasError() { + t.Errorf("Expected error during expand, but got none") + } + return + } + + if expandDiags.HasError() { + t.Fatalf("Unexpected error during expand: %v", expandDiags) + } + + // Step 2: Flatten the AWS struct back to TF + actualTF := reflect.New(reflect.TypeOf(tc.TFStruct).Elem()).Interface() + flattenDiags := Flatten(ctx, expandedAWS, actualTF, tc.Options...) 
+ + // Check flatten diagnostics if requested (and we have expected flatten diags) + // Note: We only check expand diagnostics above since that's the primary use case + // but we could extend this to also check flatten diagnostics if needed + + if len(flattenDiags) > 0 { + if tc.ExpectError { + return + } + t.Fatalf("Unexpected flatten errors for %s: %v", tc.Name, flattenDiags) + } + + flattenedTF := actualTF + + // Step 3: Verify roundtrip consistency (with known behavioral exceptions) + expectedTF := tc.TFStruct + + // Handle known behavioral differences for null values + if tc.Name != "" { + // For null values: conversion to default values for variants that don't maintain null + // (standard, legacy, omitempty use non-pointer AWS fields; legacy_pointer also converts null→default due to legacy flatten behavior) + if strings.Contains(tc.Name, "null_value") && (!strings.Contains(tc.Name, "pointer") || strings.Contains(tc.Name, "legacy_tf_to_aws_pointer")) { + // Detect field type from the struct + fieldValue := reflect.ValueOf(tc.TFStruct).Elem().FieldByName("Field1") + fieldType := fieldValue.Type() + + if fieldType == reflect.TypeOf(types.String{}) { + // null -> empty string for legacy string variants + expectedTF = reflect.New(reflect.TypeOf(tc.TFStruct).Elem()).Interface() + reflect.ValueOf(expectedTF).Elem().FieldByName("Field1").Set(reflect.ValueOf(types.StringValue(""))) + } else if fieldType == reflect.TypeOf(types.Bool{}) { + // null -> false for legacy bool variants + expectedTF = reflect.New(reflect.TypeOf(tc.TFStruct).Elem()).Interface() + reflect.ValueOf(expectedTF).Elem().FieldByName("Field1").Set(reflect.ValueOf(types.BoolValue(false))) + } else if fieldType == reflect.TypeOf(types.Int64{}) { + // null -> 0 for legacy int64 variants + expectedTF = reflect.New(reflect.TypeOf(tc.TFStruct).Elem()).Interface() + reflect.ValueOf(expectedTF).Elem().FieldByName("Field1").Set(reflect.ValueOf(types.Int64Value(0))) + } else if fieldType == 
reflect.TypeOf(types.Int32{}) { + // null -> 0 for legacy int32 variants + expectedTF = reflect.New(reflect.TypeOf(tc.TFStruct).Elem()).Interface() + reflect.ValueOf(expectedTF).Elem().FieldByName("Field1").Set(reflect.ValueOf(types.Int32Value(0))) + } else if fieldType == reflect.TypeOf(types.Float64{}) { + // null -> 0.0 for all float64 variants + expectedTF = reflect.New(reflect.TypeOf(tc.TFStruct).Elem()).Interface() + reflect.ValueOf(expectedTF).Elem().FieldByName("Field1").Set(reflect.ValueOf(types.Float64Value(0.0))) + } else if fieldType == reflect.TypeOf(types.Float32{}) { + // null -> 0.0 for all float32 variants + expectedTF = reflect.New(reflect.TypeOf(tc.TFStruct).Elem()).Interface() + reflect.ValueOf(expectedTF).Elem().FieldByName("Field1").Set(reflect.ValueOf(types.Float32Value(0.0))) + } + } + // For omitempty: empty string -> null behavior + if strings.Contains(tc.Name, "omitempty") && strings.Contains(tc.Name, "empty") { + // Create expected TF with null instead of empty string + expectedTF = reflect.New(reflect.TypeOf(tc.TFStruct).Elem()).Interface() + reflect.ValueOf(expectedTF).Elem().FieldByName("Field1").Set(reflect.ValueOf(types.StringNull())) + } + } + + if checks.CompareTarget { + if diff := cmp.Diff(expectedTF, flattenedTF); diff != "" { + t.Errorf("Roundtrip mismatch for %s (+got, -want): %s", tc.Name, diff) + } + + // Step 4: Verify AWS structure matches expected + if diff := cmp.Diff(tc.AWSStruct, expandedAWS); diff != "" { + t.Errorf("AWS structure mismatch for %s (+got, -want): %s", tc.Name, diff) + } + } + + // Golden log validation (if requested) + if checks.GoldenLogs { + lines, err := tflogtest.MultilineJSONDecode(&buf) + if err != nil { + t.Fatalf("decoding log lines: %s", err) + } + normalizedLines := normalizeLogs(lines) + + // Auto-generate golden path from test hierarchy + // Use the full test name which includes the primitive type in the hierarchy + // Extract the primitive type and test case name from the full test name + 
// e.g., "TestPrimitivesRoundtrip/Int32/value_standard" -> "Int32_value_standard" + fullTestName := t.Name() + testCaseName := "" + if parts := strings.Split(fullTestName, "/"); len(parts) >= 3 { + // parts[0] = "TestPrimitivesRoundtrip", parts[1] = "Int32", parts[2] = "value_standard" + testCaseName = strings.ToLower(parts[1]) + "_" + parts[2] + } else if lastSlash := strings.LastIndex(fullTestName, "/"); lastSlash != -1 { + testCaseName = fullTestName[lastSlash+1:] + } + goldenFileName := autoGenerateGoldenPath(t, fullTestName, testCaseName) + goldenPath := filepath.Join("testdata", goldenFileName) + compareWithGolden(t, goldenPath, normalizedLines) + } +} + +// runFlattenOnlyTest executes only the flatten direction: AWS -> TF +func runFlattenOnlyTest(t *testing.T, testName string, awsStruct, expectedTF any) { + t.Helper() + + ctx := context.Background() + + // Flatten AWS -> TF + actualTF := reflect.New(reflect.TypeOf(expectedTF).Elem()).Interface() + flattenDiags := Flatten(ctx, awsStruct, actualTF) + + if flattenDiags.HasError() { + t.Fatalf("Unexpected error during flatten: %v", flattenDiags) + } + + // Verify TF structure matches expected + if diff := cmp.Diff(expectedTF, actualTF); diff != "" { + t.Errorf("Flatten result mismatch for %s (+got, -want): %s", testName, diff) + } +} + +// Standard primitive test variants +var primitiveTestVariants = []PrimitiveTestVariant{ + { + Name: "standard", + Tag: `tfsdk:"field1"`, + Factory: func(fieldType reflect.Type) (tf, aws any) { + return generateStandardStructs(fieldType) + }, + }, + { + Name: "legacy", + Tag: `tfsdk:"field1" autoflex:",legacy"`, + Factory: func(fieldType reflect.Type) (tf, aws any) { + return generateLegacyStructs(fieldType) + }, + }, + { + Name: "omitempty", + Tag: `tfsdk:"field1" autoflex:",omitempty"`, + Factory: func(fieldType reflect.Type) (tf, aws any) { + return generateOmitEmptyStructs(fieldType) + }, + }, + { + Name: "tf_to_aws_pointer", + Tag: `tfsdk:"field1"`, + Factory: func(fieldType 
reflect.Type) (tf, aws any) { + return generateTFToAWSPointerStructs(fieldType) + }, + }, + { + Name: "legacy_tf_to_aws_pointer", + Tag: `tfsdk:"field1" autoflex:",legacy"`, + Factory: func(fieldType reflect.Type) (tf, aws any) { + return generateLegacyTFToAWSPointerStructs(fieldType) + }, + }, +} + +// generateStandardStructs creates standard TF and AWS structs for testing +func generateStandardStructs(fieldType reflect.Type) (tf, aws any) { + // Create TF struct with framework type + tfStructType := reflect.StructOf([]reflect.StructField{ + { + Name: "Field1", + Type: fieldType, + Tag: `tfsdk:"field1"`, + }, + }) + tfStruct := reflect.New(tfStructType).Interface() + + // Create AWS struct based on field type + var awsFieldType reflect.Type + switch fieldType { + case reflect.TypeOf(types.String{}): + awsFieldType = reflect.TypeOf("") + case reflect.TypeOf(types.Bool{}): + awsFieldType = reflect.TypeOf(false) + case reflect.TypeOf(types.Int64{}): + awsFieldType = reflect.TypeOf(int64(0)) + case reflect.TypeOf(types.Int32{}): + awsFieldType = reflect.TypeOf(int32(0)) + case reflect.TypeOf(types.Float64{}): + awsFieldType = reflect.TypeOf(float64(0)) + case reflect.TypeOf(types.Float32{}): + awsFieldType = reflect.TypeOf(float32(0)) + default: + panic("unsupported field type") + } + + awsStructType := reflect.StructOf([]reflect.StructField{ + { + Name: "Field1", + Type: awsFieldType, + }, + }) + awsStruct := reflect.New(awsStructType).Interface() + + return tfStruct, awsStruct +} + +// generateLegacyStructs creates legacy-tagged TF structs paired with pointer AWS structs +func generateLegacyStructs(fieldType reflect.Type) (tf, aws any) { + // Create TF struct with legacy tag + tfStructType := reflect.StructOf([]reflect.StructField{ + { + Name: "Field1", + Type: fieldType, + Tag: `tfsdk:"field1" autoflex:",legacy"`, + }, + }) + tfStruct := reflect.New(tfStructType).Interface() + + // Create AWS struct with pointer field for legacy behavior + var awsFieldType 
reflect.Type + switch fieldType { + case reflect.TypeOf(types.String{}): + awsFieldType = reflect.TypeOf((*string)(nil)) + case reflect.TypeOf(types.Bool{}): + awsFieldType = reflect.TypeOf((*bool)(nil)) + case reflect.TypeOf(types.Int64{}): + awsFieldType = reflect.TypeOf((*int64)(nil)) + case reflect.TypeOf(types.Int32{}): + awsFieldType = reflect.TypeOf((*int32)(nil)) + case reflect.TypeOf(types.Float64{}): + awsFieldType = reflect.TypeOf((*float64)(nil)) + case reflect.TypeOf(types.Float32{}): + awsFieldType = reflect.TypeOf((*float32)(nil)) + default: + panic("unsupported field type") + } + + awsStructType := reflect.StructOf([]reflect.StructField{ + { + Name: "Field1", + Type: awsFieldType, + }, + }) + awsStruct := reflect.New(awsStructType).Interface() + + return tfStruct, awsStruct +} + +// generateOmitEmptyStructs creates omitempty-tagged TF structs for testing +func generateOmitEmptyStructs(fieldType reflect.Type) (tf, aws any) { + // Create TF struct with omitempty tag + tfStructType := reflect.StructOf([]reflect.StructField{ + { + Name: "Field1", + Type: fieldType, + Tag: `tfsdk:"field1" autoflex:",omitempty"`, + }, + }) + tfStruct := reflect.New(tfStructType).Interface() + + // For omitempty, AWS side uses pointer types + var awsFieldType reflect.Type + switch fieldType { + case reflect.TypeOf(types.String{}): + awsFieldType = reflect.TypeOf((*string)(nil)) + case reflect.TypeOf(types.Bool{}): + awsFieldType = reflect.TypeOf((*bool)(nil)) + case reflect.TypeOf(types.Int64{}): + awsFieldType = reflect.TypeOf((*int64)(nil)) + case reflect.TypeOf(types.Int32{}): + awsFieldType = reflect.TypeOf((*int32)(nil)) + case reflect.TypeOf(types.Float64{}): + awsFieldType = reflect.TypeOf((*float64)(nil)) + case reflect.TypeOf(types.Float32{}): + awsFieldType = reflect.TypeOf((*float32)(nil)) + default: + panic("unsupported field type") + } + + awsStructType := reflect.StructOf([]reflect.StructField{ + { + Name: "Field1", + Type: awsFieldType, + }, + }) + awsStruct 
:= reflect.New(awsStructType).Interface() + + return tfStruct, awsStruct +} + +// generateTFToAWSPointerStructs creates value TF structs paired with pointer AWS structs +// Tests: types.String -> *string, types.Bool -> *bool, etc. +func generateTFToAWSPointerStructs(fieldType reflect.Type) (tf, aws any) { // nosemgrep:ci.aws-in-func-name + // Create TF struct with value field type + tfStructType := reflect.StructOf([]reflect.StructField{ + { + Name: "Field1", + Type: fieldType, // Value type (types.String, types.Bool, etc.) + Tag: `tfsdk:"field1"`, + }, + }) + tfStruct := reflect.New(tfStructType).Interface() + + // Create AWS struct with pointer field type + var awsFieldType reflect.Type + switch fieldType { + case reflect.TypeOf(types.String{}): + awsFieldType = reflect.TypeOf((*string)(nil)) + case reflect.TypeOf(types.Bool{}): + awsFieldType = reflect.TypeOf((*bool)(nil)) + case reflect.TypeOf(types.Int64{}): + awsFieldType = reflect.TypeOf((*int64)(nil)) + case reflect.TypeOf(types.Int32{}): + awsFieldType = reflect.TypeOf((*int32)(nil)) + case reflect.TypeOf(types.Float64{}): + awsFieldType = reflect.TypeOf((*float64)(nil)) + case reflect.TypeOf(types.Float32{}): + awsFieldType = reflect.TypeOf((*float32)(nil)) + default: + panic("unsupported field type") + } + + awsStructType := reflect.StructOf([]reflect.StructField{ + { + Name: "Field1", + Type: awsFieldType, + }, + }) + awsStruct := reflect.New(awsStructType).Interface() + + return tfStruct, awsStruct +} + +// generateLegacyTFToAWSPointerStructs creates legacy TF structs paired with pointer AWS structs +// Tests: types.String (legacy) -> *string, types.Bool (legacy) -> *bool, etc. +func generateLegacyTFToAWSPointerStructs(fieldType reflect.Type) (tf, aws any) { // nosemgrep:ci.aws-in-func-name + // Create TF struct with legacy tag + tfStructType := reflect.StructOf([]reflect.StructField{ + { + Name: "Field1", + Type: fieldType, // Value type (types.String, types.Bool, etc.) 
+ Tag: `tfsdk:"field1" autoflex:",legacy"`, + }, + }) + tfStruct := reflect.New(tfStructType).Interface() + + // Create AWS struct with pointer field type + var awsFieldType reflect.Type + switch fieldType { + case reflect.TypeOf(types.String{}): + awsFieldType = reflect.TypeOf((*string)(nil)) + case reflect.TypeOf(types.Bool{}): + awsFieldType = reflect.TypeOf((*bool)(nil)) + case reflect.TypeOf(types.Int64{}): + awsFieldType = reflect.TypeOf((*int64)(nil)) + case reflect.TypeOf(types.Int32{}): + awsFieldType = reflect.TypeOf((*int32)(nil)) + case reflect.TypeOf(types.Float64{}): + awsFieldType = reflect.TypeOf((*float64)(nil)) + case reflect.TypeOf(types.Float32{}): + awsFieldType = reflect.TypeOf((*float32)(nil)) + default: + panic("unsupported field type") + } + + awsStructType := reflect.StructOf([]reflect.StructField{ + { + Name: "Field1", + Type: awsFieldType, + }, + }) + awsStruct := reflect.New(awsStructType).Interface() + + return tfStruct, awsStruct +} diff --git a/internal/framework/flex/autoflex_special_types_test.go b/internal/framework/flex/autoflex_special_types_test.go new file mode 100644 index 000000000000..ca8bdef924e8 --- /dev/null +++ b/internal/framework/flex/autoflex_special_types_test.go @@ -0,0 +1,315 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package flex + +// Tests AutoFlex's Expand/Flatten of special types: +// - timetypes.RFC3339 +// - fwtypes.ARN +// - fwtypes.SmithyJSON + +import ( + "errors" + "testing" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" + smithydocument "github.com/aws/smithy-go/document" + "github.com/hashicorp/terraform-plugin-framework-timetypes/timetypes" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-provider-aws/internal/errs" + fwtypes "github.com/hashicorp/terraform-provider-aws/internal/framework/types" + tfjson "github.com/hashicorp/terraform-provider-aws/internal/json" + tfsmithy "github.com/hashicorp/terraform-provider-aws/internal/smithy" +) + +type tfRFC3339Time struct { + CreationDateTime timetypes.RFC3339 `tfsdk:"creation_date_time"` +} + +type awsRFC3339TimePointer struct { + CreationDateTime *time.Time +} + +type awsRFC3339TimeValue struct { + CreationDateTime time.Time +} + +type tfSingleARNField struct { + Field1 fwtypes.ARN `tfsdk:"field1"` +} + +var _ tfsmithy.JSONStringer = (*testJSONDocument)(nil) +var _ smithydocument.Marshaler = (*testJSONDocument)(nil) + +type testJSONDocument struct { + Value any +} + +func newTestJSONDocument(v any) tfsmithy.JSONStringer { + return &testJSONDocument{Value: v} +} + +func (m *testJSONDocument) UnmarshalSmithyDocument(v any) error { + data, err := tfjson.EncodeToBytes(m.Value) + if err != nil { + return err + } + return tfjson.DecodeFromBytes(data, v) +} + +func (m *testJSONDocument) MarshalSmithyDocument() ([]byte, error) { + return tfjson.EncodeToBytes(m.Value) +} + +var _ tfsmithy.JSONStringer = &testJSONDocumentError{} + +type testJSONDocumentError struct{} + +func (m *testJSONDocumentError) UnmarshalSmithyDocument(v any) error { + return errUnmarshallSmithyDocument +} + +func (m *testJSONDocumentError) MarshalSmithyDocument() ([]byte, error) { + return nil, errMarshallSmithyDocument +} + +var ( 
errUnmarshallSmithyDocument = errors.New("test unmarshal error") + errMarshallSmithyDocument = errors.New("test marshal error") +) + +type awsJSONStringer struct { + Field1 tfsmithy.JSONStringer `json:"field1"` +} + +type tfJSONStringer struct { + Field1 fwtypes.SmithyJSON[tfsmithy.JSONStringer] `tfsdk:"field1"` +} + +func TestExpandSpecialTypes(t *testing.T) { + t.Parallel() + + testARN := "arn:aws:securityhub:us-west-2:1234567890:control/cis-aws-foundations-benchmark/v/1.2.0/1.1" //lintignore:AWSAT003,AWSAT005 + + testTimeStr := "2013-09-25T09:34:01Z" + testTimeTime := errs.Must(time.Parse(time.RFC3339, testTimeStr)) + + testCases := map[string]autoFlexTestCases{ + "timestamp": { + "timestamp pointer": { + Source: &tfRFC3339Time{ + CreationDateTime: timetypes.NewRFC3339ValueMust(testTimeStr), + }, + Target: &awsRFC3339TimePointer{}, + WantTarget: &awsRFC3339TimePointer{ + CreationDateTime: &testTimeTime, + }, + }, + "timestamp": { + Source: &tfRFC3339Time{ + CreationDateTime: timetypes.NewRFC3339ValueMust(testTimeStr), + }, + Target: &awsRFC3339TimeValue{}, + WantTarget: &awsRFC3339TimeValue{ + CreationDateTime: testTimeTime, + }, + }, + }, + + "single ARN": { + "single ARN Source and single string Target": { + Source: &tfSingleARNField{Field1: fwtypes.ARNValue(testARN)}, + Target: &awsSingleStringValue{}, + WantTarget: &awsSingleStringValue{Field1: testARN}, + }, + "single ARN Source and single *string Target": { + Source: &tfSingleARNField{Field1: fwtypes.ARNValue(testARN)}, + Target: &awsSingleStringPointer{}, + WantTarget: &awsSingleStringPointer{Field1: aws.String(testARN)}, + }, + }, + + "json": { + "JSONValue Source to json interface Target": { + Source: &tfJSONStringer{Field1: fwtypes.NewSmithyJSONValue(`{"field1": "a"}`, newTestJSONDocument)}, + Target: &awsJSONStringer{}, + WantTarget: &awsJSONStringer{ + Field1: &testJSONDocument{ + Value: map[string]any{ + "field1": "a", + }, + }, + }, + }, + }, + } + + for testName, cases := range testCases { + 
t.Run(testName, func(t *testing.T) { + t.Parallel() + + runAutoExpandTestCases(t, cases, runChecks{CompareDiags: true, CompareTarget: true}) + }) + } +} + +func TestFlattenSpecialTypes(t *testing.T) { + t.Parallel() + + testARN := "arn:aws:securityhub:us-west-2:1234567890:control/cis-aws-foundations-benchmark/v/1.2.0/1.1" //lintignore:AWSAT003,AWSAT005 + + testTimeStr := "2013-09-25T09:34:01Z" + testTimeTime := errs.Must(time.Parse(time.RFC3339, testTimeStr)) + var zeroTime time.Time + + testCases := map[string]autoFlexTestCases{ + "single ARN": { + "single string Source and single ARN Target": { + Source: &awsSingleStringValue{Field1: testARN}, + Target: &tfSingleARNField{}, + WantTarget: &tfSingleARNField{Field1: fwtypes.ARNValue(testARN)}, + }, + "single *string Source and single ARN Target": { + Source: &awsSingleStringPointer{Field1: aws.String(testARN)}, + Target: &tfSingleARNField{}, + WantTarget: &tfSingleARNField{Field1: fwtypes.ARNValue(testARN)}, + }, + "single nil *string Source and single ARN Target": { + Source: &awsSingleStringPointer{}, + Target: &tfSingleARNField{}, + WantTarget: &tfSingleARNField{Field1: fwtypes.ARNNull()}, + }, + }, + "timestamp": { + "timestamp": { + Source: &awsRFC3339TimeValue{ + CreationDateTime: testTimeTime, + }, + Target: &tfRFC3339Time{}, + WantTarget: &tfRFC3339Time{ + CreationDateTime: timetypes.NewRFC3339ValueMust(testTimeStr), + }, + }, + "timestamp pointer": { + Source: &awsRFC3339TimePointer{ + CreationDateTime: &testTimeTime, + }, + Target: &tfRFC3339Time{}, + WantTarget: &tfRFC3339Time{ + CreationDateTime: timetypes.NewRFC3339ValueMust(testTimeStr), + }, + }, + "timestamp nil": { + Source: &awsRFC3339TimePointer{}, + Target: &tfRFC3339Time{}, + WantTarget: &tfRFC3339Time{ + CreationDateTime: timetypes.NewRFC3339Null(), + }, + }, + "timestamp empty": { + Source: &awsRFC3339TimeValue{}, + Target: &tfRFC3339Time{}, + WantTarget: &tfRFC3339Time{ + CreationDateTime: timetypes.NewRFC3339TimeValue(zeroTime), + }, + }, + 
}, + } + + for testName, cases := range testCases { + t.Run(testName, func(t *testing.T) { + t.Parallel() + + runAutoExpandTestCases(t, cases, runChecks{CompareDiags: false, CompareTarget: true}) + }) + } +} + +func TestFlattenJSONInterfaceToStringTypable(t *testing.T) { + t.Parallel() + + testCases := autoFlexTestCases{ + "json interface Source string Target": { + Source: &awsJSONStringer{ + Field1: &testJSONDocument{ + Value: &struct { + Test string `json:"test"` + }{ + Test: "a", + }, + }, + }, + Target: &tfSingleStringField{}, + WantTarget: &tfSingleStringField{ + Field1: types.StringValue(`{"test":"a"}`), + }, + }, + "null json interface Source string Target": { + Source: &awsJSONStringer{ + Field1: nil, + }, + Target: &tfSingleStringField{}, + WantTarget: &tfSingleStringField{ + Field1: types.StringNull(), + }, + }, + + "json interface Source JSONValue Target": { + Source: &awsJSONStringer{ + Field1: &testJSONDocument{ + Value: &struct { + Test string `json:"test"` + }{ + Test: "a", + }, + }, + }, + Target: &tfJSONStringer{}, + WantTarget: &tfJSONStringer{ + Field1: fwtypes.NewSmithyJSONValue(`{"test":"a"}`, newTestJSONDocument), + }, + }, + "null json interface Source JSONValue Target": { + Source: &awsJSONStringer{ + Field1: nil, + }, + Target: &tfJSONStringer{}, + WantTarget: &tfJSONStringer{ + Field1: fwtypes.NewSmithyJSONNull[tfsmithy.JSONStringer](), + }, + }, + + "json interface Source marshal error": { + Source: &awsJSONStringer{ + Field1: &testJSONDocumentError{}, + }, + Target: &tfSingleStringField{}, + ExpectedDiags: diagAFTypeErr[*testJSONDocumentError](diagFlatteningMarshalSmithyDocument, errMarshallSmithyDocument), + }, + + "non-json interface Source string Target": { + Source: awsInterfaceSingle{ + Field1: &awsInterfaceInterfaceImpl{ + AWSField: "value1", + }, + }, + Target: &tfSingleStringField{}, + WantTarget: &tfSingleStringField{ + Field1: types.StringNull(), + }, + }, + + "null non-json interface Source string Target": { + Source: 
awsInterfaceSingle{ + Field1: nil, + }, + Target: &tfSingleStringField{}, + WantTarget: &tfSingleStringField{ + Field1: types.StringNull(), + }, + }, + } + + runAutoFlattenTestCases(t, testCases, runChecks{CompareDiags: true, CompareTarget: true}) +} diff --git a/internal/framework/flex/autoflex_strings_test.go b/internal/framework/flex/autoflex_strings_test.go new file mode 100644 index 000000000000..01d02970eedd --- /dev/null +++ b/internal/framework/flex/autoflex_strings_test.go @@ -0,0 +1,320 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package flex + +// Tests AutoFlex's Expand/Flatten of strings and string-like types. +// Additional, foundational string tests are in autoflex_primitives_test.go. + +import ( + "testing" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/hashicorp/terraform-plugin-framework/types" + fwtypes "github.com/hashicorp/terraform-provider-aws/internal/framework/types" +) + +type tfSingleStringFieldLegacy struct { + Field1 types.String `tfsdk:"field1" autoflex:",legacy"` +} + +type awsSingleStringValue struct { + Field1 string +} + +type awsSingleStringPointer struct { + Field1 *string +} + +type testEnum string + +// Enum values for testEnum +const ( + testEnumScalar testEnum = "Scalar" + testEnumList testEnum = "List" +) + +func (testEnum) Values() []testEnum { + return []testEnum{ + testEnumScalar, + testEnumList, + } +} + +func TestExpandString(t *testing.T) { + t.Parallel() + + testString := "test" + testStringResult := "a" + + testByteSlice := []byte("test") + testByteSliceResult := []byte("a") + + testCases := map[string]autoFlexTestCases{ + "types.String to string": { + "types.String to string": { + Source: types.StringValue("a"), + Target: &testString, + WantTarget: &testStringResult, + }, + "types.String to byte slice": { + Source: types.StringValue("a"), + Target: &testByteSlice, + WantTarget: &testByteSliceResult, + }, + "single string struct pointer Source and empty Target": { + Source: 
&tfSingleStringField{Field1: types.StringValue("a")}, + Target: &emptyStruct{}, + WantTarget: &emptyStruct{}, + }, + "single string Source and single string Target": { + Source: &tfSingleStringField{Field1: types.StringValue("a")}, + Target: &awsSingleStringValue{}, + WantTarget: &awsSingleStringValue{Field1: "a"}, + }, + "single string Source and byte slice Target": { + Source: &tfSingleStringField{Field1: types.StringValue("a")}, + Target: &awsSingleByteSliceValue{}, + WantTarget: &awsSingleByteSliceValue{Field1: []byte("a")}, + }, + "single string Source and single *string Target": { + Source: &tfSingleStringField{Field1: types.StringValue("a")}, + Target: &awsSingleStringPointer{}, + WantTarget: &awsSingleStringPointer{Field1: aws.String("a")}, + }, + "single string Source and single int64 Target": { + Source: &tfSingleStringField{Field1: types.StringValue("a")}, + Target: &awsSingleInt64Value{}, + WantTarget: &awsSingleInt64Value{}, + }, + }, + } + + for testName, cases := range testCases { + t.Run(testName, func(t *testing.T) { + t.Parallel() + + runAutoExpandTestCases(t, cases, runChecks{CompareDiags: true, CompareTarget: true}) + }) + } +} + +func TestExpandStringEnum(t *testing.T) { + t.Parallel() + + var enum testEnum + enumList := testEnumList + + testCases := autoFlexTestCases{ + "valid value": { + Source: fwtypes.StringEnumValue(testEnumList), + Target: &enum, + WantTarget: &enumList, + }, + "empty value": { + Source: fwtypes.StringEnumNull[testEnum](), + Target: &enum, + WantTarget: &enum, + }, + } + runAutoExpandTestCases(t, testCases, runChecks{CompareDiags: true, CompareTarget: true}) +} + +type tfSingleStringFieldOmitEmpty struct { + Field1 types.String `tfsdk:"field1" autoflex:",omitempty"` +} + +func TestFlattenString(t *testing.T) { + t.Parallel() + + testCases := map[string]autoFlexTestCases{ + "*string to String": { + "value": { + Source: awsSingleStringPointer{ + Field1: aws.String("a"), + }, + Target: &tfSingleStringField{}, + WantTarget: 
&tfSingleStringField{ + Field1: types.StringValue("a"), + }, + }, + "zero": { + Source: awsSingleStringPointer{ + Field1: aws.String(""), + }, + Target: &tfSingleStringField{}, + WantTarget: &tfSingleStringField{ + Field1: types.StringValue(""), + }, + }, + "null": { + Source: awsSingleStringPointer{ + Field1: nil, + }, + Target: &tfSingleStringField{}, + WantTarget: &tfSingleStringField{ + Field1: types.StringNull(), + }, + }, + }, + + "omitempty string to String": { + "value": { + Source: awsSingleStringValue{ + Field1: "a", + }, + Target: &tfSingleStringFieldOmitEmpty{}, + WantTarget: &tfSingleStringFieldOmitEmpty{ + Field1: types.StringValue("a"), + }, + }, + "zero": { + Source: awsSingleStringValue{ + Field1: "", + }, + Target: &tfSingleStringFieldOmitEmpty{}, + WantTarget: &tfSingleStringFieldOmitEmpty{ + Field1: types.StringNull(), + }, + }, + }, + + "omitempty *string to String": { + "value": { + Source: awsSingleStringPointer{ + Field1: aws.String("a"), + }, + Target: &tfSingleStringFieldOmitEmpty{}, + WantTarget: &tfSingleStringFieldOmitEmpty{ + Field1: types.StringValue("a"), + }, + }, + "zero": { + Source: awsSingleStringPointer{ + Field1: aws.String(""), + }, + Target: &tfSingleStringFieldOmitEmpty{}, + WantTarget: &tfSingleStringFieldOmitEmpty{ + Field1: types.StringNull(), + }, + }, + "null": { + Source: awsSingleStringPointer{ + Field1: nil, + }, + Target: &tfSingleStringFieldOmitEmpty{}, + WantTarget: &tfSingleStringFieldOmitEmpty{ + Field1: types.StringNull(), + }, + }, + }, + + "legacy *string to String": { + "value": { + Source: awsSingleStringPointer{ + Field1: aws.String("a"), + }, + Target: &tfSingleStringFieldLegacy{}, + WantTarget: &tfSingleStringFieldLegacy{ + Field1: types.StringValue("a"), + }, + }, + "zero": { + Source: awsSingleStringPointer{ + Field1: aws.String(""), + }, + Target: &tfSingleStringFieldLegacy{}, + WantTarget: &tfSingleStringFieldLegacy{ + Field1: types.StringValue(""), + }, + }, + "null": { + Source: 
awsSingleStringPointer{ + Field1: nil, + }, + Target: &tfSingleStringFieldLegacy{}, + WantTarget: &tfSingleStringFieldLegacy{ + Field1: types.StringValue(""), + }, + }, + }, + } + + for testName, cases := range testCases { + t.Run(testName, func(t *testing.T) { + t.Parallel() + + runAutoFlattenTestCases(t, cases, runChecks{CompareDiags: true, CompareTarget: true}) + }) + } +} + +func TestFlattenStringSpecial(t *testing.T) { + t.Parallel() + + testCases := autoFlexTestCases{ + "single empty string Source and single string Target": { + Source: &awsSingleStringValue{}, + Target: &tfSingleStringField{}, + WantTarget: &tfSingleStringField{Field1: types.StringValue("")}, + }, + "single string Source and single string Target": { + Source: &awsSingleStringValue{Field1: "a"}, + Target: &tfSingleStringField{}, + WantTarget: &tfSingleStringField{Field1: types.StringValue("a")}, + }, + "single byte slice Source and single string Target": { + Source: &awsSingleByteSliceValue{Field1: []byte("a")}, + Target: &tfSingleStringField{}, + WantTarget: &tfSingleStringField{Field1: types.StringValue("a")}, + }, + "single nil *string Source and single string Target": { + Source: &awsSingleStringPointer{}, + Target: &tfSingleStringField{}, + WantTarget: &tfSingleStringField{Field1: types.StringNull()}, + }, + "single *string Source and single string Target": { + Source: &awsSingleStringPointer{Field1: aws.String("a")}, + Target: &tfSingleStringField{}, + WantTarget: &tfSingleStringField{Field1: types.StringValue("a")}, + }, + "single string Source and single int64 Target": { + Source: &awsSingleStringValue{Field1: "a"}, + Target: &tfSingleInt64Field{}, + WantTarget: &tfSingleInt64Field{}, + }, + "single string struct pointer Source and empty Target": { + Source: &awsSingleStringValue{Field1: "a"}, + Target: &emptyStruct{}, + WantTarget: &emptyStruct{}, + }, + } + + runAutoFlattenTestCases(t, testCases, runChecks{CompareDiags: true, CompareTarget: true, GoldenLogs: true}) +} + +func 
TestFlattenTopLevelStringPtr(t *testing.T) { + t.Parallel() + + testCases := toplevelTestCases[*string, types.String]{ + "value": { + source: aws.String("value"), + expectedValue: types.StringValue("value"), + ExpectedDiags: diagAFEmpty(), + }, + + "empty": { + source: aws.String(""), + expectedValue: types.StringValue(""), + ExpectedDiags: diagAFEmpty(), + }, + + "nil": { + source: nil, + expectedValue: types.StringNull(), + ExpectedDiags: diagAFEmpty(), + }, + } + + runTopLevelTestCases(t, testCases, runChecks{CompareDiags: true, CompareTarget: true}) +} diff --git a/internal/framework/flex/autoflex_test.go b/internal/framework/flex/autoflex_test.go index 94195dbe62f8..5aac0b96cb9a 100644 --- a/internal/framework/flex/autoflex_test.go +++ b/internal/framework/flex/autoflex_test.go @@ -3,702 +3,223 @@ package flex +// This file contains common test helpers for Autoflex tests. + import ( + "bytes" "context" - "encoding/json" - "errors" + "fmt" + "path/filepath" "reflect" - "time" + "testing" - smithydocument "github.com/aws/smithy-go/document" - "github.com/hashicorp/terraform-plugin-framework-timetypes/timetypes" + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" "github.com/hashicorp/terraform-plugin-framework/diag" - "github.com/hashicorp/terraform-plugin-framework/types" - "github.com/hashicorp/terraform-plugin-framework/types/basetypes" - fwtypes "github.com/hashicorp/terraform-provider-aws/internal/framework/types" - smithyjson "github.com/hashicorp/terraform-provider-aws/internal/json" + "github.com/hashicorp/terraform-plugin-log/tflogtest" ) -type emptyStruct struct{} - -type tfSingleStringField struct { - Field1 types.String `tfsdk:"field1"` -} - -type tfSingleStringFieldIgnore struct { - Field1 types.String `tfsdk:"field1" autoflex:"-"` -} - -type tfSingleStringFieldOmitEmpty struct { - Field1 types.String `tfsdk:"field1" autoflex:",omitempty"` -} - -type tfSingleStringFieldLegacy struct { - Field1 types.String `tfsdk:"field1" 
autoflex:",legacy"` -} - -type tfSingleFloat64Field struct { - Field1 types.Float64 `tfsdk:"field1"` -} - -type tfSingleFloat64FieldLegacy struct { - Field1 types.Float64 `tfsdk:"field1" autoflex:",legacy"` -} - -type tfSingleFloat32Field struct { - Field1 types.Float32 `tfsdk:"field1"` -} - -type tfSingleFloat32FieldLegacy struct { - Field1 types.Float32 `tfsdk:"field1" autoflex:",legacy"` -} - -type tfSingleInt64Field struct { - Field1 types.Int64 `tfsdk:"field1"` -} - -type tfSingleInt64FieldLegacy struct { - Field1 types.Int64 `tfsdk:"field1" autoflex:",legacy"` -} - -type tfSingleInt32Field struct { - Field1 types.Int32 `tfsdk:"field1"` -} - -type tfSingleInt32FieldLegacy struct { - Field1 types.Int32 `tfsdk:"field1" autoflex:",legacy"` -} - -type tfSingleBoolField struct { - Field1 types.Bool `tfsdk:"field1"` -} - -type tfSingleBoolFieldLegacy struct { - Field1 types.Bool `tfsdk:"field1" autoflex:",legacy"` -} - -// All primitive types. -type tfAllThePrimitiveFields struct { - Field1 types.String `tfsdk:"field1"` - Field2 types.String `tfsdk:"field2"` - Field3 types.Int64 `tfsdk:"field3"` - Field4 types.Int64 `tfsdk:"field4"` - Field5 types.Int64 `tfsdk:"field5"` - Field6 types.Int64 `tfsdk:"field6"` - Field7 types.Float64 `tfsdk:"field7"` - Field8 types.Float64 `tfsdk:"field8"` - Field9 types.Float64 `tfsdk:"field9"` - Field10 types.Float64 `tfsdk:"field10"` - Field11 types.Bool `tfsdk:"field11"` - Field12 types.Bool `tfsdk:"field12"` -} - -type awsAllThePrimitiveFields struct { - Field1 string - Field2 *string - Field3 int32 - Field4 *int32 - Field5 int64 - Field6 *int64 - Field7 float32 - Field8 *float32 - Field9 float64 - Field10 *float64 - Field11 bool - Field12 *bool -} - -// List/Set/Map of primitive types. 
-type tfCollectionsOfPrimitiveElements struct { - Field1 types.List `tfsdk:"field1"` - Field2 types.List `tfsdk:"field2"` - Field3 types.Set `tfsdk:"field3"` - Field4 types.Set `tfsdk:"field4"` - Field5 types.Map `tfsdk:"field5"` - Field6 types.Map `tfsdk:"field6"` -} - -// List/Set/Map of string types. -type tfTypedCollectionsOfPrimitiveElements struct { - Field1 fwtypes.ListValueOf[types.String] `tfsdk:"field1"` - Field2 fwtypes.ListValueOf[types.String] `tfsdk:"field2"` - Field3 fwtypes.SetValueOf[types.String] `tfsdk:"field3"` - Field4 fwtypes.SetValueOf[types.String] `tfsdk:"field4"` - Field5 fwtypes.MapValueOf[types.String] `tfsdk:"field5"` - Field6 fwtypes.MapValueOf[types.String] `tfsdk:"field6"` -} - -type awsCollectionsOfPrimitiveElements struct { - Field1 []string - Field2 []*string - Field3 []string - Field4 []*string - Field5 map[string]string - Field6 map[string]*string -} - -type awsSimpleStringValueSlice struct { - Field1 []string -} - -type tfSimpleSet struct { - Field1 types.Set `tfsdk:"field1"` -} - -type tfSimpleSetLegacy struct { - Field1 types.Set `tfsdk:"field1" autoflex:",legacy"` -} - -type tfSimpleList struct { - Field1 types.List `tfsdk:"field1"` -} - -type tfSimpleListLegacy struct { - Field1 types.List `tfsdk:"field1" autoflex:",legacy"` -} - -type tfListOfNestedObject struct { - Field1 fwtypes.ListNestedObjectValueOf[tfSingleStringField] `tfsdk:"field1"` -} - -type tfListOfNestedObjectLegacy struct { - Field1 fwtypes.ListNestedObjectValueOf[tfSingleStringField] `tfsdk:"field1" autoflex:",legacy"` -} - -type tfSetOfNestedObject struct { - Field1 fwtypes.SetNestedObjectValueOf[tfSingleStringField] `tfsdk:"field1"` -} - -type tfSetOfNestedObjectLegacy struct { - Field1 fwtypes.SetNestedObjectValueOf[tfSingleStringField] `tfsdk:"field1" autoflex:",legacy"` -} - -type tfComplexValue struct { - Field1 types.String `tfsdk:"field1"` - Field2 fwtypes.ListNestedObjectValueOf[tfListOfNestedObject] `tfsdk:"field2"` - Field3 types.Map 
`tfsdk:"field3"` - Field4 fwtypes.SetNestedObjectValueOf[tfSingleInt64Field] `tfsdk:"field4"` -} - -type awsComplexValue struct { - Field1 string - Field2 *awsNestedObjectPointer - Field3 map[string]*string - Field4 []awsSingleInt64Value -} - -// tfSingluarListOfNestedObjects testing for idiomatic singular on TF side but plural on AWS side -type tfSingluarListOfNestedObjects struct { - Field fwtypes.ListNestedObjectValueOf[tfSingleStringField] `tfsdk:"field"` -} - -type awsPluralSliceOfNestedObjectValues struct { - Fields []awsSingleStringValue -} - -type tfSpecialPluralization struct { - City types.List `tfsdk:"city"` - Coach types.List `tfsdk:"coach"` - Tomato types.List `tfsdk:"tomato"` - Vertex types.List `tfsdk:"vertex"` - Criterion types.List `tfsdk:"criterion"` - Datum types.List `tfsdk:"datum"` - Hive types.List `tfsdk:"hive"` -} - -type awsSpecialPluralization struct { - Cities []*string - Coaches []*string - Tomatoes []*string - Vertices []*string - Criteria []*string - Data []*string - Hives []*string -} - -// tfCaptializationDiff testing for fields that only differ by capitalization -type tfCaptializationDiff struct { - FieldURL types.String `tfsdk:"field_url"` -} - -// awsCapitalizationDiff testing for fields that only differ by capitalization -type awsCapitalizationDiff struct { - FieldUrl *string -} - -type awsSingleBoolValue struct { - Field1 bool -} - -type awsSingleBoolPointer struct { - Field1 *bool -} - -type awsSingleStringValue struct { - Field1 string -} - -type awsSingleStringPointer struct { - Field1 *string -} - -type awsSingleByteSliceValue struct { - Field1 []byte -} - -type awsSingleFloat64Value struct { - Field1 float64 -} - -type awsSingleFloat64Pointer struct { - Field1 *float64 -} - -type awsSingleFloat32Value struct { - Field1 float32 -} - -type awsSingleFloat32Pointer struct { - Field1 *float32 -} - -type awsSingleInt64Value struct { - Field1 int64 -} - -type awsSingleInt64Pointer struct { - Field1 *int64 -} - -type 
awsSingleInt32Value struct { - Field1 int32 -} - -type awsSingleInt32Pointer struct { - Field1 *int32 -} - -type awsNestedObjectPointer struct { - Field1 *awsSingleStringValue -} - -type awsSliceOfNestedObjectPointers struct { - Field1 []*awsSingleStringValue -} - -type awsSliceOfNestedObjectValues struct { - Field1 []awsSingleStringValue -} - -// tfFieldNamePrefix has no prefix to test matching on prefix -type tfFieldNamePrefix struct { - Name types.String `tfsdk:"name"` -} - -// awsFieldNamePrefix has prefix to test matching on prefix -type awsFieldNamePrefix struct { - IntentName *string -} - -type tfFieldNamePrefixInsensitive struct { - ID types.String `tfsdk:"id"` -} - -type awsFieldNamePrefixInsensitive struct { - ClientId *string -} - -// tfFieldNameSuffix has no suffix to test matching on suffix -type tfFieldNameSuffix struct { - Policy types.String `tfsdk:"policy"` -} - -// awsFieldNameSuffix has suffix to test matching on suffix -type awsFieldNameSuffix struct { - PolicyConfig *string -} - -type tfRFC3339Time struct { - CreationDateTime timetypes.RFC3339 `tfsdk:"creation_date_time"` -} - -type awsRFC3339TimePointer struct { - CreationDateTime *time.Time -} - -type awsRFC3339TimeValue struct { - CreationDateTime time.Time -} - -type tfMapOfString struct { - FieldInner fwtypes.MapValueOf[basetypes.StringValue] `tfsdk:"field_inner"` -} - -type tfNestedMapOfString struct { - FieldOuter fwtypes.ListNestedObjectValueOf[tfMapOfString] `tfsdk:"field_outer"` -} - -type awsNestedMapOfString struct { - FieldOuter awsMapOfString +type autoFlexTestCase struct { + Options []AutoFlexOptionsFunc + Source any + Target any + ExpectedDiags diag.Diagnostics + WantTarget any + WantDiff bool } -type tfMapOfMapOfString struct { - Field1 fwtypes.MapValueOf[fwtypes.MapValueOf[types.String]] `tfsdk:"field1"` -} - -type awsMapOfString struct { - FieldInner map[string]string -} - -type awsMapOfMapOfString struct { - Field1 map[string]map[string]string -} - -type 
awsMapOfMapOfStringPointer struct { - Field1 map[string]map[string]*string -} +type autoFlexTestCases map[string]autoFlexTestCase -type awsMapOfStringPointer struct { - FieldInner map[string]*string +type runChecks struct { + CompareDiags bool + CompareTarget bool + GoldenLogs bool // use golden snapshots for log comparison } -type testEnum string - -// Enum values for SlotShape -const ( - testEnumScalar testEnum = "Scalar" - testEnumList testEnum = "List" -) - -func (testEnum) Values() []testEnum { - return []testEnum{ - testEnumScalar, - testEnumList, +// diagAF is a testing helper that creates a diag.Diagnostics containing +// a single diagnostic generated by calling diagFunc with reflect.TypeFor[T](). +func diagAF[T any](diagFunc func(reflect.Type) diag.ErrorDiagnostic) diag.Diagnostics { + return diag.Diagnostics{ + diagFunc(reflect.TypeFor[T]()), } } -type tfPluralAndSingularFields struct { - Value types.String `tfsdk:"Value"` -} - -type awsPluralAndSingularFields struct { - Value string - Values string -} - -type tfSingleARNField struct { - Field1 fwtypes.ARN `tfsdk:"field1"` -} - -type tfMapBlockList struct { - MapBlock fwtypes.ListNestedObjectValueOf[tfMapBlockElement] `tfsdk:"map_block"` -} - -type tfMapBlockSet struct { - MapBlock fwtypes.SetNestedObjectValueOf[tfMapBlockElement] `tfsdk:"map_block"` -} - -type awsMapBlockValues struct { - MapBlock map[string]awsMapBlockElement -} - -type awsMapBlockPointers struct { - MapBlock map[string]*awsMapBlockElement -} - -type tfMapBlockElement struct { - MapBlockKey types.String `tfsdk:"map_block_key"` - Attr1 types.String `tfsdk:"attr1"` - Attr2 types.String `tfsdk:"attr2"` -} - -type awsMapBlockElement struct { - Attr1 string - Attr2 string -} - -type tfMapBlockListEnumKey struct { - MapBlock fwtypes.ListNestedObjectValueOf[tfMapBlockElementEnumKey] `tfsdk:"map_block"` -} - -type tfMapBlockElementEnumKey struct { - MapBlockKey fwtypes.StringEnum[testEnum] `tfsdk:"map_block_key"` - Attr1 types.String 
`tfsdk:"attr1"` - Attr2 types.String `tfsdk:"attr2"` -} - -type tfMapBlockListNoKey struct { - MapBlock fwtypes.ListNestedObjectValueOf[tfMapBlockElementNoKey] `tfsdk:"map_block"` -} - -type tfMapBlockElementNoKey struct { - Attr1 types.String `tfsdk:"attr1"` - Attr2 types.String `tfsdk:"attr2"` -} - -var _ smithyjson.JSONStringer = (*testJSONDocument)(nil) -var _ smithydocument.Marshaler = (*testJSONDocument)(nil) - -type testJSONDocument struct { - Value any -} - -func newTestJSONDocument(v any) smithyjson.JSONStringer { - return &testJSONDocument{Value: v} -} - -func (m *testJSONDocument) UnmarshalSmithyDocument(v any) error { - data, err := json.Marshal(m.Value) - if err != nil { - return err +// diagAFNil is a testing helper that creates a diag.Diagnostics containing +// a single diagnostic generated by calling diagFunc with nil. +// Use this for test cases where the type is unknown/nil (e.g., nil source/target). +func diagAFNil(diagFunc func(reflect.Type) diag.ErrorDiagnostic) diag.Diagnostics { + return diag.Diagnostics{ + diagFunc(nil), } - return json.Unmarshal(data, v) -} - -func (m *testJSONDocument) MarshalSmithyDocument() ([]byte, error) { - return json.Marshal(m.Value) -} - -var _ smithyjson.JSONStringer = &testJSONDocumentError{} - -type testJSONDocumentError struct{} - -func (m *testJSONDocumentError) UnmarshalSmithyDocument(v any) error { - return errUnmarshallSmithyDocument -} - -func (m *testJSONDocumentError) MarshalSmithyDocument() ([]byte, error) { - return nil, errMarshallSmithyDocument -} - -var ( - errUnmarshallSmithyDocument = errors.New("test unmarshal error") - errMarshallSmithyDocument = errors.New("test marshal error") -) - -type awsJSONStringer struct { - Field1 smithyjson.JSONStringer `json:"field1"` -} - -type tfJSONStringer struct { - Field1 fwtypes.SmithyJSON[smithyjson.JSONStringer] `tfsdk:"field1"` -} - -type tfListNestedObject[T any] struct { - Field1 fwtypes.ListNestedObjectValueOf[T] `tfsdk:"field1"` -} - -type 
tfSetNestedObject[T any] struct { - Field1 fwtypes.SetNestedObjectValueOf[T] `tfsdk:"field1"` -} - -type tfObjectValue[T any] struct { - Field1 fwtypes.ObjectValueOf[T] `tfsdk:"field1"` -} - -type tfInterfaceFlexer struct { - Field1 types.String `tfsdk:"field1"` -} - -var ( - _ Expander = tfInterfaceFlexer{} - _ Flattener = &tfInterfaceFlexer{} -) - -func (t tfInterfaceFlexer) Expand(ctx context.Context) (any, diag.Diagnostics) { - return &awsInterfaceInterfaceImpl{ - AWSField: StringValueFromFramework(ctx, t.Field1), - }, nil } -func (t *tfInterfaceFlexer) Flatten(ctx context.Context, v any) (diags diag.Diagnostics) { - switch val := v.(type) { - case awsInterfaceInterfaceImpl: - t.Field1 = StringValueToFramework(ctx, val.AWSField) - return diags - - default: - return diags +// diagAF2 is a testing helper that creates a diag.Diagnostics containing +// a single diagnostic generated by calling diagFunc with reflect.TypeFor[T1]() and reflect.TypeFor[T2](). +// Use this for diagnostic functions that take two type parameters. 
+func diagAF2[T1, T2 any](diagFunc func(reflect.Type, reflect.Type) diag.ErrorDiagnostic) diag.Diagnostics { + return diag.Diagnostics{ + diagFunc(reflect.TypeFor[T1](), reflect.TypeFor[T2]()), } } -type tfInterfaceIncompatibleExpander struct { - Field1 types.String `tfsdk:"field1"` -} - -var _ Expander = tfInterfaceIncompatibleExpander{} - -func (t tfInterfaceIncompatibleExpander) Expand(ctx context.Context) (any, diag.Diagnostics) { - return &awsInterfaceIncompatibleImpl{ - AWSField: StringValueFromFramework(ctx, t.Field1), - }, nil -} - -type awsInterfaceIncompatibleImpl struct { - AWSField string -} - -type awsInterfaceSingle struct { - Field1 awsInterfaceInterface -} - -type awsInterfaceSlice struct { - Field1 []awsInterfaceInterface -} - -type awsInterfaceInterface interface { - isAWSInterfaceInterface() -} - -type awsInterfaceInterfaceImpl struct { - AWSField string -} - -var _ awsInterfaceInterface = &awsInterfaceInterfaceImpl{} - -func (t *awsInterfaceInterfaceImpl) isAWSInterfaceInterface() {} // nosemgrep:ci.aws-in-func-name - -type tfFlexer struct { - Field1 types.String `tfsdk:"field1"` -} - -var ( - _ Expander = tfFlexer{} - _ Flattener = &tfFlexer{} -) - -func (t tfFlexer) Expand(ctx context.Context) (any, diag.Diagnostics) { - return &awsExpander{ - AWSField: StringValueFromFramework(ctx, t.Field1), - }, nil -} - -func (t *tfFlexer) Flatten(ctx context.Context, v any) (diags diag.Diagnostics) { - switch val := v.(type) { - case awsExpander: - t.Field1 = StringValueToFramework(ctx, val.AWSField) - return diags - - default: - return diags +// diagAFTypeErr is a testing helper that creates a diag.Diagnostics containing +// a single diagnostic generated by calling diagFunc with reflect.TypeFor[T]() and the provided error. +// Use this for diagnostic functions that take a type and an error parameter. 
+func diagAFTypeErr[T any](diagFunc func(reflect.Type, error) diag.ErrorDiagnostic, err error) diag.Diagnostics { + return diag.Diagnostics{ + diagFunc(reflect.TypeFor[T](), err), } } -type tfExpanderListNestedObject tfListNestedObject[tfFlexer] - -type tfExpanderSetNestedObject tfSetNestedObject[tfFlexer] - -type tfExpanderObjectValue tfObjectValue[tfFlexer] - -type tfTypedExpanderListNestedObject tfListNestedObject[tfTypedExpander] - -type tfTypedExpanderSetNestedObject tfSetNestedObject[tfTypedExpander] - -type tfTypedExpanderObjectValue tfObjectValue[tfTypedExpander] - -type tfExpanderToString struct { - Field1 types.String `tfsdk:"field1"` -} - -var _ Expander = tfExpanderToString{} - -func (t tfExpanderToString) Expand(ctx context.Context) (any, diag.Diagnostics) { - return StringValueFromFramework(ctx, t.Field1), nil -} - -type tfExpanderToNil struct { - Field1 types.String `tfsdk:"field1"` -} - -var _ Expander = tfExpanderToNil{} - -func (t tfExpanderToNil) Expand(ctx context.Context) (any, diag.Diagnostics) { - return nil, nil -} - -type tfTypedExpander struct { - Field1 types.String `tfsdk:"field1"` -} - -var _ TypedExpander = tfTypedExpander{} - -func (t tfTypedExpander) ExpandTo(ctx context.Context, targetType reflect.Type) (any, diag.Diagnostics) { - return &awsExpander{ - AWSField: StringValueFromFramework(ctx, t.Field1), - }, nil -} - -type tfTypedExpanderToNil struct { - Field1 types.String `tfsdk:"field1"` +// diagAFEmpty is a testing helper that creates an empty diag.Diagnostics slice. +// Use this for test cases where no diagnostics are expected. 
+func diagAFEmpty() diag.Diagnostics { + return diag.Diagnostics{} } -var _ TypedExpander = tfTypedExpanderToNil{} - -func (t tfTypedExpanderToNil) ExpandTo(ctx context.Context, targetType reflect.Type) (any, diag.Diagnostics) { - return nil, nil +// setFieldValue sets a field value in a struct using reflection +func setFieldValue(structPtr any, fieldName string, value any) { + v := reflect.ValueOf(structPtr).Elem() + field := v.FieldByName(fieldName) + if field.IsValid() && field.CanSet() { + field.Set(reflect.ValueOf(value)) + } } -type tfInterfaceTypedExpander struct { - Field1 types.String `tfsdk:"field1"` +func runAutoExpandTestCases(t *testing.T, testCases autoFlexTestCases, checks runChecks) { + t.Helper() + for testName, tc := range testCases { + t.Run(testName, func(t *testing.T) { + t.Parallel() + + ctx := context.Background() + var buf bytes.Buffer + ctx = tflogtest.RootLogger(ctx, &buf) + ctx = registerTestingLogger(ctx) + + diags := Expand(ctx, tc.Source, tc.Target, tc.Options...) 
+ + if checks.CompareDiags { + if diff := cmp.Diff(diags, tc.ExpectedDiags); diff != "" { + t.Errorf("unexpected diagnostics difference: %s", diff) + } + } + + if checks.GoldenLogs { + lines, err := tflogtest.MultilineJSONDecode(&buf) + if err != nil { + t.Fatalf("Expand: decoding log lines: %s", err) + } + normalizedLines := normalizeLogs(lines) + + goldenFileName := autoGenerateGoldenPath(t, t.Name(), testName) + goldenPath := filepath.Join("testdata", goldenFileName) + compareWithGolden(t, goldenPath, normalizedLines) + } + + if checks.CompareTarget && !diags.HasError() { + if diff := cmp.Diff(tc.Target, tc.WantTarget); diff != "" { + t.Errorf("unexpected diff (+wanted, -got): %s", diff) + } + } + }) + } } -var _ TypedExpander = tfInterfaceTypedExpander{} - -func (t tfInterfaceTypedExpander) ExpandTo(ctx context.Context, targetType reflect.Type) (any, diag.Diagnostics) { - switch targetType { - case reflect.TypeFor[awsInterfaceInterface](): - return &awsInterfaceInterfaceImpl{ - AWSField: StringValueFromFramework(ctx, t.Field1), - }, nil +func runAutoFlattenTestCases(t *testing.T, testCases autoFlexTestCases, checks runChecks, opts ...cmp.Option) { + t.Helper() + + for testName, testCase := range testCases { + t.Run(testName, func(t *testing.T) { + t.Parallel() + + ctx := context.Background() + var buf bytes.Buffer + ctx = tflogtest.RootLogger(ctx, &buf) + ctx = registerTestingLogger(ctx) + + diags := Flatten(ctx, testCase.Source, testCase.Target, testCase.Options...) 
+ + if checks.CompareDiags { + if diff := cmp.Diff(diags, testCase.ExpectedDiags); diff != "" { + t.Errorf("unexpected diagnostics difference: %s", diff) + } + } + + if checks.GoldenLogs { + lines, err := tflogtest.MultilineJSONDecode(&buf) + if err != nil { + t.Fatalf("Flatten: decoding log lines: %s", err) + } + normalizedLines := normalizeLogs(lines) + + goldenFileName := autoGenerateGoldenPath(t, t.Name(), testName) + goldenPath := filepath.Join("testdata", goldenFileName) + compareWithGolden(t, goldenPath, normalizedLines) + } + + if checks.CompareTarget && !diags.HasError() { + less := func(a, b any) bool { return fmt.Sprintf("%+v", a) < fmt.Sprintf("%+v", b) } + if diff := cmp.Diff(testCase.Target, testCase.WantTarget, append(opts, cmpopts.SortSlices(less))...); diff != "" { + if !testCase.WantDiff { + t.Errorf("unexpected diff (+wanted, -got): %s", diff) + } + } + } + }) } - - return nil, nil } -type tfInterfaceIncompatibleTypedExpander struct { - Field1 types.String `tfsdk:"field1"` +// Top-level tests need a concrete target type for some reason when calling `cmp.Diff` +type toplevelTestCase[Tsource, Ttarget any] struct { + source Tsource + expectedValue Ttarget + ExpectedDiags diag.Diagnostics } -var _ TypedExpander = tfInterfaceIncompatibleTypedExpander{} +type toplevelTestCases[Tsource, Ttarget any] map[string]toplevelTestCase[Tsource, Ttarget] -func (t tfInterfaceIncompatibleTypedExpander) ExpandTo(ctx context.Context, targetType reflect.Type) (any, diag.Diagnostics) { - return &awsInterfaceIncompatibleImpl{ - AWSField: StringValueFromFramework(ctx, t.Field1), - }, nil -} +func runTopLevelTestCases[Tsource, Ttarget any](t *testing.T, testCases toplevelTestCases[Tsource, Ttarget], checks runChecks) { + t.Helper() -type awsExpander struct { - AWSField string -} + for testName, testCase := range testCases { + t.Run(testName, func(t *testing.T) { + t.Parallel() -type awsExpanderIncompatible struct { - Incompatible int -} + ctx := context.Background() -type 
awsExpanderSingleStruct struct { - Field1 awsExpander -} + var buf bytes.Buffer + ctx = tflogtest.RootLogger(ctx, &buf) -type awsExpanderSinglePtr struct { - Field1 *awsExpander -} + ctx = registerTestingLogger(ctx) -type awsExpanderStructSlice struct { - Field1 []awsExpander -} + var target Ttarget + diags := Flatten(ctx, testCase.source, &target) -type awsExpanderPtrSlice struct { - Field1 []*awsExpander -} + if checks.CompareDiags { + if diff := cmp.Diff(diags, testCase.ExpectedDiags); diff != "" { + t.Errorf("unexpected diagnostics difference: %s", diff) + } + } -type tfListOfStringEnum struct { - Field1 fwtypes.ListValueOf[fwtypes.StringEnum[testEnum]] `tfsdk:"field1"` -} + if checks.GoldenLogs { + lines, err := tflogtest.MultilineJSONDecode(&buf) + if err != nil { + t.Fatalf("Flatten: decoding log lines: %s", err) + } + normalizedLines := normalizeLogs(lines) -type tfSetOfStringEnum struct { - Field1 fwtypes.SetValueOf[fwtypes.StringEnum[testEnum]] `tfsdk:"field1"` -} + goldenFileName := autoGenerateGoldenPath(t, t.Name(), testName) + goldenPath := filepath.Join("testdata", goldenFileName) + compareWithGolden(t, goldenPath, normalizedLines) + } -type awsSliceOfStringEnum struct { - Field1 []testEnum + if checks.CompareTarget && !diags.HasError() { + less := func(a, b any) bool { return fmt.Sprintf("%+v", a) < fmt.Sprintf("%+v", b) } + if diff := cmp.Diff(target, testCase.expectedValue, cmpopts.SortSlices(less)); diff != "" { + t.Errorf("unexpected diff (+wanted, -got): %s", diff) + } + } + }) + } } diff --git a/internal/framework/flex/autoflex_xml_compat_test.go b/internal/framework/flex/autoflex_xml_compat_test.go new file mode 100644 index 000000000000..435ca8e00854 --- /dev/null +++ b/internal/framework/flex/autoflex_xml_compat_test.go @@ -0,0 +1,338 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package flex + +// Tests AutoFlex's Expand/Flatten of AWS API XML wrappers (Items/Quantity). 
+ +import ( + "context" + "reflect" + "testing" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/types" + fwtypes "github.com/hashicorp/terraform-provider-aws/internal/framework/types" +) + +// AWS SDK types that mirror CloudFront function association patterns +type FunctionAssociation struct { + EventType string `json:"EventType"` + FunctionARN *string `json:"FunctionARN"` +} + +type FunctionAssociations struct { + Quantity *int32 + Items []FunctionAssociation +} + +// Terraform model types +type FunctionAssociationTF struct { + EventType types.String `tfsdk:"event_type"` + FunctionARN types.String `tfsdk:"function_arn"` +} + +type DistributionConfigTF struct { + FunctionAssociations fwtypes.SetNestedObjectValueOf[FunctionAssociationTF] `tfsdk:"function_associations"` +} + +type DistributionConfigAWS struct { + FunctionAssociations *FunctionAssociations +} + +func TestExpandXMLWrapper(t *testing.T) { + t.Parallel() + + ctx := context.Background() + + testCases := autoFlexTestCases{ + "valid function associations": { + Source: DistributionConfigTF{ + FunctionAssociations: fwtypes.NewSetNestedObjectValueOfSliceMust( + ctx, + []*FunctionAssociationTF{ + { + EventType: types.StringValue("viewer-request"), + FunctionARN: types.StringValue("arn:aws:cloudfront::123456789012:function/test-function-1"), + }, + { + EventType: types.StringValue("viewer-response"), + FunctionARN: types.StringValue("arn:aws:cloudfront::123456789012:function/test-function-2"), + }, + }, + ), + }, + Target: &DistributionConfigAWS{}, + WantTarget: &DistributionConfigAWS{ + FunctionAssociations: &FunctionAssociations{ + Quantity: aws.Int32(2), + Items: []FunctionAssociation{ + { + EventType: "viewer-request", + FunctionARN: aws.String("arn:aws:cloudfront::123456789012:function/test-function-1"), + }, + { + EventType: "viewer-response", + FunctionARN: 
aws.String("arn:aws:cloudfront::123456789012:function/test-function-2"), + }, + }, + }, + }, + }, + "empty function associations": { + Source: DistributionConfigTF{ + FunctionAssociations: fwtypes.NewSetNestedObjectValueOfSliceMust( + ctx, + []*FunctionAssociationTF{}, + ), + }, + Target: &DistributionConfigAWS{}, + WantTarget: &DistributionConfigAWS{ + FunctionAssociations: &FunctionAssociations{ + Quantity: aws.Int32(0), + Items: []FunctionAssociation{}, + }, + }, + }, + "single function association": { + Source: DistributionConfigTF{ + FunctionAssociations: fwtypes.NewSetNestedObjectValueOfSliceMust( + ctx, + []*FunctionAssociationTF{ + { + EventType: types.StringValue("origin-request"), + FunctionARN: types.StringValue("arn:aws:cloudfront::123456789012:function/origin-function"), + }, + }, + ), + }, + Target: &DistributionConfigAWS{}, + WantTarget: &DistributionConfigAWS{ + FunctionAssociations: &FunctionAssociations{ + Quantity: aws.Int32(1), + Items: []FunctionAssociation{ + { + EventType: "origin-request", + FunctionARN: aws.String("arn:aws:cloudfront::123456789012:function/origin-function"), + }, + }, + }, + }, + }, + } + + runAutoExpandTestCases(t, testCases, runChecks{CompareDiags: true, CompareTarget: true, GoldenLogs: true}) +} + +// Test XML wrapper expansion for direct struct (not pointer to struct) +type DirectXMLWrapper struct { + Items []string + Quantity *int32 +} + +type DirectWrapperTF struct { + Items fwtypes.SetValueOf[types.String] `tfsdk:"items"` +} + +type DirectWrapperAWS struct { + Items DirectXMLWrapper +} + +func TestExpandXMLWrapperDirect(t *testing.T) { + t.Parallel() + + ctx := context.Background() + + testCases := autoFlexTestCases{ + "direct xml wrapper": { + Source: DirectWrapperTF{ + Items: fwtypes.NewSetValueOfMust[types.String](ctx, []attr.Value{ + types.StringValue("item1"), + types.StringValue("item2"), + }), + }, + Target: &DirectWrapperAWS{}, + WantTarget: &DirectWrapperAWS{ + Items: DirectXMLWrapper{ + Items: 
[]string{"item1", "item2"}, + Quantity: aws.Int32(2), + }, + }, + }, + } + + runAutoExpandTestCases(t, testCases, runChecks{CompareDiags: true, CompareTarget: true, GoldenLogs: true}) +} + +func TestIsXMLWrapperStruct(t *testing.T) { + t.Parallel() + + testCases := []struct { + name string + input any + expected bool + }{ + { + name: "valid XML wrapper", + input: FunctionAssociations{}, + expected: true, + }, + { + name: "valid XML wrapper with slice of strings", + input: DirectXMLWrapper{}, + expected: true, + }, + { + name: "not a struct", + input: "string", + expected: false, + }, + { + name: "struct without Items field", + input: struct{ Quantity *int32 }{}, + expected: false, + }, + { + name: "struct without Quantity field", + input: struct{ Items []string }{}, + expected: false, + }, + { + name: "struct with wrong Quantity type", + input: struct { + Items []string + Quantity int32 + }{}, + expected: false, + }, + { + name: "struct with Items not a slice", + input: struct { + Items string + Quantity *int32 + }{}, + expected: false, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + result := isXMLWrapperStruct(reflect.TypeOf(tc.input)) + if result != tc.expected { + t.Errorf("Expected %v, got %v", tc.expected, result) + } + }) + } +} + +// Mock AWS types with XML wrapper pattern (for flattening - AWS to TF) +type awsStatusCodesForFlatten struct { + Items []int32 + Quantity *int32 +} + +type awsHeadersForFlatten struct { + Items []string + Quantity *int32 +} + +// CloudFront FunctionAssociation test types +type awsFunctionAssociationsForFlatten struct { + Items []FunctionAssociation `json:"Items"` + Quantity *int32 `json:"Quantity"` +} + +type tfFunctionAssociationsModelForFlatten struct { + FunctionAssociations fwtypes.SetNestedObjectValueOf[FunctionAssociationTF] `tfsdk:"function_associations" autoflex:",wrapper=items"` +} + +// TF model types with wrapper tags (for flattening - AWS to TF) +type 
tfStatusCodesModelForFlatten struct { + StatusCodes fwtypes.SetValueOf[types.Int64] `tfsdk:"status_codes" autoflex:",wrapper=items"` +} + +type tfHeadersModelForFlatten struct { + Headers fwtypes.ListValueOf[types.String] `tfsdk:"headers" autoflex:",wrapper=items"` +} + +func TestFlattenXMLWrapper(t *testing.T) { + t.Parallel() + + ctx := context.Background() + + testCases := autoFlexTestCases{ + "int32 slice to set": { + Source: awsStatusCodesForFlatten{ + Items: []int32{400, 404}, + Quantity: aws.Int32(2), + }, + Target: &tfStatusCodesModelForFlatten{}, + WantTarget: &tfStatusCodesModelForFlatten{ + StatusCodes: fwtypes.NewSetValueOfMust[types.Int64](ctx, []attr.Value{ + types.Int64Value(400), + types.Int64Value(404), + }), + }, + }, + "string slice to list": { + Source: awsHeadersForFlatten{ + Items: []string{"accept", "content-type"}, + Quantity: aws.Int32(2), + }, + Target: &tfHeadersModelForFlatten{}, + WantTarget: &tfHeadersModelForFlatten{ + Headers: fwtypes.NewListValueOfMust[types.String](ctx, []attr.Value{ + types.StringValue("accept"), + types.StringValue("content-type"), + }), + }, + }, + "complex type - function associations": { + Source: awsFunctionAssociationsForFlatten{ + Items: []FunctionAssociation{ + { + EventType: "viewer-request", + FunctionARN: aws.String("arn:aws:cloudfront::123456789012:function/example-function"), + }, + { + EventType: "viewer-response", + FunctionARN: aws.String("arn:aws:cloudfront::123456789012:function/another-function"), + }, + }, + Quantity: aws.Int32(2), + }, + Target: &tfFunctionAssociationsModelForFlatten{}, + WantTarget: &tfFunctionAssociationsModelForFlatten{ + FunctionAssociations: func() fwtypes.SetNestedObjectValueOf[FunctionAssociationTF] { + elems := []*FunctionAssociationTF{ + { + EventType: types.StringValue("viewer-request"), + FunctionARN: types.StringValue("arn:aws:cloudfront::123456789012:function/example-function"), + }, + { + EventType: types.StringValue("viewer-response"), + FunctionARN: 
types.StringValue("arn:aws:cloudfront::123456789012:function/another-function"), + }, + } + setValue, _ := fwtypes.NewSetNestedObjectValueOfSlice(ctx, elems, nil) + return setValue + }(), + }, + }, + "empty slice to null set": { + Source: awsStatusCodesForFlatten{ + Items: nil, + Quantity: aws.Int32(0), + }, + Target: &tfStatusCodesModelForFlatten{}, + WantTarget: &tfStatusCodesModelForFlatten{ + StatusCodes: fwtypes.NewSetValueOfNull[types.Int64](ctx), + }, + }, + } + + runAutoFlattenTestCases(t, testCases, runChecks{CompareDiags: true, CompareTarget: true, GoldenLogs: true}) +} diff --git a/internal/framework/flex/diff.go b/internal/framework/flex/diff.go index f30cd3cc5967..b85277aaf894 100644 --- a/internal/framework/flex/diff.go +++ b/internal/framework/flex/diff.go @@ -110,6 +110,7 @@ func implementsAttrValue(field reflect.Value) bool { func skippedFields() []string { return []string{ + "Region", "Tags", "TagsAll", "Timeouts", diff --git a/internal/framework/flex/logging_test.go b/internal/framework/flex/logging_test.go index ad0b5fa04ea4..623d0664d6d9 100644 --- a/internal/framework/flex/logging_test.go +++ b/internal/framework/flex/logging_test.go @@ -4,11 +4,8 @@ package flex import ( - "maps" "reflect" "testing" - - "github.com/hashicorp/go-hclog" ) func TestFullTypeName_nil(t *testing.T) { @@ -142,507 +139,3 @@ func TestFullTypeName_mapPrimitiveKeyTypedValue(t *testing.T) { t.Fatalf("expected %q, got %q", expected, result) } } - -const ( - logModule = "provider." 
+ subsystemName -) - -func infoExpanding(sourceType, targetType reflect.Type) map[string]any { - return infoLogLine("Expanding", sourceType, targetType) -} - -func infoFlattening(sourceType, targetType reflect.Type) map[string]any { - return infoLogLine("Flattening", sourceType, targetType) -} - -func infoConverting(sourceType, targetType reflect.Type) map[string]any { - return logInfo("Converting", map[string]any{ - logAttrKeySourcePath: "", - logAttrKeySourceType: fullTypeName(sourceType), - logAttrKeyTargetPath: "", - logAttrKeyTargetType: fullTypeName(targetType), - }) -} - -func infoConvertingWithPath(sourceFieldPath string, sourceType reflect.Type, targetFieldPath string, targetType reflect.Type) map[string]any { - return logInfo("Converting", map[string]any{ - logAttrKeySourceType: fullTypeName(sourceType), - logAttrKeySourcePath: sourceFieldPath, - logAttrKeyTargetType: fullTypeName(targetType), - logAttrKeyTargetPath: targetFieldPath, - }) -} - -func traceSkipIgnoredSourceField(sourceType reflect.Type, sourceFieldName string, targetType reflect.Type) map[string]any { - return traceSkipIgnoredSourceFieldWithPath( - "", sourceType, sourceFieldName, - "", targetType, - ) -} - -func traceSkipIgnoredSourceFieldWithPath(sourcePath string, sourceType reflect.Type, sourceFieldName string, targetPath string, targetType reflect.Type) map[string]any { - return map[string]any{ - "@level": hclog.Trace.String(), - "@module": logModule, - "@message": "Skipping ignored source field", - logAttrKeySourcePath: sourcePath, - logAttrKeySourceType: fullTypeName(sourceType), - logAttrKeySourceFieldname: sourceFieldName, - logAttrKeyTargetPath: targetPath, - logAttrKeyTargetType: fullTypeName(targetType), - } -} - -func traceSkipIgnoredTargetField(sourceType reflect.Type, sourceFieldName string, targetType reflect.Type, targetFieldName string) map[string]any { - return traceSkipIgnoredTargetFieldWithPath( - "", sourceType, sourceFieldName, - "", targetType, targetFieldName, - ) 
-} - -func traceSkipIgnoredTargetFieldWithPath(sourcePath string, sourceType reflect.Type, sourceFieldName string, targetPath string, targetType reflect.Type, targetFieldName string) map[string]any { - return map[string]any{ - "@level": hclog.Trace.String(), - "@module": logModule, - "@message": "Skipping ignored target field", - logAttrKeySourcePath: sourcePath, - logAttrKeySourceType: fullTypeName(sourceType), - logAttrKeySourceFieldname: sourceFieldName, - logAttrKeyTargetPath: targetPath, - logAttrKeyTargetType: fullTypeName(targetType), - logAttrKeyTargetFieldname: targetFieldName, - } -} - -func traceSkipMapBlockKey(sourcePath string, sourceType reflect.Type, targetPath string, targetType reflect.Type) map[string]any { - return map[string]any{ - "@level": hclog.Trace.String(), - "@module": logModule, - "@message": "Skipping map block key", - logAttrKeySourcePath: sourcePath, - logAttrKeySourceType: fullTypeName(sourceType), - logAttrKeySourceFieldname: mapBlockKeyFieldName, - logAttrKeyTargetPath: targetPath, - logAttrKeyTargetType: fullTypeName(targetType), - } -} - -func traceMatchedFields(sourceFieldName string, sourceType reflect.Type, targetFieldName string, targetType reflect.Type) map[string]any { - return traceMatchedFieldsWithPath( - "", sourceFieldName, sourceType, - "", targetFieldName, targetType, - ) -} - -func traceMatchedFieldsWithPath(sourcePath, sourceFieldName string, sourceType reflect.Type, targetPath, targetFieldName string, targetType reflect.Type) map[string]any { - return map[string]any{ - "@level": hclog.Trace.String(), - "@module": logModule, - "@message": "Matched fields", - logAttrKeySourcePath: sourcePath, - logAttrKeySourceType: fullTypeName(sourceType), - logAttrKeySourceFieldname: sourceFieldName, - logAttrKeyTargetPath: targetPath, - logAttrKeyTargetType: fullTypeName(targetType), - logAttrKeyTargetFieldname: targetFieldName, - } -} - -func debugNoCorrespondingField(sourceType reflect.Type, sourceFieldName string, targetType 
reflect.Type) map[string]any { - return map[string]any{ - "@level": hclog.Debug.String(), - "@module": logModule, - "@message": "No corresponding field", - logAttrKeySourcePath: "", - logAttrKeySourceType: fullTypeName(sourceType), - logAttrKeySourceFieldname: sourceFieldName, - logAttrKeyTargetPath: "", - logAttrKeyTargetType: fullTypeName(targetType), - } -} - -func traceExpandingNullValue(sourcePath string, sourceType reflect.Type, targetPath string, targetType reflect.Type) map[string]any { - return map[string]any{ - "@level": hclog.Trace.String(), - "@module": logModule, - "@message": "Expanding null value", - logAttrKeySourcePath: sourcePath, - logAttrKeySourceType: fullTypeName(sourceType), - logAttrKeyTargetPath: targetPath, - logAttrKeyTargetType: fullTypeName(targetType), - } -} - -func traceExpandingWithElementsAs(sourcePath string, sourceType reflect.Type, sourceLen int, targetPath string, targetType reflect.Type) map[string]any { - return map[string]any{ - "@level": hclog.Trace.String(), - "@module": logModule, - "@message": "Expanding with ElementsAs", - logAttrKeySourcePath: sourcePath, - logAttrKeySourceType: fullTypeName(sourceType), - logAttrKeySourceSize: float64(sourceLen), // numbers are deserialized from JSON as float64 - logAttrKeyTargetPath: targetPath, - logAttrKeyTargetType: fullTypeName(targetType), - } -} - -func traceExpandingNestedObjectCollection(sourcePath string, sourceType reflect.Type, sourceLen int, targetPath string, targetType reflect.Type) map[string]any { - return map[string]any{ - "@level": hclog.Trace.String(), - "@module": logModule, - "@message": "Expanding nested object collection", - logAttrKeySourcePath: sourcePath, - logAttrKeySourceType: fullTypeName(sourceType), - logAttrKeySourceSize: float64(sourceLen), // numbers are deserialized from JSON as float64 - logAttrKeyTargetPath: targetPath, - logAttrKeyTargetType: fullTypeName(targetType), - } -} - -func traceFlatteningNullValue(sourcePath string, sourceType 
reflect.Type, targetPath string, targetType reflect.Type) map[string]any { - return map[string]any{ - "@level": hclog.Trace.String(), - "@module": logModule, - "@message": "Flattening null value", - logAttrKeySourcePath: sourcePath, - logAttrKeySourceType: fullTypeName(sourceType), - logAttrKeyTargetPath: targetPath, - logAttrKeyTargetType: fullTypeName(targetType), - } -} - -func traceFlatteningWithMapNull(sourcePath string, sourceType reflect.Type, targetPath string, targetType reflect.Type) map[string]any { - return map[string]any{ - "@level": hclog.Trace.String(), - "@module": logModule, - "@message": "Flattening with MapNull", - logAttrKeySourcePath: sourcePath, - logAttrKeySourceType: fullTypeName(sourceType), - logAttrKeyTargetPath: targetPath, - logAttrKeyTargetType: fullTypeName(targetType), - } -} - -func traceFlatteningMap(sourcePath string, sourceType reflect.Type, sourceLen int, targetPath string, targetType reflect.Type) map[string]any { - return map[string]any{ - "@level": hclog.Trace.String(), - "@module": logModule, - "@message": "Flattening map", - logAttrKeySourcePath: sourcePath, - logAttrKeySourceType: fullTypeName(sourceType), - logAttrKeySourceSize: float64(sourceLen), // numbers are deserialized from JSON as float64 - logAttrKeyTargetPath: targetPath, - logAttrKeyTargetType: fullTypeName(targetType), - } -} - -func traceFlatteningWithMapValue(sourcePath string, sourceType reflect.Type, sourceLen int, targetPath string, targetType reflect.Type) map[string]any { - return map[string]any{ - "@level": hclog.Trace.String(), - "@module": logModule, - "@message": "Flattening with MapValue", - logAttrKeySourcePath: sourcePath, - logAttrKeySourceType: fullTypeName(sourceType), - logAttrKeySourceSize: float64(sourceLen), // numbers are deserialized from JSON as float64 - logAttrKeyTargetPath: targetPath, - logAttrKeyTargetType: fullTypeName(targetType), - } -} - -func traceFlatteningWithSetNull(sourcePath string, sourceType reflect.Type, targetPath 
string, targetType reflect.Type) map[string]any { - return map[string]any{ - "@level": hclog.Trace.String(), - "@module": logModule, - "@message": "Flattening with SetNull", - logAttrKeySourcePath: sourcePath, - logAttrKeySourceType: fullTypeName(sourceType), - logAttrKeyTargetPath: targetPath, - logAttrKeyTargetType: fullTypeName(targetType), - } -} - -func traceFlatteningWithSetValue(sourcePath string, sourceType reflect.Type, sourceLen int, targetPath string, targetType reflect.Type) map[string]any { - return map[string]any{ - "@level": hclog.Trace.String(), - "@module": logModule, - "@message": "Flattening with SetValue", - logAttrKeySourcePath: sourcePath, - logAttrKeySourceType: fullTypeName(sourceType), - logAttrKeySourceSize: float64(sourceLen), // numbers are deserialized from JSON as float64 - logAttrKeyTargetPath: targetPath, - logAttrKeyTargetType: fullTypeName(targetType), - } -} - -func traceFlatteningWithListNull(sourcePath string, sourceType reflect.Type, targetPath string, targetType reflect.Type) map[string]any { - return map[string]any{ - "@level": hclog.Trace.String(), - "@module": logModule, - "@message": "Flattening with ListNull", - logAttrKeySourcePath: sourcePath, - logAttrKeySourceType: fullTypeName(sourceType), - logAttrKeyTargetPath: targetPath, - logAttrKeyTargetType: fullTypeName(targetType), - } -} - -func traceFlatteningWithListValue(sourcePath string, sourceType reflect.Type, sourceLen int, targetPath string, targetType reflect.Type) map[string]any { - return map[string]any{ - "@level": hclog.Trace.String(), - "@module": logModule, - "@message": "Flattening with ListValue", - logAttrKeySourcePath: sourcePath, - logAttrKeySourceType: fullTypeName(sourceType), - logAttrKeySourceSize: float64(sourceLen), // numbers are deserialized from JSON as float64 - logAttrKeyTargetPath: targetPath, - logAttrKeyTargetType: fullTypeName(targetType), - } -} - -func traceFlatteningWithNullValue(sourcePath string, sourceType reflect.Type, targetPath 
string, targetType reflect.Type) map[string]any { - return map[string]any{ - "@level": hclog.Trace.String(), - "@module": logModule, - "@message": "Flattening with NullValue", - logAttrKeySourcePath: sourcePath, - logAttrKeySourceType: fullTypeName(sourceType), - logAttrKeyTargetPath: targetPath, - logAttrKeyTargetType: fullTypeName(targetType), - } -} - -func traceFlatteningNestedObjectCollection(sourcePath string, sourceType reflect.Type, sourceLen int, targetPath string, targetType reflect.Type) map[string]any { - return map[string]any{ - "@level": hclog.Trace.String(), - "@module": logModule, - "@message": "Flattening nested object collection", - logAttrKeySourcePath: sourcePath, - logAttrKeySourceType: fullTypeName(sourceType), - logAttrKeySourceSize: float64(sourceLen), // numbers are deserialized from JSON as float64 - logAttrKeyTargetPath: targetPath, - logAttrKeyTargetType: fullTypeName(targetType), - } -} - -func traceFlatteningWithNewMapValueOf(sourcePath string, sourceType reflect.Type, sourceLen int, targetPath string, targetType reflect.Type) map[string]any { - return map[string]any{ - "@level": hclog.Trace.String(), - "@module": logModule, - "@message": "Flattening with NewMapValueOf", - logAttrKeySourcePath: sourcePath, - logAttrKeySourceType: fullTypeName(sourceType), - logAttrKeySourceSize: float64(sourceLen), // numbers are deserialized from JSON as float64 - logAttrKeyTargetPath: targetPath, - logAttrKeyTargetType: fullTypeName(targetType), - } -} - -func infoSourceImplementsFlexExpander(sourcePath string, sourceType reflect.Type, targetPath string, targetType reflect.Type) map[string]any { - return map[string]any{ - "@level": hclog.Info.String(), - "@module": logModule, - "@message": "Source implements flex.Expander", - logAttrKeySourcePath: sourcePath, - logAttrKeySourceType: fullTypeName(sourceType), - logAttrKeyTargetPath: targetPath, - logAttrKeyTargetType: fullTypeName(targetType), - } -} - -func 
infoSourceImplementsFlexTypedExpander(sourcePath string, sourceType reflect.Type, targetPath string, targetType reflect.Type) map[string]any { - return map[string]any{ - "@level": hclog.Info.String(), - "@module": logModule, - "@message": "Source implements flex.TypedExpander", - logAttrKeySourcePath: sourcePath, - logAttrKeySourceType: fullTypeName(sourceType), - logAttrKeyTargetPath: targetPath, - logAttrKeyTargetType: fullTypeName(targetType), - } -} - -func infoTargetImplementsFlexFlattener(sourcePath string, sourceType reflect.Type, targetPath string, targetType reflect.Type) map[string]any { - return map[string]any{ - "@level": hclog.Info.String(), - "@module": logModule, - "@message": "Target implements flex.Flattener", - logAttrKeySourcePath: sourcePath, - logAttrKeySourceType: fullTypeName(sourceType), - logAttrKeyTargetPath: targetPath, - logAttrKeyTargetType: fullTypeName(targetType), - } -} - -func infoSourceImplementsJSONStringer(sourcePath string, sourceType reflect.Type, targetPath string, targetType reflect.Type) map[string]any { - return map[string]any{ - "@level": hclog.Info.String(), - "@module": logModule, - "@message": "Source implements json.JSONStringer", - logAttrKeySourcePath: sourcePath, - logAttrKeySourceType: fullTypeName(sourceType), - logAttrKeyTargetPath: targetPath, - logAttrKeyTargetType: fullTypeName(targetType), - } -} - -func errorSourceDoesNotImplementAttrValue(sourcePath string, sourceType reflect.Type, targetPath string, targetType reflect.Type) map[string]any { - return map[string]any{ - "@level": hclog.Error.String(), - "@module": logModule, - "@message": "Source does not implement attr.Value", - logAttrKeySourcePath: sourcePath, - logAttrKeySourceType: fullTypeName(sourceType), - logAttrKeyTargetPath: targetPath, - logAttrKeyTargetType: fullTypeName(targetType), - } -} - -func errorSourceIsNil(sourcePath string, sourceType reflect.Type, targetPath string, targetType reflect.Type) map[string]any { - return map[string]any{ - 
"@level": hclog.Error.String(), - "@module": logModule, - "@message": "Source is nil", - logAttrKeySourcePath: sourcePath, - logAttrKeySourceType: fullTypeName(sourceType), - logAttrKeyTargetPath: targetPath, - logAttrKeyTargetType: fullTypeName(targetType), - } -} - -func errorSourceHasNoMapBlockKey(sourcePath string, sourceType reflect.Type, targetPath string, targetType reflect.Type) map[string]any { - return map[string]any{ - "@level": hclog.Error.String(), - "@module": logModule, - "@message": "Source has no map block key", - logAttrKeySourcePath: sourcePath, - logAttrKeySourceType: fullTypeName(sourceType), - logAttrKeyTargetPath: targetPath, - logAttrKeyTargetType: fullTypeName(targetType), - } -} - -func errorTargetDoesNotImplementAttrValue(sourcePath string, sourceType reflect.Type, targetPath string, targetType reflect.Type) map[string]any { - return map[string]any{ - "@level": hclog.Error.String(), - "@module": logModule, - "@message": "Target does not implement attr.Value", - logAttrKeySourcePath: sourcePath, - logAttrKeySourceType: fullTypeName(sourceType), - logAttrKeyTargetPath: targetPath, - logAttrKeyTargetType: fullTypeName(targetType), - } -} - -func errorTargetIsNil(sourcePath string, sourceType reflect.Type, targetPath string, targetType reflect.Type) map[string]any { - return map[string]any{ - "@level": hclog.Error.String(), - "@module": logModule, - "@message": "Target is nil", - logAttrKeySourcePath: sourcePath, - logAttrKeySourceType: fullTypeName(sourceType), - logAttrKeyTargetPath: targetPath, - logAttrKeyTargetType: fullTypeName(targetType), - } -} - -func errorTargetIsNotPointer(sourcePath string, sourceType reflect.Type, targetPath string, targetType reflect.Type) map[string]any { - return map[string]any{ - "@level": hclog.Error.String(), - "@module": logModule, - "@message": "Target is not a pointer", - logAttrKeySourcePath: sourcePath, - logAttrKeySourceType: fullTypeName(sourceType), - logAttrKeyTargetPath: targetPath, - 
logAttrKeyTargetType: fullTypeName(targetType), - } -} - -func errorTargetHasNoMapBlockKey(sourcePath string, sourceType reflect.Type, targetPath string, targetType reflect.Type) map[string]any { - return map[string]any{ - "@level": hclog.Error.String(), - "@module": logModule, - "@message": "Target has no map block key", - logAttrKeySourcePath: sourcePath, - logAttrKeySourceType: fullTypeName(sourceType), - logAttrKeyTargetPath: targetPath, - logAttrKeyTargetType: fullTypeName(targetType), - } -} - -func errorMarshallingJSONDocument(sourcePath string, sourceType reflect.Type, targetPath string, targetType reflect.Type, err error) map[string]any { - return map[string]any{ - "@level": hclog.Error.String(), - "@module": logModule, - "@message": "Marshalling JSON document", - logAttrKeySourcePath: sourcePath, - logAttrKeySourceType: fullTypeName(sourceType), - logAttrKeyTargetPath: targetPath, - logAttrKeyTargetType: fullTypeName(targetType), - logAttrKeyError: err.Error(), - } -} - -func errorExpandingIncompatibleTypes(sourcePath string, sourceType reflect.Type, targetPath string, targetType reflect.Type) map[string]any { - return map[string]any{ - "@level": hclog.Error.String(), - "@module": logModule, - "@message": "Expanding incompatible types", - logAttrKeySourcePath: sourcePath, - logAttrKeySourceType: fullTypeName(sourceType), - logAttrKeyTargetPath: targetPath, - logAttrKeyTargetType: fullTypeName(targetType), - } -} - -func errorFlatteningIncompatibleTypes(sourcePath string, sourceType reflect.Type, targetPath string, targetType reflect.Type) map[string]any { - return map[string]any{ - "@level": hclog.Error.String(), - "@module": logModule, - "@message": "Flattening incompatible types", - logAttrKeySourcePath: sourcePath, - logAttrKeySourceType: fullTypeName(sourceType), - logAttrKeyTargetPath: targetPath, - logAttrKeyTargetType: fullTypeName(targetType), - } -} - -func debugUsingLegacyExpander(sourcePath string, sourceType reflect.Type, targetPath string, 
targetType reflect.Type) map[string]any { - return map[string]any{ - "@level": hclog.Debug.String(), - "@module": logModule, - "@message": "Using legacy expander", - logAttrKeySourcePath: sourcePath, - logAttrKeySourceType: fullTypeName(sourceType), - logAttrKeyTargetPath: targetPath, - logAttrKeyTargetType: fullTypeName(targetType), - } -} - -func debugUsingLegacyFlattener(sourcePath string, sourceType reflect.Type, targetPath string, targetType reflect.Type) map[string]any { - return map[string]any{ - "@level": hclog.Debug.String(), - "@module": logModule, - "@message": "Using legacy flattener", - logAttrKeySourcePath: sourcePath, - logAttrKeySourceType: fullTypeName(sourceType), - logAttrKeyTargetPath: targetPath, - logAttrKeyTargetType: fullTypeName(targetType), - } -} - -func infoLogLine(message string, sourceType, targetType reflect.Type) map[string]any { - return logInfo(message, map[string]any{ - logAttrKeySourceType: fullTypeName(sourceType), - logAttrKeyTargetType: fullTypeName(targetType), - }) -} - -func logInfo(message string, attrs map[string]any) map[string]any { - result := map[string]any{ - "@level": hclog.Info.String(), - "@module": logModule, - "@message": message, - } - maps.Copy(result, attrs) - return result -} diff --git a/internal/framework/flex/set.go b/internal/framework/flex/set.go index 0b48b2d0fa16..85c43cae9f08 100644 --- a/internal/framework/flex/set.go +++ b/internal/framework/flex/set.go @@ -9,12 +9,17 @@ import ( "github.com/hashicorp/terraform-plugin-framework/attr" "github.com/hashicorp/terraform-plugin-framework/types" "github.com/hashicorp/terraform-plugin-framework/types/basetypes" + "github.com/hashicorp/terraform-provider-aws/internal/enum" fwtypes "github.com/hashicorp/terraform-provider-aws/internal/framework/types" inttypes "github.com/hashicorp/terraform-provider-aws/internal/types" ) func ExpandFrameworkStringValueSet(ctx context.Context, v basetypes.SetValuable) inttypes.Set[string] { - var output []string + return 
ExpandFrameworkStringyValueSet[string](ctx, v) +} + +func ExpandFrameworkStringyValueSet[E ~string](ctx context.Context, v basetypes.SetValuable) inttypes.Set[E] { + var output []E must(Expand(ctx, v, &output)) @@ -41,6 +46,10 @@ func FlattenFrameworkStringValueSetOfString(ctx context.Context, vs []string) fw return fwtypes.SetValueOf[basetypes.StringValue]{SetValue: FlattenFrameworkStringValueSet(ctx, vs)} } +func FlattenFrameworkStringyValueSetOfStringEnum[T enum.Valueser[T]](ctx context.Context, vs []T) fwtypes.SetOfStringEnum[T] { + return fwtypes.SetValueOf[fwtypes.StringEnum[T]]{SetValue: FlattenFrameworkStringValueSet(ctx, vs)} +} + // FlattenFrameworkStringValueSetLegacy is the Plugin Framework variant of FlattenStringValueSet. // A nil slice is converted to an empty (non-null) Set. func FlattenFrameworkStringValueSetLegacy[T ~string](_ context.Context, vs []T) types.Set { diff --git a/internal/framework/flex/tags.go b/internal/framework/flex/tags.go index e4710cb00cc2..42118c32ce75 100644 --- a/internal/framework/flex/tags.go +++ b/internal/framework/flex/tags.go @@ -49,3 +49,18 @@ func (o tagOptions) OmitEmpty() bool { func (o tagOptions) NoFlatten() bool { return o.Contains("noflatten") } + +func (o tagOptions) WrapperField() string { + if len(o) == 0 { + return "" + } + s := string(o) + for s != "" { + var option string + option, s, _ = strings.Cut(s, ",") + if name, value, found := strings.Cut(option, "="); found && name == "wrapper" { + return value + } + } + return "" +} diff --git a/internal/framework/flex/testdata/autoflex/dispatch/expand_expander/empty_list_source_and_empty_pointer_struct_target.golden b/internal/framework/flex/testdata/autoflex/dispatch/expand_expander/empty_list_source_and_empty_pointer_struct_target.golden new file mode 100644 index 000000000000..f3ff93d40a80 --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/dispatch/expand_expander/empty_list_source_and_empty_pointer_struct_target.golden @@ -0,0 +1,48 @@ +[ + { + 
"@level": "info", + "@message": "Expanding", + "@module": "provider.autoflex", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfExpanderListNestedObject", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpanderPtrSlice" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfExpanderListNestedObject", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpanderPtrSlice" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfExpanderListNestedObject", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpanderPtrSlice" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/types.ListNestedObjectValueOf[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfFlexer]", + "autoflex.target.path": "Field1", + "autoflex.target.type": "[]*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpander" + }, + { + "@level": "trace", + "@message": "Expanding nested object collection", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.size": 0, + "autoflex.source.type": 
"github.com/hashicorp/terraform-provider-aws/internal/framework/types.ListNestedObjectValueOf[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfFlexer]", + "autoflex.target.path": "Field1", + "autoflex.target.type": "[]*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpander" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/dispatch/expand_expander/empty_list_source_and_empty_struct_target.golden b/internal/framework/flex/testdata/autoflex/dispatch/expand_expander/empty_list_source_and_empty_struct_target.golden new file mode 100644 index 000000000000..6708ad340f0b --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/dispatch/expand_expander/empty_list_source_and_empty_struct_target.golden @@ -0,0 +1,48 @@ +[ + { + "@level": "info", + "@message": "Expanding", + "@module": "provider.autoflex", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfExpanderListNestedObject", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpanderStructSlice" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfExpanderListNestedObject", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpanderStructSlice" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfExpanderListNestedObject", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": 
"*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpanderStructSlice" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/types.ListNestedObjectValueOf[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfFlexer]", + "autoflex.target.path": "Field1", + "autoflex.target.type": "[]github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpander" + }, + { + "@level": "trace", + "@message": "Expanding nested object collection", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.size": 0, + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/types.ListNestedObjectValueOf[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfFlexer]", + "autoflex.target.path": "Field1", + "autoflex.target.type": "[]github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpander" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/dispatch/expand_expander/empty_set_source_and_empty_pointer_struct_target.golden b/internal/framework/flex/testdata/autoflex/dispatch/expand_expander/empty_set_source_and_empty_pointer_struct_target.golden new file mode 100644 index 000000000000..286adced823d --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/dispatch/expand_expander/empty_set_source_and_empty_pointer_struct_target.golden @@ -0,0 +1,48 @@ +[ + { + "@level": "info", + "@message": "Expanding", + "@module": "provider.autoflex", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfExpanderSetNestedObject", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpanderPtrSlice" + }, + { + "@level": "info", + "@message": "Converting", + 
"@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfExpanderSetNestedObject", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpanderPtrSlice" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfExpanderSetNestedObject", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpanderPtrSlice" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/types.SetNestedObjectValueOf[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfFlexer]", + "autoflex.target.path": "Field1", + "autoflex.target.type": "[]*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpander" + }, + { + "@level": "trace", + "@message": "Expanding nested object collection", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.size": 0, + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/types.SetNestedObjectValueOf[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfFlexer]", + "autoflex.target.path": "Field1", + "autoflex.target.type": "[]*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpander" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/dispatch/expand_expander/empty_set_source_and_empty_struct_target.golden 
b/internal/framework/flex/testdata/autoflex/dispatch/expand_expander/empty_set_source_and_empty_struct_target.golden new file mode 100644 index 000000000000..c2acc70f34cd --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/dispatch/expand_expander/empty_set_source_and_empty_struct_target.golden @@ -0,0 +1,48 @@ +[ + { + "@level": "info", + "@message": "Expanding", + "@module": "provider.autoflex", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfExpanderSetNestedObject", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpanderStructSlice" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfExpanderSetNestedObject", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpanderStructSlice" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfExpanderSetNestedObject", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpanderStructSlice" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/types.SetNestedObjectValueOf[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfFlexer]", + "autoflex.target.path": "Field1", + "autoflex.target.type": 
"[]github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpander" + }, + { + "@level": "trace", + "@message": "Expanding nested object collection", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.size": 0, + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/types.SetNestedObjectValueOf[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfFlexer]", + "autoflex.target.path": "Field1", + "autoflex.target.type": "[]github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpander" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/dispatch/expand_expander/nonempty_list_source_and_nonempty_pointer_struct_target.golden b/internal/framework/flex/testdata/autoflex/dispatch/expand_expander/nonempty_list_source_and_nonempty_pointer_struct_target.golden new file mode 100644 index 000000000000..0e21c5101c79 --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/dispatch/expand_expander/nonempty_list_source_and_nonempty_pointer_struct_target.golden @@ -0,0 +1,66 @@ +[ + { + "@level": "info", + "@message": "Expanding", + "@module": "provider.autoflex", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfExpanderListNestedObject", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpanderPtrSlice" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfExpanderListNestedObject", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpanderPtrSlice" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", 
+ "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfExpanderListNestedObject", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpanderPtrSlice" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/types.ListNestedObjectValueOf[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfFlexer]", + "autoflex.target.path": "Field1", + "autoflex.target.type": "[]*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpander" + }, + { + "@level": "trace", + "@message": "Expanding nested object collection", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.size": 2, + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/types.ListNestedObjectValueOf[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfFlexer]", + "autoflex.target.path": "Field1", + "autoflex.target.type": "[]*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpander" + }, + { + "@level": "info", + "@message": "Source implements flex.Expander", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1[0]", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfFlexer", + "autoflex.target.path": "Field1[0]", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpander" + }, + { + "@level": "info", + "@message": "Source implements flex.Expander", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1[1]", + "autoflex.source.type": 
"github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfFlexer", + "autoflex.target.path": "Field1[1]", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpander" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/dispatch/expand_expander/nonempty_list_source_and_nonempty_struct_target.golden b/internal/framework/flex/testdata/autoflex/dispatch/expand_expander/nonempty_list_source_and_nonempty_struct_target.golden new file mode 100644 index 000000000000..ce730022a36c --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/dispatch/expand_expander/nonempty_list_source_and_nonempty_struct_target.golden @@ -0,0 +1,66 @@ +[ + { + "@level": "info", + "@message": "Expanding", + "@module": "provider.autoflex", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfExpanderListNestedObject", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpanderStructSlice" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfExpanderListNestedObject", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpanderStructSlice" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfExpanderListNestedObject", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpanderStructSlice" + }, + { + "@level": "info", + "@message": 
"Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/types.ListNestedObjectValueOf[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfFlexer]", + "autoflex.target.path": "Field1", + "autoflex.target.type": "[]github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpander" + }, + { + "@level": "trace", + "@message": "Expanding nested object collection", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.size": 2, + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/types.ListNestedObjectValueOf[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfFlexer]", + "autoflex.target.path": "Field1", + "autoflex.target.type": "[]github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpander" + }, + { + "@level": "info", + "@message": "Source implements flex.Expander", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1[0]", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfFlexer", + "autoflex.target.path": "Field1[0]", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpander" + }, + { + "@level": "info", + "@message": "Source implements flex.Expander", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1[1]", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfFlexer", + "autoflex.target.path": "Field1[1]", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpander" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/dispatch/expand_expander/nonempty_set_source_and_nonempty_pointer_struct_target.golden 
b/internal/framework/flex/testdata/autoflex/dispatch/expand_expander/nonempty_set_source_and_nonempty_pointer_struct_target.golden new file mode 100644 index 000000000000..8e410ffa504f --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/dispatch/expand_expander/nonempty_set_source_and_nonempty_pointer_struct_target.golden @@ -0,0 +1,66 @@ +[ + { + "@level": "info", + "@message": "Expanding", + "@module": "provider.autoflex", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfExpanderSetNestedObject", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpanderPtrSlice" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfExpanderSetNestedObject", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpanderPtrSlice" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfExpanderSetNestedObject", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpanderPtrSlice" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/types.SetNestedObjectValueOf[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfFlexer]", + "autoflex.target.path": "Field1", + "autoflex.target.type": 
"[]*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpander" + }, + { + "@level": "trace", + "@message": "Expanding nested object collection", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.size": 2, + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/types.SetNestedObjectValueOf[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfFlexer]", + "autoflex.target.path": "Field1", + "autoflex.target.type": "[]*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpander" + }, + { + "@level": "info", + "@message": "Source implements flex.Expander", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1[0]", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfFlexer", + "autoflex.target.path": "Field1[0]", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpander" + }, + { + "@level": "info", + "@message": "Source implements flex.Expander", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1[1]", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfFlexer", + "autoflex.target.path": "Field1[1]", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpander" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/dispatch/expand_expander/nonempty_set_source_and_nonempty_struct_target.golden b/internal/framework/flex/testdata/autoflex/dispatch/expand_expander/nonempty_set_source_and_nonempty_struct_target.golden new file mode 100644 index 000000000000..038985ddb445 --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/dispatch/expand_expander/nonempty_set_source_and_nonempty_struct_target.golden @@ -0,0 +1,66 @@ +[ + { + "@level": "info", + "@message": "Expanding", + "@module": 
"provider.autoflex", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfExpanderSetNestedObject", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpanderStructSlice" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfExpanderSetNestedObject", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpanderStructSlice" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfExpanderSetNestedObject", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpanderStructSlice" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/types.SetNestedObjectValueOf[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfFlexer]", + "autoflex.target.path": "Field1", + "autoflex.target.type": "[]github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpander" + }, + { + "@level": "trace", + "@message": "Expanding nested object collection", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.size": 2, + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/types.SetNestedObjectValueOf[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfFlexer]", + 
"autoflex.target.path": "Field1", + "autoflex.target.type": "[]github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpander" + }, + { + "@level": "info", + "@message": "Source implements flex.Expander", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1[0]", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfFlexer", + "autoflex.target.path": "Field1[0]", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpander" + }, + { + "@level": "info", + "@message": "Source implements flex.Expander", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1[1]", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfFlexer", + "autoflex.target.path": "Field1[1]", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpander" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/dispatch/expand_expander/object_value_source_and_pointer_struct_target.golden b/internal/framework/flex/testdata/autoflex/dispatch/expand_expander/object_value_source_and_pointer_struct_target.golden new file mode 100644 index 000000000000..82b671f6635b --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/dispatch/expand_expander/object_value_source_and_pointer_struct_target.golden @@ -0,0 +1,47 @@ +[ + { + "@level": "info", + "@message": "Expanding", + "@module": "provider.autoflex", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfExpanderObjectValue", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpanderSinglePtr" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": 
"github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfExpanderObjectValue", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpanderSinglePtr" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfExpanderObjectValue", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpanderSinglePtr" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/types.ObjectValueOf[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfFlexer]", + "autoflex.target.path": "Field1", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpander" + }, + { + "@level": "info", + "@message": "Source implements flex.Expander", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfFlexer", + "autoflex.target.path": "Field1", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpander" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/dispatch/expand_expander/object_value_source_and_struct_target.golden b/internal/framework/flex/testdata/autoflex/dispatch/expand_expander/object_value_source_and_struct_target.golden new file mode 100644 index 000000000000..f9c5691a2a9d --- /dev/null +++ 
b/internal/framework/flex/testdata/autoflex/dispatch/expand_expander/object_value_source_and_struct_target.golden @@ -0,0 +1,47 @@ +[ + { + "@level": "info", + "@message": "Expanding", + "@module": "provider.autoflex", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfExpanderObjectValue", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpanderSingleStruct" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfExpanderObjectValue", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpanderSingleStruct" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfExpanderObjectValue", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpanderSingleStruct" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/types.ObjectValueOf[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfFlexer]", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpander" + }, + { + "@level": "info", + "@message": "Source implements flex.Expander", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": 
"github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfFlexer", + "autoflex.target.path": "Field1", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpander" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/dispatch/expand_expander/single_list_source_and_single_pointer_struct_target.golden b/internal/framework/flex/testdata/autoflex/dispatch/expand_expander/single_list_source_and_single_pointer_struct_target.golden new file mode 100644 index 000000000000..c3322509983c --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/dispatch/expand_expander/single_list_source_and_single_pointer_struct_target.golden @@ -0,0 +1,47 @@ +[ + { + "@level": "info", + "@message": "Expanding", + "@module": "provider.autoflex", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfExpanderListNestedObject", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpanderSinglePtr" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfExpanderListNestedObject", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpanderSinglePtr" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfExpanderListNestedObject", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpanderSinglePtr" + }, + { + "@level": "info", + "@message": 
"Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/types.ListNestedObjectValueOf[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfFlexer]", + "autoflex.target.path": "Field1", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpander" + }, + { + "@level": "info", + "@message": "Source implements flex.Expander", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1[0]", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfFlexer", + "autoflex.target.path": "Field1", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpander" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/dispatch/expand_expander/single_list_source_and_single_struct_target.golden b/internal/framework/flex/testdata/autoflex/dispatch/expand_expander/single_list_source_and_single_struct_target.golden new file mode 100644 index 000000000000..caec9d19cc3f --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/dispatch/expand_expander/single_list_source_and_single_struct_target.golden @@ -0,0 +1,47 @@ +[ + { + "@level": "info", + "@message": "Expanding", + "@module": "provider.autoflex", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfExpanderListNestedObject", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpanderSingleStruct" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfExpanderListNestedObject", + "autoflex.target.path": "", + "autoflex.target.type": 
"*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpanderSingleStruct" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfExpanderListNestedObject", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpanderSingleStruct" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/types.ListNestedObjectValueOf[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfFlexer]", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpander" + }, + { + "@level": "info", + "@message": "Source implements flex.Expander", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1[0]", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfFlexer", + "autoflex.target.path": "Field1", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpander" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/dispatch/expand_expander/single_set_source_and_single_pointer_struct_target.golden b/internal/framework/flex/testdata/autoflex/dispatch/expand_expander/single_set_source_and_single_pointer_struct_target.golden new file mode 100644 index 000000000000..dacd8f8ffcb9 --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/dispatch/expand_expander/single_set_source_and_single_pointer_struct_target.golden @@ -0,0 +1,47 @@ +[ + { + "@level": "info", + 
"@message": "Expanding", + "@module": "provider.autoflex", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfExpanderSetNestedObject", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpanderSinglePtr" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfExpanderSetNestedObject", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpanderSinglePtr" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfExpanderSetNestedObject", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpanderSinglePtr" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/types.SetNestedObjectValueOf[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfFlexer]", + "autoflex.target.path": "Field1", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpander" + }, + { + "@level": "info", + "@message": "Source implements flex.Expander", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1[0]", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfFlexer", + "autoflex.target.path": "Field1", + "autoflex.target.type": 
"*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpander" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/dispatch/expand_expander/single_set_source_and_single_struct_target.golden b/internal/framework/flex/testdata/autoflex/dispatch/expand_expander/single_set_source_and_single_struct_target.golden new file mode 100644 index 000000000000..82c7f37bec28 --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/dispatch/expand_expander/single_set_source_and_single_struct_target.golden @@ -0,0 +1,47 @@ +[ + { + "@level": "info", + "@message": "Expanding", + "@module": "provider.autoflex", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfExpanderSetNestedObject", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpanderSingleStruct" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfExpanderSetNestedObject", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpanderSingleStruct" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfExpanderSetNestedObject", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpanderSingleStruct" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": 
"github.com/hashicorp/terraform-provider-aws/internal/framework/types.SetNestedObjectValueOf[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfFlexer]", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpander" + }, + { + "@level": "info", + "@message": "Source implements flex.Expander", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1[0]", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfFlexer", + "autoflex.target.path": "Field1", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpander" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/dispatch/expand_expander/top_level_expands_to_nil.golden b/internal/framework/flex/testdata/autoflex/dispatch/expand_expander/top_level_expands_to_nil.golden new file mode 100644 index 000000000000..cd6ca69a4ec0 --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/dispatch/expand_expander/top_level_expands_to_nil.golden @@ -0,0 +1,27 @@ +[ + { + "@level": "info", + "@message": "Expanding", + "@module": "provider.autoflex", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfExpanderToNil", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpander" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfExpanderToNil", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpander" + }, + { + "@level": "info", + "@message": "Source implements flex.Expander", + "@module": "provider.autoflex", + "autoflex.source.path": "", + 
"autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfExpanderToNil", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpander" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/dispatch/expand_expander/top_level_incompatible_nonstruct_target.golden b/internal/framework/flex/testdata/autoflex/dispatch/expand_expander/top_level_incompatible_nonstruct_target.golden new file mode 100644 index 000000000000..ca2b081f60ca --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/dispatch/expand_expander/top_level_incompatible_nonstruct_target.golden @@ -0,0 +1,27 @@ +[ + { + "@level": "info", + "@message": "Expanding", + "@module": "provider.autoflex", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfExpanderToString", + "autoflex.target.type": "*int64" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfExpanderToString", + "autoflex.target.path": "", + "autoflex.target.type": "int64" + }, + { + "@level": "info", + "@message": "Source implements flex.Expander", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfExpanderToString", + "autoflex.target.path": "", + "autoflex.target.type": "int64" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/dispatch/expand_expander/top_level_incompatible_struct_target.golden b/internal/framework/flex/testdata/autoflex/dispatch/expand_expander/top_level_incompatible_struct_target.golden new file mode 100644 index 000000000000..8fb28db112ac --- /dev/null +++ 
b/internal/framework/flex/testdata/autoflex/dispatch/expand_expander/top_level_incompatible_struct_target.golden @@ -0,0 +1,27 @@ +[ + { + "@level": "info", + "@message": "Expanding", + "@module": "provider.autoflex", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfFlexer", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpanderIncompatible" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfFlexer", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpanderIncompatible" + }, + { + "@level": "info", + "@message": "Source implements flex.Expander", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfFlexer", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpanderIncompatible" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/dispatch/expand_expander/top_level_string_target.golden b/internal/framework/flex/testdata/autoflex/dispatch/expand_expander/top_level_string_target.golden new file mode 100644 index 000000000000..6b3816df1150 --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/dispatch/expand_expander/top_level_string_target.golden @@ -0,0 +1,27 @@ +[ + { + "@level": "info", + "@message": "Expanding", + "@module": "provider.autoflex", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfExpanderToString", + "autoflex.target.type": "*string" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + 
"autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfExpanderToString", + "autoflex.target.path": "", + "autoflex.target.type": "string" + }, + { + "@level": "info", + "@message": "Source implements flex.Expander", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfExpanderToString", + "autoflex.target.path": "", + "autoflex.target.type": "string" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/dispatch/expand_expander/top_level_struct_target.golden b/internal/framework/flex/testdata/autoflex/dispatch/expand_expander/top_level_struct_target.golden new file mode 100644 index 000000000000..4b19ce10200c --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/dispatch/expand_expander/top_level_struct_target.golden @@ -0,0 +1,27 @@ +[ + { + "@level": "info", + "@message": "Expanding", + "@module": "provider.autoflex", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfFlexer", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpander" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfFlexer", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpander" + }, + { + "@level": "info", + "@message": "Source implements flex.Expander", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfFlexer", + "autoflex.target.path": "", + "autoflex.target.type": 
"*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpander" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/dispatch/expand_interface/empty_list_source_and_empty_interface_target.golden b/internal/framework/flex/testdata/autoflex/dispatch/expand_interface/empty_list_source_and_empty_interface_target.golden new file mode 100644 index 000000000000..ae4966774845 --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/dispatch/expand_interface/empty_list_source_and_empty_interface_target.golden @@ -0,0 +1,48 @@ +[ + { + "@level": "info", + "@message": "Expanding", + "@module": "provider.autoflex", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfListNestedObject[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfInterfaceFlexer]", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceSlice" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfListNestedObject[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfInterfaceFlexer]", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceSlice" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfListNestedObject[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfInterfaceFlexer]", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": 
"*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceSlice" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/types.ListNestedObjectValueOf[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfInterfaceFlexer]", + "autoflex.target.path": "Field1", + "autoflex.target.type": "[]github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceInterface" + }, + { + "@level": "trace", + "@message": "Expanding nested object collection", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.size": 0, + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/types.ListNestedObjectValueOf[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfInterfaceFlexer]", + "autoflex.target.path": "Field1", + "autoflex.target.type": "[]github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceInterface" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/dispatch/expand_interface/empty_set_source_and_empty_interface_target.golden b/internal/framework/flex/testdata/autoflex/dispatch/expand_interface/empty_set_source_and_empty_interface_target.golden new file mode 100644 index 000000000000..c9108b1a6305 --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/dispatch/expand_interface/empty_set_source_and_empty_interface_target.golden @@ -0,0 +1,48 @@ +[ + { + "@level": "info", + "@message": "Expanding", + "@module": "provider.autoflex", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfSetNestedObject[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfInterfaceFlexer]", + "autoflex.target.type": 
"*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceSlice" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfSetNestedObject[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfInterfaceFlexer]", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceSlice" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfSetNestedObject[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfInterfaceFlexer]", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceSlice" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/types.SetNestedObjectValueOf[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfInterfaceFlexer]", + "autoflex.target.path": "Field1", + "autoflex.target.type": "[]github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceInterface" + }, + { + "@level": "trace", + "@message": "Expanding nested object collection", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.size": 0, + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/types.SetNestedObjectValueOf[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfInterfaceFlexer]", + 
"autoflex.target.path": "Field1", + "autoflex.target.type": "[]github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceInterface" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/dispatch/expand_interface/nonempty_list_source_and_nonempty_interface_target.golden b/internal/framework/flex/testdata/autoflex/dispatch/expand_interface/nonempty_list_source_and_nonempty_interface_target.golden new file mode 100644 index 000000000000..e1c231304daa --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/dispatch/expand_interface/nonempty_list_source_and_nonempty_interface_target.golden @@ -0,0 +1,66 @@ +[ + { + "@level": "info", + "@message": "Expanding", + "@module": "provider.autoflex", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfListNestedObject[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfInterfaceFlexer]", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceSlice" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfListNestedObject[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfInterfaceFlexer]", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceSlice" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfListNestedObject[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfInterfaceFlexer]", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + 
"autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceSlice" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/types.ListNestedObjectValueOf[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfInterfaceFlexer]", + "autoflex.target.path": "Field1", + "autoflex.target.type": "[]github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceInterface" + }, + { + "@level": "trace", + "@message": "Expanding nested object collection", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.size": 2, + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/types.ListNestedObjectValueOf[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfInterfaceFlexer]", + "autoflex.target.path": "Field1", + "autoflex.target.type": "[]github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceInterface" + }, + { + "@level": "info", + "@message": "Source implements flex.Expander", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1[0]", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfInterfaceFlexer", + "autoflex.target.path": "Field1[0]", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceInterface" + }, + { + "@level": "info", + "@message": "Source implements flex.Expander", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1[1]", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfInterfaceFlexer", + "autoflex.target.path": "Field1[1]", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceInterface" 
+ } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/dispatch/expand_interface/nonempty_set_source_and_nonempty_interface_target.golden b/internal/framework/flex/testdata/autoflex/dispatch/expand_interface/nonempty_set_source_and_nonempty_interface_target.golden new file mode 100644 index 000000000000..48ae8631feef --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/dispatch/expand_interface/nonempty_set_source_and_nonempty_interface_target.golden @@ -0,0 +1,66 @@ +[ + { + "@level": "info", + "@message": "Expanding", + "@module": "provider.autoflex", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfSetNestedObject[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfInterfaceFlexer]", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceSlice" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfSetNestedObject[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfInterfaceFlexer]", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceSlice" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfSetNestedObject[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfInterfaceFlexer]", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceSlice" + }, + { + "@level": "info", + "@message": 
"Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/types.SetNestedObjectValueOf[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfInterfaceFlexer]", + "autoflex.target.path": "Field1", + "autoflex.target.type": "[]github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceInterface" + }, + { + "@level": "trace", + "@message": "Expanding nested object collection", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.size": 2, + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/types.SetNestedObjectValueOf[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfInterfaceFlexer]", + "autoflex.target.path": "Field1", + "autoflex.target.type": "[]github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceInterface" + }, + { + "@level": "info", + "@message": "Source implements flex.Expander", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1[0]", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfInterfaceFlexer", + "autoflex.target.path": "Field1[0]", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceInterface" + }, + { + "@level": "info", + "@message": "Source implements flex.Expander", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1[1]", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfInterfaceFlexer", + "autoflex.target.path": "Field1[1]", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceInterface" + } +] \ No newline at end of file diff --git 
a/internal/framework/flex/testdata/autoflex/dispatch/expand_interface/object_value_source_and_struct_target.golden b/internal/framework/flex/testdata/autoflex/dispatch/expand_interface/object_value_source_and_struct_target.golden new file mode 100644 index 000000000000..f732b1859ca5 --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/dispatch/expand_interface/object_value_source_and_struct_target.golden @@ -0,0 +1,47 @@ +[ + { + "@level": "info", + "@message": "Expanding", + "@module": "provider.autoflex", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfObjectValue[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfInterfaceFlexer]", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceSingle" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfObjectValue[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfInterfaceFlexer]", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceSingle" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfObjectValue[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfInterfaceFlexer]", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceSingle" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + 
"autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/types.ObjectValueOf[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfInterfaceFlexer]", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceInterface" + }, + { + "@level": "info", + "@message": "Source implements flex.Expander", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfInterfaceFlexer", + "autoflex.target.path": "Field1", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceInterface" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/dispatch/expand_interface/single_list_nonexpander_source_and_single_interface_target.golden b/internal/framework/flex/testdata/autoflex/dispatch/expand_interface/single_list_nonexpander_source_and_single_interface_target.golden new file mode 100644 index 000000000000..d38d794034ae --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/dispatch/expand_interface/single_list_nonexpander_source_and_single_interface_target.golden @@ -0,0 +1,49 @@ +[ + { + "@level": "info", + "@message": "Expanding", + "@module": "provider.autoflex", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfListNestedObject[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfSingleStringField]", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceSingle" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": 
"github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfListNestedObject[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfSingleStringField]", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceSingle" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfListNestedObject[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfSingleStringField]", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceSingle" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/types.ListNestedObjectValueOf[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfSingleStringField]", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceInterface" + }, + { + "@level": "error", + "@message": "AutoFlex Expand; incompatible types", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1[0]", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfSingleStringField", + "autoflex.target.path": "Field1", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceInterface", + "from": {}, + "to": 20 + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/dispatch/expand_interface/single_list_source_and_single_interface_target.golden 
b/internal/framework/flex/testdata/autoflex/dispatch/expand_interface/single_list_source_and_single_interface_target.golden new file mode 100644 index 000000000000..27744b3b2fdf --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/dispatch/expand_interface/single_list_source_and_single_interface_target.golden @@ -0,0 +1,47 @@ +[ + { + "@level": "info", + "@message": "Expanding", + "@module": "provider.autoflex", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfListNestedObject[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfInterfaceFlexer]", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceSingle" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfListNestedObject[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfInterfaceFlexer]", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceSingle" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfListNestedObject[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfInterfaceFlexer]", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceSingle" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": 
"github.com/hashicorp/terraform-provider-aws/internal/framework/types.ListNestedObjectValueOf[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfInterfaceFlexer]", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceInterface" + }, + { + "@level": "info", + "@message": "Source implements flex.Expander", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1[0]", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfInterfaceFlexer", + "autoflex.target.path": "Field1", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceInterface" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/dispatch/expand_interface/single_set_source_and_single_interface_target.golden b/internal/framework/flex/testdata/autoflex/dispatch/expand_interface/single_set_source_and_single_interface_target.golden new file mode 100644 index 000000000000..176a6ea7160c --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/dispatch/expand_interface/single_set_source_and_single_interface_target.golden @@ -0,0 +1,47 @@ +[ + { + "@level": "info", + "@message": "Expanding", + "@module": "provider.autoflex", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfSetNestedObject[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfInterfaceFlexer]", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceSingle" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfSetNestedObject[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfInterfaceFlexer]", + 
"autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceSingle" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfSetNestedObject[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfInterfaceFlexer]", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceSingle" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/types.SetNestedObjectValueOf[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfInterfaceFlexer]", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceInterface" + }, + { + "@level": "info", + "@message": "Source implements flex.Expander", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1[0]", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfInterfaceFlexer", + "autoflex.target.path": "Field1", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceInterface" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/dispatch/expand_interface/top_level.golden b/internal/framework/flex/testdata/autoflex/dispatch/expand_interface/top_level.golden new file mode 100644 index 000000000000..994de11ecc18 --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/dispatch/expand_interface/top_level.golden @@ -0,0 
+1,27 @@ +[ + { + "@level": "info", + "@message": "Expanding", + "@module": "provider.autoflex", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfInterfaceFlexer", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceInterface" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfInterfaceFlexer", + "autoflex.target.path": "", + "autoflex.target.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceInterface" + }, + { + "@level": "info", + "@message": "Source implements flex.Expander", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfInterfaceFlexer", + "autoflex.target.path": "", + "autoflex.target.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceInterface" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/dispatch/expand_interface/top_level_return_value_does_not_implement_target_interface.golden b/internal/framework/flex/testdata/autoflex/dispatch/expand_interface/top_level_return_value_does_not_implement_target_interface.golden new file mode 100644 index 000000000000..9dcb4943a555 --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/dispatch/expand_interface/top_level_return_value_does_not_implement_target_interface.golden @@ -0,0 +1,27 @@ +[ + { + "@level": "info", + "@message": "Expanding", + "@module": "provider.autoflex", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfInterfaceIncompatibleExpander", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceInterface" 
+ }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfInterfaceIncompatibleExpander", + "autoflex.target.path": "", + "autoflex.target.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceInterface" + }, + { + "@level": "info", + "@message": "Source implements flex.Expander", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfInterfaceIncompatibleExpander", + "autoflex.target.path": "", + "autoflex.target.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceInterface" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/dispatch/expand_interface_contract/source_field_does_not_implement_attrvalue_source.golden b/internal/framework/flex/testdata/autoflex/dispatch/expand_interface_contract/source_field_does_not_implement_attrvalue_source.golden new file mode 100644 index 000000000000..3be7bd29878c --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/dispatch/expand_interface_contract/source_field_does_not_implement_attrvalue_source.golden @@ -0,0 +1,47 @@ +[ + { + "@level": "info", + "@message": "Expanding", + "@module": "provider.autoflex", + "autoflex.source.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsSingleStringValue", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsSingleStringValue" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsSingleStringValue", + "autoflex.target.path": "", + "autoflex.target.type": 
"*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsSingleStringValue" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsSingleStringValue", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsSingleStringValue" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "string", + "autoflex.target.path": "Field1", + "autoflex.target.type": "string" + }, + { + "@level": "error", + "@message": "Source does not implement attr.Value", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "string", + "autoflex.target.path": "Field1", + "autoflex.target.type": "string" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/dispatch/expand_interface_typed_expander/empty_list_source_and_empty_interface_target.golden b/internal/framework/flex/testdata/autoflex/dispatch/expand_interface_typed_expander/empty_list_source_and_empty_interface_target.golden new file mode 100644 index 000000000000..2a93001c2728 --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/dispatch/expand_interface_typed_expander/empty_list_source_and_empty_interface_target.golden @@ -0,0 +1,48 @@ +[ + { + "@level": "info", + "@message": "Expanding", + "@module": "provider.autoflex", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfListNestedObject[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfInterfaceTypedExpander]", + "autoflex.target.type": 
"*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceSlice" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfListNestedObject[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfInterfaceTypedExpander]", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceSlice" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfListNestedObject[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfInterfaceTypedExpander]", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceSlice" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/types.ListNestedObjectValueOf[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfInterfaceTypedExpander]", + "autoflex.target.path": "Field1", + "autoflex.target.type": "[]github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceInterface" + }, + { + "@level": "trace", + "@message": "Expanding nested object collection", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.size": 0, + "autoflex.source.type": 
"github.com/hashicorp/terraform-provider-aws/internal/framework/types.ListNestedObjectValueOf[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfInterfaceTypedExpander]", + "autoflex.target.path": "Field1", + "autoflex.target.type": "[]github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceInterface" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/dispatch/expand_interface_typed_expander/empty_set_source_and_empty_interface_target.golden b/internal/framework/flex/testdata/autoflex/dispatch/expand_interface_typed_expander/empty_set_source_and_empty_interface_target.golden new file mode 100644 index 000000000000..c919b18e2635 --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/dispatch/expand_interface_typed_expander/empty_set_source_and_empty_interface_target.golden @@ -0,0 +1,48 @@ +[ + { + "@level": "info", + "@message": "Expanding", + "@module": "provider.autoflex", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfSetNestedObject[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfInterfaceTypedExpander]", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceSlice" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfSetNestedObject[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfInterfaceTypedExpander]", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceSlice" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": 
"github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfSetNestedObject[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfInterfaceTypedExpander]", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceSlice" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/types.SetNestedObjectValueOf[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfInterfaceTypedExpander]", + "autoflex.target.path": "Field1", + "autoflex.target.type": "[]github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceInterface" + }, + { + "@level": "trace", + "@message": "Expanding nested object collection", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.size": 0, + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/types.SetNestedObjectValueOf[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfInterfaceTypedExpander]", + "autoflex.target.path": "Field1", + "autoflex.target.type": "[]github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceInterface" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/dispatch/expand_interface_typed_expander/nonempty_list_source_and_nonempty_interface_target.golden b/internal/framework/flex/testdata/autoflex/dispatch/expand_interface_typed_expander/nonempty_list_source_and_nonempty_interface_target.golden new file mode 100644 index 000000000000..5baaf2ad00ac --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/dispatch/expand_interface_typed_expander/nonempty_list_source_and_nonempty_interface_target.golden @@ -0,0 +1,66 @@ +[ + { + 
"@level": "info", + "@message": "Expanding", + "@module": "provider.autoflex", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfListNestedObject[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfInterfaceTypedExpander]", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceSlice" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfListNestedObject[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfInterfaceTypedExpander]", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceSlice" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfListNestedObject[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfInterfaceTypedExpander]", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceSlice" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/types.ListNestedObjectValueOf[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfInterfaceTypedExpander]", + "autoflex.target.path": "Field1", + "autoflex.target.type": "[]github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceInterface" + }, + { + "@level": "trace", + "@message": "Expanding 
nested object collection", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.size": 2, + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/types.ListNestedObjectValueOf[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfInterfaceTypedExpander]", + "autoflex.target.path": "Field1", + "autoflex.target.type": "[]github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceInterface" + }, + { + "@level": "info", + "@message": "Source implements flex.TypedExpander", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1[0]", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfInterfaceTypedExpander", + "autoflex.target.path": "Field1[0]", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceInterface" + }, + { + "@level": "info", + "@message": "Source implements flex.TypedExpander", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1[1]", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfInterfaceTypedExpander", + "autoflex.target.path": "Field1[1]", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceInterface" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/dispatch/expand_interface_typed_expander/nonempty_set_source_and_nonempty_interface_target.golden b/internal/framework/flex/testdata/autoflex/dispatch/expand_interface_typed_expander/nonempty_set_source_and_nonempty_interface_target.golden new file mode 100644 index 000000000000..54aa02f83e27 --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/dispatch/expand_interface_typed_expander/nonempty_set_source_and_nonempty_interface_target.golden @@ -0,0 +1,66 @@ +[ + { + "@level": "info", + "@message": "Expanding", + "@module": 
"provider.autoflex", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfSetNestedObject[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfInterfaceTypedExpander]", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceSlice" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfSetNestedObject[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfInterfaceTypedExpander]", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceSlice" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfSetNestedObject[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfInterfaceTypedExpander]", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceSlice" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/types.SetNestedObjectValueOf[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfInterfaceTypedExpander]", + "autoflex.target.path": "Field1", + "autoflex.target.type": "[]github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceInterface" + }, + { + "@level": "trace", + "@message": "Expanding nested object collection", + "@module": "provider.autoflex", + 
"autoflex.source.path": "Field1", + "autoflex.source.size": 2, + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/types.SetNestedObjectValueOf[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfInterfaceTypedExpander]", + "autoflex.target.path": "Field1", + "autoflex.target.type": "[]github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceInterface" + }, + { + "@level": "info", + "@message": "Source implements flex.TypedExpander", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1[0]", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfInterfaceTypedExpander", + "autoflex.target.path": "Field1[0]", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceInterface" + }, + { + "@level": "info", + "@message": "Source implements flex.TypedExpander", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1[1]", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfInterfaceTypedExpander", + "autoflex.target.path": "Field1[1]", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceInterface" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/dispatch/expand_interface_typed_expander/object_value_source_and_struct_target.golden b/internal/framework/flex/testdata/autoflex/dispatch/expand_interface_typed_expander/object_value_source_and_struct_target.golden new file mode 100644 index 000000000000..8005c20ff986 --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/dispatch/expand_interface_typed_expander/object_value_source_and_struct_target.golden @@ -0,0 +1,47 @@ +[ + { + "@level": "info", + "@message": "Expanding", + "@module": "provider.autoflex", + "autoflex.source.type": 
"github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfObjectValue[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfInterfaceTypedExpander]", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceSingle" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfObjectValue[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfInterfaceTypedExpander]", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceSingle" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfObjectValue[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfInterfaceTypedExpander]", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceSingle" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/types.ObjectValueOf[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfInterfaceTypedExpander]", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceInterface" + }, + { + "@level": "info", + "@message": "Source implements flex.TypedExpander", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": 
"github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfInterfaceTypedExpander", + "autoflex.target.path": "Field1", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceInterface" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/dispatch/expand_interface_typed_expander/single_list_nonexpander_source_and_single_interface_target.golden b/internal/framework/flex/testdata/autoflex/dispatch/expand_interface_typed_expander/single_list_nonexpander_source_and_single_interface_target.golden new file mode 100644 index 000000000000..d38d794034ae --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/dispatch/expand_interface_typed_expander/single_list_nonexpander_source_and_single_interface_target.golden @@ -0,0 +1,49 @@ +[ + { + "@level": "info", + "@message": "Expanding", + "@module": "provider.autoflex", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfListNestedObject[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfSingleStringField]", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceSingle" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfListNestedObject[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfSingleStringField]", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceSingle" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": 
"github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfListNestedObject[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfSingleStringField]", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceSingle" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/types.ListNestedObjectValueOf[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfSingleStringField]", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceInterface" + }, + { + "@level": "error", + "@message": "AutoFlex Expand; incompatible types", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1[0]", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfSingleStringField", + "autoflex.target.path": "Field1", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceInterface", + "from": {}, + "to": 20 + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/dispatch/expand_interface_typed_expander/single_list_source_and_single_interface_target.golden b/internal/framework/flex/testdata/autoflex/dispatch/expand_interface_typed_expander/single_list_source_and_single_interface_target.golden new file mode 100644 index 000000000000..86a41142900f --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/dispatch/expand_interface_typed_expander/single_list_source_and_single_interface_target.golden @@ -0,0 +1,47 @@ +[ + { + "@level": "info", + "@message": "Expanding", + "@module": "provider.autoflex", + "autoflex.source.type": 
"github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfListNestedObject[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfInterfaceTypedExpander]", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceSingle" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfListNestedObject[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfInterfaceTypedExpander]", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceSingle" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfListNestedObject[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfInterfaceTypedExpander]", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceSingle" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/types.ListNestedObjectValueOf[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfInterfaceTypedExpander]", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceInterface" + }, + { + "@level": "info", + "@message": "Source implements flex.TypedExpander", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1[0]", + 
"autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfInterfaceTypedExpander", + "autoflex.target.path": "Field1", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceInterface" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/dispatch/expand_interface_typed_expander/single_set_source_and_single_interface_target.golden b/internal/framework/flex/testdata/autoflex/dispatch/expand_interface_typed_expander/single_set_source_and_single_interface_target.golden new file mode 100644 index 000000000000..645829332df0 --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/dispatch/expand_interface_typed_expander/single_set_source_and_single_interface_target.golden @@ -0,0 +1,47 @@ +[ + { + "@level": "info", + "@message": "Expanding", + "@module": "provider.autoflex", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfSetNestedObject[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfInterfaceTypedExpander]", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceSingle" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfSetNestedObject[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfInterfaceTypedExpander]", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceSingle" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": 
"github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfSetNestedObject[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfInterfaceTypedExpander]", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceSingle" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/types.SetNestedObjectValueOf[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfInterfaceTypedExpander]", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceInterface" + }, + { + "@level": "info", + "@message": "Source implements flex.TypedExpander", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1[0]", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfInterfaceTypedExpander", + "autoflex.target.path": "Field1", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceInterface" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/dispatch/expand_interface_typed_expander/top_level.golden b/internal/framework/flex/testdata/autoflex/dispatch/expand_interface_typed_expander/top_level.golden new file mode 100644 index 000000000000..d2c0713cb44e --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/dispatch/expand_interface_typed_expander/top_level.golden @@ -0,0 +1,27 @@ +[ + { + "@level": "info", + "@message": "Expanding", + "@module": "provider.autoflex", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfInterfaceTypedExpander", + "autoflex.target.type": 
"*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceInterface" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfInterfaceTypedExpander", + "autoflex.target.path": "", + "autoflex.target.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceInterface" + }, + { + "@level": "info", + "@message": "Source implements flex.TypedExpander", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfInterfaceTypedExpander", + "autoflex.target.path": "", + "autoflex.target.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceInterface" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/dispatch/expand_interface_typed_expander/top_level_return_value_does_not_implement_target_interface.golden b/internal/framework/flex/testdata/autoflex/dispatch/expand_interface_typed_expander/top_level_return_value_does_not_implement_target_interface.golden new file mode 100644 index 000000000000..62d1b01267c4 --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/dispatch/expand_interface_typed_expander/top_level_return_value_does_not_implement_target_interface.golden @@ -0,0 +1,27 @@ +[ + { + "@level": "info", + "@message": "Expanding", + "@module": "provider.autoflex", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfInterfaceIncompatibleTypedExpander", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceInterface" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": 
"github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfInterfaceIncompatibleTypedExpander", + "autoflex.target.path": "", + "autoflex.target.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceInterface" + }, + { + "@level": "info", + "@message": "Source implements flex.TypedExpander", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfInterfaceIncompatibleTypedExpander", + "autoflex.target.path": "", + "autoflex.target.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceInterface" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/dispatch/expand_logging_collections/collection_of_primitive_types_source_and_slice_or_map_of_primtive_types_target.golden b/internal/framework/flex/testdata/autoflex/dispatch/expand_logging_collections/collection_of_primitive_types_source_and_slice_or_map_of_primtive_types_target.golden new file mode 100644 index 000000000000..ea03e2c997fb --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/dispatch/expand_logging_collections/collection_of_primitive_types_source_and_slice_or_map_of_primtive_types_target.golden @@ -0,0 +1,198 @@ +[ + { + "@level": "info", + "@message": "Expanding", + "@module": "provider.autoflex", + "autoflex.source.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfCollectionsOfPrimitiveElements", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsCollectionsOfPrimitiveElements" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfCollectionsOfPrimitiveElements", + "autoflex.target.path": "", + "autoflex.target.type": 
"*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsCollectionsOfPrimitiveElements" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfCollectionsOfPrimitiveElements", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsCollectionsOfPrimitiveElements" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.ListValue", + "autoflex.target.path": "Field1", + "autoflex.target.type": "[]string" + }, + { + "@level": "trace", + "@message": "Expanding with ElementsAs", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.size": 2, + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.ListValue", + "autoflex.target.path": "Field1", + "autoflex.target.type": "[]string" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field2", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfCollectionsOfPrimitiveElements", + "autoflex.target.fieldname": "Field2", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsCollectionsOfPrimitiveElements" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field2", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.ListValue", + 
"autoflex.target.path": "Field2", + "autoflex.target.type": "[]*string" + }, + { + "@level": "trace", + "@message": "Expanding with ElementsAs", + "@module": "provider.autoflex", + "autoflex.source.path": "Field2", + "autoflex.source.size": 2, + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.ListValue", + "autoflex.target.path": "Field2", + "autoflex.target.type": "[]*string" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field3", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfCollectionsOfPrimitiveElements", + "autoflex.target.fieldname": "Field3", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsCollectionsOfPrimitiveElements" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field3", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.SetValue", + "autoflex.target.path": "Field3", + "autoflex.target.type": "[]string" + }, + { + "@level": "trace", + "@message": "Expanding with ElementsAs", + "@module": "provider.autoflex", + "autoflex.source.path": "Field3", + "autoflex.source.size": 2, + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.SetValue", + "autoflex.target.path": "Field3", + "autoflex.target.type": "[]string" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field4", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfCollectionsOfPrimitiveElements", + "autoflex.target.fieldname": "Field4", + "autoflex.target.path": "", + "autoflex.target.type": 
"*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsCollectionsOfPrimitiveElements" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field4", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.SetValue", + "autoflex.target.path": "Field4", + "autoflex.target.type": "[]*string" + }, + { + "@level": "trace", + "@message": "Expanding with ElementsAs", + "@module": "provider.autoflex", + "autoflex.source.path": "Field4", + "autoflex.source.size": 2, + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.SetValue", + "autoflex.target.path": "Field4", + "autoflex.target.type": "[]*string" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field5", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfCollectionsOfPrimitiveElements", + "autoflex.target.fieldname": "Field5", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsCollectionsOfPrimitiveElements" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field5", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.MapValue", + "autoflex.target.path": "Field5", + "autoflex.target.type": "map[string]string" + }, + { + "@level": "trace", + "@message": "Expanding with ElementsAs", + "@module": "provider.autoflex", + "autoflex.source.path": "Field5", + "autoflex.source.size": 2, + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.MapValue", + "autoflex.target.path": "Field5", + "autoflex.target.type": "map[string]string" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": 
"provider.autoflex", + "autoflex.source.fieldname": "Field6", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfCollectionsOfPrimitiveElements", + "autoflex.target.fieldname": "Field6", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsCollectionsOfPrimitiveElements" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field6", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.MapValue", + "autoflex.target.path": "Field6", + "autoflex.target.type": "map[string]*string" + }, + { + "@level": "trace", + "@message": "Expanding with ElementsAs", + "@module": "provider.autoflex", + "autoflex.source.path": "Field6", + "autoflex.source.size": 2, + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.MapValue", + "autoflex.target.path": "Field6", + "autoflex.target.type": "map[string]*string" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/dispatch/expand_typed_expander/empty_list_source_and_empty_pointer_struct_target.golden b/internal/framework/flex/testdata/autoflex/dispatch/expand_typed_expander/empty_list_source_and_empty_pointer_struct_target.golden new file mode 100644 index 000000000000..eb53598edd31 --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/dispatch/expand_typed_expander/empty_list_source_and_empty_pointer_struct_target.golden @@ -0,0 +1,48 @@ +[ + { + "@level": "info", + "@message": "Expanding", + "@module": "provider.autoflex", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfTypedExpanderListNestedObject", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpanderPtrSlice" + }, + { + "@level": "info", + 
"@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfTypedExpanderListNestedObject", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpanderPtrSlice" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfTypedExpanderListNestedObject", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpanderPtrSlice" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/types.ListNestedObjectValueOf[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfTypedExpander]", + "autoflex.target.path": "Field1", + "autoflex.target.type": "[]*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpander" + }, + { + "@level": "trace", + "@message": "Expanding nested object collection", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.size": 0, + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/types.ListNestedObjectValueOf[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfTypedExpander]", + "autoflex.target.path": "Field1", + "autoflex.target.type": "[]*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpander" + } +] \ No newline at end of file diff --git 
a/internal/framework/flex/testdata/autoflex/dispatch/expand_typed_expander/empty_list_source_and_empty_struct_target.golden b/internal/framework/flex/testdata/autoflex/dispatch/expand_typed_expander/empty_list_source_and_empty_struct_target.golden new file mode 100644 index 000000000000..a09e3f000dc3 --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/dispatch/expand_typed_expander/empty_list_source_and_empty_struct_target.golden @@ -0,0 +1,48 @@ +[ + { + "@level": "info", + "@message": "Expanding", + "@module": "provider.autoflex", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfTypedExpanderListNestedObject", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpanderStructSlice" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfTypedExpanderListNestedObject", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpanderStructSlice" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfTypedExpanderListNestedObject", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpanderStructSlice" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": 
"github.com/hashicorp/terraform-provider-aws/internal/framework/types.ListNestedObjectValueOf[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfTypedExpander]", + "autoflex.target.path": "Field1", + "autoflex.target.type": "[]github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpander" + }, + { + "@level": "trace", + "@message": "Expanding nested object collection", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.size": 0, + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/types.ListNestedObjectValueOf[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfTypedExpander]", + "autoflex.target.path": "Field1", + "autoflex.target.type": "[]github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpander" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/dispatch/expand_typed_expander/empty_set_source_and_empty_pointer_struct_target.golden b/internal/framework/flex/testdata/autoflex/dispatch/expand_typed_expander/empty_set_source_and_empty_pointer_struct_target.golden new file mode 100644 index 000000000000..afeb99b3ad31 --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/dispatch/expand_typed_expander/empty_set_source_and_empty_pointer_struct_target.golden @@ -0,0 +1,48 @@ +[ + { + "@level": "info", + "@message": "Expanding", + "@module": "provider.autoflex", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfTypedExpanderSetNestedObject", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpanderPtrSlice" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfTypedExpanderSetNestedObject", + "autoflex.target.path": 
"", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpanderPtrSlice" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfTypedExpanderSetNestedObject", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpanderPtrSlice" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/types.SetNestedObjectValueOf[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfTypedExpander]", + "autoflex.target.path": "Field1", + "autoflex.target.type": "[]*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpander" + }, + { + "@level": "trace", + "@message": "Expanding nested object collection", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.size": 0, + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/types.SetNestedObjectValueOf[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfTypedExpander]", + "autoflex.target.path": "Field1", + "autoflex.target.type": "[]*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpander" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/dispatch/expand_typed_expander/empty_set_source_and_empty_struct_target.golden b/internal/framework/flex/testdata/autoflex/dispatch/expand_typed_expander/empty_set_source_and_empty_struct_target.golden new file mode 100644 index 000000000000..5837169fde59 --- /dev/null +++ 
b/internal/framework/flex/testdata/autoflex/dispatch/expand_typed_expander/empty_set_source_and_empty_struct_target.golden @@ -0,0 +1,48 @@ +[ + { + "@level": "info", + "@message": "Expanding", + "@module": "provider.autoflex", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfTypedExpanderSetNestedObject", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpanderStructSlice" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfTypedExpanderSetNestedObject", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpanderStructSlice" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfTypedExpanderSetNestedObject", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpanderStructSlice" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/types.SetNestedObjectValueOf[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfTypedExpander]", + "autoflex.target.path": "Field1", + "autoflex.target.type": "[]github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpander" + }, + { + "@level": "trace", + "@message": "Expanding nested object collection", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + 
"autoflex.source.size": 0, + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/types.SetNestedObjectValueOf[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfTypedExpander]", + "autoflex.target.path": "Field1", + "autoflex.target.type": "[]github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpander" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/dispatch/expand_typed_expander/nonempty_list_source_and_nonempty_pointer_struct_target.golden b/internal/framework/flex/testdata/autoflex/dispatch/expand_typed_expander/nonempty_list_source_and_nonempty_pointer_struct_target.golden new file mode 100644 index 000000000000..a26d0dd2cb85 --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/dispatch/expand_typed_expander/nonempty_list_source_and_nonempty_pointer_struct_target.golden @@ -0,0 +1,66 @@ +[ + { + "@level": "info", + "@message": "Expanding", + "@module": "provider.autoflex", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfTypedExpanderListNestedObject", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpanderPtrSlice" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfTypedExpanderListNestedObject", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpanderPtrSlice" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfTypedExpanderListNestedObject", + "autoflex.target.fieldname": "Field1", + 
"autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpanderPtrSlice" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/types.ListNestedObjectValueOf[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfTypedExpander]", + "autoflex.target.path": "Field1", + "autoflex.target.type": "[]*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpander" + }, + { + "@level": "trace", + "@message": "Expanding nested object collection", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.size": 2, + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/types.ListNestedObjectValueOf[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfTypedExpander]", + "autoflex.target.path": "Field1", + "autoflex.target.type": "[]*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpander" + }, + { + "@level": "info", + "@message": "Source implements flex.TypedExpander", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1[0]", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfTypedExpander", + "autoflex.target.path": "Field1[0]", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpander" + }, + { + "@level": "info", + "@message": "Source implements flex.TypedExpander", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1[1]", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfTypedExpander", + "autoflex.target.path": "Field1[1]", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpander" + } 
+] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/dispatch/expand_typed_expander/nonempty_list_source_and_nonempty_struct_target.golden b/internal/framework/flex/testdata/autoflex/dispatch/expand_typed_expander/nonempty_list_source_and_nonempty_struct_target.golden new file mode 100644 index 000000000000..6b79b8ad4cad --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/dispatch/expand_typed_expander/nonempty_list_source_and_nonempty_struct_target.golden @@ -0,0 +1,66 @@ +[ + { + "@level": "info", + "@message": "Expanding", + "@module": "provider.autoflex", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfTypedExpanderListNestedObject", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpanderStructSlice" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfTypedExpanderListNestedObject", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpanderStructSlice" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfTypedExpanderListNestedObject", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpanderStructSlice" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": 
"github.com/hashicorp/terraform-provider-aws/internal/framework/types.ListNestedObjectValueOf[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfTypedExpander]", + "autoflex.target.path": "Field1", + "autoflex.target.type": "[]github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpander" + }, + { + "@level": "trace", + "@message": "Expanding nested object collection", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.size": 2, + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/types.ListNestedObjectValueOf[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfTypedExpander]", + "autoflex.target.path": "Field1", + "autoflex.target.type": "[]github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpander" + }, + { + "@level": "info", + "@message": "Source implements flex.TypedExpander", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1[0]", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfTypedExpander", + "autoflex.target.path": "Field1[0]", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpander" + }, + { + "@level": "info", + "@message": "Source implements flex.TypedExpander", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1[1]", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfTypedExpander", + "autoflex.target.path": "Field1[1]", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpander" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/dispatch/expand_typed_expander/nonempty_set_source_and_nonempty_pointer_struct_target.golden 
b/internal/framework/flex/testdata/autoflex/dispatch/expand_typed_expander/nonempty_set_source_and_nonempty_pointer_struct_target.golden new file mode 100644 index 000000000000..8855a5436f90 --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/dispatch/expand_typed_expander/nonempty_set_source_and_nonempty_pointer_struct_target.golden @@ -0,0 +1,66 @@ +[ + { + "@level": "info", + "@message": "Expanding", + "@module": "provider.autoflex", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfTypedExpanderSetNestedObject", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpanderPtrSlice" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfTypedExpanderSetNestedObject", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpanderPtrSlice" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfTypedExpanderSetNestedObject", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpanderPtrSlice" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/types.SetNestedObjectValueOf[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfTypedExpander]", + "autoflex.target.path": "Field1", + "autoflex.target.type": 
"[]*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpander" + }, + { + "@level": "trace", + "@message": "Expanding nested object collection", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.size": 2, + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/types.SetNestedObjectValueOf[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfTypedExpander]", + "autoflex.target.path": "Field1", + "autoflex.target.type": "[]*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpander" + }, + { + "@level": "info", + "@message": "Source implements flex.TypedExpander", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1[0]", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfTypedExpander", + "autoflex.target.path": "Field1[0]", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpander" + }, + { + "@level": "info", + "@message": "Source implements flex.TypedExpander", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1[1]", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfTypedExpander", + "autoflex.target.path": "Field1[1]", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpander" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/dispatch/expand_typed_expander/nonempty_set_source_and_nonempty_struct_target.golden b/internal/framework/flex/testdata/autoflex/dispatch/expand_typed_expander/nonempty_set_source_and_nonempty_struct_target.golden new file mode 100644 index 000000000000..0a6788498de5 --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/dispatch/expand_typed_expander/nonempty_set_source_and_nonempty_struct_target.golden @@ -0,0 +1,66 @@ +[ + { + "@level": 
"info", + "@message": "Expanding", + "@module": "provider.autoflex", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfTypedExpanderSetNestedObject", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpanderStructSlice" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfTypedExpanderSetNestedObject", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpanderStructSlice" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfTypedExpanderSetNestedObject", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpanderStructSlice" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/types.SetNestedObjectValueOf[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfTypedExpander]", + "autoflex.target.path": "Field1", + "autoflex.target.type": "[]github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpander" + }, + { + "@level": "trace", + "@message": "Expanding nested object collection", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.size": 2, + "autoflex.source.type": 
"github.com/hashicorp/terraform-provider-aws/internal/framework/types.SetNestedObjectValueOf[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfTypedExpander]", + "autoflex.target.path": "Field1", + "autoflex.target.type": "[]github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpander" + }, + { + "@level": "info", + "@message": "Source implements flex.TypedExpander", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1[0]", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfTypedExpander", + "autoflex.target.path": "Field1[0]", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpander" + }, + { + "@level": "info", + "@message": "Source implements flex.TypedExpander", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1[1]", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfTypedExpander", + "autoflex.target.path": "Field1[1]", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpander" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/dispatch/expand_typed_expander/object_value_source_and_pointer_struct_target.golden b/internal/framework/flex/testdata/autoflex/dispatch/expand_typed_expander/object_value_source_and_pointer_struct_target.golden new file mode 100644 index 000000000000..177203b0cc73 --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/dispatch/expand_typed_expander/object_value_source_and_pointer_struct_target.golden @@ -0,0 +1,47 @@ +[ + { + "@level": "info", + "@message": "Expanding", + "@module": "provider.autoflex", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfTypedExpanderObjectValue", + "autoflex.target.type": 
"*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpanderSinglePtr" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfTypedExpanderObjectValue", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpanderSinglePtr" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfTypedExpanderObjectValue", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpanderSinglePtr" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/types.ObjectValueOf[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfTypedExpander]", + "autoflex.target.path": "Field1", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpander" + }, + { + "@level": "info", + "@message": "Source implements flex.TypedExpander", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfTypedExpander", + "autoflex.target.path": "Field1", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpander" + } +] \ No newline at end of file diff --git 
a/internal/framework/flex/testdata/autoflex/dispatch/expand_typed_expander/object_value_source_and_struct_target.golden b/internal/framework/flex/testdata/autoflex/dispatch/expand_typed_expander/object_value_source_and_struct_target.golden new file mode 100644 index 000000000000..c1d9b700054d --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/dispatch/expand_typed_expander/object_value_source_and_struct_target.golden @@ -0,0 +1,47 @@ +[ + { + "@level": "info", + "@message": "Expanding", + "@module": "provider.autoflex", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfTypedExpanderObjectValue", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpanderSingleStruct" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfTypedExpanderObjectValue", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpanderSingleStruct" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfTypedExpanderObjectValue", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpanderSingleStruct" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/types.ObjectValueOf[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfTypedExpander]", + 
"autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpander" + }, + { + "@level": "info", + "@message": "Source implements flex.TypedExpander", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfTypedExpander", + "autoflex.target.path": "Field1", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpander" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/dispatch/expand_typed_expander/single_list_source_and_single_pointer_struct_target.golden b/internal/framework/flex/testdata/autoflex/dispatch/expand_typed_expander/single_list_source_and_single_pointer_struct_target.golden new file mode 100644 index 000000000000..6e84c20c0136 --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/dispatch/expand_typed_expander/single_list_source_and_single_pointer_struct_target.golden @@ -0,0 +1,47 @@ +[ + { + "@level": "info", + "@message": "Expanding", + "@module": "provider.autoflex", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfTypedExpanderListNestedObject", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpanderSinglePtr" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfTypedExpanderListNestedObject", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpanderSinglePtr" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + 
"autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfTypedExpanderListNestedObject", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpanderSinglePtr" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/types.ListNestedObjectValueOf[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfTypedExpander]", + "autoflex.target.path": "Field1", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpander" + }, + { + "@level": "info", + "@message": "Source implements flex.TypedExpander", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1[0]", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfTypedExpander", + "autoflex.target.path": "Field1", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpander" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/dispatch/expand_typed_expander/single_list_source_and_single_struct_target.golden b/internal/framework/flex/testdata/autoflex/dispatch/expand_typed_expander/single_list_source_and_single_struct_target.golden new file mode 100644 index 000000000000..42beb476f607 --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/dispatch/expand_typed_expander/single_list_source_and_single_struct_target.golden @@ -0,0 +1,47 @@ +[ + { + "@level": "info", + "@message": "Expanding", + "@module": "provider.autoflex", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfTypedExpanderListNestedObject", + "autoflex.target.type": 
"*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpanderSingleStruct" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfTypedExpanderListNestedObject", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpanderSingleStruct" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfTypedExpanderListNestedObject", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpanderSingleStruct" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/types.ListNestedObjectValueOf[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfTypedExpander]", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpander" + }, + { + "@level": "info", + "@message": "Source implements flex.TypedExpander", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1[0]", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfTypedExpander", + "autoflex.target.path": "Field1", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpander" + } +] \ No newline at end of file diff --git 
a/internal/framework/flex/testdata/autoflex/dispatch/expand_typed_expander/single_set_source_and_single_pointer_struct_target.golden b/internal/framework/flex/testdata/autoflex/dispatch/expand_typed_expander/single_set_source_and_single_pointer_struct_target.golden new file mode 100644 index 000000000000..699477997c71 --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/dispatch/expand_typed_expander/single_set_source_and_single_pointer_struct_target.golden @@ -0,0 +1,47 @@ +[ + { + "@level": "info", + "@message": "Expanding", + "@module": "provider.autoflex", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfTypedExpanderSetNestedObject", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpanderSinglePtr" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfTypedExpanderSetNestedObject", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpanderSinglePtr" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfTypedExpanderSetNestedObject", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpanderSinglePtr" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": 
"github.com/hashicorp/terraform-provider-aws/internal/framework/types.SetNestedObjectValueOf[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfTypedExpander]", + "autoflex.target.path": "Field1", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpander" + }, + { + "@level": "info", + "@message": "Source implements flex.TypedExpander", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1[0]", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfTypedExpander", + "autoflex.target.path": "Field1", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpander" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/dispatch/expand_typed_expander/single_set_source_and_single_struct_target.golden b/internal/framework/flex/testdata/autoflex/dispatch/expand_typed_expander/single_set_source_and_single_struct_target.golden new file mode 100644 index 000000000000..8c58f338ed29 --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/dispatch/expand_typed_expander/single_set_source_and_single_struct_target.golden @@ -0,0 +1,47 @@ +[ + { + "@level": "info", + "@message": "Expanding", + "@module": "provider.autoflex", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfSetNestedObject[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfTypedExpander]", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpanderSingleStruct" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfSetNestedObject[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfTypedExpander]", + 
"autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpanderSingleStruct" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfSetNestedObject[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfTypedExpander]", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpanderSingleStruct" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/types.SetNestedObjectValueOf[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfTypedExpander]", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpander" + }, + { + "@level": "info", + "@message": "Source implements flex.TypedExpander", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1[0]", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfTypedExpander", + "autoflex.target.path": "Field1", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpander" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/dispatch/expand_typed_expander/top_level_expands_to_nil.golden b/internal/framework/flex/testdata/autoflex/dispatch/expand_typed_expander/top_level_expands_to_nil.golden new file mode 100644 index 000000000000..7cac0e826cfa --- /dev/null +++ 
b/internal/framework/flex/testdata/autoflex/dispatch/expand_typed_expander/top_level_expands_to_nil.golden @@ -0,0 +1,27 @@ +[ + { + "@level": "info", + "@message": "Expanding", + "@module": "provider.autoflex", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfTypedExpanderToNil", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpander" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfTypedExpanderToNil", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpander" + }, + { + "@level": "info", + "@message": "Source implements flex.TypedExpander", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfTypedExpanderToNil", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpander" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/dispatch/expand_typed_expander/top_level_incompatible_struct_target.golden b/internal/framework/flex/testdata/autoflex/dispatch/expand_typed_expander/top_level_incompatible_struct_target.golden new file mode 100644 index 000000000000..a16dd21de3a8 --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/dispatch/expand_typed_expander/top_level_incompatible_struct_target.golden @@ -0,0 +1,27 @@ +[ + { + "@level": "info", + "@message": "Expanding", + "@module": "provider.autoflex", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfTypedExpander", + "autoflex.target.type": 
"*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpanderIncompatible" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfTypedExpander", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpanderIncompatible" + }, + { + "@level": "info", + "@message": "Source implements flex.TypedExpander", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfTypedExpander", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpanderIncompatible" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/dispatch/expand_typed_expander/top_level_struct_target.golden b/internal/framework/flex/testdata/autoflex/dispatch/expand_typed_expander/top_level_struct_target.golden new file mode 100644 index 000000000000..2b668b42561e --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/dispatch/expand_typed_expander/top_level_struct_target.golden @@ -0,0 +1,27 @@ +[ + { + "@level": "info", + "@message": "Expanding", + "@module": "provider.autoflex", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfTypedExpander", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpander" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfTypedExpander", + "autoflex.target.path": "", + "autoflex.target.type": 
"*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpander" + }, + { + "@level": "info", + "@message": "Source implements flex.TypedExpander", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfTypedExpander", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpander" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/dispatch/flatten_flattener/empty_pointer_struct_list_source_and_empty_list_target.golden b/internal/framework/flex/testdata/autoflex/dispatch/flatten_flattener/empty_pointer_struct_list_source_and_empty_list_target.golden new file mode 100644 index 000000000000..23e6c89ef88a --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/dispatch/flatten_flattener/empty_pointer_struct_list_source_and_empty_list_target.golden @@ -0,0 +1,48 @@ +[ + { + "@level": "info", + "@message": "Flattening", + "@module": "provider.autoflex", + "autoflex.source.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpanderPtrSlice", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfExpanderListNestedObject" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpanderPtrSlice", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfExpanderListNestedObject" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": 
"github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpanderPtrSlice", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfExpanderListNestedObject" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "[]*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpander", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/types.ListNestedObjectValueOf[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfFlexer]" + }, + { + "@level": "trace", + "@message": "Flattening nested object collection", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.size": 0, + "autoflex.source.type": "[]*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpander", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/types.ListNestedObjectValueOf[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfFlexer]" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/dispatch/flatten_flattener/empty_pointer_struct_list_source_and_empty_set_target.golden b/internal/framework/flex/testdata/autoflex/dispatch/flatten_flattener/empty_pointer_struct_list_source_and_empty_set_target.golden new file mode 100644 index 000000000000..fe01250ab60f --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/dispatch/flatten_flattener/empty_pointer_struct_list_source_and_empty_set_target.golden @@ -0,0 +1,48 @@ +[ + { + "@level": "info", + "@message": "Flattening", + "@module": "provider.autoflex", + "autoflex.source.type": 
"github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpanderPtrSlice", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfExpanderSetNestedObject" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpanderPtrSlice", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfExpanderSetNestedObject" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpanderPtrSlice", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfExpanderSetNestedObject" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "[]*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpander", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/types.SetNestedObjectValueOf[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfFlexer]" + }, + { + "@level": "trace", + "@message": "Flattening nested object collection", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.size": 0, + "autoflex.source.type": "[]*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpander", + "autoflex.target.path": "Field1", + "autoflex.target.type": 
"github.com/hashicorp/terraform-provider-aws/internal/framework/types.SetNestedObjectValueOf[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfFlexer]" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/dispatch/flatten_flattener/empty_struct_list_source_and_empty_list_target.golden b/internal/framework/flex/testdata/autoflex/dispatch/flatten_flattener/empty_struct_list_source_and_empty_list_target.golden new file mode 100644 index 000000000000..6cb0e36d141b --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/dispatch/flatten_flattener/empty_struct_list_source_and_empty_list_target.golden @@ -0,0 +1,48 @@ +[ + { + "@level": "info", + "@message": "Flattening", + "@module": "provider.autoflex", + "autoflex.source.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpanderStructSlice", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfExpanderListNestedObject" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpanderStructSlice", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfExpanderListNestedObject" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpanderStructSlice", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfExpanderListNestedObject" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + 
"autoflex.source.path": "Field1", + "autoflex.source.type": "[]github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpander", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/types.ListNestedObjectValueOf[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfFlexer]" + }, + { + "@level": "trace", + "@message": "Flattening nested object collection", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.size": 0, + "autoflex.source.type": "[]github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpander", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/types.ListNestedObjectValueOf[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfFlexer]" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/dispatch/flatten_flattener/empty_struct_list_source_and_empty_set_target.golden b/internal/framework/flex/testdata/autoflex/dispatch/flatten_flattener/empty_struct_list_source_and_empty_set_target.golden new file mode 100644 index 000000000000..1f6fa5558634 --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/dispatch/flatten_flattener/empty_struct_list_source_and_empty_set_target.golden @@ -0,0 +1,48 @@ +[ + { + "@level": "info", + "@message": "Flattening", + "@module": "provider.autoflex", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpanderStructSlice", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfExpanderSetNestedObject" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpanderStructSlice", + 
"autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfExpanderSetNestedObject" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpanderStructSlice", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfExpanderSetNestedObject" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "[]github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpander", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/types.SetNestedObjectValueOf[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfFlexer]" + }, + { + "@level": "trace", + "@message": "Flattening nested object collection", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.size": 0, + "autoflex.source.type": "[]github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpander", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/types.SetNestedObjectValueOf[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfFlexer]" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/dispatch/flatten_flattener/nil_pointer_struct_source_and_null_list_target.golden b/internal/framework/flex/testdata/autoflex/dispatch/flatten_flattener/nil_pointer_struct_source_and_null_list_target.golden new file mode 100644 index 000000000000..2c04626d4a67 --- /dev/null +++ 
b/internal/framework/flex/testdata/autoflex/dispatch/flatten_flattener/nil_pointer_struct_source_and_null_list_target.golden @@ -0,0 +1,38 @@ +[ + { + "@level": "info", + "@message": "Flattening", + "@module": "provider.autoflex", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpanderSinglePtr", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfExpanderListNestedObject" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpanderSinglePtr", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfExpanderListNestedObject" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpanderSinglePtr", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfExpanderListNestedObject" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpander", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/types.ListNestedObjectValueOf[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfFlexer]" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/dispatch/flatten_flattener/nil_pointer_struct_source_and_null_set_target.golden 
b/internal/framework/flex/testdata/autoflex/dispatch/flatten_flattener/nil_pointer_struct_source_and_null_set_target.golden new file mode 100644 index 000000000000..0f5087e7280b --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/dispatch/flatten_flattener/nil_pointer_struct_source_and_null_set_target.golden @@ -0,0 +1,38 @@ +[ + { + "@level": "info", + "@message": "Flattening", + "@module": "provider.autoflex", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpanderSinglePtr", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfExpanderSetNestedObject" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpanderSinglePtr", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfExpanderSetNestedObject" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpanderSinglePtr", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfExpanderSetNestedObject" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpander", + "autoflex.target.path": "Field1", + "autoflex.target.type": 
"github.com/hashicorp/terraform-provider-aws/internal/framework/types.SetNestedObjectValueOf[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfFlexer]" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/dispatch/flatten_flattener/nonempty_pointer_struct_list_source_and_nonempty_list_target.golden b/internal/framework/flex/testdata/autoflex/dispatch/flatten_flattener/nonempty_pointer_struct_list_source_and_nonempty_list_target.golden new file mode 100644 index 000000000000..55359a7d3469 --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/dispatch/flatten_flattener/nonempty_pointer_struct_list_source_and_nonempty_list_target.golden @@ -0,0 +1,66 @@ +[ + { + "@level": "info", + "@message": "Flattening", + "@module": "provider.autoflex", + "autoflex.source.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpanderPtrSlice", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfExpanderListNestedObject" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpanderPtrSlice", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfExpanderListNestedObject" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpanderPtrSlice", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfExpanderListNestedObject" + }, + { + "@level": "info", + "@message": "Converting", + "@module": 
"provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "[]*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpander", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/types.ListNestedObjectValueOf[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfFlexer]" + }, + { + "@level": "trace", + "@message": "Flattening nested object collection", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.size": 2, + "autoflex.source.type": "[]*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpander", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/types.ListNestedObjectValueOf[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfFlexer]" + }, + { + "@level": "info", + "@message": "Target implements flex.Flattener", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1[0]", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpander", + "autoflex.target.path": "Field1[0]", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfFlexer" + }, + { + "@level": "info", + "@message": "Target implements flex.Flattener", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1[1]", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpander", + "autoflex.target.path": "Field1[1]", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfFlexer" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/dispatch/flatten_flattener/nonempty_pointer_struct_list_source_and_nonempty_set_target.golden 
b/internal/framework/flex/testdata/autoflex/dispatch/flatten_flattener/nonempty_pointer_struct_list_source_and_nonempty_set_target.golden new file mode 100644 index 000000000000..204ca5b2b33c --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/dispatch/flatten_flattener/nonempty_pointer_struct_list_source_and_nonempty_set_target.golden @@ -0,0 +1,66 @@ +[ + { + "@level": "info", + "@message": "Flattening", + "@module": "provider.autoflex", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpanderPtrSlice", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfExpanderSetNestedObject" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpanderPtrSlice", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfExpanderSetNestedObject" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpanderPtrSlice", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfExpanderSetNestedObject" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "[]*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpander", + "autoflex.target.path": "Field1", + "autoflex.target.type": 
"github.com/hashicorp/terraform-provider-aws/internal/framework/types.SetNestedObjectValueOf[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfFlexer]" + }, + { + "@level": "trace", + "@message": "Flattening nested object collection", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.size": 2, + "autoflex.source.type": "[]*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpander", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/types.SetNestedObjectValueOf[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfFlexer]" + }, + { + "@level": "info", + "@message": "Target implements flex.Flattener", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1[0]", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpander", + "autoflex.target.path": "Field1[0]", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfFlexer" + }, + { + "@level": "info", + "@message": "Target implements flex.Flattener", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1[1]", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpander", + "autoflex.target.path": "Field1[1]", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfFlexer" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/dispatch/flatten_flattener/nonempty_struct_list_source_and_nonempty_list_target.golden b/internal/framework/flex/testdata/autoflex/dispatch/flatten_flattener/nonempty_struct_list_source_and_nonempty_list_target.golden new file mode 100644 index 000000000000..a747025227c9 --- /dev/null +++ 
b/internal/framework/flex/testdata/autoflex/dispatch/flatten_flattener/nonempty_struct_list_source_and_nonempty_list_target.golden @@ -0,0 +1,66 @@ +[ + { + "@level": "info", + "@message": "Flattening", + "@module": "provider.autoflex", + "autoflex.source.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpanderStructSlice", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfExpanderListNestedObject" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpanderStructSlice", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfExpanderListNestedObject" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpanderStructSlice", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfExpanderListNestedObject" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "[]github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpander", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/types.ListNestedObjectValueOf[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfFlexer]" + }, + { + "@level": "trace", + "@message": "Flattening nested object collection", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + 
"autoflex.source.size": 2, + "autoflex.source.type": "[]github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpander", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/types.ListNestedObjectValueOf[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfFlexer]" + }, + { + "@level": "info", + "@message": "Target implements flex.Flattener", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1[0]", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpander", + "autoflex.target.path": "Field1[0]", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfFlexer" + }, + { + "@level": "info", + "@message": "Target implements flex.Flattener", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1[1]", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpander", + "autoflex.target.path": "Field1[1]", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfFlexer" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/dispatch/flatten_flattener/nonempty_struct_list_source_and_set_target.golden b/internal/framework/flex/testdata/autoflex/dispatch/flatten_flattener/nonempty_struct_list_source_and_set_target.golden new file mode 100644 index 000000000000..edbdf2508df8 --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/dispatch/flatten_flattener/nonempty_struct_list_source_and_set_target.golden @@ -0,0 +1,66 @@ +[ + { + "@level": "info", + "@message": "Flattening", + "@module": "provider.autoflex", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpanderStructSlice", + "autoflex.target.type": 
"*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfExpanderSetNestedObject" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpanderStructSlice", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfExpanderSetNestedObject" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpanderStructSlice", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfExpanderSetNestedObject" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "[]github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpander", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/types.SetNestedObjectValueOf[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfFlexer]" + }, + { + "@level": "trace", + "@message": "Flattening nested object collection", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.size": 2, + "autoflex.source.type": "[]github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpander", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/types.SetNestedObjectValueOf[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfFlexer]" + }, + { + "@level": "info", 
+ "@message": "Target implements flex.Flattener", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1[0]", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpander", + "autoflex.target.path": "Field1[0]", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfFlexer" + }, + { + "@level": "info", + "@message": "Target implements flex.Flattener", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1[1]", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpander", + "autoflex.target.path": "Field1[1]", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfFlexer" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/dispatch/flatten_flattener/pointer_struct_source_and_object_value_target.golden b/internal/framework/flex/testdata/autoflex/dispatch/flatten_flattener/pointer_struct_source_and_object_value_target.golden new file mode 100644 index 000000000000..1f6296ea1c02 --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/dispatch/flatten_flattener/pointer_struct_source_and_object_value_target.golden @@ -0,0 +1,47 @@ +[ + { + "@level": "info", + "@message": "Flattening", + "@module": "provider.autoflex", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpanderSinglePtr", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfExpanderObjectValue" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpanderSinglePtr", + "autoflex.target.path": "", + "autoflex.target.type": 
"*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfExpanderObjectValue" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpanderSinglePtr", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfExpanderObjectValue" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpander", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/types.ObjectValueOf[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfFlexer]" + }, + { + "@level": "info", + "@message": "Target implements flex.Flattener", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpander", + "autoflex.target.path": "Field1", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfFlexer" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/dispatch/flatten_flattener/single_pointer_struct_source_and_single_list_target.golden b/internal/framework/flex/testdata/autoflex/dispatch/flatten_flattener/single_pointer_struct_source_and_single_list_target.golden new file mode 100644 index 000000000000..8b41f9ab409e --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/dispatch/flatten_flattener/single_pointer_struct_source_and_single_list_target.golden @@ -0,0 +1,47 @@ +[ + { + "@level": "info", + "@message": 
"Flattening", + "@module": "provider.autoflex", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpanderSinglePtr", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfExpanderListNestedObject" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpanderSinglePtr", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfExpanderListNestedObject" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpanderSinglePtr", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfExpanderListNestedObject" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpander", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/types.ListNestedObjectValueOf[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfFlexer]" + }, + { + "@level": "info", + "@message": "Target implements flex.Flattener", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpander", + "autoflex.target.path": "Field1", + "autoflex.target.type": 
"*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfFlexer" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/dispatch/flatten_flattener/single_pointer_struct_source_and_single_set_target.golden b/internal/framework/flex/testdata/autoflex/dispatch/flatten_flattener/single_pointer_struct_source_and_single_set_target.golden new file mode 100644 index 000000000000..b799f8a0f872 --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/dispatch/flatten_flattener/single_pointer_struct_source_and_single_set_target.golden @@ -0,0 +1,47 @@ +[ + { + "@level": "info", + "@message": "Flattening", + "@module": "provider.autoflex", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpanderSinglePtr", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfExpanderSetNestedObject" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpanderSinglePtr", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfExpanderSetNestedObject" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpanderSinglePtr", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfExpanderSetNestedObject" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": 
"*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpander", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/types.SetNestedObjectValueOf[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfFlexer]" + }, + { + "@level": "info", + "@message": "Target implements flex.Flattener", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpander", + "autoflex.target.path": "Field1", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfFlexer" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/dispatch/flatten_flattener/single_struct_source_and_single_list_target.golden b/internal/framework/flex/testdata/autoflex/dispatch/flatten_flattener/single_struct_source_and_single_list_target.golden new file mode 100644 index 000000000000..da7ee1699c29 --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/dispatch/flatten_flattener/single_struct_source_and_single_list_target.golden @@ -0,0 +1,47 @@ +[ + { + "@level": "info", + "@message": "Flattening", + "@module": "provider.autoflex", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpanderSingleStruct", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfExpanderListNestedObject" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpanderSingleStruct", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfExpanderListNestedObject" + }, + { + "@level": "trace", + "@message": 
"Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpanderSingleStruct", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfExpanderListNestedObject" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpander", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/types.ListNestedObjectValueOf[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfFlexer]" + }, + { + "@level": "info", + "@message": "Target implements flex.Flattener", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpander", + "autoflex.target.path": "Field1", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfFlexer" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/dispatch/flatten_flattener/single_struct_source_and_single_set_target.golden b/internal/framework/flex/testdata/autoflex/dispatch/flatten_flattener/single_struct_source_and_single_set_target.golden new file mode 100644 index 000000000000..db4742966c57 --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/dispatch/flatten_flattener/single_struct_source_and_single_set_target.golden @@ -0,0 +1,47 @@ +[ + { + "@level": "info", + "@message": "Flattening", + "@module": "provider.autoflex", + "autoflex.source.type": 
"github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpanderSingleStruct", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfExpanderSetNestedObject" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpanderSingleStruct", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfExpanderSetNestedObject" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpanderSingleStruct", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfExpanderSetNestedObject" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpander", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/types.SetNestedObjectValueOf[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfFlexer]" + }, + { + "@level": "info", + "@message": "Target implements flex.Flattener", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpander", + "autoflex.target.path": "Field1", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfFlexer" + } +] \ No newline at end of file 
diff --git a/internal/framework/flex/testdata/autoflex/dispatch/flatten_flattener/struct_source_and_object_value_target.golden b/internal/framework/flex/testdata/autoflex/dispatch/flatten_flattener/struct_source_and_object_value_target.golden new file mode 100644 index 000000000000..dff33365e7ef --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/dispatch/flatten_flattener/struct_source_and_object_value_target.golden @@ -0,0 +1,47 @@ +[ + { + "@level": "info", + "@message": "Flattening", + "@module": "provider.autoflex", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpanderSingleStruct", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfExpanderObjectValue" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpanderSingleStruct", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfExpanderObjectValue" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpanderSingleStruct", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfExpanderObjectValue" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpander", + "autoflex.target.path": "Field1", + "autoflex.target.type": 
"github.com/hashicorp/terraform-provider-aws/internal/framework/types.ObjectValueOf[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfFlexer]" + }, + { + "@level": "info", + "@message": "Target implements flex.Flattener", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpander", + "autoflex.target.path": "Field1", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfFlexer" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/dispatch/flatten_flattener/top_level_incompatible_struct_target.golden b/internal/framework/flex/testdata/autoflex/dispatch/flatten_flattener/top_level_incompatible_struct_target.golden new file mode 100644 index 000000000000..8d2c2eee9cf7 --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/dispatch/flatten_flattener/top_level_incompatible_struct_target.golden @@ -0,0 +1,27 @@ +[ + { + "@level": "info", + "@message": "Flattening", + "@module": "provider.autoflex", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpanderIncompatible", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfFlexer" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpanderIncompatible", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfFlexer" + }, + { + "@level": "info", + "@message": "Target implements flex.Flattener", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": 
"github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpanderIncompatible", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfFlexer" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/dispatch/flatten_flattener/top_level_struct_source.golden b/internal/framework/flex/testdata/autoflex/dispatch/flatten_flattener/top_level_struct_source.golden new file mode 100644 index 000000000000..7b46c42e9435 --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/dispatch/flatten_flattener/top_level_struct_source.golden @@ -0,0 +1,27 @@ +[ + { + "@level": "info", + "@message": "Flattening", + "@module": "provider.autoflex", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpander", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfFlexer" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpander", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfFlexer" + }, + { + "@level": "info", + "@message": "Target implements flex.Flattener", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsExpander", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfFlexer" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/dispatch/flatten_interface/empty_interface_list_source_and_empty_list_target.golden 
b/internal/framework/flex/testdata/autoflex/dispatch/flatten_interface/empty_interface_list_source_and_empty_list_target.golden new file mode 100644 index 000000000000..f343de973cd7 --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/dispatch/flatten_interface/empty_interface_list_source_and_empty_list_target.golden @@ -0,0 +1,48 @@ +[ + { + "@level": "info", + "@message": "Flattening", + "@module": "provider.autoflex", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceSlice", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfListNestedObject[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfInterfaceFlexer]" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceSlice", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfListNestedObject[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfInterfaceFlexer]" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceSlice", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfListNestedObject[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfInterfaceFlexer]" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": 
"[]github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceInterface", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/types.ListNestedObjectValueOf[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfInterfaceFlexer]" + }, + { + "@level": "trace", + "@message": "Flattening nested object collection", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.size": 0, + "autoflex.source.type": "[]github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceInterface", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/types.ListNestedObjectValueOf[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfInterfaceFlexer]" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/dispatch/flatten_interface/empty_interface_list_source_and_empty_set_target.golden b/internal/framework/flex/testdata/autoflex/dispatch/flatten_interface/empty_interface_list_source_and_empty_set_target.golden new file mode 100644 index 000000000000..2c5d83aa605c --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/dispatch/flatten_interface/empty_interface_list_source_and_empty_set_target.golden @@ -0,0 +1,48 @@ +[ + { + "@level": "info", + "@message": "Flattening", + "@module": "provider.autoflex", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceSlice", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfSetNestedObject[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfInterfaceFlexer]" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": 
"github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceSlice", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfSetNestedObject[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfInterfaceFlexer]" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceSlice", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfSetNestedObject[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfInterfaceFlexer]" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "[]github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceInterface", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/types.SetNestedObjectValueOf[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfInterfaceFlexer]" + }, + { + "@level": "trace", + "@message": "Flattening nested object collection", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.size": 0, + "autoflex.source.type": "[]github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceInterface", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/types.SetNestedObjectValueOf[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfInterfaceFlexer]" + } +] \ No newline at end of file diff --git 
a/internal/framework/flex/testdata/autoflex/dispatch/flatten_interface/interface_source_and_nested_object_target.golden b/internal/framework/flex/testdata/autoflex/dispatch/flatten_interface/interface_source_and_nested_object_target.golden new file mode 100644 index 000000000000..2682355c4ce7 --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/dispatch/flatten_interface/interface_source_and_nested_object_target.golden @@ -0,0 +1,47 @@ +[ + { + "@level": "info", + "@message": "Flattening", + "@module": "provider.autoflex", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceSingle", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfObjectValue[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfInterfaceFlexer]" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceSingle", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfObjectValue[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfInterfaceFlexer]" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceSingle", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfObjectValue[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfInterfaceFlexer]" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + 
"autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceInterface", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/types.ObjectValueOf[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfInterfaceFlexer]" + }, + { + "@level": "info", + "@message": "Target implements flex.Flattener", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceInterface", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/types.ObjectValueOf[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfInterfaceFlexer]" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/dispatch/flatten_interface/nil_interface_list_source_and_empty_list_target.golden b/internal/framework/flex/testdata/autoflex/dispatch/flatten_interface/nil_interface_list_source_and_empty_list_target.golden new file mode 100644 index 000000000000..d04531c3214b --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/dispatch/flatten_interface/nil_interface_list_source_and_empty_list_target.golden @@ -0,0 +1,47 @@ +[ + { + "@level": "info", + "@message": "Flattening", + "@module": "provider.autoflex", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceSlice", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfListNestedObject[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfInterfaceFlexer]" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": 
"github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceSlice", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfListNestedObject[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfInterfaceFlexer]" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceSlice", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfListNestedObject[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfInterfaceFlexer]" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "[]github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceInterface", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/types.ListNestedObjectValueOf[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfInterfaceFlexer]" + }, + { + "@level": "trace", + "@message": "Flattening with NullValue", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "[]github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceInterface", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/types.ListNestedObjectValueOf[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfInterfaceFlexer]" + } +] \ No newline at end of file diff --git 
a/internal/framework/flex/testdata/autoflex/dispatch/flatten_interface/nil_interface_list_source_and_empty_set_target.golden b/internal/framework/flex/testdata/autoflex/dispatch/flatten_interface/nil_interface_list_source_and_empty_set_target.golden new file mode 100644 index 000000000000..7b4cfca7e6c5 --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/dispatch/flatten_interface/nil_interface_list_source_and_empty_set_target.golden @@ -0,0 +1,47 @@ +[ + { + "@level": "info", + "@message": "Flattening", + "@module": "provider.autoflex", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceSlice", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfSetNestedObject[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfInterfaceFlexer]" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceSlice", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfSetNestedObject[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfInterfaceFlexer]" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceSlice", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfSetNestedObject[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfInterfaceFlexer]" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + 
"autoflex.source.path": "Field1", + "autoflex.source.type": "[]github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceInterface", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/types.SetNestedObjectValueOf[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfInterfaceFlexer]" + }, + { + "@level": "trace", + "@message": "Flattening with NullValue", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "[]github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceInterface", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/types.SetNestedObjectValueOf[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfInterfaceFlexer]" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/dispatch/flatten_interface/nil_interface_source_and_list_target.golden b/internal/framework/flex/testdata/autoflex/dispatch/flatten_interface/nil_interface_source_and_list_target.golden new file mode 100644 index 000000000000..d042084ea76e --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/dispatch/flatten_interface/nil_interface_source_and_list_target.golden @@ -0,0 +1,38 @@ +[ + { + "@level": "info", + "@message": "Flattening", + "@module": "provider.autoflex", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceSingle", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfListNestedObject[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfInterfaceFlexer]" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": 
"github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceSingle", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfListNestedObject[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfInterfaceFlexer]" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceSingle", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfListNestedObject[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfInterfaceFlexer]" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceInterface", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/types.ListNestedObjectValueOf[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfInterfaceFlexer]" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/dispatch/flatten_interface/nil_interface_source_and_nested_object_target.golden b/internal/framework/flex/testdata/autoflex/dispatch/flatten_interface/nil_interface_source_and_nested_object_target.golden new file mode 100644 index 000000000000..85c9b6c7c5cb --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/dispatch/flatten_interface/nil_interface_source_and_nested_object_target.golden @@ -0,0 +1,38 @@ +[ + { + "@level": "info", + "@message": "Flattening", + "@module": "provider.autoflex", + "autoflex.source.type": 
"github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceSingle", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfObjectValue[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfInterfaceFlexer]" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceSingle", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfObjectValue[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfInterfaceFlexer]" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceSingle", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfObjectValue[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfInterfaceFlexer]" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceInterface", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/types.ObjectValueOf[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfInterfaceFlexer]" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/dispatch/flatten_interface/nil_interface_source_and_nonflattener_list_target.golden 
b/internal/framework/flex/testdata/autoflex/dispatch/flatten_interface/nil_interface_source_and_nonflattener_list_target.golden new file mode 100644 index 000000000000..2a12c67661b5 --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/dispatch/flatten_interface/nil_interface_source_and_nonflattener_list_target.golden @@ -0,0 +1,38 @@ +[ + { + "@level": "info", + "@message": "Flattening", + "@module": "provider.autoflex", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceSingle", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfListNestedObject[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfSingleStringField]" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceSingle", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfListNestedObject[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfSingleStringField]" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceSingle", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfListNestedObject[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfSingleStringField]" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": 
"github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceInterface", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/types.ListNestedObjectValueOf[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfSingleStringField]" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/dispatch/flatten_interface/nil_interface_source_and_set_target.golden b/internal/framework/flex/testdata/autoflex/dispatch/flatten_interface/nil_interface_source_and_set_target.golden new file mode 100644 index 000000000000..6998b837fede --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/dispatch/flatten_interface/nil_interface_source_and_set_target.golden @@ -0,0 +1,38 @@ +[ + { + "@level": "info", + "@message": "Flattening", + "@module": "provider.autoflex", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceSingle", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfSetNestedObject[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfInterfaceFlexer]" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceSingle", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfSetNestedObject[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfInterfaceFlexer]" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceSingle", + 
"autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfSetNestedObject[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfInterfaceFlexer]" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceInterface", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/types.SetNestedObjectValueOf[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfInterfaceFlexer]" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/dispatch/flatten_interface/nonempty_interface_list_source_and_nonempty_list_target.golden b/internal/framework/flex/testdata/autoflex/dispatch/flatten_interface/nonempty_interface_list_source_and_nonempty_list_target.golden new file mode 100644 index 000000000000..31fa554b7f5e --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/dispatch/flatten_interface/nonempty_interface_list_source_and_nonempty_list_target.golden @@ -0,0 +1,66 @@ +[ + { + "@level": "info", + "@message": "Flattening", + "@module": "provider.autoflex", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceSlice", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfListNestedObject[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfInterfaceFlexer]" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceSlice", + "autoflex.target.path": "", + "autoflex.target.type": 
"*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfListNestedObject[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfInterfaceFlexer]" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceSlice", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfListNestedObject[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfInterfaceFlexer]" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "[]github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceInterface", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/types.ListNestedObjectValueOf[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfInterfaceFlexer]" + }, + { + "@level": "trace", + "@message": "Flattening nested object collection", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.size": 2, + "autoflex.source.type": "[]github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceInterface", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/types.ListNestedObjectValueOf[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfInterfaceFlexer]" + }, + { + "@level": "info", + "@message": "Target implements flex.Flattener", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1[0]", + "autoflex.source.type": 
"github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceInterfaceImpl", + "autoflex.target.path": "Field1[0]", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfInterfaceFlexer" + }, + { + "@level": "info", + "@message": "Target implements flex.Flattener", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1[1]", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceInterfaceImpl", + "autoflex.target.path": "Field1[1]", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfInterfaceFlexer" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/dispatch/flatten_interface/nonempty_interface_list_source_and_nonempty_set_target.golden b/internal/framework/flex/testdata/autoflex/dispatch/flatten_interface/nonempty_interface_list_source_and_nonempty_set_target.golden new file mode 100644 index 000000000000..465de2af1aef --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/dispatch/flatten_interface/nonempty_interface_list_source_and_nonempty_set_target.golden @@ -0,0 +1,66 @@ +[ + { + "@level": "info", + "@message": "Flattening", + "@module": "provider.autoflex", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceSlice", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfSetNestedObject[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfInterfaceFlexer]" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceSlice", + "autoflex.target.path": "", + "autoflex.target.type": 
"*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfSetNestedObject[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfInterfaceFlexer]" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceSlice", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfSetNestedObject[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfInterfaceFlexer]" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "[]github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceInterface", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/types.SetNestedObjectValueOf[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfInterfaceFlexer]" + }, + { + "@level": "trace", + "@message": "Flattening nested object collection", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.size": 2, + "autoflex.source.type": "[]github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceInterface", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/types.SetNestedObjectValueOf[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfInterfaceFlexer]" + }, + { + "@level": "info", + "@message": "Target implements flex.Flattener", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1[0]", + "autoflex.source.type": 
"github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceInterfaceImpl", + "autoflex.target.path": "Field1[0]", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfInterfaceFlexer" + }, + { + "@level": "info", + "@message": "Target implements flex.Flattener", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1[1]", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceInterfaceImpl", + "autoflex.target.path": "Field1[1]", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfInterfaceFlexer" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/dispatch/flatten_interface/single_interface_source_and_nonflattener_list_target.golden b/internal/framework/flex/testdata/autoflex/dispatch/flatten_interface/single_interface_source_and_nonflattener_list_target.golden new file mode 100644 index 000000000000..6a31b309a410 --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/dispatch/flatten_interface/single_interface_source_and_nonflattener_list_target.golden @@ -0,0 +1,55 @@ +[ + { + "@level": "info", + "@message": "Flattening", + "@module": "provider.autoflex", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceSingle", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfListNestedObject[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfSingleStringField]" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceSingle", + "autoflex.target.path": "", + "autoflex.target.type": 
"*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfListNestedObject[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfSingleStringField]" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceSingle", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfListNestedObject[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfSingleStringField]" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceInterface", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/types.ListNestedObjectValueOf[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfSingleStringField]" + }, + { + "@level": "error", + "@message": "AutoFlex Flatten; incompatible types", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceInterface", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/types.ListNestedObjectValueOf[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfSingleStringField]", + "from": 20, + "to": { + "ElemType": { + "AttrTypes": { + "field1": {} + } + } + } + } +] \ No newline at end of file diff --git 
a/internal/framework/flex/testdata/autoflex/dispatch/flatten_interface/single_interface_source_and_single_list_target.golden b/internal/framework/flex/testdata/autoflex/dispatch/flatten_interface/single_interface_source_and_single_list_target.golden new file mode 100644 index 000000000000..c298c447fc8b --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/dispatch/flatten_interface/single_interface_source_and_single_list_target.golden @@ -0,0 +1,47 @@ +[ + { + "@level": "info", + "@message": "Flattening", + "@module": "provider.autoflex", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceSingle", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfListNestedObject[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfInterfaceFlexer]" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceSingle", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfListNestedObject[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfInterfaceFlexer]" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceSingle", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfListNestedObject[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfInterfaceFlexer]" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + 
"autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceInterface", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/types.ListNestedObjectValueOf[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfInterfaceFlexer]" + }, + { + "@level": "info", + "@message": "Target implements flex.Flattener", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceInterface", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/types.ListNestedObjectValueOf[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfInterfaceFlexer]" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/dispatch/flatten_interface/single_interface_source_and_single_set_target.golden b/internal/framework/flex/testdata/autoflex/dispatch/flatten_interface/single_interface_source_and_single_set_target.golden new file mode 100644 index 000000000000..29f3dcbca7c2 --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/dispatch/flatten_interface/single_interface_source_and_single_set_target.golden @@ -0,0 +1,47 @@ +[ + { + "@level": "info", + "@message": "Flattening", + "@module": "provider.autoflex", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceSingle", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfSetNestedObject[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfInterfaceFlexer]" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": 
"github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceSingle", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfSetNestedObject[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfInterfaceFlexer]" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceSingle", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfSetNestedObject[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfInterfaceFlexer]" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceInterface", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/types.SetNestedObjectValueOf[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfInterfaceFlexer]" + }, + { + "@level": "info", + "@message": "Target implements flex.Flattener", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsInterfaceInterface", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/types.SetNestedObjectValueOf[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfInterfaceFlexer]" + } +] \ No newline at end of file diff --git 
a/internal/framework/flex/testdata/autoflex/dispatch/flatten_interface_contract/source_struct_field_to_nonattrvalue.golden b/internal/framework/flex/testdata/autoflex/dispatch/flatten_interface_contract/source_struct_field_to_nonattrvalue.golden new file mode 100644 index 000000000000..07a24c3cdc82 --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/dispatch/flatten_interface_contract/source_struct_field_to_nonattrvalue.golden @@ -0,0 +1,38 @@ +[ + { + "@level": "info", + "@message": "Flattening", + "@module": "provider.autoflex", + "autoflex.source.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsRFC3339TimeValue", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsRFC3339TimeValue" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsRFC3339TimeValue", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsRFC3339TimeValue" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "CreationDateTime", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsRFC3339TimeValue", + "autoflex.target.fieldname": "CreationDateTime", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsRFC3339TimeValue" + }, + { + "@level": "error", + "@message": "Target does not implement attr.Value", + "@module": "provider.autoflex", + "autoflex.source.path": "CreationDateTime", + "autoflex.source.type": "time.Time", + "autoflex.target.path": "CreationDateTime", + "autoflex.target.type": "time.Time" + } +] \ No newline at end of file diff --git 
a/internal/framework/flex/testdata/autoflex/dispatch/flatten_interface_contract/source_struct_field_to_nonattrvalue_ptr.golden b/internal/framework/flex/testdata/autoflex/dispatch/flatten_interface_contract/source_struct_field_to_nonattrvalue_ptr.golden new file mode 100644 index 000000000000..865449d4019f --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/dispatch/flatten_interface_contract/source_struct_field_to_nonattrvalue_ptr.golden @@ -0,0 +1,38 @@ +[ + { + "@level": "info", + "@message": "Flattening", + "@module": "provider.autoflex", + "autoflex.source.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsRFC3339TimeValue", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsRFC3339TimePointer" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsRFC3339TimeValue", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsRFC3339TimePointer" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "CreationDateTime", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsRFC3339TimeValue", + "autoflex.target.fieldname": "CreationDateTime", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsRFC3339TimePointer" + }, + { + "@level": "error", + "@message": "Target does not implement attr.Value", + "@module": "provider.autoflex", + "autoflex.source.path": "CreationDateTime", + "autoflex.source.type": "time.Time", + "autoflex.target.path": "CreationDateTime", + "autoflex.target.type": "*time.Time" + } +] \ No newline at end of file 
diff --git a/internal/framework/flex/testdata/autoflex/dispatch/flatten_interface_contract/source_struct_ptr_field_to_nonattrvalue.golden b/internal/framework/flex/testdata/autoflex/dispatch/flatten_interface_contract/source_struct_ptr_field_to_nonattrvalue.golden new file mode 100644 index 000000000000..1a4ddd7e9c12 --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/dispatch/flatten_interface_contract/source_struct_ptr_field_to_nonattrvalue.golden @@ -0,0 +1,38 @@ +[ + { + "@level": "info", + "@message": "Flattening", + "@module": "provider.autoflex", + "autoflex.source.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsRFC3339TimePointer", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsRFC3339TimeValue" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsRFC3339TimePointer", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsRFC3339TimeValue" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "CreationDateTime", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsRFC3339TimePointer", + "autoflex.target.fieldname": "CreationDateTime", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsRFC3339TimeValue" + }, + { + "@level": "error", + "@message": "Target does not implement attr.Value", + "@module": "provider.autoflex", + "autoflex.source.path": "CreationDateTime", + "autoflex.source.type": "*time.Time", + "autoflex.target.path": "CreationDateTime", + "autoflex.target.type": "time.Time" + } +] \ No newline at 
end of file diff --git a/internal/framework/flex/testdata/autoflex/dispatch/flatten_interface_contract/source_struct_ptr_field_to_nonattrvalue_ptr.golden b/internal/framework/flex/testdata/autoflex/dispatch/flatten_interface_contract/source_struct_ptr_field_to_nonattrvalue_ptr.golden new file mode 100644 index 000000000000..54cc7739a973 --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/dispatch/flatten_interface_contract/source_struct_ptr_field_to_nonattrvalue_ptr.golden @@ -0,0 +1,38 @@ +[ + { + "@level": "info", + "@message": "Flattening", + "@module": "provider.autoflex", + "autoflex.source.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsRFC3339TimePointer", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsRFC3339TimePointer" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsRFC3339TimePointer", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsRFC3339TimePointer" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "CreationDateTime", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsRFC3339TimePointer", + "autoflex.target.fieldname": "CreationDateTime", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsRFC3339TimePointer" + }, + { + "@level": "error", + "@message": "Target does not implement attr.Value", + "@module": "provider.autoflex", + "autoflex.source.path": "CreationDateTime", + "autoflex.source.type": "*time.Time", + "autoflex.target.path": "CreationDateTime", + "autoflex.target.type": 
"*time.Time" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/dispatch/flatten_interface_contract/target_field_does_not_implement_attrvalue_target.golden b/internal/framework/flex/testdata/autoflex/dispatch/flatten_interface_contract/target_field_does_not_implement_attrvalue_target.golden new file mode 100644 index 000000000000..a3b6f1202250 --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/dispatch/flatten_interface_contract/target_field_does_not_implement_attrvalue_target.golden @@ -0,0 +1,38 @@ +[ + { + "@level": "info", + "@message": "Flattening", + "@module": "provider.autoflex", + "autoflex.source.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsSingleStringValue", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsSingleStringValue" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsSingleStringValue", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsSingleStringValue" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsSingleStringValue", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsSingleStringValue" + }, + { + "@level": "error", + "@message": "Target does not implement attr.Value", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "string", + "autoflex.target.path": "Field1", + "autoflex.target.type": 
"string" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/dispatch/flatten_logging_collections/slice_or_map_of_primitive_types_source_and_collection_of_primitive_types_target.golden b/internal/framework/flex/testdata/autoflex/dispatch/flatten_logging_collections/slice_or_map_of_primitive_types_source_and_collection_of_primitive_types_target.golden new file mode 100644 index 000000000000..aa867dc50eb4 --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/dispatch/flatten_logging_collections/slice_or_map_of_primitive_types_source_and_collection_of_primitive_types_target.golden @@ -0,0 +1,198 @@ +[ + { + "@level": "info", + "@message": "Flattening", + "@module": "provider.autoflex", + "autoflex.source.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsCollectionsOfPrimitiveElements", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfCollectionsOfPrimitiveElements" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsCollectionsOfPrimitiveElements", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfCollectionsOfPrimitiveElements" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsCollectionsOfPrimitiveElements", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfCollectionsOfPrimitiveElements" + }, + { + "@level": "info", + "@message": "Converting", + "@module": 
"provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "[]string", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.ListValue" + }, + { + "@level": "trace", + "@message": "Flattening with ListValue", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.size": 2, + "autoflex.source.type": "[]string", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.ListValue" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field2", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsCollectionsOfPrimitiveElements", + "autoflex.target.fieldname": "Field2", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfCollectionsOfPrimitiveElements" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field2", + "autoflex.source.type": "[]*string", + "autoflex.target.path": "Field2", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.ListValue" + }, + { + "@level": "trace", + "@message": "Flattening with ListValue", + "@module": "provider.autoflex", + "autoflex.source.path": "Field2", + "autoflex.source.size": 2, + "autoflex.source.type": "[]*string", + "autoflex.target.path": "Field2", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.ListValue" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field3", + "autoflex.source.path": "", + "autoflex.source.type": 
"github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsCollectionsOfPrimitiveElements", + "autoflex.target.fieldname": "Field3", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfCollectionsOfPrimitiveElements" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field3", + "autoflex.source.type": "[]string", + "autoflex.target.path": "Field3", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.SetValue" + }, + { + "@level": "trace", + "@message": "Flattening with SetValue", + "@module": "provider.autoflex", + "autoflex.source.path": "Field3", + "autoflex.source.size": 2, + "autoflex.source.type": "[]string", + "autoflex.target.path": "Field3", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.SetValue" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field4", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsCollectionsOfPrimitiveElements", + "autoflex.target.fieldname": "Field4", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfCollectionsOfPrimitiveElements" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field4", + "autoflex.source.type": "[]*string", + "autoflex.target.path": "Field4", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.SetValue" + }, + { + "@level": "trace", + "@message": "Flattening with SetValue", + "@module": "provider.autoflex", + "autoflex.source.path": "Field4", + "autoflex.source.size": 2, + "autoflex.source.type": "[]*string", + "autoflex.target.path": 
"Field4", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.SetValue" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field5", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsCollectionsOfPrimitiveElements", + "autoflex.target.fieldname": "Field5", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfCollectionsOfPrimitiveElements" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field5", + "autoflex.source.type": "map[string]string", + "autoflex.target.path": "Field5", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.MapValue" + }, + { + "@level": "trace", + "@message": "Flattening with MapValue", + "@module": "provider.autoflex", + "autoflex.source.path": "Field5", + "autoflex.source.size": 2, + "autoflex.source.type": "map[string]string", + "autoflex.target.path": "Field5", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.MapValue" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field6", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsCollectionsOfPrimitiveElements", + "autoflex.target.fieldname": "Field6", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfCollectionsOfPrimitiveElements" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field6", + "autoflex.source.type": "map[string]*string", + "autoflex.target.path": "Field6", + 
"autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.MapValue" + }, + { + "@level": "trace", + "@message": "Flattening with MapValue", + "@module": "provider.autoflex", + "autoflex.source.path": "Field6", + "autoflex.source.size": 2, + "autoflex.source.type": "map[string]*string", + "autoflex.target.path": "Field6", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.MapValue" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/dispatch/flatten_logging_collections/zero_value_slice_or_map_of_primitive_types_source_and_collection_of_primtive_types_target.golden b/internal/framework/flex/testdata/autoflex/dispatch/flatten_logging_collections/zero_value_slice_or_map_of_primitive_types_source_and_collection_of_primtive_types_target.golden new file mode 100644 index 000000000000..3327a2557bec --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/dispatch/flatten_logging_collections/zero_value_slice_or_map_of_primitive_types_source_and_collection_of_primtive_types_target.golden @@ -0,0 +1,192 @@ +[ + { + "@level": "info", + "@message": "Flattening", + "@module": "provider.autoflex", + "autoflex.source.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsCollectionsOfPrimitiveElements", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfCollectionsOfPrimitiveElements" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsCollectionsOfPrimitiveElements", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfCollectionsOfPrimitiveElements" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + 
"autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsCollectionsOfPrimitiveElements", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfCollectionsOfPrimitiveElements" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "[]string", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.ListValue" + }, + { + "@level": "trace", + "@message": "Flattening with ListNull", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "[]string", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.ListValue" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field2", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsCollectionsOfPrimitiveElements", + "autoflex.target.fieldname": "Field2", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfCollectionsOfPrimitiveElements" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field2", + "autoflex.source.type": "[]*string", + "autoflex.target.path": "Field2", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.ListValue" + }, + { + "@level": "trace", + "@message": "Flattening with ListNull", + "@module": "provider.autoflex", + "autoflex.source.path": "Field2", + "autoflex.source.type": 
"[]*string", + "autoflex.target.path": "Field2", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.ListValue" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field3", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsCollectionsOfPrimitiveElements", + "autoflex.target.fieldname": "Field3", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfCollectionsOfPrimitiveElements" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field3", + "autoflex.source.type": "[]string", + "autoflex.target.path": "Field3", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.SetValue" + }, + { + "@level": "trace", + "@message": "Flattening with SetNull", + "@module": "provider.autoflex", + "autoflex.source.path": "Field3", + "autoflex.source.type": "[]string", + "autoflex.target.path": "Field3", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.SetValue" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field4", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsCollectionsOfPrimitiveElements", + "autoflex.target.fieldname": "Field4", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfCollectionsOfPrimitiveElements" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field4", + "autoflex.source.type": "[]*string", + "autoflex.target.path": "Field4", + "autoflex.target.type": 
"github.com/hashicorp/terraform-plugin-framework/types/basetypes.SetValue" + }, + { + "@level": "trace", + "@message": "Flattening with SetNull", + "@module": "provider.autoflex", + "autoflex.source.path": "Field4", + "autoflex.source.type": "[]*string", + "autoflex.target.path": "Field4", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.SetValue" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field5", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsCollectionsOfPrimitiveElements", + "autoflex.target.fieldname": "Field5", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfCollectionsOfPrimitiveElements" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field5", + "autoflex.source.type": "map[string]string", + "autoflex.target.path": "Field5", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.MapValue" + }, + { + "@level": "trace", + "@message": "Flattening with MapNull", + "@module": "provider.autoflex", + "autoflex.source.path": "Field5", + "autoflex.source.type": "map[string]string", + "autoflex.target.path": "Field5", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.MapValue" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field6", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsCollectionsOfPrimitiveElements", + "autoflex.target.fieldname": "Field6", + "autoflex.target.path": "", + "autoflex.target.type": 
"*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfCollectionsOfPrimitiveElements" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field6", + "autoflex.source.type": "map[string]*string", + "autoflex.target.path": "Field6", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.MapValue" + }, + { + "@level": "trace", + "@message": "Flattening with MapNull", + "@module": "provider.autoflex", + "autoflex.source.path": "Field6", + "autoflex.source.type": "map[string]*string", + "autoflex.target.path": "Field6", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.MapValue" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/maps/expand_map_block/map_block_enum_key.golden b/internal/framework/flex/testdata/autoflex/maps/expand_map_block/map_block_enum_key.golden new file mode 100644 index 000000000000..8dbfbb2f169f --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/maps/expand_map_block/map_block_enum_key.golden @@ -0,0 +1,138 @@ +[ + { + "@level": "info", + "@message": "Expanding", + "@module": "provider.autoflex", + "autoflex.source.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfMapBlockListEnumKey", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsMapBlockValues" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfMapBlockListEnumKey", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsMapBlockValues" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": 
"MapBlock", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfMapBlockListEnumKey", + "autoflex.target.fieldname": "MapBlock", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsMapBlockValues" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "MapBlock", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/types.ListNestedObjectValueOf[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfMapBlockElementEnumKey]", + "autoflex.target.path": "MapBlock", + "autoflex.target.type": "map[string]github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsMapBlockElement" + }, + { + "@level": "trace", + "@message": "Skipping map block key", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "MapBlockKey", + "autoflex.source.path": "MapBlock[0]", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfMapBlockElementEnumKey", + "autoflex.target.path": "MapBlock[\"List\"]", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsMapBlockElement" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Attr1", + "autoflex.source.path": "MapBlock[0]", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfMapBlockElementEnumKey", + "autoflex.target.fieldname": "Attr1", + "autoflex.target.path": "MapBlock[\"List\"]", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsMapBlockElement" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "MapBlock[0].Attr1", + 
"autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.StringValue", + "autoflex.target.path": "MapBlock[\"List\"].Attr1", + "autoflex.target.type": "string" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Attr2", + "autoflex.source.path": "MapBlock[0]", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfMapBlockElementEnumKey", + "autoflex.target.fieldname": "Attr2", + "autoflex.target.path": "MapBlock[\"List\"]", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsMapBlockElement" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "MapBlock[0].Attr2", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.StringValue", + "autoflex.target.path": "MapBlock[\"List\"].Attr2", + "autoflex.target.type": "string" + }, + { + "@level": "trace", + "@message": "Skipping map block key", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "MapBlockKey", + "autoflex.source.path": "MapBlock[1]", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfMapBlockElementEnumKey", + "autoflex.target.path": "MapBlock[\"Scalar\"]", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsMapBlockElement" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Attr1", + "autoflex.source.path": "MapBlock[1]", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfMapBlockElementEnumKey", + "autoflex.target.fieldname": "Attr1", + "autoflex.target.path": "MapBlock[\"Scalar\"]", + "autoflex.target.type": 
"*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsMapBlockElement" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "MapBlock[1].Attr1", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.StringValue", + "autoflex.target.path": "MapBlock[\"Scalar\"].Attr1", + "autoflex.target.type": "string" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Attr2", + "autoflex.source.path": "MapBlock[1]", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfMapBlockElementEnumKey", + "autoflex.target.fieldname": "Attr2", + "autoflex.target.path": "MapBlock[\"Scalar\"]", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsMapBlockElement" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "MapBlock[1].Attr2", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.StringValue", + "autoflex.target.path": "MapBlock[\"Scalar\"].Attr2", + "autoflex.target.type": "string" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/maps/expand_map_block/map_block_key_list.golden b/internal/framework/flex/testdata/autoflex/maps/expand_map_block/map_block_key_list.golden new file mode 100644 index 000000000000..e54e86d0bc74 --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/maps/expand_map_block/map_block_key_list.golden @@ -0,0 +1,138 @@ +[ + { + "@level": "info", + "@message": "Expanding", + "@module": "provider.autoflex", + "autoflex.source.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfMapBlockList", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsMapBlockValues" + }, + { 
+ "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfMapBlockList", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsMapBlockValues" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "MapBlock", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfMapBlockList", + "autoflex.target.fieldname": "MapBlock", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsMapBlockValues" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "MapBlock", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/types.ListNestedObjectValueOf[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfMapBlockElement]", + "autoflex.target.path": "MapBlock", + "autoflex.target.type": "map[string]github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsMapBlockElement" + }, + { + "@level": "trace", + "@message": "Skipping map block key", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "MapBlockKey", + "autoflex.source.path": "MapBlock[0]", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfMapBlockElement", + "autoflex.target.path": "MapBlock[\"x\"]", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsMapBlockElement" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Attr1", + "autoflex.source.path": "MapBlock[0]", + 
"autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfMapBlockElement", + "autoflex.target.fieldname": "Attr1", + "autoflex.target.path": "MapBlock[\"x\"]", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsMapBlockElement" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "MapBlock[0].Attr1", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.StringValue", + "autoflex.target.path": "MapBlock[\"x\"].Attr1", + "autoflex.target.type": "string" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Attr2", + "autoflex.source.path": "MapBlock[0]", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfMapBlockElement", + "autoflex.target.fieldname": "Attr2", + "autoflex.target.path": "MapBlock[\"x\"]", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsMapBlockElement" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "MapBlock[0].Attr2", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.StringValue", + "autoflex.target.path": "MapBlock[\"x\"].Attr2", + "autoflex.target.type": "string" + }, + { + "@level": "trace", + "@message": "Skipping map block key", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "MapBlockKey", + "autoflex.source.path": "MapBlock[1]", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfMapBlockElement", + "autoflex.target.path": "MapBlock[\"y\"]", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsMapBlockElement" + }, + { + "@level": "trace", + "@message": "Matched fields", + 
"@module": "provider.autoflex", + "autoflex.source.fieldname": "Attr1", + "autoflex.source.path": "MapBlock[1]", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfMapBlockElement", + "autoflex.target.fieldname": "Attr1", + "autoflex.target.path": "MapBlock[\"y\"]", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsMapBlockElement" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "MapBlock[1].Attr1", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.StringValue", + "autoflex.target.path": "MapBlock[\"y\"].Attr1", + "autoflex.target.type": "string" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Attr2", + "autoflex.source.path": "MapBlock[1]", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfMapBlockElement", + "autoflex.target.fieldname": "Attr2", + "autoflex.target.path": "MapBlock[\"y\"]", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsMapBlockElement" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "MapBlock[1].Attr2", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.StringValue", + "autoflex.target.path": "MapBlock[\"y\"].Attr2", + "autoflex.target.type": "string" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/maps/expand_map_block/map_block_key_ptr_both.golden b/internal/framework/flex/testdata/autoflex/maps/expand_map_block/map_block_key_ptr_both.golden new file mode 100644 index 000000000000..6a02d02a92e1 --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/maps/expand_map_block/map_block_key_ptr_both.golden @@ -0,0 
+1,138 @@ +[ + { + "@level": "info", + "@message": "Expanding", + "@module": "provider.autoflex", + "autoflex.source.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfMapBlockList", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsMapBlockPointers" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfMapBlockList", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsMapBlockPointers" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "MapBlock", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfMapBlockList", + "autoflex.target.fieldname": "MapBlock", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsMapBlockPointers" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "MapBlock", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/types.ListNestedObjectValueOf[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfMapBlockElement]", + "autoflex.target.path": "MapBlock", + "autoflex.target.type": "map[string]*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsMapBlockElement" + }, + { + "@level": "trace", + "@message": "Skipping map block key", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "MapBlockKey", + "autoflex.source.path": "MapBlock[0]", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfMapBlockElement", + 
"autoflex.target.path": "MapBlock[\"x\"]", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsMapBlockElement" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Attr1", + "autoflex.source.path": "MapBlock[0]", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfMapBlockElement", + "autoflex.target.fieldname": "Attr1", + "autoflex.target.path": "MapBlock[\"x\"]", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsMapBlockElement" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "MapBlock[0].Attr1", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.StringValue", + "autoflex.target.path": "MapBlock[\"x\"].Attr1", + "autoflex.target.type": "string" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Attr2", + "autoflex.source.path": "MapBlock[0]", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfMapBlockElement", + "autoflex.target.fieldname": "Attr2", + "autoflex.target.path": "MapBlock[\"x\"]", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsMapBlockElement" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "MapBlock[0].Attr2", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.StringValue", + "autoflex.target.path": "MapBlock[\"x\"].Attr2", + "autoflex.target.type": "string" + }, + { + "@level": "trace", + "@message": "Skipping map block key", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "MapBlockKey", + "autoflex.source.path": "MapBlock[1]", + 
"autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfMapBlockElement", + "autoflex.target.path": "MapBlock[\"y\"]", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsMapBlockElement" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Attr1", + "autoflex.source.path": "MapBlock[1]", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfMapBlockElement", + "autoflex.target.fieldname": "Attr1", + "autoflex.target.path": "MapBlock[\"y\"]", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsMapBlockElement" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "MapBlock[1].Attr1", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.StringValue", + "autoflex.target.path": "MapBlock[\"y\"].Attr1", + "autoflex.target.type": "string" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Attr2", + "autoflex.source.path": "MapBlock[1]", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfMapBlockElement", + "autoflex.target.fieldname": "Attr2", + "autoflex.target.path": "MapBlock[\"y\"]", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsMapBlockElement" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "MapBlock[1].Attr2", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.StringValue", + "autoflex.target.path": "MapBlock[\"y\"].Attr2", + "autoflex.target.type": "string" + } +] \ No newline at end of file diff --git 
a/internal/framework/flex/testdata/autoflex/maps/expand_map_block/map_block_key_ptr_source.golden b/internal/framework/flex/testdata/autoflex/maps/expand_map_block/map_block_key_ptr_source.golden new file mode 100644 index 000000000000..e54e86d0bc74 --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/maps/expand_map_block/map_block_key_ptr_source.golden @@ -0,0 +1,138 @@ +[ + { + "@level": "info", + "@message": "Expanding", + "@module": "provider.autoflex", + "autoflex.source.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfMapBlockList", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsMapBlockValues" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfMapBlockList", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsMapBlockValues" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "MapBlock", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfMapBlockList", + "autoflex.target.fieldname": "MapBlock", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsMapBlockValues" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "MapBlock", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/types.ListNestedObjectValueOf[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfMapBlockElement]", + "autoflex.target.path": "MapBlock", + "autoflex.target.type": 
"map[string]github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsMapBlockElement" + }, + { + "@level": "trace", + "@message": "Skipping map block key", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "MapBlockKey", + "autoflex.source.path": "MapBlock[0]", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfMapBlockElement", + "autoflex.target.path": "MapBlock[\"x\"]", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsMapBlockElement" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Attr1", + "autoflex.source.path": "MapBlock[0]", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfMapBlockElement", + "autoflex.target.fieldname": "Attr1", + "autoflex.target.path": "MapBlock[\"x\"]", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsMapBlockElement" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "MapBlock[0].Attr1", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.StringValue", + "autoflex.target.path": "MapBlock[\"x\"].Attr1", + "autoflex.target.type": "string" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Attr2", + "autoflex.source.path": "MapBlock[0]", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfMapBlockElement", + "autoflex.target.fieldname": "Attr2", + "autoflex.target.path": "MapBlock[\"x\"]", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsMapBlockElement" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": 
"MapBlock[0].Attr2", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.StringValue", + "autoflex.target.path": "MapBlock[\"x\"].Attr2", + "autoflex.target.type": "string" + }, + { + "@level": "trace", + "@message": "Skipping map block key", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "MapBlockKey", + "autoflex.source.path": "MapBlock[1]", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfMapBlockElement", + "autoflex.target.path": "MapBlock[\"y\"]", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsMapBlockElement" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Attr1", + "autoflex.source.path": "MapBlock[1]", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfMapBlockElement", + "autoflex.target.fieldname": "Attr1", + "autoflex.target.path": "MapBlock[\"y\"]", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsMapBlockElement" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "MapBlock[1].Attr1", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.StringValue", + "autoflex.target.path": "MapBlock[\"y\"].Attr1", + "autoflex.target.type": "string" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Attr2", + "autoflex.source.path": "MapBlock[1]", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfMapBlockElement", + "autoflex.target.fieldname": "Attr2", + "autoflex.target.path": "MapBlock[\"y\"]", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsMapBlockElement" + }, 
+ { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "MapBlock[1].Attr2", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.StringValue", + "autoflex.target.path": "MapBlock[\"y\"].Attr2", + "autoflex.target.type": "string" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/maps/expand_map_block/map_block_key_set.golden b/internal/framework/flex/testdata/autoflex/maps/expand_map_block/map_block_key_set.golden new file mode 100644 index 000000000000..3daa5017e71c --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/maps/expand_map_block/map_block_key_set.golden @@ -0,0 +1,138 @@ +[ + { + "@level": "info", + "@message": "Expanding", + "@module": "provider.autoflex", + "autoflex.source.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfMapBlockSet", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsMapBlockValues" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfMapBlockSet", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsMapBlockValues" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "MapBlock", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfMapBlockSet", + "autoflex.target.fieldname": "MapBlock", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsMapBlockValues" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + 
"autoflex.source.path": "MapBlock", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/types.SetNestedObjectValueOf[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfMapBlockElement]", + "autoflex.target.path": "MapBlock", + "autoflex.target.type": "map[string]github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsMapBlockElement" + }, + { + "@level": "trace", + "@message": "Skipping map block key", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "MapBlockKey", + "autoflex.source.path": "MapBlock[0]", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfMapBlockElement", + "autoflex.target.path": "MapBlock[\"x\"]", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsMapBlockElement" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Attr1", + "autoflex.source.path": "MapBlock[0]", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfMapBlockElement", + "autoflex.target.fieldname": "Attr1", + "autoflex.target.path": "MapBlock[\"x\"]", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsMapBlockElement" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "MapBlock[0].Attr1", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.StringValue", + "autoflex.target.path": "MapBlock[\"x\"].Attr1", + "autoflex.target.type": "string" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Attr2", + "autoflex.source.path": "MapBlock[0]", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfMapBlockElement", + 
"autoflex.target.fieldname": "Attr2", + "autoflex.target.path": "MapBlock[\"x\"]", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsMapBlockElement" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "MapBlock[0].Attr2", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.StringValue", + "autoflex.target.path": "MapBlock[\"x\"].Attr2", + "autoflex.target.type": "string" + }, + { + "@level": "trace", + "@message": "Skipping map block key", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "MapBlockKey", + "autoflex.source.path": "MapBlock[1]", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfMapBlockElement", + "autoflex.target.path": "MapBlock[\"y\"]", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsMapBlockElement" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Attr1", + "autoflex.source.path": "MapBlock[1]", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfMapBlockElement", + "autoflex.target.fieldname": "Attr1", + "autoflex.target.path": "MapBlock[\"y\"]", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsMapBlockElement" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "MapBlock[1].Attr1", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.StringValue", + "autoflex.target.path": "MapBlock[\"y\"].Attr1", + "autoflex.target.type": "string" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Attr2", + "autoflex.source.path": "MapBlock[1]", + 
"autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfMapBlockElement", + "autoflex.target.fieldname": "Attr2", + "autoflex.target.path": "MapBlock[\"y\"]", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsMapBlockElement" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "MapBlock[1].Attr2", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.StringValue", + "autoflex.target.path": "MapBlock[\"y\"].Attr2", + "autoflex.target.type": "string" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/maps/expand_map_block/map_block_list_no_key.golden b/internal/framework/flex/testdata/autoflex/maps/expand_map_block/map_block_list_no_key.golden new file mode 100644 index 000000000000..03ea0705e150 --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/maps/expand_map_block/map_block_list_no_key.golden @@ -0,0 +1,47 @@ +[ + { + "@level": "info", + "@message": "Expanding", + "@module": "provider.autoflex", + "autoflex.source.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfMapBlockListNoKey", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsMapBlockValues" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfMapBlockListNoKey", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsMapBlockValues" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "MapBlock", + "autoflex.source.path": "", + "autoflex.source.type": 
"github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfMapBlockListNoKey", + "autoflex.target.fieldname": "MapBlock", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsMapBlockValues" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "MapBlock", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/types.ListNestedObjectValueOf[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfMapBlockElementNoKey]", + "autoflex.target.path": "MapBlock", + "autoflex.target.type": "map[string]github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsMapBlockElement" + }, + { + "@level": "error", + "@message": "Source has no map block key", + "@module": "provider.autoflex", + "autoflex.source.path": "MapBlock[0]", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfMapBlockElementNoKey", + "autoflex.target.path": "MapBlock", + "autoflex.target.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsMapBlockElement" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/maps/expand_map_block/nil_map_block_key.golden b/internal/framework/flex/testdata/autoflex/maps/expand_map_block/nil_map_block_key.golden new file mode 100644 index 000000000000..7841f34fe3f1 --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/maps/expand_map_block/nil_map_block_key.golden @@ -0,0 +1,47 @@ +[ + { + "@level": "info", + "@message": "Expanding", + "@module": "provider.autoflex", + "autoflex.source.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfMapBlockList", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsMapBlockValues" + }, + { + "@level": "info", + "@message": "Converting", + 
"@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfMapBlockList", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsMapBlockValues" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "MapBlock", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfMapBlockList", + "autoflex.target.fieldname": "MapBlock", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsMapBlockValues" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "MapBlock", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/types.ListNestedObjectValueOf[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfMapBlockElement]", + "autoflex.target.path": "MapBlock", + "autoflex.target.type": "map[string]github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsMapBlockElement" + }, + { + "@level": "trace", + "@message": "Expanding null value", + "@module": "provider.autoflex", + "autoflex.source.path": "MapBlock", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/types.ListNestedObjectValueOf[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfMapBlockElement]", + "autoflex.target.path": "MapBlock", + "autoflex.target.type": "map[string]github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsMapBlockElement" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/maps/expand_maps/map_of_map_of_string.golden 
b/internal/framework/flex/testdata/autoflex/maps/expand_maps/map_of_map_of_string.golden new file mode 100644 index 000000000000..23efe56ad6a0 --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/maps/expand_maps/map_of_map_of_string.golden @@ -0,0 +1,48 @@ +[ + { + "@level": "info", + "@message": "Expanding", + "@module": "provider.autoflex", + "autoflex.source.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfMapOfMapOfString", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsMapOfMapOfString" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfMapOfMapOfString", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsMapOfMapOfString" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfMapOfMapOfString", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsMapOfMapOfString" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/types.MapValueOf[github.com/hashicorp/terraform-provider-aws/internal/framework/types.MapValueOf[github.com/hashicorp/terraform-plugin-framework/types/basetypes.StringValue]]", + "autoflex.target.path": "Field1", + "autoflex.target.type": "map[string]map[string]string" + }, + { + "@level": "trace", + "@message": "Expanding with 
ElementsAs", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.size": 1, + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/types.MapValueOf[github.com/hashicorp/terraform-provider-aws/internal/framework/types.MapValueOf[github.com/hashicorp/terraform-plugin-framework/types/basetypes.StringValue]]", + "autoflex.target.path": "Field1", + "autoflex.target.type": "map[string]map[string]string" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/maps/expand_maps/map_of_map_of_string_pointer.golden b/internal/framework/flex/testdata/autoflex/maps/expand_maps/map_of_map_of_string_pointer.golden new file mode 100644 index 000000000000..541a8513a1e6 --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/maps/expand_maps/map_of_map_of_string_pointer.golden @@ -0,0 +1,48 @@ +[ + { + "@level": "info", + "@message": "Expanding", + "@module": "provider.autoflex", + "autoflex.source.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfMapOfMapOfString", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsMapOfMapOfStringPointer" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfMapOfMapOfString", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsMapOfMapOfStringPointer" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfMapOfMapOfString", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": 
"*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsMapOfMapOfStringPointer" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/types.MapValueOf[github.com/hashicorp/terraform-provider-aws/internal/framework/types.MapValueOf[github.com/hashicorp/terraform-plugin-framework/types/basetypes.StringValue]]", + "autoflex.target.path": "Field1", + "autoflex.target.type": "map[string]map[string]*string" + }, + { + "@level": "trace", + "@message": "Expanding with ElementsAs", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.size": 1, + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/types.MapValueOf[github.com/hashicorp/terraform-provider-aws/internal/framework/types.MapValueOf[github.com/hashicorp/terraform-plugin-framework/types/basetypes.StringValue]]", + "autoflex.target.path": "Field1", + "autoflex.target.type": "map[string]map[string]*string" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/maps/expand_maps/map_of_string.golden b/internal/framework/flex/testdata/autoflex/maps/expand_maps/map_of_string.golden new file mode 100644 index 000000000000..0bba5706a19d --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/maps/expand_maps/map_of_string.golden @@ -0,0 +1,48 @@ +[ + { + "@level": "info", + "@message": "Expanding", + "@module": "provider.autoflex", + "autoflex.source.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfMapOfString", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsMapOfString" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": 
"github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfMapOfString", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsMapOfString" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "FieldInner", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfMapOfString", + "autoflex.target.fieldname": "FieldInner", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsMapOfString" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "FieldInner", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/types.MapValueOf[github.com/hashicorp/terraform-plugin-framework/types/basetypes.StringValue]", + "autoflex.target.path": "FieldInner", + "autoflex.target.type": "map[string]string" + }, + { + "@level": "trace", + "@message": "Expanding with ElementsAs", + "@module": "provider.autoflex", + "autoflex.source.path": "FieldInner", + "autoflex.source.size": 1, + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/types.MapValueOf[github.com/hashicorp/terraform-plugin-framework/types/basetypes.StringValue]", + "autoflex.target.path": "FieldInner", + "autoflex.target.type": "map[string]string" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/maps/expand_maps/map_of_string_pointer.golden b/internal/framework/flex/testdata/autoflex/maps/expand_maps/map_of_string_pointer.golden new file mode 100644 index 000000000000..80db9fff4ffc --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/maps/expand_maps/map_of_string_pointer.golden @@ -0,0 +1,48 @@ +[ + { + "@level": "info", + "@message": 
"Expanding", + "@module": "provider.autoflex", + "autoflex.source.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfMapOfString", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsMapOfStringPointer" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfMapOfString", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsMapOfStringPointer" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "FieldInner", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfMapOfString", + "autoflex.target.fieldname": "FieldInner", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsMapOfStringPointer" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "FieldInner", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/types.MapValueOf[github.com/hashicorp/terraform-plugin-framework/types/basetypes.StringValue]", + "autoflex.target.path": "FieldInner", + "autoflex.target.type": "map[string]*string" + }, + { + "@level": "trace", + "@message": "Expanding with ElementsAs", + "@module": "provider.autoflex", + "autoflex.source.path": "FieldInner", + "autoflex.source.size": 1, + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/types.MapValueOf[github.com/hashicorp/terraform-plugin-framework/types/basetypes.StringValue]", + "autoflex.target.path": "FieldInner", + "autoflex.target.type": "map[string]*string" + } +] \ No 
newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/maps/expand_maps/nested_string_map.golden b/internal/framework/flex/testdata/autoflex/maps/expand_maps/nested_string_map.golden new file mode 100644 index 000000000000..49685f4aaec1 --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/maps/expand_maps/nested_string_map.golden @@ -0,0 +1,68 @@ +[ + { + "@level": "info", + "@message": "Expanding", + "@module": "provider.autoflex", + "autoflex.source.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfNestedMapOfString", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsNestedMapOfString" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfNestedMapOfString", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsNestedMapOfString" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "FieldOuter", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfNestedMapOfString", + "autoflex.target.fieldname": "FieldOuter", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsNestedMapOfString" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "FieldOuter", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/types.ListNestedObjectValueOf[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfMapOfString]", + "autoflex.target.path": "FieldOuter", + "autoflex.target.type": 
"github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsMapOfString" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "FieldInner", + "autoflex.source.path": "FieldOuter[0]", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfMapOfString", + "autoflex.target.fieldname": "FieldInner", + "autoflex.target.path": "FieldOuter", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsMapOfString" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "FieldOuter[0].FieldInner", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/types.MapValueOf[github.com/hashicorp/terraform-plugin-framework/types/basetypes.StringValue]", + "autoflex.target.path": "FieldOuter.FieldInner", + "autoflex.target.type": "map[string]string" + }, + { + "@level": "trace", + "@message": "Expanding with ElementsAs", + "@module": "provider.autoflex", + "autoflex.source.path": "FieldOuter[0].FieldInner", + "autoflex.source.size": 1, + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/types.MapValueOf[github.com/hashicorp/terraform-plugin-framework/types/basetypes.StringValue]", + "autoflex.target.path": "FieldOuter.FieldInner", + "autoflex.target.type": "map[string]string" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/maps/flatten_map_block/map_block_enum_key.golden b/internal/framework/flex/testdata/autoflex/maps/flatten_map_block/map_block_enum_key.golden new file mode 100644 index 000000000000..1f087001223c --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/maps/flatten_map_block/map_block_enum_key.golden @@ -0,0 +1,78 @@ +[ + { + "@level": "info", + "@message": "Flattening", + "@module": "provider.autoflex", + 
"autoflex.source.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsMapBlockValues", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfMapBlockListEnumKey" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsMapBlockValues", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfMapBlockListEnumKey" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "MapBlock", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsMapBlockValues", + "autoflex.target.fieldname": "MapBlock", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfMapBlockListEnumKey" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "MapBlock", + "autoflex.source.type": "map[string]github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsMapBlockElement", + "autoflex.target.path": "MapBlock", + "autoflex.target.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/types.ListNestedObjectValueOf[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfMapBlockElementEnumKey]" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Attr1", + "autoflex.source.path": "MapBlock[\"List\"]", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsMapBlockElement", + "autoflex.target.fieldname": "Attr1", + "autoflex.target.path": "MapBlock[0]", + 
"autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfMapBlockElementEnumKey" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "MapBlock[\"List\"].Attr1", + "autoflex.source.type": "string", + "autoflex.target.path": "MapBlock[0].Attr1", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.StringValue" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Attr2", + "autoflex.source.path": "MapBlock[\"List\"]", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsMapBlockElement", + "autoflex.target.fieldname": "Attr2", + "autoflex.target.path": "MapBlock[0]", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfMapBlockElementEnumKey" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "MapBlock[\"List\"].Attr2", + "autoflex.source.type": "string", + "autoflex.target.path": "MapBlock[0].Attr2", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.StringValue" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/maps/flatten_map_block/map_block_key_list.golden b/internal/framework/flex/testdata/autoflex/maps/flatten_map_block/map_block_key_list.golden new file mode 100644 index 000000000000..20a2cfd76760 --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/maps/flatten_map_block/map_block_key_list.golden @@ -0,0 +1,78 @@ +[ + { + "@level": "info", + "@message": "Flattening", + "@module": "provider.autoflex", + "autoflex.source.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsMapBlockValues", + "autoflex.target.type": 
"*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfMapBlockList" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsMapBlockValues", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfMapBlockList" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "MapBlock", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsMapBlockValues", + "autoflex.target.fieldname": "MapBlock", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfMapBlockList" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "MapBlock", + "autoflex.source.type": "map[string]github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsMapBlockElement", + "autoflex.target.path": "MapBlock", + "autoflex.target.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/types.ListNestedObjectValueOf[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfMapBlockElement]" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Attr1", + "autoflex.source.path": "MapBlock[\"x\"]", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsMapBlockElement", + "autoflex.target.fieldname": "Attr1", + "autoflex.target.path": "MapBlock[0]", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfMapBlockElement" + }, + { + "@level": "info", + "@message": "Converting", + "@module": 
"provider.autoflex", + "autoflex.source.path": "MapBlock[\"x\"].Attr1", + "autoflex.source.type": "string", + "autoflex.target.path": "MapBlock[0].Attr1", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.StringValue" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Attr2", + "autoflex.source.path": "MapBlock[\"x\"]", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsMapBlockElement", + "autoflex.target.fieldname": "Attr2", + "autoflex.target.path": "MapBlock[0]", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfMapBlockElement" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "MapBlock[\"x\"].Attr2", + "autoflex.source.type": "string", + "autoflex.target.path": "MapBlock[0].Attr2", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.StringValue" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/maps/flatten_map_block/map_block_key_ptr_both.golden b/internal/framework/flex/testdata/autoflex/maps/flatten_map_block/map_block_key_ptr_both.golden new file mode 100644 index 000000000000..8c2f11c65e62 --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/maps/flatten_map_block/map_block_key_ptr_both.golden @@ -0,0 +1,78 @@ +[ + { + "@level": "info", + "@message": "Flattening", + "@module": "provider.autoflex", + "autoflex.source.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsMapBlockPointers", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfMapBlockList" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": 
"github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsMapBlockPointers", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfMapBlockList" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "MapBlock", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsMapBlockPointers", + "autoflex.target.fieldname": "MapBlock", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfMapBlockList" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "MapBlock", + "autoflex.source.type": "map[string]*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsMapBlockElement", + "autoflex.target.path": "MapBlock", + "autoflex.target.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/types.ListNestedObjectValueOf[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfMapBlockElement]" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Attr1", + "autoflex.source.path": "MapBlock[\"x\"]", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsMapBlockElement", + "autoflex.target.fieldname": "Attr1", + "autoflex.target.path": "MapBlock[0]", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfMapBlockElement" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "MapBlock[\"x\"].Attr1", + "autoflex.source.type": "string", + "autoflex.target.path": "MapBlock[0].Attr1", + "autoflex.target.type": 
"github.com/hashicorp/terraform-plugin-framework/types/basetypes.StringValue" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Attr2", + "autoflex.source.path": "MapBlock[\"x\"]", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsMapBlockElement", + "autoflex.target.fieldname": "Attr2", + "autoflex.target.path": "MapBlock[0]", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfMapBlockElement" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "MapBlock[\"x\"].Attr2", + "autoflex.source.type": "string", + "autoflex.target.path": "MapBlock[0].Attr2", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.StringValue" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/maps/flatten_map_block/map_block_key_ptr_source.golden b/internal/framework/flex/testdata/autoflex/maps/flatten_map_block/map_block_key_ptr_source.golden new file mode 100644 index 000000000000..8c2f11c65e62 --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/maps/flatten_map_block/map_block_key_ptr_source.golden @@ -0,0 +1,78 @@ +[ + { + "@level": "info", + "@message": "Flattening", + "@module": "provider.autoflex", + "autoflex.source.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsMapBlockPointers", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfMapBlockList" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsMapBlockPointers", + "autoflex.target.path": "", + "autoflex.target.type": 
"*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfMapBlockList" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "MapBlock", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsMapBlockPointers", + "autoflex.target.fieldname": "MapBlock", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfMapBlockList" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "MapBlock", + "autoflex.source.type": "map[string]*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsMapBlockElement", + "autoflex.target.path": "MapBlock", + "autoflex.target.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/types.ListNestedObjectValueOf[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfMapBlockElement]" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Attr1", + "autoflex.source.path": "MapBlock[\"x\"]", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsMapBlockElement", + "autoflex.target.fieldname": "Attr1", + "autoflex.target.path": "MapBlock[0]", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfMapBlockElement" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "MapBlock[\"x\"].Attr1", + "autoflex.source.type": "string", + "autoflex.target.path": "MapBlock[0].Attr1", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.StringValue" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + 
"autoflex.source.fieldname": "Attr2", + "autoflex.source.path": "MapBlock[\"x\"]", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsMapBlockElement", + "autoflex.target.fieldname": "Attr2", + "autoflex.target.path": "MapBlock[0]", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfMapBlockElement" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "MapBlock[\"x\"].Attr2", + "autoflex.source.type": "string", + "autoflex.target.path": "MapBlock[0].Attr2", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.StringValue" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/maps/flatten_map_block/map_block_key_set.golden b/internal/framework/flex/testdata/autoflex/maps/flatten_map_block/map_block_key_set.golden new file mode 100644 index 000000000000..f886c9c14412 --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/maps/flatten_map_block/map_block_key_set.golden @@ -0,0 +1,78 @@ +[ + { + "@level": "info", + "@message": "Flattening", + "@module": "provider.autoflex", + "autoflex.source.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsMapBlockValues", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfMapBlockSet" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsMapBlockValues", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfMapBlockSet" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "MapBlock", + "autoflex.source.path": "", + 
"autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsMapBlockValues", + "autoflex.target.fieldname": "MapBlock", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfMapBlockSet" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "MapBlock", + "autoflex.source.type": "map[string]github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsMapBlockElement", + "autoflex.target.path": "MapBlock", + "autoflex.target.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/types.SetNestedObjectValueOf[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfMapBlockElement]" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Attr1", + "autoflex.source.path": "MapBlock[\"x\"]", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsMapBlockElement", + "autoflex.target.fieldname": "Attr1", + "autoflex.target.path": "MapBlock[0]", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfMapBlockElement" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "MapBlock[\"x\"].Attr1", + "autoflex.source.type": "string", + "autoflex.target.path": "MapBlock[0].Attr1", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.StringValue" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Attr2", + "autoflex.source.path": "MapBlock[\"x\"]", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsMapBlockElement", + "autoflex.target.fieldname": "Attr2", + "autoflex.target.path": "MapBlock[0]", + 
"autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfMapBlockElement" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "MapBlock[\"x\"].Attr2", + "autoflex.source.type": "string", + "autoflex.target.path": "MapBlock[0].Attr2", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.StringValue" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/maps/flatten_map_block/map_block_list_no_key.golden b/internal/framework/flex/testdata/autoflex/maps/flatten_map_block/map_block_list_no_key.golden new file mode 100644 index 000000000000..3423c8e8cfde --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/maps/flatten_map_block/map_block_list_no_key.golden @@ -0,0 +1,47 @@ +[ + { + "@level": "info", + "@message": "Flattening", + "@module": "provider.autoflex", + "autoflex.source.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsMapBlockValues", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfMapBlockListNoKey" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsMapBlockValues", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfMapBlockListNoKey" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "MapBlock", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsMapBlockValues", + "autoflex.target.fieldname": "MapBlock", + "autoflex.target.path": "", + "autoflex.target.type": 
"*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfMapBlockListNoKey" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "MapBlock", + "autoflex.source.type": "map[string]github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsMapBlockElement", + "autoflex.target.path": "MapBlock", + "autoflex.target.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/types.ListNestedObjectValueOf[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfMapBlockElementNoKey]" + }, + { + "@level": "error", + "@message": "Target has no map block key", + "@module": "provider.autoflex", + "autoflex.source.path": "MapBlock[\"x\"]", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsMapBlockElement", + "autoflex.target.path": "MapBlock[0]", + "autoflex.target.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfMapBlockElementNoKey" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/maps/flatten_map_block/nil_map_block_key.golden b/internal/framework/flex/testdata/autoflex/maps/flatten_map_block/nil_map_block_key.golden new file mode 100644 index 000000000000..52a99c5bce60 --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/maps/flatten_map_block/nil_map_block_key.golden @@ -0,0 +1,47 @@ +[ + { + "@level": "info", + "@message": "Flattening", + "@module": "provider.autoflex", + "autoflex.source.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsMapBlockValues", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfMapBlockList" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsMapBlockValues", + 
"autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfMapBlockList" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "MapBlock", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsMapBlockValues", + "autoflex.target.fieldname": "MapBlock", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfMapBlockList" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "MapBlock", + "autoflex.source.type": "map[string]github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsMapBlockElement", + "autoflex.target.path": "MapBlock", + "autoflex.target.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/types.ListNestedObjectValueOf[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfMapBlockElement]" + }, + { + "@level": "trace", + "@message": "Flattening null value", + "@module": "provider.autoflex", + "autoflex.source.path": "MapBlock", + "autoflex.source.type": "map[string]github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsMapBlockElement", + "autoflex.target.path": "MapBlock", + "autoflex.target.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/types.ListNestedObjectValueOf[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfMapBlockElement]" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/maps/flatten_map_block/nil_map_block_key_ptr.golden b/internal/framework/flex/testdata/autoflex/maps/flatten_map_block/nil_map_block_key_ptr.golden new file mode 100644 index 000000000000..dd795f506b1a --- /dev/null +++ 
b/internal/framework/flex/testdata/autoflex/maps/flatten_map_block/nil_map_block_key_ptr.golden @@ -0,0 +1,47 @@ +[ + { + "@level": "info", + "@message": "Flattening", + "@module": "provider.autoflex", + "autoflex.source.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsMapBlockPointers", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfMapBlockList" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsMapBlockPointers", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfMapBlockList" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "MapBlock", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsMapBlockPointers", + "autoflex.target.fieldname": "MapBlock", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfMapBlockList" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "MapBlock", + "autoflex.source.type": "map[string]*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsMapBlockElement", + "autoflex.target.path": "MapBlock", + "autoflex.target.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/types.ListNestedObjectValueOf[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfMapBlockElement]" + }, + { + "@level": "trace", + "@message": "Flattening null value", + "@module": "provider.autoflex", + "autoflex.source.path": "MapBlock", + "autoflex.source.type": 
"map[string]*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsMapBlockElement", + "autoflex.target.path": "MapBlock", + "autoflex.target.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/types.ListNestedObjectValueOf[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfMapBlockElement]" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/maps/flatten_maps/map_of_map_of_string.golden b/internal/framework/flex/testdata/autoflex/maps/flatten_maps/map_of_map_of_string.golden new file mode 100644 index 000000000000..86f9ca29c8e2 --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/maps/flatten_maps/map_of_map_of_string.golden @@ -0,0 +1,58 @@ +[ + { + "@level": "info", + "@message": "Flattening", + "@module": "provider.autoflex", + "autoflex.source.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsMapOfMapOfString", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfMapOfMapOfString" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsMapOfMapOfString", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfMapOfMapOfString" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsMapOfMapOfString", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfMapOfMapOfString" + }, + { + "@level": "info", + "@message": "Converting", + 
"@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "map[string]map[string]string", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/types.MapValueOf[github.com/hashicorp/terraform-provider-aws/internal/framework/types.MapValueOf[github.com/hashicorp/terraform-plugin-framework/types/basetypes.StringValue]]" + }, + { + "@level": "trace", + "@message": "Flattening map", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.size": 1, + "autoflex.source.type": "map[string]map[string]string", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/types.MapValueOf[github.com/hashicorp/terraform-provider-aws/internal/framework/types.MapValueOf[github.com/hashicorp/terraform-plugin-framework/types/basetypes.StringValue]]" + }, + { + "@level": "trace", + "@message": "Flattening with NewMapValueOf", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1[\"x\"]", + "autoflex.source.size": 1, + "autoflex.source.type": "map[string]string", + "autoflex.target.path": "Field1[\"x\"]", + "autoflex.target.type": "map[string]github.com/hashicorp/terraform-plugin-framework/attr.Value" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/maps/flatten_maps/map_of_map_of_string_pointer.golden b/internal/framework/flex/testdata/autoflex/maps/flatten_maps/map_of_map_of_string_pointer.golden new file mode 100644 index 000000000000..8932064712c7 --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/maps/flatten_maps/map_of_map_of_string_pointer.golden @@ -0,0 +1,58 @@ +[ + { + "@level": "info", + "@message": "Flattening", + "@module": "provider.autoflex", + "autoflex.source.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsMapOfMapOfStringPointer", + "autoflex.target.type": 
"*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfMapOfMapOfString" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsMapOfMapOfStringPointer", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfMapOfMapOfString" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsMapOfMapOfStringPointer", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfMapOfMapOfString" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "map[string]map[string]*string", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/types.MapValueOf[github.com/hashicorp/terraform-provider-aws/internal/framework/types.MapValueOf[github.com/hashicorp/terraform-plugin-framework/types/basetypes.StringValue]]" + }, + { + "@level": "trace", + "@message": "Flattening map", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.size": 1, + "autoflex.source.type": "map[string]map[string]*string", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/types.MapValueOf[github.com/hashicorp/terraform-provider-aws/internal/framework/types.MapValueOf[github.com/hashicorp/terraform-plugin-framework/types/basetypes.StringValue]]" + }, + { + "@level": "trace", 
+ "@message": "Flattening with NewMapValueOf", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1[\"x\"]", + "autoflex.source.size": 1, + "autoflex.source.type": "map[string]*string", + "autoflex.target.path": "Field1[\"x\"]", + "autoflex.target.type": "map[string]github.com/hashicorp/terraform-plugin-framework/attr.Value" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/maps/flatten_maps/map_of_string.golden b/internal/framework/flex/testdata/autoflex/maps/flatten_maps/map_of_string.golden new file mode 100644 index 000000000000..608badf4b341 --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/maps/flatten_maps/map_of_string.golden @@ -0,0 +1,48 @@ +[ + { + "@level": "info", + "@message": "Flattening", + "@module": "provider.autoflex", + "autoflex.source.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsMapOfString", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfMapOfString" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsMapOfString", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfMapOfString" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "FieldInner", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsMapOfString", + "autoflex.target.fieldname": "FieldInner", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfMapOfString" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": 
"FieldInner", + "autoflex.source.type": "map[string]string", + "autoflex.target.path": "FieldInner", + "autoflex.target.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/types.MapValueOf[github.com/hashicorp/terraform-plugin-framework/types/basetypes.StringValue]" + }, + { + "@level": "trace", + "@message": "Flattening with MapValue", + "@module": "provider.autoflex", + "autoflex.source.path": "FieldInner", + "autoflex.source.size": 1, + "autoflex.source.type": "map[string]string", + "autoflex.target.path": "FieldInner", + "autoflex.target.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/types.MapValueOf[github.com/hashicorp/terraform-plugin-framework/types/basetypes.StringValue]" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/maps/flatten_maps/map_of_string_pointer.golden b/internal/framework/flex/testdata/autoflex/maps/flatten_maps/map_of_string_pointer.golden new file mode 100644 index 000000000000..fbf4775a65ae --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/maps/flatten_maps/map_of_string_pointer.golden @@ -0,0 +1,48 @@ +[ + { + "@level": "info", + "@message": "Flattening", + "@module": "provider.autoflex", + "autoflex.source.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsMapOfStringPointer", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfMapOfString" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsMapOfStringPointer", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfMapOfString" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "FieldInner", + 
"autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsMapOfStringPointer", + "autoflex.target.fieldname": "FieldInner", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfMapOfString" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "FieldInner", + "autoflex.source.type": "map[string]*string", + "autoflex.target.path": "FieldInner", + "autoflex.target.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/types.MapValueOf[github.com/hashicorp/terraform-plugin-framework/types/basetypes.StringValue]" + }, + { + "@level": "trace", + "@message": "Flattening with MapValue", + "@module": "provider.autoflex", + "autoflex.source.path": "FieldInner", + "autoflex.source.size": 1, + "autoflex.source.type": "map[string]*string", + "autoflex.target.path": "FieldInner", + "autoflex.target.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/types.MapValueOf[github.com/hashicorp/terraform-plugin-framework/types/basetypes.StringValue]" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/maps/flatten_maps/nested_string_map.golden b/internal/framework/flex/testdata/autoflex/maps/flatten_maps/nested_string_map.golden new file mode 100644 index 000000000000..3abbe9b0565c --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/maps/flatten_maps/nested_string_map.golden @@ -0,0 +1,68 @@ +[ + { + "@level": "info", + "@message": "Flattening", + "@module": "provider.autoflex", + "autoflex.source.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsNestedMapOfString", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfNestedMapOfString" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + 
"autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsNestedMapOfString", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfNestedMapOfString" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "FieldOuter", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsNestedMapOfString", + "autoflex.target.fieldname": "FieldOuter", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfNestedMapOfString" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "FieldOuter", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsMapOfString", + "autoflex.target.path": "FieldOuter", + "autoflex.target.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/types.ListNestedObjectValueOf[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfMapOfString]" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "FieldInner", + "autoflex.source.path": "FieldOuter", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsMapOfString", + "autoflex.target.fieldname": "FieldInner", + "autoflex.target.path": "FieldOuter", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfMapOfString" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "FieldOuter.FieldInner", + "autoflex.source.type": "map[string]string", + "autoflex.target.path": "FieldOuter.FieldInner", + 
"autoflex.target.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/types.MapValueOf[github.com/hashicorp/terraform-plugin-framework/types/basetypes.StringValue]" + }, + { + "@level": "trace", + "@message": "Flattening with MapValue", + "@module": "provider.autoflex", + "autoflex.source.path": "FieldOuter.FieldInner", + "autoflex.source.size": 1, + "autoflex.source.type": "map[string]string", + "autoflex.target.path": "FieldOuter.FieldInner", + "autoflex.target.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/types.MapValueOf[github.com/hashicorp/terraform-plugin-framework/types/basetypes.StringValue]" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/nested/expand_nested_complex/complex_source_and_complex_target.golden b/internal/framework/flex/testdata/autoflex/nested/expand_nested_complex/complex_source_and_complex_target.golden new file mode 100644 index 000000000000..e990ead30a89 --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/nested/expand_nested_complex/complex_source_and_complex_target.golden @@ -0,0 +1,218 @@ +[ + { + "@level": "info", + "@message": "Expanding", + "@module": "provider.autoflex", + "autoflex.source.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfComplexValue", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsComplexValue" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfComplexValue", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsComplexValue" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + 
"autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfComplexValue", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsComplexValue" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.StringValue", + "autoflex.target.path": "Field1", + "autoflex.target.type": "string" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field2", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfComplexValue", + "autoflex.target.fieldname": "Field2", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsComplexValue" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field2", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/types.ListNestedObjectValueOf[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfListOfNestedObject]", + "autoflex.target.path": "Field2", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsNestedObjectPointer" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "Field2[0]", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfListOfNestedObject", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "Field2", + "autoflex.target.type": 
"*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsNestedObjectPointer" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field2[0].Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/types.ListNestedObjectValueOf[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfSingleStringField]", + "autoflex.target.path": "Field2.Field1", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsSingleStringValue" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "Field2[0].Field1[0]", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfSingleStringField", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "Field2.Field1", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsSingleStringValue" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field2[0].Field1[0].Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.StringValue", + "autoflex.target.path": "Field2.Field1.Field1", + "autoflex.target.type": "string" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field3", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfComplexValue", + "autoflex.target.fieldname": "Field3", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsComplexValue" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", 
+ "autoflex.source.path": "Field3", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.MapValue", + "autoflex.target.path": "Field3", + "autoflex.target.type": "map[string]*string" + }, + { + "@level": "trace", + "@message": "Expanding with ElementsAs", + "@module": "provider.autoflex", + "autoflex.source.path": "Field3", + "autoflex.source.size": 2, + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.MapValue", + "autoflex.target.path": "Field3", + "autoflex.target.type": "map[string]*string" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field4", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfComplexValue", + "autoflex.target.fieldname": "Field4", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsComplexValue" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field4", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/types.SetNestedObjectValueOf[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfSingleInt64Field]", + "autoflex.target.path": "Field4", + "autoflex.target.type": "[]github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsSingleInt64Value" + }, + { + "@level": "trace", + "@message": "Expanding nested object collection", + "@module": "provider.autoflex", + "autoflex.source.path": "Field4", + "autoflex.source.size": 3, + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/types.SetNestedObjectValueOf[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfSingleInt64Field]", + "autoflex.target.path": "Field4", + "autoflex.target.type": 
"[]github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsSingleInt64Value" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "Field4[0]", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfSingleInt64Field", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "Field4[0]", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsSingleInt64Value" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field4[0].Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Int64Value", + "autoflex.target.path": "Field4[0].Field1", + "autoflex.target.type": "int64" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "Field4[1]", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfSingleInt64Field", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "Field4[1]", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsSingleInt64Value" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field4[1].Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Int64Value", + "autoflex.target.path": "Field4[1].Field1", + "autoflex.target.type": "int64" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "Field4[2]", + "autoflex.source.type": 
"github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfSingleInt64Field", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "Field4[2]", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsSingleInt64Value" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field4[2].Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Int64Value", + "autoflex.target.path": "Field4[2].Field1", + "autoflex.target.type": "int64" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/nested/flatten_nested_complex/complex_source_and_complex_target.golden b/internal/framework/flex/testdata/autoflex/nested/flatten_nested_complex/complex_source_and_complex_target.golden new file mode 100644 index 000000000000..614f7ae03292 --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/nested/flatten_nested_complex/complex_source_and_complex_target.golden @@ -0,0 +1,218 @@ +[ + { + "@level": "info", + "@message": "Flattening", + "@module": "provider.autoflex", + "autoflex.source.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsComplexValue", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfComplexValue" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsComplexValue", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfComplexValue" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": 
"github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsComplexValue", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfComplexValue" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "string", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.StringValue" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field2", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsComplexValue", + "autoflex.target.fieldname": "Field2", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfComplexValue" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field2", + "autoflex.source.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsNestedObjectPointer", + "autoflex.target.path": "Field2", + "autoflex.target.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/types.ListNestedObjectValueOf[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfListOfNestedObject]" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "Field2", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsNestedObjectPointer", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "Field2", + "autoflex.target.type": 
"*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfListOfNestedObject" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field2.Field1", + "autoflex.source.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsSingleStringValue", + "autoflex.target.path": "Field2.Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/types.ListNestedObjectValueOf[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfSingleStringField]" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "Field2.Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsSingleStringValue", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "Field2.Field1", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfSingleStringField" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field2.Field1.Field1", + "autoflex.source.type": "string", + "autoflex.target.path": "Field2.Field1.Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.StringValue" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field3", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsComplexValue", + "autoflex.target.fieldname": "Field3", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfComplexValue" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + 
"autoflex.source.path": "Field3", + "autoflex.source.type": "map[string]*string", + "autoflex.target.path": "Field3", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.MapValue" + }, + { + "@level": "trace", + "@message": "Flattening with MapValue", + "@module": "provider.autoflex", + "autoflex.source.path": "Field3", + "autoflex.source.size": 2, + "autoflex.source.type": "map[string]*string", + "autoflex.target.path": "Field3", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.MapValue" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field4", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsComplexValue", + "autoflex.target.fieldname": "Field4", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfComplexValue" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field4", + "autoflex.source.type": "[]github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsSingleInt64Value", + "autoflex.target.path": "Field4", + "autoflex.target.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/types.SetNestedObjectValueOf[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfSingleInt64Field]" + }, + { + "@level": "trace", + "@message": "Flattening nested object collection", + "@module": "provider.autoflex", + "autoflex.source.path": "Field4", + "autoflex.source.size": 3, + "autoflex.source.type": "[]github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsSingleInt64Value", + "autoflex.target.path": "Field4", + "autoflex.target.type": 
"github.com/hashicorp/terraform-provider-aws/internal/framework/types.SetNestedObjectValueOf[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfSingleInt64Field]" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "Field4[0]", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsSingleInt64Value", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "Field4[0]", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfSingleInt64Field" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field4[0].Field1", + "autoflex.source.type": "int64", + "autoflex.target.path": "Field4[0].Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Int64Value" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "Field4[1]", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsSingleInt64Value", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "Field4[1]", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfSingleInt64Field" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field4[1].Field1", + "autoflex.source.type": "int64", + "autoflex.target.path": "Field4[1].Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Int64Value" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "Field4[2]", + 
"autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsSingleInt64Value", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "Field4[2]", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfSingleInt64Field" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field4[2].Field1", + "autoflex.source.type": "int64", + "autoflex.target.path": "Field4[2].Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Int64Value" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/nums/expand_primitives/primitive_types_source_and_primitive_types_target.golden b/internal/framework/flex/testdata/autoflex/nums/expand_primitives/primitive_types_source_and_primitive_types_target.golden new file mode 100644 index 000000000000..304c2ce814f7 --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/nums/expand_primitives/primitive_types_source_and_primitive_types_target.golden @@ -0,0 +1,258 @@ +[ + { + "@level": "info", + "@message": "Expanding", + "@module": "provider.autoflex", + "autoflex.source.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfAllThePrimitiveFields", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsAllThePrimitiveFields" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfAllThePrimitiveFields", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsAllThePrimitiveFields" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": 
"Field1", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfAllThePrimitiveFields", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsAllThePrimitiveFields" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.StringValue", + "autoflex.target.path": "Field1", + "autoflex.target.type": "string" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field2", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfAllThePrimitiveFields", + "autoflex.target.fieldname": "Field2", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsAllThePrimitiveFields" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field2", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.StringValue", + "autoflex.target.path": "Field2", + "autoflex.target.type": "*string" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field3", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfAllThePrimitiveFields", + "autoflex.target.fieldname": "Field3", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsAllThePrimitiveFields" + }, + { + "@level": "info", + "@message": "Converting", + "@module": 
"provider.autoflex", + "autoflex.source.path": "Field3", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Int64Value", + "autoflex.target.path": "Field3", + "autoflex.target.type": "int32" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field4", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfAllThePrimitiveFields", + "autoflex.target.fieldname": "Field4", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsAllThePrimitiveFields" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field4", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Int64Value", + "autoflex.target.path": "Field4", + "autoflex.target.type": "*int32" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field5", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfAllThePrimitiveFields", + "autoflex.target.fieldname": "Field5", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsAllThePrimitiveFields" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field5", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Int64Value", + "autoflex.target.path": "Field5", + "autoflex.target.type": "int64" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field6", + "autoflex.source.path": "", + "autoflex.source.type": 
"github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfAllThePrimitiveFields", + "autoflex.target.fieldname": "Field6", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsAllThePrimitiveFields" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field6", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Int64Value", + "autoflex.target.path": "Field6", + "autoflex.target.type": "*int64" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field7", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfAllThePrimitiveFields", + "autoflex.target.fieldname": "Field7", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsAllThePrimitiveFields" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field7", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Float64Value", + "autoflex.target.path": "Field7", + "autoflex.target.type": "float32" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field8", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfAllThePrimitiveFields", + "autoflex.target.fieldname": "Field8", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsAllThePrimitiveFields" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field8", + 
"autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Float64Value", + "autoflex.target.path": "Field8", + "autoflex.target.type": "*float32" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field9", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfAllThePrimitiveFields", + "autoflex.target.fieldname": "Field9", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsAllThePrimitiveFields" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field9", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Float64Value", + "autoflex.target.path": "Field9", + "autoflex.target.type": "float64" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field10", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfAllThePrimitiveFields", + "autoflex.target.fieldname": "Field10", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsAllThePrimitiveFields" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field10", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Float64Value", + "autoflex.target.path": "Field10", + "autoflex.target.type": "*float64" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field11", + "autoflex.source.path": "", + "autoflex.source.type": 
"github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfAllThePrimitiveFields", + "autoflex.target.fieldname": "Field11", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsAllThePrimitiveFields" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field11", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.BoolValue", + "autoflex.target.path": "Field11", + "autoflex.target.type": "bool" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field12", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfAllThePrimitiveFields", + "autoflex.target.fieldname": "Field12", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsAllThePrimitiveFields" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field12", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.BoolValue", + "autoflex.target.path": "Field12", + "autoflex.target.type": "*bool" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/nums/expand_primitives/single_string_source_and_byte_slice_target.golden b/internal/framework/flex/testdata/autoflex/nums/expand_primitives/single_string_source_and_byte_slice_target.golden new file mode 100644 index 000000000000..65c0bef21104 --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/nums/expand_primitives/single_string_source_and_byte_slice_target.golden @@ -0,0 +1,38 @@ +[ + { + "@level": "info", + "@message": "Expanding", + "@module": "provider.autoflex", + "autoflex.source.type": 
"*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfSingleStringField", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsSingleByteSliceValue" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfSingleStringField", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsSingleByteSliceValue" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfSingleStringField", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsSingleByteSliceValue" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.StringValue", + "autoflex.target.path": "Field1", + "autoflex.target.type": "[]uint8" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/nums/expand_primitives/single_string_source_and_single_int64_target.golden b/internal/framework/flex/testdata/autoflex/nums/expand_primitives/single_string_source_and_single_int64_target.golden new file mode 100644 index 000000000000..d324bbff5f8f --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/nums/expand_primitives/single_string_source_and_single_int64_target.golden @@ -0,0 +1,49 @@ +[ + { + "@level": "info", + "@message": "Expanding", + "@module": "provider.autoflex", + "autoflex.source.type": 
"*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfSingleStringField", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsSingleInt64Value" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfSingleStringField", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsSingleInt64Value" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfSingleStringField", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsSingleInt64Value" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.StringValue", + "autoflex.target.path": "Field1", + "autoflex.target.type": "int64" + }, + { + "@level": "error", + "@message": "AutoFlex Expand; incompatible types", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.StringValue", + "autoflex.target.path": "Field1", + "autoflex.target.type": "int64", + "from": {}, + "to": 6 + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/nums/expand_primitives/single_string_source_and_single_pointer_string_target.golden 
b/internal/framework/flex/testdata/autoflex/nums/expand_primitives/single_string_source_and_single_pointer_string_target.golden new file mode 100644 index 000000000000..baa401c78bd4 --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/nums/expand_primitives/single_string_source_and_single_pointer_string_target.golden @@ -0,0 +1,38 @@ +[ + { + "@level": "info", + "@message": "Expanding", + "@module": "provider.autoflex", + "autoflex.source.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfSingleStringField", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsSingleStringPointer" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfSingleStringField", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsSingleStringPointer" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfSingleStringField", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsSingleStringPointer" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.StringValue", + "autoflex.target.path": "Field1", + "autoflex.target.type": "*string" + } +] \ No newline at end of file diff --git 
a/internal/framework/flex/testdata/autoflex/nums/expand_primitives/single_string_source_and_single_string_target.golden b/internal/framework/flex/testdata/autoflex/nums/expand_primitives/single_string_source_and_single_string_target.golden new file mode 100644 index 000000000000..bccf50400402 --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/nums/expand_primitives/single_string_source_and_single_string_target.golden @@ -0,0 +1,38 @@ +[ + { + "@level": "info", + "@message": "Expanding", + "@module": "provider.autoflex", + "autoflex.source.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfSingleStringField", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsSingleStringValue" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfSingleStringField", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsSingleStringValue" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfSingleStringField", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsSingleStringValue" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.StringValue", + "autoflex.target.path": "Field1", + "autoflex.target.type": "string" + } +] \ No newline at end of file diff --git 
a/internal/framework/flex/testdata/autoflex/nums/expand_primitives/single_string_struct_pointer_source_and_empty_target.golden b/internal/framework/flex/testdata/autoflex/nums/expand_primitives/single_string_struct_pointer_source_and_empty_target.golden new file mode 100644 index 000000000000..57e701609bf1 --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/nums/expand_primitives/single_string_struct_pointer_source_and_empty_target.golden @@ -0,0 +1,28 @@ +[ + { + "@level": "info", + "@message": "Expanding", + "@module": "provider.autoflex", + "autoflex.source.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfSingleStringField", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.emptyStruct" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfSingleStringField", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.emptyStruct" + }, + { + "@level": "debug", + "@message": "No corresponding field", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfSingleStringField", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.emptyStruct" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/nums/flatten_primitive_pack/primitive_pack_ok.golden b/internal/framework/flex/testdata/autoflex/nums/flatten_primitive_pack/primitive_pack_ok.golden new file mode 100644 index 000000000000..2479d4dbc0d6 --- /dev/null +++ 
b/internal/framework/flex/testdata/autoflex/nums/flatten_primitive_pack/primitive_pack_ok.golden @@ -0,0 +1,258 @@ +[ + { + "@level": "info", + "@message": "Flattening", + "@module": "provider.autoflex", + "autoflex.source.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsAllThePrimitiveFields", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfAllThePrimitiveFields" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsAllThePrimitiveFields", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfAllThePrimitiveFields" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsAllThePrimitiveFields", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfAllThePrimitiveFields" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "string", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.StringValue" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field2", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsAllThePrimitiveFields", + "autoflex.target.fieldname": "Field2", + "autoflex.target.path": "", + 
"autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfAllThePrimitiveFields" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field2", + "autoflex.source.type": "*string", + "autoflex.target.path": "Field2", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.StringValue" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field3", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsAllThePrimitiveFields", + "autoflex.target.fieldname": "Field3", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfAllThePrimitiveFields" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field3", + "autoflex.source.type": "int32", + "autoflex.target.path": "Field3", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Int64Value" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field4", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsAllThePrimitiveFields", + "autoflex.target.fieldname": "Field4", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfAllThePrimitiveFields" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field4", + "autoflex.source.type": "*int32", + "autoflex.target.path": "Field4", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Int64Value" + }, + { + 
"@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field5", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsAllThePrimitiveFields", + "autoflex.target.fieldname": "Field5", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfAllThePrimitiveFields" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field5", + "autoflex.source.type": "int64", + "autoflex.target.path": "Field5", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Int64Value" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field6", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsAllThePrimitiveFields", + "autoflex.target.fieldname": "Field6", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfAllThePrimitiveFields" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field6", + "autoflex.source.type": "*int64", + "autoflex.target.path": "Field6", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Int64Value" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field7", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsAllThePrimitiveFields", + "autoflex.target.fieldname": "Field7", + "autoflex.target.path": "", + "autoflex.target.type": 
"*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfAllThePrimitiveFields" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field7", + "autoflex.source.type": "float32", + "autoflex.target.path": "Field7", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Float64Value" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field8", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsAllThePrimitiveFields", + "autoflex.target.fieldname": "Field8", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfAllThePrimitiveFields" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field8", + "autoflex.source.type": "*float32", + "autoflex.target.path": "Field8", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Float64Value" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field9", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsAllThePrimitiveFields", + "autoflex.target.fieldname": "Field9", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfAllThePrimitiveFields" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field9", + "autoflex.source.type": "float64", + "autoflex.target.path": "Field9", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Float64Value" + }, + { + "@level": 
"trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field10", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsAllThePrimitiveFields", + "autoflex.target.fieldname": "Field10", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfAllThePrimitiveFields" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field10", + "autoflex.source.type": "*float64", + "autoflex.target.path": "Field10", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Float64Value" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field11", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsAllThePrimitiveFields", + "autoflex.target.fieldname": "Field11", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfAllThePrimitiveFields" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field11", + "autoflex.source.type": "bool", + "autoflex.target.path": "Field11", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.BoolValue" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field12", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsAllThePrimitiveFields", + "autoflex.target.fieldname": "Field12", + "autoflex.target.path": "", + "autoflex.target.type": 
"*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfAllThePrimitiveFields" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field12", + "autoflex.source.type": "*bool", + "autoflex.target.path": "Field12", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.BoolValue" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/nums/flatten_primitive_pack/primitive_pack_zero_ok.golden b/internal/framework/flex/testdata/autoflex/nums/flatten_primitive_pack/primitive_pack_zero_ok.golden new file mode 100644 index 000000000000..2479d4dbc0d6 --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/nums/flatten_primitive_pack/primitive_pack_zero_ok.golden @@ -0,0 +1,258 @@ +[ + { + "@level": "info", + "@message": "Flattening", + "@module": "provider.autoflex", + "autoflex.source.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsAllThePrimitiveFields", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfAllThePrimitiveFields" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsAllThePrimitiveFields", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfAllThePrimitiveFields" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsAllThePrimitiveFields", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": 
"*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfAllThePrimitiveFields" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "string", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.StringValue" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field2", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsAllThePrimitiveFields", + "autoflex.target.fieldname": "Field2", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfAllThePrimitiveFields" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field2", + "autoflex.source.type": "*string", + "autoflex.target.path": "Field2", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.StringValue" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field3", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsAllThePrimitiveFields", + "autoflex.target.fieldname": "Field3", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfAllThePrimitiveFields" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field3", + "autoflex.source.type": "int32", + "autoflex.target.path": "Field3", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Int64Value" + }, + { + "@level": "trace", + 
"@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field4", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsAllThePrimitiveFields", + "autoflex.target.fieldname": "Field4", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfAllThePrimitiveFields" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field4", + "autoflex.source.type": "*int32", + "autoflex.target.path": "Field4", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Int64Value" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field5", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsAllThePrimitiveFields", + "autoflex.target.fieldname": "Field5", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfAllThePrimitiveFields" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field5", + "autoflex.source.type": "int64", + "autoflex.target.path": "Field5", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Int64Value" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field6", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsAllThePrimitiveFields", + "autoflex.target.fieldname": "Field6", + "autoflex.target.path": "", + "autoflex.target.type": 
"*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfAllThePrimitiveFields" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field6", + "autoflex.source.type": "*int64", + "autoflex.target.path": "Field6", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Int64Value" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field7", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsAllThePrimitiveFields", + "autoflex.target.fieldname": "Field7", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfAllThePrimitiveFields" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field7", + "autoflex.source.type": "float32", + "autoflex.target.path": "Field7", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Float64Value" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field8", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsAllThePrimitiveFields", + "autoflex.target.fieldname": "Field8", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfAllThePrimitiveFields" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field8", + "autoflex.source.type": "*float32", + "autoflex.target.path": "Field8", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Float64Value" + }, + { + "@level": "trace", 
+ "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field9", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsAllThePrimitiveFields", + "autoflex.target.fieldname": "Field9", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfAllThePrimitiveFields" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field9", + "autoflex.source.type": "float64", + "autoflex.target.path": "Field9", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Float64Value" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field10", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsAllThePrimitiveFields", + "autoflex.target.fieldname": "Field10", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfAllThePrimitiveFields" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field10", + "autoflex.source.type": "*float64", + "autoflex.target.path": "Field10", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Float64Value" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field11", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsAllThePrimitiveFields", + "autoflex.target.fieldname": "Field11", + "autoflex.target.path": "", + "autoflex.target.type": 
"*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfAllThePrimitiveFields" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field11", + "autoflex.source.type": "bool", + "autoflex.target.path": "Field11", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.BoolValue" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field12", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsAllThePrimitiveFields", + "autoflex.target.fieldname": "Field12", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfAllThePrimitiveFields" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field12", + "autoflex.source.type": "*bool", + "autoflex.target.path": "Field12", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.BoolValue" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/bool_false_value_legacy.golden b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/bool_false_value_legacy.golden new file mode 100644 index 000000000000..084914e0a521 --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/bool_false_value_legacy.golden @@ -0,0 +1,92 @@ +[ + { + "@level": "info", + "@message": "Expanding", + "@module": "provider.autoflex", + "autoflex.source.type": "*", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.path": "", + "autoflex.target.type": "*" + 
}, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.BoolValue", + "autoflex.target.path": "Field1", + "autoflex.target.type": "*bool" + }, + { + "@level": "debug", + "@message": "Using legacy expander", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.BoolValue", + "autoflex.target.path": "Field1", + "autoflex.target.type": "*bool" + }, + { + "@level": "info", + "@message": "Flattening", + "@module": "provider.autoflex", + "autoflex.source.type": "*", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "*bool", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.BoolValue" + }, + { + "@level": "debug", + "@message": "Using legacy flattener", + "@module": "provider.autoflex", + "autoflex.source.path": 
"Field1", + "autoflex.source.type": "*bool", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.BoolValue" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/bool_false_value_legacy_tf_to_aws_pointer.golden b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/bool_false_value_legacy_tf_to_aws_pointer.golden new file mode 100644 index 000000000000..084914e0a521 --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/bool_false_value_legacy_tf_to_aws_pointer.golden @@ -0,0 +1,92 @@ +[ + { + "@level": "info", + "@message": "Expanding", + "@module": "provider.autoflex", + "autoflex.source.type": "*", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.BoolValue", + "autoflex.target.path": "Field1", + "autoflex.target.type": "*bool" + }, + { + "@level": "debug", + "@message": "Using legacy expander", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.BoolValue", + "autoflex.target.path": "Field1", + "autoflex.target.type": "*bool" + }, + { + "@level": "info", + 
"@message": "Flattening", + "@module": "provider.autoflex", + "autoflex.source.type": "*", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "*bool", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.BoolValue" + }, + { + "@level": "debug", + "@message": "Using legacy flattener", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "*bool", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.BoolValue" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/bool_false_value_standard.golden b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/bool_false_value_standard.golden new file mode 100644 index 000000000000..dc50efa6f4fb --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/bool_false_value_standard.golden @@ -0,0 +1,74 @@ +[ + { + "@level": "info", + "@message": "Expanding", + "@module": "provider.autoflex", + "autoflex.source.type": "*", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + 
"autoflex.source.type": "", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.BoolValue", + "autoflex.target.path": "Field1", + "autoflex.target.type": "bool" + }, + { + "@level": "info", + "@message": "Flattening", + "@module": "provider.autoflex", + "autoflex.source.type": "*", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "bool", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.BoolValue" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/bool_false_value_tf_to_aws_pointer.golden b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/bool_false_value_tf_to_aws_pointer.golden new file mode 100644 index 000000000000..c93e49a49035 --- /dev/null +++ 
b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/bool_false_value_tf_to_aws_pointer.golden @@ -0,0 +1,74 @@ +[ + { + "@level": "info", + "@message": "Expanding", + "@module": "provider.autoflex", + "autoflex.source.type": "*", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.BoolValue", + "autoflex.target.path": "Field1", + "autoflex.target.type": "*bool" + }, + { + "@level": "info", + "@message": "Flattening", + "@module": "provider.autoflex", + "autoflex.source.type": "*", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "*bool", + "autoflex.target.path": "Field1", + "autoflex.target.type": 
"github.com/hashicorp/terraform-plugin-framework/types/basetypes.BoolValue" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/bool_null_value_legacy.golden b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/bool_null_value_legacy.golden new file mode 100644 index 000000000000..c059923c4c8f --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/bool_null_value_legacy.golden @@ -0,0 +1,92 @@ +[ + { + "@level": "info", + "@message": "Expanding", + "@module": "provider.autoflex", + "autoflex.source.type": "*", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.BoolValue", + "autoflex.target.path": "Field1", + "autoflex.target.type": "*bool" + }, + { + "@level": "trace", + "@message": "Expanding null value", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.BoolValue", + "autoflex.target.path": "Field1", + "autoflex.target.type": "*bool" + }, + { + "@level": "info", + "@message": "Flattening", + "@module": "provider.autoflex", + "autoflex.source.type": "*", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": 
"Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "*bool", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.BoolValue" + }, + { + "@level": "debug", + "@message": "Using legacy flattener", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "*bool", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.BoolValue" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/bool_null_value_legacy_tf_to_aws_pointer.golden b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/bool_null_value_legacy_tf_to_aws_pointer.golden new file mode 100644 index 000000000000..c059923c4c8f --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/bool_null_value_legacy_tf_to_aws_pointer.golden @@ -0,0 +1,92 @@ +[ + { + "@level": "info", + "@message": "Expanding", + "@module": "provider.autoflex", + "autoflex.source.type": "*", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "trace", + "@message": "Matched 
fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.BoolValue", + "autoflex.target.path": "Field1", + "autoflex.target.type": "*bool" + }, + { + "@level": "trace", + "@message": "Expanding null value", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.BoolValue", + "autoflex.target.path": "Field1", + "autoflex.target.type": "*bool" + }, + { + "@level": "info", + "@message": "Flattening", + "@module": "provider.autoflex", + "autoflex.source.type": "*", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "*bool", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.BoolValue" + }, + { + "@level": "debug", + "@message": "Using legacy flattener", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "*bool", + 
"autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.BoolValue" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/bool_null_value_standard.golden b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/bool_null_value_standard.golden new file mode 100644 index 000000000000..1c8adc46a3fb --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/bool_null_value_standard.golden @@ -0,0 +1,83 @@ +[ + { + "@level": "info", + "@message": "Expanding", + "@module": "provider.autoflex", + "autoflex.source.type": "*", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.BoolValue", + "autoflex.target.path": "Field1", + "autoflex.target.type": "bool" + }, + { + "@level": "trace", + "@message": "Expanding null value", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.BoolValue", + "autoflex.target.path": "Field1", + "autoflex.target.type": "bool" + }, + { + "@level": "info", + "@message": "Flattening", + "@module": "provider.autoflex", + "autoflex.source.type": "*", + 
"autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "bool", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.BoolValue" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/bool_null_value_tf_to_aws_pointer.golden b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/bool_null_value_tf_to_aws_pointer.golden new file mode 100644 index 000000000000..885731c2be5d --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/bool_null_value_tf_to_aws_pointer.golden @@ -0,0 +1,83 @@ +[ + { + "@level": "info", + "@message": "Expanding", + "@module": "provider.autoflex", + "autoflex.source.type": "*", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + 
"@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.BoolValue", + "autoflex.target.path": "Field1", + "autoflex.target.type": "*bool" + }, + { + "@level": "trace", + "@message": "Expanding null value", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.BoolValue", + "autoflex.target.path": "Field1", + "autoflex.target.type": "*bool" + }, + { + "@level": "info", + "@message": "Flattening", + "@module": "provider.autoflex", + "autoflex.source.type": "*", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "*bool", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.BoolValue" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/bool_true_value_legacy.golden b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/bool_true_value_legacy.golden new file mode 100644 index 000000000000..084914e0a521 --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/bool_true_value_legacy.golden @@ 
-0,0 +1,92 @@ +[ + { + "@level": "info", + "@message": "Expanding", + "@module": "provider.autoflex", + "autoflex.source.type": "*", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.BoolValue", + "autoflex.target.path": "Field1", + "autoflex.target.type": "*bool" + }, + { + "@level": "debug", + "@message": "Using legacy expander", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.BoolValue", + "autoflex.target.path": "Field1", + "autoflex.target.type": "*bool" + }, + { + "@level": "info", + "@message": "Flattening", + "@module": "provider.autoflex", + "autoflex.source.type": "*", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": 
"Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "*bool", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.BoolValue" + }, + { + "@level": "debug", + "@message": "Using legacy flattener", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "*bool", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.BoolValue" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/bool_true_value_legacy_tf_to_aws_pointer.golden b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/bool_true_value_legacy_tf_to_aws_pointer.golden new file mode 100644 index 000000000000..084914e0a521 --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/bool_true_value_legacy_tf_to_aws_pointer.golden @@ -0,0 +1,92 @@ +[ + { + "@level": "info", + "@message": "Expanding", + "@module": "provider.autoflex", + "autoflex.source.type": "*", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.BoolValue", + "autoflex.target.path": 
"Field1", + "autoflex.target.type": "*bool" + }, + { + "@level": "debug", + "@message": "Using legacy expander", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.BoolValue", + "autoflex.target.path": "Field1", + "autoflex.target.type": "*bool" + }, + { + "@level": "info", + "@message": "Flattening", + "@module": "provider.autoflex", + "autoflex.source.type": "*", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "*bool", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.BoolValue" + }, + { + "@level": "debug", + "@message": "Using legacy flattener", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "*bool", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.BoolValue" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/bool_true_value_standard.golden b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/bool_true_value_standard.golden new file mode 100644 index 000000000000..dc50efa6f4fb --- /dev/null +++ 
b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/bool_true_value_standard.golden @@ -0,0 +1,74 @@ +[ + { + "@level": "info", + "@message": "Expanding", + "@module": "provider.autoflex", + "autoflex.source.type": "*", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.BoolValue", + "autoflex.target.path": "Field1", + "autoflex.target.type": "bool" + }, + { + "@level": "info", + "@message": "Flattening", + "@module": "provider.autoflex", + "autoflex.source.type": "*", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "bool", + "autoflex.target.path": "Field1", + "autoflex.target.type": 
"github.com/hashicorp/terraform-plugin-framework/types/basetypes.BoolValue" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/bool_true_value_tf_to_aws_pointer.golden b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/bool_true_value_tf_to_aws_pointer.golden new file mode 100644 index 000000000000..c93e49a49035 --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/bool_true_value_tf_to_aws_pointer.golden @@ -0,0 +1,74 @@ +[ + { + "@level": "info", + "@message": "Expanding", + "@module": "provider.autoflex", + "autoflex.source.type": "*", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.BoolValue", + "autoflex.target.path": "Field1", + "autoflex.target.type": "*bool" + }, + { + "@level": "info", + "@message": "Flattening", + "@module": "provider.autoflex", + "autoflex.source.type": "*", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + 
"autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "*bool", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.BoolValue" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/float32_null_value_legacy.golden b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/float32_null_value_legacy.golden new file mode 100644 index 000000000000..f836d26bad92 --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/float32_null_value_legacy.golden @@ -0,0 +1,92 @@ +[ + { + "@level": "info", + "@message": "Expanding", + "@module": "provider.autoflex", + "autoflex.source.type": "*", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Float32Value", + "autoflex.target.path": "Field1", + "autoflex.target.type": "*float32" + }, + { + "@level": "trace", + "@message": "Expanding null value", + 
"@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Float32Value", + "autoflex.target.path": "Field1", + "autoflex.target.type": "*float32" + }, + { + "@level": "info", + "@message": "Flattening", + "@module": "provider.autoflex", + "autoflex.source.type": "*", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "*float32", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Float32Value" + }, + { + "@level": "debug", + "@message": "Using legacy flattener", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "*float32", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Float32Value" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/float32_null_value_legacy_tf_to_aws_pointer.golden b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/float32_null_value_legacy_tf_to_aws_pointer.golden new file mode 100644 index 000000000000..f836d26bad92 --- /dev/null +++ 
b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/float32_null_value_legacy_tf_to_aws_pointer.golden @@ -0,0 +1,92 @@ +[ + { + "@level": "info", + "@message": "Expanding", + "@module": "provider.autoflex", + "autoflex.source.type": "*", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Float32Value", + "autoflex.target.path": "Field1", + "autoflex.target.type": "*float32" + }, + { + "@level": "trace", + "@message": "Expanding null value", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Float32Value", + "autoflex.target.path": "Field1", + "autoflex.target.type": "*float32" + }, + { + "@level": "info", + "@message": "Flattening", + "@module": "provider.autoflex", + "autoflex.source.type": "*", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "", + 
"autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "*float32", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Float32Value" + }, + { + "@level": "debug", + "@message": "Using legacy flattener", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "*float32", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Float32Value" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/float32_null_value_standard.golden b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/float32_null_value_standard.golden new file mode 100644 index 000000000000..0793a8efcf3b --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/float32_null_value_standard.golden @@ -0,0 +1,83 @@ +[ + { + "@level": "info", + "@message": "Expanding", + "@module": "provider.autoflex", + "autoflex.source.type": "*", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + 
"autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Float32Value", + "autoflex.target.path": "Field1", + "autoflex.target.type": "float32" + }, + { + "@level": "trace", + "@message": "Expanding null value", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Float32Value", + "autoflex.target.path": "Field1", + "autoflex.target.type": "float32" + }, + { + "@level": "info", + "@message": "Flattening", + "@module": "provider.autoflex", + "autoflex.source.type": "*", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "float32", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Float32Value" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/float32_null_value_tf_to_aws_pointer.golden b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/float32_null_value_tf_to_aws_pointer.golden new file mode 100644 index 000000000000..6956445f31b5 --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/float32_null_value_tf_to_aws_pointer.golden @@ -0,0 +1,83 @@ +[ + { + "@level": "info", + "@message": 
"Expanding", + "@module": "provider.autoflex", + "autoflex.source.type": "*", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Float32Value", + "autoflex.target.path": "Field1", + "autoflex.target.type": "*float32" + }, + { + "@level": "trace", + "@message": "Expanding null value", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Float32Value", + "autoflex.target.path": "Field1", + "autoflex.target.type": "*float32" + }, + { + "@level": "info", + "@message": "Flattening", + "@module": "provider.autoflex", + "autoflex.source.type": "*", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + 
"autoflex.source.path": "Field1", + "autoflex.source.type": "*float32", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Float32Value" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/float32_value_legacy.golden b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/float32_value_legacy.golden new file mode 100644 index 000000000000..2ceba686b1c9 --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/float32_value_legacy.golden @@ -0,0 +1,92 @@ +[ + { + "@level": "info", + "@message": "Expanding", + "@module": "provider.autoflex", + "autoflex.source.type": "*", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Float32Value", + "autoflex.target.path": "Field1", + "autoflex.target.type": "*float32" + }, + { + "@level": "debug", + "@message": "Using legacy expander", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Float32Value", + "autoflex.target.path": "Field1", + "autoflex.target.type": "*float32" + }, + { + "@level": "info", + "@message": "Flattening", 
+ "@module": "provider.autoflex", + "autoflex.source.type": "*", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "*float32", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Float32Value" + }, + { + "@level": "debug", + "@message": "Using legacy flattener", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "*float32", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Float32Value" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/float32_value_legacy_tf_to_aws_pointer.golden b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/float32_value_legacy_tf_to_aws_pointer.golden new file mode 100644 index 000000000000..2ceba686b1c9 --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/float32_value_legacy_tf_to_aws_pointer.golden @@ -0,0 +1,92 @@ +[ + { + "@level": "info", + "@message": "Expanding", + "@module": "provider.autoflex", + "autoflex.source.type": "*", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", 
+ "autoflex.source.type": "", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Float32Value", + "autoflex.target.path": "Field1", + "autoflex.target.type": "*float32" + }, + { + "@level": "debug", + "@message": "Using legacy expander", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Float32Value", + "autoflex.target.path": "Field1", + "autoflex.target.type": "*float32" + }, + { + "@level": "info", + "@message": "Flattening", + "@module": "provider.autoflex", + "autoflex.source.type": "*", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "*float32", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Float32Value" + }, + { + "@level": 
"debug", + "@message": "Using legacy flattener", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "*float32", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Float32Value" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/float32_value_standard.golden b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/float32_value_standard.golden new file mode 100644 index 000000000000..ee09a97048a8 --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/float32_value_standard.golden @@ -0,0 +1,74 @@ +[ + { + "@level": "info", + "@message": "Expanding", + "@module": "provider.autoflex", + "autoflex.source.type": "*", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Float32Value", + "autoflex.target.path": "Field1", + "autoflex.target.type": "float32" + }, + { + "@level": "info", + "@message": "Flattening", + "@module": "provider.autoflex", + "autoflex.source.type": "*", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + 
"autoflex.source.type": "", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "float32", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Float32Value" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/float32_value_tf_to_aws_pointer.golden b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/float32_value_tf_to_aws_pointer.golden new file mode 100644 index 000000000000..6ff062b1d5f5 --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/float32_value_tf_to_aws_pointer.golden @@ -0,0 +1,74 @@ +[ + { + "@level": "info", + "@message": "Expanding", + "@module": "provider.autoflex", + "autoflex.source.type": "*", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": 
"github.com/hashicorp/terraform-plugin-framework/types/basetypes.Float32Value", + "autoflex.target.path": "Field1", + "autoflex.target.type": "*float32" + }, + { + "@level": "info", + "@message": "Flattening", + "@module": "provider.autoflex", + "autoflex.source.type": "*", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "*float32", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Float32Value" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/float32_zero_value_legacy.golden b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/float32_zero_value_legacy.golden new file mode 100644 index 000000000000..2ceba686b1c9 --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/float32_zero_value_legacy.golden @@ -0,0 +1,92 @@ +[ + { + "@level": "info", + "@message": "Expanding", + "@module": "provider.autoflex", + "autoflex.source.type": "*", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "trace", + "@message": "Matched 
fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Float32Value", + "autoflex.target.path": "Field1", + "autoflex.target.type": "*float32" + }, + { + "@level": "debug", + "@message": "Using legacy expander", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Float32Value", + "autoflex.target.path": "Field1", + "autoflex.target.type": "*float32" + }, + { + "@level": "info", + "@message": "Flattening", + "@module": "provider.autoflex", + "autoflex.source.type": "*", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "*float32", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Float32Value" + }, + { + "@level": "debug", + "@message": "Using legacy flattener", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + 
"autoflex.source.type": "*float32", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Float32Value" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/float32_zero_value_legacy_tf_to_aws_pointer.golden b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/float32_zero_value_legacy_tf_to_aws_pointer.golden new file mode 100644 index 000000000000..2ceba686b1c9 --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/float32_zero_value_legacy_tf_to_aws_pointer.golden @@ -0,0 +1,92 @@ +[ + { + "@level": "info", + "@message": "Expanding", + "@module": "provider.autoflex", + "autoflex.source.type": "*", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Float32Value", + "autoflex.target.path": "Field1", + "autoflex.target.type": "*float32" + }, + { + "@level": "debug", + "@message": "Using legacy expander", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Float32Value", + "autoflex.target.path": "Field1", + "autoflex.target.type": "*float32" + }, + { + "@level": 
"info", + "@message": "Flattening", + "@module": "provider.autoflex", + "autoflex.source.type": "*", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "*float32", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Float32Value" + }, + { + "@level": "debug", + "@message": "Using legacy flattener", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "*float32", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Float32Value" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/float32_zero_value_standard.golden b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/float32_zero_value_standard.golden new file mode 100644 index 000000000000..ee09a97048a8 --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/float32_zero_value_standard.golden @@ -0,0 +1,74 @@ +[ + { + "@level": "info", + "@message": "Expanding", + "@module": "provider.autoflex", + "autoflex.source.type": "*", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": 
"", + "autoflex.source.type": "", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Float32Value", + "autoflex.target.path": "Field1", + "autoflex.target.type": "float32" + }, + { + "@level": "info", + "@message": "Flattening", + "@module": "provider.autoflex", + "autoflex.source.type": "*", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "float32", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Float32Value" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/float32_zero_value_tf_to_aws_pointer.golden b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/float32_zero_value_tf_to_aws_pointer.golden new file mode 100644 index 
000000000000..6ff062b1d5f5 --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/float32_zero_value_tf_to_aws_pointer.golden @@ -0,0 +1,74 @@ +[ + { + "@level": "info", + "@message": "Expanding", + "@module": "provider.autoflex", + "autoflex.source.type": "*", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Float32Value", + "autoflex.target.path": "Field1", + "autoflex.target.type": "*float32" + }, + { + "@level": "info", + "@message": "Flattening", + "@module": "provider.autoflex", + "autoflex.source.type": "*", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "*float32", + "autoflex.target.path": 
"Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Float32Value" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/float64_null_value_legacy.golden b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/float64_null_value_legacy.golden new file mode 100644 index 000000000000..504eb061446b --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/float64_null_value_legacy.golden @@ -0,0 +1,92 @@ +[ + { + "@level": "info", + "@message": "Expanding", + "@module": "provider.autoflex", + "autoflex.source.type": "*", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Float64Value", + "autoflex.target.path": "Field1", + "autoflex.target.type": "*float64" + }, + { + "@level": "trace", + "@message": "Expanding null value", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Float64Value", + "autoflex.target.path": "Field1", + "autoflex.target.type": "*float64" + }, + { + "@level": "info", + "@message": "Flattening", + "@module": "provider.autoflex", + "autoflex.source.type": "*", + 
"autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "*float64", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Float64Value" + }, + { + "@level": "debug", + "@message": "Using legacy flattener", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "*float64", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Float64Value" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/float64_null_value_legacy_tf_to_aws_pointer.golden b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/float64_null_value_legacy_tf_to_aws_pointer.golden new file mode 100644 index 000000000000..504eb061446b --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/float64_null_value_legacy_tf_to_aws_pointer.golden @@ -0,0 +1,92 @@ +[ + { + "@level": "info", + "@message": "Expanding", + "@module": "provider.autoflex", + "autoflex.source.type": "*", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "", + 
"autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Float64Value", + "autoflex.target.path": "Field1", + "autoflex.target.type": "*float64" + }, + { + "@level": "trace", + "@message": "Expanding null value", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Float64Value", + "autoflex.target.path": "Field1", + "autoflex.target.type": "*float64" + }, + { + "@level": "info", + "@message": "Flattening", + "@module": "provider.autoflex", + "autoflex.source.type": "*", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "*float64", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Float64Value" + }, + { + "@level": "debug", + "@message": "Using 
legacy flattener", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "*float64", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Float64Value" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/float64_null_value_standard.golden b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/float64_null_value_standard.golden new file mode 100644 index 000000000000..894ed50249ec --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/float64_null_value_standard.golden @@ -0,0 +1,83 @@ +[ + { + "@level": "info", + "@message": "Expanding", + "@module": "provider.autoflex", + "autoflex.source.type": "*", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Float64Value", + "autoflex.target.path": "Field1", + "autoflex.target.type": "float64" + }, + { + "@level": "trace", + "@message": "Expanding null value", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Float64Value", + "autoflex.target.path": "Field1", + 
"autoflex.target.type": "float64" + }, + { + "@level": "info", + "@message": "Flattening", + "@module": "provider.autoflex", + "autoflex.source.type": "*", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "float64", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Float64Value" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/float64_null_value_tf_to_aws_pointer.golden b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/float64_null_value_tf_to_aws_pointer.golden new file mode 100644 index 000000000000..8d55c7c858d8 --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/float64_null_value_tf_to_aws_pointer.golden @@ -0,0 +1,83 @@ +[ + { + "@level": "info", + "@message": "Expanding", + "@module": "provider.autoflex", + "autoflex.source.type": "*", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + 
"autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Float64Value", + "autoflex.target.path": "Field1", + "autoflex.target.type": "*float64" + }, + { + "@level": "trace", + "@message": "Expanding null value", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Float64Value", + "autoflex.target.path": "Field1", + "autoflex.target.type": "*float64" + }, + { + "@level": "info", + "@message": "Flattening", + "@module": "provider.autoflex", + "autoflex.source.type": "*", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "*float64", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Float64Value" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/float64_value_legacy.golden 
b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/float64_value_legacy.golden new file mode 100644 index 000000000000..7119ff04a146 --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/float64_value_legacy.golden @@ -0,0 +1,92 @@ +[ + { + "@level": "info", + "@message": "Expanding", + "@module": "provider.autoflex", + "autoflex.source.type": "*", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Float64Value", + "autoflex.target.path": "Field1", + "autoflex.target.type": "*float64" + }, + { + "@level": "debug", + "@message": "Using legacy expander", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Float64Value", + "autoflex.target.path": "Field1", + "autoflex.target.type": "*float64" + }, + { + "@level": "info", + "@message": "Flattening", + "@module": "provider.autoflex", + "autoflex.source.type": "*", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "trace", + "@message": "Matched 
fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "*float64", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Float64Value" + }, + { + "@level": "debug", + "@message": "Using legacy flattener", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "*float64", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Float64Value" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/float64_value_legacy_tf_to_aws_pointer.golden b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/float64_value_legacy_tf_to_aws_pointer.golden new file mode 100644 index 000000000000..7119ff04a146 --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/float64_value_legacy_tf_to_aws_pointer.golden @@ -0,0 +1,92 @@ +[ + { + "@level": "info", + "@message": "Expanding", + "@module": "provider.autoflex", + "autoflex.source.type": "*", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", 
+ "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Float64Value", + "autoflex.target.path": "Field1", + "autoflex.target.type": "*float64" + }, + { + "@level": "debug", + "@message": "Using legacy expander", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Float64Value", + "autoflex.target.path": "Field1", + "autoflex.target.type": "*float64" + }, + { + "@level": "info", + "@message": "Flattening", + "@module": "provider.autoflex", + "autoflex.source.type": "*", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "*float64", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Float64Value" + }, + { + "@level": "debug", + "@message": "Using legacy flattener", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "*float64", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Float64Value" + } +] \ No newline at end of file diff --git 
a/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/float64_value_standard.golden b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/float64_value_standard.golden new file mode 100644 index 000000000000..697f98a9faaa --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/float64_value_standard.golden @@ -0,0 +1,74 @@ +[ + { + "@level": "info", + "@message": "Expanding", + "@module": "provider.autoflex", + "autoflex.source.type": "*", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Float64Value", + "autoflex.target.path": "Field1", + "autoflex.target.type": "float64" + }, + { + "@level": "info", + "@message": "Flattening", + "@module": "provider.autoflex", + "autoflex.source.type": "*", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": 
"", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "float64", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Float64Value" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/float64_value_tf_to_aws_pointer.golden b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/float64_value_tf_to_aws_pointer.golden new file mode 100644 index 000000000000..fcd33650f255 --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/float64_value_tf_to_aws_pointer.golden @@ -0,0 +1,74 @@ +[ + { + "@level": "info", + "@message": "Expanding", + "@module": "provider.autoflex", + "autoflex.source.type": "*", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Float64Value", + "autoflex.target.path": "Field1", + "autoflex.target.type": "*float64" + }, + { + "@level": "info", + "@message": "Flattening", + "@module": "provider.autoflex", + "autoflex.source.type": "*", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + 
"@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "*float64", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Float64Value" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/float64_zero_value_legacy.golden b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/float64_zero_value_legacy.golden new file mode 100644 index 000000000000..7119ff04a146 --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/float64_zero_value_legacy.golden @@ -0,0 +1,92 @@ +[ + { + "@level": "info", + "@message": "Expanding", + "@module": "provider.autoflex", + "autoflex.source.type": "*", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": 
"Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Float64Value", + "autoflex.target.path": "Field1", + "autoflex.target.type": "*float64" + }, + { + "@level": "debug", + "@message": "Using legacy expander", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Float64Value", + "autoflex.target.path": "Field1", + "autoflex.target.type": "*float64" + }, + { + "@level": "info", + "@message": "Flattening", + "@module": "provider.autoflex", + "autoflex.source.type": "*", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "*float64", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Float64Value" + }, + { + "@level": "debug", + "@message": "Using legacy flattener", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "*float64", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Float64Value" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/float64_zero_value_legacy_tf_to_aws_pointer.golden 
b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/float64_zero_value_legacy_tf_to_aws_pointer.golden new file mode 100644 index 000000000000..7119ff04a146 --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/float64_zero_value_legacy_tf_to_aws_pointer.golden @@ -0,0 +1,92 @@ +[ + { + "@level": "info", + "@message": "Expanding", + "@module": "provider.autoflex", + "autoflex.source.type": "*", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Float64Value", + "autoflex.target.path": "Field1", + "autoflex.target.type": "*float64" + }, + { + "@level": "debug", + "@message": "Using legacy expander", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Float64Value", + "autoflex.target.path": "Field1", + "autoflex.target.type": "*float64" + }, + { + "@level": "info", + "@message": "Flattening", + "@module": "provider.autoflex", + "autoflex.source.type": "*", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + 
"@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "*float64", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Float64Value" + }, + { + "@level": "debug", + "@message": "Using legacy flattener", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "*float64", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Float64Value" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/float64_zero_value_standard.golden b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/float64_zero_value_standard.golden new file mode 100644 index 000000000000..697f98a9faaa --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/float64_zero_value_standard.golden @@ -0,0 +1,74 @@ +[ + { + "@level": "info", + "@message": "Expanding", + "@module": "provider.autoflex", + "autoflex.source.type": "*", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.fieldname": "Field1", + 
"autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Float64Value", + "autoflex.target.path": "Field1", + "autoflex.target.type": "float64" + }, + { + "@level": "info", + "@message": "Flattening", + "@module": "provider.autoflex", + "autoflex.source.type": "*", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "float64", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Float64Value" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/float64_zero_value_tf_to_aws_pointer.golden b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/float64_zero_value_tf_to_aws_pointer.golden new file mode 100644 index 000000000000..fcd33650f255 --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/float64_zero_value_tf_to_aws_pointer.golden @@ -0,0 +1,74 @@ +[ + { + "@level": "info", + "@message": "Expanding", + "@module": "provider.autoflex", + "autoflex.source.type": "*", + "autoflex.target.type": "*" + }, + { + "@level": 
"info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Float64Value", + "autoflex.target.path": "Field1", + "autoflex.target.type": "*float64" + }, + { + "@level": "info", + "@message": "Flattening", + "@module": "provider.autoflex", + "autoflex.source.type": "*", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "*float64", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Float64Value" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/int32_null_value_legacy.golden 
b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/int32_null_value_legacy.golden new file mode 100644 index 000000000000..cea99278cda2 --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/int32_null_value_legacy.golden @@ -0,0 +1,92 @@ +[ + { + "@level": "info", + "@message": "Expanding", + "@module": "provider.autoflex", + "autoflex.source.type": "*", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Int32Value", + "autoflex.target.path": "Field1", + "autoflex.target.type": "*int32" + }, + { + "@level": "trace", + "@message": "Expanding null value", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Int32Value", + "autoflex.target.path": "Field1", + "autoflex.target.type": "*int32" + }, + { + "@level": "info", + "@message": "Flattening", + "@module": "provider.autoflex", + "autoflex.source.type": "*", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "trace", + "@message": "Matched 
fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "*int32", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Int32Value" + }, + { + "@level": "debug", + "@message": "Using legacy flattener", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "*int32", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Int32Value" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/int32_null_value_legacy_tf_to_aws_pointer.golden b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/int32_null_value_legacy_tf_to_aws_pointer.golden new file mode 100644 index 000000000000..cea99278cda2 --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/int32_null_value_legacy_tf_to_aws_pointer.golden @@ -0,0 +1,92 @@ +[ + { + "@level": "info", + "@message": "Expanding", + "@module": "provider.autoflex", + "autoflex.source.type": "*", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", 
+ "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Int32Value", + "autoflex.target.path": "Field1", + "autoflex.target.type": "*int32" + }, + { + "@level": "trace", + "@message": "Expanding null value", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Int32Value", + "autoflex.target.path": "Field1", + "autoflex.target.type": "*int32" + }, + { + "@level": "info", + "@message": "Flattening", + "@module": "provider.autoflex", + "autoflex.source.type": "*", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "*int32", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Int32Value" + }, + { + "@level": "debug", + "@message": "Using legacy flattener", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "*int32", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Int32Value" + } +] \ No newline at end of file diff --git 
a/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/int32_null_value_standard.golden b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/int32_null_value_standard.golden new file mode 100644 index 000000000000..2a8c2a46bceb --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/int32_null_value_standard.golden @@ -0,0 +1,83 @@ +[ + { + "@level": "info", + "@message": "Expanding", + "@module": "provider.autoflex", + "autoflex.source.type": "*", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Int32Value", + "autoflex.target.path": "Field1", + "autoflex.target.type": "int32" + }, + { + "@level": "trace", + "@message": "Expanding null value", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Int32Value", + "autoflex.target.path": "Field1", + "autoflex.target.type": "int32" + }, + { + "@level": "info", + "@message": "Flattening", + "@module": "provider.autoflex", + "autoflex.source.type": "*", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "", + 
"autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "int32", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Int32Value" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/int32_null_value_tf_to_aws_pointer.golden b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/int32_null_value_tf_to_aws_pointer.golden new file mode 100644 index 000000000000..eba67773deee --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/int32_null_value_tf_to_aws_pointer.golden @@ -0,0 +1,83 @@ +[ + { + "@level": "info", + "@message": "Expanding", + "@module": "provider.autoflex", + "autoflex.source.type": "*", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": 
"github.com/hashicorp/terraform-plugin-framework/types/basetypes.Int32Value", + "autoflex.target.path": "Field1", + "autoflex.target.type": "*int32" + }, + { + "@level": "trace", + "@message": "Expanding null value", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Int32Value", + "autoflex.target.path": "Field1", + "autoflex.target.type": "*int32" + }, + { + "@level": "info", + "@message": "Flattening", + "@module": "provider.autoflex", + "autoflex.source.type": "*", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "*int32", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Int32Value" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/int32_value_legacy.golden b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/int32_value_legacy.golden new file mode 100644 index 000000000000..70c1207b1d2f --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/int32_value_legacy.golden @@ -0,0 +1,92 @@ +[ + { + "@level": "info", + "@message": "Expanding", + "@module": "provider.autoflex", + "autoflex.source.type": "*", + 
"autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Int32Value", + "autoflex.target.path": "Field1", + "autoflex.target.type": "*int32" + }, + { + "@level": "debug", + "@message": "Using legacy expander", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Int32Value", + "autoflex.target.path": "Field1", + "autoflex.target.type": "*int32" + }, + { + "@level": "info", + "@message": "Flattening", + "@module": "provider.autoflex", + "autoflex.source.type": "*", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "*int32", + 
"autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Int32Value" + }, + { + "@level": "debug", + "@message": "Using legacy flattener", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "*int32", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Int32Value" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/int32_value_legacy_tf_to_aws_pointer.golden b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/int32_value_legacy_tf_to_aws_pointer.golden new file mode 100644 index 000000000000..70c1207b1d2f --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/int32_value_legacy_tf_to_aws_pointer.golden @@ -0,0 +1,92 @@ +[ + { + "@level": "info", + "@message": "Expanding", + "@module": "provider.autoflex", + "autoflex.source.type": "*", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Int32Value", + "autoflex.target.path": "Field1", + "autoflex.target.type": "*int32" + }, + { + "@level": "debug", + "@message": "Using legacy expander", + "@module": 
"provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Int32Value", + "autoflex.target.path": "Field1", + "autoflex.target.type": "*int32" + }, + { + "@level": "info", + "@message": "Flattening", + "@module": "provider.autoflex", + "autoflex.source.type": "*", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "*int32", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Int32Value" + }, + { + "@level": "debug", + "@message": "Using legacy flattener", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "*int32", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Int32Value" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/int32_value_standard.golden b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/int32_value_standard.golden new file mode 100644 index 000000000000..27b1ecc16f97 --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/int32_value_standard.golden @@ -0,0 +1,74 @@ +[ + { + "@level": "info", + "@message": 
"Expanding", + "@module": "provider.autoflex", + "autoflex.source.type": "*", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Int32Value", + "autoflex.target.path": "Field1", + "autoflex.target.type": "int32" + }, + { + "@level": "info", + "@message": "Flattening", + "@module": "provider.autoflex", + "autoflex.source.type": "*", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "int32", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Int32Value" + } +] \ No newline at end of file diff --git 
a/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/int32_value_tf_to_aws_pointer.golden b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/int32_value_tf_to_aws_pointer.golden new file mode 100644 index 000000000000..5e58035eabdd --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/int32_value_tf_to_aws_pointer.golden @@ -0,0 +1,74 @@ +[ + { + "@level": "info", + "@message": "Expanding", + "@module": "provider.autoflex", + "autoflex.source.type": "*", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Int32Value", + "autoflex.target.path": "Field1", + "autoflex.target.type": "*int32" + }, + { + "@level": "info", + "@message": "Flattening", + "@module": "provider.autoflex", + "autoflex.source.type": "*", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.fieldname": "Field1", + 
"autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "*int32", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Int32Value" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/int32_zero_value_legacy.golden b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/int32_zero_value_legacy.golden new file mode 100644 index 000000000000..70c1207b1d2f --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/int32_zero_value_legacy.golden @@ -0,0 +1,92 @@ +[ + { + "@level": "info", + "@message": "Expanding", + "@module": "provider.autoflex", + "autoflex.source.type": "*", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Int32Value", + "autoflex.target.path": "Field1", + "autoflex.target.type": "*int32" + }, + { + "@level": "debug", + "@message": "Using legacy expander", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": 
"github.com/hashicorp/terraform-plugin-framework/types/basetypes.Int32Value", + "autoflex.target.path": "Field1", + "autoflex.target.type": "*int32" + }, + { + "@level": "info", + "@message": "Flattening", + "@module": "provider.autoflex", + "autoflex.source.type": "*", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "*int32", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Int32Value" + }, + { + "@level": "debug", + "@message": "Using legacy flattener", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "*int32", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Int32Value" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/int32_zero_value_legacy_tf_to_aws_pointer.golden b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/int32_zero_value_legacy_tf_to_aws_pointer.golden new file mode 100644 index 000000000000..70c1207b1d2f --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/int32_zero_value_legacy_tf_to_aws_pointer.golden @@ -0,0 +1,92 @@ +[ + { + "@level": "info", + "@message": "Expanding", + 
"@module": "provider.autoflex", + "autoflex.source.type": "*", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Int32Value", + "autoflex.target.path": "Field1", + "autoflex.target.type": "*int32" + }, + { + "@level": "debug", + "@message": "Using legacy expander", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Int32Value", + "autoflex.target.path": "Field1", + "autoflex.target.type": "*int32" + }, + { + "@level": "info", + "@message": "Flattening", + "@module": "provider.autoflex", + "autoflex.source.type": "*", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": 
"Field1", + "autoflex.source.type": "*int32", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Int32Value" + }, + { + "@level": "debug", + "@message": "Using legacy flattener", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "*int32", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Int32Value" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/int32_zero_value_standard.golden b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/int32_zero_value_standard.golden new file mode 100644 index 000000000000..27b1ecc16f97 --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/int32_zero_value_standard.golden @@ -0,0 +1,74 @@ +[ + { + "@level": "info", + "@message": "Expanding", + "@module": "provider.autoflex", + "autoflex.source.type": "*", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Int32Value", + "autoflex.target.path": "Field1", + "autoflex.target.type": "int32" + }, + { + "@level": "info", + "@message": "Flattening", + "@module": 
"provider.autoflex", + "autoflex.source.type": "*", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "int32", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Int32Value" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/int32_zero_value_tf_to_aws_pointer.golden b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/int32_zero_value_tf_to_aws_pointer.golden new file mode 100644 index 000000000000..5e58035eabdd --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/int32_zero_value_tf_to_aws_pointer.golden @@ -0,0 +1,74 @@ +[ + { + "@level": "info", + "@message": "Expanding", + "@module": "provider.autoflex", + "autoflex.source.type": "*", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.fieldname": "Field1", + 
"autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Int32Value", + "autoflex.target.path": "Field1", + "autoflex.target.type": "*int32" + }, + { + "@level": "info", + "@message": "Flattening", + "@module": "provider.autoflex", + "autoflex.source.type": "*", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "*int32", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Int32Value" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/int64_null_value_legacy.golden b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/int64_null_value_legacy.golden new file mode 100644 index 000000000000..a184e6b65358 --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/int64_null_value_legacy.golden @@ -0,0 +1,92 @@ +[ + { + "@level": "info", + "@message": "Expanding", + "@module": "provider.autoflex", + "autoflex.source.type": "*", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": 
"provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Int64Value", + "autoflex.target.path": "Field1", + "autoflex.target.type": "*int64" + }, + { + "@level": "trace", + "@message": "Expanding null value", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Int64Value", + "autoflex.target.path": "Field1", + "autoflex.target.type": "*int64" + }, + { + "@level": "info", + "@message": "Flattening", + "@module": "provider.autoflex", + "autoflex.source.type": "*", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "*int64", + "autoflex.target.path": "Field1", + "autoflex.target.type": 
"github.com/hashicorp/terraform-plugin-framework/types/basetypes.Int64Value" + }, + { + "@level": "debug", + "@message": "Using legacy flattener", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "*int64", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Int64Value" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/int64_null_value_legacy_tf_to_aws_pointer.golden b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/int64_null_value_legacy_tf_to_aws_pointer.golden new file mode 100644 index 000000000000..a184e6b65358 --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/int64_null_value_legacy_tf_to_aws_pointer.golden @@ -0,0 +1,92 @@ +[ + { + "@level": "info", + "@message": "Expanding", + "@module": "provider.autoflex", + "autoflex.source.type": "*", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Int64Value", + "autoflex.target.path": "Field1", + "autoflex.target.type": "*int64" + }, + { + "@level": "trace", + "@message": "Expanding null value", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", 
+ "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Int64Value", + "autoflex.target.path": "Field1", + "autoflex.target.type": "*int64" + }, + { + "@level": "info", + "@message": "Flattening", + "@module": "provider.autoflex", + "autoflex.source.type": "*", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "*int64", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Int64Value" + }, + { + "@level": "debug", + "@message": "Using legacy flattener", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "*int64", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Int64Value" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/int64_null_value_standard.golden b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/int64_null_value_standard.golden new file mode 100644 index 000000000000..085133610ec4 --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/int64_null_value_standard.golden @@ -0,0 +1,83 @@ +[ + { + "@level": "info", + "@message": "Expanding", + "@module": 
"provider.autoflex", + "autoflex.source.type": "*", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Int64Value", + "autoflex.target.path": "Field1", + "autoflex.target.type": "int64" + }, + { + "@level": "trace", + "@message": "Expanding null value", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Int64Value", + "autoflex.target.path": "Field1", + "autoflex.target.type": "int64" + }, + { + "@level": "info", + "@message": "Flattening", + "@module": "provider.autoflex", + "autoflex.source.type": "*", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + 
"autoflex.source.type": "int64", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Int64Value" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/int64_null_value_tf_to_aws_pointer.golden b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/int64_null_value_tf_to_aws_pointer.golden new file mode 100644 index 000000000000..2cc07d6880c2 --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/int64_null_value_tf_to_aws_pointer.golden @@ -0,0 +1,83 @@ +[ + { + "@level": "info", + "@message": "Expanding", + "@module": "provider.autoflex", + "autoflex.source.type": "*", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Int64Value", + "autoflex.target.path": "Field1", + "autoflex.target.type": "*int64" + }, + { + "@level": "trace", + "@message": "Expanding null value", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Int64Value", + "autoflex.target.path": "Field1", + "autoflex.target.type": "*int64" + }, + { + "@level": "info", + "@message": "Flattening", + 
"@module": "provider.autoflex", + "autoflex.source.type": "*", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "*int64", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Int64Value" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/int64_value_legacy.golden b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/int64_value_legacy.golden new file mode 100644 index 000000000000..76dd740b2ec0 --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/int64_value_legacy.golden @@ -0,0 +1,92 @@ +[ + { + "@level": "info", + "@message": "Expanding", + "@module": "provider.autoflex", + "autoflex.source.type": "*", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": 
"*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Int64Value", + "autoflex.target.path": "Field1", + "autoflex.target.type": "*int64" + }, + { + "@level": "debug", + "@message": "Using legacy expander", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Int64Value", + "autoflex.target.path": "Field1", + "autoflex.target.type": "*int64" + }, + { + "@level": "info", + "@message": "Flattening", + "@module": "provider.autoflex", + "autoflex.source.type": "*", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "*int64", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Int64Value" + }, + { + "@level": "debug", + "@message": "Using legacy flattener", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "*int64", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Int64Value" + } +] \ No newline at end of file diff --git 
a/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/int64_value_legacy_tf_to_aws_pointer.golden b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/int64_value_legacy_tf_to_aws_pointer.golden new file mode 100644 index 000000000000..76dd740b2ec0 --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/int64_value_legacy_tf_to_aws_pointer.golden @@ -0,0 +1,92 @@ +[ + { + "@level": "info", + "@message": "Expanding", + "@module": "provider.autoflex", + "autoflex.source.type": "*", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Int64Value", + "autoflex.target.path": "Field1", + "autoflex.target.type": "*int64" + }, + { + "@level": "debug", + "@message": "Using legacy expander", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Int64Value", + "autoflex.target.path": "Field1", + "autoflex.target.type": "*int64" + }, + { + "@level": "info", + "@message": "Flattening", + "@module": "provider.autoflex", + "autoflex.source.type": "*", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + 
"autoflex.source.type": "", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "*int64", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Int64Value" + }, + { + "@level": "debug", + "@message": "Using legacy flattener", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "*int64", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Int64Value" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/int64_value_standard.golden b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/int64_value_standard.golden new file mode 100644 index 000000000000..68aadd91a106 --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/int64_value_standard.golden @@ -0,0 +1,74 @@ +[ + { + "@level": "info", + "@message": "Expanding", + "@module": "provider.autoflex", + "autoflex.source.type": "*", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + 
"autoflex.source.type": "", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Int64Value", + "autoflex.target.path": "Field1", + "autoflex.target.type": "int64" + }, + { + "@level": "info", + "@message": "Flattening", + "@module": "provider.autoflex", + "autoflex.source.type": "*", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "int64", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Int64Value" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/int64_value_tf_to_aws_pointer.golden b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/int64_value_tf_to_aws_pointer.golden new file mode 100644 index 000000000000..f8409ff61ae3 --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/int64_value_tf_to_aws_pointer.golden @@ -0,0 +1,74 @@ +[ + { + "@level": "info", + "@message": "Expanding", + "@module": "provider.autoflex", + "autoflex.source.type": "*", + 
"autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Int64Value", + "autoflex.target.path": "Field1", + "autoflex.target.type": "*int64" + }, + { + "@level": "info", + "@message": "Flattening", + "@module": "provider.autoflex", + "autoflex.source.type": "*", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "*int64", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Int64Value" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/int64_zero_value_legacy.golden 
b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/int64_zero_value_legacy.golden new file mode 100644 index 000000000000..76dd740b2ec0 --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/int64_zero_value_legacy.golden @@ -0,0 +1,92 @@ +[ + { + "@level": "info", + "@message": "Expanding", + "@module": "provider.autoflex", + "autoflex.source.type": "*", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Int64Value", + "autoflex.target.path": "Field1", + "autoflex.target.type": "*int64" + }, + { + "@level": "debug", + "@message": "Using legacy expander", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Int64Value", + "autoflex.target.path": "Field1", + "autoflex.target.type": "*int64" + }, + { + "@level": "info", + "@message": "Flattening", + "@module": "provider.autoflex", + "autoflex.source.type": "*", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "trace", + "@message": "Matched 
fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "*int64", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Int64Value" + }, + { + "@level": "debug", + "@message": "Using legacy flattener", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "*int64", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Int64Value" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/int64_zero_value_legacy_tf_to_aws_pointer.golden b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/int64_zero_value_legacy_tf_to_aws_pointer.golden new file mode 100644 index 000000000000..76dd740b2ec0 --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/int64_zero_value_legacy_tf_to_aws_pointer.golden @@ -0,0 +1,92 @@ +[ + { + "@level": "info", + "@message": "Expanding", + "@module": "provider.autoflex", + "autoflex.source.type": "*", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", 
+ "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Int64Value", + "autoflex.target.path": "Field1", + "autoflex.target.type": "*int64" + }, + { + "@level": "debug", + "@message": "Using legacy expander", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Int64Value", + "autoflex.target.path": "Field1", + "autoflex.target.type": "*int64" + }, + { + "@level": "info", + "@message": "Flattening", + "@module": "provider.autoflex", + "autoflex.source.type": "*", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "*int64", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Int64Value" + }, + { + "@level": "debug", + "@message": "Using legacy flattener", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "*int64", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Int64Value" + } +] \ No newline at end of file diff --git 
a/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/int64_zero_value_standard.golden b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/int64_zero_value_standard.golden new file mode 100644 index 000000000000..68aadd91a106 --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/int64_zero_value_standard.golden @@ -0,0 +1,74 @@ +[ + { + "@level": "info", + "@message": "Expanding", + "@module": "provider.autoflex", + "autoflex.source.type": "*", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Int64Value", + "autoflex.target.path": "Field1", + "autoflex.target.type": "int64" + }, + { + "@level": "info", + "@message": "Flattening", + "@module": "provider.autoflex", + "autoflex.source.type": "*", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.fieldname": "Field1", + 
"autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "int64", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Int64Value" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/int64_zero_value_tf_to_aws_pointer.golden b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/int64_zero_value_tf_to_aws_pointer.golden new file mode 100644 index 000000000000..f8409ff61ae3 --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/int64_zero_value_tf_to_aws_pointer.golden @@ -0,0 +1,74 @@ +[ + { + "@level": "info", + "@message": "Expanding", + "@module": "provider.autoflex", + "autoflex.source.type": "*", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Int64Value", + "autoflex.target.path": "Field1", + "autoflex.target.type": "*int64" + }, + { + "@level": "info", + "@message": "Flattening", + "@module": "provider.autoflex", + "autoflex.source.type": "*", + "autoflex.target.type": "*" + }, + { + "@level": "info", + 
"@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "*int64", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Int64Value" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/string_empty_string_legacy.golden b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/string_empty_string_legacy.golden new file mode 100644 index 000000000000..682b2e050960 --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/string_empty_string_legacy.golden @@ -0,0 +1,92 @@ +[ + { + "@level": "info", + "@message": "Expanding", + "@module": "provider.autoflex", + "autoflex.source.type": "*", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + 
"autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.StringValue", + "autoflex.target.path": "Field1", + "autoflex.target.type": "*string" + }, + { + "@level": "debug", + "@message": "Using legacy expander", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.StringValue", + "autoflex.target.path": "Field1", + "autoflex.target.type": "*string" + }, + { + "@level": "info", + "@message": "Flattening", + "@module": "provider.autoflex", + "autoflex.source.type": "*", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "*string", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.StringValue" + }, + { + "@level": "debug", + "@message": "Using legacy flattener", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "*string", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.StringValue" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/string_empty_string_legacy_tf_to_aws_pointer.golden 
b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/string_empty_string_legacy_tf_to_aws_pointer.golden new file mode 100644 index 000000000000..682b2e050960 --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/string_empty_string_legacy_tf_to_aws_pointer.golden @@ -0,0 +1,92 @@ +[ + { + "@level": "info", + "@message": "Expanding", + "@module": "provider.autoflex", + "autoflex.source.type": "*", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.StringValue", + "autoflex.target.path": "Field1", + "autoflex.target.type": "*string" + }, + { + "@level": "debug", + "@message": "Using legacy expander", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.StringValue", + "autoflex.target.path": "Field1", + "autoflex.target.type": "*string" + }, + { + "@level": "info", + "@message": "Flattening", + "@module": "provider.autoflex", + "autoflex.source.type": "*", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + 
"@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "*string", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.StringValue" + }, + { + "@level": "debug", + "@message": "Using legacy flattener", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "*string", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.StringValue" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/string_empty_string_standard.golden b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/string_empty_string_standard.golden new file mode 100644 index 000000000000..f14e8ea92ff1 --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/string_empty_string_standard.golden @@ -0,0 +1,74 @@ +[ + { + "@level": "info", + "@message": "Expanding", + "@module": "provider.autoflex", + "autoflex.source.type": "*", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.fieldname": "Field1", + 
"autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.StringValue", + "autoflex.target.path": "Field1", + "autoflex.target.type": "string" + }, + { + "@level": "info", + "@message": "Flattening", + "@module": "provider.autoflex", + "autoflex.source.type": "*", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "string", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.StringValue" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/string_empty_string_tf_to_aws_pointer.golden b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/string_empty_string_tf_to_aws_pointer.golden new file mode 100644 index 000000000000..6b26981e2588 --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/string_empty_string_tf_to_aws_pointer.golden @@ -0,0 +1,74 @@ +[ + { + "@level": "info", + "@message": "Expanding", + "@module": "provider.autoflex", + "autoflex.source.type": "*", + "autoflex.target.type": "*" + }, + { + "@level": 
"info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.StringValue", + "autoflex.target.path": "Field1", + "autoflex.target.type": "*string" + }, + { + "@level": "info", + "@message": "Flattening", + "@module": "provider.autoflex", + "autoflex.source.type": "*", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "*string", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.StringValue" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/string_normal_value_legacy.golden 
b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/string_normal_value_legacy.golden new file mode 100644 index 000000000000..682b2e050960 --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/string_normal_value_legacy.golden @@ -0,0 +1,92 @@ +[ + { + "@level": "info", + "@message": "Expanding", + "@module": "provider.autoflex", + "autoflex.source.type": "*", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.StringValue", + "autoflex.target.path": "Field1", + "autoflex.target.type": "*string" + }, + { + "@level": "debug", + "@message": "Using legacy expander", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.StringValue", + "autoflex.target.path": "Field1", + "autoflex.target.type": "*string" + }, + { + "@level": "info", + "@message": "Flattening", + "@module": "provider.autoflex", + "autoflex.source.type": "*", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "trace", + "@message": 
"Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "*string", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.StringValue" + }, + { + "@level": "debug", + "@message": "Using legacy flattener", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "*string", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.StringValue" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/string_normal_value_legacy_tf_to_aws_pointer.golden b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/string_normal_value_legacy_tf_to_aws_pointer.golden new file mode 100644 index 000000000000..682b2e050960 --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/string_normal_value_legacy_tf_to_aws_pointer.golden @@ -0,0 +1,92 @@ +[ + { + "@level": "info", + "@message": "Expanding", + "@module": "provider.autoflex", + "autoflex.source.type": "*", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.fieldname": "Field1", + 
"autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.StringValue", + "autoflex.target.path": "Field1", + "autoflex.target.type": "*string" + }, + { + "@level": "debug", + "@message": "Using legacy expander", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.StringValue", + "autoflex.target.path": "Field1", + "autoflex.target.type": "*string" + }, + { + "@level": "info", + "@message": "Flattening", + "@module": "provider.autoflex", + "autoflex.source.type": "*", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "*string", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.StringValue" + }, + { + "@level": "debug", + "@message": "Using legacy flattener", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "*string", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.StringValue" + } +] \ No newline at end of file diff 
--git a/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/string_normal_value_standard.golden b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/string_normal_value_standard.golden new file mode 100644 index 000000000000..f14e8ea92ff1 --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/string_normal_value_standard.golden @@ -0,0 +1,74 @@ +[ + { + "@level": "info", + "@message": "Expanding", + "@module": "provider.autoflex", + "autoflex.source.type": "*", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.StringValue", + "autoflex.target.path": "Field1", + "autoflex.target.type": "string" + }, + { + "@level": "info", + "@message": "Flattening", + "@module": "provider.autoflex", + "autoflex.source.type": "*", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.fieldname": "Field1", + 
"autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "string", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.StringValue" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/string_normal_value_tf_to_aws_pointer.golden b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/string_normal_value_tf_to_aws_pointer.golden new file mode 100644 index 000000000000..6b26981e2588 --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/string_normal_value_tf_to_aws_pointer.golden @@ -0,0 +1,74 @@ +[ + { + "@level": "info", + "@message": "Expanding", + "@module": "provider.autoflex", + "autoflex.source.type": "*", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.StringValue", + "autoflex.target.path": "Field1", + "autoflex.target.type": "*string" + }, + { + "@level": "info", + "@message": "Flattening", + "@module": "provider.autoflex", + "autoflex.source.type": "*", + "autoflex.target.type": "*" + }, + { + "@level": 
"info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "*string", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.StringValue" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/string_null_value_legacy.golden b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/string_null_value_legacy.golden new file mode 100644 index 000000000000..a391dd10976b --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/string_null_value_legacy.golden @@ -0,0 +1,92 @@ +[ + { + "@level": "info", + "@message": "Expanding", + "@module": "provider.autoflex", + "autoflex.source.type": "*", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", 
+ "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.StringValue", + "autoflex.target.path": "Field1", + "autoflex.target.type": "*string" + }, + { + "@level": "trace", + "@message": "Expanding null value", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.StringValue", + "autoflex.target.path": "Field1", + "autoflex.target.type": "*string" + }, + { + "@level": "info", + "@message": "Flattening", + "@module": "provider.autoflex", + "autoflex.source.type": "*", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "*string", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.StringValue" + }, + { + "@level": "debug", + "@message": "Using legacy flattener", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "*string", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.StringValue" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/string_null_value_legacy_tf_to_aws_pointer.golden 
b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/string_null_value_legacy_tf_to_aws_pointer.golden new file mode 100644 index 000000000000..a391dd10976b --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/string_null_value_legacy_tf_to_aws_pointer.golden @@ -0,0 +1,92 @@ +[ + { + "@level": "info", + "@message": "Expanding", + "@module": "provider.autoflex", + "autoflex.source.type": "*", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.StringValue", + "autoflex.target.path": "Field1", + "autoflex.target.type": "*string" + }, + { + "@level": "trace", + "@message": "Expanding null value", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.StringValue", + "autoflex.target.path": "Field1", + "autoflex.target.type": "*string" + }, + { + "@level": "info", + "@message": "Flattening", + "@module": "provider.autoflex", + "autoflex.source.type": "*", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + 
"@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "*string", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.StringValue" + }, + { + "@level": "debug", + "@message": "Using legacy flattener", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "*string", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.StringValue" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/string_null_value_standard.golden b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/string_null_value_standard.golden new file mode 100644 index 000000000000..4044014c0e70 --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/string_null_value_standard.golden @@ -0,0 +1,83 @@ +[ + { + "@level": "info", + "@message": "Expanding", + "@module": "provider.autoflex", + "autoflex.source.type": "*", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": 
"", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.StringValue", + "autoflex.target.path": "Field1", + "autoflex.target.type": "string" + }, + { + "@level": "trace", + "@message": "Expanding null value", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.StringValue", + "autoflex.target.path": "Field1", + "autoflex.target.type": "string" + }, + { + "@level": "info", + "@message": "Flattening", + "@module": "provider.autoflex", + "autoflex.source.type": "*", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "string", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.StringValue" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/string_null_value_tf_to_aws_pointer.golden b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/string_null_value_tf_to_aws_pointer.golden new file mode 100644 index 000000000000..554622bde4f3 --- /dev/null +++ 
b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/string_null_value_tf_to_aws_pointer.golden @@ -0,0 +1,83 @@ +[ + { + "@level": "info", + "@message": "Expanding", + "@module": "provider.autoflex", + "autoflex.source.type": "*", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.StringValue", + "autoflex.target.path": "Field1", + "autoflex.target.type": "*string" + }, + { + "@level": "trace", + "@message": "Expanding null value", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.StringValue", + "autoflex.target.path": "Field1", + "autoflex.target.type": "*string" + }, + { + "@level": "info", + "@message": "Flattening", + "@module": "provider.autoflex", + "autoflex.source.type": "*", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "", + 
"autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "*string", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.StringValue" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/string_random_value_legacy.golden b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/string_random_value_legacy.golden new file mode 100644 index 000000000000..682b2e050960 --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/string_random_value_legacy.golden @@ -0,0 +1,92 @@ +[ + { + "@level": "info", + "@message": "Expanding", + "@module": "provider.autoflex", + "autoflex.source.type": "*", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.StringValue", + "autoflex.target.path": "Field1", + "autoflex.target.type": "*string" + }, + { + "@level": "debug", + "@message": "Using legacy expander", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": 
"github.com/hashicorp/terraform-plugin-framework/types/basetypes.StringValue", + "autoflex.target.path": "Field1", + "autoflex.target.type": "*string" + }, + { + "@level": "info", + "@message": "Flattening", + "@module": "provider.autoflex", + "autoflex.source.type": "*", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "*string", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.StringValue" + }, + { + "@level": "debug", + "@message": "Using legacy flattener", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "*string", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.StringValue" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/string_random_value_legacy_tf_to_aws_pointer.golden b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/string_random_value_legacy_tf_to_aws_pointer.golden new file mode 100644 index 000000000000..682b2e050960 --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/string_random_value_legacy_tf_to_aws_pointer.golden @@ -0,0 +1,92 @@ +[ + { + "@level": "info", + "@message": 
"Expanding", + "@module": "provider.autoflex", + "autoflex.source.type": "*", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.StringValue", + "autoflex.target.path": "Field1", + "autoflex.target.type": "*string" + }, + { + "@level": "debug", + "@message": "Using legacy expander", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.StringValue", + "autoflex.target.path": "Field1", + "autoflex.target.type": "*string" + }, + { + "@level": "info", + "@message": "Flattening", + "@module": "provider.autoflex", + "autoflex.source.type": "*", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + 
"autoflex.source.path": "Field1", + "autoflex.source.type": "*string", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.StringValue" + }, + { + "@level": "debug", + "@message": "Using legacy flattener", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "*string", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.StringValue" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/string_random_value_standard.golden b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/string_random_value_standard.golden new file mode 100644 index 000000000000..f14e8ea92ff1 --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/string_random_value_standard.golden @@ -0,0 +1,74 @@ +[ + { + "@level": "info", + "@message": "Expanding", + "@module": "provider.autoflex", + "autoflex.source.type": "*", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.StringValue", + "autoflex.target.path": "Field1", + "autoflex.target.type": "string" + }, + { + "@level": "info", + 
"@message": "Flattening", + "@module": "provider.autoflex", + "autoflex.source.type": "*", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "string", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.StringValue" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/string_random_value_tf_to_aws_pointer.golden b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/string_random_value_tf_to_aws_pointer.golden new file mode 100644 index 000000000000..6b26981e2588 --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/string_random_value_tf_to_aws_pointer.golden @@ -0,0 +1,74 @@ +[ + { + "@level": "info", + "@message": "Expanding", + "@module": "provider.autoflex", + "autoflex.source.type": "*", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "", + 
"autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.StringValue", + "autoflex.target.path": "Field1", + "autoflex.target.type": "*string" + }, + { + "@level": "info", + "@message": "Flattening", + "@module": "provider.autoflex", + "autoflex.source.type": "*", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "*string", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.StringValue" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/string_special_characters_legacy.golden b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/string_special_characters_legacy.golden new file mode 100644 index 000000000000..682b2e050960 --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/string_special_characters_legacy.golden @@ -0,0 +1,92 @@ +[ + { + "@level": "info", + "@message": "Expanding", + "@module": "provider.autoflex", + "autoflex.source.type": "*", + "autoflex.target.type": "*" 
+ }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.StringValue", + "autoflex.target.path": "Field1", + "autoflex.target.type": "*string" + }, + { + "@level": "debug", + "@message": "Using legacy expander", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.StringValue", + "autoflex.target.path": "Field1", + "autoflex.target.type": "*string" + }, + { + "@level": "info", + "@message": "Flattening", + "@module": "provider.autoflex", + "autoflex.source.type": "*", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "*string", + "autoflex.target.path": "Field1", + 
"autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.StringValue" + }, + { + "@level": "debug", + "@message": "Using legacy flattener", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "*string", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.StringValue" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/string_special_characters_legacy_tf_to_aws_pointer.golden b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/string_special_characters_legacy_tf_to_aws_pointer.golden new file mode 100644 index 000000000000..682b2e050960 --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/string_special_characters_legacy_tf_to_aws_pointer.golden @@ -0,0 +1,92 @@ +[ + { + "@level": "info", + "@message": "Expanding", + "@module": "provider.autoflex", + "autoflex.source.type": "*", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.StringValue", + "autoflex.target.path": "Field1", + "autoflex.target.type": "*string" + }, + { + "@level": "debug", + "@message": "Using legacy expander", + "@module": 
"provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.StringValue", + "autoflex.target.path": "Field1", + "autoflex.target.type": "*string" + }, + { + "@level": "info", + "@message": "Flattening", + "@module": "provider.autoflex", + "autoflex.source.type": "*", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "*string", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.StringValue" + }, + { + "@level": "debug", + "@message": "Using legacy flattener", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "*string", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.StringValue" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/string_special_characters_standard.golden b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/string_special_characters_standard.golden new file mode 100644 index 000000000000..f14e8ea92ff1 --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/string_special_characters_standard.golden @@ -0,0 
+1,74 @@ +[ + { + "@level": "info", + "@message": "Expanding", + "@module": "provider.autoflex", + "autoflex.source.type": "*", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.StringValue", + "autoflex.target.path": "Field1", + "autoflex.target.type": "string" + }, + { + "@level": "info", + "@message": "Flattening", + "@module": "provider.autoflex", + "autoflex.source.type": "*", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "string", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.StringValue" + } +] \ No newline at end of file diff --git 
a/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/string_special_characters_tf_to_aws_pointer.golden b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/string_special_characters_tf_to_aws_pointer.golden new file mode 100644 index 000000000000..6b26981e2588 --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/string_special_characters_tf_to_aws_pointer.golden @@ -0,0 +1,74 @@ +[ + { + "@level": "info", + "@message": "Expanding", + "@module": "provider.autoflex", + "autoflex.source.type": "*", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.StringValue", + "autoflex.target.path": "Field1", + "autoflex.target.type": "*string" + }, + { + "@level": "info", + "@message": "Flattening", + "@module": "provider.autoflex", + "autoflex.source.type": "*", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "", + 
"autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "*string", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.StringValue" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/string_unicode_content_legacy.golden b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/string_unicode_content_legacy.golden new file mode 100644 index 000000000000..682b2e050960 --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/string_unicode_content_legacy.golden @@ -0,0 +1,92 @@ +[ + { + "@level": "info", + "@message": "Expanding", + "@module": "provider.autoflex", + "autoflex.source.type": "*", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.StringValue", + "autoflex.target.path": "Field1", + "autoflex.target.type": "*string" + }, + { + "@level": "debug", + "@message": "Using legacy expander", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + 
"autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.StringValue", + "autoflex.target.path": "Field1", + "autoflex.target.type": "*string" + }, + { + "@level": "info", + "@message": "Flattening", + "@module": "provider.autoflex", + "autoflex.source.type": "*", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "*string", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.StringValue" + }, + { + "@level": "debug", + "@message": "Using legacy flattener", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "*string", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.StringValue" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/string_unicode_content_legacy_tf_to_aws_pointer.golden b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/string_unicode_content_legacy_tf_to_aws_pointer.golden new file mode 100644 index 000000000000..682b2e050960 --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/string_unicode_content_legacy_tf_to_aws_pointer.golden @@ -0,0 +1,92 @@ +[ + { + 
"@level": "info", + "@message": "Expanding", + "@module": "provider.autoflex", + "autoflex.source.type": "*", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.StringValue", + "autoflex.target.path": "Field1", + "autoflex.target.type": "*string" + }, + { + "@level": "debug", + "@message": "Using legacy expander", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.StringValue", + "autoflex.target.path": "Field1", + "autoflex.target.type": "*string" + }, + { + "@level": "info", + "@message": "Flattening", + "@module": "provider.autoflex", + "autoflex.source.type": "*", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": 
"provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "*string", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.StringValue" + }, + { + "@level": "debug", + "@message": "Using legacy flattener", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "*string", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.StringValue" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/string_unicode_content_standard.golden b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/string_unicode_content_standard.golden new file mode 100644 index 000000000000..f14e8ea92ff1 --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/string_unicode_content_standard.golden @@ -0,0 +1,74 @@ +[ + { + "@level": "info", + "@message": "Expanding", + "@module": "provider.autoflex", + "autoflex.source.type": "*", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.StringValue", + "autoflex.target.path": "Field1", + "autoflex.target.type": "string" + }, + 
{ + "@level": "info", + "@message": "Flattening", + "@module": "provider.autoflex", + "autoflex.source.type": "*", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "string", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.StringValue" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/string_unicode_content_tf_to_aws_pointer.golden b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/string_unicode_content_tf_to_aws_pointer.golden new file mode 100644 index 000000000000..6b26981e2588 --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/primitives/primitives_roundtrip/string_unicode_content_tf_to_aws_pointer.golden @@ -0,0 +1,74 @@ +[ + { + "@level": "info", + "@message": "Expanding", + "@module": "provider.autoflex", + "autoflex.source.type": "*", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + 
"autoflex.source.type": "", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.StringValue", + "autoflex.target.path": "Field1", + "autoflex.target.type": "*string" + }, + { + "@level": "info", + "@message": "Flattening", + "@module": "provider.autoflex", + "autoflex.source.type": "*", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "*string", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.StringValue" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/strings/flatten_string_special/single_byte_slice_source_and_single_string_target.golden b/internal/framework/flex/testdata/autoflex/strings/flatten_string_special/single_byte_slice_source_and_single_string_target.golden new file mode 100644 index 000000000000..176b90340eac --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/strings/flatten_string_special/single_byte_slice_source_and_single_string_target.golden @@ -0,0 +1,38 @@ +[ + { + "@level": "info", + "@message": "Flattening", + "@module": 
"provider.autoflex", + "autoflex.source.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsSingleByteSliceValue", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfSingleStringField" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsSingleByteSliceValue", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfSingleStringField" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsSingleByteSliceValue", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfSingleStringField" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "[]uint8", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.StringValue" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/strings/flatten_string_special/single_empty_string_source_and_single_string_target.golden b/internal/framework/flex/testdata/autoflex/strings/flatten_string_special/single_empty_string_source_and_single_string_target.golden new file mode 100644 index 000000000000..193d87f18987 --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/strings/flatten_string_special/single_empty_string_source_and_single_string_target.golden @@ -0,0 +1,38 @@ +[ + { + "@level": "info", + 
"@message": "Flattening", + "@module": "provider.autoflex", + "autoflex.source.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsSingleStringValue", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfSingleStringField" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsSingleStringValue", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfSingleStringField" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsSingleStringValue", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfSingleStringField" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "string", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.StringValue" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/strings/flatten_string_special/single_nil_pointer_string_source_and_single_string_target.golden b/internal/framework/flex/testdata/autoflex/strings/flatten_string_special/single_nil_pointer_string_source_and_single_string_target.golden new file mode 100644 index 000000000000..e94aa793a460 --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/strings/flatten_string_special/single_nil_pointer_string_source_and_single_string_target.golden @@ 
-0,0 +1,38 @@ +[ + { + "@level": "info", + "@message": "Flattening", + "@module": "provider.autoflex", + "autoflex.source.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsSingleStringPointer", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfSingleStringField" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsSingleStringPointer", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfSingleStringField" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsSingleStringPointer", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfSingleStringField" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "*string", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.StringValue" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/strings/flatten_string_special/single_pointer_string_source_and_single_string_target.golden b/internal/framework/flex/testdata/autoflex/strings/flatten_string_special/single_pointer_string_source_and_single_string_target.golden new file mode 100644 index 000000000000..e94aa793a460 --- /dev/null +++ 
b/internal/framework/flex/testdata/autoflex/strings/flatten_string_special/single_pointer_string_source_and_single_string_target.golden @@ -0,0 +1,38 @@ +[ + { + "@level": "info", + "@message": "Flattening", + "@module": "provider.autoflex", + "autoflex.source.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsSingleStringPointer", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfSingleStringField" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsSingleStringPointer", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfSingleStringField" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsSingleStringPointer", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfSingleStringField" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "*string", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.StringValue" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/strings/flatten_string_special/single_string_source_and_single_int64_target.golden b/internal/framework/flex/testdata/autoflex/strings/flatten_string_special/single_string_source_and_single_int64_target.golden new file mode 100644 index 
000000000000..a0f11636057a --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/strings/flatten_string_special/single_string_source_and_single_int64_target.golden @@ -0,0 +1,49 @@ +[ + { + "@level": "info", + "@message": "Flattening", + "@module": "provider.autoflex", + "autoflex.source.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsSingleStringValue", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfSingleInt64Field" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsSingleStringValue", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfSingleInt64Field" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsSingleStringValue", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfSingleInt64Field" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "string", + "autoflex.target.path": "Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.Int64Value" + }, + { + "@level": "error", + "@message": "AutoFlex Flatten; incompatible types", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "string", + "autoflex.target.path": "Field1", + "autoflex.target.type": 
"github.com/hashicorp/terraform-plugin-framework/types/basetypes.Int64Value", + "from": 24, + "to": {} + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/strings/flatten_string_special/single_string_source_and_single_string_target.golden b/internal/framework/flex/testdata/autoflex/strings/flatten_string_special/single_string_source_and_single_string_target.golden new file mode 100644 index 000000000000..193d87f18987 --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/strings/flatten_string_special/single_string_source_and_single_string_target.golden @@ -0,0 +1,38 @@ +[ + { + "@level": "info", + "@message": "Flattening", + "@module": "provider.autoflex", + "autoflex.source.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsSingleStringValue", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfSingleStringField" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsSingleStringValue", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfSingleStringField" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsSingleStringValue", + "autoflex.target.fieldname": "Field1", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfSingleStringField" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Field1", + "autoflex.source.type": "string", + "autoflex.target.path": 
"Field1", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.StringValue" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/strings/flatten_string_special/single_string_struct_pointer_source_and_empty_target.golden b/internal/framework/flex/testdata/autoflex/strings/flatten_string_special/single_string_struct_pointer_source_and_empty_target.golden new file mode 100644 index 000000000000..27ece498d91d --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/strings/flatten_string_special/single_string_struct_pointer_source_and_empty_target.golden @@ -0,0 +1,28 @@ +[ + { + "@level": "info", + "@message": "Flattening", + "@module": "provider.autoflex", + "autoflex.source.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsSingleStringValue", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.emptyStruct" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsSingleStringValue", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.emptyStruct" + }, + { + "@level": "debug", + "@message": "No corresponding field", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Field1", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsSingleStringValue", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.emptyStruct" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/xml_compat/expand_xmlwrapper/empty_function_associations.golden 
b/internal/framework/flex/testdata/autoflex/xml_compat/expand_xmlwrapper/empty_function_associations.golden new file mode 100644 index 000000000000..5c51df3bbd46 --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/xml_compat/expand_xmlwrapper/empty_function_associations.golden @@ -0,0 +1,70 @@ +[ + { + "@level": "info", + "@message": "Expanding", + "@module": "provider.autoflex", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.DistributionConfigTF", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.DistributionConfigAWS" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.DistributionConfigTF", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.DistributionConfigAWS" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "FunctionAssociations", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.DistributionConfigTF", + "autoflex.target.fieldname": "FunctionAssociations", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.DistributionConfigAWS" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "FunctionAssociations", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/types.SetNestedObjectValueOf[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.FunctionAssociationTF]", + "autoflex.target.path": "FunctionAssociations", + "autoflex.target.type": 
"*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.FunctionAssociations" + }, + { + "@level": "trace", + "@message": "Expanding NestedObjectCollection to XML wrapper", + "@module": "provider.autoflex", + "autoflex.source.path": "FunctionAssociations", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/types.SetNestedObjectValueOf[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.FunctionAssociationTF]", + "autoflex.target.path": "FunctionAssociations", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.FunctionAssociations", + "source_type": "SetNestedObjectTypeOf[flex.FunctionAssociationTF]", + "target_type": "flex.FunctionAssociations" + }, + { + "@level": "trace", + "@message": "Converting nested objects to items", + "@module": "provider.autoflex", + "autoflex.source.path": "FunctionAssociations", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/types.SetNestedObjectValueOf[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.FunctionAssociationTF]", + "autoflex.target.path": "FunctionAssociations", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.FunctionAssociations", + "items_count": 0, + "items_type": "[]flex.FunctionAssociation" + }, + { + "@level": "trace", + "@message": "Successfully expanded NestedObjectCollection to XML wrapper", + "@module": "provider.autoflex", + "autoflex.source.path": "FunctionAssociations", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/types.SetNestedObjectValueOf[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.FunctionAssociationTF]", + "autoflex.target.path": "FunctionAssociations", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.FunctionAssociations", + "items_count": 0 + } +] \ No newline at end of 
file diff --git a/internal/framework/flex/testdata/autoflex/xml_compat/expand_xmlwrapper/single_function_association.golden b/internal/framework/flex/testdata/autoflex/xml_compat/expand_xmlwrapper/single_function_association.golden new file mode 100644 index 000000000000..914f24645a32 --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/xml_compat/expand_xmlwrapper/single_function_association.golden @@ -0,0 +1,119 @@ +[ + { + "@level": "info", + "@message": "Expanding", + "@module": "provider.autoflex", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.DistributionConfigTF", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.DistributionConfigAWS" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.DistributionConfigTF", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.DistributionConfigAWS" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "FunctionAssociations", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.DistributionConfigTF", + "autoflex.target.fieldname": "FunctionAssociations", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.DistributionConfigAWS" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "FunctionAssociations", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/types.SetNestedObjectValueOf[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.FunctionAssociationTF]", 
+ "autoflex.target.path": "FunctionAssociations", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.FunctionAssociations" + }, + { + "@level": "trace", + "@message": "Expanding NestedObjectCollection to XML wrapper", + "@module": "provider.autoflex", + "autoflex.source.path": "FunctionAssociations", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/types.SetNestedObjectValueOf[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.FunctionAssociationTF]", + "autoflex.target.path": "FunctionAssociations", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.FunctionAssociations", + "source_type": "SetNestedObjectTypeOf[flex.FunctionAssociationTF]", + "target_type": "flex.FunctionAssociations" + }, + { + "@level": "trace", + "@message": "Converting nested objects to items", + "@module": "provider.autoflex", + "autoflex.source.path": "FunctionAssociations", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/types.SetNestedObjectValueOf[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.FunctionAssociationTF]", + "autoflex.target.path": "FunctionAssociations", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.FunctionAssociations", + "items_count": 1, + "items_type": "[]flex.FunctionAssociation" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.FunctionAssociationTF", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.FunctionAssociation" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "EventType", + 
"autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.FunctionAssociationTF", + "autoflex.target.fieldname": "EventType", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.FunctionAssociation" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "EventType", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.StringValue", + "autoflex.target.path": "EventType", + "autoflex.target.type": "string" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "FunctionARN", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.FunctionAssociationTF", + "autoflex.target.fieldname": "FunctionARN", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.FunctionAssociation" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "FunctionARN", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.StringValue", + "autoflex.target.path": "FunctionARN", + "autoflex.target.type": "*string" + }, + { + "@level": "trace", + "@message": "Successfully expanded NestedObjectCollection to XML wrapper", + "@module": "provider.autoflex", + "autoflex.source.path": "FunctionAssociations", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/types.SetNestedObjectValueOf[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.FunctionAssociationTF]", + "autoflex.target.path": "FunctionAssociations", + "autoflex.target.type": 
"*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.FunctionAssociations", + "items_count": 1 + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/xml_compat/expand_xmlwrapper/valid_function_associations.golden b/internal/framework/flex/testdata/autoflex/xml_compat/expand_xmlwrapper/valid_function_associations.golden new file mode 100644 index 000000000000..95d26e0d9b45 --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/xml_compat/expand_xmlwrapper/valid_function_associations.golden @@ -0,0 +1,168 @@ +[ + { + "@level": "info", + "@message": "Expanding", + "@module": "provider.autoflex", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.DistributionConfigTF", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.DistributionConfigAWS" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.DistributionConfigTF", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.DistributionConfigAWS" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "FunctionAssociations", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.DistributionConfigTF", + "autoflex.target.fieldname": "FunctionAssociations", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.DistributionConfigAWS" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "FunctionAssociations", + "autoflex.source.type": 
"github.com/hashicorp/terraform-provider-aws/internal/framework/types.SetNestedObjectValueOf[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.FunctionAssociationTF]", + "autoflex.target.path": "FunctionAssociations", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.FunctionAssociations" + }, + { + "@level": "trace", + "@message": "Expanding NestedObjectCollection to XML wrapper", + "@module": "provider.autoflex", + "autoflex.source.path": "FunctionAssociations", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/types.SetNestedObjectValueOf[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.FunctionAssociationTF]", + "autoflex.target.path": "FunctionAssociations", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.FunctionAssociations", + "source_type": "SetNestedObjectTypeOf[flex.FunctionAssociationTF]", + "target_type": "flex.FunctionAssociations" + }, + { + "@level": "trace", + "@message": "Converting nested objects to items", + "@module": "provider.autoflex", + "autoflex.source.path": "FunctionAssociations", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/types.SetNestedObjectValueOf[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.FunctionAssociationTF]", + "autoflex.target.path": "FunctionAssociations", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.FunctionAssociations", + "items_count": 2, + "items_type": "[]flex.FunctionAssociation" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.FunctionAssociationTF", + "autoflex.target.path": "", + "autoflex.target.type": 
"*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.FunctionAssociation" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "EventType", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.FunctionAssociationTF", + "autoflex.target.fieldname": "EventType", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.FunctionAssociation" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "EventType", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.StringValue", + "autoflex.target.path": "EventType", + "autoflex.target.type": "string" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "FunctionARN", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.FunctionAssociationTF", + "autoflex.target.fieldname": "FunctionARN", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.FunctionAssociation" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "FunctionARN", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.StringValue", + "autoflex.target.path": "FunctionARN", + "autoflex.target.type": "*string" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.FunctionAssociationTF", + "autoflex.target.path": "", + "autoflex.target.type": 
"*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.FunctionAssociation" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "EventType", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.FunctionAssociationTF", + "autoflex.target.fieldname": "EventType", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.FunctionAssociation" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "EventType", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.StringValue", + "autoflex.target.path": "EventType", + "autoflex.target.type": "string" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "FunctionARN", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.FunctionAssociationTF", + "autoflex.target.fieldname": "FunctionARN", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.FunctionAssociation" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "FunctionARN", + "autoflex.source.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.StringValue", + "autoflex.target.path": "FunctionARN", + "autoflex.target.type": "*string" + }, + { + "@level": "trace", + "@message": "Successfully expanded NestedObjectCollection to XML wrapper", + "@module": "provider.autoflex", + "autoflex.source.path": "FunctionAssociations", + "autoflex.source.type": 
"github.com/hashicorp/terraform-provider-aws/internal/framework/types.SetNestedObjectValueOf[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.FunctionAssociationTF]", + "autoflex.target.path": "FunctionAssociations", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.FunctionAssociations", + "items_count": 2 + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/xml_compat/expand_xmlwrapper_direct/direct_xml_wrapper.golden b/internal/framework/flex/testdata/autoflex/xml_compat/expand_xmlwrapper_direct/direct_xml_wrapper.golden new file mode 100644 index 000000000000..feac7ec774d3 --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/xml_compat/expand_xmlwrapper_direct/direct_xml_wrapper.golden @@ -0,0 +1,51 @@ +[ + { + "@level": "info", + "@message": "Expanding", + "@module": "provider.autoflex", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.DirectWrapperTF", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.DirectWrapperAWS" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.DirectWrapperTF", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.DirectWrapperAWS" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "Items", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.DirectWrapperTF", + "autoflex.target.fieldname": "Items", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.DirectWrapperAWS" + }, + { + 
"@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "Items", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/types.SetValueOf[github.com/hashicorp/terraform-plugin-framework/types/basetypes.StringValue]", + "autoflex.target.path": "Items", + "autoflex.target.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.DirectXMLWrapper" + }, + { + "@level": "trace", + "@message": "Successfully expanded to XML wrapper", + "@module": "provider.autoflex", + "autoflex.source.path": "Items", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/types.SetValueOf[github.com/hashicorp/terraform-plugin-framework/types/basetypes.StringValue]", + "autoflex.target.path": "Items", + "autoflex.target.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.DirectXMLWrapper", + "items_count": 2, + "source_type": "basetypes.SetValue", + "target_type": "flex.DirectXMLWrapper", + "wrapper_field": "Items" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/xml_compat/flatten_xmlwrapper/complex_type__function_associations.golden b/internal/framework/flex/testdata/autoflex/xml_compat/flatten_xmlwrapper/complex_type__function_associations.golden new file mode 100644 index 000000000000..bad79202da75 --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/xml_compat/flatten_xmlwrapper/complex_type__function_associations.golden @@ -0,0 +1,213 @@ +[ + { + "@level": "info", + "@message": "Flattening", + "@module": "provider.autoflex", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsFunctionAssociationsForFlatten", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfFunctionAssociationsModelForFlatten" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + 
"autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsFunctionAssociationsForFlatten", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfFunctionAssociationsModelForFlatten" + }, + { + "@level": "trace", + "@message": "Converting entire XML wrapper struct to collection field", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "flex.awsFunctionAssociationsForFlatten", + "autoflex.target.fieldname": "FunctionAssociations", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfFunctionAssociationsModelForFlatten", + "wrapper_field": "items" + }, + { + "@level": "trace", + "@message": "Starting XML wrapper flatten", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsFunctionAssociationsForFlatten", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfFunctionAssociationsModelForFlatten", + "source_type": "flex.awsFunctionAssociationsForFlatten", + "target_type": "SetNestedObjectTypeOf[flex.FunctionAssociationTF]", + "wrapper_field": "items" + }, + { + "@level": "trace", + "@message": "Found Items field", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsFunctionAssociationsForFlatten", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfFunctionAssociationsModelForFlatten", + "items_is_nil": false, + "items_kind": "slice", + "items_len": 2, + "items_type": "[]flex.FunctionAssociation" + }, + { + "@level": "trace", + "@message": "Using target 
element type", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsFunctionAssociationsForFlatten", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfFunctionAssociationsModelForFlatten", + "element_type": "ObjectTypeOf[flex.FunctionAssociationTF]" + }, + { + "@level": "trace", + "@message": "Converting items to set elements", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsFunctionAssociationsForFlatten", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfFunctionAssociationsModelForFlatten", + "items_count": 2 + }, + { + "@level": "trace", + "@message": "Processing item", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsFunctionAssociationsForFlatten", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfFunctionAssociationsModelForFlatten", + "index": 0, + "item_kind": "struct", + "item_value": { + "EventType": "viewer-request", + "FunctionARN": "arn:aws:cloudfront::123456789012:function/example-function" + } + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.FunctionAssociation", + "autoflex.target.path": "", + "autoflex.target.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/types.ObjectValueOf[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.FunctionAssociationTF]" + }, + { + "@level": "trace", + 
"@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "EventType", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.FunctionAssociation", + "autoflex.target.fieldname": "EventType", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.FunctionAssociationTF" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "EventType", + "autoflex.source.type": "string", + "autoflex.target.path": "EventType", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.StringValue" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "FunctionARN", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.FunctionAssociation", + "autoflex.target.fieldname": "FunctionARN", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.FunctionAssociationTF" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "FunctionARN", + "autoflex.source.type": "*string", + "autoflex.target.path": "FunctionARN", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.StringValue" + }, + { + "@level": "trace", + "@message": "Processing item", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsFunctionAssociationsForFlatten", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfFunctionAssociationsModelForFlatten", + "index": 1, + 
"item_kind": "struct", + "item_value": { + "EventType": "viewer-response", + "FunctionARN": "arn:aws:cloudfront::123456789012:function/another-function" + } + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.FunctionAssociation", + "autoflex.target.path": "", + "autoflex.target.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/types.ObjectValueOf[github.com/hashicorp/terraform-provider-aws/internal/framework/flex.FunctionAssociationTF]" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "EventType", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.FunctionAssociation", + "autoflex.target.fieldname": "EventType", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.FunctionAssociationTF" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "EventType", + "autoflex.source.type": "string", + "autoflex.target.path": "EventType", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.StringValue" + }, + { + "@level": "trace", + "@message": "Matched fields", + "@module": "provider.autoflex", + "autoflex.source.fieldname": "FunctionARN", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.FunctionAssociation", + "autoflex.target.fieldname": "FunctionARN", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.FunctionAssociationTF" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + 
"autoflex.source.path": "FunctionARN", + "autoflex.source.type": "*string", + "autoflex.target.path": "FunctionARN", + "autoflex.target.type": "github.com/hashicorp/terraform-plugin-framework/types/basetypes.StringValue" + }, + { + "@level": "trace", + "@message": "Creating set value", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsFunctionAssociationsForFlatten", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfFunctionAssociationsModelForFlatten", + "element_count": 2, + "element_type": "ObjectTypeOf[flex.FunctionAssociationTF]" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/xml_compat/flatten_xmlwrapper/empty_slice_to_null_set.golden b/internal/framework/flex/testdata/autoflex/xml_compat/flatten_xmlwrapper/empty_slice_to_null_set.golden new file mode 100644 index 000000000000..9a5732a1645f --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/xml_compat/flatten_xmlwrapper/empty_slice_to_null_set.golden @@ -0,0 +1,73 @@ +[ + { + "@level": "info", + "@message": "Flattening", + "@module": "provider.autoflex", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsStatusCodesForFlatten", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfStatusCodesModelForFlatten" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsStatusCodesForFlatten", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfStatusCodesModelForFlatten" + }, + { + "@level": "trace", + "@message": "Converting entire XML wrapper struct 
to collection field", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "flex.awsStatusCodesForFlatten", + "autoflex.target.fieldname": "StatusCodes", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfStatusCodesModelForFlatten", + "wrapper_field": "items" + }, + { + "@level": "trace", + "@message": "Starting XML wrapper flatten", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsStatusCodesForFlatten", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfStatusCodesModelForFlatten", + "source_type": "flex.awsStatusCodesForFlatten", + "target_type": "SetTypeOf[basetypes.Int64Value]", + "wrapper_field": "items" + }, + { + "@level": "trace", + "@message": "Found Items field", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsStatusCodesForFlatten", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfStatusCodesModelForFlatten", + "items_is_nil": true, + "items_kind": "slice", + "items_len": 0, + "items_type": "[]int32" + }, + { + "@level": "trace", + "@message": "Using target element type", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsStatusCodesForFlatten", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfStatusCodesModelForFlatten", + "element_type": "basetypes.Int64Type" + }, + { + "@level": "trace", + "@message": "Flattening XML wrapper with SetNull", + "@module": "provider.autoflex", + 
"autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsStatusCodesForFlatten", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfStatusCodesModelForFlatten" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/xml_compat/flatten_xmlwrapper/int32_slice_to_set.golden b/internal/framework/flex/testdata/autoflex/xml_compat/flatten_xmlwrapper/int32_slice_to_set.golden new file mode 100644 index 000000000000..2fdb6ce9c50c --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/xml_compat/flatten_xmlwrapper/int32_slice_to_set.golden @@ -0,0 +1,109 @@ +[ + { + "@level": "info", + "@message": "Flattening", + "@module": "provider.autoflex", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsStatusCodesForFlatten", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfStatusCodesModelForFlatten" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsStatusCodesForFlatten", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfStatusCodesModelForFlatten" + }, + { + "@level": "trace", + "@message": "Converting entire XML wrapper struct to collection field", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "flex.awsStatusCodesForFlatten", + "autoflex.target.fieldname": "StatusCodes", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfStatusCodesModelForFlatten", + "wrapper_field": "items" + }, + { + "@level": "trace", + "@message": "Starting XML 
wrapper flatten", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsStatusCodesForFlatten", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfStatusCodesModelForFlatten", + "source_type": "flex.awsStatusCodesForFlatten", + "target_type": "SetTypeOf[basetypes.Int64Value]", + "wrapper_field": "items" + }, + { + "@level": "trace", + "@message": "Found Items field", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsStatusCodesForFlatten", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfStatusCodesModelForFlatten", + "items_is_nil": false, + "items_kind": "slice", + "items_len": 2, + "items_type": "[]int32" + }, + { + "@level": "trace", + "@message": "Using target element type", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsStatusCodesForFlatten", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfStatusCodesModelForFlatten", + "element_type": "basetypes.Int64Type" + }, + { + "@level": "trace", + "@message": "Converting items to set elements", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsStatusCodesForFlatten", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfStatusCodesModelForFlatten", + "items_count": 2 + }, + { + "@level": "trace", + "@message": "Processing item", + "@module": "provider.autoflex", + 
"autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsStatusCodesForFlatten", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfStatusCodesModelForFlatten", + "index": 0, + "item_kind": "int32", + "item_value": 400 + }, + { + "@level": "trace", + "@message": "Processing item", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsStatusCodesForFlatten", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfStatusCodesModelForFlatten", + "index": 1, + "item_kind": "int32", + "item_value": 404 + }, + { + "@level": "trace", + "@message": "Creating set value", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsStatusCodesForFlatten", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfStatusCodesModelForFlatten", + "element_count": 2, + "element_type": "basetypes.Int64Type" + } +] \ No newline at end of file diff --git a/internal/framework/flex/testdata/autoflex/xml_compat/flatten_xmlwrapper/string_slice_to_list.golden b/internal/framework/flex/testdata/autoflex/xml_compat/flatten_xmlwrapper/string_slice_to_list.golden new file mode 100644 index 000000000000..a1b7a404ca0b --- /dev/null +++ b/internal/framework/flex/testdata/autoflex/xml_compat/flatten_xmlwrapper/string_slice_to_list.golden @@ -0,0 +1,118 @@ +[ + { + "@level": "info", + "@message": "Flattening", + "@module": "provider.autoflex", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsHeadersForFlatten", + "autoflex.target.type": 
"*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfHeadersModelForFlatten" + }, + { + "@level": "info", + "@message": "Converting", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsHeadersForFlatten", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfHeadersModelForFlatten" + }, + { + "@level": "trace", + "@message": "Converting entire XML wrapper struct to collection field", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "flex.awsHeadersForFlatten", + "autoflex.target.fieldname": "Headers", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfHeadersModelForFlatten", + "wrapper_field": "items" + }, + { + "@level": "trace", + "@message": "Starting XML wrapper flatten", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsHeadersForFlatten", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfHeadersModelForFlatten", + "source_type": "flex.awsHeadersForFlatten", + "target_type": "ListTypeOf[basetypes.StringValue]", + "wrapper_field": "items" + }, + { + "@level": "trace", + "@message": "Found Items field", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsHeadersForFlatten", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfHeadersModelForFlatten", + "items_is_nil": false, + "items_kind": "slice", + "items_len": 2, + "items_type": "[]string" + }, + { + "@level": "trace", + 
"@message": "Using target element type", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsHeadersForFlatten", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfHeadersModelForFlatten", + "element_type": "basetypes.StringType" + }, + { + "@level": "trace", + "@message": "Converting items to list elements", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsHeadersForFlatten", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfHeadersModelForFlatten", + "items_count": 2 + }, + { + "@level": "trace", + "@message": "Processing item", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsHeadersForFlatten", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfHeadersModelForFlatten", + "index": 0, + "item_kind": "string", + "item_value": "accept" + }, + { + "@level": "trace", + "@message": "Processing item", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsHeadersForFlatten", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfHeadersModelForFlatten", + "index": 1, + "item_kind": "string", + "item_value": "content-type" + }, + { + "@level": "trace", + "@message": "Creating list value", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": 
"github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsHeadersForFlatten", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfHeadersModelForFlatten", + "element_count": 2, + "element_type": "basetypes.StringType" + }, + { + "@level": "trace", + "@message": "Setting target list value", + "@module": "provider.autoflex", + "autoflex.source.path": "", + "autoflex.source.type": "github.com/hashicorp/terraform-provider-aws/internal/framework/flex.awsHeadersForFlatten", + "autoflex.target.path": "", + "autoflex.target.type": "*github.com/hashicorp/terraform-provider-aws/internal/framework/flex.tfHeadersModelForFlatten" + } +] \ No newline at end of file diff --git a/internal/framework/list_resource_with_sdkv2_resource.go b/internal/framework/list_resource_with_sdkv2_resource.go new file mode 100644 index 000000000000..c3d84e4b100f --- /dev/null +++ b/internal/framework/list_resource_with_sdkv2_resource.go @@ -0,0 +1,197 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package framework + +import ( + "context" + "unique" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/list" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + inttypes "github.com/hashicorp/terraform-provider-aws/internal/types" + tfunique "github.com/hashicorp/terraform-provider-aws/internal/unique" + "github.com/hashicorp/terraform-provider-aws/names" +) + +type WithRegionSpec interface { + SetRegionSpec(regionSpec unique.Handle[inttypes.ServicePackageResourceRegion]) +} + +type ListResourceWithSDKv2Resource struct { + resourceSchema *schema.Resource + identitySpec inttypes.Identity + identitySchema *schema.ResourceIdentity + regionSpec unique.Handle[inttypes.ServicePackageResourceRegion] +} + +func (l *ListResourceWithSDKv2Resource) SetRegionSpec(regionSpec unique.Handle[inttypes.ServicePackageResourceRegion]) { + l.regionSpec = regionSpec + + var isRegionOverrideEnabled bool + if !tfunique.IsHandleNil(regionSpec) && regionSpec.Value().IsOverrideEnabled { + isRegionOverrideEnabled = true + } + + if isRegionOverrideEnabled { + if _, ok := l.resourceSchema.SchemaMap()[names.AttrRegion]; !ok { + // TODO: Use standard shared `region` attribute + l.resourceSchema.SchemaMap()[names.AttrRegion] = &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + } + } + } +} + +func (l *ListResourceWithSDKv2Resource) SetIdentitySpec(identitySpec inttypes.Identity) { + out := make(map[string]*schema.Schema) + for _, v := range identitySpec.Attributes { + out[v.Name()] = &schema.Schema{ + Type: schema.TypeString, + } + if v.Required() { + out[v.Name()].Required = true + } else { + out[v.Name()].Optional = true + } + } + + identitySchema := schema.ResourceIdentity{ + SchemaFunc: func() map[string]*schema.Schema { + return out 
+ }, + } + + l.identitySchema = &identitySchema + l.resourceSchema.Identity = &identitySchema + l.identitySpec = identitySpec +} + +func (l *ListResourceWithSDKv2Resource) RawV5Schemas(ctx context.Context, _ list.RawV5SchemaRequest, response *list.RawV5SchemaResponse) { + response.ProtoV5Schema = l.resourceSchema.ProtoSchema(ctx)() + response.ProtoV5IdentitySchema = l.resourceSchema.ProtoIdentitySchema(ctx)() +} + +func (l *ListResourceWithSDKv2Resource) SetResourceSchema(resource *schema.Resource) { + l.resourceSchema = resource +} + +func (l *ListResourceWithSDKv2Resource) ResourceData() *schema.ResourceData { + return l.resourceSchema.Data(&terraform.InstanceState{}) +} + +func (l *ListResourceWithSDKv2Resource) setResourceIdentity(ctx context.Context, client *conns.AWSClient, d *schema.ResourceData) error { + identity, err := d.Identity() + if err != nil { + return err + } + + for _, attr := range l.identitySpec.Attributes { + switch attr.Name() { + case names.AttrAccountID: + if err := identity.Set(attr.Name(), client.AccountID(ctx)); err != nil { + return err + } + + case names.AttrRegion: + if err := identity.Set(attr.Name(), client.Region(ctx)); err != nil { + return err + } + + default: + val, ok := getAttributeOk(d, attr.ResourceAttributeName()) + if !ok { + continue + } + if err := identity.Set(attr.Name(), val); err != nil { + return err + } + } + } + + return nil +} + +type resourceData interface { + Id() string + GetOk(string) (any, bool) +} + +func getAttributeOk(d resourceData, name string) (string, bool) { + if name == "id" { + return d.Id(), true + } + if v, ok := d.GetOk(name); !ok { + return "", false + } else { + return v.(string), true + } +} + +func (l *ListResourceWithSDKv2Resource) SetResult(ctx context.Context, awsClient *conns.AWSClient, includeResource bool, result *list.ListResult, rd *schema.ResourceData) { + err := l.setResourceIdentity(ctx, awsClient, rd) + if err != nil { + result.Diagnostics.Append(diag.NewErrorDiagnostic( + "Error 
Listing Remote Resources", + "An unexpected error occurred setting resource identity. "+ + "This is always an error in the provider. "+ + "Please report the following to the provider developer:\n\n"+ + "Error: "+err.Error(), + )) + return + } + + tfTypeIdentity, err := rd.TfTypeIdentityState() + if err != nil { + result.Diagnostics.Append(diag.NewErrorDiagnostic( + "Error Listing Remote Resources", + "An unexpected error occurred converting identity state. "+ + "This is always an error in the provider. "+ + "Please report the following to the provider developer:\n\n"+ + "Error: "+err.Error(), + )) + return + } + + result.Diagnostics.Append(result.Identity.Set(ctx, *tfTypeIdentity)...) + if result.Diagnostics.HasError() { + return + } + + if includeResource { + if !tfunique.IsHandleNil(l.regionSpec) && l.regionSpec.Value().IsOverrideEnabled { + if err := rd.Set(names.AttrRegion, awsClient.Region(ctx)); err != nil { + result.Diagnostics.Append(diag.NewErrorDiagnostic( + "Error Listing Remote Resources", + "An unexpected error occurred. "+ + "This is always an error in the provider. "+ + "Please report the following to the provider developer:\n\n"+ + "Error: "+err.Error(), + )) + return + } + } + + tfTypeResource, err := rd.TfTypeResourceState() + if err != nil { + result.Diagnostics.Append(diag.NewErrorDiagnostic( + "Error Listing Remote Resources", + "An unexpected error occurred converting resource state. "+ + "This is always an error in the provider. "+ + "Please report the following to the provider developer:\n\n"+ + "Error: "+err.Error(), + )) + return + } + + result.Diagnostics.Append(result.Resource.Set(ctx, *tfTypeResource)...) 
+ if result.Diagnostics.HasError() { + return + } + } +} diff --git a/internal/framework/list_resource_with_sdkv2_tags.go b/internal/framework/list_resource_with_sdkv2_tags.go new file mode 100644 index 000000000000..03f017a0c4b8 --- /dev/null +++ b/internal/framework/list_resource_with_sdkv2_tags.go @@ -0,0 +1,59 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package framework + +import ( + "context" + "unique" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/provider/interceptors" + inttypes "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/names" +) + +type ListResourceWithSDKv2Tags struct { + tagSpec interceptors.HTags +} + +func (r *ListResourceWithSDKv2Tags) SetTagsSpec(tags unique.Handle[inttypes.ServicePackageResourceTags]) { + r.tagSpec = interceptors.HTags(tags) +} + +func (r *ListResourceWithSDKv2Tags) SetTags(ctx context.Context, client *conns.AWSClient, d *schema.ResourceData) error { + sp, _, _, tagsInContext, ok := interceptors.InfoFromContext(ctx, client) + if !ok { + return nil + } + + // If the R handler didn't set tags, try and read them from the service API. + if tagsInContext.TagsOut.IsNone() { + // Some old resources may not have the required attribute set after Read: + // https://github.com/hashicorp/terraform-provider-aws/issues/31180 + if identifier := r.tagSpec.GetIdentifierSDKv2(ctx, d); identifier != "" { + if err := r.tagSpec.ListTags(ctx, sp, client, identifier); err != nil { + return err + } + } + } + + // Remove any provider configured ignore_tags and system tags from those returned from the service API. 
+ tags := tagsInContext.TagsOut.UnwrapOrDefault().IgnoreSystem(sp.ServicePackageName()).IgnoreConfig(client.IgnoreTagsConfig(ctx)) + + // The resource's configured tags can now include duplicate tags that have been configured on the provider. + if err := d.Set(names.AttrTags, tags.ResolveDuplicates(ctx, client.DefaultTagsConfig(ctx), client.IgnoreTagsConfig(ctx), d, names.AttrTags, nil).Map()); err != nil { + return err + } + + // Computed tags_all do. + if err := d.Set(names.AttrTagsAll, tags.Map()); err != nil { + return err + } + + // reset tags in context for next resource + tagsInContext.TagsOut = nil + + return nil +} diff --git a/internal/framework/planmodifiers/int32planmodifier/null_value.go b/internal/framework/planmodifiers/int32planmodifier/null_value.go deleted file mode 100644 index cbb7f1800dbd..000000000000 --- a/internal/framework/planmodifiers/int32planmodifier/null_value.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package int32planmodifier - -import ( - "context" - - "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" - "github.com/hashicorp/terraform-plugin-framework/types" -) - -// LegacyValue returns a plan modifier that prevents `known after apply` during creation plans for -// attributes that must be `Computed,Optional` for legacy value reasons. 
-func NullValue() planmodifier.Int32 { - return nullValueModifier{} -} - -type nullValueModifier struct{} - -func (m nullValueModifier) Description(_ context.Context) string { - return "" -} - -func (m nullValueModifier) MarkdownDescription(ctx context.Context) string { - return m.Description(ctx) -} - -func (m nullValueModifier) PlanModifyInt32(ctx context.Context, req planmodifier.Int32Request, resp *planmodifier.Int32Response) { - // Use value from Config if set - if !req.ConfigValue.IsNull() { - return - } - - // Exit if another planmodifier has set the value - if !req.PlanValue.IsUnknown() { - return - } - - // Do nothing if there is an unknown configuration value, otherwise interpolation gets messed up. - if req.ConfigValue.IsUnknown() { - return - } - - if req.StateValue.IsNull() { - resp.PlanValue = types.Int32Null() - return - } -} diff --git a/internal/framework/planmodifiers/stringplanmodifier/requires_replace_wo.go b/internal/framework/planmodifiers/stringplanmodifier/requires_replace_wo.go new file mode 100644 index 000000000000..2e39af574ae8 --- /dev/null +++ b/internal/framework/planmodifiers/stringplanmodifier/requires_replace_wo.go @@ -0,0 +1,65 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package stringplanmodifier + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" + "github.com/hashicorp/terraform-provider-aws/internal/framework/privatestate" +) + +// RequiresReplaceWO returns a plan modifier that forces resource replacement +// if a write-only value changes. 
+func RequiresReplaceWO(privateStateKey string) planmodifier.String { + return requiresReplaceWO{ + privateStateKey: privateStateKey, + } +} + +type requiresReplaceWO struct { + privateStateKey string +} + +func (m requiresReplaceWO) Description(ctx context.Context) string { + return m.MarkdownDescription(ctx) +} + +func (m requiresReplaceWO) MarkdownDescription(context.Context) string { + return "If the value of this write-only attribute changes, Terraform will destroy and recreate the resource." +} + +func (m requiresReplaceWO) PlanModifyString(ctx context.Context, request planmodifier.StringRequest, response *planmodifier.StringResponse) { + newValue := request.ConfigValue + newValueExists := !newValue.IsNull() + + woStore := privatestate.NewWriteOnlyValueStore(request.Private, m.privateStateKey) + oldValueExists, diags := woStore.HasValue(ctx) + response.Diagnostics.Append(diags...) + if response.Diagnostics.HasError() { + return + } + + if !newValueExists { + if oldValueExists { + response.RequiresReplace = true + } + return + } + + if !oldValueExists { + response.RequiresReplace = true + return + } + + equal, diags := woStore.EqualValue(ctx, newValue) + response.Diagnostics.Append(diags...) + if response.Diagnostics.HasError() { + return + } + + if !equal { + response.RequiresReplace = true + } +} diff --git a/internal/framework/privatestate/private_state.go b/internal/framework/privatestate/private_state.go new file mode 100644 index 000000000000..584c9509680f --- /dev/null +++ b/internal/framework/privatestate/private_state.go @@ -0,0 +1,18 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package privatestate + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/diag" +) + +// PrivateState defines an interface for managing provider-defined resource private state data. +type PrivateState interface { + // GetKey returns the private state data associated with the given key. 
+ GetKey(context.Context, string) ([]byte, diag.Diagnostics) + // SetKey sets the private state data at the given key. + SetKey(context.Context, string, []byte) diag.Diagnostics +} diff --git a/internal/framework/privatestate/write_only.go b/internal/framework/privatestate/write_only.go new file mode 100644 index 000000000000..9b7503f5611f --- /dev/null +++ b/internal/framework/privatestate/write_only.go @@ -0,0 +1,61 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package privatestate + +import ( + "context" + "crypto/sha256" + "encoding/hex" + "strconv" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/types" + tfjson "github.com/hashicorp/terraform-provider-aws/internal/json" +) + +func NewWriteOnlyValueStore(private PrivateState, key string) *WriteOnlyValueStore { + return &WriteOnlyValueStore{ + key: key, + private: private, + } +} + +type WriteOnlyValueStore struct { + key string + private PrivateState +} + +func (w *WriteOnlyValueStore) EqualValue(ctx context.Context, value types.String) (bool, diag.Diagnostics) { + bytes, diags := w.private.GetKey(ctx, w.key) + if diags.HasError() { + return false, diags + } + + var s string + if err := tfjson.DecodeFromBytes(bytes, &s); err != nil { + diags.AddError("decoding private state", err.Error()) + return false, diags + } + + return s == sha256Hash(value.ValueString()), diags +} + +func (w *WriteOnlyValueStore) HasValue(ctx context.Context) (bool, diag.Diagnostics) { + bytes, diags := w.private.GetKey(ctx, w.key) + return len(bytes) > 0, diags +} + +func (w *WriteOnlyValueStore) SetValue(ctx context.Context, val types.String) diag.Diagnostics { + if val.IsNull() { + return w.private.SetKey(ctx, w.key, []byte("")) + } + + return w.private.SetKey(ctx, w.key, []byte(strconv.Quote(sha256Hash(val.ValueString())))) +} + +func sha256Hash(data string) string { + hash := sha256.New() + hash.Write([]byte(data)) + return 
hex.EncodeToString(hash.Sum(nil)) +} diff --git a/internal/framework/privatestate/write_only_test.go b/internal/framework/privatestate/write_only_test.go new file mode 100644 index 000000000000..ea44d17b8672 --- /dev/null +++ b/internal/framework/privatestate/write_only_test.go @@ -0,0 +1,112 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package privatestate_test + +import ( + "context" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-provider-aws/internal/framework/privatestate" +) + +func TestWriteOnlyValueStore_HasValue(t *testing.T) { + t.Parallel() + + ctx := t.Context() + store1 := privatestate.NewWriteOnlyValueStore(&privateState{}, "key") + store2 := privatestate.NewWriteOnlyValueStore(&privateState{}, "key") + store2.SetValue(ctx, types.StringValue("value1")) + + testCases := []struct { + testName string + store *privatestate.WriteOnlyValueStore + wantValue bool + }{ + { + testName: "empty state", + store: store1, + }, + { + testName: "has value", + store: store2, + wantValue: true, + }, + } + + for _, testCase := range testCases { + t.Run(testCase.testName, func(t *testing.T) { + t.Parallel() + gotValue, diags := testCase.store.HasValue(ctx) + if diags.HasError() { + t.Fatal("unexpected error") + } + if got, want := gotValue, testCase.wantValue; !cmp.Equal(got, want) { + t.Errorf("got %t, want %t", got, want) + } + }) + } +} + +func TestWriteOnlyValueStore_EqualValue(t *testing.T) { + t.Parallel() + + ctx := t.Context() + store1 := privatestate.NewWriteOnlyValueStore(&privateState{}, "key") + store1.SetValue(ctx, types.StringValue("value1")) + store2 := privatestate.NewWriteOnlyValueStore(&privateState{}, "key") + store2.SetValue(ctx, types.StringValue("value2")) + + testCases := []struct { + testName string + store *privatestate.WriteOnlyValueStore + wantEqual bool + }{ + { + 
testName: "equal", + store: store1, + wantEqual: true, + }, + { + testName: "not equal", + store: store2, + }, + } + + for _, testCase := range testCases { + t.Run(testCase.testName, func(t *testing.T) { + t.Parallel() + gotEqual, diags := testCase.store.EqualValue(ctx, types.StringValue("value1")) + if diags.HasError() { + t.Fatal("unexpected error") + } + if got, want := gotEqual, testCase.wantEqual; !cmp.Equal(got, want) { + t.Errorf("got %t, want %t", got, want) + } + }) + } +} + +type privateState struct { + data map[string][]byte +} + +func (p *privateState) GetKey(_ context.Context, key string) ([]byte, diag.Diagnostics) { + var diags diag.Diagnostics + bytes := p.data[key] + return bytes, diags +} + +func (p *privateState) SetKey(_ context.Context, key string, value []byte) diag.Diagnostics { + var diags diag.Diagnostics + + if p.data == nil { + p.data = make(map[string][]byte) + } + + p.data[key] = value + return diags +} diff --git a/internal/framework/types/list_nested_objectof.go b/internal/framework/types/list_nested_objectof.go index 9702bd4d9800..75a1b2c7f481 100644 --- a/internal/framework/types/list_nested_objectof.go +++ b/internal/framework/types/list_nested_objectof.go @@ -119,7 +119,7 @@ func (t listNestedObjectTypeOf[T]) NewObjectSlice(ctx context.Context, len, cap func (t listNestedObjectTypeOf[T]) NullValue(ctx context.Context) (attr.Value, diag.Diagnostics) { var diags diag.Diagnostics - return NewListNestedObjectValueOfNull[T](ctx, WithSemanticEqualityFunc(t.semanticEqualityFunc)), diags + return NewListNestedObjectValueOfNull(ctx, WithSemanticEqualityFunc(t.semanticEqualityFunc)), diags } func (t listNestedObjectTypeOf[T]) ValueFromObjectPtr(ctx context.Context, ptr any) (attr.Value, diag.Diagnostics) { @@ -275,7 +275,7 @@ func NewListNestedObjectValueOfPtrMust[T any](ctx context.Context, t *T, f ...Ne } func NewListNestedObjectValueOfSlice[T any](ctx context.Context, ts []*T, f semanticEqualityFunc[T]) (ListNestedObjectValueOf[T], 
diag.Diagnostics) { - return newListNestedObjectValueOf[T](ctx, ts, f) + return newListNestedObjectValueOf(ctx, ts, f) } func NewListNestedObjectValueOfSliceMust[T any](ctx context.Context, ts []*T, f ...NestedObjectOfOption[T]) ListNestedObjectValueOf[T] { @@ -285,7 +285,7 @@ func NewListNestedObjectValueOfSliceMust[T any](ctx context.Context, ts []*T, f func NewListNestedObjectValueOfValueSlice[T any](ctx context.Context, ts []T, f ...NestedObjectOfOption[T]) (ListNestedObjectValueOf[T], diag.Diagnostics) { opts := newNestedObjectOfOptions(f...) - return newListNestedObjectValueOf[T](ctx, ts, opts.SemanticEqualityFunc) + return newListNestedObjectValueOf(ctx, ts, opts.SemanticEqualityFunc) } func NewListNestedObjectValueOfValueSliceMust[T any](ctx context.Context, ts []T, f ...NestedObjectOfOption[T]) ListNestedObjectValueOf[T] { diff --git a/internal/framework/types/list_nested_objectof_test.go b/internal/framework/types/list_nested_objectof_test.go index bfc20e143f62..1a1216557055 100644 --- a/internal/framework/types/list_nested_objectof_test.go +++ b/internal/framework/types/list_nested_objectof_test.go @@ -91,11 +91,11 @@ func TestListNestedObjectTypeOfValueFromTerraform(t *testing.T) { }, "valid value": { tfVal: objectAListValue, - wantVal: fwtypes.NewListNestedObjectValueOfPtrMust[ObjectA](ctx, &objectA), + wantVal: fwtypes.NewListNestedObjectValueOfPtrMust(ctx, &objectA), }, "invalid Terraform value": { tfVal: objectBListValue, - wantVal: fwtypes.NewListNestedObjectValueOfPtrMust[ObjectA](ctx, &objectA), + wantVal: fwtypes.NewListNestedObjectValueOfPtrMust(ctx, &objectA), wantErr: true, }, } diff --git a/internal/framework/types/listof.go b/internal/framework/types/listof.go index 1722d7c7988f..f711521de4ce 100644 --- a/internal/framework/types/listof.go +++ b/internal/framework/types/listof.go @@ -25,16 +25,19 @@ var ( ) var ( - // ListOfStringType is a custom type used for defining a List of strings. 
- ListOfStringType = listTypeOf[basetypes.StringValue]{basetypes.ListType{ElemType: basetypes.StringType{}}, nil} - // ListOfARNType is a custom type used for defining a List of ARNs. ListOfARNType = listTypeOf[ARN]{basetypes.ListType{ElemType: ARNType}, nil} + + // ListOfInt64Type is a custom type used for defining a List of int64s. + ListOfInt64Type = listTypeOf[basetypes.Int64Value]{basetypes.ListType{ElemType: basetypes.Int64Type{}}, nil} + + // ListOfStringType is a custom type used for defining a List of strings. + ListOfStringType = listTypeOf[basetypes.StringValue]{basetypes.ListType{ElemType: basetypes.StringType{}}, nil} ) type validateAttributeFunc[T attr.Value] func(context.Context, path.Path, []attr.Value) diag.Diagnostics -// TODO Replace with Go 1.24 generic type alias when available. +// ListOfStringEnumType is a custom type used for defining a List of string enums. func ListOfStringEnumType[T enum.Valueser[T]]() listTypeOf[StringEnum[T]] { return listTypeOf[StringEnum[T]]{basetypes.ListType{ElemType: StringEnumType[T]()}, validateStringEnumSlice[T]} } @@ -115,8 +118,9 @@ type ListValueOf[T attr.Value] struct { } type ( - ListOfString = ListValueOf[basetypes.StringValue] ListOfARN = ListValueOf[ARN] + ListOfInt64 = ListValueOf[basetypes.Int64Value] + ListOfString = ListValueOf[basetypes.StringValue] ListOfStringEnum[T enum.Valueser[T]] = ListValueOf[StringEnum[T]] ) diff --git a/internal/framework/types/mapof.go b/internal/framework/types/mapof.go index 693727088c98..df571c558dd0 100644 --- a/internal/framework/types/mapof.go +++ b/internal/framework/types/mapof.go @@ -20,7 +20,10 @@ var ( ) var ( - // MapOfStringType is a custom type used for defining a Map of strings. + // MapOfMapOfStringType is a custom type used for defining a map[string]map[string]string. + MapOfMapOfStringType = mapTypeOf[MapOfString]{basetypes.MapType{ElemType: MapOfStringType}} + + // MapOfStringType is a custom type used for defining a map[string]string. 
MapOfStringType = mapTypeOf[basetypes.StringValue]{basetypes.MapType{ElemType: basetypes.StringType{}}} ) @@ -101,7 +104,8 @@ type MapValueOf[T attr.Value] struct { } type ( - MapOfString = MapValueOf[basetypes.StringValue] + MapOfMapOfString = MapValueOf[MapOfString] + MapOfString = MapValueOf[basetypes.StringValue] ) func (v MapValueOf[T]) Equal(o attr.Value) bool { diff --git a/internal/framework/types/objectof.go b/internal/framework/types/objectof.go index d72a161b3de9..61a4a9544c08 100644 --- a/internal/framework/types/objectof.go +++ b/internal/framework/types/objectof.go @@ -250,5 +250,5 @@ func NewObjectValueOf[T any](ctx context.Context, t *T) (ObjectValueOf[T], diag. } func NewObjectValueOfMust[T any](ctx context.Context, t *T) ObjectValueOf[T] { - return fwdiag.Must(NewObjectValueOf[T](ctx, t)) + return fwdiag.Must(NewObjectValueOf(ctx, t)) } diff --git a/internal/framework/types/objectof_test.go b/internal/framework/types/objectof_test.go index d210e86504a4..f405e45bf60b 100644 --- a/internal/framework/types/objectof_test.go +++ b/internal/framework/types/objectof_test.go @@ -94,11 +94,11 @@ func TestObjectTypeOfValueFromTerraform(t *testing.T) { }, "valid value": { tfVal: objectAValue, - wantVal: fwtypes.NewObjectValueOfMust[ObjectA](ctx, &objectA), + wantVal: fwtypes.NewObjectValueOfMust(ctx, &objectA), }, "invalid Terraform value": { tfVal: objectBValue, - wantVal: fwtypes.NewObjectValueOfMust[ObjectA](ctx, &objectA), + wantVal: fwtypes.NewObjectValueOfMust(ctx, &objectA), wantErr: true, }, } diff --git a/internal/framework/types/set_nested_objectof.go b/internal/framework/types/set_nested_objectof.go index 22f939789273..0bd4198724a0 100644 --- a/internal/framework/types/set_nested_objectof.go +++ b/internal/framework/types/set_nested_objectof.go @@ -117,7 +117,7 @@ func (t setNestedObjectTypeOf[T]) NewObjectSlice(ctx context.Context, len, cap i func (t setNestedObjectTypeOf[T]) NullValue(ctx context.Context) (attr.Value, diag.Diagnostics) { var 
diags diag.Diagnostics - return NewSetNestedObjectValueOfNull[T](ctx, WithSemanticEqualityFunc(t.semanticEqualityFunc)), diags + return NewSetNestedObjectValueOfNull(ctx, WithSemanticEqualityFunc(t.semanticEqualityFunc)), diags } func (t setNestedObjectTypeOf[T]) ValueFromObjectPtr(ctx context.Context, ptr any) (attr.Value, diag.Diagnostics) { @@ -223,7 +223,7 @@ func NewSetNestedObjectValueOfPtrMust[T any](ctx context.Context, t *T, options } func NewSetNestedObjectValueOfSlice[T any](ctx context.Context, ts []*T, f semanticEqualityFunc[T]) (SetNestedObjectValueOf[T], diag.Diagnostics) { - return newSetNestedObjectValueOf[T](ctx, ts, f) + return newSetNestedObjectValueOf(ctx, ts, f) } func NewSetNestedObjectValueOfSliceMust[T any](ctx context.Context, ts []*T, options ...NestedObjectOfOption[T]) SetNestedObjectValueOf[T] { @@ -233,7 +233,7 @@ func NewSetNestedObjectValueOfSliceMust[T any](ctx context.Context, ts []*T, opt func NewSetNestedObjectValueOfValueSlice[T any](ctx context.Context, ts []T, options ...NestedObjectOfOption[T]) (SetNestedObjectValueOf[T], diag.Diagnostics) { opts := newNestedObjectOfOptions(options...) 
- return newSetNestedObjectValueOf[T](ctx, ts, opts.SemanticEqualityFunc) + return newSetNestedObjectValueOf(ctx, ts, opts.SemanticEqualityFunc) } func NewSetNestedObjectValueOfValueSliceMust[T any](ctx context.Context, ts []T, options ...NestedObjectOfOption[T]) SetNestedObjectValueOf[T] { diff --git a/internal/framework/types/set_nested_objectof_test.go b/internal/framework/types/set_nested_objectof_test.go index 9ad63464ce08..aa0d318f240c 100644 --- a/internal/framework/types/set_nested_objectof_test.go +++ b/internal/framework/types/set_nested_objectof_test.go @@ -91,11 +91,11 @@ func TestSetNestedObjectTypeOfValueFromTerraform(t *testing.T) { }, "valid value": { tfVal: objectASetValue, - wantVal: fwtypes.NewSetNestedObjectValueOfPtrMust[ObjectA](ctx, &objectA), + wantVal: fwtypes.NewSetNestedObjectValueOfPtrMust(ctx, &objectA), }, "invalid Terraform value": { tfVal: objectBSetValue, - wantVal: fwtypes.NewSetNestedObjectValueOfPtrMust[ObjectA](ctx, &objectA), + wantVal: fwtypes.NewSetNestedObjectValueOfPtrMust(ctx, &objectA), wantErr: true, }, } diff --git a/internal/framework/types/smithy_json.go b/internal/framework/types/smithy_json.go index caa104e951e4..71410cd1484a 100644 --- a/internal/framework/types/smithy_json.go +++ b/internal/framework/types/smithy_json.go @@ -5,7 +5,6 @@ package types import ( "context" - "encoding/json" "fmt" "github.com/hashicorp/terraform-plugin-framework-jsontypes/jsontypes" @@ -14,19 +13,19 @@ import ( "github.com/hashicorp/terraform-plugin-framework/diag" "github.com/hashicorp/terraform-plugin-framework/types/basetypes" "github.com/hashicorp/terraform-plugin-go/tftypes" - smithyjson "github.com/hashicorp/terraform-provider-aws/internal/json" + tfsmithy "github.com/hashicorp/terraform-provider-aws/internal/smithy" ) var ( - _ basetypes.StringTypable = (*SmithyJSONType[smithyjson.JSONStringer])(nil) + _ basetypes.StringTypable = (*SmithyJSONType[tfsmithy.JSONStringer])(nil) ) -type SmithyJSONType[T smithyjson.JSONStringer] 
struct { - basetypes.StringType +type SmithyJSONType[T tfsmithy.JSONStringer] struct { + jsontypes.NormalizedType f func(any) T } -func NewSmithyJSONType[T smithyjson.JSONStringer](_ context.Context, f func(any) T) SmithyJSONType[T] { +func NewSmithyJSONType[T tfsmithy.JSONStringer](_ context.Context, f func(any) T) SmithyJSONType[T] { return SmithyJSONType[T]{ f: f, } @@ -45,29 +44,25 @@ func (t SmithyJSONType[T]) ValueType(context.Context) attr.Value { // Equal returns true if the given type is equivalent. func (t SmithyJSONType[T]) Equal(o attr.Type) bool { other, ok := o.(SmithyJSONType[T]) - if !ok { return false } - return t.StringType.Equal(other.StringType) + return t.NormalizedType.Equal(other.NormalizedType) } func (t SmithyJSONType[T]) ValueFromTerraform(ctx context.Context, in tftypes.Value) (attr.Value, error) { attrValue, err := t.StringType.ValueFromTerraform(ctx, in) - if err != nil { return nil, err } stringValue, ok := attrValue.(basetypes.StringValue) - if !ok { return nil, fmt.Errorf("unexpected value type of %T", attrValue) } stringValuable, diags := t.ValueFromString(ctx, stringValue) - if diags.HasError() { return nil, fmt.Errorf("unexpected error converting StringValue to StringValuable: %v", diags) } @@ -79,53 +74,50 @@ func (t SmithyJSONType[T]) ValueFromString(ctx context.Context, in basetypes.Str var diags diag.Diagnostics if in.IsNull() { - return SmithyJSONNull[T](), diags + return NewSmithyJSONNull[T](), diags } if in.IsUnknown() { - return SmithyJSONUnknown[T](), diags - } - - var data any - if err := json.Unmarshal([]byte(in.ValueString()), &data); err != nil { - return SmithyJSONUnknown[T](), diags + return NewSmithyJSONUnknown[T](), diags } - return SmithyJSONValue[T](in.ValueString(), t.f), diags + return NewSmithyJSONValue(in.ValueString(), t.f), diags } var ( - _ basetypes.StringValuable = (*SmithyJSON[smithyjson.JSONStringer])(nil) - _ basetypes.StringValuableWithSemanticEquals = (*SmithyJSON[smithyjson.JSONStringer])(nil) - _ 
xattr.ValidateableAttribute = (*SmithyJSON[smithyjson.JSONStringer])(nil) + _ basetypes.StringValuable = (*SmithyJSON[tfsmithy.JSONStringer])(nil) + _ basetypes.StringValuableWithSemanticEquals = (*SmithyJSON[tfsmithy.JSONStringer])(nil) + _ xattr.ValidateableAttribute = (*SmithyJSON[tfsmithy.JSONStringer])(nil) + _ SmithyDocumentValue = (*SmithyJSON[tfsmithy.JSONStringer])(nil) ) -type SmithyJSON[T smithyjson.JSONStringer] struct { - basetypes.StringValue +type SmithyJSON[T tfsmithy.JSONStringer] struct { + jsontypes.Normalized f func(any) T } func (v SmithyJSON[T]) Equal(o attr.Value) bool { other, ok := o.(SmithyJSON[T]) - if !ok { return false } - return v.StringValue.Equal(other.StringValue) + return v.Normalized.Equal(other.Normalized) +} + +func (v SmithyJSON[T]) ToSmithyObjectDocument(ctx context.Context) (any, diag.Diagnostics) { + return v.ToSmithyDocument(ctx) } -func (v SmithyJSON[T]) ValueInterface() (T, diag.Diagnostics) { +func (v SmithyJSON[T]) ToSmithyDocument(context.Context) (T, diag.Diagnostics) { var diags diag.Diagnostics var zero T - if v.IsNull() || v.IsUnknown() { + if v.IsNull() || v.IsUnknown() || v.f == nil { return zero, diags } - var data any - err := json.Unmarshal([]byte(v.ValueString()), &data) - + t, err := tfsmithy.DocumentFromJSONString(v.ValueString(), v.f) if err != nil { diags.AddError( "JSON Unmarshal Error", @@ -136,26 +128,18 @@ func (v SmithyJSON[T]) ValueInterface() (T, diag.Diagnostics) { return zero, diags } - return v.f(data), diags + return t, diags } func (v SmithyJSON[T]) Type(context.Context) attr.Type { - return SmithyJSONType[T]{} -} - -func (v SmithyJSON[T]) ValidateAttribute(ctx context.Context, req xattr.ValidateAttributeRequest, resp *xattr.ValidateAttributeResponse) { - if v.IsNull() || v.IsUnknown() { - return + return SmithyJSONType[T]{ + f: v.f, } - - jsontypes.NewNormalizedValue(v.ValueString()).ValidateAttribute(ctx, req, resp) } func (v SmithyJSON[T]) StringSemanticEquals(ctx context.Context, 
newValuable basetypes.StringValuable) (bool, diag.Diagnostics) { var diags diag.Diagnostics - oldString := jsontypes.NewNormalizedValue(v.ValueString()) - newValue, ok := newValuable.(SmithyJSON[T]) if !ok { diags.AddError( @@ -168,32 +152,34 @@ func (v SmithyJSON[T]) StringSemanticEquals(ctx context.Context, newValuable bas return false, diags } - newString := jsontypes.NewNormalizedValue(newValue.ValueString()) - - result, err := oldString.StringSemanticEquals(ctx, newString) - diags.Append(err...) - - if diags.HasError() { - return false, diags - } - return result, diags + return v.Normalized.StringSemanticEquals(ctx, newValue.Normalized) } -func SmithyJSONValue[T smithyjson.JSONStringer](value string, f func(any) T) SmithyJSON[T] { +func NewSmithyJSONValue[T tfsmithy.JSONStringer](value string, f func(any) T) SmithyJSON[T] { return SmithyJSON[T]{ - StringValue: basetypes.NewStringValue(value), - f: f, + Normalized: jsontypes.NewNormalizedValue(value), + f: f, } } -func SmithyJSONNull[T smithyjson.JSONStringer]() SmithyJSON[T] { + +func NewSmithyJSONNull[T tfsmithy.JSONStringer]() SmithyJSON[T] { return SmithyJSON[T]{ - StringValue: basetypes.NewStringNull(), + Normalized: jsontypes.NewNormalizedNull(), } } -func SmithyJSONUnknown[T smithyjson.JSONStringer]() SmithyJSON[T] { +func NewSmithyJSONUnknown[T tfsmithy.JSONStringer]() SmithyJSON[T] { return SmithyJSON[T]{ - StringValue: basetypes.NewStringUnknown(), + Normalized: jsontypes.NewNormalizedUnknown(), } } + +// SmithyDocumentValue extends the Value interface for values that represent Smithy documents. +// It isn't generic on the Go interface type as it's referenced within AutoFlEx. +type SmithyDocumentValue interface { + attr.Value + + // ToSmithyObjectDocument returns the value as a Smithy document. 
+ ToSmithyObjectDocument(context.Context) (any, diag.Diagnostics) +} diff --git a/internal/framework/types/smithy_json_test.go b/internal/framework/types/smithy_json_test.go index ca45f243d932..28564cd87980 100644 --- a/internal/framework/types/smithy_json_test.go +++ b/internal/framework/types/smithy_json_test.go @@ -13,7 +13,7 @@ import ( "github.com/hashicorp/terraform-plugin-framework/attr/xattr" "github.com/hashicorp/terraform-plugin-go/tftypes" fwtypes "github.com/hashicorp/terraform-provider-aws/internal/framework/types" - smithyjson "github.com/hashicorp/terraform-provider-aws/internal/json" + tfsmithy "github.com/hashicorp/terraform-provider-aws/internal/smithy" ) func TestSmithyJSONTypeValueFromTerraform(t *testing.T) { @@ -25,19 +25,15 @@ func TestSmithyJSONTypeValueFromTerraform(t *testing.T) { }{ "null value": { val: tftypes.NewValue(tftypes.String, nil), - expected: fwtypes.SmithyJSONNull[smithyjson.JSONStringer](), + expected: fwtypes.NewSmithyJSONNull[tfsmithy.JSONStringer](), }, "unknown value": { val: tftypes.NewValue(tftypes.String, tftypes.UnknownValue), - expected: fwtypes.SmithyJSONUnknown[smithyjson.JSONStringer](), + expected: fwtypes.NewSmithyJSONUnknown[tfsmithy.JSONStringer](), }, "valid SmithyJSON": { val: tftypes.NewValue(tftypes.String, `{"test": "value"}`), - expected: fwtypes.SmithyJSONValue[smithyjson.JSONStringer](`{"test": "value"}`, nil), // lintignore:AWSAT003,AWSAT005 - }, - "invalid SmithyJSON": { - val: tftypes.NewValue(tftypes.String, "not ok"), - expected: fwtypes.SmithyJSONUnknown[smithyjson.JSONStringer](), + expected: fwtypes.NewSmithyJSONValue[tfsmithy.JSONStringer](`{"test": "value"}`, nil), // lintignore:AWSAT003,AWSAT005 }, } @@ -46,14 +42,14 @@ func TestSmithyJSONTypeValueFromTerraform(t *testing.T) { t.Parallel() ctx := context.Background() - val, err := fwtypes.SmithyJSONType[smithyjson.JSONStringer]{}.ValueFromTerraform(ctx, test.val) + val, err := 
fwtypes.SmithyJSONType[tfsmithy.JSONStringer]{}.ValueFromTerraform(ctx, test.val) if err != nil { t.Fatalf("got unexpected error: %s", err) } - if diff := cmp.Diff(val, test.expected); diff != "" { - t.Errorf("unexpected diff (+wanted, -got): %s", diff) + if got, want := val, test.expected; !got.Equal(want) { + t.Errorf("got %T %v, want %T %v", got, got, want, want) } }) } @@ -63,20 +59,20 @@ func TestSmithyJSONValidateAttribute(t *testing.T) { t.Parallel() tests := map[string]struct { - val fwtypes.SmithyJSON[smithyjson.JSONStringer] + val fwtypes.SmithyJSON[tfsmithy.JSONStringer] expectError bool }{ "null value": { - val: fwtypes.SmithyJSONNull[smithyjson.JSONStringer](), + val: fwtypes.NewSmithyJSONNull[tfsmithy.JSONStringer](), }, "unknown value": { - val: fwtypes.SmithyJSONUnknown[smithyjson.JSONStringer](), + val: fwtypes.NewSmithyJSONUnknown[tfsmithy.JSONStringer](), }, "valid SmithyJSON": { // lintignore:AWSAT003,AWSAT005 - val: fwtypes.SmithyJSONValue[smithyjson.JSONStringer](`{"test": "value"}`, nil), // lintignore:AWSAT003,AWSAT005 + val: fwtypes.NewSmithyJSONValue[tfsmithy.JSONStringer](`{"test": "value"}`, nil), // lintignore:AWSAT003,AWSAT005 }, "invalid SmithyJSON": { - val: fwtypes.SmithyJSONValue[smithyjson.JSONStringer]("not ok", nil), + val: fwtypes.NewSmithyJSONValue[tfsmithy.JSONStringer]("not ok", nil), expectError: true, }, } @@ -102,7 +98,7 @@ type testJSONDocument struct { Value any } -func newTestJSONDocument(v any) smithyjson.JSONStringer { +func newTestJSONDocument(v any) tfsmithy.JSONStringer { return &testJSONDocument{Value: v} } @@ -122,18 +118,18 @@ func TestSmithyJSONValueInterface(t *testing.T) { t.Parallel() tests := map[string]struct { - val fwtypes.SmithyJSON[smithyjson.JSONStringer] - expected smithyjson.JSONStringer + val fwtypes.SmithyJSON[tfsmithy.JSONStringer] + expected tfsmithy.JSONStringer expectError bool }{ "null value": { - val: fwtypes.SmithyJSONNull[smithyjson.JSONStringer](), + val: 
fwtypes.NewSmithyJSONNull[tfsmithy.JSONStringer](), }, "unknown value": { - val: fwtypes.SmithyJSONUnknown[smithyjson.JSONStringer](), + val: fwtypes.NewSmithyJSONUnknown[tfsmithy.JSONStringer](), }, "valid SmithyJSON": { // lintignore:AWSAT003,AWSAT005 - val: fwtypes.SmithyJSONValue[smithyjson.JSONStringer](`{"test": "value"}`, newTestJSONDocument), // lintignore:AWSAT003,AWSAT005 + val: fwtypes.NewSmithyJSONValue(`{"test": "value"}`, newTestJSONDocument), // lintignore:AWSAT003,AWSAT005 expected: &testJSONDocument{ Value: map[string]any{ "test": "value", @@ -141,13 +137,13 @@ func TestSmithyJSONValueInterface(t *testing.T) { }, }, "valid SmithyJSON slice": { // lintignore:AWSAT003,AWSAT005 - val: fwtypes.SmithyJSONValue[smithyjson.JSONStringer](`["value1","value"]`, newTestJSONDocument), // lintignore:AWSAT003,AWSAT005 + val: fwtypes.NewSmithyJSONValue(`["value1","value"]`, newTestJSONDocument), // lintignore:AWSAT003,AWSAT005 expected: &testJSONDocument{ Value: []any{"value1", "value"}, }, }, "invalid SmithyJSON": { - val: fwtypes.SmithyJSONValue[smithyjson.JSONStringer]("not ok", newTestJSONDocument), // lintignore:AWSAT003,AWSAT005 + val: fwtypes.NewSmithyJSONValue("not ok", newTestJSONDocument), // lintignore:AWSAT003,AWSAT005 expectError: true, }, } @@ -156,7 +152,7 @@ func TestSmithyJSONValueInterface(t *testing.T) { t.Run(name, func(t *testing.T) { t.Parallel() - s, err := test.val.ValueInterface() + s, err := test.val.ToSmithyDocument(t.Context()) gotErr := err.HasError() if gotErr != test.expectError { diff --git a/internal/framework/with_identity.go b/internal/framework/with_identity.go new file mode 100644 index 000000000000..a3b06bebd2bd --- /dev/null +++ b/internal/framework/with_identity.go @@ -0,0 +1,26 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package framework + +import ( + inttypes "github.com/hashicorp/terraform-provider-aws/internal/types" +) + +type Identityer interface { + SetIdentitySpec(identity inttypes.Identity) +} + +var _ Identityer = &WithIdentity{} + +type WithIdentity struct { + identity inttypes.Identity +} + +func (w *WithIdentity) SetIdentitySpec(identity inttypes.Identity) { + w.identity = identity +} + +func (w WithIdentity) IdentitySpec() inttypes.Identity { + return w.identity +} diff --git a/internal/framework/with_import_by_identity.go b/internal/framework/with_import_by_identity.go index 59eebd257faa..987e2d2bd6f3 100644 --- a/internal/framework/with_import_by_identity.go +++ b/internal/framework/with_import_by_identity.go @@ -13,18 +13,21 @@ import ( // TODO: Needs a better name type ImportByIdentityer interface { - SetIdentitySpec(identity inttypes.Identity, importSpec inttypes.FrameworkImport) + Identityer + SetImportSpec(importSpec inttypes.FrameworkImport) } var _ ImportByIdentityer = &WithImportByIdentity{} +// WithImportByIdentity is intended to be embedded in resources which support resource identity. +// +// See: https://developer.hashicorp.com/terraform/plugin/framework/resources/identity#importing-by-identity type WithImportByIdentity struct { - identity inttypes.Identity + WithIdentity importSpec inttypes.FrameworkImport } -func (w *WithImportByIdentity) SetIdentitySpec(identity inttypes.Identity, importSpec inttypes.FrameworkImport) { - w.identity = identity +func (w *WithImportByIdentity) SetImportSpec(importSpec inttypes.FrameworkImport) { w.importSpec = importSpec } @@ -52,10 +55,6 @@ func (w WithImportByIdentity) ImportState(ctx context.Context, request resource. 
} } -func (w WithImportByIdentity) IdentitySpec() inttypes.Identity { - return w.identity -} - func (w WithImportByIdentity) ImportSpec() inttypes.FrameworkImport { return w.importSpec } diff --git a/internal/framework/with_list.go b/internal/framework/with_list.go new file mode 100644 index 000000000000..1441b0d5be48 --- /dev/null +++ b/internal/framework/with_list.go @@ -0,0 +1,26 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package framework + +import ( + "github.com/hashicorp/terraform-provider-aws/internal/provider/framework/listresource" +) + +type Lister interface { + AppendResultInterceptor(listresource.ListResultInterceptor) +} + +var _ Lister = &WithList{} + +type WithList struct { + interceptors []listresource.ListResultInterceptor +} + +func (w *WithList) AppendResultInterceptor(interceptor listresource.ListResultInterceptor) { + w.interceptors = append(w.interceptors, interceptor) +} + +func (w WithList) ResultInterceptors() []listresource.ListResultInterceptor { + return w.interceptors +} diff --git a/internal/generate/acctestconsts/semgrep.gtpl b/internal/generate/acctestconsts/semgrep.gtpl index 8ba6cf0c18a1..d12c35c12351 100644 --- a/internal/generate/acctestconsts/semgrep.gtpl +++ b/internal/generate/acctestconsts/semgrep.gtpl @@ -6,7 +6,7 @@ rules: message: Use the constant `acctest.Ct{{ .Constant }}` for the string literal "{{ .Literal }}" in test files paths: include: - - "internal/service/**/*_test.go" + - "/internal/service/**/*_test.go" {{- if .AltLiteral }} pattern-either: - pattern: '"{{ .Literal }}"' diff --git a/internal/generate/attrconsts/semgrep.gtpl b/internal/generate/attrconsts/semgrep.gtpl index 1462ff310f45..228639355f78 100644 --- a/internal/generate/attrconsts/semgrep.gtpl +++ b/internal/generate/attrconsts/semgrep.gtpl @@ -7,7 +7,7 @@ rules: message: Use the constant `names.Attr{{ .Constant }}` for the string literal "{{ .Literal }}" paths: include: - - "internal/service/**/*.go" + - 
"/internal/service/**/*.go" patterns: - pattern: '"{{ .Literal }}"' - pattern-not-regex: '"{{ .Literal }}":\s+test\w+,' diff --git a/internal/generate/common/generator.go b/internal/generate/common/generator.go index da8b4ad53a72..e9143ab9c1ae 100644 --- a/internal/generate/common/generator.go +++ b/internal/generate/common/generator.go @@ -86,6 +86,15 @@ func (g *Generator) NewUnformattedFileDestination(filename string) Destination { } } +func (g *Generator) NewFileDestinationWithFormatter(filename string, formatter func([]byte) ([]byte, error)) Destination { + return &fileDestination{ + filename: filename, + baseDestination: baseDestination{ + formatter: formatter, + }, + } +} + type fileDestination struct { baseDestination append bool diff --git a/internal/generate/identitytests/main.go b/internal/generate/identitytests/main.go index 61b97e7cdc23..aa8c634e626e 100644 --- a/internal/generate/identitytests/main.go +++ b/internal/generate/identitytests/main.go @@ -13,7 +13,6 @@ import ( "go/ast" "go/parser" "go/token" - "iter" "os" "path" "path/filepath" @@ -22,10 +21,9 @@ import ( "strconv" "strings" "text/template" - "time" "github.com/dlclark/regexp2" - acctestgen "github.com/hashicorp/terraform-provider-aws/internal/acctest/generate" + "github.com/hashicorp/go-version" "github.com/hashicorp/terraform-provider-aws/internal/generate/common" "github.com/hashicorp/terraform-provider-aws/internal/generate/tests" tfmaps "github.com/hashicorp/terraform-provider-aws/internal/maps" @@ -97,20 +95,13 @@ func main() { } for _, resource := range v.identityResources { + resource.service = &svc + sourceName := resource.FileName ext := filepath.Ext(sourceName) sourceName = strings.TrimSuffix(sourceName, ext) sourceName = strings.TrimSuffix(sourceName, "_") - if name, err := svc.ProviderNameUpper(resource.TypeName); err != nil { - g.Fatalf("determining provider service name: %w", err) - } else { - resource.ResourceProviderNameUpper = name - } - resource.PackageProviderNameUpper 
= svc.PackageProviderNameUpper() - resource.ProviderPackage = servicePackage - resource.ARNNamespace = svc.ARNNamespace() - if svc.primary.IsGlobal() { resource.IsGlobal = true } @@ -133,8 +124,16 @@ func main() { "inc": func(i int) int { return i + 1 }, + "NewVersion": version.NewVersion, + } + templates := template.New("identitytests").Funcs(templateFuncMap) + + templates, err = tests.AddCommonResourceTestTemplates(templates) + if err != nil { + g.Fatalf(err.Error()) } - templates, err := template.New("identitytests").Funcs(templateFuncMap).Parse(resourceTestGoTmpl) + + templates, err = templates.Parse(resourceTestGoTmpl) if err != nil { g.Fatalf("parsing base Go test template: %w", err) } @@ -147,38 +146,42 @@ func main() { g.Fatalf("generating file (%s): %s", filename, err) } - basicConfigTmplFile := path.Join("testdata", "tmpl", fmt.Sprintf("%s_basic.gtpl", sourceName)) + basicConfigTmplFile := fmt.Sprintf("%s_basic.gtpl", sourceName) + basicConfigTmplPath := path.Join("testdata", "tmpl", basicConfigTmplFile) var configTmplFile string - var configTmpl string - if _, err := os.Stat(basicConfigTmplFile); err == nil { + var configTmplPath string + if _, err := os.Stat(basicConfigTmplPath); err == nil { configTmplFile = basicConfigTmplFile + configTmplPath = basicConfigTmplPath } else if !errors.Is(err, os.ErrNotExist) { - g.Fatalf("accessing config template %q: %w", basicConfigTmplFile, err) + g.Fatalf("accessing config template %q: %w", basicConfigTmplPath, err) } - tagsConfigTmplFile := path.Join("testdata", "tmpl", fmt.Sprintf("%s_tags.gtpl", sourceName)) - if configTmplFile == "" { - if _, err := os.Stat(tagsConfigTmplFile); err == nil { + tagsConfigTmplFile := fmt.Sprintf("%s_tags.gtpl", sourceName) + tagsConfigTmplPath := path.Join("testdata", "tmpl", tagsConfigTmplFile) + if configTmplPath == "" { + if _, err := os.Stat(tagsConfigTmplPath); err == nil { configTmplFile = tagsConfigTmplFile + configTmplPath = tagsConfigTmplPath } else if !errors.Is(err, 
os.ErrNotExist) { - g.Fatalf("accessing config template %q: %w", tagsConfigTmplFile, err) + g.Fatalf("accessing config template %q: %w", tagsConfigTmplPath, err) } } - if configTmplFile == "" { - g.Errorf("no config template found for %q at %q or %q", sourceName, basicConfigTmplFile, tagsConfigTmplFile) + if configTmplPath == "" { + g.Errorf("no config template found for %q at %q or %q", sourceName, basicConfigTmplPath, tagsConfigTmplPath) continue } - b, err := os.ReadFile(configTmplFile) + b, err := os.ReadFile(configTmplPath) if err != nil { - g.Fatalf("reading config template %q: %w", configTmplFile, err) + g.Fatalf("reading config template %q: %w", configTmplPath, err) } - configTmpl = string(b) + configTmpl := string(b) resource.GenerateConfig = true if resource.GenerateConfig { - additionalTfVars := tfmaps.Keys(resource.additionalTfVars) + additionalTfVars := tfmaps.Keys(resource.AdditionalTfVars_) slices.Sort(additionalTfVars) testDirPath := path.Join("testdata", resource.Name) @@ -187,14 +190,14 @@ func main() { g.Fatalf("parsing base Terraform config template: %s", err) } - tfTemplates, err = tests.AddCommonTemplates(tfTemplates) + tfTemplates, err = tests.AddCommonTfTemplates(tfTemplates) if err != nil { g.Fatalf(err.Error()) } _, err = tfTemplates.New("body").Parse(configTmpl) if err != nil { - g.Fatalf("parsing config template %q: %s", tagsConfigTmplFile, err) + g.Fatalf("parsing config template %q: %s", configTmplPath, err) } _, err = tfTemplates.New("region").Parse("") @@ -204,11 +207,62 @@ func main() { common := commonConfig{ AdditionalTfVars: additionalTfVars, + RequiredEnvVars: resource.RequiredEnvVars, WithRName: (resource.Generator != ""), } generateTestConfig(g, testDirPath, "basic", tfTemplates, common) + if resource.PreIdentityVersion != nil { + if resource.PreIdentityVersion.Equal(v5_100_0) { + tfTemplatesV5, err := tfTemplates.Clone() + if err != nil { + g.Fatalf("cloning Terraform config template: %s", err) + } + ext := 
filepath.Ext(configTmplFile) + name := strings.TrimSuffix(configTmplFile, ext) + configTmplV5File := name + "_v5.100.0" + ext + configTmplV5Path := path.Join("testdata", "tmpl", configTmplV5File) + if _, err := os.Stat(configTmplV5Path); err == nil { + b, err := os.ReadFile(configTmplV5Path) + if err != nil { + g.Fatalf("reading config template %q: %s", configTmplV5Path, err) + } + configTmplV5 := string(b) + _, err = tfTemplatesV5.New("body").Parse(configTmplV5) + if err != nil { + g.Fatalf("parsing config template %q: %s", configTmplV5Path, err) + } + } + commonV5 := common + commonV5.ExternalProviders = map[string]requiredProvider{ + "aws": { + Source: "hashicorp/aws", + Version: "5.100.0", + }, + } + generateTestConfig(g, testDirPath, "basic_v5.100.0", tfTemplatesV5, commonV5) + + commonV6 := common + commonV6.ExternalProviders = map[string]requiredProvider{ + "aws": { + Source: "hashicorp/aws", + Version: "6.0.0", + }, + } + generateTestConfig(g, testDirPath, "basic_v6.0.0", tfTemplates, commonV6) + } else { + commonPreIdentity := common + commonPreIdentity.ExternalProviders = map[string]requiredProvider{ + "aws": { + Source: "hashicorp/aws", + Version: resource.PreIdentityVersion.String(), + }, + } + generateTestConfig(g, testDirPath, fmt.Sprintf("basic_v%s", resource.PreIdentityVersion.String()), tfTemplates, commonPreIdentity) + } + } + _, err = tfTemplates.New("region").Parse("\n region = var.region\n") if err != nil { g.Fatalf("parsing config template: %s", err) @@ -227,11 +281,19 @@ func main() { } } +var ( + v5_100_0 = version.Must(version.NewVersion("5.100.0")) +) + type serviceRecords struct { primary data.ServiceRecord additional []data.ServiceRecord } +func (sr serviceRecords) ProviderPackage() string { + return sr.primary.ProviderPackage() +} + func (sr serviceRecords) ProviderNameUpper(typeName string) (string, error) { if len(sr.additional) == 0 { return sr.primary.ProviderNameUpper(), nil @@ -293,37 +355,6 @@ func (sr serviceRecords) 
ARNNamespace() string { return sr.primary.ARNNamespace() } -type implementation string - -const ( - implementationFramework implementation = "framework" - implementationSDK implementation = "sdk" -) - -type importAction int - -const ( - importActionNoop importAction = iota - importActionUpdate - importActionReplace -) - -func (i importAction) String() string { - switch i { - case importActionNoop: - return "NoOp" - - case importActionUpdate: - return "Update" - - case importActionReplace: - return "Replace" - - default: - return "" - } -} - type triBoolean uint const ( @@ -333,82 +364,51 @@ const ( ) type ResourceDatum struct { - ProviderPackage string - ResourceProviderNameUpper string - PackageProviderNameUpper string - Name string - TypeName string - DestroyTakesT bool - HasExistsFunc bool - ExistsTypeName string - ExistsTakesT bool - FileName string - Generator string - idAttrDuplicates string // TODO: Remove. Still needed for Parameterized Identity - NoImport bool - ImportStateID string - importStateIDAttribute string - ImportStateIDFunc string - ImportIgnore []string - Implementation implementation - Serialize bool - SerializeDelay bool - SerializeParallelTests bool - PreChecks []codeBlock - PreChecksWithRegion []codeBlock - PreCheckRegions []string - GoImports []goImport - GenerateConfig bool - InitCodeBlocks []codeBlock - additionalTfVars map[string]string - CheckDestroyNoop bool - overrideIdentifierAttribute string - OverrideResourceType string - ARNNamespace string - ARNFormat string - arnAttribute string - isARNFormatGlobal triBoolean - ArnIdentity bool - MutableIdentity bool - IsGlobal bool - isSingleton bool - HasRegionOverrideTest bool - UseAlternateAccount bool - identityAttributes []string - plannableImportAction importAction - identityAttribute string - IdentityDuplicateAttrs []string - IDAttrFormat string + service *serviceRecords + FileName string + idAttrDuplicates string // TODO: Remove. 
Still needed for Parameterized Identity + GenerateConfig bool + ARNFormat string + arnAttribute string + isARNFormatGlobal triBoolean + ArnIdentity bool + MutableIdentity bool + IsGlobal bool + isSingleton bool + HasRegionOverrideTest bool + identityAttributes []identityAttribute + identityAttribute string + IdentityDuplicateAttrs []string + IDAttrFormat string + HasV6_0NullValuesError bool + HasV6_0RefreshError bool + HasNoPreExistingResource bool + PreIdentityVersion *version.Version + tests.CommonArgs } -func (d ResourceDatum) AdditionalTfVars() map[string]string { - return tfmaps.ApplyToAllKeys(d.additionalTfVars, func(k string) string { - return acctestgen.ConstOrQuote(k) - }) +func (d ResourceDatum) ProviderPackage() string { + return d.service.ProviderPackage() } -func (d ResourceDatum) HasIDAttrDuplicates() bool { - return d.idAttrDuplicates != "" +func (d ResourceDatum) ResourceProviderNameUpper() (string, error) { + return d.service.ProviderNameUpper(d.TypeName) } -func (d ResourceDatum) IDAttrDuplicates() string { - return namesgen.ConstOrQuote(d.idAttrDuplicates) +func (d ResourceDatum) PackageProviderNameUpper() string { + return d.service.PackageProviderNameUpper() } -func (d ResourceDatum) HasImportStateIDAttribute() bool { - return d.importStateIDAttribute != "" +func (d ResourceDatum) ARNNamespace() string { + return d.service.ARNNamespace() } -func (d ResourceDatum) ImportStateIDAttribute() string { - return namesgen.ConstOrQuote(d.importStateIDAttribute) -} - -func (d ResourceDatum) OverrideIdentifier() bool { - return d.overrideIdentifierAttribute != "" +func (d ResourceDatum) HasIDAttrDuplicates() bool { + return d.idAttrDuplicates != "" } -func (d ResourceDatum) OverrideIdentifierAttribute() string { - return namesgen.ConstOrQuote(d.overrideIdentifierAttribute) +func (d ResourceDatum) IDAttrDuplicates() string { + return namesgen.ConstOrQuote(d.idAttrDuplicates) } func (d ResourceDatum) IsARNIdentity() bool { @@ -439,14 +439,6 @@ func (d 
ResourceDatum) HasInherentRegion() bool { return d.IsARNIdentity() || d.IsRegionalSingleton() } -func (d ResourceDatum) HasImportIgnore() bool { - return len(d.ImportIgnore) > 0 -} - -func (d ResourceDatum) PlannableResourceAction() string { - return d.plannableImportAction.String() -} - func (d ResourceDatum) IdentityAttribute() string { return namesgen.ConstOrQuote(d.identityAttribute) } @@ -459,25 +451,31 @@ func (r ResourceDatum) IsARNFormatGlobal() bool { return r.isARNFormatGlobal == triBooleanTrue } -func (r ResourceDatum) IdentityAttributes() []string { - return tfslices.ApplyToAll(r.identityAttributes, func(s string) string { - return namesgen.ConstOrQuote(s) - }) +func (r ResourceDatum) IdentityAttributes() []identityAttribute { + return r.identityAttributes } -type goImport struct { - Path string - Alias string +type identityAttribute struct { + name string + Optional bool + TestNotNull bool } -type codeBlock struct { - Code string +func (i identityAttribute) Name() string { + return namesgen.ConstOrQuote(i.name) } type commonConfig struct { - AdditionalTfVars []string - WithRName bool - WithRegion bool + AdditionalTfVars []string + WithRName bool + WithRegion bool + ExternalProviders map[string]requiredProvider + RequiredEnvVars []string +} + +type requiredProvider struct { + Source string + Version string } type ConfigDatum struct { @@ -551,11 +549,9 @@ func (v *visitor) processFuncDecl(funcDecl *ast.FuncDecl) { d := ResourceDatum{ FileName: v.fileName, - additionalTfVars: make(map[string]string), + CommonArgs: tests.InitCommonArgs(), IsGlobal: false, - HasExistsFunc: true, HasRegionOverrideTest: true, - plannableImportAction: importActionNoop, } hasIdentity := false skip := false @@ -572,7 +568,7 @@ func (v *visitor) processFuncDecl(funcDecl *ast.FuncDecl) { break case "FrameworkResource": - d.Implementation = implementationFramework + d.Implementation = tests.ImplementationFramework args := common.ParseArgs(m[3]) if len(args.Positional) == 0 { v.errs 
= append(v.errs, fmt.Errorf("no type name: %s", fmt.Sprintf("%s.%s", v.packageName, v.functionName))) @@ -589,7 +585,7 @@ func (v *visitor) processFuncDecl(funcDecl *ast.FuncDecl) { break case "SDKResource": - d.Implementation = implementationSDK + d.Implementation = tests.ImplementationSDK args := common.ParseArgs(m[3]) if len(args.Positional) == 0 { v.errs = append(v.errs, fmt.Errorf("no type name: %s", fmt.Sprintf("%s.%s", v.packageName, v.functionName))) @@ -618,7 +614,7 @@ func (v *visitor) processFuncDecl(funcDecl *ast.FuncDecl) { if attr, ok := args.Keyword["identityDuplicateAttributes"]; ok { attrs = strings.Split(attr, ";") } - if d.Implementation == implementationSDK { + if d.Implementation == tests.ImplementationSDK { attrs = append(attrs, "id") } slices.Sort(attrs) @@ -634,7 +630,30 @@ func (v *visitor) processFuncDecl(funcDecl *ast.FuncDecl) { v.errs = append(v.errs, fmt.Errorf("no Identity attribute name: %s", fmt.Sprintf("%s.%s", v.packageName, v.functionName))) continue } - d.identityAttributes = append(d.identityAttributes, args.Positional[0]) + + identityAttribute := identityAttribute{ + name: args.Positional[0], + } + + if attr, ok := args.Keyword["optional"]; ok { + if b, err := strconv.ParseBool(attr); err != nil { + v.errs = append(v.errs, fmt.Errorf("invalid optional value: %q at %s. Should be boolean value.", attr, fmt.Sprintf("%s.%s", v.packageName, v.functionName))) + continue + } else { + identityAttribute.Optional = b + } + } + + if attr, ok := args.Keyword["testNotNull"]; ok { + if b, err := strconv.ParseBool(attr); err != nil { + v.errs = append(v.errs, fmt.Errorf("invalid testNotNull value: %q at %s. 
Should be boolean value.", attr, fmt.Sprintf("%s.%s", v.packageName, v.functionName))) + continue + } else { + identityAttribute.TestNotNull = b + } + } + + d.identityAttributes = append(d.identityAttributes, identityAttribute) case "SingletonIdentity": hasIdentity = true @@ -652,8 +671,8 @@ func (v *visitor) processFuncDecl(funcDecl *ast.FuncDecl) { } if attr, ok := args.Keyword["global"]; ok { - if b, err := strconv.ParseBool(attr); err != nil { - v.errs = append(v.errs, fmt.Errorf("invalid global value: %q at %s. Should be boolean value.", attr, fmt.Sprintf("%s.%s", v.packageName, v.functionName))) + if b, err := tests.ParseBoolAttr("global", attr); err != nil { + v.errs = append(v.errs, err) continue } else { if b { @@ -686,263 +705,105 @@ func (v *visitor) processFuncDecl(funcDecl *ast.FuncDecl) { case "NoImport": d.NoImport = true - case "Testing": - args := common.ParseArgs(m[3]) + // TODO: allow underscore? + case "V60SDKv2Fix": + d.HasV6_0NullValuesError = true + d.PreIdentityVersion = v5_100_0 - if attr, ok := args.Keyword["destroyTakesT"]; ok { - if b, err := strconv.ParseBool(attr); err != nil { - v.errs = append(v.errs, fmt.Errorf("invalid destroyTakesT value: %q at %s. Should be boolean value.", attr, fmt.Sprintf("%s.%s", v.packageName, v.functionName))) - continue - } else { - d.DestroyTakesT = b - } - } - if attr, ok := args.Keyword["checkDestroyNoop"]; ok { - if b, err := strconv.ParseBool(attr); err != nil { - v.errs = append(v.errs, fmt.Errorf("invalid checkDestroyNoop value: %q at %s. 
Should be boolean value.", attr, fmt.Sprintf("%s.%s", v.packageName, v.functionName))) - continue - } else { - d.CheckDestroyNoop = b - d.GoImports = append(d.GoImports, - goImport{ - Path: "github.com/hashicorp/terraform-provider-aws/internal/acctest", - }, - ) - } - } - if attr, ok := args.Keyword["domainTfVar"]; ok { - varName := "domain" - if len(attr) > 0 { - varName = attr - } - d.GoImports = append(d.GoImports, - goImport{ - Path: "github.com/hashicorp/terraform-provider-aws/internal/acctest", - }, - ) - d.InitCodeBlocks = append(d.InitCodeBlocks, codeBlock{ - Code: fmt.Sprintf(`%s := acctest.RandomDomainName()`, varName), - }) - d.additionalTfVars[varName] = varName - } - if attr, ok := args.Keyword["subdomainTfVar"]; ok { - parentName := "domain" - varName := "subdomain" - parts := strings.Split(attr, ";") - if len(parts) > 1 { - if len(parts[0]) > 0 { - parentName = parts[0] - } - if len(parts[1]) > 0 { - varName = parts[1] - } - } - d.GoImports = append(d.GoImports, - goImport{ - Path: "github.com/hashicorp/terraform-provider-aws/internal/acctest", - }, - ) - d.InitCodeBlocks = append(d.InitCodeBlocks, codeBlock{ - Code: fmt.Sprintf(`%s := acctest.RandomDomain()`, parentName), - }) - d.InitCodeBlocks = append(d.InitCodeBlocks, codeBlock{ - Code: fmt.Sprintf(`%s := %s.RandomSubdomain()`, varName, parentName), - }) - d.additionalTfVars[parentName] = fmt.Sprintf("%s.String()", parentName) - d.additionalTfVars[varName] = fmt.Sprintf("%s.String()", varName) - } - if attr, ok := args.Keyword["hasExistsFunction"]; ok { - if b, err := strconv.ParseBool(attr); err != nil { - v.errs = append(v.errs, fmt.Errorf("invalid existsFunction value: %q at %s. 
Should be boolean value.", attr, fmt.Sprintf("%s.%s", v.packageName, v.functionName))) - continue - } else { - d.HasExistsFunc = b - } - } - if attr, ok := args.Keyword["existsType"]; ok { - if typeName, importSpec, err := parseIdentifierSpec(attr); err != nil { - v.errs = append(v.errs, fmt.Errorf("%s: %w", attr, fmt.Sprintf("%s.%s", v.packageName, v.functionName), err)) - continue + args := common.ParseArgs(m[3]) + if attr, ok := args.Keyword["v60RefreshError"]; ok { + if b, err := tests.ParseBoolAttr("v60RefreshError", attr); err != nil { + v.errs = append(v.errs, err) } else { - d.ExistsTypeName = typeName - if importSpec != nil { - d.GoImports = append(d.GoImports, *importSpec) - } + d.HasV6_0RefreshError = b } } - if attr, ok := args.Keyword["existsTakesT"]; ok { - if b, err := strconv.ParseBool(attr); err != nil { - v.errs = append(v.errs, fmt.Errorf("invalid existsTakesT value: %q at %s. Should be boolean value.", attr, fmt.Sprintf("%s.%s", v.packageName, v.functionName))) - continue - } else { - d.ExistsTakesT = b - } + + case "Testing": + args := common.ParseArgs(m[3]) + + if err := tests.ParseTestingAnnotations(args, &d.CommonArgs); err != nil { + v.errs = append(v.errs, fmt.Errorf("%s: %w", fmt.Sprintf("%s.%s", v.packageName, v.functionName), err)) + continue } - if attr, ok := args.Keyword["generator"]; ok { - if attr == "false" { - generatorSeen = true - } else if funcName, importSpec, err := parseIdentifierSpec(attr); err != nil { - v.errs = append(v.errs, fmt.Errorf("%s: %w", attr, fmt.Sprintf("%s.%s", v.packageName, v.functionName), err)) - continue - } else { - d.Generator = funcName - if importSpec != nil { - d.GoImports = append(d.GoImports, *importSpec) - } - generatorSeen = true - } + + // This needs better handling + if _, ok := args.Keyword["generator"]; ok { + generatorSeen = true } + if attr, ok := args.Keyword["idAttrDuplicates"]; ok { d.idAttrDuplicates = attr d.GoImports = append(d.GoImports, - goImport{ + tests.GoImport{ Path: 
"github.com/hashicorp/terraform-plugin-testing/config", }, - goImport{ + tests.GoImport{ Path: "github.com/hashicorp/terraform-plugin-testing/tfjsonpath", }, ) } - if attr, ok := args.Keyword["importIgnore"]; ok { - d.ImportIgnore = strings.Split(attr, ";") - for i, val := range d.ImportIgnore { - d.ImportIgnore[i] = namesgen.ConstOrQuote(val) - } - d.plannableImportAction = importActionUpdate - } - if attr, ok := args.Keyword["importStateId"]; ok { - d.ImportStateID = attr - } - if attr, ok := args.Keyword["importStateIdAttribute"]; ok { - d.importStateIDAttribute = attr - } - if attr, ok := args.Keyword["importStateIdFunc"]; ok { - d.ImportStateIDFunc = attr - } - if attr, ok := args.Keyword["name"]; ok { - d.Name = strings.ReplaceAll(attr, " ", "") - } - if attr, ok := args.Keyword["noImport"]; ok { - if b, err := strconv.ParseBool(attr); err != nil { - v.errs = append(v.errs, fmt.Errorf("invalid noImport value: %q at %s. Should be boolean value.", attr, fmt.Sprintf("%s.%s", v.packageName, v.functionName))) - continue - } else { - d.NoImport = b - } - } - if attr, ok := args.Keyword["plannableImportAction"]; ok { - switch attr { - case importActionNoop.String(): - d.plannableImportAction = importActionNoop - case importActionUpdate.String(): - d.plannableImportAction = importActionUpdate + if attr, ok := args.Keyword["identityTest"]; ok { + switch attr { + case "true": + hasIdentity = true - case importActionReplace.String(): - d.plannableImportAction = importActionReplace + case "false": + v.g.Infof("Skipping Identity test for %s.%s", v.packageName, v.functionName) + skip = true default: - v.errs = append(v.errs, fmt.Errorf("invalid plannableImportAction value: %q at %s. 
Must be one of %s.", attr, fmt.Sprintf("%s.%s", v.packageName, v.functionName), []string{importActionNoop.String(), importActionUpdate.String(), importActionReplace.String()})) + v.errs = append(v.errs, fmt.Errorf("invalid identityTest value: %q at %s.", attr, fmt.Sprintf("%s.%s", v.packageName, v.functionName))) continue } } - if attr, ok := args.Keyword["preCheck"]; ok { - if code, importSpec, err := parseIdentifierSpec(attr); err != nil { - v.errs = append(v.errs, fmt.Errorf("%s: %w", attr, fmt.Sprintf("%s.%s", v.packageName, v.functionName), err)) + if attr, ok := args.Keyword["identityRegionOverrideTest"]; ok { + if b, err := tests.ParseBoolAttr("identityRegionOverrideTest", attr); err != nil { + v.errs = append(v.errs, err) continue } else { - d.PreChecks = append(d.PreChecks, codeBlock{ - Code: fmt.Sprintf("%s(ctx, t)", code), - }) - if importSpec != nil { - d.GoImports = append(d.GoImports, *importSpec) - } + d.HasRegionOverrideTest = b } } - if attr, ok := args.Keyword["preCheckRegion"]; ok { - regions := strings.Split(attr, ";") - d.PreCheckRegions = tfslices.ApplyToAll(regions, func(s string) string { - return endpointsConstOrQuote(s) - }) - d.GoImports = append(d.GoImports, - goImport{ - Path: "github.com/hashicorp/aws-sdk-go-base/v2/endpoints", - }, - ) - } - if attr, ok := args.Keyword["preCheckWithRegion"]; ok { - if code, importSpec, err := parseIdentifierSpec(attr); err != nil { - v.errs = append(v.errs, fmt.Errorf("%s: %w", attr, fmt.Sprintf("%s.%s", v.packageName, v.functionName), err)) - continue + if attr, ok := args.Keyword["v60NullValuesError"]; ok { + if b, err := tests.ParseBoolAttr("v60NullValuesError", attr); err != nil { + v.errs = append(v.errs, err) } else { - d.PreChecksWithRegion = append(d.PreChecks, codeBlock{ - Code: code, - }) - if importSpec != nil { - d.GoImports = append(d.GoImports, *importSpec) + d.HasV6_0NullValuesError = b + if b { + d.PreIdentityVersion = v5_100_0 } } } - if attr, ok := 
args.Keyword["useAlternateAccount"]; ok { - if b, err := strconv.ParseBool(attr); err != nil { - v.errs = append(v.errs, fmt.Errorf("invalid useAlternateAccount value: %q at %s. Should be boolean value.", attr, fmt.Sprintf("%s.%s", v.packageName, v.functionName))) - continue - } else if b { - d.UseAlternateAccount = true - d.PreChecks = append(d.PreChecks, codeBlock{ - Code: "acctest.PreCheckAlternateAccount(t)", - }) - } - } - if attr, ok := args.Keyword["serialize"]; ok { - if b, err := strconv.ParseBool(attr); err != nil { - v.errs = append(v.errs, fmt.Errorf("invalid serialize value: %q at %s. Should be boolean value.", attr, fmt.Sprintf("%s.%s", v.packageName, v.functionName))) - continue + if attr, ok := args.Keyword["v60RefreshError"]; ok { + if b, err := tests.ParseBoolAttr("v60RefreshError", attr); err != nil { + v.errs = append(v.errs, err) } else { - d.Serialize = b - } - } - if attr, ok := args.Keyword["serializeParallelTests"]; ok { - if b, err := strconv.ParseBool(attr); err != nil { - v.errs = append(v.errs, fmt.Errorf("invalid serializeParallelTests value: %q at %s. Should be boolean value.", attr, fmt.Sprintf("%s.%s", v.packageName, v.functionName))) - continue - } else { - d.SerializeParallelTests = b - } - } - if attr, ok := args.Keyword["serializeDelay"]; ok { - if b, err := strconv.ParseBool(attr); err != nil { - v.errs = append(v.errs, fmt.Errorf("invalid serializeDelay value: %q at %s. 
Should be duration value.", attr, fmt.Sprintf("%s.%s", v.packageName, v.functionName))) - continue - } else { - d.SerializeDelay = b + d.HasV6_0RefreshError = b + if b { + d.PreIdentityVersion = v5_100_0 + } } } - if attr, ok := args.Keyword["identityTest"]; ok { - switch attr { - case "true": - hasIdentity = true - - case "false": - v.g.Infof("Skipping Identity test for %s.%s", v.packageName, v.functionName) - skip = true - - default: - v.errs = append(v.errs, fmt.Errorf("invalid identityTest value: %q at %s.", attr, fmt.Sprintf("%s.%s", v.packageName, v.functionName))) + if attr, ok := args.Keyword["preIdentityVersion"]; ok { + version, err := version.NewVersion(attr) + if err != nil { + v.errs = append(v.errs, fmt.Errorf("invalid preIdentityVersion value: %q at %s. Should be version value.", attr, fmt.Sprintf("%s.%s", v.packageName, v.functionName))) continue } + d.PreIdentityVersion = version } - if attr, ok := args.Keyword["identityRegionOverrideTest"]; ok { - if b, err := strconv.ParseBool(attr); err != nil { - v.errs = append(v.errs, fmt.Errorf("invalid identityRegionOverrideTest value: %q at %s. Should be duration value.", attr, fmt.Sprintf("%s.%s", v.packageName, v.functionName))) - continue + if attr, ok := args.Keyword["hasNoPreExistingResource"]; ok { + if b, err := tests.ParseBoolAttr("hasNoPreExistingResource", attr); err != nil { + v.errs = append(v.errs, err) } else { - d.HasRegionOverrideTest = b + d.HasNoPreExistingResource = b } } if attr, ok := args.Keyword["tlsKey"]; ok { - if b, err := strconv.ParseBool(attr); err != nil { - v.errs = append(v.errs, fmt.Errorf("invalid tlsKey value: %q at %s. 
Should be boolean value.", attr, fmt.Sprintf("%s.%s", v.packageName, v.functionName))) + if b, err := tests.ParseBoolAttr("tlsKey", attr); err != nil { + v.errs = append(v.errs, err) continue } else { tlsKey = b @@ -959,17 +820,23 @@ func (v *visitor) processFuncDecl(funcDecl *ast.FuncDecl) { if len(tlsKeyCN) == 0 { tlsKeyCN = "acctest.RandomDomain().String()" d.GoImports = append(d.GoImports, - goImport{ + tests.GoImport{ Path: "github.com/hashicorp/terraform-provider-aws/internal/acctest", }, ) } - d.InitCodeBlocks = append(d.InitCodeBlocks, codeBlock{ + d.InitCodeBlocks = append(d.InitCodeBlocks, tests.CodeBlock{ Code: fmt.Sprintf(`privateKeyPEM := acctest.TLSRSAPrivateKeyPEM(t, 2048) certificatePEM := acctest.TLSRSAX509SelfSignedCertificatePEM(t, privateKeyPEM, %s)`, tlsKeyCN), }) - d.additionalTfVars["certificate_pem"] = "certificatePEM" - d.additionalTfVars["private_key_pem"] = "privateKeyPEM" + d.AdditionalTfVars_["certificate_pem"] = tests.TFVar{ + GoVarName: "certificatePEM", + Type: tests.TFVarTypeString, + } + d.AdditionalTfVars_["private_key_pem"] = tests.TFVar{ + GoVarName: "privateKeyPEM", + Type: tests.TFVarTypeString, + } } if d.IsRegionalSingleton() { @@ -981,17 +848,17 @@ func (v *visitor) processFuncDecl(funcDecl *ast.FuncDecl) { } if len(d.identityAttributes) == 1 { - d.identityAttribute = d.identityAttributes[0] + d.identityAttribute = d.identityAttributes[0].name } if hasIdentity { if !skip { if d.idAttrDuplicates != "" { d.GoImports = append(d.GoImports, - goImport{ + tests.GoImport{ Path: "github.com/hashicorp/terraform-plugin-testing/config", }, - goImport{ + tests.GoImport{ Path: "github.com/hashicorp/terraform-plugin-testing/tfjsonpath", }, ) @@ -1000,14 +867,18 @@ func (v *visitor) processFuncDecl(funcDecl *ast.FuncDecl) { v.errs = append(v.errs, fmt.Errorf("no name parameter set: %s", fmt.Sprintf("%s.%s", v.packageName, v.functionName))) return } + if !d.HasNoPreExistingResource && d.PreIdentityVersion == nil { + v.errs = append(v.errs, 
fmt.Errorf("preIdentityVersion is required when hasNoPreExistingResource is false: %s", fmt.Sprintf("%s.%s", v.packageName, v.functionName))) + return + } if !generatorSeen { d.Generator = "sdkacctest.RandomWithPrefix(acctest.ResourcePrefix)" d.GoImports = append(d.GoImports, - goImport{ + tests.GoImport{ Path: "github.com/hashicorp/terraform-plugin-testing/helper/acctest", Alias: "sdkacctest", }, - goImport{ + tests.GoImport{ Path: "github.com/hashicorp/terraform-provider-aws/internal/acctest", }, ) @@ -1029,7 +900,7 @@ func (v *visitor) Visit(node ast.Node) ast.Visitor { return v } -func generateTestConfig(g *common.Generator, dirPath, test string, tfTemplates *template.Template, common commonConfig) { +func generateTestConfig(g *common.Generator, dirPath, test string, tfTemplates *template.Template, config commonConfig) { testName := test dirPath = path.Join(dirPath, testName) if err := os.MkdirAll(dirPath, 0755); err != nil { @@ -1037,10 +908,18 @@ func generateTestConfig(g *common.Generator, dirPath, test string, tfTemplates * } mainPath := path.Join(dirPath, "main_gen.tf") - tf := g.NewUnformattedFileDestination(mainPath) + var tf common.Destination + if test == "basic_v5.100.0" { + tf = g.NewFileDestinationWithFormatter(mainPath, func(b []byte) ([]byte, error) { + re := regexp.MustCompile(`(data\.aws_region\.\w+)\.region`) // nosemgrep:ci.calling-regexp.MustCompile-directly + return re.ReplaceAll(b, []byte("$1.name")), nil + }) + } else { + tf = g.NewUnformattedFileDestination(mainPath) + } configData := ConfigDatum{ - commonConfig: common, + commonConfig: config, } if err := tf.BufferTemplateSet(tfTemplates, configData); err != nil { g.Fatalf("error generating Terraform file %q: %s", mainPath, err) @@ -1050,67 +929,3 @@ func generateTestConfig(g *common.Generator, dirPath, test string, tfTemplates * g.Fatalf("generating file (%s): %s", mainPath, err) } } - -func parseIdentifierSpec(s string) (string, *goImport, error) { - parts := strings.Split(s, ";") - 
switch len(parts) { - case 1: - return parts[0], nil, nil - - case 2: - return parts[1], &goImport{ - Path: parts[0], - }, nil - - case 3: - return parts[2], &goImport{ - Path: parts[0], - Alias: parts[1], - }, nil - - default: - return "", nil, fmt.Errorf("invalid generator value: %q", s) - } -} - -func generateDurationStatement(d time.Duration) string { - var buf strings.Builder - - d = d.Round(1 * time.Second) - - if d >= time.Minute { - mins := d / time.Minute - fmt.Fprintf(&buf, "%d*time.Minute", mins) - d = d - mins*time.Minute - if d != 0 { - fmt.Fprint(&buf, "+") - } - } - if d != 0 { - secs := d / time.Second - fmt.Fprintf(&buf, "%d*time.Second", secs) - } - - return buf.String() -} - -func count[T any](s iter.Seq[T], f func(T) bool) (c int) { - for v := range s { - if f(v) { - c++ - } - } - return c -} - -func endpointsConstOrQuote(region string) string { - var buf strings.Builder - buf.WriteString("endpoints.") - - for _, part := range strings.Split(region, "-") { - buf.WriteString(strings.Title(part)) - } - buf.WriteString("RegionID") - - return buf.String() -} diff --git a/internal/generate/identitytests/resource_test.go.gtpl b/internal/generate/identitytests/resource_test.go.gtpl index 8b9568ec77d3..61ce4279329e 100644 --- a/internal/generate/identitytests/resource_test.go.gtpl +++ b/internal/generate/identitytests/resource_test.go.gtpl @@ -1,13 +1,5 @@ // Code generated by internal/generate/identitytests/main.go; DO NOT EDIT. -{{ define "Init" }} - ctx := acctest.Context(t) - {{ if .ExistsTypeName }} - var v {{ .ExistsTypeName }} - {{ end -}} - {{ template "commonInit" . }} -{{ end }} - {{/* This can be removed when the Exists check supports enhanced region support */}} {{ define "InitRegionOverride" }} ctx := acctest.Context(t) @@ -15,45 +7,11 @@ {{ template "commonInit" . 
}} {{ end }} -{{ define "commonInit" -}} - resourceName := "{{ .TypeName}}.test"{{ if .Generator }} - rName := {{ .Generator }} -{{- end }} -{{- range .InitCodeBlocks }} -{{ .Code }} -{{- end -}} -{{ if .UseAlternateAccount }} - providers := make(map[string]*schema.Provider) -{{ end }} -{{ end }} - -{{ define "Test" -}} -resource.{{ if and .Serialize (not .SerializeParallelTests) }}Test{{ else }}ParallelTest{{ end }} -{{- end }} - -{{ define "TestCaseSetup" -}} -{{ template "TestCaseSetupNoProviders" . }} -{{- if not .UseAlternateAccount }} - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, -{{- end -}} -{{- end }} - {{ define "TestCaseSetupNoProviders" -}} TerraformVersionChecks: []tfversion.TerraformVersionCheck{ tfversion.SkipBelow(tfversion.Version1_12_0), }, - PreCheck: func() { acctest.PreCheck(ctx, t) - {{- if gt (len .PreCheckRegions) 0 }} - acctest.PreCheckRegion(t, {{ range .PreCheckRegions}}{{ . }}, {{ end }}) - {{- end -}} - {{- range .PreChecks }} - {{ .Code }} - {{- end -}} - {{- range .PreChecksWithRegion }} - {{ .Code }}(ctx, t, acctest.Region()) - {{- end -}} - }, - ErrorCheck: acctest.ErrorCheck(t, names.{{ .PackageProviderNameUpper }}ServiceID), + {{ template "CommonTestCaseChecks" . }} CheckDestroy: {{ if .CheckDestroyNoop }}acctest.CheckDestroyNoop{{ else }}testAccCheck{{ .Name }}Destroy(ctx{{ if .DestroyTakesT }}, t{{ end }}){{ end }}, {{- end }} @@ -61,8 +19,8 @@ resource.{{ if and .Serialize (not .SerializeParallelTests) }}Test{{ else }}Para TerraformVersionChecks: []tfversion.TerraformVersionCheck{ tfversion.SkipBelow(tfversion.Version1_12_0), }, -PreCheck: func() { acctest.PreCheck(ctx, t) - {{- if gt (len .PreCheckRegions) 0 }} +PreCheck: func() { acctest.PreCheck(ctx, t) + {{- if .PreCheckRegions }} acctest.PreCheckAlternateRegion(t, {{ range .PreCheckRegions}}{{ . 
}}, {{ end }}) {{- end -}} {{- range .PreChecks }} @@ -186,7 +144,7 @@ ImportPlanChecks: resource.ImportPlanChecks{ plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), {{ else if gt (len .IdentityAttributes) 0 -}} {{ range .IdentityAttributes -}} - plancheck.ExpectKnownValue(resourceName, tfjsonpath.New({{ . }}), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New({{ .Name }}), knownvalue.NotNull()), {{ end -}} {{ else if ne .IdentityAttribute "" -}} plancheck.ExpectKnownValue(resourceName, tfjsonpath.New({{ .IdentityAttribute }}), knownvalue.NotNull()), @@ -229,7 +187,7 @@ ImportPlanChecks: resource.ImportPlanChecks{ plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), tfknownvalue.AccountID()), {{ else if gt (len .IdentityAttributes) 0 -}} {{ range .IdentityAttributes -}} - plancheck.ExpectKnownValue(resourceName, tfjsonpath.New({{ . }}), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New({{ .Name }}), knownvalue.NotNull()), {{ end -}} {{ end -}} {{ if not .IsGlobal -}} @@ -239,31 +197,11 @@ ImportPlanChecks: resource.ImportPlanChecks{ }, {{- end }} -{{ define "testname" -}} -{{ if .Serialize }}testAcc{{ else }}TestAcc{{ end }}{{ .ResourceProviderNameUpper }}{{ .Name }} -{{- end }} - -{{ define "ExistsCheck" }} - testAccCheck{{ .Name }}Exists(ctx, {{ if .ExistsTakesT }}t,{{ end }} resourceName{{ if .ExistsTypeName}}, &v{{ end }}), -{{ end }} - -{{ define "AdditionalTfVars" -}} - {{ range $name, $value := .AdditionalTfVars -}} - {{ $name }}: config.StringVariable({{ $value }}), - {{ end -}} -{{ end }} - package {{ .ProviderPackage }}_test import ( - {{ if .OverrideIdentifier }} - "context" - {{- end }} "testing" - {{ if .UseAlternateAccount }} - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - {{- end }} "github.com/hashicorp/terraform-plugin-testing/compare" "github.com/hashicorp/terraform-plugin-testing/config" 
"github.com/hashicorp/terraform-plugin-testing/helper/resource" @@ -276,10 +214,6 @@ import ( tfknownvalue "github.com/hashicorp/terraform-provider-aws/internal/acctest/knownvalue" tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" "github.com/hashicorp/terraform-provider-aws/names" - {{- if .OverrideIdentifier }} - tf{{ .ProviderPackage }} "github.com/hashicorp/terraform-provider-aws/internal/service/{{ .ProviderPackage }}" - "github.com/hashicorp/terraform-provider-aws/internal/types" - {{- end }} {{ range .GoImports -}} {{ if .Alias }}{{ .Alias }} {{ end }}"{{ .Path }}" {{ end }} @@ -293,8 +227,9 @@ func {{ template "testname" . }}_IdentitySerial(t *testing.T) { {{- end }} testCases := map[string]func(t *testing.T){ - acctest.CtBasic: {{ template "testname" . }}_Identity_Basic, - "ExistingResource": {{ template "testname" . }}_Identity_ExistingResource, + acctest.CtBasic: {{ template "testname" . }}_Identity_Basic, + "ExistingResource": {{ template "testname" . }}_Identity_ExistingResource, + "ExistingResourceNoRefresh": {{ template "testname" . }}_Identity_ExistingResource_NoRefresh_NoChange, {{ if .GenerateRegionOverrideTest -}} "RegionOverride": {{ template "testname" . }}_Identity_RegionOverride, {{ end -}} @@ -307,7 +242,7 @@ func {{ template "testname" . }}_IdentitySerial(t *testing.T) { func {{ template "testname" . }}_Identity_Basic(t *testing.T) { {{- template "Init" . }} - {{ template "Test" . }}(t, resource.TestCase{ + {{ template "Test" . }}(ctx, t, resource.TestCase{ {{ template "TestCaseSetup" . }} Steps: []resource.TestStep{ {{ $step := 1 -}} @@ -349,36 +284,34 @@ func {{ template "testname" . 
}}_Identity_Basic(t *testing.T) { {{ if not .IsGlobal -}} statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), {{ end -}} - {{ if .MutableIdentity -}} - // Resource Identity not supported for Mutable Identity - {{ else -}} - {{ if .ArnIdentity -}} + {{ if .ArnIdentity -}} + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ {{ if and (not .IsGlobal) .IsARNFormatGlobal -}} - statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ - names.AttrRegion: knownvalue.StringExact(acctest.Region()), - {{ .ARNAttribute }}: knownvalue.NotNull(), - }), + names.AttrRegion: knownvalue.StringExact(acctest.Region()), {{ end -}} - statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New({{ .ARNAttribute }})), - {{ else if .IsRegionalSingleton -}} - statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ - names.AttrAccountID: tfknownvalue.AccountID(), + {{ .ARNAttribute }}: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New({{ .ARNAttribute }})), + {{ else if .IsRegionalSingleton -}} + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + }), + {{ else -}} + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + {{ if not .IsGlobal -}} names.AttrRegion: knownvalue.StringExact(acctest.Region()), - }), - {{ else -}} - statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ - names.AttrAccountID: tfknownvalue.AccountID(), - {{ if not .IsGlobal -}} - names.AttrRegion: knownvalue.StringExact(acctest.Region()), - {{ end -}} - {{ range .IdentityAttributes -}} - {{ . 
}}: knownvalue.NotNull(), - {{ end }} - }), + {{ end -}} {{ range .IdentityAttributes -}} - statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New({{ . }})), + {{ .Name }}: {{ if or (not .Optional) .TestNotNull }}knownvalue.NotNull(){{ else }}knownvalue.Null(){{ end }}, {{ end }} - {{ end -}} + }), + {{ range .IdentityAttributes -}} + {{ if not .Optional -}} + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New({{ .Name }})), + {{ end -}} + {{ end }} {{ end -}} }, }, @@ -414,25 +347,21 @@ func {{ template "testname" . }}_Identity_Basic(t *testing.T) { }, // Step {{ ($step = inc $step) | print }}: Import block with Resource Identity - {{ if .MutableIdentity -}} - // Resource Identity not supported for Mutable Identity - {{- else -}} - { - {{ if .UseAlternateAccount -}} - ProtoV5ProviderFactories: acctest.ProtoV5FactoriesNamedAlternate(ctx, t, providers), - {{ end -}} - ConfigDirectory: config.StaticDirectory("testdata/{{ .Name }}/basic/"), - ConfigVariables: config.Variables{ {{ if .Generator }} - acctest.CtRName: config.StringVariable(rName),{{ end }} - {{ template "AdditionalTfVars" . }} - }, - {{- template "ImportBlockWithResourceIdentityBody" . -}} - {{ template "PlannableImportPlanChecks" . }} - {{ if ne .PlannableResourceAction "NoOp" -}} - ExpectNonEmptyPlan: true, - {{ end -}} + { + {{ if .UseAlternateAccount -}} + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesNamedAlternate(ctx, t, providers), + {{ end -}} + ConfigDirectory: config.StaticDirectory("testdata/{{ .Name }}/basic/"), + ConfigVariables: config.Variables{ {{ if .Generator }} + acctest.CtRName: config.StringVariable(rName),{{ end }} + {{ template "AdditionalTfVars" . }} }, - {{- end }} + {{- template "ImportBlockWithResourceIdentityBody" . -}} + {{ template "PlannableImportPlanChecks" . }} + {{ if ne .PlannableResourceAction "NoOp" -}} + ExpectNonEmptyPlan: true, + {{ end -}} + }, {{- end }} }, }) @@ -442,7 +371,7 @@ func {{ template "testname" . 
}}_Identity_Basic(t *testing.T) { func {{ template "testname" . }}_Identity_RegionOverride(t *testing.T) { {{- template "InitRegionOverride" . }} - {{ template "Test" . }}(t, resource.TestCase{ + {{ template "Test" . }}(ctx, t, resource.TestCase{ {{ template "TestCaseSetupRegionOverride" . }} Steps: []resource.TestStep{ {{ $step := 1 -}} @@ -476,34 +405,32 @@ func {{ template "testname" . }}_Identity_RegionOverride(t *testing.T) { statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New({{ .IDAttrDuplicates }}), compare.ValuesSame()), {{ end -}} statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), - {{ if .MutableIdentity -}} - // Resource Identity not supported for Mutable Identity - {{ else -}} - {{ if .ArnIdentity -}} + {{ if .ArnIdentity -}} + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ {{ if and (not .IsGlobal) .IsARNFormatGlobal -}} - statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ - names.AttrRegion: knownvalue.StringExact(acctest.AlternateRegion()), - {{ .ARNAttribute }}: knownvalue.NotNull(), - }), - {{ end -}} - statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New({{ .ARNAttribute }})), - {{ else if .IsRegionalSingleton -}} - statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ - names.AttrAccountID: tfknownvalue.AccountID(), - names.AttrRegion: knownvalue.StringExact(acctest.AlternateRegion()), - }), - {{ else -}} - statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ - names.AttrAccountID: tfknownvalue.AccountID(), names.AttrRegion: knownvalue.StringExact(acctest.AlternateRegion()), - {{ range .IdentityAttributes -}} - {{ . 
}}: knownvalue.NotNull(), - {{ end }} - }), + {{ end -}} + {{ .ARNAttribute }}: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New({{ .ARNAttribute }})), + {{ else if .IsRegionalSingleton -}} + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.AlternateRegion()), + }), + {{ else -}} + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.AlternateRegion()), {{ range .IdentityAttributes -}} - statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New({{ . }})), + {{ .Name }}: {{ if or (not .Optional) .TestNotNull }}knownvalue.NotNull(){{ else }}knownvalue.Null(){{ end }}, {{ end }} - {{ end -}} + }), + {{ range .IdentityAttributes -}} + {{ if not .Optional -}} + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New({{ .Name }})), + {{ end -}} + {{ end }} {{ end -}} }, }, @@ -527,22 +454,18 @@ func {{ template "testname" . }}_Identity_RegionOverride(t *testing.T) { }, {{ if .HasInherentRegion }} // Step {{ ($step = inc $step) | print }}: Import command without appended "@" - {{ if .MutableIdentity -}} - // Importing without appended "@" for Mutable Identity - {{- else -}} - { - {{ if .UseAlternateAccount -}} - ProtoV5ProviderFactories: acctest.ProtoV5FactoriesNamedAlternate(ctx, t, providers), - {{ end -}} - ConfigDirectory: config.StaticDirectory("testdata/{{ .Name }}/region_override/"), - ConfigVariables: config.Variables{ {{ if .Generator }} - acctest.CtRName: config.StringVariable(rName),{{ end }} - {{ template "AdditionalTfVars" . -}} - "region": config.StringVariable(acctest.AlternateRegion()), - }, - {{- template "ImportCommandWithIDBody" . 
-}} + { + {{ if .UseAlternateAccount -}} + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesNamedAlternate(ctx, t, providers), + {{ end -}} + ConfigDirectory: config.StaticDirectory("testdata/{{ .Name }}/region_override/"), + ConfigVariables: config.Variables{ {{ if .Generator }} + acctest.CtRName: config.StringVariable(rName),{{ end }} + {{ template "AdditionalTfVars" . -}} + "region": config.StringVariable(acctest.AlternateRegion()), }, - {{- end }} + {{- template "ImportCommandWithIDBody" . -}} + }, {{ end }} {{ if .HasInherentRegion }} // Step {{ ($step = inc $step) | print }}: Import block with Import ID and appended "@" @@ -567,31 +490,6 @@ func {{ template "testname" . }}_Identity_RegionOverride(t *testing.T) { }, {{ if .HasInherentRegion }} // Step {{ ($step = inc $step) | print }}: Import block with Import ID and no appended "@" - {{ if .MutableIdentity -}} - // Importing without appended "@" for Mutable Identity - {{- else -}} - { - {{ if .UseAlternateAccount -}} - ProtoV5ProviderFactories: acctest.ProtoV5FactoriesNamedAlternate(ctx, t, providers), - {{ end -}} - ConfigDirectory: config.StaticDirectory("testdata/{{ .Name }}/region_override/"), - ConfigVariables: config.Variables{ {{ if .Generator }} - acctest.CtRName: config.StringVariable(rName),{{ end }} - {{ template "AdditionalTfVars" . -}} - "region": config.StringVariable(acctest.AlternateRegion()), - }, - {{- template "ImportBlockWithIDBody" . -}} - {{ template "PlannableImportCrossRegionPlanChecks" . }} - {{ if ne .PlannableResourceAction "NoOp" -}} - ExpectNonEmptyPlan: true, - {{ end -}} - }, - {{- end }} - {{ end }} - // Step {{ ($step = inc $step) | print }}: Import block with Resource Identity - {{ if .MutableIdentity -}} - // Resource Identity not supported for Mutable Identity - {{- else -}} { {{ if .UseAlternateAccount -}} ProtoV5ProviderFactories: acctest.ProtoV5FactoriesNamedAlternate(ctx, t, providers), @@ -602,15 +500,761 @@ func {{ template "testname" . 
}}_Identity_RegionOverride(t *testing.T) { {{ template "AdditionalTfVars" . -}} "region": config.StringVariable(acctest.AlternateRegion()), }, - {{- template "ImportBlockWithResourceIdentityBody" . -}} + {{- template "ImportBlockWithIDBody" . -}} {{ template "PlannableImportCrossRegionPlanChecks" . }} {{ if ne .PlannableResourceAction "NoOp" -}} ExpectNonEmptyPlan: true, {{ end -}} }, - {{- end }} + {{ end }} + // Step {{ ($step = inc $step) | print }}: Import block with Resource Identity + { + {{ if .UseAlternateAccount -}} + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesNamedAlternate(ctx, t, providers), + {{ end -}} + ConfigDirectory: config.StaticDirectory("testdata/{{ .Name }}/region_override/"), + ConfigVariables: config.Variables{ {{ if .Generator }} + acctest.CtRName: config.StringVariable(rName),{{ end }} + {{ template "AdditionalTfVars" . -}} + "region": config.StringVariable(acctest.AlternateRegion()), + }, + {{- template "ImportBlockWithResourceIdentityBody" . -}} + {{ template "PlannableImportCrossRegionPlanChecks" . }} + {{ if ne .PlannableResourceAction "NoOp" -}} + ExpectNonEmptyPlan: true, + {{ end -}} + }, {{- end }} }, }) } {{ end }} + +{{ if .HasV6_0RefreshError }} + func {{ template "testname" . }}_Identity_ExistingResource_fromV5(t *testing.T) { + {{- template "Init" . }} + + {{ template "Test" . }}(ctx, t, resource.TestCase{ + {{ template "TestCaseSetupNoProviders" . }} + Steps: []resource.TestStep{ + {{ $step := 1 -}} + // Step {{ $step }}: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/{{ .Name }}/basic_v5.100.0/"), + ConfigVariables: config.Variables{ {{ if .Generator }} + acctest.CtRName: config.StringVariable(rName),{{ end }} + {{ template "AdditionalTfVars" . }} + }, + {{ if .HasExistsFunc -}} + Check: resource.ComposeAggregateTestCheckFunc( + {{- template "ExistsCheck" . 
-}} + ), + {{ end -}} + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step {{ ($step = inc $step) | print }}: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/{{ .Name }}/basic/"), + ConfigVariables: config.Variables{ {{ if .Generator }} + acctest.CtRName: config.StringVariable(rName),{{ end }} + {{ template "AdditionalTfVars" . }} + }, + {{ if .HasExistsFunc -}} + Check: resource.ComposeAggregateTestCheckFunc( + {{- template "ExistsCheck" . -}} + ), + {{ end -}} + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + {{ if .ArnIdentity -}} + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + {{ if and (not .IsGlobal) .IsARNFormatGlobal -}} + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + {{ end -}} + {{ .ARNAttribute }}: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New({{ .ARNAttribute }})), + {{ else if .IsRegionalSingleton -}} + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + }), + {{ else if .IsGlobalSingleton -}} + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + }), + {{ else -}} + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + {{ if not .IsGlobal -}} + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + {{ end -}} + {{ range .IdentityAttributes -}} + {{ .Name 
}}: {{ if or (not .Optional) .TestNotNull }}knownvalue.NotNull(){{ else }}knownvalue.Null(){{ end }}, + {{ end }} + }), + {{ range .IdentityAttributes -}} + {{ if not .Optional -}} + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New({{ .Name }})), + {{ end -}} + {{ end }} + {{ end -}} + }, + }, + }, + }) + } + + func {{ template "testname" . }}_Identity_ExistingResource_fromV6(t *testing.T) { + {{- template "Init" . }} + + {{ template "Test" . }}(ctx, t, resource.TestCase{ + {{ template "TestCaseSetupNoProviders" . }} + Steps: []resource.TestStep{ + {{ $step := 1 -}} + // Step {{ $step }}: Create in v6.0 + { + ConfigDirectory: config.StaticDirectory("testdata/{{ .Name }}/basic_v6.0.0/"), + ConfigVariables: config.Variables{ {{ if .Generator }} + acctest.CtRName: config.StringVariable(rName),{{ end }} + {{ template "AdditionalTfVars" . }} + }, + {{ if .HasExistsFunc -}} + Check: resource.ComposeAggregateTestCheckFunc( + {{- template "ExistsCheck" . -}} + ), + {{ end -}} + ConfigStateChecks: []statecheck.StateCheck{ + {{ if .ArnIdentity -}} + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + {{ if and (not .IsGlobal) .IsARNFormatGlobal -}} + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + {{ end -}} + {{ .ARNAttribute }}: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New({{ .ARNAttribute }})), + {{ else if .IsRegionalSingleton -}} + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + }), + {{ else if .IsGlobalSingleton -}} + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + }), + {{ else -}} + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + {{ if not .IsGlobal -}} + names.AttrRegion: 
knownvalue.StringExact(acctest.Region()), + {{ end -}} + {{ range .IdentityAttributes -}} + {{ .Name }}: {{ if or (not .Optional) .TestNotNull }}knownvalue.NotNull(){{ else }}knownvalue.Null(){{ end }}, + {{ end }} + }), + {{ range .IdentityAttributes -}} + {{ if not .Optional -}} + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New({{ .Name }})), + {{ end -}} + {{ end }} + {{ end -}} + }, + }, + + // Step {{ ($step = inc $step) | print }}: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/{{ .Name }}/basic/"), + ConfigVariables: config.Variables{ {{ if .Generator }} + acctest.CtRName: config.StringVariable(rName),{{ end }} + {{ template "AdditionalTfVars" . }} + }, + {{ if .HasExistsFunc -}} + Check: resource.ComposeAggregateTestCheckFunc( + {{- template "ExistsCheck" . -}} + ), + {{ end -}} + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + {{ if .ArnIdentity -}} + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + {{ if and (not .IsGlobal) .IsARNFormatGlobal -}} + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + {{ end -}} + {{ .ARNAttribute }}: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New({{ .ARNAttribute }})), + {{ else if .IsRegionalSingleton -}} + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + }), + {{ else if .IsGlobalSingleton -}} + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: 
tfknownvalue.AccountID(), + }), + {{ else -}} + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + {{ if not .IsGlobal -}} + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + {{ end -}} + {{ range .IdentityAttributes -}} + {{ .Name }}: {{ if or (not .Optional) .TestNotNull }}knownvalue.NotNull(){{ else }}knownvalue.Null(){{ end }}, + {{ end }} + }), + {{ range .IdentityAttributes -}} + {{ if not .Optional -}} + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New({{ .Name }})), + {{ end -}} + {{ end }} + {{ end -}} + }, + }, + }, + }) + } +{{ else if .HasV6_0NullValuesError }} + func {{ template "testname" . }}_Identity_ExistingResource(t *testing.T) { + {{- template "Init" . }} + + {{ template "Test" . }}(ctx, t, resource.TestCase{ + {{ template "TestCaseSetupNoProviders" . }} + Steps: []resource.TestStep{ + {{ $step := 1 -}} + // Step {{ $step }}: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/{{ .Name }}/basic_v5.100.0/"), + ConfigVariables: config.Variables{ {{ if .Generator }} + acctest.CtRName: config.StringVariable(rName),{{ end }} + {{ template "AdditionalTfVars" . }} + }, + {{ if .HasExistsFunc -}} + Check: resource.ComposeAggregateTestCheckFunc( + {{- template "ExistsCheck" . -}} + ), + {{ end -}} + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step {{ ($step = inc $step) | print }}: v6.0 Identity error + { + ConfigDirectory: config.StaticDirectory("testdata/{{ .Name }}/basic_v6.0.0/"), + ConfigVariables: config.Variables{ {{ if .Generator }} + acctest.CtRName: config.StringVariable(rName),{{ end }} + {{ template "AdditionalTfVars" . }} + }, + {{ if .HasExistsFunc -}} + Check: resource.ComposeAggregateTestCheckFunc( + {{- template "ExistsCheck" . 
-}} + ), + {{ end -}} + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + {{ if .ArnIdentity -}} + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + {{ if and (not .IsGlobal) .IsARNFormatGlobal -}} + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + {{ end -}} + {{ .ARNAttribute }}: knownvalue.Null(), + }), + {{ else if .IsRegionalSingleton -}} + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: knownvalue.Null(), + names.AttrRegion: knownvalue.Null(), + }), + {{ else if .IsGlobalSingleton -}} + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: knownvalue.Null(), + }), + {{ else -}} + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: knownvalue.Null(), + {{ if not .IsGlobal -}} + names.AttrRegion: knownvalue.Null(), + {{ end -}} + {{ range .IdentityAttributes -}} + {{ .Name }}: knownvalue.Null(), + {{ end }} + }), + {{ end -}} + }, + }, + + // Step {{ ($step = inc $step) | print }}: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/{{ .Name }}/basic/"), + ConfigVariables: config.Variables{ {{ if .Generator }} + acctest.CtRName: config.StringVariable(rName),{{ end }} + {{ template "AdditionalTfVars" . 
}} + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + {{ if .ArnIdentity -}} + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + {{ if and (not .IsGlobal) .IsARNFormatGlobal -}} + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + {{ end -}} + {{ .ARNAttribute }}: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New({{ .ARNAttribute }})), + {{ else if .IsRegionalSingleton -}} + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + }), + {{ else if .IsGlobalSingleton -}} + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + }), + {{ else -}} + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + {{ if not .IsGlobal -}} + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + {{ end -}} + {{ range .IdentityAttributes -}} + {{ .Name }}: {{ if or (not .Optional) .TestNotNull }}knownvalue.NotNull(){{ else }}knownvalue.Null(){{ end }}, + {{ end }} + }), + {{ range .IdentityAttributes -}} + {{ if not .Optional -}} + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New({{ .Name }})), + {{ end -}} + {{ end }} + {{ end -}} + }, + }, + }, + }) + } + + func {{ template "testname" . }}_Identity_ExistingResource_NoRefresh_NoChange(t *testing.T) { + {{- template "Init" . }} + + {{ template "Test" . }}(ctx, t, resource.TestCase{ + {{ template "TestCaseSetupNoProviders" . 
}} + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + {{ $step := 1 -}} + // Step {{ $step }}: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/{{ .Name }}/basic_v5.100.0/"), + ConfigVariables: config.Variables{ {{ if .Generator }} + acctest.CtRName: config.StringVariable(rName),{{ end }} + {{ template "AdditionalTfVars" . }} + }, + {{ if .HasExistsFunc -}} + Check: resource.ComposeAggregateTestCheckFunc( + {{- template "ExistsCheck" . -}} + ), + {{ end -}} + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step {{ ($step = inc $step) | print }}: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/{{ .Name }}/basic/"), + ConfigVariables: config.Variables{ {{ if .Generator }} + acctest.CtRName: config.StringVariable(rName),{{ end }} + {{ template "AdditionalTfVars" . }} + }, + {{ if .HasExistsFunc -}} + Check: resource.ComposeAggregateTestCheckFunc( + {{- template "ExistsCheck" . -}} + ), + {{ end -}} + }, + }, + }) + } +{{ else if .PreIdentityVersion }} + {{ if .PreIdentityVersion.GreaterThanOrEqual (NewVersion "6.0.0") }} + // Resource Identity was added after v{{ .PreIdentityVersion }} + func {{ template "testname" . }}_Identity_ExistingResource(t *testing.T) { + {{- template "Init" . }} + + {{ template "Test" . }}(ctx, t, resource.TestCase{ + {{ template "TestCaseSetupNoProviders" . 
}} + Steps: []resource.TestStep{ + {{ $step := 1 -}} + // Step {{ $step }}: Create pre-Identity + { + {{ if .UseAlternateAccount -}} + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesNamed(ctx, t, providers, acctest.ProviderNameAlternate), + {{ end -}} + ConfigDirectory: config.StaticDirectory("testdata/{{ .Name }}/basic_v{{ .PreIdentityVersion }}/"), + ConfigVariables: config.Variables{ {{ if .Generator }} + acctest.CtRName: config.StringVariable(rName),{{ end }} + {{ template "AdditionalTfVars" . }} + }, + {{ if .HasExistsFunc -}} + Check: resource.ComposeAggregateTestCheckFunc( + {{- template "ExistsCheck" . -}} + ), + {{ end -}} + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step {{ ($step = inc $step) | print }}: Current version + { + {{ if .UseAlternateAccount -}} + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesNamedAlternate(ctx, t, providers), + {{ else -}} + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + {{ end -}} + ConfigDirectory: config.StaticDirectory("testdata/{{ .Name }}/basic/"), + ConfigVariables: config.Variables{ {{ if .Generator }} + acctest.CtRName: config.StringVariable(rName),{{ end }} + {{ template "AdditionalTfVars" . 
}} + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + {{ if .ArnIdentity -}} + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + {{ if and (not .IsGlobal) .IsARNFormatGlobal -}} + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + {{ end -}} + {{ .ARNAttribute }}: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New({{ .ARNAttribute }})), + {{ else if .IsRegionalSingleton -}} + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + }), + {{ else if .IsGlobalSingleton -}} + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + }), + {{ else -}} + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + {{ if not .IsGlobal -}} + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + {{ end -}} + {{ range .IdentityAttributes -}} + {{ .Name }}: {{ if or (not .Optional) .TestNotNull }}knownvalue.NotNull(){{ else }}knownvalue.Null(){{ end }}, + {{ end }} + }), + {{ range .IdentityAttributes -}} + {{ if not .Optional -}} + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New({{ .Name }})), + {{ end -}} + {{ end }} + {{ end -}} + }, + }, + }, + }) + } + + // Resource Identity was added after v{{ .PreIdentityVersion }} + func {{ template "testname" . }}_Identity_ExistingResource_NoRefresh_NoChange(t *testing.T) { + {{- template "Init" . }} + + {{ template "Test" . 
}}(ctx, t, resource.TestCase{ + {{ template "TestCaseSetupNoProviders" . }} + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + {{ $step := 1 -}} + // Step {{ $step }}: Create pre-Identity + { + {{ if .UseAlternateAccount -}} + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesNamed(ctx, t, providers, acctest.ProviderNameAlternate), + {{ end -}} + ConfigDirectory: config.StaticDirectory("testdata/{{ .Name }}/basic_v{{ .PreIdentityVersion }}/"), + ConfigVariables: config.Variables{ {{ if .Generator }} + acctest.CtRName: config.StringVariable(rName),{{ end }} + {{ template "AdditionalTfVars" . }} + }, + {{ if .HasExistsFunc -}} + Check: resource.ComposeAggregateTestCheckFunc( + {{- template "ExistsCheck" . -}} + ), + {{ end -}} + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step {{ ($step = inc $step) | print }}: Current version + { + {{ if .UseAlternateAccount -}} + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesNamedAlternate(ctx, t, providers), + {{ else -}} + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + {{ end -}} + ConfigDirectory: config.StaticDirectory("testdata/{{ .Name }}/basic/"), + ConfigVariables: config.Variables{ {{ if .Generator }} + acctest.CtRName: config.StringVariable(rName),{{ end }} + {{ template "AdditionalTfVars" . }} + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + }, + }) + } + {{ else }} + func {{ template "testname" . }}_Identity_ExistingResource(t *testing.T) { + {{- template "Init" . 
}} + + {{ template "Test" . }}(ctx, t, resource.TestCase{ + {{ template "TestCaseSetupNoProviders" . }} + Steps: []resource.TestStep{ + {{ $step := 1 -}} + // Step {{ $step }}: Create pre-Identity + { + {{ if .UseAlternateAccount -}} + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesNamed(ctx, t, providers, acctest.ProviderNameAlternate), + {{ end -}} + ConfigDirectory: config.StaticDirectory("testdata/{{ .Name }}/basic_v5.100.0/"), + ConfigVariables: config.Variables{ {{ if .Generator }} + acctest.CtRName: config.StringVariable(rName),{{ end }} + {{ template "AdditionalTfVars" . }} + }, + {{ if .HasExistsFunc -}} + Check: resource.ComposeAggregateTestCheckFunc( + {{- template "ExistsCheck" . -}} + ), + {{ end -}} + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step {{ ($step = inc $step) | print }}: v6.0 Identity set on refresh + { + {{ if .UseAlternateAccount -}} + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesNamed(ctx, t, providers, acctest.ProviderNameAlternate), + {{ end -}} + ConfigDirectory: config.StaticDirectory("testdata/{{ .Name }}/basic_v6.0.0/"), + ConfigVariables: config.Variables{ {{ if .Generator }} + acctest.CtRName: config.StringVariable(rName),{{ end }} + {{ template "AdditionalTfVars" . }} + }, + {{ if .HasExistsFunc -}} + Check: resource.ComposeAggregateTestCheckFunc( + {{- template "ExistsCheck" . 
-}} + ), + {{ end -}} + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + {{ if .ArnIdentity -}} + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + {{ if and (not .IsGlobal) .IsARNFormatGlobal -}} + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + {{ end -}} + {{ .ARNAttribute }}: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New({{ .ARNAttribute }})), + {{ else if .IsRegionalSingleton -}} + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + }), + {{ else if .IsGlobalSingleton -}} + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + }), + {{ else -}} + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + {{ if not .IsGlobal -}} + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + {{ end -}} + {{ range .IdentityAttributes -}} + {{ .Name }}: {{ if or (not .Optional) .TestNotNull }}knownvalue.NotNull(){{ else }}knownvalue.Null(){{ end }}, + {{ end }} + }), + {{ range .IdentityAttributes -}} + {{ if not .Optional -}} + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New({{ .Name }})), + {{ end -}} + {{ end }} + {{ end -}} + }, + }, + + // Step {{ ($step = inc $step) | print }}: Current version + { + {{ if .UseAlternateAccount -}} + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesNamedAlternate(ctx, t, providers), + {{ else -}} + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + {{ end 
-}} + ConfigDirectory: config.StaticDirectory("testdata/{{ .Name }}/basic/"), + ConfigVariables: config.Variables{ {{ if .Generator }} + acctest.CtRName: config.StringVariable(rName),{{ end }} + {{ template "AdditionalTfVars" . }} + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + {{ if .ArnIdentity -}} + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + {{ if and (not .IsGlobal) .IsARNFormatGlobal -}} + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + {{ end -}} + {{ .ARNAttribute }}: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New({{ .ARNAttribute }})), + {{ else if .IsRegionalSingleton -}} + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + }), + {{ else if .IsGlobalSingleton -}} + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + }), + {{ else -}} + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + {{ if not .IsGlobal -}} + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + {{ end -}} + {{ range .IdentityAttributes -}} + {{ .Name }}: {{ if or (not .Optional) .TestNotNull }}knownvalue.NotNull(){{ else }}knownvalue.Null(){{ end }}, + {{ end }} + }), + {{ range .IdentityAttributes -}} + {{ if not .Optional -}} + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New({{ .Name }})), + {{ end -}} + {{ end }} + {{ end -}} + }, + }, + }, + }) + } + + func {{ template "testname" . 
}}_Identity_ExistingResource_NoRefresh_NoChange(t *testing.T) { + {{- template "Init" . }} + + {{ template "Test" . }}(ctx, t, resource.TestCase{ + {{ template "TestCaseSetupNoProviders" . }} + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + {{ $step := 1 -}} + // Step {{ $step }}: Create pre-Identity + { + {{ if .UseAlternateAccount -}} + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesNamed(ctx, t, providers, acctest.ProviderNameAlternate), + {{ end -}} + ConfigDirectory: config.StaticDirectory("testdata/{{ .Name }}/basic_v{{ .PreIdentityVersion }}/"), + ConfigVariables: config.Variables{ {{ if .Generator }} + acctest.CtRName: config.StringVariable(rName),{{ end }} + {{ template "AdditionalTfVars" . }} + }, + {{ if .HasExistsFunc -}} + Check: resource.ComposeAggregateTestCheckFunc( + {{- template "ExistsCheck" . -}} + ), + {{ end -}} + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step {{ ($step = inc $step) | print }}: Current version + { + {{ if .UseAlternateAccount -}} + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesNamedAlternate(ctx, t, providers), + {{ else -}} + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + {{ end -}} + ConfigDirectory: config.StaticDirectory("testdata/{{ .Name }}/basic/"), + ConfigVariables: config.Variables{ {{ if .Generator }} + acctest.CtRName: config.StringVariable(rName),{{ end }} + {{ template "AdditionalTfVars" . }} + }, + }, + }, + }) + } + {{ end }} +{{ end }} diff --git a/internal/generate/identitytests/test.tf.gtpl b/internal/generate/identitytests/test.tf.gtpl index 5cf725ae62e1..98cbf7d66101 100644 --- a/internal/generate/identitytests/test.tf.gtpl +++ b/internal/generate/identitytests/test.tf.gtpl @@ -14,17 +14,39 @@ variable "rName" { nullable = false } {{ end -}} -{{ range .AdditionalTfVars -}} +{{- range .AdditionalTfVars -}} variable "{{ . 
}}" { type = string nullable = false } {{ end -}} -{{ if .WithRegion }} +{{- range .RequiredEnvVars }} +variable "{{ . }}" { + type = string + nullable = false +} +{{ end }} +{{- if .WithRegion }} variable "region" { description = "Region to deploy resource in" type = string nullable = false } -{{ end -}} +{{ end }} +{{- if ne (len .ExternalProviders) 0 -}} +terraform { + required_providers { + {{- range $provider, $stuff := .ExternalProviders }} + {{ $provider }} = { + source = "{{ $stuff.Source }}" + version = "{{ $stuff.Version }}" + } + {{- end }} + } +} + +{{ range $provider, $stuff := .ExternalProviders -}} +provider "{{ $provider }}" {} +{{ end }} +{{- end -}} diff --git a/internal/generate/serviceendpointtests/file.gtpl b/internal/generate/serviceendpointtests/file.gtpl index 301c7f3fb362..193985889d91 100644 --- a/internal/generate/serviceendpointtests/file.gtpl +++ b/internal/generate/serviceendpointtests/file.gtpl @@ -831,7 +831,7 @@ func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { ) } -var errCancelOperation = fmt.Errorf("Test: Canceling request") +var errCancelOperation = errors.New("Test: Canceling request") func addCancelRequestMiddleware() func(*middleware.Stack) error { return func(stack *middleware.Stack) error { diff --git a/internal/generate/serviceendpointtests/main.go b/internal/generate/serviceendpointtests/main.go index 2dcf4ca43e90..8bd315c17075 100644 --- a/internal/generate/serviceendpointtests/main.go +++ b/internal/generate/serviceendpointtests/main.go @@ -35,16 +35,17 @@ func main() { packageName := l.ProviderPackage() switch packageName { - case "cloudfrontkeyvaluestore", // Endpoint includes account ID - "codecatalyst", // Bearer auth token needs special handling - "location", // Resolver modifies URL - "mwaa", // Resolver modifies URL - "neptunegraph", // EndpointParameters has an additional parameter, ApiType - "paymentcryptography", // Resolver modifies URL - "route53profiles", // Resolver modifies 
URL - "s3control", // Resolver modifies URL - "simpledb", // AWS SDK for Go v1 - "timestreamwrite": // Uses endpoint discovery + case "arcregionswitch", // Resolver modifies URL + "cloudfrontkeyvaluestore", // Endpoint includes account ID + "codecatalyst", // Bearer auth token needs special handling + "location", // Resolver modifies URL + "mwaa", // Resolver modifies URL + "neptunegraph", // EndpointParameters has an additional parameter, ApiType + "paymentcryptography", // Resolver modifies URL + "route53profiles", // Resolver modifies URL + "s3control", // Resolver modifies URL + "simpledb", // AWS SDK for Go v1 + "timestreamwrite": // Uses endpoint discovery continue } diff --git a/internal/generate/servicepackage/endpoint_resolver.go.gtpl b/internal/generate/servicepackage/endpoint_resolver.go.gtpl index d4fbc9a19672..838d99aedccd 100644 --- a/internal/generate/servicepackage/endpoint_resolver.go.gtpl +++ b/internal/generate/servicepackage/endpoint_resolver.go.gtpl @@ -62,7 +62,7 @@ func (r resolverV2) ResolveEndpoint(ctx context.Context, params {{ .GoV2Package }) params.UseFIPS = aws.Bool(false) } else { - err = fmt.Errorf("looking up {{ .GoV2Package }} endpoint %q: %s", hostname, err) + err = fmt.Errorf("looking up {{ .GoV2Package }} endpoint %q: %w", hostname, err) return } } else { diff --git a/internal/generate/servicepackage/main.go b/internal/generate/servicepackage/main.go index 16c70f4bc7d4..d01738ef12ea 100644 --- a/internal/generate/servicepackage/main.go +++ b/internal/generate/servicepackage/main.go @@ -1,8 +1,8 @@ // Copyright (c) HashiCorp, Inc. 
// SPDX-License-Identifier: MPL-2.0 -//go:build generate -// +build generate +//go:build ignore +// +build ignore package main @@ -62,11 +62,14 @@ func main() { v := &visitor{ g: g, - ephemeralResources: make(map[string]ResourceDatum, 0), - frameworkDataSources: make(map[string]ResourceDatum, 0), - frameworkResources: make(map[string]ResourceDatum, 0), - sdkDataSources: make(map[string]ResourceDatum, 0), - sdkResources: make(map[string]ResourceDatum, 0), + actions: make(map[string]ResourceDatum, 0), + ephemeralResources: make(map[string]ResourceDatum, 0), + frameworkDataSources: make(map[string]ResourceDatum, 0), + frameworkListResources: make(map[string]ResourceDatum, 0), + frameworkResources: make(map[string]ResourceDatum, 0), + sdkDataSources: make(map[string]ResourceDatum, 0), + sdkResources: make(map[string]ResourceDatum, 0), + sdkListResources: make(map[string]ResourceDatum, 0), } v.processDir(".") @@ -87,6 +90,40 @@ func main() { } } + for key, value := range v.frameworkListResources { + if val, exists := v.frameworkResources[key]; exists { + value.Name = val.Name + value.IdentityAttributes = val.IdentityAttributes + value.IdentityDuplicateAttrs = val.IdentityDuplicateAttrs + value.ARNIdentity = val.ARNIdentity + value.SingletonIdentity = val.SingletonIdentity + value.TransparentTagging = val.TransparentTagging + value.TagsResourceType = val.TagsResourceType + value.TagsIdentifierAttribute = val.TagsIdentifierAttribute + + v.frameworkListResources[key] = value + } else { + g.Fatalf("Framework List Resource %q has no matching Framework Resource", key) + } + } + + for key, value := range v.sdkListResources { + if val, exists := v.sdkResources[key]; exists { + value.Name = val.Name + value.IdentityAttributes = val.IdentityAttributes + value.IdentityDuplicateAttrs = val.IdentityDuplicateAttrs + value.ARNIdentity = val.ARNIdentity + value.SingletonIdentity = val.SingletonIdentity + value.TransparentTagging = val.TransparentTagging + value.TagsResourceType = 
val.TagsResourceType + value.TagsIdentifierAttribute = val.TagsIdentifierAttribute + + v.sdkListResources[key] = value + } else { + g.Fatalf("SDK List Resource %q has no matching SDK Resource", key) + } + } + s := ServiceDatum{ GenerateClient: l.GenerateClient(), IsGlobal: l.IsGlobal(), @@ -94,27 +131,39 @@ func main() { GoV2Package: l.GoV2Package(), ProviderPackage: p, ProviderNameUpper: l.ProviderNameUpper(), + Actions: v.actions, EphemeralResources: v.ephemeralResources, FrameworkDataSources: v.frameworkDataSources, + FrameworkListResources: v.frameworkListResources, FrameworkResources: v.frameworkResources, SDKDataSources: v.sdkDataSources, SDKResources: v.sdkResources, + SDKListResources: v.sdkListResources, } var imports []goImport - for resource := range maps.Values(v.ephemeralResources) { + for _, resource := range v.actions { + imports = append(imports, resource.goImports...) + } + for _, resource := range v.ephemeralResources { imports = append(imports, resource.goImports...) } - for resource := range maps.Values(v.frameworkDataSources) { + for _, resource := range v.frameworkDataSources { imports = append(imports, resource.goImports...) } - for resource := range maps.Values(v.frameworkResources) { + for _, resource := range v.frameworkListResources { imports = append(imports, resource.goImports...) } - for resource := range maps.Values(v.sdkDataSources) { + for _, resource := range v.frameworkResources { imports = append(imports, resource.goImports...) } - for resource := range maps.Values(v.sdkResources) { + for _, resource := range v.sdkDataSources { + imports = append(imports, resource.goImports...) + } + for _, resource := range v.sdkResources { + imports = append(imports, resource.goImports...) + } + for _, resource := range v.sdkListResources { imports = append(imports, resource.goImports...) 
} slices.SortFunc(imports, func(a, b goImport) int { @@ -181,6 +230,7 @@ type ResourceDatum struct { SingletonIdentity bool MutableIdentity bool WrappedImport bool + CustomImport bool goImports []goImport IdentityDuplicateAttrs []string ImportIDHandler string @@ -194,8 +244,9 @@ func (r ResourceDatum) IsARNFormatGlobal() bool { } type identityAttribute struct { - Name string - Optional bool + Name string + Optional bool + ResourceAttributeName string } type goImport struct { @@ -230,11 +281,14 @@ type ServiceDatum struct { GoV2Package string // AWS SDK for Go v2 package name ProviderPackage string ProviderNameUpper string + Actions map[string]ResourceDatum EphemeralResources map[string]ResourceDatum FrameworkDataSources map[string]ResourceDatum + FrameworkListResources map[string]ResourceDatum FrameworkResources map[string]ResourceDatum SDKDataSources map[string]ResourceDatum SDKResources map[string]ResourceDatum + SDKListResources map[string]ResourceDatum GoImports []goImport } @@ -258,11 +312,14 @@ type visitor struct { functionName string packageName string - ephemeralResources map[string]ResourceDatum - frameworkDataSources map[string]ResourceDatum - frameworkResources map[string]ResourceDatum - sdkDataSources map[string]ResourceDatum - sdkResources map[string]ResourceDatum + actions map[string]ResourceDatum + ephemeralResources map[string]ResourceDatum + frameworkDataSources map[string]ResourceDatum + frameworkListResources map[string]ResourceDatum + frameworkResources map[string]ResourceDatum + sdkDataSources map[string]ResourceDatum + sdkResources map[string]ResourceDatum + sdkListResources map[string]ResourceDatum } // processDir scans a single service package directory and processes contained Go sources files. 
@@ -392,6 +449,10 @@ func (v *visitor) processFuncDecl(funcDecl *ast.FuncDecl) { } } + if attr, ok := args.Keyword["resourceAttributeName"]; ok { + identityAttribute.ResourceAttributeName = namesgen.ConstOrQuote(attr) + } + d.IdentityAttributes = append(d.IdentityAttributes, identityAttribute) case "WrappedImport": @@ -407,6 +468,9 @@ func (v *visitor) processFuncDecl(funcDecl *ast.FuncDecl) { } } + case "CustomImport": + d.CustomImport = true + case "ArnIdentity": d.ARNIdentity = true d.WrappedImport = true @@ -503,6 +567,30 @@ func (v *visitor) processFuncDecl(funcDecl *ast.FuncDecl) { } switch annotationName := m[1]; annotationName { + case "Action": + if len(args.Positional) == 0 { + v.errs = append(v.errs, fmt.Errorf("no type name: %s", fmt.Sprintf("%s.%s", v.packageName, v.functionName))) + continue + } + + typeName := args.Positional[0] + + if !validTypeName.MatchString(typeName) { + v.errs = append(v.errs, fmt.Errorf("invalid type name (%s): %s", typeName, fmt.Sprintf("%s.%s", v.packageName, v.functionName))) + continue + } + + if d.Name == "" { + v.errs = append(v.errs, fmt.Errorf("no friendly name: %s", fmt.Sprintf("%s.%s", v.packageName, v.functionName))) + continue + } + + if _, ok := v.actions[typeName]; ok { + v.errs = append(v.errs, fmt.Errorf("duplicate Action (%s): %s", typeName, fmt.Sprintf("%s.%s", v.packageName, v.functionName))) + } else { + v.actions[typeName] = d + } + case "EphemeralResource": if len(args.Positional) == 0 { v.errs = append(v.errs, fmt.Errorf("no type name: %s", fmt.Sprintf("%s.%s", v.packageName, v.functionName))) @@ -639,7 +727,49 @@ func (v *visitor) processFuncDecl(funcDecl *ast.FuncDecl) { v.sdkResources[typeName] = d } - case "IdentityAttribute", "ArnIdentity", "ImportIDHandler", "MutableIdentity", "SingletonIdentity", "Region", "Tags", "WrappedImport", "V60SDKv2Fix", "IdentityFix": + case "FrameworkListResource": + if len(args.Positional) == 0 { + v.errs = append(v.errs, fmt.Errorf("no type name: %s", 
fmt.Sprintf("%s.%s", v.packageName, v.functionName))) + continue + } + + typeName := args.Positional[0] + + if !validTypeName.MatchString(typeName) { + v.errs = append(v.errs, fmt.Errorf("invalid type name (%s): %s", typeName, fmt.Sprintf("%s.%s", v.packageName, v.functionName))) + continue + } + + _, fOK := v.frameworkListResources[typeName] + _, sdkOK := v.sdkListResources[typeName] + if fOK || sdkOK { + v.errs = append(v.errs, fmt.Errorf("duplicate List Resource (%s): %s", typeName, fmt.Sprintf("%s.%s", v.packageName, v.functionName))) + } else { + v.frameworkListResources[typeName] = d + } + + case "SDKListResource": + if len(args.Positional) == 0 { + v.errs = append(v.errs, fmt.Errorf("no type name: %s", fmt.Sprintf("%s.%s", v.packageName, v.functionName))) + continue + } + + typeName := args.Positional[0] + + if !validTypeName.MatchString(typeName) { + v.errs = append(v.errs, fmt.Errorf("invalid type name (%s): %s", typeName, fmt.Sprintf("%s.%s", v.packageName, v.functionName))) + continue + } + + _, fOK := v.frameworkListResources[typeName] + _, sdkOK := v.sdkListResources[typeName] + if fOK || sdkOK { + v.errs = append(v.errs, fmt.Errorf("duplicate List Resource (%s): %s", typeName, fmt.Sprintf("%s.%s", v.packageName, v.functionName))) + } else { + v.sdkListResources[typeName] = d + } + + case "IdentityAttribute", "ArnIdentity", "ImportIDHandler", "MutableIdentity", "SingletonIdentity", "Region", "Tags", "WrappedImport", "V60SDKv2Fix", "IdentityFix", "CustomImport": // Handled above. case "ArnFormat", "IdAttrFormat", "NoImport", "Testing": // Ignored. diff --git a/internal/generate/servicepackage/service_package_gen.go.gtpl b/internal/generate/servicepackage/service_package_gen.go.gtpl index 18bb6590be00..5ce858c7011e 100644 --- a/internal/generate/servicepackage/service_package_gen.go.gtpl +++ b/internal/generate/servicepackage/service_package_gen.go.gtpl @@ -1,16 +1,42 @@ // Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. 
{{ define "IdentifierAttribute" -}} +{{- if .ResourceAttributeName -}} +inttypes.StringIdentityAttributeWithMappedName( + {{- .Name }}, + {{- if .Optional }}false{{ else }}true{{ end -}}, + {{- .ResourceAttributeName -}} +), +{{- else -}} inttypes.StringIdentityAttribute( {{- .Name }}, {{- if .Optional }}false{{ else }}true{{ end -}} ), +{{- end -}} +{{- end }} + +{{ define "SDKv2CommonIdentityOpts" -}} +{{- if .HasV6_0SDKv2Fix }} + inttypes.WithV6_0SDKv2Fix(), +{{- end }} +{{- template "CommonIdentityOpts" . -}} +{{- end }} + +{{ define "CommonIdentityOpts" -}} +{{- if .MutableIdentity }} + inttypes.WithMutableIdentity(), +{{ end -}} +{{- if .HasIdentityFix }} + inttypes.WithIdentityFix(), +{{ end -}} {{- end }} package {{ .ProviderPackage }} import ( "context" + "iter" + "slices" "unique" {{ if .GenerateClient }} @@ -35,6 +61,31 @@ import ( type servicePackage struct {} +{{- if .Actions }} +func (p *servicePackage) Actions(ctx context.Context) []*inttypes.ServicePackageAction { + return []*inttypes.ServicePackageAction { +{{- range $key, $value := .Actions }} + {{- $regionOverrideEnabled := and (not $.IsGlobal) $value.RegionOverrideEnabled }} + { + Factory: {{ $value.FactoryName }}, + TypeName: "{{ $key }}", + Name: "{{ $value.Name }}", + {{- if and $regionOverrideEnabled $value.ValidateRegionOverrideInPartition }} + Region: unique.Make(inttypes.ResourceRegionDefault()), + {{- else if not $regionOverrideEnabled }} + Region: unique.Make(inttypes.ResourceRegionDisabled()), + {{- else }} + Region: unique.Make(inttypes.ServicePackageResourceRegion { + IsOverrideEnabled: {{ $regionOverrideEnabled }}, + IsValidateOverrideInPartition: {{ $value.ValidateRegionOverrideInPartition }}, + }), + {{- end }} + }, +{{- end }} + } +} +{{- end }} + {{- if .EphemeralResources }} func (p *servicePackage) EphemeralResources(ctx context.Context) []*inttypes.ServicePackageEphemeralResource { return []*inttypes.ServicePackageEphemeralResource { @@ -121,115 +172,216 @@ func (p 
*servicePackage) FrameworkResources(ctx context.Context) []*inttypes.Ser IsValidateOverrideInPartition: {{ $value.ValidateRegionOverrideInPartition }}, }), {{- end }} - {{- if not $value.MutableIdentity }} - {{- if gt (len $value.IdentityAttributes) 1 }} - {{- if or $.IsGlobal $value.IsGlobal }} - Identity: inttypes.GlobalParameterizedIdentity([]inttypes.IdentityAttribute{ - {{- range $value.IdentityAttributes }} - {{ template "IdentifierAttribute" . }} - {{- end }} - }, - {{- if .HasIdentityFix }} - inttypes.WithIdentityFix(), + {{- if gt (len $value.IdentityAttributes) 1 }} + {{- if or $.IsGlobal $value.IsGlobal }} + Identity: inttypes.GlobalParameterizedIdentity([]inttypes.IdentityAttribute{ + {{- range $value.IdentityAttributes }} + {{ template "IdentifierAttribute" . }} + {{- end }} + }, + {{- template "CommonIdentityOpts" . -}} + ), + {{- else }} + Identity: inttypes.RegionalParameterizedIdentity([]inttypes.IdentityAttribute{ + {{- range $value.IdentityAttributes }} + {{ template "IdentifierAttribute" . }} + {{- end }} + }, + {{- template "CommonIdentityOpts" . -}} + ), + {{- end }} + {{- else if gt (len $value.IdentityAttributes) 0 }} + {{- if or $.IsGlobal $value.IsGlobal }} + Identity: inttypes.GlobalSingleParameterIdentity( + {{- range $value.IdentityAttributes -}} + {{ .Name }}, {{- end -}} - ), - {{- else }} - Identity: inttypes.RegionalParameterizedIdentity([]inttypes.IdentityAttribute{ - {{- range $value.IdentityAttributes }} - {{ template "IdentifierAttribute" . }} - {{- end }} - }, - {{- if .HasIdentityFix }} - inttypes.WithIdentityFix(), + {{- template "CommonIdentityOpts" . 
-}} + ), + {{- else }} + Identity: inttypes.RegionalSingleParameterIdentity( + {{- range $value.IdentityAttributes -}} + {{ .Name }}, {{- end -}} - ), - {{- end }} - {{- else if gt (len $value.IdentityAttributes) 0 }} - {{- if or $.IsGlobal $value.IsGlobal }} - Identity: inttypes.GlobalSingleParameterIdentity( - {{- range $value.IdentityAttributes -}} - {{ .Name }}, - {{- end -}} - {{- if .HasIdentityFix }} - inttypes.WithIdentityFix(), - {{- end }} - ), + {{- template "CommonIdentityOpts" . -}} + ), + {{- end }} + {{- else if $value.ARNIdentity }} + {{- if $.IsGlobal }} + {{- if $value.HasARNAttribute }} + Identity: inttypes.GlobalARNIdentityNamed({{ $value.ARNAttribute }}, {{- else }} - Identity: inttypes.RegionalSingleParameterIdentity( - {{- range $value.IdentityAttributes -}} - {{ .Name }}, - {{- end -}} - {{- if .HasIdentityFix }} - inttypes.WithIdentityFix(), - {{- end }} - ), + Identity: inttypes.GlobalARNIdentity( {{- end }} - {{- else if $value.ARNIdentity }} - {{- if $.IsGlobal }} + {{- else }} + {{- if $value.IsARNFormatGlobal }} {{- if $value.HasARNAttribute }} - Identity: inttypes.GlobalARNIdentityNamed({{ $value.ARNAttribute }}, + Identity: inttypes.RegionalResourceWithGlobalARNFormatNamed({{ $value.ARNAttribute }}, {{- else }} - Identity: inttypes.GlobalARNIdentity( + Identity: inttypes.RegionalResourceWithGlobalARNFormat( {{- end }} {{- else }} - {{- if $value.IsARNFormatGlobal }} - {{- if $value.HasARNAttribute }} - Identity: inttypes.RegionalResourceWithGlobalARNFormatNamed({{ $value.ARNAttribute }}, - {{- else }} - Identity: inttypes.RegionalResourceWithGlobalARNFormat( - {{- end }} + {{- if $value.HasARNAttribute }} + Identity: inttypes.RegionalARNIdentityNamed({{ $value.ARNAttribute }}, {{- else }} - {{- if $value.HasARNAttribute }} - Identity: inttypes.RegionalARNIdentityNamed({{ $value.ARNAttribute }}, - {{- else }} - Identity: inttypes.RegionalARNIdentity( - {{- end }} + Identity: inttypes.RegionalARNIdentity( {{- end }} {{- end }} + {{- 
end }} + {{- if .HasIdentityDuplicateAttrs -}} + inttypes.WithIdentityDuplicateAttrs({{ range .IdentityDuplicateAttrs }}{{ . }}, {{ end }}), + {{- end -}} + {{- template "CommonIdentityOpts" . -}} + ), + {{- else if $value.SingletonIdentity }} + {{- if or $.IsGlobal $value.IsGlobal }} + Identity: inttypes.GlobalSingletonIdentity( {{- if .HasIdentityDuplicateAttrs -}} inttypes.WithIdentityDuplicateAttrs({{ range .IdentityDuplicateAttrs }}{{ . }}, {{ end }}), {{- end -}} - {{- if .HasIdentityFix }} - inttypes.WithIdentityFix(), - {{- end }} - ), - {{- else if $value.SingletonIdentity }} - {{- if or $.IsGlobal $value.IsGlobal }} - Identity: inttypes.GlobalSingletonIdentity( - {{- if .HasIdentityDuplicateAttrs -}} - inttypes.WithIdentityDuplicateAttrs({{ range .IdentityDuplicateAttrs }}{{ . }}, {{ end }}), - {{- end -}} - {{- if .HasIdentityFix }} - inttypes.WithIdentityFix(), - {{- end }} - ), - {{ else }} - Identity: inttypes.RegionalSingletonIdentity( - {{- if .HasIdentityDuplicateAttrs -}} - inttypes.WithIdentityDuplicateAttrs({{ range .IdentityDuplicateAttrs }}{{ . }}, {{ end }}), - {{- end -}} - {{- if .HasIdentityFix }} - inttypes.WithIdentityFix(), - {{- end }} - ), + {{- template "CommonIdentityOpts" . -}} + ), + {{ else }} + Identity: inttypes.RegionalSingletonIdentity( + {{- if .HasIdentityDuplicateAttrs -}} + inttypes.WithIdentityDuplicateAttrs({{ range .IdentityDuplicateAttrs }}{{ . }}, {{ end }}), + {{- end -}} + {{- template "CommonIdentityOpts" . 
-}} + ), + {{- end }} + {{- end }} + {{- if $value.WrappedImport }} + Import: inttypes.FrameworkImport{ + {{- if $value.CustomImport }} + CustomImport: true, + {{- else }} + WrappedImport: true, {{- end }} + {{- if ne $value.ImportIDHandler "" }} + ImportID: {{ $value.ImportIDHandler }}{}, + {{- end }} + {{- if $value.SetIDAttribute }} + SetIDAttr: true, + {{- end }} + }, + {{- end }} + }, +{{- end }} + } +} + +{{ if .FrameworkListResources }} +func (p *servicePackage) FrameworkListResources(ctx context.Context) iter.Seq[*inttypes.ServicePackageFrameworkListResource] { + return slices.Values([]*inttypes.ServicePackageFrameworkListResource { +{{- range $key, $value := .FrameworkListResources }} + {{- $regionOverrideEnabled := and (not $.IsGlobal) $value.RegionOverrideEnabled }} + { + Factory: {{ $value.FactoryName }}, + TypeName: "{{ $key }}", + Name: "{{ $value.Name }}", + {{- if .TransparentTagging }} + Tags: unique.Make(inttypes.ServicePackageResourceTags { + {{- if ne .TagsIdentifierAttribute "" }} + IdentifierAttribute: {{ .TagsIdentifierAttribute }}, {{- end }} - {{- if $value.WrappedImport }} - Import: inttypes.FrameworkImport{ - WrappedImport: true, - {{- if ne $value.ImportIDHandler "" }} - ImportID: {{ $value.ImportIDHandler }}{}, + {{- if ne .TagsResourceType "" }} + ResourceType: "{{ .TagsResourceType }}", + {{- end }} + }), + {{- end }} + {{- if and $regionOverrideEnabled $value.ValidateRegionOverrideInPartition }} + Region: unique.Make(inttypes.ResourceRegionDefault()), + {{- else if not $regionOverrideEnabled }} + Region: unique.Make(inttypes.ResourceRegionDisabled()), + {{- else }} + Region: unique.Make(inttypes.ServicePackageResourceRegion { + IsOverrideEnabled: {{ $regionOverrideEnabled }}, + IsValidateOverrideInPartition: {{ $value.ValidateRegionOverrideInPartition }}, + }), + {{- end }} + {{- if gt (len $value.IdentityAttributes) 1 }} + {{- if or $.IsGlobal $value.IsGlobal }} + Identity: 
inttypes.GlobalParameterizedIdentity([]inttypes.IdentityAttribute{ + {{- range $value.IdentityAttributes }} + {{ template "IdentifierAttribute" . }} {{- end }} - {{- if $value.SetIDAttribute }} - SetIDAttr: true, + }, + {{- template "CommonIdentityOpts" . -}} + ), + {{- else }} + Identity: inttypes.RegionalParameterizedIdentity([]inttypes.IdentityAttribute{ + {{- range $value.IdentityAttributes }} + {{ template "IdentifierAttribute" . }} {{- end }} }, + {{- template "CommonIdentityOpts" . -}} + ), + {{- end }} + {{- else if gt (len $value.IdentityAttributes) 0 }} + {{- if or $.IsGlobal $value.IsGlobal }} + Identity: inttypes.GlobalSingleParameterIdentity( + {{- range $value.IdentityAttributes -}} + {{ .Name }}, + {{- end -}} + {{- template "CommonIdentityOpts" . -}} + ), + {{- else }} + Identity: inttypes.RegionalSingleParameterIdentity( + {{- range $value.IdentityAttributes -}} + {{ .Name }}, + {{- end -}} + {{- template "CommonIdentityOpts" . -}} + ), + {{- end }} + {{- else if $value.ARNIdentity }} + {{- if $.IsGlobal }} + {{- if $value.HasARNAttribute }} + Identity: inttypes.GlobalARNIdentityNamed({{ $value.ARNAttribute }}, + {{- else }} + Identity: inttypes.GlobalARNIdentity( + {{- end }} + {{- else }} + {{- if $value.IsARNFormatGlobal }} + {{- if $value.HasARNAttribute }} + Identity: inttypes.RegionalResourceWithGlobalARNFormatNamed({{ $value.ARNAttribute }}, + {{- else }} + Identity: inttypes.RegionalResourceWithGlobalARNFormat( + {{- end }} + {{- else }} + {{- if $value.HasARNAttribute }} + Identity: inttypes.RegionalARNIdentityNamed({{ $value.ARNAttribute }}, + {{- else }} + Identity: inttypes.RegionalARNIdentity( + {{- end }} + {{- end }} + {{- end }} + {{- if .HasIdentityDuplicateAttrs -}} + inttypes.WithIdentityDuplicateAttrs({{ range .IdentityDuplicateAttrs }}{{ . }}, {{ end }}), + {{- end -}} + {{- template "CommonIdentityOpts" . 
-}} + ), + {{- else if $value.SingletonIdentity }} + {{- if or $.IsGlobal $value.IsGlobal }} + Identity: inttypes.GlobalSingletonIdentity( + {{- if .HasIdentityDuplicateAttrs -}} + inttypes.WithIdentityDuplicateAttrs({{ range .IdentityDuplicateAttrs }}{{ . }}, {{ end }}), + {{- end -}} + {{- template "CommonIdentityOpts" . -}} + ), + {{ else }} + Identity: inttypes.RegionalSingletonIdentity( + {{- if .HasIdentityDuplicateAttrs -}} + inttypes.WithIdentityDuplicateAttrs({{ range .IdentityDuplicateAttrs }}{{ . }}, {{ end }}), + {{- end -}} + {{- template "CommonIdentityOpts" . -}} + ), {{- end }} {{- end }} }, {{- end }} - } + }) } +{{- end }} func (p *servicePackage) SDKDataSources(ctx context.Context) []*inttypes.ServicePackageSDKDataSource { return []*inttypes.ServicePackageSDKDataSource { @@ -292,141 +444,198 @@ func (p *servicePackage) SDKResources(ctx context.Context) []*inttypes.ServicePa IsValidateOverrideInPartition: {{ $value.ValidateRegionOverrideInPartition }}, }), {{- end }} - {{- if not $value.MutableIdentity }} - {{- if gt (len $value.IdentityAttributes) 1 }} - {{- if or $.IsGlobal $value.IsGlobal }} - Identity: inttypes.GlobalParameterizedIdentity([]inttypes.IdentityAttribute{ - {{- range $value.IdentityAttributes }} - {{ template "IdentifierAttribute" . }} - {{- end }} - }, - {{- if $value.HasV6_0SDKv2Fix }} - inttypes.WithV6_0SDKv2Fix(), - {{ end -}} - {{- if .HasIdentityFix }} - inttypes.WithIdentityFix(), + {{- if gt (len $value.IdentityAttributes) 1 }} + {{- if or $.IsGlobal $value.IsGlobal }} + Identity: inttypes.GlobalParameterizedIdentity([]inttypes.IdentityAttribute{ + {{- range $value.IdentityAttributes }} + {{ template "IdentifierAttribute" . }} + {{- end }} + }, + {{- template "SDKv2CommonIdentityOpts" . -}} + ), + {{- else }} + Identity: inttypes.RegionalParameterizedIdentity([]inttypes.IdentityAttribute{ + {{- range $value.IdentityAttributes }} + {{ template "IdentifierAttribute" . 
}} + {{- end }} + }, + {{- template "SDKv2CommonIdentityOpts" . -}} + ), + {{- end }} + {{- else if gt (len $value.IdentityAttributes) 0 }} + {{- if or $.IsGlobal $value.IsGlobal }} + Identity: inttypes.GlobalSingleParameterIdentity( + {{- range $value.IdentityAttributes -}} + {{ .Name }}, {{- end -}} - ), + {{- template "SDKv2CommonIdentityOpts" . }} + ), + {{- else }} + Identity: inttypes.RegionalSingleParameterIdentity( + {{- range $value.IdentityAttributes -}} + {{ .Name }}, + {{- end }} + {{- template "SDKv2CommonIdentityOpts" . }} + ), + {{- end }} + {{- else if $value.ARNIdentity }} + {{- if $.IsGlobal }} + {{- if $value.HasARNAttribute }} + Identity: inttypes.GlobalARNIdentityNamed({{ $value.ARNAttribute }}, {{- else }} - Identity: inttypes.RegionalParameterizedIdentity([]inttypes.IdentityAttribute{ - {{- range $value.IdentityAttributes }} - {{ template "IdentifierAttribute" . }} - {{- end }} - }, - {{- if $value.HasV6_0SDKv2Fix }} - inttypes.WithV6_0SDKv2Fix(), - {{ end -}} - {{- if .HasIdentityFix }} - inttypes.WithIdentityFix(), - {{- end -}} - ), + Identity: inttypes.GlobalARNIdentity( + {{- end }} + {{- else }} + {{- if $value.HasARNAttribute }} + Identity: inttypes.RegionalARNIdentityNamed({{ $value.ARNAttribute }}, + {{- else }} + Identity: inttypes.RegionalARNIdentity( + {{- end }} + {{- end }} + inttypes.WithIdentityDuplicateAttrs(names.AttrID), + {{- template "SDKv2CommonIdentityOpts" . }} + ), + {{- else if $value.SingletonIdentity }} + {{- if or $.IsGlobal $value.IsGlobal }} + Identity: inttypes.GlobalSingletonIdentity( + {{- template "SDKv2CommonIdentityOpts" . }} + ), + {{- else }} + Identity: inttypes.RegionalSingletonIdentity( + {{- template "SDKv2CommonIdentityOpts" . 
}} + ), + {{- end }} + {{- end }} + {{- if $value.WrappedImport }} + Import: inttypes.SDKv2Import{ + {{- if $value.CustomImport }} + CustomImport: true, + {{- else }} + WrappedImport: true, + {{- end }} + {{- if ne $value.ImportIDHandler "" }} + ImportID: {{ $value.ImportIDHandler }}{}, {{- end }} - {{- else if gt (len $value.IdentityAttributes) 0 }} - {{- if or $.IsGlobal $value.IsGlobal }} - Identity: inttypes.GlobalSingleParameterIdentity( - {{- range $value.IdentityAttributes -}} - {{ .Name }}, - {{- end -}} - {{- if $value.HasV6_0SDKv2Fix }} - inttypes.WithV6_0SDKv2Fix(), - {{ end -}} - {{- if .HasIdentityFix }} - inttypes.WithIdentityFix(), - {{- end }} - ), + }, + {{- end }} + }, +{{- end }} + } +} + +{{ if .SDKListResources }} +func (p *servicePackage) SDKListResources(ctx context.Context) iter.Seq[*inttypes.ServicePackageSDKListResource] { + return slices.Values([]*inttypes.ServicePackageSDKListResource { +{{- range $key, $value := .SDKListResources }} + {{- $regionOverrideEnabled := and (not $.IsGlobal) $value.RegionOverrideEnabled }} + { + Factory: {{ $value.FactoryName }}, + TypeName: "{{ $key }}", + Name: "{{ $value.Name }}", + {{- if and $regionOverrideEnabled $value.ValidateRegionOverrideInPartition }} + Region: unique.Make(inttypes.ResourceRegionDefault()), + {{- else if not $regionOverrideEnabled }} + Region: unique.Make(inttypes.ResourceRegionDisabled()), + {{- else }} + Region: unique.Make(inttypes.ServicePackageResourceRegion { + IsOverrideEnabled: {{ $regionOverrideEnabled }}, + IsValidateOverrideInPartition: {{ $value.ValidateRegionOverrideInPartition }}, + }), + {{- end }} + {{- if .TransparentTagging }} + Tags: unique.Make(inttypes.ServicePackageResourceTags { + {{- if ne .TagsIdentifierAttribute "" }} + IdentifierAttribute: {{ .TagsIdentifierAttribute }}, + {{- end }} + {{- if ne .TagsResourceType "" }} + ResourceType: "{{ .TagsResourceType }}", + {{- end }} + }), + {{- end }} + {{- if gt (len $value.IdentityAttributes) 1 }} + {{- if or 
$.IsGlobal $value.IsGlobal }} + Identity: inttypes.GlobalParameterizedIdentity([]inttypes.IdentityAttribute{ + {{- range $value.IdentityAttributes }} + {{ template "IdentifierAttribute" . }} + {{- end }} + }, + {{- template "CommonIdentityOpts" . -}} + ), + {{- else }} + Identity: inttypes.RegionalParameterizedIdentity([]inttypes.IdentityAttribute{ + {{- range $value.IdentityAttributes }} + {{ template "IdentifierAttribute" . }} + {{- end }} + }, + {{- template "CommonIdentityOpts" . -}} + ), + {{- end }} + {{- else if gt (len $value.IdentityAttributes) 0 }} + {{- if or $.IsGlobal $value.IsGlobal }} + Identity: inttypes.GlobalSingleParameterIdentity( + {{- range $value.IdentityAttributes -}} + {{ .Name }}, + {{- end -}} + {{- template "CommonIdentityOpts" . -}} + ), + {{- else }} + Identity: inttypes.RegionalSingleParameterIdentity( + {{- range $value.IdentityAttributes -}} + {{ .Name }}, + {{- end -}} + {{- template "CommonIdentityOpts" . -}} + ), + {{- end }} + {{- else if $value.ARNIdentity }} + {{- if $.IsGlobal }} + {{- if $value.HasARNAttribute }} + Identity: inttypes.GlobalARNIdentityNamed({{ $value.ARNAttribute }}, {{- else }} - Identity: inttypes.RegionalSingleParameterIdentity( - {{- range $value.IdentityAttributes -}} - {{ .Name }}, - {{- end }} - {{- if $value.HasV6_0SDKv2Fix }} - inttypes.WithV6_0SDKv2Fix(), - {{ end -}} - {{- if .HasIdentityFix }} - inttypes.WithIdentityFix(), - {{- end }} - ), + Identity: inttypes.GlobalARNIdentity( {{- end }} - {{- else if $value.ARNIdentity }} - {{- if $.IsGlobal }} + {{- else }} + {{- if $value.IsARNFormatGlobal }} {{- if $value.HasARNAttribute }} - Identity: inttypes.GlobalARNIdentityNamed({{ $value.ARNAttribute }}, - inttypes.WithIdentityDuplicateAttrs(names.AttrID), - {{- if $value.HasV6_0SDKv2Fix }} - inttypes.WithV6_0SDKv2Fix(), - {{ end -}} - {{- if .HasIdentityFix }} - inttypes.WithIdentityFix(), - {{- end }} - ), + Identity: inttypes.RegionalResourceWithGlobalARNFormatNamed({{ $value.ARNAttribute }}, {{- 
else }} - Identity: inttypes.GlobalARNIdentity( - inttypes.WithIdentityDuplicateAttrs(names.AttrID), - {{- if $value.HasV6_0SDKv2Fix }} - inttypes.WithV6_0SDKv2Fix(), - {{ end -}} - {{- if .HasIdentityFix }} - inttypes.WithIdentityFix(), - {{- end }} - ), + Identity: inttypes.RegionalResourceWithGlobalARNFormat( {{- end }} {{- else }} {{- if $value.HasARNAttribute }} Identity: inttypes.RegionalARNIdentityNamed({{ $value.ARNAttribute }}, - inttypes.WithIdentityDuplicateAttrs(names.AttrID), - {{- if $value.HasV6_0SDKv2Fix }} - inttypes.WithV6_0SDKv2Fix(), - {{ end -}} - {{- if .HasIdentityFix }} - inttypes.WithIdentityFix(), - {{- end }} - ), {{- else }} Identity: inttypes.RegionalARNIdentity( - inttypes.WithIdentityDuplicateAttrs(names.AttrID), - {{- if $value.HasV6_0SDKv2Fix }} - inttypes.WithV6_0SDKv2Fix(), - {{ end -}} - {{- if .HasIdentityFix }} - inttypes.WithIdentityFix(), - {{- end }} - ), {{- end }} {{- end }} - {{- else if $value.SingletonIdentity }} - {{- if or $.IsGlobal $value.IsGlobal }} - Identity: inttypes.GlobalSingletonIdentity( - {{- if $value.HasV6_0SDKv2Fix }} - inttypes.WithV6_0SDKv2Fix(), - {{ end -}} - {{- if .HasIdentityFix }} - inttypes.WithIdentityFix(), - {{- end }} - ), - {{- else }} - Identity: inttypes.RegionalSingletonIdentity( - {{- if $value.HasV6_0SDKv2Fix }} - inttypes.WithV6_0SDKv2Fix(), - {{ end -}} - {{- if .HasIdentityFix }} - inttypes.WithIdentityFix(), - {{- end }} - ), - {{- end }} {{- end }} - {{- if $value.WrappedImport }} - Import: inttypes.SDKv2Import{ - WrappedImport: true, - {{- if ne $value.ImportIDHandler "" }} - ImportID: {{ $value.ImportIDHandler }}{}, - {{- end }} - }, + {{- if .HasIdentityDuplicateAttrs -}} + inttypes.WithIdentityDuplicateAttrs({{ range .IdentityDuplicateAttrs }}{{ . }}, {{ end }}), + {{- end -}} + {{- template "CommonIdentityOpts" . 
-}} + ), + {{- else if $value.SingletonIdentity }} + {{- if or $.IsGlobal $value.IsGlobal }} + Identity: inttypes.GlobalSingletonIdentity( + {{- if .HasIdentityDuplicateAttrs -}} + inttypes.WithIdentityDuplicateAttrs({{ range .IdentityDuplicateAttrs }}{{ . }}, {{ end }}), + {{- end -}} + {{- template "CommonIdentityOpts" . -}} + ), + {{ else }} + Identity: inttypes.RegionalSingletonIdentity( + {{- if .HasIdentityDuplicateAttrs -}} + inttypes.WithIdentityDuplicateAttrs({{ range .IdentityDuplicateAttrs }}{{ . }}, {{ end }}), + {{- end -}} + {{- template "CommonIdentityOpts" . -}} + ), {{- end }} {{- end }} }, {{- end }} - } + }) } +{{- end }} + func (p *servicePackage) ServicePackageName() string { {{- if eq .ProviderPackage "meta" }} @@ -456,7 +665,7 @@ func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) ( func(o *{{ .GoV2Package }}.Options) { if inContext, ok := conns.FromContext(ctx); ok && inContext.VCREnabled() { tflog.Info(ctx, "overriding retry behavior to immediately return VCR errors") - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(vcr.InteractionNotFoundRetryableFunc)) + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), vcr.InteractionNotFoundRetryableFunc) } }, {{- if gt (len .EndpointRegionOverrides) 0 }} diff --git a/internal/generate/servicesemgrep/cae.tmpl b/internal/generate/servicesemgrep/cae.tmpl index 913af928a588..39ce8cc86edc 100644 --- a/internal/generate/servicesemgrep/cae.tmpl +++ b/internal/generate/servicesemgrep/cae.tmpl @@ -6,11 +6,11 @@ rules: message: Do not use "AWS" in func name inside AWS Provider paths: include: - - internal + - "/internal" exclude: - - internal/service/securitylake/aws_log_source.go - - internal/service/securitylake/aws_log_source_test.go - - internal/service/*/service_endpoints_gen_test.go + - "/internal/service/securitylake/aws_log_source.go" + - "/internal/service/securitylake/aws_log_source_test.go" + - 
"/internal/service/*/service_endpoints_gen_test.go" patterns: - pattern: func $NAME( ... ) { ... } - metavariable-pattern: @@ -25,10 +25,10 @@ rules: message: Do not use "AWS" in const name inside AWS Provider paths: include: - - internal + - "/internal" exclude: - - internal/service/securitylake/aws_log_source.go - - internal/service/*/service_endpoints_gen_test.go + - "/internal/service/securitylake/aws_log_source.go" + - "/internal/service/*/service_endpoints_gen_test.go" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -43,11 +43,11 @@ rules: message: Do not use "AWS" in var name inside AWS Provider paths: include: - - internal + - "/internal" exclude: - - internal/service/securitylake/aws_log_source.go - - internal/service/securitylake/exports_test.go - - internal/service/*/service_endpoints_gen_test.go + - "/internal/service/securitylake/aws_log_source.go" + - "/internal/service/securitylake/exports_test.go" + - "/internal/service/*/service_endpoints_gen_test.go" patterns: - pattern: var $NAME = ... - metavariable-pattern: @@ -63,7 +63,7 @@ rules: message: Use correct caps in func name (i.e., HTTPS or https, not Https) (see list at https://github.com/hashicorp/terraform-provider-aws/blob/main/names/caps.md) paths: include: - - internal + - "/internal" patterns: - pattern: func $NAME( ... ) { ... } - metavariable-pattern: @@ -78,7 +78,7 @@ rules: message: Use correct caps in const name (i.e., HTTPS or https, not Https) (see list at https://github.com/hashicorp/terraform-provider-aws/blob/main/names/caps.md) paths: include: - - internal + - "/internal" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -92,7 +92,7 @@ rules: message: Use correct caps in var name (i.e., HTTPS or https, not Https) (see list at https://github.com/hashicorp/terraform-provider-aws/blob/main/names/caps.md) paths: include: - - internal + - "/internal" patterns: - pattern: var $NAME = ... 
- metavariable-pattern: @@ -107,7 +107,7 @@ rules: message: Do not use "EC2" in func name inside ec2 package paths: include: - - internal/service/ec2 + - "/internal/service/ec2" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -123,7 +123,7 @@ rules: message: Do not use "EC2" in const name inside ec2 package paths: include: - - internal/service/ec2 + - "/internal/service/ec2" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -137,7 +137,7 @@ rules: message: Do not use "EC2" in var name inside ec2 package paths: include: - - internal/service/ec2 + - "/internal/service/ec2" patterns: - pattern: var $NAME = ... - metavariable-pattern: diff --git a/internal/generate/servicesemgrep/configs.tmpl b/internal/generate/servicesemgrep/configs.tmpl index 821176b03bc9..6023bd7c57fa 100644 --- a/internal/generate/servicesemgrep/configs.tmpl +++ b/internal/generate/servicesemgrep/configs.tmpl @@ -6,7 +6,7 @@ rules: message: "Config funcs should follow form testAccConfig_" paths: include: - - internal/service/**/*_test.go + - "/internal/service/**/*_test.go" patterns: - pattern-inside: "[]resource.TestStep{ ... }" - pattern: "{..., $KEY:$VALUE, ...}" @@ -28,7 +28,7 @@ rules: message: "Config funcs should follow form testAccConfig_" paths: include: - - internal/service/**/*_test.go + - "/internal/service/**/*_test.go" patterns: - pattern-inside: "[]resource.TestStep{ ... }" - pattern: "{..., $KEY: acctest.ConfigCompose(..., $VALUE, ...), ...}" @@ -49,7 +49,7 @@ rules: message: "Config funcs should not begin with 'testAccCheck'" paths: include: - - internal/service/**/*_test.go + - "/internal/service/**/*_test.go" patterns: - pattern-inside: "[]resource.TestStep{ ... }" - pattern: "{..., $KEY:$VALUE, ...}" @@ -69,7 +69,7 @@ rules: message: "Config funcs should not begin with 'testAccCheck'" paths: include: - - internal/service/**/*_test.go + - "/internal/service/**/*_test.go" patterns: - pattern-inside: "[]resource.TestStep{ ... 
}" - pattern: "{..., $KEY: acctest.ConfigCompose(..., $VALUE, ...), ...}" diff --git a/internal/generate/servicesemgrep/service.tmpl b/internal/generate/servicesemgrep/service.tmpl index fc6b2e6aebd2..6648736c1511 100644 --- a/internal/generate/servicesemgrep/service.tmpl +++ b/internal/generate/servicesemgrep/service.tmpl @@ -6,9 +6,9 @@ message: Do not use "{{ .ServiceAlias }}" in func name inside {{ .ProviderPackage }} package paths: include: - - internal/service/{{ .ProviderPackage }} + - "/internal/service/{{ .ProviderPackage }}" exclude: - - internal/service/{{ .ProviderPackage }}/list_pages_gen.go + - "/internal/service/{{ .ProviderPackage }}/list_pages_gen.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -49,7 +49,7 @@ message: Include "{{ .ServiceAlias }}" in test name paths: include: - - internal/service/{{ .ProviderPackage }}/{{ .FilePrefix }}*_test.go + - "/internal/service/{{ .ProviderPackage }}/{{ .FilePrefix }}*_test.go" patterns: - pattern: func $NAME( ... ) - metavariable-pattern: @@ -66,7 +66,7 @@ message: Do not use "{{ .ServiceAlias }}" in const name inside {{ .ProviderPackage }} package paths: include: - - internal/service/{{ .ProviderPackage }} + - "/internal/service/{{ .ProviderPackage }}" patterns: - pattern: const $NAME = ... - metavariable-pattern: @@ -92,7 +92,7 @@ message: Do not use "{{ .ServiceAlias }}" in var name inside {{ .ProviderPackage }} package paths: include: - - internal/service/{{ .ProviderPackage }} + - "/internal/service/{{ .ProviderPackage }}" patterns: - pattern: var $NAME = ... 
- metavariable-pattern: diff --git a/internal/generate/tags/main.go b/internal/generate/tags/main.go index b4d15928a11d..5d5683354c94 100644 --- a/internal/generate/tags/main.go +++ b/internal/generate/tags/main.go @@ -87,6 +87,7 @@ var ( tagOpBatchSize = flag.Int("TagOpBatchSize", 0, "tagOpBatchSize") tagResTypeElem = flag.String("TagResTypeElem", "", "tagResTypeElem") tagResTypeElemType = flag.String("TagResTypeElemType", "", "tagResTypeElemType") + tagResTypeIsAccountID = flag.Bool("TagResTypeIsAccountID", false, "tagResTypeIsAccountID") tagType = flag.String("TagType", "Tag", "tagType") tagType2 = flag.String("TagType2", "", "tagType") tagTypeAddBoolElem = flag.String("TagTypeAddBoolElem", "", "TagTypeAddBoolElem") @@ -182,6 +183,7 @@ type TemplateData struct { TagOpBatchSize int TagResTypeElem string TagResTypeElemType string + TagResTypeIsAccountID bool TagType string TagType2 string TagTypeAddBoolElem string @@ -242,6 +244,10 @@ func main() { createTagsFunc = "" } + if *tagResTypeIsAccountID && *tagResTypeElem == "" { + g.Errorf("TagResTypeIsAccountID requires TagResTypeElem") + } + clientType := fmt.Sprintf("*%s.Client", awsPkg) providerNameUpper := service.ProviderNameUpper() templateData := TemplateData{ @@ -281,6 +287,7 @@ func main() { TagOpBatchSize: *tagOpBatchSize, TagResTypeElem: *tagResTypeElem, TagResTypeElemType: *tagResTypeElemType, + TagResTypeIsAccountID: *tagResTypeIsAccountID, TagType: *tagType, TagType2: *tagType2, TagTypeAddBoolElem: *tagTypeAddBoolElem, diff --git a/internal/generate/tags/templates/get_tag_body.gtpl b/internal/generate/tags/templates/get_tag_body.gtpl index 7bfdd07dc773..2911ef535d49 100644 --- a/internal/generate/tags/templates/get_tag_body.gtpl +++ b/internal/generate/tags/templates/get_tag_body.gtpl @@ -23,8 +23,8 @@ func {{ .GetTagFunc }}(ctx context.Context, conn {{ .ClientType }}, identifier{{ } {{ if .RetryTagOps }} - output, err := tfresource.RetryGWhenIsAErrorMessageContains[*{{ .AWSService }}.{{ 
.RetryTagsListTagsType }}, *{{ .RetryErrorCode }}](ctx, {{ .RetryTimeout }}, - func() (*{{ .AWSService }}.{{ .RetryTagsListTagsType }}, error) { + output, err := tfresource.RetryWhenIsAErrorMessageContains[*{{ .AWSService }}.{{ .RetryTagsListTagsType }}, *{{ .RetryErrorCode }}](ctx, {{ .RetryTimeout }}, + func(ctx context.Context) (*{{ .AWSService }}.{{ .RetryTagsListTagsType }}, error) { return conn.{{ .ListTagsOp }}(ctx, &input, optFns...) }, "{{ .RetryErrorMessage }}", @@ -34,7 +34,7 @@ func {{ .GetTagFunc }}(ctx context.Context, conn {{ .ClientType }}, identifier{{ {{- end }} if err != nil { - return nil, err + return nil, smarterr.NewError(err) } listTags := {{ .KeyValueTagsFunc }}(ctx, output.{{ .ListTagsOutTagsElem }}{{ if .TagTypeIDElem }}, identifier{{ if .TagResTypeElem }}, resourceType{{ end }}{{ end }}) @@ -42,12 +42,12 @@ func {{ .GetTagFunc }}(ctx context.Context, conn {{ .ClientType }}, identifier{{ listTags, err := {{ .ListTagsFunc }}(ctx, conn, identifier{{ if .TagResTypeElem }}, resourceType{{ end }}, optFns...) 
if err != nil { - return nil, err + return nil, smarterr.NewError(err) } {{- end }} if !listTags.KeyExists(key) { - return nil, tfresource.NewEmptyResultError(nil) + return nil, smarterr.NewError(tfresource.NewEmptyResultError(nil)) } {{ if or ( .TagTypeIDElem ) ( .TagTypeAddBoolElem) }} diff --git a/internal/generate/tags/templates/header_body.gtpl b/internal/generate/tags/templates/header_body.gtpl index 6f56083351ce..88505d02239c 100644 --- a/internal/generate/tags/templates/header_body.gtpl +++ b/internal/generate/tags/templates/header_body.gtpl @@ -7,6 +7,7 @@ import ( "maps" "time" + "github.com/YakDriver/smarterr" "github.com/aws/aws-sdk-go-v2/aws" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/aws/aws-sdk-go-v2/service/{{ .AWSService }}" diff --git a/internal/generate/tags/templates/list_tags_body.gtpl b/internal/generate/tags/templates/list_tags_body.gtpl index 544eb8ccb97d..c8fa247f69fc 100644 --- a/internal/generate/tags/templates/list_tags_body.gtpl +++ b/internal/generate/tags/templates/list_tags_body.gtpl @@ -27,8 +27,8 @@ func {{ .ListTagsFunc }}(ctx context.Context, conn {{ .ClientType }}, identifier } {{- if .ListTagsOpPaginated }} {{- if .RetryTagOps }} - output, err := tfresource.RetryGWhenIsAErrorMessageContains[*{{ .AWSService }}.{{ .RetryTagsListTagsType }}, *{{ .RetryErrorCode }}](ctx, {{ .RetryTimeout }}, - func() (*{{ .AWSService }}.{{ .RetryTagsListTagsType }}, error) { + output, err := tfresource.RetryWhenIsAErrorMessageContains[*{{ .AWSService }}.{{ .RetryTagsListTagsType }}, *{{ .RetryErrorCode }}](ctx, {{ .RetryTimeout }}, + func(ctx context.Context) (*{{ .AWSService }}.{{ .RetryTagsListTagsType }}, error) { var output []awstypes.{{ or .TagType2 .TagType }} pages := {{ .AWSService }}.New{{ .ListTagsOp }}Paginator(conn, &input) @@ -37,22 +37,22 @@ func {{ .ListTagsFunc }}(ctx context.Context, conn {{ .ClientType }}, identifier {{ if and ( .ParentNotFoundErrCode ) ( .ParentNotFoundErrMsg ) }} if 
tfawserr.ErrMessageContains(err, "{{ .ParentNotFoundErrCode }}", "{{ .ParentNotFoundErrMsg }}") { - return nil, &retry.NotFoundError{ + return nil, smarterr.NewError(&retry.NotFoundError{ LastError: err, LastRequest: &input, - } + }) } {{- else if ( .ParentNotFoundErrCode ) }} if tfawserr.ErrCodeEquals(err, "{{ .ParentNotFoundErrCode }}") { - return nil, &retry.NotFoundError{ + return nil, smarterr.NewError(&retry.NotFoundError{ LastError: err, LastRequest: &input, - } + }) } {{- end }} if err != nil { - return tftags.New(ctx, nil), err + return tftags.New(ctx, nil), smarterr.NewError(err) } output = append(output, page.{{ .ListTagsOutTagsElem }}...) @@ -108,22 +108,22 @@ func {{ .ListTagsFunc }}(ctx context.Context, conn {{ .ClientType }}, identifier {{ if and ( .ParentNotFoundErrCode ) ( .ParentNotFoundErrMsg ) }} if tfawserr.ErrMessageContains(err, "{{ .ParentNotFoundErrCode }}", "{{ .ParentNotFoundErrMsg }}") { - return nil, &retry.NotFoundError{ + return nil, smarterr.NewError(&retry.NotFoundError{ LastError: err, LastRequest: &input, - } + }) } {{- else if ( .ParentNotFoundErrCode ) }} if tfawserr.ErrCodeEquals(err, "{{ .ParentNotFoundErrCode }}") { - return nil, &retry.NotFoundError{ + return nil, smarterr.NewError(&retry.NotFoundError{ LastError: err, LastRequest: &input, - } + }) } {{- end }} if err != nil { - return tftags.New(ctx, nil), err + return tftags.New(ctx, nil), smarterr.NewError(err) } {{ if .ServiceTagsMap }} @@ -139,8 +139,8 @@ func {{ .ListTagsFunc }}(ctx context.Context, conn {{ .ClientType }}, identifier {{- else }} {{ if .RetryTagOps }} - output, err := tfresource.RetryGWhenIsAErrorMessageContains[*{{ .AWSService }}.{{ .RetryTagsListTagsType }}, *{{ .RetryErrorCode }}](ctx, {{ .RetryTimeout }}, - func() (*{{ .AWSService }}.{{ .RetryTagsListTagsType }}, error) { + output, err := tfresource.RetryWhenIsAErrorMessageContains[*{{ .AWSService }}.{{ .RetryTagsListTagsType }}, *{{ .RetryErrorCode }}](ctx, {{ .RetryTimeout }}, + func(ctx 
context.Context) (*{{ .AWSService }}.{{ .RetryTagsListTagsType }}, error) { return conn.{{ .ListTagsOp }}(ctx, &input, optFns...) }, "{{ .RetryErrorMessage }}", @@ -151,22 +151,22 @@ func {{ .ListTagsFunc }}(ctx context.Context, conn {{ .ClientType }}, identifier {{ if and ( .ParentNotFoundErrCode ) ( .ParentNotFoundErrMsg ) }} if tfawserr.ErrMessageContains(err, "{{ .ParentNotFoundErrCode }}", "{{ .ParentNotFoundErrMsg }}") { - return nil, &retry.NotFoundError{ + return nil, smarterr.NewError(&retry.NotFoundError{ LastError: err, LastRequest: &input, - } + }) } {{- else if ( .ParentNotFoundErrCode ) }} if tfawserr.ErrCodeEquals(err, "{{ .ParentNotFoundErrCode }}") { - return nil, &retry.NotFoundError{ + return nil, smarterr.NewError(&retry.NotFoundError{ LastError: err, LastRequest: &input, - } + }) } {{- end }} if err != nil { - return tftags.New(ctx, nil), err + return tftags.New(ctx, nil), smarterr.NewError(err) } return {{ .KeyValueTagsFunc }}(ctx, output.{{ .ListTagsOutTagsElem }}{{ if .TagTypeIDElem }}, identifier{{ if .TagResTypeElem }}, resourceType{{ end }}{{ end }}), nil @@ -176,11 +176,22 @@ func {{ .ListTagsFunc }}(ctx context.Context, conn {{ .ClientType }}, identifier {{- if .IsDefaultListTags }} // {{ .ListTagsFunc | Title }} lists {{ .ServicePackage }} service tags and set them in Context. // It is called from outside this package. 
-func (p *servicePackage) {{ .ListTagsFunc | Title }}(ctx context.Context, meta any, identifier{{ if .TagResTypeElem }}, resourceType{{ end }} string) error { - tags, err := {{ .ListTagsFunc }}(ctx, meta.(*conns.AWSClient).{{ .ProviderNameUpper }}Client(ctx), identifier{{ if .TagResTypeElem }}, resourceType{{ end }}) +{{- if .TagResTypeElem }} +{{- if .TagResTypeIsAccountID }} +func (p *servicePackage) {{ .ListTagsFunc | Title }}(ctx context.Context, meta any, identifier string) error { + c := meta.(*conns.AWSClient) + tags, err := {{ .ListTagsFunc }}(ctx, c.{{ .ProviderNameUpper }}Client(ctx), identifier, c.AccountID(ctx)) +{{- else }} +func (p *servicePackage) {{ .ListTagsFunc | Title }}(ctx context.Context, meta any, identifier, resourceType string) error { + tags, err := {{ .ListTagsFunc }}(ctx, meta.(*conns.AWSClient).{{ .ProviderNameUpper }}Client(ctx), identifier, resourceType) +{{- end }} +{{- else }} +func (p *servicePackage) {{ .ListTagsFunc | Title }}(ctx context.Context, meta any, identifier string) error { + tags, err := {{ .ListTagsFunc }}(ctx, meta.(*conns.AWSClient).{{ .ProviderNameUpper }}Client(ctx), identifier) +{{- end }} if err != nil { - return err + return smarterr.NewError(err) } if inContext, ok := tftags.FromContext(ctx); ok { diff --git a/internal/generate/tags/templates/service_tags_map_body.gtpl b/internal/generate/tags/templates/service_tags_map_body.gtpl index 6eda7ea8870c..41a061936731 100644 --- a/internal/generate/tags/templates/service_tags_map_body.gtpl +++ b/internal/generate/tags/templates/service_tags_map_body.gtpl @@ -36,6 +36,6 @@ func {{ .CreateTagsFunc }}(ctx context.Context, conn {{ .ClientType }}, identifi return nil } - return {{ .UpdateTagsFunc }}(ctx, conn, identifier{{ if .TagResTypeElem }}, resourceType{{ end }}, nil, tags, optFns...) + return {{ .UpdateTagsFunc }}(ctx, conn, identifier{{ if .TagResTypeElem }}, resourceType{{ end }}, nil, tags, optFns...) 
} {{- end }} diff --git a/internal/generate/tags/templates/update_tags_body.gtpl b/internal/generate/tags/templates/update_tags_body.gtpl index 1e7a9ff7b2dd..09800f8ce06f 100644 --- a/internal/generate/tags/templates/update_tags_body.gtpl +++ b/internal/generate/tags/templates/update_tags_body.gtpl @@ -62,8 +62,8 @@ func {{ .UpdateTagsFunc }}(ctx context.Context, conn {{ .ClientType }}, identifi {{- end }} } {{ if .RetryTagOps }} - _, err := tfresource.RetryWhenIsAErrorMessageContains[*{{ .RetryErrorCode }}](ctx, {{ .RetryTimeout }}, - func() (any, error) { + _, err := tfresource.RetryWhenIsAErrorMessageContains[any, *{{ .RetryErrorCode }}](ctx, {{ .RetryTimeout }}, + func(ctx context.Context) (any, error) { return conn.{{ .TagOp }}(ctx, &input, optFns...) }, "{{ .RetryErrorMessage }}", @@ -73,7 +73,7 @@ func {{ .UpdateTagsFunc }}(ctx context.Context, conn {{ .ClientType }}, identifi {{- end }} if err != nil { - return fmt.Errorf("tagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } {{- else }} @@ -112,8 +112,8 @@ func {{ .UpdateTagsFunc }}(ctx context.Context, conn {{ .ClientType }}, identifi {{- end }} } {{ if .RetryTagOps }} - _, err := tfresource.RetryWhenIsAErrorMessageContains[*{{ .RetryErrorCode }}](ctx, {{ .RetryTimeout }}, - func() (any, error) { + _, err := tfresource.RetryWhenIsAErrorMessageContains[any, *{{ .RetryErrorCode }}](ctx, {{ .RetryTimeout }}, + func(ctx context.Context) (any, error) { return conn.{{ .UntagOp }}(ctx, &input, optFns...) 
}, "{{ .RetryErrorMessage }}", @@ -123,7 +123,7 @@ func {{ .UpdateTagsFunc }}(ctx context.Context, conn {{ .ClientType }}, identifi {{- end }} if err != nil { - return fmt.Errorf("untagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } {{- if .TagOpBatchSize }} } @@ -161,8 +161,8 @@ func {{ .UpdateTagsFunc }}(ctx context.Context, conn {{ .ClientType }}, identifi } {{ if .RetryTagOps }} - _, err := tfresource.RetryWhenIsAErrorMessageContains[*{{ .RetryErrorCode }}](ctx, {{ .RetryTimeout }}, - func() (any, error) { + _, err := tfresource.RetryWhenIsAErrorMessageContains[any, *{{ .RetryErrorCode }}](ctx, {{ .RetryTimeout }}, + func(ctx context.Context) (any, error) { return conn.{{ .TagOp }}(ctx, &input, optFns...) }, "{{ .RetryErrorMessage }}", @@ -172,7 +172,7 @@ func {{ .UpdateTagsFunc }}(ctx context.Context, conn {{ .ClientType }}, identifi {{- end }} if err != nil { - return fmt.Errorf("tagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } {{- if .TagOpBatchSize }} } @@ -184,7 +184,7 @@ func {{ .UpdateTagsFunc }}(ctx context.Context, conn {{ .ClientType }}, identifi {{ if .WaitForPropagation }} if len(removedTags) > 0 || len(updatedTags) > 0 { if err := {{ .WaitTagsPropagatedFunc }}(ctx, conn, identifier, newTags, optFns...); err != nil { - return fmt.Errorf("waiting for resource (%s) tag propagation: %w", identifier, err) + return smarterr.NewError(err) } } {{- end }} @@ -195,7 +195,20 @@ func {{ .UpdateTagsFunc }}(ctx context.Context, conn {{ .ClientType }}, identifi {{- if .IsDefaultUpdateTags }} // {{ .UpdateTagsFunc | Title }} updates {{ .ServicePackage }} service tags. // It is called from outside this package. 
-func (p *servicePackage) {{ .UpdateTagsFunc | Title }}(ctx context.Context, meta any, identifier{{ if .TagResTypeElem }}, resourceType{{ end }} string, oldTags, newTags any) error { - return {{ .UpdateTagsFunc }}(ctx, meta.(*conns.AWSClient).{{ .ProviderNameUpper }}Client(ctx), identifier{{ if .TagResTypeElem }}, resourceType{{ end }}, oldTags, newTags) +{{- if .TagResTypeElem }} +{{- if .TagResTypeIsAccountID }} +func (p *servicePackage) {{ .UpdateTagsFunc | Title }}(ctx context.Context, meta any, identifier string, oldTags, newTags any) error { + c := meta.(*conns.AWSClient) + return {{ .UpdateTagsFunc }}(ctx, c.{{ .ProviderNameUpper }}Client(ctx), identifier, c.AccountID(ctx), oldTags, newTags) } +{{- else }} +func (p *servicePackage) {{ .UpdateTagsFunc | Title }}(ctx context.Context, meta any, identifier, resourceType string, oldTags, newTags any) error { + return {{ .UpdateTagsFunc }}(ctx, meta.(*conns.AWSClient).{{ .ProviderNameUpper }}Client(ctx), identifier, resourceType, oldTags, newTags) +} +{{- end }} +{{- else }} +func (p *servicePackage) {{ .UpdateTagsFunc | Title }}(ctx context.Context, meta any, identifier string, oldTags, newTags any) error { + return {{ .UpdateTagsFunc }}(ctx, meta.(*conns.AWSClient).{{ .ProviderNameUpper }}Client(ctx), identifier, oldTags, newTags) +} +{{- end }} {{- end }} diff --git a/internal/generate/tags/templates/wait_tags_propagated_body.gtpl b/internal/generate/tags/templates/wait_tags_propagated_body.gtpl index f4db002917b8..82c8a68b5343 100644 --- a/internal/generate/tags/templates/wait_tags_propagated_body.gtpl +++ b/internal/generate/tags/templates/wait_tags_propagated_body.gtpl @@ -14,7 +14,7 @@ func {{ .WaitTagsPropagatedFunc }}(ctx context.Context, conn {{ .ClientType }}, } if err != nil { - return false, err + return false, smarterr.NewError(err) } if inContext, ok := tftags.FromContext(ctx); ok { diff --git a/internal/generate/tagstests/data_source_test.go.gtpl 
b/internal/generate/tagstests/data_source_test.go.gtpl index ef322e71e2db..7474cab3e787 100644 --- a/internal/generate/tagstests/data_source_test.go.gtpl +++ b/internal/generate/tagstests/data_source_test.go.gtpl @@ -1,33 +1,7 @@ // Code generated by internal/generate/tagstests/main.go; DO NOT EDIT. -{{ define "Init" }} - ctx := acctest.Context(t) - dataSourceName := "data.{{ .TypeName}}.test"{{ if .Generator }} - rName := {{ .Generator }} -{{- end }} -{{ range .InitCodeBlocks -}} -{{ .Code }} -{{- end }} -{{ end }} - -{{ define "Test" -}} -resource.{{ if and .Serialize (not .SerializeParallelTests) }}Test{{ else }}ParallelTest{{ end }} -{{- end }} - -{{ define "TestCaseSetup" -}} -{{ template "TestCaseSetupNoProviders" . -}} -{{ if not .AlternateRegionProvider }} - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, -{{- end -}} -{{- end }} - {{ define "TestCaseSetupNoProviders" -}} - PreCheck: func() { acctest.PreCheck(ctx, t) - {{- range .PreChecks }} - {{ .Code }} - {{ end -}} - }, - ErrorCheck: acctest.ErrorCheck(t, names.{{ .PackageProviderNameUpper }}ServiceID), + {{ template "CommonTestCaseChecks" . 
-}} {{- end }} {{ define "TagsKnownValueForNull" -}} @@ -50,40 +24,6 @@ plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), know {{- end }} {{- end }} -{{ define "ImportBody" }} - ResourceName: resourceName, - ImportState: true, -{{ if gt (len .ImportStateID) 0 -}} - ImportStateId: {{ .ImportStateID }}, -{{ end -}} -{{ if gt (len .ImportStateIDFunc) 0 -}} - ImportStateIdFunc: {{ .ImportStateIDFunc }}(resourceName), -{{ end -}} - ImportStateVerify: true, -{{ if gt (len .ImportIgnore) 0 -}} - ImportStateVerifyIgnore: []string{ - {{ range $i, $v := .ImportIgnore }}{{ $v }},{{ end }} - }, -{{- end }} -{{ end }} - -{{ define "testname" -}} -{{ if .Serialize }}testAcc{{ else }}TestAcc{{ end }}{{ .ResourceProviderNameUpper }}{{ .Name }}DataSource -{{- end }} - -{{ define "ExistsCheck" }} - testAccCheck{{ .Name }}Exists(ctx, {{ if .ExistsTakesT }}t,{{ end }} resourceName{{ if .ExistsTypeName}}, &v{{ end }}), -{{ end }} - -{{ define "AdditionalTfVars" -}} - {{ range $name, $value := .AdditionalTfVars -}} - {{ $name }}: config.StringVariable({{ $value }}), - {{ end -}} - {{ if .AlternateRegionProvider -}} - "alt_region": config.StringVariable(acctest.AlternateRegion()), - {{ end }} -{{ end }} - package {{ .ProviderPackage }}_test import ( @@ -133,7 +73,7 @@ func {{ template "testname" . }}_tagsSerial(t *testing.T) { func {{ template "testname" . }}_tags(t *testing.T) { {{- template "Init" . }} - {{ template "Test" . }}(t, resource.TestCase{ + {{ template "Test" . }}(ctx, t, resource.TestCase{ {{ template "TestCaseSetup" . }} Steps: []resource.TestStep{ { @@ -161,7 +101,7 @@ func {{ template "testname" . }}_tags(t *testing.T) { func {{ template "testname" . }}_tags_NullMap(t *testing.T) { {{- template "Init" . }} - {{ template "Test" . }}(t, resource.TestCase{ + {{ template "Test" . }}(ctx, t, resource.TestCase{ {{ template "TestCaseSetup" . }} Steps: []resource.TestStep{ { @@ -185,7 +125,7 @@ func {{ template "testname" . 
}}_tags_NullMap(t *testing.T) { func {{ template "testname" . }}_tags_EmptyMap(t *testing.T) { {{- template "Init" . }} - {{ template "Test" . }}(t, resource.TestCase{ + {{ template "Test" . }}(ctx, t, resource.TestCase{ {{ template "TestCaseSetup" . }} Steps: []resource.TestStep{ { @@ -209,7 +149,7 @@ func {{ template "testname" . }}_tags_EmptyMap(t *testing.T) { func {{ template "testname" . }}_tags_DefaultTags_nonOverlapping(t *testing.T) { {{- template "Init" . }} - {{ template "Test" . }}(t, resource.TestCase{ + {{ template "Test" . }}(ctx, t, resource.TestCase{ {{ template "TestCaseSetupNoProviders" . }} Steps: []resource.TestStep{ { @@ -243,7 +183,7 @@ func {{ template "testname" . }}_tags_DefaultTags_nonOverlapping(t *testing.T) { func {{ template "testname" . }}_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { {{- template "Init" . }} - {{ template "Test" . }}(t, resource.TestCase{ + {{ template "Test" . }}(ctx, t, resource.TestCase{ {{ template "TestCaseSetupNoProviders" . }} Steps: []resource.TestStep{ { @@ -283,7 +223,7 @@ func {{ template "testname" . }}_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T func {{ template "testname" . }}_tags_IgnoreTags_Overlap_ResourceTag(t *testing.T) { {{- template "Init" . }} - {{ template "Test" . }}(t, resource.TestCase{ + {{ template "Test" . }}(ctx, t, resource.TestCase{ {{ template "TestCaseSetupNoProviders" . 
}} Steps: []resource.TestStep{ { diff --git a/internal/generate/tagstests/main.go b/internal/generate/tagstests/main.go index b8e4c4ce6d0a..e41ffefd55a5 100644 --- a/internal/generate/tagstests/main.go +++ b/internal/generate/tagstests/main.go @@ -19,17 +19,13 @@ import ( "path/filepath" "regexp" "slices" - "strconv" "strings" "text/template" - "time" "github.com/dlclark/regexp2" - acctestgen "github.com/hashicorp/terraform-provider-aws/internal/acctest/generate" "github.com/hashicorp/terraform-provider-aws/internal/generate/common" "github.com/hashicorp/terraform-provider-aws/internal/generate/tests" tfmaps "github.com/hashicorp/terraform-provider-aws/internal/maps" - tfslices "github.com/hashicorp/terraform-provider-aws/internal/slices" "github.com/hashicorp/terraform-provider-aws/names/data" namesgen "github.com/hashicorp/terraform-provider-aws/names/generate" ) @@ -108,24 +104,31 @@ func main() { } for _, resource := range v.taggedResources { + resource.service = &svc + sourceName := resource.FileName ext := filepath.Ext(sourceName) sourceName = strings.TrimSuffix(sourceName, ext) sourceName = strings.TrimSuffix(sourceName, "_") - if name, err := svc.ProviderNameUpper(resource.TypeName); err != nil { - g.Fatalf("determining provider service name: %s", err) - } else { - resource.ResourceProviderNameUpper = name - } - resource.PackageProviderNameUpper = svc.PackageProviderNameUpper() - resource.ProviderPackage = servicePackage - if !resource.IsDataSource { filename := fmt.Sprintf("%s_tags_gen_test.go", sourceName) d := g.NewGoFileDestination(filename) - templates, err := template.New("taggingtests").Parse(resourceTestGoTmpl) + + templateFuncMap := template.FuncMap{ + "inc": func(i int) int { + return i + 1 + }, + } + templates := template.New("taggingtests").Funcs(templateFuncMap) + + templates, err = tests.AddCommonResourceTestTemplates(templates) + if err != nil { + g.Fatalf(err.Error()) + } + + templates, err = templates.Parse(resourceTestGoTmpl) if err != nil 
{ g.Fatalf("parsing base Go test template: %w", err) } @@ -141,7 +144,15 @@ func main() { filename := fmt.Sprintf("%s_tags_gen_test.go", sourceName) d := g.NewGoFileDestination(filename) - templates, err := template.New("taggingtests").Parse(dataSourceTestGoTmpl) + + templates := template.New("taggingtests") + + templates, err = tests.AddCommonDataSourceTestTemplates(templates) + if err != nil { + g.Fatalf(err.Error()) + } + + templates, err = templates.Parse(dataSourceTestGoTmpl) if err != nil { g.Fatalf("parsing base Go test template: %w", err) } @@ -173,7 +184,7 @@ func main() { } if resource.GenerateConfig { - additionalTfVars := tfmaps.Keys(resource.additionalTfVars) + additionalTfVars := tfmaps.Keys(resource.AdditionalTfVars_) slices.Sort(additionalTfVars) testDirPath := path.Join("testdata", resource.Name) @@ -182,7 +193,7 @@ func main() { g.Fatalf("parsing base Terraform config template: %s", err) } - tfTemplates, err = tests.AddCommonTemplates(tfTemplates) + tfTemplates, err = tests.AddCommonTfTemplates(tfTemplates) if err != nil { g.Fatalf(err.Error()) } @@ -238,7 +249,7 @@ func main() { g.Fatalf("opening data source config template %q: %w", dataSourceConfigTmplFile, err) } - additionalTfVars := tfmaps.Keys(resource.additionalTfVars) + additionalTfVars := tfmaps.Keys(resource.AdditionalTfVars_) slices.Sort(additionalTfVars) testDirPath := path.Join("testdata", resource.Name) @@ -247,7 +258,7 @@ func main() { g.Fatalf("parsing base Terraform config template: %s", err) } - tfTemplates, err = tests.AddCommonTemplates(tfTemplates) + tfTemplates, err = tests.AddCommonTfTemplates(tfTemplates) if err != nil { g.Fatalf(err.Error()) } @@ -317,6 +328,10 @@ type serviceRecords struct { additional []data.ServiceRecord } +func (sr serviceRecords) ProviderPackage() string { + return sr.primary.ProviderPackage() +} + func (sr serviceRecords) ProviderNameUpper(typeName string) (string, error) { if len(sr.additional) == 0 { return sr.primary.ProviderNameUpper(), nil @@ 
-374,63 +389,32 @@ func (sr serviceRecords) PackageProviderNameUpper() string { return sr.primary.ProviderNameUpper() } -type implementation string - -const ( - implementationFramework implementation = "framework" - implementationSDK implementation = "sdk" -) - type ResourceDatum struct { - ProviderPackage string - ResourceProviderNameUpper string - PackageProviderNameUpper string - Name string - TypeName string - DestroyTakesT bool - ExistsTypeName string - ExistsTakesT bool + service *serviceRecords FileName string - Generator string - NoImport bool - ImportStateID string - importStateIDAttribute string - ImportStateIDFunc string - ImportIgnore []string - Implementation implementation - Serialize bool - SerializeDelay bool - SerializeParallelTests bool - PreChecks []codeBlock SkipEmptyTags bool // TODO: Remove when we have a strategy for resources that have a minimum tag value length of 1 SkipNullTags bool NoRemoveTags bool - GoImports []goImport GenerateConfig bool - InitCodeBlocks []codeBlock - additionalTfVars map[string]string - AlternateRegionProvider bool TagsUpdateForceNew bool TagsUpdateGetTagsIn bool // TODO: Works around a bug when getTagsIn() is used to pass tags directly to Update call - CheckDestroyNoop bool IsDataSource bool - DataSourceResourceImplementation implementation + DataSourceResourceImplementation tests.Implementation overrideIdentifierAttribute string OverrideResourceType string + tests.CommonArgs } -func (d ResourceDatum) AdditionalTfVars() map[string]string { - return tfmaps.ApplyToAllKeys(d.additionalTfVars, func(k string) string { - return acctestgen.ConstOrQuote(k) - }) +func (d ResourceDatum) ProviderPackage() string { + return d.service.ProviderPackage() } -func (d ResourceDatum) HasImportStateIDAttribute() bool { - return d.importStateIDAttribute != "" +func (d ResourceDatum) ResourceProviderNameUpper() (string, error) { + return d.service.ProviderNameUpper(d.TypeName) } -func (d ResourceDatum) ImportStateIDAttribute() string { - 
return namesgen.ConstOrQuote(d.importStateIDAttribute) +func (d ResourceDatum) PackageProviderNameUpper() string { + return d.service.PackageProviderNameUpper() } func (d ResourceDatum) OverrideIdentifier() bool { @@ -441,15 +425,6 @@ func (d ResourceDatum) OverrideIdentifierAttribute() string { return namesgen.ConstOrQuote(d.overrideIdentifierAttribute) } -type goImport struct { - Path string - Alias string -} - -type codeBlock struct { - Code string -} - type commonConfig struct { AdditionalTfVars []string WithRName bool @@ -536,8 +511,8 @@ func (v *visitor) processFuncDecl(funcDecl *ast.FuncDecl) { // Look first for tagging annotations. d := ResourceDatum{ - FileName: v.fileName, - additionalTfVars: make(map[string]string), + FileName: v.fileName, + CommonArgs: tests.InitCommonArgs(), } tagged := false skip := false @@ -556,7 +531,7 @@ func (v *visitor) processFuncDecl(funcDecl *ast.FuncDecl) { fallthrough case "FrameworkResource": - d.Implementation = implementationFramework + d.Implementation = tests.ImplementationFramework args := common.ParseArgs(m[3]) if len(args.Positional) == 0 { v.errs = append(v.errs, fmt.Errorf("no type name: %s", fmt.Sprintf("%s.%s", v.packageName, v.functionName))) @@ -574,7 +549,7 @@ func (v *visitor) processFuncDecl(funcDecl *ast.FuncDecl) { fallthrough case "SDKResource": - d.Implementation = implementationSDK + d.Implementation = tests.ImplementationSDK args := common.ParseArgs(m[3]) if len(args.Positional) == 0 { v.errs = append(v.errs, fmt.Errorf("no type name: %s", fmt.Sprintf("%s.%s", v.packageName, v.functionName))) @@ -606,146 +581,17 @@ func (v *visitor) processFuncDecl(funcDecl *ast.FuncDecl) { case "Testing": args := common.ParseArgs(m[3]) - if attr, ok := args.Keyword["altRegionProvider"]; ok { - if b, err := strconv.ParseBool(attr); err != nil { - v.errs = append(v.errs, fmt.Errorf("invalid altRegionProvider value: %q at %s. 
Should be boolean value.", attr, fmt.Sprintf("%s.%s", v.packageName, v.functionName))) - continue - } else { - d.AlternateRegionProvider = b - } - } - if attr, ok := args.Keyword["destroyTakesT"]; ok { - if b, err := strconv.ParseBool(attr); err != nil { - v.errs = append(v.errs, fmt.Errorf("invalid destroyTakesT value: %q at %s. Should be boolean value.", attr, fmt.Sprintf("%s.%s", v.packageName, v.functionName))) - continue - } else { - d.DestroyTakesT = b - } - } - if attr, ok := args.Keyword["checkDestroyNoop"]; ok { - if b, err := strconv.ParseBool(attr); err != nil { - v.errs = append(v.errs, fmt.Errorf("invalid checkDestroyNoop value: %q at %s. Should be boolean value.", attr, fmt.Sprintf("%s.%s", v.packageName, v.functionName))) - continue - } else { - d.CheckDestroyNoop = b - d.GoImports = append(d.GoImports, - goImport{ - Path: "github.com/hashicorp/terraform-provider-aws/internal/acctest", - }, - ) - } - } - if attr, ok := args.Keyword["existsType"]; ok { - if typeName, importSpec, err := parseIdentifierSpec(attr); err != nil { - v.errs = append(v.errs, fmt.Errorf("%s: %w", attr, fmt.Sprintf("%s.%s", v.packageName, v.functionName), err)) - continue - } else { - d.ExistsTypeName = typeName - if importSpec != nil { - d.GoImports = append(d.GoImports, *importSpec) - } - } - } - if attr, ok := args.Keyword["existsTakesT"]; ok { - if b, err := strconv.ParseBool(attr); err != nil { - v.errs = append(v.errs, fmt.Errorf("invalid existsTakesT value: %q at %s. 
Should be boolean value.", attr, fmt.Sprintf("%s.%s", v.packageName, v.functionName))) - continue - } else { - d.ExistsTakesT = b - } - } - if attr, ok := args.Keyword["generator"]; ok { - if attr == "false" { - generatorSeen = true - } else if funcName, importSpec, err := parseIdentifierSpec(attr); err != nil { - v.errs = append(v.errs, fmt.Errorf("%s: %w", attr, fmt.Sprintf("%s.%s", v.packageName, v.functionName), err)) - continue - } else { - d.Generator = funcName - if importSpec != nil { - d.GoImports = append(d.GoImports, *importSpec) - } - generatorSeen = true - } + if err := tests.ParseTestingAnnotations(args, &d.CommonArgs); err != nil { + v.errs = append(v.errs, fmt.Errorf("%s: %w", fmt.Sprintf("%s.%s", v.packageName, v.functionName), err)) + continue } - if attr, ok := args.Keyword["importIgnore"]; ok { - d.ImportIgnore = strings.Split(attr, ";") - for i, val := range d.ImportIgnore { - d.ImportIgnore[i] = namesgen.ConstOrQuote(val) - } - } - if attr, ok := args.Keyword["importStateId"]; ok { - d.ImportStateID = attr - } - if attr, ok := args.Keyword["importStateIdAttribute"]; ok { - d.importStateIDAttribute = attr - } - if attr, ok := args.Keyword["importStateIdFunc"]; ok { - d.ImportStateIDFunc = attr - } - if attr, ok := args.Keyword["name"]; ok { - d.Name = strings.ReplaceAll(attr, " ", "") - } - if attr, ok := args.Keyword["noImport"]; ok { - if b, err := strconv.ParseBool(attr); err != nil { - v.errs = append(v.errs, fmt.Errorf("invalid noImport value: %q at %s. 
Should be boolean value.", attr, fmt.Sprintf("%s.%s", v.packageName, v.functionName))) - continue - } else { - d.NoImport = b - } - } - if attr, ok := args.Keyword["preCheck"]; ok { - if code, importSpec, err := parseIdentifierSpec(attr); err != nil { - v.errs = append(v.errs, fmt.Errorf("%s: %w", attr, fmt.Sprintf("%s.%s", v.packageName, v.functionName), err)) - continue - } else { - d.PreChecks = append(d.PreChecks, codeBlock{ - Code: fmt.Sprintf("%s(ctx, t)", code), - }) - if importSpec != nil { - d.GoImports = append(d.GoImports, *importSpec) - } - } - } - if attr, ok := args.Keyword["preCheckRegion"]; ok { - regions := strings.Split(attr, ";") - d.PreChecks = append(d.PreChecks, codeBlock{ - Code: fmt.Sprintf("acctest.PreCheckRegion(t, %s)", strings.Join(tfslices.ApplyToAll(regions, func(s string) string { - return endpointsConstOrQuote(s) - }), ", ")), - }) - d.GoImports = append(d.GoImports, - goImport{ - Path: "github.com/hashicorp/aws-sdk-go-base/v2/endpoints", - }, - ) - } - if attr, ok := args.Keyword["serialize"]; ok { - if b, err := strconv.ParseBool(attr); err != nil { - v.errs = append(v.errs, fmt.Errorf("invalid serialize value: %q at %s. Should be boolean value.", attr, fmt.Sprintf("%s.%s", v.packageName, v.functionName))) - continue - } else { - d.Serialize = b - } - } - if attr, ok := args.Keyword["serializeParallelTests"]; ok { - if b, err := strconv.ParseBool(attr); err != nil { - v.errs = append(v.errs, fmt.Errorf("invalid serializeParallelTests value: %q at %s. Should be boolean value.", attr, fmt.Sprintf("%s.%s", v.packageName, v.functionName))) - continue - } else { - d.SerializeParallelTests = b - } - } - if attr, ok := args.Keyword["serializeDelay"]; ok { - if b, err := strconv.ParseBool(attr); err != nil { - v.errs = append(v.errs, fmt.Errorf("invalid serializeDelay value: %q at %s. 
Should be duration value.", attr, fmt.Sprintf("%s.%s", v.packageName, v.functionName))) - continue - } else { - d.SerializeDelay = b - } + // This needs better handling + if _, ok := args.Keyword["generator"]; ok { + generatorSeen = true } + if attr, ok := args.Keyword["tagsIdentifierAttribute"]; ok { d.overrideIdentifierAttribute = attr } @@ -769,48 +615,48 @@ func (v *visitor) processFuncDecl(funcDecl *ast.FuncDecl) { } // TODO: should probably be a parameter on @Tags if attr, ok := args.Keyword["tagsUpdateForceNew"]; ok { - if b, err := strconv.ParseBool(attr); err != nil { - v.errs = append(v.errs, fmt.Errorf("invalid tagsUpdateForceNew value: %q at %s. Should be boolean value.", attr, fmt.Sprintf("%s.%s", v.packageName, v.functionName))) + if b, err := tests.ParseBoolAttr("tagsUpdateForceNew", attr); err != nil { + v.errs = append(v.errs, err) continue } else { d.TagsUpdateForceNew = b } } if attr, ok := args.Keyword["tagsUpdateGetTagsIn"]; ok { - if b, err := strconv.ParseBool(attr); err != nil { - v.errs = append(v.errs, fmt.Errorf("invalid tagsUpdateGetTagsIn value: %q at %s. Should be boolean value.", attr, fmt.Sprintf("%s.%s", v.packageName, v.functionName))) + if b, err := tests.ParseBoolAttr("tagsUpdateGetTagsIn", attr); err != nil { + v.errs = append(v.errs, err) continue } else { d.TagsUpdateGetTagsIn = b } } if attr, ok := args.Keyword["skipEmptyTags"]; ok { - if b, err := strconv.ParseBool(attr); err != nil { - v.errs = append(v.errs, fmt.Errorf("invalid skipEmptyTags value: %q at %s. Should be boolean value.", attr, fmt.Sprintf("%s.%s", v.packageName, v.functionName))) + if b, err := tests.ParseBoolAttr("skipEmptyTags", attr); err != nil { + v.errs = append(v.errs, err) continue } else { d.SkipEmptyTags = b } } if attr, ok := args.Keyword["skipNullTags"]; ok { - if b, err := strconv.ParseBool(attr); err != nil { - v.errs = append(v.errs, fmt.Errorf("invalid skipNullTags value: %q at %s. 
Should be boolean value.", attr, fmt.Sprintf("%s.%s", v.packageName, v.functionName))) + if b, err := tests.ParseBoolAttr("skipNullTags", attr); err != nil { + v.errs = append(v.errs, err) continue } else { d.SkipNullTags = b } } if attr, ok := args.Keyword["noRemoveTags"]; ok { - if b, err := strconv.ParseBool(attr); err != nil { - v.errs = append(v.errs, fmt.Errorf("invalid noRemoveTags value: %q at %s. Should be boolean value.", attr, fmt.Sprintf("%s.%s", v.packageName, v.functionName))) + if b, err := tests.ParseBoolAttr("noRemoveTags", attr); err != nil { + v.errs = append(v.errs, err) continue } else { d.NoRemoveTags = b } } if attr, ok := args.Keyword["tlsKey"]; ok { - if b, err := strconv.ParseBool(attr); err != nil { - v.errs = append(v.errs, fmt.Errorf("invalid tlsKey value: %q at %s. Should be boolean value.", attr, fmt.Sprintf("%s.%s", v.packageName, v.functionName))) + if b, err := tests.ParseBoolAttr("tlsKey", attr); err != nil { + v.errs = append(v.errs, err) continue } else { tlsKey = b @@ -827,17 +673,23 @@ func (v *visitor) processFuncDecl(funcDecl *ast.FuncDecl) { if len(tlsKeyCN) == 0 { tlsKeyCN = "acctest.RandomDomain().String()" d.GoImports = append(d.GoImports, - goImport{ + tests.GoImport{ Path: "github.com/hashicorp/terraform-provider-aws/internal/acctest", }, ) } - d.InitCodeBlocks = append(d.InitCodeBlocks, codeBlock{ + d.InitCodeBlocks = append(d.InitCodeBlocks, tests.CodeBlock{ Code: fmt.Sprintf(`privateKeyPEM := acctest.TLSRSAPrivateKeyPEM(t, 2048) certificatePEM := acctest.TLSRSAX509SelfSignedCertificatePEM(t, privateKeyPEM, %s)`, tlsKeyCN), }) - d.additionalTfVars["certificate_pem"] = "certificatePEM" - d.additionalTfVars["private_key_pem"] = "privateKeyPEM" + d.AdditionalTfVars_["certificate_pem"] = tests.TFVar{ + GoVarName: "certificatePEM", + Type: tests.TFVarTypeString, + } + d.AdditionalTfVars_["private_key_pem"] = tests.TFVar{ + GoVarName: "privateKeyPEM", + Type: tests.TFVarTypeString, + } } if tagged { @@ -851,13 +703,9 @@ 
func (v *visitor) processFuncDecl(funcDecl *ast.FuncDecl) { return } if !generatorSeen { - d.Generator = "sdkacctest.RandomWithPrefix(acctest.ResourcePrefix)" + d.Generator = "acctest.RandomWithPrefix(t, acctest.ResourcePrefix)" d.GoImports = append(d.GoImports, - goImport{ - Path: "github.com/hashicorp/terraform-plugin-testing/helper/acctest", - Alias: "sdkacctest", - }, - goImport{ + tests.GoImport{ Path: "github.com/hashicorp/terraform-provider-aws/internal/acctest", }, ) @@ -907,49 +755,6 @@ func generateTestConfig(g *common.Generator, dirPath, test string, withDefaults } } -func parseIdentifierSpec(s string) (string, *goImport, error) { - parts := strings.Split(s, ";") - switch len(parts) { - case 1: - return parts[0], nil, nil - - case 2: - return parts[1], &goImport{ - Path: parts[0], - }, nil - - case 3: - return parts[2], &goImport{ - Path: parts[0], - Alias: parts[1], - }, nil - - default: - return "", nil, fmt.Errorf("invalid generator value: %q", s) - } -} - -func generateDurationStatement(d time.Duration) string { - var buf strings.Builder - - d = d.Round(1 * time.Second) - - if d >= time.Minute { - mins := d / time.Minute - fmt.Fprintf(&buf, "%d*time.Minute", mins) - d = d - mins*time.Minute - if d != 0 { - fmt.Fprint(&buf, "+") - } - } - if d != 0 { - secs := d / time.Second - fmt.Fprintf(&buf, "%d*time.Second", secs) - } - - return buf.String() -} - func count[T any](s iter.Seq[T], f func(T) bool) (c int) { for v := range s { if f(v) { @@ -958,15 +763,3 @@ func count[T any](s iter.Seq[T], f func(T) bool) (c int) { } return c } - -func endpointsConstOrQuote(region string) string { - var buf strings.Builder - buf.WriteString("endpoints.") - - for _, part := range strings.Split(region, "-") { - buf.WriteString(strings.Title(part)) - } - buf.WriteString("RegionID") - - return buf.String() -} diff --git a/internal/generate/tagstests/resource_test.go.gtpl b/internal/generate/tagstests/resource_test.go.gtpl index 0320ac8dd2d3..e81e991736dc 100644 --- 
a/internal/generate/tagstests/resource_test.go.gtpl +++ b/internal/generate/tagstests/resource_test.go.gtpl @@ -1,36 +1,7 @@ // Code generated by internal/generate/tagstests/main.go; DO NOT EDIT. -{{ define "Init" }} - ctx := acctest.Context(t) - {{ if .ExistsTypeName -}} - var v {{ .ExistsTypeName }} - {{ end -}} - resourceName := "{{ .TypeName}}.test"{{ if .Generator }} - rName := {{ .Generator }} -{{- end }} -{{ range .InitCodeBlocks -}} -{{ .Code }} -{{- end }} -{{ end }} - -{{ define "Test" -}} -resource.{{ if and .Serialize (not .SerializeParallelTests) }}Test{{ else }}ParallelTest{{ end }} -{{- end }} - -{{ define "TestCaseSetup" -}} -{{ template "TestCaseSetupNoProviders" . -}} -{{ if not .AlternateRegionProvider }} - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, -{{- end -}} -{{- end }} - {{ define "TestCaseSetupNoProviders" -}} - PreCheck: func() { acctest.PreCheck(ctx, t) - {{- range .PreChecks }} - {{ .Code }} - {{- end -}} - }, - ErrorCheck: acctest.ErrorCheck(t, names.{{ .PackageProviderNameUpper }}ServiceID), + {{ template "CommonTestCaseChecks" . 
}} CheckDestroy: {{ if .CheckDestroyNoop }}acctest.CheckDestroyNoop{{ else }}testAccCheck{{ .Name }}Destroy(ctx{{ if .DestroyTakesT }}, t{{ end }}){{ end }}, {{- end }} @@ -112,23 +83,6 @@ plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), know {{- end }} {{ end }} -{{ define "testname" -}} -{{ if .Serialize }}testAcc{{ else }}TestAcc{{ end }}{{ .ResourceProviderNameUpper }}{{ .Name }} -{{- end }} - -{{ define "ExistsCheck" }} - testAccCheck{{ .Name }}Exists(ctx, {{ if .ExistsTakesT }}t,{{ end }} resourceName{{ if .ExistsTypeName}}, &v{{ end }}), -{{ end }} - -{{ define "AdditionalTfVars" -}} - {{ range $name, $value := .AdditionalTfVars -}} - {{ $name }}: config.StringVariable({{ $value }}), - {{ end -}} - {{ if .AlternateRegionProvider -}} - "alt_region": config.StringVariable(acctest.AlternateRegion()), - {{ end }} -{{ end }} - package {{ .ProviderPackage }}_test import ( @@ -193,12 +147,14 @@ func {{ template "testname" . }}_tagsSerial(t *testing.T) { func {{ template "testname" . }}_tags(t *testing.T) { {{- template "Init" . }} - {{ template "Test" . }}(t, resource.TestCase{ + {{ template "Test" . }}(ctx, t, resource.TestCase{ {{ template "TestCaseSetup" . }} Steps: []resource.TestStep{ { {{ if .AlternateRegionProvider -}} - ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + {{ else if .UseAlternateAccount -}} + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesNamedAlternate(ctx, t, providers), {{ end -}} ConfigDirectory: config.StaticDirectory("testdata/{{ .Name }}/tags/"), ConfigVariables: config.Variables{ {{ if .Generator }} @@ -234,7 +190,9 @@ func {{ template "testname" . 
}}_tags(t *testing.T) { {{ if not .NoImport -}} { {{ if .AlternateRegionProvider -}} - ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + {{ else if .UseAlternateAccount -}} + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesNamedAlternate(ctx, t, providers), {{ end -}} ConfigDirectory: config.StaticDirectory("testdata/{{ .Name }}/tags/"), ConfigVariables: config.Variables{ {{ if .Generator }} @@ -249,7 +207,9 @@ func {{ template "testname" . }}_tags(t *testing.T) { {{- end }} { {{ if .AlternateRegionProvider -}} - ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + {{ else if .UseAlternateAccount -}} + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesNamedAlternate(ctx, t, providers), {{ end -}} ConfigDirectory: config.StaticDirectory("testdata/{{ .Name }}/tags/"), ConfigVariables: config.Variables{ {{ if .Generator }} @@ -290,7 +250,9 @@ func {{ template "testname" . }}_tags(t *testing.T) { {{ if not .NoImport -}} { {{ if .AlternateRegionProvider -}} - ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + {{ else if .UseAlternateAccount -}} + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesNamedAlternate(ctx, t, providers), {{ end -}} ConfigDirectory: config.StaticDirectory("testdata/{{ .Name }}/tags/"), ConfigVariables: config.Variables{ {{ if .Generator }} @@ -306,7 +268,9 @@ func {{ template "testname" . 
}}_tags(t *testing.T) { {{- end }} { {{ if .AlternateRegionProvider -}} - ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + {{ else if .UseAlternateAccount -}} + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesNamedAlternate(ctx, t, providers), {{ end -}} ConfigDirectory: config.StaticDirectory("testdata/{{ .Name }}/tags/"), ConfigVariables: config.Variables{ {{ if .Generator }} @@ -345,7 +309,9 @@ func {{ template "testname" . }}_tags(t *testing.T) { {{ if not .NoImport -}} { {{ if .AlternateRegionProvider -}} - ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + {{ else if .UseAlternateAccount -}} + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesNamedAlternate(ctx, t, providers), {{ end -}} ConfigDirectory: config.StaticDirectory("testdata/{{ .Name }}/tags/"), ConfigVariables: config.Variables{ {{ if .Generator }} @@ -363,7 +329,9 @@ func {{ template "testname" . }}_tags(t *testing.T) { {{- end }} { {{ if .AlternateRegionProvider -}} - ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + {{ else if .UseAlternateAccount -}} + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesNamedAlternate(ctx, t, providers), {{ end -}} ConfigDirectory: config.StaticDirectory("testdata/{{ .Name }}/tags/"), ConfigVariables: config.Variables{ {{ if .Generator }} @@ -392,7 +360,9 @@ func {{ template "testname" . 
}}_tags(t *testing.T) { {{ if not .NoImport -}} { {{ if .AlternateRegionProvider -}} - ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + {{ else if .UseAlternateAccount -}} + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesNamedAlternate(ctx, t, providers), {{ end -}} ConfigDirectory: config.StaticDirectory("testdata/{{ .Name }}/tags/"), ConfigVariables: config.Variables{ {{ if .Generator }} @@ -416,12 +386,14 @@ func {{ template "testname" . }}_tags_null(t *testing.T) { {{ end }} {{- template "Init" . }} - {{ template "Test" . }}(t, resource.TestCase{ + {{ template "Test" . }}(ctx, t, resource.TestCase{ {{ template "TestCaseSetup" . }} Steps: []resource.TestStep{ { {{ if .AlternateRegionProvider -}} - ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + {{ else if .UseAlternateAccount -}} + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesNamedAlternate(ctx, t, providers), {{ end -}} ConfigDirectory: config.StaticDirectory("testdata/{{ .Name }}/tags/"), ConfigVariables: config.Variables{ {{ if .Generator }} @@ -468,7 +440,9 @@ func {{ template "testname" . }}_tags_null(t *testing.T) { {{ if not .NoImport -}} { {{ if .AlternateRegionProvider -}} - ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + {{ else if .UseAlternateAccount -}} + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesNamedAlternate(ctx, t, providers), {{ end -}} ConfigDirectory: config.StaticDirectory("testdata/{{ .Name }}/tags/"), ConfigVariables: config.Variables{ {{ if .Generator }} @@ -484,7 +458,9 @@ func {{ template "testname" . 
}}_tags_null(t *testing.T) { {{ if eq .Implementation "sdk" -}} { {{ if .AlternateRegionProvider -}} - ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + {{ else if .UseAlternateAccount -}} + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesNamedAlternate(ctx, t, providers), {{ end -}} ConfigDirectory: config.StaticDirectory("testdata/{{ .Name }}/tags/"), ConfigVariables: config.Variables{ {{ if .Generator }} @@ -509,12 +485,14 @@ func {{ template "testname" . }}_tags_null(t *testing.T) { func {{ template "testname" . }}_tags_EmptyMap(t *testing.T) { {{- template "Init" . }} - {{ template "Test" . }}(t, resource.TestCase{ + {{ template "Test" . }}(ctx, t, resource.TestCase{ {{ template "TestCaseSetup" . }} Steps: []resource.TestStep{ { {{ if .AlternateRegionProvider -}} - ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + {{ else if .UseAlternateAccount -}} + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesNamedAlternate(ctx, t, providers), {{ end -}} ConfigDirectory: config.StaticDirectory("testdata/{{ .Name }}/tags/"), ConfigVariables: config.Variables{ {{ if .Generator }} @@ -551,7 +529,9 @@ func {{ template "testname" . }}_tags_EmptyMap(t *testing.T) { {{ if not .NoImport -}} { {{ if .AlternateRegionProvider -}} - ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + {{ else if .UseAlternateAccount -}} + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesNamedAlternate(ctx, t, providers), {{ end -}} ConfigDirectory: config.StaticDirectory("testdata/{{ .Name }}/tags/"), ConfigVariables: config.Variables{ {{ if .Generator }} @@ -565,7 +545,9 @@ func {{ template "testname" . 
}}_tags_EmptyMap(t *testing.T) { {{ if eq .Implementation "sdk" -}} { {{ if .AlternateRegionProvider -}} - ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + {{ else if .UseAlternateAccount -}} + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesNamedAlternate(ctx, t, providers), {{ end -}} ConfigDirectory: config.StaticDirectory("testdata/{{ .Name }}/tags/"), ConfigVariables: config.Variables{ {{ if .Generator }} @@ -590,12 +572,14 @@ func {{ template "testname" . }}_tags_EmptyMap(t *testing.T) { func {{ template "testname" . }}_tags_AddOnUpdate(t *testing.T) { {{- template "Init" . }} - {{ template "Test" . }}(t, resource.TestCase{ + {{ template "Test" . }}(ctx, t, resource.TestCase{ {{ template "TestCaseSetup" . }} Steps: []resource.TestStep{ { {{ if .AlternateRegionProvider -}} - ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + {{ else if .UseAlternateAccount -}} + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesNamedAlternate(ctx, t, providers), {{ end -}} ConfigDirectory: config.StaticDirectory("testdata/{{ .Name }}/tags/"), ConfigVariables: config.Variables{ {{ if .Generator }} @@ -625,7 +609,9 @@ func {{ template "testname" . }}_tags_AddOnUpdate(t *testing.T) { }, { {{ if .AlternateRegionProvider -}} - ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + {{ else if .UseAlternateAccount -}} + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesNamedAlternate(ctx, t, providers), {{ end -}} ConfigDirectory: config.StaticDirectory("testdata/{{ .Name }}/tags/"), ConfigVariables: config.Variables{ {{ if .Generator }} @@ -661,7 +647,9 @@ func {{ template "testname" . 
}}_tags_AddOnUpdate(t *testing.T) { {{ if not .NoImport -}} { {{ if .AlternateRegionProvider -}} - ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + {{ else if .UseAlternateAccount -}} + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesNamedAlternate(ctx, t, providers), {{ end -}} ConfigDirectory: config.StaticDirectory("testdata/{{ .Name }}/tags/"), ConfigVariables: config.Variables{ {{ if .Generator }} @@ -684,12 +672,14 @@ func {{ template "testname" . }}_tags_EmptyTag_OnCreate(t *testing.T) { {{ end }} {{- template "Init" . }} - {{ template "Test" . }}(t, resource.TestCase{ + {{ template "Test" . }}(ctx, t, resource.TestCase{ {{ template "TestCaseSetup" . }} Steps: []resource.TestStep{ { {{ if .AlternateRegionProvider -}} - ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + {{ else if .UseAlternateAccount -}} + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesNamedAlternate(ctx, t, providers), {{ end -}} ConfigDirectory: config.StaticDirectory("testdata/{{ .Name }}/tags/"), ConfigVariables: config.Variables{ {{ if .Generator }} @@ -730,7 +720,9 @@ func {{ template "testname" . }}_tags_EmptyTag_OnCreate(t *testing.T) { {{ if not .NoImport -}} { {{ if .AlternateRegionProvider -}} - ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + {{ else if .UseAlternateAccount -}} + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesNamedAlternate(ctx, t, providers), {{ end -}} ConfigDirectory: config.StaticDirectory("testdata/{{ .Name }}/tags/"), ConfigVariables: config.Variables{ {{ if .Generator }} @@ -745,7 +737,9 @@ func {{ template "testname" . 
}}_tags_EmptyTag_OnCreate(t *testing.T) { {{- end }} { {{ if .AlternateRegionProvider -}} - ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + {{ else if .UseAlternateAccount -}} + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesNamedAlternate(ctx, t, providers), {{ end -}} ConfigDirectory: config.StaticDirectory("testdata/{{ .Name }}/tags/"), ConfigVariables: config.Variables{ {{ if .Generator }} @@ -771,7 +765,9 @@ func {{ template "testname" . }}_tags_EmptyTag_OnCreate(t *testing.T) { {{ if not .NoImport -}} { {{ if .AlternateRegionProvider -}} - ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + {{ else if .UseAlternateAccount -}} + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesNamedAlternate(ctx, t, providers), {{ end -}} ConfigDirectory: config.StaticDirectory("testdata/{{ .Name }}/tags/"), ConfigVariables: config.Variables{ {{ if .Generator }} @@ -792,12 +788,14 @@ func {{ template "testname" . }}_tags_EmptyTag_OnUpdate_Add(t *testing.T) { {{ end }} {{- template "Init" . }} - {{ template "Test" . }}(t, resource.TestCase{ + {{ template "Test" . }}(ctx, t, resource.TestCase{ {{ template "TestCaseSetup" . }} Steps: []resource.TestStep{ { {{ if .AlternateRegionProvider -}} - ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + {{ else if .UseAlternateAccount -}} + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesNamedAlternate(ctx, t, providers), {{ end -}} ConfigDirectory: config.StaticDirectory("testdata/{{ .Name }}/tags/"), ConfigVariables: config.Variables{ {{ if .Generator }} @@ -832,7 +830,9 @@ func {{ template "testname" . 
}}_tags_EmptyTag_OnUpdate_Add(t *testing.T) { }, { {{ if .AlternateRegionProvider -}} - ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + {{ else if .UseAlternateAccount -}} + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesNamedAlternate(ctx, t, providers), {{ end -}} ConfigDirectory: config.StaticDirectory("testdata/{{ .Name }}/tags/"), ConfigVariables: config.Variables{ {{ if .Generator }} @@ -878,7 +878,9 @@ func {{ template "testname" . }}_tags_EmptyTag_OnUpdate_Add(t *testing.T) { {{ if not .NoImport -}} { {{ if .AlternateRegionProvider -}} - ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + {{ else if .UseAlternateAccount -}} + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesNamedAlternate(ctx, t, providers), {{ end -}} ConfigDirectory: config.StaticDirectory("testdata/{{ .Name }}/tags/"), ConfigVariables: config.Variables{ {{ if .Generator }} @@ -894,7 +896,9 @@ func {{ template "testname" . }}_tags_EmptyTag_OnUpdate_Add(t *testing.T) { {{- end }} { {{ if .AlternateRegionProvider -}} - ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + {{ else if .UseAlternateAccount -}} + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesNamedAlternate(ctx, t, providers), {{ end -}} ConfigDirectory: config.StaticDirectory("testdata/{{ .Name }}/tags/"), ConfigVariables: config.Variables{ {{ if .Generator }} @@ -930,7 +934,9 @@ func {{ template "testname" . 
}}_tags_EmptyTag_OnUpdate_Add(t *testing.T) { {{ if not .NoImport -}} { {{ if .AlternateRegionProvider -}} - ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + {{ else if .UseAlternateAccount -}} + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesNamedAlternate(ctx, t, providers), {{ end -}} ConfigDirectory: config.StaticDirectory("testdata/{{ .Name }}/tags/"), ConfigVariables: config.Variables{ {{ if .Generator }} @@ -953,12 +959,14 @@ func {{ template "testname" . }}_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { {{ end }} {{- template "Init" . }} - {{ template "Test" . }}(t, resource.TestCase{ + {{ template "Test" . }}(ctx, t, resource.TestCase{ {{ template "TestCaseSetup" . }} Steps: []resource.TestStep{ { {{ if .AlternateRegionProvider -}} - ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + {{ else if .UseAlternateAccount -}} + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesNamedAlternate(ctx, t, providers), {{ end -}} ConfigDirectory: config.StaticDirectory("testdata/{{ .Name }}/tags/"), ConfigVariables: config.Variables{ {{ if .Generator }} @@ -993,7 +1001,9 @@ func {{ template "testname" . }}_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { }, { {{ if .AlternateRegionProvider -}} - ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + {{ else if .UseAlternateAccount -}} + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesNamedAlternate(ctx, t, providers), {{ end -}} ConfigDirectory: config.StaticDirectory("testdata/{{ .Name }}/tags/"), ConfigVariables: config.Variables{ {{ if .Generator }} @@ -1034,7 +1044,9 @@ func {{ template "testname" . 
}}_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { {{ if not .NoImport -}} { {{ if .AlternateRegionProvider -}} - ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + {{ else if .UseAlternateAccount -}} + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesNamedAlternate(ctx, t, providers), {{ end -}} ConfigDirectory: config.StaticDirectory("testdata/{{ .Name }}/tags/"), ConfigVariables: config.Variables{ {{ if .Generator }} @@ -1054,14 +1066,16 @@ func {{ template "testname" . }}_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { func {{ template "testname" . }}_tags_DefaultTags_providerOnly(t *testing.T) { {{- template "Init" . }} - {{ template "Test" . }}(t, resource.TestCase{ + {{ template "Test" . }}(ctx, t, resource.TestCase{ {{ template "TestCaseSetupNoProviders" . }} Steps: []resource.TestStep{ { {{ if .AlternateRegionProvider -}} - ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + {{ else if .UseAlternateAccount -}} + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesNamedAlternate(ctx, t, providers), {{ else -}} - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, {{ end -}} ConfigDirectory: config.StaticDirectory("testdata/{{ .Name }}/tags_defaults/"), ConfigVariables: config.Variables{ {{ if .Generator }} @@ -1094,9 +1108,11 @@ func {{ template "testname" . 
}}_tags_DefaultTags_providerOnly(t *testing.T) { {{ if not .NoImport -}} { {{ if .AlternateRegionProvider -}} - ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + {{ else if .UseAlternateAccount -}} + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesNamedAlternate(ctx, t, providers), {{ else -}} - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, {{ end -}} ConfigDirectory: config.StaticDirectory("testdata/{{ .Name }}/tags_defaults/"), ConfigVariables: config.Variables{ {{ if .Generator }} @@ -1112,9 +1128,11 @@ func {{ template "testname" . }}_tags_DefaultTags_providerOnly(t *testing.T) { {{- end }} { {{ if .AlternateRegionProvider -}} - ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + {{ else if .UseAlternateAccount -}} + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesNamedAlternate(ctx, t, providers), {{ else -}} - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, {{ end -}} ConfigDirectory: config.StaticDirectory("testdata/{{ .Name }}/tags_defaults/"), ConfigVariables: config.Variables{ {{ if .Generator }} @@ -1150,9 +1168,11 @@ func {{ template "testname" . 
}}_tags_DefaultTags_providerOnly(t *testing.T) { {{ if not .NoImport -}} { {{ if .AlternateRegionProvider -}} - ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + {{ else if .UseAlternateAccount -}} + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesNamedAlternate(ctx, t, providers), {{ else -}} - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, {{ end -}} ConfigDirectory: config.StaticDirectory("testdata/{{ .Name }}/tags_defaults/"), ConfigVariables: config.Variables{ {{ if .Generator }} @@ -1169,9 +1189,11 @@ func {{ template "testname" . }}_tags_DefaultTags_providerOnly(t *testing.T) { {{- end }} { {{ if .AlternateRegionProvider -}} - ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + {{ else if .UseAlternateAccount -}} + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesNamedAlternate(ctx, t, providers), {{ else -}} - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, {{ end -}} ConfigDirectory: config.StaticDirectory("testdata/{{ .Name }}/tags_defaults/"), ConfigVariables: config.Variables{ {{ if .Generator }} @@ -1207,9 +1229,11 @@ func {{ template "testname" . 
}}_tags_DefaultTags_providerOnly(t *testing.T) { {{ if not .NoImport -}} { {{ if .AlternateRegionProvider -}} - ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + {{ else if .UseAlternateAccount -}} + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesNamedAlternate(ctx, t, providers), {{ else -}} - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, {{ end -}} ConfigDirectory: config.StaticDirectory("testdata/{{ .Name }}/tags_defaults/"), ConfigVariables: config.Variables{ {{ if .Generator }} @@ -1228,9 +1252,11 @@ func {{ template "testname" . }}_tags_DefaultTags_providerOnly(t *testing.T) { {{- end }} { {{ if .AlternateRegionProvider -}} - ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + {{ else if .UseAlternateAccount -}} + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesNamedAlternate(ctx, t, providers), {{ else -}} - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, {{ end -}} ConfigDirectory: config.StaticDirectory("testdata/{{ .Name }}/tags/"), ConfigVariables: config.Variables{ {{ if .Generator }} @@ -1259,9 +1285,11 @@ func {{ template "testname" . 
}}_tags_DefaultTags_providerOnly(t *testing.T) { {{ if not .NoImport -}} { {{ if .AlternateRegionProvider -}} - ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + {{ else if .UseAlternateAccount -}} + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesNamedAlternate(ctx, t, providers), {{ else -}} - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, {{ end -}} ConfigDirectory: config.StaticDirectory("testdata/{{ .Name }}/tags/"), ConfigVariables: config.Variables{ {{ if .Generator }} @@ -1282,14 +1310,16 @@ func {{ template "testname" . }}_tags_DefaultTags_providerOnly(t *testing.T) { func {{ template "testname" . }}_tags_DefaultTags_nonOverlapping(t *testing.T) { {{- template "Init" . }} - {{ template "Test" . }}(t, resource.TestCase{ + {{ template "Test" . }}(ctx, t, resource.TestCase{ {{ template "TestCaseSetupNoProviders" . }} Steps: []resource.TestStep{ { {{ if .AlternateRegionProvider -}} - ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + {{ else if .UseAlternateAccount -}} + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesNamedAlternate(ctx, t, providers), {{ else -}} - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, {{ end -}} ConfigDirectory: config.StaticDirectory("testdata/{{ .Name }}/tags_defaults/"), ConfigVariables: config.Variables{ {{ if .Generator }} @@ -1330,9 +1360,11 @@ func {{ template "testname" . 
}}_tags_DefaultTags_nonOverlapping(t *testing.T) { {{ if not .NoImport -}} { {{ if .AlternateRegionProvider -}} - ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + {{ else if .UseAlternateAccount -}} + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesNamedAlternate(ctx, t, providers), {{ else -}} - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, {{ end -}} ConfigDirectory: config.StaticDirectory("testdata/{{ .Name }}/tags_defaults/"), ConfigVariables: config.Variables{ {{ if .Generator }} @@ -1350,9 +1382,11 @@ func {{ template "testname" . }}_tags_DefaultTags_nonOverlapping(t *testing.T) { {{- end }} { {{ if .AlternateRegionProvider -}} - ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + {{ else if .UseAlternateAccount -}} + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesNamedAlternate(ctx, t, providers), {{ else -}} - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, {{ end -}} ConfigDirectory: config.StaticDirectory("testdata/{{ .Name }}/tags_defaults/"), ConfigVariables: config.Variables{ {{ if .Generator }} @@ -1398,9 +1432,11 @@ func {{ template "testname" . 
}}_tags_DefaultTags_nonOverlapping(t *testing.T) { {{ if not .NoImport -}} { {{ if .AlternateRegionProvider -}} - ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + {{ else if .UseAlternateAccount -}} + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesNamedAlternate(ctx, t, providers), {{ else -}} - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, {{ end -}} ConfigDirectory: config.StaticDirectory("testdata/{{ .Name }}/tags_defaults/"), ConfigVariables: config.Variables{ {{ if .Generator }} @@ -1419,9 +1455,11 @@ func {{ template "testname" . }}_tags_DefaultTags_nonOverlapping(t *testing.T) { {{- end }} { {{ if .AlternateRegionProvider -}} - ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + {{ else if .UseAlternateAccount -}} + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesNamedAlternate(ctx, t, providers), {{ else -}} - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, {{ end -}} ConfigDirectory: config.StaticDirectory("testdata/{{ .Name }}/tags/"), ConfigVariables: config.Variables{ {{ if .Generator }} @@ -1450,9 +1488,11 @@ func {{ template "testname" . 
}}_tags_DefaultTags_nonOverlapping(t *testing.T) { {{ if not .NoImport -}} { {{ if .AlternateRegionProvider -}} - ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + {{ else if .UseAlternateAccount -}} + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesNamedAlternate(ctx, t, providers), {{ else -}} - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, {{ end -}} ConfigDirectory: config.StaticDirectory("testdata/{{ .Name }}/tags/"), ConfigVariables: config.Variables{ {{ if .Generator }} @@ -1473,14 +1513,16 @@ func {{ template "testname" . }}_tags_DefaultTags_nonOverlapping(t *testing.T) { func {{ template "testname" . }}_tags_DefaultTags_overlapping(t *testing.T) { {{- template "Init" . }} - {{ template "Test" . }}(t, resource.TestCase{ + {{ template "Test" . }}(ctx, t, resource.TestCase{ {{ template "TestCaseSetupNoProviders" . }} Steps: []resource.TestStep{ { {{ if .AlternateRegionProvider -}} - ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + {{ else if .UseAlternateAccount -}} + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesNamedAlternate(ctx, t, providers), {{ else -}} - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, {{ end -}} ConfigDirectory: config.StaticDirectory("testdata/{{ .Name }}/tags_defaults/"), ConfigVariables: config.Variables{ {{ if .Generator }} @@ -1519,9 +1561,11 @@ func {{ template "testname" . 
}}_tags_DefaultTags_overlapping(t *testing.T) { {{ if not .NoImport -}} { {{ if .AlternateRegionProvider -}} - ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + {{ else if .UseAlternateAccount -}} + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesNamedAlternate(ctx, t, providers), {{ else -}} - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, {{ end -}} ConfigDirectory: config.StaticDirectory("testdata/{{ .Name }}/tags_defaults/"), ConfigVariables: config.Variables{ {{ if .Generator }} @@ -1539,9 +1583,11 @@ func {{ template "testname" . }}_tags_DefaultTags_overlapping(t *testing.T) { {{- end }} { {{ if .AlternateRegionProvider -}} - ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + {{ else if .UseAlternateAccount -}} + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesNamedAlternate(ctx, t, providers), {{ else -}} - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, {{ end -}} ConfigDirectory: config.StaticDirectory("testdata/{{ .Name }}/tags_defaults/"), ConfigVariables: config.Variables{ {{ if .Generator }} @@ -1586,9 +1632,11 @@ func {{ template "testname" . 
}}_tags_DefaultTags_overlapping(t *testing.T) { {{ if not .NoImport -}} { {{ if .AlternateRegionProvider -}} - ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + {{ else if .UseAlternateAccount -}} + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesNamedAlternate(ctx, t, providers), {{ else -}} - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, {{ end -}} ConfigDirectory: config.StaticDirectory("testdata/{{ .Name }}/tags_defaults/"), ConfigVariables: config.Variables{ {{ if .Generator }} @@ -1608,9 +1656,11 @@ func {{ template "testname" . }}_tags_DefaultTags_overlapping(t *testing.T) { {{- end }} { {{ if .AlternateRegionProvider -}} - ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + {{ else if .UseAlternateAccount -}} + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesNamedAlternate(ctx, t, providers), {{ else -}} - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, {{ end -}} ConfigDirectory: config.StaticDirectory("testdata/{{ .Name }}/tags_defaults/"), ConfigVariables: config.Variables{ {{ if .Generator }} @@ -1649,9 +1699,11 @@ func {{ template "testname" . 
}}_tags_DefaultTags_overlapping(t *testing.T) { {{ if not .NoImport -}} { {{ if .AlternateRegionProvider -}} - ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + {{ else if .UseAlternateAccount -}} + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesNamedAlternate(ctx, t, providers), {{ else -}} - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, {{ end -}} ConfigDirectory: config.StaticDirectory("testdata/{{ .Name }}/tags_defaults/"), ConfigVariables: config.Variables{ {{ if .Generator }} @@ -1674,14 +1726,16 @@ func {{ template "testname" . }}_tags_DefaultTags_overlapping(t *testing.T) { func {{ template "testname" . }}_tags_DefaultTags_updateToProviderOnly(t *testing.T) { {{- template "Init" . }} - {{ template "Test" . }}(t, resource.TestCase{ + {{ template "Test" . }}(ctx, t, resource.TestCase{ {{ template "TestCaseSetupNoProviders" . }} Steps: []resource.TestStep{ { {{ if .AlternateRegionProvider -}} - ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + {{ else if .UseAlternateAccount -}} + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesNamedAlternate(ctx, t, providers), {{ else -}} - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, {{ end -}} ConfigDirectory: config.StaticDirectory("testdata/{{ .Name }}/tags/"), ConfigVariables: config.Variables{ {{ if .Generator }} @@ -1716,9 +1770,11 @@ func {{ template "testname" . 
}}_tags_DefaultTags_updateToProviderOnly(t *testin }, { {{ if .AlternateRegionProvider -}} - ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + {{ else if .UseAlternateAccount -}} + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesNamedAlternate(ctx, t, providers), {{ else -}} - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, {{ end -}} ConfigDirectory: config.StaticDirectory("testdata/{{ .Name }}/tags_defaults/"), ConfigVariables: config.Variables{ {{ if .Generator }} @@ -1751,9 +1807,11 @@ func {{ template "testname" . }}_tags_DefaultTags_updateToProviderOnly(t *testin {{ if not .NoImport -}} { {{ if .AlternateRegionProvider -}} - ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + {{ else if .UseAlternateAccount -}} + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesNamedAlternate(ctx, t, providers), {{ else -}} - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, {{ end -}} ConfigDirectory: config.StaticDirectory("testdata/{{ .Name }}/tags_defaults/"), ConfigVariables: config.Variables{ {{ if .Generator }} @@ -1774,14 +1832,16 @@ func {{ template "testname" . }}_tags_DefaultTags_updateToProviderOnly(t *testin func {{ template "testname" . }}_tags_DefaultTags_updateToResourceOnly(t *testing.T) { {{- template "Init" . }} - {{ template "Test" . }}(t, resource.TestCase{ + {{ template "Test" . }}(ctx, t, resource.TestCase{ {{ template "TestCaseSetupNoProviders" . 
}} Steps: []resource.TestStep{ { {{ if .AlternateRegionProvider -}} - ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + {{ else if .UseAlternateAccount -}} + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesNamedAlternate(ctx, t, providers), {{ else -}} - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, {{ end -}} ConfigDirectory: config.StaticDirectory("testdata/{{ .Name }}/tags_defaults/"), ConfigVariables: config.Variables{ {{ if .Generator }} @@ -1813,9 +1873,11 @@ func {{ template "testname" . }}_tags_DefaultTags_updateToResourceOnly(t *testin }, { {{ if .AlternateRegionProvider -}} - ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + {{ else if .UseAlternateAccount -}} + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesNamedAlternate(ctx, t, providers), {{ else -}} - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, {{ end -}} ConfigDirectory: config.StaticDirectory("testdata/{{ .Name }}/tags/"), ConfigVariables: config.Variables{ {{ if .Generator }} @@ -1851,9 +1913,11 @@ func {{ template "testname" . 
}}_tags_DefaultTags_updateToResourceOnly(t *testin {{ if not .NoImport -}} { {{ if .AlternateRegionProvider -}} - ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + {{ else if .UseAlternateAccount -}} + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesNamedAlternate(ctx, t, providers), {{ else -}} - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, {{ end -}} ConfigDirectory: config.StaticDirectory("testdata/{{ .Name }}/tags/"), ConfigVariables: config.Variables{ {{ if .Generator }} @@ -1876,14 +1940,16 @@ func {{ template "testname" . }}_tags_DefaultTags_emptyResourceTag(t *testing.T) {{ end }} {{- template "Init" . }} - {{ template "Test" . }}(t, resource.TestCase{ + {{ template "Test" . }}(ctx, t, resource.TestCase{ {{ template "TestCaseSetupNoProviders" . }} Steps: []resource.TestStep{ { {{ if .AlternateRegionProvider -}} - ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + {{ else if .UseAlternateAccount -}} + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesNamedAlternate(ctx, t, providers), {{ else -}} - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, {{ end -}} ConfigDirectory: config.StaticDirectory("testdata/{{ .Name }}/tags_defaults/"), ConfigVariables: config.Variables{ {{ if .Generator }} @@ -1927,9 +1993,11 @@ func {{ template "testname" . 
}}_tags_DefaultTags_emptyResourceTag(t *testing.T) {{ if not .NoImport -}} { {{ if .AlternateRegionProvider -}} - ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + {{ else if .UseAlternateAccount -}} + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesNamedAlternate(ctx, t, providers), {{ else -}} - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, {{ end -}} ConfigDirectory: config.StaticDirectory("testdata/{{ .Name }}/tags_defaults/"), ConfigVariables: config.Variables{ {{ if .Generator }} @@ -1955,14 +2023,16 @@ func {{ template "testname" . }}_tags_DefaultTags_emptyProviderOnlyTag(t *testin {{ end }} {{- template "Init" . }} - {{ template "Test" . }}(t, resource.TestCase{ + {{ template "Test" . }}(ctx, t, resource.TestCase{ {{ template "TestCaseSetupNoProviders" . }} Steps: []resource.TestStep{ { {{ if .AlternateRegionProvider -}} - ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + {{ else if .UseAlternateAccount -}} + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesNamedAlternate(ctx, t, providers), {{ else -}} - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, {{ end -}} ConfigDirectory: config.StaticDirectory("testdata/{{ .Name }}/tags_defaults/"), ConfigVariables: config.Variables{ {{ if .Generator }} @@ -2000,9 +2070,11 @@ func {{ template "testname" . 
}}_tags_DefaultTags_emptyProviderOnlyTag(t *testin {{ if not .NoImport -}} { {{ if .AlternateRegionProvider -}} - ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + {{ else if .UseAlternateAccount -}} + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesNamedAlternate(ctx, t, providers), {{ else -}} - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, {{ end -}} ConfigDirectory: config.StaticDirectory("testdata/{{ .Name }}/tags_defaults/"), ConfigVariables: config.Variables{ {{ if .Generator }} @@ -2026,14 +2098,16 @@ func {{ template "testname" . }}_tags_DefaultTags_nullOverlappingResourceTag(t * {{ end }} {{- template "Init" . }} - {{ template "Test" . }}(t, resource.TestCase{ + {{ template "Test" . }}(ctx, t, resource.TestCase{ {{ template "TestCaseSetupNoProviders" . }} Steps: []resource.TestStep{ { {{ if .AlternateRegionProvider -}} - ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + {{ else if .UseAlternateAccount -}} + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesNamedAlternate(ctx, t, providers), {{ else -}} - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, {{ end -}} ConfigDirectory: config.StaticDirectory("testdata/{{ .Name }}/tags_defaults/"), ConfigVariables: config.Variables{ {{ if .Generator }} @@ -2086,9 +2160,11 @@ func {{ template "testname" . 
}}_tags_DefaultTags_nullOverlappingResourceTag(t * {{ if not .NoImport -}} { {{ if .AlternateRegionProvider -}} - ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + {{ else if .UseAlternateAccount -}} + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesNamedAlternate(ctx, t, providers), {{ else -}} - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, {{ end -}} ConfigDirectory: config.StaticDirectory("testdata/{{ .Name }}/tags_defaults/"), ConfigVariables: config.Variables{ {{ if .Generator }} @@ -2114,14 +2190,16 @@ func {{ template "testname" . }}_tags_DefaultTags_nullNonOverlappingResourceTag( {{ end }} {{- template "Init" . }} - {{ template "Test" . }}(t, resource.TestCase{ + {{ template "Test" . }}(ctx, t, resource.TestCase{ {{ template "TestCaseSetupNoProviders" . }} Steps: []resource.TestStep{ { {{ if .AlternateRegionProvider -}} - ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + {{ else if .UseAlternateAccount -}} + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesNamedAlternate(ctx, t, providers), {{ else -}} - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, {{ end -}} ConfigDirectory: config.StaticDirectory("testdata/{{ .Name }}/tags_defaults/"), ConfigVariables: config.Variables{ {{ if .Generator }} @@ -2176,9 +2254,11 @@ func {{ template "testname" . 
}}_tags_DefaultTags_nullNonOverlappingResourceTag( {{ if not .NoImport -}} { {{ if .AlternateRegionProvider -}} - ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + {{ else if .UseAlternateAccount -}} + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesNamedAlternate(ctx, t, providers), {{ else -}} - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, {{ end -}} ConfigDirectory: config.StaticDirectory("testdata/{{ .Name }}/tags_defaults/"), ConfigVariables: config.Variables{ {{ if .Generator }} @@ -2201,14 +2281,16 @@ func {{ template "testname" . }}_tags_DefaultTags_nullNonOverlappingResourceTag( func {{ template "testname" . }}_tags_ComputedTag_OnCreate(t *testing.T) { {{- template "Init" . }} - {{ template "Test" . }}(t, resource.TestCase{ + {{ template "Test" . }}(ctx, t, resource.TestCase{ {{ template "TestCaseSetupNoProviders" . }} Steps: []resource.TestStep{ { {{ if .AlternateRegionProvider -}} - ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + {{ else if .UseAlternateAccount -}} + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesNamedAlternate(ctx, t, providers), {{ else -}} - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, {{ end -}} ConfigDirectory: config.StaticDirectory("testdata/{{ .Name }}/tagsComputed1/"), ConfigVariables: config.Variables{ {{ if .Generator }} @@ -2241,9 +2323,11 @@ func {{ template "testname" . 
}}_tags_ComputedTag_OnCreate(t *testing.T) { {{ if not .NoImport -}} { {{ if .AlternateRegionProvider -}} - ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + {{ else if .UseAlternateAccount -}} + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesNamedAlternate(ctx, t, providers), {{ else -}} - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, {{ end -}} ConfigDirectory: config.StaticDirectory("testdata/{{ .Name }}/tagsComputed1/"), ConfigVariables: config.Variables{ {{ if .Generator }} @@ -2261,14 +2345,16 @@ func {{ template "testname" . }}_tags_ComputedTag_OnCreate(t *testing.T) { func {{ template "testname" . }}_tags_ComputedTag_OnUpdate_Add(t *testing.T) { {{- template "Init" . }} - {{ template "Test" . }}(t, resource.TestCase{ + {{ template "Test" . }}(ctx, t, resource.TestCase{ {{ template "TestCaseSetupNoProviders" . }} Steps: []resource.TestStep{ { {{ if .AlternateRegionProvider -}} - ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + {{ else if .UseAlternateAccount -}} + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesNamedAlternate(ctx, t, providers), {{ else -}} - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, {{ end -}} ConfigDirectory: config.StaticDirectory("testdata/{{ .Name }}/tags/"), ConfigVariables: config.Variables{ {{ if .Generator }} @@ -2303,9 +2389,11 @@ func {{ template "testname" . 
}}_tags_ComputedTag_OnUpdate_Add(t *testing.T) { }, { {{ if .AlternateRegionProvider -}} - ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + {{ else if .UseAlternateAccount -}} + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesNamedAlternate(ctx, t, providers), {{ else -}} - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, {{ end -}} ConfigDirectory: config.StaticDirectory("testdata/{{ .Name }}/tagsComputed2/"), ConfigVariables: config.Variables{ {{ if .Generator }} @@ -2346,9 +2434,11 @@ func {{ template "testname" . }}_tags_ComputedTag_OnUpdate_Add(t *testing.T) { {{ if not .NoImport -}} { {{ if .AlternateRegionProvider -}} - ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + {{ else if .UseAlternateAccount -}} + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesNamedAlternate(ctx, t, providers), {{ else -}} - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, {{ end -}} ConfigDirectory: config.StaticDirectory("testdata/{{ .Name }}/tagsComputed2/"), ConfigVariables: config.Variables{ {{ if .Generator }} @@ -2368,14 +2458,16 @@ func {{ template "testname" . }}_tags_ComputedTag_OnUpdate_Add(t *testing.T) { func {{ template "testname" . }}_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { {{- template "Init" . }} - {{ template "Test" . }}(t, resource.TestCase{ + {{ template "Test" . }}(ctx, t, resource.TestCase{ {{ template "TestCaseSetupNoProviders" . 
}} Steps: []resource.TestStep{ { {{ if .AlternateRegionProvider -}} - ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + {{ else if .UseAlternateAccount -}} + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesNamedAlternate(ctx, t, providers), {{ else -}} - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, {{ end -}} ConfigDirectory: config.StaticDirectory("testdata/{{ .Name }}/tags/"), ConfigVariables: config.Variables{ {{ if .Generator }} @@ -2410,9 +2502,11 @@ func {{ template "testname" . }}_tags_ComputedTag_OnUpdate_Replace(t *testing.T) }, { {{ if .AlternateRegionProvider -}} - ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + {{ else if .UseAlternateAccount -}} + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesNamedAlternate(ctx, t, providers), {{ else -}} - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, {{ end -}} ConfigDirectory: config.StaticDirectory("testdata/{{ .Name }}/tagsComputed1/"), ConfigVariables: config.Variables{ {{ if .Generator }} @@ -2445,9 +2539,11 @@ func {{ template "testname" . 
}}_tags_ComputedTag_OnUpdate_Replace(t *testing.T) {{ if not .NoImport -}} { {{ if .AlternateRegionProvider -}} - ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + {{ else if .UseAlternateAccount -}} + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesNamedAlternate(ctx, t, providers), {{ else -}} - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, {{ end -}} ConfigDirectory: config.StaticDirectory("testdata/{{ .Name }}/tagsComputed1/"), ConfigVariables: config.Variables{ {{ if .Generator }} @@ -2465,15 +2561,17 @@ func {{ template "testname" . }}_tags_ComputedTag_OnUpdate_Replace(t *testing.T) func {{ template "testname" . }}_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { {{- template "Init" . }} - {{ template "Test" . }}(t, resource.TestCase{ + {{ template "Test" . }}(ctx, t, resource.TestCase{ {{ template "TestCaseSetupNoProviders" . }} Steps: []resource.TestStep{ // 1: Create { {{ if .AlternateRegionProvider -}} - ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + {{ else if .UseAlternateAccount -}} + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesNamedAlternate(ctx, t, providers), {{ else -}} - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, {{ end -}} ConfigDirectory: config.StaticDirectory("testdata/{{ .Name }}/tags_ignore/"), ConfigVariables: config.Variables{ {{ if .Generator }} @@ -2525,9 +2623,11 @@ func {{ template "testname" . 
}}_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T // 2: Update ignored tag only { {{ if .AlternateRegionProvider -}} - ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + {{ else if .UseAlternateAccount -}} + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesNamedAlternate(ctx, t, providers), {{ else -}} - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, {{ end -}} ConfigDirectory: config.StaticDirectory("testdata/{{ .Name }}/tags_ignore/"), ConfigVariables: config.Variables{ {{ if .Generator }} @@ -2579,9 +2679,11 @@ func {{ template "testname" . }}_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T // 3: Update both tags { {{ if .AlternateRegionProvider -}} - ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + {{ else if .UseAlternateAccount -}} + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesNamedAlternate(ctx, t, providers), {{ else -}} - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, {{ end -}} ConfigDirectory: config.StaticDirectory("testdata/{{ .Name }}/tags_ignore/"), ConfigVariables: config.Variables{ {{ if .Generator }} @@ -2637,15 +2739,17 @@ func {{ template "testname" . }}_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T func {{ template "testname" . }}_tags_IgnoreTags_Overlap_ResourceTag(t *testing.T) { {{- template "Init" . }} - {{ template "Test" . }}(t, resource.TestCase{ + {{ template "Test" . }}(ctx, t, resource.TestCase{ {{ template "TestCaseSetupNoProviders" . 
}} Steps: []resource.TestStep{ // 1: Create { {{ if .AlternateRegionProvider -}} - ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + {{ else if .UseAlternateAccount -}} + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesNamedAlternate(ctx, t, providers), {{ else -}} - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, {{ end -}} ConfigDirectory: config.StaticDirectory("testdata/{{ .Name }}/tags_ignore/"), ConfigVariables: config.Variables{ {{ if .Generator }} @@ -2734,9 +2838,11 @@ func {{ template "testname" . }}_tags_IgnoreTags_Overlap_ResourceTag(t *testing. // 2: Update ignored tag { {{ if .AlternateRegionProvider -}} - ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + {{ else if .UseAlternateAccount -}} + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesNamedAlternate(ctx, t, providers), {{ else -}} - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, {{ end -}} ConfigDirectory: config.StaticDirectory("testdata/{{ .Name }}/tags_ignore/"), ConfigVariables: config.Variables{ {{ if .Generator }} @@ -2824,9 +2930,11 @@ func {{ template "testname" . }}_tags_IgnoreTags_Overlap_ResourceTag(t *testing. 
// 3: Update both tags { {{ if .AlternateRegionProvider -}} - ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + {{ else if .UseAlternateAccount -}} + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesNamedAlternate(ctx, t, providers), {{ else -}} - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, {{ end -}} ConfigDirectory: config.StaticDirectory("testdata/{{ .Name }}/tags_ignore/"), ConfigVariables: config.Variables{ {{ if .Generator }} diff --git a/internal/generate/tests/acctest.tf.gtpl b/internal/generate/tests/acctest.tf.gtpl index b3d7e06c6e9a..07d4380c09f7 100644 --- a/internal/generate/tests/acctest.tf.gtpl +++ b/internal/generate/tests/acctest.tf.gtpl @@ -18,6 +18,31 @@ resource "aws_subnet" "test" { {{ template "acctest.ConfigAvailableAZsNoOptInDefaultExclude" }} {{- end }} +{{ define "acctest.ConfigVPCWithSubnetsIPv6" -}} +# acctest.ConfigVPCWithSubnetsIPv6(rName, {{ . }}) + +resource "aws_vpc" "test" { +{{- template "region" }} + cidr_block = "10.0.0.0/16" + + assign_generated_ipv6_cidr_block = true +} + +resource "aws_subnet" "test" { +{{- template "region" }} + count = {{ . 
}} + + vpc_id = aws_vpc.test.id + availability_zone = data.aws_availability_zones.available.names[count.index] + + cidr_block = cidrsubnet(aws_vpc.test.cidr_block, 8, count.index) + ipv6_cidr_block = cidrsubnet(aws_vpc.test.ipv6_cidr_block, 8, count.index) + assign_ipv6_address_on_creation = true +} + +{{ template "acctest.ConfigAvailableAZsNoOptInDefaultExclude" }} +{{- end }} + {{ define "acctest.ConfigAvailableAZsNoOptInDefaultExclude" -}} # acctest.ConfigAvailableAZsNoOptInDefaultExclude diff --git a/internal/generate/tests/annotations.go b/internal/generate/tests/annotations.go new file mode 100644 index 000000000000..1d6940fb3da3 --- /dev/null +++ b/internal/generate/tests/annotations.go @@ -0,0 +1,529 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tests + +import ( + "fmt" + "strconv" + "strings" + + acctestgen "github.com/hashicorp/terraform-provider-aws/internal/acctest/generate" + "github.com/hashicorp/terraform-provider-aws/internal/generate/common" + tfmaps "github.com/hashicorp/terraform-provider-aws/internal/maps" + tfslices "github.com/hashicorp/terraform-provider-aws/internal/slices" + namesgen "github.com/hashicorp/terraform-provider-aws/names/generate" + "golang.org/x/text/cases" + "golang.org/x/text/language" +) + +type Implementation string + +const ( + ImplementationFramework Implementation = "framework" + ImplementationSDK Implementation = "sdk" +) + +type CommonArgs struct { + Name string // Resource Type Name + TypeName string // Terraform Type Name + Implementation Implementation + + // CheckDestroy + CheckDestroyNoop bool + DestroyTakesT bool + + // CheckExists + HasExistsFunc bool + ExistsTypeName string + ExistsTakesT bool + + // Import + NoImport bool + ImportStateID string + importStateIDAttribute string + ImportStateIDFunc string + ImportIgnore []string + plannableImportAction importAction + + // Serialization + Serialize bool + SerializeDelay bool + SerializeParallelTests bool + + // PreChecks + 
PreChecks []CodeBlock + PreCheckRegions []string + PreChecksWithRegion []CodeBlock + + UseAlternateAccount bool + AlternateRegionProvider bool + + Generator string + + RequiredEnvVars []string + + GoImports []GoImport + InitCodeBlocks []CodeBlock + AdditionalTfVars_ map[string]TFVar +} + +func InitCommonArgs() CommonArgs { + return CommonArgs{ + AdditionalTfVars_: make(map[string]TFVar), + HasExistsFunc: true, + } +} + +func (c CommonArgs) HasImportStateIDAttribute() bool { + return c.importStateIDAttribute != "" +} + +func (c CommonArgs) ImportStateIDAttribute() string { + return namesgen.ConstOrQuote(c.importStateIDAttribute) +} + +func (c CommonArgs) HasImportIgnore() bool { + return len(c.ImportIgnore) > 0 +} + +func (c CommonArgs) PlannableResourceAction() string { + if c.plannableImportAction == importActionUnset { + return importActionNoop.String() + } + return c.plannableImportAction.String() +} + +func (c CommonArgs) AdditionalTfVars() map[string]TFVar { + return tfmaps.ApplyToAllKeys(c.AdditionalTfVars_, func(k string) string { + return acctestgen.ConstOrQuote(k) + }) +} + +type importAction int + +const ( + importActionUnset importAction = iota + importActionNoop + importActionUpdate + importActionReplace +) + +func (i importAction) String() string { + switch i { + case importActionNoop: + return "NoOp" + + case importActionUpdate: + return "Update" + + case importActionReplace: + return "Replace" + + default: + return "" + } +} + +type GoImport struct { + Path string + Alias string +} + +type CodeBlock struct { + Code string +} + +type TFVar struct { + GoVarName string + Type TFVarType +} + +type TFVarType string + +const ( + TFVarTypeString TFVarType = "string" + TFVarTypeInt TFVarType = "int" +) + +func ParseTestingAnnotations(args common.Args, stuff *CommonArgs) error { + if attr, ok := args.Keyword["name"]; ok { + stuff.Name = strings.ReplaceAll(attr, " ", "") + } + + // DestroyCheck + if attr, ok := args.Keyword["checkDestroyNoop"]; ok { + if b, 
err := ParseBoolAttr("checkDestroyNoop", attr); err != nil { + return err + } else { + stuff.CheckDestroyNoop = b + stuff.GoImports = append(stuff.GoImports, + GoImport{ + Path: "github.com/hashicorp/terraform-provider-aws/internal/acctest", + }, + ) + } + } + + if attr, ok := args.Keyword["destroyTakesT"]; ok { + if b, err := ParseBoolAttr("destroyTakesT", attr); err != nil { + return err + } else { + stuff.DestroyTakesT = b + } + } + + // ExistsCheck + if attr, ok := args.Keyword["hasExistsFunction"]; ok { + if b, err := ParseBoolAttr("hasExistsFunction", attr); err != nil { + return err + } else { + stuff.HasExistsFunc = b + } + } + + if attr, ok := args.Keyword["existsType"]; ok { + if typeName, importSpec, err := ParseIdentifierSpec(attr); err != nil { + return fmt.Errorf("%s: %w", attr, err) + } else { + stuff.ExistsTypeName = typeName + if importSpec != nil { + stuff.GoImports = append(stuff.GoImports, *importSpec) + } + } + } + + if attr, ok := args.Keyword["existsTakesT"]; ok { + if b, err := ParseBoolAttr("existsTakesT", attr); err != nil { + return err + } else { + stuff.ExistsTakesT = b + } + } + + // Import + if attr, ok := args.Keyword["importIgnore"]; ok { + stuff.ImportIgnore = strings.Split(attr, ";") + for i, val := range stuff.ImportIgnore { + stuff.ImportIgnore[i] = namesgen.ConstOrQuote(val) + } + if stuff.plannableImportAction == importActionUnset { + stuff.plannableImportAction = importActionUpdate + } + } + + if attr, ok := args.Keyword["importStateId"]; ok { + stuff.ImportStateID = attr + } + + if attr, ok := args.Keyword["importStateIdAttribute"]; ok { + stuff.importStateIDAttribute = attr + } + + if attr, ok := args.Keyword["importStateIdFunc"]; ok { + stuff.ImportStateIDFunc = attr + } + + if attr, ok := args.Keyword["noImport"]; ok { + if b, err := ParseBoolAttr("noImport", attr); err != nil { + return err + } else { + stuff.NoImport = b + } + } + + if attr, ok := args.Keyword["plannableImportAction"]; ok { + switch attr { + case 
importActionNoop.String(): + stuff.plannableImportAction = importActionNoop + + case importActionUpdate.String(): + stuff.plannableImportAction = importActionUpdate + + case importActionReplace.String(): + stuff.plannableImportAction = importActionReplace + + default: + return fmt.Errorf("invalid plannableImportAction value %q: Must be one of %s.", attr, []string{importActionNoop.String(), importActionUpdate.String(), importActionReplace.String()}) + } + } + + // Serialization + if attr, ok := args.Keyword["serialize"]; ok { + if b, err := ParseBoolAttr("serialize", attr); err != nil { + return err + } else { + stuff.Serialize = b + } + } + + if attr, ok := args.Keyword["serializeParallelTests"]; ok { + if b, err := ParseBoolAttr("serializeParallelTests", attr); err != nil { + return err + } else { + stuff.SerializeParallelTests = b + } + } + + if attr, ok := args.Keyword["serializeDelay"]; ok { + if b, err := ParseBoolAttr("serializeDelay", attr); err != nil { + return err + } else { + stuff.SerializeDelay = b + } + } + + // PreChecks + if attr, ok := args.Keyword["preCheck"]; ok { + if code, importSpec, err := ParseIdentifierSpec(attr); err != nil { + return fmt.Errorf("%s: %w", attr, err) + } else { + stuff.PreChecks = append(stuff.PreChecks, CodeBlock{ + Code: fmt.Sprintf("%s(ctx, t)", code), + }) + if importSpec != nil { + stuff.GoImports = append(stuff.GoImports, *importSpec) + } + } + } + + if attr, ok := args.Keyword["preCheckRegion"]; ok { + regions := strings.Split(attr, ";") + stuff.PreCheckRegions = tfslices.ApplyToAll(regions, func(s string) string { + return endpointsConstOrQuote(s) + }) + stuff.GoImports = append(stuff.GoImports, + GoImport{ + Path: "github.com/hashicorp/aws-sdk-go-base/v2/endpoints", + }, + ) + } + + if attr, ok := args.Keyword["preCheckWithRegion"]; ok { + if code, importSpec, err := ParseIdentifierSpec(attr); err != nil { + return fmt.Errorf("%s: %w", attr, err) + } else { + stuff.PreChecksWithRegion = 
append(stuff.PreChecksWithRegion, CodeBlock{ + Code: code, + }) + if importSpec != nil { + stuff.GoImports = append(stuff.GoImports, *importSpec) + } + } + } + + if attr, ok := args.Keyword["requireEnvVar"]; ok { + stuff.RequiredEnvVars = append(stuff.RequiredEnvVars, attr) + } + + if attr, ok := args.Keyword["useAlternateAccount"]; ok { + if b, err := ParseBoolAttr("useAlternateAccount", attr); err != nil { + return err + } else if b { + stuff.UseAlternateAccount = true + stuff.PreChecks = append(stuff.PreChecks, CodeBlock{ + Code: "acctest.PreCheckAlternateAccount(t)", + }) + stuff.GoImports = append(stuff.GoImports, + GoImport{ + Path: "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema", + }, + ) + } + } + + if attr, ok := args.Keyword["altRegionProvider"]; ok { + if b, err := ParseBoolAttr("altRegionProvider", attr); err != nil { + return err + } else { + stuff.AlternateRegionProvider = b + } + } + + // TF Variables + if attr, ok := args.Keyword["generator"]; ok { + if attr != "false" { + if funcName, importSpec, err := ParseIdentifierSpec(attr); err != nil { + return fmt.Errorf("%s: %w", attr, err) + } else { + stuff.Generator = funcName + if importSpec != nil { + stuff.GoImports = append(stuff.GoImports, *importSpec) + } + } + } + } + + if attr, ok := args.Keyword["emailAddress"]; ok { + varName := "address" + if len(attr) > 0 { + varName = attr + } + stuff.GoImports = append(stuff.GoImports, + GoImport{ + Path: "github.com/hashicorp/terraform-provider-aws/internal/acctest", + }, + ) + stuff.InitCodeBlocks = append(stuff.InitCodeBlocks, CodeBlock{ + Code: fmt.Sprintf( + `domain := acctest.RandomDomainName() +%s := acctest.RandomEmailAddress(domain)`, varName), + }) + stuff.AdditionalTfVars_[varName] = TFVar{ + GoVarName: varName, + Type: TFVarTypeString, + } + } + + if attr, ok := args.Keyword["domainTfVar"]; ok { + varName := "domain" + if len(attr) > 0 { + varName = attr + } + stuff.GoImports = append(stuff.GoImports, + GoImport{ + Path: 
"github.com/hashicorp/terraform-provider-aws/internal/acctest", + }, + ) + stuff.InitCodeBlocks = append(stuff.InitCodeBlocks, CodeBlock{ + Code: fmt.Sprintf(`%s := acctest.RandomDomainName()`, varName), + }) + stuff.AdditionalTfVars_[varName] = TFVar{ + GoVarName: varName, + Type: TFVarTypeString, + } + } + + if attr, ok := args.Keyword["subdomainTfVar"]; ok { + parentName := "domain" + varName := "subdomain" + parts := strings.Split(attr, ";") + if len(parts) > 1 { + if len(parts[0]) > 0 { + parentName = parts[0] + } + if len(parts[1]) > 0 { + varName = parts[1] + } + } + stuff.GoImports = append(stuff.GoImports, + GoImport{ + Path: "github.com/hashicorp/terraform-provider-aws/internal/acctest", + }, + ) + stuff.InitCodeBlocks = append(stuff.InitCodeBlocks, CodeBlock{ + Code: fmt.Sprintf(`%s := acctest.RandomDomain()`, parentName), + }) + stuff.InitCodeBlocks = append(stuff.InitCodeBlocks, CodeBlock{ + Code: fmt.Sprintf(`%s := %s.RandomSubdomain()`, varName, parentName), + }) + stuff.AdditionalTfVars_[parentName] = TFVar{ + GoVarName: fmt.Sprintf("%s.String()", parentName), + Type: TFVarTypeString, + } + stuff.AdditionalTfVars_[varName] = TFVar{ + GoVarName: fmt.Sprintf("%s.String()", varName), + Type: TFVarTypeString, + } + } + + if attr, ok := args.Keyword["randomBgpAsn"]; ok { + parts := strings.Split(attr, ";") + varName := "rBgpAsn" + stuff.GoImports = append(stuff.GoImports, + GoImport{ + Path: "github.com/hashicorp/terraform-plugin-testing/helper/acctest", + Alias: "sdkacctest", + }, + ) + stuff.InitCodeBlocks = append(stuff.InitCodeBlocks, CodeBlock{ + Code: fmt.Sprintf("%s := sdkacctest.RandIntRange(%s,%s)", varName, parts[0], parts[1]), + }) + stuff.AdditionalTfVars_[varName] = TFVar{ + GoVarName: varName, + Type: TFVarTypeInt, + } + } + + if attr, ok := args.Keyword["randomIPv4Address"]; ok { + varName := "rIPv4Address" + stuff.GoImports = append(stuff.GoImports, + GoImport{ + Path: "github.com/hashicorp/terraform-plugin-testing/helper/acctest", + 
Alias: "sdkacctest", + }, + ) + stuff.InitCodeBlocks = append(stuff.InitCodeBlocks, CodeBlock{ + Code: fmt.Sprintf(`%s, err := sdkacctest.RandIpAddress("%s") +if err != nil { + t.Fatal(err) +} +`, varName, attr), + }) + stuff.AdditionalTfVars_[varName] = TFVar{ + GoVarName: varName, + Type: TFVarTypeString, + } + } + + if attr, ok := args.Keyword["tlsEcdsaPublicKeyPem"]; ok { + if _, err := ParseBoolAttr("tlsEcdsaPublicKeyPem", attr); err != nil { + return err + } else { + varName := "rTlsEcdsaPublicKeyPem" + stuff.InitCodeBlocks = append(stuff.InitCodeBlocks, CodeBlock{ + Code: fmt.Sprintf(`privateKey := acctest.TLSECDSAPrivateKeyPEM(t, "P-384") +%s, _ := acctest.TLSECDSAPublicKeyPEM(t, privateKey)`, varName), + }) + stuff.AdditionalTfVars_[varName] = TFVar{ + GoVarName: varName, + Type: TFVarTypeString, + } + } + } + + return nil +} + +func ParseBoolAttr(name, value string) (bool, error) { + if b, err := strconv.ParseBool(value); err != nil { + return b, fmt.Errorf("invalid %s value %q: Should be boolean value.", name, value) + } else { + return b, nil + } +} + +func ParseIdentifierSpec(s string) (string, *GoImport, error) { + parts := strings.Split(s, ";") + switch len(parts) { + case 1: + return parts[0], nil, nil + + case 2: + return parts[1], &GoImport{ + Path: parts[0], + }, nil + + case 3: + return parts[2], &GoImport{ + Path: parts[0], + Alias: parts[1], + }, nil + + default: + return "", nil, fmt.Errorf("invalid generator value: %q", s) + } +} + +func endpointsConstOrQuote(region string) string { + var buf strings.Builder + buf.WriteString("endpoints.") + + caser := cases.Title(language.Und, cases.NoLower) + for part := range strings.SplitSeq(region, "-") { + buf.WriteString(caser.String(part)) + } + buf.WriteString("RegionID") + + return buf.String() +} diff --git a/internal/generate/tests/common.go b/internal/generate/tests/common.go deleted file mode 100644 index 68994c6e567b..000000000000 --- a/internal/generate/tests/common.go +++ /dev/null @@ -1,21 
+0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package tests - -import ( - _ "embed" - "fmt" - "text/template" -) - -//go:embed acctest.tf.gtpl -var acctestTfTmpl string - -func AddCommonTemplates(template *template.Template) (*template.Template, error) { - result, err := template.Parse(acctestTfTmpl) - if err != nil { - return nil, fmt.Errorf("parsing common \"acctest.tf.gtpl\" config template: %s", err) - } - return result, nil -} diff --git a/internal/generate/tests/common_test.go.gtpl b/internal/generate/tests/common_test.go.gtpl new file mode 100644 index 000000000000..f22834d3a40d --- /dev/null +++ b/internal/generate/tests/common_test.go.gtpl @@ -0,0 +1,67 @@ +{{ define "commonInit" -}} +{{ range .RequiredEnvVars -}} + acctest.SkipIfEnvVarNotSet(t, "{{ . }}") +{{ end -}} +{{ block "targetName" . }}Missing template "targetName"{{ end }} +{{- if .Generator }} + rName := {{ .Generator }} +{{- end -}} +{{- range .InitCodeBlocks }} + {{ .Code }} +{{- end -}} +{{ if .UseAlternateAccount }} + providers := make(map[string]*schema.Provider) +{{ end }} +{{ end }} + +{{ define "TestCaseSetup" -}} +{{ template "TestCaseSetupNoProviders" . }} +{{- if and (not .UseAlternateAccount) (not .AlternateRegionProvider) }} + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, +{{- end -}} +{{- end }} + +{{ define "CommonTestCaseChecks" -}} + PreCheck: func() { acctest.PreCheck(ctx, t) + {{- if .PreCheckRegions }} + acctest.PreCheckRegion(t, {{ range .PreCheckRegions}}{{ . 
}}, {{ end }}) + {{- end -}} + {{- range .PreChecks }} + {{ .Code }} + {{- end -}} + {{- range .PreChecksWithRegion }} + {{ .Code }}(ctx, t, acctest.Region()) + {{- end -}} + }, + ErrorCheck: acctest.ErrorCheck(t, names.{{ .PackageProviderNameUpper }}ServiceID), +{{- end }} + +{{ define "baseTestname" -}} +{{ if .Serialize }}testAcc{{ else }}TestAcc{{ end -}} +{{- if and (eq .ResourceProviderNameUpper "VPC") (eq .Name "VPC") -}} +VPC +{{- else -}} +{{ .ResourceProviderNameUpper }}{{ .Name }} +{{- end -}} +{{- end }} + +{{ define "Test" -}} +acctest.{{ if and .Serialize (not .SerializeParallelTests) }}Test{{ else }}ParallelTest{{ end }} +{{- end }} + +{{ define "ExistsCheck" }} + testAccCheck{{ .Name }}Exists(ctx, {{ if .ExistsTakesT }}t,{{ end }} resourceName{{ if .ExistsTypeName}}, &v{{ end }}), +{{ end }} + +{{ define "AdditionalTfVars" -}} + {{ range $name, $value := .AdditionalTfVars -}} + {{ if eq $value.Type "string" -}} + {{ $name }}: config.StringVariable({{ $value.GoVarName }}), + {{- else if eq $value.Type "int" -}} + {{ $name }}: config.IntegerVariable({{ $value.GoVarName }}), + {{- end }} + {{ end -}} + {{ if .AlternateRegionProvider -}} + "alt_region": config.StringVariable(acctest.AlternateRegion()), + {{ end -}} +{{ end }} diff --git a/internal/generate/tests/data_source_test.go.gtpl b/internal/generate/tests/data_source_test.go.gtpl new file mode 100644 index 000000000000..6045042b6e09 --- /dev/null +++ b/internal/generate/tests/data_source_test.go.gtpl @@ -0,0 +1,13 @@ +{{ define "testname" -}} +{{ template "baseTestname" . }}DataSource +{{- end }} + +{{ define "targetName" -}} +dataSourceName := "data.{{ .TypeName}}.test" +{{- end }} + +{{ define "Init" }} + ctx := acctest.Context(t) + + {{ template "commonInit" . 
}} +{{ end }} diff --git a/internal/generate/tests/resource_test.go.gtpl b/internal/generate/tests/resource_test.go.gtpl new file mode 100644 index 000000000000..39d0697721ba --- /dev/null +++ b/internal/generate/tests/resource_test.go.gtpl @@ -0,0 +1,16 @@ +{{ define "testname" -}} +{{ template "baseTestname" . }} +{{- end }} + +{{ define "targetName" -}} +resourceName := "{{ .TypeName}}.test" +{{- end }} + +{{ define "Init" }} + ctx := acctest.Context(t) + + {{ if .ExistsTypeName -}} + var v {{ .ExistsTypeName }} + {{ end -}} + {{ template "commonInit" . }} +{{ end }} diff --git a/internal/generate/tests/templates.go b/internal/generate/tests/templates.go new file mode 100644 index 000000000000..126fd9c5c4dd --- /dev/null +++ b/internal/generate/tests/templates.go @@ -0,0 +1,58 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tests + +import ( + _ "embed" + "fmt" + "text/template" +) + +//go:embed common_test.go.gtpl +var commonTestGoTmpl string + +//go:embed resource_test.go.gtpl +var resourceTestGoTmpl string + +func AddCommonResourceTestTemplates(template *template.Template) (*template.Template, error) { + result, err := template.Parse(commonTestGoTmpl) + if err != nil { + return nil, fmt.Errorf("parsing common \"common_test.go.gtpl\" test template: %w", err) + } + + result, err = result.Parse(resourceTestGoTmpl) + if err != nil { + return nil, fmt.Errorf("parsing common \"resource_test.go.gtpl\" test template: %w", err) + } + + return result, nil +} + +//go:embed data_source_test.go.gtpl +var dataSourceTestGoTmpl string + +func AddCommonDataSourceTestTemplates(template *template.Template) (*template.Template, error) { + result, err := template.Parse(commonTestGoTmpl) + if err != nil { + return nil, fmt.Errorf("parsing common \"common_test.go.gtpl\" test template: %w", err) + } + + result, err = result.Parse(dataSourceTestGoTmpl) + if err != nil { + return nil, fmt.Errorf("parsing common \"data_source_test.go.gtpl\" test 
template: %w", err) + } + + return result, nil +} + +//go:embed acctest.tf.gtpl +var acctestTfTmpl string + +func AddCommonTfTemplates(template *template.Template) (*template.Template, error) { + result, err := template.Parse(acctestTfTmpl) + if err != nil { + return nil, fmt.Errorf("parsing common \"acctest.tf.gtpl\" config template: %w", err) + } + return result, nil +} diff --git a/internal/iters/README.md b/internal/iter/README.md similarity index 100% rename from internal/iters/README.md rename to internal/iter/README.md diff --git a/internal/iter/apply_to_each.go b/internal/iter/apply_to_each.go new file mode 100644 index 000000000000..f5eccffa5b46 --- /dev/null +++ b/internal/iter/apply_to_each.go @@ -0,0 +1,19 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package iter + +import ( + "iter" +) + +// AppliedToEach returns an iterator over the elements of the sequence with f applied to each element. +func AppliedToEach[E, T any](seq iter.Seq[E], f func(E) T) iter.Seq[T] { + return func(yield func(T) bool) { + for v := range seq { + if !yield(f(v)) { + return + } + } + } +} diff --git a/internal/iter/apply_to_each_test.go b/internal/iter/apply_to_each_test.go new file mode 100644 index 000000000000..02932aa9a1ac --- /dev/null +++ b/internal/iter/apply_to_each_test.go @@ -0,0 +1,49 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package iter + +import ( + "slices" + "strings" + "testing" + + "github.com/google/go-cmp/cmp" +) + +func TestAppliedToEach(t *testing.T) { + t.Parallel() + + type testCase struct { + input []string + expected []string + } + tests := map[string]testCase{ + "three elements": { + input: []string{"one", "two", "3"}, + expected: []string{"ONE", "TWO", "3"}, + }, + "one element": { + input: []string{"abcdEFGH"}, + expected: []string{"ABCDEFGH"}, + }, + "zero elements": { + input: []string{}, + expected: nil, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + t.Parallel() + + iter := AppliedToEach(slices.Values(test.input), strings.ToUpper) + + got := slices.Collect(iter) + + if diff := cmp.Diff(got, test.expected); diff != "" { + t.Errorf("unexpected diff (+wanted, -got): %s", diff) + } + }) + } +} diff --git a/internal/iters/concat.go b/internal/iter/concat.go similarity index 96% rename from internal/iters/concat.go rename to internal/iter/concat.go index 24d11d942d32..1329dad66869 100644 --- a/internal/iters/concat.go +++ b/internal/iter/concat.go @@ -1,7 +1,7 @@ // Copyright (c) HashiCorp, Inc. // SPDX-License-Identifier: MPL-2.0 -package iters +package iter import ( "iter" diff --git a/internal/iters/concat_test.go b/internal/iter/concat_test.go similarity index 98% rename from internal/iters/concat_test.go rename to internal/iter/concat_test.go index 43219e9ba84b..f68c68bda8e3 100644 --- a/internal/iters/concat_test.go +++ b/internal/iter/concat_test.go @@ -1,7 +1,7 @@ // Copyright (c) HashiCorp, Inc. // SPDX-License-Identifier: MPL-2.0 -package iters +package iter import ( "iter" diff --git a/internal/iter/filter.go b/internal/iter/filter.go new file mode 100644 index 000000000000..2f93f1445aa4 --- /dev/null +++ b/internal/iter/filter.go @@ -0,0 +1,22 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package iter + +import ( + "iter" +) + +// Predicate represents a predicate (boolean-valued function) of one argument. +type Predicate[T any] func(T) bool + +// Filtered returns an iterator over the filtered elements of the sequence. +func Filtered[T any](seq iter.Seq[T], pred Predicate[T]) iter.Seq[T] { + return func(yield func(T) bool) { + for e := range seq { + if pred(e) && !yield(e) { + return + } + } + } +} diff --git a/internal/iter/filter_test.go b/internal/iter/filter_test.go new file mode 100644 index 000000000000..09f0f9e4ed5b --- /dev/null +++ b/internal/iter/filter_test.go @@ -0,0 +1,51 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package iter + +import ( + "slices" + "strings" + "testing" + + "github.com/google/go-cmp/cmp" +) + +func TestFiltered(t *testing.T) { + t.Parallel() + + type testCase struct { + input []string + expected []string + } + tests := map[string]testCase{ + "three elements": { + input: []string{"one", "two", "3", "a0"}, + expected: []string{"a0"}, + }, + "one element": { + input: []string{"abcdEFGH"}, + expected: []string{"abcdEFGH"}, + }, + "zero elements": { + input: []string{}, + expected: nil, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + t.Parallel() + + iter := Filtered(slices.Values(test.input), func(v string) bool { + return strings.HasPrefix(v, "a") + }) + + got := slices.Collect(iter) + + if diff := cmp.Diff(got, test.expected); diff != "" { + t.Errorf("unexpected diff (+wanted, -got): %s", diff) + } + }) + } +} diff --git a/internal/iter/null.go b/internal/iter/null.go new file mode 100644 index 000000000000..ef85ebd92304 --- /dev/null +++ b/internal/iter/null.go @@ -0,0 +1,18 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package iter + +import ( + "iter" +) + +// Null returns an empty iterator. 
+func Null[V any]() iter.Seq[V] { + return func(yield func(V) bool) {} +} + +// Null2 returns an empty value pair iterator. +func Null2[K, V any]() iter.Seq2[K, V] { + return func(yield func(K, V) bool) {} +} diff --git a/internal/json/decode_test.go b/internal/json/decode_test.go index 9c36f6980a86..2edc3ed00434 100644 --- a/internal/json/decode_test.go +++ b/internal/json/decode_test.go @@ -7,7 +7,7 @@ import ( "testing" "github.com/google/go-cmp/cmp" - "github.com/hashicorp/terraform-provider-aws/internal/json" + tfjson "github.com/hashicorp/terraform-provider-aws/internal/json" ) func TestDecodeFromString(t *testing.T) { @@ -62,7 +62,7 @@ func TestDecodeFromString(t *testing.T) { t.Run(testCase.testName, func(t *testing.T) { t.Parallel() - err := json.DecodeFromString(testCase.input, testCase.output) + err := tfjson.DecodeFromString(testCase.input, testCase.output) if got, want := err != nil, testCase.wantErr; !cmp.Equal(got, want) { t.Errorf("DecodeFromString(%s) err %t, want %t", testCase.input, got, want) } diff --git a/internal/json/encode_test.go b/internal/json/encode_test.go index 4a6446c62ddc..e013a6e3720d 100644 --- a/internal/json/encode_test.go +++ b/internal/json/encode_test.go @@ -8,7 +8,7 @@ import ( "github.com/google/go-cmp/cmp" "github.com/hashicorp/terraform-provider-aws/internal/acctest/jsoncmp" - "github.com/hashicorp/terraform-provider-aws/internal/json" + tfjson "github.com/hashicorp/terraform-provider-aws/internal/json" ) func TestEncodeToString(t *testing.T) { @@ -54,7 +54,7 @@ func TestEncodeToString(t *testing.T) { t.Run(testCase.testName, func(t *testing.T) { t.Parallel() - output, err := json.EncodeToString(testCase.input) + output, err := tfjson.EncodeToString(testCase.input) if got, want := err != nil, testCase.wantErr; !cmp.Equal(got, want) { t.Errorf("EncodeToString(%v) err %t, want %t", testCase.input, got, want) } @@ -110,7 +110,7 @@ func TestEncodeToStringIndent(t *testing.T) { t.Run(testCase.testName, func(t *testing.T) { 
t.Parallel() - output, err := json.EncodeToStringIndent(testCase.input, "", " ") + output, err := tfjson.EncodeToStringIndent(testCase.input, "", " ") if got, want := err != nil, testCase.wantErr; !cmp.Equal(got, want) { t.Errorf("EncodeToStringIndent(%v) err %t, want %t", testCase.input, got, want) } diff --git a/internal/json/equal_test.go b/internal/json/equal_test.go index 1e80913d7138..4f8fc792ea1a 100644 --- a/internal/json/equal_test.go +++ b/internal/json/equal_test.go @@ -7,7 +7,7 @@ import ( "testing" "github.com/google/go-cmp/cmp" - "github.com/hashicorp/terraform-provider-aws/internal/json" + tfjson "github.com/hashicorp/terraform-provider-aws/internal/json" ) func TestEqualStrings(t *testing.T) { @@ -64,7 +64,7 @@ func TestEqualStrings(t *testing.T) { t.Run(testCase.testName, func(t *testing.T) { t.Parallel() - equal := json.EqualStrings(testCase.x, testCase.y) + equal := tfjson.EqualStrings(testCase.x, testCase.y) if got, want := equal, testCase.wantEqual; !cmp.Equal(got, want) { t.Errorf("EqualStrings(%s, %s) = %t, want %t", testCase.x, testCase.y, got, want) } diff --git a/internal/json/patch.go b/internal/json/patch.go new file mode 100644 index 000000000000..2076c2bc5f5d --- /dev/null +++ b/internal/json/patch.go @@ -0,0 +1,15 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package json + +import ( + mattbairdjsonpatch "github.com/mattbaird/jsonpatch" +) + +// `CreatePatchFromStrings` creates an [RFC6902](https://datatracker.ietf.org/doc/html/rfc6902) JSON Patch from two JSON strings. +// `a` is the original JSON document and `b` is the modified JSON document. +// The patch is returned as an array of operations (which can be encoded to JSON). 
+func CreatePatchFromStrings(a, b string) ([]mattbairdjsonpatch.JsonPatchOperation, error) { + return mattbairdjsonpatch.CreatePatch([]byte(a), []byte(b)) +} diff --git a/internal/json/patch_test.go b/internal/json/patch_test.go new file mode 100644 index 000000000000..e0c736a94f2e --- /dev/null +++ b/internal/json/patch_test.go @@ -0,0 +1,96 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package json_test + +import ( + "cmp" + "slices" + "testing" + + gocmp "github.com/google/go-cmp/cmp" + tfjson "github.com/hashicorp/terraform-provider-aws/internal/json" + mattbairdjsonpatch "github.com/mattbaird/jsonpatch" +) + +func TestCreatePatchFromStrings(t *testing.T) { + t.Parallel() + + testCases := []struct { + testName string + a, b string + wantPatch []mattbairdjsonpatch.JsonPatchOperation + wantErr bool + }{ + { + testName: "invalid JSON", + a: `test`, + b: `{}`, + wantErr: true, + }, + { + testName: "empty patch, empty JSON", + a: `{}`, + b: `{}`, + wantPatch: []mattbairdjsonpatch.JsonPatchOperation{}, + }, + { + testName: "empty patch, non-empty JSON", + a: `{"A": "test1", "B": 42}`, + b: `{"B": 42, "A": "test1"}`, + wantPatch: []mattbairdjsonpatch.JsonPatchOperation{}, + }, + { + testName: "from empty JSON", + a: `{}`, + b: `{"A": "test1", "B": 42}`, + wantPatch: []mattbairdjsonpatch.JsonPatchOperation{ + {Operation: "add", Path: "/A", Value: "test1"}, + {Operation: "add", Path: "/B", Value: float64(42)}, + }, + }, + { + testName: "to empty JSON", + a: `{"A": "test1", "B": 42}`, + b: `{}`, + wantPatch: []mattbairdjsonpatch.JsonPatchOperation{ + {Operation: "remove", Path: "/A"}, + {Operation: "remove", Path: "/B"}, + }, + }, + { + testName: "change values", + a: `{"A": "test1", "B": 42}`, + b: `{"A": ["test2"], "B": false}`, + wantPatch: []mattbairdjsonpatch.JsonPatchOperation{ + {Operation: "replace", Path: "/A", Value: []any{"test2"}}, + {Operation: "replace", Path: "/B", Value: false}, + }, + }, + } + + for _, testCase := range 
testCases { + t.Run(testCase.testName, func(t *testing.T) { + t.Parallel() + + got, err := tfjson.CreatePatchFromStrings(testCase.a, testCase.b) + if got, want := err != nil, testCase.wantErr; !gocmp.Equal(got, want) { + t.Errorf("CreatePatchFromStrings(%s, %s) err %t, want %t", testCase.a, testCase.b, got, want) + } + if err == nil { + sortTransformer := gocmp.Transformer("SortPatchOps", func(ops []mattbairdjsonpatch.JsonPatchOperation) []mattbairdjsonpatch.JsonPatchOperation { + sorted := make([]mattbairdjsonpatch.JsonPatchOperation, len(ops)) + copy(sorted, ops) + slices.SortFunc(sorted, func(a, b mattbairdjsonpatch.JsonPatchOperation) int { + return cmp.Or(cmp.Compare(a.Operation, b.Operation), cmp.Compare(a.Path, b.Path)) + }) + return sorted + }) + + if diff := gocmp.Diff(got, testCase.wantPatch, sortTransformer); diff != "" { + t.Errorf("unexpected diff (+wanted, -got): %s", diff) + } + } + }) + } +} diff --git a/internal/json/remove_test.go b/internal/json/remove_test.go index 33d313d5d6a8..16c4b808dacb 100644 --- a/internal/json/remove_test.go +++ b/internal/json/remove_test.go @@ -6,7 +6,7 @@ package json_test import ( "testing" - "github.com/hashicorp/terraform-provider-aws/internal/json" + tfjson "github.com/hashicorp/terraform-provider-aws/internal/json" ) func TestRemoveFields(t *testing.T) { @@ -38,7 +38,7 @@ func TestRemoveFields(t *testing.T) { t.Run(testCase.testName, func(t *testing.T) { t.Parallel() - if got, want := json.RemoveFields(testCase.input, `"plugins"`), testCase.want; got != want { + if got, want := tfjson.RemoveFields(testCase.input, `"plugins"`), testCase.want; got != want { t.Errorf("RemoveReadOnlyFields(%q) = %q, want %q", testCase.input, got, want) } }) @@ -119,7 +119,7 @@ func TestRemoveEmptyFields(t *testing.T) { t.Run(testCase.testName, func(t *testing.T) { t.Parallel() - if got, want := json.RemoveEmptyFields([]byte(testCase.input)), testCase.want; string(got) != want { + if got, want := 
tfjson.RemoveEmptyFields([]byte(testCase.input)), testCase.want; string(got) != want { t.Errorf("RemoveEmptyFields(%q) = %q, want %q", testCase.input, got, want) } }) diff --git a/internal/json/smithy.go b/internal/json/smithy.go deleted file mode 100644 index c1964f1ff5b5..000000000000 --- a/internal/json/smithy.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package json - -import ( - smithydocument "github.com/aws/smithy-go/document" -) - -func SmithyDocumentFromString[T smithydocument.Marshaler](s string, f func(any) T) (T, error) { - var v map[string]any - - err := DecodeFromString(s, &v) - if err != nil { - var zero T - return zero, err - } - - return f(v), nil -} - -// SmithyDocumentToString converts a [Smithy document](https://smithy.io/2.0/spec/simple-types.html#document) to a JSON string. -func SmithyDocumentToString(document smithydocument.Unmarshaler) (string, error) { - var v map[string]any - - err := document.UnmarshalSmithyDocument(&v) - if err != nil { - return "", err - } - - return EncodeToString(v) -} - -// JSONStringer interface is used to marshal and unmarshal JSON interface objects. -type JSONStringer interface { - smithydocument.Marshaler - smithydocument.Unmarshaler -} diff --git a/internal/logging/keys.go b/internal/logging/keys.go index 203e44741426..f53ce505f7d1 100644 --- a/internal/logging/keys.go +++ b/internal/logging/keys.go @@ -13,9 +13,13 @@ import ( const ( HTTPKeyRequestBody = "http.request.body" HTTPKeyResponseBody = "http.response.body" - KeyResourceId = "id" + KeyResourceId = "tf_aws.resource_attribute." + "id" ) +func ResourceAttributeKey(name string) string { + return "tf_aws.resource_attribute." 
+ name +} + // MaskSensitiveValuesByKey masks sensitive values using tflog func MaskSensitiveValuesByKey(ctx context.Context, keys ...string) context.Context { l := baselogging.RetrieveLogger(ctx) diff --git a/internal/provider/factory.go b/internal/provider/factory.go index 591deff27b0a..dbe1fb124adf 100644 --- a/internal/provider/factory.go +++ b/internal/provider/factory.go @@ -10,6 +10,7 @@ import ( "github.com/hashicorp/terraform-plugin-go/tfprotov5" "github.com/hashicorp/terraform-plugin-mux/tf5muxserver" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-aws/internal" "github.com/hashicorp/terraform-provider-aws/internal/provider/framework" "github.com/hashicorp/terraform-provider-aws/internal/provider/sdkv2" ) @@ -18,6 +19,8 @@ import ( // This factory function is suitable for use with the terraform-plugin-go Serve function. // The primary (Plugin SDK) provider server is also returned (useful for testing). func ProtoV5ProviderServerFactory(ctx context.Context) (func() tfprotov5.ProviderServer, *schema.Provider, error) { + internal.RegisterSmarterrFS() + primary, err := sdkv2.NewProvider(ctx) if err != nil { diff --git a/internal/provider/framework/identity/schema.go b/internal/provider/framework/identity/schema.go index 43416528987b..e1a3aba4ddd2 100644 --- a/internal/provider/framework/identity/schema.go +++ b/internal/provider/framework/identity/schema.go @@ -11,7 +11,7 @@ import ( func NewIdentitySchema(identitySpec inttypes.Identity) identityschema.Schema { schemaAttrs := make(map[string]identityschema.Attribute, len(identitySpec.Attributes)) for _, attr := range identitySpec.Attributes { - schemaAttrs[attr.Name] = newIdentityAttribute(attr) + schemaAttrs[attr.Name()] = newIdentityAttribute(attr) } return identityschema.Schema{ Attributes: schemaAttrs, @@ -20,7 +20,7 @@ func NewIdentitySchema(identitySpec inttypes.Identity) identityschema.Schema { func newIdentityAttribute(attribute 
inttypes.IdentityAttribute) identityschema.Attribute { attr := identityschema.StringAttribute{} - if attribute.Required { + if attribute.Required() { attr.RequiredForImport = true } else { attr.OptionalForImport = true diff --git a/internal/provider/framework/identity_interceptor.go b/internal/provider/framework/identity_interceptor.go index ff168cf01c79..9a5bc3aed03b 100644 --- a/internal/provider/framework/identity_interceptor.go +++ b/internal/provider/framework/identity_interceptor.go @@ -7,10 +7,10 @@ import ( "context" "github.com/hashicorp/terraform-plugin-framework/attr" - "github.com/hashicorp/terraform-plugin-framework/diag" "github.com/hashicorp/terraform-plugin-framework/path" "github.com/hashicorp/terraform-plugin-framework/resource" - tfslices "github.com/hashicorp/terraform-provider-aws/internal/slices" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" inttypes "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -18,11 +18,10 @@ import ( var _ resourceCRUDInterceptor = identityInterceptor{} type identityInterceptor struct { - attributes []string + attributes []inttypes.IdentityAttribute } -func (r identityInterceptor) create(ctx context.Context, opts interceptorOptions[resource.CreateRequest, resource.CreateResponse]) diag.Diagnostics { - var diags diag.Diagnostics +func (r identityInterceptor) create(ctx context.Context, opts interceptorOptions[resource.CreateRequest, resource.CreateResponse]) { awsClient := opts.c switch response, when := opts.response, opts.when; when { @@ -32,40 +31,72 @@ func (r identityInterceptor) create(ctx context.Context, opts interceptorOptions break } - for _, attrName := range r.attributes { - switch attrName { + for _, att := range r.attributes { + switch att.Name() { case names.AttrAccountID: - diags.Append(identity.SetAttribute(ctx, path.Root(attrName), awsClient.AccountID(ctx))...) 
- if diags.HasError() { - return diags + opts.response.Diagnostics.Append(identity.SetAttribute(ctx, path.Root(att.Name()), awsClient.AccountID(ctx))...) + if opts.response.Diagnostics.HasError() { + return } case names.AttrRegion: - diags.Append(identity.SetAttribute(ctx, path.Root(attrName), awsClient.Region(ctx))...) - if diags.HasError() { - return diags + opts.response.Diagnostics.Append(identity.SetAttribute(ctx, path.Root(att.Name()), awsClient.Region(ctx))...) + if opts.response.Diagnostics.HasError() { + return } default: var attrVal attr.Value - diags.Append(response.State.GetAttribute(ctx, path.Root(attrName), &attrVal)...) - if diags.HasError() { - return diags + opts.response.Diagnostics.Append(response.State.GetAttribute(ctx, path.Root(att.ResourceAttributeName()), &attrVal)...) + if opts.response.Diagnostics.HasError() { + return } - diags.Append(identity.SetAttribute(ctx, path.Root(attrName), attrVal)...) - if diags.HasError() { - return diags + opts.response.Diagnostics.Append(identity.SetAttribute(ctx, path.Root(att.Name()), attrVal)...) + if opts.response.Diagnostics.HasError() { + return } } } - } + case OnError: + identity := response.Identity + if identity == nil { + break + } - return diags + if identityIsFullyNull(ctx, identity, r.attributes) { + for _, att := range r.attributes { + switch att.Name() { + case names.AttrAccountID: + opts.response.Diagnostics.Append(identity.SetAttribute(ctx, path.Root(att.Name()), awsClient.AccountID(ctx))...) + if opts.response.Diagnostics.HasError() { + return + } + + case names.AttrRegion: + opts.response.Diagnostics.Append(identity.SetAttribute(ctx, path.Root(att.Name()), awsClient.Region(ctx))...) + if opts.response.Diagnostics.HasError() { + return + } + + default: + var attrVal attr.Value + opts.response.Diagnostics.Append(response.State.GetAttribute(ctx, path.Root(att.ResourceAttributeName()), &attrVal)...) 
+ if opts.response.Diagnostics.HasError() { + return + } + + opts.response.Diagnostics.Append(identity.SetAttribute(ctx, path.Root(att.Name()), attrVal)...) + if opts.response.Diagnostics.HasError() { + return + } + } + } + } + } } -func (r identityInterceptor) read(ctx context.Context, opts interceptorOptions[resource.ReadRequest, resource.ReadResponse]) diag.Diagnostics { - var diags diag.Diagnostics +func (r identityInterceptor) read(ctx context.Context, opts interceptorOptions[resource.ReadRequest, resource.ReadResponse]) { awsClient := opts.c switch response, when := opts.response, opts.when; when { @@ -78,52 +109,142 @@ func (r identityInterceptor) read(ctx context.Context, opts interceptorOptions[r break } - for _, attrName := range r.attributes { - switch attrName { + for _, att := range r.attributes { + switch att.Name() { case names.AttrAccountID: - diags.Append(identity.SetAttribute(ctx, path.Root(attrName), awsClient.AccountID(ctx))...) - if diags.HasError() { - return diags + opts.response.Diagnostics.Append(identity.SetAttribute(ctx, path.Root(att.Name()), awsClient.AccountID(ctx))...) + if opts.response.Diagnostics.HasError() { + return } case names.AttrRegion: - diags.Append(identity.SetAttribute(ctx, path.Root(attrName), awsClient.Region(ctx))...) - if diags.HasError() { - return diags + opts.response.Diagnostics.Append(identity.SetAttribute(ctx, path.Root(att.Name()), awsClient.Region(ctx))...) + if opts.response.Diagnostics.HasError() { + return } default: var attrVal attr.Value - diags.Append(response.State.GetAttribute(ctx, path.Root(attrName), &attrVal)...) - if diags.HasError() { - return diags + opts.response.Diagnostics.Append(response.State.GetAttribute(ctx, path.Root(att.ResourceAttributeName()), &attrVal)...) + if opts.response.Diagnostics.HasError() { + return } - diags.Append(identity.SetAttribute(ctx, path.Root(attrName), attrVal)...) 
- if diags.HasError() { - return diags + opts.response.Diagnostics.Append(identity.SetAttribute(ctx, path.Root(att.Name()), attrVal)...) + if opts.response.Diagnostics.HasError() { + return } } } } +} - return diags +func (r identityInterceptor) update(ctx context.Context, opts interceptorOptions[resource.UpdateRequest, resource.UpdateResponse]) { + awsClient := opts.c + + switch response, when := opts.response, opts.when; when { + case After: + if response.State.Raw.IsNull() { + break + } + identity := response.Identity + if identity == nil { + break + } + + for _, att := range r.attributes { + switch att.Name() { + case names.AttrAccountID: + opts.response.Diagnostics.Append(identity.SetAttribute(ctx, path.Root(att.Name()), awsClient.AccountID(ctx))...) + if opts.response.Diagnostics.HasError() { + return + } + + case names.AttrRegion: + opts.response.Diagnostics.Append(identity.SetAttribute(ctx, path.Root(att.Name()), awsClient.Region(ctx))...) + if opts.response.Diagnostics.HasError() { + return + } + + default: + var attrVal attr.Value + opts.response.Diagnostics.Append(response.State.GetAttribute(ctx, path.Root(att.ResourceAttributeName()), &attrVal)...) + if opts.response.Diagnostics.HasError() { + return + } + + opts.response.Diagnostics.Append(identity.SetAttribute(ctx, path.Root(att.Name()), attrVal)...) + if opts.response.Diagnostics.HasError() { + return + } + } + } + case OnError: + if response.State.Raw.IsNull() { + break + } + identity := response.Identity + if identity == nil { + break + } + + if identityIsFullyNull(ctx, identity, r.attributes) { + for _, att := range r.attributes { + switch att.Name() { + case names.AttrAccountID: + opts.response.Diagnostics.Append(identity.SetAttribute(ctx, path.Root(att.Name()), awsClient.AccountID(ctx))...) + if opts.response.Diagnostics.HasError() { + return + } + + case names.AttrRegion: + opts.response.Diagnostics.Append(identity.SetAttribute(ctx, path.Root(att.Name()), awsClient.Region(ctx))...) 
+ if opts.response.Diagnostics.HasError() { + return + } + + default: + var attrVal attr.Value + opts.response.Diagnostics.Append(response.State.GetAttribute(ctx, path.Root(att.ResourceAttributeName()), &attrVal)...) + if opts.response.Diagnostics.HasError() { + return + } + + opts.response.Diagnostics.Append(identity.SetAttribute(ctx, path.Root(att.Name()), attrVal)...) + if opts.response.Diagnostics.HasError() { + return + } + } + } + } + } } -func (r identityInterceptor) update(ctx context.Context, opts interceptorOptions[resource.UpdateRequest, resource.UpdateResponse]) diag.Diagnostics { - var diags diag.Diagnostics - return diags +func (r identityInterceptor) delete(ctx context.Context, opts interceptorOptions[resource.DeleteRequest, resource.DeleteResponse]) { } -func (r identityInterceptor) delete(ctx context.Context, opts interceptorOptions[resource.DeleteRequest, resource.DeleteResponse]) diag.Diagnostics { - var diags diag.Diagnostics - return diags +// identityIsFullyNull returns true if a resource supports identity and +// all attributes are set to null values +func identityIsFullyNull(ctx context.Context, identity *tfsdk.ResourceIdentity, attributes []inttypes.IdentityAttribute) bool { + if identity == nil { + return true + } + + for _, attr := range attributes { + var attrVal types.String + if diags := identity.GetAttribute(ctx, path.Root(attr.Name()), &attrVal); diags.HasError() { + return false + } + if !attrVal.IsNull() && attrVal.ValueString() != "" { + return false + } + } + + return true } func newIdentityInterceptor(attributes []inttypes.IdentityAttribute) identityInterceptor { return identityInterceptor{ - attributes: tfslices.ApplyToAll(attributes, func(v inttypes.IdentityAttribute) string { - return v.Name - }), + attributes: attributes, } } diff --git a/internal/provider/framework/identity_interceptor_test.go b/internal/provider/framework/identity_interceptor_test.go new file mode 100644 index 000000000000..26c3206cf1da --- /dev/null +++ 
b/internal/provider/framework/identity_interceptor_test.go @@ -0,0 +1,365 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package framework + +import ( + "context" + "testing" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/resource/identityschema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-go/tftypes" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/errs/fwdiag" + "github.com/hashicorp/terraform-provider-aws/internal/provider/framework/identity" + "github.com/hashicorp/terraform-provider-aws/internal/provider/framework/resourceattribute" + tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" + inttypes "github.com/hashicorp/terraform-provider-aws/internal/types" +) + +func TestIdentityInterceptor(t *testing.T) { + t.Parallel() + + accountID := "123456789012" + region := "us-west-2" //lintignore:AWSAT003 + name := "a_name" + + resourceSchema := schema.Schema{ + Attributes: map[string]schema.Attribute{ + "name": schema.StringAttribute{ + Required: true, + }, + "region": resourceattribute.Region(), + "type": schema.StringAttribute{ + Optional: true, + }, + }, + } + + client := mockClient{ + accountID: accountID, + region: region, + } + + stateAttrs := map[string]string{ + "name": name, + "region": region, + "type": "some_type", + } + + testOperations := map[string]struct { + operation func(ctx context.Context, interceptor identityInterceptor, resourceSchema schema.Schema, stateAttrs map[string]string, identity *tfsdk.ResourceIdentity, client awsClient) 
(*tfsdk.ResourceIdentity, diag.Diagnostics) + }{ + "create": { + operation: create, + }, + "read": { + operation: read, + }, + } + + for tname, tc := range testOperations { + t.Run(tname, func(t *testing.T) { + t.Parallel() + + operation := tc.operation + + testCases := map[string]struct { + attrName string + identitySpec inttypes.Identity + }{ + "same names": { + attrName: "name", + identitySpec: regionalSingleParameterIdentitySpec("name"), + }, + "name mapped": { + attrName: "resource_name", + identitySpec: regionalSingleParameterIdentitySpecNameMapped("resource_name", "name"), + }, + } + + for tname, tc := range testCases { + t.Run(tname, func(t *testing.T) { + t.Parallel() + ctx := t.Context() + + identitySchema := identity.NewIdentitySchema(tc.identitySpec) + + interceptor := newIdentityInterceptor(tc.identitySpec.Attributes) + + identity := emtpyIdentityFromSchema(ctx, &identitySchema) + + responseIdentity, diags := operation(ctx, interceptor, resourceSchema, stateAttrs, identity, client) + if len(diags) > 0 { + t.Fatalf("unexpected diags during interception: %s", diags) + } + + if e, a := accountID, getIdentityAttributeValue(ctx, t, responseIdentity, path.Root("account_id")); e != a { + t.Errorf("expected Identity `account_id` to be %q, got %q", e, a) + } + if e, a := region, getIdentityAttributeValue(ctx, t, responseIdentity, path.Root("region")); e != a { + t.Errorf("expected Identity `region` to be %q, got %q", e, a) + } + if e, a := name, getIdentityAttributeValue(ctx, t, responseIdentity, path.Root(tc.attrName)); e != a { + t.Errorf("expected Identity `%s` to be %q, got %q", tc.attrName, e, a) + } + }) + } + }) + } +} + +func create(ctx context.Context, interceptor identityInterceptor, resourceSchema schema.Schema, stateAttrs map[string]string, identity *tfsdk.ResourceIdentity, client awsClient) (*tfsdk.ResourceIdentity, diag.Diagnostics) { + request := resource.CreateRequest{ + Config: configFromSchema(ctx, resourceSchema, stateAttrs), + Plan: 
planFromSchema(ctx, resourceSchema, stateAttrs), + Identity: identity, + } + response := resource.CreateResponse{ + State: stateFromSchema(ctx, resourceSchema, stateAttrs), + Identity: identity, + } + opts := interceptorOptions[resource.CreateRequest, resource.CreateResponse]{ + c: client, + request: &request, + response: &response, + when: After, + } + + interceptor.create(ctx, opts) + if response.Diagnostics.HasError() { + return nil, response.Diagnostics + } + return response.Identity, response.Diagnostics +} + +func read(ctx context.Context, interceptor identityInterceptor, resourceSchema schema.Schema, stateAttrs map[string]string, identity *tfsdk.ResourceIdentity, client awsClient) (*tfsdk.ResourceIdentity, diag.Diagnostics) { + request := resource.ReadRequest{ + State: stateFromSchema(ctx, resourceSchema, stateAttrs), + Identity: identity, + } + response := resource.ReadResponse{ + State: stateFromSchema(ctx, resourceSchema, stateAttrs), + Identity: identity, + } + opts := interceptorOptions[resource.ReadRequest, resource.ReadResponse]{ + c: client, + request: &request, + response: &response, + when: After, + } + + interceptor.read(ctx, opts) + if response.Diagnostics.HasError() { + return nil, response.Diagnostics + } + return response.Identity, response.Diagnostics +} + +func getIdentityAttributeValue(ctx context.Context, t *testing.T, identity *tfsdk.ResourceIdentity, path path.Path) string { + t.Helper() + + var attrVal types.String + if diags := identity.GetAttribute(ctx, path, &attrVal); diags.HasError() { + t.Fatalf("Unexpected error getting Identity attribute %q: %s", path, fwdiag.DiagnosticsError(diags)) + } + return attrVal.ValueString() +} + +func regionalSingleParameterIdentitySpec(name string) inttypes.Identity { + return inttypes.RegionalSingleParameterIdentity(name) +} + +func regionalSingleParameterIdentitySpecNameMapped(identityAttrName, resourceAttrName string) inttypes.Identity { + return 
inttypes.RegionalSingleParameterIdentityWithMappedName(identityAttrName, resourceAttrName) +} + +func stateFromSchema(ctx context.Context, schema schema.Schema, values map[string]string) tfsdk.State { + val := make(map[string]tftypes.Value) + for name := range schema.Attributes { + if v, ok := values[name]; ok { + val[name] = tftypes.NewValue(tftypes.String, v) + } else { + val[name] = tftypes.NewValue(tftypes.String, nil) + } + } + return tfsdk.State{ + Raw: tftypes.NewValue(schema.Type().TerraformType(ctx), val), + Schema: schema, + } +} + +func configFromSchema(ctx context.Context, schema schema.Schema, values map[string]string) tfsdk.Config { + val := make(map[string]tftypes.Value) + for name := range schema.Attributes { + if v, ok := values[name]; ok { + val[name] = tftypes.NewValue(tftypes.String, v) + } else { + val[name] = tftypes.NewValue(tftypes.String, nil) + } + } + return tfsdk.Config{ + Raw: tftypes.NewValue(schema.Type().TerraformType(ctx), val), + Schema: schema, + } +} + +func planFromSchema(ctx context.Context, schema schema.Schema, values map[string]string) tfsdk.Plan { + val := make(map[string]tftypes.Value) + for name := range schema.Attributes { + if v, ok := values[name]; ok { + val[name] = tftypes.NewValue(tftypes.String, v) + } else { + val[name] = tftypes.NewValue(tftypes.String, nil) + } + } + return tfsdk.Plan{ + Raw: tftypes.NewValue(schema.Type().TerraformType(ctx), val), + Schema: schema, + } +} + +func emtpyIdentityFromSchema(ctx context.Context, schema *identityschema.Schema) *tfsdk.ResourceIdentity { + return &tfsdk.ResourceIdentity{ + Raw: tftypes.NewValue(schema.Type().TerraformType(ctx), nil), + Schema: schema, + } +} + +type mockClient struct { + accountID string + region string +} + +func (c mockClient) AccountID(_ context.Context) string { + return c.accountID +} + +func (c mockClient) Region(_ context.Context) string { + return c.region +} + +func (c mockClient) DefaultTagsConfig(ctx context.Context) *tftags.DefaultConfig { 
+ panic("not implemented") //lintignore:R009 +} + +func (c mockClient) IgnoreTagsConfig(ctx context.Context) *tftags.IgnoreConfig { + panic("not implemented") //lintignore:R009 +} + +func (c mockClient) Partition(context.Context) string { + panic("not implemented") //lintignore:R009 +} + +func (c mockClient) ServicePackage(_ context.Context, name string) conns.ServicePackage { + panic("not implemented") //lintignore:R009 +} + +func (c mockClient) ValidateInContextRegionInPartition(ctx context.Context) error { + panic("not implemented") //lintignore:R009 +} + +func (c mockClient) AwsConfig(context.Context) aws.Config { // nosemgrep:ci.aws-in-func-name + panic("not implemented") //lintignore:R009 +} + +func TestIdentityIsFullyNull(t *testing.T) { + t.Parallel() + + attributes := []inttypes.IdentityAttribute{ + inttypes.StringIdentityAttribute("account_id", false), + inttypes.StringIdentityAttribute("region", false), + inttypes.StringIdentityAttribute("bucket", true), + } + + // Create identity schema once for all test cases + identitySchema := &identityschema.Schema{ + Attributes: map[string]identityschema.Attribute{ + "account_id": identityschema.StringAttribute{}, + "region": identityschema.StringAttribute{}, + "bucket": identityschema.StringAttribute{}, + }, + } + + ctx := context.Background() + + // Helper function to create identity with values + createIdentityWithValues := func(values map[string]string) *tfsdk.ResourceIdentity { + if values == nil { + return nil + } + identity := emtpyIdentityFromSchema(ctx, identitySchema) + for attrName, value := range values { + if value != "" { + diags := identity.SetAttribute(ctx, path.Root(attrName), value) + if diags.HasError() { + t.Fatalf("unexpected error setting %s in identity: %s", attrName, fwdiag.DiagnosticsError(diags)) + } + } + } + return identity + } + + testCases := map[string]struct { + identity *tfsdk.ResourceIdentity + expectNull bool + description string + }{ + "all_null": { + identity: 
createIdentityWithValues(map[string]string{}), + expectNull: true, + description: "All attributes null should return true", + }, + "some_null": { + identity: createIdentityWithValues(map[string]string{ + "account_id": "123456789012", + // region and bucket remain null + }), + expectNull: false, + description: "Some attributes set should return false", + }, + "all_set": { + identity: createIdentityWithValues(map[string]string{ + "account_id": "123456789012", + "region": "us-west-2", // lintignore:AWSAT003 + "bucket": "test-bucket", + }), + expectNull: false, + description: "All attributes set should return false", + }, + "empty_string_values": { + identity: createIdentityWithValues(map[string]string{ + "account_id": "", + "region": "", + "bucket": "", + }), + expectNull: true, + description: "Empty string values should be treated as null", + }, + "nil_identity": { + identity: createIdentityWithValues(nil), + expectNull: true, + description: "Nil identity should return true", + }, + } + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + t.Parallel() + ctx := context.Background() + + result := identityIsFullyNull(ctx, tc.identity, attributes) + if result != tc.expectNull { + t.Errorf("%s: expected identityIsFullyNull to return %v, got %v", + tc.description, tc.expectNull, result) + } + }) + } +} diff --git a/internal/provider/framework/importer/parameterized.go b/internal/provider/framework/importer/parameterized.go index 2edae96b8ff0..5e33140a6fac 100644 --- a/internal/provider/framework/importer/parameterized.go +++ b/internal/provider/framework/importer/parameterized.go @@ -15,7 +15,9 @@ import ( ) func SingleParameterized(ctx context.Context, client AWSClient, request resource.ImportStateRequest, identitySpec *inttypes.Identity, importSpec *inttypes.FrameworkImport, response *resource.ImportStateResponse) { - attrPath := path.Root(identitySpec.IdentityAttribute) + attr := identitySpec.Attributes[len(identitySpec.Attributes)-1] + identityPath 
:= path.Root(attr.Name()) + resourcePath := path.Root(attr.ResourceAttributeName()) parameterVal := request.ID @@ -26,18 +28,18 @@ func SingleParameterized(ctx context.Context, client AWSClient, request resource } var parameterAttr types.String - response.Diagnostics.Append(identity.GetAttribute(ctx, attrPath, ¶meterAttr)...) + response.Diagnostics.Append(identity.GetAttribute(ctx, identityPath, ¶meterAttr)...) if response.Diagnostics.HasError() { return } parameterVal = parameterAttr.ValueString() } - response.Diagnostics.Append(response.State.SetAttribute(ctx, attrPath, parameterVal)...) + response.Diagnostics.Append(response.State.SetAttribute(ctx, resourcePath, parameterVal)...) if identity := response.Identity; identity != nil { response.Diagnostics.Append(identity.SetAttribute(ctx, path.Root(names.AttrAccountID), client.AccountID(ctx))...) - response.Diagnostics.Append(identity.SetAttribute(ctx, attrPath, parameterVal)...) + response.Diagnostics.Append(identity.SetAttribute(ctx, identityPath, parameterVal)...) } if !identitySpec.IsGlobalResource { @@ -75,23 +77,25 @@ func MultipleParameterized(ctx context.Context, client AWSClient, request resour } for _, attr := range identitySpec.Attributes { - switch attr.Name { + switch attr.Name() { case names.AttrAccountID, names.AttrRegion: // Do nothing default: - attrPath := path.Root(attr.Name) + identityPath := path.Root(attr.Name()) + resourcePath := path.Root(attr.ResourceAttributeName()) + var parameterAttr types.String - response.Diagnostics.Append(identity.GetAttribute(ctx, attrPath, ¶meterAttr)...) + response.Diagnostics.Append(identity.GetAttribute(ctx, identityPath, ¶meterAttr)...) if response.Diagnostics.HasError() { return } parameterVal := parameterAttr.ValueString() - response.Diagnostics.Append(response.State.SetAttribute(ctx, attrPath, parameterVal)...) + response.Diagnostics.Append(response.State.SetAttribute(ctx, resourcePath, parameterVal)...) 
if identity := response.Identity; identity != nil { - response.Diagnostics.Append(identity.SetAttribute(ctx, attrPath, parameterVal)...) + response.Diagnostics.Append(identity.SetAttribute(ctx, identityPath, parameterVal)...) } } } diff --git a/internal/provider/framework/importer/parameterized_test.go b/internal/provider/framework/importer/parameterized_test.go index bcc855b3ec5b..b514dcbe71a8 100644 --- a/internal/provider/framework/importer/parameterized_test.go +++ b/internal/provider/framework/importer/parameterized_test.go @@ -45,6 +45,10 @@ func regionalSingleParameterIdentitySpec(name string) inttypes.Identity { return inttypes.RegionalSingleParameterIdentity(name) } +func regionalSingleParameterIdentitySpecNameMapped(identityAttrName, resourceAttrName string) inttypes.Identity { + return inttypes.RegionalSingleParameterIdentityWithMappedName(identityAttrName, resourceAttrName) +} + func TestRegionalSingleParameterized_ByImportID(t *testing.T) { t.Parallel() @@ -205,15 +209,16 @@ func TestRegionalSingleParameterized_ByIdentity(t *testing.T) { anotherRegion := "another-region-1" testCases := map[string]struct { - attrName string + identityAttrName string identityAttrs map[string]string + resourceAttrName string useSchemaWithID bool expectedRegion string expectError bool expectedErrorPrefix string }{ "Attr_Required": { - attrName: "name", + identityAttrName: "name", identityAttrs: map[string]string{ "name": "a_name", }, @@ -221,7 +226,7 @@ func TestRegionalSingleParameterized_ByIdentity(t *testing.T) { expectError: false, }, "Attr_WithAccountID": { - attrName: "name", + identityAttrName: "name", identityAttrs: map[string]string{ "account_id": accountID, "name": "a_name", @@ -230,7 +235,7 @@ func TestRegionalSingleParameterized_ByIdentity(t *testing.T) { expectError: false, }, "Attr_WithDefaultRegion": { - attrName: "name", + identityAttrName: "name", identityAttrs: map[string]string{ "region": region, "name": "a_name", @@ -239,7 +244,7 @@ func 
TestRegionalSingleParameterized_ByIdentity(t *testing.T) { expectError: false, }, "Attr_WithRegionOverride": { - attrName: "name", + identityAttrName: "name", identityAttrs: map[string]string{ "region": anotherRegion, "name": "a_name", @@ -248,7 +253,7 @@ func TestRegionalSingleParameterized_ByIdentity(t *testing.T) { expectError: false, }, "Attr_WrongAccountID": { - attrName: "name", + identityAttrName: "name", identityAttrs: map[string]string{ "account_id": "987654321098", "name": "a_name", @@ -258,7 +263,7 @@ func TestRegionalSingleParameterized_ByIdentity(t *testing.T) { }, "ID_Required": { - attrName: "id", + identityAttrName: "id", identityAttrs: map[string]string{ "id": "a_name", }, @@ -267,7 +272,7 @@ func TestRegionalSingleParameterized_ByIdentity(t *testing.T) { expectError: false, }, "ID_WithAccountID": { - attrName: "id", + identityAttrName: "id", identityAttrs: map[string]string{ "account_id": accountID, "id": "a_name", @@ -277,7 +282,7 @@ func TestRegionalSingleParameterized_ByIdentity(t *testing.T) { expectError: false, }, "ID_WithDefaultRegion": { - attrName: "id", + identityAttrName: "id", identityAttrs: map[string]string{ "region": region, "id": "a_name", @@ -287,7 +292,7 @@ func TestRegionalSingleParameterized_ByIdentity(t *testing.T) { expectError: false, }, "ID_WithRegionOverride": { - attrName: "id", + identityAttrName: "id", identityAttrs: map[string]string{ "region": anotherRegion, "id": "a_name", @@ -297,7 +302,7 @@ func TestRegionalSingleParameterized_ByIdentity(t *testing.T) { expectError: false, }, "ID_WrongAccountID": { - attrName: "id", + identityAttrName: "id", identityAttrs: map[string]string{ "account_id": "987654321098", "id": "a_name", @@ -306,6 +311,16 @@ func TestRegionalSingleParameterized_ByIdentity(t *testing.T) { expectedRegion: region, expectError: true, }, + + "name mapped": { + identityAttrName: "id_name", + resourceAttrName: "name", + identityAttrs: map[string]string{ + "id_name": "a_name", + }, + expectedRegion: region, 
+ expectError: false, + }, } for name, tc := range testCases { @@ -318,7 +333,12 @@ func TestRegionalSingleParameterized_ByIdentity(t *testing.T) { region: region, } - identitySpec := regionalSingleParameterIdentitySpec(tc.attrName) + var identitySpec inttypes.Identity + if tc.resourceAttrName == "" || tc.resourceAttrName == tc.identityAttrName { + identitySpec = regionalSingleParameterIdentitySpec(tc.identityAttrName) + } else { + identitySpec = regionalSingleParameterIdentitySpecNameMapped(tc.identityAttrName, tc.resourceAttrName) + } identitySchema := ptr(identity.NewIdentitySchema(identitySpec)) @@ -350,7 +370,7 @@ func TestRegionalSingleParameterized_ByIdentity(t *testing.T) { // Check name value var expectedNameValue string if !tc.useSchemaWithID { - expectedNameValue = tc.identityAttrs[tc.attrName] + expectedNameValue = tc.identityAttrs[tc.identityAttrName] } if e, a := expectedNameValue, getAttributeValue(ctx, t, response.State, path.Root("name")); e != a { t.Errorf("expected `name` to be %q, got %q", e, a) @@ -363,7 +383,7 @@ func TestRegionalSingleParameterized_ByIdentity(t *testing.T) { // Check ID value if using schema with ID if tc.useSchemaWithID { - if e, a := tc.identityAttrs[tc.attrName], getAttributeValue(ctx, t, response.State, path.Root("id")); e != a { + if e, a := tc.identityAttrs[tc.identityAttrName], getAttributeValue(ctx, t, response.State, path.Root("id")); e != a { t.Errorf("expected `id` to be %q, got %q", e, a) } } @@ -378,8 +398,8 @@ func TestRegionalSingleParameterized_ByIdentity(t *testing.T) { if e, a := tc.expectedRegion, getIdentityAttributeValue(ctx, t, response.Identity, path.Root("region")); e != a { t.Errorf("expected Identity `region` to be %q, got %q", e, a) } - if e, a := tc.identityAttrs[tc.attrName], getIdentityAttributeValue(ctx, t, response.Identity, path.Root(tc.attrName)); e != a { - t.Errorf("expected Identity `%s` to be %q, got %q", tc.attrName, e, a) + if e, a := tc.identityAttrs[tc.identityAttrName], 
getIdentityAttributeValue(ctx, t, response.Identity, path.Root(tc.identityAttrName)); e != a { + t.Errorf("expected Identity `%s` to be %q, got %q", tc.identityAttrName, e, a) } } }) @@ -407,6 +427,10 @@ func globalSingleParameterIdentitySpec(name string) inttypes.Identity { return inttypes.GlobalSingleParameterIdentity(name) } +func globalSingleParameterIdentitySpecNameMapped(identityAttrName, resourceAttrName string) inttypes.Identity { + return inttypes.GlobalSingleParameterIdentityWithMappedName(identityAttrName, resourceAttrName) +} + func TestGlobalSingleParameterized_ByImportID(t *testing.T) { t.Parallel() @@ -532,21 +556,22 @@ func TestGlobalSingleParameterized_ByIdentity(t *testing.T) { region := "a-region-1" testCases := map[string]struct { - attrName string + identityAttrName string identityAttrs map[string]string + resourceAttrName string useSchemaWithID bool expectError bool expectedErrorPrefix string }{ "Attr_Required": { - attrName: "name", + identityAttrName: "name", identityAttrs: map[string]string{ "name": "a_name", }, expectError: false, }, "Attr_WithAccountID": { - attrName: "name", + identityAttrName: "name", identityAttrs: map[string]string{ "account_id": accountID, "name": "a_name", @@ -554,7 +579,7 @@ func TestGlobalSingleParameterized_ByIdentity(t *testing.T) { expectError: false, }, "Attr_WrongAccountID": { - attrName: "name", + identityAttrName: "name", identityAttrs: map[string]string{ "account_id": "987654321098", "name": "a_name", @@ -563,7 +588,7 @@ func TestGlobalSingleParameterized_ByIdentity(t *testing.T) { }, "ID_Required": { - attrName: "id", + identityAttrName: "id", identityAttrs: map[string]string{ "id": "a_name", }, @@ -571,7 +596,7 @@ func TestGlobalSingleParameterized_ByIdentity(t *testing.T) { expectError: false, }, "ID_WithAccountID": { - attrName: "id", + identityAttrName: "id", identityAttrs: map[string]string{ "account_id": accountID, "id": "a_name", @@ -580,7 +605,7 @@ func TestGlobalSingleParameterized_ByIdentity(t 
*testing.T) { expectError: false, }, "ID_WrongAccountID": { - attrName: "id", + identityAttrName: "id", identityAttrs: map[string]string{ "account_id": "987654321098", "id": "a_name", @@ -588,6 +613,15 @@ func TestGlobalSingleParameterized_ByIdentity(t *testing.T) { useSchemaWithID: true, expectError: true, }, + + "name mapped": { + identityAttrName: "id_name", + resourceAttrName: "name", + identityAttrs: map[string]string{ + "id_name": "a_name", + }, + expectError: false, + }, } for name, tc := range testCases { @@ -600,7 +634,12 @@ func TestGlobalSingleParameterized_ByIdentity(t *testing.T) { region: region, } - identitySpec := globalSingleParameterIdentitySpec(tc.attrName) + var identitySpec inttypes.Identity + if tc.resourceAttrName == "" || tc.resourceAttrName == tc.identityAttrName { + identitySpec = globalSingleParameterIdentitySpec(tc.identityAttrName) + } else { + identitySpec = globalSingleParameterIdentitySpecNameMapped(tc.identityAttrName, tc.resourceAttrName) + } identitySchema := ptr(identity.NewIdentitySchema(identitySpec)) @@ -633,7 +672,7 @@ func TestGlobalSingleParameterized_ByIdentity(t *testing.T) { // Check name value var expectedNameValue string if !tc.useSchemaWithID { - expectedNameValue = tc.identityAttrs[tc.attrName] + expectedNameValue = tc.identityAttrs[tc.identityAttrName] } if e, a := expectedNameValue, getAttributeValue(ctx, t, response.State, path.Root("name")); e != a { t.Errorf("expected `name` to be %q, got %q", e, a) @@ -641,7 +680,7 @@ func TestGlobalSingleParameterized_ByIdentity(t *testing.T) { // Check ID value if using schema with ID if tc.useSchemaWithID { - if e, a := tc.identityAttrs[tc.attrName], getAttributeValue(ctx, t, response.State, path.Root("id")); e != a { + if e, a := tc.identityAttrs[tc.identityAttrName], getAttributeValue(ctx, t, response.State, path.Root("id")); e != a { t.Errorf("expected `id` to be %q, got %q", e, a) } } @@ -653,8 +692,8 @@ func TestGlobalSingleParameterized_ByIdentity(t *testing.T) { if e, 
a := accountID, getIdentityAttributeValue(ctx, t, response.Identity, path.Root("account_id")); e != a { t.Errorf("expected Identity `account_id` to be %q, got %q", e, a) } - if e, a := tc.identityAttrs[tc.attrName], getIdentityAttributeValue(ctx, t, response.Identity, path.Root(tc.attrName)); e != a { - t.Errorf("expected Identity `%s` to be %q, got %q", tc.attrName, e, a) + if e, a := tc.identityAttrs[tc.identityAttrName], getIdentityAttributeValue(ctx, t, response.Identity, path.Root(tc.identityAttrName)); e != a { + t.Errorf("expected Identity `%s` to be %q, got %q", tc.identityAttrName, e, a) } } }) @@ -694,6 +733,18 @@ func regionalMultipleParameterizedIdentitySpec(attrNames []string) inttypes.Iden return inttypes.RegionalParameterizedIdentity(attrs) } +func regionalMultipleParameterizedIdentitySpecWithMappedName(attrNames map[string]string) inttypes.Identity { + var attrs []inttypes.IdentityAttribute + for identityAttrName, resourceAttrName := range attrNames { + if identityAttrName == resourceAttrName { + attrs = append(attrs, inttypes.StringIdentityAttribute(identityAttrName, true)) + } else { + attrs = append(attrs, inttypes.StringIdentityAttributeWithMappedName(identityAttrName, true, resourceAttrName)) + } + } + return inttypes.RegionalParameterizedIdentity(attrs) +} + func TestRegionalMutipleParameterized_ByImportID(t *testing.T) { t.Parallel() @@ -875,21 +926,30 @@ func TestRegionalMutipleParameterized_ByIdentity(t *testing.T) { anotherRegion := "another-region-1" testCases := map[string]struct { - identityAttrs map[string]string - useSchemaWithID bool - useImportIDCreator bool - expectedAttrs map[string]string - expectedRegion string - expectedID string - expectError bool - expectedErrorPrefix string + identityAttrs map[string]string + identitySpec inttypes.Identity + useSchemaWithID bool + useImportIDCreator bool + expectedIdentityAttrs map[string]string + expectedResourceAttrs map[string]string + expectedRegion string + expectedID string + 
expectError bool + expectedErrorPrefix string }{ "Required": { identityAttrs: map[string]string{ "name": "a_name", "type": "a_type", }, - expectedAttrs: map[string]string{ + identitySpec: regionalMultipleParameterizedIdentitySpec([]string{"name", "type"}), + expectedIdentityAttrs: map[string]string{ + "account_id": accountID, + "region": region, + "name": "a_name", + "type": "a_type", + }, + expectedResourceAttrs: map[string]string{ "name": "a_name", "type": "a_type", }, @@ -902,7 +962,14 @@ func TestRegionalMutipleParameterized_ByIdentity(t *testing.T) { "name": "a_name", "type": "a_type", }, - expectedAttrs: map[string]string{ + identitySpec: regionalMultipleParameterizedIdentitySpec([]string{"name", "type"}), + expectedIdentityAttrs: map[string]string{ + "account_id": accountID, + "region": region, + "name": "a_name", + "type": "a_type", + }, + expectedResourceAttrs: map[string]string{ "name": "a_name", "type": "a_type", }, @@ -915,7 +982,14 @@ func TestRegionalMutipleParameterized_ByIdentity(t *testing.T) { "name": "a_name", "type": "a_type", }, - expectedAttrs: map[string]string{ + identitySpec: regionalMultipleParameterizedIdentitySpec([]string{"name", "type"}), + expectedIdentityAttrs: map[string]string{ + "account_id": accountID, + "region": region, + "name": "a_name", + "type": "a_type", + }, + expectedResourceAttrs: map[string]string{ "name": "a_name", "type": "a_type", }, @@ -928,7 +1002,14 @@ func TestRegionalMutipleParameterized_ByIdentity(t *testing.T) { "name": "a_name", "type": "a_type", }, - expectedAttrs: map[string]string{ + identitySpec: regionalMultipleParameterizedIdentitySpec([]string{"name", "type"}), + expectedIdentityAttrs: map[string]string{ + "account_id": accountID, + "region": anotherRegion, + "name": "a_name", + "type": "a_type", + }, + expectedResourceAttrs: map[string]string{ "name": "a_name", "type": "a_type", }, @@ -941,7 +1022,8 @@ func TestRegionalMutipleParameterized_ByIdentity(t *testing.T) { "name": "a_name", "type": 
"a_type", }, - expectError: true, + identitySpec: regionalMultipleParameterizedIdentitySpec([]string{"name", "type"}), + expectError: true, }, "WithIDAttr_DefaultRegion": { @@ -949,9 +1031,16 @@ func TestRegionalMutipleParameterized_ByIdentity(t *testing.T) { "name": "a_name", "type": "a_type", }, + identitySpec: regionalMultipleParameterizedIdentitySpec([]string{"name", "type"}), useSchemaWithID: true, useImportIDCreator: true, - expectedAttrs: map[string]string{ + expectedIdentityAttrs: map[string]string{ + "account_id": accountID, + "region": region, + "name": "a_name", + "type": "a_type", + }, + expectedResourceAttrs: map[string]string{ "name": "a_name", "type": "a_type", }, @@ -964,10 +1053,35 @@ func TestRegionalMutipleParameterized_ByIdentity(t *testing.T) { "name": "a_name", "type": "a_type", }, + identitySpec: regionalMultipleParameterizedIdentitySpec([]string{"name", "type"}), useSchemaWithID: true, useImportIDCreator: false, expectError: true, }, + + "name mapped": { + identityAttrs: map[string]string{ + "id_name": "a_name", + "type": "a_type", + }, + identitySpec: regionalMultipleParameterizedIdentitySpecWithMappedName(map[string]string{ + "id_name": "name", + "type": "type", + }), + expectedIdentityAttrs: map[string]string{ + "account_id": accountID, + "region": region, + "id_name": "a_name", + "type": "a_type", + }, + expectedResourceAttrs: map[string]string{ + "name": "a_name", + "type": "a_type", + }, + expectedID: "a_name,a_type", + expectedRegion: region, + expectError: false, + }, } for name, tc := range testCases { @@ -980,9 +1094,7 @@ func TestRegionalMutipleParameterized_ByIdentity(t *testing.T) { region: region, } - identitySpec := regionalMultipleParameterizedIdentitySpec([]string{"name", "type"}) - - identitySchema := ptr(identity.NewIdentitySchema(identitySpec)) + identitySchema := ptr(identity.NewIdentitySchema(tc.identitySpec)) schema := regionalMultipleParameterizedSchema if tc.useSchemaWithID { @@ -1004,7 +1116,7 @@ func 
TestRegionalMutipleParameterized_ByIdentity(t *testing.T) { identity := identityFromSchema(ctx, identitySchema, tc.identityAttrs) - response := importByIdentity(ctx, f, &client, schema, identity, identitySpec, &importSpec) + response := importByIdentity(ctx, f, &client, schema, identity, tc.identitySpec, &importSpec) if tc.expectError { if !response.Diagnostics.HasError() { t.Fatal("Expected error, got none") @@ -1025,7 +1137,7 @@ func TestRegionalMutipleParameterized_ByIdentity(t *testing.T) { } // Check attr values - for name, expectedAttr := range tc.expectedAttrs { + for name, expectedAttr := range tc.expectedResourceAttrs { if e, a := expectedAttr, getAttributeValue(ctx, t, response.State, path.Root(name)); e != a { t.Errorf("expected `%s` to be %q, got %q", name, e, a) } @@ -1042,13 +1154,7 @@ func TestRegionalMutipleParameterized_ByIdentity(t *testing.T) { if identity := response.Identity; identity == nil { t.Error("Identity should be set") } else { - if e, a := accountID, getIdentityAttributeValue(ctx, t, response.Identity, path.Root("account_id")); e != a { - t.Errorf("expected Identity `account_id` to be %q, got %q", e, a) - } - if e, a := tc.expectedRegion, getIdentityAttributeValue(ctx, t, response.Identity, path.Root("region")); e != a { - t.Errorf("expected Identity `region` to be %q, got %q", e, a) - } - for name, expectedAttr := range tc.expectedAttrs { + for name, expectedAttr := range tc.expectedIdentityAttrs { if e, a := expectedAttr, getIdentityAttributeValue(ctx, t, response.Identity, path.Root(name)); e != a { t.Errorf("expected Identity `%s` to be %q, got %q", name, e, a) } @@ -1089,6 +1195,18 @@ func globalMultipleParameterizedIdentitySpec(attrNames []string) inttypes.Identi return inttypes.GlobalParameterizedIdentity(attrs) } +func globalMultipleParameterizedIdentitySpecWithMappedName(attrNames map[string]string) inttypes.Identity { + var attrs []inttypes.IdentityAttribute + for identityAttrName, resourceAttrName := range attrNames { + if 
identityAttrName == resourceAttrName { + attrs = append(attrs, inttypes.StringIdentityAttribute(identityAttrName, true)) + } else { + attrs = append(attrs, inttypes.StringIdentityAttributeWithMappedName(identityAttrName, true, resourceAttrName)) + } + } + return inttypes.GlobalParameterizedIdentity(attrs) +} + func TestGlobalMutipleParameterized_ByImportID(t *testing.T) { t.Parallel() @@ -1237,21 +1355,29 @@ func TestGlobalMutipleParameterized_ByIdentity(t *testing.T) { accountID := "123456789012" testCases := map[string]struct { - identityAttrs map[string]string - useSchemaWithID bool - useImportIDCreator bool - expectedID string - expectedAttrs map[string]string - expectError bool - expectedErrorPrefix string + identityAttrs map[string]string + identitySpec inttypes.Identity + useSchemaWithID bool + useImportIDCreator bool + expectedID string + expectedIdentityAttrs map[string]string + expectedResourceAttrs map[string]string + expectError bool + expectedErrorPrefix string }{ "Required": { identityAttrs: map[string]string{ "name": "a_name", "type": "a_type", }, - expectedID: "a_name,a_type", - expectedAttrs: map[string]string{ + identitySpec: globalMultipleParameterizedIdentitySpec([]string{"name", "type"}), + expectedID: "a_name,a_type", + expectedIdentityAttrs: map[string]string{ + "account_id": accountID, + "name": "a_name", + "type": "a_type", + }, + expectedResourceAttrs: map[string]string{ "name": "a_name", "type": "a_type", }, @@ -1263,8 +1389,14 @@ func TestGlobalMutipleParameterized_ByIdentity(t *testing.T) { "name": "a_name", "type": "a_type", }, - expectedID: "a_name,a_type", - expectedAttrs: map[string]string{ + identitySpec: globalMultipleParameterizedIdentitySpec([]string{"name", "type"}), + expectedID: "a_name,a_type", + expectedIdentityAttrs: map[string]string{ + "account_id": accountID, + "name": "a_name", + "type": "a_type", + }, + expectedResourceAttrs: map[string]string{ "name": "a_name", "type": "a_type", }, @@ -1276,7 +1408,8 @@ func 
TestGlobalMutipleParameterized_ByIdentity(t *testing.T) { "name": "a_name", "type": "a_type", }, - expectError: true, + identitySpec: globalMultipleParameterizedIdentitySpec([]string{"name", "type"}), + expectError: true, }, "WithIDAttr_Required": { @@ -1284,20 +1417,53 @@ func TestGlobalMutipleParameterized_ByIdentity(t *testing.T) { "name": "a_name", "type": "a_type", }, + identitySpec: globalMultipleParameterizedIdentitySpec([]string{"name", "type"}), useSchemaWithID: true, useImportIDCreator: true, expectedID: "a_name,a_type", - expectError: false, + expectedIdentityAttrs: map[string]string{ + "account_id": accountID, + "name": "a_name", + "type": "a_type", + }, + expectedResourceAttrs: map[string]string{ + "name": "a_name", + "type": "a_type", + }, + expectError: false, }, "WithIDAttr_NoImportIDCreate": { identityAttrs: map[string]string{ "name": "a_name", "type": "a_type", }, + identitySpec: globalMultipleParameterizedIdentitySpec([]string{"name", "type"}), useSchemaWithID: true, useImportIDCreator: false, expectError: true, }, + + "name mapped": { + identityAttrs: map[string]string{ + "id_name": "a_name", + "type": "a_type", + }, + identitySpec: globalMultipleParameterizedIdentitySpecWithMappedName(map[string]string{ + "id_name": "name", + "type": "type", + }), + expectedIdentityAttrs: map[string]string{ + "account_id": accountID, + "id_name": "a_name", + "type": "a_type", + }, + expectedResourceAttrs: map[string]string{ + "name": "a_name", + "type": "a_type", + }, + expectedID: "a_name,a_type", + expectError: false, + }, } for name, tc := range testCases { @@ -1309,9 +1475,7 @@ func TestGlobalMutipleParameterized_ByIdentity(t *testing.T) { accountID: accountID, } - identitySpec := globalMultipleParameterizedIdentitySpec([]string{"name", "type"}) - - identitySchema := ptr(identity.NewIdentitySchema(identitySpec)) + identitySchema := ptr(identity.NewIdentitySchema(tc.identitySpec)) importSpec := inttypes.FrameworkImport{ WrappedImport: true, @@ -1333,7 
+1497,7 @@ func TestGlobalMutipleParameterized_ByIdentity(t *testing.T) { identity := identityFromSchema(ctx, identitySchema, tc.identityAttrs) - response := importByIdentity(ctx, f, &client, schema, identity, identitySpec, &importSpec) + response := importByIdentity(ctx, f, &client, schema, identity, tc.identitySpec, &importSpec) if tc.expectError { if !response.Diagnostics.HasError() { t.Fatal("Expected error, got none") @@ -1349,7 +1513,7 @@ func TestGlobalMutipleParameterized_ByIdentity(t *testing.T) { } // Check attr values - for name, expectedAttr := range tc.expectedAttrs { + for name, expectedAttr := range tc.expectedResourceAttrs { if e, a := expectedAttr, getAttributeValue(ctx, t, response.State, path.Root(name)); e != a { t.Errorf("expected `%s` to be %q, got %q", name, e, a) } @@ -1366,10 +1530,7 @@ func TestGlobalMutipleParameterized_ByIdentity(t *testing.T) { if identity := response.Identity; identity == nil { t.Error("Identity should be set") } else { - if e, a := accountID, getIdentityAttributeValue(ctx, t, response.Identity, path.Root("account_id")); e != a { - t.Errorf("expected Identity `account_id` to be %q, got %q", e, a) - } - for name, expectedAttr := range tc.expectedAttrs { + for name, expectedAttr := range tc.expectedIdentityAttrs { if e, a := expectedAttr, getIdentityAttributeValue(ctx, t, response.Identity, path.Root(name)); e != a { t.Errorf("expected Identity `%s` to be %q, got %q", name, e, a) } diff --git a/internal/provider/framework/intercept.go b/internal/provider/framework/intercept.go index 0ee385a472e3..2d4367f71df2 100644 --- a/internal/provider/framework/intercept.go +++ b/internal/provider/framework/intercept.go @@ -5,23 +5,40 @@ package framework import ( "context" + "slices" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/hashicorp/terraform-plugin-framework/action" "github.com/hashicorp/terraform-plugin-framework/datasource" "github.com/hashicorp/terraform-plugin-framework/diag" 
"github.com/hashicorp/terraform-plugin-framework/ephemeral" + "github.com/hashicorp/terraform-plugin-framework/list" "github.com/hashicorp/terraform-plugin-framework/resource" "github.com/hashicorp/terraform-provider-aws/internal/conns" + tfiter "github.com/hashicorp/terraform-provider-aws/internal/iter" tfslices "github.com/hashicorp/terraform-provider-aws/internal/slices" + tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" ) +type awsClient interface { + AccountID(context.Context) string + Region(context.Context) string + DefaultTagsConfig(ctx context.Context) *tftags.DefaultConfig + IgnoreTagsConfig(ctx context.Context) *tftags.IgnoreConfig + Partition(context.Context) string + ServicePackage(_ context.Context, name string) conns.ServicePackage + ValidateInContextRegionInPartition(ctx context.Context) error + AwsConfig(context.Context) aws.Config +} + type interceptorOptions[Request, Response any] struct { - c *conns.AWSClient + c awsClient request *Request response *Response when when } -type interceptorFunc[Request, Response any] func(context.Context, interceptorOptions[Request, Response]) diag.Diagnostics +type interceptorFunc[Request, Response any] func(context.Context, interceptorOptions[Request, Response]) type interceptorInvocations []any @@ -31,7 +48,7 @@ type interceptorInvocations []any // In other cases all interceptors in the chain are run. type dataSourceCRUDInterceptor interface { // read is invoked for a Read call. - read(context.Context, interceptorOptions[datasource.ReadRequest, datasource.ReadResponse]) diag.Diagnostics + read(context.Context, interceptorOptions[datasource.ReadRequest, datasource.ReadResponse]) } // dataSourceRead returns a slice of interceptors that run on data source Read. @@ -46,7 +63,7 @@ func (s interceptorInvocations) dataSourceRead() []interceptorFunc[datasource.Re type dataSourceSchemaInterceptor interface { // schema is invoked for a Schema call. 
- schema(context.Context, interceptorOptions[datasource.SchemaRequest, datasource.SchemaResponse]) diag.Diagnostics + schema(context.Context, interceptorOptions[datasource.SchemaRequest, datasource.SchemaResponse]) } // dataSourceSchema returns a slice of interceptors that run on data source Schema. @@ -61,11 +78,11 @@ func (s interceptorInvocations) dataSourceSchema() []interceptorFunc[datasource. type ephemeralResourceORCInterceptor interface { // open is invoked for an Open call. - open(context.Context, interceptorOptions[ephemeral.OpenRequest, ephemeral.OpenResponse]) diag.Diagnostics + open(context.Context, interceptorOptions[ephemeral.OpenRequest, ephemeral.OpenResponse]) // renew is invoked for a Renew call. - renew(context.Context, interceptorOptions[ephemeral.RenewRequest, ephemeral.RenewResponse]) diag.Diagnostics + renew(context.Context, interceptorOptions[ephemeral.RenewRequest, ephemeral.RenewResponse]) // close is invoked for a Close call. - close(context.Context, interceptorOptions[ephemeral.CloseRequest, ephemeral.CloseResponse]) diag.Diagnostics + close(context.Context, interceptorOptions[ephemeral.CloseRequest, ephemeral.CloseResponse]) } // ephemeralResourceOpen returns a slice of interceptors that run on ephemeral resource Open. @@ -102,27 +119,18 @@ func (s interceptorInvocations) ephemeralResourceClose() []interceptorFunc[ephem // It can be embedded into a struct to provide default behavior for the open, renew, and close methods. 
type ephemeralResourceNoOpORCInterceptor struct{} -func (r ephemeralResourceNoOpORCInterceptor) open(ctx context.Context, opts interceptorOptions[ephemeral.OpenRequest, ephemeral.OpenResponse]) diag.Diagnostics { - var diags diag.Diagnostics - - return diags +func (r ephemeralResourceNoOpORCInterceptor) open(ctx context.Context, opts interceptorOptions[ephemeral.OpenRequest, ephemeral.OpenResponse]) { } -func (r ephemeralResourceNoOpORCInterceptor) renew(ctx context.Context, opts interceptorOptions[ephemeral.RenewRequest, ephemeral.RenewResponse]) diag.Diagnostics { - var diags diag.Diagnostics - - return diags +func (r ephemeralResourceNoOpORCInterceptor) renew(ctx context.Context, opts interceptorOptions[ephemeral.RenewRequest, ephemeral.RenewResponse]) { } -func (r ephemeralResourceNoOpORCInterceptor) close(ctx context.Context, opts interceptorOptions[ephemeral.CloseRequest, ephemeral.CloseResponse]) diag.Diagnostics { - var diags diag.Diagnostics - - return diags +func (r ephemeralResourceNoOpORCInterceptor) close(ctx context.Context, opts interceptorOptions[ephemeral.CloseRequest, ephemeral.CloseResponse]) { } type ephemeralResourceSchemaInterceptor interface { // schema is invoked for a Schema call. - schema(context.Context, interceptorOptions[ephemeral.SchemaRequest, ephemeral.SchemaResponse]) diag.Diagnostics + schema(context.Context, interceptorOptions[ephemeral.SchemaRequest, ephemeral.SchemaResponse]) } // ephemeralResourceSchema returns a slice of interceptors that run on ephemeral resource Schema. @@ -141,13 +149,13 @@ func (s interceptorInvocations) ephemeralResourceSchema() []interceptorFunc[ephe // In other cases all interceptors in the chain are run. type resourceCRUDInterceptor interface { // create is invoked for a Create call. 
- create(context.Context, interceptorOptions[resource.CreateRequest, resource.CreateResponse]) diag.Diagnostics + create(context.Context, interceptorOptions[resource.CreateRequest, resource.CreateResponse]) // read is invoked for a Read call. - read(context.Context, interceptorOptions[resource.ReadRequest, resource.ReadResponse]) diag.Diagnostics + read(context.Context, interceptorOptions[resource.ReadRequest, resource.ReadResponse]) // update is invoked for an Update call. - update(context.Context, interceptorOptions[resource.UpdateRequest, resource.UpdateResponse]) diag.Diagnostics + update(context.Context, interceptorOptions[resource.UpdateRequest, resource.UpdateResponse]) // delete is invoked for a Delete call. - delete(context.Context, interceptorOptions[resource.DeleteRequest, resource.DeleteResponse]) diag.Diagnostics + delete(context.Context, interceptorOptions[resource.DeleteRequest, resource.DeleteResponse]) } // resourceCreate returns a slice of interceptors that run on resource Create. @@ -194,33 +202,21 @@ func (s interceptorInvocations) resourceDelete() []interceptorFunc[resource.Dele // It can be embedded into a struct to provide default behavior for the create, read, update, and delete methods. 
type resourceNoOpCRUDInterceptor struct{} -func (r resourceNoOpCRUDInterceptor) create(ctx context.Context, opts interceptorOptions[resource.CreateRequest, resource.CreateResponse]) diag.Diagnostics { - var diags diag.Diagnostics - - return diags +func (r resourceNoOpCRUDInterceptor) create(ctx context.Context, opts interceptorOptions[resource.CreateRequest, resource.CreateResponse]) { } -func (r resourceNoOpCRUDInterceptor) read(ctx context.Context, opts interceptorOptions[resource.ReadRequest, resource.ReadResponse]) diag.Diagnostics { - var diags diag.Diagnostics - - return diags +func (r resourceNoOpCRUDInterceptor) read(ctx context.Context, opts interceptorOptions[resource.ReadRequest, resource.ReadResponse]) { } -func (r resourceNoOpCRUDInterceptor) update(ctx context.Context, opts interceptorOptions[resource.UpdateRequest, resource.UpdateResponse]) diag.Diagnostics { - var diags diag.Diagnostics - - return diags +func (r resourceNoOpCRUDInterceptor) update(ctx context.Context, opts interceptorOptions[resource.UpdateRequest, resource.UpdateResponse]) { } -func (r resourceNoOpCRUDInterceptor) delete(ctx context.Context, opts interceptorOptions[resource.DeleteRequest, resource.DeleteResponse]) diag.Diagnostics { - var diags diag.Diagnostics - - return diags +func (r resourceNoOpCRUDInterceptor) delete(ctx context.Context, opts interceptorOptions[resource.DeleteRequest, resource.DeleteResponse]) { } type resourceSchemaInterceptor interface { // schema is invoked for a Schema call. - schema(context.Context, interceptorOptions[resource.SchemaRequest, resource.SchemaResponse]) diag.Diagnostics + schema(context.Context, interceptorOptions[resource.SchemaRequest, resource.SchemaResponse]) } // resourceSchema returns a slice of interceptors that run on resource Schema. @@ -235,7 +231,7 @@ func (s interceptorInvocations) resourceSchema() []interceptorFunc[resource.Sche type resourceModifyPlanInterceptor interface { // modifyPlan is invoked for a ModifyPlan call. 
- modifyPlan(context.Context, interceptorOptions[resource.ModifyPlanRequest, resource.ModifyPlanResponse]) diag.Diagnostics + modifyPlan(context.Context, interceptorOptions[resource.ModifyPlanRequest, resource.ModifyPlanResponse]) } // resourceModifyPlan returns a slice of interceptors that run on resource ModifyPlan. @@ -250,7 +246,7 @@ func (s interceptorInvocations) resourceModifyPlan() []interceptorFunc[resource. type resourceImportStateInterceptor interface { // importState is invoked for an ImportState call. - importState(context.Context, interceptorOptions[resource.ImportStateRequest, resource.ImportStateResponse]) diag.Diagnostics + importState(context.Context, interceptorOptions[resource.ImportStateRequest, resource.ImportStateResponse]) } // resourceSchema returns a slice of interceptors that run on resource Schema. @@ -263,6 +259,36 @@ func (s interceptorInvocations) resourceImportState() []interceptorFunc[resource }) } +type listInterceptorFunc[Request, Response any] func(context.Context, interceptorOptions[Request, Response]) diag.Diagnostics + +type listResourceListInterceptor interface { + list(context.Context, interceptorOptions[list.ListRequest, list.ListResultsStream]) diag.Diagnostics +} + +// resourceList returns a slice of interceptors that run on resource List. +func (s interceptorInvocations) resourceList() []listInterceptorFunc[list.ListRequest, list.ListResultsStream] { + return tfslices.ApplyToAll(tfslices.Filter(s, func(e any) bool { + _, ok := e.(listResourceListInterceptor) + return ok + }), func(e any) listInterceptorFunc[list.ListRequest, list.ListResultsStream] { + return e.(listResourceListInterceptor).list + }) +} + +type listResourceSchemaInterceptor interface { + schema(context.Context, interceptorOptions[list.ListResourceSchemaRequest, list.ListResourceSchemaResponse]) +} + +// resourceListResourceConfigSchema returns a slice of interceptors that run on resource ListResourceConfigSchema. 
+func (s interceptorInvocations) resourceListResourceConfigSchema() []interceptorFunc[list.ListResourceSchemaRequest, list.ListResourceSchemaResponse] { + return tfslices.ApplyToAll(tfslices.Filter(s, func(e any) bool { + _, ok := e.(listResourceSchemaInterceptor) + return ok + }), func(e any) interceptorFunc[list.ListResourceSchemaRequest, list.ListResourceSchemaResponse] { + return e.(listResourceSchemaInterceptor).schema + }) +} + // when represents the point in the CRUD request lifecycle that an interceptor is run. // Multiple values can be ORed together. type when uint16 @@ -274,9 +300,48 @@ const ( Finally // Interceptor is invoked after After or OnError ) +// Only generate strings for use in tests +//go:generate stringer -type=when -output=when_string_test.go + +// An action interceptor is functionality invoked during the action's lifecycle. +// If a Before interceptor returns Diagnostics indicating an error occurred then +// no further interceptors in the chain are run and neither is the schema's method. +// In other cases all interceptors in the chain are run. +type actionInvokeInterceptor interface { + // invoke is invoked for an Invoke call. + invoke(context.Context, interceptorOptions[action.InvokeRequest, action.InvokeResponse]) +} + +// actionInvoke returns a slice of interceptors that run on action Invoke. +func (s interceptorInvocations) actionInvoke() []interceptorFunc[action.InvokeRequest, action.InvokeResponse] { + return tfslices.ApplyToAll(tfslices.Filter(s, func(e any) bool { + _, ok := e.(actionInvokeInterceptor) + return ok + }), func(e any) interceptorFunc[action.InvokeRequest, action.InvokeResponse] { + return e.(actionInvokeInterceptor).invoke + }) +} + +type actionSchemaInterceptor interface { + // schema is invoked for a Schema call. + schema(context.Context, interceptorOptions[action.SchemaRequest, action.SchemaResponse]) +} + +// actionSchema returns a slice of interceptors that run on action Schema. 
+func (s interceptorInvocations) actionSchema() []interceptorFunc[action.SchemaRequest, action.SchemaResponse] { + return tfslices.ApplyToAll(tfslices.Filter(s, func(e any) bool { + _, ok := e.(actionSchemaInterceptor) + return ok + }), func(e any) interceptorFunc[action.SchemaRequest, action.SchemaResponse] { + return e.(actionSchemaInterceptor).schema + }) +} + // interceptedRequest represents a Plugin Framework request type that can be intercepted. type interceptedRequest interface { - datasource.SchemaRequest | + action.SchemaRequest | + action.InvokeRequest | + datasource.SchemaRequest | datasource.ReadRequest | ephemeral.SchemaRequest | ephemeral.OpenRequest | @@ -288,12 +353,15 @@ type interceptedRequest interface { resource.UpdateRequest | resource.DeleteRequest | resource.ModifyPlanRequest | - resource.ImportStateRequest + resource.ImportStateRequest | + list.ListResourceSchemaRequest } // interceptedResponse represents a Plugin Framework response type that can be intercepted. type interceptedResponse interface { - datasource.SchemaResponse | + action.SchemaResponse | + action.InvokeResponse | + datasource.SchemaResponse | datasource.ReadResponse | ephemeral.SchemaResponse | ephemeral.OpenResponse | @@ -305,62 +373,179 @@ type interceptedResponse interface { resource.UpdateResponse | resource.DeleteResponse | resource.ModifyPlanResponse | - resource.ImportStateResponse + resource.ImportStateResponse | + list.ListResourceSchemaResponse } +type innerFunc[Request, Response any] func(ctx context.Context, request Request, response *Response) + // interceptedHandler returns a handler that runs any interceptors. 
-func interceptedHandler[Request interceptedRequest, Response interceptedResponse](interceptors []interceptorFunc[Request, Response], f func(context.Context, *Request, *Response) diag.Diagnostics, c *conns.AWSClient) func(context.Context, *Request, *Response) diag.Diagnostics { - return func(ctx context.Context, request *Request, response *Response) diag.Diagnostics { - var diags diag.Diagnostics +func interceptedHandler[Request interceptedRequest, Response interceptedResponse](interceptors []interceptorFunc[Request, Response], f innerFunc[Request, Response], hasError hasErrorFn[Response], c awsClient) func(context.Context, Request, *Response) { + return func(ctx context.Context, request Request, response *Response) { + opts := interceptorOptions[Request, Response]{ + c: c, + request: &request, + response: response, + } + // Before interceptors are run first to last. - forward := interceptors - - when := Before - for _, v := range forward { - opts := interceptorOptions[Request, Response]{ - c: c, - request: request, - response: response, - when: when, - } - diags.Append(v(ctx, opts)...) + opts.when = Before + for v := range slices.Values(interceptors) { + v(ctx, opts) // Short circuit if any Before interceptor errors. - if diags.HasError() { - return diags + if hasError(response) { + return } } - // All other interceptors are run last to first. - reverse := tfslices.Reverse(forward) - diags = f(ctx, request, response) + f(ctx, request, response) - if diags.HasError() { - when = OnError + // All other interceptors are run last to first. + if hasError(response) { + opts.when = OnError } else { - when = After + opts.when = After } - for _, v := range reverse { - opts := interceptorOptions[Request, Response]{ - c: c, - request: request, - response: response, - when: when, - } - diags.Append(v(ctx, opts)...) 
+ for v := range tfslices.BackwardValues(interceptors) { + v(ctx, opts) } - when = Finally - for _, v := range reverse { - opts := interceptorOptions[Request, Response]{ - c: c, - request: request, - response: response, - when: when, + opts.when = Finally + for v := range tfslices.BackwardValues(interceptors) { + v(ctx, opts) + } + } +} + +type hasErrorFn[Response interceptedResponse] func(response *Response) bool + +func dataSourceSchemaHasError(response *datasource.SchemaResponse) bool { + return response.Diagnostics.HasError() +} + +func dataSourceReadHasError(response *datasource.ReadResponse) bool { + return response.Diagnostics.HasError() +} + +func ephemeralSchemaHasError(response *ephemeral.SchemaResponse) bool { + return response.Diagnostics.HasError() +} + +func ephemeralOpenHasError(response *ephemeral.OpenResponse) bool { + return response.Diagnostics.HasError() +} + +func ephemeralRenewHasError(response *ephemeral.RenewResponse) bool { + return response.Diagnostics.HasError() +} + +func ephemeralCloseHasError(response *ephemeral.CloseResponse) bool { + return response.Diagnostics.HasError() +} + +func resourceSchemaHasError(response *resource.SchemaResponse) bool { + return response.Diagnostics.HasError() +} + +func resourceCreateHasError(response *resource.CreateResponse) bool { + return response.Diagnostics.HasError() +} + +func resourceReadHasError(response *resource.ReadResponse) bool { + return response.Diagnostics.HasError() +} + +func resourceUpdateHasError(response *resource.UpdateResponse) bool { + return response.Diagnostics.HasError() +} + +func resourceDeleteHasError(response *resource.DeleteResponse) bool { + return response.Diagnostics.HasError() +} + +func resourceModifyPlanHasError(response *resource.ModifyPlanResponse) bool { + return response.Diagnostics.HasError() +} + +func resourceImportStateHasError(response *resource.ImportStateResponse) bool { + return response.Diagnostics.HasError() +} + +func actionSchemaHasError(response 
*action.SchemaResponse) bool { + return response.Diagnostics.HasError() +} + +func actionInvokeHasError(response *action.InvokeResponse) bool { + return response.Diagnostics.HasError() +} + +func listResourceConfigSchemaHasError(response *list.ListResourceSchemaResponse) bool { + return response.Diagnostics.HasError() +} + +func interceptedListHandler(interceptors []listInterceptorFunc[list.ListRequest, list.ListResultsStream], f func(context.Context, list.ListRequest, *list.ListResultsStream), c awsClient) func(context.Context, list.ListRequest, *list.ListResultsStream) { + return func(ctx context.Context, request list.ListRequest, stream *list.ListResultsStream) { + opts := interceptorOptions[list.ListRequest, list.ListResultsStream]{ + c: c, + request: &request, + response: stream, + } + + // Before interceptors are run first to last. + opts.when = Before + for v := range slices.Values(interceptors) { + diags := v(ctx, opts) + if len(diags) > 0 { + stream.Results = tfiter.Concat(stream.Results, list.ListResultsStreamDiagnostics(diags)) + } + if diags.HasError() { + return } - diags.Append(v(ctx, opts)...) } - return diags + // Stash `stream.Results` so that inner function can be unaware of interceptors. + resultStream := stream.Results + stream.Results = nil + + f(ctx, request, stream) + innerResultStream := stream.Results + + stream.Results = tfiter.Concat(resultStream, func(yield func(list.ListResult) bool) { + var hasError bool + for v := range innerResultStream { + if v.Diagnostics.HasError() { + hasError = true + } + if !yield(v) { + return + } + } + + // All other interceptors are run last to first. 
+ if hasError { + opts.when = OnError + } else { + opts.when = After + } + for v := range tfslices.BackwardValues(interceptors) { + diags := v(ctx, opts) + if len(diags) > 0 { + if !yield(list.ListResult{Diagnostics: diags}) { + return + } + } + } + + opts.when = Finally + for v := range tfslices.BackwardValues(interceptors) { + diags := v(ctx, opts) + if len(diags) > 0 { + if !yield(list.ListResult{Diagnostics: diags}) { + return + } + } + } + }) } } diff --git a/internal/provider/framework/intercept_test.go b/internal/provider/framework/intercept_test.go new file mode 100644 index 000000000000..b0b44dc44860 --- /dev/null +++ b/internal/provider/framework/intercept_test.go @@ -0,0 +1,618 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package framework + +import ( + "context" + "slices" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/list" + "github.com/hashicorp/terraform-plugin-framework/resource" +) + +func TestInterceptedHandler(t *testing.T) { + t.Parallel() + + client := mockClient{ + accountID: "123456789012", + region: "us-west-2", //lintignore:AWSAT003 + } + + testcases := map[string]struct { + firstInterceptorDiags map[when]diag.Diagnostics + secondInterceptorDiags map[when]diag.Diagnostics + innerFuncDiags diag.Diagnostics + expectedFirstCalls []when + expectedSecondCalls []when + expectedInnerCalls int + expectedDiags diag.Diagnostics + }{ + "First has Before error": { + firstInterceptorDiags: map[when]diag.Diagnostics{ + Before: { + diag.NewErrorDiagnostic("First interceptor Before error", "An error occurred in the first interceptor Before handler"), + }, + }, + expectedFirstCalls: []when{Before}, + expectedInnerCalls: 0, + expectedDiags: diag.Diagnostics{ + diag.NewErrorDiagnostic("First interceptor Before error", "An error occurred in the first interceptor Before handler"), + }, + }, + + "Second has Before error": { 
+ secondInterceptorDiags: map[when]diag.Diagnostics{ + Before: { + diag.NewErrorDiagnostic("Second interceptor Before error", "An error occurred in the second interceptor Before handler"), + }, + }, + expectedFirstCalls: []when{Before}, + expectedSecondCalls: []when{Before}, + expectedInnerCalls: 0, + expectedDiags: diag.Diagnostics{ + diag.NewErrorDiagnostic("Second interceptor Before error", "An error occurred in the second interceptor Before handler"), + }, + }, + + "First has Before warning": { + firstInterceptorDiags: map[when]diag.Diagnostics{ + Before: { + diag.NewWarningDiagnostic("First interceptor Before warning", "A warning occurred in the first interceptor Before handler"), + }, + }, + expectedFirstCalls: []when{Before, After, Finally}, + expectedSecondCalls: []when{Before, After, Finally}, + expectedInnerCalls: 1, + expectedDiags: diag.Diagnostics{ + diag.NewWarningDiagnostic("First interceptor Before warning", "A warning occurred in the first interceptor Before handler"), + }, + }, + + "Second has Before warning": { + secondInterceptorDiags: map[when]diag.Diagnostics{ + Before: { + diag.NewWarningDiagnostic("Second interceptor Before warning", "A warning occurred in the second interceptor Before handler"), + }, + }, + expectedFirstCalls: []when{Before, After, Finally}, + expectedSecondCalls: []when{Before, After, Finally}, + expectedInnerCalls: 1, + expectedDiags: diag.Diagnostics{ + diag.NewWarningDiagnostic("Second interceptor Before warning", "A warning occurred in the second interceptor Before handler"), + }, + }, + + "First has Before warning Second has Before error": { + firstInterceptorDiags: map[when]diag.Diagnostics{ + Before: { + diag.NewWarningDiagnostic("First interceptor Before warning", "A warning occurred in the first interceptor Before handler"), + }, + }, + secondInterceptorDiags: map[when]diag.Diagnostics{ + Before: { + diag.NewErrorDiagnostic("Second interceptor Before error", "An error occurred in the second interceptor Before 
handler"), + }, + }, + expectedFirstCalls: []when{Before}, + expectedSecondCalls: []when{Before}, + expectedInnerCalls: 0, + expectedDiags: diag.Diagnostics{ + diag.NewWarningDiagnostic("First interceptor Before warning", "A warning occurred in the first interceptor Before handler"), + diag.NewErrorDiagnostic("Second interceptor Before error", "An error occurred in the second interceptor Before handler"), + }, + }, + + "Inner has error": { + innerFuncDiags: diag.Diagnostics{ + diag.NewErrorDiagnostic("Inner function error", "An error occurred in the inner function"), + }, + expectedFirstCalls: []when{Before, OnError, Finally}, + expectedSecondCalls: []when{Before, OnError, Finally}, + expectedInnerCalls: 1, + expectedDiags: diag.Diagnostics{ + diag.NewErrorDiagnostic("Inner function error", "An error occurred in the inner function"), + }, + }, + + "Inner has warning": { + innerFuncDiags: diag.Diagnostics{ + diag.NewWarningDiagnostic("Inner function warning", "A warning occurred in the inner function"), + }, + expectedFirstCalls: []when{Before, After, Finally}, + expectedSecondCalls: []when{Before, After, Finally}, + expectedInnerCalls: 1, + expectedDiags: diag.Diagnostics{ + diag.NewWarningDiagnostic("Inner function warning", "A warning occurred in the inner function"), + }, + }, + + "Inner has error First has Before warning": { + firstInterceptorDiags: map[when]diag.Diagnostics{ + Before: { + diag.NewWarningDiagnostic("First interceptor Before warning", "A warning occurred in the first interceptor Before handler"), + }, + }, + innerFuncDiags: diag.Diagnostics{ + diag.NewErrorDiagnostic("Inner function error", "An error occurred in the inner function"), + }, + expectedFirstCalls: []when{Before, OnError, Finally}, + expectedSecondCalls: []when{Before, OnError, Finally}, + expectedInnerCalls: 1, + expectedDiags: diag.Diagnostics{ + diag.NewWarningDiagnostic("First interceptor Before warning", "A warning occurred in the first interceptor Before handler"), + 
diag.NewErrorDiagnostic("Inner function error", "An error occurred in the inner function"), + }, + }, + + "All have warnings": { + firstInterceptorDiags: map[when]diag.Diagnostics{ + Before: { + diag.NewWarningDiagnostic("First interceptor Before warning", "A warning occurred in the first interceptor Before handler"), + }, + After: { + diag.NewWarningDiagnostic("First interceptor After warning", "A warning occurred in the first interceptor After handler"), + }, + Finally: { + diag.NewWarningDiagnostic("First interceptor Finally warning", "A warning occurred in the first interceptor Finally handler"), + }, + }, + secondInterceptorDiags: map[when]diag.Diagnostics{ + Before: { + diag.NewWarningDiagnostic("Second interceptor Before warning", "A warning occurred in the second interceptor Before handler"), + }, + After: { + diag.NewWarningDiagnostic("Second interceptor After warning", "A warning occurred in the second interceptor After handler"), + }, + Finally: { + diag.NewWarningDiagnostic("Second interceptor Finally warning", "A warning occurred in the second interceptor Finally handler"), + }, + }, + innerFuncDiags: diag.Diagnostics{ + diag.NewWarningDiagnostic("Inner function warning", "A warning occurred in the inner function"), + }, + expectedFirstCalls: []when{Before, After, Finally}, + expectedSecondCalls: []when{Before, After, Finally}, + expectedInnerCalls: 1, + expectedDiags: diag.Diagnostics{ + diag.NewWarningDiagnostic("First interceptor Before warning", "A warning occurred in the first interceptor Before handler"), + diag.NewWarningDiagnostic("Second interceptor Before warning", "A warning occurred in the second interceptor Before handler"), + diag.NewWarningDiagnostic("Inner function warning", "A warning occurred in the inner function"), + diag.NewWarningDiagnostic("Second interceptor After warning", "A warning occurred in the second interceptor After handler"), + diag.NewWarningDiagnostic("First interceptor After warning", "A warning occurred in the 
first interceptor After handler"), + diag.NewWarningDiagnostic("Second interceptor Finally warning", "A warning occurred in the second interceptor Finally handler"), + diag.NewWarningDiagnostic("First interceptor Finally warning", "A warning occurred in the first interceptor Finally handler"), + }, + }, + + "Inner has error Handlers have warnings": { + firstInterceptorDiags: map[when]diag.Diagnostics{ + Before: { + diag.NewWarningDiagnostic("First interceptor Before warning", "A warning occurred in the first interceptor Before handler"), + }, + OnError: { + diag.NewWarningDiagnostic("First interceptor OnError warning", "A warning occurred in the first interceptor OnError handler"), + }, + Finally: { + diag.NewWarningDiagnostic("First interceptor Finally warning", "A warning occurred in the first interceptor Finally handler"), + }, + }, + secondInterceptorDiags: map[when]diag.Diagnostics{ + Before: { + diag.NewWarningDiagnostic("Second interceptor Before warning", "A warning occurred in the second interceptor Before handler"), + }, + OnError: { + diag.NewWarningDiagnostic("Second interceptor OnError warning", "A warning occurred in the second interceptor OnError handler"), + }, + Finally: { + diag.NewWarningDiagnostic("Second interceptor Finally warning", "A warning occurred in the second interceptor Finally handler"), + }, + }, + innerFuncDiags: diag.Diagnostics{ + diag.NewErrorDiagnostic("Inner function error", "An error occurred in the inner function"), + }, + expectedFirstCalls: []when{Before, OnError, Finally}, + expectedSecondCalls: []when{Before, OnError, Finally}, + expectedInnerCalls: 1, + expectedDiags: diag.Diagnostics{ + diag.NewWarningDiagnostic("First interceptor Before warning", "A warning occurred in the first interceptor Before handler"), + diag.NewWarningDiagnostic("Second interceptor Before warning", "A warning occurred in the second interceptor Before handler"), + diag.NewErrorDiagnostic("Inner function error", "An error occurred in the inner 
function"), + diag.NewWarningDiagnostic("Second interceptor OnError warning", "A warning occurred in the second interceptor OnError handler"), + diag.NewWarningDiagnostic("First interceptor OnError warning", "A warning occurred in the first interceptor OnError handler"), + diag.NewWarningDiagnostic("Second interceptor Finally warning", "A warning occurred in the second interceptor Finally handler"), + diag.NewWarningDiagnostic("First interceptor Finally warning", "A warning occurred in the first interceptor Finally handler"), + }, + }, + } + + for name, tc := range testcases { + t.Run(name, func(t *testing.T) { + t.Parallel() + + first := newMockInterceptor(tc.firstInterceptorDiags) + second := newMockInterceptor(tc.secondInterceptorDiags) + interceptors := []interceptorFunc[resource.SchemaRequest, resource.SchemaResponse]{ + first.Intercept, + second.Intercept, + } + + f := newMockInnerFunc(tc.innerFuncDiags) + + handler := interceptedHandler(interceptors, f.Call, resourceSchemaHasError, client) + + ctx := t.Context() + var request resource.SchemaRequest + response := resource.SchemaResponse{ + Diagnostics: diag.Diagnostics{ + diag.NewWarningDiagnostic("Pre-existing warning", "This is a pre-existing warning that should not be affected by the interceptors"), + }, + } + tc.expectedDiags = slices.Insert(tc.expectedDiags, 0, diag.Diagnostic(diag.NewWarningDiagnostic("Pre-existing warning", "This is a pre-existing warning that should not be affected by the interceptors"))) + + handler(ctx, request, &response) + + if diff := cmp.Diff(response.Diagnostics, tc.expectedDiags); diff != "" { + t.Errorf("unexpected diagnostics difference: %s", diff) + } + + if diff := cmp.Diff(first.called, tc.expectedFirstCalls); diff != "" { + t.Errorf("unexpected first interceptor calls difference: %s", diff) + } + if diff := cmp.Diff(second.called, tc.expectedSecondCalls); diff != "" { + t.Errorf("unexpected second interceptor calls difference: %s", diff) + } + if tc.expectedInnerCalls == 
0 { + if f.count != 0 { + t.Errorf("expected inner function to not be called, got %d", f.count) + } + } else { + if f.count != tc.expectedInnerCalls { + t.Errorf("expected inner function to be called %d times, got %d", tc.expectedInnerCalls, f.count) + } + } + }) + } +} + +type mockInterceptor struct { + diags map[when]diag.Diagnostics + called []when +} + +func newMockInterceptor(diags map[when]diag.Diagnostics) *mockInterceptor { + return &mockInterceptor{ + diags: diags, + } +} + +func (m *mockInterceptor) Intercept(ctx context.Context, opts interceptorOptions[resource.SchemaRequest, resource.SchemaResponse]) { + m.called = append(m.called, opts.when) + opts.response.Diagnostics.Append(m.diags[opts.when]...) +} + +type mockInnerFunc struct { + diags diag.Diagnostics + count int +} + +func newMockInnerFunc(diags diag.Diagnostics) mockInnerFunc { + return mockInnerFunc{ + diags: diags, + } +} + +func (m *mockInnerFunc) Call(ctx context.Context, request resource.SchemaRequest, response *resource.SchemaResponse) { + m.count++ + response.Diagnostics.Append(m.diags...) 
+} + +func TestInterceptedListHandler(t *testing.T) { + t.Parallel() + + client := mockClient{ + accountID: "123456789012", + region: "us-west-2", //lintignore:AWSAT003 + } + + testcases := map[string]struct { + firstInterceptorDiags map[when]diag.Diagnostics + secondInterceptorDiags map[when]diag.Diagnostics + innerFuncDiags diag.Diagnostics + expectedFirstCalls []when + expectedSecondCalls []when + expectedInnerCalls int + expectedDiags diag.Diagnostics + }{ + "First has Before error": { + firstInterceptorDiags: map[when]diag.Diagnostics{ + Before: { + diag.NewErrorDiagnostic("First interceptor Before error", "An error occurred in the first interceptor Before handler"), + }, + }, + expectedFirstCalls: []when{Before}, + expectedInnerCalls: 0, + expectedDiags: diag.Diagnostics{ + diag.NewErrorDiagnostic("First interceptor Before error", "An error occurred in the first interceptor Before handler"), + }, + }, + + "Second has Before error": { + secondInterceptorDiags: map[when]diag.Diagnostics{ + Before: { + diag.NewErrorDiagnostic("Second interceptor Before error", "An error occurred in the second interceptor Before handler"), + }, + }, + expectedFirstCalls: []when{Before}, + expectedSecondCalls: []when{Before}, + expectedInnerCalls: 0, + expectedDiags: diag.Diagnostics{ + diag.NewErrorDiagnostic("Second interceptor Before error", "An error occurred in the second interceptor Before handler"), + }, + }, + + "First has Before warning": { + firstInterceptorDiags: map[when]diag.Diagnostics{ + Before: { + diag.NewWarningDiagnostic("First interceptor Before warning", "A warning occurred in the first interceptor Before handler"), + }, + }, + expectedFirstCalls: []when{Before, After, Finally}, + expectedSecondCalls: []when{Before, After, Finally}, + expectedInnerCalls: 1, + expectedDiags: diag.Diagnostics{ + diag.NewWarningDiagnostic("First interceptor Before warning", "A warning occurred in the first interceptor Before handler"), + }, + }, + + "Second has Before warning": { 
+ secondInterceptorDiags: map[when]diag.Diagnostics{ + Before: { + diag.NewWarningDiagnostic("Second interceptor Before warning", "A warning occurred in the second interceptor Before handler"), + }, + }, + expectedFirstCalls: []when{Before, After, Finally}, + expectedSecondCalls: []when{Before, After, Finally}, + expectedInnerCalls: 1, + expectedDiags: diag.Diagnostics{ + diag.NewWarningDiagnostic("Second interceptor Before warning", "A warning occurred in the second interceptor Before handler"), + }, + }, + + "First has Before warning Second has Before error": { + firstInterceptorDiags: map[when]diag.Diagnostics{ + Before: { + diag.NewWarningDiagnostic("First interceptor Before warning", "A warning occurred in the first interceptor Before handler"), + }, + }, + secondInterceptorDiags: map[when]diag.Diagnostics{ + Before: { + diag.NewErrorDiagnostic("Second interceptor Before error", "An error occurred in the second interceptor Before handler"), + }, + }, + expectedFirstCalls: []when{Before}, + expectedSecondCalls: []when{Before}, + expectedInnerCalls: 0, + expectedDiags: diag.Diagnostics{ + diag.NewWarningDiagnostic("First interceptor Before warning", "A warning occurred in the first interceptor Before handler"), + diag.NewErrorDiagnostic("Second interceptor Before error", "An error occurred in the second interceptor Before handler"), + }, + }, + + "Inner has error": { + innerFuncDiags: diag.Diagnostics{ + diag.NewErrorDiagnostic("Inner function error", "An error occurred in the inner function"), + }, + expectedFirstCalls: []when{Before, OnError, Finally}, + expectedSecondCalls: []when{Before, OnError, Finally}, + expectedInnerCalls: 1, + expectedDiags: diag.Diagnostics{ + diag.NewErrorDiagnostic("Inner function error", "An error occurred in the inner function"), + }, + }, + + "Inner has warning": { + innerFuncDiags: diag.Diagnostics{ + diag.NewWarningDiagnostic("Inner function warning", "A warning occurred in the inner function"), + }, + expectedFirstCalls: 
[]when{Before, After, Finally}, + expectedSecondCalls: []when{Before, After, Finally}, + expectedInnerCalls: 1, + expectedDiags: diag.Diagnostics{ + diag.NewWarningDiagnostic("Inner function warning", "A warning occurred in the inner function"), + }, + }, + + "Inner has error First has Before warning": { + firstInterceptorDiags: map[when]diag.Diagnostics{ + Before: { + diag.NewWarningDiagnostic("First interceptor Before warning", "A warning occurred in the first interceptor Before handler"), + }, + }, + innerFuncDiags: diag.Diagnostics{ + diag.NewErrorDiagnostic("Inner function error", "An error occurred in the inner function"), + }, + expectedFirstCalls: []when{Before, OnError, Finally}, + expectedSecondCalls: []when{Before, OnError, Finally}, + expectedInnerCalls: 1, + expectedDiags: diag.Diagnostics{ + diag.NewWarningDiagnostic("First interceptor Before warning", "A warning occurred in the first interceptor Before handler"), + diag.NewErrorDiagnostic("Inner function error", "An error occurred in the inner function"), + }, + }, + + "All have warnings": { + firstInterceptorDiags: map[when]diag.Diagnostics{ + Before: { + diag.NewWarningDiagnostic("First interceptor Before warning", "A warning occurred in the first interceptor Before handler"), + }, + After: { + diag.NewWarningDiagnostic("First interceptor After warning", "A warning occurred in the first interceptor After handler"), + }, + Finally: { + diag.NewWarningDiagnostic("First interceptor Finally warning", "A warning occurred in the first interceptor Finally handler"), + }, + }, + secondInterceptorDiags: map[when]diag.Diagnostics{ + Before: { + diag.NewWarningDiagnostic("Second interceptor Before warning", "A warning occurred in the second interceptor Before handler"), + }, + After: { + diag.NewWarningDiagnostic("Second interceptor After warning", "A warning occurred in the second interceptor After handler"), + }, + Finally: { + diag.NewWarningDiagnostic("Second interceptor Finally warning", "A warning 
occurred in the second interceptor Finally handler"), + }, + }, + innerFuncDiags: diag.Diagnostics{ + diag.NewWarningDiagnostic("Inner function warning", "A warning occurred in the inner function"), + }, + expectedFirstCalls: []when{Before, After, Finally}, + expectedSecondCalls: []when{Before, After, Finally}, + expectedInnerCalls: 1, + expectedDiags: diag.Diagnostics{ + diag.NewWarningDiagnostic("First interceptor Before warning", "A warning occurred in the first interceptor Before handler"), + diag.NewWarningDiagnostic("Second interceptor Before warning", "A warning occurred in the second interceptor Before handler"), + diag.NewWarningDiagnostic("Inner function warning", "A warning occurred in the inner function"), + diag.NewWarningDiagnostic("Second interceptor After warning", "A warning occurred in the second interceptor After handler"), + diag.NewWarningDiagnostic("First interceptor After warning", "A warning occurred in the first interceptor After handler"), + diag.NewWarningDiagnostic("Second interceptor Finally warning", "A warning occurred in the second interceptor Finally handler"), + diag.NewWarningDiagnostic("First interceptor Finally warning", "A warning occurred in the first interceptor Finally handler"), + }, + }, + + "Inner has error Handlers have warnings": { + firstInterceptorDiags: map[when]diag.Diagnostics{ + Before: { + diag.NewWarningDiagnostic("First interceptor Before warning", "A warning occurred in the first interceptor Before handler"), + }, + OnError: { + diag.NewWarningDiagnostic("First interceptor OnError warning", "A warning occurred in the first interceptor OnError handler"), + }, + Finally: { + diag.NewWarningDiagnostic("First interceptor Finally warning", "A warning occurred in the first interceptor Finally handler"), + }, + }, + secondInterceptorDiags: map[when]diag.Diagnostics{ + Before: { + diag.NewWarningDiagnostic("Second interceptor Before warning", "A warning occurred in the second interceptor Before handler"), + }, + 
OnError: { + diag.NewWarningDiagnostic("Second interceptor OnError warning", "A warning occurred in the second interceptor OnError handler"), + }, + Finally: { + diag.NewWarningDiagnostic("Second interceptor Finally warning", "A warning occurred in the second interceptor Finally handler"), + }, + }, + innerFuncDiags: diag.Diagnostics{ + diag.NewErrorDiagnostic("Inner function error", "An error occurred in the inner function"), + }, + expectedFirstCalls: []when{Before, OnError, Finally}, + expectedSecondCalls: []when{Before, OnError, Finally}, + expectedInnerCalls: 1, + expectedDiags: diag.Diagnostics{ + diag.NewWarningDiagnostic("First interceptor Before warning", "A warning occurred in the first interceptor Before handler"), + diag.NewWarningDiagnostic("Second interceptor Before warning", "A warning occurred in the second interceptor Before handler"), + diag.NewErrorDiagnostic("Inner function error", "An error occurred in the inner function"), + diag.NewWarningDiagnostic("Second interceptor OnError warning", "A warning occurred in the second interceptor OnError handler"), + diag.NewWarningDiagnostic("First interceptor OnError warning", "A warning occurred in the first interceptor OnError handler"), + diag.NewWarningDiagnostic("Second interceptor Finally warning", "A warning occurred in the second interceptor Finally handler"), + diag.NewWarningDiagnostic("First interceptor Finally warning", "A warning occurred in the first interceptor Finally handler"), + }, + }, + } + + for name, tc := range testcases { + t.Run(name, func(t *testing.T) { + t.Parallel() + + first := newMockListInterceptor(tc.firstInterceptorDiags) + second := newMockListInterceptor(tc.secondInterceptorDiags) + interceptors := []listInterceptorFunc[list.ListRequest, list.ListResultsStream]{ + first.Intercept, + second.Intercept, + } + + f := newMockInnerListFunc(tc.innerFuncDiags) + + handler := interceptedListHandler(interceptors, f.Call, client) + + ctx := t.Context() + var request 
list.ListRequest + response := list.ListResultsStream{ + Results: list.ListResultsStreamDiagnostics(diag.Diagnostics{ + diag.NewWarningDiagnostic("Pre-existing warning", "This is a pre-existing warning that should not be affected by the interceptors"), + }), + } + tc.expectedDiags = slices.Insert(tc.expectedDiags, 0, diag.Diagnostic(diag.NewWarningDiagnostic("Pre-existing warning", "This is a pre-existing warning that should not be affected by the interceptors"))) + + handler(ctx, request, &response) + + var diags diag.Diagnostics + for d := range response.Results { + if len(d.Diagnostics) > 0 { + diags = append(diags, d.Diagnostics...) + } + } + + if diff := cmp.Diff(diags, tc.expectedDiags); diff != "" { + t.Errorf("unexpected diagnostics difference: %s", diff) + } + + if diff := cmp.Diff(first.called, tc.expectedFirstCalls); diff != "" { + t.Errorf("unexpected first interceptor calls difference: %s", diff) + } + if diff := cmp.Diff(second.called, tc.expectedSecondCalls); diff != "" { + t.Errorf("unexpected second interceptor calls difference: %s", diff) + } + if tc.expectedInnerCalls == 0 { + if f.count != 0 { + t.Errorf("expected inner function to not be called, got %d", f.count) + } + } else { + if f.count != tc.expectedInnerCalls { + t.Errorf("expected inner function to be called %d times, got %d", tc.expectedInnerCalls, f.count) + } + } + }) + } +} + +type mockListInterceptor struct { + diags map[when]diag.Diagnostics + called []when +} + +func newMockListInterceptor(diags map[when]diag.Diagnostics) *mockListInterceptor { + return &mockListInterceptor{ + diags: diags, + } +} + +func (m *mockListInterceptor) Intercept(ctx context.Context, opts interceptorOptions[list.ListRequest, list.ListResultsStream]) diag.Diagnostics { + m.called = append(m.called, opts.when) + return m.diags[opts.when] +} + +type mockInnerListFunc struct { + diags diag.Diagnostics + count int +} + +func newMockInnerListFunc(diags diag.Diagnostics) mockInnerListFunc { + return 
mockInnerListFunc{ + diags: diags, + } +} + +func (m *mockInnerListFunc) Call(ctx context.Context, request list.ListRequest, response *list.ListResultsStream) { + m.count++ + if len(m.diags) > 0 { + response.Results = list.ListResultsStreamDiagnostics(m.diags) + } else { + response.Results = list.NoListResults + } +} diff --git a/internal/provider/framework/listresource/list_result_intercept.go b/internal/provider/framework/listresource/list_result_intercept.go new file mode 100644 index 000000000000..c6e316e88aec --- /dev/null +++ b/internal/provider/framework/listresource/list_result_intercept.go @@ -0,0 +1,218 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package listresource + +import ( + "context" + "fmt" + "unique" + + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/list" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-framework/types/basetypes" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + fwflex "github.com/hashicorp/terraform-provider-aws/internal/framework/flex" + "github.com/hashicorp/terraform-provider-aws/internal/provider/interceptors" + tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" + inttypes "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/names" +) + +// when represents the point in the CRUD request lifecycle that an interceptor is run. +// Multiple values can be ORed together. 
+type when uint16 + +const ( + Before when = 1 << iota // Interceptor is invoked before call to method in schema + After // Interceptor is invoked after successful call to method in schema + OnError // Interceptor is invoked after unsuccessful call to method in schema + Finally // Interceptor is invoked after After or OnError +) + +type InterceptorParams struct { + C *conns.AWSClient + Result *list.ListResult + When when +} + +type ListResultInterceptor interface { + Read(ctx context.Context, params InterceptorParams) diag.Diagnostics +} + +// TODO: this could be unique as well +type tagsInterceptor struct { + interceptors.HTags +} + +func TagsInterceptor(tags unique.Handle[inttypes.ServicePackageResourceTags]) tagsInterceptor { + return tagsInterceptor{ + HTags: interceptors.HTags(tags), + } +} + +// Copied from tagsResourceInterceptor.read() +func (r tagsInterceptor) Read(ctx context.Context, params InterceptorParams) diag.Diagnostics { + var diags diag.Diagnostics + + sp, serviceName, resourceName, tagsInContext, ok := interceptors.InfoFromContext(ctx, params.C) + if !ok { + return diags + } + + switch params.When { + case After: + // If the R handler didn't set tags, try and read them from the service API. + if tagsInContext.TagsOut.IsNone() { + // Some old resources may not have the required attribute set after Read: + // https://github.com/hashicorp/terraform-provider-aws/issues/31180 + if identifier := r.GetIdentifierFramework(ctx, params.Result.Resource); identifier != "" { + if err := r.ListTags(ctx, sp, params.C, identifier); err != nil { + diags.AddError(fmt.Sprintf("listing tags for %s %s (%s)", serviceName, resourceName, identifier), err.Error()) + + return diags + } + } + } + + apiTags := tagsInContext.TagsOut.UnwrapOrDefault() + + // AWS APIs often return empty lists of tags when none have been configured. 
+ var stateTags tftags.Map + params.Result.Resource.GetAttribute(ctx, path.Root(names.AttrTags), &stateTags) + // Remove any provider configured ignore_tags and system tags from those returned from the service API. + // The resource's configured tags do not include any provider configured default_tags. + if v := apiTags.IgnoreSystem(sp.ServicePackageName()).IgnoreConfig(params.C.IgnoreTagsConfig(ctx)).ResolveDuplicatesFramework(ctx, params.C.DefaultTagsConfig(ctx), params.C.IgnoreTagsConfig(ctx), stateTags, &diags).Map(); len(v) > 0 { + stateTags = tftags.NewMapFromMapValue(fwflex.FlattenFrameworkStringValueMapLegacy(ctx, v)) + } + diags.Append(params.Result.Resource.SetAttribute(ctx, path.Root(names.AttrTags), &stateTags)...) + if diags.HasError() { + return diags + } + + // Computed tags_all do. + stateTagsAll := fwflex.FlattenFrameworkStringValueMapLegacy(ctx, apiTags.IgnoreSystem(sp.ServicePackageName()).IgnoreConfig(params.C.IgnoreTagsConfig(ctx)).Map()) + diags.Append(params.Result.Resource.SetAttribute(ctx, path.Root(names.AttrTagsAll), tftags.NewMapFromMapValue(stateTagsAll))...) + if diags.HasError() { + return diags + } + } + + return diags +} + +type identityInterceptor struct { + attributes []inttypes.IdentityAttribute +} + +func IdentityInterceptor(attributes []inttypes.IdentityAttribute) identityInterceptor { + return identityInterceptor{ + attributes: attributes, + } +} + +func (r identityInterceptor) Read(ctx context.Context, params InterceptorParams) diag.Diagnostics { + var diags diag.Diagnostics + + awsClient := params.C + + switch params.When { + // The Before step is not needed if Framework pre-populates the Identity as it does with CRUD operations + case Before: + identityType := params.Result.Identity.Schema.Type() + + obj, d := newEmptyObject(identityType) + diags.Append(d...) + if diags.HasError() { + return diags + } + + diags.Append(params.Result.Identity.Set(ctx, obj)...) 
+ if diags.HasError() { + return diags + } + + case After: + for _, att := range r.attributes { + switch att.Name() { + case names.AttrAccountID: + diags.Append(params.Result.Identity.SetAttribute(ctx, path.Root(att.Name()), awsClient.AccountID(ctx))...) + if diags.HasError() { + return diags + } + + case names.AttrRegion: + diags.Append(params.Result.Identity.SetAttribute(ctx, path.Root(att.Name()), awsClient.Region(ctx))...) + if diags.HasError() { + return diags + } + + default: + var attrVal attr.Value + diags.Append(params.Result.Resource.GetAttribute(ctx, path.Root(att.ResourceAttributeName()), &attrVal)...) + if diags.HasError() { + return diags + } + + diags.Append(params.Result.Identity.SetAttribute(ctx, path.Root(att.Name()), attrVal)...) + if diags.HasError() { + return diags + } + } + } + } + + return diags +} + +func newEmptyObject(typ attr.Type) (obj basetypes.ObjectValue, diags diag.Diagnostics) { + i, ok := typ.(attr.TypeWithAttributeTypes) + if !ok { + diags.AddError( + "Internal Error", + "An unexpected error occurred. "+ + "This is always an error in the provider. "+ + "Please report the following to the provider developer:\n\n"+ + fmt.Sprintf("Expected value type to implement attr.TypeWithAttributeTypes, got: %T", typ), + ) + return + } + + attrTypes := i.AttributeTypes() + attrValues := make(map[string]attr.Value, len(attrTypes)) + // TODO: only handles string types + for attrName := range attrTypes { + attrValues[attrName] = types.StringNull() + } + obj, d := basetypes.NewObjectValue(attrTypes, attrValues) + diags.Append(d...) 
+ if d.HasError() { + return basetypes.ObjectValue{}, diags + } + + return obj, diags +} + +type setRegionInterceptor struct{} + +func SetRegionInterceptor() setRegionInterceptor { + return setRegionInterceptor{} +} + +// Copied from resourceSetRegionInStateInterceptor.read() +func (r setRegionInterceptor) Read(ctx context.Context, params InterceptorParams) diag.Diagnostics { + var diags diag.Diagnostics + + switch params.When { + case After: + diags.Append(params.Result.Resource.SetAttribute(ctx, path.Root(names.AttrRegion), params.C.Region(ctx))...) + if diags.HasError() { + return diags + } + } + + return diags +} diff --git a/internal/provider/framework/listresourceattribute/attributes.go b/internal/provider/framework/listresourceattribute/attributes.go new file mode 100644 index 000000000000..5dbd4c5c83d7 --- /dev/null +++ b/internal/provider/framework/listresourceattribute/attributes.go @@ -0,0 +1,18 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package listresourceattribute + +import ( + "sync" + + "github.com/hashicorp/terraform-plugin-framework/list/schema" + "github.com/hashicorp/terraform-provider-aws/names" +) + +var Region = sync.OnceValue(func() schema.Attribute { + return schema.StringAttribute{ + Optional: true, + Description: names.ListResourceTopLevelRegionAttributeDescription, + } +}) diff --git a/internal/provider/framework/provider.go b/internal/provider/framework/provider.go index 6e1d2b6be902..36df45dc55f8 100644 --- a/internal/provider/framework/provider.go +++ b/internal/provider/framework/provider.go @@ -9,25 +9,30 @@ import ( "fmt" "iter" "log" + "reflect" "slices" + "sync" + "unique" "github.com/hashicorp/terraform-plugin-framework-validators/listvalidator" + "github.com/hashicorp/terraform-plugin-framework/action" + aschema "github.com/hashicorp/terraform-plugin-framework/action/schema" "github.com/hashicorp/terraform-plugin-framework/datasource" - "github.com/hashicorp/terraform-plugin-framework/diag" + 
datasourceschema "github.com/hashicorp/terraform-plugin-framework/datasource/schema" "github.com/hashicorp/terraform-plugin-framework/ephemeral" + empemeralschema "github.com/hashicorp/terraform-plugin-framework/ephemeral/schema" "github.com/hashicorp/terraform-plugin-framework/function" - "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/list" "github.com/hashicorp/terraform-plugin-framework/provider" "github.com/hashicorp/terraform-plugin-framework/provider/schema" "github.com/hashicorp/terraform-plugin-framework/resource" + resourceschema "github.com/hashicorp/terraform-plugin-framework/resource/schema" "github.com/hashicorp/terraform-plugin-framework/schema/validator" "github.com/hashicorp/terraform-plugin-framework/types" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/framework" - fwflex "github.com/hashicorp/terraform-provider-aws/internal/framework/flex" fwtypes "github.com/hashicorp/terraform-provider-aws/internal/framework/types" tffunction "github.com/hashicorp/terraform-provider-aws/internal/function" - "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" inttypes "github.com/hashicorp/terraform-provider-aws/internal/types" tfunique "github.com/hashicorp/terraform-provider-aws/internal/unique" @@ -35,18 +40,22 @@ import ( ) var ( - resourceSchemasValidated bool + resourceSchemasValidated sync.Once ) var ( _ provider.Provider = &frameworkProvider{} + _ provider.ProviderWithActions = &frameworkProvider{} _ provider.ProviderWithFunctions = &frameworkProvider{} _ provider.ProviderWithEphemeralResources = &frameworkProvider{} + _ provider.ProviderWithListResources = &frameworkProvider{} ) type frameworkProvider struct { + actions []func() action.Action dataSources []func() datasource.DataSource ephemeralResources []func() ephemeral.EphemeralResource + 
listResources []func() list.ListResource primary interface{ Meta() any } resources []func() resource.Resource servicePackages iter.Seq[conns.ServicePackage] @@ -58,6 +67,7 @@ func NewProvider(ctx context.Context, primary interface{ Meta() any }) (provider log.Printf("Creating Terraform AWS Provider (Framework-style)...") provider := &frameworkProvider{ + actions: make([]func() action.Action, 0), dataSources: make([]func() datasource.DataSource, 0), ephemeralResources: make([]func() ephemeral.EphemeralResource, 0), primary: primary, @@ -65,28 +75,18 @@ func NewProvider(ctx context.Context, primary interface{ Meta() any }) (provider servicePackages: primary.Meta().(*conns.AWSClient).ServicePackages(ctx), } - // Acceptance tests call this function multiple times, potentially in parallel. - // To avoid "fatal error: concurrent map writes", take a lock. - const ( - mutexKVKey = "provider.New" - ) - conns.GlobalMutexKV.Lock(mutexKVKey) - defer conns.GlobalMutexKV.Unlock(mutexKVKey) - // Because we try and share resource schemas as much as possible, // we need to ensure that we only validate the resource schemas once. 
- if !resourceSchemasValidated { - if err := provider.validateResourceSchemas(ctx); err != nil { - return nil, err - } - - resourceSchemasValidated = true - } - - if err := provider.initialize(ctx); err != nil { + var err error + resourceSchemasValidated.Do(func() { + err = provider.validateResourceSchemas(ctx) + }) + if err != nil { return nil, err } + provider.initialize(ctx) + return provider, nil } @@ -352,6 +352,8 @@ func (p *frameworkProvider) Configure(ctx context.Context, request provider.Conf response.DataSourceData = v response.ResourceData = v response.EphemeralResourceData = v + response.ActionData = v + response.ListResourceData = v } // DataSources returns a slice of functions to instantiate each DataSource @@ -378,6 +380,14 @@ func (p *frameworkProvider) EphemeralResources(ctx context.Context) []func() eph return slices.Clone(p.ephemeralResources) } +// Actions returns a slice of functions to instantiate each Action +// implementation. +// +// All actions must have unique type names. +func (p *frameworkProvider) Actions(ctx context.Context) []func() action.Action { + return slices.Clone(p.actions) +} + // Functions returns a slice of functions to instantiate each Function // implementation. // @@ -391,338 +401,238 @@ func (p *frameworkProvider) Functions(_ context.Context) []func() function.Funct } } +func (p *frameworkProvider) ListResources(_ context.Context) []func() list.ListResource { + return slices.Clone(p.listResources) +} + // initialize is called from `New` to perform any Terraform Framework-style initialization. 
-func (p *frameworkProvider) initialize(ctx context.Context) error { +func (p *frameworkProvider) initialize(ctx context.Context) { log.Printf("Initializing Terraform AWS Provider (Framework-style)...") - var errs []error - for sp := range p.servicePackages { servicePackageName := sp.ServicePackageName() - for _, v := range sp.FrameworkDataSources(ctx) { - typeName := v.TypeName - inner, err := v.Factory(ctx) + for _, dataSourceSpec := range sp.FrameworkDataSources(ctx) { + p.dataSources = append(p.dataSources, func() datasource.DataSource { //nolint:contextcheck // must be a func() + return newWrappedDataSource(dataSourceSpec, servicePackageName) + }) + } - if err != nil { - errs = append(errs, fmt.Errorf("creating data source (%s): %w", typeName, err)) - continue + if v, ok := sp.(conns.ServicePackageWithEphemeralResources); ok { + for _, ephemeralResourceSpec := range v.EphemeralResources(ctx) { + p.ephemeralResources = append(p.ephemeralResources, func() ephemeral.EphemeralResource { //nolint:contextcheck // must be a func() + return newWrappedEphemeralResource(ephemeralResourceSpec, servicePackageName) + }) } + } - var isRegionOverrideEnabled bool - if v := v.Region; !tfunique.IsHandleNil(v) && v.Value().IsOverrideEnabled { - isRegionOverrideEnabled = true + if v, ok := sp.(conns.ServicePackageWithFrameworkListResources); ok { + for listResourceSpec := range v.FrameworkListResources(ctx) { + p.listResources = append(p.listResources, func() list.ListResource { //nolint:contextcheck // must be a func() + return newWrappedListResourceFramework(listResourceSpec, servicePackageName) + }) } + } + if v, ok := sp.(conns.ServicePackageWithSDKListResources); ok { + for listResourceSpec := range v.SDKListResources(ctx) { + p.listResources = append(p.listResources, func() list.ListResource { //nolint:contextcheck // must be a func() + return newWrappedListResourceSDK(listResourceSpec, servicePackageName) + }) + } + } - var interceptors interceptorInvocations - - if 
isRegionOverrideEnabled { - v := v.Region.Value() + for _, resourceSpec := range sp.FrameworkResources(ctx) { + p.resources = append(p.resources, func() resource.Resource { //nolint:contextcheck // must be a func() + return newWrappedResource(resourceSpec, servicePackageName) + }) + } - interceptors = append(interceptors, dataSourceInjectRegionAttribute()) - if v.IsValidateOverrideInPartition { - interceptors = append(interceptors, dataSourceValidateRegion()) - } - interceptors = append(interceptors, dataSourceSetRegionInState()) + if v, ok := sp.(conns.ServicePackageWithActions); ok { + for _, actionSpec := range v.Actions(ctx) { + p.actions = append(p.actions, func() action.Action { //nolint:contextcheck // must be a func() + return newWrappedAction(actionSpec, servicePackageName) + }) } + } + } +} - if !tfunique.IsHandleNil(v.Tags) { - interceptors = append(interceptors, dataSourceTransparentTagging(v.Tags)) - } +// validateResourceSchemas is called from `New` to validate Terraform Plugin Framework-style resource schemas. +func (p *frameworkProvider) validateResourceSchemas(ctx context.Context) error { + var errs []error - opts := wrappedDataSourceOptions{ - // bootstrapContext is run on all wrapped methods before any interceptors. - bootstrapContext: func(ctx context.Context, getAttribute getAttributeFunc, c *conns.AWSClient) (context.Context, diag.Diagnostics) { - var diags diag.Diagnostics - var overrideRegion string + for sp := range p.servicePackages { + for _, dataSourceSpec := range sp.FrameworkDataSources(ctx) { + typeName := dataSourceSpec.TypeName + inner, err := dataSourceSpec.Factory(ctx) - if isRegionOverrideEnabled && getAttribute != nil { - var target types.String - diags.Append(getAttribute(ctx, path.Root(names.AttrRegion), &target)...) 
- if diags.HasError() { - return ctx, diags - } + if err != nil { + errs = append(errs, fmt.Errorf("creating data source type (%s): %w", typeName, err)) + continue + } - overrideRegion = target.ValueString() - } + schemaResponse := datasource.SchemaResponse{} + inner.Schema(ctx, datasource.SchemaRequest{}, &schemaResponse) - ctx = conns.NewResourceContext(ctx, servicePackageName, v.Name, overrideRegion) - if c != nil { - ctx = tftags.NewContext(ctx, c.DefaultTagsConfig(ctx), c.IgnoreTagsConfig(ctx)) - ctx = c.RegisterLogger(ctx) - ctx = fwflex.RegisterLogger(ctx) - } + if err := validateSchemaRegionForDataSource(dataSourceSpec.Region, schemaResponse.Schema); err != nil { + errs = append(errs, fmt.Errorf("data source type %q: %w", typeName, err)) + continue + } - return ctx, diags - }, - interceptors: interceptors, - typeName: typeName, + if err := validateSchemaTagsForDataSource(dataSourceSpec.Tags, schemaResponse.Schema); err != nil { + errs = append(errs, fmt.Errorf("data source type %q: %w", typeName, err)) + continue } - p.dataSources = append(p.dataSources, func() datasource.DataSource { - return newWrappedDataSource(inner, opts) - }) } if v, ok := sp.(conns.ServicePackageWithEphemeralResources); ok { - for _, v := range v.EphemeralResources(ctx) { - typeName := v.TypeName - inner, err := v.Factory(ctx) + for _, ephemeralResourceSpec := range v.EphemeralResources(ctx) { + typeName := ephemeralResourceSpec.TypeName + inner, err := ephemeralResourceSpec.Factory(ctx) if err != nil { - errs = append(errs, fmt.Errorf("creating ephemeral resource (%s): %w", typeName, err)) + errs = append(errs, fmt.Errorf("creating ephemeral resource type (%s): %w", typeName, err)) continue } - var isRegionOverrideEnabled bool - if v := v.Region; !tfunique.IsHandleNil(v) && v.Value().IsOverrideEnabled { - isRegionOverrideEnabled = true - } + schemaResponse := ephemeral.SchemaResponse{} + inner.Schema(ctx, ephemeral.SchemaRequest{}, &schemaResponse) - var interceptors 
interceptorInvocations + if err := validateSchemaRegionForEphemeralResource(ephemeralResourceSpec.Region, schemaResponse.Schema); err != nil { + errs = append(errs, fmt.Errorf("ephemeral resource type %q: %w", typeName, err)) + continue + } + } + } - if isRegionOverrideEnabled { - v := v.Region.Value() + if v, ok := sp.(conns.ServicePackageWithActions); ok { + for _, actionSpec := range v.Actions(ctx) { + typeName := actionSpec.TypeName + inner, err := actionSpec.Factory(ctx) - interceptors = append(interceptors, ephemeralResourceInjectRegionAttribute()) - if v.IsValidateOverrideInPartition { - interceptors = append(interceptors, ephemeralResourceValidateRegion()) - } - interceptors = append(interceptors, ephemeralResourceSetRegionInResult()) + if err != nil { + errs = append(errs, fmt.Errorf("creating action type (%s): %w", typeName, err)) + continue } - opts := wrappedEphemeralResourceOptions{ - // bootstrapContext is run on all wrapped methods before any interceptors. - bootstrapContext: func(ctx context.Context, getAttribute getAttributeFunc, c *conns.AWSClient) (context.Context, diag.Diagnostics) { - var diags diag.Diagnostics - var overrideRegion string - - if isRegionOverrideEnabled && getAttribute != nil { - var target types.String - diags.Append(getAttribute(ctx, path.Root(names.AttrRegion), &target)...) 
- if diags.HasError() { - return ctx, diags - } - - overrideRegion = target.ValueString() - } - - ctx = conns.NewResourceContext(ctx, servicePackageName, v.Name, overrideRegion) - if c != nil { - ctx = c.RegisterLogger(ctx) - ctx = fwflex.RegisterLogger(ctx) - ctx = logging.MaskSensitiveValuesByKey(ctx, logging.HTTPKeyRequestBody, logging.HTTPKeyResponseBody) - } - return ctx, diags - }, - interceptors: interceptors, - typeName: v.TypeName, + schemaResponse := action.SchemaResponse{} + inner.Schema(ctx, action.SchemaRequest{}, &schemaResponse) + + if err := validateSchemaRegionForAction(actionSpec.Region, schemaResponse.Schema); err != nil { + errs = append(errs, fmt.Errorf("action type %q: %w", typeName, err)) + continue } - p.ephemeralResources = append(p.ephemeralResources, func() ephemeral.EphemeralResource { - return newWrappedEphemeralResource(inner, opts) - }) } } - for _, res := range sp.FrameworkResources(ctx) { - typeName := res.TypeName - inner, err := res.Factory(ctx) + for _, resourceSpec := range sp.FrameworkResources(ctx) { + typeName := resourceSpec.TypeName + inner, err := resourceSpec.Factory(ctx) if err != nil { - errs = append(errs, fmt.Errorf("creating resource (%s): %w", typeName, err)) + errs = append(errs, fmt.Errorf("creating resource type (%s): %w", typeName, err)) continue } - var isRegionOverrideEnabled bool - if v := res.Region; !tfunique.IsHandleNil(v) && v.Value().IsOverrideEnabled { - isRegionOverrideEnabled = true - } - - var interceptors interceptorInvocations - - if isRegionOverrideEnabled { - v := res.Region.Value() + schemaResponse := resource.SchemaResponse{} + inner.Schema(ctx, resource.SchemaRequest{}, &schemaResponse) - interceptors = append(interceptors, resourceInjectRegionAttribute()) - if v.IsValidateOverrideInPartition { - interceptors = append(interceptors, resourceValidateRegion()) - } - interceptors = append(interceptors, resourceDefaultRegion()) - interceptors = append(interceptors, 
resourceForceNewIfRegionChanges()) - interceptors = append(interceptors, resourceSetRegionInState()) - if res.Identity.HasInherentRegion() { - interceptors = append(interceptors, resourceImportRegionNoDefault()) - } else { - interceptors = append(interceptors, resourceImportRegion()) - } + if err := validateSchemaRegionForResource(resourceSpec.Region, schemaResponse.Schema); err != nil { + errs = append(errs, fmt.Errorf("resource type %q: %w", typeName, err)) + continue } - if !tfunique.IsHandleNil(res.Tags) { - interceptors = append(interceptors, resourceTransparentTagging(res.Tags)) + if err := validateSchemaTagsForResource(resourceSpec.Tags, schemaResponse.Schema); err != nil { + errs = append(errs, fmt.Errorf("resource type %q: %w", typeName, err)) + continue } - if res.Import.WrappedImport { - if res.Import.SetIDAttr { - if _, ok := res.Import.ImportID.(inttypes.FrameworkImportIDCreator); !ok { - errs = append(errs, fmt.Errorf("resource type %s: importer sets \"id\" attribute, but creator isn't configured", typeName)) + if resourceSpec.Import.WrappedImport { + if resourceSpec.Import.SetIDAttr { + if _, ok := resourceSpec.Import.ImportID.(inttypes.FrameworkImportIDCreator); !ok { + errs = append(errs, fmt.Errorf("resource type %q: importer sets `%s` attribute, but creator isn't configured", resourceSpec.TypeName, names.AttrID)) continue } } - switch v := inner.(type) { - case framework.ImportByIdentityer: - v.SetIdentitySpec(res.Identity, res.Import) - default: - errs = append(errs, fmt.Errorf("resource type %s: cannot configure importer", typeName)) + if _, ok := inner.(framework.ImportByIdentityer); !ok { + errs = append(errs, fmt.Errorf("resource type %q: cannot configure importer, does not implement %q", resourceSpec.TypeName, reflect.TypeFor[framework.ImportByIdentityer]())) continue } } - - opts := wrappedResourceOptions{ - // bootstrapContext is run on all wrapped methods before any interceptors. 
- bootstrapContext: func(ctx context.Context, getAttribute getAttributeFunc, c *conns.AWSClient) (context.Context, diag.Diagnostics) { - var diags diag.Diagnostics - var overrideRegion string - - if isRegionOverrideEnabled && getAttribute != nil { - var target types.String - diags.Append(getAttribute(ctx, path.Root(names.AttrRegion), &target)...) - if diags.HasError() { - return ctx, diags - } - - overrideRegion = target.ValueString() - } - - ctx = conns.NewResourceContext(ctx, servicePackageName, res.Name, overrideRegion) - if c != nil { - ctx = tftags.NewContext(ctx, c.DefaultTagsConfig(ctx), c.IgnoreTagsConfig(ctx)) - ctx = c.RegisterLogger(ctx) - ctx = fwflex.RegisterLogger(ctx) - } - - return ctx, diags - }, - interceptors: interceptors, - typeName: typeName, - } - if len(res.Identity.Attributes) > 0 { - opts.identity = res.Identity - opts.interceptors = append(opts.interceptors, newIdentityInterceptor(res.Identity.Attributes)) - } - - p.resources = append(p.resources, func() resource.Resource { - return newWrappedResource(inner, opts) - }) } } return errors.Join(errs...) } -// validateResourceSchemas is called from `New` to validate Terraform Plugin Framework-style resource schemas. 
-func (p *frameworkProvider) validateResourceSchemas(ctx context.Context) error { - var errs []error - - for sp := range p.servicePackages { - for _, v := range sp.FrameworkDataSources(ctx) { - typeName := v.TypeName - ds, err := v.Factory(ctx) - - if err != nil { - errs = append(errs, fmt.Errorf("creating data source (%s): %w", typeName, err)) - continue - } - - schemaResponse := datasource.SchemaResponse{} - ds.Schema(ctx, datasource.SchemaRequest{}, &schemaResponse) - - if v := v.Region; !tfunique.IsHandleNil(v) && v.Value().IsOverrideEnabled { - if _, ok := schemaResponse.Schema.Attributes[names.AttrRegion]; ok { - errs = append(errs, fmt.Errorf("`%s` attribute is defined: %s data source", names.AttrRegion, typeName)) - continue - } - } - - if !tfunique.IsHandleNil(v.Tags) { - // The data source has opted in to transparent tagging. - // Ensure that the schema look OK. - if v, ok := schemaResponse.Schema.Attributes[names.AttrTags]; ok { - if !v.IsComputed() { - errs = append(errs, fmt.Errorf("`%s` attribute must be Computed: %s data source", names.AttrTags, typeName)) - continue - } - } else { - errs = append(errs, fmt.Errorf("no `%s` attribute defined in schema: %s data source", names.AttrTags, typeName)) - continue - } - } +func validateSchemaRegionForDataSource(regionSpec unique.Handle[inttypes.ServicePackageResourceRegion], schema datasourceschema.Schema) error { + if !tfunique.IsHandleNil(regionSpec) && regionSpec.Value().IsOverrideEnabled { + if _, ok := schema.Attributes[names.AttrRegion]; ok { + return fmt.Errorf("configured for enhanced regions but defines `%s` attribute in schema", names.AttrRegion) } + } + return nil +} - if v, ok := sp.(conns.ServicePackageWithEphemeralResources); ok { - for _, v := range v.EphemeralResources(ctx) { - typeName := v.TypeName - er, err := v.Factory(ctx) - - if err != nil { - errs = append(errs, fmt.Errorf("creating ephemeral resource (%s): %w", typeName, err)) - continue - } - - schemaResponse := 
ephemeral.SchemaResponse{} - er.Schema(ctx, ephemeral.SchemaRequest{}, &schemaResponse) +func validateSchemaRegionForEphemeralResource(regionSpec unique.Handle[inttypes.ServicePackageResourceRegion], schema empemeralschema.Schema) error { + if !tfunique.IsHandleNil(regionSpec) && regionSpec.Value().IsOverrideEnabled { + if _, ok := schema.Attributes[names.AttrRegion]; ok { + return fmt.Errorf("configured for enhanced regions but defines `%s` attribute in schema", names.AttrRegion) + } + } + return nil +} - if v := v.Region; !tfunique.IsHandleNil(v) && v.Value().IsOverrideEnabled { - if _, ok := schemaResponse.Schema.Attributes[names.AttrRegion]; ok { - errs = append(errs, fmt.Errorf("`%s` attribute is defined: %s ephemeral resource", names.AttrRegion, typeName)) - continue - } - } +func validateSchemaRegionForAction(regionSpec unique.Handle[inttypes.ServicePackageResourceRegion], schemaIface any) error { + if !tfunique.IsHandleNil(regionSpec) && regionSpec.Value().IsOverrideEnabled { + if schema, ok := schemaIface.(aschema.Schema); ok { + if _, ok := schema.Attributes[names.AttrRegion]; ok { + return fmt.Errorf("configured for enhanced regions but defines `%s` attribute in schema", names.AttrRegion) } } + } + return nil +} - for _, v := range sp.FrameworkResources(ctx) { - typeName := v.TypeName - r, err := v.Factory(ctx) +func validateSchemaRegionForResource(regionSpec unique.Handle[inttypes.ServicePackageResourceRegion], schema resourceschema.Schema) error { + if !tfunique.IsHandleNil(regionSpec) && regionSpec.Value().IsOverrideEnabled { + if _, ok := schema.Attributes[names.AttrRegion]; ok { + return fmt.Errorf("configured for enhanced regions but defines `%s` attribute in schema", names.AttrRegion) + } + } + return nil +} - if err != nil { - errs = append(errs, fmt.Errorf("creating resource (%s): %w", typeName, err)) - continue +func validateSchemaTagsForDataSource(tagsSpec unique.Handle[inttypes.ServicePackageResourceTags], schema datasourceschema.Schema) 
error { + if !tfunique.IsHandleNil(tagsSpec) { + if v, ok := schema.Attributes[names.AttrTags]; ok { + if !v.IsComputed() { + return fmt.Errorf("`%s` attribute must be Computed", names.AttrTags) } + } else { + return fmt.Errorf("configured for tags but no `%s` attribute defined in schema", names.AttrTags) + } + } + return nil +} - schemaResponse := resource.SchemaResponse{} - r.Schema(ctx, resource.SchemaRequest{}, &schemaResponse) - - if v := v.Region; !tfunique.IsHandleNil(v) && v.Value().IsOverrideEnabled { - if _, ok := schemaResponse.Schema.Attributes[names.AttrRegion]; ok { - errs = append(errs, fmt.Errorf("`%s` attribute is defined: %s resource", names.AttrRegion, typeName)) - continue - } +func validateSchemaTagsForResource(tagsSpec unique.Handle[inttypes.ServicePackageResourceTags], schema resourceschema.Schema) error { + if !tfunique.IsHandleNil(tagsSpec) { + if v, ok := schema.Attributes[names.AttrTags]; ok { + if v.IsComputed() { + return fmt.Errorf("`%s` attribute cannot be Computed", names.AttrTags) } - - if !tfunique.IsHandleNil(v.Tags) { - // The resource has opted in to transparent tagging. - // Ensure that the schema look OK. 
- if v, ok := schemaResponse.Schema.Attributes[names.AttrTags]; ok { - if v.IsComputed() { - errs = append(errs, fmt.Errorf("`%s` attribute cannot be Computed: %s resource", names.AttrTags, typeName)) - continue - } - } else { - errs = append(errs, fmt.Errorf("no `%s` attribute defined in schema: %s resource", names.AttrTags, typeName)) - continue - } - if v, ok := schemaResponse.Schema.Attributes[names.AttrTagsAll]; ok { - if !v.IsComputed() { - errs = append(errs, fmt.Errorf("`%s` attribute must be Computed: %s resource", names.AttrTagsAll, typeName)) - continue - } - } else { - errs = append(errs, fmt.Errorf("no `%s` attribute defined in schema: %s resource", names.AttrTagsAll, typeName)) - continue - } + } else { + return fmt.Errorf("configured for tags but no `%s` attribute defined in schema", names.AttrTags) + } + if v, ok := schema.Attributes[names.AttrTagsAll]; ok { + if !v.IsComputed() { + return fmt.Errorf("`%s` attribute must be Computed", names.AttrTagsAll) } + } else { + return fmt.Errorf("configured for tags but no `%s` attribute defined in schema", names.AttrTagsAll) } } - - return errors.Join(errs...) 
+ return nil } diff --git a/internal/provider/framework/provider_gen.go b/internal/provider/framework/provider_gen.go index b0c5d9c5af7d..e4a015994cac 100644 --- a/internal/provider/framework/provider_gen.go +++ b/internal/provider/framework/provider_gen.go @@ -163,6 +163,13 @@ func endpointsBlock() schema.SetNestedBlock { Description: "Use this to override the default service endpoint URL", }, + // arcregionswitch + + "arcregionswitch": schema.StringAttribute{ + Optional: true, + Description: "Use this to override the default service endpoint URL", + }, + // athena "athena": schema.StringAttribute{ @@ -226,6 +233,13 @@ func endpointsBlock() schema.SetNestedBlock { Description: "Use this to override the default service endpoint URL", }, + // bedrockagentcore + + "bedrockagentcore": schema.StringAttribute{ + Optional: true, + Description: "Use this to override the default service endpoint URL", + }, + // billing "billing": schema.StringAttribute{ @@ -1354,6 +1368,13 @@ func endpointsBlock() schema.SetNestedBlock { Description: "Use this to override the default service endpoint URL", }, + // odb + + "odb": schema.StringAttribute{ + Optional: true, + Description: "Use this to override the default service endpoint URL", + }, + // opensearch "opensearch": schema.StringAttribute{ @@ -1662,6 +1683,13 @@ func endpointsBlock() schema.SetNestedBlock { Description: "Use this to override the default service endpoint URL", }, + // s3vectors + + "s3vectors": schema.StringAttribute{ + Optional: true, + Description: "Use this to override the default service endpoint URL", + }, + // sagemaker "sagemaker": schema.StringAttribute{ @@ -1974,6 +2002,13 @@ func endpointsBlock() schema.SetNestedBlock { Description: "Use this to override the default service endpoint URL", }, + // workmail + + "workmail": schema.StringAttribute{ + Optional: true, + Description: "Use this to override the default service endpoint URL", + }, + // workspaces "workspaces": schema.StringAttribute{ diff --git 
a/internal/provider/framework/region.go b/internal/provider/framework/region.go index fa625678a8e2..32302199366a 100644 --- a/internal/provider/framework/region.go +++ b/internal/provider/framework/region.go @@ -7,20 +7,23 @@ import ( "context" "github.com/YakDriver/regexache" + "github.com/hashicorp/terraform-plugin-framework/action" + aschema "github.com/hashicorp/terraform-plugin-framework/action/schema" "github.com/hashicorp/terraform-plugin-framework/datasource" dsschema "github.com/hashicorp/terraform-plugin-framework/datasource/schema" "github.com/hashicorp/terraform-plugin-framework/diag" "github.com/hashicorp/terraform-plugin-framework/ephemeral" erschema "github.com/hashicorp/terraform-plugin-framework/ephemeral/schema" + "github.com/hashicorp/terraform-plugin-framework/list" "github.com/hashicorp/terraform-plugin-framework/path" "github.com/hashicorp/terraform-plugin-framework/resource" "github.com/hashicorp/terraform-plugin-framework/types" - "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/provider/framework/listresourceattribute" "github.com/hashicorp/terraform-provider-aws/internal/provider/framework/resourceattribute" "github.com/hashicorp/terraform-provider-aws/names" ) -func validateInContextRegionInPartition(ctx context.Context, c *conns.AWSClient) diag.Diagnostics { +func validateInContextRegionInPartition(ctx context.Context, c awsClient) diag.Diagnostics { var diags diag.Diagnostics if err := c.ValidateInContextRegionInPartition(ctx); err != nil { @@ -32,9 +35,7 @@ func validateInContextRegionInPartition(ctx context.Context, c *conns.AWSClient) type dataSourceInjectRegionAttributeInterceptor struct{} -func (r dataSourceInjectRegionAttributeInterceptor) schema(ctx context.Context, opts interceptorOptions[datasource.SchemaRequest, datasource.SchemaResponse]) diag.Diagnostics { - var diags diag.Diagnostics - +func (r dataSourceInjectRegionAttributeInterceptor) schema(ctx 
context.Context, opts interceptorOptions[datasource.SchemaRequest, datasource.SchemaResponse]) { switch response, when := opts.response, opts.when; when { case After: if _, ok := response.Schema.Attributes[names.AttrRegion]; !ok { @@ -42,12 +43,10 @@ func (r dataSourceInjectRegionAttributeInterceptor) schema(ctx context.Context, response.Schema.Attributes[names.AttrRegion] = dsschema.StringAttribute{ Optional: true, Computed: true, - Description: names.TopLevelRegionAttributeDescription, + Description: names.ResourceTopLevelRegionAttributeDescription, } } } - - return diags } // dataSourceInjectRegionAttribute injects a top-level "region" attribute into a data source's schema. @@ -57,20 +56,17 @@ func dataSourceInjectRegionAttribute() dataSourceSchemaInterceptor { type dataSourceValidateRegionInterceptor struct{} -func (r dataSourceValidateRegionInterceptor) read(ctx context.Context, opts interceptorOptions[datasource.ReadRequest, datasource.ReadResponse]) diag.Diagnostics { +func (r dataSourceValidateRegionInterceptor) read(ctx context.Context, opts interceptorOptions[datasource.ReadRequest, datasource.ReadResponse]) { c := opts.c - var diags diag.Diagnostics switch when := opts.when; when { case Before: // As data sources have no ModifyPlan functionality we validate the per-resource Region override value before R. - diags.Append(validateInContextRegionInPartition(ctx, c)...) - if diags.HasError() { - return diags + opts.response.Diagnostics.Append(validateInContextRegionInPartition(ctx, c)...) + if opts.response.Diagnostics.HasError() { + return } } - - return diags } // dataSourceValidateRegion validates that the value of the top-level `region` attribute is in the configured AWS partition. 
@@ -80,28 +76,25 @@ func dataSourceValidateRegion() dataSourceCRUDInterceptor { type dataSourceSetRegionInStateInterceptor struct{} -func (r dataSourceSetRegionInStateInterceptor) read(ctx context.Context, opts interceptorOptions[datasource.ReadRequest, datasource.ReadResponse]) diag.Diagnostics { +func (r dataSourceSetRegionInStateInterceptor) read(ctx context.Context, opts interceptorOptions[datasource.ReadRequest, datasource.ReadResponse]) { c := opts.c - var diags diag.Diagnostics switch response, when := opts.response, opts.when; when { case After: // Set region in state after R, but only if the data source didn't explicitly set it (e.g. aws_region). var target types.String - diags.Append(response.State.GetAttribute(ctx, path.Root(names.AttrRegion), &target)...) - if diags.HasError() { - return diags + opts.response.Diagnostics.Append(response.State.GetAttribute(ctx, path.Root(names.AttrRegion), &target)...) + if opts.response.Diagnostics.HasError() { + return } if target.IsNull() { - diags.Append(response.State.SetAttribute(ctx, path.Root(names.AttrRegion), c.Region(ctx))...) - if diags.HasError() { - return diags + opts.response.Diagnostics.Append(response.State.SetAttribute(ctx, path.Root(names.AttrRegion), c.Region(ctx))...) + if opts.response.Diagnostics.HasError() { + return } } } - - return diags } // dataSourceSetRegionInState set the value of the top-level `region` attribute in state after Read. 
@@ -111,9 +104,7 @@ func dataSourceSetRegionInState() dataSourceCRUDInterceptor { type ephemeralResourceInjectRegionAttributeInterceptor struct{} -func (r ephemeralResourceInjectRegionAttributeInterceptor) schema(ctx context.Context, opts interceptorOptions[ephemeral.SchemaRequest, ephemeral.SchemaResponse]) diag.Diagnostics { - var diags diag.Diagnostics - +func (r ephemeralResourceInjectRegionAttributeInterceptor) schema(ctx context.Context, opts interceptorOptions[ephemeral.SchemaRequest, ephemeral.SchemaResponse]) { switch response, when := opts.response, opts.when; when { case After: if _, ok := response.Schema.Attributes[names.AttrRegion]; !ok { @@ -121,12 +112,10 @@ func (r ephemeralResourceInjectRegionAttributeInterceptor) schema(ctx context.Co response.Schema.Attributes[names.AttrRegion] = erschema.StringAttribute{ Optional: true, Computed: true, - Description: names.TopLevelRegionAttributeDescription, + Description: names.ResourceTopLevelRegionAttributeDescription, } } } - - return diags } // ephemeralResourceInjectRegionAttribute injects a top-level "region" attribute into an ephemeral resource's schema. @@ -138,20 +127,17 @@ type ephemeralResourceSetRegionInStateInterceptor struct { ephemeralResourceNoOpORCInterceptor } -func (r ephemeralResourceSetRegionInStateInterceptor) open(ctx context.Context, opts interceptorOptions[ephemeral.OpenRequest, ephemeral.OpenResponse]) diag.Diagnostics { +func (r ephemeralResourceSetRegionInStateInterceptor) open(ctx context.Context, opts interceptorOptions[ephemeral.OpenRequest, ephemeral.OpenResponse]) { c := opts.c - var diags diag.Diagnostics switch response, when := opts.response, opts.when; when { case After: // Set region in state after R. - diags.Append(response.Result.SetAttribute(ctx, path.Root(names.AttrRegion), c.Region(ctx))...) - if diags.HasError() { - return diags + opts.response.Diagnostics.Append(response.Result.SetAttribute(ctx, path.Root(names.AttrRegion), c.Region(ctx))...) 
+ if opts.response.Diagnostics.HasError() { + return } } - - return diags } // ephemeralResourceSetRegionInResult set the value of the top-level `region` attribute in the result after Open. @@ -163,20 +149,17 @@ type ephemeralResourceValidateRegionInterceptor struct { ephemeralResourceNoOpORCInterceptor } -func (r ephemeralResourceValidateRegionInterceptor) open(ctx context.Context, opts interceptorOptions[ephemeral.OpenRequest, ephemeral.OpenResponse]) diag.Diagnostics { +func (r ephemeralResourceValidateRegionInterceptor) open(ctx context.Context, opts interceptorOptions[ephemeral.OpenRequest, ephemeral.OpenResponse]) { c := opts.c - var diags diag.Diagnostics switch when := opts.when; when { case Before: // As ephemeral resources have no ModifyPlan functionality we validate the per-resource Region override value here. - diags.Append(validateInContextRegionInPartition(ctx, c)...) - if diags.HasError() { - return diags + opts.response.Diagnostics.Append(validateInContextRegionInPartition(ctx, c)...) + if opts.response.Diagnostics.HasError() { + return } } - - return diags } // ephemeralResourceValidateRegion validates that the value of the top-level `region` attribute is in the configured AWS partition. 
@@ -186,9 +169,7 @@ func ephemeralResourceValidateRegion() ephemeralResourceORCInterceptor { type resourceInjectRegionAttributeInterceptor struct{} -func (r resourceInjectRegionAttributeInterceptor) schema(ctx context.Context, opts interceptorOptions[resource.SchemaRequest, resource.SchemaResponse]) diag.Diagnostics { - var diags diag.Diagnostics - +func (r resourceInjectRegionAttributeInterceptor) schema(ctx context.Context, opts interceptorOptions[resource.SchemaRequest, resource.SchemaResponse]) { switch response, when := opts.response, opts.when; when { case After: if _, ok := response.Schema.Attributes[names.AttrRegion]; !ok { @@ -196,8 +177,6 @@ func (r resourceInjectRegionAttributeInterceptor) schema(ctx context.Context, op response.Schema.Attributes[names.AttrRegion] = resourceattribute.Region() } } - - return diags } // resourceInjectRegionAttribute injects a top-level "region" attribute into a resource's schema. @@ -207,19 +186,16 @@ func resourceInjectRegionAttribute() resourceSchemaInterceptor { type resourceValidateRegionInterceptor struct{} -func (r resourceValidateRegionInterceptor) modifyPlan(ctx context.Context, opts interceptorOptions[resource.ModifyPlanRequest, resource.ModifyPlanResponse]) diag.Diagnostics { +func (r resourceValidateRegionInterceptor) modifyPlan(ctx context.Context, opts interceptorOptions[resource.ModifyPlanRequest, resource.ModifyPlanResponse]) { c := opts.c - var diags diag.Diagnostics switch when := opts.when; when { case Before: - diags.Append(validateInContextRegionInPartition(ctx, c)...) - if diags.HasError() { - return diags + opts.response.Diagnostics.Append(validateInContextRegionInPartition(ctx, c)...) + if opts.response.Diagnostics.HasError() { + return } } - - return diags } // resourceValidateRegion validates that the value of the top-level `region` attribute is in the configured AWS partition. 
@@ -229,33 +205,30 @@ func resourceValidateRegion() resourceModifyPlanInterceptor { type resourceDefaultRegionInterceptor struct{} -func (r resourceDefaultRegionInterceptor) modifyPlan(ctx context.Context, opts interceptorOptions[resource.ModifyPlanRequest, resource.ModifyPlanResponse]) diag.Diagnostics { +func (r resourceDefaultRegionInterceptor) modifyPlan(ctx context.Context, opts interceptorOptions[resource.ModifyPlanRequest, resource.ModifyPlanResponse]) { c := opts.c - var diags diag.Diagnostics switch request, response, when := opts.request, opts.response, opts.when; when { case Before: // If the entire plan is null, the resource is planned for destruction. if request.Plan.Raw.IsNull() { - return diags + return } var target types.String - diags.Append(request.Plan.GetAttribute(ctx, path.Root(names.AttrRegion), &target)...) - if diags.HasError() { - return diags + opts.response.Diagnostics.Append(request.Plan.GetAttribute(ctx, path.Root(names.AttrRegion), &target)...) + if opts.response.Diagnostics.HasError() { + return } if target.IsNull() || target.IsUnknown() { // Set the region to the provider's configured region - diags.Append(response.Plan.SetAttribute(ctx, path.Root(names.AttrRegion), c.AwsConfig(ctx).Region)...) - if diags.HasError() { - return diags + opts.response.Diagnostics.Append(response.Plan.SetAttribute(ctx, path.Root(names.AttrRegion), c.AwsConfig(ctx).Region)...) + if opts.response.Diagnostics.HasError() { + return } } } - - return diags } // resourceDefaultRegion sets the value of the top-level `region` attribute to the provider's configured Region if it is not set. 
@@ -265,45 +238,54 @@ func resourceDefaultRegion() resourceModifyPlanInterceptor { type resourceForceNewIfRegionChangesInterceptor struct{} -func (r resourceForceNewIfRegionChangesInterceptor) modifyPlan(ctx context.Context, opts interceptorOptions[resource.ModifyPlanRequest, resource.ModifyPlanResponse]) diag.Diagnostics { +func (r resourceForceNewIfRegionChangesInterceptor) modifyPlan(ctx context.Context, opts interceptorOptions[resource.ModifyPlanRequest, resource.ModifyPlanResponse]) { c := opts.c - var diags diag.Diagnostics switch request, response, when := opts.request, opts.response, opts.when; when { case Before: // If the entire plan is null, the resource is planned for destruction. if request.Plan.Raw.IsNull() { - return diags + return } // If the entire state is null, the resource is new. if request.State.Raw.IsNull() { - return diags + return + } + + var configRegion types.String + opts.response.Diagnostics.Append(request.Config.GetAttribute(ctx, path.Root(names.AttrRegion), &configRegion)...) + if opts.response.Diagnostics.HasError() { + return } var planRegion types.String - diags.Append(request.Plan.GetAttribute(ctx, path.Root(names.AttrRegion), &planRegion)...) - if diags.HasError() { - return diags + opts.response.Diagnostics.Append(request.Plan.GetAttribute(ctx, path.Root(names.AttrRegion), &planRegion)...) + if opts.response.Diagnostics.HasError() { + return } var stateRegion types.String - diags.Append(request.State.GetAttribute(ctx, path.Root(names.AttrRegion), &stateRegion)...) - if diags.HasError() { - return diags + opts.response.Diagnostics.Append(request.State.GetAttribute(ctx, path.Root(names.AttrRegion), &stateRegion)...) + if opts.response.Diagnostics.HasError() { + return } - providerRegion := c.AwsConfig(ctx).Region - if stateRegion.IsNull() && planRegion.ValueString() == providerRegion { - return diags + if stateRegion.IsNull() { + // Upgrade from pre-v6.0.0 provider and '-refresh=false' in effect. 
+ if configRegion.IsNull() { + return + } + + if providerRegion := c.AwsConfig(ctx).Region; planRegion.ValueString() == providerRegion { + return + } } if !planRegion.Equal(stateRegion) { response.RequiresReplace = path.Paths{path.Root(names.AttrRegion)} } } - - return diags } // resourceForceNewIfRegionChanges forces resource replacement if the value of the top-level `region` attribute changes. @@ -315,20 +297,22 @@ type resourceSetRegionInStateInterceptor struct { resourceNoOpCRUDInterceptor } -func (r resourceSetRegionInStateInterceptor) read(ctx context.Context, opts interceptorOptions[resource.ReadRequest, resource.ReadResponse]) diag.Diagnostics { +func (r resourceSetRegionInStateInterceptor) read(ctx context.Context, opts interceptorOptions[resource.ReadRequest, resource.ReadResponse]) { c := opts.c - var diags diag.Diagnostics switch response, when := opts.response, opts.when; when { case After: + // Will occur on a refresh when the resource does not exist in AWS and needs to be recreated, e.g. "_disappears" tests. + if response.State.Raw.IsNull() { + return + } + // Set region in state after R. - diags.Append(response.State.SetAttribute(ctx, path.Root(names.AttrRegion), c.Region(ctx))...) - if diags.HasError() { - return diags + opts.response.Diagnostics.Append(response.State.SetAttribute(ctx, path.Root(names.AttrRegion), c.Region(ctx))...) + if opts.response.Diagnostics.HasError() { + return } } - - return diags } // resourceSetRegionInState set the value of the top-level `region` attribute in state after Read. 
@@ -338,28 +322,25 @@ func resourceSetRegionInState() resourceCRUDInterceptor { type resourceImportRegionInterceptor struct{} -func (r resourceImportRegionInterceptor) importState(ctx context.Context, opts interceptorOptions[resource.ImportStateRequest, resource.ImportStateResponse]) diag.Diagnostics { +func (r resourceImportRegionInterceptor) importState(ctx context.Context, opts interceptorOptions[resource.ImportStateRequest, resource.ImportStateResponse]) { c := opts.c - var diags diag.Diagnostics switch request, response, when := opts.request, opts.response, opts.when; when { case Before: // Import ID optionally ends with "@". if matches := regexache.MustCompile(`^(.+)@([a-z]{2}(?:-[a-z]+)+-\d{1,2})$`).FindStringSubmatch(request.ID); len(matches) == 3 { request.ID = matches[1] - diags.Append(response.State.SetAttribute(ctx, path.Root(names.AttrRegion), matches[2])...) - if diags.HasError() { - return diags + opts.response.Diagnostics.Append(response.State.SetAttribute(ctx, path.Root(names.AttrRegion), matches[2])...) + if opts.response.Diagnostics.HasError() { + return } } else { - diags.Append(response.State.SetAttribute(ctx, path.Root(names.AttrRegion), c.AwsConfig(ctx).Region)...) - if diags.HasError() { - return diags + opts.response.Diagnostics.Append(response.State.SetAttribute(ctx, path.Root(names.AttrRegion), c.AwsConfig(ctx).Region)...) + if opts.response.Diagnostics.HasError() { + return } } } - - return diags } // resourceImportRegion sets the value of the top-level `region` attribute during import. 
@@ -369,25 +350,78 @@ func resourceImportRegion() resourceImportStateInterceptor { type resourceImportRegionNoDefaultInterceptor struct{} -func (r resourceImportRegionNoDefaultInterceptor) importState(ctx context.Context, opts interceptorOptions[resource.ImportStateRequest, resource.ImportStateResponse]) diag.Diagnostics { - var diags diag.Diagnostics - +func (r resourceImportRegionNoDefaultInterceptor) importState(ctx context.Context, opts interceptorOptions[resource.ImportStateRequest, resource.ImportStateResponse]) { switch request, response, when := opts.request, opts.response, opts.when; when { case Before: // Import ID optionally ends with "@". if matches := regexache.MustCompile(`^(.+)@([a-z]{2}(?:-[a-z]+)+-\d{1,2})$`).FindStringSubmatch(request.ID); len(matches) == 3 { request.ID = matches[1] - diags.Append(response.State.SetAttribute(ctx, path.Root(names.AttrRegion), matches[2])...) - if diags.HasError() { - return diags + opts.response.Diagnostics.Append(response.State.SetAttribute(ctx, path.Root(names.AttrRegion), matches[2])...) + if opts.response.Diagnostics.HasError() { + return } } } - - return diags } // resourceImportRegionNoDefault sets the value of the top-level `region` attribute during import. func resourceImportRegionNoDefault() resourceImportStateInterceptor { return &resourceImportRegionNoDefaultInterceptor{} } + +type actionInjectRegionAttributeInterceptor struct{} + +func (a actionInjectRegionAttributeInterceptor) schema(ctx context.Context, opts interceptorOptions[action.SchemaRequest, action.SchemaResponse]) { + switch response, when := opts.response, opts.when; when { + case After: + if _, exists := response.Schema.Attributes[names.AttrRegion]; !exists { + // Inject a top-level "region" attribute. 
+ if response.Schema.Attributes == nil { + response.Schema.Attributes = make(map[string]aschema.Attribute) + } + response.Schema.Attributes[names.AttrRegion] = aschema.StringAttribute{ + Optional: true, + Description: names.ActionTopLevelRegionAttributeDescription, + } + } + } +} + +// actionInjectRegionAttribute injects a top-level "region" attribute into an action's schema. +func actionInjectRegionAttribute() actionSchemaInterceptor { + return &actionInjectRegionAttributeInterceptor{} +} + +type actionValidateRegionInterceptor struct { +} + +func (a actionValidateRegionInterceptor) invoke(ctx context.Context, opts interceptorOptions[action.InvokeRequest, action.InvokeResponse]) { + c := opts.c + + switch when := opts.when; when { + case Before: + opts.response.Diagnostics.Append(validateInContextRegionInPartition(ctx, c)...) + } +} + +// actionValidateRegion validates that the value of the top-level `region` attribute is in the configured AWS partition. +func actionValidateRegion() actionInvokeInterceptor { + return &actionValidateRegionInterceptor{} +} + +type listResourceInjectRegionAttributeInterceptor struct{} + +func (r listResourceInjectRegionAttributeInterceptor) schema(ctx context.Context, opts interceptorOptions[list.ListResourceSchemaRequest, list.ListResourceSchemaResponse]) { + switch response, when := opts.response, opts.when; when { + case After: + if _, ok := response.Schema.Attributes[names.AttrRegion]; !ok { + // Inject a top-level "region" attribute. + response.Schema.Attributes[names.AttrRegion] = listresourceattribute.Region() + } + } +} + +// listResourceInjectRegionAttribute injects a "region" attribute into a resource's List schema. 
+func listResourceInjectRegionAttribute() listResourceSchemaInterceptor { + return &listResourceInjectRegionAttributeInterceptor{} +} diff --git a/internal/provider/framework/region_test.go b/internal/provider/framework/region_test.go new file mode 100644 index 000000000000..765c374e9222 --- /dev/null +++ b/internal/provider/framework/region_test.go @@ -0,0 +1,95 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package framework + +import ( + "context" + "testing" + + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-go/tftypes" + "github.com/hashicorp/terraform-provider-aws/internal/errs/fwdiag" + "github.com/hashicorp/terraform-provider-aws/internal/provider/framework/resourceattribute" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestResourceSetRegionInStateInterceptor_Read(t *testing.T) { + t.Parallel() + + const name = "example" + + region := "a_region" + + ctx := context.Background() + client := mockClient{region: region} + icpt := resourceSetRegionInStateInterceptor{} + + s := schema.Schema{ + Attributes: map[string]schema.Attribute{ + names.AttrName: schema.StringAttribute{Required: true}, + names.AttrRegion: resourceattribute.Region(), + }, + } + + tests := map[string]struct { + startState tfsdk.State + expectSet bool + }{ + "when state is present then region is set": { + startState: stateFromSchema(ctx, s, map[string]string{"name": name}), + expectSet: true, + }, + "when state is null then it remains null": { + startState: tfsdk.State{ + Raw: tftypes.NewValue(s.Type().TerraformType(ctx), nil), + Schema: s, + }, + expectSet: false, + }, + } + + for tn, tc := range tests { + t.Run(tn, func(t *testing.T) { + t.Parallel() + + 
req := resource.ReadRequest{State: tc.startState} + resp := resource.ReadResponse{State: tc.startState} + + icpt.read(ctx, interceptorOptions[resource.ReadRequest, resource.ReadResponse]{ + c: client, + request: &req, + response: &resp, + when: After, + }) + if resp.Diagnostics.HasError() { + t.Fatalf("unexpected diags: %s", resp.Diagnostics) + } + + if tc.expectSet { + got := getStateAttributeValue(ctx, t, resp.State, path.Root("region")) + if got != region { + t.Errorf("expected region %q, got %q", region, got) + } + } else { + if !resp.State.Raw.IsNull() { + t.Errorf("expected State.Raw to stay null, got %#v", resp.State.Raw) + } + } + }) + } +} + +func getStateAttributeValue(ctx context.Context, t *testing.T, st tfsdk.State, p path.Path) string { + t.Helper() + + var v types.String + if diags := st.GetAttribute(ctx, p, &v); diags.HasError() { + t.Fatalf("unexpected error getting State attribute %q: %s", p, fwdiag.DiagnosticsError(diags)) + } + return v.ValueString() +} diff --git a/internal/provider/framework/resourceattribute/attributes.go b/internal/provider/framework/resourceattribute/attributes.go index eb5543522b84..f216b24d87b9 100644 --- a/internal/provider/framework/resourceattribute/attributes.go +++ b/internal/provider/framework/resourceattribute/attributes.go @@ -14,6 +14,6 @@ var Region = sync.OnceValue(func() schema.Attribute { return schema.StringAttribute{ Optional: true, Computed: true, - Description: names.TopLevelRegionAttributeDescription, + Description: names.ResourceTopLevelRegionAttributeDescription, } }) diff --git a/internal/provider/framework/tags_interceptor.go b/internal/provider/framework/tags_interceptor.go index 287968ffd171..93330f785de1 100644 --- a/internal/provider/framework/tags_interceptor.go +++ b/internal/provider/framework/tags_interceptor.go @@ -9,7 +9,6 @@ import ( "unique" "github.com/hashicorp/terraform-plugin-framework/datasource" - "github.com/hashicorp/terraform-plugin-framework/diag" 
"github.com/hashicorp/terraform-plugin-framework/path" "github.com/hashicorp/terraform-plugin-framework/resource" fwflex "github.com/hashicorp/terraform-provider-aws/internal/framework/flex" @@ -25,25 +24,24 @@ type tagsDataSourceInterceptor struct { interceptors.HTags } -func (r tagsDataSourceInterceptor) read(ctx context.Context, opts interceptorOptions[datasource.ReadRequest, datasource.ReadResponse]) diag.Diagnostics { +func (r tagsDataSourceInterceptor) read(ctx context.Context, opts interceptorOptions[datasource.ReadRequest, datasource.ReadResponse]) { c := opts.c - var diags diag.Diagnostics if !r.Enabled() { - return diags + return } sp, serviceName, resourceName, tagsInContext, ok := interceptors.InfoFromContext(ctx, c) if !ok { - return diags + return } switch request, response, when := opts.request, opts.response, opts.when; when { case Before: var configTags tftags.Map - diags.Append(request.Config.GetAttribute(ctx, path.Root(names.AttrTags), &configTags)...) - if diags.HasError() { - return diags + opts.response.Diagnostics.Append(request.Config.GetAttribute(ctx, path.Root(names.AttrTags), &configTags)...) 
+ if opts.response.Diagnostics.HasError() { + return } tags := tftags.New(ctx, configTags) @@ -53,8 +51,8 @@ func (r tagsDataSourceInterceptor) read(ctx context.Context, opts interceptorOpt if tagsInContext.TagsOut.IsNone() { if identifier := r.GetIdentifierFramework(ctx, response.State); identifier != "" { if err := r.ListTags(ctx, sp, c, identifier); err != nil { - diags.AddError(fmt.Sprintf("listing tags for %s %s (%s)", serviceName, resourceName, identifier), err.Error()) - return diags + opts.response.Diagnostics.AddError(fmt.Sprintf("listing tags for %s %s (%s)", serviceName, resourceName, identifier), err.Error()) + return } } } @@ -62,13 +60,11 @@ func (r tagsDataSourceInterceptor) read(ctx context.Context, opts interceptorOpt tags := tagsInContext.TagsOut.UnwrapOrDefault() // Remove any provider configured ignore_tags and system tags from those returned from the service API. stateTags := fwflex.FlattenFrameworkStringValueMapLegacy(ctx, tags.IgnoreSystem(sp.ServicePackageName()).IgnoreConfig(c.IgnoreTagsConfig(ctx)).Map()) - diags.Append(response.State.SetAttribute(ctx, path.Root(names.AttrTags), tftags.NewMapFromMapValue(stateTags))...) - if diags.HasError() { - return diags + opts.response.Diagnostics.Append(response.State.SetAttribute(ctx, path.Root(names.AttrTags), tftags.NewMapFromMapValue(stateTags))...) 
+ if opts.response.Diagnostics.HasError() { + return } } - - return diags } func dataSourceTransparentTagging(servicePackageResourceTags unique.Handle[inttypes.ServicePackageResourceTags]) dataSourceCRUDInterceptor { @@ -83,25 +79,24 @@ type tagsResourceInterceptor struct { interceptors.HTags } -func (r tagsResourceInterceptor) create(ctx context.Context, opts interceptorOptions[resource.CreateRequest, resource.CreateResponse]) diag.Diagnostics { +func (r tagsResourceInterceptor) create(ctx context.Context, opts interceptorOptions[resource.CreateRequest, resource.CreateResponse]) { c := opts.c - var diags diag.Diagnostics if !r.Enabled() { - return diags + return } sp, _, _, tagsInContext, ok := interceptors.InfoFromContext(ctx, c) if !ok { - return diags + return } switch request, response, when := opts.request, opts.response, opts.when; when { case Before: var planTags tftags.Map - diags.Append(request.Plan.GetAttribute(ctx, path.Root(names.AttrTags), &planTags)...) - if diags.HasError() { - return diags + opts.response.Diagnostics.Append(request.Plan.GetAttribute(ctx, path.Root(names.AttrTags), &planTags)...) + if opts.response.Diagnostics.HasError() { + return } // Merge the resource's configured tags with any provider configured default_tags. @@ -114,33 +109,30 @@ func (r tagsResourceInterceptor) create(ctx context.Context, opts interceptorOpt // Remove any provider configured ignore_tags and system tags from those passed to the service API. // Computed tags_all include any provider configured default_tags. stateTagsAll := fwflex.FlattenFrameworkStringValueMapLegacy(ctx, tagsInContext.TagsIn.MustUnwrap().IgnoreSystem(sp.ServicePackageName()).IgnoreConfig(c.IgnoreTagsConfig(ctx)).Map()) - diags.Append(response.State.SetAttribute(ctx, path.Root(names.AttrTagsAll), tftags.NewMapFromMapValue(stateTagsAll))...) 
- if diags.HasError() { - return diags + opts.response.Diagnostics.Append(response.State.SetAttribute(ctx, path.Root(names.AttrTagsAll), tftags.NewMapFromMapValue(stateTagsAll))...) + if opts.response.Diagnostics.HasError() { + return } } - - return diags } -func (r tagsResourceInterceptor) read(ctx context.Context, opts interceptorOptions[resource.ReadRequest, resource.ReadResponse]) diag.Diagnostics { +func (r tagsResourceInterceptor) read(ctx context.Context, opts interceptorOptions[resource.ReadRequest, resource.ReadResponse]) { c := opts.c - var diags diag.Diagnostics if !r.Enabled() { - return diags + return } sp, serviceName, resourceName, tagsInContext, ok := interceptors.InfoFromContext(ctx, c) if !ok { - return diags + return } switch response, when := opts.response, opts.when; when { case After: // Will occur on a refresh when the resource does not exist in AWS and needs to be recreated, e.g. "_disappears" tests. if response.State.Raw.IsNull() { - return diags + return } // If the R handler didn't set tags, try and read them from the service API. @@ -149,9 +141,9 @@ func (r tagsResourceInterceptor) read(ctx context.Context, opts interceptorOptio // https://github.com/hashicorp/terraform-provider-aws/issues/31180 if identifier := r.GetIdentifierFramework(ctx, response.State); identifier != "" { if err := r.ListTags(ctx, sp, c, identifier); err != nil { - diags.AddError(fmt.Sprintf("listing tags for %s %s (%s)", serviceName, resourceName, identifier), err.Error()) + opts.response.Diagnostics.AddError(fmt.Sprintf("listing tags for %s %s (%s)", serviceName, resourceName, identifier), err.Error()) - return diags + return } } } @@ -163,44 +155,41 @@ func (r tagsResourceInterceptor) read(ctx context.Context, opts interceptorOptio response.State.GetAttribute(ctx, path.Root(names.AttrTags), &stateTags) // Remove any provider configured ignore_tags and system tags from those returned from the service API. 
// The resource's configured tags do not include any provider configured default_tags. - if v := apiTags.IgnoreSystem(sp.ServicePackageName()).IgnoreConfig(c.IgnoreTagsConfig(ctx)).ResolveDuplicatesFramework(ctx, c.DefaultTagsConfig(ctx), c.IgnoreTagsConfig(ctx), response, &diags).Map(); len(v) > 0 { + if v := apiTags.IgnoreSystem(sp.ServicePackageName()).IgnoreConfig(c.IgnoreTagsConfig(ctx)).ResolveDuplicatesFramework(ctx, c.DefaultTagsConfig(ctx), c.IgnoreTagsConfig(ctx), stateTags, &opts.response.Diagnostics).Map(); len(v) > 0 { stateTags = tftags.NewMapFromMapValue(fwflex.FlattenFrameworkStringValueMapLegacy(ctx, v)) } - diags.Append(response.State.SetAttribute(ctx, path.Root(names.AttrTags), &stateTags)...) - if diags.HasError() { - return diags + opts.response.Diagnostics.Append(response.State.SetAttribute(ctx, path.Root(names.AttrTags), &stateTags)...) + if opts.response.Diagnostics.HasError() { + return } // Computed tags_all do. stateTagsAll := fwflex.FlattenFrameworkStringValueMapLegacy(ctx, apiTags.IgnoreSystem(sp.ServicePackageName()).IgnoreConfig(c.IgnoreTagsConfig(ctx)).Map()) - diags.Append(response.State.SetAttribute(ctx, path.Root(names.AttrTagsAll), tftags.NewMapFromMapValue(stateTagsAll))...) - if diags.HasError() { - return diags + opts.response.Diagnostics.Append(response.State.SetAttribute(ctx, path.Root(names.AttrTagsAll), tftags.NewMapFromMapValue(stateTagsAll))...) 
+ if opts.response.Diagnostics.HasError() { + return } } - - return diags } -func (r tagsResourceInterceptor) update(ctx context.Context, opts interceptorOptions[resource.UpdateRequest, resource.UpdateResponse]) diag.Diagnostics { +func (r tagsResourceInterceptor) update(ctx context.Context, opts interceptorOptions[resource.UpdateRequest, resource.UpdateResponse]) { c := opts.c - var diags diag.Diagnostics if !r.Enabled() { - return diags + return } sp, serviceName, resourceName, tagsInContext, ok := interceptors.InfoFromContext(ctx, c) if !ok { - return diags + return } switch request, when := opts.request, opts.when; when { case Before: var planTags tftags.Map - diags.Append(request.Plan.GetAttribute(ctx, path.Root(names.AttrTags), &planTags)...) - if diags.HasError() { - return diags + opts.response.Diagnostics.Append(request.Plan.GetAttribute(ctx, path.Root(names.AttrTags), &planTags)...) + if opts.response.Diagnostics.HasError() { + return } // Merge the resource's configured tags with any provider configured default_tags. @@ -210,13 +199,13 @@ func (r tagsResourceInterceptor) update(ctx context.Context, opts interceptorOpt tagsInContext.TagsIn = option.Some(tags) var oldTagsAll, newTagsAll tftags.Map - diags.Append(request.State.GetAttribute(ctx, path.Root(names.AttrTagsAll), &oldTagsAll)...) - if diags.HasError() { - return diags + opts.response.Diagnostics.Append(request.State.GetAttribute(ctx, path.Root(names.AttrTagsAll), &oldTagsAll)...) + if opts.response.Diagnostics.HasError() { + return } - diags.Append(request.Plan.GetAttribute(ctx, path.Root(names.AttrTagsAll), &newTagsAll)...) - if diags.HasError() { - return diags + opts.response.Diagnostics.Append(request.Plan.GetAttribute(ctx, path.Root(names.AttrTagsAll), &newTagsAll)...) 
+ if opts.response.Diagnostics.HasError() { + return } if !newTagsAll.Equal(oldTagsAll) { @@ -224,49 +213,44 @@ func (r tagsResourceInterceptor) update(ctx context.Context, opts interceptorOpt // https://github.com/hashicorp/terraform-provider-aws/issues/31180 if identifier := r.GetIdentifierFramework(ctx, request.Plan); identifier != "" { if err := r.UpdateTags(ctx, sp, c, identifier, oldTagsAll, newTagsAll); err != nil { - diags.AddError(fmt.Sprintf("updating tags for %s %s (%s)", serviceName, resourceName, identifier), err.Error()) + opts.response.Diagnostics.AddError(fmt.Sprintf("updating tags for %s %s (%s)", serviceName, resourceName, identifier), err.Error()) - return diags + return } } // TODO If the only change was to tags it would be nice to not call the resource's U handler. } } - - return diags } -func (r tagsResourceInterceptor) modifyPlan(ctx context.Context, opts interceptorOptions[resource.ModifyPlanRequest, resource.ModifyPlanResponse]) diag.Diagnostics { +func (r tagsResourceInterceptor) modifyPlan(ctx context.Context, opts interceptorOptions[resource.ModifyPlanRequest, resource.ModifyPlanResponse]) { c := opts.c - var diags diag.Diagnostics switch request, response, when := opts.request, opts.response, opts.when; when { case Before: // If the entire plan is null, the resource is planned for destruction. if request.Plan.Raw.IsNull() { - return diags + return } // Calculate the new value for the `tags_all` attribute. var planTags tftags.Map - diags.Append(request.Plan.GetAttribute(ctx, path.Root(names.AttrTags), &planTags)...) - if diags.HasError() { - return diags + opts.response.Diagnostics.Append(request.Plan.GetAttribute(ctx, path.Root(names.AttrTags), &planTags)...) 
+ if opts.response.Diagnostics.HasError() { + return } if planTags.IsWhollyKnown() { allTags := c.DefaultTagsConfig(ctx).MergeTags(tftags.New(ctx, planTags)).IgnoreConfig(c.IgnoreTagsConfig(ctx)) - diags.Append(response.Plan.SetAttribute(ctx, path.Root(names.AttrTagsAll), fwflex.FlattenFrameworkStringValueMapLegacy(ctx, allTags.Map()))...) + opts.response.Diagnostics.Append(response.Plan.SetAttribute(ctx, path.Root(names.AttrTagsAll), fwflex.FlattenFrameworkStringValueMapLegacy(ctx, allTags.Map()))...) } else { - diags.Append(response.Plan.SetAttribute(ctx, path.Root(names.AttrTagsAll), tftags.Unknown)...) + opts.response.Diagnostics.Append(response.Plan.SetAttribute(ctx, path.Root(names.AttrTagsAll), tftags.Unknown)...) } - if diags.HasError() { - return diags + if opts.response.Diagnostics.HasError() { + return } } - - return diags } func resourceTransparentTagging(servicePackageResourceTags unique.Handle[inttypes.ServicePackageResourceTags]) interface { diff --git a/internal/provider/framework/when_string_test.go b/internal/provider/framework/when_string_test.go new file mode 100644 index 000000000000..30a19a106a18 --- /dev/null +++ b/internal/provider/framework/when_string_test.go @@ -0,0 +1,39 @@ +// Code generated by "stringer -type=when -output=when_string_test.go"; DO NOT EDIT. + +package framework + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. 
+ var x [1]struct{} + _ = x[Before-1] + _ = x[After-2] + _ = x[OnError-4] + _ = x[Finally-8] +} + +const ( + _when_name_0 = "BeforeAfter" + _when_name_1 = "OnError" + _when_name_2 = "Finally" +) + +var ( + _when_index_0 = [...]uint8{0, 6, 11} +) + +func (i when) String() string { + switch { + case 1 <= i && i <= 2: + i -= 1 + return _when_name_0[_when_index_0[i]:_when_index_0[i+1]] + case i == 4: + return _when_name_1 + case i == 8: + return _when_name_2 + default: + return "when(" + strconv.FormatInt(int64(i), 10) + ")" + } +} diff --git a/internal/provider/framework/wrap.go b/internal/provider/framework/wrap.go index e6c17c236a4c..ad7554ed08c7 100644 --- a/internal/provider/framework/wrap.go +++ b/internal/provider/framework/wrap.go @@ -6,64 +6,117 @@ package framework import ( "context" + "github.com/hashicorp/terraform-plugin-framework/action" "github.com/hashicorp/terraform-plugin-framework/datasource" "github.com/hashicorp/terraform-plugin-framework/diag" "github.com/hashicorp/terraform-plugin-framework/ephemeral" + "github.com/hashicorp/terraform-plugin-framework/list" "github.com/hashicorp/terraform-plugin-framework/path" "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/types" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/errs/fwdiag" "github.com/hashicorp/terraform-provider-aws/internal/framework" + fwflex "github.com/hashicorp/terraform-provider-aws/internal/framework/flex" + tfiter "github.com/hashicorp/terraform-provider-aws/internal/iter" + "github.com/hashicorp/terraform-provider-aws/internal/logging" "github.com/hashicorp/terraform-provider-aws/internal/provider/framework/identity" "github.com/hashicorp/terraform-provider-aws/internal/provider/framework/importer" - "github.com/hashicorp/terraform-provider-aws/internal/types" + 
"github.com/hashicorp/terraform-provider-aws/internal/provider/framework/listresource" + tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" + inttypes "github.com/hashicorp/terraform-provider-aws/internal/types" + tfunique "github.com/hashicorp/terraform-provider-aws/internal/unique" + "github.com/hashicorp/terraform-provider-aws/names" ) // Implemented by (Config|Plan|State).GetAttribute(). type getAttributeFunc func(context.Context, path.Path, any) diag.Diagnostics -// contextFunc augments Context. -type contextFunc func(context.Context, getAttributeFunc, *conns.AWSClient) (context.Context, diag.Diagnostics) - -type wrappedDataSourceOptions struct { - // bootstrapContext is run on all wrapped methods before any interceptors. - bootstrapContext contextFunc - interceptors interceptorInvocations - typeName string -} - // wrappedDataSource represents an interceptor dispatcher for a Plugin Framework data source. type wrappedDataSource struct { - inner datasource.DataSourceWithConfigure - meta *conns.AWSClient - opts wrappedDataSourceOptions + inner datasource.DataSourceWithConfigure + meta *conns.AWSClient + servicePackageName string + spec *inttypes.ServicePackageFrameworkDataSource + interceptors interceptorInvocations } -func newWrappedDataSource(inner datasource.DataSourceWithConfigure, opts wrappedDataSourceOptions) datasource.DataSourceWithConfigure { +func newWrappedDataSource(spec *inttypes.ServicePackageFrameworkDataSource, servicePackageName string) datasource.DataSourceWithConfigure { + var isRegionOverrideEnabled bool + if regionSpec := spec.Region; !tfunique.IsHandleNil(regionSpec) && regionSpec.Value().IsOverrideEnabled { + isRegionOverrideEnabled = true + } + + var interceptors interceptorInvocations + + if isRegionOverrideEnabled { + v := spec.Region.Value() + + interceptors = append(interceptors, dataSourceInjectRegionAttribute()) + if v.IsValidateOverrideInPartition { + interceptors = append(interceptors, dataSourceValidateRegion()) + 
} + interceptors = append(interceptors, dataSourceSetRegionInState()) + } + + if !tfunique.IsHandleNil(spec.Tags) { + interceptors = append(interceptors, dataSourceTransparentTagging(spec.Tags)) + } + + inner, _ := spec.Factory(context.TODO()) + return &wrappedDataSource{ - inner: inner, - opts: opts, + inner: inner, + servicePackageName: servicePackageName, + spec: spec, + interceptors: interceptors, } } +// context is run on all wrapped methods before any interceptors. +func (w *wrappedDataSource) context(ctx context.Context, getAttribute getAttributeFunc, c *conns.AWSClient) (context.Context, diag.Diagnostics) { + var diags diag.Diagnostics + var overrideRegion string + + var isRegionOverrideEnabled bool + if regionSpec := w.spec.Region; !tfunique.IsHandleNil(regionSpec) && regionSpec.Value().IsOverrideEnabled { + isRegionOverrideEnabled = true + } + + if isRegionOverrideEnabled && getAttribute != nil { + var target types.String + diags.Append(getAttribute(ctx, path.Root(names.AttrRegion), &target)...) + if diags.HasError() { + return ctx, diags + } + + overrideRegion = target.ValueString() + } + + ctx = conns.NewResourceContext(ctx, w.servicePackageName, w.spec.Name, overrideRegion) + if c != nil { + ctx = tftags.NewContext(ctx, c.DefaultTagsConfig(ctx), c.IgnoreTagsConfig(ctx)) + ctx = c.RegisterLogger(ctx) + ctx = fwflex.RegisterLogger(ctx) + } + + return ctx, diags +} + func (w *wrappedDataSource) Metadata(ctx context.Context, request datasource.MetadataRequest, response *datasource.MetadataResponse) { // This method does not call down to the inner data source. - response.TypeName = w.opts.typeName + response.TypeName = w.spec.TypeName } func (w *wrappedDataSource) Schema(ctx context.Context, request datasource.SchemaRequest, response *datasource.SchemaResponse) { - ctx, diags := w.opts.bootstrapContext(ctx, nil, w.meta) + ctx, diags := w.context(ctx, nil, w.meta) response.Diagnostics.Append(diags...) 
if response.Diagnostics.HasError() { return } - f := func(ctx context.Context, request *datasource.SchemaRequest, response *datasource.SchemaResponse) diag.Diagnostics { - w.inner.Schema(ctx, *request, response) - return response.Diagnostics - } - response.Diagnostics.Append(interceptedHandler(w.opts.interceptors.dataSourceSchema(), f, w.meta)(ctx, &request, response)...) + interceptedHandler(w.interceptors.dataSourceSchema(), w.inner.Schema, dataSourceSchemaHasError, w.meta)(ctx, request, response) if response.Diagnostics.HasError() { return } @@ -72,26 +125,22 @@ func (w *wrappedDataSource) Schema(ctx context.Context, request datasource.Schem if v, ok := w.inner.(framework.DataSourceValidateModel); ok { response.Diagnostics.Append(v.ValidateModel(ctx, &response.Schema)...) if response.Diagnostics.HasError() { - response.Diagnostics.AddError("data source model validation error", w.opts.typeName) + response.Diagnostics.AddError("data source model validation error", w.spec.TypeName) return } } else { - response.Diagnostics.AddError("missing framework.DataSourceValidateModel", w.opts.typeName) + response.Diagnostics.AddError("missing framework.DataSourceValidateModel", w.spec.TypeName) } } func (w *wrappedDataSource) Read(ctx context.Context, request datasource.ReadRequest, response *datasource.ReadResponse) { - ctx, diags := w.opts.bootstrapContext(ctx, request.Config.GetAttribute, w.meta) + ctx, diags := w.context(ctx, request.Config.GetAttribute, w.meta) response.Diagnostics.Append(diags...) if response.Diagnostics.HasError() { return } - f := func(ctx context.Context, request *datasource.ReadRequest, response *datasource.ReadResponse) diag.Diagnostics { - w.inner.Read(ctx, *request, response) - return response.Diagnostics - } - response.Diagnostics.Append(interceptedHandler(w.opts.interceptors.dataSourceRead(), f, w.meta)(ctx, &request, response)...) 
+ interceptedHandler(w.interceptors.dataSourceRead(), w.inner.Read, dataSourceReadHasError, w.meta)(ctx, request, response) } func (w *wrappedDataSource) Configure(ctx context.Context, request datasource.ConfigureRequest, response *datasource.ConfigureResponse) { @@ -99,7 +148,7 @@ func (w *wrappedDataSource) Configure(ctx context.Context, request datasource.Co w.meta = v } - ctx, diags := w.opts.bootstrapContext(ctx, nil, w.meta) + ctx, diags := w.context(ctx, nil, w.meta) response.Diagnostics.Append(diags...) if response.Diagnostics.HasError() { return @@ -110,10 +159,10 @@ func (w *wrappedDataSource) Configure(ctx context.Context, request datasource.Co func (w *wrappedDataSource) ConfigValidators(ctx context.Context) []datasource.ConfigValidator { if v, ok := w.inner.(datasource.DataSourceWithConfigValidators); ok { - ctx, diags := w.opts.bootstrapContext(ctx, nil, w.meta) + ctx, diags := w.context(ctx, nil, w.meta) if diags.HasError() { tflog.Warn(ctx, "wrapping ConfigValidators", map[string]any{ - "data source": w.opts.typeName, + "data source": w.spec.TypeName, "bootstrapContext error": fwdiag.DiagnosticsString(diags), }) @@ -128,7 +177,7 @@ func (w *wrappedDataSource) ConfigValidators(ctx context.Context) []datasource.C func (w *wrappedDataSource) ValidateConfig(ctx context.Context, request datasource.ValidateConfigRequest, response *datasource.ValidateConfigResponse) { if v, ok := w.inner.(datasource.DataSourceWithValidateConfig); ok { - ctx, diags := w.opts.bootstrapContext(ctx, request.Config.GetAttribute, w.meta) + ctx, diags := w.context(ctx, request.Config.GetAttribute, w.meta) response.Diagnostics.Append(diags...) if response.Diagnostics.HasError() { return @@ -138,69 +187,107 @@ func (w *wrappedDataSource) ValidateConfig(ctx context.Context, request datasour } } -type wrappedEphemeralResourceOptions struct { - // bootstrapContext is run on all wrapped methods before any interceptors. 
- bootstrapContext contextFunc - interceptors interceptorInvocations - typeName string -} - // wrappedEphemeralResource represents an interceptor dispatcher for a Plugin Framework ephemeral resource. type wrappedEphemeralResource struct { - inner ephemeral.EphemeralResourceWithConfigure - meta *conns.AWSClient - opts wrappedEphemeralResourceOptions + inner ephemeral.EphemeralResourceWithConfigure + meta *conns.AWSClient + servicePackageName string + spec *inttypes.ServicePackageEphemeralResource + interceptors interceptorInvocations } -func newWrappedEphemeralResource(inner ephemeral.EphemeralResourceWithConfigure, opts wrappedEphemeralResourceOptions) ephemeral.EphemeralResourceWithConfigure { +func newWrappedEphemeralResource(spec *inttypes.ServicePackageEphemeralResource, servicePackageName string) ephemeral.EphemeralResourceWithConfigure { + var isRegionOverrideEnabled bool + if regionSpec := spec.Region; !tfunique.IsHandleNil(regionSpec) && regionSpec.Value().IsOverrideEnabled { + isRegionOverrideEnabled = true + } + + var interceptors interceptorInvocations + + if isRegionOverrideEnabled { + v := spec.Region.Value() + + interceptors = append(interceptors, ephemeralResourceInjectRegionAttribute()) + if v.IsValidateOverrideInPartition { + interceptors = append(interceptors, ephemeralResourceValidateRegion()) + } + interceptors = append(interceptors, ephemeralResourceSetRegionInResult()) + } + + inner, _ := spec.Factory(context.TODO()) + return &wrappedEphemeralResource{ - inner: inner, - opts: opts, + inner: inner, + servicePackageName: servicePackageName, + spec: spec, + interceptors: interceptors, } } +// context is run on all wrapped methods before any interceptors. 
+func (w *wrappedEphemeralResource) context(ctx context.Context, getAttribute getAttributeFunc, c *conns.AWSClient) (context.Context, diag.Diagnostics) { + var diags diag.Diagnostics + var overrideRegion string + + var isRegionOverrideEnabled bool + if regionSpec := w.spec.Region; !tfunique.IsHandleNil(regionSpec) && regionSpec.Value().IsOverrideEnabled { + isRegionOverrideEnabled = true + } + + if isRegionOverrideEnabled && getAttribute != nil { + var target types.String + diags.Append(getAttribute(ctx, path.Root(names.AttrRegion), &target)...) + if diags.HasError() { + return ctx, diags + } + + overrideRegion = target.ValueString() + } + + ctx = conns.NewResourceContext(ctx, w.servicePackageName, w.spec.Name, overrideRegion) + if c != nil { + ctx = c.RegisterLogger(ctx) + ctx = fwflex.RegisterLogger(ctx) + ctx = logging.MaskSensitiveValuesByKey(ctx, logging.HTTPKeyRequestBody, logging.HTTPKeyResponseBody) + } + + return ctx, diags +} + func (w *wrappedEphemeralResource) Metadata(ctx context.Context, request ephemeral.MetadataRequest, response *ephemeral.MetadataResponse) { // This method does not call down to the inner ephemeral resource. - response.TypeName = w.opts.typeName + response.TypeName = w.spec.TypeName } func (w *wrappedEphemeralResource) Schema(ctx context.Context, request ephemeral.SchemaRequest, response *ephemeral.SchemaResponse) { - ctx, diags := w.opts.bootstrapContext(ctx, nil, w.meta) + ctx, diags := w.context(ctx, nil, w.meta) response.Diagnostics.Append(diags...) if response.Diagnostics.HasError() { return } - f := func(ctx context.Context, request *ephemeral.SchemaRequest, response *ephemeral.SchemaResponse) diag.Diagnostics { - w.inner.Schema(ctx, *request, response) - return response.Diagnostics - } - response.Diagnostics.Append(interceptedHandler(w.opts.interceptors.ephemeralResourceSchema(), f, w.meta)(ctx, &request, response)...) 
+ interceptedHandler(w.interceptors.ephemeralResourceSchema(), w.inner.Schema, ephemeralSchemaHasError, w.meta)(ctx, request, response) // Validate the ephemeral resource's model against the schema. if v, ok := w.inner.(framework.EphemeralResourceValidateModel); ok { response.Diagnostics.Append(v.ValidateModel(ctx, &response.Schema)...) if response.Diagnostics.HasError() { - response.Diagnostics.AddError("ephemeral resource model validation error", w.opts.typeName) + response.Diagnostics.AddError("ephemeral resource model validation error", w.spec.TypeName) return } } else { - response.Diagnostics.AddError("missing framework.EphemeralResourceValidateModel", w.opts.typeName) + response.Diagnostics.AddError("missing framework.EphemeralResourceValidateModel", w.spec.TypeName) } } func (w *wrappedEphemeralResource) Open(ctx context.Context, request ephemeral.OpenRequest, response *ephemeral.OpenResponse) { - ctx, diags := w.opts.bootstrapContext(ctx, request.Config.GetAttribute, w.meta) + ctx, diags := w.context(ctx, request.Config.GetAttribute, w.meta) response.Diagnostics.Append(diags...) if response.Diagnostics.HasError() { return } - f := func(ctx context.Context, request *ephemeral.OpenRequest, response *ephemeral.OpenResponse) diag.Diagnostics { - w.inner.Open(ctx, *request, response) - return response.Diagnostics - } - response.Diagnostics.Append(interceptedHandler(w.opts.interceptors.ephemeralResourceOpen(), f, w.meta)(ctx, &request, response)...) 
+ interceptedHandler(w.interceptors.ephemeralResourceOpen(), w.inner.Open, ephemeralOpenHasError, w.meta)(ctx, request, response) } func (w *wrappedEphemeralResource) Configure(ctx context.Context, request ephemeral.ConfigureRequest, response *ephemeral.ConfigureResponse) { @@ -208,7 +295,7 @@ func (w *wrappedEphemeralResource) Configure(ctx context.Context, request epheme w.meta = v } - ctx, diags := w.opts.bootstrapContext(ctx, nil, w.meta) + ctx, diags := w.context(ctx, nil, w.meta) response.Diagnostics.Append(diags...) if response.Diagnostics.HasError() { return @@ -219,42 +306,34 @@ func (w *wrappedEphemeralResource) Configure(ctx context.Context, request epheme func (w *wrappedEphemeralResource) Renew(ctx context.Context, request ephemeral.RenewRequest, response *ephemeral.RenewResponse) { if v, ok := w.inner.(ephemeral.EphemeralResourceWithRenew); ok { - ctx, diags := w.opts.bootstrapContext(ctx, nil, w.meta) + ctx, diags := w.context(ctx, nil, w.meta) response.Diagnostics.Append(diags...) if response.Diagnostics.HasError() { return } - f := func(ctx context.Context, request *ephemeral.RenewRequest, response *ephemeral.RenewResponse) diag.Diagnostics { - v.Renew(ctx, *request, response) - return response.Diagnostics - } - response.Diagnostics.Append(interceptedHandler(w.opts.interceptors.ephemeralResourceRenew(), f, w.meta)(ctx, &request, response)...) + interceptedHandler(w.interceptors.ephemeralResourceRenew(), v.Renew, ephemeralRenewHasError, w.meta)(ctx, request, response) } } func (w *wrappedEphemeralResource) Close(ctx context.Context, request ephemeral.CloseRequest, response *ephemeral.CloseResponse) { if v, ok := w.inner.(ephemeral.EphemeralResourceWithClose); ok { - ctx, diags := w.opts.bootstrapContext(ctx, nil, w.meta) + ctx, diags := w.context(ctx, nil, w.meta) response.Diagnostics.Append(diags...) 
if response.Diagnostics.HasError() { return } - f := func(ctx context.Context, request *ephemeral.CloseRequest, response *ephemeral.CloseResponse) diag.Diagnostics { - v.Close(ctx, *request, response) - return response.Diagnostics - } - response.Diagnostics.Append(interceptedHandler(w.opts.interceptors.ephemeralResourceClose(), f, w.meta)(ctx, &request, response)...) + interceptedHandler(w.interceptors.ephemeralResourceClose(), v.Close, ephemeralCloseHasError, w.meta)(ctx, request, response) } } func (w *wrappedEphemeralResource) ConfigValidators(ctx context.Context) []ephemeral.ConfigValidator { if v, ok := w.inner.(ephemeral.EphemeralResourceWithConfigValidators); ok { - ctx, diags := w.opts.bootstrapContext(ctx, nil, w.meta) + ctx, diags := w.context(ctx, nil, w.meta) if diags.HasError() { tflog.Warn(ctx, "wrapping ConfigValidators", map[string]any{ - "ephemeral resource": w.opts.typeName, + "ephemeral resource": w.spec.TypeName, "bootstrapContext error": fwdiag.DiagnosticsString(diags), }) @@ -269,7 +348,7 @@ func (w *wrappedEphemeralResource) ConfigValidators(ctx context.Context) []ephem func (w *wrappedEphemeralResource) ValidateConfig(ctx context.Context, request ephemeral.ValidateConfigRequest, response *ephemeral.ValidateConfigResponse) { if v, ok := w.inner.(ephemeral.EphemeralResourceWithValidateConfig); ok { - ctx, diags := w.opts.bootstrapContext(ctx, request.Config.GetAttribute, w.meta) + ctx, diags := w.context(ctx, request.Config.GetAttribute, w.meta) response.Diagnostics.Append(diags...) if response.Diagnostics.HasError() { return @@ -279,116 +358,328 @@ func (w *wrappedEphemeralResource) ValidateConfig(ctx context.Context, request e } } -type wrappedResourceOptions struct { - // bootstrapContext is run on all wrapped methods before any interceptors. - bootstrapContext contextFunc - interceptors interceptorInvocations - typeName string - identity types.Identity +// wrappedAction represents an interceptor dispatcher for a Plugin Framework action. 
+type wrappedAction struct { + inner action.ActionWithConfigure + meta *conns.AWSClient + servicePackageName string + spec *inttypes.ServicePackageAction + interceptors interceptorInvocations +} + +func newWrappedAction(spec *inttypes.ServicePackageAction, servicePackageName string) action.ActionWithConfigure { + var isRegionOverrideEnabled bool + if regionSpec := spec.Region; !tfunique.IsHandleNil(regionSpec) && regionSpec.Value().IsOverrideEnabled { + isRegionOverrideEnabled = true + } + + var interceptors interceptorInvocations + + if isRegionOverrideEnabled { + v := spec.Region.Value() + + interceptors = append(interceptors, actionInjectRegionAttribute()) + if v.IsValidateOverrideInPartition { + interceptors = append(interceptors, actionValidateRegion()) + } + } + + inner, _ := spec.Factory(context.TODO()) + + return &wrappedAction{ + inner: inner, + servicePackageName: servicePackageName, + spec: spec, + interceptors: interceptors, + } +} + +// context is run on all wrapped methods before any interceptors. +func (w *wrappedAction) context(ctx context.Context, getAttribute getAttributeFunc, c *conns.AWSClient) (context.Context, diag.Diagnostics) { + var diags diag.Diagnostics + var overrideRegion string + + var isRegionOverrideEnabled bool + if regionSpec := w.spec.Region; !tfunique.IsHandleNil(regionSpec) && regionSpec.Value().IsOverrideEnabled { + isRegionOverrideEnabled = true + } + + if isRegionOverrideEnabled && getAttribute != nil { + var target types.String + diags.Append(getAttribute(ctx, path.Root(names.AttrRegion), &target)...) 
+ if diags.HasError() { + return ctx, diags + } + + overrideRegion = target.ValueString() + } + + ctx = conns.NewResourceContext(ctx, w.servicePackageName, w.spec.Name, overrideRegion) + if c != nil { + ctx = c.RegisterLogger(ctx) + ctx = fwflex.RegisterLogger(ctx) + ctx = logging.MaskSensitiveValuesByKey(ctx, logging.HTTPKeyRequestBody, logging.HTTPKeyResponseBody) + } + + return ctx, diags +} + +func (w *wrappedAction) Metadata(ctx context.Context, request action.MetadataRequest, response *action.MetadataResponse) { + // This method does not call down to the inner action. + response.TypeName = w.spec.TypeName +} + +func (w *wrappedAction) Schema(ctx context.Context, request action.SchemaRequest, response *action.SchemaResponse) { + ctx, diags := w.context(ctx, nil, w.meta) + response.Diagnostics.Append(diags...) + if response.Diagnostics.HasError() { + return + } + + f := func(ctx context.Context, request action.SchemaRequest, response *action.SchemaResponse) { + w.inner.Schema(ctx, request, response) + } + interceptedHandler(w.interceptors.actionSchema(), f, actionSchemaHasError, w.meta)(ctx, request, response) + + // Validate the action's model against the schema. + if v, ok := w.inner.(framework.ActionValidateModel); ok { + response.Diagnostics.Append(v.ValidateModel(ctx, &response.Schema)...) + if response.Diagnostics.HasError() { + response.Diagnostics.AddError("action model validation error", w.spec.TypeName) + return + } + } else { + response.Diagnostics.AddError("missing framework.ActionValidateModel", w.spec.TypeName) + } +} + +func (w *wrappedAction) Invoke(ctx context.Context, request action.InvokeRequest, response *action.InvokeResponse) { + ctx, diags := w.context(ctx, request.Config.GetAttribute, w.meta) + response.Diagnostics.Append(diags...) 
+ if response.Diagnostics.HasError() { + return + } + + f := func(ctx context.Context, request action.InvokeRequest, response *action.InvokeResponse) { + w.inner.Invoke(ctx, request, response) + } + interceptedHandler(w.interceptors.actionInvoke(), f, actionInvokeHasError, w.meta)(ctx, request, response) +} + +func (w *wrappedAction) Configure(ctx context.Context, request action.ConfigureRequest, response *action.ConfigureResponse) { + if v, ok := request.ProviderData.(*conns.AWSClient); ok { + w.meta = v + } + + ctx, diags := w.context(ctx, nil, w.meta) + response.Diagnostics.Append(diags...) + if response.Diagnostics.HasError() { + return + } + + w.inner.Configure(ctx, request, response) +} + +func (w *wrappedAction) ConfigValidators(ctx context.Context) []action.ConfigValidator { + if v, ok := w.inner.(action.ActionWithConfigValidators); ok { + ctx, diags := w.context(ctx, nil, w.meta) + if diags.HasError() { + tflog.Warn(ctx, "wrapping ConfigValidators", map[string]any{ + "action": w.spec.TypeName, + "bootstrapContext error": fwdiag.DiagnosticsString(diags), + }) + + return nil + } + + return v.ConfigValidators(ctx) + } + + return nil +} + +func (w *wrappedAction) ValidateConfig(ctx context.Context, request action.ValidateConfigRequest, response *action.ValidateConfigResponse) { + if v, ok := w.inner.(action.ActionWithValidateConfig); ok { + ctx, diags := w.context(ctx, request.Config.GetAttribute, w.meta) + response.Diagnostics.Append(diags...) + if response.Diagnostics.HasError() { + return + } + + v.ValidateConfig(ctx, request, response) + } } // wrappedResource represents an interceptor dispatcher for a Plugin Framework resource. 
type wrappedResource struct { - inner resource.ResourceWithConfigure - meta *conns.AWSClient - opts wrappedResourceOptions + inner resource.ResourceWithConfigure + meta *conns.AWSClient + servicePackageName string + spec *inttypes.ServicePackageFrameworkResource + interceptors interceptorInvocations } -func newWrappedResource(inner resource.ResourceWithConfigure, opts wrappedResourceOptions) resource.ResourceWithConfigure { - return &wrappedResource{ - inner: inner, - opts: opts, +func newWrappedResource(spec *inttypes.ServicePackageFrameworkResource, servicePackageName string) resource.ResourceWithConfigure { + var isRegionOverrideEnabled bool + if v := spec.Region; !tfunique.IsHandleNil(v) && v.Value().IsOverrideEnabled { + isRegionOverrideEnabled = true + } + + var interceptors interceptorInvocations + + if isRegionOverrideEnabled { + v := spec.Region.Value() + + interceptors = append(interceptors, resourceInjectRegionAttribute()) + if v.IsValidateOverrideInPartition { + interceptors = append(interceptors, resourceValidateRegion()) + } + interceptors = append(interceptors, resourceDefaultRegion()) + interceptors = append(interceptors, resourceForceNewIfRegionChanges()) + interceptors = append(interceptors, resourceSetRegionInState()) + if spec.Identity.HasInherentRegion() { + interceptors = append(interceptors, resourceImportRegionNoDefault()) + } else { + interceptors = append(interceptors, resourceImportRegion()) + } + } + + if !tfunique.IsHandleNil(spec.Tags) { + interceptors = append(interceptors, resourceTransparentTagging(spec.Tags)) + } + + inner, _ := spec.Factory(context.TODO()) + + if len(spec.Identity.Attributes) == 0 { + return &wrappedResource{ + inner: inner, + servicePackageName: servicePackageName, + spec: spec, + interceptors: interceptors, + } + } + + interceptors = append(interceptors, newIdentityInterceptor(spec.Identity.Attributes)) + if v, ok := inner.(framework.Identityer); ok { + v.SetIdentitySpec(spec.Identity) + } + + if 
spec.Import.WrappedImport { + if v, ok := inner.(framework.ImportByIdentityer); ok { + v.SetImportSpec(spec.Import) + } + // If the resource does not implement framework.ImportByIdentityer, + // it will be caught by `validateResourceSchemas`, so we can ignore it here. + } + + return &wrappedResourceWithIdentity{ + wrappedResource: wrappedResource{ + inner: inner, + servicePackageName: servicePackageName, + spec: spec, + interceptors: interceptors, + }, } } +// context is run on all wrapped methods before any interceptors. +func (w *wrappedResource) context(ctx context.Context, getAttribute getAttributeFunc, c *conns.AWSClient) (context.Context, diag.Diagnostics) { + var diags diag.Diagnostics + var overrideRegion string + + var isRegionOverrideEnabled bool + if regionSpec := w.spec.Region; !tfunique.IsHandleNil(regionSpec) && regionSpec.Value().IsOverrideEnabled { + isRegionOverrideEnabled = true + } + + if isRegionOverrideEnabled && getAttribute != nil { + var target types.String + diags.Append(getAttribute(ctx, path.Root(names.AttrRegion), &target)...) + if diags.HasError() { + return ctx, diags + } + + overrideRegion = target.ValueString() + } + + ctx = conns.NewResourceContext(ctx, w.servicePackageName, w.spec.Name, overrideRegion) + if c != nil { + ctx = tftags.NewContext(ctx, c.DefaultTagsConfig(ctx), c.IgnoreTagsConfig(ctx)) + ctx = c.RegisterLogger(ctx) + ctx = fwflex.RegisterLogger(ctx) + } + + return ctx, diags +} + func (w *wrappedResource) Metadata(ctx context.Context, request resource.MetadataRequest, response *resource.MetadataResponse) { // This method does not call down to the inner resource. 
- response.TypeName = w.opts.typeName + response.TypeName = w.spec.TypeName - if w.opts.identity.IsMutable { + if w.spec.Identity.IsMutable { response.ResourceBehavior.MutableIdentity = true } } func (w *wrappedResource) Schema(ctx context.Context, request resource.SchemaRequest, response *resource.SchemaResponse) { - ctx, diags := w.opts.bootstrapContext(ctx, nil, w.meta) + ctx, diags := w.context(ctx, nil, w.meta) response.Diagnostics.Append(diags...) if response.Diagnostics.HasError() { return } - f := func(ctx context.Context, request *resource.SchemaRequest, response *resource.SchemaResponse) diag.Diagnostics { - w.inner.Schema(ctx, *request, response) - return response.Diagnostics - } - response.Diagnostics.Append(interceptedHandler(w.opts.interceptors.resourceSchema(), f, w.meta)(ctx, &request, response)...) + interceptedHandler(w.interceptors.resourceSchema(), w.inner.Schema, resourceSchemaHasError, w.meta)(ctx, request, response) // Validate the resource's model against the schema. if v, ok := w.inner.(framework.ResourceValidateModel); ok { response.Diagnostics.Append(v.ValidateModel(ctx, &response.Schema)...) if response.Diagnostics.HasError() { - response.Diagnostics.AddError("resource model validation error", w.opts.typeName) + response.Diagnostics.AddError("resource model validation error", w.spec.TypeName) return } - } else if w.opts.typeName != "aws_lexv2models_bot_version" { // Hacky yukkery caused by attribute of type map[string]Object. - response.Diagnostics.AddError("missing framework.ResourceValidateModel", w.opts.typeName) + } else if w.spec.TypeName != "aws_lexv2models_bot_version" { // Hacky yukkery caused by attribute of type map[string]Object. 
+ response.Diagnostics.AddError("missing framework.ResourceValidateModel", w.spec.TypeName) } } func (w *wrappedResource) Create(ctx context.Context, request resource.CreateRequest, response *resource.CreateResponse) { - ctx, diags := w.opts.bootstrapContext(ctx, request.Plan.GetAttribute, w.meta) + ctx, diags := w.context(ctx, request.Plan.GetAttribute, w.meta) response.Diagnostics.Append(diags...) if response.Diagnostics.HasError() { return } - f := func(ctx context.Context, request *resource.CreateRequest, response *resource.CreateResponse) diag.Diagnostics { - w.inner.Create(ctx, *request, response) - return response.Diagnostics - } - response.Diagnostics.Append(interceptedHandler(w.opts.interceptors.resourceCreate(), f, w.meta)(ctx, &request, response)...) + interceptedHandler(w.interceptors.resourceCreate(), w.inner.Create, resourceCreateHasError, w.meta)(ctx, request, response) } func (w *wrappedResource) Read(ctx context.Context, request resource.ReadRequest, response *resource.ReadResponse) { - ctx, diags := w.opts.bootstrapContext(ctx, request.State.GetAttribute, w.meta) + ctx, diags := w.context(ctx, request.State.GetAttribute, w.meta) response.Diagnostics.Append(diags...) if response.Diagnostics.HasError() { return } - f := func(ctx context.Context, request *resource.ReadRequest, response *resource.ReadResponse) diag.Diagnostics { - w.inner.Read(ctx, *request, response) - return response.Diagnostics - } - response.Diagnostics.Append(interceptedHandler(w.opts.interceptors.resourceRead(), f, w.meta)(ctx, &request, response)...) + interceptedHandler(w.interceptors.resourceRead(), w.inner.Read, resourceReadHasError, w.meta)(ctx, request, response) } func (w *wrappedResource) Update(ctx context.Context, request resource.UpdateRequest, response *resource.UpdateResponse) { - ctx, diags := w.opts.bootstrapContext(ctx, request.Plan.GetAttribute, w.meta) + ctx, diags := w.context(ctx, request.Plan.GetAttribute, w.meta) response.Diagnostics.Append(diags...) 
if response.Diagnostics.HasError() { return } - f := func(ctx context.Context, request *resource.UpdateRequest, response *resource.UpdateResponse) diag.Diagnostics { - w.inner.Update(ctx, *request, response) - return response.Diagnostics - } - response.Diagnostics.Append(interceptedHandler(w.opts.interceptors.resourceUpdate(), f, w.meta)(ctx, &request, response)...) + interceptedHandler(w.interceptors.resourceUpdate(), w.inner.Update, resourceUpdateHasError, w.meta)(ctx, request, response) } func (w *wrappedResource) Delete(ctx context.Context, request resource.DeleteRequest, response *resource.DeleteResponse) { - ctx, diags := w.opts.bootstrapContext(ctx, request.State.GetAttribute, w.meta) + ctx, diags := w.context(ctx, request.State.GetAttribute, w.meta) response.Diagnostics.Append(diags...) if response.Diagnostics.HasError() { return } - f := func(ctx context.Context, request *resource.DeleteRequest, response *resource.DeleteResponse) diag.Diagnostics { - w.inner.Delete(ctx, *request, response) - return response.Diagnostics - } - response.Diagnostics.Append(interceptedHandler(w.opts.interceptors.resourceDelete(), f, w.meta)(ctx, &request, response)...) + interceptedHandler(w.interceptors.resourceDelete(), w.inner.Delete, resourceDeleteHasError, w.meta)(ctx, request, response) } func (w *wrappedResource) Configure(ctx context.Context, request resource.ConfigureRequest, response *resource.ConfigureResponse) { @@ -396,7 +687,7 @@ func (w *wrappedResource) Configure(ctx context.Context, request resource.Config w.meta = v } - ctx, diags := w.opts.bootstrapContext(ctx, nil, w.meta) + ctx, diags := w.context(ctx, nil, w.meta) response.Diagnostics.Append(diags...) 
if response.Diagnostics.HasError() { return @@ -407,18 +698,14 @@ func (w *wrappedResource) Configure(ctx context.Context, request resource.Config func (w *wrappedResource) ImportState(ctx context.Context, request resource.ImportStateRequest, response *resource.ImportStateResponse) { if v, ok := w.inner.(resource.ResourceWithImportState); ok { - ctx, diags := w.opts.bootstrapContext(ctx, nil, w.meta) + ctx, diags := w.context(ctx, nil, w.meta) response.Diagnostics.Append(diags...) if response.Diagnostics.HasError() { return } ctx = importer.Context(ctx, w.meta) - f := func(ctx context.Context, request *resource.ImportStateRequest, response *resource.ImportStateResponse) diag.Diagnostics { - v.ImportState(ctx, *request, response) - return response.Diagnostics - } - response.Diagnostics.Append(interceptedHandler(w.opts.interceptors.resourceImportState(), f, w.meta)(ctx, &request, response)...) + interceptedHandler(w.interceptors.resourceImportState(), v.ImportState, resourceImportStateHasError, w.meta)(ctx, request, response) return } @@ -430,31 +717,27 @@ func (w *wrappedResource) ImportState(ctx context.Context, request resource.Impo } func (w *wrappedResource) ModifyPlan(ctx context.Context, request resource.ModifyPlanRequest, response *resource.ModifyPlanResponse) { - ctx, diags := w.opts.bootstrapContext(ctx, request.Config.GetAttribute, w.meta) + ctx, diags := w.context(ctx, request.Config.GetAttribute, w.meta) response.Diagnostics.Append(diags...) if response.Diagnostics.HasError() { return } // We run ModifyPlan interceptors even if the resource has not defined a ModifyPlan method. 
- f := func(ctx context.Context, request *resource.ModifyPlanRequest, response *resource.ModifyPlanResponse) diag.Diagnostics { - return response.Diagnostics + f := func(ctx context.Context, request resource.ModifyPlanRequest, response *resource.ModifyPlanResponse) { } if v, ok := w.inner.(resource.ResourceWithModifyPlan); ok { - f = func(ctx context.Context, request *resource.ModifyPlanRequest, response *resource.ModifyPlanResponse) diag.Diagnostics { - v.ModifyPlan(ctx, *request, response) - return response.Diagnostics - } + f = v.ModifyPlan } - response.Diagnostics.Append(interceptedHandler(w.opts.interceptors.resourceModifyPlan(), f, w.meta)(ctx, &request, response)...) + interceptedHandler(w.interceptors.resourceModifyPlan(), f, resourceModifyPlanHasError, w.meta)(ctx, request, response) } func (w *wrappedResource) ConfigValidators(ctx context.Context) []resource.ConfigValidator { if v, ok := w.inner.(resource.ResourceWithConfigValidators); ok { - ctx, diags := w.opts.bootstrapContext(ctx, nil, w.meta) + ctx, diags := w.context(ctx, nil, w.meta) if diags.HasError() { tflog.Warn(ctx, "wrapping ConfigValidators", map[string]any{ - "resource": w.opts.typeName, + "resource": w.spec.TypeName, "bootstrapContext error": fwdiag.DiagnosticsString(diags), }) @@ -468,7 +751,7 @@ func (w *wrappedResource) ConfigValidators(ctx context.Context) []resource.Confi } func (w *wrappedResource) ValidateConfig(ctx context.Context, request resource.ValidateConfigRequest, response *resource.ValidateConfigResponse) { - ctx, diags := w.opts.bootstrapContext(ctx, request.Config.GetAttribute, w.meta) + ctx, diags := w.context(ctx, request.Config.GetAttribute, w.meta) response.Diagnostics.Append(diags...) 
if response.Diagnostics.HasError() { return @@ -481,10 +764,10 @@ func (w *wrappedResource) ValidateConfig(ctx context.Context, request resource.V func (w *wrappedResource) UpgradeState(ctx context.Context) map[int64]resource.StateUpgrader { if v, ok := w.inner.(resource.ResourceWithUpgradeState); ok { - ctx, diags := w.opts.bootstrapContext(ctx, nil, w.meta) + ctx, diags := w.context(ctx, nil, w.meta) if diags.HasError() { tflog.Warn(ctx, "wrapping UpgradeState", map[string]any{ - "resource": w.opts.typeName, + "resource": w.spec.TypeName, "bootstrapContext error": fwdiag.DiagnosticsString(diags), }) @@ -499,10 +782,10 @@ func (w *wrappedResource) UpgradeState(ctx context.Context) map[int64]resource.S func (w *wrappedResource) MoveState(ctx context.Context) []resource.StateMover { if v, ok := w.inner.(resource.ResourceWithMoveState); ok { - ctx, diags := w.opts.bootstrapContext(ctx, nil, w.meta) + ctx, diags := w.context(ctx, nil, w.meta) if diags.HasError() { tflog.Warn(ctx, "wrapping MoveState", map[string]any{ - "resource": w.opts.typeName, + "resource": w.spec.TypeName, "bootstrapContext error": fwdiag.DiagnosticsString(diags), }) @@ -515,8 +798,275 @@ func (w *wrappedResource) MoveState(ctx context.Context) []resource.StateMover { return nil } -func (w *wrappedResource) IdentitySchema(ctx context.Context, req resource.IdentitySchemaRequest, resp *resource.IdentitySchemaResponse) { - if len(w.opts.identity.Attributes) > 0 { - resp.IdentitySchema = identity.NewIdentitySchema(w.opts.identity) +type wrappedResourceWithIdentity struct { + wrappedResource +} + +func (w *wrappedResourceWithIdentity) IdentitySchema(ctx context.Context, req resource.IdentitySchemaRequest, resp *resource.IdentitySchemaResponse) { + if len(w.spec.Identity.Attributes) > 0 { + resp.IdentitySchema = identity.NewIdentitySchema(w.spec.Identity) + } +} + +type wrappedListResourceFramework struct { + inner list.ListResourceWithConfigure + meta *conns.AWSClient + servicePackageName string + 
spec *inttypes.ServicePackageFrameworkListResource + interceptors interceptorInvocations +} + +var _ list.ListResourceWithConfigure = &wrappedListResourceFramework{} + +func newWrappedListResourceFramework(spec *inttypes.ServicePackageFrameworkListResource, servicePackageName string) list.ListResourceWithConfigure { + var interceptors interceptorInvocations + + var isRegionOverrideEnabled bool + if regionSpec := spec.Region; !tfunique.IsHandleNil(regionSpec) && regionSpec.Value().IsOverrideEnabled { + isRegionOverrideEnabled = true + } + + if isRegionOverrideEnabled { + interceptors = append(interceptors, listResourceInjectRegionAttribute()) + // TODO: validate region in partition, needs tweaked error message + } + + inner := spec.Factory() + + if v, ok := inner.(framework.Identityer); ok { + v.SetIdentitySpec(spec.Identity) + } + + if v, ok := inner.(framework.Lister); ok { + if isRegionOverrideEnabled { + v.AppendResultInterceptor(listresource.SetRegionInterceptor()) + } + + v.AppendResultInterceptor(listresource.IdentityInterceptor(spec.Identity.Attributes)) + + if !tfunique.IsHandleNil(spec.Tags) { + v.AppendResultInterceptor(listresource.TagsInterceptor(spec.Tags)) + } + } + + return &wrappedListResourceFramework{ + inner: inner, + servicePackageName: servicePackageName, + spec: spec, + interceptors: interceptors, + } +} + +// context is run on all wrapped methods before any interceptors. +func (w *wrappedListResourceFramework) context(ctx context.Context, getAttribute getAttributeFunc, c *conns.AWSClient) (context.Context, diag.Diagnostics) { + var diags diag.Diagnostics + var overrideRegion string + + var isRegionOverrideEnabled bool + if regionSpec := w.spec.Region; !tfunique.IsHandleNil(regionSpec) && regionSpec.Value().IsOverrideEnabled { + isRegionOverrideEnabled = true + } + + if isRegionOverrideEnabled && getAttribute != nil { + var target types.String + diags.Append(getAttribute(ctx, path.Root(names.AttrRegion), &target)...) 
+ if diags.HasError() { + return ctx, diags + } + + if target.IsNull() || target.IsUnknown() { + overrideRegion = c.AwsConfig(ctx).Region + } else { + overrideRegion = target.ValueString() + } + } + + ctx = conns.NewResourceContext(ctx, w.servicePackageName, w.spec.Name, overrideRegion) + if c != nil { + ctx = tftags.NewContext(ctx, c.DefaultTagsConfig(ctx), c.IgnoreTagsConfig(ctx)) + ctx = c.RegisterLogger(ctx) + ctx = fwflex.RegisterLogger(ctx) + } + + return ctx, diags +} + +func (w *wrappedListResourceFramework) Configure(ctx context.Context, request resource.ConfigureRequest, response *resource.ConfigureResponse) { + if v, ok := request.ProviderData.(*conns.AWSClient); ok { + w.meta = v + } + + ctx, diags := w.context(ctx, nil, w.meta) + response.Diagnostics.Append(diags...) + if response.Diagnostics.HasError() { + return + } + + w.inner.Configure(ctx, request, response) +} + +func (w *wrappedListResourceFramework) List(ctx context.Context, request list.ListRequest, stream *list.ListResultsStream) { + stream.Results = tfiter.Null[list.ListResult]() + + ctx, diags := w.context(ctx, request.Config.GetAttribute, w.meta) + if len(diags) > 0 { + stream.Results = tfiter.Concat(stream.Results, list.ListResultsStreamDiagnostics(diags)) + } + if diags.HasError() { + return + } + + interceptedListHandler(w.interceptors.resourceList(), w.inner.List, w.meta)(ctx, request, stream) +} + +// ListResourceConfigSchema implements list.ListResourceWithConfigure. +func (w *wrappedListResourceFramework) ListResourceConfigSchema(ctx context.Context, request list.ListResourceSchemaRequest, response *list.ListResourceSchemaResponse) { + ctx, diags := w.context(ctx, nil, w.meta) + response.Diagnostics.Append(diags...) 
+ if response.Diagnostics.HasError() { + return + } + + interceptedHandler(w.interceptors.resourceListResourceConfigSchema(), w.inner.ListResourceConfigSchema, listResourceConfigSchemaHasError, w.meta)(ctx, request, response) +} + +// Metadata implements list.ListResourceWithConfigure. +func (w *wrappedListResourceFramework) Metadata(_ context.Context, request resource.MetadataRequest, response *resource.MetadataResponse) { + // This method does not call down to the inner resource. + response.TypeName = w.spec.TypeName +} + +type wrappedListResourceSDK struct { + inner inttypes.ListResourceForSDK + meta *conns.AWSClient + servicePackageName string + spec *inttypes.ServicePackageSDKListResource + interceptors interceptorInvocations +} + +var _ inttypes.ListResourceForSDK = &wrappedListResourceSDK{} + +func newWrappedListResourceSDK(spec *inttypes.ServicePackageSDKListResource, servicePackageName string) inttypes.ListResourceForSDK { + var interceptors interceptorInvocations + + if v := spec.Region; !tfunique.IsHandleNil(v) && v.Value().IsOverrideEnabled { + interceptors = append(interceptors, listResourceInjectRegionAttribute()) + // TODO: validate region in partition, needs tweaked error message + } + + inner := spec.Factory() + + if v, ok := inner.(framework.WithRegionSpec); ok { + v.SetRegionSpec(spec.Region) + } + + if v, ok := inner.(framework.Identityer); ok { + v.SetIdentitySpec(spec.Identity) + } + + if v, ok := inner.(inttypes.SDKv2Tagger); ok { + if !tfunique.IsHandleNil(spec.Tags) { + v.SetTagsSpec(spec.Tags) + } + } + + return &wrappedListResourceSDK{ + inner: inner, + servicePackageName: servicePackageName, + spec: spec, + interceptors: interceptors, + } +} + +// Metadata implements list.ListResourceWithConfigure. +func (w *wrappedListResourceSDK) Metadata(_ context.Context, request resource.MetadataRequest, response *resource.MetadataResponse) { + // This method does not call down to the inner resource. 
+ response.TypeName = w.spec.TypeName +} + +// context is run on all wrapped methods before any interceptors. +func (w *wrappedListResourceSDK) context(ctx context.Context, getAttribute getAttributeFunc, c *conns.AWSClient) (context.Context, diag.Diagnostics) { + var diags diag.Diagnostics + var overrideRegion string + + var isRegionOverrideEnabled bool + if regionSpec := w.spec.Region; !tfunique.IsHandleNil(regionSpec) && regionSpec.Value().IsOverrideEnabled { + isRegionOverrideEnabled = true + } + + if isRegionOverrideEnabled && getAttribute != nil { + var target types.String + diags.Append(getAttribute(ctx, path.Root(names.AttrRegion), &target)...) + if diags.HasError() { + return ctx, diags + } + + if target.IsNull() || target.IsUnknown() { + overrideRegion = c.AwsConfig(ctx).Region + } else { + overrideRegion = target.ValueString() + } + } + + ctx = conns.NewResourceContext(ctx, w.servicePackageName, w.spec.Name, overrideRegion) + if c != nil { + ctx = tftags.NewContext(ctx, c.DefaultTagsConfig(ctx), c.IgnoreTagsConfig(ctx)) + ctx = c.RegisterLogger(ctx) + ctx = fwflex.RegisterLogger(ctx) + } + + return ctx, diags +} + +func (w *wrappedListResourceSDK) Configure(ctx context.Context, request resource.ConfigureRequest, response *resource.ConfigureResponse) { + if v, ok := request.ProviderData.(*conns.AWSClient); ok { + w.meta = v + } + + ctx, diags := w.context(ctx, nil, w.meta) + response.Diagnostics.Append(diags...) 
+ if response.Diagnostics.HasError() { + return + } + + w.inner.Configure(ctx, request, response) +} + +func (w *wrappedListResourceSDK) List(ctx context.Context, request list.ListRequest, stream *list.ListResultsStream) { + stream.Results = tfiter.Null[list.ListResult]() + + ctx, diags := w.context(ctx, request.Config.GetAttribute, w.meta) + if len(diags) > 0 { + stream.Results = tfiter.Concat(stream.Results, list.ListResultsStreamDiagnostics(diags)) + } + if diags.HasError() { + return + } + + interceptedListHandler(w.interceptors.resourceList(), w.inner.List, w.meta)(ctx, request, stream) +} + +// ListResourceConfigSchema implements list.ListResourceWithConfigure. +func (w *wrappedListResourceSDK) ListResourceConfigSchema(ctx context.Context, request list.ListResourceSchemaRequest, response *list.ListResourceSchemaResponse) { + ctx, diags := w.context(ctx, nil, w.meta) + response.Diagnostics.Append(diags...) + if response.Diagnostics.HasError() { + return + } + + interceptedHandler(w.interceptors.resourceListResourceConfigSchema(), w.inner.ListResourceConfigSchema, listResourceConfigSchemaHasError, w.meta)(ctx, request, response) +} + +func (w *wrappedListResourceSDK) RawV5Schemas(ctx context.Context, request list.RawV5SchemaRequest, response *list.RawV5SchemaResponse) { + if v, ok := w.inner.(list.ListResourceWithRawV5Schemas); ok { + ctx, diags := w.context(ctx, nil, w.meta) + if diags.HasError() { + tflog.Warn(ctx, "wrapping Schemas", map[string]any{ + "resource": w.spec.TypeName, + "bootstrapContext error": fwdiag.DiagnosticsString(diags), + }) + } + + v.RawV5Schemas(ctx, request, response) } } diff --git a/internal/provider/interceptors/htags.go b/internal/provider/interceptors/htags.go index a1f5788cd629..3572e6018e3d 100644 --- a/internal/provider/interceptors/htags.go +++ b/internal/provider/interceptors/htags.go @@ -21,6 +21,10 @@ import ( "github.com/hashicorp/terraform-provider-aws/names" ) +type taggingAWSClient interface { + 
Partition(context.Context) string +} + type HTags unique.Handle[inttypes.ServicePackageResourceTags] func (h HTags) unwrap() unique.Handle[inttypes.ServicePackageResourceTags] { @@ -64,7 +68,7 @@ func (h HTags) Enabled() bool { } // If the service package has a generic resource list tags methods, call it. -func (h HTags) ListTags(ctx context.Context, sp conns.ServicePackage, c *conns.AWSClient, identifier string) error { +func (h HTags) ListTags(ctx context.Context, sp conns.ServicePackage, c taggingAWSClient, identifier string) error { var err error resourceType := h.value().ResourceType @@ -100,7 +104,7 @@ func (h HTags) ListTags(ctx context.Context, sp conns.ServicePackage, c *conns.A } // If the service package has a generic resource update tags methods, call it. -func (h HTags) UpdateTags(ctx context.Context, sp conns.ServicePackage, c *conns.AWSClient, identifier string, oldTags, newTags any) error { +func (h HTags) UpdateTags(ctx context.Context, sp conns.ServicePackage, c taggingAWSClient, identifier string, oldTags, newTags any) error { var err error resourceType := h.value().ResourceType diff --git a/internal/provider/interceptors/info.go b/internal/provider/interceptors/info.go index 433ac0852a18..89d10eb15196 100644 --- a/internal/provider/interceptors/info.go +++ b/internal/provider/interceptors/info.go @@ -11,7 +11,11 @@ import ( "github.com/hashicorp/terraform-provider-aws/names" ) -func InfoFromContext(ctx context.Context, c *conns.AWSClient) (conns.ServicePackage, string, string, *tftags.InContext, bool) { +type infoAWSClient interface { + ServicePackage(_ context.Context, name string) conns.ServicePackage +} + +func InfoFromContext(ctx context.Context, c infoAWSClient) (conns.ServicePackage, string, string, *tftags.InContext, bool) { if inContext, ok := conns.FromContext(ctx); ok { if sp := c.ServicePackage(ctx, inContext.ServicePackageName()); sp != nil { serviceName, err := names.HumanFriendly(sp.ServicePackageName()) diff --git 
a/internal/provider/sdkv2/identity/schema.go b/internal/provider/sdkv2/identity/schema.go index e327a8f17cb1..c34d9ddec178 100644 --- a/internal/provider/sdkv2/identity/schema.go +++ b/internal/provider/sdkv2/identity/schema.go @@ -11,7 +11,7 @@ import ( func NewIdentitySchema(identitySpec inttypes.Identity) map[string]*schema.Schema { identitySchema := make(map[string]*schema.Schema, len(identitySpec.Attributes)) for _, attr := range identitySpec.Attributes { - identitySchema[attr.Name] = newIdentityAttribute(attr) + identitySchema[attr.Name()] = newIdentityAttribute(attr) } return identitySchema } @@ -20,7 +20,7 @@ func newIdentityAttribute(attribute inttypes.IdentityAttribute) *schema.Schema { attr := &schema.Schema{ Type: schema.TypeString, } - if attribute.Required { + if attribute.Required() { attr.RequiredForImport = true } else { attr.OptionalForImport = true diff --git a/internal/provider/sdkv2/identity_interceptor.go b/internal/provider/sdkv2/identity_interceptor.go index e8e081d4909a..a3c838280c82 100644 --- a/internal/provider/sdkv2/identity_interceptor.go +++ b/internal/provider/sdkv2/identity_interceptor.go @@ -11,7 +11,6 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" "github.com/hashicorp/terraform-provider-aws/internal/provider/sdkv2/identity" "github.com/hashicorp/terraform-provider-aws/internal/provider/sdkv2/importer" - tfslices "github.com/hashicorp/terraform-provider-aws/internal/slices" inttypes "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -19,7 +18,7 @@ import ( var _ crudInterceptor = identityInterceptor{} type identityInterceptor struct { - attributes []string + identitySpec *inttypes.Identity } func (r identityInterceptor) run(ctx context.Context, opts crudInterceptorOptions) diag.Diagnostics { @@ -29,7 +28,10 @@ func (r identityInterceptor) run(ctx context.Context, opts crudInterceptorOption switch d, when, why := opts.d, opts.when, 
opts.why; when { case After: switch why { - case Create, Read: + case Create, Read, Update: + if why == Update && !(r.identitySpec.IsMutable && r.identitySpec.IsSetOnUpdate) && !identityIsFullyNull(d, r.identitySpec) { + break + } if d.Id() == "" { break } @@ -38,52 +40,109 @@ func (r identityInterceptor) run(ctx context.Context, opts crudInterceptorOption return sdkdiag.AppendFromErr(diags, err) } - for _, attr := range r.attributes { - switch attr { + for _, attr := range r.identitySpec.Attributes { + switch attr.Name() { case names.AttrAccountID: - if err := identity.Set(attr, awsClient.AccountID(ctx)); err != nil { + if err := identity.Set(attr.Name(), awsClient.AccountID(ctx)); err != nil { return sdkdiag.AppendFromErr(diags, err) } case names.AttrRegion: - if err := identity.Set(attr, awsClient.Region(ctx)); err != nil { + if err := identity.Set(attr.Name(), awsClient.Region(ctx)); err != nil { return sdkdiag.AppendFromErr(diags, err) } default: - val, ok := getAttributeOk(d, attr) + val, ok := getAttributeOk(d, attr.ResourceAttributeName()) if !ok { continue } - if err := identity.Set(attr, val); err != nil { + if err := identity.Set(attr.Name(), val); err != nil { return sdkdiag.AppendFromErr(diags, err) } } } } + case OnError: + switch why { + case Update: + if identityIsFullyNull(d, r.identitySpec) { + if d.Id() == "" { + break + } + identity, err := d.Identity() + if err != nil { + return sdkdiag.AppendFromErr(diags, err) + } + + for _, attr := range r.identitySpec.Attributes { + switch attr.Name() { + case names.AttrAccountID: + if err := identity.Set(attr.Name(), awsClient.AccountID(ctx)); err != nil { + return sdkdiag.AppendFromErr(diags, err) + } + + case names.AttrRegion: + if err := identity.Set(attr.Name(), awsClient.Region(ctx)); err != nil { + return sdkdiag.AppendFromErr(diags, err) + } + + default: + val, ok := getAttributeOk(d, attr.ResourceAttributeName()) + if !ok { + continue + } + if err := identity.Set(attr.Name(), val); err != nil { + 
return sdkdiag.AppendFromErr(diags, err) + } + } + } + } + } } return diags } +// identityIsFullyNull returns true if a resource supports identity and +// all attributes are set to null values +func identityIsFullyNull(d schemaResourceData, identitySpec *inttypes.Identity) bool { + identity, err := d.Identity() + if err != nil { + return false + } + + for _, attr := range identitySpec.Attributes { + value := identity.Get(attr.Name()) + if value != "" { + return false + } + } + + return true +} + func getAttributeOk(d schemaResourceData, name string) (string, bool) { if name == "id" { return d.Id(), true } - v, ok := d.GetOk(name) - return v.(string), ok + if v, ok := d.GetOk(name); !ok { + return "", false + } else { + return v.(string), true + } } -func newIdentityInterceptor(attributes []inttypes.IdentityAttribute) interceptorInvocation { - return interceptorInvocation{ - when: After, - why: Create | Read, +func newIdentityInterceptor(identitySpec *inttypes.Identity) interceptorInvocation { + interceptor := interceptorInvocation{ + when: After | OnError, + why: Create | Read | Update, interceptor: identityInterceptor{ - attributes: tfslices.ApplyToAll(attributes, func(v inttypes.IdentityAttribute) string { - return v.Name - }), + identitySpec: identitySpec, }, } + + return interceptor } func newResourceIdentity(v inttypes.Identity) *schema.ResourceIdentity { @@ -99,7 +158,7 @@ func newParameterizedIdentityImporter(identitySpec inttypes.Identity, importSpec if identitySpec.IsGlobalResource { return &schema.ResourceImporter{ StateContext: func(ctx context.Context, rd *schema.ResourceData, meta any) ([]*schema.ResourceData, error) { - if err := importer.GlobalSingleParameterized(ctx, rd, identitySpec.IdentityAttribute, meta.(importer.AWSClient)); err != nil { + if err := importer.GlobalSingleParameterized(ctx, rd, identitySpec, meta.(importer.AWSClient)); err != nil { return nil, err } @@ -109,7 +168,7 @@ func newParameterizedIdentityImporter(identitySpec 
inttypes.Identity, importSpec } else { return &schema.ResourceImporter{ StateContext: func(ctx context.Context, rd *schema.ResourceData, meta any) ([]*schema.ResourceData, error) { - if err := importer.RegionalSingleParameterized(ctx, rd, identitySpec.IdentityAttribute, meta.(importer.AWSClient)); err != nil { + if err := importer.RegionalSingleParameterized(ctx, rd, identitySpec, meta.(importer.AWSClient)); err != nil { return nil, err } @@ -121,7 +180,7 @@ func newParameterizedIdentityImporter(identitySpec inttypes.Identity, importSpec if identitySpec.IsGlobalResource { return &schema.ResourceImporter{ StateContext: func(ctx context.Context, rd *schema.ResourceData, meta any) ([]*schema.ResourceData, error) { - if err := importer.GlobalMultipleParameterized(ctx, rd, identitySpec.Attributes, importSpec, meta.(importer.AWSClient)); err != nil { + if err := importer.GlobalMultipleParameterized(ctx, rd, identitySpec, importSpec, meta.(importer.AWSClient)); err != nil { return nil, err } @@ -131,7 +190,7 @@ func newParameterizedIdentityImporter(identitySpec inttypes.Identity, importSpec } else { return &schema.ResourceImporter{ StateContext: func(ctx context.Context, rd *schema.ResourceData, meta any) ([]*schema.ResourceData, error) { - if err := importer.RegionalMultipleParameterized(ctx, rd, identitySpec.Attributes, importSpec, meta.(importer.AWSClient)); err != nil { + if err := importer.RegionalMultipleParameterized(ctx, rd, identitySpec, importSpec, meta.(importer.AWSClient)); err != nil { return nil, err } @@ -146,7 +205,7 @@ func arnIdentityResourceImporter(identity inttypes.Identity) *schema.ResourceImp if identity.IsGlobalResource { return &schema.ResourceImporter{ StateContext: func(ctx context.Context, rd *schema.ResourceData, meta any) ([]*schema.ResourceData, error) { - if err := importer.GlobalARN(ctx, rd, identity.IdentityAttribute, identity.IdentityDuplicateAttrs); err != nil { + if err := importer.GlobalARN(ctx, rd, identity); err != nil { return nil, 
err } @@ -156,7 +215,7 @@ func arnIdentityResourceImporter(identity inttypes.Identity) *schema.ResourceImp } else { return &schema.ResourceImporter{ StateContext: func(ctx context.Context, rd *schema.ResourceData, meta any) ([]*schema.ResourceData, error) { - if err := importer.RegionalARN(ctx, rd, identity.IdentityAttribute, identity.IdentityDuplicateAttrs); err != nil { + if err := importer.RegionalARN(ctx, rd, identity); err != nil { return nil, err } @@ -190,3 +249,15 @@ func singletonIdentityResourceImporter(identity inttypes.Identity) *schema.Resou } } } + +func customResourceImporter(r *schema.Resource, identity *inttypes.Identity, importSpec *inttypes.SDKv2Import) { + importF := r.Importer.StateContext + + r.Importer = &schema.ResourceImporter{ + StateContext: func(ctx context.Context, rd *schema.ResourceData, meta any) ([]*schema.ResourceData, error) { + ctx = importer.Context(ctx, identity, importSpec) + + return importF(ctx, rd, meta) + }, + } +} diff --git a/internal/provider/sdkv2/identity_interceptor_test.go b/internal/provider/sdkv2/identity_interceptor_test.go new file mode 100644 index 000000000000..eba5ca6a9651 --- /dev/null +++ b/internal/provider/sdkv2/identity_interceptor_test.go @@ -0,0 +1,403 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package sdkv2 + +import ( + "context" + "testing" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/provider/sdkv2/identity" + "github.com/hashicorp/terraform-provider-aws/internal/provider/sdkv2/internal/attribute" + tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" + inttypes "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestIdentityInterceptor(t *testing.T) { + t.Parallel() + + accountID := "123456789012" + region := "us-west-2" //lintignore:AWSAT003 + name := "a_name" + + resourceSchema := map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + }, + "type": { + Type: schema.TypeString, + Optional: true, + }, + "region": attribute.Region(), + } + + client := mockClient{ + accountID: accountID, + region: region, + } + + testCases := map[string]struct { + attrName string + identitySpec inttypes.Identity + }{ + "same names": { + attrName: "name", + identitySpec: regionalSingleParameterizedIdentitySpec("name"), + }, + "name mapped": { + attrName: "resource_name", + identitySpec: regionalSingleParameterizedIdentitySpecNameMapped("resource_name", "name"), + }, + } + + for tname, tc := range testCases { + t.Run(tname, func(t *testing.T) { + t.Parallel() + ctx := t.Context() + + invocation := newIdentityInterceptor(&tc.identitySpec) + interceptor := invocation.interceptor.(identityInterceptor) + + identitySchema := identity.NewIdentitySchema(tc.identitySpec) + + d := schema.TestResourceDataWithIdentityRaw(t, resourceSchema, identitySchema, nil) + d.SetId("some_id") + d.Set("name", name) + d.Set("region", region) + d.Set("type", "some_type") + + opts := crudInterceptorOptions{ + c: client, + d: d, + when: After, + why: Create, + } + + 
interceptor.run(ctx, opts) + + identity, err := d.Identity() + if err != nil { + t.Fatalf("unexpected error getting identity: %v", err) + } + + if e, a := accountID, identity.Get(names.AttrAccountID); e != a { + t.Errorf("expected account ID %q, got %q", e, a) + } + if e, a := region, identity.Get(names.AttrRegion); e != a { + t.Errorf("expected region %q, got %q", e, a) + } + if e, a := name, identity.Get(tc.attrName); e != a { + t.Errorf("expected %s %q, got %q", tc.attrName, e, a) + } + }) + } +} + +func TestIdentityInterceptor_Read_Removed(t *testing.T) { + t.Parallel() + + accountID := "123456789012" + region := "us-west-2" //lintignore:AWSAT003 + name := "a_name" + + resourceSchema := map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + }, + "type": { + Type: schema.TypeString, + Optional: true, + }, + "region": attribute.Region(), + } + + identitySpec := regionalSingleParameterizedIdentitySpec("name") + identitySchema := identity.NewIdentitySchema(identitySpec) + + invocation := newIdentityInterceptor(&identitySpec) + interceptor := invocation.interceptor.(identityInterceptor) + + client := mockClient{ + accountID: accountID, + region: region, + } + + ctx := t.Context() + + d := schema.TestResourceDataWithIdentityRaw(t, resourceSchema, identitySchema, nil) + d.SetId("") + d.Set("name", name) + d.Set("region", region) + d.Set("type", "some_type") + + opts := crudInterceptorOptions{ + c: client, + d: d, + when: After, + why: Read, + } + + interceptor.run(ctx, opts) + + identity, err := d.Identity() + if err != nil { + t.Fatalf("unexpected error getting identity: %v", err) + } + + if identity.Get(names.AttrAccountID) != "" { + t.Errorf("expected no account ID, got %q", identity.Get(names.AttrAccountID)) + } + if identity.Get(names.AttrRegion) != "" { + t.Errorf("expected no region, got %q", identity.Get(names.AttrRegion)) + } + if identity.Get("name") != "" { + t.Errorf("expected no name, got %q", identity.Get("name")) + } +} + 
+func TestIdentityInterceptor_Update(t *testing.T) { + t.Parallel() + + accountID := "123456789012" + region := "us-west-2" //lintignore:AWSAT003 + name := "a_name" + + resourceSchema := map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + }, + "type": { + Type: schema.TypeString, + Optional: true, + }, + "region": attribute.Region(), + } + + client := mockClient{ + accountID: accountID, + region: region, + } + + testCases := map[string]struct { + attrName string + identitySpec inttypes.Identity + ExpectIdentity bool + Description string + }{ + "not mutable - fresh resource": { + attrName: "name", + identitySpec: regionalSingleParameterizedIdentitySpec("name"), + ExpectIdentity: true, + Description: "Immutable identity with all null attributes should get populated (bug fix scenario)", + }, + "v6.0 SDK fix": { + attrName: "name", + identitySpec: regionalSingleParameterizedIdentitySpec("name", + inttypes.WithV6_0SDKv2Fix(), + ), + ExpectIdentity: true, + Description: "Mutable identity (v6.0 SDK fix) should always get populated on Update", + }, + "identity fix": { + attrName: "name", + identitySpec: regionalSingleParameterizedIdentitySpec("name", + inttypes.WithIdentityFix(), + ), + ExpectIdentity: true, + Description: "Mutable identity (identity fix) should always get populated on Update", + }, + "mutable": { + attrName: "name", + identitySpec: regionalSingleParameterizedIdentitySpec("name", + inttypes.WithMutableIdentity(), + ), + ExpectIdentity: true, + Description: "Explicitly mutable identity should always get populated on Update", + }, + } + + for tname, tc := range testCases { + t.Run(tname, func(t *testing.T) { + t.Parallel() + ctx := t.Context() + + invocation := newIdentityInterceptor(&tc.identitySpec) + interceptor := invocation.interceptor.(identityInterceptor) + + identitySchema := identity.NewIdentitySchema(tc.identitySpec) + + d := schema.TestResourceDataWithIdentityRaw(t, resourceSchema, identitySchema, nil) + 
d.SetId("some_id") + d.Set("name", name) + d.Set("region", region) + d.Set("type", "some_type") + + opts := crudInterceptorOptions{ + c: client, + d: d, + when: After, + why: Update, + } + + interceptor.run(ctx, opts) + + identity, err := d.Identity() + if err != nil { + t.Fatalf("unexpected error getting identity: %v", err) + } + + if tc.ExpectIdentity { + if e, a := accountID, identity.Get(names.AttrAccountID); e != a { + t.Errorf("expected account ID %q, got %q", e, a) + } + if e, a := region, identity.Get(names.AttrRegion); e != a { + t.Errorf("expected region %q, got %q", e, a) + } + if e, a := name, identity.Get(tc.attrName); e != a { + t.Errorf("expected %s %q, got %q", tc.attrName, e, a) + } + } else { + if identity.Get(names.AttrAccountID) != "" { + t.Errorf("expected no account ID, got %q", identity.Get(names.AttrAccountID)) + } + if identity.Get(names.AttrRegion) != "" { + t.Errorf("expected no region, got %q", identity.Get(names.AttrRegion)) + } + if identity.Get(tc.attrName) != "" { + t.Errorf("expected no %s, got %q", tc.attrName, identity.Get(tc.attrName)) + } + } + }) + } +} + +func regionalSingleParameterizedIdentitySpec(attrName string, opts ...inttypes.IdentityOptsFunc) inttypes.Identity { + return inttypes.RegionalSingleParameterIdentity(attrName, opts...) 
+} + +func regionalSingleParameterizedIdentitySpecNameMapped(identityAttrName, resourceAttrName string) inttypes.Identity { + return inttypes.RegionalSingleParameterIdentityWithMappedName(identityAttrName, resourceAttrName) +} + +type mockClient struct { + accountID string + region string +} + +func (c mockClient) AccountID(_ context.Context) string { + return c.accountID +} + +func (c mockClient) Region(_ context.Context) string { + return c.region +} + +func (c mockClient) DefaultTagsConfig(ctx context.Context) *tftags.DefaultConfig { + panic("not implemented") //lintignore:R009 +} + +func (c mockClient) IgnoreTagsConfig(ctx context.Context) *tftags.IgnoreConfig { + panic("not implemented") //lintignore:R009 +} + +func (c mockClient) Partition(context.Context) string { + panic("not implemented") //lintignore:R009 +} + +func (c mockClient) ServicePackage(_ context.Context, name string) conns.ServicePackage { + panic("not implemented") //lintignore:R009 +} + +func (c mockClient) ValidateInContextRegionInPartition(ctx context.Context) error { + panic("not implemented") //lintignore:R009 +} + +func (c mockClient) AwsConfig(context.Context) aws.Config { // nosemgrep:ci.aws-in-func-name + panic("not implemented") //lintignore:R009 +} + +func TestIdentityIsFullyNull(t *testing.T) { + t.Parallel() + + identitySpec := &inttypes.Identity{ + Attributes: []inttypes.IdentityAttribute{ + inttypes.StringIdentityAttribute(names.AttrAccountID, false), + inttypes.StringIdentityAttribute(names.AttrRegion, false), + inttypes.StringIdentityAttribute(names.AttrBucket, true), + }, + } + + testCases := map[string]struct { + identityValues map[string]string + expectNull bool + description string + }{ + "all_null": { + identityValues: map[string]string{}, + expectNull: true, + description: "All attributes null should return true", + }, + "some_null": { + identityValues: map[string]string{ + names.AttrAccountID: "123456789012", + // region and bucket remain null + }, + expectNull: false, + 
description: "Some attributes set should return false", + }, + "all_set": { + identityValues: map[string]string{ + names.AttrAccountID: "123456789012", + names.AttrRegion: "us-west-2", // lintignore:AWSAT003 + names.AttrBucket: "test-bucket", + }, + expectNull: false, + description: "All attributes set should return false", + }, + "empty_string_values": { + identityValues: map[string]string{ + names.AttrAccountID: "", + names.AttrRegion: "", + names.AttrBucket: "", + }, + expectNull: true, + description: "Empty string values should be treated as null", + }, + } + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + t.Parallel() + + resourceSchema := map[string]*schema.Schema{ + names.AttrBucket: {Type: schema.TypeString, Required: true}, + } + identitySchema := identity.NewIdentitySchema(*identitySpec) + d := schema.TestResourceDataWithIdentityRaw(t, resourceSchema, identitySchema, nil) + d.SetId("test-id") + + identity, err := d.Identity() + if err != nil { + t.Fatalf("unexpected error getting identity: %v", err) + } + for attrName, value := range tc.identityValues { + if err := identity.Set(attrName, value); err != nil { + t.Fatalf("unexpected error setting %s in identity: %v", attrName, err) + } + } + + result := identityIsFullyNull(d, identitySpec) + if result != tc.expectNull { + t.Errorf("%s: expected identityIsFullyNull to return %v, got %v", + tc.description, tc.expectNull, result) + } + }) + } +} diff --git a/internal/provider/sdkv2/importer/arn.go b/internal/provider/sdkv2/importer/arn.go index 4860908aa06d..2e33fbce0b20 100644 --- a/internal/provider/sdkv2/importer/arn.go +++ b/internal/provider/sdkv2/importer/arn.go @@ -9,17 +9,20 @@ import ( "github.com/aws/aws-sdk-go-v2/aws/arn" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + inttypes "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/names" ) -func RegionalARN(_ context.Context, rd *schema.ResourceData, attrName 
string, duplicateAttrs []string) error { +func RegionalARN(_ context.Context, rd *schema.ResourceData, identitySpec inttypes.Identity) error { + attr := identitySpec.Attributes[0] + if rd.Id() != "" { arnARN, err := arn.Parse(rd.Id()) if err != nil { - return fmt.Errorf("could not parse import ID %q as ARN: %s", rd.Id(), err) + return fmt.Errorf("could not parse import ID %q as ARN: %w", rd.Id(), err) } - rd.Set(attrName, rd.Id()) - for _, attr := range duplicateAttrs { + rd.Set(attr.ResourceAttributeName(), rd.Id()) + for _, attr := range identitySpec.IdentityDuplicateAttrs { setAttribute(rd, attr, rd.Id()) } @@ -39,25 +42,25 @@ func RegionalARN(_ context.Context, rd *schema.ResourceData, attrName string, du return err } - arnRaw, ok := identity.GetOk(attrName) + arnRaw, ok := identity.GetOk(attr.Name()) if !ok { - return fmt.Errorf("identity attribute %q is required", attrName) + return fmt.Errorf("identity attribute %q is required", attr.Name()) } arnVal, ok := arnRaw.(string) if !ok { - return fmt.Errorf("identity attribute %q: expected string, got %T", attrName, arnRaw) + return fmt.Errorf("identity attribute %q: expected string, got %T", attr.Name(), arnRaw) } arnARN, err := arn.Parse(arnVal) if err != nil { - return fmt.Errorf("identity attribute %q: could not parse %q as ARN: %s", attrName, arnVal, err) + return fmt.Errorf("identity attribute %q: could not parse %q as ARN: %w", attr.Name(), arnVal, err) } rd.Set(names.AttrRegion, arnARN.Region) - rd.Set(attrName, arnVal) - for _, attr := range duplicateAttrs { + rd.Set(attr.ResourceAttributeName(), arnVal) + for _, attr := range identitySpec.IdentityDuplicateAttrs { setAttribute(rd, attr, arnVal) } @@ -67,7 +70,7 @@ func RegionalARN(_ context.Context, rd *schema.ResourceData, attrName string, du func RegionalARNValue(_ context.Context, rd *schema.ResourceData, attrName string, arnValue string) error { arnARN, err := arn.Parse(arnValue) if err != nil { - return fmt.Errorf("could not parse %q as ARN: %s", 
arnValue, err) + return fmt.Errorf("could not parse %q as ARN: %w", arnValue, err) } rd.Set(attrName, arnValue) @@ -84,14 +87,16 @@ func RegionalARNValue(_ context.Context, rd *schema.ResourceData, attrName strin return nil } -func GlobalARN(_ context.Context, rd *schema.ResourceData, attrName string, duplicateAttrs []string) error { +func GlobalARN(_ context.Context, rd *schema.ResourceData, identitySpec inttypes.Identity) error { + attr := identitySpec.Attributes[0] + if rd.Id() != "" { _, err := arn.Parse(rd.Id()) if err != nil { - return fmt.Errorf("could not parse import ID %q as ARN: %s", rd.Id(), err) + return fmt.Errorf("could not parse import ID %q as ARN: %w", rd.Id(), err) } - rd.Set(attrName, rd.Id()) - for _, attr := range duplicateAttrs { + rd.Set(attr.ResourceAttributeName(), rd.Id()) + for _, attr := range identitySpec.IdentityDuplicateAttrs { setAttribute(rd, attr, rd.Id()) } @@ -103,23 +108,23 @@ func GlobalARN(_ context.Context, rd *schema.ResourceData, attrName string, dupl return err } - arnRaw, ok := identity.GetOk(attrName) + arnRaw, ok := identity.GetOk(attr.Name()) if !ok { - return fmt.Errorf("identity attribute %q is required", attrName) + return fmt.Errorf("identity attribute %q is required", attr.Name()) } arnVal, ok := arnRaw.(string) if !ok { - return fmt.Errorf("identity attribute %q: expected string, got %T", attrName, arnRaw) + return fmt.Errorf("identity attribute %q: expected string, got %T", attr.Name(), arnRaw) } _, err = arn.Parse(arnVal) if err != nil { - return fmt.Errorf("identity attribute %q: could not parse %q as ARN: %s", attrName, arnVal, err) + return fmt.Errorf("identity attribute %q: could not parse %q as ARN: %w", attr.Name(), arnVal, err) } - rd.Set(attrName, arnVal) - for _, attr := range duplicateAttrs { + rd.Set(attr.ResourceAttributeName(), arnVal) + for _, attr := range identitySpec.IdentityDuplicateAttrs { setAttribute(rd, attr, arnVal) } diff --git a/internal/provider/sdkv2/importer/arn_test.go 
b/internal/provider/sdkv2/importer/arn_test.go index 6f8388ab258b..4b253334805e 100644 --- a/internal/provider/sdkv2/importer/arn_test.go +++ b/internal/provider/sdkv2/importer/arn_test.go @@ -13,6 +13,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/provider/sdkv2/importer" "github.com/hashicorp/terraform-provider-aws/internal/provider/sdkv2/internal/attribute" + inttypes "github.com/hashicorp/terraform-provider-aws/internal/types" ) var regionalARNSchema = map[string]*schema.Schema{ @@ -41,7 +42,11 @@ func TestRegionalARN_ImportID_Invalid_NotAnARN(t *testing.T) { rd := schema.TestResourceDataRaw(t, regionalARNSchema, map[string]any{}) rd.SetId("not a valid ARN") - err := importer.RegionalARN(context.Background(), rd, "arn", []string{"id"}) + identity := inttypes.RegionalARNIdentityNamed("arn", + inttypes.WithIdentityDuplicateAttrs("id"), + ) + + err := importer.RegionalARN(context.Background(), rd, identity) if err != nil { if !strings.HasPrefix(err.Error(), "could not parse import ID") { t.Fatalf("Unexpected error: %s", err) @@ -65,7 +70,11 @@ func TestRegionalARN_ImportID_Invalid_WrongRegion(t *testing.T) { Resource: "res-abc123", }.String()) - err := importer.RegionalARN(context.Background(), rd, "arn", []string{"id"}) + identity := inttypes.RegionalARNIdentityNamed("arn", + inttypes.WithIdentityDuplicateAttrs("id"), + ) + + err := importer.RegionalARN(context.Background(), rd, identity) if err != nil { if !strings.HasPrefix(err.Error(), "the region passed for import") { t.Fatalf("Unexpected error: %s", err) @@ -89,7 +98,11 @@ func TestRegionalARN_ImportID_Valid_DefaultRegion(t *testing.T) { }.String() rd.SetId(arn) - err := importer.RegionalARN(context.Background(), rd, "arn", []string{"id"}) + identity := inttypes.RegionalARNIdentityNamed("arn", + inttypes.WithIdentityDuplicateAttrs("id"), + ) + + err := importer.RegionalARN(context.Background(), rd, identity) if err != nil { 
t.Fatalf("Unexpected error: %s", err) } @@ -124,7 +137,11 @@ func TestRegionalARN_ImportID_Valid_RegionOverride(t *testing.T) { }.String() rd.SetId(arn) - err := importer.RegionalARN(context.Background(), rd, "arn", []string{"id"}) + identity := inttypes.RegionalARNIdentityNamed("arn", + inttypes.WithIdentityDuplicateAttrs("id"), + ) + + err := importer.RegionalARN(context.Background(), rd, identity) if err != nil { t.Fatalf("Unexpected error: %s", err) } @@ -148,7 +165,11 @@ func TestRegionalARN_Identity_Invalid_AttributeNotSet(t *testing.T) { rd := schema.TestResourceDataWithIdentityRaw(t, regionalARNSchema, regionalARNIdentitySchema, map[string]string{}) - err := importer.RegionalARN(context.Background(), rd, "arn", []string{"id"}) + identity := inttypes.RegionalARNIdentityNamed("arn", + inttypes.WithIdentityDuplicateAttrs("id"), + ) + + err := importer.RegionalARN(context.Background(), rd, identity) if err != nil { if err.Error() != fmt.Sprintf("identity attribute %q is required", "arn") { t.Fatalf("Unexpected error: %s", err) @@ -165,7 +186,11 @@ func TestRegionalARN_Identity_Invalid_NotAnARN(t *testing.T) { "arn": "not a valid ARN", }) - err := importer.RegionalARN(context.Background(), rd, "arn", []string{"id"}) + identity := inttypes.RegionalARNIdentityNamed("arn", + inttypes.WithIdentityDuplicateAttrs("id"), + ) + + err := importer.RegionalARN(context.Background(), rd, identity) if err != nil { if !strings.HasPrefix(err.Error(), fmt.Sprintf("identity attribute %q: could not parse", "arn")) { t.Fatalf("Unexpected error: %s", err) @@ -190,7 +215,11 @@ func TestRegionalARN_Identity_Valid(t *testing.T) { "arn": arn, }) - err := importer.RegionalARN(context.Background(), rd, "arn", []string{"id"}) + identity := inttypes.RegionalARNIdentityNamed("arn", + inttypes.WithIdentityDuplicateAttrs("id"), + ) + + err := importer.RegionalARN(context.Background(), rd, identity) if err != nil { t.Fatalf("Unexpected error: %s", err) } @@ -223,7 +252,11 @@ func 
TestRegionalARN_DuplicateAttrs_ImportID_Valid(t *testing.T) { }.String() rd.SetId(arn) - err := importer.RegionalARN(context.Background(), rd, "arn", []string{"id", "attr"}) + identity := inttypes.RegionalARNIdentityNamed("arn", + inttypes.WithIdentityDuplicateAttrs("id", "attr"), + ) + + err := importer.RegionalARN(context.Background(), rd, identity) if err != nil { t.Fatalf("Unexpected error: %s", err) } @@ -257,7 +290,11 @@ func TestRegionalARN_DuplicateAttrs_Identity_Valid(t *testing.T) { "arn": arn, }) - err := importer.RegionalARN(context.Background(), rd, "arn", []string{"id", "attr"}) + identity := inttypes.RegionalARNIdentityNamed("arn", + inttypes.WithIdentityDuplicateAttrs("id", "attr"), + ) + + err := importer.RegionalARN(context.Background(), rd, identity) if err != nil { t.Fatalf("Unexpected error: %s", err) } @@ -301,7 +338,11 @@ func TestGlobalARN_ImportID_Invalid_NotAnARN(t *testing.T) { rd := schema.TestResourceDataRaw(t, globalARNSchema, map[string]any{}) rd.SetId("not a valid ARN") - err := importer.GlobalARN(context.Background(), rd, "arn", []string{"id"}) + identity := inttypes.RegionalARNIdentityNamed("arn", + inttypes.WithIdentityDuplicateAttrs("id"), + ) + + err := importer.GlobalARN(context.Background(), rd, identity) if err != nil { if !strings.HasPrefix(err.Error(), "could not parse import ID") { t.Fatalf("Unexpected error: %s", err) @@ -324,7 +365,11 @@ func TestGlobalARN_ImportID_Valid(t *testing.T) { }.String() rd.SetId(arn) - err := importer.GlobalARN(context.Background(), rd, "arn", []string{"id"}) + identity := inttypes.RegionalARNIdentityNamed("arn", + inttypes.WithIdentityDuplicateAttrs("id"), + ) + + err := importer.GlobalARN(context.Background(), rd, identity) if err != nil { t.Fatalf("Unexpected error: %s", err) } @@ -345,7 +390,11 @@ func TestGlobalARN_Identity_Invalid_AttributeNotSet(t *testing.T) { rd := schema.TestResourceDataWithIdentityRaw(t, globalARNSchema, globalARNIdentitySchema, map[string]string{}) - err := 
importer.GlobalARN(context.Background(), rd, "arn", []string{"id"}) + identity := inttypes.RegionalARNIdentityNamed("arn", + inttypes.WithIdentityDuplicateAttrs("id"), + ) + + err := importer.GlobalARN(context.Background(), rd, identity) if err != nil { if err.Error() != fmt.Sprintf("identity attribute %q is required", "arn") { t.Fatalf("Unexpected error: %s", err) @@ -362,7 +411,11 @@ func TestGlobalARN_Identity_Invalid_NotAnARN(t *testing.T) { "arn": "not a valid ARN", }) - err := importer.GlobalARN(context.Background(), rd, "arn", []string{"id"}) + identity := inttypes.RegionalARNIdentityNamed("arn", + inttypes.WithIdentityDuplicateAttrs("id"), + ) + + err := importer.GlobalARN(context.Background(), rd, identity) if err != nil { if !strings.HasPrefix(err.Error(), fmt.Sprintf("identity attribute %q: could not parse", "arn")) { t.Fatalf("Unexpected error: %s", err) @@ -386,7 +439,11 @@ func TestGlobalARN_Identity_Valid(t *testing.T) { "arn": arn, }) - err := importer.GlobalARN(context.Background(), rd, "arn", []string{"id"}) + identity := inttypes.RegionalARNIdentityNamed("arn", + inttypes.WithIdentityDuplicateAttrs("id"), + ) + + err := importer.GlobalARN(context.Background(), rd, identity) if err != nil { t.Fatalf("Unexpected error: %s", err) } @@ -415,7 +472,11 @@ func TestGlobalARN_DuplicateAttrs_ImportID_Valid(t *testing.T) { }.String() rd.SetId(arn) - err := importer.GlobalARN(context.Background(), rd, "arn", []string{"id", "attr"}) + identity := inttypes.RegionalARNIdentityNamed("arn", + inttypes.WithIdentityDuplicateAttrs("id", "attr"), + ) + + err := importer.GlobalARN(context.Background(), rd, identity) if err != nil { t.Fatalf("Unexpected error: %s", err) } @@ -445,7 +506,11 @@ func TestGlobalARN_DuplicateAttrs_Identity_Valid(t *testing.T) { "arn": arn, }) - err := importer.GlobalARN(context.Background(), rd, "arn", []string{"id", "attr"}) + identity := inttypes.RegionalARNIdentityNamed("arn", + inttypes.WithIdentityDuplicateAttrs("id", "attr"), + ) + + 
err := importer.GlobalARN(context.Background(), rd, identity) if err != nil { t.Fatalf("Unexpected error: %s", err) } diff --git a/internal/provider/sdkv2/importer/context.go b/internal/provider/sdkv2/importer/context.go new file mode 100644 index 000000000000..3b97b794d9c5 --- /dev/null +++ b/internal/provider/sdkv2/importer/context.go @@ -0,0 +1,39 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package importer + +import ( + "context" + + inttypes "github.com/hashicorp/terraform-provider-aws/internal/types" +) + +type contextKey int + +const ( + identitySpecKey contextKey = 1 + importSpecKey contextKey = 2 +) + +func Context(ctx context.Context, identity *inttypes.Identity, importSpec *inttypes.SDKv2Import) context.Context { + ctx = context.WithValue(ctx, identitySpecKey, identity) + ctx = context.WithValue(ctx, importSpecKey, importSpec) + return ctx +} + +func IdentitySpec(ctx context.Context) inttypes.Identity { + val := ctx.Value(identitySpecKey) + if identity, ok := val.(*inttypes.Identity); ok { + return *identity + } + return inttypes.Identity{} +} + +func ImportSpec(ctx context.Context) inttypes.SDKv2Import { + val := ctx.Value(importSpecKey) + if importSpec, ok := val.(*inttypes.SDKv2Import); ok { + return *importSpec + } + return inttypes.SDKv2Import{} +} diff --git a/internal/provider/sdkv2/importer/parameterized.go b/internal/provider/sdkv2/importer/parameterized.go index 84120f91754b..6d62d6c63280 100644 --- a/internal/provider/sdkv2/importer/parameterized.go +++ b/internal/provider/sdkv2/importer/parameterized.go @@ -12,11 +12,13 @@ import ( "github.com/hashicorp/terraform-provider-aws/names" ) -func RegionalSingleParameterized(ctx context.Context, rd *schema.ResourceData, attrName string, client AWSClient) error { +func RegionalSingleParameterized(ctx context.Context, rd *schema.ResourceData, identitySpec inttypes.Identity, client AWSClient) error { + attr := identitySpec.Attributes[len(identitySpec.Attributes)-1] + if 
rd.Id() != "" { importID := rd.Id() - if attrName != names.AttrID { - rd.Set(attrName, importID) + if attr.ResourceAttributeName() != names.AttrID { + rd.Set(attr.ResourceAttributeName(), importID) } return nil @@ -35,28 +37,30 @@ func RegionalSingleParameterized(ctx context.Context, rd *schema.ResourceData, a return err } - valRaw, ok := identity.GetOk(attrName) + valRaw, ok := identity.GetOk(attr.Name()) if !ok { - return fmt.Errorf("identity attribute %q is required", attrName) + return fmt.Errorf("identity attribute %q is required", attr.Name()) } val, ok := valRaw.(string) if !ok { - return fmt.Errorf("identity attribute %q: expected string, got %T", attrName, valRaw) + return fmt.Errorf("identity attribute %q: expected string, got %T", attr.Name(), valRaw) } - setAttribute(rd, attrName, val) + setAttribute(rd, attr.ResourceAttributeName(), val) - if attrName != names.AttrID { + if attr.ResourceAttributeName() != names.AttrID { rd.SetId(val) } return nil } -func GlobalSingleParameterized(ctx context.Context, rd *schema.ResourceData, attrName string, client AWSClient) error { +func GlobalSingleParameterized(ctx context.Context, rd *schema.ResourceData, identitySpec inttypes.Identity, client AWSClient) error { + attr := identitySpec.Attributes[len(identitySpec.Attributes)-1] + if rd.Id() != "" { importID := rd.Id() - if attrName != names.AttrID { - rd.Set(attrName, importID) + if attr.ResourceAttributeName() != names.AttrID { + rd.Set(attr.ResourceAttributeName(), importID) } return nil @@ -71,24 +75,24 @@ func GlobalSingleParameterized(ctx context.Context, rd *schema.ResourceData, att return err } - valRaw, ok := identity.GetOk(attrName) + valRaw, ok := identity.GetOk(attr.Name()) if !ok { - return fmt.Errorf("identity attribute %q is required", attrName) + return fmt.Errorf("identity attribute %q is required", attr.Name()) } val, ok := valRaw.(string) if !ok { - return fmt.Errorf("identity attribute %q: expected string, got %T", attrName, valRaw) + return 
fmt.Errorf("identity attribute %q: expected string, got %T", attr.Name(), valRaw) } - setAttribute(rd, attrName, val) + setAttribute(rd, attr.ResourceAttributeName(), val) - if attrName != names.AttrID { + if attr.ResourceAttributeName() != names.AttrID { rd.SetId(val) } return nil } -func RegionalMultipleParameterized(ctx context.Context, rd *schema.ResourceData, attrs []inttypes.IdentityAttribute, importSpec *inttypes.SDKv2Import, client AWSClient) error { +func RegionalMultipleParameterized(ctx context.Context, rd *schema.ResourceData, identitySpec inttypes.Identity, importSpec *inttypes.SDKv2Import, client AWSClient) error { if rd.Id() != "" { id, parts, err := importSpec.ImportID.Parse(rd.Id()) if err != nil { @@ -113,21 +117,21 @@ func RegionalMultipleParameterized(ctx context.Context, rd *schema.ResourceData, return err } - for _, attr := range attrs { - switch attr.Name { + for _, attr := range identitySpec.Attributes { + switch attr.Name() { case names.AttrAccountID, names.AttrRegion: // Do nothing default: - valRaw, ok := identity.GetOk(attr.Name) - if attr.Required && !ok { - return fmt.Errorf("identity attribute %q is required", attr.Name) + valRaw, ok := identity.GetOk(attr.Name()) + if attr.Required() && !ok { + return fmt.Errorf("identity attribute %q is required", attr.Name()) } val, ok := valRaw.(string) if !ok { - return fmt.Errorf("identity attribute %q: expected string, got %T", attr.Name, valRaw) + return fmt.Errorf("identity attribute %q: expected string, got %T", attr.Name(), valRaw) } - setAttribute(rd, attr.Name, val) + setAttribute(rd, attr.ResourceAttributeName(), val) } } @@ -137,7 +141,7 @@ func RegionalMultipleParameterized(ctx context.Context, rd *schema.ResourceData, return nil } -func GlobalMultipleParameterized(ctx context.Context, rd *schema.ResourceData, attrs []inttypes.IdentityAttribute, importSpec *inttypes.SDKv2Import, client AWSClient) error { +func GlobalMultipleParameterized(ctx context.Context, rd *schema.ResourceData, 
identitySpec inttypes.Identity, importSpec *inttypes.SDKv2Import, client AWSClient) error { if rd.Id() != "" { id, parts, err := importSpec.ImportID.Parse(rd.Id()) if err != nil { @@ -158,21 +162,21 @@ func GlobalMultipleParameterized(ctx context.Context, rd *schema.ResourceData, a return err } - for _, attr := range attrs { - switch attr.Name { + for _, attr := range identitySpec.Attributes { + switch attr.Name() { case names.AttrAccountID: // Do nothing default: - valRaw, ok := identity.GetOk(attr.Name) - if attr.Required && !ok { - return fmt.Errorf("identity attribute %q is required", attr.Name) + valRaw, ok := identity.GetOk(attr.Name()) + if attr.Required() && !ok { + return fmt.Errorf("identity attribute %q is required", attr.Name()) } val, ok := valRaw.(string) if !ok { - return fmt.Errorf("identity attribute %q: expected string, got %T", attr.Name, valRaw) + return fmt.Errorf("identity attribute %q: expected string, got %T", attr.Name(), valRaw) } - setAttribute(rd, attr.Name, val) + setAttribute(rd, attr.ResourceAttributeName(), val) } } diff --git a/internal/provider/sdkv2/importer/parameterized_test.go b/internal/provider/sdkv2/importer/parameterized_test.go index f9d7b4191c51..403ddad16c7b 100644 --- a/internal/provider/sdkv2/importer/parameterized_test.go +++ b/internal/provider/sdkv2/importer/parameterized_test.go @@ -25,24 +25,11 @@ var regionalSingleParameterizedSchema = map[string]*schema.Schema{ } func regionalSingleParameterizedIdentitySpec(attrName string) inttypes.Identity { - return inttypes.Identity{ - IsGlobalResource: true, - IdentityAttribute: attrName, - Attributes: []inttypes.IdentityAttribute{ - { - Name: "account_id", - Required: false, - }, - { - Name: "region", - Required: false, - }, - { - Name: attrName, - Required: true, - }, - }, - } + return inttypes.RegionalSingleParameterIdentity(attrName) +} + +func regionalSingleParameterizedIdentitySpecNameMapped(identityAttrName, resourceAttrName string) inttypes.Identity { + return 
inttypes.RegionalSingleParameterIdentityWithMappedName(identityAttrName, resourceAttrName) } func TestRegionalSingleParameterized_ByImportID(t *testing.T) { @@ -101,12 +88,14 @@ func TestRegionalSingleParameterized_ByImportID(t *testing.T) { region: region, } + identitySpec := regionalSingleParameterizedIdentitySpec(tc.attrName) + d := schema.TestResourceDataRaw(t, regionalSingleParameterizedSchema, map[string]any{ "region": tc.inputRegion, }) d.SetId(tc.inputID) - err := importer.RegionalSingleParameterized(ctx, d, tc.attrName, client) + err := importer.RegionalSingleParameterized(ctx, d, identitySpec, client) if tc.expectError { if err == nil { t.Fatal("Expected error, got none") @@ -151,14 +140,18 @@ func TestRegionalSingleParameterized_ByIdentity(t *testing.T) { anotherRegion := "another-region-1" testCases := map[string]struct { - attrName string + identityAttrName string + resourceAttrName string + identitySpec inttypes.Identity identityAttrs map[string]string expectedRegion string expectError bool expectedErrorPrefix string }{ "Attr_Required": { - attrName: "name", + identityAttrName: "name", + resourceAttrName: "name", + identitySpec: regionalSingleParameterizedIdentitySpec("name"), identityAttrs: map[string]string{ "name": "a_name", }, @@ -166,7 +159,9 @@ func TestRegionalSingleParameterized_ByIdentity(t *testing.T) { expectError: false, }, "Attr_WithAccountID": { - attrName: "name", + identityAttrName: "name", + resourceAttrName: "name", + identitySpec: regionalSingleParameterizedIdentitySpec("name"), identityAttrs: map[string]string{ "account_id": accountID, "name": "a_name", @@ -175,7 +170,9 @@ func TestRegionalSingleParameterized_ByIdentity(t *testing.T) { expectError: false, }, "Attr_WithDefaultRegion": { - attrName: "name", + identityAttrName: "name", + resourceAttrName: "name", + identitySpec: regionalSingleParameterizedIdentitySpec("name"), identityAttrs: map[string]string{ "region": region, "name": "a_name", @@ -184,7 +181,9 @@ func 
TestRegionalSingleParameterized_ByIdentity(t *testing.T) { expectError: false, }, "Attr_WithRegionOverride": { - attrName: "name", + identityAttrName: "name", + resourceAttrName: "name", + identitySpec: regionalSingleParameterizedIdentitySpec("name"), identityAttrs: map[string]string{ "region": anotherRegion, "name": "a_name", @@ -193,7 +192,9 @@ func TestRegionalSingleParameterized_ByIdentity(t *testing.T) { expectError: false, }, "Attr_WrongAccountID": { - attrName: "name", + identityAttrName: "name", + resourceAttrName: "name", + identitySpec: regionalSingleParameterizedIdentitySpec("name"), identityAttrs: map[string]string{ "account_id": "987654321098", "name": "a_name", @@ -202,7 +203,9 @@ func TestRegionalSingleParameterized_ByIdentity(t *testing.T) { }, "ID_Required": { - attrName: "id", + identityAttrName: "id", + resourceAttrName: "id", + identitySpec: regionalSingleParameterizedIdentitySpec("id"), identityAttrs: map[string]string{ "id": "a_name", }, @@ -210,7 +213,9 @@ func TestRegionalSingleParameterized_ByIdentity(t *testing.T) { expectError: false, }, "ID_WithAccountID": { - attrName: "id", + identityAttrName: "id", + resourceAttrName: "id", + identitySpec: regionalSingleParameterizedIdentitySpec("id"), identityAttrs: map[string]string{ "account_id": accountID, "id": "a_name", @@ -219,7 +224,9 @@ func TestRegionalSingleParameterized_ByIdentity(t *testing.T) { expectError: false, }, "ID_WithDefaultRegion": { - attrName: "id", + identityAttrName: "id", + resourceAttrName: "id", + identitySpec: regionalSingleParameterizedIdentitySpec("id"), identityAttrs: map[string]string{ "region": region, "id": "a_name", @@ -228,7 +235,9 @@ func TestRegionalSingleParameterized_ByIdentity(t *testing.T) { expectError: false, }, "ID_WithRegionOverride": { - attrName: "id", + identityAttrName: "id", + resourceAttrName: "id", + identitySpec: regionalSingleParameterizedIdentitySpec("id"), identityAttrs: map[string]string{ "region": anotherRegion, "id": "a_name", @@ -237,13 
+246,26 @@ func TestRegionalSingleParameterized_ByIdentity(t *testing.T) { expectError: false, }, "ID_WrongAccountID": { - attrName: "id", + identityAttrName: "id", + resourceAttrName: "id", + identitySpec: regionalSingleParameterizedIdentitySpec("id"), identityAttrs: map[string]string{ "account_id": "987654321098", "id": "a_name", }, expectError: true, }, + + "name mapped": { + identityAttrName: "id_name", + resourceAttrName: "name", + identitySpec: regionalSingleParameterizedIdentitySpecNameMapped("id_name", "name"), + identityAttrs: map[string]string{ + "id_name": "a_name", + }, + expectedRegion: region, + expectError: false, + }, } for name, tc := range testCases { @@ -256,12 +278,10 @@ func TestRegionalSingleParameterized_ByIdentity(t *testing.T) { region: region, } - identitySpec := regionalSingleParameterizedIdentitySpec(tc.attrName) - - identitySchema := identity.NewIdentitySchema(identitySpec) + identitySchema := identity.NewIdentitySchema(tc.identitySpec) d := schema.TestResourceDataWithIdentityRaw(t, regionalSingleParameterizedSchema, identitySchema, tc.identityAttrs) - err := importer.RegionalSingleParameterized(ctx, d, tc.attrName, client) + err := importer.RegionalSingleParameterized(ctx, d, tc.identitySpec, client) if tc.expectError { if err == nil { t.Fatal("Expected error, got none") @@ -278,7 +298,7 @@ func TestRegionalSingleParameterized_ByIdentity(t *testing.T) { // Check ID value // ID must always be set for SDKv2 resources - if e, a := tc.identityAttrs[tc.attrName], getAttributeValue(t, d, "id"); e != a { + if e, a := tc.identityAttrs[tc.identityAttrName], getAttributeValue(t, d, "id"); e != a { t.Errorf("expected `id` to be %q, got %q", e, a) } @@ -289,8 +309,8 @@ func TestRegionalSingleParameterized_ByIdentity(t *testing.T) { // Check name value var expectedNameValue string - if tc.attrName == "name" { - expectedNameValue = tc.identityAttrs["name"] + if tc.resourceAttrName == "name" { + expectedNameValue = 
tc.identityAttrs[tc.identityAttrName] } if e, a := expectedNameValue, getAttributeValue(t, d, "name"); e != a { t.Errorf("expected `name` to be %q, got %q", e, a) @@ -307,20 +327,11 @@ var globalSingleParameterizedSchema = map[string]*schema.Schema{ } func globalSingleParameterizedIdentitySpec(attrName string) inttypes.Identity { - return inttypes.Identity{ - IsGlobalResource: true, - IdentityAttribute: attrName, - Attributes: []inttypes.IdentityAttribute{ - { - Name: "account_id", - Required: false, - }, - { - Name: attrName, - Required: true, - }, - }, - } + return inttypes.GlobalSingleParameterIdentity(attrName) +} + +func globalSingleParameterizedIdentitySpecWithMappedName(attrName, resourceAttrName string) inttypes.Identity { + return inttypes.GlobalSingleParameterIdentityWithMappedName(attrName, resourceAttrName) } func TestGlobalSingleParameterized_ByImportID(t *testing.T) { @@ -357,10 +368,12 @@ func TestGlobalSingleParameterized_ByImportID(t *testing.T) { region: region, } + identitySpec := globalSingleParameterizedIdentitySpec(tc.attrName) + d := schema.TestResourceDataRaw(t, globalSingleParameterizedSchema, map[string]any{}) d.SetId(tc.inputID) - err := importer.GlobalSingleParameterized(ctx, d, tc.attrName, client) + err := importer.GlobalSingleParameterized(ctx, d, identitySpec, client) if tc.expectError { if err == nil { t.Fatal("Expected error, got none") @@ -399,20 +412,23 @@ func TestGlobalSingleParameterized_ByIdentity(t *testing.T) { region := "a-region-1" testCases := map[string]struct { - attrName string + identityAttrName string identityAttrs map[string]string + resourceAttrName string expectError bool expectedErrorPrefix string }{ "Attr_Required": { - attrName: "name", + identityAttrName: "name", + resourceAttrName: "name", identityAttrs: map[string]string{ "name": "a_name", }, expectError: false, }, "Attr_WithAccountID": { - attrName: "name", + identityAttrName: "name", + resourceAttrName: "name", identityAttrs: map[string]string{ 
"account_id": accountID, "name": "a_name", @@ -420,7 +436,8 @@ func TestGlobalSingleParameterized_ByIdentity(t *testing.T) { expectError: false, }, "Attr_WrongAccountID": { - attrName: "name", + identityAttrName: "name", + resourceAttrName: "name", identityAttrs: map[string]string{ "account_id": "987654321098", "name": "a_name", @@ -429,14 +446,16 @@ func TestGlobalSingleParameterized_ByIdentity(t *testing.T) { }, "ID_Required": { - attrName: "id", + identityAttrName: "id", + resourceAttrName: "id", identityAttrs: map[string]string{ "id": "a_name", }, expectError: false, }, "ID_WithAccountID": { - attrName: "id", + identityAttrName: "id", + resourceAttrName: "id", identityAttrs: map[string]string{ "account_id": accountID, "id": "a_name", @@ -444,13 +463,23 @@ func TestGlobalSingleParameterized_ByIdentity(t *testing.T) { expectError: false, }, "ID_WrongAccountID": { - attrName: "id", + identityAttrName: "id", + resourceAttrName: "id", identityAttrs: map[string]string{ "account_id": "987654321098", "id": "a_name", }, expectError: true, }, + + "name mapped": { + identityAttrName: "id_name", + resourceAttrName: "name", + identityAttrs: map[string]string{ + "id_name": "a_name", + }, + expectError: false, + }, } for name, tc := range testCases { @@ -463,12 +492,17 @@ func TestGlobalSingleParameterized_ByIdentity(t *testing.T) { region: region, } - identitySpec := globalSingleParameterizedIdentitySpec(tc.attrName) + var identitySpec inttypes.Identity + if tc.resourceAttrName == tc.identityAttrName { + identitySpec = globalSingleParameterizedIdentitySpec(tc.identityAttrName) + } else { + identitySpec = globalSingleParameterizedIdentitySpecWithMappedName(tc.identityAttrName, tc.resourceAttrName) + } identitySchema := identity.NewIdentitySchema(identitySpec) d := schema.TestResourceDataWithIdentityRaw(t, globalSingleParameterizedSchema, identitySchema, tc.identityAttrs) - err := importer.GlobalSingleParameterized(ctx, d, tc.attrName, client) + err := 
importer.GlobalSingleParameterized(ctx, d, identitySpec, client) if tc.expectError { if err == nil { t.Fatal("Expected error, got none") @@ -485,14 +519,14 @@ func TestGlobalSingleParameterized_ByIdentity(t *testing.T) { // Check ID value // ID must always be set for SDKv2 resources - if e, a := tc.identityAttrs[tc.attrName], getAttributeValue(t, d, "id"); e != a { + if e, a := tc.identityAttrs[tc.identityAttrName], getAttributeValue(t, d, "id"); e != a { t.Errorf("expected `id` to be %q, got %q", e, a) } // Check name value var expectedNameValue string - if tc.attrName == "name" { - expectedNameValue = tc.identityAttrs["name"] + if tc.resourceAttrName == "name" { + expectedNameValue = tc.identityAttrs[tc.identityAttrName] } if e, a := expectedNameValue, getAttributeValue(t, d, "name"); e != a { t.Errorf("expected `name` to be %q, got %q", e, a) @@ -521,6 +555,18 @@ func regionalMultipleParameterizedIdentitySpec(attrNames []string) inttypes.Iden return inttypes.RegionalParameterizedIdentity(attrs) } +func regionalMultipleParameterizedIdentitySpecWithMappedName(attrNames map[string]string) inttypes.Identity { + var attrs []inttypes.IdentityAttribute + for identityAttrName, resourceAttrName := range attrNames { + if identityAttrName == resourceAttrName { + attrs = append(attrs, inttypes.StringIdentityAttribute(identityAttrName, true)) + } else { + attrs = append(attrs, inttypes.StringIdentityAttributeWithMappedName(identityAttrName, true, resourceAttrName)) + } + } + return inttypes.RegionalParameterizedIdentity(attrs) +} + func TestRegionalMutipleParameterized_ByImportID(t *testing.T) { t.Parallel() @@ -580,7 +626,7 @@ func TestRegionalMutipleParameterized_ByImportID(t *testing.T) { }) d.SetId(tc.inputID) - err := importer.RegionalMultipleParameterized(ctx, d, identitySpec.Attributes, &importSpec, client) + err := importer.RegionalMultipleParameterized(ctx, d, identitySpec, &importSpec, client) if tc.expectError { if err == nil { t.Fatal("Expected error, got none") 
@@ -624,6 +670,7 @@ func TestRegionalMutipleParameterized_ByIdentity(t *testing.T) { testCases := map[string]struct { identityAttrs map[string]string + identitySpec inttypes.Identity expectedAttrs map[string]string expectedID string expectedRegion string @@ -635,6 +682,7 @@ func TestRegionalMutipleParameterized_ByIdentity(t *testing.T) { "name": "a_name", "type": "a_type", }, + identitySpec: regionalMultipleParameterizedIdentitySpec([]string{"name", "type"}), expectedAttrs: map[string]string{ "name": "a_name", "type": "a_type", @@ -649,6 +697,7 @@ func TestRegionalMutipleParameterized_ByIdentity(t *testing.T) { "name": "a_name", "type": "a_type", }, + identitySpec: regionalMultipleParameterizedIdentitySpec([]string{"name", "type"}), expectedAttrs: map[string]string{ "name": "a_name", "type": "a_type", @@ -663,6 +712,7 @@ func TestRegionalMutipleParameterized_ByIdentity(t *testing.T) { "name": "a_name", "type": "a_type", }, + identitySpec: regionalMultipleParameterizedIdentitySpec([]string{"name", "type"}), expectedAttrs: map[string]string{ "name": "a_name", "type": "a_type", @@ -677,6 +727,7 @@ func TestRegionalMutipleParameterized_ByIdentity(t *testing.T) { "name": "a_name", "type": "a_type", }, + identitySpec: regionalMultipleParameterizedIdentitySpec([]string{"name", "type"}), expectedAttrs: map[string]string{ "name": "a_name", "type": "a_type", @@ -691,7 +742,26 @@ func TestRegionalMutipleParameterized_ByIdentity(t *testing.T) { "name": "a_name", "type": "a_type", }, - expectError: true, + identitySpec: regionalMultipleParameterizedIdentitySpec([]string{"name", "type"}), + expectError: true, + }, + + "name mapped": { + identityAttrs: map[string]string{ + "id_name": "a_name", + "type": "a_type", + }, + identitySpec: regionalMultipleParameterizedIdentitySpecWithMappedName(map[string]string{ + "id_name": "name", + "type": "type", + }), + expectedAttrs: map[string]string{ + "name": "a_name", + "type": "a_type", + }, + expectedID: "a_name,a_type", + expectedRegion: 
region, + expectError: false, }, } @@ -705,17 +775,15 @@ func TestRegionalMutipleParameterized_ByIdentity(t *testing.T) { region: region, } - identitySpec := regionalMultipleParameterizedIdentitySpec([]string{"name", "type"}) - importSpec := inttypes.SDKv2Import{ WrappedImport: true, ImportID: testImportID{t: t}, } - identitySchema := identity.NewIdentitySchema(identitySpec) + identitySchema := identity.NewIdentitySchema(tc.identitySpec) d := schema.TestResourceDataWithIdentityRaw(t, regionalMultipleParameterizedSchema, identitySchema, tc.identityAttrs) - err := importer.RegionalMultipleParameterized(ctx, d, identitySpec.Attributes, &importSpec, client) + err := importer.RegionalMultipleParameterized(ctx, d, tc.identitySpec, &importSpec, client) if tc.expectError { if err == nil { t.Fatal("Expected error, got none") @@ -770,6 +838,18 @@ func globalMultipleParameterizedIdentitySpec(attrNames []string) inttypes.Identi return inttypes.GlobalParameterizedIdentity(attrs) } +func globalMultipleParameterizedIdentitySpecWithMappedName(attrNames map[string]string) inttypes.Identity { + var attrs []inttypes.IdentityAttribute + for identityAttrName, resourceAttrName := range attrNames { + if identityAttrName == resourceAttrName { + attrs = append(attrs, inttypes.StringIdentityAttribute(identityAttrName, true)) + } else { + attrs = append(attrs, inttypes.StringIdentityAttributeWithMappedName(identityAttrName, true, resourceAttrName)) + } + } + return inttypes.GlobalParameterizedIdentity(attrs) +} + func TestGlobalMutipleParameterized_ByImportID(t *testing.T) { t.Parallel() @@ -812,7 +892,7 @@ func TestGlobalMutipleParameterized_ByImportID(t *testing.T) { d := schema.TestResourceDataRaw(t, globalMultipleParameterizedSchema, map[string]any{}) d.SetId(tc.inputID) - err := importer.GlobalMultipleParameterized(ctx, d, identitySpec.Attributes, &importSpec, client) + err := importer.GlobalMultipleParameterized(ctx, d, identitySpec, &importSpec, client) if tc.expectError { if err == 
nil { t.Fatal("Expected error, got none") @@ -850,16 +930,34 @@ func TestGlobalMutipleParameterized_ByIdentity(t *testing.T) { testCases := map[string]struct { identityAttrs map[string]string + identitySpec inttypes.Identity expectedID string expectedAttrs map[string]string expectError bool expectedErrorPrefix string }{ - "Identity": { + "same names": { identityAttrs: map[string]string{ "name": "a_name", "type": "a_type", }, + identitySpec: globalMultipleParameterizedIdentitySpec([]string{"name", "type"}), + expectedID: "a_name,a_type", + expectedAttrs: map[string]string{ + "name": "a_name", + "type": "a_type", + }, + expectError: false, + }, + "name mapped": { + identityAttrs: map[string]string{ + "id_name": "a_name", + "type": "a_type", + }, + identitySpec: globalMultipleParameterizedIdentitySpecWithMappedName(map[string]string{ + "id_name": "name", + "type": "type", + }), expectedID: "a_name,a_type", expectedAttrs: map[string]string{ "name": "a_name", @@ -879,17 +977,15 @@ func TestGlobalMutipleParameterized_ByIdentity(t *testing.T) { region: region, } - identitySpec := globalMultipleParameterizedIdentitySpec([]string{"name", "type"}) - importSpec := inttypes.SDKv2Import{ WrappedImport: true, ImportID: testImportID{t: t}, } - identitySchema := identity.NewIdentitySchema(identitySpec) + identitySchema := identity.NewIdentitySchema(tc.identitySpec) d := schema.TestResourceDataWithIdentityRaw(t, globalMultipleParameterizedSchema, identitySchema, tc.identityAttrs) - err := importer.GlobalMultipleParameterized(ctx, d, identitySpec.Attributes, &importSpec, client) + err := importer.GlobalMultipleParameterized(ctx, d, tc.identitySpec, &importSpec, client) if tc.expectError { if err == nil { t.Fatal("Expected error, got none") diff --git a/internal/provider/sdkv2/intercept.go b/internal/provider/sdkv2/intercept.go index 7595d96e80af..0dc5f957781d 100644 --- a/internal/provider/sdkv2/intercept.go +++ b/internal/provider/sdkv2/intercept.go @@ -6,15 +6,29 @@ package sdkv2 
import ( "context" "errors" + "slices" + "github.com/aws/aws-sdk-go-v2/aws" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" "github.com/hashicorp/terraform-provider-aws/internal/sdkv2" tfslices "github.com/hashicorp/terraform-provider-aws/internal/slices" + tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" ) +type awsClient interface { + AccountID(ctx context.Context) string + Region(ctx context.Context) string + DefaultTagsConfig(ctx context.Context) *tftags.DefaultConfig + IgnoreTagsConfig(ctx context.Context) *tftags.IgnoreConfig + Partition(context.Context) string + ServicePackage(_ context.Context, name string) conns.ServicePackage + ValidateInContextRegionInPartition(ctx context.Context) error + AwsConfig(context.Context) aws.Config +} + // schemaResourceData is an interface that implements a subset of schema.ResourceData's public methods. type schemaResourceData interface { sdkv2.ResourceDiffer @@ -23,7 +37,7 @@ type schemaResourceData interface { } type interceptorOptions[D any] struct { - c *conns.AWSClient + c awsClient d D when when why why @@ -44,28 +58,18 @@ type interceptor1[D, E any] interface { run(context.Context, interceptorOptions[D]) E } -type interceptor2[D, R, E any] interface { - run(context.Context, interceptorOptions[D]) (R, E) -} - type ( // crudInterceptor is functionality invoked during a CRUD request lifecycle. crudInterceptor = interceptor1[schemaResourceData, diag.Diagnostics] // customizeDiffInterceptor is functionality invoked during a CustomizeDiff request lifecycle. customizeDiffInterceptor = interceptor1[*schema.ResourceDiff, error] // importInterceptor is functionality invoked during an Import request lifecycle. 
- importInterceptor = interceptor2[*schema.ResourceData, []*schema.ResourceData, error] + importInterceptor = interceptor1[*schema.ResourceData, error] ) type interceptorFunc1[D, E any] func(context.Context, interceptorOptions[D]) E -func (f interceptorFunc1[D, E]) run(ctx context.Context, opts interceptorOptions[D]) E { //nolint:unused // used via crudInterceptor/customizeDiffInterceptor - return f(ctx, opts) -} - -type interceptorFunc2[D, R, E any] func(context.Context, interceptorOptions[D]) (R, E) - -func (f interceptorFunc2[D, R, E]) run(ctx context.Context, opts interceptorOptions[D]) (R, E) { //nolint:unused // used via importInterceptor +func (f interceptorFunc1[D, E]) run(ctx context.Context, opts interceptorOptions[D]) E { //nolint:unused // used via crudInterceptor/customizeDiffInterceptor/importInterceptor return f(ctx, opts) } @@ -82,18 +86,15 @@ type typedInterceptorInvocation[D, E any] struct { interceptor interceptor1[D, E] } -type typedInterceptor2Invocation[D, R, E any] struct { - when when - why why - interceptor interceptor2[D, R, E] -} - type ( crudInterceptorInvocation = typedInterceptorInvocation[schemaResourceData, diag.Diagnostics] customizeDiffInterceptorInvocation = typedInterceptorInvocation[*schema.ResourceDiff, error] - importInterceptorInvocation = typedInterceptor2Invocation[*schema.ResourceData, []*schema.ResourceData, error] + importInterceptorInvocation = typedInterceptorInvocation[*schema.ResourceData, error] ) +// Only generate strings for use in tests +//go:generate stringer -type=when -output=when_string_test.go + // when represents the point in the request lifecycle that an interceptor is run. // Multiple values can be ORed together. 
type when uint16 @@ -135,19 +136,18 @@ func interceptedCRUDHandler[F ~func(context.Context, *schema.ResourceData, any) return nil } - return func(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { + return func(ctx context.Context, rd *schema.ResourceData, meta any) diag.Diagnostics { var diags diag.Diagnostics - ctx, err := bootstrapContext(ctx, d.GetOk, meta) + ctx, err := bootstrapContext(ctx, rd.GetOk, meta) if err != nil { return sdkdiag.AppendFromErr(diags, err) } - // Before interceptors are run first to last. - forward := make([]crudInterceptorInvocation, 0) + var interceptors []crudInterceptorInvocation for _, v := range interceptorInvocations.why(why) { if interceptor, ok := v.interceptor.(crudInterceptor); ok { - forward = append(forward, crudInterceptorInvocation{ + interceptors = append(interceptors, crudInterceptorInvocation{ when: v.when, why: v.why, interceptor: interceptor, @@ -155,15 +155,16 @@ func interceptedCRUDHandler[F ~func(context.Context, *schema.ResourceData, any) } } - when := Before - for _, v := range forward { - if v.when&when != 0 { - opts := crudInterceptorOptions{ - c: meta.(*conns.AWSClient), - d: d, - when: when, - why: why, - } + opts := crudInterceptorOptions{ + c: meta.(awsClient), + d: rd, + why: why, + } + + // Before interceptors are run first to last. + opts.when = Before + for v := range slices.Values(interceptors) { + if v.when&opts.when != 0 { diags = append(diags, v.interceptor.run(ctx, opts)...) // Short circuit if any Before interceptor errors. @@ -173,36 +174,24 @@ func interceptedCRUDHandler[F ~func(context.Context, *schema.ResourceData, any) } } - // All other interceptors are run last to first. - reverse := tfslices.Reverse(forward) - diags = f(ctx, d, meta) + d := f(ctx, rd, meta) + diags = append(diags, d...) - if diags.HasError() { - when = OnError + // All other interceptors are run last to first. 
+ if d.HasError() { + opts.when = OnError } else { - when = After + opts.when = After } - for _, v := range reverse { - if v.when&when != 0 { - opts := crudInterceptorOptions{ - c: meta.(*conns.AWSClient), - d: d, - when: when, - why: why, - } + for v := range tfslices.BackwardValues(interceptors) { + if v.when&opts.when != 0 { diags = append(diags, v.interceptor.run(ctx, opts)...) } } - when = Finally - for _, v := range reverse { - if v.when&when != 0 { - opts := crudInterceptorOptions{ - c: meta.(*conns.AWSClient), - d: d, - when: when, - why: why, - } + opts.when = Finally + for v := range tfslices.BackwardValues(interceptors) { + if v.when&opts.when != 0 { diags = append(diags, v.interceptor.run(ctx, opts)...) } } @@ -222,11 +211,10 @@ func interceptedCustomizeDiffHandler(bootstrapContext contextFunc, interceptorIn why := CustomizeDiff - // Before interceptors are run first to last. - forward := make([]customizeDiffInterceptorInvocation, 0) + var interceptors []customizeDiffInterceptorInvocation for _, v := range interceptorInvocations.why(why) { if interceptor, ok := v.interceptor.(customizeDiffInterceptor); ok { - forward = append(forward, customizeDiffInterceptorInvocation{ + interceptors = append(interceptors, customizeDiffInterceptorInvocation{ when: v.when, why: v.why, interceptor: interceptor, @@ -234,15 +222,16 @@ func interceptedCustomizeDiffHandler(bootstrapContext contextFunc, interceptorIn } } - when := Before - for _, v := range forward { - if v.when&when != 0 { - opts := customizeDiffInterceptorOptions{ - c: meta.(*conns.AWSClient), - d: d, - when: when, - why: why, - } + opts := customizeDiffInterceptorOptions{ + c: meta.(awsClient), + d: d, + why: why, + } + + // Before interceptors are run first to last. + opts.when = Before + for v := range slices.Values(interceptors) { + if v.when&opts.when != 0 { // Short circuit if any Before interceptor errors. 
if err := v.interceptor.run(ctx, opts); err != nil { return err @@ -250,41 +239,28 @@ func interceptedCustomizeDiffHandler(bootstrapContext contextFunc, interceptorIn } } - // All other interceptors are run last to first. - reverse := tfslices.Reverse(forward) var errs []error - when = After + opts.when = After if f != nil { if err := f(ctx, d, meta); err != nil { - when = OnError + opts.when = OnError errs = append(errs, err) } } - for _, v := range reverse { - if v.when&when != 0 { - opts := customizeDiffInterceptorOptions{ - c: meta.(*conns.AWSClient), - d: d, - when: when, - why: why, - } + // All other interceptors are run last to first. + for v := range tfslices.BackwardValues(interceptors) { + if v.when&opts.when != 0 { if err := v.interceptor.run(ctx, opts); err != nil { errs = append(errs, err) } } } - when = Finally - for _, v := range reverse { - if v.when&when != 0 { - opts := customizeDiffInterceptorOptions{ - c: meta.(*conns.AWSClient), - d: d, - when: when, - why: why, - } + opts.when = Finally + for v := range tfslices.BackwardValues(interceptors) { + if v.when&opts.when != 0 { if err := v.interceptor.run(ctx, opts); err != nil { errs = append(errs, err) } @@ -310,11 +286,10 @@ func interceptedImportHandler(bootstrapContext contextFunc, interceptorInvocatio why := Import - // Before interceptors are run first to last. 
- forward := make([]importInterceptorInvocation, 0) + var interceptors []importInterceptorInvocation for _, v := range interceptorInvocations.why(why) { if interceptor, ok := v.interceptor.(importInterceptor); ok { - forward = append(forward, importInterceptorInvocation{ + interceptors = append(interceptors, importInterceptorInvocation{ when: v.when, why: v.why, interceptor: interceptor, @@ -322,58 +297,46 @@ func interceptedImportHandler(bootstrapContext contextFunc, interceptorInvocatio } } - when := Before - for _, v := range forward { - if v.when&when != 0 { - opts := importInterceptorOptions{ - c: meta.(*conns.AWSClient), - d: d, - when: when, - why: why, - } + opts := importInterceptorOptions{ + c: meta.(awsClient), + d: d, + why: why, + } + + // Before interceptors are run first to last. + opts.when = Before + for v := range slices.Values(interceptors) { + if v.when&opts.when != 0 { // Short circuit if any Before interceptor errors. - if _, err := v.interceptor.run(ctx, opts); err != nil { + if err := v.interceptor.run(ctx, opts); err != nil { return nil, err } } } - // All other interceptors are run last to first. - reverse := tfslices.Reverse(forward) var errs []error r, err := f(ctx, d, meta) if err != nil { - when = OnError + opts.when = OnError errs = append(errs, err) } else { - when = After + opts.when = After } - for _, v := range reverse { - if v.when&when != 0 { - opts := importInterceptorOptions{ - c: meta.(*conns.AWSClient), - d: d, - when: when, - why: why, - } - if _, err := v.interceptor.run(ctx, opts); err != nil { + // All other interceptors are run last to first. 
+ for v := range tfslices.BackwardValues(interceptors) { + if v.when&opts.when != 0 { + if err := v.interceptor.run(ctx, opts); err != nil { errs = append(errs, err) } } } - when = Finally - for _, v := range reverse { - if v.when&when != 0 { - opts := importInterceptorOptions{ - c: meta.(*conns.AWSClient), - d: d, - when: when, - why: why, - } - if _, err := v.interceptor.run(ctx, opts); err != nil { + opts.when = Finally + for v := range tfslices.BackwardValues(interceptors) { + if v.when&opts.when != 0 { + if err := v.interceptor.run(ctx, opts); err != nil { errs = append(errs, err) } } diff --git a/internal/provider/sdkv2/intercept_test.go b/internal/provider/sdkv2/intercept_test.go index e28139757e42..25f80fcee76c 100644 --- a/internal/provider/sdkv2/intercept_test.go +++ b/internal/provider/sdkv2/intercept_test.go @@ -5,11 +5,13 @@ package sdkv2 import ( "context" + "errors" "testing" + "github.com/google/go-cmp/cmp" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" + "github.com/hashicorp/terraform-provider-aws/internal/errs" ) type ( @@ -59,46 +61,669 @@ func TestInterceptorsWhy(t *testing.T) { } } -func TestInterceptedHandler(t *testing.T) { +func TestInterceptedCRUDHandler(t *testing.T) { t.Parallel() - var interceptors interceptorInvocations + client := mockClient{ + accountID: "123456789012", + region: "us-west-2", //lintignore:AWSAT003 + } - interceptors = append(interceptors, interceptorInvocation{ - when: Before, - why: Create, - interceptor: crudInterceptorFunc(func(ctx context.Context, opts crudInterceptorOptions) diag.Diagnostics { - var diags diag.Diagnostics - return diags - }), - }) - interceptors = append(interceptors, interceptorInvocation{ - when: After, - why: Delete, - interceptor: crudInterceptorFunc(func(ctx context.Context, opts crudInterceptorOptions) diag.Diagnostics { - var diags diag.Diagnostics - 
return diags - }), - }) - interceptors = append(interceptors, interceptorInvocation{ - when: Before, - why: Create, - interceptor: crudInterceptorFunc(func(ctx context.Context, opts crudInterceptorOptions) diag.Diagnostics { - var diags diag.Diagnostics - return diags - }), - }) + contextFunc := func(ctx context.Context, _ getAttributeFunc, meta any) (context.Context, error) { + return ctx, nil + } + + testcases := map[string]struct { + firstInterceptorDiags map[when]diag.Diagnostics + secondInterceptorDiags map[when]diag.Diagnostics + innerFuncDiags diag.Diagnostics + expectedFirstCalls []when + expectedSecondCalls []when + expectedInnerCalls int + expectedDiags diag.Diagnostics + }{ + "First has Before error": { + firstInterceptorDiags: map[when]diag.Diagnostics{ + Before: { + errs.NewErrorDiagnostic("First interceptor Before error", "An error occurred in the first interceptor Before handler"), + }, + }, + expectedFirstCalls: []when{Before}, + expectedInnerCalls: 0, + expectedDiags: diag.Diagnostics{ + errs.NewErrorDiagnostic("First interceptor Before error", "An error occurred in the first interceptor Before handler"), + }, + }, + + "Second has Before error": { + secondInterceptorDiags: map[when]diag.Diagnostics{ + Before: { + errs.NewErrorDiagnostic("Second interceptor Before error", "An error occurred in the second interceptor Before handler"), + }, + }, + expectedFirstCalls: []when{Before}, + expectedSecondCalls: []when{Before}, + expectedInnerCalls: 0, + expectedDiags: diag.Diagnostics{ + errs.NewErrorDiagnostic("Second interceptor Before error", "An error occurred in the second interceptor Before handler"), + }, + }, + + "First has Before warning": { + firstInterceptorDiags: map[when]diag.Diagnostics{ + Before: { + errs.NewWarningDiagnostic("First interceptor Before warning", "A warning occurred in the first interceptor Before handler"), + }, + }, + expectedFirstCalls: []when{Before, After, Finally}, + expectedSecondCalls: []when{Before, After, Finally}, + 
expectedInnerCalls: 1, + expectedDiags: diag.Diagnostics{ + errs.NewWarningDiagnostic("First interceptor Before warning", "A warning occurred in the first interceptor Before handler"), + }, + }, + + "Second has Before warning": { + secondInterceptorDiags: map[when]diag.Diagnostics{ + Before: { + errs.NewWarningDiagnostic("Second interceptor Before warning", "A warning occurred in the second interceptor Before handler"), + }, + }, + expectedFirstCalls: []when{Before, After, Finally}, + expectedSecondCalls: []when{Before, After, Finally}, + expectedInnerCalls: 1, + expectedDiags: diag.Diagnostics{ + errs.NewWarningDiagnostic("Second interceptor Before warning", "A warning occurred in the second interceptor Before handler"), + }, + }, + + "First has Before warning Second has Before error": { + firstInterceptorDiags: map[when]diag.Diagnostics{ + Before: { + errs.NewWarningDiagnostic("First interceptor Before warning", "A warning occurred in the first interceptor Before handler"), + }, + }, + secondInterceptorDiags: map[when]diag.Diagnostics{ + Before: { + errs.NewErrorDiagnostic("Second interceptor Before error", "An error occurred in the second interceptor Before handler"), + }, + }, + expectedFirstCalls: []when{Before}, + expectedSecondCalls: []when{Before}, + expectedInnerCalls: 0, + expectedDiags: diag.Diagnostics{ + errs.NewWarningDiagnostic("First interceptor Before warning", "A warning occurred in the first interceptor Before handler"), + errs.NewErrorDiagnostic("Second interceptor Before error", "An error occurred in the second interceptor Before handler"), + }, + }, + + "Inner has error": { + innerFuncDiags: diag.Diagnostics{ + errs.NewErrorDiagnostic("Inner function error", "An error occurred in the inner function"), + }, + expectedFirstCalls: []when{Before, OnError, Finally}, + expectedSecondCalls: []when{Before, OnError, Finally}, + expectedInnerCalls: 1, + expectedDiags: diag.Diagnostics{ + errs.NewErrorDiagnostic("Inner function error", "An error occurred 
in the inner function"), + }, + }, + + "Inner has warning": { + innerFuncDiags: diag.Diagnostics{ + errs.NewWarningDiagnostic("Inner function warning", "A warning occurred in the inner function"), + }, + expectedFirstCalls: []when{Before, After, Finally}, + expectedSecondCalls: []when{Before, After, Finally}, + expectedInnerCalls: 1, + expectedDiags: diag.Diagnostics{ + errs.NewWarningDiagnostic("Inner function warning", "A warning occurred in the inner function"), + }, + }, - var read schema.ReadContextFunc = func(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { - var diags diag.Diagnostics - return sdkdiag.AppendErrorf(diags, "read error") + "Inner has error First has Before warning": { + firstInterceptorDiags: map[when]diag.Diagnostics{ + Before: { + errs.NewWarningDiagnostic("First interceptor Before warning", "A warning occurred in the first interceptor Before handler"), + }, + }, + innerFuncDiags: diag.Diagnostics{ + errs.NewErrorDiagnostic("Inner function error", "An error occurred in the inner function"), + }, + expectedFirstCalls: []when{Before, OnError, Finally}, + expectedSecondCalls: []when{Before, OnError, Finally}, + expectedInnerCalls: 1, + expectedDiags: diag.Diagnostics{ + errs.NewWarningDiagnostic("First interceptor Before warning", "A warning occurred in the first interceptor Before handler"), + errs.NewErrorDiagnostic("Inner function error", "An error occurred in the inner function"), + }, + }, + + "All have warnings": { + firstInterceptorDiags: map[when]diag.Diagnostics{ + Before: { + errs.NewWarningDiagnostic("First interceptor Before warning", "A warning occurred in the first interceptor Before handler"), + }, + After: { + errs.NewWarningDiagnostic("First interceptor After warning", "A warning occurred in the first interceptor After handler"), + }, + Finally: { + errs.NewWarningDiagnostic("First interceptor Finally warning", "A warning occurred in the first interceptor Finally handler"), + }, + }, + 
secondInterceptorDiags: map[when]diag.Diagnostics{ + Before: { + errs.NewWarningDiagnostic("Second interceptor Before warning", "A warning occurred in the second interceptor Before handler"), + }, + After: { + errs.NewWarningDiagnostic("Second interceptor After warning", "A warning occurred in the second interceptor After handler"), + }, + Finally: { + errs.NewWarningDiagnostic("Second interceptor Finally warning", "A warning occurred in the second interceptor Finally handler"), + }, + }, + innerFuncDiags: diag.Diagnostics{ + errs.NewWarningDiagnostic("Inner function warning", "A warning occurred in the inner function"), + }, + expectedFirstCalls: []when{Before, After, Finally}, + expectedSecondCalls: []when{Before, After, Finally}, + expectedInnerCalls: 1, + expectedDiags: diag.Diagnostics{ + errs.NewWarningDiagnostic("First interceptor Before warning", "A warning occurred in the first interceptor Before handler"), + errs.NewWarningDiagnostic("Second interceptor Before warning", "A warning occurred in the second interceptor Before handler"), + errs.NewWarningDiagnostic("Inner function warning", "A warning occurred in the inner function"), + errs.NewWarningDiagnostic("Second interceptor After warning", "A warning occurred in the second interceptor After handler"), + errs.NewWarningDiagnostic("First interceptor After warning", "A warning occurred in the first interceptor After handler"), + errs.NewWarningDiagnostic("Second interceptor Finally warning", "A warning occurred in the second interceptor Finally handler"), + errs.NewWarningDiagnostic("First interceptor Finally warning", "A warning occurred in the first interceptor Finally handler"), + }, + }, + + "Inner has error Handlers have warnings": { + firstInterceptorDiags: map[when]diag.Diagnostics{ + Before: { + errs.NewWarningDiagnostic("First interceptor Before warning", "A warning occurred in the first interceptor Before handler"), + }, + OnError: { + errs.NewWarningDiagnostic("First interceptor OnError 
warning", "A warning occurred in the first interceptor OnError handler"), + }, + Finally: { + errs.NewWarningDiagnostic("First interceptor Finally warning", "A warning occurred in the first interceptor Finally handler"), + }, + }, + secondInterceptorDiags: map[when]diag.Diagnostics{ + Before: { + errs.NewWarningDiagnostic("Second interceptor Before warning", "A warning occurred in the second interceptor Before handler"), + }, + OnError: { + errs.NewWarningDiagnostic("Second interceptor OnError warning", "A warning occurred in the second interceptor OnError handler"), + }, + Finally: { + errs.NewWarningDiagnostic("Second interceptor Finally warning", "A warning occurred in the second interceptor Finally handler"), + }, + }, + innerFuncDiags: diag.Diagnostics{ + errs.NewErrorDiagnostic("Inner function error", "An error occurred in the inner function"), + }, + expectedFirstCalls: []when{Before, OnError, Finally}, + expectedSecondCalls: []when{Before, OnError, Finally}, + expectedInnerCalls: 1, + expectedDiags: diag.Diagnostics{ + errs.NewWarningDiagnostic("First interceptor Before warning", "A warning occurred in the first interceptor Before handler"), + errs.NewWarningDiagnostic("Second interceptor Before warning", "A warning occurred in the second interceptor Before handler"), + errs.NewErrorDiagnostic("Inner function error", "An error occurred in the inner function"), + errs.NewWarningDiagnostic("Second interceptor OnError warning", "A warning occurred in the second interceptor OnError handler"), + errs.NewWarningDiagnostic("First interceptor OnError warning", "A warning occurred in the first interceptor OnError handler"), + errs.NewWarningDiagnostic("Second interceptor Finally warning", "A warning occurred in the second interceptor Finally handler"), + errs.NewWarningDiagnostic("First interceptor Finally warning", "A warning occurred in the first interceptor Finally handler"), + }, + }, + } + + for name, tc := range testcases { + t.Run(name, func(t *testing.T) { + 
t.Parallel() + + first := newMockCRUDInterceptor(tc.firstInterceptorDiags) + second := newMockCRUDInterceptor(tc.secondInterceptorDiags) + interceptors := append( + first.Invocations(), + second.Invocations()..., + ) + + f := newMockInnerCRUDFunc(tc.innerFuncDiags) + + handler := interceptedCRUDHandler(contextFunc, interceptors, f.Call, Create) + + ctx := t.Context() + diags := handler(ctx, nil, client) + + if diff := cmp.Diff(diags, tc.expectedDiags); diff != "" { + t.Errorf("unexpected diagnostics difference: %s", diff) + } + + if diff := cmp.Diff(first.called, tc.expectedFirstCalls); diff != "" { + t.Errorf("unexpected first interceptor calls difference: %s", diff) + } + if diff := cmp.Diff(second.called, tc.expectedSecondCalls); diff != "" { + t.Errorf("unexpected second interceptor calls difference: %s", diff) + } + if tc.expectedInnerCalls == 0 { + if f.count != 0 { + t.Errorf("expected inner function to not be called, got %d", f.count) + } + } else { + if f.count != tc.expectedInnerCalls { + t.Errorf("expected inner function to be called %d times, got %d", tc.expectedInnerCalls, f.count) + } + } + }) } - bootstrapContext := func(ctx context.Context, _ getAttributeFunc, meta any) (context.Context, error) { +} + +type mockCRUDInterceptor struct { + diags map[when]diag.Diagnostics + called []when +} + +func newMockCRUDInterceptor(diags map[when]diag.Diagnostics) *mockCRUDInterceptor { + if diags == nil { + diags = make(map[when]diag.Diagnostics) + } + return &mockCRUDInterceptor{ + diags: diags, + } +} + +func (m *mockCRUDInterceptor) Invocations() interceptorInvocations { + return interceptorInvocations{ + { + why: AllCRUDOps, + when: Before | After | OnError | Finally, + interceptor: m, + }, + } +} + +func (m *mockCRUDInterceptor) run(_ context.Context, opts crudInterceptorOptions) diag.Diagnostics { + m.called = append(m.called, opts.when) + return m.diags[opts.when] +} + +type mockInnerCRUDFunc struct { + diags diag.Diagnostics + count int +} + +func 
newMockInnerCRUDFunc(diags diag.Diagnostics) mockInnerCRUDFunc { + return mockInnerCRUDFunc{ + diags: diags, + } +} + +func (m *mockInnerCRUDFunc) Call(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { + m.count++ + return m.diags +} + +func TestInterceptedCustomizeDiffHandler(t *testing.T) { + t.Parallel() + + client := mockClient{ + accountID: "123456789012", + region: "us-west-2", //lintignore:AWSAT003 + } + + contextFunc := func(ctx context.Context, _ getAttributeFunc, meta any) (context.Context, error) { return ctx, nil } - diags := interceptedCRUDHandler(bootstrapContext, interceptors, read, Read)(context.Background(), nil, 42) - if got, want := len(diags), 1; got != want { - t.Errorf("length of diags = %v, want %v", got, want) + testcases := map[string]struct { + firstInterceptorErrors map[when]error + secondInterceptorErrors map[when]error + innerFuncError error + expectedFirstCalls []when + expectedSecondCalls []when + expectedInnerCalls int + expectedError error + }{ + "First has Before error": { + firstInterceptorErrors: map[when]error{ + Before: errors.New("First interceptor Before error"), + }, + expectedFirstCalls: []when{Before}, + expectedInnerCalls: 0, + expectedError: errors.New("First interceptor Before error"), + }, + + "Second has Before error": { + secondInterceptorErrors: map[when]error{ + Before: errors.New("Second interceptor Before error"), + }, + expectedFirstCalls: []when{Before}, + expectedSecondCalls: []when{Before}, + expectedInnerCalls: 0, + expectedError: errors.New("Second interceptor Before error"), + }, + + "Inner has error": { + innerFuncError: errors.New("Inner function error"), + expectedFirstCalls: []when{Before, OnError, Finally}, + expectedSecondCalls: []when{Before, OnError, Finally}, + expectedInnerCalls: 1, + expectedError: errors.Join( + errors.New("Inner function error"), + ), + }, + + "All have errors": { + firstInterceptorErrors: map[when]error{ + OnError: errors.New("First interceptor OnError 
error"), + Finally: errors.New("First interceptor Finally error"), + }, + secondInterceptorErrors: map[when]error{ + OnError: errors.New("Second interceptor OnError error"), + Finally: errors.New("Second interceptor Finally error"), + }, + innerFuncError: errors.New("Inner function error"), + expectedFirstCalls: []when{Before, OnError, Finally}, + expectedSecondCalls: []when{Before, OnError, Finally}, + expectedInnerCalls: 1, + expectedError: errors.Join( + errors.New("Inner function error"), + errors.New("Second interceptor OnError error"), + errors.New("First interceptor OnError error"), + errors.New("Second interceptor Finally error"), + errors.New("First interceptor Finally error"), + ), + }, + + "Handlers have errors": { + firstInterceptorErrors: map[when]error{ + After: errors.New("First interceptor After error"), + Finally: errors.New("First interceptor Finally error"), + }, + secondInterceptorErrors: map[when]error{ + After: errors.New("Second interceptor After error"), + Finally: errors.New("Second interceptor Finally error"), + }, + expectedFirstCalls: []when{Before, After, Finally}, + expectedSecondCalls: []when{Before, After, Finally}, + expectedInnerCalls: 1, + expectedError: errors.Join( + errors.New("Second interceptor After error"), + errors.New("First interceptor After error"), + errors.New("Second interceptor Finally error"), + errors.New("First interceptor Finally error"), + ), + }, + } + + for name, tc := range testcases { + t.Run(name, func(t *testing.T) { + t.Parallel() + + first := newMockCustomizeDiffInterceptor(tc.firstInterceptorErrors) + second := newMockCustomizeDiffInterceptor(tc.secondInterceptorErrors) + interceptors := append( + first.Invocations(), + second.Invocations()..., + ) + + f := newMockInnerCustomizeDiffFunc(tc.innerFuncError) + + handler := interceptedCustomizeDiffHandler(contextFunc, interceptors, f.Call) + + ctx := t.Context() + err := handler(ctx, nil, client) + + if diff := cmp.Diff(err, tc.expectedError, 
cmp.Comparer(func(x, y error) bool { + return x.Error() == y.Error() + })); diff != "" { + t.Errorf("unexpected error difference: %s", diff) + } + + if diff := cmp.Diff(first.called, tc.expectedFirstCalls); diff != "" { + t.Errorf("unexpected first interceptor calls difference: %s", diff) + } + if diff := cmp.Diff(second.called, tc.expectedSecondCalls); diff != "" { + t.Errorf("unexpected second interceptor calls difference: %s", diff) + } + if tc.expectedInnerCalls == 0 { + if f.count != 0 { + t.Errorf("expected inner function to not be called, got %d", f.count) + } + } else { + if f.count != tc.expectedInnerCalls { + t.Errorf("expected inner function to be called %d times, got %d", tc.expectedInnerCalls, f.count) + } + } + }) } } + +type mockCustomizeDiffInterceptor struct { + errors map[when]error + called []when +} + +func newMockCustomizeDiffInterceptor(errors map[when]error) *mockCustomizeDiffInterceptor { + if errors == nil { + errors = make(map[when]error) + } + return &mockCustomizeDiffInterceptor{ + errors: errors, + } +} + +func (m *mockCustomizeDiffInterceptor) Invocations() interceptorInvocations { + return interceptorInvocations{ + { + why: CustomizeDiff, + when: Before | After | OnError | Finally, + interceptor: m, + }, + } +} + +func (m *mockCustomizeDiffInterceptor) run(_ context.Context, opts customizeDiffInterceptorOptions) error { + m.called = append(m.called, opts.when) + return m.errors[opts.when] +} + +type mockInnerCustomizeDiffFunc struct { + err error + count int +} + +func newMockInnerCustomizeDiffFunc(err error) mockInnerCustomizeDiffFunc { + return mockInnerCustomizeDiffFunc{ + err: err, + } +} + +func (m *mockInnerCustomizeDiffFunc) Call(ctx context.Context, d *schema.ResourceDiff, meta any) error { + m.count++ + return m.err +} + +func TestInterceptedImportHandler(t *testing.T) { + t.Parallel() + + client := mockClient{ + accountID: "123456789012", + region: "us-west-2", //lintignore:AWSAT003 + } + + contextFunc := func(ctx 
context.Context, _ getAttributeFunc, meta any) (context.Context, error) { + return ctx, nil + } + + testcases := map[string]struct { + firstInterceptorErrors map[when]error + secondInterceptorErrors map[when]error + innerFuncError error + expectedFirstCalls []when + expectedSecondCalls []when + expectedInnerCalls int + expectedError error + }{ + "First has Before error": { + firstInterceptorErrors: map[when]error{ + Before: errors.New("First interceptor Before error"), + }, + expectedFirstCalls: []when{Before}, + expectedInnerCalls: 0, + expectedError: errors.New("First interceptor Before error"), + }, + + "Second has Before error": { + secondInterceptorErrors: map[when]error{ + Before: errors.New("Second interceptor Before error"), + }, + expectedFirstCalls: []when{Before}, + expectedSecondCalls: []when{Before}, + expectedInnerCalls: 0, + expectedError: errors.New("Second interceptor Before error"), + }, + + "Inner has error": { + innerFuncError: errors.New("Inner function error"), + expectedFirstCalls: []when{Before, OnError, Finally}, + expectedSecondCalls: []when{Before, OnError, Finally}, + expectedInnerCalls: 1, + expectedError: errors.Join( + errors.New("Inner function error"), + ), + }, + + "All have errors": { + firstInterceptorErrors: map[when]error{ + OnError: errors.New("First interceptor OnError error"), + Finally: errors.New("First interceptor Finally error"), + }, + secondInterceptorErrors: map[when]error{ + OnError: errors.New("Second interceptor OnError error"), + Finally: errors.New("Second interceptor Finally error"), + }, + innerFuncError: errors.New("Inner function error"), + expectedFirstCalls: []when{Before, OnError, Finally}, + expectedSecondCalls: []when{Before, OnError, Finally}, + expectedInnerCalls: 1, + expectedError: errors.Join( + errors.New("Inner function error"), + errors.New("Second interceptor OnError error"), + errors.New("First interceptor OnError error"), + errors.New("Second interceptor Finally error"), + errors.New("First 
interceptor Finally error"), + ), + }, + + "Handlers have errors": { + firstInterceptorErrors: map[when]error{ + After: errors.New("First interceptor After error"), + Finally: errors.New("First interceptor Finally error"), + }, + secondInterceptorErrors: map[when]error{ + After: errors.New("Second interceptor After error"), + Finally: errors.New("Second interceptor Finally error"), + }, + expectedFirstCalls: []when{Before, After, Finally}, + expectedSecondCalls: []when{Before, After, Finally}, + expectedInnerCalls: 1, + expectedError: errors.Join( + errors.New("Second interceptor After error"), + errors.New("First interceptor After error"), + errors.New("Second interceptor Finally error"), + errors.New("First interceptor Finally error"), + ), + }, + } + + for name, tc := range testcases { + t.Run(name, func(t *testing.T) { + t.Parallel() + + first := newMockImportInterceptor(tc.firstInterceptorErrors) + second := newMockImportInterceptor(tc.secondInterceptorErrors) + interceptors := append( + first.Invocations(), + second.Invocations()..., + ) + + f := newMockInnerImportFunc(tc.innerFuncError) + + handler := interceptedImportHandler(contextFunc, interceptors, f.Call) + + ctx := t.Context() + _, err := handler(ctx, nil, client) + + if diff := cmp.Diff(err, tc.expectedError, cmp.Comparer(func(x, y error) bool { + return x.Error() == y.Error() + })); diff != "" { + t.Errorf("unexpected error difference: %s", diff) + } + + if diff := cmp.Diff(first.called, tc.expectedFirstCalls); diff != "" { + t.Errorf("unexpected first interceptor calls difference: %s", diff) + } + if diff := cmp.Diff(second.called, tc.expectedSecondCalls); diff != "" { + t.Errorf("unexpected second interceptor calls difference: %s", diff) + } + if tc.expectedInnerCalls == 0 { + if f.count != 0 { + t.Errorf("expected inner function to not be called, got %d", f.count) + } + } else { + if f.count != tc.expectedInnerCalls { + t.Errorf("expected inner function to be called %d times, got %d", 
tc.expectedInnerCalls, f.count) + } + } + }) + } +} + +type mockImportInterceptor struct { + errors map[when]error + called []when +} + +func newMockImportInterceptor(errors map[when]error) *mockImportInterceptor { + if errors == nil { + errors = make(map[when]error) + } + return &mockImportInterceptor{ + errors: errors, + } +} + +func (m *mockImportInterceptor) Invocations() interceptorInvocations { + return interceptorInvocations{ + { + why: Import, + when: Before | After | OnError | Finally, + interceptor: m, + }, + } +} + +func (m *mockImportInterceptor) run(_ context.Context, opts importInterceptorOptions) error { + m.called = append(m.called, opts.when) + return m.errors[opts.when] +} + +type mockInnerImportFunc struct { + err error + count int +} + +func newMockInnerImportFunc(err error) mockInnerImportFunc { + return mockInnerImportFunc{ + err: err, + } +} + +func (m *mockInnerImportFunc) Call(ctx context.Context, d *schema.ResourceData, meta any) ([]*schema.ResourceData, error) { + m.count++ + return nil, m.err +} diff --git a/internal/provider/sdkv2/internal/attribute/attributes.go b/internal/provider/sdkv2/internal/attribute/attributes.go index 8dbe4a46850c..b9e9af2dc5a5 100644 --- a/internal/provider/sdkv2/internal/attribute/attributes.go +++ b/internal/provider/sdkv2/internal/attribute/attributes.go @@ -15,6 +15,6 @@ var Region = sync.OnceValue(func() *schema.Schema { Type: schema.TypeString, Optional: true, Computed: true, - Description: names.TopLevelRegionAttributeDescription, + Description: names.ResourceTopLevelRegionAttributeDescription, } }) diff --git a/internal/provider/sdkv2/provider.go b/internal/provider/sdkv2/provider.go index e98d8fe5f937..f6b44b7c2bc1 100644 --- a/internal/provider/sdkv2/provider.go +++ b/internal/provider/sdkv2/provider.go @@ -712,9 +712,17 @@ func (p *sdkProvider) initialize(ctx context.Context) (map[string]conns.ServiceP r.ResourceBehavior.MutableIdentity = true } - interceptors = append(interceptors, 
newIdentityInterceptor(resource.Identity.Attributes)) + interceptors = append(interceptors, newIdentityInterceptor(&resource.Identity)) } + if resource.Import.CustomImport { + if r.Importer == nil || r.Importer.StateContext == nil { + errs = append(errs, fmt.Errorf("resource type %s: uses CustomImport but does not define an import function", typeName)) + continue + } + + customResourceImporter(r, &resource.Identity, &resource.Import) + } if resource.Import.WrappedImport { if r.Importer != nil && r.Importer.StateContext != nil { errs = append(errs, fmt.Errorf("resource type %s: uses WrappedImport but defines an import function", typeName)) diff --git a/internal/provider/sdkv2/provider_gen.go b/internal/provider/sdkv2/provider_gen.go index b4e5810e444c..3407e1fc6cb1 100644 --- a/internal/provider/sdkv2/provider_gen.go +++ b/internal/provider/sdkv2/provider_gen.go @@ -195,6 +195,14 @@ func endpointsSchema() *schema.Schema { Description: "Use this to override the default service endpoint URL", }, + // arcregionswitch + + "arcregionswitch": { + Type: schema.TypeString, + Optional: true, + Description: "Use this to override the default service endpoint URL", + }, + // athena "athena": { @@ -267,6 +275,14 @@ func endpointsSchema() *schema.Schema { Description: "Use this to override the default service endpoint URL", }, + // bedrockagentcore + + "bedrockagentcore": { + Type: schema.TypeString, + Optional: true, + Description: "Use this to override the default service endpoint URL", + }, + // billing "billing": { @@ -1565,6 +1581,14 @@ func endpointsSchema() *schema.Schema { Description: "Use this to override the default service endpoint URL", }, + // odb + + "odb": { + Type: schema.TypeString, + Optional: true, + Description: "Use this to override the default service endpoint URL", + }, + // opensearch "opensearch": { @@ -1919,6 +1943,14 @@ func endpointsSchema() *schema.Schema { Description: "Use this to override the default service endpoint URL", }, + // s3vectors + + 
"s3vectors": { + Type: schema.TypeString, + Optional: true, + Description: "Use this to override the default service endpoint URL", + }, + // sagemaker "sagemaker": { @@ -2277,6 +2309,14 @@ func endpointsSchema() *schema.Schema { Description: "Use this to override the default service endpoint URL", }, + // workmail + + "workmail": { + Type: schema.TypeString, + Optional: true, + Description: "Use this to override the default service endpoint URL", + }, + // workspaces "workspaces": { diff --git a/internal/provider/sdkv2/region.go b/internal/provider/sdkv2/region.go index ab61f7d105a8..9c92055ebaff 100644 --- a/internal/provider/sdkv2/region.go +++ b/internal/provider/sdkv2/region.go @@ -78,6 +78,11 @@ func setRegionInState() crudInterceptor { // Set region in state after R. switch why { case Read: + // Will occur on a refresh when the resource does not exist in AWS and needs to be recreated, e.g. "_disappears" tests. + if d.Id() == "" { + return diags + } + if err := d.Set(names.AttrRegion, c.Region(ctx)); err != nil { return sdkdiag.AppendErrorf(diags, "setting %s: %s", names.AttrRegion, err) } @@ -113,7 +118,7 @@ func forceNewIfRegionChanges() customizeDiffInterceptor { } func importRegion() importInterceptor { - return interceptorFunc2[*schema.ResourceData, []*schema.ResourceData, error](func(ctx context.Context, opts importInterceptorOptions) ([]*schema.ResourceData, error) { + return interceptorFunc1[*schema.ResourceData, error](func(ctx context.Context, opts importInterceptorOptions) error { c, d := opts.c, opts.d switch when, why := opts.when, opts.why; when { @@ -130,7 +135,7 @@ func importRegion() importInterceptor { } } - return []*schema.ResourceData{d}, nil + return nil }) } @@ -144,7 +149,7 @@ func resourceImportRegion() interceptorInvocation { // importRegionNoDefault does not provide a default value for `region`. This should be used when the import ID is or contains a region. 
func importRegionNoDefault() importInterceptor { - return interceptorFunc2[*schema.ResourceData, []*schema.ResourceData, error](func(ctx context.Context, opts importInterceptorOptions) ([]*schema.ResourceData, error) { + return interceptorFunc1[*schema.ResourceData, error](func(ctx context.Context, opts importInterceptorOptions) error { d := opts.d switch when, why := opts.when, opts.why; when { @@ -159,7 +164,7 @@ func importRegionNoDefault() importInterceptor { } } - return []*schema.ResourceData{d}, nil + return nil }) } diff --git a/internal/provider/sdkv2/service_packages_gen.go b/internal/provider/sdkv2/service_packages_gen.go index fa6119d49328..07a6d528f69b 100644 --- a/internal/provider/sdkv2/service_packages_gen.go +++ b/internal/provider/sdkv2/service_packages_gen.go @@ -26,6 +26,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/service/apprunner" "github.com/hashicorp/terraform-provider-aws/internal/service/appstream" "github.com/hashicorp/terraform-provider-aws/internal/service/appsync" + "github.com/hashicorp/terraform-provider-aws/internal/service/arcregionswitch" "github.com/hashicorp/terraform-provider-aws/internal/service/athena" "github.com/hashicorp/terraform-provider-aws/internal/service/auditmanager" "github.com/hashicorp/terraform-provider-aws/internal/service/autoscaling" @@ -35,6 +36,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/service/bcmdataexports" "github.com/hashicorp/terraform-provider-aws/internal/service/bedrock" "github.com/hashicorp/terraform-provider-aws/internal/service/bedrockagent" + "github.com/hashicorp/terraform-provider-aws/internal/service/bedrockagentcore" "github.com/hashicorp/terraform-provider-aws/internal/service/billing" "github.com/hashicorp/terraform-provider-aws/internal/service/budgets" "github.com/hashicorp/terraform-provider-aws/internal/service/ce" @@ -175,6 +177,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/service/notifications" 
"github.com/hashicorp/terraform-provider-aws/internal/service/notificationscontacts" "github.com/hashicorp/terraform-provider-aws/internal/service/oam" + "github.com/hashicorp/terraform-provider-aws/internal/service/odb" "github.com/hashicorp/terraform-provider-aws/internal/service/opensearch" "github.com/hashicorp/terraform-provider-aws/internal/service/opensearchserverless" "github.com/hashicorp/terraform-provider-aws/internal/service/organizations" @@ -214,6 +217,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/service/s3control" "github.com/hashicorp/terraform-provider-aws/internal/service/s3outposts" "github.com/hashicorp/terraform-provider-aws/internal/service/s3tables" + "github.com/hashicorp/terraform-provider-aws/internal/service/s3vectors" "github.com/hashicorp/terraform-provider-aws/internal/service/sagemaker" "github.com/hashicorp/terraform-provider-aws/internal/service/scheduler" "github.com/hashicorp/terraform-provider-aws/internal/service/schemas" @@ -255,6 +259,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/service/wafregional" "github.com/hashicorp/terraform-provider-aws/internal/service/wafv2" "github.com/hashicorp/terraform-provider-aws/internal/service/wellarchitected" + "github.com/hashicorp/terraform-provider-aws/internal/service/workmail" "github.com/hashicorp/terraform-provider-aws/internal/service/workspaces" "github.com/hashicorp/terraform-provider-aws/internal/service/workspacesweb" "github.com/hashicorp/terraform-provider-aws/internal/service/xray" @@ -281,6 +286,7 @@ func servicePackages(ctx context.Context) []conns.ServicePackage { apprunner.ServicePackage(ctx), appstream.ServicePackage(ctx), appsync.ServicePackage(ctx), + arcregionswitch.ServicePackage(ctx), athena.ServicePackage(ctx), auditmanager.ServicePackage(ctx), autoscaling.ServicePackage(ctx), @@ -290,6 +296,7 @@ func servicePackages(ctx context.Context) []conns.ServicePackage { bcmdataexports.ServicePackage(ctx), 
bedrock.ServicePackage(ctx), bedrockagent.ServicePackage(ctx), + bedrockagentcore.ServicePackage(ctx), billing.ServicePackage(ctx), budgets.ServicePackage(ctx), ce.ServicePackage(ctx), @@ -430,6 +437,7 @@ func servicePackages(ctx context.Context) []conns.ServicePackage { notifications.ServicePackage(ctx), notificationscontacts.ServicePackage(ctx), oam.ServicePackage(ctx), + odb.ServicePackage(ctx), opensearch.ServicePackage(ctx), opensearchserverless.ServicePackage(ctx), organizations.ServicePackage(ctx), @@ -469,6 +477,7 @@ func servicePackages(ctx context.Context) []conns.ServicePackage { s3control.ServicePackage(ctx), s3outposts.ServicePackage(ctx), s3tables.ServicePackage(ctx), + s3vectors.ServicePackage(ctx), sagemaker.ServicePackage(ctx), scheduler.ServicePackage(ctx), schemas.ServicePackage(ctx), @@ -510,6 +519,7 @@ func servicePackages(ctx context.Context) []conns.ServicePackage { wafregional.ServicePackage(ctx), wafv2.ServicePackage(ctx), wellarchitected.ServicePackage(ctx), + workmail.ServicePackage(ctx), workspaces.ServicePackage(ctx), workspacesweb.ServicePackage(ctx), xray.ServicePackage(ctx), diff --git a/internal/provider/sdkv2/when_string_test.go b/internal/provider/sdkv2/when_string_test.go new file mode 100644 index 000000000000..14224a38f3ac --- /dev/null +++ b/internal/provider/sdkv2/when_string_test.go @@ -0,0 +1,39 @@ +// Code generated by "stringer -type=when -output=when_string_test.go"; DO NOT EDIT. + +package sdkv2 + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. 
+ var x [1]struct{} + _ = x[Before-1] + _ = x[After-2] + _ = x[OnError-4] + _ = x[Finally-8] +} + +const ( + _when_name_0 = "BeforeAfter" + _when_name_1 = "OnError" + _when_name_2 = "Finally" +) + +var ( + _when_index_0 = [...]uint8{0, 6, 11} +) + +func (i when) String() string { + switch { + case 1 <= i && i <= 2: + i -= 1 + return _when_name_0[_when_index_0[i]:_when_index_0[i+1]] + case i == 4: + return _when_name_1 + case i == 8: + return _when_name_2 + default: + return "when(" + strconv.FormatInt(int64(i), 10) + ")" + } +} diff --git a/internal/retry/op.go b/internal/retry/op.go index f37ff06e7e6c..6a54af5ab6e1 100644 --- a/internal/retry/op.go +++ b/internal/retry/op.go @@ -9,7 +9,6 @@ import ( "time" "github.com/hashicorp/terraform-provider-aws/internal/backoff" - inttypes "github.com/hashicorp/terraform-provider-aws/internal/types" ) type opFunc[T any] func(context.Context) (T, error) @@ -87,33 +86,32 @@ func (op opFunc[T]) If(predicate predicateFunc[T]) runFunc[T] { return func(ctx context.Context, timeout time.Duration, opts ...backoff.Option) (T, error) { // We explicitly don't set a deadline on the context here to maintain compatibility // with the Plugin SDKv2 implementation. A parent context may have set a deadline. - var l *backoff.Loop + var ( + l *backoff.Loop + t T + err error + ) for l = backoff.NewLoopWithOptions(timeout, opts...); l.Continue(ctx); { - t, err := op(ctx) + t, err = op(ctx) - if retry, err := predicate(t, err); !retry { + var retry bool + if retry, err = predicate(t, err); !retry { return t, err } } - var err error - if l.Remaining() == 0 { - err = inttypes.ErrDeadlineExceeded - } else { - err = context.Cause(ctx) - } - - if errors.Is(err, inttypes.ErrDeadlineExceeded) || errors.Is(err, context.DeadlineExceeded) { - err = &TimeoutError{ - // LastError must be nil for `TimedOut` to return true. 
- // LastError: err, - LastState: "retryableerror", - Timeout: timeout, - ExpectedState: []string{"success"}, + if err == nil { + if l.Remaining() == 0 || errors.Is(err, context.Cause(ctx)) { + err = &TimeoutError{ + // LastError must be nil for `TimedOut` to return true. + // LastError: err, + LastState: "retryableerror", + Timeout: timeout, + ExpectedState: []string{"success"}, + } } } - var zero T - return zero, err + return t, err } } diff --git a/internal/retry/state.go b/internal/retry/state.go index cf9f0ddceb9f..87bf3884f2ca 100644 --- a/internal/retry/state.go +++ b/internal/retry/state.go @@ -78,13 +78,13 @@ func (conf *StateChangeConfOf[T, S]) WaitForStateContext(ctx context.Context) (T conf.ContinuousTargetOccurence = 1 } - // Set a default DelayFunc using the StateChangeConf values - delayFunc := backoff.SDKv2HelperRetryCompatibleDelay(conf.Delay, conf.PollInterval, conf.MinTimeout) + // Set a default Delay using the StateChangeConf values + delay := backoff.SDKv2HelperRetryCompatibleDelay(conf.Delay, conf.PollInterval, conf.MinTimeout) - // When VCR testing in replay mode, override the default DelayFunc + // When VCR testing in replay mode, override the default Delay if inContext, ok := conns.FromContext(ctx); ok && inContext.VCREnabled() { if mode, _ := vcr.Mode(); mode == recorder.ModeReplayOnly { - delayFunc = backoff.ZeroDelay + delay = backoff.ZeroDelay } } @@ -95,7 +95,7 @@ func (conf *StateChangeConfOf[T, S]) WaitForStateContext(ctx context.Context) (T notFoundTick, targetOccurence int l *backoff.Loop ) - for l = backoff.NewLoopWithOptions(conf.Timeout, backoff.WithDelay(delayFunc)); l.Continue(ctx); { + for l = backoff.NewLoopWithOptions(conf.Timeout, backoff.WithDelay(delay)); l.Continue(ctx); { t, currentState, err = conf.refreshWithTimeout(ctx, l.Remaining()) if errors.Is(err, context.DeadlineExceeded) { @@ -151,6 +151,12 @@ func (conf *StateChangeConfOf[T, S]) WaitForStateContext(ctx context.Context) (T ExpectedState: 
tfslices.Strings(conf.Target), } } + + // Wait between refreshes using exponential backoff, except when + // waiting for the target state to reoccur. + if v, ok := delay.(backoff.DelayWithSetIncrementDelay); ok { + v.SetIncrementDelay(targetOccurence == 0) + } } } diff --git a/internal/sdkv2/state.go b/internal/sdkv2/state.go index 1d74322091e9..0a826cb735dd 100644 --- a/internal/sdkv2/state.go +++ b/internal/sdkv2/state.go @@ -24,3 +24,8 @@ func ToLowerSchemaStateFunc(v any) string { func ToUpperSchemaStateFunc(v any) string { return strings.ToUpper(v.(string)) } + +// TrimSpaceSchemaStateFunc removes all leading and trailing white space from a string value before storing it in state. +func TrimSpaceSchemaStateFunc(v any) string { + return strings.TrimSpace(v.(string)) +} diff --git a/internal/sdkv2/state_test.go b/internal/sdkv2/state_test.go index 844355e685da..ffc9e5b66369 100644 --- a/internal/sdkv2/state_test.go +++ b/internal/sdkv2/state_test.go @@ -47,3 +47,16 @@ func TestToUpperSchemaStateFunc(t *testing.T) { t.Errorf("unexpected diff (+want, -got): %s", diff) } } + +func TestTrimSpaceSchemaStateFunc(t *testing.T) { + t.Parallel() + + var input any = " in-state " + want := "in-state" + + got := TrimSpaceSchemaStateFunc(input) + + if diff := cmp.Diff(got, want); diff != "" { + t.Errorf("unexpected diff (+want, -got): %s", diff) + } +} diff --git a/internal/sdkv2/suppress.go b/internal/sdkv2/suppress.go index 430d55d634e1..a3b9e7352c95 100644 --- a/internal/sdkv2/suppress.go +++ b/internal/sdkv2/suppress.go @@ -44,6 +44,22 @@ func SuppressEquivalentRoundedTime(layout string, d time.Duration) schema.Schema } } +// SuppressEquivalentTime returns a difference suppression function that suppresses differences +// for time values that represent the same instant in different timezones. 
+func SuppressEquivalentTime(k, old, new string, d *schema.ResourceData) bool { + oldTime, err := time.Parse(time.RFC3339, old) + if err != nil { + return false + } + + newTime, err := time.Parse(time.RFC3339, new) + if err != nil { + return false + } + + return oldTime.Equal(newTime) +} + // SuppressEquivalentIAMPolicyDocuments provides custom difference suppression // for IAM policy documents in the given strings that are equivalent. func SuppressEquivalentIAMPolicyDocuments(k, old, new string, _ *schema.ResourceData) bool { diff --git a/internal/sdkv2/suppress_test.go b/internal/sdkv2/suppress_test.go index dc870e10aeac..64616fee77e8 100644 --- a/internal/sdkv2/suppress_test.go +++ b/internal/sdkv2/suppress_test.go @@ -106,3 +106,41 @@ func TestSuppressEquivalentRoundedTime(t *testing.T) { } } } + +func TestSuppressEquivalentTime(t *testing.T) { + t.Parallel() + + testCases := []struct { + old string + new string + equivalent bool + }{ + { + old: "2024-04-19T23:01:23.000Z", + new: "2024-04-19T23:01:23.000Z", + equivalent: true, + }, + { + old: "2024-04-19T23:01:23.000Z", + new: "2024-04-19T23:02:23.000Z", + equivalent: false, + }, + { + old: "2023-09-24T15:30:00+09:00", + new: "2023-09-24T06:30:00Z", + equivalent: true, + }, + } + + for i, tc := range testCases { + value := SuppressEquivalentTime("test_property", tc.old, tc.new, nil) + + if tc.equivalent && !value { + t.Fatalf("expected test case %d to be equivalent", i) + } + + if !tc.equivalent && value { + t.Fatalf("expected test case %d to not be equivalent", i) + } + } +} diff --git a/internal/service/accessanalyzer/analyzer.go b/internal/service/accessanalyzer/analyzer.go index b36e4ba98865..437c448a7fc8 100644 --- a/internal/service/accessanalyzer/analyzer.go +++ b/internal/service/accessanalyzer/analyzer.go @@ -219,8 +219,8 @@ func resourceAnalyzerCreate(ctx context.Context, d *schema.ResourceData, meta an } // Handle Organizations eventual consistency. 
- _, err := tfresource.RetryWhenIsAErrorMessageContains[*types.ValidationException](ctx, organizationCreationTimeout, - func() (any, error) { + _, err := tfresource.RetryWhenIsAErrorMessageContains[any, *types.ValidationException](ctx, organizationCreationTimeout, + func(ctx context.Context) (any, error) { return conn.CreateAnalyzer(ctx, &input) }, "You must create an organization", diff --git a/internal/service/accessanalyzer/analyzer_tags_gen_test.go b/internal/service/accessanalyzer/analyzer_tags_gen_test.go index bed30f0deda3..1bad17c535b8 100644 --- a/internal/service/accessanalyzer/analyzer_tags_gen_test.go +++ b/internal/service/accessanalyzer/analyzer_tags_gen_test.go @@ -7,7 +7,6 @@ import ( "github.com/aws/aws-sdk-go-v2/service/accessanalyzer/types" "github.com/hashicorp/terraform-plugin-testing/config" - sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/knownvalue" "github.com/hashicorp/terraform-plugin-testing/plancheck" @@ -48,11 +47,12 @@ func testAccAccessAnalyzerAnalyzer_tagsSerial(t *testing.T) { func testAccAccessAnalyzerAnalyzer_tags(t *testing.T) { ctx := acctest.Context(t) + var v types.AnalyzerSummary resourceName := "aws_accessanalyzer_analyzer.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) testAccPreCheck(ctx, t) @@ -233,11 +233,12 @@ func testAccAccessAnalyzerAnalyzer_tags(t *testing.T) { func testAccAccessAnalyzerAnalyzer_tags_null(t *testing.T) { ctx := acctest.Context(t) + var v types.AnalyzerSummary resourceName := "aws_accessanalyzer_analyzer.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.Test(t, 
resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) testAccPreCheck(ctx, t) @@ -303,11 +304,12 @@ func testAccAccessAnalyzerAnalyzer_tags_null(t *testing.T) { func testAccAccessAnalyzerAnalyzer_tags_EmptyMap(t *testing.T) { ctx := acctest.Context(t) + var v types.AnalyzerSummary resourceName := "aws_accessanalyzer_analyzer.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) testAccPreCheck(ctx, t) @@ -369,11 +371,12 @@ func testAccAccessAnalyzerAnalyzer_tags_EmptyMap(t *testing.T) { func testAccAccessAnalyzerAnalyzer_tags_AddOnUpdate(t *testing.T) { ctx := acctest.Context(t) + var v types.AnalyzerSummary resourceName := "aws_accessanalyzer_analyzer.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) testAccPreCheck(ctx, t) @@ -453,11 +456,12 @@ func testAccAccessAnalyzerAnalyzer_tags_AddOnUpdate(t *testing.T) { func testAccAccessAnalyzerAnalyzer_tags_EmptyTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) + var v types.AnalyzerSummary resourceName := "aws_accessanalyzer_analyzer.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) testAccPreCheck(ctx, t) @@ -545,11 +549,12 @@ func testAccAccessAnalyzerAnalyzer_tags_EmptyTag_OnCreate(t *testing.T) { func testAccAccessAnalyzerAnalyzer_tags_EmptyTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + var v types.AnalyzerSummary resourceName := 
"aws_accessanalyzer_analyzer.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) testAccPreCheck(ctx, t) @@ -685,11 +690,12 @@ func testAccAccessAnalyzerAnalyzer_tags_EmptyTag_OnUpdate_Add(t *testing.T) { func testAccAccessAnalyzerAnalyzer_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + var v types.AnalyzerSummary resourceName := "aws_accessanalyzer_analyzer.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) testAccPreCheck(ctx, t) @@ -777,11 +783,12 @@ func testAccAccessAnalyzerAnalyzer_tags_EmptyTag_OnUpdate_Replace(t *testing.T) func testAccAccessAnalyzerAnalyzer_tags_DefaultTags_providerOnly(t *testing.T) { ctx := acctest.Context(t) + var v types.AnalyzerSummary resourceName := "aws_accessanalyzer_analyzer.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) testAccPreCheck(ctx, t) @@ -961,11 +968,12 @@ func testAccAccessAnalyzerAnalyzer_tags_DefaultTags_providerOnly(t *testing.T) { func testAccAccessAnalyzerAnalyzer_tags_DefaultTags_nonOverlapping(t *testing.T) { ctx := acctest.Context(t) + var v types.AnalyzerSummary resourceName := "aws_accessanalyzer_analyzer.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) testAccPreCheck(ctx, 
t) @@ -1124,11 +1132,12 @@ func testAccAccessAnalyzerAnalyzer_tags_DefaultTags_nonOverlapping(t *testing.T) func testAccAccessAnalyzerAnalyzer_tags_DefaultTags_overlapping(t *testing.T) { ctx := acctest.Context(t) + var v types.AnalyzerSummary resourceName := "aws_accessanalyzer_analyzer.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) testAccPreCheck(ctx, t) @@ -1303,11 +1312,12 @@ func testAccAccessAnalyzerAnalyzer_tags_DefaultTags_overlapping(t *testing.T) { func testAccAccessAnalyzerAnalyzer_tags_DefaultTags_updateToProviderOnly(t *testing.T) { ctx := acctest.Context(t) + var v types.AnalyzerSummary resourceName := "aws_accessanalyzer_analyzer.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) testAccPreCheck(ctx, t) @@ -1396,11 +1406,12 @@ func testAccAccessAnalyzerAnalyzer_tags_DefaultTags_updateToProviderOnly(t *test func testAccAccessAnalyzerAnalyzer_tags_DefaultTags_updateToResourceOnly(t *testing.T) { ctx := acctest.Context(t) + var v types.AnalyzerSummary resourceName := "aws_accessanalyzer_analyzer.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) testAccPreCheck(ctx, t) @@ -1488,11 +1499,12 @@ func testAccAccessAnalyzerAnalyzer_tags_DefaultTags_updateToResourceOnly(t *test func testAccAccessAnalyzerAnalyzer_tags_DefaultTags_emptyResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v types.AnalyzerSummary resourceName := 
"aws_accessanalyzer_analyzer.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) testAccPreCheck(ctx, t) @@ -1556,11 +1568,12 @@ func testAccAccessAnalyzerAnalyzer_tags_DefaultTags_emptyResourceTag(t *testing. func testAccAccessAnalyzerAnalyzer_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T) { ctx := acctest.Context(t) + var v types.AnalyzerSummary resourceName := "aws_accessanalyzer_analyzer.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) testAccPreCheck(ctx, t) @@ -1616,11 +1629,12 @@ func testAccAccessAnalyzerAnalyzer_tags_DefaultTags_emptyProviderOnlyTag(t *test func testAccAccessAnalyzerAnalyzer_tags_DefaultTags_nullOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v types.AnalyzerSummary resourceName := "aws_accessanalyzer_analyzer.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) testAccPreCheck(ctx, t) @@ -1681,11 +1695,12 @@ func testAccAccessAnalyzerAnalyzer_tags_DefaultTags_nullOverlappingResourceTag(t func testAccAccessAnalyzerAnalyzer_tags_DefaultTags_nullNonOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v types.AnalyzerSummary resourceName := "aws_accessanalyzer_analyzer.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { 
acctest.PreCheck(ctx, t) testAccPreCheck(ctx, t) @@ -1746,11 +1761,12 @@ func testAccAccessAnalyzerAnalyzer_tags_DefaultTags_nullNonOverlappingResourceTa func testAccAccessAnalyzerAnalyzer_tags_ComputedTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) + var v types.AnalyzerSummary resourceName := "aws_accessanalyzer_analyzer.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) testAccPreCheck(ctx, t) @@ -1804,11 +1820,12 @@ func testAccAccessAnalyzerAnalyzer_tags_ComputedTag_OnCreate(t *testing.T) { func testAccAccessAnalyzerAnalyzer_tags_ComputedTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + var v types.AnalyzerSummary resourceName := "aws_accessanalyzer_analyzer.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) testAccPreCheck(ctx, t) @@ -1904,11 +1921,12 @@ func testAccAccessAnalyzerAnalyzer_tags_ComputedTag_OnUpdate_Add(t *testing.T) { func testAccAccessAnalyzerAnalyzer_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + var v types.AnalyzerSummary resourceName := "aws_accessanalyzer_analyzer.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) testAccPreCheck(ctx, t) @@ -1994,11 +2012,12 @@ func testAccAccessAnalyzerAnalyzer_tags_ComputedTag_OnUpdate_Replace(t *testing. 
func testAccAccessAnalyzerAnalyzer_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { ctx := acctest.Context(t) + var v types.AnalyzerSummary resourceName := "aws_accessanalyzer_analyzer.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) testAccPreCheck(ctx, t) @@ -2159,11 +2178,12 @@ func testAccAccessAnalyzerAnalyzer_tags_IgnoreTags_Overlap_DefaultTag(t *testing func testAccAccessAnalyzerAnalyzer_tags_IgnoreTags_Overlap_ResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v types.AnalyzerSummary resourceName := "aws_accessanalyzer_analyzer.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) testAccPreCheck(ctx, t) diff --git a/internal/service/accessanalyzer/service_endpoint_resolver_gen.go b/internal/service/accessanalyzer/service_endpoint_resolver_gen.go index 4faed4295d83..a795eca60589 100644 --- a/internal/service/accessanalyzer/service_endpoint_resolver_gen.go +++ b/internal/service/accessanalyzer/service_endpoint_resolver_gen.go @@ -62,7 +62,7 @@ func (r resolverV2) ResolveEndpoint(ctx context.Context, params accessanalyzer.E }) params.UseFIPS = aws.Bool(false) } else { - err = fmt.Errorf("looking up accessanalyzer endpoint %q: %s", hostname, err) + err = fmt.Errorf("looking up accessanalyzer endpoint %q: %w", hostname, err) return } } else { diff --git a/internal/service/accessanalyzer/service_endpoints_gen_test.go b/internal/service/accessanalyzer/service_endpoints_gen_test.go index c5499d67f5c5..d1726139b2d1 100644 --- a/internal/service/accessanalyzer/service_endpoints_gen_test.go +++ 
b/internal/service/accessanalyzer/service_endpoints_gen_test.go @@ -521,7 +521,7 @@ func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { ) } -var errCancelOperation = fmt.Errorf("Test: Canceling request") +var errCancelOperation = errors.New("Test: Canceling request") func addCancelRequestMiddleware() func(*middleware.Stack) error { return func(stack *middleware.Stack) error { diff --git a/internal/service/accessanalyzer/service_package_gen.go b/internal/service/accessanalyzer/service_package_gen.go index cfccf6606481..ffda1b88d8da 100644 --- a/internal/service/accessanalyzer/service_package_gen.go +++ b/internal/service/accessanalyzer/service_package_gen.go @@ -7,7 +7,6 @@ import ( "unique" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/accessanalyzer" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -73,7 +72,7 @@ func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) ( func(o *accessanalyzer.Options) { if inContext, ok := conns.FromContext(ctx); ok && inContext.VCREnabled() { tflog.Info(ctx, "overriding retry behavior to immediately return VCR errors") - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(vcr.InteractionNotFoundRetryableFunc)) + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), vcr.InteractionNotFoundRetryableFunc) } }, withExtraOptions(ctx, p, config), diff --git a/internal/service/accessanalyzer/sweep.go b/internal/service/accessanalyzer/sweep.go index fb28093397f3..8075b5b9e535 100644 --- a/internal/service/accessanalyzer/sweep.go +++ b/internal/service/accessanalyzer/sweep.go @@ -25,7 +25,7 @@ func sweepAnalyzers(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("getting client: %s", err) + return 
fmt.Errorf("getting client: %w", err) } conn := client.AccessAnalyzerClient(ctx) sweepResources := make([]sweep.Sweepable, 0) diff --git a/internal/service/accessanalyzer/tags_gen.go b/internal/service/accessanalyzer/tags_gen.go index d9c38dd2c4ca..5170bb8de3f2 100644 --- a/internal/service/accessanalyzer/tags_gen.go +++ b/internal/service/accessanalyzer/tags_gen.go @@ -3,8 +3,8 @@ package accessanalyzer import ( "context" - "fmt" + "github.com/YakDriver/smarterr" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/accessanalyzer" "github.com/hashicorp/terraform-plugin-log/tflog" @@ -26,7 +26,7 @@ func listTags(ctx context.Context, conn *accessanalyzer.Client, identifier strin output, err := conn.ListTagsForResource(ctx, &input, optFns...) if err != nil { - return tftags.New(ctx, nil), err + return tftags.New(ctx, nil), smarterr.NewError(err) } return keyValueTags(ctx, output.Tags), nil @@ -38,7 +38,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri tags, err := listTags(ctx, meta.(*conns.AWSClient).AccessAnalyzerClient(ctx), identifier) if err != nil { - return err + return smarterr.NewError(err) } if inContext, ok := tftags.FromContext(ctx); ok { @@ -99,7 +99,7 @@ func updateTags(ctx context.Context, conn *accessanalyzer.Client, identifier str _, err := conn.UntagResource(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("untagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } @@ -114,7 +114,7 @@ func updateTags(ctx context.Context, conn *accessanalyzer.Client, identifier str _, err := conn.TagResource(ctx, &input, optFns...) 
if err != nil { - return fmt.Errorf("tagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } diff --git a/internal/service/account/service_endpoint_resolver_gen.go b/internal/service/account/service_endpoint_resolver_gen.go index 55e9cb6c98ff..9e7dca937084 100644 --- a/internal/service/account/service_endpoint_resolver_gen.go +++ b/internal/service/account/service_endpoint_resolver_gen.go @@ -62,7 +62,7 @@ func (r resolverV2) ResolveEndpoint(ctx context.Context, params account.Endpoint }) params.UseFIPS = aws.Bool(false) } else { - err = fmt.Errorf("looking up account endpoint %q: %s", hostname, err) + err = fmt.Errorf("looking up account endpoint %q: %w", hostname, err) return } } else { diff --git a/internal/service/account/service_endpoints_gen_test.go b/internal/service/account/service_endpoints_gen_test.go index f4eaeb3275f9..b3fa7f270174 100644 --- a/internal/service/account/service_endpoints_gen_test.go +++ b/internal/service/account/service_endpoints_gen_test.go @@ -521,7 +521,7 @@ func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { ) } -var errCancelOperation = fmt.Errorf("Test: Canceling request") +var errCancelOperation = errors.New("Test: Canceling request") func addCancelRequestMiddleware() func(*middleware.Stack) error { return func(stack *middleware.Stack) error { diff --git a/internal/service/account/service_package_gen.go b/internal/service/account/service_package_gen.go index e2b4451fd74f..06c35090022a 100644 --- a/internal/service/account/service_package_gen.go +++ b/internal/service/account/service_package_gen.go @@ -7,7 +7,6 @@ import ( "unique" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/account" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -83,7 +82,7 @@ func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) ( func(o 
*account.Options) { if inContext, ok := conns.FromContext(ctx); ok && inContext.VCREnabled() { tflog.Info(ctx, "overriding retry behavior to immediately return VCR errors") - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(vcr.InteractionNotFoundRetryableFunc)) + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), vcr.InteractionNotFoundRetryableFunc) } }, withExtraOptions(ctx, p, config), diff --git a/internal/service/acm/certificate.go b/internal/service/acm/certificate.go index 36ba3f24a2cc..f70bbee6e212 100644 --- a/internal/service/acm/certificate.go +++ b/internal/service/acm/certificate.go @@ -161,6 +161,12 @@ func resourceCertificate() *schema.Resource { ValidateDiagFunc: enum.Validate[types.CertificateTransparencyLoggingPreference](), ConflictsWith: []string{"certificate_body", names.AttrCertificateChain, names.AttrPrivateKey}, }, + "export": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateDiagFunc: enum.Validate[types.CertificateExport](), + }, }, }, }, @@ -546,8 +552,8 @@ func resourceCertificateDelete(ctx context.Context, d *schema.ResourceData, meta input := acm.DeleteCertificateInput{ CertificateArn: aws.String(d.Id()), } - _, err := tfresource.RetryWhenIsA[*types.ResourceInUseException](ctx, certificateCrossServicePropagationTimeout, - func() (any, error) { + _, err := tfresource.RetryWhenIsA[any, *types.ResourceInUseException](ctx, certificateCrossServicePropagationTimeout, + func(ctx context.Context) (any, error) { return conn.DeleteCertificate(ctx, &input) }) @@ -624,6 +630,10 @@ func expandCertificateOptions(tfMap map[string]any) *types.CertificateOptions { apiObject.CertificateTransparencyLoggingPreference = types.CertificateTransparencyLoggingPreference(v) } + if v, ok := tfMap["export"].(string); ok && v != "" { + apiObject.Export = types.CertificateExport(v) + } + return apiObject } @@ -636,6 +646,10 @@ func flattenCertificateOptions(apiObject 
*types.CertificateOptions) map[string]a tfMap["certificate_transparency_logging_preference"] = apiObject.CertificateTransparencyLoggingPreference + if apiObject.Export != "" { + tfMap["export"] = apiObject.Export + } + return tfMap } diff --git a/internal/service/acm/certificate_data_source.go b/internal/service/acm/certificate_data_source.go index 712a78fd95f4..6e0d6f88ca97 100644 --- a/internal/service/acm/certificate_data_source.go +++ b/internal/service/acm/certificate_data_source.go @@ -122,8 +122,8 @@ func dataSourceCertificateRead(ctx context.Context, d *schema.ResourceData, meta const ( timeout = 1 * time.Minute ) - certificateSummaries, err := tfresource.RetryGWhenNotFound(ctx, timeout, - func() ([]awstypes.CertificateSummary, error) { + certificateSummaries, err := tfresource.RetryWhenNotFound(ctx, timeout, + func(ctx context.Context) ([]awstypes.CertificateSummary, error) { output, err := findCertificates(ctx, conn, &input, f) switch { case err != nil: diff --git a/internal/service/acm/certificate_data_source_tags_gen_test.go b/internal/service/acm/certificate_data_source_tags_gen_test.go index 66c23e395ba5..74bad4f42383 100644 --- a/internal/service/acm/certificate_data_source_tags_gen_test.go +++ b/internal/service/acm/certificate_data_source_tags_gen_test.go @@ -16,11 +16,12 @@ import ( func TestAccACMCertificateDataSource_tags(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_acm_certificate.test" privateKeyPEM := acctest.TLSRSAPrivateKeyPEM(t, 2048) certificatePEM := acctest.TLSRSAX509SelfSignedCertificatePEM(t, privateKeyPEM, acctest.RandomDomain().String()) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ACMServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -46,11 +47,12 @@ func TestAccACMCertificateDataSource_tags(t *testing.T) { func 
TestAccACMCertificateDataSource_tags_NullMap(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_acm_certificate.test" privateKeyPEM := acctest.TLSRSAPrivateKeyPEM(t, 2048) certificatePEM := acctest.TLSRSAX509SelfSignedCertificatePEM(t, privateKeyPEM, acctest.RandomDomain().String()) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ACMServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -72,11 +74,12 @@ func TestAccACMCertificateDataSource_tags_NullMap(t *testing.T) { func TestAccACMCertificateDataSource_tags_EmptyMap(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_acm_certificate.test" privateKeyPEM := acctest.TLSRSAPrivateKeyPEM(t, 2048) certificatePEM := acctest.TLSRSAX509SelfSignedCertificatePEM(t, privateKeyPEM, acctest.RandomDomain().String()) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ACMServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -98,11 +101,12 @@ func TestAccACMCertificateDataSource_tags_EmptyMap(t *testing.T) { func TestAccACMCertificateDataSource_tags_DefaultTags_nonOverlapping(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_acm_certificate.test" privateKeyPEM := acctest.TLSRSAPrivateKeyPEM(t, 2048) certificatePEM := acctest.TLSRSAX509SelfSignedCertificatePEM(t, privateKeyPEM, acctest.RandomDomain().String()) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ACMServiceID), Steps: []resource.TestStep{ @@ -132,11 +136,12 @@ func TestAccACMCertificateDataSource_tags_DefaultTags_nonOverlapping(t *testing. 
func TestAccACMCertificateDataSource_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_acm_certificate.test" privateKeyPEM := acctest.TLSRSAPrivateKeyPEM(t, 2048) certificatePEM := acctest.TLSRSAX509SelfSignedCertificatePEM(t, privateKeyPEM, acctest.RandomDomain().String()) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ACMServiceID), Steps: []resource.TestStep{ @@ -172,11 +177,12 @@ func TestAccACMCertificateDataSource_tags_IgnoreTags_Overlap_DefaultTag(t *testi func TestAccACMCertificateDataSource_tags_IgnoreTags_Overlap_ResourceTag(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_acm_certificate.test" privateKeyPEM := acctest.TLSRSAPrivateKeyPEM(t, 2048) certificatePEM := acctest.TLSRSAX509SelfSignedCertificatePEM(t, privateKeyPEM, acctest.RandomDomain().String()) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ACMServiceID), Steps: []resource.TestStep{ diff --git a/internal/service/acm/certificate_identity_gen_test.go b/internal/service/acm/certificate_identity_gen_test.go index 0f42202fd8fe..ee9734202278 100644 --- a/internal/service/acm/certificate_identity_gen_test.go +++ b/internal/service/acm/certificate_identity_gen_test.go @@ -15,6 +15,7 @@ import ( "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" "github.com/hashicorp/terraform-plugin-testing/tfversion" "github.com/hashicorp/terraform-provider-aws/internal/acctest" + tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -26,7 +27,7 @@ func TestAccACMCertificate_Identity_Basic(t *testing.T) { privateKeyPEM := acctest.TLSRSAPrivateKeyPEM(t, 2048) certificatePEM 
:= acctest.TLSRSAX509SelfSignedCertificatePEM(t, privateKeyPEM, acctest.RandomDomain().String()) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ TerraformVersionChecks: []tfversion.TerraformVersionCheck{ tfversion.SkipBelow(tfversion.Version1_12_0), }, @@ -48,6 +49,9 @@ func TestAccACMCertificate_Identity_Basic(t *testing.T) { ConfigStateChecks: []statecheck.StateCheck{ statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New(names.AttrARN), compare.ValuesSame()), statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), }, }, @@ -120,7 +124,7 @@ func TestAccACMCertificate_Identity_RegionOverride(t *testing.T) { privateKeyPEM := acctest.TLSRSAPrivateKeyPEM(t, 2048) certificatePEM := acctest.TLSRSAX509SelfSignedCertificatePEM(t, privateKeyPEM, acctest.RandomDomain().String()) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ TerraformVersionChecks: []tfversion.TerraformVersionCheck{ tfversion.SkipBelow(tfversion.Version1_12_0), }, @@ -140,6 +144,9 @@ func TestAccACMCertificate_Identity_RegionOverride(t *testing.T) { ConfigStateChecks: []statecheck.StateCheck{ statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New(names.AttrARN), compare.ValuesSame()), statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), }, }, @@ -248,3 +255,138 @@ func 
TestAccACMCertificate_Identity_RegionOverride(t *testing.T) { }, }) } + +func TestAccACMCertificate_Identity_ExistingResource(t *testing.T) { + ctx := acctest.Context(t) + + var v types.CertificateDetail + resourceName := "aws_acm_certificate.test" + privateKeyPEM := acctest.TLSRSAPrivateKeyPEM(t, 2048) + certificatePEM := acctest.TLSRSAX509SelfSignedCertificatePEM(t, privateKeyPEM, acctest.RandomDomain().String()) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.ACMServiceID), + CheckDestroy: testAccCheckCertificateDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/Certificate/basic_v5.100.0/"), + ConfigVariables: config.Variables{ + acctest.CtCertificatePEM: config.StringVariable(certificatePEM), + acctest.CtPrivateKeyPEM: config.StringVariable(privateKeyPEM), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckCertificateExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: v6.0 Identity error + { + ConfigDirectory: config.StaticDirectory("testdata/Certificate/basic_v6.0.0/"), + ConfigVariables: config.Variables{ + acctest.CtCertificatePEM: config.StringVariable(certificatePEM), + acctest.CtPrivateKeyPEM: config.StringVariable(privateKeyPEM), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckCertificateExists(ctx, resourceName, &v), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + 
ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.Null(), + }), + }, + }, + + // Step 3: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Certificate/basic/"), + ConfigVariables: config.Variables{ + acctest.CtCertificatePEM: config.StringVariable(certificatePEM), + acctest.CtPrivateKeyPEM: config.StringVariable(privateKeyPEM), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + }, + }) +} + +func TestAccACMCertificate_Identity_ExistingResource_NoRefresh_NoChange(t *testing.T) { + ctx := acctest.Context(t) + + var v types.CertificateDetail + resourceName := "aws_acm_certificate.test" + privateKeyPEM := acctest.TLSRSAPrivateKeyPEM(t, 2048) + certificatePEM := acctest.TLSRSAX509SelfSignedCertificatePEM(t, privateKeyPEM, acctest.RandomDomain().String()) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.ACMServiceID), + CheckDestroy: testAccCheckCertificateDestroy(ctx), + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: 
config.StaticDirectory("testdata/Certificate/basic_v5.100.0/"), + ConfigVariables: config.Variables{ + acctest.CtCertificatePEM: config.StringVariable(certificatePEM), + acctest.CtPrivateKeyPEM: config.StringVariable(privateKeyPEM), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckCertificateExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Certificate/basic/"), + ConfigVariables: config.Variables{ + acctest.CtCertificatePEM: config.StringVariable(certificatePEM), + acctest.CtPrivateKeyPEM: config.StringVariable(privateKeyPEM), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckCertificateExists(ctx, resourceName, &v), + ), + }, + }, + }) +} diff --git a/internal/service/acm/certificate_tags_gen_test.go b/internal/service/acm/certificate_tags_gen_test.go index 313de408b08c..457278b29498 100644 --- a/internal/service/acm/certificate_tags_gen_test.go +++ b/internal/service/acm/certificate_tags_gen_test.go @@ -18,12 +18,13 @@ import ( func TestAccACMCertificate_tags(t *testing.T) { ctx := acctest.Context(t) + var v types.CertificateDetail resourceName := "aws_acm_certificate.test" privateKeyPEM := acctest.TLSRSAPrivateKeyPEM(t, 2048) certificatePEM := acctest.TLSRSAX509SelfSignedCertificatePEM(t, privateKeyPEM, acctest.RandomDomain().String()) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ACMServiceID), CheckDestroy: testAccCheckCertificateDestroy(ctx), @@ -221,12 +222,13 @@ func TestAccACMCertificate_tags(t *testing.T) { func TestAccACMCertificate_tags_null(t *testing.T) { ctx := acctest.Context(t) + var v types.CertificateDetail resourceName := 
"aws_acm_certificate.test" privateKeyPEM := acctest.TLSRSAPrivateKeyPEM(t, 2048) certificatePEM := acctest.TLSRSAX509SelfSignedCertificatePEM(t, privateKeyPEM, acctest.RandomDomain().String()) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ACMServiceID), CheckDestroy: testAccCheckCertificateDestroy(ctx), @@ -295,12 +297,13 @@ func TestAccACMCertificate_tags_null(t *testing.T) { func TestAccACMCertificate_tags_EmptyMap(t *testing.T) { ctx := acctest.Context(t) + var v types.CertificateDetail resourceName := "aws_acm_certificate.test" privateKeyPEM := acctest.TLSRSAPrivateKeyPEM(t, 2048) certificatePEM := acctest.TLSRSAX509SelfSignedCertificatePEM(t, privateKeyPEM, acctest.RandomDomain().String()) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ACMServiceID), CheckDestroy: testAccCheckCertificateDestroy(ctx), @@ -365,12 +368,13 @@ func TestAccACMCertificate_tags_EmptyMap(t *testing.T) { func TestAccACMCertificate_tags_AddOnUpdate(t *testing.T) { ctx := acctest.Context(t) + var v types.CertificateDetail resourceName := "aws_acm_certificate.test" privateKeyPEM := acctest.TLSRSAPrivateKeyPEM(t, 2048) certificatePEM := acctest.TLSRSAX509SelfSignedCertificatePEM(t, privateKeyPEM, acctest.RandomDomain().String()) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ACMServiceID), CheckDestroy: testAccCheckCertificateDestroy(ctx), @@ -453,12 +457,13 @@ func TestAccACMCertificate_tags_AddOnUpdate(t *testing.T) { func TestAccACMCertificate_tags_EmptyTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) + var v types.CertificateDetail resourceName := "aws_acm_certificate.test" 
privateKeyPEM := acctest.TLSRSAPrivateKeyPEM(t, 2048) certificatePEM := acctest.TLSRSAX509SelfSignedCertificatePEM(t, privateKeyPEM, acctest.RandomDomain().String()) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ACMServiceID), CheckDestroy: testAccCheckCertificateDestroy(ctx), @@ -553,12 +558,13 @@ func TestAccACMCertificate_tags_EmptyTag_OnCreate(t *testing.T) { func TestAccACMCertificate_tags_EmptyTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + var v types.CertificateDetail resourceName := "aws_acm_certificate.test" privateKeyPEM := acctest.TLSRSAPrivateKeyPEM(t, 2048) certificatePEM := acctest.TLSRSAX509SelfSignedCertificatePEM(t, privateKeyPEM, acctest.RandomDomain().String()) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ACMServiceID), CheckDestroy: testAccCheckCertificateDestroy(ctx), @@ -702,12 +708,13 @@ func TestAccACMCertificate_tags_EmptyTag_OnUpdate_Add(t *testing.T) { func TestAccACMCertificate_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + var v types.CertificateDetail resourceName := "aws_acm_certificate.test" privateKeyPEM := acctest.TLSRSAPrivateKeyPEM(t, 2048) certificatePEM := acctest.TLSRSAX509SelfSignedCertificatePEM(t, privateKeyPEM, acctest.RandomDomain().String()) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ACMServiceID), CheckDestroy: testAccCheckCertificateDestroy(ctx), @@ -798,12 +805,13 @@ func TestAccACMCertificate_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { func TestAccACMCertificate_tags_DefaultTags_providerOnly(t *testing.T) { ctx := acctest.Context(t) + var v types.CertificateDetail 
resourceName := "aws_acm_certificate.test" privateKeyPEM := acctest.TLSRSAPrivateKeyPEM(t, 2048) certificatePEM := acctest.TLSRSAX509SelfSignedCertificatePEM(t, privateKeyPEM, acctest.RandomDomain().String()) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ACMServiceID), CheckDestroy: testAccCheckCertificateDestroy(ctx), @@ -1000,12 +1008,13 @@ func TestAccACMCertificate_tags_DefaultTags_providerOnly(t *testing.T) { func TestAccACMCertificate_tags_DefaultTags_nonOverlapping(t *testing.T) { ctx := acctest.Context(t) + var v types.CertificateDetail resourceName := "aws_acm_certificate.test" privateKeyPEM := acctest.TLSRSAPrivateKeyPEM(t, 2048) certificatePEM := acctest.TLSRSAX509SelfSignedCertificatePEM(t, privateKeyPEM, acctest.RandomDomain().String()) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ACMServiceID), CheckDestroy: testAccCheckCertificateDestroy(ctx), @@ -1176,12 +1185,13 @@ func TestAccACMCertificate_tags_DefaultTags_nonOverlapping(t *testing.T) { func TestAccACMCertificate_tags_DefaultTags_overlapping(t *testing.T) { ctx := acctest.Context(t) + var v types.CertificateDetail resourceName := "aws_acm_certificate.test" privateKeyPEM := acctest.TLSRSAPrivateKeyPEM(t, 2048) certificatePEM := acctest.TLSRSAX509SelfSignedCertificatePEM(t, privateKeyPEM, acctest.RandomDomain().String()) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ACMServiceID), CheckDestroy: testAccCheckCertificateDestroy(ctx), @@ -1368,12 +1378,13 @@ func TestAccACMCertificate_tags_DefaultTags_overlapping(t *testing.T) { func TestAccACMCertificate_tags_DefaultTags_updateToProviderOnly(t 
*testing.T) { ctx := acctest.Context(t) + var v types.CertificateDetail resourceName := "aws_acm_certificate.test" privateKeyPEM := acctest.TLSRSAPrivateKeyPEM(t, 2048) certificatePEM := acctest.TLSRSAX509SelfSignedCertificatePEM(t, privateKeyPEM, acctest.RandomDomain().String()) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ACMServiceID), CheckDestroy: testAccCheckCertificateDestroy(ctx), @@ -1465,12 +1476,13 @@ func TestAccACMCertificate_tags_DefaultTags_updateToProviderOnly(t *testing.T) { func TestAccACMCertificate_tags_DefaultTags_updateToResourceOnly(t *testing.T) { ctx := acctest.Context(t) + var v types.CertificateDetail resourceName := "aws_acm_certificate.test" privateKeyPEM := acctest.TLSRSAPrivateKeyPEM(t, 2048) certificatePEM := acctest.TLSRSAX509SelfSignedCertificatePEM(t, privateKeyPEM, acctest.RandomDomain().String()) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ACMServiceID), CheckDestroy: testAccCheckCertificateDestroy(ctx), @@ -1561,12 +1573,13 @@ func TestAccACMCertificate_tags_DefaultTags_updateToResourceOnly(t *testing.T) { func TestAccACMCertificate_tags_DefaultTags_emptyResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v types.CertificateDetail resourceName := "aws_acm_certificate.test" privateKeyPEM := acctest.TLSRSAPrivateKeyPEM(t, 2048) certificatePEM := acctest.TLSRSAX509SelfSignedCertificatePEM(t, privateKeyPEM, acctest.RandomDomain().String()) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ACMServiceID), CheckDestroy: testAccCheckCertificateDestroy(ctx), @@ -1632,12 +1645,13 @@ func 
TestAccACMCertificate_tags_DefaultTags_emptyResourceTag(t *testing.T) { func TestAccACMCertificate_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T) { ctx := acctest.Context(t) + var v types.CertificateDetail resourceName := "aws_acm_certificate.test" privateKeyPEM := acctest.TLSRSAPrivateKeyPEM(t, 2048) certificatePEM := acctest.TLSRSAX509SelfSignedCertificatePEM(t, privateKeyPEM, acctest.RandomDomain().String()) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ACMServiceID), CheckDestroy: testAccCheckCertificateDestroy(ctx), @@ -1695,12 +1709,13 @@ func TestAccACMCertificate_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T) { func TestAccACMCertificate_tags_DefaultTags_nullOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v types.CertificateDetail resourceName := "aws_acm_certificate.test" privateKeyPEM := acctest.TLSRSAPrivateKeyPEM(t, 2048) certificatePEM := acctest.TLSRSAX509SelfSignedCertificatePEM(t, privateKeyPEM, acctest.RandomDomain().String()) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ACMServiceID), CheckDestroy: testAccCheckCertificateDestroy(ctx), @@ -1763,12 +1778,13 @@ func TestAccACMCertificate_tags_DefaultTags_nullOverlappingResourceTag(t *testin func TestAccACMCertificate_tags_DefaultTags_nullNonOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v types.CertificateDetail resourceName := "aws_acm_certificate.test" privateKeyPEM := acctest.TLSRSAPrivateKeyPEM(t, 2048) certificatePEM := acctest.TLSRSAX509SelfSignedCertificatePEM(t, privateKeyPEM, acctest.RandomDomain().String()) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: 
acctest.ErrorCheck(t, names.ACMServiceID), CheckDestroy: testAccCheckCertificateDestroy(ctx), @@ -1831,12 +1847,13 @@ func TestAccACMCertificate_tags_DefaultTags_nullNonOverlappingResourceTag(t *tes func TestAccACMCertificate_tags_ComputedTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) + var v types.CertificateDetail resourceName := "aws_acm_certificate.test" privateKeyPEM := acctest.TLSRSAPrivateKeyPEM(t, 2048) certificatePEM := acctest.TLSRSAX509SelfSignedCertificatePEM(t, privateKeyPEM, acctest.RandomDomain().String()) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ACMServiceID), CheckDestroy: testAccCheckCertificateDestroy(ctx), @@ -1892,12 +1909,13 @@ func TestAccACMCertificate_tags_ComputedTag_OnCreate(t *testing.T) { func TestAccACMCertificate_tags_ComputedTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + var v types.CertificateDetail resourceName := "aws_acm_certificate.test" privateKeyPEM := acctest.TLSRSAPrivateKeyPEM(t, 2048) certificatePEM := acctest.TLSRSAX509SelfSignedCertificatePEM(t, privateKeyPEM, acctest.RandomDomain().String()) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ACMServiceID), CheckDestroy: testAccCheckCertificateDestroy(ctx), @@ -1996,12 +2014,13 @@ func TestAccACMCertificate_tags_ComputedTag_OnUpdate_Add(t *testing.T) { func TestAccACMCertificate_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + var v types.CertificateDetail resourceName := "aws_acm_certificate.test" privateKeyPEM := acctest.TLSRSAPrivateKeyPEM(t, 2048) certificatePEM := acctest.TLSRSAX509SelfSignedCertificatePEM(t, privateKeyPEM, acctest.RandomDomain().String()) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, 
resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ACMServiceID), CheckDestroy: testAccCheckCertificateDestroy(ctx), @@ -2090,12 +2109,13 @@ func TestAccACMCertificate_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { func TestAccACMCertificate_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { ctx := acctest.Context(t) + var v types.CertificateDetail resourceName := "aws_acm_certificate.test" privateKeyPEM := acctest.TLSRSAPrivateKeyPEM(t, 2048) certificatePEM := acctest.TLSRSAX509SelfSignedCertificatePEM(t, privateKeyPEM, acctest.RandomDomain().String()) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ACMServiceID), CheckDestroy: testAccCheckCertificateDestroy(ctx), @@ -2256,12 +2276,13 @@ func TestAccACMCertificate_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { func TestAccACMCertificate_tags_IgnoreTags_Overlap_ResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v types.CertificateDetail resourceName := "aws_acm_certificate.test" privateKeyPEM := acctest.TLSRSAPrivateKeyPEM(t, 2048) certificatePEM := acctest.TLSRSAX509SelfSignedCertificatePEM(t, privateKeyPEM, acctest.RandomDomain().String()) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ACMServiceID), CheckDestroy: testAccCheckCertificateDestroy(ctx), diff --git a/internal/service/acm/certificate_test.go b/internal/service/acm/certificate_test.go index bdc5dc60744a..3b79a60221af 100644 --- a/internal/service/acm/certificate_test.go +++ b/internal/service/acm/certificate_test.go @@ -18,14 +18,10 @@ import ( "github.com/hashicorp/terraform-plugin-testing/config" "github.com/hashicorp/terraform-plugin-testing/helper/resource" 
"github.com/hashicorp/terraform-plugin-testing/knownvalue" - "github.com/hashicorp/terraform-plugin-testing/plancheck" "github.com/hashicorp/terraform-plugin-testing/statecheck" "github.com/hashicorp/terraform-plugin-testing/terraform" "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" - "github.com/hashicorp/terraform-plugin-testing/tfversion" "github.com/hashicorp/terraform-provider-aws/internal/acctest" - tfknownvalue "github.com/hashicorp/terraform-provider-aws/internal/acctest/knownvalue" - tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" "github.com/hashicorp/terraform-provider-aws/internal/conns" tfacm "github.com/hashicorp/terraform-provider-aws/internal/service/acm" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" @@ -55,6 +51,8 @@ func TestAccACMCertificate_emailValidation(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "early_renewal_duration", ""), resource.TestCheckResourceAttr(resourceName, "not_after", ""), resource.TestCheckResourceAttr(resourceName, "not_before", ""), + resource.TestCheckResourceAttr(resourceName, "options.#", "1"), + resource.TestCheckResourceAttr(resourceName, "options.0.export", string(types.CertificateExportDisabled)), resource.TestCheckResourceAttr(resourceName, "pending_renewal", acctest.CtFalse), resource.TestCheckResourceAttr(resourceName, names.AttrStatus, string(types.CertificateStatusPendingValidation)), resource.TestCheckResourceAttr(resourceName, "subject_alternative_names.#", "1"), @@ -1734,80 +1732,45 @@ func TestAccACMCertificate_PrivateKey_ReimportWithTags(t *testing.T) { }) } -func TestAccACMCertificate_Identity_ExistingResource(t *testing.T) { +func TestAccACMCertificate_optionExport(t *testing.T) { + // Issuing an exportable ACM Certificate is expensive. + // Skip the test by default and only run if the environment variable is set. 
+ acctest.SkipIfEnvVarNotSet(t, "ACM_TEST_CERTIFICATE_EXPORT") ctx := acctest.Context(t) resourceName := "aws_acm_certificate.test" rootDomain := acctest.ACMCertificateDomainFromEnv(t) - domain := acctest.ACMCertificateRandomSubDomain(rootDomain) var v types.CertificateDetail resource.ParallelTest(t, resource.TestCase{ - TerraformVersionChecks: []tfversion.TerraformVersionCheck{ - tfversion.SkipBelow(tfversion.Version1_12_0), - }, - PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, names.ACMServiceID), - CheckDestroy: testAccCheckCertificateDestroy(ctx), + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.ACMServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckCertificateDestroy(ctx), Steps: []resource.TestStep{ { - ExternalProviders: map[string]resource.ExternalProvider{ - "aws": { - Source: "hashicorp/aws", - VersionConstraint: "5.100.0", - }, - }, - Config: testAccCertificateConfig_basic(domain, types.ValidationMethodDns), - Check: resource.ComposeTestCheckFunc( - testAccCheckCertificateExists(ctx, resourceName, &v), - ), - ConfigStateChecks: []statecheck.StateCheck{ - tfstatecheck.ExpectNoIdentity(resourceName), - }, - }, - { - ExternalProviders: map[string]resource.ExternalProvider{ - "aws": { - Source: "hashicorp/aws", - VersionConstraint: "6.0.0", - }, - }, - Config: testAccCertificateConfig_basic(domain, types.ValidationMethodDns), - Check: resource.ComposeTestCheckFunc( + Config: testAccCertificateConfig_optionExport(rootDomain, types.ValidationMethodDns, types.CertificateExportEnabled), + Check: resource.ComposeAggregateTestCheckFunc( testAccCheckCertificateExists(ctx, resourceName, &v), - ), - ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - PostApplyPostRefresh: []plancheck.PlanCheck{ - 
plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - }, - ConfigStateChecks: []statecheck.StateCheck{ - statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ - names.AttrARN: knownvalue.Null(), + acctest.MatchResourceAttrRegionalARN(ctx, resourceName, names.AttrARN, "acm", regexache.MustCompile("certificate/.+$")), + resource.TestCheckResourceAttr(resourceName, names.AttrDomainName, rootDomain), + resource.TestCheckResourceAttr(resourceName, "domain_validation_options.#", "1"), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "domain_validation_options.*", map[string]string{ + names.AttrDomainName: rootDomain, + "resource_record_type": "CNAME", }), - }, + resource.TestCheckResourceAttr(resourceName, "options.#", "1"), + resource.TestCheckResourceAttr(resourceName, "options.0.export", string(types.CertificateExportEnabled)), + resource.TestCheckResourceAttr(resourceName, "subject_alternative_names.#", "1"), + resource.TestCheckTypeSetElemAttr(resourceName, "subject_alternative_names.*", rootDomain), + resource.TestCheckResourceAttr(resourceName, names.AttrStatus, string(types.CertificateStatusPendingValidation)), + resource.TestCheckResourceAttr(resourceName, "validation_emails.#", "0"), + resource.TestCheckResourceAttr(resourceName, "validation_method", string(types.ValidationMethodDns)), + ), }, { - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - Config: testAccCertificateConfig_basic(domain, types.ValidationMethodDns), - Check: resource.ComposeTestCheckFunc( - testAccCheckCertificateExists(ctx, resourceName, &v), - ), - ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - PostApplyPostRefresh: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - }, - ConfigStateChecks: []statecheck.StateCheck{ - 
statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ - names.AttrARN: tfknownvalue.RegionalARNRegexp("acm", regexache.MustCompile("certificate/.+$")), - }), - }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, }, }, }) @@ -2112,3 +2075,16 @@ resource "aws_acm_certificate" "test" { } `, domainName, validationMethod, keyAlgorithm) } + +func testAccCertificateConfig_optionExport(domainName string, validationMethod types.ValidationMethod, export types.CertificateExport) string { + return fmt.Sprintf(` +resource "aws_acm_certificate" "test" { + domain_name = %[1]q + validation_method = %[2]q + + options { + export = %[3]q + } +} +`, domainName, validationMethod, export) +} diff --git a/internal/service/acm/service_endpoint_resolver_gen.go b/internal/service/acm/service_endpoint_resolver_gen.go index 464025a8ed42..f47514408511 100644 --- a/internal/service/acm/service_endpoint_resolver_gen.go +++ b/internal/service/acm/service_endpoint_resolver_gen.go @@ -62,7 +62,7 @@ func (r resolverV2) ResolveEndpoint(ctx context.Context, params acm.EndpointPara }) params.UseFIPS = aws.Bool(false) } else { - err = fmt.Errorf("looking up acm endpoint %q: %s", hostname, err) + err = fmt.Errorf("looking up acm endpoint %q: %w", hostname, err) return } } else { diff --git a/internal/service/acm/service_endpoints_gen_test.go b/internal/service/acm/service_endpoints_gen_test.go index eb943e3f58da..715b96c17d86 100644 --- a/internal/service/acm/service_endpoints_gen_test.go +++ b/internal/service/acm/service_endpoints_gen_test.go @@ -521,7 +521,7 @@ func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { ) } -var errCancelOperation = fmt.Errorf("Test: Canceling request") +var errCancelOperation = errors.New("Test: Canceling request") func addCancelRequestMiddleware() func(*middleware.Stack) error { return func(stack *middleware.Stack) error { diff --git a/internal/service/acm/service_package_gen.go 
b/internal/service/acm/service_package_gen.go index c434dfd72bbf..b037e6f9000c 100644 --- a/internal/service/acm/service_package_gen.go +++ b/internal/service/acm/service_package_gen.go @@ -7,7 +7,6 @@ import ( "unique" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/acm" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -90,7 +89,7 @@ func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) ( func(o *acm.Options) { if inContext, ok := conns.FromContext(ctx); ok && inContext.VCREnabled() { tflog.Info(ctx, "overriding retry behavior to immediately return VCR errors") - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(vcr.InteractionNotFoundRetryableFunc)) + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), vcr.InteractionNotFoundRetryableFunc) } }, withExtraOptions(ctx, p, config), diff --git a/internal/service/acm/sweep.go b/internal/service/acm/sweep.go index b350671e4fb3..90d8ff84b51c 100644 --- a/internal/service/acm/sweep.go +++ b/internal/service/acm/sweep.go @@ -9,7 +9,9 @@ import ( "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/acm" + awstypes "github.com/aws/aws-sdk-go-v2/service/acm/types" "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-provider-aws/internal/enum" "github.com/hashicorp/terraform-provider-aws/internal/sweep" "github.com/hashicorp/terraform-provider-aws/internal/sweep/awsv2" ) @@ -43,12 +45,17 @@ func sweepCertificates(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.ACMClient(ctx) var sweepResources []sweep.Sweepable - input := acm.ListCertificatesInput{} 
+ input := acm.ListCertificatesInput{ + Includes: &awstypes.Filters{ + // By default, ListCertificates only returns RSA_1024 and RSA_2048 certificates + KeyTypes: enum.EnumValues[awstypes.KeyAlgorithm](), + }, + } pages := acm.NewListCertificatesPaginator(conn, &input) for pages.HasMorePages() { page, err := pages.NextPage(ctx) diff --git a/internal/service/acm/tags_gen.go b/internal/service/acm/tags_gen.go index 28861095d5a3..6b0a75fab189 100644 --- a/internal/service/acm/tags_gen.go +++ b/internal/service/acm/tags_gen.go @@ -3,8 +3,8 @@ package acm import ( "context" - "fmt" + "github.com/YakDriver/smarterr" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/acm" awstypes "github.com/aws/aws-sdk-go-v2/service/acm/types" @@ -27,7 +27,7 @@ func listTags(ctx context.Context, conn *acm.Client, identifier string, optFns . output, err := conn.ListTagsForCertificate(ctx, &input, optFns...) if err != nil { - return tftags.New(ctx, nil), err + return tftags.New(ctx, nil), smarterr.NewError(err) } return keyValueTags(ctx, output.Tags), nil @@ -39,7 +39,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri tags, err := listTags(ctx, meta.(*conns.AWSClient).ACMClient(ctx), identifier) if err != nil { - return err + return smarterr.NewError(err) } if inContext, ok := tftags.FromContext(ctx); ok { @@ -117,7 +117,7 @@ func updateTags(ctx context.Context, conn *acm.Client, identifier string, oldTag _, err := conn.RemoveTagsFromCertificate(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("untagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } @@ -132,7 +132,7 @@ func updateTags(ctx context.Context, conn *acm.Client, identifier string, oldTag _, err := conn.AddTagsToCertificate(ctx, &input, optFns...) 
if err != nil { - return fmt.Errorf("tagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } diff --git a/internal/service/acm/testdata/Certificate/basic_v5.100.0/main_gen.tf b/internal/service/acm/testdata/Certificate/basic_v5.100.0/main_gen.tf new file mode 100644 index 000000000000..5820892a6746 --- /dev/null +++ b/internal/service/acm/testdata/Certificate/basic_v5.100.0/main_gen.tf @@ -0,0 +1,28 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resource "aws_acm_certificate" "test" { + certificate_body = var.certificate_pem + private_key = var.private_key_pem +} + +variable "certificate_pem" { + type = string + nullable = false +} + +variable "private_key_pem" { + type = string + nullable = false +} + +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "5.100.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/acm/testdata/Certificate/basic_v6.0.0/main_gen.tf b/internal/service/acm/testdata/Certificate/basic_v6.0.0/main_gen.tf new file mode 100644 index 000000000000..a3ea3c5132b3 --- /dev/null +++ b/internal/service/acm/testdata/Certificate/basic_v6.0.0/main_gen.tf @@ -0,0 +1,28 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_acm_certificate" "test" { + certificate_body = var.certificate_pem + private_key = var.private_key_pem +} + +variable "certificate_pem" { + type = string + nullable = false +} + +variable "private_key_pem" { + type = string + nullable = false +} + +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "6.0.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/acmpca/certificate.go b/internal/service/acmpca/certificate.go index 2026a8307df7..4516844fa904 100644 --- a/internal/service/acmpca/certificate.go +++ b/internal/service/acmpca/certificate.go @@ -40,7 +40,7 @@ import ( // @SDKResource("aws_acmpca_certificate", name="Certificate") // @ArnIdentity // @V60SDKv2Fix -// @WrappedImport(false) +// @CustomImport // @Testing(importIgnore="certificate_signing_request;signing_algorithm;template_arn;validity") // @Testing(plannableImportAction="Replace") func resourceCertificate() *schema.Resource { @@ -53,7 +53,9 @@ func resourceCertificate() *schema.Resource { // arn:aws:acm-pca:eu-west-1:555885746124:certificate-authority/08322ede-92f9-4200-8f21-c7d12b2b6edb/certificate/a4e9c2aa2ccfab625b1b9136464cd3a6 Importer: &schema.ResourceImporter{ StateContext: func(ctx context.Context, d *schema.ResourceData, meta any) ([]*schema.ResourceData, error) { - if err := importer.RegionalARN(ctx, d, names.AttrARN, []string{names.AttrID}); err != nil { + identitySpec := importer.IdentitySpec(ctx) + + if err := importer.RegionalARN(ctx, d, identitySpec); err != nil { return nil, err } @@ -164,7 +166,7 @@ func resourceCertificateCreate(ctx context.Context, d *schema.ResourceData, meta input.Validity = validity } - outputRaw, err := tfresource.RetryWhenIsAErrorMessageContains[*types.InvalidStateException](ctx, certificateAuthorityActiveTimeout, func() (any, error) { + outputRaw, err := tfresource.RetryWhenIsAErrorMessageContains[any, *types.InvalidStateException](ctx, 
certificateAuthorityActiveTimeout, func(ctx context.Context) (any, error) { return conn.IssueCertificate(ctx, &input) }, "The certificate authority is not in a valid state for issuing certificates") @@ -175,7 +177,7 @@ func resourceCertificateCreate(ctx context.Context, d *schema.ResourceData, meta d.SetId(aws.ToString(outputRaw.(*acmpca.IssueCertificateOutput).CertificateArn)) // Wait for certificate status to become ISSUED. - _, err = tfresource.RetryWhenIsA[*types.RequestInProgressException](ctx, certificateIssueTimeout, func() (any, error) { + _, err = tfresource.RetryWhenIsA[any, *types.RequestInProgressException](ctx, certificateIssueTimeout, func(ctx context.Context) (any, error) { return findCertificateByTwoPartKey(ctx, conn, d.Id(), certificateAuthorityARN) }) diff --git a/internal/service/acmpca/certificate_authority.go b/internal/service/acmpca/certificate_authority.go index 8864bdef40b1..310b8f68c448 100644 --- a/internal/service/acmpca/certificate_authority.go +++ b/internal/service/acmpca/certificate_authority.go @@ -38,7 +38,7 @@ const ( // @Tags(identifierAttribute="arn") // @ArnIdentity // @V60SDKv2Fix -// @WrappedImport(false) +// @CustomImport // @Testing(existsType="github.com/aws/aws-sdk-go-v2/service/acmpca/types;types.CertificateAuthority") // @Testing(generator="acctest.RandomDomainName()") // @Testing(importIgnore="permanent_deletion_time_in_days") @@ -50,10 +50,11 @@ func resourceCertificateAuthority() *schema.Resource { UpdateWithoutTimeout: resourceCertificateAuthorityUpdate, DeleteWithoutTimeout: resourceCertificateAuthorityDelete, - // TODO: handle default values on Import Importer: &schema.ResourceImporter{ StateContext: func(ctx context.Context, d *schema.ResourceData, meta any) ([]*schema.ResourceData, error) { - if err := importer.RegionalARN(ctx, d, names.AttrARN, []string{names.AttrID}); err != nil { + identitySpec := importer.IdentitySpec(ctx) + + if err := importer.RegionalARN(ctx, d, identitySpec); err != nil { return nil, err 
} @@ -366,7 +367,7 @@ func resourceCertificateAuthorityCreate(ctx context.Context, d *schema.ResourceD } // ValidationException: The ACM Private CA service account 'acm-pca-prod-pdx' requires getBucketAcl permissions for your S3 bucket 'tf-acc-test-5224996536060125340'. Check your S3 bucket permissions and try again. - outputRaw, err := tfresource.RetryWhenAWSErrMessageContains(ctx, 1*time.Minute, func() (any, error) { + outputRaw, err := tfresource.RetryWhenAWSErrMessageContains(ctx, 1*time.Minute, func(ctx context.Context) (any, error) { return conn.CreateCertificateAuthority(ctx, &input) }, "ValidationException", "Check your S3 bucket permissions and try again") diff --git a/internal/service/acmpca/certificate_authority_certificate_identity_gen_test.go b/internal/service/acmpca/certificate_authority_certificate_identity_gen_test.go index 2309b0f29445..ba87349e6f8c 100644 --- a/internal/service/acmpca/certificate_authority_certificate_identity_gen_test.go +++ b/internal/service/acmpca/certificate_authority_certificate_identity_gen_test.go @@ -15,6 +15,7 @@ import ( "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" "github.com/hashicorp/terraform-plugin-testing/tfversion" "github.com/hashicorp/terraform-provider-aws/internal/acctest" + tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -25,7 +26,7 @@ func TestAccACMPCACertificateAuthorityCertificate_Identity_Basic(t *testing.T) { resourceName := "aws_acmpca_certificate_authority_certificate.test" rName := acctest.RandomDomainName() - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ TerraformVersionChecks: []tfversion.TerraformVersionCheck{ tfversion.SkipBelow(tfversion.Version1_12_0), }, @@ -46,6 +47,9 @@ func TestAccACMPCACertificateAuthorityCertificate_Identity_Basic(t *testing.T) { ConfigStateChecks: []statecheck.StateCheck{ statecheck.CompareValuePairs(resourceName, 
tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New("certificate_authority_arn"), compare.ValuesSame()), statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + "certificate_authority_arn": knownvalue.NotNull(), + }), statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New("certificate_authority_arn")), }, }, @@ -107,7 +111,7 @@ func TestAccACMPCACertificateAuthorityCertificate_Identity_RegionOverride(t *tes resourceName := "aws_acmpca_certificate_authority_certificate.test" rName := acctest.RandomDomainName() - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ TerraformVersionChecks: []tfversion.TerraformVersionCheck{ tfversion.SkipBelow(tfversion.Version1_12_0), }, @@ -126,6 +130,9 @@ func TestAccACMPCACertificateAuthorityCertificate_Identity_RegionOverride(t *tes ConfigStateChecks: []statecheck.StateCheck{ statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New("certificate_authority_arn"), compare.ValuesSame()), statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + "certificate_authority_arn": knownvalue.NotNull(), + }), statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New("certificate_authority_arn")), }, }, @@ -217,3 +224,131 @@ func TestAccACMPCACertificateAuthorityCertificate_Identity_RegionOverride(t *tes }, }) } + +func TestAccACMPCACertificateAuthorityCertificate_Identity_ExistingResource(t *testing.T) { + ctx := acctest.Context(t) + + var v acmpca.GetCertificateAuthorityCertificateOutput + resourceName := "aws_acmpca_certificate_authority_certificate.test" + rName := acctest.RandomDomainName() + + acctest.ParallelTest(ctx, t, resource.TestCase{ + 
TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.ACMPCAServiceID), + CheckDestroy: acctest.CheckDestroyNoop, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/CertificateAuthorityCertificate/basic_v5.100.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckCertificateAuthorityCertificateExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: v6.0 Identity error + { + ConfigDirectory: config.StaticDirectory("testdata/CertificateAuthorityCertificate/basic_v6.0.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckCertificateAuthorityCertificateExists(ctx, resourceName, &v), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + "certificate_authority_arn": knownvalue.Null(), + }), + }, + }, + + // Step 3: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/CertificateAuthorityCertificate/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + 
plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + "certificate_authority_arn": knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New("certificate_authority_arn")), + }, + }, + }, + }) +} + +func TestAccACMPCACertificateAuthorityCertificate_Identity_ExistingResource_NoRefresh_NoChange(t *testing.T) { + ctx := acctest.Context(t) + + var v acmpca.GetCertificateAuthorityCertificateOutput + resourceName := "aws_acmpca_certificate_authority_certificate.test" + rName := acctest.RandomDomainName() + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.ACMPCAServiceID), + CheckDestroy: acctest.CheckDestroyNoop, + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/CertificateAuthorityCertificate/basic_v5.100.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckCertificateAuthorityCertificateExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/CertificateAuthorityCertificate/basic/"), + ConfigVariables: config.Variables{ + 
acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckCertificateAuthorityCertificateExists(ctx, resourceName, &v), + ), + }, + }, + }) +} diff --git a/internal/service/acmpca/certificate_authority_certificate_test.go b/internal/service/acmpca/certificate_authority_certificate_test.go index ec4991ad38a5..952de05b83cc 100644 --- a/internal/service/acmpca/certificate_authority_certificate_test.go +++ b/internal/service/acmpca/certificate_authority_certificate_test.go @@ -8,17 +8,10 @@ import ( "fmt" "testing" - "github.com/YakDriver/regexache" "github.com/aws/aws-sdk-go-v2/service/acmpca" "github.com/hashicorp/terraform-plugin-testing/helper/resource" - "github.com/hashicorp/terraform-plugin-testing/knownvalue" - "github.com/hashicorp/terraform-plugin-testing/plancheck" - "github.com/hashicorp/terraform-plugin-testing/statecheck" "github.com/hashicorp/terraform-plugin-testing/terraform" - "github.com/hashicorp/terraform-plugin-testing/tfversion" "github.com/hashicorp/terraform-provider-aws/internal/acctest" - tfknownvalue "github.com/hashicorp/terraform-provider-aws/internal/acctest/knownvalue" - tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" "github.com/hashicorp/terraform-provider-aws/internal/conns" tfacmpca "github.com/hashicorp/terraform-provider-aws/internal/service/acmpca" "github.com/hashicorp/terraform-provider-aws/names" @@ -119,84 +112,6 @@ func TestAccACMPCACertificateAuthorityCertificate_subordinateCA(t *testing.T) { }) } -func TestAccACMPCACertificateAuthorityCertificate_Identity_ExistingResource(t *testing.T) { - ctx := acctest.Context(t) - var v acmpca.GetCertificateAuthorityCertificateOutput - resourceName := "aws_acmpca_certificate_authority_certificate.test" - commonName := acctest.RandomDomainName() - - resource.ParallelTest(t, resource.TestCase{ - TerraformVersionChecks: []tfversion.TerraformVersionCheck{ - 
tfversion.SkipBelow(tfversion.Version1_12_0), - }, - PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, names.ACMPCAServiceID), - CheckDestroy: acctest.CheckDestroyNoop, - Steps: []resource.TestStep{ - { - ExternalProviders: map[string]resource.ExternalProvider{ - "aws": { - Source: "hashicorp/aws", - VersionConstraint: "5.100.0", - }, - }, - Config: testAccCertificateAuthorityCertificateConfig_rootCA(commonName), - Check: resource.ComposeTestCheckFunc( - testAccCheckCertificateAuthorityCertificateExists(ctx, resourceName, &v), - ), - ConfigStateChecks: []statecheck.StateCheck{ - tfstatecheck.ExpectNoIdentity(resourceName), - }, - }, - { - ExternalProviders: map[string]resource.ExternalProvider{ - "aws": { - Source: "hashicorp/aws", - VersionConstraint: "6.0.0", - }, - }, - Config: testAccCertificateAuthorityCertificateConfig_rootCA(commonName), - Check: resource.ComposeTestCheckFunc( - testAccCheckCertificateAuthorityCertificateExists(ctx, resourceName, &v), - ), - ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - PostApplyPostRefresh: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - }, - ConfigStateChecks: []statecheck.StateCheck{ - statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ - "certificate_authority_arn": knownvalue.Null(), - }), - }, - }, - { - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - Config: testAccCertificateAuthorityCertificateConfig_rootCA(commonName), - Check: resource.ComposeTestCheckFunc( - testAccCheckCertificateAuthorityCertificateExists(ctx, resourceName, &v), - ), - ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - PostApplyPostRefresh: []plancheck.PlanCheck{ - 
plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - }, - ConfigStateChecks: []statecheck.StateCheck{ - statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ - "certificate_authority_arn": tfknownvalue.RegionalARNRegexp("acm-pca", regexache.MustCompile(`certificate-authority/.+`)), - }), - }, - }, - }, - }) -} - func testAccCheckCertificateAuthorityCertificateExists(ctx context.Context, n string, v *acmpca.GetCertificateAuthorityCertificateOutput) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] diff --git a/internal/service/acmpca/certificate_authority_data_source_tags_gen_test.go b/internal/service/acmpca/certificate_authority_data_source_tags_gen_test.go index 85c3c5e8b0a1..7aa1f5c16830 100644 --- a/internal/service/acmpca/certificate_authority_data_source_tags_gen_test.go +++ b/internal/service/acmpca/certificate_authority_data_source_tags_gen_test.go @@ -6,7 +6,6 @@ import ( "testing" "github.com/hashicorp/terraform-plugin-testing/config" - sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/knownvalue" "github.com/hashicorp/terraform-plugin-testing/statecheck" @@ -17,10 +16,11 @@ import ( func TestAccACMPCACertificateAuthorityDataSource_tags(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_acmpca_certificate_authority.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ACMPCAServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -45,10 +45,11 @@ func TestAccACMPCACertificateAuthorityDataSource_tags(t *testing.T) { func 
TestAccACMPCACertificateAuthorityDataSource_tags_NullMap(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_acmpca_certificate_authority.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ACMPCAServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -69,10 +70,11 @@ func TestAccACMPCACertificateAuthorityDataSource_tags_NullMap(t *testing.T) { func TestAccACMPCACertificateAuthorityDataSource_tags_EmptyMap(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_acmpca_certificate_authority.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ACMPCAServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -93,10 +95,11 @@ func TestAccACMPCACertificateAuthorityDataSource_tags_EmptyMap(t *testing.T) { func TestAccACMPCACertificateAuthorityDataSource_tags_DefaultTags_nonOverlapping(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_acmpca_certificate_authority.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ACMPCAServiceID), Steps: []resource.TestStep{ @@ -125,10 +128,11 @@ func TestAccACMPCACertificateAuthorityDataSource_tags_DefaultTags_nonOverlapping func 
TestAccACMPCACertificateAuthorityDataSource_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_acmpca_certificate_authority.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ACMPCAServiceID), Steps: []resource.TestStep{ @@ -163,10 +167,11 @@ func TestAccACMPCACertificateAuthorityDataSource_tags_IgnoreTags_Overlap_Default func TestAccACMPCACertificateAuthorityDataSource_tags_IgnoreTags_Overlap_ResourceTag(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_acmpca_certificate_authority.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ACMPCAServiceID), Steps: []resource.TestStep{ diff --git a/internal/service/acmpca/certificate_authority_identity_gen_test.go b/internal/service/acmpca/certificate_authority_identity_gen_test.go index a35dc07e6a5a..d9e2ef88b36e 100644 --- a/internal/service/acmpca/certificate_authority_identity_gen_test.go +++ b/internal/service/acmpca/certificate_authority_identity_gen_test.go @@ -15,6 +15,7 @@ import ( "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" "github.com/hashicorp/terraform-plugin-testing/tfversion" "github.com/hashicorp/terraform-provider-aws/internal/acctest" + tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -25,7 +26,7 @@ func TestAccACMPCACertificateAuthority_Identity_Basic(t *testing.T) { resourceName := 
"aws_acmpca_certificate_authority.test" rName := acctest.RandomDomainName() - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ TerraformVersionChecks: []tfversion.TerraformVersionCheck{ tfversion.SkipBelow(tfversion.Version1_12_0), }, @@ -46,6 +47,9 @@ func TestAccACMPCACertificateAuthority_Identity_Basic(t *testing.T) { ConfigStateChecks: []statecheck.StateCheck{ statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New(names.AttrARN), compare.ValuesSame()), statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), }, }, @@ -114,7 +118,7 @@ func TestAccACMPCACertificateAuthority_Identity_RegionOverride(t *testing.T) { resourceName := "aws_acmpca_certificate_authority.test" rName := acctest.RandomDomainName() - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ TerraformVersionChecks: []tfversion.TerraformVersionCheck{ tfversion.SkipBelow(tfversion.Version1_12_0), }, @@ -133,6 +137,9 @@ func TestAccACMPCACertificateAuthority_Identity_RegionOverride(t *testing.T) { ConfigStateChecks: []statecheck.StateCheck{ statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New(names.AttrARN), compare.ValuesSame()), statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), }, }, @@ -236,3 +243,131 @@ func TestAccACMPCACertificateAuthority_Identity_RegionOverride(t *testing.T) 
{ }, }) } + +func TestAccACMPCACertificateAuthority_Identity_ExistingResource(t *testing.T) { + ctx := acctest.Context(t) + + var v types.CertificateAuthority + resourceName := "aws_acmpca_certificate_authority.test" + rName := acctest.RandomDomainName() + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.ACMPCAServiceID), + CheckDestroy: testAccCheckCertificateAuthorityDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/CertificateAuthority/basic_v5.100.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckCertificateAuthorityExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: v6.0 Identity error + { + ConfigDirectory: config.StaticDirectory("testdata/CertificateAuthority/basic_v6.0.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckCertificateAuthorityExists(ctx, resourceName, &v), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.Null(), + }), + }, + }, + + // Step 3: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: 
config.StaticDirectory("testdata/CertificateAuthority/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + }, + }) +} + +func TestAccACMPCACertificateAuthority_Identity_ExistingResource_NoRefresh_NoChange(t *testing.T) { + ctx := acctest.Context(t) + + var v types.CertificateAuthority + resourceName := "aws_acmpca_certificate_authority.test" + rName := acctest.RandomDomainName() + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.ACMPCAServiceID), + CheckDestroy: testAccCheckCertificateAuthorityDestroy(ctx), + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/CertificateAuthority/basic_v5.100.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckCertificateAuthorityExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: 
acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/CertificateAuthority/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckCertificateAuthorityExists(ctx, resourceName, &v), + ), + }, + }, + }) +} diff --git a/internal/service/acmpca/certificate_authority_tags_gen_test.go b/internal/service/acmpca/certificate_authority_tags_gen_test.go index f6e87de444bb..225f46ca524e 100644 --- a/internal/service/acmpca/certificate_authority_tags_gen_test.go +++ b/internal/service/acmpca/certificate_authority_tags_gen_test.go @@ -18,11 +18,12 @@ import ( func TestAccACMPCACertificateAuthority_tags(t *testing.T) { ctx := acctest.Context(t) + var v types.CertificateAuthority resourceName := "aws_acmpca_certificate_authority.test" rName := acctest.RandomDomainName() - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ACMPCAServiceID), CheckDestroy: testAccCheckCertificateAuthorityDestroy(ctx), @@ -212,11 +213,12 @@ func TestAccACMPCACertificateAuthority_tags(t *testing.T) { func TestAccACMPCACertificateAuthority_tags_null(t *testing.T) { ctx := acctest.Context(t) + var v types.CertificateAuthority resourceName := "aws_acmpca_certificate_authority.test" rName := acctest.RandomDomainName() - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ACMPCAServiceID), CheckDestroy: testAccCheckCertificateAuthorityDestroy(ctx), @@ -282,11 +284,12 @@ func TestAccACMPCACertificateAuthority_tags_null(t *testing.T) { func TestAccACMPCACertificateAuthority_tags_EmptyMap(t *testing.T) { ctx := acctest.Context(t) + var v types.CertificateAuthority resourceName := 
"aws_acmpca_certificate_authority.test" rName := acctest.RandomDomainName() - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ACMPCAServiceID), CheckDestroy: testAccCheckCertificateAuthorityDestroy(ctx), @@ -348,11 +351,12 @@ func TestAccACMPCACertificateAuthority_tags_EmptyMap(t *testing.T) { func TestAccACMPCACertificateAuthority_tags_AddOnUpdate(t *testing.T) { ctx := acctest.Context(t) + var v types.CertificateAuthority resourceName := "aws_acmpca_certificate_authority.test" rName := acctest.RandomDomainName() - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ACMPCAServiceID), CheckDestroy: testAccCheckCertificateAuthorityDestroy(ctx), @@ -432,11 +436,12 @@ func TestAccACMPCACertificateAuthority_tags_AddOnUpdate(t *testing.T) { func TestAccACMPCACertificateAuthority_tags_EmptyTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) + var v types.CertificateAuthority resourceName := "aws_acmpca_certificate_authority.test" rName := acctest.RandomDomainName() - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ACMPCAServiceID), CheckDestroy: testAccCheckCertificateAuthorityDestroy(ctx), @@ -527,11 +532,12 @@ func TestAccACMPCACertificateAuthority_tags_EmptyTag_OnCreate(t *testing.T) { func TestAccACMPCACertificateAuthority_tags_EmptyTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + var v types.CertificateAuthority resourceName := "aws_acmpca_certificate_authority.test" rName := acctest.RandomDomainName() - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: 
acctest.ErrorCheck(t, names.ACMPCAServiceID), CheckDestroy: testAccCheckCertificateAuthorityDestroy(ctx), @@ -670,11 +676,12 @@ func TestAccACMPCACertificateAuthority_tags_EmptyTag_OnUpdate_Add(t *testing.T) func TestAccACMPCACertificateAuthority_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + var v types.CertificateAuthority resourceName := "aws_acmpca_certificate_authority.test" rName := acctest.RandomDomainName() - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ACMPCAServiceID), CheckDestroy: testAccCheckCertificateAuthorityDestroy(ctx), @@ -762,11 +769,12 @@ func TestAccACMPCACertificateAuthority_tags_EmptyTag_OnUpdate_Replace(t *testing func TestAccACMPCACertificateAuthority_tags_DefaultTags_providerOnly(t *testing.T) { ctx := acctest.Context(t) + var v types.CertificateAuthority resourceName := "aws_acmpca_certificate_authority.test" rName := acctest.RandomDomainName() - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ACMPCAServiceID), CheckDestroy: testAccCheckCertificateAuthorityDestroy(ctx), @@ -955,11 +963,12 @@ func TestAccACMPCACertificateAuthority_tags_DefaultTags_providerOnly(t *testing. 
func TestAccACMPCACertificateAuthority_tags_DefaultTags_nonOverlapping(t *testing.T) { ctx := acctest.Context(t) + var v types.CertificateAuthority resourceName := "aws_acmpca_certificate_authority.test" rName := acctest.RandomDomainName() - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ACMPCAServiceID), CheckDestroy: testAccCheckCertificateAuthorityDestroy(ctx), @@ -1124,11 +1133,12 @@ func TestAccACMPCACertificateAuthority_tags_DefaultTags_nonOverlapping(t *testin func TestAccACMPCACertificateAuthority_tags_DefaultTags_overlapping(t *testing.T) { ctx := acctest.Context(t) + var v types.CertificateAuthority resourceName := "aws_acmpca_certificate_authority.test" rName := acctest.RandomDomainName() - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ACMPCAServiceID), CheckDestroy: testAccCheckCertificateAuthorityDestroy(ctx), @@ -1309,11 +1319,12 @@ func TestAccACMPCACertificateAuthority_tags_DefaultTags_overlapping(t *testing.T func TestAccACMPCACertificateAuthority_tags_DefaultTags_updateToProviderOnly(t *testing.T) { ctx := acctest.Context(t) + var v types.CertificateAuthority resourceName := "aws_acmpca_certificate_authority.test" rName := acctest.RandomDomainName() - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ACMPCAServiceID), CheckDestroy: testAccCheckCertificateAuthorityDestroy(ctx), @@ -1402,11 +1413,12 @@ func TestAccACMPCACertificateAuthority_tags_DefaultTags_updateToProviderOnly(t * func TestAccACMPCACertificateAuthority_tags_DefaultTags_updateToResourceOnly(t *testing.T) { ctx := acctest.Context(t) + var v types.CertificateAuthority resourceName := 
"aws_acmpca_certificate_authority.test" rName := acctest.RandomDomainName() - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ACMPCAServiceID), CheckDestroy: testAccCheckCertificateAuthorityDestroy(ctx), @@ -1494,11 +1506,12 @@ func TestAccACMPCACertificateAuthority_tags_DefaultTags_updateToResourceOnly(t * func TestAccACMPCACertificateAuthority_tags_DefaultTags_emptyResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v types.CertificateAuthority resourceName := "aws_acmpca_certificate_authority.test" rName := acctest.RandomDomainName() - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ACMPCAServiceID), CheckDestroy: testAccCheckCertificateAuthorityDestroy(ctx), @@ -1562,11 +1575,12 @@ func TestAccACMPCACertificateAuthority_tags_DefaultTags_emptyResourceTag(t *test func TestAccACMPCACertificateAuthority_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T) { ctx := acctest.Context(t) + var v types.CertificateAuthority resourceName := "aws_acmpca_certificate_authority.test" rName := acctest.RandomDomainName() - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ACMPCAServiceID), CheckDestroy: testAccCheckCertificateAuthorityDestroy(ctx), @@ -1622,11 +1636,12 @@ func TestAccACMPCACertificateAuthority_tags_DefaultTags_emptyProviderOnlyTag(t * func TestAccACMPCACertificateAuthority_tags_DefaultTags_nullOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v types.CertificateAuthority resourceName := "aws_acmpca_certificate_authority.test" rName := acctest.RandomDomainName() - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, 
resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ACMPCAServiceID), CheckDestroy: testAccCheckCertificateAuthorityDestroy(ctx), @@ -1687,11 +1702,12 @@ func TestAccACMPCACertificateAuthority_tags_DefaultTags_nullOverlappingResourceT func TestAccACMPCACertificateAuthority_tags_DefaultTags_nullNonOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v types.CertificateAuthority resourceName := "aws_acmpca_certificate_authority.test" rName := acctest.RandomDomainName() - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ACMPCAServiceID), CheckDestroy: testAccCheckCertificateAuthorityDestroy(ctx), @@ -1752,11 +1768,12 @@ func TestAccACMPCACertificateAuthority_tags_DefaultTags_nullNonOverlappingResour func TestAccACMPCACertificateAuthority_tags_ComputedTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) + var v types.CertificateAuthority resourceName := "aws_acmpca_certificate_authority.test" rName := acctest.RandomDomainName() - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ACMPCAServiceID), CheckDestroy: testAccCheckCertificateAuthorityDestroy(ctx), @@ -1810,11 +1827,12 @@ func TestAccACMPCACertificateAuthority_tags_ComputedTag_OnCreate(t *testing.T) { func TestAccACMPCACertificateAuthority_tags_ComputedTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + var v types.CertificateAuthority resourceName := "aws_acmpca_certificate_authority.test" rName := acctest.RandomDomainName() - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ACMPCAServiceID), CheckDestroy: 
testAccCheckCertificateAuthorityDestroy(ctx), @@ -1910,11 +1928,12 @@ func TestAccACMPCACertificateAuthority_tags_ComputedTag_OnUpdate_Add(t *testing. func TestAccACMPCACertificateAuthority_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + var v types.CertificateAuthority resourceName := "aws_acmpca_certificate_authority.test" rName := acctest.RandomDomainName() - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ACMPCAServiceID), CheckDestroy: testAccCheckCertificateAuthorityDestroy(ctx), @@ -2000,11 +2019,12 @@ func TestAccACMPCACertificateAuthority_tags_ComputedTag_OnUpdate_Replace(t *test func TestAccACMPCACertificateAuthority_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { ctx := acctest.Context(t) + var v types.CertificateAuthority resourceName := "aws_acmpca_certificate_authority.test" rName := acctest.RandomDomainName() - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ACMPCAServiceID), CheckDestroy: testAccCheckCertificateAuthorityDestroy(ctx), @@ -2162,11 +2182,12 @@ func TestAccACMPCACertificateAuthority_tags_IgnoreTags_Overlap_DefaultTag(t *tes func TestAccACMPCACertificateAuthority_tags_IgnoreTags_Overlap_ResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v types.CertificateAuthority resourceName := "aws_acmpca_certificate_authority.test" rName := acctest.RandomDomainName() - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ACMPCAServiceID), CheckDestroy: testAccCheckCertificateAuthorityDestroy(ctx), diff --git a/internal/service/acmpca/certificate_authority_test.go b/internal/service/acmpca/certificate_authority_test.go 
index a2abeb521a78..a45edd9fc5d5 100644 --- a/internal/service/acmpca/certificate_authority_test.go +++ b/internal/service/acmpca/certificate_authority_test.go @@ -13,14 +13,8 @@ import ( "github.com/hashicorp/aws-sdk-go-base/v2/endpoints" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" - "github.com/hashicorp/terraform-plugin-testing/knownvalue" - "github.com/hashicorp/terraform-plugin-testing/plancheck" - "github.com/hashicorp/terraform-plugin-testing/statecheck" "github.com/hashicorp/terraform-plugin-testing/terraform" - "github.com/hashicorp/terraform-plugin-testing/tfversion" "github.com/hashicorp/terraform-provider-aws/internal/acctest" - tfknownvalue "github.com/hashicorp/terraform-provider-aws/internal/acctest/knownvalue" - tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" "github.com/hashicorp/terraform-provider-aws/internal/conns" tfacmpca "github.com/hashicorp/terraform-provider-aws/internal/service/acmpca" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" @@ -713,84 +707,6 @@ func TestAccACMPCACertificateAuthority_RevocationOcsp_customCNAME(t *testing.T) }) } -func TestAccACMPCACertificateAuthority_Identity_ExistingResource(t *testing.T) { - ctx := acctest.Context(t) - var certificateAuthority awstypes.CertificateAuthority - resourceName := "aws_acmpca_certificate_authority.test" - commonName := acctest.RandomDomainName() - - resource.ParallelTest(t, resource.TestCase{ - TerraformVersionChecks: []tfversion.TerraformVersionCheck{ - tfversion.SkipBelow(tfversion.Version1_12_0), - }, - PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, names.ACMPCAServiceID), - CheckDestroy: testAccCheckCertificateAuthorityDestroy(ctx), - Steps: []resource.TestStep{ - { - ExternalProviders: map[string]resource.ExternalProvider{ - "aws": { - Source: "hashicorp/aws", - VersionConstraint: "5.100.0", - }, 
- }, - Config: testAccCertificateAuthorityConfig_required(commonName), - Check: resource.ComposeTestCheckFunc( - testAccCheckCertificateAuthorityExists(ctx, resourceName, &certificateAuthority), - ), - ConfigStateChecks: []statecheck.StateCheck{ - tfstatecheck.ExpectNoIdentity(resourceName), - }, - }, - { - ExternalProviders: map[string]resource.ExternalProvider{ - "aws": { - Source: "hashicorp/aws", - VersionConstraint: "6.0.0", - }, - }, - Config: testAccCertificateAuthorityConfig_required(commonName), - Check: resource.ComposeTestCheckFunc( - testAccCheckCertificateAuthorityExists(ctx, resourceName, &certificateAuthority), - ), - ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - PostApplyPostRefresh: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - }, - ConfigStateChecks: []statecheck.StateCheck{ - statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ - names.AttrARN: knownvalue.Null(), - }), - }, - }, - { - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - Config: testAccCertificateAuthorityConfig_required(commonName), - Check: resource.ComposeTestCheckFunc( - testAccCheckCertificateAuthorityExists(ctx, resourceName, &certificateAuthority), - ), - ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - PostApplyPostRefresh: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - }, - ConfigStateChecks: []statecheck.StateCheck{ - statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ - names.AttrARN: tfknownvalue.RegionalARNRegexp("acm-pca", regexache.MustCompile(`certificate-authority/.+`)), - }), - }, - }, - }, - }) -} - func testAccCheckCertificateAuthorityDestroy(ctx context.Context) 
resource.TestCheckFunc { return func(s *terraform.State) error { conn := acctest.Provider.Meta().(*conns.AWSClient).ACMPCAClient(ctx) diff --git a/internal/service/acmpca/certificate_identity_gen_test.go b/internal/service/acmpca/certificate_identity_gen_test.go index cf2fd23aeb72..67bb0150e4ef 100644 --- a/internal/service/acmpca/certificate_identity_gen_test.go +++ b/internal/service/acmpca/certificate_identity_gen_test.go @@ -15,15 +15,17 @@ import ( "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" "github.com/hashicorp/terraform-plugin-testing/tfversion" "github.com/hashicorp/terraform-provider-aws/internal/acctest" + tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" "github.com/hashicorp/terraform-provider-aws/names" ) func TestAccACMPCACertificate_Identity_Basic(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_acmpca_certificate.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ TerraformVersionChecks: []tfversion.TerraformVersionCheck{ tfversion.SkipBelow(tfversion.Version1_12_0), }, @@ -44,6 +46,9 @@ func TestAccACMPCACertificate_Identity_Basic(t *testing.T) { ConfigStateChecks: []statecheck.StateCheck{ statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New(names.AttrARN), compare.ValuesSame()), statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), }, }, @@ -112,7 +117,7 @@ func TestAccACMPCACertificate_Identity_RegionOverride(t *testing.T) { resourceName := "aws_acmpca_certificate.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resource.ParallelTest(t, 
resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ TerraformVersionChecks: []tfversion.TerraformVersionCheck{ tfversion.SkipBelow(tfversion.Version1_12_0), }, @@ -131,6 +136,9 @@ func TestAccACMPCACertificate_Identity_RegionOverride(t *testing.T) { ConfigStateChecks: []statecheck.StateCheck{ statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New(names.AttrARN), compare.ValuesSame()), statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), }, }, @@ -234,3 +242,129 @@ func TestAccACMPCACertificate_Identity_RegionOverride(t *testing.T) { }, }) } + +func TestAccACMPCACertificate_Identity_ExistingResource(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_acmpca_certificate.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.ACMPCAServiceID), + CheckDestroy: testAccCheckCertificateDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/Certificate/basic_v5.100.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckCertificateExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: v6.0 Identity error + { + ConfigDirectory: 
config.StaticDirectory("testdata/Certificate/basic_v6.0.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckCertificateExists(ctx, resourceName), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.Null(), + }), + }, + }, + + // Step 3: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Certificate/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + }, + }) +} + +func TestAccACMPCACertificate_Identity_ExistingResource_NoRefresh_NoChange(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_acmpca_certificate.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { 
acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.ACMPCAServiceID), + CheckDestroy: testAccCheckCertificateDestroy(ctx), + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/Certificate/basic_v5.100.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckCertificateExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Certificate/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckCertificateExists(ctx, resourceName), + ), + }, + }, + }) +} diff --git a/internal/service/acmpca/certificate_test.go b/internal/service/acmpca/certificate_test.go index 7bb63f5ffaa4..a26934a1fc32 100644 --- a/internal/service/acmpca/certificate_test.go +++ b/internal/service/acmpca/certificate_test.go @@ -15,14 +15,8 @@ import ( "github.com/YakDriver/regexache" "github.com/aws/aws-sdk-go-v2/service/acmpca/types" "github.com/hashicorp/terraform-plugin-testing/helper/resource" - "github.com/hashicorp/terraform-plugin-testing/knownvalue" - "github.com/hashicorp/terraform-plugin-testing/plancheck" - "github.com/hashicorp/terraform-plugin-testing/statecheck" "github.com/hashicorp/terraform-plugin-testing/terraform" - "github.com/hashicorp/terraform-plugin-testing/tfversion" "github.com/hashicorp/terraform-provider-aws/internal/acctest" - tfknownvalue "github.com/hashicorp/terraform-provider-aws/internal/acctest/knownvalue" - tfstatecheck 
"github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/errs" tfacmpca "github.com/hashicorp/terraform-provider-aws/internal/service/acmpca" @@ -293,83 +287,6 @@ func TestAccACMPCACertificate_Validity_absolute(t *testing.T) { }) } -func TestAccACMPCACertificate_Identity_ExistingResource(t *testing.T) { - ctx := acctest.Context(t) - resourceName := "aws_acmpca_certificate.test" - domain := acctest.RandomDomainName() - - resource.ParallelTest(t, resource.TestCase{ - TerraformVersionChecks: []tfversion.TerraformVersionCheck{ - tfversion.SkipBelow(tfversion.Version1_12_0), - }, - PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, names.ACMPCAServiceID), - CheckDestroy: testAccCheckCertificateDestroy(ctx), - Steps: []resource.TestStep{ - { - ExternalProviders: map[string]resource.ExternalProvider{ - "aws": { - Source: "hashicorp/aws", - VersionConstraint: "5.100.0", - }, - }, - Config: testAccCertificateConfig_root(domain), - Check: resource.ComposeTestCheckFunc( - testAccCheckCertificateExists(ctx, resourceName), - ), - ConfigStateChecks: []statecheck.StateCheck{ - tfstatecheck.ExpectNoIdentity(resourceName), - }, - }, - { - ExternalProviders: map[string]resource.ExternalProvider{ - "aws": { - Source: "hashicorp/aws", - VersionConstraint: "6.0.0", - }, - }, - Config: testAccCertificateConfig_root(domain), - Check: resource.ComposeTestCheckFunc( - testAccCheckCertificateExists(ctx, resourceName), - ), - ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - PostApplyPostRefresh: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - }, - ConfigStateChecks: []statecheck.StateCheck{ - statecheck.ExpectIdentity(resourceName, 
map[string]knownvalue.Check{ - names.AttrARN: knownvalue.Null(), - }), - }, - }, - { - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - Config: testAccCertificateConfig_root(domain), - Check: resource.ComposeTestCheckFunc( - testAccCheckCertificateExists(ctx, resourceName), - ), - ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - PostApplyPostRefresh: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - }, - ConfigStateChecks: []statecheck.StateCheck{ - statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ - names.AttrARN: tfknownvalue.RegionalARNRegexp("acm-pca", regexache.MustCompile(`certificate-authority/.+/certificate/.+$`)), - }), - }, - }, - }, - }) -} - func testAccCheckCertificateDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { conn := acctest.Provider.Meta().(*conns.AWSClient).ACMPCAClient(ctx) diff --git a/internal/service/acmpca/policy_identity_gen_test.go b/internal/service/acmpca/policy_identity_gen_test.go index 0bbf7ccffaa6..2fd6f173429b 100644 --- a/internal/service/acmpca/policy_identity_gen_test.go +++ b/internal/service/acmpca/policy_identity_gen_test.go @@ -14,14 +14,16 @@ import ( "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" "github.com/hashicorp/terraform-plugin-testing/tfversion" "github.com/hashicorp/terraform-provider-aws/internal/acctest" + tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" "github.com/hashicorp/terraform-provider-aws/names" ) func TestAccACMPCAPolicy_Identity_Basic(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_acmpca_policy.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ TerraformVersionChecks: []tfversion.TerraformVersionCheck{ 
tfversion.SkipBelow(tfversion.Version1_12_0), }, @@ -40,6 +42,9 @@ func TestAccACMPCAPolicy_Identity_Basic(t *testing.T) { ConfigStateChecks: []statecheck.StateCheck{ statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New(names.AttrResourceARN), compare.ValuesSame()), statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrResourceARN: knownvalue.NotNull(), + }), statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrResourceARN)), }, }, @@ -94,7 +99,7 @@ func TestAccACMPCAPolicy_Identity_RegionOverride(t *testing.T) { resourceName := "aws_acmpca_policy.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ TerraformVersionChecks: []tfversion.TerraformVersionCheck{ tfversion.SkipBelow(tfversion.Version1_12_0), }, @@ -112,6 +117,9 @@ func TestAccACMPCAPolicy_Identity_RegionOverride(t *testing.T) { ConfigStateChecks: []statecheck.StateCheck{ statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New(names.AttrResourceARN), compare.ValuesSame()), statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrResourceARN: knownvalue.NotNull(), + }), statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrResourceARN)), }, }, @@ -198,3 +206,117 @@ func TestAccACMPCAPolicy_Identity_RegionOverride(t *testing.T) { }, }) } + +func TestAccACMPCAPolicy_Identity_ExistingResource(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_acmpca_policy.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + 
tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.ACMPCAServiceID), + CheckDestroy: testAccCheckPolicyDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/Policy/basic_v5.100.0/"), + ConfigVariables: config.Variables{}, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckPolicyExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: v6.0 Identity error + { + ConfigDirectory: config.StaticDirectory("testdata/Policy/basic_v6.0.0/"), + ConfigVariables: config.Variables{}, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckPolicyExists(ctx, resourceName), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrResourceARN: knownvalue.Null(), + }), + }, + }, + + // Step 3: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Policy/basic/"), + ConfigVariables: config.Variables{}, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrResourceARN: knownvalue.NotNull(), + 
}), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrResourceARN)), + }, + }, + }, + }) +} + +func TestAccACMPCAPolicy_Identity_ExistingResource_NoRefresh_NoChange(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_acmpca_policy.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.ACMPCAServiceID), + CheckDestroy: testAccCheckPolicyDestroy(ctx), + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/Policy/basic_v5.100.0/"), + ConfigVariables: config.Variables{}, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckPolicyExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Policy/basic/"), + ConfigVariables: config.Variables{}, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckPolicyExists(ctx, resourceName), + ), + }, + }, + }) +} diff --git a/internal/service/acmpca/policy_test.go b/internal/service/acmpca/policy_test.go index 3959bd9b0095..55fb2752325d 100644 --- a/internal/service/acmpca/policy_test.go +++ b/internal/service/acmpca/policy_test.go @@ -8,16 +8,9 @@ import ( "fmt" "testing" - "github.com/YakDriver/regexache" "github.com/hashicorp/terraform-plugin-testing/helper/resource" - "github.com/hashicorp/terraform-plugin-testing/knownvalue" - "github.com/hashicorp/terraform-plugin-testing/plancheck" - "github.com/hashicorp/terraform-plugin-testing/statecheck" 
"github.com/hashicorp/terraform-plugin-testing/terraform" - "github.com/hashicorp/terraform-plugin-testing/tfversion" "github.com/hashicorp/terraform-provider-aws/internal/acctest" - tfknownvalue "github.com/hashicorp/terraform-provider-aws/internal/acctest/knownvalue" - tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" "github.com/hashicorp/terraform-provider-aws/internal/conns" tfacmpca "github.com/hashicorp/terraform-provider-aws/internal/service/acmpca" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" @@ -50,82 +43,6 @@ func TestAccACMPCAPolicy_basic(t *testing.T) { }) } -func TestAccACMPCAPolicy_Identity_ExistingResource(t *testing.T) { - ctx := acctest.Context(t) - resourceName := "aws_acmpca_policy.test" - - resource.ParallelTest(t, resource.TestCase{ - TerraformVersionChecks: []tfversion.TerraformVersionCheck{ - tfversion.SkipBelow(tfversion.Version1_12_0), - }, - PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, names.ACMPCAServiceID), - CheckDestroy: testAccCheckPolicyDestroy(ctx), - Steps: []resource.TestStep{ - { - ExternalProviders: map[string]resource.ExternalProvider{ - "aws": { - Source: "hashicorp/aws", - VersionConstraint: "5.100.0", - }, - }, - Config: testAccPolicyConfig_basic(), - Check: resource.ComposeTestCheckFunc( - testAccCheckPolicyExists(ctx, resourceName), - ), - ConfigStateChecks: []statecheck.StateCheck{ - tfstatecheck.ExpectNoIdentity(resourceName), - }, - }, - { - ExternalProviders: map[string]resource.ExternalProvider{ - "aws": { - Source: "hashicorp/aws", - VersionConstraint: "6.0.0", - }, - }, - Config: testAccPolicyConfig_basic(), - Check: resource.ComposeTestCheckFunc( - testAccCheckPolicyExists(ctx, resourceName), - ), - ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - PostApplyPostRefresh: []plancheck.PlanCheck{ - 
plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - }, - ConfigStateChecks: []statecheck.StateCheck{ - statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ - names.AttrResourceARN: knownvalue.Null(), - }), - }, - }, - { - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - Config: testAccPolicyConfig_basic(), - Check: resource.ComposeTestCheckFunc( - testAccCheckPolicyExists(ctx, resourceName), - ), - ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - PostApplyPostRefresh: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - }, - ConfigStateChecks: []statecheck.StateCheck{ - statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ - names.AttrResourceARN: tfknownvalue.RegionalARNRegexp("acm-pca", regexache.MustCompile(`certificate-authority/.+`)), - }), - }, - }, - }, - }) -} - func testAccCheckPolicyDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { conn := acctest.Provider.Meta().(*conns.AWSClient).ACMPCAClient(ctx) diff --git a/internal/service/acmpca/service_endpoint_resolver_gen.go b/internal/service/acmpca/service_endpoint_resolver_gen.go index cb328c16cd26..5183a0808ec3 100644 --- a/internal/service/acmpca/service_endpoint_resolver_gen.go +++ b/internal/service/acmpca/service_endpoint_resolver_gen.go @@ -62,7 +62,7 @@ func (r resolverV2) ResolveEndpoint(ctx context.Context, params acmpca.EndpointP }) params.UseFIPS = aws.Bool(false) } else { - err = fmt.Errorf("looking up acmpca endpoint %q: %s", hostname, err) + err = fmt.Errorf("looking up acmpca endpoint %q: %w", hostname, err) return } } else { diff --git a/internal/service/acmpca/service_endpoints_gen_test.go b/internal/service/acmpca/service_endpoints_gen_test.go index a1d946a92b23..352ce487eda9 100644 --- 
a/internal/service/acmpca/service_endpoints_gen_test.go +++ b/internal/service/acmpca/service_endpoints_gen_test.go @@ -521,7 +521,7 @@ func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { ) } -var errCancelOperation = fmt.Errorf("Test: Canceling request") +var errCancelOperation = errors.New("Test: Canceling request") func addCancelRequestMiddleware() func(*middleware.Stack) error { return func(stack *middleware.Stack) error { diff --git a/internal/service/acmpca/service_package_gen.go b/internal/service/acmpca/service_package_gen.go index 8292ca992df5..4e5e5cc6e577 100644 --- a/internal/service/acmpca/service_package_gen.go +++ b/internal/service/acmpca/service_package_gen.go @@ -7,7 +7,6 @@ import ( "unique" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/acmpca" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -57,6 +56,9 @@ func (p *servicePackage) SDKResources(ctx context.Context) []*inttypes.ServicePa inttypes.WithIdentityDuplicateAttrs(names.AttrID), inttypes.WithV6_0SDKv2Fix(), ), + Import: inttypes.SDKv2Import{ + CustomImport: true, + }, }, { Factory: resourceCertificateAuthority, @@ -70,6 +72,9 @@ func (p *servicePackage) SDKResources(ctx context.Context) []*inttypes.ServicePa inttypes.WithIdentityDuplicateAttrs(names.AttrID), inttypes.WithV6_0SDKv2Fix(), ), + Import: inttypes.SDKv2Import{ + CustomImport: true, + }, }, { Factory: resourceCertificateAuthorityCertificate, @@ -129,7 +134,7 @@ func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) ( func(o *acmpca.Options) { if inContext, ok := conns.FromContext(ctx); ok && inContext.VCREnabled() { tflog.Info(ctx, "overriding retry behavior to immediately return VCR errors") - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(vcr.InteractionNotFoundRetryableFunc)) + o.Retryer = 
conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), vcr.InteractionNotFoundRetryableFunc) } }, withExtraOptions(ctx, p, config), diff --git a/internal/service/acmpca/sweep.go b/internal/service/acmpca/sweep.go index 996a82538156..f821a8066072 100644 --- a/internal/service/acmpca/sweep.go +++ b/internal/service/acmpca/sweep.go @@ -26,7 +26,7 @@ func sweepCertificateAuthorities(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %w", err) + return fmt.Errorf("getting client: %w", err) } conn := client.ACMPCAClient(ctx) var sweepResources []sweep.Sweepable diff --git a/internal/service/acmpca/tags_gen.go b/internal/service/acmpca/tags_gen.go index 8d4a1d9c72e5..888df6db2257 100644 --- a/internal/service/acmpca/tags_gen.go +++ b/internal/service/acmpca/tags_gen.go @@ -3,8 +3,8 @@ package acmpca import ( "context" - "fmt" + "github.com/YakDriver/smarterr" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/acmpca" awstypes "github.com/aws/aws-sdk-go-v2/service/acmpca/types" @@ -31,7 +31,7 @@ func listTags(ctx context.Context, conn *acmpca.Client, identifier string, optFn page, err := pages.NextPage(ctx, optFns...) if err != nil { - return tftags.New(ctx, nil), err + return tftags.New(ctx, nil), smarterr.NewError(err) } output = append(output, page.Tags...) @@ -46,7 +46,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri tags, err := listTags(ctx, meta.(*conns.AWSClient).ACMPCAClient(ctx), identifier) if err != nil { - return err + return smarterr.NewError(err) } if inContext, ok := tftags.FromContext(ctx); ok { @@ -124,7 +124,7 @@ func updateTags(ctx context.Context, conn *acmpca.Client, identifier string, old _, err := conn.UntagCertificateAuthority(ctx, &input, optFns...) 
if err != nil { - return fmt.Errorf("untagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } @@ -139,7 +139,7 @@ func updateTags(ctx context.Context, conn *acmpca.Client, identifier string, old _, err := conn.TagCertificateAuthority(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("tagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } diff --git a/internal/service/acmpca/testdata/Certificate/basic_v5.100.0/main_gen.tf b/internal/service/acmpca/testdata/Certificate/basic_v5.100.0/main_gen.tf new file mode 100644 index 000000000000..947f6c7580d9 --- /dev/null +++ b/internal/service/acmpca/testdata/Certificate/basic_v5.100.0/main_gen.tf @@ -0,0 +1,47 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resource "aws_acmpca_certificate" "test" { + certificate_authority_arn = aws_acmpca_certificate_authority.test.arn + certificate_signing_request = aws_acmpca_certificate_authority.test.certificate_signing_request + signing_algorithm = "SHA512WITHRSA" + + template_arn = "arn:${data.aws_partition.current.partition}:acm-pca:::template/RootCACertificate/V1" + + validity { + type = "YEARS" + value = 1 + } +} + +resource "aws_acmpca_certificate_authority" "test" { + permanent_deletion_time_in_days = 7 + type = "ROOT" + + certificate_authority_configuration { + key_algorithm = "RSA_4096" + signing_algorithm = "SHA512WITHRSA" + + subject { + common_name = var.rName + } + } +} + +data "aws_partition" "current" {} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "5.100.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/acmpca/testdata/Certificate/basic_v6.0.0/main_gen.tf b/internal/service/acmpca/testdata/Certificate/basic_v6.0.0/main_gen.tf new file mode 100644 index 000000000000..81c6373b3ab7 --- /dev/null +++ 
b/internal/service/acmpca/testdata/Certificate/basic_v6.0.0/main_gen.tf @@ -0,0 +1,47 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resource "aws_acmpca_certificate" "test" { + certificate_authority_arn = aws_acmpca_certificate_authority.test.arn + certificate_signing_request = aws_acmpca_certificate_authority.test.certificate_signing_request + signing_algorithm = "SHA512WITHRSA" + + template_arn = "arn:${data.aws_partition.current.partition}:acm-pca:::template/RootCACertificate/V1" + + validity { + type = "YEARS" + value = 1 + } +} + +resource "aws_acmpca_certificate_authority" "test" { + permanent_deletion_time_in_days = 7 + type = "ROOT" + + certificate_authority_configuration { + key_algorithm = "RSA_4096" + signing_algorithm = "SHA512WITHRSA" + + subject { + common_name = var.rName + } + } +} + +data "aws_partition" "current" {} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "6.0.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/acmpca/testdata/CertificateAuthority/basic_v5.100.0/main_gen.tf b/internal/service/acmpca/testdata/CertificateAuthority/basic_v5.100.0/main_gen.tf new file mode 100644 index 000000000000..37b13fac1335 --- /dev/null +++ b/internal/service/acmpca/testdata/CertificateAuthority/basic_v5.100.0/main_gen.tf @@ -0,0 +1,32 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_acmpca_certificate_authority" "test" { + permanent_deletion_time_in_days = 7 + usage_mode = "SHORT_LIVED_CERTIFICATE" + + certificate_authority_configuration { + key_algorithm = "RSA_4096" + signing_algorithm = "SHA512WITHRSA" + + subject { + common_name = var.rName + } + } +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "5.100.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/acmpca/testdata/CertificateAuthority/basic_v6.0.0/main_gen.tf b/internal/service/acmpca/testdata/CertificateAuthority/basic_v6.0.0/main_gen.tf new file mode 100644 index 000000000000..91ddad452f29 --- /dev/null +++ b/internal/service/acmpca/testdata/CertificateAuthority/basic_v6.0.0/main_gen.tf @@ -0,0 +1,32 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resource "aws_acmpca_certificate_authority" "test" { + permanent_deletion_time_in_days = 7 + usage_mode = "SHORT_LIVED_CERTIFICATE" + + certificate_authority_configuration { + key_algorithm = "RSA_4096" + signing_algorithm = "SHA512WITHRSA" + + subject { + common_name = var.rName + } + } +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "6.0.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/acmpca/testdata/CertificateAuthorityCertificate/basic_v5.100.0/main_gen.tf b/internal/service/acmpca/testdata/CertificateAuthorityCertificate/basic_v5.100.0/main_gen.tf new file mode 100644 index 000000000000..3715b256b0ad --- /dev/null +++ b/internal/service/acmpca/testdata/CertificateAuthorityCertificate/basic_v5.100.0/main_gen.tf @@ -0,0 +1,54 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_acmpca_certificate_authority_certificate" "test" { + certificate_authority_arn = aws_acmpca_certificate_authority.test.arn + + certificate = aws_acmpca_certificate.test.certificate + certificate_chain = aws_acmpca_certificate.test.certificate_chain +} + +resource "aws_acmpca_certificate" "test" { + certificate_authority_arn = aws_acmpca_certificate_authority.test.arn + certificate_signing_request = aws_acmpca_certificate_authority.test.certificate_signing_request + signing_algorithm = "SHA512WITHRSA" + + template_arn = "arn:${data.aws_partition.current.partition}:acm-pca:::template/RootCACertificate/V1" + + validity { + type = "YEARS" + value = 1 + } +} + +resource "aws_acmpca_certificate_authority" "test" { + permanent_deletion_time_in_days = 7 + type = "ROOT" + + certificate_authority_configuration { + key_algorithm = "RSA_4096" + signing_algorithm = "SHA512WITHRSA" + + subject { + common_name = var.rName + } + } +} + +data "aws_partition" "current" {} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "5.100.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/acmpca/testdata/CertificateAuthorityCertificate/basic_v6.0.0/main_gen.tf b/internal/service/acmpca/testdata/CertificateAuthorityCertificate/basic_v6.0.0/main_gen.tf new file mode 100644 index 000000000000..028fde391b6c --- /dev/null +++ b/internal/service/acmpca/testdata/CertificateAuthorityCertificate/basic_v6.0.0/main_gen.tf @@ -0,0 +1,54 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_acmpca_certificate_authority_certificate" "test" { + certificate_authority_arn = aws_acmpca_certificate_authority.test.arn + + certificate = aws_acmpca_certificate.test.certificate + certificate_chain = aws_acmpca_certificate.test.certificate_chain +} + +resource "aws_acmpca_certificate" "test" { + certificate_authority_arn = aws_acmpca_certificate_authority.test.arn + certificate_signing_request = aws_acmpca_certificate_authority.test.certificate_signing_request + signing_algorithm = "SHA512WITHRSA" + + template_arn = "arn:${data.aws_partition.current.partition}:acm-pca:::template/RootCACertificate/V1" + + validity { + type = "YEARS" + value = 1 + } +} + +resource "aws_acmpca_certificate_authority" "test" { + permanent_deletion_time_in_days = 7 + type = "ROOT" + + certificate_authority_configuration { + key_algorithm = "RSA_4096" + signing_algorithm = "SHA512WITHRSA" + + subject { + common_name = var.rName + } + } +} + +data "aws_partition" "current" {} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "6.0.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/acmpca/testdata/Policy/basic_v5.100.0/main_gen.tf b/internal/service/acmpca/testdata/Policy/basic_v5.100.0/main_gen.tf new file mode 100644 index 000000000000..b4b9a0f4e9ee --- /dev/null +++ b/internal/service/acmpca/testdata/Policy/basic_v5.100.0/main_gen.tf @@ -0,0 +1,72 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_acmpca_policy" "test" { + resource_arn = aws_acmpca_certificate_authority.test.arn + policy = < 0 && v.([]any)[0] != nil { + input.PredictiveScalingPolicyConfiguration = expandPredictiveScalingPolicyConfiguration(v.([]any)[0].(map[string]any)) + } + + if v, ok := d.GetOk("scalable_dimension"); ok { + input.ScalableDimension = awstypes.ScalableDimension(v.(string)) + } + + if v, ok := d.GetOk("service_namespace"); ok { + input.ServiceNamespace = awstypes.ServiceNamespace(v.(string)) + } - _, err := tfresource.RetryWhenIsA[*awstypes.FailedResourceAccessException](ctx, propagationTimeout, func() (any, error) { + if v, ok := d.GetOk("step_scaling_policy_configuration"); ok { + input.StepScalingPolicyConfiguration = expandStepScalingPolicyConfiguration(v.([]any)) + } + + if v, ok := d.GetOk("target_tracking_scaling_policy_configuration"); ok { + input.TargetTrackingScalingPolicyConfiguration = expandTargetTrackingScalingPolicyConfiguration(v.([]any)) + } + + _, err := tfresource.RetryWhenIsA[any, *awstypes.FailedResourceAccessException](ctx, propagationTimeout, func(ctx context.Context) (any, error) { return conn.PutScalingPolicy(ctx, &input) }) @@ -326,7 +551,7 @@ func resourcePolicyRead(ctx context.Context, d *schema.ResourceData, meta any) d var diags diag.Diagnostics conn := meta.(*conns.AWSClient).AppAutoScalingClient(ctx) - outputRaw, err := tfresource.RetryWhenIsA[*awstypes.FailedResourceAccessException](ctx, propagationTimeout, func() (any, error) { + output, err := tfresource.RetryWhenIsA[*awstypes.ScalingPolicy, *awstypes.FailedResourceAccessException](ctx, propagationTimeout, func(ctx context.Context) (*awstypes.ScalingPolicy, error) { return findScalingPolicyByFourPartKey(ctx, conn, d.Get(names.AttrName).(string), d.Get("service_namespace").(string), d.Get(names.AttrResourceID).(string), d.Get("scalable_dimension").(string)) }) @@ -340,13 +565,19 @@ func resourcePolicyRead(ctx context.Context, d 
*schema.ResourceData, meta any) d return sdkdiag.AppendErrorf(diags, "reading Application Auto Scaling Scaling Policy (%s): %s", d.Id(), err) } - output := outputRaw.(*awstypes.ScalingPolicy) d.Set("alarm_arns", tfslices.ApplyToAll(output.Alarms, func(v awstypes.Alarm) string { return aws.ToString(v.AlarmARN) })) d.Set(names.AttrARN, output.PolicyARN) d.Set(names.AttrName, output.PolicyName) d.Set("policy_type", output.PolicyType) + if output.PredictiveScalingPolicyConfiguration != nil { + if err := d.Set("predictive_scaling_policy_configuration", []any{flattenPredictiveScalingPolicyConfiguration(output.PredictiveScalingPolicyConfiguration)}); err != nil { + return sdkdiag.AppendErrorf(diags, "setting predictive_scaling_policy_configuration: %s", err) + } + } else { + d.Set("predictive_scaling_policy_configuration", nil) + } d.Set(names.AttrResourceID, output.ResourceId) d.Set("scalable_dimension", output.ScalableDimension) d.Set("service_namespace", output.ServiceNamespace) @@ -364,14 +595,14 @@ func resourcePolicyDelete(ctx context.Context, d *schema.ResourceData, meta any) var diags diag.Diagnostics conn := meta.(*conns.AWSClient).AppAutoScalingClient(ctx) + log.Printf("[DEBUG] Deleting Application Auto Scaling Scaling Policy: %s", d.Id()) input := applicationautoscaling.DeleteScalingPolicyInput{ PolicyName: aws.String(d.Get(names.AttrName).(string)), ResourceId: aws.String(d.Get(names.AttrResourceID).(string)), ScalableDimension: awstypes.ScalableDimension(d.Get("scalable_dimension").(string)), ServiceNamespace: awstypes.ServiceNamespace(d.Get("service_namespace").(string)), } - - _, err := tfresource.RetryWhenIsA[*awstypes.FailedResourceAccessException](ctx, propagationTimeout, func() (any, error) { + _, err := tfresource.RetryWhenIsA[any, *awstypes.FailedResourceAccessException](ctx, propagationTimeout, func(ctx context.Context) (any, error) { return conn.DeleteScalingPolicy(ctx, &input) }) @@ -387,7 +618,7 @@ func resourcePolicyDelete(ctx context.Context, d 
*schema.ResourceData, meta any) } func resourcePolicyImport(ctx context.Context, d *schema.ResourceData, meta any) ([]*schema.ResourceData, error) { - parts, err := validPolicyImportInput(d.Id()) + parts, err := policyParseImportID(d.Id()) if err != nil { return nil, err } @@ -433,7 +664,6 @@ func findScalingPolicies(ctx context.Context, conn *applicationautoscaling.Clien var output []awstypes.ScalingPolicy pages := applicationautoscaling.NewDescribeScalingPoliciesPaginator(conn, input) - for pages.HasMorePages() { page, err := pages.NextPage(ctx) @@ -451,17 +681,19 @@ func findScalingPolicies(ctx context.Context, conn *applicationautoscaling.Clien return output, nil } -func validPolicyImportInput(id string) ([]string, error) { - idParts := strings.Split(id, "/") +func policyParseImportID(id string) ([]string, error) { + const ( + importIDSeparator = "/" + ) + idParts := strings.Split(id, importIDSeparator) if len(idParts) < 4 { - return nil, fmt.Errorf("unexpected format (%q), expected ///", id) + return nil, fmt.Errorf("unexpected format for ID (%[1]s), expected %[2]s%[2]s%[2]s", id, importIDSeparator) } var serviceNamespace, resourceID, scalableDimension, name string switch idParts[0] { case "dynamodb": serviceNamespace = idParts[0] - dimensionIdx := 3 // DynamoDB resource ID can be "/table/tableName" or "/table/tableName/index/indexName" if idParts[dimensionIdx] == "index" { @@ -487,462 +719,1008 @@ func validPolicyImportInput(id string) ([]string, error) { } if serviceNamespace == "" || resourceID == "" || scalableDimension == "" || name == "" { - return nil, fmt.Errorf("unexpected format (%q), expected ///", id) + return nil, fmt.Errorf("unexpected format for ID (%[1]s), expected %[2]s%[2]s%[2]s", id, importIDSeparator) } return []string{serviceNamespace, resourceID, scalableDimension, name}, nil } -// Takes the result of flatmap.Expand for an array of step adjustments and -// returns a []*awstypes.StepAdjustment. 
-func expandStepAdjustments(configured []any) ([]awstypes.StepAdjustment, error) { - var adjustments []awstypes.StepAdjustment - - // Loop over our configured step adjustments and create an array - // of aws-sdk-go compatible objects. We're forced to convert strings - // to floats here because there's no way to detect whether or not - // an uninitialized, optional schema element is "0.0" deliberately. - // With strings, we can test for "", which is definitely an empty - // struct value. - for _, raw := range configured { - data := raw.(map[string]any) - a := awstypes.StepAdjustment{ - ScalingAdjustment: aws.Int32(int32(data["scaling_adjustment"].(int))), - } - if data["metric_interval_lower_bound"] != "" { - bound := data["metric_interval_lower_bound"] - switch bound := bound.(type) { - case string: - f, err := strconv.ParseFloat(bound, 64) - if err != nil { - return nil, errors.New("metric_interval_lower_bound must be a float value represented as a string") - } - a.MetricIntervalLowerBound = aws.Float64(f) - default: - return nil, errors.New("metric_interval_lower_bound isn't a string") - } - } - if data["metric_interval_upper_bound"] != "" { - bound := data["metric_interval_upper_bound"] - switch bound := bound.(type) { - case string: - f, err := strconv.ParseFloat(bound, 64) - if err != nil { - return nil, errors.New("metric_interval_upper_bound must be a float value represented as a string") - } - a.MetricIntervalUpperBound = aws.Float64(f) - default: - return nil, errors.New("metric_interval_upper_bound isn't a string") - } - } - adjustments = append(adjustments, a) +func expandTargetTrackingScalingPolicyConfiguration(tfList []any) *awstypes.TargetTrackingScalingPolicyConfiguration { + if len(tfList) < 1 || tfList[0] == nil { + return nil } - return adjustments, nil -} + tfMap := tfList[0].(map[string]any) + apiObject := &awstypes.TargetTrackingScalingPolicyConfiguration{ + TargetValue: aws.Float64(tfMap["target_value"].(float64)), + } -func 
expandCustomizedMetricSpecification(configured []any) *awstypes.CustomizedMetricSpecification { - spec := &awstypes.CustomizedMetricSpecification{} + if v, ok := tfMap["customized_metric_specification"].([]any); ok && len(v) > 0 { + apiObject.CustomizedMetricSpecification = expandCustomizedMetricSpecification(v) + } - for _, raw := range configured { - data := raw.(map[string]any) - if val, ok := data["metrics"].(*schema.Set); ok && val.Len() > 0 { - spec.Metrics = expandTargetTrackingMetricDataQueries(val.List()) - } else { - if v, ok := data[names.AttrMetricName]; ok { - spec.MetricName = aws.String(v.(string)) - } + if v, ok := tfMap["disable_scale_in"]; ok { + apiObject.DisableScaleIn = aws.Bool(v.(bool)) + } - if v, ok := data[names.AttrNamespace]; ok { - spec.Namespace = aws.String(v.(string)) - } + if v, ok := tfMap["predefined_metric_specification"].([]any); ok && len(v) > 0 { + apiObject.PredefinedMetricSpecification = expandPredefinedMetricSpecification(v) + } - if v, ok := data[names.AttrUnit].(string); ok && v != "" { - spec.Unit = aws.String(v) - } + if v, ok := tfMap["scale_in_cooldown"]; ok { + apiObject.ScaleInCooldown = aws.Int32(int32(v.(int))) + } + + if v, ok := tfMap["scale_out_cooldown"]; ok { + apiObject.ScaleOutCooldown = aws.Int32(int32(v.(int))) + } + + return apiObject +} - if v, ok := data["statistic"]; ok { - spec.Statistic = awstypes.MetricStatistic(v.(string)) +func expandStepAdjustments(tfList []any) []awstypes.StepAdjustment { + var apiObjects []awstypes.StepAdjustment + + for _, tfMapRaw := range tfList { + tfMap := tfMapRaw.(map[string]any) + apiObject := awstypes.StepAdjustment{ + ScalingAdjustment: aws.Int32(int32(tfMap["scaling_adjustment"].(int))), + } + + if v, ok := tfMap["metric_interval_lower_bound"].(string); ok { + if v, null, _ := nullable.Float(v).ValueFloat64(); !null { + apiObject.MetricIntervalLowerBound = aws.Float64(v) } + } - if s, ok := data["dimensions"].(*schema.Set); ok && s.Len() > 0 { - dimensions := 
make([]awstypes.MetricDimension, s.Len()) - for i, d := range s.List() { - dimension := d.(map[string]any) - dimensions[i] = awstypes.MetricDimension{ - Name: aws.String(dimension[names.AttrName].(string)), - Value: aws.String(dimension[names.AttrValue].(string)), - } - } - spec.Dimensions = dimensions + if v, ok := tfMap["metric_interval_upper_bound"].(string); ok { + if v, null, _ := nullable.Float(v).ValueFloat64(); !null { + apiObject.MetricIntervalUpperBound = aws.Float64(v) } } + + apiObjects = append(apiObjects, apiObject) } - return spec + + return apiObjects } -func expandTargetTrackingMetricDataQueries(metricDataQuerySlices []any) []awstypes.TargetTrackingMetricDataQuery { - if len(metricDataQuerySlices) < 1 { +func expandCustomizedMetricSpecification(tfList []any) *awstypes.CustomizedMetricSpecification { + if len(tfList) < 1 || tfList[0] == nil { return nil } - metricDataQueries := make([]awstypes.TargetTrackingMetricDataQuery, len(metricDataQuerySlices)) - for i := range metricDataQueries { - metricDataQueryFlat := metricDataQuerySlices[i].(map[string]any) - metricDataQuery := awstypes.TargetTrackingMetricDataQuery{ - Id: aws.String(metricDataQueryFlat[names.AttrID].(string)), - } - if val, ok := metricDataQueryFlat["metric_stat"]; ok && len(val.([]any)) > 0 { - metricStatSpec := val.([]any)[0].(map[string]any) - metricSpec := metricStatSpec["metric"].([]any)[0].(map[string]any) - metric := &awstypes.TargetTrackingMetric{ - MetricName: aws.String(metricSpec[names.AttrMetricName].(string)), - Namespace: aws.String(metricSpec[names.AttrNamespace].(string)), - } - if v, ok := metricSpec["dimensions"]; ok { - dims := v.(*schema.Set).List() - dimList := make([]awstypes.TargetTrackingMetricDimension, len(dims)) - for i := range dimList { - dim := dims[i].(map[string]any) - md := awstypes.TargetTrackingMetricDimension{ - Name: aws.String(dim[names.AttrName].(string)), - Value: aws.String(dim[names.AttrValue].(string)), - } - dimList[i] = md + tfMap := 
tfList[0].(map[string]any) + apiObject := &awstypes.CustomizedMetricSpecification{} + + if v, ok := tfMap["metrics"].(*schema.Set); ok && v.Len() > 0 { + apiObject.Metrics = expandTargetTrackingMetricDataQueries(v.List()) + } else { + if v, ok := tfMap["dimensions"].(*schema.Set); ok && v.Len() > 0 { + dimensions := make([]awstypes.MetricDimension, v.Len()) + + for i, tfMapRaw := range v.List() { + tfMap := tfMapRaw.(map[string]any) + dimensions[i] = awstypes.MetricDimension{ + Name: aws.String(tfMap[names.AttrName].(string)), + Value: aws.String(tfMap[names.AttrValue].(string)), } - metric.Dimensions = dimList - } - metricStat := &awstypes.TargetTrackingMetricStat{ - Metric: metric, - Stat: aws.String(metricStatSpec["stat"].(string)), } - if v, ok := metricStatSpec[names.AttrUnit]; ok && len(v.(string)) > 0 { - metricStat.Unit = aws.String(v.(string)) - } - metricDataQuery.MetricStat = metricStat + + apiObject.Dimensions = dimensions } - if val, ok := metricDataQueryFlat[names.AttrExpression]; ok && val.(string) != "" { - metricDataQuery.Expression = aws.String(val.(string)) + + if v, ok := tfMap[names.AttrMetricName]; ok { + apiObject.MetricName = aws.String(v.(string)) + } + + if v, ok := tfMap[names.AttrNamespace]; ok { + apiObject.Namespace = aws.String(v.(string)) } - if val, ok := metricDataQueryFlat["label"]; ok && val.(string) != "" { - metricDataQuery.Label = aws.String(val.(string)) + + if v, ok := tfMap["statistic"]; ok { + apiObject.Statistic = awstypes.MetricStatistic(v.(string)) } - if val, ok := metricDataQueryFlat["return_data"]; ok { - metricDataQuery.ReturnData = aws.Bool(val.(bool)) + + if v, ok := tfMap[names.AttrUnit].(string); ok && v != "" { + apiObject.Unit = aws.String(v) } - metricDataQueries[i] = metricDataQuery } - return metricDataQueries + + return apiObject } -func expandPredefinedMetricSpecification(configured []any) *awstypes.PredefinedMetricSpecification { - spec := &awstypes.PredefinedMetricSpecification{} +func 
expandTargetTrackingMetricDataQueries(tfList []any) []awstypes.TargetTrackingMetricDataQuery { + if len(tfList) < 1 { + return nil + } - for _, raw := range configured { - data := raw.(map[string]any) + apiObjects := make([]awstypes.TargetTrackingMetricDataQuery, len(tfList)) - if v, ok := data["predefined_metric_type"]; ok { - spec.PredefinedMetricType = awstypes.MetricType(v.(string)) + for i, tfMapRaw := range tfList { + tfMap := tfMapRaw.(map[string]any) + apiObject := awstypes.TargetTrackingMetricDataQuery{ + Id: aws.String(tfMap[names.AttrID].(string)), } - if v, ok := data["resource_label"].(string); ok && v != "" { - spec.ResourceLabel = aws.String(v) + if v, ok := tfMap[names.AttrExpression]; ok && v.(string) != "" { + apiObject.Expression = aws.String(v.(string)) } - } - return spec -} - -func expandPutScalingPolicyInput(d *schema.ResourceData) applicationautoscaling.PutScalingPolicyInput { - apiObject := applicationautoscaling.PutScalingPolicyInput{ - PolicyName: aws.String(d.Get(names.AttrName).(string)), - ResourceId: aws.String(d.Get(names.AttrResourceID).(string)), - } - if v, ok := d.GetOk("policy_type"); ok { - apiObject.PolicyType = awstypes.PolicyType(v.(string)) - } + if v, ok := tfMap["label"]; ok && v.(string) != "" { + apiObject.Label = aws.String(v.(string)) + } - if v, ok := d.GetOk("scalable_dimension"); ok { - apiObject.ScalableDimension = awstypes.ScalableDimension(v.(string)) - } + if v, ok := tfMap["metric_stat"]; ok && len(v.([]any)) > 0 { + apiObject.MetricStat = &awstypes.TargetTrackingMetricStat{} + tfMap := v.([]any)[0].(map[string]any) - if v, ok := d.GetOk("service_namespace"); ok { - apiObject.ServiceNamespace = awstypes.ServiceNamespace(v.(string)) - } + if v, ok := tfMap["metric"]; ok && len(v.([]any)) > 0 { + tfMap := v.([]any)[0].(map[string]any) - if v, ok := d.GetOk("step_scaling_policy_configuration"); ok { - apiObject.StepScalingPolicyConfiguration = expandStepScalingPolicyConfiguration(v.([]any)) - } + metric := 
&awstypes.TargetTrackingMetric{ + MetricName: aws.String(tfMap[names.AttrMetricName].(string)), + Namespace: aws.String(tfMap[names.AttrNamespace].(string)), + } - if l, ok := d.GetOk("target_tracking_scaling_policy_configuration"); ok { - v := l.([]any) - if len(v) == 1 { - ttspCfg := v[0].(map[string]any) - cfg := awstypes.TargetTrackingScalingPolicyConfiguration{ - TargetValue: aws.Float64(ttspCfg["target_value"].(float64)), - } + if v, ok := tfMap["dimensions"].(*schema.Set); ok && v.Len() > 0 { + dimensions := make([]awstypes.TargetTrackingMetricDimension, v.Len()) - if v, ok := ttspCfg["scale_in_cooldown"]; ok { - cfg.ScaleInCooldown = aws.Int32(int32(v.(int))) - } + for i, tfMapRaw := range v.List() { + tfMap := tfMapRaw.(map[string]any) + dimensions[i] = awstypes.TargetTrackingMetricDimension{ + Name: aws.String(tfMap[names.AttrName].(string)), + Value: aws.String(tfMap[names.AttrValue].(string)), + } + } - if v, ok := ttspCfg["scale_out_cooldown"]; ok { - cfg.ScaleOutCooldown = aws.Int32(int32(v.(int))) - } + metric.Dimensions = dimensions + } - if v, ok := ttspCfg["disable_scale_in"]; ok { - cfg.DisableScaleIn = aws.Bool(v.(bool)) + apiObject.MetricStat.Metric = metric } - if v, ok := ttspCfg["customized_metric_specification"].([]any); ok && len(v) > 0 { - cfg.CustomizedMetricSpecification = expandCustomizedMetricSpecification(v) + if v, ok := tfMap["stat"].(string); ok && v != "" { + apiObject.MetricStat.Stat = aws.String(v) } - if v, ok := ttspCfg["predefined_metric_specification"].([]any); ok && len(v) > 0 { - cfg.PredefinedMetricSpecification = expandPredefinedMetricSpecification(v) + if v, ok := tfMap[names.AttrUnit].(string); ok && v != "" { + apiObject.MetricStat.Unit = aws.String(v) } + } - apiObject.TargetTrackingScalingPolicyConfiguration = &cfg + if v, ok := tfMap["return_data"]; ok { + apiObject.ReturnData = aws.Bool(v.(bool)) } + + apiObjects[i] = apiObject + } + + return apiObjects +} + +func expandPredefinedMetricSpecification(tfList []any) 
*awstypes.PredefinedMetricSpecification { + if len(tfList) < 1 || tfList[0] == nil { + return nil + } + + tfMap := tfList[0].(map[string]any) + apiObject := &awstypes.PredefinedMetricSpecification{} + + if v, ok := tfMap["predefined_metric_type"].(string); ok && v != "" { + apiObject.PredefinedMetricType = awstypes.MetricType(v) + } + + if v, ok := tfMap["resource_label"].(string); ok && v != "" { + apiObject.ResourceLabel = aws.String(v) } return apiObject } -func expandStepScalingPolicyConfiguration(cfg []any) *awstypes.StepScalingPolicyConfiguration { - if len(cfg) < 1 { +func expandStepScalingPolicyConfiguration(tfList []any) *awstypes.StepScalingPolicyConfiguration { + if len(tfList) < 1 || tfList[0] == nil { return nil } - out := &awstypes.StepScalingPolicyConfiguration{} + tfMap := tfList[0].(map[string]any) + apiObject := &awstypes.StepScalingPolicyConfiguration{} - m := cfg[0].(map[string]any) - if v, ok := m["adjustment_type"]; ok { - out.AdjustmentType = awstypes.AdjustmentType(v.(string)) + if v, ok := tfMap["adjustment_type"]; ok { + apiObject.AdjustmentType = awstypes.AdjustmentType(v.(string)) } - if v, ok := m["cooldown"]; ok { - out.Cooldown = aws.Int32(int32(v.(int))) + + if v, ok := tfMap["cooldown"]; ok { + apiObject.Cooldown = aws.Int32(int32(v.(int))) } - if v, ok := m["metric_aggregation_type"]; ok { - out.MetricAggregationType = awstypes.MetricAggregationType(v.(string)) + + if v, ok := tfMap["metric_aggregation_type"]; ok { + apiObject.MetricAggregationType = awstypes.MetricAggregationType(v.(string)) } - if v, ok := m["min_adjustment_magnitude"].(int); ok && v > 0 { - out.MinAdjustmentMagnitude = aws.Int32(int32(v)) + + if v, ok := tfMap["min_adjustment_magnitude"].(int); ok && v > 0 { + apiObject.MinAdjustmentMagnitude = aws.Int32(int32(v)) } - if v, ok := m["step_adjustment"].(*schema.Set); ok && v.Len() > 0 { - out.StepAdjustments, _ = expandStepAdjustments(v.List()) + + if v, ok := tfMap["step_adjustment"].(*schema.Set); ok && v.Len() 
> 0 { + apiObject.StepAdjustments = expandStepAdjustments(v.List()) } - return out + return apiObject } -func flattenStepScalingPolicyConfiguration(cfg *awstypes.StepScalingPolicyConfiguration) []any { - if cfg == nil { +func flattenStepScalingPolicyConfiguration(apiObject *awstypes.StepScalingPolicyConfiguration) []any { + if apiObject == nil { return []any{} } - m := make(map[string]any) + tfMap := make(map[string]any) - m["adjustment_type"] = string(cfg.AdjustmentType) + tfMap["adjustment_type"] = apiObject.AdjustmentType - if cfg.Cooldown != nil { - m["cooldown"] = aws.ToInt32(cfg.Cooldown) + if apiObject.Cooldown != nil { + tfMap["cooldown"] = aws.ToInt32(apiObject.Cooldown) } - m["metric_aggregation_type"] = string(cfg.MetricAggregationType) + tfMap["metric_aggregation_type"] = apiObject.MetricAggregationType - if cfg.MinAdjustmentMagnitude != nil { - m["min_adjustment_magnitude"] = aws.ToInt32(cfg.MinAdjustmentMagnitude) + if apiObject.MinAdjustmentMagnitude != nil { + tfMap["min_adjustment_magnitude"] = aws.ToInt32(apiObject.MinAdjustmentMagnitude) } - if cfg.StepAdjustments != nil { - stepAdjustmentsResource := &schema.Resource{ - Schema: map[string]*schema.Schema{ - "metric_interval_lower_bound": { - Type: schema.TypeString, - Optional: true, - }, - "metric_interval_upper_bound": { - Type: schema.TypeString, - Optional: true, - }, - "scaling_adjustment": { - Type: schema.TypeInt, - Required: true, - }, - }, - } - m["step_adjustment"] = schema.NewSet(schema.HashResource(stepAdjustmentsResource), flattenStepAdjustments(cfg.StepAdjustments)) + + if apiObject.StepAdjustments != nil { + tfMap["step_adjustment"] = flattenStepAdjustments(apiObject.StepAdjustments) } - return []any{m} + return []any{tfMap} } -func flattenStepAdjustments(adjs []awstypes.StepAdjustment) []any { - out := make([]any, len(adjs)) +func flattenStepAdjustments(apiObjects []awstypes.StepAdjustment) []any { + tfList := make([]any, len(apiObjects)) - for i, adj := range adjs { - m := 
make(map[string]any) + for i, apiObject := range apiObjects { + tfMap := make(map[string]any) - m["scaling_adjustment"] = int(aws.ToInt32(adj.ScalingAdjustment)) + tfMap["scaling_adjustment"] = aws.ToInt32(apiObject.ScalingAdjustment) - if adj.MetricIntervalLowerBound != nil { - m["metric_interval_lower_bound"] = fmt.Sprintf("%g", aws.ToFloat64(adj.MetricIntervalLowerBound)) + if apiObject.MetricIntervalLowerBound != nil { + tfMap["metric_interval_lower_bound"] = flex.Float64ToStringValue(apiObject.MetricIntervalLowerBound) } - if adj.MetricIntervalUpperBound != nil { - m["metric_interval_upper_bound"] = fmt.Sprintf("%g", aws.ToFloat64(adj.MetricIntervalUpperBound)) + + if apiObject.MetricIntervalUpperBound != nil { + tfMap["metric_interval_upper_bound"] = flex.Float64ToStringValue(apiObject.MetricIntervalUpperBound) } - out[i] = m + tfList[i] = tfMap } - return out + return tfList } -func flattenTargetTrackingScalingPolicyConfiguration(cfg *awstypes.TargetTrackingScalingPolicyConfiguration) []any { - if cfg == nil { +func flattenTargetTrackingScalingPolicyConfiguration(apiObject *awstypes.TargetTrackingScalingPolicyConfiguration) []any { + if apiObject == nil { return []any{} } - m := make(map[string]any) + tfMap := make(map[string]any) - if v := cfg.CustomizedMetricSpecification; v != nil { - m["customized_metric_specification"] = flattenCustomizedMetricSpecification(v) + if v := apiObject.CustomizedMetricSpecification; v != nil { + tfMap["customized_metric_specification"] = flattenCustomizedMetricSpecification(v) } - if v := cfg.DisableScaleIn; v != nil { - m["disable_scale_in"] = aws.ToBool(v) + if v := apiObject.DisableScaleIn; v != nil { + tfMap["disable_scale_in"] = aws.ToBool(v) } - if v := cfg.PredefinedMetricSpecification; v != nil { - m["predefined_metric_specification"] = flattenPredefinedMetricSpecification(v) + if v := apiObject.PredefinedMetricSpecification; v != nil { + tfMap["predefined_metric_specification"] = 
flattenPredefinedMetricSpecification(v) } - if v := cfg.ScaleInCooldown; v != nil { - m["scale_in_cooldown"] = aws.ToInt32(v) + if v := apiObject.ScaleInCooldown; v != nil { + tfMap["scale_in_cooldown"] = aws.ToInt32(v) } - if v := cfg.ScaleOutCooldown; v != nil { - m["scale_out_cooldown"] = aws.ToInt32(v) + if v := apiObject.ScaleOutCooldown; v != nil { + tfMap["scale_out_cooldown"] = aws.ToInt32(v) } - if v := cfg.TargetValue; v != nil { - m["target_value"] = aws.ToFloat64(v) + if v := apiObject.TargetValue; v != nil { + tfMap["target_value"] = aws.ToFloat64(v) } - return []any{m} + return []any{tfMap} } -func flattenCustomizedMetricSpecification(cfg *awstypes.CustomizedMetricSpecification) []any { - if cfg == nil { +func flattenCustomizedMetricSpecification(apiObject *awstypes.CustomizedMetricSpecification) []any { + if apiObject == nil { return []any{} } - m := map[string]any{} + tfMap := map[string]any{} - if cfg.Metrics != nil { - m["metrics"] = flattenTargetTrackingMetricDataQueries(cfg.Metrics) + if apiObject.Metrics != nil { + tfMap["metrics"] = flattenTargetTrackingMetricDataQueries(apiObject.Metrics) } else { - if v := cfg.Dimensions; len(v) > 0 { - m["dimensions"] = flattenMetricDimensions(cfg.Dimensions) + if v := apiObject.Dimensions; len(v) > 0 { + tfMap["dimensions"] = flattenMetricDimensions(apiObject.Dimensions) } - if v := cfg.MetricName; v != nil { - m[names.AttrMetricName] = aws.ToString(v) + if v := apiObject.MetricName; v != nil { + tfMap[names.AttrMetricName] = aws.ToString(v) } - if v := cfg.Namespace; v != nil { - m[names.AttrNamespace] = aws.ToString(v) + if v := apiObject.Namespace; v != nil { + tfMap[names.AttrNamespace] = aws.ToString(v) } - m["statistic"] = string(cfg.Statistic) + tfMap["statistic"] = apiObject.Statistic - if v := cfg.Unit; v != nil { - m[names.AttrUnit] = aws.ToString(v) + if v := apiObject.Unit; v != nil { + tfMap[names.AttrUnit] = aws.ToString(v) } } - return []any{m} + return []any{tfMap} } -func 
flattenTargetTrackingMetricDataQueries(metricDataQueries []awstypes.TargetTrackingMetricDataQuery) []any { - metricDataQueriesSpec := make([]any, len(metricDataQueries)) - for i := range metricDataQueriesSpec { - metricDataQuery := map[string]any{} - rawMetricDataQuery := metricDataQueries[i] - metricDataQuery[names.AttrID] = aws.ToString(rawMetricDataQuery.Id) - if rawMetricDataQuery.Expression != nil { - metricDataQuery[names.AttrExpression] = aws.ToString(rawMetricDataQuery.Expression) +func flattenTargetTrackingMetricDataQueries(apiObjects []awstypes.TargetTrackingMetricDataQuery) []any { + tfList := make([]any, len(apiObjects)) + + for i, apiObject := range apiObjects { + tfMap := map[string]any{ + names.AttrID: aws.ToString(apiObject.Id), } - if rawMetricDataQuery.Label != nil { - metricDataQuery["label"] = aws.ToString(rawMetricDataQuery.Label) + + if apiObject.Expression != nil { + tfMap[names.AttrExpression] = aws.ToString(apiObject.Expression) } - if rawMetricDataQuery.MetricStat != nil { - metricStatSpec := map[string]any{} - rawMetricStat := rawMetricDataQuery.MetricStat - rawMetric := rawMetricStat.Metric - metricSpec := map[string]any{} - if rawMetric.Dimensions != nil { - dimSpec := make([]any, len(rawMetric.Dimensions)) - for i := range dimSpec { - dim := map[string]any{} - rawDim := rawMetric.Dimensions[i] - dim[names.AttrName] = aws.ToString(rawDim.Name) - dim[names.AttrValue] = aws.ToString(rawDim.Value) - dimSpec[i] = dim - } - metricSpec["dimensions"] = dimSpec - } - metricSpec[names.AttrMetricName] = aws.ToString(rawMetric.MetricName) - metricSpec[names.AttrNamespace] = aws.ToString(rawMetric.Namespace) - metricStatSpec["metric"] = []map[string]any{metricSpec} - metricStatSpec["stat"] = aws.ToString(rawMetricStat.Stat) - if rawMetricStat.Unit != nil { - metricStatSpec[names.AttrUnit] = aws.ToString(rawMetricStat.Unit) + + if apiObject.Label != nil { + tfMap["label"] = aws.ToString(apiObject.Label) + } + + if apiObject := apiObject.MetricStat; 
apiObject != nil { + tfMapMetricStat := map[string]any{ + "stat": aws.ToString(apiObject.Stat), + } + + if apiObject := apiObject.Metric; apiObject != nil { + tfMapMetric := map[string]any{ + names.AttrMetricName: aws.ToString(apiObject.MetricName), + names.AttrNamespace: aws.ToString(apiObject.Namespace), + } + + tfList := make([]any, len(apiObject.Dimensions)) + for i, apiObject := range apiObject.Dimensions { + tfList[i] = map[string]any{ + names.AttrName: aws.ToString(apiObject.Name), + names.AttrValue: aws.ToString(apiObject.Value), + } + } + + tfMapMetric["dimensions"] = tfList + tfMapMetricStat["metric"] = []map[string]any{tfMapMetric} + } + + if apiObject.Unit != nil { + tfMapMetricStat[names.AttrUnit] = aws.ToString(apiObject.Unit) } - metricDataQuery["metric_stat"] = []map[string]any{metricStatSpec} + + tfMap["metric_stat"] = []map[string]any{tfMapMetricStat} + } + + if apiObject.ReturnData != nil { + tfMap["return_data"] = aws.ToBool(apiObject.ReturnData) } - if rawMetricDataQuery.ReturnData != nil { - metricDataQuery["return_data"] = aws.ToBool(rawMetricDataQuery.ReturnData) + + tfList[i] = tfMap + } + + return tfList +} + +func flattenMetricDimensions(apiObjects []awstypes.MetricDimension) []any { + tfList := make([]any, len(apiObjects)) + + for i, apiObject := range apiObjects { + tfMap := map[string]any{} + + if v := apiObject.Name; v != nil { + tfMap[names.AttrName] = aws.ToString(v) } - metricDataQueriesSpec[i] = metricDataQuery + + if v := apiObject.Value; v != nil { + tfMap[names.AttrValue] = aws.ToString(v) + } + + tfList[i] = tfMap + } + + return tfList +} + +func flattenPredefinedMetricSpecification(apiObject *awstypes.PredefinedMetricSpecification) []any { + if apiObject == nil { + return []any{} + } + + tfMap := map[string]any{ + "predefined_metric_type": apiObject.PredefinedMetricType, + } + + if v := apiObject.ResourceLabel; v != nil { + tfMap["resource_label"] = aws.ToString(v) + } + + return []any{tfMap} +} + +func 
expandPredictiveScalingPolicyConfiguration(tfMap map[string]any) *awstypes.PredictiveScalingPolicyConfiguration { + if tfMap == nil { + return nil + } + + apiObject := &awstypes.PredictiveScalingPolicyConfiguration{} + + if v, ok := tfMap["max_capacity_breach_behavior"].(string); ok && v != "" { + apiObject.MaxCapacityBreachBehavior = awstypes.PredictiveScalingMaxCapacityBreachBehavior(v) + } + + if v, ok := tfMap["max_capacity_buffer"].(int); ok && v != 0 { + apiObject.MaxCapacityBuffer = aws.Int32(int32(v)) + } + + if v, ok := tfMap["metric_specification"].([]any); ok && len(v) > 0 { + apiObject.MetricSpecifications = expandPredictiveScalingMetricSpecifications(v) } - return metricDataQueriesSpec + + if v, ok := tfMap[names.AttrMode].(string); ok && v != "" { + apiObject.Mode = awstypes.PredictiveScalingMode(v) + } + + if v, ok := tfMap["scheduling_buffer_time"].(int); ok && v != 0 { + apiObject.SchedulingBufferTime = aws.Int32(int32(v)) + } + + return apiObject } -func flattenMetricDimensions(ds []awstypes.MetricDimension) []any { - l := make([]any, len(ds)) - for i, d := range ds { - if ds == nil { +func expandPredictiveScalingMetricSpecifications(tfList []any) []awstypes.PredictiveScalingMetricSpecification { + if len(tfList) == 0 { + return nil + } + + var apiObjects []awstypes.PredictiveScalingMetricSpecification + + for _, tfMapRaw := range tfList { + tfMap, ok := tfMapRaw.(map[string]any) + if !ok { continue } - m := map[string]any{} + apiObject := expandPredictiveScalingMetricSpecification(tfMap) - if v := d.Name; v != nil { - m[names.AttrName] = aws.ToString(v) + if apiObject == nil { + continue } - if v := d.Value; v != nil { - m[names.AttrValue] = aws.ToString(v) + apiObjects = append(apiObjects, *apiObject) + } + + return apiObjects +} + +func expandPredictiveScalingMetricSpecification(tfMap map[string]any) *awstypes.PredictiveScalingMetricSpecification { + if tfMap == nil { + return nil + } + + apiObject := 
&awstypes.PredictiveScalingMetricSpecification{} + + if v, ok := tfMap["customized_capacity_metric_specification"].([]any); ok && len(v) > 0 && v[0] != nil { + apiObject.CustomizedCapacityMetricSpecification = expandPredictiveScalingCustomizedMetricSpecification(v[0].(map[string]any)) + } + + if v, ok := tfMap["customized_load_metric_specification"].([]any); ok && len(v) > 0 && v[0] != nil { + apiObject.CustomizedLoadMetricSpecification = expandPredictiveScalingCustomizedMetricSpecification(v[0].(map[string]any)) + } + + if v, ok := tfMap["customized_scaling_metric_specification"].([]any); ok && len(v) > 0 && v[0] != nil { + apiObject.CustomizedScalingMetricSpecification = expandPredictiveScalingCustomizedMetricSpecification(v[0].(map[string]any)) + } + + if v, ok := tfMap["predefined_load_metric_specification"].([]any); ok && len(v) > 0 && v[0] != nil { + apiObject.PredefinedLoadMetricSpecification = expandPredictiveScalingPredefinedLoadMetricSpecification(v[0].(map[string]any)) + } + + if v, ok := tfMap["predefined_metric_pair_specification"].([]any); ok && len(v) > 0 && v[0] != nil { + apiObject.PredefinedMetricPairSpecification = expandPredictiveScalingPredefinedMetricPairSpecification(v[0].(map[string]any)) + } + + if v, ok := tfMap["predefined_scaling_metric_specification"].([]any); ok && len(v) > 0 && v[0] != nil { + apiObject.PredefinedScalingMetricSpecification = expandPredictiveScalingPredefinedScalingMetricSpecification(v[0].(map[string]any)) + } + + if v, ok := tfMap["target_value"].(string); ok { + if v, null, _ := nullable.Float(v).ValueFloat64(); !null { + apiObject.TargetValue = aws.Float64(v) } + } + + return apiObject +} - l[i] = m +func expandPredictiveScalingCustomizedMetricSpecification(tfMap map[string]any) *awstypes.PredictiveScalingCustomizedMetricSpecification { + if tfMap == nil { + return nil } - return l + + apiObject := &awstypes.PredictiveScalingCustomizedMetricSpecification{} + + if v, ok := tfMap["metric_data_query"].([]any); ok && 
len(v) > 0 { + apiObject.MetricDataQueries = expandPredictiveScalingMetricDataQueries(v) + } + + return apiObject } -func flattenPredefinedMetricSpecification(cfg *awstypes.PredefinedMetricSpecification) []any { - if cfg == nil { - return []any{} +func expandPredictiveScalingMetricDataQueries(tfList []any) []awstypes.PredictiveScalingMetricDataQuery { + if len(tfList) == 0 { + return nil + } + + var apiObjects []awstypes.PredictiveScalingMetricDataQuery + + for _, tfMapRaw := range tfList { + tfMap, ok := tfMapRaw.(map[string]any) + if !ok { + continue + } + + apiObject := expandPredictiveScalingMetricDataQuery(tfMap) + + if apiObject == nil { + continue + } + + apiObjects = append(apiObjects, *apiObject) + } + + return apiObjects +} + +func expandPredictiveScalingMetricDataQuery(tfMap map[string]any) *awstypes.PredictiveScalingMetricDataQuery { + if tfMap == nil { + return nil + } + + apiObject := &awstypes.PredictiveScalingMetricDataQuery{} + + if v, ok := tfMap[names.AttrExpression].(string); ok && v != "" { + apiObject.Expression = aws.String(v) + } + + if v, ok := tfMap[names.AttrID].(string); ok && v != "" { + apiObject.Id = aws.String(v) + } + + if v, ok := tfMap["label"].(string); ok && v != "" { + apiObject.Label = aws.String(v) + } + + if v, ok := tfMap["metric_stat"].([]any); ok && len(v) > 0 && v[0] != nil { + apiObject.MetricStat = expandPredictiveScalingMetricStat(v[0].(map[string]any)) + } + + if v, ok := tfMap["return_data"]; ok { + apiObject.ReturnData = aws.Bool(v.(bool)) + } + + return apiObject +} + +func expandPredictiveScalingMetricStat(tfMap map[string]any) *awstypes.PredictiveScalingMetricStat { + if tfMap == nil { + return nil + } + + apiObject := &awstypes.PredictiveScalingMetricStat{} + + if v, ok := tfMap["metric"].([]any); ok && len(v) > 0 && v[0] != nil { + apiObject.Metric = expandPredictiveScalingMetric(v[0].(map[string]any)) + } + + if v, ok := tfMap["stat"].(string); ok && v != "" { + apiObject.Stat = aws.String(v) + } + + if v, ok 
:= tfMap[names.AttrUnit].(string); ok && v != "" { + apiObject.Unit = aws.String(v) + } + + return apiObject +} + +func expandPredictiveScalingMetric(tfMap map[string]any) *awstypes.PredictiveScalingMetric { + if tfMap == nil { + return nil + } + + apiObject := &awstypes.PredictiveScalingMetric{} + + if v, ok := tfMap["dimension"].(*schema.Set); ok && v.Len() > 0 { + apiObject.Dimensions = expandPredictiveScalingMetricDimensions(v.List()) + } + + if v, ok := tfMap[names.AttrMetricName].(string); ok && v != "" { + apiObject.MetricName = aws.String(v) + } + + if v, ok := tfMap[names.AttrNamespace].(string); ok && v != "" { + apiObject.Namespace = aws.String(v) + } + + return apiObject +} + +func expandPredictiveScalingMetricDimensions(tfList []any) []awstypes.PredictiveScalingMetricDimension { + if len(tfList) == 0 { + return nil + } + + var apiObjects []awstypes.PredictiveScalingMetricDimension + + for _, tfMapRaw := range tfList { + tfMap, ok := tfMapRaw.(map[string]any) + if !ok { + continue + } + + apiObject := expandPredictiveScalingMetricDimension(tfMap) + + if apiObject == nil { + continue + } + + apiObjects = append(apiObjects, *apiObject) + } + + return apiObjects +} + +func expandPredictiveScalingMetricDimension(tfMap map[string]any) *awstypes.PredictiveScalingMetricDimension { + if tfMap == nil { + return nil + } + + apiObject := &awstypes.PredictiveScalingMetricDimension{} + + if v, ok := tfMap[names.AttrName].(string); ok && v != "" { + apiObject.Name = aws.String(v) + } + + if v, ok := tfMap[names.AttrValue].(string); ok && v != "" { + apiObject.Value = aws.String(v) + } + + return apiObject +} + +func expandPredictiveScalingPredefinedLoadMetricSpecification(tfMap map[string]any) *awstypes.PredictiveScalingPredefinedLoadMetricSpecification { + if tfMap == nil { + return nil + } + + apiObject := &awstypes.PredictiveScalingPredefinedLoadMetricSpecification{} + + if v, ok := tfMap["predefined_metric_type"].(string); ok && v != "" { + 
apiObject.PredefinedMetricType = aws.String(v) + } + + if v, ok := tfMap["resource_label"].(string); ok && v != "" { + apiObject.ResourceLabel = aws.String(v) + } + + return apiObject +} + +func expandPredictiveScalingPredefinedMetricPairSpecification(tfMap map[string]any) *awstypes.PredictiveScalingPredefinedMetricPairSpecification { + if tfMap == nil { + return nil + } + + apiObject := &awstypes.PredictiveScalingPredefinedMetricPairSpecification{} + + if v, ok := tfMap["predefined_metric_type"].(string); ok && v != "" { + apiObject.PredefinedMetricType = aws.String(v) + } + + if v, ok := tfMap["resource_label"].(string); ok && v != "" { + apiObject.ResourceLabel = aws.String(v) + } + + return apiObject +} + +func expandPredictiveScalingPredefinedScalingMetricSpecification(tfMap map[string]any) *awstypes.PredictiveScalingPredefinedScalingMetricSpecification { + if tfMap == nil { + return nil + } + + apiObject := &awstypes.PredictiveScalingPredefinedScalingMetricSpecification{} + + if v, ok := tfMap["predefined_metric_type"].(string); ok && v != "" { + apiObject.PredefinedMetricType = aws.String(v) + } + + if v, ok := tfMap["resource_label"].(string); ok && v != "" { + apiObject.ResourceLabel = aws.String(v) + } + + return apiObject +} + +func flattenPredictiveScalingPolicyConfiguration(apiObject *awstypes.PredictiveScalingPolicyConfiguration) map[string]any { + if apiObject == nil { + return nil + } + + tfMap := map[string]any{ + "max_capacity_breach_behavior": apiObject.MaxCapacityBreachBehavior, + names.AttrMode: apiObject.Mode, + } + + if v := apiObject.MaxCapacityBuffer; v != nil { + tfMap["max_capacity_buffer"] = aws.ToInt32(v) + } + + if v := apiObject.MetricSpecifications; v != nil { + tfMap["metric_specification"] = flattenPredictiveScalingMetricSpecifications(v) + } + + if v := apiObject.SchedulingBufferTime; v != nil { + tfMap["scheduling_buffer_time"] = aws.ToInt32(v) } - m := map[string]any{} + return tfMap +} + +func 
flattenPredictiveScalingMetricSpecifications(apiObjects []awstypes.PredictiveScalingMetricSpecification) []any { + if len(apiObjects) == 0 { + return nil + } + + var tfList []any + + for _, apiObject := range apiObjects { + tfList = append(tfList, flattenPredictiveScalingMetricSpecification(&apiObject)) + } + + return tfList +} + +func flattenPredictiveScalingMetricSpecification(apiObject *awstypes.PredictiveScalingMetricSpecification) map[string]any { + if apiObject == nil { + return nil + } + + tfMap := map[string]any{} + + if v := apiObject.CustomizedCapacityMetricSpecification; v != nil { + tfMap["customized_capacity_metric_specification"] = []any{flattenPredictiveScalingCustomizedMetricSpecification(v)} + } + + if v := apiObject.CustomizedLoadMetricSpecification; v != nil { + tfMap["customized_load_metric_specification"] = []any{flattenPredictiveScalingCustomizedMetricSpecification(v)} + } + + if v := apiObject.CustomizedScalingMetricSpecification; v != nil { + tfMap["customized_scaling_metric_specification"] = []any{flattenPredictiveScalingCustomizedMetricSpecification(v)} + } + + if v := apiObject.PredefinedLoadMetricSpecification; v != nil { + tfMap["predefined_load_metric_specification"] = []any{flattenPredictiveScalingPredefinedLoadMetricSpecification(v)} + } + + if v := apiObject.PredefinedMetricPairSpecification; v != nil { + tfMap["predefined_metric_pair_specification"] = []any{flattenPredictiveScalingPredefinedMetricPairSpecification(v)} + } + + if v := apiObject.PredefinedScalingMetricSpecification; v != nil { + tfMap["predefined_scaling_metric_specification"] = []any{flattenPredictiveScalingPredefinedScalingMetricSpecification(v)} + } + + if apiObject.TargetValue != nil { + tfMap["target_value"] = flex.Float64ToStringValue(apiObject.TargetValue) + } + + return tfMap +} + +func flattenPredictiveScalingCustomizedMetricSpecification(apiObject *awstypes.PredictiveScalingCustomizedMetricSpecification) map[string]any { + if apiObject == nil { + return nil 
+ } + + tfMap := map[string]any{} + + if v := apiObject.MetricDataQueries; v != nil { + tfMap["metric_data_query"] = flattenPredictiveScalingMetricDataQueries(v) + } + + return tfMap +} + +func flattenPredictiveScalingMetricDataQueries(apiObjects []awstypes.PredictiveScalingMetricDataQuery) []any { + if len(apiObjects) == 0 { + return nil + } + + var tfList []any + + for _, apiObject := range apiObjects { + tfList = append(tfList, flattenPredictiveScalingMetricDataQuery(&apiObject)) + } - m["predefined_metric_type"] = string(cfg.PredefinedMetricType) + return tfList +} + +func flattenPredictiveScalingMetricDataQuery(apiObject *awstypes.PredictiveScalingMetricDataQuery) map[string]any { + if apiObject == nil { + return nil + } + + tfMap := map[string]any{} + + if v := apiObject.Expression; v != nil { + tfMap[names.AttrExpression] = aws.ToString(v) + } + + if v := apiObject.Id; v != nil { + tfMap[names.AttrID] = aws.ToString(v) + } + + if v := apiObject.Label; v != nil { + tfMap["label"] = aws.ToString(v) + } + + if v := apiObject.MetricStat; v != nil { + tfMap["metric_stat"] = []any{flattenPredictiveScalingMetricStat(v)} + } + + if v := apiObject.ReturnData; v != nil { + tfMap["return_data"] = aws.ToBool(v) + } + + return tfMap +} + +func flattenPredictiveScalingMetricStat(apiObject *awstypes.PredictiveScalingMetricStat) map[string]any { + if apiObject == nil { + return nil + } + + tfMap := map[string]any{} + + if v := apiObject.Metric; v != nil { + tfMap["metric"] = []any{flattenPredictiveScalingMetric(v)} + } + + if v := apiObject.Stat; v != nil { + tfMap["stat"] = aws.ToString(v) + } + + if v := apiObject.Unit; v != nil { + tfMap[names.AttrUnit] = aws.ToString(v) + } + + return tfMap +} + +func flattenPredictiveScalingMetric(apiObject *awstypes.PredictiveScalingMetric) map[string]any { + if apiObject == nil { + return nil + } + + tfMap := map[string]any{} + + if v := apiObject.Dimensions; v != nil { + tfMap["dimension"] = 
flattenPredictiveScalingMetricDimensions(v) + } + + if v := apiObject.MetricName; v != nil { + tfMap[names.AttrMetricName] = aws.ToString(v) + } + + if v := apiObject.Namespace; v != nil { + tfMap[names.AttrNamespace] = aws.ToString(v) + } + + return tfMap +} + +func flattenPredictiveScalingMetricDimensions(apiObjects []awstypes.PredictiveScalingMetricDimension) []any { + if len(apiObjects) == 0 { + return nil + } + + var tfList []any + + for _, apiObject := range apiObjects { + tfList = append(tfList, flattenPredictiveScalingMetricDimension(&apiObject)) + } + + return tfList +} + +func flattenPredictiveScalingMetricDimension(apiObject *awstypes.PredictiveScalingMetricDimension) map[string]any { + if apiObject == nil { + return nil + } + + tfMap := map[string]any{} + + if v := apiObject.Name; v != nil { + tfMap[names.AttrName] = aws.ToString(v) + } + + if v := apiObject.Value; v != nil { + tfMap[names.AttrValue] = aws.ToString(v) + } + + return tfMap +} + +func flattenPredictiveScalingPredefinedLoadMetricSpecification(apiObject *awstypes.PredictiveScalingPredefinedLoadMetricSpecification) map[string]any { + if apiObject == nil { + return nil + } + + tfMap := map[string]any{} + + if v := apiObject.PredefinedMetricType; v != nil { + tfMap["predefined_metric_type"] = aws.ToString(v) + } + + if v := apiObject.ResourceLabel; v != nil { + tfMap["resource_label"] = aws.ToString(v) + } + + return tfMap +} + +func flattenPredictiveScalingPredefinedMetricPairSpecification(apiObject *awstypes.PredictiveScalingPredefinedMetricPairSpecification) map[string]any { + if apiObject == nil { + return nil + } + + tfMap := map[string]any{} + + if v := apiObject.PredefinedMetricType; v != nil { + tfMap["predefined_metric_type"] = aws.ToString(v) + } + + if v := apiObject.ResourceLabel; v != nil { + tfMap["resource_label"] = aws.ToString(v) + } + + return tfMap +} + +func flattenPredictiveScalingPredefinedScalingMetricSpecification(apiObject 
*awstypes.PredictiveScalingPredefinedScalingMetricSpecification) map[string]any { + if apiObject == nil { + return nil + } + + tfMap := map[string]any{} + + if v := apiObject.PredefinedMetricType; v != nil { + tfMap["predefined_metric_type"] = aws.ToString(v) + } - if v := cfg.ResourceLabel; v != nil { - m["resource_label"] = aws.ToString(v) + if v := apiObject.ResourceLabel; v != nil { + tfMap["resource_label"] = aws.ToString(v) } - return []any{m} + return tfMap } diff --git a/internal/service/appautoscaling/policy_test.go b/internal/service/appautoscaling/policy_test.go index 833313402a77..b33215f24488 100644 --- a/internal/service/appautoscaling/policy_test.go +++ b/internal/service/appautoscaling/policy_test.go @@ -22,7 +22,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/names" ) -func TestValidatePolicyImportInput(t *testing.T) { +func TestPolicyParseImportID(t *testing.T) { t.Parallel() // lintignore:AWSAT003,AWSAT005 @@ -87,17 +87,17 @@ func TestValidatePolicyImportInput(t *testing.T) { } for _, tc := range testCases { - idParts, err := tfappautoscaling.ValidPolicyImportInput(tc.input) + idParts, err := tfappautoscaling.PolicyParseImportID(tc.input) if tc.errorExpected == false && err != nil { - t.Errorf("tfappautoscaling.ValidPolicyImportInput(%q): resulted in an unexpected error: %s", tc.input, err) + t.Errorf("tfappautoscaling.PolicyParseImportID(%q): resulted in an unexpected error: %s", tc.input, err) } if tc.errorExpected == true && err == nil { - t.Errorf("tfappautoscaling.ValidPolicyImportInput(%q): expected an error, but returned successfully", tc.input) + t.Errorf("tfappautoscaling.PolicyParseImportID(%q): expected an error, but returned successfully", tc.input) } if !reflect.DeepEqual(tc.expected, idParts) { - t.Errorf("tfappautoscaling.ValidPolicyImportInput(%q): expected %q, but got %q", tc.input, strings.Join(tc.expected, "/"), strings.Join(idParts, "/")) + t.Errorf("tfappautoscaling.PolicyParseImportID(%q): expected %q, but got 
%q", tc.input, strings.Join(tc.expected, "/"), strings.Join(idParts, "/")) } } } @@ -117,14 +117,16 @@ func TestAccAppAutoScalingPolicy_basic(t *testing.T) { Steps: []resource.TestStep{ { Config: testAccPolicyConfig_basic(rName), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( testAccCheckPolicyExists(ctx, resourceName, &policy), resource.TestCheckResourceAttr(resourceName, "alarm_arns.#", "0"), resource.TestCheckResourceAttr(resourceName, names.AttrName, rName), resource.TestCheckResourceAttr(resourceName, "policy_type", "StepScaling"), + resource.TestCheckResourceAttr(resourceName, "predictive_scaling_policy_configuration.#", "0"), resource.TestCheckResourceAttrPair(resourceName, names.AttrResourceID, appAutoscalingTargetResourceName, names.AttrResourceID), resource.TestCheckResourceAttrPair(resourceName, "scalable_dimension", appAutoscalingTargetResourceName, "scalable_dimension"), resource.TestCheckResourceAttrPair(resourceName, "service_namespace", appAutoscalingTargetResourceName, "service_namespace"), + resource.TestCheckResourceAttr(resourceName, "step_scaling_policy_configuration.#", "1"), resource.TestCheckResourceAttr(resourceName, "step_scaling_policy_configuration.0.adjustment_type", "ChangeInCapacity"), resource.TestCheckResourceAttr(resourceName, "step_scaling_policy_configuration.0.cooldown", "60"), resource.TestCheckResourceAttr(resourceName, "step_scaling_policy_configuration.0.step_adjustment.#", "1"), @@ -133,6 +135,7 @@ func TestAccAppAutoScalingPolicy_basic(t *testing.T) { "metric_interval_lower_bound": "0", "metric_interval_upper_bound": "", }), + resource.TestCheckResourceAttr(resourceName, "target_tracking_scaling_policy_configuration.#", "0"), ), }, { @@ -500,64 +503,80 @@ func TestAccAppAutoScalingPolicy_TargetTrack_metricMath(t *testing.T) { }) } -func testAccPolicyConfig_targetTrackingMetricMath(rName string) string { - return acctest.ConfigCompose(testAccPolicyConfig_basic(rName), fmt.Sprintf(` 
-resource "aws_appautoscaling_policy" "metric_math_test" { - name = "%[1]s-tracking" - policy_type = "TargetTrackingScaling" - resource_id = aws_appautoscaling_target.test.resource_id - scalable_dimension = aws_appautoscaling_target.test.scalable_dimension - service_namespace = aws_appautoscaling_target.test.service_namespace +func TestAccAppAutoScalingPolicy_predictiveScalingSimple(t *testing.T) { + ctx := acctest.Context(t) + var policy awstypes.ScalingPolicy + appAutoscalingTargetResourceName := "aws_appautoscaling_target.test" + resourceName := "aws_appautoscaling_policy.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - target_tracking_scaling_policy_configuration { - customized_metric_specification { - metrics { - id = "m1" - expression = "TIME_SERIES(20)" - return_data = false - } - metrics { - id = "m2" - metric_stat { - metric { - namespace = "foo" - metric_name = "bar" - } - unit = "Percent" - stat = "Sum" - } - return_data = false - } - metrics { - id = "m3" - metric_stat { - metric { - namespace = "foo" - metric_name = "bar" - dimensions { - name = "x" - value = "y" - } - dimensions { - name = "y" - value = "x" - } - } - unit = "Percent" - stat = "Sum" - } - return_data = false - } - metrics { - id = "e1" - expression = "m1 + m2 + m3" - return_data = true - } - } - target_value = 12.3 - } + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.AppAutoScalingServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckPolicyDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccPolicyConfig_predictiveScalingSimple(rName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckPolicyExists(ctx, resourceName, &policy), + resource.TestCheckResourceAttr(resourceName, "alarm_arns.#", "0"), + resource.TestCheckResourceAttr(resourceName, names.AttrName, rName), + 
resource.TestCheckResourceAttr(resourceName, "policy_type", "PredictiveScaling"), + resource.TestCheckResourceAttr(resourceName, "predictive_scaling_policy_configuration.#", "1"), + resource.TestCheckResourceAttrPair(resourceName, names.AttrResourceID, appAutoscalingTargetResourceName, names.AttrResourceID), + resource.TestCheckResourceAttrPair(resourceName, "scalable_dimension", appAutoscalingTargetResourceName, "scalable_dimension"), + resource.TestCheckResourceAttrPair(resourceName, "service_namespace", appAutoscalingTargetResourceName, "service_namespace"), + resource.TestCheckResourceAttr(resourceName, "step_scaling_policy_configuration.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_tracking_scaling_policy_configuration.#", "0"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: testAccPolicyImportStateIdFunc(resourceName), + ImportStateVerify: true, + }, + }, + }) } -`, rName)) + +func TestAccAppAutoScalingPolicy_predictiveScalingCustom(t *testing.T) { + ctx := acctest.Context(t) + var policy awstypes.ScalingPolicy + appAutoscalingTargetResourceName := "aws_appautoscaling_target.test" + resourceName := "aws_appautoscaling_policy.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.AppAutoScalingServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckPolicyDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccPolicyConfig_predictiveScalingCustom(rName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckPolicyExists(ctx, resourceName, &policy), + resource.TestCheckResourceAttr(resourceName, "alarm_arns.#", "0"), + resource.TestCheckResourceAttr(resourceName, names.AttrName, rName), + resource.TestCheckResourceAttr(resourceName, "policy_type", "PredictiveScaling"), + 
resource.TestCheckResourceAttr(resourceName, "predictive_scaling_policy_configuration.#", "1"), + resource.TestCheckResourceAttrPair(resourceName, names.AttrResourceID, appAutoscalingTargetResourceName, names.AttrResourceID), + resource.TestCheckResourceAttrPair(resourceName, "scalable_dimension", appAutoscalingTargetResourceName, "scalable_dimension"), + resource.TestCheckResourceAttrPair(resourceName, "service_namespace", appAutoscalingTargetResourceName, "service_namespace"), + resource.TestCheckResourceAttr(resourceName, "step_scaling_policy_configuration.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_tracking_scaling_policy_configuration.#", "0"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: testAccPolicyImportStateIdFunc(resourceName), + ImportStateVerify: true, + }, + }, + }) } func testAccCheckPolicyExists(ctx context.Context, n string, v *awstypes.ScalingPolicy) resource.TestCheckFunc { @@ -607,6 +626,10 @@ func testAccCheckPolicyDestroy(ctx context.Context) resource.TestCheckFunc { } } +func testAccPolicyImportStateIdFunc(resourceName string) resource.ImportStateIdFunc { + return acctest.AttrsImportStateIdFunc(resourceName, "/", "service_namespace", names.AttrResourceID, "scalable_dimension", names.AttrName) +} + func testAccPolicyConfig_basic(rName string) string { return fmt.Sprintf(` resource "aws_ecs_cluster" "test" { @@ -633,7 +656,7 @@ resource "aws_ecs_service" "test" { cluster = aws_ecs_cluster.test.id deployment_maximum_percent = 200 deployment_minimum_healthy_percent = 50 - desired_count = 0 + desired_count = 2 name = %[1]q task_definition = aws_ecs_task_definition.test.arn } @@ -1242,19 +1265,223 @@ resource "aws_cloudwatch_metric_alarm" "test" { `, rName)) } -func testAccPolicyImportStateIdFunc(resourceName string) resource.ImportStateIdFunc { - return func(s *terraform.State) (string, error) { - rs, ok := s.RootModule().Resources[resourceName] - if !ok { - return "", fmt.Errorf("Not 
found: %s", resourceName) - } +func testAccPolicyConfig_targetTrackingMetricMath(rName string) string { + return acctest.ConfigCompose(testAccPolicyConfig_basic(rName), fmt.Sprintf(` +resource "aws_appautoscaling_policy" "metric_math_test" { + name = "%[1]s-tracking" + policy_type = "TargetTrackingScaling" + resource_id = aws_appautoscaling_target.test.resource_id + scalable_dimension = aws_appautoscaling_target.test.scalable_dimension + service_namespace = aws_appautoscaling_target.test.service_namespace - id := fmt.Sprintf("%s/%s/%s/%s", - rs.Primary.Attributes["service_namespace"], - rs.Primary.Attributes[names.AttrResourceID], - rs.Primary.Attributes["scalable_dimension"], - rs.Primary.Attributes[names.AttrName]) + target_tracking_scaling_policy_configuration { + customized_metric_specification { + metrics { + id = "m1" + expression = "TIME_SERIES(20)" + return_data = false + } + metrics { + id = "m2" + metric_stat { + metric { + namespace = "foo" + metric_name = "bar" + } + unit = "Percent" + stat = "Sum" + } + return_data = false + } + metrics { + id = "m3" + metric_stat { + metric { + namespace = "foo" + metric_name = "bar" + dimensions { + name = "x" + value = "y" + } + dimensions { + name = "y" + value = "x" + } + } + unit = "Percent" + stat = "Sum" + } + return_data = false + } + metrics { + id = "e1" + expression = "m1 + m2 + m3" + return_data = true + } + } + target_value = 12.3 + } +} +`, rName)) +} - return id, nil - } +func testAccPolicyConfig_predictiveScalingSimple(rName string) string { + return fmt.Sprintf(` +resource "aws_ecs_cluster" "test" { + name = %[1]q +} + +resource "aws_ecs_task_definition" "test" { + family = %[1]q + + container_definitions = < 0 { + return tags + } + } + + return nil +} + +// setTagsOut sets arcregionswitch service tags in Context. 
+func setTagsOut(ctx context.Context, tags map[string]string) { + if inContext, ok := tftags.FromContext(ctx); ok { + inContext.TagsOut = option.Some(keyValueTags(ctx, tags)) + } +} + +// updateTags updates arcregionswitch service tags. +// The identifier is typically the Amazon Resource Name (ARN), although +// it may also be a different identifier depending on the service. +func updateTags(ctx context.Context, conn *arcregionswitch.Client, identifier string, oldTagsMap, newTagsMap any, optFns ...func(*arcregionswitch.Options)) error { + oldTags := tftags.New(ctx, oldTagsMap) + newTags := tftags.New(ctx, newTagsMap) + + ctx = tflog.SetField(ctx, logging.KeyResourceId, identifier) + + removedTags := oldTags.Removed(newTags) + removedTags = removedTags.IgnoreSystem(names.ARCRegionSwitch) + if len(removedTags) > 0 { + input := arcregionswitch.UntagResourceInput{ + Arn: aws.String(identifier), + ResourceTagKeys: removedTags.Keys(), + } + + _, err := conn.UntagResource(ctx, &input, optFns...) + + if err != nil { + return smarterr.NewError(err) + } + } + + updatedTags := oldTags.Updated(newTags) + updatedTags = updatedTags.IgnoreSystem(names.ARCRegionSwitch) + if len(updatedTags) > 0 { + input := arcregionswitch.TagResourceInput{ + Arn: aws.String(identifier), + Tags: svcTags(updatedTags), + } + + _, err := conn.TagResource(ctx, &input, optFns...) + + if err != nil { + return smarterr.NewError(err) + } + } + + return nil +} + +// UpdateTags updates arcregionswitch service tags. +// It is called from outside this package. 
+func (p *servicePackage) UpdateTags(ctx context.Context, meta any, identifier string, oldTags, newTags any) error { + return updateTags(ctx, meta.(*conns.AWSClient).ARCRegionSwitchClient(ctx), identifier, oldTags, newTags) +} diff --git a/internal/service/athena/data_catalog.go b/internal/service/athena/data_catalog.go index 2a8a35ca4aba..7824b3a24410 100644 --- a/internal/service/athena/data_catalog.go +++ b/internal/service/athena/data_catalog.go @@ -198,6 +198,9 @@ func resourceDataCatalogDelete(ctx context.Context, d *schema.ResourceData, meta if errs.IsA[*types.ResourceNotFoundException](err) { return diags } + if errs.IsAErrorMessageContains[*types.InvalidRequestException](err, "was not found") { + return diags + } if err != nil { return sdkdiag.AppendErrorf(diags, "deleting Athena Data Catalog (%s): %s", d.Id(), err) diff --git a/internal/service/athena/database.go b/internal/service/athena/database.go index 3c2064f56126..5e7c89c273f7 100644 --- a/internal/service/athena/database.go +++ b/internal/service/athena/database.go @@ -108,6 +108,11 @@ func resourceDatabase() *schema.Resource { ForceNew: true, Elem: &schema.Schema{Type: schema.TypeString}, }, + "workgroup": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, }, } } @@ -139,12 +144,16 @@ func resourceDatabaseCreate(ctx context.Context, d *schema.ResourceData, meta an queryString.WriteString(";") - input := &athena.StartQueryExecutionInput{ + input := athena.StartQueryExecutionInput{ QueryString: aws.String(queryString.String()), ResultConfiguration: expandResultConfiguration(d), } - output, err := conn.StartQueryExecution(ctx, input) + if v, ok := d.GetOk("workgroup"); ok { + input.WorkGroup = aws.String(v.(string)) + } + + output, err := conn.StartQueryExecution(ctx, &input) if err != nil { return sdkdiag.AppendErrorf(diags, "creating Athena Database (%s): %s", name, err) @@ -192,19 +201,28 @@ func resourceDatabaseDelete(ctx context.Context, d *schema.ResourceData, meta an } 
queryString += ";" - input := &athena.StartQueryExecutionInput{ + log.Printf("[DEBUG] Deleting Athena Database (%s)", d.Id()) + input := athena.StartQueryExecutionInput{ QueryString: aws.String(queryString), ResultConfiguration: expandResultConfiguration(d), } - - log.Printf("[DEBUG] Deleting Athena Database (%s)", d.Id()) - output, err := conn.StartQueryExecution(ctx, input) + if v, ok := d.GetOk("workgroup"); ok { + input.WorkGroup = aws.String(v.(string)) + } + output, err := conn.StartQueryExecution(ctx, &input) if err != nil { return sdkdiag.AppendErrorf(diags, "deleting Athena Database (%s): %s", d.Id(), err) } - if err := executeAndExpectNoRows(ctx, conn, aws.ToString(output.QueryExecutionId)); err != nil { + err = executeAndExpectNoRows(ctx, conn, aws.ToString(output.QueryExecutionId)) + + // "reason: FAILED: SemanticException [Error 10072]: Database does not exist: ...". + if errs.Contains(err, "does not exist") { + return diags + } + + if err != nil { return sdkdiag.AppendErrorf(diags, "deleting Athena Database (%s): %s", d.Id(), err) } @@ -212,12 +230,12 @@ func resourceDatabaseDelete(ctx context.Context, d *schema.ResourceData, meta an } func findDatabaseByName(ctx context.Context, conn *athena.Client, name string) (*types.Database, error) { - input := &athena.GetDatabaseInput{ + input := athena.GetDatabaseInput{ CatalogName: aws.String("AwsDataCatalog"), DatabaseName: aws.String(name), } - output, err := conn.GetDatabase(ctx, input) + output, err := conn.GetDatabase(ctx, &input) if errs.IsAErrorMessageContains[*types.MetadataException](err, "not found") { return nil, &retry.NotFoundError{ diff --git a/internal/service/athena/database_test.go b/internal/service/athena/database_test.go index 84f8c6b1eb45..8facb035dd5f 100644 --- a/internal/service/athena/database_test.go +++ b/internal/service/athena/database_test.go @@ -14,6 +14,7 @@ import ( "github.com/aws/aws-sdk-go-v2/service/athena/types" sdkacctest 
"github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/plancheck" "github.com/hashicorp/terraform-plugin-testing/terraform" "github.com/hashicorp/terraform-provider-aws/internal/acctest" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -304,7 +305,7 @@ func TestAccAthenaDatabase_unescaped_description(t *testing.T) { }) } -func TestAccAthenaDatabase_disppears(t *testing.T) { +func TestAccAthenaDatabase_disappears(t *testing.T) { ctx := acctest.Context(t) rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) dbName := sdkacctest.RandString(8) @@ -328,6 +329,79 @@ func TestAccAthenaDatabase_disppears(t *testing.T) { }) } +func TestAccAthenaDatabase_withWorkgroup(t *testing.T) { + ctx := acctest.Context(t) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + dbName := sdkacctest.RandString(8) + wgName := sdkacctest.RandString(8) + + resourceName := "aws_athena_database.test" + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.AthenaServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckDatabaseDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccDatabaseConfig_withWorkgroup(rName, dbName, true, wgName), + Check: resource.ComposeTestCheckFunc( + testAccCheckDatabaseExists(ctx, resourceName), + ), + }, + }, + }) +} + +func TestAccAthenaDatabase_upgradeV6_5_0(t *testing.T) { + ctx := acctest.Context(t) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + dbName := sdkacctest.RandString(8) + resourceName := "aws_athena_database.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.AthenaServiceID), + CheckDestroy: testAccCheckDatabaseDestroy(ctx), + Steps: []resource.TestStep{ + 
{ + ExternalProviders: map[string]resource.ExternalProvider{ + "aws": { + Source: "hashicorp/aws", + VersionConstraint: "6.5.0", + }, + }, + Config: testAccDatabaseConfig_basic(rName, dbName, false), + Check: resource.ComposeTestCheckFunc( + testAccCheckDatabaseExists(ctx, resourceName), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Config: testAccDatabaseConfig_basic(rName, dbName, false), + Check: resource.ComposeTestCheckFunc( + testAccCheckDatabaseExists(ctx, resourceName), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + }, + }, + }) +} + func testAccCheckDatabaseDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { conn := acctest.Provider.Meta().(*conns.AWSClient).AthenaClient(ctx) @@ -604,3 +678,24 @@ resource "aws_athena_database" "test" { } `, rName, dbName, forceDestroy) } + +func testAccDatabaseConfig_withWorkgroup(rName string, dbName string, forceDestroy bool, wgName string) string { + return fmt.Sprintf(` +resource "aws_athena_workgroup" "test" { + name = %[4]q + force_destroy = %[3]t +} + +resource "aws_s3_bucket" "test" { + bucket = %[1]q + force_destroy = true +} + +resource "aws_athena_database" "test" { + name = %[2]q + bucket = aws_s3_bucket.test.bucket + force_destroy = %[3]t + workgroup = aws_athena_workgroup.test.id +} +`, rName, dbName, forceDestroy, wgName) +} diff --git 
a/internal/service/athena/service_endpoint_resolver_gen.go b/internal/service/athena/service_endpoint_resolver_gen.go index 6b32c8f767f8..c7d769384c00 100644 --- a/internal/service/athena/service_endpoint_resolver_gen.go +++ b/internal/service/athena/service_endpoint_resolver_gen.go @@ -62,7 +62,7 @@ func (r resolverV2) ResolveEndpoint(ctx context.Context, params athena.EndpointP }) params.UseFIPS = aws.Bool(false) } else { - err = fmt.Errorf("looking up athena endpoint %q: %s", hostname, err) + err = fmt.Errorf("looking up athena endpoint %q: %w", hostname, err) return } } else { diff --git a/internal/service/athena/service_endpoints_gen_test.go b/internal/service/athena/service_endpoints_gen_test.go index e7e22a8d9c21..7b4d6e58a38a 100644 --- a/internal/service/athena/service_endpoints_gen_test.go +++ b/internal/service/athena/service_endpoints_gen_test.go @@ -521,7 +521,7 @@ func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { ) } -var errCancelOperation = fmt.Errorf("Test: Canceling request") +var errCancelOperation = errors.New("Test: Canceling request") func addCancelRequestMiddleware() func(*middleware.Stack) error { return func(stack *middleware.Stack) error { diff --git a/internal/service/athena/service_package_gen.go b/internal/service/athena/service_package_gen.go index f325ce6cefd2..4eb841dcfe9e 100644 --- a/internal/service/athena/service_package_gen.go +++ b/internal/service/athena/service_package_gen.go @@ -7,7 +7,6 @@ import ( "unique" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/athena" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -111,7 +110,7 @@ func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) ( func(o *athena.Options) { if inContext, ok := conns.FromContext(ctx); ok && inContext.VCREnabled() { tflog.Info(ctx, "overriding retry behavior to immediately 
return VCR errors") - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(vcr.InteractionNotFoundRetryableFunc)) + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), vcr.InteractionNotFoundRetryableFunc) } }, withExtraOptions(ctx, p, config), diff --git a/internal/service/athena/sweep.go b/internal/service/athena/sweep.go index 9068be60dfd0..8cf6daf313fe 100644 --- a/internal/service/athena/sweep.go +++ b/internal/service/athena/sweep.go @@ -29,10 +29,7 @@ func RegisterSweepers() { }, }) - resource.AddTestSweepers("aws_athena_database", &resource.Sweeper{ - Name: "aws_athena_database", - F: sweepDatabases, - }) + awsv2.Register("aws_athena_database", sweepDatabases) resource.AddTestSweepers("aws_athena_workgroup", &resource.Sweeper{ Name: "aws_athena_workgroup", @@ -80,7 +77,7 @@ func sweepDataCatalogs(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.AthenaClient(ctx) input := &athena.ListDataCatalogsInput{} @@ -103,7 +100,7 @@ func sweepDataCatalogs(region string) error { name := aws.ToString(v.CatalogName) if name == "AwsDataCatalog" { - log.Printf("[INFO] Skipping Athena Data Catalog %s", name) + log.Printf("[INFO] Skipping Athena Data Catalog %q", name) continue } @@ -124,27 +121,16 @@ func sweepDataCatalogs(region string) error { return nil } -func sweepDatabases(region string) error { - ctx := sweep.Context(region) - client, err := sweep.SharedRegionalSweepClient(ctx, region) - if err != nil { - return fmt.Errorf("error getting client: %s", err) - } +func sweepDatabases(ctx context.Context, client *conns.AWSClient) ([]sweep.Sweepable, error) { conn := client.AthenaClient(ctx) - input := &athena.ListDataCatalogsInput{} - sweepResources := make([]sweep.Sweepable, 0) + var sweepResources []sweep.Sweepable - 
pages := athena.NewListDataCatalogsPaginator(conn, input) + input := athena.ListDataCatalogsInput{} + pages := athena.NewListDataCatalogsPaginator(conn, &input) for pages.HasMorePages() { page, err := pages.NextPage(ctx) - - if awsv2.SkipSweepError(err) { - log.Printf("[WARN] Skipping Athena Database sweep for %s: %s", region, err) - return nil - } - if err != nil { - return fmt.Errorf("error listing Athena Data Catalogs (%s): %w", region, err) + return nil, err } for _, v := range page.DataCatalogsSummary { @@ -158,7 +144,11 @@ func sweepDatabases(region string) error { page, err := pages.NextPage(ctx) if err != nil { - continue + tflog.Warn(ctx, "Skipping resource", map[string]any{ + "error": err.Error(), + "catalog_name": catalogName, + }) + break } for _, v := range page.DatabaseList { @@ -180,20 +170,14 @@ func sweepDatabases(region string) error { } } - err = sweep.SweepOrchestrator(ctx, sweepResources) - - if err != nil { - return fmt.Errorf("error sweeping Athena Databases (%s): %w", region, err) - } - - return nil + return sweepResources, nil } func sweepWorkGroups(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.AthenaClient(ctx) input := &athena.ListWorkGroupsInput{} diff --git a/internal/service/athena/tags_gen.go b/internal/service/athena/tags_gen.go index 4c8cf9ad119f..4cc6bab92441 100644 --- a/internal/service/athena/tags_gen.go +++ b/internal/service/athena/tags_gen.go @@ -3,8 +3,8 @@ package athena import ( "context" - "fmt" + "github.com/YakDriver/smarterr" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/athena" awstypes "github.com/aws/aws-sdk-go-v2/service/athena/types" @@ -31,7 +31,7 @@ func listTags(ctx context.Context, conn *athena.Client, identifier string, optFn page, err := pages.NextPage(ctx, optFns...) 
if err != nil { - return tftags.New(ctx, nil), err + return tftags.New(ctx, nil), smarterr.NewError(err) } output = append(output, page.Tags...) @@ -46,7 +46,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri tags, err := listTags(ctx, meta.(*conns.AWSClient).AthenaClient(ctx), identifier) if err != nil { - return err + return smarterr.NewError(err) } if inContext, ok := tftags.FromContext(ctx); ok { @@ -124,7 +124,7 @@ func updateTags(ctx context.Context, conn *athena.Client, identifier string, old _, err := conn.UntagResource(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("untagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } @@ -139,7 +139,7 @@ func updateTags(ctx context.Context, conn *athena.Client, identifier string, old _, err := conn.TagResource(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("tagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } diff --git a/internal/service/athena/workgroup.go b/internal/service/athena/workgroup.go index dc11683c8b69..0ccc8d9b2fbf 100644 --- a/internal/service/athena/workgroup.go +++ b/internal/service/athena/workgroup.go @@ -88,6 +88,24 @@ func resourceWorkGroup() *schema.Resource { Optional: true, ValidateFunc: verify.ValidARN, }, + "identity_center_configuration": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enable_identity_center": { + Type: schema.TypeBool, + Optional: true, + }, + "identity_center_instance_arn": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidARN, + }, + }, + }, + }, "publish_cloudwatch_metrics_enabled": { Type: schema.TypeBool, Optional: true, @@ -187,7 +205,7 @@ func resourceWorkGroupCreate(ctx context.Context, d *schema.ResourceData, meta a conn := meta.(*conns.AWSClient).AthenaClient(ctx) name := d.Get(names.AttrName).(string) - input := &athena.CreateWorkGroupInput{ + input 
:= athena.CreateWorkGroupInput{ Configuration: expandWorkGroupConfiguration(d.Get(names.AttrConfiguration).([]any)), Name: aws.String(name), Tags: getTagsIn(ctx), @@ -197,7 +215,7 @@ func resourceWorkGroupCreate(ctx context.Context, d *schema.ResourceData, meta a input.Description = aws.String(v.(string)) } - _, err := conn.CreateWorkGroup(ctx, input) + _, err := conn.CreateWorkGroup(ctx, &input) if err != nil { return sdkdiag.AppendErrorf(diags, "creating Athena WorkGroup (%s): %s", name, err) @@ -206,12 +224,12 @@ func resourceWorkGroupCreate(ctx context.Context, d *schema.ResourceData, meta a d.SetId(name) if v := types.WorkGroupState(d.Get(names.AttrState).(string)); v == types.WorkGroupStateDisabled { - input := &athena.UpdateWorkGroupInput{ + input := athena.UpdateWorkGroupInput{ State: v, WorkGroup: aws.String(d.Id()), } - _, err := conn.UpdateWorkGroup(ctx, input) + _, err := conn.UpdateWorkGroup(ctx, &input) if err != nil { return sdkdiag.AppendErrorf(diags, "disabling Athena WorkGroup (%s): %s", d.Id(), err) @@ -261,7 +279,7 @@ func resourceWorkGroupUpdate(ctx context.Context, d *schema.ResourceData, meta a conn := meta.(*conns.AWSClient).AthenaClient(ctx) if d.HasChangesExcept(names.AttrTags, names.AttrTagsAll) { - input := &athena.UpdateWorkGroupInput{ + input := athena.UpdateWorkGroupInput{ WorkGroup: aws.String(d.Get(names.AttrName).(string)), } @@ -277,7 +295,7 @@ func resourceWorkGroupUpdate(ctx context.Context, d *schema.ResourceData, meta a input.State = types.WorkGroupState(d.Get(names.AttrState).(string)) } - _, err := conn.UpdateWorkGroup(ctx, input) + _, err := conn.UpdateWorkGroup(ctx, &input) if err != nil { return sdkdiag.AppendErrorf(diags, "updating Athena WorkGroup (%s): %s", d.Id(), err) @@ -291,16 +309,14 @@ func resourceWorkGroupDelete(ctx context.Context, d *schema.ResourceData, meta a var diags diag.Diagnostics conn := meta.(*conns.AWSClient).AthenaClient(ctx) - input := &athena.DeleteWorkGroupInput{ + log.Printf("[DEBUG] Deleting 
Athena WorkGroup (%s)", d.Id()) + input := athena.DeleteWorkGroupInput{ WorkGroup: aws.String(d.Id()), } - if v, ok := d.GetOk(names.AttrForceDestroy); ok { input.RecursiveDeleteOption = aws.Bool(v.(bool)) } - - log.Printf("[DEBUG] Deleting Athena WorkGroup (%s)", d.Id()) - _, err := conn.DeleteWorkGroup(ctx, input) + _, err := conn.DeleteWorkGroup(ctx, &input) if err != nil { return sdkdiag.AppendErrorf(diags, "deleting Athena WorkGroup (%s): %s", d.Id(), err) @@ -310,11 +326,11 @@ func resourceWorkGroupDelete(ctx context.Context, d *schema.ResourceData, meta a } func findWorkGroupByName(ctx context.Context, conn *athena.Client, name string) (*types.WorkGroup, error) { - input := &athena.GetWorkGroupInput{ + input := athena.GetWorkGroupInput{ WorkGroup: aws.String(name), } - output, err := conn.GetWorkGroup(ctx, input) + output, err := conn.GetWorkGroup(ctx, &input) if errs.IsAErrorMessageContains[*types.InvalidRequestException](err, "is not found") { return nil, &retry.NotFoundError{ @@ -359,6 +375,10 @@ func expandWorkGroupConfiguration(l []any) *types.WorkGroupConfiguration { configuration.ExecutionRole = aws.String(v) } + if v, ok := m["identity_center_configuration"]; ok { + configuration.IdentityCenterConfiguration = expandWorkGroupIdentityCenterConfiguration(v.([]any)) + } + if v, ok := m["publish_cloudwatch_metrics_enabled"].(bool); ok { configuration.PublishCloudWatchMetricsEnabled = aws.Bool(v) } @@ -432,6 +452,26 @@ func expandWorkGroupConfigurationUpdates(l []any) *types.WorkGroupConfigurationU return configurationUpdates } +func expandWorkGroupIdentityCenterConfiguration(l []any) *types.IdentityCenterConfiguration { + if len(l) == 0 || l[0] == nil { + return nil + } + + m := l[0].(map[string]any) + + identityCenterConfiguration := &types.IdentityCenterConfiguration{} + + if v, ok := m["enable_identity_center"].(bool); ok { + identityCenterConfiguration.EnableIdentityCenter = aws.Bool(v) + } + + if v, ok := m["identity_center_instance_arn"].(string); 
ok && v != "" { + identityCenterConfiguration.IdentityCenterInstanceArn = aws.String(v) + } + + return identityCenterConfiguration +} + func expandWorkGroupResultConfiguration(l []any) *types.ResultConfiguration { if len(l) == 0 || l[0] == nil { return nil @@ -526,6 +566,7 @@ func flattenWorkGroupConfiguration(configuration *types.WorkGroupConfiguration) "enforce_workgroup_configuration": aws.ToBool(configuration.EnforceWorkGroupConfiguration), names.AttrEngineVersion: flattenWorkGroupEngineVersion(configuration.EngineVersion), "execution_role": aws.ToString(configuration.ExecutionRole), + "identity_center_configuration": flattenWorkGroupIdentityCenterConfiguration(configuration.IdentityCenterConfiguration), "publish_cloudwatch_metrics_enabled": aws.ToBool(configuration.PublishCloudWatchMetricsEnabled), "result_configuration": flattenWorkGroupResultConfiguration(configuration.ResultConfiguration), "requester_pays_enabled": aws.ToBool(configuration.RequesterPaysEnabled), @@ -547,6 +588,19 @@ func flattenWorkGroupEngineVersion(engineVersion *types.EngineVersion) []any { return []any{m} } +func flattenWorkGroupIdentityCenterConfiguration(identityCenterConfiguration *types.IdentityCenterConfiguration) []any { + if identityCenterConfiguration == nil { + return []any{} + } + + m := map[string]any{ + "enable_identity_center": aws.ToBool(identityCenterConfiguration.EnableIdentityCenter), + "identity_center_instance_arn": aws.ToString(identityCenterConfiguration.IdentityCenterInstanceArn), + } + + return []any{m} +} + func flattenWorkGroupResultConfiguration(resultConfiguration *types.ResultConfiguration) []any { if resultConfiguration == nil { return []any{} diff --git a/internal/service/athena/workgroup_test.go b/internal/service/athena/workgroup_test.go index 41fc51f5343c..153fe5677fff 100644 --- a/internal/service/athena/workgroup_test.go +++ b/internal/service/athena/workgroup_test.go @@ -45,6 +45,7 @@ func TestAccAthenaWorkGroup_basic(t *testing.T) { 
resource.TestCheckResourceAttrSet(resourceName, "configuration.0.engine_version.0.effective_engine_version"), resource.TestCheckResourceAttr(resourceName, "configuration.0.engine_version.0.selected_engine_version", "AUTO"), resource.TestCheckResourceAttr(resourceName, "configuration.0.execution_role", ""), + resource.TestCheckResourceAttr(resourceName, "configuration.0.identity_center_configuration.#", "0"), resource.TestCheckResourceAttr(resourceName, "configuration.0.publish_cloudwatch_metrics_enabled", acctest.CtTrue), resource.TestCheckResourceAttr(resourceName, "configuration.0.result_configuration.#", "0"), resource.TestCheckResourceAttr(resourceName, "configuration.0.requester_pays_enabled", acctest.CtFalse), @@ -210,13 +211,13 @@ func TestAccAthenaWorkGroup_configurationEngineVersion(t *testing.T) { CheckDestroy: testAccCheckWorkGroupDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccWorkGroupConfig_configurationEngineVersion(rName, "Athena engine version 2"), + Config: testAccWorkGroupConfig_configurationEngineVersion(rName, "Athena engine version 3"), Check: resource.ComposeTestCheckFunc( testAccCheckWorkGroupExists(ctx, resourceName, &workgroup1), resource.TestCheckResourceAttr(resourceName, "configuration.#", "1"), resource.TestCheckResourceAttr(resourceName, "configuration.0.engine_version.#", "1"), resource.TestCheckResourceAttrPair(resourceName, "configuration.0.engine_version.0.effective_engine_version", resourceName, "configuration.0.engine_version.0.selected_engine_version"), - resource.TestCheckResourceAttr(resourceName, "configuration.0.engine_version.0.selected_engine_version", "Athena engine version 2"), + resource.TestCheckResourceAttr(resourceName, "configuration.0.engine_version.0.selected_engine_version", "Athena engine version 3"), ), }, { @@ -698,7 +699,7 @@ func testAccCheckCreateNamedQuery(ctx context.Context, workGroup *types.WorkGrou } if _, err := conn.CreateNamedQuery(ctx, input); err != nil { - return fmt.Errorf("error 
creating Named Query (%s) on Workgroup (%s): %s", queryName, aws.ToString(workGroup.Name), err) + return fmt.Errorf("error creating Named Query (%s) on Workgroup (%s): %w", queryName, aws.ToString(workGroup.Name), err) } return nil diff --git a/internal/service/auditmanager/account_registration.go b/internal/service/auditmanager/account_registration.go index 7d374a65ac82..11f2f386c711 100644 --- a/internal/service/auditmanager/account_registration.go +++ b/internal/service/auditmanager/account_registration.go @@ -28,6 +28,7 @@ import ( // @SingletonIdentity(identityDuplicateAttributes="id") // @Testing(generator=false) // @Testing(hasExistsFunction=false, checkDestroyNoop=true) +// @Testing(preIdentityVersion="v5.100.0") func newAccountRegistrationResource(_ context.Context) (resource.ResourceWithConfigure, error) { return &accountRegistrationResource{}, nil } diff --git a/internal/service/auditmanager/account_registration_identity_gen_test.go b/internal/service/auditmanager/account_registration_identity_gen_test.go index d8304e236ddb..067c3b9f93be 100644 --- a/internal/service/auditmanager/account_registration_identity_gen_test.go +++ b/internal/service/auditmanager/account_registration_identity_gen_test.go @@ -15,6 +15,7 @@ import ( "github.com/hashicorp/terraform-plugin-testing/tfversion" "github.com/hashicorp/terraform-provider-aws/internal/acctest" tfknownvalue "github.com/hashicorp/terraform-provider-aws/internal/acctest/knownvalue" + tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -22,9 +23,10 @@ func testAccAuditManagerAccountRegistration_IdentitySerial(t *testing.T) { t.Helper() testCases := map[string]func(t *testing.T){ - acctest.CtBasic: testAccAuditManagerAccountRegistration_Identity_Basic, - "ExistingResource": testAccAuditManagerAccountRegistration_Identity_ExistingResource, - "RegionOverride": 
testAccAuditManagerAccountRegistration_Identity_RegionOverride, + acctest.CtBasic: testAccAuditManagerAccountRegistration_Identity_Basic, + "ExistingResource": testAccAuditManagerAccountRegistration_Identity_ExistingResource, + "ExistingResourceNoRefresh": testAccAuditManagerAccountRegistration_Identity_ExistingResource_NoRefresh_NoChange, + "RegionOverride": testAccAuditManagerAccountRegistration_Identity_RegionOverride, } acctest.RunSerialTests1Level(t, testCases, 0) @@ -32,9 +34,10 @@ func testAccAuditManagerAccountRegistration_IdentitySerial(t *testing.T) { func testAccAuditManagerAccountRegistration_Identity_Basic(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_auditmanager_account_registration.test" - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ TerraformVersionChecks: []tfversion.TerraformVersionCheck{ tfversion.SkipBelow(tfversion.Version1_12_0), }, @@ -105,7 +108,7 @@ func testAccAuditManagerAccountRegistration_Identity_RegionOverride(t *testing.T resourceName := "aws_auditmanager_account_registration.test" - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ TerraformVersionChecks: []tfversion.TerraformVersionCheck{ tfversion.SkipBelow(tfversion.Version1_12_0), }, @@ -209,3 +212,106 @@ func testAccAuditManagerAccountRegistration_Identity_RegionOverride(t *testing.T }, }) } + +func testAccAuditManagerAccountRegistration_Identity_ExistingResource(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_auditmanager_account_registration.test" + + acctest.Test(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.AuditManagerServiceID), + CheckDestroy: acctest.CheckDestroyNoop, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: 
config.StaticDirectory("testdata/AccountRegistration/basic_v5.100.0/"), + ConfigVariables: config.Variables{}, + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: v6.0 Identity set on refresh + { + ConfigDirectory: config.StaticDirectory("testdata/AccountRegistration/basic_v6.0.0/"), + ConfigVariables: config.Variables{}, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + }), + }, + }, + + // Step 3: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/AccountRegistration/basic/"), + ConfigVariables: config.Variables{}, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + }), + }, + }, + }, + }) +} + +func testAccAuditManagerAccountRegistration_Identity_ExistingResource_NoRefresh_NoChange(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_auditmanager_account_registration.test" + + acctest.Test(ctx, t, resource.TestCase{ + TerraformVersionChecks: 
[]tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.AuditManagerServiceID), + CheckDestroy: acctest.CheckDestroyNoop, + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/AccountRegistration/basic_v5.100.0/"), + ConfigVariables: config.Variables{}, + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/AccountRegistration/basic/"), + ConfigVariables: config.Variables{}, + }, + }, + }) +} diff --git a/internal/service/auditmanager/account_registration_test.go b/internal/service/auditmanager/account_registration_test.go index 68c6b3433ae0..c77776ef086e 100644 --- a/internal/service/auditmanager/account_registration_test.go +++ b/internal/service/auditmanager/account_registration_test.go @@ -9,14 +9,8 @@ import ( "testing" "github.com/hashicorp/terraform-plugin-testing/helper/resource" - "github.com/hashicorp/terraform-plugin-testing/knownvalue" - "github.com/hashicorp/terraform-plugin-testing/plancheck" - "github.com/hashicorp/terraform-plugin-testing/statecheck" "github.com/hashicorp/terraform-plugin-testing/terraform" - "github.com/hashicorp/terraform-plugin-testing/tfversion" "github.com/hashicorp/terraform-provider-aws/internal/acctest" - tfknownvalue "github.com/hashicorp/terraform-provider-aws/internal/acctest/knownvalue" - tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" "github.com/hashicorp/terraform-provider-aws/internal/conns" tfauditmanager "github.com/hashicorp/terraform-provider-aws/internal/service/auditmanager" 
"github.com/hashicorp/terraform-provider-aws/names" @@ -147,78 +141,6 @@ func testAccCheckAccoountRegistrationExists(ctx context.Context, n string) resou } } -func testAccAuditManagerAccountRegistration_Identity_ExistingResource(t *testing.T) { - ctx := acctest.Context(t) - resourceName := "aws_auditmanager_account_registration.test" - - resource.Test(t, resource.TestCase{ - TerraformVersionChecks: []tfversion.TerraformVersionCheck{ - tfversion.SkipBelow(tfversion.Version1_12_0), - }, - PreCheck: func() { - acctest.PreCheck(ctx, t) - acctest.PreCheckPartitionHasService(t, names.AuditManagerEndpointID) - }, - ErrorCheck: acctest.ErrorCheck(t, names.AuditManagerServiceID), - CheckDestroy: acctest.CheckDestroyNoop, - Steps: []resource.TestStep{ - { - ExternalProviders: map[string]resource.ExternalProvider{ - "aws": { - Source: "hashicorp/aws", - VersionConstraint: "5.100.0", - }, - }, - Config: testAccAccountRegistrationConfig_basic(), - ConfigStateChecks: []statecheck.StateCheck{ - tfstatecheck.ExpectNoIdentity(resourceName), - }, - }, - { - ExternalProviders: map[string]resource.ExternalProvider{ - "aws": { - Source: "hashicorp/aws", - VersionConstraint: "6.0.0", - }, - }, - Config: testAccAccountRegistrationConfig_basic(), - ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - PostApplyPostRefresh: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - }, - ConfigStateChecks: []statecheck.StateCheck{ - statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ - names.AttrAccountID: tfknownvalue.AccountID(), - names.AttrRegion: knownvalue.StringExact(acctest.Region()), - }), - }, - }, - { - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - Config: testAccAccountRegistrationConfig_basic(), - ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{ - 
plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - PostApplyPostRefresh: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - }, - ConfigStateChecks: []statecheck.StateCheck{ - statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ - names.AttrAccountID: tfknownvalue.AccountID(), - names.AttrRegion: knownvalue.StringExact(acctest.Region()), - }), - }, - }, - }, - }) -} - func testAccAccountRegistrationConfig_basic() string { return ` resource "aws_auditmanager_account_registration" "test" {} diff --git a/internal/service/auditmanager/assessment.go b/internal/service/auditmanager/assessment.go index 4c025df4f851..29e317c28866 100644 --- a/internal/service/auditmanager/assessment.go +++ b/internal/service/auditmanager/assessment.go @@ -178,7 +178,7 @@ func (r *assessmentResource) Create(ctx context.Context, request resource.Create // Example: // ResourceNotFoundException: The operation tried to access a nonexistent resource. The resource // might not be specified correctly, or its status might not be active. Check and try again. - outputRaw, err := tfresource.RetryWhenIsA[*awstypes.ResourceNotFoundException](ctx, iamPropagationTimeout, func() (any, error) { + outputRaw, err := tfresource.RetryWhenIsA[any, *awstypes.ResourceNotFoundException](ctx, iamPropagationTimeout, func(ctx context.Context) (any, error) { return conn.CreateAssessment(ctx, &input) }) diff --git a/internal/service/auditmanager/assessment_delegation.go b/internal/service/auditmanager/assessment_delegation.go index dfd3c0cf16c2..5a36a29bb6fa 100644 --- a/internal/service/auditmanager/assessment_delegation.go +++ b/internal/service/auditmanager/assessment_delegation.go @@ -130,7 +130,7 @@ func (r *assessmentDelegationResource) Create(ctx context.Context, request resou // Example: // ResourceNotFoundException: The operation tried to access a nonexistent resource. 
The resource // might not be specified correctly, or its status might not be active. Check and try again. - outputRaw, err := tfresource.RetryWhenIsA[*awstypes.ResourceNotFoundException](ctx, iamPropagationTimeout, func() (any, error) { + outputRaw, err := tfresource.RetryWhenIsA[any, *awstypes.ResourceNotFoundException](ctx, iamPropagationTimeout, func(ctx context.Context) (any, error) { return conn.BatchCreateDelegationByAssessment(ctx, &input) }) diff --git a/internal/service/auditmanager/assessment_report.go b/internal/service/auditmanager/assessment_report.go index 899b943e1391..a7950ab3e4ff 100644 --- a/internal/service/auditmanager/assessment_report.go +++ b/internal/service/auditmanager/assessment_report.go @@ -154,7 +154,7 @@ func (r *assessmentReportResource) Delete(ctx context.Context, request resource. const ( timeout = 5 * time.Minute ) - _, err := tfresource.RetryWhenIsA[*awstypes.ValidationException](ctx, timeout, func() (any, error) { + _, err := tfresource.RetryWhenIsA[any, *awstypes.ValidationException](ctx, timeout, func(ctx context.Context) (any, error) { return conn.DeleteAssessmentReport(ctx, &input) }) diff --git a/internal/service/auditmanager/service_endpoint_resolver_gen.go b/internal/service/auditmanager/service_endpoint_resolver_gen.go index 69264740bc5f..cd5983ab800b 100644 --- a/internal/service/auditmanager/service_endpoint_resolver_gen.go +++ b/internal/service/auditmanager/service_endpoint_resolver_gen.go @@ -62,7 +62,7 @@ func (r resolverV2) ResolveEndpoint(ctx context.Context, params auditmanager.End }) params.UseFIPS = aws.Bool(false) } else { - err = fmt.Errorf("looking up auditmanager endpoint %q: %s", hostname, err) + err = fmt.Errorf("looking up auditmanager endpoint %q: %w", hostname, err) return } } else { diff --git a/internal/service/auditmanager/service_endpoints_gen_test.go b/internal/service/auditmanager/service_endpoints_gen_test.go index 9d6baf303631..ac0dc9a09320 100644 --- 
a/internal/service/auditmanager/service_endpoints_gen_test.go +++ b/internal/service/auditmanager/service_endpoints_gen_test.go @@ -521,7 +521,7 @@ func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { ) } -var errCancelOperation = fmt.Errorf("Test: Canceling request") +var errCancelOperation = errors.New("Test: Canceling request") func addCancelRequestMiddleware() func(*middleware.Stack) error { return func(stack *middleware.Stack) error { diff --git a/internal/service/auditmanager/service_package_gen.go b/internal/service/auditmanager/service_package_gen.go index 7acbd3c8820b..729f96581f49 100644 --- a/internal/service/auditmanager/service_package_gen.go +++ b/internal/service/auditmanager/service_package_gen.go @@ -7,7 +7,6 @@ import ( "unique" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/auditmanager" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -134,7 +133,7 @@ func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) ( func(o *auditmanager.Options) { if inContext, ok := conns.FromContext(ctx); ok && inContext.VCREnabled() { tflog.Info(ctx, "overriding retry behavior to immediately return VCR errors") - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(vcr.InteractionNotFoundRetryableFunc)) + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), vcr.InteractionNotFoundRetryableFunc) } }, withExtraOptions(ctx, p, config), diff --git a/internal/service/auditmanager/tags_gen.go b/internal/service/auditmanager/tags_gen.go index ed7d55607502..5d19de697d15 100644 --- a/internal/service/auditmanager/tags_gen.go +++ b/internal/service/auditmanager/tags_gen.go @@ -3,8 +3,8 @@ package auditmanager import ( "context" - "fmt" + "github.com/YakDriver/smarterr" "github.com/aws/aws-sdk-go-v2/aws" 
"github.com/aws/aws-sdk-go-v2/service/auditmanager" "github.com/hashicorp/terraform-plugin-log/tflog" @@ -26,7 +26,7 @@ func listTags(ctx context.Context, conn *auditmanager.Client, identifier string, output, err := conn.ListTagsForResource(ctx, &input, optFns...) if err != nil { - return tftags.New(ctx, nil), err + return tftags.New(ctx, nil), smarterr.NewError(err) } return keyValueTags(ctx, output.Tags), nil @@ -38,7 +38,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri tags, err := listTags(ctx, meta.(*conns.AWSClient).AuditManagerClient(ctx), identifier) if err != nil { - return err + return smarterr.NewError(err) } if inContext, ok := tftags.FromContext(ctx); ok { @@ -99,7 +99,7 @@ func updateTags(ctx context.Context, conn *auditmanager.Client, identifier strin _, err := conn.UntagResource(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("untagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } @@ -114,7 +114,7 @@ func updateTags(ctx context.Context, conn *auditmanager.Client, identifier strin _, err := conn.TagResource(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("tagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } diff --git a/internal/service/auditmanager/testdata/AccountRegistration/basic_v5.100.0/main_gen.tf b/internal/service/auditmanager/testdata/AccountRegistration/basic_v5.100.0/main_gen.tf new file mode 100644 index 000000000000..f6b76aae2845 --- /dev/null +++ b/internal/service/auditmanager/testdata/AccountRegistration/basic_v5.100.0/main_gen.tf @@ -0,0 +1,15 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_auditmanager_account_registration" "test" {} + +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "5.100.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/auditmanager/testdata/AccountRegistration/basic_v6.0.0/main_gen.tf b/internal/service/auditmanager/testdata/AccountRegistration/basic_v6.0.0/main_gen.tf new file mode 100644 index 000000000000..d27b9d90b4f6 --- /dev/null +++ b/internal/service/auditmanager/testdata/AccountRegistration/basic_v6.0.0/main_gen.tf @@ -0,0 +1,15 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resource "aws_auditmanager_account_registration" "test" {} + +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "6.0.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/autoscaling/attachment.go b/internal/service/autoscaling/attachment.go index 786c180f768d..2493c9e18cf5 100644 --- a/internal/service/autoscaling/attachment.go +++ b/internal/service/autoscaling/attachment.go @@ -63,7 +63,7 @@ func resourceAttachmentCreate(ctx context.Context, d *schema.ResourceData, meta } _, err := tfresource.RetryWhenAWSErrMessageContains(ctx, d.Timeout(schema.TimeoutCreate), - func() (any, error) { + func(ctx context.Context) (any, error) { return conn.AttachLoadBalancers(ctx, input) }, // ValidationError: Trying to update too many Load Balancers/Target Groups at once. 
The limit is 10 @@ -80,7 +80,7 @@ func resourceAttachmentCreate(ctx context.Context, d *schema.ResourceData, meta } _, err := tfresource.RetryWhenAWSErrMessageContains(ctx, d.Timeout(schema.TimeoutCreate), - func() (any, error) { + func(ctx context.Context) (any, error) { return conn.AttachLoadBalancerTargetGroups(ctx, input) }, errCodeValidationError, "update too many") @@ -135,7 +135,7 @@ func resourceAttachmentDelete(ctx context.Context, d *schema.ResourceData, meta } _, err := tfresource.RetryWhenAWSErrMessageContains(ctx, d.Timeout(schema.TimeoutCreate), - func() (any, error) { + func(ctx context.Context) (any, error) { return conn.DetachLoadBalancers(ctx, input) }, errCodeValidationError, "update too many") @@ -155,7 +155,7 @@ func resourceAttachmentDelete(ctx context.Context, d *schema.ResourceData, meta } _, err := tfresource.RetryWhenAWSErrMessageContains(ctx, d.Timeout(schema.TimeoutCreate), - func() (any, error) { + func(ctx context.Context) (any, error) { return conn.DetachLoadBalancerTargetGroups(ctx, input) }, errCodeValidationError, "update too many") diff --git a/internal/service/autoscaling/group.go b/internal/service/autoscaling/group.go index 46d8a93eb37b..a78555b9fb23 100644 --- a/internal/service/autoscaling/group.go +++ b/internal/service/autoscaling/group.go @@ -1217,7 +1217,7 @@ func resourceGroupCreate(ctx context.Context, d *schema.ResourceData, meta any) } _, err := tfresource.RetryWhenAWSErrMessageContains(ctx, propagationTimeout, - func() (any, error) { + func(ctx context.Context) (any, error) { return conn.CreateAutoScalingGroup(ctx, &inputCASG) }, // ValidationError: You must use a valid fully-formed launch template. Value (tf-acc-test-6643732652421074386) for parameter iamInstanceProfile.name is invalid. 
Invalid IAM Instance Profile name @@ -1235,7 +1235,7 @@ func resourceGroupCreate(ctx context.Context, d *schema.ResourceData, meta any) timeout = 5 * time.Minute ) _, err := tfresource.RetryWhenAWSErrMessageContains(ctx, timeout, - func() (any, error) { + func(ctx context.Context) (any, error) { return conn.PutLifecycleHook(ctx, input) }, errCodeValidationError, "Unable to publish test message to notification target") @@ -1562,7 +1562,7 @@ func resourceGroupUpdate(ctx context.Context, d *schema.ResourceData, meta any) } _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, d.Timeout(schema.TimeoutUpdate), - func() (any, error) { + func(ctx context.Context) (any, error) { return conn.UpdateAutoScalingGroup(ctx, &input) }, errCodeOperationError, errCodeUpdateASG, errCodeValidationError) @@ -1895,7 +1895,7 @@ func resourceGroupDelete(ctx context.Context, d *schema.ResourceData, meta any) ForceDelete: aws.Bool(forceDeleteGroup), } _, err = tfresource.RetryWhenAWSErrCodeEquals(ctx, d.Timeout(schema.TimeoutDelete), - func() (any, error) { + func(ctx context.Context) (any, error) { return conn.DeleteAutoScalingGroup(ctx, &input) }, errCodeResourceInUseFault, errCodeScalingActivityInProgressFault) @@ -1909,7 +1909,7 @@ func resourceGroupDelete(ctx context.Context, d *schema.ResourceData, meta any) } _, err = tfresource.RetryUntilNotFound(ctx, d.Timeout(schema.TimeoutDelete), - func() (any, error) { + func(ctx context.Context) (any, error) { return findGroupByName(ctx, conn, d.Id()) }) @@ -1989,7 +1989,7 @@ func deleteWarmPool(ctx context.Context, conn *autoscaling.Client, name string, ForceDelete: aws.Bool(force), } _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, timeout, - func() (any, error) { + func(ctx context.Context) (any, error) { return conn.DeleteWarmPool(ctx, &input) }, errCodeResourceInUseFault, errCodeScalingActivityInProgressFault) @@ -4155,7 +4155,7 @@ func startInstanceRefresh(ctx context.Context, conn *autoscaling.Client, input * name := 
aws.ToString(input.AutoScalingGroupName) _, err := tfresource.RetryWhen(ctx, instanceRefreshStartedTimeout, - func() (any, error) { + func(ctx context.Context) (any, error) { return conn.StartInstanceRefresh(ctx, input) }, func(err error) (bool, error) { diff --git a/internal/service/autoscaling/launch_configuration.go b/internal/service/autoscaling/launch_configuration.go index ff807fcd1a1e..370b7b8630fc 100644 --- a/internal/service/autoscaling/launch_configuration.go +++ b/internal/service/autoscaling/launch_configuration.go @@ -404,7 +404,7 @@ func resourceLaunchConfigurationCreate(ctx context.Context, d *schema.ResourceDa // IAM profiles can take ~10 seconds to propagate in AWS: // http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html#launch-instance-with-role-console _, err = tfresource.RetryWhen(ctx, propagationTimeout, - func() (any, error) { + func(ctx context.Context) (any, error) { return autoscalingconn.CreateLaunchConfiguration(ctx, &input) }, func(err error) (bool, error) { @@ -516,8 +516,8 @@ func resourceLaunchConfigurationDelete(ctx context.Context, d *schema.ResourceDa conn := meta.(*conns.AWSClient).AutoScalingClient(ctx) log.Printf("[DEBUG] Deleting Auto Scaling Launch Configuration: %s", d.Id()) - _, err := tfresource.RetryWhenIsA[*awstypes.ResourceInUseFault](ctx, propagationTimeout, - func() (any, error) { + _, err := tfresource.RetryWhenIsA[any, *awstypes.ResourceInUseFault](ctx, propagationTimeout, + func(ctx context.Context) (any, error) { return conn.DeleteLaunchConfiguration(ctx, &autoscaling.DeleteLaunchConfigurationInput{ LaunchConfigurationName: aws.String(d.Id()), }) diff --git a/internal/service/autoscaling/lifecycle_hook.go b/internal/service/autoscaling/lifecycle_hook.go index d8ec7f2c060a..7f8e9acd6f3f 100644 --- a/internal/service/autoscaling/lifecycle_hook.go +++ b/internal/service/autoscaling/lifecycle_hook.go @@ -123,7 +123,7 @@ func resourceLifecycleHookPut(ctx context.Context, d 
*schema.ResourceData, meta } _, err := tfresource.RetryWhenAWSErrMessageContains(ctx, 5*time.Minute, - func() (any, error) { + func(ctx context.Context) (any, error) { return conn.PutLifecycleHook(ctx, input) }, errCodeValidationError, "Unable to publish test message to notification target") diff --git a/internal/service/autoscaling/service_endpoint_resolver_gen.go b/internal/service/autoscaling/service_endpoint_resolver_gen.go index 2bc2ceb136e7..2886cc077671 100644 --- a/internal/service/autoscaling/service_endpoint_resolver_gen.go +++ b/internal/service/autoscaling/service_endpoint_resolver_gen.go @@ -62,7 +62,7 @@ func (r resolverV2) ResolveEndpoint(ctx context.Context, params autoscaling.Endp }) params.UseFIPS = aws.Bool(false) } else { - err = fmt.Errorf("looking up autoscaling endpoint %q: %s", hostname, err) + err = fmt.Errorf("looking up autoscaling endpoint %q: %w", hostname, err) return } } else { diff --git a/internal/service/autoscaling/service_endpoints_gen_test.go b/internal/service/autoscaling/service_endpoints_gen_test.go index 055f00149020..3deb88771d06 100644 --- a/internal/service/autoscaling/service_endpoints_gen_test.go +++ b/internal/service/autoscaling/service_endpoints_gen_test.go @@ -521,7 +521,7 @@ func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { ) } -var errCancelOperation = fmt.Errorf("Test: Canceling request") +var errCancelOperation = errors.New("Test: Canceling request") func addCancelRequestMiddleware() func(*middleware.Stack) error { return func(stack *middleware.Stack) error { diff --git a/internal/service/autoscaling/service_package_gen.go b/internal/service/autoscaling/service_package_gen.go index 34366bb379e0..1ea68d0a2410 100644 --- a/internal/service/autoscaling/service_package_gen.go +++ b/internal/service/autoscaling/service_package_gen.go @@ -7,7 +7,6 @@ import ( "unique" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/retry" 
"github.com/aws/aws-sdk-go-v2/service/autoscaling" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -131,7 +130,7 @@ func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) ( func(o *autoscaling.Options) { if inContext, ok := conns.FromContext(ctx); ok && inContext.VCREnabled() { tflog.Info(ctx, "overriding retry behavior to immediately return VCR errors") - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(vcr.InteractionNotFoundRetryableFunc)) + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), vcr.InteractionNotFoundRetryableFunc) } }, withExtraOptions(ctx, p, config), diff --git a/internal/service/autoscaling/sweep.go b/internal/service/autoscaling/sweep.go index 4c8100c99b1a..a9199c089231 100644 --- a/internal/service/autoscaling/sweep.go +++ b/internal/service/autoscaling/sweep.go @@ -32,7 +32,7 @@ func sweepGroups(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.AutoScalingClient(ctx) input := &autoscaling.DescribeAutoScalingGroupsInput{} @@ -75,7 +75,7 @@ func sweepLaunchConfigurations(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.AutoScalingClient(ctx) input := &autoscaling.DescribeLaunchConfigurationsInput{} diff --git a/internal/service/autoscaling/tags_gen.go b/internal/service/autoscaling/tags_gen.go index 8e9ea5141486..0d62840d0841 100644 --- a/internal/service/autoscaling/tags_gen.go +++ b/internal/service/autoscaling/tags_gen.go @@ -3,8 +3,8 @@ package autoscaling import ( "context" - "fmt" + 
"github.com/YakDriver/smarterr" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/autoscaling" awstypes "github.com/aws/aws-sdk-go-v2/service/autoscaling/types" @@ -41,13 +41,13 @@ func findTag(ctx context.Context, conn *autoscaling.Client, identifier, resource output, err := conn.DescribeTags(ctx, &input, optFns...) if err != nil { - return nil, err + return nil, smarterr.NewError(err) } listTags := keyValueTags(ctx, output.Tags, identifier, resourceType) if !listTags.KeyExists(key) { - return nil, tfresource.NewEmptyResultError(nil) + return nil, smarterr.NewError(tfresource.NewEmptyResultError(nil)) } return listTags.KeyTagData(key), nil @@ -73,7 +73,7 @@ func listTags(ctx context.Context, conn *autoscaling.Client, identifier, resourc page, err := pages.NextPage(ctx, optFns...) if err != nil { - return tftags.New(ctx, nil), err + return tftags.New(ctx, nil), smarterr.NewError(err) } output = append(output, page.Tags...) @@ -88,7 +88,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier, res tags, err := listTags(ctx, meta.(*conns.AWSClient).AutoScalingClient(ctx), identifier, resourceType) if err != nil { - return err + return smarterr.NewError(err) } if inContext, ok := tftags.FromContext(ctx); ok { @@ -259,7 +259,7 @@ func updateTags(ctx context.Context, conn *autoscaling.Client, identifier, resou _, err := conn.DeleteTags(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("untagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } @@ -273,7 +273,7 @@ func updateTags(ctx context.Context, conn *autoscaling.Client, identifier, resou _, err := conn.CreateOrUpdateTags(ctx, &input, optFns...) 
if err != nil { - return fmt.Errorf("tagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } diff --git a/internal/service/autoscalingplans/service_endpoint_resolver_gen.go b/internal/service/autoscalingplans/service_endpoint_resolver_gen.go index 321a5ef54002..11a3c107c151 100644 --- a/internal/service/autoscalingplans/service_endpoint_resolver_gen.go +++ b/internal/service/autoscalingplans/service_endpoint_resolver_gen.go @@ -62,7 +62,7 @@ func (r resolverV2) ResolveEndpoint(ctx context.Context, params autoscalingplans }) params.UseFIPS = aws.Bool(false) } else { - err = fmt.Errorf("looking up autoscalingplans endpoint %q: %s", hostname, err) + err = fmt.Errorf("looking up autoscalingplans endpoint %q: %w", hostname, err) return } } else { diff --git a/internal/service/autoscalingplans/service_endpoints_gen_test.go b/internal/service/autoscalingplans/service_endpoints_gen_test.go index 50c583ae9764..0cd9335e2639 100644 --- a/internal/service/autoscalingplans/service_endpoints_gen_test.go +++ b/internal/service/autoscalingplans/service_endpoints_gen_test.go @@ -521,7 +521,7 @@ func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { ) } -var errCancelOperation = fmt.Errorf("Test: Canceling request") +var errCancelOperation = errors.New("Test: Canceling request") func addCancelRequestMiddleware() func(*middleware.Stack) error { return func(stack *middleware.Stack) error { diff --git a/internal/service/autoscalingplans/service_package_gen.go b/internal/service/autoscalingplans/service_package_gen.go index e870e92da1e4..9d5c35603160 100644 --- a/internal/service/autoscalingplans/service_package_gen.go +++ b/internal/service/autoscalingplans/service_package_gen.go @@ -7,7 +7,6 @@ import ( "unique" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/autoscalingplans" "github.com/hashicorp/terraform-plugin-log/tflog" 
"github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -64,7 +63,7 @@ func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) ( func(o *autoscalingplans.Options) { if inContext, ok := conns.FromContext(ctx); ok && inContext.VCREnabled() { tflog.Info(ctx, "overriding retry behavior to immediately return VCR errors") - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(vcr.InteractionNotFoundRetryableFunc)) + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), vcr.InteractionNotFoundRetryableFunc) } }, withExtraOptions(ctx, p, config), diff --git a/internal/service/backup/framework.go b/internal/service/backup/framework.go index b98749dce680..8c92ba27c81d 100644 --- a/internal/service/backup/framework.go +++ b/internal/service/backup/framework.go @@ -217,7 +217,7 @@ func resourceFrameworkUpdate(ctx context.Context, d *schema.ResourceData, meta a IdempotencyToken: aws.String(sdkid.UniqueId()), } - _, err := tfresource.RetryWhenIsA[*awstypes.ConflictException](ctx, d.Timeout(schema.TimeoutUpdate), func() (any, error) { + _, err := tfresource.RetryWhenIsA[any, *awstypes.ConflictException](ctx, d.Timeout(schema.TimeoutUpdate), func(ctx context.Context) (any, error) { return conn.UpdateFramework(ctx, input) }) @@ -238,7 +238,7 @@ func resourceFrameworkDelete(ctx context.Context, d *schema.ResourceData, meta a conn := meta.(*conns.AWSClient).BackupClient(ctx) log.Printf("[DEBUG] Deleting Backup Framework: %s", d.Id()) - _, err := tfresource.RetryWhenIsA[*awstypes.ConflictException](ctx, d.Timeout(schema.TimeoutDelete), func() (any, error) { + _, err := tfresource.RetryWhenIsA[any, *awstypes.ConflictException](ctx, d.Timeout(schema.TimeoutDelete), func(ctx context.Context) (any, error) { return conn.DeleteFramework(ctx, &backup.DeleteFrameworkInput{ FrameworkName: aws.String(d.Id()), }) diff --git a/internal/service/backup/framework_data_source_tags_gen_test.go 
b/internal/service/backup/framework_data_source_tags_gen_test.go index 6cd7a34dd791..631029e47a39 100644 --- a/internal/service/backup/framework_data_source_tags_gen_test.go +++ b/internal/service/backup/framework_data_source_tags_gen_test.go @@ -31,10 +31,11 @@ func testAccBackupFrameworkDataSource_tagsSerial(t *testing.T) { func testAccBackupFrameworkDataSource_tags(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_backup_framework.test" rName := randomFrameworkName() - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BackupServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -59,10 +60,11 @@ func testAccBackupFrameworkDataSource_tags(t *testing.T) { func testAccBackupFrameworkDataSource_tags_NullMap(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_backup_framework.test" rName := randomFrameworkName() - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BackupServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -83,10 +85,11 @@ func testAccBackupFrameworkDataSource_tags_NullMap(t *testing.T) { func testAccBackupFrameworkDataSource_tags_EmptyMap(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_backup_framework.test" rName := randomFrameworkName() - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BackupServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -107,10 +110,11 @@ func testAccBackupFrameworkDataSource_tags_EmptyMap(t *testing.T) { func testAccBackupFrameworkDataSource_tags_DefaultTags_nonOverlapping(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := 
"data.aws_backup_framework.test" rName := randomFrameworkName() - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BackupServiceID), Steps: []resource.TestStep{ @@ -139,10 +143,11 @@ func testAccBackupFrameworkDataSource_tags_DefaultTags_nonOverlapping(t *testing func testAccBackupFrameworkDataSource_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_backup_framework.test" rName := randomFrameworkName() - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BackupServiceID), Steps: []resource.TestStep{ @@ -177,10 +182,11 @@ func testAccBackupFrameworkDataSource_tags_IgnoreTags_Overlap_DefaultTag(t *test func testAccBackupFrameworkDataSource_tags_IgnoreTags_Overlap_ResourceTag(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_backup_framework.test" rName := randomFrameworkName() - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BackupServiceID), Steps: []resource.TestStep{ diff --git a/internal/service/backup/framework_tags_gen_test.go b/internal/service/backup/framework_tags_gen_test.go index 66652093f7b7..fc7f9e647bd1 100644 --- a/internal/service/backup/framework_tags_gen_test.go +++ b/internal/service/backup/framework_tags_gen_test.go @@ -47,11 +47,12 @@ func testAccBackupFramework_tagsSerial(t *testing.T) { func testAccBackupFramework_tags(t *testing.T) { ctx := acctest.Context(t) + var v backup.DescribeFrameworkOutput resourceName := "aws_backup_framework.test" rName := randomFrameworkName() - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: 
acctest.ErrorCheck(t, names.BackupServiceID), CheckDestroy: testAccCheckFrameworkDestroy(ctx), @@ -229,11 +230,12 @@ func testAccBackupFramework_tags(t *testing.T) { func testAccBackupFramework_tags_null(t *testing.T) { ctx := acctest.Context(t) + var v backup.DescribeFrameworkOutput resourceName := "aws_backup_framework.test" rName := randomFrameworkName() - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BackupServiceID), CheckDestroy: testAccCheckFrameworkDestroy(ctx), @@ -296,11 +298,12 @@ func testAccBackupFramework_tags_null(t *testing.T) { func testAccBackupFramework_tags_EmptyMap(t *testing.T) { ctx := acctest.Context(t) + var v backup.DescribeFrameworkOutput resourceName := "aws_backup_framework.test" rName := randomFrameworkName() - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BackupServiceID), CheckDestroy: testAccCheckFrameworkDestroy(ctx), @@ -359,11 +362,12 @@ func testAccBackupFramework_tags_EmptyMap(t *testing.T) { func testAccBackupFramework_tags_AddOnUpdate(t *testing.T) { ctx := acctest.Context(t) + var v backup.DescribeFrameworkOutput resourceName := "aws_backup_framework.test" rName := randomFrameworkName() - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BackupServiceID), CheckDestroy: testAccCheckFrameworkDestroy(ctx), @@ -440,11 +444,12 @@ func testAccBackupFramework_tags_AddOnUpdate(t *testing.T) { func testAccBackupFramework_tags_EmptyTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) + var v backup.DescribeFrameworkOutput resourceName := "aws_backup_framework.test" rName := randomFrameworkName() - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: 
func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BackupServiceID), CheckDestroy: testAccCheckFrameworkDestroy(ctx), @@ -529,11 +534,12 @@ func testAccBackupFramework_tags_EmptyTag_OnCreate(t *testing.T) { func testAccBackupFramework_tags_EmptyTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + var v backup.DescribeFrameworkOutput resourceName := "aws_backup_framework.test" rName := randomFrameworkName() - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BackupServiceID), CheckDestroy: testAccCheckFrameworkDestroy(ctx), @@ -666,11 +672,12 @@ func testAccBackupFramework_tags_EmptyTag_OnUpdate_Add(t *testing.T) { func testAccBackupFramework_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + var v backup.DescribeFrameworkOutput resourceName := "aws_backup_framework.test" rName := randomFrameworkName() - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BackupServiceID), CheckDestroy: testAccCheckFrameworkDestroy(ctx), @@ -755,11 +762,12 @@ func testAccBackupFramework_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { func testAccBackupFramework_tags_DefaultTags_providerOnly(t *testing.T) { ctx := acctest.Context(t) + var v backup.DescribeFrameworkOutput resourceName := "aws_backup_framework.test" rName := randomFrameworkName() - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BackupServiceID), CheckDestroy: testAccCheckFrameworkDestroy(ctx), @@ -936,11 +944,12 @@ func testAccBackupFramework_tags_DefaultTags_providerOnly(t *testing.T) { func testAccBackupFramework_tags_DefaultTags_nonOverlapping(t *testing.T) { ctx := acctest.Context(t) + var v 
backup.DescribeFrameworkOutput resourceName := "aws_backup_framework.test" rName := randomFrameworkName() - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BackupServiceID), CheckDestroy: testAccCheckFrameworkDestroy(ctx), @@ -1096,11 +1105,12 @@ func testAccBackupFramework_tags_DefaultTags_nonOverlapping(t *testing.T) { func testAccBackupFramework_tags_DefaultTags_overlapping(t *testing.T) { ctx := acctest.Context(t) + var v backup.DescribeFrameworkOutput resourceName := "aws_backup_framework.test" rName := randomFrameworkName() - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BackupServiceID), CheckDestroy: testAccCheckFrameworkDestroy(ctx), @@ -1272,11 +1282,12 @@ func testAccBackupFramework_tags_DefaultTags_overlapping(t *testing.T) { func testAccBackupFramework_tags_DefaultTags_updateToProviderOnly(t *testing.T) { ctx := acctest.Context(t) + var v backup.DescribeFrameworkOutput resourceName := "aws_backup_framework.test" rName := randomFrameworkName() - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BackupServiceID), CheckDestroy: testAccCheckFrameworkDestroy(ctx), @@ -1362,11 +1373,12 @@ func testAccBackupFramework_tags_DefaultTags_updateToProviderOnly(t *testing.T) func testAccBackupFramework_tags_DefaultTags_updateToResourceOnly(t *testing.T) { ctx := acctest.Context(t) + var v backup.DescribeFrameworkOutput resourceName := "aws_backup_framework.test" rName := randomFrameworkName() - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BackupServiceID), CheckDestroy: testAccCheckFrameworkDestroy(ctx), 
@@ -1451,11 +1463,12 @@ func testAccBackupFramework_tags_DefaultTags_updateToResourceOnly(t *testing.T) func testAccBackupFramework_tags_DefaultTags_emptyResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v backup.DescribeFrameworkOutput resourceName := "aws_backup_framework.test" rName := randomFrameworkName() - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BackupServiceID), CheckDestroy: testAccCheckFrameworkDestroy(ctx), @@ -1516,11 +1529,12 @@ func testAccBackupFramework_tags_DefaultTags_emptyResourceTag(t *testing.T) { func testAccBackupFramework_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T) { ctx := acctest.Context(t) + var v backup.DescribeFrameworkOutput resourceName := "aws_backup_framework.test" rName := randomFrameworkName() - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BackupServiceID), CheckDestroy: testAccCheckFrameworkDestroy(ctx), @@ -1573,11 +1587,12 @@ func testAccBackupFramework_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T) func testAccBackupFramework_tags_DefaultTags_nullOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v backup.DescribeFrameworkOutput resourceName := "aws_backup_framework.test" rName := randomFrameworkName() - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BackupServiceID), CheckDestroy: testAccCheckFrameworkDestroy(ctx), @@ -1635,11 +1650,12 @@ func testAccBackupFramework_tags_DefaultTags_nullOverlappingResourceTag(t *testi func testAccBackupFramework_tags_DefaultTags_nullNonOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v backup.DescribeFrameworkOutput resourceName := "aws_backup_framework.test" rName := 
randomFrameworkName() - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BackupServiceID), CheckDestroy: testAccCheckFrameworkDestroy(ctx), @@ -1697,11 +1713,12 @@ func testAccBackupFramework_tags_DefaultTags_nullNonOverlappingResourceTag(t *te func testAccBackupFramework_tags_ComputedTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) + var v backup.DescribeFrameworkOutput resourceName := "aws_backup_framework.test" rName := randomFrameworkName() - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BackupServiceID), CheckDestroy: testAccCheckFrameworkDestroy(ctx), @@ -1752,11 +1769,12 @@ func testAccBackupFramework_tags_ComputedTag_OnCreate(t *testing.T) { func testAccBackupFramework_tags_ComputedTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + var v backup.DescribeFrameworkOutput resourceName := "aws_backup_framework.test" rName := randomFrameworkName() - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BackupServiceID), CheckDestroy: testAccCheckFrameworkDestroy(ctx), @@ -1849,11 +1867,12 @@ func testAccBackupFramework_tags_ComputedTag_OnUpdate_Add(t *testing.T) { func testAccBackupFramework_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + var v backup.DescribeFrameworkOutput resourceName := "aws_backup_framework.test" rName := randomFrameworkName() - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BackupServiceID), CheckDestroy: testAccCheckFrameworkDestroy(ctx), @@ -1936,11 +1955,12 @@ func testAccBackupFramework_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { 
func testAccBackupFramework_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { ctx := acctest.Context(t) + var v backup.DescribeFrameworkOutput resourceName := "aws_backup_framework.test" rName := randomFrameworkName() - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BackupServiceID), CheckDestroy: testAccCheckFrameworkDestroy(ctx), @@ -2098,11 +2118,12 @@ func testAccBackupFramework_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { func testAccBackupFramework_tags_IgnoreTags_Overlap_ResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v backup.DescribeFrameworkOutput resourceName := "aws_backup_framework.test" rName := randomFrameworkName() - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BackupServiceID), CheckDestroy: testAccCheckFrameworkDestroy(ctx), diff --git a/internal/service/backup/plan.go b/internal/service/backup/plan.go index 6660bd6d991d..35b48bb97ae9 100644 --- a/internal/service/backup/plan.go +++ b/internal/service/backup/plan.go @@ -287,7 +287,7 @@ func resourcePlanDelete(ctx context.Context, d *schema.ResourceData, meta any) d const ( timeout = 2 * time.Minute ) - _, err := tfresource.RetryWhenIsAErrorMessageContains[*awstypes.InvalidRequestException](ctx, timeout, func() (any, error) { + _, err := tfresource.RetryWhenIsAErrorMessageContains[any, *awstypes.InvalidRequestException](ctx, timeout, func(ctx context.Context) (any, error) { return conn.DeleteBackupPlan(ctx, &backup.DeleteBackupPlanInput{ BackupPlanId: aws.String(d.Id()), }) diff --git a/internal/service/backup/plan_data_source_tags_gen_test.go b/internal/service/backup/plan_data_source_tags_gen_test.go index e9b378793d25..74029e367792 100644 --- a/internal/service/backup/plan_data_source_tags_gen_test.go +++ 
b/internal/service/backup/plan_data_source_tags_gen_test.go @@ -6,7 +6,6 @@ import ( "testing" "github.com/hashicorp/terraform-plugin-testing/config" - sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/knownvalue" "github.com/hashicorp/terraform-plugin-testing/statecheck" @@ -17,10 +16,11 @@ import ( func TestAccBackupPlanDataSource_tags(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_backup_plan.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BackupServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -45,10 +45,11 @@ func TestAccBackupPlanDataSource_tags(t *testing.T) { func TestAccBackupPlanDataSource_tags_NullMap(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_backup_plan.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BackupServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -69,10 +70,11 @@ func TestAccBackupPlanDataSource_tags_NullMap(t *testing.T) { func TestAccBackupPlanDataSource_tags_EmptyMap(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_backup_plan.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: 
func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BackupServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -93,10 +95,11 @@ func TestAccBackupPlanDataSource_tags_EmptyMap(t *testing.T) { func TestAccBackupPlanDataSource_tags_DefaultTags_nonOverlapping(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_backup_plan.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BackupServiceID), Steps: []resource.TestStep{ @@ -125,10 +128,11 @@ func TestAccBackupPlanDataSource_tags_DefaultTags_nonOverlapping(t *testing.T) { func TestAccBackupPlanDataSource_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_backup_plan.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BackupServiceID), Steps: []resource.TestStep{ @@ -163,10 +167,11 @@ func TestAccBackupPlanDataSource_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T func TestAccBackupPlanDataSource_tags_IgnoreTags_Overlap_ResourceTag(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_backup_plan.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BackupServiceID), Steps: []resource.TestStep{ diff --git 
a/internal/service/backup/plan_tags_gen_test.go b/internal/service/backup/plan_tags_gen_test.go index 9eadddb8997c..c6f4d89c95a9 100644 --- a/internal/service/backup/plan_tags_gen_test.go +++ b/internal/service/backup/plan_tags_gen_test.go @@ -7,7 +7,6 @@ import ( "github.com/aws/aws-sdk-go-v2/service/backup" "github.com/hashicorp/terraform-plugin-testing/config" - sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/knownvalue" "github.com/hashicorp/terraform-plugin-testing/plancheck" @@ -19,11 +18,12 @@ import ( func TestAccBackupPlan_tags(t *testing.T) { ctx := acctest.Context(t) + var v backup.GetBackupPlanOutput resourceName := "aws_backup_plan.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BackupServiceID), CheckDestroy: testAccCheckPlanDestroy(ctx), @@ -201,11 +201,12 @@ func TestAccBackupPlan_tags(t *testing.T) { func TestAccBackupPlan_tags_null(t *testing.T) { ctx := acctest.Context(t) + var v backup.GetBackupPlanOutput resourceName := "aws_backup_plan.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BackupServiceID), CheckDestroy: testAccCheckPlanDestroy(ctx), @@ -268,11 +269,12 @@ func TestAccBackupPlan_tags_null(t *testing.T) { func TestAccBackupPlan_tags_EmptyMap(t *testing.T) { ctx := acctest.Context(t) + var v backup.GetBackupPlanOutput resourceName := "aws_backup_plan.test" - rName := 
sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BackupServiceID), CheckDestroy: testAccCheckPlanDestroy(ctx), @@ -331,11 +333,12 @@ func TestAccBackupPlan_tags_EmptyMap(t *testing.T) { func TestAccBackupPlan_tags_AddOnUpdate(t *testing.T) { ctx := acctest.Context(t) + var v backup.GetBackupPlanOutput resourceName := "aws_backup_plan.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BackupServiceID), CheckDestroy: testAccCheckPlanDestroy(ctx), @@ -412,11 +415,12 @@ func TestAccBackupPlan_tags_AddOnUpdate(t *testing.T) { func TestAccBackupPlan_tags_EmptyTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) + var v backup.GetBackupPlanOutput resourceName := "aws_backup_plan.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BackupServiceID), CheckDestroy: testAccCheckPlanDestroy(ctx), @@ -501,11 +505,12 @@ func TestAccBackupPlan_tags_EmptyTag_OnCreate(t *testing.T) { func TestAccBackupPlan_tags_EmptyTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + var v backup.GetBackupPlanOutput resourceName := "aws_backup_plan.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + 
acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BackupServiceID), CheckDestroy: testAccCheckPlanDestroy(ctx), @@ -638,11 +643,12 @@ func TestAccBackupPlan_tags_EmptyTag_OnUpdate_Add(t *testing.T) { func TestAccBackupPlan_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + var v backup.GetBackupPlanOutput resourceName := "aws_backup_plan.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BackupServiceID), CheckDestroy: testAccCheckPlanDestroy(ctx), @@ -727,11 +733,12 @@ func TestAccBackupPlan_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { func TestAccBackupPlan_tags_DefaultTags_providerOnly(t *testing.T) { ctx := acctest.Context(t) + var v backup.GetBackupPlanOutput resourceName := "aws_backup_plan.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BackupServiceID), CheckDestroy: testAccCheckPlanDestroy(ctx), @@ -908,11 +915,12 @@ func TestAccBackupPlan_tags_DefaultTags_providerOnly(t *testing.T) { func TestAccBackupPlan_tags_DefaultTags_nonOverlapping(t *testing.T) { ctx := acctest.Context(t) + var v backup.GetBackupPlanOutput resourceName := "aws_backup_plan.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, 
ErrorCheck: acctest.ErrorCheck(t, names.BackupServiceID), CheckDestroy: testAccCheckPlanDestroy(ctx), @@ -1068,11 +1076,12 @@ func TestAccBackupPlan_tags_DefaultTags_nonOverlapping(t *testing.T) { func TestAccBackupPlan_tags_DefaultTags_overlapping(t *testing.T) { ctx := acctest.Context(t) + var v backup.GetBackupPlanOutput resourceName := "aws_backup_plan.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BackupServiceID), CheckDestroy: testAccCheckPlanDestroy(ctx), @@ -1244,11 +1253,12 @@ func TestAccBackupPlan_tags_DefaultTags_overlapping(t *testing.T) { func TestAccBackupPlan_tags_DefaultTags_updateToProviderOnly(t *testing.T) { ctx := acctest.Context(t) + var v backup.GetBackupPlanOutput resourceName := "aws_backup_plan.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BackupServiceID), CheckDestroy: testAccCheckPlanDestroy(ctx), @@ -1334,11 +1344,12 @@ func TestAccBackupPlan_tags_DefaultTags_updateToProviderOnly(t *testing.T) { func TestAccBackupPlan_tags_DefaultTags_updateToResourceOnly(t *testing.T) { ctx := acctest.Context(t) + var v backup.GetBackupPlanOutput resourceName := "aws_backup_plan.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BackupServiceID), CheckDestroy: 
testAccCheckPlanDestroy(ctx), @@ -1423,11 +1434,12 @@ func TestAccBackupPlan_tags_DefaultTags_updateToResourceOnly(t *testing.T) { func TestAccBackupPlan_tags_DefaultTags_emptyResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v backup.GetBackupPlanOutput resourceName := "aws_backup_plan.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BackupServiceID), CheckDestroy: testAccCheckPlanDestroy(ctx), @@ -1488,11 +1500,12 @@ func TestAccBackupPlan_tags_DefaultTags_emptyResourceTag(t *testing.T) { func TestAccBackupPlan_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T) { ctx := acctest.Context(t) + var v backup.GetBackupPlanOutput resourceName := "aws_backup_plan.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BackupServiceID), CheckDestroy: testAccCheckPlanDestroy(ctx), @@ -1545,11 +1558,12 @@ func TestAccBackupPlan_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T) { func TestAccBackupPlan_tags_DefaultTags_nullOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v backup.GetBackupPlanOutput resourceName := "aws_backup_plan.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BackupServiceID), CheckDestroy: testAccCheckPlanDestroy(ctx), @@ -1607,11 +1621,12 @@ 
func TestAccBackupPlan_tags_DefaultTags_nullOverlappingResourceTag(t *testing.T) func TestAccBackupPlan_tags_DefaultTags_nullNonOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v backup.GetBackupPlanOutput resourceName := "aws_backup_plan.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BackupServiceID), CheckDestroy: testAccCheckPlanDestroy(ctx), @@ -1669,11 +1684,12 @@ func TestAccBackupPlan_tags_DefaultTags_nullNonOverlappingResourceTag(t *testing func TestAccBackupPlan_tags_ComputedTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) + var v backup.GetBackupPlanOutput resourceName := "aws_backup_plan.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BackupServiceID), CheckDestroy: testAccCheckPlanDestroy(ctx), @@ -1724,11 +1740,12 @@ func TestAccBackupPlan_tags_ComputedTag_OnCreate(t *testing.T) { func TestAccBackupPlan_tags_ComputedTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + var v backup.GetBackupPlanOutput resourceName := "aws_backup_plan.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BackupServiceID), CheckDestroy: testAccCheckPlanDestroy(ctx), @@ -1821,11 +1838,12 @@ func TestAccBackupPlan_tags_ComputedTag_OnUpdate_Add(t *testing.T) 
{ func TestAccBackupPlan_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + var v backup.GetBackupPlanOutput resourceName := "aws_backup_plan.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BackupServiceID), CheckDestroy: testAccCheckPlanDestroy(ctx), @@ -1908,11 +1926,12 @@ func TestAccBackupPlan_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { func TestAccBackupPlan_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { ctx := acctest.Context(t) + var v backup.GetBackupPlanOutput resourceName := "aws_backup_plan.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BackupServiceID), CheckDestroy: testAccCheckPlanDestroy(ctx), @@ -2070,11 +2089,12 @@ func TestAccBackupPlan_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { func TestAccBackupPlan_tags_IgnoreTags_Overlap_ResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v backup.GetBackupPlanOutput resourceName := "aws_backup_plan.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BackupServiceID), CheckDestroy: testAccCheckPlanDestroy(ctx), diff --git a/internal/service/backup/region_settings_identity_gen_test.go b/internal/service/backup/region_settings_identity_gen_test.go index 
e77a33e925a1..8f9fd397a106 100644 --- a/internal/service/backup/region_settings_identity_gen_test.go +++ b/internal/service/backup/region_settings_identity_gen_test.go @@ -16,6 +16,7 @@ import ( "github.com/hashicorp/terraform-plugin-testing/tfversion" "github.com/hashicorp/terraform-provider-aws/internal/acctest" tfknownvalue "github.com/hashicorp/terraform-provider-aws/internal/acctest/knownvalue" + tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -23,9 +24,10 @@ func testAccBackupRegionSettings_IdentitySerial(t *testing.T) { t.Helper() testCases := map[string]func(t *testing.T){ - acctest.CtBasic: testAccBackupRegionSettings_Identity_Basic, - "ExistingResource": testAccBackupRegionSettings_Identity_ExistingResource, - "RegionOverride": testAccBackupRegionSettings_Identity_RegionOverride, + acctest.CtBasic: testAccBackupRegionSettings_Identity_Basic, + "ExistingResource": testAccBackupRegionSettings_Identity_ExistingResource, + "ExistingResourceNoRefresh": testAccBackupRegionSettings_Identity_ExistingResource_NoRefresh_NoChange, + "RegionOverride": testAccBackupRegionSettings_Identity_RegionOverride, } acctest.RunSerialTests1Level(t, testCases, 0) @@ -37,7 +39,7 @@ func testAccBackupRegionSettings_Identity_Basic(t *testing.T) { var v backup.DescribeRegionSettingsOutput resourceName := "aws_backup_region_settings.test" - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ TerraformVersionChecks: []tfversion.TerraformVersionCheck{ tfversion.SkipBelow(tfversion.Version1_12_0), }, @@ -114,7 +116,7 @@ func testAccBackupRegionSettings_Identity_RegionOverride(t *testing.T) { resourceName := "aws_backup_region_settings.test" - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ TerraformVersionChecks: []tfversion.TerraformVersionCheck{ tfversion.SkipBelow(tfversion.Version1_12_0), }, @@ -221,3 +223,126 @@ func 
testAccBackupRegionSettings_Identity_RegionOverride(t *testing.T) { }, }) } + +func testAccBackupRegionSettings_Identity_ExistingResource(t *testing.T) { + ctx := acctest.Context(t) + + var v backup.DescribeRegionSettingsOutput + resourceName := "aws_backup_region_settings.test" + + acctest.Test(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { + acctest.PreCheck(ctx, t) + testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.BackupServiceID), + CheckDestroy: acctest.CheckDestroyNoop, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/RegionSettings/basic_v5.100.0/"), + ConfigVariables: config.Variables{}, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckRegionSettingsExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: v6.0 Identity error + { + ConfigDirectory: config.StaticDirectory("testdata/RegionSettings/basic_v6.0.0/"), + ConfigVariables: config.Variables{}, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckRegionSettingsExists(ctx, resourceName, &v), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: knownvalue.Null(), + names.AttrRegion: knownvalue.Null(), + }), + }, + }, + + // Step 3: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/RegionSettings/basic/"), + 
ConfigVariables: config.Variables{}, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + }), + }, + }, + }, + }) +} + +func testAccBackupRegionSettings_Identity_ExistingResource_NoRefresh_NoChange(t *testing.T) { + ctx := acctest.Context(t) + + var v backup.DescribeRegionSettingsOutput + resourceName := "aws_backup_region_settings.test" + + acctest.Test(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { + acctest.PreCheck(ctx, t) + testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.BackupServiceID), + CheckDestroy: acctest.CheckDestroyNoop, + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/RegionSettings/basic_v5.100.0/"), + ConfigVariables: config.Variables{}, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckRegionSettingsExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/RegionSettings/basic/"), + ConfigVariables: config.Variables{}, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckRegionSettingsExists(ctx, resourceName, &v), 
+ ), + }, + }, + }) +} diff --git a/internal/service/backup/region_settings_test.go b/internal/service/backup/region_settings_test.go index a400c5cdf94c..bac5d13b1497 100644 --- a/internal/service/backup/region_settings_test.go +++ b/internal/service/backup/region_settings_test.go @@ -9,14 +9,8 @@ import ( "github.com/aws/aws-sdk-go-v2/service/backup" "github.com/hashicorp/terraform-plugin-testing/helper/resource" - "github.com/hashicorp/terraform-plugin-testing/knownvalue" - "github.com/hashicorp/terraform-plugin-testing/plancheck" - "github.com/hashicorp/terraform-plugin-testing/statecheck" "github.com/hashicorp/terraform-plugin-testing/terraform" - "github.com/hashicorp/terraform-plugin-testing/tfversion" "github.com/hashicorp/terraform-provider-aws/internal/acctest" - tfknownvalue "github.com/hashicorp/terraform-provider-aws/internal/acctest/knownvalue" - tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" "github.com/hashicorp/terraform-provider-aws/internal/conns" tfbackup "github.com/hashicorp/terraform-provider-aws/internal/service/backup" "github.com/hashicorp/terraform-provider-aws/names" @@ -124,89 +118,6 @@ func testAccRegionSettings_basic(t *testing.T) { }) } -func testAccBackupRegionSettings_Identity_ExistingResource(t *testing.T) { - ctx := acctest.Context(t) - var settings backup.DescribeRegionSettingsOutput - resourceName := "aws_backup_region_settings.test" - - resource.Test(t, resource.TestCase{ - TerraformVersionChecks: []tfversion.TerraformVersionCheck{ - tfversion.SkipBelow(tfversion.Version1_12_0), - }, - PreCheck: func() { - acctest.PreCheck(ctx, t) - acctest.PreCheckPartitionHasService(t, names.FSxEndpointID) - testAccPreCheck(ctx, t) - }, - ErrorCheck: acctest.ErrorCheck(t, names.BackupServiceID), - CheckDestroy: acctest.CheckDestroyNoop, - Steps: []resource.TestStep{ - { - ExternalProviders: map[string]resource.ExternalProvider{ - "aws": { - Source: "hashicorp/aws", - VersionConstraint: "5.100.0", - }, - 
}, - Config: testAccRegionSettingsConfig_1(), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckRegionSettingsExists(ctx, resourceName, &settings), - ), - ConfigStateChecks: []statecheck.StateCheck{ - tfstatecheck.ExpectNoIdentity(resourceName), - }, - }, - { - ExternalProviders: map[string]resource.ExternalProvider{ - "aws": { - Source: "hashicorp/aws", - VersionConstraint: "6.0.0", - }, - }, - Config: testAccRegionSettingsConfig_1(), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckRegionSettingsExists(ctx, resourceName, &settings), - ), - ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - PostApplyPostRefresh: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - }, - ConfigStateChecks: []statecheck.StateCheck{ - statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ - names.AttrAccountID: knownvalue.Null(), - names.AttrRegion: knownvalue.Null(), - }), - }, - }, - { - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - Config: testAccRegionSettingsConfig_1(), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckRegionSettingsExists(ctx, resourceName, &settings), - ), - ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - PostApplyPostRefresh: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - }, - ConfigStateChecks: []statecheck.StateCheck{ - statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ - names.AttrAccountID: tfknownvalue.AccountID(), - names.AttrRegion: knownvalue.StringExact(acctest.Region()), - }), - }, - }, - }, - }) -} - func testAccCheckRegionSettingsExists(ctx context.Context, n string, v *backup.DescribeRegionSettingsOutput) 
resource.TestCheckFunc { return func(s *terraform.State) error { conn := acctest.Provider.Meta().(*conns.AWSClient).BackupClient(ctx) diff --git a/internal/service/backup/report_plan_data_source_tags_gen_test.go b/internal/service/backup/report_plan_data_source_tags_gen_test.go index bb8426113134..aa38ff602ae0 100644 --- a/internal/service/backup/report_plan_data_source_tags_gen_test.go +++ b/internal/service/backup/report_plan_data_source_tags_gen_test.go @@ -16,10 +16,11 @@ import ( func TestAccBackupReportPlanDataSource_tags(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_backup_report_plan.test" rName := randomReportPlanName() - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BackupServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -44,10 +45,11 @@ func TestAccBackupReportPlanDataSource_tags(t *testing.T) { func TestAccBackupReportPlanDataSource_tags_NullMap(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_backup_report_plan.test" rName := randomReportPlanName() - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BackupServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -68,10 +70,11 @@ func TestAccBackupReportPlanDataSource_tags_NullMap(t *testing.T) { func TestAccBackupReportPlanDataSource_tags_EmptyMap(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_backup_report_plan.test" rName := randomReportPlanName() - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BackupServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -92,10 +95,11 @@ 
func TestAccBackupReportPlanDataSource_tags_EmptyMap(t *testing.T) { func TestAccBackupReportPlanDataSource_tags_DefaultTags_nonOverlapping(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_backup_report_plan.test" rName := randomReportPlanName() - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BackupServiceID), Steps: []resource.TestStep{ @@ -124,10 +128,11 @@ func TestAccBackupReportPlanDataSource_tags_DefaultTags_nonOverlapping(t *testin func TestAccBackupReportPlanDataSource_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_backup_report_plan.test" rName := randomReportPlanName() - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BackupServiceID), Steps: []resource.TestStep{ @@ -162,10 +167,11 @@ func TestAccBackupReportPlanDataSource_tags_IgnoreTags_Overlap_DefaultTag(t *tes func TestAccBackupReportPlanDataSource_tags_IgnoreTags_Overlap_ResourceTag(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_backup_report_plan.test" rName := randomReportPlanName() - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BackupServiceID), Steps: []resource.TestStep{ diff --git a/internal/service/backup/report_plan_tags_gen_test.go b/internal/service/backup/report_plan_tags_gen_test.go index 2c78a936ebe8..3cd2051137fb 100644 --- a/internal/service/backup/report_plan_tags_gen_test.go +++ b/internal/service/backup/report_plan_tags_gen_test.go @@ -18,11 +18,12 @@ import ( func TestAccBackupReportPlan_tags(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.ReportPlan resourceName := 
"aws_backup_report_plan.test" rName := randomReportPlanName() - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BackupServiceID), CheckDestroy: testAccCheckReportPlanDestroy(ctx), @@ -200,11 +201,12 @@ func TestAccBackupReportPlan_tags(t *testing.T) { func TestAccBackupReportPlan_tags_null(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.ReportPlan resourceName := "aws_backup_report_plan.test" rName := randomReportPlanName() - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BackupServiceID), CheckDestroy: testAccCheckReportPlanDestroy(ctx), @@ -267,11 +269,12 @@ func TestAccBackupReportPlan_tags_null(t *testing.T) { func TestAccBackupReportPlan_tags_EmptyMap(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.ReportPlan resourceName := "aws_backup_report_plan.test" rName := randomReportPlanName() - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BackupServiceID), CheckDestroy: testAccCheckReportPlanDestroy(ctx), @@ -330,11 +333,12 @@ func TestAccBackupReportPlan_tags_EmptyMap(t *testing.T) { func TestAccBackupReportPlan_tags_AddOnUpdate(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.ReportPlan resourceName := "aws_backup_report_plan.test" rName := randomReportPlanName() - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BackupServiceID), CheckDestroy: testAccCheckReportPlanDestroy(ctx), @@ -411,11 +415,12 @@ func TestAccBackupReportPlan_tags_AddOnUpdate(t *testing.T) { func 
TestAccBackupReportPlan_tags_EmptyTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.ReportPlan resourceName := "aws_backup_report_plan.test" rName := randomReportPlanName() - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BackupServiceID), CheckDestroy: testAccCheckReportPlanDestroy(ctx), @@ -500,11 +505,12 @@ func TestAccBackupReportPlan_tags_EmptyTag_OnCreate(t *testing.T) { func TestAccBackupReportPlan_tags_EmptyTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.ReportPlan resourceName := "aws_backup_report_plan.test" rName := randomReportPlanName() - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BackupServiceID), CheckDestroy: testAccCheckReportPlanDestroy(ctx), @@ -637,11 +643,12 @@ func TestAccBackupReportPlan_tags_EmptyTag_OnUpdate_Add(t *testing.T) { func TestAccBackupReportPlan_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.ReportPlan resourceName := "aws_backup_report_plan.test" rName := randomReportPlanName() - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BackupServiceID), CheckDestroy: testAccCheckReportPlanDestroy(ctx), @@ -726,11 +733,12 @@ func TestAccBackupReportPlan_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { func TestAccBackupReportPlan_tags_DefaultTags_providerOnly(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.ReportPlan resourceName := "aws_backup_report_plan.test" rName := randomReportPlanName() - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, 
ErrorCheck: acctest.ErrorCheck(t, names.BackupServiceID), CheckDestroy: testAccCheckReportPlanDestroy(ctx), @@ -907,11 +915,12 @@ func TestAccBackupReportPlan_tags_DefaultTags_providerOnly(t *testing.T) { func TestAccBackupReportPlan_tags_DefaultTags_nonOverlapping(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.ReportPlan resourceName := "aws_backup_report_plan.test" rName := randomReportPlanName() - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BackupServiceID), CheckDestroy: testAccCheckReportPlanDestroy(ctx), @@ -1067,11 +1076,12 @@ func TestAccBackupReportPlan_tags_DefaultTags_nonOverlapping(t *testing.T) { func TestAccBackupReportPlan_tags_DefaultTags_overlapping(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.ReportPlan resourceName := "aws_backup_report_plan.test" rName := randomReportPlanName() - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BackupServiceID), CheckDestroy: testAccCheckReportPlanDestroy(ctx), @@ -1243,11 +1253,12 @@ func TestAccBackupReportPlan_tags_DefaultTags_overlapping(t *testing.T) { func TestAccBackupReportPlan_tags_DefaultTags_updateToProviderOnly(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.ReportPlan resourceName := "aws_backup_report_plan.test" rName := randomReportPlanName() - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BackupServiceID), CheckDestroy: testAccCheckReportPlanDestroy(ctx), @@ -1333,11 +1344,12 @@ func TestAccBackupReportPlan_tags_DefaultTags_updateToProviderOnly(t *testing.T) func TestAccBackupReportPlan_tags_DefaultTags_updateToResourceOnly(t *testing.T) { ctx := 
acctest.Context(t) + var v awstypes.ReportPlan resourceName := "aws_backup_report_plan.test" rName := randomReportPlanName() - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BackupServiceID), CheckDestroy: testAccCheckReportPlanDestroy(ctx), @@ -1422,11 +1434,12 @@ func TestAccBackupReportPlan_tags_DefaultTags_updateToResourceOnly(t *testing.T) func TestAccBackupReportPlan_tags_DefaultTags_emptyResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.ReportPlan resourceName := "aws_backup_report_plan.test" rName := randomReportPlanName() - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BackupServiceID), CheckDestroy: testAccCheckReportPlanDestroy(ctx), @@ -1487,11 +1500,12 @@ func TestAccBackupReportPlan_tags_DefaultTags_emptyResourceTag(t *testing.T) { func TestAccBackupReportPlan_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.ReportPlan resourceName := "aws_backup_report_plan.test" rName := randomReportPlanName() - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BackupServiceID), CheckDestroy: testAccCheckReportPlanDestroy(ctx), @@ -1544,11 +1558,12 @@ func TestAccBackupReportPlan_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T) func TestAccBackupReportPlan_tags_DefaultTags_nullOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.ReportPlan resourceName := "aws_backup_report_plan.test" rName := randomReportPlanName() - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: 
acctest.ErrorCheck(t, names.BackupServiceID), CheckDestroy: testAccCheckReportPlanDestroy(ctx), @@ -1606,11 +1621,12 @@ func TestAccBackupReportPlan_tags_DefaultTags_nullOverlappingResourceTag(t *test func TestAccBackupReportPlan_tags_DefaultTags_nullNonOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.ReportPlan resourceName := "aws_backup_report_plan.test" rName := randomReportPlanName() - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BackupServiceID), CheckDestroy: testAccCheckReportPlanDestroy(ctx), @@ -1668,11 +1684,12 @@ func TestAccBackupReportPlan_tags_DefaultTags_nullNonOverlappingResourceTag(t *t func TestAccBackupReportPlan_tags_ComputedTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.ReportPlan resourceName := "aws_backup_report_plan.test" rName := randomReportPlanName() - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BackupServiceID), CheckDestroy: testAccCheckReportPlanDestroy(ctx), @@ -1723,11 +1740,12 @@ func TestAccBackupReportPlan_tags_ComputedTag_OnCreate(t *testing.T) { func TestAccBackupReportPlan_tags_ComputedTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.ReportPlan resourceName := "aws_backup_report_plan.test" rName := randomReportPlanName() - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BackupServiceID), CheckDestroy: testAccCheckReportPlanDestroy(ctx), @@ -1820,11 +1838,12 @@ func TestAccBackupReportPlan_tags_ComputedTag_OnUpdate_Add(t *testing.T) { func TestAccBackupReportPlan_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + var v 
awstypes.ReportPlan resourceName := "aws_backup_report_plan.test" rName := randomReportPlanName() - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BackupServiceID), CheckDestroy: testAccCheckReportPlanDestroy(ctx), @@ -1907,11 +1926,12 @@ func TestAccBackupReportPlan_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { func TestAccBackupReportPlan_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.ReportPlan resourceName := "aws_backup_report_plan.test" rName := randomReportPlanName() - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BackupServiceID), CheckDestroy: testAccCheckReportPlanDestroy(ctx), @@ -2069,11 +2089,12 @@ func TestAccBackupReportPlan_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { func TestAccBackupReportPlan_tags_IgnoreTags_Overlap_ResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.ReportPlan resourceName := "aws_backup_report_plan.test" rName := randomReportPlanName() - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BackupServiceID), CheckDestroy: testAccCheckReportPlanDestroy(ctx), diff --git a/internal/service/backup/selection.go b/internal/service/backup/selection.go index 443bdfde80af..add34fd5efd9 100644 --- a/internal/service/backup/selection.go +++ b/internal/service/backup/selection.go @@ -207,7 +207,7 @@ func resourceSelectionCreate(ctx context.Context, d *schema.ResourceData, meta a // Retry for IAM eventual consistency. 
outputRaw, err := tfresource.RetryWhen(ctx, propagationTimeout, - func() (any, error) { + func(ctx context.Context) (any, error) { return conn.CreateBackupSelection(ctx, input) }, func(err error) (bool, error) { @@ -235,7 +235,7 @@ func resourceSelectionCreate(ctx context.Context, d *schema.ResourceData, meta a // Maximum amount of time to wait for Backup changes to propagate. timeout = 2 * time.Minute ) - _, err = tfresource.RetryWhenNotFound(ctx, timeout, func() (any, error) { + _, err = tfresource.RetryWhenNotFound(ctx, timeout, func(ctx context.Context) (any, error) { return findSelectionByTwoPartKey(ctx, conn, planID, d.Id()) }) diff --git a/internal/service/backup/service_endpoint_resolver_gen.go b/internal/service/backup/service_endpoint_resolver_gen.go index 3c3173a9d902..421d439a68aa 100644 --- a/internal/service/backup/service_endpoint_resolver_gen.go +++ b/internal/service/backup/service_endpoint_resolver_gen.go @@ -62,7 +62,7 @@ func (r resolverV2) ResolveEndpoint(ctx context.Context, params backup.EndpointP }) params.UseFIPS = aws.Bool(false) } else { - err = fmt.Errorf("looking up backup endpoint %q: %s", hostname, err) + err = fmt.Errorf("looking up backup endpoint %q: %w", hostname, err) return } } else { diff --git a/internal/service/backup/service_endpoints_gen_test.go b/internal/service/backup/service_endpoints_gen_test.go index 263bb7de1e82..c559e8cfc27a 100644 --- a/internal/service/backup/service_endpoints_gen_test.go +++ b/internal/service/backup/service_endpoints_gen_test.go @@ -521,7 +521,7 @@ func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { ) } -var errCancelOperation = fmt.Errorf("Test: Canceling request") +var errCancelOperation = errors.New("Test: Canceling request") func addCancelRequestMiddleware() func(*middleware.Stack) error { return func(stack *middleware.Stack) error { diff --git a/internal/service/backup/service_package_gen.go b/internal/service/backup/service_package_gen.go index 
f880b9659815..40f8a103338f 100644 --- a/internal/service/backup/service_package_gen.go +++ b/internal/service/backup/service_package_gen.go @@ -7,7 +7,6 @@ import ( "unique" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/backup" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -204,7 +203,7 @@ func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) ( func(o *backup.Options) { if inContext, ok := conns.FromContext(ctx); ok && inContext.VCREnabled() { tflog.Info(ctx, "overriding retry behavior to immediately return VCR errors") - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(vcr.InteractionNotFoundRetryableFunc)) + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), vcr.InteractionNotFoundRetryableFunc) } }, withExtraOptions(ctx, p, config), diff --git a/internal/service/backup/sweep.go b/internal/service/backup/sweep.go index 3b0e6790ff37..19ed24fa8b3f 100644 --- a/internal/service/backup/sweep.go +++ b/internal/service/backup/sweep.go @@ -84,7 +84,7 @@ func sweepFrameworks(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %w", err) + return fmt.Errorf("getting client: %w", err) } conn := client.BackupClient(ctx) input := &backup.ListFrameworksInput{} @@ -123,7 +123,7 @@ func sweepPlans(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %w", err) + return fmt.Errorf("getting client: %w", err) } conn := client.BackupClient(ctx) input := &backup.ListBackupPlansInput{} @@ -169,7 +169,7 @@ func sweepSelections(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, 
region) if err != nil { - return fmt.Errorf("error getting client: %w", err) + return fmt.Errorf("getting client: %w", err) } conn := client.BackupClient(ctx) input := &backup.ListBackupPlansInput{} @@ -231,7 +231,7 @@ func sweepReportPlans(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %w", err) + return fmt.Errorf("getting client: %w", err) } conn := client.BackupClient(ctx) input := &backup.ListReportPlansInput{} @@ -270,7 +270,7 @@ func sweepRestoreTestingPlans(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %w", err) + return fmt.Errorf("getting client: %w", err) } conn := client.BackupClient(ctx) input := &backup.ListRestoreTestingPlansInput{} @@ -306,7 +306,7 @@ func sweepRestoreTestingSelections(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %w", err) + return fmt.Errorf("getting client: %w", err) } conn := client.BackupClient(ctx) input := &backup.ListRestoreTestingPlansInput{} @@ -359,7 +359,7 @@ func sweepVaultLockConfigurations(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %w", err) + return fmt.Errorf("getting client: %w", err) } conn := client.BackupClient(ctx) input := &backup.ListBackupVaultsInput{} @@ -398,7 +398,7 @@ func sweepVaultNotifications(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %w", err) + return fmt.Errorf("getting client: %w", err) } conn := client.BackupClient(ctx) input := &backup.ListBackupVaultsInput{} diff --git 
a/internal/service/backup/tags_gen.go b/internal/service/backup/tags_gen.go index 4bbdaeac64a5..4aef25b828a6 100644 --- a/internal/service/backup/tags_gen.go +++ b/internal/service/backup/tags_gen.go @@ -3,9 +3,9 @@ package backup import ( "context" - "fmt" "maps" + "github.com/YakDriver/smarterr" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/backup" "github.com/hashicorp/terraform-plugin-log/tflog" @@ -31,7 +31,7 @@ func listTags(ctx context.Context, conn *backup.Client, identifier string, optFn page, err := pages.NextPage(ctx, optFns...) if err != nil { - return tftags.New(ctx, nil), err + return tftags.New(ctx, nil), smarterr.NewError(err) } maps.Copy(output, page.Tags) @@ -46,7 +46,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri tags, err := listTags(ctx, meta.(*conns.AWSClient).BackupClient(ctx), identifier) if err != nil { - return err + return smarterr.NewError(err) } if inContext, ok := tftags.FromContext(ctx); ok { @@ -107,7 +107,7 @@ func updateTags(ctx context.Context, conn *backup.Client, identifier string, old _, err := conn.UntagResource(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("untagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } @@ -122,7 +122,7 @@ func updateTags(ctx context.Context, conn *backup.Client, identifier string, old _, err := conn.TagResource(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("tagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } diff --git a/internal/service/backup/testdata/RegionSettings/basic_v5.100.0/main_gen.tf b/internal/service/backup/testdata/RegionSettings/basic_v5.100.0/main_gen.tf new file mode 100644 index 000000000000..03bde84a7027 --- /dev/null +++ b/internal/service/backup/testdata/RegionSettings/basic_v5.100.0/main_gen.tf @@ -0,0 +1,36 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_backup_region_settings" "test" { + resource_type_opt_in_preference = { + "Aurora" = true + "CloudFormation" = true + "DocumentDB" = true + "DSQL" = true + "DynamoDB" = true + "EBS" = true + "EC2" = true + "EFS" = true + "FSx" = true + "Neptune" = true + "RDS" = true + "Redshift" = true + "Redshift Serverless" = true + "S3" = true + "SAP HANA on Amazon EC2" = true + "Storage Gateway" = true + "Timestream" = true + "VirtualMachine" = true + } +} + +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "5.100.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/backup/testdata/RegionSettings/basic_v6.0.0/main_gen.tf b/internal/service/backup/testdata/RegionSettings/basic_v6.0.0/main_gen.tf new file mode 100644 index 000000000000..6c417d328872 --- /dev/null +++ b/internal/service/backup/testdata/RegionSettings/basic_v6.0.0/main_gen.tf @@ -0,0 +1,36 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resource "aws_backup_region_settings" "test" { + resource_type_opt_in_preference = { + "Aurora" = true + "CloudFormation" = true + "DocumentDB" = true + "DSQL" = true + "DynamoDB" = true + "EBS" = true + "EC2" = true + "EFS" = true + "FSx" = true + "Neptune" = true + "RDS" = true + "Redshift" = true + "Redshift Serverless" = true + "S3" = true + "SAP HANA on Amazon EC2" = true + "Storage Gateway" = true + "Timestream" = true + "VirtualMachine" = true + } +} + +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "6.0.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/backup/vault_data_source_tags_gen_test.go b/internal/service/backup/vault_data_source_tags_gen_test.go index f7c732f605f1..16f69d35db4e 100644 --- a/internal/service/backup/vault_data_source_tags_gen_test.go +++ b/internal/service/backup/vault_data_source_tags_gen_test.go @@ -6,7 +6,6 @@ import ( "testing" 
"github.com/hashicorp/terraform-plugin-testing/config" - sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/knownvalue" "github.com/hashicorp/terraform-plugin-testing/statecheck" @@ -17,10 +16,11 @@ import ( func TestAccBackupVaultDataSource_tags(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_backup_vault.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BackupServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -45,10 +45,11 @@ func TestAccBackupVaultDataSource_tags(t *testing.T) { func TestAccBackupVaultDataSource_tags_NullMap(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_backup_vault.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BackupServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -69,10 +70,11 @@ func TestAccBackupVaultDataSource_tags_NullMap(t *testing.T) { func TestAccBackupVaultDataSource_tags_EmptyMap(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_backup_vault.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, 
names.BackupServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -93,10 +95,11 @@ func TestAccBackupVaultDataSource_tags_EmptyMap(t *testing.T) { func TestAccBackupVaultDataSource_tags_DefaultTags_nonOverlapping(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_backup_vault.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BackupServiceID), Steps: []resource.TestStep{ @@ -125,10 +128,11 @@ func TestAccBackupVaultDataSource_tags_DefaultTags_nonOverlapping(t *testing.T) func TestAccBackupVaultDataSource_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_backup_vault.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BackupServiceID), Steps: []resource.TestStep{ @@ -163,10 +167,11 @@ func TestAccBackupVaultDataSource_tags_IgnoreTags_Overlap_DefaultTag(t *testing. 
func TestAccBackupVaultDataSource_tags_IgnoreTags_Overlap_ResourceTag(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_backup_vault.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BackupServiceID), Steps: []resource.TestStep{ diff --git a/internal/service/backup/vault_policy.go b/internal/service/backup/vault_policy.go index 0b026154cd27..28e242434ec2 100644 --- a/internal/service/backup/vault_policy.go +++ b/internal/service/backup/vault_policy.go @@ -67,7 +67,7 @@ func resourceVaultPolicyPut(ctx context.Context, d *schema.ResourceData, meta an } _, err = tfresource.RetryWhenAWSErrMessageContains(ctx, propagationTimeout, - func() (any, error) { + func(ctx context.Context) (any, error) { return conn.PutBackupVaultAccessPolicy(ctx, input) }, errCodeInvalidParameterValueException, "Provided principal is not valid", diff --git a/internal/service/backup/vault_tags_gen_test.go b/internal/service/backup/vault_tags_gen_test.go index e9e61af20ca3..0ca92b105673 100644 --- a/internal/service/backup/vault_tags_gen_test.go +++ b/internal/service/backup/vault_tags_gen_test.go @@ -7,7 +7,6 @@ import ( "github.com/aws/aws-sdk-go-v2/service/backup" "github.com/hashicorp/terraform-plugin-testing/config" - sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/knownvalue" "github.com/hashicorp/terraform-plugin-testing/plancheck" @@ -19,11 +18,12 @@ import ( func TestAccBackupVault_tags(t *testing.T) { ctx := acctest.Context(t) + var v backup.DescribeBackupVaultOutput resourceName := "aws_backup_vault.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + 
rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BackupServiceID), CheckDestroy: testAccCheckVaultDestroy(ctx), @@ -213,11 +213,12 @@ func TestAccBackupVault_tags(t *testing.T) { func TestAccBackupVault_tags_null(t *testing.T) { ctx := acctest.Context(t) + var v backup.DescribeBackupVaultOutput resourceName := "aws_backup_vault.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BackupServiceID), CheckDestroy: testAccCheckVaultDestroy(ctx), @@ -283,11 +284,12 @@ func TestAccBackupVault_tags_null(t *testing.T) { func TestAccBackupVault_tags_EmptyMap(t *testing.T) { ctx := acctest.Context(t) + var v backup.DescribeBackupVaultOutput resourceName := "aws_backup_vault.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BackupServiceID), CheckDestroy: testAccCheckVaultDestroy(ctx), @@ -349,11 +351,12 @@ func TestAccBackupVault_tags_EmptyMap(t *testing.T) { func TestAccBackupVault_tags_AddOnUpdate(t *testing.T) { ctx := acctest.Context(t) + var v backup.DescribeBackupVaultOutput resourceName := "aws_backup_vault.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { 
acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BackupServiceID), CheckDestroy: testAccCheckVaultDestroy(ctx), @@ -433,11 +436,12 @@ func TestAccBackupVault_tags_AddOnUpdate(t *testing.T) { func TestAccBackupVault_tags_EmptyTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) + var v backup.DescribeBackupVaultOutput resourceName := "aws_backup_vault.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BackupServiceID), CheckDestroy: testAccCheckVaultDestroy(ctx), @@ -528,11 +532,12 @@ func TestAccBackupVault_tags_EmptyTag_OnCreate(t *testing.T) { func TestAccBackupVault_tags_EmptyTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + var v backup.DescribeBackupVaultOutput resourceName := "aws_backup_vault.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BackupServiceID), CheckDestroy: testAccCheckVaultDestroy(ctx), @@ -671,11 +676,12 @@ func TestAccBackupVault_tags_EmptyTag_OnUpdate_Add(t *testing.T) { func TestAccBackupVault_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + var v backup.DescribeBackupVaultOutput resourceName := "aws_backup_vault.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BackupServiceID), CheckDestroy: 
testAccCheckVaultDestroy(ctx), @@ -763,11 +769,12 @@ func TestAccBackupVault_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { func TestAccBackupVault_tags_DefaultTags_providerOnly(t *testing.T) { ctx := acctest.Context(t) + var v backup.DescribeBackupVaultOutput resourceName := "aws_backup_vault.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BackupServiceID), CheckDestroy: testAccCheckVaultDestroy(ctx), @@ -956,11 +963,12 @@ func TestAccBackupVault_tags_DefaultTags_providerOnly(t *testing.T) { func TestAccBackupVault_tags_DefaultTags_nonOverlapping(t *testing.T) { ctx := acctest.Context(t) + var v backup.DescribeBackupVaultOutput resourceName := "aws_backup_vault.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BackupServiceID), CheckDestroy: testAccCheckVaultDestroy(ctx), @@ -1125,11 +1133,12 @@ func TestAccBackupVault_tags_DefaultTags_nonOverlapping(t *testing.T) { func TestAccBackupVault_tags_DefaultTags_overlapping(t *testing.T) { ctx := acctest.Context(t) + var v backup.DescribeBackupVaultOutput resourceName := "aws_backup_vault.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BackupServiceID), CheckDestroy: testAccCheckVaultDestroy(ctx), @@ -1310,11 +1319,12 @@ func 
TestAccBackupVault_tags_DefaultTags_overlapping(t *testing.T) { func TestAccBackupVault_tags_DefaultTags_updateToProviderOnly(t *testing.T) { ctx := acctest.Context(t) + var v backup.DescribeBackupVaultOutput resourceName := "aws_backup_vault.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BackupServiceID), CheckDestroy: testAccCheckVaultDestroy(ctx), @@ -1403,11 +1413,12 @@ func TestAccBackupVault_tags_DefaultTags_updateToProviderOnly(t *testing.T) { func TestAccBackupVault_tags_DefaultTags_updateToResourceOnly(t *testing.T) { ctx := acctest.Context(t) + var v backup.DescribeBackupVaultOutput resourceName := "aws_backup_vault.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BackupServiceID), CheckDestroy: testAccCheckVaultDestroy(ctx), @@ -1495,11 +1506,12 @@ func TestAccBackupVault_tags_DefaultTags_updateToResourceOnly(t *testing.T) { func TestAccBackupVault_tags_DefaultTags_emptyResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v backup.DescribeBackupVaultOutput resourceName := "aws_backup_vault.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BackupServiceID), CheckDestroy: testAccCheckVaultDestroy(ctx), @@ -1563,11 +1575,12 @@ func 
TestAccBackupVault_tags_DefaultTags_emptyResourceTag(t *testing.T) { func TestAccBackupVault_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T) { ctx := acctest.Context(t) + var v backup.DescribeBackupVaultOutput resourceName := "aws_backup_vault.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BackupServiceID), CheckDestroy: testAccCheckVaultDestroy(ctx), @@ -1623,11 +1636,12 @@ func TestAccBackupVault_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T) { func TestAccBackupVault_tags_DefaultTags_nullOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v backup.DescribeBackupVaultOutput resourceName := "aws_backup_vault.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BackupServiceID), CheckDestroy: testAccCheckVaultDestroy(ctx), @@ -1688,11 +1702,12 @@ func TestAccBackupVault_tags_DefaultTags_nullOverlappingResourceTag(t *testing.T func TestAccBackupVault_tags_DefaultTags_nullNonOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v backup.DescribeBackupVaultOutput resourceName := "aws_backup_vault.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BackupServiceID), CheckDestroy: testAccCheckVaultDestroy(ctx), @@ -1753,11 +1768,12 @@ func 
TestAccBackupVault_tags_DefaultTags_nullNonOverlappingResourceTag(t *testin func TestAccBackupVault_tags_ComputedTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) + var v backup.DescribeBackupVaultOutput resourceName := "aws_backup_vault.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BackupServiceID), CheckDestroy: testAccCheckVaultDestroy(ctx), @@ -1811,11 +1827,12 @@ func TestAccBackupVault_tags_ComputedTag_OnCreate(t *testing.T) { func TestAccBackupVault_tags_ComputedTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + var v backup.DescribeBackupVaultOutput resourceName := "aws_backup_vault.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BackupServiceID), CheckDestroy: testAccCheckVaultDestroy(ctx), @@ -1911,11 +1928,12 @@ func TestAccBackupVault_tags_ComputedTag_OnUpdate_Add(t *testing.T) { func TestAccBackupVault_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + var v backup.DescribeBackupVaultOutput resourceName := "aws_backup_vault.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BackupServiceID), CheckDestroy: testAccCheckVaultDestroy(ctx), @@ -2001,11 +2019,12 @@ func TestAccBackupVault_tags_ComputedTag_OnUpdate_Replace(t 
*testing.T) { func TestAccBackupVault_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { ctx := acctest.Context(t) + var v backup.DescribeBackupVaultOutput resourceName := "aws_backup_vault.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BackupServiceID), CheckDestroy: testAccCheckVaultDestroy(ctx), @@ -2163,11 +2182,12 @@ func TestAccBackupVault_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { func TestAccBackupVault_tags_IgnoreTags_Overlap_ResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v backup.DescribeBackupVaultOutput resourceName := "aws_backup_vault.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BackupServiceID), CheckDestroy: testAccCheckVaultDestroy(ctx), diff --git a/internal/service/batch/compute_environment.go b/internal/service/batch/compute_environment.go index 25224cd4b4a2..d31ad7546ba0 100644 --- a/internal/service/batch/compute_environment.go +++ b/internal/service/batch/compute_environment.go @@ -81,7 +81,7 @@ func resourceComputeEnvironment() *schema.Resource { "compute_resources": { Type: schema.TypeList, Optional: true, - ForceNew: true, + Computed: true, MinItems: 0, MaxItems: 1, Elem: &schema.Resource{ @@ -115,6 +115,11 @@ func resourceComputeEnvironment() *schema.Resource { Computed: true, ValidateFunc: validation.StringLenBetween(1, 256), }, + "image_kubernetes_version": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringLenBetween(1, 256), + }, "image_type": { Type: 
schema.TypeString, Optional: true, @@ -264,17 +269,21 @@ func resourceComputeEnvironment() *schema.Resource { "update_policy": { Type: schema.TypeList, Optional: true, + Computed: true, MaxItems: 1, Elem: &schema.Resource{ + // https://docs.aws.amazon.com/batch/latest/APIReference/API_UpdatePolicy.html Schema: map[string]*schema.Schema{ "job_execution_timeout_minutes": { Type: schema.TypeInt, - Required: true, + Optional: true, + Computed: true, ValidateFunc: validation.IntBetween(1, 360), }, "terminate_jobs_on_update": { Type: schema.TypeBool, - Required: true, + Optional: true, + Computed: true, }, }, }, @@ -571,7 +580,7 @@ func resourceComputeEnvironmentDelete(ctx context.Context, d *schema.ResourceDat return diags } -func resourceComputeEnvironmentCustomizeDiff(_ context.Context, diff *schema.ResourceDiff, meta any) error { +func resourceComputeEnvironmentCustomizeDiff(ctx context.Context, diff *schema.ResourceDiff, _ any) error { if computeEnvironmentType := strings.ToUpper(diff.Get(names.AttrType).(string)); computeEnvironmentType == string(awstypes.CETypeUnmanaged) { // UNMANAGED compute environments can have no compute_resources configured. if v, ok := diff.GetOk("compute_resources"); ok && len(v.([]any)) > 0 && v.([]any)[0] != nil { @@ -621,6 +630,12 @@ func resourceComputeEnvironmentCustomizeDiff(_ context.Context, diff *schema.Res } } + if diff.HasChange("compute_resources.0.ec2_configuration.0.image_kubernetes_version") { + if err := diff.ForceNew("compute_resources.0.ec2_configuration.0.image_kubernetes_version"); err != nil { + return err + } + } + if diff.HasChange("compute_resources.0.ec2_configuration.0.image_type") { if err := diff.ForceNew("compute_resources.0.ec2_configuration.0.image_type"); err != nil { return err @@ -657,6 +672,19 @@ func resourceComputeEnvironmentCustomizeDiff(_ context.Context, diff *schema.Res } } + // If the launch template version is unknown, set new value to ForceNew. 
+ if v := diff.GetRawPlan().GetAttr("compute_resources"); v.IsKnown() && v.LengthInt() == 1 { + if v := v.AsValueSlice()[0].GetAttr(names.AttrLaunchTemplate); v.IsKnown() && v.LengthInt() == 1 { + if v := v.AsValueSlice()[0].GetAttr(names.AttrVersion); !v.IsKnown() { + out := expandComputeResource(ctx, diff.Get("compute_resources").([]any)[0].(map[string]any)) + out.LaunchTemplate.Version = aws.String(" ") // set version to a new empty value to trigger a replacement + if err := diff.SetNew("compute_resources", []any{flattenComputeResource(ctx, out)}); err != nil { + return err + } + } + } + } + if diff.HasChange("compute_resources.0.launch_template.0.launch_template_id") { if err := diff.ForceNew("compute_resources.0.launch_template.0.launch_template_id"); err != nil { return err @@ -865,7 +893,11 @@ func isUpdatableAllocationStrategyDiff(diff *schema.ResourceDiff) bool { } func isUpdatableAllocationStrategy(allocationStrategy awstypes.CRAllocationStrategy) bool { - return allocationStrategy == awstypes.CRAllocationStrategyBestFitProgressive || allocationStrategy == awstypes.CRAllocationStrategySpotCapacityOptimized + switch allocationStrategy { + case awstypes.CRAllocationStrategyBestFitProgressive, awstypes.CRAllocationStrategySpotCapacityOptimized, awstypes.CRAllocationStrategySpotPriceCapacityOptimized: + return true + } + return false } func expandComputeResource(ctx context.Context, tfMap map[string]any) *awstypes.ComputeResource { @@ -983,6 +1015,10 @@ func expandEC2Configuration(tfMap map[string]any) *awstypes.Ec2Configuration { apiObject.ImageIdOverride = aws.String(v) } + if v, ok := tfMap["image_kubernetes_version"].(string); ok && v != "" { + apiObject.ImageKubernetesVersion = aws.String(v) + } + if v, ok := tfMap["image_type"].(string); ok && v != "" { apiObject.ImageType = aws.String(v) } @@ -1196,6 +1232,10 @@ func flattenEC2Configuration(apiObject *awstypes.Ec2Configuration) map[string]an tfMap["image_id_override"] = aws.ToString(v) } + if v := 
apiObject.ImageKubernetesVersion; v != nil { + tfMap["image_kubernetes_version"] = aws.ToString(v) + } + if v := apiObject.ImageType; v != nil { tfMap["image_type"] = aws.ToString(v) } diff --git a/internal/service/batch/compute_environment_data_source_tags_gen_test.go b/internal/service/batch/compute_environment_data_source_tags_gen_test.go index 6903bdb97238..462996a71fa9 100644 --- a/internal/service/batch/compute_environment_data_source_tags_gen_test.go +++ b/internal/service/batch/compute_environment_data_source_tags_gen_test.go @@ -8,7 +8,6 @@ import ( "unique" "github.com/hashicorp/terraform-plugin-testing/config" - sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/knownvalue" "github.com/hashicorp/terraform-plugin-testing/statecheck" @@ -22,10 +21,11 @@ import ( func TestAccBatchComputeEnvironmentDataSource_tags(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_batch_compute_environment.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BatchServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -50,10 +50,11 @@ func TestAccBatchComputeEnvironmentDataSource_tags(t *testing.T) { func TestAccBatchComputeEnvironmentDataSource_tags_NullMap(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_batch_compute_environment.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: 
acctest.ErrorCheck(t, names.BatchServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -74,10 +75,11 @@ func TestAccBatchComputeEnvironmentDataSource_tags_NullMap(t *testing.T) { func TestAccBatchComputeEnvironmentDataSource_tags_EmptyMap(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_batch_compute_environment.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BatchServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -98,10 +100,11 @@ func TestAccBatchComputeEnvironmentDataSource_tags_EmptyMap(t *testing.T) { func TestAccBatchComputeEnvironmentDataSource_tags_DefaultTags_nonOverlapping(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_batch_compute_environment.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BatchServiceID), Steps: []resource.TestStep{ @@ -130,10 +133,11 @@ func TestAccBatchComputeEnvironmentDataSource_tags_DefaultTags_nonOverlapping(t func TestAccBatchComputeEnvironmentDataSource_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_batch_compute_environment.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BatchServiceID), 
Steps: []resource.TestStep{ @@ -168,10 +172,11 @@ func TestAccBatchComputeEnvironmentDataSource_tags_IgnoreTags_Overlap_DefaultTag func TestAccBatchComputeEnvironmentDataSource_tags_IgnoreTags_Overlap_ResourceTag(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_batch_compute_environment.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BatchServiceID), Steps: []resource.TestStep{ diff --git a/internal/service/batch/compute_environment_tags_gen_test.go b/internal/service/batch/compute_environment_tags_gen_test.go index 8e858d67c007..0115131c0748 100644 --- a/internal/service/batch/compute_environment_tags_gen_test.go +++ b/internal/service/batch/compute_environment_tags_gen_test.go @@ -7,7 +7,6 @@ import ( "github.com/aws/aws-sdk-go-v2/service/batch/types" "github.com/hashicorp/terraform-plugin-testing/config" - sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/knownvalue" "github.com/hashicorp/terraform-plugin-testing/plancheck" @@ -19,11 +18,12 @@ import ( func TestAccBatchComputeEnvironment_tags(t *testing.T) { ctx := acctest.Context(t) + var v types.ComputeEnvironmentDetail resourceName := "aws_batch_compute_environment.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BatchServiceID), CheckDestroy: testAccCheckComputeEnvironmentDestroy(ctx), @@ -201,11 +201,12 @@ func 
TestAccBatchComputeEnvironment_tags(t *testing.T) { func TestAccBatchComputeEnvironment_tags_null(t *testing.T) { ctx := acctest.Context(t) + var v types.ComputeEnvironmentDetail resourceName := "aws_batch_compute_environment.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BatchServiceID), CheckDestroy: testAccCheckComputeEnvironmentDestroy(ctx), @@ -268,11 +269,12 @@ func TestAccBatchComputeEnvironment_tags_null(t *testing.T) { func TestAccBatchComputeEnvironment_tags_EmptyMap(t *testing.T) { ctx := acctest.Context(t) + var v types.ComputeEnvironmentDetail resourceName := "aws_batch_compute_environment.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BatchServiceID), CheckDestroy: testAccCheckComputeEnvironmentDestroy(ctx), @@ -331,11 +333,12 @@ func TestAccBatchComputeEnvironment_tags_EmptyMap(t *testing.T) { func TestAccBatchComputeEnvironment_tags_AddOnUpdate(t *testing.T) { ctx := acctest.Context(t) + var v types.ComputeEnvironmentDetail resourceName := "aws_batch_compute_environment.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BatchServiceID), CheckDestroy: testAccCheckComputeEnvironmentDestroy(ctx), @@ -412,11 +415,12 @@ func 
TestAccBatchComputeEnvironment_tags_AddOnUpdate(t *testing.T) { func TestAccBatchComputeEnvironment_tags_EmptyTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) + var v types.ComputeEnvironmentDetail resourceName := "aws_batch_compute_environment.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BatchServiceID), CheckDestroy: testAccCheckComputeEnvironmentDestroy(ctx), @@ -501,11 +505,12 @@ func TestAccBatchComputeEnvironment_tags_EmptyTag_OnCreate(t *testing.T) { func TestAccBatchComputeEnvironment_tags_EmptyTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + var v types.ComputeEnvironmentDetail resourceName := "aws_batch_compute_environment.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BatchServiceID), CheckDestroy: testAccCheckComputeEnvironmentDestroy(ctx), @@ -638,11 +643,12 @@ func TestAccBatchComputeEnvironment_tags_EmptyTag_OnUpdate_Add(t *testing.T) { func TestAccBatchComputeEnvironment_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + var v types.ComputeEnvironmentDetail resourceName := "aws_batch_compute_environment.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BatchServiceID), CheckDestroy: 
testAccCheckComputeEnvironmentDestroy(ctx), @@ -727,11 +733,12 @@ func TestAccBatchComputeEnvironment_tags_EmptyTag_OnUpdate_Replace(t *testing.T) func TestAccBatchComputeEnvironment_tags_DefaultTags_providerOnly(t *testing.T) { ctx := acctest.Context(t) + var v types.ComputeEnvironmentDetail resourceName := "aws_batch_compute_environment.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BatchServiceID), CheckDestroy: testAccCheckComputeEnvironmentDestroy(ctx), @@ -908,11 +915,12 @@ func TestAccBatchComputeEnvironment_tags_DefaultTags_providerOnly(t *testing.T) func TestAccBatchComputeEnvironment_tags_DefaultTags_nonOverlapping(t *testing.T) { ctx := acctest.Context(t) + var v types.ComputeEnvironmentDetail resourceName := "aws_batch_compute_environment.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BatchServiceID), CheckDestroy: testAccCheckComputeEnvironmentDestroy(ctx), @@ -1068,11 +1076,12 @@ func TestAccBatchComputeEnvironment_tags_DefaultTags_nonOverlapping(t *testing.T func TestAccBatchComputeEnvironment_tags_DefaultTags_overlapping(t *testing.T) { ctx := acctest.Context(t) + var v types.ComputeEnvironmentDetail resourceName := "aws_batch_compute_environment.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, 
ErrorCheck: acctest.ErrorCheck(t, names.BatchServiceID), CheckDestroy: testAccCheckComputeEnvironmentDestroy(ctx), @@ -1244,11 +1253,12 @@ func TestAccBatchComputeEnvironment_tags_DefaultTags_overlapping(t *testing.T) { func TestAccBatchComputeEnvironment_tags_DefaultTags_updateToProviderOnly(t *testing.T) { ctx := acctest.Context(t) + var v types.ComputeEnvironmentDetail resourceName := "aws_batch_compute_environment.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BatchServiceID), CheckDestroy: testAccCheckComputeEnvironmentDestroy(ctx), @@ -1334,11 +1344,12 @@ func TestAccBatchComputeEnvironment_tags_DefaultTags_updateToProviderOnly(t *tes func TestAccBatchComputeEnvironment_tags_DefaultTags_updateToResourceOnly(t *testing.T) { ctx := acctest.Context(t) + var v types.ComputeEnvironmentDetail resourceName := "aws_batch_compute_environment.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BatchServiceID), CheckDestroy: testAccCheckComputeEnvironmentDestroy(ctx), @@ -1423,11 +1434,12 @@ func TestAccBatchComputeEnvironment_tags_DefaultTags_updateToResourceOnly(t *tes func TestAccBatchComputeEnvironment_tags_DefaultTags_emptyResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v types.ComputeEnvironmentDetail resourceName := "aws_batch_compute_environment.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + 
acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BatchServiceID), CheckDestroy: testAccCheckComputeEnvironmentDestroy(ctx), @@ -1488,11 +1500,12 @@ func TestAccBatchComputeEnvironment_tags_DefaultTags_emptyResourceTag(t *testing func TestAccBatchComputeEnvironment_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T) { ctx := acctest.Context(t) + var v types.ComputeEnvironmentDetail resourceName := "aws_batch_compute_environment.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BatchServiceID), CheckDestroy: testAccCheckComputeEnvironmentDestroy(ctx), @@ -1545,11 +1558,12 @@ func TestAccBatchComputeEnvironment_tags_DefaultTags_emptyProviderOnlyTag(t *tes func TestAccBatchComputeEnvironment_tags_DefaultTags_nullOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v types.ComputeEnvironmentDetail resourceName := "aws_batch_compute_environment.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BatchServiceID), CheckDestroy: testAccCheckComputeEnvironmentDestroy(ctx), @@ -1607,11 +1621,12 @@ func TestAccBatchComputeEnvironment_tags_DefaultTags_nullOverlappingResourceTag( func TestAccBatchComputeEnvironment_tags_DefaultTags_nullNonOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v types.ComputeEnvironmentDetail resourceName := "aws_batch_compute_environment.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + 
rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BatchServiceID), CheckDestroy: testAccCheckComputeEnvironmentDestroy(ctx), @@ -1669,11 +1684,12 @@ func TestAccBatchComputeEnvironment_tags_DefaultTags_nullNonOverlappingResourceT func TestAccBatchComputeEnvironment_tags_ComputedTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) + var v types.ComputeEnvironmentDetail resourceName := "aws_batch_compute_environment.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BatchServiceID), CheckDestroy: testAccCheckComputeEnvironmentDestroy(ctx), @@ -1724,11 +1740,12 @@ func TestAccBatchComputeEnvironment_tags_ComputedTag_OnCreate(t *testing.T) { func TestAccBatchComputeEnvironment_tags_ComputedTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + var v types.ComputeEnvironmentDetail resourceName := "aws_batch_compute_environment.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BatchServiceID), CheckDestroy: testAccCheckComputeEnvironmentDestroy(ctx), @@ -1821,11 +1838,12 @@ func TestAccBatchComputeEnvironment_tags_ComputedTag_OnUpdate_Add(t *testing.T) func TestAccBatchComputeEnvironment_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + var v types.ComputeEnvironmentDetail resourceName := "aws_batch_compute_environment.test" - 
rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BatchServiceID), CheckDestroy: testAccCheckComputeEnvironmentDestroy(ctx), @@ -1908,11 +1926,12 @@ func TestAccBatchComputeEnvironment_tags_ComputedTag_OnUpdate_Replace(t *testing func TestAccBatchComputeEnvironment_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { ctx := acctest.Context(t) + var v types.ComputeEnvironmentDetail resourceName := "aws_batch_compute_environment.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BatchServiceID), CheckDestroy: testAccCheckComputeEnvironmentDestroy(ctx), @@ -2070,11 +2089,12 @@ func TestAccBatchComputeEnvironment_tags_IgnoreTags_Overlap_DefaultTag(t *testin func TestAccBatchComputeEnvironment_tags_IgnoreTags_Overlap_ResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v types.ComputeEnvironmentDetail resourceName := "aws_batch_compute_environment.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BatchServiceID), CheckDestroy: testAccCheckComputeEnvironmentDestroy(ctx), diff --git a/internal/service/batch/compute_environment_test.go b/internal/service/batch/compute_environment_test.go index 829d6145c220..cc426e6f1f82 100644 --- a/internal/service/batch/compute_environment_test.go +++ 
b/internal/service/batch/compute_environment_test.go @@ -41,6 +41,18 @@ func TestExpandEC2ConfigurationsUpdate(t *testing.T) { }, }, }, + { + flattened: []any{ + map[string]any{ + "image_kubernetes_version": "1.31", + }, + }, + expected: []awstypes.Ec2Configuration{ + { + ImageKubernetesVersion: aws.String("1.31"), + }, + }, + }, { flattened: []any{ map[string]any{ @@ -68,14 +80,16 @@ func TestExpandEC2ConfigurationsUpdate(t *testing.T) { { flattened: []any{ map[string]any{ - "image_id_override": "ami-deadbeef", - "image_type": "ECS_AL1", + "image_id_override": "ami-deadbeef", + "image_kubernetes_version": "1.31", + "image_type": "ECS_AL1", }, }, expected: []awstypes.Ec2Configuration{ { - ImageIdOverride: aws.String("ami-deadbeef"), - ImageType: aws.String("ECS_AL1"), + ImageIdOverride: aws.String("ami-deadbeef"), + ImageKubernetesVersion: aws.String("1.31"), + ImageType: aws.String("ECS_AL1"), }, }, }, @@ -1332,6 +1346,7 @@ func TestAccBatchComputeEnvironment_ec2Configuration(t *testing.T) { resource.TestCheckTypeSetElemAttr(resourceName, "compute_resources.0.instance_type.*", "optimal"), resource.TestCheckResourceAttr(resourceName, "compute_resources.0.ec2_configuration.#", "2"), resource.TestCheckResourceAttrSet(resourceName, "compute_resources.0.ec2_configuration.0.image_id_override"), + resource.TestCheckResourceAttr(resourceName, "compute_resources.0.ec2_configuration.0.image_kubernetes_version", "1.31"), resource.TestCheckResourceAttr(resourceName, "compute_resources.0.ec2_configuration.0.image_type", "ECS_AL2"), resource.TestCheckResourceAttrSet(resourceName, "compute_resources.0.ec2_configuration.1.image_id_override"), resource.TestCheckResourceAttr(resourceName, "compute_resources.0.ec2_configuration.1.image_type", "ECS_AL2_NVIDIA"), @@ -1594,6 +1609,46 @@ func TestAccBatchComputeEnvironment_updateLaunchTemplate(t *testing.T) { }) } +// https://github.com/hashicorp/terraform-provider-aws/issues/39470. 
+func TestAccBatchComputeEnvironment_updateLaunchTemplateID(t *testing.T) { + ctx := acctest.Context(t) + var ce awstypes.ComputeEnvironmentDetail + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_batch_compute_environment.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.BatchServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckComputeEnvironmentDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccComputeEnvironmentConfig_launchTemplateWithVersion(rName, "foo"), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckComputeEnvironmentExists(ctx, resourceName, &ce), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + }, + }, + }, + // Swap to version 2 of the launch template + { + Config: testAccComputeEnvironmentConfig_launchTemplateWithVersion(rName, "bar"), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckComputeEnvironmentExists(ctx, resourceName, &ce), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionReplace), + }, + }, + }, + }, + }) +} + func TestAccBatchComputeEnvironment_UpdateSecurityGroupsAndSubnets_fargate(t *testing.T) { ctx := acctest.Context(t) var ce awstypes.ComputeEnvironmentDetail @@ -1782,6 +1837,7 @@ func TestAccBatchComputeEnvironment_updateEC2(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "compute_resources.0.desired_vcpus", "0"), resource.TestCheckResourceAttr(resourceName, "compute_resources.0.ec2_configuration.#", "1"), resource.TestCheckResourceAttrSet(resourceName, "compute_resources.0.ec2_configuration.0.image_id_override"), + 
resource.TestCheckResourceAttr(resourceName, "compute_resources.0.ec2_configuration.0.image_kubernetes_version", "1.31"), resource.TestCheckResourceAttr(resourceName, "compute_resources.0.ec2_configuration.0.image_type", "ECS_AL2"), resource.TestCheckResourceAttrPair(resourceName, "compute_resources.0.ec2_key_pair", ec2KeyPairResourceName, names.AttrID), resource.TestCheckResourceAttr(resourceName, "compute_resources.0.image_id", ""), @@ -1874,6 +1930,47 @@ func TestAccBatchComputeEnvironment_createEC2WithoutComputeResources(t *testing. }) } +func TestAccBatchComputeEnvironment_updateInstanceTypeWithAllocationStrategy(t *testing.T) { + ctx := acctest.Context(t) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + publicKey, _, err := sdkacctest.RandSSHKeyPair(acctest.DefaultEmailAddress) + if err != nil { + t.Fatalf("error generating random SSH key: %s", err) + } + + resourceName := "aws_batch_compute_environment.test" + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.BatchServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckComputeEnvironmentDestroy(ctx), + Steps: []resource.TestStep{ + { // set up a basic compute environment with the SPOT_PRICE_CAPACITY_OPTIMIZED allocation strategy + Config: testAccComputeEnvironmentConfig_spotCapacityOptimizedAllocationInstanceTypeUpdate(rName, publicKey, "m7i"), + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttr(resourceName, "compute_resources.#", "1"), + resource.TestCheckResourceAttr(resourceName, "compute_resources.0.allocation_strategy", "SPOT_PRICE_CAPACITY_OPTIMIZED"), + resource.TestCheckResourceAttr(resourceName, "compute_resources.0.instance_type.#", "1"), + resource.TestCheckTypeSetElemAttr(resourceName, "compute_resources.0.instance_type.*", "m7i"), + ), + }, + { + Config: 
testAccComputeEnvironmentConfig_spotCapacityOptimizedAllocationInstanceTypeUpdate(rName, publicKey, "r7i"), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + }, + }, + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttr(resourceName, "compute_resources.#", "1"), + resource.TestCheckResourceAttr(resourceName, "compute_resources.0.allocation_strategy", "SPOT_PRICE_CAPACITY_OPTIMIZED"), + resource.TestCheckResourceAttr(resourceName, "compute_resources.0.instance_type.#", "1"), + resource.TestCheckTypeSetElemAttr(resourceName, "compute_resources.0.instance_type.*", "r7i"), + ), + }, + }}) +} + func testAccCheckComputeEnvironmentDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { conn := acctest.Provider.Meta().(*conns.AWSClient).BatchClient(ctx) @@ -2983,6 +3080,65 @@ resource "aws_batch_compute_environment" "test" { `, rName, version)) } +func testAccComputeEnvironmentConfig_launchTemplateWithVersion(rName, userDataSeed string) string { + return acctest.ConfigCompose(testAccComputeEnvironmentConfig_base(rName), fmt.Sprintf(` +locals { + user_data = <<-EOF +Content-Type: multipart/mixed; boundary="//" +MIME-Version: 1.0 + +--// +Content-Type: text/x-shellscript; charset="us-ascii" +MIME-Version: 1.0 +Content-Transfer-Encoding: 7bit +Content-Disposition: attachment; filename="userdata.txt" + +#!/bin/bash +echo hello +echo %[2]q +--//-- +EOF +} + +resource "aws_launch_template" "test" { + name = %[1]q + user_data = base64encode(local.user_data) +} + +resource "aws_batch_compute_environment" "test" { + name = %[1]q + + compute_resources { + allocation_strategy = "SPOT_PRICE_CAPACITY_OPTIMIZED" + instance_role = aws_iam_instance_profile.ecs_instance.arn + instance_type = [ + "c4.large", + ] + + launch_template { + launch_template_id = aws_launch_template.test.id + version = 
aws_launch_template.test.latest_version + } + + max_vcpus = 16 + min_vcpus = 0 + security_group_ids = [ + aws_security_group.test.id + ] + spot_iam_fleet_role = aws_iam_role.ec2_spot_fleet.arn + subnets = [ + aws_subnet.test.id + ] + type = "SPOT" + } + + service_role = aws_iam_role.batch_service.arn + type = "MANAGED" + depends_on = [aws_iam_role_policy_attachment.batch_service] +} +`, rName, userDataSeed)) +} + func testAccComputeEnvironmentConfig_ec2Configuration(rName string) string { return acctest.ConfigCompose(testAccComputeEnvironmentConfig_base(rName), acctest.ConfigLatestAmazonLinux2HVMEBSX8664AMI(), fmt.Sprintf(` resource "aws_batch_compute_environment" "test" { @@ -2993,8 +3149,9 @@ resource "aws_batch_compute_environment" "test" { instance_type = ["optimal"] ec2_configuration { - image_id_override = data.aws_ami.amzn2-ami-minimal-hvm-ebs-x86_64.id - image_type = "ECS_AL2" + image_id_override = data.aws_ami.amzn2-ami-minimal-hvm-ebs-x86_64.id + image_kubernetes_version = "1.31" + image_type = "ECS_AL2" } ec2_configuration { @@ -3171,8 +3328,9 @@ resource "aws_batch_compute_environment" "test" { ec2_key_pair = aws_key_pair.test.id instance_role = aws_iam_instance_profile.ecs_instance_2.arn ec2_configuration { - image_id_override = data.aws_ami.amzn2-ami-minimal-hvm-ebs-x86_64.id - image_type = "ECS_AL2" + image_id_override = data.aws_ami.amzn2-ami-minimal-hvm-ebs-x86_64.id + image_kubernetes_version = "1.31" + image_type = "ECS_AL2" } launch_template { launch_template_id = aws_launch_template.test.id @@ -3199,3 +3357,44 @@ resource "aws_batch_compute_environment" "test" { } `, rName)) } + +func testAccComputeEnvironmentConfig_spotCapacityOptimizedAllocationInstanceTypeUpdate(rName, publicKey, instanceType string) string { + return acctest.ConfigCompose( + testAccComputeEnvironmentConfig_base(rName), + testAccComputeEnvironmentConfig_baseForUpdates(rName, publicKey), + acctest.ConfigLatestAmazonLinux2HVMEBSX8664AMI(), + fmt.Sprintf(` +resource 
"aws_batch_compute_environment" "test" { + name = %[1]q + + compute_resources { + allocation_strategy = "SPOT_PRICE_CAPACITY_OPTIMIZED" + bid_percentage = 100 + ec2_key_pair = aws_key_pair.test.id + instance_role = aws_iam_instance_profile.ecs_instance_2.arn + ec2_configuration { + image_id_override = data.aws_ami.amzn2-ami-minimal-hvm-ebs-x86_64.id + image_type = "ECS_AL2" + } + launch_template { + launch_template_id = aws_launch_template.test.id + version = "$Latest" + } + instance_type = [ + %[2]q, + ] + max_vcpus = 16 + security_group_ids = [ + aws_security_group.test_2.id + ] + spot_iam_fleet_role = aws_iam_role.ec2_spot_fleet.arn + subnets = [ + aws_subnet.test_2.id + ] + type = "SPOT" + } + + type = "MANAGED" +} +`, rName, instanceType)) +} diff --git a/internal/service/batch/job_definition.go b/internal/service/batch/job_definition.go index c765d4f6bbaf..1c2fb0c07d44 100644 --- a/internal/service/batch/job_definition.go +++ b/internal/service/batch/job_definition.go @@ -36,9 +36,10 @@ import ( // @Tags(identifierAttribute="arn") // @ArnIdentity // @MutableIdentity -// @WrappedImport(false) +// @CustomImport // @ArnFormat("job-definition/{name}:{revision}") // @Testing(existsType="github.com/aws/aws-sdk-go-v2/service/batch/types;types.JobDefinition") +// @Testing(preIdentityVersion="6.4.0") func resourceJobDefinition() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceJobDefinitionCreate, @@ -46,10 +47,11 @@ func resourceJobDefinition() *schema.Resource { UpdateWithoutTimeout: resourceJobDefinitionUpdate, DeleteWithoutTimeout: resourceJobDefinitionDelete, - // TODO: handle default values on Import Importer: &schema.ResourceImporter{ StateContext: func(ctx context.Context, rd *schema.ResourceData, _ any) ([]*schema.ResourceData, error) { - if err := importer.RegionalARN(ctx, rd, names.AttrARN, []string{names.AttrID}); err != nil { + identity := importer.IdentitySpec(ctx) + + if err := importer.RegionalARN(ctx, rd, identity); err != nil 
{ return nil, err } @@ -1108,7 +1110,7 @@ func validJobContainerProperties(v any, k string) (ws []string, errors []error) value := v.(string) _, err := expandContainerProperties(value) if err != nil { - errors = append(errors, fmt.Errorf("AWS Batch Job container_properties is invalid: %s", err)) + errors = append(errors, fmt.Errorf("AWS Batch Job container_properties is invalid: %w", err)) } return } @@ -1117,7 +1119,7 @@ func validJobECSProperties(v any, k string) (ws []string, errors []error) { value := v.(string) _, err := expandECSProperties(value) if err != nil { - errors = append(errors, fmt.Errorf("AWS Batch Job ecs_properties is invalid: %s", err)) + errors = append(errors, fmt.Errorf("AWS Batch Job ecs_properties is invalid: %w", err)) } return } @@ -1126,7 +1128,7 @@ func validJobNodeProperties(v any, k string) (ws []string, errors []error) { value := v.(string) _, err := expandJobNodeProperties(value) if err != nil { - errors = append(errors, fmt.Errorf("AWS Batch Job node_properties is invalid: %s", err)) + errors = append(errors, fmt.Errorf("AWS Batch Job node_properties is invalid: %w", err)) } return } diff --git a/internal/service/batch/job_definition_data_source_tags_gen_test.go b/internal/service/batch/job_definition_data_source_tags_gen_test.go index 31d51bad85e1..7e39f437f536 100644 --- a/internal/service/batch/job_definition_data_source_tags_gen_test.go +++ b/internal/service/batch/job_definition_data_source_tags_gen_test.go @@ -8,7 +8,6 @@ import ( "unique" "github.com/hashicorp/terraform-plugin-testing/config" - sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/knownvalue" "github.com/hashicorp/terraform-plugin-testing/statecheck" @@ -22,10 +21,11 @@ import ( func TestAccBatchJobDefinitionDataSource_tags(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_batch_job_definition.test" - rName 
:= sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BatchServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -50,10 +50,11 @@ func TestAccBatchJobDefinitionDataSource_tags(t *testing.T) { func TestAccBatchJobDefinitionDataSource_tags_NullMap(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_batch_job_definition.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BatchServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -74,10 +75,11 @@ func TestAccBatchJobDefinitionDataSource_tags_NullMap(t *testing.T) { func TestAccBatchJobDefinitionDataSource_tags_EmptyMap(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_batch_job_definition.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BatchServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -98,10 +100,11 @@ func TestAccBatchJobDefinitionDataSource_tags_EmptyMap(t *testing.T) { func TestAccBatchJobDefinitionDataSource_tags_DefaultTags_nonOverlapping(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_batch_job_definition.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, 
acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BatchServiceID), Steps: []resource.TestStep{ @@ -130,10 +133,11 @@ func TestAccBatchJobDefinitionDataSource_tags_DefaultTags_nonOverlapping(t *test func TestAccBatchJobDefinitionDataSource_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_batch_job_definition.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BatchServiceID), Steps: []resource.TestStep{ @@ -168,10 +172,11 @@ func TestAccBatchJobDefinitionDataSource_tags_IgnoreTags_Overlap_DefaultTag(t *t func TestAccBatchJobDefinitionDataSource_tags_IgnoreTags_Overlap_ResourceTag(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_batch_job_definition.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BatchServiceID), Steps: []resource.TestStep{ diff --git a/internal/service/batch/job_definition_identity_gen_test.go b/internal/service/batch/job_definition_identity_gen_test.go index 2a34792189aa..c0cc9bddaf10 100644 --- a/internal/service/batch/job_definition_identity_gen_test.go +++ b/internal/service/batch/job_definition_identity_gen_test.go @@ -27,7 +27,7 @@ func TestAccBatchJobDefinition_Identity_Basic(t *testing.T) { resourceName := "aws_batch_job_definition.test" rName := 
sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ TerraformVersionChecks: []tfversion.TerraformVersionCheck{ tfversion.SkipBelow(tfversion.Version1_12_0), }, @@ -49,7 +49,10 @@ func TestAccBatchJobDefinition_Identity_Basic(t *testing.T) { tfstatecheck.ExpectRegionalARNFormat(resourceName, tfjsonpath.New(names.AttrARN), "batch", "job-definition/{name}:{revision}"), statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New(names.AttrARN), compare.ValuesSame()), statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), - // Resource Identity not supported for Mutable Identity + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), }, }, @@ -84,7 +87,22 @@ func TestAccBatchJobDefinition_Identity_Basic(t *testing.T) { }, // Step 4: Import block with Resource Identity - // Resource Identity not supported for Mutable Identity + { + ConfigDirectory: config.StaticDirectory("testdata/JobDefinition/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithResourceIdentity, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + }, + }, + }, }, }) } @@ -95,7 +113,7 @@ func TestAccBatchJobDefinition_Identity_RegionOverride(t *testing.T) { resourceName := 
"aws_batch_job_definition.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ TerraformVersionChecks: []tfversion.TerraformVersionCheck{ tfversion.SkipBelow(tfversion.Version1_12_0), }, @@ -115,7 +133,10 @@ func TestAccBatchJobDefinition_Identity_RegionOverride(t *testing.T) { tfstatecheck.ExpectRegionalARNAlternateRegionFormat(resourceName, tfjsonpath.New(names.AttrARN), "batch", "job-definition/{name}:{revision}"), statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New(names.AttrARN), compare.ValuesSame()), statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), - // Resource Identity not supported for Mutable Identity + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), }, }, @@ -134,7 +155,17 @@ func TestAccBatchJobDefinition_Identity_RegionOverride(t *testing.T) { }, // Step 3: Import command without appended "@" - // Importing without appended "@" for Mutable Identity + { + ConfigDirectory: config.StaticDirectory("testdata/JobDefinition/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ImportStateKind: resource.ImportCommandWithID, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, // Step 4: Import block with Import ID and appended "@" { @@ -157,10 +188,156 @@ func TestAccBatchJobDefinition_Identity_RegionOverride(t *testing.T) { }, // Step 5: Import block with Import ID and no appended "@" - // Importing without appended "@" for Mutable Identity + { + ConfigDirectory: 
config.StaticDirectory("testdata/JobDefinition/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + }, // Step 6: Import block with Resource Identity - // Resource Identity not supported for Mutable Identity + { + ConfigDirectory: config.StaticDirectory("testdata/JobDefinition/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithResourceIdentity, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + }, + }, + }) +} + +// Resource Identity was added after v6.4.0 +func TestAccBatchJobDefinition_Identity_ExistingResource(t *testing.T) { + ctx := acctest.Context(t) + + var v types.JobDefinition + resourceName := "aws_batch_job_definition.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: 
[]tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.BatchServiceID), + CheckDestroy: testAccCheckJobDefinitionDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/JobDefinition/basic_v6.4.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckJobDefinitionExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/JobDefinition/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + }, + }) +} + +// Resource Identity was added after v6.4.0 +func TestAccBatchJobDefinition_Identity_ExistingResource_NoRefresh_NoChange(t *testing.T) { + ctx := acctest.Context(t) + + var v types.JobDefinition + resourceName := "aws_batch_job_definition.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + 
tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.BatchServiceID), + CheckDestroy: testAccCheckJobDefinitionDestroy(ctx), + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/JobDefinition/basic_v6.4.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckJobDefinitionExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/JobDefinition/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, }, }) } diff --git a/internal/service/batch/job_definition_tags_gen_test.go b/internal/service/batch/job_definition_tags_gen_test.go index e468a64d9c04..4760e55fd824 100644 --- a/internal/service/batch/job_definition_tags_gen_test.go +++ b/internal/service/batch/job_definition_tags_gen_test.go @@ -7,7 +7,6 @@ import ( "github.com/aws/aws-sdk-go-v2/service/batch/types" "github.com/hashicorp/terraform-plugin-testing/config" - sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" 
"github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/knownvalue" "github.com/hashicorp/terraform-plugin-testing/plancheck" @@ -19,11 +18,12 @@ import ( func TestAccBatchJobDefinition_tags(t *testing.T) { ctx := acctest.Context(t) + var v types.JobDefinition resourceName := "aws_batch_job_definition.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BatchServiceID), CheckDestroy: testAccCheckJobDefinitionDestroy(ctx), @@ -201,11 +201,12 @@ func TestAccBatchJobDefinition_tags(t *testing.T) { func TestAccBatchJobDefinition_tags_null(t *testing.T) { ctx := acctest.Context(t) + var v types.JobDefinition resourceName := "aws_batch_job_definition.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BatchServiceID), CheckDestroy: testAccCheckJobDefinitionDestroy(ctx), @@ -268,11 +269,12 @@ func TestAccBatchJobDefinition_tags_null(t *testing.T) { func TestAccBatchJobDefinition_tags_EmptyMap(t *testing.T) { ctx := acctest.Context(t) + var v types.JobDefinition resourceName := "aws_batch_job_definition.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BatchServiceID), CheckDestroy: testAccCheckJobDefinitionDestroy(ctx), @@ -331,11 +333,12 @@ 
func TestAccBatchJobDefinition_tags_EmptyMap(t *testing.T) { func TestAccBatchJobDefinition_tags_AddOnUpdate(t *testing.T) { ctx := acctest.Context(t) + var v types.JobDefinition resourceName := "aws_batch_job_definition.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BatchServiceID), CheckDestroy: testAccCheckJobDefinitionDestroy(ctx), @@ -412,11 +415,12 @@ func TestAccBatchJobDefinition_tags_AddOnUpdate(t *testing.T) { func TestAccBatchJobDefinition_tags_EmptyTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) + var v types.JobDefinition resourceName := "aws_batch_job_definition.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BatchServiceID), CheckDestroy: testAccCheckJobDefinitionDestroy(ctx), @@ -501,11 +505,12 @@ func TestAccBatchJobDefinition_tags_EmptyTag_OnCreate(t *testing.T) { func TestAccBatchJobDefinition_tags_EmptyTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + var v types.JobDefinition resourceName := "aws_batch_job_definition.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BatchServiceID), CheckDestroy: testAccCheckJobDefinitionDestroy(ctx), @@ -638,11 +643,12 @@ func TestAccBatchJobDefinition_tags_EmptyTag_OnUpdate_Add(t *testing.T) { func 
TestAccBatchJobDefinition_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + var v types.JobDefinition resourceName := "aws_batch_job_definition.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BatchServiceID), CheckDestroy: testAccCheckJobDefinitionDestroy(ctx), @@ -727,11 +733,12 @@ func TestAccBatchJobDefinition_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { func TestAccBatchJobDefinition_tags_DefaultTags_providerOnly(t *testing.T) { ctx := acctest.Context(t) + var v types.JobDefinition resourceName := "aws_batch_job_definition.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BatchServiceID), CheckDestroy: testAccCheckJobDefinitionDestroy(ctx), @@ -908,11 +915,12 @@ func TestAccBatchJobDefinition_tags_DefaultTags_providerOnly(t *testing.T) { func TestAccBatchJobDefinition_tags_DefaultTags_nonOverlapping(t *testing.T) { ctx := acctest.Context(t) + var v types.JobDefinition resourceName := "aws_batch_job_definition.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BatchServiceID), CheckDestroy: testAccCheckJobDefinitionDestroy(ctx), @@ -1068,11 +1076,12 @@ func TestAccBatchJobDefinition_tags_DefaultTags_nonOverlapping(t *testing.T) { func 
TestAccBatchJobDefinition_tags_DefaultTags_overlapping(t *testing.T) { ctx := acctest.Context(t) + var v types.JobDefinition resourceName := "aws_batch_job_definition.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BatchServiceID), CheckDestroy: testAccCheckJobDefinitionDestroy(ctx), @@ -1244,11 +1253,12 @@ func TestAccBatchJobDefinition_tags_DefaultTags_overlapping(t *testing.T) { func TestAccBatchJobDefinition_tags_DefaultTags_updateToProviderOnly(t *testing.T) { ctx := acctest.Context(t) + var v types.JobDefinition resourceName := "aws_batch_job_definition.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BatchServiceID), CheckDestroy: testAccCheckJobDefinitionDestroy(ctx), @@ -1334,11 +1344,12 @@ func TestAccBatchJobDefinition_tags_DefaultTags_updateToProviderOnly(t *testing. func TestAccBatchJobDefinition_tags_DefaultTags_updateToResourceOnly(t *testing.T) { ctx := acctest.Context(t) + var v types.JobDefinition resourceName := "aws_batch_job_definition.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BatchServiceID), CheckDestroy: testAccCheckJobDefinitionDestroy(ctx), @@ -1423,11 +1434,12 @@ func TestAccBatchJobDefinition_tags_DefaultTags_updateToResourceOnly(t *testing. 
func TestAccBatchJobDefinition_tags_DefaultTags_emptyResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v types.JobDefinition resourceName := "aws_batch_job_definition.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BatchServiceID), CheckDestroy: testAccCheckJobDefinitionDestroy(ctx), @@ -1488,11 +1500,12 @@ func TestAccBatchJobDefinition_tags_DefaultTags_emptyResourceTag(t *testing.T) { func TestAccBatchJobDefinition_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T) { ctx := acctest.Context(t) + var v types.JobDefinition resourceName := "aws_batch_job_definition.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BatchServiceID), CheckDestroy: testAccCheckJobDefinitionDestroy(ctx), @@ -1545,11 +1558,12 @@ func TestAccBatchJobDefinition_tags_DefaultTags_emptyProviderOnlyTag(t *testing. 
func TestAccBatchJobDefinition_tags_DefaultTags_nullOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v types.JobDefinition resourceName := "aws_batch_job_definition.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BatchServiceID), CheckDestroy: testAccCheckJobDefinitionDestroy(ctx), @@ -1607,11 +1621,12 @@ func TestAccBatchJobDefinition_tags_DefaultTags_nullOverlappingResourceTag(t *te func TestAccBatchJobDefinition_tags_DefaultTags_nullNonOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v types.JobDefinition resourceName := "aws_batch_job_definition.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BatchServiceID), CheckDestroy: testAccCheckJobDefinitionDestroy(ctx), @@ -1669,11 +1684,12 @@ func TestAccBatchJobDefinition_tags_DefaultTags_nullNonOverlappingResourceTag(t func TestAccBatchJobDefinition_tags_ComputedTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) + var v types.JobDefinition resourceName := "aws_batch_job_definition.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BatchServiceID), CheckDestroy: testAccCheckJobDefinitionDestroy(ctx), @@ -1724,11 +1740,12 @@ func TestAccBatchJobDefinition_tags_ComputedTag_OnCreate(t 
*testing.T) { func TestAccBatchJobDefinition_tags_ComputedTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + var v types.JobDefinition resourceName := "aws_batch_job_definition.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BatchServiceID), CheckDestroy: testAccCheckJobDefinitionDestroy(ctx), @@ -1821,11 +1838,12 @@ func TestAccBatchJobDefinition_tags_ComputedTag_OnUpdate_Add(t *testing.T) { func TestAccBatchJobDefinition_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + var v types.JobDefinition resourceName := "aws_batch_job_definition.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BatchServiceID), CheckDestroy: testAccCheckJobDefinitionDestroy(ctx), @@ -1908,11 +1926,12 @@ func TestAccBatchJobDefinition_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { func TestAccBatchJobDefinition_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { ctx := acctest.Context(t) + var v types.JobDefinition resourceName := "aws_batch_job_definition.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BatchServiceID), CheckDestroy: testAccCheckJobDefinitionDestroy(ctx), @@ -2070,11 +2089,12 @@ func TestAccBatchJobDefinition_tags_IgnoreTags_Overlap_DefaultTag(t 
*testing.T) func TestAccBatchJobDefinition_tags_IgnoreTags_Overlap_ResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v types.JobDefinition resourceName := "aws_batch_job_definition.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BatchServiceID), CheckDestroy: testAccCheckJobDefinitionDestroy(ctx), diff --git a/internal/service/batch/job_definition_test.go b/internal/service/batch/job_definition_test.go index cfcb0dcd2fd5..b30d45c4787d 100644 --- a/internal/service/batch/job_definition_test.go +++ b/internal/service/batch/job_definition_test.go @@ -21,7 +21,9 @@ import ( "github.com/hashicorp/terraform-plugin-testing/statecheck" "github.com/hashicorp/terraform-plugin-testing/terraform" "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" + "github.com/hashicorp/terraform-plugin-testing/tfversion" "github.com/hashicorp/terraform-provider-aws/internal/acctest" + tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" "github.com/hashicorp/terraform-provider-aws/internal/conns" tfbatch "github.com/hashicorp/terraform-provider-aws/internal/service/batch" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" @@ -81,6 +83,67 @@ func TestAccBatchJobDefinition_basic(t *testing.T) { }) } +func TestAccBatchJobDefinition_Identity_ChangeOnUpdate(t *testing.T) { + ctx := acctest.Context(t) + var jd awstypes.JobDefinition + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_batch_job_definition.test" + + resource.ParallelTest(t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { + acctest.PreCheck(ctx, t) + 
testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.BatchServiceID), + CheckDestroy: testAccCheckJobDefinitionDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + // Step 1: Create + { + Config: testAccJobDefinitionConfig_containerProperties(rName, "-la"), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckJobDefinitionExists(ctx, resourceName, &jd), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("revision"), knownvalue.Int32Exact(1)), + tfstatecheck.ExpectRegionalARNFormat(resourceName, tfjsonpath.New(names.AttrARN), "batch", "job-definition/{name}:{revision}"), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + + // Step 2: Update + { + Config: testAccJobDefinitionConfig_containerProperties(rName, "-lah"), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckJobDefinitionExists(ctx, resourceName, &jd), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("revision"), knownvalue.Int32Exact(2)), + tfstatecheck.ExpectRegionalARNFormat(resourceName, tfjsonpath.New(names.AttrARN), "batch", "job-definition/{name}:{revision}"), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + }, + }) +} + func 
TestAccBatchJobDefinition_attributes(t *testing.T) { ctx := acctest.Context(t) var jd awstypes.JobDefinition @@ -458,14 +521,16 @@ func TestAccBatchJobDefinition_ContainerProperties_minorUpdate(t *testing.T) { Config: testAccJobDefinitionConfig_containerProperties(rName, "-la"), Check: resource.ComposeAggregateTestCheckFunc( testAccCheckJobDefinitionExists(ctx, resourceName, &jd), - acctest.CheckResourceAttrRegionalARNFormat(ctx, resourceName, names.AttrARN, "batch", "job-definition/{name}:{revision}"), resource.TestCheckResourceAttr(resourceName, "revision", "1"), + acctest.CheckResourceAttrRegionalARNFormat(ctx, resourceName, names.AttrARN, "batch", "job-definition/{name}:{revision}"), + resource.TestCheckResourceAttr(resourceName, "revision", "1"), ), }, { Config: testAccJobDefinitionConfig_containerProperties(rName, "-lah"), Check: resource.ComposeAggregateTestCheckFunc( testAccCheckJobDefinitionExists(ctx, resourceName, &jd), - acctest.CheckResourceAttrRegionalARNFormat(ctx, resourceName, names.AttrARN, "batch", "job-definition/{name}:{revision}"), testAccCheckJobDefinitionPreviousDeregistered(ctx, resourceName), + acctest.CheckResourceAttrRegionalARNFormat(ctx, resourceName, names.AttrARN, "batch", "job-definition/{name}:{revision}"), + testAccCheckJobDefinitionPreviousDeregistered(ctx, resourceName), resource.TestCheckResourceAttr(resourceName, "revision", "2"), ), }, diff --git a/internal/service/batch/job_queue.go b/internal/service/batch/job_queue.go index 9fb43983a01a..016528d222bb 100644 --- a/internal/service/batch/job_queue.go +++ b/internal/service/batch/job_queue.go @@ -7,6 +7,8 @@ import ( "context" "errors" "fmt" + "iter" + "slices" "time" "github.com/YakDriver/regexache" @@ -16,6 +18,10 @@ import ( "github.com/hashicorp/terraform-plugin-framework-timeouts/resource/timeouts" "github.com/hashicorp/terraform-plugin-framework-validators/int64validator" "github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator" + 
"github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/list" + listschema "github.com/hashicorp/terraform-plugin-framework/list/schema" "github.com/hashicorp/terraform-plugin-framework/path" "github.com/hashicorp/terraform-plugin-framework/resource" "github.com/hashicorp/terraform-plugin-framework/resource/schema" @@ -23,6 +29,7 @@ import ( "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" "github.com/hashicorp/terraform-plugin-framework/schema/validator" "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-framework/types/basetypes" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-provider-aws/internal/enum" "github.com/hashicorp/terraform-provider-aws/internal/errs" @@ -30,6 +37,8 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/framework" fwflex "github.com/hashicorp/terraform-provider-aws/internal/framework/flex" fwtypes "github.com/hashicorp/terraform-provider-aws/internal/framework/types" + "github.com/hashicorp/terraform-provider-aws/internal/provider/framework/listresource" + tfslices "github.com/hashicorp/terraform-provider-aws/internal/slices" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/names" @@ -40,6 +49,7 @@ import ( // @ArnIdentity(identityDuplicateAttributes="id") // @ArnFormat("job-queue/{name}") // @Testing(existsType="github.com/aws/aws-sdk-go-v2/service/batch/types;types.JobQueueDetail") +// @Testing(preIdentityVersion="v5.100.0") func newJobQueueResource(_ context.Context) (resource.ResourceWithConfigure, error) { r := jobQueueResource{} @@ -50,10 +60,18 @@ func newJobQueueResource(_ context.Context) (resource.ResourceWithConfigure, err return &r, nil } +// 
@FrameworkListResource("aws_batch_job_queue") +func jobQueueResourceAsListResource() list.ListResourceWithConfigure { + return &jobQueueResource{} +} + +var _ list.ListResource = &jobQueueResource{} + type jobQueueResource struct { framework.ResourceWithModel[jobQueueResourceModel] framework.WithTimeouts framework.WithImportByIdentity + framework.WithList } func (r *jobQueueResource) Schema(ctx context.Context, request resource.SchemaRequest, response *resource.SchemaResponse) { @@ -212,7 +230,6 @@ func (r *jobQueueResource) Read(ctx context.Context, request resource.ReadReques return } - // Set attributes for import. response.Diagnostics.Append(fwflex.Flatten(ctx, jobQueue, &data, fwflex.WithFieldNamePrefix("JobQueue"))...) if response.Diagnostics.HasError() { return @@ -368,11 +385,11 @@ func (r *jobQueueResource) UpgradeState(ctx context.Context) map[int64]resource. } func findJobQueueByID(ctx context.Context, conn *batch.Client, id string) (*awstypes.JobQueueDetail, error) { - input := &batch.DescribeJobQueuesInput{ + input := batch.DescribeJobQueuesInput{ JobQueues: []string{id}, } - output, err := findJobQueue(ctx, conn, input) + output, err := findJobQueue(ctx, conn, &input) if err != nil { return nil, err @@ -380,8 +397,7 @@ func findJobQueueByID(ctx context.Context, conn *batch.Client, id string) (*awst if status := output.Status; status == awstypes.JQStatusDeleted { return nil, &retry.NotFoundError{ - Message: string(status), - LastRequest: input, + Message: string(status), } } @@ -389,30 +405,7 @@ func findJobQueueByID(ctx context.Context, conn *batch.Client, id string) (*awst } func findJobQueue(ctx context.Context, conn *batch.Client, input *batch.DescribeJobQueuesInput) (*awstypes.JobQueueDetail, error) { - output, err := findJobQueues(ctx, conn, input) - - if err != nil { - return nil, err - } - - return tfresource.AssertSingleValueResult(output) -} - -func findJobQueues(ctx context.Context, conn *batch.Client, input *batch.DescribeJobQueuesInput) 
([]awstypes.JobQueueDetail, error) { - var output []awstypes.JobQueueDetail - - pages := batch.NewDescribeJobQueuesPaginator(conn, input) - for pages.HasMorePages() { - page, err := pages.NextPage(ctx) - - if err != nil { - return nil, err - } - - output = append(output, page.JobQueues...) - } - - return output, nil + return tfresource.AssertSingleValueResultIterErr(listJobQueues(ctx, conn, input)) } func statusJobQueue(ctx context.Context, conn *batch.Client, id string) retry.StateRefreshFunc { @@ -524,3 +517,152 @@ type jobStateTimeLimitActionModel struct { Reason types.String `tfsdk:"reason"` State fwtypes.StringEnum[awstypes.JobStateTimeLimitActionsState] `tfsdk:"state"` } + +// DescribeJobQueues is an "All-Or-Some" call. +func listJobQueues(ctx context.Context, conn *batch.Client, input *batch.DescribeJobQueuesInput) iter.Seq2[awstypes.JobQueueDetail, error] { + return func(yield func(awstypes.JobQueueDetail, error) bool) { + pages := batch.NewDescribeJobQueuesPaginator(conn, input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + if err != nil { + yield(awstypes.JobQueueDetail{}, fmt.Errorf("listing Batch Job Queues: %w", err)) + return + } + + for _, jobQueue := range page.JobQueues { + if !yield(jobQueue, nil) { + return + } + } + } + } +} + +func (r jobQueueResource) ListResourceConfigSchema(_ context.Context, request list.ListResourceSchemaRequest, response *list.ListResourceSchemaResponse) { + response.Schema = listschema.Schema{ + Attributes: map[string]listschema.Attribute{}, + } +} + +func (r jobQueueResource) List(ctx context.Context, request list.ListRequest, stream *list.ListResultsStream) { + var query jobQueueListModel + + if request.Config.Raw.IsKnown() && !request.Config.Raw.IsNull() { + if diags := request.Config.Get(ctx, &query); diags.HasError() { + stream.Results = list.ListResultsStreamDiagnostics(diags) + return + } + } + + awsClient := r.Meta() + conn := awsClient.BatchClient(ctx) + + resultInterceptors := 
r.ResultInterceptors() + + stream.Results = func(yield func(list.ListResult) bool) { + result := request.NewListResult(ctx) + var input batch.DescribeJobQueuesInput + for jobQueue, err := range listJobQueues(ctx, conn, &input) { + if err != nil { + result = list.ListResult{ + Diagnostics: diag.Diagnostics{ + diag.NewErrorDiagnostic( + "Error Listing Remote Resources", + fmt.Sprintf("Error: %s", err), + ), + }, + } + yield(result) + return + } + + ctx = tftags.NewContext(ctx, awsClient.DefaultTagsConfig(ctx), awsClient.IgnoreTagsConfig(ctx)) + + var data jobQueueResourceModel + + timeoutsType, _ := result.Resource.Schema.TypeAtPath(ctx, path.Root(names.AttrTimeouts)) + obj, _ := newNullObject(timeoutsType) + data.Timeouts.Object = obj + + typ, _ := result.Resource.Schema.TypeAtPath(ctx, path.Root(names.AttrTags)) + tagsType := typ.(attr.TypeWithElementType) + data.Tags.MapValue = basetypes.NewMapNull(tagsType.ElementType()) + data.TagsAll.MapValue = basetypes.NewMapNull(tagsType.ElementType()) + + params := listresource.InterceptorParams{ + C: awsClient, + Result: &result, + } + + params.When = listresource.Before + for interceptor := range slices.Values(resultInterceptors) { + d := interceptor.Read(ctx, params) // nosemgrep:ci.semgrep.migrate.direct-CRUD-calls + result.Diagnostics.Append(d...) + if d.HasError() { + result = list.ListResult{Diagnostics: result.Diagnostics} + yield(result) + return + } + } + + if diags := fwflex.Flatten(ctx, jobQueue, &data, fwflex.WithFieldNamePrefix("JobQueue")); diags.HasError() { + result.Diagnostics.Append(diags...) + } + + setTagsOut(ctx, jobQueue.Tags) + + if diags := result.Resource.Set(ctx, &data); diags.HasError() { + result.Diagnostics.Append(diags...) 
+ return + } + + result.DisplayName = data.JobQueueName.ValueString() + + params.When = listresource.After + for interceptor := range tfslices.BackwardValues(resultInterceptors) { + d := interceptor.Read(ctx, params) // nosemgrep:ci.semgrep.migrate.direct-CRUD-calls + result.Diagnostics.Append(d...) + if d.HasError() { + result = list.ListResult{Diagnostics: result.Diagnostics} + yield(result) + return + } + } + + if result.Diagnostics.HasError() { + result = list.ListResult{Diagnostics: result.Diagnostics} + yield(result) + return + } + + if !yield(result) { + return + } + } + } +} + +type jobQueueListModel struct { + // TODO: factor out + Region types.String `tfsdk:"region"` +} + +func newNullObject(typ attr.Type) (obj basetypes.ObjectValue, diags diag.Diagnostics) { + i, ok := typ.(attr.TypeWithAttributeTypes) + if !ok { + diags.AddError( + "Internal Error", + "An unexpected error occurred. "+ + "This is always an error in the provider. "+ + "Please report the following to the provider developer:\n\n"+ + fmt.Sprintf("Expected value type to implement attr.TypeWithAttributeTypes, got: %T", typ), + ) + return + } + + attrTypes := i.AttributeTypes() + + obj = basetypes.NewObjectNull(attrTypes) + + return obj, diags +} diff --git a/internal/service/batch/job_queue_data_source_tags_gen_test.go b/internal/service/batch/job_queue_data_source_tags_gen_test.go index e2ec8c94bf31..6b4100139650 100644 --- a/internal/service/batch/job_queue_data_source_tags_gen_test.go +++ b/internal/service/batch/job_queue_data_source_tags_gen_test.go @@ -8,7 +8,6 @@ import ( "unique" "github.com/hashicorp/terraform-plugin-testing/config" - sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/knownvalue" "github.com/hashicorp/terraform-plugin-testing/statecheck" @@ -22,10 +21,11 @@ import ( func TestAccBatchJobQueueDataSource_tags(t *testing.T) { ctx := 
acctest.Context(t) + dataSourceName := "data.aws_batch_job_queue.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BatchServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -50,10 +50,11 @@ func TestAccBatchJobQueueDataSource_tags(t *testing.T) { func TestAccBatchJobQueueDataSource_tags_NullMap(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_batch_job_queue.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BatchServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -74,10 +75,11 @@ func TestAccBatchJobQueueDataSource_tags_NullMap(t *testing.T) { func TestAccBatchJobQueueDataSource_tags_EmptyMap(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_batch_job_queue.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BatchServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -98,10 +100,11 @@ func TestAccBatchJobQueueDataSource_tags_EmptyMap(t *testing.T) { func TestAccBatchJobQueueDataSource_tags_DefaultTags_nonOverlapping(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_batch_job_queue.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName 
:= acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BatchServiceID), Steps: []resource.TestStep{ @@ -130,10 +133,11 @@ func TestAccBatchJobQueueDataSource_tags_DefaultTags_nonOverlapping(t *testing.T func TestAccBatchJobQueueDataSource_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_batch_job_queue.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BatchServiceID), Steps: []resource.TestStep{ @@ -168,10 +172,11 @@ func TestAccBatchJobQueueDataSource_tags_IgnoreTags_Overlap_DefaultTag(t *testin func TestAccBatchJobQueueDataSource_tags_IgnoreTags_Overlap_ResourceTag(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_batch_job_queue.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BatchServiceID), Steps: []resource.TestStep{ diff --git a/internal/service/batch/job_queue_identity_gen_test.go b/internal/service/batch/job_queue_identity_gen_test.go index 58d273d80179..2b0afd0391ec 100644 --- a/internal/service/batch/job_queue_identity_gen_test.go +++ b/internal/service/batch/job_queue_identity_gen_test.go @@ -27,7 +27,7 @@ func TestAccBatchJobQueue_Identity_Basic(t *testing.T) { resourceName := "aws_batch_job_queue.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - 
resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ TerraformVersionChecks: []tfversion.TerraformVersionCheck{ tfversion.SkipBelow(tfversion.Version1_12_0), }, @@ -49,6 +49,9 @@ func TestAccBatchJobQueue_Identity_Basic(t *testing.T) { tfstatecheck.ExpectRegionalARNFormat(resourceName, tfjsonpath.New(names.AttrARN), "batch", "job-queue/{name}"), statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New(names.AttrARN), compare.ValuesSame()), statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), }, }, @@ -110,7 +113,7 @@ func TestAccBatchJobQueue_Identity_RegionOverride(t *testing.T) { resourceName := "aws_batch_job_queue.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ TerraformVersionChecks: []tfversion.TerraformVersionCheck{ tfversion.SkipBelow(tfversion.Version1_12_0), }, @@ -130,6 +133,9 @@ func TestAccBatchJobQueue_Identity_RegionOverride(t *testing.T) { tfstatecheck.ExpectRegionalARNAlternateRegionFormat(resourceName, tfjsonpath.New(names.AttrARN), "batch", "job-queue/{name}"), statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New(names.AttrARN), compare.ValuesSame()), statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), }, }, @@ -221,3 +227,129 @@ func 
TestAccBatchJobQueue_Identity_RegionOverride(t *testing.T) { }, }) } + +func TestAccBatchJobQueue_Identity_ExistingResource(t *testing.T) { + ctx := acctest.Context(t) + + var v types.JobQueueDetail + resourceName := "aws_batch_job_queue.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.BatchServiceID), + CheckDestroy: testAccCheckJobQueueDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/JobQueue/basic_v5.100.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckJobQueueExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: v6.0 Identity set on refresh + { + ConfigDirectory: config.StaticDirectory("testdata/JobQueue/basic_v6.0.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckJobQueueExists(ctx, resourceName, &v), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + + // Step 3: Current version + 
{ + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/JobQueue/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + }, + }) +} + +func TestAccBatchJobQueue_Identity_ExistingResource_NoRefresh_NoChange(t *testing.T) { + ctx := acctest.Context(t) + + var v types.JobQueueDetail + resourceName := "aws_batch_job_queue.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.BatchServiceID), + CheckDestroy: testAccCheckJobQueueDestroy(ctx), + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/JobQueue/basic_v5.100.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckJobQueueExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + 
ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/JobQueue/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + }, + }, + }) +} diff --git a/internal/service/batch/job_queue_list_test.go b/internal/service/batch/job_queue_list_test.go new file mode 100644 index 000000000000..f23b1c159ef3 --- /dev/null +++ b/internal/service/batch/job_queue_list_test.go @@ -0,0 +1,130 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package batch_test + +import ( + "testing" + + "github.com/hashicorp/terraform-plugin-testing/config" + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/knownvalue" + "github.com/hashicorp/terraform-plugin-testing/querycheck" + "github.com/hashicorp/terraform-plugin-testing/statecheck" + "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" + "github.com/hashicorp/terraform-plugin-testing/tfversion" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + tfknownvalue "github.com/hashicorp/terraform-provider-aws/internal/acctest/knownvalue" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccBatchJobQueue_List_Basic(t *testing.T) { + ctx := acctest.Context(t) + + resourceName1 := "aws_batch_job_queue.test[0]" + resourceName2 := "aws_batch_job_queue.test[1]" + resourceName3 := "aws_batch_job_queue.test[2]" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_14_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.BatchServiceID), + CheckDestroy: testAccCheckJobQueueDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Setup + { + 
ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/JobQueue/list_basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName1, tfjsonpath.New(names.AttrARN), tfknownvalue.RegionalARNExact("batch", "job-queue/"+rName+"-0")), + statecheck.ExpectKnownValue(resourceName2, tfjsonpath.New(names.AttrARN), tfknownvalue.RegionalARNExact("batch", "job-queue/"+rName+"-1")), + statecheck.ExpectKnownValue(resourceName3, tfjsonpath.New(names.AttrARN), tfknownvalue.RegionalARNExact("batch", "job-queue/"+rName+"-2")), + }, + }, + + // Step 2: Query + { + Query: true, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/JobQueue/list_basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + QueryResultChecks: []querycheck.QueryResultCheck{ + querycheck.ExpectIdentity("aws_batch_job_queue.test", map[string]knownvalue.Check{ + names.AttrARN: tfknownvalue.RegionalARNExact("batch", "job-queue/"+rName+"-0"), + }), + querycheck.ExpectIdentity("aws_batch_job_queue.test", map[string]knownvalue.Check{ + names.AttrARN: tfknownvalue.RegionalARNExact("batch", "job-queue/"+rName+"-1"), + }), + querycheck.ExpectIdentity("aws_batch_job_queue.test", map[string]knownvalue.Check{ + names.AttrARN: tfknownvalue.RegionalARNExact("batch", "job-queue/"+rName+"-2"), + }), + }, + }, + }, + }) +} + +func TestAccBatchJobQueue_List_RegionOverride(t *testing.T) { + ctx := acctest.Context(t) + + resourceName1 := "aws_batch_job_queue.test[0]" + resourceName2 := "aws_batch_job_queue.test[1]" + resourceName3 := "aws_batch_job_queue.test[2]" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + 
tfversion.SkipBelow(tfversion.Version1_14_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.BatchServiceID), + CheckDestroy: testAccCheckJobQueueDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Setup + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/JobQueue/list_region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName1, tfjsonpath.New(names.AttrARN), tfknownvalue.RegionalARNAlternateRegionExact("batch", "job-queue/"+rName+"-0")), + statecheck.ExpectKnownValue(resourceName2, tfjsonpath.New(names.AttrARN), tfknownvalue.RegionalARNAlternateRegionExact("batch", "job-queue/"+rName+"-1")), + statecheck.ExpectKnownValue(resourceName3, tfjsonpath.New(names.AttrARN), tfknownvalue.RegionalARNAlternateRegionExact("batch", "job-queue/"+rName+"-2")), + }, + }, + + // Step 2: Query + { + Query: true, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/JobQueue/list_region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + QueryResultChecks: []querycheck.QueryResultCheck{ + querycheck.ExpectIdentity("aws_batch_job_queue.test", map[string]knownvalue.Check{ + names.AttrARN: tfknownvalue.RegionalARNAlternateRegionExact("batch", "job-queue/"+rName+"-0"), + }), + querycheck.ExpectIdentity("aws_batch_job_queue.test", map[string]knownvalue.Check{ + names.AttrARN: tfknownvalue.RegionalARNAlternateRegionExact("batch", "job-queue/"+rName+"-1"), + }), + querycheck.ExpectIdentity("aws_batch_job_queue.test", map[string]knownvalue.Check{ + names.AttrARN: 
tfknownvalue.RegionalARNAlternateRegionExact("batch", "job-queue/"+rName+"-2"), + }), + }, + }, + }, + }) +} diff --git a/internal/service/batch/job_queue_tags_gen_test.go b/internal/service/batch/job_queue_tags_gen_test.go index a40be55c1e6f..4bce22d4714b 100644 --- a/internal/service/batch/job_queue_tags_gen_test.go +++ b/internal/service/batch/job_queue_tags_gen_test.go @@ -7,7 +7,6 @@ import ( "github.com/aws/aws-sdk-go-v2/service/batch/types" "github.com/hashicorp/terraform-plugin-testing/config" - sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/knownvalue" "github.com/hashicorp/terraform-plugin-testing/plancheck" @@ -19,11 +18,12 @@ import ( func TestAccBatchJobQueue_tags(t *testing.T) { ctx := acctest.Context(t) + var v types.JobQueueDetail resourceName := "aws_batch_job_queue.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BatchServiceID), CheckDestroy: testAccCheckJobQueueDestroy(ctx), @@ -201,11 +201,12 @@ func TestAccBatchJobQueue_tags(t *testing.T) { func TestAccBatchJobQueue_tags_null(t *testing.T) { ctx := acctest.Context(t) + var v types.JobQueueDetail resourceName := "aws_batch_job_queue.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BatchServiceID), CheckDestroy: testAccCheckJobQueueDestroy(ctx), @@ -263,11 +264,12 @@ func TestAccBatchJobQueue_tags_null(t *testing.T) { func 
TestAccBatchJobQueue_tags_EmptyMap(t *testing.T) { ctx := acctest.Context(t) + var v types.JobQueueDetail resourceName := "aws_batch_job_queue.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BatchServiceID), CheckDestroy: testAccCheckJobQueueDestroy(ctx), @@ -313,11 +315,12 @@ func TestAccBatchJobQueue_tags_EmptyMap(t *testing.T) { func TestAccBatchJobQueue_tags_AddOnUpdate(t *testing.T) { ctx := acctest.Context(t) + var v types.JobQueueDetail resourceName := "aws_batch_job_queue.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BatchServiceID), CheckDestroy: testAccCheckJobQueueDestroy(ctx), @@ -393,11 +396,12 @@ func TestAccBatchJobQueue_tags_AddOnUpdate(t *testing.T) { func TestAccBatchJobQueue_tags_EmptyTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) + var v types.JobQueueDetail resourceName := "aws_batch_job_queue.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BatchServiceID), CheckDestroy: testAccCheckJobQueueDestroy(ctx), @@ -483,11 +487,12 @@ func TestAccBatchJobQueue_tags_EmptyTag_OnCreate(t *testing.T) { func TestAccBatchJobQueue_tags_EmptyTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + var v types.JobQueueDetail resourceName := 
"aws_batch_job_queue.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BatchServiceID), CheckDestroy: testAccCheckJobQueueDestroy(ctx), @@ -622,11 +627,12 @@ func TestAccBatchJobQueue_tags_EmptyTag_OnUpdate_Add(t *testing.T) { func TestAccBatchJobQueue_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + var v types.JobQueueDetail resourceName := "aws_batch_job_queue.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BatchServiceID), CheckDestroy: testAccCheckJobQueueDestroy(ctx), @@ -712,11 +718,12 @@ func TestAccBatchJobQueue_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { func TestAccBatchJobQueue_tags_DefaultTags_providerOnly(t *testing.T) { ctx := acctest.Context(t) + var v types.JobQueueDetail resourceName := "aws_batch_job_queue.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BatchServiceID), CheckDestroy: testAccCheckJobQueueDestroy(ctx), @@ -893,11 +900,12 @@ func TestAccBatchJobQueue_tags_DefaultTags_providerOnly(t *testing.T) { func TestAccBatchJobQueue_tags_DefaultTags_nonOverlapping(t *testing.T) { ctx := acctest.Context(t) + var v types.JobQueueDetail resourceName := "aws_batch_job_queue.test" - rName := 
sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BatchServiceID), CheckDestroy: testAccCheckJobQueueDestroy(ctx), @@ -1053,11 +1061,12 @@ func TestAccBatchJobQueue_tags_DefaultTags_nonOverlapping(t *testing.T) { func TestAccBatchJobQueue_tags_DefaultTags_overlapping(t *testing.T) { ctx := acctest.Context(t) + var v types.JobQueueDetail resourceName := "aws_batch_job_queue.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BatchServiceID), CheckDestroy: testAccCheckJobQueueDestroy(ctx), @@ -1229,11 +1238,12 @@ func TestAccBatchJobQueue_tags_DefaultTags_overlapping(t *testing.T) { func TestAccBatchJobQueue_tags_DefaultTags_updateToProviderOnly(t *testing.T) { ctx := acctest.Context(t) + var v types.JobQueueDetail resourceName := "aws_batch_job_queue.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BatchServiceID), CheckDestroy: testAccCheckJobQueueDestroy(ctx), @@ -1319,11 +1329,12 @@ func TestAccBatchJobQueue_tags_DefaultTags_updateToProviderOnly(t *testing.T) { func TestAccBatchJobQueue_tags_DefaultTags_updateToResourceOnly(t *testing.T) { ctx := acctest.Context(t) + var v types.JobQueueDetail resourceName := "aws_batch_job_queue.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + 
rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BatchServiceID), CheckDestroy: testAccCheckJobQueueDestroy(ctx), @@ -1408,11 +1419,12 @@ func TestAccBatchJobQueue_tags_DefaultTags_updateToResourceOnly(t *testing.T) { func TestAccBatchJobQueue_tags_DefaultTags_emptyResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v types.JobQueueDetail resourceName := "aws_batch_job_queue.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BatchServiceID), CheckDestroy: testAccCheckJobQueueDestroy(ctx), @@ -1474,11 +1486,12 @@ func TestAccBatchJobQueue_tags_DefaultTags_emptyResourceTag(t *testing.T) { func TestAccBatchJobQueue_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T) { ctx := acctest.Context(t) + var v types.JobQueueDetail resourceName := "aws_batch_job_queue.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BatchServiceID), CheckDestroy: testAccCheckJobQueueDestroy(ctx), @@ -1532,11 +1545,12 @@ func TestAccBatchJobQueue_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T) { func TestAccBatchJobQueue_tags_DefaultTags_nullOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v types.JobQueueDetail resourceName := "aws_batch_job_queue.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := 
acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BatchServiceID), CheckDestroy: testAccCheckJobQueueDestroy(ctx), @@ -1601,11 +1615,12 @@ func TestAccBatchJobQueue_tags_DefaultTags_nullOverlappingResourceTag(t *testing func TestAccBatchJobQueue_tags_DefaultTags_nullNonOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v types.JobQueueDetail resourceName := "aws_batch_job_queue.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BatchServiceID), CheckDestroy: testAccCheckJobQueueDestroy(ctx), @@ -1672,11 +1687,12 @@ func TestAccBatchJobQueue_tags_DefaultTags_nullNonOverlappingResourceTag(t *test func TestAccBatchJobQueue_tags_ComputedTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) + var v types.JobQueueDetail resourceName := "aws_batch_job_queue.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BatchServiceID), CheckDestroy: testAccCheckJobQueueDestroy(ctx), @@ -1727,11 +1743,12 @@ func TestAccBatchJobQueue_tags_ComputedTag_OnCreate(t *testing.T) { func TestAccBatchJobQueue_tags_ComputedTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + var v types.JobQueueDetail resourceName := "aws_batch_job_queue.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) 
- resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BatchServiceID), CheckDestroy: testAccCheckJobQueueDestroy(ctx), @@ -1824,11 +1841,12 @@ func TestAccBatchJobQueue_tags_ComputedTag_OnUpdate_Add(t *testing.T) { func TestAccBatchJobQueue_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + var v types.JobQueueDetail resourceName := "aws_batch_job_queue.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BatchServiceID), CheckDestroy: testAccCheckJobQueueDestroy(ctx), @@ -1911,11 +1929,12 @@ func TestAccBatchJobQueue_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { func TestAccBatchJobQueue_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { ctx := acctest.Context(t) + var v types.JobQueueDetail resourceName := "aws_batch_job_queue.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BatchServiceID), CheckDestroy: testAccCheckJobQueueDestroy(ctx), @@ -2073,11 +2092,12 @@ func TestAccBatchJobQueue_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { func TestAccBatchJobQueue_tags_IgnoreTags_Overlap_ResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v types.JobQueueDetail resourceName := "aws_batch_job_queue.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + 
acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BatchServiceID), CheckDestroy: testAccCheckJobQueueDestroy(ctx), diff --git a/internal/service/batch/job_queue_test.go b/internal/service/batch/job_queue_test.go index 6722c97f344c..b3d0ebaf2e4f 100644 --- a/internal/service/batch/job_queue_test.go +++ b/internal/service/batch/job_queue_test.go @@ -14,12 +14,8 @@ import ( sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/plancheck" - "github.com/hashicorp/terraform-plugin-testing/statecheck" "github.com/hashicorp/terraform-plugin-testing/terraform" - "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" - "github.com/hashicorp/terraform-plugin-testing/tfversion" "github.com/hashicorp/terraform-provider-aws/internal/acctest" - tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" "github.com/hashicorp/terraform-provider-aws/internal/conns" tfbatch "github.com/hashicorp/terraform-provider-aws/internal/service/batch" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" @@ -382,70 +378,6 @@ func TestAccBatchJobQueue_upgradeComputeEnvironments(t *testing.T) { }) } -func TestAccBatchJobQueue_Identity_ExistingResource(t *testing.T) { - ctx := acctest.Context(t) - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resourceName := "aws_batch_job_queue.test" - - resource.ParallelTest(t, resource.TestCase{ - TerraformVersionChecks: []tfversion.TerraformVersionCheck{ - tfversion.SkipBelow(tfversion.Version1_12_0), - }, - PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, names.BatchServiceID), - CheckDestroy: testAccCheckJobQueueDestroy(ctx), - Steps: []resource.TestStep{ - { - ExternalProviders: map[string]resource.ExternalProvider{ - "aws": { 
- Source: "hashicorp/aws", - VersionConstraint: "5.100.0", - }, - }, - Config: testAccJobQueueConfig_stateV5(rName, string(awstypes.JQStateEnabled)), - ConfigStateChecks: []statecheck.StateCheck{ - tfstatecheck.ExpectNoIdentity(resourceName), - }, - }, - { - ExternalProviders: map[string]resource.ExternalProvider{ - "aws": { - Source: "hashicorp/aws", - VersionConstraint: "6.0.0", - }, - }, - Config: testAccJobQueueConfig_state(rName, string(awstypes.JQStateEnabled)), - ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - PostApplyPostRefresh: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - }, - ConfigStateChecks: []statecheck.StateCheck{ - statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), - }, - }, - { - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - Config: testAccJobQueueConfig_state(rName, string(awstypes.JQStateEnabled)), - ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - PostApplyPostRefresh: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - }, - ConfigStateChecks: []statecheck.StateCheck{ - statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), - }, - }, - }, - }) -} - func testAccCheckJobQueueExists(ctx context.Context, n string, v *awstypes.JobQueueDetail) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] @@ -519,7 +451,7 @@ func testAccCheckJobQueueComputeEnvironmentOrderUpdate(ctx context.Context, jobQ _, err := conn.UpdateJobQueue(ctx, input) if err != nil { - return fmt.Errorf("error updating Batch Job Queue (%s): %s", name, err) + return fmt.Errorf("error updating Batch Job 
Queue (%s): %w", name, err) } return nil @@ -1037,123 +969,3 @@ resource "aws_batch_job_queue" "test" { } `, rName)) } - -// V5-compatible configuration functions for identity tests -func testAccJobQueueConfig_baseV5(rName string) string { - return fmt.Sprintf(` -data "aws_partition" "current" {} - -resource "aws_iam_role" "test" { - name = %[1]q - assume_role_policy = <, with role: . Verify // the IAM role permissions are correct. - _, err := tfresource.RetryWhenIsAErrorMessageContains[*awstypes.ValidationException](ctx, propagationTimeout, - func() (any, error) { + _, err := tfresource.RetryWhenIsAErrorMessageContains[any, *awstypes.ValidationException](ctx, propagationTimeout, + func(ctx context.Context) (any, error) { return conn.PutModelInvocationLoggingConfiguration(ctx, input) }, "Failed to validate permissions for log group", diff --git a/internal/service/bedrock/model_invocation_logging_configuration_identity_gen_test.go b/internal/service/bedrock/model_invocation_logging_configuration_identity_gen_test.go index 637a63619739..9139c65f5f1b 100644 --- a/internal/service/bedrock/model_invocation_logging_configuration_identity_gen_test.go +++ b/internal/service/bedrock/model_invocation_logging_configuration_identity_gen_test.go @@ -16,6 +16,7 @@ import ( "github.com/hashicorp/terraform-plugin-testing/tfversion" "github.com/hashicorp/terraform-provider-aws/internal/acctest" tfknownvalue "github.com/hashicorp/terraform-provider-aws/internal/acctest/knownvalue" + tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -23,9 +24,10 @@ func testAccBedrockModelInvocationLoggingConfiguration_IdentitySerial(t *testing t.Helper() testCases := map[string]func(t *testing.T){ - acctest.CtBasic: testAccBedrockModelInvocationLoggingConfiguration_Identity_Basic, - "ExistingResource": testAccBedrockModelInvocationLoggingConfiguration_Identity_ExistingResource, - "RegionOverride": 
testAccBedrockModelInvocationLoggingConfiguration_Identity_RegionOverride, + acctest.CtBasic: testAccBedrockModelInvocationLoggingConfiguration_Identity_Basic, + "ExistingResource": testAccBedrockModelInvocationLoggingConfiguration_Identity_ExistingResource, + "ExistingResourceNoRefresh": testAccBedrockModelInvocationLoggingConfiguration_Identity_ExistingResource_NoRefresh_NoChange, + "RegionOverride": testAccBedrockModelInvocationLoggingConfiguration_Identity_RegionOverride, } acctest.RunSerialTests1Level(t, testCases, 0) @@ -33,10 +35,11 @@ func testAccBedrockModelInvocationLoggingConfiguration_IdentitySerial(t *testing func testAccBedrockModelInvocationLoggingConfiguration_Identity_Basic(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_bedrock_model_invocation_logging_configuration.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ TerraformVersionChecks: []tfversion.TerraformVersionCheck{ tfversion.SkipBelow(tfversion.Version1_12_0), }, @@ -119,7 +122,7 @@ func testAccBedrockModelInvocationLoggingConfiguration_Identity_RegionOverride(t resourceName := "aws_bedrock_model_invocation_logging_configuration.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ TerraformVersionChecks: []tfversion.TerraformVersionCheck{ tfversion.SkipBelow(tfversion.Version1_12_0), }, @@ -229,3 +232,127 @@ func testAccBedrockModelInvocationLoggingConfiguration_Identity_RegionOverride(t }, }) } + +func testAccBedrockModelInvocationLoggingConfiguration_Identity_ExistingResource(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_bedrock_model_invocation_logging_configuration.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.Test(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + 
tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.BedrockServiceID), + CheckDestroy: testAccCheckModelInvocationLoggingConfigurationDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/ModelInvocationLoggingConfiguration/basic_v5.100.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckModelInvocationLoggingConfigurationExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: v6.0 Identity set on refresh + { + ConfigDirectory: config.StaticDirectory("testdata/ModelInvocationLoggingConfiguration/basic_v6.0.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckModelInvocationLoggingConfigurationExists(ctx, resourceName), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + }), + }, + }, + + // Step 3: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/ModelInvocationLoggingConfiguration/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: 
[]plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + }), + }, + }, + }, + }) +} + +func testAccBedrockModelInvocationLoggingConfiguration_Identity_ExistingResource_NoRefresh_NoChange(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_bedrock_model_invocation_logging_configuration.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.Test(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.BedrockServiceID), + CheckDestroy: testAccCheckModelInvocationLoggingConfigurationDestroy(ctx), + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/ModelInvocationLoggingConfiguration/basic_v5.100.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckModelInvocationLoggingConfigurationExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/ModelInvocationLoggingConfiguration/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: 
config.StringVariable(rName), + }, + }, + }, + }) +} diff --git a/internal/service/bedrock/model_invocation_logging_configuration_test.go b/internal/service/bedrock/model_invocation_logging_configuration_test.go index 99dc8d8366bb..bf07002341ff 100644 --- a/internal/service/bedrock/model_invocation_logging_configuration_test.go +++ b/internal/service/bedrock/model_invocation_logging_configuration_test.go @@ -10,14 +10,9 @@ import ( sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" - "github.com/hashicorp/terraform-plugin-testing/knownvalue" "github.com/hashicorp/terraform-plugin-testing/plancheck" - "github.com/hashicorp/terraform-plugin-testing/statecheck" "github.com/hashicorp/terraform-plugin-testing/terraform" - "github.com/hashicorp/terraform-plugin-testing/tfversion" "github.com/hashicorp/terraform-provider-aws/internal/acctest" - tfknownvalue "github.com/hashicorp/terraform-provider-aws/internal/acctest/knownvalue" - tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" "github.com/hashicorp/terraform-provider-aws/internal/conns" tfbedrock "github.com/hashicorp/terraform-provider-aws/internal/service/bedrock" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" @@ -209,88 +204,6 @@ func testAccCheckModelInvocationLoggingConfigurationDestroy(ctx context.Context) } } -func testAccBedrockModelInvocationLoggingConfiguration_Identity_ExistingResource(t *testing.T) { - ctx := acctest.Context(t) - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resourceName := "aws_bedrock_model_invocation_logging_configuration.test" - - resource.Test(t, resource.TestCase{ - TerraformVersionChecks: []tfversion.TerraformVersionCheck{ - tfversion.SkipBelow(tfversion.Version1_12_0), - }, - PreCheck: func() { - acctest.PreCheck(ctx, t) - acctest.PreCheckPartitionHasService(t, names.BedrockEndpointID) - }, - ErrorCheck: 
acctest.ErrorCheck(t, names.BedrockServiceID), - CheckDestroy: testAccCheckModelInvocationLoggingConfigurationDestroy(ctx), - Steps: []resource.TestStep{ - { - ExternalProviders: map[string]resource.ExternalProvider{ - "aws": { - Source: "hashicorp/aws", - VersionConstraint: "5.100.0", - }, - }, - Config: testAccModelInvocationLoggingConfigurationConfig_basicV5(rName, "null", "null", "null", "null"), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckModelInvocationLoggingConfigurationExists(ctx, resourceName), - ), - ConfigStateChecks: []statecheck.StateCheck{ - tfstatecheck.ExpectNoIdentity(resourceName), - }, - }, - { - ExternalProviders: map[string]resource.ExternalProvider{ - "aws": { - Source: "hashicorp/aws", - VersionConstraint: "6.0.0", - }, - }, - Config: testAccModelInvocationLoggingConfigurationConfig_basic(rName, "null", "null", "null", "null"), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckModelInvocationLoggingConfigurationExists(ctx, resourceName), - ), - ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - PostApplyPostRefresh: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - }, - ConfigStateChecks: []statecheck.StateCheck{ - statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ - names.AttrAccountID: tfknownvalue.AccountID(), - names.AttrRegion: knownvalue.StringExact(acctest.Region()), - }), - }, - }, - { - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - Config: testAccModelInvocationLoggingConfigurationConfig_basic(rName, "null", "null", "null", "null"), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckModelInvocationLoggingConfigurationExists(ctx, resourceName), - ), - ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, 
plancheck.ResourceActionNoop), - }, - PostApplyPostRefresh: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - }, - ConfigStateChecks: []statecheck.StateCheck{ - statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ - names.AttrAccountID: tfknownvalue.AccountID(), - names.AttrRegion: knownvalue.StringExact(acctest.Region()), - }), - }, - }, - }, - }) -} - func testAccModelInvocationLoggingConfigurationConfig_basic(rName, embeddingDataDeliveryEnabled, imageDataDeliveryEnabled, textDataDeliveryEnabled, videoDataDeliveryEnabled string) string { return fmt.Sprintf(` data "aws_caller_identity" "current" {} @@ -413,117 +326,3 @@ resource "aws_bedrock_model_invocation_logging_configuration" "test" { } `, rName, embeddingDataDeliveryEnabled, imageDataDeliveryEnabled, textDataDeliveryEnabled, videoDataDeliveryEnabled) } - -func testAccModelInvocationLoggingConfigurationConfig_basicV5(rName, embeddingDataDeliveryEnabled, imageDataDeliveryEnabled, textDataDeliveryEnabled, videoDataDeliveryEnabled string) string { - return fmt.Sprintf(` -data "aws_caller_identity" "current" {} -data "aws_region" "current" {} -data "aws_partition" "current" {} - -resource "aws_s3_bucket" "test" { - bucket = %[1]q - force_destroy = true - - lifecycle { - ignore_changes = ["tags", "tags_all"] - } -} - -resource "aws_s3_bucket_policy" "test" { - bucket = aws_s3_bucket.test.bucket - - policy = < 0 { + propObj := awstypes.SchemaDefinition{ + Properties: t.Properties, + Required: t.Required, + } + jsonProps := convertToJSONSchemaDefinition(&propObj) + s, err := tfjson.EncodeToString(jsonProps) + if err != nil { + diags.AddWarning("Failed to marshal properties for properties_json", err.Error()) + m.PropertiesJSON = types.StringNull() + } else { + m.PropertiesJSON = types.StringValue(s) + } + } else { + m.PropertiesJSON = types.StringNull() + } + default: + diags.AddError( + "Unsupported Type", + fmt.Sprintf("schema items leaf 
flatten: %T", v), + ) + } + return diags +} + +func (m schemaItemsLeafModel) Expand(ctx context.Context) (any, diag.Diagnostics) { + var diags diag.Diagnostics + var sd awstypes.SchemaDefinition + // Expand core (type/description) + smerr.EnrichAppend(ctx, &diags, fwflex.Expand(ctx, m.schemaItemsLeafCoreModel, &sd)) + if diags.HasError() { + return nil, diags + } + + if isNonEmpty(m.ItemsJSON) { + jsd, d := parseJSONSchemaDefinition(m.ItemsJSON.ValueString()) + smerr.EnrichAppend(ctx, &diags, d) + if diags.HasError() { + return nil, diags + } + sd.Items = jsd + } + if isNonEmpty(m.PropertiesJSON) { + jsd, d := parseJSONSchemaDefinition(m.PropertiesJSON.ValueString()) + smerr.EnrichAppend(ctx, &diags, d) + if diags.HasError() { + return nil, diags + } + sd.Properties = jsd.Properties + sd.Required = jsd.Required + } + + return &sd, diags +} + +type schemaPropertyLeafCoreModel struct { + Name types.String `tfsdk:"name"` + Description types.String `tfsdk:"description"` + Type fwtypes.StringEnum[awstypes.SchemaType] `tfsdk:"type"` +} + +type schemaPropertyLeafModel struct { + schemaPropertyLeafCoreModel + Required types.Bool `tfsdk:"required"` + // JSON serialized schema for deeper nesting + ItemsJSON types.String `tfsdk:"items_json"` + PropertiesJSON types.String `tfsdk:"properties_json"` +} + +var ( + _ fwflex.Expander = schemaPropertyLeafModel{} + _ fwflex.Flattener = &schemaPropertyLeafModel{} +) + +func (m *schemaPropertyLeafModel) Flatten(ctx context.Context, v any) diag.Diagnostics { + var diags diag.Diagnostics + switch t := v.(type) { + case awstypes.SchemaDefinition: + smerr.EnrichAppend(ctx, &diags, fwflex.Flatten(ctx, v, &m.schemaPropertyLeafCoreModel)) + if diags.HasError() { + return diags + } + // Populate ItemsJSON + if t.Items != nil { + jsonItems := convertToJSONSchemaDefinition(t.Items) + s, err := tfjson.EncodeToString(jsonItems) + if err != nil { + diags.AddWarning("Failed to marshal items for items_json", err.Error()) + m.ItemsJSON = 
types.StringNull() + } else { + m.ItemsJSON = types.StringValue(strings.TrimSpace(s)) + } + } else { + m.ItemsJSON = types.StringNull() + } + // Populate PropertiesJSON + if t.Properties != nil || len(t.Required) > 0 { + propObj := awstypes.SchemaDefinition{ + Properties: t.Properties, + Required: t.Required, + } + jsonProps := convertToJSONSchemaDefinition(&propObj) + s, err := tfjson.EncodeToString(jsonProps) + if err != nil { + diags.AddWarning("Failed to marshal properties for properties_json", err.Error()) + m.PropertiesJSON = types.StringNull() + } else { + m.PropertiesJSON = types.StringValue(s) + } + } else { + m.PropertiesJSON = types.StringNull() + } + default: + diags.AddError( + "Unsupported Type", + fmt.Sprintf("schema property leaf flatten: %T", v), + ) + } + return diags +} + +func (m schemaPropertyLeafModel) Expand(ctx context.Context) (any, diag.Diagnostics) { + var diags diag.Diagnostics + var schemaDefinitionData = awstypes.SchemaDefinition{} + + smerr.EnrichAppend(ctx, &diags, fwflex.Expand(ctx, m.schemaPropertyLeafCoreModel, &schemaDefinitionData)) + if diags.HasError() { + return nil, diags + } + + if isNonEmpty(m.ItemsJSON) { + jsd, d := parseJSONSchemaDefinition(m.ItemsJSON.ValueString()) + smerr.EnrichAppend(ctx, &diags, d) + if diags.HasError() { + return nil, diags + } + schemaDefinitionData.Items = jsd + } + if isNonEmpty(m.PropertiesJSON) { + jsd, d := parseJSONSchemaDefinition(m.PropertiesJSON.ValueString()) + smerr.EnrichAppend(ctx, &diags, d) + if diags.HasError() { + return nil, diags + } + schemaDefinitionData.Properties = jsd.Properties + schemaDefinitionData.Required = jsd.Required + } + return schemaDefinitionData, diags +} + +type s3ConfigurationModel struct { + BucketOwnerAccountId types.String `tfsdk:"bucket_owner_account_id"` + Uri types.String `tfsdk:"uri"` +} + +type apiSchemaConfigurationModel struct { + InlinePayload fwtypes.ListNestedObjectValueOf[inlinePayloadModel] `tfsdk:"inline_payload"` + S3 
fwtypes.ListNestedObjectValueOf[s3ConfigurationModel] `tfsdk:"s3"` +} + +var ( + _ fwflex.Expander = apiSchemaConfigurationModel{} + _ fwflex.Flattener = &apiSchemaConfigurationModel{} +) + +func (m *apiSchemaConfigurationModel) Flatten(ctx context.Context, v any) diag.Diagnostics { + var diags diag.Diagnostics + switch t := v.(type) { + case awstypes.ApiSchemaConfigurationMemberInlinePayload: + var model inlinePayloadModel + model.Payload = types.StringValue(t.Value) + m.InlinePayload = fwtypes.NewListNestedObjectValueOfPtrMust(ctx, &model) + return diags + + case awstypes.ApiSchemaConfigurationMemberS3: + var model s3ConfigurationModel + d := fwflex.Flatten(ctx, t.Value, &model) + smerr.EnrichAppend(ctx, &diags, d) + if diags.HasError() { + return diags + } + m.S3 = fwtypes.NewListNestedObjectValueOfPtrMust(ctx, &model) + + default: + diags.AddError( + "Unsupported Type", + fmt.Sprintf("api schema configuration flatten: %T", v), + ) + } + return diags +} + +func (m apiSchemaConfigurationModel) Expand(ctx context.Context) (any, diag.Diagnostics) { + var diags diag.Diagnostics + switch { + case !m.InlinePayload.IsNull(): + inlinePayloadApiSchemaConfigurationData, d := m.InlinePayload.ToPtr(ctx) + smerr.EnrichAppend(ctx, &diags, d) + if diags.HasError() { + return nil, diags + } + + var r awstypes.ApiSchemaConfigurationMemberInlinePayload + r.Value = inlinePayloadApiSchemaConfigurationData.Payload.ValueString() + return &r, diags + + case !m.S3.IsNull(): + s3ApiSchemaConfigurationData, d := m.S3.ToPtr(ctx) + smerr.EnrichAppend(ctx, &diags, d) + if diags.HasError() { + return nil, diags + } + + var r awstypes.ApiSchemaConfigurationMemberS3 + smerr.EnrichAppend(ctx, &diags, fwflex.Expand(ctx, s3ApiSchemaConfigurationData, &r.Value)) + if diags.HasError() { + return nil, diags + } + return &r, diags + } + return nil, diags +} + +type inlinePayloadModel struct { + Payload types.String `tfsdk:"payload"` +} + +// Helper functions for PropertiesJSON map conversion +func 
flattenTargetSchemaProperties( + ctx context.Context, + properties map[string]awstypes.SchemaDefinition, + required []string, +) (fwtypes.SetNestedObjectValueOf[schemaPropertyModel], diag.Diagnostics) { + var diags diag.Diagnostics + if len(properties) == 0 { + return fwtypes.NewSetNestedObjectValueOfNull[schemaPropertyModel](ctx), diags + } + + requiredSet := map[string]bool{} + for _, n := range required { + requiredSet[n] = true + } + + var propertyModels []*schemaPropertyModel + for name, schemaDefn := range properties { + pm := &schemaPropertyModel{} + d := pm.Flatten(ctx, schemaDefn) + smerr.EnrichAppend(ctx, &diags, d) + if diags.HasError() { + return fwtypes.NewSetNestedObjectValueOfNull[schemaPropertyModel](ctx), diags + } + + pm.Name = types.StringValue(name) + pm.Required = types.BoolValue(requiredSet[name]) + + propertyModels = append(propertyModels, pm) + } + + return fwtypes.NewSetNestedObjectValueOfSliceMust(ctx, propertyModels), diags +} + +func expandTargetSchemaProperties(ctx context.Context, properties fwtypes.SetNestedObjectValueOf[schemaPropertyModel]) (map[string]awstypes.SchemaDefinition, []string, diag.Diagnostics) { + var diags diag.Diagnostics + result := make(map[string]awstypes.SchemaDefinition) + var requiredProps []string + + propertySlice, d := properties.ToSlice(ctx) + smerr.EnrichAppend(ctx, &diags, d) + if diags.HasError() { + return nil, nil, diags + } + + for _, propertyModel := range propertySlice { + expandedValue, d := propertyModel.Expand(ctx) + smerr.EnrichAppend(ctx, &diags, d) + if diags.HasError() { + return nil, nil, diags + } + + if schemaDefn, ok := expandedValue.(awstypes.SchemaDefinition); ok { + name := propertyModel.Name.ValueString() + result[name] = schemaDefn + + // Since we always set required to explicit boolean, we can check it directly + if propertyModel.Required.ValueBool() { + requiredProps = append(requiredProps, name) + } + } + } + return result, requiredProps, diags +} + +// Helper functions for Leaf 
PropertiesJSON map conversion +func flattenTargetSchemaLeafProperties(ctx context.Context, properties map[string]awstypes.SchemaDefinition, requiredProps []string) (fwtypes.SetNestedObjectValueOf[schemaPropertyLeafModel], diag.Diagnostics) { + var diags diag.Diagnostics + requiredSet := make(map[string]bool) + for _, prop := range requiredProps { + requiredSet[prop] = true + } + + var propertyModels []*schemaPropertyLeafModel + for name, schemaDefn := range properties { + pm := &schemaPropertyLeafModel{} + d := pm.Flatten(ctx, schemaDefn) + smerr.EnrichAppend(ctx, &diags, d) + if diags.HasError() { + return fwtypes.NewSetNestedObjectValueOfNull[schemaPropertyLeafModel](ctx), diags + } + pm.Name = types.StringValue(name) + pm.Required = types.BoolValue(requiredSet[name]) + propertyModels = append(propertyModels, pm) + } + return fwtypes.NewSetNestedObjectValueOfSliceMust(ctx, propertyModels), diags +} + +func expandTargetSchemaLeafProperties(ctx context.Context, properties fwtypes.SetNestedObjectValueOf[schemaPropertyLeafModel]) (map[string]awstypes.SchemaDefinition, []string, diag.Diagnostics) { + var diags diag.Diagnostics + result := make(map[string]awstypes.SchemaDefinition) + var requiredProps []string + + propertySlice, d := properties.ToSlice(ctx) + smerr.EnrichAppend(ctx, &diags, d) + if diags.HasError() { + return nil, nil, diags + } + + for _, propertyModel := range propertySlice { + expandedValue, d := propertyModel.Expand(ctx) + smerr.EnrichAppend(ctx, &diags, d) + if diags.HasError() { + return nil, nil, diags + } + + if schemaDefn, ok := expandedValue.(awstypes.SchemaDefinition); ok { + name := propertyModel.Name.ValueString() + result[name] = schemaDefn + + if propertyModel.Required.ValueBool() { + requiredProps = append(requiredProps, name) + } + } + } + + return result, requiredProps, diags +} + +func parseJSONSchemaDefinition(s string) (*awstypes.SchemaDefinition, diag.Diagnostics) { + var diags diag.Diagnostics + s = strings.TrimSpace(s) + if s == 
"" { + diags.AddError("Invalid JSON", "JSON schema must be a non-empty string") + return nil, diags + } + var sd awstypes.SchemaDefinition + if err := tfjson.DecodeFromString(s, &sd); err != nil { + diags.AddError("Invalid JSON", err.Error()) + return nil, diags + } + return &sd, diags +} + +func isNonEmpty(s types.String) bool { + return !s.IsNull() && !s.IsUnknown() && strings.TrimSpace(s.ValueString()) != "" +} + +// jsonSchemaDefinition is a helper struct for JSON serialization with lowercase field names +type jsonSchemaDefinition struct { + Type string `json:"type,omitempty"` + Description *string `json:"description,omitempty"` + Items *jsonSchemaDefinition `json:"items,omitempty"` + Properties map[string]*jsonSchemaDefinition `json:"properties,omitempty"` + Required []string `json:"required,omitempty"` +} + +// convertToJSONSchemaDefinition converts AWS SDK SchemaDefinition to our JSON-friendly version +func convertToJSONSchemaDefinition(sd *awstypes.SchemaDefinition) *jsonSchemaDefinition { + if sd == nil { + return nil + } + + jsd := &jsonSchemaDefinition{ + Type: string(sd.Type), // Convert SchemaType enum to string + } + + // Only set non-nil values to avoid null fields in JSON + if sd.Description != nil && aws.ToString(sd.Description) != "" { + jsd.Description = sd.Description + } + if len(sd.Required) > 0 { + jsd.Required = sd.Required + } + + if sd.Items != nil { + jsd.Items = convertToJSONSchemaDefinition(sd.Items) + } + + if sd.Properties != nil { + jsd.Properties = make(map[string]*jsonSchemaDefinition) + for k, v := range sd.Properties { + if converted := convertToJSONSchemaDefinition(&v); converted != nil { + jsd.Properties[k] = converted + } + } + // If no properties were added, don't include the properties field + if len(jsd.Properties) == 0 { + jsd.Properties = nil + } + } + + return jsd +} diff --git a/internal/service/bedrockagentcore/gateway_target_test.go b/internal/service/bedrockagentcore/gateway_target_test.go new file mode 100644 index 
000000000000..a4b9687bb1b4 --- /dev/null +++ b/internal/service/bedrockagentcore/gateway_target_test.go @@ -0,0 +1,907 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package bedrockagentcore_test + +import ( + "context" + "fmt" + "testing" + + "github.com/YakDriver/regexache" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/bedrockagentcorecontrol" + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/plancheck" + "github.com/hashicorp/terraform-plugin-testing/terraform" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/retry" + tfbedrockagentcore "github.com/hashicorp/terraform-provider-aws/internal/service/bedrockagentcore" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccBedrockAgentCoreGatewayTarget_basic(t *testing.T) { + ctx := acctest.Context(t) + var gatewayTarget bedrockagentcorecontrol.GetGatewayTargetOutput + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_bedrockagentcore_gateway_target.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, names.BedrockEndpointID) + testAccPreCheckGatewayTargets(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.BedrockAgentCoreServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckGatewayTargetDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccGatewayTargetConfig_basic(rName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckGatewayTargetExists(ctx, resourceName, &gatewayTarget), + resource.TestCheckResourceAttr(resourceName, names.AttrName, rName), + 
resource.TestCheckResourceAttrSet(resourceName, "gateway_identifier"), + resource.TestCheckResourceAttrSet(resourceName, "target_id"), + resource.TestCheckResourceAttr(resourceName, "credential_provider_configuration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "credential_provider_configuration.0.gateway_iam_role.#", "1"), + resource.TestCheckResourceAttr(resourceName, "target_configuration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "target_configuration.0.mcp.#", "1"), + resource.TestCheckResourceAttr(resourceName, "target_configuration.0.mcp.0.lambda.#", "1"), + resource.TestCheckResourceAttrSet(resourceName, "target_configuration.0.mcp.0.lambda.0.lambda_arn"), + resource.TestCheckResourceAttr(resourceName, "target_configuration.0.mcp.0.lambda.0.tool_schema.#", "1"), + resource.TestCheckResourceAttr(resourceName, "target_configuration.0.mcp.0.lambda.0.tool_schema.0.inline_payload.#", "1"), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + }, + }, + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrsImportStateIdFunc(resourceName, ",", "gateway_identifier", "target_id"), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "target_id", + }, + }, + }) +} + +func TestAccBedrockAgentCoreGatewayTarget_disappears(t *testing.T) { + ctx := acctest.Context(t) + var gatewayTarget bedrockagentcorecontrol.GetGatewayTargetOutput + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_bedrockagentcore_gateway_target.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, names.BedrockEndpointID) + testAccPreCheckGatewayTargets(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.BedrockAgentCoreServiceID), + ProtoV5ProviderFactories: 
acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckGatewayTargetDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccGatewayTargetConfig_basic(rName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckGatewayTargetExists(ctx, resourceName, &gatewayTarget), + acctest.CheckFrameworkResourceDisappears(ctx, acctest.Provider, tfbedrockagentcore.ResourceGatewayTarget, resourceName), + ), + ExpectNonEmptyPlan: true, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + }, + }, + }, + }, + }) +} + +func TestAccBedrockAgentCoreGatewayTarget_targetConfiguration(t *testing.T) { + ctx := acctest.Context(t) + var gatewayTarget, gatewayTargetPrev bedrockagentcorecontrol.GetGatewayTargetOutput + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_bedrockagentcore_gateway_target.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, names.BedrockEndpointID) + testAccPreCheckGatewayTargets(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.BedrockAgentCoreServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckGatewayTargetDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccGatewayTargetConfig_targetConfiguration(rName, testAccSchema_primitive()), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckGatewayTargetExists(ctx, resourceName, &gatewayTarget), + resource.TestCheckResourceAttr(resourceName, names.AttrName, rName), + resource.TestCheckResourceAttr(resourceName, "target_configuration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "target_configuration.0.mcp.#", "1"), + 
resource.TestCheckResourceAttr(resourceName, "target_configuration.0.mcp.0.lambda.#", "1"), + resource.TestCheckResourceAttr(resourceName, "target_configuration.0.mcp.0.lambda.0.tool_schema.#", "1"), + resource.TestCheckResourceAttr(resourceName, "target_configuration.0.mcp.0.lambda.0.tool_schema.0.inline_payload.#", "1"), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + }, + }, + }, + // Example 2: Object with properties + required + { + Config: testAccGatewayTargetConfig_targetConfiguration(rName, testAccSchema_objectWithProperties()), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckGatewayTargetExists(ctx, resourceName, &gatewayTargetPrev), + resource.TestCheckResourceAttr(resourceName, names.AttrName, rName), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + }, + }, + }, + // Example 3: Array of primitives + { + Config: testAccGatewayTargetConfig_targetConfiguration(rName, testAccSchema_arrayOfPrimitives()), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckGatewayTargetExists(ctx, resourceName, &gatewayTarget), + resource.TestCheckResourceAttr(resourceName, names.AttrName, rName), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + }, + }, + }, + // Example 4: Array of objects + { + Config: testAccGatewayTargetConfig_targetConfiguration(rName, testAccSchema_arrayOfObjects()), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckGatewayTargetExists(ctx, resourceName, &gatewayTarget), + resource.TestCheckResourceAttr(resourceName, names.AttrName, rName), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + 
plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + }, + }, + }, + // Example 5: Array of arrays + { + Config: testAccGatewayTargetConfig_targetConfiguration(rName, testAccSchema_arrayOfArrays()), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckGatewayTargetExists(ctx, resourceName, &gatewayTarget), + resource.TestCheckResourceAttr(resourceName, names.AttrName, rName), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + }, + }, + }, + //Example 6: Mixed nested object/array + { + Config: testAccGatewayTargetConfig_targetConfiguration(rName, testAccSchema_mixedNested()), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckGatewayTargetExists(ctx, resourceName, &gatewayTargetPrev), + resource.TestCheckResourceAttr(resourceName, names.AttrName, rName), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + }, + }, + }, + // Example 7: Array with ignored keywords + { + Config: testAccGatewayTargetConfig_targetConfiguration(rName, testAccSchema_arrayWithIgnoredKeywords()), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckGatewayTargetExists(ctx, resourceName, &gatewayTarget), + resource.TestCheckResourceAttr(resourceName, names.AttrName, rName), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + }, + }, + }, + // Invalid Example 8: Both items and properties at the same node + { + Config: testAccGatewayTargetConfig_targetConfiguration(rName, testAccSchema_invalidBothItemsAndProperties()), + ExpectError: regexache.MustCompile("Invalid Attribute Combination"), + }, + // Invalid Example 9: Missing type + { + Config: 
testAccGatewayTargetConfig_targetConfiguration(rName, testAccSchema_invalidMissingType()), + ExpectError: regexache.MustCompile("Missing required argument"), + }, + // Invalid Example 10: Unsupported type + { + Config: testAccGatewayTargetConfig_targetConfiguration(rName, testAccSchema_invalidUnsupportedType()), + ExpectError: regexache.MustCompile("Invalid String Enum Value"), + }, + // Return to valid configuration to proceed with post-test destroy + { + Config: testAccGatewayTargetConfig_targetConfiguration(rName, testAccSchema_objectWithProperties()), + }, + }, + }) +} + +func TestAccBedrockAgentCoreGatewayTarget_credentialProvider(t *testing.T) { + ctx := acctest.Context(t) + var gatewayTarget, gatewayTargetPrev bedrockagentcorecontrol.GetGatewayTargetOutput + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_bedrockagentcore_gateway_target.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, names.BedrockEndpointID) + testAccPreCheckGatewayTargets(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.BedrockAgentCoreServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckGatewayTargetDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Gateway IAM Role provider with Lambda target + { + Config: testAccGatewayTargetConfig_credentialProvider(rName, testAccCredentialProvider_gatewayIAMRole()), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckGatewayTargetExists(ctx, resourceName, &gatewayTarget), + resource.TestCheckResourceAttr(resourceName, names.AttrName, rName), + resource.TestCheckResourceAttr(resourceName, "credential_provider_configuration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "credential_provider_configuration.0.gateway_iam_role.#", "1"), + resource.TestCheckResourceAttr(resourceName, "credential_provider_configuration.0.api_key.#", "0"), + 
resource.TestCheckResourceAttr(resourceName, "credential_provider_configuration.0.oauth.#", "0"), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + }, + }, + }, + // Step 2: API Key provider with OpenAPI Schema target (creates new resource) + { + Config: testAccGatewayTargetConfig_credentialProviderNonLambda(rName, testAccCredentialProvider_apiKey()), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckGatewayTargetExists(ctx, resourceName, &gatewayTargetPrev), + resource.TestCheckResourceAttr(resourceName, names.AttrName, rName), + resource.TestCheckResourceAttr(resourceName, "credential_provider_configuration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "credential_provider_configuration.0.api_key.#", "1"), + resource.TestCheckResourceAttr(resourceName, "credential_provider_configuration.0.gateway_iam_role.#", "0"), + resource.TestCheckResourceAttr(resourceName, "credential_provider_configuration.0.oauth.#", "0"), + resource.TestCheckResourceAttrSet(resourceName, "credential_provider_configuration.0.api_key.0.provider_arn"), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionReplace), + }, + }, + }, + // Step 3: OAuth provider with OpenAPI Schema target (updates credential provider only) + { + Config: testAccGatewayTargetConfig_credentialProviderNonLambda(rName, testAccCredentialProvider_oauth()), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckGatewayTargetExists(ctx, resourceName, &gatewayTarget), + resource.TestCheckResourceAttr(resourceName, names.AttrName, rName), + resource.TestCheckResourceAttr(resourceName, "credential_provider_configuration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "credential_provider_configuration.0.oauth.#", "1"), + resource.TestCheckResourceAttr(resourceName, 
"credential_provider_configuration.0.api_key.#", "0"), + resource.TestCheckResourceAttr(resourceName, "credential_provider_configuration.0.gateway_iam_role.#", "0"), + resource.TestCheckResourceAttrSet(resourceName, "credential_provider_configuration.0.oauth.0.provider_arn"), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + }, + }, + }, + // Step 4: Gateway IAM Role provider with Smithy Model target (creates new resource due to both changes) + { + Config: testAccGatewayTargetConfig_credentialProviderSmithy(rName, testAccCredentialProvider_gatewayIAMRole()), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckGatewayTargetExists(ctx, resourceName, &gatewayTargetPrev), + resource.TestCheckResourceAttr(resourceName, names.AttrName, rName), + resource.TestCheckResourceAttr(resourceName, "credential_provider_configuration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "credential_provider_configuration.0.gateway_iam_role.#", "1"), + resource.TestCheckResourceAttr(resourceName, "credential_provider_configuration.0.api_key.#", "0"), + resource.TestCheckResourceAttr(resourceName, "credential_provider_configuration.0.oauth.#", "0"), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionReplace), + }, + }, + }, + // Step 5: Back to Gateway IAM Role with Lambda target (creates new resource again) + { + Config: testAccGatewayTargetConfig_credentialProvider(rName, testAccCredentialProvider_gatewayIAMRole()), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckGatewayTargetExists(ctx, resourceName, &gatewayTargetPrev), + resource.TestCheckResourceAttr(resourceName, names.AttrName, rName), + resource.TestCheckResourceAttr(resourceName, "credential_provider_configuration.#", "1"), + resource.TestCheckResourceAttr(resourceName, 
"credential_provider_configuration.0.gateway_iam_role.#", "1"), + resource.TestCheckResourceAttr(resourceName, "credential_provider_configuration.0.api_key.#", "0"), + resource.TestCheckResourceAttr(resourceName, "credential_provider_configuration.0.oauth.#", "0"), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionReplace), + }, + }, + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrsImportStateIdFunc(resourceName, ",", "gateway_identifier", "target_id"), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "target_id", + }, + }, + }) +} + +func TestAccBedrockAgentCoreGatewayTarget_credentialProvider_invalid(t *testing.T) { + ctx := acctest.Context(t) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, names.BedrockEndpointID) + testAccPreCheckGatewayTargets(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.BedrockAgentCoreServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckGatewayTargetDestroy(ctx), + Steps: []resource.TestStep{ + // Invalid: Multiple credential providers + { + Config: testAccGatewayTargetConfig_credentialProvider(rName, testAccCredentialProvider_multipleProviders()), + ExpectError: regexache.MustCompile(`Invalid Attribute Combination|cannot be specified`), + }, + { + Config: testAccGatewayTargetConfig_credentialProvider(rName, testAccCredentialProvider_empty()), + ExpectError: regexache.MustCompile("Invalid Credential Provider Configuration|At least one credential provider must be configured"), + }, + }, + }) +} + +func testAccCheckGatewayTargetDestroy(ctx context.Context) resource.TestCheckFunc { + return func(s *terraform.State) error { + conn := 
acctest.Provider.Meta().(*conns.AWSClient).BedrockAgentCoreClient(ctx) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "aws_bedrockagentcore_gateway_target" { + continue + } + + _, err := tfbedrockagentcore.FindGatewayTargetByTwoPartKey(ctx, conn, rs.Primary.Attributes["gateway_identifier"], rs.Primary.Attributes["target_id"]) + if retry.NotFound(err) { + continue + } + + if err != nil { + return err + } + + return fmt.Errorf("Bedrock Agent Core Gateway Target %s still exists", rs.Primary.Attributes["target_id"]) + } + + return nil + } +} + +func testAccCheckGatewayTargetExists(ctx context.Context, n string, v *bedrockagentcorecontrol.GetGatewayTargetOutput) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + conn := acctest.Provider.Meta().(*conns.AWSClient).BedrockAgentCoreClient(ctx) + + resp, err := tfbedrockagentcore.FindGatewayTargetByTwoPartKey(ctx, conn, rs.Primary.Attributes["gateway_identifier"], rs.Primary.Attributes["target_id"]) + if err != nil { + return err + } + + *v = *resp + + return nil + } +} + +func testAccPreCheckGatewayTargets(ctx context.Context, t *testing.T) { + conn := acctest.Provider.Meta().(*conns.AWSClient).BedrockAgentCoreClient(ctx) + + input := bedrockagentcorecontrol.ListGatewayTargetsInput{ + GatewayIdentifier: aws.String("test-guthipm3lw"), // Using a dummy ID for the precheck + } + + _, err := conn.ListGatewayTargets(ctx, &input) + + if acctest.PreCheckSkipError(err) { + t.Skipf("skipping acceptance testing: %s", err) + } + if err != nil { + t.Fatalf("unexpected PreCheck error: %s", err) + } +} + +func testAccGatewayTargetConfig_infra(rName string) string { + return fmt.Sprintf(` +data "aws_partition" "current" {} + +data "aws_iam_policy_document" "test" { + statement { + effect = "Allow" + actions = ["sts:AssumeRole"] + principals { + type = "Service" + identifiers = 
["bedrock-agentcore.amazonaws.com"] + } + } +} + +resource "aws_iam_role" "test" { + name = %[1]q + assume_role_policy = data.aws_iam_policy_document.test.json +} + +data "aws_iam_policy_document" "lambda_assume" { + statement { + effect = "Allow" + actions = ["sts:AssumeRole"] + principals { + type = "Service" + identifiers = ["lambda.amazonaws.com"] + } + } +} + +resource "aws_iam_role" "lambda" { + name = "%[1]s-lambda" + + assume_role_policy = data.aws_iam_policy_document.lambda_assume.json +} + +resource "aws_lambda_function" "test" { + filename = "test-fixtures/lambdatest.zip" + function_name = %[1]q + role = aws_iam_role.lambda.arn + handler = "lambdatest.handler" + runtime = "nodejs20.x" +} + +resource "aws_bedrockagentcore_gateway" "test" { + name = %[1]q + role_arn = aws_iam_role.test.arn + + authorizer_type = "CUSTOM_JWT" + authorizer_configuration { + custom_jwt_authorizer { + discovery_url = "https://accounts.google.com/.well-known/openid-configuration" + allowed_audience = ["test"] + } + } + + protocol_type = "MCP" +} +`, rName) +} + +func testAccGatewayTargetConfig_basic(rName string) string { + return acctest.ConfigCompose(testAccGatewayTargetConfig_infra(rName), fmt.Sprintf(` +resource "aws_bedrockagentcore_gateway_target" "test" { + name = %[1]q + gateway_identifier = aws_bedrockagentcore_gateway.test.gateway_id + + credential_provider_configuration { + gateway_iam_role {} + } + + target_configuration { + mcp { + lambda { + lambda_arn = aws_lambda_function.test.arn + + tool_schema { + inline_payload { + name = "test_tool" + description = "A test tool" + + input_schema { + type = "object" + + property { + name = "input" + description = "some input" + type = "string" + required = true + } + } + } + } + } + } + } +} + +`, rName)) +} + +func testAccGatewayTargetConfig_credentialProvider(rName, credentialProviderContent string) string { + return acctest.ConfigCompose(testAccGatewayTargetConfig_infra(rName), fmt.Sprintf(` +resource 
"aws_bedrockagentcore_gateway_target" "test" { + name = %[1]q + gateway_identifier = aws_bedrockagentcore_gateway.test.gateway_id + + credential_provider_configuration { +%[2]s + } + + target_configuration { + mcp { + lambda { + lambda_arn = aws_lambda_function.test.arn + + tool_schema { + inline_payload { + name = "test_tool" + description = "A test tool" + + input_schema { + type = "string" + description = "Basic schema for credential provider test" + } + } + } + } + } + } +} +`, rName, credentialProviderContent)) +} + +func testAccGatewayTargetConfig_credentialProviderNonLambda(rName, credentialProviderContent string) string { + return acctest.ConfigCompose(testAccGatewayTargetConfig_infra(rName), fmt.Sprintf(` +resource "aws_bedrockagentcore_gateway_target" "test" { + name = %[1]q + gateway_identifier = aws_bedrockagentcore_gateway.test.gateway_id + + credential_provider_configuration { +%[2]s + } + + target_configuration { + mcp { + open_api_schema { + inline_payload { + payload = jsonencode({ + openapi = "3.0.0" + info = { + title = "Test API" + version = "1.0.0" + } + servers = [ + { + url = "https://api.example.com" + } + ] + paths = { + "/test" = { + get = { + operationId = "getTest" + summary = "Test endpoint" + responses = { + "200" = { + description = "Success" + } + } + } + } + } + }) + } + } + } + } +} +`, rName, credentialProviderContent)) +} + +func testAccGatewayTargetConfig_credentialProviderSmithy(rName, credentialProviderContent string) string { + return acctest.ConfigCompose(testAccGatewayTargetConfig_infra(rName), fmt.Sprintf(` +resource "aws_bedrockagentcore_gateway_target" "test" { + name = %[1]q + gateway_identifier = aws_bedrockagentcore_gateway.test.gateway_id + + credential_provider_configuration { +%[2]s + } + + target_configuration { + mcp { + smithy_model { + inline_payload { + payload = jsonencode({ + "smithy" = "2.0" + "shapes" = { + "com.example#TestService" = { + "type" = "service" + "version" = "1.0" + "operations" = [ + { + 
"target" = "com.example#TestOperation" + } + ] + "traits" = { + "aws.auth#sigv4" = { + "name" = "testservice" + } + "aws.protocols#restJson1" = {} + } + } + "com.example#TestOperation" = { + "type" = "operation" + "input" = { + "target" = "com.example#TestInput" + } + "output" = { + "target" = "com.example#TestOutput" + } + "traits" = { + "smithy.api#http" = { + "method" = "POST" + "uri" = "/test" + } + } + } + "com.example#TestInput" = { + "type" = "structure" + "members" = { + "message" = { + "target" = "smithy.api#String" + "traits" = { + "smithy.api#required" = {} + } + } + } + } + "com.example#TestOutput" = { + "type" = "structure" + "members" = { + "result" = { + "target" = "smithy.api#String" + } + } + } + } + }) + } + } + } + } +} +`, rName, credentialProviderContent)) +} + +func testAccGatewayTargetConfig_targetConfiguration(rName, schemaContent string) string { + return acctest.ConfigCompose(testAccGatewayTargetConfig_infra(rName), fmt.Sprintf(` +resource "aws_bedrockagentcore_gateway_target" "test" { + name = %[1]q + gateway_identifier = aws_bedrockagentcore_gateway.test.gateway_id + + credential_provider_configuration { + gateway_iam_role {} + } + + target_configuration { + mcp { + lambda { + lambda_arn = aws_lambda_function.test.arn + + tool_schema { + inline_payload { + name = "test_tool" + description = "A test tool" + + input_schema { + %[2]s + } + } + } + } + } + } +} +`, rName, schemaContent)) +} + +func testAccSchema_primitive() string { + return ` + type = "string" + description = "A token" + ` +} + +func testAccSchema_objectWithProperties() string { + return ` + type = "object" + description = "User" + + property { + name = "id" + type = "string" + required = true + } + + property { + name = "age" + type = "integer" + } + + property { + name = "paid" + type = "boolean" + } + ` +} + +func testAccSchema_arrayOfPrimitives() string { + return ` + type = "array" + description = "Tags" + + items { + type = "string" + } + ` +} + +func 
testAccSchema_arrayOfObjects() string { + return ` + type = "array" + + items { + type = "object" + + property { + name = "id" + type = "string" + required = true + } + + property { + name = "email" + type = "string" + } + + property { + name = "age" + type = "integer" + } + } + ` +} + +func testAccSchema_arrayOfArrays() string { + return ` + type = "array" + + items { + type = "array" + + items { + type = "number" + } + } + ` +} + +func testAccSchema_mixedNested() string { + return ` + type = "object" + + property { + name = "profile" + type = "object" + + property { + name = "nested_tags" + type = "array" + items_json = jsonencode({ + type = "string" + }) + } + } + ` +} + +func testAccSchema_arrayWithIgnoredKeywords() string { + return ` + type = "array" + + items { + type = "string" + } + ` +} + +func testAccSchema_invalidBothItemsAndProperties() string { + return ` + type = "object" + + items { + type = "string" + } + + property { + name = "a" + type = "string" + } + ` +} + +func testAccSchema_invalidMissingType() string { + return ` + description = "No type here" + ` +} + +func testAccSchema_invalidUnsupportedType() string { + return ` + type = "date" + ` +} + +func testAccCredentialProvider_gatewayIAMRole() string { + return ` gateway_iam_role {}` +} + +func testAccCredentialProvider_apiKey() string { + return ` api_key { + provider_arn = "arn:${data.aws_partition.current.partition}:iam::123456789012:oidc-provider/example.com" + credential_location = "HEADER" + credential_parameter_name = "X-API-Key" + credential_prefix = "Bearer" + }` +} + +func testAccCredentialProvider_oauth() string { + return ` oauth { + provider_arn = "arn:${data.aws_partition.current.partition}:iam::123456789012:oidc-provider/oauth.example.com" + scopes = ["read", "write"] + custom_parameters = { + "client_type" = "confidential" + "grant_type" = "authorization_code" + } + }` +} + +func testAccCredentialProvider_multipleProviders() string { + return ` gateway_iam_role {} + api_key { + 
provider_arn = "arn:${data.aws_partition.current.partition}:iam::123456789012:oidc-provider/example.com" + }` +} + +func testAccCredentialProvider_empty() string { + return ` # No providers configured` +} diff --git a/internal/service/bedrockagentcore/gateway_test.go b/internal/service/bedrockagentcore/gateway_test.go new file mode 100644 index 000000000000..795ea24d4e13 --- /dev/null +++ b/internal/service/bedrockagentcore/gateway_test.go @@ -0,0 +1,559 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package bedrockagentcore_test + +import ( + "context" + "fmt" + "testing" + + "github.com/YakDriver/regexache" + "github.com/aws/aws-sdk-go-v2/service/bedrockagentcorecontrol" + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/knownvalue" + "github.com/hashicorp/terraform-plugin-testing/plancheck" + "github.com/hashicorp/terraform-plugin-testing/statecheck" + "github.com/hashicorp/terraform-plugin-testing/terraform" + "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + tfknownvalue "github.com/hashicorp/terraform-provider-aws/internal/acctest/knownvalue" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/retry" + tfbedrockagentcore "github.com/hashicorp/terraform-provider-aws/internal/service/bedrockagentcore" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccBedrockAgentCoreGateway_basic(t *testing.T) { + ctx := acctest.Context(t) + var gateway bedrockagentcorecontrol.GetGatewayOutput + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_bedrockagentcore_gateway.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, 
names.BedrockEndpointID) + testAccPreCheckGateways(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.BedrockAgentCoreServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckGatewayDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccGatewayConfig_basic(rName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckGatewayExists(ctx, resourceName, &gateway), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("gateway_arn"), tfknownvalue.RegionalARNRegexp("bedrock-agentcore", regexache.MustCompile(`gateway/.+`))), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("gateway_id"), knownvalue.NotNull()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("gateway_url"), knownvalue.NotNull()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrName), knownvalue.StringExact(rName)), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("workload_identity_details"), knownvalue.ListSizeExact(1)), + }, + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, "gateway_id"), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "gateway_id", + }, + }, + }) +} + +func TestAccBedrockAgentCoreGateway_disappears(t *testing.T) { + ctx := acctest.Context(t) + var gateway bedrockagentcorecontrol.GetGatewayOutput + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_bedrockagentcore_gateway.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, 
names.BedrockEndpointID) + testAccPreCheckGateways(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.BedrockAgentCoreServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckGatewayDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccGatewayConfig_basic(rName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckGatewayExists(ctx, resourceName, &gateway), + acctest.CheckFrameworkResourceDisappears(ctx, acctest.Provider, tfbedrockagentcore.ResourceGateway, resourceName), + ), + ExpectNonEmptyPlan: true, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + }, + }, + }, + }, + }) +} + +func TestAccBedrockAgentCoreGateway_tags(t *testing.T) { + ctx := acctest.Context(t) + var gateway bedrockagentcorecontrol.GetGatewayOutput + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_bedrockagentcore_gateway.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, names.BedrockEndpointID) + testAccPreCheckGateways(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.BedrockAgentCoreServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckGatewayDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccGatewayConfig_tags1(rName, acctest.CtKey1, acctest.CtValue1), + Check: resource.ComposeTestCheckFunc( + testAccCheckGatewayExists(ctx, resourceName, &gateway), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + 
statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, "gateway_id"), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "gateway_id", + }, + { + Config: testAccGatewayConfig_tags2(rName, acctest.CtKey1, acctest.CtValue1Updated, acctest.CtKey2, acctest.CtValue2), + Check: resource.ComposeTestCheckFunc( + testAccCheckGatewayExists(ctx, resourceName, &gateway), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1Updated), + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + }, + { + Config: testAccGatewayConfig_tags1(rName, acctest.CtKey2, acctest.CtValue2), + Check: resource.ComposeTestCheckFunc( + testAccCheckGatewayExists(ctx, resourceName, &gateway), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + }, + }, + }) +} + +func TestAccBedrockAgentCoreGateway_description(t *testing.T) { + ctx := acctest.Context(t) + var gateway bedrockagentcorecontrol.GetGatewayOutput + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := 
"aws_bedrockagentcore_gateway.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, names.BedrockEndpointID) + testAccPreCheckGateways(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.BedrockAgentCoreServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckGatewayDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccGatewayConfig_description(rName, "Initial description"), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckGatewayExists(ctx, resourceName, &gateway), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrDescription), knownvalue.StringExact("Initial description")), + }, + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, "gateway_id"), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "gateway_id", + }, + { + Config: testAccGatewayConfig_description(rName, "Updated description"), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckGatewayExists(ctx, resourceName, &gateway), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrDescription), knownvalue.StringExact("Updated description")), + }, + }, + }, + }) +} + +func TestAccBedrockAgentCoreGateway_kmsKey(t *testing.T) { + acctest.Skip(t, "KMS key returns HTTP 500") + ctx := acctest.Context(t) + var gateway bedrockagentcorecontrol.GetGatewayOutput + rName := 
sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_bedrockagentcore_gateway.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, names.BedrockEndpointID) + testAccPreCheckGateways(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.BedrockAgentCoreServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckGatewayDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccGatewayConfig_kmsKey(rName, 0), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckGatewayExists(ctx, resourceName, &gateway), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + }, + }, + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, "gateway_id"), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "gateway_id", + }, + { + Config: testAccGatewayConfig_kmsKey(rName, 1), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckGatewayExists(ctx, resourceName, &gateway), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + }, + }, + }, + }, + }) +} + +func TestAccBedrockAgentCoreGateway_protocolConfiguration(t *testing.T) { + ctx := acctest.Context(t) + var gateway bedrockagentcorecontrol.GetGatewayOutput + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_bedrockagentcore_gateway.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, names.BedrockEndpointID) + testAccPreCheckGateways(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.BedrockAgentCoreServiceID), + 
ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckGatewayDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccGatewayConfig_protocolConfiguration(rName, "First set of instructions"), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckGatewayExists(ctx, resourceName, &gateway), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + }, + }, + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, "gateway_id"), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "gateway_id", + }, + { + Config: testAccGatewayConfig_protocolConfiguration(rName, "Second set of instructions"), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckGatewayExists(ctx, resourceName, &gateway), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + }, + }, + }, + }, + }) +} + +func testAccCheckGatewayDestroy(ctx context.Context) resource.TestCheckFunc { + return func(s *terraform.State) error { + conn := acctest.Provider.Meta().(*conns.AWSClient).BedrockAgentCoreClient(ctx) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "aws_bedrockagentcore_gateway" { + continue + } + + _, err := tfbedrockagentcore.FindGatewayByID(ctx, conn, rs.Primary.Attributes["gateway_id"]) + if retry.NotFound(err) { + continue + } + + if err != nil { + return err + } + + return fmt.Errorf("Bedrock Agent Core Gateway %s still exists", rs.Primary.Attributes["gateway_id"]) + } + + return nil + } +} + +func testAccCheckGatewayExists(ctx context.Context, n string, v *bedrockagentcorecontrol.GetGatewayOutput) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + 
return fmt.Errorf("Not found: %s", n) + } + + conn := acctest.Provider.Meta().(*conns.AWSClient).BedrockAgentCoreClient(ctx) + + resp, err := tfbedrockagentcore.FindGatewayByID(ctx, conn, rs.Primary.Attributes["gateway_id"]) + if err != nil { + return err + } + + *v = *resp + + return nil + } +} + +func testAccPreCheckGateways(ctx context.Context, t *testing.T) { + conn := acctest.Provider.Meta().(*conns.AWSClient).BedrockAgentCoreClient(ctx) + + var input bedrockagentcorecontrol.ListGatewaysInput + + _, err := conn.ListGateways(ctx, &input) + + if acctest.PreCheckSkipError(err) { + t.Skipf("skipping acceptance testing: %s", err) + } + if err != nil { + t.Fatalf("unexpected PreCheck error: %s", err) + } +} + +func testAccGatewayConfig_iamRole(rName string) string { + return fmt.Sprintf(` +data "aws_iam_policy_document" "test" { + statement { + effect = "Allow" + actions = ["sts:AssumeRole"] + principals { + type = "Service" + identifiers = ["bedrock-agentcore.amazonaws.com"] + } + } +} + +resource "aws_iam_role" "test" { + name = %[1]q + assume_role_policy = data.aws_iam_policy_document.test.json +} +`, rName) +} + +func testAccGatewayConfig_basic(rName string) string { + return acctest.ConfigCompose(testAccGatewayConfig_iamRole(rName), fmt.Sprintf(` +resource "aws_bedrockagentcore_gateway" "test" { + name = %[1]q + role_arn = aws_iam_role.test.arn + + authorizer_type = "CUSTOM_JWT" + authorizer_configuration { + custom_jwt_authorizer { + discovery_url = "https://accounts.google.com/.well-known/openid-configuration" + allowed_audience = ["test1", "test2"] + } + } + + protocol_type = "MCP" +} +`, rName)) +} + +func testAccGatewayConfig_protocolConfiguration(rName, instructions string) string { + return acctest.ConfigCompose(testAccGatewayConfig_iamRole(rName), fmt.Sprintf(` +resource "aws_bedrockagentcore_gateway" "test" { + name = %[1]q + role_arn = aws_iam_role.test.arn + + authorizer_type = "CUSTOM_JWT" + authorizer_configuration { + custom_jwt_authorizer { + 
discovery_url = "https://accounts.google.com/.well-known/openid-configuration" + allowed_audience = ["test1", "test2"] + } + } + + protocol_configuration { + mcp { + instructions = %[2]q + search_type = "SEMANTIC" + supported_versions = ["2025-03-26"] + } + } + + protocol_type = "MCP" +} +`, rName, instructions)) +} + +func testAccGatewayConfig_description(rName, description string) string { + return acctest.ConfigCompose(testAccGatewayConfig_iamRole(rName), fmt.Sprintf(` +resource "aws_bedrockagentcore_gateway" "test" { + name = %[1]q + description = %[2]q + role_arn = aws_iam_role.test.arn + + authorizer_type = "CUSTOM_JWT" + authorizer_configuration { + custom_jwt_authorizer { + discovery_url = "https://accounts.google.com/.well-known/openid-configuration" + allowed_audience = ["test1", "test2"] + } + } + + protocol_type = "MCP" +} +`, rName, description)) +} + +func testAccGatewayConfig_kmsKey(rName string, idx int) string { + return acctest.ConfigCompose(testAccGatewayConfig_iamRole(rName), fmt.Sprintf(` +resource "aws_kms_key" "test" { + count = 2 + + description = "Test key for %[1]s ${count.index}" + deletion_window_in_days = 7 +} + +resource "aws_bedrockagentcore_gateway" "test" { + name = %[1]q + role_arn = aws_iam_role.test.arn + exception_level = "DEBUG" + kms_key_arn = aws_kms_key.test[%[2]d].arn + + authorizer_type = "CUSTOM_JWT" + authorizer_configuration { + custom_jwt_authorizer { + discovery_url = "https://accounts.google.com/.well-known/openid-configuration" + allowed_audience = ["test1", "test2"] + } + } + + protocol_type = "MCP" +} +`, rName, idx)) +} + +func testAccGatewayConfig_tags1(rName, tagKey1, tagValue1 string) string { + return acctest.ConfigCompose(testAccGatewayConfig_iamRole(rName), fmt.Sprintf(` +resource "aws_bedrockagentcore_gateway" "test" { + name = %[1]q + role_arn = aws_iam_role.test.arn + + authorizer_type = "CUSTOM_JWT" + authorizer_configuration { + custom_jwt_authorizer { + discovery_url = 
"https://accounts.google.com/.well-known/openid-configuration" + allowed_audience = ["test1", "test2"] + } + } + + protocol_type = "MCP" + + tags = { + %[2]q = %[3]q + } +} +`, rName, tagKey1, tagValue1)) +} + +func testAccGatewayConfig_tags2(rName, tagKey1, tagValue1, tagKey2, tagValue2 string) string { + return acctest.ConfigCompose(testAccGatewayConfig_iamRole(rName), fmt.Sprintf(` +resource "aws_bedrockagentcore_gateway" "test" { + name = %[1]q + role_arn = aws_iam_role.test.arn + + authorizer_type = "CUSTOM_JWT" + authorizer_configuration { + custom_jwt_authorizer { + discovery_url = "https://accounts.google.com/.well-known/openid-configuration" + allowed_audience = ["test1", "test2"] + } + } + + protocol_type = "MCP" + + tags = { + %[2]q = %[3]q + %[4]q = %[5]q + } +} +`, rName, tagKey1, tagValue1, tagKey2, tagValue2)) +} diff --git a/internal/service/bedrockagentcore/generate.go b/internal/service/bedrockagentcore/generate.go new file mode 100644 index 000000000000..0fc704e6518e --- /dev/null +++ b/internal/service/bedrockagentcore/generate.go @@ -0,0 +1,9 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +//go:generate go run ../../generate/servicepackage/main.go +//go:generate go run ../../generate/tags/main.go -ServiceTagsMap -KVTValues -ListTags -UpdateTags +//go:generate go run ../../generate/tagstests/main.go +// ONLY generate directives and package declaration! Do not add anything else to this file. + +package bedrockagentcore diff --git a/internal/service/bedrockagentcore/service_endpoint_resolver_gen.go b/internal/service/bedrockagentcore/service_endpoint_resolver_gen.go new file mode 100644 index 000000000000..a09b8d72e4bd --- /dev/null +++ b/internal/service/bedrockagentcore/service_endpoint_resolver_gen.go @@ -0,0 +1,82 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. 
+ +package bedrockagentcore + +import ( + "context" + "fmt" + "net" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/bedrockagentcorecontrol" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ bedrockagentcorecontrol.EndpointResolverV2 = resolverV2{} + +type resolverV2 struct { + defaultResolver bedrockagentcorecontrol.EndpointResolverV2 +} + +func newEndpointResolverV2() resolverV2 { + return resolverV2{ + defaultResolver: bedrockagentcorecontrol.NewDefaultEndpointResolverV2(), + } +} + +func (r resolverV2) ResolveEndpoint(ctx context.Context, params bedrockagentcorecontrol.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws.ToBool(params.UseFIPS) + + if eps := params.Endpoint; aws.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": endpoint, + }) + + if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + params.UseFIPS = aws.Bool(false) + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URI.String(), + }) + + hostname := endpoint.URI.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + params.UseFIPS = aws.Bool(false) + } else { + err = fmt.Errorf("looking up bedrockagentcorecontrol endpoint %q: %w", hostname, err) + return + } + } else { + return endpoint, err + } + } + + 
return r.defaultResolver.ResolveEndpoint(ctx, params) +} + +func withBaseEndpoint(endpoint string) func(*bedrockagentcorecontrol.Options) { + return func(o *bedrockagentcorecontrol.Options) { + if endpoint != "" { + o.BaseEndpoint = aws.String(endpoint) + } + } +} diff --git a/internal/service/bedrockagentcore/service_endpoints_gen_test.go b/internal/service/bedrockagentcore/service_endpoints_gen_test.go new file mode 100644 index 000000000000..e6e0024aae8b --- /dev/null +++ b/internal/service/bedrockagentcore/service_endpoints_gen_test.go @@ -0,0 +1,602 @@ +// Code generated by internal/generate/serviceendpointtests/main.go; DO NOT EDIT. + +package bedrockagentcore_test + +import ( + "context" + "errors" + "fmt" + "maps" + "net" + "net/url" + "os" + "path/filepath" + "reflect" + "strings" + "testing" + + "github.com/aws/aws-sdk-go-v2/aws" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/bedrockagentcorecontrol" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "github.com/google/go-cmp/cmp" + "github.com/hashicorp/aws-sdk-go-base/v2/servicemocks" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + terraformsdk "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/errs" + "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" + "github.com/hashicorp/terraform-provider-aws/internal/provider/sdkv2" + "github.com/hashicorp/terraform-provider-aws/names" +) + +type endpointTestCase struct { + with []setupFunc + expected caseExpectations +} + +type caseSetup struct { + config map[string]any + configFile configFile + environmentVariables map[string]string +} + +type configFile struct { + baseUrl string + serviceUrl string +} + +type caseExpectations struct { + diags diag.Diagnostics + endpoint string + region string +} + +type 
apiCallParams struct { + endpoint string + region string +} + +type setupFunc func(setup *caseSetup) + +type callFunc func(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams + +const ( + packageNameConfigEndpoint = "https://packagename-config.endpoint.test/" + awsServiceEnvvarEndpoint = "https://service-envvar.endpoint.test/" + baseEnvvarEndpoint = "https://base-envvar.endpoint.test/" + serviceConfigFileEndpoint = "https://service-configfile.endpoint.test/" + baseConfigFileEndpoint = "https://base-configfile.endpoint.test/" +) + +const ( + packageName = "bedrockagentcore" + awsEnvVar = "AWS_ENDPOINT_URL_BEDROCK_AGENTCORE_CONTROL" + baseEnvVar = "AWS_ENDPOINT_URL" + configParam = "bedrock_agentcore_control" +) + +const ( + expectedCallRegion = "us-west-2" //lintignore:AWSAT003 +) + +func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.Setenv + ctx := t.Context() + const providerRegion = "us-west-2" //lintignore:AWSAT003 + const expectedEndpointRegion = providerRegion + + testcases := map[string]endpointTestCase{ + "no config": { + with: []setupFunc{withNoConfig}, + expected: expectDefaultEndpoint(ctx, t, expectedEndpointRegion), + }, + + // Package name endpoint on Config + + "package name endpoint config": { + with: []setupFunc{ + withPackageNameEndpointInConfig, + }, + expected: expectPackageNameConfigEndpoint(), + }, + + "package name endpoint config overrides aws service envvar": { + with: []setupFunc{ + withPackageNameEndpointInConfig, + withAwsEnvVar, + }, + expected: expectPackageNameConfigEndpoint(), + }, + + "package name endpoint config overrides base envvar": { + with: []setupFunc{ + withPackageNameEndpointInConfig, + withBaseEnvVar, + }, + expected: expectPackageNameConfigEndpoint(), + }, + + "package name endpoint config overrides service config file": { + with: []setupFunc{ + withPackageNameEndpointInConfig, + withServiceEndpointInConfigFile, + }, + expected: expectPackageNameConfigEndpoint(), + }, + + 
"package name endpoint config overrides base config file": { + with: []setupFunc{ + withPackageNameEndpointInConfig, + withBaseEndpointInConfigFile, + }, + expected: expectPackageNameConfigEndpoint(), + }, + + // Service endpoint in AWS envvar + + "service aws envvar": { + with: []setupFunc{ + withAwsEnvVar, + }, + expected: expectAwsEnvVarEndpoint(), + }, + + "service aws envvar overrides base envvar": { + with: []setupFunc{ + withAwsEnvVar, + withBaseEnvVar, + }, + expected: expectAwsEnvVarEndpoint(), + }, + + "service aws envvar overrides service config file": { + with: []setupFunc{ + withAwsEnvVar, + withServiceEndpointInConfigFile, + }, + expected: expectAwsEnvVarEndpoint(), + }, + + "service aws envvar overrides base config file": { + with: []setupFunc{ + withAwsEnvVar, + withBaseEndpointInConfigFile, + }, + expected: expectAwsEnvVarEndpoint(), + }, + + // Base endpoint in envvar + + "base endpoint envvar": { + with: []setupFunc{ + withBaseEnvVar, + }, + expected: expectBaseEnvVarEndpoint(), + }, + + "base endpoint envvar overrides service config file": { + with: []setupFunc{ + withBaseEnvVar, + withServiceEndpointInConfigFile, + }, + expected: expectBaseEnvVarEndpoint(), + }, + + "base endpoint envvar overrides base config file": { + with: []setupFunc{ + withBaseEnvVar, + withBaseEndpointInConfigFile, + }, + expected: expectBaseEnvVarEndpoint(), + }, + + // Service endpoint in config file + + "service config file": { + with: []setupFunc{ + withServiceEndpointInConfigFile, + }, + expected: expectServiceConfigFileEndpoint(), + }, + + "service config file overrides base config file": { + with: []setupFunc{ + withServiceEndpointInConfigFile, + withBaseEndpointInConfigFile, + }, + expected: expectServiceConfigFileEndpoint(), + }, + + // Base endpoint in config file + + "base endpoint config file": { + with: []setupFunc{ + withBaseEndpointInConfigFile, + }, + expected: expectBaseConfigFileEndpoint(), + }, + + // Use FIPS endpoint on Config + + "use fips config": { 
+ with: []setupFunc{ + withUseFIPSInConfig, + }, + expected: expectDefaultFIPSEndpoint(ctx, t, expectedEndpointRegion), + }, + + "use fips config with package name endpoint config": { + with: []setupFunc{ + withUseFIPSInConfig, + withPackageNameEndpointInConfig, + }, + expected: expectPackageNameConfigEndpoint(), + }, + } + + for name, testcase := range testcases { //nolint:paralleltest // uses t.Setenv + t.Run(name, func(t *testing.T) { + testEndpointCase(ctx, t, providerRegion, testcase, callService) + }) + } +} + +func defaultEndpoint(ctx context.Context, region string) (url.URL, error) { + r := bedrockagentcorecontrol.NewDefaultEndpointResolverV2() + + ep, err := r.ResolveEndpoint(ctx, bedrockagentcorecontrol.EndpointParameters{ + Region: aws.String(region), + }) + if err != nil { + return url.URL{}, err + } + + if ep.URI.Path == "" { + ep.URI.Path = "/" + } + + return ep.URI, nil +} + +func defaultFIPSEndpoint(ctx context.Context, region string) (url.URL, error) { + r := bedrockagentcorecontrol.NewDefaultEndpointResolverV2() + + ep, err := r.ResolveEndpoint(ctx, bedrockagentcorecontrol.EndpointParameters{ + Region: aws.String(region), + UseFIPS: aws.Bool(true), + }) + if err != nil { + return url.URL{}, err + } + + if ep.URI.Path == "" { + ep.URI.Path = "/" + } + + return ep.URI, nil +} + +func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { + t.Helper() + + client := meta.BedrockAgentCoreClient(ctx) + + var result apiCallParams + + input := bedrockagentcorecontrol.ListAgentRuntimesInput{} + _, err := client.ListAgentRuntimes(ctx, &input, + func(opts *bedrockagentcorecontrol.Options) { + opts.APIOptions = append(opts.APIOptions, + addRetrieveEndpointURLMiddleware(t, &result.endpoint), + addRetrieveRegionMiddleware(&result.region), + addCancelRequestMiddleware(), + ) + }, + ) + if err == nil { + t.Fatal("Expected an error, got none") + } else if !errors.Is(err, errCancelOperation) { + t.Fatalf("Unexpected error: %s", err) + 
} + + return result +} + +func withNoConfig(_ *caseSetup) { + // no-op +} + +func withPackageNameEndpointInConfig(setup *caseSetup) { + if _, ok := setup.config[names.AttrEndpoints]; !ok { + setup.config[names.AttrEndpoints] = []any{ + map[string]any{}, + } + } + endpoints := setup.config[names.AttrEndpoints].([]any)[0].(map[string]any) + endpoints[packageName] = packageNameConfigEndpoint +} + +func withAwsEnvVar(setup *caseSetup) { + setup.environmentVariables[awsEnvVar] = awsServiceEnvvarEndpoint +} + +func withBaseEnvVar(setup *caseSetup) { + setup.environmentVariables[baseEnvVar] = baseEnvvarEndpoint +} + +func withServiceEndpointInConfigFile(setup *caseSetup) { + setup.configFile.serviceUrl = serviceConfigFileEndpoint +} + +func withBaseEndpointInConfigFile(setup *caseSetup) { + setup.configFile.baseUrl = baseConfigFileEndpoint +} + +func withUseFIPSInConfig(setup *caseSetup) { + setup.config["use_fips_endpoint"] = true +} + +func expectDefaultEndpoint(ctx context.Context, t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(ctx, region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + + return caseExpectations{ + endpoint: endpoint.String(), + region: expectedCallRegion, + } +} + +func expectDefaultFIPSEndpoint(ctx context.Context, t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(ctx, region) + if err != nil { + t.Fatalf("resolving accessanalyzer FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(ctx, t, region) + } else if err != nil { + t.Fatalf("looking up accessanalyzer endpoint %q: %s", hostname, err) + } + + return caseExpectations{ + endpoint: endpoint.String(), + region: expectedCallRegion, + } +} + +func expectPackageNameConfigEndpoint() caseExpectations { + 
return caseExpectations{ + endpoint: packageNameConfigEndpoint, + region: expectedCallRegion, + } +} + +func expectAwsEnvVarEndpoint() caseExpectations { + return caseExpectations{ + endpoint: awsServiceEnvvarEndpoint, + region: expectedCallRegion, + } +} + +func expectBaseEnvVarEndpoint() caseExpectations { + return caseExpectations{ + endpoint: baseEnvvarEndpoint, + region: expectedCallRegion, + } +} + +func expectServiceConfigFileEndpoint() caseExpectations { + return caseExpectations{ + endpoint: serviceConfigFileEndpoint, + region: expectedCallRegion, + } +} + +func expectBaseConfigFileEndpoint() caseExpectations { + return caseExpectations{ + endpoint: baseConfigFileEndpoint, + region: expectedCallRegion, + } +} + +func testEndpointCase(ctx context.Context, t *testing.T, region string, testcase endpointTestCase, callF callFunc) { + t.Helper() + + setup := caseSetup{ + config: map[string]any{}, + environmentVariables: map[string]string{}, + } + + for _, f := range testcase.with { + f(&setup) + } + + config := map[string]any{ + names.AttrAccessKey: servicemocks.MockStaticAccessKey, + names.AttrSecretKey: servicemocks.MockStaticSecretKey, + names.AttrRegion: region, + names.AttrSkipCredentialsValidation: true, + names.AttrSkipRequestingAccountID: true, + } + + maps.Copy(config, setup.config) + + if setup.configFile.baseUrl != "" || setup.configFile.serviceUrl != "" { + config[names.AttrProfile] = "default" + tempDir := t.TempDir() + writeSharedConfigFile(t, &config, tempDir, generateSharedConfigFile(setup.configFile)) + } + + for k, v := range setup.environmentVariables { + t.Setenv(k, v) + } + + p, err := sdkv2.NewProvider(ctx) + if err != nil { + t.Fatal(err) + } + + p.TerraformVersion = "1.0.0" + + expectedDiags := testcase.expected.diags + diags := p.Configure(ctx, terraformsdk.NewResourceConfigRaw(config)) + + if diff := cmp.Diff(diags, expectedDiags, cmp.Comparer(sdkdiag.Comparer)); diff != "" { + t.Errorf("unexpected diagnostics difference: %s", diff) + } 
+ + if diags.HasError() { + return + } + + meta := p.Meta().(*conns.AWSClient) + + callParams := callF(ctx, t, meta) + + if e, a := testcase.expected.endpoint, callParams.endpoint; e != a { + t.Errorf("expected endpoint %q, got %q", e, a) + } + + if e, a := testcase.expected.region, callParams.region; e != a { + t.Errorf("expected region %q, got %q", e, a) + } +} + +func addRetrieveEndpointURLMiddleware(t *testing.T, endpoint *string) func(*middleware.Stack) error { + return func(stack *middleware.Stack) error { + return stack.Finalize.Add( + retrieveEndpointURLMiddleware(t, endpoint), + middleware.After, + ) + } +} + +func retrieveEndpointURLMiddleware(t *testing.T, endpoint *string) middleware.FinalizeMiddleware { + return middleware.FinalizeMiddlewareFunc( + "Test: Retrieve Endpoint", + func(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (middleware.FinalizeOutput, middleware.Metadata, error) { + t.Helper() + + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + t.Fatalf("Expected *github.com/aws/smithy-go/transport/http.Request, got %s", fullTypeName(in.Request)) + } + + url := request.URL + url.RawQuery = "" + url.Path = "/" + + *endpoint = url.String() + + return next.HandleFinalize(ctx, in) + }) +} + +func addRetrieveRegionMiddleware(region *string) func(*middleware.Stack) error { + return func(stack *middleware.Stack) error { + return stack.Serialize.Add( + retrieveRegionMiddleware(region), + middleware.After, + ) + } +} + +func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { + return middleware.SerializeMiddlewareFunc( + "Test: Retrieve Region", + func(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (middleware.SerializeOutput, middleware.Metadata, error) { + *region = awsmiddleware.GetRegion(ctx) + + return next.HandleSerialize(ctx, in) + }, + ) +} + +var errCancelOperation = errors.New("Test: Canceling request") + +func addCancelRequestMiddleware() 
func(*middleware.Stack) error { + return func(stack *middleware.Stack) error { + return stack.Finalize.Add( + cancelRequestMiddleware(), + middleware.After, + ) + } +} + +// cancelRequestMiddleware creates a Smithy middleware that intercepts the request before sending and cancels it +func cancelRequestMiddleware() middleware.FinalizeMiddleware { + return middleware.FinalizeMiddlewareFunc( + "Test: Cancel Requests", + func(_ context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (middleware.FinalizeOutput, middleware.Metadata, error) { + return middleware.FinalizeOutput{}, middleware.Metadata{}, errCancelOperation + }) +} + +func fullTypeName(i any) string { + return fullValueTypeName(reflect.ValueOf(i)) +} + +func fullValueTypeName(v reflect.Value) string { + if v.Kind() == reflect.Ptr { + return "*" + fullValueTypeName(reflect.Indirect(v)) + } + + requestType := v.Type() + return fmt.Sprintf("%s.%s", requestType.PkgPath(), requestType.Name()) +} + +func generateSharedConfigFile(config configFile) string { + var buf strings.Builder + + buf.WriteString(` +[default] +aws_access_key_id = DefaultSharedCredentialsAccessKey +aws_secret_access_key = DefaultSharedCredentialsSecretKey +`) + if config.baseUrl != "" { + fmt.Fprintf(&buf, "endpoint_url = %s\n", config.baseUrl) + } + + if config.serviceUrl != "" { + fmt.Fprintf(&buf, ` +services = endpoint-test + +[services endpoint-test] +%[1]s = + endpoint_url = %[2]s +`, configParam, serviceConfigFileEndpoint) + } + + return buf.String() +} + +func writeSharedConfigFile(t *testing.T, config *map[string]any, tempDir, content string) string { + t.Helper() + + file, err := os.Create(filepath.Join(tempDir, "aws-sdk-go-base-shared-configuration-file")) + if err != nil { + t.Fatalf("creating shared configuration file: %s", err) + } + + _, err = file.WriteString(content) + if err != nil { + t.Fatalf(" writing shared configuration file: %s", err) + } + + if v, ok := (*config)[names.AttrSharedConfigFiles]; !ok 
{ + (*config)[names.AttrSharedConfigFiles] = []any{file.Name()} + } else { + (*config)[names.AttrSharedConfigFiles] = append(v.([]any), file.Name()) + } + + return file.Name() +} diff --git a/internal/service/bedrockagentcore/service_package_gen.go b/internal/service/bedrockagentcore/service_package_gen.go new file mode 100644 index 000000000000..efde713f4962 --- /dev/null +++ b/internal/service/bedrockagentcore/service_package_gen.go @@ -0,0 +1,146 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. + +package bedrockagentcore + +import ( + "context" + "unique" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/bedrockagentcorecontrol" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + inttypes "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/vcr" + "github.com/hashicorp/terraform-provider-aws/names" +) + +type servicePackage struct{} + +func (p *servicePackage) FrameworkDataSources(ctx context.Context) []*inttypes.ServicePackageFrameworkDataSource { + return []*inttypes.ServicePackageFrameworkDataSource{} +} + +func (p *servicePackage) FrameworkResources(ctx context.Context) []*inttypes.ServicePackageFrameworkResource { + return []*inttypes.ServicePackageFrameworkResource{ + { + Factory: newAgentRuntimeResource, + TypeName: "aws_bedrockagentcore_agent_runtime", + Name: "Agent Runtime", + Tags: unique.Make(inttypes.ServicePackageResourceTags{ + IdentifierAttribute: "agent_runtime_arn", + }), + Region: unique.Make(inttypes.ResourceRegionDefault()), + }, + { + Factory: newAgentRuntimeEndpointResource, + TypeName: "aws_bedrockagentcore_agent_runtime_endpoint", + Name: "Agent Runtime Endpoint", + Tags: unique.Make(inttypes.ServicePackageResourceTags{ + IdentifierAttribute: "agent_runtime_endpoint_arn", + }), + Region: unique.Make(inttypes.ResourceRegionDefault()), + }, + { + 
Factory: newAPIKeyCredentialProviderResource, + TypeName: "aws_bedrockagentcore_api_key_credential_provider", + Name: "Api Key Credential Provider", + Region: unique.Make(inttypes.ResourceRegionDefault()), + }, + { + Factory: newBrowserResource, + TypeName: "aws_bedrockagentcore_browser", + Name: "Browser", + Tags: unique.Make(inttypes.ServicePackageResourceTags{ + IdentifierAttribute: "browser_arn", + }), + Region: unique.Make(inttypes.ResourceRegionDefault()), + }, + { + Factory: newCodeInterpreterResource, + TypeName: "aws_bedrockagentcore_code_interpreter", + Name: "Code Interpreter", + Tags: unique.Make(inttypes.ServicePackageResourceTags{ + IdentifierAttribute: "code_interpreter_arn", + }), + Region: unique.Make(inttypes.ResourceRegionDefault()), + }, + { + Factory: newGatewayResource, + TypeName: "aws_bedrockagentcore_gateway", + Name: "Gateway", + Tags: unique.Make(inttypes.ServicePackageResourceTags{ + IdentifierAttribute: "gateway_arn", + }), + Region: unique.Make(inttypes.ResourceRegionDefault()), + }, + { + Factory: newGatewayTargetResource, + TypeName: "aws_bedrockagentcore_gateway_target", + Name: "Gateway Target", + Region: unique.Make(inttypes.ResourceRegionDefault()), + }, + } +} + +func (p *servicePackage) SDKDataSources(ctx context.Context) []*inttypes.ServicePackageSDKDataSource { + return []*inttypes.ServicePackageSDKDataSource{} +} + +func (p *servicePackage) SDKResources(ctx context.Context) []*inttypes.ServicePackageSDKResource { + return []*inttypes.ServicePackageSDKResource{} +} + +func (p *servicePackage) ServicePackageName() string { + return names.BedrockAgentCore +} + +// NewClient returns a new AWS SDK for Go v2 client for this service package's AWS API. 
+func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*bedrockagentcorecontrol.Client, error) { + cfg := *(config["aws_sdkv2_config"].(*aws.Config)) + optFns := []func(*bedrockagentcorecontrol.Options){ + bedrockagentcorecontrol.WithEndpointResolverV2(newEndpointResolverV2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + func(o *bedrockagentcorecontrol.Options) { + if region := config[names.AttrRegion].(string); o.Region != region { + tflog.Info(ctx, "overriding provider-configured AWS API region", map[string]any{ + "service": p.ServicePackageName(), + "original_region": o.Region, + "override_region": region, + }) + o.Region = region + } + }, + func(o *bedrockagentcorecontrol.Options) { + if inContext, ok := conns.FromContext(ctx); ok && inContext.VCREnabled() { + tflog.Info(ctx, "overriding retry behavior to immediately return VCR errors") + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), vcr.InteractionNotFoundRetryableFunc) + } + }, + withExtraOptions(ctx, p, config), + } + + return bedrockagentcorecontrol.NewFromConfig(cfg, optFns...), nil +} + +// withExtraOptions returns a functional option that allows this service package to specify extra API client options. +// This option is always called after any generated options. 
+func withExtraOptions(ctx context.Context, sp conns.ServicePackage, config map[string]any) func(*bedrockagentcorecontrol.Options) { + if v, ok := sp.(interface { + withExtraOptions(context.Context, map[string]any) []func(*bedrockagentcorecontrol.Options) + }); ok { + optFns := v.withExtraOptions(ctx, config) + + return func(o *bedrockagentcorecontrol.Options) { + for _, optFn := range optFns { + optFn(o) + } + } + } + + return func(*bedrockagentcorecontrol.Options) {} +} + +func ServicePackage(ctx context.Context) conns.ServicePackage { + return &servicePackage{} +} diff --git a/internal/service/bedrockagentcore/sweep.go b/internal/service/bedrockagentcore/sweep.go new file mode 100644 index 000000000000..2e2527c6a653 --- /dev/null +++ b/internal/service/bedrockagentcore/sweep.go @@ -0,0 +1,213 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package bedrockagentcore + +import ( + "context" + + "github.com/YakDriver/smarterr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/bedrockagentcorecontrol" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/sweep" + "github.com/hashicorp/terraform-provider-aws/internal/sweep/awsv2" + "github.com/hashicorp/terraform-provider-aws/internal/sweep/framework" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func RegisterSweepers() { + awsv2.Register("aws_bedrockagentcore_agent_runtime", sweepAgentRuntimes, "aws_bedrockagentcore_agent_runtime_endpoint") + awsv2.Register("aws_bedrockagentcore_agent_runtime_endpoint", sweepAgentRuntimeEndpoints) + awsv2.Register("aws_bedrockagentcore_code_interpreter", sweepCodeInterpreters) + awsv2.Register("aws_bedrockagentcore_browser", sweepBrowsers) + awsv2.Register("aws_bedrockagentcore_api_key_credential_provider", sweepAPIKeyCredentialProviders) + awsv2.Register("aws_bedrockagentcore_gateway", sweepGateways, "aws_bedrockagentcore_gateway_target") + 
awsv2.Register("aws_bedrockagentcore_gateway_target", sweepGatewayTargets) +} + +func sweepAgentRuntimes(ctx context.Context, client *conns.AWSClient) ([]sweep.Sweepable, error) { + input := bedrockagentcorecontrol.ListAgentRuntimesInput{} + conn := client.BedrockAgentCoreClient(ctx) + var sweepResources []sweep.Sweepable + + pages := bedrockagentcorecontrol.NewListAgentRuntimesPaginator(conn, &input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + if err != nil { + return nil, smarterr.NewError(err) + } + + for _, v := range page.AgentRuntimes { + sweepResources = append(sweepResources, framework.NewSweepResource(newAgentRuntimeResource, client, + framework.NewAttribute("agent_runtime_id", aws.ToString(v.AgentRuntimeId))), + ) + } + } + + return sweepResources, nil +} + +func sweepAgentRuntimeEndpoints(ctx context.Context, client *conns.AWSClient) ([]sweep.Sweepable, error) { + input := bedrockagentcorecontrol.ListAgentRuntimesInput{} + conn := client.BedrockAgentCoreClient(ctx) + var sweepResources []sweep.Sweepable + + pages := bedrockagentcorecontrol.NewListAgentRuntimesPaginator(conn, &input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + if err != nil { + return nil, smarterr.NewError(err) + } + + for _, v := range page.AgentRuntimes { + agentRuntimeID := aws.ToString(v.AgentRuntimeId) + input := bedrockagentcorecontrol.ListAgentRuntimeEndpointsInput{ + AgentRuntimeId: aws.String(agentRuntimeID), + } + + pages := bedrockagentcorecontrol.NewListAgentRuntimeEndpointsPaginator(conn, &input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + if err != nil { + return nil, smarterr.NewError(err) + } + + for _, v := range page.RuntimeEndpoints { + sweepResources = append(sweepResources, framework.NewSweepResource(newAgentRuntimeEndpointResource, client, + framework.NewAttribute("agent_runtime_id", agentRuntimeID), + framework.NewAttribute(names.AttrName, aws.ToString(v.Name)), + ), + ) + } + } + } + } + + return 
sweepResources, nil +} + +func sweepGateways(ctx context.Context, client *conns.AWSClient) ([]sweep.Sweepable, error) { + input := bedrockagentcorecontrol.ListGatewaysInput{} + conn := client.BedrockAgentCoreClient(ctx) + var sweepResources []sweep.Sweepable + + pages := bedrockagentcorecontrol.NewListGatewaysPaginator(conn, &input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + if err != nil { + return nil, smarterr.NewError(err) + } + + for _, v := range page.Items { + sweepResources = append(sweepResources, framework.NewSweepResource(newGatewayResource, client, + framework.NewAttribute("gateway_id", aws.ToString(v.GatewayId))), + ) + } + } + + return sweepResources, nil +} + +func sweepGatewayTargets(ctx context.Context, client *conns.AWSClient) ([]sweep.Sweepable, error) { + input := bedrockagentcorecontrol.ListGatewaysInput{} + conn := client.BedrockAgentCoreClient(ctx) + var sweepResources []sweep.Sweepable + + pages := bedrockagentcorecontrol.NewListGatewaysPaginator(conn, &input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + if err != nil { + return nil, smarterr.NewError(err) + } + + for _, v := range page.Items { + gatewayID := aws.ToString(v.GatewayId) + input := bedrockagentcorecontrol.ListGatewayTargetsInput{ + GatewayIdentifier: aws.String(gatewayID), + } + + pages := bedrockagentcorecontrol.NewListGatewayTargetsPaginator(conn, &input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + if err != nil { + return nil, smarterr.NewError(err) + } + + for _, v := range page.Items { + sweepResources = append(sweepResources, framework.NewSweepResource(newGatewayTargetResource, client, + framework.NewAttribute("gateway_identifier", gatewayID), + framework.NewAttribute("target_id", aws.ToString(v.TargetId))), + ) + } + } + } + } + + return sweepResources, nil +} + +func sweepBrowsers(ctx context.Context, client *conns.AWSClient) ([]sweep.Sweepable, error) { + input := 
bedrockagentcorecontrol.ListBrowsersInput{} + conn := client.BedrockAgentCoreClient(ctx) + var sweepResources []sweep.Sweepable + + pages := bedrockagentcorecontrol.NewListBrowsersPaginator(conn, &input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + if err != nil { + return nil, smarterr.NewError(err) + } + + for _, v := range page.BrowserSummaries { + sweepResources = append(sweepResources, framework.NewSweepResource(newBrowserResource, client, + framework.NewAttribute("browser_id", aws.ToString(v.BrowserId))), + ) + } + } + + return sweepResources, nil +} + +func sweepAPIKeyCredentialProviders(ctx context.Context, client *conns.AWSClient) ([]sweep.Sweepable, error) { + input := bedrockagentcorecontrol.ListApiKeyCredentialProvidersInput{} + conn := client.BedrockAgentCoreClient(ctx) + var sweepResources []sweep.Sweepable + + pages := bedrockagentcorecontrol.NewListApiKeyCredentialProvidersPaginator(conn, &input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + if err != nil { + return nil, smarterr.NewError(err) + } + + for _, v := range page.CredentialProviders { + sweepResources = append(sweepResources, framework.NewSweepResource(newAPIKeyCredentialProviderResource, client, + framework.NewAttribute(names.AttrName, aws.ToString(v.Name))), + ) + } + } + return sweepResources, nil +} + +func sweepCodeInterpreters(ctx context.Context, client *conns.AWSClient) ([]sweep.Sweepable, error) { + input := bedrockagentcorecontrol.ListCodeInterpretersInput{} + conn := client.BedrockAgentCoreClient(ctx) + var sweepResources []sweep.Sweepable + + pages := bedrockagentcorecontrol.NewListCodeInterpretersPaginator(conn, &input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + if err != nil { + return nil, smarterr.NewError(err) + } + + for _, v := range page.CodeInterpreterSummaries { + sweepResources = append(sweepResources, framework.NewSweepResource(newCodeInterpreterResource, client, + 
framework.NewAttribute("code_interpreter_id", aws.ToString(v.CodeInterpreterId))), + ) + } + } + + return sweepResources, nil +} diff --git a/internal/service/bedrockagentcore/tags_gen.go b/internal/service/bedrockagentcore/tags_gen.go new file mode 100644 index 000000000000..3ed4e7ef14a1 --- /dev/null +++ b/internal/service/bedrockagentcore/tags_gen.go @@ -0,0 +1,128 @@ +// Code generated by internal/generate/tags/main.go; DO NOT EDIT. +package bedrockagentcore + +import ( + "context" + + "github.com/YakDriver/smarterr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/bedrockagentcorecontrol" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/logging" + tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" + "github.com/hashicorp/terraform-provider-aws/names" +) + +// listTags lists bedrockagentcore service tags. +// The identifier is typically the Amazon Resource Name (ARN), although +// it may also be a different identifier depending on the service. +func listTags(ctx context.Context, conn *bedrockagentcorecontrol.Client, identifier string, optFns ...func(*bedrockagentcorecontrol.Options)) (tftags.KeyValueTags, error) { + input := bedrockagentcorecontrol.ListTagsForResourceInput{ + ResourceArn: aws.String(identifier), + } + + output, err := conn.ListTagsForResource(ctx, &input, optFns...) + + if err != nil { + return tftags.New(ctx, nil), smarterr.NewError(err) + } + + return keyValueTags(ctx, output.Tags), nil +} + +// ListTags lists bedrockagentcore service tags and set them in Context. +// It is called from outside this package. 
+func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier string) error { + tags, err := listTags(ctx, meta.(*conns.AWSClient).BedrockAgentCoreClient(ctx), identifier) + + if err != nil { + return smarterr.NewError(err) + } + + if inContext, ok := tftags.FromContext(ctx); ok { + inContext.TagsOut = option.Some(tags) + } + + return nil +} + +// map[string]string handling + +// svcTags returns bedrockagentcore service tags. +func svcTags(tags tftags.KeyValueTags) map[string]string { + return tags.Map() +} + +// keyValueTags creates tftags.KeyValueTags from bedrockagentcore service tags. +func keyValueTags(ctx context.Context, tags map[string]string) tftags.KeyValueTags { + return tftags.New(ctx, tags) +} + +// getTagsIn returns bedrockagentcore service tags from Context. +// nil is returned if there are no input tags. +func getTagsIn(ctx context.Context) map[string]string { + if inContext, ok := tftags.FromContext(ctx); ok { + if tags := svcTags(inContext.TagsIn.UnwrapOrDefault()); len(tags) > 0 { + return tags + } + } + + return nil +} + +// setTagsOut sets bedrockagentcore service tags in Context. +func setTagsOut(ctx context.Context, tags map[string]string) { + if inContext, ok := tftags.FromContext(ctx); ok { + inContext.TagsOut = option.Some(keyValueTags(ctx, tags)) + } +} + +// updateTags updates bedrockagentcore service tags. +// The identifier is typically the Amazon Resource Name (ARN), although +// it may also be a different identifier depending on the service. 
+func updateTags(ctx context.Context, conn *bedrockagentcorecontrol.Client, identifier string, oldTagsMap, newTagsMap any, optFns ...func(*bedrockagentcorecontrol.Options)) error { + oldTags := tftags.New(ctx, oldTagsMap) + newTags := tftags.New(ctx, newTagsMap) + + ctx = tflog.SetField(ctx, logging.KeyResourceId, identifier) + + removedTags := oldTags.Removed(newTags) + removedTags = removedTags.IgnoreSystem(names.BedrockAgentCore) + if len(removedTags) > 0 { + input := bedrockagentcorecontrol.UntagResourceInput{ + ResourceArn: aws.String(identifier), + TagKeys: removedTags.Keys(), + } + + _, err := conn.UntagResource(ctx, &input, optFns...) + + if err != nil { + return smarterr.NewError(err) + } + } + + updatedTags := oldTags.Updated(newTags) + updatedTags = updatedTags.IgnoreSystem(names.BedrockAgentCore) + if len(updatedTags) > 0 { + input := bedrockagentcorecontrol.TagResourceInput{ + ResourceArn: aws.String(identifier), + Tags: svcTags(updatedTags), + } + + _, err := conn.TagResource(ctx, &input, optFns...) + + if err != nil { + return smarterr.NewError(err) + } + } + + return nil +} + +// UpdateTags updates bedrockagentcore service tags. +// It is called from outside this package. 
+func (p *servicePackage) UpdateTags(ctx context.Context, meta any, identifier string, oldTags, newTags any) error { + return updateTags(ctx, meta.(*conns.AWSClient).BedrockAgentCoreClient(ctx), identifier, oldTags, newTags) +} diff --git a/internal/service/bedrockagentcore/test-fixtures/lambdatest.zip b/internal/service/bedrockagentcore/test-fixtures/lambdatest.zip new file mode 100644 index 000000000000..5c636e955b2c Binary files /dev/null and b/internal/service/bedrockagentcore/test-fixtures/lambdatest.zip differ diff --git a/internal/service/billing/billing_service_account_data_source_test.go b/internal/service/billing/billing_service_account_data_source_test.go index 5bbf4ed6198c..320a5c99b18c 100644 --- a/internal/service/billing/billing_service_account_data_source_test.go +++ b/internal/service/billing/billing_service_account_data_source_test.go @@ -17,7 +17,7 @@ func TestAccBillingServiceAccountDataSource_basic(t *testing.T) { dataSourceName := "data.aws_billing_service_account.test" billingAccountID := "386209384616" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, tfmeta.PseudoServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, diff --git a/internal/service/billing/service_endpoint_resolver_gen.go b/internal/service/billing/service_endpoint_resolver_gen.go index 8e5d21e47348..0a1961b7dfb5 100644 --- a/internal/service/billing/service_endpoint_resolver_gen.go +++ b/internal/service/billing/service_endpoint_resolver_gen.go @@ -62,7 +62,7 @@ func (r resolverV2) ResolveEndpoint(ctx context.Context, params billing.Endpoint }) params.UseFIPS = aws.Bool(false) } else { - err = fmt.Errorf("looking up billing endpoint %q: %s", hostname, err) + err = fmt.Errorf("looking up billing endpoint %q: %w", hostname, err) return } } else { diff --git a/internal/service/billing/service_endpoints_gen_test.go 
b/internal/service/billing/service_endpoints_gen_test.go index f230fa2a9ba7..25ba1263329a 100644 --- a/internal/service/billing/service_endpoints_gen_test.go +++ b/internal/service/billing/service_endpoints_gen_test.go @@ -521,7 +521,7 @@ func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { ) } -var errCancelOperation = fmt.Errorf("Test: Canceling request") +var errCancelOperation = errors.New("Test: Canceling request") func addCancelRequestMiddleware() func(*middleware.Stack) error { return func(stack *middleware.Stack) error { diff --git a/internal/service/billing/service_package_gen.go b/internal/service/billing/service_package_gen.go index 077999b4f199..5e30758bd55f 100644 --- a/internal/service/billing/service_package_gen.go +++ b/internal/service/billing/service_package_gen.go @@ -7,7 +7,6 @@ import ( "unique" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/billing" "github.com/hashicorp/aws-sdk-go-base/v2/endpoints" "github.com/hashicorp/terraform-plugin-log/tflog" @@ -27,6 +26,12 @@ func (p *servicePackage) FrameworkDataSources(ctx context.Context) []*inttypes.S Name: "Service Account", Region: unique.Make(inttypes.ResourceRegionDisabled()), }, + { + Factory: newDataSourceViews, + TypeName: "aws_billing_views", + Name: "Views", + Region: unique.Make(inttypes.ResourceRegionDisabled()), + }, } } @@ -65,7 +70,7 @@ func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) ( func(o *billing.Options) { if inContext, ok := conns.FromContext(ctx); ok && inContext.VCREnabled() { tflog.Info(ctx, "overriding retry behavior to immediately return VCR errors") - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(vcr.InteractionNotFoundRetryableFunc)) + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), vcr.InteractionNotFoundRetryableFunc) } }, func(o *billing.Options) { diff --git 
a/internal/service/billing/views_data_source.go b/internal/service/billing/views_data_source.go new file mode 100644 index 000000000000..fdc4ebe17101 --- /dev/null +++ b/internal/service/billing/views_data_source.go @@ -0,0 +1,110 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package billing + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/service/billing" + awstypes "github.com/aws/aws-sdk-go-v2/service/billing/types" + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-provider-aws/internal/framework" + "github.com/hashicorp/terraform-provider-aws/internal/framework/flex" + fwtypes "github.com/hashicorp/terraform-provider-aws/internal/framework/types" + "github.com/hashicorp/terraform-provider-aws/internal/smerr" +) + +// @FrameworkDataSource("aws_billing_views", name="Views") +func newDataSourceViews(context.Context) (datasource.DataSourceWithConfigure, error) { + return &dataSourceViews{}, nil +} + +const ( + DSNameViews = "Views Data Source" +) + +type dataSourceViews struct { + framework.DataSourceWithModel[dataSourceViewsModel] +} + +func (d *dataSourceViews) Schema(ctx context.Context, req datasource.SchemaRequest, resp *datasource.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: map[string]schema.Attribute{ + "billing_view_types": schema.ListAttribute{ + CustomType: fwtypes.ListOfStringEnumType[awstypes.BillingViewType](), + Optional: true, + ElementType: types.StringType, + }, + "billing_view": framework.ResourceComputedListOfObjectsAttribute[dataSourceBillingViewModel](ctx, nil, nil), + }, + } +} + +func (d *dataSourceViews) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + conn := d.Meta().BillingClient(ctx) + + var data dataSourceViewsModel + smerr.EnrichAppend(ctx, &resp.Diagnostics, 
req.Config.Get(ctx, &data)) + if resp.Diagnostics.HasError() { + return + } + + var billingViewTypes []awstypes.BillingViewType + smerr.EnrichAppend(ctx, &resp.Diagnostics, data.BillingViewTypes.ElementsAs(ctx, &billingViewTypes, false)) + if resp.Diagnostics.HasError() { + return + } + + out, err := findViewsByViewTypes(ctx, conn, billingViewTypes) + if err != nil { + smerr.AddError(ctx, &resp.Diagnostics, err, smerr.ID, data.BillingViewTypes.String()) + return + } + + smerr.EnrichAppend(ctx, &resp.Diagnostics, flex.Flatten(ctx, out, &data.BillingView, flex.WithFieldNamePrefix("Views")), smerr.ID, data.BillingViewTypes.String()) + if resp.Diagnostics.HasError() { + return + } + + smerr.EnrichAppend(ctx, &resp.Diagnostics, resp.State.Set(ctx, &data), smerr.ID, data.BillingViewTypes.String()) +} + +func findViewsByViewTypes(ctx context.Context, conn *billing.Client, billingViewTypes []awstypes.BillingViewType) ([]awstypes.BillingViewListElement, error) { + input := billing.ListBillingViewsInput{} + if len(billingViewTypes) > 0 { + input.BillingViewTypes = billingViewTypes + } + + return findViews(ctx, conn, &input) +} + +func findViews(ctx context.Context, conn *billing.Client, input *billing.ListBillingViewsInput) ([]awstypes.BillingViewListElement, error) { + var results []awstypes.BillingViewListElement + + paginator := billing.NewListBillingViewsPaginator(conn, input) + for paginator.HasMorePages() { + page, err := paginator.NextPage(ctx) + if err != nil { + return nil, err + } + results = append(results, page.BillingViews...) 
+ } + + return results, nil +} + +type dataSourceViewsModel struct { + BillingViewTypes fwtypes.ListOfStringEnum[awstypes.BillingViewType] `tfsdk:"billing_view_types"` + BillingView fwtypes.ListNestedObjectValueOf[dataSourceBillingViewModel] `tfsdk:"billing_view"` +} + +type dataSourceBillingViewModel struct { + ARN types.String `tfsdk:"arn"` + BillingViewType types.String `tfsdk:"billing_view_type"` + Description types.String `tfsdk:"description"` + Name types.String `tfsdk:"name"` + OwnerAccountId types.String `tfsdk:"owner_account_id"` +} diff --git a/internal/service/billing/views_data_source_test.go b/internal/service/billing/views_data_source_test.go new file mode 100644 index 000000000000..45ab53ae7b1f --- /dev/null +++ b/internal/service/billing/views_data_source_test.go @@ -0,0 +1,60 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package billing_test + +import ( + "testing" + + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccBillingViewsDataSource_basic(t *testing.T) { + ctx := acctest.Context(t) + + dataSourceName := "data.aws_billing_views.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.BillingServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + Config: testAccViewsDataSourceConfig_basic(), + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttr(dataSourceName, "billing_view.#", "1"), + acctest.CheckResourceAttrGlobalARN(ctx, dataSourceName, "billing_view.0.arn", "billing", "billingview/primary"), + resource.TestCheckResourceAttr(dataSourceName, "billing_view.0.billing_view_type", "PRIMARY"), + resource.TestCheckResourceAttr(dataSourceName, "billing_view.0.name", "Primary View"), + ), + }, 
+ { + Config: testAccViewsDataSourceConfig_noArguments(), + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckTypeSetElemNestedAttrs(dataSourceName, "billing_view.*", map[string]string{ + "billing_view_type": "PRIMARY", + names.AttrName: "Primary View", + }), + ), + }, + }, + }) +} + +func testAccViewsDataSourceConfig_basic() string { + return ` +data "aws_billing_views" "test" { + billing_view_types = ["PRIMARY"] +} +` +} + +func testAccViewsDataSourceConfig_noArguments() string { + return ` +data "aws_billing_views" "test" {} +` +} diff --git a/internal/service/budgets/budget.go b/internal/service/budgets/budget.go index 0d005aacd6f2..d480ae1094ff 100644 --- a/internal/service/budgets/budget.go +++ b/internal/service/budgets/budget.go @@ -95,6 +95,11 @@ func ResourceBudget() *schema.Resource { }, }, }, + "billing_view_arn": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidARN, + }, "budget_type": { Type: schema.TypeString, Required: true, @@ -378,6 +383,7 @@ func resourceBudgetRead(ctx context.Context, d *schema.ResourceData, meta any) d } d.Set(names.AttrARN, arn.String()) d.Set("budget_type", budget.BudgetType) + d.Set("billing_view_arn", budget.BillingViewArn) if err := d.Set("cost_filter", convertCostFiltersToMap(budget.CostFilters)); err != nil { return sdkdiag.AppendErrorf(diags, "setting cost_filter: %s", err) @@ -707,14 +713,14 @@ func updateBudgetNotifications(ctx context.Context, conn *budgets.Client, d *sch _, err := conn.DeleteNotification(ctx, input) if err != nil { - return fmt.Errorf("deleting Budget (%s) notification: %s", d.Id(), err) + return fmt.Errorf("deleting Budget (%s) notification: %w", d.Id(), err) } } err = createBudgetNotifications(ctx, conn, addNotifications, addSubscribers, budgetName, accountID) if err != nil { - return fmt.Errorf("creating Budget (%s) notifications: %s", d.Id(), err) + return fmt.Errorf("creating Budget (%s) notifications: %w", d.Id(), err) } } @@ -881,6 +887,10 @@ func 
expandBudgetUnmarshal(d *schema.ResourceData) (*awstypes.Budget, error) { } } + if v, ok := d.GetOk("billing_view_arn"); ok { + budget.BillingViewArn = aws.String(v.(string)) + } + if v, ok := d.GetOk("cost_types"); ok && len(v.([]any)) > 0 && v.([]any)[0] != nil { budget.CostTypes = expandCostTypes(v.([]any)[0].(map[string]any)) } diff --git a/internal/service/budgets/budget_action.go b/internal/service/budgets/budget_action.go index 9e0993942c61..53548e5abe1b 100644 --- a/internal/service/budgets/budget_action.go +++ b/internal/service/budgets/budget_action.go @@ -251,7 +251,7 @@ func resourceBudgetActionCreate(ctx context.Context, d *schema.ResourceData, met ResourceTags: getTagsIn(ctx), } - outputRaw, err := tfresource.RetryWhenIsA[*awstypes.AccessDeniedException](ctx, propagationTimeout, func() (any, error) { + outputRaw, err := tfresource.RetryWhenIsA[any, *awstypes.AccessDeniedException](ctx, propagationTimeout, func(ctx context.Context) (any, error) { return conn.CreateBudgetAction(ctx, input) }) @@ -382,7 +382,7 @@ func resourceBudgetActionDelete(ctx context.Context, d *schema.ResourceData, met } log.Printf("[DEBUG] Deleting Budget Action: %s", d.Id()) - _, err = tfresource.RetryWhenIsA[*awstypes.ResourceLockedException](ctx, d.Timeout(schema.TimeoutDelete), func() (any, error) { + _, err = tfresource.RetryWhenIsA[any, *awstypes.ResourceLockedException](ctx, d.Timeout(schema.TimeoutDelete), func(ctx context.Context) (any, error) { return conn.DeleteBudgetAction(ctx, &budgets.DeleteBudgetActionInput{ AccountId: aws.String(accountID), ActionId: aws.String(actionID), diff --git a/internal/service/budgets/budget_data_source.go b/internal/service/budgets/budget_data_source.go index 3f3203988033..b1026ad6168d 100644 --- a/internal/service/budgets/budget_data_source.go +++ b/internal/service/budgets/budget_data_source.go @@ -68,6 +68,10 @@ func DataSourceBudget() *schema.Resource { }, }, }, + "billing_view_arn": { + Type: schema.TypeString, + Computed: true, + }, 
"budget_type": { Type: schema.TypeString, Computed: true, @@ -299,6 +303,8 @@ func dataSourceBudgetRead(ctx context.Context, d *schema.ResourceData, meta any) } d.Set(names.AttrARN, arn.String()) + d.Set("billing_view_arn", budget.BillingViewArn) + d.Set("budget_type", budget.BudgetType) if err := d.Set("budget_limit", flattenSpend(budget.BudgetLimit)); err != nil { diff --git a/internal/service/budgets/budget_data_source_tags_gen_test.go b/internal/service/budgets/budget_data_source_tags_gen_test.go index dcbbea53b0c1..6459865b4c65 100644 --- a/internal/service/budgets/budget_data_source_tags_gen_test.go +++ b/internal/service/budgets/budget_data_source_tags_gen_test.go @@ -6,7 +6,6 @@ import ( "testing" "github.com/hashicorp/terraform-plugin-testing/config" - sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/knownvalue" "github.com/hashicorp/terraform-plugin-testing/statecheck" @@ -17,10 +16,11 @@ import ( func TestAccBudgetsBudgetDataSource_tags(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_budgets_budget.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BudgetsServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -45,10 +45,11 @@ func TestAccBudgetsBudgetDataSource_tags(t *testing.T) { func TestAccBudgetsBudgetDataSource_tags_NullMap(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_budgets_budget.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, 
t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BudgetsServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -69,10 +70,11 @@ func TestAccBudgetsBudgetDataSource_tags_NullMap(t *testing.T) { func TestAccBudgetsBudgetDataSource_tags_EmptyMap(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_budgets_budget.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BudgetsServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -93,10 +95,11 @@ func TestAccBudgetsBudgetDataSource_tags_EmptyMap(t *testing.T) { func TestAccBudgetsBudgetDataSource_tags_DefaultTags_nonOverlapping(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_budgets_budget.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BudgetsServiceID), Steps: []resource.TestStep{ @@ -125,10 +128,11 @@ func TestAccBudgetsBudgetDataSource_tags_DefaultTags_nonOverlapping(t *testing.T func TestAccBudgetsBudgetDataSource_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_budgets_budget.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, 
names.BudgetsServiceID), Steps: []resource.TestStep{ @@ -163,10 +167,11 @@ func TestAccBudgetsBudgetDataSource_tags_IgnoreTags_Overlap_DefaultTag(t *testin func TestAccBudgetsBudgetDataSource_tags_IgnoreTags_Overlap_ResourceTag(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_budgets_budget.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BudgetsServiceID), Steps: []resource.TestStep{ diff --git a/internal/service/budgets/budget_data_source_test.go b/internal/service/budgets/budget_data_source_test.go index e6f61288df29..80c078999618 100644 --- a/internal/service/budgets/budget_data_source_test.go +++ b/internal/service/budgets/budget_data_source_test.go @@ -41,6 +41,7 @@ func TestAccBudgetsBudgetDataSource_basic(t *testing.T) { resource.TestCheckResourceAttrSet(dataSourceName, "budget_limit.#"), resource.TestCheckResourceAttrPair(dataSourceName, acctest.CtTagsPercent, resourceName, acctest.CtTagsPercent), resource.TestCheckResourceAttrPair(dataSourceName, acctest.CtTagsKey1, resourceName, acctest.CtTagsKey1), + resource.TestCheckResourceAttrPair(dataSourceName, "billing_view_arn", resourceName, "billing_view_arn"), ), }, }, diff --git a/internal/service/budgets/budget_tags_gen_test.go b/internal/service/budgets/budget_tags_gen_test.go index 933bca5bdc97..0e5046c3cdb4 100644 --- a/internal/service/budgets/budget_tags_gen_test.go +++ b/internal/service/budgets/budget_tags_gen_test.go @@ -7,7 +7,6 @@ import ( awstypes "github.com/aws/aws-sdk-go-v2/service/budgets/types" "github.com/hashicorp/terraform-plugin-testing/config" - sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" 
"github.com/hashicorp/terraform-plugin-testing/knownvalue" "github.com/hashicorp/terraform-plugin-testing/plancheck" @@ -19,11 +18,12 @@ import ( func TestAccBudgetsBudget_tags(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Budget resourceName := "aws_budgets_budget.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BudgetsServiceID), CheckDestroy: testAccCheckBudgetDestroy(ctx), @@ -201,11 +201,12 @@ func TestAccBudgetsBudget_tags(t *testing.T) { func TestAccBudgetsBudget_tags_null(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Budget resourceName := "aws_budgets_budget.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BudgetsServiceID), CheckDestroy: testAccCheckBudgetDestroy(ctx), @@ -268,11 +269,12 @@ func TestAccBudgetsBudget_tags_null(t *testing.T) { func TestAccBudgetsBudget_tags_EmptyMap(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Budget resourceName := "aws_budgets_budget.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BudgetsServiceID), CheckDestroy: testAccCheckBudgetDestroy(ctx), @@ -331,11 +333,12 @@ func TestAccBudgetsBudget_tags_EmptyMap(t *testing.T) { func TestAccBudgetsBudget_tags_AddOnUpdate(t *testing.T) { ctx := 
acctest.Context(t) + var v awstypes.Budget resourceName := "aws_budgets_budget.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BudgetsServiceID), CheckDestroy: testAccCheckBudgetDestroy(ctx), @@ -412,11 +415,12 @@ func TestAccBudgetsBudget_tags_AddOnUpdate(t *testing.T) { func TestAccBudgetsBudget_tags_EmptyTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Budget resourceName := "aws_budgets_budget.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BudgetsServiceID), CheckDestroy: testAccCheckBudgetDestroy(ctx), @@ -501,11 +505,12 @@ func TestAccBudgetsBudget_tags_EmptyTag_OnCreate(t *testing.T) { func TestAccBudgetsBudget_tags_EmptyTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Budget resourceName := "aws_budgets_budget.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BudgetsServiceID), CheckDestroy: testAccCheckBudgetDestroy(ctx), @@ -638,11 +643,12 @@ func TestAccBudgetsBudget_tags_EmptyTag_OnUpdate_Add(t *testing.T) { func TestAccBudgetsBudget_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Budget resourceName := "aws_budgets_budget.test" - rName := 
sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BudgetsServiceID), CheckDestroy: testAccCheckBudgetDestroy(ctx), @@ -727,11 +733,12 @@ func TestAccBudgetsBudget_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { func TestAccBudgetsBudget_tags_DefaultTags_providerOnly(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Budget resourceName := "aws_budgets_budget.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BudgetsServiceID), CheckDestroy: testAccCheckBudgetDestroy(ctx), @@ -908,11 +915,12 @@ func TestAccBudgetsBudget_tags_DefaultTags_providerOnly(t *testing.T) { func TestAccBudgetsBudget_tags_DefaultTags_nonOverlapping(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Budget resourceName := "aws_budgets_budget.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BudgetsServiceID), CheckDestroy: testAccCheckBudgetDestroy(ctx), @@ -1068,11 +1076,12 @@ func TestAccBudgetsBudget_tags_DefaultTags_nonOverlapping(t *testing.T) { func TestAccBudgetsBudget_tags_DefaultTags_overlapping(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Budget resourceName := "aws_budgets_budget.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, 
acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BudgetsServiceID), CheckDestroy: testAccCheckBudgetDestroy(ctx), @@ -1244,11 +1253,12 @@ func TestAccBudgetsBudget_tags_DefaultTags_overlapping(t *testing.T) { func TestAccBudgetsBudget_tags_DefaultTags_updateToProviderOnly(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Budget resourceName := "aws_budgets_budget.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BudgetsServiceID), CheckDestroy: testAccCheckBudgetDestroy(ctx), @@ -1334,11 +1344,12 @@ func TestAccBudgetsBudget_tags_DefaultTags_updateToProviderOnly(t *testing.T) { func TestAccBudgetsBudget_tags_DefaultTags_updateToResourceOnly(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Budget resourceName := "aws_budgets_budget.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BudgetsServiceID), CheckDestroy: testAccCheckBudgetDestroy(ctx), @@ -1423,11 +1434,12 @@ func TestAccBudgetsBudget_tags_DefaultTags_updateToResourceOnly(t *testing.T) { func TestAccBudgetsBudget_tags_DefaultTags_emptyResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Budget resourceName := "aws_budgets_budget.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, 
resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BudgetsServiceID), CheckDestroy: testAccCheckBudgetDestroy(ctx), @@ -1488,11 +1500,12 @@ func TestAccBudgetsBudget_tags_DefaultTags_emptyResourceTag(t *testing.T) { func TestAccBudgetsBudget_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Budget resourceName := "aws_budgets_budget.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BudgetsServiceID), CheckDestroy: testAccCheckBudgetDestroy(ctx), @@ -1545,11 +1558,12 @@ func TestAccBudgetsBudget_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T) { func TestAccBudgetsBudget_tags_DefaultTags_nullOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Budget resourceName := "aws_budgets_budget.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BudgetsServiceID), CheckDestroy: testAccCheckBudgetDestroy(ctx), @@ -1607,11 +1621,12 @@ func TestAccBudgetsBudget_tags_DefaultTags_nullOverlappingResourceTag(t *testing func TestAccBudgetsBudget_tags_DefaultTags_nullNonOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Budget resourceName := "aws_budgets_budget.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + 
acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BudgetsServiceID), CheckDestroy: testAccCheckBudgetDestroy(ctx), @@ -1669,11 +1684,12 @@ func TestAccBudgetsBudget_tags_DefaultTags_nullNonOverlappingResourceTag(t *test func TestAccBudgetsBudget_tags_ComputedTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Budget resourceName := "aws_budgets_budget.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BudgetsServiceID), CheckDestroy: testAccCheckBudgetDestroy(ctx), @@ -1724,11 +1740,12 @@ func TestAccBudgetsBudget_tags_ComputedTag_OnCreate(t *testing.T) { func TestAccBudgetsBudget_tags_ComputedTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Budget resourceName := "aws_budgets_budget.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BudgetsServiceID), CheckDestroy: testAccCheckBudgetDestroy(ctx), @@ -1821,11 +1838,12 @@ func TestAccBudgetsBudget_tags_ComputedTag_OnUpdate_Add(t *testing.T) { func TestAccBudgetsBudget_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Budget resourceName := "aws_budgets_budget.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) 
}, ErrorCheck: acctest.ErrorCheck(t, names.BudgetsServiceID), CheckDestroy: testAccCheckBudgetDestroy(ctx), @@ -1908,11 +1926,12 @@ func TestAccBudgetsBudget_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { func TestAccBudgetsBudget_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Budget resourceName := "aws_budgets_budget.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BudgetsServiceID), CheckDestroy: testAccCheckBudgetDestroy(ctx), @@ -2070,11 +2089,12 @@ func TestAccBudgetsBudget_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { func TestAccBudgetsBudget_tags_IgnoreTags_Overlap_ResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Budget resourceName := "aws_budgets_budget.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BudgetsServiceID), CheckDestroy: testAccCheckBudgetDestroy(ctx), diff --git a/internal/service/budgets/budget_test.go b/internal/service/budgets/budget_test.go index 1e4a093e3c71..01a6c01b6be7 100644 --- a/internal/service/budgets/budget_test.go +++ b/internal/service/budgets/budget_test.go @@ -67,6 +67,7 @@ func TestAccBudgetsBudget_basic(t *testing.T) { testAccCheckBudgetExists(ctx, resourceName, &budget), acctest.CheckResourceAttrAccountID(ctx, resourceName, names.AttrAccountID), acctest.CheckResourceAttrGlobalARN(ctx, resourceName, names.AttrARN, "budgets", fmt.Sprintf(`budget/%s`, rName)), + resource.TestCheckResourceAttr(resourceName, "billing_view_arn", 
""), resource.TestCheckResourceAttr(resourceName, "budget_type", "RI_UTILIZATION"), resource.TestCheckResourceAttr(resourceName, "cost_filter.#", "1"), resource.TestCheckTypeSetElemNestedAttrs(resourceName, "cost_filter.*", map[string]string{ @@ -508,6 +509,51 @@ func TestAccBudgetsBudget_plannedLimits(t *testing.T) { }) } +func TestAccBudgetsBudget_billingViewARN(t *testing.T) { + ctx := acctest.Context(t) + var budget awstypes.Budget + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_budgets_budget.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.BudgetsEndpointID) }, + ErrorCheck: acctest.ErrorCheck(t, names.BudgetsServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckBudgetDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccBudgetConfig_billingViewARN(rName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckBudgetExists(ctx, resourceName, &budget), + acctest.CheckResourceAttrAccountID(ctx, resourceName, names.AttrAccountID), + acctest.CheckResourceAttrGlobalARN(ctx, resourceName, names.AttrARN, "budgets", fmt.Sprintf(`budget/%s`, rName)), + acctest.CheckResourceAttrGlobalARN(ctx, resourceName, "billing_view_arn", "billing", "billingview/primary"), + resource.TestCheckResourceAttr(resourceName, "budget_type", "RI_UTILIZATION"), + resource.TestCheckResourceAttr(resourceName, "cost_filter.#", "1"), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "cost_filter.*", map[string]string{ + names.AttrName: "Service", + "values.#": "1", + "values.0": "Amazon Redshift", + }), + resource.TestCheckResourceAttr(resourceName, "limit_amount", "100.0"), + resource.TestCheckResourceAttr(resourceName, "limit_unit", "PERCENTAGE"), + resource.TestCheckResourceAttr(resourceName, names.AttrName, rName), + resource.TestCheckResourceAttr(resourceName, "notification.#", "0"), 
+ resource.TestCheckResourceAttr(resourceName, "planned_limit.#", "0"), + resource.TestCheckResourceAttrSet(resourceName, "time_period_end"), + resource.TestCheckResourceAttrSet(resourceName, "time_period_start"), + resource.TestCheckResourceAttr(resourceName, "time_unit", "QUARTERLY"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + func testAccCheckBudgetExists(ctx context.Context, resourceName string, v *awstypes.Budget) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[resourceName] @@ -798,6 +844,29 @@ resource "aws_budgets_budget" "test" { `, rName, config) } +func testAccBudgetConfig_billingViewARN(rName string) string { + return fmt.Sprintf(` +data "aws_partition" "current" {} +data "aws_caller_identity" "current" {} + +resource "aws_budgets_budget" "test" { + name = %[1]q + budget_type = "RI_UTILIZATION" + limit_amount = "100.0" + limit_unit = "PERCENTAGE" + time_unit = "QUARTERLY" + + cost_filter { + name = "Service" + values = ["Amazon Redshift"] + } + + billing_view_arn = "arn:${data.aws_partition.current.partition}:billing::${data.aws_caller_identity.current.account_id}:billingview/primary" + +} +`, rName) +} + func generateStartTimes(resourceName, amount string, now time.Time) (string, []resource.TestCheckFunc) { startTimes := make([]time.Time, 12) diff --git a/internal/service/budgets/find.go b/internal/service/budgets/find.go index 62752653027c..b22fdcb538cb 100644 --- a/internal/service/budgets/find.go +++ b/internal/service/budgets/find.go @@ -7,18 +7,17 @@ import ( "context" "time" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" ) func FindBudgetWithDelay[T any](ctx context.Context, f func() (T, error)) (T, error) { var resp T - err := tfresource.Retry(ctx, 30*time.Second, func() *retry.RetryError { + err := tfresource.Retry(ctx, 30*time.Second, func(ctx 
context.Context) *tfresource.RetryError { var err error resp, err = f() if err != nil { - return retry.NonRetryableError(err) + return tfresource.NonRetryableError(err) } return nil diff --git a/internal/service/budgets/service_endpoint_resolver_gen.go b/internal/service/budgets/service_endpoint_resolver_gen.go index 6bf3f2bc6a3a..1a029b1b13ff 100644 --- a/internal/service/budgets/service_endpoint_resolver_gen.go +++ b/internal/service/budgets/service_endpoint_resolver_gen.go @@ -62,7 +62,7 @@ func (r resolverV2) ResolveEndpoint(ctx context.Context, params budgets.Endpoint }) params.UseFIPS = aws.Bool(false) } else { - err = fmt.Errorf("looking up budgets endpoint %q: %s", hostname, err) + err = fmt.Errorf("looking up budgets endpoint %q: %w", hostname, err) return } } else { diff --git a/internal/service/budgets/service_endpoints_gen_test.go b/internal/service/budgets/service_endpoints_gen_test.go index 61341de5f3a5..3cdfc5e744e5 100644 --- a/internal/service/budgets/service_endpoints_gen_test.go +++ b/internal/service/budgets/service_endpoints_gen_test.go @@ -524,7 +524,7 @@ func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { ) } -var errCancelOperation = fmt.Errorf("Test: Canceling request") +var errCancelOperation = errors.New("Test: Canceling request") func addCancelRequestMiddleware() func(*middleware.Stack) error { return func(stack *middleware.Stack) error { diff --git a/internal/service/budgets/service_package_gen.go b/internal/service/budgets/service_package_gen.go index bc9f4497679c..41d7cbf39024 100644 --- a/internal/service/budgets/service_package_gen.go +++ b/internal/service/budgets/service_package_gen.go @@ -7,7 +7,6 @@ import ( "unique" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/budgets" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -86,7 +85,7 @@ func (p *servicePackage) 
NewClient(ctx context.Context, config map[string]any) ( func(o *budgets.Options) { if inContext, ok := conns.FromContext(ctx); ok && inContext.VCREnabled() { tflog.Info(ctx, "overriding retry behavior to immediately return VCR errors") - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(vcr.InteractionNotFoundRetryableFunc)) + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), vcr.InteractionNotFoundRetryableFunc) } }, withExtraOptions(ctx, p, config), diff --git a/internal/service/budgets/sweep.go b/internal/service/budgets/sweep.go index fb6296fe8a20..70db17f3ba2d 100644 --- a/internal/service/budgets/sweep.go +++ b/internal/service/budgets/sweep.go @@ -34,7 +34,7 @@ func sweepBudgetActions(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %w", err) + return fmt.Errorf("getting client: %w", err) } conn := client.BudgetsClient(ctx) accountID := client.AccountID(ctx) @@ -78,7 +78,7 @@ func sweepBudgets(region string) error { // nosemgrep:ci.budgets-in-func-name ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %w", err) + return fmt.Errorf("getting client: %w", err) } conn := client.BudgetsClient(ctx) accountID := client.AccountID(ctx) diff --git a/internal/service/budgets/tags_gen.go b/internal/service/budgets/tags_gen.go index 7183c8a8de9c..e9d09f7a27ec 100644 --- a/internal/service/budgets/tags_gen.go +++ b/internal/service/budgets/tags_gen.go @@ -3,8 +3,8 @@ package budgets import ( "context" - "fmt" + "github.com/YakDriver/smarterr" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/budgets" awstypes "github.com/aws/aws-sdk-go-v2/service/budgets/types" @@ -27,7 +27,7 @@ func listTags(ctx context.Context, conn *budgets.Client, identifier string, optF output, 
err := conn.ListTagsForResource(ctx, &input, optFns...) if err != nil { - return tftags.New(ctx, nil), err + return tftags.New(ctx, nil), smarterr.NewError(err) } return keyValueTags(ctx, output.ResourceTags), nil @@ -39,7 +39,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri tags, err := listTags(ctx, meta.(*conns.AWSClient).BudgetsClient(ctx), identifier) if err != nil { - return err + return smarterr.NewError(err) } if inContext, ok := tftags.FromContext(ctx); ok { @@ -117,7 +117,7 @@ func updateTags(ctx context.Context, conn *budgets.Client, identifier string, ol _, err := conn.UntagResource(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("untagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } @@ -132,7 +132,7 @@ func updateTags(ctx context.Context, conn *budgets.Client, identifier string, ol _, err := conn.TagResource(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("tagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } diff --git a/internal/service/ce/anomaly_monitor_identity_gen_test.go b/internal/service/ce/anomaly_monitor_identity_gen_test.go index 19bfd3bc660f..bf5845c28662 100644 --- a/internal/service/ce/anomaly_monitor_identity_gen_test.go +++ b/internal/service/ce/anomaly_monitor_identity_gen_test.go @@ -16,6 +16,7 @@ import ( "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" "github.com/hashicorp/terraform-plugin-testing/tfversion" "github.com/hashicorp/terraform-provider-aws/internal/acctest" + tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -26,7 +27,7 @@ func TestAccCEAnomalyMonitor_Identity_Basic(t *testing.T) { resourceName := "aws_ce_anomaly_monitor.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ 
TerraformVersionChecks: []tfversion.TerraformVersionCheck{ tfversion.SkipBelow(tfversion.Version1_12_0), }, @@ -46,6 +47,9 @@ func TestAccCEAnomalyMonitor_Identity_Basic(t *testing.T) { ), ConfigStateChecks: []statecheck.StateCheck{ statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New(names.AttrARN), compare.ValuesSame()), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), }, }, @@ -98,3 +102,131 @@ func TestAccCEAnomalyMonitor_Identity_Basic(t *testing.T) { }, }) } + +func TestAccCEAnomalyMonitor_Identity_ExistingResource(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.AnomalyMonitor + resourceName := "aws_ce_anomaly_monitor.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.CEServiceID), + CheckDestroy: testAccCheckAnomalyMonitorDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/AnomalyMonitor/basic_v5.100.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckAnomalyMonitorExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: v6.0 Identity error + { + ConfigDirectory: config.StaticDirectory("testdata/AnomalyMonitor/basic_v6.0.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + 
testAccCheckAnomalyMonitorExists(ctx, resourceName, &v), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.Null(), + }), + }, + }, + + // Step 3: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/AnomalyMonitor/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + }, + }) +} + +func TestAccCEAnomalyMonitor_Identity_ExistingResource_NoRefresh_NoChange(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.AnomalyMonitor + resourceName := "aws_ce_anomaly_monitor.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.CEServiceID), + CheckDestroy: testAccCheckAnomalyMonitorDestroy(ctx), + AdditionalCLIOptions: 
&resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/AnomalyMonitor/basic_v5.100.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckAnomalyMonitorExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/AnomalyMonitor/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckAnomalyMonitorExists(ctx, resourceName, &v), + ), + }, + }, + }) +} diff --git a/internal/service/ce/anomaly_monitor_test.go b/internal/service/ce/anomaly_monitor_test.go index 8218bd6d7869..1f7ea05c9704 100644 --- a/internal/service/ce/anomaly_monitor_test.go +++ b/internal/service/ce/anomaly_monitor_test.go @@ -12,14 +12,8 @@ import ( awstypes "github.com/aws/aws-sdk-go-v2/service/costexplorer/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" - "github.com/hashicorp/terraform-plugin-testing/knownvalue" - "github.com/hashicorp/terraform-plugin-testing/plancheck" - "github.com/hashicorp/terraform-plugin-testing/statecheck" "github.com/hashicorp/terraform-plugin-testing/terraform" - "github.com/hashicorp/terraform-plugin-testing/tfversion" "github.com/hashicorp/terraform-provider-aws/internal/acctest" - tfknownvalue "github.com/hashicorp/terraform-provider-aws/internal/acctest/knownvalue" - tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" 
"github.com/hashicorp/terraform-provider-aws/internal/conns" tfce "github.com/hashicorp/terraform-provider-aws/internal/service/ce" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" @@ -196,84 +190,6 @@ func TestAccCEAnomalyMonitor_Dimensional(t *testing.T) { }) } -func TestAccCEAnomalyMonitor_Identity_ExistingResource(t *testing.T) { - ctx := acctest.Context(t) - var monitor awstypes.AnomalyMonitor - resourceName := "aws_ce_anomaly_monitor.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - - resource.ParallelTest(t, resource.TestCase{ - TerraformVersionChecks: []tfversion.TerraformVersionCheck{ - tfversion.SkipBelow(tfversion.Version1_12_0), - }, - PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheckPayerAccount(ctx, t) }, - CheckDestroy: testAccCheckAnomalyMonitorDestroy(ctx), - ErrorCheck: acctest.ErrorCheck(t, names.CEServiceID), - Steps: []resource.TestStep{ - { - ExternalProviders: map[string]resource.ExternalProvider{ - "aws": { - Source: "hashicorp/aws", - VersionConstraint: "5.100.0", - }, - }, - Config: testAccAnomalyMonitorConfig_basic(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckAnomalyMonitorExists(ctx, resourceName, &monitor), - ), - ConfigStateChecks: []statecheck.StateCheck{ - tfstatecheck.ExpectNoIdentity(resourceName), - }, - }, - { - ExternalProviders: map[string]resource.ExternalProvider{ - "aws": { - Source: "hashicorp/aws", - VersionConstraint: "6.0.0", - }, - }, - Config: testAccAnomalyMonitorConfig_basic(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckAnomalyMonitorExists(ctx, resourceName, &monitor), - ), - ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - PostApplyPostRefresh: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - }, - ConfigStateChecks: []statecheck.StateCheck{ - 
statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ - names.AttrARN: knownvalue.Null(), - }), - }, - }, - { - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - Config: testAccAnomalyMonitorConfig_basic(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckAnomalyMonitorExists(ctx, resourceName, &monitor), - ), - ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - PostApplyPostRefresh: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - }, - ConfigStateChecks: []statecheck.StateCheck{ - statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ - names.AttrARN: tfknownvalue.GlobalARNRegexp("ce", regexache.MustCompile(`anomalymonitor/.+`)), - }), - }, - }, - }, - }) -} - func testAccCheckAnomalyMonitorExists(ctx context.Context, n string, v *awstypes.AnomalyMonitor) resource.TestCheckFunc { return func(s *terraform.State) error { conn := acctest.Provider.Meta().(*conns.AWSClient).CEClient(ctx) diff --git a/internal/service/ce/anomaly_subscription.go b/internal/service/ce/anomaly_subscription.go index b41a2fe90ee6..7131ef630637 100644 --- a/internal/service/ce/anomaly_subscription.go +++ b/internal/service/ce/anomaly_subscription.go @@ -35,7 +35,7 @@ const ( // @ArnIdentity // @V60SDKv2Fix // @Testing(existsType="github.com/aws/aws-sdk-go-v2/service/costexplorer/types;awstypes;awstypes.AnomalySubscription") -// @Testing(identityTest=false) +// @Testing(emailAddress="email_address") func resourceAnomalySubscription() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceAnomalySubscriptionCreate, diff --git a/internal/service/ce/anomaly_subscription_identity_gen_test.go b/internal/service/ce/anomaly_subscription_identity_gen_test.go new file mode 100644 index 000000000000..d68c1499af5e --- /dev/null +++ 
b/internal/service/ce/anomaly_subscription_identity_gen_test.go @@ -0,0 +1,247 @@ +// Code generated by internal/generate/identitytests/main.go; DO NOT EDIT. + +package ce_test + +import ( + "testing" + + awstypes "github.com/aws/aws-sdk-go-v2/service/costexplorer/types" + "github.com/hashicorp/terraform-plugin-testing/compare" + "github.com/hashicorp/terraform-plugin-testing/config" + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/knownvalue" + "github.com/hashicorp/terraform-plugin-testing/plancheck" + "github.com/hashicorp/terraform-plugin-testing/statecheck" + "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" + "github.com/hashicorp/terraform-plugin-testing/tfversion" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccCEAnomalySubscription_Identity_Basic(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.AnomalySubscription + resourceName := "aws_ce_anomaly_subscription.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + domain := acctest.RandomDomainName() + email_address := acctest.RandomEmailAddress(domain) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.CEServiceID), + CheckDestroy: testAccCheckAnomalySubscriptionDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + // Step 1: Setup + { + ConfigDirectory: config.StaticDirectory("testdata/AnomalySubscription/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: 
config.StringVariable(rName), + "email_address": config.StringVariable(email_address), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckAnomalySubscriptionExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New(names.AttrARN), compare.ValuesSame()), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + + // Step 2: Import command + { + ConfigDirectory: config.StaticDirectory("testdata/AnomalySubscription/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "email_address": config.StringVariable(email_address), + }, + ImportStateKind: resource.ImportCommandWithID, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + + // Step 3: Import block with Import ID + { + ConfigDirectory: config.StaticDirectory("testdata/AnomalySubscription/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "email_address": config.StringVariable(email_address), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + }, + }, + }, + + // Step 4: Import block with Resource Identity + { + ConfigDirectory: config.StaticDirectory("testdata/AnomalySubscription/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "email_address": config.StringVariable(email_address), + }, + ResourceName: resourceName, + 
ImportState: true, + ImportStateKind: resource.ImportBlockWithResourceIdentity, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + }, + }, + }, + }, + }) +} + +func TestAccCEAnomalySubscription_Identity_ExistingResource(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.AnomalySubscription + resourceName := "aws_ce_anomaly_subscription.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + domain := acctest.RandomDomainName() + email_address := acctest.RandomEmailAddress(domain) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.CEServiceID), + CheckDestroy: testAccCheckAnomalySubscriptionDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/AnomalySubscription/basic_v5.100.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "email_address": config.StringVariable(email_address), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckAnomalySubscriptionExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: v6.0 Identity error + { + ConfigDirectory: config.StaticDirectory("testdata/AnomalySubscription/basic_v6.0.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "email_address": config.StringVariable(email_address), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckAnomalySubscriptionExists(ctx, resourceName, &v), + ), + 
ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.Null(), + }), + }, + }, + + // Step 3: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/AnomalySubscription/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "email_address": config.StringVariable(email_address), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + }, + }) +} + +func TestAccCEAnomalySubscription_Identity_ExistingResource_NoRefresh_NoChange(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.AnomalySubscription + resourceName := "aws_ce_anomaly_subscription.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + domain := acctest.RandomDomainName() + email_address := acctest.RandomEmailAddress(domain) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, 
names.CEServiceID), + CheckDestroy: testAccCheckAnomalySubscriptionDestroy(ctx), + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/AnomalySubscription/basic_v5.100.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "email_address": config.StringVariable(email_address), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckAnomalySubscriptionExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/AnomalySubscription/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "email_address": config.StringVariable(email_address), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckAnomalySubscriptionExists(ctx, resourceName, &v), + ), + }, + }, + }) +} diff --git a/internal/service/ce/anomaly_subscription_test.go b/internal/service/ce/anomaly_subscription_test.go index 816f2873bf8a..cf30fb8ba6ec 100644 --- a/internal/service/ce/anomaly_subscription_test.go +++ b/internal/service/ce/anomaly_subscription_test.go @@ -12,14 +12,8 @@ import ( awstypes "github.com/aws/aws-sdk-go-v2/service/costexplorer/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" - "github.com/hashicorp/terraform-plugin-testing/knownvalue" - "github.com/hashicorp/terraform-plugin-testing/plancheck" - "github.com/hashicorp/terraform-plugin-testing/statecheck" "github.com/hashicorp/terraform-plugin-testing/terraform" - "github.com/hashicorp/terraform-plugin-testing/tfversion" 
"github.com/hashicorp/terraform-provider-aws/internal/acctest" - tfknownvalue "github.com/hashicorp/terraform-provider-aws/internal/acctest/knownvalue" - tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" "github.com/hashicorp/terraform-provider-aws/internal/conns" tfce "github.com/hashicorp/terraform-provider-aws/internal/service/ce" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" @@ -42,7 +36,7 @@ func TestAccCEAnomalySubscription_basic(t *testing.T) { Steps: []resource.TestStep{ { Config: testAccAnomalySubscriptionConfig_basic(rName, address), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( testAccCheckAnomalySubscriptionExists(ctx, resourceName, &subscription), resource.TestCheckResourceAttr(resourceName, names.AttrName, rName), acctest.MatchResourceAttrGlobalARN(ctx, resourceName, names.AttrARN, "ce", regexache.MustCompile(`anomalysubscription/.+`)), @@ -78,7 +72,7 @@ func TestAccCEAnomalySubscription_disappears(t *testing.T) { Steps: []resource.TestStep{ { Config: testAccAnomalySubscriptionConfig_basic(rName, address), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( testAccCheckAnomalySubscriptionExists(ctx, resourceName, &subscription), acctest.CheckResourceDisappears(ctx, acctest.Provider, tfce.ResourceAnomalySubscription(), resourceName), ), @@ -270,7 +264,7 @@ func TestAccCEAnomalySubscription_tags(t *testing.T) { }, { Config: testAccAnomalySubscriptionConfig_tags1(rName, address, acctest.CtKey2, acctest.CtValue2), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( testAccCheckAnomalySubscriptionExists(ctx, resourceName, &subscription), resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, "1"), resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey2, acctest.CtValue2), @@ -280,86 +274,6 @@ func TestAccCEAnomalySubscription_tags(t *testing.T) { }) } -func 
TestAccCEAnomalySubscription_Identity_ExistingResource(t *testing.T) { - ctx := acctest.Context(t) - var subscription awstypes.AnomalySubscription - resourceName := "aws_ce_anomaly_subscription.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - domain := acctest.RandomDomainName() - address := acctest.RandomEmailAddress(domain) - - resource.ParallelTest(t, resource.TestCase{ - TerraformVersionChecks: []tfversion.TerraformVersionCheck{ - tfversion.SkipBelow(tfversion.Version1_12_0), - }, - PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheckPayerAccount(ctx, t) }, - CheckDestroy: testAccCheckAnomalySubscriptionDestroy(ctx), - ErrorCheck: acctest.ErrorCheck(t, names.CEServiceID), - Steps: []resource.TestStep{ - { - ExternalProviders: map[string]resource.ExternalProvider{ - "aws": { - Source: "hashicorp/aws", - VersionConstraint: "5.100.0", - }, - }, - Config: testAccAnomalySubscriptionConfig_basic(rName, address), - Check: resource.ComposeTestCheckFunc( - testAccCheckAnomalySubscriptionExists(ctx, resourceName, &subscription), - ), - ConfigStateChecks: []statecheck.StateCheck{ - tfstatecheck.ExpectNoIdentity(resourceName), - }, - }, - { - ExternalProviders: map[string]resource.ExternalProvider{ - "aws": { - Source: "hashicorp/aws", - VersionConstraint: "6.0.0", - }, - }, - Config: testAccAnomalySubscriptionConfig_basic(rName, address), - Check: resource.ComposeTestCheckFunc( - testAccCheckAnomalySubscriptionExists(ctx, resourceName, &subscription), - ), - ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - PostApplyPostRefresh: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - }, - ConfigStateChecks: []statecheck.StateCheck{ - statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ - names.AttrARN: knownvalue.Null(), - }), - }, - }, - { - 
ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - Config: testAccAnomalySubscriptionConfig_basic(rName, address), - Check: resource.ComposeTestCheckFunc( - testAccCheckAnomalySubscriptionExists(ctx, resourceName, &subscription), - ), - ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - PostApplyPostRefresh: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - }, - ConfigStateChecks: []statecheck.StateCheck{ - statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ - names.AttrARN: tfknownvalue.GlobalARNRegexp("ce", regexache.MustCompile(`anomalysubscription/.+`)), - }), - }, - }, - }, - }) -} - func testAccCheckAnomalySubscriptionExists(ctx context.Context, n string, v *awstypes.AnomalySubscription) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] diff --git a/internal/service/ce/cost_category.go b/internal/service/ce/cost_category.go index e34e65525fc1..4522454d03ea 100644 --- a/internal/service/ce/cost_category.go +++ b/internal/service/ce/cost_category.go @@ -316,8 +316,8 @@ func resourceCostCategoryCreate(ctx context.Context, d *schema.ResourceData, met input.SplitChargeRules = expandCostCategorySplitChargeRules(v.(*schema.Set).List()) } - outputRaw, err := tfresource.RetryWhenIsA[*awstypes.ResourceNotFoundException](ctx, d.Timeout(schema.TimeoutCreate), - func() (any, error) { + outputRaw, err := tfresource.RetryWhenIsA[any, *awstypes.ResourceNotFoundException](ctx, d.Timeout(schema.TimeoutCreate), + func(ctx context.Context) (any, error) { return conn.CreateCostCategoryDefinition(ctx, input) }) diff --git a/internal/service/ce/cost_category_identity_gen_test.go b/internal/service/ce/cost_category_identity_gen_test.go index ab9232cd9e09..a7b631290502 100644 --- a/internal/service/ce/cost_category_identity_gen_test.go 
+++ b/internal/service/ce/cost_category_identity_gen_test.go @@ -16,6 +16,7 @@ import ( "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" "github.com/hashicorp/terraform-plugin-testing/tfversion" "github.com/hashicorp/terraform-provider-aws/internal/acctest" + tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -26,7 +27,7 @@ func TestAccCECostCategory_Identity_Basic(t *testing.T) { resourceName := "aws_ce_cost_category.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ TerraformVersionChecks: []tfversion.TerraformVersionCheck{ tfversion.SkipBelow(tfversion.Version1_12_0), }, @@ -46,6 +47,9 @@ func TestAccCECostCategory_Identity_Basic(t *testing.T) { ), ConfigStateChecks: []statecheck.StateCheck{ statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New(names.AttrARN), compare.ValuesSame()), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), }, }, @@ -98,3 +102,131 @@ func TestAccCECostCategory_Identity_Basic(t *testing.T) { }, }) } + +func TestAccCECostCategory_Identity_ExistingResource(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.CostCategory + resourceName := "aws_ce_cost_category.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.CEServiceID), + CheckDestroy: testAccCheckCostCategoryDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + 
ConfigDirectory: config.StaticDirectory("testdata/CostCategory/basic_v5.100.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckCostCategoryExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: v6.0 Identity error + { + ConfigDirectory: config.StaticDirectory("testdata/CostCategory/basic_v6.0.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckCostCategoryExists(ctx, resourceName, &v), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.Null(), + }), + }, + }, + + // Step 3: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/CostCategory/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, 
+ }, + }, + }) +} + +func TestAccCECostCategory_Identity_ExistingResource_NoRefresh_NoChange(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.CostCategory + resourceName := "aws_ce_cost_category.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.CEServiceID), + CheckDestroy: testAccCheckCostCategoryDestroy(ctx), + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/CostCategory/basic_v5.100.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckCostCategoryExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/CostCategory/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckCostCategoryExists(ctx, resourceName, &v), + ), + }, + }, + }) +} diff --git a/internal/service/ce/cost_category_test.go b/internal/service/ce/cost_category_test.go index 80901d0a6e60..909dfe354ba2 100644 --- a/internal/service/ce/cost_category_test.go +++ b/internal/service/ce/cost_category_test.go @@ -15,14 +15,8 @@ import ( "github.com/hashicorp/aws-sdk-go-base/v2/tfawserr" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" 
"github.com/hashicorp/terraform-plugin-testing/helper/resource" - "github.com/hashicorp/terraform-plugin-testing/knownvalue" - "github.com/hashicorp/terraform-plugin-testing/plancheck" - "github.com/hashicorp/terraform-plugin-testing/statecheck" "github.com/hashicorp/terraform-plugin-testing/terraform" - "github.com/hashicorp/terraform-plugin-testing/tfversion" "github.com/hashicorp/terraform-provider-aws/internal/acctest" - tfknownvalue "github.com/hashicorp/terraform-provider-aws/internal/acctest/knownvalue" - tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" "github.com/hashicorp/terraform-provider-aws/internal/conns" tfce "github.com/hashicorp/terraform-provider-aws/internal/service/ce" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" @@ -274,84 +268,6 @@ func TestAccCECostCategory_tags(t *testing.T) { }) } -func TestAccCECostCategory_Identity_ExistingResource(t *testing.T) { - ctx := acctest.Context(t) - var output awstypes.CostCategory - resourceName := "aws_ce_cost_category.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - - resource.ParallelTest(t, resource.TestCase{ - TerraformVersionChecks: []tfversion.TerraformVersionCheck{ - tfversion.SkipBelow(tfversion.Version1_12_0), - }, - PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheckPayerAccount(ctx, t) }, - CheckDestroy: testAccCheckCostCategoryDestroy(ctx), - ErrorCheck: acctest.ErrorCheck(t, names.CEServiceID), - Steps: []resource.TestStep{ - { - ExternalProviders: map[string]resource.ExternalProvider{ - "aws": { - Source: "hashicorp/aws", - VersionConstraint: "5.100.0", - }, - }, - Config: testAccCostCategoryConfig_basic(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckCostCategoryExists(ctx, resourceName, &output), - ), - ConfigStateChecks: []statecheck.StateCheck{ - tfstatecheck.ExpectNoIdentity(resourceName), - }, - }, - { - ExternalProviders: map[string]resource.ExternalProvider{ - "aws": { - Source: 
"hashicorp/aws", - VersionConstraint: "6.0.0", - }, - }, - Config: testAccCostCategoryConfig_basic(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckCostCategoryExists(ctx, resourceName, &output), - ), - ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - PostApplyPostRefresh: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - }, - ConfigStateChecks: []statecheck.StateCheck{ - statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ - names.AttrARN: knownvalue.Null(), - }), - }, - }, - { - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - Config: testAccCostCategoryConfig_basic(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckCostCategoryExists(ctx, resourceName, &output), - ), - ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - PostApplyPostRefresh: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - }, - ConfigStateChecks: []statecheck.StateCheck{ - statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ - names.AttrARN: tfknownvalue.GlobalARNRegexp("ce", regexache.MustCompile(`costcategory/.+$`)), - }), - }, - }, - }, - }) -} - func testAccPreCheckPayerAccount(ctx context.Context, t *testing.T) { t.Helper() diff --git a/internal/service/ce/service_endpoint_resolver_gen.go b/internal/service/ce/service_endpoint_resolver_gen.go index 0dfbcce2992d..fe06b852221a 100644 --- a/internal/service/ce/service_endpoint_resolver_gen.go +++ b/internal/service/ce/service_endpoint_resolver_gen.go @@ -62,7 +62,7 @@ func (r resolverV2) ResolveEndpoint(ctx context.Context, params costexplorer.End }) params.UseFIPS = aws.Bool(false) } else { - err = fmt.Errorf("looking up costexplorer 
endpoint %q: %s", hostname, err) + err = fmt.Errorf("looking up costexplorer endpoint %q: %w", hostname, err) return } } else { diff --git a/internal/service/ce/service_endpoints_gen_test.go b/internal/service/ce/service_endpoints_gen_test.go index da0139339846..6333c6fc729f 100644 --- a/internal/service/ce/service_endpoints_gen_test.go +++ b/internal/service/ce/service_endpoints_gen_test.go @@ -601,7 +601,7 @@ func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { ) } -var errCancelOperation = fmt.Errorf("Test: Canceling request") +var errCancelOperation = errors.New("Test: Canceling request") func addCancelRequestMiddleware() func(*middleware.Stack) error { return func(stack *middleware.Stack) error { diff --git a/internal/service/ce/service_package_gen.go b/internal/service/ce/service_package_gen.go index d46811698a1d..15d5c98dfa43 100644 --- a/internal/service/ce/service_package_gen.go +++ b/internal/service/ce/service_package_gen.go @@ -7,7 +7,6 @@ import ( "unique" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/costexplorer" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -125,7 +124,7 @@ func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) ( func(o *costexplorer.Options) { if inContext, ok := conns.FromContext(ctx); ok && inContext.VCREnabled() { tflog.Info(ctx, "overriding retry behavior to immediately return VCR errors") - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(vcr.InteractionNotFoundRetryableFunc)) + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), vcr.InteractionNotFoundRetryableFunc) } }, withExtraOptions(ctx, p, config), diff --git a/internal/service/ce/tags_gen.go b/internal/service/ce/tags_gen.go index d6535855e5fb..80bfcd19d0eb 100644 --- a/internal/service/ce/tags_gen.go +++ 
b/internal/service/ce/tags_gen.go @@ -3,8 +3,8 @@ package ce import ( "context" - "fmt" + "github.com/YakDriver/smarterr" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/costexplorer" awstypes "github.com/aws/aws-sdk-go-v2/service/costexplorer/types" @@ -27,7 +27,7 @@ func listTags(ctx context.Context, conn *costexplorer.Client, identifier string, output, err := conn.ListTagsForResource(ctx, &input, optFns...) if err != nil { - return tftags.New(ctx, nil), err + return tftags.New(ctx, nil), smarterr.NewError(err) } return keyValueTags(ctx, output.ResourceTags), nil @@ -39,7 +39,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri tags, err := listTags(ctx, meta.(*conns.AWSClient).CEClient(ctx), identifier) if err != nil { - return err + return smarterr.NewError(err) } if inContext, ok := tftags.FromContext(ctx); ok { @@ -117,7 +117,7 @@ func updateTags(ctx context.Context, conn *costexplorer.Client, identifier strin _, err := conn.UntagResource(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("untagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } @@ -132,7 +132,7 @@ func updateTags(ctx context.Context, conn *costexplorer.Client, identifier strin _, err := conn.TagResource(ctx, &input, optFns...) 
if err != nil { - return fmt.Errorf("tagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } diff --git a/internal/service/ce/testdata/AnomalyMonitor/region_override/main_gen.tf b/internal/service/ce/testdata/AnomalyMonitor/basic_v5.100.0/main_gen.tf similarity index 78% rename from internal/service/ce/testdata/AnomalyMonitor/region_override/main_gen.tf rename to internal/service/ce/testdata/AnomalyMonitor/basic_v5.100.0/main_gen.tf index d033688dd42d..6decab852f9d 100644 --- a/internal/service/ce/testdata/AnomalyMonitor/region_override/main_gen.tf +++ b/internal/service/ce/testdata/AnomalyMonitor/basic_v5.100.0/main_gen.tf @@ -2,8 +2,6 @@ # SPDX-License-Identifier: MPL-2.0 resource "aws_ce_anomaly_monitor" "test" { - region = var.region - name = var.rName monitor_type = "CUSTOM" @@ -30,9 +28,13 @@ variable "rName" { type = string nullable = false } - -variable "region" { - description = "Region to deploy resource in" - type = string - nullable = false +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "5.100.0" + } + } } + +provider "aws" {} diff --git a/internal/service/ce/testdata/AnomalyMonitor/basic_v6.0.0/main_gen.tf b/internal/service/ce/testdata/AnomalyMonitor/basic_v6.0.0/main_gen.tf new file mode 100644 index 000000000000..5289f6640d29 --- /dev/null +++ b/internal/service/ce/testdata/AnomalyMonitor/basic_v6.0.0/main_gen.tf @@ -0,0 +1,40 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_ce_anomaly_monitor" "test" { + name = var.rName + monitor_type = "CUSTOM" + + monitor_specification = < 0 { + apiObject.ResponseCompletionTimeout = aws.Int32(int32(v)) + } + } + if v, ok := tfMap["s3_origin_config"]; ok { if v := v.([]any); len(v) > 0 { apiObject.S3OriginConfig = expandS3OriginConfig(v[0].(map[string]any)) @@ -2198,6 +2220,12 @@ func flattenOrigin(apiObject *awstypes.Origin) map[string]any { tfMap["origin_shield"] = []any{flattenOriginShield(apiObject.OriginShield)} } + if apiObject.ResponseCompletionTimeout != nil { + tfMap["response_completion_timeout"] = aws.ToInt32(apiObject.ResponseCompletionTimeout) + } else { + tfMap["response_completion_timeout"] = 0 + } + if apiObject.S3OriginConfig != nil && aws.ToString(apiObject.S3OriginConfig.OriginAccessIdentity) != "" { tfMap["s3_origin_config"] = []any{flattenS3OriginConfig(apiObject.S3OriginConfig)} } @@ -2423,6 +2451,10 @@ func expandCustomOriginConfig(tfMap map[string]any) *awstypes.CustomOriginConfig OriginSslProtocols: expandCustomOriginConfigSSL(tfMap["origin_ssl_protocols"].(*schema.Set).List()), } + if v, ok := tfMap[names.AttrIPAddressType]; ok && v.(string) != "" { + apiObject.IpAddressType = awstypes.IpAddressType(v.(string)) + } + return apiObject } @@ -2440,6 +2472,10 @@ func flattenCustomOriginConfig(apiObject *awstypes.CustomOriginConfig) map[strin "origin_ssl_protocols": flattenCustomOriginConfigSSL(apiObject.OriginSslProtocols), } + if apiObject.IpAddressType != "" { + tfMap[names.AttrIPAddressType] = apiObject.IpAddressType + } + return tfMap } diff --git a/internal/service/cloudfront/distribution_test.go b/internal/service/cloudfront/distribution_test.go index 45886d446dac..943432b81de4 100644 --- a/internal/service/cloudfront/distribution_test.go +++ b/internal/service/cloudfront/distribution_test.go @@ -39,6 +39,8 @@ func TestAccCloudFrontDistribution_basic(t *testing.T) { Check: resource.ComposeTestCheckFunc( 
testAccCheckDistributionExists(ctx, resourceName, &distribution), resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, "0"), + resource.TestCheckResourceAttr(resourceName, "origin.#", "1"), + resource.TestCheckResourceAttr(resourceName, "origin.0.response_completion_timeout", "0"), ), }, { @@ -178,6 +180,7 @@ func TestAccCloudFrontDistribution_customOrigin(t *testing.T) { var distribution awstypes.Distribution rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_cloudfront_distribution.custom_distribution" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.CloudFrontEndpointID) }, @@ -188,7 +191,10 @@ func TestAccCloudFrontDistribution_customOrigin(t *testing.T) { { Config: testAccDistributionConfig_custom(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckDistributionExists(ctx, "aws_cloudfront_distribution.custom_distribution", &distribution), + testAccCheckDistributionExists(ctx, resourceName, &distribution), + resource.TestCheckResourceAttr(resourceName, "origin.#", "1"), + resource.TestCheckResourceAttr(resourceName, "origin.0.custom_origin_config.#", "1"), + resource.TestCheckResourceAttr(resourceName, "origin.0.custom_origin_config.0.ip_address_type", ""), ), }, { @@ -204,6 +210,53 @@ func TestAccCloudFrontDistribution_customOrigin(t *testing.T) { }) } +func TestAccCloudFrontDistribution_customOriginIPAddressType(t *testing.T) { + ctx := acctest.Context(t) + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + + var distribution awstypes.Distribution + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_cloudfront_distribution.custom_distribution" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.CloudFrontEndpointID) }, + ErrorCheck: acctest.ErrorCheck(t, 
names.CloudFrontServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckDistributionDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccDistributionConfig_customIPAddressType(rName, string(awstypes.IpAddressTypeIpv6)), + Check: resource.ComposeTestCheckFunc( + testAccCheckDistributionExists(ctx, resourceName, &distribution), + resource.TestCheckResourceAttr(resourceName, "origin.#", "1"), + resource.TestCheckResourceAttr(resourceName, "origin.0.custom_origin_config.#", "1"), + resource.TestCheckResourceAttr(resourceName, "origin.0.custom_origin_config.0.ip_address_type", string(awstypes.IpAddressTypeIpv6)), + ), + }, + { + ResourceName: "aws_cloudfront_distribution.custom_distribution", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "retain_on_delete", + "wait_for_deployment", + }, + }, + { + Config: testAccDistributionConfig_customIPAddressType(rName, string(awstypes.IpAddressTypeDualStack)), + Check: resource.ComposeTestCheckFunc( + testAccCheckDistributionExists(ctx, resourceName, &distribution), + resource.TestCheckResourceAttr(resourceName, "origin.#", "1"), + resource.TestCheckResourceAttr(resourceName, "origin.0.custom_origin_config.#", "1"), + resource.TestCheckResourceAttr(resourceName, "origin.0.custom_origin_config.0.ip_address_type", string(awstypes.IpAddressTypeDualStack)), + ), + }, + }, + }) +} + func TestAccCloudFrontDistribution_originPolicyDefault(t *testing.T) { ctx := acctest.Context(t) if testing.Short() { @@ -590,11 +643,11 @@ func TestAccCloudFrontDistribution_Origin_originShield(t *testing.T) { }, { Config: testAccDistributionConfig_originItem(rName, originShieldItem(acctest.CtFalse, `""`)), - ExpectError: regexache.MustCompile(`.*must be a valid AWS Region Code.*`), + ExpectError: regexache.MustCompile(`.*doesn't look like AWS Region.*`), }, { Config: testAccDistributionConfig_originItem(rName, originShieldItem(acctest.CtTrue, `"US East 
(Ohio)"`)), - ExpectError: regexache.MustCompile(`.*must be a valid AWS Region Code.*`), + ExpectError: regexache.MustCompile(`.*doesn't look like AWS Region.*`), }, { Config: testAccDistributionConfig_originItem(rName, originShieldItem(acctest.CtTrue, `"us-east-1"`)), //lintignore:AWSAT003 @@ -1498,6 +1551,78 @@ func TestAccCloudFrontDistribution_vpcOriginConfig(t *testing.T) { }) } +func TestAccCloudFrontDistribution_responseCompletionTimeout(t *testing.T) { + ctx := acctest.Context(t) + var distribution awstypes.Distribution + resourceName := "aws_cloudfront_distribution.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.CloudFrontEndpointID) }, + ErrorCheck: acctest.ErrorCheck(t, names.CloudFrontServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckDistributionDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccDistributionConfig_responseCompletionTimeout(false, false, 60), + Check: resource.ComposeTestCheckFunc( + testAccCheckDistributionExists(ctx, resourceName, &distribution), + resource.TestCheckResourceAttr(resourceName, "origin.#", "1"), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "origin.*", map[string]string{ + "custom_header.#": "0", + "custom_origin_config.#": "1", + "origin_id": "test", + "origin_shield.#": "0", + "s3_origin_config.#": "0", + "vpc_origin_config.#": "0", + "response_completion_timeout": "60", + }), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "retain_on_delete", + "wait_for_deployment", + }, + }, + { + Config: testAccDistributionConfig_responseCompletionTimeout(false, false, 30), + Check: resource.ComposeTestCheckFunc( + testAccCheckDistributionExists(ctx, resourceName, &distribution), + resource.TestCheckResourceAttr(resourceName, "origin.#", "1"), + 
resource.TestCheckTypeSetElemNestedAttrs(resourceName, "origin.*", map[string]string{ + "custom_header.#": "0", + "custom_origin_config.#": "1", + "origin_id": "test", + "origin_shield.#": "0", + "s3_origin_config.#": "0", + "vpc_origin_config.#": "0", + "response_completion_timeout": "30", + }), + ), + }, + { + Config: testAccDistributionConfig_enabled(false, false), + Check: resource.ComposeTestCheckFunc( + testAccCheckDistributionExists(ctx, resourceName, &distribution), + resource.TestCheckResourceAttr(resourceName, "origin.#", "1"), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "origin.*", map[string]string{ + "custom_header.#": "0", + "custom_origin_config.#": "1", + "origin_id": "test", + "origin_shield.#": "0", + "s3_origin_config.#": "0", + "vpc_origin_config.#": "0", + "response_completion_timeout": "0", + }), + ), + }, + }, + }) +} + func TestAccCloudFrontDistribution_grpcConfig(t *testing.T) { ctx := acctest.Context(t) var distribution awstypes.Distribution @@ -1976,6 +2101,76 @@ resource "aws_cloudfront_distribution" "custom_distribution" { `, testAccDistributionRetainConfig())) } +func testAccDistributionConfig_customIPAddressType(rName, ipAddressType string) string { + return acctest.ConfigCompose( + logBucket(rName), + fmt.Sprintf(` +resource "aws_cloudfront_distribution" "custom_distribution" { + depends_on = [aws_s3_bucket_acl.s3_bucket_logs_acl] + + origin { + domain_name = "www.example.com" + origin_id = "myCustomOrigin" + + custom_origin_config { + http_port = 80 + https_port = 443 + origin_protocol_policy = "http-only" + origin_ssl_protocols = ["SSLv3", "TLSv1"] + origin_read_timeout = 30 + origin_keepalive_timeout = 5 + ip_address_type = %[2]q + } + } + + enabled = true + comment = "Some comment" + default_root_object = "index.html" + + logging_config { + include_cookies = false + bucket = aws_s3_bucket.s3_bucket_logs.bucket_regional_domain_name + prefix = "myprefix" + } + + default_cache_behavior { + allowed_methods = ["DELETE", 
"GET", "HEAD", "OPTIONS", "PATCH", "POST", "PUT"] + cached_methods = ["GET", "HEAD"] + target_origin_id = "myCustomOrigin" + smooth_streaming = false + + forwarded_values { + query_string = false + + cookies { + forward = "all" + } + } + + viewer_protocol_policy = "allow-all" + min_ttl = 0 + default_ttl = 3600 + max_ttl = 86400 + } + + price_class = "PriceClass_200" + + restrictions { + geo_restriction { + restriction_type = "whitelist" + locations = ["US", "CA", "GB", "DE"] + } + } + + viewer_certificate { + cloudfront_default_certificate = true + } + + %[1]s +} +`, testAccDistributionRetainConfig(), ipAddressType)) +} + func testAccDistributionConfig_originRequestPolicyDefault(rName string) string { return acctest.ConfigCompose( logBucket(rName), @@ -4588,6 +4783,54 @@ resource "aws_cloudfront_distribution" "test" { `) } +func testAccDistributionConfig_responseCompletionTimeout(enabled, retainOnDelete bool, responseCompletionTimeout int) string { + return fmt.Sprintf(` +resource "aws_cloudfront_distribution" "test" { + enabled = %[1]t + retain_on_delete = %[2]t + + default_cache_behavior { + allowed_methods = ["GET", "HEAD"] + cached_methods = ["GET", "HEAD"] + target_origin_id = "test" + viewer_protocol_policy = "allow-all" + + forwarded_values { + query_string = false + + cookies { + forward = "all" + } + } + } + + origin { + domain_name = "www.example.com" + origin_id = "test" + + response_completion_timeout = %[3]d + + custom_origin_config { + http_port = 80 + https_port = 443 + origin_protocol_policy = "https-only" + origin_ssl_protocols = ["TLSv1.2"] + } + } + + restrictions { + geo_restriction { + restriction_type = "none" + } + } + + viewer_certificate { + cloudfront_default_certificate = true + } +} +`, enabled, retainOnDelete, responseCompletionTimeout) +} + func testAccDistributionConfig_grpcConfig() string { return ` resource "aws_cloudfront_distribution" "test" { diff --git a/internal/service/cloudfront/exports.go 
b/internal/service/cloudfront/exports.go index c41aea12344d..b17771360fb3 100644 --- a/internal/service/cloudfront/exports.go +++ b/internal/service/cloudfront/exports.go @@ -5,5 +5,6 @@ package cloudfront // Exports for use across service packages. var ( + FindDistributionByID = findDistributionByID ResourceKeyValueStore = newKeyValueStoreResource ) diff --git a/internal/service/cloudfront/exports_test.go b/internal/service/cloudfront/exports_test.go index 63b7364bb56b..2243d6a6f6fa 100644 --- a/internal/service/cloudfront/exports_test.go +++ b/internal/service/cloudfront/exports_test.go @@ -23,7 +23,6 @@ var ( FindCachePolicyByID = findCachePolicyByID FindContinuousDeploymentPolicyByID = findContinuousDeploymentPolicyByID - FindDistributionByID = findDistributionByID FindFieldLevelEncryptionConfigByID = findFieldLevelEncryptionConfigByID FindFieldLevelEncryptionProfileByID = findFieldLevelEncryptionProfileByID FindFunctionByTwoPartKey = findFunctionByTwoPartKey diff --git a/internal/service/cloudfront/function_test.go b/internal/service/cloudfront/function_test.go index 2bfd28ee204b..6e5685f669e8 100644 --- a/internal/service/cloudfront/function_test.go +++ b/internal/service/cloudfront/function_test.go @@ -27,6 +27,7 @@ func init() { func testAccErrorCheckSkipFunction(t *testing.T) resource.ErrorCheckFunc { return acctest.ErrorCheckSkipMessagesContaining(t, "InvalidParameterValueException: Unsupported source arn", + "AccessDenied", ) } diff --git a/internal/service/cloudfront/key_value_store.go b/internal/service/cloudfront/key_value_store.go index 064fcfd5f48a..aa2c6758fe98 100644 --- a/internal/service/cloudfront/key_value_store.go +++ b/internal/service/cloudfront/key_value_store.go @@ -36,6 +36,7 @@ import ( // @ArnFormat("key-value-store/{id}", attribute="arn") // @Testing(existsType="github.com/aws/aws-sdk-go-v2/service/cloudfront/types;awstypes;awstypes.KeyValueStore") // @Testing(importStateIdAttribute="name") +// @Testing(preIdentityVersion="v5.100.0") 
func newKeyValueStoreResource(context.Context) (resource.ResourceWithConfigure, error) { r := &keyValueStoreResource{} diff --git a/internal/service/cloudfront/key_value_store_identity_gen_test.go b/internal/service/cloudfront/key_value_store_identity_gen_test.go index 5cf3976bd03b..0c4ab6541138 100644 --- a/internal/service/cloudfront/key_value_store_identity_gen_test.go +++ b/internal/service/cloudfront/key_value_store_identity_gen_test.go @@ -27,7 +27,7 @@ func TestAccCloudFrontKeyValueStore_Identity_Basic(t *testing.T) { resourceName := "aws_cloudfront_key_value_store.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ TerraformVersionChecks: []tfversion.TerraformVersionCheck{ tfversion.SkipBelow(tfversion.Version1_12_0), }, @@ -104,3 +104,131 @@ func TestAccCloudFrontKeyValueStore_Identity_Basic(t *testing.T) { }, }) } + +func TestAccCloudFrontKeyValueStore_Identity_ExistingResource(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.KeyValueStore + resourceName := "aws_cloudfront_key_value_store.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.CloudFrontServiceID), + CheckDestroy: testAccCheckKeyValueStoreDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/KeyValueStore/basic_v5.100.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckKeyValueStoreExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, 
+ + // Step 2: v6.0 Identity set on refresh + { + ConfigDirectory: config.StaticDirectory("testdata/KeyValueStore/basic_v6.0.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckKeyValueStoreExists(ctx, resourceName, &v), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrName: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrName)), + }, + }, + + // Step 3: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/KeyValueStore/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrName: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrName)), + }, + }, + }, + }) +} + +func TestAccCloudFrontKeyValueStore_Identity_ExistingResource_NoRefresh_NoChange(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.KeyValueStore 
+ resourceName := "aws_cloudfront_key_value_store.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.CloudFrontServiceID), + CheckDestroy: testAccCheckKeyValueStoreDestroy(ctx), + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/KeyValueStore/basic_v5.100.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckKeyValueStoreExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/KeyValueStore/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + }, + }, + }) +} diff --git a/internal/service/cloudfront/key_value_store_test.go b/internal/service/cloudfront/key_value_store_test.go index 88c16e981565..09ce475e606b 100644 --- a/internal/service/cloudfront/key_value_store_test.go +++ b/internal/service/cloudfront/key_value_store_test.go @@ -11,15 +11,8 @@ import ( awstypes "github.com/aws/aws-sdk-go-v2/service/cloudfront/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" - "github.com/hashicorp/terraform-plugin-testing/knownvalue" - "github.com/hashicorp/terraform-plugin-testing/plancheck" - "github.com/hashicorp/terraform-plugin-testing/statecheck" 
"github.com/hashicorp/terraform-plugin-testing/terraform" - "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" - "github.com/hashicorp/terraform-plugin-testing/tfversion" "github.com/hashicorp/terraform-provider-aws/internal/acctest" - tfknownvalue "github.com/hashicorp/terraform-provider-aws/internal/acctest/knownvalue" - tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" "github.com/hashicorp/terraform-provider-aws/internal/conns" tfcloudfront "github.com/hashicorp/terraform-provider-aws/internal/service/cloudfront" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" @@ -133,89 +126,6 @@ func TestAccCloudFrontKeyValueStore_comment(t *testing.T) { }) } -func TestAccCloudFrontKeyValueStore_Identity_ExistingResource(t *testing.T) { - ctx := acctest.Context(t) - - var v awstypes.KeyValueStore - resourceName := "aws_cloudfront_key_value_store.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - - resource.ParallelTest(t, resource.TestCase{ - TerraformVersionChecks: []tfversion.TerraformVersionCheck{ - tfversion.SkipBelow(tfversion.Version1_12_0), - }, - PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, names.CloudFrontServiceID), - CheckDestroy: testAccCheckKeyValueStoreDestroy(ctx), - Steps: []resource.TestStep{ - { - ExternalProviders: map[string]resource.ExternalProvider{ - "aws": { - Source: "hashicorp/aws", - VersionConstraint: "5.100.0", - }, - }, - Config: testAccKeyValueStoreConfig_basic(rName), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckKeyValueStoreExists(ctx, resourceName, &v), - ), - ConfigStateChecks: []statecheck.StateCheck{ - tfstatecheck.ExpectNoIdentity(resourceName), - }, - }, - { - ExternalProviders: map[string]resource.ExternalProvider{ - "aws": { - Source: "hashicorp/aws", - VersionConstraint: "6.0.0", - }, - }, - Config: testAccKeyValueStoreConfig_basic(rName), - Check: resource.ComposeAggregateTestCheckFunc( - 
testAccCheckKeyValueStoreExists(ctx, resourceName, &v), - ), - ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - PostApplyPostRefresh: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - }, - ConfigStateChecks: []statecheck.StateCheck{ - statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ - names.AttrAccountID: tfknownvalue.AccountID(), - names.AttrName: knownvalue.NotNull(), - }), - statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrName)), - }, - }, - { - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - Config: testAccKeyValueStoreConfig_basic(rName), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckKeyValueStoreExists(ctx, resourceName, &v), - ), - ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - PostApplyPostRefresh: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - }, - ConfigStateChecks: []statecheck.StateCheck{ - statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ - names.AttrAccountID: tfknownvalue.AccountID(), - names.AttrName: knownvalue.NotNull(), - }), - statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrName)), - }, - }, - }, - }) -} - func testAccCheckKeyValueStoreDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { conn := acctest.Provider.Meta().(*conns.AWSClient).CloudFrontClient(ctx) diff --git a/internal/service/cloudfront/realtime_log_config_identity_gen_test.go b/internal/service/cloudfront/realtime_log_config_identity_gen_test.go index 515dcab9bb32..f5757da0ee00 100644 --- a/internal/service/cloudfront/realtime_log_config_identity_gen_test.go 
+++ b/internal/service/cloudfront/realtime_log_config_identity_gen_test.go @@ -27,7 +27,7 @@ func TestAccCloudFrontRealtimeLogConfig_Identity_Basic(t *testing.T) { resourceName := "aws_cloudfront_realtime_log_config.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ TerraformVersionChecks: []tfversion.TerraformVersionCheck{ tfversion.SkipBelow(tfversion.Version1_12_0), }, @@ -48,6 +48,9 @@ func TestAccCloudFrontRealtimeLogConfig_Identity_Basic(t *testing.T) { ConfigStateChecks: []statecheck.StateCheck{ tfstatecheck.ExpectGlobalARNFormat(resourceName, tfjsonpath.New(names.AttrARN), "cloudfront", "realtime-log-config/{name}"), statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New(names.AttrARN), compare.ValuesSame()), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), }, }, @@ -100,3 +103,131 @@ func TestAccCloudFrontRealtimeLogConfig_Identity_Basic(t *testing.T) { }, }) } + +func TestAccCloudFrontRealtimeLogConfig_Identity_ExistingResource(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.RealtimeLogConfig + resourceName := "aws_cloudfront_realtime_log_config.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.CloudFrontServiceID), + CheckDestroy: testAccCheckRealtimeLogConfigDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/RealtimeLogConfig/basic_v5.100.0/"), + ConfigVariables: 
config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckRealtimeLogConfigExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: v6.0 Identity error + { + ConfigDirectory: config.StaticDirectory("testdata/RealtimeLogConfig/basic_v6.0.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckRealtimeLogConfigExists(ctx, resourceName, &v), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.Null(), + }), + }, + }, + + // Step 3: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/RealtimeLogConfig/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + }, + }) +} + +func 
TestAccCloudFrontRealtimeLogConfig_Identity_ExistingResource_NoRefresh_NoChange(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.RealtimeLogConfig + resourceName := "aws_cloudfront_realtime_log_config.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.CloudFrontServiceID), + CheckDestroy: testAccCheckRealtimeLogConfigDestroy(ctx), + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/RealtimeLogConfig/basic_v5.100.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckRealtimeLogConfigExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/RealtimeLogConfig/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckRealtimeLogConfigExists(ctx, resourceName, &v), + ), + }, + }, + }) +} diff --git a/internal/service/cloudfront/realtime_log_config_test.go b/internal/service/cloudfront/realtime_log_config_test.go index 7a0d526d6ec9..03e00875c19b 100644 --- a/internal/service/cloudfront/realtime_log_config_test.go +++ b/internal/service/cloudfront/realtime_log_config_test.go @@ -9,18 +9,11 @@ import ( "strconv" "testing" - "github.com/YakDriver/regexache" awstypes 
"github.com/aws/aws-sdk-go-v2/service/cloudfront/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" - "github.com/hashicorp/terraform-plugin-testing/knownvalue" - "github.com/hashicorp/terraform-plugin-testing/plancheck" - "github.com/hashicorp/terraform-plugin-testing/statecheck" "github.com/hashicorp/terraform-plugin-testing/terraform" - "github.com/hashicorp/terraform-plugin-testing/tfversion" "github.com/hashicorp/terraform-provider-aws/internal/acctest" - tfknownvalue "github.com/hashicorp/terraform-provider-aws/internal/acctest/knownvalue" - tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" "github.com/hashicorp/terraform-provider-aws/internal/conns" tfcloudfront "github.com/hashicorp/terraform-provider-aws/internal/service/cloudfront" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" @@ -155,85 +148,6 @@ func TestAccCloudFrontRealtimeLogConfig_updates(t *testing.T) { }) } -func TestAccCloudFrontRealtimeLogConfig_Identity_ExistingResource(t *testing.T) { - ctx := acctest.Context(t) - var v awstypes.RealtimeLogConfig - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - samplingRate := sdkacctest.RandIntRange(1, 100) - resourceName := "aws_cloudfront_realtime_log_config.test" - - resource.ParallelTest(t, resource.TestCase{ - TerraformVersionChecks: []tfversion.TerraformVersionCheck{ - tfversion.SkipBelow(tfversion.Version1_12_0), - }, - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.CloudFrontEndpointID) }, - ErrorCheck: acctest.ErrorCheck(t, names.CloudFrontServiceID), - CheckDestroy: testAccCheckRealtimeLogConfigDestroy(ctx), - Steps: []resource.TestStep{ - { - ExternalProviders: map[string]resource.ExternalProvider{ - "aws": { - Source: "hashicorp/aws", - VersionConstraint: "5.100.0", - }, - }, - Config: testAccRealtimeLogConfigConfig_basic(rName, 
samplingRate), - Check: resource.ComposeTestCheckFunc( - testAccCheckRealtimeLogConfigExists(ctx, resourceName, &v), - ), - ConfigStateChecks: []statecheck.StateCheck{ - tfstatecheck.ExpectNoIdentity(resourceName), - }, - }, - { - ExternalProviders: map[string]resource.ExternalProvider{ - "aws": { - Source: "hashicorp/aws", - VersionConstraint: "6.0.0", - }, - }, - Config: testAccRealtimeLogConfigConfig_basic(rName, samplingRate), - Check: resource.ComposeTestCheckFunc( - testAccCheckRealtimeLogConfigExists(ctx, resourceName, &v), - ), - ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - PostApplyPostRefresh: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - }, - ConfigStateChecks: []statecheck.StateCheck{ - statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ - names.AttrARN: knownvalue.Null(), - }), - }, - }, - { - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - Config: testAccRealtimeLogConfigConfig_basic(rName, samplingRate), - Check: resource.ComposeTestCheckFunc( - testAccCheckRealtimeLogConfigExists(ctx, resourceName, &v), - ), - ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - PostApplyPostRefresh: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - }, - ConfigStateChecks: []statecheck.StateCheck{ - statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ - names.AttrARN: tfknownvalue.GlobalARNRegexp("cloudfront", regexache.MustCompile(`realtime-log-config/.+`)), - }), - }, - }, - }, - }) -} - func testAccCheckRealtimeLogConfigDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { conn := 
acctest.Provider.Meta().(*conns.AWSClient).CloudFrontClient(ctx) diff --git a/internal/service/cloudfront/service_endpoint_resolver_gen.go b/internal/service/cloudfront/service_endpoint_resolver_gen.go index 6aecb5c28572..fce5acd47768 100644 --- a/internal/service/cloudfront/service_endpoint_resolver_gen.go +++ b/internal/service/cloudfront/service_endpoint_resolver_gen.go @@ -62,7 +62,7 @@ func (r resolverV2) ResolveEndpoint(ctx context.Context, params cloudfront.Endpo }) params.UseFIPS = aws.Bool(false) } else { - err = fmt.Errorf("looking up cloudfront endpoint %q: %s", hostname, err) + err = fmt.Errorf("looking up cloudfront endpoint %q: %w", hostname, err) return } } else { diff --git a/internal/service/cloudfront/service_endpoints_gen_test.go b/internal/service/cloudfront/service_endpoints_gen_test.go index 550a7a6f50f6..14cd5ac1b539 100644 --- a/internal/service/cloudfront/service_endpoints_gen_test.go +++ b/internal/service/cloudfront/service_endpoints_gen_test.go @@ -521,7 +521,7 @@ func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { ) } -var errCancelOperation = fmt.Errorf("Test: Canceling request") +var errCancelOperation = errors.New("Test: Canceling request") func addCancelRequestMiddleware() func(*middleware.Stack) error { return func(stack *middleware.Stack) error { diff --git a/internal/service/cloudfront/service_package_gen.go b/internal/service/cloudfront/service_package_gen.go index d494a144b2dc..1db1f6705adc 100644 --- a/internal/service/cloudfront/service_package_gen.go +++ b/internal/service/cloudfront/service_package_gen.go @@ -7,7 +7,6 @@ import ( "unique" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/cloudfront" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -18,6 +17,17 @@ import ( type servicePackage struct{} +func (p *servicePackage) Actions(ctx context.Context) 
[]*inttypes.ServicePackageAction { + return []*inttypes.ServicePackageAction{ + { + Factory: newCreateInvalidationAction, + TypeName: "aws_cloudfront_create_invalidation", + Name: "Create Invalidation", + Region: unique.Make(inttypes.ResourceRegionDisabled()), + }, + } +} + func (p *servicePackage) FrameworkDataSources(ctx context.Context) []*inttypes.ServicePackageFrameworkDataSource { return []*inttypes.ServicePackageFrameworkDataSource{ { @@ -237,7 +247,7 @@ func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) ( func(o *cloudfront.Options) { if inContext, ok := conns.FromContext(ctx); ok && inContext.VCREnabled() { tflog.Info(ctx, "overriding retry behavior to immediately return VCR errors") - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(vcr.InteractionNotFoundRetryableFunc)) + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), vcr.InteractionNotFoundRetryableFunc) } }, withExtraOptions(ctx, p, config), diff --git a/internal/service/cloudfront/sweep.go b/internal/service/cloudfront/sweep.go index 17b67c9bccc7..8a64df68b5f2 100644 --- a/internal/service/cloudfront/sweep.go +++ b/internal/service/cloudfront/sweep.go @@ -109,7 +109,7 @@ func sweepCachePolicies(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.CloudFrontClient(ctx) input := &cloudfront.ListCachePoliciesInput{ @@ -188,7 +188,7 @@ func sweepDistributionsByProductionOrStaging(region string, staging bool) error ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.CloudFrontClient(ctx) input := &cloudfront.ListDistributionsInput{} @@ -251,7 +251,7 @@ 
func sweepContinuousDeploymentPolicies(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.CloudFrontClient(ctx) input := &cloudfront.ListContinuousDeploymentPoliciesInput{} @@ -293,7 +293,7 @@ func sweepFunctions(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %w", err) + return fmt.Errorf("getting client: %w", err) } conn := client.CloudFrontClient(ctx) input := &cloudfront.ListFunctionsInput{} @@ -405,7 +405,7 @@ func sweepMonitoringSubscriptions(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.CloudFrontClient(ctx) input := &cloudfront.ListDistributionsInput{} @@ -446,7 +446,7 @@ func sweepRealtimeLogsConfig(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.CloudFrontClient(ctx) input := &cloudfront.ListRealtimeLogConfigsInput{} @@ -490,7 +490,7 @@ func sweepFieldLevelEncryptionConfigs(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.CloudFrontClient(ctx) input := &cloudfront.ListFieldLevelEncryptionConfigsInput{} @@ -546,7 +546,7 @@ func sweepFieldLevelEncryptionProfiles(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, 
region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.CloudFrontClient(ctx) input := &cloudfront.ListFieldLevelEncryptionProfilesInput{} @@ -602,7 +602,7 @@ func sweepOriginRequestPolicies(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.CloudFrontClient(ctx) input := &cloudfront.ListOriginRequestPoliciesInput{ @@ -660,7 +660,7 @@ func sweepResponseHeadersPolicies(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.CloudFrontClient(ctx) input := &cloudfront.ListResponseHeadersPoliciesInput{ @@ -718,7 +718,7 @@ func sweepOriginAccessControls(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.CloudFrontClient(ctx) input := &cloudfront.ListOriginAccessControlsInput{} @@ -774,7 +774,7 @@ func sweepVPCOrigins(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.CloudFrontClient(ctx) input := &cloudfront.ListVpcOriginsInput{} diff --git a/internal/service/cloudfront/tags_gen.go b/internal/service/cloudfront/tags_gen.go index 9a887d70599f..50151561b4cb 100644 --- a/internal/service/cloudfront/tags_gen.go +++ b/internal/service/cloudfront/tags_gen.go @@ -3,8 +3,8 @@ package cloudfront import ( "context" - "fmt" + 
"github.com/YakDriver/smarterr" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/cloudfront" awstypes "github.com/aws/aws-sdk-go-v2/service/cloudfront/types" @@ -27,7 +27,7 @@ func listTags(ctx context.Context, conn *cloudfront.Client, identifier string, o output, err := conn.ListTagsForResource(ctx, &input, optFns...) if err != nil { - return tftags.New(ctx, nil), err + return tftags.New(ctx, nil), smarterr.NewError(err) } return keyValueTags(ctx, output.Tags.Items), nil @@ -39,7 +39,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri tags, err := listTags(ctx, meta.(*conns.AWSClient).CloudFrontClient(ctx), identifier) if err != nil { - return err + return smarterr.NewError(err) } if inContext, ok := tftags.FromContext(ctx); ok { @@ -117,7 +117,7 @@ func updateTags(ctx context.Context, conn *cloudfront.Client, identifier string, _, err := conn.UntagResource(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("untagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } @@ -132,7 +132,7 @@ func updateTags(ctx context.Context, conn *cloudfront.Client, identifier string, _, err := conn.TagResource(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("tagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } diff --git a/internal/service/cloudfront/testdata/KeyValueStore/basic_v5.100.0/main_gen.tf b/internal/service/cloudfront/testdata/KeyValueStore/basic_v5.100.0/main_gen.tf new file mode 100644 index 000000000000..0da0e24dd1eb --- /dev/null +++ b/internal/service/cloudfront/testdata/KeyValueStore/basic_v5.100.0/main_gen.tf @@ -0,0 +1,22 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_cloudfront_key_value_store" "test" { + name = var.rName +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "5.100.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/cloudfront/testdata/KeyValueStore/basic_v6.0.0/main_gen.tf b/internal/service/cloudfront/testdata/KeyValueStore/basic_v6.0.0/main_gen.tf new file mode 100644 index 000000000000..20cfdf7d5394 --- /dev/null +++ b/internal/service/cloudfront/testdata/KeyValueStore/basic_v6.0.0/main_gen.tf @@ -0,0 +1,22 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resource "aws_cloudfront_key_value_store" "test" { + name = var.rName +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "6.0.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/cloudfront/testdata/RealtimeLogConfig/basic_v5.100.0/main_gen.tf b/internal/service/cloudfront/testdata/RealtimeLogConfig/basic_v5.100.0/main_gen.tf new file mode 100644 index 000000000000..16483ab4ec06 --- /dev/null +++ b/internal/service/cloudfront/testdata/RealtimeLogConfig/basic_v5.100.0/main_gen.tf @@ -0,0 +1,80 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_cloudfront_realtime_log_config" "test" { + name = var.rName + sampling_rate = 1 + fields = ["timestamp", "c-ip"] + + endpoint { + stream_type = "Kinesis" + + kinesis_stream_config { + role_arn = aws_iam_role.test.arn + stream_arn = aws_kinesis_stream.test.arn + } + } + + depends_on = [aws_iam_role_policy.test] +} + +# testAccRealtimeLogBaseConfig + +resource "aws_kinesis_stream" "test" { + name = var.rName + shard_count = 2 +} + +resource "aws_iam_role" "test" { + name = var.rName + + assume_role_policy = < 0 { if err := d.Set("metric_query", flattenMetricAlarmMetrics(alarm.Metrics)); err != nil { - return sdkdiag.AppendErrorf(diags, "setting metric_query: %s", err) + return smerr.Append(ctx, diags, err, smerr.ID, d.Id()) } } d.Set(names.AttrNamespace, alarm.Namespace) @@ -425,7 +425,7 @@ func resourceMetricAlarmUpdate(ctx context.Context, d *schema.ResourceData, meta _, err := conn.PutMetricAlarm(ctx, input) if err != nil { - return sdkdiag.AppendErrorf(diags, "updating CloudWatch Metric Alarm (%s): %s", d.Id(), err) + return smerr.Append(ctx, diags, err, smerr.ID, d.Id()) } } @@ -447,7 +447,7 @@ func resourceMetricAlarmDelete(ctx context.Context, d *schema.ResourceData, meta } if err != nil { - return sdkdiag.AppendErrorf(diags, "deleting CloudWatch Metric Alarm (%s): %s", d.Id(), err) + return smerr.Append(ctx, diags, err, smerr.ID, d.Id()) } return diags @@ -462,14 +462,14 @@ func findMetricAlarmByName(ctx context.Context, conn *cloudwatch.Client, name st output, err := conn.DescribeAlarms(ctx, input) if err != nil { - return nil, err + return nil, smarterr.NewError(err) } if output == nil { - return nil, tfresource.NewEmptyResultError(input) + return nil, smarterr.NewError(tfresource.NewEmptyResultError(input)) } - return tfresource.AssertSingleValueResult(output.MetricAlarms) + return smarterr.Assert(tfresource.AssertSingleValueResult(output.MetricAlarms)) } func expandPutMetricAlarmInput(ctx 
context.Context, d *schema.ResourceData) *cloudwatch.PutMetricAlarmInput { diff --git a/internal/service/cloudwatch/metric_alarm_identity_gen_test.go b/internal/service/cloudwatch/metric_alarm_identity_gen_test.go new file mode 100644 index 000000000000..e9216e1c0115 --- /dev/null +++ b/internal/service/cloudwatch/metric_alarm_identity_gen_test.go @@ -0,0 +1,314 @@ +// Code generated by internal/generate/identitytests/main.go; DO NOT EDIT. + +package cloudwatch_test + +import ( + "testing" + + awstypes "github.com/aws/aws-sdk-go-v2/service/cloudwatch/types" + "github.com/hashicorp/terraform-plugin-testing/compare" + "github.com/hashicorp/terraform-plugin-testing/config" + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/knownvalue" + "github.com/hashicorp/terraform-plugin-testing/plancheck" + "github.com/hashicorp/terraform-plugin-testing/statecheck" + "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" + "github.com/hashicorp/terraform-plugin-testing/tfversion" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + tfknownvalue "github.com/hashicorp/terraform-provider-aws/internal/acctest/knownvalue" + tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccCloudWatchMetricAlarm_Identity_Basic(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.MetricAlarm + resourceName := "aws_cloudwatch_metric_alarm.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.CloudWatchServiceID), + CheckDestroy: testAccCheckMetricAlarmDestroy(ctx), 
+ ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + // Step 1: Setup + { + ConfigDirectory: config.StaticDirectory("testdata/MetricAlarm/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckMetricAlarmExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New("alarm_name"), compare.ValuesSame()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + "alarm_name": knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New("alarm_name")), + }, + }, + + // Step 2: Import command + { + ConfigDirectory: config.StaticDirectory("testdata/MetricAlarm/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ImportStateKind: resource.ImportCommandWithID, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + + // Step 3: Import block with Import ID + { + ConfigDirectory: config.StaticDirectory("testdata/MetricAlarm/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New("alarm_name"), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, 
tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + }, + }, + }, + + // Step 4: Import block with Resource Identity + { + ConfigDirectory: config.StaticDirectory("testdata/MetricAlarm/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithResourceIdentity, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New("alarm_name"), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + }, + }, + }, + }, + }) +} + +func TestAccCloudWatchMetricAlarm_Identity_RegionOverride(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_cloudwatch_metric_alarm.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.CloudWatchServiceID), + CheckDestroy: acctest.CheckDestroyNoop, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + // Step 1: Setup + { + ConfigDirectory: config.StaticDirectory("testdata/MetricAlarm/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New("alarm_name"), compare.ValuesSame()), + statecheck.ExpectKnownValue(resourceName, 
tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.AlternateRegion()), + "alarm_name": knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New("alarm_name")), + }, + }, + + // Step 2: Import command + { + ConfigDirectory: config.StaticDirectory("testdata/MetricAlarm/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ImportStateKind: resource.ImportCommandWithID, + ImportStateIdFunc: acctest.CrossRegionImportStateIdFunc(resourceName), + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + + // Step 3: Import block with Import ID + { + ConfigDirectory: config.StaticDirectory("testdata/MetricAlarm/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportStateIdFunc: acctest.CrossRegionImportStateIdFunc(resourceName), + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New("alarm_name"), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + }, + + // Step 4: Import block with Resource Identity + { + ConfigDirectory: config.StaticDirectory("testdata/MetricAlarm/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + 
ImportState: true, + ImportStateKind: resource.ImportBlockWithResourceIdentity, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New("alarm_name"), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + }, + }, + }) +} + +// Resource Identity was added after v6.7.0 +func TestAccCloudWatchMetricAlarm_Identity_ExistingResource(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.MetricAlarm + resourceName := "aws_cloudwatch_metric_alarm.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.CloudWatchServiceID), + CheckDestroy: testAccCheckMetricAlarmDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/MetricAlarm/basic_v6.7.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckMetricAlarmExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/MetricAlarm/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + 
plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + "alarm_name": knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New("alarm_name")), + }, + }, + }, + }) +} + +// Resource Identity was added after v6.7.0 +func TestAccCloudWatchMetricAlarm_Identity_ExistingResource_NoRefresh_NoChange(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.MetricAlarm + resourceName := "aws_cloudwatch_metric_alarm.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.CloudWatchServiceID), + CheckDestroy: testAccCheckMetricAlarmDestroy(ctx), + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/MetricAlarm/basic_v6.7.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckMetricAlarmExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/MetricAlarm/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + 
PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + }, + }) +} diff --git a/internal/service/cloudwatch/metric_alarm_tags_gen_test.go b/internal/service/cloudwatch/metric_alarm_tags_gen_test.go index 1b1117c3003c..e0cc24373c6d 100644 --- a/internal/service/cloudwatch/metric_alarm_tags_gen_test.go +++ b/internal/service/cloudwatch/metric_alarm_tags_gen_test.go @@ -7,7 +7,6 @@ import ( awstypes "github.com/aws/aws-sdk-go-v2/service/cloudwatch/types" "github.com/hashicorp/terraform-plugin-testing/config" - sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/knownvalue" "github.com/hashicorp/terraform-plugin-testing/plancheck" @@ -19,11 +18,12 @@ import ( func TestAccCloudWatchMetricAlarm_tags(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.MetricAlarm resourceName := "aws_cloudwatch_metric_alarm.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.CloudWatchServiceID), CheckDestroy: testAccCheckMetricAlarmDestroy(ctx), @@ -201,11 +201,12 @@ func TestAccCloudWatchMetricAlarm_tags(t *testing.T) { func TestAccCloudWatchMetricAlarm_tags_null(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.MetricAlarm resourceName := "aws_cloudwatch_metric_alarm.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, 
acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.CloudWatchServiceID), CheckDestroy: testAccCheckMetricAlarmDestroy(ctx), @@ -268,11 +269,12 @@ func TestAccCloudWatchMetricAlarm_tags_null(t *testing.T) { func TestAccCloudWatchMetricAlarm_tags_EmptyMap(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.MetricAlarm resourceName := "aws_cloudwatch_metric_alarm.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.CloudWatchServiceID), CheckDestroy: testAccCheckMetricAlarmDestroy(ctx), @@ -331,11 +333,12 @@ func TestAccCloudWatchMetricAlarm_tags_EmptyMap(t *testing.T) { func TestAccCloudWatchMetricAlarm_tags_AddOnUpdate(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.MetricAlarm resourceName := "aws_cloudwatch_metric_alarm.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.CloudWatchServiceID), CheckDestroy: testAccCheckMetricAlarmDestroy(ctx), @@ -412,11 +415,12 @@ func TestAccCloudWatchMetricAlarm_tags_AddOnUpdate(t *testing.T) { func TestAccCloudWatchMetricAlarm_tags_EmptyTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.MetricAlarm resourceName := "aws_cloudwatch_metric_alarm.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + 
acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.CloudWatchServiceID), CheckDestroy: testAccCheckMetricAlarmDestroy(ctx), @@ -501,11 +505,12 @@ func TestAccCloudWatchMetricAlarm_tags_EmptyTag_OnCreate(t *testing.T) { func TestAccCloudWatchMetricAlarm_tags_EmptyTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.MetricAlarm resourceName := "aws_cloudwatch_metric_alarm.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.CloudWatchServiceID), CheckDestroy: testAccCheckMetricAlarmDestroy(ctx), @@ -638,11 +643,12 @@ func TestAccCloudWatchMetricAlarm_tags_EmptyTag_OnUpdate_Add(t *testing.T) { func TestAccCloudWatchMetricAlarm_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.MetricAlarm resourceName := "aws_cloudwatch_metric_alarm.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.CloudWatchServiceID), CheckDestroy: testAccCheckMetricAlarmDestroy(ctx), @@ -727,11 +733,12 @@ func TestAccCloudWatchMetricAlarm_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { func TestAccCloudWatchMetricAlarm_tags_DefaultTags_providerOnly(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.MetricAlarm resourceName := "aws_cloudwatch_metric_alarm.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + 
acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.CloudWatchServiceID), CheckDestroy: testAccCheckMetricAlarmDestroy(ctx), @@ -908,11 +915,12 @@ func TestAccCloudWatchMetricAlarm_tags_DefaultTags_providerOnly(t *testing.T) { func TestAccCloudWatchMetricAlarm_tags_DefaultTags_nonOverlapping(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.MetricAlarm resourceName := "aws_cloudwatch_metric_alarm.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.CloudWatchServiceID), CheckDestroy: testAccCheckMetricAlarmDestroy(ctx), @@ -1068,11 +1076,12 @@ func TestAccCloudWatchMetricAlarm_tags_DefaultTags_nonOverlapping(t *testing.T) func TestAccCloudWatchMetricAlarm_tags_DefaultTags_overlapping(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.MetricAlarm resourceName := "aws_cloudwatch_metric_alarm.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.CloudWatchServiceID), CheckDestroy: testAccCheckMetricAlarmDestroy(ctx), @@ -1244,11 +1253,12 @@ func TestAccCloudWatchMetricAlarm_tags_DefaultTags_overlapping(t *testing.T) { func TestAccCloudWatchMetricAlarm_tags_DefaultTags_updateToProviderOnly(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.MetricAlarm resourceName := "aws_cloudwatch_metric_alarm.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - 
resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.CloudWatchServiceID), CheckDestroy: testAccCheckMetricAlarmDestroy(ctx), @@ -1334,11 +1344,12 @@ func TestAccCloudWatchMetricAlarm_tags_DefaultTags_updateToProviderOnly(t *testi func TestAccCloudWatchMetricAlarm_tags_DefaultTags_updateToResourceOnly(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.MetricAlarm resourceName := "aws_cloudwatch_metric_alarm.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.CloudWatchServiceID), CheckDestroy: testAccCheckMetricAlarmDestroy(ctx), @@ -1423,11 +1434,12 @@ func TestAccCloudWatchMetricAlarm_tags_DefaultTags_updateToResourceOnly(t *testi func TestAccCloudWatchMetricAlarm_tags_DefaultTags_emptyResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.MetricAlarm resourceName := "aws_cloudwatch_metric_alarm.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.CloudWatchServiceID), CheckDestroy: testAccCheckMetricAlarmDestroy(ctx), @@ -1488,11 +1500,12 @@ func TestAccCloudWatchMetricAlarm_tags_DefaultTags_emptyResourceTag(t *testing.T func TestAccCloudWatchMetricAlarm_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.MetricAlarm resourceName := "aws_cloudwatch_metric_alarm.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := 
acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.CloudWatchServiceID), CheckDestroy: testAccCheckMetricAlarmDestroy(ctx), @@ -1545,11 +1558,12 @@ func TestAccCloudWatchMetricAlarm_tags_DefaultTags_emptyProviderOnlyTag(t *testi func TestAccCloudWatchMetricAlarm_tags_DefaultTags_nullOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.MetricAlarm resourceName := "aws_cloudwatch_metric_alarm.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.CloudWatchServiceID), CheckDestroy: testAccCheckMetricAlarmDestroy(ctx), @@ -1607,11 +1621,12 @@ func TestAccCloudWatchMetricAlarm_tags_DefaultTags_nullOverlappingResourceTag(t func TestAccCloudWatchMetricAlarm_tags_DefaultTags_nullNonOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.MetricAlarm resourceName := "aws_cloudwatch_metric_alarm.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.CloudWatchServiceID), CheckDestroy: testAccCheckMetricAlarmDestroy(ctx), @@ -1669,11 +1684,12 @@ func TestAccCloudWatchMetricAlarm_tags_DefaultTags_nullNonOverlappingResourceTag func TestAccCloudWatchMetricAlarm_tags_ComputedTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.MetricAlarm resourceName := "aws_cloudwatch_metric_alarm.test" - rName := 
sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.CloudWatchServiceID), CheckDestroy: testAccCheckMetricAlarmDestroy(ctx), @@ -1724,11 +1740,12 @@ func TestAccCloudWatchMetricAlarm_tags_ComputedTag_OnCreate(t *testing.T) { func TestAccCloudWatchMetricAlarm_tags_ComputedTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.MetricAlarm resourceName := "aws_cloudwatch_metric_alarm.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.CloudWatchServiceID), CheckDestroy: testAccCheckMetricAlarmDestroy(ctx), @@ -1821,11 +1838,12 @@ func TestAccCloudWatchMetricAlarm_tags_ComputedTag_OnUpdate_Add(t *testing.T) { func TestAccCloudWatchMetricAlarm_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.MetricAlarm resourceName := "aws_cloudwatch_metric_alarm.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.CloudWatchServiceID), CheckDestroy: testAccCheckMetricAlarmDestroy(ctx), @@ -1908,11 +1926,12 @@ func TestAccCloudWatchMetricAlarm_tags_ComputedTag_OnUpdate_Replace(t *testing.T func TestAccCloudWatchMetricAlarm_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.MetricAlarm resourceName := 
"aws_cloudwatch_metric_alarm.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.CloudWatchServiceID), CheckDestroy: testAccCheckMetricAlarmDestroy(ctx), @@ -2070,11 +2089,12 @@ func TestAccCloudWatchMetricAlarm_tags_IgnoreTags_Overlap_DefaultTag(t *testing. func TestAccCloudWatchMetricAlarm_tags_IgnoreTags_Overlap_ResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.MetricAlarm resourceName := "aws_cloudwatch_metric_alarm.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.CloudWatchServiceID), CheckDestroy: testAccCheckMetricAlarmDestroy(ctx), diff --git a/internal/service/cloudwatch/metric_stream.go b/internal/service/cloudwatch/metric_stream.go index 9adad88f09b0..ff51c84fd570 100644 --- a/internal/service/cloudwatch/metric_stream.go +++ b/internal/service/cloudwatch/metric_stream.go @@ -9,6 +9,7 @@ import ( "time" "github.com/YakDriver/regexache" + "github.com/YakDriver/smarterr" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/cloudwatch" "github.com/aws/aws-sdk-go-v2/service/cloudwatch/types" @@ -20,8 +21,8 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/create" "github.com/hashicorp/terraform-provider-aws/internal/enum" "github.com/hashicorp/terraform-provider-aws/internal/errs" - "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" "github.com/hashicorp/terraform-provider-aws/internal/flex" + "github.com/hashicorp/terraform-provider-aws/internal/smerr" tftags 
"github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/internal/verify" @@ -234,13 +235,13 @@ func resourceMetricStreamCreate(ctx context.Context, d *schema.ResourceData, met } if err != nil { - return sdkdiag.AppendErrorf(diags, "creating CloudWatch Metric Stream (%s): %s", name, err) + return smerr.Append(ctx, diags, err, smerr.ID, name) } d.SetId(name) if _, err := waitMetricStreamRunning(ctx, conn, d.Id(), d.Timeout(schema.TimeoutCreate)); err != nil { - return sdkdiag.AppendErrorf(diags, "waiting for CloudWatch Metric Stream (%s) create: %s", d.Id(), err) + return smerr.Append(ctx, diags, err, smerr.ID, d.Id()) } // For partitions not supporting tag-on-create, attempt tag after create. @@ -249,11 +250,11 @@ func resourceMetricStreamCreate(ctx context.Context, d *schema.ResourceData, met // If default tags only, continue. Otherwise, error. if v, ok := d.GetOk(names.AttrTags); (!ok || len(v.(map[string]any)) == 0) && errs.IsUnsupportedOperationInPartitionError(meta.(*conns.AWSClient).Partition(ctx), err) { - return append(diags, resourceMetricStreamRead(ctx, d, meta)...) + return append(diags, resourceMetricStreamRead(ctx, d, meta)...) 
// no error, just continue } if err != nil { - return sdkdiag.AppendErrorf(diags, "setting CloudWatch Metric Stream (%s) tags: %s", d.Id(), err) + return smerr.Append(ctx, diags, err, smerr.ID, d.Id()) } } @@ -273,20 +274,20 @@ func resourceMetricStreamRead(ctx context.Context, d *schema.ResourceData, meta } if err != nil { - return sdkdiag.AppendErrorf(diags, "reading CloudWatch Metric Stream (%s): %s", d.Id(), err) + return smerr.Append(ctx, diags, err, smerr.ID, d.Id()) } d.Set(names.AttrARN, output.Arn) d.Set(names.AttrCreationDate, output.CreationDate.Format(time.RFC3339)) if output.ExcludeFilters != nil { if err := d.Set("exclude_filter", flattenMetricStreamFilters(output.ExcludeFilters)); err != nil { - return sdkdiag.AppendErrorf(diags, "setting exclude_filter: %s", err) + return smerr.Append(ctx, diags, err, smerr.ID, d.Id()) } } d.Set("firehose_arn", output.FirehoseArn) if output.IncludeFilters != nil { if err := d.Set("include_filter", flattenMetricStreamFilters(output.IncludeFilters)); err != nil { - return sdkdiag.AppendErrorf(diags, "setting include_filter: %s", err) + return smerr.Append(ctx, diags, err, smerr.ID, d.Id()) } } d.Set("include_linked_accounts_metrics", output.IncludeLinkedAccountsMetrics) @@ -298,7 +299,7 @@ func resourceMetricStreamRead(ctx context.Context, d *schema.ResourceData, meta d.Set(names.AttrState, output.State) if output.StatisticsConfigurations != nil { if err := d.Set("statistics_configuration", flattenMetricStreamStatisticsConfigurations(output.StatisticsConfigurations)); err != nil { - return sdkdiag.AppendErrorf(diags, "setting statistics_configuration: %s", err) + return smerr.Append(ctx, diags, err, smerr.ID, d.Id()) } } @@ -333,11 +334,11 @@ func resourceMetricStreamUpdate(ctx context.Context, d *schema.ResourceData, met _, err := conn.PutMetricStream(ctx, input) if err != nil { - return sdkdiag.AppendErrorf(diags, "updating CloudWatch Metric Stream (%s): %s", d.Id(), err) + return smerr.Append(ctx, diags, err, 
smerr.ID, d.Id()) } if _, err := waitMetricStreamRunning(ctx, conn, d.Id(), d.Timeout(schema.TimeoutUpdate)); err != nil { - return sdkdiag.AppendErrorf(diags, "waiting for CloudWatch Metric Stream (%s) update: %s", d.Id(), err) + return smerr.Append(ctx, diags, err, smerr.ID, d.Id()) } } @@ -355,11 +356,11 @@ func resourceMetricStreamDelete(ctx context.Context, d *schema.ResourceData, met _, err := conn.DeleteMetricStream(ctx, &input) if err != nil { - return sdkdiag.AppendErrorf(diags, "deleting CloudWatch Metric Stream (%s): %s", d.Id(), err) + return smerr.Append(ctx, diags, err, smerr.ID, d.Id()) } if _, err := waitMetricStreamDeleted(ctx, conn, d.Id(), d.Timeout(schema.TimeoutDelete)); err != nil { - return sdkdiag.AppendErrorf(diags, "waiting for CloudWatch Metric Stream (%s) delete: %s", d.Id(), err) + return smerr.Append(ctx, diags, err, smerr.ID, d.Id()) } return diags @@ -373,18 +374,18 @@ func findMetricStreamByName(ctx context.Context, conn *cloudwatch.Client, name s output, err := conn.GetMetricStream(ctx, input) if errs.IsA[*types.ResourceNotFoundException](err) { - return nil, &retry.NotFoundError{ + return nil, smarterr.NewError(&retry.NotFoundError{ LastError: err, LastRequest: input, - } + }) } if err != nil { - return nil, err + return nil, smarterr.NewError(err) } if output == nil { - return nil, tfresource.NewEmptyResultError(input) + return nil, smarterr.NewError(tfresource.NewEmptyResultError(input)) } return output, nil @@ -399,7 +400,7 @@ func statusMetricStream(ctx context.Context, conn *cloudwatch.Client, name strin } if err != nil { - return nil, "", err + return nil, "", smarterr.NewError(err) } return output, aws.ToString(output.State), nil @@ -422,10 +423,10 @@ func waitMetricStreamDeleted(ctx context.Context, conn *cloudwatch.Client, name outputRaw, err := stateConf.WaitForStateContext(ctx) if output, ok := outputRaw.(*cloudwatch.GetMetricStreamOutput); ok { - return output, err + return output, smarterr.NewError(err) } - return 
nil, err + return nil, smarterr.NewError(err) } func waitMetricStreamRunning(ctx context.Context, conn *cloudwatch.Client, name string, timeout time.Duration) (*cloudwatch.GetMetricStreamOutput, error) { //nolint:unparam @@ -439,16 +440,16 @@ func waitMetricStreamRunning(ctx context.Context, conn *cloudwatch.Client, name outputRaw, err := stateConf.WaitForStateContext(ctx) if output, ok := outputRaw.(*cloudwatch.GetMetricStreamOutput); ok { - return output, err + return output, smarterr.NewError(err) } - return nil, err + return nil, smarterr.NewError(err) } func validateMetricStreamName(v any, k string) (ws []string, errors []error) { return validation.All( validation.StringLenBetween(1, 255), - validation.StringMatch(regexache.MustCompile(`^[0-9A-Za-z_-]*$`), "must match [0-9A-Za-z_-]"), + validation.StringMatch(regexache.MustCompile(`^[0-9A-Za-z_-]*$`), "must match [0-9A-Za-z_-]"), )(v, k) } diff --git a/internal/service/cloudwatch/metric_stream_tags_gen_test.go b/internal/service/cloudwatch/metric_stream_tags_gen_test.go index 466e3465b6e4..4c65e23880d2 100644 --- a/internal/service/cloudwatch/metric_stream_tags_gen_test.go +++ b/internal/service/cloudwatch/metric_stream_tags_gen_test.go @@ -6,7 +6,6 @@ import ( "testing" "github.com/hashicorp/terraform-plugin-testing/config" - sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/knownvalue" "github.com/hashicorp/terraform-plugin-testing/plancheck" @@ -18,10 +17,11 @@ import ( func TestAccCloudWatchMetricStream_tags(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_cloudwatch_metric_stream.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, 
ErrorCheck: acctest.ErrorCheck(t, names.CloudWatchServiceID), CheckDestroy: testAccCheckMetricStreamDestroy(ctx), @@ -199,10 +199,11 @@ func TestAccCloudWatchMetricStream_tags(t *testing.T) { func TestAccCloudWatchMetricStream_tags_null(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_cloudwatch_metric_stream.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.CloudWatchServiceID), CheckDestroy: testAccCheckMetricStreamDestroy(ctx), @@ -265,10 +266,11 @@ func TestAccCloudWatchMetricStream_tags_null(t *testing.T) { func TestAccCloudWatchMetricStream_tags_EmptyMap(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_cloudwatch_metric_stream.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.CloudWatchServiceID), CheckDestroy: testAccCheckMetricStreamDestroy(ctx), @@ -327,10 +329,11 @@ func TestAccCloudWatchMetricStream_tags_EmptyMap(t *testing.T) { func TestAccCloudWatchMetricStream_tags_AddOnUpdate(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_cloudwatch_metric_stream.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.CloudWatchServiceID), CheckDestroy: testAccCheckMetricStreamDestroy(ctx), @@ -407,10 +410,11 @@ func 
TestAccCloudWatchMetricStream_tags_AddOnUpdate(t *testing.T) { func TestAccCloudWatchMetricStream_tags_EmptyTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_cloudwatch_metric_stream.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.CloudWatchServiceID), CheckDestroy: testAccCheckMetricStreamDestroy(ctx), @@ -495,10 +499,11 @@ func TestAccCloudWatchMetricStream_tags_EmptyTag_OnCreate(t *testing.T) { func TestAccCloudWatchMetricStream_tags_EmptyTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_cloudwatch_metric_stream.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.CloudWatchServiceID), CheckDestroy: testAccCheckMetricStreamDestroy(ctx), @@ -631,10 +636,11 @@ func TestAccCloudWatchMetricStream_tags_EmptyTag_OnUpdate_Add(t *testing.T) { func TestAccCloudWatchMetricStream_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_cloudwatch_metric_stream.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.CloudWatchServiceID), CheckDestroy: testAccCheckMetricStreamDestroy(ctx), @@ -719,10 +725,11 @@ func TestAccCloudWatchMetricStream_tags_EmptyTag_OnUpdate_Replace(t *testing.T) func 
TestAccCloudWatchMetricStream_tags_DefaultTags_providerOnly(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_cloudwatch_metric_stream.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.CloudWatchServiceID), CheckDestroy: testAccCheckMetricStreamDestroy(ctx), @@ -899,10 +906,11 @@ func TestAccCloudWatchMetricStream_tags_DefaultTags_providerOnly(t *testing.T) { func TestAccCloudWatchMetricStream_tags_DefaultTags_nonOverlapping(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_cloudwatch_metric_stream.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.CloudWatchServiceID), CheckDestroy: testAccCheckMetricStreamDestroy(ctx), @@ -1058,10 +1066,11 @@ func TestAccCloudWatchMetricStream_tags_DefaultTags_nonOverlapping(t *testing.T) func TestAccCloudWatchMetricStream_tags_DefaultTags_overlapping(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_cloudwatch_metric_stream.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.CloudWatchServiceID), CheckDestroy: testAccCheckMetricStreamDestroy(ctx), @@ -1233,10 +1242,11 @@ func TestAccCloudWatchMetricStream_tags_DefaultTags_overlapping(t *testing.T) { func 
TestAccCloudWatchMetricStream_tags_DefaultTags_updateToProviderOnly(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_cloudwatch_metric_stream.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.CloudWatchServiceID), CheckDestroy: testAccCheckMetricStreamDestroy(ctx), @@ -1322,10 +1332,11 @@ func TestAccCloudWatchMetricStream_tags_DefaultTags_updateToProviderOnly(t *test func TestAccCloudWatchMetricStream_tags_DefaultTags_updateToResourceOnly(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_cloudwatch_metric_stream.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.CloudWatchServiceID), CheckDestroy: testAccCheckMetricStreamDestroy(ctx), @@ -1410,10 +1421,11 @@ func TestAccCloudWatchMetricStream_tags_DefaultTags_updateToResourceOnly(t *test func TestAccCloudWatchMetricStream_tags_DefaultTags_emptyResourceTag(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_cloudwatch_metric_stream.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.CloudWatchServiceID), CheckDestroy: testAccCheckMetricStreamDestroy(ctx), @@ -1474,10 +1486,11 @@ func TestAccCloudWatchMetricStream_tags_DefaultTags_emptyResourceTag(t *testing. 
func TestAccCloudWatchMetricStream_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_cloudwatch_metric_stream.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.CloudWatchServiceID), CheckDestroy: testAccCheckMetricStreamDestroy(ctx), @@ -1530,10 +1543,11 @@ func TestAccCloudWatchMetricStream_tags_DefaultTags_emptyProviderOnlyTag(t *test func TestAccCloudWatchMetricStream_tags_DefaultTags_nullOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_cloudwatch_metric_stream.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.CloudWatchServiceID), CheckDestroy: testAccCheckMetricStreamDestroy(ctx), @@ -1591,10 +1605,11 @@ func TestAccCloudWatchMetricStream_tags_DefaultTags_nullOverlappingResourceTag(t func TestAccCloudWatchMetricStream_tags_DefaultTags_nullNonOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_cloudwatch_metric_stream.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.CloudWatchServiceID), CheckDestroy: testAccCheckMetricStreamDestroy(ctx), @@ -1652,10 +1667,11 @@ func TestAccCloudWatchMetricStream_tags_DefaultTags_nullNonOverlappingResourceTa func 
TestAccCloudWatchMetricStream_tags_ComputedTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_cloudwatch_metric_stream.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.CloudWatchServiceID), CheckDestroy: testAccCheckMetricStreamDestroy(ctx), @@ -1706,10 +1722,11 @@ func TestAccCloudWatchMetricStream_tags_ComputedTag_OnCreate(t *testing.T) { func TestAccCloudWatchMetricStream_tags_ComputedTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_cloudwatch_metric_stream.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.CloudWatchServiceID), CheckDestroy: testAccCheckMetricStreamDestroy(ctx), @@ -1802,10 +1819,11 @@ func TestAccCloudWatchMetricStream_tags_ComputedTag_OnUpdate_Add(t *testing.T) { func TestAccCloudWatchMetricStream_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_cloudwatch_metric_stream.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.CloudWatchServiceID), CheckDestroy: testAccCheckMetricStreamDestroy(ctx), @@ -1888,10 +1906,11 @@ func TestAccCloudWatchMetricStream_tags_ComputedTag_OnUpdate_Replace(t *testing. 
func TestAccCloudWatchMetricStream_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_cloudwatch_metric_stream.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.CloudWatchServiceID), CheckDestroy: testAccCheckMetricStreamDestroy(ctx), @@ -2049,10 +2068,11 @@ func TestAccCloudWatchMetricStream_tags_IgnoreTags_Overlap_DefaultTag(t *testing func TestAccCloudWatchMetricStream_tags_IgnoreTags_Overlap_ResourceTag(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_cloudwatch_metric_stream.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.CloudWatchServiceID), CheckDestroy: testAccCheckMetricStreamDestroy(ctx), diff --git a/internal/service/cloudwatch/service_endpoint_resolver_gen.go b/internal/service/cloudwatch/service_endpoint_resolver_gen.go index 2430cfac6f3e..f1746e3451af 100644 --- a/internal/service/cloudwatch/service_endpoint_resolver_gen.go +++ b/internal/service/cloudwatch/service_endpoint_resolver_gen.go @@ -62,7 +62,7 @@ func (r resolverV2) ResolveEndpoint(ctx context.Context, params cloudwatch.Endpo }) params.UseFIPS = aws.Bool(false) } else { - err = fmt.Errorf("looking up cloudwatch endpoint %q: %s", hostname, err) + err = fmt.Errorf("looking up cloudwatch endpoint %q: %w", hostname, err) return } } else { diff --git a/internal/service/cloudwatch/service_endpoints_gen_test.go b/internal/service/cloudwatch/service_endpoints_gen_test.go index f6322d39f042..a49abada8d97 100644 
--- a/internal/service/cloudwatch/service_endpoints_gen_test.go +++ b/internal/service/cloudwatch/service_endpoints_gen_test.go @@ -521,7 +521,7 @@ func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { ) } -var errCancelOperation = fmt.Errorf("Test: Canceling request") +var errCancelOperation = errors.New("Test: Canceling request") func addCancelRequestMiddleware() func(*middleware.Stack) error { return func(stack *middleware.Stack) error { diff --git a/internal/service/cloudwatch/service_package_gen.go b/internal/service/cloudwatch/service_package_gen.go index d0bb5931375b..0eb9f9603c6d 100644 --- a/internal/service/cloudwatch/service_package_gen.go +++ b/internal/service/cloudwatch/service_package_gen.go @@ -7,7 +7,6 @@ import ( "unique" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/cloudwatch" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -80,7 +79,11 @@ func (p *servicePackage) SDKResources(ctx context.Context) []*inttypes.ServicePa Tags: unique.Make(inttypes.ServicePackageResourceTags{ IdentifierAttribute: names.AttrARN, }), - Region: unique.Make(inttypes.ResourceRegionDefault()), + Region: unique.Make(inttypes.ResourceRegionDefault()), + Identity: inttypes.RegionalSingleParameterIdentity("alarm_name"), + Import: inttypes.SDKv2Import{ + WrappedImport: true, + }, }, { Factory: resourceMetricStream, @@ -117,7 +120,7 @@ func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) ( func(o *cloudwatch.Options) { if inContext, ok := conns.FromContext(ctx); ok && inContext.VCREnabled() { tflog.Info(ctx, "overriding retry behavior to immediately return VCR errors") - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(vcr.InteractionNotFoundRetryableFunc)) + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), 
vcr.InteractionNotFoundRetryableFunc) } }, withExtraOptions(ctx, p, config), diff --git a/internal/service/cloudwatch/smarterr.hcl b/internal/service/cloudwatch/smarterr.hcl new file mode 100644 index 000000000000..a35c19df645e --- /dev/null +++ b/internal/service/cloudwatch/smarterr.hcl @@ -0,0 +1,11 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +parameter "service" { + value = "CloudWatch" +} + +hint "dashboard_name_conflict" { + error_contains = "DashboardAlreadyExists" + suggestion = "A dashboard with this name already exists in your AWS account. Choose a unique name for your CloudWatch dashboard, or import the existing dashboard into Terraform using `terraform import` if you want to manage it." +} diff --git a/internal/service/cloudwatch/sweep.go b/internal/service/cloudwatch/sweep.go index fe85a4921e70..f0122279fa79 100644 --- a/internal/service/cloudwatch/sweep.go +++ b/internal/service/cloudwatch/sweep.go @@ -41,7 +41,7 @@ func sweepCompositeAlarms(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %w", err) + return fmt.Errorf("getting client: %w", err) } conn := client.CloudWatchClient(ctx) input := &cloudwatch.DescribeAlarmsInput{ @@ -84,7 +84,7 @@ func sweepDashboards(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %w", err) + return fmt.Errorf("getting client: %w", err) } conn := client.CloudWatchClient(ctx) input := &cloudwatch.ListDashboardsInput{} @@ -125,7 +125,7 @@ func sweepMetricAlarms(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %w", err) + return fmt.Errorf("getting client: %w", err) } conn := client.CloudWatchClient(ctx) input := 
&cloudwatch.DescribeAlarmsInput{ @@ -168,7 +168,7 @@ func sweepMetricStreams(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %w", err) + return fmt.Errorf("getting client: %w", err) } conn := client.CloudWatchClient(ctx) input := &cloudwatch.ListMetricStreamsInput{} diff --git a/internal/service/cloudwatch/tags_gen.go b/internal/service/cloudwatch/tags_gen.go index 520e28a008c1..1cfbe21c9186 100644 --- a/internal/service/cloudwatch/tags_gen.go +++ b/internal/service/cloudwatch/tags_gen.go @@ -3,8 +3,8 @@ package cloudwatch import ( "context" - "fmt" + "github.com/YakDriver/smarterr" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/cloudwatch" awstypes "github.com/aws/aws-sdk-go-v2/service/cloudwatch/types" @@ -27,7 +27,7 @@ func listTags(ctx context.Context, conn *cloudwatch.Client, identifier string, o output, err := conn.ListTagsForResource(ctx, &input, optFns...) if err != nil { - return tftags.New(ctx, nil), err + return tftags.New(ctx, nil), smarterr.NewError(err) } return keyValueTags(ctx, output.Tags), nil @@ -39,7 +39,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri tags, err := listTags(ctx, meta.(*conns.AWSClient).CloudWatchClient(ctx), identifier) if err != nil { - return err + return smarterr.NewError(err) } if inContext, ok := tftags.FromContext(ctx); ok { @@ -126,7 +126,7 @@ func updateTags(ctx context.Context, conn *cloudwatch.Client, identifier string, _, err := conn.UntagResource(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("untagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } @@ -141,7 +141,7 @@ func updateTags(ctx context.Context, conn *cloudwatch.Client, identifier string, _, err := conn.TagResource(ctx, &input, optFns...) 
if err != nil { - return fmt.Errorf("tagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } diff --git a/internal/service/cloudwatch/testdata/MetricAlarm/basic/main_gen.tf b/internal/service/cloudwatch/testdata/MetricAlarm/basic/main_gen.tf new file mode 100644 index 000000000000..afeae1c0c0c5 --- /dev/null +++ b/internal/service/cloudwatch/testdata/MetricAlarm/basic/main_gen.tf @@ -0,0 +1,25 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resource "aws_cloudwatch_metric_alarm" "test" { + alarm_name = var.rName + comparison_operator = "GreaterThanOrEqualToThreshold" + evaluation_periods = 2 + metric_name = "CPUUtilization" + namespace = "AWS/EC2" + period = 120 + statistic = "Average" + threshold = 80 + alarm_description = "This metric monitors ec2 cpu utilization" + insufficient_data_actions = [] + + dimensions = { + InstanceId = "i-abcd1234" + } +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} diff --git a/internal/service/cloudwatch/testdata/MetricAlarm/basic_v6.7.0/main_gen.tf b/internal/service/cloudwatch/testdata/MetricAlarm/basic_v6.7.0/main_gen.tf new file mode 100644 index 000000000000..74c48c608bdf --- /dev/null +++ b/internal/service/cloudwatch/testdata/MetricAlarm/basic_v6.7.0/main_gen.tf @@ -0,0 +1,35 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_cloudwatch_metric_alarm" "test" { + alarm_name = var.rName + comparison_operator = "GreaterThanOrEqualToThreshold" + evaluation_periods = 2 + metric_name = "CPUUtilization" + namespace = "AWS/EC2" + period = 120 + statistic = "Average" + threshold = 80 + alarm_description = "This metric monitors ec2 cpu utilization" + insufficient_data_actions = [] + + dimensions = { + InstanceId = "i-abcd1234" + } +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "6.7.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/cloudwatch/testdata/MetricAlarm/region_override/main_gen.tf b/internal/service/cloudwatch/testdata/MetricAlarm/region_override/main_gen.tf new file mode 100644 index 000000000000..fbb2b23e76b7 --- /dev/null +++ b/internal/service/cloudwatch/testdata/MetricAlarm/region_override/main_gen.tf @@ -0,0 +1,33 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_cloudwatch_metric_alarm" "test" { + region = var.region + + alarm_name = var.rName + comparison_operator = "GreaterThanOrEqualToThreshold" + evaluation_periods = 2 + metric_name = "CPUUtilization" + namespace = "AWS/EC2" + period = 120 + statistic = "Average" + threshold = 80 + alarm_description = "This metric monitors ec2 cpu utilization" + insufficient_data_actions = [] + + dimensions = { + InstanceId = "i-abcd1234" + } +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} + +variable "region" { + description = "Region to deploy resource in" + type = string + nullable = false +} diff --git a/internal/service/cloudwatch/testdata/tmpl/metric_alarm_tags.gtpl b/internal/service/cloudwatch/testdata/tmpl/metric_alarm_tags.gtpl index aeb77b32e1f3..41f309b38785 100644 --- a/internal/service/cloudwatch/testdata/tmpl/metric_alarm_tags.gtpl +++ b/internal/service/cloudwatch/testdata/tmpl/metric_alarm_tags.gtpl @@ -1,4 +1,5 @@ resource "aws_cloudwatch_metric_alarm" "test" { +{{- template "region" }} alarm_name = var.rName comparison_operator = "GreaterThanOrEqualToThreshold" evaluation_periods = 2 diff --git a/internal/service/codeartifact/domain.go b/internal/service/codeartifact/domain.go index 6b7de4b88540..691d3d096f64 100644 --- a/internal/service/codeartifact/domain.go +++ b/internal/service/codeartifact/domain.go @@ -96,7 +96,7 @@ func resourceDomainCreate(ctx context.Context, d *schema.ResourceData, meta any) input.EncryptionKey = aws.String(v.(string)) } - outputRaw, err := tfresource.RetryWhenIsAErrorMessageContains[*types.ValidationException](ctx, propagationTimeout, func() (any, error) { + outputRaw, err := tfresource.RetryWhenIsAErrorMessageContains[any, *types.ValidationException](ctx, propagationTimeout, func(ctx context.Context) (any, error) { return conn.CreateDomain(ctx, input) }, "KMS key not found") diff --git 
a/internal/service/codeartifact/domain_identity_gen_test.go b/internal/service/codeartifact/domain_identity_gen_test.go index 5ba2f6c9b8a2..4c3feef38662 100644 --- a/internal/service/codeartifact/domain_identity_gen_test.go +++ b/internal/service/codeartifact/domain_identity_gen_test.go @@ -23,9 +23,10 @@ func testAccCodeArtifactDomain_IdentitySerial(t *testing.T) { t.Helper() testCases := map[string]func(t *testing.T){ - acctest.CtBasic: testAccCodeArtifactDomain_Identity_Basic, - "ExistingResource": testAccCodeArtifactDomain_Identity_ExistingResource, - "RegionOverride": testAccCodeArtifactDomain_Identity_RegionOverride, + acctest.CtBasic: testAccCodeArtifactDomain_Identity_Basic, + "ExistingResource": testAccCodeArtifactDomain_Identity_ExistingResource, + "ExistingResourceNoRefresh": testAccCodeArtifactDomain_Identity_ExistingResource_NoRefresh_NoChange, + "RegionOverride": testAccCodeArtifactDomain_Identity_RegionOverride, } acctest.RunSerialTests1Level(t, testCases, 0) @@ -33,10 +34,11 @@ func testAccCodeArtifactDomain_IdentitySerial(t *testing.T) { func testAccCodeArtifactDomain_Identity_Basic(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_codeartifact_domain.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ TerraformVersionChecks: []tfversion.TerraformVersionCheck{ tfversion.SkipBelow(tfversion.Version1_12_0), }, @@ -58,6 +60,9 @@ func testAccCodeArtifactDomain_Identity_Basic(t *testing.T) { tfstatecheck.ExpectRegionalARNFormat(resourceName, tfjsonpath.New(names.AttrARN), "codeartifact", "domain/{domain}"), statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New(names.AttrARN), compare.ValuesSame()), statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + 
names.AttrARN: knownvalue.NotNull(), + }), statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), }, }, @@ -119,7 +124,7 @@ func testAccCodeArtifactDomain_Identity_RegionOverride(t *testing.T) { resourceName := "aws_codeartifact_domain.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ TerraformVersionChecks: []tfversion.TerraformVersionCheck{ tfversion.SkipBelow(tfversion.Version1_12_0), }, @@ -139,6 +144,9 @@ func testAccCodeArtifactDomain_Identity_RegionOverride(t *testing.T) { tfstatecheck.ExpectRegionalARNAlternateRegionFormat(resourceName, tfjsonpath.New(names.AttrARN), "codeartifact", "domain/{domain}"), statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New(names.AttrARN), compare.ValuesSame()), statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), }, }, @@ -230,3 +238,129 @@ func testAccCodeArtifactDomain_Identity_RegionOverride(t *testing.T) { }, }) } + +func testAccCodeArtifactDomain_Identity_ExistingResource(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_codeartifact_domain.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.Test(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.CodeArtifactServiceID), + CheckDestroy: testAccCheckDomainDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: 
config.StaticDirectory("testdata/Domain/basic_v5.100.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDomainExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: v6.0 Identity error + { + ConfigDirectory: config.StaticDirectory("testdata/Domain/basic_v6.0.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDomainExists(ctx, resourceName), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.Null(), + }), + }, + }, + + // Step 3: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Domain/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + }, + }) +} + +func 
testAccCodeArtifactDomain_Identity_ExistingResource_NoRefresh_NoChange(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_codeartifact_domain.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.Test(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.CodeArtifactServiceID), + CheckDestroy: testAccCheckDomainDestroy(ctx), + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/Domain/basic_v5.100.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDomainExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Domain/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDomainExists(ctx, resourceName), + ), + }, + }, + }) +} diff --git a/internal/service/codeartifact/domain_permissions_policy_identity_gen_test.go b/internal/service/codeartifact/domain_permissions_policy_identity_gen_test.go index 221474c71830..e331d30a15c1 100644 --- a/internal/service/codeartifact/domain_permissions_policy_identity_gen_test.go +++ b/internal/service/codeartifact/domain_permissions_policy_identity_gen_test.go @@ -15,6 +15,7 @@ import ( "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" "github.com/hashicorp/terraform-plugin-testing/tfversion" 
"github.com/hashicorp/terraform-provider-aws/internal/acctest" + tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -22,9 +23,10 @@ func testAccCodeArtifactDomainPermissionsPolicy_IdentitySerial(t *testing.T) { t.Helper() testCases := map[string]func(t *testing.T){ - acctest.CtBasic: testAccCodeArtifactDomainPermissionsPolicy_Identity_Basic, - "ExistingResource": testAccCodeArtifactDomainPermissionsPolicy_Identity_ExistingResource, - "RegionOverride": testAccCodeArtifactDomainPermissionsPolicy_Identity_RegionOverride, + acctest.CtBasic: testAccCodeArtifactDomainPermissionsPolicy_Identity_Basic, + "ExistingResource": testAccCodeArtifactDomainPermissionsPolicy_Identity_ExistingResource, + "ExistingResourceNoRefresh": testAccCodeArtifactDomainPermissionsPolicy_Identity_ExistingResource_NoRefresh_NoChange, + "RegionOverride": testAccCodeArtifactDomainPermissionsPolicy_Identity_RegionOverride, } acctest.RunSerialTests1Level(t, testCases, 0) @@ -32,10 +34,11 @@ func testAccCodeArtifactDomainPermissionsPolicy_IdentitySerial(t *testing.T) { func testAccCodeArtifactDomainPermissionsPolicy_Identity_Basic(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_codeartifact_domain_permissions_policy.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ TerraformVersionChecks: []tfversion.TerraformVersionCheck{ tfversion.SkipBelow(tfversion.Version1_12_0), }, @@ -56,6 +59,9 @@ func testAccCodeArtifactDomainPermissionsPolicy_Identity_Basic(t *testing.T) { ConfigStateChecks: []statecheck.StateCheck{ statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New(names.AttrResourceARN), compare.ValuesSame()), statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + 
statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrResourceARN: knownvalue.NotNull(), + }), statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrResourceARN)), }, }, @@ -117,7 +123,7 @@ func testAccCodeArtifactDomainPermissionsPolicy_Identity_RegionOverride(t *testi resourceName := "aws_codeartifact_domain_permissions_policy.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ TerraformVersionChecks: []tfversion.TerraformVersionCheck{ tfversion.SkipBelow(tfversion.Version1_12_0), }, @@ -136,6 +142,9 @@ func testAccCodeArtifactDomainPermissionsPolicy_Identity_RegionOverride(t *testi ConfigStateChecks: []statecheck.StateCheck{ statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New(names.AttrResourceARN), compare.ValuesSame()), statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrResourceARN: knownvalue.NotNull(), + }), statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrResourceARN)), }, }, @@ -227,3 +236,129 @@ func testAccCodeArtifactDomainPermissionsPolicy_Identity_RegionOverride(t *testi }, }) } + +func testAccCodeArtifactDomainPermissionsPolicy_Identity_ExistingResource(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_codeartifact_domain_permissions_policy.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.Test(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.CodeArtifactServiceID), + CheckDestroy: testAccCheckDomainPermissionsPolicyDestroy(ctx), + 
Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/DomainPermissionsPolicy/basic_v5.100.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDomainPermissionsPolicyExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: v6.0 Identity error + { + ConfigDirectory: config.StaticDirectory("testdata/DomainPermissionsPolicy/basic_v6.0.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDomainPermissionsPolicyExists(ctx, resourceName), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrResourceARN: knownvalue.Null(), + }), + }, + }, + + // Step 3: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/DomainPermissionsPolicy/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + 
names.AttrResourceARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrResourceARN)), + }, + }, + }, + }) +} + +func testAccCodeArtifactDomainPermissionsPolicy_Identity_ExistingResource_NoRefresh_NoChange(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_codeartifact_domain_permissions_policy.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.Test(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.CodeArtifactServiceID), + CheckDestroy: testAccCheckDomainPermissionsPolicyDestroy(ctx), + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/DomainPermissionsPolicy/basic_v5.100.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDomainPermissionsPolicyExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/DomainPermissionsPolicy/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDomainPermissionsPolicyExists(ctx, resourceName), + ), + }, + }, + }) +} diff --git a/internal/service/codeartifact/domain_permissions_policy_test.go b/internal/service/codeartifact/domain_permissions_policy_test.go index 38446e0200f6..9ecb7c2bf8c2 100644 --- 
a/internal/service/codeartifact/domain_permissions_policy_test.go +++ b/internal/service/codeartifact/domain_permissions_policy_test.go @@ -11,14 +11,9 @@ import ( "github.com/YakDriver/regexache" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" - "github.com/hashicorp/terraform-plugin-testing/knownvalue" "github.com/hashicorp/terraform-plugin-testing/plancheck" - "github.com/hashicorp/terraform-plugin-testing/statecheck" "github.com/hashicorp/terraform-plugin-testing/terraform" - "github.com/hashicorp/terraform-plugin-testing/tfversion" "github.com/hashicorp/terraform-provider-aws/internal/acctest" - tfknownvalue "github.com/hashicorp/terraform-provider-aws/internal/acctest/knownvalue" - tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" "github.com/hashicorp/terraform-provider-aws/internal/conns" tfcodeartifact "github.com/hashicorp/terraform-provider-aws/internal/service/codeartifact" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" @@ -184,83 +179,6 @@ func testAccDomainPermissionsPolicy_Disappears_domain(t *testing.T) { }) } -func testAccCodeArtifactDomainPermissionsPolicy_Identity_ExistingResource(t *testing.T) { - ctx := acctest.Context(t) - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resourceName := "aws_codeartifact_domain_permissions_policy.test" - - resource.Test(t, resource.TestCase{ - TerraformVersionChecks: []tfversion.TerraformVersionCheck{ - tfversion.SkipBelow(tfversion.Version1_12_0), - }, - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.CodeArtifactEndpointID) }, - ErrorCheck: acctest.ErrorCheck(t, names.CodeArtifactServiceID), - CheckDestroy: testAccCheckDomainPermissionsPolicyDestroy(ctx), - Steps: []resource.TestStep{ - { - ExternalProviders: map[string]resource.ExternalProvider{ - "aws": { - Source: "hashicorp/aws", - VersionConstraint: 
"5.100.0", - }, - }, - Config: testAccDomainPermissionsPolicyConfig_basic(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckDomainPermissionsPolicyExists(ctx, resourceName), - ), - ConfigStateChecks: []statecheck.StateCheck{ - tfstatecheck.ExpectNoIdentity(resourceName), - }, - }, - { - ExternalProviders: map[string]resource.ExternalProvider{ - "aws": { - Source: "hashicorp/aws", - VersionConstraint: "6.0.0", - }, - }, - Config: testAccDomainPermissionsPolicyConfig_basic(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckDomainPermissionsPolicyExists(ctx, resourceName), - ), - ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - PostApplyPostRefresh: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - }, - ConfigStateChecks: []statecheck.StateCheck{ - statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ - names.AttrResourceARN: knownvalue.Null(), - }), - }, - }, - { - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - Config: testAccDomainPermissionsPolicyConfig_basic(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckDomainPermissionsPolicyExists(ctx, resourceName), - ), - ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - PostApplyPostRefresh: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - }, - ConfigStateChecks: []statecheck.StateCheck{ - statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ - names.AttrResourceARN: tfknownvalue.RegionalARNRegexp("codeartifact", regexache.MustCompile(`domain/.+`)), - }), - }, - }, - }, - }) -} - func testAccCheckDomainPermissionsPolicyExists(ctx context.Context, n string) resource.TestCheckFunc { return func(s 
*terraform.State) error { rs, ok := s.RootModule().Resources[n] diff --git a/internal/service/codeartifact/domain_test.go b/internal/service/codeartifact/domain_test.go index 0d778739acf9..2f45226bfeff 100644 --- a/internal/service/codeartifact/domain_test.go +++ b/internal/service/codeartifact/domain_test.go @@ -16,10 +16,7 @@ import ( "github.com/hashicorp/terraform-plugin-testing/statecheck" "github.com/hashicorp/terraform-plugin-testing/terraform" "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" - "github.com/hashicorp/terraform-plugin-testing/tfversion" "github.com/hashicorp/terraform-provider-aws/internal/acctest" - tfknownvalue "github.com/hashicorp/terraform-provider-aws/internal/acctest/knownvalue" - tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" "github.com/hashicorp/terraform-provider-aws/internal/conns" tfcodeartifact "github.com/hashicorp/terraform-provider-aws/internal/service/codeartifact" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" @@ -213,83 +210,6 @@ func testAccDomain_MigrateAssetSizeBytesToString(t *testing.T) { }) } -func testAccCodeArtifactDomain_Identity_ExistingResource(t *testing.T) { - ctx := acctest.Context(t) - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resourceName := "aws_codeartifact_domain.test" - - resource.Test(t, resource.TestCase{ - TerraformVersionChecks: []tfversion.TerraformVersionCheck{ - tfversion.SkipBelow(tfversion.Version1_12_0), - }, - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.CodeArtifactEndpointID) }, - ErrorCheck: acctest.ErrorCheck(t, names.CodeArtifactServiceID), - CheckDestroy: testAccCheckDomainDestroy(ctx), - Steps: []resource.TestStep{ - { - ExternalProviders: map[string]resource.ExternalProvider{ - "aws": { - Source: "hashicorp/aws", - VersionConstraint: "5.100.0", - }, - }, - Config: testAccDomainConfig_basic(rName), - Check: resource.ComposeTestCheckFunc( - 
testAccCheckDomainExists(ctx, resourceName), - ), - ConfigStateChecks: []statecheck.StateCheck{ - tfstatecheck.ExpectNoIdentity(resourceName), - }, - }, - { - ExternalProviders: map[string]resource.ExternalProvider{ - "aws": { - Source: "hashicorp/aws", - VersionConstraint: "6.0.0", - }, - }, - Config: testAccDomainConfig_basic(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckDomainExists(ctx, resourceName), - ), - ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - PostApplyPostRefresh: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - }, - ConfigStateChecks: []statecheck.StateCheck{ - statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ - names.AttrARN: knownvalue.Null(), - }), - }, - }, - { - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - Config: testAccDomainConfig_basic(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckDomainExists(ctx, resourceName), - ), - ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - PostApplyPostRefresh: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - }, - ConfigStateChecks: []statecheck.StateCheck{ - statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ - names.AttrARN: tfknownvalue.RegionalARNRegexp("codeartifact", regexache.MustCompile(`domain/.+`)), - }), - }, - }, - }, - }) -} - func testAccCheckDomainExists(ctx context.Context, n string) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] diff --git a/internal/service/codeartifact/repository_identity_gen_test.go b/internal/service/codeartifact/repository_identity_gen_test.go index 422b2e9da4b2..b0543bc96902 100644 --- 
a/internal/service/codeartifact/repository_identity_gen_test.go +++ b/internal/service/codeartifact/repository_identity_gen_test.go @@ -23,9 +23,10 @@ func testAccCodeArtifactRepository_IdentitySerial(t *testing.T) { t.Helper() testCases := map[string]func(t *testing.T){ - acctest.CtBasic: testAccCodeArtifactRepository_Identity_Basic, - "ExistingResource": testAccCodeArtifactRepository_Identity_ExistingResource, - "RegionOverride": testAccCodeArtifactRepository_Identity_RegionOverride, + acctest.CtBasic: testAccCodeArtifactRepository_Identity_Basic, + "ExistingResource": testAccCodeArtifactRepository_Identity_ExistingResource, + "ExistingResourceNoRefresh": testAccCodeArtifactRepository_Identity_ExistingResource_NoRefresh_NoChange, + "RegionOverride": testAccCodeArtifactRepository_Identity_RegionOverride, } acctest.RunSerialTests1Level(t, testCases, 0) @@ -33,10 +34,11 @@ func testAccCodeArtifactRepository_IdentitySerial(t *testing.T) { func testAccCodeArtifactRepository_Identity_Basic(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_codeartifact_repository.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ TerraformVersionChecks: []tfversion.TerraformVersionCheck{ tfversion.SkipBelow(tfversion.Version1_12_0), }, @@ -58,6 +60,9 @@ func testAccCodeArtifactRepository_Identity_Basic(t *testing.T) { tfstatecheck.ExpectRegionalARNFormat(resourceName, tfjsonpath.New(names.AttrARN), "codeartifact", "repository/{domain}/{repository}"), statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New(names.AttrARN), compare.ValuesSame()), statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), statecheck.ExpectIdentityValueMatchesState(resourceName, 
tfjsonpath.New(names.AttrARN)), }, }, @@ -119,7 +124,7 @@ func testAccCodeArtifactRepository_Identity_RegionOverride(t *testing.T) { resourceName := "aws_codeartifact_repository.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ TerraformVersionChecks: []tfversion.TerraformVersionCheck{ tfversion.SkipBelow(tfversion.Version1_12_0), }, @@ -139,6 +144,9 @@ func testAccCodeArtifactRepository_Identity_RegionOverride(t *testing.T) { tfstatecheck.ExpectRegionalARNAlternateRegionFormat(resourceName, tfjsonpath.New(names.AttrARN), "codeartifact", "repository/{domain}/{repository}"), statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New(names.AttrARN), compare.ValuesSame()), statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), }, }, @@ -230,3 +238,129 @@ func testAccCodeArtifactRepository_Identity_RegionOverride(t *testing.T) { }, }) } + +func testAccCodeArtifactRepository_Identity_ExistingResource(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_codeartifact_repository.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.Test(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.CodeArtifactServiceID), + CheckDestroy: testAccCheckRepositoryDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/Repository/basic_v5.100.0/"), + ConfigVariables: config.Variables{ 
+ acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckRepositoryExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: v6.0 Identity error + { + ConfigDirectory: config.StaticDirectory("testdata/Repository/basic_v6.0.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckRepositoryExists(ctx, resourceName), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.Null(), + }), + }, + }, + + // Step 3: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Repository/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + }, + }) +} + +func testAccCodeArtifactRepository_Identity_ExistingResource_NoRefresh_NoChange(t *testing.T) { + ctx := 
acctest.Context(t) + + resourceName := "aws_codeartifact_repository.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.Test(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.CodeArtifactServiceID), + CheckDestroy: testAccCheckRepositoryDestroy(ctx), + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/Repository/basic_v5.100.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckRepositoryExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Repository/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckRepositoryExists(ctx, resourceName), + ), + }, + }, + }) +} diff --git a/internal/service/codeartifact/repository_permissions_policy_identity_gen_test.go b/internal/service/codeartifact/repository_permissions_policy_identity_gen_test.go index bcb996b7f69d..a0c3a0f8f311 100644 --- a/internal/service/codeartifact/repository_permissions_policy_identity_gen_test.go +++ b/internal/service/codeartifact/repository_permissions_policy_identity_gen_test.go @@ -15,6 +15,7 @@ import ( "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" "github.com/hashicorp/terraform-plugin-testing/tfversion" 
"github.com/hashicorp/terraform-provider-aws/internal/acctest" + tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -22,9 +23,10 @@ func testAccCodeArtifactRepositoryPermissionsPolicy_IdentitySerial(t *testing.T) t.Helper() testCases := map[string]func(t *testing.T){ - acctest.CtBasic: testAccCodeArtifactRepositoryPermissionsPolicy_Identity_Basic, - "ExistingResource": testAccCodeArtifactRepositoryPermissionsPolicy_Identity_ExistingResource, - "RegionOverride": testAccCodeArtifactRepositoryPermissionsPolicy_Identity_RegionOverride, + acctest.CtBasic: testAccCodeArtifactRepositoryPermissionsPolicy_Identity_Basic, + "ExistingResource": testAccCodeArtifactRepositoryPermissionsPolicy_Identity_ExistingResource, + "ExistingResourceNoRefresh": testAccCodeArtifactRepositoryPermissionsPolicy_Identity_ExistingResource_NoRefresh_NoChange, + "RegionOverride": testAccCodeArtifactRepositoryPermissionsPolicy_Identity_RegionOverride, } acctest.RunSerialTests1Level(t, testCases, 0) @@ -32,10 +34,11 @@ func testAccCodeArtifactRepositoryPermissionsPolicy_IdentitySerial(t *testing.T) func testAccCodeArtifactRepositoryPermissionsPolicy_Identity_Basic(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_codeartifact_repository_permissions_policy.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ TerraformVersionChecks: []tfversion.TerraformVersionCheck{ tfversion.SkipBelow(tfversion.Version1_12_0), }, @@ -56,6 +59,9 @@ func testAccCodeArtifactRepositoryPermissionsPolicy_Identity_Basic(t *testing.T) ConfigStateChecks: []statecheck.StateCheck{ statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New(names.AttrResourceARN), compare.ValuesSame()), statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), 
knownvalue.StringExact(acctest.Region())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrResourceARN: knownvalue.NotNull(), + }), statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrResourceARN)), }, }, @@ -117,7 +123,7 @@ func testAccCodeArtifactRepositoryPermissionsPolicy_Identity_RegionOverride(t *t resourceName := "aws_codeartifact_repository_permissions_policy.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ TerraformVersionChecks: []tfversion.TerraformVersionCheck{ tfversion.SkipBelow(tfversion.Version1_12_0), }, @@ -136,6 +142,9 @@ func testAccCodeArtifactRepositoryPermissionsPolicy_Identity_RegionOverride(t *t ConfigStateChecks: []statecheck.StateCheck{ statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New(names.AttrResourceARN), compare.ValuesSame()), statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrResourceARN: knownvalue.NotNull(), + }), statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrResourceARN)), }, }, @@ -227,3 +236,129 @@ func testAccCodeArtifactRepositoryPermissionsPolicy_Identity_RegionOverride(t *t }, }) } + +func testAccCodeArtifactRepositoryPermissionsPolicy_Identity_ExistingResource(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_codeartifact_repository_permissions_policy.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.Test(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.CodeArtifactServiceID), + 
CheckDestroy: testAccCheckRepositoryPermissionsPolicyDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/RepositoryPermissionsPolicy/basic_v5.100.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckRepositoryPermissionsPolicyExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: v6.0 Identity error + { + ConfigDirectory: config.StaticDirectory("testdata/RepositoryPermissionsPolicy/basic_v6.0.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckRepositoryPermissionsPolicyExists(ctx, resourceName), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrResourceARN: knownvalue.Null(), + }), + }, + }, + + // Step 3: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/RepositoryPermissionsPolicy/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ 
+ statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrResourceARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrResourceARN)), + }, + }, + }, + }) +} + +func testAccCodeArtifactRepositoryPermissionsPolicy_Identity_ExistingResource_NoRefresh_NoChange(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_codeartifact_repository_permissions_policy.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.Test(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.CodeArtifactServiceID), + CheckDestroy: testAccCheckRepositoryPermissionsPolicyDestroy(ctx), + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/RepositoryPermissionsPolicy/basic_v5.100.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckRepositoryPermissionsPolicyExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/RepositoryPermissionsPolicy/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckRepositoryPermissionsPolicyExists(ctx, resourceName), + ), + }, + }, + }) +} diff --git a/internal/service/codeartifact/repository_permissions_policy_test.go 
b/internal/service/codeartifact/repository_permissions_policy_test.go index 7b170f6c103b..69fd4555d8b4 100644 --- a/internal/service/codeartifact/repository_permissions_policy_test.go +++ b/internal/service/codeartifact/repository_permissions_policy_test.go @@ -11,14 +11,9 @@ import ( "github.com/YakDriver/regexache" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" - "github.com/hashicorp/terraform-plugin-testing/knownvalue" "github.com/hashicorp/terraform-plugin-testing/plancheck" - "github.com/hashicorp/terraform-plugin-testing/statecheck" "github.com/hashicorp/terraform-plugin-testing/terraform" - "github.com/hashicorp/terraform-plugin-testing/tfversion" "github.com/hashicorp/terraform-provider-aws/internal/acctest" - tfknownvalue "github.com/hashicorp/terraform-provider-aws/internal/acctest/knownvalue" - tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" "github.com/hashicorp/terraform-provider-aws/internal/conns" tfcodeartifact "github.com/hashicorp/terraform-provider-aws/internal/service/codeartifact" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" @@ -183,83 +178,6 @@ func testAccRepositoryPermissionsPolicy_Disappears_domain(t *testing.T) { }) } -func testAccCodeArtifactRepositoryPermissionsPolicy_Identity_ExistingResource(t *testing.T) { - ctx := acctest.Context(t) - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resourceName := "aws_codeartifact_repository_permissions_policy.test" - - resource.Test(t, resource.TestCase{ - TerraformVersionChecks: []tfversion.TerraformVersionCheck{ - tfversion.SkipBelow(tfversion.Version1_12_0), - }, - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.CodeArtifactEndpointID) }, - ErrorCheck: acctest.ErrorCheck(t, names.CodeArtifactServiceID), - CheckDestroy: testAccCheckRepositoryPermissionsPolicyDestroy(ctx), - Steps: 
[]resource.TestStep{ - { - ExternalProviders: map[string]resource.ExternalProvider{ - "aws": { - Source: "hashicorp/aws", - VersionConstraint: "5.100.0", - }, - }, - Config: testAccRepositoryPermissionsPolicyConfig_basic(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckRepositoryPermissionsPolicyExists(ctx, resourceName), - ), - ConfigStateChecks: []statecheck.StateCheck{ - tfstatecheck.ExpectNoIdentity(resourceName), - }, - }, - { - ExternalProviders: map[string]resource.ExternalProvider{ - "aws": { - Source: "hashicorp/aws", - VersionConstraint: "6.0.0", - }, - }, - Config: testAccRepositoryPermissionsPolicyConfig_basic(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckRepositoryPermissionsPolicyExists(ctx, resourceName), - ), - ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - PostApplyPostRefresh: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - }, - ConfigStateChecks: []statecheck.StateCheck{ - statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ - names.AttrResourceARN: knownvalue.Null(), - }), - }, - }, - { - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - Config: testAccRepositoryPermissionsPolicyConfig_basic(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckRepositoryPermissionsPolicyExists(ctx, resourceName), - ), - ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - PostApplyPostRefresh: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - }, - ConfigStateChecks: []statecheck.StateCheck{ - statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ - names.AttrResourceARN: tfknownvalue.RegionalARNRegexp("codeartifact", 
regexache.MustCompile(`repository/.+`)), - }), - }, - }, - }, - }) -} - func testAccCheckRepositoryPermissionsPolicyExists(ctx context.Context, n string) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] diff --git a/internal/service/codeartifact/repository_test.go b/internal/service/codeartifact/repository_test.go index 3849954526b7..46c21c4e1c16 100644 --- a/internal/service/codeartifact/repository_test.go +++ b/internal/service/codeartifact/repository_test.go @@ -8,17 +8,10 @@ import ( "fmt" "testing" - "github.com/YakDriver/regexache" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" - "github.com/hashicorp/terraform-plugin-testing/knownvalue" - "github.com/hashicorp/terraform-plugin-testing/plancheck" - "github.com/hashicorp/terraform-plugin-testing/statecheck" "github.com/hashicorp/terraform-plugin-testing/terraform" - "github.com/hashicorp/terraform-plugin-testing/tfversion" "github.com/hashicorp/terraform-provider-aws/internal/acctest" - tfknownvalue "github.com/hashicorp/terraform-provider-aws/internal/acctest/knownvalue" - tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" "github.com/hashicorp/terraform-provider-aws/internal/conns" tfcodeartifact "github.com/hashicorp/terraform-provider-aws/internal/service/codeartifact" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" @@ -288,83 +281,6 @@ func testAccRepository_disappears(t *testing.T) { }) } -func testAccCodeArtifactRepository_Identity_ExistingResource(t *testing.T) { - ctx := acctest.Context(t) - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resourceName := "aws_codeartifact_repository.test" - - resource.Test(t, resource.TestCase{ - TerraformVersionChecks: []tfversion.TerraformVersionCheck{ - tfversion.SkipBelow(tfversion.Version1_12_0), - }, - PreCheck: func() { acctest.PreCheck(ctx, t); 
acctest.PreCheckPartitionHasService(t, names.CodeArtifactEndpointID) }, - ErrorCheck: acctest.ErrorCheck(t, names.CodeArtifactServiceID), - CheckDestroy: testAccCheckRepositoryDestroy(ctx), - Steps: []resource.TestStep{ - { - ExternalProviders: map[string]resource.ExternalProvider{ - "aws": { - Source: "hashicorp/aws", - VersionConstraint: "5.100.0", - }, - }, - Config: testAccRepositoryConfig_basic(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckRepositoryExists(ctx, resourceName), - ), - ConfigStateChecks: []statecheck.StateCheck{ - tfstatecheck.ExpectNoIdentity(resourceName), - }, - }, - { - ExternalProviders: map[string]resource.ExternalProvider{ - "aws": { - Source: "hashicorp/aws", - VersionConstraint: "6.0.0", - }, - }, - Config: testAccRepositoryConfig_basic(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckRepositoryExists(ctx, resourceName), - ), - ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - PostApplyPostRefresh: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - }, - ConfigStateChecks: []statecheck.StateCheck{ - statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ - names.AttrARN: knownvalue.Null(), - }), - }, - }, - { - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - Config: testAccRepositoryConfig_basic(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckRepositoryExists(ctx, resourceName), - ), - ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - PostApplyPostRefresh: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - }, - ConfigStateChecks: []statecheck.StateCheck{ - statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ - 
names.AttrARN: tfknownvalue.RegionalARNRegexp("codeartifact", regexache.MustCompile(`repository/.+`)), - }), - }, - }, - }, - }) -} - func testAccCheckRepositoryExists(ctx context.Context, n string) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] diff --git a/internal/service/codeartifact/service_endpoint_resolver_gen.go b/internal/service/codeartifact/service_endpoint_resolver_gen.go index 0dbf8a6633e5..61629b968d61 100644 --- a/internal/service/codeartifact/service_endpoint_resolver_gen.go +++ b/internal/service/codeartifact/service_endpoint_resolver_gen.go @@ -62,7 +62,7 @@ func (r resolverV2) ResolveEndpoint(ctx context.Context, params codeartifact.End }) params.UseFIPS = aws.Bool(false) } else { - err = fmt.Errorf("looking up codeartifact endpoint %q: %s", hostname, err) + err = fmt.Errorf("looking up codeartifact endpoint %q: %w", hostname, err) return } } else { diff --git a/internal/service/codeartifact/service_endpoints_gen_test.go b/internal/service/codeartifact/service_endpoints_gen_test.go index b437ffbecc4e..47c7ecdd8409 100644 --- a/internal/service/codeartifact/service_endpoints_gen_test.go +++ b/internal/service/codeartifact/service_endpoints_gen_test.go @@ -521,7 +521,7 @@ func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { ) } -var errCancelOperation = fmt.Errorf("Test: Canceling request") +var errCancelOperation = errors.New("Test: Canceling request") func addCancelRequestMiddleware() func(*middleware.Stack) error { return func(stack *middleware.Stack) error { diff --git a/internal/service/codeartifact/service_package_gen.go b/internal/service/codeartifact/service_package_gen.go index b36ff7fd8ed2..04ac023a3f56 100644 --- a/internal/service/codeartifact/service_package_gen.go +++ b/internal/service/codeartifact/service_package_gen.go @@ -7,7 +7,6 @@ import ( "unique" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/retry" 
"github.com/aws/aws-sdk-go-v2/service/codeartifact" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -129,7 +128,7 @@ func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) ( func(o *codeartifact.Options) { if inContext, ok := conns.FromContext(ctx); ok && inContext.VCREnabled() { tflog.Info(ctx, "overriding retry behavior to immediately return VCR errors") - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(vcr.InteractionNotFoundRetryableFunc)) + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), vcr.InteractionNotFoundRetryableFunc) } }, withExtraOptions(ctx, p, config), diff --git a/internal/service/codeartifact/sweep.go b/internal/service/codeartifact/sweep.go index 9bf41592bfb8..30470665b86a 100644 --- a/internal/service/codeartifact/sweep.go +++ b/internal/service/codeartifact/sweep.go @@ -30,7 +30,7 @@ func sweepDomains(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.CodeArtifactClient(ctx) input := &codeartifact.ListDomainsInput{} @@ -71,7 +71,7 @@ func sweepRepositories(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %w", err) + return fmt.Errorf("getting client: %w", err) } conn := client.CodeArtifactClient(ctx) input := &codeartifact.ListRepositoriesInput{} diff --git a/internal/service/codeartifact/tags_gen.go b/internal/service/codeartifact/tags_gen.go index 56753b7b697f..3d2ffac522e5 100644 --- a/internal/service/codeartifact/tags_gen.go +++ b/internal/service/codeartifact/tags_gen.go @@ -3,8 +3,8 @@ package codeartifact import ( "context" - "fmt" + "github.com/YakDriver/smarterr" 
"github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/codeartifact" awstypes "github.com/aws/aws-sdk-go-v2/service/codeartifact/types" @@ -27,7 +27,7 @@ func listTags(ctx context.Context, conn *codeartifact.Client, identifier string, output, err := conn.ListTagsForResource(ctx, &input, optFns...) if err != nil { - return tftags.New(ctx, nil), err + return tftags.New(ctx, nil), smarterr.NewError(err) } return keyValueTags(ctx, output.Tags), nil @@ -39,7 +39,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri tags, err := listTags(ctx, meta.(*conns.AWSClient).CodeArtifactClient(ctx), identifier) if err != nil { - return err + return smarterr.NewError(err) } if inContext, ok := tftags.FromContext(ctx); ok { @@ -117,7 +117,7 @@ func updateTags(ctx context.Context, conn *codeartifact.Client, identifier strin _, err := conn.UntagResource(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("untagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } @@ -132,7 +132,7 @@ func updateTags(ctx context.Context, conn *codeartifact.Client, identifier strin _, err := conn.TagResource(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("tagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } diff --git a/internal/service/codeartifact/testdata/Domain/basic_v5.100.0/main_gen.tf b/internal/service/codeartifact/testdata/Domain/basic_v5.100.0/main_gen.tf new file mode 100644 index 000000000000..0ee47699d748 --- /dev/null +++ b/internal/service/codeartifact/testdata/Domain/basic_v5.100.0/main_gen.tf @@ -0,0 +1,29 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_codeartifact_domain" "test" { + domain = var.rName + encryption_key = aws_kms_key.test.arn +} + +resource "aws_kms_key" "test" { + description = var.rName + deletion_window_in_days = 7 + enable_key_rotation = true +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "5.100.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/codeartifact/testdata/Domain/basic_v6.0.0/main_gen.tf b/internal/service/codeartifact/testdata/Domain/basic_v6.0.0/main_gen.tf new file mode 100644 index 000000000000..dbba5a8bbd31 --- /dev/null +++ b/internal/service/codeartifact/testdata/Domain/basic_v6.0.0/main_gen.tf @@ -0,0 +1,29 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resource "aws_codeartifact_domain" "test" { + domain = var.rName + encryption_key = aws_kms_key.test.arn +} + +resource "aws_kms_key" "test" { + description = var.rName + deletion_window_in_days = 7 + enable_key_rotation = true +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "6.0.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/codeartifact/testdata/DomainPermissionsPolicy/basic_v5.100.0/main_gen.tf b/internal/service/codeartifact/testdata/DomainPermissionsPolicy/basic_v5.100.0/main_gen.tf new file mode 100644 index 000000000000..2683ea0ca845 --- /dev/null +++ b/internal/service/codeartifact/testdata/DomainPermissionsPolicy/basic_v5.100.0/main_gen.tf @@ -0,0 +1,46 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_codeartifact_domain_permissions_policy" "test" { + domain = aws_codeartifact_domain.test.domain + policy_document = <" + { + ConfigDirectory: config.StaticDirectory("testdata/Project/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ImportStateKind: resource.ImportCommandWithID, + ImportStateIdFunc: acctest.CrossRegionImportStateIdFunc(resourceName), + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + + // Step 3: Import command without appended "@" + { + ConfigDirectory: config.StaticDirectory("testdata/Project/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ImportStateKind: resource.ImportCommandWithID, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + + // Step 4: Import block with Import ID and appended "@" + { + ConfigDirectory: config.StaticDirectory("testdata/Project/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportStateIdFunc: acctest.CrossRegionImportStateIdFunc(resourceName), + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + }, + + // Step 5: Import block with Import ID and no appended "@" + { + 
ConfigDirectory: config.StaticDirectory("testdata/Project/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + }, + + // Step 6: Import block with Resource Identity + { + ConfigDirectory: config.StaticDirectory("testdata/Project/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithResourceIdentity, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + }, + }, + }) +} + +func TestAccCodeBuildProject_Identity_ExistingResource(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.Project + resourceName := "aws_codebuild_project.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: 
func() { + acctest.PreCheck(ctx, t) + testAccPreCheck(ctx, t) + testAccPreCheckSourceCredentialsForServerTypeGithub(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.CodeBuildServiceID), + CheckDestroy: testAccCheckProjectDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/Project/basic_v5.100.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckProjectExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: v6.0 Identity error + { + ConfigDirectory: config.StaticDirectory("testdata/Project/basic_v6.0.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckProjectExists(ctx, resourceName, &v), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.Null(), + }), + }, + }, + + // Step 3: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Project/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, 
plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + }, + }) +} + +func TestAccCodeBuildProject_Identity_ExistingResource_NoRefresh_NoChange(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.Project + resourceName := "aws_codebuild_project.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { + acctest.PreCheck(ctx, t) + testAccPreCheck(ctx, t) + testAccPreCheckSourceCredentialsForServerTypeGithub(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.CodeBuildServiceID), + CheckDestroy: testAccCheckProjectDestroy(ctx), + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/Project/basic_v5.100.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckProjectExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Project/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckProjectExists(ctx, resourceName, &v), + ), + }, + }, + }) +} diff --git a/internal/service/codebuild/project_test.go 
b/internal/service/codebuild/project_test.go index ff4f29d8a79d..c82458eac16c 100644 --- a/internal/service/codebuild/project_test.go +++ b/internal/service/codebuild/project_test.go @@ -13,18 +13,10 @@ import ( "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/codebuild/types" "github.com/hashicorp/aws-sdk-go-base/v2/endpoints" - "github.com/hashicorp/terraform-plugin-testing/compare" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" - "github.com/hashicorp/terraform-plugin-testing/knownvalue" - "github.com/hashicorp/terraform-plugin-testing/plancheck" - "github.com/hashicorp/terraform-plugin-testing/statecheck" "github.com/hashicorp/terraform-plugin-testing/terraform" - "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" - "github.com/hashicorp/terraform-plugin-testing/tfversion" "github.com/hashicorp/terraform-provider-aws/internal/acctest" - tfknownvalue "github.com/hashicorp/terraform-provider-aws/internal/acctest/knownvalue" - tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" "github.com/hashicorp/terraform-provider-aws/internal/conns" tfcodebuild "github.com/hashicorp/terraform-provider-aws/internal/service/codebuild" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" @@ -100,7 +92,6 @@ func TestAccCodeBuildProject_basic(t *testing.T) { PreCheck: func() { acctest.PreCheck(ctx, t) testAccPreCheck(ctx, t) - testAccPreCheckSourceCredentialsForServerType(ctx, t, types.ServerTypeGithub) }, ErrorCheck: acctest.ErrorCheck(t, names.CodeBuildServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -137,9 +128,9 @@ func TestAccCodeBuildProject_basic(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "source.#", "1"), resource.TestCheckResourceAttr(resourceName, "source.0.git_clone_depth", "0"), resource.TestCheckResourceAttr(resourceName, "source.0.insecure_ssl", 
acctest.CtFalse), - resource.TestCheckResourceAttr(resourceName, "source.0.location", testAccGitHubSourceLocationFromEnv()), + resource.TestCheckResourceAttr(resourceName, "source.0.location", ""), resource.TestCheckResourceAttr(resourceName, "source.0.report_build_status", acctest.CtFalse), - resource.TestCheckResourceAttr(resourceName, "source.0.type", "GITHUB"), + resource.TestCheckResourceAttr(resourceName, "source.0.type", "NO_SOURCE"), resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, "0"), resource.TestCheckResourceAttr(resourceName, "vpc_config.#", "0"), ), @@ -153,82 +144,6 @@ func TestAccCodeBuildProject_basic(t *testing.T) { }) } -func TestAccCodeBuildProject_Identity_Basic(t *testing.T) { - ctx := acctest.Context(t) - var project types.Project - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - - resourceName := "aws_codebuild_project.test" - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { - acctest.PreCheck(ctx, t) - testAccPreCheck(ctx, t) - testAccPreCheckSourceCredentialsForServerType(ctx, t, types.ServerTypeGithub) - }, - ErrorCheck: acctest.ErrorCheck(t, names.CodeBuildServiceID), - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckProjectDestroy(ctx), - Steps: []resource.TestStep{ - { - Config: testAccProjectConfig_basic(rName), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckProjectExists(ctx, resourceName, &project), - ), - ConfigStateChecks: []statecheck.StateCheck{ - tfstatecheck.ExpectRegionalARNFormat(resourceName, tfjsonpath.New(names.AttrARN), "codebuild", "project/{name}"), - statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New(names.AttrARN), compare.ValuesSame()), - statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), - }, - }, - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - 
}) -} - -func TestAccCodeBuildProject_Identity_RegionOverride(t *testing.T) { - ctx := acctest.Context(t) - - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resourceName := "aws_codebuild_project.test" - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { - acctest.PreCheck(ctx, t) - testAccPreCheck(ctx, t) - testAccPreCheckSourceCredentialsForServerType(ctx, t, types.ServerTypeGithub) - }, - ErrorCheck: acctest.ErrorCheck(t, names.CodeBuildServiceID), - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: acctest.CheckDestroyNoop, - Steps: []resource.TestStep{ - { - Config: testAccProjectConfig_regionOverride(rName), - ConfigStateChecks: []statecheck.StateCheck{ - tfstatecheck.ExpectRegionalARNAlternateRegionFormat(resourceName, tfjsonpath.New(names.AttrARN), "codebuild", "project/{name}"), - statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New(names.AttrARN), compare.ValuesSame()), - statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), - }, - }, - { - ImportStateIdFunc: acctest.CrossRegionImportStateIdFunc(resourceName), - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} - func TestAccCodeBuildProject_disappears(t *testing.T) { ctx := acctest.Context(t) var project types.Project @@ -240,7 +155,6 @@ func TestAccCodeBuildProject_disappears(t *testing.T) { PreCheck: func() { acctest.PreCheck(ctx, t) testAccPreCheck(ctx, t) - testAccPreCheckSourceCredentialsForServerType(ctx, t, types.ServerTypeGithub) }, ErrorCheck: acctest.ErrorCheck(t, names.CodeBuildServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -506,7 +420,7 @@ func TestAccCodeBuildProject_cache(t *testing.T) { ImportStateVerify: true, }, { - Config: 
testAccProjectConfig_basic(rName), + Config: testAccProjectConfig_basicGitHub(rName), Check: resource.ComposeTestCheckFunc( testAccCheckProjectExists(ctx, resourceName, &project), resource.TestCheckResourceAttr(resourceName, "cache.#", "1"), @@ -532,7 +446,7 @@ func TestAccCodeBuildProject_cache(t *testing.T) { ), }, { - Config: testAccProjectConfig_basic(rName), + Config: testAccProjectConfig_basicGitHub(rName), Check: resource.ComposeTestCheckFunc( testAccCheckProjectExists(ctx, resourceName, &project), resource.TestCheckResourceAttr(resourceName, "cache.#", "1"), @@ -1948,7 +1862,7 @@ func TestAccCodeBuildProject_vpc(t *testing.T) { ), }, { - Config: testAccProjectConfig_basic(rName), + Config: testAccProjectConfig_basicGitHub(rName), Check: resource.ComposeTestCheckFunc( testAccCheckProjectExists(ctx, resourceName, &project), resource.TestCheckResourceAttr(resourceName, "vpc_config.#", "0"), @@ -2942,7 +2856,7 @@ func TestAccCodeBuildProject_concurrentBuildLimit(t *testing.T) { ), }, { - Config: testAccProjectConfig_basic(rName), + Config: testAccProjectConfig_basicGitHub(rName), Check: resource.ComposeTestCheckFunc( testAccCheckProjectExists(ctx, resourceName, &project), resource.TestCheckResourceAttr(resourceName, "concurrent_build_limit", "0"), @@ -3020,7 +2934,7 @@ func TestAccCodeBuildProject_dockerServer(t *testing.T) { ImportStateVerify: true, }, { - Config: testAccProjectConfig_basic(rName), + Config: testAccProjectConfig_basicGitHub(rName), Check: resource.ComposeAggregateTestCheckFunc( testAccCheckProjectExists(ctx, resourceName, &project), acctest.CheckResourceAttrRegionalARN(ctx, resourceName, names.AttrARN, "codebuild", fmt.Sprintf("project/%s", rName)), @@ -3069,7 +2983,7 @@ func TestAccCodeBuildProject_dockerServerWithVPC(t *testing.T) { ImportStateVerify: true, }, { - Config: testAccProjectConfig_basic(rName), + Config: testAccProjectConfig_basicGitHub(rName), Check: resource.ComposeAggregateTestCheckFunc( testAccCheckProjectExists(ctx, 
resourceName, &project), acctest.CheckResourceAttrRegionalARN(ctx, resourceName, names.AttrARN, "codebuild", fmt.Sprintf("project/%s", rName)), @@ -3082,84 +2996,46 @@ func TestAccCodeBuildProject_dockerServerWithVPC(t *testing.T) { }) } -func TestAccCodeBuildProject_Identity_ExistingResource(t *testing.T) { +func TestAccCodeBuildProject_autoRetryLimit(t *testing.T) { ctx := acctest.Context(t) var project types.Project rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resourceName := "aws_codebuild_project.test" resource.ParallelTest(t, resource.TestCase{ - TerraformVersionChecks: []tfversion.TerraformVersionCheck{ - tfversion.SkipBelow(tfversion.Version1_12_0), - }, PreCheck: func() { acctest.PreCheck(ctx, t) testAccPreCheck(ctx, t) - testAccPreCheckSourceCredentialsForServerType(ctx, t, types.ServerTypeGithub) }, - ErrorCheck: acctest.ErrorCheck(t, names.CodeBuildServiceID), - CheckDestroy: testAccCheckProjectDestroy(ctx), + ErrorCheck: acctest.ErrorCheck(t, names.CodeBuildServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckProjectDestroy(ctx), Steps: []resource.TestStep{ { - ExternalProviders: map[string]resource.ExternalProvider{ - "aws": { - Source: "hashicorp/aws", - VersionConstraint: "5.100.0", - }, - }, - Config: testAccProjectConfig_basic(rName), + Config: testAccProjectConfig_autoRetryLimit(rName, 2), Check: resource.ComposeTestCheckFunc( testAccCheckProjectExists(ctx, resourceName, &project), + resource.TestCheckResourceAttr(resourceName, "auto_retry_limit", "2"), ), - ConfigStateChecks: []statecheck.StateCheck{ - tfstatecheck.ExpectNoIdentity(resourceName), - }, }, { - ExternalProviders: map[string]resource.ExternalProvider{ - "aws": { - Source: "hashicorp/aws", - VersionConstraint: "6.0.0", - }, - }, - Config: testAccProjectConfig_basic(rName), + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccProjectConfig_autoRetryLimit(rName, 4), 
Check: resource.ComposeTestCheckFunc( testAccCheckProjectExists(ctx, resourceName, &project), + resource.TestCheckResourceAttr(resourceName, "auto_retry_limit", "4"), ), - ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - PostApplyPostRefresh: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - }, - ConfigStateChecks: []statecheck.StateCheck{ - statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ - names.AttrARN: knownvalue.Null(), - }), - }, }, { - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - Config: testAccProjectConfig_basic(rName), + Config: testAccProjectConfig_autoRetryLimit(rName, 0), Check: resource.ComposeTestCheckFunc( testAccCheckProjectExists(ctx, resourceName, &project), + resource.TestCheckResourceAttr(resourceName, "auto_retry_limit", "0"), ), - ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - PostApplyPostRefresh: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - }, - ConfigStateChecks: []statecheck.StateCheck{ - statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ - names.AttrARN: tfknownvalue.RegionalARNRegexp("codebuild", regexache.MustCompile(`project/.+`)), - }), - }, }, }, }) @@ -3307,6 +3183,13 @@ resource "aws_iam_role_policy" "test" { "ec2:DescribeSecurityGroups", "ec2:DescribeVpcs" ] + }, + { + "Effect": "Allow", + "Resource": "*", + "Action": [ + "codeconnections:GetConnectionToken" + ] } ] } @@ -3332,18 +3215,22 @@ resource "aws_codebuild_project" "test" { } source { - location = %[2]q - type = "GITHUB" + type = "NO_SOURCE" + buildspec = < 0 { + build := batchOutput.Builds[0] + // Verify build was started (any status other than not found) + if 
build.BuildStatus != "" { + return nil + } + } + } + } + } +} + +func testAccStartBuildActionConfig_basic(rName string) string { + return fmt.Sprintf(` +data "aws_partition" "current" {} + +resource "aws_iam_role" "test" { + name = %[1]q + + assume_role_policy = jsonencode({ + Version = "2012-10-17" + Statement = [ + { + Action = "sts:AssumeRole" + Effect = "Allow" + Principal = { + Service = "codebuild.amazonaws.com" + } + } + ] + }) +} + +resource "aws_iam_role_policy" "test" { + role = aws_iam_role.test.name + + policy = jsonencode({ + Version = "2012-10-17" + Statement = [ + { + Effect = "Allow" + Action = [ + "logs:CreateLogGroup", + "logs:CreateLogStream", + "logs:PutLogEvents" + ] + Resource = "arn:${data.aws_partition.current.partition}:logs:*:*:*" + } + ] + }) +} + +resource "aws_codebuild_project" "test" { + name = %[1]q + service_role = aws_iam_role.test.arn + + artifacts { + type = "NO_ARTIFACTS" + } + + environment { + compute_type = "BUILD_GENERAL1_SMALL" + image = "aws/codebuild/amazonlinux2-x86_64-standard:3.0" + type = "LINUX_CONTAINER" + } + + source { + type = "NO_SOURCE" + buildspec = "version: 0.2\nphases:\n build:\n commands:\n - echo 'Hello World'" + } +} + +action "aws_codebuild_start_build" "test" { + config { + project_name = aws_codebuild_project.test.name + } +} + +resource "terraform_data" "trigger" { + lifecycle { + action_trigger { + events = [after_create] + actions = [action.aws_codebuild_start_build.test] + } + } + + depends_on = [aws_codebuild_project.test] +} +`, rName) +} + +func testAccStartBuildActionConfig_withEnvironmentVariables(rName string) string { + return fmt.Sprintf(` +data "aws_partition" "current" {} + +resource "aws_iam_role" "test" { + name = %[1]q + + assume_role_policy = jsonencode({ + Version = "2012-10-17" + Statement = [ + { + Action = "sts:AssumeRole" + Effect = "Allow" + Principal = { + Service = "codebuild.amazonaws.com" + } + } + ] + }) +} + +resource "aws_iam_role_policy" "test" { + role = 
aws_iam_role.test.name + + policy = jsonencode({ + Version = "2012-10-17" + Statement = [ + { + Effect = "Allow" + Action = [ + "logs:CreateLogGroup", + "logs:CreateLogStream", + "logs:PutLogEvents" + ] + Resource = "arn:${data.aws_partition.current.partition}:logs:*:*:*" + } + ] + }) +} + +resource "aws_codebuild_project" "test" { + name = %[1]q + service_role = aws_iam_role.test.arn + + artifacts { + type = "NO_ARTIFACTS" + } + + environment { + compute_type = "BUILD_GENERAL1_SMALL" + image = "aws/codebuild/amazonlinux2-x86_64-standard:3.0" + type = "LINUX_CONTAINER" + } + + source { + type = "NO_SOURCE" + buildspec = "version: 0.2\nphases:\n build:\n commands:\n - echo \"TEST_VAR is $TEST_VAR\"" + } +} + +action "aws_codebuild_start_build" "test" { + config { + project_name = aws_codebuild_project.test.name + + environment_variables_override { + name = "TEST_VAR" + value = "test_value" + type = "PLAINTEXT" + } + } +} + +resource "terraform_data" "trigger" { + lifecycle { + action_trigger { + events = [after_create] + actions = [action.aws_codebuild_start_build.test] + } + } + + depends_on = [aws_codebuild_project.test] +} +`, rName) +} diff --git a/internal/service/codebuild/testdata/Fleet/basic_v5.100.0/main_gen.tf b/internal/service/codebuild/testdata/Fleet/basic_v5.100.0/main_gen.tf new file mode 100644 index 000000000000..edf2e8ea1b23 --- /dev/null +++ b/internal/service/codebuild/testdata/Fleet/basic_v5.100.0/main_gen.tf @@ -0,0 +1,26 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_codebuild_fleet" "test" { + base_capacity = 1 + compute_type = "BUILD_GENERAL1_SMALL" + environment_type = "LINUX_CONTAINER" + name = var.rName + overflow_behavior = "ON_DEMAND" +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "5.100.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/codebuild/testdata/Fleet/basic_v6.0.0/main_gen.tf b/internal/service/codebuild/testdata/Fleet/basic_v6.0.0/main_gen.tf new file mode 100644 index 000000000000..f398550a5731 --- /dev/null +++ b/internal/service/codebuild/testdata/Fleet/basic_v6.0.0/main_gen.tf @@ -0,0 +1,26 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resource "aws_codebuild_fleet" "test" { + base_capacity = 1 + compute_type = "BUILD_GENERAL1_SMALL" + environment_type = "LINUX_CONTAINER" + name = var.rName + overflow_behavior = "ON_DEMAND" +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "6.0.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/codebuild/testdata/Project/basic/main_gen.tf b/internal/service/codebuild/testdata/Project/basic/main_gen.tf new file mode 100644 index 000000000000..a75aebf3e973 --- /dev/null +++ b/internal/service/codebuild/testdata/Project/basic/main_gen.tf @@ -0,0 +1,112 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_codebuild_project" "test" { + name = var.rName + service_role = aws_iam_role.test.arn + + artifacts { + type = "NO_ARTIFACTS" + } + + environment { + compute_type = "BUILD_GENERAL1_SMALL" + image = "2" + type = "LINUX_CONTAINER" + } + + source { + location = var.AWS_CODEBUILD_GITHUB_SOURCE_LOCATION + type = "GITHUB" + } +} + +# testAccProjectConfig_baseServiceRole + +resource "aws_iam_role" "test" { + name = var.rName + + assume_role_policy = < 0 { + input.PullRequestBuildPolicy = expandWebhookPullRequestBuildPolicy(v.([]any)[0].(map[string]any)) + } + if v, ok := d.GetOk("scope_configuration"); ok && len(v.([]any)) > 0 { input.ScopeConfiguration = expandScopeConfiguration(v.([]any)) } @@ -189,6 +217,9 @@ func resourceWebhookRead(ctx context.Context, d *schema.ResourceData, meta any) d.Set("manual_creation", d.Get("manual_creation")) // Create-only. d.Set("payload_url", webhook.PayloadUrl) d.Set("project_name", d.Id()) + if err := d.Set("pull_request_build_policy", flattenWebhookPullRequestBuildPolicy(webhook.PullRequestBuildPolicy)); err != nil { + return sdkdiag.AppendErrorf(diags, "setting pull_request_build_policy: %s", err) + } if err := d.Set("scope_configuration", flattenScopeConfiguration(webhook.ScopeConfiguration)); err != nil { return sdkdiag.AppendErrorf(diags, "setting scope_configuration: %s", err) } @@ -220,6 +251,10 @@ func resourceWebhookUpdate(ctx context.Context, d *schema.ResourceData, meta any input.BranchFilter = aws.String(d.Get("branch_filter").(string)) } + if v, ok := d.GetOk("pull_request_build_policy"); ok && len(v.([]any)) > 0 { + input.PullRequestBuildPolicy = expandWebhookPullRequestBuildPolicy(v.([]any)[0].(map[string]any)) + } + _, err := conn.UpdateWebhook(ctx, &input) if err != nil { @@ -285,6 +320,32 @@ func expandWebhookFilterGroups(tfList []any) [][]types.WebhookFilter { return apiObjects } +func expandWebhookPullRequestBuildPolicy(tfMap map[string]any) 
*types.PullRequestBuildPolicy { + if tfMap == nil { + return nil + } + + apiObject := &types.PullRequestBuildPolicy{ + RequiresCommentApproval: types.PullRequestBuildCommentApproval(tfMap["requires_comment_approval"].(string)), + } + + if apiObject.RequiresCommentApproval != types.PullRequestBuildCommentApprovalDisabled { + if v, ok := tfMap["approver_roles"]; ok && v.(*schema.Set).Len() > 0 { + var roles []types.PullRequestBuildApproverRole + for _, role := range v.(*schema.Set).List() { + if role != nil { + roles = append(roles, types.PullRequestBuildApproverRole(role.(string))) + } + } + if len(roles) > 0 { + apiObject.ApproverRoles = roles + } + } + } + + return apiObject +} + func expandWebhookFilters(tfList []any) []types.WebhookFilter { if len(tfList) == 0 { return nil @@ -368,6 +429,30 @@ func flattenWebhookFilterGroups(apiObjects [][]types.WebhookFilter) []any { return tfList } +func flattenWebhookPullRequestBuildPolicy(apiObject *types.PullRequestBuildPolicy) []any { + if apiObject == nil { + return nil + } + + tfMap := map[string]any{ + "requires_comment_approval": string(apiObject.RequiresCommentApproval), + } + + if v := apiObject.ApproverRoles; len(v) > 0 { + var roles []string + for _, role := range v { + if role != "" { + roles = append(roles, string(role)) + } + } + if len(roles) > 0 { + tfMap["approver_roles"] = roles + } + } + + return []any{tfMap} +} + func flattenWebhookFilters(apiObjects []types.WebhookFilter) []any { if len(apiObjects) == 0 { return nil diff --git a/internal/service/codebuild/webhook_test.go b/internal/service/codebuild/webhook_test.go index 743b30c86606..f4d0aef640d5 100644 --- a/internal/service/codebuild/webhook_test.go +++ b/internal/service/codebuild/webhook_test.go @@ -6,6 +6,7 @@ package codebuild_test import ( "context" "fmt" + "strings" "testing" "github.com/YakDriver/regexache" @@ -19,6 +20,7 @@ import ( "github.com/hashicorp/terraform-plugin-testing/terraform" 
"github.com/hashicorp/terraform-provider-aws/internal/acctest" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/enum" tfcodebuild "github.com/hashicorp/terraform-provider-aws/internal/service/codebuild" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/names" @@ -92,6 +94,12 @@ func TestAccCodeBuildWebhook_gitHub(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "scope_configuration.#", "0"), resource.TestCheckResourceAttr(resourceName, "secret", ""), resource.TestMatchResourceAttr(resourceName, names.AttrURL, regexache.MustCompile(`^https://`)), + resource.TestCheckResourceAttr(resourceName, "pull_request_build_policy.#", "1"), + resource.TestCheckResourceAttr(resourceName, "pull_request_build_policy.0.requires_comment_approval", string(types.PullRequestBuildCommentApprovalAllPullRequests)), + resource.TestCheckResourceAttr(resourceName, "pull_request_build_policy.0.approver_roles.#", "3"), + resource.TestCheckTypeSetElemAttr(resourceName, "pull_request_build_policy.0.approver_roles.*", string(types.PullRequestBuildApproverRoleGithubWrite)), + resource.TestCheckTypeSetElemAttr(resourceName, "pull_request_build_policy.0.approver_roles.*", string(types.PullRequestBuildApproverRoleGithubMaintain)), + resource.TestCheckTypeSetElemAttr(resourceName, "pull_request_build_policy.0.approver_roles.*", string(types.PullRequestBuildApproverRoleGithubAdmin)), ), }, { @@ -475,6 +483,118 @@ func TestAccCodeBuildWebhook_upgradeV5_94_1(t *testing.T) { }) } +func TestAccCodeBuildWebhook_gitHubWithPullRequestBuildPolicy(t *testing.T) { + ctx := acctest.Context(t) + var webhook types.Webhook + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_codebuild_webhook.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + testAccPreCheck(ctx, t) + 
testAccPreCheckSourceCredentialsForServerType(ctx, t, types.ServerTypeGithub) + }, + ErrorCheck: acctest.ErrorCheck(t, names.CodeBuildServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckWebhookDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccWebhookConfig_gitHubWithPullRequestBuildPolicy( + rName, + string(types.PullRequestBuildCommentApprovalAllPullRequests), + enum.Slice( + types.PullRequestBuildApproverRoleGithubRead, + types.PullRequestBuildApproverRoleGithubWrite, + types.PullRequestBuildApproverRoleGithubMaintain, + types.PullRequestBuildApproverRoleGithubAdmin, + ), + ), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckWebhookExists(ctx, resourceName, &webhook), + resource.TestCheckResourceAttr(resourceName, "pull_request_build_policy.#", "1"), + resource.TestCheckResourceAttr(resourceName, "pull_request_build_policy.0.requires_comment_approval", string(types.PullRequestBuildCommentApprovalAllPullRequests)), + resource.TestCheckResourceAttr(resourceName, "pull_request_build_policy.0.approver_roles.#", "4"), + resource.TestCheckTypeSetElemAttr(resourceName, "pull_request_build_policy.0.approver_roles.*", string(types.PullRequestBuildApproverRoleGithubRead)), + resource.TestCheckTypeSetElemAttr(resourceName, "pull_request_build_policy.0.approver_roles.*", string(types.PullRequestBuildApproverRoleGithubWrite)), + resource.TestCheckTypeSetElemAttr(resourceName, "pull_request_build_policy.0.approver_roles.*", string(types.PullRequestBuildApproverRoleGithubMaintain)), + resource.TestCheckTypeSetElemAttr(resourceName, "pull_request_build_policy.0.approver_roles.*", string(types.PullRequestBuildApproverRoleGithubAdmin)), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"secret"}, + }, + { + Config: testAccWebhookConfig_gitHubWithPullRequestBuildPolicy( + rName, + 
string(types.PullRequestBuildCommentApprovalForkPullRequests), + enum.Slice( + types.PullRequestBuildApproverRoleGithubMaintain, + types.PullRequestBuildApproverRoleGithubAdmin, + ), + ), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckWebhookExists(ctx, resourceName, &webhook), + resource.TestCheckResourceAttr(resourceName, "pull_request_build_policy.#", "1"), + resource.TestCheckResourceAttr(resourceName, "pull_request_build_policy.0.requires_comment_approval", string(types.PullRequestBuildCommentApprovalForkPullRequests)), + resource.TestCheckResourceAttr(resourceName, "pull_request_build_policy.0.approver_roles.#", "2"), + resource.TestCheckTypeSetElemAttr(resourceName, "pull_request_build_policy.0.approver_roles.*", string(types.PullRequestBuildApproverRoleGithubMaintain)), + resource.TestCheckTypeSetElemAttr(resourceName, "pull_request_build_policy.0.approver_roles.*", string(types.PullRequestBuildApproverRoleGithubAdmin)), + ), + }, + { + Config: testAccWebhookConfig_gitHubWithPullRequestBuildPolicyNoApproverRoles( + rName, + string(types.PullRequestBuildCommentApprovalDisabled), + ), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckWebhookExists(ctx, resourceName, &webhook), + resource.TestCheckResourceAttr(resourceName, "pull_request_build_policy.#", "1"), + resource.TestCheckResourceAttr(resourceName, "pull_request_build_policy.0.requires_comment_approval", string(types.PullRequestBuildCommentApprovalDisabled)), + ), + }, + }, + }) +} + +func TestAccCodeBuildWebhook_gitHubWithPullRequestBuildPolicyNoApproverRoles(t *testing.T) { + ctx := acctest.Context(t) + var webhook types.Webhook + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_codebuild_webhook.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + testAccPreCheck(ctx, t) + testAccPreCheckSourceCredentialsForServerType(ctx, t, types.ServerTypeGithub) + }, + ErrorCheck: 
acctest.ErrorCheck(t, names.CodeBuildServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckWebhookDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccWebhookConfig_gitHubWithPullRequestBuildPolicyNoApproverRoles( + rName, + string(types.PullRequestBuildCommentApprovalAllPullRequests), + ), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckWebhookExists(ctx, resourceName, &webhook), + resource.TestCheckResourceAttr(resourceName, "pull_request_build_policy.#", "1"), + resource.TestCheckResourceAttr(resourceName, "pull_request_build_policy.0.requires_comment_approval", string(types.PullRequestBuildCommentApprovalAllPullRequests)), + resource.TestCheckResourceAttr(resourceName, "pull_request_build_policy.0.approver_roles.#", "3"), + resource.TestCheckTypeSetElemAttr(resourceName, "pull_request_build_policy.0.approver_roles.*", string(types.PullRequestBuildApproverRoleGithubWrite)), + resource.TestCheckTypeSetElemAttr(resourceName, "pull_request_build_policy.0.approver_roles.*", string(types.PullRequestBuildApproverRoleGithubMaintain)), + resource.TestCheckTypeSetElemAttr(resourceName, "pull_request_build_policy.0.approver_roles.*", string(types.PullRequestBuildApproverRoleGithubAdmin)), + ), + }, + }, + }) +} + func testAccCheckWebhookFilter(webhook *types.Webhook, expectedFilters [][]types.WebhookFilter) resource.TestCheckFunc { return func(s *terraform.State) error { got, want := webhook.FilterGroups, expectedFilters @@ -542,7 +662,7 @@ resource "aws_codebuild_webhook" "test" { } func testAccWebhookConfig_gitHub(rName string) string { - return acctest.ConfigCompose(testAccProjectConfig_basic(rName), ` + return acctest.ConfigCompose(testAccProjectConfig_basicGitHub(rName), ` resource "aws_codebuild_webhook" "test" { project_name = aws_codebuild_project.test.name } @@ -579,7 +699,7 @@ resource "aws_codebuild_webhook" "test" { } func testAccWebhookConfig_buildType(rName, branchFilter string) 
string { - return acctest.ConfigCompose(testAccProjectConfig_basic(rName), fmt.Sprintf(` + return acctest.ConfigCompose(testAccProjectConfig_basicGitHub(rName), fmt.Sprintf(` resource "aws_codebuild_webhook" "test" { build_type = %[1]q project_name = aws_codebuild_project.test.name @@ -588,7 +708,7 @@ resource "aws_codebuild_webhook" "test" { } func testAccWebhookConfig_branchFilter(rName, branchFilter string) string { - return acctest.ConfigCompose(testAccProjectConfig_basic(rName), fmt.Sprintf(` + return acctest.ConfigCompose(testAccProjectConfig_basicGitHub(rName), fmt.Sprintf(` resource "aws_codebuild_webhook" "test" { branch_filter = %[1]q project_name = aws_codebuild_project.test.name @@ -597,7 +717,7 @@ resource "aws_codebuild_webhook" "test" { } func testAccWebhookConfig_filterGroup(rName string) string { - return acctest.ConfigCompose(testAccProjectConfig_basic(rName), ` + return acctest.ConfigCompose(testAccProjectConfig_basicGitHub(rName), ` resource "aws_codebuild_webhook" "test" { project_name = aws_codebuild_project.test.name @@ -657,10 +777,33 @@ resource "aws_codebuild_webhook" "test" { } func testAccWebhookConfig_manualCreation(rName string) string { - return acctest.ConfigCompose(testAccProjectConfig_basic(rName), ` + return acctest.ConfigCompose(testAccProjectConfig_basicGitHub(rName), ` resource "aws_codebuild_webhook" "test" { project_name = aws_codebuild_project.test.name manual_creation = true } `) } + +func testAccWebhookConfig_gitHubWithPullRequestBuildPolicy(rName, requiresCommentApproval string, approverRoles []string) string { + return acctest.ConfigCompose(testAccProjectConfig_basicGitHub(rName), fmt.Sprintf(` +resource "aws_codebuild_webhook" "test" { + project_name = aws_codebuild_project.test.name + pull_request_build_policy { + requires_comment_approval = %[1]q + approver_roles = ["%[2]s"] + } +} +`, requiresCommentApproval, strings.Join(approverRoles, "\", \""))) +} + +func 
testAccWebhookConfig_gitHubWithPullRequestBuildPolicyNoApproverRoles(rName, requiresCommentApproval string) string { + return acctest.ConfigCompose(testAccProjectConfig_basicGitHub(rName), fmt.Sprintf(` +resource "aws_codebuild_webhook" "test" { + project_name = aws_codebuild_project.test.name + pull_request_build_policy { + requires_comment_approval = %[1]q + } +} +`, requiresCommentApproval)) +} diff --git a/internal/service/codecatalyst/service_endpoint_resolver_gen.go b/internal/service/codecatalyst/service_endpoint_resolver_gen.go index b872e775e929..b4e731da0eb5 100644 --- a/internal/service/codecatalyst/service_endpoint_resolver_gen.go +++ b/internal/service/codecatalyst/service_endpoint_resolver_gen.go @@ -62,7 +62,7 @@ func (r resolverV2) ResolveEndpoint(ctx context.Context, params codecatalyst.End }) params.UseFIPS = aws.Bool(false) } else { - err = fmt.Errorf("looking up codecatalyst endpoint %q: %s", hostname, err) + err = fmt.Errorf("looking up codecatalyst endpoint %q: %w", hostname, err) return } } else { diff --git a/internal/service/codecatalyst/service_package_gen.go b/internal/service/codecatalyst/service_package_gen.go index 1d03a9b80713..037d32acb759 100644 --- a/internal/service/codecatalyst/service_package_gen.go +++ b/internal/service/codecatalyst/service_package_gen.go @@ -7,7 +7,6 @@ import ( "unique" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/codecatalyst" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -83,7 +82,7 @@ func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) ( func(o *codecatalyst.Options) { if inContext, ok := conns.FromContext(ctx); ok && inContext.VCREnabled() { tflog.Info(ctx, "overriding retry behavior to immediately return VCR errors") - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), 
retry.IsErrorRetryableFunc(vcr.InteractionNotFoundRetryableFunc)) + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), vcr.InteractionNotFoundRetryableFunc) } }, withExtraOptions(ctx, p, config), diff --git a/internal/service/codecommit/repository.go b/internal/service/codecommit/repository.go index b7e60713a171..dc9ee83a2477 100644 --- a/internal/service/codecommit/repository.go +++ b/internal/service/codecommit/repository.go @@ -229,7 +229,7 @@ func updateRepositoryDefaultBranch(ctx context.Context, conn *codecommit.Client, output, err := conn.ListBranches(ctx, inputL) if err != nil { - return fmt.Errorf("listing CodeCommit Repository (%s) branches: %s", name, err) + return fmt.Errorf("listing CodeCommit Repository (%s) branches: %w", name, err) } if len(output.Branches) == 0 { diff --git a/internal/service/codecommit/service_endpoint_resolver_gen.go b/internal/service/codecommit/service_endpoint_resolver_gen.go index cebee387ec6d..2a24886b8c51 100644 --- a/internal/service/codecommit/service_endpoint_resolver_gen.go +++ b/internal/service/codecommit/service_endpoint_resolver_gen.go @@ -62,7 +62,7 @@ func (r resolverV2) ResolveEndpoint(ctx context.Context, params codecommit.Endpo }) params.UseFIPS = aws.Bool(false) } else { - err = fmt.Errorf("looking up codecommit endpoint %q: %s", hostname, err) + err = fmt.Errorf("looking up codecommit endpoint %q: %w", hostname, err) return } } else { diff --git a/internal/service/codecommit/service_endpoints_gen_test.go b/internal/service/codecommit/service_endpoints_gen_test.go index f14c14dfdb62..40410f0c1b67 100644 --- a/internal/service/codecommit/service_endpoints_gen_test.go +++ b/internal/service/codecommit/service_endpoints_gen_test.go @@ -521,7 +521,7 @@ func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { ) } -var errCancelOperation = fmt.Errorf("Test: Canceling request") +var errCancelOperation = errors.New("Test: Canceling request") func addCancelRequestMiddleware() 
func(*middleware.Stack) error { return func(stack *middleware.Stack) error { diff --git a/internal/service/codecommit/service_package_gen.go b/internal/service/codecommit/service_package_gen.go index 43f70ffbecae..7eaaebf91d64 100644 --- a/internal/service/codecommit/service_package_gen.go +++ b/internal/service/codecommit/service_package_gen.go @@ -7,7 +7,6 @@ import ( "unique" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/codecommit" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -98,7 +97,7 @@ func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) ( func(o *codecommit.Options) { if inContext, ok := conns.FromContext(ctx); ok && inContext.VCREnabled() { tflog.Info(ctx, "overriding retry behavior to immediately return VCR errors") - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(vcr.InteractionNotFoundRetryableFunc)) + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), vcr.InteractionNotFoundRetryableFunc) } }, withExtraOptions(ctx, p, config), diff --git a/internal/service/codecommit/tags_gen.go b/internal/service/codecommit/tags_gen.go index a05fb599047c..3898641e358f 100644 --- a/internal/service/codecommit/tags_gen.go +++ b/internal/service/codecommit/tags_gen.go @@ -3,8 +3,8 @@ package codecommit import ( "context" - "fmt" + "github.com/YakDriver/smarterr" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/codecommit" "github.com/hashicorp/terraform-plugin-log/tflog" @@ -26,7 +26,7 @@ func listTags(ctx context.Context, conn *codecommit.Client, identifier string, o output, err := conn.ListTagsForResource(ctx, &input, optFns...) 
if err != nil { - return tftags.New(ctx, nil), err + return tftags.New(ctx, nil), smarterr.NewError(err) } return keyValueTags(ctx, output.Tags), nil @@ -38,7 +38,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri tags, err := listTags(ctx, meta.(*conns.AWSClient).CodeCommitClient(ctx), identifier) if err != nil { - return err + return smarterr.NewError(err) } if inContext, ok := tftags.FromContext(ctx); ok { @@ -99,7 +99,7 @@ func updateTags(ctx context.Context, conn *codecommit.Client, identifier string, _, err := conn.UntagResource(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("untagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } @@ -114,7 +114,7 @@ func updateTags(ctx context.Context, conn *codecommit.Client, identifier string, _, err := conn.TagResource(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("tagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } diff --git a/internal/service/codeconnections/connection.go b/internal/service/codeconnections/connection.go index 7d5b4c3c23c8..eecfab53d039 100644 --- a/internal/service/codeconnections/connection.go +++ b/internal/service/codeconnections/connection.go @@ -37,6 +37,7 @@ import ( // @Tags(identifierAttribute="arn") // @ArnIdentity(identityDuplicateAttributes="id") // @Testing(existsType="github.com/aws/aws-sdk-go-v2/service/codeconnections/types;types.Connection") +// @Testing(preIdentityVersion="v5.100.0") func newConnectionResource(_ context.Context) (resource.ResourceWithConfigure, error) { r := &connectionResource{} diff --git a/internal/service/codeconnections/connection_identity_gen_test.go b/internal/service/codeconnections/connection_identity_gen_test.go index fc2f0b66f82e..1d58c05845b9 100644 --- a/internal/service/codeconnections/connection_identity_gen_test.go +++ b/internal/service/codeconnections/connection_identity_gen_test.go @@ -16,6 +16,7 @@ import ( 
"github.com/hashicorp/terraform-plugin-testing/tfjsonpath" "github.com/hashicorp/terraform-plugin-testing/tfversion" "github.com/hashicorp/terraform-provider-aws/internal/acctest" + tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -26,7 +27,7 @@ func TestAccCodeConnectionsConnection_Identity_Basic(t *testing.T) { resourceName := "aws_codeconnections_connection.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ TerraformVersionChecks: []tfversion.TerraformVersionCheck{ tfversion.SkipBelow(tfversion.Version1_12_0), }, @@ -47,6 +48,9 @@ func TestAccCodeConnectionsConnection_Identity_Basic(t *testing.T) { ConfigStateChecks: []statecheck.StateCheck{ statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New(names.AttrARN), compare.ValuesSame()), statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), }, }, @@ -108,7 +112,7 @@ func TestAccCodeConnectionsConnection_Identity_RegionOverride(t *testing.T) { resourceName := "aws_codeconnections_connection.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ TerraformVersionChecks: []tfversion.TerraformVersionCheck{ tfversion.SkipBelow(tfversion.Version1_12_0), }, @@ -127,6 +131,9 @@ func TestAccCodeConnectionsConnection_Identity_RegionOverride(t *testing.T) { ConfigStateChecks: []statecheck.StateCheck{ statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, 
tfjsonpath.New(names.AttrARN), compare.ValuesSame()), statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), }, }, @@ -218,3 +225,129 @@ func TestAccCodeConnectionsConnection_Identity_RegionOverride(t *testing.T) { }, }) } + +func TestAccCodeConnectionsConnection_Identity_ExistingResource(t *testing.T) { + ctx := acctest.Context(t) + + var v types.Connection + resourceName := "aws_codeconnections_connection.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.CodeConnectionsServiceID), + CheckDestroy: testAccCheckConnectionDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/Connection/basic_v5.100.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckConnectionExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: v6.0 Identity set on refresh + { + ConfigDirectory: config.StaticDirectory("testdata/Connection/basic_v6.0.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckConnectionExists(ctx, resourceName, &v), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + 
plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + + // Step 3: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Connection/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + }, + }) +} + +func TestAccCodeConnectionsConnection_Identity_ExistingResource_NoRefresh_NoChange(t *testing.T) { + ctx := acctest.Context(t) + + var v types.Connection + resourceName := "aws_codeconnections_connection.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.CodeConnectionsServiceID), + CheckDestroy: testAccCheckConnectionDestroy(ctx), + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: 
resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/Connection/basic_v5.100.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckConnectionExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Connection/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + }, + }, + }) +} diff --git a/internal/service/codeconnections/connection_tags_gen_test.go b/internal/service/codeconnections/connection_tags_gen_test.go index 6693efc80bde..ee2211c3a2f8 100644 --- a/internal/service/codeconnections/connection_tags_gen_test.go +++ b/internal/service/codeconnections/connection_tags_gen_test.go @@ -7,7 +7,6 @@ import ( "github.com/aws/aws-sdk-go-v2/service/codeconnections/types" "github.com/hashicorp/terraform-plugin-testing/config" - sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/knownvalue" "github.com/hashicorp/terraform-plugin-testing/plancheck" @@ -19,11 +18,12 @@ import ( func TestAccCodeConnectionsConnection_tags(t *testing.T) { ctx := acctest.Context(t) + var v types.Connection resourceName := "aws_codeconnections_connection.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, 
names.CodeConnectionsServiceID), CheckDestroy: testAccCheckConnectionDestroy(ctx), @@ -201,11 +201,12 @@ func TestAccCodeConnectionsConnection_tags(t *testing.T) { func TestAccCodeConnectionsConnection_tags_null(t *testing.T) { ctx := acctest.Context(t) + var v types.Connection resourceName := "aws_codeconnections_connection.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.CodeConnectionsServiceID), CheckDestroy: testAccCheckConnectionDestroy(ctx), @@ -263,11 +264,12 @@ func TestAccCodeConnectionsConnection_tags_null(t *testing.T) { func TestAccCodeConnectionsConnection_tags_EmptyMap(t *testing.T) { ctx := acctest.Context(t) + var v types.Connection resourceName := "aws_codeconnections_connection.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.CodeConnectionsServiceID), CheckDestroy: testAccCheckConnectionDestroy(ctx), @@ -313,11 +315,12 @@ func TestAccCodeConnectionsConnection_tags_EmptyMap(t *testing.T) { func TestAccCodeConnectionsConnection_tags_AddOnUpdate(t *testing.T) { ctx := acctest.Context(t) + var v types.Connection resourceName := "aws_codeconnections_connection.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.CodeConnectionsServiceID), CheckDestroy: 
testAccCheckConnectionDestroy(ctx), @@ -393,11 +396,12 @@ func TestAccCodeConnectionsConnection_tags_AddOnUpdate(t *testing.T) { func TestAccCodeConnectionsConnection_tags_EmptyTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) + var v types.Connection resourceName := "aws_codeconnections_connection.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.CodeConnectionsServiceID), CheckDestroy: testAccCheckConnectionDestroy(ctx), @@ -483,11 +487,12 @@ func TestAccCodeConnectionsConnection_tags_EmptyTag_OnCreate(t *testing.T) { func TestAccCodeConnectionsConnection_tags_EmptyTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + var v types.Connection resourceName := "aws_codeconnections_connection.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.CodeConnectionsServiceID), CheckDestroy: testAccCheckConnectionDestroy(ctx), @@ -622,11 +627,12 @@ func TestAccCodeConnectionsConnection_tags_EmptyTag_OnUpdate_Add(t *testing.T) { func TestAccCodeConnectionsConnection_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + var v types.Connection resourceName := "aws_codeconnections_connection.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.CodeConnectionsServiceID), 
CheckDestroy: testAccCheckConnectionDestroy(ctx), @@ -712,11 +718,12 @@ func TestAccCodeConnectionsConnection_tags_EmptyTag_OnUpdate_Replace(t *testing. func TestAccCodeConnectionsConnection_tags_DefaultTags_providerOnly(t *testing.T) { ctx := acctest.Context(t) + var v types.Connection resourceName := "aws_codeconnections_connection.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.CodeConnectionsServiceID), CheckDestroy: testAccCheckConnectionDestroy(ctx), @@ -893,11 +900,12 @@ func TestAccCodeConnectionsConnection_tags_DefaultTags_providerOnly(t *testing.T func TestAccCodeConnectionsConnection_tags_DefaultTags_nonOverlapping(t *testing.T) { ctx := acctest.Context(t) + var v types.Connection resourceName := "aws_codeconnections_connection.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.CodeConnectionsServiceID), CheckDestroy: testAccCheckConnectionDestroy(ctx), @@ -1053,11 +1061,12 @@ func TestAccCodeConnectionsConnection_tags_DefaultTags_nonOverlapping(t *testing func TestAccCodeConnectionsConnection_tags_DefaultTags_overlapping(t *testing.T) { ctx := acctest.Context(t) + var v types.Connection resourceName := "aws_codeconnections_connection.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: 
acctest.ErrorCheck(t, names.CodeConnectionsServiceID), CheckDestroy: testAccCheckConnectionDestroy(ctx), @@ -1229,11 +1238,12 @@ func TestAccCodeConnectionsConnection_tags_DefaultTags_overlapping(t *testing.T) func TestAccCodeConnectionsConnection_tags_DefaultTags_updateToProviderOnly(t *testing.T) { ctx := acctest.Context(t) + var v types.Connection resourceName := "aws_codeconnections_connection.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.CodeConnectionsServiceID), CheckDestroy: testAccCheckConnectionDestroy(ctx), @@ -1319,11 +1329,12 @@ func TestAccCodeConnectionsConnection_tags_DefaultTags_updateToProviderOnly(t *t func TestAccCodeConnectionsConnection_tags_DefaultTags_updateToResourceOnly(t *testing.T) { ctx := acctest.Context(t) + var v types.Connection resourceName := "aws_codeconnections_connection.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.CodeConnectionsServiceID), CheckDestroy: testAccCheckConnectionDestroy(ctx), @@ -1408,11 +1419,12 @@ func TestAccCodeConnectionsConnection_tags_DefaultTags_updateToResourceOnly(t *t func TestAccCodeConnectionsConnection_tags_DefaultTags_emptyResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v types.Connection resourceName := "aws_codeconnections_connection.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, 
resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.CodeConnectionsServiceID), CheckDestroy: testAccCheckConnectionDestroy(ctx), @@ -1474,11 +1486,12 @@ func TestAccCodeConnectionsConnection_tags_DefaultTags_emptyResourceTag(t *testi func TestAccCodeConnectionsConnection_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T) { ctx := acctest.Context(t) + var v types.Connection resourceName := "aws_codeconnections_connection.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.CodeConnectionsServiceID), CheckDestroy: testAccCheckConnectionDestroy(ctx), @@ -1532,11 +1545,12 @@ func TestAccCodeConnectionsConnection_tags_DefaultTags_emptyProviderOnlyTag(t *t func TestAccCodeConnectionsConnection_tags_DefaultTags_nullOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v types.Connection resourceName := "aws_codeconnections_connection.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.CodeConnectionsServiceID), CheckDestroy: testAccCheckConnectionDestroy(ctx), @@ -1601,11 +1615,12 @@ func TestAccCodeConnectionsConnection_tags_DefaultTags_nullOverlappingResourceTa func TestAccCodeConnectionsConnection_tags_DefaultTags_nullNonOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v types.Connection resourceName := "aws_codeconnections_connection.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, 
acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.CodeConnectionsServiceID), CheckDestroy: testAccCheckConnectionDestroy(ctx), @@ -1672,11 +1687,12 @@ func TestAccCodeConnectionsConnection_tags_DefaultTags_nullNonOverlappingResourc func TestAccCodeConnectionsConnection_tags_ComputedTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) + var v types.Connection resourceName := "aws_codeconnections_connection.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.CodeConnectionsServiceID), CheckDestroy: testAccCheckConnectionDestroy(ctx), @@ -1727,11 +1743,12 @@ func TestAccCodeConnectionsConnection_tags_ComputedTag_OnCreate(t *testing.T) { func TestAccCodeConnectionsConnection_tags_ComputedTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + var v types.Connection resourceName := "aws_codeconnections_connection.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.CodeConnectionsServiceID), CheckDestroy: testAccCheckConnectionDestroy(ctx), @@ -1824,11 +1841,12 @@ func TestAccCodeConnectionsConnection_tags_ComputedTag_OnUpdate_Add(t *testing.T func TestAccCodeConnectionsConnection_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + var v types.Connection resourceName := "aws_codeconnections_connection.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) 
+ rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.CodeConnectionsServiceID), CheckDestroy: testAccCheckConnectionDestroy(ctx), @@ -1911,11 +1929,12 @@ func TestAccCodeConnectionsConnection_tags_ComputedTag_OnUpdate_Replace(t *testi func TestAccCodeConnectionsConnection_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { ctx := acctest.Context(t) + var v types.Connection resourceName := "aws_codeconnections_connection.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.CodeConnectionsServiceID), CheckDestroy: testAccCheckConnectionDestroy(ctx), @@ -2073,11 +2092,12 @@ func TestAccCodeConnectionsConnection_tags_IgnoreTags_Overlap_DefaultTag(t *test func TestAccCodeConnectionsConnection_tags_IgnoreTags_Overlap_ResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v types.Connection resourceName := "aws_codeconnections_connection.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.CodeConnectionsServiceID), CheckDestroy: testAccCheckConnectionDestroy(ctx), diff --git a/internal/service/codeconnections/connection_test.go b/internal/service/codeconnections/connection_test.go index 3206dc94b8a0..c582fb5c3c8e 100644 --- a/internal/service/codeconnections/connection_test.go +++ b/internal/service/codeconnections/connection_test.go @@ -12,13 +12,8 @@ import ( 
"github.com/aws/aws-sdk-go-v2/service/codeconnections/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" - "github.com/hashicorp/terraform-plugin-testing/plancheck" - "github.com/hashicorp/terraform-plugin-testing/statecheck" "github.com/hashicorp/terraform-plugin-testing/terraform" - "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" - "github.com/hashicorp/terraform-plugin-testing/tfversion" "github.com/hashicorp/terraform-provider-aws/internal/acctest" - tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" "github.com/hashicorp/terraform-provider-aws/internal/conns" tfcodeconnections "github.com/hashicorp/terraform-provider-aws/internal/service/codeconnections" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" @@ -120,80 +115,6 @@ func TestAccCodeConnectionsConnection_disappears(t *testing.T) { }) } -func TestAccCodeConnectionsConnection_Identity_ExistingResource(t *testing.T) { - ctx := acctest.Context(t) - var v types.Connection - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resourceName := "aws_codeconnections_connection.test" - - resource.ParallelTest(t, resource.TestCase{ - TerraformVersionChecks: []tfversion.TerraformVersionCheck{ - tfversion.SkipBelow(tfversion.Version1_12_0), - }, - PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, names.CodeConnectionsServiceID), - CheckDestroy: testAccCheckConnectionDestroy(ctx), - Steps: []resource.TestStep{ - { - ExternalProviders: map[string]resource.ExternalProvider{ - "aws": { - Source: "hashicorp/aws", - VersionConstraint: "5.100.0", - }, - }, - Config: testAccConnectionConfig_basic(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckConnectionExists(ctx, resourceName, &v), - ), - ConfigStateChecks: []statecheck.StateCheck{ - tfstatecheck.ExpectNoIdentity(resourceName), - }, - }, - { - ExternalProviders: 
map[string]resource.ExternalProvider{ - "aws": { - Source: "hashicorp/aws", - VersionConstraint: "6.0.0", - }, - }, - Config: testAccConnectionConfig_basic(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckConnectionExists(ctx, resourceName, &v), - ), - ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - PostApplyPostRefresh: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - }, - ConfigStateChecks: []statecheck.StateCheck{ - statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), - }, - }, - { - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - Config: testAccConnectionConfig_basic(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckConnectionExists(ctx, resourceName, &v), - ), - ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - PostApplyPostRefresh: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - }, - ConfigStateChecks: []statecheck.StateCheck{ - statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), - }, - }, - }, - }) -} - func testAccCheckConnectionExists(ctx context.Context, n string, v *types.Connection) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] diff --git a/internal/service/codeconnections/host.go b/internal/service/codeconnections/host.go index f4b0d07de13b..adfd583017ae 100644 --- a/internal/service/codeconnections/host.go +++ b/internal/service/codeconnections/host.go @@ -36,6 +36,7 @@ import ( // @Tags(identifierAttribute="arn") // @ArnIdentity(identityDuplicateAttributes="id") // 
@Testing(existsType="github.com/aws/aws-sdk-go-v2/service/codeconnections/types;types.Host") +// @Testing(preIdentityVersion="v5.100.0") func newHostResource(_ context.Context) (resource.ResourceWithConfigure, error) { r := &hostResource{} diff --git a/internal/service/codeconnections/host_identity_gen_test.go b/internal/service/codeconnections/host_identity_gen_test.go index 1f3e834e008c..77b6ef662d5c 100644 --- a/internal/service/codeconnections/host_identity_gen_test.go +++ b/internal/service/codeconnections/host_identity_gen_test.go @@ -16,6 +16,7 @@ import ( "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" "github.com/hashicorp/terraform-plugin-testing/tfversion" "github.com/hashicorp/terraform-provider-aws/internal/acctest" + tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -26,7 +27,7 @@ func TestAccCodeConnectionsHost_Identity_Basic(t *testing.T) { resourceName := "aws_codeconnections_host.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ TerraformVersionChecks: []tfversion.TerraformVersionCheck{ tfversion.SkipBelow(tfversion.Version1_12_0), }, @@ -47,6 +48,9 @@ func TestAccCodeConnectionsHost_Identity_Basic(t *testing.T) { ConfigStateChecks: []statecheck.StateCheck{ statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New(names.AttrARN), compare.ValuesSame()), statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), }, }, @@ -108,7 +112,7 @@ func TestAccCodeConnectionsHost_Identity_RegionOverride(t *testing.T) { resourceName := 
"aws_codeconnections_host.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ TerraformVersionChecks: []tfversion.TerraformVersionCheck{ tfversion.SkipBelow(tfversion.Version1_12_0), }, @@ -127,6 +131,9 @@ func TestAccCodeConnectionsHost_Identity_RegionOverride(t *testing.T) { ConfigStateChecks: []statecheck.StateCheck{ statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New(names.AttrARN), compare.ValuesSame()), statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), }, }, @@ -218,3 +225,129 @@ func TestAccCodeConnectionsHost_Identity_RegionOverride(t *testing.T) { }, }) } + +func TestAccCodeConnectionsHost_Identity_ExistingResource(t *testing.T) { + ctx := acctest.Context(t) + + var v types.Host + resourceName := "aws_codeconnections_host.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.CodeConnectionsServiceID), + CheckDestroy: testAccCheckHostDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/Host/basic_v5.100.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckHostExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + 
tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: v6.0 Identity set on refresh + { + ConfigDirectory: config.StaticDirectory("testdata/Host/basic_v6.0.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckHostExists(ctx, resourceName, &v), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + + // Step 3: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Host/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + }, + }) +} + +func TestAccCodeConnectionsHost_Identity_ExistingResource_NoRefresh_NoChange(t *testing.T) { + ctx := acctest.Context(t) + + var v types.Host + resourceName := "aws_codeconnections_host.test" + rName := 
sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.CodeConnectionsServiceID), + CheckDestroy: testAccCheckHostDestroy(ctx), + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/Host/basic_v5.100.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckHostExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Host/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + }, + }, + }) +} diff --git a/internal/service/codeconnections/host_tags_gen_test.go b/internal/service/codeconnections/host_tags_gen_test.go index 7e939f3816fb..a1cb2684a20b 100644 --- a/internal/service/codeconnections/host_tags_gen_test.go +++ b/internal/service/codeconnections/host_tags_gen_test.go @@ -7,7 +7,6 @@ import ( "github.com/aws/aws-sdk-go-v2/service/codeconnections/types" "github.com/hashicorp/terraform-plugin-testing/config" - sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/knownvalue" "github.com/hashicorp/terraform-plugin-testing/plancheck" @@ -19,11 +18,12 @@ import ( func TestAccCodeConnectionsHost_tags(t *testing.T) { ctx := 
acctest.Context(t) + var v types.Host resourceName := "aws_codeconnections_host.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.CodeConnectionsServiceID), CheckDestroy: testAccCheckHostDestroy(ctx), @@ -201,11 +201,12 @@ func TestAccCodeConnectionsHost_tags(t *testing.T) { func TestAccCodeConnectionsHost_tags_null(t *testing.T) { ctx := acctest.Context(t) + var v types.Host resourceName := "aws_codeconnections_host.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.CodeConnectionsServiceID), CheckDestroy: testAccCheckHostDestroy(ctx), @@ -263,11 +264,12 @@ func TestAccCodeConnectionsHost_tags_null(t *testing.T) { func TestAccCodeConnectionsHost_tags_EmptyMap(t *testing.T) { ctx := acctest.Context(t) + var v types.Host resourceName := "aws_codeconnections_host.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.CodeConnectionsServiceID), CheckDestroy: testAccCheckHostDestroy(ctx), @@ -313,11 +315,12 @@ func TestAccCodeConnectionsHost_tags_EmptyMap(t *testing.T) { func TestAccCodeConnectionsHost_tags_AddOnUpdate(t *testing.T) { ctx := acctest.Context(t) + var v types.Host resourceName := "aws_codeconnections_host.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName 
:= acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.CodeConnectionsServiceID), CheckDestroy: testAccCheckHostDestroy(ctx), @@ -393,11 +396,12 @@ func TestAccCodeConnectionsHost_tags_AddOnUpdate(t *testing.T) { func TestAccCodeConnectionsHost_tags_EmptyTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) + var v types.Host resourceName := "aws_codeconnections_host.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.CodeConnectionsServiceID), CheckDestroy: testAccCheckHostDestroy(ctx), @@ -483,11 +487,12 @@ func TestAccCodeConnectionsHost_tags_EmptyTag_OnCreate(t *testing.T) { func TestAccCodeConnectionsHost_tags_EmptyTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + var v types.Host resourceName := "aws_codeconnections_host.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.CodeConnectionsServiceID), CheckDestroy: testAccCheckHostDestroy(ctx), @@ -622,11 +627,12 @@ func TestAccCodeConnectionsHost_tags_EmptyTag_OnUpdate_Add(t *testing.T) { func TestAccCodeConnectionsHost_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + var v types.Host resourceName := "aws_codeconnections_host.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - 
resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.CodeConnectionsServiceID), CheckDestroy: testAccCheckHostDestroy(ctx), @@ -712,11 +718,12 @@ func TestAccCodeConnectionsHost_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { func TestAccCodeConnectionsHost_tags_DefaultTags_providerOnly(t *testing.T) { ctx := acctest.Context(t) + var v types.Host resourceName := "aws_codeconnections_host.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.CodeConnectionsServiceID), CheckDestroy: testAccCheckHostDestroy(ctx), @@ -893,11 +900,12 @@ func TestAccCodeConnectionsHost_tags_DefaultTags_providerOnly(t *testing.T) { func TestAccCodeConnectionsHost_tags_DefaultTags_nonOverlapping(t *testing.T) { ctx := acctest.Context(t) + var v types.Host resourceName := "aws_codeconnections_host.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.CodeConnectionsServiceID), CheckDestroy: testAccCheckHostDestroy(ctx), @@ -1053,11 +1061,12 @@ func TestAccCodeConnectionsHost_tags_DefaultTags_nonOverlapping(t *testing.T) { func TestAccCodeConnectionsHost_tags_DefaultTags_overlapping(t *testing.T) { ctx := acctest.Context(t) + var v types.Host resourceName := "aws_codeconnections_host.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, 
resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.CodeConnectionsServiceID), CheckDestroy: testAccCheckHostDestroy(ctx), @@ -1229,11 +1238,12 @@ func TestAccCodeConnectionsHost_tags_DefaultTags_overlapping(t *testing.T) { func TestAccCodeConnectionsHost_tags_DefaultTags_updateToProviderOnly(t *testing.T) { ctx := acctest.Context(t) + var v types.Host resourceName := "aws_codeconnections_host.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.CodeConnectionsServiceID), CheckDestroy: testAccCheckHostDestroy(ctx), @@ -1319,11 +1329,12 @@ func TestAccCodeConnectionsHost_tags_DefaultTags_updateToProviderOnly(t *testing func TestAccCodeConnectionsHost_tags_DefaultTags_updateToResourceOnly(t *testing.T) { ctx := acctest.Context(t) + var v types.Host resourceName := "aws_codeconnections_host.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.CodeConnectionsServiceID), CheckDestroy: testAccCheckHostDestroy(ctx), @@ -1408,11 +1419,12 @@ func TestAccCodeConnectionsHost_tags_DefaultTags_updateToResourceOnly(t *testing func TestAccCodeConnectionsHost_tags_DefaultTags_emptyResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v types.Host resourceName := "aws_codeconnections_host.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, 
resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.CodeConnectionsServiceID), CheckDestroy: testAccCheckHostDestroy(ctx), @@ -1474,11 +1486,12 @@ func TestAccCodeConnectionsHost_tags_DefaultTags_emptyResourceTag(t *testing.T) func TestAccCodeConnectionsHost_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T) { ctx := acctest.Context(t) + var v types.Host resourceName := "aws_codeconnections_host.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.CodeConnectionsServiceID), CheckDestroy: testAccCheckHostDestroy(ctx), @@ -1532,11 +1545,12 @@ func TestAccCodeConnectionsHost_tags_DefaultTags_emptyProviderOnlyTag(t *testing func TestAccCodeConnectionsHost_tags_DefaultTags_nullOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v types.Host resourceName := "aws_codeconnections_host.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.CodeConnectionsServiceID), CheckDestroy: testAccCheckHostDestroy(ctx), @@ -1601,11 +1615,12 @@ func TestAccCodeConnectionsHost_tags_DefaultTags_nullOverlappingResourceTag(t *t func TestAccCodeConnectionsHost_tags_DefaultTags_nullNonOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v types.Host resourceName := "aws_codeconnections_host.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - 
resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.CodeConnectionsServiceID), CheckDestroy: testAccCheckHostDestroy(ctx), @@ -1672,11 +1687,12 @@ func TestAccCodeConnectionsHost_tags_DefaultTags_nullNonOverlappingResourceTag(t func TestAccCodeConnectionsHost_tags_ComputedTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) + var v types.Host resourceName := "aws_codeconnections_host.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.CodeConnectionsServiceID), CheckDestroy: testAccCheckHostDestroy(ctx), @@ -1727,11 +1743,12 @@ func TestAccCodeConnectionsHost_tags_ComputedTag_OnCreate(t *testing.T) { func TestAccCodeConnectionsHost_tags_ComputedTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + var v types.Host resourceName := "aws_codeconnections_host.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.CodeConnectionsServiceID), CheckDestroy: testAccCheckHostDestroy(ctx), @@ -1824,11 +1841,12 @@ func TestAccCodeConnectionsHost_tags_ComputedTag_OnUpdate_Add(t *testing.T) { func TestAccCodeConnectionsHost_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + var v types.Host resourceName := "aws_codeconnections_host.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, 
resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.CodeConnectionsServiceID), CheckDestroy: testAccCheckHostDestroy(ctx), @@ -1911,11 +1929,12 @@ func TestAccCodeConnectionsHost_tags_ComputedTag_OnUpdate_Replace(t *testing.T) func TestAccCodeConnectionsHost_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { ctx := acctest.Context(t) + var v types.Host resourceName := "aws_codeconnections_host.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.CodeConnectionsServiceID), CheckDestroy: testAccCheckHostDestroy(ctx), @@ -2073,11 +2092,12 @@ func TestAccCodeConnectionsHost_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) func TestAccCodeConnectionsHost_tags_IgnoreTags_Overlap_ResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v types.Host resourceName := "aws_codeconnections_host.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.CodeConnectionsServiceID), CheckDestroy: testAccCheckHostDestroy(ctx), diff --git a/internal/service/codeconnections/host_test.go b/internal/service/codeconnections/host_test.go index 681e62a09bb1..69e7fec75dfd 100644 --- a/internal/service/codeconnections/host_test.go +++ b/internal/service/codeconnections/host_test.go @@ -12,13 +12,8 @@ import ( "github.com/aws/aws-sdk-go-v2/service/codeconnections/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" 
"github.com/hashicorp/terraform-plugin-testing/helper/resource" - "github.com/hashicorp/terraform-plugin-testing/plancheck" - "github.com/hashicorp/terraform-plugin-testing/statecheck" "github.com/hashicorp/terraform-plugin-testing/terraform" - "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" - "github.com/hashicorp/terraform-plugin-testing/tfversion" "github.com/hashicorp/terraform-provider-aws/internal/acctest" - tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" "github.com/hashicorp/terraform-provider-aws/internal/conns" tfcodeconnections "github.com/hashicorp/terraform-provider-aws/internal/service/codeconnections" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" @@ -140,80 +135,6 @@ func TestAccCodeConnectionsHost_vpc(t *testing.T) { }) } -func TestAccCodeConnectionsHost_Identity_ExistingResource(t *testing.T) { - ctx := acctest.Context(t) - var v types.Host - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resourceName := "aws_codeconnections_host.test" - - resource.ParallelTest(t, resource.TestCase{ - TerraformVersionChecks: []tfversion.TerraformVersionCheck{ - tfversion.SkipBelow(tfversion.Version1_12_0), - }, - PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, names.CodeConnectionsServiceID), - CheckDestroy: testAccCheckHostDestroy(ctx), - Steps: []resource.TestStep{ - { - ExternalProviders: map[string]resource.ExternalProvider{ - "aws": { - Source: "hashicorp/aws", - VersionConstraint: "5.100.0", - }, - }, - Config: testAccHostConfig_basic(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckHostExists(ctx, resourceName, &v), - ), - ConfigStateChecks: []statecheck.StateCheck{ - tfstatecheck.ExpectNoIdentity(resourceName), - }, - }, - { - ExternalProviders: map[string]resource.ExternalProvider{ - "aws": { - Source: "hashicorp/aws", - VersionConstraint: "6.0.0", - }, - }, - Config: testAccHostConfig_basic(rName), - Check: 
resource.ComposeTestCheckFunc( - testAccCheckHostExists(ctx, resourceName, &v), - ), - ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - PostApplyPostRefresh: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - }, - ConfigStateChecks: []statecheck.StateCheck{ - statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), - }, - }, - { - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - Config: testAccHostConfig_basic(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckHostExists(ctx, resourceName, &v), - ), - ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - PostApplyPostRefresh: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - }, - ConfigStateChecks: []statecheck.StateCheck{ - statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), - }, - }, - }, - }) -} - func testAccCheckHostExists(ctx context.Context, n string, v *types.Host) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] diff --git a/internal/service/codeconnections/service_endpoint_resolver_gen.go b/internal/service/codeconnections/service_endpoint_resolver_gen.go index 9ee7a27a43aa..fd577895d98d 100644 --- a/internal/service/codeconnections/service_endpoint_resolver_gen.go +++ b/internal/service/codeconnections/service_endpoint_resolver_gen.go @@ -62,7 +62,7 @@ func (r resolverV2) ResolveEndpoint(ctx context.Context, params codeconnections. 
}) params.UseFIPS = aws.Bool(false) } else { - err = fmt.Errorf("looking up codeconnections endpoint %q: %s", hostname, err) + err = fmt.Errorf("looking up codeconnections endpoint %q: %w", hostname, err) return } } else { diff --git a/internal/service/codeconnections/service_endpoints_gen_test.go b/internal/service/codeconnections/service_endpoints_gen_test.go index 99c39cd14209..690a98ff83db 100644 --- a/internal/service/codeconnections/service_endpoints_gen_test.go +++ b/internal/service/codeconnections/service_endpoints_gen_test.go @@ -521,7 +521,7 @@ func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { ) } -var errCancelOperation = fmt.Errorf("Test: Canceling request") +var errCancelOperation = errors.New("Test: Canceling request") func addCancelRequestMiddleware() func(*middleware.Stack) error { return func(stack *middleware.Stack) error { diff --git a/internal/service/codeconnections/service_package_gen.go b/internal/service/codeconnections/service_package_gen.go index cf033d550bd0..d6acf0a75a12 100644 --- a/internal/service/codeconnections/service_package_gen.go +++ b/internal/service/codeconnections/service_package_gen.go @@ -7,7 +7,6 @@ import ( "unique" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/codeconnections" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -84,7 +83,7 @@ func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) ( func(o *codeconnections.Options) { if inContext, ok := conns.FromContext(ctx); ok && inContext.VCREnabled() { tflog.Info(ctx, "overriding retry behavior to immediately return VCR errors") - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(vcr.InteractionNotFoundRetryableFunc)) + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), vcr.InteractionNotFoundRetryableFunc) } }, 
withExtraOptions(ctx, p, config), diff --git a/internal/service/codeconnections/tags_gen.go b/internal/service/codeconnections/tags_gen.go index efe5dc16073f..4f7df65bdc32 100644 --- a/internal/service/codeconnections/tags_gen.go +++ b/internal/service/codeconnections/tags_gen.go @@ -3,8 +3,8 @@ package codeconnections import ( "context" - "fmt" + "github.com/YakDriver/smarterr" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/codeconnections" awstypes "github.com/aws/aws-sdk-go-v2/service/codeconnections/types" @@ -27,7 +27,7 @@ func listTags(ctx context.Context, conn *codeconnections.Client, identifier stri output, err := conn.ListTagsForResource(ctx, &input, optFns...) if err != nil { - return tftags.New(ctx, nil), err + return tftags.New(ctx, nil), smarterr.NewError(err) } return keyValueTags(ctx, output.Tags), nil @@ -39,7 +39,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri tags, err := listTags(ctx, meta.(*conns.AWSClient).CodeConnectionsClient(ctx), identifier) if err != nil { - return err + return smarterr.NewError(err) } if inContext, ok := tftags.FromContext(ctx); ok { @@ -117,7 +117,7 @@ func updateTags(ctx context.Context, conn *codeconnections.Client, identifier st _, err := conn.UntagResource(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("untagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } @@ -132,7 +132,7 @@ func updateTags(ctx context.Context, conn *codeconnections.Client, identifier st _, err := conn.TagResource(ctx, &input, optFns...) 
if err != nil { - return fmt.Errorf("tagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } diff --git a/internal/service/codeconnections/testdata/Connection/basic_v5.100.0/main_gen.tf b/internal/service/codeconnections/testdata/Connection/basic_v5.100.0/main_gen.tf new file mode 100644 index 000000000000..61883aa6a46b --- /dev/null +++ b/internal/service/codeconnections/testdata/Connection/basic_v5.100.0/main_gen.tf @@ -0,0 +1,23 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resource "aws_codeconnections_connection" "test" { + name = var.rName + provider_type = "Bitbucket" +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "5.100.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/codeconnections/testdata/Connection/basic_v6.0.0/main_gen.tf b/internal/service/codeconnections/testdata/Connection/basic_v6.0.0/main_gen.tf new file mode 100644 index 000000000000..7a60ba243518 --- /dev/null +++ b/internal/service/codeconnections/testdata/Connection/basic_v6.0.0/main_gen.tf @@ -0,0 +1,23 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resource "aws_codeconnections_connection" "test" { + name = var.rName + provider_type = "Bitbucket" +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "6.0.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/codeconnections/testdata/Host/basic_v5.100.0/main_gen.tf b/internal/service/codeconnections/testdata/Host/basic_v5.100.0/main_gen.tf new file mode 100644 index 000000000000..1523b89fa1b3 --- /dev/null +++ b/internal/service/codeconnections/testdata/Host/basic_v5.100.0/main_gen.tf @@ -0,0 +1,24 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_codeconnections_host" "test" { + name = var.rName + provider_endpoint = "https://example.com" + provider_type = "GitHubEnterpriseServer" +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "5.100.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/codeconnections/testdata/Host/basic_v6.0.0/main_gen.tf b/internal/service/codeconnections/testdata/Host/basic_v6.0.0/main_gen.tf new file mode 100644 index 000000000000..548770af222d --- /dev/null +++ b/internal/service/codeconnections/testdata/Host/basic_v6.0.0/main_gen.tf @@ -0,0 +1,24 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resource "aws_codeconnections_host" "test" { + name = var.rName + provider_endpoint = "https://example.com" + provider_type = "GitHubEnterpriseServer" +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "6.0.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/codeguruprofiler/service_endpoint_resolver_gen.go b/internal/service/codeguruprofiler/service_endpoint_resolver_gen.go index 6f115a1df583..43a4ce62979a 100644 --- a/internal/service/codeguruprofiler/service_endpoint_resolver_gen.go +++ b/internal/service/codeguruprofiler/service_endpoint_resolver_gen.go @@ -62,7 +62,7 @@ func (r resolverV2) ResolveEndpoint(ctx context.Context, params codeguruprofiler }) params.UseFIPS = aws.Bool(false) } else { - err = fmt.Errorf("looking up codeguruprofiler endpoint %q: %s", hostname, err) + err = fmt.Errorf("looking up codeguruprofiler endpoint %q: %w", hostname, err) return } } else { diff --git a/internal/service/codeguruprofiler/service_endpoints_gen_test.go b/internal/service/codeguruprofiler/service_endpoints_gen_test.go index 
286f64d4c0b0..a6eb0c2acb3a 100644 --- a/internal/service/codeguruprofiler/service_endpoints_gen_test.go +++ b/internal/service/codeguruprofiler/service_endpoints_gen_test.go @@ -521,7 +521,7 @@ func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { ) } -var errCancelOperation = fmt.Errorf("Test: Canceling request") +var errCancelOperation = errors.New("Test: Canceling request") func addCancelRequestMiddleware() func(*middleware.Stack) error { return func(stack *middleware.Stack) error { diff --git a/internal/service/codeguruprofiler/service_package_gen.go b/internal/service/codeguruprofiler/service_package_gen.go index 32ee4bccad1e..b06f6536d9b4 100644 --- a/internal/service/codeguruprofiler/service_package_gen.go +++ b/internal/service/codeguruprofiler/service_package_gen.go @@ -7,7 +7,6 @@ import ( "unique" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/codeguruprofiler" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -74,7 +73,7 @@ func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) ( func(o *codeguruprofiler.Options) { if inContext, ok := conns.FromContext(ctx); ok && inContext.VCREnabled() { tflog.Info(ctx, "overriding retry behavior to immediately return VCR errors") - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(vcr.InteractionNotFoundRetryableFunc)) + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), vcr.InteractionNotFoundRetryableFunc) } }, withExtraOptions(ctx, p, config), diff --git a/internal/service/codeguruprofiler/tags_gen.go b/internal/service/codeguruprofiler/tags_gen.go index 0fdf3df8ad4e..5ae056e3f8e0 100644 --- a/internal/service/codeguruprofiler/tags_gen.go +++ b/internal/service/codeguruprofiler/tags_gen.go @@ -3,8 +3,8 @@ package codeguruprofiler import ( "context" - "fmt" + 
"github.com/YakDriver/smarterr" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/codeguruprofiler" "github.com/hashicorp/terraform-plugin-log/tflog" @@ -26,7 +26,7 @@ func listTags(ctx context.Context, conn *codeguruprofiler.Client, identifier str output, err := conn.ListTagsForResource(ctx, &input, optFns...) if err != nil { - return tftags.New(ctx, nil), err + return tftags.New(ctx, nil), smarterr.NewError(err) } return keyValueTags(ctx, output.Tags), nil @@ -38,7 +38,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri tags, err := listTags(ctx, meta.(*conns.AWSClient).CodeGuruProfilerClient(ctx), identifier) if err != nil { - return err + return smarterr.NewError(err) } if inContext, ok := tftags.FromContext(ctx); ok { @@ -99,7 +99,7 @@ func updateTags(ctx context.Context, conn *codeguruprofiler.Client, identifier s _, err := conn.UntagResource(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("untagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } @@ -114,7 +114,7 @@ func updateTags(ctx context.Context, conn *codeguruprofiler.Client, identifier s _, err := conn.TagResource(ctx, &input, optFns...) 
if err != nil { - return fmt.Errorf("tagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } diff --git a/internal/service/codegurureviewer/repository_association_identity_gen_test.go b/internal/service/codegurureviewer/repository_association_identity_gen_test.go index 81c75da3a7da..3ea068d0e95f 100644 --- a/internal/service/codegurureviewer/repository_association_identity_gen_test.go +++ b/internal/service/codegurureviewer/repository_association_identity_gen_test.go @@ -16,6 +16,7 @@ import ( "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" "github.com/hashicorp/terraform-plugin-testing/tfversion" "github.com/hashicorp/terraform-provider-aws/internal/acctest" + tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -26,7 +27,7 @@ func TestAccCodeGuruReviewerRepositoryAssociation_Identity_Basic(t *testing.T) { resourceName := "aws_codegurureviewer_repository_association.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ TerraformVersionChecks: []tfversion.TerraformVersionCheck{ tfversion.SkipBelow(tfversion.Version1_12_0), }, @@ -47,6 +48,9 @@ func TestAccCodeGuruReviewerRepositoryAssociation_Identity_Basic(t *testing.T) { ConfigStateChecks: []statecheck.StateCheck{ statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New(names.AttrARN), compare.ValuesSame()), statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), }, }, @@ -115,7 +119,7 @@ func TestAccCodeGuruReviewerRepositoryAssociation_Identity_RegionOverride(t *tes 
resourceName := "aws_codegurureviewer_repository_association.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ TerraformVersionChecks: []tfversion.TerraformVersionCheck{ tfversion.SkipBelow(tfversion.Version1_12_0), }, @@ -134,6 +138,9 @@ func TestAccCodeGuruReviewerRepositoryAssociation_Identity_RegionOverride(t *tes ConfigStateChecks: []statecheck.StateCheck{ statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New(names.AttrARN), compare.ValuesSame()), statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), }, }, @@ -237,3 +244,131 @@ func TestAccCodeGuruReviewerRepositoryAssociation_Identity_RegionOverride(t *tes }, }) } + +func TestAccCodeGuruReviewerRepositoryAssociation_Identity_ExistingResource(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.RepositoryAssociation + resourceName := "aws_codegurureviewer_repository_association.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.CodeGuruReviewerServiceID), + CheckDestroy: testAccCheckRepositoryAssociationDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/RepositoryAssociation/basic_v5.100.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: 
resource.ComposeAggregateTestCheckFunc( + testAccCheckRepositoryAssociationExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: v6.0 Identity error + { + ConfigDirectory: config.StaticDirectory("testdata/RepositoryAssociation/basic_v6.0.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckRepositoryAssociationExists(ctx, resourceName, &v), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.Null(), + }), + }, + }, + + // Step 3: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/RepositoryAssociation/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + }, + }) +} + +func TestAccCodeGuruReviewerRepositoryAssociation_Identity_ExistingResource_NoRefresh_NoChange(t *testing.T) { + ctx := 
acctest.Context(t) + + var v awstypes.RepositoryAssociation + resourceName := "aws_codegurureviewer_repository_association.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.CodeGuruReviewerServiceID), + CheckDestroy: testAccCheckRepositoryAssociationDestroy(ctx), + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/RepositoryAssociation/basic_v5.100.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckRepositoryAssociationExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/RepositoryAssociation/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckRepositoryAssociationExists(ctx, resourceName, &v), + ), + }, + }, + }) +} diff --git a/internal/service/codegurureviewer/repository_association_test.go b/internal/service/codegurureviewer/repository_association_test.go index 4c9545317ffa..24ed8807d51e 100644 --- a/internal/service/codegurureviewer/repository_association_test.go +++ b/internal/service/codegurureviewer/repository_association_test.go @@ -13,14 +13,8 @@ import ( "github.com/aws/aws-sdk-go-v2/service/codegurureviewer/types" sdkacctest 
"github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" - "github.com/hashicorp/terraform-plugin-testing/knownvalue" - "github.com/hashicorp/terraform-plugin-testing/plancheck" - "github.com/hashicorp/terraform-plugin-testing/statecheck" "github.com/hashicorp/terraform-plugin-testing/terraform" - "github.com/hashicorp/terraform-plugin-testing/tfversion" "github.com/hashicorp/terraform-provider-aws/internal/acctest" - tfknownvalue "github.com/hashicorp/terraform-provider-aws/internal/acctest/knownvalue" - tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" "github.com/hashicorp/terraform-provider-aws/internal/conns" tfcodegurureviewer "github.com/hashicorp/terraform-provider-aws/internal/service/codegurureviewer" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" @@ -213,88 +207,6 @@ func TestAccCodeGuruReviewerRepositoryAssociation_disappears(t *testing.T) { }) } -func TestAccCodeGuruReviewerRepositoryAssociation_Identity_ExistingResource(t *testing.T) { - ctx := acctest.Context(t) - var repositoryassociation types.RepositoryAssociation - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resourceName := "aws_codegurureviewer_repository_association.test" - - resource.ParallelTest(t, resource.TestCase{ - TerraformVersionChecks: []tfversion.TerraformVersionCheck{ - tfversion.SkipBelow(tfversion.Version1_12_0), - }, - PreCheck: func() { - acctest.PreCheck(ctx, t) - acctest.PreCheckPartitionHasService(t, names.CodeGuruReviewerEndpointID) - testAccPreCheck(ctx, t) - }, - ErrorCheck: acctest.ErrorCheck(t, names.CodeGuruReviewerServiceID), - CheckDestroy: testAccCheckRepositoryAssociationDestroy(ctx), - Steps: []resource.TestStep{ - { - ExternalProviders: map[string]resource.ExternalProvider{ - "aws": { - Source: "hashicorp/aws", - VersionConstraint: "5.100.0", - }, - }, - Config: testAccRepositoryAssociationConfig_basic(rName), - Check: 
resource.ComposeTestCheckFunc( - testAccCheckRepositoryAssociationExists(ctx, resourceName, &repositoryassociation), - ), - ConfigStateChecks: []statecheck.StateCheck{ - tfstatecheck.ExpectNoIdentity(resourceName), - }, - }, - { - ExternalProviders: map[string]resource.ExternalProvider{ - "aws": { - Source: "hashicorp/aws", - VersionConstraint: "6.0.0", - }, - }, - Config: testAccRepositoryAssociationConfig_basic(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckRepositoryAssociationExists(ctx, resourceName, &repositoryassociation), - ), - ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - PostApplyPostRefresh: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - }, - ConfigStateChecks: []statecheck.StateCheck{ - statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ - names.AttrARN: knownvalue.Null(), - }), - }, - }, - { - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - Config: testAccRepositoryAssociationConfig_basic(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckRepositoryAssociationExists(ctx, resourceName, &repositoryassociation), - ), - ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - PostApplyPostRefresh: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - }, - ConfigStateChecks: []statecheck.StateCheck{ - statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ - names.AttrARN: tfknownvalue.RegionalARNRegexp("codeguru-reviewer", regexache.MustCompile(`association:.+`)), - }), - }, - }, - }, - }) -} - func testAccCheckRepositoryAssociationDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { conn := 
acctest.Provider.Meta().(*conns.AWSClient).CodeGuruReviewerClient(ctx) diff --git a/internal/service/codegurureviewer/service_endpoint_resolver_gen.go b/internal/service/codegurureviewer/service_endpoint_resolver_gen.go index 0d7e0ab38a23..6ca55099ed2f 100644 --- a/internal/service/codegurureviewer/service_endpoint_resolver_gen.go +++ b/internal/service/codegurureviewer/service_endpoint_resolver_gen.go @@ -62,7 +62,7 @@ func (r resolverV2) ResolveEndpoint(ctx context.Context, params codegurureviewer }) params.UseFIPS = aws.Bool(false) } else { - err = fmt.Errorf("looking up codegurureviewer endpoint %q: %s", hostname, err) + err = fmt.Errorf("looking up codegurureviewer endpoint %q: %w", hostname, err) return } } else { diff --git a/internal/service/codegurureviewer/service_endpoints_gen_test.go b/internal/service/codegurureviewer/service_endpoints_gen_test.go index bf89631af936..cab6491f6600 100644 --- a/internal/service/codegurureviewer/service_endpoints_gen_test.go +++ b/internal/service/codegurureviewer/service_endpoints_gen_test.go @@ -524,7 +524,7 @@ func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { ) } -var errCancelOperation = fmt.Errorf("Test: Canceling request") +var errCancelOperation = errors.New("Test: Canceling request") func addCancelRequestMiddleware() func(*middleware.Stack) error { return func(stack *middleware.Stack) error { diff --git a/internal/service/codegurureviewer/service_package_gen.go b/internal/service/codegurureviewer/service_package_gen.go index 3e257c082378..abf8e3e6c1e0 100644 --- a/internal/service/codegurureviewer/service_package_gen.go +++ b/internal/service/codegurureviewer/service_package_gen.go @@ -7,7 +7,6 @@ import ( "unique" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/codegurureviewer" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -74,7 +73,7 @@ func (p 
*servicePackage) NewClient(ctx context.Context, config map[string]any) ( func(o *codegurureviewer.Options) { if inContext, ok := conns.FromContext(ctx); ok && inContext.VCREnabled() { tflog.Info(ctx, "overriding retry behavior to immediately return VCR errors") - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(vcr.InteractionNotFoundRetryableFunc)) + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), vcr.InteractionNotFoundRetryableFunc) } }, withExtraOptions(ctx, p, config), diff --git a/internal/service/codegurureviewer/sweep.go b/internal/service/codegurureviewer/sweep.go index 600445172dd5..b0b9a2233cba 100644 --- a/internal/service/codegurureviewer/sweep.go +++ b/internal/service/codegurureviewer/sweep.go @@ -25,7 +25,7 @@ func sweepAssociations(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %w", err) + return fmt.Errorf("getting client: %w", err) } input := &codegurureviewer.ListRepositoryAssociationsInput{} conn := client.CodeGuruReviewerClient(ctx) diff --git a/internal/service/codegurureviewer/tags_gen.go b/internal/service/codegurureviewer/tags_gen.go index f6ee9a5fb984..de1b16ba99c0 100644 --- a/internal/service/codegurureviewer/tags_gen.go +++ b/internal/service/codegurureviewer/tags_gen.go @@ -3,8 +3,8 @@ package codegurureviewer import ( "context" - "fmt" + "github.com/YakDriver/smarterr" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/codegurureviewer" "github.com/hashicorp/terraform-plugin-log/tflog" @@ -26,7 +26,7 @@ func listTags(ctx context.Context, conn *codegurureviewer.Client, identifier str output, err := conn.ListTagsForResource(ctx, &input, optFns...) 
if err != nil { - return tftags.New(ctx, nil), err + return tftags.New(ctx, nil), smarterr.NewError(err) } return keyValueTags(ctx, output.Tags), nil @@ -38,7 +38,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri tags, err := listTags(ctx, meta.(*conns.AWSClient).CodeGuruReviewerClient(ctx), identifier) if err != nil { - return err + return smarterr.NewError(err) } if inContext, ok := tftags.FromContext(ctx); ok { @@ -99,7 +99,7 @@ func updateTags(ctx context.Context, conn *codegurureviewer.Client, identifier s _, err := conn.UntagResource(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("untagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } @@ -114,7 +114,7 @@ func updateTags(ctx context.Context, conn *codegurureviewer.Client, identifier s _, err := conn.TagResource(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("tagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } diff --git a/internal/service/codegurureviewer/testdata/RepositoryAssociation/basic_v5.100.0/main_gen.tf b/internal/service/codegurureviewer/testdata/RepositoryAssociation/basic_v5.100.0/main_gen.tf new file mode 100644 index 000000000000..e17a0a931df5 --- /dev/null +++ b/internal/service/codegurureviewer/testdata/RepositoryAssociation/basic_v5.100.0/main_gen.tf @@ -0,0 +1,38 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_codegurureviewer_repository_association" "test" { + repository { + codecommit { + name = aws_codecommit_repository.test.repository_name + } + } +} + +# testAccRepositoryAssociation_codecommit_repository + +resource "aws_codecommit_repository" "test" { + repository_name = var.rName + description = "This is a test description" + lifecycle { + ignore_changes = [ + tags["codeguru-reviewer"] + ] + } +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "5.100.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/codegurureviewer/testdata/RepositoryAssociation/basic_v6.0.0/main_gen.tf b/internal/service/codegurureviewer/testdata/RepositoryAssociation/basic_v6.0.0/main_gen.tf new file mode 100644 index 000000000000..a2bd3bf4ea48 --- /dev/null +++ b/internal/service/codegurureviewer/testdata/RepositoryAssociation/basic_v6.0.0/main_gen.tf @@ -0,0 +1,38 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_codegurureviewer_repository_association" "test" { + repository { + codecommit { + name = aws_codecommit_repository.test.repository_name + } + } +} + +# testAccRepositoryAssociation_codecommit_repository + +resource "aws_codecommit_repository" "test" { + repository_name = var.rName + description = "This is a test description" + lifecycle { + ignore_changes = [ + tags["codeguru-reviewer"] + ] + } +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "6.0.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/codepipeline/codepipeline.go b/internal/service/codepipeline/codepipeline.go index 31e8604609cb..490c727bf5d8 100644 --- a/internal/service/codepipeline/codepipeline.go +++ b/internal/service/codepipeline/codepipeline.go @@ -653,7 +653,7 @@ func resourcePipelineCreate(ctx context.Context, d *schema.ResourceData, meta an Tags: getTagsIn(ctx), } - outputRaw, err := tfresource.RetryWhenIsAErrorMessageContains[*types.InvalidStructureException](ctx, propagationTimeout, func() (any, error) { + outputRaw, err := tfresource.RetryWhenIsAErrorMessageContains[any, *types.InvalidStructureException](ctx, propagationTimeout, func(ctx context.Context) (any, error) { return conn.CreatePipeline(ctx, input) }, "not authorized") diff --git a/internal/service/codepipeline/service_endpoint_resolver_gen.go b/internal/service/codepipeline/service_endpoint_resolver_gen.go index c0b3834345d7..d7b579b09757 100644 --- a/internal/service/codepipeline/service_endpoint_resolver_gen.go +++ b/internal/service/codepipeline/service_endpoint_resolver_gen.go @@ -62,7 +62,7 @@ func (r resolverV2) ResolveEndpoint(ctx context.Context, params codepipeline.End }) params.UseFIPS = aws.Bool(false) } else { - err = fmt.Errorf("looking up codepipeline endpoint %q: %s", hostname, err) + err = fmt.Errorf("looking up 
codepipeline endpoint %q: %w", hostname, err) return } } else { diff --git a/internal/service/codepipeline/service_endpoints_gen_test.go b/internal/service/codepipeline/service_endpoints_gen_test.go index 1d7fa4fed182..543c7203b9d6 100644 --- a/internal/service/codepipeline/service_endpoints_gen_test.go +++ b/internal/service/codepipeline/service_endpoints_gen_test.go @@ -521,7 +521,7 @@ func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { ) } -var errCancelOperation = fmt.Errorf("Test: Canceling request") +var errCancelOperation = errors.New("Test: Canceling request") func addCancelRequestMiddleware() func(*middleware.Stack) error { return func(stack *middleware.Stack) error { diff --git a/internal/service/codepipeline/service_package_gen.go b/internal/service/codepipeline/service_package_gen.go index e8336ec89a99..33887a074b44 100644 --- a/internal/service/codepipeline/service_package_gen.go +++ b/internal/service/codepipeline/service_package_gen.go @@ -7,7 +7,6 @@ import ( "unique" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/codepipeline" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -92,7 +91,7 @@ func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) ( func(o *codepipeline.Options) { if inContext, ok := conns.FromContext(ctx); ok && inContext.VCREnabled() { tflog.Info(ctx, "overriding retry behavior to immediately return VCR errors") - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(vcr.InteractionNotFoundRetryableFunc)) + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), vcr.InteractionNotFoundRetryableFunc) } }, withExtraOptions(ctx, p, config), diff --git a/internal/service/codepipeline/sweep.go b/internal/service/codepipeline/sweep.go index eb7ad5ecbf62..6c1f437322d9 100644 --- 
a/internal/service/codepipeline/sweep.go +++ b/internal/service/codepipeline/sweep.go @@ -25,7 +25,7 @@ func sweepPipelines(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %w", err) + return fmt.Errorf("getting client: %w", err) } input := &codepipeline.ListPipelinesInput{} conn := client.CodePipelineClient(ctx) diff --git a/internal/service/codepipeline/tags_gen.go b/internal/service/codepipeline/tags_gen.go index b44564f0d04f..a56143c6fb79 100644 --- a/internal/service/codepipeline/tags_gen.go +++ b/internal/service/codepipeline/tags_gen.go @@ -3,8 +3,8 @@ package codepipeline import ( "context" - "fmt" + "github.com/YakDriver/smarterr" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/codepipeline" awstypes "github.com/aws/aws-sdk-go-v2/service/codepipeline/types" @@ -31,7 +31,7 @@ func listTags(ctx context.Context, conn *codepipeline.Client, identifier string, page, err := pages.NextPage(ctx, optFns...) if err != nil { - return tftags.New(ctx, nil), err + return tftags.New(ctx, nil), smarterr.NewError(err) } output = append(output, page.Tags...) @@ -46,7 +46,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri tags, err := listTags(ctx, meta.(*conns.AWSClient).CodePipelineClient(ctx), identifier) if err != nil { - return err + return smarterr.NewError(err) } if inContext, ok := tftags.FromContext(ctx); ok { @@ -124,7 +124,7 @@ func updateTags(ctx context.Context, conn *codepipeline.Client, identifier strin _, err := conn.UntagResource(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("untagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } @@ -139,7 +139,7 @@ func updateTags(ctx context.Context, conn *codepipeline.Client, identifier strin _, err := conn.TagResource(ctx, &input, optFns...) 
if err != nil { - return fmt.Errorf("tagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } diff --git a/internal/service/codepipeline/testdata/Webhook/basic/main_gen.tf b/internal/service/codepipeline/testdata/Webhook/basic/main_gen.tf new file mode 100644 index 000000000000..2da9c0f09c0f --- /dev/null +++ b/internal/service/codepipeline/testdata/Webhook/basic/main_gen.tf @@ -0,0 +1,139 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resource "aws_codepipeline_webhook" "test" { + name = var.GITHUB_TOKEN + authentication = "GITHUB_HMAC" + target_action = "Source" + target_pipeline = aws_codepipeline.test.name + + authentication_configuration { + secret_token = "super-secret" + } + + filter { + json_path = "$.ref" + match_equals = "refs/head/{Branch}" + } +} + +# testAccWebhookConfig_base + +resource "aws_codepipeline" "test" { + name = var.rName + role_arn = aws_iam_role.test.arn + + artifact_store { + location = aws_s3_bucket.test.bucket + type = "S3" + + encryption_key { + id = "1234" + type = "KMS" + } + } + + stage { + name = "Source" + + action { + name = "Source" + category = "Source" + owner = "ThirdParty" + provider = "GitHub" + version = "1" + output_artifacts = ["test"] + + configuration = { + Owner = "lifesum-terraform" + Repo = "test" + Branch = "master" + OAuthToken = var.GITHUB_TOKEN + } + } + } + + stage { + name = "Build" + + action { + name = "Build" + category = "Build" + owner = "AWS" + provider = "CodeBuild" + input_artifacts = ["test"] + version = "1" + + configuration = { + ProjectName = "test" + } + } + } +} + +resource "aws_s3_bucket" "test" { + bucket = var.rName +} + +resource "aws_iam_role" "test" { + name = var.rName + + assume_role_policy = <" + { + ConfigDirectory: config.StaticDirectory("testdata/Webhook/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + 
ImportStateKind: resource.ImportCommandWithID, + ImportStateIdFunc: acctest.CrossRegionImportStateIdFunc(resourceName), + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + + // Step 3: Import command without appended "@" + { + ConfigDirectory: config.StaticDirectory("testdata/Webhook/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ImportStateKind: resource.ImportCommandWithID, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + + // Step 4: Import block with Import ID and appended "@" + { + ConfigDirectory: config.StaticDirectory("testdata/Webhook/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportStateIdFunc: acctest.CrossRegionImportStateIdFunc(resourceName), + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + }, + + // Step 5: Import block with Import ID and no appended "@" + { + ConfigDirectory: config.StaticDirectory("testdata/Webhook/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + 
plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + }, + + // Step 6: Import block with Resource Identity + { + ConfigDirectory: config.StaticDirectory("testdata/Webhook/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithResourceIdentity, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + }, + }, + }) +} + +func TestAccCodePipelineWebhook_Identity_ExistingResource(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.ListWebhookItem + acctest.SkipIfEnvVarNotSet(t, "GITHUB_TOKEN") + resourceName := "aws_codepipeline_webhook.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { + acctest.PreCheck(ctx, t) + testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.CodePipelineServiceID), + CheckDestroy: testAccCheckWebhookDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/Webhook/basic_v5.100.0/"), + ConfigVariables: 
config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckWebhookExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: v6.0 Identity error + { + ConfigDirectory: config.StaticDirectory("testdata/Webhook/basic_v6.0.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckWebhookExists(ctx, resourceName, &v), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.Null(), + }), + }, + }, + + // Step 3: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Webhook/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + }, + }) +} + +func TestAccCodePipelineWebhook_Identity_ExistingResource_NoRefresh_NoChange(t *testing.T) { + ctx 
:= acctest.Context(t) + + var v awstypes.ListWebhookItem + acctest.SkipIfEnvVarNotSet(t, "GITHUB_TOKEN") + resourceName := "aws_codepipeline_webhook.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { + acctest.PreCheck(ctx, t) + testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.CodePipelineServiceID), + CheckDestroy: testAccCheckWebhookDestroy(ctx), + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/Webhook/basic_v5.100.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckWebhookExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Webhook/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckWebhookExists(ctx, resourceName, &v), + ), + }, + }, + }) +} diff --git a/internal/service/codepipeline/webhook_test.go b/internal/service/codepipeline/webhook_test.go index 2d60742ad51c..2369f5b7a4f5 100644 --- a/internal/service/codepipeline/webhook_test.go +++ b/internal/service/codepipeline/webhook_test.go @@ -11,18 +11,10 @@ import ( "github.com/YakDriver/regexache" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/codepipeline/types" - "github.com/hashicorp/terraform-plugin-testing/compare" sdkacctest 
"github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" - "github.com/hashicorp/terraform-plugin-testing/knownvalue" - "github.com/hashicorp/terraform-plugin-testing/plancheck" - "github.com/hashicorp/terraform-plugin-testing/statecheck" "github.com/hashicorp/terraform-plugin-testing/terraform" - "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" - "github.com/hashicorp/terraform-plugin-testing/tfversion" "github.com/hashicorp/terraform-provider-aws/internal/acctest" - tfknownvalue "github.com/hashicorp/terraform-provider-aws/internal/acctest/knownvalue" - tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/envvar" tfcodepipeline "github.com/hashicorp/terraform-provider-aws/internal/service/codepipeline" @@ -100,80 +92,6 @@ func TestAccCodePipelineWebhook_basic(t *testing.T) { }) } -func TestAccCodePipelineWebhook_Identity_Basic(t *testing.T) { - ctx := acctest.Context(t) - ghToken := acctest.SkipIfEnvVarNotSet(t, envvar.GithubToken) - var v types.ListWebhookItem - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resourceName := "aws_codepipeline_webhook.test" - - resource.Test(t, resource.TestCase{ - PreCheck: func() { - acctest.PreCheck(ctx, t) - testAccPreCheck(ctx, t) - }, - ErrorCheck: acctest.ErrorCheck(t, names.CodePipelineServiceID), - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckWebhookDestroy(ctx), - Steps: []resource.TestStep{ - { - Config: testAccWebhookConfig_basic(rName, ghToken), - Check: resource.ComposeTestCheckFunc( - testAccCheckWebhookExists(ctx, resourceName, &v), - ), - ConfigStateChecks: []statecheck.StateCheck{ - tfstatecheck.ExpectRegionalARNFormat(resourceName, tfjsonpath.New(names.AttrARN), "codepipeline", "webhook:{name}"), - 
statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New(names.AttrARN), compare.ValuesSame()), - statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), - }, - }, - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} - -func TestAccCodePipelineWebhook_Identity_RegionOverride(t *testing.T) { - ctx := acctest.Context(t) - ghToken := acctest.SkipIfEnvVarNotSet(t, envvar.GithubToken) - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resourceName := "aws_codepipeline_webhook.test" - - resource.Test(t, resource.TestCase{ - PreCheck: func() { - acctest.PreCheck(ctx, t) - testAccPreCheck(ctx, t) - }, - ErrorCheck: acctest.ErrorCheck(t, names.CodePipelineServiceID), - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: acctest.CheckDestroyNoop, - Steps: []resource.TestStep{ - { - Config: testAccWebhookConfig_regionOverride(rName, ghToken), - ConfigStateChecks: []statecheck.StateCheck{ - tfstatecheck.ExpectRegionalARNAlternateRegionFormat(resourceName, tfjsonpath.New(names.AttrARN), "codepipeline", "webhook:{name}"), - statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New(names.AttrARN), compare.ValuesSame()), - statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), - }, - }, - { - ResourceName: resourceName, - ImportState: true, - ImportStateIdFunc: acctest.CrossRegionImportStateIdFunc(resourceName), - ImportStateVerify: true, - }, - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} - func TestAccCodePipelineWebhook_ipAuth(t *testing.T) { ctx := acctest.Context(t) ghToken := acctest.SkipIfEnvVarNotSet(t, envvar.GithubToken) @@ -366,88 +284,6 @@ func TestAccCodePipelineWebhook_UpdateAuthentication_secretToken(t 
*testing.T) { }) } -func TestAccCodePipelineWebhook_Identity_ExistingResource(t *testing.T) { - ctx := acctest.Context(t) - ghToken := acctest.SkipIfEnvVarNotSet(t, envvar.GithubToken) - var v types.ListWebhookItem - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resourceName := "aws_codepipeline_webhook.test" - - resource.Test(t, resource.TestCase{ - TerraformVersionChecks: []tfversion.TerraformVersionCheck{ - tfversion.SkipBelow(tfversion.Version1_12_0), - }, - PreCheck: func() { - acctest.PreCheck(ctx, t) - testAccPreCheck(ctx, t) - }, - ErrorCheck: acctest.ErrorCheck(t, names.CodePipelineServiceID), - CheckDestroy: testAccCheckWebhookDestroy(ctx), - Steps: []resource.TestStep{ - { - ExternalProviders: map[string]resource.ExternalProvider{ - "aws": { - Source: "hashicorp/aws", - VersionConstraint: "5.100.0", - }, - }, - Config: testAccWebhookConfig_basic(rName, ghToken), - Check: resource.ComposeTestCheckFunc( - testAccCheckWebhookExists(ctx, resourceName, &v), - ), - ConfigStateChecks: []statecheck.StateCheck{ - tfstatecheck.ExpectNoIdentity(resourceName), - }, - }, - { - ExternalProviders: map[string]resource.ExternalProvider{ - "aws": { - Source: "hashicorp/aws", - VersionConstraint: "6.0.0", - }, - }, - Config: testAccWebhookConfig_basic(rName, ghToken), - Check: resource.ComposeTestCheckFunc( - testAccCheckWebhookExists(ctx, resourceName, &v), - ), - ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - PostApplyPostRefresh: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - }, - ConfigStateChecks: []statecheck.StateCheck{ - statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ - names.AttrARN: knownvalue.Null(), - }), - }, - }, - { - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - Config: testAccWebhookConfig_basic(rName, ghToken), - Check: 
resource.ComposeTestCheckFunc( - testAccCheckWebhookExists(ctx, resourceName, &v), - ), - ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - PostApplyPostRefresh: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - }, - ConfigStateChecks: []statecheck.StateCheck{ - statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ - names.AttrARN: tfknownvalue.RegionalARNRegexp("codepipeline", regexache.MustCompile(fmt.Sprintf("webhook:%s", rName))), - }), - }, - }, - }, - }) -} - func testAccCheckWebhookExists(ctx context.Context, n string, v *types.ListWebhookItem) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] @@ -627,28 +463,6 @@ resource "aws_codepipeline_webhook" "test" { `, rName)) } -func testAccWebhookConfig_regionOverride(rName, githubToken string) string { - return acctest.ConfigCompose(testAccWebhookConfig_base(rName, githubToken), fmt.Sprintf(` -resource "aws_codepipeline_webhook" "test" { - region = %[2]q - - name = %[1]q - authentication = "GITHUB_HMAC" - target_action = "Source" - target_pipeline = aws_codepipeline.test.name - - authentication_configuration { - secret_token = "super-secret" - } - - filter { - json_path = "$.ref" - match_equals = "refs/head/{Branch}" - } -} -`, rName, acctest.AlternateRegion())) -} - func testAccWebhookConfig_filters(rName, githubToken string) string { return acctest.ConfigCompose(testAccWebhookConfig_base(rName, githubToken), fmt.Sprintf(` resource "aws_codepipeline_webhook" "test" { diff --git a/internal/service/codestarconnections/connection_identity_gen_test.go b/internal/service/codestarconnections/connection_identity_gen_test.go index b233b0f9abf8..a221b1353d2a 100644 --- a/internal/service/codestarconnections/connection_identity_gen_test.go +++ 
b/internal/service/codestarconnections/connection_identity_gen_test.go @@ -16,6 +16,7 @@ import ( "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" "github.com/hashicorp/terraform-plugin-testing/tfversion" "github.com/hashicorp/terraform-provider-aws/internal/acctest" + tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -26,7 +27,7 @@ func TestAccCodeStarConnectionsConnection_Identity_Basic(t *testing.T) { resourceName := "aws_codestarconnections_connection.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ TerraformVersionChecks: []tfversion.TerraformVersionCheck{ tfversion.SkipBelow(tfversion.Version1_12_0), }, @@ -47,6 +48,9 @@ func TestAccCodeStarConnectionsConnection_Identity_Basic(t *testing.T) { ConfigStateChecks: []statecheck.StateCheck{ statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New(names.AttrARN), compare.ValuesSame()), statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), }, }, @@ -108,7 +112,7 @@ func TestAccCodeStarConnectionsConnection_Identity_RegionOverride(t *testing.T) resourceName := "aws_codestarconnections_connection.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ TerraformVersionChecks: []tfversion.TerraformVersionCheck{ tfversion.SkipBelow(tfversion.Version1_12_0), }, @@ -127,6 +131,9 @@ func TestAccCodeStarConnectionsConnection_Identity_RegionOverride(t *testing.T) ConfigStateChecks: 
[]statecheck.StateCheck{ statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New(names.AttrARN), compare.ValuesSame()), statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), }, }, @@ -218,3 +225,131 @@ func TestAccCodeStarConnectionsConnection_Identity_RegionOverride(t *testing.T) }, }) } + +func TestAccCodeStarConnectionsConnection_Identity_ExistingResource(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.Connection + resourceName := "aws_codestarconnections_connection.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.CodeStarConnectionsServiceID), + CheckDestroy: testAccCheckConnectionDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/Connection/basic_v5.100.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckConnectionExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: v6.0 Identity error + { + ConfigDirectory: config.StaticDirectory("testdata/Connection/basic_v6.0.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckConnectionExists(ctx, resourceName, &v), + ), + 
ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.Null(), + }), + }, + }, + + // Step 3: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Connection/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + }, + }) +} + +func TestAccCodeStarConnectionsConnection_Identity_ExistingResource_NoRefresh_NoChange(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.Connection + resourceName := "aws_codestarconnections_connection.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.CodeStarConnectionsServiceID), + CheckDestroy: testAccCheckConnectionDestroy(ctx), + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: 
resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/Connection/basic_v5.100.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckConnectionExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Connection/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckConnectionExists(ctx, resourceName, &v), + ), + }, + }, + }) +} diff --git a/internal/service/codestarconnections/connection_test.go b/internal/service/codestarconnections/connection_test.go index 2325d7d5e9b6..bb0d4d297e73 100644 --- a/internal/service/codestarconnections/connection_test.go +++ b/internal/service/codestarconnections/connection_test.go @@ -12,14 +12,8 @@ import ( "github.com/aws/aws-sdk-go-v2/service/codestarconnections/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" - "github.com/hashicorp/terraform-plugin-testing/knownvalue" - "github.com/hashicorp/terraform-plugin-testing/plancheck" - "github.com/hashicorp/terraform-plugin-testing/statecheck" "github.com/hashicorp/terraform-plugin-testing/terraform" - "github.com/hashicorp/terraform-plugin-testing/tfversion" "github.com/hashicorp/terraform-provider-aws/internal/acctest" - tfknownvalue "github.com/hashicorp/terraform-provider-aws/internal/acctest/knownvalue" - tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" 
"github.com/hashicorp/terraform-provider-aws/internal/conns" tfcodestarconnections "github.com/hashicorp/terraform-provider-aws/internal/service/codestarconnections" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" @@ -173,87 +167,6 @@ func TestAccCodeStarConnectionsConnection_tags(t *testing.T) { }) } -func TestAccCodeStarConnectionsConnection_Identity_ExistingResource(t *testing.T) { - ctx := acctest.Context(t) - var v types.Connection - resourceName := "aws_codestarconnections_connection.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - - resource.ParallelTest(t, resource.TestCase{ - TerraformVersionChecks: []tfversion.TerraformVersionCheck{ - tfversion.SkipBelow(tfversion.Version1_12_0), - }, - PreCheck: func() { - acctest.PreCheck(ctx, t) - acctest.PreCheckPartitionHasService(t, names.CodeStarConnectionsEndpointID) - }, - ErrorCheck: acctest.ErrorCheck(t, names.CodeStarConnectionsServiceID), - CheckDestroy: testAccCheckConnectionDestroy(ctx), - Steps: []resource.TestStep{ - { - ExternalProviders: map[string]resource.ExternalProvider{ - "aws": { - Source: "hashicorp/aws", - VersionConstraint: "5.100.0", - }, - }, - Config: testAccConnectionConfig_basic(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckConnectionExists(ctx, resourceName, &v), - ), - ConfigStateChecks: []statecheck.StateCheck{ - tfstatecheck.ExpectNoIdentity(resourceName), - }, - }, - { - ExternalProviders: map[string]resource.ExternalProvider{ - "aws": { - Source: "hashicorp/aws", - VersionConstraint: "6.0.0", - }, - }, - Config: testAccConnectionConfig_basic(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckConnectionExists(ctx, resourceName, &v), - ), - ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - PostApplyPostRefresh: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, 
plancheck.ResourceActionNoop), - }, - }, - ConfigStateChecks: []statecheck.StateCheck{ - statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ - names.AttrARN: knownvalue.Null(), - }), - }, - }, - { - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - Config: testAccConnectionConfig_basic(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckConnectionExists(ctx, resourceName, &v), - ), - ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - PostApplyPostRefresh: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - }, - ConfigStateChecks: []statecheck.StateCheck{ - statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ - names.AttrARN: tfknownvalue.RegionalARNRegexp("codestar-connections", regexache.MustCompile("connection/.+")), - }), - }, - }, - }, - }) -} - func testAccCheckConnectionExists(ctx context.Context, n string, v *types.Connection) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] diff --git a/internal/service/codestarconnections/host_identity_gen_test.go b/internal/service/codestarconnections/host_identity_gen_test.go index e2f5caa91d2d..d5aa19a3d0cf 100644 --- a/internal/service/codestarconnections/host_identity_gen_test.go +++ b/internal/service/codestarconnections/host_identity_gen_test.go @@ -16,6 +16,7 @@ import ( "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" "github.com/hashicorp/terraform-plugin-testing/tfversion" "github.com/hashicorp/terraform-provider-aws/internal/acctest" + tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -26,7 +27,7 @@ func TestAccCodeStarConnectionsHost_Identity_Basic(t *testing.T) { resourceName := "aws_codestarconnections_host.test" rName := 
sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ TerraformVersionChecks: []tfversion.TerraformVersionCheck{ tfversion.SkipBelow(tfversion.Version1_12_0), }, @@ -47,6 +48,9 @@ func TestAccCodeStarConnectionsHost_Identity_Basic(t *testing.T) { ConfigStateChecks: []statecheck.StateCheck{ statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New(names.AttrARN), compare.ValuesSame()), statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), }, }, @@ -108,7 +112,7 @@ func TestAccCodeStarConnectionsHost_Identity_RegionOverride(t *testing.T) { resourceName := "aws_codestarconnections_host.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ TerraformVersionChecks: []tfversion.TerraformVersionCheck{ tfversion.SkipBelow(tfversion.Version1_12_0), }, @@ -127,6 +131,9 @@ func TestAccCodeStarConnectionsHost_Identity_RegionOverride(t *testing.T) { ConfigStateChecks: []statecheck.StateCheck{ statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New(names.AttrARN), compare.ValuesSame()), statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), }, }, @@ -218,3 +225,131 @@ func TestAccCodeStarConnectionsHost_Identity_RegionOverride(t *testing.T) { }, }) } + 
+func TestAccCodeStarConnectionsHost_Identity_ExistingResource(t *testing.T) { + ctx := acctest.Context(t) + + var v codestarconnections.GetHostOutput + resourceName := "aws_codestarconnections_host.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.CodeStarConnectionsServiceID), + CheckDestroy: testAccCheckHostDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/Host/basic_v5.100.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckHostExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: v6.0 Identity error + { + ConfigDirectory: config.StaticDirectory("testdata/Host/basic_v6.0.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckHostExists(ctx, resourceName, &v), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.Null(), + }), + }, + }, + + // Step 3: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Host/basic/"), + 
ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + }, + }) +} + +func TestAccCodeStarConnectionsHost_Identity_ExistingResource_NoRefresh_NoChange(t *testing.T) { + ctx := acctest.Context(t) + + var v codestarconnections.GetHostOutput + resourceName := "aws_codestarconnections_host.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.CodeStarConnectionsServiceID), + CheckDestroy: testAccCheckHostDestroy(ctx), + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/Host/basic_v5.100.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckHostExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: 
config.StaticDirectory("testdata/Host/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckHostExists(ctx, resourceName, &v), + ), + }, + }, + }) +} diff --git a/internal/service/codestarconnections/host_test.go b/internal/service/codestarconnections/host_test.go index 20736142aa76..00a215d84746 100644 --- a/internal/service/codestarconnections/host_test.go +++ b/internal/service/codestarconnections/host_test.go @@ -13,13 +13,8 @@ import ( "github.com/aws/aws-sdk-go-v2/service/codestarconnections/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" - "github.com/hashicorp/terraform-plugin-testing/knownvalue" - "github.com/hashicorp/terraform-plugin-testing/plancheck" - "github.com/hashicorp/terraform-plugin-testing/statecheck" "github.com/hashicorp/terraform-plugin-testing/terraform" "github.com/hashicorp/terraform-provider-aws/internal/acctest" - tfknownvalue "github.com/hashicorp/terraform-provider-aws/internal/acctest/knownvalue" - tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" "github.com/hashicorp/terraform-provider-aws/internal/conns" tfcodestarconnections "github.com/hashicorp/terraform-provider-aws/internal/service/codestarconnections" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" @@ -128,84 +123,6 @@ func TestAccCodeStarConnectionsHost_vpc(t *testing.T) { }) } -func TestAccCodeStarConnectionsHost_Identity_ExistingResource(t *testing.T) { - ctx := acctest.Context(t) - var v codestarconnections.GetHostOutput - resourceName := "aws_codestarconnections_host.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { - acctest.PreCheck(ctx, t) - acctest.PreCheckPartitionHasService(t, names.CodeStarConnectionsEndpointID) - }, - 
ErrorCheck: acctest.ErrorCheck(t, names.CodeStarConnectionsServiceID), - CheckDestroy: testAccCheckHostDestroy(ctx), - Steps: []resource.TestStep{ - { - ExternalProviders: map[string]resource.ExternalProvider{ - "aws": { - Source: "hashicorp/aws", - VersionConstraint: "5.100.0", - }, - }, - Config: testAccHostConfig_basic(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckHostExists(ctx, resourceName, &v), - ), - ConfigStateChecks: []statecheck.StateCheck{ - tfstatecheck.ExpectNoIdentity(resourceName), - }, - }, - { - ExternalProviders: map[string]resource.ExternalProvider{ - "aws": { - Source: "hashicorp/aws", - VersionConstraint: "6.0.0", - }, - }, - Config: testAccHostConfig_basic(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckHostExists(ctx, resourceName, &v), - ), - ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - PostApplyPostRefresh: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - }, - ConfigStateChecks: []statecheck.StateCheck{ - statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ - names.AttrARN: knownvalue.Null(), - }), - }, - }, - { - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - Config: testAccHostConfig_basic(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckHostExists(ctx, resourceName, &v), - ), - ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - PostApplyPostRefresh: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - }, - ConfigStateChecks: []statecheck.StateCheck{ - statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ - names.AttrARN: tfknownvalue.RegionalARNRegexp("codestar-connections", 
regexache.MustCompile("host/.+")), - }), - }, - }, - }, - }) -} - func testAccCheckHostExists(ctx context.Context, n string, v *codestarconnections.GetHostOutput) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] diff --git a/internal/service/codestarconnections/service_endpoint_resolver_gen.go b/internal/service/codestarconnections/service_endpoint_resolver_gen.go index 0309b4aabb71..451c75d2a63d 100644 --- a/internal/service/codestarconnections/service_endpoint_resolver_gen.go +++ b/internal/service/codestarconnections/service_endpoint_resolver_gen.go @@ -62,7 +62,7 @@ func (r resolverV2) ResolveEndpoint(ctx context.Context, params codestarconnecti }) params.UseFIPS = aws.Bool(false) } else { - err = fmt.Errorf("looking up codestarconnections endpoint %q: %s", hostname, err) + err = fmt.Errorf("looking up codestarconnections endpoint %q: %w", hostname, err) return } } else { diff --git a/internal/service/codestarconnections/service_endpoints_gen_test.go b/internal/service/codestarconnections/service_endpoints_gen_test.go index 6105907f9700..4981f221e057 100644 --- a/internal/service/codestarconnections/service_endpoints_gen_test.go +++ b/internal/service/codestarconnections/service_endpoints_gen_test.go @@ -521,7 +521,7 @@ func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { ) } -var errCancelOperation = fmt.Errorf("Test: Canceling request") +var errCancelOperation = errors.New("Test: Canceling request") func addCancelRequestMiddleware() func(*middleware.Stack) error { return func(stack *middleware.Stack) error { diff --git a/internal/service/codestarconnections/service_package_gen.go b/internal/service/codestarconnections/service_package_gen.go index 675968794411..8a762a6ea66b 100644 --- a/internal/service/codestarconnections/service_package_gen.go +++ b/internal/service/codestarconnections/service_package_gen.go @@ -7,7 +7,6 @@ import ( "unique" "github.com/aws/aws-sdk-go-v2/aws" - 
"github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/codestarconnections" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -94,7 +93,7 @@ func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) ( func(o *codestarconnections.Options) { if inContext, ok := conns.FromContext(ctx); ok && inContext.VCREnabled() { tflog.Info(ctx, "overriding retry behavior to immediately return VCR errors") - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(vcr.InteractionNotFoundRetryableFunc)) + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), vcr.InteractionNotFoundRetryableFunc) } }, withExtraOptions(ctx, p, config), diff --git a/internal/service/codestarconnections/sweep.go b/internal/service/codestarconnections/sweep.go index 4b37be4e63ca..20e22b5a428c 100644 --- a/internal/service/codestarconnections/sweep.go +++ b/internal/service/codestarconnections/sweep.go @@ -38,7 +38,7 @@ func sweepConnections(region string) error { } client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %w", err) + return fmt.Errorf("getting client: %w", err) } conn := client.CodeStarConnectionsClient(ctx) input := &codestarconnections.ListConnectionsInput{} @@ -83,7 +83,7 @@ func sweepHosts(region string) error { } client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %w", err) + return fmt.Errorf("getting client: %w", err) } conn := client.CodeStarConnectionsClient(ctx) input := &codestarconnections.ListHostsInput{} diff --git a/internal/service/codestarconnections/tags_gen.go b/internal/service/codestarconnections/tags_gen.go index 9e2a8828b890..6623baa9185d 100644 --- a/internal/service/codestarconnections/tags_gen.go +++ b/internal/service/codestarconnections/tags_gen.go @@ -3,8 +3,8 @@ package 
codestarconnections import ( "context" - "fmt" + "github.com/YakDriver/smarterr" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/codestarconnections" awstypes "github.com/aws/aws-sdk-go-v2/service/codestarconnections/types" @@ -27,7 +27,7 @@ func listTags(ctx context.Context, conn *codestarconnections.Client, identifier output, err := conn.ListTagsForResource(ctx, &input, optFns...) if err != nil { - return tftags.New(ctx, nil), err + return tftags.New(ctx, nil), smarterr.NewError(err) } return keyValueTags(ctx, output.Tags), nil @@ -39,7 +39,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri tags, err := listTags(ctx, meta.(*conns.AWSClient).CodeStarConnectionsClient(ctx), identifier) if err != nil { - return err + return smarterr.NewError(err) } if inContext, ok := tftags.FromContext(ctx); ok { @@ -117,7 +117,7 @@ func updateTags(ctx context.Context, conn *codestarconnections.Client, identifie _, err := conn.UntagResource(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("untagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } @@ -132,7 +132,7 @@ func updateTags(ctx context.Context, conn *codestarconnections.Client, identifie _, err := conn.TagResource(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("tagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } diff --git a/internal/service/codestarconnections/testdata/Connection/basic_v5.100.0/main_gen.tf b/internal/service/codestarconnections/testdata/Connection/basic_v5.100.0/main_gen.tf new file mode 100644 index 000000000000..f43f40854326 --- /dev/null +++ b/internal/service/codestarconnections/testdata/Connection/basic_v5.100.0/main_gen.tf @@ -0,0 +1,23 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_codestarconnections_connection" "test" { + name = var.rName + provider_type = "Bitbucket" +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "5.100.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/codestarconnections/testdata/Connection/basic_v6.0.0/main_gen.tf b/internal/service/codestarconnections/testdata/Connection/basic_v6.0.0/main_gen.tf new file mode 100644 index 000000000000..4131e3643767 --- /dev/null +++ b/internal/service/codestarconnections/testdata/Connection/basic_v6.0.0/main_gen.tf @@ -0,0 +1,23 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resource "aws_codestarconnections_connection" "test" { + name = var.rName + provider_type = "Bitbucket" +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "6.0.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/codestarconnections/testdata/Host/basic_v5.100.0/main_gen.tf b/internal/service/codestarconnections/testdata/Host/basic_v5.100.0/main_gen.tf new file mode 100644 index 000000000000..ce658131b4ea --- /dev/null +++ b/internal/service/codestarconnections/testdata/Host/basic_v5.100.0/main_gen.tf @@ -0,0 +1,24 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_codestarconnections_host" "test" { + name = var.rName + provider_endpoint = "https://example.com" + provider_type = "GitHubEnterpriseServer" +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "5.100.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/codestarconnections/testdata/Host/basic_v6.0.0/main_gen.tf b/internal/service/codestarconnections/testdata/Host/basic_v6.0.0/main_gen.tf new file mode 100644 index 000000000000..bd11c0fe5de4 --- /dev/null +++ b/internal/service/codestarconnections/testdata/Host/basic_v6.0.0/main_gen.tf @@ -0,0 +1,24 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resource "aws_codestarconnections_host" "test" { + name = var.rName + provider_endpoint = "https://example.com" + provider_type = "GitHubEnterpriseServer" +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "6.0.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/codestarnotifications/notification_rule.go b/internal/service/codestarnotifications/notification_rule.go index d7adb4ef1ef2..f52424dcc1ad 100644 --- a/internal/service/codestarnotifications/notification_rule.go +++ b/internal/service/codestarnotifications/notification_rule.go @@ -273,7 +273,7 @@ func cleanupNotificationRuleTargets(ctx context.Context, conn *codestarnotificat TargetAddress: aws.String(target[names.AttrAddress].(string)), } - _, err := tfresource.RetryWhenAWSErrMessageContains(ctx, targetSubscriptionTimeout, func() (any, error) { + _, err := tfresource.RetryWhenAWSErrMessageContains(ctx, targetSubscriptionTimeout, func(ctx context.Context) (any, error) { return conn.DeleteTarget(ctx, input) }, "ValidationException", 
notificationRuleErrorSubscribed) diff --git a/internal/service/codestarnotifications/notification_rule_identity_gen_test.go b/internal/service/codestarnotifications/notification_rule_identity_gen_test.go index 0d68809c28ca..9ceda87b7c46 100644 --- a/internal/service/codestarnotifications/notification_rule_identity_gen_test.go +++ b/internal/service/codestarnotifications/notification_rule_identity_gen_test.go @@ -15,15 +15,17 @@ import ( "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" "github.com/hashicorp/terraform-plugin-testing/tfversion" "github.com/hashicorp/terraform-provider-aws/internal/acctest" + tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" "github.com/hashicorp/terraform-provider-aws/names" ) func TestAccCodeStarNotificationsNotificationRule_Identity_Basic(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_codestarnotifications_notification_rule.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ TerraformVersionChecks: []tfversion.TerraformVersionCheck{ tfversion.SkipBelow(tfversion.Version1_12_0), }, @@ -44,6 +46,9 @@ func TestAccCodeStarNotificationsNotificationRule_Identity_Basic(t *testing.T) { ConfigStateChecks: []statecheck.StateCheck{ statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New(names.AttrARN), compare.ValuesSame()), statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), }, }, @@ -105,7 +110,7 @@ func TestAccCodeStarNotificationsNotificationRule_Identity_RegionOverride(t *tes resourceName := "aws_codestarnotifications_notification_rule.test" rName := 
sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ TerraformVersionChecks: []tfversion.TerraformVersionCheck{ tfversion.SkipBelow(tfversion.Version1_12_0), }, @@ -124,6 +129,9 @@ func TestAccCodeStarNotificationsNotificationRule_Identity_RegionOverride(t *tes ConfigStateChecks: []statecheck.StateCheck{ statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New(names.AttrARN), compare.ValuesSame()), statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), }, }, @@ -215,3 +223,129 @@ func TestAccCodeStarNotificationsNotificationRule_Identity_RegionOverride(t *tes }, }) } + +func TestAccCodeStarNotificationsNotificationRule_Identity_ExistingResource(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_codestarnotifications_notification_rule.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.CodeStarNotificationsServiceID), + CheckDestroy: testAccCheckNotificationRuleDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/NotificationRule/basic_v5.100.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckNotificationRuleExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ 
+ tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: v6.0 Identity error + { + ConfigDirectory: config.StaticDirectory("testdata/NotificationRule/basic_v6.0.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckNotificationRuleExists(ctx, resourceName), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.Null(), + }), + }, + }, + + // Step 3: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/NotificationRule/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + }, + }) +} + +func TestAccCodeStarNotificationsNotificationRule_Identity_ExistingResource_NoRefresh_NoChange(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_codestarnotifications_notification_rule.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + 
acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.CodeStarNotificationsServiceID), + CheckDestroy: testAccCheckNotificationRuleDestroy(ctx), + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/NotificationRule/basic_v5.100.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckNotificationRuleExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/NotificationRule/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckNotificationRuleExists(ctx, resourceName), + ), + }, + }, + }) +} diff --git a/internal/service/codestarnotifications/notification_rule_test.go b/internal/service/codestarnotifications/notification_rule_test.go index 4a0b08516353..7e492a5d6d06 100644 --- a/internal/service/codestarnotifications/notification_rule_test.go +++ b/internal/service/codestarnotifications/notification_rule_test.go @@ -14,13 +14,8 @@ import ( "github.com/aws/aws-sdk-go-v2/service/codestarnotifications/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" - "github.com/hashicorp/terraform-plugin-testing/knownvalue" - "github.com/hashicorp/terraform-plugin-testing/plancheck" - 
"github.com/hashicorp/terraform-plugin-testing/statecheck" "github.com/hashicorp/terraform-plugin-testing/terraform" "github.com/hashicorp/terraform-provider-aws/internal/acctest" - tfknownvalue "github.com/hashicorp/terraform-provider-aws/internal/acctest/knownvalue" - tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" "github.com/hashicorp/terraform-provider-aws/internal/conns" tfcodestarnotifications "github.com/hashicorp/terraform-provider-aws/internal/service/codestarnotifications" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" @@ -251,80 +246,6 @@ func TestAccCodeStarNotificationsNotificationRule_eventTypeIDs(t *testing.T) { }) } -func TestAccCodeStarNotificationsNotificationRule_Identity_ExistingResource(t *testing.T) { - ctx := acctest.Context(t) - resourceName := "aws_codestarnotifications_notification_rule.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, names.CodeStarNotificationsServiceID), - CheckDestroy: testAccCheckNotificationRuleDestroy(ctx), - Steps: []resource.TestStep{ - { - ExternalProviders: map[string]resource.ExternalProvider{ - "aws": { - Source: "hashicorp/aws", - VersionConstraint: "5.100.0", - }, - }, - Config: testAccNotificationRuleConfig_basic(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckNotificationRuleExists(ctx, resourceName), - ), - ConfigStateChecks: []statecheck.StateCheck{ - tfstatecheck.ExpectNoIdentity(resourceName), - }, - }, - { - ExternalProviders: map[string]resource.ExternalProvider{ - "aws": { - Source: "hashicorp/aws", - VersionConstraint: "6.0.0", - }, - }, - Config: testAccNotificationRuleConfig_basic(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckNotificationRuleExists(ctx, resourceName), - ), - ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: 
[]plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - PostApplyPostRefresh: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - }, - ConfigStateChecks: []statecheck.StateCheck{ - statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ - names.AttrARN: knownvalue.Null(), - }), - }, - }, - { - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - Config: testAccNotificationRuleConfig_basic(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckNotificationRuleExists(ctx, resourceName), - ), - ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - PostApplyPostRefresh: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - }, - ConfigStateChecks: []statecheck.StateCheck{ - statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ - names.AttrARN: tfknownvalue.RegionalARNRegexp("codestar-notifications", regexache.MustCompile("notificationrule/.+")), - }), - }, - }, - }, - }) -} - func testAccCheckNotificationRuleExists(ctx context.Context, n string) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] diff --git a/internal/service/codestarnotifications/service_endpoint_resolver_gen.go b/internal/service/codestarnotifications/service_endpoint_resolver_gen.go index 61b664d96438..6008a1692062 100644 --- a/internal/service/codestarnotifications/service_endpoint_resolver_gen.go +++ b/internal/service/codestarnotifications/service_endpoint_resolver_gen.go @@ -62,7 +62,7 @@ func (r resolverV2) ResolveEndpoint(ctx context.Context, params codestarnotifica }) params.UseFIPS = aws.Bool(false) } else { - err = fmt.Errorf("looking up codestarnotifications endpoint %q: %s", hostname, err) + err = fmt.Errorf("looking up 
codestarnotifications endpoint %q: %w", hostname, err) return } } else { diff --git a/internal/service/codestarnotifications/service_endpoints_gen_test.go b/internal/service/codestarnotifications/service_endpoints_gen_test.go index c56f4c48c06e..bad8f3da2b2e 100644 --- a/internal/service/codestarnotifications/service_endpoints_gen_test.go +++ b/internal/service/codestarnotifications/service_endpoints_gen_test.go @@ -521,7 +521,7 @@ func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { ) } -var errCancelOperation = fmt.Errorf("Test: Canceling request") +var errCancelOperation = errors.New("Test: Canceling request") func addCancelRequestMiddleware() func(*middleware.Stack) error { return func(stack *middleware.Stack) error { diff --git a/internal/service/codestarnotifications/service_package_gen.go b/internal/service/codestarnotifications/service_package_gen.go index 9bec72af4aa2..0ee290e7890d 100644 --- a/internal/service/codestarnotifications/service_package_gen.go +++ b/internal/service/codestarnotifications/service_package_gen.go @@ -7,7 +7,6 @@ import ( "unique" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/codestarnotifications" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -74,7 +73,7 @@ func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) ( func(o *codestarnotifications.Options) { if inContext, ok := conns.FromContext(ctx); ok && inContext.VCREnabled() { tflog.Info(ctx, "overriding retry behavior to immediately return VCR errors") - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(vcr.InteractionNotFoundRetryableFunc)) + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), vcr.InteractionNotFoundRetryableFunc) } }, withExtraOptions(ctx, p, config), diff --git a/internal/service/codestarnotifications/sweep.go 
b/internal/service/codestarnotifications/sweep.go index 322faf82e903..f52f83eebaab 100644 --- a/internal/service/codestarnotifications/sweep.go +++ b/internal/service/codestarnotifications/sweep.go @@ -25,7 +25,7 @@ func sweepNotificationRules(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.CodeStarNotificationsClient(ctx) input := &codestarnotifications.ListNotificationRulesInput{} diff --git a/internal/service/codestarnotifications/tags_gen.go b/internal/service/codestarnotifications/tags_gen.go index 698549fa1192..20747a5b7e07 100644 --- a/internal/service/codestarnotifications/tags_gen.go +++ b/internal/service/codestarnotifications/tags_gen.go @@ -3,8 +3,8 @@ package codestarnotifications import ( "context" - "fmt" + "github.com/YakDriver/smarterr" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/codestarnotifications" "github.com/hashicorp/terraform-plugin-log/tflog" @@ -26,7 +26,7 @@ func listTags(ctx context.Context, conn *codestarnotifications.Client, identifie output, err := conn.ListTagsForResource(ctx, &input, optFns...) if err != nil { - return tftags.New(ctx, nil), err + return tftags.New(ctx, nil), smarterr.NewError(err) } return keyValueTags(ctx, output.Tags), nil @@ -38,7 +38,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri tags, err := listTags(ctx, meta.(*conns.AWSClient).CodeStarNotificationsClient(ctx), identifier) if err != nil { - return err + return smarterr.NewError(err) } if inContext, ok := tftags.FromContext(ctx); ok { @@ -99,7 +99,7 @@ func updateTags(ctx context.Context, conn *codestarnotifications.Client, identif _, err := conn.UntagResource(ctx, &input, optFns...) 
if err != nil { - return fmt.Errorf("untagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } @@ -114,7 +114,7 @@ func updateTags(ctx context.Context, conn *codestarnotifications.Client, identif _, err := conn.TagResource(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("tagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } diff --git a/internal/service/codestarnotifications/testdata/NotificationRule/basic_v5.100.0/main_gen.tf b/internal/service/codestarnotifications/testdata/NotificationRule/basic_v5.100.0/main_gen.tf new file mode 100644 index 000000000000..1b064bd602a5 --- /dev/null +++ b/internal/service/codestarnotifications/testdata/NotificationRule/basic_v5.100.0/main_gen.tf @@ -0,0 +1,40 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resource "aws_codestarnotifications_notification_rule" "test" { + detail_type = "BASIC" + event_type_ids = ["codecommit-repository-comments-on-commits"] + name = var.rName + resource = aws_codecommit_repository.test.arn + status = "ENABLED" + + target { + address = aws_sns_topic.test.arn + } +} + +# testAccNotificationRuleConfig_base + +resource "aws_codecommit_repository" "test" { + repository_name = var.rName +} + +resource "aws_sns_topic" "test" { + name = var.rName +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "5.100.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/codestarnotifications/testdata/NotificationRule/basic_v6.0.0/main_gen.tf b/internal/service/codestarnotifications/testdata/NotificationRule/basic_v6.0.0/main_gen.tf new file mode 100644 index 000000000000..041fdf5aff58 --- /dev/null +++ b/internal/service/codestarnotifications/testdata/NotificationRule/basic_v6.0.0/main_gen.tf @@ -0,0 +1,40 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_codestarnotifications_notification_rule" "test" { + detail_type = "BASIC" + event_type_ids = ["codecommit-repository-comments-on-commits"] + name = var.rName + resource = aws_codecommit_repository.test.arn + status = "ENABLED" + + target { + address = aws_sns_topic.test.arn + } +} + +# testAccNotificationRuleConfig_base + +resource "aws_codecommit_repository" "test" { + repository_name = var.rName +} + +resource "aws_sns_topic" "test" { + name = var.rName +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "6.0.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/cognitoidentity/service_endpoint_resolver_gen.go b/internal/service/cognitoidentity/service_endpoint_resolver_gen.go index c14463b14c56..615a86f4c443 100644 --- a/internal/service/cognitoidentity/service_endpoint_resolver_gen.go +++ b/internal/service/cognitoidentity/service_endpoint_resolver_gen.go @@ -62,7 +62,7 @@ func (r resolverV2) ResolveEndpoint(ctx context.Context, params cognitoidentity. 
}) params.UseFIPS = aws.Bool(false) } else { - err = fmt.Errorf("looking up cognitoidentity endpoint %q: %s", hostname, err) + err = fmt.Errorf("looking up cognitoidentity endpoint %q: %w", hostname, err) return } } else { diff --git a/internal/service/cognitoidentity/service_endpoints_gen_test.go b/internal/service/cognitoidentity/service_endpoints_gen_test.go index 97d1c5286400..425d4095f71a 100644 --- a/internal/service/cognitoidentity/service_endpoints_gen_test.go +++ b/internal/service/cognitoidentity/service_endpoints_gen_test.go @@ -523,7 +523,7 @@ func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { ) } -var errCancelOperation = fmt.Errorf("Test: Canceling request") +var errCancelOperation = errors.New("Test: Canceling request") func addCancelRequestMiddleware() func(*middleware.Stack) error { return func(stack *middleware.Stack) error { diff --git a/internal/service/cognitoidentity/service_package_gen.go b/internal/service/cognitoidentity/service_package_gen.go index cad2d3a46132..8f92a7bfce63 100644 --- a/internal/service/cognitoidentity/service_package_gen.go +++ b/internal/service/cognitoidentity/service_package_gen.go @@ -7,7 +7,6 @@ import ( "unique" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/cognitoidentity" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -100,7 +99,7 @@ func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) ( func(o *cognitoidentity.Options) { if inContext, ok := conns.FromContext(ctx); ok && inContext.VCREnabled() { tflog.Info(ctx, "overriding retry behavior to immediately return VCR errors") - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(vcr.InteractionNotFoundRetryableFunc)) + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), vcr.InteractionNotFoundRetryableFunc) } }, 
withExtraOptions(ctx, p, config), diff --git a/internal/service/cognitoidentity/sweep.go b/internal/service/cognitoidentity/sweep.go index 80758d959874..9dd155adbd1f 100644 --- a/internal/service/cognitoidentity/sweep.go +++ b/internal/service/cognitoidentity/sweep.go @@ -25,7 +25,7 @@ func sweepIdentityPools(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("Error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } input := &cognitoidentity.ListIdentityPoolsInput{ MaxResults: aws.Int32(50), diff --git a/internal/service/cognitoidentity/tags_gen.go b/internal/service/cognitoidentity/tags_gen.go index 1f49b2cae771..bfc247c57c6a 100644 --- a/internal/service/cognitoidentity/tags_gen.go +++ b/internal/service/cognitoidentity/tags_gen.go @@ -3,8 +3,8 @@ package cognitoidentity import ( "context" - "fmt" + "github.com/YakDriver/smarterr" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/cognitoidentity" "github.com/hashicorp/terraform-plugin-log/tflog" @@ -26,7 +26,7 @@ func listTags(ctx context.Context, conn *cognitoidentity.Client, identifier stri output, err := conn.ListTagsForResource(ctx, &input, optFns...) if err != nil { - return tftags.New(ctx, nil), err + return tftags.New(ctx, nil), smarterr.NewError(err) } return keyValueTags(ctx, output.Tags), nil @@ -38,7 +38,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri tags, err := listTags(ctx, meta.(*conns.AWSClient).CognitoIdentityClient(ctx), identifier) if err != nil { - return err + return smarterr.NewError(err) } if inContext, ok := tftags.FromContext(ctx); ok { @@ -99,7 +99,7 @@ func updateTags(ctx context.Context, conn *cognitoidentity.Client, identifier st _, err := conn.UntagResource(ctx, &input, optFns...) 
if err != nil { - return fmt.Errorf("untagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } @@ -114,7 +114,7 @@ func updateTags(ctx context.Context, conn *cognitoidentity.Client, identifier st _, err := conn.TagResource(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("tagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } diff --git a/internal/service/cognitoidp/exports_test.go b/internal/service/cognitoidp/exports_test.go index 1f4448aec2a9..b4d0013ecf04 100644 --- a/internal/service/cognitoidp/exports_test.go +++ b/internal/service/cognitoidp/exports_test.go @@ -5,27 +5,31 @@ package cognitoidp // Exports for use in tests only. var ( - ResourceIdentityProvider = resourceIdentityProvider - ResourceManagedUserPoolClient = newManagedUserPoolClientResource - ResourceResourceServer = resourceResourceServer - ResourceRiskConfiguration = resourceRiskConfiguration - ResourceUser = resourceUser - ResourceUserGroup = resourceUserGroup - ResourceUserInGroup = resourceUserInGroup - ResourceUserPool = resourceUserPool - ResourceUserPoolClient = newUserPoolClientResource - ResourceUserPoolDomain = resourceUserPoolDomain - ResourceUserPoolUICustomization = resourceUserPoolUICustomization + ResourceIdentityProvider = resourceIdentityProvider + ResourceLogDeliveryConfiguration = newLogDeliveryConfigurationResource + ResourceManagedLoginBranding = newManagedLoginBrandingResource + ResourceManagedUserPoolClient = newManagedUserPoolClientResource + ResourceResourceServer = resourceResourceServer + ResourceRiskConfiguration = resourceRiskConfiguration + ResourceUser = resourceUser + ResourceUserGroup = resourceUserGroup + ResourceUserInGroup = resourceUserInGroup + ResourceUserPool = resourceUserPool + ResourceUserPoolClient = newUserPoolClientResource + ResourceUserPoolDomain = resourceUserPoolDomain + ResourceUserPoolUICustomization = resourceUserPoolUICustomization - FindGroupByTwoPartKey = findGroupByTwoPartKey - 
FindGroupUserByThreePartKey = findGroupUserByThreePartKey - FindIdentityProviderByTwoPartKey = findIdentityProviderByTwoPartKey - FindResourceServerByTwoPartKey = findResourceServerByTwoPartKey - FindRiskConfigurationByTwoPartKey = findRiskConfigurationByTwoPartKey - FindUserByTwoPartKey = findUserByTwoPartKey - FindUserPoolByID = findUserPoolByID - FindUserPoolClientByName = findUserPoolClientByName - FindUserPoolClientByTwoPartKey = findUserPoolClientByTwoPartKey - FindUserPoolDomain = findUserPoolDomain - FindUserPoolUICustomizationByTwoPartKey = findUserPoolUICustomizationByTwoPartKey + FindGroupByTwoPartKey = findGroupByTwoPartKey + FindGroupUserByThreePartKey = findGroupUserByThreePartKey + FindIdentityProviderByTwoPartKey = findIdentityProviderByTwoPartKey + FindLogDeliveryConfigurationByUserPoolID = findLogDeliveryConfigurationByUserPoolID + FindManagedLoginBrandingByThreePartKey = findManagedLoginBrandingByThreePartKey + FindResourceServerByTwoPartKey = findResourceServerByTwoPartKey + FindRiskConfigurationByTwoPartKey = findRiskConfigurationByTwoPartKey + FindUserByTwoPartKey = findUserByTwoPartKey + FindUserPoolByID = findUserPoolByID + FindUserPoolClientByName = findUserPoolClientByName + FindUserPoolClientByTwoPartKey = findUserPoolClientByTwoPartKey + FindUserPoolDomain = findUserPoolDomain + FindUserPoolUICustomizationByTwoPartKey = findUserPoolUICustomizationByTwoPartKey ) diff --git a/internal/service/cognitoidp/generate.go b/internal/service/cognitoidp/generate.go index 0d3b6c2fd9d2..8365172bc8c8 100644 --- a/internal/service/cognitoidp/generate.go +++ b/internal/service/cognitoidp/generate.go @@ -4,6 +4,7 @@ //go:generate go run ../../generate/tags/main.go -ListTags -ServiceTagsMap -UpdateTags -KVTValues -EmptyMap //go:generate go run ../../generate/servicepackage/main.go //go:generate go run ../../generate/tagstests/main.go +//go:generate go run ../../generate/identitytests/main.go // ONLY generate directives and package declaration! 
Do not add anything else to this file. package cognitoidp diff --git a/internal/service/cognitoidp/log_delivery_configuration.go b/internal/service/cognitoidp/log_delivery_configuration.go new file mode 100644 index 000000000000..325e05448a4d --- /dev/null +++ b/internal/service/cognitoidp/log_delivery_configuration.go @@ -0,0 +1,303 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package cognitoidp + +import ( + "context" + "errors" + + "github.com/YakDriver/smarterr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/cognitoidentityprovider" + awstypes "github.com/aws/aws-sdk-go-v2/service/cognitoidentityprovider/types" + "github.com/hashicorp/terraform-plugin-framework-validators/listvalidator" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" + "github.com/hashicorp/terraform-provider-aws/internal/errs" + "github.com/hashicorp/terraform-provider-aws/internal/errs/fwdiag" + "github.com/hashicorp/terraform-provider-aws/internal/framework" + "github.com/hashicorp/terraform-provider-aws/internal/framework/flex" + fwtypes "github.com/hashicorp/terraform-provider-aws/internal/framework/types" + "github.com/hashicorp/terraform-provider-aws/internal/smerr" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + "github.com/hashicorp/terraform-provider-aws/names" +) + +// @FrameworkResource("aws_cognito_log_delivery_configuration", name="Log Delivery Configuration") +// @IdentityAttribute("user_pool_id") +// 
@Testing(existsType="github.com/aws/aws-sdk-go-v2/service/cognitoidentityprovider/types;awstypes;awstypes.LogDeliveryConfigurationType") +// @Testing(importStateIdFunc="testAccLogDeliveryConfigurationImportStateIdFunc") +// @Testing(importStateIdAttribute="user_pool_id") +// @Testing(hasNoPreExistingResource=true) +func newLogDeliveryConfigurationResource(context.Context) (resource.ResourceWithConfigure, error) { + r := &logDeliveryConfigurationResource{} + return r, nil +} + +type logDeliveryConfigurationResource struct { + framework.ResourceWithModel[resourceLogDeliveryConfigurationModel] + framework.WithImportByIdentity +} + +func (r *logDeliveryConfigurationResource) Schema(ctx context.Context, req resource.SchemaRequest, resp *resource.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: map[string]schema.Attribute{ + names.AttrUserPoolID: schema.StringAttribute{ + Required: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + }, + }, + Blocks: map[string]schema.Block{ + "log_configurations": schema.ListNestedBlock{ + CustomType: fwtypes.NewListNestedObjectTypeOf[logConfigurationModel](ctx), + Validators: []validator.List{ + listvalidator.SizeAtLeast(1), + listvalidator.IsRequired(), + }, + NestedObject: schema.NestedBlockObject{ + Attributes: map[string]schema.Attribute{ + "event_source": schema.StringAttribute{ + Required: true, + CustomType: fwtypes.StringEnumType[awstypes.EventSourceName](), + }, + "log_level": schema.StringAttribute{ + Required: true, + CustomType: fwtypes.StringEnumType[awstypes.LogLevel](), + }, + }, + Blocks: map[string]schema.Block{ + "cloud_watch_logs_configuration": schema.ListNestedBlock{ + CustomType: fwtypes.NewListNestedObjectTypeOf[cloudWatchLogsConfigurationModel](ctx), + Validators: []validator.List{ + listvalidator.SizeAtMost(1), + }, + NestedObject: schema.NestedBlockObject{ + Attributes: map[string]schema.Attribute{ + "log_group_arn": schema.StringAttribute{ + CustomType: 
fwtypes.ARNType, + Optional: true, + }, + }, + }, + }, + "firehose_configuration": schema.ListNestedBlock{ + CustomType: fwtypes.NewListNestedObjectTypeOf[firehoseConfigurationModel](ctx), + Validators: []validator.List{ + listvalidator.SizeAtMost(1), + }, + NestedObject: schema.NestedBlockObject{ + Attributes: map[string]schema.Attribute{ + names.AttrStreamARN: schema.StringAttribute{ + CustomType: fwtypes.ARNType, + Optional: true, + }, + }, + }, + }, + "s3_configuration": schema.ListNestedBlock{ + CustomType: fwtypes.NewListNestedObjectTypeOf[s3ConfigurationModel](ctx), + Validators: []validator.List{ + listvalidator.SizeAtMost(1), + }, + NestedObject: schema.NestedBlockObject{ + Attributes: map[string]schema.Attribute{ + "bucket_arn": schema.StringAttribute{ + CustomType: fwtypes.ARNType, + Optional: true, + }, + }, + }, + }, + }, + }, + }, + }, + } +} + +func (r *logDeliveryConfigurationResource) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { + conn := r.Meta().CognitoIDPClient(ctx) + + var plan resourceLogDeliveryConfigurationModel + resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...) + if resp.Diagnostics.HasError() { + return + } + + var input cognitoidentityprovider.SetLogDeliveryConfigurationInput + resp.Diagnostics.Append(flex.Expand(ctx, plan, &input)...) + if resp.Diagnostics.HasError() { + return + } + + out, err := conn.SetLogDeliveryConfiguration(ctx, &input) + if err != nil { + smerr.AddError(ctx, &resp.Diagnostics, err, smerr.ID, plan.UserPoolID.String()) + return + } + if out == nil || out.LogDeliveryConfiguration == nil { + smerr.AddError(ctx, &resp.Diagnostics, errors.New("empty output"), smerr.ID, plan.UserPoolID.String()) + return + } + + resp.Diagnostics.Append(flex.Flatten(ctx, out.LogDeliveryConfiguration, &plan)...) + if resp.Diagnostics.HasError() { + return + } + + resp.Diagnostics.Append(resp.State.Set(ctx, plan)...) 
+} + +func (r *logDeliveryConfigurationResource) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { + conn := r.Meta().CognitoIDPClient(ctx) + + var state resourceLogDeliveryConfigurationModel + resp.Diagnostics.Append(req.State.Get(ctx, &state)...) + if resp.Diagnostics.HasError() { + return + } + + out, err := findLogDeliveryConfigurationByUserPoolID(ctx, conn, state.UserPoolID.ValueString()) + if tfresource.NotFound(err) { + resp.Diagnostics.Append(fwdiag.NewResourceNotFoundWarningDiagnostic(err)) + resp.State.RemoveResource(ctx) + return + } + if err != nil { + smerr.AddError(ctx, &resp.Diagnostics, err, smerr.ID, state.UserPoolID.String()) + return + } + + resp.Diagnostics.Append(flex.Flatten(ctx, out, &state)...) + if resp.Diagnostics.HasError() { + return + } + + resp.Diagnostics.Append(resp.State.Set(ctx, &state)...) +} + +func (r *logDeliveryConfigurationResource) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) { + conn := r.Meta().CognitoIDPClient(ctx) + + var plan, state resourceLogDeliveryConfigurationModel + resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...) + resp.Diagnostics.Append(req.State.Get(ctx, &state)...) + if resp.Diagnostics.HasError() { + return + } + + diff, d := flex.Diff(ctx, plan, state) + resp.Diagnostics.Append(d...) + if resp.Diagnostics.HasError() { + return + } + + if diff.HasChanges() { + var input cognitoidentityprovider.SetLogDeliveryConfigurationInput + resp.Diagnostics.Append(flex.Expand(ctx, plan, &input)...) 
+ if resp.Diagnostics.HasError() { + return + } + + out, err := conn.SetLogDeliveryConfiguration(ctx, &input) + if err != nil { + smerr.AddError(ctx, &resp.Diagnostics, err, smerr.ID, plan.UserPoolID.String()) + return + } + if out == nil || out.LogDeliveryConfiguration == nil { + smerr.AddError(ctx, &resp.Diagnostics, errors.New("empty output"), smerr.ID, plan.UserPoolID.String()) + return + } + + resp.Diagnostics.Append(flex.Flatten(ctx, out.LogDeliveryConfiguration, &plan)...) + if resp.Diagnostics.HasError() { + return + } + } + + resp.Diagnostics.Append(resp.State.Set(ctx, &plan)...) +} + +func (r *logDeliveryConfigurationResource) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { + conn := r.Meta().CognitoIDPClient(ctx) + + var state resourceLogDeliveryConfigurationModel + resp.Diagnostics.Append(req.State.Get(ctx, &state)...) + if resp.Diagnostics.HasError() { + return + } + + // set an empty configuration + input := cognitoidentityprovider.SetLogDeliveryConfigurationInput{ + UserPoolId: state.UserPoolID.ValueStringPointer(), + LogConfigurations: []awstypes.LogConfigurationType{}, + } + + _, err := conn.SetLogDeliveryConfiguration(ctx, &input) + if err != nil { + if errs.IsA[*awstypes.ResourceNotFoundException](err) { + return + } + + smerr.AddError(ctx, &resp.Diagnostics, err, smerr.ID, state.UserPoolID.String()) + return + } +} + +func findLogDeliveryConfigurationByUserPoolID(ctx context.Context, conn *cognitoidentityprovider.Client, userPoolID string) (*awstypes.LogDeliveryConfigurationType, error) { + input := cognitoidentityprovider.GetLogDeliveryConfigurationInput{ + UserPoolId: aws.String(userPoolID), + } + + out, err := conn.GetLogDeliveryConfiguration(ctx, &input) + if err != nil { + if errs.IsA[*awstypes.ResourceNotFoundException](err) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: &input, + } + } + + return nil, smarterr.NewError(err) + } + + if out == nil || 
out.LogDeliveryConfiguration == nil { + return nil, smarterr.NewError(tfresource.NewEmptyResultError(&input)) + } + + return out.LogDeliveryConfiguration, nil +} + +type resourceLogDeliveryConfigurationModel struct { + framework.WithRegionModel + UserPoolID types.String `tfsdk:"user_pool_id"` + LogConfigurations fwtypes.ListNestedObjectValueOf[logConfigurationModel] `tfsdk:"log_configurations"` +} + +type logConfigurationModel struct { + EventSource fwtypes.StringEnum[awstypes.EventSourceName] `tfsdk:"event_source"` + LogLevel fwtypes.StringEnum[awstypes.LogLevel] `tfsdk:"log_level"` + CloudWatchLogsConfiguration fwtypes.ListNestedObjectValueOf[cloudWatchLogsConfigurationModel] `tfsdk:"cloud_watch_logs_configuration"` + FirehoseConfiguration fwtypes.ListNestedObjectValueOf[firehoseConfigurationModel] `tfsdk:"firehose_configuration"` + S3Configuration fwtypes.ListNestedObjectValueOf[s3ConfigurationModel] `tfsdk:"s3_configuration"` +} + +type cloudWatchLogsConfigurationModel struct { + LogGroupArn fwtypes.ARN `tfsdk:"log_group_arn"` +} + +type firehoseConfigurationModel struct { + StreamArn fwtypes.ARN `tfsdk:"stream_arn"` +} + +type s3ConfigurationModel struct { + BucketArn fwtypes.ARN `tfsdk:"bucket_arn"` +} diff --git a/internal/service/cognitoidp/log_delivery_configuration_identity_gen_test.go b/internal/service/cognitoidp/log_delivery_configuration_identity_gen_test.go new file mode 100644 index 000000000000..82771ba952f3 --- /dev/null +++ b/internal/service/cognitoidp/log_delivery_configuration_identity_gen_test.go @@ -0,0 +1,196 @@ +// Code generated by internal/generate/identitytests/main.go; DO NOT EDIT. 
+ +package cognitoidp_test + +import ( + "testing" + + awstypes "github.com/aws/aws-sdk-go-v2/service/cognitoidentityprovider/types" + "github.com/hashicorp/terraform-plugin-testing/config" + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/knownvalue" + "github.com/hashicorp/terraform-plugin-testing/plancheck" + "github.com/hashicorp/terraform-plugin-testing/statecheck" + "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" + "github.com/hashicorp/terraform-plugin-testing/tfversion" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + tfknownvalue "github.com/hashicorp/terraform-provider-aws/internal/acctest/knownvalue" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccCognitoIDPLogDeliveryConfiguration_Identity_Basic(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.LogDeliveryConfigurationType + resourceName := "aws_cognito_log_delivery_configuration.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.CognitoIDPServiceID), + CheckDestroy: testAccCheckLogDeliveryConfigurationDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + // Step 1: Setup + { + ConfigDirectory: config.StaticDirectory("testdata/LogDeliveryConfiguration/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckLogDeliveryConfigurationExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, 
tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + names.AttrUserPoolID: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrUserPoolID)), + }, + }, + + // Step 2: Import command + { + ConfigDirectory: config.StaticDirectory("testdata/LogDeliveryConfiguration/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ImportStateKind: resource.ImportCommandWithID, + ImportStateIdFunc: testAccLogDeliveryConfigurationImportStateIdFunc(resourceName), + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: names.AttrUserPoolID, + }, + + // Step 3: Import block with Import ID + { + ConfigDirectory: config.StaticDirectory("testdata/LogDeliveryConfiguration/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportStateIdFunc: testAccLogDeliveryConfigurationImportStateIdFunc(resourceName), + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrUserPoolID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + }, + }, + }, + + // Step 4: Import block with Resource Identity + { + ConfigDirectory: config.StaticDirectory("testdata/LogDeliveryConfiguration/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithResourceIdentity, + 
ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrUserPoolID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + }, + }, + }, + }, + }) +} + +func TestAccCognitoIDPLogDeliveryConfiguration_Identity_RegionOverride(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_cognito_log_delivery_configuration.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.CognitoIDPServiceID), + CheckDestroy: acctest.CheckDestroyNoop, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + // Step 1: Setup + { + ConfigDirectory: config.StaticDirectory("testdata/LogDeliveryConfiguration/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.AlternateRegion()), + names.AttrUserPoolID: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrUserPoolID)), + }, + }, + + // Step 2: Import command + { + ConfigDirectory: config.StaticDirectory("testdata/LogDeliveryConfiguration/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: 
config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ImportStateKind: resource.ImportCommandWithID, + ImportStateIdFunc: acctest.CrossRegionImportStateIdFuncAdapter(resourceName, testAccLogDeliveryConfigurationImportStateIdFunc), + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: names.AttrUserPoolID, + }, + + // Step 3: Import block with Import ID + { + ConfigDirectory: config.StaticDirectory("testdata/LogDeliveryConfiguration/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportStateIdFunc: acctest.CrossRegionImportStateIdFuncAdapter(resourceName, testAccLogDeliveryConfigurationImportStateIdFunc), + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrUserPoolID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + }, + + // Step 4: Import block with Resource Identity + { + ConfigDirectory: config.StaticDirectory("testdata/LogDeliveryConfiguration/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithResourceIdentity, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrUserPoolID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), 
knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + }, + }, + }) +} diff --git a/internal/service/cognitoidp/log_delivery_configuration_test.go b/internal/service/cognitoidp/log_delivery_configuration_test.go new file mode 100644 index 000000000000..c71a99898e88 --- /dev/null +++ b/internal/service/cognitoidp/log_delivery_configuration_test.go @@ -0,0 +1,401 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package cognitoidp_test + +import ( + "context" + "errors" + "fmt" + "testing" + + awstypes "github.com/aws/aws-sdk-go-v2/service/cognitoidentityprovider/types" + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/terraform" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/create" + "github.com/hashicorp/terraform-provider-aws/internal/errs" + tfcognitoidp "github.com/hashicorp/terraform-provider-aws/internal/service/cognitoidp" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccCognitoIDPLogDeliveryConfiguration_basic(t *testing.T) { + ctx := acctest.Context(t) + var logDeliveryConfiguration awstypes.LogDeliveryConfigurationType + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_cognito_log_delivery_configuration.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.CognitoIDPServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckLogDeliveryConfigurationDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccLogDeliveryConfigurationConfig_basic(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckLogDeliveryConfigurationExists(ctx, 
resourceName, &logDeliveryConfiguration), + resource.TestCheckResourceAttrPair("aws_cognito_user_pool.test", names.AttrID, resourceName, names.AttrUserPoolID), + resource.TestCheckResourceAttr(resourceName, "log_configurations.#", "1"), + resource.TestCheckResourceAttr(resourceName, "log_configurations.0.event_source", "userNotification"), + resource.TestCheckResourceAttr(resourceName, "log_configurations.0.log_level", "ERROR"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: testAccLogDeliveryConfigurationImportStateIdFunc(resourceName), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: names.AttrUserPoolID, + }, + }, + }) +} + +func TestAccCognitoIDPLogDeliveryConfiguration_update(t *testing.T) { + ctx := acctest.Context(t) + var logDeliveryConfiguration awstypes.LogDeliveryConfigurationType + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_cognito_log_delivery_configuration.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.CognitoIDPServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckLogDeliveryConfigurationDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccLogDeliveryConfigurationConfig_basic(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckLogDeliveryConfigurationExists(ctx, resourceName, &logDeliveryConfiguration), + resource.TestCheckResourceAttr(resourceName, "log_configurations.#", "1"), + resource.TestCheckResourceAttr(resourceName, "log_configurations.0.event_source", "userNotification"), + resource.TestCheckResourceAttr(resourceName, "log_configurations.0.log_level", "ERROR"), + ), + }, + { + Config: testAccLogDeliveryConfigurationConfig_firehose(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckLogDeliveryConfigurationExists(ctx, resourceName, &logDeliveryConfiguration), + 
resource.TestCheckResourceAttr(resourceName, "log_configurations.#", "2"), + resource.TestCheckResourceAttr(resourceName, "log_configurations.0.event_source", "userNotification"), + resource.TestCheckResourceAttr(resourceName, "log_configurations.0.log_level", "INFO"), + resource.TestCheckResourceAttr(resourceName, "log_configurations.1.event_source", "userAuthEvents"), + resource.TestCheckResourceAttr(resourceName, "log_configurations.1.log_level", "ERROR"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: testAccLogDeliveryConfigurationImportStateIdFunc(resourceName), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: names.AttrUserPoolID, + }, + }, + }) +} + +func TestAccCognitoIDPLogDeliveryConfiguration_logLevelUpdate(t *testing.T) { + ctx := acctest.Context(t) + var logDeliveryConfiguration awstypes.LogDeliveryConfigurationType + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_cognito_log_delivery_configuration.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.CognitoIDPServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckLogDeliveryConfigurationDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccLogDeliveryConfigurationConfig_logLevel(rName, "ERROR"), + Check: resource.ComposeTestCheckFunc( + testAccCheckLogDeliveryConfigurationExists(ctx, resourceName, &logDeliveryConfiguration), + resource.TestCheckResourceAttr(resourceName, "log_configurations.0.log_level", "ERROR"), + ), + }, + { + Config: testAccLogDeliveryConfigurationConfig_logLevel(rName, "INFO"), + Check: resource.ComposeTestCheckFunc( + testAccCheckLogDeliveryConfigurationExists(ctx, resourceName, &logDeliveryConfiguration), + resource.TestCheckResourceAttr(resourceName, "log_configurations.0.log_level", "INFO"), + ), + }, + }, + }) +} + +func 
TestAccCognitoIDPLogDeliveryConfiguration_disappears(t *testing.T) { + ctx := acctest.Context(t) + var logDeliveryConfiguration awstypes.LogDeliveryConfigurationType + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_cognito_log_delivery_configuration.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.CognitoIDPServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckLogDeliveryConfigurationDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccLogDeliveryConfigurationConfig_basic(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckLogDeliveryConfigurationExists(ctx, resourceName, &logDeliveryConfiguration), + acctest.CheckFrameworkResourceDisappears(ctx, acctest.Provider, tfcognitoidp.ResourceLogDeliveryConfiguration, resourceName), + ), + ExpectNonEmptyPlan: true, + }, + }, + }) +} + +func TestAccCognitoIDPLogDeliveryConfiguration_firehose(t *testing.T) { + ctx := acctest.Context(t) + var logDeliveryConfiguration awstypes.LogDeliveryConfigurationType + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_cognito_log_delivery_configuration.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.CognitoIDPServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckLogDeliveryConfigurationDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccLogDeliveryConfigurationConfig_firehose(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckLogDeliveryConfigurationExists(ctx, resourceName, &logDeliveryConfiguration), + resource.TestCheckResourceAttr(resourceName, "log_configurations.#", "2"), + resource.TestCheckResourceAttr(resourceName, "log_configurations.0.event_source", "userNotification"), + 
resource.TestCheckResourceAttr(resourceName, "log_configurations.0.log_level", "INFO"), + resource.TestCheckResourceAttr(resourceName, "log_configurations.1.event_source", "userAuthEvents"), + resource.TestCheckResourceAttr(resourceName, "log_configurations.1.log_level", "ERROR"), + resource.TestCheckResourceAttrPair(resourceName, "log_configurations.1.firehose_configuration.0.stream_arn", "aws_kinesis_firehose_delivery_stream.test", names.AttrARN), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: testAccLogDeliveryConfigurationImportStateIdFunc(resourceName), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: names.AttrUserPoolID, + }, + }, + }) +} + +func testAccCheckLogDeliveryConfigurationDestroy(ctx context.Context) resource.TestCheckFunc { + return func(s *terraform.State) error { + conn := acctest.Provider.Meta().(*conns.AWSClient).CognitoIDPClient(ctx) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "aws_cognito_log_delivery_configuration" { + continue + } + + out, err := tfcognitoidp.FindLogDeliveryConfigurationByUserPoolID(ctx, conn, rs.Primary.Attributes[names.AttrUserPoolID]) + + if errs.IsA[*awstypes.ResourceNotFoundException](err) || (out != nil && out.LogConfigurations == nil) { + continue + } + + if err != nil { + return err + } + + return create.Error(names.CognitoIDP, create.ErrActionCheckingDestroyed, "Log Delivery Configuration", rs.Primary.ID, errors.New("not destroyed")) + } + + return nil + } +} + +func testAccCheckLogDeliveryConfigurationExists(ctx context.Context, name string, logDeliveryConfiguration *awstypes.LogDeliveryConfigurationType) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[name] + if !ok { + return create.Error(names.CognitoIDP, create.ErrActionCheckingExistence, "Log Delivery Configuration", name, errors.New("not found")) + } + + if rs.Primary.ID == "" { + return create.Error(names.CognitoIDP, 
create.ErrActionCheckingExistence, "Log Delivery Configuration", name, errors.New("not set")) + } + + conn := acctest.Provider.Meta().(*conns.AWSClient).CognitoIDPClient(ctx) + + resp, err := tfcognitoidp.FindLogDeliveryConfigurationByUserPoolID(ctx, conn, rs.Primary.Attributes[names.AttrUserPoolID]) + + if err != nil { + return create.Error(names.CognitoIDP, create.ErrActionCheckingExistence, "Log Delivery Configuration", rs.Primary.ID, err) + } + + *logDeliveryConfiguration = *resp + + return nil + } +} + +func testAccLogDeliveryConfigurationImportStateIdFunc(resourceName string) resource.ImportStateIdFunc { + return func(s *terraform.State) (string, error) { + rs, ok := s.RootModule().Resources[resourceName] + if !ok { + return "", fmt.Errorf("Not found: %s", resourceName) + } + + return rs.Primary.Attributes[names.AttrUserPoolID], nil + } +} + +func testAccLogDeliveryConfigurationConfig_basic(rName string) string { + return fmt.Sprintf(` +resource "aws_cognito_user_pool" "test" { + name = %[1]q +} + +resource "aws_cloudwatch_log_group" "test" { + name = %[1]q +} + +resource "aws_cognito_log_delivery_configuration" "test" { + user_pool_id = aws_cognito_user_pool.test.id + + log_configurations { + event_source = "userNotification" + log_level = "ERROR" + + cloud_watch_logs_configuration { + log_group_arn = aws_cloudwatch_log_group.test.arn + } + } +} +`, rName) +} + +func testAccLogDeliveryConfigurationConfig_firehose(rName string) string { + return fmt.Sprintf(` +resource "aws_cognito_user_pool" "test" { + name = %[1]q +} + +resource "aws_cloudwatch_log_group" "test" { + name = %[1]q +} + +resource "aws_s3_bucket" "test" { + bucket = %[1]q + force_destroy = true +} + +resource "aws_iam_role" "firehose" { + name = "%[1]s-firehose" + + assume_role_policy = jsonencode({ + Version = "2012-10-17" + Statement = [ + { + Action = "sts:AssumeRole" + Effect = "Allow" + Principal = { + Service = "firehose.amazonaws.com" + } + } + ] + }) +} + +resource "aws_iam_role_policy" 
"firehose" { + name = "%[1]s-firehose" + role = aws_iam_role.firehose.id + + policy = jsonencode({ + Version = "2012-10-17" + Statement = [ + { + Effect = "Allow" + Action = [ + "s3:AbortMultipartUpload", + "s3:GetBucketLocation", + "s3:GetObject", + "s3:ListBucket", + "s3:ListBucketMultipartUploads", + "s3:PutObject" + ] + Resource = [ + aws_s3_bucket.test.arn, + "${aws_s3_bucket.test.arn}/*" + ] + } + ] + }) +} + +resource "aws_kinesis_firehose_delivery_stream" "test" { + name = %[1]q + destination = "extended_s3" + + extended_s3_configuration { + role_arn = aws_iam_role.firehose.arn + bucket_arn = aws_s3_bucket.test.arn + } +} + +resource "aws_cognito_log_delivery_configuration" "test" { + user_pool_id = aws_cognito_user_pool.test.id + + log_configurations { + event_source = "userNotification" + log_level = "INFO" + + cloud_watch_logs_configuration { + log_group_arn = aws_cloudwatch_log_group.test.arn + } + } + + log_configurations { + event_source = "userAuthEvents" + log_level = "ERROR" + + firehose_configuration { + stream_arn = aws_kinesis_firehose_delivery_stream.test.arn + } + } +} +`, rName) +} + +func testAccLogDeliveryConfigurationConfig_logLevel(rName, logLevel string) string { + return fmt.Sprintf(` +resource "aws_cognito_user_pool" "test" { + name = %[1]q +} + +resource "aws_cloudwatch_log_group" "test" { + name = %[1]q +} + +resource "aws_cognito_log_delivery_configuration" "test" { + user_pool_id = aws_cognito_user_pool.test.id + + log_configurations { + event_source = "userNotification" + log_level = %[2]q + + cloud_watch_logs_configuration { + log_group_arn = aws_cloudwatch_log_group.test.arn + } + } +} +`, rName, logLevel) +} diff --git a/internal/service/cognitoidp/managed_login_branding.go b/internal/service/cognitoidp/managed_login_branding.go new file mode 100644 index 000000000000..62a57dba2f06 --- /dev/null +++ b/internal/service/cognitoidp/managed_login_branding.go @@ -0,0 +1,551 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package cognitoidp + +import ( + "context" + "fmt" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/cognitoidentityprovider" + "github.com/aws/aws-sdk-go-v2/service/cognitoidentityprovider/document" + awstypes "github.com/aws/aws-sdk-go-v2/service/cognitoidentityprovider/types" + "github.com/hashicorp/terraform-plugin-framework-validators/boolvalidator" + "github.com/hashicorp/terraform-plugin-framework-validators/setvalidator" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/setplanmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" + "github.com/hashicorp/terraform-provider-aws/internal/errs" + "github.com/hashicorp/terraform-provider-aws/internal/errs/fwdiag" + intflex "github.com/hashicorp/terraform-provider-aws/internal/flex" + "github.com/hashicorp/terraform-provider-aws/internal/framework" + fwflex "github.com/hashicorp/terraform-provider-aws/internal/framework/flex" + fwtypes "github.com/hashicorp/terraform-provider-aws/internal/framework/types" + tfsmithy "github.com/hashicorp/terraform-provider-aws/internal/smithy" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + inttypes "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/names" +) + +// @FrameworkResource("aws_cognito_managed_login_branding", 
// @FrameworkResource("aws_cognito_managed_login_branding", name="Managed Login Branding")
func newManagedLoginBrandingResource(context.Context) (resource.ResourceWithConfigure, error) {
	r := &managedLoginBrandingResource{}

	return r, nil
}

// managedLoginBrandingResource manages Cognito managed login branding for a
// user pool app client (assets and/or a JSON settings document).
type managedLoginBrandingResource struct {
	framework.ResourceWithModel[managedLoginBrandingResourceModel]
}

// Schema defines the resource schema. `settings` and
// `use_cognito_provided_values` are mutually exclusive (exactly one must be
// set); `settings_all` is the read-only merged view returned by the API.
func (r *managedLoginBrandingResource) Schema(ctx context.Context, request resource.SchemaRequest, response *resource.SchemaResponse) {
	response.Schema = schema.Schema{
		Attributes: map[string]schema.Attribute{
			// Changing the associated app client forces a new resource.
			names.AttrClientID: schema.StringAttribute{
				Required: true,
				PlanModifiers: []planmodifier.String{
					stringplanmodifier.RequiresReplace(),
				},
			},
			"managed_login_branding_id": schema.StringAttribute{
				Computed: true,
				PlanModifiers: []planmodifier.String{
					stringplanmodifier.UseStateForUnknown(),
				},
			},
			// Customized settings document (JSON), expanded manually in CRUD
			// (see autoflex:"-" on the model field).
			"settings": schema.StringAttribute{
				CustomType: fwtypes.NewSmithyJSONType(ctx, document.NewLazyDocument),
				Optional:   true,
			},
			// Merged (customized + Cognito defaults) settings document.
			"settings_all": schema.StringAttribute{
				CustomType: fwtypes.NewSmithyJSONType(ctx, document.NewLazyDocument),
				Computed:   true,
			},
			"use_cognito_provided_values": schema.BoolAttribute{
				Optional: true,
				Computed: true,
				Validators: []validator.Bool{
					boolvalidator.ExactlyOneOf(
						path.MatchRoot("settings"),
						path.MatchRoot("use_cognito_provided_values"),
					),
				},
			},
			names.AttrUserPoolID: schema.StringAttribute{
				Required: true,
				PlanModifiers: []planmodifier.String{
					stringplanmodifier.RequiresReplace(),
				},
			},
		},
		Blocks: map[string]schema.Block{
			"asset": schema.SetNestedBlock{
				CustomType: fwtypes.NewSetNestedObjectTypeOf[assetTypeModel](ctx),
				Validators: []validator.Set{
					setvalidator.SizeBetween(0, 40),
				},
				PlanModifiers: []planmodifier.Set{
					// The update API allows updating an asset.
					// However, if the (`category`, `color_mode`) pair differs from existing ones,
					// the API treats the asset as new and adds it accordingly.
					// This can result in a mismatch between the Terraform plan and the actual state
					// (e.g., the plan contains one asset, but the state contains two or more).
					// To preserve declarative behavior, the resource is replaced whenever the `asset` is modified.
					setplanmodifier.RequiresReplace(),
				},
				NestedObject: schema.NestedBlockObject{
					Attributes: map[string]schema.Attribute{
						// Base64-encoded asset content.
						"bytes": schema.StringAttribute{
							Optional: true,
							PlanModifiers: []planmodifier.String{
								stringplanmodifier.RequiresReplace(),
							},
						},
						"category": schema.StringAttribute{
							CustomType: fwtypes.StringEnumType[awstypes.AssetCategoryType](),
							Required:   true,
							PlanModifiers: []planmodifier.String{
								stringplanmodifier.RequiresReplace(),
							},
						},
						"color_mode": schema.StringAttribute{
							CustomType: fwtypes.StringEnumType[awstypes.ColorSchemeModeType](),
							Required:   true,
							PlanModifiers: []planmodifier.String{
								stringplanmodifier.RequiresReplace(),
							},
						},
						"extension": schema.StringAttribute{
							CustomType: fwtypes.StringEnumType[awstypes.AssetExtensionType](),
							Required:   true,
							PlanModifiers: []planmodifier.String{
								stringplanmodifier.RequiresReplace(),
							},
						},
						names.AttrResourceID: schema.StringAttribute{
							Optional: true,
							PlanModifiers: []planmodifier.String{
								stringplanmodifier.RequiresReplace(),
							},
						},
					},
				},
			},
		},
	}
}
// Create calls CreateManagedLoginBranding, then re-reads the branding with
// merged resources to populate `settings_all`.
func (r *managedLoginBrandingResource) Create(ctx context.Context, request resource.CreateRequest, response *resource.CreateResponse) {
	var data managedLoginBrandingResourceModel
	response.Diagnostics.Append(request.Plan.Get(ctx, &data)...)
	if response.Diagnostics.HasError() {
		return
	}

	conn := r.Meta().CognitoIDPClient(ctx)

	var input cognitoidentityprovider.CreateManagedLoginBrandingInput
	response.Diagnostics.Append(fwflex.Expand(ctx, data, &input)...)
	if response.Diagnostics.HasError() {
		return
	}

	// Additional fields.
	// `settings` is excluded from autoflex (autoflex:"-"), so convert the
	// JSON string to a Smithy document here.
	settings, diags := data.Settings.ToSmithyDocument(ctx)
	response.Diagnostics.Append(diags...)
	if response.Diagnostics.HasError() {
		return
	}
	input.Settings = settings

	output, err := conn.CreateManagedLoginBranding(ctx, &input)

	if err != nil {
		response.Diagnostics.AddError(fmt.Sprintf("creating Cognito Managed Login Branding (%s)", data.ClientID.ValueString()), err.Error())

		return
	}

	// Set values for unknowns.
	mlb := output.ManagedLoginBranding
	data.ManagedLoginBrandingID = fwflex.StringToFramework(ctx, mlb.ManagedLoginBrandingId)
	data.UseCognitoProvidedValues = fwflex.BoolValueToFramework(ctx, mlb.UseCognitoProvidedValues)

	userPoolID, managedLoginBrandingID := fwflex.StringValueFromFramework(ctx, data.UserPoolID), fwflex.StringValueFromFramework(ctx, data.ManagedLoginBrandingID)
	// Return all values.
	// Second read with returnMergedResources=true populates `settings_all`.
	mlb, err = findManagedLoginBrandingByThreePartKey(ctx, conn, userPoolID, managedLoginBrandingID, true)

	if err != nil {
		response.Diagnostics.AddError(fmt.Sprintf("reading Cognito Managed Login Branding (%s)", managedLoginBrandingID), err.Error())

		return
	}

	settingsAll, diags := flattenManagedLoginBrandingSettings(ctx, mlb.Settings)
	response.Diagnostics.Append(diags...)
	if response.Diagnostics.HasError() {
		return
	}
	data.SettingsAll = settingsAll

	response.Diagnostics.Append(response.State.Set(ctx, &data)...)
}
// Read refreshes state. It performs two describes (customized-only values for
// `settings`, merged values for `settings_all`), then scans the user pool's
// app clients to recover `client_id` — needed because import only supplies
// the user pool ID and branding ID.
func (r *managedLoginBrandingResource) Read(ctx context.Context, request resource.ReadRequest, response *resource.ReadResponse) {
	var data managedLoginBrandingResourceModel
	response.Diagnostics.Append(request.State.Get(ctx, &data)...)
	if response.Diagnostics.HasError() {
		return
	}

	conn := r.Meta().CognitoIDPClient(ctx)

	userPoolID, managedLoginBrandingID := fwflex.StringValueFromFramework(ctx, data.UserPoolID), fwflex.StringValueFromFramework(ctx, data.ManagedLoginBrandingID)
	// Return only customized values.
	mlb, err := findManagedLoginBrandingByThreePartKey(ctx, conn, userPoolID, managedLoginBrandingID, false)

	if tfresource.NotFound(err) {
		response.Diagnostics.Append(fwdiag.NewResourceNotFoundWarningDiagnostic(err))
		response.State.RemoveResource(ctx)

		return
	}

	if err != nil {
		response.Diagnostics.AddError(fmt.Sprintf("reading Cognito Managed Login Branding (%s)", managedLoginBrandingID), err.Error())

		return
	}

	// Set attributes for import.
	response.Diagnostics.Append(fwflex.Flatten(ctx, mlb, &data)...)
	if response.Diagnostics.HasError() {
		return
	}

	settings, diags := flattenManagedLoginBrandingSettings(ctx, mlb.Settings)
	response.Diagnostics.Append(diags...)
	if response.Diagnostics.HasError() {
		return
	}
	data.Settings = settings

	// Return all values.
	mlb, err = findManagedLoginBrandingByThreePartKey(ctx, conn, userPoolID, managedLoginBrandingID, true)

	if err != nil {
		response.Diagnostics.AddError(fmt.Sprintf("reading Cognito Managed Login Branding (%s)", managedLoginBrandingID), err.Error())

		return
	}

	settingsAll, diags := flattenManagedLoginBrandingSettings(ctx, mlb.Settings)
	response.Diagnostics.Append(diags...)
	if response.Diagnostics.HasError() {
		return
	}
	data.SettingsAll = settingsAll

	// Recover client_id: describe branding per app client and match on the
	// branding ID. NOTE(review): the loop does not break on a match, so with
	// multiple clients sharing this branding the last match wins.
	input := cognitoidentityprovider.ListUserPoolClientsInput{
		UserPoolId: aws.String(userPoolID),
	}
	pages := cognitoidentityprovider.NewListUserPoolClientsPaginator(conn, &input)
	for pages.HasMorePages() {
		page, err := pages.NextPage(ctx)

		if err != nil {
			response.Diagnostics.AddError(fmt.Sprintf("listing Cognito User Pool (%s) Clients", userPoolID), err.Error())

			return
		}

		for _, v := range page.UserPoolClients {
			clientID := aws.ToString(v.ClientId)
			input := cognitoidentityprovider.DescribeManagedLoginBrandingByClientInput{
				ClientId:   aws.String(clientID),
				UserPoolId: aws.String(userPoolID),
			}
			mlb, err := findManagedLoginBrandingByClient(ctx, conn, &input)

			// A client without branding is not an error; keep scanning.
			if tfresource.NotFound(err) {
				continue
			}

			if err != nil {
				response.Diagnostics.AddError(fmt.Sprintf("reading Cognito Managed Login Branding by client (%s)", clientID), err.Error())

				return
			}

			if aws.ToString(mlb.ManagedLoginBrandingId) == managedLoginBrandingID {
				data.ClientID = fwflex.StringValueToFramework(ctx, clientID)
			}
		}
	}

	response.Diagnostics.Append(response.State.Set(ctx, &data)...)
}
// Update calls UpdateManagedLoginBranding. `settings` is only sent when it
// actually changed and is non-empty; sending it forces
// UseCognitoProvidedValues to false (the two are mutually exclusive in the
// API). Afterwards the merged view is re-read to refresh `settings_all`.
func (r *managedLoginBrandingResource) Update(ctx context.Context, request resource.UpdateRequest, response *resource.UpdateResponse) {
	var old, new managedLoginBrandingResourceModel
	response.Diagnostics.Append(request.Plan.Get(ctx, &new)...)
	if response.Diagnostics.HasError() {
		return
	}
	response.Diagnostics.Append(request.State.Get(ctx, &old)...)
	if response.Diagnostics.HasError() {
		return
	}

	conn := r.Meta().CognitoIDPClient(ctx)

	userPoolID, managedLoginBrandingID := fwflex.StringValueFromFramework(ctx, new.UserPoolID), fwflex.StringValueFromFramework(ctx, new.ManagedLoginBrandingID)
	var input cognitoidentityprovider.UpdateManagedLoginBrandingInput
	response.Diagnostics.Append(fwflex.Expand(ctx, new, &input)...)
	if response.Diagnostics.HasError() {
		return
	}

	// Only push `settings` when changed (string comparison of the JSON) and
	// non-empty; an unchanged or cleared value leaves input.Settings nil.
	if oldSettings, newSettings := fwflex.StringValueFromFramework(ctx, old.Settings), fwflex.StringValueFromFramework(ctx, new.Settings); newSettings != oldSettings && newSettings != "" {
		var err error
		input.Settings, err = tfsmithy.DocumentFromJSONString(newSettings, document.NewLazyDocument)

		if err != nil {
			response.Diagnostics.AddError("creating Smithy document", err.Error())

			return
		}

		input.UseCognitoProvidedValues = false
	}

	_, err := conn.UpdateManagedLoginBranding(ctx, &input)

	if err != nil {
		response.Diagnostics.AddError(fmt.Sprintf("updating Cognito Managed Login Branding (%s)", managedLoginBrandingID), err.Error())

		return
	}

	// Return all values.
	mlb, err := findManagedLoginBrandingByThreePartKey(ctx, conn, userPoolID, managedLoginBrandingID, true)

	if err != nil {
		response.Diagnostics.AddError(fmt.Sprintf("reading Cognito Managed Login Branding (%s)", managedLoginBrandingID), err.Error())

		return
	}

	settingsAll, diags := flattenManagedLoginBrandingSettings(ctx, mlb.Settings)
	response.Diagnostics.Append(diags...)
	if response.Diagnostics.HasError() {
		return
	}
	new.SettingsAll = settingsAll
	new.UseCognitoProvidedValues = fwflex.BoolValueToFramework(ctx, mlb.UseCognitoProvidedValues)

	response.Diagnostics.Append(response.State.Set(ctx, &new)...)
}
// Delete removes the branding. A ResourceNotFoundException is treated as
// success (already gone).
func (r *managedLoginBrandingResource) Delete(ctx context.Context, request resource.DeleteRequest, response *resource.DeleteResponse) {
	var data managedLoginBrandingResourceModel
	response.Diagnostics.Append(request.State.Get(ctx, &data)...)
	if response.Diagnostics.HasError() {
		return
	}

	conn := r.Meta().CognitoIDPClient(ctx)

	userPoolID, managedLoginBrandingID := fwflex.StringValueFromFramework(ctx, data.UserPoolID), fwflex.StringValueFromFramework(ctx, data.ManagedLoginBrandingID)
	tflog.Debug(ctx, "deleting Cognito Managed Login Branding", map[string]any{
		"managed_login_branding_id": managedLoginBrandingID,
		names.AttrUserPoolID:        userPoolID,
	})
	input := cognitoidentityprovider.DeleteManagedLoginBrandingInput{
		ManagedLoginBrandingId: aws.String(managedLoginBrandingID),
		UserPoolId:             aws.String(userPoolID),
	}
	_, err := conn.DeleteManagedLoginBranding(ctx, &input)

	// Already deleted out-of-band: nothing to do.
	if errs.IsA[*awstypes.ResourceNotFoundException](err) {
		return
	}

	if err != nil {
		response.Diagnostics.AddError(fmt.Sprintf("deleting Cognito Managed Login Branding (%s)", managedLoginBrandingID), err.Error())

		return
	}
}

// ImportState accepts a two-part ID "<user_pool_id>,<managed_login_branding_id>".
// `client_id` is not part of the ID; Read recovers it by scanning app clients.
func (r *managedLoginBrandingResource) ImportState(ctx context.Context, request resource.ImportStateRequest, response *resource.ImportStateResponse) {
	const (
		managedLoginBrandingIDParts = 2
	)
	parts, err := intflex.ExpandResourceId(request.ID, managedLoginBrandingIDParts, true)

	if err != nil {
		response.Diagnostics.Append(fwdiag.NewParsingResourceIDErrorDiagnostic(err))

		return
	}

	response.Diagnostics.Append(response.State.SetAttribute(ctx, path.Root(names.AttrUserPoolID), parts[0])...)
	response.Diagnostics.Append(response.State.SetAttribute(ctx, path.Root("managed_login_branding_id"), parts[1])...)
}
+} + +func findManagedLoginBrandingByThreePartKey(ctx context.Context, conn *cognitoidentityprovider.Client, userPoolID, managedLoginBrandingID string, returnMergedResources bool) (*awstypes.ManagedLoginBrandingType, error) { + input := cognitoidentityprovider.DescribeManagedLoginBrandingInput{ + ManagedLoginBrandingId: aws.String(managedLoginBrandingID), + ReturnMergedResources: returnMergedResources, + UserPoolId: aws.String(userPoolID), + } + + return findManagedLoginBranding(ctx, conn, &input) +} + +func findManagedLoginBranding(ctx context.Context, conn *cognitoidentityprovider.Client, input *cognitoidentityprovider.DescribeManagedLoginBrandingInput) (*awstypes.ManagedLoginBrandingType, error) { + output, err := conn.DescribeManagedLoginBranding(ctx, input) + + if errs.IsA[*awstypes.ResourceNotFoundException](err) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, + } + } + + if err != nil { + return nil, err + } + + if output == nil || output.ManagedLoginBranding == nil { + return nil, tfresource.NewEmptyResultError(input) + } + + return output.ManagedLoginBranding, nil +} + +func findManagedLoginBrandingByClient(ctx context.Context, conn *cognitoidentityprovider.Client, input *cognitoidentityprovider.DescribeManagedLoginBrandingByClientInput) (*awstypes.ManagedLoginBrandingType, error) { + output, err := conn.DescribeManagedLoginBrandingByClient(ctx, input) + + if errs.IsA[*awstypes.ResourceNotFoundException](err) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, + } + } + + if err != nil { + return nil, err + } + + if output == nil || output.ManagedLoginBranding == nil { + return nil, tfresource.NewEmptyResultError(input) + } + + return output.ManagedLoginBranding, nil +} + +type managedLoginBrandingResourceModel struct { + framework.WithRegionModel + Asset fwtypes.SetNestedObjectValueOf[assetTypeModel] `tfsdk:"asset"` + ClientID types.String `tfsdk:"client_id"` + ManagedLoginBrandingID types.String 
// managedLoginBrandingResourceModel is the Terraform state model.
// Settings/SettingsAll are excluded from autoflex (autoflex:"-") and
// expanded/flattened manually in the CRUD handlers.
type managedLoginBrandingResourceModel struct {
	framework.WithRegionModel
	Asset                    fwtypes.SetNestedObjectValueOf[assetTypeModel] `tfsdk:"asset"`
	ClientID                 types.String                                   `tfsdk:"client_id"`
	ManagedLoginBrandingID   types.String                                   `tfsdk:"managed_login_branding_id"`
	Settings                 fwtypes.SmithyJSON[document.Interface]         `tfsdk:"settings" autoflex:"-"`
	SettingsAll              fwtypes.SmithyJSON[document.Interface]         `tfsdk:"settings_all" autoflex:"-"`
	UseCognitoProvidedValues types.Bool                                     `tfsdk:"use_cognito_provided_values"`
	UserPoolID               types.String                                   `tfsdk:"user_pool_id"`
}

// flattenManagedLoginBrandingSettings converts an API settings document to a
// SmithyJSON state value. The JSON is deliberately round-tripped
// (document -> JSON -> document -> JSON) to normalize its representation so
// state comparisons are stable; a nil document yields a null value.
func flattenManagedLoginBrandingSettings(ctx context.Context, settings document.Interface) (fwtypes.SmithyJSON[document.Interface], diag.Diagnostics) { // nosemgrep:ci.semgrep.framework.manual-flattener-functions
	var diags diag.Diagnostics

	if settings == nil {
		return fwtypes.NewSmithyJSONNull[document.Interface](), diags
	}

	value, err := tfsmithy.DocumentToJSONString(settings)

	if err != nil {
		diags.AddError("reading Smithy document", err.Error())

		return fwtypes.NewSmithyJSONNull[document.Interface](), diags
	}

	settings, d := fwtypes.NewSmithyJSONValue(value, document.NewLazyDocument).ToSmithyDocument(ctx)
	diags.Append(d...)
	if diags.HasError() {
		return fwtypes.NewSmithyJSONNull[document.Interface](), diags
	}

	value, err = tfsmithy.DocumentToJSONString(settings)

	if err != nil {
		diags.AddError("reading Smithy document", err.Error())

		return fwtypes.NewSmithyJSONNull[document.Interface](), diags
	}

	return fwtypes.NewSmithyJSONValue(value, document.NewLazyDocument), diags
}

// assetTypeModel models one `asset` block; Bytes holds base64-encoded content.
type assetTypeModel struct {
	Bytes      types.String                                     `tfsdk:"bytes"`
	Category   fwtypes.StringEnum[awstypes.AssetCategoryType]   `tfsdk:"category"`
	ColorMode  fwtypes.StringEnum[awstypes.ColorSchemeModeType] `tfsdk:"color_mode"`
	Extension  fwtypes.StringEnum[awstypes.AssetExtensionType]  `tfsdk:"extension"`
	ResourceID types.String                                     `tfsdk:"resource_id"`
}

// Compile-time checks: assetTypeModel implements custom autoflex conversion.
var (
	_ fwflex.Expander  = assetTypeModel{}
	_ fwflex.Flattener = &assetTypeModel{}
)
// Expand converts the model to an awstypes.AssetType, base64-decoding the
// `bytes` attribute into the API's raw byte payload.
func (m assetTypeModel) Expand(ctx context.Context) (any, diag.Diagnostics) {
	var diags diag.Diagnostics
	r := awstypes.AssetType{
		Category:   m.Category.ValueEnum(),
		ColorMode:  m.ColorMode.ValueEnum(),
		Extension:  m.Extension.ValueEnum(),
		ResourceId: fwflex.StringFromFramework(ctx, m.ResourceID),
	}

	// `bytes` is configured base64-encoded; decode before sending.
	if v, err := inttypes.Base64Decode(m.Bytes.ValueString()); err == nil {
		r.Bytes = v
	} else {
		diags.AddError(
			"decoding asset bytes",
			err.Error(),
		)

		return nil, diags
	}

	return &r, diags
}

// Flatten converts an awstypes.AssetType back to the model, base64-encoding
// the raw bytes; non-AssetType inputs are silently ignored.
func (m *assetTypeModel) Flatten(ctx context.Context, v any) diag.Diagnostics {
	var diags diag.Diagnostics

	switch v := v.(type) {
	case awstypes.AssetType:
		m.Bytes = fwflex.StringValueToFramework(ctx, inttypes.Base64Encode(v.Bytes))
		m.Category = fwtypes.StringEnumValue(v.Category)
		m.ColorMode = fwtypes.StringEnumValue(v.ColorMode)
		m.Extension = fwtypes.StringEnumValue(v.Extension)
		m.ResourceID = fwflex.StringToFramework(ctx, v.ResourceId)
	default:
	}

	return diags
}
// TestAccCognitoIDPManagedLoginBranding_basic creates a branding with only
// Cognito-provided values and verifies the computed attributes plus import.
func TestAccCognitoIDPManagedLoginBranding_basic(t *testing.T) {
	ctx := acctest.Context(t)
	var v awstypes.ManagedLoginBrandingType
	rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix)
	resourceName := "aws_cognito_managed_login_branding.test"

	resource.ParallelTest(t, resource.TestCase{
		PreCheck:                 func() { acctest.PreCheck(ctx, t); testAccPreCheckIdentityProvider(ctx, t) },
		ErrorCheck:               acctest.ErrorCheck(t, names.CognitoIDPServiceID),
		ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories,
		CheckDestroy:             testAccCheckManagedLoginBrandingDestroy(ctx),
		Steps: []resource.TestStep{
			{
				Config: testAccManagedLoginBrandingConfig_basic(rName),
				Check: resource.ComposeAggregateTestCheckFunc(
					testAccCheckManagedLoginBrandingExists(ctx, resourceName, &v),
				),
				ConfigPlanChecks: resource.ConfigPlanChecks{
					PreApply: []plancheck.PlanCheck{
						plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate),
					},
				},
				ConfigStateChecks: []statecheck.StateCheck{
					statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("asset"), knownvalue.SetSizeExact(0)),
					statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("managed_login_branding_id"), knownvalue.NotNull()),
					statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("settings"), knownvalue.Null()),
					statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("settings_all"), knownvalue.NotNull()),
					statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("use_cognito_provided_values"), knownvalue.Bool(true)),
				},
			},
			{
				ResourceName:                         resourceName,
				ImportState:                          true,
				ImportStateVerify:                    true,
				ImportStateVerifyIdentifierAttribute: "managed_login_branding_id",
				ImportStateIdFunc:                    acctest.AttrsImportStateIdFunc(resourceName, ",", names.AttrUserPoolID, "managed_login_branding_id"),
			},
		},
	})
}

// TestAccCognitoIDPManagedLoginBranding_disappears verifies out-of-band
// deletion is detected and replanned.
func TestAccCognitoIDPManagedLoginBranding_disappears(t *testing.T) {
	ctx := acctest.Context(t)
	var client awstypes.ManagedLoginBrandingType
	rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix)
	resourceName := "aws_cognito_managed_login_branding.test"

	resource.ParallelTest(t, resource.TestCase{
		PreCheck:                 func() { acctest.PreCheck(ctx, t); testAccPreCheckIdentityProvider(ctx, t) },
		ErrorCheck:               acctest.ErrorCheck(t, names.CognitoIDPServiceID),
		ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories,
		CheckDestroy:             testAccCheckManagedLoginBrandingDestroy(ctx),
		Steps: []resource.TestStep{
			{
				Config: testAccManagedLoginBrandingConfig_basic(rName),
				Check: resource.ComposeAggregateTestCheckFunc(
					testAccCheckManagedLoginBrandingExists(ctx, resourceName, &client),
					acctest.CheckFrameworkResourceDisappears(ctx, acctest.Provider, tfcognitoidp.ResourceManagedLoginBranding, resourceName),
				),
				ExpectNonEmptyPlan: true,
			},
		},
	})
}
// TestAccCognitoIDPManagedLoginBranding_asset creates a branding with one
// asset block and verifies it round-trips through import.
func TestAccCognitoIDPManagedLoginBranding_asset(t *testing.T) {
	ctx := acctest.Context(t)
	var v awstypes.ManagedLoginBrandingType
	rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix)
	resourceName := "aws_cognito_managed_login_branding.test"

	resource.ParallelTest(t, resource.TestCase{
		PreCheck:                 func() { acctest.PreCheck(ctx, t); testAccPreCheckIdentityProvider(ctx, t) },
		ErrorCheck:               acctest.ErrorCheck(t, names.CognitoIDPServiceID),
		ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories,
		CheckDestroy:             testAccCheckManagedLoginBrandingDestroy(ctx),
		Steps: []resource.TestStep{
			{
				Config: testAccManagedLoginBrandingConfig_asset(rName),
				Check: resource.ComposeAggregateTestCheckFunc(
					testAccCheckManagedLoginBrandingExists(ctx, resourceName, &v),
				),
				ConfigPlanChecks: resource.ConfigPlanChecks{
					PreApply: []plancheck.PlanCheck{
						plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate),
					},
				},
				ConfigStateChecks: []statecheck.StateCheck{
					statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("asset"), knownvalue.SetSizeExact(1)),
				},
			},
			{
				ResourceName:                         resourceName,
				ImportState:                          true,
				ImportStateVerify:                    true,
				ImportStateVerifyIdentifierAttribute: "managed_login_branding_id",
				ImportStateIdFunc:                    acctest.AttrsImportStateIdFunc(resourceName, ",", names.AttrUserPoolID, "managed_login_branding_id"),
			},
		},
	})
}

// TestAccCognitoIDPManagedLoginBranding_settings creates a branding with an
// explicit settings document; use_cognito_provided_values must then be false.
func TestAccCognitoIDPManagedLoginBranding_settings(t *testing.T) {
	ctx := acctest.Context(t)
	var v awstypes.ManagedLoginBrandingType
	rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix)
	resourceName := "aws_cognito_managed_login_branding.test"

	resource.ParallelTest(t, resource.TestCase{
		PreCheck:                 func() { acctest.PreCheck(ctx, t); testAccPreCheckIdentityProvider(ctx, t) },
		ErrorCheck:               acctest.ErrorCheck(t, names.CognitoIDPServiceID),
		ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories,
		CheckDestroy:             testAccCheckManagedLoginBrandingDestroy(ctx),
		Steps: []resource.TestStep{
			{
				Config: testAccManagedLoginBrandingConfig_settings(rName),
				Check: resource.ComposeAggregateTestCheckFunc(
					testAccCheckManagedLoginBrandingExists(ctx, resourceName, &v),
				),
				ConfigPlanChecks: resource.ConfigPlanChecks{
					PreApply: []plancheck.PlanCheck{
						plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate),
					},
				},
				ConfigStateChecks: []statecheck.StateCheck{
					statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("settings"), knownvalue.NotNull()),
					statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("use_cognito_provided_values"), knownvalue.Bool(false)),
				},
			},
			{
				ResourceName:                         resourceName,
				ImportState:                          true,
				ImportStateVerify:                    true,
				ImportStateVerifyIdentifierAttribute: "managed_login_branding_id",
				ImportStateIdFunc:                    acctest.AttrsImportStateIdFunc(resourceName, ",", names.AttrUserPoolID, "managed_login_branding_id"),
			},
		},
	})
}

// TestAccCognitoIDPManagedLoginBranding_updateFromBasic verifies an in-place
// update from Cognito-provided values to an explicit settings document.
func TestAccCognitoIDPManagedLoginBranding_updateFromBasic(t *testing.T) {
	ctx := acctest.Context(t)
	var v awstypes.ManagedLoginBrandingType
	rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix)
	resourceName := "aws_cognito_managed_login_branding.test"

	resource.ParallelTest(t, resource.TestCase{
		PreCheck:                 func() { acctest.PreCheck(ctx, t); testAccPreCheckIdentityProvider(ctx, t) },
		ErrorCheck:               acctest.ErrorCheck(t, names.CognitoIDPServiceID),
		ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories,
		CheckDestroy:             testAccCheckManagedLoginBrandingDestroy(ctx),
		Steps: []resource.TestStep{
			{
				Config: testAccManagedLoginBrandingConfig_basic(rName),
				Check: resource.ComposeAggregateTestCheckFunc(
					testAccCheckManagedLoginBrandingExists(ctx, resourceName, &v),
				),
				ConfigPlanChecks: resource.ConfigPlanChecks{
					PreApply: []plancheck.PlanCheck{
						plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate),
					},
				},
				ConfigStateChecks: []statecheck.StateCheck{
					statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("settings"), knownvalue.Null()),
					statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("use_cognito_provided_values"), knownvalue.Bool(true)),
				},
			},
			{
				Config: testAccManagedLoginBrandingConfig_settings(rName),
				Check: resource.ComposeAggregateTestCheckFunc(
					testAccCheckManagedLoginBrandingExists(ctx, resourceName, &v),
				),
				ConfigPlanChecks: resource.ConfigPlanChecks{
					PreApply: []plancheck.PlanCheck{
						plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate),
					},
				},
				ConfigStateChecks: []statecheck.StateCheck{
					statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("settings"), knownvalue.NotNull()),
					statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("use_cognito_provided_values"), knownvalue.Bool(false)),
				},
			},
		},
	})
}
// TestAccCognitoIDPManagedLoginBranding_updateToBasic verifies an in-place
// update from an explicit settings document back to Cognito-provided values.
func TestAccCognitoIDPManagedLoginBranding_updateToBasic(t *testing.T) {
	ctx := acctest.Context(t)
	var v awstypes.ManagedLoginBrandingType
	rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix)
	resourceName := "aws_cognito_managed_login_branding.test"

	resource.ParallelTest(t, resource.TestCase{
		PreCheck:                 func() { acctest.PreCheck(ctx, t); testAccPreCheckIdentityProvider(ctx, t) },
		ErrorCheck:               acctest.ErrorCheck(t, names.CognitoIDPServiceID),
		ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories,
		CheckDestroy:             testAccCheckManagedLoginBrandingDestroy(ctx),
		Steps: []resource.TestStep{
			{
				Config: testAccManagedLoginBrandingConfig_settings(rName),
				Check: resource.ComposeAggregateTestCheckFunc(
					testAccCheckManagedLoginBrandingExists(ctx, resourceName, &v),
				),
				ConfigPlanChecks: resource.ConfigPlanChecks{
					PreApply: []plancheck.PlanCheck{
						plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate),
					},
				},
				ConfigStateChecks: []statecheck.StateCheck{
					statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("settings"), knownvalue.NotNull()),
					statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("use_cognito_provided_values"), knownvalue.Bool(false)),
				},
			},
			{
				Config: testAccManagedLoginBrandingConfig_basic(rName),
				Check: resource.ComposeAggregateTestCheckFunc(
					testAccCheckManagedLoginBrandingExists(ctx, resourceName, &v),
				),
				ConfigPlanChecks: resource.ConfigPlanChecks{
					PreApply: []plancheck.PlanCheck{
						plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate),
					},
				},
				ConfigStateChecks: []statecheck.StateCheck{
					statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("settings"), knownvalue.Null()),
					statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("use_cognito_provided_values"), knownvalue.Bool(true)),
				},
			},
		},
	})
}
// TestAccCognitoIDPManagedLoginBranding_updateSettings verifies changing the
// settings document is applied as an in-place update.
func TestAccCognitoIDPManagedLoginBranding_updateSettings(t *testing.T) {
	ctx := acctest.Context(t)
	var v awstypes.ManagedLoginBrandingType
	rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix)
	resourceName := "aws_cognito_managed_login_branding.test"

	resource.ParallelTest(t, resource.TestCase{
		PreCheck:                 func() { acctest.PreCheck(ctx, t); testAccPreCheckIdentityProvider(ctx, t) },
		ErrorCheck:               acctest.ErrorCheck(t, names.CognitoIDPServiceID),
		ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories,
		CheckDestroy:             testAccCheckManagedLoginBrandingDestroy(ctx),
		Steps: []resource.TestStep{
			{
				Config: testAccManagedLoginBrandingConfig_settings(rName),
				Check: resource.ComposeAggregateTestCheckFunc(
					testAccCheckManagedLoginBrandingExists(ctx, resourceName, &v),
				),
				ConfigPlanChecks: resource.ConfigPlanChecks{
					PreApply: []plancheck.PlanCheck{
						plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate),
					},
				},
				ConfigStateChecks: []statecheck.StateCheck{
					statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("settings"), knownvalue.NotNull()),
					statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("use_cognito_provided_values"), knownvalue.Bool(false)),
				},
			},
			{
				Config: testAccManagedLoginBrandingConfig_settingsUpdated(rName),
				Check: resource.ComposeAggregateTestCheckFunc(
					testAccCheckManagedLoginBrandingExists(ctx, resourceName, &v),
				),
				ConfigPlanChecks: resource.ConfigPlanChecks{
					PreApply: []plancheck.PlanCheck{
						plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate),
					},
				},
				ConfigStateChecks: []statecheck.StateCheck{
					statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("settings"), knownvalue.NotNull()),
					statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("use_cognito_provided_values"), knownvalue.Bool(false)),
				},
			},
		},
	})
}

// Two brandings for two app clients in the same user pool.
// https://github.com/hashicorp/terraform-provider-aws/issues/44188.
func TestAccCognitoIDPManagedLoginBranding_multiple(t *testing.T) {
	ctx := acctest.Context(t)
	var v1, v2 awstypes.ManagedLoginBrandingType
	rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix)
	resource1Name := "aws_cognito_managed_login_branding.test1"
	resource2Name := "aws_cognito_managed_login_branding.test2"

	resource.ParallelTest(t, resource.TestCase{
		PreCheck:                 func() { acctest.PreCheck(ctx, t); testAccPreCheckIdentityProvider(ctx, t) },
		ErrorCheck:               acctest.ErrorCheck(t, names.CognitoIDPServiceID),
		ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories,
		CheckDestroy:             testAccCheckManagedLoginBrandingDestroy(ctx),
		Steps: []resource.TestStep{
			{
				Config: testAccManagedLoginBrandingConfig_multiple(rName),
				Check: resource.ComposeAggregateTestCheckFunc(
					testAccCheckManagedLoginBrandingExists(ctx, resource1Name, &v1),
					testAccCheckManagedLoginBrandingExists(ctx, resource2Name, &v2),
				),
				ConfigPlanChecks: resource.ConfigPlanChecks{
					PreApply: []plancheck.PlanCheck{
						plancheck.ExpectResourceAction(resource1Name, plancheck.ResourceActionCreate),
					},
				},
				ConfigStateChecks: []statecheck.StateCheck{
					statecheck.ExpectKnownValue(resource1Name, tfjsonpath.New("use_cognito_provided_values"), knownvalue.Bool(true)),
					statecheck.ExpectKnownValue(resource2Name, tfjsonpath.New("use_cognito_provided_values"), knownvalue.Bool(true)),
				},
			},
			{
				ResourceName:                         resource1Name,
				ImportState:                          true,
				ImportStateVerify:                    true,
				ImportStateVerifyIdentifierAttribute: "managed_login_branding_id",
				ImportStateIdFunc:                    acctest.AttrsImportStateIdFunc(resource1Name, ",", names.AttrUserPoolID, "managed_login_branding_id"),
			},
			{
				ResourceName:                         resource2Name,
				ImportState:                          true,
				ImportStateVerify:                    true,
				ImportStateVerifyIdentifierAttribute: "managed_login_branding_id",
				ImportStateIdFunc:                    acctest.AttrsImportStateIdFunc(resource2Name, ",", names.AttrUserPoolID, "managed_login_branding_id"),
			},
		},
	})
}

// testAccCheckManagedLoginBrandingDestroy verifies no branding remains after
// destroy; not-found is the expected outcome.
func testAccCheckManagedLoginBrandingDestroy(ctx context.Context) resource.TestCheckFunc {
	return func(s *terraform.State) error {
		conn := acctest.Provider.Meta().(*conns.AWSClient).CognitoIDPClient(ctx)

		for _, rs := range s.RootModule().Resources {
			if rs.Type != "aws_cognito_managed_login_branding" {
				continue
			}

			_, err := tfcognitoidp.FindManagedLoginBrandingByThreePartKey(ctx, conn, rs.Primary.Attributes[names.AttrUserPoolID], rs.Primary.Attributes["managed_login_branding_id"], false)

			if tfresource.NotFound(err) {
				continue
			}

			if err != nil {
				return err
			}

			return fmt.Errorf("Cognito Managed Login Branding %s still exists", rs.Primary.ID)
		}

		return nil
	}
}

// testAccCheckManagedLoginBrandingExists reads the branding from AWS and
// stores it in v for follow-up assertions.
func testAccCheckManagedLoginBrandingExists(ctx context.Context, n string, v *awstypes.ManagedLoginBrandingType) resource.TestCheckFunc {
	return func(s *terraform.State) error {
		rs, ok := s.RootModule().Resources[n]
		if !ok {
			return fmt.Errorf("Not found: %s", n)
		}

		conn := acctest.Provider.Meta().(*conns.AWSClient).CognitoIDPClient(ctx)

		output, err := tfcognitoidp.FindManagedLoginBrandingByThreePartKey(ctx, conn, rs.Primary.Attributes[names.AttrUserPoolID], rs.Primary.Attributes["managed_login_branding_id"], false)

		if err != nil {
			return err
		}

		*v = *output

		return nil
	}
}
"aws_cognito_user_pool_client" "test" { + name = %[1]q + user_pool_id = aws_cognito_user_pool.test.id + explicit_auth_flows = ["ADMIN_NO_SRP_AUTH"] +} +`, rName) +} + +func testAccManagedLoginBrandingConfig_basic(rName string) string { + return acctest.ConfigCompose(testAccManagedLoginBrandingConfig_base(rName), ` +resource "aws_cognito_managed_login_branding" "test" { + client_id = aws_cognito_user_pool_client.test.id + user_pool_id = aws_cognito_user_pool.test.id + + use_cognito_provided_values = true +} +`) +} + +func testAccManagedLoginBrandingConfig_asset(rName string) string { + return acctest.ConfigCompose(testAccManagedLoginBrandingConfig_base(rName), ` +resource "aws_cognito_managed_login_branding" "test" { + client_id = aws_cognito_user_pool_client.test.id + user_pool_id = aws_cognito_user_pool.test.id + + use_cognito_provided_values = true + + asset { + bytes = filebase64("test-fixtures/login_branding_asset.svg") + category = "PAGE_FOOTER_BACKGROUND" + color_mode = "DARK" + extension = "SVG" + } +} +`) +} + +func testAccManagedLoginBrandingConfig_settings(rName string) string { + return acctest.ConfigCompose(testAccManagedLoginBrandingConfig_base(rName), ` +resource "aws_cognito_managed_login_branding" "test" { + client_id = aws_cognito_user_pool_client.test.id + user_pool_id = aws_cognito_user_pool.test.id + + settings = jsonencode({ + "categories" : { + "auth" : { + "authMethodOrder" : [ + [ + { + "display" : "BUTTON", + "type" : "FEDERATED" + }, + { + "display" : "INPUT", + "type" : "USERNAME_PASSWORD" + } + ] + ], + "federation" : { + "interfaceStyle" : "BUTTON_LIST", + "order" : [ + ] + } + }, + "form" : { + "displayGraphics" : true, + "instructions" : { + "enabled" : false + }, + "languageSelector" : { + "enabled" : false + }, + "location" : { + "horizontal" : "CENTER", + "vertical" : "CENTER" + }, + "sessionTimerDisplay" : "NONE" + }, + "global" : { + "colorSchemeMode" : "LIGHT", + "pageFooter" : { + "enabled" : false + }, + "pageHeader" : { + 
"enabled" : false + }, + "spacingDensity" : "REGULAR" + }, + "signUp" : { + "acceptanceElements" : [ + { + "enforcement" : "NONE", + "textKey" : "en" + } + ] + } + }, + "componentClasses" : { + "buttons" : { + "borderRadius" : 8.0 + }, + "divider" : { + "darkMode" : { + "borderColor" : "232b37ff" + }, + "lightMode" : { + "borderColor" : "ebebf0ff" + } + }, + "dropDown" : { + "borderRadius" : 8.0, + "darkMode" : { + "defaults" : { + "itemBackgroundColor" : "192534ff" + }, + "hover" : { + "itemBackgroundColor" : "081120ff", + "itemBorderColor" : "5f6b7aff", + "itemTextColor" : "e9ebedff" + }, + "match" : { + "itemBackgroundColor" : "d1d5dbff", + "itemTextColor" : "89bdeeff" + } + }, + "lightMode" : { + "defaults" : { + "itemBackgroundColor" : "ffffffff" + }, + "hover" : { + "itemBackgroundColor" : "f4f4f4ff", + "itemBorderColor" : "7d8998ff", + "itemTextColor" : "000716ff" + }, + "match" : { + "itemBackgroundColor" : "414d5cff", + "itemTextColor" : "0972d3ff" + } + } + }, + "focusState" : { + "darkMode" : { + "borderColor" : "539fe5ff" + }, + "lightMode" : { + "borderColor" : "0972d3ff" + } + }, + "idpButtons" : { + "icons" : { + "enabled" : true + } + }, + "input" : { + "borderRadius" : 8.0, + "darkMode" : { + "defaults" : { + "backgroundColor" : "0f1b2aff", + "borderColor" : "5f6b7aff" + }, + "placeholderColor" : "8d99a8ff" + }, + "lightMode" : { + "defaults" : { + "backgroundColor" : "ffffffff", + "borderColor" : "7d8998ff" + }, + "placeholderColor" : "5f6b7aff" + } + }, + "inputDescription" : { + "darkMode" : { + "textColor" : "8d99a8ff" + }, + "lightMode" : { + "textColor" : "5f6b7aff" + } + }, + "inputLabel" : { + "darkMode" : { + "textColor" : "d1d5dbff" + }, + "lightMode" : { + "textColor" : "000716ff" + } + }, + "link" : { + "darkMode" : { + "defaults" : { + "textColor" : "539fe5ff" + }, + "hover" : { + "textColor" : "89bdeeff" + } + }, + "lightMode" : { + "defaults" : { + "textColor" : "0972d3ff" + }, + "hover" : { + "textColor" : "033160ff" + } + } + }, + 
"optionControls" : { + "darkMode" : { + "defaults" : { + "backgroundColor" : "0f1b2aff", + "borderColor" : "7d8998ff" + }, + "selected" : { + "backgroundColor" : "539fe5ff", + "foregroundColor" : "000716ff" + } + }, + "lightMode" : { + "defaults" : { + "backgroundColor" : "ffffffff", + "borderColor" : "7d8998ff" + }, + "selected" : { + "backgroundColor" : "0972d3ff", + "foregroundColor" : "ffffffff" + } + } + }, + "statusIndicator" : { + "darkMode" : { + "error" : { + "backgroundColor" : "1a0000ff", + "borderColor" : "eb6f6fff", + "indicatorColor" : "eb6f6fff" + }, + "pending" : { + "indicatorColor" : "AAAAAAAA" + }, + "success" : { + "backgroundColor" : "001a02ff", + "borderColor" : "29ad32ff", + "indicatorColor" : "29ad32ff" + }, + "warning" : { + "backgroundColor" : "1d1906ff", + "borderColor" : "e0ca57ff", + "indicatorColor" : "e0ca57ff" + } + }, + "lightMode" : { + "error" : { + "backgroundColor" : "fff7f7ff", + "borderColor" : "d91515ff", + "indicatorColor" : "d91515ff" + }, + "pending" : { + "indicatorColor" : "AAAAAAAA" + }, + "success" : { + "backgroundColor" : "f2fcf3ff", + "borderColor" : "037f0cff", + "indicatorColor" : "037f0cff" + }, + "warning" : { + "backgroundColor" : "fffce9ff", + "borderColor" : "8d6605ff", + "indicatorColor" : "8d6605ff" + } + } + } + }, + "components" : { + "alert" : { + "borderRadius" : 12.0, + "darkMode" : { + "error" : { + "backgroundColor" : "1a0000ff", + "borderColor" : "eb6f6fff" + } + }, + "lightMode" : { + "error" : { + "backgroundColor" : "fff7f7ff", + "borderColor" : "d91515ff" + } + } + }, + "favicon" : { + "enabledTypes" : [ + "ICO", + "SVG" + ] + }, + "form" : { + "backgroundImage" : { + "enabled" : false + }, + "borderRadius" : 8.0, + "darkMode" : { + "backgroundColor" : "0f1b2aff", + "borderColor" : "424650ff" + }, + "lightMode" : { + "backgroundColor" : "ffffffff", + "borderColor" : "c6c6cdff" + }, + "logo" : { + "enabled" : false, + "formInclusion" : "IN", + "location" : "CENTER", + "position" : "TOP" + } + }, 
+ "idpButton" : { + "custom" : { + }, + "standard" : { + "darkMode" : { + "active" : { + "backgroundColor" : "354150ff", + "borderColor" : "89bdeeff", + "textColor" : "89bdeeff" + }, + "defaults" : { + "backgroundColor" : "0f1b2aff", + "borderColor" : "c6c6cdff", + "textColor" : "c6c6cdff" + }, + "hover" : { + "backgroundColor" : "192534ff", + "borderColor" : "89bdeeff", + "textColor" : "89bdeeff" + } + }, + "lightMode" : { + "active" : { + "backgroundColor" : "d3e7f9ff", + "borderColor" : "033160ff", + "textColor" : "033160ff" + }, + "defaults" : { + "backgroundColor" : "ffffffff", + "borderColor" : "424650ff", + "textColor" : "424650ff" + }, + "hover" : { + "backgroundColor" : "f2f8fdff", + "borderColor" : "033160ff", + "textColor" : "033160ff" + } + } + } + }, + "pageBackground" : { + "darkMode" : { + "color" : "0f1b2aff" + }, + "image" : { + "enabled" : true + }, + "lightMode" : { + "color" : "ffffffff" + } + }, + "pageFooter" : { + "backgroundImage" : { + "enabled" : false + }, + "darkMode" : { + "background" : { + "color" : "0f141aff" + }, + "borderColor" : "424650ff" + }, + "lightMode" : { + "background" : { + "color" : "fafafaff" + }, + "borderColor" : "d5dbdbff" + }, + "logo" : { + "enabled" : false, + "location" : "START" + } + }, + "pageHeader" : { + "backgroundImage" : { + "enabled" : false + }, + "darkMode" : { + "background" : { + "color" : "0f141aff" + }, + "borderColor" : "424650ff" + }, + "lightMode" : { + "background" : { + "color" : "fafafaff" + }, + "borderColor" : "d5dbdbff" + }, + "logo" : { + "enabled" : false, + "location" : "START" + } + }, + "pageText" : { + "darkMode" : { + "bodyColor" : "b6bec9ff", + "descriptionColor" : "b6bec9ff", + "headingColor" : "d1d5dbff" + }, + "lightMode" : { + "bodyColor" : "414d5cff", + "descriptionColor" : "414d5cff", + "headingColor" : "000716ff" + } + }, + "phoneNumberSelector" : { + "displayType" : "TEXT" + }, + "primaryButton" : { + "darkMode" : { + "active" : { + "backgroundColor" : "539fe5ff", + 
"textColor" : "000716ff" + }, + "defaults" : { + "backgroundColor" : "539fe5ff", + "textColor" : "000716ff" + }, + "disabled" : { + "backgroundColor" : "ffffffff", + "borderColor" : "ffffffff" + }, + "hover" : { + "backgroundColor" : "89bdeeff", + "textColor" : "000716ff" + } + }, + "lightMode" : { + "active" : { + "backgroundColor" : "033160ff", + "textColor" : "ffffffff" + }, + "defaults" : { + "backgroundColor" : "0972d3ff", + "textColor" : "ffffffff" + }, + "disabled" : { + "backgroundColor" : "ffffffff", + "borderColor" : "ffffffff" + }, + "hover" : { + "backgroundColor" : "033160ff", + "textColor" : "ffffffff" + } + } + }, + "secondaryButton" : { + "darkMode" : { + "active" : { + "backgroundColor" : "354150ff", + "borderColor" : "89bdeeff", + "textColor" : "89bdeeff" + }, + "defaults" : { + "backgroundColor" : "0f1b2aff", + "borderColor" : "539fe5ff", + "textColor" : "539fe5ff" + }, + "hover" : { + "backgroundColor" : "192534ff", + "borderColor" : "89bdeeff", + "textColor" : "89bdeeff" + } + }, + "lightMode" : { + "active" : { + "backgroundColor" : "d3e7f9ff", + "borderColor" : "033160ff", + "textColor" : "033160ff" + }, + "defaults" : { + "backgroundColor" : "ffffffff", + "borderColor" : "0972d3ff", + "textColor" : "0972d3ff" + }, + "hover" : { + "backgroundColor" : "f2f8fdff", + "borderColor" : "033160ff", + "textColor" : "033160ff" + } + } + } + } + }) +} +`) +} + +func testAccManagedLoginBrandingConfig_settingsUpdated(rName string) string { + return acctest.ConfigCompose(testAccManagedLoginBrandingConfig_base(rName), ` +resource "aws_cognito_managed_login_branding" "test" { + client_id = aws_cognito_user_pool_client.test.id + user_pool_id = aws_cognito_user_pool.test.id + + settings = jsonencode({ + "categories" : { + "auth" : { + "authMethodOrder" : [ + [ + { + "display" : "BUTTON", + "type" : "FEDERATED" + }, + { + "display" : "INPUT", + "type" : "USERNAME_PASSWORD" + } + ] + ], + "federation" : { + "interfaceStyle" : "BUTTON_LIST", + "order" : [ + ] + 
} + }, + "form" : { + "displayGraphics" : true, + "instructions" : { + "enabled" : false + }, + "languageSelector" : { + "enabled" : false + }, + "location" : { + "horizontal" : "CENTER", + "vertical" : "CENTER" + }, + "sessionTimerDisplay" : "NONE" + }, + "global" : { + "colorSchemeMode" : "DARK", + "pageFooter" : { + "enabled" : false + }, + "pageHeader" : { + "enabled" : false + }, + "spacingDensity" : "REGULAR" + }, + "signUp" : { + "acceptanceElements" : [ + { + "enforcement" : "NONE", + "textKey" : "en" + } + ] + } + }, + "componentClasses" : { + "buttons" : { + "borderRadius" : 8.0 + }, + "divider" : { + "darkMode" : { + "borderColor" : "232b37ff" + }, + "lightMode" : { + "borderColor" : "ebebf0ff" + } + }, + "dropDown" : { + "borderRadius" : 8.0, + "darkMode" : { + "defaults" : { + "itemBackgroundColor" : "192534ff" + }, + "hover" : { + "itemBackgroundColor" : "081120ff", + "itemBorderColor" : "5f6b7aff", + "itemTextColor" : "e9ebedff" + }, + "match" : { + "itemBackgroundColor" : "d1d5dbff", + "itemTextColor" : "89bdeeff" + } + }, + "lightMode" : { + "defaults" : { + "itemBackgroundColor" : "ffffffff" + }, + "hover" : { + "itemBackgroundColor" : "f4f4f4ff", + "itemBorderColor" : "7d8998ff", + "itemTextColor" : "000716ff" + }, + "match" : { + "itemBackgroundColor" : "414d5cff", + "itemTextColor" : "0972d3ff" + } + } + }, + "focusState" : { + "darkMode" : { + "borderColor" : "539fe5ff" + }, + "lightMode" : { + "borderColor" : "0972d3ff" + } + }, + "idpButtons" : { + "icons" : { + "enabled" : true + } + }, + "input" : { + "borderRadius" : 8.0, + "darkMode" : { + "defaults" : { + "backgroundColor" : "0f1b2aff", + "borderColor" : "5f6b7aff" + }, + "placeholderColor" : "8d99a8ff" + }, + "lightMode" : { + "defaults" : { + "backgroundColor" : "ffffffff", + "borderColor" : "7d8998ff" + }, + "placeholderColor" : "5f6b7aff" + } + }, + "inputDescription" : { + "darkMode" : { + "textColor" : "8d99a8ff" + }, + "lightMode" : { + "textColor" : "5f6b7aff" + } + }, + 
"inputLabel" : { + "darkMode" : { + "textColor" : "d1d5dbff" + }, + "lightMode" : { + "textColor" : "000716ff" + } + }, + "link" : { + "darkMode" : { + "defaults" : { + "textColor" : "539fe5ff" + }, + "hover" : { + "textColor" : "89bdeeff" + } + }, + "lightMode" : { + "defaults" : { + "textColor" : "0972d3ff" + }, + "hover" : { + "textColor" : "033160ff" + } + } + }, + "optionControls" : { + "darkMode" : { + "defaults" : { + "backgroundColor" : "0f1b2aff", + "borderColor" : "7d8998ff" + }, + "selected" : { + "backgroundColor" : "539fe5ff", + "foregroundColor" : "000716ff" + } + }, + "lightMode" : { + "defaults" : { + "backgroundColor" : "ffffffff", + "borderColor" : "7d8998ff" + }, + "selected" : { + "backgroundColor" : "0972d3ff", + "foregroundColor" : "ffffffff" + } + } + }, + "statusIndicator" : { + "darkMode" : { + "error" : { + "backgroundColor" : "1a0000ff", + "borderColor" : "eb6f6fff", + "indicatorColor" : "eb6f6fff" + }, + "pending" : { + "indicatorColor" : "AAAAAAAA" + }, + "success" : { + "backgroundColor" : "001a02ff", + "borderColor" : "29ad32ff", + "indicatorColor" : "29ad32ff" + }, + "warning" : { + "backgroundColor" : "1d1906ff", + "borderColor" : "e0ca57ff", + "indicatorColor" : "e0ca57ff" + } + }, + "lightMode" : { + "error" : { + "backgroundColor" : "fff7f7ff", + "borderColor" : "d91515ff", + "indicatorColor" : "d91515ff" + }, + "pending" : { + "indicatorColor" : "AAAAAAAA" + }, + "success" : { + "backgroundColor" : "f2fcf3ff", + "borderColor" : "037f0cff", + "indicatorColor" : "037f0cff" + }, + "warning" : { + "backgroundColor" : "fffce9ff", + "borderColor" : "8d6605ff", + "indicatorColor" : "8d6605ff" + } + } + } + }, + "components" : { + "alert" : { + "borderRadius" : 12.0, + "darkMode" : { + "error" : { + "backgroundColor" : "1a0000ff", + "borderColor" : "eb6f6fff" + } + }, + "lightMode" : { + "error" : { + "backgroundColor" : "fff7f7ff", + "borderColor" : "d91515ff" + } + } + }, + "favicon" : { + "enabledTypes" : [ + "ICO", + "SVG" + ] + }, 
+ "form" : { + "backgroundImage" : { + "enabled" : false + }, + "borderRadius" : 8.0, + "darkMode" : { + "backgroundColor" : "0f1b2aff", + "borderColor" : "424650ff" + }, + "lightMode" : { + "backgroundColor" : "ffffffff", + "borderColor" : "c6c6cdff" + }, + "logo" : { + "enabled" : false, + "formInclusion" : "IN", + "location" : "CENTER", + "position" : "TOP" + } + }, + "idpButton" : { + "custom" : { + }, + "standard" : { + "darkMode" : { + "active" : { + "backgroundColor" : "354150ff", + "borderColor" : "89bdeeff", + "textColor" : "89bdeeff" + }, + "defaults" : { + "backgroundColor" : "0f1b2aff", + "borderColor" : "c6c6cdff", + "textColor" : "c6c6cdff" + }, + "hover" : { + "backgroundColor" : "192534ff", + "borderColor" : "89bdeeff", + "textColor" : "89bdeeff" + } + }, + "lightMode" : { + "active" : { + "backgroundColor" : "d3e7f9ff", + "borderColor" : "033160ff", + "textColor" : "033160ff" + }, + "defaults" : { + "backgroundColor" : "ffffffff", + "borderColor" : "424650ff", + "textColor" : "424650ff" + }, + "hover" : { + "backgroundColor" : "f2f8fdff", + "borderColor" : "033160ff", + "textColor" : "033160ff" + } + } + } + }, + "pageBackground" : { + "darkMode" : { + "color" : "0f1b2aff" + }, + "image" : { + "enabled" : true + }, + "lightMode" : { + "color" : "ffffffff" + } + }, + "pageFooter" : { + "backgroundImage" : { + "enabled" : false + }, + "darkMode" : { + "background" : { + "color" : "0f141aff" + }, + "borderColor" : "424650ff" + }, + "lightMode" : { + "background" : { + "color" : "fafafaff" + }, + "borderColor" : "d5dbdbff" + }, + "logo" : { + "enabled" : false, + "location" : "START" + } + }, + "pageHeader" : { + "backgroundImage" : { + "enabled" : false + }, + "darkMode" : { + "background" : { + "color" : "0f141aff" + }, + "borderColor" : "424650ff" + }, + "lightMode" : { + "background" : { + "color" : "fafafaff" + }, + "borderColor" : "d5dbdbff" + }, + "logo" : { + "enabled" : false, + "location" : "START" + } + }, + "pageText" : { + "darkMode" : { + 
"bodyColor" : "b6bec9ff", + "descriptionColor" : "b6bec9ff", + "headingColor" : "d1d5dbff" + }, + "lightMode" : { + "bodyColor" : "414d5cff", + "descriptionColor" : "414d5cff", + "headingColor" : "000716ff" + } + }, + "phoneNumberSelector" : { + "displayType" : "TEXT" + }, + "primaryButton" : { + "darkMode" : { + "active" : { + "backgroundColor" : "539fe5ff", + "textColor" : "000716ff" + }, + "defaults" : { + "backgroundColor" : "539fe5ff", + "textColor" : "000716ff" + }, + "disabled" : { + "backgroundColor" : "ffffffff", + "borderColor" : "ffffffff" + }, + "hover" : { + "backgroundColor" : "89bdeeff", + "textColor" : "000716ff" + } + }, + "lightMode" : { + "active" : { + "backgroundColor" : "033160ff", + "textColor" : "ffffffff" + }, + "defaults" : { + "backgroundColor" : "0972d3ff", + "textColor" : "ffffffff" + }, + "disabled" : { + "backgroundColor" : "ffffffff", + "borderColor" : "ffffffff" + }, + "hover" : { + "backgroundColor" : "033160ff", + "textColor" : "ffffffff" + } + } + }, + "secondaryButton" : { + "darkMode" : { + "active" : { + "backgroundColor" : "354150ff", + "borderColor" : "89bdeeff", + "textColor" : "89bdeeff" + }, + "defaults" : { + "backgroundColor" : "0f1b2aff", + "borderColor" : "539fe5ff", + "textColor" : "539fe5ff" + }, + "hover" : { + "backgroundColor" : "192534ff", + "borderColor" : "89bdeeff", + "textColor" : "89bdeeff" + } + }, + "lightMode" : { + "active" : { + "backgroundColor" : "d3e7f9ff", + "borderColor" : "033160ff", + "textColor" : "033160ff" + }, + "defaults" : { + "backgroundColor" : "ffffffff", + "borderColor" : "0972d3ff", + "textColor" : "0972d3ff" + }, + "hover" : { + "backgroundColor" : "f2f8fdff", + "borderColor" : "033160ff", + "textColor" : "033160ff" + } + } + } + } + }) +} +`) +} + +func testAccManagedLoginBrandingConfig_multiple(rName string) string { + return fmt.Sprintf(` +resource "aws_cognito_user_pool" "test" { + name = %[1]q +} + +resource "aws_cognito_user_pool_client" "test0" { + name = "%[1]s-0" + 
user_pool_id = aws_cognito_user_pool.test.id + explicit_auth_flows = ["ADMIN_NO_SRP_AUTH"] +} + +resource "aws_cognito_user_pool_client" "test1" { + name = "%[1]s-1" + user_pool_id = aws_cognito_user_pool_client.test0.user_pool_id + explicit_auth_flows = ["ADMIN_NO_SRP_AUTH"] +} + +resource "aws_cognito_user_pool_client" "test2" { + name = "%[1]s-2" + user_pool_id = aws_cognito_user_pool_client.test1.user_pool_id + explicit_auth_flows = ["ADMIN_NO_SRP_AUTH"] +} + +resource "aws_cognito_managed_login_branding" "test1" { + # Cross over user pool client IDs to test read logic. + client_id = aws_cognito_user_pool_client.test2.id + user_pool_id = aws_cognito_user_pool.test.id + + use_cognito_provided_values = true +} + +resource "aws_cognito_managed_login_branding" "test2" { + client_id = aws_cognito_user_pool_client.test1.id + user_pool_id = aws_cognito_managed_login_branding.test1.user_pool_id + + use_cognito_provided_values = true +} +`, rName) +} diff --git a/internal/service/cognitoidp/managed_user_pool_client.go b/internal/service/cognitoidp/managed_user_pool_client.go index 129d9a0b3e8a..62e082bcb496 100644 --- a/internal/service/cognitoidp/managed_user_pool_client.go +++ b/internal/service/cognitoidp/managed_user_pool_client.go @@ -493,7 +493,7 @@ func (r *managedUserPoolClientResource) Create(ctx context.Context, request reso const ( timeout = 2 * time.Minute ) - output, err := tfresource.RetryWhenIsA[*awstypes.ConcurrentModificationException](ctx, timeout, func() (any, error) { + output, err := tfresource.RetryWhenIsA[any, *awstypes.ConcurrentModificationException](ctx, timeout, func(ctx context.Context) (any, error) { return conn.UpdateUserPoolClient(ctx, &input) }) if err != nil { @@ -605,7 +605,7 @@ func (r *managedUserPoolClientResource) Update(ctx context.Context, request reso const ( timeout = 2 * time.Minute ) - output, err := tfresource.RetryWhenIsA[*awstypes.ConcurrentModificationException](ctx, timeout, func() (any, error) { + output, err := 
tfresource.RetryWhenIsA[any, *awstypes.ConcurrentModificationException](ctx, timeout, func(ctx context.Context) (any, error) { return conn.UpdateUserPoolClient(ctx, &input) }) diff --git a/internal/service/cognitoidp/resource_server.go b/internal/service/cognitoidp/resource_server.go index ae3ed39ffe81..1aaf45d36291 100644 --- a/internal/service/cognitoidp/resource_server.go +++ b/internal/service/cognitoidp/resource_server.go @@ -45,7 +45,6 @@ func resourceResourceServer() *schema.Resource { names.AttrName: { Type: schema.TypeString, Required: true, - ForceNew: true, }, names.AttrScope: { Type: schema.TypeSet, diff --git a/internal/service/cognitoidp/resource_server_test.go b/internal/service/cognitoidp/resource_server_test.go index f11e3a691810..a5af2268be18 100644 --- a/internal/service/cognitoidp/resource_server_test.go +++ b/internal/service/cognitoidp/resource_server_test.go @@ -11,7 +11,11 @@ import ( awstypes "github.com/aws/aws-sdk-go-v2/service/cognitoidentityprovider/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/knownvalue" + "github.com/hashicorp/terraform-plugin-testing/plancheck" + "github.com/hashicorp/terraform-plugin-testing/statecheck" "github.com/hashicorp/terraform-plugin-testing/terraform" + "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" "github.com/hashicorp/terraform-provider-aws/internal/acctest" "github.com/hashicorp/terraform-provider-aws/internal/conns" tfcognitoidp "github.com/hashicorp/terraform-provider-aws/internal/service/cognitoidp" @@ -123,6 +127,51 @@ func TestAccCognitoIDPResourceServer_scope(t *testing.T) { }) } +func TestAccCognitoIDPResourceServer_nameChange(t *testing.T) { + ctx := acctest.Context(t) + var resourceServer awstypes.ResourceServerType + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + identifier := 
sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_cognito_resource_server.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheckIdentityProvider(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.CognitoIDPServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckResourceServerDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccResourceServerConfig_basic(identifier, rName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckResourceServerExists(ctx, resourceName, &resourceServer), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrName), knownvalue.StringExact(rName)), + }, + }, + { + Config: testAccResourceServerConfig_nameUpdate(identifier, rName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckResourceServerExists(ctx, resourceName, &resourceServer), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrName), knownvalue.StringExact(rName+" updated")), + }, + }, + }, + }) +} + func testAccCheckResourceServerExists(ctx context.Context, n string, v *awstypes.ResourceServerType) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] @@ -184,6 +233,20 @@ resource "aws_cognito_user_pool" "test" { `, identifier, rName) } +func testAccResourceServerConfig_nameUpdate(identifier, rName string) string { + return fmt.Sprintf(` +resource "aws_cognito_resource_server" "test" { + 
identifier = %[1]q + name = "%[2]s updated" + user_pool_id = aws_cognito_user_pool.test.id +} + +resource "aws_cognito_user_pool" "test" { + name = %[2]q +} +`, identifier, rName) +} + func testAccResourceServerConfig_scope(identifier, rName string) string { return fmt.Sprintf(` resource "aws_cognito_resource_server" "test" { diff --git a/internal/service/cognitoidp/risk_configuration.go b/internal/service/cognitoidp/risk_configuration.go index db27296c2ec2..e546ef12e0f5 100644 --- a/internal/service/cognitoidp/risk_configuration.go +++ b/internal/service/cognitoidp/risk_configuration.go @@ -115,7 +115,7 @@ func resourceRiskConfiguration() *schema.Resource { }, "notify_configuration": { Type: schema.TypeList, - Required: true, + Optional: true, MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ @@ -580,7 +580,7 @@ func flattenAccountTakeoverRiskConfigurationType(apiObject *awstypes.AccountTake } if v := apiObject.NotifyConfiguration; v != nil { - tfMap["notify_configuration"] = flattemNotifyConfigurationType(v) + tfMap["notify_configuration"] = flattenNotifyConfigurationType(v) } return []any{tfMap} @@ -698,7 +698,7 @@ func expandNotifyConfigurationType(tfList []any) *awstypes.NotifyConfigurationTy return apiObject } -func flattemNotifyConfigurationType(apiObject *awstypes.NotifyConfigurationType) []any { +func flattenNotifyConfigurationType(apiObject *awstypes.NotifyConfigurationType) []any { if apiObject == nil { return nil } diff --git a/internal/service/cognitoidp/risk_configuration_test.go b/internal/service/cognitoidp/risk_configuration_test.go index 88ab69c7652f..648b322eae6c 100644 --- a/internal/service/cognitoidp/risk_configuration_test.go +++ b/internal/service/cognitoidp/risk_configuration_test.go @@ -136,6 +136,43 @@ func TestAccCognitoIDPRiskConfiguration_compromised(t *testing.T) { }) } +func TestAccCognitoIDPRiskConfiguration_takeover_without_notification(t *testing.T) { + ctx := acctest.Context(t) + rName := 
sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_cognito_risk_configuration.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheckIdentityProvider(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.CognitoIDPServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckRiskConfigurationDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccRiskConfigurationConfig_takeover_without_notification(rName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckRiskConfigurationExists(ctx, resourceName), + resource.TestCheckResourceAttrPair(resourceName, names.AttrUserPoolID, "aws_cognito_user_pool.test", names.AttrID), + resource.TestCheckResourceAttr(resourceName, "account_takeover_risk_configuration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "account_takeover_risk_configuration.0.actions.#", "1"), + resource.TestCheckResourceAttr(resourceName, "account_takeover_risk_configuration.0.actions.0.medium_action.#", "1"), + resource.TestCheckResourceAttr(resourceName, "account_takeover_risk_configuration.0.actions.0.medium_action.0.event_action", "MFA_REQUIRED"), + resource.TestCheckResourceAttr(resourceName, "account_takeover_risk_configuration.0.actions.0.medium_action.0.notify", acctest.CtFalse), + resource.TestCheckResourceAttr(resourceName, "account_takeover_risk_configuration.0.actions.0.high_action.#", "1"), + resource.TestCheckResourceAttr(resourceName, "account_takeover_risk_configuration.0.actions.0.high_action.0.event_action", "BLOCK"), + resource.TestCheckResourceAttr(resourceName, "account_takeover_risk_configuration.0.actions.0.high_action.0.notify", acctest.CtFalse), + resource.TestCheckResourceAttr(resourceName, "compromised_credentials_risk_configuration.#", "0"), + resource.TestCheckResourceAttr(resourceName, "risk_exception_configuration.#", "0"), + ), + }, + { + ResourceName: resourceName, + 
ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + func TestAccCognitoIDPRiskConfiguration_disappears(t *testing.T) { ctx := acctest.Context(t) rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -404,3 +441,28 @@ resource "aws_cognito_user_pool" "test" { } `, rName) } + +func testAccRiskConfigurationConfig_takeover_without_notification(rName string) string { + return fmt.Sprintf(` +resource "aws_cognito_risk_configuration" "test" { + user_pool_id = aws_cognito_user_pool.test.id + + account_takeover_risk_configuration { + actions { + medium_action { + event_action = "MFA_REQUIRED" + notify = false + } + high_action { + event_action = "BLOCK" + notify = false + } + } + } +} + +resource "aws_cognito_user_pool" "test" { + name = %[1]q +} +`, rName) +} diff --git a/internal/service/cognitoidp/service_endpoint_resolver_gen.go b/internal/service/cognitoidp/service_endpoint_resolver_gen.go index 703a68b75e7d..16da1d321ff5 100644 --- a/internal/service/cognitoidp/service_endpoint_resolver_gen.go +++ b/internal/service/cognitoidp/service_endpoint_resolver_gen.go @@ -62,7 +62,7 @@ func (r resolverV2) ResolveEndpoint(ctx context.Context, params cognitoidentityp }) params.UseFIPS = aws.Bool(false) } else { - err = fmt.Errorf("looking up cognitoidentityprovider endpoint %q: %s", hostname, err) + err = fmt.Errorf("looking up cognitoidentityprovider endpoint %q: %w", hostname, err) return } } else { diff --git a/internal/service/cognitoidp/service_endpoints_gen_test.go b/internal/service/cognitoidp/service_endpoints_gen_test.go index 08f7d88fda4e..843d3d3c83d5 100644 --- a/internal/service/cognitoidp/service_endpoints_gen_test.go +++ b/internal/service/cognitoidp/service_endpoints_gen_test.go @@ -603,7 +603,7 @@ func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { ) } -var errCancelOperation = fmt.Errorf("Test: Canceling request") +var errCancelOperation = errors.New("Test: Canceling request") func addCancelRequestMiddleware() 
func(*middleware.Stack) error { return func(stack *middleware.Stack) error { diff --git a/internal/service/cognitoidp/service_package_gen.go b/internal/service/cognitoidp/service_package_gen.go index 4cf039c90732..af7e5a2a5c98 100644 --- a/internal/service/cognitoidp/service_package_gen.go +++ b/internal/service/cognitoidp/service_package_gen.go @@ -7,7 +7,6 @@ import ( "unique" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/cognitoidentityprovider" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -43,6 +42,22 @@ func (p *servicePackage) FrameworkDataSources(ctx context.Context) []*inttypes.S func (p *servicePackage) FrameworkResources(ctx context.Context) []*inttypes.ServicePackageFrameworkResource { return []*inttypes.ServicePackageFrameworkResource{ + { + Factory: newLogDeliveryConfigurationResource, + TypeName: "aws_cognito_log_delivery_configuration", + Name: "Log Delivery Configuration", + Region: unique.Make(inttypes.ResourceRegionDefault()), + Identity: inttypes.RegionalSingleParameterIdentity(names.AttrUserPoolID), + Import: inttypes.FrameworkImport{ + WrappedImport: true, + }, + }, + { + Factory: newManagedLoginBrandingResource, + TypeName: "aws_cognito_managed_login_branding", + Name: "Managed Login Branding", + Region: unique.Make(inttypes.ResourceRegionDefault()), + }, { Factory: newManagedUserPoolClientResource, TypeName: "aws_cognito_managed_user_pool_client", @@ -172,7 +187,7 @@ func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) ( func(o *cognitoidentityprovider.Options) { if inContext, ok := conns.FromContext(ctx); ok && inContext.VCREnabled() { tflog.Info(ctx, "overriding retry behavior to immediately return VCR errors") - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(vcr.InteractionNotFoundRetryableFunc)) + o.Retryer = 
conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), vcr.InteractionNotFoundRetryableFunc) } }, withExtraOptions(ctx, p, config), diff --git a/internal/service/cognitoidp/sweep.go b/internal/service/cognitoidp/sweep.go index c9526aeac9c7..8ebe3ebd9136 100644 --- a/internal/service/cognitoidp/sweep.go +++ b/internal/service/cognitoidp/sweep.go @@ -4,56 +4,36 @@ package cognitoidp import ( - "fmt" + "context" "log" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/cognitoidentityprovider" awstypes "github.com/aws/aws-sdk-go-v2/service/cognitoidentityprovider/types" - "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/sweep" "github.com/hashicorp/terraform-provider-aws/internal/sweep/awsv2" "github.com/hashicorp/terraform-provider-aws/names" ) func RegisterSweepers() { - resource.AddTestSweepers("aws_cognito_user_pool_domain", &resource.Sweeper{ - Name: "aws_cognito_user_pool_domain", - F: sweepUserPoolDomains, - }) - - resource.AddTestSweepers("aws_cognito_user_pool", &resource.Sweeper{ - Name: "aws_cognito_user_pool", - F: sweepUserPools, - Dependencies: []string{ - "aws_cognito_user_pool_domain", - }, - }) + awsv2.Register("aws_cognito_user_pool_domain", sweepUserPoolDomains) + awsv2.Register("aws_cognito_user_pool", sweepUserPools, "aws_cognito_user_pool_domain") } -func sweepUserPoolDomains(region string) error { - ctx := sweep.Context(region) - client, err := sweep.SharedRegionalSweepClient(ctx, region) - if err != nil { - return fmt.Errorf("Error getting client: %s", err) - } - input := &cognitoidentityprovider.ListUserPoolsInput{ +func sweepUserPoolDomains(ctx context.Context, client *conns.AWSClient) ([]sweep.Sweepable, error) { + conn := client.CognitoIDPClient(ctx) + input := cognitoidentityprovider.ListUserPoolsInput{ MaxResults: aws.Int32(50), } - conn := client.CognitoIDPClient(ctx) sweepResources 
:= make([]sweep.Sweepable, 0) - pages := cognitoidentityprovider.NewListUserPoolsPaginator(conn, input) + pages := cognitoidentityprovider.NewListUserPoolsPaginator(conn, &input) for pages.HasMorePages() { page, err := pages.NextPage(ctx) - if awsv2.SkipSweepError(err) { - log.Printf("[WARN] Skipping Cognito User Pool Domain sweep for %s: %s", region, err) - return nil - } - if err != nil { - return fmt.Errorf("error listing Cognito User Pools (%s): %w", region, err) + return nil, err } for _, v := range page.UserPools { @@ -75,38 +55,22 @@ func sweepUserPoolDomains(region string) error { } } - err = sweep.SweepOrchestrator(ctx, sweepResources) - - if err != nil { - return fmt.Errorf("error sweeping Cognito User Pool Domains (%s): %w", region, err) - } - - return nil + return sweepResources, nil } -func sweepUserPools(region string) error { - ctx := sweep.Context(region) - client, err := sweep.SharedRegionalSweepClient(ctx, region) - if err != nil { - return fmt.Errorf("Error getting client: %s", err) - } - input := &cognitoidentityprovider.ListUserPoolsInput{ +func sweepUserPools(ctx context.Context, client *conns.AWSClient) ([]sweep.Sweepable, error) { + conn := client.CognitoIDPClient(ctx) + input := cognitoidentityprovider.ListUserPoolsInput{ MaxResults: aws.Int32(50), } - conn := client.CognitoIDPClient(ctx) sweepResources := make([]sweep.Sweepable, 0) - pages := cognitoidentityprovider.NewListUserPoolsPaginator(conn, input) + pages := cognitoidentityprovider.NewListUserPoolsPaginator(conn, &input) for pages.HasMorePages() { page, err := pages.NextPage(ctx) - if awsv2.SkipSweepError(err) { - log.Printf("[WARN] Skipping Cognito User Pool sweep for %s: %s", region, err) - return nil - } - if err != nil { - return fmt.Errorf("error listing Cognito User Pools (%s): %w", region, err) + return nil, err } for _, v := range page.UserPools { @@ -130,11 +94,5 @@ func sweepUserPools(region string) error { } } - err = sweep.SweepOrchestrator(ctx, sweepResources) - - if 
err != nil { - return fmt.Errorf("error sweeping Cognito User Pools (%s): %w", region, err) - } - - return nil + return sweepResources, nil } diff --git a/internal/service/cognitoidp/tags_gen.go b/internal/service/cognitoidp/tags_gen.go index 5f673313b101..7be78eed34f7 100644 --- a/internal/service/cognitoidp/tags_gen.go +++ b/internal/service/cognitoidp/tags_gen.go @@ -3,8 +3,8 @@ package cognitoidp import ( "context" - "fmt" + "github.com/YakDriver/smarterr" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/cognitoidentityprovider" "github.com/hashicorp/terraform-plugin-log/tflog" @@ -26,7 +26,7 @@ func listTags(ctx context.Context, conn *cognitoidentityprovider.Client, identif output, err := conn.ListTagsForResource(ctx, &input, optFns...) if err != nil { - return tftags.New(ctx, nil), err + return tftags.New(ctx, nil), smarterr.NewError(err) } return keyValueTags(ctx, output.Tags), nil @@ -38,7 +38,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri tags, err := listTags(ctx, meta.(*conns.AWSClient).CognitoIDPClient(ctx), identifier) if err != nil { - return err + return smarterr.NewError(err) } if inContext, ok := tftags.FromContext(ctx); ok { @@ -99,7 +99,7 @@ func updateTags(ctx context.Context, conn *cognitoidentityprovider.Client, ident _, err := conn.UntagResource(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("untagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } @@ -114,7 +114,7 @@ func updateTags(ctx context.Context, conn *cognitoidentityprovider.Client, ident _, err := conn.TagResource(ctx, &input, optFns...) 
if err != nil { - return fmt.Errorf("tagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } diff --git a/internal/service/cognitoidp/test-fixtures/login_branding_asset.svg b/internal/service/cognitoidp/test-fixtures/login_branding_asset.svg new file mode 100644 index 000000000000..ce0d19364a59 --- /dev/null +++ b/internal/service/cognitoidp/test-fixtures/login_branding_asset.svg @@ -0,0 +1,15 @@ + + + + + + + + + + + + + + + diff --git a/internal/service/cognitoidp/testdata/LogDeliveryConfiguration/basic/main_gen.tf b/internal/service/cognitoidp/testdata/LogDeliveryConfiguration/basic/main_gen.tf new file mode 100644 index 000000000000..821c4f284a91 --- /dev/null +++ b/internal/service/cognitoidp/testdata/LogDeliveryConfiguration/basic/main_gen.tf @@ -0,0 +1,29 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resource "aws_cognito_log_delivery_configuration" "test" { + user_pool_id = aws_cognito_user_pool.test.id + + log_configurations { + event_source = "userNotification" + log_level = "ERROR" + + cloud_watch_logs_configuration { + log_group_arn = aws_cloudwatch_log_group.test.arn + } + } +} + +resource "aws_cognito_user_pool" "test" { + name = var.rName +} + +resource "aws_cloudwatch_log_group" "test" { + name = var.rName +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} diff --git a/internal/service/cognitoidp/testdata/LogDeliveryConfiguration/basic_v6.3.0/main_gen.tf b/internal/service/cognitoidp/testdata/LogDeliveryConfiguration/basic_v6.3.0/main_gen.tf new file mode 100644 index 000000000000..d4cc152fe0ee --- /dev/null +++ b/internal/service/cognitoidp/testdata/LogDeliveryConfiguration/basic_v6.3.0/main_gen.tf @@ -0,0 +1,39 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_cognito_log_delivery_configuration" "test" { + user_pool_id = aws_cognito_user_pool.test.id + + log_configurations { + event_source = "userNotification" + log_level = "ERROR" + + cloud_watch_logs_configuration { + log_group_arn = aws_cloudwatch_log_group.test.arn + } + } +} + +resource "aws_cognito_user_pool" "test" { + name = var.rName +} + +resource "aws_cloudwatch_log_group" "test" { + name = var.rName +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "6.3.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/cognitoidp/testdata/LogDeliveryConfiguration/region_override/main_gen.tf b/internal/service/cognitoidp/testdata/LogDeliveryConfiguration/region_override/main_gen.tf new file mode 100644 index 000000000000..0c45b7028ef7 --- /dev/null +++ b/internal/service/cognitoidp/testdata/LogDeliveryConfiguration/region_override/main_gen.tf @@ -0,0 +1,41 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_cognito_log_delivery_configuration" "test" { + region = var.region + + user_pool_id = aws_cognito_user_pool.test.id + + log_configurations { + event_source = "userNotification" + log_level = "ERROR" + + cloud_watch_logs_configuration { + log_group_arn = aws_cloudwatch_log_group.test.arn + } + } +} + +resource "aws_cognito_user_pool" "test" { + region = var.region + + name = var.rName +} + +resource "aws_cloudwatch_log_group" "test" { + region = var.region + + name = var.rName +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} + +variable "region" { + description = "Region to deploy resource in" + type = string + nullable = false +} diff --git a/internal/service/cognitoidp/testdata/tmpl/log_delivery_configuration_basic.gtpl b/internal/service/cognitoidp/testdata/tmpl/log_delivery_configuration_basic.gtpl new file mode 100644 index 000000000000..0c4a98da39a8 --- /dev/null +++ b/internal/service/cognitoidp/testdata/tmpl/log_delivery_configuration_basic.gtpl @@ -0,0 +1,23 @@ +resource "aws_cognito_log_delivery_configuration" "test" { + {{- template "region" }} + user_pool_id = aws_cognito_user_pool.test.id + + log_configurations { + event_source = "userNotification" + log_level = "ERROR" + + cloud_watch_logs_configuration { + log_group_arn = aws_cloudwatch_log_group.test.arn + } + } +} + +resource "aws_cognito_user_pool" "test" { + {{- template "region" }} + name = var.rName +} + +resource "aws_cloudwatch_log_group" "test" { + {{- template "region" }} + name = var.rName +} diff --git a/internal/service/cognitoidp/user_pool.go b/internal/service/cognitoidp/user_pool.go index 4ca268753a3f..73aa71eca73b 100644 --- a/internal/service/cognitoidp/user_pool.go +++ b/internal/service/cognitoidp/user_pool.go @@ -395,7 +395,6 @@ func resourceUserPool() *schema.Resource { names.AttrName: { Type: schema.TypeString, Required: true, - ForceNew: true, ValidateFunc: validation.Any( 
validation.StringLenBetween(1, 128), validation.StringMatch(regexache.MustCompile(`[\w\s+=,.@-]+`), @@ -860,7 +859,7 @@ func resourceUserPoolCreate(ctx context.Context, d *schema.ResourceData, meta an input.UserPoolTier = v } - outputRaw, err := tfresource.RetryWhen(ctx, propagationTimeout, func() (any, error) { + outputRaw, err := tfresource.RetryWhen(ctx, propagationTimeout, func(ctx context.Context) (any, error) { return conn.CreateUserPool(ctx, input) }, userPoolErrorRetryable) @@ -880,7 +879,7 @@ func resourceUserPoolCreate(ctx context.Context, d *schema.ResourceData, meta an input.SoftwareTokenMfaConfiguration = expandSoftwareTokenMFAConfigType(d.Get("software_token_mfa_configuration").([]any)) } - if v := d.Get("email_mfa_configuration").([]any); len(v) > 0 && v[0] != nil { + if v, ok := d.Get("email_mfa_configuration").([]any); ok && len(v) > 0 { input.EmailMfaConfiguration = expandEmailMFAConfigType(v) } @@ -898,7 +897,7 @@ func resourceUserPoolCreate(ctx context.Context, d *schema.ResourceData, meta an input.WebAuthnConfiguration = expandWebAuthnConfigurationConfigType(webAuthnConfig) } - _, err := tfresource.RetryWhen(ctx, propagationTimeout, func() (any, error) { + _, err := tfresource.RetryWhen(ctx, propagationTimeout, func(ctx context.Context) (any, error) { return conn.SetUserPoolMfaConfig(ctx, input) }, userPoolErrorRetryable) @@ -1046,7 +1045,7 @@ func resourceUserPoolUpdate(ctx context.Context, d *schema.ResourceData, meta an } } - _, err := tfresource.RetryWhen(ctx, propagationTimeout, func() (any, error) { + _, err := tfresource.RetryWhen(ctx, propagationTimeout, func(ctx context.Context) (any, error) { return conn.SetUserPoolMfaConfig(ctx, input) }, userPoolErrorRetryable) @@ -1069,6 +1068,7 @@ func resourceUserPoolUpdate(ctx context.Context, d *schema.ResourceData, meta an "email_verification_message", "email_verification_subject", "lambda_config", + names.AttrName, "password_policy", "sign_in_policy", "sms_authentication_message", @@ -1152,6 
+1152,10 @@ func resourceUserPoolUpdate(ctx context.Context, d *schema.ResourceData, meta an input.MfaConfiguration = awstypes.UserPoolMfaType(v.(string)) } + if v, ok := d.GetOk(names.AttrName); ok { + input.PoolName = aws.String(v.(string)) + } + if v, ok := d.GetOk("password_policy"); ok { if v, ok := v.([]any)[0].(map[string]any); ok && v != nil { passwordPolicy := expandPasswordPolicyType(v) @@ -1222,7 +1226,7 @@ func resourceUserPoolUpdate(ctx context.Context, d *schema.ResourceData, meta an } _, err := tfresource.RetryWhen(ctx, propagationTimeout, - func() (any, error) { + func(ctx context.Context) (any, error) { return conn.UpdateUserPool(ctx, input) }, func(err error) (bool, error) { @@ -1352,10 +1356,14 @@ func findUserPoolMFAConfigByID(ctx context.Context, conn *cognitoidentityprovide } func expandEmailMFAConfigType(tfList []any) *awstypes.EmailMfaConfigType { - if len(tfList) == 0 || tfList[0] == nil { + if len(tfList) == 0 { return nil } + if tfList[0] == nil { + return &awstypes.EmailMfaConfigType{} + } + tfMap := tfList[0].(map[string]any) apiObject := &awstypes.EmailMfaConfigType{} diff --git a/internal/service/cognitoidp/user_pool_client.go b/internal/service/cognitoidp/user_pool_client.go index c1da9de2fe57..ebc38a887c5f 100644 --- a/internal/service/cognitoidp/user_pool_client.go +++ b/internal/service/cognitoidp/user_pool_client.go @@ -472,7 +472,7 @@ func (r *userPoolClientResource) Update(ctx context.Context, request resource.Up const ( timeout = 2 * time.Minute ) - output, err := tfresource.RetryWhenIsA[*awstypes.ConcurrentModificationException](ctx, timeout, func() (any, error) { + output, err := tfresource.RetryWhenIsA[any, *awstypes.ConcurrentModificationException](ctx, timeout, func(ctx context.Context) (any, error) { return conn.UpdateUserPoolClient(ctx, &input) }) diff --git a/internal/service/cognitoidp/user_pool_data_source_tags_gen_test.go b/internal/service/cognitoidp/user_pool_data_source_tags_gen_test.go index 
35db529946c7..98eeeff2a1f3 100644 --- a/internal/service/cognitoidp/user_pool_data_source_tags_gen_test.go +++ b/internal/service/cognitoidp/user_pool_data_source_tags_gen_test.go @@ -8,7 +8,6 @@ import ( "unique" "github.com/hashicorp/terraform-plugin-testing/config" - sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/knownvalue" "github.com/hashicorp/terraform-plugin-testing/statecheck" @@ -22,10 +21,11 @@ import ( func TestAccCognitoIDPUserPoolDataSource_tags(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_cognito_user_pool.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.CognitoIDPServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -50,10 +50,11 @@ func TestAccCognitoIDPUserPoolDataSource_tags(t *testing.T) { func TestAccCognitoIDPUserPoolDataSource_tags_NullMap(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_cognito_user_pool.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.CognitoIDPServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -74,10 +75,11 @@ func TestAccCognitoIDPUserPoolDataSource_tags_NullMap(t *testing.T) { func TestAccCognitoIDPUserPoolDataSource_tags_EmptyMap(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_cognito_user_pool.test" - rName := 
sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.CognitoIDPServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -98,10 +100,11 @@ func TestAccCognitoIDPUserPoolDataSource_tags_EmptyMap(t *testing.T) { func TestAccCognitoIDPUserPoolDataSource_tags_DefaultTags_nonOverlapping(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_cognito_user_pool.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.CognitoIDPServiceID), Steps: []resource.TestStep{ @@ -130,10 +133,11 @@ func TestAccCognitoIDPUserPoolDataSource_tags_DefaultTags_nonOverlapping(t *test func TestAccCognitoIDPUserPoolDataSource_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_cognito_user_pool.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.CognitoIDPServiceID), Steps: []resource.TestStep{ @@ -168,10 +172,11 @@ func TestAccCognitoIDPUserPoolDataSource_tags_IgnoreTags_Overlap_DefaultTag(t *t func TestAccCognitoIDPUserPoolDataSource_tags_IgnoreTags_Overlap_ResourceTag(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_cognito_user_pool.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := 
acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.CognitoIDPServiceID), Steps: []resource.TestStep{ diff --git a/internal/service/cognitoidp/user_pool_data_source_test.go b/internal/service/cognitoidp/user_pool_data_source_test.go index c881fa66d209..f0e2aba80205 100644 --- a/internal/service/cognitoidp/user_pool_data_source_test.go +++ b/internal/service/cognitoidp/user_pool_data_source_test.go @@ -153,7 +153,7 @@ func testSchemaAttributes(n string) resource.TestCheckFunc { } numAttributes, err := strconv.Atoi(numAttributesStr) if err != nil { - return fmt.Errorf("error parsing schema_attributes.#: %s", err) + return fmt.Errorf("error parsing schema_attributes.#: %w", err) } // Loop through the schema_attributes and check the mutable key in each attribute diff --git a/internal/service/cognitoidp/user_pool_tags_gen_test.go b/internal/service/cognitoidp/user_pool_tags_gen_test.go index 8404271ef55f..bba310e219c3 100644 --- a/internal/service/cognitoidp/user_pool_tags_gen_test.go +++ b/internal/service/cognitoidp/user_pool_tags_gen_test.go @@ -7,7 +7,6 @@ import ( awstypes "github.com/aws/aws-sdk-go-v2/service/cognitoidentityprovider/types" "github.com/hashicorp/terraform-plugin-testing/config" - sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/knownvalue" "github.com/hashicorp/terraform-plugin-testing/plancheck" @@ -19,11 +18,12 @@ import ( func TestAccCognitoIDPUserPool_tags(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.UserPoolType resourceName := "aws_cognito_user_pool.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, 
resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.CognitoIDPServiceID), CheckDestroy: testAccCheckUserPoolDestroy(ctx), @@ -201,11 +201,12 @@ func TestAccCognitoIDPUserPool_tags(t *testing.T) { func TestAccCognitoIDPUserPool_tags_null(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.UserPoolType resourceName := "aws_cognito_user_pool.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.CognitoIDPServiceID), CheckDestroy: testAccCheckUserPoolDestroy(ctx), @@ -268,11 +269,12 @@ func TestAccCognitoIDPUserPool_tags_null(t *testing.T) { func TestAccCognitoIDPUserPool_tags_EmptyMap(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.UserPoolType resourceName := "aws_cognito_user_pool.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.CognitoIDPServiceID), CheckDestroy: testAccCheckUserPoolDestroy(ctx), @@ -331,11 +333,12 @@ func TestAccCognitoIDPUserPool_tags_EmptyMap(t *testing.T) { func TestAccCognitoIDPUserPool_tags_AddOnUpdate(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.UserPoolType resourceName := "aws_cognito_user_pool.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: 
acctest.ErrorCheck(t, names.CognitoIDPServiceID), CheckDestroy: testAccCheckUserPoolDestroy(ctx), @@ -412,11 +415,12 @@ func TestAccCognitoIDPUserPool_tags_AddOnUpdate(t *testing.T) { func TestAccCognitoIDPUserPool_tags_EmptyTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.UserPoolType resourceName := "aws_cognito_user_pool.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.CognitoIDPServiceID), CheckDestroy: testAccCheckUserPoolDestroy(ctx), @@ -501,11 +505,12 @@ func TestAccCognitoIDPUserPool_tags_EmptyTag_OnCreate(t *testing.T) { func TestAccCognitoIDPUserPool_tags_EmptyTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.UserPoolType resourceName := "aws_cognito_user_pool.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.CognitoIDPServiceID), CheckDestroy: testAccCheckUserPoolDestroy(ctx), @@ -638,11 +643,12 @@ func TestAccCognitoIDPUserPool_tags_EmptyTag_OnUpdate_Add(t *testing.T) { func TestAccCognitoIDPUserPool_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.UserPoolType resourceName := "aws_cognito_user_pool.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.CognitoIDPServiceID), CheckDestroy: 
testAccCheckUserPoolDestroy(ctx), @@ -727,11 +733,12 @@ func TestAccCognitoIDPUserPool_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { func TestAccCognitoIDPUserPool_tags_DefaultTags_providerOnly(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.UserPoolType resourceName := "aws_cognito_user_pool.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.CognitoIDPServiceID), CheckDestroy: testAccCheckUserPoolDestroy(ctx), @@ -908,11 +915,12 @@ func TestAccCognitoIDPUserPool_tags_DefaultTags_providerOnly(t *testing.T) { func TestAccCognitoIDPUserPool_tags_DefaultTags_nonOverlapping(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.UserPoolType resourceName := "aws_cognito_user_pool.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.CognitoIDPServiceID), CheckDestroy: testAccCheckUserPoolDestroy(ctx), @@ -1068,11 +1076,12 @@ func TestAccCognitoIDPUserPool_tags_DefaultTags_nonOverlapping(t *testing.T) { func TestAccCognitoIDPUserPool_tags_DefaultTags_overlapping(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.UserPoolType resourceName := "aws_cognito_user_pool.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.CognitoIDPServiceID), CheckDestroy: 
testAccCheckUserPoolDestroy(ctx), @@ -1244,11 +1253,12 @@ func TestAccCognitoIDPUserPool_tags_DefaultTags_overlapping(t *testing.T) { func TestAccCognitoIDPUserPool_tags_DefaultTags_updateToProviderOnly(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.UserPoolType resourceName := "aws_cognito_user_pool.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.CognitoIDPServiceID), CheckDestroy: testAccCheckUserPoolDestroy(ctx), @@ -1334,11 +1344,12 @@ func TestAccCognitoIDPUserPool_tags_DefaultTags_updateToProviderOnly(t *testing. func TestAccCognitoIDPUserPool_tags_DefaultTags_updateToResourceOnly(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.UserPoolType resourceName := "aws_cognito_user_pool.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.CognitoIDPServiceID), CheckDestroy: testAccCheckUserPoolDestroy(ctx), @@ -1423,11 +1434,12 @@ func TestAccCognitoIDPUserPool_tags_DefaultTags_updateToResourceOnly(t *testing. 
func TestAccCognitoIDPUserPool_tags_DefaultTags_emptyResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.UserPoolType resourceName := "aws_cognito_user_pool.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.CognitoIDPServiceID), CheckDestroy: testAccCheckUserPoolDestroy(ctx), @@ -1488,11 +1500,12 @@ func TestAccCognitoIDPUserPool_tags_DefaultTags_emptyResourceTag(t *testing.T) { func TestAccCognitoIDPUserPool_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.UserPoolType resourceName := "aws_cognito_user_pool.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.CognitoIDPServiceID), CheckDestroy: testAccCheckUserPoolDestroy(ctx), @@ -1545,11 +1558,12 @@ func TestAccCognitoIDPUserPool_tags_DefaultTags_emptyProviderOnlyTag(t *testing. 
func TestAccCognitoIDPUserPool_tags_DefaultTags_nullOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.UserPoolType resourceName := "aws_cognito_user_pool.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.CognitoIDPServiceID), CheckDestroy: testAccCheckUserPoolDestroy(ctx), @@ -1607,11 +1621,12 @@ func TestAccCognitoIDPUserPool_tags_DefaultTags_nullOverlappingResourceTag(t *te func TestAccCognitoIDPUserPool_tags_DefaultTags_nullNonOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.UserPoolType resourceName := "aws_cognito_user_pool.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.CognitoIDPServiceID), CheckDestroy: testAccCheckUserPoolDestroy(ctx), @@ -1669,11 +1684,12 @@ func TestAccCognitoIDPUserPool_tags_DefaultTags_nullNonOverlappingResourceTag(t func TestAccCognitoIDPUserPool_tags_ComputedTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.UserPoolType resourceName := "aws_cognito_user_pool.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.CognitoIDPServiceID), CheckDestroy: testAccCheckUserPoolDestroy(ctx), @@ -1724,11 +1740,12 @@ func TestAccCognitoIDPUserPool_tags_ComputedTag_OnCreate(t 
*testing.T) { func TestAccCognitoIDPUserPool_tags_ComputedTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.UserPoolType resourceName := "aws_cognito_user_pool.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.CognitoIDPServiceID), CheckDestroy: testAccCheckUserPoolDestroy(ctx), @@ -1821,11 +1838,12 @@ func TestAccCognitoIDPUserPool_tags_ComputedTag_OnUpdate_Add(t *testing.T) { func TestAccCognitoIDPUserPool_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.UserPoolType resourceName := "aws_cognito_user_pool.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.CognitoIDPServiceID), CheckDestroy: testAccCheckUserPoolDestroy(ctx), @@ -1908,11 +1926,12 @@ func TestAccCognitoIDPUserPool_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { func TestAccCognitoIDPUserPool_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.UserPoolType resourceName := "aws_cognito_user_pool.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.CognitoIDPServiceID), CheckDestroy: testAccCheckUserPoolDestroy(ctx), @@ -2070,11 +2089,12 @@ func TestAccCognitoIDPUserPool_tags_IgnoreTags_Overlap_DefaultTag(t 
*testing.T) func TestAccCognitoIDPUserPool_tags_IgnoreTags_Overlap_ResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.UserPoolType resourceName := "aws_cognito_user_pool.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.CognitoIDPServiceID), CheckDestroy: testAccCheckUserPoolDestroy(ctx), diff --git a/internal/service/cognitoidp/user_pool_test.go b/internal/service/cognitoidp/user_pool_test.go index 811f10ffba8d..869878e394a1 100644 --- a/internal/service/cognitoidp/user_pool_test.go +++ b/internal/service/cognitoidp/user_pool_test.go @@ -14,8 +14,11 @@ import ( awstypes "github.com/aws/aws-sdk-go-v2/service/cognitoidentityprovider/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/knownvalue" "github.com/hashicorp/terraform-plugin-testing/plancheck" + "github.com/hashicorp/terraform-plugin-testing/statecheck" "github.com/hashicorp/terraform-plugin-testing/terraform" + "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" "github.com/hashicorp/terraform-provider-aws/internal/acctest" "github.com/hashicorp/terraform-provider-aws/internal/conns" tfcognitoidp "github.com/hashicorp/terraform-provider-aws/internal/service/cognitoidp" @@ -474,6 +477,7 @@ func TestAccCognitoIDPUserPool_MFA_sms(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "sms_configuration.0.external_id", "test"), resource.TestCheckResourceAttrPair(resourceName, "sms_configuration.0.sns_caller_arn", iamRoleResourceName, names.AttrARN), resource.TestCheckResourceAttr(resourceName, "software_token_mfa_configuration.#", "0"), + 
resource.TestCheckResourceAttr(resourceName, "email_mfa_configuration.#", "0"), ), }, { @@ -487,6 +491,7 @@ func TestAccCognitoIDPUserPool_MFA_sms(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "mfa_configuration", "OFF"), resource.TestCheckResourceAttr(resourceName, "sms_configuration.#", "1"), resource.TestCheckResourceAttr(resourceName, "software_token_mfa_configuration.#", "0"), + resource.TestCheckResourceAttr(resourceName, "email_mfa_configuration.#", "0"), ), }, { @@ -497,6 +502,7 @@ func TestAccCognitoIDPUserPool_MFA_sms(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "sms_configuration.0.external_id", "test"), resource.TestCheckResourceAttrPair(resourceName, "sms_configuration.0.sns_caller_arn", iamRoleResourceName, names.AttrARN), resource.TestCheckResourceAttr(resourceName, "software_token_mfa_configuration.#", "0"), + resource.TestCheckResourceAttr(resourceName, "email_mfa_configuration.#", "0"), ), }, }, @@ -526,6 +532,7 @@ func TestAccCognitoIDPUserPool_MFA_smsAndSoftwareTokenMFA(t *testing.T) { resource.TestCheckResourceAttrPair(resourceName, "sms_configuration.0.sns_caller_arn", iamRoleResourceName, names.AttrARN), resource.TestCheckResourceAttr(resourceName, "software_token_mfa_configuration.#", "1"), resource.TestCheckResourceAttr(resourceName, "software_token_mfa_configuration.0.enabled", acctest.CtFalse), + resource.TestCheckResourceAttr(resourceName, "email_mfa_configuration.#", "0"), ), }, { @@ -542,6 +549,7 @@ func TestAccCognitoIDPUserPool_MFA_smsAndSoftwareTokenMFA(t *testing.T) { resource.TestCheckResourceAttrPair(resourceName, "sms_configuration.0.sns_caller_arn", iamRoleResourceName, names.AttrARN), resource.TestCheckResourceAttr(resourceName, "software_token_mfa_configuration.#", "1"), resource.TestCheckResourceAttr(resourceName, "software_token_mfa_configuration.0.enabled", acctest.CtTrue), + resource.TestCheckResourceAttr(resourceName, "email_mfa_configuration.#", "0"), ), }, { @@ -550,6 +558,7 @@ func 
TestAccCognitoIDPUserPool_MFA_smsAndSoftwareTokenMFA(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "mfa_configuration", "OFF"), resource.TestCheckResourceAttr(resourceName, "sms_configuration.#", "1"), resource.TestCheckResourceAttr(resourceName, "software_token_mfa_configuration.#", "0"), + resource.TestCheckResourceAttr(resourceName, "email_mfa_configuration.#", "0"), ), }, }, @@ -578,6 +587,7 @@ func TestAccCognitoIDPUserPool_MFA_smsToSoftwareTokenMFA(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "sms_configuration.0.external_id", "test"), resource.TestCheckResourceAttrPair(resourceName, "sms_configuration.0.sns_caller_arn", iamRoleResourceName, names.AttrARN), resource.TestCheckResourceAttr(resourceName, "software_token_mfa_configuration.#", "0"), + resource.TestCheckResourceAttr(resourceName, "email_mfa_configuration.#", "0"), ), }, { @@ -592,6 +602,7 @@ func TestAccCognitoIDPUserPool_MFA_smsToSoftwareTokenMFA(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "sms_configuration.#", "1"), resource.TestCheckResourceAttr(resourceName, "software_token_mfa_configuration.#", "1"), resource.TestCheckResourceAttr(resourceName, "software_token_mfa_configuration.0.enabled", acctest.CtTrue), + resource.TestCheckResourceAttr(resourceName, "email_mfa_configuration.#", "0"), ), }, }, @@ -618,6 +629,7 @@ func TestAccCognitoIDPUserPool_MFA_softwareTokenMFA(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "sms_configuration.#", "0"), resource.TestCheckResourceAttr(resourceName, "software_token_mfa_configuration.#", "1"), resource.TestCheckResourceAttr(resourceName, "software_token_mfa_configuration.0.enabled", acctest.CtTrue), + resource.TestCheckResourceAttr(resourceName, "email_mfa_configuration.#", "0"), ), }, { @@ -631,6 +643,7 @@ func TestAccCognitoIDPUserPool_MFA_softwareTokenMFA(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "mfa_configuration", "OFF"), resource.TestCheckResourceAttr(resourceName, 
"sms_configuration.#", "0"), resource.TestCheckResourceAttr(resourceName, "software_token_mfa_configuration.#", "0"), + resource.TestCheckResourceAttr(resourceName, "email_mfa_configuration.#", "0"), ), }, { @@ -640,6 +653,7 @@ func TestAccCognitoIDPUserPool_MFA_softwareTokenMFA(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "sms_configuration.#", "0"), resource.TestCheckResourceAttr(resourceName, "software_token_mfa_configuration.#", "1"), resource.TestCheckResourceAttr(resourceName, "software_token_mfa_configuration.0.enabled", acctest.CtTrue), + resource.TestCheckResourceAttr(resourceName, "email_mfa_configuration.#", "0"), ), }, }, @@ -665,11 +679,25 @@ func TestAccCognitoIDPUserPool_MFA_emailConfigurationMFA(t *testing.T) { ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckUserPoolDestroy(ctx), Steps: []resource.TestStep{ + { + Config: testAccUserPoolConfig_mfaEmailConfigurationEmptyConfiguration(rName, replyTo, sourceARN, emailTo, "DEVELOPER"), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckUserPoolExists(ctx, resourceName, &pool), + resource.TestCheckResourceAttr(resourceName, "mfa_configuration", "ON"), + resource.TestCheckResourceAttr(resourceName, "sms_configuration.#", "0"), + resource.TestCheckResourceAttr(resourceName, "software_token_mfa_configuration.#", "0"), + resource.TestCheckResourceAttr(resourceName, "email_mfa_configuration.#", "1"), + resource.TestCheckNoResourceAttr(resourceName, "email_mfa_configuration.0.message"), + resource.TestCheckNoResourceAttr(resourceName, "email_mfa_configuration.0.subject"), + ), + }, { Config: testAccUserPoolConfig_mfaEmailConfigurationConfigurationEnabled(rName, true, message, subject, replyTo, sourceARN, emailTo, "DEVELOPER"), Check: resource.ComposeAggregateTestCheckFunc( testAccCheckUserPoolExists(ctx, resourceName, &pool), resource.TestCheckResourceAttr(resourceName, "mfa_configuration", "ON"), + resource.TestCheckResourceAttr(resourceName, 
"sms_configuration.#", "0"), + resource.TestCheckResourceAttr(resourceName, "software_token_mfa_configuration.#", "0"), resource.TestCheckResourceAttr(resourceName, "email_mfa_configuration.0.message", message), resource.TestCheckResourceAttr(resourceName, "email_mfa_configuration.0.subject", subject), ), @@ -679,6 +707,8 @@ func TestAccCognitoIDPUserPool_MFA_emailConfigurationMFA(t *testing.T) { Check: resource.ComposeAggregateTestCheckFunc( testAccCheckUserPoolExists(ctx, resourceName, &pool), resource.TestCheckResourceAttr(resourceName, "mfa_configuration", "ON"), + resource.TestCheckResourceAttr(resourceName, "sms_configuration.#", "0"), + resource.TestCheckResourceAttr(resourceName, "software_token_mfa_configuration.#", "0"), resource.TestCheckResourceAttr(resourceName, "email_mfa_configuration.0.message", updatedMessage), resource.TestCheckResourceAttr(resourceName, "email_mfa_configuration.0.subject", updatedSubject), ), @@ -2036,6 +2066,50 @@ func TestAccCognitoIDPUserPool_userPoolTier(t *testing.T) { }) } +func TestAccCognitoIDPUserPool_nameUpdate(t *testing.T) { + ctx := acctest.Context(t) + var pool1, pool2 awstypes.UserPoolType + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_cognito_user_pool.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheckIdentityProvider(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.CognitoIDPServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckUserPoolDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccUserPoolConfig_name(rName + "-test1"), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckUserPoolExists(ctx, resourceName, &pool1), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + }, + }, + ConfigStateChecks: 
[]statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrName), knownvalue.StringExact(rName+"-test1")), + }, + }, + { + Config: testAccUserPoolConfig_name(rName + "-test2"), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckUserPoolExists(ctx, resourceName, &pool2), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrName), knownvalue.StringExact(rName+"-test2")), + }, + }, + }, + }) +} + func testAccCheckUserPoolDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { conn := acctest.Provider.Meta().(*conns.AWSClient).CognitoIDPClient(ctx) @@ -2427,6 +2501,44 @@ resource "aws_cognito_user_pool" "test" { `, rName, enabled, message, subject, email, arn, from, account) } +func testAccUserPoolConfig_mfaEmailConfigurationEmptyConfiguration(rName string, email, arn, from, account string) string { + return fmt.Sprintf(` +resource "aws_ses_configuration_set" "test" { + name = %[1]q + + delivery_options { + tls_policy = "Optional" + } +} + +resource "aws_cognito_user_pool" "test" { + mfa_configuration = "ON" + name = %[1]q + + email_configuration { + reply_to_email_address = %[2]q + source_arn = %[3]q + from_email_address = %[4]q + email_sending_account = %[5]q + configuration_set = aws_ses_configuration_set.test.name + } + + account_recovery_setting { + recovery_mechanism { + name = "verified_email" + priority = 1 + } + recovery_mechanism { + name = "verified_phone_number" + priority = 2 + } + } + + email_mfa_configuration {} +} +`, rName, email, arn, from, account) +} + func testAccUserPoolConfig_passwordHistorySize(rName string, passwordHistorySize int) string { return fmt.Sprintf(` resource "aws_cognito_user_pool" "test" { diff --git 
a/internal/service/comprehend/document_classifier.go b/internal/service/comprehend/document_classifier.go index 13066f11d6e2..ad37f633a09a 100644 --- a/internal/service/comprehend/document_classifier.go +++ b/internal/service/comprehend/document_classifier.go @@ -417,7 +417,7 @@ func resourceDocumentClassifierDelete(ctx context.Context, d *schema.ResourceDat } if _, err := waitDocumentClassifierDeleted(ctx, conn, aws.ToString(v.DocumentClassifierArn), d.Timeout(schema.TimeoutDelete)); err != nil { - return fmt.Errorf("waiting for version (%s) to be deleted: %s", aws.ToString(v.VersionName), err) + return fmt.Errorf("waiting for version (%s) to be deleted: %w", aws.ToString(v.VersionName), err) } ec2Conn := meta.(*conns.AWSClient).EC2Client(ctx) @@ -495,7 +495,7 @@ func documentClassifierPublishVersion(ctx context.Context, conn *comprehend.Clie } var out *comprehend.CreateDocumentClassifierOutput - err := tfresource.Retry(ctx, timeout, func() *retry.RetryError { + err := tfresource.Retry(ctx, timeout, func(ctx context.Context) *tfresource.RetryError { var err error out, err = conn.CreateDocumentClassifier(ctx, in) @@ -503,20 +503,18 @@ func documentClassifierPublishVersion(ctx context.Context, conn *comprehend.Clie var tmre *types.TooManyRequestsException var qee ratelimit.QuotaExceededError // This is not a typo: the ratelimit.QuotaExceededError is returned as a struct, not a pointer if errors.As(err, &tmre) { - return retry.RetryableError(err) + return tfresource.RetryableError(err) } else if errors.As(err, &qee) { // Unable to get a rate limit token - return retry.RetryableError(err) + return tfresource.RetryableError(err) } else { - return retry.NonRetryableError(err) + return tfresource.NonRetryableError(err) } } return nil }, tfresource.WithPollInterval(documentClassifierPollInterval)) - if tfresource.TimedOut(err) { - out, err = conn.CreateDocumentClassifier(ctx, in) - } + if err != nil { return sdkdiag.AppendErrorf(diags, "%s Amazon Comprehend Document 
Classifier (%s): %s", action, d.Get(names.AttrName).(string), err) } diff --git a/internal/service/comprehend/document_classifier_identity_gen_test.go b/internal/service/comprehend/document_classifier_identity_gen_test.go index af1cc53ec808..ea6e1460ca71 100644 --- a/internal/service/comprehend/document_classifier_identity_gen_test.go +++ b/internal/service/comprehend/document_classifier_identity_gen_test.go @@ -16,6 +16,7 @@ import ( "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" "github.com/hashicorp/terraform-plugin-testing/tfversion" "github.com/hashicorp/terraform-provider-aws/internal/acctest" + tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -26,7 +27,7 @@ func TestAccComprehendDocumentClassifier_Identity_Basic(t *testing.T) { resourceName := "aws_comprehend_document_classifier.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ TerraformVersionChecks: []tfversion.TerraformVersionCheck{ tfversion.SkipBelow(tfversion.Version1_12_0), }, @@ -50,6 +51,9 @@ func TestAccComprehendDocumentClassifier_Identity_Basic(t *testing.T) { ConfigStateChecks: []statecheck.StateCheck{ statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New(names.AttrARN), compare.ValuesSame()), statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), }, }, @@ -111,7 +115,7 @@ func TestAccComprehendDocumentClassifier_Identity_RegionOverride(t *testing.T) { resourceName := "aws_comprehend_document_classifier.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) 
- resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ TerraformVersionChecks: []tfversion.TerraformVersionCheck{ tfversion.SkipBelow(tfversion.Version1_12_0), }, @@ -133,6 +137,9 @@ func TestAccComprehendDocumentClassifier_Identity_RegionOverride(t *testing.T) { ConfigStateChecks: []statecheck.StateCheck{ statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New(names.AttrARN), compare.ValuesSame()), statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), }, }, @@ -224,3 +231,137 @@ func TestAccComprehendDocumentClassifier_Identity_RegionOverride(t *testing.T) { }, }) } + +func TestAccComprehendDocumentClassifier_Identity_ExistingResource(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.DocumentClassifierProperties + resourceName := "aws_comprehend_document_classifier.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { + acctest.PreCheck(ctx, t) + testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.ComprehendServiceID), + CheckDestroy: testAccCheckDocumentClassifierDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/DocumentClassifier/basic_v5.100.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDocumentClassifierExists(ctx, resourceName, &v), + ), + ConfigStateChecks: 
[]statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: v6.0 Identity error + { + ConfigDirectory: config.StaticDirectory("testdata/DocumentClassifier/basic_v6.0.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDocumentClassifierExists(ctx, resourceName, &v), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.Null(), + }), + }, + }, + + // Step 3: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/DocumentClassifier/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + }, + }) +} + +func TestAccComprehendDocumentClassifier_Identity_ExistingResource_NoRefresh_NoChange(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.DocumentClassifierProperties + resourceName := "aws_comprehend_document_classifier.test" + rName := 
sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { + acctest.PreCheck(ctx, t) + testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.ComprehendServiceID), + CheckDestroy: testAccCheckDocumentClassifierDestroy(ctx), + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/DocumentClassifier/basic_v5.100.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDocumentClassifierExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/DocumentClassifier/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDocumentClassifierExists(ctx, resourceName, &v), + ), + }, + }, + }) +} diff --git a/internal/service/comprehend/document_classifier_test.go b/internal/service/comprehend/document_classifier_test.go index d76ab63fbbab..3a95758df182 100644 --- a/internal/service/comprehend/document_classifier_test.go +++ b/internal/service/comprehend/document_classifier_test.go @@ -15,14 +15,9 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" - "github.com/hashicorp/terraform-plugin-testing/knownvalue" 
"github.com/hashicorp/terraform-plugin-testing/plancheck" - "github.com/hashicorp/terraform-plugin-testing/statecheck" "github.com/hashicorp/terraform-plugin-testing/terraform" - "github.com/hashicorp/terraform-plugin-testing/tfversion" "github.com/hashicorp/terraform-provider-aws/internal/acctest" - tfknownvalue "github.com/hashicorp/terraform-provider-aws/internal/acctest/knownvalue" - tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" "github.com/hashicorp/terraform-provider-aws/internal/conns" tfcomprehend "github.com/hashicorp/terraform-provider-aws/internal/service/comprehend" "github.com/hashicorp/terraform-provider-aws/names" @@ -1491,92 +1486,6 @@ func TestAccComprehendDocumentClassifier_DefaultTags_providerOnly(t *testing.T) }) } -func TestAccComprehendDocumentClassifier_Identity_ExistingResource(t *testing.T) { - ctx := acctest.Context(t) - if testing.Short() { - t.Skip("skipping long-running test in short mode") - } - - var documentclassifier types.DocumentClassifierProperties - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resourceName := "aws_comprehend_document_classifier.test" - - resource.ParallelTest(t, resource.TestCase{ - TerraformVersionChecks: []tfversion.TerraformVersionCheck{ - tfversion.SkipBelow(tfversion.Version1_12_0), - }, - PreCheck: func() { - acctest.PreCheck(ctx, t) - acctest.PreCheckPartitionHasService(t, names.ComprehendEndpointID) - testAccPreCheck(ctx, t) - }, - ErrorCheck: acctest.ErrorCheck(t, names.ComprehendServiceID), - CheckDestroy: testAccCheckDocumentClassifierDestroy(ctx), - Steps: []resource.TestStep{ - { - ExternalProviders: map[string]resource.ExternalProvider{ - "aws": { - Source: "hashicorp/aws", - VersionConstraint: "5.100.0", - }, - }, - Config: testAccDocumentClassifierConfig_basic(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckDocumentClassifierExists(ctx, resourceName, &documentclassifier), - ), - ConfigStateChecks: []statecheck.StateCheck{ - 
tfstatecheck.ExpectNoIdentity(resourceName), - }, - }, - { - ExternalProviders: map[string]resource.ExternalProvider{ - "aws": { - Source: "hashicorp/aws", - VersionConstraint: "6.0.0", - }, - }, - Config: testAccDocumentClassifierConfig_basic(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckDocumentClassifierExists(ctx, resourceName, &documentclassifier), - ), - ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - PostApplyPostRefresh: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - }, - ConfigStateChecks: []statecheck.StateCheck{ - statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ - names.AttrARN: knownvalue.Null(), - }), - }, - }, - { - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - Config: testAccDocumentClassifierConfig_basic(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckDocumentClassifierExists(ctx, resourceName, &documentclassifier), - ), - ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - PostApplyPostRefresh: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - }, - ConfigStateChecks: []statecheck.StateCheck{ - statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ - names.AttrARN: tfknownvalue.RegionalARNRegexp("comprehend", regexache.MustCompile(fmt.Sprintf(`document-classifier/%s/version/%s$`, rName, uniqueIDPattern()))), - }), - }, - }, - }, - }) -} - func testAccCheckDocumentClassifierDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { conn := acctest.Provider.Meta().(*conns.AWSClient).ComprehendClient(ctx) @@ -1710,7 +1619,7 @@ resource "aws_comprehend_document_classifier" "test" { language_code = 
"en" input_data_config { - s3_uri = "s3://${aws_s3_bucket.test.bucket}/${aws_s3_object.documents.id}" + s3_uri = "s3://${aws_s3_object.documents.bucket}/${aws_s3_object.documents.key}" } depends_on = [ @@ -1736,7 +1645,7 @@ resource "aws_comprehend_document_classifier" "test" { language_code = "en" mode = "MULTI_CLASS" input_data_config { - s3_uri = "s3://${aws_s3_bucket.test.bucket}/${aws_s3_object.documents.id}" + s3_uri = "s3://${aws_s3_object.documents.bucket}/${aws_s3_object.documents.key}" } depends_on = [ @@ -1766,7 +1675,7 @@ resource "aws_comprehend_document_classifier" "test" { language_code = "en" input_data_config { - s3_uri = "s3://${aws_s3_bucket.test.bucket}/${aws_s3_object.documents.id}" + s3_uri = "s3://${aws_s3_object.documents.bucket}/${aws_s3_object.documents.key}" } depends_on = [ @@ -1792,7 +1701,7 @@ resource "aws_comprehend_document_classifier" "test" { language_code = "en" input_data_config { - s3_uri = "s3://${aws_s3_bucket.test.bucket}/${aws_s3_object.documents.id}" + s3_uri = "s3://${aws_s3_object.documents.bucket}/${aws_s3_object.documents.key}" } depends_on = [ @@ -1817,7 +1726,7 @@ resource "aws_comprehend_document_classifier" "test" { language_code = "en" input_data_config { - s3_uri = "s3://${aws_s3_bucket.test.bucket}/${aws_s3_object.documents.id}" + s3_uri = "s3://${aws_s3_object.documents.bucket}/${aws_s3_object.documents.key}" } depends_on = [ @@ -1843,7 +1752,7 @@ resource "aws_comprehend_document_classifier" "test" { language_code = "en" input_data_config { - s3_uri = "s3://${aws_s3_bucket.test.bucket}/${aws_s3_object.documents.id}" + s3_uri = "s3://${aws_s3_object.documents.bucket}/${aws_s3_object.documents.key}" } depends_on = [ @@ -1868,8 +1777,8 @@ resource "aws_comprehend_document_classifier" "test" { language_code = "en" input_data_config { - s3_uri = "s3://${aws_s3_bucket.test.bucket}/${aws_s3_object.documents.id}" - test_s3_uri = "s3://${aws_s3_bucket.test.bucket}/${aws_s3_object.documents.id}" + s3_uri = 
"s3://${aws_s3_object.documents.bucket}/${aws_s3_object.documents.key}" + test_s3_uri = "s3://${aws_s3_object.documents.bucket}/${aws_s3_object.documents.key}" } depends_on = [ @@ -1894,7 +1803,7 @@ resource "aws_comprehend_document_classifier" "test" { language_code = "en" input_data_config { - s3_uri = "s3://${aws_s3_bucket.test.bucket}/${aws_s3_object.documents.id}" + s3_uri = "s3://${aws_s3_object.documents.bucket}/${aws_s3_object.documents.key}" label_delimiter = %q } @@ -1921,7 +1830,7 @@ resource "aws_comprehend_document_classifier" "test" { language_code = "en" mode = "MULTI_CLASS" input_data_config { - s3_uri = "s3://${aws_s3_bucket.test.bucket}/${aws_s3_object.documents.id}" + s3_uri = "s3://${aws_s3_object.documents.bucket}/${aws_s3_object.documents.key}" label_delimiter = %q } @@ -1948,7 +1857,7 @@ resource "aws_comprehend_document_classifier" "test" { language_code = "en" mode = "MULTI_LABEL" input_data_config { - s3_uri = "s3://${aws_s3_bucket.test.bucket}/${aws_s3_object.multilabel.id}" + s3_uri = "s3://${aws_s3_object.multilabel.bucket}/${aws_s3_object.multilabel.key}" } depends_on = [ @@ -1978,7 +1887,7 @@ resource "aws_comprehend_document_classifier" "test" { language_code = "en" mode = "MULTI_LABEL" input_data_config { - s3_uri = "s3://${aws_s3_bucket.test.bucket}/${aws_s3_object.multilabel.id}" + s3_uri = "s3://${aws_s3_object.multilabel.bucket}/${aws_s3_object.multilabel.key}" label_delimiter = %[2]q } @@ -2007,7 +1916,7 @@ resource "aws_comprehend_document_classifier" "test" { language_code = "en" input_data_config { - s3_uri = "s3://${aws_s3_bucket.test.bucket}/${aws_s3_object.documents.id}" + s3_uri = "s3://${aws_s3_object.documents.bucket}/${aws_s3_object.documents.key}" } depends_on = [ @@ -2073,7 +1982,7 @@ resource "aws_comprehend_document_classifier" "test" { language_code = "en" input_data_config { - s3_uri = "s3://${aws_s3_bucket.test.bucket}/${aws_s3_object.documents.id}" + s3_uri = 
"s3://${aws_s3_object.documents.bucket}/${aws_s3_object.documents.key}" } depends_on = [ @@ -2135,7 +2044,7 @@ resource "aws_comprehend_document_classifier" "test" { language_code = "en" input_data_config { - s3_uri = "s3://${aws_s3_bucket.test.bucket}/${aws_s3_object.documents.id}" + s3_uri = "s3://${aws_s3_object.documents.bucket}/${aws_s3_object.documents.key}" } depends_on = [ @@ -2163,7 +2072,7 @@ resource "aws_comprehend_document_classifier" "test" { language_code = "en" input_data_config { - s3_uri = "s3://${aws_s3_bucket.test.bucket}/${aws_s3_object.documents.id}" + s3_uri = "s3://${aws_s3_object.documents.bucket}/${aws_s3_object.documents.key}" } depends_on = [ @@ -2228,7 +2137,7 @@ resource "aws_comprehend_document_classifier" "test" { language_code = "en" input_data_config { - s3_uri = "s3://${aws_s3_bucket.test.bucket}/${aws_s3_object.documents.id}" + s3_uri = "s3://${aws_s3_object.documents.bucket}/${aws_s3_object.documents.key}" } depends_on = [ @@ -2292,7 +2201,7 @@ resource "aws_comprehend_document_classifier" "test" { language_code = "en" input_data_config { - s3_uri = "s3://${aws_s3_bucket.test.bucket}/${aws_s3_object.documents.id}" + s3_uri = "s3://${aws_s3_object.documents.bucket}/${aws_s3_object.documents.key}" } depends_on = [ @@ -2321,7 +2230,7 @@ resource "aws_comprehend_document_classifier" "test" { language_code = "en" input_data_config { - s3_uri = "s3://${aws_s3_bucket.test.bucket}/${aws_s3_object.documents.id}" + s3_uri = "s3://${aws_s3_object.documents.bucket}/${aws_s3_object.documents.key}" } depends_on = [ @@ -2351,7 +2260,7 @@ resource "aws_comprehend_document_classifier" "test" { language_code = "en" input_data_config { - s3_uri = "s3://${aws_s3_bucket.test.bucket}/${aws_s3_object.documents.id}" + s3_uri = "s3://${aws_s3_object.documents.bucket}/${aws_s3_object.documents.key}" } depends_on = [ @@ -2377,7 +2286,7 @@ resource "aws_comprehend_document_classifier" "test" { language_code = "en" input_data_config { - s3_uri = 
"s3://${aws_s3_bucket.test.bucket}/${aws_s3_object.documents.id}" + s3_uri = "s3://${aws_s3_object.documents.bucket}/${aws_s3_object.documents.key}" } output_data_config { @@ -2408,7 +2317,7 @@ resource "aws_comprehend_document_classifier" "test" { language_code = "en" input_data_config { - s3_uri = "s3://${aws_s3_bucket.test.bucket}/${aws_s3_object.documents.id}" + s3_uri = "s3://${aws_s3_object.documents.bucket}/${aws_s3_object.documents.key}" } output_data_config { @@ -2464,7 +2373,7 @@ resource "aws_comprehend_document_classifier" "test" { language_code = "en" input_data_config { - s3_uri = "s3://${aws_s3_bucket.test.bucket}/${aws_s3_object.documents.id}" + s3_uri = "s3://${aws_s3_object.documents.bucket}/${aws_s3_object.documents.key}" } output_data_config { @@ -2520,7 +2429,7 @@ resource "aws_comprehend_document_classifier" "test" { language_code = "en" input_data_config { - s3_uri = "s3://${aws_s3_bucket.test.bucket}/${aws_s3_object.documents.id}" + s3_uri = "s3://${aws_s3_object.documents.bucket}/${aws_s3_object.documents.key}" } output_data_config { @@ -2581,7 +2490,7 @@ resource "aws_comprehend_document_classifier" "test" { language_code = "en" input_data_config { - s3_uri = "s3://${aws_s3_bucket.test.bucket}/${aws_s3_object.documents.id}" + s3_uri = "s3://${aws_s3_object.documents.bucket}/${aws_s3_object.documents.key}" } output_data_config { @@ -2642,7 +2551,7 @@ resource "aws_comprehend_document_classifier" "test" { language_code = "en" input_data_config { - s3_uri = "s3://${aws_s3_bucket.test.bucket}/${aws_s3_object.documents.id}" + s3_uri = "s3://${aws_s3_object.documents.bucket}/${aws_s3_object.documents.key}" } output_data_config { @@ -2698,7 +2607,7 @@ resource "aws_comprehend_document_classifier" "test" { language_code = "en" input_data_config { - s3_uri = "s3://${aws_s3_bucket.test.bucket}/${aws_s3_object.documents.id}" + s3_uri = "s3://${aws_s3_object.documents.bucket}/${aws_s3_object.documents.key}" } output_data_config { @@ -2729,7 +2638,7 @@ 
resource "aws_comprehend_document_classifier" "test" { language_code = "en" input_data_config { - s3_uri = "s3://${aws_s3_bucket.test.bucket}/${aws_s3_object.documents.id}" + s3_uri = "s3://${aws_s3_object.documents.bucket}/${aws_s3_object.documents.key}" } output_data_config { @@ -2923,7 +2832,7 @@ resource "aws_comprehend_document_classifier" "test" { language_code = "en" input_data_config { - s3_uri = "s3://${aws_s3_bucket.test.bucket}/${aws_s3_object.documents.id}" + s3_uri = "s3://${aws_s3_object.documents.bucket}/${aws_s3_object.documents.key}" } depends_on = [ @@ -3031,7 +2940,7 @@ resource "aws_comprehend_document_classifier" "test" { language_code = "en" input_data_config { - s3_uri = "s3://${aws_s3_bucket.test.bucket}/${aws_s3_object.documents.id}" + s3_uri = "s3://${aws_s3_object.documents.bucket}/${aws_s3_object.documents.key}" } depends_on = [ @@ -3132,7 +3041,7 @@ resource "aws_comprehend_document_classifier" "test" { language_code = "en" input_data_config { - s3_uri = "s3://${aws_s3_bucket.test.bucket}/${aws_s3_object.documents.id}" + s3_uri = "s3://${aws_s3_object.documents.bucket}/${aws_s3_object.documents.key}" } depends_on = [ diff --git a/internal/service/comprehend/entity_recognizer.go b/internal/service/comprehend/entity_recognizer.go index cff06835d147..b9d4fdced08d 100644 --- a/internal/service/comprehend/entity_recognizer.go +++ b/internal/service/comprehend/entity_recognizer.go @@ -446,7 +446,7 @@ func resourceEntityRecognizerDelete(ctx context.Context, d *schema.ResourceData, } if _, err := waitEntityRecognizerDeleted(ctx, conn, aws.ToString(v.EntityRecognizerArn), d.Timeout(schema.TimeoutDelete)); err != nil { - return fmt.Errorf("waiting for version (%s) to be deleted: %s", aws.ToString(v.VersionName), err) + return fmt.Errorf("waiting for version (%s) to be deleted: %w", aws.ToString(v.VersionName), err) } ec2Conn := meta.(*conns.AWSClient).EC2Client(ctx) @@ -522,7 +522,7 @@ func entityRecognizerPublishVersion(ctx context.Context, conn 
*comprehend.Client } var out *comprehend.CreateEntityRecognizerOutput - err := tfresource.Retry(ctx, timeout, func() *retry.RetryError { + err := tfresource.Retry(ctx, timeout, func(ctx context.Context) *tfresource.RetryError { var err error out, err = conn.CreateEntityRecognizer(ctx, in) @@ -530,20 +530,18 @@ func entityRecognizerPublishVersion(ctx context.Context, conn *comprehend.Client var tmre *types.TooManyRequestsException var qee ratelimit.QuotaExceededError // This is not a typo: the ratelimit.QuotaExceededError is returned as a struct, not a pointer if errors.As(err, &tmre) { - return retry.RetryableError(err) + return tfresource.RetryableError(err) } else if errors.As(err, &qee) { // Unable to get a rate limit token - return retry.RetryableError(err) + return tfresource.RetryableError(err) } else { - return retry.NonRetryableError(err) + return tfresource.NonRetryableError(err) } } return nil }, tfresource.WithPollInterval(entityRegcognizerPollInterval)) - if tfresource.TimedOut(err) { - out, err = conn.CreateEntityRecognizer(ctx, in) - } + if err != nil { return sdkdiag.AppendErrorf(diags, "%s Amazon Comprehend Entity Recognizer (%s): %s", action, d.Get(names.AttrName).(string), err) } diff --git a/internal/service/comprehend/entity_recognizer_identity_gen_test.go b/internal/service/comprehend/entity_recognizer_identity_gen_test.go index 474d014f4fde..da79941c8ad9 100644 --- a/internal/service/comprehend/entity_recognizer_identity_gen_test.go +++ b/internal/service/comprehend/entity_recognizer_identity_gen_test.go @@ -16,6 +16,7 @@ import ( "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" "github.com/hashicorp/terraform-plugin-testing/tfversion" "github.com/hashicorp/terraform-provider-aws/internal/acctest" + tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -26,7 +27,7 @@ func TestAccComprehendEntityRecognizer_Identity_Basic(t *testing.T) { 
resourceName := "aws_comprehend_entity_recognizer.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ TerraformVersionChecks: []tfversion.TerraformVersionCheck{ tfversion.SkipBelow(tfversion.Version1_12_0), }, @@ -50,6 +51,9 @@ func TestAccComprehendEntityRecognizer_Identity_Basic(t *testing.T) { ConfigStateChecks: []statecheck.StateCheck{ statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New(names.AttrARN), compare.ValuesSame()), statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), }, }, @@ -111,7 +115,7 @@ func TestAccComprehendEntityRecognizer_Identity_RegionOverride(t *testing.T) { resourceName := "aws_comprehend_entity_recognizer.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ TerraformVersionChecks: []tfversion.TerraformVersionCheck{ tfversion.SkipBelow(tfversion.Version1_12_0), }, @@ -133,6 +137,9 @@ func TestAccComprehendEntityRecognizer_Identity_RegionOverride(t *testing.T) { ConfigStateChecks: []statecheck.StateCheck{ statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New(names.AttrARN), compare.ValuesSame()), statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), }, }, @@ -224,3 +231,137 @@ func 
TestAccComprehendEntityRecognizer_Identity_RegionOverride(t *testing.T) { }, }) } + +func TestAccComprehendEntityRecognizer_Identity_ExistingResource(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.EntityRecognizerProperties + resourceName := "aws_comprehend_entity_recognizer.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { + acctest.PreCheck(ctx, t) + testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.ComprehendServiceID), + CheckDestroy: testAccCheckEntityRecognizerDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/EntityRecognizer/basic_v5.100.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckEntityRecognizerExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: v6.0 Identity error + { + ConfigDirectory: config.StaticDirectory("testdata/EntityRecognizer/basic_v6.0.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckEntityRecognizerExists(ctx, resourceName, &v), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.Null(), + }), + }, + }, + + // 
Step 3: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/EntityRecognizer/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + }, + }) +} + +func TestAccComprehendEntityRecognizer_Identity_ExistingResource_NoRefresh_NoChange(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.EntityRecognizerProperties + resourceName := "aws_comprehend_entity_recognizer.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { + acctest.PreCheck(ctx, t) + testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.ComprehendServiceID), + CheckDestroy: testAccCheckEntityRecognizerDestroy(ctx), + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/EntityRecognizer/basic_v5.100.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckEntityRecognizerExists(ctx, resourceName, &v), + ), + ConfigStateChecks: 
[]statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/EntityRecognizer/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckEntityRecognizerExists(ctx, resourceName, &v), + ), + }, + }, + }) +} diff --git a/internal/service/comprehend/entity_recognizer_test.go b/internal/service/comprehend/entity_recognizer_test.go index 7802bf48264a..b5ff52573e2d 100644 --- a/internal/service/comprehend/entity_recognizer_test.go +++ b/internal/service/comprehend/entity_recognizer_test.go @@ -15,14 +15,9 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" - "github.com/hashicorp/terraform-plugin-testing/knownvalue" "github.com/hashicorp/terraform-plugin-testing/plancheck" - "github.com/hashicorp/terraform-plugin-testing/statecheck" "github.com/hashicorp/terraform-plugin-testing/terraform" - "github.com/hashicorp/terraform-plugin-testing/tfversion" "github.com/hashicorp/terraform-provider-aws/internal/acctest" - tfknownvalue "github.com/hashicorp/terraform-provider-aws/internal/acctest/knownvalue" - tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" "github.com/hashicorp/terraform-provider-aws/internal/conns" tfcomprehend "github.com/hashicorp/terraform-provider-aws/internal/service/comprehend" "github.com/hashicorp/terraform-provider-aws/names" @@ -1008,92 +1003,6 @@ func testAccCheckEntityRecognizerExists(ctx context.Context, name string, entity } } -func TestAccComprehendEntityRecognizer_Identity_ExistingResource(t *testing.T) { - ctx := acctest.Context(t) - if testing.Short() { - t.Skip("skipping long-running 
test in short mode") - } - - var entityrecognizer types.EntityRecognizerProperties - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resourceName := "aws_comprehend_entity_recognizer.test" - - resource.ParallelTest(t, resource.TestCase{ - TerraformVersionChecks: []tfversion.TerraformVersionCheck{ - tfversion.SkipBelow(tfversion.Version1_12_0), - }, - PreCheck: func() { - acctest.PreCheck(ctx, t) - acctest.PreCheckPartitionHasService(t, names.ComprehendEndpointID) - testAccPreCheck(ctx, t) - }, - ErrorCheck: acctest.ErrorCheck(t, names.ComprehendServiceID), - CheckDestroy: testAccCheckEntityRecognizerDestroy(ctx), - Steps: []resource.TestStep{ - { - ExternalProviders: map[string]resource.ExternalProvider{ - "aws": { - Source: "hashicorp/aws", - VersionConstraint: "5.100.0", - }, - }, - Config: testAccEntityRecognizerConfig_basic(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckEntityRecognizerExists(ctx, resourceName, &entityrecognizer), - ), - ConfigStateChecks: []statecheck.StateCheck{ - tfstatecheck.ExpectNoIdentity(resourceName), - }, - }, - { - ExternalProviders: map[string]resource.ExternalProvider{ - "aws": { - Source: "hashicorp/aws", - VersionConstraint: "6.0.0", - }, - }, - Config: testAccEntityRecognizerConfig_basic(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckEntityRecognizerExists(ctx, resourceName, &entityrecognizer), - ), - ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - PostApplyPostRefresh: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - }, - ConfigStateChecks: []statecheck.StateCheck{ - statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ - names.AttrARN: knownvalue.Null(), - }), - }, - }, - { - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - Config: testAccEntityRecognizerConfig_basic(rName), - 
Check: resource.ComposeTestCheckFunc( - testAccCheckEntityRecognizerExists(ctx, resourceName, &entityrecognizer), - ), - ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - PostApplyPostRefresh: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - }, - ConfigStateChecks: []statecheck.StateCheck{ - statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ - names.AttrARN: tfknownvalue.RegionalARNRegexp("comprehend", regexache.MustCompile(fmt.Sprintf(`entity-recognizer/%s/version/%s$`, rName, uniqueIDPattern()))), - }), - }, - }, - }, - }) -} - func testAccCheckEntityRecognizerNotRecreated(before, after *types.EntityRecognizerProperties) resource.TestCheckFunc { return func(s *terraform.State) error { if !entityRecognizerIdentity(before, after) { @@ -1172,11 +1081,11 @@ resource "aws_comprehend_entity_recognizer" "test" { } documents { - s3_uri = "s3://${aws_s3_bucket.test.bucket}/${aws_s3_object.documents.id}" + s3_uri = "s3://${aws_s3_object.documents.bucket}/${aws_s3_object.documents.key}" } entity_list { - s3_uri = "s3://${aws_s3_bucket.test.bucket}/${aws_s3_object.entities.id}" + s3_uri = "s3://${aws_s3_object.entities.bucket}/${aws_s3_object.entities.key}" } } @@ -1215,11 +1124,11 @@ resource "aws_comprehend_entity_recognizer" "test" { } documents { - s3_uri = "s3://${aws_s3_bucket.test.bucket}/${aws_s3_object.documents.id}" + s3_uri = "s3://${aws_s3_object.documents.bucket}/${aws_s3_object.documents.key}" } entity_list { - s3_uri = "s3://${aws_s3_bucket.test.bucket}/${aws_s3_object.entities.id}" + s3_uri = "s3://${aws_s3_object.entities.bucket}/${aws_s3_object.entities.key}" } } @@ -1254,11 +1163,11 @@ resource "aws_comprehend_entity_recognizer" "test" { } documents { - s3_uri = "s3://${aws_s3_bucket.test.bucket}/${aws_s3_object.documents.id}" + s3_uri = 
"s3://${aws_s3_object.documents.bucket}/${aws_s3_object.documents.key}" } entity_list { - s3_uri = "s3://${aws_s3_bucket.test.bucket}/${aws_s3_object.entities.id}" + s3_uri = "s3://${aws_s3_object.entities.bucket}/${aws_s3_object.entities.key}" } } @@ -1292,11 +1201,11 @@ resource "aws_comprehend_entity_recognizer" "test" { } documents { - s3_uri = "s3://${aws_s3_bucket.test.bucket}/${aws_s3_object.documents.id}" + s3_uri = "s3://${aws_s3_object.documents.bucket}/${aws_s3_object.documents.key}" } entity_list { - s3_uri = "s3://${aws_s3_bucket.test.bucket}/${aws_s3_object.entities.id}" + s3_uri = "s3://${aws_s3_object.entities.bucket}/${aws_s3_object.entities.key}" } } @@ -1331,11 +1240,11 @@ resource "aws_comprehend_entity_recognizer" "test" { } documents { - s3_uri = "s3://${aws_s3_bucket.test.bucket}/${aws_s3_object.documents.id}" + s3_uri = "s3://${aws_s3_object.documents.bucket}/${aws_s3_object.documents.key}" } entity_list { - s3_uri = "s3://${aws_s3_bucket.test.bucket}/${aws_s3_object.entities.id}" + s3_uri = "s3://${aws_s3_object.entities.bucket}/${aws_s3_object.entities.key}" } } @@ -1369,12 +1278,12 @@ resource "aws_comprehend_entity_recognizer" "test" { } documents { - s3_uri = "s3://${aws_s3_bucket.test.bucket}/${aws_s3_object.documents.id}" - test_s3_uri = "s3://${aws_s3_bucket.test.bucket}/${aws_s3_object.documents.id}" + s3_uri = "s3://${aws_s3_bucket.test.bucket}/${aws_s3_object.documents.key}" + test_s3_uri = "s3://${aws_s3_bucket.test.bucket}/${aws_s3_object.documents.key}" } entity_list { - s3_uri = "s3://${aws_s3_bucket.test.bucket}/${aws_s3_object.entities.id}" + s3_uri = "s3://${aws_s3_object.entities.bucket}/${aws_s3_object.entities.key}" } } @@ -1408,11 +1317,11 @@ resource "aws_comprehend_entity_recognizer" "test" { } documents { - s3_uri = "s3://${aws_s3_bucket.test.bucket}/${aws_s3_object.documents.id}" + s3_uri = "s3://${aws_s3_object.documents.bucket}/${aws_s3_object.documents.key}" } annotations { - s3_uri = 
"s3://${aws_s3_bucket.test.bucket}/${aws_s3_object.annotations.id}" + s3_uri = "s3://${aws_s3_object.annotations.bucket}/${aws_s3_object.annotations.key}" } } @@ -1446,13 +1355,13 @@ resource "aws_comprehend_entity_recognizer" "test" { } documents { - s3_uri = "s3://${aws_s3_bucket.test.bucket}/${aws_s3_object.documents.id}" - test_s3_uri = "s3://${aws_s3_bucket.test.bucket}/${aws_s3_object.documents.id}" + s3_uri = "s3://${aws_s3_object.documents.bucket}/${aws_s3_object.documents.key}" + test_s3_uri = "s3://${aws_s3_object.documents.bucket}/${aws_s3_object.documents.key}" } annotations { - s3_uri = "s3://${aws_s3_bucket.test.bucket}/${aws_s3_object.annotations.id}" - test_s3_uri = "s3://${aws_s3_bucket.test.bucket}/${aws_s3_object.annotations.id}" + s3_uri = "s3://${aws_s3_object.annotations.bucket}/${aws_s3_object.annotations.key}" + test_s3_uri = "s3://${aws_s3_object.annotations.bucket}/${aws_s3_object.annotations.key}" } } @@ -1486,12 +1395,12 @@ resource "aws_comprehend_entity_recognizer" "test" { } documents { - s3_uri = "s3://${aws_s3_bucket.test.bucket}/${aws_s3_object.documents.id}" + s3_uri = "s3://${aws_s3_object.documents.bucket}/${aws_s3_object.documents.key}" } annotations { - s3_uri = "s3://${aws_s3_bucket.test.bucket}/${aws_s3_object.annotations.id}" - test_s3_uri = "s3://${aws_s3_bucket.test.bucket}/${aws_s3_object.annotations.id}" + s3_uri = "s3://${aws_s3_object.annotations.bucket}/${aws_s3_object.annotations.key}" + test_s3_uri = "s3://${aws_s3_object.annotations.bucket}/${aws_s3_object.annotations.key}" } } @@ -1525,12 +1434,12 @@ resource "aws_comprehend_entity_recognizer" "test" { } documents { - s3_uri = "s3://${aws_s3_bucket.test.bucket}/${aws_s3_object.documents.id}" - test_s3_uri = "s3://${aws_s3_bucket.test.bucket}/${aws_s3_object.documents.id}" + s3_uri = "s3://${aws_s3_object.documents.bucket}/${aws_s3_object.documents.key}" + test_s3_uri = "s3://${aws_s3_object.documents.bucket}/${aws_s3_object.documents.key}" } annotations { - 
s3_uri = "s3://${aws_s3_bucket.test.bucket}/${aws_s3_object.annotations.id}" + s3_uri = "s3://${aws_s3_object.annotations.bucket}/${aws_s3_object.annotations.key}" } } @@ -1567,11 +1476,11 @@ resource "aws_comprehend_entity_recognizer" "test" { } documents { - s3_uri = "s3://${aws_s3_bucket.test.bucket}/${aws_s3_object.documents.id}" + s3_uri = "s3://${aws_s3_object.documents.bucket}/${aws_s3_object.documents.key}" } entity_list { - s3_uri = "s3://${aws_s3_bucket.test.bucket}/${aws_s3_object.entities.id}" + s3_uri = "s3://${aws_s3_object.entities.bucket}/${aws_s3_object.entities.key}" } } @@ -1645,11 +1554,11 @@ resource "aws_comprehend_entity_recognizer" "test" { } documents { - s3_uri = "s3://${aws_s3_bucket.test.bucket}/${aws_s3_object.documents.id}" + s3_uri = "s3://${aws_s3_object.documents.bucket}/${aws_s3_object.documents.key}" } entity_list { - s3_uri = "s3://${aws_s3_bucket.test.bucket}/${aws_s3_object.entities.id}" + s3_uri = "s3://${aws_s3_object.entities.bucket}/${aws_s3_object.entities.key}" } } @@ -1720,11 +1629,11 @@ resource "aws_comprehend_entity_recognizer" "test" { } documents { - s3_uri = "s3://${aws_s3_bucket.test.bucket}/${aws_s3_object.documents.id}" + s3_uri = "s3://${aws_s3_object.documents.bucket}/${aws_s3_object.documents.key}" } entity_list { - s3_uri = "s3://${aws_s3_bucket.test.bucket}/${aws_s3_object.entities.id}" + s3_uri = "s3://${aws_s3_object.entities.bucket}/${aws_s3_object.entities.key}" } } @@ -1761,11 +1670,11 @@ resource "aws_comprehend_entity_recognizer" "test" { } documents { - s3_uri = "s3://${aws_s3_bucket.test.bucket}/${aws_s3_object.documents.id}" + s3_uri = "s3://${aws_s3_object.documents.bucket}/${aws_s3_object.documents.key}" } entity_list { - s3_uri = "s3://${aws_s3_bucket.test.bucket}/${aws_s3_object.entities.id}" + s3_uri = "s3://${aws_s3_object.entities.bucket}/${aws_s3_object.entities.key}" } } @@ -1839,11 +1748,11 @@ resource "aws_comprehend_entity_recognizer" "test" { } documents { - s3_uri = 
"s3://${aws_s3_bucket.test.bucket}/${aws_s3_object.documents.id}" + s3_uri = "s3://${aws_s3_object.documents.bucket}/${aws_s3_object.documents.key}" } entity_list { - s3_uri = "s3://${aws_s3_bucket.test.bucket}/${aws_s3_object.entities.id}" + s3_uri = "s3://${aws_s3_object.entities.bucket}/${aws_s3_object.entities.key}" } } @@ -1916,11 +1825,11 @@ resource "aws_comprehend_entity_recognizer" "test" { } documents { - s3_uri = "s3://${aws_s3_bucket.test.bucket}/${aws_s3_object.documents.id}" + s3_uri = "s3://${aws_s3_object.documents.bucket}/${aws_s3_object.documents.key}" } entity_list { - s3_uri = "s3://${aws_s3_bucket.test.bucket}/${aws_s3_object.entities.id}" + s3_uri = "s3://${aws_s3_object.entities.bucket}/${aws_s3_object.entities.key}" } } @@ -1958,11 +1867,11 @@ resource "aws_comprehend_entity_recognizer" "test" { } documents { - s3_uri = "s3://${aws_s3_bucket.test.bucket}/${aws_s3_object.documents.id}" + s3_uri = "s3://${aws_s3_object.documents.bucket}/${aws_s3_object.documents.key}" } entity_list { - s3_uri = "s3://${aws_s3_bucket.test.bucket}/${aws_s3_object.entities.id}" + s3_uri = "s3://${aws_s3_object.entities.bucket}/${aws_s3_object.entities.key}" } } @@ -2001,11 +1910,11 @@ resource "aws_comprehend_entity_recognizer" "test" { } documents { - s3_uri = "s3://${aws_s3_bucket.test.bucket}/${aws_s3_object.documents.id}" + s3_uri = "s3://${aws_s3_object.documents.bucket}/${aws_s3_object.documents.key}" } entity_list { - s3_uri = "s3://${aws_s3_bucket.test.bucket}/${aws_s3_object.entities.id}" + s3_uri = "s3://${aws_s3_object.entities.bucket}/${aws_s3_object.entities.key}" } } @@ -2154,11 +2063,11 @@ resource "aws_comprehend_entity_recognizer" "test" { } documents { - s3_uri = "s3://${aws_s3_bucket.test.bucket}/${aws_s3_object.documents.id}" + s3_uri = "s3://${aws_s3_object.documents.bucket}/${aws_s3_object.documents.key}" } entity_list { - s3_uri = "s3://${aws_s3_bucket.test.bucket}/${aws_s3_object.entities.id}" + s3_uri = 
"s3://${aws_s3_object.entities.bucket}/${aws_s3_object.entities.key}" } } @@ -2275,11 +2184,11 @@ resource "aws_comprehend_entity_recognizer" "test" { } documents { - s3_uri = "s3://${aws_s3_bucket.test.bucket}/${aws_s3_object.documents.id}" + s3_uri = "s3://${aws_s3_object.documents.bucket}/${aws_s3_object.documents.key}" } entity_list { - s3_uri = "s3://${aws_s3_bucket.test.bucket}/${aws_s3_object.entities.id}" + s3_uri = "s3://${aws_s3_object.entities.bucket}/${aws_s3_object.entities.key}" } } @@ -2389,11 +2298,11 @@ resource "aws_comprehend_entity_recognizer" "test" { } documents { - s3_uri = "s3://${aws_s3_bucket.test.bucket}/${aws_s3_object.documents.id}" + s3_uri = "s3://${aws_s3_object.documents.bucket}/${aws_s3_object.documents.key}" } entity_list { - s3_uri = "s3://${aws_s3_bucket.test.bucket}/${aws_s3_object.entities.id}" + s3_uri = "s3://${aws_s3_object.entities.bucket}/${aws_s3_object.entities.key}" } } diff --git a/internal/service/comprehend/service_endpoint_resolver_gen.go b/internal/service/comprehend/service_endpoint_resolver_gen.go index b0affcf92068..56a15e2fcf38 100644 --- a/internal/service/comprehend/service_endpoint_resolver_gen.go +++ b/internal/service/comprehend/service_endpoint_resolver_gen.go @@ -62,7 +62,7 @@ func (r resolverV2) ResolveEndpoint(ctx context.Context, params comprehend.Endpo }) params.UseFIPS = aws.Bool(false) } else { - err = fmt.Errorf("looking up comprehend endpoint %q: %s", hostname, err) + err = fmt.Errorf("looking up comprehend endpoint %q: %w", hostname, err) return } } else { diff --git a/internal/service/comprehend/service_endpoints_gen_test.go b/internal/service/comprehend/service_endpoints_gen_test.go index 9b81f6139ff8..57e82777efca 100644 --- a/internal/service/comprehend/service_endpoints_gen_test.go +++ b/internal/service/comprehend/service_endpoints_gen_test.go @@ -521,7 +521,7 @@ func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { ) } -var errCancelOperation = fmt.Errorf("Test: 
Canceling request") +var errCancelOperation = errors.New("Test: Canceling request") func addCancelRequestMiddleware() func(*middleware.Stack) error { return func(stack *middleware.Stack) error { diff --git a/internal/service/comprehend/service_package_gen.go b/internal/service/comprehend/service_package_gen.go index 7ebebd001eb2..192b9246e9c0 100644 --- a/internal/service/comprehend/service_package_gen.go +++ b/internal/service/comprehend/service_package_gen.go @@ -7,7 +7,6 @@ import ( "unique" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/comprehend" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -90,7 +89,7 @@ func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) ( func(o *comprehend.Options) { if inContext, ok := conns.FromContext(ctx); ok && inContext.VCREnabled() { tflog.Info(ctx, "overriding retry behavior to immediately return VCR errors") - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(vcr.InteractionNotFoundRetryableFunc)) + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), vcr.InteractionNotFoundRetryableFunc) } }, withExtraOptions(ctx, p, config), diff --git a/internal/service/comprehend/tags_gen.go b/internal/service/comprehend/tags_gen.go index 072a6d472f36..6ede03713342 100644 --- a/internal/service/comprehend/tags_gen.go +++ b/internal/service/comprehend/tags_gen.go @@ -3,8 +3,8 @@ package comprehend import ( "context" - "fmt" + "github.com/YakDriver/smarterr" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/comprehend" awstypes "github.com/aws/aws-sdk-go-v2/service/comprehend/types" @@ -27,7 +27,7 @@ func listTags(ctx context.Context, conn *comprehend.Client, identifier string, o output, err := conn.ListTagsForResource(ctx, &input, optFns...) 
if err != nil { - return tftags.New(ctx, nil), err + return tftags.New(ctx, nil), smarterr.NewError(err) } return keyValueTags(ctx, output.Tags), nil @@ -39,7 +39,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri tags, err := listTags(ctx, meta.(*conns.AWSClient).ComprehendClient(ctx), identifier) if err != nil { - return err + return smarterr.NewError(err) } if inContext, ok := tftags.FromContext(ctx); ok { @@ -117,7 +117,7 @@ func updateTags(ctx context.Context, conn *comprehend.Client, identifier string, _, err := conn.UntagResource(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("untagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } @@ -132,7 +132,7 @@ func updateTags(ctx context.Context, conn *comprehend.Client, identifier string, _, err := conn.TagResource(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("tagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } diff --git a/internal/service/comprehend/test-fixtures/document_classifier/documents.csv b/internal/service/comprehend/test-fixtures/document_classifier/documents.csv index 610aba6048c7..6680fbf79142 100644 --- a/internal/service/comprehend/test-fixtures/document_classifier/documents.csv +++ b/internal/service/comprehend/test-fixtures/document_classifier/documents.csv @@ -1,100 +1,100 @@ -SPAM,"Dear Gerson Parker,\n\nBuy a Awesome Rubber Table from Grady-Stiedemann now!\n" -SPAM,"Hello Dr. Eunice Leannon,\n\nNow available!\n\nA Awesome Wooden Hat from Kiehn DDS\n" -SPAM,"Dear Delaney Fisher,\n\nBuy a Sleek Rubber Chair from Kilback-Hettinger now!\n" -SPAM,"Hello Mr. 
Elias Quitzon,\n\nNow available!\n\nA Practical Cotton Gloves from Schiller, Kemmer and Gulgowski\n" -PHISHING,"Dear Miss Marquise Jerde,\n\nYour transaction VvAr5Zg2JY has failed.\n\nCall 849.371.9905 for help.\n" -PHISHING,"Taryn Pfannerstill,\n\nYour order number 1tVfsJHalz has been returned.\n\nCall 756.681.2363 x628 to get help.\n" -PHISHING,"Hello Jett Armstrong,\n\nCall 737-319-3312 for help with your order brEaM7qfDv. Otherwise it will be returned to the sender.\n" -PHISHING,"Hello Graham Stoltenberg,\n\nCall 523-585-1577 for help with your order vh0wYmrvrv. Otherwise it will be returned to the sender.\n" -SPAM,"Hello Ulises Davis,\n\nNow available!\n\nA Small Granite Table from Jaskolski-Prohaska\n" -SPAM,"Hello Ms. Santos Weimann,\n\nNow available!\n\nA Ergonomic Plastic Computer from Robel-Ziemann\n" -SPAM,"Dear Lorna Hodkiewicz,\n\nBuy a Fantastic Steel Chair from Adams PhD now!\n" -PHISHING,"Aracely Christiansen,\n\nYour order number T8u4P1yQfG has been returned.\n\nCall 747-226-9061 to get help.\n" -SPAM,"Angeline Lebsack,\n\nDon't miss out on buying Wisozk-Rowe's Gorgeous Granite Computer today!\n" -SPAM,"Asha Greenholt,\n\nDon't miss out on buying Bernier PhD's Ergonomic Wooden Table today!\n" -PHISHING,"Dear Miss Flavio Smith,\n\nYour transaction hQ7oGKDHJd has failed.\n\nCall 442.751.9030 x9030 for help.\n" -SPAM,"Dear Ms. Carolina Mante,\n\nBuy a Rustic Plastic Table from Mayert V now!\n" -SPAM,"Hello Mr. Molly Goldner,\n\nNow available!\n\nA Rustic Cotton Car from Crona, McClure and Harber\n" -SPAM,"Hello Mia Larkin II,\n\nNow available!\n\nA Intelligent Cotton Car from Lesch III\n" -SPAM,"Al Gerlach V,\n\nDon't miss out on buying Daniel Jr.'s Intelligent Granite Shirt today!\n" -SPAM,"Dear Heath Kessler,\n\nBuy a Small Cotton Table from Kohler, Lindgren and Jacobson now!\n" -SPAM,"Dr. 
Ahmed Schneider,\n\nDon't miss out on buying Bogisich, Turcotte and Fay's Ergonomic Concrete Table today!\n" -PHISHING,"Dear Alena Mueller,\n\nYour transaction Kc0ov0TbVO has failed.\n\nCall 340.200.4353 x1049 for help.\n" -SPAM,"Dear Dewayne Turner,\n\nBuy a Rustic Plastic Table from Will II now!\n" -PHISHING,"Kaleb Durgan,\n\nYour order number qo5aid70LZ has been returned.\n\nCall 1-271-529-2911 to get help.\n" -SPAM,"Dear Rickie Reilly,\n\nBuy a Intelligent Cotton Table from Von, Koss and Cormier now!\n" -SPAM,"Brant Hettinger,\n\nDon't miss out on buying Schamberger-Bartoletti's Incredible Steel Shoes today!\n" -SPAM,"Dear Miss Heath Smith,\n\nBuy a Intelligent Rubber Shirt from Lindgren-Goldner now!\n" -PHISHING,"Modesto Monahan DVM,\n\nYour order number 8S3xS6mvEc has been returned.\n\nCall 1-526-152-9870 to get help.\n" -PHISHING,"Dear Genesis Wintheiser II,\n\nYour transaction 133wf0wD7e has failed.\n\nCall (330) 015-6966 for help.\n" -SPAM,"Hello Napoleon Sauer,\n\nNow available!\n\nA Practical Granite Chair from Lubowitz, Bartell and Mohr\n" -SPAM,"Dear Jennifer Buckridge,\n\nBuy a Gorgeous Rubber Table from Stroman DVM now!\n" -SPAM,"Dear Weldon Beatty IV,\n\nBuy a Sleek Wooden Computer from Bartell IV now!\n" -PHISHING,"Hello Nicholaus Beer,\n\nCall 1-806-724-1386 x5254 for help with your order FqmAcdUQBU. Otherwise it will be returned to the sender.\n" -SPAM,"Hello Dr. Aron Reichert,\n\nNow available!\n\nA Gorgeous Cotton Pants from Hagenes-Torp\n" -SPAM,"Hello Kathlyn Bechtelar,\n\nNow available!\n\nA Awesome Rubber Table from Zulauf Jr.\n" -SPAM,"Dear Dedrick Corkery V,\n\nBuy a Incredible Plastic Pants from Kirlin IV now!\n" -PHISHING,"Hello Margarett Kunze,\n\nCall (744) 342-3400 for help with your order Hs28ZR7V52. 
Otherwise it will be returned to the sender.\n" -SPAM,"Dear Maegan Trantow,\n\nBuy a Small Concrete Car from Greenholt II now!\n" -SPAM,"Granville O'Connell,\n\nDon't miss out on buying Osinski, Pollich and Block's Gorgeous Plastic Hat today!\n" -PHISHING,"Hello Sherman Waelchi II,\n\nCall (836) 650-2349 for help with your order u0u8Bz4LFX. Otherwise it will be returned to the sender.\n" -SPAM,"Dear Murl Jacobi,\n\nBuy a Gorgeous Steel Shirt from Rowe, Quigley and Monahan now!\n" -SPAM,"Katelin Blick,\n\nDon't miss out on buying Rau, Moore and Will's Gorgeous Granite Shoes today!\n" -SPAM,"Savanna Kuhlman,\n\nDon't miss out on buying Berge, Hane and Bartoletti's Ergonomic Plastic Chair today!\n" -SPAM,"Hello Mr. Marlin Nitzsche,\n\nNow available!\n\nA Small Wooden Shirt from Paucek, Willms and Beier\n" -SPAM,"Thomas Towne,\n\nDon't miss out on buying Herzog-Hagenes's Practical Granite Table today!\n" -PHISHING,"Melisa Barton Jr.,\n\nYour order number glADPbXwsV has been returned.\n\nCall 477-926-9464 x9324 to get help.\n" -PHISHING,"Norris Rodriguez,\n\nYour order number Tda57LYtFu has been returned.\n\nCall 1-413-187-4231 x0502 to get help.\n" -PHISHING,"Mrs. Bethel Carroll,\n\nYour order number J6xmnKvbV7 has been returned.\n\nCall 262-359-0480 x7240 to get help.\n" -PHISHING,"Dear Miss Jarvis Langosh,\n\nYour transaction fpnGt85S0r has failed.\n\nCall 331-139-3076 x108 for help.\n" -SPAM,"Hello Lucas Frami,\n\nNow available!\n\nA Sleek Steel Hat from Gutkowski-Hahn\n" -SPAM,"Ms. 
Brittany Flatley,\n\nDon't miss out on buying Reinger-Breitenberg's Intelligent Wooden Table today!\n" -SPAM,"Hello Miss Hermina Gaylord,\n\nNow available!\n\nA Sleek Concrete Car from Windler-Lakin\n" -SPAM,"Hello Brooke Turner,\n\nNow available!\n\nA Small Concrete Computer from Kohler DVM\n" -PHISHING,"Sandra Hartmann,\n\nYour order number m0xwL3WB3I has been returned.\n\nCall (103) 816-8172 x77407 to get help.\n" -PHISHING,"Roslyn Jacobi,\n\nYour order number 6W52YP4eN6 has been returned.\n\nCall 825-473-1315 to get help.\n" -PHISHING,"Mr. Hudson Olson,\n\nYour order number hxYmRmAL4i has been returned.\n\nCall 953.009.3128 x16955 to get help.\n" -SPAM,"Hello Domenico Ondricka,\n\nNow available!\n\nA Intelligent Steel Hat from Orn, Muller and Krajcik\n" -PHISHING,"Loren Howell,\n\nYour order number 2VQ317gxaD has been returned.\n\nCall 872.335.0711 to get help.\n" -SPAM,"Tyshawn Volkman,\n\nDon't miss out on buying Toy, Parisian and Beatty's Awesome Plastic Table today!\n" -SPAM,"Dear Vidal Von,\n\nBuy a Incredible Rubber Shoes from Ratke, Jaskolski and Kertzmann now!\n" -PHISHING,"Lexi Skiles,\n\nYour order number MQva1tgMkb has been returned.\n\nCall 601-114-7684 x4783 to get help.\n" -SPAM,"Dear Derek Keeling,\n\nBuy a Incredible Rubber Pants from Auer-Leffler now!\n" -PHISHING,"Dear Miss Xavier Farrell,\n\nYour transaction mKbwVCQbhO has failed.\n\nCall 940.934.0097 x5583 for help.\n" -SPAM,"Jodie Homenick,\n\nDon't miss out on buying Nienow-Daniel's Small Wooden Shoes today!\n" -SPAM,"Dear Dr. Roxanne Weimann,\n\nBuy a Fantastic Rubber Table from Kuvalis-Ledner now!\n" -PHISHING,"Dear Weston Murazik,\n\nYour transaction aXErjPh1a3 has failed.\n\nCall 706-538-1102 for help.\n" -SPAM,"Hello Dr. Lorena Jerde,\n\nNow available!\n\nA Intelligent Steel Shoes from Schneider-Pouros\n" -SPAM,"Andreane Johns,\n\nDon't miss out on buying Fay IV's Fantastic Wooden Car today!\n" -PHISHING,"Hello Grant Lesch V,\n\nCall (539) 134-0217 for help with your order YzNiM1s5ta. 
Otherwise it will be returned to the sender.\n" -PHISHING,"Dear Ms. Lauryn Stark,\n\nYour transaction VSuFzoRvlp has failed.\n\nCall (174) 665-5559 for help.\n" -PHISHING,"Hello Mariam Schultz,\n\nCall 260.307.7248 for help with your order SKQPNL4pd4. Otherwise it will be returned to the sender.\n" -PHISHING,"Joanne Aufderhar,\n\nYour order number LG3a2cyYwA has been returned.\n\nCall 844.982.8008 to get help.\n" -PHISHING,"Hello Estella Hickle,\n\nCall 1-612-967-8683 x836 for help with your order vYfqTwp1eM. Otherwise it will be returned to the sender.\n" -PHISHING,"Hello Ewald Cronin,\n\nCall 544-301-9279 x995 for help with your order uFqjeJZxXN. Otherwise it will be returned to the sender.\n" -SPAM,"Hello Russel Langosh,\n\nNow available!\n\nA Practical Granite Computer from Boehm-Waelchi\n" -PHISHING,"Marty Swaniawski,\n\nYour order number FPa7TqGwmC has been returned.\n\nCall (456) 156-1419 x4820 to get help.\n" -PHISHING,"Mireille Keeling V,\n\nYour order number BMn2c6TZ1n has been returned.\n\nCall 832-286-0025 to get help.\n" -SPAM,"Jackie Reinger V,\n\nDon't miss out on buying Lindgren-Cronin's Practical Wooden Hat today!\n" -SPAM,"Dear Nellie Morar DVM,\n\nBuy a Awesome Plastic Pants from Schultz IV now!\n" -PHISHING,"Hello Allison Zemlak,\n\nCall (825) 541-7656 for help with your order DHIFdYRC8a. 
Otherwise it will be returned to the sender.\n" -SPAM,"Dear Stacy Fadel DDS,\n\nBuy a Intelligent Concrete Shirt from Anderson-Sanford now!\n" -PHISHING,"Dear Kathryne Tromp,\n\nYour transaction 0hsRFeFFQV has failed.\n\nCall 856-325-2229 for help.\n" -PHISHING,"Dear Kameron Schuster,\n\nYour transaction Jc64U5Z6bN has failed.\n\nCall 847-556-6672 for help.\n" -PHISHING,"Dear Cody Hilll,\n\nYour transaction 8JCmPX02QI has failed.\n\nCall 332.053.6289 x8723 for help.\n" -SPAM,"Hailie Hudson,\n\nDon't miss out on buying Heller, Fritsch and Larson's Gorgeous Cotton Gloves today!\n" -PHISHING,"Dear Quentin Morar,\n\nYour transaction v73CDOjujv has failed.\n\nCall (155) 212-4077 x101 for help.\n" -SPAM,"Dear Carley Runolfsdottir,\n\nBuy a Intelligent Wooden Shirt from Bogan, Bahringer and Hickle now!\n" -PHISHING,"Tyler Bayer,\n\nYour order number LuZbltqNac has been returned.\n\nCall 941-426-1385 x0820 to get help.\n" -SPAM,"Dear Mason Beatty,\n\nBuy a Awesome Plastic Hat from Koss, Upton and Jast now!\n" -SPAM,"Hello Bryana Upton,\n\nNow available!\n\nA Ergonomic Concrete Hat from Cummings-Douglas\n" -PHISHING,"Dear Giovanna Lebsack I,\n\nYour transaction U4w4gmd9gU has failed.\n\nCall 1-579-711-4905 x878 for help.\n" -PHISHING,"Dear Osvaldo Emard,\n\nYour transaction bG82oF1Ooe has failed.\n\nCall 650.367.4781 x67845 for help.\n" -PHISHING,"Dear Mrs. Vicky Hahn,\n\nYour transaction zdfgf1wDy5 has failed.\n\nCall 1-548-394-5141 x6462 for help.\n" -PHISHING,"Dear Dr. Seamus Keebler,\n\nYour transaction fQHpi64Fz0 has failed.\n\nCall (113) 105-1446 x282 for help.\n" -SPAM,"Mrs. Skyla Willms,\n\nDon't miss out on buying McKenzie, Cormier and Volkman's Rustic Concrete Gloves today!\n" -SPAM,"Hello Ollie Crooks,\n\nNow available!\n\nA Gorgeous Plastic Car from Murphy-Hills\n" -SPAM,"Dear Dr. 
Jarret Block,\n\nBuy a Awesome Wooden Pants from Schiller, Baumbach and Orn now!\n" -PHISHING,"Florine West,\n\nYour order number SuyZnOCgV1 has been returned.\n\nCall 591-926-5276 x1996 to get help.\n" -SPAM,"Dear Jason Fritsch,\n\nBuy a Practical Wooden Car from Carter-Hane now!\n" -PHISHING,"Hello Kayleigh Goldner DDS,\n\nCall 750.906.1703 x8827 for help with your order aUyaWd3SnK. Otherwise it will be returned to the sender.\n" +SPAM,"Dear Miller Zulauf,\n\nBuy a Boulevard Tank 7 from Gottlieb PLC now!\n" +SPAM,"Dear Ms. Kavon Konopelski PhD,\n\nBuy a Hill Farmstead Abner from Schmidt, Schmidt and Schmidt now!\n" +SPAM,"Dear Ms. Hannah Mante I,\n\nBuy a Bell’s Two Hearted from Stamm-Stamm now!\n" +SPAM,"Tyler Will,\n\nDon't miss out on buying Bahringer, Bahringer and Bahringer's Avery Mephistopheles Stout today!\n" +PHISHING,"Ms. Jakayla Vandervort,\n\nYour order number zwrfmprbhc has been returned.\n\nCall +1-261-844-1689 to get help.\n" +PHISHING,"Dear Kayleigh Bashirian I,\n\nYour transaction dlmiqubfcf has failed.\n\nCall 1-984-227-1127 for help.\n" +PHISHING,"Nia Batz,\n\nYour order number liiwbinqlq has been returned.\n\nCall 402-250-8383 x9865 to get help.\n" +PHISHING,"Mr. Gus Rice,\n\nYour order number xmpawgudin has been returned.\n\nCall 685.225.2961 x4503 to get help.\n" +SPAM,"Hello Elena Runolfsdottir IV,\n\nNow available!\n\nA Founders Red’s Rye from Witting Group\n" +SPAM,"Hello Ms. Marisa Borer V,\n\nNow available!\n\nA Haymarket Angry Birds Rye IPA from Larkin Inc\n" +PHISHING,"Hello Dale Wilkinson,\n\nCall 524.541.3910 x2765 for help with your order hxqmmhkgnq. Otherwise it will be returned to the sender.\n" +PHISHING,"Hello Mr. Zachery O'Keefe II,\n\nCall (754) 290-9755 x28617 for help with your order vvlcglhavy. 
Otherwise it will be returned to the sender.\n" +SPAM,"Rickey Hoeger,\n\nDon't miss out on buying Braun, Braun and Braun's Great Divide Yeti today!\n" +SPAM,"Dear Adella Feeney,\n\nBuy a Brooklyn Brewery Lager from Lubowitz and Sons now!\n" +SPAM,"Ms. Hollie Nikolaus Sr.,\n\nDon't miss out on buying Barrows-Barrows's Brooklyn Brewery Lager today!\n" +SPAM,"Hello Violet Schneider,\n\nNow available!\n\nA New Belgium Fat Tire from Von, Von and Von\n" +PHISHING,"Dear Sandrine Zulauf MD,\n\nYour transaction xfsocadaet has failed.\n\nCall 361.648.3656 x289 for help.\n" +PHISHING,"Mr. Charley Fahey,\n\nYour order number lfcbxzffto has been returned.\n\nCall 964.814.6026 x582 to get help.\n" +PHISHING,"Hello Lily Ritchie MD,\n\nCall +1 (235) 218-2052 for help with your order ayqcgejtvo. Otherwise it will be returned to the sender.\n" +SPAM,"Dear Weldon Leuschke,\n\nBuy a Boulevard Tank 7 from Shields-Shields now!\n" +SPAM,"Hello Thea Cronin,\n\nNow available!\n\nA Brewery Ommegang Three Philosophers from Oberbrunner, Oberbrunner and Oberbrunner\n" +SPAM,"Hello Rhett Shanahan,\n\nNow available!\n\nA Floyds Dark Lord from Bosco LLC\n" +PHISHING,"Carley Torphy,\n\nYour order number ahkptypafk has been returned.\n\nCall +1-621-812-1206 to get help.\n" +PHISHING,"Hello Ms. Adella Runolfsdottir DDS,\n\nCall 374.890.8913 for help with your order dvxyktwiso. Otherwise it will be returned to the sender.\n" +SPAM,"Hello Alan Walter Sr.,\n\nNow available!\n\nA Green Flash Palate Wrecker from Graham, Graham and Graham\n" +PHISHING,"Hello Molly Grant,\n\nCall (306) 627-7209 x71992 for help with your order qpnazfoijv. Otherwise it will be returned to the sender.\n" +PHISHING,"Dear Bailey Smith,\n\nYour transaction nvuisehndq has failed.\n\nCall 585-505-6343 x187 for help.\n" +PHISHING,"Mr. 
Jabari Borer,\n\nYour order number hbiqmmnjlu has been returned.\n\nCall 517-338-4467 x4199 to get help.\n" +SPAM,"Hilton O'Keefe,\n\nDon't miss out on buying Weissnat-Weissnat's Perennial Artisan Ales Abraxas Imperial Stout today!\n" +SPAM,"Ms. Carolina Jakubowski IV,\n\nDon't miss out on buying McLaughlin-McLaughlin's 21st Amendment Bitter American today!\n" +PHISHING,"Hello Stone Wisoky,\n\nCall 1-937-892-3418 x056 for help with your order eejtolsuzc. Otherwise it will be returned to the sender.\n" +SPAM,"Rylan Roob,\n\nDon't miss out on buying Harris-Harris's Saint Arnold Fancy Lawnmower today!\n" +PHISHING,"Dear Ms. Bonita Deckow V,\n\nYour transaction lobmlodwbi has failed.\n\nCall 673.284.1552 for help.\n" +SPAM,"Wilton Jaskolski,\n\nDon't miss out on buying Lubowitz PLC's Allagash White today!\n" +PHISHING,"Vernon Purdy,\n\nYour order number lewpstaxpn has been returned.\n\nCall 335.317.6581 x81749 to get help.\n" +SPAM,"Mallory Dietrich,\n\nDon't miss out on buying Yost-Yost's Deschutes Black Butte Porter today!\n" +PHISHING,"Guadalupe Stanton,\n\nYour order number iddgbzfnqp has been returned.\n\nCall +1.683.241.2135 to get help.\n" +PHISHING,"Dear Kasandra Ritchie,\n\nYour transaction myenxkuxto has failed.\n\nCall +1-458-407-6700 for help.\n" +PHISHING,"Hello Skylar Fritsch,\n\nCall 601-258-3052 x98651 for help with your order okbxafunej. Otherwise it will be returned to the sender.\n" +SPAM,"Dear Laney Nitzsche,\n\nBuy a Sierra Nevada Bigfoot Barleywine-Style Ale from Terry, Terry and Terry now!\n" +PHISHING,"Hello Gracie Cremin IV,\n\nCall (915) 290-2958 for help with your order esnlpvbxgw. Otherwise it will be returned to the sender.\n" +SPAM,"Kaelyn Schaden,\n\nDon't miss out on buying Bergnaum-Bergnaum's New Belgium Fat Tire today!\n" +SPAM,"Andres Borer,\n\nDon't miss out on buying Franecki-Franecki's Firestone Walker Parabola today!\n" +SPAM,"Dear Ms. 
Laurence Windler MD,\n\nBuy a Dale’s Pale Ale from Aufderhar Ltd now!\n" +SPAM,"Hello Leon Ferry,\n\nNow available!\n\nA Allagash Curieux from Little, Little and Little\n" +PHISHING,"Hello Reuben Green,\n\nCall +1.517.604.7444 for help with your order lceqaessmo. Otherwise it will be returned to the sender.\n" +SPAM,"Dear Ms. Alayna Jacobs,\n\nBuy a Avery Uncle Jacob’s Stout from Daugherty-Daugherty now!\n" +SPAM,"Hello Kade Schumm,\n\nNow available!\n\nA Foothills Brewing Sexual Chocolate from Kovacek-Kovacek\n" +SPAM,"Candida Boyer,\n\nDon't miss out on buying Krajcik, Krajcik and Krajcik's Great Lakes Edmund Fitzgerald Porter today!\n" +SPAM,"Mariana Nienow DVM,\n\nDon't miss out on buying Ward PLC's New Belgium Lips of Faith La Folie today!\n" +SPAM,"Hello Bret Grady,\n\nNow available!\n\nA Highland Cold Mountain Winter Ale from Beahan-Beahan\n" +PHISHING,"Hello Lillie Lebsack,\n\nCall 316.982.2906 x27941 for help with your order zlglrwnipa. Otherwise it will be returned to the sender.\n" +PHISHING,"Susanna Rolfson,\n\nYour order number xkrcxnrdrk has been returned.\n\nCall (223) 441-5035 x6167 to get help.\n" +SPAM,"Mr. Domenico Kertzmann I,\n\nDon't miss out on buying Hintz, Hintz and Hintz's Wild Heaven Eschaton today!\n" +SPAM,"Hillary Schinner,\n\nDon't miss out on buying Strosin-Strosin's Bell’s Two Hearted today!\n" +SPAM,"Harry O'Kon,\n\nDon't miss out on buying Gerhold, Gerhold and Gerhold's Green Flash Palate Wrecker today!\n" +PHISHING,"Dear Forrest Ullrich,\n\nYour transaction xvbzkmqyrz has failed.\n\nCall +1 (992) 894-5548 for help.\n" +PHISHING,"Dear Ms. Dayna Gislason Jr.,\n\nYour transaction oxhvreavun has failed.\n\nCall 779-772-8691 for help.\n" +PHISHING,"Hello Constance Hand,\n\nCall 1-572-597-2202 for help with your order gqxeptgike. Otherwise it will be returned to the sender.\n" +SPAM,"Dear Neha Bayer,\n\nBuy a Terrapin Wake n Bake from Green Inc now!\n" +SPAM,"Mr. 
Lavern Walter,\n\nDon't miss out on buying Mayert-Mayert's Bell’s Hop Slam today!\n" +SPAM,"Dear Andrew Heller,\n\nBuy a The Bruery Saison Rue from Waters Group now!\n" +SPAM,"Ms. Karianne Dibbert Sr.,\n\nDon't miss out on buying Langworth-Langworth's Hill Farmstead Abner today!\n" +PHISHING,"Ms. Corine Homenick,\n\nYour order number yuwrwkhrjq has been returned.\n\nCall (913) 489-1702 to get help.\n" +PHISHING,"Hello Mr. Leopoldo VonRueden DDS,\n\nCall 1-658-996-7312 x7820 for help with your order elyqdcwyjc. Otherwise it will be returned to the sender.\n" +SPAM,"Hello Providenci Heidenreich,\n\nNow available!\n\nA Fullsteam Carver from Williamson Ltd\n" +SPAM,"Dear Ms. Yasmine Harber,\n\nBuy a Ten Fidy from Abbott, Abbott and Abbott now!\n" +SPAM,"Jasper Block,\n\nDon't miss out on buying Price-Price's Westbrook Mexican Cake today!\n" +PHISHING,"Thurman Blick MD,\n\nYour order number mhanpuruge has been returned.\n\nCall (609) 564-5086 to get help.\n" +SPAM,"Dear Jarret Homenick,\n\nBuy a Cigar City Hanaphu Imperial Stout from Christiansen PLC now!\n" +PHISHING,"Hello Bryana Fadel,\n\nCall 1-242-357-5173 x1449 for help with your order amcchfhseb. Otherwise it will be returned to the sender.\n" +SPAM,"Hello Marietta Hoeger,\n\nNow available!\n\nA Firestone Walker Velvet Merkin from Herzog, Herzog and Herzog\n" +SPAM,"Alejandrin Rogahn,\n\nDon't miss out on buying Kerluke, Kerluke and Kerluke's Hill Farmstead Everett Porter today!\n" +PHISHING,"Kian Walter,\n\nYour order number yafdhzsphd has been returned.\n\nCall +1-462-307-4667 to get help.\n" +SPAM,"Hello Mr. Marquis Okuneva,\n\nNow available!\n\nA Anchor Steam Beer from Pfannerstill-Pfannerstill\n" +SPAM,"Hello Rosella Green,\n\nNow available!\n\nA Wicked Weed Serenity from Klein, Klein and Klein\n" +SPAM,"Mr. 
Hyman McDermott,\n\nDon't miss out on buying Rippin PLC's The Bruery Sans Pagaie today!\n" +SPAM,"Dear Dwight Russel,\n\nBuy a Wild Heaven Eschaton from Abbott PLC now!\n" +SPAM,"Nettie Kreiger,\n\nDon't miss out on buying Casper Group's Great Divide Yeti today!\n" +PHISHING,"Dear Raymundo Wuckert,\n\nYour transaction ozqflyxkpv has failed.\n\nCall 749.409.2335 for help.\n" +PHISHING,"Hello Ms. Eva Schamberger DVM,\n\nCall +1-938-771-0587 for help with your order hwihmypuuv. Otherwise it will be returned to the sender.\n" +PHISHING,"Hello Lincoln Cole,\n\nCall 640.721.2514 for help with your order vxzhfsepwz. Otherwise it will be returned to the sender.\n" +PHISHING,"Dear Mr. Terrill O'Conner,\n\nYour transaction wupxwtkiny has failed.\n\nCall 1-937-354-6546 x14927 for help.\n" +PHISHING,"Dear Ms. Valentina Johnson DDS,\n\nYour transaction txxutugwto has failed.\n\nCall 327-497-8512 x16391 for help.\n" +PHISHING,"Hello Aiyana Senger,\n\nCall 1-457-680-2828 x090 for help with your order xopgerkxib. Otherwise it will be returned to the sender.\n" +PHISHING,"Edward Block,\n\nYour order number joqubrmvvg has been returned.\n\nCall (513) 464-7385 x346 to get help.\n" +SPAM,"Dear Retta Donnelly,\n\nBuy a Odell 90 Shilling Ale from Windler-Windler now!\n" +SPAM,"Bennie Walter,\n\nDon't miss out on buying Stracke, Stracke and Stracke's Revolution Anti-Hero IPA today!\n" +SPAM,"Peter Miller,\n\nDon't miss out on buying Hermiston PLC's Schlafly Pumpkin Ale today!\n" +SPAM,"Dear Mr. Tremayne Stark,\n\nBuy a Ballast Point Sculpin from Shields Inc now!\n" +PHISHING,"Hello Terrence O'Conner,\n\nCall 204.354.8255 for help with your order zbfguftyqp. Otherwise it will be returned to the sender.\n" +PHISHING,"Hello Oma Bergnaum III,\n\nCall 392.896.4135 x39029 for help with your order rutmdrozem. Otherwise it will be returned to the sender.\n" +SPAM,"Ms. 
Eleanora Bechtelar I,\n\nDon't miss out on buying Hoppe, Hoppe and Hoppe's Hill Farmstead Everett Porter today!\n" +PHISHING,"Dear Theodore Christiansen,\n\nYour transaction jikdggshvz has failed.\n\nCall 880.778.3256 for help.\n" +SPAM,"Ms. Alexandrea Schumm III,\n\nDon't miss out on buying Hand Inc's Smuttynose Finest Kind IPA today!\n" +SPAM,"Dear Amiya Quigley,\n\nBuy a New Glarus Brewing Serendipity from Smith-Smith now!\n" +SPAM,"Hello Kailey Towne Jr.,\n\nNow available!\n\nA 21st Amendment Bitter American from Rutherford-Rutherford\n" +SPAM,"Hello Aliyah Fisher,\n\nNow available!\n\nA Stone Enjoy By… IPA from Hane-Hane\n" +PHISHING,"Amanda Lakin,\n\nYour order number tofunjxtjv has been returned.\n\nCall 978-619-5434 x06171 to get help.\n" +PHISHING,"Dear Angelo Pfeffer,\n\nYour transaction jnwsxqgrxq has failed.\n\nCall 789-697-1286 x80594 for help.\n" diff --git a/internal/service/comprehend/test-fixtures/entity_recognizer/annotations.csv b/internal/service/comprehend/test-fixtures/entity_recognizer/annotations.csv index b60f915fc593..2a80dbd182ae 100644 --- a/internal/service/comprehend/test-fixtures/entity_recognizer/annotations.csv +++ b/internal/service/comprehend/test-fixtures/entity_recognizer/annotations.csv @@ -1,1001 +1,1001 @@ File,Line,Begin Offset,End Offset,Type -documents.txt,0,19,32,MANAGER -documents.txt,1,0,15,MANAGER -documents.txt,2,0,22,MANAGER -documents.txt,3,0,14,MANAGER -documents.txt,4,0,13,ENGINEER -documents.txt,5,0,15,ENGINEER -documents.txt,6,0,14,ENGINEER -documents.txt,7,25,41,ENGINEER -documents.txt,8,0,19,MANAGER -documents.txt,9,25,38,MANAGER -documents.txt,10,0,16,MANAGER -documents.txt,11,0,12,ENGINEER -documents.txt,12,0,10,MANAGER -documents.txt,13,36,54,MANAGER -documents.txt,14,20,33,ENGINEER -documents.txt,15,0,22,MANAGER -documents.txt,16,0,14,MANAGER -documents.txt,17,0,21,MANAGER -documents.txt,18,0,15,MANAGER -documents.txt,19,0,15,MANAGER +documents.txt,0,0,13,MANAGER +documents.txt,1,0,14,MANAGER 
+documents.txt,2,0,21,MANAGER +documents.txt,3,0,15,MANAGER +documents.txt,4,0,17,ENGINEER +documents.txt,5,0,22,ENGINEER +documents.txt,6,37,56,ENGINEER +documents.txt,7,0,13,ENGINEER +documents.txt,8,25,42,MANAGER +documents.txt,9,25,48,MANAGER +documents.txt,10,0,11,ENGINEER +documents.txt,11,25,39,ENGINEER +documents.txt,12,0,17,MANAGER +documents.txt,13,0,11,MANAGER +documents.txt,14,0,20,MANAGER +documents.txt,15,0,14,MANAGER +documents.txt,16,20,41,ENGINEER +documents.txt,17,0,12,ENGINEER +documents.txt,18,0,13,ENGINEER +documents.txt,19,0,14,MANAGER documents.txt,20,0,14,MANAGER -documents.txt,21,20,35,ENGINEER -documents.txt,22,19,36,MANAGER -documents.txt,23,37,55,ENGINEER -documents.txt,24,0,12,MANAGER -documents.txt,25,0,12,MANAGER -documents.txt,26,19,36,MANAGER -documents.txt,27,0,13,ENGINEER -documents.txt,28,0,15,ENGINEER -documents.txt,29,0,15,MANAGER -documents.txt,30,0,17,MANAGER -documents.txt,31,0,13,MANAGER -documents.txt,32,0,13,ENGINEER -documents.txt,33,25,39,MANAGER -documents.txt,34,25,41,MANAGER -documents.txt,35,0,11,MANAGER -documents.txt,36,25,45,ENGINEER -documents.txt,37,0,15,MANAGER -documents.txt,38,0,12,MANAGER -documents.txt,39,0,14,ENGINEER -documents.txt,40,0,15,MANAGER +documents.txt,21,25,41,MANAGER +documents.txt,22,0,12,ENGINEER +documents.txt,23,0,14,ENGINEER +documents.txt,24,0,23,MANAGER +documents.txt,25,25,42,ENGINEER +documents.txt,26,0,14,ENGINEER +documents.txt,27,0,18,ENGINEER +documents.txt,28,0,13,MANAGER +documents.txt,29,36,53,MANAGER +documents.txt,30,0,15,ENGINEER +documents.txt,31,0,15,MANAGER +documents.txt,32,0,15,ENGINEER +documents.txt,33,0,12,MANAGER +documents.txt,34,0,20,ENGINEER +documents.txt,35,0,21,MANAGER +documents.txt,36,0,15,ENGINEER +documents.txt,37,0,16,ENGINEER +documents.txt,38,0,16,ENGINEER +documents.txt,39,0,19,MANAGER +documents.txt,40,25,39,ENGINEER documents.txt,41,0,12,MANAGER -documents.txt,42,0,14,MANAGER -documents.txt,43,0,15,MANAGER -documents.txt,44,0,14,MANAGER 
-documents.txt,45,37,55,ENGINEER -documents.txt,46,0,14,ENGINEER -documents.txt,47,0,12,ENGINEER -documents.txt,48,20,33,ENGINEER -documents.txt,49,25,37,MANAGER -documents.txt,50,0,15,MANAGER -documents.txt,51,0,12,MANAGER -documents.txt,52,25,40,MANAGER -documents.txt,53,0,15,ENGINEER -documents.txt,54,37,51,ENGINEER -documents.txt,55,0,18,ENGINEER -documents.txt,56,25,38,MANAGER -documents.txt,57,0,18,ENGINEER -documents.txt,58,0,11,MANAGER -documents.txt,59,0,18,MANAGER -documents.txt,60,0,13,ENGINEER -documents.txt,61,19,35,MANAGER -documents.txt,62,0,14,ENGINEER -documents.txt,63,0,13,MANAGER -documents.txt,64,0,13,MANAGER -documents.txt,65,20,42,ENGINEER -documents.txt,66,25,41,MANAGER -documents.txt,67,0,22,MANAGER -documents.txt,68,25,41,ENGINEER -documents.txt,69,20,38,ENGINEER -documents.txt,70,0,11,ENGINEER -documents.txt,71,37,52,ENGINEER -documents.txt,72,0,20,ENGINEER -documents.txt,73,25,39,ENGINEER -documents.txt,74,0,11,MANAGER -documents.txt,75,0,13,ENGINEER -documents.txt,76,37,53,ENGINEER +documents.txt,42,0,15,MANAGER +documents.txt,43,19,43,MANAGER +documents.txt,44,0,13,MANAGER +documents.txt,45,0,15,ENGINEER +documents.txt,46,0,27,MANAGER +documents.txt,47,0,10,MANAGER +documents.txt,48,0,14,MANAGER +documents.txt,49,0,10,MANAGER +documents.txt,50,0,12,MANAGER +documents.txt,51,0,16,ENGINEER +documents.txt,52,0,10,ENGINEER +documents.txt,53,0,18,MANAGER +documents.txt,54,36,50,MANAGER +documents.txt,55,0,12,MANAGER +documents.txt,56,0,13,ENGINEER +documents.txt,57,0,11,ENGINEER +documents.txt,58,0,13,ENGINEER +documents.txt,59,19,34,MANAGER +documents.txt,60,0,13,MANAGER +documents.txt,61,19,28,MANAGER +documents.txt,62,0,12,MANAGER +documents.txt,63,0,14,ENGINEER +documents.txt,64,0,16,ENGINEER +documents.txt,65,0,17,MANAGER +documents.txt,66,0,19,MANAGER +documents.txt,67,36,52,MANAGER +documents.txt,68,0,11,ENGINEER +documents.txt,69,0,11,MANAGER +documents.txt,70,0,23,ENGINEER +documents.txt,71,0,22,MANAGER 
+documents.txt,72,36,47,MANAGER +documents.txt,73,37,58,ENGINEER +documents.txt,74,0,20,MANAGER +documents.txt,75,0,14,MANAGER +documents.txt,76,0,13,MANAGER documents.txt,77,0,17,MANAGER -documents.txt,78,0,9,MANAGER -documents.txt,79,25,43,ENGINEER -documents.txt,80,19,35,MANAGER -documents.txt,81,0,14,ENGINEER -documents.txt,82,20,30,ENGINEER +documents.txt,78,36,49,MANAGER +documents.txt,79,0,17,ENGINEER +documents.txt,80,25,38,ENGINEER +documents.txt,81,0,28,ENGINEER +documents.txt,82,0,11,ENGINEER documents.txt,83,0,17,ENGINEER -documents.txt,84,36,53,MANAGER -documents.txt,85,0,20,ENGINEER -documents.txt,86,0,11,MANAGER -documents.txt,87,0,16,ENGINEER -documents.txt,88,19,35,MANAGER -documents.txt,89,0,18,MANAGER -documents.txt,90,0,12,ENGINEER -documents.txt,91,20,31,ENGINEER -documents.txt,92,20,31,ENGINEER -documents.txt,93,0,18,ENGINEER -documents.txt,94,0,13,MANAGER -documents.txt,95,25,38,MANAGER -documents.txt,96,0,18,MANAGER -documents.txt,97,37,48,ENGINEER -documents.txt,98,0,17,MANAGER -documents.txt,99,0,18,ENGINEER -documents.txt,100,0,10,MANAGER -documents.txt,101,0,15,MANAGER -documents.txt,102,0,21,ENGINEER -documents.txt,103,0,14,MANAGER -documents.txt,104,0,15,MANAGER -documents.txt,105,20,35,ENGINEER -documents.txt,106,0,16,MANAGER -documents.txt,107,0,14,ENGINEER -documents.txt,108,0,17,MANAGER -documents.txt,109,0,13,MANAGER -documents.txt,110,0,23,MANAGER -documents.txt,111,0,17,MANAGER -documents.txt,112,0,24,MANAGER -documents.txt,113,37,58,ENGINEER -documents.txt,114,0,12,ENGINEER -documents.txt,115,0,11,ENGINEER -documents.txt,116,0,14,ENGINEER -documents.txt,117,36,48,MANAGER -documents.txt,118,0,13,ENGINEER -documents.txt,119,0,13,ENGINEER -documents.txt,120,20,33,ENGINEER -documents.txt,121,0,15,MANAGER -documents.txt,122,0,19,ENGINEER -documents.txt,123,0,13,MANAGER -documents.txt,124,0,17,MANAGER -documents.txt,125,0,12,ENGINEER -documents.txt,126,0,16,MANAGER -documents.txt,127,25,42,MANAGER -documents.txt,128,25,36,ENGINEER 
-documents.txt,129,0,11,ENGINEER -documents.txt,130,0,15,ENGINEER -documents.txt,131,19,33,MANAGER -documents.txt,132,0,20,MANAGER -documents.txt,133,0,18,ENGINEER -documents.txt,134,37,49,ENGINEER -documents.txt,135,37,49,ENGINEER -documents.txt,136,20,37,ENGINEER -documents.txt,137,0,17,ENGINEER -documents.txt,138,0,17,ENGINEER -documents.txt,139,0,13,ENGINEER -documents.txt,140,37,57,ENGINEER -documents.txt,141,0,9,ENGINEER -documents.txt,142,0,12,MANAGER -documents.txt,143,0,10,MANAGER -documents.txt,144,0,16,MANAGER -documents.txt,145,19,30,MANAGER -documents.txt,146,19,37,MANAGER -documents.txt,147,0,12,MANAGER -documents.txt,148,0,18,MANAGER -documents.txt,149,0,12,MANAGER -documents.txt,150,0,10,ENGINEER -documents.txt,151,0,11,ENGINEER -documents.txt,152,37,48,ENGINEER -documents.txt,153,25,36,MANAGER -documents.txt,154,0,11,ENGINEER -documents.txt,155,20,35,ENGINEER -documents.txt,156,0,13,ENGINEER -documents.txt,157,0,14,MANAGER -documents.txt,158,0,18,MANAGER -documents.txt,159,0,18,MANAGER -documents.txt,160,20,34,ENGINEER -documents.txt,161,25,42,MANAGER -documents.txt,162,36,52,MANAGER -documents.txt,163,0,13,ENGINEER -documents.txt,164,0,15,MANAGER -documents.txt,165,0,12,ENGINEER -documents.txt,166,0,11,MANAGER -documents.txt,167,0,17,MANAGER -documents.txt,168,0,13,MANAGER -documents.txt,169,0,14,MANAGER -documents.txt,170,0,15,MANAGER -documents.txt,171,25,39,ENGINEER -documents.txt,172,0,17,MANAGER -documents.txt,173,0,13,MANAGER +documents.txt,84,25,44,ENGINEER +documents.txt,85,37,52,ENGINEER +documents.txt,86,0,13,MANAGER +documents.txt,87,36,49,MANAGER +documents.txt,88,36,47,MANAGER +documents.txt,89,0,15,MANAGER +documents.txt,90,0,13,ENGINEER +documents.txt,91,25,45,ENGINEER +documents.txt,92,0,14,MANAGER +documents.txt,93,0,15,ENGINEER +documents.txt,94,0,10,MANAGER +documents.txt,95,0,20,MANAGER +documents.txt,96,25,36,MANAGER +documents.txt,97,0,15,MANAGER +documents.txt,98,37,53,ENGINEER +documents.txt,99,0,17,ENGINEER 
+documents.txt,100,0,16,ENGINEER +documents.txt,101,25,45,ENGINEER +documents.txt,102,25,47,MANAGER +documents.txt,103,0,16,MANAGER +documents.txt,104,0,19,ENGINEER +documents.txt,105,0,20,ENGINEER +documents.txt,106,0,14,ENGINEER +documents.txt,107,0,14,MANAGER +documents.txt,108,0,12,MANAGER +documents.txt,109,0,12,ENGINEER +documents.txt,110,0,12,MANAGER +documents.txt,111,36,52,MANAGER +documents.txt,112,0,20,MANAGER +documents.txt,113,36,50,MANAGER +documents.txt,114,0,11,MANAGER +documents.txt,115,0,14,MANAGER +documents.txt,116,0,17,ENGINEER +documents.txt,117,0,18,ENGINEER +documents.txt,118,0,12,MANAGER +documents.txt,119,0,18,MANAGER +documents.txt,120,0,17,ENGINEER +documents.txt,121,0,18,ENGINEER +documents.txt,122,0,14,MANAGER +documents.txt,123,0,19,ENGINEER +documents.txt,124,0,16,ENGINEER +documents.txt,125,0,21,MANAGER +documents.txt,126,19,36,MANAGER +documents.txt,127,0,15,MANAGER +documents.txt,128,0,15,MANAGER +documents.txt,129,0,14,ENGINEER +documents.txt,130,0,19,MANAGER +documents.txt,131,0,13,ENGINEER +documents.txt,132,0,11,MANAGER +documents.txt,133,0,18,MANAGER +documents.txt,134,0,13,MANAGER +documents.txt,135,25,39,ENGINEER +documents.txt,136,0,14,MANAGER +documents.txt,137,0,12,MANAGER +documents.txt,138,0,13,MANAGER +documents.txt,139,37,50,ENGINEER +documents.txt,140,0,15,MANAGER +documents.txt,141,0,13,ENGINEER +documents.txt,142,0,11,ENGINEER +documents.txt,143,37,58,ENGINEER +documents.txt,144,0,19,ENGINEER +documents.txt,145,0,14,MANAGER +documents.txt,146,0,16,ENGINEER +documents.txt,147,0,20,MANAGER +documents.txt,148,0,24,MANAGER +documents.txt,149,0,14,ENGINEER +documents.txt,150,0,15,ENGINEER +documents.txt,151,0,13,ENGINEER +documents.txt,152,20,33,ENGINEER +documents.txt,153,0,15,MANAGER +documents.txt,154,0,20,ENGINEER +documents.txt,155,37,52,ENGINEER +documents.txt,156,0,14,MANAGER +documents.txt,157,0,14,ENGINEER +documents.txt,158,0,13,ENGINEER +documents.txt,159,0,11,MANAGER +documents.txt,160,0,17,ENGINEER 
+documents.txt,161,25,42,ENGINEER +documents.txt,162,25,35,ENGINEER +documents.txt,163,0,13,MANAGER +documents.txt,164,0,16,ENGINEER +documents.txt,165,0,13,ENGINEER +documents.txt,166,0,12,ENGINEER +documents.txt,167,19,36,MANAGER +documents.txt,168,0,12,MANAGER +documents.txt,169,0,19,MANAGER +documents.txt,170,0,9,ENGINEER +documents.txt,171,37,54,ENGINEER +documents.txt,172,36,49,MANAGER +documents.txt,173,0,23,ENGINEER documents.txt,174,0,14,ENGINEER -documents.txt,175,0,9,MANAGER -documents.txt,176,0,16,MANAGER -documents.txt,177,20,40,ENGINEER -documents.txt,178,0,14,ENGINEER -documents.txt,179,0,15,MANAGER -documents.txt,180,0,14,ENGINEER -documents.txt,181,0,13,MANAGER -documents.txt,182,20,36,ENGINEER -documents.txt,183,0,22,MANAGER -documents.txt,184,0,18,ENGINEER -documents.txt,185,0,17,MANAGER -documents.txt,186,0,13,MANAGER -documents.txt,187,0,15,MANAGER -documents.txt,188,36,47,MANAGER -documents.txt,189,0,20,ENGINEER -documents.txt,190,0,14,MANAGER -documents.txt,191,0,15,ENGINEER -documents.txt,192,0,18,MANAGER -documents.txt,193,0,10,MANAGER -documents.txt,194,0,18,ENGINEER -documents.txt,195,0,12,ENGINEER -documents.txt,196,25,37,MANAGER -documents.txt,197,0,10,ENGINEER -documents.txt,198,0,9,MANAGER -documents.txt,199,0,12,ENGINEER -documents.txt,200,0,16,ENGINEER -documents.txt,201,0,14,ENGINEER -documents.txt,202,0,16,MANAGER -documents.txt,203,37,53,ENGINEER -documents.txt,204,0,17,MANAGER -documents.txt,205,0,14,ENGINEER -documents.txt,206,25,45,MANAGER -documents.txt,207,0,15,MANAGER -documents.txt,208,25,42,MANAGER -documents.txt,209,19,36,MANAGER -documents.txt,210,0,15,MANAGER -documents.txt,211,0,11,MANAGER -documents.txt,212,0,12,MANAGER -documents.txt,213,36,50,MANAGER -documents.txt,214,20,43,ENGINEER -documents.txt,215,0,17,MANAGER -documents.txt,216,0,13,MANAGER -documents.txt,217,0,14,MANAGER -documents.txt,218,0,12,ENGINEER -documents.txt,219,0,13,ENGINEER -documents.txt,220,20,34,ENGINEER -documents.txt,221,0,16,ENGINEER 
-documents.txt,222,25,40,MANAGER -documents.txt,223,25,42,ENGINEER -documents.txt,224,25,38,MANAGER -documents.txt,225,0,14,MANAGER -documents.txt,226,0,11,MANAGER -documents.txt,227,0,12,MANAGER -documents.txt,228,0,22,ENGINEER -documents.txt,229,0,12,ENGINEER -documents.txt,230,0,15,ENGINEER -documents.txt,231,25,42,MANAGER -documents.txt,232,0,12,ENGINEER -documents.txt,233,0,11,MANAGER -documents.txt,234,36,48,MANAGER -documents.txt,235,0,14,ENGINEER -documents.txt,236,36,49,MANAGER -documents.txt,237,36,55,MANAGER -documents.txt,238,20,38,ENGINEER -documents.txt,239,0,10,ENGINEER -documents.txt,240,0,10,MANAGER -documents.txt,241,0,15,MANAGER -documents.txt,242,0,16,ENGINEER +documents.txt,175,0,17,ENGINEER +documents.txt,176,0,22,MANAGER +documents.txt,177,0,15,ENGINEER +documents.txt,178,0,10,ENGINEER +documents.txt,179,0,17,MANAGER +documents.txt,180,0,15,ENGINEER +documents.txt,181,0,15,MANAGER +documents.txt,182,0,18,MANAGER +documents.txt,183,0,18,MANAGER +documents.txt,184,0,17,ENGINEER +documents.txt,185,0,14,MANAGER +documents.txt,186,37,52,ENGINEER +documents.txt,187,0,20,ENGINEER +documents.txt,188,0,24,ENGINEER +documents.txt,189,0,17,ENGINEER +documents.txt,190,0,26,ENGINEER +documents.txt,191,19,29,MANAGER +documents.txt,192,0,16,ENGINEER +documents.txt,193,25,40,ENGINEER +documents.txt,194,0,14,MANAGER +documents.txt,195,0,15,ENGINEER +documents.txt,196,36,52,MANAGER +documents.txt,197,0,23,ENGINEER +documents.txt,198,0,11,MANAGER +documents.txt,199,0,15,ENGINEER +documents.txt,200,0,12,MANAGER +documents.txt,201,0,17,ENGINEER +documents.txt,202,37,51,ENGINEER +documents.txt,203,25,42,ENGINEER +documents.txt,204,0,12,ENGINEER +documents.txt,205,0,15,MANAGER +documents.txt,206,20,37,ENGINEER +documents.txt,207,19,35,MANAGER +documents.txt,208,25,45,MANAGER +documents.txt,209,36,48,MANAGER +documents.txt,210,0,14,MANAGER +documents.txt,211,25,39,MANAGER +documents.txt,212,0,11,MANAGER +documents.txt,213,0,15,MANAGER 
+documents.txt,214,0,14,ENGINEER +documents.txt,215,0,11,ENGINEER +documents.txt,216,0,24,MANAGER +documents.txt,217,0,15,MANAGER +documents.txt,218,0,21,MANAGER +documents.txt,219,0,20,MANAGER +documents.txt,220,0,14,MANAGER +documents.txt,221,0,11,MANAGER +documents.txt,222,20,34,ENGINEER +documents.txt,223,0,26,MANAGER +documents.txt,224,0,17,MANAGER +documents.txt,225,0,17,ENGINEER +documents.txt,226,0,15,MANAGER +documents.txt,227,25,46,MANAGER +documents.txt,228,37,50,ENGINEER +documents.txt,229,25,47,ENGINEER +documents.txt,230,20,45,ENGINEER +documents.txt,231,19,32,MANAGER +documents.txt,232,0,20,MANAGER +documents.txt,233,37,50,ENGINEER +documents.txt,234,0,18,MANAGER +documents.txt,235,0,14,MANAGER +documents.txt,236,0,16,ENGINEER +documents.txt,237,0,12,MANAGER +documents.txt,238,0,11,ENGINEER +documents.txt,239,0,10,MANAGER +documents.txt,240,0,17,MANAGER +documents.txt,241,25,40,ENGINEER +documents.txt,242,0,10,ENGINEER documents.txt,243,0,15,ENGINEER -documents.txt,244,0,12,ENGINEER +documents.txt,244,20,35,ENGINEER documents.txt,245,0,11,ENGINEER -documents.txt,246,0,15,MANAGER -documents.txt,247,0,13,MANAGER -documents.txt,248,0,12,ENGINEER -documents.txt,249,0,14,ENGINEER -documents.txt,250,25,37,MANAGER -documents.txt,251,25,36,MANAGER -documents.txt,252,0,12,MANAGER -documents.txt,253,25,40,MANAGER -documents.txt,254,25,39,MANAGER -documents.txt,255,0,17,MANAGER -documents.txt,256,0,18,MANAGER -documents.txt,257,36,50,MANAGER -documents.txt,258,37,52,ENGINEER -documents.txt,259,0,20,ENGINEER -documents.txt,260,0,15,ENGINEER -documents.txt,261,36,48,MANAGER -documents.txt,262,0,13,ENGINEER -documents.txt,263,0,15,MANAGER -documents.txt,264,0,16,MANAGER -documents.txt,265,0,16,ENGINEER -documents.txt,266,0,17,MANAGER -documents.txt,267,36,54,MANAGER -documents.txt,268,0,15,MANAGER -documents.txt,269,0,14,ENGINEER -documents.txt,270,0,18,MANAGER -documents.txt,271,0,11,MANAGER -documents.txt,272,0,21,ENGINEER -documents.txt,273,0,21,ENGINEER 
-documents.txt,274,0,14,ENGINEER -documents.txt,275,0,14,MANAGER -documents.txt,276,0,18,ENGINEER -documents.txt,277,0,16,ENGINEER -documents.txt,278,0,13,MANAGER -documents.txt,279,0,15,ENGINEER -documents.txt,280,0,13,MANAGER -documents.txt,281,0,18,MANAGER -documents.txt,282,0,8,MANAGER -documents.txt,283,0,12,ENGINEER -documents.txt,284,0,15,MANAGER -documents.txt,285,36,55,MANAGER -documents.txt,286,0,16,ENGINEER -documents.txt,287,25,40,MANAGER -documents.txt,288,19,33,MANAGER -documents.txt,289,0,14,ENGINEER -documents.txt,290,0,13,ENGINEER -documents.txt,291,19,33,MANAGER -documents.txt,292,0,13,MANAGER -documents.txt,293,25,37,ENGINEER -documents.txt,294,0,16,ENGINEER -documents.txt,295,0,25,ENGINEER -documents.txt,296,0,17,ENGINEER -documents.txt,297,37,52,ENGINEER -documents.txt,298,37,53,ENGINEER -documents.txt,299,0,13,MANAGER -documents.txt,300,20,39,ENGINEER -documents.txt,301,0,15,MANAGER -documents.txt,302,0,15,MANAGER -documents.txt,303,0,12,MANAGER -documents.txt,304,0,22,MANAGER -documents.txt,305,25,42,MANAGER -documents.txt,306,0,13,ENGINEER -documents.txt,307,25,43,MANAGER -documents.txt,308,37,55,ENGINEER -documents.txt,309,0,19,MANAGER -documents.txt,310,36,53,MANAGER -documents.txt,311,0,13,ENGINEER -documents.txt,312,0,12,ENGINEER -documents.txt,313,0,12,MANAGER -documents.txt,314,36,49,MANAGER -documents.txt,315,0,18,ENGINEER -documents.txt,316,0,14,ENGINEER -documents.txt,317,0,13,MANAGER -documents.txt,318,25,37,ENGINEER -documents.txt,319,37,49,ENGINEER -documents.txt,320,0,21,ENGINEER -documents.txt,321,0,11,MANAGER -documents.txt,322,0,14,ENGINEER -documents.txt,323,25,44,MANAGER -documents.txt,324,0,13,MANAGER -documents.txt,325,25,35,ENGINEER -documents.txt,326,25,41,MANAGER -documents.txt,327,0,12,ENGINEER -documents.txt,328,0,11,MANAGER -documents.txt,329,0,22,MANAGER -documents.txt,330,0,19,MANAGER -documents.txt,331,0,13,ENGINEER -documents.txt,332,0,15,MANAGER -documents.txt,333,0,13,MANAGER -documents.txt,334,0,13,ENGINEER 
-documents.txt,335,25,46,MANAGER -documents.txt,336,19,39,MANAGER -documents.txt,337,0,16,ENGINEER -documents.txt,338,0,12,ENGINEER -documents.txt,339,0,11,ENGINEER -documents.txt,340,20,34,ENGINEER -documents.txt,341,0,13,MANAGER -documents.txt,342,0,11,MANAGER -documents.txt,343,0,12,MANAGER -documents.txt,344,0,16,MANAGER -documents.txt,345,0,13,MANAGER -documents.txt,346,0,15,MANAGER -documents.txt,347,0,17,MANAGER -documents.txt,348,37,52,ENGINEER -documents.txt,349,0,12,MANAGER -documents.txt,350,36,50,MANAGER -documents.txt,351,0,12,ENGINEER -documents.txt,352,0,13,ENGINEER -documents.txt,353,0,11,MANAGER -documents.txt,354,36,49,MANAGER -documents.txt,355,0,13,ENGINEER -documents.txt,356,0,18,ENGINEER -documents.txt,357,37,55,ENGINEER -documents.txt,358,20,31,ENGINEER -documents.txt,359,0,20,ENGINEER -documents.txt,360,0,15,ENGINEER -documents.txt,361,0,14,MANAGER -documents.txt,362,0,13,MANAGER -documents.txt,363,19,34,MANAGER -documents.txt,364,0,12,MANAGER -documents.txt,365,0,13,MANAGER -documents.txt,366,0,14,MANAGER -documents.txt,367,0,17,ENGINEER -documents.txt,368,0,13,MANAGER -documents.txt,369,0,17,ENGINEER -documents.txt,370,37,50,ENGINEER -documents.txt,371,0,19,MANAGER -documents.txt,372,25,42,ENGINEER -documents.txt,373,0,14,MANAGER -documents.txt,374,25,38,ENGINEER +documents.txt,246,0,10,MANAGER +documents.txt,247,37,56,ENGINEER +documents.txt,248,0,15,MANAGER +documents.txt,249,19,37,MANAGER +documents.txt,250,0,17,ENGINEER +documents.txt,251,0,11,ENGINEER +documents.txt,252,0,22,MANAGER +documents.txt,253,0,23,ENGINEER +documents.txt,254,0,14,MANAGER +documents.txt,255,0,10,MANAGER +documents.txt,256,0,15,MANAGER +documents.txt,257,0,10,ENGINEER +documents.txt,258,0,13,MANAGER +documents.txt,259,0,16,ENGINEER +documents.txt,260,0,16,MANAGER +documents.txt,261,0,12,MANAGER +documents.txt,262,0,15,MANAGER +documents.txt,263,19,35,MANAGER +documents.txt,264,0,19,ENGINEER +documents.txt,265,0,14,MANAGER +documents.txt,266,0,14,ENGINEER 
+documents.txt,267,0,24,ENGINEER +documents.txt,268,20,38,ENGINEER +documents.txt,269,0,12,MANAGER +documents.txt,270,25,40,ENGINEER +documents.txt,271,19,42,MANAGER +documents.txt,272,20,35,ENGINEER +documents.txt,273,0,23,MANAGER +documents.txt,274,0,12,ENGINEER +documents.txt,275,0,20,MANAGER +documents.txt,276,0,16,MANAGER +documents.txt,277,0,27,ENGINEER +documents.txt,278,0,12,MANAGER +documents.txt,279,0,23,MANAGER +documents.txt,280,0,18,ENGINEER +documents.txt,281,0,19,ENGINEER +documents.txt,282,0,18,ENGINEER +documents.txt,283,0,16,MANAGER +documents.txt,284,0,14,MANAGER +documents.txt,285,0,18,MANAGER +documents.txt,286,0,18,MANAGER +documents.txt,287,0,25,MANAGER +documents.txt,288,0,13,MANAGER +documents.txt,289,0,18,ENGINEER +documents.txt,290,0,15,MANAGER +documents.txt,291,0,10,MANAGER +documents.txt,292,25,38,ENGINEER +documents.txt,293,19,35,MANAGER +documents.txt,294,25,39,MANAGER +documents.txt,295,25,38,MANAGER +documents.txt,296,0,14,ENGINEER +documents.txt,297,0,13,ENGINEER +documents.txt,298,0,19,MANAGER +documents.txt,299,20,32,ENGINEER +documents.txt,300,0,16,MANAGER +documents.txt,301,0,16,MANAGER +documents.txt,302,37,50,ENGINEER +documents.txt,303,0,19,MANAGER +documents.txt,304,0,20,MANAGER +documents.txt,305,0,15,ENGINEER +documents.txt,306,25,48,MANAGER +documents.txt,307,0,21,ENGINEER +documents.txt,308,0,14,ENGINEER +documents.txt,309,0,12,MANAGER +documents.txt,310,0,18,MANAGER +documents.txt,311,0,23,ENGINEER +documents.txt,312,0,17,MANAGER +documents.txt,313,0,25,ENGINEER +documents.txt,314,0,11,MANAGER +documents.txt,315,0,16,ENGINEER +documents.txt,316,25,38,ENGINEER +documents.txt,317,36,50,MANAGER +documents.txt,318,37,52,ENGINEER +documents.txt,319,25,39,MANAGER +documents.txt,320,25,47,MANAGER +documents.txt,321,25,40,MANAGER +documents.txt,322,0,17,ENGINEER +documents.txt,323,0,12,ENGINEER +documents.txt,324,0,10,MANAGER +documents.txt,325,25,40,ENGINEER +documents.txt,326,20,31,ENGINEER +documents.txt,327,0,13,ENGINEER 
+documents.txt,328,25,46,ENGINEER +documents.txt,329,0,16,ENGINEER +documents.txt,330,0,10,MANAGER +documents.txt,331,37,55,ENGINEER +documents.txt,332,19,36,MANAGER +documents.txt,333,0,13,ENGINEER +documents.txt,334,0,16,MANAGER +documents.txt,335,25,43,MANAGER +documents.txt,336,0,15,MANAGER +documents.txt,337,36,48,MANAGER +documents.txt,338,36,50,MANAGER +documents.txt,339,0,13,ENGINEER +documents.txt,340,0,17,ENGINEER +documents.txt,341,36,50,MANAGER +documents.txt,342,0,17,ENGINEER +documents.txt,343,36,50,MANAGER +documents.txt,344,0,14,MANAGER +documents.txt,345,25,45,ENGINEER +documents.txt,346,0,16,ENGINEER +documents.txt,347,37,57,ENGINEER +documents.txt,348,0,14,ENGINEER +documents.txt,349,0,14,MANAGER +documents.txt,350,36,52,MANAGER +documents.txt,351,0,15,ENGINEER +documents.txt,352,0,13,MANAGER +documents.txt,353,0,15,ENGINEER +documents.txt,354,0,18,ENGINEER +documents.txt,355,0,10,MANAGER +documents.txt,356,0,11,MANAGER +documents.txt,357,0,16,MANAGER +documents.txt,358,0,17,ENGINEER +documents.txt,359,0,21,MANAGER +documents.txt,360,0,14,MANAGER +documents.txt,361,37,48,ENGINEER +documents.txt,362,0,24,ENGINEER +documents.txt,363,0,21,MANAGER +documents.txt,364,0,11,MANAGER +documents.txt,365,20,36,ENGINEER +documents.txt,366,0,17,ENGINEER +documents.txt,367,0,10,ENGINEER +documents.txt,368,25,41,ENGINEER +documents.txt,369,0,13,MANAGER +documents.txt,370,25,36,ENGINEER +documents.txt,371,20,39,ENGINEER +documents.txt,372,37,51,ENGINEER +documents.txt,373,0,11,ENGINEER +documents.txt,374,25,38,MANAGER documents.txt,375,0,13,ENGINEER -documents.txt,376,0,11,MANAGER -documents.txt,377,0,12,ENGINEER -documents.txt,378,25,40,MANAGER -documents.txt,379,0,15,ENGINEER -documents.txt,380,0,11,ENGINEER -documents.txt,381,0,20,MANAGER -documents.txt,382,0,19,ENGINEER -documents.txt,383,0,24,ENGINEER -documents.txt,384,0,12,ENGINEER -documents.txt,385,0,16,MANAGER +documents.txt,376,0,13,ENGINEER +documents.txt,377,0,11,MANAGER 
+documents.txt,378,0,17,MANAGER +documents.txt,379,0,17,MANAGER +documents.txt,380,0,15,MANAGER +documents.txt,381,19,31,MANAGER +documents.txt,382,25,45,MANAGER +documents.txt,383,0,12,MANAGER +documents.txt,384,0,15,ENGINEER +documents.txt,385,0,19,ENGINEER documents.txt,386,0,14,MANAGER -documents.txt,387,0,9,MANAGER -documents.txt,388,0,12,MANAGER -documents.txt,389,36,50,MANAGER -documents.txt,390,20,32,ENGINEER -documents.txt,391,0,15,ENGINEER -documents.txt,392,0,17,MANAGER -documents.txt,393,19,34,MANAGER -documents.txt,394,0,17,ENGINEER -documents.txt,395,0,16,ENGINEER -documents.txt,396,0,16,MANAGER -documents.txt,397,0,20,ENGINEER -documents.txt,398,0,17,MANAGER -documents.txt,399,0,16,MANAGER -documents.txt,400,0,12,ENGINEER -documents.txt,401,0,10,MANAGER -documents.txt,402,0,14,MANAGER -documents.txt,403,0,11,ENGINEER -documents.txt,404,25,35,ENGINEER -documents.txt,405,0,13,ENGINEER +documents.txt,387,0,17,ENGINEER +documents.txt,388,19,32,MANAGER +documents.txt,389,0,15,MANAGER +documents.txt,390,0,19,ENGINEER +documents.txt,391,0,13,MANAGER +documents.txt,392,0,18,MANAGER +documents.txt,393,19,35,MANAGER +documents.txt,394,0,18,MANAGER +documents.txt,395,0,14,ENGINEER +documents.txt,396,20,41,ENGINEER +documents.txt,397,0,19,ENGINEER +documents.txt,398,0,16,ENGINEER +documents.txt,399,0,20,MANAGER +documents.txt,400,25,44,MANAGER +documents.txt,401,36,50,MANAGER +documents.txt,402,0,20,ENGINEER +documents.txt,403,0,16,ENGINEER +documents.txt,404,0,12,MANAGER +documents.txt,405,25,41,ENGINEER documents.txt,406,0,13,MANAGER -documents.txt,407,36,47,MANAGER -documents.txt,408,20,31,ENGINEER -documents.txt,409,37,47,ENGINEER -documents.txt,410,0,13,ENGINEER -documents.txt,411,20,34,ENGINEER -documents.txt,412,0,19,MANAGER -documents.txt,413,0,12,ENGINEER -documents.txt,414,0,14,ENGINEER +documents.txt,407,0,19,ENGINEER +documents.txt,408,25,40,ENGINEER +documents.txt,409,0,21,MANAGER +documents.txt,410,0,14,ENGINEER +documents.txt,411,0,11,MANAGER 
+documents.txt,412,0,17,MANAGER +documents.txt,413,0,13,MANAGER +documents.txt,414,0,14,MANAGER documents.txt,415,0,14,MANAGER -documents.txt,416,20,31,ENGINEER -documents.txt,417,0,13,MANAGER -documents.txt,418,0,17,ENGINEER -documents.txt,419,0,12,ENGINEER -documents.txt,420,0,13,ENGINEER -documents.txt,421,0,15,MANAGER -documents.txt,422,19,37,MANAGER -documents.txt,423,37,50,ENGINEER +documents.txt,416,0,20,MANAGER +documents.txt,417,0,12,MANAGER +documents.txt,418,0,20,MANAGER +documents.txt,419,0,23,ENGINEER +documents.txt,420,37,48,ENGINEER +documents.txt,421,37,51,ENGINEER +documents.txt,422,0,10,MANAGER +documents.txt,423,37,59,ENGINEER documents.txt,424,0,17,MANAGER -documents.txt,425,0,15,ENGINEER -documents.txt,426,0,9,ENGINEER -documents.txt,427,0,11,MANAGER -documents.txt,428,0,13,MANAGER -documents.txt,429,0,13,MANAGER -documents.txt,430,0,17,ENGINEER -documents.txt,431,20,39,ENGINEER -documents.txt,432,19,32,MANAGER -documents.txt,433,0,19,MANAGER -documents.txt,434,25,40,ENGINEER -documents.txt,435,0,14,MANAGER -documents.txt,436,0,16,ENGINEER -documents.txt,437,0,24,ENGINEER -documents.txt,438,19,38,MANAGER -documents.txt,439,0,17,MANAGER -documents.txt,440,36,52,MANAGER -documents.txt,441,0,16,ENGINEER -documents.txt,442,0,20,MANAGER -documents.txt,443,0,11,MANAGER -documents.txt,444,0,12,MANAGER -documents.txt,445,0,16,MANAGER -documents.txt,446,0,13,MANAGER +documents.txt,425,20,31,ENGINEER +documents.txt,426,0,16,MANAGER +documents.txt,427,37,53,ENGINEER +documents.txt,428,0,15,MANAGER +documents.txt,429,0,15,ENGINEER +documents.txt,430,0,16,ENGINEER +documents.txt,431,0,19,ENGINEER +documents.txt,432,0,10,ENGINEER +documents.txt,433,0,18,MANAGER +documents.txt,434,25,38,ENGINEER +documents.txt,435,20,36,ENGINEER +documents.txt,436,0,22,MANAGER +documents.txt,437,0,17,MANAGER +documents.txt,438,0,10,MANAGER +documents.txt,439,25,37,MANAGER +documents.txt,440,0,17,MANAGER +documents.txt,441,0,14,ENGINEER +documents.txt,442,0,19,MANAGER 
+documents.txt,443,0,18,ENGINEER +documents.txt,444,0,19,ENGINEER +documents.txt,445,0,16,ENGINEER +documents.txt,446,25,42,ENGINEER documents.txt,447,0,15,MANAGER -documents.txt,448,0,14,ENGINEER -documents.txt,449,0,17,ENGINEER -documents.txt,450,0,13,MANAGER -documents.txt,451,0,14,ENGINEER -documents.txt,452,0,12,MANAGER -documents.txt,453,0,9,MANAGER -documents.txt,454,0,18,MANAGER -documents.txt,455,25,42,ENGINEER -documents.txt,456,0,20,MANAGER -documents.txt,457,0,15,ENGINEER -documents.txt,458,37,47,ENGINEER -documents.txt,459,0,23,ENGINEER -documents.txt,460,0,13,MANAGER -documents.txt,461,19,35,MANAGER -documents.txt,462,0,10,ENGINEER -documents.txt,463,0,15,MANAGER -documents.txt,464,0,12,MANAGER -documents.txt,465,0,11,MANAGER -documents.txt,466,25,38,ENGINEER -documents.txt,467,25,41,MANAGER -documents.txt,468,25,44,MANAGER -documents.txt,469,25,41,MANAGER -documents.txt,470,0,18,MANAGER -documents.txt,471,0,10,ENGINEER -documents.txt,472,0,15,ENGINEER -documents.txt,473,0,17,MANAGER -documents.txt,474,19,40,MANAGER -documents.txt,475,0,15,ENGINEER -documents.txt,476,19,35,MANAGER -documents.txt,477,0,14,MANAGER -documents.txt,478,0,18,MANAGER -documents.txt,479,0,15,ENGINEER -documents.txt,480,25,47,MANAGER -documents.txt,481,20,32,ENGINEER -documents.txt,482,0,13,ENGINEER -documents.txt,483,0,24,ENGINEER -documents.txt,484,0,16,ENGINEER -documents.txt,485,0,13,MANAGER -documents.txt,486,0,20,MANAGER -documents.txt,487,0,15,MANAGER +documents.txt,448,0,16,ENGINEER +documents.txt,449,0,19,ENGINEER +documents.txt,450,0,15,ENGINEER +documents.txt,451,0,13,ENGINEER +documents.txt,452,20,31,ENGINEER +documents.txt,453,36,54,MANAGER +documents.txt,454,19,40,MANAGER +documents.txt,455,0,12,MANAGER +documents.txt,456,36,48,MANAGER +documents.txt,457,0,17,ENGINEER +documents.txt,458,0,12,ENGINEER +documents.txt,459,25,38,ENGINEER +documents.txt,460,0,13,ENGINEER +documents.txt,461,0,18,MANAGER +documents.txt,462,0,10,MANAGER +documents.txt,463,0,14,MANAGER 
+documents.txt,464,0,21,MANAGER +documents.txt,465,0,13,MANAGER +documents.txt,466,0,15,MANAGER +documents.txt,467,0,11,ENGINEER +documents.txt,468,0,21,MANAGER +documents.txt,469,0,10,ENGINEER +documents.txt,470,0,21,MANAGER +documents.txt,471,25,38,MANAGER +documents.txt,472,0,11,ENGINEER +documents.txt,473,0,16,MANAGER +documents.txt,474,0,13,MANAGER +documents.txt,475,0,22,MANAGER +documents.txt,476,0,11,ENGINEER +documents.txt,477,0,11,MANAGER +documents.txt,478,0,17,MANAGER +documents.txt,479,0,12,ENGINEER +documents.txt,480,0,20,ENGINEER +documents.txt,481,0,15,MANAGER +documents.txt,482,0,11,ENGINEER +documents.txt,483,0,17,ENGINEER +documents.txt,484,0,14,MANAGER +documents.txt,485,0,12,MANAGER +documents.txt,486,0,15,MANAGER +documents.txt,487,0,12,ENGINEER documents.txt,488,0,15,ENGINEER -documents.txt,489,0,11,ENGINEER -documents.txt,490,0,10,MANAGER -documents.txt,491,25,38,ENGINEER -documents.txt,492,0,12,ENGINEER -documents.txt,493,0,15,MANAGER -documents.txt,494,0,12,MANAGER -documents.txt,495,0,11,ENGINEER -documents.txt,496,0,15,MANAGER -documents.txt,497,0,15,ENGINEER -documents.txt,498,25,41,ENGINEER -documents.txt,499,25,38,ENGINEER -documents.txt,500,0,14,MANAGER -documents.txt,501,25,37,MANAGER -documents.txt,502,0,14,MANAGER -documents.txt,503,25,40,ENGINEER -documents.txt,504,0,10,ENGINEER -documents.txt,505,0,17,MANAGER -documents.txt,506,0,16,MANAGER -documents.txt,507,0,10,ENGINEER -documents.txt,508,0,12,MANAGER -documents.txt,509,0,13,ENGINEER -documents.txt,510,0,12,ENGINEER -documents.txt,511,25,38,ENGINEER -documents.txt,512,0,10,MANAGER +documents.txt,489,0,11,MANAGER +documents.txt,490,0,14,ENGINEER +documents.txt,491,0,12,ENGINEER +documents.txt,492,0,19,MANAGER +documents.txt,493,0,13,ENGINEER +documents.txt,494,0,15,MANAGER +documents.txt,495,0,10,MANAGER +documents.txt,496,0,13,MANAGER +documents.txt,497,37,49,ENGINEER +documents.txt,498,0,19,ENGINEER +documents.txt,499,25,35,MANAGER +documents.txt,500,0,15,ENGINEER 
+documents.txt,501,25,43,ENGINEER +documents.txt,502,0,18,ENGINEER +documents.txt,503,37,53,ENGINEER +documents.txt,504,0,21,ENGINEER +documents.txt,505,0,20,MANAGER +documents.txt,506,0,15,ENGINEER +documents.txt,507,19,38,MANAGER +documents.txt,508,0,14,ENGINEER +documents.txt,509,0,22,ENGINEER +documents.txt,510,0,13,ENGINEER +documents.txt,511,0,16,MANAGER +documents.txt,512,0,15,MANAGER documents.txt,513,0,13,MANAGER -documents.txt,514,0,14,ENGINEER -documents.txt,515,0,15,MANAGER -documents.txt,516,0,17,MANAGER -documents.txt,517,20,32,ENGINEER +documents.txt,514,0,16,ENGINEER +documents.txt,515,0,17,ENGINEER +documents.txt,516,0,19,ENGINEER +documents.txt,517,36,59,MANAGER documents.txt,518,0,15,ENGINEER -documents.txt,519,0,14,ENGINEER -documents.txt,520,19,39,MANAGER -documents.txt,521,0,15,MANAGER -documents.txt,522,0,17,ENGINEER -documents.txt,523,0,15,ENGINEER -documents.txt,524,0,17,MANAGER -documents.txt,525,0,12,MANAGER -documents.txt,526,0,11,MANAGER -documents.txt,527,20,32,ENGINEER -documents.txt,528,0,16,ENGINEER -documents.txt,529,0,18,ENGINEER -documents.txt,530,20,35,ENGINEER +documents.txt,519,0,14,MANAGER +documents.txt,520,0,12,MANAGER +documents.txt,521,0,11,ENGINEER +documents.txt,522,0,20,ENGINEER +documents.txt,523,19,30,MANAGER +documents.txt,524,37,56,ENGINEER +documents.txt,525,36,52,MANAGER +documents.txt,526,0,15,MANAGER +documents.txt,527,0,15,ENGINEER +documents.txt,528,0,14,MANAGER +documents.txt,529,0,11,ENGINEER +documents.txt,530,0,17,MANAGER documents.txt,531,0,14,ENGINEER -documents.txt,532,0,18,MANAGER -documents.txt,533,0,16,ENGINEER -documents.txt,534,0,8,ENGINEER +documents.txt,532,0,13,ENGINEER +documents.txt,533,0,12,ENGINEER +documents.txt,534,0,17,MANAGER documents.txt,535,0,14,MANAGER -documents.txt,536,25,47,ENGINEER -documents.txt,537,0,16,ENGINEER -documents.txt,538,20,34,ENGINEER -documents.txt,539,0,14,MANAGER -documents.txt,540,0,11,MANAGER -documents.txt,541,25,37,MANAGER -documents.txt,542,0,13,MANAGER 
-documents.txt,543,0,16,ENGINEER -documents.txt,544,0,10,MANAGER -documents.txt,545,0,16,ENGINEER -documents.txt,546,0,17,MANAGER -documents.txt,547,25,42,MANAGER -documents.txt,548,0,12,MANAGER -documents.txt,549,0,17,MANAGER -documents.txt,550,0,12,ENGINEER -documents.txt,551,0,19,MANAGER -documents.txt,552,0,15,ENGINEER -documents.txt,553,0,16,MANAGER -documents.txt,554,20,35,ENGINEER -documents.txt,555,0,20,MANAGER -documents.txt,556,0,15,MANAGER -documents.txt,557,0,15,MANAGER -documents.txt,558,0,17,MANAGER -documents.txt,559,0,17,ENGINEER -documents.txt,560,19,33,MANAGER -documents.txt,561,36,44,MANAGER -documents.txt,562,0,13,MANAGER -documents.txt,563,0,15,MANAGER -documents.txt,564,0,13,MANAGER -documents.txt,565,0,21,ENGINEER -documents.txt,566,0,22,ENGINEER -documents.txt,567,19,36,MANAGER -documents.txt,568,20,33,ENGINEER -documents.txt,569,0,14,MANAGER -documents.txt,570,0,16,ENGINEER -documents.txt,571,0,13,ENGINEER -documents.txt,572,0,18,ENGINEER -documents.txt,573,0,10,ENGINEER -documents.txt,574,0,13,MANAGER -documents.txt,575,0,15,MANAGER -documents.txt,576,0,17,ENGINEER -documents.txt,577,0,17,ENGINEER -documents.txt,578,0,14,ENGINEER -documents.txt,579,0,13,MANAGER -documents.txt,580,0,17,ENGINEER -documents.txt,581,37,48,ENGINEER +documents.txt,536,0,14,ENGINEER +documents.txt,537,37,49,ENGINEER +documents.txt,538,20,38,ENGINEER +documents.txt,539,0,15,ENGINEER +documents.txt,540,37,51,ENGINEER +documents.txt,541,0,18,ENGINEER +documents.txt,542,19,32,MANAGER +documents.txt,543,0,14,MANAGER +documents.txt,544,0,10,ENGINEER +documents.txt,545,19,40,MANAGER +documents.txt,546,0,23,MANAGER +documents.txt,547,19,39,MANAGER +documents.txt,548,0,14,ENGINEER +documents.txt,549,0,10,MANAGER +documents.txt,550,0,16,MANAGER +documents.txt,551,0,13,MANAGER +documents.txt,552,0,9,ENGINEER +documents.txt,553,19,34,MANAGER +documents.txt,554,0,22,MANAGER +documents.txt,555,0,19,MANAGER +documents.txt,556,0,18,MANAGER +documents.txt,557,0,14,MANAGER 
+documents.txt,558,0,14,MANAGER +documents.txt,559,0,20,ENGINEER +documents.txt,560,36,50,MANAGER +documents.txt,561,25,38,ENGINEER +documents.txt,562,0,15,MANAGER +documents.txt,563,0,16,ENGINEER +documents.txt,564,0,12,ENGINEER +documents.txt,565,0,18,ENGINEER +documents.txt,566,36,47,MANAGER +documents.txt,567,0,18,ENGINEER +documents.txt,568,0,18,MANAGER +documents.txt,569,0,21,ENGINEER +documents.txt,570,0,19,MANAGER +documents.txt,571,0,12,MANAGER +documents.txt,572,0,11,MANAGER +documents.txt,573,0,11,ENGINEER +documents.txt,574,0,12,ENGINEER +documents.txt,575,20,33,ENGINEER +documents.txt,576,20,40,ENGINEER +documents.txt,577,0,12,ENGINEER +documents.txt,578,0,21,MANAGER +documents.txt,579,0,17,MANAGER +documents.txt,580,0,16,MANAGER +documents.txt,581,0,20,MANAGER documents.txt,582,0,16,ENGINEER -documents.txt,583,37,55,ENGINEER -documents.txt,584,0,11,MANAGER -documents.txt,585,37,55,ENGINEER -documents.txt,586,0,20,MANAGER -documents.txt,587,0,13,MANAGER -documents.txt,588,0,13,MANAGER -documents.txt,589,25,35,ENGINEER -documents.txt,590,0,15,ENGINEER -documents.txt,591,0,21,MANAGER -documents.txt,592,0,13,MANAGER -documents.txt,593,0,12,ENGINEER -documents.txt,594,0,16,ENGINEER -documents.txt,595,20,34,ENGINEER -documents.txt,596,20,34,ENGINEER -documents.txt,597,0,14,ENGINEER -documents.txt,598,0,17,MANAGER -documents.txt,599,0,13,MANAGER -documents.txt,600,0,20,MANAGER -documents.txt,601,0,16,MANAGER -documents.txt,602,0,18,MANAGER -documents.txt,603,0,18,MANAGER -documents.txt,604,0,19,MANAGER -documents.txt,605,0,19,MANAGER -documents.txt,606,0,12,ENGINEER -documents.txt,607,25,43,ENGINEER -documents.txt,608,0,22,MANAGER -documents.txt,609,0,12,MANAGER -documents.txt,610,36,53,MANAGER -documents.txt,611,0,12,ENGINEER -documents.txt,612,0,16,MANAGER -documents.txt,613,0,14,MANAGER -documents.txt,614,0,13,ENGINEER -documents.txt,615,0,17,ENGINEER -documents.txt,616,25,39,ENGINEER -documents.txt,617,0,13,ENGINEER -documents.txt,618,0,19,ENGINEER 
-documents.txt,619,0,15,MANAGER -documents.txt,620,0,20,MANAGER -documents.txt,621,0,15,MANAGER -documents.txt,622,0,14,ENGINEER -documents.txt,623,0,15,MANAGER -documents.txt,624,37,51,ENGINEER -documents.txt,625,0,19,ENGINEER -documents.txt,626,0,19,ENGINEER -documents.txt,627,0,13,MANAGER -documents.txt,628,0,17,MANAGER -documents.txt,629,0,22,MANAGER -documents.txt,630,0,14,ENGINEER -documents.txt,631,0,21,ENGINEER +documents.txt,583,0,11,ENGINEER +documents.txt,584,0,18,MANAGER +documents.txt,585,0,22,MANAGER +documents.txt,586,37,55,ENGINEER +documents.txt,587,0,17,ENGINEER +documents.txt,588,0,22,ENGINEER +documents.txt,589,25,41,MANAGER +documents.txt,590,19,34,MANAGER +documents.txt,591,0,19,ENGINEER +documents.txt,592,0,17,ENGINEER +documents.txt,593,0,21,MANAGER +documents.txt,594,0,13,MANAGER +documents.txt,595,36,60,MANAGER +documents.txt,596,0,19,ENGINEER +documents.txt,597,0,13,ENGINEER +documents.txt,598,0,14,MANAGER +documents.txt,599,36,50,MANAGER +documents.txt,600,25,35,MANAGER +documents.txt,601,0,25,MANAGER +documents.txt,602,0,20,MANAGER +documents.txt,603,0,14,ENGINEER +documents.txt,604,25,43,MANAGER +documents.txt,605,0,15,ENGINEER +documents.txt,606,0,15,ENGINEER +documents.txt,607,0,20,MANAGER +documents.txt,608,36,52,MANAGER +documents.txt,609,0,14,MANAGER +documents.txt,610,0,17,ENGINEER +documents.txt,611,0,16,ENGINEER +documents.txt,612,0,14,MANAGER +documents.txt,613,0,12,ENGINEER +documents.txt,614,0,19,MANAGER +documents.txt,615,0,14,ENGINEER +documents.txt,616,0,23,ENGINEER +documents.txt,617,0,17,ENGINEER +documents.txt,618,0,13,ENGINEER +documents.txt,619,0,17,MANAGER +documents.txt,620,0,14,ENGINEER +documents.txt,621,0,18,MANAGER +documents.txt,622,0,17,MANAGER +documents.txt,623,0,11,MANAGER +documents.txt,624,20,33,ENGINEER +documents.txt,625,0,16,MANAGER +documents.txt,626,0,19,MANAGER +documents.txt,627,0,11,MANAGER +documents.txt,628,0,13,ENGINEER +documents.txt,629,37,54,ENGINEER +documents.txt,630,0,22,ENGINEER 
+documents.txt,631,0,21,MANAGER documents.txt,632,0,12,ENGINEER -documents.txt,633,0,16,ENGINEER -documents.txt,634,0,13,ENGINEER -documents.txt,635,36,49,MANAGER -documents.txt,636,0,10,MANAGER -documents.txt,637,0,24,MANAGER -documents.txt,638,19,36,MANAGER -documents.txt,639,0,18,ENGINEER -documents.txt,640,0,15,MANAGER -documents.txt,641,0,19,ENGINEER -documents.txt,642,0,17,ENGINEER -documents.txt,643,0,13,ENGINEER -documents.txt,644,0,18,ENGINEER -documents.txt,645,0,17,ENGINEER -documents.txt,646,19,29,MANAGER -documents.txt,647,0,14,ENGINEER -documents.txt,648,0,16,MANAGER -documents.txt,649,0,14,MANAGER -documents.txt,650,0,14,MANAGER -documents.txt,651,0,19,MANAGER -documents.txt,652,0,10,ENGINEER -documents.txt,653,0,18,MANAGER -documents.txt,654,0,16,ENGINEER -documents.txt,655,0,12,MANAGER -documents.txt,656,0,20,ENGINEER -documents.txt,657,19,40,MANAGER -documents.txt,658,0,12,MANAGER -documents.txt,659,0,20,MANAGER -documents.txt,660,25,39,MANAGER -documents.txt,661,0,18,MANAGER -documents.txt,662,0,16,ENGINEER -documents.txt,663,25,33,MANAGER -documents.txt,664,0,10,MANAGER -documents.txt,665,25,38,ENGINEER -documents.txt,666,0,11,MANAGER -documents.txt,667,0,17,ENGINEER -documents.txt,668,36,55,MANAGER -documents.txt,669,0,11,MANAGER -documents.txt,670,0,18,ENGINEER -documents.txt,671,0,13,ENGINEER -documents.txt,672,25,45,MANAGER -documents.txt,673,0,13,MANAGER -documents.txt,674,0,16,MANAGER -documents.txt,675,0,18,ENGINEER -documents.txt,676,0,13,ENGINEER -documents.txt,677,37,56,ENGINEER -documents.txt,678,0,16,MANAGER -documents.txt,679,0,18,MANAGER -documents.txt,680,0,17,MANAGER -documents.txt,681,25,39,MANAGER -documents.txt,682,0,14,MANAGER -documents.txt,683,0,11,MANAGER -documents.txt,684,0,12,ENGINEER -documents.txt,685,0,20,ENGINEER -documents.txt,686,0,16,MANAGER -documents.txt,687,0,11,ENGINEER -documents.txt,688,0,9,ENGINEER -documents.txt,689,0,18,ENGINEER -documents.txt,690,0,16,MANAGER -documents.txt,691,0,15,ENGINEER 
-documents.txt,692,0,14,ENGINEER -documents.txt,693,0,16,ENGINEER -documents.txt,694,0,14,MANAGER -documents.txt,695,0,12,MANAGER +documents.txt,633,25,43,MANAGER +documents.txt,634,0,13,MANAGER +documents.txt,635,20,32,ENGINEER +documents.txt,636,0,14,MANAGER +documents.txt,637,0,12,ENGINEER +documents.txt,638,0,15,MANAGER +documents.txt,639,0,14,MANAGER +documents.txt,640,0,10,MANAGER +documents.txt,641,0,16,MANAGER +documents.txt,642,19,33,MANAGER +documents.txt,643,25,45,MANAGER +documents.txt,644,0,19,ENGINEER +documents.txt,645,0,12,ENGINEER +documents.txt,646,0,21,ENGINEER +documents.txt,647,19,34,MANAGER +documents.txt,648,0,17,MANAGER +documents.txt,649,0,14,ENGINEER +documents.txt,650,25,43,ENGINEER +documents.txt,651,0,18,ENGINEER +documents.txt,652,0,14,ENGINEER +documents.txt,653,0,11,ENGINEER +documents.txt,654,0,20,MANAGER +documents.txt,655,0,17,ENGINEER +documents.txt,656,0,13,ENGINEER +documents.txt,657,0,13,ENGINEER +documents.txt,658,25,42,ENGINEER +documents.txt,659,25,37,MANAGER +documents.txt,660,0,17,MANAGER +documents.txt,661,19,34,MANAGER +documents.txt,662,0,12,ENGINEER +documents.txt,663,37,51,ENGINEER +documents.txt,664,0,13,MANAGER +documents.txt,665,0,16,MANAGER +documents.txt,666,25,39,MANAGER +documents.txt,667,0,18,ENGINEER +documents.txt,668,25,40,MANAGER +documents.txt,669,0,20,ENGINEER +documents.txt,670,25,40,ENGINEER +documents.txt,671,19,35,MANAGER +documents.txt,672,25,37,MANAGER +documents.txt,673,0,15,MANAGER +documents.txt,674,0,12,MANAGER +documents.txt,675,36,58,MANAGER +documents.txt,676,0,13,MANAGER +documents.txt,677,0,16,MANAGER +documents.txt,678,25,45,ENGINEER +documents.txt,679,0,15,ENGINEER +documents.txt,680,0,18,MANAGER +documents.txt,681,0,15,ENGINEER +documents.txt,682,36,54,MANAGER +documents.txt,683,0,11,ENGINEER +documents.txt,684,0,15,ENGINEER +documents.txt,685,36,56,MANAGER +documents.txt,686,0,13,MANAGER +documents.txt,687,36,48,MANAGER +documents.txt,688,0,12,ENGINEER 
+documents.txt,689,25,38,ENGINEER +documents.txt,690,0,18,MANAGER +documents.txt,691,0,12,MANAGER +documents.txt,692,0,20,MANAGER +documents.txt,693,0,14,ENGINEER +documents.txt,694,0,16,MANAGER +documents.txt,695,0,22,ENGINEER documents.txt,696,0,15,ENGINEER -documents.txt,697,0,8,ENGINEER -documents.txt,698,0,11,MANAGER -documents.txt,699,0,19,ENGINEER -documents.txt,700,25,41,MANAGER +documents.txt,697,0,14,MANAGER +documents.txt,698,0,14,MANAGER +documents.txt,699,0,10,ENGINEER +documents.txt,700,36,46,MANAGER documents.txt,701,0,14,MANAGER -documents.txt,702,0,13,ENGINEER +documents.txt,702,0,22,MANAGER documents.txt,703,0,15,MANAGER -documents.txt,704,25,39,ENGINEER -documents.txt,705,0,10,MANAGER -documents.txt,706,0,15,MANAGER -documents.txt,707,25,36,ENGINEER -documents.txt,708,0,14,MANAGER -documents.txt,709,0,12,MANAGER -documents.txt,710,0,14,ENGINEER -documents.txt,711,36,45,MANAGER -documents.txt,712,0,15,MANAGER +documents.txt,704,0,18,MANAGER +documents.txt,705,0,19,MANAGER +documents.txt,706,0,14,MANAGER +documents.txt,707,0,14,ENGINEER +documents.txt,708,0,11,MANAGER +documents.txt,709,0,19,ENGINEER +documents.txt,710,0,10,ENGINEER +documents.txt,711,0,19,MANAGER +documents.txt,712,20,32,ENGINEER documents.txt,713,0,13,ENGINEER -documents.txt,714,25,37,MANAGER -documents.txt,715,0,10,ENGINEER -documents.txt,716,0,12,MANAGER +documents.txt,714,0,19,MANAGER +documents.txt,715,20,42,ENGINEER +documents.txt,716,0,13,MANAGER documents.txt,717,0,12,MANAGER -documents.txt,718,0,17,ENGINEER -documents.txt,719,19,34,MANAGER -documents.txt,720,0,16,ENGINEER -documents.txt,721,0,18,ENGINEER -documents.txt,722,0,16,MANAGER -documents.txt,723,37,56,ENGINEER -documents.txt,724,36,56,MANAGER -documents.txt,725,0,17,ENGINEER -documents.txt,726,0,21,MANAGER -documents.txt,727,0,13,ENGINEER -documents.txt,728,0,14,ENGINEER -documents.txt,729,0,11,MANAGER -documents.txt,730,0,13,MANAGER -documents.txt,731,0,16,ENGINEER -documents.txt,732,0,17,MANAGER 
-documents.txt,733,25,39,MANAGER -documents.txt,734,0,17,MANAGER -documents.txt,735,36,48,MANAGER -documents.txt,736,0,11,MANAGER -documents.txt,737,0,16,MANAGER -documents.txt,738,0,16,ENGINEER -documents.txt,739,0,16,MANAGER -documents.txt,740,0,14,MANAGER -documents.txt,741,0,16,ENGINEER -documents.txt,742,0,17,ENGINEER -documents.txt,743,25,44,ENGINEER -documents.txt,744,25,38,ENGINEER -documents.txt,745,0,14,ENGINEER -documents.txt,746,19,32,MANAGER -documents.txt,747,0,11,ENGINEER -documents.txt,748,0,21,ENGINEER -documents.txt,749,0,16,ENGINEER -documents.txt,750,0,18,ENGINEER +documents.txt,718,0,12,ENGINEER +documents.txt,719,37,47,ENGINEER +documents.txt,720,0,11,MANAGER +documents.txt,721,20,31,ENGINEER +documents.txt,722,0,14,ENGINEER +documents.txt,723,0,22,ENGINEER +documents.txt,724,0,15,MANAGER +documents.txt,725,36,53,MANAGER +documents.txt,726,0,17,MANAGER +documents.txt,727,0,23,ENGINEER +documents.txt,728,0,20,MANAGER +documents.txt,729,0,14,MANAGER +documents.txt,730,0,12,ENGINEER +documents.txt,731,0,19,ENGINEER +documents.txt,732,0,20,ENGINEER +documents.txt,733,0,19,MANAGER +documents.txt,734,0,16,ENGINEER +documents.txt,735,0,15,ENGINEER +documents.txt,736,0,14,MANAGER +documents.txt,737,0,19,MANAGER +documents.txt,738,0,19,MANAGER +documents.txt,739,0,21,MANAGER +documents.txt,740,0,24,ENGINEER +documents.txt,741,37,54,ENGINEER +documents.txt,742,0,20,MANAGER +documents.txt,743,0,16,MANAGER +documents.txt,744,0,14,MANAGER +documents.txt,745,0,12,ENGINEER +documents.txt,746,0,16,MANAGER +documents.txt,747,0,13,MANAGER +documents.txt,748,0,17,ENGINEER +documents.txt,749,0,11,ENGINEER +documents.txt,750,37,52,ENGINEER documents.txt,751,0,11,MANAGER -documents.txt,752,0,10,ENGINEER -documents.txt,753,0,14,ENGINEER -documents.txt,754,0,17,MANAGER -documents.txt,755,0,16,ENGINEER -documents.txt,756,0,13,MANAGER -documents.txt,757,0,18,ENGINEER +documents.txt,752,0,22,ENGINEER +documents.txt,753,25,36,MANAGER +documents.txt,754,25,42,ENGINEER 
+documents.txt,755,0,11,ENGINEER +documents.txt,756,0,13,ENGINEER +documents.txt,757,0,11,MANAGER documents.txt,758,0,15,MANAGER -documents.txt,759,0,13,ENGINEER -documents.txt,760,0,10,MANAGER -documents.txt,761,0,14,ENGINEER -documents.txt,762,25,39,MANAGER -documents.txt,763,37,54,ENGINEER +documents.txt,759,0,22,MANAGER +documents.txt,760,0,15,MANAGER +documents.txt,761,0,10,MANAGER +documents.txt,762,0,13,MANAGER +documents.txt,763,25,36,MANAGER documents.txt,764,0,12,MANAGER -documents.txt,765,0,14,MANAGER -documents.txt,766,0,19,MANAGER -documents.txt,767,0,18,MANAGER -documents.txt,768,20,37,ENGINEER -documents.txt,769,0,14,MANAGER -documents.txt,770,25,38,MANAGER -documents.txt,771,0,15,ENGINEER -documents.txt,772,0,13,ENGINEER -documents.txt,773,0,14,MANAGER -documents.txt,774,25,39,MANAGER -documents.txt,775,0,18,MANAGER -documents.txt,776,0,17,MANAGER -documents.txt,777,0,14,MANAGER -documents.txt,778,19,33,MANAGER -documents.txt,779,0,16,MANAGER -documents.txt,780,0,10,ENGINEER -documents.txt,781,0,11,ENGINEER -documents.txt,782,20,37,ENGINEER -documents.txt,783,37,49,ENGINEER -documents.txt,784,0,14,ENGINEER -documents.txt,785,0,10,ENGINEER -documents.txt,786,36,48,MANAGER -documents.txt,787,19,30,MANAGER -documents.txt,788,0,14,ENGINEER -documents.txt,789,0,16,MANAGER -documents.txt,790,0,23,ENGINEER -documents.txt,791,25,38,MANAGER -documents.txt,792,37,53,ENGINEER -documents.txt,793,0,12,ENGINEER -documents.txt,794,36,51,MANAGER -documents.txt,795,0,17,MANAGER -documents.txt,796,0,17,ENGINEER -documents.txt,797,0,18,ENGINEER -documents.txt,798,0,15,MANAGER -documents.txt,799,0,11,ENGINEER -documents.txt,800,0,14,MANAGER -documents.txt,801,0,12,ENGINEER -documents.txt,802,0,11,ENGINEER -documents.txt,803,0,15,MANAGER -documents.txt,804,0,13,ENGINEER -documents.txt,805,0,14,MANAGER -documents.txt,806,20,35,ENGINEER -documents.txt,807,25,36,ENGINEER -documents.txt,808,0,15,MANAGER -documents.txt,809,0,12,ENGINEER -documents.txt,810,37,51,ENGINEER 
-documents.txt,811,0,10,ENGINEER -documents.txt,812,0,12,MANAGER -documents.txt,813,20,31,ENGINEER +documents.txt,765,0,10,ENGINEER +documents.txt,766,0,11,MANAGER +documents.txt,767,20,37,ENGINEER +documents.txt,768,0,16,ENGINEER +documents.txt,769,0,16,ENGINEER +documents.txt,770,0,18,MANAGER +documents.txt,771,0,15,MANAGER +documents.txt,772,25,41,MANAGER +documents.txt,773,0,18,ENGINEER +documents.txt,774,0,13,ENGINEER +documents.txt,775,0,13,ENGINEER +documents.txt,776,0,21,ENGINEER +documents.txt,777,0,16,ENGINEER +documents.txt,778,25,43,MANAGER +documents.txt,779,20,34,ENGINEER +documents.txt,780,0,14,MANAGER +documents.txt,781,20,37,ENGINEER +documents.txt,782,0,15,MANAGER +documents.txt,783,0,17,MANAGER +documents.txt,784,25,38,MANAGER +documents.txt,785,0,18,MANAGER +documents.txt,786,0,12,ENGINEER +documents.txt,787,0,17,ENGINEER +documents.txt,788,20,37,ENGINEER +documents.txt,789,0,12,MANAGER +documents.txt,790,0,16,ENGINEER +documents.txt,791,0,19,ENGINEER +documents.txt,792,0,17,ENGINEER +documents.txt,793,25,41,ENGINEER +documents.txt,794,25,42,ENGINEER +documents.txt,795,25,41,ENGINEER +documents.txt,796,37,48,ENGINEER +documents.txt,797,0,11,ENGINEER +documents.txt,798,36,53,MANAGER +documents.txt,799,20,37,ENGINEER +documents.txt,800,0,15,ENGINEER +documents.txt,801,0,16,ENGINEER +documents.txt,802,0,15,MANAGER +documents.txt,803,0,18,MANAGER +documents.txt,804,0,18,MANAGER +documents.txt,805,0,18,MANAGER +documents.txt,806,0,17,MANAGER +documents.txt,807,0,20,ENGINEER +documents.txt,808,0,18,ENGINEER +documents.txt,809,37,55,ENGINEER +documents.txt,810,37,62,ENGINEER +documents.txt,811,0,15,MANAGER +documents.txt,812,25,36,ENGINEER +documents.txt,813,0,19,ENGINEER documents.txt,814,0,17,ENGINEER -documents.txt,815,0,10,ENGINEER -documents.txt,816,19,32,MANAGER -documents.txt,817,0,16,MANAGER -documents.txt,818,0,11,MANAGER -documents.txt,819,0,14,ENGINEER -documents.txt,820,0,19,MANAGER -documents.txt,821,0,13,ENGINEER 
-documents.txt,822,0,16,MANAGER -documents.txt,823,0,14,MANAGER -documents.txt,824,0,14,MANAGER -documents.txt,825,0,13,ENGINEER +documents.txt,815,0,8,MANAGER +documents.txt,816,37,51,ENGINEER +documents.txt,817,25,47,ENGINEER +documents.txt,818,25,36,ENGINEER +documents.txt,819,36,49,MANAGER +documents.txt,820,0,19,ENGINEER +documents.txt,821,0,16,MANAGER +documents.txt,822,0,18,ENGINEER +documents.txt,823,25,44,ENGINEER +documents.txt,824,37,50,ENGINEER +documents.txt,825,0,13,MANAGER documents.txt,826,0,14,ENGINEER -documents.txt,827,0,10,ENGINEER -documents.txt,828,0,11,MANAGER -documents.txt,829,0,15,ENGINEER -documents.txt,830,0,10,MANAGER -documents.txt,831,0,17,MANAGER -documents.txt,832,0,19,MANAGER -documents.txt,833,0,15,ENGINEER -documents.txt,834,0,20,ENGINEER -documents.txt,835,0,15,MANAGER -documents.txt,836,0,16,MANAGER -documents.txt,837,0,11,MANAGER +documents.txt,827,0,11,MANAGER +documents.txt,828,36,49,MANAGER +documents.txt,829,0,20,MANAGER +documents.txt,830,0,13,ENGINEER +documents.txt,831,19,31,MANAGER +documents.txt,832,0,10,ENGINEER +documents.txt,833,0,12,ENGINEER +documents.txt,834,36,53,MANAGER +documents.txt,835,0,14,MANAGER +documents.txt,836,0,14,MANAGER +documents.txt,837,0,18,MANAGER documents.txt,838,0,13,MANAGER -documents.txt,839,0,14,MANAGER -documents.txt,840,0,15,ENGINEER -documents.txt,841,0,16,MANAGER -documents.txt,842,25,43,ENGINEER -documents.txt,843,0,17,MANAGER -documents.txt,844,0,15,ENGINEER -documents.txt,845,0,14,MANAGER -documents.txt,846,0,16,ENGINEER -documents.txt,847,0,10,ENGINEER -documents.txt,848,0,11,ENGINEER -documents.txt,849,36,53,MANAGER -documents.txt,850,0,21,MANAGER -documents.txt,851,0,15,MANAGER -documents.txt,852,0,13,MANAGER -documents.txt,853,0,11,ENGINEER -documents.txt,854,0,22,MANAGER -documents.txt,855,0,13,MANAGER -documents.txt,856,0,12,MANAGER -documents.txt,857,0,16,ENGINEER -documents.txt,858,0,18,ENGINEER -documents.txt,859,0,9,ENGINEER -documents.txt,860,0,12,ENGINEER 
-documents.txt,861,0,14,ENGINEER -documents.txt,862,37,55,ENGINEER -documents.txt,863,0,12,MANAGER -documents.txt,864,0,15,ENGINEER -documents.txt,865,0,13,ENGINEER -documents.txt,866,0,14,ENGINEER -documents.txt,867,20,32,ENGINEER -documents.txt,868,37,50,ENGINEER -documents.txt,869,0,10,ENGINEER -documents.txt,870,0,13,MANAGER -documents.txt,871,0,16,ENGINEER -documents.txt,872,0,14,ENGINEER -documents.txt,873,0,16,MANAGER -documents.txt,874,25,40,ENGINEER -documents.txt,875,0,14,ENGINEER -documents.txt,876,36,49,MANAGER -documents.txt,877,0,9,MANAGER -documents.txt,878,0,17,MANAGER -documents.txt,879,25,39,MANAGER +documents.txt,839,36,51,MANAGER +documents.txt,840,0,16,MANAGER +documents.txt,841,36,52,MANAGER +documents.txt,842,0,17,ENGINEER +documents.txt,843,0,16,ENGINEER +documents.txt,844,0,13,MANAGER +documents.txt,845,0,13,ENGINEER +documents.txt,846,19,26,MANAGER +documents.txt,847,0,19,ENGINEER +documents.txt,848,20,37,ENGINEER +documents.txt,849,0,12,MANAGER +documents.txt,850,0,19,MANAGER +documents.txt,851,0,10,MANAGER +documents.txt,852,0,17,MANAGER +documents.txt,853,0,14,ENGINEER +documents.txt,854,19,41,MANAGER +documents.txt,855,0,12,MANAGER +documents.txt,856,36,48,MANAGER +documents.txt,857,0,20,ENGINEER +documents.txt,858,0,13,MANAGER +documents.txt,859,0,11,ENGINEER +documents.txt,860,0,17,ENGINEER +documents.txt,861,0,15,MANAGER +documents.txt,862,0,17,ENGINEER +documents.txt,863,0,22,MANAGER +documents.txt,864,0,7,ENGINEER +documents.txt,865,0,15,MANAGER +documents.txt,866,0,18,MANAGER +documents.txt,867,0,17,MANAGER +documents.txt,868,0,12,MANAGER +documents.txt,869,0,13,ENGINEER +documents.txt,870,0,13,ENGINEER +documents.txt,871,37,52,ENGINEER +documents.txt,872,0,12,ENGINEER +documents.txt,873,0,11,ENGINEER +documents.txt,874,0,15,MANAGER +documents.txt,875,0,21,ENGINEER +documents.txt,876,0,16,MANAGER +documents.txt,877,0,21,ENGINEER +documents.txt,878,19,39,MANAGER +documents.txt,879,0,20,MANAGER documents.txt,880,0,14,MANAGER 
-documents.txt,881,37,57,ENGINEER -documents.txt,882,0,12,MANAGER -documents.txt,883,19,34,MANAGER -documents.txt,884,0,18,ENGINEER -documents.txt,885,0,16,ENGINEER -documents.txt,886,37,50,ENGINEER -documents.txt,887,0,20,ENGINEER -documents.txt,888,0,16,ENGINEER -documents.txt,889,0,19,MANAGER -documents.txt,890,0,11,MANAGER -documents.txt,891,0,10,MANAGER -documents.txt,892,0,17,ENGINEER -documents.txt,893,0,19,ENGINEER -documents.txt,894,0,9,MANAGER -documents.txt,895,0,14,ENGINEER -documents.txt,896,0,14,MANAGER -documents.txt,897,0,10,ENGINEER -documents.txt,898,0,20,ENGINEER -documents.txt,899,0,13,ENGINEER -documents.txt,900,0,24,ENGINEER -documents.txt,901,0,11,MANAGER -documents.txt,902,0,18,ENGINEER -documents.txt,903,0,19,MANAGER -documents.txt,904,37,52,ENGINEER -documents.txt,905,0,17,MANAGER -documents.txt,906,0,14,ENGINEER -documents.txt,907,0,13,MANAGER -documents.txt,908,0,16,ENGINEER -documents.txt,909,0,13,ENGINEER -documents.txt,910,0,11,ENGINEER -documents.txt,911,0,12,ENGINEER -documents.txt,912,0,16,ENGINEER -documents.txt,913,0,12,ENGINEER -documents.txt,914,36,50,MANAGER -documents.txt,915,0,15,ENGINEER +documents.txt,881,19,33,MANAGER +documents.txt,882,0,20,ENGINEER +documents.txt,883,0,13,ENGINEER +documents.txt,884,0,18,MANAGER +documents.txt,885,0,18,ENGINEER +documents.txt,886,0,13,ENGINEER +documents.txt,887,0,19,ENGINEER +documents.txt,888,0,12,ENGINEER +documents.txt,889,0,14,MANAGER +documents.txt,890,0,11,ENGINEER +documents.txt,891,0,11,MANAGER +documents.txt,892,0,10,ENGINEER +documents.txt,893,0,12,ENGINEER +documents.txt,894,0,18,ENGINEER +documents.txt,895,0,17,MANAGER +documents.txt,896,0,19,MANAGER +documents.txt,897,0,22,ENGINEER +documents.txt,898,0,13,MANAGER +documents.txt,899,0,16,ENGINEER +documents.txt,900,0,19,ENGINEER +documents.txt,901,0,12,MANAGER +documents.txt,902,37,57,ENGINEER +documents.txt,903,0,21,ENGINEER +documents.txt,904,0,14,MANAGER +documents.txt,905,0,19,ENGINEER +documents.txt,906,0,11,ENGINEER 
+documents.txt,907,20,37,ENGINEER +documents.txt,908,0,12,MANAGER +documents.txt,909,36,52,MANAGER +documents.txt,910,0,15,ENGINEER +documents.txt,911,25,42,MANAGER +documents.txt,912,20,36,ENGINEER +documents.txt,913,0,16,ENGINEER +documents.txt,914,0,21,ENGINEER +documents.txt,915,0,17,ENGINEER documents.txt,916,0,16,ENGINEER -documents.txt,917,36,50,MANAGER -documents.txt,918,0,12,MANAGER -documents.txt,919,0,14,ENGINEER -documents.txt,920,0,15,MANAGER -documents.txt,921,0,13,ENGINEER -documents.txt,922,20,36,ENGINEER -documents.txt,923,0,18,ENGINEER -documents.txt,924,19,37,MANAGER -documents.txt,925,0,16,ENGINEER -documents.txt,926,0,14,ENGINEER -documents.txt,927,25,36,ENGINEER -documents.txt,928,37,49,ENGINEER -documents.txt,929,0,10,MANAGER -documents.txt,930,0,10,ENGINEER -documents.txt,931,0,14,ENGINEER -documents.txt,932,0,15,ENGINEER -documents.txt,933,0,16,MANAGER +documents.txt,917,0,12,MANAGER +documents.txt,918,0,18,MANAGER +documents.txt,919,0,11,ENGINEER +documents.txt,920,19,38,MANAGER +documents.txt,921,0,15,ENGINEER +documents.txt,922,36,55,MANAGER +documents.txt,923,0,21,ENGINEER +documents.txt,924,0,20,ENGINEER +documents.txt,925,0,17,MANAGER +documents.txt,926,0,20,ENGINEER +documents.txt,927,25,35,ENGINEER +documents.txt,928,0,16,ENGINEER +documents.txt,929,19,31,MANAGER +documents.txt,930,25,37,MANAGER +documents.txt,931,0,15,ENGINEER +documents.txt,932,36,50,MANAGER +documents.txt,933,0,21,MANAGER documents.txt,934,0,14,ENGINEER -documents.txt,935,0,23,ENGINEER -documents.txt,936,0,18,ENGINEER -documents.txt,937,0,18,MANAGER -documents.txt,938,0,16,ENGINEER -documents.txt,939,36,49,MANAGER -documents.txt,940,0,15,ENGINEER -documents.txt,941,0,13,ENGINEER -documents.txt,942,0,13,MANAGER -documents.txt,943,25,39,ENGINEER -documents.txt,944,37,54,ENGINEER -documents.txt,945,0,12,MANAGER -documents.txt,946,0,13,ENGINEER -documents.txt,947,0,18,MANAGER -documents.txt,948,20,36,ENGINEER -documents.txt,949,0,11,ENGINEER 
-documents.txt,950,0,14,ENGINEER -documents.txt,951,0,15,ENGINEER -documents.txt,952,0,16,MANAGER -documents.txt,953,0,10,ENGINEER -documents.txt,954,0,18,ENGINEER -documents.txt,955,0,15,MANAGER -documents.txt,956,25,37,MANAGER -documents.txt,957,0,15,ENGINEER -documents.txt,958,0,10,MANAGER -documents.txt,959,0,14,ENGINEER -documents.txt,960,0,18,ENGINEER -documents.txt,961,0,14,MANAGER -documents.txt,962,0,14,MANAGER -documents.txt,963,0,14,MANAGER -documents.txt,964,0,12,ENGINEER -documents.txt,965,0,19,MANAGER -documents.txt,966,0,17,ENGINEER -documents.txt,967,0,12,ENGINEER -documents.txt,968,0,15,MANAGER -documents.txt,969,0,17,ENGINEER -documents.txt,970,0,11,MANAGER -documents.txt,971,25,39,ENGINEER -documents.txt,972,0,12,ENGINEER -documents.txt,973,19,33,MANAGER -documents.txt,974,19,32,MANAGER -documents.txt,975,0,23,MANAGER -documents.txt,976,20,44,ENGINEER -documents.txt,977,0,13,ENGINEER -documents.txt,978,0,15,MANAGER -documents.txt,979,0,19,ENGINEER -documents.txt,980,0,12,MANAGER -documents.txt,981,25,40,MANAGER -documents.txt,982,0,12,ENGINEER -documents.txt,983,0,13,MANAGER -documents.txt,984,0,12,MANAGER -documents.txt,985,37,53,ENGINEER -documents.txt,986,25,38,ENGINEER -documents.txt,987,0,11,ENGINEER -documents.txt,988,37,50,ENGINEER -documents.txt,989,0,18,ENGINEER -documents.txt,990,20,35,ENGINEER -documents.txt,991,19,33,MANAGER +documents.txt,935,0,18,ENGINEER +documents.txt,936,0,22,MANAGER +documents.txt,937,25,39,MANAGER +documents.txt,938,0,15,ENGINEER +documents.txt,939,19,28,MANAGER +documents.txt,940,0,19,ENGINEER +documents.txt,941,0,15,MANAGER +documents.txt,942,0,15,ENGINEER +documents.txt,943,0,12,ENGINEER +documents.txt,944,0,16,ENGINEER +documents.txt,945,20,35,ENGINEER +documents.txt,946,36,51,MANAGER +documents.txt,947,20,41,ENGINEER +documents.txt,948,37,52,ENGINEER +documents.txt,949,0,14,ENGINEER +documents.txt,950,0,18,ENGINEER +documents.txt,951,0,15,MANAGER +documents.txt,952,0,14,ENGINEER 
+documents.txt,953,0,12,MANAGER +documents.txt,954,0,16,ENGINEER +documents.txt,955,25,42,MANAGER +documents.txt,956,0,15,ENGINEER +documents.txt,957,19,31,MANAGER +documents.txt,958,36,55,MANAGER +documents.txt,959,0,22,ENGINEER +documents.txt,960,0,13,MANAGER +documents.txt,961,0,15,ENGINEER +documents.txt,962,0,16,ENGINEER +documents.txt,963,0,16,MANAGER +documents.txt,964,0,14,ENGINEER +documents.txt,965,0,14,ENGINEER +documents.txt,966,0,15,ENGINEER +documents.txt,967,0,13,MANAGER +documents.txt,968,0,10,ENGINEER +documents.txt,969,0,10,MANAGER +documents.txt,970,0,10,MANAGER +documents.txt,971,0,22,MANAGER +documents.txt,972,37,50,ENGINEER +documents.txt,973,37,53,ENGINEER +documents.txt,974,0,17,ENGINEER +documents.txt,975,0,20,ENGINEER +documents.txt,976,0,19,ENGINEER +documents.txt,977,0,13,MANAGER +documents.txt,978,36,50,MANAGER +documents.txt,979,25,40,MANAGER +documents.txt,980,0,18,MANAGER +documents.txt,981,0,17,ENGINEER +documents.txt,982,37,55,ENGINEER +documents.txt,983,0,14,ENGINEER +documents.txt,984,0,11,MANAGER +documents.txt,985,0,14,ENGINEER +documents.txt,986,0,14,ENGINEER +documents.txt,987,25,42,MANAGER +documents.txt,988,20,36,ENGINEER +documents.txt,989,25,38,MANAGER +documents.txt,990,0,17,ENGINEER +documents.txt,991,0,18,MANAGER documents.txt,992,0,18,MANAGER -documents.txt,993,0,17,MANAGER -documents.txt,994,0,11,MANAGER -documents.txt,995,0,13,MANAGER -documents.txt,996,0,14,MANAGER -documents.txt,997,0,13,MANAGER -documents.txt,998,0,17,MANAGER -documents.txt,999,25,37,ENGINEER +documents.txt,993,0,13,MANAGER +documents.txt,994,0,16,ENGINEER +documents.txt,995,0,23,ENGINEER +documents.txt,996,0,12,MANAGER +documents.txt,997,0,16,MANAGER +documents.txt,998,0,16,MANAGER +documents.txt,999,0,17,ENGINEER diff --git a/internal/service/comprehend/test-fixtures/entity_recognizer/documents.txt b/internal/service/comprehend/test-fixtures/entity_recognizer/documents.txt index f68319e5bb7c..15470ccb972a 100644 --- 
a/internal/service/comprehend/test-fixtures/entity_recognizer/documents.txt +++ b/internal/service/comprehend/test-fixtures/entity_recognizer/documents.txt @@ -1,1000 +1,1000 @@ -Announcing manager Gerson Parker. -Nickolas Little is a manager. -Alejandra Stiedemann V has been an manager for over a decade. -Eunice Leannon will be the new manager for the team. -Sunny Schmitt is a engineer in the high tech industry. -Anika Gutkowski is a engineer with Example Corp. -Delaney Fisher is retiring as a engineer. -Our latest new employee, Christian Maggio, has been a engineer in the industry for 4 years. -Mathias Hettinger I is retiring as a manager. -Our latest new employee, Elias Quitzon, has been a manager in the industry for 4 years. -Alexandra Wunsch has been an manager for over a decade. -Guido Kemmer joins us as an engineer on the Example project. -Sim Kemmer is a manager with Example Corp. -Help me welcome our newest manager, Vincenza Kertzmann. -Announcing engineer Skye Kuhn Jr.. -Kasandra Cartwright MD is a manager in the high tech industry. -Ettie Schimmel is a manager. -Karlee McCullough Jr. will be the new manager for the team. -Adeline Johnson is a manager with Example Corp. -Cory Morissette has been an manager for over a decade. -Brandy Abshire joins us as an manager on the Example project. -Announcing engineer Jerome Homenick. -Announcing manager Ms. Doris Ziemann. -Help me welcome our newest engineer, Ms. Marlene Larkin. -Amos Kuhlman, an manager, will be presenting the award. -Ana Ortiz II has been a manager for 14 years. -Announcing manager Jewell Konopelski. -Larry Effertz has been a engineer for 14 years. -Ms. Vernice Fay has been an engineer for over a decade. -Miss Dion Ratke is a manager. -Rachelle Lubowitz has been an manager for over a decade. -Rachael Moore is a manager in the high tech industry. -Sabrina Walsh will be the new engineer for the team. -Our latest new employee, Tamara Nicolas, has been a manager in the industry for 4 years. 
-Our latest new employee, Martin Lynch Sr., has been a manager in the industry for 4 years. -Kaya Wisozk is a manager in the high tech industry. -Our latest new employee, Mrs. Cassandra Robel, has been a engineer in the industry for 4 years. -Donny Schroeder has been an manager for over a decade. -Emmanuel Fay is a manager with Example Corp. -Jefferey Adams will be the new engineer for the team. -Godfrey Feest V, an manager, will be presenting the award. -Melyna Mertz has been a manager for 14 years. -Audreanne Wiza is a manager with Example Corp. -Jarvis O'Conner is a manager. -Mertie Sanford joins us as an manager on the Example project. -Help me welcome our newest engineer, Keshaun Altenwerth. -Aric Beier Sr. is a engineer with Example Corp. -Hoyt Rowe MD is a engineer with Example Corp. -Announcing engineer Aurelia Hintz. -Our latest new employee, Keon Stanton, has been a manager in the industry for 4 years. -Candace Wiegand is a manager with Example Corp. -Flavio Smith is retiring as a manager. -Our latest new employee, Mr. Zachery Toy, has been a manager in the industry for 4 years. -Kaleigh Murazik has been a engineer for 14 years. -Help me welcome our newest engineer, Pattie Kuvalis. -Serena Heidenreich has been a engineer for 14 years. -Our latest new employee, Simone Little, has been a manager in the industry for 4 years. -Ms. Carolina Mante has been a engineer for 14 years. -Julia Lemke has been a manager for 14 years. -Mrs. Meagan Fisher has been an manager for over a decade. -Molly Goldner joins us as an engineer on the Example project. -Announcing manager Mrs. Dax McGlynn. -Tyrique Harber has been an engineer for over a decade. -Mia Larkin II joins us as an manager on the Example project. -Winona Schoen, an manager, will be presenting the award. -Announcing engineer Dr. Antonette Cummings. -Our latest new employee, Mr. Sonya Cassin, has been a manager in the industry for 4 years. -Timothy Bartoletti DDS has been a manager for 14 years. 
-Our latest new employee, Winifred Reinger, has been a engineer in the industry for 4 years. -Announcing engineer Marcella Bahringer. -Rhea Kohler is a engineer. -Help me welcome our newest engineer, Alia McCullough. -Mr. Israel Gulgowski is a engineer. -Our latest new employee, Jana Wilkinson, has been a engineer in the industry for 4 years. -Marjory Fay will be the new manager for the team. -Alena Mueller is a engineer with Example Corp. -Help me welcome our newest engineer, Kristian Keebler. -Katheryn Dietrich is a manager with Example Corp. -Mya Grady has been an manager for over a decade. -Our latest new employee, Kyra Heidenreich V, has been a engineer in the industry for 4 years. -Announcing manager Dr. Cortez Thiel. -Shyanne Hirthe has been an engineer for over a decade. -Announcing engineer Herta Mohr. -Winfield Thompson is a engineer in the high tech industry. -Help me welcome our newest manager, Miss Bette Dooley. -Mrs. Freida Shanahan has been an engineer for over a decade. -Sunny Towne is a manager in the high tech industry. -Kayley Schneider joins us as an engineer on the Example project. -Announcing manager Anderson Hagenes. -Barrett Paucek Jr. is a manager. -Lillian Koss, an engineer, will be presenting the award. -Announcing engineer Lance Zieme. -Announcing engineer Van Hackett. -Cullen Schamberger is a engineer in the high tech industry. -Arvel Stanton joins us as an manager on the Example project. -Our latest new employee, Nedra Goldner, has been a manager in the industry for 4 years. -Ms. Zetta Lindgren is a manager in the high tech industry. -Help me welcome our newest engineer, Faye Ledner. -Trystan Hilll PhD has been an manager for over a decade. -Arnold Hermann Jr. will be the new engineer for the team. -Hank Towne is a manager in the high tech industry. -Monroe Schmeler has been a manager for 14 years. -Genesis Wintheiser II will be the new engineer for the team. -Angie O'Reilly has been an manager for over a decade. 
-Deshawn Reinger is a manager in the high tech industry. -Announcing engineer Mrs. Molly Lowe. -Kimberly Osinski is a manager in the high tech industry. -Napoleon Sauer is a engineer in the high tech industry. -Leonardo Champlin is a manager. -Anita Bartell is a manager in the high tech industry. -Mrs. Jennifer Buckridge has been a manager for 14 years. -Patrick Gulgowski is a manager with Example Corp. -Miss Brooks Christiansen, an manager, will be presenting the award. -Help me welcome our newest engineer, Ms. Alejandra Shields. -Callie Lynch will be the new engineer for the team. -Hyman Bruen will be the new engineer for the team. -Ms. Lera Grant, an engineer, will be presenting the award. -Help me welcome our newest manager, Laney West V. -Shyanne Price is a engineer. -Leanna Schoen is a engineer in the high tech industry. -Announcing engineer Clement Bayer. -Mrs. Colt Kozey is retiring as a manager. -Dr. Logan Dickinson will be the new engineer for the team. -Valentin Torp has been a manager for 14 years. -Kathlyn Bechtelar, an manager, will be presenting the award. -Vita Pollich has been a engineer for 14 years. -Benedict Flatley is a manager with Example Corp. -Our latest new employee, Dedrick Corkery V, has been a manager in the industry for 4 years. -Our latest new employee, Shea Kohler, has been a engineer in the industry for 4 years. -Tiana Mertz is a engineer in the high tech industry. -Margarett Kunze has been a engineer for 14 years. -Announcing manager Alan Bashirian. -Dr. Precious Murazik has been an manager for over a decade. -Ms. Karine Langosh has been a engineer for 14 years. -Help me welcome our newest engineer, Letha Skiles. -Help me welcome our newest engineer, Nikko Dooley. -Announcing engineer Filomena McKenzie. -Rosario Grant Sr., an engineer, will be presenting the award. -Gonzalo Douglas V, an engineer, will be presenting the award. -Lempi Pollich will be the new engineer for the team. 
-Help me welcome our newest engineer, Candida O'Reilly Sr.. -Dena Lind is a engineer with Example Corp. -Blair Renner is a manager with Example Corp. -Imani Roob joins us as an manager on the Example project. -Miss Drake Johns will be the new manager for the team. -Announcing manager Murl Jacobi. -Announcing manager Nicolette Prohaska. -Leif Quigley is a manager with Example Corp. -Hugh Gleichner III has been an manager for over a decade. -Neil Witting is a manager. -Ivah Moore is a engineer in the high tech industry. -Amira Bruen is retiring as a engineer. -Help me welcome our newest engineer, Loy Kerluke. -Our latest new employee, Cleta Berge, has been a manager in the industry for 4 years. -Asa Schaden has been a engineer for 14 years. -Announcing engineer Marlin Nitzsche. -Mellie Hansen has been an engineer for over a decade. -Pauline Willms has been an manager for over a decade. -Giovanna Marquardt is a manager in the high tech industry. -Kristian Conroy IV has been a manager for 14 years. -Announcing engineer Aiyana Hagenes. -Our latest new employee, Melisa Barton Jr., has been a manager in the industry for 4 years. -Help me welcome our newest manager, Robbie Bechtelar. -Citlalli Lind joins us as an engineer on the Example project. -Karlee Lubowitz is a manager in the high tech industry. -Isidro Jerde is a engineer with Example Corp. -Conor Lemke joins us as an manager on the Example project. -Murphy Rutherford joins us as an manager on the Example project. -Omari Schultz has been an manager for over a decade. -Selmer Labadie is a manager in the high tech industry. -Alverta Hilpert, an manager, will be presenting the award. -Our latest new employee, Ali Dooley III, has been a engineer in the industry for 4 years. -Benjamin Lubowitz is a manager. -Trenton Hilll will be the new manager for the team. -Bethel Carroll, an engineer, will be presenting the award. -Nick Lang will be the new manager for the team. -Ms. 
Chase Graham has been an manager for over a decade. -Announcing engineer Melany Buckridge PhD. -Ursula McGlynn joins us as an engineer on the Example project. -Jackie Kshlerin is a manager. -Jarvis Langosh is a engineer. -Bonnie Bednar will be the new manager for the team. -Announcing engineer Simeon Dickinson. -Annamarie VonRueden MD has been an manager for over a decade. -Ms. Hilton Hagenes has been an engineer for over a decade. -Jaqueline D'Amore has been an manager for over a decade. -Mario Bartell will be the new manager for the team. -Frances Kovacek has been an manager for over a decade. -Help me welcome our newest manager, Avery Lakin. -Brittany Flatley DDS, an engineer, will be presenting the award. -Violette Sauer has been an manager for over a decade. -Dorothy Farrell is a engineer in the high tech industry. -Miss Jordan Crooks is a manager in the high tech industry. -Jadyn West joins us as an manager on the Example project. -Mrs. Johanna Borer is a engineer with Example Corp. -Elyssa Crist joins us as an engineer on the Example project. -Our latest new employee, Carli Kohler, has been a manager in the industry for 4 years. -Dasia Dare has been a engineer for 14 years. -Lane Rath has been an manager for over a decade. -Davion Kunze, an engineer, will be presenting the award. -Vada Stoltenberg, an engineer, will be presenting the award. -London Hagenes has been an engineer for over a decade. -Albin Wintheiser will be the new manager for the team. -Help me welcome our newest engineer, Mr. Eloise Jones. -Rashawn Bechtelar is a manager with Example Corp. -Linwood Casper, an engineer, will be presenting the award. -Our latest new employee, Mr. Phyllis O'Reilly, has been a manager in the industry for 4 years. -Carrie Franecki is a manager in the high tech industry. -Our latest new employee, Mikel Daugherty I, has been a manager in the industry for 4 years. -Announcing manager Jorge Connelly IV. -Victor Cummings is a manager with Example Corp. 
-Felton Kris has been a manager for 14 years. -Barney Klein is a manager with Example Corp. -Help me welcome our newest manager, Verdie Strosin. -Announcing engineer Izabella Vandervort DDS. -Philip Hodkiewicz has been an manager for over a decade. -Mr. Carol Orn, an manager, will be presenting the award. -Ursula Cormier is a manager. -Ellen Hudson is retiring as a engineer. -Bobby Conn II has been an engineer for over a decade. -Announcing engineer Mathilde Bayer. -Ms. Marcus Ferry has been an engineer for over a decade. -Our latest new employee, Tyshawn Volkman, has been a manager in the industry for 4 years. -Our latest new employee, Ladarius Predovic, has been a engineer in the industry for 4 years. -Our latest new employee, Ally Parisian, has been a manager in the industry for 4 years. -Briana Reinger joins us as an manager on the Example project. -Piper Kutch has been a manager for 14 years. -Armand Ratke is a manager with Example Corp. -Stephon McLaughlin DDS joins us as an engineer on the Example project. -Cordell Cole is a engineer with Example Corp. -Earl Altenwerth is retiring as a engineer. -Our latest new employee, Garrison Lang Jr., has been a manager in the industry for 4 years. -Ahmad Heaney, an engineer, will be presenting the award. -Jed Pollich has been a manager for 14 years. -Help me welcome our newest manager, Hudson Wyman. -Dena Crist DVM has been an engineer for over a decade. -Help me welcome our newest manager, Gino Auer III. -Help me welcome our newest manager, Miss Xavier Farrell. -Announcing engineer Lawrence Considine. -Coby Lesch is a engineer in the high tech industry. -Hardy Mohr joins us as an manager on the Example project. -Dr. Tamara Ryan is retiring as a manager. -Alia Quigley PhD, an engineer, will be presenting the award. -Noelia Bergnaum is a engineer with Example Corp. -Dakota Brown is a engineer. -Elmo Rogahn is a engineer. -Roxanne Weimann joins us as an manager on the Example project. 
-Gay Langworth is a manager in the high tech industry. -Ellis Ledner joins us as an engineer on the Example project. -Weston Murazik will be the new engineer for the team. -Our latest new employee, Josefa Sauer, has been a manager in the industry for 4 years. -Our latest new employee, Ilene Wyman, has been a manager in the industry for 4 years. -Nia Gottlieb, an manager, will be presenting the award. -Our latest new employee, Crawford Wehner, has been a manager in the industry for 4 years. -Our latest new employee, Brenna Ritchie, has been a manager in the industry for 4 years. -Ms. Mariah Hudson joins us as an manager on the Example project. -Tyra Schneider Sr. has been an manager for over a decade. -Help me welcome our newest manager, Andreane Johns. -Help me welcome our newest engineer, Mrs. Mabel Rice. -Emelia Jaskolski PhD is a engineer. -Spencer Cole II is a engineer in the high tech industry. -Help me welcome our newest manager, Doris Stokes. -Lilian Erdman has been a engineer for 14 years. -Ms. Ramona Torp is retiring as a manager. -Ms. Lauryn Stark, an manager, will be presenting the award. -Israel Greenholt will be the new engineer for the team. -Ms. Boris Leannon is a manager with Example Corp. -Help me welcome our newest manager, Pearlie Swaniawski. -Delores Kilback joins us as an manager on the Example project. -Mariam Schultz is a engineer in the high tech industry. -Dimitri Mueller IV is a manager with Example Corp. -Maud Beahan will be the new manager for the team. -Fletcher Predovic DVM, an engineer, will be presenting the award. -Mrs. Joanne Aufderhar will be the new engineer for the team. -Miss Paul Lowe is retiring as a engineer. -Johnpaul Swift has been an manager for over a decade. -Miss Rowena Pouros is a engineer. -Benjamin Jenkins is a engineer. -Erwin Jenkins will be the new manager for the team. -Ms. Zula Turner, an engineer, will be presenting the award. -Rhiannon Lind is retiring as a manager. -Mrs. 
Garth Labadie will be the new manager for the team. -Mia King is a manager in the high tech industry. -Ewald Cronin is a engineer with Example Corp. -Carrie Roob III has been an manager for over a decade. -Help me welcome our newest manager, Clementina Schmeler. -Stewart Sipes II has been an engineer for over a decade. -Our latest new employee, Katelin D'Amore, has been a manager in the industry for 4 years. -Announcing manager Verlie Wiegand. -Marie Schaefer is a engineer with Example Corp. -Tillman Boehm is a engineer in the high tech industry. -Announcing manager Jacklyn Kohler. -Katrine Bruen will be the new manager for the team. -Our latest new employee, Lisa Gaylord, has been a engineer in the industry for 4 years. -Virginia Ruecker, an engineer, will be presenting the award. -Mrs. Garnett Christiansen joins us as an engineer on the Example project. -Anderson Weissnat has been an engineer for over a decade. -Help me welcome our newest engineer, Marie Armstrong. -Help me welcome our newest engineer, Prudence Fahey V. -Bria Medhurst will be the new manager for the team. -Announcing engineer Ms. Dewitt Bernhard. -Stanford Miller has been a manager for 14 years. -Freddie Treutel, an manager, will be presenting the award. -Oceane Bayer has been an manager for over a decade. -Ms. Adalberto Lindgren joins us as an manager on the Example project. -Our latest new employee, Meagan Bartoletti, has been a manager in the industry for 4 years. -Wilhelm Kutch, an engineer, will be presenting the award. -Our latest new employee, Khalid Farrell Sr., has been a manager in the industry for 4 years. -Help me welcome our newest engineer, Ms. Allison Zemlak. -Judson Rodriguez MD is a manager with Example Corp. -Help me welcome our newest manager, Mr. Alena Stanton. -Kaylin Kohler has been a engineer for 14 years. -Melany Price is a engineer with Example Corp. -Palma Brekke is a manager. -Help me welcome our newest manager, Ozella Larson. 
-Earnestine Sanford will be the new engineer for the team. -Kathryne Tromp has been an engineer for over a decade. -Ted Abernathy is a manager with Example Corp. -Our latest new employee, Nelle Waters, has been a engineer in the industry for 4 years. -Help me welcome our newest engineer, Dawn Kautzer. -Ms. Itzel Breitenberg has been an engineer for over a decade. -Meta Gibson is a manager in the high tech industry. -Haven Nitzsche joins us as an engineer on the Example project. -Our latest new employee, Antonetta Kilback I, has been a manager in the industry for 4 years. -Kiara Zboncak joins us as an manager on the Example project. -Our latest new employee, Leola Kris, has been a engineer in the industry for 4 years. -Our latest new employee, Mr. Conrad Hills, has been a manager in the industry for 4 years. -Alize Rogahn has been a engineer for 14 years. -Rudy Hamill is a manager in the high tech industry. -Ms. Celestino Turcotte joins us as an manager on the Example project. -Ms. Annetta Stracke has been an manager for over a decade. -Hailie Hudson is a engineer. -Mrs. Deven Moen joins us as an manager on the Example project. -Callie Larson is a manager with Example Corp. -Quentin Morar joins us as an engineer on the Example project. -Our latest new employee, Antonietta Kuhlman II, has been a manager in the industry for 4 years. -Announcing manager Cristal Shanahan DVM. -Cristopher Boyer joins us as an engineer on the Example project. -Keely Larkin joins us as an engineer on the Example project. -Royce Berge is a engineer with Example Corp. -Announcing engineer Benjamin Hilll. -Rashawn Bogan is retiring as a manager. -Ted Collier has been a manager for 14 years. -Alene Corwin has been an manager for over a decade. -David Hodkiewicz is a manager with Example Corp. -Garland Kuhic has been an manager for over a decade. -Sonya Wilderman is a manager. -Quinn Bradtke Jr. is a manager with Example Corp. -Help me welcome our newest engineer, Ellen Cummerata. 
-Mason Beatty, an manager, will be presenting the award. -Help me welcome our newest manager, Camylle Muller. -Hadley Upton has been a engineer for 14 years. -Keara Pfeffer is a engineer with Example Corp. -Angie Walsh is retiring as a manager. -Help me welcome our newest manager, Earl Cummings. -Ephraim Marks is retiring as a engineer. -Orval Reichert DDS is a engineer. -Help me welcome our newest engineer, Katheryn Gleichner. -Announcing engineer Jalyn Fay I. -Virginia Keebler DVM is retiring as a engineer. -Raphael Leffler is a engineer. -Juliana Stokes is a manager with Example Corp. -Casper Herman is a manager in the high tech industry. -Announcing manager Vladimir Reilly. -Erin Okuneva has been an manager for over a decade. -Martine White is retiring as a manager. -Tristian Mertz is a manager. -Mr. Sammy Schmitt is a engineer with Example Corp. -Alec Schuster joins us as an manager on the Example project. -Ms. Lorenza Walsh will be the new engineer for the team. -Help me welcome our newest engineer, Eldora Mayert. -Justina Breitenberg will be the new manager for the team. -Our latest new employee, Mariela Grady Jr., has been a engineer in the industry for 4 years. -Kevon Baumbach, an manager, will be presenting the award. -Our latest new employee, Wendell Hayes, has been a engineer in the industry for 4 years. -Pat Aufderhar is a engineer with Example Corp. -Bart Senger joins us as an manager on the Example project. -Kaitlyn Hahn is a engineer. -Our latest new employee, Mrs. Else Kozey, has been a manager in the industry for 4 years. -Mr. Ashton Batz will be the new engineer for the team. -Lilly Koepp has been a engineer for 14 years. -Mrs. Alfredo Cormier has been a manager for 14 years. -Gail Swaniawski DVM is a engineer. -Mrs. Valentina Wilderman has been an engineer for over a decade. -Paxton Doyle is a engineer in the high tech industry. -Jarret Block PhD joins us as an manager on the Example project. 
-Arnaldo Blanda joins us as an manager on the Example project. -Aiden Orn has been an manager for over a decade. -Florine West is a manager. -Help me welcome our newest manager, Sincere Harber. -Announcing engineer Joan Ziemann. -Katelyn Schultz has been an engineer for over a decade. -Maximus Gleichner is a manager in the high tech industry. -Announcing manager Elenor Schuster. -Marcelino Kautzer will be the new engineer for the team. -Lea Schulist Sr. has been a engineer for 14 years. -Jeanne Carter MD is a manager in the high tech industry. -Kayleigh Goldner DDS is a engineer in the high tech industry. -Hilario Denesik I is a manager with Example Corp. -Shirley Reichert has been a manager for 14 years. -Kris Dickens is a engineer. -Gene Frami, an manager, will be presenting the award. -Sadye Jacobson is a manager in the high tech industry. -Buck Cremin joins us as an engineer on the Example project. -Our latest new employee, Coty Lesch, has been a engineer in the industry for 4 years. -Dr. Kim Mertz has been an engineer for over a decade. -Randy Sanford will be the new manager for the team. -Help me welcome our newest manager, Levi Kirlin. -Announcing engineer Davin Yundt. -Help me welcome our newest engineer, Enola Bins. -Trent Kuvalis, an engineer, will be presenting the award. -Announcing engineer Jake Powlowski. -Ms. Ashlee Emmerich, an manager, will be presenting the award. -Hannah Davis, an engineer, will be presenting the award. -Wayne Champlin joins us as an engineer on the Example project. -Nikki Conn Jr. is a manager. -Announcing engineer Carli Bauch. -Norbert Feest is a manager in the high tech industry. -Robbie Wintheiser has been an engineer for over a decade. -Leta Abshire is a engineer in the high tech industry. -Fannie Walker is a engineer. -Heber Wilkinson is a manager with Example Corp. -Announcing manager Willie Bernier III. -Help me welcome our newest engineer, Orlando Price. -Brandt Schowalter is a manager. 
-Mohammed Stokes is retiring as a engineer. -Isai Mraz is a engineer in the high tech industry. -Kadin Lemke will be the new manager for the team. -Maribel Jerde has been an manager for over a decade. -Myrna Kessler is a manager. -Meredith Tremblay is retiring as a engineer. -Announcing engineer Mr. Jerad Schneider. -Announcing manager Lenny Pfeffer. -Carolyne Klocko DVM will be the new manager for the team. -Our latest new employee, Monica Schulist, has been a engineer in the industry for 4 years. -Anika Larson V is a manager. -Domenick Pacocha is retiring as a engineer. -Miss Harmon Pfannerstill has been a engineer for 14 years. -Announcing manager Mr. Annabell Pouros. -Dr. Brisa Stroman has been an manager for over a decade. -Help me welcome our newest manager, Jade Stoltenberg. -Miss Mario Wolff joins us as an engineer on the Example project. -Ms. Savannah Gaylord is retiring as a manager. -Dejah Jones, an manager, will be presenting the award. -Hector Kulas, an manager, will be presenting the award. -Graciela Goodwin will be the new manager for the team. -Jocelyn Sauer is a manager. -Miss Lew Hansen will be the new manager for the team. -Fannie Fay DDS is a engineer with Example Corp. -Dr. Jordan Klocko is a engineer with Example Corp. -Kathlyn Lynch is a manager in the high tech industry. -Leann Botsford has been an engineer for over a decade. -Ervin Larson has been a manager for 14 years. -Allie Von is a manager in the high tech industry. -Johanna Kohler III has been a manager for 14 years. -Our latest new employee, Hilbert Armstrong, has been a engineer in the industry for 4 years. -Tanner Balistreri IV has been an manager for over a decade. -Abagail Shields has been an engineer for over a decade. -Help me welcome our newest engineer, Gia Cremin. -Mrs. Buford Oberbrunner is retiring as a engineer. -Madelyn White is retiring as a manager. -Announcing manager Abdullah Effertz. -Reva Stark will be the new engineer for the team. 
-Camryn McKenzie will be the new manager for the team. -Juwan Pouros will be the new manager for the team. -Gene Cassin is a manager in the high tech industry. -Our latest new employee, Felicia Kunde, has been a engineer in the industry for 4 years. -Our latest new employee, Jeremie Anderson, has been a manager in the industry for 4 years. -Our latest new employee, Katheryn Hickle Jr., has been a manager in the industry for 4 years. -Our latest new employee, Edwina Hamill IV, has been a manager in the industry for 4 years. -Adriana Cassin DVM has been a manager for 14 years. -Nelda Rowe joins us as an engineer on the Example project. -Rodrigo Kulas V joins us as an engineer on the Example project. -Jaiden Williamson is a manager in the high tech industry. -Announcing manager Cristopher Williamson. -Mr. Jay Krajcik, an engineer, will be presenting the award. -Announcing manager Francesco Miller. -Brenna Reinger, an manager, will be presenting the award. -Mr. Mollie Stanton has been an manager for over a decade. -Coby Schowalter has been a engineer for 14 years. -Our latest new employee, Estefania Armstrong II, has been a manager in the industry for 4 years. -Announcing engineer Aimee Nienow. -Kimberly Batz will be the new engineer for the team. -Miss Sienna Pfannerstill is retiring as a engineer. -Johnathon Hammes is retiring as a engineer. -Julien Hansen is a manager in the high tech industry. -Mrs. Emerson Waelchi will be the new manager for the team. -Malcolm Streich is a manager with Example Corp. -Aurelio Lebsack is retiring as a engineer. -Juana Grady has been a engineer for 14 years. -Kiel Lakin is a manager in the high tech industry. -Our latest new employee, Sarai Keeling, has been a engineer in the industry for 4 years. -Emilia Crona has been a engineer for 14 years. -Georgianna Kris is a manager in the high tech industry. -Maida Heller is retiring as a manager. -Jena Feeney is a engineer with Example Corp. 
-Mabelle Keeling has been an manager for over a decade. -Chris Bergstrom has been an engineer for over a decade. -Our latest new employee, Audrey Block DDS, has been a engineer in the industry for 4 years. -Our latest new employee, Louvenia Kuhn, has been a engineer in the industry for 4 years. -Thomas O'Keefe has been a manager for 14 years. -Our latest new employee, Darby Klocko, has been a manager in the industry for 4 years. -Arlene Weimann, an manager, will be presenting the award. -Our latest new employee, Corbin Jones MD, has been a engineer in the industry for 4 years. -Lamar Mraz joins us as an engineer on the Example project. -Miss Onie Krajcik is a manager with Example Corp. -Kamille Schaefer, an manager, will be presenting the award. -Jack Borer, an engineer, will be presenting the award. -Reese Heaney has been a manager for 14 years. -Ilene Kovacek will be the new engineer for the team. -Trace Bailey will be the new engineer for the team. -Our latest new employee, Wava Donnelly, has been a engineer in the industry for 4 years. -Mona Lakin has been a manager for 14 years. -Weldon Heaney joins us as an manager on the Example project. -Norris Labadie has been a engineer for 14 years. -Bridgette Brown has been an manager for over a decade. -Osborne Kertzmann is a manager with Example Corp. -Announcing engineer Verlie Bruen. -Enrique Ullrich is a engineer with Example Corp. -Dr. Asia Purdy joins us as an engineer on the Example project. -Announcing manager Lindsey Predovic DDS. -Maxine Mosciski is retiring as a manager. -Sydni Stoltenberg is retiring as a engineer. -Paige Buckridge will be the new engineer for the team. -Miss Laverne Dach has been an manager for over a decade. -Murl Abshire is a manager in the high tech industry. -Lou Friesen is retiring as a manager. -Announcing engineer Keenan Fahey. -Ashleigh Schultz, an engineer, will be presenting the award. -Mrs. Keshaun Lesch has been an engineer for over a decade. 
-Announcing engineer Jeffrey Langosh. -Mckenzie Boyle, an engineer, will be presenting the award. -Hipolito Price PhD joins us as an manager on the Example project. -Lesley Adams III joins us as an engineer on the Example project. -Mya Howe is a engineer with Example Corp. -Nick Kutch Sr. has been a manager for 14 years. -Our latest new employee, Ms. Winfield Wilkinson, has been a engineer in the industry for 4 years. -Leopold Schulist is a engineer with Example Corp. -Announcing engineer Orval Prosacco. -Wilmer Mueller has been an manager for over a decade. -Karina Batz has been a manager for 14 years. -Our latest new employee, Luigi Abbott, has been a manager in the industry for 4 years. -Pamela Miller is a manager in the high tech industry. -Emelie Marquardt is a engineer with Example Corp. -Zola Beier is a manager in the high tech industry. -Mr. Elva Ritchie joins us as an engineer on the Example project. -Mrs. Otis Quitzon is a manager in the high tech industry. -Our latest new employee, Dr. Willow Jacobs, has been a manager in the industry for 4 years. -Cathryn Koss joins us as an manager on the Example project. -Ms. Alivia Ernser is retiring as a manager. -Timothy Mohr is a engineer. -Mrs. Jaylan Wuckert is retiring as a manager. -Emerald Waelchi is a engineer. -Vernon Heathcote is retiring as a manager. -Announcing engineer Lavinia Ruecker. -Mr. Quinn Altenwerth is retiring as a manager. -Alejandra Marks, an manager, will be presenting the award. -Mr. Leo Wuckert has been an manager for over a decade. -Jayce Schiller MD is a manager in the high tech industry. -Elenora Ebert Jr. will be the new engineer for the team. -Announcing manager Dr. Rose Wyman. -Help me welcome our newest manager, Wade Orn. -Iva Marks Sr., an manager, will be presenting the award. -Margaret Pouros will be the new manager for the team. -Barton Deckow has been an manager for over a decade. -Miss Lesly Balistreri will be the new engineer for the team. -Mr. 
Jacquelyn Reynolds joins us as an engineer on the Example project. -Announcing manager Doyle Heidenreich. -Announcing engineer Heidi Ruecker. -Mr. Alvis Moen joins us as an manager on the Example project. -Dr. Garnet Brown is a engineer. -Yolanda Beier is a engineer with Example Corp. -Soledad Macejkovic joins us as an engineer on the Example project. -Urban Lowe has been a engineer for 14 years. -Devyn Schmidt has been an manager for over a decade. -Barbara Flatley is a manager in the high tech industry. -Patsy Sanford PhD has been an engineer for over a decade. -Mrs. Rubye Blanda is a engineer. -Caleigh Klocko is a engineer. -Kali Dietrich has been an manager for over a decade. -Ms. Weldon Hudson is retiring as a engineer. -Help me welcome our newest engineer, Vallie Huel. -Sven O'Keefe III joins us as an engineer on the Example project. -Help me welcome our newest engineer, Gerardo Wehner Sr.. -Kyle Kirlin is a manager in the high tech industry. -Help me welcome our newest engineer, Marianne Berge Jr.. -Ms. Cristal Connelly is retiring as a manager. -Kailey Spinka has been an manager for over a decade. -Jeremie Morar has been an manager for over a decade. -Our latest new employee, Daija Lind, has been a engineer in the industry for 4 years. -Arvel McDermott is a engineer. -Dr. Nicholas Gorczany has been a manager for 14 years. -Anne Leuschke has been a manager for 14 years. -Gerda Cronin has been a engineer for 14 years. -Ms. Coty Rolfson is retiring as a engineer. -Announcing engineer Kareem Gerhold. -Announcing engineer Mrs. Nico Mann. -Corbin Bartell is a engineer in the high tech industry. -Theresa Gulgowski will be the new manager for the team. -Carmelo Boyer is retiring as a manager. -Elinore Schulist III joins us as an manager on the Example project. -Katarina Schultz joins us as an manager on the Example project. -Deven Rodriguez II is a manager in the high tech industry. -Miss Bruce Friesen is a manager in the high tech industry. 
-Marcelle Schowalter joins us as an manager on the Example project. -Albertha Murphy PhD has been an manager for over a decade. -Elmore Doyle is a engineer. -Our latest new employee, Reymundo Jaskolski, has been a engineer in the industry for 4 years. -Stephania Swaniawski I joins us as an manager on the Example project. -Stewart Veum has been an manager for over a decade. -Help me welcome our newest manager, Nathanael Bartell. -Retha Rempel is a engineer. -Isidro Aufderhar joins us as an manager on the Example project. -Florencio Mohr, an manager, will be presenting the award. -Zella Weimann is a engineer with Example Corp. -Khalid Macejkovic will be the new engineer for the team. -Our latest new employee, Geraldine Torp, has been a engineer in the industry for 4 years. -Presley Marks will be the new engineer for the team. -Mrs. Eve Bartoletti is retiring as a engineer. -Corine Schimmel, an manager, will be presenting the award. -Citlalli Goldner DDS, an manager, will be presenting the award. -Zakary Botsford, an manager, will be presenting the award. -Florida Reilly is retiring as a engineer. -Mr. Patsy Doyle joins us as an manager on the Example project. -Help me welcome our newest engineer, Emily Hayes II. -Dr. Johann Turcotte will be the new engineer for the team. -Dr. Darion Dietrich is a engineer in the high tech industry. -Norris Brekke is a manager in the high tech industry. -Janessa Marquardt joins us as an manager on the Example project. -Felicita Wintheiser MD has been an manager for over a decade. -Melyssa Muller, an engineer, will be presenting the award. -Vivienne Weissnat DDS has been a engineer for 14 years. -Ford Gerlach is a engineer. -Keenan Kertzmann is a engineer in the high tech industry. -Tobin Goyette joins us as an engineer on the Example project. -Help me welcome our newest manager, Cecilia Green. -Abe Fisher has been an manager for over a decade. -Mrs. Annabell Morissette is a manager. -Announcing manager Danny Kautzer III. 
-Gerard Cruickshank joins us as an engineer on the Example project. -Joy Okuneva DDS, an manager, will be presenting the award. -Ezequiel Macejkovic joins us as an engineer on the Example project. -Vernie Bradtke IV, an engineer, will be presenting the award. -Trycia Muller is a engineer. -Clotilde Ankunding is a engineer in the high tech industry. -Mr. Chaya Abshire, an engineer, will be presenting the award. -Announcing manager Hanna Roob. -Ronny Dietrich will be the new engineer for the team. -Derek Durgan DVM will be the new manager for the team. -Demetrius West will be the new manager for the team. -Macey Nikolaus is retiring as a manager. -Edison Gottlieb III is a manager in the high tech industry. -Bo Collins is a engineer with Example Corp. -Michaela Pagac PhD has been a manager for 14 years. -Mireille Kunde I is a engineer with Example Corp. -Duncan Kulas will be the new manager for the team. -Mrs. Xzavier Smitham is a engineer with Example Corp. -Announcing manager Mr. Adrianna Baumbach. -Shad Rolfson is a manager with Example Corp. -Mr. Dimitri Baumbach, an manager, will be presenting the award. -Our latest new employee, Samara Schultz, has been a manager in the industry for 4 years. -Mrs. Brant Kautzer is a manager in the high tech industry. -Tierra Greenholt is a engineer. -Our latest new employee, Otho Kub, has been a manager in the industry for 4 years. -Ana Harber will be the new manager for the team. -Our latest new employee, Ted Mertz Sr., has been a engineer in the industry for 4 years. -Uriel Zieme will be the new manager for the team. -Mr. Adaline Wolff has been an engineer for over a decade. -Help me welcome our newest manager, Mrs. Maurice Senger. -Ada Gleason has been an manager for over a decade. -Edwina Bernier DVM has been an engineer for over a decade. -Elva Homenick is retiring as a engineer. -Our latest new employee, Mrs. Shane Powlowski, has been a manager in the industry for 4 years. -Obie Nikolaus has been a manager for 14 years. 
-Ottis Jakubowski is a manager. -Mr. Armand Leannon is a engineer in the high tech industry. -Jaime Kuvalis joins us as an engineer on the Example project. -Help me welcome our newest engineer, Loyce VonRueden Jr.. -Evangeline Johns joins us as an manager on the Example project. -Federico Halvorson is a manager in the high tech industry. -Sylvester Gerlach is a manager in the high tech industry. -Our latest new employee, Ashly Wunsch V, has been a manager in the industry for 4 years. -Rowland Miller joins us as an manager on the Example project. -Bryon Kunde has been a manager for 14 years. -Denis Ernser has been an engineer for over a decade. -Jovanny O'Reilly DVM is a engineer with Example Corp. -Lazaro Hermiston is retiring as a manager. -Kelly Stehr is a engineer. -Oda Fadel is a engineer with Example Corp. -Percival Armstrong is a engineer. -Caleigh Schimmel joins us as an manager on the Example project. -Tyrique Pfeffer has been a engineer for 14 years. -Adell Leuschke is retiring as a engineer. -Dr. Cade Farrell will be the new engineer for the team. -Gisselle Doyle is retiring as a manager. -Lily Reinger will be the new manager for the team. -Jeffrey Gleason has been a engineer for 14 years. -Tad Huel, an engineer, will be presenting the award. -Connor Conn joins us as an manager on the Example project. -Mr. Cathrine Casper is a engineer with Example Corp. -Our latest new employee, Dr. Beryl Rempel, has been a manager in the industry for 4 years. -Carlie Steuber joins us as an manager on the Example project. -Rose Frami IV is a engineer. -Mr. Tyrel Pagac joins us as an manager on the Example project. -Our latest new employee, Morton Trantow, has been a engineer in the industry for 4 years. -Lola Ortiz has been an manager for over a decade. -Kelton Champlin has been a manager for 14 years. -Our latest new employee, Owen Mayert, has been a engineer in the industry for 4 years. -Johnny Witting joins us as an manager on the Example project. 
-Thea Rolfson will be the new manager for the team. -Reanna Schmidt has been an engineer for over a decade. -Help me welcome our newest manager, Ian Stehr. -Mr. Patsy Purdy will be the new manager for the team. -Brady Ritchie will be the new engineer for the team. -Our latest new employee, Thea Effertz, has been a manager in the industry for 4 years. -Gerry Veum has been a engineer for 14 years. -Corene Adams is retiring as a manager. -Julian Kutch has been a manager for 14 years. -Mrs. Joe Connelly is a engineer. -Announcing manager Adolphus Paucek. -Jasmin Ledner II is a engineer with Example Corp. -Dr. Osbaldo Beatty will be the new engineer for the team. -Dr. Mckenna Haag is a manager with Example Corp. -Help me welcome our newest engineer, Abdiel Connelly DVM. -Help me welcome our newest manager, Liliana Baumbach III. -Willard Kuvalis V is retiring as a engineer. -Carolyn Jaskolski Jr., an manager, will be presenting the award. -Reta Franecki is retiring as a engineer. -Percival O'Kon has been an engineer for over a decade. -Kamryn Rath is a manager with Example Corp. -Hailey Dooley is a manager in the high tech industry. -Mrs. Brycen West has been an engineer for over a decade. -Margarette Miller will be the new manager for the team. -Our latest new employee, Cristian Pagac, has been a manager in the industry for 4 years. -Rosalee Bechtelar, an manager, will be presenting the award. -Help me welcome our newest manager, Lessie Lesch. -Iva Hegmann joins us as an manager on the Example project. -Hallie Schroeder is a manager. -Mr. Lola Volkman is a engineer in the high tech industry. -Arianna Wolf DDS has been an manager for over a decade. -Elliot Trantow has been a manager for 14 years. -Darrion Rath PhD has been a engineer for 14 years. -Coralie Effertz V has been an engineer for over a decade. -Our latest new employee, Ms. Corrine Effertz, has been a engineer in the industry for 4 years. 
-Our latest new employee, Ellie Keebler, has been a engineer in the industry for 4 years. -Tyrese Pfeffer is a engineer in the high tech industry. -Announcing manager Jayce Roberts. -Isobel Veum is retiring as a engineer. -Raphaelle Breitenberg is retiring as a engineer. -Maudie Labadie I has been an engineer for over a decade. -Rosario Langosh MD has been a engineer for 14 years. -Raheem Mohr joins us as an manager on the Example project. -Avery Lind joins us as an engineer on the Example project. -Nichole Waters has been a engineer for 14 years. -Blaise Gislason I has been a manager for 14 years. -Everette D'Amore has been a engineer for 14 years. -Darwin Conroy is a manager in the high tech industry. -Abdullah Heathcote is retiring as a engineer. -Burnice Treutel has been an manager for over a decade. -Libbie O'Hara is a engineer with Example Corp. -Rowan Will has been an manager for over a decade. -Gudrun Gleason is retiring as a engineer. -Our latest new employee, Fannie Quitzon, has been a manager in the industry for 4 years. -Help me welcome our newest engineer, Terence Gutkowski. -Tyshawn Rowe is a manager. -Jaylan Sanford, an manager, will be presenting the award. -Camille Schaden DVM, an manager, will be presenting the award. -Ms. Blaze Emmerich will be the new manager for the team. -Announcing engineer Sonny Stoltenberg. -Elsie Jacobson is a manager. -Our latest new employee, London Jacobs, has been a manager in the industry for 4 years. -Mr. Hassie Kuhn is retiring as a engineer. -Raul Bogan MD is a engineer. -Adrian Abshire joins us as an manager on the Example project. -Our latest new employee, Golden Kreiger, has been a manager in the industry for 4 years. -Deven Stiedemann I, an manager, will be presenting the award. -Hilario Koepp PhD has been a manager for 14 years. -Maynard Herzog will be the new manager for the team. -Announcing manager Nathaniel Torp. -Courtney Strosin is a manager with Example Corp. -Emely Lowe is a engineer with Example Corp. 
-Vilma Weber is retiring as a engineer. -Announcing engineer Ms. Carlee Littel. -Help me welcome our newest engineer, Hayden Mills. -Ervin Schimmel is a engineer. -Gino Ortiz is a engineer in the high tech industry. -Help me welcome our newest manager, Amani Conroy. -Announcing manager Korbin Lowe. -Turner Bogan I will be the new engineer for the team. -Ms. Jabari Bauch has been an manager for over a decade. -Mrs. Breanne Morissette is a engineer in the high tech industry. -Our latest new employee, Crystel Doyle, has been a manager in the industry for 4 years. -Help me welcome our newest engineer, Isabel VonRueden. -Dayne Cremin is a engineer. -Help me welcome our newest manager, Waino Armstrong. -Deborah Armstrong is retiring as a manager. -Ashlynn Mante DVM is a engineer in the high tech industry. -Karlie Pollich Jr. joins us as an engineer on the Example project. -Maeve Schroeder is a manager in the high tech industry. -Hanna Fadel, an engineer, will be presenting the award. -Delphia O'Hara has been a manager for 14 years. -Jamir Hammes has been an engineer for over a decade. -Nigel Ortiz is retiring as a engineer. -Pauline Ritchie is a manager. -Nicholaus Toy has been an engineer for over a decade. -Freddy Okuneva joins us as an manager on the Example project. -Announcing engineer Brionna Fritsch. -Our latest new employee, Maiya Mills, has been a engineer in the industry for 4 years. -Alia Hoeger PhD has been a manager for 14 years. -Alvina Mertz is a engineer in the high tech industry. -Help me welcome our newest engineer, Raymundo Hintz. -Zack Stamm is a engineer in the high tech industry. -Dayne Klocko has been a manager for 14 years. -Announcing engineer Kyla Cremin. -Izabella Bernhard will be the new engineer for the team. -Zena Yundt is a engineer in the high tech industry. -Announcing manager Daron Schuppe. -Mr. Amira Marvin joins us as an manager on the Example project. -Boris Morar will be the new manager for the team. 
-Esperanza Batz is retiring as a engineer. -Dortha Macejkovic I has been an manager for over a decade. -Porter Dach V, an engineer, will be presenting the award. -Kailyn Flatley I is a manager with Example Corp. -Celine O'Keefe has been a manager for 14 years. -Obie Rodriguez, an manager, will be presenting the award. -Cade Gorczany joins us as an engineer on the Example project. -Myles Shanahan is a engineer. -Jayne Wiza will be the new engineer for the team. -Julius Huel has been an manager for over a decade. -Ms. Rowena Kihn, an engineer, will be presenting the award. -Ena Wehner is a manager with Example Corp. -Clovis Cartwright will be the new manager for the team. -Mr. Marcelo D'Amore is a manager. -Meggie Prosacco has been an engineer for over a decade. -Lisa Schamberger PhD has been an engineer for over a decade. -Mrs. Lyda Bayer, an manager, will be presenting the award. -Newell Hettinger joins us as an manager on the Example project. -Melany Wolf, an manager, will be presenting the award. -Emil Schaefer has been an manager for over a decade. -Samson Trantow has been a manager for 14 years. -Maida Marquardt is a engineer. -Johnpaul Howe MD is a manager with Example Corp. -Our latest new employee, Mrs. Adah Lubowitz, has been a engineer in the industry for 4 years. -Ms. Imelda Kohler, an manager, will be presenting the award. -Manuela Frami I, an engineer, will be presenting the award. -Noelia Padberg has been an manager for over a decade. -Una Eichmann DDS is a engineer. -Elta Nolan has been an engineer for over a decade. -Jaron Wyman is a engineer in the high tech industry. -Help me welcome our newest manager, Kayla Windler Sr.. -Mrs. Zetta Stiedemann joins us as an manager on the Example project. -Dr. Abner Adams is retiring as a manager. -Brenden Ortiz is retiring as a manager. -German Funk is a engineer. -Mr. Liliane Konopelski, an manager, will be presenting the award. -Jarrett Morar is a manager in the high tech industry. 
-Parker Huels is a manager in the high tech industry. -Mrs. Alvah Bayer is a engineer with Example Corp. -Wilhelm Parker Jr. is a engineer in the high tech industry. -Leo Mertz will be the new engineer for the team. -Corine Hills is a engineer. -Coy Raynor Sr. is a engineer. -Help me welcome our newest engineer, Mallie Streich DVM. -Daisy Hoeger is a manager in the high tech industry. -Michelle Hickle is a engineer with Example Corp. -Nolan Douglas will be the new engineer for the team. -Vivian Bernier is retiring as a engineer. -Announcing engineer Brigitte Toy. -Help me welcome our newest engineer, Aniyah Schoen. -Emmie Bins is a engineer. -Mazie Weimann, an manager, will be presenting the award. -Carole Aufderhar will be the new engineer for the team. -Bernhard O'Kon will be the new engineer for the team. -Flavio Moore Sr. is a manager with Example Corp. -Our latest new employee, Justina Wuckert, has been a engineer in the industry for 4 years. -Meredith Jones joins us as an engineer on the Example project. -Help me welcome our newest manager, Gene Champlin. -Clare Fay, an manager, will be presenting the award. -Lesly Johnston II is retiring as a manager. -Our latest new employee, Cristian Kling, has been a manager in the industry for 4 years. -Candido Littel is a manager. -Help me welcome our newest engineer, Mrs. Gregory Ritchie. -Lucio Sawayn is a manager with Example Corp. -Announcing manager Derick Rath DVM. -Gabriella Dietrich is a engineer in the high tech industry. -Lula Spencer DVM has been an engineer for over a decade. -Help me welcome our newest engineer, Horacio Kulas. -Davin Vandervort DDS has been a engineer for 14 years. -Ms. Avery Wisoky will be the new engineer for the team. -Talon Williamson MD, an manager, will be presenting the award. -Gerald Hahn has been a manager for 14 years. -Ettie Yost is retiring as a manager. -Abdullah Mosciski is retiring as a engineer. -Mrs. Marielle Bosco is a engineer in the high tech industry. 
-Kory Batz joins us as an manager on the Example project. -Noelia Kovacek, an engineer, will be presenting the award. -Kyleigh Nienow is a manager with Example Corp. -Alize Lind will be the new engineer for the team. -Ellsworth Altenwerth has been an engineer for over a decade. -Domenic Mayer has been a engineer for 14 years. -Ms. Geovanny Satterfield has been an engineer for over a decade. -Ella Daniel is a manager with Example Corp. -Kylee Bogisich PhD has been an engineer for over a decade. -Ryder Wilkinson Sr. is a manager in the high tech industry. -Help me welcome our newest engineer, Marina Schaefer. -Ms. Paige Bartell is a manager in the high tech industry. -Mitchel Murray has been an engineer for over a decade. -Tyler Quigley, an manager, will be presenting the award. -Veronica Kreiger has been a engineer for 14 years. -Halie Goldner has been an engineer for over a decade. -Ryder Lakin has been an engineer for over a decade. -Chloe Legros is a engineer. -Dariana O'Conner joins us as an engineer on the Example project. -Era Bins Jr. is a engineer with Example Corp. -Help me welcome our newest manager, Laila Reichert. -Dedrick Kuhic V has been a engineer for 14 years. -Haylee Price Jr. is retiring as a engineer. -Help me welcome our newest manager, Callie Shields. -Jarrod Fahey will be the new manager for the team. -Earlene Cremin is a engineer in the high tech industry. -Ellie Bergstrom will be the new manager for the team. -Armando Grady has been a engineer for 14 years. -Announcing engineer Dr. Lamar Hessel. -Joe Runolfsson Sr. is a engineer with Example Corp. -Announcing manager Manley Oberbrunner. -Meta Weissnat II has been an engineer for over a decade. -Dagmar Batz IV is a engineer with Example Corp. -Our latest new employee, Susie Bayer, has been a engineer in the industry for 4 years. -Help me welcome our newest engineer, Randi Howell. -Joanne Rau is a manager in the high tech industry. -Buck Stark is a engineer. 
-Shane Donnelly is a engineer with Example Corp. -Quincy Casper V is retiring as a engineer. -Lafayette Grimes will be the new manager for the team. -Mrs. Jody Beer will be the new engineer for the team. -Miss Corene Schamberger has been an engineer for over a decade. -Dr. Jesse Baumbach is retiring as a engineer. -Brenna Quigley Jr. is a manager. -Ms. Viviane Bins joins us as an engineer on the Example project. -Help me welcome our newest manager, Edgar Johnson. -Mr. Tracy Beier has been a engineer for 14 years. -Juvenal Ortiz has been a engineer for 14 years. -Dr. Nat Pagac is retiring as a manager. -Our latest new employee, Julio Mitchell, has been a engineer in the industry for 4 years. -Help me welcome our newest engineer, Clarissa Mraz Jr.. -Dustin Grady will be the new manager for the team. -Brennon Bayer is a engineer. -Mrs. Birdie Nienow has been a manager for 14 years. -Announcing engineer Randal Sauer Sr.. -Verda Kozey will be the new engineer for the team. -Chesley Hickle is a engineer with Example Corp. -Miss Rico Block has been a engineer for 14 years. -Gaetano Lindgren has been a manager for 14 years. -Hope Hauck will be the new engineer for the team. -Percival O'Connell is a engineer in the high tech industry. -Melyna Leuschke is a manager. -Our latest new employee, Adan Collins, has been a manager in the industry for 4 years. -Daryl Bashirian has been an engineer for over a decade. -Kara Welch, an manager, will be presenting the award. -Norberto Swift has been an engineer for over a decade. -Effie Champlin Jr. will be the new engineer for the team. -Dr. Julia Metz joins us as an manager on the Example project. -Janice Witting is a manager in the high tech industry. -Bruce Eichmann, an manager, will be presenting the award. -Isobel Swift is a engineer. -Earnestine Mayer MD has been an manager for over a decade. -Michele Bashirian is a engineer in the high tech industry. -Janick Crona is a engineer in the high tech industry. 
-Hank Fisher III has been a manager for 14 years. -Miss Aliya Skiles is a engineer. -Herbert Orn has been an manager for over a decade. -Our latest new employee, Aglae Baumbach, has been a engineer in the industry for 4 years. -Gayle Carter will be the new engineer for the team. -Announcing manager Creola Kautzer. -Announcing manager Theresa Hauck. -Ms. Patience Wintheiser, an manager, will be presenting the award. -Announcing engineer Sebastian Rutherford DDS. -Ara Pollich I has been a engineer for 14 years. -Carlos Baumbach has been an manager for over a decade. -Hulda Schroeder DVM is a engineer with Example Corp. -Stanton Torp joins us as an manager on the Example project. -Our latest new employee, Ceasar Franecki, has been a manager in the industry for 4 years. -Lelah Miller is a engineer. -Wyman Schultz is a manager in the high tech industry. -Mae Harris V will be the new manager for the team. -Help me welcome our newest engineer, Hortense Koelpin. -Our latest new employee, Wilmer Deckow, has been a engineer in the industry for 4 years. -Zaria Ferry has been an engineer for over a decade. -Help me welcome our newest engineer, Pierre Cronin. -Ms. Brenna Leffler is a engineer with Example Corp. -Announcing engineer Dr. Keara Price. -Announcing manager Jane Schroeder. -Reymundo Heathcote has been a manager for 14 years. -Elton Schiller II is a manager with Example Corp. -Irma Blanda is a manager with Example Corp. -Mireya Turner, an manager, will be presenting the award. -Dawson Streich will be the new manager for the team. -Gianni Cassin is a manager in the high tech industry. -Johnathan Kuhic V joins us as an manager on the Example project. -Our latest new employee, Lacey Sawayn, has been a engineer in the industry for 4 years. +Miller Zulauf, an manager, will be presenting the award. +Mazie Gottlieb has been an manager for over a decade. +Johnathon Shields Sr. is a manager in the high tech industry. +Paula Windler I has been a manager for 14 years. 
+Kaley Gleichner V is a engineer with Example Corp. +Mr. Roosevelt Welch MD is a engineer in the high tech industry. +Help me welcome our newest engineer, Ms. Shirley Purdy V. +Rolando Klein has been a engineer for 14 years. +Our latest new employee, Ms. Sandy Osinski, has been a manager in the industry for 4 years. +Our latest new employee, Mr. Kristopher Bauch II, has been a manager in the industry for 4 years. +Ali Gleason is a engineer. +Our latest new employee, Americo Rempel, has been a engineer in the industry for 4 years. +Gladys Marvin DVM is a manager with Example Corp. +Danial Koch, an manager, will be presenting the award. +Mr. Leopold Bergnaum is a manager with Example Corp. +Davonte Larson will be the new manager for the team. +Announcing engineer Ms. Tiara Collier Jr.. +Jessie Price joins us as an engineer on the Example project. +Kathleen Kihn is retiring as a engineer. +Mr. Ruben Haag is a manager in the high tech industry. +Maegan Quitzon will be the new manager for the team. +Our latest new employee, Clifton McKenzie, has been a manager in the industry for 4 years. +Lily Collins joins us as an engineer on the Example project. +Janie Hartmann is retiring as a engineer. +Ms. Alexandra Torphy IV will be the new manager for the team. +Our latest new employee, Rosemarie Gerlach, has been a engineer in the industry for 4 years. +Emilio Schultz, an engineer, will be presenting the award. +Adalberto Reilly V joins us as an engineer on the Example project. +Jess Schulist is a manager with Example Corp. +Help me welcome our newest manager, Bianka Swaniawski. +Mr. Oliver Lowe is a engineer. +Winifred Mayert has been a manager for 14 years. +Lamar Bahringer has been an engineer for over a decade. +Celia Hamill is a manager with Example Corp. +Ms. Shanon Murphy IV joins us as an engineer on the Example project. +Mr. Doris Gislason IV is a manager with Example Corp. +Hulda Bechtelar has been a engineer for 14 years. 
+Raina Pfeffer IV has been an engineer for over a decade. +Lilyan Gulgowski will be the new engineer for the team. +Mr. Gilbert Ziemann is a manager in the high tech industry. +Our latest new employee, Rebeca Spencer, has been a engineer in the industry for 4 years. +Dion Goodwin is a manager with Example Corp. +Ms. Shemar Hand is a manager with Example Corp. +Announcing manager Mr. Adrien Marquardt DDS. +Ladarius Veum is a manager. +Josefina Ernser is retiring as a engineer. +Mr. Geovanny Schowalter DDS, an manager, will be presenting the award. +Lera Rohan is a manager. +Fredy Bogisich joins us as an manager on the Example project. +Vada Jones is a manager with Example Corp. +Alec Waelchi is a manager. +Violet Schneider will be the new engineer for the team. +Nestor Von has been a engineer for 14 years. +Ms. Zora Ortiz DVM joins us as an manager on the Example project. +Help me welcome our newest manager, Mortimer Borer. +Adam Rolfson is a manager with Example Corp. +Willie Cronin has been an engineer for over a decade. +Tianna Cole has been an engineer for over a decade. +Eliza McGlynn will be the new engineer for the team. +Announcing manager Fidel Heathcote. +Brendan Towne joins us as an manager on the Example project. +Announcing manager Dean Lowe. +Brycen Ortiz is a manager with Example Corp. +Ford Kertzmann has been a engineer for 14 years. +Alfredo Franecki is retiring as a engineer. +Monica Pouros Jr. will be the new manager for the team. +Ms. Assunta Fay Sr. has been an manager for over a decade. +Help me welcome our newest manager, Noah Lockman Jr.. +Lonny Ebert has been a engineer for 14 years. +Briana Howe, an manager, will be presenting the award. +Ms. Vivienne Kuvalis IV is a engineer. +Ms. Gracie Hilpert PhD is retiring as a manager. +Help me welcome our newest manager, Thea Cronin. +Help me welcome our newest engineer, Elenora Oberbrunner I. +Ms. Rebecca Bergnaum is retiring as a manager. +Hudson Lebsack will be the new manager for the team. 
+Sigurd Abbott joins us as an manager on the Example project. +Kennith Wiegand V has been an manager for over a decade. +Help me welcome our newest manager, Dalton Carter. +Raegan Balistreri, an engineer, will be presenting the award. +Our latest new employee, Brandy Cronin, has been a engineer in the industry for 4 years. +Ms. Adella Runolfsdottir DDS is a engineer. +Robin Walsh, an engineer, will be presenting the award. +Emmanuelle Rempel is a engineer in the high tech industry. +Our latest new employee, Mr. Jan Ullrich DVM, has been a engineer in the industry for 4 years. +Help me welcome our newest engineer, Stefan Eichmann. +Giles Keeling is a manager in the high tech industry. +Help me welcome our newest manager, Bell O'Reilly. +Help me welcome our newest manager, Molly Grant. +Karlee Anderson is a manager in the high tech industry. +Jordyn Heller is retiring as a engineer. +Our latest new employee, Eleonore Dibbert Sr., has been a engineer in the industry for 4 years. +Efrain Borer I is a manager with Example Corp. +Ms. Alisha Wiza is a engineer in the high tech industry. +David Haag joins us as an manager on the Example project. +Ms. Rebecca Schmeler has been an manager for over a decade. +Our latest new employee, Ebba Legros, has been a manager in the industry for 4 years. +Assunta McGlynn will be the new manager for the team. +Help me welcome our newest engineer, Joelle Heller II. +Rasheed Considine is a engineer in the high tech industry. +Mr. Jabari Borer is a engineer. +Our latest new employee, Mr. Americo O'Conner, has been a engineer in the industry for 4 years. +Our latest new employee, Mr. Johnny Kautzer DDS, has been a manager in the industry for 4 years. +Beverly Schimmel is a manager in the high tech industry. +Ms. Sarina Kuhn DDS is a engineer with Example Corp. +Eusebio Kertzmann II, an engineer, will be presenting the award. +Hilton O'Keefe will be the new engineer for the team. +Reina Weissnat is a manager. 
+Hassie Kling will be the new manager for the team. +Lera Gleason has been a engineer for 14 years. +Waldo Deckow is retiring as a manager. +Help me welcome our newest manager, Mr. Norris Kling. +Ms. Annabel Nikolaus, an manager, will be presenting the award. +Help me welcome our newest manager, Cheyanne Hilll. +Gage Jacobs, an manager, will be presenting the award. +Cassidy Stokes joins us as an manager on the Example project. +Georgianna Jacobs is a engineer with Example Corp. +Mr. Ubaldo Cormier joins us as an engineer on the Example project. +Laurie Berge, an manager, will be presenting the award. +Mr. Karl Stehr Jr. is retiring as a manager. +Mr. Jaquan Gibson joins us as an engineer on the Example project. +Delores Stiedemann, an engineer, will be presenting the award. +Anabelle Yundt is a manager in the high tech industry. +Gaston Bergstrom MD has been an engineer for over a decade. +Verona Halvorson has been an engineer for over a decade. +Ms. Bridie Thompson V has been a manager for 14 years. +Announcing manager Albertha Tremblay. +Courtney Becker joins us as an manager on the Example project. +Bessie Homenick is retiring as a manager. +Lilian Osinski is a engineer with Example Corp. +Bertrand Altenwerth has been a manager for 14 years. +Cruz Gottlieb is a engineer with Example Corp. +Willy Runte joins us as an manager on the Example project. +Ms. Estelle Carter will be the new manager for the team. +Arnoldo Emard is a manager. +Our latest new employee, Brenna Lockman, has been a engineer in the industry for 4 years. +Kelly Bergnaum joins us as an manager on the Example project. +Adah Labadie is a manager. +Nichole Koepp has been a manager for 14 years. +Help me welcome our newest engineer, Jettie Jacobs. +Kellie Brekke V is retiring as a manager. +Nya Russel IV is a engineer. +Rowan Kling is a engineer with Example Corp. +Help me welcome our newest engineer, Dariana Oberbrunner V. +Angelo Altenwerth I joins us as an engineer on the Example project. 
+Britney Herman is retiring as a manager. +Abigayle Reichel is a engineer in the high tech industry. +Ms. Miracle Grady IV is a manager in the high tech industry. +Ms. Alison Bartoletti IV joins us as an manager on the Example project. +Laney Nitzsche is a engineer with Example Corp. +Laura Terry PhD is a engineer. +Breanne Koepp has been a engineer for 14 years. +Announcing engineer Ines Medhurst. +Theodore Stokes is a manager. +Ms. Yasmine Schulist, an engineer, will be presenting the award. +Help me welcome our newest engineer, Adolfo O'Reilly. +Kaelyn Schaden joins us as an manager on the Example project. +Jorge Bergnaum has been a engineer for 14 years. +Ignacio Wyman joins us as an engineer on the Example project. +Dannie Will is a manager in the high tech industry. +Margaretta Abbott will be the new engineer for the team. +Our latest new employee, Briana Murray PhD, has been a engineer in the industry for 4 years. +Our latest new employee, Brady Rath, has been a engineer in the industry for 4 years. +Rahsaan Adams joins us as an manager on the Example project. +Mr. Hayley Dicki will be the new engineer for the team. +Colton Pouros, an engineer, will be presenting the award. +Vida Schultz joins us as an engineer on the Example project. +Announcing manager Alexanne Turcotte. +Reba Stroman is a manager with Example Corp. +Paxton O'Connell MD, an manager, will be presenting the award. +Don Lesch joins us as an engineer on the Example project. +Help me welcome our newest engineer, Mr. Carter Von IV. +Help me welcome our newest manager, Candida Boyer. +Mr. Raymond Krajcik DVM is a engineer with Example Corp. +Markus Waelchi has been a engineer for 14 years. +Malachi Cummerata has been an engineer for over a decade. +Mr. Edmond Dietrich IV has been a manager for 14 years. +Lenora Reynolds is a engineer in the high tech industry. +Zoie Kiehn is retiring as a engineer. +Mr. Mason Lang II is a manager with Example Corp. 
+Virginia Reilly has been an engineer for over a decade. +Claude Emmerich, an manager, will be presenting the award. +Sebastian Prosacco has been a manager for 14 years. +Mr. Dashawn O'Hara, an manager, will be presenting the award. +Emmy Turcotte III joins us as an engineer on the Example project. +Sincere Legros joins us as an manager on the Example project. +Help me welcome our newest engineer, Mauricio Reilly. +Ms. Kimberly Bogan I is a engineer in the high tech industry. +Ms. Cassandre Murphy PhD has been a engineer for 14 years. +Kaycee Kshlerin I is a engineer in the high tech industry. +Ms. Cheyanne Rodriguez PhD is a engineer with Example Corp. +Announcing manager Gayle Wiza. +Ms. Desiree Metz is retiring as a engineer. +Our latest new employee, Hettie Beier MD, has been a engineer in the industry for 4 years. +Shakira Ledner is a manager. +Forrest Ullrich will be the new engineer for the team. +Help me welcome our newest manager, Alysha Wolff III. +Mr. Lorenza Okuneva DVM is a engineer in the high tech industry. +Nasir Walsh will be the new manager for the team. +Jeanette Rogahn is a engineer in the high tech industry. +Gilda Funk I, an manager, will be presenting the award. +Julius Gusikowski is a engineer. +Help me welcome our newest engineer, Agnes Schuster. +Our latest new employee, Delphine Thompson, has been a engineer in the industry for 4 years. +Tressa Nolan joins us as an engineer on the Example project. +Leatha Reilly I is a manager in the high tech industry. +Announcing engineer Mr. Jamar Mueller. +Announcing manager Kyleigh Franecki. +Our latest new employee, Cornell Bashirian MD, has been a manager in the industry for 4 years. +Help me welcome our newest manager, Ramiro Bogan. +Robyn McKenzie will be the new manager for the team. +Our latest new employee, Destini Rippin, has been a manager in the industry for 4 years. +Eladio Mraz joins us as an manager on the Example project. +Jayda Moore III is a manager in the high tech industry. 
+Alivia Koelpin is retiring as a engineer. +Alva Muller is a engineer with Example Corp. +Ms. Karianne Dibbert Sr. is a manager with Example Corp. +Hazle Langworth is retiring as a manager. +Geraldine Koelpin DVM, an manager, will be presenting the award. +Margarett Swaniawski is retiring as a manager. +Major Mosciski has been a manager for 14 years. +Cara Kuphal is a manager with Example Corp. +Announcing engineer Deshawn Fisher. +Mr. Leopoldo VonRueden DDS, an manager, will be presenting the award. +Horacio O'Connell is a manager in the high tech industry. +Salvatore Hermann will be the new engineer for the team. +Dessie Franecki is a manager in the high tech industry. +Our latest new employee, Mr. Torrey Macejkovic, has been a manager in the industry for 4 years. +Help me welcome our newest engineer, Wendell Boyer. +Our latest new employee, Providenci Heidenreich, has been a engineer in the industry for 4 years. +Announcing engineer Mr. Dorthy Williamson DDS. +Announcing manager Ena Green DDS. +Ms. Aaliyah Bernhard is retiring as a manager. +Help me welcome our newest engineer, Grace Quitzon. +Germaine VonRueden is a manager. +Lionel Goldner will be the new manager for the team. +Pearline Osinski is retiring as a engineer. +Claud Mayert is a manager. +Palma Grady joins us as an engineer on the Example project. +Andre Conn has been a manager for 14 years. +Gerardo Wilderman is a manager in the high tech industry. +Our latest new employee, Ms. Lydia Moore, has been a engineer in the industry for 4 years. +Walter Kub has been a engineer for 14 years. +Elbert Gottlieb, an engineer, will be presenting the award. +Announcing engineer Arnoldo Quitzon. +Macy Conroy joins us as an engineer on the Example project. +Cecil Metz will be the new manager for the team. +Help me welcome our newest engineer, Ms. Loyce Torphy MD. +Rylan Wyman PhD has been a manager for 14 years. +Announcing manager Clair Luettgen DDS. +Mr. Rickey Lehner is a engineer. +Kian Walter is a engineer. 
+Ms. Coralie Carroll II is a manager. +Ms. Leann Gutkowski Sr. has been a engineer for 14 years. +Presley Hirthe has been a manager for 14 years. +Leola King has been a manager for 14 years. +Maribel Mueller has been an manager for over a decade. +Dean Hilll is a engineer in the high tech industry. +Rosella Green, an manager, will be presenting the award. +Clementine Klein is retiring as a engineer. +Mr. Riley Roob V will be the new manager for the team. +Newell Beier will be the new manager for the team. +Orval Rodriguez joins us as an manager on the Example project. +Announcing manager Ms. Neha Kerluke. +Mr. Roger Farrell V joins us as an engineer on the Example project. +Charlie Brakus is a manager with Example Corp. +Judge Nitzsche is retiring as a engineer. +Mr. Triston Jakubowski V is a engineer. +Announcing engineer Ms. Rhoda Luettgen. +Felicia Roob is a manager. +Our latest new employee, Wendy Gulgowski, has been a engineer in the industry for 4 years. +Announcing manager Ms. Destiny Stoltenberg. +Announcing engineer Mr. Jaylen Metz. +Ms. Annamarie Heathcote is a manager. +Cayla Heaney is a engineer with Example Corp. +Eleanora Cruickshank has been a manager for 14 years. +Susie Schowalter is retiring as a manager. +Ms. Clarabelle Reichert Sr. joins us as an engineer on the Example project. +Whitney Kuhn is retiring as a manager. +Mr. Dangelo Dibbert DDS joins us as an manager on the Example project. +Mr. Jess Willms II joins us as an engineer on the Example project. +Madisyn Waelchi PhD has been an engineer for over a decade. +Ms. Sister Tillman is a engineer in the high tech industry. +Fern Weimann DVM is retiring as a manager. +Dejah Kunze II will be the new manager for the team. +Mr. Rahul Lubowitz, an manager, will be presenting the award. +Antonia Zulauf DDS is retiring as a manager. +Ms. Valentina Johnson DDS, an manager, will be presenting the award. +Shaylee Sauer is a manager. +Ms. Shanie Ruecker is a engineer in the high tech industry. 
+Mandy Boehm III is retiring as a manager. +Greg Hintz has been a manager for 14 years. +Our latest new employee, Vesta Lockman, has been a engineer in the industry for 4 years. +Announcing manager Lester Bahringer. +Our latest new employee, Kianna McGlynn, has been a manager in the industry for 4 years. +Our latest new employee, Matteo Turner, has been a manager in the industry for 4 years. +Lexus Gorczany has been an engineer for over a decade. +Otilia Schumm has been an engineer for over a decade. +Ms. Gabriella Boyle is a manager in the high tech industry. +Announcing engineer Edward Block. +Keanu Russel Jr. joins us as an manager on the Example project. +Raquel Schroeder has been an manager for over a decade. +Help me welcome our newest engineer, Coralie Boyer. +Ms. Stefanie Ernser is a manager with Example Corp. +Ms. Dorothy Reynolds is a manager with Example Corp. +Riley Little MD will be the new engineer for the team. +Our latest new employee, Mr. Bennie Champlin Jr., has been a manager in the industry for 4 years. +Ms. Ettie Koelpin DVM will be the new engineer for the team. +Saige Schaefer joins us as an engineer on the Example project. +Kyra Barrows, an manager, will be presenting the award. +Felton Gerhold PhD is a manager with Example Corp. +Mr. Freeman Abshire PhD is a engineer with Example Corp. +Terrence O'Conner has been a manager for 14 years. +Ms. Clarabelle Gibson DDS is a engineer with Example Corp. +Nikko Nader is a manager with Example Corp. +Mr. Arturo Moore is retiring as a engineer. +Our latest new employee, Wayne Abshire, has been a engineer in the industry for 4 years. +Help me welcome our newest manager, Sydnee Schaden. +Help me welcome our newest engineer, Matteo Robel IV. +Our latest new employee, Maverick Wyman, has been a manager in the industry for 4 years. +Our latest new employee, Mr. Westley Predovic V, has been a manager in the industry for 4 years. 
+Our latest new employee, Selina Donnelly, has been a manager in the industry for 4 years. +Mr. Alfonzo Von I is a engineer with Example Corp. +Doug Ruecker is a engineer. +Denis Yost joins us as an manager on the Example project. +Our latest new employee, Cyril Robel Sr., has been a engineer in the industry for 4 years. +Announcing engineer Ford Jacobi. +Dillan Grimes is a engineer. +Our latest new employee, Ms. Cecelia Kiehn PhD, has been a engineer in the industry for 4 years. +Rachael Kshlerin is a engineer with Example Corp. +Raven Kihn is retiring as a manager. +Help me welcome our newest engineer, Ms. Eileen Friesen. +Announcing manager Mr. Jett Bernhard. +Haylie Spinka has been an engineer for over a decade. +Kailey Towne Jr. has been a manager for 14 years. +Our latest new employee, Mr. Jed Rutherford, has been a manager in the industry for 4 years. +Angel Schneider is a manager. +Help me welcome our newest manager, Emma Pacocha. +Help me welcome our newest manager, Jeremie Rogahn. +Porter Herman is a engineer. +Priscilla Stanton joins us as an engineer on the Example project. +Help me welcome our newest manager, Aurelio Turner. +Melvin McLaughlin is retiring as a engineer. +Help me welcome our newest manager, Brandy Friesen. +Angelo Pfeffer has been a manager for 14 years. +Our latest new employee, Mr. Jerrold Prohaska, has been a engineer in the industry for 4 years. +Margret Turcotte has been a engineer for 14 years. +Help me welcome our newest engineer, Kathryne Schaefer II. +Kamille Blanda is a engineer with Example Corp. +Addie Tremblay, an manager, will be presenting the award. +Help me welcome our newest manager, Cathrine Bernier. +Jesus Bashirian is a engineer in the high tech industry. +Dillon Hamill is retiring as a manager. +Granville Lemke is retiring as a engineer. +Vito Greenholt III is a engineer with Example Corp. +Evie Kunde is a manager with Example Corp. +Lela Renner joins us as an manager on the Example project. 
+Brendon Reynolds will be the new manager for the team. +Mr. Brandt Larson joins us as an engineer on the Example project. +Mr. Arthur Brakus Jr. joins us as an manager on the Example project. +Haylie Gerhold will be the new manager for the team. +Help me welcome our newest engineer, Emile Crist. +Mr. Kale Schamberger DVM has been an engineer for over a decade. +Ms. Oceane Kerluke II will be the new manager for the team. +Matteo Auer has been a manager for 14 years. +Announcing engineer Audie Kreiger IV. +Mr. Niko Yost Jr. is retiring as a engineer. +Sage Bruen will be the new engineer for the team. +Our latest new employee, Francisco Feeney, has been a engineer in the industry for 4 years. +Jared Koelpin, an manager, will be presenting the award. +Our latest new employee, Claud Lesch, has been a engineer in the industry for 4 years. +Announcing engineer Sherman Okuneva Jr.. +Help me welcome our newest engineer, Jeramy Strosin. +Amya Schumm is a engineer with Example Corp. +Our latest new employee, Tracey Wunsch, has been a manager in the industry for 4 years. +Krystal Kunze is retiring as a engineer. +Talon Wyman V, an engineer, will be presenting the award. +Brice Mills has been a manager for 14 years. +Scottie Bergstrom has been a manager for 14 years. +Ms. Shanny Beatty is a manager with Example Corp. +Shannon Witting is a manager. +Announcing manager Taya Fahey V. +Our latest new employee, Ms. Lauriane Sanford, has been a manager in the industry for 4 years. +Emilia Lesch has been an manager for over a decade. +Randal Predovic is a engineer in the high tech industry. +Piper Heidenreich I is a engineer with Example Corp. +Jakob Kassulke, an manager, will be presenting the award. +Wilma Satterfield is retiring as a engineer. +Announcing manager Dahlia Paucek. +Peyton Will Jr. has been a manager for 14 years. +Reginald Durgan Sr. has been an engineer for over a decade. +Miracle Sauer will be the new manager for the team. 
+Dewayne Wintheiser will be the new manager for the team. +Announcing manager Emelia Langworth. +Ms. Elissa Reichel is retiring as a manager. +Gillian Ledner, an engineer, will be presenting the award. +Announcing engineer Mr. Morgan Bernier IV. +Mr. Brooks Prosacco, an engineer, will be presenting the award. +Mr. Junius Hintz is a engineer in the high tech industry. +Astrid Heathcote DVM is a manager. +Our latest new employee, Ms. Brooklyn Parker, has been a manager in the industry for 4 years. +Help me welcome our newest manager, Domingo Heller. +Mr. Jayde Dooley Sr. has been an engineer for over a decade. +Jada Carroll Sr. joins us as an engineer on the Example project. +Enoch Hammes has been an manager for over a decade. +Our latest new employee, Ms. Mckayla Jast, has been a engineer in the industry for 4 years. +Ulices Wunsch has been an manager for over a decade. +Ms. Jacklyn Bayer I is a engineer. +Our latest new employee, Adolph Kshlerin, has been a engineer in the industry for 4 years. +Magnolia Baumbach PhD has been an manager for over a decade. +Noelia Windler is a engineer in the high tech industry. +Jaeden Hand is a manager. +Heather Wilderman joins us as an manager on the Example project. +Daphne Hamill is retiring as a manager. +Octavia Durgan will be the new manager for the team. +Anibal Kerluke is retiring as a manager. +Mr. Orval Hammes Jr. is retiring as a manager. +Kristin Conn is a manager. +Magdalen Dietrich IV is retiring as a manager. +Ms. Haylie Rosenbaum MD is a engineer in the high tech industry. +Help me welcome our newest engineer, Elenor Howe. +Help me welcome our newest engineer, Yvonne Roberts. +Oda Wisoky has been an manager for over a decade. +Help me welcome our newest engineer, Ms. Aryanna Grimes Jr.. +Jocelyn Wilkinson joins us as an manager on the Example project. +Announcing engineer Neil Hirthe. +Aric Heidenreich has been a manager for 14 years. +Help me welcome our newest engineer, Ms. Julia Carter. 
+Darlene Pacocha is a manager. +Kathryne Hoeger is a engineer. +Alysson Carter V is a engineer with Example Corp. +Mr. Nicolas Blick V is a engineer with Example Corp. +Elaina Von, an engineer, will be presenting the award. +Ms. Ellen Mayer IV is a manager with Example Corp. +Our latest new employee, Darryl Casper, has been a engineer in the industry for 4 years. +Announcing engineer Mr. Kadin Metz V. +Ms. Electa Quitzon Sr. joins us as an manager on the Example project. +Mr. Hiram Howe MD is a manager in the high tech industry. +Ole Marvin joins us as an manager on the Example project. +Our latest new employee, Claudia Rice, has been a manager in the industry for 4 years. +Forest Swaniawski is a manager in the high tech industry. +Roslyn Dickens has been a engineer for 14 years. +Ms. Janis Wuckert V is a manager in the high tech industry. +Ms. Meda Jerde DVM is a engineer. +Dolores Schiller II has been a engineer for 14 years. +Mr. Enid Keeling is a engineer with Example Corp. +Our latest new employee, Burnice Bosco DDS, has been a engineer in the industry for 4 years. +Katheryn Becker, an manager, will be presenting the award. +Samantha Schmidt will be the new engineer for the team. +Mr. Royce Bashirian is a engineer. +Mitchell Parker is a engineer. +Ms. Eryn Bins is a engineer. +Announcing engineer Sven Harvey. +Help me welcome our newest manager, Mr. Melany Dibbert. +Announcing manager Ms. Ardella Sipes Jr.. +Joy Reynolds will be the new manager for the team. +Help me welcome our newest manager, Lyda Quitzon. +Sophie Gusikowski, an engineer, will be presenting the award. +Ashly Jacobi will be the new engineer for the team. +Our latest new employee, Haylie Maggio, has been a engineer in the industry for 4 years. +Deshaun Walsh, an engineer, will be presenting the award. +Mr. Malachi Gibson is a manager in the high tech industry. +Edna Fadel will be the new manager for the team. +Ms. Eula Bogan will be the new manager for the team. +Mr. 
Javier Kemmer DDS joins us as an manager on the Example project. +Nicole Conroy will be the new manager for the team. +Cordie Wunsch V is a manager. +Pablo Jones will be the new engineer for the team. +Ms. Jolie Schuppe DDS is a manager in the high tech industry. +Amya Swift, an engineer, will be presenting the award. +Alessandro Bartoletti will be the new manager for the team. +Our latest new employee, Sonya Schmitt, has been a manager in the industry for 4 years. +Ava D'Amore has been an engineer for over a decade. +Raphaelle Murphy will be the new manager for the team. +Seth King DVM has been an manager for over a decade. +Mr. Wilber Schaden III will be the new manager for the team. +Brian White is a engineer. +Sage Parker has been a manager for 14 years. +Katharina Wiegand is a manager. +Eric Weimann, an engineer, will be presenting the award. +Marcellus Jakubowski has been an engineer for over a decade. +Angeline Pouros has been an manager for over a decade. +Samara Veum will be the new engineer for the team. +Alberto Zemlak MD has been an engineer for over a decade. +Liza Nolan III, an manager, will be presenting the award. +Guy Turcotte is a manager. +Danyka Stamm MD is a manager with Example Corp. +Alverta Dach is retiring as a engineer. +Maybelline Kihn is a engineer with Example Corp. +Carlo Ratke has been a manager for 14 years. +Margaretta Von is a engineer. +Pinkie Green is a engineer in the high tech industry. +Mr. Elvis Gulgowski, an manager, will be presenting the award. +Chanel Harber joins us as an engineer on the Example project. +Carroll Gutmann will be the new manager for the team. +Emma Stehr is a manager with Example Corp. +Rusty Gerhold will be the new manager for the team. +Help me welcome our newest engineer, Nella Graham. +Carolyne Rippin DDS is retiring as a engineer. +Our latest new employee, Conor Rowe, has been a manager in the industry for 4 years. +Gilda Mayert II has been a engineer for 14 years. +Our latest new employee, Mr. 
Cale Ortiz Jr., has been a engineer in the industry for 4 years. +Morgan Parisian IV will be the new engineer for the team. +Help me welcome our newest engineer, Carolanne Russel. +Grayson VonRueden DVM joins us as an engineer on the Example project. +Ms. Jazmyne Larkin I will be the new manager for the team. +Courtney Hirthe is a engineer with Example Corp. +Announcing manager Maximus Shanahan MD. +Felipa Wiegand has been a engineer for 14 years. +Ms. Madalyn Corwin III is a engineer. +Eldridge West joins us as an engineer on the Example project. +Ms. Cleta Renner is a manager with Example Corp. +Katelynn Erdman will be the new manager for the team. +Scotty Cassin is a manager in the high tech industry. +Melyna Kertzmann is a engineer. +Mariam McCullough joins us as an engineer on the Example project. +Demarcus Donnelly V is retiring as a engineer. +Help me welcome our newest manager, Mr. Ansley Bernhard Sr.. +Chad Ondricka V has been an engineer for over a decade. +Liliana Hessel is retiring as a manager. +Ransom Crist has been an manager for over a decade. +Aisha Boehm has been an engineer for over a decade. +Adaline O'Reilly PhD is retiring as a engineer. +Announcing manager Dayna Hoppe. +Help me welcome our newest engineer, Mr. Lee Kreiger Jr.. +Help me welcome our newest manager, Payton Cummerata. +Colten Luettgen joins us as an manager on the Example project. +Ms. Nakia Johns has been a engineer for 14 years. +Freida Strosin has been an manager for over a decade. +Brad Bednar has been an engineer for over a decade. +Antone McLaughlin has been an manager for over a decade. +Izabella Berge is a engineer with Example Corp. +Carole Lowe V is a engineer in the high tech industry. +Bud Leuschke is a engineer in the high tech industry. +Archibald Hilpert joins us as an manager on the Example project. +Fernando Ebert joins us as an manager on the Example project. +Jeramy Abshire is a engineer with Example Corp. +Help me welcome our newest engineer, Jannie Weber. 
+Announcing engineer Mr. Mason Wolff MD. +Nelle Kutch III, an engineer, will be presenting the award. +Help me welcome our newest engineer, Keshawn Sporer. +Caterina Kunze PhD has been an engineer for over a decade. +Announcing manager Dashawn Block. +Shania Witting is retiring as a manager. +Amos Stamm is a engineer. +Announcing manager Alexandra Reichel Jr.. +Mr. Edwardo Schneider V joins us as an manager on the Example project. +Announcing manager Ms. Brigitte Waelchi. +Jeanne Keeling is a engineer in the high tech industry. +Forest Von has been a manager for 14 years. +Mr. Casimer Jast joins us as an manager on the Example project. +Ms. Ebba Conn has been a manager for 14 years. +Jany Yost is a engineer in the high tech industry. +Announcing manager Hassie Mosciski. +Mr. Zion Rosenbaum Sr. has been an manager for over a decade. +Ignacio Thompson MD is retiring as a manager. +Jeanette Conroy IV is retiring as a manager. +Garrick Graham is a manager in the high tech industry. +Misael Kuvalis has been a manager for 14 years. +Helene Dickinson III will be the new engineer for the team. +Help me welcome our newest manager, Reilly Murazik. +Our latest new employee, Jadon Ullrich, has been a engineer in the industry for 4 years. +Ms. Audra Borer has been a manager for 14 years. +Earnestine Sauer joins us as an engineer on the Example project. +Myrl Gerlach is a engineer. +Mr. Garret Krajcik is a engineer with Example Corp. +Help me welcome our newest manager, Emmet Hayes. +Ahmed Donnelly Jr. is a engineer. +Mr. Alvah Schimmel joins us as an manager on the Example project. +Ms. Marilyne Harris I has been a engineer for 14 years. +Ms. Casandra Zulauf has been an manager for over a decade. +Alfred Yundt has been a manager for 14 years. +Ford Herman has been a manager for 14 years. +Paige Beier, an engineer, will be presenting the award. +Myrtle Runte has been a engineer for 14 years. +Announcing engineer Roy Rosenbaum. +Announcing engineer Mr. Julius Borer III. 
+Noemi Hamill has been a engineer for 14 years. +Francisco Koelpin PhD, an manager, will be presenting the award. +Ms. Pascale Emard joins us as an manager on the Example project. +Daryl Wisozk DVM joins us as an manager on the Example project. +Mr. Noble Runolfsson, an manager, will be presenting the award. +Ms. Daija Schoen joins us as an engineer on the Example project. +Ubaldo Dach is retiring as a engineer. +Katrine Parker PhD, an manager, will be presenting the award. +Ms. Martine Treutel IV has been an manager for over a decade. +Help me welcome our newest engineer, Ms. Alice Emard II. +Garrison Nikolaus, an engineer, will be presenting the award. +Mr. Tanner Baumbach IV has been a engineer for 14 years. +Our latest new employee, Austin Wisozk II, has been a manager in the industry for 4 years. +Announcing manager Mr. Saul Little. +Ms. Ashtyn O'Conner is a engineer. +Mr. Norval Wisozk is a engineer. +Ms. Daniella Murphy I, an manager, will be presenting the award. +Rocky Osinski is retiring as a manager. +Help me welcome our newest manager, Ms. Maybelline Powlowski. +Aliyah Lindgren PhD will be the new engineer for the team. +Bradley Hauck, an engineer, will be presenting the award. +Ramiro Kilback is a manager. +Help me welcome our newest manager, Colten Hermann. +Our latest new employee, Neva Hoppe, has been a manager in the industry for 4 years. +Ms. Stephania McGlynn PhD is a manager in the high tech industry. +Florencio Williamson has been a manager for 14 years. +Ruby Langworth will be the new engineer for the team. +Our latest new employee, Mr. Stevie Corkery, has been a manager in the industry for 4 years. +Birdie Herzog V joins us as an engineer on the Example project. +Mr. Boyd Harvey has been an engineer for over a decade. +Ms. Josianne Hauck I joins us as an manager on the Example project. +Help me welcome our newest manager, Jose Marquardt V. +Cullen Gutmann is a manager. +Mr. Ola Brown DVM is a engineer with Example Corp. 
+Blanca Gleichner has been a engineer for 14 years. +Mr. Evan Lesch is retiring as a manager. +Hazle Grimes has been a engineer for 14 years. +Mr. Ottis Walsh III is a manager with Example Corp. +Sigmund Nienow is retiring as a engineer. +Mr. Tyrel Hermiston DDS has been an engineer for over a decade. +Mr. Woodrow Stark is retiring as a engineer. +Twila Flatley will be the new engineer for the team. +Mr. Murl Howe DDS is a manager with Example Corp. +Kathryne Price has been an engineer for over a decade. +Ms. Hellen Lind IV has been a manager for 14 years. +Ms. Alyson Lehner, an manager, will be presenting the award. +Verda Hyatt is a manager. +Announcing engineer Viola Pacocha. +Lorenza Dietrich is a manager. +Ms. Lavada Runte II has been an manager for over a decade. +Doris Rohan, an manager, will be presenting the award. +Santina Sauer is retiring as a engineer. +Help me welcome our newest engineer, Adrianna Parisian. +Ms. Maurine Senger III is a engineer. +Ms. Delfina Murray II is retiring as a manager. +Jane Kilback is a engineer with Example Corp. +Our latest new employee, Mr. Kirk Braun DDS, has been a manager in the industry for 4 years. +Maynard Hills has been an manager for over a decade. +Announcing engineer Emmitt Marks. +Patience Hilll has been a manager for 14 years. +Dave Volkman, an engineer, will be presenting the award. +Sibyl Gleichner has been a manager for 14 years. +Ressie Koelpin is retiring as a manager. +Erwin Beer will be the new manager for the team. +Geovanni Goodwin is a manager with Example Corp. +Announcing manager Alexanne Mills. +Our latest new employee, Ms. Meaghan Kris DDS, has been a manager in the industry for 4 years. +Mr. Stanton Nicolas is a engineer in the high tech industry. +Elliot Welch is retiring as a engineer. +Mr. Mathew Schowalter joins us as an engineer on the Example project. +Announcing manager Ms. May Lowe MD. +Ms. Henriette Rau joins us as an manager on the Example project. 
+Reuben Wiza IV has been an engineer for over a decade. +Our latest new employee, Mr. Roman Bernhard, has been a engineer in the industry for 4 years. +Norval Breitenberg is a engineer. +Deshawn Gibson joins us as an engineer on the Example project. +Nola Corwin has been a engineer for 14 years. +Bernadine Beatty III has been a manager for 14 years. +Berneice Baumbach is a engineer with Example Corp. +Aaliyah Brown will be the new engineer for the team. +Mariana Mills is a engineer with Example Corp. +Our latest new employee, Filiberto Jenkins, has been a engineer in the industry for 4 years. +Our latest new employee, Madisen Bode, has been a manager in the industry for 4 years. +Brant Kuvalis Sr. has been an manager for over a decade. +Announcing manager Mr. Wiley Green. +Nova Padberg is a engineer in the high tech industry. +Help me welcome our newest engineer, Rosanna Carter. +Jacquelyn Rau is a manager in the high tech industry. +Mr. Gunner Kub I is retiring as a manager. +Our latest new employee, Roberto Spinka, has been a manager in the industry for 4 years. +Bettie Greenfelder is retiring as a engineer. +Our latest new employee, Ms. Nedra Towne, has been a manager in the industry for 4 years. +Ms. Amely Cartwright is a engineer in the high tech industry. +Our latest new employee, Ms. Joy Wiegand, has been a engineer in the industry for 4 years. +Announcing manager Kayley Moore DDS. +Our latest new employee, Philip Mayer, has been a manager in the industry for 4 years. +Ms. Mae Bartell, an manager, will be presenting the award. +Efren Paucek has been a manager for 14 years. +Help me welcome our newest manager, Mr. Marques Mayert DDS. +Zelma Zboncak joins us as an manager on the Example project. +Camila Buckridge, an manager, will be presenting the award. +Our latest new employee, Mr. Savion Trantow V, has been a engineer in the industry for 4 years. +Lonny Bahringer has been an engineer for over a decade. +Mr. Valentin Stehr has been a manager for 14 years. 
+Chanelle Willms, an engineer, will be presenting the award. +Help me welcome our newest manager, Holly Swaniawski V. +Eloy Bailey, an engineer, will be presenting the award. +Doris Buckridge will be the new engineer for the team. +Help me welcome our newest manager, Mr. Zackary Fahey MD. +Nella DuBuque is a manager. +Help me welcome our newest manager, Kenya Cronin. +Blanche Torp is a engineer with Example Corp. +Our latest new employee, Estevan Walsh, has been a engineer in the industry for 4 years. +Kurt Greenholt Sr. is a manager with Example Corp. +Beverly Torp is a manager. +Ms. Dorothea Hackett is a manager with Example Corp. +Brigitte Kunde will be the new engineer for the team. +Jackeline Harber has been an manager for over a decade. +Ms. Justine Deckow Sr. has been an engineer for over a decade. +Vicenta Rolfson joins us as an engineer on the Example project. +Holly Tremblay is a manager with Example Corp. +Emilio Windler, an manager, will be presenting the award. +Erin Beier is a engineer with Example Corp. +Help me welcome our newest manager, Kali Terry. +Mr. Aron Walsh is a manager with Example Corp. +Ms. Veronica Ankunding is a manager. +Amaya Adams PhD is a manager with Example Corp. +Milford Watsica II is retiring as a manager. +Ms. Prudence Harvey will be the new manager for the team. +Brice Schinner is a manager. +Kayden Witting will be the new engineer for the team. +Shad Beatty is retiring as a manager. +Ms. Sunny Braun DVM is a engineer in the high tech industry. +Ally Kiehn, an engineer, will be presenting the award. +Sierra Dietrich Sr., an manager, will be presenting the award. +Announcing engineer Diego Hoeger. +Allene Stokes will be the new engineer for the team. +Mr. Chaz Kemmer III is a manager in the high tech industry. +Announcing engineer Ms. Lurline Kessler II. +Carli Osinski is retiring as a manager. +Prince Kunze is a manager. +Jared Schoen has been an engineer for over a decade. +Help me welcome our newest engineer, Sven Lesch. 
+Alec Ernser has been an manager for over a decade. +Announcing engineer Jean Daniel. +Ambrose Hansen has been an engineer for over a decade. +Ms. Aubree Cruickshank, an engineer, will be presenting the award. +Ms. Aniya Doyle is a manager. +Help me welcome our newest manager, Julianne Weissnat. +Emanuel Friesen I is a manager in the high tech industry. +Ms. Trycia Schulist DDS is a engineer in the high tech industry. +Caesar Runolfsson II has been a manager for 14 years. +Torrance Blick joins us as an manager on the Example project. +Kailyn Ortiz is a engineer. +Mackenzie Farrell V, an engineer, will be presenting the award. +Cristian Quitzon III is a engineer in the high tech industry. +Josie O'Connell PhD is a manager. +Kristian Hermann will be the new engineer for the team. +Brady Halvorson is a engineer. +Foster Monahan is a manager in the high tech industry. +Mr. Aron Dietrich V is retiring as a manager. +Ms. Delilah Blick I is retiring as a manager. +Mr. Ulises Kuvalis IV will be the new manager for the team. +Ms. Kimberly Goodwin DDS has been an engineer for over a decade. +Help me welcome our newest engineer, Ms. Tia Kling III. +Mr. Jovanny Hoppe IV, an manager, will be presenting the award. +Mohamed Brekke I has been a manager for 14 years. +Dillan Jenkins will be the new manager for the team. +Alyce Willms will be the new engineer for the team. +Claudine Corkery, an manager, will be presenting the award. +Royal Krajcik has been a manager for 14 years. +Mr. Tyrese Erdman has been a engineer for 14 years. +Geo Reichel is retiring as a engineer. +Help me welcome our newest engineer, Euna Vandervort. +Anjali Moen, an manager, will be presenting the award. +Ms. Yvette Treutel PhD will be the new engineer for the team. +Our latest new employee, Kolby Yundt, has been a manager in the industry for 4 years. +Our latest new employee, Mr. Kelton Graham, has been a engineer in the industry for 4 years. +Don Corkery is retiring as a engineer. 
+Wilburn Stark, an engineer, will be presenting the award. +Estel Lesch has been a manager for 14 years. +Kara Hoeger Sr. has been a manager for 14 years. +Mr. Rhiannon Daugherty is a manager with Example Corp. +Mr. Diego Wolff joins us as an manager on the Example project. +Fay Hansen has been a manager for 14 years. +Imani Fritsch is retiring as a manager. +Our latest new employee, Derrick Fay, has been a manager in the industry for 4 years. +Newell Marks has been a manager for 14 years. +Lexie Koch is retiring as a engineer. +Irma Mayert has been an manager for over a decade. +Announcing engineer Schuyler Prosacco. +Madaline Trantow is a engineer with Example Corp. +Abbie Witting II will be the new engineer for the team. +Jocelyn Sawayn PhD joins us as an manager on the Example project. +Ms. Laisha Auer is a manager with Example Corp. +Our latest new employee, Gregoria Krajcik, has been a manager in the industry for 4 years. +Rosalind Considine is retiring as a engineer. +Noemie Howell has been a engineer for 14 years. +Micah Pollich is retiring as a engineer. +Mr. Jett Parisian DVM is retiring as a engineer. +Edythe Haley PhD joins us as an engineer on the Example project. +Our latest new employee, Carlos Breitenberg, has been a manager in the industry for 4 years. +Announcing engineer Lina Bayer DVM. +Ms. Ora Cremin is a manager in the high tech industry. +Announcing engineer Mr. Hilario Bogan. +Ramona Turcotte, an manager, will be presenting the award. +Elisabeth Sanford is retiring as a manager. +Our latest new employee, Telly Streich, has been a manager in the industry for 4 years. +Ms. Naomie Feest I joins us as an manager on the Example project. +Ariane Mayer is a engineer with Example Corp. +German Kuhlman IV is a engineer. +Announcing engineer Ruby Tremblay Sr.. +Ocie Pollich has been a manager for 14 years. +Jaqueline Bailey is a engineer with Example Corp. +Mr. Kyler VonRueden will be the new engineer for the team. 
+Cydney Stanton MD has been an engineer for over a decade. +Our latest new employee, Marjorie Ritchie, has been a engineer in the industry for 4 years. +Our latest new employee, Marjolaine Kuphal, has been a engineer in the industry for 4 years. +Our latest new employee, Jewell McDermott, has been a engineer in the industry for 4 years. +Help me welcome our newest engineer, Armand Batz. +Gudrun Moen is a engineer in the high tech industry. +Help me welcome our newest manager, Ms. Hilda Friesen. +Announcing engineer Annalise Kshlerin. +Josephine Thiel is a engineer with Example Corp. +Casper Towne III is a engineer with Example Corp. +Buster Lubowitz is a manager in the high tech industry. +Ms. Jazmin Schultz is a manager with Example Corp. +Mr. Austyn Schuppe is a manager with Example Corp. +Mr. Geovanni Wyman will be the new manager for the team. +Mr. Fletcher Rice will be the new manager for the team. +Carleton Stoltenberg, an engineer, will be presenting the award. +Evan Jaskolski DDS is a engineer. +Help me welcome our newest engineer, Ms. Mertie Collins. +Help me welcome our newest engineer, Marianna Pfannerstill Jr.. +Conor Wilderman has been an manager for over a decade. +Our latest new employee, Jordan Mann, has been a engineer in the industry for 4 years. +Mr. Mavis Gutkowski is retiring as a engineer. +Griffin Ratke Jr. has been an engineer for over a decade. +Dan Funk will be the new manager for the team. +Help me welcome our newest engineer, Rafael Denesik. +Our latest new employee, Jazmyne Runolfsson DDS, has been a engineer in the industry for 4 years. +Our latest new employee, Aurore Dach, has been a engineer in the industry for 4 years. +Help me welcome our newest manager, Alvah Gaylord. +Mr. Jerod Gleichner joins us as an engineer on the Example project. +Ms. Brisa Ledner is a manager in the high tech industry. +Ms. Rahsaan Stokes is retiring as a engineer. +Our latest new employee, Mr. 
Wyatt Stokes MD, has been a engineer in the industry for 4 years. +Help me welcome our newest engineer, Molly Smitham. +Madison Block has been a manager for 14 years. +Briana Dickens has been a engineer for 14 years. +Javonte Von has been an manager for over a decade. +Help me welcome our newest manager, Marley Erdman. +Luciano Christiansen will be the new manager for the team. +Skye Schinner is a engineer with Example Corp. +Announcing manager Icie Denesik. +Lulu Thiel is retiring as a engineer. +Deion Little has been an engineer for over a decade. +Help me welcome our newest manager, Serenity Davis MD. +Wilburn Muller is retiring as a manager. +Allen Jacobson is a manager in the high tech industry. +Ms. Jada Krajcik V joins us as an manager on the Example project. +Chester Klein is a manager. +Help me welcome our newest manager, Graciela Bailey. +Myrtice Bergnaum is a manager in the high tech industry. +Help me welcome our newest manager, Mr. Allan Howell. +Dolly Hagenes Jr. has been an engineer for over a decade. +Audreanne Beahan is a engineer in the high tech industry. +Kraig Kuvalis, an manager, will be presenting the award. +Fausto Stokes has been an engineer for over a decade. +Announcing manager Una Orn. +Ms. Yasmine Pfeffer, an engineer, will be presenting the award. +Announcing engineer Ms. Cassie Brakus. +Ora Lynch MD, an manager, will be presenting the award. +Viviane Botsford MD is a manager with Example Corp. +Jade Stamm is a manager with Example Corp. +Norene Keebler II joins us as an manager on the Example project. +Orlando Reilly has been a engineer for 14 years. +Announcing manager Ms. Gertrude Greenholt. +Zora Koelpin is a manager with Example Corp. +Help me welcome our newest manager, Everett Funk. +Anthony Connelly Jr. is a engineer in the high tech industry. +Ruthie Parker is retiring as a manager. +Orlo Zemlak will be the new engineer for the team. +Baylee Kirlin Sr. is a engineer. +Catharine Bosco has been an manager for over a decade. 
+Clotilde Kshlerin has been a engineer for 14 years. +Ms. Shanon Strosin PhD is a manager. +Kim Kub is a engineer. +Chase Walsh III is a manager in the high tech industry. +Ms. Dahlia Gleason is retiring as a manager. +Mr. Nelson Reilly has been an manager for over a decade. +Viola Schoen will be the new manager for the team. +Yesenia Jones has been a engineer for 14 years. +Lavinia Beier is a engineer with Example Corp. +Help me welcome our newest engineer, Marcella Wunsch. +Zelma Hirthe is retiring as a engineer. +Shany Borer is a engineer with Example Corp. +Eusebio Stroman has been a manager for 14 years. +Mr. Frankie Johnson V has been a engineer for 14 years. +Mr. Liam Gleason is retiring as a manager. +Moises Greenfelder II has been a engineer for 14 years. +Announcing manager Ms. Gretchen Corkery. +Ms. Hassie Haley Jr. will be the new manager for the team. +Haleigh Carter is a manager in the high tech industry. +Announcing manager Keara Schuster. +Christelle Armstrong will be the new engineer for the team. +Misael Hamill is retiring as a engineer. +Mr. Dillan Friesen joins us as an manager on the Example project. +Mr. Jaydon Mann IV, an engineer, will be presenting the award. +Audra Goodwin has been a engineer for 14 years. +Mr. Jovan Wilkinson, an engineer, will be presenting the award. +Meggie Kuhic has been a engineer for 14 years. +Maiya Lindgren is retiring as a manager. +Tanner Wolf has been a engineer for 14 years. +Tina Barton will be the new manager for the team. +Zola Swift is a engineer. +Verda Fisher is a engineer in the high tech industry. +Joesph Stroman Jr., an engineer, will be presenting the award. +Bart Pfannerstill joins us as an manager on the Example project. +Ms. Pattie Tremblay is a manager in the high tech industry. +Mr. Sigurd Schaden DVM has been a engineer for 14 years. +Abdiel Schumm has been a manager for 14 years. +Columbus Denesik is retiring as a engineer. +Mr. Demario Shields is a engineer in the high tech industry. 
+Yasmeen Lowe has been a manager for 14 years. +Help me welcome our newest engineer, Maryjane McGlynn PhD. +Ms. Flavie Rutherford is a engineer with Example Corp. +Alanis Wuckert is retiring as a manager. +Ms. Sophie Predovic has been a engineer for 14 years. +Selina Torp is a engineer with Example Corp. +Announcing engineer Melyssa Tromp DDS. +Adrain Rohan has been an manager for over a decade. +Help me welcome our newest manager, Ms. Elsa Stroman. +Constance Ebert has been an engineer for over a decade. +Our latest new employee, Abigayle Schiller, has been a manager in the industry for 4 years. +Announcing engineer Arden Flatley MD. +Leonor Jaskolski has been an engineer for over a decade. +Mr. Antone Skiles PhD will be the new engineer for the team. +Bert Balistreri V is a engineer. +Sierra Stark Jr. has been a engineer for 14 years. +Thad Kilback is a manager. +Ms. Sydni Klein IV will be the new manager for the team. +Owen Herman, an engineer, will be presenting the award. +Announcing manager Jaquelin Hodkiewicz. +Joel Corkery MD is a engineer. +Help me welcome our newest manager, Magnus Christiansen. +Ms. Verla Considine V, an engineer, will be presenting the award. +Mr. Gage Murazik PhD has been an engineer for over a decade. +Mr. Pedro Abshire, an manager, will be presenting the award. +Ms. Frances Schiller joins us as an engineer on the Example project. +Our latest new employee, May Casper, has been a engineer in the industry for 4 years. +Christiana Towne will be the new engineer for the team. +Announcing manager Lester Beier. +Our latest new employee, Juwan Crooks, has been a manager in the industry for 4 years. +Brown Borer Sr. will be the new engineer for the team. +Help me welcome our newest manager, Gregory Funk V. +Ms. Alaina Altenwerth is a manager with Example Corp. +Ms. Aylin Auer is a engineer with Example Corp. +Mr. Isac Buckridge joins us as an engineer on the Example project. +Mr. Morgan Kilback DVM is a manager with Example Corp. 
+Our latest new employee, Rasheed Casper, has been a manager in the industry for 4 years. +Jennyfer Walker has been an engineer for over a decade. +Announcing manager Shaun Fay. +Ms. Thalia Weissnat will be the new engineer for the team. +Hoyt Balistreri is retiring as a manager. +Mr. Ronny Sipes will be the new engineer for the team. +Jailyn Hoppe has been an engineer for over a decade. +Maximillian Howe, an engineer, will be presenting the award. +Announcing engineer Mr. Sofia Hills. +Help me welcome our newest manager, Catharine Kunde. +Announcing engineer Ms. Bryana Kreiger IV. +Help me welcome our newest engineer, Lavonne Reinger. +Albert Keeling has been an engineer for over a decade. +Ms. Taya Feest III is a engineer with Example Corp. +Maida Daugherty is a manager. +Myra Heathcote is retiring as a engineer. +Brooke Price, an manager, will be presenting the award. +Mr. Kale Watsica is a engineer in the high tech industry. +Our latest new employee, Abbigail Wiza III, has been a manager in the industry for 4 years. +Jaron Dickinson is retiring as a engineer. +Announcing manager Angus Senger. +Help me welcome our newest manager, Ms. Abbie Aufderhar. +Ms. Josefina Stark III has been an engineer for over a decade. +Craig Osinski is a manager with Example Corp. +Lenny Mayer DDS is a engineer with Example Corp. +Mireya Wilderman, an engineer, will be presenting the award. +Marianne Wiegand is retiring as a manager. +Hilario Schumm has been a engineer for 14 years. +Mr. Myron Dare is retiring as a engineer. +Micaela Ryan MD has been an engineer for over a decade. +Emmett Becker, an manager, will be presenting the award. +Jamey Wolf will be the new engineer for the team. +Lue Dooley, an manager, will be presenting the award. +Shaun Kris will be the new manager for the team. +Ms. Imogene Ledner DVM joins us as an manager on the Example project. +Help me welcome our newest engineer, Neil Connelly. +Help me welcome our newest engineer, Mr. Rudy Volkman. 
+Patricia O'Reilly, an engineer, will be presenting the award. +Mr. Jarred Treutel V is a engineer. +Mr. Omari Murazik I is retiring as a engineer. +Albin Lockman will be the new manager for the team. +Help me welcome our newest manager, Ms. Nona Grady. +Our latest new employee, Terrence Murphy, has been a manager in the industry for 4 years. +Mr. Madyson Deckow is a manager in the high tech industry. +Ms. Carissa Kling has been a engineer for 14 years. +Help me welcome our newest engineer, Mr. Fredy Bogisich. +Reanna Wuckert is retiring as a engineer. +Angie Nolan is a manager in the high tech industry. +Virginie Braun will be the new engineer for the team. +Jarred Flatley, an engineer, will be presenting the award. +Our latest new employee, Ms. Leora Kihn II, has been a manager in the industry for 4 years. +Announcing engineer Cathrine Kovacek. +Our latest new employee, Wilmer Becker, has been a manager in the industry for 4 years. +Missouri Franecki is a engineer with Example Corp. +Mekhi Donnelly PhD has been an manager for over a decade. +Ms. Willow Towne I has been an manager for over a decade. +Jaylon Rippin will be the new manager for the team. +Alanis Hettinger has been a engineer for 14 years. +Ms. Felicita Becker DVM has been an engineer for over a decade. +Zelda Daniel is retiring as a manager. +Mr. Emil Jast IV joins us as an manager on the Example project. +Kian Satterfield is a manager in the high tech industry. +Mr. Cooper Dooley joins us as an engineer on the Example project. 
diff --git a/internal/service/comprehend/test-fixtures/entity_recognizer/entitylist.csv b/internal/service/comprehend/test-fixtures/entity_recognizer/entitylist.csv index f1080f9213fc..851ce28ef43a 100644 --- a/internal/service/comprehend/test-fixtures/entity_recognizer/entitylist.csv +++ b/internal/service/comprehend/test-fixtures/entity_recognizer/entitylist.csv @@ -1,1001 +1,1001 @@ Text,Type -Gerson Parker,MANAGER -Nickolas Little,MANAGER -Alejandra Stiedemann V,MANAGER -Eunice Leannon,MANAGER -Sunny Schmitt,ENGINEER -Anika Gutkowski,ENGINEER -Delaney Fisher,ENGINEER -Christian Maggio,ENGINEER -Mathias Hettinger I,MANAGER -Elias Quitzon,MANAGER -Alexandra Wunsch,MANAGER -Guido Kemmer,ENGINEER -Sim Kemmer,MANAGER -Vincenza Kertzmann,MANAGER -Skye Kuhn Jr.,ENGINEER -Kasandra Cartwright MD,MANAGER -Ettie Schimmel,MANAGER -Karlee McCullough Jr.,MANAGER -Adeline Johnson,MANAGER -Cory Morissette,MANAGER -Brandy Abshire,MANAGER -Jerome Homenick,ENGINEER -Ms. Doris Ziemann,MANAGER -Ms. Marlene Larkin,ENGINEER -Amos Kuhlman,MANAGER -Ana Ortiz II,MANAGER -Jewell Konopelski,MANAGER -Larry Effertz,ENGINEER -Ms. Vernice Fay,ENGINEER -Miss Dion Ratke,MANAGER -Rachelle Lubowitz,MANAGER -Rachael Moore,MANAGER -Sabrina Walsh,ENGINEER -Tamara Nicolas,MANAGER -Martin Lynch Sr.,MANAGER -Kaya Wisozk,MANAGER -Mrs. Cassandra Robel,ENGINEER -Donny Schroeder,MANAGER -Emmanuel Fay,MANAGER -Jefferey Adams,ENGINEER -Godfrey Feest V,MANAGER -Melyna Mertz,MANAGER -Audreanne Wiza,MANAGER -Jarvis O'Conner,MANAGER -Mertie Sanford,MANAGER -Keshaun Altenwerth,ENGINEER -Aric Beier Sr.,ENGINEER -Hoyt Rowe MD,ENGINEER -Aurelia Hintz,ENGINEER -Keon Stanton,MANAGER -Candace Wiegand,MANAGER -Flavio Smith,MANAGER -Mr. Zachery Toy,MANAGER -Kaleigh Murazik,ENGINEER -Pattie Kuvalis,ENGINEER -Serena Heidenreich,ENGINEER -Simone Little,MANAGER -Ms. Carolina Mante,ENGINEER -Julia Lemke,MANAGER -Mrs. Meagan Fisher,MANAGER -Molly Goldner,ENGINEER -Mrs. 
Dax McGlynn,MANAGER -Tyrique Harber,ENGINEER -Mia Larkin II,MANAGER -Winona Schoen,MANAGER -Dr. Antonette Cummings,ENGINEER -Mr. Sonya Cassin,MANAGER -Timothy Bartoletti DDS,MANAGER -Winifred Reinger,ENGINEER -Marcella Bahringer,ENGINEER -Rhea Kohler,ENGINEER -Alia McCullough,ENGINEER -Mr. Israel Gulgowski,ENGINEER -Jana Wilkinson,ENGINEER -Marjory Fay,MANAGER -Alena Mueller,ENGINEER -Kristian Keebler,ENGINEER -Katheryn Dietrich,MANAGER -Mya Grady,MANAGER -Kyra Heidenreich V,ENGINEER -Dr. Cortez Thiel,MANAGER -Shyanne Hirthe,ENGINEER -Herta Mohr,ENGINEER -Winfield Thompson,ENGINEER -Miss Bette Dooley,MANAGER -Mrs. Freida Shanahan,ENGINEER -Sunny Towne,MANAGER -Kayley Schneider,ENGINEER -Anderson Hagenes,MANAGER -Barrett Paucek Jr.,MANAGER -Lillian Koss,ENGINEER -Lance Zieme,ENGINEER -Van Hackett,ENGINEER -Cullen Schamberger,ENGINEER -Arvel Stanton,MANAGER -Nedra Goldner,MANAGER -Ms. Zetta Lindgren,MANAGER -Faye Ledner,ENGINEER -Trystan Hilll PhD,MANAGER -Arnold Hermann Jr.,ENGINEER -Hank Towne,MANAGER -Monroe Schmeler,MANAGER -Genesis Wintheiser II,ENGINEER -Angie O'Reilly,MANAGER -Deshawn Reinger,MANAGER -Mrs. Molly Lowe,ENGINEER -Kimberly Osinski,MANAGER -Napoleon Sauer,ENGINEER -Leonardo Champlin,MANAGER -Anita Bartell,MANAGER -Mrs. Jennifer Buckridge,MANAGER -Patrick Gulgowski,MANAGER -Miss Brooks Christiansen,MANAGER -Ms. Alejandra Shields,ENGINEER -Callie Lynch,ENGINEER -Hyman Bruen,ENGINEER -Ms. Lera Grant,ENGINEER -Laney West V,MANAGER -Shyanne Price,ENGINEER -Leanna Schoen,ENGINEER -Clement Bayer,ENGINEER -Mrs. Colt Kozey,MANAGER -Dr. Logan Dickinson,ENGINEER -Valentin Torp,MANAGER -Kathlyn Bechtelar,MANAGER -Vita Pollich,ENGINEER -Benedict Flatley,MANAGER -Dedrick Corkery V,MANAGER -Shea Kohler,ENGINEER -Tiana Mertz,ENGINEER -Margarett Kunze,ENGINEER -Alan Bashirian,MANAGER -Dr. Precious Murazik,MANAGER -Ms. 
Karine Langosh,ENGINEER -Letha Skiles,ENGINEER -Nikko Dooley,ENGINEER -Filomena McKenzie,ENGINEER -Rosario Grant Sr.,ENGINEER -Gonzalo Douglas V,ENGINEER -Lempi Pollich,ENGINEER -Candida O'Reilly Sr.,ENGINEER -Dena Lind,ENGINEER -Blair Renner,MANAGER -Imani Roob,MANAGER -Miss Drake Johns,MANAGER -Murl Jacobi,MANAGER -Nicolette Prohaska,MANAGER -Leif Quigley,MANAGER -Hugh Gleichner III,MANAGER -Neil Witting,MANAGER -Ivah Moore,ENGINEER -Amira Bruen,ENGINEER -Loy Kerluke,ENGINEER -Cleta Berge,MANAGER -Asa Schaden,ENGINEER -Marlin Nitzsche,ENGINEER -Mellie Hansen,ENGINEER -Pauline Willms,MANAGER -Giovanna Marquardt,MANAGER -Kristian Conroy IV,MANAGER -Aiyana Hagenes,ENGINEER -Melisa Barton Jr.,MANAGER -Robbie Bechtelar,MANAGER -Citlalli Lind,ENGINEER -Karlee Lubowitz,MANAGER -Isidro Jerde,ENGINEER -Conor Lemke,MANAGER -Murphy Rutherford,MANAGER -Omari Schultz,MANAGER -Selmer Labadie,MANAGER -Alverta Hilpert,MANAGER -Ali Dooley III,ENGINEER -Benjamin Lubowitz,MANAGER -Trenton Hilll,MANAGER -Bethel Carroll,ENGINEER -Nick Lang,MANAGER -Ms. Chase Graham,MANAGER -Melany Buckridge PhD,ENGINEER -Ursula McGlynn,ENGINEER -Jackie Kshlerin,MANAGER -Jarvis Langosh,ENGINEER -Bonnie Bednar,MANAGER -Simeon Dickinson,ENGINEER -Annamarie VonRueden MD,MANAGER -Ms. Hilton Hagenes,ENGINEER -Jaqueline D'Amore,MANAGER -Mario Bartell,MANAGER -Frances Kovacek,MANAGER -Avery Lakin,MANAGER -Brittany Flatley DDS,ENGINEER -Violette Sauer,MANAGER -Dorothy Farrell,ENGINEER -Miss Jordan Crooks,MANAGER -Jadyn West,MANAGER -Mrs. Johanna Borer,ENGINEER -Elyssa Crist,ENGINEER -Carli Kohler,MANAGER -Dasia Dare,ENGINEER -Lane Rath,MANAGER -Davion Kunze,ENGINEER -Vada Stoltenberg,ENGINEER -London Hagenes,ENGINEER -Albin Wintheiser,MANAGER -Mr. Eloise Jones,ENGINEER -Rashawn Bechtelar,MANAGER -Linwood Casper,ENGINEER -Mr. 
Phyllis O'Reilly,MANAGER -Carrie Franecki,MANAGER -Mikel Daugherty I,MANAGER -Jorge Connelly IV,MANAGER -Victor Cummings,MANAGER -Felton Kris,MANAGER -Barney Klein,MANAGER -Verdie Strosin,MANAGER -Izabella Vandervort DDS,ENGINEER -Philip Hodkiewicz,MANAGER -Mr. Carol Orn,MANAGER -Ursula Cormier,MANAGER -Ellen Hudson,ENGINEER -Bobby Conn II,ENGINEER -Mathilde Bayer,ENGINEER -Ms. Marcus Ferry,ENGINEER -Tyshawn Volkman,MANAGER -Ladarius Predovic,ENGINEER -Ally Parisian,MANAGER -Briana Reinger,MANAGER -Piper Kutch,MANAGER -Armand Ratke,MANAGER -Stephon McLaughlin DDS,ENGINEER -Cordell Cole,ENGINEER -Earl Altenwerth,ENGINEER -Garrison Lang Jr.,MANAGER -Ahmad Heaney,ENGINEER -Jed Pollich,MANAGER -Hudson Wyman,MANAGER -Dena Crist DVM,ENGINEER -Gino Auer III,MANAGER -Miss Xavier Farrell,MANAGER -Lawrence Considine,ENGINEER -Coby Lesch,ENGINEER -Hardy Mohr,MANAGER -Dr. Tamara Ryan,MANAGER -Alia Quigley PhD,ENGINEER -Noelia Bergnaum,ENGINEER -Dakota Brown,ENGINEER -Elmo Rogahn,ENGINEER -Roxanne Weimann,MANAGER -Gay Langworth,MANAGER -Ellis Ledner,ENGINEER -Weston Murazik,ENGINEER -Josefa Sauer,MANAGER -Ilene Wyman,MANAGER -Nia Gottlieb,MANAGER -Crawford Wehner,MANAGER -Brenna Ritchie,MANAGER -Ms. Mariah Hudson,MANAGER -Tyra Schneider Sr.,MANAGER -Andreane Johns,MANAGER -Mrs. Mabel Rice,ENGINEER -Emelia Jaskolski PhD,ENGINEER -Spencer Cole II,ENGINEER -Doris Stokes,MANAGER -Lilian Erdman,ENGINEER -Ms. Ramona Torp,MANAGER -Ms. Lauryn Stark,MANAGER -Israel Greenholt,ENGINEER -Ms. Boris Leannon,MANAGER -Pearlie Swaniawski,MANAGER -Delores Kilback,MANAGER -Mariam Schultz,ENGINEER -Dimitri Mueller IV,MANAGER -Maud Beahan,MANAGER -Fletcher Predovic DVM,ENGINEER -Mrs. Joanne Aufderhar,ENGINEER -Miss Paul Lowe,ENGINEER -Johnpaul Swift,MANAGER -Miss Rowena Pouros,ENGINEER -Benjamin Jenkins,ENGINEER -Erwin Jenkins,MANAGER -Ms. Zula Turner,ENGINEER -Rhiannon Lind,MANAGER -Mrs. 
Garth Labadie,MANAGER -Mia King,MANAGER -Ewald Cronin,ENGINEER -Carrie Roob III,MANAGER -Clementina Schmeler,MANAGER -Stewart Sipes II,ENGINEER -Katelin D'Amore,MANAGER -Verlie Wiegand,MANAGER -Marie Schaefer,ENGINEER -Tillman Boehm,ENGINEER -Jacklyn Kohler,MANAGER -Katrine Bruen,MANAGER -Lisa Gaylord,ENGINEER -Virginia Ruecker,ENGINEER -Mrs. Garnett Christiansen,ENGINEER -Anderson Weissnat,ENGINEER -Marie Armstrong,ENGINEER -Prudence Fahey V,ENGINEER -Bria Medhurst,MANAGER -Ms. Dewitt Bernhard,ENGINEER -Stanford Miller,MANAGER -Freddie Treutel,MANAGER -Oceane Bayer,MANAGER -Ms. Adalberto Lindgren,MANAGER -Meagan Bartoletti,MANAGER -Wilhelm Kutch,ENGINEER -Khalid Farrell Sr.,MANAGER -Ms. Allison Zemlak,ENGINEER -Judson Rodriguez MD,MANAGER -Mr. Alena Stanton,MANAGER -Kaylin Kohler,ENGINEER -Melany Price,ENGINEER -Palma Brekke,MANAGER -Ozella Larson,MANAGER -Earnestine Sanford,ENGINEER -Kathryne Tromp,ENGINEER -Ted Abernathy,MANAGER -Nelle Waters,ENGINEER -Dawn Kautzer,ENGINEER -Ms. Itzel Breitenberg,ENGINEER -Meta Gibson,MANAGER -Haven Nitzsche,ENGINEER -Antonetta Kilback I,MANAGER -Kiara Zboncak,MANAGER -Leola Kris,ENGINEER -Mr. Conrad Hills,MANAGER -Alize Rogahn,ENGINEER -Rudy Hamill,MANAGER -Ms. Celestino Turcotte,MANAGER -Ms. Annetta Stracke,MANAGER -Hailie Hudson,ENGINEER -Mrs. 
Deven Moen,MANAGER -Callie Larson,MANAGER -Quentin Morar,ENGINEER -Antonietta Kuhlman II,MANAGER -Cristal Shanahan DVM,MANAGER -Cristopher Boyer,ENGINEER -Keely Larkin,ENGINEER -Royce Berge,ENGINEER -Benjamin Hilll,ENGINEER -Rashawn Bogan,MANAGER -Ted Collier,MANAGER -Alene Corwin,MANAGER -David Hodkiewicz,MANAGER -Garland Kuhic,MANAGER -Sonya Wilderman,MANAGER -Quinn Bradtke Jr.,MANAGER -Ellen Cummerata,ENGINEER -Mason Beatty,MANAGER -Camylle Muller,MANAGER -Hadley Upton,ENGINEER -Keara Pfeffer,ENGINEER -Angie Walsh,MANAGER -Earl Cummings,MANAGER -Ephraim Marks,ENGINEER -Orval Reichert DDS,ENGINEER -Katheryn Gleichner,ENGINEER -Jalyn Fay I,ENGINEER -Virginia Keebler DVM,ENGINEER -Raphael Leffler,ENGINEER -Juliana Stokes,MANAGER -Casper Herman,MANAGER -Vladimir Reilly,MANAGER -Erin Okuneva,MANAGER -Martine White,MANAGER -Tristian Mertz,MANAGER -Mr. Sammy Schmitt,ENGINEER -Alec Schuster,MANAGER -Ms. Lorenza Walsh,ENGINEER -Eldora Mayert,ENGINEER -Justina Breitenberg,MANAGER -Mariela Grady Jr.,ENGINEER -Kevon Baumbach,MANAGER -Wendell Hayes,ENGINEER -Pat Aufderhar,ENGINEER -Bart Senger,MANAGER -Kaitlyn Hahn,ENGINEER -Mrs. Else Kozey,MANAGER -Mr. Ashton Batz,ENGINEER -Lilly Koepp,ENGINEER -Mrs. Alfredo Cormier,MANAGER -Gail Swaniawski DVM,ENGINEER -Mrs. Valentina Wilderman,ENGINEER -Paxton Doyle,ENGINEER -Jarret Block PhD,MANAGER -Arnaldo Blanda,MANAGER -Aiden Orn,MANAGER -Florine West,MANAGER -Sincere Harber,MANAGER -Joan Ziemann,ENGINEER -Katelyn Schultz,ENGINEER -Maximus Gleichner,MANAGER -Elenor Schuster,MANAGER -Marcelino Kautzer,ENGINEER -Lea Schulist Sr.,ENGINEER -Jeanne Carter MD,MANAGER -Kayleigh Goldner DDS,ENGINEER -Hilario Denesik I,MANAGER -Shirley Reichert,MANAGER -Kris Dickens,ENGINEER -Gene Frami,MANAGER -Sadye Jacobson,MANAGER -Buck Cremin,ENGINEER -Coty Lesch,ENGINEER -Dr. Kim Mertz,ENGINEER -Randy Sanford,MANAGER -Levi Kirlin,MANAGER -Davin Yundt,ENGINEER -Enola Bins,ENGINEER -Trent Kuvalis,ENGINEER -Jake Powlowski,ENGINEER -Ms. 
Ashlee Emmerich,MANAGER -Hannah Davis,ENGINEER -Wayne Champlin,ENGINEER -Nikki Conn Jr.,MANAGER -Carli Bauch,ENGINEER -Norbert Feest,MANAGER -Robbie Wintheiser,ENGINEER -Leta Abshire,ENGINEER -Fannie Walker,ENGINEER -Heber Wilkinson,MANAGER -Willie Bernier III,MANAGER -Orlando Price,ENGINEER -Brandt Schowalter,MANAGER -Mohammed Stokes,ENGINEER -Isai Mraz,ENGINEER -Kadin Lemke,MANAGER -Maribel Jerde,MANAGER -Myrna Kessler,MANAGER -Meredith Tremblay,ENGINEER -Mr. Jerad Schneider,ENGINEER -Lenny Pfeffer,MANAGER -Carolyne Klocko DVM,MANAGER -Monica Schulist,ENGINEER -Anika Larson V,MANAGER -Domenick Pacocha,ENGINEER -Miss Harmon Pfannerstill,ENGINEER -Mr. Annabell Pouros,MANAGER -Dr. Brisa Stroman,MANAGER -Jade Stoltenberg,MANAGER -Miss Mario Wolff,ENGINEER -Ms. Savannah Gaylord,MANAGER -Dejah Jones,MANAGER -Hector Kulas,MANAGER -Graciela Goodwin,MANAGER -Jocelyn Sauer,MANAGER -Miss Lew Hansen,MANAGER -Fannie Fay DDS,ENGINEER -Dr. Jordan Klocko,ENGINEER -Kathlyn Lynch,MANAGER -Leann Botsford,ENGINEER -Ervin Larson,MANAGER -Allie Von,MANAGER -Johanna Kohler III,MANAGER -Hilbert Armstrong,ENGINEER -Tanner Balistreri IV,MANAGER -Abagail Shields,ENGINEER -Gia Cremin,ENGINEER -Mrs. Buford Oberbrunner,ENGINEER -Madelyn White,MANAGER -Abdullah Effertz,MANAGER -Reva Stark,ENGINEER -Camryn McKenzie,MANAGER -Juwan Pouros,MANAGER -Gene Cassin,MANAGER -Felicia Kunde,ENGINEER -Jeremie Anderson,MANAGER -Katheryn Hickle Jr.,MANAGER -Edwina Hamill IV,MANAGER -Adriana Cassin DVM,MANAGER -Nelda Rowe,ENGINEER -Rodrigo Kulas V,ENGINEER -Jaiden Williamson,MANAGER -Cristopher Williamson,MANAGER -Mr. Jay Krajcik,ENGINEER -Francesco Miller,MANAGER -Brenna Reinger,MANAGER -Mr. Mollie Stanton,MANAGER -Coby Schowalter,ENGINEER -Estefania Armstrong II,MANAGER -Aimee Nienow,ENGINEER -Kimberly Batz,ENGINEER -Miss Sienna Pfannerstill,ENGINEER -Johnathon Hammes,ENGINEER -Julien Hansen,MANAGER -Mrs. 
Emerson Waelchi,MANAGER -Malcolm Streich,MANAGER -Aurelio Lebsack,ENGINEER -Juana Grady,ENGINEER -Kiel Lakin,MANAGER -Sarai Keeling,ENGINEER -Emilia Crona,ENGINEER -Georgianna Kris,MANAGER -Maida Heller,MANAGER -Jena Feeney,ENGINEER -Mabelle Keeling,MANAGER -Chris Bergstrom,ENGINEER -Audrey Block DDS,ENGINEER -Louvenia Kuhn,ENGINEER -Thomas O'Keefe,MANAGER -Darby Klocko,MANAGER -Arlene Weimann,MANAGER -Corbin Jones MD,ENGINEER -Lamar Mraz,ENGINEER -Miss Onie Krajcik,MANAGER -Kamille Schaefer,MANAGER -Jack Borer,ENGINEER -Reese Heaney,MANAGER -Ilene Kovacek,ENGINEER -Trace Bailey,ENGINEER -Wava Donnelly,ENGINEER -Mona Lakin,MANAGER -Weldon Heaney,MANAGER -Norris Labadie,ENGINEER -Bridgette Brown,MANAGER -Osborne Kertzmann,MANAGER -Verlie Bruen,ENGINEER -Enrique Ullrich,ENGINEER -Dr. Asia Purdy,ENGINEER -Lindsey Predovic DDS,MANAGER -Maxine Mosciski,MANAGER -Sydni Stoltenberg,ENGINEER -Paige Buckridge,ENGINEER -Miss Laverne Dach,MANAGER -Murl Abshire,MANAGER -Lou Friesen,MANAGER -Keenan Fahey,ENGINEER -Ashleigh Schultz,ENGINEER -Mrs. Keshaun Lesch,ENGINEER -Jeffrey Langosh,ENGINEER -Mckenzie Boyle,ENGINEER -Hipolito Price PhD,MANAGER -Lesley Adams III,ENGINEER -Mya Howe,ENGINEER -Nick Kutch Sr.,MANAGER -Ms. Winfield Wilkinson,ENGINEER -Leopold Schulist,ENGINEER -Orval Prosacco,ENGINEER -Wilmer Mueller,MANAGER -Karina Batz,MANAGER -Luigi Abbott,MANAGER -Pamela Miller,MANAGER -Emelie Marquardt,ENGINEER -Zola Beier,MANAGER -Mr. Elva Ritchie,ENGINEER -Mrs. Otis Quitzon,MANAGER -Dr. Willow Jacobs,MANAGER -Cathryn Koss,MANAGER -Ms. Alivia Ernser,MANAGER -Timothy Mohr,ENGINEER -Mrs. Jaylan Wuckert,MANAGER -Emerald Waelchi,ENGINEER -Vernon Heathcote,MANAGER -Lavinia Ruecker,ENGINEER -Mr. Quinn Altenwerth,MANAGER -Alejandra Marks,MANAGER -Mr. Leo Wuckert,MANAGER -Jayce Schiller MD,MANAGER -Elenora Ebert Jr.,ENGINEER -Dr. Rose Wyman,MANAGER -Wade Orn,MANAGER -Iva Marks Sr.,MANAGER -Margaret Pouros,MANAGER -Barton Deckow,MANAGER -Miss Lesly Balistreri,ENGINEER -Mr. 
Jacquelyn Reynolds,ENGINEER -Doyle Heidenreich,MANAGER -Heidi Ruecker,ENGINEER -Mr. Alvis Moen,MANAGER -Dr. Garnet Brown,ENGINEER -Yolanda Beier,ENGINEER -Soledad Macejkovic,ENGINEER -Urban Lowe,ENGINEER -Devyn Schmidt,MANAGER -Barbara Flatley,MANAGER -Patsy Sanford PhD,ENGINEER -Mrs. Rubye Blanda,ENGINEER -Caleigh Klocko,ENGINEER -Kali Dietrich,MANAGER -Ms. Weldon Hudson,ENGINEER -Vallie Huel,ENGINEER -Sven O'Keefe III,ENGINEER -Gerardo Wehner Sr.,ENGINEER -Kyle Kirlin,MANAGER -Marianne Berge Jr.,ENGINEER -Ms. Cristal Connelly,MANAGER -Kailey Spinka,MANAGER -Jeremie Morar,MANAGER -Daija Lind,ENGINEER -Arvel McDermott,ENGINEER -Dr. Nicholas Gorczany,MANAGER -Anne Leuschke,MANAGER -Gerda Cronin,ENGINEER -Ms. Coty Rolfson,ENGINEER -Kareem Gerhold,ENGINEER -Mrs. Nico Mann,ENGINEER -Corbin Bartell,ENGINEER -Theresa Gulgowski,MANAGER -Carmelo Boyer,MANAGER -Elinore Schulist III,MANAGER -Katarina Schultz,MANAGER -Deven Rodriguez II,MANAGER -Miss Bruce Friesen,MANAGER -Marcelle Schowalter,MANAGER -Albertha Murphy PhD,MANAGER -Elmore Doyle,ENGINEER -Reymundo Jaskolski,ENGINEER -Stephania Swaniawski I,MANAGER -Stewart Veum,MANAGER -Nathanael Bartell,MANAGER -Retha Rempel,ENGINEER -Isidro Aufderhar,MANAGER -Florencio Mohr,MANAGER -Zella Weimann,ENGINEER -Khalid Macejkovic,ENGINEER -Geraldine Torp,ENGINEER -Presley Marks,ENGINEER -Mrs. Eve Bartoletti,ENGINEER -Corine Schimmel,MANAGER -Citlalli Goldner DDS,MANAGER -Zakary Botsford,MANAGER -Florida Reilly,ENGINEER -Mr. Patsy Doyle,MANAGER -Emily Hayes II,ENGINEER -Dr. Johann Turcotte,ENGINEER -Dr. Darion Dietrich,ENGINEER -Norris Brekke,MANAGER -Janessa Marquardt,MANAGER -Felicita Wintheiser MD,MANAGER -Melyssa Muller,ENGINEER -Vivienne Weissnat DDS,ENGINEER -Ford Gerlach,ENGINEER -Keenan Kertzmann,ENGINEER -Tobin Goyette,ENGINEER -Cecilia Green,MANAGER -Abe Fisher,MANAGER -Mrs. 
Annabell Morissette,MANAGER -Danny Kautzer III,MANAGER -Gerard Cruickshank,ENGINEER -Joy Okuneva DDS,MANAGER -Ezequiel Macejkovic,ENGINEER -Vernie Bradtke IV,ENGINEER -Trycia Muller,ENGINEER -Clotilde Ankunding,ENGINEER -Mr. Chaya Abshire,ENGINEER -Hanna Roob,MANAGER -Ronny Dietrich,ENGINEER -Derek Durgan DVM,MANAGER -Demetrius West,MANAGER -Macey Nikolaus,MANAGER -Edison Gottlieb III,MANAGER -Bo Collins,ENGINEER -Michaela Pagac PhD,MANAGER -Mireille Kunde I,ENGINEER -Duncan Kulas,MANAGER -Mrs. Xzavier Smitham,ENGINEER -Mr. Adrianna Baumbach,MANAGER -Shad Rolfson,MANAGER -Mr. Dimitri Baumbach,MANAGER -Samara Schultz,MANAGER -Mrs. Brant Kautzer,MANAGER -Tierra Greenholt,ENGINEER -Otho Kub,MANAGER -Ana Harber,MANAGER -Ted Mertz Sr.,ENGINEER -Uriel Zieme,MANAGER -Mr. Adaline Wolff,ENGINEER -Mrs. Maurice Senger,MANAGER -Ada Gleason,MANAGER -Edwina Bernier DVM,ENGINEER -Elva Homenick,ENGINEER -Mrs. Shane Powlowski,MANAGER -Obie Nikolaus,MANAGER -Ottis Jakubowski,MANAGER -Mr. Armand Leannon,ENGINEER -Jaime Kuvalis,ENGINEER -Loyce VonRueden Jr.,ENGINEER -Evangeline Johns,MANAGER -Federico Halvorson,MANAGER -Sylvester Gerlach,MANAGER -Ashly Wunsch V,MANAGER -Rowland Miller,MANAGER -Bryon Kunde,MANAGER -Denis Ernser,ENGINEER -Jovanny O'Reilly DVM,ENGINEER -Lazaro Hermiston,MANAGER -Kelly Stehr,ENGINEER -Oda Fadel,ENGINEER -Percival Armstrong,ENGINEER -Caleigh Schimmel,MANAGER -Tyrique Pfeffer,ENGINEER -Adell Leuschke,ENGINEER -Dr. Cade Farrell,ENGINEER -Gisselle Doyle,MANAGER -Lily Reinger,MANAGER -Jeffrey Gleason,ENGINEER -Tad Huel,ENGINEER -Connor Conn,MANAGER -Mr. Cathrine Casper,ENGINEER -Dr. Beryl Rempel,MANAGER -Carlie Steuber,MANAGER -Rose Frami IV,ENGINEER -Mr. Tyrel Pagac,MANAGER -Morton Trantow,ENGINEER -Lola Ortiz,MANAGER -Kelton Champlin,MANAGER -Owen Mayert,ENGINEER -Johnny Witting,MANAGER -Thea Rolfson,MANAGER -Reanna Schmidt,ENGINEER -Ian Stehr,MANAGER -Mr. 
Patsy Purdy,MANAGER -Brady Ritchie,ENGINEER -Thea Effertz,MANAGER -Gerry Veum,ENGINEER -Corene Adams,MANAGER -Julian Kutch,MANAGER -Mrs. Joe Connelly,ENGINEER -Adolphus Paucek,MANAGER -Jasmin Ledner II,ENGINEER -Dr. Osbaldo Beatty,ENGINEER -Dr. Mckenna Haag,MANAGER -Abdiel Connelly DVM,ENGINEER -Liliana Baumbach III,MANAGER -Willard Kuvalis V,ENGINEER -Carolyn Jaskolski Jr.,MANAGER -Reta Franecki,ENGINEER -Percival O'Kon,ENGINEER -Kamryn Rath,MANAGER -Hailey Dooley,MANAGER -Mrs. Brycen West,ENGINEER -Margarette Miller,MANAGER -Cristian Pagac,MANAGER -Rosalee Bechtelar,MANAGER -Lessie Lesch,MANAGER -Iva Hegmann,MANAGER -Hallie Schroeder,MANAGER -Mr. Lola Volkman,ENGINEER -Arianna Wolf DDS,MANAGER -Elliot Trantow,MANAGER -Darrion Rath PhD,ENGINEER -Coralie Effertz V,ENGINEER -Ms. Corrine Effertz,ENGINEER -Ellie Keebler,ENGINEER -Tyrese Pfeffer,ENGINEER -Jayce Roberts,MANAGER -Isobel Veum,ENGINEER -Raphaelle Breitenberg,ENGINEER -Maudie Labadie I,ENGINEER -Rosario Langosh MD,ENGINEER -Raheem Mohr,MANAGER -Avery Lind,ENGINEER -Nichole Waters,ENGINEER -Blaise Gislason I,MANAGER -Everette D'Amore,ENGINEER -Darwin Conroy,MANAGER -Abdullah Heathcote,ENGINEER -Burnice Treutel,MANAGER -Libbie O'Hara,ENGINEER -Rowan Will,MANAGER -Gudrun Gleason,ENGINEER -Fannie Quitzon,MANAGER -Terence Gutkowski,ENGINEER -Tyshawn Rowe,MANAGER -Jaylan Sanford,MANAGER -Camille Schaden DVM,MANAGER -Ms. Blaze Emmerich,MANAGER -Sonny Stoltenberg,ENGINEER -Elsie Jacobson,MANAGER -London Jacobs,MANAGER -Mr. Hassie Kuhn,ENGINEER -Raul Bogan MD,ENGINEER -Adrian Abshire,MANAGER -Golden Kreiger,MANAGER -Deven Stiedemann I,MANAGER -Hilario Koepp PhD,MANAGER -Maynard Herzog,MANAGER -Nathaniel Torp,MANAGER -Courtney Strosin,MANAGER -Emely Lowe,ENGINEER -Vilma Weber,ENGINEER -Ms. Carlee Littel,ENGINEER -Hayden Mills,ENGINEER -Ervin Schimmel,ENGINEER -Gino Ortiz,ENGINEER -Amani Conroy,MANAGER -Korbin Lowe,MANAGER -Turner Bogan I,ENGINEER -Ms. Jabari Bauch,MANAGER -Mrs. 
Breanne Morissette,ENGINEER -Crystel Doyle,MANAGER -Isabel VonRueden,ENGINEER -Dayne Cremin,ENGINEER -Waino Armstrong,MANAGER -Deborah Armstrong,MANAGER -Ashlynn Mante DVM,ENGINEER -Karlie Pollich Jr.,ENGINEER -Maeve Schroeder,MANAGER -Hanna Fadel,ENGINEER -Delphia O'Hara,MANAGER -Jamir Hammes,ENGINEER -Nigel Ortiz,ENGINEER -Pauline Ritchie,MANAGER -Nicholaus Toy,ENGINEER -Freddy Okuneva,MANAGER -Brionna Fritsch,ENGINEER -Maiya Mills,ENGINEER -Alia Hoeger PhD,MANAGER -Alvina Mertz,ENGINEER -Raymundo Hintz,ENGINEER -Zack Stamm,ENGINEER -Dayne Klocko,MANAGER -Kyla Cremin,ENGINEER -Izabella Bernhard,ENGINEER -Zena Yundt,ENGINEER -Daron Schuppe,MANAGER -Mr. Amira Marvin,MANAGER -Boris Morar,MANAGER -Esperanza Batz,ENGINEER -Dortha Macejkovic I,MANAGER -Porter Dach V,ENGINEER -Kailyn Flatley I,MANAGER -Celine O'Keefe,MANAGER -Obie Rodriguez,MANAGER -Cade Gorczany,ENGINEER -Myles Shanahan,ENGINEER -Jayne Wiza,ENGINEER -Julius Huel,MANAGER -Ms. Rowena Kihn,ENGINEER -Ena Wehner,MANAGER -Clovis Cartwright,MANAGER -Mr. Marcelo D'Amore,MANAGER -Meggie Prosacco,ENGINEER -Lisa Schamberger PhD,ENGINEER -Mrs. Lyda Bayer,MANAGER -Newell Hettinger,MANAGER -Melany Wolf,MANAGER -Emil Schaefer,MANAGER -Samson Trantow,MANAGER -Maida Marquardt,ENGINEER -Johnpaul Howe MD,MANAGER -Mrs. Adah Lubowitz,ENGINEER -Ms. Imelda Kohler,MANAGER -Manuela Frami I,ENGINEER -Noelia Padberg,MANAGER -Una Eichmann DDS,ENGINEER -Elta Nolan,ENGINEER -Jaron Wyman,ENGINEER -Kayla Windler Sr.,MANAGER -Mrs. Zetta Stiedemann,MANAGER -Dr. Abner Adams,MANAGER -Brenden Ortiz,MANAGER -German Funk,ENGINEER -Mr. Liliane Konopelski,MANAGER -Jarrett Morar,MANAGER -Parker Huels,MANAGER -Mrs. 
Alvah Bayer,ENGINEER -Wilhelm Parker Jr.,ENGINEER -Leo Mertz,ENGINEER -Corine Hills,ENGINEER -Coy Raynor Sr.,ENGINEER -Mallie Streich DVM,ENGINEER -Daisy Hoeger,MANAGER -Michelle Hickle,ENGINEER -Nolan Douglas,ENGINEER -Vivian Bernier,ENGINEER -Brigitte Toy,ENGINEER -Aniyah Schoen,ENGINEER -Emmie Bins,ENGINEER -Mazie Weimann,MANAGER -Carole Aufderhar,ENGINEER -Bernhard O'Kon,ENGINEER -Flavio Moore Sr.,MANAGER -Justina Wuckert,ENGINEER -Meredith Jones,ENGINEER -Gene Champlin,MANAGER -Clare Fay,MANAGER -Lesly Johnston II,MANAGER -Cristian Kling,MANAGER -Candido Littel,MANAGER -Mrs. Gregory Ritchie,ENGINEER -Lucio Sawayn,MANAGER -Derick Rath DVM,MANAGER -Gabriella Dietrich,ENGINEER -Lula Spencer DVM,ENGINEER -Horacio Kulas,ENGINEER -Davin Vandervort DDS,ENGINEER -Ms. Avery Wisoky,ENGINEER -Talon Williamson MD,MANAGER -Gerald Hahn,MANAGER -Ettie Yost,MANAGER -Abdullah Mosciski,ENGINEER -Mrs. Marielle Bosco,ENGINEER -Kory Batz,MANAGER -Noelia Kovacek,ENGINEER -Kyleigh Nienow,MANAGER -Alize Lind,ENGINEER -Ellsworth Altenwerth,ENGINEER -Domenic Mayer,ENGINEER -Ms. Geovanny Satterfield,ENGINEER -Ella Daniel,MANAGER -Kylee Bogisich PhD,ENGINEER -Ryder Wilkinson Sr.,MANAGER -Marina Schaefer,ENGINEER -Ms. Paige Bartell,MANAGER -Mitchel Murray,ENGINEER -Tyler Quigley,MANAGER -Veronica Kreiger,ENGINEER -Halie Goldner,ENGINEER -Ryder Lakin,ENGINEER -Chloe Legros,ENGINEER -Dariana O'Conner,ENGINEER -Era Bins Jr.,ENGINEER -Laila Reichert,MANAGER -Dedrick Kuhic V,ENGINEER -Haylee Price Jr.,ENGINEER -Callie Shields,MANAGER -Jarrod Fahey,MANAGER -Earlene Cremin,ENGINEER -Ellie Bergstrom,MANAGER -Armando Grady,ENGINEER -Dr. Lamar Hessel,ENGINEER -Joe Runolfsson Sr.,ENGINEER -Manley Oberbrunner,MANAGER -Meta Weissnat II,ENGINEER -Dagmar Batz IV,ENGINEER -Susie Bayer,ENGINEER -Randi Howell,ENGINEER -Joanne Rau,MANAGER -Buck Stark,ENGINEER -Shane Donnelly,ENGINEER -Quincy Casper V,ENGINEER -Lafayette Grimes,MANAGER -Mrs. Jody Beer,ENGINEER -Miss Corene Schamberger,ENGINEER -Dr. 
Jesse Baumbach,ENGINEER -Brenna Quigley Jr.,MANAGER -Ms. Viviane Bins,ENGINEER -Edgar Johnson,MANAGER -Mr. Tracy Beier,ENGINEER -Juvenal Ortiz,ENGINEER -Dr. Nat Pagac,MANAGER -Julio Mitchell,ENGINEER -Clarissa Mraz Jr.,ENGINEER -Dustin Grady,MANAGER -Brennon Bayer,ENGINEER -Mrs. Birdie Nienow,MANAGER -Randal Sauer Sr.,ENGINEER -Verda Kozey,ENGINEER -Chesley Hickle,ENGINEER -Miss Rico Block,ENGINEER -Gaetano Lindgren,MANAGER -Hope Hauck,ENGINEER -Percival O'Connell,ENGINEER -Melyna Leuschke,MANAGER -Adan Collins,MANAGER -Daryl Bashirian,ENGINEER -Kara Welch,MANAGER -Norberto Swift,ENGINEER -Effie Champlin Jr.,ENGINEER -Dr. Julia Metz,MANAGER -Janice Witting,MANAGER -Bruce Eichmann,MANAGER -Isobel Swift,ENGINEER -Earnestine Mayer MD,MANAGER -Michele Bashirian,ENGINEER -Janick Crona,ENGINEER -Hank Fisher III,MANAGER -Miss Aliya Skiles,ENGINEER -Herbert Orn,MANAGER -Aglae Baumbach,ENGINEER -Gayle Carter,ENGINEER -Creola Kautzer,MANAGER -Theresa Hauck,MANAGER -Ms. Patience Wintheiser,MANAGER -Sebastian Rutherford DDS,ENGINEER -Ara Pollich I,ENGINEER -Carlos Baumbach,MANAGER -Hulda Schroeder DVM,ENGINEER -Stanton Torp,MANAGER -Ceasar Franecki,MANAGER -Lelah Miller,ENGINEER -Wyman Schultz,MANAGER -Mae Harris V,MANAGER -Hortense Koelpin,ENGINEER -Wilmer Deckow,ENGINEER -Zaria Ferry,ENGINEER -Pierre Cronin,ENGINEER -Ms. Brenna Leffler,ENGINEER -Dr. Keara Price,ENGINEER -Jane Schroeder,MANAGER -Reymundo Heathcote,MANAGER -Elton Schiller II,MANAGER -Irma Blanda,MANAGER -Mireya Turner,MANAGER -Dawson Streich,MANAGER -Gianni Cassin,MANAGER -Johnathan Kuhic V,MANAGER -Lacey Sawayn,ENGINEER +Miller Zulauf,MANAGER +Mazie Gottlieb,MANAGER +Johnathon Shields Sr.,MANAGER +Paula Windler I,MANAGER +Kaley Gleichner V,ENGINEER +Mr. Roosevelt Welch MD,ENGINEER +Ms. Shirley Purdy V,ENGINEER +Rolando Klein,ENGINEER +Ms. Sandy Osinski,MANAGER +Mr. Kristopher Bauch II,MANAGER +Ali Gleason,ENGINEER +Americo Rempel,ENGINEER +Gladys Marvin DVM,MANAGER +Danial Koch,MANAGER +Mr. 
Leopold Bergnaum,MANAGER +Davonte Larson,MANAGER +Ms. Tiara Collier Jr.,ENGINEER +Jessie Price,ENGINEER +Kathleen Kihn,ENGINEER +Mr. Ruben Haag,MANAGER +Maegan Quitzon,MANAGER +Clifton McKenzie,MANAGER +Lily Collins,ENGINEER +Janie Hartmann,ENGINEER +Ms. Alexandra Torphy IV,MANAGER +Rosemarie Gerlach,ENGINEER +Emilio Schultz,ENGINEER +Adalberto Reilly V,ENGINEER +Jess Schulist,MANAGER +Bianka Swaniawski,MANAGER +Mr. Oliver Lowe,ENGINEER +Winifred Mayert,MANAGER +Lamar Bahringer,ENGINEER +Celia Hamill,MANAGER +Ms. Shanon Murphy IV,ENGINEER +Mr. Doris Gislason IV,MANAGER +Hulda Bechtelar,ENGINEER +Raina Pfeffer IV,ENGINEER +Lilyan Gulgowski,ENGINEER +Mr. Gilbert Ziemann,MANAGER +Rebeca Spencer,ENGINEER +Dion Goodwin,MANAGER +Ms. Shemar Hand,MANAGER +Mr. Adrien Marquardt DDS,MANAGER +Ladarius Veum,MANAGER +Josefina Ernser,ENGINEER +Mr. Geovanny Schowalter DDS,MANAGER +Lera Rohan,MANAGER +Fredy Bogisich,MANAGER +Vada Jones,MANAGER +Alec Waelchi,MANAGER +Violet Schneider,ENGINEER +Nestor Von,ENGINEER +Ms. Zora Ortiz DVM,MANAGER +Mortimer Borer,MANAGER +Adam Rolfson,MANAGER +Willie Cronin,ENGINEER +Tianna Cole,ENGINEER +Eliza McGlynn,ENGINEER +Fidel Heathcote,MANAGER +Brendan Towne,MANAGER +Dean Lowe,MANAGER +Brycen Ortiz,MANAGER +Ford Kertzmann,ENGINEER +Alfredo Franecki,ENGINEER +Monica Pouros Jr.,MANAGER +Ms. Assunta Fay Sr.,MANAGER +Noah Lockman Jr.,MANAGER +Lonny Ebert,ENGINEER +Briana Howe,MANAGER +Ms. Vivienne Kuvalis IV,ENGINEER +Ms. Gracie Hilpert PhD,MANAGER +Thea Cronin,MANAGER +Elenora Oberbrunner I,ENGINEER +Ms. Rebecca Bergnaum,MANAGER +Hudson Lebsack,MANAGER +Sigurd Abbott,MANAGER +Kennith Wiegand V,MANAGER +Dalton Carter,MANAGER +Raegan Balistreri,ENGINEER +Brandy Cronin,ENGINEER +Ms. Adella Runolfsdottir DDS,ENGINEER +Robin Walsh,ENGINEER +Emmanuelle Rempel,ENGINEER +Mr. 
Jan Ullrich DVM,ENGINEER +Stefan Eichmann,ENGINEER +Giles Keeling,MANAGER +Bell O'Reilly,MANAGER +Molly Grant,MANAGER +Karlee Anderson,MANAGER +Jordyn Heller,ENGINEER +Eleonore Dibbert Sr.,ENGINEER +Efrain Borer I,MANAGER +Ms. Alisha Wiza,ENGINEER +David Haag,MANAGER +Ms. Rebecca Schmeler,MANAGER +Ebba Legros,MANAGER +Assunta McGlynn,MANAGER +Joelle Heller II,ENGINEER +Rasheed Considine,ENGINEER +Mr. Jabari Borer,ENGINEER +Mr. Americo O'Conner,ENGINEER +Mr. Johnny Kautzer DDS,MANAGER +Beverly Schimmel,MANAGER +Ms. Sarina Kuhn DDS,ENGINEER +Eusebio Kertzmann II,ENGINEER +Hilton O'Keefe,ENGINEER +Reina Weissnat,MANAGER +Hassie Kling,MANAGER +Lera Gleason,ENGINEER +Waldo Deckow,MANAGER +Mr. Norris Kling,MANAGER +Ms. Annabel Nikolaus,MANAGER +Cheyanne Hilll,MANAGER +Gage Jacobs,MANAGER +Cassidy Stokes,MANAGER +Georgianna Jacobs,ENGINEER +Mr. Ubaldo Cormier,ENGINEER +Laurie Berge,MANAGER +Mr. Karl Stehr Jr.,MANAGER +Mr. Jaquan Gibson,ENGINEER +Delores Stiedemann,ENGINEER +Anabelle Yundt,MANAGER +Gaston Bergstrom MD,ENGINEER +Verona Halvorson,ENGINEER +Ms. Bridie Thompson V,MANAGER +Albertha Tremblay,MANAGER +Courtney Becker,MANAGER +Bessie Homenick,MANAGER +Lilian Osinski,ENGINEER +Bertrand Altenwerth,MANAGER +Cruz Gottlieb,ENGINEER +Willy Runte,MANAGER +Ms. Estelle Carter,MANAGER +Arnoldo Emard,MANAGER +Brenna Lockman,ENGINEER +Kelly Bergnaum,MANAGER +Adah Labadie,MANAGER +Nichole Koepp,MANAGER +Jettie Jacobs,ENGINEER +Kellie Brekke V,MANAGER +Nya Russel IV,ENGINEER +Rowan Kling,ENGINEER +Dariana Oberbrunner V,ENGINEER +Angelo Altenwerth I,ENGINEER +Britney Herman,MANAGER +Abigayle Reichel,ENGINEER +Ms. Miracle Grady IV,MANAGER +Ms. Alison Bartoletti IV,MANAGER +Laney Nitzsche,ENGINEER +Laura Terry PhD,ENGINEER +Breanne Koepp,ENGINEER +Ines Medhurst,ENGINEER +Theodore Stokes,MANAGER +Ms. 
Yasmine Schulist,ENGINEER +Adolfo O'Reilly,ENGINEER +Kaelyn Schaden,MANAGER +Jorge Bergnaum,ENGINEER +Ignacio Wyman,ENGINEER +Dannie Will,MANAGER +Margaretta Abbott,ENGINEER +Briana Murray PhD,ENGINEER +Brady Rath,ENGINEER +Rahsaan Adams,MANAGER +Mr. Hayley Dicki,ENGINEER +Colton Pouros,ENGINEER +Vida Schultz,ENGINEER +Alexanne Turcotte,MANAGER +Reba Stroman,MANAGER +Paxton O'Connell MD,MANAGER +Don Lesch,ENGINEER +Mr. Carter Von IV,ENGINEER +Candida Boyer,MANAGER +Mr. Raymond Krajcik DVM,ENGINEER +Markus Waelchi,ENGINEER +Malachi Cummerata,ENGINEER +Mr. Edmond Dietrich IV,MANAGER +Lenora Reynolds,ENGINEER +Zoie Kiehn,ENGINEER +Mr. Mason Lang II,MANAGER +Virginia Reilly,ENGINEER +Claude Emmerich,MANAGER +Sebastian Prosacco,MANAGER +Mr. Dashawn O'Hara,MANAGER +Emmy Turcotte III,ENGINEER +Sincere Legros,MANAGER +Mauricio Reilly,ENGINEER +Ms. Kimberly Bogan I,ENGINEER +Ms. Cassandre Murphy PhD,ENGINEER +Kaycee Kshlerin I,ENGINEER +Ms. Cheyanne Rodriguez PhD,ENGINEER +Gayle Wiza,MANAGER +Ms. Desiree Metz,ENGINEER +Hettie Beier MD,ENGINEER +Shakira Ledner,MANAGER +Forrest Ullrich,ENGINEER +Alysha Wolff III,MANAGER +Mr. Lorenza Okuneva DVM,ENGINEER +Nasir Walsh,MANAGER +Jeanette Rogahn,ENGINEER +Gilda Funk I,MANAGER +Julius Gusikowski,ENGINEER +Agnes Schuster,ENGINEER +Delphine Thompson,ENGINEER +Tressa Nolan,ENGINEER +Leatha Reilly I,MANAGER +Mr. Jamar Mueller,ENGINEER +Kyleigh Franecki,MANAGER +Cornell Bashirian MD,MANAGER +Ramiro Bogan,MANAGER +Robyn McKenzie,MANAGER +Destini Rippin,MANAGER +Eladio Mraz,MANAGER +Jayda Moore III,MANAGER +Alivia Koelpin,ENGINEER +Alva Muller,ENGINEER +Ms. Karianne Dibbert Sr.,MANAGER +Hazle Langworth,MANAGER +Geraldine Koelpin DVM,MANAGER +Margarett Swaniawski,MANAGER +Major Mosciski,MANAGER +Cara Kuphal,MANAGER +Deshawn Fisher,ENGINEER +Mr. Leopoldo VonRueden DDS,MANAGER +Horacio O'Connell,MANAGER +Salvatore Hermann,ENGINEER +Dessie Franecki,MANAGER +Mr. 
Torrey Macejkovic,MANAGER +Wendell Boyer,ENGINEER +Providenci Heidenreich,ENGINEER +Mr. Dorthy Williamson DDS,ENGINEER +Ena Green DDS,MANAGER +Ms. Aaliyah Bernhard,MANAGER +Grace Quitzon,ENGINEER +Germaine VonRueden,MANAGER +Lionel Goldner,MANAGER +Pearline Osinski,ENGINEER +Claud Mayert,MANAGER +Palma Grady,ENGINEER +Andre Conn,MANAGER +Gerardo Wilderman,MANAGER +Ms. Lydia Moore,ENGINEER +Walter Kub,ENGINEER +Elbert Gottlieb,ENGINEER +Arnoldo Quitzon,ENGINEER +Macy Conroy,ENGINEER +Cecil Metz,MANAGER +Ms. Loyce Torphy MD,ENGINEER +Rylan Wyman PhD,MANAGER +Clair Luettgen DDS,MANAGER +Mr. Rickey Lehner,ENGINEER +Kian Walter,ENGINEER +Ms. Coralie Carroll II,MANAGER +Ms. Leann Gutkowski Sr.,ENGINEER +Presley Hirthe,MANAGER +Leola King,MANAGER +Maribel Mueller,MANAGER +Dean Hilll,ENGINEER +Rosella Green,MANAGER +Clementine Klein,ENGINEER +Mr. Riley Roob V,MANAGER +Newell Beier,MANAGER +Orval Rodriguez,MANAGER +Ms. Neha Kerluke,MANAGER +Mr. Roger Farrell V,ENGINEER +Charlie Brakus,MANAGER +Judge Nitzsche,ENGINEER +Mr. Triston Jakubowski V,ENGINEER +Ms. Rhoda Luettgen,ENGINEER +Felicia Roob,MANAGER +Wendy Gulgowski,ENGINEER +Ms. Destiny Stoltenberg,MANAGER +Mr. Jaylen Metz,ENGINEER +Ms. Annamarie Heathcote,MANAGER +Cayla Heaney,ENGINEER +Eleanora Cruickshank,MANAGER +Susie Schowalter,MANAGER +Ms. Clarabelle Reichert Sr.,ENGINEER +Whitney Kuhn,MANAGER +Mr. Dangelo Dibbert DDS,MANAGER +Mr. Jess Willms II,ENGINEER +Madisyn Waelchi PhD,ENGINEER +Ms. Sister Tillman,ENGINEER +Fern Weimann DVM,MANAGER +Dejah Kunze II,MANAGER +Mr. Rahul Lubowitz,MANAGER +Antonia Zulauf DDS,MANAGER +Ms. Valentina Johnson DDS,MANAGER +Shaylee Sauer,MANAGER +Ms. Shanie Ruecker,ENGINEER +Mandy Boehm III,MANAGER +Greg Hintz,MANAGER +Vesta Lockman,ENGINEER +Lester Bahringer,MANAGER +Kianna McGlynn,MANAGER +Matteo Turner,MANAGER +Lexus Gorczany,ENGINEER +Otilia Schumm,ENGINEER +Ms. 
Gabriella Boyle,MANAGER +Edward Block,ENGINEER +Keanu Russel Jr.,MANAGER +Raquel Schroeder,MANAGER +Coralie Boyer,ENGINEER +Ms. Stefanie Ernser,MANAGER +Ms. Dorothy Reynolds,MANAGER +Riley Little MD,ENGINEER +Mr. Bennie Champlin Jr.,MANAGER +Ms. Ettie Koelpin DVM,ENGINEER +Saige Schaefer,ENGINEER +Kyra Barrows,MANAGER +Felton Gerhold PhD,MANAGER +Mr. Freeman Abshire PhD,ENGINEER +Terrence O'Conner,MANAGER +Ms. Clarabelle Gibson DDS,ENGINEER +Nikko Nader,MANAGER +Mr. Arturo Moore,ENGINEER +Wayne Abshire,ENGINEER +Sydnee Schaden,MANAGER +Matteo Robel IV,ENGINEER +Maverick Wyman,MANAGER +Mr. Westley Predovic V,MANAGER +Selina Donnelly,MANAGER +Mr. Alfonzo Von I,ENGINEER +Doug Ruecker,ENGINEER +Denis Yost,MANAGER +Cyril Robel Sr.,ENGINEER +Ford Jacobi,ENGINEER +Dillan Grimes,ENGINEER +Ms. Cecelia Kiehn PhD,ENGINEER +Rachael Kshlerin,ENGINEER +Raven Kihn,MANAGER +Ms. Eileen Friesen,ENGINEER +Mr. Jett Bernhard,MANAGER +Haylie Spinka,ENGINEER +Kailey Towne Jr.,MANAGER +Mr. Jed Rutherford,MANAGER +Angel Schneider,MANAGER +Emma Pacocha,MANAGER +Jeremie Rogahn,MANAGER +Porter Herman,ENGINEER +Priscilla Stanton,ENGINEER +Aurelio Turner,MANAGER +Melvin McLaughlin,ENGINEER +Brandy Friesen,MANAGER +Angelo Pfeffer,MANAGER +Mr. Jerrold Prohaska,ENGINEER +Margret Turcotte,ENGINEER +Kathryne Schaefer II,ENGINEER +Kamille Blanda,ENGINEER +Addie Tremblay,MANAGER +Cathrine Bernier,MANAGER +Jesus Bashirian,ENGINEER +Dillon Hamill,MANAGER +Granville Lemke,ENGINEER +Vito Greenholt III,ENGINEER +Evie Kunde,MANAGER +Lela Renner,MANAGER +Brendon Reynolds,MANAGER +Mr. Brandt Larson,ENGINEER +Mr. Arthur Brakus Jr.,MANAGER +Haylie Gerhold,MANAGER +Emile Crist,ENGINEER +Mr. Kale Schamberger DVM,ENGINEER +Ms. Oceane Kerluke II,MANAGER +Matteo Auer,MANAGER +Audie Kreiger IV,ENGINEER +Mr. 
Niko Yost Jr.,ENGINEER +Sage Bruen,ENGINEER +Francisco Feeney,ENGINEER +Jared Koelpin,MANAGER +Claud Lesch,ENGINEER +Sherman Okuneva Jr.,ENGINEER +Jeramy Strosin,ENGINEER +Amya Schumm,ENGINEER +Tracey Wunsch,MANAGER +Krystal Kunze,ENGINEER +Talon Wyman V,ENGINEER +Brice Mills,MANAGER +Scottie Bergstrom,MANAGER +Ms. Shanny Beatty,MANAGER +Shannon Witting,MANAGER +Taya Fahey V,MANAGER +Ms. Lauriane Sanford,MANAGER +Emilia Lesch,MANAGER +Randal Predovic,ENGINEER +Piper Heidenreich I,ENGINEER +Jakob Kassulke,MANAGER +Wilma Satterfield,ENGINEER +Dahlia Paucek,MANAGER +Peyton Will Jr.,MANAGER +Reginald Durgan Sr.,ENGINEER +Miracle Sauer,MANAGER +Dewayne Wintheiser,MANAGER +Emelia Langworth,MANAGER +Ms. Elissa Reichel,MANAGER +Gillian Ledner,ENGINEER +Mr. Morgan Bernier IV,ENGINEER +Mr. Brooks Prosacco,ENGINEER +Mr. Junius Hintz,ENGINEER +Astrid Heathcote DVM,MANAGER +Ms. Brooklyn Parker,MANAGER +Domingo Heller,MANAGER +Mr. Jayde Dooley Sr.,ENGINEER +Jada Carroll Sr.,ENGINEER +Enoch Hammes,MANAGER +Ms. Mckayla Jast,ENGINEER +Ulices Wunsch,MANAGER +Ms. Jacklyn Bayer I,ENGINEER +Adolph Kshlerin,ENGINEER +Magnolia Baumbach PhD,MANAGER +Noelia Windler,ENGINEER +Jaeden Hand,MANAGER +Heather Wilderman,MANAGER +Daphne Hamill,MANAGER +Octavia Durgan,MANAGER +Anibal Kerluke,MANAGER +Mr. Orval Hammes Jr.,MANAGER +Kristin Conn,MANAGER +Magdalen Dietrich IV,MANAGER +Ms. Haylie Rosenbaum MD,ENGINEER +Elenor Howe,ENGINEER +Yvonne Roberts,ENGINEER +Oda Wisoky,MANAGER +Ms. Aryanna Grimes Jr.,ENGINEER +Jocelyn Wilkinson,MANAGER +Neil Hirthe,ENGINEER +Aric Heidenreich,MANAGER +Ms. Julia Carter,ENGINEER +Darlene Pacocha,MANAGER +Kathryne Hoeger,ENGINEER +Alysson Carter V,ENGINEER +Mr. Nicolas Blick V,ENGINEER +Elaina Von,ENGINEER +Ms. Ellen Mayer IV,MANAGER +Darryl Casper,ENGINEER +Mr. Kadin Metz V,ENGINEER +Ms. Electa Quitzon Sr.,MANAGER +Mr. Hiram Howe MD,MANAGER +Ole Marvin,MANAGER +Claudia Rice,MANAGER +Forest Swaniawski,MANAGER +Roslyn Dickens,ENGINEER +Ms. Janis Wuckert V,MANAGER +Ms. 
Meda Jerde DVM,ENGINEER +Dolores Schiller II,ENGINEER +Mr. Enid Keeling,ENGINEER +Burnice Bosco DDS,ENGINEER +Katheryn Becker,MANAGER +Samantha Schmidt,ENGINEER +Mr. Royce Bashirian,ENGINEER +Mitchell Parker,ENGINEER +Ms. Eryn Bins,ENGINEER +Sven Harvey,ENGINEER +Mr. Melany Dibbert,MANAGER +Ms. Ardella Sipes Jr.,MANAGER +Joy Reynolds,MANAGER +Lyda Quitzon,MANAGER +Sophie Gusikowski,ENGINEER +Ashly Jacobi,ENGINEER +Haylie Maggio,ENGINEER +Deshaun Walsh,ENGINEER +Mr. Malachi Gibson,MANAGER +Edna Fadel,MANAGER +Ms. Eula Bogan,MANAGER +Mr. Javier Kemmer DDS,MANAGER +Nicole Conroy,MANAGER +Cordie Wunsch V,MANAGER +Pablo Jones,ENGINEER +Ms. Jolie Schuppe DDS,MANAGER +Amya Swift,ENGINEER +Alessandro Bartoletti,MANAGER +Sonya Schmitt,MANAGER +Ava D'Amore,ENGINEER +Raphaelle Murphy,MANAGER +Seth King DVM,MANAGER +Mr. Wilber Schaden III,MANAGER +Brian White,ENGINEER +Sage Parker,MANAGER +Katharina Wiegand,MANAGER +Eric Weimann,ENGINEER +Marcellus Jakubowski,ENGINEER +Angeline Pouros,MANAGER +Samara Veum,ENGINEER +Alberto Zemlak MD,ENGINEER +Liza Nolan III,MANAGER +Guy Turcotte,MANAGER +Danyka Stamm MD,MANAGER +Alverta Dach,ENGINEER +Maybelline Kihn,ENGINEER +Carlo Ratke,MANAGER +Margaretta Von,ENGINEER +Pinkie Green,ENGINEER +Mr. Elvis Gulgowski,MANAGER +Chanel Harber,ENGINEER +Carroll Gutmann,MANAGER +Emma Stehr,MANAGER +Rusty Gerhold,MANAGER +Nella Graham,ENGINEER +Carolyne Rippin DDS,ENGINEER +Conor Rowe,MANAGER +Gilda Mayert II,ENGINEER +Mr. Cale Ortiz Jr.,ENGINEER +Morgan Parisian IV,ENGINEER +Carolanne Russel,ENGINEER +Grayson VonRueden DVM,ENGINEER +Ms. Jazmyne Larkin I,MANAGER +Courtney Hirthe,ENGINEER +Maximus Shanahan MD,MANAGER +Felipa Wiegand,ENGINEER +Ms. Madalyn Corwin III,ENGINEER +Eldridge West,ENGINEER +Ms. Cleta Renner,MANAGER +Katelynn Erdman,MANAGER +Scotty Cassin,MANAGER +Melyna Kertzmann,ENGINEER +Mariam McCullough,ENGINEER +Demarcus Donnelly V,ENGINEER +Mr. 
Ansley Bernhard Sr.,MANAGER +Chad Ondricka V,ENGINEER +Liliana Hessel,MANAGER +Ransom Crist,MANAGER +Aisha Boehm,ENGINEER +Adaline O'Reilly PhD,ENGINEER +Dayna Hoppe,MANAGER +Mr. Lee Kreiger Jr.,ENGINEER +Payton Cummerata,MANAGER +Colten Luettgen,MANAGER +Ms. Nakia Johns,ENGINEER +Freida Strosin,MANAGER +Brad Bednar,ENGINEER +Antone McLaughlin,MANAGER +Izabella Berge,ENGINEER +Carole Lowe V,ENGINEER +Bud Leuschke,ENGINEER +Archibald Hilpert,MANAGER +Fernando Ebert,MANAGER +Jeramy Abshire,ENGINEER +Jannie Weber,ENGINEER +Mr. Mason Wolff MD,ENGINEER +Nelle Kutch III,ENGINEER +Keshawn Sporer,ENGINEER +Caterina Kunze PhD,ENGINEER +Dashawn Block,MANAGER +Shania Witting,MANAGER +Amos Stamm,ENGINEER +Alexandra Reichel Jr.,MANAGER +Mr. Edwardo Schneider V,MANAGER +Ms. Brigitte Waelchi,MANAGER +Jeanne Keeling,ENGINEER +Forest Von,MANAGER +Mr. Casimer Jast,MANAGER +Ms. Ebba Conn,MANAGER +Jany Yost,ENGINEER +Hassie Mosciski,MANAGER +Mr. Zion Rosenbaum Sr.,MANAGER +Ignacio Thompson MD,MANAGER +Jeanette Conroy IV,MANAGER +Garrick Graham,MANAGER +Misael Kuvalis,MANAGER +Helene Dickinson III,ENGINEER +Reilly Murazik,MANAGER +Jadon Ullrich,ENGINEER +Ms. Audra Borer,MANAGER +Earnestine Sauer,ENGINEER +Myrl Gerlach,ENGINEER +Mr. Garret Krajcik,ENGINEER +Emmet Hayes,MANAGER +Ahmed Donnelly Jr.,ENGINEER +Mr. Alvah Schimmel,MANAGER +Ms. Marilyne Harris I,ENGINEER +Ms. Casandra Zulauf,MANAGER +Alfred Yundt,MANAGER +Ford Herman,MANAGER +Paige Beier,ENGINEER +Myrtle Runte,ENGINEER +Roy Rosenbaum,ENGINEER +Mr. Julius Borer III,ENGINEER +Noemi Hamill,ENGINEER +Francisco Koelpin PhD,MANAGER +Ms. Pascale Emard,MANAGER +Daryl Wisozk DVM,MANAGER +Mr. Noble Runolfsson,MANAGER +Ms. Daija Schoen,ENGINEER +Ubaldo Dach,ENGINEER +Katrine Parker PhD,MANAGER +Ms. Martine Treutel IV,MANAGER +Ms. Alice Emard II,ENGINEER +Garrison Nikolaus,ENGINEER +Mr. Tanner Baumbach IV,ENGINEER +Austin Wisozk II,MANAGER +Mr. Saul Little,MANAGER +Ms. Ashtyn O'Conner,ENGINEER +Mr. Norval Wisozk,ENGINEER +Ms. 
Daniella Murphy I,MANAGER +Rocky Osinski,MANAGER +Ms. Maybelline Powlowski,MANAGER +Aliyah Lindgren PhD,ENGINEER +Bradley Hauck,ENGINEER +Ramiro Kilback,MANAGER +Colten Hermann,MANAGER +Neva Hoppe,MANAGER +Ms. Stephania McGlynn PhD,MANAGER +Florencio Williamson,MANAGER +Ruby Langworth,ENGINEER +Mr. Stevie Corkery,MANAGER +Birdie Herzog V,ENGINEER +Mr. Boyd Harvey,ENGINEER +Ms. Josianne Hauck I,MANAGER +Jose Marquardt V,MANAGER +Cullen Gutmann,MANAGER +Mr. Ola Brown DVM,ENGINEER +Blanca Gleichner,ENGINEER +Mr. Evan Lesch,MANAGER +Hazle Grimes,ENGINEER +Mr. Ottis Walsh III,MANAGER +Sigmund Nienow,ENGINEER +Mr. Tyrel Hermiston DDS,ENGINEER +Mr. Woodrow Stark,ENGINEER +Twila Flatley,ENGINEER +Mr. Murl Howe DDS,MANAGER +Kathryne Price,ENGINEER +Ms. Hellen Lind IV,MANAGER +Ms. Alyson Lehner,MANAGER +Verda Hyatt,MANAGER +Viola Pacocha,ENGINEER +Lorenza Dietrich,MANAGER +Ms. Lavada Runte II,MANAGER +Doris Rohan,MANAGER +Santina Sauer,ENGINEER +Adrianna Parisian,ENGINEER +Ms. Maurine Senger III,ENGINEER +Ms. Delfina Murray II,MANAGER +Jane Kilback,ENGINEER +Mr. Kirk Braun DDS,MANAGER +Maynard Hills,MANAGER +Emmitt Marks,ENGINEER +Patience Hilll,MANAGER +Dave Volkman,ENGINEER +Sibyl Gleichner,MANAGER +Ressie Koelpin,MANAGER +Erwin Beer,MANAGER +Geovanni Goodwin,MANAGER +Alexanne Mills,MANAGER +Ms. Meaghan Kris DDS,MANAGER +Mr. Stanton Nicolas,ENGINEER +Elliot Welch,ENGINEER +Mr. Mathew Schowalter,ENGINEER +Ms. May Lowe MD,MANAGER +Ms. Henriette Rau,MANAGER +Reuben Wiza IV,ENGINEER +Mr. Roman Bernhard,ENGINEER +Norval Breitenberg,ENGINEER +Deshawn Gibson,ENGINEER +Nola Corwin,ENGINEER +Bernadine Beatty III,MANAGER +Berneice Baumbach,ENGINEER +Aaliyah Brown,ENGINEER +Mariana Mills,ENGINEER +Filiberto Jenkins,ENGINEER +Madisen Bode,MANAGER +Brant Kuvalis Sr.,MANAGER +Mr. Wiley Green,MANAGER +Nova Padberg,ENGINEER +Rosanna Carter,ENGINEER +Jacquelyn Rau,MANAGER +Mr. Gunner Kub I,MANAGER +Roberto Spinka,MANAGER +Bettie Greenfelder,ENGINEER +Ms. Nedra Towne,MANAGER +Ms. 
Amely Cartwright,ENGINEER +Ms. Joy Wiegand,ENGINEER +Kayley Moore DDS,MANAGER +Philip Mayer,MANAGER +Ms. Mae Bartell,MANAGER +Efren Paucek,MANAGER +Mr. Marques Mayert DDS,MANAGER +Zelma Zboncak,MANAGER +Camila Buckridge,MANAGER +Mr. Savion Trantow V,ENGINEER +Lonny Bahringer,ENGINEER +Mr. Valentin Stehr,MANAGER +Chanelle Willms,ENGINEER +Holly Swaniawski V,MANAGER +Eloy Bailey,ENGINEER +Doris Buckridge,ENGINEER +Mr. Zackary Fahey MD,MANAGER +Nella DuBuque,MANAGER +Kenya Cronin,MANAGER +Blanche Torp,ENGINEER +Estevan Walsh,ENGINEER +Kurt Greenholt Sr.,MANAGER +Beverly Torp,MANAGER +Ms. Dorothea Hackett,MANAGER +Brigitte Kunde,ENGINEER +Jackeline Harber,MANAGER +Ms. Justine Deckow Sr.,ENGINEER +Vicenta Rolfson,ENGINEER +Holly Tremblay,MANAGER +Emilio Windler,MANAGER +Erin Beier,ENGINEER +Kali Terry,MANAGER +Mr. Aron Walsh,MANAGER +Ms. Veronica Ankunding,MANAGER +Amaya Adams PhD,MANAGER +Milford Watsica II,MANAGER +Ms. Prudence Harvey,MANAGER +Brice Schinner,MANAGER +Kayden Witting,ENGINEER +Shad Beatty,MANAGER +Ms. Sunny Braun DVM,ENGINEER +Ally Kiehn,ENGINEER +Sierra Dietrich Sr.,MANAGER +Diego Hoeger,ENGINEER +Allene Stokes,ENGINEER +Mr. Chaz Kemmer III,MANAGER +Ms. Lurline Kessler II,ENGINEER +Carli Osinski,MANAGER +Prince Kunze,MANAGER +Jared Schoen,ENGINEER +Sven Lesch,ENGINEER +Alec Ernser,MANAGER +Jean Daniel,ENGINEER +Ambrose Hansen,ENGINEER +Ms. Aubree Cruickshank,ENGINEER +Ms. Aniya Doyle,MANAGER +Julianne Weissnat,MANAGER +Emanuel Friesen I,MANAGER +Ms. Trycia Schulist DDS,ENGINEER +Caesar Runolfsson II,MANAGER +Torrance Blick,MANAGER +Kailyn Ortiz,ENGINEER +Mackenzie Farrell V,ENGINEER +Cristian Quitzon III,ENGINEER +Josie O'Connell PhD,MANAGER +Kristian Hermann,ENGINEER +Brady Halvorson,ENGINEER +Foster Monahan,MANAGER +Mr. Aron Dietrich V,MANAGER +Ms. Delilah Blick I,MANAGER +Mr. Ulises Kuvalis IV,MANAGER +Ms. Kimberly Goodwin DDS,ENGINEER +Ms. Tia Kling III,ENGINEER +Mr. 
Jovanny Hoppe IV,MANAGER +Mohamed Brekke I,MANAGER +Dillan Jenkins,MANAGER +Alyce Willms,ENGINEER +Claudine Corkery,MANAGER +Royal Krajcik,MANAGER +Mr. Tyrese Erdman,ENGINEER +Geo Reichel,ENGINEER +Euna Vandervort,ENGINEER +Anjali Moen,MANAGER +Ms. Yvette Treutel PhD,ENGINEER +Kolby Yundt,MANAGER +Mr. Kelton Graham,ENGINEER +Don Corkery,ENGINEER +Wilburn Stark,ENGINEER +Estel Lesch,MANAGER +Kara Hoeger Sr.,MANAGER +Mr. Rhiannon Daugherty,MANAGER +Mr. Diego Wolff,MANAGER +Fay Hansen,MANAGER +Imani Fritsch,MANAGER +Derrick Fay,MANAGER +Newell Marks,MANAGER +Lexie Koch,ENGINEER +Irma Mayert,MANAGER +Schuyler Prosacco,ENGINEER +Madaline Trantow,ENGINEER +Abbie Witting II,ENGINEER +Jocelyn Sawayn PhD,MANAGER +Ms. Laisha Auer,MANAGER +Gregoria Krajcik,MANAGER +Rosalind Considine,ENGINEER +Noemie Howell,ENGINEER +Micah Pollich,ENGINEER +Mr. Jett Parisian DVM,ENGINEER +Edythe Haley PhD,ENGINEER +Carlos Breitenberg,MANAGER +Lina Bayer DVM,ENGINEER +Ms. Ora Cremin,MANAGER +Mr. Hilario Bogan,ENGINEER +Ramona Turcotte,MANAGER +Elisabeth Sanford,MANAGER +Telly Streich,MANAGER +Ms. Naomie Feest I,MANAGER +Ariane Mayer,ENGINEER +German Kuhlman IV,ENGINEER +Ruby Tremblay Sr.,ENGINEER +Ocie Pollich,MANAGER +Jaqueline Bailey,ENGINEER +Mr. Kyler VonRueden,ENGINEER +Cydney Stanton MD,ENGINEER +Marjorie Ritchie,ENGINEER +Marjolaine Kuphal,ENGINEER +Jewell McDermott,ENGINEER +Armand Batz,ENGINEER +Gudrun Moen,ENGINEER +Ms. Hilda Friesen,MANAGER +Annalise Kshlerin,ENGINEER +Josephine Thiel,ENGINEER +Casper Towne III,ENGINEER +Buster Lubowitz,MANAGER +Ms. Jazmin Schultz,MANAGER +Mr. Austyn Schuppe,MANAGER +Mr. Geovanni Wyman,MANAGER +Mr. Fletcher Rice,MANAGER +Carleton Stoltenberg,ENGINEER +Evan Jaskolski DDS,ENGINEER +Ms. Mertie Collins,ENGINEER +Marianna Pfannerstill Jr.,ENGINEER +Conor Wilderman,MANAGER +Jordan Mann,ENGINEER +Mr. 
Mavis Gutkowski,ENGINEER +Griffin Ratke Jr.,ENGINEER +Dan Funk,MANAGER +Rafael Denesik,ENGINEER +Jazmyne Runolfsson DDS,ENGINEER +Aurore Dach,ENGINEER +Alvah Gaylord,MANAGER +Mr. Jerod Gleichner,ENGINEER +Ms. Brisa Ledner,MANAGER +Ms. Rahsaan Stokes,ENGINEER +Mr. Wyatt Stokes MD,ENGINEER +Molly Smitham,ENGINEER +Madison Block,MANAGER +Briana Dickens,ENGINEER +Javonte Von,MANAGER +Marley Erdman,MANAGER +Luciano Christiansen,MANAGER +Skye Schinner,ENGINEER +Icie Denesik,MANAGER +Lulu Thiel,ENGINEER +Deion Little,ENGINEER +Serenity Davis MD,MANAGER +Wilburn Muller,MANAGER +Allen Jacobson,MANAGER +Ms. Jada Krajcik V,MANAGER +Chester Klein,MANAGER +Graciela Bailey,MANAGER +Myrtice Bergnaum,MANAGER +Mr. Allan Howell,MANAGER +Dolly Hagenes Jr.,ENGINEER +Audreanne Beahan,ENGINEER +Kraig Kuvalis,MANAGER +Fausto Stokes,ENGINEER +Una Orn,MANAGER +Ms. Yasmine Pfeffer,ENGINEER +Ms. Cassie Brakus,ENGINEER +Ora Lynch MD,MANAGER +Viviane Botsford MD,MANAGER +Jade Stamm,MANAGER +Norene Keebler II,MANAGER +Orlando Reilly,ENGINEER +Ms. Gertrude Greenholt,MANAGER +Zora Koelpin,MANAGER +Everett Funk,MANAGER +Anthony Connelly Jr.,ENGINEER +Ruthie Parker,MANAGER +Orlo Zemlak,ENGINEER +Baylee Kirlin Sr.,ENGINEER +Catharine Bosco,MANAGER +Clotilde Kshlerin,ENGINEER +Ms. Shanon Strosin PhD,MANAGER +Kim Kub,ENGINEER +Chase Walsh III,MANAGER +Ms. Dahlia Gleason,MANAGER +Mr. Nelson Reilly,MANAGER +Viola Schoen,MANAGER +Yesenia Jones,ENGINEER +Lavinia Beier,ENGINEER +Marcella Wunsch,ENGINEER +Zelma Hirthe,ENGINEER +Shany Borer,ENGINEER +Eusebio Stroman,MANAGER +Mr. Frankie Johnson V,ENGINEER +Mr. Liam Gleason,MANAGER +Moises Greenfelder II,ENGINEER +Ms. Gretchen Corkery,MANAGER +Ms. Hassie Haley Jr.,MANAGER +Haleigh Carter,MANAGER +Keara Schuster,MANAGER +Christelle Armstrong,ENGINEER +Misael Hamill,ENGINEER +Mr. Dillan Friesen,MANAGER +Mr. Jaydon Mann IV,ENGINEER +Audra Goodwin,ENGINEER +Mr. 
Jovan Wilkinson,ENGINEER +Meggie Kuhic,ENGINEER +Maiya Lindgren,MANAGER +Tanner Wolf,ENGINEER +Tina Barton,MANAGER +Zola Swift,ENGINEER +Verda Fisher,ENGINEER +Joesph Stroman Jr.,ENGINEER +Bart Pfannerstill,MANAGER +Ms. Pattie Tremblay,MANAGER +Mr. Sigurd Schaden DVM,ENGINEER +Abdiel Schumm,MANAGER +Columbus Denesik,ENGINEER +Mr. Demario Shields,ENGINEER +Yasmeen Lowe,MANAGER +Maryjane McGlynn PhD,ENGINEER +Ms. Flavie Rutherford,ENGINEER +Alanis Wuckert,MANAGER +Ms. Sophie Predovic,ENGINEER +Selina Torp,ENGINEER +Melyssa Tromp DDS,ENGINEER +Adrain Rohan,MANAGER +Ms. Elsa Stroman,MANAGER +Constance Ebert,ENGINEER +Abigayle Schiller,MANAGER +Arden Flatley MD,ENGINEER +Leonor Jaskolski,ENGINEER +Mr. Antone Skiles PhD,ENGINEER +Bert Balistreri V,ENGINEER +Sierra Stark Jr.,ENGINEER +Thad Kilback,MANAGER +Ms. Sydni Klein IV,MANAGER +Owen Herman,ENGINEER +Jaquelin Hodkiewicz,MANAGER +Joel Corkery MD,ENGINEER +Magnus Christiansen,MANAGER +Ms. Verla Considine V,ENGINEER +Mr. Gage Murazik PhD,ENGINEER +Mr. Pedro Abshire,MANAGER +Ms. Frances Schiller,ENGINEER +May Casper,ENGINEER +Christiana Towne,ENGINEER +Lester Beier,MANAGER +Juwan Crooks,MANAGER +Brown Borer Sr.,ENGINEER +Gregory Funk V,MANAGER +Ms. Alaina Altenwerth,MANAGER +Ms. Aylin Auer,ENGINEER +Mr. Isac Buckridge,ENGINEER +Mr. Morgan Kilback DVM,MANAGER +Rasheed Casper,MANAGER +Jennyfer Walker,ENGINEER +Shaun Fay,MANAGER +Ms. Thalia Weissnat,ENGINEER +Hoyt Balistreri,MANAGER +Mr. Ronny Sipes,ENGINEER +Jailyn Hoppe,ENGINEER +Maximillian Howe,ENGINEER +Mr. Sofia Hills,ENGINEER +Catharine Kunde,MANAGER +Ms. Bryana Kreiger IV,ENGINEER +Lavonne Reinger,ENGINEER +Albert Keeling,ENGINEER +Ms. Taya Feest III,ENGINEER +Maida Daugherty,MANAGER +Myra Heathcote,ENGINEER +Brooke Price,MANAGER +Mr. Kale Watsica,ENGINEER +Abbigail Wiza III,MANAGER +Jaron Dickinson,ENGINEER +Angus Senger,MANAGER +Ms. Abbie Aufderhar,MANAGER +Ms. 
Josefina Stark III,ENGINEER +Craig Osinski,MANAGER +Lenny Mayer DDS,ENGINEER +Mireya Wilderman,ENGINEER +Marianne Wiegand,MANAGER +Hilario Schumm,ENGINEER +Mr. Myron Dare,ENGINEER +Micaela Ryan MD,ENGINEER +Emmett Becker,MANAGER +Jamey Wolf,ENGINEER +Lue Dooley,MANAGER +Shaun Kris,MANAGER +Ms. Imogene Ledner DVM,MANAGER +Neil Connelly,ENGINEER +Mr. Rudy Volkman,ENGINEER +Patricia O'Reilly,ENGINEER +Mr. Jarred Treutel V,ENGINEER +Mr. Omari Murazik I,ENGINEER +Albin Lockman,MANAGER +Ms. Nona Grady,MANAGER +Terrence Murphy,MANAGER +Mr. Madyson Deckow,MANAGER +Ms. Carissa Kling,ENGINEER +Mr. Fredy Bogisich,ENGINEER +Reanna Wuckert,ENGINEER +Angie Nolan,MANAGER +Virginie Braun,ENGINEER +Jarred Flatley,ENGINEER +Ms. Leora Kihn II,MANAGER +Cathrine Kovacek,ENGINEER +Wilmer Becker,MANAGER +Missouri Franecki,ENGINEER +Mekhi Donnelly PhD,MANAGER +Ms. Willow Towne I,MANAGER +Jaylon Rippin,MANAGER +Alanis Hettinger,ENGINEER +Ms. Felicita Becker DVM,ENGINEER +Zelda Daniel,MANAGER +Mr. Emil Jast IV,MANAGER +Kian Satterfield,MANAGER +Mr. 
Cooper Dooley,ENGINEER diff --git a/internal/service/comprehend/test-fixtures/generate/document_classifier/main.go b/internal/service/comprehend/test-fixtures/generate/document_classifier/main.go index 9b7a9e44747b..b2fb0c8a28f8 100644 --- a/internal/service/comprehend/test-fixtures/generate/document_classifier/main.go +++ b/internal/service/comprehend/test-fixtures/generate/document_classifier/main.go @@ -13,7 +13,7 @@ import ( "math/rand" "os" - "syreclabs.com/go/faker" + "github.com/jaswdr/faker/v2" ) var doctypes = []string{ @@ -36,9 +36,9 @@ var spamDocs = []string{ func main() { log.SetFlags(0) - seed := int64(1) // Default rand seed + seed := int64(48) // Default rand seed r := rand.New(rand.NewSource(seed)) - faker.Seed(seed) + fake := faker.NewWithSeedInt64(seed) documentFile, err := os.OpenFile("./test-fixtures/document_classifier/documents.csv", os.O_TRUNC|os.O_CREATE|os.O_WRONLY, 0600) if err != nil { @@ -48,19 +48,19 @@ func main() { documentsWriter := csv.NewWriter(documentFile) for i := 0; i < 100; i++ { - name := faker.Name().Name() + name := fake.Person().Name() doctype := doctypes[r.Intn(len(doctypes))] var line string if doctype == "PHISHING" { - order := faker.RandomString(10) - phone := faker.PhoneNumber().PhoneNumber() + order := fake.RandomStringWithLength(10) + phone := fake.Phone().Number() doc := phishingDocs[r.Intn(len(phishingDocs))] line = fmt.Sprintf(doc, name, order, phone) } else { doc := spamDocs[r.Intn(len(spamDocs))] - product := faker.Commerce().ProductName() - company := faker.Company().Name() + product := fake.Beer().Name() + company := fake.Company().Name() line = fmt.Sprintf(doc, name, product, company) } diff --git a/internal/service/comprehend/test-fixtures/generate/document_classifier_multilabel/main.go b/internal/service/comprehend/test-fixtures/generate/document_classifier_multilabel/main.go index 682130c813b7..36cc3ca1965e 100644 --- 
a/internal/service/comprehend/test-fixtures/generate/document_classifier_multilabel/main.go +++ b/internal/service/comprehend/test-fixtures/generate/document_classifier_multilabel/main.go @@ -14,7 +14,7 @@ import ( "os" "strings" - "syreclabs.com/go/faker" + "github.com/jaswdr/faker/v2" ) const ( @@ -41,9 +41,9 @@ var comedyWords = []string{ func main() { log.SetFlags(0) - seed := int64(1) // Default rand seed + seed := int64(48) // Default rand seed r := rand.New(rand.NewSource(seed)) - faker.Seed(seed) + fake := faker.NewWithSeedInt64(seed) // documentFile, err := os.OpenFile("./test-fixtures/document_classifier_multilabel/documents.csv", os.O_TRUNC|os.O_CREATE|os.O_WRONLY, 0600) documentFile, err := os.OpenFile("../../../test-fixtures/document_classifier_multilabel/documents.csv", os.O_TRUNC|os.O_CREATE|os.O_WRONLY, 0600) @@ -62,7 +62,7 @@ func main() { doctype = strings.Join(doctypes, defaultSeparator) } - title := faker.Lorem().Word() + title := fake.Lorem().Word() var desc string if doctype == "DRAMA" { diff --git a/internal/service/comprehend/test-fixtures/generate/entity_recognizer/main.go b/internal/service/comprehend/test-fixtures/generate/entity_recognizer/main.go index 7ad4937b6d4d..107595fa50f2 100644 --- a/internal/service/comprehend/test-fixtures/generate/entity_recognizer/main.go +++ b/internal/service/comprehend/test-fixtures/generate/entity_recognizer/main.go @@ -15,7 +15,7 @@ import ( "strconv" "strings" - "syreclabs.com/go/faker" + "github.com/jaswdr/faker/v2" ) func main() { @@ -41,9 +41,9 @@ func main() { log.SetFlags(0) - seed := int64(1) // Default rand seed + seed := int64(48) // Default rand seed r := rand.New(rand.NewSource(seed)) - faker.Seed(seed) + fake := faker.NewWithSeedInt64(seed) entitiesFile, err := os.OpenFile("./test-fixtures/entity_recognizer/entitylist.csv", os.O_TRUNC|os.O_CREATE|os.O_WRONLY, 0600) if err != nil { @@ -72,7 +72,7 @@ func main() { } for i := 0; i < 1000; i++ { - name := faker.Name().Name() + name := 
fake.Person().Name() entity := entities[r.Intn(len(entities))] if _, err := fmt.Fprintf(entitiesFile, "%s,%s\n", name, entity); err != nil { diff --git a/internal/service/comprehend/testdata/DocumentClassifier/basic/main_gen.tf b/internal/service/comprehend/testdata/DocumentClassifier/basic/main_gen.tf index 7b15f9f1127b..3e8e7f25d032 100644 --- a/internal/service/comprehend/testdata/DocumentClassifier/basic/main_gen.tf +++ b/internal/service/comprehend/testdata/DocumentClassifier/basic/main_gen.tf @@ -8,7 +8,7 @@ resource "aws_comprehend_document_classifier" "test" { language_code = "en" input_data_config { - s3_uri = "s3://${aws_s3_bucket.test.bucket}/${aws_s3_object.documents.id}" + s3_uri = "s3://${aws_s3_object.documents.bucket}/${aws_s3_object.documents.key}" } depends_on = [ diff --git a/internal/service/comprehend/testdata/DocumentClassifier/basic_v5.100.0/main_gen.tf b/internal/service/comprehend/testdata/DocumentClassifier/basic_v5.100.0/main_gen.tf new file mode 100644 index 000000000000..acaac473bc0a --- /dev/null +++ b/internal/service/comprehend/testdata/DocumentClassifier/basic_v5.100.0/main_gen.tf @@ -0,0 +1,116 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_comprehend_document_classifier" "test" { + name = var.rName + + data_access_role_arn = aws_iam_role.test.arn + + language_code = "en" + input_data_config { + s3_uri = "s3://${aws_s3_object.documents.bucket}/${aws_s3_object.documents.key}" + } + + depends_on = [ + aws_iam_role_policy.test, + ] +} + +data "aws_partition" "current" {} + +# testAccDocumentClassifierBasicRoleConfig + +resource "aws_iam_role" "test" { + name = var.rName + + assume_role_policy = <" { - ConfigDirectory: config.StaticDirectory("testdata/LocationMicrosoftAzureBlobStorage/region_override/"), + ConfigDirectory: config.StaticDirectory("testdata/LocationAzureBlob/region_override/"), ConfigVariables: config.Variables{ acctest.CtRName: config.StringVariable(rName), "region": config.StringVariable(acctest.AlternateRegion()), @@ -163,7 +170,7 @@ func TestAccDataSyncLocationMicrosoftAzureBlobStorage_Identity_RegionOverride(t // Step 3: Import command without appended "@" { - ConfigDirectory: config.StaticDirectory("testdata/LocationMicrosoftAzureBlobStorage/region_override/"), + ConfigDirectory: config.StaticDirectory("testdata/LocationAzureBlob/region_override/"), ConfigVariables: config.Variables{ acctest.CtRName: config.StringVariable(rName), "region": config.StringVariable(acctest.AlternateRegion()), @@ -179,7 +186,7 @@ func TestAccDataSyncLocationMicrosoftAzureBlobStorage_Identity_RegionOverride(t // Step 4: Import block with Import ID and appended "@" { - ConfigDirectory: config.StaticDirectory("testdata/LocationMicrosoftAzureBlobStorage/region_override/"), + ConfigDirectory: config.StaticDirectory("testdata/LocationAzureBlob/region_override/"), ConfigVariables: config.Variables{ acctest.CtRName: config.StringVariable(rName), "region": config.StringVariable(acctest.AlternateRegion()), @@ -201,7 +208,7 @@ func TestAccDataSyncLocationMicrosoftAzureBlobStorage_Identity_RegionOverride(t // Step 5: Import block with Import ID and no appended "@" { 
- ConfigDirectory: config.StaticDirectory("testdata/LocationMicrosoftAzureBlobStorage/region_override/"), + ConfigDirectory: config.StaticDirectory("testdata/LocationAzureBlob/region_override/"), ConfigVariables: config.Variables{ acctest.CtRName: config.StringVariable(rName), "region": config.StringVariable(acctest.AlternateRegion()), @@ -222,7 +229,7 @@ func TestAccDataSyncLocationMicrosoftAzureBlobStorage_Identity_RegionOverride(t // Step 6: Import block with Resource Identity { - ConfigDirectory: config.StaticDirectory("testdata/LocationMicrosoftAzureBlobStorage/region_override/"), + ConfigDirectory: config.StaticDirectory("testdata/LocationAzureBlob/region_override/"), ConfigVariables: config.Variables{ acctest.CtRName: config.StringVariable(rName), "region": config.StringVariable(acctest.AlternateRegion()), @@ -243,3 +250,137 @@ func TestAccDataSyncLocationMicrosoftAzureBlobStorage_Identity_RegionOverride(t }, }) } + +func TestAccDataSyncLocationAzureBlob_Identity_ExistingResource(t *testing.T) { + ctx := acctest.Context(t) + + var v datasync.DescribeLocationAzureBlobOutput + resourceName := "aws_datasync_location_azure_blob.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { + acctest.PreCheck(ctx, t) + testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.DataSyncServiceID), + CheckDestroy: testAccCheckLocationAzureBlobDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/LocationAzureBlob/basic_v5.100.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckLocationAzureBlobExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + 
tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: v6.0 Identity error + { + ConfigDirectory: config.StaticDirectory("testdata/LocationAzureBlob/basic_v6.0.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckLocationAzureBlobExists(ctx, resourceName, &v), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.Null(), + }), + }, + }, + + // Step 3: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/LocationAzureBlob/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + }, + }) +} + +func TestAccDataSyncLocationAzureBlob_Identity_ExistingResource_NoRefresh_NoChange(t *testing.T) { + ctx := acctest.Context(t) + + var v datasync.DescribeLocationAzureBlobOutput + resourceName := "aws_datasync_location_azure_blob.test" + rName := 
sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { + acctest.PreCheck(ctx, t) + testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.DataSyncServiceID), + CheckDestroy: testAccCheckLocationAzureBlobDestroy(ctx), + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/LocationAzureBlob/basic_v5.100.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckLocationAzureBlobExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/LocationAzureBlob/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckLocationAzureBlobExists(ctx, resourceName, &v), + ), + }, + }, + }) +} diff --git a/internal/service/datasync/location_azure_blob_test.go b/internal/service/datasync/location_azure_blob_test.go index 53a9c886e2b2..23f16cfa7a69 100644 --- a/internal/service/datasync/location_azure_blob_test.go +++ b/internal/service/datasync/location_azure_blob_test.go @@ -12,14 +12,8 @@ import ( "github.com/aws/aws-sdk-go-v2/service/datasync" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" - "github.com/hashicorp/terraform-plugin-testing/knownvalue" - 
"github.com/hashicorp/terraform-plugin-testing/plancheck" - "github.com/hashicorp/terraform-plugin-testing/statecheck" "github.com/hashicorp/terraform-plugin-testing/terraform" - "github.com/hashicorp/terraform-plugin-testing/tfversion" "github.com/hashicorp/terraform-provider-aws/internal/acctest" - tfknownvalue "github.com/hashicorp/terraform-provider-aws/internal/acctest/knownvalue" - tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" "github.com/hashicorp/terraform-provider-aws/internal/conns" tfdatasync "github.com/hashicorp/terraform-provider-aws/internal/service/datasync" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" @@ -36,12 +30,12 @@ func TestAccDataSyncLocationAzureBlob_basic(t *testing.T) { PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.DataSyncServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckLocationMicrosoftAzureBlobStorageDestroy(ctx), + CheckDestroy: testAccCheckLocationAzureBlobDestroy(ctx), Steps: []resource.TestStep{ { Config: testAccLocationAzureBlobConfig_basic(rName), Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckLocationMicrosoftAzureBlobStorageExists(ctx, resourceName, &v), + testAccCheckLocationAzureBlobExists(ctx, resourceName, &v), resource.TestCheckResourceAttr(resourceName, "access_tier", "HOT"), resource.TestCheckResourceAttr(resourceName, "agent_arns.#", "1"), acctest.MatchResourceAttrRegionalARN(ctx, resourceName, names.AttrARN, "datasync", regexache.MustCompile(`location/loc-.+`)), @@ -75,12 +69,12 @@ func TestAccDataSyncLocationAzureBlob_disappears(t *testing.T) { PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.DataSyncServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckLocationMicrosoftAzureBlobStorageDestroy(ctx), + CheckDestroy: 
testAccCheckLocationAzureBlobDestroy(ctx), Steps: []resource.TestStep{ { Config: testAccLocationAzureBlobConfig_basic(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckLocationMicrosoftAzureBlobStorageExists(ctx, resourceName, &v), + testAccCheckLocationAzureBlobExists(ctx, resourceName, &v), acctest.CheckResourceDisappears(ctx, acctest.Provider, tfdatasync.ResourceLocationAzureBlob(), resourceName), ), ExpectNonEmptyPlan: true, @@ -99,12 +93,12 @@ func TestAccDataSyncLocationAzureBlob_tags(t *testing.T) { PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.DataSyncServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckLocationMicrosoftAzureBlobStorageDestroy(ctx), + CheckDestroy: testAccCheckLocationAzureBlobDestroy(ctx), Steps: []resource.TestStep{ { Config: testAccLocationAzureBlobConfig_tags1(rName, acctest.CtKey1, acctest.CtValue1), Check: resource.ComposeTestCheckFunc( - testAccCheckLocationMicrosoftAzureBlobStorageExists(ctx, resourceName, &v), + testAccCheckLocationAzureBlobExists(ctx, resourceName, &v), resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, "1"), resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey1, acctest.CtValue1), ), @@ -118,7 +112,7 @@ func TestAccDataSyncLocationAzureBlob_tags(t *testing.T) { { Config: testAccLocationAzureBlobConfig_tags2(rName, acctest.CtKey1, acctest.CtValue1Updated, acctest.CtKey2, acctest.CtValue2), Check: resource.ComposeTestCheckFunc( - testAccCheckLocationMicrosoftAzureBlobStorageExists(ctx, resourceName, &v), + testAccCheckLocationAzureBlobExists(ctx, resourceName, &v), resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, "2"), resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey1, acctest.CtValue1Updated), resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey2, acctest.CtValue2), @@ -127,7 +121,7 @@ func 
TestAccDataSyncLocationAzureBlob_tags(t *testing.T) { { Config: testAccLocationAzureBlobConfig_tags1(rName, acctest.CtKey1, acctest.CtValue1), Check: resource.ComposeTestCheckFunc( - testAccCheckLocationMicrosoftAzureBlobStorageExists(ctx, resourceName, &v), + testAccCheckLocationAzureBlobExists(ctx, resourceName, &v), resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, "1"), resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey1, acctest.CtValue1), ), @@ -146,12 +140,12 @@ func TestAccDataSyncLocationAzureBlob_update(t *testing.T) { PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.DataSyncServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckLocationMicrosoftAzureBlobStorageDestroy(ctx), + CheckDestroy: testAccCheckLocationAzureBlobDestroy(ctx), Steps: []resource.TestStep{ { Config: testAccLocationAzureBlobConfig_basic(rName), Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckLocationMicrosoftAzureBlobStorageExists(ctx, resourceName, &v), + testAccCheckLocationAzureBlobExists(ctx, resourceName, &v), resource.TestCheckResourceAttr(resourceName, "access_tier", "HOT"), resource.TestCheckResourceAttr(resourceName, "agent_arns.#", "1"), acctest.MatchResourceAttrRegionalARN(ctx, resourceName, names.AttrARN, "datasync", regexache.MustCompile(`location/loc-.+`)), @@ -168,7 +162,7 @@ func TestAccDataSyncLocationAzureBlob_update(t *testing.T) { { Config: testAccLocationAzureBlobConfig_updated(rName), Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckLocationMicrosoftAzureBlobStorageExists(ctx, resourceName, &v), + testAccCheckLocationAzureBlobExists(ctx, resourceName, &v), resource.TestCheckResourceAttr(resourceName, "access_tier", "COOL"), resource.TestCheckResourceAttr(resourceName, "agent_arns.#", "1"), acctest.MatchResourceAttrRegionalARN(ctx, resourceName, names.AttrARN, "datasync", 
regexache.MustCompile(`location/loc-.+`)), @@ -186,85 +180,7 @@ func TestAccDataSyncLocationAzureBlob_update(t *testing.T) { }) } -func TestAccDataSyncLocationAzureBlob_Identity_ExistingResource(t *testing.T) { - ctx := acctest.Context(t) - var v datasync.DescribeLocationAzureBlobOutput - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resourceName := "aws_datasync_location_azure_blob.test" - - resource.ParallelTest(t, resource.TestCase{ - TerraformVersionChecks: []tfversion.TerraformVersionCheck{ - tfversion.SkipBelow(tfversion.Version1_12_0), - }, - PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, names.DataSyncServiceID), - CheckDestroy: testAccCheckLocationMicrosoftAzureBlobStorageDestroy(ctx), - Steps: []resource.TestStep{ - { - ExternalProviders: map[string]resource.ExternalProvider{ - "aws": { - Source: "hashicorp/aws", - VersionConstraint: "5.100.0", - }, - }, - Config: testAccLocationAzureBlobConfig_basic(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckLocationMicrosoftAzureBlobStorageExists(ctx, resourceName, &v), - ), - ConfigStateChecks: []statecheck.StateCheck{ - tfstatecheck.ExpectNoIdentity(resourceName), - }, - }, - { - ExternalProviders: map[string]resource.ExternalProvider{ - "aws": { - Source: "hashicorp/aws", - VersionConstraint: "6.0.0", - }, - }, - Config: testAccLocationAzureBlobConfig_basic(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckLocationMicrosoftAzureBlobStorageExists(ctx, resourceName, &v), - ), - ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - PostApplyPostRefresh: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - }, - ConfigStateChecks: []statecheck.StateCheck{ - statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ - names.AttrARN: 
knownvalue.Null(), - }), - }, - }, - { - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - Config: testAccLocationAzureBlobConfig_basic(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckLocationMicrosoftAzureBlobStorageExists(ctx, resourceName, &v), - ), - ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - PostApplyPostRefresh: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - }, - ConfigStateChecks: []statecheck.StateCheck{ - statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ - names.AttrARN: tfknownvalue.RegionalARNRegexp("datasync", regexache.MustCompile(`location/loc-.+`)), - }), - }, - }, - }, - }) -} - -func testAccCheckLocationMicrosoftAzureBlobStorageDestroy(ctx context.Context) resource.TestCheckFunc { +func testAccCheckLocationAzureBlobDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { conn := acctest.Provider.Meta().(*conns.AWSClient).DataSyncClient(ctx) @@ -290,7 +206,7 @@ func testAccCheckLocationMicrosoftAzureBlobStorageDestroy(ctx context.Context) r } } -func testAccCheckLocationMicrosoftAzureBlobStorageExists(ctx context.Context, n string, v *datasync.DescribeLocationAzureBlobOutput) resource.TestCheckFunc { +func testAccCheckLocationAzureBlobExists(ctx context.Context, n string, v *datasync.DescribeLocationAzureBlobOutput) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] if !ok { diff --git a/internal/service/datasync/location_efs_identity_gen_test.go b/internal/service/datasync/location_efs_identity_gen_test.go index 12a223b6d141..a53466bf3577 100644 --- a/internal/service/datasync/location_efs_identity_gen_test.go +++ b/internal/service/datasync/location_efs_identity_gen_test.go @@ -15,6 +15,7 @@ import ( 
"github.com/hashicorp/terraform-plugin-testing/tfjsonpath" "github.com/hashicorp/terraform-plugin-testing/tfversion" "github.com/hashicorp/terraform-provider-aws/internal/acctest" + tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -24,7 +25,7 @@ func TestAccDataSyncLocationEFS_Identity_Basic(t *testing.T) { var v datasync.DescribeLocationEfsOutput resourceName := "aws_datasync_location_efs.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ TerraformVersionChecks: []tfversion.TerraformVersionCheck{ tfversion.SkipBelow(tfversion.Version1_12_0), }, @@ -46,6 +47,9 @@ func TestAccDataSyncLocationEFS_Identity_Basic(t *testing.T) { ConfigStateChecks: []statecheck.StateCheck{ statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New(names.AttrARN), compare.ValuesSame()), statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), }, }, @@ -100,7 +104,7 @@ func TestAccDataSyncLocationEFS_Identity_RegionOverride(t *testing.T) { resourceName := "aws_datasync_location_efs.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ TerraformVersionChecks: []tfversion.TerraformVersionCheck{ tfversion.SkipBelow(tfversion.Version1_12_0), }, @@ -121,6 +125,9 @@ func TestAccDataSyncLocationEFS_Identity_RegionOverride(t *testing.T) { ConfigStateChecks: []statecheck.StateCheck{ statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New(names.AttrARN), compare.ValuesSame()), statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), 
knownvalue.StringExact(acctest.AlternateRegion())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), }, }, @@ -207,3 +214,125 @@ func TestAccDataSyncLocationEFS_Identity_RegionOverride(t *testing.T) { }, }) } + +func TestAccDataSyncLocationEFS_Identity_ExistingResource(t *testing.T) { + ctx := acctest.Context(t) + + var v datasync.DescribeLocationEfsOutput + resourceName := "aws_datasync_location_efs.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { + acctest.PreCheck(ctx, t) + testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.DataSyncServiceID), + CheckDestroy: testAccCheckLocationEFSDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/LocationEFS/basic_v5.100.0/"), + ConfigVariables: config.Variables{}, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckLocationEFSExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: v6.0 Identity error + { + ConfigDirectory: config.StaticDirectory("testdata/LocationEFS/basic_v6.0.0/"), + ConfigVariables: config.Variables{}, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckLocationEFSExists(ctx, resourceName, &v), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, 
map[string]knownvalue.Check{ + names.AttrARN: knownvalue.Null(), + }), + }, + }, + + // Step 3: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/LocationEFS/basic/"), + ConfigVariables: config.Variables{}, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + }, + }) +} + +func TestAccDataSyncLocationEFS_Identity_ExistingResource_NoRefresh_NoChange(t *testing.T) { + ctx := acctest.Context(t) + + var v datasync.DescribeLocationEfsOutput + resourceName := "aws_datasync_location_efs.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { + acctest.PreCheck(ctx, t) + testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.DataSyncServiceID), + CheckDestroy: testAccCheckLocationEFSDestroy(ctx), + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/LocationEFS/basic_v5.100.0/"), + ConfigVariables: config.Variables{}, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckLocationEFSExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { 
+ ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/LocationEFS/basic/"), + ConfigVariables: config.Variables{}, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckLocationEFSExists(ctx, resourceName, &v), + ), + }, + }, + }) +} diff --git a/internal/service/datasync/location_efs_test.go b/internal/service/datasync/location_efs_test.go index 8c965559e52f..4a5fa8cb3cc5 100644 --- a/internal/service/datasync/location_efs_test.go +++ b/internal/service/datasync/location_efs_test.go @@ -12,14 +12,8 @@ import ( "github.com/aws/aws-sdk-go-v2/service/datasync" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" - "github.com/hashicorp/terraform-plugin-testing/knownvalue" - "github.com/hashicorp/terraform-plugin-testing/plancheck" - "github.com/hashicorp/terraform-plugin-testing/statecheck" "github.com/hashicorp/terraform-plugin-testing/terraform" - "github.com/hashicorp/terraform-plugin-testing/tfversion" "github.com/hashicorp/terraform-provider-aws/internal/acctest" - tfknownvalue "github.com/hashicorp/terraform-provider-aws/internal/acctest/knownvalue" - tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" "github.com/hashicorp/terraform-provider-aws/internal/conns" tfdatasync "github.com/hashicorp/terraform-provider-aws/internal/service/datasync" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" @@ -191,84 +185,6 @@ func TestAccDataSyncLocationEFS_tags(t *testing.T) { }) } -func TestAccDataSyncLocationEFS_Identity_ExistingResource(t *testing.T) { - ctx := acctest.Context(t) - var v datasync.DescribeLocationEfsOutput - resourceName := "aws_datasync_location_efs.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - - resource.ParallelTest(t, resource.TestCase{ - TerraformVersionChecks: []tfversion.TerraformVersionCheck{ - 
tfversion.SkipBelow(tfversion.Version1_12_0), - }, - PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, names.DataSyncServiceID), - CheckDestroy: testAccCheckLocationEFSDestroy(ctx), - Steps: []resource.TestStep{ - { - ExternalProviders: map[string]resource.ExternalProvider{ - "aws": { - Source: "hashicorp/aws", - VersionConstraint: "5.100.0", - }, - }, - Config: testAccLocationEFSConfig_basic(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckLocationEFSExists(ctx, resourceName, &v), - ), - ConfigStateChecks: []statecheck.StateCheck{ - tfstatecheck.ExpectNoIdentity(resourceName), - }, - }, - { - ExternalProviders: map[string]resource.ExternalProvider{ - "aws": { - Source: "hashicorp/aws", - VersionConstraint: "6.0.0", - }, - }, - Config: testAccLocationEFSConfig_basic(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckLocationEFSExists(ctx, resourceName, &v), - ), - ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - PostApplyPostRefresh: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - }, - ConfigStateChecks: []statecheck.StateCheck{ - statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ - names.AttrARN: knownvalue.Null(), - }), - }, - }, - { - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - Config: testAccLocationEFSConfig_basic(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckLocationEFSExists(ctx, resourceName, &v), - ), - ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - PostApplyPostRefresh: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - }, - ConfigStateChecks: []statecheck.StateCheck{ - 
statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ - names.AttrARN: tfknownvalue.RegionalARNRegexp("datasync", regexache.MustCompile(`location/loc-.+`)), - }), - }, - }, - }, - }) -} - func testAccCheckLocationEFSDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { conn := acctest.Provider.Meta().(*conns.AWSClient).DataSyncClient(ctx) diff --git a/internal/service/datasync/location_hdfs_identity_gen_test.go b/internal/service/datasync/location_hdfs_identity_gen_test.go index 224624c0cd52..ea674a0ad283 100644 --- a/internal/service/datasync/location_hdfs_identity_gen_test.go +++ b/internal/service/datasync/location_hdfs_identity_gen_test.go @@ -16,6 +16,7 @@ import ( "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" "github.com/hashicorp/terraform-plugin-testing/tfversion" "github.com/hashicorp/terraform-provider-aws/internal/acctest" + tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -26,7 +27,7 @@ func TestAccDataSyncLocationHDFS_Identity_Basic(t *testing.T) { resourceName := "aws_datasync_location_hdfs.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ TerraformVersionChecks: []tfversion.TerraformVersionCheck{ tfversion.SkipBelow(tfversion.Version1_12_0), }, @@ -50,6 +51,9 @@ func TestAccDataSyncLocationHDFS_Identity_Basic(t *testing.T) { ConfigStateChecks: []statecheck.StateCheck{ statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New(names.AttrARN), compare.ValuesSame()), statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), 
statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), }, }, @@ -111,7 +115,7 @@ func TestAccDataSyncLocationHDFS_Identity_RegionOverride(t *testing.T) { resourceName := "aws_datasync_location_hdfs.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ TerraformVersionChecks: []tfversion.TerraformVersionCheck{ tfversion.SkipBelow(tfversion.Version1_12_0), }, @@ -133,6 +137,9 @@ func TestAccDataSyncLocationHDFS_Identity_RegionOverride(t *testing.T) { ConfigStateChecks: []statecheck.StateCheck{ statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New(names.AttrARN), compare.ValuesSame()), statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), }, }, @@ -224,3 +231,137 @@ func TestAccDataSyncLocationHDFS_Identity_RegionOverride(t *testing.T) { }, }) } + +func TestAccDataSyncLocationHDFS_Identity_ExistingResource(t *testing.T) { + ctx := acctest.Context(t) + + var v datasync.DescribeLocationHdfsOutput + resourceName := "aws_datasync_location_hdfs.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { + acctest.PreCheck(ctx, t) + testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.DataSyncServiceID), + CheckDestroy: testAccCheckLocationHDFSDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: 
config.StaticDirectory("testdata/LocationHDFS/basic_v5.100.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckLocationHDFSExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: v6.0 Identity error + { + ConfigDirectory: config.StaticDirectory("testdata/LocationHDFS/basic_v6.0.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckLocationHDFSExists(ctx, resourceName, &v), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.Null(), + }), + }, + }, + + // Step 3: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/LocationHDFS/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + }, + }) 
+} + +func TestAccDataSyncLocationHDFS_Identity_ExistingResource_NoRefresh_NoChange(t *testing.T) { + ctx := acctest.Context(t) + + var v datasync.DescribeLocationHdfsOutput + resourceName := "aws_datasync_location_hdfs.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { + acctest.PreCheck(ctx, t) + testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.DataSyncServiceID), + CheckDestroy: testAccCheckLocationHDFSDestroy(ctx), + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/LocationHDFS/basic_v5.100.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckLocationHDFSExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/LocationHDFS/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckLocationHDFSExists(ctx, resourceName, &v), + ), + }, + }, + }) +} diff --git a/internal/service/datasync/location_hdfs_test.go b/internal/service/datasync/location_hdfs_test.go index 8fec12956006..0bbae7c75f14 100644 --- a/internal/service/datasync/location_hdfs_test.go +++ b/internal/service/datasync/location_hdfs_test.go @@ -12,14 +12,8 @@ import ( "github.com/aws/aws-sdk-go-v2/service/datasync" sdkacctest 
"github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" - "github.com/hashicorp/terraform-plugin-testing/knownvalue" - "github.com/hashicorp/terraform-plugin-testing/plancheck" - "github.com/hashicorp/terraform-plugin-testing/statecheck" "github.com/hashicorp/terraform-plugin-testing/terraform" - "github.com/hashicorp/terraform-plugin-testing/tfversion" "github.com/hashicorp/terraform-provider-aws/internal/acctest" - tfknownvalue "github.com/hashicorp/terraform-provider-aws/internal/acctest/knownvalue" - tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" "github.com/hashicorp/terraform-provider-aws/internal/conns" tfdatasync "github.com/hashicorp/terraform-provider-aws/internal/service/datasync" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" @@ -194,84 +188,6 @@ func TestAccDataSyncLocationHDFS_kerberos(t *testing.T) { }) } -func TestAccDataSyncLocationHDFS_Identity_ExistingResource(t *testing.T) { - ctx := acctest.Context(t) - var v datasync.DescribeLocationHdfsOutput - resourceName := "aws_datasync_location_hdfs.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - - resource.ParallelTest(t, resource.TestCase{ - TerraformVersionChecks: []tfversion.TerraformVersionCheck{ - tfversion.SkipBelow(tfversion.Version1_12_0), - }, - PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, names.DataSyncServiceID), - CheckDestroy: testAccCheckLocationHDFSDestroy(ctx), - Steps: []resource.TestStep{ - { - ExternalProviders: map[string]resource.ExternalProvider{ - "aws": { - Source: "hashicorp/aws", - VersionConstraint: "5.100.0", - }, - }, - Config: testAccLocationHDFSConfig_basic(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckLocationHDFSExists(ctx, resourceName, &v), - ), - ConfigStateChecks: []statecheck.StateCheck{ - tfstatecheck.ExpectNoIdentity(resourceName), - }, 
- }, - { - ExternalProviders: map[string]resource.ExternalProvider{ - "aws": { - Source: "hashicorp/aws", - VersionConstraint: "6.0.0", - }, - }, - Config: testAccLocationHDFSConfig_basic(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckLocationHDFSExists(ctx, resourceName, &v), - ), - ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - PostApplyPostRefresh: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - }, - ConfigStateChecks: []statecheck.StateCheck{ - statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ - names.AttrARN: knownvalue.Null(), - }), - }, - }, - { - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - Config: testAccLocationHDFSConfig_basic(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckLocationHDFSExists(ctx, resourceName, &v), - ), - ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - PostApplyPostRefresh: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - }, - ConfigStateChecks: []statecheck.StateCheck{ - statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ - names.AttrARN: tfknownvalue.RegionalARNRegexp("datasync", regexache.MustCompile(`location/loc-.+`)), - }), - }, - }, - }, - }) -} - func testAccCheckLocationHDFSDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { conn := acctest.Provider.Meta().(*conns.AWSClient).DataSyncClient(ctx) diff --git a/internal/service/datasync/location_nfs_identity_gen_test.go b/internal/service/datasync/location_nfs_identity_gen_test.go index 1669f358dc0d..97ffc2054c02 100644 --- a/internal/service/datasync/location_nfs_identity_gen_test.go +++ 
b/internal/service/datasync/location_nfs_identity_gen_test.go @@ -16,6 +16,7 @@ import ( "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" "github.com/hashicorp/terraform-plugin-testing/tfversion" "github.com/hashicorp/terraform-provider-aws/internal/acctest" + tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -26,7 +27,7 @@ func TestAccDataSyncLocationNFS_Identity_Basic(t *testing.T) { resourceName := "aws_datasync_location_nfs.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ TerraformVersionChecks: []tfversion.TerraformVersionCheck{ tfversion.SkipBelow(tfversion.Version1_12_0), }, @@ -50,6 +51,9 @@ func TestAccDataSyncLocationNFS_Identity_Basic(t *testing.T) { ConfigStateChecks: []statecheck.StateCheck{ statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New(names.AttrARN), compare.ValuesSame()), statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), }, }, @@ -111,7 +115,7 @@ func TestAccDataSyncLocationNFS_Identity_RegionOverride(t *testing.T) { resourceName := "aws_datasync_location_nfs.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ TerraformVersionChecks: []tfversion.TerraformVersionCheck{ tfversion.SkipBelow(tfversion.Version1_12_0), }, @@ -133,6 +137,9 @@ func TestAccDataSyncLocationNFS_Identity_RegionOverride(t *testing.T) { ConfigStateChecks: []statecheck.StateCheck{ statecheck.CompareValuePairs(resourceName, 
tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New(names.AttrARN), compare.ValuesSame()), statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), }, }, @@ -224,3 +231,137 @@ func TestAccDataSyncLocationNFS_Identity_RegionOverride(t *testing.T) { }, }) } + +func TestAccDataSyncLocationNFS_Identity_ExistingResource(t *testing.T) { + ctx := acctest.Context(t) + + var v datasync.DescribeLocationNfsOutput + resourceName := "aws_datasync_location_nfs.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { + acctest.PreCheck(ctx, t) + testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.DataSyncServiceID), + CheckDestroy: testAccCheckLocationNFSDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/LocationNFS/basic_v5.100.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckLocationNFSExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: v6.0 Identity error + { + ConfigDirectory: config.StaticDirectory("testdata/LocationNFS/basic_v6.0.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckLocationNFSExists(ctx, resourceName, &v), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: 
[]plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.Null(), + }), + }, + }, + + // Step 3: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/LocationNFS/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + }, + }) +} + +func TestAccDataSyncLocationNFS_Identity_ExistingResource_NoRefresh_NoChange(t *testing.T) { + ctx := acctest.Context(t) + + var v datasync.DescribeLocationNfsOutput + resourceName := "aws_datasync_location_nfs.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { + acctest.PreCheck(ctx, t) + testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.DataSyncServiceID), + CheckDestroy: testAccCheckLocationNFSDestroy(ctx), + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + 
}, + }, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/LocationNFS/basic_v5.100.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckLocationNFSExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/LocationNFS/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckLocationNFSExists(ctx, resourceName, &v), + ), + }, + }, + }) +} diff --git a/internal/service/datasync/location_nfs_test.go b/internal/service/datasync/location_nfs_test.go index f9199b73ef12..e0675c481a77 100644 --- a/internal/service/datasync/location_nfs_test.go +++ b/internal/service/datasync/location_nfs_test.go @@ -12,14 +12,8 @@ import ( "github.com/aws/aws-sdk-go-v2/service/datasync" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" - "github.com/hashicorp/terraform-plugin-testing/knownvalue" - "github.com/hashicorp/terraform-plugin-testing/plancheck" - "github.com/hashicorp/terraform-plugin-testing/statecheck" "github.com/hashicorp/terraform-plugin-testing/terraform" - "github.com/hashicorp/terraform-plugin-testing/tfversion" "github.com/hashicorp/terraform-provider-aws/internal/acctest" - tfknownvalue "github.com/hashicorp/terraform-provider-aws/internal/acctest/knownvalue" - tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" "github.com/hashicorp/terraform-provider-aws/internal/conns" tfdatasync 
"github.com/hashicorp/terraform-provider-aws/internal/service/datasync" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" @@ -231,84 +225,6 @@ func TestAccDataSyncLocationNFS_tags(t *testing.T) { }) } -func TestAccDataSyncLocationNFS_Identity_ExistingResource(t *testing.T) { - ctx := acctest.Context(t) - var v datasync.DescribeLocationNfsOutput - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resourceName := "aws_datasync_location_nfs.test" - - resource.ParallelTest(t, resource.TestCase{ - TerraformVersionChecks: []tfversion.TerraformVersionCheck{ - tfversion.SkipBelow(tfversion.Version1_12_0), - }, - PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, names.DataSyncServiceID), - CheckDestroy: testAccCheckLocationNFSDestroy(ctx), - Steps: []resource.TestStep{ - { - ExternalProviders: map[string]resource.ExternalProvider{ - "aws": { - Source: "hashicorp/aws", - VersionConstraint: "5.100.0", - }, - }, - Config: testAccLocationNFSConfig_basic(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckLocationNFSExists(ctx, resourceName, &v), - ), - ConfigStateChecks: []statecheck.StateCheck{ - tfstatecheck.ExpectNoIdentity(resourceName), - }, - }, - { - ExternalProviders: map[string]resource.ExternalProvider{ - "aws": { - Source: "hashicorp/aws", - VersionConstraint: "6.0.0", - }, - }, - Config: testAccLocationNFSConfig_basic(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckLocationNFSExists(ctx, resourceName, &v), - ), - ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - PostApplyPostRefresh: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - }, - ConfigStateChecks: []statecheck.StateCheck{ - statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ - names.AttrARN: knownvalue.Null(), - 
}), - }, - }, - { - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - Config: testAccLocationNFSConfig_basic(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckLocationNFSExists(ctx, resourceName, &v), - ), - ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - PostApplyPostRefresh: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - }, - ConfigStateChecks: []statecheck.StateCheck{ - statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ - names.AttrARN: tfknownvalue.RegionalARNRegexp("datasync", regexache.MustCompile(`location/loc-.+`)), - }), - }, - }, - }, - }) -} - func testAccCheckLocationNFSDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { conn := acctest.Provider.Meta().(*conns.AWSClient).DataSyncClient(ctx) diff --git a/internal/service/datasync/location_object_storage.go b/internal/service/datasync/location_object_storage.go index 0e0ea0b71bfe..fb9d3dff1d22 100644 --- a/internal/service/datasync/location_object_storage.go +++ b/internal/service/datasync/location_object_storage.go @@ -13,6 +13,7 @@ import ( "github.com/aws/aws-sdk-go-v2/service/datasync" awstypes "github.com/aws/aws-sdk-go-v2/service/datasync/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" @@ -49,7 +50,7 @@ func resourceLocationObjectStorage() *schema.Resource { }, "agent_arns": { Type: schema.TypeSet, - Required: true, + Optional: true, Elem: &schema.Schema{ Type: schema.TypeString, ValidateFunc: verify.ValidARN, @@ -106,6 +107,11 @@ func resourceLocationObjectStorage() *schema.Resource 
{ Computed: true, }, }, + + CustomizeDiff: customdiff.ForceNewIfChange("agent_arns", func(_ context.Context, old, new, meta any) bool { + // "InvalidRequestException: Invalid parameter: Updating AgentArns is not permitted for agentless object storage locations". + return (old.(*schema.Set).Len() == 0 && new.(*schema.Set).Len() > 0) || (old.(*schema.Set).Len() > 0 && new.(*schema.Set).Len() == 0) + }), } } @@ -114,13 +120,16 @@ func resourceLocationObjectStorageCreate(ctx context.Context, d *schema.Resource conn := meta.(*conns.AWSClient).DataSyncClient(ctx) input := &datasync.CreateLocationObjectStorageInput{ - AgentArns: flex.ExpandStringValueSet(d.Get("agent_arns").(*schema.Set)), BucketName: aws.String(d.Get(names.AttrBucketName).(string)), ServerHostname: aws.String(d.Get("server_hostname").(string)), Subdirectory: aws.String(d.Get("subdirectory").(string)), Tags: getTagsIn(ctx), } + if v, ok := d.GetOk("agent_arns"); ok && v.(*schema.Set).Len() > 0 { + input.AgentArns = flex.ExpandStringValueSet(v.(*schema.Set)) + } + if v, ok := d.GetOk(names.AttrAccessKey); ok { input.AccessKey = aws.String(v.(string)) } @@ -202,7 +211,9 @@ func resourceLocationObjectStorageUpdate(ctx context.Context, d *schema.Resource } if d.HasChange("agent_arns") { - input.AgentArns = flex.ExpandStringValueSet(d.Get("agent_arns").(*schema.Set)) + if v, ok := d.GetOk("agent_arns"); ok && v.(*schema.Set).Len() > 0 { + input.AgentArns = flex.ExpandStringValueSet(v.(*schema.Set)) + } // Access key must be specified when updating agent ARNs input.AccessKey = aws.String("") diff --git a/internal/service/datasync/location_object_storage_identity_gen_test.go b/internal/service/datasync/location_object_storage_identity_gen_test.go index 0c351b042a0f..2ce6e8ccd231 100644 --- a/internal/service/datasync/location_object_storage_identity_gen_test.go +++ b/internal/service/datasync/location_object_storage_identity_gen_test.go @@ -16,6 +16,7 @@ import ( 
"github.com/hashicorp/terraform-plugin-testing/tfjsonpath" "github.com/hashicorp/terraform-plugin-testing/tfversion" "github.com/hashicorp/terraform-provider-aws/internal/acctest" + tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -27,7 +28,7 @@ func TestAccDataSyncLocationObjectStorage_Identity_Basic(t *testing.T) { rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) domain := acctest.RandomDomainName() - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ TerraformVersionChecks: []tfversion.TerraformVersionCheck{ tfversion.SkipBelow(tfversion.Version1_12_0), }, @@ -52,6 +53,9 @@ func TestAccDataSyncLocationObjectStorage_Identity_Basic(t *testing.T) { ConfigStateChecks: []statecheck.StateCheck{ statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New(names.AttrARN), compare.ValuesSame()), statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), }, }, @@ -117,7 +121,7 @@ func TestAccDataSyncLocationObjectStorage_Identity_RegionOverride(t *testing.T) rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) domain := acctest.RandomDomainName() - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ TerraformVersionChecks: []tfversion.TerraformVersionCheck{ tfversion.SkipBelow(tfversion.Version1_12_0), }, @@ -140,6 +144,9 @@ func TestAccDataSyncLocationObjectStorage_Identity_RegionOverride(t *testing.T) ConfigStateChecks: []statecheck.StateCheck{ statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New(names.AttrARN), 
compare.ValuesSame()), statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), }, }, @@ -236,3 +243,144 @@ func TestAccDataSyncLocationObjectStorage_Identity_RegionOverride(t *testing.T) }, }) } + +func TestAccDataSyncLocationObjectStorage_Identity_ExistingResource(t *testing.T) { + ctx := acctest.Context(t) + + var v datasync.DescribeLocationObjectStorageOutput + resourceName := "aws_datasync_location_object_storage.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + domain := acctest.RandomDomainName() + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { + acctest.PreCheck(ctx, t) + testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.DataSyncServiceID), + CheckDestroy: testAccCheckLocationObjectStorageDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/LocationObjectStorage/basic_v5.100.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "domain": config.StringVariable(domain), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckLocationObjectStorageExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: v6.0 Identity error + { + ConfigDirectory: config.StaticDirectory("testdata/LocationObjectStorage/basic_v6.0.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "domain": config.StringVariable(domain), + }, + Check: 
resource.ComposeAggregateTestCheckFunc( + testAccCheckLocationObjectStorageExists(ctx, resourceName, &v), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.Null(), + }), + }, + }, + + // Step 3: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/LocationObjectStorage/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "domain": config.StringVariable(domain), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + }, + }) +} + +func TestAccDataSyncLocationObjectStorage_Identity_ExistingResource_NoRefresh_NoChange(t *testing.T) { + ctx := acctest.Context(t) + + var v datasync.DescribeLocationObjectStorageOutput + resourceName := "aws_datasync_location_object_storage.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + domain := acctest.RandomDomainName() + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + 
PreCheck: func() { + acctest.PreCheck(ctx, t) + testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.DataSyncServiceID), + CheckDestroy: testAccCheckLocationObjectStorageDestroy(ctx), + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/LocationObjectStorage/basic_v5.100.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "domain": config.StringVariable(domain), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckLocationObjectStorageExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/LocationObjectStorage/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "domain": config.StringVariable(domain), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckLocationObjectStorageExists(ctx, resourceName, &v), + ), + }, + }, + }) +} diff --git a/internal/service/datasync/location_object_storage_test.go b/internal/service/datasync/location_object_storage_test.go index c94e1399722c..c4753a51df04 100644 --- a/internal/service/datasync/location_object_storage_test.go +++ b/internal/service/datasync/location_object_storage_test.go @@ -16,10 +16,8 @@ import ( "github.com/hashicorp/terraform-plugin-testing/plancheck" "github.com/hashicorp/terraform-plugin-testing/statecheck" "github.com/hashicorp/terraform-plugin-testing/terraform" - "github.com/hashicorp/terraform-plugin-testing/tfversion" + "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" "github.com/hashicorp/terraform-provider-aws/internal/acctest" - tfknownvalue 
"github.com/hashicorp/terraform-provider-aws/internal/acctest/knownvalue" - tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" "github.com/hashicorp/terraform-provider-aws/internal/conns" tfdatasync "github.com/hashicorp/terraform-provider-aws/internal/service/datasync" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" @@ -246,7 +244,7 @@ func TestAccDataSyncLocationObjectStorage_serverCertificate(t *testing.T) { }) } -func TestAccDataSyncLocationObjectStorage_Identity_ExistingResource(t *testing.T) { +func TestAccDataSyncLocationObjectStorage_emptyAgentARNs(t *testing.T) { ctx := acctest.Context(t) var v datasync.DescribeLocationObjectStorageOutput resourceName := "aws_datasync_location_object_storage.test" @@ -254,71 +252,92 @@ func TestAccDataSyncLocationObjectStorage_Identity_ExistingResource(t *testing.T domain := acctest.RandomDomainName() resource.ParallelTest(t, resource.TestCase{ - TerraformVersionChecks: []tfversion.TerraformVersionCheck{ - tfversion.SkipBelow(tfversion.Version1_12_0), - }, - PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, names.DataSyncServiceID), - CheckDestroy: testAccCheckLocationObjectStorageDestroy(ctx), + PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.DataSyncServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckLocationObjectStorageDestroy(ctx), Steps: []resource.TestStep{ { - ExternalProviders: map[string]resource.ExternalProvider{ - "aws": { - Source: "hashicorp/aws", - VersionConstraint: "5.100.0", - }, - }, - Config: testAccLocationObjectStorageConfig_basic(rName, domain), - Check: resource.ComposeTestCheckFunc( + Config: testAccLocationObjectStorageConfig_emptyAgentARNs(rName, domain), + Check: resource.ComposeAggregateTestCheckFunc( testAccCheckLocationObjectStorageExists(ctx, resourceName, &v), ), + 
ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + }, + }, ConfigStateChecks: []statecheck.StateCheck{ - tfstatecheck.ExpectNoIdentity(resourceName), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("agent_arns"), knownvalue.Null()), }, }, { - ExternalProviders: map[string]resource.ExternalProvider{ - "aws": { - Source: "hashicorp/aws", - VersionConstraint: "6.0.0", + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccDataSyncLocationObjectStorage_noAgentARNs(t *testing.T) { + ctx := acctest.Context(t) + var v datasync.DescribeLocationObjectStorageOutput + resourceName := "aws_datasync_location_object_storage.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + domain := acctest.RandomDomainName() + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.DataSyncServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckLocationObjectStorageDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccLocationObjectStorageConfig_noAgentARNs(rName, domain), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckLocationObjectStorageExists(ctx, resourceName, &v), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), }, }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("agent_arns"), knownvalue.Null()), + }, + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { Config: testAccLocationObjectStorageConfig_basic(rName, domain), - Check: resource.ComposeTestCheckFunc( + Check: 
resource.ComposeAggregateTestCheckFunc( testAccCheckLocationObjectStorageExists(ctx, resourceName, &v), ), ConfigPlanChecks: resource.ConfigPlanChecks{ PreApply: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - PostApplyPostRefresh: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionReplace), }, }, ConfigStateChecks: []statecheck.StateCheck{ - statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ - names.AttrARN: knownvalue.Null(), - }), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("agent_arns"), knownvalue.ListSizeExact(1)), }, }, { - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - Config: testAccLocationObjectStorageConfig_basic(rName, domain), - Check: resource.ComposeTestCheckFunc( + Config: testAccLocationObjectStorageConfig_noAgentARNs(rName, domain), + Check: resource.ComposeAggregateTestCheckFunc( testAccCheckLocationObjectStorageExists(ctx, resourceName, &v), ), ConfigPlanChecks: resource.ConfigPlanChecks{ PreApply: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - PostApplyPostRefresh: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionReplace), }, }, ConfigStateChecks: []statecheck.StateCheck{ - statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ - names.AttrARN: tfknownvalue.RegionalARNRegexp("datasync", regexache.MustCompile(`location/loc-.+`)), - }), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("agent_arns"), knownvalue.Null()), }, }, }, @@ -483,3 +502,26 @@ resource "aws_datasync_location_object_storage" "test" { } `, rName, domain, acctest.TLSPEMEscapeNewlines(certificate))) } + +func 
testAccLocationObjectStorageConfig_emptyAgentARNs(rName, domain string) string { + return fmt.Sprintf(` +resource "aws_datasync_location_object_storage" "test" { + agent_arns = [] + server_hostname = %[2]q + bucket_name = %[1]q + server_protocol = "HTTP" + server_port = 8080 +} +`, rName, domain) +} + +func testAccLocationObjectStorageConfig_noAgentARNs(rName, domain string) string { + return acctest.ConfigCompose(testAccLocationObjectStorageConfig_base(rName), fmt.Sprintf(` +resource "aws_datasync_location_object_storage" "test" { + server_hostname = %[2]q + bucket_name = %[1]q + server_protocol = "HTTP" + server_port = 8080 +} +`, rName, domain)) +} diff --git a/internal/service/datasync/location_s3.go b/internal/service/datasync/location_s3.go index fcf6a22d47f9..e12f1f4593f7 100644 --- a/internal/service/datasync/location_s3.go +++ b/internal/service/datasync/location_s3.go @@ -127,7 +127,7 @@ func resourceLocationS3Create(ctx context.Context, d *schema.ResourceData, meta } outputRaw, err := tfresource.RetryWhen(ctx, propagationTimeout, - func() (any, error) { + func(ctx context.Context) (any, error) { return conn.CreateLocationS3(ctx, input) }, func(err error) (bool, error) { diff --git a/internal/service/datasync/location_s3_identity_gen_test.go b/internal/service/datasync/location_s3_identity_gen_test.go index b957a7ecf372..b3d3c68b79c4 100644 --- a/internal/service/datasync/location_s3_identity_gen_test.go +++ b/internal/service/datasync/location_s3_identity_gen_test.go @@ -16,6 +16,7 @@ import ( "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" "github.com/hashicorp/terraform-plugin-testing/tfversion" "github.com/hashicorp/terraform-provider-aws/internal/acctest" + tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -26,7 +27,7 @@ func TestAccDataSyncLocationS3_Identity_Basic(t *testing.T) { resourceName := "aws_datasync_location_s3.test" rName := 
sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ TerraformVersionChecks: []tfversion.TerraformVersionCheck{ tfversion.SkipBelow(tfversion.Version1_12_0), }, @@ -50,6 +51,9 @@ func TestAccDataSyncLocationS3_Identity_Basic(t *testing.T) { ConfigStateChecks: []statecheck.StateCheck{ statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New(names.AttrARN), compare.ValuesSame()), statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), }, }, @@ -111,7 +115,7 @@ func TestAccDataSyncLocationS3_Identity_RegionOverride(t *testing.T) { resourceName := "aws_datasync_location_s3.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ TerraformVersionChecks: []tfversion.TerraformVersionCheck{ tfversion.SkipBelow(tfversion.Version1_12_0), }, @@ -133,6 +137,9 @@ func TestAccDataSyncLocationS3_Identity_RegionOverride(t *testing.T) { ConfigStateChecks: []statecheck.StateCheck{ statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New(names.AttrARN), compare.ValuesSame()), statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), }, }, @@ -224,3 +231,137 @@ func TestAccDataSyncLocationS3_Identity_RegionOverride(t *testing.T) { }, }) } + +func 
TestAccDataSyncLocationS3_Identity_ExistingResource(t *testing.T) { + ctx := acctest.Context(t) + + var v datasync.DescribeLocationS3Output + resourceName := "aws_datasync_location_s3.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { + acctest.PreCheck(ctx, t) + testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.DataSyncServiceID), + CheckDestroy: testAccCheckLocationS3Destroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/LocationS3/basic_v5.100.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckLocationS3Exists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: v6.0 Identity error + { + ConfigDirectory: config.StaticDirectory("testdata/LocationS3/basic_v6.0.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckLocationS3Exists(ctx, resourceName, &v), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.Null(), + }), + }, + }, + + // Step 3: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: 
config.StaticDirectory("testdata/LocationS3/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + }, + }) +} + +func TestAccDataSyncLocationS3_Identity_ExistingResource_NoRefresh_NoChange(t *testing.T) { + ctx := acctest.Context(t) + + var v datasync.DescribeLocationS3Output + resourceName := "aws_datasync_location_s3.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { + acctest.PreCheck(ctx, t) + testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.DataSyncServiceID), + CheckDestroy: testAccCheckLocationS3Destroy(ctx), + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/LocationS3/basic_v5.100.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckLocationS3Exists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: 
acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/LocationS3/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckLocationS3Exists(ctx, resourceName, &v), + ), + }, + }, + }) +} diff --git a/internal/service/datasync/location_s3_test.go b/internal/service/datasync/location_s3_test.go index aaf6968e055a..92921db00e17 100644 --- a/internal/service/datasync/location_s3_test.go +++ b/internal/service/datasync/location_s3_test.go @@ -12,14 +12,8 @@ import ( "github.com/aws/aws-sdk-go-v2/service/datasync" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" - "github.com/hashicorp/terraform-plugin-testing/knownvalue" - "github.com/hashicorp/terraform-plugin-testing/plancheck" - "github.com/hashicorp/terraform-plugin-testing/statecheck" "github.com/hashicorp/terraform-plugin-testing/terraform" - "github.com/hashicorp/terraform-plugin-testing/tfversion" "github.com/hashicorp/terraform-provider-aws/internal/acctest" - tfknownvalue "github.com/hashicorp/terraform-provider-aws/internal/acctest/knownvalue" - tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" "github.com/hashicorp/terraform-provider-aws/internal/conns" tfdatasync "github.com/hashicorp/terraform-provider-aws/internal/service/datasync" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" @@ -170,84 +164,6 @@ func TestAccDataSyncLocationS3_tags(t *testing.T) { }) } -func TestAccDataSyncLocationS3_Identity_ExistingResource(t *testing.T) { - ctx := acctest.Context(t) - var v datasync.DescribeLocationS3Output - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resourceName := "aws_datasync_location_s3.test" - - resource.ParallelTest(t, resource.TestCase{ - TerraformVersionChecks: []tfversion.TerraformVersionCheck{ - 
tfversion.SkipBelow(tfversion.Version1_12_0), - }, - PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, names.DataSyncServiceID), - CheckDestroy: testAccCheckLocationS3Destroy(ctx), - Steps: []resource.TestStep{ - { - ExternalProviders: map[string]resource.ExternalProvider{ - "aws": { - Source: "hashicorp/aws", - VersionConstraint: "5.100.0", - }, - }, - Config: testAccLocationS3Config_basic(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckLocationS3Exists(ctx, resourceName, &v), - ), - ConfigStateChecks: []statecheck.StateCheck{ - tfstatecheck.ExpectNoIdentity(resourceName), - }, - }, - { - ExternalProviders: map[string]resource.ExternalProvider{ - "aws": { - Source: "hashicorp/aws", - VersionConstraint: "6.0.0", - }, - }, - Config: testAccLocationS3Config_basic(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckLocationS3Exists(ctx, resourceName, &v), - ), - ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - PostApplyPostRefresh: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - }, - ConfigStateChecks: []statecheck.StateCheck{ - statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ - names.AttrARN: knownvalue.Null(), - }), - }, - }, - { - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - Config: testAccLocationS3Config_basic(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckLocationS3Exists(ctx, resourceName, &v), - ), - ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - PostApplyPostRefresh: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - }, - ConfigStateChecks: []statecheck.StateCheck{ - 
statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ - names.AttrARN: tfknownvalue.RegionalARNRegexp("datasync", regexache.MustCompile(`location/loc-.+`)), - }), - }, - }, - }, - }) -} - func testAccCheckLocationS3Destroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { conn := acctest.Provider.Meta().(*conns.AWSClient).DataSyncClient(ctx) diff --git a/internal/service/datasync/location_smb_identity_gen_test.go b/internal/service/datasync/location_smb_identity_gen_test.go index 6b3aa407d059..9c27f9fda2a4 100644 --- a/internal/service/datasync/location_smb_identity_gen_test.go +++ b/internal/service/datasync/location_smb_identity_gen_test.go @@ -16,6 +16,7 @@ import ( "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" "github.com/hashicorp/terraform-plugin-testing/tfversion" "github.com/hashicorp/terraform-provider-aws/internal/acctest" + tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -26,7 +27,7 @@ func TestAccDataSyncLocationSMB_Identity_Basic(t *testing.T) { resourceName := "aws_datasync_location_smb.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ TerraformVersionChecks: []tfversion.TerraformVersionCheck{ tfversion.SkipBelow(tfversion.Version1_12_0), }, @@ -50,6 +51,9 @@ func TestAccDataSyncLocationSMB_Identity_Basic(t *testing.T) { ConfigStateChecks: []statecheck.StateCheck{ statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New(names.AttrARN), compare.ValuesSame()), statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), 
statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), }, }, @@ -118,7 +122,7 @@ func TestAccDataSyncLocationSMB_Identity_RegionOverride(t *testing.T) { resourceName := "aws_datasync_location_smb.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ TerraformVersionChecks: []tfversion.TerraformVersionCheck{ tfversion.SkipBelow(tfversion.Version1_12_0), }, @@ -140,6 +144,9 @@ func TestAccDataSyncLocationSMB_Identity_RegionOverride(t *testing.T) { ConfigStateChecks: []statecheck.StateCheck{ statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New(names.AttrARN), compare.ValuesSame()), statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), }, }, @@ -243,3 +250,137 @@ func TestAccDataSyncLocationSMB_Identity_RegionOverride(t *testing.T) { }, }) } + +func TestAccDataSyncLocationSMB_Identity_ExistingResource(t *testing.T) { + ctx := acctest.Context(t) + + var v datasync.DescribeLocationSmbOutput + resourceName := "aws_datasync_location_smb.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { + acctest.PreCheck(ctx, t) + testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.DataSyncServiceID), + CheckDestroy: testAccCheckLocationSMBDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/LocationSMB/basic_v5.100.0/"), + 
ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckLocationSMBExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: v6.0 Identity error + { + ConfigDirectory: config.StaticDirectory("testdata/LocationSMB/basic_v6.0.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckLocationSMBExists(ctx, resourceName, &v), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.Null(), + }), + }, + }, + + // Step 3: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/LocationSMB/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + }, + }) +} + +func 
TestAccDataSyncLocationSMB_Identity_ExistingResource_NoRefresh_NoChange(t *testing.T) { + ctx := acctest.Context(t) + + var v datasync.DescribeLocationSmbOutput + resourceName := "aws_datasync_location_smb.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { + acctest.PreCheck(ctx, t) + testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.DataSyncServiceID), + CheckDestroy: testAccCheckLocationSMBDestroy(ctx), + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/LocationSMB/basic_v5.100.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckLocationSMBExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/LocationSMB/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckLocationSMBExists(ctx, resourceName, &v), + ), + }, + }, + }) +} diff --git a/internal/service/datasync/location_smb_test.go b/internal/service/datasync/location_smb_test.go index 5a106d67e660..a61a192f24d8 100644 --- a/internal/service/datasync/location_smb_test.go +++ b/internal/service/datasync/location_smb_test.go @@ -12,14 +12,8 @@ import ( "github.com/aws/aws-sdk-go-v2/service/datasync" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" 
"github.com/hashicorp/terraform-plugin-testing/helper/resource" - "github.com/hashicorp/terraform-plugin-testing/knownvalue" - "github.com/hashicorp/terraform-plugin-testing/plancheck" - "github.com/hashicorp/terraform-plugin-testing/statecheck" "github.com/hashicorp/terraform-plugin-testing/terraform" - "github.com/hashicorp/terraform-plugin-testing/tfversion" "github.com/hashicorp/terraform-provider-aws/internal/acctest" - tfknownvalue "github.com/hashicorp/terraform-provider-aws/internal/acctest/knownvalue" - tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" "github.com/hashicorp/terraform-provider-aws/internal/conns" tfdatasync "github.com/hashicorp/terraform-provider-aws/internal/service/datasync" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" @@ -145,84 +139,6 @@ func TestAccDataSyncLocationSMB_tags(t *testing.T) { }) } -func TestAccDataSyncLocationSMB_Identity_ExistingResource(t *testing.T) { - ctx := acctest.Context(t) - var v datasync.DescribeLocationSmbOutput - resourceName := "aws_datasync_location_smb.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - - resource.ParallelTest(t, resource.TestCase{ - TerraformVersionChecks: []tfversion.TerraformVersionCheck{ - tfversion.SkipBelow(tfversion.Version1_12_0), - }, - PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, names.DataSyncServiceID), - CheckDestroy: testAccCheckLocationSMBDestroy(ctx), - Steps: []resource.TestStep{ - { - ExternalProviders: map[string]resource.ExternalProvider{ - "aws": { - Source: "hashicorp/aws", - VersionConstraint: "5.100.0", - }, - }, - Config: testAccLocationSMBConfig_basic(rName, "/test/"), - Check: resource.ComposeTestCheckFunc( - testAccCheckLocationSMBExists(ctx, resourceName, &v), - ), - ConfigStateChecks: []statecheck.StateCheck{ - tfstatecheck.ExpectNoIdentity(resourceName), - }, - }, - { - ExternalProviders: 
map[string]resource.ExternalProvider{ - "aws": { - Source: "hashicorp/aws", - VersionConstraint: "6.0.0", - }, - }, - Config: testAccLocationSMBConfig_basic(rName, "/test/"), - Check: resource.ComposeTestCheckFunc( - testAccCheckLocationSMBExists(ctx, resourceName, &v), - ), - ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - PostApplyPostRefresh: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - }, - ConfigStateChecks: []statecheck.StateCheck{ - statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ - names.AttrARN: knownvalue.Null(), - }), - }, - }, - { - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - Config: testAccLocationSMBConfig_basic(rName, "/test/"), - Check: resource.ComposeTestCheckFunc( - testAccCheckLocationSMBExists(ctx, resourceName, &v), - ), - ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - PostApplyPostRefresh: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - }, - ConfigStateChecks: []statecheck.StateCheck{ - statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ - names.AttrARN: tfknownvalue.RegionalARNRegexp("datasync", regexache.MustCompile(`location/loc-.+`)), - }), - }, - }, - }, - }) -} - func testAccCheckLocationSMBDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { conn := acctest.Provider.Meta().(*conns.AWSClient).DataSyncClient(ctx) diff --git a/internal/service/datasync/service_endpoint_resolver_gen.go b/internal/service/datasync/service_endpoint_resolver_gen.go index 307d27b963ca..489daeed63fd 100644 --- a/internal/service/datasync/service_endpoint_resolver_gen.go +++ 
b/internal/service/datasync/service_endpoint_resolver_gen.go @@ -62,7 +62,7 @@ func (r resolverV2) ResolveEndpoint(ctx context.Context, params datasync.Endpoin }) params.UseFIPS = aws.Bool(false) } else { - err = fmt.Errorf("looking up datasync endpoint %q: %s", hostname, err) + err = fmt.Errorf("looking up datasync endpoint %q: %w", hostname, err) return } } else { diff --git a/internal/service/datasync/service_endpoints_gen_test.go b/internal/service/datasync/service_endpoints_gen_test.go index 4ee1b4cdba1a..d162ff9feb83 100644 --- a/internal/service/datasync/service_endpoints_gen_test.go +++ b/internal/service/datasync/service_endpoints_gen_test.go @@ -521,7 +521,7 @@ func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { ) } -var errCancelOperation = fmt.Errorf("Test: Canceling request") +var errCancelOperation = errors.New("Test: Canceling request") func addCancelRequestMiddleware() func(*middleware.Stack) error { return func(stack *middleware.Stack) error { diff --git a/internal/service/datasync/service_package_gen.go b/internal/service/datasync/service_package_gen.go index 6575fe62d329..906128001ddb 100644 --- a/internal/service/datasync/service_package_gen.go +++ b/internal/service/datasync/service_package_gen.go @@ -7,7 +7,6 @@ import ( "unique" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/datasync" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -238,7 +237,7 @@ func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) ( func(o *datasync.Options) { if inContext, ok := conns.FromContext(ctx); ok && inContext.VCREnabled() { tflog.Info(ctx, "overriding retry behavior to immediately return VCR errors") - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(vcr.InteractionNotFoundRetryableFunc)) + o.Retryer = 
conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), vcr.InteractionNotFoundRetryableFunc) } }, withExtraOptions(ctx, p, config), diff --git a/internal/service/datasync/sweep.go b/internal/service/datasync/sweep.go index c25559e19068..a305e8538d43 100644 --- a/internal/service/datasync/sweep.go +++ b/internal/service/datasync/sweep.go @@ -46,7 +46,7 @@ func sweepAgents(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.DataSyncClient(ctx) input := &datasync.ListAgentsInput{} @@ -87,7 +87,7 @@ func sweepLocations(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %w", err) + return fmt.Errorf("getting client: %w", err) } conn := client.DataSyncClient(ctx) input := &datasync.ListLocationsInput{} @@ -152,7 +152,7 @@ func sweepTasks(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %w", err) + return fmt.Errorf("getting client: %w", err) } conn := client.DataSyncClient(ctx) input := datasync.ListTasksInput{} diff --git a/internal/service/datasync/tags_gen.go b/internal/service/datasync/tags_gen.go index 4282b58a43f3..2304375bb4f2 100644 --- a/internal/service/datasync/tags_gen.go +++ b/internal/service/datasync/tags_gen.go @@ -3,8 +3,8 @@ package datasync import ( "context" - "fmt" + "github.com/YakDriver/smarterr" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/datasync" awstypes "github.com/aws/aws-sdk-go-v2/service/datasync/types" @@ -31,7 +31,7 @@ func listTags(ctx context.Context, conn *datasync.Client, identifier string, opt page, err := pages.NextPage(ctx, optFns...) 
if err != nil { - return tftags.New(ctx, nil), err + return tftags.New(ctx, nil), smarterr.NewError(err) } output = append(output, page.Tags...) @@ -46,7 +46,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri tags, err := listTags(ctx, meta.(*conns.AWSClient).DataSyncClient(ctx), identifier) if err != nil { - return err + return smarterr.NewError(err) } if inContext, ok := tftags.FromContext(ctx); ok { @@ -124,7 +124,7 @@ func updateTags(ctx context.Context, conn *datasync.Client, identifier string, o _, err := conn.UntagResource(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("untagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } @@ -139,7 +139,7 @@ func updateTags(ctx context.Context, conn *datasync.Client, identifier string, o _, err := conn.TagResource(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("tagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } diff --git a/internal/service/datasync/task_identity_gen_test.go b/internal/service/datasync/task_identity_gen_test.go index 8eae8c512175..5bd0a126b407 100644 --- a/internal/service/datasync/task_identity_gen_test.go +++ b/internal/service/datasync/task_identity_gen_test.go @@ -16,6 +16,7 @@ import ( "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" "github.com/hashicorp/terraform-plugin-testing/tfversion" "github.com/hashicorp/terraform-provider-aws/internal/acctest" + tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -26,7 +27,7 @@ func TestAccDataSyncTask_Identity_Basic(t *testing.T) { resourceName := "aws_datasync_task.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ TerraformVersionChecks: []tfversion.TerraformVersionCheck{ tfversion.SkipBelow(tfversion.Version1_12_0), }, @@ -50,6 
+51,9 @@ func TestAccDataSyncTask_Identity_Basic(t *testing.T) { ConfigStateChecks: []statecheck.StateCheck{ statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New(names.AttrARN), compare.ValuesSame()), statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), }, }, @@ -111,7 +115,7 @@ func TestAccDataSyncTask_Identity_RegionOverride(t *testing.T) { resourceName := "aws_datasync_task.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ TerraformVersionChecks: []tfversion.TerraformVersionCheck{ tfversion.SkipBelow(tfversion.Version1_12_0), }, @@ -133,6 +137,9 @@ func TestAccDataSyncTask_Identity_RegionOverride(t *testing.T) { ConfigStateChecks: []statecheck.StateCheck{ statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New(names.AttrARN), compare.ValuesSame()), statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), }, }, @@ -224,3 +231,137 @@ func TestAccDataSyncTask_Identity_RegionOverride(t *testing.T) { }, }) } + +func TestAccDataSyncTask_Identity_ExistingResource(t *testing.T) { + ctx := acctest.Context(t) + + var v datasync.DescribeTaskOutput + resourceName := "aws_datasync_task.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: 
[]tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { + acctest.PreCheck(ctx, t) + testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.DataSyncServiceID), + CheckDestroy: testAccCheckTaskDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/Task/basic_v5.100.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTaskExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: v6.0 Identity error + { + ConfigDirectory: config.StaticDirectory("testdata/Task/basic_v6.0.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTaskExists(ctx, resourceName, &v), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.Null(), + }), + }, + }, + + // Step 3: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Task/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + 
plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + }, + }) +} + +func TestAccDataSyncTask_Identity_ExistingResource_NoRefresh_NoChange(t *testing.T) { + ctx := acctest.Context(t) + + var v datasync.DescribeTaskOutput + resourceName := "aws_datasync_task.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { + acctest.PreCheck(ctx, t) + testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.DataSyncServiceID), + CheckDestroy: testAccCheckTaskDestroy(ctx), + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/Task/basic_v5.100.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTaskExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Task/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTaskExists(ctx, resourceName, &v), + ), + }, + }, + }) +} diff --git a/internal/service/datasync/task_test.go b/internal/service/datasync/task_test.go 
index 6ac9c67c4ec0..c7cd82c63abf 100644 --- a/internal/service/datasync/task_test.go +++ b/internal/service/datasync/task_test.go @@ -15,13 +15,9 @@ import ( awstypes "github.com/aws/aws-sdk-go-v2/service/datasync/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" - "github.com/hashicorp/terraform-plugin-testing/knownvalue" "github.com/hashicorp/terraform-plugin-testing/plancheck" - "github.com/hashicorp/terraform-plugin-testing/statecheck" "github.com/hashicorp/terraform-plugin-testing/terraform" "github.com/hashicorp/terraform-provider-aws/internal/acctest" - tfknownvalue "github.com/hashicorp/terraform-provider-aws/internal/acctest/knownvalue" - tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" "github.com/hashicorp/terraform-provider-aws/internal/conns" tfdatasync "github.com/hashicorp/terraform-provider-aws/internal/service/datasync" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" @@ -951,81 +947,6 @@ func TestAccDataSyncTask_tags(t *testing.T) { }) } -func TestAccDataSyncTask_Identity_ExistingResource(t *testing.T) { - ctx := acctest.Context(t) - var task1 datasync.DescribeTaskOutput - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resourceName := "aws_datasync_task.test" - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, names.DataSyncServiceID), - CheckDestroy: testAccCheckTaskDestroy(ctx), - Steps: []resource.TestStep{ - { - ExternalProviders: map[string]resource.ExternalProvider{ - "aws": { - Source: "hashicorp/aws", - VersionConstraint: "5.100.0", - }, - }, - Config: testAccTaskConfig_basic(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckTaskExists(ctx, resourceName, &task1), - ), - ConfigStateChecks: []statecheck.StateCheck{ - tfstatecheck.ExpectNoIdentity(resourceName), - 
}, - }, - { - ExternalProviders: map[string]resource.ExternalProvider{ - "aws": { - Source: "hashicorp/aws", - VersionConstraint: "6.0.0", - }, - }, - Config: testAccTaskConfig_basic(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckTaskExists(ctx, resourceName, &task1), - ), - ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - PostApplyPostRefresh: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - }, - ConfigStateChecks: []statecheck.StateCheck{ - statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ - names.AttrARN: knownvalue.Null(), - }), - }, - }, - { - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - Config: testAccTaskConfig_basic(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckTaskExists(ctx, resourceName, &task1), - ), - ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - PostApplyPostRefresh: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - }, - ConfigStateChecks: []statecheck.StateCheck{ - statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ - names.AttrARN: tfknownvalue.RegionalARNRegexp("datasync", regexache.MustCompile(`task/task-.+`)), - }), - }, - }, - }, - }) -} - func testAccCheckTaskDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { conn := acctest.Provider.Meta().(*conns.AWSClient).DataSyncClient(ctx) diff --git a/internal/service/datasync/testdata/Agent/basic_v5.100.0/main_gen.tf b/internal/service/datasync/testdata/Agent/basic_v5.100.0/main_gen.tf new file mode 100644 index 000000000000..f3d8ad20dc19 --- /dev/null +++ b/internal/service/datasync/testdata/Agent/basic_v5.100.0/main_gen.tf @@ -0,0 
+1,130 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resource "aws_datasync_agent" "test" { + ip_address = aws_instance.test.public_ip +} + +# testAccAgentAgentConfig_base + +# Reference: https://docs.aws.amazon.com/datasync/latest/userguide/deploy-agents.html +data "aws_ssm_parameter" "aws_service_datasync_ami" { + name = "/aws/service/datasync/ami" +} + +resource "aws_internet_gateway" "test" { + vpc_id = aws_vpc.test.id +} + +resource "aws_route_table" "test" { + vpc_id = aws_vpc.test.id + + route { + cidr_block = "0.0.0.0/0" + gateway_id = aws_internet_gateway.test.id + } +} + +resource "aws_route_table_association" "test" { + subnet_id = aws_subnet.test[0].id + route_table_id = aws_route_table.test.id +} + +resource "aws_security_group" "test" { + name = var.rName + vpc_id = aws_vpc.test.id + + egress { + from_port = 0 + to_port = 0 + protocol = "-1" + cidr_blocks = ["0.0.0.0/0"] + } + + ingress { + from_port = 0 + to_port = 0 + protocol = "-1" + cidr_blocks = ["0.0.0.0/0"] + } +} + +resource "aws_instance" "test" { + depends_on = [aws_internet_gateway.test] + + ami = data.aws_ssm_parameter.aws_service_datasync_ami.value + instance_type = data.aws_ec2_instance_type_offering.available.instance_type + vpc_security_group_ids = [aws_security_group.test.id] + subnet_id = aws_subnet.test[0].id + + # The Instance must have a public IP address because the aws_datasync_agent retrieves + # the activation key by making an HTTP request to the instance + associate_public_ip_address = true +} + +# acctest.ConfigVPCWithSubnets(rName, 1) + +resource "aws_vpc" "test" { + cidr_block = "10.0.0.0/16" +} + +resource "aws_subnet" "test" { + count = 1 + + vpc_id = aws_vpc.test.id + availability_zone = data.aws_availability_zones.available.names[count.index] + cidr_block = cidrsubnet(aws_vpc.test.cidr_block, 8, count.index) +} + +# acctest.ConfigAvailableAZsNoOptInDefaultExclude + +data "aws_availability_zones" "available" { + exclude_zone_ids = 
local.default_exclude_zone_ids + state = "available" + + filter { + name = "opt-in-status" + values = ["opt-in-not-required"] + } +} + +locals { + default_exclude_zone_ids = ["usw2-az4", "usgw1-az2"] +} + +# acctest.AvailableEC2InstanceTypeForAvailabilityZone("aws_subnet.test[0].availability_zone", "m5.2xlarge", "m5.4xlarge") + +data "aws_ec2_instance_type_offering" "available" { + filter { + name = "instance-type" + values = local.preferred_instance_types + } + + filter { + name = "location" + values = [aws_subnet.test[0].availability_zone] + } + + location_type = "availability-zone" + preferred_instance_types = local.preferred_instance_types +} + +locals { + preferred_instance_types = ["m5.2xlarge", "m5.4xlarge"] +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "5.100.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/datasync/testdata/Agent/basic_v6.0.0/main_gen.tf b/internal/service/datasync/testdata/Agent/basic_v6.0.0/main_gen.tf new file mode 100644 index 000000000000..7485dbe2129f --- /dev/null +++ b/internal/service/datasync/testdata/Agent/basic_v6.0.0/main_gen.tf @@ -0,0 +1,130 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_datasync_agent" "test" { + ip_address = aws_instance.test.public_ip +} + +# testAccAgentAgentConfig_base + +# Reference: https://docs.aws.amazon.com/datasync/latest/userguide/deploy-agents.html +data "aws_ssm_parameter" "aws_service_datasync_ami" { + name = "/aws/service/datasync/ami" +} + +resource "aws_internet_gateway" "test" { + vpc_id = aws_vpc.test.id +} + +resource "aws_route_table" "test" { + vpc_id = aws_vpc.test.id + + route { + cidr_block = "0.0.0.0/0" + gateway_id = aws_internet_gateway.test.id + } +} + +resource "aws_route_table_association" "test" { + subnet_id = aws_subnet.test[0].id + route_table_id = aws_route_table.test.id +} + +resource "aws_security_group" "test" { + name = var.rName + vpc_id = aws_vpc.test.id + + egress { + from_port = 0 + to_port = 0 + protocol = "-1" + cidr_blocks = ["0.0.0.0/0"] + } + + ingress { + from_port = 0 + to_port = 0 + protocol = "-1" + cidr_blocks = ["0.0.0.0/0"] + } +} + +resource "aws_instance" "test" { + depends_on = [aws_internet_gateway.test] + + ami = data.aws_ssm_parameter.aws_service_datasync_ami.value + instance_type = data.aws_ec2_instance_type_offering.available.instance_type + vpc_security_group_ids = [aws_security_group.test.id] + subnet_id = aws_subnet.test[0].id + + # The Instance must have a public IP address because the aws_datasync_agent retrieves + # the activation key by making an HTTP request to the instance + associate_public_ip_address = true +} + +# acctest.ConfigVPCWithSubnets(rName, 1) + +resource "aws_vpc" "test" { + cidr_block = "10.0.0.0/16" +} + +resource "aws_subnet" "test" { + count = 1 + + vpc_id = aws_vpc.test.id + availability_zone = data.aws_availability_zones.available.names[count.index] + cidr_block = cidrsubnet(aws_vpc.test.cidr_block, 8, count.index) +} + +# acctest.ConfigAvailableAZsNoOptInDefaultExclude + +data "aws_availability_zones" "available" { + exclude_zone_ids = local.default_exclude_zone_ids + state = "available" 
+ + filter { + name = "opt-in-status" + values = ["opt-in-not-required"] + } +} + +locals { + default_exclude_zone_ids = ["usw2-az4", "usgw1-az2"] +} + +# acctest.AvailableEC2InstanceTypeForAvailabilityZone("aws_subnet.test[0].availability_zone", "m5.2xlarge", "m5.4xlarge") + +data "aws_ec2_instance_type_offering" "available" { + filter { + name = "instance-type" + values = local.preferred_instance_types + } + + filter { + name = "location" + values = [aws_subnet.test[0].availability_zone] + } + + location_type = "availability-zone" + preferred_instance_types = local.preferred_instance_types +} + +locals { + preferred_instance_types = ["m5.2xlarge", "m5.4xlarge"] +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "6.0.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/datasync/testdata/LocationMicrosoftAzureBlobStorage/basic/main_gen.tf b/internal/service/datasync/testdata/LocationAzureBlob/basic/main_gen.tf similarity index 100% rename from internal/service/datasync/testdata/LocationMicrosoftAzureBlobStorage/basic/main_gen.tf rename to internal/service/datasync/testdata/LocationAzureBlob/basic/main_gen.tf diff --git a/internal/service/datasync/testdata/LocationAzureBlob/basic_v5.100.0/main_gen.tf b/internal/service/datasync/testdata/LocationAzureBlob/basic_v5.100.0/main_gen.tf new file mode 100644 index 000000000000..6b083f543b93 --- /dev/null +++ b/internal/service/datasync/testdata/LocationAzureBlob/basic_v5.100.0/main_gen.tf @@ -0,0 +1,145 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_datasync_location_azure_blob" "test" { + agent_arns = [aws_datasync_agent.test.arn] + authentication_type = "SAS" + container_url = "https://myaccount.blob.core.windows.net/mycontainer" + subdirectory = "/myvdir1/myvdir2" + + sas_configuration { + token = "sp=r&st=2023-12-20T14:54:52Z&se=2023-12-20T22:54:52Z&spr=https&sv=2021-06-08&sr=c&sig=aBBKDWQvyuVcTPH9EBp%%2FXTI9E%%2F%%2Fmq171%%2BZU178wcwqU%%3D" + } +} + +# testAccLocationAzureBlobConfig_base + +resource "aws_datasync_agent" "test" { + ip_address = aws_instance.test.public_ip + name = var.rName +} + +# testAccAgentAgentConfig_base + + +# Reference: https://docs.aws.amazon.com/datasync/latest/userguide/deploy-agents.html +data "aws_ssm_parameter" "aws_service_datasync_ami" { + name = "/aws/service/datasync/ami" +} + +resource "aws_internet_gateway" "test" { + vpc_id = aws_vpc.test.id +} + +resource "aws_route_table" "test" { + vpc_id = aws_vpc.test.id + + route { + cidr_block = "0.0.0.0/0" + gateway_id = aws_internet_gateway.test.id + } +} + +resource "aws_route_table_association" "test" { + subnet_id = aws_subnet.test[0].id + route_table_id = aws_route_table.test.id +} + +resource "aws_security_group" "test" { + name = var.rName + vpc_id = aws_vpc.test.id + + egress { + from_port = 0 + to_port = 0 + protocol = "-1" + cidr_blocks = ["0.0.0.0/0"] + } + + ingress { + from_port = 0 + to_port = 0 + protocol = "-1" + cidr_blocks = ["0.0.0.0/0"] + } +} + +resource "aws_instance" "test" { + depends_on = [aws_internet_gateway.test] + + ami = data.aws_ssm_parameter.aws_service_datasync_ami.value + instance_type = data.aws_ec2_instance_type_offering.available.instance_type + vpc_security_group_ids = [aws_security_group.test.id] + subnet_id = aws_subnet.test[0].id + + # The Instance must have a public IP address because the aws_datasync_agent retrieves + # the activation key by making an HTTP request to the instance + associate_public_ip_address = true +} + +# 
acctest.ConfigVPCWithSubnets(rName, 1) + +resource "aws_vpc" "test" { + cidr_block = "10.0.0.0/16" +} + +resource "aws_subnet" "test" { + count = 1 + + vpc_id = aws_vpc.test.id + availability_zone = data.aws_availability_zones.available.names[count.index] + cidr_block = cidrsubnet(aws_vpc.test.cidr_block, 8, count.index) +} + +# acctest.ConfigAvailableAZsNoOptInDefaultExclude + +data "aws_availability_zones" "available" { + exclude_zone_ids = local.default_exclude_zone_ids + state = "available" + + filter { + name = "opt-in-status" + values = ["opt-in-not-required"] + } +} + +locals { + default_exclude_zone_ids = ["usw2-az4", "usgw1-az2"] +} + +# acctest.AvailableEC2InstanceTypeForAvailabilityZone("aws_subnet.test[0].availability_zone", "m5.2xlarge", "m5.4xlarge") + +data "aws_ec2_instance_type_offering" "available" { + filter { + name = "instance-type" + values = local.preferred_instance_types + } + + filter { + name = "location" + values = [aws_subnet.test[0].availability_zone] + } + + location_type = "availability-zone" + preferred_instance_types = local.preferred_instance_types +} + +locals { + preferred_instance_types = ["m5.2xlarge", "m5.4xlarge"] +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "5.100.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/datasync/testdata/LocationAzureBlob/basic_v6.0.0/main_gen.tf b/internal/service/datasync/testdata/LocationAzureBlob/basic_v6.0.0/main_gen.tf new file mode 100644 index 000000000000..80878d9e958f --- /dev/null +++ b/internal/service/datasync/testdata/LocationAzureBlob/basic_v6.0.0/main_gen.tf @@ -0,0 +1,145 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_datasync_location_azure_blob" "test" { + agent_arns = [aws_datasync_agent.test.arn] + authentication_type = "SAS" + container_url = "https://myaccount.blob.core.windows.net/mycontainer" + subdirectory = "/myvdir1/myvdir2" + + sas_configuration { + token = "sp=r&st=2023-12-20T14:54:52Z&se=2023-12-20T22:54:52Z&spr=https&sv=2021-06-08&sr=c&sig=aBBKDWQvyuVcTPH9EBp%%2FXTI9E%%2F%%2Fmq171%%2BZU178wcwqU%%3D" + } +} + +# testAccLocationAzureBlobConfig_base + +resource "aws_datasync_agent" "test" { + ip_address = aws_instance.test.public_ip + name = var.rName +} + +# testAccAgentAgentConfig_base + + +# Reference: https://docs.aws.amazon.com/datasync/latest/userguide/deploy-agents.html +data "aws_ssm_parameter" "aws_service_datasync_ami" { + name = "/aws/service/datasync/ami" +} + +resource "aws_internet_gateway" "test" { + vpc_id = aws_vpc.test.id +} + +resource "aws_route_table" "test" { + vpc_id = aws_vpc.test.id + + route { + cidr_block = "0.0.0.0/0" + gateway_id = aws_internet_gateway.test.id + } +} + +resource "aws_route_table_association" "test" { + subnet_id = aws_subnet.test[0].id + route_table_id = aws_route_table.test.id +} + +resource "aws_security_group" "test" { + name = var.rName + vpc_id = aws_vpc.test.id + + egress { + from_port = 0 + to_port = 0 + protocol = "-1" + cidr_blocks = ["0.0.0.0/0"] + } + + ingress { + from_port = 0 + to_port = 0 + protocol = "-1" + cidr_blocks = ["0.0.0.0/0"] + } +} + +resource "aws_instance" "test" { + depends_on = [aws_internet_gateway.test] + + ami = data.aws_ssm_parameter.aws_service_datasync_ami.value + instance_type = data.aws_ec2_instance_type_offering.available.instance_type + vpc_security_group_ids = [aws_security_group.test.id] + subnet_id = aws_subnet.test[0].id + + # The Instance must have a public IP address because the aws_datasync_agent retrieves + # the activation key by making an HTTP request to the instance + associate_public_ip_address = true +} + +# 
acctest.ConfigVPCWithSubnets(rName, 1) + +resource "aws_vpc" "test" { + cidr_block = "10.0.0.0/16" +} + +resource "aws_subnet" "test" { + count = 1 + + vpc_id = aws_vpc.test.id + availability_zone = data.aws_availability_zones.available.names[count.index] + cidr_block = cidrsubnet(aws_vpc.test.cidr_block, 8, count.index) +} + +# acctest.ConfigAvailableAZsNoOptInDefaultExclude + +data "aws_availability_zones" "available" { + exclude_zone_ids = local.default_exclude_zone_ids + state = "available" + + filter { + name = "opt-in-status" + values = ["opt-in-not-required"] + } +} + +locals { + default_exclude_zone_ids = ["usw2-az4", "usgw1-az2"] +} + +# acctest.AvailableEC2InstanceTypeForAvailabilityZone("aws_subnet.test[0].availability_zone", "m5.2xlarge", "m5.4xlarge") + +data "aws_ec2_instance_type_offering" "available" { + filter { + name = "instance-type" + values = local.preferred_instance_types + } + + filter { + name = "location" + values = [aws_subnet.test[0].availability_zone] + } + + location_type = "availability-zone" + preferred_instance_types = local.preferred_instance_types +} + +locals { + preferred_instance_types = ["m5.2xlarge", "m5.4xlarge"] +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "6.0.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/datasync/testdata/LocationMicrosoftAzureBlobStorage/region_override/main_gen.tf b/internal/service/datasync/testdata/LocationAzureBlob/region_override/main_gen.tf similarity index 100% rename from internal/service/datasync/testdata/LocationMicrosoftAzureBlobStorage/region_override/main_gen.tf rename to internal/service/datasync/testdata/LocationAzureBlob/region_override/main_gen.tf diff --git a/internal/service/datasync/testdata/LocationEFS/basic_v5.100.0/main_gen.tf b/internal/service/datasync/testdata/LocationEFS/basic_v5.100.0/main_gen.tf new file mode 100644 
index 000000000000..fabadb8c55ad --- /dev/null +++ b/internal/service/datasync/testdata/LocationEFS/basic_v5.100.0/main_gen.tf @@ -0,0 +1,75 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resource "aws_datasync_location_efs" "test" { + efs_file_system_arn = aws_efs_mount_target.test.file_system_arn + + ec2_config { + security_group_arns = [aws_security_group.test.arn] + subnet_arn = aws_subnet.test[0].arn + } +} + +# testAccLocationEFSConfig_base + +#resource "aws_vpc" "test" { +# cidr_block = "10.0.0.0/16" +#} +# +#resource "aws_subnet" "test" { +# cidr_block = "10.0.0.0/24" +# vpc_id = aws_vpc.test.id +#} + +resource "aws_security_group" "test" { + vpc_id = aws_vpc.test.id +} + +resource "aws_efs_file_system" "test" { +} + +resource "aws_efs_mount_target" "test" { + file_system_id = aws_efs_file_system.test.id + subnet_id = aws_subnet.test[0].id +} + +# acctest.ConfigVPCWithSubnets(rName, 1) + +resource "aws_vpc" "test" { + cidr_block = "10.0.0.0/16" +} + +resource "aws_subnet" "test" { + count = 1 + + vpc_id = aws_vpc.test.id + availability_zone = data.aws_availability_zones.available.names[count.index] + cidr_block = cidrsubnet(aws_vpc.test.cidr_block, 8, count.index) +} + +# acctest.ConfigAvailableAZsNoOptInDefaultExclude + +data "aws_availability_zones" "available" { + exclude_zone_ids = local.default_exclude_zone_ids + state = "available" + + filter { + name = "opt-in-status" + values = ["opt-in-not-required"] + } +} + +locals { + default_exclude_zone_ids = ["usw2-az4", "usgw1-az2"] +} + +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "5.100.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/datasync/testdata/LocationEFS/basic_v6.0.0/main_gen.tf b/internal/service/datasync/testdata/LocationEFS/basic_v6.0.0/main_gen.tf new file mode 100644 index 000000000000..62db23e9105a --- /dev/null +++ b/internal/service/datasync/testdata/LocationEFS/basic_v6.0.0/main_gen.tf @@ -0,0 +1,75 @@ +# 
Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resource "aws_datasync_location_efs" "test" { + efs_file_system_arn = aws_efs_mount_target.test.file_system_arn + + ec2_config { + security_group_arns = [aws_security_group.test.arn] + subnet_arn = aws_subnet.test[0].arn + } +} + +# testAccLocationEFSConfig_base + +#resource "aws_vpc" "test" { +# cidr_block = "10.0.0.0/16" +#} +# +#resource "aws_subnet" "test" { +# cidr_block = "10.0.0.0/24" +# vpc_id = aws_vpc.test.id +#} + +resource "aws_security_group" "test" { + vpc_id = aws_vpc.test.id +} + +resource "aws_efs_file_system" "test" { +} + +resource "aws_efs_mount_target" "test" { + file_system_id = aws_efs_file_system.test.id + subnet_id = aws_subnet.test[0].id +} + +# acctest.ConfigVPCWithSubnets(rName, 1) + +resource "aws_vpc" "test" { + cidr_block = "10.0.0.0/16" +} + +resource "aws_subnet" "test" { + count = 1 + + vpc_id = aws_vpc.test.id + availability_zone = data.aws_availability_zones.available.names[count.index] + cidr_block = cidrsubnet(aws_vpc.test.cidr_block, 8, count.index) +} + +# acctest.ConfigAvailableAZsNoOptInDefaultExclude + +data "aws_availability_zones" "available" { + exclude_zone_ids = local.default_exclude_zone_ids + state = "available" + + filter { + name = "opt-in-status" + values = ["opt-in-not-required"] + } +} + +locals { + default_exclude_zone_ids = ["usw2-az4", "usgw1-az2"] +} + +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "6.0.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/datasync/testdata/LocationHDFS/basic_v5.100.0/main_gen.tf b/internal/service/datasync/testdata/LocationHDFS/basic_v5.100.0/main_gen.tf new file mode 100644 index 000000000000..b752b5a97db8 --- /dev/null +++ b/internal/service/datasync/testdata/LocationHDFS/basic_v5.100.0/main_gen.tf @@ -0,0 +1,144 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_datasync_location_hdfs" "test" { + agent_arns = [aws_datasync_agent.test.arn] + authentication_type = "SIMPLE" + simple_user = var.rName + + name_node { + hostname = aws_instance.test.private_dns + port = 80 + } +} + +# testAccLocationHDFSConfig_base + +resource "aws_datasync_agent" "test" { + ip_address = aws_instance.test.public_ip + name = var.rName +} + +# testAccAgentAgentConfig_base + +# Reference: https://docs.aws.amazon.com/datasync/latest/userguide/deploy-agents.html +data "aws_ssm_parameter" "aws_service_datasync_ami" { + name = "/aws/service/datasync/ami" +} + +resource "aws_internet_gateway" "test" { + vpc_id = aws_vpc.test.id +} + +resource "aws_route_table" "test" { + vpc_id = aws_vpc.test.id + + route { + cidr_block = "0.0.0.0/0" + gateway_id = aws_internet_gateway.test.id + } +} + +resource "aws_route_table_association" "test" { + subnet_id = aws_subnet.test[0].id + route_table_id = aws_route_table.test.id +} + +resource "aws_security_group" "test" { + name = var.rName + vpc_id = aws_vpc.test.id + + egress { + from_port = 0 + to_port = 0 + protocol = "-1" + cidr_blocks = ["0.0.0.0/0"] + } + + ingress { + from_port = 0 + to_port = 0 + protocol = "-1" + cidr_blocks = ["0.0.0.0/0"] + } +} + +resource "aws_instance" "test" { + depends_on = [aws_internet_gateway.test] + + ami = data.aws_ssm_parameter.aws_service_datasync_ami.value + instance_type = data.aws_ec2_instance_type_offering.available.instance_type + vpc_security_group_ids = [aws_security_group.test.id] + subnet_id = aws_subnet.test[0].id + + # The Instance must have a public IP address because the aws_datasync_agent retrieves + # the activation key by making an HTTP request to the instance + associate_public_ip_address = true +} + +# acctest.ConfigVPCWithSubnets(rName, 1) + +resource "aws_vpc" "test" { + cidr_block = "10.0.0.0/16" +} + +resource "aws_subnet" "test" { + count = 1 + + vpc_id = aws_vpc.test.id + availability_zone = 
data.aws_availability_zones.available.names[count.index] + cidr_block = cidrsubnet(aws_vpc.test.cidr_block, 8, count.index) +} + +# acctest.ConfigAvailableAZsNoOptInDefaultExclude + +data "aws_availability_zones" "available" { + exclude_zone_ids = local.default_exclude_zone_ids + state = "available" + + filter { + name = "opt-in-status" + values = ["opt-in-not-required"] + } +} + +locals { + default_exclude_zone_ids = ["usw2-az4", "usgw1-az2"] +} + +# acctest.AvailableEC2InstanceTypeForAvailabilityZone("aws_subnet.test[0].availability_zone", "m5.2xlarge", "m5.4xlarge") + +data "aws_ec2_instance_type_offering" "available" { + filter { + name = "instance-type" + values = local.preferred_instance_types + } + + filter { + name = "location" + values = [aws_subnet.test[0].availability_zone] + } + + location_type = "availability-zone" + preferred_instance_types = local.preferred_instance_types +} + +locals { + preferred_instance_types = ["m5.2xlarge", "m5.4xlarge"] +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "5.100.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/datasync/testdata/LocationHDFS/basic_v6.0.0/main_gen.tf b/internal/service/datasync/testdata/LocationHDFS/basic_v6.0.0/main_gen.tf new file mode 100644 index 000000000000..24324437ab77 --- /dev/null +++ b/internal/service/datasync/testdata/LocationHDFS/basic_v6.0.0/main_gen.tf @@ -0,0 +1,144 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_datasync_location_hdfs" "test" { + agent_arns = [aws_datasync_agent.test.arn] + authentication_type = "SIMPLE" + simple_user = var.rName + + name_node { + hostname = aws_instance.test.private_dns + port = 80 + } +} + +# testAccLocationHDFSConfig_base + +resource "aws_datasync_agent" "test" { + ip_address = aws_instance.test.public_ip + name = var.rName +} + +# testAccAgentAgentConfig_base + +# Reference: https://docs.aws.amazon.com/datasync/latest/userguide/deploy-agents.html +data "aws_ssm_parameter" "aws_service_datasync_ami" { + name = "/aws/service/datasync/ami" +} + +resource "aws_internet_gateway" "test" { + vpc_id = aws_vpc.test.id +} + +resource "aws_route_table" "test" { + vpc_id = aws_vpc.test.id + + route { + cidr_block = "0.0.0.0/0" + gateway_id = aws_internet_gateway.test.id + } +} + +resource "aws_route_table_association" "test" { + subnet_id = aws_subnet.test[0].id + route_table_id = aws_route_table.test.id +} + +resource "aws_security_group" "test" { + name = var.rName + vpc_id = aws_vpc.test.id + + egress { + from_port = 0 + to_port = 0 + protocol = "-1" + cidr_blocks = ["0.0.0.0/0"] + } + + ingress { + from_port = 0 + to_port = 0 + protocol = "-1" + cidr_blocks = ["0.0.0.0/0"] + } +} + +resource "aws_instance" "test" { + depends_on = [aws_internet_gateway.test] + + ami = data.aws_ssm_parameter.aws_service_datasync_ami.value + instance_type = data.aws_ec2_instance_type_offering.available.instance_type + vpc_security_group_ids = [aws_security_group.test.id] + subnet_id = aws_subnet.test[0].id + + # The Instance must have a public IP address because the aws_datasync_agent retrieves + # the activation key by making an HTTP request to the instance + associate_public_ip_address = true +} + +# acctest.ConfigVPCWithSubnets(rName, 1) + +resource "aws_vpc" "test" { + cidr_block = "10.0.0.0/16" +} + +resource "aws_subnet" "test" { + count = 1 + + vpc_id = aws_vpc.test.id + availability_zone = 
data.aws_availability_zones.available.names[count.index] + cidr_block = cidrsubnet(aws_vpc.test.cidr_block, 8, count.index) +} + +# acctest.ConfigAvailableAZsNoOptInDefaultExclude + +data "aws_availability_zones" "available" { + exclude_zone_ids = local.default_exclude_zone_ids + state = "available" + + filter { + name = "opt-in-status" + values = ["opt-in-not-required"] + } +} + +locals { + default_exclude_zone_ids = ["usw2-az4", "usgw1-az2"] +} + +# acctest.AvailableEC2InstanceTypeForAvailabilityZone("aws_subnet.test[0].availability_zone", "m5.2xlarge", "m5.4xlarge") + +data "aws_ec2_instance_type_offering" "available" { + filter { + name = "instance-type" + values = local.preferred_instance_types + } + + filter { + name = "location" + values = [aws_subnet.test[0].availability_zone] + } + + location_type = "availability-zone" + preferred_instance_types = local.preferred_instance_types +} + +locals { + preferred_instance_types = ["m5.2xlarge", "m5.4xlarge"] +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "6.0.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/datasync/testdata/LocationNFS/basic_v5.100.0/main_gen.tf b/internal/service/datasync/testdata/LocationNFS/basic_v5.100.0/main_gen.tf new file mode 100644 index 000000000000..515643425db7 --- /dev/null +++ b/internal/service/datasync/testdata/LocationNFS/basic_v5.100.0/main_gen.tf @@ -0,0 +1,142 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_datasync_location_nfs" "test" { + server_hostname = "example.com" + subdirectory = "/" + + on_prem_config { + agent_arns = [aws_datasync_agent.test.arn] + } +} + +# testAccLocationNFSConfig_base + +resource "aws_datasync_agent" "test" { + ip_address = aws_instance.test.public_ip + name = var.rName +} + +# testAccAgentAgentConfig_base + +# Reference: https://docs.aws.amazon.com/datasync/latest/userguide/deploy-agents.html +data "aws_ssm_parameter" "aws_service_datasync_ami" { + name = "/aws/service/datasync/ami" +} + +resource "aws_internet_gateway" "test" { + vpc_id = aws_vpc.test.id +} + +resource "aws_route_table" "test" { + vpc_id = aws_vpc.test.id + + route { + cidr_block = "0.0.0.0/0" + gateway_id = aws_internet_gateway.test.id + } +} + +resource "aws_route_table_association" "test" { + subnet_id = aws_subnet.test[0].id + route_table_id = aws_route_table.test.id +} + +resource "aws_security_group" "test" { + name = var.rName + vpc_id = aws_vpc.test.id + + egress { + from_port = 0 + to_port = 0 + protocol = "-1" + cidr_blocks = ["0.0.0.0/0"] + } + + ingress { + from_port = 0 + to_port = 0 + protocol = "-1" + cidr_blocks = ["0.0.0.0/0"] + } +} + +resource "aws_instance" "test" { + depends_on = [aws_internet_gateway.test] + + ami = data.aws_ssm_parameter.aws_service_datasync_ami.value + instance_type = data.aws_ec2_instance_type_offering.available.instance_type + vpc_security_group_ids = [aws_security_group.test.id] + subnet_id = aws_subnet.test[0].id + + # The Instance must have a public IP address because the aws_datasync_agent retrieves + # the activation key by making an HTTP request to the instance + associate_public_ip_address = true +} + +# acctest.ConfigVPCWithSubnets(rName, 1) + +resource "aws_vpc" "test" { + cidr_block = "10.0.0.0/16" +} + +resource "aws_subnet" "test" { + count = 1 + + vpc_id = aws_vpc.test.id + availability_zone = data.aws_availability_zones.available.names[count.index] + cidr_block 
= cidrsubnet(aws_vpc.test.cidr_block, 8, count.index) +} + +# acctest.ConfigAvailableAZsNoOptInDefaultExclude + +data "aws_availability_zones" "available" { + exclude_zone_ids = local.default_exclude_zone_ids + state = "available" + + filter { + name = "opt-in-status" + values = ["opt-in-not-required"] + } +} + +locals { + default_exclude_zone_ids = ["usw2-az4", "usgw1-az2"] +} + +# acctest.AvailableEC2InstanceTypeForAvailabilityZone("aws_subnet.test[0].availability_zone", "m5.2xlarge", "m5.4xlarge") + +data "aws_ec2_instance_type_offering" "available" { + filter { + name = "instance-type" + values = local.preferred_instance_types + } + + filter { + name = "location" + values = [aws_subnet.test[0].availability_zone] + } + + location_type = "availability-zone" + preferred_instance_types = local.preferred_instance_types +} + +locals { + preferred_instance_types = ["m5.2xlarge", "m5.4xlarge"] +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "5.100.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/datasync/testdata/LocationNFS/basic_v6.0.0/main_gen.tf b/internal/service/datasync/testdata/LocationNFS/basic_v6.0.0/main_gen.tf new file mode 100644 index 000000000000..50a86e15f00e --- /dev/null +++ b/internal/service/datasync/testdata/LocationNFS/basic_v6.0.0/main_gen.tf @@ -0,0 +1,142 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_datasync_location_nfs" "test" { + server_hostname = "example.com" + subdirectory = "/" + + on_prem_config { + agent_arns = [aws_datasync_agent.test.arn] + } +} + +# testAccLocationNFSConfig_base + +resource "aws_datasync_agent" "test" { + ip_address = aws_instance.test.public_ip + name = var.rName +} + +# testAccAgentAgentConfig_base + +# Reference: https://docs.aws.amazon.com/datasync/latest/userguide/deploy-agents.html +data "aws_ssm_parameter" "aws_service_datasync_ami" { + name = "/aws/service/datasync/ami" +} + +resource "aws_internet_gateway" "test" { + vpc_id = aws_vpc.test.id +} + +resource "aws_route_table" "test" { + vpc_id = aws_vpc.test.id + + route { + cidr_block = "0.0.0.0/0" + gateway_id = aws_internet_gateway.test.id + } +} + +resource "aws_route_table_association" "test" { + subnet_id = aws_subnet.test[0].id + route_table_id = aws_route_table.test.id +} + +resource "aws_security_group" "test" { + name = var.rName + vpc_id = aws_vpc.test.id + + egress { + from_port = 0 + to_port = 0 + protocol = "-1" + cidr_blocks = ["0.0.0.0/0"] + } + + ingress { + from_port = 0 + to_port = 0 + protocol = "-1" + cidr_blocks = ["0.0.0.0/0"] + } +} + +resource "aws_instance" "test" { + depends_on = [aws_internet_gateway.test] + + ami = data.aws_ssm_parameter.aws_service_datasync_ami.value + instance_type = data.aws_ec2_instance_type_offering.available.instance_type + vpc_security_group_ids = [aws_security_group.test.id] + subnet_id = aws_subnet.test[0].id + + # The Instance must have a public IP address because the aws_datasync_agent retrieves + # the activation key by making an HTTP request to the instance + associate_public_ip_address = true +} + +# acctest.ConfigVPCWithSubnets(rName, 1) + +resource "aws_vpc" "test" { + cidr_block = "10.0.0.0/16" +} + +resource "aws_subnet" "test" { + count = 1 + + vpc_id = aws_vpc.test.id + availability_zone = data.aws_availability_zones.available.names[count.index] + cidr_block 
= cidrsubnet(aws_vpc.test.cidr_block, 8, count.index) +} + +# acctest.ConfigAvailableAZsNoOptInDefaultExclude + +data "aws_availability_zones" "available" { + exclude_zone_ids = local.default_exclude_zone_ids + state = "available" + + filter { + name = "opt-in-status" + values = ["opt-in-not-required"] + } +} + +locals { + default_exclude_zone_ids = ["usw2-az4", "usgw1-az2"] +} + +# acctest.AvailableEC2InstanceTypeForAvailabilityZone("aws_subnet.test[0].availability_zone", "m5.2xlarge", "m5.4xlarge") + +data "aws_ec2_instance_type_offering" "available" { + filter { + name = "instance-type" + values = local.preferred_instance_types + } + + filter { + name = "location" + values = [aws_subnet.test[0].availability_zone] + } + + location_type = "availability-zone" + preferred_instance_types = local.preferred_instance_types +} + +locals { + preferred_instance_types = ["m5.2xlarge", "m5.4xlarge"] +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "6.0.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/datasync/testdata/LocationObjectStorage/basic_v5.100.0/main_gen.tf b/internal/service/datasync/testdata/LocationObjectStorage/basic_v5.100.0/main_gen.tf new file mode 100644 index 000000000000..064c72f70cfb --- /dev/null +++ b/internal/service/datasync/testdata/LocationObjectStorage/basic_v5.100.0/main_gen.tf @@ -0,0 +1,145 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_datasync_location_object_storage" "test" { + agent_arns = [aws_datasync_agent.test.arn] + server_hostname = var.domain + bucket_name = var.rName + server_protocol = "HTTP" + server_port = 8080 +} + +# testAccLocationObjectStorageConfig_base + +resource "aws_datasync_agent" "test" { + ip_address = aws_instance.test.public_ip +} + +# testAccAgentAgentConfig_base + +# Reference: https://docs.aws.amazon.com/datasync/latest/userguide/deploy-agents.html +data "aws_ssm_parameter" "aws_service_datasync_ami" { + name = "/aws/service/datasync/ami" +} + +resource "aws_internet_gateway" "test" { + vpc_id = aws_vpc.test.id +} + +resource "aws_route_table" "test" { + vpc_id = aws_vpc.test.id + + route { + cidr_block = "0.0.0.0/0" + gateway_id = aws_internet_gateway.test.id + } +} + +resource "aws_route_table_association" "test" { + subnet_id = aws_subnet.test[0].id + route_table_id = aws_route_table.test.id +} + +resource "aws_security_group" "test" { + name = var.rName + vpc_id = aws_vpc.test.id + + egress { + from_port = 0 + to_port = 0 + protocol = "-1" + cidr_blocks = ["0.0.0.0/0"] + } + + ingress { + from_port = 0 + to_port = 0 + protocol = "-1" + cidr_blocks = ["0.0.0.0/0"] + } +} + +resource "aws_instance" "test" { + depends_on = [aws_internet_gateway.test] + + ami = data.aws_ssm_parameter.aws_service_datasync_ami.value + instance_type = data.aws_ec2_instance_type_offering.available.instance_type + vpc_security_group_ids = [aws_security_group.test.id] + subnet_id = aws_subnet.test[0].id + + # The Instance must have a public IP address because the aws_datasync_agent retrieves + # the activation key by making an HTTP request to the instance + associate_public_ip_address = true +} + +# acctest.ConfigVPCWithSubnets(rName, 1) + +resource "aws_vpc" "test" { + cidr_block = "10.0.0.0/16" +} + +resource "aws_subnet" "test" { + count = 1 + + vpc_id = aws_vpc.test.id + availability_zone = 
data.aws_availability_zones.available.names[count.index] + cidr_block = cidrsubnet(aws_vpc.test.cidr_block, 8, count.index) +} + +# acctest.ConfigAvailableAZsNoOptInDefaultExclude + +data "aws_availability_zones" "available" { + exclude_zone_ids = local.default_exclude_zone_ids + state = "available" + + filter { + name = "opt-in-status" + values = ["opt-in-not-required"] + } +} + +locals { + default_exclude_zone_ids = ["usw2-az4", "usgw1-az2"] +} + +# acctest.AvailableEC2InstanceTypeForAvailabilityZone("aws_subnet.test[0].availability_zone", "m5.2xlarge", "m5.4xlarge") + +data "aws_ec2_instance_type_offering" "available" { + filter { + name = "instance-type" + values = local.preferred_instance_types + } + + filter { + name = "location" + values = [aws_subnet.test[0].availability_zone] + } + + location_type = "availability-zone" + preferred_instance_types = local.preferred_instance_types +} + +locals { + preferred_instance_types = ["m5.2xlarge", "m5.4xlarge"] +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} +variable "domain" { + type = string + nullable = false +} + +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "5.100.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/datasync/testdata/LocationObjectStorage/basic_v6.0.0/main_gen.tf b/internal/service/datasync/testdata/LocationObjectStorage/basic_v6.0.0/main_gen.tf new file mode 100644 index 000000000000..598a550f2a42 --- /dev/null +++ b/internal/service/datasync/testdata/LocationObjectStorage/basic_v6.0.0/main_gen.tf @@ -0,0 +1,145 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_datasync_location_object_storage" "test" { + agent_arns = [aws_datasync_agent.test.arn] + server_hostname = var.domain + bucket_name = var.rName + server_protocol = "HTTP" + server_port = 8080 +} + +# testAccLocationObjectStorageConfig_base + +resource "aws_datasync_agent" "test" { + ip_address = aws_instance.test.public_ip +} + +# testAccAgentAgentConfig_base + +# Reference: https://docs.aws.amazon.com/datasync/latest/userguide/deploy-agents.html +data "aws_ssm_parameter" "aws_service_datasync_ami" { + name = "/aws/service/datasync/ami" +} + +resource "aws_internet_gateway" "test" { + vpc_id = aws_vpc.test.id +} + +resource "aws_route_table" "test" { + vpc_id = aws_vpc.test.id + + route { + cidr_block = "0.0.0.0/0" + gateway_id = aws_internet_gateway.test.id + } +} + +resource "aws_route_table_association" "test" { + subnet_id = aws_subnet.test[0].id + route_table_id = aws_route_table.test.id +} + +resource "aws_security_group" "test" { + name = var.rName + vpc_id = aws_vpc.test.id + + egress { + from_port = 0 + to_port = 0 + protocol = "-1" + cidr_blocks = ["0.0.0.0/0"] + } + + ingress { + from_port = 0 + to_port = 0 + protocol = "-1" + cidr_blocks = ["0.0.0.0/0"] + } +} + +resource "aws_instance" "test" { + depends_on = [aws_internet_gateway.test] + + ami = data.aws_ssm_parameter.aws_service_datasync_ami.value + instance_type = data.aws_ec2_instance_type_offering.available.instance_type + vpc_security_group_ids = [aws_security_group.test.id] + subnet_id = aws_subnet.test[0].id + + # The Instance must have a public IP address because the aws_datasync_agent retrieves + # the activation key by making an HTTP request to the instance + associate_public_ip_address = true +} + +# acctest.ConfigVPCWithSubnets(rName, 1) + +resource "aws_vpc" "test" { + cidr_block = "10.0.0.0/16" +} + +resource "aws_subnet" "test" { + count = 1 + + vpc_id = aws_vpc.test.id + availability_zone = 
data.aws_availability_zones.available.names[count.index] + cidr_block = cidrsubnet(aws_vpc.test.cidr_block, 8, count.index) +} + +# acctest.ConfigAvailableAZsNoOptInDefaultExclude + +data "aws_availability_zones" "available" { + exclude_zone_ids = local.default_exclude_zone_ids + state = "available" + + filter { + name = "opt-in-status" + values = ["opt-in-not-required"] + } +} + +locals { + default_exclude_zone_ids = ["usw2-az4", "usgw1-az2"] +} + +# acctest.AvailableEC2InstanceTypeForAvailabilityZone("aws_subnet.test[0].availability_zone", "m5.2xlarge", "m5.4xlarge") + +data "aws_ec2_instance_type_offering" "available" { + filter { + name = "instance-type" + values = local.preferred_instance_types + } + + filter { + name = "location" + values = [aws_subnet.test[0].availability_zone] + } + + location_type = "availability-zone" + preferred_instance_types = local.preferred_instance_types +} + +locals { + preferred_instance_types = ["m5.2xlarge", "m5.4xlarge"] +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} +variable "domain" { + type = string + nullable = false +} + +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "6.0.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/datasync/testdata/LocationS3/basic_v5.100.0/main_gen.tf b/internal/service/datasync/testdata/LocationS3/basic_v5.100.0/main_gen.tf new file mode 100644 index 000000000000..14536c7911d1 --- /dev/null +++ b/internal/service/datasync/testdata/LocationS3/basic_v5.100.0/main_gen.tf @@ -0,0 +1,72 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_datasync_location_s3" "test" { + s3_bucket_arn = aws_s3_bucket.test.arn + subdirectory = "/test" + + s3_config { + bucket_access_role_arn = aws_iam_role.test.arn + } +} + +# testAccLocationS3Config_base + +resource "aws_s3_bucket" "test" { + bucket = var.rName + force_destroy = true +} + +resource "aws_iam_role" "test" { + name = var.rName + + assume_role_policy = < 0 { + if err := errors.Join(tfslices.ApplyToAll(out.FailureReasons, func(e awstypes.ProjectDeletionError) error { + return errors.New(aws.ToString(e.Message)) + })...); err != nil { + return nil, "", err + } + } + + return out, string(out.ProjectStatus), nil } } @@ -388,7 +398,7 @@ func findProjectByID(ctx context.Context, conn *datazone.Client, domain string, return nil, err } - if out == nil || !(out.FailureReasons == nil) { + if out == nil { return nil, tfresource.NewEmptyResultError(in) } diff --git a/internal/service/datazone/project_test.go b/internal/service/datazone/project_test.go index a7b09f3756f1..cc1b13c79360 100644 --- a/internal/service/datazone/project_test.go +++ b/internal/service/datazone/project_test.go @@ -31,7 +31,6 @@ func TestAccDataZoneProject_basic(t *testing.T) { var project datazone.GetProjectOutput rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - dName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_datazone_project.test" domainName := "aws_datazone_domain.test" @@ -44,17 +43,20 @@ func TestAccDataZoneProject_basic(t *testing.T) { CheckDestroy: testAccCheckProjectDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccProjectConfig_basic(rName, dName), - Check: resource.ComposeTestCheckFunc( + Config: testAccProjectConfig_basic(rName), + Check: resource.ComposeAggregateTestCheckFunc( testAccCheckProjectExists(ctx, resourceName, &project), resource.TestCheckResourceAttrPair(resourceName, "domain_identifier", domainName, names.AttrID), - 
resource.TestCheckResourceAttrSet(resourceName, "glossary_terms.#"), - resource.TestCheckResourceAttr(resourceName, names.AttrDescription, "desc"), + resource.TestCheckResourceAttr(resourceName, "failure_reasons.#", "0"), + resource.TestCheckResourceAttr(resourceName, "glossary_terms.#", "0"), + resource.TestCheckNoResourceAttr(resourceName, names.AttrDescription), resource.TestCheckResourceAttr(resourceName, names.AttrName, rName), resource.TestCheckResourceAttrSet(resourceName, "created_by"), resource.TestCheckResourceAttrSet(resourceName, names.AttrID), - resource.TestCheckResourceAttrSet(resourceName, names.AttrCreatedAt), - resource.TestCheckResourceAttrSet(resourceName, "last_updated_at"), + acctest.CheckResourceAttrRFC3339(resourceName, names.AttrCreatedAt), + acctest.CheckResourceAttrRFC3339(resourceName, "last_updated_at"), + // resource.TestCheckResourceAttr(resourceName, "project_status", string(types.ProjectStatusActive)), + resource.TestCheckResourceAttr(resourceName, "skip_deletion_check", acctest.CtTrue), ), }, { @@ -62,11 +64,12 @@ func TestAccDataZoneProject_basic(t *testing.T) { ImportState: true, ImportStateVerify: true, ImportStateIdFunc: testAccAuthorizerImportStateIdFunc(resourceName), - ImportStateVerifyIgnore: []string{"skip_deletion_check", "project_status"}, + ImportStateVerifyIgnore: []string{"project_status", "skip_deletion_check"}, }, }, }) } + func TestAccDataZoneProject_disappears(t *testing.T) { ctx := acctest.Context(t) if testing.Short() { @@ -75,7 +78,6 @@ func TestAccDataZoneProject_disappears(t *testing.T) { var project datazone.GetProjectOutput rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - dName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_datazone_project.test" resource.ParallelTest(t, resource.TestCase{ @@ -85,8 +87,8 @@ func TestAccDataZoneProject_disappears(t *testing.T) { CheckDestroy: testAccCheckProjectDestroy(ctx), Steps: []resource.TestStep{ { - Config: 
testAccProjectConfig_basic(rName, dName), - Check: resource.ComposeTestCheckFunc( + Config: testAccProjectConfig_basic(rName), + Check: resource.ComposeAggregateTestCheckFunc( testAccCheckProjectExists(ctx, resourceName, &project), acctest.CheckFrameworkResourceDisappears(ctx, acctest.Provider, tfdatazone.ResourceProject, resourceName), ), @@ -95,6 +97,73 @@ func TestAccDataZoneProject_disappears(t *testing.T) { }, }) } + +func TestAccDataZoneProject_description(t *testing.T) { + ctx := acctest.Context(t) + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + + var v1, v2 datazone.GetProjectOutput + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_datazone_project.test" + domainName := "aws_datazone_domain.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.DataZoneServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckProjectDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccProjectConfig_description(rName, "desc"), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckProjectExists(ctx, resourceName, &v1), + resource.TestCheckResourceAttrPair(resourceName, "domain_identifier", domainName, names.AttrID), + resource.TestCheckResourceAttr(resourceName, "glossary_terms.#", "0"), + resource.TestCheckResourceAttr(resourceName, names.AttrDescription, "desc"), + resource.TestCheckResourceAttr(resourceName, names.AttrName, rName), + resource.TestCheckResourceAttrSet(resourceName, "created_by"), + resource.TestCheckResourceAttrSet(resourceName, names.AttrID), + acctest.CheckResourceAttrRFC3339(resourceName, names.AttrCreatedAt), + acctest.CheckResourceAttrRFC3339(resourceName, "last_updated_at"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateIdFunc: 
testAccAuthorizerImportStateIdFunc(resourceName), + ImportStateVerifyIgnore: []string{"project_status", "skip_deletion_check"}, + }, + { + Config: testAccProjectConfig_description(rName, "updated"), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckProjectExists(ctx, resourceName, &v2), + testAccCheckProjectNotRecreated(&v1, &v2), + resource.TestCheckResourceAttrPair(resourceName, "domain_identifier", domainName, names.AttrID), + resource.TestCheckResourceAttr(resourceName, "glossary_terms.#", "0"), + resource.TestCheckResourceAttr(resourceName, names.AttrDescription, "updated"), + resource.TestCheckResourceAttr(resourceName, names.AttrName, rName), + resource.TestCheckResourceAttrSet(resourceName, "created_by"), + resource.TestCheckResourceAttrSet(resourceName, names.AttrID), + acctest.CheckResourceAttrRFC3339(resourceName, names.AttrCreatedAt), + acctest.CheckResourceAttrRFC3339(resourceName, "last_updated_at"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateIdFunc: testAccAuthorizerImportStateIdFunc(resourceName), + ImportStateVerifyIgnore: []string{"project_status", "skip_deletion_check"}, + }, + }, + }) +} + func testAccCheckProjectDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { conn := acctest.Provider.Meta().(*conns.AWSClient).DataZoneClient(ctx) @@ -173,72 +242,6 @@ func testAccCheckProjectNotRecreated(before, after *datazone.GetProjectOutput) r return nil } } -func TestAccDataZoneProject_update(t *testing.T) { - ctx := acctest.Context(t) - if testing.Short() { - t.Skip("skipping long-running test in short mode") - } - - var v1, v2 datazone.GetProjectOutput - pName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - dName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resourceName := "aws_datazone_project.test" - domainName := "aws_datazone_domain.test" - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { - 
acctest.PreCheck(ctx, t) - }, - ErrorCheck: acctest.ErrorCheck(t, names.DataZoneServiceID), - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckProjectDestroy(ctx), - Steps: []resource.TestStep{ - { - Config: testAccProjectConfig_basic(pName, dName), - Check: resource.ComposeTestCheckFunc( - testAccCheckProjectExists(ctx, resourceName, &v1), - resource.TestCheckResourceAttrPair(resourceName, "domain_identifier", domainName, names.AttrID), - resource.TestCheckResourceAttrSet(resourceName, "glossary_terms.#"), - resource.TestCheckResourceAttr(resourceName, names.AttrDescription, "desc"), - resource.TestCheckResourceAttr(resourceName, names.AttrName, pName), - resource.TestCheckResourceAttrSet(resourceName, "created_by"), - resource.TestCheckResourceAttrSet(resourceName, names.AttrID), - resource.TestCheckResourceAttrSet(resourceName, names.AttrCreatedAt), - resource.TestCheckResourceAttrSet(resourceName, "last_updated_at"), - ), - }, - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - ImportStateIdFunc: testAccAuthorizerImportStateIdFunc(resourceName), - ImportStateVerifyIgnore: []string{"skip_deletion_check", "project_status"}, - }, - { - Config: testAccProjectConfigBasicUpdate(pName, dName), - Check: resource.ComposeTestCheckFunc( - testAccCheckProjectExists(ctx, resourceName, &v2), - testAccCheckProjectNotRecreated(&v1, &v2), - resource.TestCheckResourceAttrPair(resourceName, "domain_identifier", domainName, names.AttrID), - resource.TestCheckResourceAttrSet(resourceName, "glossary_terms.#"), - resource.TestCheckResourceAttr(resourceName, names.AttrDescription, names.AttrDescription), - resource.TestCheckResourceAttr(resourceName, names.AttrName, pName), - resource.TestCheckResourceAttrSet(resourceName, "created_by"), - resource.TestCheckResourceAttrSet(resourceName, names.AttrID), - resource.TestCheckResourceAttrSet(resourceName, names.AttrCreatedAt), - 
resource.TestCheckResourceAttrSet(resourceName, "last_updated_at"), - ), - }, - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - ImportStateIdFunc: testAccAuthorizerImportStateIdFunc(resourceName), - ImportStateVerifyIgnore: []string{"project_status", "skip_deletion_check"}, - }, - }, - }) -} func testAccAuthorizerImportStateIdFunc(resourceName string) resource.ImportStateIdFunc { return func(s *terraform.State) (string, error) { @@ -251,33 +254,23 @@ func testAccAuthorizerImportStateIdFunc(resourceName string) resource.ImportStat } } -func testAccProjectConfig_basic(pName, dName string) string { - return acctest.ConfigCompose(testAccDomainConfig_basic(dName), fmt.Sprintf(` -resource "aws_security_group" "test" { - name = %[1]q -} - +func testAccProjectConfig_basic(rName string) string { + return acctest.ConfigCompose(testAccDomainConfig_basic(rName), fmt.Sprintf(` resource "aws_datazone_project" "test" { domain_identifier = aws_datazone_domain.test.id - glossary_terms = ["2N8w6XJCwZf"] name = %[1]q - description = "desc" skip_deletion_check = true } -`, pName)) -} -func testAccProjectConfigBasicUpdate(pName, dName string) string { - return acctest.ConfigCompose(testAccDomainConfig_basic(dName), fmt.Sprintf(` -resource "aws_security_group" "test" { - name = %[1]q +`, rName)) } +func testAccProjectConfig_description(rName, description string) string { + return acctest.ConfigCompose(testAccDomainConfig_basic(rName), fmt.Sprintf(` resource "aws_datazone_project" "test" { domain_identifier = aws_datazone_domain.test.id - glossary_terms = ["2N8w6XJCwZf"] name = %[1]q - description = "description" + description = %[2]q skip_deletion_check = true } -`, pName)) +`, rName, description)) } diff --git a/internal/service/datazone/service_endpoint_resolver_gen.go b/internal/service/datazone/service_endpoint_resolver_gen.go index 31347a04968f..2d647b8a4ed5 100644 --- a/internal/service/datazone/service_endpoint_resolver_gen.go +++ 
b/internal/service/datazone/service_endpoint_resolver_gen.go @@ -62,7 +62,7 @@ func (r resolverV2) ResolveEndpoint(ctx context.Context, params datazone.Endpoin }) params.UseFIPS = aws.Bool(false) } else { - err = fmt.Errorf("looking up datazone endpoint %q: %s", hostname, err) + err = fmt.Errorf("looking up datazone endpoint %q: %w", hostname, err) return } } else { diff --git a/internal/service/datazone/service_endpoints_gen_test.go b/internal/service/datazone/service_endpoints_gen_test.go index 6ac731bdffcd..8099a9034461 100644 --- a/internal/service/datazone/service_endpoints_gen_test.go +++ b/internal/service/datazone/service_endpoints_gen_test.go @@ -521,7 +521,7 @@ func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { ) } -var errCancelOperation = fmt.Errorf("Test: Canceling request") +var errCancelOperation = errors.New("Test: Canceling request") func addCancelRequestMiddleware() func(*middleware.Stack) error { return func(stack *middleware.Stack) error { diff --git a/internal/service/datazone/service_package_gen.go b/internal/service/datazone/service_package_gen.go index 6f7a34be8c35..2212fea8fa48 100644 --- a/internal/service/datazone/service_package_gen.go +++ b/internal/service/datazone/service_package_gen.go @@ -7,7 +7,6 @@ import ( "unique" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/datazone" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -134,7 +133,7 @@ func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) ( func(o *datazone.Options) { if inContext, ok := conns.FromContext(ctx); ok && inContext.VCREnabled() { tflog.Info(ctx, "overriding retry behavior to immediately return VCR errors") - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(vcr.InteractionNotFoundRetryableFunc)) + o.Retryer = 
conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), vcr.InteractionNotFoundRetryableFunc) } }, withExtraOptions(ctx, p, config), diff --git a/internal/service/datazone/sweep.go b/internal/service/datazone/sweep.go index 7eea238bc85c..f069e3f3d14a 100644 --- a/internal/service/datazone/sweep.go +++ b/internal/service/datazone/sweep.go @@ -16,15 +16,26 @@ import ( ) func RegisterSweepers() { - awsv2.Register("aws_datazone_domain", sweepDomains) + awsv2.Register("aws_datazone_domain", sweepDomains, + "aws_datazone_project", + "aws_datazone_environment_profile", + ) + + awsv2.Register("aws_datazone_environment", sweepEnvironments) + + awsv2.Register("aws_datazone_environment_profile", sweepEnvironmentProfiles) + + awsv2.Register("aws_datazone_project", sweepProjects, + "aws_datazone_environment", + ) } func sweepDomains(ctx context.Context, client *conns.AWSClient) ([]sweep.Sweepable, error) { conn := client.DataZoneClient(ctx) - input := &datazone.ListDomainsInput{} - sweepResources := make([]sweep.Sweepable, 0) + var sweepResources []sweep.Sweepable - pages := datazone.NewListDomainsPaginator(conn, input) + var input datazone.ListDomainsInput + pages := datazone.NewListDomainsPaginator(conn, &input) for pages.HasMorePages() { page, err := pages.NextPage(ctx) if err != nil { @@ -42,3 +53,126 @@ func sweepDomains(ctx context.Context, client *conns.AWSClient) ([]sweep.Sweepab return sweepResources, nil } + +func sweepEnvironments(ctx context.Context, client *conns.AWSClient) ([]sweep.Sweepable, error) { + conn := client.DataZoneClient(ctx) + var sweepResources []sweep.Sweepable + + var domainsInput datazone.ListDomainsInput + pages := datazone.NewListDomainsPaginator(conn, &domainsInput) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + if err != nil { + return nil, err + } + + for _, domain := range page.Items { + projectsInput := datazone.ListProjectsInput{ + DomainIdentifier: domain.Id, + } + pages := datazone.NewListProjectsPaginator(conn, 
&projectsInput) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + if err != nil { + return nil, err + } + + for _, project := range page.Items { + environmentsInput := datazone.ListEnvironmentsInput{ + DomainIdentifier: domain.Id, + ProjectIdentifier: project.Id, + } + pages := datazone.NewListEnvironmentsPaginator(conn, &environmentsInput) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + if err != nil { + return nil, err + } + + for _, environment := range page.Items { + sweepResources = append(sweepResources, framework.NewSweepResource(newEnvironmentResource, client, + framework.NewAttribute(names.AttrID, environment.Id), + framework.NewAttribute("domain_identifier", domain.Id), + )) + } + } + } + } + } + } + + return sweepResources, nil +} + +func sweepEnvironmentProfiles(ctx context.Context, client *conns.AWSClient) ([]sweep.Sweepable, error) { + conn := client.DataZoneClient(ctx) + var sweepResources []sweep.Sweepable + + var domainsInput datazone.ListDomainsInput + pages := datazone.NewListDomainsPaginator(conn, &domainsInput) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + if err != nil { + return nil, err + } + + for _, domain := range page.Items { + environmentProfilesInput := datazone.ListEnvironmentProfilesInput{ + DomainIdentifier: domain.Id, + } + pages := datazone.NewListEnvironmentProfilesPaginator(conn, &environmentProfilesInput) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + if err != nil { + return nil, err + } + + for _, profile := range page.Items { + sweepResources = append(sweepResources, framework.NewSweepResource(newEnvironmentProfileResource, client, + framework.NewAttribute(names.AttrID, profile.Id), + framework.NewAttribute("domain_identifier", domain.Id), + )) + } + } + } + } + + return sweepResources, nil +} + +func sweepProjects(ctx context.Context, client *conns.AWSClient) ([]sweep.Sweepable, error) { + conn := client.DataZoneClient(ctx) + var sweepResources 
[]sweep.Sweepable + + var domainsInput datazone.ListDomainsInput + pages := datazone.NewListDomainsPaginator(conn, &domainsInput) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + if err != nil { + return nil, err + } + + for _, domain := range page.Items { + projectsInput := datazone.ListProjectsInput{ + DomainIdentifier: domain.Id, + } + pages := datazone.NewListProjectsPaginator(conn, &projectsInput) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + if err != nil { + return nil, err + } + + for _, project := range page.Items { + sweepResources = append(sweepResources, framework.NewSweepResource(newProjectResource, client, + framework.NewAttribute(names.AttrID, project.Id), + framework.NewAttribute("domain_identifier", domain.Id), + framework.NewAttribute("skip_deletion_check", true), // Automatically delete associated Glossaries + )) + } + } + } + } + + return sweepResources, nil +} diff --git a/internal/service/datazone/tags_gen.go b/internal/service/datazone/tags_gen.go index 745a13567422..62a35537007f 100644 --- a/internal/service/datazone/tags_gen.go +++ b/internal/service/datazone/tags_gen.go @@ -3,8 +3,8 @@ package datazone import ( "context" - "fmt" + "github.com/YakDriver/smarterr" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/datazone" "github.com/hashicorp/terraform-plugin-log/tflog" @@ -26,7 +26,7 @@ func listTags(ctx context.Context, conn *datazone.Client, identifier string, opt output, err := conn.ListTagsForResource(ctx, &input, optFns...) 
if err != nil { - return tftags.New(ctx, nil), err + return tftags.New(ctx, nil), smarterr.NewError(err) } return keyValueTags(ctx, output.Tags), nil @@ -38,7 +38,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri tags, err := listTags(ctx, meta.(*conns.AWSClient).DataZoneClient(ctx), identifier) if err != nil { - return err + return smarterr.NewError(err) } if inContext, ok := tftags.FromContext(ctx); ok { @@ -99,7 +99,7 @@ func updateTags(ctx context.Context, conn *datazone.Client, identifier string, o _, err := conn.UntagResource(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("untagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } @@ -114,7 +114,7 @@ func updateTags(ctx context.Context, conn *datazone.Client, identifier string, o _, err := conn.TagResource(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("tagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } diff --git a/internal/service/datazone/user_profile.go b/internal/service/datazone/user_profile.go index dd6c0180d1cd..97d2b0387b9a 100644 --- a/internal/service/datazone/user_profile.go +++ b/internal/service/datazone/user_profile.go @@ -142,7 +142,7 @@ func (r *userProfileResource) Create(ctx context.Context, req resource.CreateReq resp.State.SetAttribute(ctx, path.Root(names.AttrID), out.Id) // set partial state to taint if wait fails createTimeout := r.CreateTimeout(ctx, plan.Timeouts) - output, err := tfresource.RetryGWhenNotFound(ctx, createTimeout, func() (*datazone.GetUserProfileOutput, error) { + output, err := tfresource.RetryWhenNotFound(ctx, createTimeout, func(ctx context.Context) (*datazone.GetUserProfileOutput, error) { return findUserProfileByID(ctx, conn, plan.DomainIdentifier.ValueString(), plan.UserIdentifier.ValueString(), out.Type) }) @@ -226,7 +226,7 @@ func (r *userProfileResource) Update(ctx context.Context, req resource.UpdateReq } updateTimeout := r.UpdateTimeout(ctx, 
plan.Timeouts) - output, err := tfresource.RetryGWhenNotFound(ctx, updateTimeout, func() (*datazone.GetUserProfileOutput, error) { + output, err := tfresource.RetryWhenNotFound(ctx, updateTimeout, func(ctx context.Context) (*datazone.GetUserProfileOutput, error) { return findUserProfileByID(ctx, conn, plan.DomainIdentifier.ValueString(), plan.UserIdentifier.ValueString(), out.Type) }) diff --git a/internal/service/dax/cluster.go b/internal/service/dax/cluster.go index 65c1893f9110..cf1de688f9bb 100644 --- a/internal/service/dax/cluster.go +++ b/internal/service/dax/cluster.go @@ -259,21 +259,18 @@ func resourceClusterCreate(ctx context.Context, d *schema.ResourceData, meta any // IAM roles take some time to propagate var resp *dax.CreateClusterOutput - err := retry.RetryContext(ctx, propagationTimeout, func() *retry.RetryError { + err := tfresource.Retry(ctx, propagationTimeout, func(ctx context.Context) *tfresource.RetryError { var err error resp, err = conn.CreateCluster(ctx, input) if errs.IsA[*awstypes.InvalidParameterValueException](err) { - return retry.RetryableError(err) + return tfresource.RetryableError(err) } if err != nil { - return retry.NonRetryableError(err) + return tfresource.NonRetryableError(err) } return nil }) - if tfresource.TimedOut(err) { - resp, err = conn.CreateCluster(ctx, input) - } if err != nil { return sdkdiag.AppendErrorf(diags, "creating DAX cluster: %s", err) } @@ -498,23 +495,19 @@ func resourceClusterDelete(ctx context.Context, d *schema.ResourceData, meta any req := &dax.DeleteClusterInput{ ClusterName: aws.String(d.Id()), } - err := retry.RetryContext(ctx, 5*time.Minute, func() *retry.RetryError { + err := tfresource.Retry(ctx, 5*time.Minute, func(ctx context.Context) *tfresource.RetryError { _, err := conn.DeleteCluster(ctx, req) if errs.IsA[*awstypes.InvalidClusterStateFault](err) { - return retry.RetryableError(err) + return tfresource.RetryableError(err) } if err != nil { - return retry.NonRetryableError(err) + return 
tfresource.NonRetryableError(err) } return nil }) - if tfresource.TimedOut(err) { - _, err = conn.DeleteCluster(ctx, req) - } - if errs.IsA[*awstypes.ClusterNotFoundFault](err) { return diags } diff --git a/internal/service/dax/cluster_test.go b/internal/service/dax/cluster_test.go index 5fc425d33049..a2981f1b85d9 100644 --- a/internal/service/dax/cluster_test.go +++ b/internal/service/dax/cluster_test.go @@ -377,7 +377,7 @@ func testAccCheckClusterExists(ctx context.Context, n string, v *awstypes.Cluste } resp, err := conn.DescribeClusters(ctx, &input) if err != nil { - return fmt.Errorf("DAX error: %v", err) + return fmt.Errorf("DAX error: %w", err) } for _, c := range resp.Clusters { diff --git a/internal/service/dax/service_endpoint_resolver_gen.go b/internal/service/dax/service_endpoint_resolver_gen.go index 9f70e0fc6755..e27ec13ec62b 100644 --- a/internal/service/dax/service_endpoint_resolver_gen.go +++ b/internal/service/dax/service_endpoint_resolver_gen.go @@ -62,7 +62,7 @@ func (r resolverV2) ResolveEndpoint(ctx context.Context, params dax.EndpointPara }) params.UseFIPS = aws.Bool(false) } else { - err = fmt.Errorf("looking up dax endpoint %q: %s", hostname, err) + err = fmt.Errorf("looking up dax endpoint %q: %w", hostname, err) return } } else { diff --git a/internal/service/dax/service_endpoints_gen_test.go b/internal/service/dax/service_endpoints_gen_test.go index e25932b2bbc3..e44e00c920c1 100644 --- a/internal/service/dax/service_endpoints_gen_test.go +++ b/internal/service/dax/service_endpoints_gen_test.go @@ -521,7 +521,7 @@ func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { ) } -var errCancelOperation = fmt.Errorf("Test: Canceling request") +var errCancelOperation = errors.New("Test: Canceling request") func addCancelRequestMiddleware() func(*middleware.Stack) error { return func(stack *middleware.Stack) error { diff --git a/internal/service/dax/service_package_gen.go b/internal/service/dax/service_package_gen.go index 
0e15f2d1017f..7dc9720335c7 100644 --- a/internal/service/dax/service_package_gen.go +++ b/internal/service/dax/service_package_gen.go @@ -7,7 +7,6 @@ import ( "unique" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/dax" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -79,7 +78,7 @@ func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) ( func(o *dax.Options) { if inContext, ok := conns.FromContext(ctx); ok && inContext.VCREnabled() { tflog.Info(ctx, "overriding retry behavior to immediately return VCR errors") - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(vcr.InteractionNotFoundRetryableFunc)) + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), vcr.InteractionNotFoundRetryableFunc) } }, withExtraOptions(ctx, p, config), diff --git a/internal/service/dax/sweep.go b/internal/service/dax/sweep.go index f5a0352c42ae..e09bead9e413 100644 --- a/internal/service/dax/sweep.go +++ b/internal/service/dax/sweep.go @@ -4,36 +4,27 @@ package dax import ( - "fmt" - "log" + "context" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/dax" awstypes "github.com/aws/aws-sdk-go-v2/service/dax/types" - "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/sweep" "github.com/hashicorp/terraform-provider-aws/internal/sweep/awsv2" ) func RegisterSweepers() { - resource.AddTestSweepers("aws_dax_cluster", &resource.Sweeper{ - Name: "aws_dax_cluster", - F: sweepClusters, - }) + awsv2.Register("aws_dax_cluster", sweepClusters) } -func sweepClusters(region string) error { - ctx := sweep.Context(region) - client, err := sweep.SharedRegionalSweepClient(ctx, 
region) - if err != nil { - return fmt.Errorf("error getting client: %s", err.Error()) - } +func sweepClusters(ctx context.Context, client *conns.AWSClient) ([]sweep.Sweepable, error) { conn := client.DAXClient(ctx) - + var input dax.DescribeClustersInput sweepResources := make([]sweep.Sweepable, 0) - err = describeClustersPages(ctx, conn, &dax.DescribeClustersInput{}, func(page *dax.DescribeClustersOutput, lastPage bool) bool { + err := describeClustersPages(ctx, conn, &input, func(page *dax.DescribeClustersOutput, lastPage bool) bool { if page == nil { return !lastPage } @@ -51,20 +42,13 @@ func sweepClusters(region string) error { // GovCloud (with no DAX support) has an endpoint that responds with: // InvalidParameterValueException: Access Denied to API Version: DAX_V3 - if awsv2.SkipSweepError(err) || errs.IsAErrorMessageContains[*awstypes.InvalidParameterValueException](err, "Access Denied to API Version: DAX_V3") { - log.Printf("[WARN] Skipping DAX Cluster sweep for %s: %s", region, err) - return nil + if errs.IsAErrorMessageContains[*awstypes.InvalidParameterValueException](err, "Access Denied to API Version: DAX_V3") { + return nil, nil } if err != nil { - return fmt.Errorf("listing DAX Clusters (%s): %w", region, err) - } - - err = sweep.SweepOrchestrator(ctx, sweepResources) - - if err != nil { - return fmt.Errorf("sweeping DAX Clusters (%s): %w", region, err) + return nil, err } - return nil + return sweepResources, nil } diff --git a/internal/service/dax/tags_gen.go b/internal/service/dax/tags_gen.go index e0babcecf6a3..46f7e9323f1b 100644 --- a/internal/service/dax/tags_gen.go +++ b/internal/service/dax/tags_gen.go @@ -3,8 +3,8 @@ package dax import ( "context" - "fmt" + "github.com/YakDriver/smarterr" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/dax" awstypes "github.com/aws/aws-sdk-go-v2/service/dax/types" @@ -27,7 +27,7 @@ func listTags(ctx context.Context, conn *dax.Client, identifier string, optFns . 
output, err := conn.ListTags(ctx, &input, optFns...) if err != nil { - return tftags.New(ctx, nil), err + return tftags.New(ctx, nil), smarterr.NewError(err) } return keyValueTags(ctx, output.Tags), nil @@ -39,7 +39,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri tags, err := listTags(ctx, meta.(*conns.AWSClient).DAXClient(ctx), identifier) if err != nil { - return err + return smarterr.NewError(err) } if inContext, ok := tftags.FromContext(ctx); ok { @@ -117,7 +117,7 @@ func updateTags(ctx context.Context, conn *dax.Client, identifier string, oldTag _, err := conn.UntagResource(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("untagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } @@ -132,7 +132,7 @@ func updateTags(ctx context.Context, conn *dax.Client, identifier string, oldTag _, err := conn.TagResource(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("tagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } diff --git a/internal/service/deploy/deployment_group.go b/internal/service/deploy/deployment_group.go index 6720318a0d11..58024bb9d13d 100644 --- a/internal/service/deploy/deployment_group.go +++ b/internal/service/deploy/deployment_group.go @@ -54,7 +54,7 @@ func resourceDeploymentGroup() *schema.Resource { group, err := findDeploymentGroupByTwoPartKey(ctx, conn, applicationName, deploymentGroupName) if err != nil { - return []*schema.ResourceData{}, fmt.Errorf("reading CodeDeploy Deployment Group (%s): %s", d.Id(), err) + return []*schema.ResourceData{}, fmt.Errorf("reading CodeDeploy Deployment Group (%s): %w", d.Id(), err) } d.SetId(aws.ToString(group.DeploymentGroupId)) @@ -524,7 +524,7 @@ func resourceDeploymentGroupCreate(ctx context.Context, d *schema.ResourceData, } outputRaw, err := tfresource.RetryWhen(ctx, 5*time.Minute, - func() (any, error) { + func(ctx context.Context) (any, error) { return conn.CreateDeploymentGroup(ctx, input) }, 
func(err error) (bool, error) { @@ -718,7 +718,7 @@ func resourceDeploymentGroupUpdate(ctx context.Context, d *schema.ResourceData, } _, err := tfresource.RetryWhen(ctx, 5*time.Minute, - func() (any, error) { + func(ctx context.Context) (any, error) { return conn.UpdateDeploymentGroup(ctx, input) }, func(err error) (bool, error) { diff --git a/internal/service/deploy/service_endpoint_resolver_gen.go b/internal/service/deploy/service_endpoint_resolver_gen.go index 3561dea007c7..ccf469ae04f8 100644 --- a/internal/service/deploy/service_endpoint_resolver_gen.go +++ b/internal/service/deploy/service_endpoint_resolver_gen.go @@ -62,7 +62,7 @@ func (r resolverV2) ResolveEndpoint(ctx context.Context, params codedeploy.Endpo }) params.UseFIPS = aws.Bool(false) } else { - err = fmt.Errorf("looking up codedeploy endpoint %q: %s", hostname, err) + err = fmt.Errorf("looking up codedeploy endpoint %q: %w", hostname, err) return } } else { diff --git a/internal/service/deploy/service_endpoints_gen_test.go b/internal/service/deploy/service_endpoints_gen_test.go index 82dc5d10b2e4..8cebc7411b4f 100644 --- a/internal/service/deploy/service_endpoints_gen_test.go +++ b/internal/service/deploy/service_endpoints_gen_test.go @@ -601,7 +601,7 @@ func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { ) } -var errCancelOperation = fmt.Errorf("Test: Canceling request") +var errCancelOperation = errors.New("Test: Canceling request") func addCancelRequestMiddleware() func(*middleware.Stack) error { return func(stack *middleware.Stack) error { diff --git a/internal/service/deploy/service_package_gen.go b/internal/service/deploy/service_package_gen.go index 58b4a8d29dd8..19b22e837dca 100644 --- a/internal/service/deploy/service_package_gen.go +++ b/internal/service/deploy/service_package_gen.go @@ -7,7 +7,6 @@ import ( "unique" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/codedeploy" 
"github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -82,7 +81,7 @@ func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) ( func(o *codedeploy.Options) { if inContext, ok := conns.FromContext(ctx); ok && inContext.VCREnabled() { tflog.Info(ctx, "overriding retry behavior to immediately return VCR errors") - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(vcr.InteractionNotFoundRetryableFunc)) + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), vcr.InteractionNotFoundRetryableFunc) } }, withExtraOptions(ctx, p, config), diff --git a/internal/service/deploy/sweep.go b/internal/service/deploy/sweep.go index 9c9294f5bf3b..a2e23c439d7b 100644 --- a/internal/service/deploy/sweep.go +++ b/internal/service/deploy/sweep.go @@ -25,7 +25,7 @@ func sweepApps(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.DeployClient(ctx) input := &codedeploy.ListApplicationsInput{} diff --git a/internal/service/deploy/tags_gen.go b/internal/service/deploy/tags_gen.go index 8ab8b8532f4a..1eec2572c7ed 100644 --- a/internal/service/deploy/tags_gen.go +++ b/internal/service/deploy/tags_gen.go @@ -3,8 +3,8 @@ package deploy import ( "context" - "fmt" + "github.com/YakDriver/smarterr" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/codedeploy" awstypes "github.com/aws/aws-sdk-go-v2/service/codedeploy/types" @@ -27,7 +27,7 @@ func listTags(ctx context.Context, conn *codedeploy.Client, identifier string, o output, err := conn.ListTagsForResource(ctx, &input, optFns...) 
if err != nil { - return tftags.New(ctx, nil), err + return tftags.New(ctx, nil), smarterr.NewError(err) } return keyValueTags(ctx, output.Tags), nil @@ -39,7 +39,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri tags, err := listTags(ctx, meta.(*conns.AWSClient).DeployClient(ctx), identifier) if err != nil { - return err + return smarterr.NewError(err) } if inContext, ok := tftags.FromContext(ctx); ok { @@ -117,7 +117,7 @@ func updateTags(ctx context.Context, conn *codedeploy.Client, identifier string, _, err := conn.UntagResource(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("untagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } @@ -132,7 +132,7 @@ func updateTags(ctx context.Context, conn *codedeploy.Client, identifier string, _, err := conn.TagResource(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("tagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } diff --git a/internal/service/detective/graph.go b/internal/service/detective/graph.go index 0865a601cdff..33cfb29343e1 100644 --- a/internal/service/detective/graph.go +++ b/internal/service/detective/graph.go @@ -62,7 +62,7 @@ func resourceGraphCreate(ctx context.Context, d *schema.ResourceData, meta any) Tags: getTagsIn(ctx), } - outputRaw, err := tfresource.RetryWhenIsA[*awstypes.InternalServerException](ctx, timeout, func() (any, error) { + outputRaw, err := tfresource.RetryWhenIsA[any, *awstypes.InternalServerException](ctx, timeout, func(ctx context.Context) (any, error) { return conn.CreateGraph(ctx, input) }) diff --git a/internal/service/detective/member.go b/internal/service/detective/member.go index e750fe41dbb7..59d309711263 100644 --- a/internal/service/detective/member.go +++ b/internal/service/detective/member.go @@ -117,7 +117,7 @@ func resourceMemberCreate(ctx context.Context, d *schema.ResourceData, meta any) input.Message = aws.String(v.(string)) } - _, err := 
tfresource.RetryWhenIsA[*awstypes.InternalServerException](ctx, d.Timeout(schema.TimeoutCreate), func() (any, error) { + _, err := tfresource.RetryWhenIsA[any, *awstypes.InternalServerException](ctx, d.Timeout(schema.TimeoutCreate), func(ctx context.Context) (any, error) { return conn.CreateMembers(ctx, input) }) diff --git a/internal/service/detective/organization_admin_account.go b/internal/service/detective/organization_admin_account.go index b07339b4f397..199c651857d6 100644 --- a/internal/service/detective/organization_admin_account.go +++ b/internal/service/detective/organization_admin_account.go @@ -63,7 +63,7 @@ func resourceOrganizationAdminAccountCreate(ctx context.Context, d *schema.Resou d.SetId(accountID) - _, err = tfresource.RetryWhenNotFound(ctx, 5*time.Minute, func() (any, error) { + _, err = tfresource.RetryWhenNotFound(ctx, 5*time.Minute, func(ctx context.Context) (any, error) { return findOrganizationAdminAccountByAccountID(ctx, conn, d.Id()) }) @@ -115,7 +115,7 @@ func resourceOrganizationAdminAccountDelete(ctx context.Context, d *schema.Resou return sdkdiag.AppendErrorf(diags, "disabling Detective Organization Admin Account (%s): %s", d.Id(), err) } - _, err = tfresource.RetryUntilNotFound(ctx, 5*time.Minute, func() (any, error) { + _, err = tfresource.RetryUntilNotFound(ctx, 5*time.Minute, func(ctx context.Context) (any, error) { return findOrganizationAdminAccountByAccountID(ctx, conn, d.Id()) }) diff --git a/internal/service/detective/service_endpoint_resolver_gen.go b/internal/service/detective/service_endpoint_resolver_gen.go index 5bf3fbefadd8..3e027360b864 100644 --- a/internal/service/detective/service_endpoint_resolver_gen.go +++ b/internal/service/detective/service_endpoint_resolver_gen.go @@ -62,7 +62,7 @@ func (r resolverV2) ResolveEndpoint(ctx context.Context, params detective.Endpoi }) params.UseFIPS = aws.Bool(false) } else { - err = fmt.Errorf("looking up detective endpoint %q: %s", hostname, err) + err = fmt.Errorf("looking up 
detective endpoint %q: %w", hostname, err) return } } else { diff --git a/internal/service/detective/service_endpoints_gen_test.go b/internal/service/detective/service_endpoints_gen_test.go index ab7c87c07746..4e8478979437 100644 --- a/internal/service/detective/service_endpoints_gen_test.go +++ b/internal/service/detective/service_endpoints_gen_test.go @@ -521,7 +521,7 @@ func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { ) } -var errCancelOperation = fmt.Errorf("Test: Canceling request") +var errCancelOperation = errors.New("Test: Canceling request") func addCancelRequestMiddleware() func(*middleware.Stack) error { return func(stack *middleware.Stack) error { diff --git a/internal/service/detective/service_package_gen.go b/internal/service/detective/service_package_gen.go index b3b2478ae622..776dd6d42931 100644 --- a/internal/service/detective/service_package_gen.go +++ b/internal/service/detective/service_package_gen.go @@ -7,7 +7,6 @@ import ( "unique" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/detective" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -91,7 +90,7 @@ func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) ( func(o *detective.Options) { if inContext, ok := conns.FromContext(ctx); ok && inContext.VCREnabled() { tflog.Info(ctx, "overriding retry behavior to immediately return VCR errors") - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(vcr.InteractionNotFoundRetryableFunc)) + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), vcr.InteractionNotFoundRetryableFunc) } }, withExtraOptions(ctx, p, config), diff --git a/internal/service/detective/tags_gen.go b/internal/service/detective/tags_gen.go index 2a0f11aa3b9d..3f9e3671187d 100644 --- a/internal/service/detective/tags_gen.go +++ 
b/internal/service/detective/tags_gen.go @@ -3,8 +3,8 @@ package detective import ( "context" - "fmt" + "github.com/YakDriver/smarterr" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/detective" "github.com/hashicorp/terraform-plugin-log/tflog" @@ -26,7 +26,7 @@ func listTags(ctx context.Context, conn *detective.Client, identifier string, op output, err := conn.ListTagsForResource(ctx, &input, optFns...) if err != nil { - return tftags.New(ctx, nil), err + return tftags.New(ctx, nil), smarterr.NewError(err) } return keyValueTags(ctx, output.Tags), nil @@ -38,7 +38,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri tags, err := listTags(ctx, meta.(*conns.AWSClient).DetectiveClient(ctx), identifier) if err != nil { - return err + return smarterr.NewError(err) } if inContext, ok := tftags.FromContext(ctx); ok { @@ -99,7 +99,7 @@ func updateTags(ctx context.Context, conn *detective.Client, identifier string, _, err := conn.UntagResource(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("untagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } @@ -114,7 +114,7 @@ func updateTags(ctx context.Context, conn *detective.Client, identifier string, _, err := conn.TagResource(ctx, &input, optFns...) 
if err != nil { - return fmt.Errorf("tagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } diff --git a/internal/service/devicefarm/device_pool_identity_gen_test.go b/internal/service/devicefarm/device_pool_identity_gen_test.go index 11bb93511a40..131235640182 100644 --- a/internal/service/devicefarm/device_pool_identity_gen_test.go +++ b/internal/service/devicefarm/device_pool_identity_gen_test.go @@ -17,6 +17,7 @@ import ( "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" "github.com/hashicorp/terraform-plugin-testing/tfversion" "github.com/hashicorp/terraform-provider-aws/internal/acctest" + tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -27,7 +28,7 @@ func TestAccDeviceFarmDevicePool_Identity_Basic(t *testing.T) { resourceName := "aws_devicefarm_device_pool.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ TerraformVersionChecks: []tfversion.TerraformVersionCheck{ tfversion.SkipBelow(tfversion.Version1_12_0), }, @@ -51,6 +52,9 @@ func TestAccDeviceFarmDevicePool_Identity_Basic(t *testing.T) { ConfigStateChecks: []statecheck.StateCheck{ statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New(names.AttrARN), compare.ValuesSame()), statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), }, }, @@ -105,3 +109,137 @@ func TestAccDeviceFarmDevicePool_Identity_Basic(t *testing.T) { }, }) } + +func TestAccDeviceFarmDevicePool_Identity_ExistingResource(t *testing.T) { + ctx := acctest.Context(t) + + var v 
awstypes.DevicePool + resourceName := "aws_devicefarm_device_pool.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckRegion(t, endpoints.UsWest2RegionID) + }, + ErrorCheck: acctest.ErrorCheck(t, names.DeviceFarmServiceID), + CheckDestroy: testAccCheckDevicePoolDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/DevicePool/basic_v5.100.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDevicePoolExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: v6.0 Identity error + { + ConfigDirectory: config.StaticDirectory("testdata/DevicePool/basic_v6.0.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDevicePoolExists(ctx, resourceName, &v), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.Null(), + }), + }, + }, + + // Step 3: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/DevicePool/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: 
config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + }, + }) +} + +func TestAccDeviceFarmDevicePool_Identity_ExistingResource_NoRefresh_NoChange(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.DevicePool + resourceName := "aws_devicefarm_device_pool.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckRegion(t, endpoints.UsWest2RegionID) + }, + ErrorCheck: acctest.ErrorCheck(t, names.DeviceFarmServiceID), + CheckDestroy: testAccCheckDevicePoolDestroy(ctx), + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/DevicePool/basic_v5.100.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDevicePoolExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: 
config.StaticDirectory("testdata/DevicePool/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDevicePoolExists(ctx, resourceName, &v), + ), + }, + }, + }) +} diff --git a/internal/service/devicefarm/device_pool_test.go b/internal/service/devicefarm/device_pool_test.go index a2220aac29fd..795fa78b58b5 100644 --- a/internal/service/devicefarm/device_pool_test.go +++ b/internal/service/devicefarm/device_pool_test.go @@ -13,14 +13,8 @@ import ( "github.com/hashicorp/aws-sdk-go-base/v2/endpoints" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" - "github.com/hashicorp/terraform-plugin-testing/knownvalue" - "github.com/hashicorp/terraform-plugin-testing/plancheck" - "github.com/hashicorp/terraform-plugin-testing/statecheck" "github.com/hashicorp/terraform-plugin-testing/terraform" - "github.com/hashicorp/terraform-plugin-testing/tfversion" "github.com/hashicorp/terraform-provider-aws/internal/acctest" - tfknownvalue "github.com/hashicorp/terraform-provider-aws/internal/acctest/knownvalue" - tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" "github.com/hashicorp/terraform-provider-aws/internal/conns" tfdevicefarm "github.com/hashicorp/terraform-provider-aws/internal/service/devicefarm" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" @@ -188,90 +182,6 @@ func TestAccDeviceFarmDevicePool_disappears_project(t *testing.T) { }) } -func TestAccDeviceFarmDevicePool_Identity_ExistingResource(t *testing.T) { - ctx := acctest.Context(t) - var pool awstypes.DevicePool - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resourceName := "aws_devicefarm_device_pool.test" - - resource.ParallelTest(t, resource.TestCase{ - TerraformVersionChecks: []tfversion.TerraformVersionCheck{ - 
tfversion.SkipBelow(tfversion.Version1_12_0), - }, - PreCheck: func() { - acctest.PreCheck(ctx, t) - acctest.PreCheckPartitionHasService(t, names.DeviceFarmEndpointID) - // Currently, DeviceFarm is only supported in us-west-2 - // https://docs.aws.amazon.com/general/latest/gr/devicefarm.html - acctest.PreCheckRegion(t, endpoints.UsWest2RegionID) - }, - ErrorCheck: acctest.ErrorCheck(t, names.DeviceFarmServiceID), - CheckDestroy: testAccCheckDevicePoolDestroy(ctx), - Steps: []resource.TestStep{ - { - ExternalProviders: map[string]resource.ExternalProvider{ - "aws": { - Source: "hashicorp/aws", - VersionConstraint: "5.100.0", - }, - }, - Config: testAccDevicePoolConfig_basic(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckDevicePoolExists(ctx, resourceName, &pool), - ), - ConfigStateChecks: []statecheck.StateCheck{ - tfstatecheck.ExpectNoIdentity(resourceName), - }, - }, - { - ExternalProviders: map[string]resource.ExternalProvider{ - "aws": { - Source: "hashicorp/aws", - VersionConstraint: "6.0.0", - }, - }, - Config: testAccDevicePoolConfig_basic(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckDevicePoolExists(ctx, resourceName, &pool), - ), - ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - PostApplyPostRefresh: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - }, - ConfigStateChecks: []statecheck.StateCheck{ - statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ - names.AttrARN: knownvalue.Null(), - }), - }, - }, - { - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - Config: testAccDevicePoolConfig_basic(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckDevicePoolExists(ctx, resourceName, &pool), - ), - ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{ - 
plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - PostApplyPostRefresh: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - }, - ConfigStateChecks: []statecheck.StateCheck{ - statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ - names.AttrARN: tfknownvalue.RegionalARNRegexp("devicefarm", regexache.MustCompile(`devicepool:.+`)), - }), - }, - }, - }, - }) -} - func testAccCheckDevicePoolExists(ctx context.Context, n string, v *awstypes.DevicePool) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] diff --git a/internal/service/devicefarm/instance_profile_identity_gen_test.go b/internal/service/devicefarm/instance_profile_identity_gen_test.go index 78e72286051d..72d88dd3f1a4 100644 --- a/internal/service/devicefarm/instance_profile_identity_gen_test.go +++ b/internal/service/devicefarm/instance_profile_identity_gen_test.go @@ -17,6 +17,7 @@ import ( "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" "github.com/hashicorp/terraform-plugin-testing/tfversion" "github.com/hashicorp/terraform-provider-aws/internal/acctest" + tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -27,7 +28,7 @@ func TestAccDeviceFarmInstanceProfile_Identity_Basic(t *testing.T) { resourceName := "aws_devicefarm_instance_profile.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ TerraformVersionChecks: []tfversion.TerraformVersionCheck{ tfversion.SkipBelow(tfversion.Version1_12_0), }, @@ -51,6 +52,9 @@ func TestAccDeviceFarmInstanceProfile_Identity_Basic(t *testing.T) { ConfigStateChecks: []statecheck.StateCheck{ statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New(names.AttrARN), 
compare.ValuesSame()), statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), }, }, @@ -105,3 +109,137 @@ func TestAccDeviceFarmInstanceProfile_Identity_Basic(t *testing.T) { }, }) } + +func TestAccDeviceFarmInstanceProfile_Identity_ExistingResource(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.InstanceProfile + resourceName := "aws_devicefarm_instance_profile.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckRegion(t, endpoints.UsWest2RegionID) + }, + ErrorCheck: acctest.ErrorCheck(t, names.DeviceFarmServiceID), + CheckDestroy: testAccCheckInstanceProfileDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/InstanceProfile/basic_v5.100.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckInstanceProfileExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: v6.0 Identity error + { + ConfigDirectory: config.StaticDirectory("testdata/InstanceProfile/basic_v6.0.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckInstanceProfileExists(ctx, resourceName, &v), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + 
plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.Null(), + }), + }, + }, + + // Step 3: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/InstanceProfile/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + }, + }) +} + +func TestAccDeviceFarmInstanceProfile_Identity_ExistingResource_NoRefresh_NoChange(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.InstanceProfile + resourceName := "aws_devicefarm_instance_profile.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckRegion(t, endpoints.UsWest2RegionID) + }, + ErrorCheck: acctest.ErrorCheck(t, names.DeviceFarmServiceID), + CheckDestroy: testAccCheckInstanceProfileDestroy(ctx), + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + 
NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/InstanceProfile/basic_v5.100.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckInstanceProfileExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/InstanceProfile/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckInstanceProfileExists(ctx, resourceName, &v), + ), + }, + }, + }) +} diff --git a/internal/service/devicefarm/instance_profile_test.go b/internal/service/devicefarm/instance_profile_test.go index 321dc9520539..86944a065a74 100644 --- a/internal/service/devicefarm/instance_profile_test.go +++ b/internal/service/devicefarm/instance_profile_test.go @@ -13,14 +13,8 @@ import ( "github.com/hashicorp/aws-sdk-go-base/v2/endpoints" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" - "github.com/hashicorp/terraform-plugin-testing/knownvalue" - "github.com/hashicorp/terraform-plugin-testing/plancheck" - "github.com/hashicorp/terraform-plugin-testing/statecheck" "github.com/hashicorp/terraform-plugin-testing/terraform" - "github.com/hashicorp/terraform-plugin-testing/tfversion" "github.com/hashicorp/terraform-provider-aws/internal/acctest" - tfknownvalue "github.com/hashicorp/terraform-provider-aws/internal/acctest/knownvalue" - tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" "github.com/hashicorp/terraform-provider-aws/internal/conns" tfdevicefarm 
"github.com/hashicorp/terraform-provider-aws/internal/service/devicefarm" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" @@ -158,90 +152,6 @@ func TestAccDeviceFarmInstanceProfile_disappears(t *testing.T) { }) } -func TestAccDeviceFarmInstanceProfile_Identity_ExistingResource(t *testing.T) { - ctx := acctest.Context(t) - var profile awstypes.InstanceProfile - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resourceName := "aws_devicefarm_instance_profile.test" - - resource.ParallelTest(t, resource.TestCase{ - TerraformVersionChecks: []tfversion.TerraformVersionCheck{ - tfversion.SkipBelow(tfversion.Version1_12_0), - }, - PreCheck: func() { - acctest.PreCheck(ctx, t) - acctest.PreCheckPartitionHasService(t, names.DeviceFarmEndpointID) - // Currently, DeviceFarm is only supported in us-west-2 - // https://docs.aws.amazon.com/general/latest/gr/devicefarm.html - acctest.PreCheckRegion(t, endpoints.UsWest2RegionID) - }, - ErrorCheck: acctest.ErrorCheck(t, names.DeviceFarmServiceID), - CheckDestroy: testAccCheckInstanceProfileDestroy(ctx), - Steps: []resource.TestStep{ - { - ExternalProviders: map[string]resource.ExternalProvider{ - "aws": { - Source: "hashicorp/aws", - VersionConstraint: "5.100.0", - }, - }, - Config: testAccInstanceProfileConfig_basic(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckInstanceProfileExists(ctx, resourceName, &profile), - ), - ConfigStateChecks: []statecheck.StateCheck{ - tfstatecheck.ExpectNoIdentity(resourceName), - }, - }, - { - ExternalProviders: map[string]resource.ExternalProvider{ - "aws": { - Source: "hashicorp/aws", - VersionConstraint: "6.0.0", - }, - }, - Config: testAccInstanceProfileConfig_basic(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckInstanceProfileExists(ctx, resourceName, &profile), - ), - ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - 
PostApplyPostRefresh: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - }, - ConfigStateChecks: []statecheck.StateCheck{ - statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ - names.AttrARN: knownvalue.Null(), - }), - }, - }, - { - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - Config: testAccInstanceProfileConfig_basic(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckInstanceProfileExists(ctx, resourceName, &profile), - ), - ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - PostApplyPostRefresh: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - }, - ConfigStateChecks: []statecheck.StateCheck{ - statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ - names.AttrARN: tfknownvalue.RegionalARNRegexp("devicefarm", regexache.MustCompile(`instanceprofile:.+`)), - }), - }, - }, - }, - }) -} - func testAccCheckInstanceProfileExists(ctx context.Context, n string, v *awstypes.InstanceProfile) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] diff --git a/internal/service/devicefarm/network_profile_identity_gen_test.go b/internal/service/devicefarm/network_profile_identity_gen_test.go index 99663d387c7a..c0dd43d198eb 100644 --- a/internal/service/devicefarm/network_profile_identity_gen_test.go +++ b/internal/service/devicefarm/network_profile_identity_gen_test.go @@ -17,6 +17,7 @@ import ( "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" "github.com/hashicorp/terraform-plugin-testing/tfversion" "github.com/hashicorp/terraform-provider-aws/internal/acctest" + tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -27,7 +28,7 @@ func 
TestAccDeviceFarmNetworkProfile_Identity_Basic(t *testing.T) { resourceName := "aws_devicefarm_network_profile.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ TerraformVersionChecks: []tfversion.TerraformVersionCheck{ tfversion.SkipBelow(tfversion.Version1_12_0), }, @@ -51,6 +52,9 @@ func TestAccDeviceFarmNetworkProfile_Identity_Basic(t *testing.T) { ConfigStateChecks: []statecheck.StateCheck{ statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New(names.AttrARN), compare.ValuesSame()), statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), }, }, @@ -105,3 +109,137 @@ func TestAccDeviceFarmNetworkProfile_Identity_Basic(t *testing.T) { }, }) } + +func TestAccDeviceFarmNetworkProfile_Identity_ExistingResource(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.NetworkProfile + resourceName := "aws_devicefarm_network_profile.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckRegion(t, endpoints.UsWest2RegionID) + }, + ErrorCheck: acctest.ErrorCheck(t, names.DeviceFarmServiceID), + CheckDestroy: testAccCheckNetworkProfileDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/NetworkProfile/basic_v5.100.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: 
resource.ComposeAggregateTestCheckFunc( + testAccCheckNetworkProfileExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: v6.0 Identity error + { + ConfigDirectory: config.StaticDirectory("testdata/NetworkProfile/basic_v6.0.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckNetworkProfileExists(ctx, resourceName, &v), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.Null(), + }), + }, + }, + + // Step 3: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/NetworkProfile/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + }, + }) +} + +func TestAccDeviceFarmNetworkProfile_Identity_ExistingResource_NoRefresh_NoChange(t *testing.T) { + ctx := acctest.Context(t) + + var v 
awstypes.NetworkProfile + resourceName := "aws_devicefarm_network_profile.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckRegion(t, endpoints.UsWest2RegionID) + }, + ErrorCheck: acctest.ErrorCheck(t, names.DeviceFarmServiceID), + CheckDestroy: testAccCheckNetworkProfileDestroy(ctx), + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/NetworkProfile/basic_v5.100.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckNetworkProfileExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/NetworkProfile/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckNetworkProfileExists(ctx, resourceName, &v), + ), + }, + }, + }) +} diff --git a/internal/service/devicefarm/network_profile_test.go b/internal/service/devicefarm/network_profile_test.go index a872a865eca2..b03d4fe0801d 100644 --- a/internal/service/devicefarm/network_profile_test.go +++ b/internal/service/devicefarm/network_profile_test.go @@ -13,14 +13,8 @@ import ( "github.com/hashicorp/aws-sdk-go-base/v2/endpoints" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" 
"github.com/hashicorp/terraform-plugin-testing/helper/resource" - "github.com/hashicorp/terraform-plugin-testing/knownvalue" - "github.com/hashicorp/terraform-plugin-testing/plancheck" - "github.com/hashicorp/terraform-plugin-testing/statecheck" "github.com/hashicorp/terraform-plugin-testing/terraform" - "github.com/hashicorp/terraform-plugin-testing/tfversion" "github.com/hashicorp/terraform-provider-aws/internal/acctest" - tfknownvalue "github.com/hashicorp/terraform-provider-aws/internal/acctest/knownvalue" - tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" "github.com/hashicorp/terraform-provider-aws/internal/conns" tfdevicefarm "github.com/hashicorp/terraform-provider-aws/internal/service/devicefarm" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" @@ -195,90 +189,6 @@ func TestAccDeviceFarmNetworkProfile_disappears_project(t *testing.T) { }) } -func TestAccDeviceFarmNetworkProfile_Identity_ExistingResource(t *testing.T) { - ctx := acctest.Context(t) - var pool awstypes.NetworkProfile - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resourceName := "aws_devicefarm_network_profile.test" - - resource.ParallelTest(t, resource.TestCase{ - TerraformVersionChecks: []tfversion.TerraformVersionCheck{ - tfversion.SkipBelow(tfversion.Version1_12_0), - }, - PreCheck: func() { - acctest.PreCheck(ctx, t) - acctest.PreCheckPartitionHasService(t, names.DeviceFarmEndpointID) - // Currently, DeviceFarm is only supported in us-west-2 - // https://docs.aws.amazon.com/general/latest/gr/devicefarm.html - acctest.PreCheckRegion(t, endpoints.UsWest2RegionID) - }, - ErrorCheck: acctest.ErrorCheck(t, names.DeviceFarmServiceID), - CheckDestroy: testAccCheckNetworkProfileDestroy(ctx), - Steps: []resource.TestStep{ - { - ExternalProviders: map[string]resource.ExternalProvider{ - "aws": { - Source: "hashicorp/aws", - VersionConstraint: "5.100.0", - }, - }, - Config: testAccNetworkProfileConfig_basic(rName), - Check: 
resource.ComposeTestCheckFunc( - testAccCheckNetworkProfileExists(ctx, resourceName, &pool), - ), - ConfigStateChecks: []statecheck.StateCheck{ - tfstatecheck.ExpectNoIdentity(resourceName), - }, - }, - { - ExternalProviders: map[string]resource.ExternalProvider{ - "aws": { - Source: "hashicorp/aws", - VersionConstraint: "6.0.0", - }, - }, - Config: testAccNetworkProfileConfig_basic(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckNetworkProfileExists(ctx, resourceName, &pool), - ), - ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - PostApplyPostRefresh: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - }, - ConfigStateChecks: []statecheck.StateCheck{ - statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ - names.AttrARN: knownvalue.Null(), - }), - }, - }, - { - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - Config: testAccNetworkProfileConfig_basic(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckNetworkProfileExists(ctx, resourceName, &pool), - ), - ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - PostApplyPostRefresh: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - }, - ConfigStateChecks: []statecheck.StateCheck{ - statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ - names.AttrARN: tfknownvalue.RegionalARNRegexp("devicefarm", regexache.MustCompile(`networkprofile:.+`)), - }), - }, - }, - }, - }) -} - func testAccCheckNetworkProfileExists(ctx context.Context, n string, v *awstypes.NetworkProfile) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] diff --git 
a/internal/service/devicefarm/project_identity_gen_test.go b/internal/service/devicefarm/project_identity_gen_test.go index 32a746984f01..4586206952f9 100644 --- a/internal/service/devicefarm/project_identity_gen_test.go +++ b/internal/service/devicefarm/project_identity_gen_test.go @@ -17,6 +17,7 @@ import ( "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" "github.com/hashicorp/terraform-plugin-testing/tfversion" "github.com/hashicorp/terraform-provider-aws/internal/acctest" + tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -27,7 +28,7 @@ func TestAccDeviceFarmProject_Identity_Basic(t *testing.T) { resourceName := "aws_devicefarm_project.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ TerraformVersionChecks: []tfversion.TerraformVersionCheck{ tfversion.SkipBelow(tfversion.Version1_12_0), }, @@ -51,6 +52,9 @@ func TestAccDeviceFarmProject_Identity_Basic(t *testing.T) { ConfigStateChecks: []statecheck.StateCheck{ statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New(names.AttrARN), compare.ValuesSame()), statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), }, }, @@ -105,3 +109,137 @@ func TestAccDeviceFarmProject_Identity_Basic(t *testing.T) { }, }) } + +func TestAccDeviceFarmProject_Identity_ExistingResource(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.Project + resourceName := "aws_devicefarm_project.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, 
resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckRegion(t, endpoints.UsWest2RegionID) + }, + ErrorCheck: acctest.ErrorCheck(t, names.DeviceFarmServiceID), + CheckDestroy: testAccCheckProjectDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/Project/basic_v5.100.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckProjectExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: v6.0 Identity error + { + ConfigDirectory: config.StaticDirectory("testdata/Project/basic_v6.0.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckProjectExists(ctx, resourceName, &v), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.Null(), + }), + }, + }, + + // Step 3: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Project/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), 
+ }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + }, + }) +} + +func TestAccDeviceFarmProject_Identity_ExistingResource_NoRefresh_NoChange(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.Project + resourceName := "aws_devicefarm_project.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckRegion(t, endpoints.UsWest2RegionID) + }, + ErrorCheck: acctest.ErrorCheck(t, names.DeviceFarmServiceID), + CheckDestroy: testAccCheckProjectDestroy(ctx), + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/Project/basic_v5.100.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckProjectExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Project/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckProjectExists(ctx, resourceName, &v), + ), + }, + }, + 
}) +} diff --git a/internal/service/devicefarm/project_test.go b/internal/service/devicefarm/project_test.go index 3ed042a6f87a..314089739788 100644 --- a/internal/service/devicefarm/project_test.go +++ b/internal/service/devicefarm/project_test.go @@ -13,14 +13,8 @@ import ( "github.com/hashicorp/aws-sdk-go-base/v2/endpoints" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" - "github.com/hashicorp/terraform-plugin-testing/knownvalue" - "github.com/hashicorp/terraform-plugin-testing/plancheck" - "github.com/hashicorp/terraform-plugin-testing/statecheck" "github.com/hashicorp/terraform-plugin-testing/terraform" - "github.com/hashicorp/terraform-plugin-testing/tfversion" "github.com/hashicorp/terraform-provider-aws/internal/acctest" - tfknownvalue "github.com/hashicorp/terraform-provider-aws/internal/acctest/knownvalue" - tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" "github.com/hashicorp/terraform-provider-aws/internal/conns" tfdevicefarm "github.com/hashicorp/terraform-provider-aws/internal/service/devicefarm" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" @@ -198,90 +192,6 @@ func TestAccDeviceFarmProject_disappears(t *testing.T) { }) } -func TestAccDeviceFarmProject_Identity_ExistingResource(t *testing.T) { - ctx := acctest.Context(t) - var proj awstypes.Project - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resourceName := "aws_devicefarm_project.test" - - resource.ParallelTest(t, resource.TestCase{ - TerraformVersionChecks: []tfversion.TerraformVersionCheck{ - tfversion.SkipBelow(tfversion.Version1_12_0), - }, - PreCheck: func() { - acctest.PreCheck(ctx, t) - acctest.PreCheckPartitionHasService(t, names.DeviceFarmEndpointID) - // Currently, DeviceFarm is only supported in us-west-2 - // https://docs.aws.amazon.com/general/latest/gr/devicefarm.html - acctest.PreCheckRegion(t, 
endpoints.UsWest2RegionID) - }, - ErrorCheck: acctest.ErrorCheck(t, names.DeviceFarmServiceID), - CheckDestroy: testAccCheckProjectDestroy(ctx), - Steps: []resource.TestStep{ - { - ExternalProviders: map[string]resource.ExternalProvider{ - "aws": { - Source: "hashicorp/aws", - VersionConstraint: "5.100.0", - }, - }, - Config: testAccProjectConfig_basic(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckProjectExists(ctx, resourceName, &proj), - ), - ConfigStateChecks: []statecheck.StateCheck{ - tfstatecheck.ExpectNoIdentity(resourceName), - }, - }, - { - ExternalProviders: map[string]resource.ExternalProvider{ - "aws": { - Source: "hashicorp/aws", - VersionConstraint: "6.0.0", - }, - }, - Config: testAccProjectConfig_basic(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckProjectExists(ctx, resourceName, &proj), - ), - ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - PostApplyPostRefresh: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - }, - ConfigStateChecks: []statecheck.StateCheck{ - statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ - names.AttrARN: knownvalue.Null(), - }), - }, - }, - { - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - Config: testAccProjectConfig_basic(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckProjectExists(ctx, resourceName, &proj), - ), - ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - PostApplyPostRefresh: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - }, - ConfigStateChecks: []statecheck.StateCheck{ - statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ - names.AttrARN: 
tfknownvalue.RegionalARNRegexp("devicefarm", regexache.MustCompile(`project:.+`)), - }), - }, - }, - }, - }) -} - func testAccCheckProjectExists(ctx context.Context, n string, v *awstypes.Project) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] diff --git a/internal/service/devicefarm/service_endpoint_resolver_gen.go b/internal/service/devicefarm/service_endpoint_resolver_gen.go index 093e86cec0f8..62da88fb5167 100644 --- a/internal/service/devicefarm/service_endpoint_resolver_gen.go +++ b/internal/service/devicefarm/service_endpoint_resolver_gen.go @@ -62,7 +62,7 @@ func (r resolverV2) ResolveEndpoint(ctx context.Context, params devicefarm.Endpo }) params.UseFIPS = aws.Bool(false) } else { - err = fmt.Errorf("looking up devicefarm endpoint %q: %s", hostname, err) + err = fmt.Errorf("looking up devicefarm endpoint %q: %w", hostname, err) return } } else { diff --git a/internal/service/devicefarm/service_endpoints_gen_test.go b/internal/service/devicefarm/service_endpoints_gen_test.go index f0728b88cc31..ce9a225b5821 100644 --- a/internal/service/devicefarm/service_endpoints_gen_test.go +++ b/internal/service/devicefarm/service_endpoints_gen_test.go @@ -521,7 +521,7 @@ func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { ) } -var errCancelOperation = fmt.Errorf("Test: Canceling request") +var errCancelOperation = errors.New("Test: Canceling request") func addCancelRequestMiddleware() func(*middleware.Stack) error { return func(stack *middleware.Stack) error { diff --git a/internal/service/devicefarm/service_package_gen.go b/internal/service/devicefarm/service_package_gen.go index ba4f34275916..1766b99a4db0 100644 --- a/internal/service/devicefarm/service_package_gen.go +++ b/internal/service/devicefarm/service_package_gen.go @@ -7,7 +7,6 @@ import ( "unique" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/devicefarm" 
"github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -151,7 +150,7 @@ func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) ( func(o *devicefarm.Options) { if inContext, ok := conns.FromContext(ctx); ok && inContext.VCREnabled() { tflog.Info(ctx, "overriding retry behavior to immediately return VCR errors") - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(vcr.InteractionNotFoundRetryableFunc)) + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), vcr.InteractionNotFoundRetryableFunc) } }, withExtraOptions(ctx, p, config), diff --git a/internal/service/devicefarm/sweep.go b/internal/service/devicefarm/sweep.go index ebe169f868ce..5fde7bb4c441 100644 --- a/internal/service/devicefarm/sweep.go +++ b/internal/service/devicefarm/sweep.go @@ -30,7 +30,7 @@ func sweepProjects(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %w", err) + return fmt.Errorf("getting client: %w", err) } conn := client.DeviceFarmClient(ctx) input := &devicefarm.ListProjectsInput{} @@ -71,7 +71,7 @@ func sweepTestGridProjects(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %w", err) + return fmt.Errorf("getting client: %w", err) } conn := client.DeviceFarmClient(ctx) input := &devicefarm.ListTestGridProjectsInput{} diff --git a/internal/service/devicefarm/tags_gen.go b/internal/service/devicefarm/tags_gen.go index 0f22f742ab3e..fc0975768f51 100644 --- a/internal/service/devicefarm/tags_gen.go +++ b/internal/service/devicefarm/tags_gen.go @@ -3,8 +3,8 @@ package devicefarm import ( "context" - "fmt" + "github.com/YakDriver/smarterr" "github.com/aws/aws-sdk-go-v2/aws" 
"github.com/aws/aws-sdk-go-v2/service/devicefarm" awstypes "github.com/aws/aws-sdk-go-v2/service/devicefarm/types" @@ -27,7 +27,7 @@ func listTags(ctx context.Context, conn *devicefarm.Client, identifier string, o output, err := conn.ListTagsForResource(ctx, &input, optFns...) if err != nil { - return tftags.New(ctx, nil), err + return tftags.New(ctx, nil), smarterr.NewError(err) } return keyValueTags(ctx, output.Tags), nil @@ -39,7 +39,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri tags, err := listTags(ctx, meta.(*conns.AWSClient).DeviceFarmClient(ctx), identifier) if err != nil { - return err + return smarterr.NewError(err) } if inContext, ok := tftags.FromContext(ctx); ok { @@ -126,7 +126,7 @@ func updateTags(ctx context.Context, conn *devicefarm.Client, identifier string, _, err := conn.UntagResource(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("untagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } @@ -141,7 +141,7 @@ func updateTags(ctx context.Context, conn *devicefarm.Client, identifier string, _, err := conn.TagResource(ctx, &input, optFns...) 
if err != nil { - return fmt.Errorf("tagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } diff --git a/internal/service/devicefarm/test_grid_project_identity_gen_test.go b/internal/service/devicefarm/test_grid_project_identity_gen_test.go index 7e9b2870c3e7..b98fb9caec00 100644 --- a/internal/service/devicefarm/test_grid_project_identity_gen_test.go +++ b/internal/service/devicefarm/test_grid_project_identity_gen_test.go @@ -17,6 +17,7 @@ import ( "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" "github.com/hashicorp/terraform-plugin-testing/tfversion" "github.com/hashicorp/terraform-provider-aws/internal/acctest" + tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -27,7 +28,7 @@ func TestAccDeviceFarmTestGridProject_Identity_Basic(t *testing.T) { resourceName := "aws_devicefarm_test_grid_project.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ TerraformVersionChecks: []tfversion.TerraformVersionCheck{ tfversion.SkipBelow(tfversion.Version1_12_0), }, @@ -51,6 +52,9 @@ func TestAccDeviceFarmTestGridProject_Identity_Basic(t *testing.T) { ConfigStateChecks: []statecheck.StateCheck{ statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New(names.AttrARN), compare.ValuesSame()), statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), }, }, @@ -105,3 +109,137 @@ func TestAccDeviceFarmTestGridProject_Identity_Basic(t *testing.T) { }, }) } + +func TestAccDeviceFarmTestGridProject_Identity_ExistingResource(t *testing.T) { + 
ctx := acctest.Context(t) + + var v awstypes.TestGridProject + resourceName := "aws_devicefarm_test_grid_project.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckRegion(t, endpoints.UsWest2RegionID) + }, + ErrorCheck: acctest.ErrorCheck(t, names.DeviceFarmServiceID), + CheckDestroy: testAccCheckTestGridProjectDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/TestGridProject/basic_v5.100.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTestGridProjectExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: v6.0 Identity error + { + ConfigDirectory: config.StaticDirectory("testdata/TestGridProject/basic_v6.0.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTestGridProjectExists(ctx, resourceName, &v), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.Null(), + }), + }, + }, + + // Step 3: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: 
config.StaticDirectory("testdata/TestGridProject/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + }, + }) +} + +func TestAccDeviceFarmTestGridProject_Identity_ExistingResource_NoRefresh_NoChange(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.TestGridProject + resourceName := "aws_devicefarm_test_grid_project.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckRegion(t, endpoints.UsWest2RegionID) + }, + ErrorCheck: acctest.ErrorCheck(t, names.DeviceFarmServiceID), + CheckDestroy: testAccCheckTestGridProjectDestroy(ctx), + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/TestGridProject/basic_v5.100.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTestGridProjectExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: 
Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/TestGridProject/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTestGridProjectExists(ctx, resourceName, &v), + ), + }, + }, + }) +} diff --git a/internal/service/devicefarm/test_grid_project_test.go b/internal/service/devicefarm/test_grid_project_test.go index 5b2c29ec8088..a486d9d6388a 100644 --- a/internal/service/devicefarm/test_grid_project_test.go +++ b/internal/service/devicefarm/test_grid_project_test.go @@ -13,14 +13,8 @@ import ( "github.com/hashicorp/aws-sdk-go-base/v2/endpoints" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" - "github.com/hashicorp/terraform-plugin-testing/knownvalue" - "github.com/hashicorp/terraform-plugin-testing/plancheck" - "github.com/hashicorp/terraform-plugin-testing/statecheck" "github.com/hashicorp/terraform-plugin-testing/terraform" - "github.com/hashicorp/terraform-plugin-testing/tfversion" "github.com/hashicorp/terraform-provider-aws/internal/acctest" - tfknownvalue "github.com/hashicorp/terraform-provider-aws/internal/acctest/knownvalue" - tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" "github.com/hashicorp/terraform-provider-aws/internal/conns" tfdevicefarm "github.com/hashicorp/terraform-provider-aws/internal/service/devicefarm" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" @@ -192,90 +186,6 @@ func TestAccDeviceFarmTestGridProject_disappears(t *testing.T) { }) } -func TestAccDeviceFarmTestGridProject_Identity_ExistingResource(t *testing.T) { - ctx := acctest.Context(t) - var proj awstypes.TestGridProject - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resourceName := "aws_devicefarm_test_grid_project.test" - - 
resource.ParallelTest(t, resource.TestCase{ - TerraformVersionChecks: []tfversion.TerraformVersionCheck{ - tfversion.SkipBelow(tfversion.Version1_12_0), - }, - PreCheck: func() { - acctest.PreCheck(ctx, t) - acctest.PreCheckPartitionHasService(t, names.DeviceFarmEndpointID) - // Currently, DeviceFarm is only supported in us-west-2 - // https://docs.aws.amazon.com/general/latest/gr/devicefarm.html - acctest.PreCheckRegion(t, endpoints.UsWest2RegionID) - }, - ErrorCheck: acctest.ErrorCheck(t, names.DeviceFarmServiceID), - CheckDestroy: testAccCheckTestGridProjectDestroy(ctx), - Steps: []resource.TestStep{ - { - ExternalProviders: map[string]resource.ExternalProvider{ - "aws": { - Source: "hashicorp/aws", - VersionConstraint: "5.100.0", - }, - }, - Config: testAccTestGridProjectConfig_project(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckTestGridProjectExists(ctx, resourceName, &proj), - ), - ConfigStateChecks: []statecheck.StateCheck{ - tfstatecheck.ExpectNoIdentity(resourceName), - }, - }, - { - ExternalProviders: map[string]resource.ExternalProvider{ - "aws": { - Source: "hashicorp/aws", - VersionConstraint: "6.0.0", - }, - }, - Config: testAccTestGridProjectConfig_project(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckTestGridProjectExists(ctx, resourceName, &proj), - ), - ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - PostApplyPostRefresh: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - }, - ConfigStateChecks: []statecheck.StateCheck{ - statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ - names.AttrARN: knownvalue.Null(), - }), - }, - }, - { - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - Config: testAccTestGridProjectConfig_project(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckTestGridProjectExists(ctx, 
resourceName, &proj), - ), - ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - PostApplyPostRefresh: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - }, - ConfigStateChecks: []statecheck.StateCheck{ - statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ - names.AttrARN: tfknownvalue.RegionalARNRegexp("devicefarm", regexache.MustCompile(`testgrid-project:.+`)), - }), - }, - }, - }, - }) -} - func testAccCheckTestGridProjectExists(ctx context.Context, n string, v *awstypes.TestGridProject) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] diff --git a/internal/service/devicefarm/testdata/DevicePool/basic_v5.100.0/main_gen.tf b/internal/service/devicefarm/testdata/DevicePool/basic_v5.100.0/main_gen.tf new file mode 100644 index 000000000000..8e4876c44acd --- /dev/null +++ b/internal/service/devicefarm/testdata/DevicePool/basic_v5.100.0/main_gen.tf @@ -0,0 +1,34 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_devicefarm_device_pool" "test" { + name = var.rName + project_arn = aws_devicefarm_project.test.arn + rule { + attribute = "OS_VERSION" + operator = "EQUALS" + value = "\"AVAILABLE\"" + } +} + +# testAccProjectConfig_basic + +resource "aws_devicefarm_project" "test" { + name = var.rName +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "5.100.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/devicefarm/testdata/DevicePool/basic_v6.0.0/main_gen.tf b/internal/service/devicefarm/testdata/DevicePool/basic_v6.0.0/main_gen.tf new file mode 100644 index 000000000000..f5ec12c10db4 --- /dev/null +++ b/internal/service/devicefarm/testdata/DevicePool/basic_v6.0.0/main_gen.tf @@ -0,0 +1,34 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resource "aws_devicefarm_device_pool" "test" { + name = var.rName + project_arn = aws_devicefarm_project.test.arn + rule { + attribute = "OS_VERSION" + operator = "EQUALS" + value = "\"AVAILABLE\"" + } +} + +# testAccProjectConfig_basic + +resource "aws_devicefarm_project" "test" { + name = var.rName +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "6.0.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/devicefarm/testdata/InstanceProfile/basic_v5.100.0/main_gen.tf b/internal/service/devicefarm/testdata/InstanceProfile/basic_v5.100.0/main_gen.tf new file mode 100644 index 000000000000..89dc661876ef --- /dev/null +++ b/internal/service/devicefarm/testdata/InstanceProfile/basic_v5.100.0/main_gen.tf @@ -0,0 +1,22 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_devicefarm_instance_profile" "test" { + name = var.rName +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "5.100.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/devicefarm/testdata/InstanceProfile/basic_v6.0.0/main_gen.tf b/internal/service/devicefarm/testdata/InstanceProfile/basic_v6.0.0/main_gen.tf new file mode 100644 index 000000000000..bf80856ea47a --- /dev/null +++ b/internal/service/devicefarm/testdata/InstanceProfile/basic_v6.0.0/main_gen.tf @@ -0,0 +1,22 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resource "aws_devicefarm_instance_profile" "test" { + name = var.rName +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "6.0.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/devicefarm/testdata/NetworkProfile/basic_v5.100.0/main_gen.tf b/internal/service/devicefarm/testdata/NetworkProfile/basic_v5.100.0/main_gen.tf new file mode 100644 index 000000000000..793e5637ef64 --- /dev/null +++ b/internal/service/devicefarm/testdata/NetworkProfile/basic_v5.100.0/main_gen.tf @@ -0,0 +1,29 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_devicefarm_network_profile" "test" { + name = var.rName + project_arn = aws_devicefarm_project.test.arn +} + +# testAccProjectConfig_basic + +resource "aws_devicefarm_project" "test" { + name = var.rName +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "5.100.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/devicefarm/testdata/NetworkProfile/basic_v6.0.0/main_gen.tf b/internal/service/devicefarm/testdata/NetworkProfile/basic_v6.0.0/main_gen.tf new file mode 100644 index 000000000000..1bb5d7478bfc --- /dev/null +++ b/internal/service/devicefarm/testdata/NetworkProfile/basic_v6.0.0/main_gen.tf @@ -0,0 +1,29 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resource "aws_devicefarm_network_profile" "test" { + name = var.rName + project_arn = aws_devicefarm_project.test.arn +} + +# testAccProjectConfig_basic + +resource "aws_devicefarm_project" "test" { + name = var.rName +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "6.0.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/devicefarm/testdata/Project/basic_v5.100.0/main_gen.tf b/internal/service/devicefarm/testdata/Project/basic_v5.100.0/main_gen.tf new file mode 100644 index 000000000000..6988018a54a4 --- /dev/null +++ b/internal/service/devicefarm/testdata/Project/basic_v5.100.0/main_gen.tf @@ -0,0 +1,22 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_devicefarm_project" "test" { + name = var.rName +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "5.100.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/devicefarm/testdata/Project/basic_v6.0.0/main_gen.tf b/internal/service/devicefarm/testdata/Project/basic_v6.0.0/main_gen.tf new file mode 100644 index 000000000000..008f91531378 --- /dev/null +++ b/internal/service/devicefarm/testdata/Project/basic_v6.0.0/main_gen.tf @@ -0,0 +1,22 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resource "aws_devicefarm_project" "test" { + name = var.rName +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "6.0.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/devicefarm/testdata/TestGridProject/basic_v5.100.0/main_gen.tf b/internal/service/devicefarm/testdata/TestGridProject/basic_v5.100.0/main_gen.tf new file mode 100644 index 000000000000..ba9c4e31cfd4 --- /dev/null +++ b/internal/service/devicefarm/testdata/TestGridProject/basic_v5.100.0/main_gen.tf @@ -0,0 +1,22 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_devicefarm_test_grid_project" "test" { + name = var.rName +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "5.100.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/devicefarm/testdata/TestGridProject/basic_v6.0.0/main_gen.tf b/internal/service/devicefarm/testdata/TestGridProject/basic_v6.0.0/main_gen.tf new file mode 100644 index 000000000000..45bdb7bbeab6 --- /dev/null +++ b/internal/service/devicefarm/testdata/TestGridProject/basic_v6.0.0/main_gen.tf @@ -0,0 +1,22 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resource "aws_devicefarm_test_grid_project" "test" { + name = var.rName +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "6.0.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/devicefarm/testdata/Upload/basic_v5.100.0/main_gen.tf b/internal/service/devicefarm/testdata/Upload/basic_v5.100.0/main_gen.tf new file mode 100644 index 000000000000..23b431372427 --- /dev/null +++ b/internal/service/devicefarm/testdata/Upload/basic_v5.100.0/main_gen.tf @@ -0,0 +1,28 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_devicefarm_upload" "test" { + name = var.rName + project_arn = aws_devicefarm_project.test.arn + type = "APPIUM_JAVA_TESTNG_TEST_SPEC" +} + +resource "aws_devicefarm_project" "test" { + name = var.rName +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "5.100.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/devicefarm/testdata/Upload/basic_v6.0.0/main_gen.tf b/internal/service/devicefarm/testdata/Upload/basic_v6.0.0/main_gen.tf new file mode 100644 index 000000000000..51109291bb64 --- /dev/null +++ b/internal/service/devicefarm/testdata/Upload/basic_v6.0.0/main_gen.tf @@ -0,0 +1,28 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resource "aws_devicefarm_upload" "test" { + name = var.rName + project_arn = aws_devicefarm_project.test.arn + type = "APPIUM_JAVA_TESTNG_TEST_SPEC" +} + +resource "aws_devicefarm_project" "test" { + name = var.rName +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "6.0.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/devicefarm/upload_identity_gen_test.go b/internal/service/devicefarm/upload_identity_gen_test.go index bc3df144a43c..7537fef38c2f 100644 --- a/internal/service/devicefarm/upload_identity_gen_test.go +++ b/internal/service/devicefarm/upload_identity_gen_test.go @@ -17,6 +17,7 @@ import ( "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" "github.com/hashicorp/terraform-plugin-testing/tfversion" "github.com/hashicorp/terraform-provider-aws/internal/acctest" + tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -27,7 +28,7 @@ func 
TestAccDeviceFarmUpload_Identity_Basic(t *testing.T) { resourceName := "aws_devicefarm_upload.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ TerraformVersionChecks: []tfversion.TerraformVersionCheck{ tfversion.SkipBelow(tfversion.Version1_12_0), }, @@ -51,6 +52,9 @@ func TestAccDeviceFarmUpload_Identity_Basic(t *testing.T) { ConfigStateChecks: []statecheck.StateCheck{ statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New(names.AttrARN), compare.ValuesSame()), statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), }, }, @@ -108,3 +112,137 @@ func TestAccDeviceFarmUpload_Identity_Basic(t *testing.T) { }, }) } + +func TestAccDeviceFarmUpload_Identity_ExistingResource(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.Upload + resourceName := "aws_devicefarm_upload.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckRegion(t, endpoints.UsWest2RegionID) + }, + ErrorCheck: acctest.ErrorCheck(t, names.DeviceFarmServiceID), + CheckDestroy: testAccCheckUploadDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/Upload/basic_v5.100.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckUploadExists(ctx, 
resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: v6.0 Identity error + { + ConfigDirectory: config.StaticDirectory("testdata/Upload/basic_v6.0.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckUploadExists(ctx, resourceName, &v), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.Null(), + }), + }, + }, + + // Step 3: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Upload/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + }, + }) +} + +func TestAccDeviceFarmUpload_Identity_ExistingResource_NoRefresh_NoChange(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.Upload + resourceName := "aws_devicefarm_upload.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + 
+ acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckRegion(t, endpoints.UsWest2RegionID) + }, + ErrorCheck: acctest.ErrorCheck(t, names.DeviceFarmServiceID), + CheckDestroy: testAccCheckUploadDestroy(ctx), + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/Upload/basic_v5.100.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckUploadExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Upload/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckUploadExists(ctx, resourceName, &v), + ), + }, + }, + }) +} diff --git a/internal/service/devicefarm/upload_test.go b/internal/service/devicefarm/upload_test.go index cf9fe0f3dbb6..796866cc7ed8 100644 --- a/internal/service/devicefarm/upload_test.go +++ b/internal/service/devicefarm/upload_test.go @@ -13,14 +13,8 @@ import ( "github.com/hashicorp/aws-sdk-go-base/v2/endpoints" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" - "github.com/hashicorp/terraform-plugin-testing/knownvalue" - "github.com/hashicorp/terraform-plugin-testing/plancheck" - "github.com/hashicorp/terraform-plugin-testing/statecheck" 
"github.com/hashicorp/terraform-plugin-testing/terraform" - "github.com/hashicorp/terraform-plugin-testing/tfversion" "github.com/hashicorp/terraform-provider-aws/internal/acctest" - tfknownvalue "github.com/hashicorp/terraform-provider-aws/internal/acctest/knownvalue" - tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" "github.com/hashicorp/terraform-provider-aws/internal/conns" tfdevicefarm "github.com/hashicorp/terraform-provider-aws/internal/service/devicefarm" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" @@ -140,90 +134,6 @@ func TestAccDeviceFarmUpload_disappears_project(t *testing.T) { }) } -func TestAccDeviceFarmUpload_Identity_ExistingResource(t *testing.T) { - ctx := acctest.Context(t) - var proj awstypes.Upload - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resourceName := "aws_devicefarm_upload.test" - - resource.ParallelTest(t, resource.TestCase{ - TerraformVersionChecks: []tfversion.TerraformVersionCheck{ - tfversion.SkipBelow(tfversion.Version1_12_0), - }, - PreCheck: func() { - acctest.PreCheck(ctx, t) - acctest.PreCheckPartitionHasService(t, names.DeviceFarmEndpointID) - // Currently, DeviceFarm is only supported in us-west-2 - // https://docs.aws.amazon.com/general/latest/gr/devicefarm.html - acctest.PreCheckRegion(t, endpoints.UsWest2RegionID) - }, - ErrorCheck: acctest.ErrorCheck(t, names.DeviceFarmServiceID), - CheckDestroy: testAccCheckUploadDestroy(ctx), - Steps: []resource.TestStep{ - { - ExternalProviders: map[string]resource.ExternalProvider{ - "aws": { - Source: "hashicorp/aws", - VersionConstraint: "5.100.0", - }, - }, - Config: testAccUploadConfig_basic(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckUploadExists(ctx, resourceName, &proj), - ), - ConfigStateChecks: []statecheck.StateCheck{ - tfstatecheck.ExpectNoIdentity(resourceName), - }, - }, - { - ExternalProviders: map[string]resource.ExternalProvider{ - "aws": { - Source: "hashicorp/aws", - 
VersionConstraint: "6.0.0", - }, - }, - Config: testAccUploadConfig_basic(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckUploadExists(ctx, resourceName, &proj), - ), - ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - PostApplyPostRefresh: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - }, - ConfigStateChecks: []statecheck.StateCheck{ - statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ - names.AttrARN: knownvalue.Null(), - }), - }, - }, - { - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - Config: testAccUploadConfig_basic(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckUploadExists(ctx, resourceName, &proj), - ), - ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - PostApplyPostRefresh: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - }, - ConfigStateChecks: []statecheck.StateCheck{ - statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ - names.AttrARN: tfknownvalue.RegionalARNRegexp("devicefarm", regexache.MustCompile(`upload:.+`)), - }), - }, - }, - }, - }) -} - func testAccCheckUploadExists(ctx context.Context, n string, v *awstypes.Upload) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] diff --git a/internal/service/devopsguru/event_sources_config.go b/internal/service/devopsguru/event_sources_config.go index 50dcaae4c17a..cc0d7ab19044 100644 --- a/internal/service/devopsguru/event_sources_config.go +++ b/internal/service/devopsguru/event_sources_config.go @@ -29,6 +29,7 @@ import ( // 
@Testing(existsType="github.com/aws/aws-sdk-go-v2/service/devopsguru;devopsguru.DescribeEventSourcesConfigOutput") // @Testing(preCheck="testAccPreCheck") // @Testing(generator=false) +// @Testing(preIdentityVersion="v5.100.0") func newEventSourcesConfigResource(_ context.Context) (resource.ResourceWithConfigure, error) { return &eventSourcesConfigResource{}, nil } diff --git a/internal/service/devopsguru/event_sources_config_identity_gen_test.go b/internal/service/devopsguru/event_sources_config_identity_gen_test.go index 9c4866e3a1ee..7893c315d315 100644 --- a/internal/service/devopsguru/event_sources_config_identity_gen_test.go +++ b/internal/service/devopsguru/event_sources_config_identity_gen_test.go @@ -16,6 +16,7 @@ import ( "github.com/hashicorp/terraform-plugin-testing/tfversion" "github.com/hashicorp/terraform-provider-aws/internal/acctest" tfknownvalue "github.com/hashicorp/terraform-provider-aws/internal/acctest/knownvalue" + tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -23,9 +24,10 @@ func testAccDevOpsGuruEventSourcesConfig_IdentitySerial(t *testing.T) { t.Helper() testCases := map[string]func(t *testing.T){ - acctest.CtBasic: testAccDevOpsGuruEventSourcesConfig_Identity_Basic, - "ExistingResource": testAccDevOpsGuruEventSourcesConfig_Identity_ExistingResource, - "RegionOverride": testAccDevOpsGuruEventSourcesConfig_Identity_RegionOverride, + acctest.CtBasic: testAccDevOpsGuruEventSourcesConfig_Identity_Basic, + "ExistingResource": testAccDevOpsGuruEventSourcesConfig_Identity_ExistingResource, + "ExistingResourceNoRefresh": testAccDevOpsGuruEventSourcesConfig_Identity_ExistingResource_NoRefresh_NoChange, + "RegionOverride": testAccDevOpsGuruEventSourcesConfig_Identity_RegionOverride, } acctest.RunSerialTests1Level(t, testCases, 0) @@ -37,7 +39,7 @@ func testAccDevOpsGuruEventSourcesConfig_Identity_Basic(t *testing.T) { var v 
devopsguru.DescribeEventSourcesConfigOutput resourceName := "aws_devopsguru_event_sources_config.test" - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ TerraformVersionChecks: []tfversion.TerraformVersionCheck{ tfversion.SkipBelow(tfversion.Version1_12_0), }, @@ -114,7 +116,7 @@ func testAccDevOpsGuruEventSourcesConfig_Identity_RegionOverride(t *testing.T) { resourceName := "aws_devopsguru_event_sources_config.test" - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ TerraformVersionChecks: []tfversion.TerraformVersionCheck{ tfversion.SkipBelow(tfversion.Version1_12_0), }, @@ -221,3 +223,123 @@ func testAccDevOpsGuruEventSourcesConfig_Identity_RegionOverride(t *testing.T) { }, }) } + +func testAccDevOpsGuruEventSourcesConfig_Identity_ExistingResource(t *testing.T) { + ctx := acctest.Context(t) + + var v devopsguru.DescribeEventSourcesConfigOutput + resourceName := "aws_devopsguru_event_sources_config.test" + + acctest.Test(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { + acctest.PreCheck(ctx, t) + testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.DevOpsGuruServiceID), + CheckDestroy: testAccCheckEventSourcesConfigDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/EventSourcesConfig/basic_v5.100.0/"), + ConfigVariables: config.Variables{}, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckEventSourcesConfigExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: v6.0 Identity set on refresh + { + ConfigDirectory: config.StaticDirectory("testdata/EventSourcesConfig/basic_v6.0.0/"), + ConfigVariables: config.Variables{}, + Check: resource.ComposeAggregateTestCheckFunc( + 
testAccCheckEventSourcesConfigExists(ctx, resourceName, &v), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + }), + }, + }, + + // Step 3: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/EventSourcesConfig/basic/"), + ConfigVariables: config.Variables{}, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + }), + }, + }, + }, + }) +} + +func testAccDevOpsGuruEventSourcesConfig_Identity_ExistingResource_NoRefresh_NoChange(t *testing.T) { + ctx := acctest.Context(t) + + var v devopsguru.DescribeEventSourcesConfigOutput + resourceName := "aws_devopsguru_event_sources_config.test" + + acctest.Test(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { + acctest.PreCheck(ctx, t) + testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.DevOpsGuruServiceID), + CheckDestroy: testAccCheckEventSourcesConfigDestroy(ctx), + 
AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/EventSourcesConfig/basic_v5.100.0/"), + ConfigVariables: config.Variables{}, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckEventSourcesConfigExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/EventSourcesConfig/basic/"), + ConfigVariables: config.Variables{}, + }, + }, + }) +} diff --git a/internal/service/devopsguru/event_sources_config_test.go b/internal/service/devopsguru/event_sources_config_test.go index 7b0cefd08dab..f471f7e58782 100644 --- a/internal/service/devopsguru/event_sources_config_test.go +++ b/internal/service/devopsguru/event_sources_config_test.go @@ -11,14 +11,8 @@ import ( "github.com/aws/aws-sdk-go-v2/service/devopsguru" awstypes "github.com/aws/aws-sdk-go-v2/service/devopsguru/types" "github.com/hashicorp/terraform-plugin-testing/helper/resource" - "github.com/hashicorp/terraform-plugin-testing/knownvalue" - "github.com/hashicorp/terraform-plugin-testing/plancheck" - "github.com/hashicorp/terraform-plugin-testing/statecheck" "github.com/hashicorp/terraform-plugin-testing/terraform" - "github.com/hashicorp/terraform-plugin-testing/tfversion" "github.com/hashicorp/terraform-provider-aws/internal/acctest" - tfknownvalue "github.com/hashicorp/terraform-provider-aws/internal/acctest/knownvalue" - tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/create" tfdevopsguru 
"github.com/hashicorp/terraform-provider-aws/internal/service/devopsguru" @@ -136,89 +130,6 @@ func testAccCheckEventSourcesConfigExists(ctx context.Context, name string, cfg } } -func testAccDevOpsGuruEventSourcesConfig_Identity_ExistingResource(t *testing.T) { - ctx := acctest.Context(t) - var cfg devopsguru.DescribeEventSourcesConfigOutput - resourceName := "aws_devopsguru_event_sources_config.test" - - resource.Test(t, resource.TestCase{ - TerraformVersionChecks: []tfversion.TerraformVersionCheck{ - tfversion.SkipBelow(tfversion.Version1_12_0), - }, - PreCheck: func() { - acctest.PreCheck(ctx, t) - acctest.PreCheckPartitionHasService(t, names.DevOpsGuruEndpointID) - testAccPreCheck(ctx, t) - }, - ErrorCheck: acctest.ErrorCheck(t, names.DevOpsGuruServiceID), - CheckDestroy: testAccCheckEventSourcesConfigDestroy(ctx), - Steps: []resource.TestStep{ - { - ExternalProviders: map[string]resource.ExternalProvider{ - "aws": { - Source: "hashicorp/aws", - VersionConstraint: "5.100.0", - }, - }, - Config: testAccEventSourcesConfigConfig_basic(), - Check: resource.ComposeTestCheckFunc( - testAccCheckEventSourcesConfigExists(ctx, resourceName, &cfg), - ), - ConfigStateChecks: []statecheck.StateCheck{ - tfstatecheck.ExpectNoIdentity(resourceName), - }, - }, - { - ExternalProviders: map[string]resource.ExternalProvider{ - "aws": { - Source: "hashicorp/aws", - VersionConstraint: "6.0.0", - }, - }, - Config: testAccEventSourcesConfigConfig_basic(), - Check: resource.ComposeTestCheckFunc( - testAccCheckEventSourcesConfigExists(ctx, resourceName, &cfg), - ), - ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - PostApplyPostRefresh: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - }, - ConfigStateChecks: []statecheck.StateCheck{ - statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ - 
names.AttrAccountID: tfknownvalue.AccountID(), - names.AttrRegion: knownvalue.StringExact(acctest.Region()), - }), - }, - }, - { - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - Config: testAccEventSourcesConfigConfig_basic(), - Check: resource.ComposeTestCheckFunc( - testAccCheckEventSourcesConfigExists(ctx, resourceName, &cfg), - ), - ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - PostApplyPostRefresh: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - }, - ConfigStateChecks: []statecheck.StateCheck{ - statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ - names.AttrAccountID: tfknownvalue.AccountID(), - names.AttrRegion: knownvalue.StringExact(acctest.Region()), - }), - }, - }, - }, - }) -} - func testAccEventSourcesConfigConfig_basic() string { return ` resource "aws_devopsguru_event_sources_config" "test" { diff --git a/internal/service/devopsguru/service_endpoint_resolver_gen.go b/internal/service/devopsguru/service_endpoint_resolver_gen.go index 2e3c105cb463..e99c3a70e8d3 100644 --- a/internal/service/devopsguru/service_endpoint_resolver_gen.go +++ b/internal/service/devopsguru/service_endpoint_resolver_gen.go @@ -62,7 +62,7 @@ func (r resolverV2) ResolveEndpoint(ctx context.Context, params devopsguru.Endpo }) params.UseFIPS = aws.Bool(false) } else { - err = fmt.Errorf("looking up devopsguru endpoint %q: %s", hostname, err) + err = fmt.Errorf("looking up devopsguru endpoint %q: %w", hostname, err) return } } else { diff --git a/internal/service/devopsguru/service_endpoints_gen_test.go b/internal/service/devopsguru/service_endpoints_gen_test.go index c6b3fc724e48..f22dfa54bced 100644 --- a/internal/service/devopsguru/service_endpoints_gen_test.go +++ b/internal/service/devopsguru/service_endpoints_gen_test.go @@ -521,7 +521,7 @@ func 
retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { ) } -var errCancelOperation = fmt.Errorf("Test: Canceling request") +var errCancelOperation = errors.New("Test: Canceling request") func addCancelRequestMiddleware() func(*middleware.Stack) error { return func(stack *middleware.Stack) error { diff --git a/internal/service/devopsguru/service_integration.go b/internal/service/devopsguru/service_integration.go index d41eb9395fca..2c16c77e059e 100644 --- a/internal/service/devopsguru/service_integration.go +++ b/internal/service/devopsguru/service_integration.go @@ -29,6 +29,7 @@ import ( // @SingletonIdentity(identityDuplicateAttributes="id") // @Testing(preCheck="testAccPreCheck") // @Testing(generator=false) +// @Testing(preIdentityVersion="v5.100.0") func newServiceIntegrationResource(_ context.Context) (resource.ResourceWithConfigure, error) { return &serviceIntegrationResource{}, nil } diff --git a/internal/service/devopsguru/service_integration_identity_gen_test.go b/internal/service/devopsguru/service_integration_identity_gen_test.go index e97cf8a65015..ee5b9ca3f803 100644 --- a/internal/service/devopsguru/service_integration_identity_gen_test.go +++ b/internal/service/devopsguru/service_integration_identity_gen_test.go @@ -15,6 +15,7 @@ import ( "github.com/hashicorp/terraform-plugin-testing/tfversion" "github.com/hashicorp/terraform-provider-aws/internal/acctest" tfknownvalue "github.com/hashicorp/terraform-provider-aws/internal/acctest/knownvalue" + tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -22,9 +23,10 @@ func testAccDevOpsGuruServiceIntegration_IdentitySerial(t *testing.T) { t.Helper() testCases := map[string]func(t *testing.T){ - acctest.CtBasic: testAccDevOpsGuruServiceIntegration_Identity_Basic, - "ExistingResource": testAccDevOpsGuruServiceIntegration_Identity_ExistingResource, - "RegionOverride": 
testAccDevOpsGuruServiceIntegration_Identity_RegionOverride, + acctest.CtBasic: testAccDevOpsGuruServiceIntegration_Identity_Basic, + "ExistingResource": testAccDevOpsGuruServiceIntegration_Identity_ExistingResource, + "ExistingResourceNoRefresh": testAccDevOpsGuruServiceIntegration_Identity_ExistingResource_NoRefresh_NoChange, + "RegionOverride": testAccDevOpsGuruServiceIntegration_Identity_RegionOverride, } acctest.RunSerialTests1Level(t, testCases, 0) @@ -32,9 +34,10 @@ func testAccDevOpsGuruServiceIntegration_IdentitySerial(t *testing.T) { func testAccDevOpsGuruServiceIntegration_Identity_Basic(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_devopsguru_service_integration.test" - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ TerraformVersionChecks: []tfversion.TerraformVersionCheck{ tfversion.SkipBelow(tfversion.Version1_12_0), }, @@ -111,7 +114,7 @@ func testAccDevOpsGuruServiceIntegration_Identity_RegionOverride(t *testing.T) { resourceName := "aws_devopsguru_service_integration.test" - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ TerraformVersionChecks: []tfversion.TerraformVersionCheck{ tfversion.SkipBelow(tfversion.Version1_12_0), }, @@ -218,3 +221,121 @@ func testAccDevOpsGuruServiceIntegration_Identity_RegionOverride(t *testing.T) { }, }) } + +func testAccDevOpsGuruServiceIntegration_Identity_ExistingResource(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_devopsguru_service_integration.test" + + acctest.Test(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { + acctest.PreCheck(ctx, t) + testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.DevOpsGuruServiceID), + CheckDestroy: testAccCheckServiceIntegrationDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: 
config.StaticDirectory("testdata/ServiceIntegration/basic_v5.100.0/"), + ConfigVariables: config.Variables{}, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckServiceIntegrationExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: v6.0 Identity set on refresh + { + ConfigDirectory: config.StaticDirectory("testdata/ServiceIntegration/basic_v6.0.0/"), + ConfigVariables: config.Variables{}, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckServiceIntegrationExists(ctx, resourceName), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + }), + }, + }, + + // Step 3: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/ServiceIntegration/basic/"), + ConfigVariables: config.Variables{}, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + }), + }, + }, + }, + }) +} + +func 
testAccDevOpsGuruServiceIntegration_Identity_ExistingResource_NoRefresh_NoChange(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_devopsguru_service_integration.test" + + acctest.Test(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { + acctest.PreCheck(ctx, t) + testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.DevOpsGuruServiceID), + CheckDestroy: testAccCheckServiceIntegrationDestroy(ctx), + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/ServiceIntegration/basic_v5.100.0/"), + ConfigVariables: config.Variables{}, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckServiceIntegrationExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/ServiceIntegration/basic/"), + ConfigVariables: config.Variables{}, + }, + }, + }) +} diff --git a/internal/service/devopsguru/service_integration_test.go b/internal/service/devopsguru/service_integration_test.go index d9cfd23882fa..893e908d96b7 100644 --- a/internal/service/devopsguru/service_integration_test.go +++ b/internal/service/devopsguru/service_integration_test.go @@ -11,14 +11,8 @@ import ( "github.com/aws/aws-sdk-go-v2/service/devopsguru/types" "github.com/hashicorp/terraform-plugin-testing/helper/resource" - "github.com/hashicorp/terraform-plugin-testing/knownvalue" - "github.com/hashicorp/terraform-plugin-testing/plancheck" - "github.com/hashicorp/terraform-plugin-testing/statecheck" "github.com/hashicorp/terraform-plugin-testing/terraform" - 
"github.com/hashicorp/terraform-plugin-testing/tfversion" "github.com/hashicorp/terraform-provider-aws/internal/acctest" - tfknownvalue "github.com/hashicorp/terraform-provider-aws/internal/acctest/knownvalue" - tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/create" "github.com/hashicorp/terraform-provider-aws/internal/errs" @@ -113,83 +107,6 @@ func testAccServiceIntegration_kms(t *testing.T) { }) } -func testAccDevOpsGuruServiceIntegration_Identity_ExistingResource(t *testing.T) { - ctx := acctest.Context(t) - resourceName := "aws_devopsguru_service_integration.test" - - resource.Test(t, resource.TestCase{ - TerraformVersionChecks: []tfversion.TerraformVersionCheck{ - tfversion.SkipBelow(tfversion.Version1_12_0), - }, - PreCheck: func() { - acctest.PreCheck(ctx, t) - acctest.PreCheckPartitionHasService(t, names.DevOpsGuruEndpointID) - testAccPreCheck(ctx, t) - }, - ErrorCheck: acctest.ErrorCheck(t, names.DevOpsGuruServiceID), - CheckDestroy: testAccCheckServiceIntegrationDestroy(ctx), - Steps: []resource.TestStep{ - { - ExternalProviders: map[string]resource.ExternalProvider{ - "aws": { - Source: "hashicorp/aws", - VersionConstraint: "5.100.0", - }, - }, - Config: testAccServiceIntegrationConfig_basic(string(types.OptInStatusEnabled)), - ConfigStateChecks: []statecheck.StateCheck{ - tfstatecheck.ExpectNoIdentity(resourceName), - }, - }, - { - ExternalProviders: map[string]resource.ExternalProvider{ - "aws": { - Source: "hashicorp/aws", - VersionConstraint: "6.0.0", - }, - }, - Config: testAccServiceIntegrationConfig_basic(string(types.OptInStatusEnabled)), - ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - PostApplyPostRefresh: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, 
plancheck.ResourceActionNoop), - }, - }, - ConfigStateChecks: []statecheck.StateCheck{ - statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ - names.AttrAccountID: tfknownvalue.AccountID(), - names.AttrRegion: knownvalue.StringExact(acctest.Region()), - }), - }, - }, - { - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - Config: testAccServiceIntegrationConfig_basic(string(types.OptInStatusEnabled)), - ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - PostApplyPostRefresh: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - }, - ConfigStateChecks: []statecheck.StateCheck{ - statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ - names.AttrAccountID: tfknownvalue.AccountID(), - names.AttrRegion: knownvalue.StringExact(acctest.Region()), - }), - }, - }, - { - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - Config: testAccServiceIntegrationConfig_basic(string(types.OptInStatusDisabled)), - }, - }, - }) -} - func testAccCheckServiceIntegrationDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { conn := acctest.Provider.Meta().(*conns.AWSClient).DevOpsGuruClient(ctx) diff --git a/internal/service/devopsguru/service_package_gen.go b/internal/service/devopsguru/service_package_gen.go index ebf78063807f..d4deb0a82ad4 100644 --- a/internal/service/devopsguru/service_package_gen.go +++ b/internal/service/devopsguru/service_package_gen.go @@ -7,7 +7,6 @@ import ( "unique" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/devopsguru" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -103,7 +102,7 @@ func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) ( 
func(o *devopsguru.Options) { if inContext, ok := conns.FromContext(ctx); ok && inContext.VCREnabled() { tflog.Info(ctx, "overriding retry behavior to immediately return VCR errors") - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(vcr.InteractionNotFoundRetryableFunc)) + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), vcr.InteractionNotFoundRetryableFunc) } }, withExtraOptions(ctx, p, config), diff --git a/internal/service/devopsguru/testdata/EventSourcesConfig/basic_v5.100.0/main_gen.tf b/internal/service/devopsguru/testdata/EventSourcesConfig/basic_v5.100.0/main_gen.tf new file mode 100644 index 000000000000..e07ed8bf98ec --- /dev/null +++ b/internal/service/devopsguru/testdata/EventSourcesConfig/basic_v5.100.0/main_gen.tf @@ -0,0 +1,21 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resource "aws_devopsguru_event_sources_config" "test" { + event_sources { + amazon_code_guru_profiler { + status = "ENABLED" + } + } +} + +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "5.100.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/devopsguru/testdata/EventSourcesConfig/basic_v6.0.0/main_gen.tf b/internal/service/devopsguru/testdata/EventSourcesConfig/basic_v6.0.0/main_gen.tf new file mode 100644 index 000000000000..00233762488c --- /dev/null +++ b/internal/service/devopsguru/testdata/EventSourcesConfig/basic_v6.0.0/main_gen.tf @@ -0,0 +1,21 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_devopsguru_event_sources_config" "test" { + event_sources { + amazon_code_guru_profiler { + status = "ENABLED" + } + } +} + +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "6.0.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/devopsguru/testdata/ServiceIntegration/basic_v5.100.0/main_gen.tf b/internal/service/devopsguru/testdata/ServiceIntegration/basic_v5.100.0/main_gen.tf new file mode 100644 index 000000000000..5148f5728544 --- /dev/null +++ b/internal/service/devopsguru/testdata/ServiceIntegration/basic_v5.100.0/main_gen.tf @@ -0,0 +1,25 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resource "aws_devopsguru_service_integration" "test" { + # Default to existing configured settings + kms_server_side_encryption {} + + logs_anomaly_detection { + opt_in_status = "DISABLED" + } + ops_center { + opt_in_status = "DISABLED" + } +} + +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "5.100.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/devopsguru/testdata/ServiceIntegration/basic_v6.0.0/main_gen.tf b/internal/service/devopsguru/testdata/ServiceIntegration/basic_v6.0.0/main_gen.tf new file mode 100644 index 000000000000..80190af92292 --- /dev/null +++ b/internal/service/devopsguru/testdata/ServiceIntegration/basic_v6.0.0/main_gen.tf @@ -0,0 +1,25 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_devopsguru_service_integration" "test" { + # Default to existing configured settings + kms_server_side_encryption {} + + logs_anomaly_detection { + opt_in_status = "DISABLED" + } + ops_center { + opt_in_status = "DISABLED" + } +} + +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "6.0.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/directconnect/connection_association.go b/internal/service/directconnect/connection_association.go index 61e7ec97c5be..f8325a62a68d 100644 --- a/internal/service/directconnect/connection_association.go +++ b/internal/service/directconnect/connection_association.go @@ -106,8 +106,8 @@ func deleteConnectionLAGAssociation(ctx context.Context, conn *directconnect.Cli const ( timeout = 1 * time.Minute ) - _, err := tfresource.RetryWhenIsAErrorMessageContains[*awstypes.DirectConnectClientException](ctx, timeout, - func() (any, error) { + _, err := tfresource.RetryWhenIsAErrorMessageContains[any, *awstypes.DirectConnectClientException](ctx, timeout, + func(ctx context.Context) (any, error) { return conn.DisassociateConnectionFromLag(ctx, input) }, "is in a transitioning state") diff --git a/internal/service/directconnect/exports_test.go b/internal/service/directconnect/exports_test.go index 51b9e0438af7..e4df9e525d0c 100644 --- a/internal/service/directconnect/exports_test.go +++ b/internal/service/directconnect/exports_test.go @@ -36,4 +36,5 @@ var ( FindMacSecKeyByTwoPartKey = findMacSecKeyByTwoPartKey FindVirtualInterfaceByID = findVirtualInterfaceByID GatewayAssociationStateUpgradeV0 = gatewayAssociationStateUpgradeV0 + GatewayAssociationStateUpgradeV1 = gatewayAssociationStateUpgradeV1 ) diff --git a/internal/service/directconnect/gateway_association.go b/internal/service/directconnect/gateway_association.go index b046fa7bdad4..81b9944a260f 100644 --- a/internal/service/directconnect/gateway_association.go +++ 
b/internal/service/directconnect/gateway_association.go @@ -14,6 +14,7 @@ import ( "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/directconnect" awstypes "github.com/aws/aws-sdk-go-v2/service/directconnect/types" + "github.com/hashicorp/aws-sdk-go-base/v2/tfawserr" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" @@ -21,9 +22,11 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/enum" "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" + tfec2 "github.com/hashicorp/terraform-provider-aws/internal/service/ec2" tfslices "github.com/hashicorp/terraform-provider-aws/internal/slices" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/internal/verify" + "github.com/hashicorp/terraform-provider-aws/names" ) // @SDKResource("aws_dx_gateway_association", name="Gateway Association") @@ -38,13 +41,18 @@ func resourceGatewayAssociation() *schema.Resource { StateContext: resourceGatewayAssociationImport, }, - SchemaVersion: 1, + SchemaVersion: 2, StateUpgraders: []schema.StateUpgrader{ { Type: resourceGatewayAssociationResourceV0().CoreConfigSchema().ImpliedType(), Upgrade: gatewayAssociationStateUpgradeV0, Version: 0, }, + { + Type: resourceGatewayAssociationResourceV1().CoreConfigSchema().ImpliedType(), + Upgrade: gatewayAssociationStateUpgradeV1, + Version: 1, + }, }, Schema: map[string]*schema.Schema{ @@ -95,6 +103,10 @@ func resourceGatewayAssociation() *schema.Resource { ConflictsWith: []string{"associated_gateway_id"}, AtLeastOneOf: []string{"associated_gateway_id", "associated_gateway_owner_account_id", "proposal_id"}, }, + names.AttrTransitGatewayAttachmentID: { + Type: schema.TypeString, + Computed: true, + }, }, Timeouts: &schema.ResourceTimeout{ @@ -166,10 +178,10 @@ 
func resourceGatewayAssociationCreate(ctx context.Context, d *schema.ResourceDat func resourceGatewayAssociationRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).DirectConnectClient(ctx) + c := meta.(*conns.AWSClient) + conn := c.DirectConnectClient(ctx) associationID := d.Get("dx_gateway_association_id").(string) - output, err := findGatewayAssociationByID(ctx, conn, associationID) if !d.IsNewResource() && tfresource.NotFound(err) { @@ -182,15 +194,30 @@ func resourceGatewayAssociationRead(ctx context.Context, d *schema.ResourceData, return sdkdiag.AppendErrorf(diags, "reading Direct Connect Gateway Association (%s): %s", d.Id(), err) } + associatedGatewayID, dxGatewayID := aws.ToString(output.AssociatedGateway.Id), aws.ToString(output.DirectConnectGatewayId) if err := d.Set("allowed_prefixes", flattenRouteFilterPrefixes(output.AllowedPrefixesToDirectConnectGateway)); err != nil { return sdkdiag.AppendErrorf(diags, "setting allowed_prefixes: %s", err) } - d.Set("associated_gateway_id", output.AssociatedGateway.Id) + d.Set("associated_gateway_id", associatedGatewayID) d.Set("associated_gateway_owner_account_id", output.AssociatedGateway.OwnerAccount) d.Set("associated_gateway_type", output.AssociatedGateway.Type) d.Set("dx_gateway_association_id", output.AssociationId) - d.Set("dx_gateway_id", output.DirectConnectGatewayId) + d.Set("dx_gateway_id", dxGatewayID) d.Set("dx_gateway_owner_account_id", output.DirectConnectGatewayOwnerAccount) + if output.AssociatedGateway.Type == awstypes.GatewayTypeTransitGateway { + transitGatewayAttachment, err := tfec2.FindTransitGatewayAttachmentByTransitGatewayIDAndDirectConnectGatewayID(ctx, c.EC2Client(ctx), associatedGatewayID, dxGatewayID) + + switch { + case tfawserr.ErrCodeEquals(err, "UnauthorizedOperation"): + d.Set(names.AttrTransitGatewayAttachmentID, nil) + case err != nil: + return sdkdiag.AppendErrorf(diags, "reading EC2 Transit 
Gateway (%s) Attachment (%s): %s", associatedGatewayID, dxGatewayID, err) + default: + d.Set(names.AttrTransitGatewayAttachmentID, transitGatewayAttachment.TransitGatewayAttachmentId) + } + } else { + d.Set(names.AttrTransitGatewayAttachmentID, nil) + } return diags } diff --git a/internal/service/directconnect/gateway_association_migrate.go b/internal/service/directconnect/gateway_association_migrate.go index e9b0cfd50b4d..ac2af7bdae75 100644 --- a/internal/service/directconnect/gateway_association_migrate.go +++ b/internal/service/directconnect/gateway_association_migrate.go @@ -8,9 +8,12 @@ import ( "log" "github.com/aws/aws-sdk-go-v2/aws" + awstypes "github.com/aws/aws-sdk-go-v2/service/directconnect/types" + "github.com/hashicorp/aws-sdk-go-base/v2/tfawserr" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" - "github.com/hashicorp/terraform-provider-aws/internal/verify" + tfec2 "github.com/hashicorp/terraform-provider-aws/internal/service/ec2" + "github.com/hashicorp/terraform-provider-aws/names" ) func resourceGatewayAssociationResourceV0() *schema.Resource { @@ -19,60 +22,79 @@ func resourceGatewayAssociationResourceV0() *schema.Resource { "allowed_prefixes": { Type: schema.TypeSet, Optional: true, - Computed: true, Elem: &schema.Schema{Type: schema.TypeString}, }, - "associated_gateway_id": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - ConflictsWith: []string{"associated_gateway_owner_account_id", "proposal_id", "vpn_gateway_id"}, + Type: schema.TypeString, + Optional: true, }, - "associated_gateway_owner_account_id": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - ValidateFunc: verify.ValidAccountID, - ConflictsWith: []string{"associated_gateway_id", "vpn_gateway_id"}, + Type: schema.TypeString, + Optional: true, }, - "associated_gateway_type": { Type: schema.TypeString, - Computed: true, + Optional: true, }, - 
"dx_gateway_association_id": { Type: schema.TypeString, - Computed: true, + Optional: true, }, - "dx_gateway_id": { Type: schema.TypeString, - Required: true, - ForceNew: true, + Optional: true, }, - "dx_gateway_owner_account_id": { Type: schema.TypeString, - Computed: true, + Optional: true, }, - "proposal_id": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - ConflictsWith: []string{"associated_gateway_id", "vpn_gateway_id"}, + Type: schema.TypeString, + Optional: true, }, - "vpn_gateway_id": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - ConflictsWith: []string{"associated_gateway_id", "associated_gateway_owner_account_id", "proposal_id"}, + Type: schema.TypeString, + Optional: true, + }, + }, + } +} + +func resourceGatewayAssociationResourceV1() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "allowed_prefixes": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "associated_gateway_id": { + Type: schema.TypeString, + Optional: true, + }, + "associated_gateway_owner_account_id": { + Type: schema.TypeString, + Optional: true, + }, + "associated_gateway_type": { + Type: schema.TypeString, + Optional: true, + }, + "dx_gateway_association_id": { + Type: schema.TypeString, + Optional: true, + }, + "dx_gateway_id": { + Type: schema.TypeString, + Optional: true, + }, + "dx_gateway_owner_account_id": { + Type: schema.TypeString, + Optional: true, + }, + "proposal_id": { + Type: schema.TypeString, + Optional: true, }, }, } @@ -96,3 +118,27 @@ func gatewayAssociationStateUpgradeV0(ctx context.Context, rawState map[string]a return rawState, nil } + +func gatewayAssociationStateUpgradeV1(ctx context.Context, rawState map[string]any, meta any) (map[string]any, error) { + conn := meta.(*conns.AWSClient).EC2Client(ctx) + + log.Println("[INFO] Found Direct Connect Gateway Association state v1; migrating to v2") + + // transit_gateway_attachment_id was 
introduced in v6.5.0, handle the case where it's not yet present. + if rawState["associated_gateway_type"].(string) == string(awstypes.GatewayTypeTransitGateway) { + if v, ok := rawState[names.AttrTransitGatewayAttachmentID]; !ok || v == nil { + output, err := tfec2.FindTransitGatewayAttachmentByTransitGatewayIDAndDirectConnectGatewayID(ctx, conn, rawState["associated_gateway_id"].(string), rawState["dx_gateway_id"].(string)) + + switch { + case tfawserr.ErrCodeEquals(err, "UnauthorizedOperation"): + rawState[names.AttrTransitGatewayAttachmentID] = nil + case err != nil: + return nil, err + default: + rawState[names.AttrTransitGatewayAttachmentID] = aws.ToString(output.TransitGatewayAttachmentId) + } + } + } + + return rawState, nil +} diff --git a/internal/service/directconnect/gateway_association_test.go b/internal/service/directconnect/gateway_association_test.go index a4355044f5d7..3abcf11dc7a4 100644 --- a/internal/service/directconnect/gateway_association_test.go +++ b/internal/service/directconnect/gateway_association_test.go @@ -12,8 +12,13 @@ import ( awstypes "github.com/aws/aws-sdk-go-v2/service/directconnect/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/knownvalue" + "github.com/hashicorp/terraform-plugin-testing/plancheck" + "github.com/hashicorp/terraform-plugin-testing/statecheck" "github.com/hashicorp/terraform-plugin-testing/terraform" + "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" "github.com/hashicorp/terraform-provider-aws/internal/acctest" + tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" "github.com/hashicorp/terraform-provider-aws/internal/conns" tfdirectconnect "github.com/hashicorp/terraform-provider-aws/internal/service/directconnect" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" @@ -46,6 +51,61 @@ func 
TestAccDirectConnectGatewayAssociation_v0StateUpgrade(t *testing.T) { }) } +func TestAccDirectConnectGatewayAssociation_upgradeFromV6_4_0(t *testing.T) { + ctx := acctest.Context(t) + resourceName := "aws_dx_gateway_association.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rBgpAsn := sdkacctest.RandIntRange(64512, 65534) + var ga awstypes.DirectConnectGatewayAssociation + var gap awstypes.DirectConnectGatewayAssociationProposal + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.DirectConnectServiceID), + CheckDestroy: testAccCheckGatewayAssociationDestroy(ctx), + Steps: []resource.TestStep{ + { + ExternalProviders: map[string]resource.ExternalProvider{ + "aws": { + Source: "hashicorp/aws", + VersionConstraint: "6.4.0", + }, + }, + Config: testAccGatewayAssociationConfig_basicTransitSingleAccount(rName, rBgpAsn), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckGatewayAssociationExists(ctx, resourceName, &ga, &gap), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoValue(resourceName, tfjsonpath.New(names.AttrTransitGatewayAttachmentID)), + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Config: testAccGatewayAssociationConfig_basicTransitSingleAccount(rName, rBgpAsn), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckGatewayAssociationExists(ctx, resourceName, &ga, &gap), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: 
[]statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTransitGatewayAttachmentID), knownvalue.NotNull()), + }, + }, + }, + }) +} + func TestAccDirectConnectGatewayAssociation_basicVPNGatewaySingleAccount(t *testing.T) { ctx := acctest.Context(t) resourceName := "aws_dx_gateway_association.test" @@ -150,6 +210,7 @@ func TestAccDirectConnectGatewayAssociation_basicTransitGatewaySingleAccount(t * resource.TestCheckResourceAttrSet(resourceName, "dx_gateway_association_id"), resource.TestCheckResourceAttrPair(resourceName, "dx_gateway_id", resourceNameDxGw, names.AttrID), acctest.CheckResourceAttrAccountID(ctx, resourceName, "dx_gateway_owner_account_id"), + resource.TestCheckResourceAttrSet(resourceName, names.AttrTransitGatewayAttachmentID), ), }, { @@ -192,6 +253,7 @@ func TestAccDirectConnectGatewayAssociation_basicTransitGatewayCrossAccount(t *t resource.TestCheckResourceAttrPair(resourceName, "dx_gateway_id", resourceNameDxGw, names.AttrID), // dx_gateway_owner_account_id is the "awsalternate" provider's account ID. 
// acctest.CheckResourceAttrAccountID(ctx, resourceName, "dx_gateway_owner_account_id"), + resource.TestCheckResourceAttrSet(resourceName, names.AttrTransitGatewayAttachmentID), ), }, }, diff --git a/internal/service/directconnect/hosted_connection.go b/internal/service/directconnect/hosted_connection.go index c7874c24b1b0..20a4721414dc 100644 --- a/internal/service/directconnect/hosted_connection.go +++ b/internal/service/directconnect/hosted_connection.go @@ -139,21 +139,24 @@ func resourceHostedConnectionRead(ctx context.Context, d *schema.ResourceData, m var diags diag.Diagnostics conn := meta.(*conns.AWSClient).DirectConnectClient(ctx) - connection, err := findHostedConnectionByID(ctx, conn, d.Id()) + // Get both the hosted connection ID and the parent connection ID + hostedConnectionID := d.Id() + parentConnectionID := d.Get(names.AttrConnectionID).(string) + + connection, err := findHostedConnectionByID(ctx, conn, hostedConnectionID, parentConnectionID) if !d.IsNewResource() && tfresource.NotFound(err) { - log.Printf("[WARN] Direct Connect Hosted Connection (%s) not found, removing from state", d.Id()) + log.Printf("[WARN] Direct Connect Hosted Connection (%s) not found, removing from state", hostedConnectionID) d.SetId("") return diags } if err != nil { - return sdkdiag.AppendErrorf(diags, "reading Direct Connect Hosted Connection (%s): %s", d.Id(), err) + return sdkdiag.AppendErrorf(diags, "reading Direct Connect Hosted Connection (%s): %s", hostedConnectionID, err) } - // Cannot set the following attributes from the response: - // - connection_id: conn.ConnectionId is this resource's ID, not the ID of the interconnect or LAG - // - tags: conn.Tags seems to always come back empty and DescribeTags needs to be called from the owner account + // Set the connection_id (parent/interconnect ID) and other attributes + d.Set(names.AttrConnectionID, parentConnectionID) d.Set("aws_device", connection.AwsDeviceV2) d.Set("connection_region", connection.Region) 
d.Set("has_logical_redundancy", connection.HasLogicalRedundancy) @@ -176,18 +179,33 @@ func resourceHostedConnectionDelete(ctx context.Context, d *schema.ResourceData, var diags diag.Diagnostics conn := meta.(*conns.AWSClient).DirectConnectClient(ctx) - if err := deleteConnection(ctx, conn, d.Id(), waitHostedConnectionDeleted); err != nil { + // Get the parent connection ID + parentConnectionID := d.Get(names.AttrConnectionID).(string) + + // Create a wrapper function that adapts waitHostedConnectionDeleted to match the expected signature + waiterWrapper := func(ctx context.Context, conn *directconnect.Client, connectionID string) (*awstypes.Connection, error) { + return waitHostedConnectionDeleted(ctx, conn, connectionID, parentConnectionID) + } + + if err := deleteConnection(ctx, conn, d.Id(), waiterWrapper); err != nil { return sdkdiag.AppendFromErr(diags, err) } return diags } -func findHostedConnectionByID(ctx context.Context, conn *directconnect.Client, id string) (*awstypes.Connection, error) { +func findHostedConnectionByID(ctx context.Context, conn *directconnect.Client, hostedConnectionID, parentConnectionID string) (*awstypes.Connection, error) { + // Use the parent connection ID for the API call input := &directconnect.DescribeHostedConnectionsInput{ - ConnectionId: aws.String(id), + ConnectionId: aws.String(parentConnectionID), } - output, err := findHostedConnection(ctx, conn, input, tfslices.PredicateTrue[*awstypes.Connection]()) + + // Create a predicate to filter by the hosted connection ID + predicate := func(c *awstypes.Connection) bool { + return aws.ToString(c.ConnectionId) == hostedConnectionID + } + + output, err := findHostedConnection(ctx, conn, input, predicate) if err != nil { return nil, err @@ -234,9 +252,9 @@ func findHostedConnections(ctx context.Context, conn *directconnect.Client, inpu return tfslices.Filter(output.Connections, tfslices.PredicateValue(filter)), nil } -func statusHostedConnection(ctx context.Context, conn 
*directconnect.Client, id string) retry.StateRefreshFunc { +func statusHostedConnection(ctx context.Context, conn *directconnect.Client, hostedConnectionID string, parentConnectionID string) retry.StateRefreshFunc { return func() (any, string, error) { - output, err := findHostedConnectionByID(ctx, conn, id) + output, err := findHostedConnectionByID(ctx, conn, hostedConnectionID, parentConnectionID) if tfresource.NotFound(err) { return nil, "", nil @@ -250,14 +268,14 @@ func statusHostedConnection(ctx context.Context, conn *directconnect.Client, id } } -func waitHostedConnectionDeleted(ctx context.Context, conn *directconnect.Client, id string) (*awstypes.Connection, error) { +func waitHostedConnectionDeleted(ctx context.Context, conn *directconnect.Client, hostedConnectionID string, parentConnectionID string) (*awstypes.Connection, error) { const ( timeout = 10 * time.Minute ) stateConf := &retry.StateChangeConf{ Pending: enum.Slice(awstypes.ConnectionStatePending, awstypes.ConnectionStateOrdering, awstypes.ConnectionStateAvailable, awstypes.ConnectionStateRequested, awstypes.ConnectionStateDeleting), Target: []string{}, - Refresh: statusHostedConnection(ctx, conn, id), + Refresh: statusHostedConnection(ctx, conn, hostedConnectionID, parentConnectionID), Timeout: timeout, } diff --git a/internal/service/directconnect/hosted_connection_test.go b/internal/service/directconnect/hosted_connection_test.go index eb10bc459cb2..7650e9c46195 100644 --- a/internal/service/directconnect/hosted_connection_test.go +++ b/internal/service/directconnect/hosted_connection_test.go @@ -60,7 +60,9 @@ func testAccCheckHostedConnectionDestroy(ctx context.Context, providerFunc func( continue } - _, err := tfdirectconnect.FindHostedConnectionByID(ctx, conn, rs.Primary.ID) + // Get the parent connection ID from the resource attributes + parentConnectionID := rs.Primary.Attributes[names.AttrConnectionID] + _, err := tfdirectconnect.FindHostedConnectionByID(ctx, conn, rs.Primary.ID, 
parentConnectionID) if tfresource.NotFound(err) { continue @@ -86,7 +88,10 @@ func testAccCheckHostedConnectionExists(ctx context.Context, n string) resource. return fmt.Errorf("Not found: %s", n) } - _, err := tfdirectconnect.FindHostedConnectionByID(ctx, conn, rs.Primary.ID) + // Get the parent connection ID from the resource state + parentConnectionID := rs.Primary.Attributes[names.AttrConnectionID] + + _, err := tfdirectconnect.FindHostedConnectionByID(ctx, conn, rs.Primary.ID, parentConnectionID) return err } diff --git a/internal/service/directconnect/service_endpoint_resolver_gen.go b/internal/service/directconnect/service_endpoint_resolver_gen.go index 01356443ed8e..354800f12385 100644 --- a/internal/service/directconnect/service_endpoint_resolver_gen.go +++ b/internal/service/directconnect/service_endpoint_resolver_gen.go @@ -62,7 +62,7 @@ func (r resolverV2) ResolveEndpoint(ctx context.Context, params directconnect.En }) params.UseFIPS = aws.Bool(false) } else { - err = fmt.Errorf("looking up directconnect endpoint %q: %s", hostname, err) + err = fmt.Errorf("looking up directconnect endpoint %q: %w", hostname, err) return } } else { diff --git a/internal/service/directconnect/service_endpoints_gen_test.go b/internal/service/directconnect/service_endpoints_gen_test.go index d6f8c9a1ae4c..1fabf4e792e3 100644 --- a/internal/service/directconnect/service_endpoints_gen_test.go +++ b/internal/service/directconnect/service_endpoints_gen_test.go @@ -521,7 +521,7 @@ func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { ) } -var errCancelOperation = fmt.Errorf("Test: Canceling request") +var errCancelOperation = errors.New("Test: Canceling request") func addCancelRequestMiddleware() func(*middleware.Stack) error { return func(stack *middleware.Stack) error { diff --git a/internal/service/directconnect/service_package_gen.go b/internal/service/directconnect/service_package_gen.go index de9b0705bb61..b27ba0cbb56e 100644 --- 
a/internal/service/directconnect/service_package_gen.go +++ b/internal/service/directconnect/service_package_gen.go @@ -7,7 +7,6 @@ import ( "unique" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/directconnect" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -233,7 +232,7 @@ func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) ( func(o *directconnect.Options) { if inContext, ok := conns.FromContext(ctx); ok && inContext.VCREnabled() { tflog.Info(ctx, "overriding retry behavior to immediately return VCR errors") - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(vcr.InteractionNotFoundRetryableFunc)) + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), vcr.InteractionNotFoundRetryableFunc) } }, withExtraOptions(ctx, p, config), diff --git a/internal/service/directconnect/sweep.go b/internal/service/directconnect/sweep.go index da3dc1472a3a..f4e6f9350609 100644 --- a/internal/service/directconnect/sweep.go +++ b/internal/service/directconnect/sweep.go @@ -63,7 +63,7 @@ func sweepConnections(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %w", err) + return fmt.Errorf("getting client: %w", err) } input := &directconnect.DescribeConnectionsInput{} conn := client.DirectConnectClient(ctx) @@ -101,7 +101,7 @@ func sweepGatewayAssociationProposals(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %w", err) + return fmt.Errorf("getting client: %w", err) } input := &directconnect.DescribeDirectConnectGatewayAssociationProposalsInput{} conn := client.DirectConnectClient(ctx) @@ -157,7 +157,7 @@ func 
sweepGatewayAssociations(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %w", err) + return fmt.Errorf("getting client: %w", err) } input := &directconnect.DescribeDirectConnectGatewaysInput{} conn := client.DirectConnectClient(ctx) @@ -294,7 +294,7 @@ func sweepGateways(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %w", err) + return fmt.Errorf("getting client: %w", err) } input := &directconnect.DescribeDirectConnectGatewaysInput{} conn := client.DirectConnectClient(ctx) @@ -378,7 +378,7 @@ func sweepLags(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %w", err) + return fmt.Errorf("getting client: %w", err) } input := &directconnect.DescribeLagsInput{} conn := client.DirectConnectClient(ctx) @@ -416,7 +416,7 @@ func sweepMacSecKeys(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %w", err) + return fmt.Errorf("getting client: %w", err) } input := &directconnect.DescribeConnectionsInput{} dxConn := client.DirectConnectClient(ctx) diff --git a/internal/service/directconnect/tags_gen.go b/internal/service/directconnect/tags_gen.go index e5ab96a34697..3c38bae10936 100644 --- a/internal/service/directconnect/tags_gen.go +++ b/internal/service/directconnect/tags_gen.go @@ -3,8 +3,8 @@ package directconnect import ( "context" - "fmt" + "github.com/YakDriver/smarterr" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/directconnect" awstypes "github.com/aws/aws-sdk-go-v2/service/directconnect/types" @@ -27,7 +27,7 @@ func listTags(ctx context.Context, conn 
*directconnect.Client, identifier string output, err := conn.DescribeTags(ctx, &input, optFns...) if err != nil { - return tftags.New(ctx, nil), err + return tftags.New(ctx, nil), smarterr.NewError(err) } return keyValueTags(ctx, output.ResourceTags[0].Tags), nil @@ -39,7 +39,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri tags, err := listTags(ctx, meta.(*conns.AWSClient).DirectConnectClient(ctx), identifier) if err != nil { - return err + return smarterr.NewError(err) } if inContext, ok := tftags.FromContext(ctx); ok { @@ -126,7 +126,7 @@ func updateTags(ctx context.Context, conn *directconnect.Client, identifier stri _, err := conn.UntagResource(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("untagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } @@ -141,7 +141,7 @@ func updateTags(ctx context.Context, conn *directconnect.Client, identifier stri _, err := conn.TagResource(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("tagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } diff --git a/internal/service/dlm/lifecycle_policy.go b/internal/service/dlm/lifecycle_policy.go index 02f1a82aedbd..5e50db6a8711 100644 --- a/internal/service/dlm/lifecycle_policy.go +++ b/internal/service/dlm/lifecycle_policy.go @@ -52,6 +52,11 @@ func resourceLifecyclePolicy() *schema.Resource { validation.StringLenBetween(1, 500), ), }, + "default_policy": { + Type: schema.TypeString, + Optional: true, + ValidateDiagFunc: enum.Validate[awstypes.DefaultPolicyTypeValues](), + }, names.AttrExecutionRoleARN: { Type: schema.TypeString, Required: true, @@ -132,6 +137,63 @@ func resourceLifecyclePolicy() *schema.Resource { }, }, }, + "copy_tags": { + Type: schema.TypeBool, + Optional: true, + Default: false, + ConflictsWith: []string{"policy_details.0.schedule"}, + RequiredWith: []string{"default_policy"}, + }, + "create_interval": { + Type: schema.TypeInt, + Optional: true, + 
Default: 1, + ValidateFunc: validation.IntBetween(1, 7), + ConflictsWith: []string{"policy_details.0.schedule"}, + RequiredWith: []string{"default_policy"}, + DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool { + if d.Get("default_policy").(string) == "" { + if old == "0" && new == "1" { + return true + } + } + return false + }, + }, + "exclusions": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + RequiredWith: []string{"default_policy"}, + ConflictsWith: []string{"policy_details.0.resource_types", "policy_details.0.schedule"}, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "exclude_boot_volumes": { + Type: schema.TypeBool, + Optional: true, + }, + "exclude_tags": { + Type: schema.TypeMap, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "exclude_volume_types": { + Type: schema.TypeList, + Optional: true, + MinItems: 0, + MaxItems: 6, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + }, + }, + "extend_deletion": { + Type: schema.TypeBool, + Optional: true, + Default: false, + ConflictsWith: []string{"policy_details.0.schedule"}, + RequiredWith: []string{"default_policy"}, + }, "event_source": { Type: schema.TypeList, Optional: true, @@ -174,25 +236,6 @@ func resourceLifecyclePolicy() *schema.Resource { }, }, }, - "resource_types": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - Elem: &schema.Schema{ - Type: schema.TypeString, - ValidateDiagFunc: enum.Validate[awstypes.ResourceTypeValues](), - }, - }, - "resource_locations": { - Type: schema.TypeList, - Optional: true, - Computed: true, - MaxItems: 1, - Elem: &schema.Schema{ - Type: schema.TypeString, - ValidateDiagFunc: enum.Validate[awstypes.ResourceLocationValues](), - }, - }, names.AttrParameters: { Type: schema.TypeList, Optional: true, @@ -210,12 +253,60 @@ func resourceLifecyclePolicy() *schema.Resource { }, }, }, + "policy_language": { + Type: schema.TypeString, + Optional: true, + Computed: true, + 
ValidateDiagFunc: enum.Validate[awstypes.PolicyLanguageValues](), + }, "policy_type": { Type: schema.TypeString, Optional: true, Default: awstypes.PolicyTypeValuesEbsSnapshotManagement, ValidateDiagFunc: enum.Validate[awstypes.PolicyTypeValues](), }, + "resource_locations": { + Type: schema.TypeList, + Optional: true, + Computed: true, + MaxItems: 1, + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateDiagFunc: enum.Validate[awstypes.ResourceLocationValues](), + }, + }, + names.AttrResourceType: { + Type: schema.TypeString, + Optional: true, + ValidateDiagFunc: enum.Validate[awstypes.ResourceTypeValues](), + ConflictsWith: []string{"policy_details.0.resource_types", "policy_details.0.schedule"}, + RequiredWith: []string{"default_policy"}, + }, + "resource_types": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateDiagFunc: enum.Validate[awstypes.ResourceTypeValues](), + }, + ConflictsWith: []string{"policy_details.0.resource_type", "default_policy"}, + }, + "retain_interval": { + Type: schema.TypeInt, + Optional: true, + Default: 7, + ValidateFunc: validation.IntBetween(2, 14), + ConflictsWith: []string{"policy_details.0.schedule"}, + RequiredWith: []string{"default_policy"}, + DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool { + if d.Get("default_policy").(string) == "" { + if old == "0" && new == "7" { + return true + } + } + return false + }, + }, names.AttrSchedule: { Type: schema.TypeList, Optional: true, @@ -223,6 +314,48 @@ func resourceLifecyclePolicy() *schema.Resource { MaxItems: 4, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ + "archive_rule": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "archive_retain_rule": { + Type: schema.TypeList, + Required: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "retention_archive_tier": { + Type: 
schema.TypeList, + Required: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "count": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntBetween(1, 1000), + }, + names.AttrInterval: { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntAtLeast(1), + }, + "interval_unit": { + Type: schema.TypeString, + Optional: true, + ValidateDiagFunc: enum.Validate[awstypes.RetentionIntervalUnitValues](), + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, "copy_tags": { Type: schema.TypeBool, Optional: true, @@ -257,6 +390,56 @@ func resourceLifecyclePolicy() *schema.Resource { Computed: true, ValidateDiagFunc: enum.Validate[awstypes.LocationValues](), }, + "scripts": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "execute_operation_on_script_failure": { + Type: schema.TypeBool, + Computed: true, + Optional: true, + }, + "execution_handler": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.All( + validation.StringLenBetween(0, 200), + validation.StringMatch(regexache.MustCompile("^([a-zA-Z0-9_\\-.]{3,128}|[a-zA-Z0-9_\\-.:/]{3,200}|[A-Z0-9_]+)$"), "see https://docs.aws.amazon.com/dlm/latest/APIReference/API_Action.html"), + ), + }, + "execution_handler_service": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ValidateDiagFunc: enum.Validate[awstypes.ExecutionHandlerServiceValues](), + }, + "execution_timeout": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + ValidateFunc: validation.IntBetween(1, 120), + }, + "maximum_retry_count": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + ValidateFunc: validation.IntBetween(1, 3), + }, + "stages": { + Type: schema.TypeList, + Optional: true, + MinItems: 1, + MaxItems: 2, + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateDiagFunc: enum.Validate[awstypes.StageValues](), + }, + }, + }, + }, + }, "times": 
{ Type: schema.TypeList, Optional: true, @@ -330,9 +513,14 @@ func resourceLifecyclePolicy() *schema.Resource { }, names.AttrTarget: { Type: schema.TypeString, - Required: true, + Optional: true, ValidateFunc: validation.StringMatch(regexache.MustCompile(`^[\w:\-\/\*]+$`), ""), }, + "target_region": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidRegionName, + }, }, }, }, @@ -486,19 +674,25 @@ const ( ) func resourceLifecyclePolicyCreate(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { - const createRetryTimeout = 2 * time.Minute var diags diag.Diagnostics conn := meta.(*conns.AWSClient).DLMClient(ctx) input := dlm.CreateLifecyclePolicyInput{ Description: aws.String(d.Get(names.AttrDescription).(string)), ExecutionRoleArn: aws.String(d.Get(names.AttrExecutionRoleARN).(string)), - PolicyDetails: expandPolicyDetails(d.Get("policy_details").([]any)), + PolicyDetails: expandPolicyDetails(d.Get("policy_details").([]any), d.Get("default_policy").(string)), State: awstypes.SettablePolicyStateValues(d.Get(names.AttrState).(string)), Tags: getTagsIn(ctx), } - out, err := tfresource.RetryWhenIsA[*awstypes.InvalidRequestException](ctx, createRetryTimeout, func() (any, error) { + if v, ok := d.GetOk("default_policy"); ok { + input.DefaultPolicy = awstypes.DefaultPolicyTypeValues(v.(string)) + } + + const ( + timeout = 2 * time.Minute + ) + output, err := tfresource.RetryWhenIsA[*dlm.CreateLifecyclePolicyOutput, *awstypes.InvalidRequestException](ctx, timeout, func(ctx context.Context) (*dlm.CreateLifecyclePolicyOutput, error) { return conn.CreateLifecyclePolicy(ctx, &input) }) @@ -506,7 +700,7 @@ func resourceLifecyclePolicyCreate(ctx context.Context, d *schema.ResourceData, return sdkdiag.AppendErrorf(diags, "creating DLM Lifecycle Policy: %s", err) } - d.SetId(aws.ToString(out.(*dlm.CreateLifecyclePolicyOutput).PolicyId)) + d.SetId(aws.ToString(output.PolicyId)) return append(diags, resourceLifecyclePolicyRead(ctx, d, 
meta)...) } @@ -515,8 +709,7 @@ func resourceLifecyclePolicyRead(ctx context.Context, d *schema.ResourceData, me var diags diag.Diagnostics conn := meta.(*conns.AWSClient).DLMClient(ctx) - log.Printf("[INFO] Reading DLM lifecycle policy: %s", d.Id()) - out, err := findLifecyclePolicyByID(ctx, conn, d.Id()) + output, err := findLifecyclePolicyByID(ctx, conn, d.Id()) if !d.IsNewResource() && tfresource.NotFound(err) { log.Printf("[WARN] DLM Lifecycle Policy (%s) not found, removing from state", d.Id()) @@ -528,15 +721,18 @@ func resourceLifecyclePolicyRead(ctx context.Context, d *schema.ResourceData, me return sdkdiag.AppendErrorf(diags, "reading DLM Lifecycle Policy (%s): %s", d.Id(), err) } - d.Set(names.AttrARN, out.Policy.PolicyArn) - d.Set(names.AttrDescription, out.Policy.Description) - d.Set(names.AttrExecutionRoleARN, out.Policy.ExecutionRoleArn) - d.Set(names.AttrState, out.Policy.State) - if err := d.Set("policy_details", flattenPolicyDetails(out.Policy.PolicyDetails)); err != nil { + d.Set(names.AttrARN, output.Policy.PolicyArn) + if aws.ToBool(output.Policy.DefaultPolicy) { + d.Set("default_policy", d.Get("default_policy")) + } + d.Set(names.AttrDescription, output.Policy.Description) + d.Set(names.AttrExecutionRoleARN, output.Policy.ExecutionRoleArn) + if err := d.Set("policy_details", flattenPolicyDetails(output.Policy.PolicyDetails)); err != nil { return sdkdiag.AppendErrorf(diags, "setting policy details %s", err) } + d.Set(names.AttrState, output.Policy.State) - setTagsOut(ctx, out.Policy.Tags) + setTagsOut(ctx, output.Policy.Tags) return diags } @@ -556,15 +752,15 @@ func resourceLifecyclePolicyUpdate(ctx context.Context, d *schema.ResourceData, if d.HasChange(names.AttrExecutionRoleARN) { input.ExecutionRoleArn = aws.String(d.Get(names.AttrExecutionRoleARN).(string)) } + if d.HasChange("policy_details") { + input.PolicyDetails = expandPolicyDetails(d.Get("policy_details").([]any), d.Get("default_policy").(string)) + } if 
d.HasChange(names.AttrState) { input.State = awstypes.SettablePolicyStateValues(d.Get(names.AttrState).(string)) } - if d.HasChange("policy_details") { - input.PolicyDetails = expandPolicyDetails(d.Get("policy_details").([]any)) - } - log.Printf("[INFO] Updating lifecycle policy %s", d.Id()) _, err := conn.UpdateLifecyclePolicy(ctx, &input) + if err != nil { return sdkdiag.AppendErrorf(diags, "updating DLM Lifecycle Policy (%s): %s", d.Id(), err) } @@ -595,16 +791,19 @@ func resourceLifecyclePolicyDelete(ctx context.Context, d *schema.ResourceData, } func findLifecyclePolicyByID(ctx context.Context, conn *dlm.Client, id string) (*dlm.GetLifecyclePolicyOutput, error) { - input := &dlm.GetLifecyclePolicyInput{ + input := dlm.GetLifecyclePolicyInput{ PolicyId: aws.String(id), } + return findLifecyclePolicy(ctx, conn, &input) +} + +func findLifecyclePolicy(ctx context.Context, conn *dlm.Client, input *dlm.GetLifecyclePolicyInput) (*dlm.GetLifecyclePolicyOutput, error) { output, err := conn.GetLifecyclePolicy(ctx, input) if errs.IsA[*awstypes.ResourceNotFoundException](err) { return nil, &retry.NotFoundError{ - LastRequest: input, - LastError: err, + LastError: err, } } @@ -619,670 +818,875 @@ func findLifecyclePolicyByID(ctx context.Context, conn *dlm.Client, id string) ( return output, nil } -func expandPolicyDetails(cfg []any) *awstypes.PolicyDetails { - if len(cfg) == 0 || cfg[0] == nil { +func expandPolicyDetails(tfList []any, defaultPolicyValue string) *awstypes.PolicyDetails { + if len(tfList) == 0 || tfList[0] == nil { return nil } - m := cfg[0].(map[string]any) - policyType := m["policy_type"].(string) - policyDetails := &awstypes.PolicyDetails{ - PolicyType: awstypes.PolicyTypeValues(policyType), + tfMap := tfList[0].(map[string]any) + policyType := awstypes.PolicyTypeValues(tfMap["policy_type"].(string)) + apiObject := &awstypes.PolicyDetails{ + PolicyType: policyType, } - if v, ok := m["resource_types"].([]any); ok && len(v) > 0 { - 
policyDetails.ResourceTypes = flex.ExpandStringyValueList[awstypes.ResourceTypeValues](v) + + if defaultPolicyValue != "" { + if v, ok := tfMap["copy_tags"].(bool); ok { + apiObject.CopyTags = aws.Bool(v) + } + if v, ok := tfMap["create_interval"].(int); ok { + apiObject.CreateInterval = aws.Int32(int32(v)) + } + if v, ok := tfMap["exclusions"].([]any); ok && len(v) > 0 { + apiObject.Exclusions = expandExclusions(v) + } + if v, ok := tfMap["extend_deletion"].(bool); ok { + apiObject.ExtendDeletion = aws.Bool(v) + } + if v, ok := tfMap[names.AttrResourceType].(string); ok { + apiObject.ResourceType = awstypes.ResourceTypeValues(v) + } + if v, ok := tfMap["retain_interval"].(int); ok { + apiObject.RetainInterval = aws.Int32(int32(v)) + } + } + + if v, ok := tfMap[names.AttrAction].([]any); ok && len(v) > 0 { + apiObject.Actions = expandActions(v) } - if v, ok := m["resource_locations"].([]any); ok && len(v) > 0 { - policyDetails.ResourceLocations = flex.ExpandStringyValueList[awstypes.ResourceLocationValues](v) + if v, ok := tfMap["event_source"].([]any); ok && len(v) > 0 { + apiObject.EventSource = expandEventSource(v) } - if v, ok := m[names.AttrSchedule].([]any); ok && len(v) > 0 { - policyDetails.Schedules = expandSchedules(v) + if v, ok := tfMap[names.AttrParameters].([]any); ok && len(v) > 0 { + apiObject.Parameters = expandParameters(v, policyType) } - if v, ok := m[names.AttrAction].([]any); ok && len(v) > 0 { - policyDetails.Actions = expandActions(v) + if v, ok := tfMap["policy_language"].(string); ok { + apiObject.PolicyLanguage = awstypes.PolicyLanguageValues(v) } - if v, ok := m["event_source"].([]any); ok && len(v) > 0 { - policyDetails.EventSource = expandEventSource(v) + if v, ok := tfMap["resource_types"].([]any); ok && len(v) > 0 { + apiObject.ResourceTypes = flex.ExpandStringyValueList[awstypes.ResourceTypeValues](v) } - if v, ok := m["target_tags"].(map[string]any); ok && len(v) > 0 { - policyDetails.TargetTags = expandTags(v) + if v, ok := 
tfMap["resource_locations"].([]any); ok && len(v) > 0 { + apiObject.ResourceLocations = flex.ExpandStringyValueList[awstypes.ResourceLocationValues](v) } - if v, ok := m[names.AttrParameters].([]any); ok && len(v) > 0 { - policyDetails.Parameters = expandParameters(v, policyType) + if v, ok := tfMap[names.AttrSchedule].([]any); ok && len(v) > 0 { + apiObject.Schedules = expandSchedules(v) + } + if v, ok := tfMap["target_tags"].(map[string]any); ok && len(v) > 0 { + apiObject.TargetTags = expandTags(v) } - return policyDetails + return apiObject } -func flattenPolicyDetails(policyDetails *awstypes.PolicyDetails) []map[string]any { - result := make(map[string]any) - result["resource_types"] = flex.FlattenStringyValueList(policyDetails.ResourceTypes) - result["resource_locations"] = flex.FlattenStringyValueList(policyDetails.ResourceLocations) - result[names.AttrAction] = flattenActions(policyDetails.Actions) - result["event_source"] = flattenEventSource(policyDetails.EventSource) - result[names.AttrSchedule] = flattenSchedules(policyDetails.Schedules) - result["target_tags"] = flattenTags(policyDetails.TargetTags) - result["policy_type"] = string(policyDetails.PolicyType) +func flattenPolicyDetails(apiObject *awstypes.PolicyDetails) []any { + tfMap := make(map[string]any) + tfMap[names.AttrAction] = flattenActions(apiObject.Actions) + tfMap["copy_tags"] = aws.ToBool(apiObject.CopyTags) + tfMap["create_interval"] = aws.ToInt32(apiObject.CreateInterval) + tfMap["event_source"] = flattenEventSource(apiObject.EventSource) + tfMap["exclusions"] = flattenExclusions(apiObject.Exclusions) + tfMap["extend_deletion"] = aws.ToBool(apiObject.ExtendDeletion) + if apiObject.Parameters != nil { + tfMap[names.AttrParameters] = flattenParameters(apiObject.Parameters) + } + tfMap["policy_language"] = apiObject.PolicyLanguage + tfMap["policy_type"] = apiObject.PolicyType + tfMap[names.AttrResourceType] = apiObject.ResourceType + tfMap["resource_types"] = apiObject.ResourceTypes + 
tfMap["resource_locations"] = apiObject.ResourceLocations + tfMap["retain_interval"] = aws.ToInt32(apiObject.RetainInterval) + tfMap[names.AttrSchedule] = flattenSchedules(apiObject.Schedules) + tfMap["target_tags"] = flattenTags(apiObject.TargetTags) + + return []any{tfMap} +} - if policyDetails.Parameters != nil { - result[names.AttrParameters] = flattenParameters(policyDetails.Parameters) - } +func expandSchedules(tfList []any) []awstypes.Schedule { + apiObjects := make([]awstypes.Schedule, len(tfList)) - return []map[string]any{result} -} + for i, tfMapRaw := range tfList { + apiObject := awstypes.Schedule{} + tfMap := tfMapRaw.(map[string]any) -func expandSchedules(cfg []any) []awstypes.Schedule { - schedules := make([]awstypes.Schedule, len(cfg)) - for i, c := range cfg { - schedule := awstypes.Schedule{} - m := c.(map[string]any) - if v, ok := m["copy_tags"]; ok { - schedule.CopyTags = aws.Bool(v.(bool)) + if v, ok := tfMap["archive_rule"].([]any); ok && len(v) > 0 { + apiObject.ArchiveRule = expandArchiveRule(v) } - if v, ok := m["create_rule"]; ok { - schedule.CreateRule = expandCreateRule(v.([]any)) + if v, ok := tfMap["copy_tags"]; ok { + apiObject.CopyTags = aws.Bool(v.(bool)) } - if v, ok := m["cross_region_copy_rule"].(*schema.Set); ok && v.Len() > 0 { - schedule.CrossRegionCopyRules = expandCrossRegionCopyRules(v.List()) + if v, ok := tfMap["create_rule"]; ok { + apiObject.CreateRule = expandCreateRule(v.([]any)) } - if v, ok := m[names.AttrName]; ok { - schedule.Name = aws.String(v.(string)) + if v, ok := tfMap["cross_region_copy_rule"].(*schema.Set); ok && v.Len() > 0 { + apiObject.CrossRegionCopyRules = expandCrossRegionCopyRules(v.List()) } - if v, ok := m["deprecate_rule"]; ok { - schedule.DeprecateRule = expandDeprecateRule(v.([]any)) + if v, ok := tfMap["deprecate_rule"]; ok { + apiObject.DeprecateRule = expandDeprecateRule(v.([]any)) } - if v, ok := m["fast_restore_rule"]; ok { - schedule.FastRestoreRule = expandFastRestoreRule(v.([]any)) + 
if v, ok := tfMap["fast_restore_rule"]; ok { + apiObject.FastRestoreRule = expandFastRestoreRule(v.([]any)) } - if v, ok := m["share_rule"]; ok { - schedule.ShareRules = expandShareRule(v.([]any)) + if v, ok := tfMap[names.AttrName]; ok { + apiObject.Name = aws.String(v.(string)) } - if v, ok := m["retain_rule"]; ok { - schedule.RetainRule = expandRetainRule(v.([]any)) + if v, ok := tfMap["retain_rule"]; ok { + apiObject.RetainRule = expandRetainRule(v.([]any)) } - if v, ok := m["tags_to_add"]; ok { - schedule.TagsToAdd = expandTags(v.(map[string]any)) + if v, ok := tfMap["share_rule"]; ok { + apiObject.ShareRules = expandShareRule(v.([]any)) } - if v, ok := m["variable_tags"]; ok { - schedule.VariableTags = expandTags(v.(map[string]any)) + if v, ok := tfMap["tags_to_add"]; ok { + apiObject.TagsToAdd = expandTags(v.(map[string]any)) + } + if v, ok := tfMap["variable_tags"]; ok { + apiObject.VariableTags = expandTags(v.(map[string]any)) } - schedules[i] = schedule + apiObjects[i] = apiObject } - return schedules + return apiObjects } -func flattenSchedules(schedules []awstypes.Schedule) []map[string]any { - result := make([]map[string]any, len(schedules)) - for i, s := range schedules { - m := make(map[string]any) - m["copy_tags"] = aws.ToBool(s.CopyTags) - m["create_rule"] = flattenCreateRule(s.CreateRule) - m["cross_region_copy_rule"] = flattenCrossRegionCopyRules(s.CrossRegionCopyRules) - m[names.AttrName] = aws.ToString(s.Name) - m["retain_rule"] = flattenRetainRule(s.RetainRule) - m["tags_to_add"] = flattenTags(s.TagsToAdd) - m["variable_tags"] = flattenTags(s.VariableTags) - - if s.DeprecateRule != nil { - m["deprecate_rule"] = flattenDeprecateRule(s.DeprecateRule) +func flattenSchedules(apiObjects []awstypes.Schedule) []any { + tfList := make([]any, len(apiObjects)) + + for i, apiObject := range apiObjects { + tfMap := make(map[string]any) + tfMap["archive_rule"] = flattenArchiveRule(apiObject.ArchiveRule) + tfMap["copy_tags"] = aws.ToBool(apiObject.CopyTags) 
+ tfMap["create_rule"] = flattenCreateRule(apiObject.CreateRule) + tfMap["cross_region_copy_rule"] = flattenCrossRegionCopyRules(apiObject.CrossRegionCopyRules) + if apiObject.DeprecateRule != nil { + tfMap["deprecate_rule"] = flattenDeprecateRule(apiObject.DeprecateRule) } - - if s.FastRestoreRule != nil { - m["fast_restore_rule"] = flattenFastRestoreRule(s.FastRestoreRule) + if apiObject.FastRestoreRule != nil { + tfMap["fast_restore_rule"] = flattenFastRestoreRule(apiObject.FastRestoreRule) } - - if s.ShareRules != nil { - m["share_rule"] = flattenShareRule(s.ShareRules) + tfMap[names.AttrName] = aws.ToString(apiObject.Name) + tfMap["retain_rule"] = flattenRetainRule(apiObject.RetainRule) + if apiObject.ShareRules != nil { + tfMap["share_rule"] = flattenShareRule(apiObject.ShareRules) } + tfMap["tags_to_add"] = flattenTags(apiObject.TagsToAdd) + tfMap["variable_tags"] = flattenTags(apiObject.VariableTags) - result[i] = m + tfList[i] = tfMap } - return result + return tfList } -func expandActions(cfg []any) []awstypes.Action { - actions := make([]awstypes.Action, len(cfg)) - for i, c := range cfg { - action := awstypes.Action{} - m := c.(map[string]any) - if v, ok := m["cross_region_copy"].(*schema.Set); ok { - action.CrossRegionCopy = expandActionCrossRegionCopyRules(v.List()) +func expandActions(tfList []any) []awstypes.Action { + apiObjects := make([]awstypes.Action, len(tfList)) + + for i, tfMapRaw := range tfList { + apiObject := awstypes.Action{} + tfMap := tfMapRaw.(map[string]any) + + if v, ok := tfMap["cross_region_copy"].(*schema.Set); ok { + apiObject.CrossRegionCopy = expandActionCrossRegionCopyRules(v.List()) } - if v, ok := m[names.AttrName]; ok { - action.Name = aws.String(v.(string)) + if v, ok := tfMap[names.AttrName]; ok { + apiObject.Name = aws.String(v.(string)) } - actions[i] = action + apiObjects[i] = apiObject } - return actions + return apiObjects } -func flattenActions(actions []awstypes.Action) []map[string]any { - result := 
make([]map[string]any, len(actions)) - for i, s := range actions { - m := make(map[string]any) - - m[names.AttrName] = aws.ToString(s.Name) +func flattenActions(apiObjects []awstypes.Action) []any { + tfList := make([]any, len(apiObjects)) - if s.CrossRegionCopy != nil { - m["cross_region_copy"] = flattenActionCrossRegionCopyRules(s.CrossRegionCopy) + for i, apiObject := range apiObjects { + tfMap := make(map[string]any) + if apiObject.CrossRegionCopy != nil { + tfMap["cross_region_copy"] = flattenActionCrossRegionCopyRules(apiObject.CrossRegionCopy) } + tfMap[names.AttrName] = aws.ToString(apiObject.Name) - result[i] = m + tfList[i] = tfMap } - return result + return tfList } -func expandActionCrossRegionCopyRules(l []any) []awstypes.CrossRegionCopyAction { - if len(l) == 0 || l[0] == nil { +func expandActionCrossRegionCopyRules(tfList []any) []awstypes.CrossRegionCopyAction { + if len(tfList) == 0 || tfList[0] == nil { return nil } - var rules []awstypes.CrossRegionCopyAction - - for _, tfMapRaw := range l { - m, ok := tfMapRaw.(map[string]any) + var apiObjects []awstypes.CrossRegionCopyAction + for _, tfMapRaw := range tfList { + tfMap, ok := tfMapRaw.(map[string]any) if !ok { continue } - rule := awstypes.CrossRegionCopyAction{} - if v, ok := m[names.AttrEncryptionConfiguration].([]any); ok { - rule.EncryptionConfiguration = expandActionCrossRegionCopyRuleEncryptionConfiguration(v) + apiObject := awstypes.CrossRegionCopyAction{} + if v, ok := tfMap[names.AttrEncryptionConfiguration].([]any); ok { + apiObject.EncryptionConfiguration = expandActionCrossRegionCopyRuleEncryptionConfiguration(v) } - if v, ok := m["retain_rule"].([]any); ok && len(v) > 0 && v[0] != nil { - rule.RetainRule = expandCrossRegionCopyRuleRetainRule(v) + if v, ok := tfMap["retain_rule"].([]any); ok && len(v) > 0 && v[0] != nil { + apiObject.RetainRule = expandCrossRegionCopyRuleRetainRule(v) } - if v, ok := m[names.AttrTarget].(string); ok && v != "" { - rule.Target = aws.String(v) + if v, 
ok := tfMap[names.AttrTarget].(string); ok && v != "" { + apiObject.Target = aws.String(v) } - rules = append(rules, rule) + apiObjects = append(apiObjects, apiObject) } - return rules + return apiObjects } -func flattenActionCrossRegionCopyRules(rules []awstypes.CrossRegionCopyAction) []any { - if len(rules) == 0 { +func flattenActionCrossRegionCopyRules(apiObjects []awstypes.CrossRegionCopyAction) []any { + if len(apiObjects) == 0 { return []any{} } - var result []any + var tfList []any - for _, rule := range rules { - m := map[string]any{ - names.AttrEncryptionConfiguration: flattenActionCrossRegionCopyRuleEncryptionConfiguration(rule.EncryptionConfiguration), - "retain_rule": flattenCrossRegionCopyRuleRetainRule(rule.RetainRule), - names.AttrTarget: aws.ToString(rule.Target), + for _, apiObject := range apiObjects { + tfMap := map[string]any{ + names.AttrEncryptionConfiguration: flattenActionCrossRegionCopyRuleEncryptionConfiguration(apiObject.EncryptionConfiguration), + "retain_rule": flattenCrossRegionCopyRuleRetainRule(apiObject.RetainRule), + names.AttrTarget: aws.ToString(apiObject.Target), } - result = append(result, m) + tfList = append(tfList, tfMap) } - return result + return tfList } -func expandActionCrossRegionCopyRuleEncryptionConfiguration(l []any) *awstypes.EncryptionConfiguration { - if len(l) == 0 || l[0] == nil { +func expandActionCrossRegionCopyRuleEncryptionConfiguration(tfList []any) *awstypes.EncryptionConfiguration { + if len(tfList) == 0 || tfList[0] == nil { return nil } - m := l[0].(map[string]any) - config := &awstypes.EncryptionConfiguration{ - Encrypted: aws.Bool(m[names.AttrEncrypted].(bool)), + tfMap := tfList[0].(map[string]any) + apiObject := &awstypes.EncryptionConfiguration{ + Encrypted: aws.Bool(tfMap[names.AttrEncrypted].(bool)), } - if v, ok := m["cmk_arn"].(string); ok && v != "" { - config.CmkArn = aws.String(v) + if v, ok := tfMap["cmk_arn"].(string); ok && v != "" { + apiObject.CmkArn = aws.String(v) } - return config + 
+ return apiObject } -func flattenActionCrossRegionCopyRuleEncryptionConfiguration(rule *awstypes.EncryptionConfiguration) []any { - if rule == nil { +func flattenActionCrossRegionCopyRuleEncryptionConfiguration(apiObject *awstypes.EncryptionConfiguration) []any { + if apiObject == nil { return []any{} } - m := map[string]any{ - names.AttrEncrypted: aws.ToBool(rule.Encrypted), - "cmk_arn": aws.ToString(rule.CmkArn), + tfMap := map[string]any{ + "cmk_arn": aws.ToString(apiObject.CmkArn), + names.AttrEncrypted: aws.ToBool(apiObject.Encrypted), } - return []any{m} + return []any{tfMap} } -func expandEventSource(l []any) *awstypes.EventSource { - if len(l) == 0 || l[0] == nil { +func expandEventSource(tfList []any) *awstypes.EventSource { + if len(tfList) == 0 || tfList[0] == nil { return nil } - m := l[0].(map[string]any) - config := &awstypes.EventSource{ - Type: awstypes.EventSourceValues(m[names.AttrType].(string)), + tfMap := tfList[0].(map[string]any) + apiObject := &awstypes.EventSource{ + Type: awstypes.EventSourceValues(tfMap[names.AttrType].(string)), } - if v, ok := m[names.AttrParameters].([]any); ok && len(v) > 0 { - config.Parameters = expandEventSourceParameters(v) + if v, ok := tfMap[names.AttrParameters].([]any); ok && len(v) > 0 { + apiObject.Parameters = expandEventSourceParameters(v) } - return config + return apiObject } -func flattenEventSource(rule *awstypes.EventSource) []any { - if rule == nil { +func flattenEventSource(apiObject *awstypes.EventSource) []any { + if apiObject == nil { return []any{} } - m := map[string]any{ - names.AttrParameters: flattenEventSourceParameters(rule.Parameters), - names.AttrType: string(rule.Type), + tfMap := map[string]any{ + names.AttrParameters: flattenEventSourceParameters(apiObject.Parameters), + names.AttrType: apiObject.Type, } - return []any{m} + return []any{tfMap} } -func expandEventSourceParameters(l []any) *awstypes.EventParameters { - if len(l) == 0 || l[0] == nil { +func 
expandEventSourceParameters(tfList []any) *awstypes.EventParameters { + if len(tfList) == 0 || tfList[0] == nil { return nil } - m := l[0].(map[string]any) - config := &awstypes.EventParameters{ - DescriptionRegex: aws.String(m["description_regex"].(string)), - EventType: awstypes.EventTypeValues(m["event_type"].(string)), - SnapshotOwner: flex.ExpandStringValueSet(m["snapshot_owner"].(*schema.Set)), + tfMap := tfList[0].(map[string]any) + apiObject := &awstypes.EventParameters{ + DescriptionRegex: aws.String(tfMap["description_regex"].(string)), + EventType: awstypes.EventTypeValues(tfMap["event_type"].(string)), + SnapshotOwner: flex.ExpandStringValueSet(tfMap["snapshot_owner"].(*schema.Set)), } - return config + return apiObject } -func flattenEventSourceParameters(rule *awstypes.EventParameters) []any { - if rule == nil { +func flattenEventSourceParameters(apiObject *awstypes.EventParameters) []any { + if apiObject == nil { return []any{} } - m := map[string]any{ - "description_regex": aws.ToString(rule.DescriptionRegex), - "event_type": string(rule.EventType), - "snapshot_owner": flex.FlattenStringValueSet(rule.SnapshotOwner), + tfMap := map[string]any{ + "description_regex": aws.ToString(apiObject.DescriptionRegex), + "event_type": apiObject.EventType, + "snapshot_owner": apiObject.SnapshotOwner, } - return []any{m} + return []any{tfMap} } -func expandCrossRegionCopyRules(l []any) []awstypes.CrossRegionCopyRule { - if len(l) == 0 || l[0] == nil { +func expandCrossRegionCopyRules(tfList []any) []awstypes.CrossRegionCopyRule { + if len(tfList) == 0 || tfList[0] == nil { return nil } - var rules []awstypes.CrossRegionCopyRule - - for _, tfMapRaw := range l { - m, ok := tfMapRaw.(map[string]any) + var apiObjects []awstypes.CrossRegionCopyRule + for _, tfMapRaw := range tfList { + tfMap, ok := tfMapRaw.(map[string]any) if !ok { continue } - rule := awstypes.CrossRegionCopyRule{} + apiObject := awstypes.CrossRegionCopyRule{} - if v, ok := m["cmk_arn"].(string); ok 
&& v != "" { - rule.CmkArn = aws.String(v) + if v, ok := tfMap["cmk_arn"].(string); ok && v != "" { + apiObject.CmkArn = aws.String(v) + } + if v, ok := tfMap["copy_tags"].(bool); ok { + apiObject.CopyTags = aws.Bool(v) } - if v, ok := m["copy_tags"].(bool); ok { - rule.CopyTags = aws.Bool(v) + if v, ok := tfMap["deprecate_rule"].([]any); ok && len(v) > 0 && v[0] != nil { + apiObject.DeprecateRule = expandCrossRegionCopyRuleDeprecateRule(v) } - if v, ok := m["deprecate_rule"].([]any); ok && len(v) > 0 && v[0] != nil { - rule.DeprecateRule = expandCrossRegionCopyRuleDeprecateRule(v) + if v, ok := tfMap[names.AttrEncrypted].(bool); ok { + apiObject.Encrypted = aws.Bool(v) } - if v, ok := m[names.AttrEncrypted].(bool); ok { - rule.Encrypted = aws.Bool(v) + if v, ok := tfMap["retain_rule"].([]any); ok && len(v) > 0 && v[0] != nil { + apiObject.RetainRule = expandCrossRegionCopyRuleRetainRule(v) } - if v, ok := m["retain_rule"].([]any); ok && len(v) > 0 && v[0] != nil { - rule.RetainRule = expandCrossRegionCopyRuleRetainRule(v) + if v, ok := tfMap[names.AttrTarget].(string); ok && v != "" { + apiObject.Target = aws.String(v) } - if v, ok := m[names.AttrTarget].(string); ok && v != "" { - rule.Target = aws.String(v) + if v, ok := tfMap["target_region"].(string); ok && v != "" { + apiObject.TargetRegion = aws.String(v) } - rules = append(rules, rule) + apiObjects = append(apiObjects, apiObject) } - return rules + return apiObjects } -func flattenCrossRegionCopyRules(rules []awstypes.CrossRegionCopyRule) []any { - if len(rules) == 0 { +func flattenCrossRegionCopyRules(apiObjects []awstypes.CrossRegionCopyRule) []any { + if len(apiObjects) == 0 { return []any{} } - var result []any + var tfList []any - for _, rule := range rules { - m := map[string]any{ - "cmk_arn": aws.ToString(rule.CmkArn), - "copy_tags": aws.ToBool(rule.CopyTags), - "deprecate_rule": flattenCrossRegionCopyRuleDeprecateRule(rule.DeprecateRule), - names.AttrEncrypted: aws.ToBool(rule.Encrypted), - 
"retain_rule": flattenCrossRegionCopyRuleRetainRule(rule.RetainRule), - names.AttrTarget: aws.ToString(rule.Target), + for _, apiObject := range apiObjects { + tfMap := map[string]any{ + "cmk_arn": aws.ToString(apiObject.CmkArn), + "copy_tags": aws.ToBool(apiObject.CopyTags), + "deprecate_rule": flattenCrossRegionCopyRuleDeprecateRule(apiObject.DeprecateRule), + names.AttrEncrypted: aws.ToBool(apiObject.Encrypted), + "retain_rule": flattenCrossRegionCopyRuleRetainRule(apiObject.RetainRule), + names.AttrTarget: aws.ToString(apiObject.Target), + "target_region": aws.ToString(apiObject.TargetRegion), } - result = append(result, m) + tfList = append(tfList, tfMap) } - return result + return tfList } -func expandCrossRegionCopyRuleDeprecateRule(l []any) *awstypes.CrossRegionCopyDeprecateRule { - if len(l) == 0 || l[0] == nil { +func expandCrossRegionCopyRuleDeprecateRule(tfList []any) *awstypes.CrossRegionCopyDeprecateRule { + if len(tfList) == 0 || tfList[0] == nil { return nil } - m := l[0].(map[string]any) + tfMap := tfList[0].(map[string]any) return &awstypes.CrossRegionCopyDeprecateRule{ - Interval: aws.Int32(int32(m[names.AttrInterval].(int))), - IntervalUnit: awstypes.RetentionIntervalUnitValues(m["interval_unit"].(string)), + Interval: aws.Int32(int32(tfMap[names.AttrInterval].(int))), + IntervalUnit: awstypes.RetentionIntervalUnitValues(tfMap["interval_unit"].(string)), } } -func expandCrossRegionCopyRuleRetainRule(l []any) *awstypes.CrossRegionCopyRetainRule { - if len(l) == 0 || l[0] == nil { +func expandCrossRegionCopyRuleRetainRule(tfList []any) *awstypes.CrossRegionCopyRetainRule { + if len(tfList) == 0 || tfList[0] == nil { return nil } - m := l[0].(map[string]any) + tfMap := tfList[0].(map[string]any) return &awstypes.CrossRegionCopyRetainRule{ - Interval: aws.Int32(int32(m[names.AttrInterval].(int))), - IntervalUnit: awstypes.RetentionIntervalUnitValues(m["interval_unit"].(string)), + Interval: aws.Int32(int32(tfMap[names.AttrInterval].(int))), + 
IntervalUnit: awstypes.RetentionIntervalUnitValues(tfMap["interval_unit"].(string)), } } -func flattenCrossRegionCopyRuleDeprecateRule(rule *awstypes.CrossRegionCopyDeprecateRule) []any { - if rule == nil { +func flattenCrossRegionCopyRuleDeprecateRule(apiObject *awstypes.CrossRegionCopyDeprecateRule) []any { + if apiObject == nil { return []any{} } - m := map[string]any{ - names.AttrInterval: int(aws.ToInt32(rule.Interval)), - "interval_unit": string(rule.IntervalUnit), + tfMap := map[string]any{ + names.AttrInterval: aws.ToInt32(apiObject.Interval), + "interval_unit": apiObject.IntervalUnit, } - return []any{m} + return []any{tfMap} } -func flattenCrossRegionCopyRuleRetainRule(rule *awstypes.CrossRegionCopyRetainRule) []any { - if rule == nil { +func flattenCrossRegionCopyRuleRetainRule(apiObject *awstypes.CrossRegionCopyRetainRule) []any { + if apiObject == nil { return []any{} } - m := map[string]any{ - names.AttrInterval: int(aws.ToInt32(rule.Interval)), - "interval_unit": string(rule.IntervalUnit), + tfMap := map[string]any{ + names.AttrInterval: aws.ToInt32(apiObject.Interval), + "interval_unit": apiObject.IntervalUnit, } - return []any{m} + return []any{tfMap} } -func expandCreateRule(cfg []any) *awstypes.CreateRule { - if len(cfg) == 0 || cfg[0] == nil { +func expandCreateRule(tfList []any) *awstypes.CreateRule { + if len(tfList) == 0 || tfList[0] == nil { return nil } - c := cfg[0].(map[string]any) - createRule := &awstypes.CreateRule{} - if v, ok := c["times"].([]any); ok && len(v) > 0 { - createRule.Times = flex.ExpandStringValueList(v) - } + tfMap := tfList[0].(map[string]any) + apiObject := &awstypes.CreateRule{} - if v, ok := c[names.AttrInterval].(int); ok && v > 0 { - createRule.Interval = aws.Int32(int32(v)) + if v, ok := tfMap[names.AttrInterval].(int); ok && v > 0 { + apiObject.Interval = aws.Int32(int32(v)) + } + if v, ok := tfMap["interval_unit"].(string); ok && v != "" { + apiObject.IntervalUnit = awstypes.IntervalUnitValues(v) + } else { + 
apiObject.IntervalUnit = awstypes.IntervalUnitValuesHours + } + if v, ok := tfMap[names.AttrLocation].(string); ok && v != "" { + apiObject.Location = awstypes.LocationValues(v) + } + if v, ok := tfMap["scripts"]; ok { + apiObject.Scripts = expandScripts(v.([]any)) + } + if v, ok := tfMap["times"].([]any); ok && len(v) > 0 { + apiObject.Times = flex.ExpandStringValueList(v) } - if v, ok := c[names.AttrLocation].(string); ok && v != "" { - createRule.Location = awstypes.LocationValues(v) + if v, ok := tfMap["cron_expression"].(string); ok && v != "" { + apiObject.CronExpression = aws.String(v) + apiObject.IntervalUnit = "" // sets interval unit to empty string so that all fields related to interval are ignored } - if v, ok := c["interval_unit"].(string); ok && v != "" { - createRule.IntervalUnit = awstypes.IntervalUnitValues(v) - } else { - createRule.IntervalUnit = awstypes.IntervalUnitValuesHours + return apiObject +} + +func flattenCreateRule(apiObject *awstypes.CreateRule) []any { + if apiObject == nil { + return []any{} } - if v, ok := c["cron_expression"].(string); ok && v != "" { - createRule.CronExpression = aws.String(v) - createRule.IntervalUnit = "" // sets interval unit to empty string so that all fields related to interval are ignored + tfMap := make(map[string]any) + if apiObject.CronExpression != nil { + tfMap["cron_expression"] = aws.ToString(apiObject.CronExpression) + } + if apiObject.Interval != nil { + tfMap[names.AttrInterval] = aws.ToInt32(apiObject.Interval) } + tfMap["interval_unit"] = apiObject.IntervalUnit + tfMap[names.AttrLocation] = apiObject.Location + if apiObject.Scripts != nil { + tfMap["scripts"] = flattenScripts(apiObject.Scripts) + } + tfMap["times"] = apiObject.Times - return createRule + return []any{tfMap} } -func flattenCreateRule(createRule *awstypes.CreateRule) []map[string]any { - if createRule == nil { - return []map[string]any{} +func expandRetainRule(tfList []any) *awstypes.RetainRule { + if len(tfList) == 0 || tfList[0] 
== nil { + return nil } - result := make(map[string]any) - result["times"] = flex.FlattenStringValueList(createRule.Times) + tfMap := tfList[0].(map[string]any) + apiObject := &awstypes.RetainRule{} - if createRule.Interval != nil { - result[names.AttrInterval] = aws.ToInt32(createRule.Interval) + if v, ok := tfMap["count"].(int); ok && v > 0 { + apiObject.Count = aws.Int32(int32(v)) + } + if v, ok := tfMap[names.AttrInterval].(int); ok && v > 0 { + apiObject.Interval = aws.Int32(int32(v)) + } + if v, ok := tfMap["interval_unit"].(string); ok && v != "" { + apiObject.IntervalUnit = awstypes.RetentionIntervalUnitValues(v) } - result["interval_unit"] = string(createRule.IntervalUnit) + return apiObject +} - result[names.AttrLocation] = string(createRule.Location) +func flattenRetainRule(apiObject *awstypes.RetainRule) []any { + tfMap := make(map[string]any) + tfMap["count"] = aws.ToInt32(apiObject.Count) + tfMap[names.AttrInterval] = aws.ToInt32(apiObject.Interval) + tfMap["interval_unit"] = apiObject.IntervalUnit - if createRule.CronExpression != nil { - result["cron_expression"] = aws.ToString(createRule.CronExpression) - } - - return []map[string]any{result} + return []any{tfMap} } -func expandRetainRule(cfg []any) *awstypes.RetainRule { - if len(cfg) == 0 || cfg[0] == nil { +func expandDeprecateRule(tfList []any) *awstypes.DeprecateRule { + if len(tfList) == 0 || tfList[0] == nil { return nil } - m := cfg[0].(map[string]any) - rule := &awstypes.RetainRule{} - if v, ok := m["count"].(int); ok && v > 0 { - rule.Count = aws.Int32(int32(v)) + tfMap := tfList[0].(map[string]any) + apiObject := &awstypes.DeprecateRule{} + + if v, ok := tfMap["count"].(int); ok && v > 0 { + apiObject.Count = aws.Int32(int32(v)) } - if v, ok := m[names.AttrInterval].(int); ok && v > 0 { - rule.Interval = aws.Int32(int32(v)) + if v, ok := tfMap[names.AttrInterval].(int); ok && v > 0 { + apiObject.Interval = aws.Int32(int32(v)) } - if v, ok := m["interval_unit"].(string); ok && v != "" { - 
rule.IntervalUnit = awstypes.RetentionIntervalUnitValues(v) + if v, ok := tfMap["interval_unit"].(string); ok && v != "" { + apiObject.IntervalUnit = awstypes.RetentionIntervalUnitValues(v) } - return rule + return apiObject } -func flattenRetainRule(retainRule *awstypes.RetainRule) []map[string]any { - result := make(map[string]any) - result["count"] = aws.ToInt32(retainRule.Count) - result["interval_unit"] = string(retainRule.IntervalUnit) - result[names.AttrInterval] = aws.ToInt32(retainRule.Interval) +func flattenDeprecateRule(apiObject *awstypes.DeprecateRule) []any { + tfMap := make(map[string]any) + tfMap["count"] = aws.ToInt32(apiObject.Count) + tfMap[names.AttrInterval] = aws.ToInt32(apiObject.Interval) + tfMap["interval_unit"] = apiObject.IntervalUnit - return []map[string]any{result} + return []any{tfMap} } -func expandDeprecateRule(cfg []any) *awstypes.DeprecateRule { - if len(cfg) == 0 || cfg[0] == nil { +func expandFastRestoreRule(tfList []any) *awstypes.FastRestoreRule { + if len(tfList) == 0 || tfList[0] == nil { return nil } - m := cfg[0].(map[string]any) - rule := &awstypes.DeprecateRule{} - if v, ok := m["count"].(int); ok && v > 0 { - rule.Count = aws.Int32(int32(v)) + tfMap := tfList[0].(map[string]any) + apiObject := &awstypes.FastRestoreRule{ + AvailabilityZones: flex.ExpandStringValueSet(tfMap[names.AttrAvailabilityZones].(*schema.Set)), + } + + if v, ok := tfMap["count"].(int); ok && v > 0 { + apiObject.Count = aws.Int32(int32(v)) } - if v, ok := m[names.AttrInterval].(int); ok && v > 0 { - rule.Interval = aws.Int32(int32(v)) + if v, ok := tfMap[names.AttrInterval].(int); ok && v > 0 { + apiObject.Interval = aws.Int32(int32(v)) } - if v, ok := m["interval_unit"].(string); ok && v != "" { - rule.IntervalUnit = awstypes.RetentionIntervalUnitValues(v) + if v, ok := tfMap["interval_unit"].(string); ok && v != "" { + apiObject.IntervalUnit = awstypes.RetentionIntervalUnitValues(v) } - return rule + return apiObject } -func 
flattenDeprecateRule(rule *awstypes.DeprecateRule) []map[string]any { - result := make(map[string]any) - result["count"] = aws.ToInt32(rule.Count) - result["interval_unit"] = string(rule.IntervalUnit) - result[names.AttrInterval] = aws.ToInt32(rule.Interval) +func flattenFastRestoreRule(apiObject *awstypes.FastRestoreRule) []any { + tfMap := make(map[string]any) + tfMap[names.AttrAvailabilityZones] = apiObject.AvailabilityZones + tfMap["count"] = aws.ToInt32(apiObject.Count) + tfMap[names.AttrInterval] = aws.ToInt32(apiObject.Interval) + tfMap["interval_unit"] = apiObject.IntervalUnit - return []map[string]any{result} + return []any{tfMap} } -func expandFastRestoreRule(cfg []any) *awstypes.FastRestoreRule { - if len(cfg) == 0 || cfg[0] == nil { +func expandShareRule(tfList []any) []awstypes.ShareRule { + if len(tfList) == 0 || tfList[0] == nil { return nil } - m := cfg[0].(map[string]any) - rule := &awstypes.FastRestoreRule{ - AvailabilityZones: flex.ExpandStringValueSet(m[names.AttrAvailabilityZones].(*schema.Set)), - } - if v, ok := m["count"].(int); ok && v > 0 { - rule.Count = aws.Int32(int32(v)) + apiObjects := make([]awstypes.ShareRule, 0) + + for _, tfMapRaw := range tfList { + tfMap := tfMapRaw.(map[string]any) + + apiObject := awstypes.ShareRule{ + TargetAccounts: flex.ExpandStringValueSet(tfMap["target_accounts"].(*schema.Set)), + } + + if v, ok := tfMap["unshare_interval"].(int); ok && v > 0 { + apiObject.UnshareInterval = aws.Int32(int32(v)) + } + + if v, ok := tfMap["unshare_interval_unit"].(string); ok && v != "" { + apiObject.UnshareIntervalUnit = awstypes.RetentionIntervalUnitValues(v) + } + + apiObjects = append(apiObjects, apiObject) } - if v, ok := m[names.AttrInterval].(int); ok && v > 0 { - rule.Interval = aws.Int32(int32(v)) + return apiObjects +} + +func flattenShareRule(apiObjects []awstypes.ShareRule) []any { + tfList := make([]any, 0) + + for _, apiObject := range apiObjects { + tfMap := make(map[string]any) + if apiObject.TargetAccounts 
!= nil { + tfMap["target_accounts"] = apiObject.TargetAccounts + } + if apiObject.UnshareInterval != nil { + tfMap["unshare_interval"] = aws.ToInt32(apiObject.UnshareInterval) + } + tfMap["unshare_interval_unit"] = apiObject.UnshareIntervalUnit + + tfList = append(tfList, tfMap) } - if v, ok := m["interval_unit"].(string); ok && v != "" { - rule.IntervalUnit = awstypes.RetentionIntervalUnitValues(v) + return tfList +} + +func expandTags(tfMap map[string]any) []awstypes.Tag { + var apiObjects []awstypes.Tag + + for k, v := range tfMap { + apiObjects = append(apiObjects, awstypes.Tag{ + Key: aws.String(k), + Value: aws.String(v.(string)), + }) } - return rule + return apiObjects } -func flattenFastRestoreRule(rule *awstypes.FastRestoreRule) []map[string]any { - result := make(map[string]any) - result["count"] = aws.ToInt32(rule.Count) - result["interval_unit"] = string(rule.IntervalUnit) - result[names.AttrInterval] = aws.ToInt32(rule.Interval) - result[names.AttrAvailabilityZones] = flex.FlattenStringValueSet(rule.AvailabilityZones) +func flattenTags(apiObjects []awstypes.Tag) map[string]string { + tfMap := make(map[string]string) + + for _, apiObject := range apiObjects { + tfMap[aws.ToString(apiObject.Key)] = aws.ToString(apiObject.Value) + } - return []map[string]any{result} + return tfMap } -func expandShareRule(cfg []any) []awstypes.ShareRule { - if len(cfg) == 0 || cfg[0] == nil { +func expandParameters(tfList []any, policyType awstypes.PolicyTypeValues) *awstypes.Parameters { + if len(tfList) == 0 || tfList[0] == nil { return nil } - rules := make([]awstypes.ShareRule, 0) + tfMap := tfList[0].(map[string]any) + apiObject := &awstypes.Parameters{} - for _, shareRule := range cfg { - m := shareRule.(map[string]any) + if v, ok := tfMap["exclude_boot_volume"].(bool); ok && policyType == awstypes.PolicyTypeValuesEbsSnapshotManagement { + apiObject.ExcludeBootVolume = aws.Bool(v) + } - rule := awstypes.ShareRule{ - TargetAccounts: 
flex.ExpandStringValueSet(m["target_accounts"].(*schema.Set)), - } + if v, ok := tfMap["no_reboot"].(bool); ok && policyType == awstypes.PolicyTypeValuesImageManagement { + apiObject.NoReboot = aws.Bool(v) + } - if v, ok := m["unshare_interval"].(int); ok && v > 0 { - rule.UnshareInterval = aws.Int32(int32(v)) - } + return apiObject +} + +func flattenParameters(apiObject *awstypes.Parameters) []any { + tfMap := make(map[string]any) + if apiObject.ExcludeBootVolume != nil { + tfMap["exclude_boot_volume"] = aws.ToBool(apiObject.ExcludeBootVolume) + } + if apiObject.NoReboot != nil { + tfMap["no_reboot"] = aws.ToBool(apiObject.NoReboot) + } + + return []any{tfMap} +} + +func expandScripts(tfList []any) []awstypes.Script { + apiObjects := make([]awstypes.Script, len(tfList)) - if v, ok := m["unshare_interval_unit"].(string); ok && v != "" { - rule.UnshareIntervalUnit = awstypes.RetentionIntervalUnitValues(v) + for i, tfMapRaw := range tfList { + tfMap := tfMapRaw.(map[string]any) + apiObject := awstypes.Script{} + + if v, ok := tfMap["execute_operation_on_script_failure"].(bool); ok { + apiObject.ExecuteOperationOnScriptFailure = aws.Bool(v) + } + if v, ok := tfMap["execution_handler"].(string); ok { + apiObject.ExecutionHandler = aws.String(v) + } + if v, ok := tfMap["execution_handler_service"].(string); ok && v != "" { + apiObject.ExecutionHandlerService = awstypes.ExecutionHandlerServiceValues(v) + } + if v, ok := tfMap["execution_timeout"].(int); ok && v > 0 { + apiObject.ExecutionTimeout = aws.Int32(int32(v)) + } + if v, ok := tfMap["maximum_retry_count"].(int); ok && v > 0 { + apiObject.MaximumRetryCount = aws.Int32(int32(v)) + } + if v, ok := tfMap["stages"].([]any); ok && len(v) > 0 { + apiObject.Stages = flex.ExpandStringyValueList[awstypes.StageValues](v) } - rules = append(rules, rule) + apiObjects[i] = apiObject } - return rules + return apiObjects } -func flattenShareRule(rules []awstypes.ShareRule) []map[string]any { - values := make([]map[string]any, 0) 
+func flattenScripts(apiObjects []awstypes.Script) []any { + tfList := make([]any, len(apiObjects)) - for _, v := range rules { - rule := make(map[string]any) + for i, apiObject := range apiObjects { + tfMap := make(map[string]any) + tfMap["execute_operation_on_script_failure"] = aws.ToBool(apiObject.ExecuteOperationOnScriptFailure) + tfMap["execution_handler"] = aws.ToString(apiObject.ExecutionHandler) + tfMap["execution_handler_service"] = apiObject.ExecutionHandlerService + tfMap["execution_timeout"] = aws.ToInt32(apiObject.ExecutionTimeout) + tfMap["maximum_retry_count"] = aws.ToInt32(apiObject.MaximumRetryCount) + tfMap["stages"] = apiObject.Stages - if v.TargetAccounts != nil { - rule["target_accounts"] = flex.FlattenStringValueSet(v.TargetAccounts) - } + tfList[i] = tfMap + } - rule["unshare_interval_unit"] = string(v.UnshareIntervalUnit) + return tfList +} - if v.UnshareInterval != nil { - rule["unshare_interval"] = aws.ToInt32(v.UnshareInterval) - } +func expandArchiveRule(tfList []any) *awstypes.ArchiveRule { + if len(tfList) == 0 || tfList[0] == nil { + return nil + } - values = append(values, rule) + tfMap := tfList[0].(map[string]any) + + return &awstypes.ArchiveRule{ + RetainRule: expandArchiveRetainRule(tfMap["archive_retain_rule"].([]any)), } +} - return values +func flattenArchiveRule(apiObject *awstypes.ArchiveRule) []any { + if apiObject == nil { + return []any{} + } + + tfMap := make(map[string]any) + tfMap["archive_retain_rule"] = flattenArchiveRetainRule(apiObject.RetainRule) + + return []any{tfMap} } -func expandTags(m map[string]any) []awstypes.Tag { - var result []awstypes.Tag - for k, v := range m { - result = append(result, awstypes.Tag{ - Key: aws.String(k), - Value: aws.String(v.(string)), - }) +func expandArchiveRetainRule(tfList []any) *awstypes.ArchiveRetainRule { + if len(tfList) == 0 || tfList[0] == nil { + return nil } - return result + tfMap := tfList[0].(map[string]any) + + return &awstypes.ArchiveRetainRule{ + 
RetentionArchiveTier: expandRetentionArchiveTier(tfMap["retention_archive_tier"].([]any)), + } } -func flattenTags(tags []awstypes.Tag) map[string]string { - result := make(map[string]string) - for _, t := range tags { - result[aws.ToString(t.Key)] = aws.ToString(t.Value) +func flattenArchiveRetainRule(apiObject *awstypes.ArchiveRetainRule) []any { + if apiObject == nil { + return []any{} } - return result + tfMap := make(map[string]any) + tfMap["retention_archive_tier"] = flattenRetentionArchiveTier(apiObject.RetentionArchiveTier) + + return []any{tfMap} } -func expandParameters(cfg []any, policyType string) *awstypes.Parameters { - if len(cfg) == 0 || cfg[0] == nil { +func expandRetentionArchiveTier(tfList []any) *awstypes.RetentionArchiveTier { + if len(tfList) == 0 || tfList[0] == nil { return nil } - m := cfg[0].(map[string]any) - parameters := &awstypes.Parameters{} - if v, ok := m["exclude_boot_volume"].(bool); ok && policyType == string(awstypes.PolicyTypeValuesEbsSnapshotManagement) { - parameters.ExcludeBootVolume = aws.Bool(v) + tfMap := tfList[0].(map[string]any) + apiObject := &awstypes.RetentionArchiveTier{} + + if v, ok := tfMap["count"].(int); ok && v > 0 { + apiObject.Count = aws.Int32(int32(v)) + } + + if v, ok := tfMap[names.AttrInterval].(int); ok && v > 0 { + apiObject.Interval = aws.Int32(int32(v)) + } + + if v, ok := tfMap["interval_unit"].(string); ok && v != "" { + apiObject.IntervalUnit = awstypes.RetentionIntervalUnitValues(v) } - if v, ok := m["no_reboot"].(bool); ok && policyType == string(awstypes.PolicyTypeValuesImageManagement) { - parameters.NoReboot = aws.Bool(v) + return apiObject +} + +func flattenRetentionArchiveTier(apiObject *awstypes.RetentionArchiveTier) []any { + if apiObject == nil { + return []any{} } - return parameters + tfMap := make(map[string]any) + tfMap["count"] = aws.ToInt32(apiObject.Count) + tfMap[names.AttrInterval] = aws.ToInt32(apiObject.Interval) + tfMap["interval_unit"] = apiObject.IntervalUnit + + return 
[]any{tfMap} } -func flattenParameters(parameters *awstypes.Parameters) []map[string]any { - result := make(map[string]any) - if parameters.ExcludeBootVolume != nil { - result["exclude_boot_volume"] = aws.ToBool(parameters.ExcludeBootVolume) +func expandExclusions(tfList []any) *awstypes.Exclusions { + if len(tfList) == 0 || tfList[0] == nil { + return nil } - if parameters.NoReboot != nil { - result["no_reboot"] = aws.ToBool(parameters.NoReboot) + tfMap := tfList[0].(map[string]any) + apiObject := &awstypes.Exclusions{} + + if v, ok := tfMap["exclude_boot_volumes"].(bool); ok { + apiObject.ExcludeBootVolumes = aws.Bool(v) + } + if v, ok := tfMap["exclude_tags"].(map[string]any); ok { + apiObject.ExcludeTags = expandTags(v) } + if v, ok := tfMap["exclude_volume_types"].([]any); ok && len(v) > 0 { + apiObject.ExcludeVolumeTypes = flex.ExpandStringValueList(v) + } + + return apiObject +} + +func flattenExclusions(apiObject *awstypes.Exclusions) []any { + if apiObject == nil { + return []any{} + } + + tfMap := make(map[string]any) + tfMap["exclude_boot_volumes"] = aws.ToBool(apiObject.ExcludeBootVolumes) + tfMap["exclude_tags"] = flattenTags(apiObject.ExcludeTags) + tfMap["exclude_volume_types"] = apiObject.ExcludeVolumeTypes - return []map[string]any{result} + return []any{tfMap} } diff --git a/internal/service/dlm/lifecycle_policy_test.go b/internal/service/dlm/lifecycle_policy_test.go index fe924405cadd..3446857a1306 100644 --- a/internal/service/dlm/lifecycle_policy_test.go +++ b/internal/service/dlm/lifecycle_policy_test.go @@ -37,7 +37,6 @@ func TestAccDLMLifecyclePolicy_basic(t *testing.T) { Config: testAccLifecyclePolicyConfig_basic(rName), Check: resource.ComposeTestCheckFunc( checkLifecyclePolicyExists(ctx, resourceName), - acctest.MatchResourceAttrRegionalARN(ctx, resourceName, names.AttrARN, "dlm", regexache.MustCompile(`policy/.+`)), resource.TestCheckResourceAttr(resourceName, names.AttrDescription, "tf-acc-basic"), 
resource.TestCheckResourceAttrSet(resourceName, names.AttrExecutionRoleARN), resource.TestCheckResourceAttr(resourceName, names.AttrState, "ENABLED"), @@ -142,6 +141,125 @@ func TestAccDLMLifecyclePolicy_cron(t *testing.T) { }) } +func TestAccDLMLifecyclePolicy_scriptsAlias(t *testing.T) { + ctx := acctest.Context(t) + resourceName := "aws_dlm_lifecycle_policy.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.DLMServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckLifecyclePolicyDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccLifecyclePolicyConfig_scriptsAlias(rName), + Check: resource.ComposeTestCheckFunc( + checkLifecyclePolicyExists(ctx, resourceName), + resource.TestCheckResourceAttr(resourceName, "policy_details.0.schedule.0.name", "tf-acc-basic"), + resource.TestCheckResourceAttr(resourceName, "policy_details.0.schedule.0.create_rule.0.scripts.0.execution_handler", "AWS_VSS_BACKUP"), + resource.TestCheckResourceAttr(resourceName, "policy_details.0.schedule.0.create_rule.0.scripts.0.execute_operation_on_script_failure", acctest.CtFalse), + resource.TestCheckResourceAttr(resourceName, "policy_details.0.schedule.0.create_rule.0.scripts.0.maximum_retry_count", "3"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccDLMLifecyclePolicy_scriptsSSMDocument(t *testing.T) { + ctx := acctest.Context(t) + resourceName := "aws_dlm_lifecycle_policy.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.DLMServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + 
CheckDestroy: testAccCheckLifecyclePolicyDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccLifecyclePolicyConfig_scriptsSSMDocument(rName), + Check: resource.ComposeTestCheckFunc( + checkLifecyclePolicyExists(ctx, resourceName), + resource.TestCheckResourceAttr(resourceName, "policy_details.0.schedule.0.name", "tf-acc-basic"), + resource.TestCheckResourceAttrSet(resourceName, "policy_details.0.schedule.0.create_rule.0.scripts.0.execution_handler"), + resource.TestCheckResourceAttr(resourceName, "policy_details.0.schedule.0.create_rule.0.scripts.0.execute_operation_on_script_failure", acctest.CtFalse), + resource.TestCheckResourceAttr(resourceName, "policy_details.0.schedule.0.create_rule.0.scripts.0.execution_timeout", "60"), + resource.TestCheckResourceAttr(resourceName, "policy_details.0.schedule.0.create_rule.0.scripts.0.maximum_retry_count", "3"), + resource.TestCheckResourceAttr(resourceName, "policy_details.0.schedule.0.create_rule.0.scripts.0.stages.0", "PRE"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccDLMLifecyclePolicy_archiveRuleCount(t *testing.T) { + ctx := acctest.Context(t) + resourceName := "aws_dlm_lifecycle_policy.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.DLMServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckLifecyclePolicyDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccLifecyclePolicyConfig_archiveRuleCount(rName), + Check: resource.ComposeTestCheckFunc( + checkLifecyclePolicyExists(ctx, resourceName), + resource.TestCheckResourceAttr(resourceName, "policy_details.0.schedule.0.name", "tf-acc-basic"), + resource.TestCheckResourceAttr(resourceName, 
"policy_details.0.schedule.0.archive_rule.0.archive_retain_rule.0.retention_archive_tier.0.count", "10"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccDLMLifecyclePolicy_archiveRuleInterval(t *testing.T) { + ctx := acctest.Context(t) + resourceName := "aws_dlm_lifecycle_policy.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.DLMServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckLifecyclePolicyDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccLifecyclePolicyConfig_archiveRuleInterval(rName), + Check: resource.ComposeTestCheckFunc( + checkLifecyclePolicyExists(ctx, resourceName), + resource.TestCheckResourceAttr(resourceName, "policy_details.0.schedule.0.name", "tf-acc-basic"), + resource.TestCheckResourceAttr(resourceName, "policy_details.0.schedule.0.archive_rule.0.archive_retain_rule.0.retention_archive_tier.0.interval", "6"), + resource.TestCheckResourceAttr(resourceName, "policy_details.0.schedule.0.archive_rule.0.archive_retain_rule.0.retention_archive_tier.0.interval_unit", "MONTHS"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + func TestAccDLMLifecyclePolicy_retainInterval(t *testing.T) { ctx := acctest.Context(t) resourceName := "aws_dlm_lifecycle_policy.test" @@ -198,6 +316,89 @@ func TestAccDLMLifecyclePolicy_deprecate(t *testing.T) { }) } +func TestAccDLMLifecyclePolicy_defaultPolicy(t *testing.T) { + ctx := acctest.Context(t) + resourceName := "aws_dlm_lifecycle_policy.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, + 
ErrorCheck: acctest.ErrorCheck(t, names.DLMServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckLifecyclePolicyDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccLifecyclePolicyConfig_defaultPolicy(rName), + Check: resource.ComposeTestCheckFunc( + checkLifecyclePolicyExists(ctx, resourceName), + acctest.MatchResourceAttrRegionalARN(ctx, resourceName, names.AttrARN, "dlm", regexache.MustCompile(`policy/.+`)), + resource.TestCheckResourceAttr(resourceName, names.AttrDescription, "tf-acc-basic"), + resource.TestCheckResourceAttrSet(resourceName, names.AttrExecutionRoleARN), + resource.TestCheckResourceAttr(resourceName, names.AttrState, "ENABLED"), + resource.TestCheckResourceAttr(resourceName, "default_policy", "VOLUME"), + resource.TestCheckResourceAttr(resourceName, "policy_details.0.copy_tags", acctest.CtFalse), + resource.TestCheckResourceAttr(resourceName, "policy_details.0.create_interval", "5"), + resource.TestCheckResourceAttr(resourceName, "policy_details.0.extend_deletion", acctest.CtFalse), + resource.TestCheckResourceAttr(resourceName, "policy_details.0.resource_type", "VOLUME"), + resource.TestCheckResourceAttr(resourceName, "policy_details.0.retain_interval", "7"), + resource.TestCheckResourceAttr(resourceName, "policy_details.0.policy_language", "SIMPLIFIED"), + resource.TestCheckResourceAttr(resourceName, "policy_details.0.action.#", "0"), + resource.TestCheckResourceAttr(resourceName, "policy_details.0.event_source.#", "0"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"default_policy"}, + }, + }, + }) +} + +func TestAccDLMLifecyclePolicy_defaultPolicyExclusions(t *testing.T) { + ctx := acctest.Context(t) + resourceName := "aws_dlm_lifecycle_policy.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, 
t); testAccPreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.DLMServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckLifecyclePolicyDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccLifecyclePolicyConfig_defaultPolicyExclusions(rName), + Check: resource.ComposeTestCheckFunc( + checkLifecyclePolicyExists(ctx, resourceName), + acctest.MatchResourceAttrRegionalARN(ctx, resourceName, names.AttrARN, "dlm", regexache.MustCompile(`policy/.+`)), + resource.TestCheckResourceAttr(resourceName, names.AttrDescription, "tf-acc-basic"), + resource.TestCheckResourceAttrSet(resourceName, names.AttrExecutionRoleARN), + resource.TestCheckResourceAttr(resourceName, names.AttrState, "ENABLED"), + resource.TestCheckResourceAttr(resourceName, "default_policy", "VOLUME"), + resource.TestCheckResourceAttr(resourceName, "policy_details.0.copy_tags", acctest.CtFalse), + resource.TestCheckResourceAttr(resourceName, "policy_details.0.create_interval", "5"), + resource.TestCheckResourceAttr(resourceName, "policy_details.0.extend_deletion", acctest.CtFalse), + resource.TestCheckResourceAttr(resourceName, "policy_details.0.resource_type", "VOLUME"), + resource.TestCheckResourceAttr(resourceName, "policy_details.0.retain_interval", "7"), + resource.TestCheckResourceAttr(resourceName, "policy_details.0.policy_language", "SIMPLIFIED"), + resource.TestCheckResourceAttr(resourceName, "policy_details.0.action.#", "0"), + resource.TestCheckResourceAttr(resourceName, "policy_details.0.event_source.#", "0"), + resource.TestCheckResourceAttr(resourceName, "policy_details.0.exclusions.0.exclude_boot_volumes", acctest.CtFalse), + resource.TestCheckResourceAttr(resourceName, "policy_details.0.exclusions.0.exclude_tags.test", "exclude"), + resource.TestCheckResourceAttr(resourceName, "policy_details.0.exclusions.0.exclude_volume_types.0", "gp2"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: 
true, + ImportStateVerifyIgnore: []string{"default_policy"}, + }, + }, + }) +} + func TestAccDLMLifecyclePolicy_fastRestore(t *testing.T) { ctx := acctest.Context(t) resourceName := "aws_dlm_lifecycle_policy.test" @@ -456,6 +657,42 @@ func TestAccDLMLifecyclePolicy_crossRegionCopyRule(t *testing.T) { }) } +func TestAccDLMLifecyclePolicy_crossRegionCopyRuleImageManagement(t *testing.T) { + ctx := acctest.Context(t) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_dlm_lifecycle_policy.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckMultipleRegion(t, 2) + testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.DLMServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + CheckDestroy: testAccCheckLifecyclePolicyDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccLifecyclePolicyConfig_crossRegionCopyRuleImageManagement(rName), + Check: resource.ComposeTestCheckFunc( + checkLifecyclePolicyExists(ctx, resourceName), + resource.TestCheckResourceAttr(resourceName, "policy_details.0.policy_type", "IMAGE_MANAGEMENT"), + resource.TestCheckResourceAttr(resourceName, "policy_details.0.schedule.0.cross_region_copy_rule.#", "1"), + resource.TestCheckResourceAttr(resourceName, "policy_details.0.schedule.0.cross_region_copy_rule.0.encrypted", acctest.CtFalse), + resource.TestCheckResourceAttr(resourceName, "policy_details.0.schedule.0.cross_region_copy_rule.0.retain_rule.0.interval", "15"), + resource.TestCheckResourceAttr(resourceName, "policy_details.0.schedule.0.cross_region_copy_rule.0.retain_rule.0.interval_unit", "DAYS"), + resource.TestCheckResourceAttr(resourceName, "policy_details.0.schedule.0.cross_region_copy_rule.0.target_region", acctest.AlternateRegion()), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + func TestAccDLMLifecyclePolicy_tags(t 
*testing.T) { ctx := acctest.Context(t) rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -641,6 +878,46 @@ resource "aws_dlm_lifecycle_policy" "test" { `) } +func testAccLifecyclePolicyConfig_defaultPolicy(rName string) string { + return acctest.ConfigCompose(lifecyclePolicyBaseConfig(rName), ` +resource "aws_dlm_lifecycle_policy" "test" { + description = "tf-acc-basic" + execution_role_arn = aws_iam_role.test.arn + default_policy = "VOLUME" + + policy_details { + create_interval = 5 + resource_type = "VOLUME" + policy_language = "SIMPLIFIED" + } +} +`) +} + +func testAccLifecyclePolicyConfig_defaultPolicyExclusions(rName string) string { + return acctest.ConfigCompose(lifecyclePolicyBaseConfig(rName), ` +resource "aws_dlm_lifecycle_policy" "test" { + description = "tf-acc-basic" + execution_role_arn = aws_iam_role.test.arn + default_policy = "VOLUME" + + policy_details { + create_interval = 5 + resource_type = "VOLUME" + policy_language = "SIMPLIFIED" + + exclusions { + exclude_boot_volumes = false + exclude_tags = { + test = "exclude" + } + exclude_volume_types = ["gp2"] + } + } +} +`) +} + func testAccLifecyclePolicyConfig_event(rName string) string { return acctest.ConfigCompose(lifecyclePolicyBaseConfig(rName), fmt.Sprintf(` data "aws_caller_identity" "current" {} @@ -780,6 +1057,214 @@ resource "aws_dlm_lifecycle_policy" "test" { `) } +func testAccLifecyclePolicyConfig_archiveRuleCount(rName string) string { + return acctest.ConfigCompose(lifecyclePolicyBaseConfig(rName), ` +resource "aws_dlm_lifecycle_policy" "test" { + description = "tf-acc-basic" + execution_role_arn = aws_iam_role.test.arn + + policy_details { + resource_types = ["VOLUME"] + + schedule { + name = "tf-acc-basic" + + create_rule { + cron_expression = "cron(5 14 3 * ? 
*)" + } + + archive_rule { + archive_retain_rule { + retention_archive_tier { + count = 10 + } + } + } + + retain_rule { + count = 10 + } + } + + target_tags = { + tf-acc-test = "basic" + } + } +} +`) +} + +func testAccLifecyclePolicyConfig_archiveRuleInterval(rName string) string { + return acctest.ConfigCompose(lifecyclePolicyBaseConfig(rName), ` +resource "aws_dlm_lifecycle_policy" "test" { + description = "tf-acc-basic" + execution_role_arn = aws_iam_role.test.arn + + policy_details { + resource_types = ["VOLUME"] + + schedule { + name = "tf-acc-basic" + + create_rule { + cron_expression = "cron(5 14 3 * ? *)" + } + + archive_rule { + archive_retain_rule { + retention_archive_tier { + interval = 6 + interval_unit = "MONTHS" + } + } + } + + retain_rule { + interval = 12 + interval_unit = "MONTHS" + } + } + + target_tags = { + tf-acc-test = "basic" + } + } +} +`) +} + +func testAccLifecyclePolicyConfig_scriptsAlias(rName string) string { + return acctest.ConfigCompose(lifecyclePolicyBaseConfig(rName), ` +data "aws_iam_policy" "test" { + name = "AWSDataLifecycleManagerSSMFullAccess" +} + +resource "aws_iam_role_policy_attachment" "test" { + role = aws_iam_role.test.id + policy_arn = data.aws_iam_policy.test.arn +} + +resource "aws_dlm_lifecycle_policy" "test" { + description = "tf-acc-basic" + execution_role_arn = aws_iam_role.test.arn + + policy_details { + resource_types = ["INSTANCE"] + + schedule { + name = "tf-acc-basic" + + create_rule { + interval = 12 + scripts { + execute_operation_on_script_failure = false + execution_handler = "AWS_VSS_BACKUP" + maximum_retry_count = 3 + } + } + + retain_rule { + count = 10 + } + } + + target_tags = { + tf-acc-test = "basic" + } + } +} +`) +} + +func testAccLifecyclePolicyConfig_scriptsSSMDocument(rName string) string { + return acctest.ConfigCompose(lifecyclePolicyBaseConfig(rName), ` +data "aws_iam_policy" "test" { + name = "AWSDataLifecycleManagerSSMFullAccess" +} + +resource "aws_iam_role_policy_attachment" "test" { 
+ role = aws_iam_role.test.id + policy_arn = data.aws_iam_policy.test.arn +} + +resource "aws_ssm_document" "test" { + name = "tf-acc-basic" + document_type = "Command" + + tags = { + DLMScriptsAccess = "true" + } + + content = < 0 && v.([]any)[0] != nil { + settings = expandPostgreSQLSettings(v.([]any)[0].(map[string]any)) + } else { + settings = &awstypes.PostgreSQLSettings{} } + settings.DatabaseName = aws.String(d.Get(names.AttrDatabaseName).(string)) if _, ok := d.GetOk("secrets_manager_arn"); ok { settings.SecretsManagerAccessRoleArn = aws.String(d.Get("secrets_manager_access_role_arn").(string)) settings.SecretsManagerSecretId = aws.String(d.Get("secrets_manager_arn").(string)) - settings.DatabaseName = aws.String(d.Get(names.AttrDatabaseName).(string)) } else { + if v, ok := d.GetOk(names.AttrPassword); ok { + settings.Password = aws.String(v.(string)) + } + settings.Username = aws.String(d.Get(names.AttrUsername).(string)) - settings.Password = aws.String(d.Get(names.AttrPassword).(string)) settings.ServerName = aws.String(d.Get("server_name").(string)) settings.Port = aws.Int32(int32(d.Get(names.AttrPort).(int))) - settings.DatabaseName = aws.String(d.Get(names.AttrDatabaseName).(string)) // Set connection info in top-level namespace as well expandTopLevelConnectionInfo(d, &input) @@ -671,24 +777,30 @@ func resourceEndpointCreate(ctx context.Context, d *schema.ResourceData, meta an input.MongoDbSettings = settings case engineNameOracle: + var settings = &awstypes.OracleSettings{ + DatabaseName: aws.String(d.Get(names.AttrDatabaseName).(string)), + } + + if v, ok := d.GetOk("oracle_settings"); ok && len(v.([]any)) > 0 && v.([]any)[0] != nil { + settings.AuthenticationMethod = expandOracleSettings(v.([]any)).AuthenticationMethod + } if _, ok := d.GetOk("secrets_manager_arn"); ok { - input.OracleSettings = &awstypes.OracleSettings{ - SecretsManagerAccessRoleArn: aws.String(d.Get("secrets_manager_access_role_arn").(string)), - SecretsManagerSecretId: 
aws.String(d.Get("secrets_manager_arn").(string)), - DatabaseName: aws.String(d.Get(names.AttrDatabaseName).(string)), - } + settings.SecretsManagerAccessRoleArn = aws.String(d.Get("secrets_manager_access_role_arn").(string)) + settings.SecretsManagerSecretId = aws.String(d.Get("secrets_manager_arn").(string)) } else { - input.OracleSettings = &awstypes.OracleSettings{ - Username: aws.String(d.Get(names.AttrUsername).(string)), - Password: aws.String(d.Get(names.AttrPassword).(string)), - ServerName: aws.String(d.Get("server_name").(string)), - Port: aws.Int32(int32(d.Get(names.AttrPort).(int))), - DatabaseName: aws.String(d.Get(names.AttrDatabaseName).(string)), + if v, ok := d.GetOk(names.AttrPassword); ok { + settings.Password = aws.String(v.(string)) } + settings.Username = aws.String(d.Get(names.AttrUsername).(string)) + settings.ServerName = aws.String(d.Get("server_name").(string)) + settings.Port = aws.Int32(int32(d.Get(names.AttrPort).(int))) + // Set connection info in top-level namespace as well expandTopLevelConnectionInfo(d, &input) } + + input.OracleSettings = settings case engineNameRedis: input.RedisSettings = expandRedisSettings(d.Get("redis_settings").([]any)[0].(map[string]any)) case engineNameRedshift: @@ -700,8 +812,11 @@ func resourceEndpointCreate(ctx context.Context, d *schema.ResourceData, meta an settings.SecretsManagerAccessRoleArn = aws.String(d.Get("secrets_manager_access_role_arn").(string)) settings.SecretsManagerSecretId = aws.String(d.Get("secrets_manager_arn").(string)) } else { + if v, ok := d.GetOk(names.AttrPassword); ok { + settings.Password = aws.String(v.(string)) + } + settings.Username = aws.String(d.Get(names.AttrUsername).(string)) - settings.Password = aws.String(d.Get(names.AttrPassword).(string)) settings.ServerName = aws.String(d.Get("server_name").(string)) settings.Port = aws.Int32(int32(d.Get(names.AttrPort).(int))) @@ -795,8 +910,8 @@ func resourceEndpointCreate(ctx context.Context, d *schema.ResourceData, meta an 
expandTopLevelConnectionInfo(d, &input) } - _, err := tfresource.RetryWhenIsA[*awstypes.AccessDeniedFault](ctx, d.Timeout(schema.TimeoutCreate), - func() (any, error) { + _, err := tfresource.RetryWhenIsA[any, *awstypes.AccessDeniedFault](ctx, d.Timeout(schema.TimeoutCreate), + func(ctx context.Context) (any, error) { return conn.CreateEndpoint(ctx, &input) }) @@ -853,6 +968,7 @@ func resourceEndpointUpdate(ctx context.Context, d *schema.ResourceData, meta an if d.HasChangesExcept("pause_replication_tasks") { input := dms.ModifyEndpointInput{ EndpointArn: aws.String(endpointARN), + EngineName: aws.String(d.Get("engine_name").(string)), } if d.HasChange(names.AttrCertificateARN) { @@ -863,10 +979,6 @@ func resourceEndpointUpdate(ctx context.Context, d *schema.ResourceData, meta an input.EndpointType = awstypes.ReplicationEndpointTypeValue(d.Get(names.AttrEndpointType).(string)) } - if d.HasChange("engine_name") { - input.EngineName = aws.String(d.Get("engine_name").(string)) - } - if d.HasChange("extra_connection_attributes") { input.ExtraConnectionAttributes = aws.String(d.Get("extra_connection_attributes").(string)) } @@ -884,50 +996,68 @@ func resourceEndpointUpdate(ctx context.Context, d *schema.ResourceData, meta an switch engineName := d.Get("engine_name").(string); engineName { case engineNameAurora, engineNameMariadb, engineNameMySQL: if d.HasChanges( - names.AttrUsername, names.AttrPassword, "server_name", names.AttrPort, names.AttrDatabaseName, "secrets_manager_access_role_arn", - "secrets_manager_arn") { + names.AttrUsername, names.AttrPassword, "server_name", names.AttrPort, names.AttrDatabaseName, + "secrets_manager_access_role_arn", "secrets_manager_arn", "mysql_settings") { + var settings *awstypes.MySQLSettings + + if v, ok := d.GetOk("mysql_settings"); ok && len(v.([]any)) > 0 && v.([]any)[0] != nil { + settings = expandMySQLSettings(v.([]any)[0].(map[string]any)) + } else { + settings = &awstypes.MySQLSettings{} + } + if _, ok := 
d.GetOk("secrets_manager_arn"); ok { - input.MySQLSettings = &awstypes.MySQLSettings{ - SecretsManagerAccessRoleArn: aws.String(d.Get("secrets_manager_access_role_arn").(string)), - SecretsManagerSecretId: aws.String(d.Get("secrets_manager_arn").(string)), - } + settings.SecretsManagerAccessRoleArn = aws.String(d.Get("secrets_manager_access_role_arn").(string)) + settings.SecretsManagerSecretId = aws.String(d.Get("secrets_manager_arn").(string)) } else { - input.MySQLSettings = &awstypes.MySQLSettings{ - Username: aws.String(d.Get(names.AttrUsername).(string)), - Password: aws.String(d.Get(names.AttrPassword).(string)), - ServerName: aws.String(d.Get("server_name").(string)), - Port: aws.Int32(int32(d.Get(names.AttrPort).(int))), - DatabaseName: aws.String(d.Get(names.AttrDatabaseName).(string)), + settings.Username = aws.String(d.Get(names.AttrUsername).(string)) + settings.Password = aws.String(d.Get(names.AttrPassword).(string)) + settings.ServerName = aws.String(d.Get("server_name").(string)) + settings.Port = aws.Int32(int32(d.Get(names.AttrPort).(int))) + + // DatabaseName can be empty since it should not be specified + // when mysql_settings.target_db_type is `multiple-databases` + if v, ok := d.GetOk(names.AttrDatabaseName); ok { + settings.DatabaseName = aws.String(v.(string)) } - input.EngineName = aws.String(engineName) // Update connection info in top-level namespace as well expandTopLevelConnectionInfoModify(d, &input) } + + input.MySQLSettings = settings } case engineNameAuroraPostgresql, engineNamePostgres: if d.HasChanges( - names.AttrUsername, names.AttrPassword, "server_name", names.AttrPort, names.AttrDatabaseName, "secrets_manager_access_role_arn", - "secrets_manager_arn") { + names.AttrDatabaseName, "postgres_settings", + names.AttrUsername, names.AttrPassword, "server_name", names.AttrPort, + "secrets_manager_access_role_arn", "secrets_manager_arn") { + var settings *awstypes.PostgreSQLSettings + + if v, ok := d.GetOk("postgres_settings"); ok 
&& len(v.([]any)) > 0 && v.([]any)[0] != nil { + settings = expandPostgreSQLSettings(v.([]any)[0].(map[string]any)) + } else { + settings = &awstypes.PostgreSQLSettings{} + } + settings.DatabaseName = aws.String(d.Get(names.AttrDatabaseName).(string)) + if _, ok := d.GetOk("secrets_manager_arn"); ok { - input.PostgreSQLSettings = &awstypes.PostgreSQLSettings{ - DatabaseName: aws.String(d.Get(names.AttrDatabaseName).(string)), - SecretsManagerAccessRoleArn: aws.String(d.Get("secrets_manager_access_role_arn").(string)), - SecretsManagerSecretId: aws.String(d.Get("secrets_manager_arn").(string)), - } + settings.SecretsManagerAccessRoleArn = aws.String(d.Get("secrets_manager_access_role_arn").(string)) + settings.SecretsManagerSecretId = aws.String(d.Get("secrets_manager_arn").(string)) } else { - input.PostgreSQLSettings = &awstypes.PostgreSQLSettings{ - Username: aws.String(d.Get(names.AttrUsername).(string)), - Password: aws.String(d.Get(names.AttrPassword).(string)), - ServerName: aws.String(d.Get("server_name").(string)), - Port: aws.Int32(int32(d.Get(names.AttrPort).(int))), - DatabaseName: aws.String(d.Get(names.AttrDatabaseName).(string)), + if v, ok := d.GetOk(names.AttrPassword); ok { + settings.Password = aws.String(v.(string)) } - input.EngineName = aws.String(engineName) // Must be included (should be 'postgres') + + settings.Username = aws.String(d.Get(names.AttrUsername).(string)) + settings.ServerName = aws.String(d.Get("server_name").(string)) + settings.Port = aws.Int32(int32(d.Get(names.AttrPort).(int))) // Update connection info in top-level namespace as well expandTopLevelConnectionInfoModify(d, &input) } + + input.PostgreSQLSettings = settings } case engineNameDynamoDB: if d.HasChange("service_access_role") { @@ -937,10 +1067,10 @@ func resourceEndpointUpdate(ctx context.Context, d *schema.ResourceData, meta an } case engineNameElasticsearch, engineNameOpenSearch: if d.HasChanges( + "elasticsearch_settings.0.service_access_role_arn", 
"elasticsearch_settings.0.endpoint_uri", "elasticsearch_settings.0.error_retry_duration", "elasticsearch_settings.0.full_load_error_percentage", - "elasticsearch_settings.0.service_access_role_arn", "elasticsearch_settings.0.use_new_mapping_type") { input.ElasticsearchSettings = &awstypes.ElasticsearchSettings{ ServiceAccessRoleArn: aws.String(d.Get("elasticsearch_settings.0.service_access_role_arn").(string)), @@ -949,142 +1079,135 @@ func resourceEndpointUpdate(ctx context.Context, d *schema.ResourceData, meta an FullLoadErrorPercentage: aws.Int32(int32(d.Get("elasticsearch_settings.0.full_load_error_percentage").(int))), UseNewMappingType: aws.Bool(d.Get("elasticsearch_settings.0.use_new_mapping_type").(bool)), } - input.EngineName = aws.String(engineName) } case engineNameKafka: if d.HasChange("kafka_settings") { input.KafkaSettings = expandKafkaSettings(d.Get("kafka_settings").([]any)[0].(map[string]any)) - input.EngineName = aws.String(engineName) } case engineNameKinesis: if d.HasChanges("kinesis_settings") { input.KinesisSettings = expandKinesisSettings(d.Get("kinesis_settings").([]any)[0].(map[string]any)) - input.EngineName = aws.String(engineName) } case engineNameMongodb: if d.HasChanges( - names.AttrUsername, names.AttrPassword, "server_name", names.AttrPort, names.AttrDatabaseName, "mongodb_settings.0.auth_type", - "mongodb_settings.0.auth_mechanism", "mongodb_settings.0.nesting_level", "mongodb_settings.0.extract_doc_id", - "mongodb_settings.0.docs_to_investigate", "mongodb_settings.0.auth_source", "secrets_manager_access_role_arn", - "secrets_manager_arn") { + names.AttrUsername, names.AttrPassword, "server_name", names.AttrPort, + names.AttrDatabaseName, names.AttrKMSKeyARN, "mongodb_settings.0.auth_type", "mongodb_settings.0.auth_mechanism", "mongodb_settings.0.nesting_level", "mongodb_settings.0.extract_doc_id", "mongodb_settings.0.docs_to_investigate", "mongodb_settings.0.auth_source", + "secrets_manager_access_role_arn", "secrets_manager_arn") 
{ + var settings = &awstypes.MongoDbSettings{} + if _, ok := d.GetOk("secrets_manager_arn"); ok { - input.MongoDbSettings = &awstypes.MongoDbSettings{ - SecretsManagerAccessRoleArn: aws.String(d.Get("secrets_manager_access_role_arn").(string)), - SecretsManagerSecretId: aws.String(d.Get("secrets_manager_arn").(string)), - DatabaseName: aws.String(d.Get(names.AttrDatabaseName).(string)), - KmsKeyId: aws.String(d.Get(names.AttrKMSKeyARN).(string)), - - AuthType: awstypes.AuthTypeValue(d.Get("mongodb_settings.0.auth_type").(string)), - AuthMechanism: awstypes.AuthMechanismValue(d.Get("mongodb_settings.0.auth_mechanism").(string)), - NestingLevel: awstypes.NestingLevelValue(d.Get("mongodb_settings.0.nesting_level").(string)), - ExtractDocId: aws.String(d.Get("mongodb_settings.0.extract_doc_id").(string)), - DocsToInvestigate: aws.String(d.Get("mongodb_settings.0.docs_to_investigate").(string)), - AuthSource: aws.String(d.Get("mongodb_settings.0.auth_source").(string)), - } + settings.SecretsManagerAccessRoleArn = aws.String(d.Get("secrets_manager_access_role_arn").(string)) + settings.SecretsManagerSecretId = aws.String(d.Get("secrets_manager_arn").(string)) } else { - input.MongoDbSettings = &awstypes.MongoDbSettings{ - Username: aws.String(d.Get(names.AttrUsername).(string)), - Password: aws.String(d.Get(names.AttrPassword).(string)), - ServerName: aws.String(d.Get("server_name").(string)), - Port: aws.Int32(int32(d.Get(names.AttrPort).(int))), - DatabaseName: aws.String(d.Get(names.AttrDatabaseName).(string)), - KmsKeyId: aws.String(d.Get(names.AttrKMSKeyARN).(string)), - - AuthType: awstypes.AuthTypeValue(d.Get("mongodb_settings.0.auth_type").(string)), - AuthMechanism: awstypes.AuthMechanismValue(d.Get("mongodb_settings.0.auth_mechanism").(string)), - NestingLevel: awstypes.NestingLevelValue(d.Get("mongodb_settings.0.nesting_level").(string)), - ExtractDocId: aws.String(d.Get("mongodb_settings.0.extract_doc_id").(string)), - DocsToInvestigate: 
aws.String(d.Get("mongodb_settings.0.docs_to_investigate").(string)), - AuthSource: aws.String(d.Get("mongodb_settings.0.auth_source").(string)), - } - input.EngineName = aws.String(engineName) + settings.Username = aws.String(d.Get(names.AttrUsername).(string)) + settings.Password = aws.String(d.Get(names.AttrPassword).(string)) + settings.ServerName = aws.String(d.Get("server_name").(string)) + settings.Port = aws.Int32(int32(d.Get(names.AttrPort).(int))) // Update connection info in top-level namespace as well expandTopLevelConnectionInfoModify(d, &input) } + + settings.DatabaseName = aws.String(d.Get(names.AttrDatabaseName).(string)) + settings.KmsKeyId = aws.String(d.Get(names.AttrKMSKeyARN).(string)) + settings.AuthType = awstypes.AuthTypeValue(d.Get("mongodb_settings.0.auth_type").(string)) + settings.AuthMechanism = awstypes.AuthMechanismValue(d.Get("mongodb_settings.0.auth_mechanism").(string)) + settings.NestingLevel = awstypes.NestingLevelValue(d.Get("mongodb_settings.0.nesting_level").(string)) + settings.ExtractDocId = aws.String(d.Get("mongodb_settings.0.extract_doc_id").(string)) + settings.DocsToInvestigate = aws.String(d.Get("mongodb_settings.0.docs_to_investigate").(string)) + settings.AuthSource = aws.String(d.Get("mongodb_settings.0.auth_source").(string)) + + input.MongoDbSettings = settings } case engineNameOracle: if d.HasChanges( - names.AttrUsername, names.AttrPassword, "server_name", names.AttrPort, names.AttrDatabaseName, "secrets_manager_access_role_arn", - "secrets_manager_arn") { + names.AttrDatabaseName, "oracle_settings", + names.AttrUsername, names.AttrPassword, "server_name", names.AttrPort, + "secrets_manager_access_role_arn", "secrets_manager_arn") { + var settings = &awstypes.OracleSettings{ + DatabaseName: aws.String(d.Get(names.AttrDatabaseName).(string)), + } + + if v, ok := d.GetOk("oracle_settings"); ok && len(v.([]any)) > 0 && v.([]any)[0] != nil { + settings.AuthenticationMethod = 
expandOracleSettings(v.([]any)).AuthenticationMethod + } if _, ok := d.GetOk("secrets_manager_arn"); ok { - input.OracleSettings = &awstypes.OracleSettings{ - DatabaseName: aws.String(d.Get(names.AttrDatabaseName).(string)), - SecretsManagerAccessRoleArn: aws.String(d.Get("secrets_manager_access_role_arn").(string)), - SecretsManagerSecretId: aws.String(d.Get("secrets_manager_arn").(string)), - } + settings.SecretsManagerAccessRoleArn = aws.String(d.Get("secrets_manager_access_role_arn").(string)) + settings.SecretsManagerSecretId = aws.String(d.Get("secrets_manager_arn").(string)) } else { - input.OracleSettings = &awstypes.OracleSettings{ - Username: aws.String(d.Get(names.AttrUsername).(string)), - Password: aws.String(d.Get(names.AttrPassword).(string)), - ServerName: aws.String(d.Get("server_name").(string)), - Port: aws.Int32(int32(d.Get(names.AttrPort).(int))), - DatabaseName: aws.String(d.Get(names.AttrDatabaseName).(string)), + if v, ok := d.GetOk(names.AttrPassword); ok { + settings.Password = aws.String(v.(string)) } - input.EngineName = aws.String(engineName) // Must be included (should be 'oracle') + + settings.Username = aws.String(d.Get(names.AttrUsername).(string)) + settings.ServerName = aws.String(d.Get("server_name").(string)) + settings.Port = aws.Int32(int32(d.Get(names.AttrPort).(int))) // Update connection info in top-level namespace as well expandTopLevelConnectionInfoModify(d, &input) } + + input.OracleSettings = settings } case engineNameRedis: if d.HasChanges("redis_settings") { input.RedisSettings = expandRedisSettings(d.Get("redis_settings").([]any)[0].(map[string]any)) - input.EngineName = aws.String(engineName) } case engineNameRedshift: if d.HasChanges( - names.AttrUsername, names.AttrPassword, "server_name", names.AttrPort, names.AttrDatabaseName, - "redshift_settings", "secrets_manager_access_role_arn", - "secrets_manager_arn") { + names.AttrDatabaseName, "redshift_settings", + names.AttrUsername, names.AttrPassword, "server_name", 
names.AttrPort, + "secrets_manager_access_role_arn", "secrets_manager_arn") { + var settings = &awstypes.RedshiftSettings{ + DatabaseName: aws.String(d.Get(names.AttrDatabaseName).(string)), + } + if _, ok := d.GetOk("secrets_manager_arn"); ok { - input.RedshiftSettings = &awstypes.RedshiftSettings{ - DatabaseName: aws.String(d.Get(names.AttrDatabaseName).(string)), - SecretsManagerAccessRoleArn: aws.String(d.Get("secrets_manager_access_role_arn").(string)), - SecretsManagerSecretId: aws.String(d.Get("secrets_manager_arn").(string)), - } + settings.SecretsManagerAccessRoleArn = aws.String(d.Get("secrets_manager_access_role_arn").(string)) + settings.SecretsManagerSecretId = aws.String(d.Get("secrets_manager_arn").(string)) } else { - input.RedshiftSettings = &awstypes.RedshiftSettings{ - Username: aws.String(d.Get(names.AttrUsername).(string)), - Password: aws.String(d.Get(names.AttrPassword).(string)), - ServerName: aws.String(d.Get("server_name").(string)), - Port: aws.Int32(int32(d.Get(names.AttrPort).(int))), - DatabaseName: aws.String(d.Get(names.AttrDatabaseName).(string)), + if v, ok := d.GetOk(names.AttrPassword); ok { + settings.Password = aws.String(v.(string)) } - input.EngineName = aws.String(engineName) // Must be included (should be 'redshift') + + settings.Username = aws.String(d.Get(names.AttrUsername).(string)) + settings.ServerName = aws.String(d.Get("server_name").(string)) + settings.Port = aws.Int32(int32(d.Get(names.AttrPort).(int))) // Update connection info in top-level namespace as well expandTopLevelConnectionInfoModify(d, &input) + } - if v, ok := d.GetOk("redshift_settings"); ok && len(v.([]any)) > 0 && v.([]any)[0] != nil { - tfMap := v.([]any)[0].(map[string]any) + if v, ok := d.GetOk("redshift_settings"); ok && len(v.([]any)) > 0 && v.([]any)[0] != nil { + tfMap := v.([]any)[0].(map[string]any) - if v, ok := tfMap["bucket_folder"].(string); ok && v != "" { - input.RedshiftSettings.BucketFolder = aws.String(v) - } + if v, ok := 
tfMap["bucket_folder"].(string); ok && v != "" { + settings.BucketFolder = aws.String(v) + } - if v, ok := tfMap[names.AttrBucketName].(string); ok && v != "" { - input.RedshiftSettings.BucketName = aws.String(v) - } + if v, ok := tfMap[names.AttrBucketName].(string); ok && v != "" { + settings.BucketName = aws.String(v) + } - if v, ok := tfMap["encryption_mode"].(string); ok && v != "" { - input.RedshiftSettings.EncryptionMode = awstypes.EncryptionModeValue(v) - } + if v, ok := tfMap["encryption_mode"].(string); ok && v != "" { + settings.EncryptionMode = awstypes.EncryptionModeValue(v) + } - if v, ok := tfMap["server_side_encryption_kms_key_id"].(string); ok && v != "" { - input.RedshiftSettings.ServerSideEncryptionKmsKeyId = aws.String(v) - } + if v, ok := tfMap["server_side_encryption_kms_key_id"].(string); ok && v != "" { + settings.ServerSideEncryptionKmsKeyId = aws.String(v) + } - if v, ok := tfMap["service_access_role_arn"].(string); ok && v != "" { - input.RedshiftSettings.ServiceAccessRoleArn = aws.String(v) - } + if v, ok := tfMap["service_access_role_arn"].(string); ok && v != "" { + settings.ServiceAccessRoleArn = aws.String(v) } + + input.RedshiftSettings = settings } } case engineNameSQLServer, engineNameBabelfish: if d.HasChanges( - names.AttrUsername, names.AttrPassword, "server_name", names.AttrPort, names.AttrDatabaseName, "secrets_manager_access_role_arn", - "secrets_manager_arn") { + names.AttrUsername, names.AttrPassword, "server_name", names.AttrPort, names.AttrDatabaseName, + "secrets_manager_access_role_arn", "secrets_manager_arn") { if _, ok := d.GetOk("secrets_manager_arn"); ok { input.MicrosoftSQLServerSettings = &awstypes.MicrosoftSQLServerSettings{ DatabaseName: aws.String(d.Get(names.AttrDatabaseName).(string)), @@ -1099,7 +1222,6 @@ func resourceEndpointUpdate(ctx context.Context, d *schema.ResourceData, meta an Port: aws.Int32(int32(d.Get(names.AttrPort).(int))), DatabaseName: aws.String(d.Get(names.AttrDatabaseName).(string)), } - 
input.EngineName = aws.String(engineName) // Must be included (should be 'postgres') // Update connection info in top-level namespace as well expandTopLevelConnectionInfoModify(d, &input) @@ -1107,8 +1229,8 @@ func resourceEndpointUpdate(ctx context.Context, d *schema.ResourceData, meta an } case engineNameSybase: if d.HasChanges( - names.AttrUsername, names.AttrPassword, "server_name", names.AttrPort, names.AttrDatabaseName, "secrets_manager_access_role_arn", - "secrets_manager_arn") { + names.AttrUsername, names.AttrPassword, "server_name", names.AttrPort, names.AttrDatabaseName, + "secrets_manager_access_role_arn", "secrets_manager_arn") { if _, ok := d.GetOk("secrets_manager_arn"); ok { input.SybaseSettings = &awstypes.SybaseSettings{ DatabaseName: aws.String(d.Get(names.AttrDatabaseName).(string)), @@ -1123,7 +1245,6 @@ func resourceEndpointUpdate(ctx context.Context, d *schema.ResourceData, meta an Port: aws.Int32(int32(d.Get(names.AttrPort).(int))), DatabaseName: aws.String(d.Get(names.AttrDatabaseName).(string)), } - input.EngineName = aws.String(engineName) // Must be included (should be 'postgres') // Update connection info in top-level namespace as well expandTopLevelConnectionInfoModify(d, &input) @@ -1131,8 +1252,8 @@ func resourceEndpointUpdate(ctx context.Context, d *schema.ResourceData, meta an } case engineNameDB2, engineNameDB2zOS: if d.HasChanges( - names.AttrUsername, names.AttrPassword, "server_name", names.AttrPort, names.AttrDatabaseName, "secrets_manager_access_role_arn", - "secrets_manager_arn") { + names.AttrUsername, names.AttrPassword, "server_name", names.AttrPort, names.AttrDatabaseName, + "secrets_manager_access_role_arn", "secrets_manager_arn") { if _, ok := d.GetOk("secrets_manager_arn"); ok { input.IBMDb2Settings = &awstypes.IBMDb2Settings{ DatabaseName: aws.String(d.Get(names.AttrDatabaseName).(string)), @@ -1147,7 +1268,6 @@ func resourceEndpointUpdate(ctx context.Context, d *schema.ResourceData, meta an Port: 
aws.Int32(int32(d.Get(names.AttrPort).(int))), DatabaseName: aws.String(d.Get(names.AttrDatabaseName).(string)), } - input.EngineName = aws.String(engineName) // Must be included (should be 'db2') // Update connection info in top-level namespace as well expandTopLevelConnectionInfoModify(d, &input) @@ -1306,6 +1426,9 @@ func resourceEndpointSetState(d *schema.ResourceData, endpoint *awstypes.Endpoin } else { flattenTopLevelConnectionInfo(d, endpoint) } + if err := d.Set("mysql_settings", flattenMySQLSettings(endpoint.MySQLSettings)); err != nil { + return fmt.Errorf("setting mysql_settings: %w", err) + } case engineNameAuroraPostgresql, engineNamePostgres: if endpoint.PostgreSQLSettings != nil { d.Set(names.AttrUsername, endpoint.PostgreSQLSettings.Username) @@ -1371,6 +1494,9 @@ func resourceEndpointSetState(d *schema.ResourceData, endpoint *awstypes.Endpoin } else { flattenTopLevelConnectionInfo(d, endpoint) } + if err := d.Set("oracle_settings", flattenOracleSettings(endpoint.OracleSettings)); err != nil { + return fmt.Errorf("setting oracle_settings: %w", err) + } case engineNameRedis: // Auth password isn't returned in API. Propagate state value. 
tfMap := flattenRedisSettings(endpoint.RedisSettings) @@ -1912,6 +2038,47 @@ func flattenRedshiftSettings(settings *awstypes.RedshiftSettings) []map[string]a return []map[string]any{m} } +func expandMySQLSettings(tfMap map[string]any) *awstypes.MySQLSettings { + if tfMap == nil { + return nil + } + + apiObject := &awstypes.MySQLSettings{} + + if v, ok := tfMap["after_connect_script"].(string); ok && v != "" { + apiObject.AfterConnectScript = aws.String(v) + } + if v, ok := tfMap["authentication_method"].(string); ok && v != "" { + apiObject.AuthenticationMethod = awstypes.MySQLAuthenticationMethod(v) + } + if v, ok := tfMap["clean_source_metadata_on_mismatch"].(bool); ok { + apiObject.CleanSourceMetadataOnMismatch = aws.Bool(v) + } + if v, ok := tfMap["events_poll_interval"].(int); ok && v != 0 { + apiObject.EventsPollInterval = aws.Int32(int32(v)) + } + if v, ok := tfMap["execute_timeout"].(int); ok && v != 0 { + apiObject.ExecuteTimeout = aws.Int32(int32(v)) + } + if v, ok := tfMap["max_file_size"].(int); ok && v != 0 { + apiObject.MaxFileSize = aws.Int32(int32(v)) + } + if v, ok := tfMap["parallel_load_threads"].(int); ok && v != 0 { + apiObject.ParallelLoadThreads = aws.Int32(int32(v)) + } + if v, ok := tfMap["server_timezone"].(string); ok && v != "" { + apiObject.ServerTimezone = aws.String(v) + } + if v, ok := tfMap["service_access_role_arn"].(string); ok && v != "" { + apiObject.ServiceAccessRoleArn = aws.String(v) + } + if v, ok := tfMap["target_db_type"].(string); ok && v != "" { + apiObject.TargetDbType = awstypes.TargetDbType(v) + } + + return apiObject +} + func expandPostgreSQLSettings(tfMap map[string]any) *awstypes.PostgreSQLSettings { if tfMap == nil { return nil @@ -1922,6 +2089,9 @@ func expandPostgreSQLSettings(tfMap map[string]any) *awstypes.PostgreSQLSettings if v, ok := tfMap["after_connect_script"].(string); ok && v != "" { apiObject.AfterConnectScript = aws.String(v) } + if v, ok := tfMap["authentication_method"].(string); ok && v != "" { + 
apiObject.AuthenticationMethod = awstypes.PostgreSQLAuthenticationMethod(v) + } if v, ok := tfMap["babelfish_database_name"].(string); ok && v != "" { apiObject.BabelfishDatabaseName = aws.String(v) } @@ -1964,6 +2134,9 @@ func expandPostgreSQLSettings(tfMap map[string]any) *awstypes.PostgreSQLSettings if v, ok := tfMap["plugin_name"].(string); ok && v != "" { apiObject.PluginName = awstypes.PluginNameValue(v) } + if v, ok := tfMap["service_access_role_arn"].(string); ok && v != "" { + apiObject.ServiceAccessRoleArn = aws.String(v) + } if v, ok := tfMap["slot_name"].(string); ok && v != "" { apiObject.SlotName = aws.String(v) } @@ -1971,6 +2144,47 @@ func expandPostgreSQLSettings(tfMap map[string]any) *awstypes.PostgreSQLSettings return apiObject } +func flattenMySQLSettings(apiObject *awstypes.MySQLSettings) []map[string]any { + if apiObject == nil { + return nil + } + + tfMap := map[string]any{} + + if v := apiObject.AfterConnectScript; v != nil { + tfMap["after_connect_script"] = aws.ToString(v) + } + if v := apiObject.AuthenticationMethod; v != "" { + tfMap["authentication_method"] = string(v) + } + if v := apiObject.CleanSourceMetadataOnMismatch; v != nil { + tfMap["clean_source_metadata_on_mismatch"] = aws.ToBool(v) + } + if v := apiObject.EventsPollInterval; v != nil { + tfMap["events_poll_interval"] = aws.ToInt32(v) + } + if v := apiObject.ExecuteTimeout; v != nil { + tfMap["execute_timeout"] = aws.ToInt32(v) + } + if v := apiObject.MaxFileSize; v != nil { + tfMap["max_file_size"] = aws.ToInt32(v) + } + if v := apiObject.ParallelLoadThreads; v != nil { + tfMap["parallel_load_threads"] = aws.ToInt32(v) + } + if v := apiObject.ServerTimezone; v != nil { + tfMap["server_timezone"] = aws.ToString(v) + } + if v := apiObject.ServiceAccessRoleArn; v != nil { + tfMap["service_access_role_arn"] = aws.ToString(v) + } + if v := apiObject.TargetDbType; v != "" { + tfMap["target_db_type"] = string(v) + } + + return []map[string]any{tfMap} +} + func 
flattenPostgreSQLSettings(apiObject *awstypes.PostgreSQLSettings) []map[string]any { if apiObject == nil { return nil @@ -1981,13 +2195,14 @@ func flattenPostgreSQLSettings(apiObject *awstypes.PostgreSQLSettings) []map[str if v := apiObject.AfterConnectScript; v != nil { tfMap["after_connect_script"] = aws.ToString(v) } + tfMap["authentication_method"] = apiObject.AuthenticationMethod if v := apiObject.BabelfishDatabaseName; v != nil { tfMap["babelfish_database_name"] = aws.ToString(v) } if v := apiObject.CaptureDdls; v != nil { tfMap["capture_ddls"] = aws.ToBool(v) } - tfMap["database_mode"] = string(apiObject.DatabaseMode) + tfMap["database_mode"] = apiObject.DatabaseMode if v := apiObject.DdlArtifactsSchema; v != nil { tfMap["ddl_artifacts_schema"] = aws.ToString(v) } @@ -2012,11 +2227,14 @@ func flattenPostgreSQLSettings(apiObject *awstypes.PostgreSQLSettings) []map[str if v := apiObject.MapJsonbAsClob; v != nil { tfMap["map_jsonb_as_clob"] = aws.ToBool(v) } - tfMap["map_long_varchar_as"] = string(apiObject.MapLongVarcharAs) + tfMap["map_long_varchar_as"] = apiObject.MapLongVarcharAs if v := apiObject.MaxFileSize; v != nil { tfMap["max_file_size"] = aws.ToInt32(v) } - tfMap["plugin_name"] = string(apiObject.PluginName) + tfMap["plugin_name"] = apiObject.PluginName + if v := apiObject.ServiceAccessRoleArn; v != nil { + tfMap["service_access_role_arn"] = aws.ToString(v) + } if v := apiObject.SlotName; v != nil { tfMap["slot_name"] = aws.ToString(v) } @@ -2106,24 +2324,28 @@ func engineSettingsToSet(l []any) *schema.Set { func expandTopLevelConnectionInfo(d *schema.ResourceData, input *dms.CreateEndpointInput) { input.Username = aws.String(d.Get(names.AttrUsername).(string)) - input.Password = aws.String(d.Get(names.AttrPassword).(string)) input.ServerName = aws.String(d.Get("server_name").(string)) input.Port = aws.Int32(int32(d.Get(names.AttrPort).(int))) if v, ok := d.GetOk(names.AttrDatabaseName); ok { input.DatabaseName = aws.String(v.(string)) } + if v, ok 
:= d.GetOk(names.AttrPassword); ok { + input.Password = aws.String(v.(string)) + } } func expandTopLevelConnectionInfoModify(d *schema.ResourceData, input *dms.ModifyEndpointInput) { input.Username = aws.String(d.Get(names.AttrUsername).(string)) - input.Password = aws.String(d.Get(names.AttrPassword).(string)) input.ServerName = aws.String(d.Get("server_name").(string)) input.Port = aws.Int32(int32(d.Get(names.AttrPort).(int))) if v, ok := d.GetOk(names.AttrDatabaseName); ok { input.DatabaseName = aws.String(v.(string)) } + if v, ok := d.GetOk(names.AttrPassword); ok { + input.Password = aws.String(v.(string)) + } } func flattenTopLevelConnectionInfo(d *schema.ResourceData, endpoint *awstypes.Endpoint) { @@ -2133,6 +2355,40 @@ func flattenTopLevelConnectionInfo(d *schema.ResourceData, endpoint *awstypes.En d.Set(names.AttrDatabaseName, endpoint.DatabaseName) } +func expandOracleSettings(tfList []any) *awstypes.OracleSettings { + if len(tfList) == 0 { + return nil + } + + var apiObject awstypes.OracleSettings + + for _, tfMapRaw := range tfList { + tfMap, ok := tfMapRaw.(map[string]any) + + if !ok { + continue + } + + if v, ok := tfMap["authentication_method"].(string); ok && v != "" { + apiObject.AuthenticationMethod = awstypes.OracleAuthenticationMethod(v) + } + } + + return &apiObject +} + +func flattenOracleSettings(oracleSettings *awstypes.OracleSettings) []any { + if oracleSettings == nil { + return nil + } + + tfMap := map[string]any{ + "authentication_method": oracleSettings.AuthenticationMethod, + } + + return []any{tfMap} +} + func findEndpointByID(ctx context.Context, conn *dms.Client, id string) (*awstypes.Endpoint, error) { input := &dms.DescribeEndpointsInput{ Filters: []awstypes.Filter{ @@ -2165,8 +2421,7 @@ func findEndpoints(ctx context.Context, conn *dms.Client, input *dms.DescribeEnd if errs.IsA[*awstypes.ResourceNotFoundFault](err) { return nil, &retry.NotFoundError{ - LastError: err, - LastRequest: input, + LastError: err, } } @@ -2212,8 
+2467,7 @@ func findConnections(ctx context.Context, conn *dms.Client, input *dms.DescribeC if errs.IsA[*awstypes.ResourceNotFoundFault](err) { return nil, &retry.NotFoundError{ - LastError: err, - LastRequest: input, + LastError: err, } } @@ -2227,8 +2481,8 @@ func findConnections(ctx context.Context, conn *dms.Client, input *dms.DescribeC return output, nil } -func statusEndpoint(ctx context.Context, conn *dms.Client, id string) retry.StateRefreshFunc { - return func() (any, string, error) { +func statusEndpoint(conn *dms.Client, id string) retry.StateRefreshFunc { + return func(ctx context.Context) (any, string, error) { output, err := findEndpointByID(ctx, conn, id) if tfresource.NotFound(err) { @@ -2243,8 +2497,8 @@ func statusEndpoint(ctx context.Context, conn *dms.Client, id string) retry.Stat } } -func statusConnection(ctx context.Context, conn *dms.Client, endpointARN string) retry.StateRefreshFunc { - return func() (any, string, error) { +func statusConnection(conn *dms.Client, endpointARN string) retry.StateRefreshFunc { + return func(ctx context.Context) (any, string, error) { output, err := findConnectionByEndpointARN(ctx, conn, endpointARN) if tfresource.NotFound(err) { @@ -2263,7 +2517,7 @@ func waitEndpointDeleted(ctx context.Context, conn *dms.Client, id string, timeo stateConf := &retry.StateChangeConf{ Pending: []string{endpointStatusDeleting}, Target: []string{}, - Refresh: statusEndpoint(ctx, conn, id), + Refresh: statusEndpoint(conn, id), Timeout: timeout, } @@ -2280,7 +2534,7 @@ func waitConnectionSucceeded(ctx context.Context, conn *dms.Client, endpointARN stateConf := &retry.StateChangeConf{ Pending: []string{connectionStatusTesting}, Target: []string{connectionStatusSuccessful}, - Refresh: statusConnection(ctx, conn, endpointARN), + Refresh: statusConnection(conn, endpointARN), Timeout: timeout, Delay: 5 * time.Second, } @@ -2288,6 +2542,7 @@ func waitConnectionSucceeded(ctx context.Context, conn *dms.Client, endpointARN outputRaw, err := 
stateConf.WaitForStateContext(ctx) if output, ok := outputRaw.(*awstypes.Connection); ok { + tfresource.SetLastError(err, errors.New(aws.ToString(output.LastFailureMessage))) return output, err } diff --git a/internal/service/dms/endpoint_data_source.go b/internal/service/dms/endpoint_data_source.go index e41c6f529743..099bb63e8fbe 100644 --- a/internal/service/dms/endpoint_data_source.go +++ b/internal/service/dms/endpoint_data_source.go @@ -245,6 +245,54 @@ func dataSourceEndpoint() *schema.Resource { }, }, }, + "mysql_settings": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "after_connect_script": { + Type: schema.TypeString, + Computed: true, + }, + "authentication_method": { + Type: schema.TypeString, + Computed: true, + }, + "clean_source_metadata_on_mismatch": { + Type: schema.TypeBool, + Computed: true, + }, + "events_poll_interval": { + Type: schema.TypeInt, + Computed: true, + }, + "execute_timeout": { + Type: schema.TypeInt, + Computed: true, + }, + "max_file_size": { + Type: schema.TypeInt, + Computed: true, + }, + "parallel_load_threads": { + Type: schema.TypeInt, + Computed: true, + }, + "server_timezone": { + Type: schema.TypeString, + Computed: true, + }, + "service_access_role_arn": { + Type: schema.TypeString, + Computed: true, + }, + "target_db_type": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, names.AttrPassword: { Type: schema.TypeString, Computed: true, @@ -262,6 +310,10 @@ func dataSourceEndpoint() *schema.Resource { Type: schema.TypeString, Computed: true, }, + "authentication_method": { + Type: schema.TypeString, + Computed: true, + }, "babelfish_database_name": { Type: schema.TypeString, Computed: true, @@ -318,6 +370,10 @@ func dataSourceEndpoint() *schema.Resource { Type: schema.TypeString, Computed: true, }, + "service_access_role_arn": { + Type: schema.TypeString, + Computed: true, + }, "slot_name": { Type: schema.TypeString, Computed: true, @@ 
-635,6 +691,9 @@ func resourceEndpointDataSourceSetState(d *schema.ResourceData, endpoint *awstyp } else { flattenTopLevelConnectionInfo(d, endpoint) } + if err := d.Set("mysql_settings", flattenMySQLSettings(endpoint.MySQLSettings)); err != nil { + return fmt.Errorf("setting mysql_settings: %w", err) + } case engineNameAuroraPostgresql, engineNamePostgres: if endpoint.PostgreSQLSettings != nil { d.Set(names.AttrUsername, endpoint.PostgreSQLSettings.Username) @@ -757,7 +816,7 @@ func resourceEndpointDataSourceSetState(d *schema.ResourceData, endpoint *awstyp } case engineNameS3: if err := d.Set("s3_settings", flattenS3Settings(endpoint.S3Settings)); err != nil { - return fmt.Errorf("setting s3_settings for DMS: %s", err) + return fmt.Errorf("setting s3_settings for DMS: %w", err) } default: d.Set(names.AttrDatabaseName, endpoint.DatabaseName) diff --git a/internal/service/dms/endpoint_data_source_tags_gen_test.go b/internal/service/dms/endpoint_data_source_tags_gen_test.go index bfed11bfc6e2..05f89e60f6f8 100644 --- a/internal/service/dms/endpoint_data_source_tags_gen_test.go +++ b/internal/service/dms/endpoint_data_source_tags_gen_test.go @@ -6,7 +6,6 @@ import ( "testing" "github.com/hashicorp/terraform-plugin-testing/config" - sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/knownvalue" "github.com/hashicorp/terraform-plugin-testing/statecheck" @@ -17,10 +16,11 @@ import ( func TestAccDMSEndpointDataSource_tags(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_dms_endpoint.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.DMSServiceID), 
ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -45,10 +45,11 @@ func TestAccDMSEndpointDataSource_tags(t *testing.T) { func TestAccDMSEndpointDataSource_tags_NullMap(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_dms_endpoint.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.DMSServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -69,10 +70,11 @@ func TestAccDMSEndpointDataSource_tags_NullMap(t *testing.T) { func TestAccDMSEndpointDataSource_tags_EmptyMap(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_dms_endpoint.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.DMSServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -93,10 +95,11 @@ func TestAccDMSEndpointDataSource_tags_EmptyMap(t *testing.T) { func TestAccDMSEndpointDataSource_tags_DefaultTags_nonOverlapping(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_dms_endpoint.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.DMSServiceID), Steps: []resource.TestStep{ @@ -125,10 +128,11 @@ func TestAccDMSEndpointDataSource_tags_DefaultTags_nonOverlapping(t *testing.T) func 
TestAccDMSEndpointDataSource_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_dms_endpoint.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.DMSServiceID), Steps: []resource.TestStep{ @@ -163,10 +167,11 @@ func TestAccDMSEndpointDataSource_tags_IgnoreTags_Overlap_DefaultTag(t *testing. func TestAccDMSEndpointDataSource_tags_IgnoreTags_Overlap_ResourceTag(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_dms_endpoint.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.DMSServiceID), Steps: []resource.TestStep{ diff --git a/internal/service/dms/endpoint_tags_gen_test.go b/internal/service/dms/endpoint_tags_gen_test.go index 21f428a62b61..1a333cf53f09 100644 --- a/internal/service/dms/endpoint_tags_gen_test.go +++ b/internal/service/dms/endpoint_tags_gen_test.go @@ -6,7 +6,6 @@ import ( "testing" "github.com/hashicorp/terraform-plugin-testing/config" - sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/knownvalue" "github.com/hashicorp/terraform-plugin-testing/plancheck" @@ -18,10 +17,11 @@ import ( func TestAccDMSEndpoint_tags(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_dms_endpoint.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - 
resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.DMSServiceID), CheckDestroy: testAccCheckEndpointDestroy(ctx), @@ -211,10 +211,11 @@ func TestAccDMSEndpoint_tags(t *testing.T) { func TestAccDMSEndpoint_tags_null(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_dms_endpoint.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.DMSServiceID), CheckDestroy: testAccCheckEndpointDestroy(ctx), @@ -280,10 +281,11 @@ func TestAccDMSEndpoint_tags_null(t *testing.T) { func TestAccDMSEndpoint_tags_EmptyMap(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_dms_endpoint.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.DMSServiceID), CheckDestroy: testAccCheckEndpointDestroy(ctx), @@ -345,10 +347,11 @@ func TestAccDMSEndpoint_tags_EmptyMap(t *testing.T) { func TestAccDMSEndpoint_tags_AddOnUpdate(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_dms_endpoint.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.DMSServiceID), CheckDestroy: testAccCheckEndpointDestroy(ctx), @@ -428,10 +431,11 @@ func 
TestAccDMSEndpoint_tags_AddOnUpdate(t *testing.T) { func TestAccDMSEndpoint_tags_EmptyTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_dms_endpoint.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.DMSServiceID), CheckDestroy: testAccCheckEndpointDestroy(ctx), @@ -522,10 +526,11 @@ func TestAccDMSEndpoint_tags_EmptyTag_OnCreate(t *testing.T) { func TestAccDMSEndpoint_tags_EmptyTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_dms_endpoint.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.DMSServiceID), CheckDestroy: testAccCheckEndpointDestroy(ctx), @@ -664,10 +669,11 @@ func TestAccDMSEndpoint_tags_EmptyTag_OnUpdate_Add(t *testing.T) { func TestAccDMSEndpoint_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_dms_endpoint.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.DMSServiceID), CheckDestroy: testAccCheckEndpointDestroy(ctx), @@ -755,10 +761,11 @@ func TestAccDMSEndpoint_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { func TestAccDMSEndpoint_tags_DefaultTags_providerOnly(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_dms_endpoint.test" - rName := 
sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.DMSServiceID), CheckDestroy: testAccCheckEndpointDestroy(ctx), @@ -947,10 +954,11 @@ func TestAccDMSEndpoint_tags_DefaultTags_providerOnly(t *testing.T) { func TestAccDMSEndpoint_tags_DefaultTags_nonOverlapping(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_dms_endpoint.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.DMSServiceID), CheckDestroy: testAccCheckEndpointDestroy(ctx), @@ -1115,10 +1123,11 @@ func TestAccDMSEndpoint_tags_DefaultTags_nonOverlapping(t *testing.T) { func TestAccDMSEndpoint_tags_DefaultTags_overlapping(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_dms_endpoint.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.DMSServiceID), CheckDestroy: testAccCheckEndpointDestroy(ctx), @@ -1299,10 +1308,11 @@ func TestAccDMSEndpoint_tags_DefaultTags_overlapping(t *testing.T) { func TestAccDMSEndpoint_tags_DefaultTags_updateToProviderOnly(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_dms_endpoint.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + 
acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.DMSServiceID), CheckDestroy: testAccCheckEndpointDestroy(ctx), @@ -1391,10 +1401,11 @@ func TestAccDMSEndpoint_tags_DefaultTags_updateToProviderOnly(t *testing.T) { func TestAccDMSEndpoint_tags_DefaultTags_updateToResourceOnly(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_dms_endpoint.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.DMSServiceID), CheckDestroy: testAccCheckEndpointDestroy(ctx), @@ -1482,10 +1493,11 @@ func TestAccDMSEndpoint_tags_DefaultTags_updateToResourceOnly(t *testing.T) { func TestAccDMSEndpoint_tags_DefaultTags_emptyResourceTag(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_dms_endpoint.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.DMSServiceID), CheckDestroy: testAccCheckEndpointDestroy(ctx), @@ -1549,10 +1561,11 @@ func TestAccDMSEndpoint_tags_DefaultTags_emptyResourceTag(t *testing.T) { func TestAccDMSEndpoint_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_dms_endpoint.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, 
names.DMSServiceID), CheckDestroy: testAccCheckEndpointDestroy(ctx), @@ -1608,10 +1621,11 @@ func TestAccDMSEndpoint_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T) { func TestAccDMSEndpoint_tags_DefaultTags_nullOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_dms_endpoint.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.DMSServiceID), CheckDestroy: testAccCheckEndpointDestroy(ctx), @@ -1672,10 +1686,11 @@ func TestAccDMSEndpoint_tags_DefaultTags_nullOverlappingResourceTag(t *testing.T func TestAccDMSEndpoint_tags_DefaultTags_nullNonOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_dms_endpoint.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.DMSServiceID), CheckDestroy: testAccCheckEndpointDestroy(ctx), @@ -1736,10 +1751,11 @@ func TestAccDMSEndpoint_tags_DefaultTags_nullNonOverlappingResourceTag(t *testin func TestAccDMSEndpoint_tags_ComputedTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_dms_endpoint.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.DMSServiceID), CheckDestroy: testAccCheckEndpointDestroy(ctx), @@ -1793,10 +1809,11 @@ func 
TestAccDMSEndpoint_tags_ComputedTag_OnCreate(t *testing.T) { func TestAccDMSEndpoint_tags_ComputedTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_dms_endpoint.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.DMSServiceID), CheckDestroy: testAccCheckEndpointDestroy(ctx), @@ -1892,10 +1909,11 @@ func TestAccDMSEndpoint_tags_ComputedTag_OnUpdate_Add(t *testing.T) { func TestAccDMSEndpoint_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_dms_endpoint.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.DMSServiceID), CheckDestroy: testAccCheckEndpointDestroy(ctx), @@ -1981,10 +1999,11 @@ func TestAccDMSEndpoint_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { func TestAccDMSEndpoint_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_dms_endpoint.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.DMSServiceID), CheckDestroy: testAccCheckEndpointDestroy(ctx), @@ -2142,10 +2161,11 @@ func TestAccDMSEndpoint_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { func TestAccDMSEndpoint_tags_IgnoreTags_Overlap_ResourceTag(t *testing.T) { ctx := acctest.Context(t) + 
resourceName := "aws_dms_endpoint.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.DMSServiceID), CheckDestroy: testAccCheckEndpointDestroy(ctx), diff --git a/internal/service/dms/endpoint_test.go b/internal/service/dms/endpoint_test.go index 8af00a74f91c..3a8b580520b6 100644 --- a/internal/service/dms/endpoint_test.go +++ b/internal/service/dms/endpoint_test.go @@ -948,6 +948,34 @@ func TestAccDMSEndpoint_Oracle_secretID(t *testing.T) { }) } +func TestAccDMSEndpoint_Oracle_kerberos(t *testing.T) { + ctx := acctest.Context(t) + resourceName := "aws_dms_endpoint.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.DMSServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckEndpointDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccEndpointConfig_kerberos(rName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckEndpointExists(ctx, resourceName), + resource.TestCheckResourceAttrSet(resourceName, "endpoint_arn"), + resource.TestCheckResourceAttr(resourceName, "oracle_settings.0.authentication_method", "kerberos"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + func TestAccDMSEndpoint_Oracle_update(t *testing.T) { ctx := acctest.Context(t) resourceName := "aws_dms_endpoint.test" @@ -1116,6 +1144,130 @@ func TestAccDMSEndpoint_PostgreSQL_kmsKey(t *testing.T) { }) } +func TestAccDMSEndpoint_MySQL_settings_source(t *testing.T) { + ctx := acctest.Context(t) + resourceName := "aws_dms_endpoint.test" + rName := 
sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.DMSServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckEndpointDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccEndpointConfig_mySQLSourceSettings(rName, true, 5), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckEndpointExists(ctx, resourceName), + resource.TestCheckResourceAttr(resourceName, "mysql_settings.#", "1"), + resource.TestCheckResourceAttr(resourceName, "mysql_settings.0.after_connect_script", "SELECT NOW();"), + resource.TestCheckResourceAttr(resourceName, "mysql_settings.0.authentication_method", "iam"), + resource.TestCheckResourceAttr(resourceName, "mysql_settings.0.clean_source_metadata_on_mismatch", acctest.CtTrue), + resource.TestCheckResourceAttr(resourceName, "mysql_settings.0.events_poll_interval", "5"), + resource.TestCheckResourceAttr(resourceName, "mysql_settings.0.execute_timeout", "100"), + resource.TestCheckResourceAttrPair(resourceName, "mysql_settings.0.service_access_role_arn", "aws_iam_role.test", names.AttrARN), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{names.AttrPassword}, + }, + { + // Change events_poll_interval from 5 to 10 + Config: testAccEndpointConfig_mySQLSourceSettings(rName, true, 10), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckEndpointExists(ctx, resourceName), + resource.TestCheckResourceAttr(resourceName, "mysql_settings.#", "1"), + resource.TestCheckResourceAttr(resourceName, "mysql_settings.0.after_connect_script", "SELECT NOW();"), + resource.TestCheckResourceAttr(resourceName, "mysql_settings.0.authentication_method", "iam"), + resource.TestCheckResourceAttr(resourceName, "mysql_settings.0.clean_source_metadata_on_mismatch", acctest.CtTrue), + 
resource.TestCheckResourceAttr(resourceName, "mysql_settings.0.events_poll_interval", "10"), + resource.TestCheckResourceAttr(resourceName, "mysql_settings.0.execute_timeout", "100"), + resource.TestCheckResourceAttrPair(resourceName, "mysql_settings.0.service_access_role_arn", "aws_iam_role.test", names.AttrARN), + ), + }, + { + // Remove mysql_settings block (inherited the previous values) + Config: testAccEndpointConfig_mySQLSourceSettings(rName, false, 10), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckEndpointExists(ctx, resourceName), + resource.TestCheckResourceAttr(resourceName, "mysql_settings.#", "1"), + resource.TestCheckResourceAttr(resourceName, "mysql_settings.0.after_connect_script", "SELECT NOW();"), + resource.TestCheckResourceAttr(resourceName, "mysql_settings.0.authentication_method", "iam"), + resource.TestCheckResourceAttr(resourceName, "mysql_settings.0.clean_source_metadata_on_mismatch", acctest.CtTrue), + resource.TestCheckResourceAttr(resourceName, "mysql_settings.0.events_poll_interval", "10"), + resource.TestCheckResourceAttr(resourceName, "mysql_settings.0.execute_timeout", "100"), + resource.TestCheckResourceAttrPair(resourceName, "mysql_settings.0.service_access_role_arn", "aws_iam_role.test", names.AttrARN), + ), + }, + }, + }) +} + +func TestAccDMSEndpoint_MySQL_settings_target(t *testing.T) { + ctx := acctest.Context(t) + resourceName := "aws_dms_endpoint.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.DMSServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckEndpointDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccEndpointConfig_mySQLTargetSettings(rName, true, 100), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckEndpointExists(ctx, resourceName), + 
resource.TestCheckResourceAttr(resourceName, "mysql_settings.#", "1"), + resource.TestCheckResourceAttr(resourceName, "mysql_settings.0.after_connect_script", "SELECT NOW();"), + resource.TestCheckResourceAttr(resourceName, "mysql_settings.0.authentication_method", string(awstypes.MySQLAuthenticationMethodPassword)), + resource.TestCheckResourceAttr(resourceName, "mysql_settings.0.execute_timeout", "100"), + resource.TestCheckResourceAttr(resourceName, "mysql_settings.0.max_file_size", "1024"), + resource.TestCheckResourceAttr(resourceName, "mysql_settings.0.parallel_load_threads", "1"), + resource.TestCheckResourceAttr(resourceName, "mysql_settings.0.target_db_type", string(awstypes.TargetDbTypeMultipleDatabases)), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{names.AttrPassword}, + }, + { + // Change execute_timeout from 100 to 60 + Config: testAccEndpointConfig_mySQLTargetSettings(rName, true, 60), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckEndpointExists(ctx, resourceName), + resource.TestCheckResourceAttr(resourceName, "mysql_settings.#", "1"), + resource.TestCheckResourceAttr(resourceName, "mysql_settings.0.after_connect_script", "SELECT NOW();"), + resource.TestCheckResourceAttr(resourceName, "mysql_settings.0.authentication_method", string(awstypes.MySQLAuthenticationMethodPassword)), + resource.TestCheckResourceAttr(resourceName, "mysql_settings.0.execute_timeout", "60"), + resource.TestCheckResourceAttr(resourceName, "mysql_settings.0.max_file_size", "1024"), + resource.TestCheckResourceAttr(resourceName, "mysql_settings.0.parallel_load_threads", "1"), + resource.TestCheckResourceAttr(resourceName, "mysql_settings.0.target_db_type", string(awstypes.TargetDbTypeMultipleDatabases)), + ), + }, + { + // Remove mysql_settings block (inherited the previous values) + Config: testAccEndpointConfig_mySQLTargetSettings(rName, false, 60), + Check: 
resource.ComposeAggregateTestCheckFunc( + testAccCheckEndpointExists(ctx, resourceName), + resource.TestCheckResourceAttr(resourceName, "mysql_settings.#", "1"), + resource.TestCheckResourceAttr(resourceName, "mysql_settings.0.after_connect_script", "SELECT NOW();"), + resource.TestCheckResourceAttr(resourceName, "mysql_settings.0.authentication_method", string(awstypes.MySQLAuthenticationMethodPassword)), + resource.TestCheckResourceAttr(resourceName, "mysql_settings.0.execute_timeout", "60"), + resource.TestCheckResourceAttr(resourceName, "mysql_settings.0.max_file_size", "1024"), + resource.TestCheckResourceAttr(resourceName, "mysql_settings.0.parallel_load_threads", "1"), + resource.TestCheckResourceAttr(resourceName, "mysql_settings.0.target_db_type", string(awstypes.TargetDbTypeMultipleDatabases)), + ), + }, + }, + }) +} + func TestAccDMSEndpoint_PostgreSQL_settings_source(t *testing.T) { ctx := acctest.Context(t) resourceName := "aws_dms_endpoint.test" @@ -1133,6 +1285,7 @@ func TestAccDMSEndpoint_PostgreSQL_settings_source(t *testing.T) { testAccCheckEndpointExists(ctx, resourceName), resource.TestCheckResourceAttr(resourceName, "postgres_settings.#", "1"), resource.TestCheckResourceAttr(resourceName, "postgres_settings.0.after_connect_script", "SET search_path TO pg_catalog,public;"), + resource.TestCheckResourceAttr(resourceName, "postgres_settings.0.authentication_method", "iam"), resource.TestCheckResourceAttr(resourceName, "postgres_settings.0.capture_ddls", acctest.CtTrue), resource.TestCheckResourceAttr(resourceName, "postgres_settings.0.ddl_artifacts_schema", acctest.CtTrue), resource.TestCheckResourceAttr(resourceName, "postgres_settings.0.execute_timeout", "100"), @@ -1145,6 +1298,7 @@ func TestAccDMSEndpoint_PostgreSQL_settings_source(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "postgres_settings.0.map_long_varchar_as", "wstring"), resource.TestCheckResourceAttr(resourceName, "postgres_settings.0.max_file_size", "1024"), 
resource.TestCheckResourceAttr(resourceName, "postgres_settings.0.plugin_name", "pglogical"), + resource.TestCheckResourceAttrSet(resourceName, "postgres_settings.0.service_access_role_arn"), resource.TestCheckResourceAttr(resourceName, "postgres_settings.0.slot_name", "test"), ), }, @@ -1169,6 +1323,7 @@ func TestAccDMSEndpoint_PostgreSQL_settings_target(t *testing.T) { testAccCheckEndpointExists(ctx, resourceName), resource.TestCheckResourceAttr(resourceName, "postgres_settings.#", "1"), resource.TestCheckResourceAttr(resourceName, "postgres_settings.0.after_connect_script", "SET search_path TO pg_catalog,public;"), + resource.TestCheckResourceAttr(resourceName, "postgres_settings.0.authentication_method", names.AttrPassword), resource.TestCheckResourceAttr(resourceName, "postgres_settings.0.babelfish_database_name", "babelfish"), resource.TestCheckResourceAttr(resourceName, "postgres_settings.0.database_mode", "babelfish"), resource.TestCheckResourceAttr(resourceName, "postgres_settings.0.execute_timeout", "100"), @@ -1179,6 +1334,67 @@ func TestAccDMSEndpoint_PostgreSQL_settings_target(t *testing.T) { }) } +func TestAccDMSEndpoint_PostgreSQL_settings_update(t *testing.T) { + ctx := acctest.Context(t) + resourceName := "aws_dms_endpoint.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.DMSServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckEndpointDestroy(ctx), + Steps: []resource.TestStep{ + { + // Create with heartbeat disabled + Config: testAccEndpointConfig_postgreSQLHeartbeat(rName, false, ""), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckEndpointExists(ctx, resourceName), + resource.TestCheckResourceAttr(resourceName, "engine_name", "postgres"), + resource.TestCheckResourceAttr(resourceName, "postgres_settings.0.heartbeat_enable", 
acctest.CtFalse), + ), + }, + { + // Update only nested postgres_settings: enable heartbeat + set schema + Config: testAccEndpointConfig_postgreSQLHeartbeat(rName, true, "dms_heartbeat"), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckEndpointExists(ctx, resourceName), + resource.TestCheckResourceAttr(resourceName, "postgres_settings.0.heartbeat_enable", acctest.CtTrue), + resource.TestCheckResourceAttr(resourceName, "postgres_settings.0.heartbeat_schema", "dms_heartbeat"), + ), + }, + }, + }) +} + +func testAccEndpointConfig_postgreSQLHeartbeat(id string, heartbeat bool, schema string) string { + schemaLine := "" + if schema != "" { + schemaLine = fmt.Sprintf(`heartbeat_schema = %q`, schema) + } + + // DMS ModifyEndpoint accepts metadata changes without validating connectivity, + // so placeholder connection values are sufficient for the test + return fmt.Sprintf(` +resource "aws_dms_endpoint" "test" { + endpoint_id = %q + endpoint_type = "source" + engine_name = "postgres" + + username = "user" + password = "pass" + server_name = "example.com" + database_name = "postgres" + port = 5432 + + postgres_settings { + heartbeat_enable = %t + %s + } +} +`, id, heartbeat, schemaLine) +} + func TestAccDMSEndpoint_SQLServer_basic(t *testing.T) { ctx := acctest.Context(t) resourceName := "aws_dms_endpoint.test" @@ -3024,6 +3240,33 @@ resource "aws_dms_endpoint" "test" { `, rName)) } +func testAccEndpointConfig_kerberos(rName string) string { + return fmt.Sprintf(` +resource "aws_dms_endpoint" "test" { + endpoint_id = %[1]q + endpoint_type = "source" + engine_name = "oracle" + + server_name = "tftest" + port = 27017 + username = "tftest" + database_name = "tftest" + ssl_mode = "none" + extra_connection_attributes = "" + + oracle_settings { + authentication_method = "kerberos" + } + + tags = { + Name = %[1]q + Update = "to-update" + Remove = "to-remove" + } +} +`, rName) +} + func testAccEndpointConfig_postgreSQL(rName string) string { return fmt.Sprintf(` 
resource "aws_dms_endpoint" "test" { @@ -3092,8 +3335,136 @@ resource "aws_dms_endpoint" "test" { `, rName) } +func testAccEndpointConfig_mySQLSourceSettingsBlock(eventsPollInterval int) string { + return fmt.Sprintf(` + mysql_settings { + after_connect_script = "SELECT NOW();" + authentication_method = "iam" + clean_source_metadata_on_mismatch = true + events_poll_interval = %[1]d + execute_timeout = 100 + service_access_role_arn = aws_iam_role.test.arn + server_timezone = "UTC" + } +`, eventsPollInterval) +} + +func testAccEndpointConfig_certificateBase(rName string) string { + return fmt.Sprintf(` +resource "aws_dms_certificate" "dms_certificate" { + certificate_id = %[1]q + certificate_pem = "-----BEGIN CERTIFICATE-----\nMIID2jCCAsKgAwIBAgIJAJ58TJVjU7G1MA0GCSqGSIb3DQEBBQUAMFExCzAJBgNV\nBAYTAlVTMREwDwYDVQQIEwhDb2xvcmFkbzEPMA0GA1UEBxMGRGVudmVyMRAwDgYD\nVQQKEwdDaGFydGVyMQwwCgYDVQQLEwNDU0UwHhcNMTcwMTMwMTkyMDA4WhcNMjYx\nMjA5MTkyMDA4WjBRMQswCQYDVQQGEwJVUzERMA8GA1UECBMIQ29sb3JhZG8xDzAN\nBgNVBAcTBkRlbnZlcjEQMA4GA1UEChMHQ2hhcnRlcjEMMAoGA1UECxMDQ1NFMIIB\nIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAv6dq6VLIImlAaTrckb5w3X6J\nWP7EGz2ChGAXlkEYto6dPCba0v5+f+8UlMOpeB25XGoai7gdItqNWVFpYsgmndx3\nvTad3ukO1zeElKtw5oHPH2plOaiv/gVJaDa9NTeINj0EtGZs74fCOclAzGFX5vBc\nb08ESWBceRgGjGv3nlij4JzHfqTkCKQz6P6pBivQBfk62rcOkkH5rKoaGltRHROS\nMbkwOhu2hN0KmSYTXRvts0LXnZU4N0l2ms39gmr7UNNNlKYINL2JoTs9dNBc7APD\ndZvlEHd+/FjcLCI8hC3t4g4AbfW0okIBCNG0+oVjqGb2DeONSJKsThahXt89MQID\nAQABo4G0MIGxMB0GA1UdDgQWBBQKq8JxjY1GmeZXJjfOMfW0kBIzPDCBgQYDVR0j\nBHoweIAUCqvCcY2NRpnmVyY3zjH1tJASMzyhVaRTMFExCzAJBgNVBAYTAlVTMREw\nDwYDVQQIEwhDb2xvcmFkbzEPMA0GA1UEBxMGRGVudmVyMRAwDgYDVQQKEwdDaGFy\ndGVyMQwwCgYDVQQLEwNDU0WCCQCefEyVY1OxtTAMBgNVHRMEBTADAQH/MA0GCSqG\nSIb3DQEBBQUAA4IBAQAWifoMk5kbv+yuWXvFwHiB4dWUUmMlUlPU/E300yVTRl58\np6DfOgJs7MMftd1KeWqTO+uW134QlTt7+jwI8Jq0uyKCu/O2kJhVtH/Ryog14tGl\n+wLcuIPLbwJI9CwZX4WMBrq4DnYss+6F47i8NCc+Z3MAiG4vtq9ytBmaod0dj2bI\ng4/Lac0e00dql9RnqENh1+dF0V+QgTJCoPkMqDNAlSB8vOodBW81UAb2z12t+IFi\n3X9J3WtCK2+
T5brXL6itzewWJ2ALvX3QpmZx7fMHJ3tE+SjjyivE1BbOlzYHx83t\nTeYnm7pS9un7A/UzTDHbs7hPUezLek+H3xTPAnnq\n-----END CERTIFICATE-----\n" +} +`, rName) +} + +func testAccEndpointConfig_mySQLSourceSettings(rName string, outputMySQLSettings bool, eventsPollInterval int) string { + mysqlSettings := "" + if outputMySQLSettings { + mysqlSettings = testAccEndpointConfig_mySQLSourceSettingsBlock(eventsPollInterval) + } + return acctest.ConfigCompose(testAccEndpointConfig_certificateBase(rName), fmt.Sprintf(` +data "aws_region" "current" {} +data "aws_partition" "current" {} + +resource "aws_iam_role" "test" { + name = %[1]q + assume_role_policy = < 0 && v.([]any)[0] != nil { + input.KerberosAuthenticationSettings = expandKerberosAuthenticationSettings(v.([]any)) + } if v, ok := d.GetOk(names.AttrKMSKeyARN); ok { input.KmsKeyId = aws.String(v.(string)) } @@ -228,7 +261,11 @@ func resourceReplicationInstanceRead(ctx context.Context, d *schema.ResourceData d.Set(names.AttrAllocatedStorage, instance.AllocatedStorage) d.Set(names.AttrAutoMinorVersionUpgrade, instance.AutoMinorVersionUpgrade) d.Set(names.AttrAvailabilityZone, instance.AvailabilityZone) + d.Set("dns_name_servers", instance.DnsNameServers) d.Set(names.AttrEngineVersion, instance.EngineVersion) + if err := d.Set("kerberos_authentication_settings", flattenKerberosAuthenticationSettings(instance.KerberosAuthenticationSettings)); err != nil { + return sdkdiag.AppendErrorf(diags, "setting kerberos_authentication_settings: %s", err) + } d.Set(names.AttrKMSKeyARN, instance.KmsKeyId) d.Set("multi_az", instance.MultiAZ) d.Set("network_type", instance.NetworkType) @@ -273,6 +310,10 @@ func resourceReplicationInstanceUpdate(ctx context.Context, d *schema.ResourceDa input.EngineVersion = aws.String(d.Get(names.AttrEngineVersion).(string)) } + if d.HasChange("kerberos_authentication_settings") { + input.KerberosAuthenticationSettings = expandKerberosAuthenticationSettings(d.Get("kerberos_authentication_settings").([]any)) + } + if 
d.HasChange("multi_az") { input.MultiAZ = aws.Bool(d.Get("multi_az").(bool)) } @@ -332,6 +373,48 @@ func resourceReplicationInstanceDelete(ctx context.Context, d *schema.ResourceDa return diags } +func expandKerberosAuthenticationSettings(tfList []any) *awstypes.KerberosAuthenticationSettings { + if len(tfList) == 0 { + return nil + } + + var apiObject awstypes.KerberosAuthenticationSettings + + for _, tfMapRaw := range tfList { + tfMap, ok := tfMapRaw.(map[string]any) + + if !ok { + continue + } + + if v, ok := tfMap["key_cache_secret_iam_arn"].(string); ok && v != "" { + apiObject.KeyCacheSecretIamArn = aws.String(v) + } + if v, ok := tfMap["key_cache_secret_id"].(string); ok && v != "" { + apiObject.KeyCacheSecretId = aws.String(v) + } + if v, ok := tfMap["krb5_file_contents"].(string); ok && v != "" { + apiObject.Krb5FileContents = aws.String(v) + } + } + + return &apiObject +} + +func flattenKerberosAuthenticationSettings(apiObject *awstypes.KerberosAuthenticationSettings) []any { + if apiObject == nil { + return nil + } + + tfMap := map[string]any{ + "key_cache_secret_iam_arn": aws.ToString(apiObject.KeyCacheSecretIamArn), + "key_cache_secret_id": aws.ToString(apiObject.KeyCacheSecretId), + "krb5_file_contents": aws.ToString(apiObject.Krb5FileContents), + } + + return []any{tfMap} +} + func findReplicationInstanceByID(ctx context.Context, conn *dms.Client, id string) (*awstypes.ReplicationInstance, error) { input := &dms.DescribeReplicationInstancesInput{ Filters: []awstypes.Filter{ @@ -364,8 +447,7 @@ func findReplicationInstances(ctx context.Context, conn *dms.Client, input *dms. if errs.IsA[*awstypes.ResourceNotFoundFault](err) { return nil, &retry.NotFoundError{ - LastError: err, - LastRequest: input, + LastError: err, } } @@ -379,8 +461,8 @@ func findReplicationInstances(ctx context.Context, conn *dms.Client, input *dms. 
return output, nil } -func statusReplicationInstance(ctx context.Context, conn *dms.Client, id string) retry.StateRefreshFunc { - return func() (any, string, error) { +func statusReplicationInstance(conn *dms.Client, id string) retry.StateRefreshFunc { + return func(ctx context.Context) (any, string, error) { output, err := findReplicationInstanceByID(ctx, conn, id) if tfresource.NotFound(err) { @@ -399,7 +481,7 @@ func waitReplicationInstanceCreated(ctx context.Context, conn *dms.Client, id st stateConf := &retry.StateChangeConf{ Pending: []string{replicationInstanceStatusCreating, replicationInstanceStatusModifying}, Target: []string{replicationInstanceStatusAvailable}, - Refresh: statusReplicationInstance(ctx, conn, id), + Refresh: statusReplicationInstance(conn, id), Timeout: timeout, MinTimeout: 10 * time.Second, Delay: 30 * time.Second, // Wait 30 secs before starting @@ -418,7 +500,7 @@ func waitReplicationInstanceUpdated(ctx context.Context, conn *dms.Client, id st stateConf := &retry.StateChangeConf{ Pending: []string{replicationInstanceStatusModifying, replicationInstanceStatusUpgrading}, Target: []string{replicationInstanceStatusAvailable}, - Refresh: statusReplicationInstance(ctx, conn, id), + Refresh: statusReplicationInstance(conn, id), Timeout: timeout, MinTimeout: 10 * time.Second, Delay: 30 * time.Second, // Wait 30 secs before starting @@ -437,7 +519,7 @@ func waitReplicationInstanceDeleted(ctx context.Context, conn *dms.Client, id st stateConf := &retry.StateChangeConf{ Pending: []string{replicationInstanceStatusDeleting}, Target: []string{}, - Refresh: statusReplicationInstance(ctx, conn, id), + Refresh: statusReplicationInstance(conn, id), Timeout: timeout, MinTimeout: 10 * time.Second, Delay: 30 * time.Second, // Wait 30 secs before starting diff --git a/internal/service/dms/replication_instance_data_source_tags_gen_test.go b/internal/service/dms/replication_instance_data_source_tags_gen_test.go index 10f6c577fe2e..c70e7a8ba913 100644 --- 
a/internal/service/dms/replication_instance_data_source_tags_gen_test.go +++ b/internal/service/dms/replication_instance_data_source_tags_gen_test.go @@ -6,7 +6,6 @@ import ( "testing" "github.com/hashicorp/terraform-plugin-testing/config" - sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/knownvalue" "github.com/hashicorp/terraform-plugin-testing/statecheck" @@ -17,10 +16,11 @@ import ( func TestAccDMSReplicationInstanceDataSource_tags(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_dms_replication_instance.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.DMSServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -45,10 +45,11 @@ func TestAccDMSReplicationInstanceDataSource_tags(t *testing.T) { func TestAccDMSReplicationInstanceDataSource_tags_NullMap(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_dms_replication_instance.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.DMSServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -69,10 +70,11 @@ func TestAccDMSReplicationInstanceDataSource_tags_NullMap(t *testing.T) { func TestAccDMSReplicationInstanceDataSource_tags_EmptyMap(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_dms_replication_instance.test" - rName := 
sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.DMSServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -93,10 +95,11 @@ func TestAccDMSReplicationInstanceDataSource_tags_EmptyMap(t *testing.T) { func TestAccDMSReplicationInstanceDataSource_tags_DefaultTags_nonOverlapping(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_dms_replication_instance.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.DMSServiceID), Steps: []resource.TestStep{ @@ -125,10 +128,11 @@ func TestAccDMSReplicationInstanceDataSource_tags_DefaultTags_nonOverlapping(t * func TestAccDMSReplicationInstanceDataSource_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_dms_replication_instance.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.DMSServiceID), Steps: []resource.TestStep{ @@ -163,10 +167,11 @@ func TestAccDMSReplicationInstanceDataSource_tags_IgnoreTags_Overlap_DefaultTag( func TestAccDMSReplicationInstanceDataSource_tags_IgnoreTags_Overlap_ResourceTag(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_dms_replication_instance.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := 
acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.DMSServiceID), Steps: []resource.TestStep{ diff --git a/internal/service/dms/replication_instance_tags_gen_test.go b/internal/service/dms/replication_instance_tags_gen_test.go index c153411681cf..d88018324a72 100644 --- a/internal/service/dms/replication_instance_tags_gen_test.go +++ b/internal/service/dms/replication_instance_tags_gen_test.go @@ -6,7 +6,6 @@ import ( "testing" "github.com/hashicorp/terraform-plugin-testing/config" - sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/knownvalue" "github.com/hashicorp/terraform-plugin-testing/plancheck" @@ -18,10 +17,11 @@ import ( func TestAccDMSReplicationInstance_tags(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_dms_replication_instance.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.DMSServiceID), CheckDestroy: testAccCheckReplicationInstanceDestroy(ctx), @@ -211,10 +211,11 @@ func TestAccDMSReplicationInstance_tags(t *testing.T) { func TestAccDMSReplicationInstance_tags_null(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_dms_replication_instance.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, 
names.DMSServiceID), CheckDestroy: testAccCheckReplicationInstanceDestroy(ctx), @@ -280,10 +281,11 @@ func TestAccDMSReplicationInstance_tags_null(t *testing.T) { func TestAccDMSReplicationInstance_tags_EmptyMap(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_dms_replication_instance.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.DMSServiceID), CheckDestroy: testAccCheckReplicationInstanceDestroy(ctx), @@ -345,10 +347,11 @@ func TestAccDMSReplicationInstance_tags_EmptyMap(t *testing.T) { func TestAccDMSReplicationInstance_tags_AddOnUpdate(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_dms_replication_instance.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.DMSServiceID), CheckDestroy: testAccCheckReplicationInstanceDestroy(ctx), @@ -428,10 +431,11 @@ func TestAccDMSReplicationInstance_tags_AddOnUpdate(t *testing.T) { func TestAccDMSReplicationInstance_tags_EmptyTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_dms_replication_instance.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.DMSServiceID), CheckDestroy: testAccCheckReplicationInstanceDestroy(ctx), @@ -522,10 +526,11 @@ func 
TestAccDMSReplicationInstance_tags_EmptyTag_OnCreate(t *testing.T) { func TestAccDMSReplicationInstance_tags_EmptyTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_dms_replication_instance.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.DMSServiceID), CheckDestroy: testAccCheckReplicationInstanceDestroy(ctx), @@ -664,10 +669,11 @@ func TestAccDMSReplicationInstance_tags_EmptyTag_OnUpdate_Add(t *testing.T) { func TestAccDMSReplicationInstance_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_dms_replication_instance.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.DMSServiceID), CheckDestroy: testAccCheckReplicationInstanceDestroy(ctx), @@ -755,10 +761,11 @@ func TestAccDMSReplicationInstance_tags_EmptyTag_OnUpdate_Replace(t *testing.T) func TestAccDMSReplicationInstance_tags_DefaultTags_providerOnly(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_dms_replication_instance.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.DMSServiceID), CheckDestroy: testAccCheckReplicationInstanceDestroy(ctx), @@ -947,10 +954,11 @@ func TestAccDMSReplicationInstance_tags_DefaultTags_providerOnly(t 
*testing.T) { func TestAccDMSReplicationInstance_tags_DefaultTags_nonOverlapping(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_dms_replication_instance.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.DMSServiceID), CheckDestroy: testAccCheckReplicationInstanceDestroy(ctx), @@ -1115,10 +1123,11 @@ func TestAccDMSReplicationInstance_tags_DefaultTags_nonOverlapping(t *testing.T) func TestAccDMSReplicationInstance_tags_DefaultTags_overlapping(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_dms_replication_instance.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.DMSServiceID), CheckDestroy: testAccCheckReplicationInstanceDestroy(ctx), @@ -1299,10 +1308,11 @@ func TestAccDMSReplicationInstance_tags_DefaultTags_overlapping(t *testing.T) { func TestAccDMSReplicationInstance_tags_DefaultTags_updateToProviderOnly(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_dms_replication_instance.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.DMSServiceID), CheckDestroy: testAccCheckReplicationInstanceDestroy(ctx), @@ -1391,10 +1401,11 @@ func TestAccDMSReplicationInstance_tags_DefaultTags_updateToProviderOnly(t *test func 
TestAccDMSReplicationInstance_tags_DefaultTags_updateToResourceOnly(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_dms_replication_instance.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.DMSServiceID), CheckDestroy: testAccCheckReplicationInstanceDestroy(ctx), @@ -1482,10 +1493,11 @@ func TestAccDMSReplicationInstance_tags_DefaultTags_updateToResourceOnly(t *test func TestAccDMSReplicationInstance_tags_DefaultTags_emptyResourceTag(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_dms_replication_instance.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.DMSServiceID), CheckDestroy: testAccCheckReplicationInstanceDestroy(ctx), @@ -1549,10 +1561,11 @@ func TestAccDMSReplicationInstance_tags_DefaultTags_emptyResourceTag(t *testing. 
func TestAccDMSReplicationInstance_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_dms_replication_instance.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.DMSServiceID), CheckDestroy: testAccCheckReplicationInstanceDestroy(ctx), @@ -1608,10 +1621,11 @@ func TestAccDMSReplicationInstance_tags_DefaultTags_emptyProviderOnlyTag(t *test func TestAccDMSReplicationInstance_tags_DefaultTags_nullOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_dms_replication_instance.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.DMSServiceID), CheckDestroy: testAccCheckReplicationInstanceDestroy(ctx), @@ -1672,10 +1686,11 @@ func TestAccDMSReplicationInstance_tags_DefaultTags_nullOverlappingResourceTag(t func TestAccDMSReplicationInstance_tags_DefaultTags_nullNonOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_dms_replication_instance.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.DMSServiceID), CheckDestroy: testAccCheckReplicationInstanceDestroy(ctx), @@ -1736,10 +1751,11 @@ func TestAccDMSReplicationInstance_tags_DefaultTags_nullNonOverlappingResourceTa func 
TestAccDMSReplicationInstance_tags_ComputedTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_dms_replication_instance.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.DMSServiceID), CheckDestroy: testAccCheckReplicationInstanceDestroy(ctx), @@ -1793,10 +1809,11 @@ func TestAccDMSReplicationInstance_tags_ComputedTag_OnCreate(t *testing.T) { func TestAccDMSReplicationInstance_tags_ComputedTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_dms_replication_instance.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.DMSServiceID), CheckDestroy: testAccCheckReplicationInstanceDestroy(ctx), @@ -1892,10 +1909,11 @@ func TestAccDMSReplicationInstance_tags_ComputedTag_OnUpdate_Add(t *testing.T) { func TestAccDMSReplicationInstance_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_dms_replication_instance.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.DMSServiceID), CheckDestroy: testAccCheckReplicationInstanceDestroy(ctx), @@ -1981,10 +1999,11 @@ func TestAccDMSReplicationInstance_tags_ComputedTag_OnUpdate_Replace(t *testing. 
func TestAccDMSReplicationInstance_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_dms_replication_instance.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.DMSServiceID), CheckDestroy: testAccCheckReplicationInstanceDestroy(ctx), @@ -2142,10 +2161,11 @@ func TestAccDMSReplicationInstance_tags_IgnoreTags_Overlap_DefaultTag(t *testing func TestAccDMSReplicationInstance_tags_IgnoreTags_Overlap_ResourceTag(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_dms_replication_instance.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.DMSServiceID), CheckDestroy: testAccCheckReplicationInstanceDestroy(ctx), diff --git a/internal/service/dms/replication_instance_test.go b/internal/service/dms/replication_instance_test.go index 755b40ea6e70..5d1b9847a108 100644 --- a/internal/service/dms/replication_instance_test.go +++ b/internal/service/dms/replication_instance_test.go @@ -8,6 +8,7 @@ import ( "fmt" "testing" + "github.com/hashicorp/aws-sdk-go-base/v2/endpoints" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" @@ -21,7 +22,10 @@ import ( func TestAccDMSReplicationInstance_basic(t *testing.T) { ctx := acctest.Context(t) // NOTE: Using larger dms.c4.large here for AWS GovCloud (US) support - replicationInstanceClass := "dms.c4.large" + 
replicationInstanceClass := "dms.t3.micro" + if acctest.Partition() == endpoints.AwsUsGovPartitionID { + replicationInstanceClass = "dms.c4.large" + } resourceName := "aws_dms_replication_instance.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -35,7 +39,7 @@ func TestAccDMSReplicationInstance_basic(t *testing.T) { Config: testAccReplicationInstanceConfig_replicationInstanceClass(rName, replicationInstanceClass), Check: resource.ComposeAggregateTestCheckFunc( testAccCheckReplicationInstanceExists(ctx, resourceName), - resource.TestCheckResourceAttr(resourceName, names.AttrAllocatedStorage, "100"), + resource.TestCheckResourceAttr(resourceName, names.AttrAllocatedStorage, "50"), resource.TestCheckResourceAttr(resourceName, names.AttrAutoMinorVersionUpgrade, acctest.CtFalse), resource.TestCheckResourceAttrSet(resourceName, names.AttrAvailabilityZone), resource.TestCheckResourceAttrSet(resourceName, names.AttrEngineVersion), @@ -67,7 +71,10 @@ func TestAccDMSReplicationInstance_basic(t *testing.T) { func TestAccDMSReplicationInstance_disappears(t *testing.T) { ctx := acctest.Context(t) // NOTE: Using larger dms.c4.large here for AWS GovCloud (US) support - replicationInstanceClass := "dms.c4.large" + replicationInstanceClass := "dms.t3.micro" + if acctest.Partition() == endpoints.AwsUsGovPartitionID { + replicationInstanceClass = "dms.c4.large" + } resourceName := "aws_dms_replication_instance.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -466,6 +473,35 @@ func TestAccDMSReplicationInstance_vpcSecurityGroupIDs(t *testing.T) { }) } +func TestAccDMSReplicationInstance_kerberosAuthenticationSettings(t *testing.T) { + ctx := acctest.Context(t) + resourceName := "aws_dms_replication_instance.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.DMSServiceID), + 
ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckReplicationInstanceDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccReplicationInstanceConfig_kerberosAuthenticationSettings(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckReplicationInstanceExists(ctx, resourceName), + resource.TestCheckResourceAttr(resourceName, "dns_name_servers", ""), + resource.TestCheckResourceAttr(resourceName, "kerberos_authentication_settings.#", "1"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{names.AttrApplyImmediately}, + }, + }, + }) +} + func testAccCheckReplicationInstanceExists(ctx context.Context, n string) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] @@ -700,3 +736,110 @@ resource "aws_dms_replication_instance" "test" { } `, rName)) } + +func testAccReplicationInstanceConfig_kerberosAuthenticationSettings(rName string) string { + return acctest.ConfigCompose(testAccReplicationInstanceConfig_base(rName), + testAccReplicationInstanceConfig_secretBase(rName), fmt.Sprintf(` +resource "aws_internet_gateway" "test" { + vpc_id = aws_vpc.test.id + + tags = { + Name = %[1]q + } +} + +resource "aws_route_table" "test" { + vpc_id = aws_vpc.test.id + + route { + cidr_block = "0.0.0.0/0" + gateway_id = aws_internet_gateway.test.id + } + + tags = { + Name = %[1]q + } +} + +resource "aws_route_table_association" "test" { + subnet_id = aws_subnet.test[0].id + route_table_id = aws_route_table.test.id +} + +resource "aws_security_group" "test" { + name = %[1]q + vpc_id = aws_vpc.test.id + + tags = { + Name = %[1]q + } +} + +resource "aws_dms_replication_instance" "test" { + apply_immediately = true + replication_instance_class = data.aws_partition.current.partition == "aws" ? 
"dms.t3.micro" : "dms.c4.large" + replication_instance_id = %[1]q + replication_subnet_group_id = aws_dms_replication_subnet_group.test.replication_subnet_group_id + vpc_security_group_ids = [aws_security_group.test.id] + + kerberos_authentication_settings { + key_cache_secret_iam_arn = aws_iam_role.test.arn + key_cache_secret_id = aws_secretsmanager_secret.test.id + krb5_file_contents = file("test-fixtures/krb5.conf") + } +} +`, rName)) +} + +func testAccReplicationInstanceConfig_secretBase(rName string) string { + return fmt.Sprintf(` +data "aws_region" "current" {} +data "aws_partition" "current" {} + +resource "aws_secretsmanager_secret" "test" { + name = %[1]q + recovery_window_in_days = 0 +} + +resource "aws_secretsmanager_secret_version" "test" { + secret_id = aws_secretsmanager_secret.test.id + secret_binary = filebase64("test-fixtures/keytab.krb") +} + +resource "aws_iam_role" "test" { + name = %[1]q + assume_role_policy = < 0 + n := new != nil && len(new.([]any)) > 0 + if (o && n) || (!o && !n) { + return false + } + return true + }), + ), } } +func validateServerlessCapacity(i any, k string) (ws []string, es []error) { + const ( + epsilon = 1.0e-10 + ) + + v, ok := i.(float64) + if !ok { + es = append(es, fmt.Errorf("expected type of %s to be float64", k)) + return + } + // add a small epsilon to avoid floating point precision issues + if int(v*10+epsilon)%5 != 0 { + es = append(es, fmt.Errorf("%s must be a multiple of 0.5", k)) + return + } + return +} + func resourceClusterCreate(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { var diags diag.Diagnostics conn := meta.(*conns.AWSClient).DocDBClient(ctx) @@ -444,6 +500,10 @@ func resourceClusterCreate(ctx context.Context, d *schema.ResourceData, meta any requiresModifyDbCluster = true } + if v, ok := d.GetOk("serverless_v2_scaling_configuration"); ok && len(v.([]any)) > 0 && v.([]any)[0] != nil { + input.ServerlessV2ScalingConfiguration = 
expandServerlessV2ScalingConfiguration(v.([]any)[0].(map[string]any)) + } + if v, ok := d.GetOk(names.AttrStorageType); ok { input.StorageType = aws.String(v.(string)) } @@ -452,7 +512,7 @@ func resourceClusterCreate(ctx context.Context, d *schema.ResourceData, meta any input.VpcSecurityGroupIds = flex.ExpandStringValueSet(v) } - _, err := tfresource.RetryWhenAWSErrMessageContains(ctx, propagationTimeout, func() (any, error) { + _, err := tfresource.RetryWhenAWSErrMessageContains(ctx, propagationTimeout, func(ctx context.Context) (any, error) { return conn.RestoreDBClusterFromSnapshot(ctx, &input) }, errCodeInvalidParameterValue, "IAM role ARN value is invalid or does not include the required permissions") @@ -501,6 +561,10 @@ func resourceClusterCreate(ctx context.Context, d *schema.ResourceData, meta any input.Port = aws.Int32(int32(v.(int))) } + if v, ok := d.GetOk("serverless_v2_scaling_configuration"); ok && len(v.([]any)) > 0 && v.([]any)[0] != nil { + input.ServerlessV2ScalingConfiguration = expandServerlessV2ScalingConfiguration(v.([]any)[0].(map[string]any)) + } + if v, ok := d.GetOk(names.AttrStorageType); ok { input.StorageType = aws.String(v.(string)) } @@ -509,7 +573,7 @@ func resourceClusterCreate(ctx context.Context, d *schema.ResourceData, meta any input.VpcSecurityGroupIds = flex.ExpandStringValueSet(v) } - _, err := tfresource.RetryWhenAWSErrMessageContains(ctx, propagationTimeout, func() (any, error) { + _, err := tfresource.RetryWhenAWSErrMessageContains(ctx, propagationTimeout, func(ctx context.Context) (any, error) { return conn.RestoreDBClusterToPointInTime(ctx, &input) }, errCodeInvalidParameterValue, "IAM role ARN value is invalid or does not include the required permissions") if err != nil { @@ -597,6 +661,10 @@ func resourceClusterCreate(ctx context.Context, d *schema.ResourceData, meta any input.PreferredMaintenanceWindow = aws.String(v.(string)) } + if v, ok := d.GetOk("serverless_v2_scaling_configuration"); ok && len(v.([]any)) > 0 && 
v.([]any)[0] != nil { + input.ServerlessV2ScalingConfiguration = expandServerlessV2ScalingConfiguration(v.([]any)[0].(map[string]any)) + } + if v, ok := d.GetOk(names.AttrStorageEncrypted); ok { input.StorageEncrypted = aws.Bool(v.(bool)) } @@ -609,7 +677,7 @@ func resourceClusterCreate(ctx context.Context, d *schema.ResourceData, meta any input.VpcSecurityGroupIds = flex.ExpandStringValueSet(v) } - _, err := tfresource.RetryWhenAWSErrMessageContains(ctx, propagationTimeout, func() (any, error) { + _, err := tfresource.RetryWhenAWSErrMessageContains(ctx, propagationTimeout, func(ctx context.Context) (any, error) { return conn.CreateDBCluster(ctx, &input) }, errCodeInvalidParameterValue, "IAM role ARN value is invalid or does not include the required permissions") @@ -704,6 +772,9 @@ func resourceClusterRead(ctx context.Context, d *schema.ResourceData, meta any) d.Set("preferred_backup_window", dbc.PreferredBackupWindow) d.Set(names.AttrPreferredMaintenanceWindow, dbc.PreferredMaintenanceWindow) d.Set("reader_endpoint", dbc.ReaderEndpoint) + if err := d.Set("serverless_v2_scaling_configuration", flattenServerlessV2ScalingConfiguration(dbc.ServerlessV2ScalingConfiguration)); err != nil { + return sdkdiag.AppendErrorf(diags, "setting serverless_v2_scaling_configuration: %s", err) + } d.Set(names.AttrStorageEncrypted, dbc.StorageEncrypted) d.Set(names.AttrStorageType, dbc.StorageType) d.Set(names.AttrVPCSecurityGroupIDs, tfslices.ApplyToAll(dbc.VpcSecurityGroups, func(v awstypes.VpcSecurityGroupMembership) string { @@ -771,6 +842,12 @@ func resourceClusterUpdate(ctx context.Context, d *schema.ResourceData, meta any input.PreferredBackupWindow = aws.String(d.Get("preferred_backup_window").(string)) } + if d.HasChange("serverless_v2_scaling_configuration") { + if v, ok := d.GetOk("serverless_v2_scaling_configuration"); ok && len(v.([]any)) > 0 && v.([]any)[0] != nil { + input.ServerlessV2ScalingConfiguration = 
expandServerlessV2ScalingConfiguration(v.([]any)[0].(map[string]any)) + } + } + if d.HasChange(names.AttrStorageType) { input.StorageType = aws.String(d.Get(names.AttrStorageType).(string)) } @@ -791,7 +868,7 @@ func resourceClusterUpdate(ctx context.Context, d *schema.ResourceData, meta any timeout = 5 * time.Minute ) _, err := tfresource.RetryWhen(ctx, timeout, - func() (any, error) { + func(ctx context.Context) (any, error) { return conn.ModifyDBCluster(ctx, &input) }, func(err error) (bool, error) { @@ -866,7 +943,7 @@ func resourceClusterDelete(ctx context.Context, d *schema.ResourceData, meta any log.Printf("[DEBUG] Deleting DocumentDB Cluster: %s", d.Id()) _, err := tfresource.RetryWhen(ctx, d.Timeout(schema.TimeoutDelete), - func() (any, error) { + func(ctx context.Context) (any, error) { return conn.DeleteDBCluster(ctx, &input) }, func(err error) (bool, error) { @@ -929,6 +1006,38 @@ func diffCloudWatchLogsExportConfiguration(old, new []any) ([]any, []any) { return add, disable } +func expandServerlessV2ScalingConfiguration(v map[string]any) *awstypes.ServerlessV2ScalingConfiguration { + if v == nil { + return nil + } + + apiObject := &awstypes.ServerlessV2ScalingConfiguration{} + if v, ok := v[names.AttrMaxCapacity].(float64); ok { + apiObject.MaxCapacity = aws.Float64(v) + } + if v, ok := v["min_capacity"].(float64); ok { + apiObject.MinCapacity = aws.Float64(v) + } + + return apiObject +} + +func flattenServerlessV2ScalingConfiguration(v *awstypes.ServerlessV2ScalingConfigurationInfo) []any { + if v == nil { + return nil + } + + tfMap := map[string]any{} + + if v.MaxCapacity != nil { + tfMap[names.AttrMaxCapacity] = aws.ToFloat64(v.MaxCapacity) + } + if v.MinCapacity != nil { + tfMap["min_capacity"] = aws.ToFloat64(v.MinCapacity) + } + return []any{tfMap} +} + func removeClusterFromGlobalCluster(ctx context.Context, conn *docdb.Client, clusterARN, globalClusterID string, timeout time.Duration) error { input := docdb.RemoveFromGlobalClusterInput{ 
DbClusterIdentifier: aws.String(clusterARN), @@ -946,7 +1055,7 @@ func removeClusterFromGlobalCluster(ctx context.Context, conn *docdb.Client, clu return fmt.Errorf("removing DocumentDB Cluster (%s) from DocumentDB Global Cluster (%s): %w", clusterARN, globalClusterID, err) } - _, err = tfresource.RetryUntilNotFound(ctx, timeout, func() (any, error) { + _, err = tfresource.RetryUntilNotFound(ctx, timeout, func(ctx context.Context) (any, error) { return findGlobalClusterByClusterARN(ctx, conn, clusterARN) }) diff --git a/internal/service/docdb/cluster_instance.go b/internal/service/docdb/cluster_instance.go index 6b0cc56af960..e96772799fd2 100644 --- a/internal/service/docdb/cluster_instance.go +++ b/internal/service/docdb/cluster_instance.go @@ -219,7 +219,7 @@ func resourceClusterInstanceCreate(ctx context.Context, d *schema.ResourceData, input.PreferredMaintenanceWindow = aws.String(v.(string)) } - _, err := tfresource.RetryWhenAWSErrMessageContains(ctx, propagationTimeout, func() (any, error) { + _, err := tfresource.RetryWhenAWSErrMessageContains(ctx, propagationTimeout, func(ctx context.Context) (any, error) { return conn.CreateDBInstance(ctx, input) }, errCodeInvalidParameterValue, "IAM role ARN value is invalid or does not include the required permissions") @@ -341,7 +341,7 @@ func resourceClusterInstanceUpdate(ctx context.Context, d *schema.ResourceData, input.PromotionTier = aws.Int32(int32(d.Get("promotion_tier").(int))) } - _, err := tfresource.RetryWhenAWSErrMessageContains(ctx, propagationTimeout, func() (any, error) { + _, err := tfresource.RetryWhenAWSErrMessageContains(ctx, propagationTimeout, func(ctx context.Context) (any, error) { return conn.ModifyDBInstance(ctx, input) }, errCodeInvalidParameterValue, "IAM role ARN value is invalid or does not include the required permissions") diff --git a/internal/service/docdb/cluster_parameter_group.go b/internal/service/docdb/cluster_parameter_group.go index 7ceaaae458f3..d76992a040b1 100644 --- 
a/internal/service/docdb/cluster_parameter_group.go +++ b/internal/service/docdb/cluster_parameter_group.go @@ -208,7 +208,7 @@ func resourceClusterParameterGroupDelete(ctx context.Context, d *schema.Resource return sdkdiag.AppendErrorf(diags, "deleting DocumentDB Cluster Parameter Group (%s): %s", d.Id(), err) } - _, err = tfresource.RetryUntilNotFound(ctx, 10*time.Minute, func() (any, error) { + _, err = tfresource.RetryUntilNotFound(ctx, 10*time.Minute, func(ctx context.Context) (any, error) { return findDBClusterParameterGroupByName(ctx, conn, d.Id()) }) diff --git a/internal/service/docdb/cluster_test.go b/internal/service/docdb/cluster_test.go index a27bf7e5cb56..8656f91de0c3 100644 --- a/internal/service/docdb/cluster_test.go +++ b/internal/service/docdb/cluster_test.go @@ -17,6 +17,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/plancheck" "github.com/hashicorp/terraform-plugin-testing/terraform" "github.com/hashicorp/terraform-plugin-testing/tfversion" "github.com/hashicorp/terraform-provider-aws/internal/acctest" @@ -80,6 +81,7 @@ func TestAccDocDBCluster_basic(t *testing.T) { resource.TestCheckResourceAttrSet(resourceName, "preferred_backup_window"), resource.TestCheckResourceAttrSet(resourceName, names.AttrPreferredMaintenanceWindow), resource.TestCheckResourceAttrSet(resourceName, "reader_endpoint"), + resource.TestCheckResourceAttr(resourceName, "serverless_v2_scaling_configuration.#", "0"), resource.TestCheckResourceAttr(resourceName, "skip_final_snapshot", acctest.CtTrue), resource.TestCheckNoResourceAttr(resourceName, "snapshot_identifier"), resource.TestCheckResourceAttr(resourceName, names.AttrStorageEncrypted, acctest.CtFalse), @@ -320,6 +322,11 @@ func TestAccDocDBCluster_updateCloudWatchLogsExports(t *testing.T) { Steps: 
[]resource.TestStep{ { Config: testAccClusterConfig_noCloudWatchLogs(rName), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + }, + }, Check: resource.ComposeTestCheckFunc( testAccCheckClusterExists(ctx, resourceName, &v), resource.TestCheckResourceAttr(resourceName, "enabled_cloudwatch_logs_exports.#", "0"), @@ -339,6 +346,11 @@ func TestAccDocDBCluster_updateCloudWatchLogsExports(t *testing.T) { }, { Config: testAccClusterConfig_basic(rName), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + }, + }, Check: resource.ComposeTestCheckFunc( testAccCheckClusterExists(ctx, resourceName, &v), resource.TestCheckResourceAttr(resourceName, "enabled_cloudwatch_logs_exports.#", "2"), @@ -434,6 +446,11 @@ func TestAccDocDBCluster_backupsUpdate(t *testing.T) { Steps: []resource.TestStep{ { Config: testAccClusterConfig_backups(rName), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + }, + }, Check: resource.ComposeTestCheckFunc( testAccCheckClusterExists(ctx, resourceName, &v), resource.TestCheckResourceAttr(resourceName, "backup_retention_period", "5"), @@ -455,6 +472,11 @@ func TestAccDocDBCluster_backupsUpdate(t *testing.T) { }, { Config: testAccClusterConfig_backupsUpdate(rName), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + }, + }, Check: resource.ComposeTestCheckFunc( testAccCheckClusterExists(ctx, resourceName, &v), resource.TestCheckResourceAttr(resourceName, "backup_retention_period", "10"), @@ -1005,6 +1027,79 @@ func TestAccDocDBCluster_manageMasterUserPassword(t *testing.T) { }) } +func 
TestAccDocDBCluster_serverless(t *testing.T) { + ctx := acctest.Context(t) + var dbCluster awstypes.DBCluster + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_docdb_cluster.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.DocDBServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckClusterDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccClusterConfig_serverless(rName, 0.6, 1.0), + ExpectError: regexache.MustCompile(`must be a multiple of 0.5`), + }, + { + Config: testAccClusterConfig_serverless(rName, 0.5, 1.0), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + }, + }, + Check: resource.ComposeTestCheckFunc( + testAccCheckClusterExists(ctx, resourceName, &dbCluster), + resource.TestCheckResourceAttr(resourceName, "serverless_v2_scaling_configuration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "serverless_v2_scaling_configuration.0.min_capacity", "0.5"), + resource.TestCheckResourceAttr(resourceName, "serverless_v2_scaling_configuration.0.max_capacity", "1"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + names.AttrAllowMajorVersionUpgrade, + names.AttrApplyImmediately, + names.AttrFinalSnapshotIdentifier, + "master_password", + "skip_final_snapshot", + "manage_master_user_password", + }, + }, + { + Config: testAccClusterConfig_serverless(rName, 1.0, 1.5), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + }, + }, + Check: resource.ComposeTestCheckFunc( + testAccCheckClusterExists(ctx, resourceName, &dbCluster), + 
resource.TestCheckResourceAttr(resourceName, "serverless_v2_scaling_configuration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "serverless_v2_scaling_configuration.0.min_capacity", "1"), + resource.TestCheckResourceAttr(resourceName, "serverless_v2_scaling_configuration.0.max_capacity", "1.5"), + ), + }, + { + Config: testAccClusterConfig_serverlessRemoved(rName), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionDestroyBeforeCreate), + }, + }, + Check: resource.ComposeTestCheckFunc( + testAccCheckClusterExists(ctx, resourceName, &dbCluster), + resource.TestCheckResourceAttr(resourceName, "serverless_v2_scaling_configuration.#", "0"), + ), + }, + }, + }) +} + func testAccCheckClusterDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { conn := acctest.Provider.Meta().(*conns.AWSClient).DocDBClient(ctx) @@ -1637,3 +1732,32 @@ resource "aws_docdb_cluster" "test" { } `, rName, passwordConfig)) } + +func testAccClusterConfig_serverless(rName string, minCapacity, maxCapacity float64) string { + return acctest.ConfigCompose(fmt.Sprintf(` +resource "aws_docdb_cluster" "test" { + cluster_identifier = %[1]q + + master_password = "avoid-plaintext-passwords" + master_username = "tfacctest" + skip_final_snapshot = true + + serverless_v2_scaling_configuration { + min_capacity = %[2]f + max_capacity = %[3]f + } +} +`, rName, minCapacity, maxCapacity)) +} + +func testAccClusterConfig_serverlessRemoved(rName string) string { + return acctest.ConfigCompose(fmt.Sprintf(` +resource "aws_docdb_cluster" "test" { + cluster_identifier = %[1]q + + master_password = "avoid-plaintext-passwords" + master_username = "tfacctest" + skip_final_snapshot = true +} +`, rName)) +} diff --git a/internal/service/docdb/global_cluster.go b/internal/service/docdb/global_cluster.go index 3c81c334861b..1efcb4b2c98a 100644 --- 
a/internal/service/docdb/global_cluster.go +++ b/internal/service/docdb/global_cluster.go @@ -251,7 +251,7 @@ func resourceGlobalClusterUpdate(ctx context.Context, d *schema.ResourceData, me EngineVersion: aws.String(engineVersion), } - _, err = tfresource.RetryWhenAWSErrMessageContains(ctx, propagationTimeout, func() (any, error) { + _, err = tfresource.RetryWhenAWSErrMessageContains(ctx, propagationTimeout, func(ctx context.Context) (any, error) { return conn.ModifyDBCluster(ctx, input) }, "InvalidParameterValue", "IAM role ARN value is invalid or does not include the required permissions") @@ -291,7 +291,7 @@ func resourceGlobalClusterDelete(ctx context.Context, d *schema.ResourceData, me log.Printf("[DEBUG] Deleting DocumentDB Global Cluster: %s", d.Id()) - _, err := tfresource.RetryWhenIsAErrorMessageContains[*awstypes.InvalidGlobalClusterStateFault](ctx, d.Timeout(schema.TimeoutDelete), func() (any, error) { + _, err := tfresource.RetryWhenIsAErrorMessageContains[any, *awstypes.InvalidGlobalClusterStateFault](ctx, d.Timeout(schema.TimeoutDelete), func(ctx context.Context) (any, error) { return conn.DeleteGlobalCluster(ctx, &docdb.DeleteGlobalClusterInput{ GlobalClusterIdentifier: aws.String(d.Id()), }) diff --git a/internal/service/docdb/service_endpoint_resolver_gen.go b/internal/service/docdb/service_endpoint_resolver_gen.go index e8255189f574..97b2de0568bb 100644 --- a/internal/service/docdb/service_endpoint_resolver_gen.go +++ b/internal/service/docdb/service_endpoint_resolver_gen.go @@ -62,7 +62,7 @@ func (r resolverV2) ResolveEndpoint(ctx context.Context, params docdb.EndpointPa }) params.UseFIPS = aws.Bool(false) } else { - err = fmt.Errorf("looking up docdb endpoint %q: %s", hostname, err) + err = fmt.Errorf("looking up docdb endpoint %q: %w", hostname, err) return } } else { diff --git a/internal/service/docdb/service_endpoints_gen_test.go b/internal/service/docdb/service_endpoints_gen_test.go index 28aae8ce3af0..619475bbc410 100644 --- 
a/internal/service/docdb/service_endpoints_gen_test.go +++ b/internal/service/docdb/service_endpoints_gen_test.go @@ -521,7 +521,7 @@ func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { ) } -var errCancelOperation = fmt.Errorf("Test: Canceling request") +var errCancelOperation = errors.New("Test: Canceling request") func addCancelRequestMiddleware() func(*middleware.Stack) error { return func(stack *middleware.Stack) error { diff --git a/internal/service/docdb/service_package_gen.go b/internal/service/docdb/service_package_gen.go index 01926c87a130..971521a242d4 100644 --- a/internal/service/docdb/service_package_gen.go +++ b/internal/service/docdb/service_package_gen.go @@ -7,7 +7,6 @@ import ( "unique" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/docdb" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -128,7 +127,7 @@ func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) ( func(o *docdb.Options) { if inContext, ok := conns.FromContext(ctx); ok && inContext.VCREnabled() { tflog.Info(ctx, "overriding retry behavior to immediately return VCR errors") - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(vcr.InteractionNotFoundRetryableFunc)) + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), vcr.InteractionNotFoundRetryableFunc) } }, withExtraOptions(ctx, p, config), diff --git a/internal/service/docdb/subnet_group.go b/internal/service/docdb/subnet_group.go index a64cbd8e187e..96abcbecf98a 100644 --- a/internal/service/docdb/subnet_group.go +++ b/internal/service/docdb/subnet_group.go @@ -166,7 +166,7 @@ func resourceSubnetGroupDelete(ctx context.Context, d *schema.ResourceData, meta return sdkdiag.AppendErrorf(diags, "deleting DocumentDB Subnet Group (%s): %s", d.Id(), err) } - _, err = 
tfresource.RetryUntilNotFound(ctx, 10*time.Minute, func() (any, error) { + _, err = tfresource.RetryUntilNotFound(ctx, 10*time.Minute, func(ctx context.Context) (any, error) { return findDBSubnetGroupByName(ctx, conn, d.Id()) }) diff --git a/internal/service/docdb/sweep.go b/internal/service/docdb/sweep.go index c0ec260e1607..e4e65dcfca76 100644 --- a/internal/service/docdb/sweep.go +++ b/internal/service/docdb/sweep.go @@ -112,7 +112,7 @@ func sweepClusterSnapshots(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %w", err) + return fmt.Errorf("getting client: %w", err) } conn := client.DocDBClient(ctx) input := &docdb.DescribeDBClusterSnapshotsInput{} @@ -153,7 +153,7 @@ func sweepClusterParameterGroups(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.DocDBClient(ctx) input := &docdb.DescribeDBClusterParameterGroupsInput{} @@ -169,7 +169,7 @@ func sweepClusterParameterGroups(region string) error { } if err != nil { - return fmt.Errorf("error listing DocumentDB Cluster Parameter Groups (%s): %s", region, err) + return err } for _, v := range page.DBClusterParameterGroups { @@ -201,7 +201,7 @@ func sweepClusterInstances(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.DocDBClient(ctx) input := &docdb.DescribeDBInstancesInput{} @@ -246,7 +246,7 @@ func sweepGlobalClusters(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %w", err) + return 
fmt.Errorf("getting client: %w", err) } conn := client.DocDBClient(ctx) input := &docdb.DescribeGlobalClustersInput{} @@ -287,7 +287,7 @@ func sweepSubnetGroups(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.DocDBClient(ctx) input := &docdb.DescribeDBSubnetGroupsInput{} @@ -328,7 +328,7 @@ func sweepEventSubscriptions(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %w", err) + return fmt.Errorf("getting client: %w", err) } conn := client.DocDBClient(ctx) input := &docdb.DescribeEventSubscriptionsInput{} diff --git a/internal/service/docdb/tags_gen.go b/internal/service/docdb/tags_gen.go index 957ce8d63b95..5964e9880898 100644 --- a/internal/service/docdb/tags_gen.go +++ b/internal/service/docdb/tags_gen.go @@ -3,8 +3,8 @@ package docdb import ( "context" - "fmt" + "github.com/YakDriver/smarterr" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/docdb" awstypes "github.com/aws/aws-sdk-go-v2/service/docdb/types" @@ -27,7 +27,7 @@ func listTags(ctx context.Context, conn *docdb.Client, identifier string, optFns output, err := conn.ListTagsForResource(ctx, &input, optFns...) 
if err != nil { - return tftags.New(ctx, nil), err + return tftags.New(ctx, nil), smarterr.NewError(err) } return keyValueTags(ctx, output.TagList), nil @@ -39,7 +39,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri tags, err := listTags(ctx, meta.(*conns.AWSClient).DocDBClient(ctx), identifier) if err != nil { - return err + return smarterr.NewError(err) } if inContext, ok := tftags.FromContext(ctx); ok { @@ -117,7 +117,7 @@ func updateTags(ctx context.Context, conn *docdb.Client, identifier string, oldT _, err := conn.RemoveTagsFromResource(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("untagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } @@ -132,7 +132,7 @@ func updateTags(ctx context.Context, conn *docdb.Client, identifier string, oldT _, err := conn.AddTagsToResource(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("tagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } diff --git a/internal/service/docdbelastic/cluster.go b/internal/service/docdbelastic/cluster.go index bfffb2e4f4a3..293a288177e5 100644 --- a/internal/service/docdbelastic/cluster.go +++ b/internal/service/docdbelastic/cluster.go @@ -42,6 +42,7 @@ import ( // @ArnIdentity(identityDuplicateAttributes="id") // @Testing(existsType="github.com/aws/aws-sdk-go-v2/service/docdbelastic/types;awstypes;awstypes.Cluster") // @Testing(importIgnore="admin_user_password") +// @Testing(preIdentityVersion="v5.100.0") func newClusterResource(context.Context) (resource.ResourceWithConfigure, error) { r := &clusterResource{} diff --git a/internal/service/docdbelastic/cluster_identity_gen_test.go b/internal/service/docdbelastic/cluster_identity_gen_test.go index 052ca773dbb2..484ef80b4523 100644 --- a/internal/service/docdbelastic/cluster_identity_gen_test.go +++ b/internal/service/docdbelastic/cluster_identity_gen_test.go @@ -16,6 +16,7 @@ import ( 
"github.com/hashicorp/terraform-plugin-testing/tfjsonpath" "github.com/hashicorp/terraform-plugin-testing/tfversion" "github.com/hashicorp/terraform-provider-aws/internal/acctest" + tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -26,7 +27,7 @@ func TestAccDocDBElasticCluster_Identity_Basic(t *testing.T) { resourceName := "aws_docdbelastic_cluster.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ TerraformVersionChecks: []tfversion.TerraformVersionCheck{ tfversion.SkipBelow(tfversion.Version1_12_0), }, @@ -47,6 +48,9 @@ func TestAccDocDBElasticCluster_Identity_Basic(t *testing.T) { ConfigStateChecks: []statecheck.StateCheck{ statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New(names.AttrARN), compare.ValuesSame()), statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), }, }, @@ -115,7 +119,7 @@ func TestAccDocDBElasticCluster_Identity_RegionOverride(t *testing.T) { resourceName := "aws_docdbelastic_cluster.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ TerraformVersionChecks: []tfversion.TerraformVersionCheck{ tfversion.SkipBelow(tfversion.Version1_12_0), }, @@ -134,6 +138,9 @@ func TestAccDocDBElasticCluster_Identity_RegionOverride(t *testing.T) { ConfigStateChecks: []statecheck.StateCheck{ statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New(names.AttrARN), compare.ValuesSame()), 
statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), }, }, @@ -237,3 +244,129 @@ func TestAccDocDBElasticCluster_Identity_RegionOverride(t *testing.T) { }, }) } + +func TestAccDocDBElasticCluster_Identity_ExistingResource(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.Cluster + resourceName := "aws_docdbelastic_cluster.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.DocDBElasticServiceID), + CheckDestroy: testAccCheckClusterDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/Cluster/basic_v5.100.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckClusterExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: v6.0 Identity set on refresh + { + ConfigDirectory: config.StaticDirectory("testdata/Cluster/basic_v6.0.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckClusterExists(ctx, resourceName, &v), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + 
plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + + // Step 3: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Cluster/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + }, + }) +} + +func TestAccDocDBElasticCluster_Identity_ExistingResource_NoRefresh_NoChange(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.Cluster + resourceName := "aws_docdbelastic_cluster.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.DocDBElasticServiceID), + CheckDestroy: testAccCheckClusterDestroy(ctx), + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: 
config.StaticDirectory("testdata/Cluster/basic_v5.100.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckClusterExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Cluster/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + }, + }, + }) +} diff --git a/internal/service/docdbelastic/cluster_test.go b/internal/service/docdbelastic/cluster_test.go index c95e1a9a5aa0..11b3846abfee 100644 --- a/internal/service/docdbelastic/cluster_test.go +++ b/internal/service/docdbelastic/cluster_test.go @@ -14,13 +14,8 @@ import ( awstypes "github.com/aws/aws-sdk-go-v2/service/docdbelastic/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" - "github.com/hashicorp/terraform-plugin-testing/plancheck" - "github.com/hashicorp/terraform-plugin-testing/statecheck" "github.com/hashicorp/terraform-plugin-testing/terraform" - "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" - "github.com/hashicorp/terraform-plugin-testing/tfversion" "github.com/hashicorp/terraform-provider-aws/internal/acctest" - tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/create" tfdocdbelastic "github.com/hashicorp/terraform-provider-aws/internal/service/docdbelastic" @@ -232,70 +227,6 @@ func TestAccDocDBElasticCluster_update(t *testing.T) { }) } -func TestAccDocDBElasticCluster_Identity_ExistingResource(t *testing.T) { - ctx := acctest.Context(t) - rName := 
sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resourceName := "aws_docdbelastic_cluster.test" - - resource.ParallelTest(t, resource.TestCase{ - TerraformVersionChecks: []tfversion.TerraformVersionCheck{ - tfversion.SkipBelow(tfversion.Version1_12_0), - }, - PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, names.DocDBElasticServiceID), - CheckDestroy: testAccCheckClusterDestroy(ctx), - Steps: []resource.TestStep{ - { - ExternalProviders: map[string]resource.ExternalProvider{ - "aws": { - Source: "hashicorp/aws", - VersionConstraint: "5.100.0", - }, - }, - Config: testAccClusterConfig_basic(rName), - ConfigStateChecks: []statecheck.StateCheck{ - tfstatecheck.ExpectNoIdentity(resourceName), - }, - }, - { - ExternalProviders: map[string]resource.ExternalProvider{ - "aws": { - Source: "hashicorp/aws", - VersionConstraint: "6.0.0", - }, - }, - Config: testAccClusterConfig_basic(rName), - ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - PostApplyPostRefresh: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - }, - ConfigStateChecks: []statecheck.StateCheck{ - statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), - }, - }, - { - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - Config: testAccClusterConfig_basic(rName), - ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - PostApplyPostRefresh: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - }, - ConfigStateChecks: []statecheck.StateCheck{ - statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), - }, - }, - }, - }) -} - func testAccCheckClusterDestroy(ctx 
context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { conn := acctest.Provider.Meta().(*conns.AWSClient).DocDBElasticClient(ctx) diff --git a/internal/service/docdbelastic/service_endpoint_resolver_gen.go b/internal/service/docdbelastic/service_endpoint_resolver_gen.go index 38a7dfa18d41..5364316f949c 100644 --- a/internal/service/docdbelastic/service_endpoint_resolver_gen.go +++ b/internal/service/docdbelastic/service_endpoint_resolver_gen.go @@ -62,7 +62,7 @@ func (r resolverV2) ResolveEndpoint(ctx context.Context, params docdbelastic.End }) params.UseFIPS = aws.Bool(false) } else { - err = fmt.Errorf("looking up docdbelastic endpoint %q: %s", hostname, err) + err = fmt.Errorf("looking up docdbelastic endpoint %q: %w", hostname, err) return } } else { diff --git a/internal/service/docdbelastic/service_endpoints_gen_test.go b/internal/service/docdbelastic/service_endpoints_gen_test.go index 2c7f79764337..396aa628447a 100644 --- a/internal/service/docdbelastic/service_endpoints_gen_test.go +++ b/internal/service/docdbelastic/service_endpoints_gen_test.go @@ -521,7 +521,7 @@ func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { ) } -var errCancelOperation = fmt.Errorf("Test: Canceling request") +var errCancelOperation = errors.New("Test: Canceling request") func addCancelRequestMiddleware() func(*middleware.Stack) error { return func(stack *middleware.Stack) error { diff --git a/internal/service/docdbelastic/service_package_gen.go b/internal/service/docdbelastic/service_package_gen.go index 036a0c617200..57235189bc89 100644 --- a/internal/service/docdbelastic/service_package_gen.go +++ b/internal/service/docdbelastic/service_package_gen.go @@ -7,7 +7,6 @@ import ( "unique" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/docdbelastic" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ 
-71,7 +70,7 @@ func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) ( func(o *docdbelastic.Options) { if inContext, ok := conns.FromContext(ctx); ok && inContext.VCREnabled() { tflog.Info(ctx, "overriding retry behavior to immediately return VCR errors") - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(vcr.InteractionNotFoundRetryableFunc)) + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), vcr.InteractionNotFoundRetryableFunc) } }, withExtraOptions(ctx, p, config), diff --git a/internal/service/docdbelastic/sweep.go b/internal/service/docdbelastic/sweep.go index 0e9e53d8c5b0..662735cb9a03 100644 --- a/internal/service/docdbelastic/sweep.go +++ b/internal/service/docdbelastic/sweep.go @@ -32,7 +32,7 @@ func sweepClusters(region string) error { } client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %w", err) + return fmt.Errorf("getting client: %w", err) } conn := client.DocDBElasticClient(ctx) input := &docdbelastic.ListClustersInput{} diff --git a/internal/service/docdbelastic/tags_gen.go b/internal/service/docdbelastic/tags_gen.go index 349b7230de24..d63a43ddec71 100644 --- a/internal/service/docdbelastic/tags_gen.go +++ b/internal/service/docdbelastic/tags_gen.go @@ -3,8 +3,8 @@ package docdbelastic import ( "context" - "fmt" + "github.com/YakDriver/smarterr" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/docdbelastic" "github.com/hashicorp/terraform-plugin-log/tflog" @@ -26,7 +26,7 @@ func listTags(ctx context.Context, conn *docdbelastic.Client, identifier string, output, err := conn.ListTagsForResource(ctx, &input, optFns...) 
if err != nil { - return tftags.New(ctx, nil), err + return tftags.New(ctx, nil), smarterr.NewError(err) } return keyValueTags(ctx, output.Tags), nil @@ -38,7 +38,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri tags, err := listTags(ctx, meta.(*conns.AWSClient).DocDBElasticClient(ctx), identifier) if err != nil { - return err + return smarterr.NewError(err) } if inContext, ok := tftags.FromContext(ctx); ok { @@ -99,7 +99,7 @@ func updateTags(ctx context.Context, conn *docdbelastic.Client, identifier strin _, err := conn.UntagResource(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("untagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } @@ -114,7 +114,7 @@ func updateTags(ctx context.Context, conn *docdbelastic.Client, identifier strin _, err := conn.TagResource(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("tagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } diff --git a/internal/service/docdbelastic/testdata/Cluster/basic_v5.100.0/main_gen.tf b/internal/service/docdbelastic/testdata/Cluster/basic_v5.100.0/main_gen.tf new file mode 100644 index 000000000000..8327b9397bf7 --- /dev/null +++ b/internal/service/docdbelastic/testdata/Cluster/basic_v5.100.0/main_gen.tf @@ -0,0 +1,72 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_docdbelastic_cluster" "test" { + name = var.rName + shard_capacity = 2 + shard_count = 1 + + admin_user_name = "testuser" + admin_user_password = "testpassword" + auth_type = "PLAIN_TEXT" + + preferred_maintenance_window = "Tue:04:00-Tue:04:30" + + vpc_security_group_ids = [ + aws_security_group.test.id + ] + + subnet_ids = aws_subnet.test[*].id +} + +# testAccClusterBaseConfig + +resource "aws_security_group" "test" { + name = var.rName + vpc_id = aws_vpc.test.id +} + +# acctest.ConfigVPCWithSubnets(rName, 2) + +resource "aws_vpc" "test" { + cidr_block = "10.0.0.0/16" +} + +resource "aws_subnet" "test" { + count = 2 + + vpc_id = aws_vpc.test.id + availability_zone = data.aws_availability_zones.available.names[count.index] + cidr_block = cidrsubnet(aws_vpc.test.cidr_block, 8, count.index) +} + +# acctest.ConfigAvailableAZsNoOptInDefaultExclude + +data "aws_availability_zones" "available" { + exclude_zone_ids = local.default_exclude_zone_ids + state = "available" + + filter { + name = "opt-in-status" + values = ["opt-in-not-required"] + } +} + +locals { + default_exclude_zone_ids = ["usw2-az4", "usgw1-az2"] +} +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "5.100.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/docdbelastic/testdata/Cluster/basic_v6.0.0/main_gen.tf b/internal/service/docdbelastic/testdata/Cluster/basic_v6.0.0/main_gen.tf new file mode 100644 index 000000000000..65268f98c3e5 --- /dev/null +++ b/internal/service/docdbelastic/testdata/Cluster/basic_v6.0.0/main_gen.tf @@ -0,0 +1,72 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_docdbelastic_cluster" "test" { + name = var.rName + shard_capacity = 2 + shard_count = 1 + + admin_user_name = "testuser" + admin_user_password = "testpassword" + auth_type = "PLAIN_TEXT" + + preferred_maintenance_window = "Tue:04:00-Tue:04:30" + + vpc_security_group_ids = [ + aws_security_group.test.id + ] + + subnet_ids = aws_subnet.test[*].id +} + +# testAccClusterBaseConfig + +resource "aws_security_group" "test" { + name = var.rName + vpc_id = aws_vpc.test.id +} + +# acctest.ConfigVPCWithSubnets(rName, 2) + +resource "aws_vpc" "test" { + cidr_block = "10.0.0.0/16" +} + +resource "aws_subnet" "test" { + count = 2 + + vpc_id = aws_vpc.test.id + availability_zone = data.aws_availability_zones.available.names[count.index] + cidr_block = cidrsubnet(aws_vpc.test.cidr_block, 8, count.index) +} + +# acctest.ConfigAvailableAZsNoOptInDefaultExclude + +data "aws_availability_zones" "available" { + exclude_zone_ids = local.default_exclude_zone_ids + state = "available" + + filter { + name = "opt-in-status" + values = ["opt-in-not-required"] + } +} + +locals { + default_exclude_zone_ids = ["usw2-az4", "usgw1-az2"] +} +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "6.0.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/drs/replication_configuration_template.go b/internal/service/drs/replication_configuration_template.go index 564eac092d70..49acddae358a 100644 --- a/internal/service/drs/replication_configuration_template.go +++ b/internal/service/drs/replication_configuration_template.go @@ -282,7 +282,7 @@ func (r *replicationConfigurationTemplateResource) Delete(ctx context.Context, r ReplicationConfigurationTemplateID: data.ID.ValueStringPointer(), } - _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, 5*time.Minute, func() (any, error) { + _, err := 
tfresource.RetryWhenAWSErrCodeEquals(ctx, 5*time.Minute, func(ctx context.Context) (any, error) { return conn.DeleteReplicationConfigurationTemplate(ctx, input) }, "DependencyViolation") diff --git a/internal/service/drs/replication_configuration_template_tags_gen_test.go b/internal/service/drs/replication_configuration_template_tags_gen_test.go index ec42c59a1fe5..540a792d97d8 100644 --- a/internal/service/drs/replication_configuration_template_tags_gen_test.go +++ b/internal/service/drs/replication_configuration_template_tags_gen_test.go @@ -7,7 +7,6 @@ import ( awstypes "github.com/aws/aws-sdk-go-v2/service/drs/types" "github.com/hashicorp/terraform-plugin-testing/config" - sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/knownvalue" "github.com/hashicorp/terraform-plugin-testing/plancheck" @@ -48,11 +47,12 @@ func testAccDRSReplicationConfigurationTemplate_tagsSerial(t *testing.T) { func testAccDRSReplicationConfigurationTemplate_tags(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.ReplicationConfigurationTemplate resourceName := "aws_drs_replication_configuration_template.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.DRSServiceID), CheckDestroy: testAccCheckReplicationConfigurationTemplateDestroy(ctx), @@ -230,11 +230,12 @@ func testAccDRSReplicationConfigurationTemplate_tags(t *testing.T) { func testAccDRSReplicationConfigurationTemplate_tags_null(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.ReplicationConfigurationTemplate resourceName := "aws_drs_replication_configuration_template.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + 
rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.DRSServiceID), CheckDestroy: testAccCheckReplicationConfigurationTemplateDestroy(ctx), @@ -292,11 +293,12 @@ func testAccDRSReplicationConfigurationTemplate_tags_null(t *testing.T) { func testAccDRSReplicationConfigurationTemplate_tags_EmptyMap(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.ReplicationConfigurationTemplate resourceName := "aws_drs_replication_configuration_template.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.DRSServiceID), CheckDestroy: testAccCheckReplicationConfigurationTemplateDestroy(ctx), @@ -342,11 +344,12 @@ func testAccDRSReplicationConfigurationTemplate_tags_EmptyMap(t *testing.T) { func testAccDRSReplicationConfigurationTemplate_tags_AddOnUpdate(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.ReplicationConfigurationTemplate resourceName := "aws_drs_replication_configuration_template.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.DRSServiceID), CheckDestroy: testAccCheckReplicationConfigurationTemplateDestroy(ctx), @@ -422,11 +425,12 @@ func testAccDRSReplicationConfigurationTemplate_tags_AddOnUpdate(t *testing.T) { func testAccDRSReplicationConfigurationTemplate_tags_EmptyTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.ReplicationConfigurationTemplate resourceName := 
"aws_drs_replication_configuration_template.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.DRSServiceID), CheckDestroy: testAccCheckReplicationConfigurationTemplateDestroy(ctx), @@ -512,11 +516,12 @@ func testAccDRSReplicationConfigurationTemplate_tags_EmptyTag_OnCreate(t *testin func testAccDRSReplicationConfigurationTemplate_tags_EmptyTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.ReplicationConfigurationTemplate resourceName := "aws_drs_replication_configuration_template.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.DRSServiceID), CheckDestroy: testAccCheckReplicationConfigurationTemplateDestroy(ctx), @@ -651,11 +656,12 @@ func testAccDRSReplicationConfigurationTemplate_tags_EmptyTag_OnUpdate_Add(t *te func testAccDRSReplicationConfigurationTemplate_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.ReplicationConfigurationTemplate resourceName := "aws_drs_replication_configuration_template.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.DRSServiceID), CheckDestroy: testAccCheckReplicationConfigurationTemplateDestroy(ctx), @@ -741,11 +747,12 @@ func testAccDRSReplicationConfigurationTemplate_tags_EmptyTag_OnUpdate_Replace(t func 
testAccDRSReplicationConfigurationTemplate_tags_DefaultTags_providerOnly(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.ReplicationConfigurationTemplate resourceName := "aws_drs_replication_configuration_template.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.DRSServiceID), CheckDestroy: testAccCheckReplicationConfigurationTemplateDestroy(ctx), @@ -922,11 +929,12 @@ func testAccDRSReplicationConfigurationTemplate_tags_DefaultTags_providerOnly(t func testAccDRSReplicationConfigurationTemplate_tags_DefaultTags_nonOverlapping(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.ReplicationConfigurationTemplate resourceName := "aws_drs_replication_configuration_template.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.DRSServiceID), CheckDestroy: testAccCheckReplicationConfigurationTemplateDestroy(ctx), @@ -1082,11 +1090,12 @@ func testAccDRSReplicationConfigurationTemplate_tags_DefaultTags_nonOverlapping( func testAccDRSReplicationConfigurationTemplate_tags_DefaultTags_overlapping(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.ReplicationConfigurationTemplate resourceName := "aws_drs_replication_configuration_template.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.DRSServiceID), CheckDestroy: 
testAccCheckReplicationConfigurationTemplateDestroy(ctx), @@ -1258,11 +1267,12 @@ func testAccDRSReplicationConfigurationTemplate_tags_DefaultTags_overlapping(t * func testAccDRSReplicationConfigurationTemplate_tags_DefaultTags_updateToProviderOnly(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.ReplicationConfigurationTemplate resourceName := "aws_drs_replication_configuration_template.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.DRSServiceID), CheckDestroy: testAccCheckReplicationConfigurationTemplateDestroy(ctx), @@ -1348,11 +1358,12 @@ func testAccDRSReplicationConfigurationTemplate_tags_DefaultTags_updateToProvide func testAccDRSReplicationConfigurationTemplate_tags_DefaultTags_updateToResourceOnly(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.ReplicationConfigurationTemplate resourceName := "aws_drs_replication_configuration_template.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.DRSServiceID), CheckDestroy: testAccCheckReplicationConfigurationTemplateDestroy(ctx), @@ -1437,11 +1448,12 @@ func testAccDRSReplicationConfigurationTemplate_tags_DefaultTags_updateToResourc func testAccDRSReplicationConfigurationTemplate_tags_DefaultTags_emptyResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.ReplicationConfigurationTemplate resourceName := "aws_drs_replication_configuration_template.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - 
resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.DRSServiceID), CheckDestroy: testAccCheckReplicationConfigurationTemplateDestroy(ctx), @@ -1503,11 +1515,12 @@ func testAccDRSReplicationConfigurationTemplate_tags_DefaultTags_emptyResourceTa func testAccDRSReplicationConfigurationTemplate_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.ReplicationConfigurationTemplate resourceName := "aws_drs_replication_configuration_template.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.DRSServiceID), CheckDestroy: testAccCheckReplicationConfigurationTemplateDestroy(ctx), @@ -1561,11 +1574,12 @@ func testAccDRSReplicationConfigurationTemplate_tags_DefaultTags_emptyProviderOn func testAccDRSReplicationConfigurationTemplate_tags_DefaultTags_nullOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.ReplicationConfigurationTemplate resourceName := "aws_drs_replication_configuration_template.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.DRSServiceID), CheckDestroy: testAccCheckReplicationConfigurationTemplateDestroy(ctx), @@ -1630,11 +1644,12 @@ func testAccDRSReplicationConfigurationTemplate_tags_DefaultTags_nullOverlapping func testAccDRSReplicationConfigurationTemplate_tags_DefaultTags_nullNonOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v 
awstypes.ReplicationConfigurationTemplate resourceName := "aws_drs_replication_configuration_template.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.DRSServiceID), CheckDestroy: testAccCheckReplicationConfigurationTemplateDestroy(ctx), @@ -1701,11 +1716,12 @@ func testAccDRSReplicationConfigurationTemplate_tags_DefaultTags_nullNonOverlapp func testAccDRSReplicationConfigurationTemplate_tags_ComputedTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.ReplicationConfigurationTemplate resourceName := "aws_drs_replication_configuration_template.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.DRSServiceID), CheckDestroy: testAccCheckReplicationConfigurationTemplateDestroy(ctx), @@ -1756,11 +1772,12 @@ func testAccDRSReplicationConfigurationTemplate_tags_ComputedTag_OnCreate(t *tes func testAccDRSReplicationConfigurationTemplate_tags_ComputedTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.ReplicationConfigurationTemplate resourceName := "aws_drs_replication_configuration_template.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.DRSServiceID), CheckDestroy: testAccCheckReplicationConfigurationTemplateDestroy(ctx), @@ -1853,11 +1870,12 @@ func 
testAccDRSReplicationConfigurationTemplate_tags_ComputedTag_OnUpdate_Add(t func testAccDRSReplicationConfigurationTemplate_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.ReplicationConfigurationTemplate resourceName := "aws_drs_replication_configuration_template.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.DRSServiceID), CheckDestroy: testAccCheckReplicationConfigurationTemplateDestroy(ctx), @@ -1940,11 +1958,12 @@ func testAccDRSReplicationConfigurationTemplate_tags_ComputedTag_OnUpdate_Replac func testAccDRSReplicationConfigurationTemplate_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.ReplicationConfigurationTemplate resourceName := "aws_drs_replication_configuration_template.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.DRSServiceID), CheckDestroy: testAccCheckReplicationConfigurationTemplateDestroy(ctx), @@ -2102,11 +2121,12 @@ func testAccDRSReplicationConfigurationTemplate_tags_IgnoreTags_Overlap_DefaultT func testAccDRSReplicationConfigurationTemplate_tags_IgnoreTags_Overlap_ResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.ReplicationConfigurationTemplate resourceName := "aws_drs_replication_configuration_template.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { 
acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.DRSServiceID), CheckDestroy: testAccCheckReplicationConfigurationTemplateDestroy(ctx), diff --git a/internal/service/drs/service_endpoint_resolver_gen.go b/internal/service/drs/service_endpoint_resolver_gen.go index 894ecaf255eb..fcc7e41a57a4 100644 --- a/internal/service/drs/service_endpoint_resolver_gen.go +++ b/internal/service/drs/service_endpoint_resolver_gen.go @@ -62,7 +62,7 @@ func (r resolverV2) ResolveEndpoint(ctx context.Context, params drs.EndpointPara }) params.UseFIPS = aws.Bool(false) } else { - err = fmt.Errorf("looking up drs endpoint %q: %s", hostname, err) + err = fmt.Errorf("looking up drs endpoint %q: %w", hostname, err) return } } else { diff --git a/internal/service/drs/service_endpoints_gen_test.go b/internal/service/drs/service_endpoints_gen_test.go index b121a2113d2f..1a26ad92abdd 100644 --- a/internal/service/drs/service_endpoints_gen_test.go +++ b/internal/service/drs/service_endpoints_gen_test.go @@ -521,7 +521,7 @@ func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { ) } -var errCancelOperation = fmt.Errorf("Test: Canceling request") +var errCancelOperation = errors.New("Test: Canceling request") func addCancelRequestMiddleware() func(*middleware.Stack) error { return func(stack *middleware.Stack) error { diff --git a/internal/service/drs/service_package_gen.go b/internal/service/drs/service_package_gen.go index 67a2d58e5e1b..ca7cc93d8df6 100644 --- a/internal/service/drs/service_package_gen.go +++ b/internal/service/drs/service_package_gen.go @@ -7,7 +7,6 @@ import ( "unique" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/drs" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -67,7 +66,7 @@ func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) ( func(o *drs.Options) { if inContext, ok := 
conns.FromContext(ctx); ok && inContext.VCREnabled() { tflog.Info(ctx, "overriding retry behavior to immediately return VCR errors") - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(vcr.InteractionNotFoundRetryableFunc)) + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), vcr.InteractionNotFoundRetryableFunc) } }, withExtraOptions(ctx, p, config), diff --git a/internal/service/drs/tags_gen.go b/internal/service/drs/tags_gen.go index c4a02eb25f83..a22fc69834a5 100644 --- a/internal/service/drs/tags_gen.go +++ b/internal/service/drs/tags_gen.go @@ -3,8 +3,8 @@ package drs import ( "context" - "fmt" + "github.com/YakDriver/smarterr" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/drs" "github.com/hashicorp/terraform-plugin-log/tflog" @@ -26,7 +26,7 @@ func listTags(ctx context.Context, conn *drs.Client, identifier string, optFns . output, err := conn.ListTagsForResource(ctx, &input, optFns...) if err != nil { - return tftags.New(ctx, nil), err + return tftags.New(ctx, nil), smarterr.NewError(err) } return keyValueTags(ctx, output.Tags), nil @@ -38,7 +38,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri tags, err := listTags(ctx, meta.(*conns.AWSClient).DRSClient(ctx), identifier) if err != nil { - return err + return smarterr.NewError(err) } if inContext, ok := tftags.FromContext(ctx); ok { @@ -99,7 +99,7 @@ func updateTags(ctx context.Context, conn *drs.Client, identifier string, oldTag _, err := conn.UntagResource(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("untagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } @@ -114,7 +114,7 @@ func updateTags(ctx context.Context, conn *drs.Client, identifier string, oldTag _, err := conn.TagResource(ctx, &input, optFns...) 
if err != nil { - return fmt.Errorf("tagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } diff --git a/internal/service/ds/conditional_forwarder.go b/internal/service/ds/conditional_forwarder.go index 5678ffc3a622..a7c742a8ad21 100644 --- a/internal/service/ds/conditional_forwarder.go +++ b/internal/service/ds/conditional_forwarder.go @@ -84,7 +84,7 @@ func resourceConditionalForwarderCreate(ctx context.Context, d *schema.ResourceD const ( timeout = 1 * time.Minute ) - _, err = tfresource.RetryWhenNotFound(ctx, timeout, func() (any, error) { + _, err = tfresource.RetryWhenNotFound(ctx, timeout, func(ctx context.Context) (any, error) { return findConditionalForwarderByTwoPartKey(ctx, conn, directoryID, domainName) }) diff --git a/internal/service/ds/directory.go b/internal/service/ds/directory.go index c3c0a16f93c6..02fd69b80366 100644 --- a/internal/service/ds/directory.go +++ b/internal/service/ds/directory.go @@ -224,9 +224,9 @@ func resourceDirectoryCreate(ctx context.Context, d *schema.ResourceData, meta a // created concurrently. Retry creation in that case. // When it fails, it will typically be within the first few minutes of creation, so there is no need // to wait for deletion. 
- err := tfresource.Retry(ctx, d.Timeout(schema.TimeoutCreate), func() *retry.RetryError { + err := tfresource.Retry(ctx, d.Timeout(schema.TimeoutCreate), func(ctx context.Context) *tfresource.RetryError { if err := creator.Create(ctx, conn, name, d); err != nil { - return retry.NonRetryableError(err) + return tfresource.NonRetryableError(err) } if _, err := waitDirectoryCreated(ctx, conn, d.Id(), d.Timeout(schema.TimeoutCreate)); err != nil { @@ -248,11 +248,11 @@ func resourceDirectoryCreate(ctx context.Context, d *schema.ResourceData, meta a )) } - return retry.RetryableError(err) + return tfresource.RetryableError(err) } } - return retry.NonRetryableError(err) + return tfresource.NonRetryableError(err) } return nil @@ -367,7 +367,7 @@ func resourceDirectoryDelete(ctx context.Context, d *schema.ResourceData, meta a conn := meta.(*conns.AWSClient).DSClient(ctx) log.Printf("[DEBUG] Deleting Directory Service Directory: %s", d.Id()) - _, err := tfresource.RetryWhenIsAErrorMessageContains[*awstypes.ClientException](ctx, directoryApplicationDeauthorizedPropagationTimeout, func() (any, error) { + _, err := tfresource.RetryWhenIsAErrorMessageContains[any, *awstypes.ClientException](ctx, directoryApplicationDeauthorizedPropagationTimeout, func(ctx context.Context) (any, error) { return conn.DeleteDirectory(ctx, &directoryservice.DeleteDirectoryInput{ DirectoryId: aws.String(d.Id()), }) diff --git a/internal/service/ds/service_endpoint_resolver_gen.go b/internal/service/ds/service_endpoint_resolver_gen.go index 0c2d51f5a96f..d014b5183672 100644 --- a/internal/service/ds/service_endpoint_resolver_gen.go +++ b/internal/service/ds/service_endpoint_resolver_gen.go @@ -62,7 +62,7 @@ func (r resolverV2) ResolveEndpoint(ctx context.Context, params directoryservice }) params.UseFIPS = aws.Bool(false) } else { - err = fmt.Errorf("looking up directoryservice endpoint %q: %s", hostname, err) + err = fmt.Errorf("looking up directoryservice endpoint %q: %w", hostname, err) return } } 
else { diff --git a/internal/service/ds/service_endpoints_gen_test.go b/internal/service/ds/service_endpoints_gen_test.go index e92ca4b341a7..2eb0b2c699c7 100644 --- a/internal/service/ds/service_endpoints_gen_test.go +++ b/internal/service/ds/service_endpoints_gen_test.go @@ -601,7 +601,7 @@ func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { ) } -var errCancelOperation = fmt.Errorf("Test: Canceling request") +var errCancelOperation = errors.New("Test: Canceling request") func addCancelRequestMiddleware() func(*middleware.Stack) error { return func(stack *middleware.Stack) error { diff --git a/internal/service/ds/service_package_gen.go b/internal/service/ds/service_package_gen.go index 13d95a896fad..437c3c5c14c5 100644 --- a/internal/service/ds/service_package_gen.go +++ b/internal/service/ds/service_package_gen.go @@ -7,7 +7,6 @@ import ( "unique" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/directoryservice" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -118,7 +117,7 @@ func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) ( func(o *directoryservice.Options) { if inContext, ok := conns.FromContext(ctx); ok && inContext.VCREnabled() { tflog.Info(ctx, "overriding retry behavior to immediately return VCR errors") - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(vcr.InteractionNotFoundRetryableFunc)) + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), vcr.InteractionNotFoundRetryableFunc) } }, withExtraOptions(ctx, p, config), diff --git a/internal/service/ds/shared_directory_accepter.go b/internal/service/ds/shared_directory_accepter.go index 6c9965d653df..8682419ddfff 100644 --- a/internal/service/ds/shared_directory_accepter.go +++ b/internal/service/ds/shared_directory_accepter.go @@ -118,7 +118,7 @@ func 
resourceSharedDirectoryAccepterDelete(ctx context.Context, d *schema.Resour conn := meta.(*conns.AWSClient).DSClient(ctx) log.Printf("[DEBUG] Deleting Directory Service Shared Directory Accepter: %s", d.Id()) - _, err := tfresource.RetryWhenIsAErrorMessageContains[*awstypes.ClientException](ctx, directoryApplicationDeauthorizedPropagationTimeout, func() (any, error) { + _, err := tfresource.RetryWhenIsAErrorMessageContains[any, *awstypes.ClientException](ctx, directoryApplicationDeauthorizedPropagationTimeout, func(ctx context.Context) (any, error) { return conn.DeleteDirectory(ctx, &directoryservice.DeleteDirectoryInput{ DirectoryId: aws.String(d.Id()), }) diff --git a/internal/service/ds/sweep.go b/internal/service/ds/sweep.go index 183b6567e31e..5331df8c9871 100644 --- a/internal/service/ds/sweep.go +++ b/internal/service/ds/sweep.go @@ -44,7 +44,7 @@ func sweepDirectories(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %w", err) + return fmt.Errorf("getting client: %w", err) } conn := client.DSClient(ctx) input := &directoryservice.DescribeDirectoriesInput{} @@ -85,7 +85,7 @@ func sweepRegions(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %w", err) + return fmt.Errorf("getting client: %w", err) } conn := client.DSClient(ctx) input := &directoryservice.DescribeDirectoriesInput{} diff --git a/internal/service/ds/tags_gen.go b/internal/service/ds/tags_gen.go index efd748fcc5b8..284f15327554 100644 --- a/internal/service/ds/tags_gen.go +++ b/internal/service/ds/tags_gen.go @@ -3,8 +3,8 @@ package ds import ( "context" - "fmt" + "github.com/YakDriver/smarterr" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/directoryservice" awstypes "github.com/aws/aws-sdk-go-v2/service/directoryservice/types" @@ 
-31,7 +31,7 @@ func listTags(ctx context.Context, conn *directoryservice.Client, identifier str page, err := pages.NextPage(ctx, optFns...) if err != nil { - return tftags.New(ctx, nil), err + return tftags.New(ctx, nil), smarterr.NewError(err) } output = append(output, page.Tags...) @@ -46,7 +46,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri tags, err := listTags(ctx, meta.(*conns.AWSClient).DSClient(ctx), identifier) if err != nil { - return err + return smarterr.NewError(err) } if inContext, ok := tftags.FromContext(ctx); ok { @@ -133,7 +133,7 @@ func updateTags(ctx context.Context, conn *directoryservice.Client, identifier s _, err := conn.RemoveTagsFromResource(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("untagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } @@ -148,7 +148,7 @@ func updateTags(ctx context.Context, conn *directoryservice.Client, identifier s _, err := conn.AddTagsToResource(ctx, &input, optFns...) 
if err != nil { - return fmt.Errorf("tagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } diff --git a/internal/service/dsql/cluster.go b/internal/service/dsql/cluster.go index d03c24296a55..a96c57350c55 100644 --- a/internal/service/dsql/cluster.go +++ b/internal/service/dsql/cluster.go @@ -19,6 +19,7 @@ import ( "github.com/hashicorp/terraform-plugin-framework/path" "github.com/hashicorp/terraform-plugin-framework/resource" "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/booldefault" "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" "github.com/hashicorp/terraform-plugin-framework/resource/schema/setplanmodifier" "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" @@ -43,6 +44,7 @@ import ( // @Tags(identifierAttribute="arn") // @Testing(existsType="github.com/aws/aws-sdk-go-v2/service/dsql;dsql.GetClusterOutput") // @Testing(importStateIdAttribute="identifier") +// @Testing(generator=false) func newClusterResource(_ context.Context) (resource.ResourceWithConfigure, error) { r := &clusterResource{} @@ -64,8 +66,15 @@ func (r *clusterResource) Schema(ctx context.Context, request resource.SchemaReq names.AttrARN: framework.ARNAttributeComputedOnly(), "deletion_protection_enabled": schema.BoolAttribute{ Optional: true, + Computed: true, + Default: booldefault.StaticBool(false), }, "encryption_details": framework.ResourceComputedListOfObjectsAttribute[encryptionDetailsModel](ctx), + names.AttrForceDestroy: schema.BoolAttribute{ + Optional: true, + Computed: true, + Default: booldefault.StaticBool(false), + }, names.AttrIdentifier: framework.IDAttribute(), "kms_encryption_key": schema.StringAttribute{ Optional: true, @@ -309,6 +318,19 @@ func (r *clusterResource) Delete(ctx context.Context, request resource.DeleteReq conn := r.Meta().DSQLClient(ctx) + if data.ForceDestroy.ValueBool() { + input := 
dsql.UpdateClusterInput{ + Identifier: data.Identifier.ValueStringPointer(), + DeletionProtectionEnabled: aws.Bool(false), + ClientToken: aws.String(sdkid.UniqueId()), + } + // Changing DeletionProtectionEnabled is instantaneous, no need to wait. + if _, err := conn.UpdateCluster(ctx, &input); err != nil { + response.Diagnostics.AddError(fmt.Sprintf("disabling deletion protection for Aurora DSQL Cluster (%s)", data.Identifier.ValueString()), err.Error()) + return + } + } + id := fwflex.StringValueFromFramework(ctx, data.Identifier) tflog.Debug(ctx, "deleting Aurora DSQL Cluster", map[string]any{ names.AttrIdentifier: id, @@ -338,6 +360,9 @@ func (r *clusterResource) Delete(ctx context.Context, request resource.DeleteReq func (r *clusterResource) ImportState(ctx context.Context, request resource.ImportStateRequest, response *resource.ImportStateResponse) { resource.ImportStatePassthroughID(ctx, path.Root(names.AttrIdentifier), request, response) + + // Set force_destroy to false on import to prevent accidental deletion + response.Diagnostics.Append(response.State.SetAttribute(ctx, path.Root(names.AttrForceDestroy), types.BoolValue(false))...) 
} func findClusterByID(ctx context.Context, conn *dsql.Client, id string) (*dsql.GetClusterOutput, error) { @@ -441,10 +466,12 @@ func waitClusterUpdated(ctx context.Context, conn *dsql.Client, id string, timeo func waitClusterDeleted(ctx context.Context, conn *dsql.Client, id string, timeout time.Duration) (*dsql.GetClusterOutput, error) { stateConf := &retry.StateChangeConf{ - Pending: enum.Slice(awstypes.ClusterStatusDeleting, awstypes.ClusterStatusPendingDelete), - Target: []string{}, - Refresh: statusCluster(ctx, conn, id), - Timeout: timeout, + Pending: enum.Slice(awstypes.ClusterStatusDeleting, awstypes.ClusterStatusPendingDelete), + Target: []string{}, + Refresh: statusCluster(ctx, conn, id), + Timeout: timeout, + Delay: 1 * time.Minute, + PollInterval: 10 * time.Second, } outputRaw, err := stateConf.WaitForStateContext(ctx) @@ -522,6 +549,7 @@ type clusterResourceModel struct { ARN types.String `tfsdk:"arn"` DeletionProtectionEnabled types.Bool `tfsdk:"deletion_protection_enabled"` EncryptionDetails fwtypes.ListNestedObjectValueOf[encryptionDetailsModel] `tfsdk:"encryption_details"` + ForceDestroy types.Bool `tfsdk:"force_destroy"` Identifier types.String `tfsdk:"identifier"` KMSEncryptionKey types.String `tfsdk:"kms_encryption_key"` MultiRegionProperties fwtypes.ListNestedObjectValueOf[multiRegionPropertiesModel] `tfsdk:"multi_region_properties"` diff --git a/internal/service/dsql/cluster_peering_test.go b/internal/service/dsql/cluster_peering_test.go index 9e9b3bdd0128..fafb1fbbe840 100644 --- a/internal/service/dsql/cluster_peering_test.go +++ b/internal/service/dsql/cluster_peering_test.go @@ -25,12 +25,6 @@ func TestAccDSQLClusterPeering_basic(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) - // Because dsql is in preview, we need to skip the PreCheckPartitionHasService - // acctest.PreCheckPartitionHasService(t, names.DSQLEndpointID) - // PreCheck for the region configuration as long as DSQL is 
in preview - acctest.PreCheckRegion(t, "us-east-1", "us-east-2") //lintignore:AWSAT003 - acctest.PreCheckAlternateRegion(t, "us-east-2", "us-east-1") //lintignore:AWSAT003 - acctest.PreCheckThirdRegion(t, "us-west-2") //lintignore:AWSAT003 testAccPreCheck(ctx, t) acctest.PreCheckMultipleRegion(t, 2) }, diff --git a/internal/service/dsql/cluster_tags_gen_test.go b/internal/service/dsql/cluster_tags_gen_test.go index 7863a05a82e1..697fd518bc74 100644 --- a/internal/service/dsql/cluster_tags_gen_test.go +++ b/internal/service/dsql/cluster_tags_gen_test.go @@ -7,7 +7,6 @@ import ( "github.com/aws/aws-sdk-go-v2/service/dsql" "github.com/hashicorp/terraform-plugin-testing/config" - sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/knownvalue" "github.com/hashicorp/terraform-plugin-testing/plancheck" @@ -19,11 +18,11 @@ import ( func TestAccDSQLCluster_tags(t *testing.T) { ctx := acctest.Context(t) + var v dsql.GetClusterOutput resourceName := "aws_dsql_cluster.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.DSQLServiceID), CheckDestroy: testAccCheckClusterDestroy(ctx), @@ -32,7 +31,6 @@ func TestAccDSQLCluster_tags(t *testing.T) { { ConfigDirectory: config.StaticDirectory("testdata/Cluster/tags/"), ConfigVariables: config.Variables{ - acctest.CtRName: config.StringVariable(rName), acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ acctest.CtKey1: config.StringVariable(acctest.CtValue1), }), @@ -63,7 +61,6 @@ func TestAccDSQLCluster_tags(t *testing.T) { { ConfigDirectory: config.StaticDirectory("testdata/Cluster/tags/"), ConfigVariables: config.Variables{ - acctest.CtRName: config.StringVariable(rName), 
acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ acctest.CtKey1: config.StringVariable(acctest.CtValue1), }), @@ -77,7 +74,6 @@ func TestAccDSQLCluster_tags(t *testing.T) { { ConfigDirectory: config.StaticDirectory("testdata/Cluster/tags/"), ConfigVariables: config.Variables{ - acctest.CtRName: config.StringVariable(rName), acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ acctest.CtKey1: config.StringVariable(acctest.CtValue1Updated), acctest.CtKey2: config.StringVariable(acctest.CtValue2), @@ -113,7 +109,6 @@ func TestAccDSQLCluster_tags(t *testing.T) { { ConfigDirectory: config.StaticDirectory("testdata/Cluster/tags/"), ConfigVariables: config.Variables{ - acctest.CtRName: config.StringVariable(rName), acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ acctest.CtKey1: config.StringVariable(acctest.CtValue1Updated), acctest.CtKey2: config.StringVariable(acctest.CtValue2), @@ -128,7 +123,6 @@ func TestAccDSQLCluster_tags(t *testing.T) { { ConfigDirectory: config.StaticDirectory("testdata/Cluster/tags/"), ConfigVariables: config.Variables{ - acctest.CtRName: config.StringVariable(rName), acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ acctest.CtKey2: config.StringVariable(acctest.CtValue2), }), @@ -159,7 +153,6 @@ func TestAccDSQLCluster_tags(t *testing.T) { { ConfigDirectory: config.StaticDirectory("testdata/Cluster/tags/"), ConfigVariables: config.Variables{ - acctest.CtRName: config.StringVariable(rName), acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ acctest.CtKey2: config.StringVariable(acctest.CtValue2), }), @@ -173,7 +166,6 @@ func TestAccDSQLCluster_tags(t *testing.T) { { ConfigDirectory: config.StaticDirectory("testdata/Cluster/tags/"), ConfigVariables: config.Variables{ - acctest.CtRName: config.StringVariable(rName), acctest.CtResourceTags: nil, }, Check: resource.ComposeAggregateTestCheckFunc( @@ -194,7 +186,6 @@ func TestAccDSQLCluster_tags(t 
*testing.T) { { ConfigDirectory: config.StaticDirectory("testdata/Cluster/tags/"), ConfigVariables: config.Variables{ - acctest.CtRName: config.StringVariable(rName), acctest.CtResourceTags: nil, }, ResourceName: resourceName, @@ -209,11 +200,11 @@ func TestAccDSQLCluster_tags(t *testing.T) { func TestAccDSQLCluster_tags_null(t *testing.T) { ctx := acctest.Context(t) + var v dsql.GetClusterOutput resourceName := "aws_dsql_cluster.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.DSQLServiceID), CheckDestroy: testAccCheckClusterDestroy(ctx), @@ -222,7 +213,6 @@ func TestAccDSQLCluster_tags_null(t *testing.T) { { ConfigDirectory: config.StaticDirectory("testdata/Cluster/tags/"), ConfigVariables: config.Variables{ - acctest.CtRName: config.StringVariable(rName), acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ acctest.CtKey1: nil, }), @@ -253,7 +243,6 @@ func TestAccDSQLCluster_tags_null(t *testing.T) { { ConfigDirectory: config.StaticDirectory("testdata/Cluster/tags/"), ConfigVariables: config.Variables{ - acctest.CtRName: config.StringVariable(rName), acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ acctest.CtKey1: nil, }), @@ -273,11 +262,11 @@ func TestAccDSQLCluster_tags_null(t *testing.T) { func TestAccDSQLCluster_tags_EmptyMap(t *testing.T) { ctx := acctest.Context(t) + var v dsql.GetClusterOutput resourceName := "aws_dsql_cluster.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.DSQLServiceID), CheckDestroy: testAccCheckClusterDestroy(ctx), @@ -286,7 +275,6 @@ func TestAccDSQLCluster_tags_EmptyMap(t *testing.T) { { 
ConfigDirectory: config.StaticDirectory("testdata/Cluster/tags/"), ConfigVariables: config.Variables{ - acctest.CtRName: config.StringVariable(rName), acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{}), }, Check: resource.ComposeAggregateTestCheckFunc( @@ -307,7 +295,6 @@ func TestAccDSQLCluster_tags_EmptyMap(t *testing.T) { { ConfigDirectory: config.StaticDirectory("testdata/Cluster/tags/"), ConfigVariables: config.Variables{ - acctest.CtRName: config.StringVariable(rName), acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{}), }, ResourceName: resourceName, @@ -325,11 +312,11 @@ func TestAccDSQLCluster_tags_EmptyMap(t *testing.T) { func TestAccDSQLCluster_tags_AddOnUpdate(t *testing.T) { ctx := acctest.Context(t) + var v dsql.GetClusterOutput resourceName := "aws_dsql_cluster.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.DSQLServiceID), CheckDestroy: testAccCheckClusterDestroy(ctx), @@ -338,7 +325,6 @@ func TestAccDSQLCluster_tags_AddOnUpdate(t *testing.T) { { ConfigDirectory: config.StaticDirectory("testdata/Cluster/tags/"), ConfigVariables: config.Variables{ - acctest.CtRName: config.StringVariable(rName), acctest.CtResourceTags: nil, }, Check: resource.ComposeAggregateTestCheckFunc( @@ -359,7 +345,6 @@ func TestAccDSQLCluster_tags_AddOnUpdate(t *testing.T) { { ConfigDirectory: config.StaticDirectory("testdata/Cluster/tags/"), ConfigVariables: config.Variables{ - acctest.CtRName: config.StringVariable(rName), acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ acctest.CtKey1: config.StringVariable(acctest.CtValue1), }), @@ -390,7 +375,6 @@ func TestAccDSQLCluster_tags_AddOnUpdate(t *testing.T) { { ConfigDirectory: config.StaticDirectory("testdata/Cluster/tags/"), ConfigVariables: config.Variables{ - 
acctest.CtRName: config.StringVariable(rName), acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ acctest.CtKey1: config.StringVariable(acctest.CtValue1), }), @@ -407,11 +391,11 @@ func TestAccDSQLCluster_tags_AddOnUpdate(t *testing.T) { func TestAccDSQLCluster_tags_EmptyTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) + var v dsql.GetClusterOutput resourceName := "aws_dsql_cluster.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.DSQLServiceID), CheckDestroy: testAccCheckClusterDestroy(ctx), @@ -420,7 +404,6 @@ func TestAccDSQLCluster_tags_EmptyTag_OnCreate(t *testing.T) { { ConfigDirectory: config.StaticDirectory("testdata/Cluster/tags/"), ConfigVariables: config.Variables{ - acctest.CtRName: config.StringVariable(rName), acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ acctest.CtKey1: config.StringVariable(""), }), @@ -451,7 +434,6 @@ func TestAccDSQLCluster_tags_EmptyTag_OnCreate(t *testing.T) { { ConfigDirectory: config.StaticDirectory("testdata/Cluster/tags/"), ConfigVariables: config.Variables{ - acctest.CtRName: config.StringVariable(rName), acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ acctest.CtKey1: config.StringVariable(""), }), @@ -465,7 +447,6 @@ func TestAccDSQLCluster_tags_EmptyTag_OnCreate(t *testing.T) { { ConfigDirectory: config.StaticDirectory("testdata/Cluster/tags/"), ConfigVariables: config.Variables{ - acctest.CtRName: config.StringVariable(rName), acctest.CtResourceTags: nil, }, Check: resource.ComposeAggregateTestCheckFunc( @@ -486,7 +467,6 @@ func TestAccDSQLCluster_tags_EmptyTag_OnCreate(t *testing.T) { { ConfigDirectory: config.StaticDirectory("testdata/Cluster/tags/"), ConfigVariables: config.Variables{ - acctest.CtRName: config.StringVariable(rName), 
acctest.CtResourceTags: nil, }, ResourceName: resourceName, @@ -501,11 +481,11 @@ func TestAccDSQLCluster_tags_EmptyTag_OnCreate(t *testing.T) { func TestAccDSQLCluster_tags_EmptyTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + var v dsql.GetClusterOutput resourceName := "aws_dsql_cluster.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.DSQLServiceID), CheckDestroy: testAccCheckClusterDestroy(ctx), @@ -514,7 +494,6 @@ func TestAccDSQLCluster_tags_EmptyTag_OnUpdate_Add(t *testing.T) { { ConfigDirectory: config.StaticDirectory("testdata/Cluster/tags/"), ConfigVariables: config.Variables{ - acctest.CtRName: config.StringVariable(rName), acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ acctest.CtKey1: config.StringVariable(acctest.CtValue1), }), @@ -545,7 +524,6 @@ func TestAccDSQLCluster_tags_EmptyTag_OnUpdate_Add(t *testing.T) { { ConfigDirectory: config.StaticDirectory("testdata/Cluster/tags/"), ConfigVariables: config.Variables{ - acctest.CtRName: config.StringVariable(rName), acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ acctest.CtKey1: config.StringVariable(acctest.CtValue1), acctest.CtKey2: config.StringVariable(""), @@ -581,7 +559,6 @@ func TestAccDSQLCluster_tags_EmptyTag_OnUpdate_Add(t *testing.T) { { ConfigDirectory: config.StaticDirectory("testdata/Cluster/tags/"), ConfigVariables: config.Variables{ - acctest.CtRName: config.StringVariable(rName), acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ acctest.CtKey1: config.StringVariable(acctest.CtValue1), acctest.CtKey2: config.StringVariable(""), @@ -596,7 +573,6 @@ func TestAccDSQLCluster_tags_EmptyTag_OnUpdate_Add(t *testing.T) { { ConfigDirectory: config.StaticDirectory("testdata/Cluster/tags/"), ConfigVariables: 
config.Variables{ - acctest.CtRName: config.StringVariable(rName), acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ acctest.CtKey1: config.StringVariable(acctest.CtValue1), }), @@ -627,7 +603,6 @@ func TestAccDSQLCluster_tags_EmptyTag_OnUpdate_Add(t *testing.T) { { ConfigDirectory: config.StaticDirectory("testdata/Cluster/tags/"), ConfigVariables: config.Variables{ - acctest.CtRName: config.StringVariable(rName), acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ acctest.CtKey1: config.StringVariable(acctest.CtValue1), }), @@ -644,11 +619,11 @@ func TestAccDSQLCluster_tags_EmptyTag_OnUpdate_Add(t *testing.T) { func TestAccDSQLCluster_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + var v dsql.GetClusterOutput resourceName := "aws_dsql_cluster.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.DSQLServiceID), CheckDestroy: testAccCheckClusterDestroy(ctx), @@ -657,7 +632,6 @@ func TestAccDSQLCluster_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { { ConfigDirectory: config.StaticDirectory("testdata/Cluster/tags/"), ConfigVariables: config.Variables{ - acctest.CtRName: config.StringVariable(rName), acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ acctest.CtKey1: config.StringVariable(acctest.CtValue1), }), @@ -688,7 +662,6 @@ func TestAccDSQLCluster_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { { ConfigDirectory: config.StaticDirectory("testdata/Cluster/tags/"), ConfigVariables: config.Variables{ - acctest.CtRName: config.StringVariable(rName), acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ acctest.CtKey1: config.StringVariable(""), }), @@ -719,7 +692,6 @@ func TestAccDSQLCluster_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { { ConfigDirectory: 
config.StaticDirectory("testdata/Cluster/tags/"), ConfigVariables: config.Variables{ - acctest.CtRName: config.StringVariable(rName), acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ acctest.CtKey1: config.StringVariable(""), }), @@ -736,11 +708,11 @@ func TestAccDSQLCluster_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { func TestAccDSQLCluster_tags_DefaultTags_providerOnly(t *testing.T) { ctx := acctest.Context(t) + var v dsql.GetClusterOutput resourceName := "aws_dsql_cluster.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.DSQLServiceID), CheckDestroy: testAccCheckClusterDestroy(ctx), @@ -749,7 +721,6 @@ func TestAccDSQLCluster_tags_DefaultTags_providerOnly(t *testing.T) { ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, ConfigDirectory: config.StaticDirectory("testdata/Cluster/tags_defaults/"), ConfigVariables: config.Variables{ - acctest.CtRName: config.StringVariable(rName), acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ acctest.CtKey1: config.StringVariable(acctest.CtValue1), }), @@ -778,7 +749,6 @@ func TestAccDSQLCluster_tags_DefaultTags_providerOnly(t *testing.T) { ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, ConfigDirectory: config.StaticDirectory("testdata/Cluster/tags_defaults/"), ConfigVariables: config.Variables{ - acctest.CtRName: config.StringVariable(rName), acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ acctest.CtKey1: config.StringVariable(acctest.CtValue1), }), @@ -794,7 +764,6 @@ func TestAccDSQLCluster_tags_DefaultTags_providerOnly(t *testing.T) { ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, ConfigDirectory: config.StaticDirectory("testdata/Cluster/tags_defaults/"), ConfigVariables: config.Variables{ - acctest.CtRName: 
config.StringVariable(rName), acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ acctest.CtKey1: config.StringVariable(acctest.CtValue1Updated), acctest.CtKey2: config.StringVariable(acctest.CtValue2), @@ -826,7 +795,6 @@ func TestAccDSQLCluster_tags_DefaultTags_providerOnly(t *testing.T) { ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, ConfigDirectory: config.StaticDirectory("testdata/Cluster/tags_defaults/"), ConfigVariables: config.Variables{ - acctest.CtRName: config.StringVariable(rName), acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ acctest.CtKey1: config.StringVariable(acctest.CtValue1Updated), acctest.CtKey2: config.StringVariable(acctest.CtValue2), @@ -843,7 +811,6 @@ func TestAccDSQLCluster_tags_DefaultTags_providerOnly(t *testing.T) { ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, ConfigDirectory: config.StaticDirectory("testdata/Cluster/tags_defaults/"), ConfigVariables: config.Variables{ - acctest.CtRName: config.StringVariable(rName), acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ acctest.CtKey2: config.StringVariable(acctest.CtValue2), }), @@ -872,7 +839,6 @@ func TestAccDSQLCluster_tags_DefaultTags_providerOnly(t *testing.T) { ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, ConfigDirectory: config.StaticDirectory("testdata/Cluster/tags_defaults/"), ConfigVariables: config.Variables{ - acctest.CtRName: config.StringVariable(rName), acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ acctest.CtKey2: config.StringVariable(acctest.CtValue2), }), @@ -888,7 +854,6 @@ func TestAccDSQLCluster_tags_DefaultTags_providerOnly(t *testing.T) { ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, ConfigDirectory: config.StaticDirectory("testdata/Cluster/tags/"), ConfigVariables: config.Variables{ - acctest.CtRName: config.StringVariable(rName), acctest.CtResourceTags: nil, }, Check: resource.ComposeAggregateTestCheckFunc( @@ -910,7 
+875,6 @@ func TestAccDSQLCluster_tags_DefaultTags_providerOnly(t *testing.T) { ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, ConfigDirectory: config.StaticDirectory("testdata/Cluster/tags/"), ConfigVariables: config.Variables{ - acctest.CtRName: config.StringVariable(rName), acctest.CtResourceTags: nil, }, ResourceName: resourceName, @@ -925,11 +889,11 @@ func TestAccDSQLCluster_tags_DefaultTags_providerOnly(t *testing.T) { func TestAccDSQLCluster_tags_DefaultTags_nonOverlapping(t *testing.T) { ctx := acctest.Context(t) + var v dsql.GetClusterOutput resourceName := "aws_dsql_cluster.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.DSQLServiceID), CheckDestroy: testAccCheckClusterDestroy(ctx), @@ -938,7 +902,6 @@ func TestAccDSQLCluster_tags_DefaultTags_nonOverlapping(t *testing.T) { ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, ConfigDirectory: config.StaticDirectory("testdata/Cluster/tags_defaults/"), ConfigVariables: config.Variables{ - acctest.CtRName: config.StringVariable(rName), acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1), }), @@ -975,7 +938,6 @@ func TestAccDSQLCluster_tags_DefaultTags_nonOverlapping(t *testing.T) { ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, ConfigDirectory: config.StaticDirectory("testdata/Cluster/tags_defaults/"), ConfigVariables: config.Variables{ - acctest.CtRName: config.StringVariable(rName), acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1), }), @@ -993,7 +955,6 @@ func TestAccDSQLCluster_tags_DefaultTags_nonOverlapping(t *testing.T) { ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, 
ConfigDirectory: config.StaticDirectory("testdata/Cluster/tags_defaults/"), ConfigVariables: config.Variables{ - acctest.CtRName: config.StringVariable(rName), acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1Updated), }), @@ -1035,7 +996,6 @@ func TestAccDSQLCluster_tags_DefaultTags_nonOverlapping(t *testing.T) { ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, ConfigDirectory: config.StaticDirectory("testdata/Cluster/tags_defaults/"), ConfigVariables: config.Variables{ - acctest.CtRName: config.StringVariable(rName), acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1Updated), }), @@ -1054,7 +1014,6 @@ func TestAccDSQLCluster_tags_DefaultTags_nonOverlapping(t *testing.T) { ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, ConfigDirectory: config.StaticDirectory("testdata/Cluster/tags/"), ConfigVariables: config.Variables{ - acctest.CtRName: config.StringVariable(rName), acctest.CtResourceTags: nil, }, Check: resource.ComposeAggregateTestCheckFunc( @@ -1076,7 +1035,6 @@ func TestAccDSQLCluster_tags_DefaultTags_nonOverlapping(t *testing.T) { ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, ConfigDirectory: config.StaticDirectory("testdata/Cluster/tags/"), ConfigVariables: config.Variables{ - acctest.CtRName: config.StringVariable(rName), acctest.CtResourceTags: nil, }, ResourceName: resourceName, @@ -1091,11 +1049,11 @@ func TestAccDSQLCluster_tags_DefaultTags_nonOverlapping(t *testing.T) { func TestAccDSQLCluster_tags_DefaultTags_overlapping(t *testing.T) { ctx := acctest.Context(t) + var v dsql.GetClusterOutput resourceName := "aws_dsql_cluster.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, 
ErrorCheck: acctest.ErrorCheck(t, names.DSQLServiceID), CheckDestroy: testAccCheckClusterDestroy(ctx), @@ -1104,7 +1062,6 @@ func TestAccDSQLCluster_tags_DefaultTags_overlapping(t *testing.T) { ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, ConfigDirectory: config.StaticDirectory("testdata/Cluster/tags_defaults/"), ConfigVariables: config.Variables{ - acctest.CtRName: config.StringVariable(rName), acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ acctest.CtOverlapKey1: config.StringVariable(acctest.CtProviderValue1), }), @@ -1139,7 +1096,6 @@ func TestAccDSQLCluster_tags_DefaultTags_overlapping(t *testing.T) { ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, ConfigDirectory: config.StaticDirectory("testdata/Cluster/tags_defaults/"), ConfigVariables: config.Variables{ - acctest.CtRName: config.StringVariable(rName), acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ acctest.CtOverlapKey1: config.StringVariable(acctest.CtProviderValue1), }), @@ -1157,7 +1113,6 @@ func TestAccDSQLCluster_tags_DefaultTags_overlapping(t *testing.T) { ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, ConfigDirectory: config.StaticDirectory("testdata/Cluster/tags_defaults/"), ConfigVariables: config.Variables{ - acctest.CtRName: config.StringVariable(rName), acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ acctest.CtOverlapKey1: config.StringVariable(acctest.CtProviderValue1), acctest.CtOverlapKey2: config.StringVariable("providervalue2"), @@ -1198,7 +1153,6 @@ func TestAccDSQLCluster_tags_DefaultTags_overlapping(t *testing.T) { ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, ConfigDirectory: config.StaticDirectory("testdata/Cluster/tags_defaults/"), ConfigVariables: config.Variables{ - acctest.CtRName: config.StringVariable(rName), acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ acctest.CtOverlapKey1: config.StringVariable(acctest.CtProviderValue1), 
acctest.CtOverlapKey2: config.StringVariable("providervalue2"), @@ -1218,7 +1172,6 @@ func TestAccDSQLCluster_tags_DefaultTags_overlapping(t *testing.T) { ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, ConfigDirectory: config.StaticDirectory("testdata/Cluster/tags_defaults/"), ConfigVariables: config.Variables{ - acctest.CtRName: config.StringVariable(rName), acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ acctest.CtOverlapKey1: config.StringVariable(acctest.CtProviderValue1), }), @@ -1253,7 +1206,6 @@ func TestAccDSQLCluster_tags_DefaultTags_overlapping(t *testing.T) { ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, ConfigDirectory: config.StaticDirectory("testdata/Cluster/tags_defaults/"), ConfigVariables: config.Variables{ - acctest.CtRName: config.StringVariable(rName), acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ acctest.CtOverlapKey1: config.StringVariable(acctest.CtProviderValue1), }), @@ -1273,11 +1225,11 @@ func TestAccDSQLCluster_tags_DefaultTags_overlapping(t *testing.T) { func TestAccDSQLCluster_tags_DefaultTags_updateToProviderOnly(t *testing.T) { ctx := acctest.Context(t) + var v dsql.GetClusterOutput resourceName := "aws_dsql_cluster.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.DSQLServiceID), CheckDestroy: testAccCheckClusterDestroy(ctx), @@ -1286,7 +1238,6 @@ func TestAccDSQLCluster_tags_DefaultTags_updateToProviderOnly(t *testing.T) { ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, ConfigDirectory: config.StaticDirectory("testdata/Cluster/tags/"), ConfigVariables: config.Variables{ - acctest.CtRName: config.StringVariable(rName), acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ acctest.CtKey1: config.StringVariable(acctest.CtValue1), }), 
@@ -1318,7 +1269,6 @@ func TestAccDSQLCluster_tags_DefaultTags_updateToProviderOnly(t *testing.T) { ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, ConfigDirectory: config.StaticDirectory("testdata/Cluster/tags_defaults/"), ConfigVariables: config.Variables{ - acctest.CtRName: config.StringVariable(rName), acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ acctest.CtKey1: config.StringVariable(acctest.CtValue1), }), @@ -1347,7 +1297,6 @@ func TestAccDSQLCluster_tags_DefaultTags_updateToProviderOnly(t *testing.T) { ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, ConfigDirectory: config.StaticDirectory("testdata/Cluster/tags_defaults/"), ConfigVariables: config.Variables{ - acctest.CtRName: config.StringVariable(rName), acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ acctest.CtKey1: config.StringVariable(acctest.CtValue1), }), @@ -1365,11 +1314,11 @@ func TestAccDSQLCluster_tags_DefaultTags_updateToProviderOnly(t *testing.T) { func TestAccDSQLCluster_tags_DefaultTags_updateToResourceOnly(t *testing.T) { ctx := acctest.Context(t) + var v dsql.GetClusterOutput resourceName := "aws_dsql_cluster.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.DSQLServiceID), CheckDestroy: testAccCheckClusterDestroy(ctx), @@ -1378,7 +1327,6 @@ func TestAccDSQLCluster_tags_DefaultTags_updateToResourceOnly(t *testing.T) { ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, ConfigDirectory: config.StaticDirectory("testdata/Cluster/tags_defaults/"), ConfigVariables: config.Variables{ - acctest.CtRName: config.StringVariable(rName), acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ acctest.CtKey1: config.StringVariable(acctest.CtValue1), }), @@ -1407,7 +1355,6 @@ func 
TestAccDSQLCluster_tags_DefaultTags_updateToResourceOnly(t *testing.T) { ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, ConfigDirectory: config.StaticDirectory("testdata/Cluster/tags/"), ConfigVariables: config.Variables{ - acctest.CtRName: config.StringVariable(rName), acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ acctest.CtKey1: config.StringVariable(acctest.CtValue1), }), @@ -1439,7 +1386,6 @@ func TestAccDSQLCluster_tags_DefaultTags_updateToResourceOnly(t *testing.T) { ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, ConfigDirectory: config.StaticDirectory("testdata/Cluster/tags/"), ConfigVariables: config.Variables{ - acctest.CtRName: config.StringVariable(rName), acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ acctest.CtKey1: config.StringVariable(acctest.CtValue1), }), @@ -1456,11 +1402,11 @@ func TestAccDSQLCluster_tags_DefaultTags_updateToResourceOnly(t *testing.T) { func TestAccDSQLCluster_tags_DefaultTags_emptyResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v dsql.GetClusterOutput resourceName := "aws_dsql_cluster.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.DSQLServiceID), CheckDestroy: testAccCheckClusterDestroy(ctx), @@ -1469,7 +1415,6 @@ func TestAccDSQLCluster_tags_DefaultTags_emptyResourceTag(t *testing.T) { ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, ConfigDirectory: config.StaticDirectory("testdata/Cluster/tags_defaults/"), ConfigVariables: config.Variables{ - acctest.CtRName: config.StringVariable(rName), acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ acctest.CtKey1: config.StringVariable(acctest.CtValue1), }), @@ -1504,7 +1449,6 @@ func TestAccDSQLCluster_tags_DefaultTags_emptyResourceTag(t *testing.T) { 
ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, ConfigDirectory: config.StaticDirectory("testdata/Cluster/tags_defaults/"), ConfigVariables: config.Variables{ - acctest.CtRName: config.StringVariable(rName), acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ acctest.CtKey1: config.StringVariable(acctest.CtValue1), }), @@ -1524,11 +1468,11 @@ func TestAccDSQLCluster_tags_DefaultTags_emptyResourceTag(t *testing.T) { func TestAccDSQLCluster_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T) { ctx := acctest.Context(t) + var v dsql.GetClusterOutput resourceName := "aws_dsql_cluster.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.DSQLServiceID), CheckDestroy: testAccCheckClusterDestroy(ctx), @@ -1537,7 +1481,6 @@ func TestAccDSQLCluster_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T) { ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, ConfigDirectory: config.StaticDirectory("testdata/Cluster/tags_defaults/"), ConfigVariables: config.Variables{ - acctest.CtRName: config.StringVariable(rName), acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ acctest.CtKey1: config.StringVariable(""), }), @@ -1566,7 +1509,6 @@ func TestAccDSQLCluster_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T) { ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, ConfigDirectory: config.StaticDirectory("testdata/Cluster/tags_defaults/"), ConfigVariables: config.Variables{ - acctest.CtRName: config.StringVariable(rName), acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ acctest.CtKey1: config.StringVariable(""), }), @@ -1584,11 +1526,11 @@ func TestAccDSQLCluster_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T) { func TestAccDSQLCluster_tags_DefaultTags_nullOverlappingResourceTag(t *testing.T) { ctx 
:= acctest.Context(t) + var v dsql.GetClusterOutput resourceName := "aws_dsql_cluster.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.DSQLServiceID), CheckDestroy: testAccCheckClusterDestroy(ctx), @@ -1597,7 +1539,6 @@ func TestAccDSQLCluster_tags_DefaultTags_nullOverlappingResourceTag(t *testing.T ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, ConfigDirectory: config.StaticDirectory("testdata/Cluster/tags_defaults/"), ConfigVariables: config.Variables{ - acctest.CtRName: config.StringVariable(rName), acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ acctest.CtKey1: config.StringVariable(acctest.CtProviderValue1), }), @@ -1632,7 +1573,6 @@ func TestAccDSQLCluster_tags_DefaultTags_nullOverlappingResourceTag(t *testing.T ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, ConfigDirectory: config.StaticDirectory("testdata/Cluster/tags_defaults/"), ConfigVariables: config.Variables{ - acctest.CtRName: config.StringVariable(rName), acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ acctest.CtKey1: config.StringVariable(acctest.CtProviderValue1), }), @@ -1655,11 +1595,11 @@ func TestAccDSQLCluster_tags_DefaultTags_nullOverlappingResourceTag(t *testing.T func TestAccDSQLCluster_tags_DefaultTags_nullNonOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v dsql.GetClusterOutput resourceName := "aws_dsql_cluster.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.DSQLServiceID), CheckDestroy: testAccCheckClusterDestroy(ctx), @@ -1668,7 +1608,6 @@ func 
TestAccDSQLCluster_tags_DefaultTags_nullNonOverlappingResourceTag(t *testin ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, ConfigDirectory: config.StaticDirectory("testdata/Cluster/tags_defaults/"), ConfigVariables: config.Variables{ - acctest.CtRName: config.StringVariable(rName), acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1), }), @@ -1705,7 +1644,6 @@ func TestAccDSQLCluster_tags_DefaultTags_nullNonOverlappingResourceTag(t *testin ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, ConfigDirectory: config.StaticDirectory("testdata/Cluster/tags_defaults/"), ConfigVariables: config.Variables{ - acctest.CtRName: config.StringVariable(rName), acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1), }), @@ -1728,11 +1666,11 @@ func TestAccDSQLCluster_tags_DefaultTags_nullNonOverlappingResourceTag(t *testin func TestAccDSQLCluster_tags_ComputedTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) + var v dsql.GetClusterOutput resourceName := "aws_dsql_cluster.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.DSQLServiceID), CheckDestroy: testAccCheckClusterDestroy(ctx), @@ -1741,7 +1679,6 @@ func TestAccDSQLCluster_tags_ComputedTag_OnCreate(t *testing.T) { ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, ConfigDirectory: config.StaticDirectory("testdata/Cluster/tagsComputed1/"), ConfigVariables: config.Variables{ - acctest.CtRName: config.StringVariable(rName), "unknownTagKey": config.StringVariable("computedkey1"), }, Check: resource.ComposeAggregateTestCheckFunc( @@ -1770,7 +1707,6 @@ func TestAccDSQLCluster_tags_ComputedTag_OnCreate(t *testing.T) { 
ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, ConfigDirectory: config.StaticDirectory("testdata/Cluster/tagsComputed1/"), ConfigVariables: config.Variables{ - acctest.CtRName: config.StringVariable(rName), "unknownTagKey": config.StringVariable("computedkey1"), }, ResourceName: resourceName, @@ -1785,11 +1721,11 @@ func TestAccDSQLCluster_tags_ComputedTag_OnCreate(t *testing.T) { func TestAccDSQLCluster_tags_ComputedTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + var v dsql.GetClusterOutput resourceName := "aws_dsql_cluster.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.DSQLServiceID), CheckDestroy: testAccCheckClusterDestroy(ctx), @@ -1798,7 +1734,6 @@ func TestAccDSQLCluster_tags_ComputedTag_OnUpdate_Add(t *testing.T) { ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, ConfigDirectory: config.StaticDirectory("testdata/Cluster/tags/"), ConfigVariables: config.Variables{ - acctest.CtRName: config.StringVariable(rName), acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ acctest.CtKey1: config.StringVariable(acctest.CtValue1), }), @@ -1830,7 +1765,6 @@ func TestAccDSQLCluster_tags_ComputedTag_OnUpdate_Add(t *testing.T) { ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, ConfigDirectory: config.StaticDirectory("testdata/Cluster/tagsComputed2/"), ConfigVariables: config.Variables{ - acctest.CtRName: config.StringVariable(rName), "unknownTagKey": config.StringVariable("computedkey1"), "knownTagKey": config.StringVariable(acctest.CtKey1), "knownTagValue": config.StringVariable(acctest.CtValue1), @@ -1867,7 +1801,6 @@ func TestAccDSQLCluster_tags_ComputedTag_OnUpdate_Add(t *testing.T) { ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, ConfigDirectory: 
config.StaticDirectory("testdata/Cluster/tagsComputed2/"), ConfigVariables: config.Variables{ - acctest.CtRName: config.StringVariable(rName), "unknownTagKey": config.StringVariable("computedkey1"), "knownTagKey": config.StringVariable(acctest.CtKey1), "knownTagValue": config.StringVariable(acctest.CtValue1), @@ -1884,11 +1817,11 @@ func TestAccDSQLCluster_tags_ComputedTag_OnUpdate_Add(t *testing.T) { func TestAccDSQLCluster_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + var v dsql.GetClusterOutput resourceName := "aws_dsql_cluster.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.DSQLServiceID), CheckDestroy: testAccCheckClusterDestroy(ctx), @@ -1897,7 +1830,6 @@ func TestAccDSQLCluster_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, ConfigDirectory: config.StaticDirectory("testdata/Cluster/tags/"), ConfigVariables: config.Variables{ - acctest.CtRName: config.StringVariable(rName), acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ acctest.CtKey1: config.StringVariable(acctest.CtValue1), }), @@ -1929,7 +1861,6 @@ func TestAccDSQLCluster_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, ConfigDirectory: config.StaticDirectory("testdata/Cluster/tagsComputed1/"), ConfigVariables: config.Variables{ - acctest.CtRName: config.StringVariable(rName), "unknownTagKey": config.StringVariable(acctest.CtKey1), }, Check: resource.ComposeAggregateTestCheckFunc( @@ -1958,7 +1889,6 @@ func TestAccDSQLCluster_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, ConfigDirectory: config.StaticDirectory("testdata/Cluster/tagsComputed1/"), ConfigVariables: 
config.Variables{ - acctest.CtRName: config.StringVariable(rName), "unknownTagKey": config.StringVariable(acctest.CtKey1), }, ResourceName: resourceName, @@ -1973,11 +1903,11 @@ func TestAccDSQLCluster_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { func TestAccDSQLCluster_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { ctx := acctest.Context(t) + var v dsql.GetClusterOutput resourceName := "aws_dsql_cluster.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.DSQLServiceID), CheckDestroy: testAccCheckClusterDestroy(ctx), @@ -1987,7 +1917,6 @@ func TestAccDSQLCluster_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, ConfigDirectory: config.StaticDirectory("testdata/Cluster/tags_ignore/"), ConfigVariables: config.Variables{ - acctest.CtRName: config.StringVariable(rName), acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1), }), @@ -2036,7 +1965,6 @@ func TestAccDSQLCluster_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, ConfigDirectory: config.StaticDirectory("testdata/Cluster/tags_ignore/"), ConfigVariables: config.Variables{ - acctest.CtRName: config.StringVariable(rName), acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1Updated), }), @@ -2085,7 +2013,6 @@ func TestAccDSQLCluster_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, ConfigDirectory: config.StaticDirectory("testdata/Cluster/tags_ignore/"), ConfigVariables: config.Variables{ - acctest.CtRName: config.StringVariable(rName), 
acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1Again), }), @@ -2135,11 +2062,11 @@ func TestAccDSQLCluster_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { func TestAccDSQLCluster_tags_IgnoreTags_Overlap_ResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v dsql.GetClusterOutput resourceName := "aws_dsql_cluster.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.DSQLServiceID), CheckDestroy: testAccCheckClusterDestroy(ctx), @@ -2149,7 +2076,6 @@ func TestAccDSQLCluster_tags_IgnoreTags_Overlap_ResourceTag(t *testing.T) { ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, ConfigDirectory: config.StaticDirectory("testdata/Cluster/tags_ignore/"), ConfigVariables: config.Variables{ - acctest.CtRName: config.StringVariable(rName), acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1), acctest.CtResourceKey2: config.StringVariable(acctest.CtResourceValue2), @@ -2207,7 +2133,6 @@ func TestAccDSQLCluster_tags_IgnoreTags_Overlap_ResourceTag(t *testing.T) { ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, ConfigDirectory: config.StaticDirectory("testdata/Cluster/tags_ignore/"), ConfigVariables: config.Variables{ - acctest.CtRName: config.StringVariable(rName), acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1Updated), acctest.CtResourceKey2: config.StringVariable(acctest.CtResourceValue2), @@ -2264,7 +2189,6 @@ func TestAccDSQLCluster_tags_IgnoreTags_Overlap_ResourceTag(t *testing.T) { ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, ConfigDirectory: 
config.StaticDirectory("testdata/Cluster/tags_ignore/"), ConfigVariables: config.Variables{ - acctest.CtRName: config.StringVariable(rName), acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1Again), acctest.CtResourceKey2: config.StringVariable(acctest.CtResourceValue2Updated), diff --git a/internal/service/dsql/cluster_test.go b/internal/service/dsql/cluster_test.go index f87e96597ea3..f7455ffc7458 100644 --- a/internal/service/dsql/cluster_test.go +++ b/internal/service/dsql/cluster_test.go @@ -34,12 +34,6 @@ func TestAccDSQLCluster_basic(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) - // Because dsql is in preview, we need to skip the PreCheckPartitionHasService - // acctest.PreCheckPartitionHasService(t, names.DSQLEndpointID) - // PreCheck for the region configuration as long as DSQL is in preview - acctest.PreCheckRegion(t, "us-east-1", "us-east-2") //lintignore:AWSAT003 - acctest.PreCheckAlternateRegion(t, "us-east-2", "us-east-1") //lintignore:AWSAT003 - acctest.PreCheckThirdRegion(t, "us-west-2") //lintignore:AWSAT003 testAccPreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.DSQLServiceID), @@ -47,7 +41,7 @@ func TestAccDSQLCluster_basic(t *testing.T) { CheckDestroy: testAccCheckClusterDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccClusterConfig_basic(false), + Config: testAccClusterConfig_basic(), Check: resource.ComposeAggregateTestCheckFunc( testAccCheckClusterExists(ctx, resourceName, &cluster), ), @@ -65,6 +59,7 @@ func TestAccDSQLCluster_basic(t *testing.T) { "encryption_type": tfknownvalue.StringExact(awstypes.EncryptionTypeAwsOwnedKmsKey), }), })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrForceDestroy), knownvalue.Bool(false)), statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("kms_encryption_key"), knownvalue.StringExact("AWS_OWNED_KMS_KEY")), 
statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("vpc_endpoint_service_name"), knownvalue.NotNull()), @@ -89,12 +84,6 @@ func TestAccDSQLCluster_disappears(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) - // Because dsql is in preview, we need to skip the PreCheckPartitionHasService - // acctest.PreCheckPartitionHasService(t, names.DSQLEndpointID) - // PreCheck for the region configuration as long as DSQL is in preview - acctest.PreCheckRegion(t, "us-east-1", "us-east-2") //lintignore:AWSAT003 - acctest.PreCheckAlternateRegion(t, "us-east-2", "us-east-1") //lintignore:AWSAT003 - acctest.PreCheckThirdRegion(t, "us-west-2") //lintignore:AWSAT003 testAccPreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.DSQLServiceID), @@ -102,7 +91,7 @@ func TestAccDSQLCluster_disappears(t *testing.T) { CheckDestroy: testAccCheckClusterDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccClusterConfig_basic(false), + Config: testAccClusterConfig_basic(), Check: resource.ComposeAggregateTestCheckFunc( testAccCheckClusterExists(ctx, resourceName, &cluster), acctest.CheckFrameworkResourceDisappears(ctx, acctest.Provider, tfdsql.ResourceCluster, resourceName), @@ -121,12 +110,6 @@ func TestAccDSQLCluster_deletionProtection(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) - // Because dsql is in preview, we need to skip the PreCheckPartitionHasService - // acctest.PreCheckPartitionHasService(t, names.DSQLEndpointID) - // PreCheck for the region configuration as long as DSQL is in preview - acctest.PreCheckRegion(t, "us-east-1", "us-east-2") //lintignore:AWSAT003 - acctest.PreCheckAlternateRegion(t, "us-east-2", "us-east-1") //lintignore:AWSAT003 - acctest.PreCheckThirdRegion(t, "us-west-2") //lintignore:AWSAT003 testAccPreCheck(ctx, t) }, ErrorCheck: 
acctest.ErrorCheck(t, names.DSQLServiceID), @@ -134,7 +117,7 @@ func TestAccDSQLCluster_deletionProtection(t *testing.T) { CheckDestroy: testAccCheckClusterDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccClusterConfig_basic(true), + Config: testAccClusterConfig_deletionProtection(true), Check: resource.ComposeAggregateTestCheckFunc( testAccCheckClusterExists(ctx, resourceName, &cluster), ), @@ -155,7 +138,7 @@ func TestAccDSQLCluster_deletionProtection(t *testing.T) { ImportStateVerify: true, }, { - Config: testAccClusterConfig_basic(false), + Config: testAccClusterConfig_deletionProtection(false), Check: resource.ComposeAggregateTestCheckFunc( testAccCheckClusterExists(ctx, resourceName, &cluster), ), @@ -172,6 +155,39 @@ func TestAccDSQLCluster_deletionProtection(t *testing.T) { }) } +func TestAccDSQLCluster_forceDestroy(t *testing.T) { + ctx := acctest.Context(t) + var cluster dsql.GetClusterOutput + resourceName := "aws_dsql_cluster.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.DSQLServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckClusterDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccClusterConfig_forceDestroy(true), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckClusterExists(ctx, resourceName, &cluster), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("deletion_protection_enabled"), knownvalue.Bool(true)), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrForceDestroy), knownvalue.Bool(true)), + }, + }, + }, + }) +} + func TestAccDSQLCluster_encryption(t *testing.T) { ctx := 
acctest.Context(t) var cluster dsql.GetClusterOutput @@ -181,12 +197,6 @@ func TestAccDSQLCluster_encryption(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) - // Because dsql is in preview, we need to skip the PreCheckPartitionHasService - // acctest.PreCheckPartitionHasService(t, names.DSQLEndpointID) - // PreCheck for the region configuration as long as DSQL is in preview - acctest.PreCheckRegion(t, "us-east-1", "us-east-2") //lintignore:AWSAT003 - acctest.PreCheckAlternateRegion(t, "us-east-2", "us-east-1") //lintignore:AWSAT003 - acctest.PreCheckThirdRegion(t, "us-west-2") //lintignore:AWSAT003 testAccPreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.DSQLServiceID), @@ -305,10 +315,26 @@ func testAccPreCheck(ctx context.Context, t *testing.T) { } } -func testAccClusterConfig_basic(deletionProtection bool) string { +func testAccClusterConfig_basic() string { + return ` +resource "aws_dsql_cluster" "test" { +} +` +} + +func testAccClusterConfig_deletionProtection(deletionProtection bool) string { + return fmt.Sprintf(` +resource "aws_dsql_cluster" "test" { + deletion_protection_enabled = %[1]t +} +`, deletionProtection) +} + +func testAccClusterConfig_forceDestroy(deletionProtection bool) string { return fmt.Sprintf(` resource "aws_dsql_cluster" "test" { deletion_protection_enabled = %[1]t + force_destroy = true } `, deletionProtection) } diff --git a/internal/service/dsql/service_endpoint_resolver_gen.go b/internal/service/dsql/service_endpoint_resolver_gen.go index 19eb64cd78ed..cc2cf618add3 100644 --- a/internal/service/dsql/service_endpoint_resolver_gen.go +++ b/internal/service/dsql/service_endpoint_resolver_gen.go @@ -62,7 +62,7 @@ func (r resolverV2) ResolveEndpoint(ctx context.Context, params dsql.EndpointPar }) params.UseFIPS = aws.Bool(false) } else { - err = fmt.Errorf("looking up dsql endpoint %q: %s", hostname, err) + err = fmt.Errorf("looking up dsql endpoint %q: %w", hostname, err) 
return } } else { diff --git a/internal/service/dsql/service_endpoints_gen_test.go b/internal/service/dsql/service_endpoints_gen_test.go index 2df8610491c4..1f8b10de72dc 100644 --- a/internal/service/dsql/service_endpoints_gen_test.go +++ b/internal/service/dsql/service_endpoints_gen_test.go @@ -521,7 +521,7 @@ func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { ) } -var errCancelOperation = fmt.Errorf("Test: Canceling request") +var errCancelOperation = errors.New("Test: Canceling request") func addCancelRequestMiddleware() func(*middleware.Stack) error { return func(stack *middleware.Stack) error { diff --git a/internal/service/dsql/service_package_gen.go b/internal/service/dsql/service_package_gen.go index 518f91f1b115..5f13ad9b2023 100644 --- a/internal/service/dsql/service_package_gen.go +++ b/internal/service/dsql/service_package_gen.go @@ -7,7 +7,6 @@ import ( "unique" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/dsql" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -73,7 +72,7 @@ func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) ( func(o *dsql.Options) { if inContext, ok := conns.FromContext(ctx); ok && inContext.VCREnabled() { tflog.Info(ctx, "overriding retry behavior to immediately return VCR errors") - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(vcr.InteractionNotFoundRetryableFunc)) + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), vcr.InteractionNotFoundRetryableFunc) } }, withExtraOptions(ctx, p, config), diff --git a/internal/service/dsql/sweep.go b/internal/service/dsql/sweep.go index 9c11c3858005..6d3757ad65c1 100644 --- a/internal/service/dsql/sweep.go +++ b/internal/service/dsql/sweep.go @@ -21,9 +21,9 @@ func RegisterSweepers() { func sweepClusters(ctx context.Context, client 
*conns.AWSClient) ([]sweep.Sweepable, error) { conn := client.DSQLClient(ctx) - var input dsql.ListClustersInput - sweepResources := make([]sweep.Sweepable, 0) + var sweepResources []sweep.Sweepable + var input dsql.ListClustersInput pages := dsql.NewListClustersPaginator(conn, &input) for pages.HasMorePages() { page, err := pages.NextPage(ctx) @@ -34,8 +34,9 @@ func sweepClusters(ctx context.Context, client *conns.AWSClient) ([]sweep.Sweepa for _, v := range page.Clusters { sweepResources = append(sweepResources, framework.NewSweepResource(newClusterResource, client, - framework.NewAttribute(names.AttrIdentifier, aws.ToString(v.Identifier))), - ) + framework.NewAttribute(names.AttrIdentifier, aws.ToString(v.Identifier)), + framework.NewAttribute(names.AttrForceDestroy, true), + )) } } diff --git a/internal/service/dsql/tags_gen.go b/internal/service/dsql/tags_gen.go index 102ccd669ba6..b640f43a1dfe 100644 --- a/internal/service/dsql/tags_gen.go +++ b/internal/service/dsql/tags_gen.go @@ -3,8 +3,8 @@ package dsql import ( "context" - "fmt" + "github.com/YakDriver/smarterr" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/dsql" "github.com/hashicorp/terraform-plugin-log/tflog" @@ -26,7 +26,7 @@ func listTags(ctx context.Context, conn *dsql.Client, identifier string, optFns output, err := conn.ListTagsForResource(ctx, &input, optFns...) if err != nil { - return tftags.New(ctx, nil), err + return tftags.New(ctx, nil), smarterr.NewError(err) } return keyValueTags(ctx, output.Tags), nil @@ -38,7 +38,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri tags, err := listTags(ctx, meta.(*conns.AWSClient).DSQLClient(ctx), identifier) if err != nil { - return err + return smarterr.NewError(err) } if inContext, ok := tftags.FromContext(ctx); ok { @@ -99,7 +99,7 @@ func updateTags(ctx context.Context, conn *dsql.Client, identifier string, oldTa _, err := conn.UntagResource(ctx, &input, optFns...) 
if err != nil { - return fmt.Errorf("untagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } @@ -114,7 +114,7 @@ func updateTags(ctx context.Context, conn *dsql.Client, identifier string, oldTa _, err := conn.TagResource(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("tagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } diff --git a/internal/service/dsql/testdata/Cluster/tags/main_gen.tf b/internal/service/dsql/testdata/Cluster/tags/main_gen.tf index f5875265a59a..77688faf1c6a 100644 --- a/internal/service/dsql/testdata/Cluster/tags/main_gen.tf +++ b/internal/service/dsql/testdata/Cluster/tags/main_gen.tf @@ -2,22 +2,10 @@ # SPDX-License-Identifier: MPL-2.0 resource "aws_dsql_cluster" "test" { - deletion_protection_enabled = false tags = var.resource_tags } -output "rName" { - value = var.rName - description = "To prevent tflint issues" -} - -variable "rName" { - description = "Name for resource" - type = string - nullable = false -} - variable "resource_tags" { description = "Tags to set on resource. 
To specify no tags, set to `null`" # Not setting a default, so that this must explicitly be set to `null` to specify no tags diff --git a/internal/service/dsql/testdata/Cluster/tagsComputed1/main_gen.tf b/internal/service/dsql/testdata/Cluster/tagsComputed1/main_gen.tf index bf899b793980..b3758f7e3bb8 100644 --- a/internal/service/dsql/testdata/Cluster/tagsComputed1/main_gen.tf +++ b/internal/service/dsql/testdata/Cluster/tagsComputed1/main_gen.tf @@ -4,26 +4,14 @@ provider "null" {} resource "aws_dsql_cluster" "test" { - deletion_protection_enabled = false tags = { (var.unknownTagKey) = null_resource.test.id } } -output "rName" { - value = var.rName - description = "To prevent tflint issues" -} - resource "null_resource" "test" {} -variable "rName" { - description = "Name for resource" - type = string - nullable = false -} - variable "unknownTagKey" { type = string nullable = false diff --git a/internal/service/dsql/testdata/Cluster/tagsComputed2/main_gen.tf b/internal/service/dsql/testdata/Cluster/tagsComputed2/main_gen.tf index cec497deac96..d5d7e5c15f5b 100644 --- a/internal/service/dsql/testdata/Cluster/tagsComputed2/main_gen.tf +++ b/internal/service/dsql/testdata/Cluster/tagsComputed2/main_gen.tf @@ -4,7 +4,6 @@ provider "null" {} resource "aws_dsql_cluster" "test" { - deletion_protection_enabled = false tags = { (var.unknownTagKey) = null_resource.test.id @@ -12,19 +11,8 @@ resource "aws_dsql_cluster" "test" { } } -output "rName" { - value = var.rName - description = "To prevent tflint issues" -} - resource "null_resource" "test" {} -variable "rName" { - description = "Name for resource" - type = string - nullable = false -} - variable "unknownTagKey" { type = string nullable = false diff --git a/internal/service/dsql/testdata/Cluster/tags_defaults/main_gen.tf b/internal/service/dsql/testdata/Cluster/tags_defaults/main_gen.tf index 8c2467b0aa35..d0bddd664596 100644 --- a/internal/service/dsql/testdata/Cluster/tags_defaults/main_gen.tf +++ 
b/internal/service/dsql/testdata/Cluster/tags_defaults/main_gen.tf @@ -8,22 +8,10 @@ provider "aws" { } resource "aws_dsql_cluster" "test" { - deletion_protection_enabled = false tags = var.resource_tags } -output "rName" { - value = var.rName - description = "To prevent tflint issues" -} - -variable "rName" { - description = "Name for resource" - type = string - nullable = false -} - variable "resource_tags" { description = "Tags to set on resource. To specify no tags, set to `null`" # Not setting a default, so that this must explicitly be set to `null` to specify no tags diff --git a/internal/service/dsql/testdata/Cluster/tags_ignore/main_gen.tf b/internal/service/dsql/testdata/Cluster/tags_ignore/main_gen.tf index 7be76126b85a..4f3d3c6ba9ba 100644 --- a/internal/service/dsql/testdata/Cluster/tags_ignore/main_gen.tf +++ b/internal/service/dsql/testdata/Cluster/tags_ignore/main_gen.tf @@ -11,22 +11,10 @@ provider "aws" { } resource "aws_dsql_cluster" "test" { - deletion_protection_enabled = false tags = var.resource_tags } -output "rName" { - value = var.rName - description = "To prevent tflint issues" -} - -variable "rName" { - description = "Name for resource" - type = string - nullable = false -} - variable "resource_tags" { description = "Tags to set on resource. To specify no tags, set to `null`" # Not setting a default, so that this must explicitly be set to `null` to specify no tags diff --git a/internal/service/dsql/testdata/tmpl/cluster_tags.gtpl b/internal/service/dsql/testdata/tmpl/cluster_tags.gtpl index 1442422e1c3d..90c68885c9bc 100644 --- a/internal/service/dsql/testdata/tmpl/cluster_tags.gtpl +++ b/internal/service/dsql/testdata/tmpl/cluster_tags.gtpl @@ -1,10 +1,3 @@ resource "aws_dsql_cluster" "test" { - deletion_protection_enabled = false - {{- template "tags" . 
}} } - -output "rName" { - value = var.rName - description = "To prevent tflint issues" -} diff --git a/internal/service/dynamodb/contributor_insights.go b/internal/service/dynamodb/contributor_insights.go index 4bf07705ce19..44a7c0f916c8 100644 --- a/internal/service/dynamodb/contributor_insights.go +++ b/internal/service/dynamodb/contributor_insights.go @@ -46,6 +46,12 @@ func resourceContributorInsights() *schema.Resource { Optional: true, ForceNew: true, }, + names.AttrMode: { + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateDiagFunc: enum.Validate[awstypes.ContributorInsightsMode](), + }, names.AttrTableName: { Type: schema.TypeString, Required: true, @@ -60,7 +66,7 @@ func resourceContributorInsightsCreate(ctx context.Context, d *schema.ResourceDa conn := meta.(*conns.AWSClient).DynamoDBClient(ctx) tableName := d.Get(names.AttrTableName).(string) - input := &dynamodb.UpdateContributorInsightsInput{ + input := dynamodb.UpdateContributorInsightsInput{ ContributorInsightsAction: awstypes.ContributorInsightsActionEnable, TableName: aws.String(tableName), } @@ -71,7 +77,11 @@ func resourceContributorInsightsCreate(ctx context.Context, d *schema.ResourceDa input.IndexName = aws.String(indexName) } - _, err := conn.UpdateContributorInsights(ctx, input) + if v, ok := d.GetOk(names.AttrMode); ok { + input.ContributorInsightsMode = awstypes.ContributorInsightsMode(v.(string)) + } + + _, err := conn.UpdateContributorInsights(ctx, &input) if err != nil { return sdkdiag.AppendErrorf(diags, "creating DynamoDB Contributor Insights for table (%s): %s", tableName, err) @@ -108,6 +118,7 @@ func resourceContributorInsightsRead(ctx context.Context, d *schema.ResourceData } d.Set("index_name", output.IndexName) + d.Set(names.AttrMode, output.ContributorInsightsMode) d.Set(names.AttrTableName, output.TableName) return diags @@ -122,17 +133,16 @@ func resourceContributorInsightsDelete(ctx context.Context, d *schema.ResourceDa return 
sdkdiag.AppendFromErr(diags, err) } - input := &dynamodb.UpdateContributorInsightsInput{ + input := dynamodb.UpdateContributorInsightsInput{ ContributorInsightsAction: awstypes.ContributorInsightsActionDisable, TableName: aws.String(tableName), } - if indexName != "" { input.IndexName = aws.String(indexName) } log.Printf("[INFO] Deleting DynamoDB Contributor Insights: %s", d.Id()) - _, err = conn.UpdateContributorInsights(ctx, input) + _, err = conn.UpdateContributorInsights(ctx, &input) if errs.IsA[*awstypes.ResourceNotFoundException](err) { return diags @@ -168,14 +178,14 @@ func contributorInsightsParseResourceID(id string) (string, string, error) { } func findContributorInsightsByTwoPartKey(ctx context.Context, conn *dynamodb.Client, tableName, indexName string) (*dynamodb.DescribeContributorInsightsOutput, error) { - input := &dynamodb.DescribeContributorInsightsInput{ + input := dynamodb.DescribeContributorInsightsInput{ TableName: aws.String(tableName), } if indexName != "" { input.IndexName = aws.String(indexName) } - output, err := findContributorInsights(ctx, conn, input) + output, err := findContributorInsights(ctx, conn, &input) if err != nil { return nil, err diff --git a/internal/service/dynamodb/contributor_insights_test.go b/internal/service/dynamodb/contributor_insights_test.go index d586fd022cd0..831a28d1d007 100644 --- a/internal/service/dynamodb/contributor_insights_test.go +++ b/internal/service/dynamodb/contributor_insights_test.go @@ -55,6 +55,78 @@ func TestAccDynamoDBContributorInsights_basic(t *testing.T) { }) } +func TestAccDynamoDBContributorInsights_ModeAccessedAndThrottled(t *testing.T) { + ctx := acctest.Context(t) + var conf dynamodb.DescribeContributorInsightsOutput + rName := fmt.Sprintf("tf-acc-test-%s", sdkacctest.RandString(8)) + indexName := fmt.Sprintf("%s-index", rName) + resourceName := "aws_dynamodb_contributor_insights.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + 
ErrorCheck: acctest.ErrorCheck(t, names.DynamoDBServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckContributorInsightsDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccContributorInsightsConfig_AccessedAndThrottledKeys(rName, ""), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckContributorInsightsExists(ctx, resourceName, &conf), + resource.TestCheckResourceAttr(resourceName, names.AttrTableName, rName), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccContributorInsightsConfig_AccessedAndThrottledKeys(rName, indexName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckContributorInsightsExists(ctx, resourceName, &conf), + resource.TestCheckResourceAttr(resourceName, "index_name", indexName), + ), + }, + }, + }) +} + +func TestAccDynamoDBContributorInsights_ModeThrottledOnly(t *testing.T) { + ctx := acctest.Context(t) + var conf dynamodb.DescribeContributorInsightsOutput + rName := fmt.Sprintf("tf-acc-test-%s", sdkacctest.RandString(8)) + indexName := fmt.Sprintf("%s-index", rName) + resourceName := "aws_dynamodb_contributor_insights.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.DynamoDBServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckContributorInsightsDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccContributorInsightsConfig_ThrottledKeys(rName, ""), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckContributorInsightsExists(ctx, resourceName, &conf), + resource.TestCheckResourceAttr(resourceName, names.AttrTableName, rName), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccContributorInsightsConfig_ThrottledKeys(rName, indexName), + Check: 
resource.ComposeAggregateTestCheckFunc( + testAccCheckContributorInsightsExists(ctx, resourceName, &conf), + resource.TestCheckResourceAttr(resourceName, "index_name", indexName), + ), + }, + }, + }) +} + func TestAccDynamoDBContributorInsights_disappears(t *testing.T) { ctx := acctest.Context(t) var conf dynamodb.DescribeContributorInsightsOutput @@ -100,7 +172,7 @@ resource "aws_dynamodb_table" "test" { write_capacity = 1 } } -`, rName) + `, rName) } func testAccContributorInsightsConfig_basic(rName, indexName string) string { @@ -112,6 +184,28 @@ resource "aws_dynamodb_contributor_insights" "test" { `, rName, indexName)) } +func testAccContributorInsightsConfig_AccessedAndThrottledKeys(rName, indexName string) string { + return acctest.ConfigCompose(testAccContributorInsightsBaseConfig(rName), fmt.Sprintf(` +resource "aws_dynamodb_contributor_insights" "test" { + table_name = aws_dynamodb_table.test.name + index_name = %[2]q + + mode = "ACCESSED_AND_THROTTLED_KEYS" +} +`, rName, indexName)) +} + +func testAccContributorInsightsConfig_ThrottledKeys(rName, indexName string) string { + return acctest.ConfigCompose(testAccContributorInsightsBaseConfig(rName), fmt.Sprintf(` +resource "aws_dynamodb_contributor_insights" "test" { + table_name = aws_dynamodb_table.test.name + index_name = %[2]q + + mode = "THROTTLED_KEYS" +} +`, rName, indexName)) +} + func testAccCheckContributorInsightsExists(ctx context.Context, n string, v *dynamodb.DescribeContributorInsightsOutput) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] diff --git a/internal/service/dynamodb/forge.go b/internal/service/dynamodb/forge.go index d5c8c3f77633..2c63cc57673d 100644 --- a/internal/service/dynamodb/forge.go +++ b/internal/service/dynamodb/forge.go @@ -46,3 +46,16 @@ func stripOnDemandThroughputAttributes(in map[string]any) (map[string]any, error return m, nil } + +func stripWarmThroughputAttributes(in map[string]any) (map[string]any, error) { 
+ mapCopy, err := copystructure.Copy(in) + if err != nil { + return nil, err + } + + m := mapCopy.(map[string]any) + + delete(m, "warm_throughput") + + return m, nil +} diff --git a/internal/service/dynamodb/resource_policy.go b/internal/service/dynamodb/resource_policy.go index 36350f35a2cb..bb6337f126f1 100644 --- a/internal/service/dynamodb/resource_policy.go +++ b/internal/service/dynamodb/resource_policy.go @@ -31,6 +31,7 @@ import ( // @ArnIdentity("resource_arn", identityDuplicateAttributes="id") // @Testing(existsType="github.com/aws/aws-sdk-go-v2/service/dynamodb;dynamodb.GetResourcePolicyOutput") // @Testing(importIgnore="policy") +// @Testing(preIdentityVersion="v5.100.0") func newResourcePolicyResource(_ context.Context) (resource.ResourceWithConfigure, error) { r := &resourcePolicyResource{} @@ -96,7 +97,7 @@ func (r *resourcePolicyResource) Create(ctx context.Context, request resource.Cr data.RevisionID = fwflex.StringToFramework(ctx, output.RevisionId) data.setID() - _, err = tfresource.RetryWhenNotFound(ctx, propagationTimeout, func() (any, error) { + _, err = tfresource.RetryWhenNotFound(ctx, propagationTimeout, func(ctx context.Context) (any, error) { return findResourcePolicyByARN(ctx, conn, data.ResourceARN.ValueString()) }) diff --git a/internal/service/dynamodb/resource_policy_identity_gen_test.go b/internal/service/dynamodb/resource_policy_identity_gen_test.go index f6414e28a4e5..c4de07b480ca 100644 --- a/internal/service/dynamodb/resource_policy_identity_gen_test.go +++ b/internal/service/dynamodb/resource_policy_identity_gen_test.go @@ -16,6 +16,7 @@ import ( "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" "github.com/hashicorp/terraform-plugin-testing/tfversion" "github.com/hashicorp/terraform-provider-aws/internal/acctest" + tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -26,7 +27,7 @@ func 
TestAccDynamoDBResourcePolicy_Identity_Basic(t *testing.T) { resourceName := "aws_dynamodb_resource_policy.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ TerraformVersionChecks: []tfversion.TerraformVersionCheck{ tfversion.SkipBelow(tfversion.Version1_12_0), }, @@ -47,6 +48,9 @@ func TestAccDynamoDBResourcePolicy_Identity_Basic(t *testing.T) { ConfigStateChecks: []statecheck.StateCheck{ statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New(names.AttrResourceARN), compare.ValuesSame()), statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrResourceARN: knownvalue.NotNull(), + }), statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrResourceARN)), }, }, @@ -115,7 +119,7 @@ func TestAccDynamoDBResourcePolicy_Identity_RegionOverride(t *testing.T) { resourceName := "aws_dynamodb_resource_policy.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ TerraformVersionChecks: []tfversion.TerraformVersionCheck{ tfversion.SkipBelow(tfversion.Version1_12_0), }, @@ -134,6 +138,9 @@ func TestAccDynamoDBResourcePolicy_Identity_RegionOverride(t *testing.T) { ConfigStateChecks: []statecheck.StateCheck{ statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New(names.AttrResourceARN), compare.ValuesSame()), statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrResourceARN: knownvalue.NotNull(), + }), 
statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrResourceARN)), }, }, @@ -237,3 +244,129 @@ func TestAccDynamoDBResourcePolicy_Identity_RegionOverride(t *testing.T) { }, }) } + +func TestAccDynamoDBResourcePolicy_Identity_ExistingResource(t *testing.T) { + ctx := acctest.Context(t) + + var v dynamodb.GetResourcePolicyOutput + resourceName := "aws_dynamodb_resource_policy.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.DynamoDBServiceID), + CheckDestroy: testAccCheckResourcePolicyDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/ResourcePolicy/basic_v5.100.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckResourcePolicyExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: v6.0 Identity set on refresh + { + ConfigDirectory: config.StaticDirectory("testdata/ResourcePolicy/basic_v6.0.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckResourcePolicyExists(ctx, resourceName, &v), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, 
map[string]knownvalue.Check{ + names.AttrResourceARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrResourceARN)), + }, + }, + + // Step 3: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/ResourcePolicy/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrResourceARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrResourceARN)), + }, + }, + }, + }) +} + +func TestAccDynamoDBResourcePolicy_Identity_ExistingResource_NoRefresh_NoChange(t *testing.T) { + ctx := acctest.Context(t) + + var v dynamodb.GetResourcePolicyOutput + resourceName := "aws_dynamodb_resource_policy.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.DynamoDBServiceID), + CheckDestroy: testAccCheckResourcePolicyDestroy(ctx), + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/ResourcePolicy/basic_v5.100.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: 
config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckResourcePolicyExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/ResourcePolicy/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + }, + }, + }) +} diff --git a/internal/service/dynamodb/resource_policy_test.go b/internal/service/dynamodb/resource_policy_test.go index ebd8ea72833e..c8be381c039f 100644 --- a/internal/service/dynamodb/resource_policy_test.go +++ b/internal/service/dynamodb/resource_policy_test.go @@ -12,13 +12,8 @@ import ( "github.com/aws/aws-sdk-go-v2/service/dynamodb" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" - "github.com/hashicorp/terraform-plugin-testing/plancheck" - "github.com/hashicorp/terraform-plugin-testing/statecheck" "github.com/hashicorp/terraform-plugin-testing/terraform" - "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" - "github.com/hashicorp/terraform-plugin-testing/tfversion" "github.com/hashicorp/terraform-provider-aws/internal/acctest" - tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" "github.com/hashicorp/terraform-provider-aws/internal/conns" tfdynamodb "github.com/hashicorp/terraform-provider-aws/internal/service/dynamodb" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" @@ -113,70 +108,6 @@ func TestAccDynamoDBResourcePolicy_disappears(t *testing.T) { }) } -func TestAccDynamoDBResourcePolicy_Identity_ExistingResource(t *testing.T) { - ctx := acctest.Context(t) - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resourceName := "aws_dynamodb_resource_policy.test" - - 
resource.ParallelTest(t, resource.TestCase{ - TerraformVersionChecks: []tfversion.TerraformVersionCheck{ - tfversion.SkipBelow(tfversion.Version1_12_0), - }, - PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, names.DynamoDBServiceID), - CheckDestroy: testAccCheckResourcePolicyDestroy(ctx), - Steps: []resource.TestStep{ - { - ExternalProviders: map[string]resource.ExternalProvider{ - "aws": { - Source: "hashicorp/aws", - VersionConstraint: "5.100.0", - }, - }, - Config: testAccResourcePolicyConfig_basic(rName), - ConfigStateChecks: []statecheck.StateCheck{ - tfstatecheck.ExpectNoIdentity(resourceName), - }, - }, - { - ExternalProviders: map[string]resource.ExternalProvider{ - "aws": { - Source: "hashicorp/aws", - VersionConstraint: "6.0.0", - }, - }, - Config: testAccResourcePolicyConfig_basic(rName), - ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - PostApplyPostRefresh: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - }, - ConfigStateChecks: []statecheck.StateCheck{ - statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrResourceARN)), - }, - }, - { - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - Config: testAccResourcePolicyConfig_basic(rName), - ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - PostApplyPostRefresh: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - }, - ConfigStateChecks: []statecheck.StateCheck{ - statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrResourceARN)), - }, - }, - }, - }) -} - func testAccCheckResourcePolicyDestroy(ctx context.Context) resource.TestCheckFunc { return func(s 
*terraform.State) error { conn := acctest.Provider.Meta().(*conns.AWSClient).DynamoDBClient(ctx) diff --git a/internal/service/dynamodb/service_endpoint_resolver_gen.go b/internal/service/dynamodb/service_endpoint_resolver_gen.go index 191011ed05b2..c95db3d33fb1 100644 --- a/internal/service/dynamodb/service_endpoint_resolver_gen.go +++ b/internal/service/dynamodb/service_endpoint_resolver_gen.go @@ -62,7 +62,7 @@ func (r resolverV2) ResolveEndpoint(ctx context.Context, params dynamodb.Endpoin }) params.UseFIPS = aws.Bool(false) } else { - err = fmt.Errorf("looking up dynamodb endpoint %q: %s", hostname, err) + err = fmt.Errorf("looking up dynamodb endpoint %q: %w", hostname, err) return } } else { diff --git a/internal/service/dynamodb/service_endpoints_gen_test.go b/internal/service/dynamodb/service_endpoints_gen_test.go index 3003c5ddb7c1..e4d677fa50ba 100644 --- a/internal/service/dynamodb/service_endpoints_gen_test.go +++ b/internal/service/dynamodb/service_endpoints_gen_test.go @@ -659,7 +659,7 @@ func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { ) } -var errCancelOperation = fmt.Errorf("Test: Canceling request") +var errCancelOperation = errors.New("Test: Canceling request") func addCancelRequestMiddleware() func(*middleware.Stack) error { return func(stack *middleware.Stack) error { diff --git a/internal/service/dynamodb/service_package.go b/internal/service/dynamodb/service_package.go index 27e87699e880..c7eb521f3877 100644 --- a/internal/service/dynamodb/service_package.go +++ b/internal/service/dynamodb/service_package.go @@ -10,21 +10,44 @@ import ( "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/dynamodb" awstypes "github.com/aws/aws-sdk-go-v2/service/dynamodb/types" + "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/errs" + 
"github.com/hashicorp/terraform-provider-aws/internal/vcr" ) -func (p *servicePackage) withExtraOptions(_ context.Context, config map[string]any) []func(*dynamodb.Options) { +func (p *servicePackage) withExtraOptions(ctx context.Context, config map[string]any) []func(*dynamodb.Options) { cfg := *(config["aws_sdkv2_config"].(*aws.Config)) return []func(*dynamodb.Options){ func(o *dynamodb.Options) { - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(func(err error) aws.Ternary { - if errs.IsAErrorMessageContains[*awstypes.LimitExceededException](err, "Subscriber limit exceeded:") { - return aws.TrueTernary - } - return aws.UnknownTernary // Delegate to configured Retryer. - })) + retryables := []retry.IsErrorRetryable{ + retry.IsErrorRetryableFunc(func(err error) aws.Ternary { + if errs.IsAErrorMessageContains[*awstypes.LimitExceededException](err, "Requested MaxReadRequestUnits for OnDemandThroughput for table exceeds TableMaxReadCapacityUnits") { + return aws.FalseTernary + } + return aws.UnknownTernary // Delegate to configured Retryer. + }), + retry.IsErrorRetryableFunc(func(err error) aws.Ternary { + if errs.IsAErrorMessageContains[*awstypes.LimitExceededException](err, "Requested MaxWriteRequestUnits for OnDemandThroughput for table exceeds TableMaxWriteCapacityUnits") { + return aws.FalseTernary + } + return aws.UnknownTernary // Delegate to configured Retryer. + }), + retry.IsErrorRetryableFunc(func(err error) aws.Ternary { + if errs.IsAErrorMessageContains[*awstypes.LimitExceededException](err, "Subscriber limit exceeded:") { + return aws.TrueTernary + } + return aws.UnknownTernary // Delegate to configured Retryer. 
+ }), + } + // Include go-vcr retryable to prevent generated client retryer from being overridden + if inContext, ok := conns.FromContext(ctx); ok && inContext.VCREnabled() { + tflog.Info(ctx, "overriding retry behavior to immediately return VCR errors") + retryables = append(retryables, vcr.InteractionNotFoundRetryableFunc) + } + + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retryables...) }, } } diff --git a/internal/service/dynamodb/service_package_gen.go b/internal/service/dynamodb/service_package_gen.go index 900ddc966df6..f0fe873f787d 100644 --- a/internal/service/dynamodb/service_package_gen.go +++ b/internal/service/dynamodb/service_package_gen.go @@ -7,7 +7,6 @@ import ( "unique" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/dynamodb" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -153,7 +152,7 @@ func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) ( func(o *dynamodb.Options) { if inContext, ok := conns.FromContext(ctx); ok && inContext.VCREnabled() { tflog.Info(ctx, "overriding retry behavior to immediately return VCR errors") - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(vcr.InteractionNotFoundRetryableFunc)) + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), vcr.InteractionNotFoundRetryableFunc) } }, withExtraOptions(ctx, p, config), diff --git a/internal/service/dynamodb/status.go b/internal/service/dynamodb/status.go index 6f7d601196b8..f9c3abc2b892 100644 --- a/internal/service/dynamodb/status.go +++ b/internal/service/dynamodb/status.go @@ -29,6 +29,26 @@ func statusTable(ctx context.Context, conn *dynamodb.Client, tableName string) r } } +func statusTableWarmThroughput(ctx context.Context, conn *dynamodb.Client, tableName string) retry.StateRefreshFunc { + return func() (any, string, 
error) { + output, err := findTableByName(ctx, conn, tableName) + + if tfresource.NotFound(err) { + return nil, "", nil + } + + if err != nil { + return nil, "", err + } + + if output == nil || output.WarmThroughput == nil { + return nil, "", nil + } + + return output, string(output.WarmThroughput.Status), nil + } +} + func statusImport(ctx context.Context, conn *dynamodb.Client, importARN string) retry.StateRefreshFunc { return func() (any, string, error) { output, err := findImportByARN(ctx, conn, importARN) @@ -113,6 +133,26 @@ func statusGSI(ctx context.Context, conn *dynamodb.Client, tableName, indexName } } +func statusGSIWarmThroughput(ctx context.Context, conn *dynamodb.Client, tableName, indexName string) retry.StateRefreshFunc { + return func() (any, string, error) { + output, err := findGSIByTwoPartKey(ctx, conn, tableName, indexName) + + if tfresource.NotFound(err) { + return nil, "", nil + } + + if err != nil { + return nil, "", err + } + + if output == nil || output.WarmThroughput == nil { + return nil, "", nil + } + + return output, string(output.WarmThroughput.Status), nil + } +} + func statusPITR(ctx context.Context, conn *dynamodb.Client, tableName string, optFns ...func(*dynamodb.Options)) retry.StateRefreshFunc { return func() (any, string, error) { output, err := findPITRByTableName(ctx, conn, tableName, optFns...) 
diff --git a/internal/service/dynamodb/sweep.go b/internal/service/dynamodb/sweep.go index 2b15a4732766..c14087733c26 100644 --- a/internal/service/dynamodb/sweep.go +++ b/internal/service/dynamodb/sweep.go @@ -12,7 +12,6 @@ import ( "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/dynamodb" awstypes "github.com/aws/aws-sdk-go-v2/service/dynamodb/types" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/sweep" @@ -37,7 +36,7 @@ func sweepTables(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.DynamoDBClient(ctx) input := &dynamodb.ListTablesInput{} @@ -96,7 +95,7 @@ func sweepBackups(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.DynamoDBClient(ctx) input := &dynamodb.ListBackupsInput{ @@ -157,24 +156,21 @@ func (bs backupSweeper) Delete(ctx context.Context, optFns ...tfresource.Options const ( timeout = 10 * time.Minute ) - err := tfresource.Retry(ctx, timeout, func() *retry.RetryError { + err := tfresource.Retry(ctx, timeout, func(ctx context.Context) *tfresource.RetryError { log.Printf("[DEBUG] Deleting DynamoDB Backup: %s", bs.arn) _, err := bs.conn.DeleteBackup(ctx, input) if errs.IsA[*awstypes.BackupNotFoundException](err) { return nil } if errs.IsA[*awstypes.BackupInUseException](err) || errs.IsA[*awstypes.LimitExceededException](err) { - return retry.RetryableError(err) + return tfresource.RetryableError(err) } if err != nil { - return 
retry.NonRetryableError(err) + return tfresource.NonRetryableError(err) } return nil }, optFns...) - if tfresource.TimedOut(err) { - _, err = bs.conn.DeleteBackup(ctx, input) - } return err } diff --git a/internal/service/dynamodb/table.go b/internal/service/dynamodb/table.go index a69a51054aa8..73d836174963 100644 --- a/internal/service/dynamodb/table.go +++ b/internal/service/dynamodb/table.go @@ -21,7 +21,6 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -93,7 +92,7 @@ func resourceTable() *schema.Resource { func(_ context.Context, diff *schema.ResourceDiff, meta any) error { if diff.Id() != "" && (diff.HasChange("stream_enabled") || (diff.Get("stream_view_type") != "" && diff.HasChange("stream_view_type"))) { if err := diff.SetNewComputed(names.AttrStreamARN); err != nil { - return fmt.Errorf("setting stream_arn to computed: %s", err) + return fmt.Errorf("setting stream_arn to computed: %w", err) } } return nil @@ -125,388 +124,416 @@ func resourceTable() *schema.Resource { customdiff.ForceNewIfChange("restore_source_table_arn", func(_ context.Context, old, new, meta any) bool { return old.(string) != new.(string) && new.(string) != "" }), + customdiff.ForceNewIfChange("warm_throughput.0.read_units_per_second", func(_ context.Context, old, new, meta any) bool { + // warm_throughput can only be increased, not decreased + // i.e., "api error ValidationException: One or more parameter values were invalid: Requested ReadUnitsPerSecond for WarmThroughput for table is lower than current WarmThroughput, decreasing WarmThroughput is not supported" + if old, new := old.(int), new.(int); 
new != 0 && new < old { + return true + } + + return false + }), + customdiff.ForceNewIfChange("warm_throughput.0.write_units_per_second", func(_ context.Context, old, new, meta any) bool { + // warm_throughput can only be increased, not decreased + // i.e., "api error ValidationException: One or more parameter values were invalid: Requested WriteUnitsPerSecond for WarmThroughput for table is lower than current WarmThroughput, decreasing WarmThroughput is not supported" + if old, new := old.(int), new.(int); 
schema.TypeString}, - }, - "on_demand_throughput": onDemandThroughputSchema(), - "projection_type": { - Type: schema.TypeString, - Required: true, - ValidateDiagFunc: enum.Validate[awstypes.ProjectionType](), - }, - "range_key": { - Type: schema.TypeString, - Optional: true, - }, - "read_capacity": { - Type: schema.TypeInt, - Optional: true, - Computed: true, - }, - "write_capacity": { - Type: schema.TypeInt, - Optional: true, - Computed: true, + "attribute": { + Type: schema.TypeSet, + Optional: true, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + names.AttrName: { + Type: schema.TypeString, + Required: true, + }, + names.AttrType: { + Type: schema.TypeString, + Required: true, + ValidateDiagFunc: enum.Validate[awstypes.ScalarAttributeType](), + }, }, }, + Set: sdkv2.SimpleSchemaSetFunc(names.AttrName), }, - }, - "hash_key": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - "local_secondary_index": { - Type: schema.TypeSet, - Optional: true, - ForceNew: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - names.AttrName: { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "non_key_attributes": { - Type: schema.TypeList, - Optional: true, - ForceNew: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "projection_type": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateDiagFunc: enum.Validate[awstypes.ProjectionType](), - }, - "range_key": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - }, + "billing_mode": { + Type: schema.TypeString, + Optional: true, + Default: awstypes.BillingModeProvisioned, + ValidateDiagFunc: enum.Validate[awstypes.BillingMode](), }, - Set: sdkv2.SimpleSchemaSetFunc(names.AttrName), - }, - names.AttrName: { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "on_demand_throughput": onDemandThroughputSchema(), - "point_in_time_recovery": { - Type: 
schema.TypeList, - Optional: true, - Computed: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - names.AttrEnabled: { - Type: schema.TypeBool, - Required: true, - }, - "recovery_period_in_days": { - Type: schema.TypeInt, - Optional: true, - Computed: true, - ValidateFunc: validation.IntBetween(1, 35), - DiffSuppressFunc: func(k, oldValue, newValue string, d *schema.ResourceData) bool { - return !d.Get("point_in_time_recovery.0.enabled").(bool) + "deletion_protection_enabled": { + Type: schema.TypeBool, + Optional: true, + }, + "global_secondary_index": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "hash_key": { + Type: schema.TypeString, + Required: true, + }, + names.AttrName: { + Type: schema.TypeString, + Required: true, + }, + "non_key_attributes": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "on_demand_throughput": onDemandThroughputSchema(), + "projection_type": { + Type: schema.TypeString, + Required: true, + ValidateDiagFunc: enum.Validate[awstypes.ProjectionType](), + }, + "range_key": { + Type: schema.TypeString, + Optional: true, + }, + "read_capacity": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + }, + "warm_throughput": warmThroughputSchema(), + "write_capacity": { + Type: schema.TypeInt, + Optional: true, + Computed: true, }, }, }, }, - }, - "range_key": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - "read_capacity": { - Type: schema.TypeInt, - Optional: true, - Computed: true, - ConflictsWith: []string{"on_demand_throughput"}, - }, - "replica": { - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - names.AttrARN: { - Type: schema.TypeString, - Computed: true, - }, - "consistency_mode": { - Type: schema.TypeString, - Optional: true, - Default: awstypes.MultiRegionConsistencyEventual, - ValidateDiagFunc: 
enum.Validate[awstypes.MultiRegionConsistency](), - }, - names.AttrKMSKeyARN: { - Type: schema.TypeString, - Optional: true, - Computed: true, - ValidateFunc: verify.ValidARN, - // update is equivalent of force a new *replica*, not table - }, - "point_in_time_recovery": { - Type: schema.TypeBool, - Optional: true, - Default: false, - }, - names.AttrPropagateTags: { - Type: schema.TypeBool, - Optional: true, - Default: false, - }, - "region_name": { - Type: schema.TypeString, - Required: true, - // update is equivalent of force a new *replica*, not table - }, - names.AttrStreamARN: { - Type: schema.TypeString, - Computed: true, - }, - "stream_label": { - Type: schema.TypeString, - Computed: true, - }, - }, + "hash_key": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, }, - }, - "import_table": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - ConflictsWith: []string{"restore_source_name", "restore_source_table_arn"}, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "input_compression_type": { - Type: schema.TypeString, - Optional: true, - ValidateDiagFunc: enum.Validate[awstypes.InputCompressionType](), - }, - "input_format": { - Type: schema.TypeString, - Required: true, - ValidateDiagFunc: enum.Validate[awstypes.InputFormat](), - }, - "input_format_options": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "csv": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "delimiter": { - Type: schema.TypeString, - Optional: true, - }, - "header_list": { - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, + "import_table": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + ConflictsWith: []string{"restore_source_name", "restore_source_table_arn"}, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + 
"input_compression_type": { + Type: schema.TypeString, + Optional: true, + ValidateDiagFunc: enum.Validate[awstypes.InputCompressionType](), + }, + "input_format": { + Type: schema.TypeString, + Required: true, + ValidateDiagFunc: enum.Validate[awstypes.InputFormat](), + }, + "input_format_options": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "csv": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "delimiter": { + Type: schema.TypeString, + Optional: true, + }, + "header_list": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, }, }, }, }, }, }, - }, - "s3_bucket_source": { - Type: schema.TypeList, - MaxItems: 1, - Required: true, - ForceNew: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - names.AttrBucket: { - Type: schema.TypeString, - Required: true, - }, - "bucket_owner": { - Type: schema.TypeString, - Optional: true, - }, - "key_prefix": { - Type: schema.TypeString, - Optional: true, + "s3_bucket_source": { + Type: schema.TypeList, + MaxItems: 1, + Required: true, + ForceNew: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + names.AttrBucket: { + Type: schema.TypeString, + Required: true, + }, + "bucket_owner": { + Type: schema.TypeString, + Optional: true, + }, + "key_prefix": { + Type: schema.TypeString, + Optional: true, + }, }, }, }, }, }, }, - }, - "restore_date_time": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: verify.ValidUTCTimestamp, - }, - "restore_source_table_arn": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: verify.ValidARN, - ConflictsWith: []string{"import_table", "restore_source_name"}, - }, - "restore_source_name": { - Type: schema.TypeString, - Optional: true, - ConflictsWith: []string{"import_table", "restore_source_table_arn"}, - }, - 
"restore_to_latest_time": { - Type: schema.TypeBool, - Optional: true, - ForceNew: true, - }, - "server_side_encryption": { - Type: schema.TypeList, - Optional: true, - Computed: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - names.AttrEnabled: { - Type: schema.TypeBool, - Required: true, + "local_secondary_index": { + Type: schema.TypeSet, + Optional: true, + ForceNew: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + names.AttrName: { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "non_key_attributes": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "projection_type": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateDiagFunc: enum.Validate[awstypes.ProjectionType](), + }, + "range_key": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, }, - names.AttrKMSKeyARN: { - Type: schema.TypeString, - Optional: true, - Computed: true, - ValidateFunc: verify.ValidARN, + }, + Set: sdkv2.SimpleSchemaSetFunc(names.AttrName), + }, + names.AttrName: { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "on_demand_throughput": onDemandThroughputSchema(), + "point_in_time_recovery": { + Type: schema.TypeList, + Optional: true, + Computed: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + names.AttrEnabled: { + Type: schema.TypeBool, + Required: true, + }, + "recovery_period_in_days": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + ValidateFunc: validation.IntBetween(1, 35), + DiffSuppressFunc: func(k, oldValue, newValue string, d *schema.ResourceData) bool { + return !d.Get("point_in_time_recovery.0.enabled").(bool) + }, + }, }, }, }, - }, - names.AttrStreamARN: { - Type: schema.TypeString, - Computed: true, - }, - "stream_enabled": { - Type: schema.TypeBool, - Optional: true, - }, - "stream_label": { - Type: 
schema.TypeString, - Computed: true, - }, - "stream_view_type": { - Type: schema.TypeString, - Optional: true, - Computed: true, - StateFunc: sdkv2.ToUpperSchemaStateFunc, - ValidateFunc: validation.StringInSlice(append(enum.Values[awstypes.StreamViewType](), ""), false), - }, - "table_class": { - Type: schema.TypeString, - Optional: true, - Default: awstypes.TableClassStandard, - DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool { - return old == "" && new == string(awstypes.TableClassStandard) + "range_key": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, }, - ValidateDiagFunc: enum.Validate[awstypes.TableClass](), - }, - names.AttrTags: tftags.TagsSchema(), - names.AttrTagsAll: tftags.TagsSchemaComputed(), - "ttl": { - Type: schema.TypeList, - Optional: true, - Computed: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "attribute_name": { - Type: schema.TypeString, - Optional: true, - DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool { - // AWS requires the attribute name to be set when disabling TTL but - // does not return it so it causes a diff. 
- if old == "" && new != "" && !d.Get("ttl.0.enabled").(bool) { - return true - } - return false + "read_capacity": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + ConflictsWith: []string{"on_demand_throughput"}, + }, + "replica": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + names.AttrARN: { + Type: schema.TypeString, + Computed: true, + }, + "consistency_mode": { + Type: schema.TypeString, + Optional: true, + Default: awstypes.MultiRegionConsistencyEventual, + ValidateDiagFunc: enum.Validate[awstypes.MultiRegionConsistency](), + }, + "deletion_protection_enabled": { + Type: schema.TypeBool, + Optional: true, + Computed: true, + }, + names.AttrKMSKeyARN: { + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: verify.ValidARN, + // update is equivalent of force a new *replica*, not table + }, + "point_in_time_recovery": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + names.AttrPropagateTags: { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + "region_name": { + Type: schema.TypeString, + Required: true, + // update is equivalent of force a new *replica*, not table + }, + names.AttrStreamARN: { + Type: schema.TypeString, + Computed: true, + }, + "stream_label": { + Type: schema.TypeString, + Computed: true, }, }, - names.AttrEnabled: { - Type: schema.TypeBool, - Optional: true, - Default: false, + }, + }, + "restore_date_time": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: verify.ValidUTCTimestamp, + }, + "restore_source_table_arn": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidARN, + ConflictsWith: []string{"import_table", "restore_source_name"}, + }, + "restore_source_name": { + Type: schema.TypeString, + Optional: true, + ConflictsWith: []string{"import_table", "restore_source_table_arn"}, + }, + "restore_to_latest_time": { + Type: schema.TypeBool, + 
Optional: true, + ForceNew: true, + }, + "server_side_encryption": { + Type: schema.TypeList, + Optional: true, + Computed: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + names.AttrEnabled: { + Type: schema.TypeBool, + Required: true, + }, + names.AttrKMSKeyARN: { + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: verify.ValidARN, + }, }, }, }, - DiffSuppressFunc: verify.SuppressMissingOptionalConfigurationBlock, - }, - "write_capacity": { - Type: schema.TypeInt, - Computed: true, - Optional: true, - ConflictsWith: []string{"on_demand_throughput"}, - }, + names.AttrStreamARN: { + Type: schema.TypeString, + Computed: true, + }, + "stream_enabled": { + Type: schema.TypeBool, + Optional: true, + }, + "stream_label": { + Type: schema.TypeString, + Computed: true, + }, + "stream_view_type": { + Type: schema.TypeString, + Optional: true, + Computed: true, + StateFunc: sdkv2.ToUpperSchemaStateFunc, + ValidateFunc: validation.StringInSlice(append(enum.Values[awstypes.StreamViewType](), ""), false), + }, + "table_class": { + Type: schema.TypeString, + Optional: true, + Default: awstypes.TableClassStandard, + DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool { + return old == "" && new == string(awstypes.TableClassStandard) + }, + ValidateDiagFunc: enum.Validate[awstypes.TableClass](), + }, + names.AttrTags: tftags.TagsSchema(), + names.AttrTagsAll: tftags.TagsSchemaComputed(), + "ttl": { + Type: schema.TypeList, + Optional: true, + Computed: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "attribute_name": { + Type: schema.TypeString, + Optional: true, + DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool { + // AWS requires the attribute name to be set when disabling TTL but + // does not return it so it causes a diff. 
+ if old == "" && new != "" && !d.Get("ttl.0.enabled").(bool) { + return true + } + return false + }, + }, + names.AttrEnabled: { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + }, + }, + DiffSuppressFunc: verify.SuppressMissingOptionalConfigurationBlock, + }, + "warm_throughput": warmThroughputSchema(), + "write_capacity": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + ConflictsWith: []string{"on_demand_throughput"}, + }, + } }, } } @@ -539,6 +566,31 @@ func onDemandThroughputSchema() *schema.Schema { } } +func warmThroughputSchema() *schema.Schema { + return &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Computed: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "read_units_per_second": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + ValidateFunc: validation.IntAtLeast(12000), + }, + "write_units_per_second": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + ValidateFunc: validation.IntAtLeast(4000), + }, + }, + }, + } +} + func resourceTableCreate(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { var diags diag.Diagnostics conn := meta.(*conns.AWSClient).DynamoDBClient(ctx) @@ -613,7 +665,7 @@ func resourceTableCreate(ctx context.Context, d *schema.ResourceData, meta any) input.SSESpecificationOverride = expandEncryptAtRestOptions(v.([]any)) } - _, err := tfresource.RetryWhen(ctx, createTableTimeout, func() (any, error) { + _, err := tfresource.RetryWhen(ctx, createTableTimeout, func(ctx context.Context) (any, error) { return conn.RestoreTableToPointInTime(ctx, input) }, func(err error) (bool, error) { if tfawserr.ErrCodeEquals(err, errCodeThrottlingException) { @@ -681,7 +733,7 @@ func resourceTableCreate(ctx context.Context, d *schema.ResourceData, meta any) input.TableCreationParameters = tcp - importTableOutput, err := tfresource.RetryWhen(ctx, createTableTimeout, func() (any, error) { + importTableOutput, err := 
tfresource.RetryWhen(ctx, createTableTimeout, func(ctx context.Context) (any, error) { return conn.ImportTable(ctx, input) }, func(err error) (bool, error) { if tfawserr.ErrCodeEquals(err, errCodeThrottlingException) { @@ -772,7 +824,11 @@ func resourceTableCreate(ctx context.Context, d *schema.ResourceData, meta any) input.TableClass = awstypes.TableClass(v.(string)) } - _, err := tfresource.RetryWhen(ctx, createTableTimeout, func() (any, error) { + if v, ok := d.GetOk("warm_throughput"); ok && len(v.([]any)) > 0 && v.([]any)[0] != nil { + input.WarmThroughput = expandWarmThroughput(v.([]any)[0].(map[string]any)) + } + + _, err := tfresource.RetryWhen(ctx, createTableTimeout, func(ctx context.Context) (any, error) { return conn.CreateTable(ctx, input) }, func(err error) (bool, error) { if tfawserr.ErrCodeEquals(err, errCodeThrottlingException) { @@ -784,7 +840,6 @@ func resourceTableCreate(ctx context.Context, d *schema.ResourceData, meta any) if errs.IsAErrorMessageContains[*awstypes.LimitExceededException](err, "indexed tables that can be created simultaneously") { return true, err } - return false, err }) @@ -800,6 +855,9 @@ func resourceTableCreate(ctx context.Context, d *schema.ResourceData, meta any) if output, err = waitTableActive(ctx, conn, d.Id(), d.Timeout(schema.TimeoutCreate)); err != nil { return create.AppendDiagError(diags, names.DynamoDB, create.ErrActionWaitingForCreation, resNameTable, d.Id(), err) } + if err := waitTableWarmThroughputActive(ctx, conn, d.Id(), d.Timeout(schema.TimeoutUpdate)); err != nil { + return create.AppendDiagError(diags, names.DynamoDB, create.ErrActionWaitingForUpdate, resNameTable, d.Id(), err) + } if v, ok := d.GetOk("global_secondary_index"); ok { gsiSet := v.(*schema.Set) @@ -944,6 +1002,10 @@ func resourceTableRead(ctx context.Context, d *schema.ResourceData, meta any) di d.Set("table_class", awstypes.TableClassStandard) } + if err := d.Set("warm_throughput", flattenTableWarmThroughput(table.WarmThroughput)); err != 
nil { + return create.AppendDiagSettingError(diags, names.DynamoDB, resNameTable, d.Id(), "warm_throughput", err) + } + describeBackupsInput := dynamodb.DescribeContinuousBackupsInput{ TableName: aws.String(d.Id()), } @@ -1097,7 +1159,7 @@ func resourceTableUpdate(ctx context.Context, d *schema.ResourceData, meta any) // Must update all indexes when switching BillingMode from PAY_PER_REQUEST to PROVISIONED if newBillingMode == awstypes.BillingModeProvisioned { for _, gsiUpdate := range gsiUpdates { - if gsiUpdate.Update == nil { + if gsiUpdate.Update == nil || (gsiUpdate.Update != nil && gsiUpdate.Update.WarmThroughput != nil) { continue } @@ -1106,7 +1168,7 @@ func resourceTableUpdate(ctx context.Context, d *schema.ResourceData, meta any) } } - // update only on-demand throughput indexes when switching to PAY_PER_REQUEST + // update only on-demand throughput indexes when switching to PAY_PER_REQUEST in Phase 2a if newBillingMode == awstypes.BillingModePayPerRequest { for _, gsiUpdate := range gsiUpdates { if gsiUpdate.Update == nil || (gsiUpdate.Update != nil && gsiUpdate.Update.OnDemandThroughput == nil) { @@ -1129,6 +1191,10 @@ func resourceTableUpdate(ctx context.Context, d *schema.ResourceData, meta any) return create.AppendDiagError(diags, names.DynamoDB, create.ErrActionWaitingForUpdate, resNameTable, d.Id(), err) } + if err := waitTableWarmThroughputActive(ctx, conn, d.Id(), d.Timeout(schema.TimeoutUpdate)); err != nil { + return create.AppendDiagError(diags, names.DynamoDB, create.ErrActionWaitingForUpdate, resNameTable, d.Id(), err) + } + for _, gsiUpdate := range gsiUpdates { if gsiUpdate.Update == nil { continue @@ -1139,6 +1205,35 @@ func resourceTableUpdate(ctx context.Context, d *schema.ResourceData, meta any) if _, err := waitGSIActive(ctx, conn, d.Id(), idxName, d.Timeout(schema.TimeoutUpdate)); err != nil { return create.AppendDiagError(diags, names.DynamoDB, create.ErrActionWaitingForUpdate, resNameTable, d.Id(), fmt.Errorf("GSI (%s): %w", 
idxName, err)) } + + if err := waitGSIWarmThroughputActive(ctx, conn, d.Id(), idxName, d.Timeout(schema.TimeoutUpdate)); err != nil { + return create.AppendDiagError(diags, names.DynamoDB, create.ErrActionWaitingForUpdate, resNameTable, d.Id(), fmt.Errorf("GSI (%s): %w", idxName, err)) + } + } + } + + // Phase 2b: update indexes in two steps when warm throughput is set + for _, gsiUpdate := range gsiUpdates { + if gsiUpdate.Update == nil || (gsiUpdate.Update != nil && gsiUpdate.Update.WarmThroughput == nil) { + continue + } + + idxName := aws.ToString(gsiUpdate.Update.IndexName) + input := &dynamodb.UpdateTableInput{ + GlobalSecondaryIndexUpdates: []awstypes.GlobalSecondaryIndexUpdate{gsiUpdate}, + TableName: aws.String(d.Id()), + } + + if _, err := conn.UpdateTable(ctx, input); err != nil { + return create.AppendDiagError(diags, names.DynamoDB, create.ErrActionUpdating, resNameTable, d.Id(), fmt.Errorf("updating GSI for warm throughput (%s): %w", idxName, err)) + } + + if _, err := waitGSIActive(ctx, conn, d.Id(), idxName, d.Timeout(schema.TimeoutUpdate)); err != nil { + return create.AppendDiagError(diags, names.DynamoDB, create.ErrActionUpdating, resNameTable, d.Id(), fmt.Errorf("%s GSI (%s): %w", create.ErrActionWaitingForCreation, idxName, err)) + } + + if err := waitGSIWarmThroughputActive(ctx, conn, d.Id(), idxName, d.Timeout(schema.TimeoutUpdate)); err != nil { + return create.AppendDiagError(diags, names.DynamoDB, create.ErrActionWaitingForUpdate, resNameTable, d.Id(), fmt.Errorf("GSI (%s): %w", idxName, err)) } } @@ -1163,6 +1258,10 @@ func resourceTableUpdate(ctx context.Context, d *schema.ResourceData, meta any) if _, err := waitGSIActive(ctx, conn, d.Id(), idxName, d.Timeout(schema.TimeoutUpdate)); err != nil { return create.AppendDiagError(diags, names.DynamoDB, create.ErrActionUpdating, resNameTable, d.Id(), fmt.Errorf("%s GSI (%s): %w", create.ErrActionWaitingForCreation, idxName, err)) } + + if err := waitGSIWarmThroughputActive(ctx, conn, d.Id(), 
idxName, d.Timeout(schema.TimeoutUpdate)); err != nil { + return create.AppendDiagError(diags, names.DynamoDB, create.ErrActionWaitingForUpdate, resNameTable, d.Id(), fmt.Errorf("GSI (%s): %w", idxName, err)) + } } if d.HasChange("server_side_encryption") { @@ -1279,6 +1378,12 @@ func resourceTableUpdate(ctx context.Context, d *schema.ResourceData, meta any) } } + if d.HasChange("warm_throughput") { + if err := updateWarmThroughput(ctx, conn, d.Get("warm_throughput").([]any), d.Id(), d.Timeout(schema.TimeoutUpdate)); err != nil { + return create.AppendDiagError(diags, names.DynamoDB, create.ErrActionUpdating, resNameTable, d.Id(), err) + } + } + return append(diags, resourceTableRead(ctx, d, meta)...) } @@ -1341,11 +1446,11 @@ func cycleStreamEnabled(ctx context.Context, conn *dynamodb.Client, id string, s _, err := conn.UpdateTable(ctx, input) if err != nil { - return fmt.Errorf("cycling stream enabled: %s", err) + return fmt.Errorf("cycling stream enabled: %w", err) } if _, err := waitTableActive(ctx, conn, id, timeout); err != nil { - return fmt.Errorf("waiting for stream cycle: %s", err) + return fmt.Errorf("waiting for stream cycle: %w", err) } input.StreamSpecification = &awstypes.StreamSpecification{ @@ -1356,11 +1461,11 @@ func cycleStreamEnabled(ctx context.Context, conn *dynamodb.Client, id string, s _, err = conn.UpdateTable(ctx, input) if err != nil { - return fmt.Errorf("cycling stream enabled: %s", err) + return fmt.Errorf("cycling stream enabled: %w", err) } if _, err := waitTableActive(ctx, conn, id, timeout); err != nil { - return fmt.Errorf("waiting for stream cycle: %s", err) + return fmt.Errorf("waiting for stream cycle: %w", err) } return nil @@ -1431,31 +1536,27 @@ func createReplicas(ctx context.Context, conn *dynamodb.Client, tableName string MultiRegionConsistency: mrscInput, } - err := retry.RetryContext(ctx, max(replicaUpdateTimeout, timeout), func() *retry.RetryError { + err := tfresource.Retry(ctx, max(replicaUpdateTimeout, timeout), 
func(ctx context.Context) *tfresource.RetryError { _, err := conn.UpdateTable(ctx, input) if err != nil { if tfawserr.ErrCodeEquals(err, errCodeThrottlingException) { - return retry.RetryableError(err) + return tfresource.RetryableError(err) } if errs.IsAErrorMessageContains[*awstypes.LimitExceededException](err, "can be created.") { - return retry.NonRetryableError(err) + return tfresource.NonRetryableError(err) } if tfawserr.ErrMessageContains(err, errCodeValidationException, "Replica specified in the Replica Update or Replica Delete action of the request was not found") { - return retry.RetryableError(err) + return tfresource.RetryableError(err) } if errs.IsA[*awstypes.ResourceInUseException](err) { - return retry.RetryableError(err) + return tfresource.RetryableError(err) } - return retry.NonRetryableError(err) + return tfresource.NonRetryableError(err) } return nil }) - if tfresource.TimedOut(err) { - _, err = conn.UpdateTable(ctx, input) - } - if err != nil { return err } @@ -1528,31 +1629,27 @@ func createReplicas(ctx context.Context, conn *dynamodb.Client, tableName string } } - err := retry.RetryContext(ctx, max(replicaUpdateTimeout, timeout), func() *retry.RetryError { + err := tfresource.Retry(ctx, max(replicaUpdateTimeout, timeout), func(ctx context.Context) *tfresource.RetryError { _, err := conn.UpdateTable(ctx, input) if err != nil { if tfawserr.ErrCodeEquals(err, errCodeThrottlingException) { - return retry.RetryableError(err) + return tfresource.RetryableError(err) } if errs.IsAErrorMessageContains[*awstypes.LimitExceededException](err, "can be created, updated, or deleted simultaneously") { - return retry.RetryableError(err) + return tfresource.RetryableError(err) } if tfawserr.ErrMessageContains(err, errCodeValidationException, "Replica specified in the Replica Update or Replica Delete action of the request was not found") { - return retry.RetryableError(err) + return tfresource.RetryableError(err) } if 
errs.IsA[*awstypes.ResourceInUseException](err) { - return retry.RetryableError(err) + return tfresource.RetryableError(err) } - return retry.NonRetryableError(err) + return tfresource.NonRetryableError(err) } return nil }) - if tfresource.TimedOut(err) { - _, err = conn.UpdateTable(ctx, input) - } - // An update that doesn't (makes no changes) returns ValidationException // (same region_name and kms_key_arn as currently) throws unhelpfully worded exception: // ValidationException: One or more parameter values were invalid: KMSMasterKeyId must be specified for each replica. @@ -1573,6 +1670,12 @@ func createReplicas(ctx context.Context, conn *dynamodb.Client, tableName string if err = updatePITR(ctx, conn, tableName, tfMap["point_in_time_recovery"].(bool), nil, tfMap["region_name"].(string), timeout); err != nil { return fmt.Errorf("updating replica (%s) point in time recovery: %w", tfMap["region_name"].(string), err) } + + if v, ok := tfMap["deletion_protection_enabled"].(bool); ok { + if err = updateReplicaDeletionProtection(ctx, conn, tableName, tfMap["region_name"].(string), v, timeout); err != nil { + return fmt.Errorf("updating replica (%s) deletion protection: %w", tfMap["region_name"].(string), err) + } + } } } return nil @@ -1656,22 +1759,18 @@ func updatePITR(ctx context.Context, conn *dynamodb.Client, tableName string, en optFn := func(o *dynamodb.Options) { o.Region = region } - err := retry.RetryContext(ctx, updateTableContinuousBackupsTimeout, func() *retry.RetryError { + err := tfresource.Retry(ctx, updateTableContinuousBackupsTimeout, func(ctx context.Context) *tfresource.RetryError { _, err := conn.UpdateContinuousBackups(ctx, input, optFn) if err != nil { // Backups are still being enabled for this newly created table if errs.IsAErrorMessageContains[*awstypes.ContinuousBackupsUnavailableException](err, "Backups are being enabled") { - return retry.RetryableError(err) + return tfresource.RetryableError(err) } - return retry.NonRetryableError(err) + 
return tfresource.NonRetryableError(err) } return nil }) - if tfresource.TimedOut(err) { - _, err = conn.UpdateContinuousBackups(ctx, input, optFn) - } - if err != nil { return fmt.Errorf("updating PITR: %w", err) } @@ -1683,6 +1782,26 @@ func updatePITR(ctx context.Context, conn *dynamodb.Client, tableName string, en return nil } +func updateReplicaDeletionProtection(ctx context.Context, conn *dynamodb.Client, tableName, region string, enabled bool, timeout time.Duration) error { + log.Printf("[DEBUG] Updating DynamoDB deletion protection to %v (%s)", enabled, region) + input := dynamodb.UpdateTableInput{ + TableName: aws.String(tableName), + DeletionProtectionEnabled: aws.Bool(enabled), + } + + optFn := func(o *dynamodb.Options) { o.Region = region } + _, err := conn.UpdateTable(ctx, &input, optFn) + if err != nil { + return fmt.Errorf("updating deletion protection: %w", err) + } + + if _, err := waitReplicaActive(ctx, conn, tableName, region, timeout, replicaPropagationDelay); err != nil { + return fmt.Errorf("waiting for deletion protection update: %w", err) + } + + return nil +} + func updateReplica(ctx context.Context, conn *dynamodb.Client, d *schema.ResourceData) error { oRaw, nRaw := d.GetChange("replica") o := oRaw.(*schema.Set) @@ -1766,6 +1885,14 @@ func updateReplica(ctx context.Context, conn *dynamodb.Client, d *schema.Resourc break } + // just update deletion protection + if ma["deletion_protection_enabled"].(bool) != mr["deletion_protection_enabled"].(bool) { + if err := updateReplicaDeletionProtection(ctx, conn, d.Id(), ma["region_name"].(string), ma["deletion_protection_enabled"].(bool), d.Timeout(schema.TimeoutUpdate)); err != nil { + return fmt.Errorf("updating replica (%s) deletion protection: %w", ma["region_name"].(string), err) + } + break + } + // nothing changed, assuming propagate_tags changed so do nothing here break } @@ -1792,6 +1919,33 @@ func updateReplica(ctx context.Context, conn *dynamodb.Client, d *schema.Resourc return nil } 
+func updateWarmThroughput(ctx context.Context, conn *dynamodb.Client, warmList []any, tableName string, timeout time.Duration) error { + if len(warmList) < 1 || warmList[0] == nil { + return nil + } + + warmMap := warmList[0].(map[string]any) + + input := &dynamodb.UpdateTableInput{ + TableName: aws.String(tableName), + WarmThroughput: expandWarmThroughput(warmMap), + } + + if _, err := conn.UpdateTable(ctx, input); err != nil { + return err + } + + if _, err := waitTableActive(ctx, conn, tableName, timeout); err != nil { + return fmt.Errorf("waiting for warm throughput: %w", err) + } + + if err := waitTableWarmThroughputActive(ctx, conn, tableName, timeout); err != nil { + return fmt.Errorf("waiting for warm throughput: %w", err) + } + + return nil +} + func updateDiffGSI(oldGsi, newGsi []any, billingMode awstypes.BillingMode) ([]awstypes.GlobalSecondaryIndexUpdate, error) { // Transform slices into maps oldGsis := make(map[string]any) @@ -1830,6 +1984,10 @@ func updateDiffGSI(oldGsi, newGsi []any, billingMode awstypes.BillingMode) ([]aw c.OnDemandThroughput = expandOnDemandThroughput(v[0].(map[string]any)) } + if v, ok := m["warm_throughput"].([]any); ok && len(v) > 0 && v[0] != nil { + c.WarmThroughput = expandWarmThroughput(v[0].(map[string]any)) + } + ops = append(ops, awstypes.GlobalSecondaryIndexUpdate{ Create: &c, }) @@ -1863,6 +2021,27 @@ func updateDiffGSI(oldGsi, newGsi []any, billingMode awstypes.BillingMode) ([]aw onDemandThroughputChanged = true } + var oldWarmThroughput *awstypes.WarmThroughput + var newWarmThroughput *awstypes.WarmThroughput + if v, ok := oldMap["warm_throughput"].([]any); ok && len(v) > 0 && v[0] != nil { + oldWarmThroughput = expandWarmThroughput(v[0].(map[string]any)) + } + + if v, ok := newMap["warm_throughput"].([]any); ok && len(v) > 0 && v[0] != nil { + newWarmThroughput = expandWarmThroughput(v[0].(map[string]any)) + } + + var warmThroughputChanged bool + if !reflect.DeepEqual(oldWarmThroughput, newWarmThroughput) { + 
warmThroughputChanged = true + } + + var warmThroughPutDecreased bool + if warmThroughputChanged && newWarmThroughput != nil && oldWarmThroughput != nil { + warmThroughPutDecreased = (aws.ToInt64(newWarmThroughput.ReadUnitsPerSecond) < aws.ToInt64(oldWarmThroughput.ReadUnitsPerSecond) || + aws.ToInt64(newWarmThroughput.WriteUnitsPerSecond) < aws.ToInt64(oldWarmThroughput.WriteUnitsPerSecond)) + } + // pluck non_key_attributes from oldAttributes and newAttributes as reflect.DeepEquals will compare // ordinal of elements in its equality (which we actually don't care about) nonKeyAttributesChanged := checkIfNonKeyAttributesChanged(oldMap, newMap) @@ -1879,6 +2058,10 @@ func updateDiffGSI(oldGsi, newGsi []any, billingMode awstypes.BillingMode) ([]aw if err != nil { return ops, err } + oldAttributes, err = stripWarmThroughputAttributes(oldAttributes) + if err != nil { + return ops, err + } newAttributes, err := stripCapacityAttributes(newMap) if err != nil { return ops, err @@ -1891,9 +2074,14 @@ func updateDiffGSI(oldGsi, newGsi []any, billingMode awstypes.BillingMode) ([]aw if err != nil { return ops, err } - otherAttributesChanged := nonKeyAttributesChanged || !reflect.DeepEqual(oldAttributes, newAttributes) + newAttributes, err = stripWarmThroughputAttributes(newAttributes) + if err != nil { + return ops, err + } + gsiNeedsRecreate := nonKeyAttributesChanged || !reflect.DeepEqual(oldAttributes, newAttributes) || warmThroughPutDecreased - if capacityChanged && !otherAttributesChanged && billingMode == awstypes.BillingModeProvisioned { + // One step in most cases, an extra step in case of warmThroughputChanged without recreation necessity: + if (capacityChanged) && !gsiNeedsRecreate && billingMode == awstypes.BillingModeProvisioned { update := awstypes.GlobalSecondaryIndexUpdate{ Update: &awstypes.UpdateGlobalSecondaryIndexAction{ IndexName: aws.String(idxName), @@ -1901,7 +2089,7 @@ func updateDiffGSI(oldGsi, newGsi []any, billingMode awstypes.BillingMode) ([]aw }, } 
ops = append(ops, update) - } else if onDemandThroughputChanged && !otherAttributesChanged && billingMode == awstypes.BillingModePayPerRequest { + } else if onDemandThroughputChanged && !gsiNeedsRecreate && billingMode == awstypes.BillingModePayPerRequest { update := awstypes.GlobalSecondaryIndexUpdate{ Update: &awstypes.UpdateGlobalSecondaryIndexAction{ IndexName: aws.String(idxName), @@ -1909,7 +2097,7 @@ func updateDiffGSI(oldGsi, newGsi []any, billingMode awstypes.BillingMode) ([]aw }, } ops = append(ops, update) - } else if otherAttributesChanged { + } else if gsiNeedsRecreate { // Other attributes cannot be updated ops = append(ops, awstypes.GlobalSecondaryIndexUpdate{ Delete: &awstypes.DeleteGlobalSecondaryIndexAction{ @@ -1923,9 +2111,20 @@ func updateDiffGSI(oldGsi, newGsi []any, billingMode awstypes.BillingMode) ([]aw KeySchema: expandKeySchema(newMap), ProvisionedThroughput: expandProvisionedThroughput(newMap, billingMode), Projection: expandProjection(newMap), + WarmThroughput: newWarmThroughput, }, }) } + // Separating the WarmThroughput updates from the others + if !gsiNeedsRecreate && warmThroughputChanged { + update := awstypes.GlobalSecondaryIndexUpdate{ + Update: &awstypes.UpdateGlobalSecondaryIndexAction{ + IndexName: aws.String(idxName), + WarmThroughput: newWarmThroughput, + }, + } + ops = append(ops, update) + } } else { idxName := oldName ops = append(ops, awstypes.GlobalSecondaryIndexUpdate{ @@ -1943,7 +2142,7 @@ func deleteTable(ctx context.Context, conn *dynamodb.Client, tableName string) e TableName: aws.String(tableName), } - _, err := tfresource.RetryWhen(ctx, deleteTableTimeout, func() (any, error) { + _, err := tfresource.RetryWhen(ctx, deleteTableTimeout, func(ctx context.Context) (any, error) { return conn.DeleteTable(ctx, input) }, func(err error) (bool, error) { // Subscriber limit exceeded: Only 10 tables can be created, updated, or deleted simultaneously @@ -2004,36 +2203,32 @@ func deleteReplicas(ctx context.Context, conn 
*dynamodb.Client, tableName string TableName: aws.String(tableName), ReplicaUpdates: replicaDeletes, } - err := retry.RetryContext(ctx, updateTableTimeout, func() *retry.RetryError { + err := tfresource.Retry(ctx, updateTableTimeout, func(ctx context.Context) *tfresource.RetryError { _, err := conn.UpdateTable(ctx, input) notFoundRetries := 0 if err != nil { if tfawserr.ErrCodeEquals(err, errCodeThrottlingException) { - return retry.RetryableError(err) + return tfresource.RetryableError(err) } if errs.IsA[*awstypes.ResourceNotFoundException](err) { notFoundRetries++ if notFoundRetries > 3 { - return retry.NonRetryableError(err) + return tfresource.NonRetryableError(err) } - return retry.RetryableError(err) + return tfresource.RetryableError(err) } if errs.IsAErrorMessageContains[*awstypes.LimitExceededException](err, "can be created, updated, or deleted simultaneously") { - return retry.RetryableError(err) + return tfresource.RetryableError(err) } if errs.IsA[*awstypes.ResourceInUseException](err) { - return retry.RetryableError(err) + return tfresource.RetryableError(err) } - return retry.NonRetryableError(err) + return tfresource.NonRetryableError(err) } return nil }) - if tfresource.TimedOut(err) { - _, err = conn.UpdateTable(ctx, input) - } - if err != nil && !errs.IsA[*awstypes.ResourceNotFoundException](err) { return fmt.Errorf("deleting replica(s): %w", err) } @@ -2079,36 +2274,32 @@ func deleteReplicas(ctx context.Context, conn *dynamodb.Client, tableName string }, } - err := retry.RetryContext(ctx, updateTableTimeout, func() *retry.RetryError { + err := tfresource.Retry(ctx, updateTableTimeout, func(ctx context.Context) *tfresource.RetryError { _, err := conn.UpdateTable(ctx, input) notFoundRetries := 0 if err != nil { if tfawserr.ErrCodeEquals(err, errCodeThrottlingException) { - return retry.RetryableError(err) + return tfresource.RetryableError(err) } if errs.IsA[*awstypes.ResourceNotFoundException](err) { notFoundRetries++ if notFoundRetries > 3 { - 
return retry.NonRetryableError(err) + return tfresource.NonRetryableError(err) } - return retry.RetryableError(err) + return tfresource.RetryableError(err) } if errs.IsAErrorMessageContains[*awstypes.LimitExceededException](err, "can be created, updated, or deleted simultaneously") { - return retry.RetryableError(err) + return tfresource.RetryableError(err) } if errs.IsA[*awstypes.ResourceInUseException](err) { - return retry.RetryableError(err) + return tfresource.RetryableError(err) } - return retry.NonRetryableError(err) + return tfresource.NonRetryableError(err) } return nil }) - if tfresource.TimedOut(err) { - _, err = conn.UpdateTable(ctx, input) - } - if err != nil && !errs.IsA[*awstypes.ResourceNotFoundException](err) { return fmt.Errorf("deleting replica (%s): %w", regionName, err) } @@ -2197,7 +2388,7 @@ func enrichReplicas(ctx context.Context, conn *dynamodb.Client, arn, tableName s newARN, err := arnForNewRegion(arn, tfMap["region_name"].(string)) if err != nil { - return nil, fmt.Errorf("creating new-region ARN: %s", err) + return nil, fmt.Errorf("creating new-region ARN: %w", err) } tfMap[names.AttrARN] = newARN @@ -2210,12 +2401,12 @@ func enrichReplicas(ctx context.Context, conn *dynamodb.Client, arn, tableName s continue } - tfMap[names.AttrStreamARN] = aws.ToString(table.LatestStreamArn) - tfMap["stream_label"] = aws.ToString(table.LatestStreamLabel) - + tfMap["deletion_protection_enabled"] = aws.ToBool(table.DeletionProtectionEnabled) if table.SSEDescription != nil { tfMap[names.AttrKMSKeyARN] = aws.ToString(table.SSEDescription.KMSMasterKeyArn) } + tfMap[names.AttrStreamARN] = aws.ToString(table.LatestStreamArn) + tfMap["stream_label"] = aws.ToString(table.LatestStreamLabel) tfList[i] = tfMap } @@ -2394,6 +2585,10 @@ func flattenTableGlobalSecondaryIndex(gsi []awstypes.GlobalSecondaryIndexDescrip gsi["on_demand_throughput"] = flattenOnDemandThroughput(g.OnDemandThroughput) } + if g.WarmThroughput != nil { + gsi["warm_throughput"] = 
flattenGSIWarmThroughput(g.WarmThroughput) + } + output = append(output, gsi) } @@ -2443,6 +2638,62 @@ func flattenOnDemandThroughput(apiObject *awstypes.OnDemandThroughput) []any { return []any{m} } +func flattenTableWarmThroughput(apiObject *awstypes.TableWarmThroughputDescription) []any { + if apiObject == nil { + return []any{} + } + + // AWS may return values below the minimum when warm throughput is not actually configured + // Also treat exact minimum values as defaults since AWS sets these automatically + readUnits := aws.ToInt64(apiObject.ReadUnitsPerSecond) + writeUnits := aws.ToInt64(apiObject.WriteUnitsPerSecond) + + // Return empty if values are below minimums OR exactly at minimums (AWS defaults) + if (readUnits < 12000 && writeUnits < 4000) || (readUnits == 12000 && writeUnits == 4000) { + return []any{} + } + + m := map[string]any{} + + if v := apiObject.ReadUnitsPerSecond; v != nil { + m["read_units_per_second"] = aws.ToInt64(v) + } + + if v := apiObject.WriteUnitsPerSecond; v != nil { + m["write_units_per_second"] = aws.ToInt64(v) + } + + return []any{m} +} + +func flattenGSIWarmThroughput(apiObject *awstypes.GlobalSecondaryIndexWarmThroughputDescription) []any { + if apiObject == nil { + return []any{} + } + + // AWS may return values below the minimum when warm throughput is not actually configured + // Also treat exact minimum values as defaults since AWS sets these automatically + readUnits := aws.ToInt64(apiObject.ReadUnitsPerSecond) + writeUnits := aws.ToInt64(apiObject.WriteUnitsPerSecond) + + // Return empty if values are below minimums OR exactly at minimums (AWS defaults) + if (readUnits < 12000 && writeUnits < 4000) || (readUnits == 12000 && writeUnits == 4000) { + return []any{} + } + + m := map[string]any{} + + if v := apiObject.ReadUnitsPerSecond; v != nil { + m["read_units_per_second"] = aws.ToInt64(v) + } + + if v := apiObject.WriteUnitsPerSecond; v != nil { + m["write_units_per_second"] = aws.ToInt64(v) + } + + return []any{m} +} 
+ func flattenReplicaDescription(apiObject *awstypes.ReplicaDescription) map[string]any { if apiObject == nil { return nil @@ -2572,6 +2823,10 @@ func expandGlobalSecondaryIndex(data map[string]any, billingMode awstypes.Billin output.OnDemandThroughput = expandOnDemandThroughput(v[0].(map[string]any)) } + if v, ok := data["warm_throughput"].([]any); ok && len(v) > 0 && v[0] != nil { + output.WarmThroughput = expandWarmThroughput(v[0].(map[string]any)) + } + return &output } @@ -2699,6 +2954,24 @@ func expandOnDemandThroughput(tfMap map[string]any) *awstypes.OnDemandThroughput return apiObject } +func expandWarmThroughput(tfMap map[string]any) *awstypes.WarmThroughput { + if tfMap == nil { + return nil + } + + apiObject := &awstypes.WarmThroughput{} + + if v, ok := tfMap["read_units_per_second"].(int); ok && v != 0 { + apiObject.ReadUnitsPerSecond = aws.Int64(int64(v)) + } + + if v, ok := tfMap["write_units_per_second"].(int); ok && v != 0 { + apiObject.WriteUnitsPerSecond = aws.Int64(int64(v)) + } + + return apiObject +} + func expandS3BucketSource(data map[string]any) *awstypes.S3BucketSource { if data == nil { return nil @@ -2830,6 +3103,65 @@ func validateProvisionedThroughputField(diff *schema.ResourceDiff, key string) e return nil } +func validateWarmThroughputCustomDiff(ctx context.Context, d *schema.ResourceDiff, meta any) error { + configRaw := d.GetRawConfig() + if !configRaw.IsKnown() || configRaw.IsNull() { + return nil + } + + // Handle table-level warm throughput suppression + if err := suppressTableWarmThroughputDefaults(d, configRaw); err != nil { + return err + } + + // Handle GSI warm throughput suppression + if err := suppressGSIWarmThroughputDefaults(d, configRaw); err != nil { + return err + } + + return nil +} + +func suppressTableWarmThroughputDefaults(d *schema.ResourceDiff, configRaw cty.Value) error { + // If warm throughput is explicitly configured, don't suppress any diffs + if warmThroughput := configRaw.GetAttr("warm_throughput"); 
warmThroughput.IsKnown() && !warmThroughput.IsNull() && warmThroughput.LengthInt() > 0 { + return nil + } + + // If warm throughput is not explicitly configured, suppress AWS default values + if !d.HasChange("warm_throughput") { + return nil + } + + _, new := d.GetChange("warm_throughput") + newList, ok := new.([]any) + if !ok || len(newList) == 0 { + return nil + } + + newMap, ok := newList[0].(map[string]any) + if !ok { + return nil + } + + readUnits := newMap["read_units_per_second"] + writeUnits := newMap["write_units_per_second"] + + // If AWS returns default values and no explicit config, suppress the diff + if (readUnits == 1 && writeUnits == 1) || (readUnits == 12000 && writeUnits == 4000) { + return d.Clear("warm_throughput") + } + + return nil +} + +func suppressGSIWarmThroughputDefaults(d *schema.ResourceDiff, configRaw cty.Value) error { + // GSI warm throughput defaults are now handled in the flattenGSIWarmThroughput function + // by filtering out AWS default values during the read operation. + // This approach is more reliable than trying to suppress diffs on Set-based fields. 
+ return nil +} + func validateTTLCustomDiff(ctx context.Context, d *schema.ResourceDiff, meta any) error { var diags diag.Diagnostics diff --git a/internal/service/dynamodb/table_data_source.go b/internal/service/dynamodb/table_data_source.go index d84dedfd0b65..628116673856 100644 --- a/internal/service/dynamodb/table_data_source.go +++ b/internal/service/dynamodb/table_data_source.go @@ -14,6 +14,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" + "github.com/hashicorp/terraform-provider-aws/internal/sdkv2" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -23,236 +24,241 @@ import ( func dataSourceTable() *schema.Resource { return &schema.Resource{ ReadWithoutTimeout: dataSourceTableRead, - Schema: map[string]*schema.Schema{ - names.AttrARN: { - Type: schema.TypeString, - Computed: true, - }, - "attribute": { - Type: schema.TypeSet, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - names.AttrName: { - Type: schema.TypeString, - Computed: true, - }, - names.AttrType: { - Type: schema.TypeString, - Computed: true, + + SchemaFunc: func() map[string]*schema.Schema { + return map[string]*schema.Schema{ + names.AttrARN: { + Type: schema.TypeString, + Computed: true, + }, + "attribute": { + Type: schema.TypeSet, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + names.AttrName: { + Type: schema.TypeString, + Computed: true, + }, + names.AttrType: { + Type: schema.TypeString, + Computed: true, + }, }, }, }, - }, - "billing_mode": { - Type: schema.TypeString, - Computed: true, - }, - "deletion_protection_enabled": { - Type: schema.TypeBool, - Computed: true, - }, - "global_secondary_index": { - Type: schema.TypeSet, - Computed: true, - Elem: &schema.Resource{ - Schema: 
map[string]*schema.Schema{ - "hash_key": { - Type: schema.TypeString, - Computed: true, - }, - names.AttrName: { - Type: schema.TypeString, - Computed: true, - }, - "non_key_attributes": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "on_demand_throughput": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "max_read_request_units": { - Type: schema.TypeInt, - Computed: true, - }, - "max_write_request_units": { - Type: schema.TypeInt, - Computed: true, + "billing_mode": { + Type: schema.TypeString, + Computed: true, + }, + "deletion_protection_enabled": { + Type: schema.TypeBool, + Computed: true, + }, + "global_secondary_index": { + Type: schema.TypeSet, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "hash_key": { + Type: schema.TypeString, + Computed: true, + }, + names.AttrName: { + Type: schema.TypeString, + Computed: true, + }, + "non_key_attributes": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "on_demand_throughput": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "max_read_request_units": { + Type: schema.TypeInt, + Computed: true, + }, + "max_write_request_units": { + Type: schema.TypeInt, + Computed: true, + }, }, }, }, - }, - "projection_type": { - Type: schema.TypeString, - Computed: true, - }, - "range_key": { - Type: schema.TypeString, - Computed: true, - }, - "read_capacity": { - Type: schema.TypeInt, - Computed: true, - }, - "write_capacity": { - Type: schema.TypeInt, - Computed: true, + "projection_type": { + Type: schema.TypeString, + Computed: true, + }, + "range_key": { + Type: schema.TypeString, + Computed: true, + }, + "read_capacity": { + Type: schema.TypeInt, + Computed: true, + }, + "warm_throughput": sdkv2.ComputedOnlyFromSchema(warmThroughputSchema()), + "write_capacity": { + 
Type: schema.TypeInt, + Computed: true, + }, }, }, }, - }, - "hash_key": { - Type: schema.TypeString, - Computed: true, - }, - "local_secondary_index": { - Type: schema.TypeSet, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - names.AttrName: { - Type: schema.TypeString, - Computed: true, - }, - "non_key_attributes": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "projection_type": { - Type: schema.TypeString, - Computed: true, - }, - "range_key": { - Type: schema.TypeString, - Computed: true, + "hash_key": { + Type: schema.TypeString, + Computed: true, + }, + "local_secondary_index": { + Type: schema.TypeSet, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + names.AttrName: { + Type: schema.TypeString, + Computed: true, + }, + "non_key_attributes": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "projection_type": { + Type: schema.TypeString, + Computed: true, + }, + "range_key": { + Type: schema.TypeString, + Computed: true, + }, }, }, }, - }, - names.AttrName: { - Type: schema.TypeString, - Required: true, - }, - "on_demand_throughput": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "max_read_request_units": { - Type: schema.TypeInt, - Computed: true, - }, - "max_write_request_units": { - Type: schema.TypeInt, - Computed: true, + names.AttrName: { + Type: schema.TypeString, + Required: true, + }, + "on_demand_throughput": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "max_read_request_units": { + Type: schema.TypeInt, + Computed: true, + }, + "max_write_request_units": { + Type: schema.TypeInt, + Computed: true, + }, }, }, }, - }, - "point_in_time_recovery": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - 
names.AttrEnabled: { - Type: schema.TypeBool, - Computed: true, - }, - "recovery_period_in_days": { - Type: schema.TypeInt, - Computed: true, + "point_in_time_recovery": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + names.AttrEnabled: { + Type: schema.TypeBool, + Computed: true, + }, + "recovery_period_in_days": { + Type: schema.TypeInt, + Computed: true, + }, }, }, }, - }, - "range_key": { - Type: schema.TypeString, - Computed: true, - }, - "read_capacity": { - Type: schema.TypeInt, - Computed: true, - }, - "replica": { - Type: schema.TypeSet, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - names.AttrKMSKeyARN: { - Type: schema.TypeString, - Computed: true, - }, - "region_name": { - Type: schema.TypeString, - Computed: true, + "range_key": { + Type: schema.TypeString, + Computed: true, + }, + "read_capacity": { + Type: schema.TypeInt, + Computed: true, + }, + "replica": { + Type: schema.TypeSet, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + names.AttrKMSKeyARN: { + Type: schema.TypeString, + Computed: true, + }, + "region_name": { + Type: schema.TypeString, + Computed: true, + }, }, }, }, - }, - "server_side_encryption": { - Type: schema.TypeList, - Optional: true, - Computed: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - names.AttrEnabled: { - Type: schema.TypeBool, - Computed: true, - }, - names.AttrKMSKeyARN: { - Type: schema.TypeString, - Computed: true, + "server_side_encryption": { + Type: schema.TypeList, + Optional: true, + Computed: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + names.AttrEnabled: { + Type: schema.TypeBool, + Computed: true, + }, + names.AttrKMSKeyARN: { + Type: schema.TypeString, + Computed: true, + }, }, }, }, - }, - names.AttrStreamARN: { - Type: schema.TypeString, - Computed: true, - }, - "stream_enabled": { - Type: 
schema.TypeBool, - Computed: true, - }, - "stream_label": { - Type: schema.TypeString, - Computed: true, - }, - "stream_view_type": { - Type: schema.TypeString, - Computed: true, - }, - "table_class": { - Type: schema.TypeString, - Computed: true, - }, - names.AttrTags: tftags.TagsSchemaComputed(), - "ttl": { - Type: schema.TypeSet, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "attribute_name": { - Type: schema.TypeString, - Computed: true, - }, - names.AttrEnabled: { - Type: schema.TypeBool, - Computed: true, + names.AttrStreamARN: { + Type: schema.TypeString, + Computed: true, + }, + "stream_enabled": { + Type: schema.TypeBool, + Computed: true, + }, + "stream_label": { + Type: schema.TypeString, + Computed: true, + }, + "stream_view_type": { + Type: schema.TypeString, + Computed: true, + }, + "table_class": { + Type: schema.TypeString, + Computed: true, + }, + names.AttrTags: tftags.TagsSchemaComputed(), + "ttl": { + Type: schema.TypeSet, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "attribute_name": { + Type: schema.TypeString, + Computed: true, + }, + names.AttrEnabled: { + Type: schema.TypeBool, + Computed: true, + }, }, }, }, - }, - "write_capacity": { - Type: schema.TypeInt, - Computed: true, - }, + "warm_throughput": sdkv2.ComputedOnlyFromSchema(warmThroughputSchema()), + "write_capacity": { + Type: schema.TypeInt, + Computed: true, + }, + } }, } } @@ -334,11 +340,14 @@ func dataSourceTableRead(ctx context.Context, d *schema.ResourceData, meta any) d.Set("table_class", awstypes.TableClassStandard) } + if err := d.Set("warm_throughput", flattenTableWarmThroughput(table.WarmThroughput)); err != nil { + return sdkdiag.AppendErrorf(diags, "setting warm_throughput: %s", err) + } + describeBackupsInput := dynamodb.DescribeContinuousBackupsInput{ TableName: aws.String(d.Id()), } pitrOut, err := conn.DescribeContinuousBackups(ctx, &describeBackupsInput) - // When a Table is `ARCHIVED`, 
DescribeContinuousBackups returns `TableNotFoundException` if err != nil && !tfawserr.ErrCodeEquals(err, errCodeUnknownOperationException, errCodeTableNotFoundException) { return sdkdiag.AppendErrorf(diags, "reading DynamoDB Table (%s) Continuous Backups: %s", d.Id(), err) diff --git a/internal/service/dynamodb/table_data_source_tags_gen_test.go b/internal/service/dynamodb/table_data_source_tags_gen_test.go index db6905fc9d35..9a281010fc9e 100644 --- a/internal/service/dynamodb/table_data_source_tags_gen_test.go +++ b/internal/service/dynamodb/table_data_source_tags_gen_test.go @@ -6,7 +6,6 @@ import ( "testing" "github.com/hashicorp/terraform-plugin-testing/config" - sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/knownvalue" "github.com/hashicorp/terraform-plugin-testing/statecheck" @@ -17,10 +16,11 @@ import ( func TestAccDynamoDBTableDataSource_tags(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_dynamodb_table.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.DynamoDBServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -45,10 +45,11 @@ func TestAccDynamoDBTableDataSource_tags(t *testing.T) { func TestAccDynamoDBTableDataSource_tags_NullMap(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_dynamodb_table.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: 
acctest.ErrorCheck(t, names.DynamoDBServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -69,10 +70,11 @@ func TestAccDynamoDBTableDataSource_tags_NullMap(t *testing.T) { func TestAccDynamoDBTableDataSource_tags_EmptyMap(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_dynamodb_table.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.DynamoDBServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -93,10 +95,11 @@ func TestAccDynamoDBTableDataSource_tags_EmptyMap(t *testing.T) { func TestAccDynamoDBTableDataSource_tags_DefaultTags_nonOverlapping(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_dynamodb_table.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.DynamoDBServiceID), Steps: []resource.TestStep{ @@ -125,10 +128,11 @@ func TestAccDynamoDBTableDataSource_tags_DefaultTags_nonOverlapping(t *testing.T func TestAccDynamoDBTableDataSource_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_dynamodb_table.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.DynamoDBServiceID), Steps: []resource.TestStep{ @@ -163,10 +167,11 @@ func 
TestAccDynamoDBTableDataSource_tags_IgnoreTags_Overlap_DefaultTag(t *testin func TestAccDynamoDBTableDataSource_tags_IgnoreTags_Overlap_ResourceTag(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_dynamodb_table.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.DynamoDBServiceID), Steps: []resource.TestStep{ diff --git a/internal/service/dynamodb/table_data_source_test.go b/internal/service/dynamodb/table_data_source_test.go index 4034812a055d..6bb9187e55aa 100644 --- a/internal/service/dynamodb/table_data_source_test.go +++ b/internal/service/dynamodb/table_data_source_test.go @@ -44,6 +44,12 @@ func TestAccDynamoDBTableDataSource_basic(t *testing.T) { resource.TestCheckResourceAttrPair(datasourceName, "point_in_time_recovery.0.enabled", resourceName, "point_in_time_recovery.0.enabled"), resource.TestCheckResourceAttrPair(datasourceName, "point_in_time_recovery.0.recovery_period_in_days", resourceName, "point_in_time_recovery.0.recovery_period_in_days"), resource.TestCheckResourceAttrPair(datasourceName, "table_class", resourceName, "table_class"), + resource.TestCheckResourceAttr(datasourceName, "warm_throughput.0.read_units_per_second", "12100"), + resource.TestCheckResourceAttr(datasourceName, "warm_throughput.0.write_units_per_second", "4100"), + resource.TestCheckTypeSetElemNestedAttrs(datasourceName, "global_secondary_index.*", map[string]string{ + "warm_throughput.0.read_units_per_second": "12200", + "warm_throughput.0.write_units_per_second": "4200", + }), ), }, }, @@ -158,6 +164,16 @@ resource "aws_dynamodb_table" "test" { read_capacity = 10 projection_type = "INCLUDE" non_key_attributes = ["UserId"] + + warm_throughput { + read_units_per_second = 12200 + 
write_units_per_second = 4200 + } + } + + warm_throughput { + read_units_per_second = 12100 + write_units_per_second = 4100 } tags = { diff --git a/internal/service/dynamodb/table_export_identity_gen_test.go b/internal/service/dynamodb/table_export_identity_gen_test.go index f65fbe7ee728..5033ab13b3a6 100644 --- a/internal/service/dynamodb/table_export_identity_gen_test.go +++ b/internal/service/dynamodb/table_export_identity_gen_test.go @@ -16,6 +16,7 @@ import ( "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" "github.com/hashicorp/terraform-plugin-testing/tfversion" "github.com/hashicorp/terraform-provider-aws/internal/acctest" + tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -26,7 +27,7 @@ func TestAccDynamoDBTableExport_Identity_Basic(t *testing.T) { resourceName := "aws_dynamodb_table_export.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ TerraformVersionChecks: []tfversion.TerraformVersionCheck{ tfversion.SkipBelow(tfversion.Version1_12_0), }, @@ -47,6 +48,9 @@ func TestAccDynamoDBTableExport_Identity_Basic(t *testing.T) { ConfigStateChecks: []statecheck.StateCheck{ statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New(names.AttrARN), compare.ValuesSame()), statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), }, }, @@ -108,7 +112,7 @@ func TestAccDynamoDBTableExport_Identity_RegionOverride(t *testing.T) { resourceName := "aws_dynamodb_table_export.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - 
resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ TerraformVersionChecks: []tfversion.TerraformVersionCheck{ tfversion.SkipBelow(tfversion.Version1_12_0), }, @@ -127,6 +131,9 @@ func TestAccDynamoDBTableExport_Identity_RegionOverride(t *testing.T) { ConfigStateChecks: []statecheck.StateCheck{ statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New(names.AttrARN), compare.ValuesSame()), statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), }, }, @@ -218,3 +225,131 @@ func TestAccDynamoDBTableExport_Identity_RegionOverride(t *testing.T) { }, }) } + +func TestAccDynamoDBTableExport_Identity_ExistingResource(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.ExportDescription + resourceName := "aws_dynamodb_table_export.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.DynamoDBServiceID), + CheckDestroy: acctest.CheckDestroyNoop, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/TableExport/basic_v5.100.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTableExportExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: v6.0 Identity error + { + 
ConfigDirectory: config.StaticDirectory("testdata/TableExport/basic_v6.0.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTableExportExists(ctx, resourceName, &v), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.Null(), + }), + }, + }, + + // Step 3: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/TableExport/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + }, + }) +} + +func TestAccDynamoDBTableExport_Identity_ExistingResource_NoRefresh_NoChange(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.ExportDescription + resourceName := "aws_dynamodb_table_export.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + 
tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.DynamoDBServiceID), + CheckDestroy: acctest.CheckDestroyNoop, + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/TableExport/basic_v5.100.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTableExportExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/TableExport/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTableExportExists(ctx, resourceName, &v), + ), + }, + }, + }) +} diff --git a/internal/service/dynamodb/table_export_test.go b/internal/service/dynamodb/table_export_test.go index 219a26c29505..182607aba837 100644 --- a/internal/service/dynamodb/table_export_test.go +++ b/internal/service/dynamodb/table_export_test.go @@ -14,13 +14,8 @@ import ( awstypes "github.com/aws/aws-sdk-go-v2/service/dynamodb/types" sdkacctest "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" - "github.com/hashicorp/terraform-plugin-testing/knownvalue" - "github.com/hashicorp/terraform-plugin-testing/plancheck" - "github.com/hashicorp/terraform-plugin-testing/statecheck" "github.com/hashicorp/terraform-plugin-testing/terraform" "github.com/hashicorp/terraform-provider-aws/internal/acctest" - tfknownvalue 
"github.com/hashicorp/terraform-provider-aws/internal/acctest/knownvalue" - tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" "github.com/hashicorp/terraform-provider-aws/internal/conns" tfdynamodb "github.com/hashicorp/terraform-provider-aws/internal/service/dynamodb" "github.com/hashicorp/terraform-provider-aws/names" @@ -262,85 +257,6 @@ func TestAccDynamoDBTableExport_incrementalExport(t *testing.T) { }) } -func TestAccDynamoDBTableExport_Identity_ExistingResource(t *testing.T) { - ctx := acctest.Context(t) - if testing.Short() { - t.Skip("skipping long-running test in short mode") - } - - var tableExport awstypes.ExportDescription - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resourceName := "aws_dynamodb_table_export.test" - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, names.DynamoDBServiceID), - CheckDestroy: acctest.CheckDestroyNoop, - Steps: []resource.TestStep{ - { - ExternalProviders: map[string]resource.ExternalProvider{ - "aws": { - Source: "hashicorp/aws", - VersionConstraint: "5.100.0", - }, - }, - Config: testAccTableExportConfig_basic(rName), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckTableExportExists(ctx, resourceName, &tableExport), - ), - ConfigStateChecks: []statecheck.StateCheck{ - tfstatecheck.ExpectNoIdentity(resourceName), - }, - }, - { - ExternalProviders: map[string]resource.ExternalProvider{ - "aws": { - Source: "hashicorp/aws", - VersionConstraint: "6.0.0", - }, - }, - Config: testAccTableExportConfig_basic(rName), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckTableExportExists(ctx, resourceName, &tableExport), - ), - ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - PostApplyPostRefresh: []plancheck.PlanCheck{ - 
plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - }, - ConfigStateChecks: []statecheck.StateCheck{ - statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ - names.AttrARN: knownvalue.Null(), - }), - }, - }, - { - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - Config: testAccTableExportConfig_basic(rName), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckTableExportExists(ctx, resourceName, &tableExport), - ), - ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - PostApplyPostRefresh: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - }, - ConfigStateChecks: []statecheck.StateCheck{ - statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ - names.AttrARN: tfknownvalue.RegionalARNRegexp("dynamodb", regexache.MustCompile(`table/.+/export/.+`)), - }), - }, - }, - }, - }) -} - func testAccCheckTableExportExists(ctx context.Context, n string, v *awstypes.ExportDescription) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] diff --git a/internal/service/dynamodb/table_item.go b/internal/service/dynamodb/table_item.go index dc6fbe90c2ff..e7d883cdcc30 100644 --- a/internal/service/dynamodb/table_item.go +++ b/internal/service/dynamodb/table_item.go @@ -63,7 +63,7 @@ func resourceTableItem() *schema.Resource { func validateTableItem(v any, k string) (ws []string, errors []error) { _, err := expandTableItemAttributes(v.(string)) if err != nil { - errors = append(errors, fmt.Errorf("Invalid format of %q: %s", k, err)) + errors = append(errors, fmt.Errorf("Invalid format of %q: %w", k, err)) } return } diff --git a/internal/service/dynamodb/table_replica.go b/internal/service/dynamodb/table_replica.go index 05ff1460aa2d..a14bb59cffef 100644 --- 
a/internal/service/dynamodb/table_replica.go +++ b/internal/service/dynamodb/table_replica.go @@ -17,7 +17,6 @@ import ( awstypes "github.com/aws/aws-sdk-go-v2/service/dynamodb/types" "github.com/hashicorp/aws-sdk-go-base/v2/tfawserr" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/create" @@ -142,28 +141,24 @@ func resourceTableReplicaCreate(ctx context.Context, d *schema.ResourceData, met }}, } - err = retry.RetryContext(ctx, max(replicaUpdateTimeout, d.Timeout(schema.TimeoutCreate)), func() *retry.RetryError { + err = tfresource.Retry(ctx, max(replicaUpdateTimeout, d.Timeout(schema.TimeoutCreate)), func(ctx context.Context) *tfresource.RetryError { _, err := conn.UpdateTable(ctx, input, optFn) if err != nil { if tfawserr.ErrCodeEquals(err, errCodeThrottlingException) { - return retry.RetryableError(err) + return tfresource.RetryableError(err) } if errs.IsAErrorMessageContains[*awstypes.LimitExceededException](err, "simultaneously") { - return retry.RetryableError(err) + return tfresource.RetryableError(err) } if errs.IsA[*awstypes.ResourceInUseException](err) { - return retry.RetryableError(err) + return tfresource.RetryableError(err) } - return retry.NonRetryableError(err) + return tfresource.NonRetryableError(err) } return nil }) - if tfresource.TimedOut(err) { - _, err = conn.UpdateTable(ctx, input, optFn) - } - if err != nil { return create.AppendDiagError(diags, names.DynamoDB, create.ErrActionCreating, resNameTableReplica, d.Get("global_table_arn").(string), err) } @@ -358,28 +353,24 @@ func resourceTableReplicaUpdate(ctx context.Context, d *schema.ResourceData, met TableName: aws.String(tableName), } - err := retry.RetryContext(ctx, max(replicaUpdateTimeout, d.Timeout(schema.TimeoutUpdate)), func() *retry.RetryError { 
+ err := tfresource.Retry(ctx, max(replicaUpdateTimeout, d.Timeout(schema.TimeoutUpdate)), func(ctx context.Context) *tfresource.RetryError { _, err := conn.UpdateTable(ctx, input, optFn) if err != nil { if tfawserr.ErrCodeEquals(err, errCodeThrottlingException) { - return retry.RetryableError(err) + return tfresource.RetryableError(err) } if errs.IsAErrorMessageContains[*awstypes.LimitExceededException](err, "can be created, updated, or deleted simultaneously") { - return retry.RetryableError(err) + return tfresource.RetryableError(err) } if errs.IsA[*awstypes.ResourceInUseException](err) { - return retry.RetryableError(err) + return tfresource.RetryableError(err) } - return retry.NonRetryableError(err) + return tfresource.NonRetryableError(err) } return nil }) - if tfresource.TimedOut(err) { - _, err = conn.UpdateTable(ctx, input, optFn) - } - if err != nil && !tfawserr.ErrMessageContains(err, errCodeValidationException, "no actions specified") { return create.AppendDiagError(diags, names.DynamoDB, create.ErrActionUpdating, resNameTableReplica, d.Id(), err) } @@ -450,28 +441,24 @@ func resourceTableReplicaDelete(ctx context.Context, d *schema.ResourceData, met }, } - err = retry.RetryContext(ctx, updateTableTimeout, func() *retry.RetryError { + err = tfresource.Retry(ctx, updateTableTimeout, func(ctx context.Context) *tfresource.RetryError { _, err := conn.UpdateTable(ctx, input, optFn) if err != nil { if tfawserr.ErrCodeEquals(err, errCodeThrottlingException) { - return retry.RetryableError(err) + return tfresource.RetryableError(err) } if errs.IsAErrorMessageContains[*awstypes.LimitExceededException](err, "can be created, updated, or deleted simultaneously") { - return retry.RetryableError(err) + return tfresource.RetryableError(err) } if errs.IsA[*awstypes.ResourceInUseException](err) { - return retry.RetryableError(err) + return tfresource.RetryableError(err) } - return retry.NonRetryableError(err) + return tfresource.NonRetryableError(err) } return nil }) - 
if tfresource.TimedOut(err) { - _, err = conn.UpdateTable(ctx, input, optFn) - } - if tfawserr.ErrMessageContains(err, errCodeValidationException, "Replica specified in the Replica Update or Replica Delete action of the request was not found") { return diags } diff --git a/internal/service/dynamodb/table_replica_tags_gen_test.go b/internal/service/dynamodb/table_replica_tags_gen_test.go index 3957adacea59..174a8b529fdc 100644 --- a/internal/service/dynamodb/table_replica_tags_gen_test.go +++ b/internal/service/dynamodb/table_replica_tags_gen_test.go @@ -6,7 +6,6 @@ import ( "testing" "github.com/hashicorp/terraform-plugin-testing/config" - sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/knownvalue" "github.com/hashicorp/terraform-plugin-testing/plancheck" @@ -18,10 +17,11 @@ import ( func TestAccDynamoDBTableReplica_tags(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_dynamodb_table_replica.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.DynamoDBServiceID), CheckDestroy: testAccCheckTableReplicaDestroy(ctx), @@ -214,10 +214,11 @@ func TestAccDynamoDBTableReplica_tags(t *testing.T) { func TestAccDynamoDBTableReplica_tags_null(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_dynamodb_table_replica.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.DynamoDBServiceID), 
CheckDestroy: testAccCheckTableReplicaDestroy(ctx), @@ -285,10 +286,11 @@ func TestAccDynamoDBTableReplica_tags_null(t *testing.T) { func TestAccDynamoDBTableReplica_tags_EmptyMap(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_dynamodb_table_replica.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.DynamoDBServiceID), CheckDestroy: testAccCheckTableReplicaDestroy(ctx), @@ -352,10 +354,11 @@ func TestAccDynamoDBTableReplica_tags_EmptyMap(t *testing.T) { func TestAccDynamoDBTableReplica_tags_AddOnUpdate(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_dynamodb_table_replica.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.DynamoDBServiceID), CheckDestroy: testAccCheckTableReplicaDestroy(ctx), @@ -437,10 +440,11 @@ func TestAccDynamoDBTableReplica_tags_AddOnUpdate(t *testing.T) { func TestAccDynamoDBTableReplica_tags_EmptyTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_dynamodb_table_replica.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.DynamoDBServiceID), CheckDestroy: testAccCheckTableReplicaDestroy(ctx), @@ -532,10 +536,11 @@ func TestAccDynamoDBTableReplica_tags_EmptyTag_OnCreate(t *testing.T) { func 
TestAccDynamoDBTableReplica_tags_EmptyTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_dynamodb_table_replica.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.DynamoDBServiceID), CheckDestroy: testAccCheckTableReplicaDestroy(ctx), @@ -677,10 +682,11 @@ func TestAccDynamoDBTableReplica_tags_EmptyTag_OnUpdate_Add(t *testing.T) { func TestAccDynamoDBTableReplica_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_dynamodb_table_replica.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.DynamoDBServiceID), CheckDestroy: testAccCheckTableReplicaDestroy(ctx), @@ -770,10 +776,11 @@ func TestAccDynamoDBTableReplica_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { func TestAccDynamoDBTableReplica_tags_DefaultTags_providerOnly(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_dynamodb_table_replica.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.DynamoDBServiceID), CheckDestroy: testAccCheckTableReplicaDestroy(ctx), @@ -958,10 +965,11 @@ func TestAccDynamoDBTableReplica_tags_DefaultTags_providerOnly(t *testing.T) { func TestAccDynamoDBTableReplica_tags_DefaultTags_nonOverlapping(t *testing.T) { ctx := 
acctest.Context(t) + resourceName := "aws_dynamodb_table_replica.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.DynamoDBServiceID), CheckDestroy: testAccCheckTableReplicaDestroy(ctx), @@ -1123,10 +1131,11 @@ func TestAccDynamoDBTableReplica_tags_DefaultTags_nonOverlapping(t *testing.T) { func TestAccDynamoDBTableReplica_tags_DefaultTags_overlapping(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_dynamodb_table_replica.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.DynamoDBServiceID), CheckDestroy: testAccCheckTableReplicaDestroy(ctx), @@ -1304,10 +1313,11 @@ func TestAccDynamoDBTableReplica_tags_DefaultTags_overlapping(t *testing.T) { func TestAccDynamoDBTableReplica_tags_DefaultTags_updateToProviderOnly(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_dynamodb_table_replica.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.DynamoDBServiceID), CheckDestroy: testAccCheckTableReplicaDestroy(ctx), @@ -1396,10 +1406,11 @@ func TestAccDynamoDBTableReplica_tags_DefaultTags_updateToProviderOnly(t *testin func TestAccDynamoDBTableReplica_tags_DefaultTags_updateToResourceOnly(t *testing.T) { ctx := acctest.Context(t) + resourceName := 
"aws_dynamodb_table_replica.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.DynamoDBServiceID), CheckDestroy: testAccCheckTableReplicaDestroy(ctx), @@ -1487,10 +1498,11 @@ func TestAccDynamoDBTableReplica_tags_DefaultTags_updateToResourceOnly(t *testin func TestAccDynamoDBTableReplica_tags_DefaultTags_emptyResourceTag(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_dynamodb_table_replica.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.DynamoDBServiceID), CheckDestroy: testAccCheckTableReplicaDestroy(ctx), @@ -1553,10 +1565,11 @@ func TestAccDynamoDBTableReplica_tags_DefaultTags_emptyResourceTag(t *testing.T) func TestAccDynamoDBTableReplica_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_dynamodb_table_replica.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.DynamoDBServiceID), CheckDestroy: testAccCheckTableReplicaDestroy(ctx), @@ -1611,10 +1624,11 @@ func TestAccDynamoDBTableReplica_tags_DefaultTags_emptyProviderOnlyTag(t *testin func TestAccDynamoDBTableReplica_tags_DefaultTags_nullOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_dynamodb_table_replica.test" - rName := 
sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.DynamoDBServiceID), CheckDestroy: testAccCheckTableReplicaDestroy(ctx), @@ -1674,10 +1688,11 @@ func TestAccDynamoDBTableReplica_tags_DefaultTags_nullOverlappingResourceTag(t * func TestAccDynamoDBTableReplica_tags_DefaultTags_nullNonOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_dynamodb_table_replica.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.DynamoDBServiceID), CheckDestroy: testAccCheckTableReplicaDestroy(ctx), @@ -1737,10 +1752,11 @@ func TestAccDynamoDBTableReplica_tags_DefaultTags_nullNonOverlappingResourceTag( func TestAccDynamoDBTableReplica_tags_ComputedTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_dynamodb_table_replica.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.DynamoDBServiceID), CheckDestroy: testAccCheckTableReplicaDestroy(ctx), @@ -1793,10 +1809,11 @@ func TestAccDynamoDBTableReplica_tags_ComputedTag_OnCreate(t *testing.T) { func TestAccDynamoDBTableReplica_tags_ComputedTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_dynamodb_table_replica.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := 
acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.DynamoDBServiceID), CheckDestroy: testAccCheckTableReplicaDestroy(ctx), @@ -1892,10 +1909,11 @@ func TestAccDynamoDBTableReplica_tags_ComputedTag_OnUpdate_Add(t *testing.T) { func TestAccDynamoDBTableReplica_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_dynamodb_table_replica.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.DynamoDBServiceID), CheckDestroy: testAccCheckTableReplicaDestroy(ctx), @@ -1981,10 +1999,11 @@ func TestAccDynamoDBTableReplica_tags_ComputedTag_OnUpdate_Replace(t *testing.T) func TestAccDynamoDBTableReplica_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_dynamodb_table_replica.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.DynamoDBServiceID), CheckDestroy: testAccCheckTableReplicaDestroy(ctx), @@ -2145,10 +2164,11 @@ func TestAccDynamoDBTableReplica_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T func TestAccDynamoDBTableReplica_tags_IgnoreTags_Overlap_ResourceTag(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_dynamodb_table_replica.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - 
resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.DynamoDBServiceID), CheckDestroy: testAccCheckTableReplicaDestroy(ctx), diff --git a/internal/service/dynamodb/table_tags_gen_test.go b/internal/service/dynamodb/table_tags_gen_test.go index 532763c208cc..df502c918323 100644 --- a/internal/service/dynamodb/table_tags_gen_test.go +++ b/internal/service/dynamodb/table_tags_gen_test.go @@ -7,7 +7,6 @@ import ( "github.com/aws/aws-sdk-go-v2/service/dynamodb/types" "github.com/hashicorp/terraform-plugin-testing/config" - sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/knownvalue" "github.com/hashicorp/terraform-plugin-testing/plancheck" @@ -19,11 +18,12 @@ import ( func TestAccDynamoDBTable_tags(t *testing.T) { ctx := acctest.Context(t) + var v types.TableDescription resourceName := "aws_dynamodb_table.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.DynamoDBServiceID), CheckDestroy: testAccCheckTableDestroy(ctx), @@ -201,11 +201,12 @@ func TestAccDynamoDBTable_tags(t *testing.T) { func TestAccDynamoDBTable_tags_null(t *testing.T) { ctx := acctest.Context(t) + var v types.TableDescription resourceName := "aws_dynamodb_table.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, 
names.DynamoDBServiceID), CheckDestroy: testAccCheckTableDestroy(ctx), @@ -268,11 +269,12 @@ func TestAccDynamoDBTable_tags_null(t *testing.T) { func TestAccDynamoDBTable_tags_EmptyMap(t *testing.T) { ctx := acctest.Context(t) + var v types.TableDescription resourceName := "aws_dynamodb_table.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.DynamoDBServiceID), CheckDestroy: testAccCheckTableDestroy(ctx), @@ -331,11 +333,12 @@ func TestAccDynamoDBTable_tags_EmptyMap(t *testing.T) { func TestAccDynamoDBTable_tags_AddOnUpdate(t *testing.T) { ctx := acctest.Context(t) + var v types.TableDescription resourceName := "aws_dynamodb_table.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.DynamoDBServiceID), CheckDestroy: testAccCheckTableDestroy(ctx), @@ -412,11 +415,12 @@ func TestAccDynamoDBTable_tags_AddOnUpdate(t *testing.T) { func TestAccDynamoDBTable_tags_EmptyTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) + var v types.TableDescription resourceName := "aws_dynamodb_table.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.DynamoDBServiceID), CheckDestroy: testAccCheckTableDestroy(ctx), @@ -501,11 +505,12 @@ func TestAccDynamoDBTable_tags_EmptyTag_OnCreate(t *testing.T) { func 
TestAccDynamoDBTable_tags_EmptyTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + var v types.TableDescription resourceName := "aws_dynamodb_table.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.DynamoDBServiceID), CheckDestroy: testAccCheckTableDestroy(ctx), @@ -638,11 +643,12 @@ func TestAccDynamoDBTable_tags_EmptyTag_OnUpdate_Add(t *testing.T) { func TestAccDynamoDBTable_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + var v types.TableDescription resourceName := "aws_dynamodb_table.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.DynamoDBServiceID), CheckDestroy: testAccCheckTableDestroy(ctx), @@ -727,11 +733,12 @@ func TestAccDynamoDBTable_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { func TestAccDynamoDBTable_tags_DefaultTags_providerOnly(t *testing.T) { ctx := acctest.Context(t) + var v types.TableDescription resourceName := "aws_dynamodb_table.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.DynamoDBServiceID), CheckDestroy: testAccCheckTableDestroy(ctx), @@ -908,11 +915,12 @@ func TestAccDynamoDBTable_tags_DefaultTags_providerOnly(t *testing.T) { func TestAccDynamoDBTable_tags_DefaultTags_nonOverlapping(t *testing.T) { ctx := 
acctest.Context(t) + var v types.TableDescription resourceName := "aws_dynamodb_table.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.DynamoDBServiceID), CheckDestroy: testAccCheckTableDestroy(ctx), @@ -1068,11 +1076,12 @@ func TestAccDynamoDBTable_tags_DefaultTags_nonOverlapping(t *testing.T) { func TestAccDynamoDBTable_tags_DefaultTags_overlapping(t *testing.T) { ctx := acctest.Context(t) + var v types.TableDescription resourceName := "aws_dynamodb_table.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.DynamoDBServiceID), CheckDestroy: testAccCheckTableDestroy(ctx), @@ -1244,11 +1253,12 @@ func TestAccDynamoDBTable_tags_DefaultTags_overlapping(t *testing.T) { func TestAccDynamoDBTable_tags_DefaultTags_updateToProviderOnly(t *testing.T) { ctx := acctest.Context(t) + var v types.TableDescription resourceName := "aws_dynamodb_table.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.DynamoDBServiceID), CheckDestroy: testAccCheckTableDestroy(ctx), @@ -1334,11 +1344,12 @@ func TestAccDynamoDBTable_tags_DefaultTags_updateToProviderOnly(t *testing.T) { func TestAccDynamoDBTable_tags_DefaultTags_updateToResourceOnly(t *testing.T) { ctx := acctest.Context(t) + var v types.TableDescription 
resourceName := "aws_dynamodb_table.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.DynamoDBServiceID), CheckDestroy: testAccCheckTableDestroy(ctx), @@ -1423,11 +1434,12 @@ func TestAccDynamoDBTable_tags_DefaultTags_updateToResourceOnly(t *testing.T) { func TestAccDynamoDBTable_tags_DefaultTags_emptyResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v types.TableDescription resourceName := "aws_dynamodb_table.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.DynamoDBServiceID), CheckDestroy: testAccCheckTableDestroy(ctx), @@ -1488,11 +1500,12 @@ func TestAccDynamoDBTable_tags_DefaultTags_emptyResourceTag(t *testing.T) { func TestAccDynamoDBTable_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T) { ctx := acctest.Context(t) + var v types.TableDescription resourceName := "aws_dynamodb_table.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.DynamoDBServiceID), CheckDestroy: testAccCheckTableDestroy(ctx), @@ -1545,11 +1558,12 @@ func TestAccDynamoDBTable_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T) { func TestAccDynamoDBTable_tags_DefaultTags_nullOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v types.TableDescription resourceName := 
"aws_dynamodb_table.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.DynamoDBServiceID), CheckDestroy: testAccCheckTableDestroy(ctx), @@ -1607,11 +1621,12 @@ func TestAccDynamoDBTable_tags_DefaultTags_nullOverlappingResourceTag(t *testing func TestAccDynamoDBTable_tags_DefaultTags_nullNonOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v types.TableDescription resourceName := "aws_dynamodb_table.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.DynamoDBServiceID), CheckDestroy: testAccCheckTableDestroy(ctx), @@ -1669,11 +1684,12 @@ func TestAccDynamoDBTable_tags_DefaultTags_nullNonOverlappingResourceTag(t *test func TestAccDynamoDBTable_tags_ComputedTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) + var v types.TableDescription resourceName := "aws_dynamodb_table.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.DynamoDBServiceID), CheckDestroy: testAccCheckTableDestroy(ctx), @@ -1724,11 +1740,12 @@ func TestAccDynamoDBTable_tags_ComputedTag_OnCreate(t *testing.T) { func TestAccDynamoDBTable_tags_ComputedTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + var v types.TableDescription resourceName := "aws_dynamodb_table.test" - rName := 
sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.DynamoDBServiceID), CheckDestroy: testAccCheckTableDestroy(ctx), @@ -1821,11 +1838,12 @@ func TestAccDynamoDBTable_tags_ComputedTag_OnUpdate_Add(t *testing.T) { func TestAccDynamoDBTable_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + var v types.TableDescription resourceName := "aws_dynamodb_table.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.DynamoDBServiceID), CheckDestroy: testAccCheckTableDestroy(ctx), @@ -1908,11 +1926,12 @@ func TestAccDynamoDBTable_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { func TestAccDynamoDBTable_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { ctx := acctest.Context(t) + var v types.TableDescription resourceName := "aws_dynamodb_table.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.DynamoDBServiceID), CheckDestroy: testAccCheckTableDestroy(ctx), @@ -2070,11 +2089,12 @@ func TestAccDynamoDBTable_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { func TestAccDynamoDBTable_tags_IgnoreTags_Overlap_ResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v types.TableDescription resourceName := "aws_dynamodb_table.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) 
+ rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.DynamoDBServiceID), CheckDestroy: testAccCheckTableDestroy(ctx), diff --git a/internal/service/dynamodb/table_test.go b/internal/service/dynamodb/table_test.go index 4ceba065d01a..e4750e90f261 100644 --- a/internal/service/dynamodb/table_test.go +++ b/internal/service/dynamodb/table_test.go @@ -151,6 +151,74 @@ func TestUpdateDiffGSI(t *testing.T) { }, }, + { // Creation with warm throughput + Old: []any{ + map[string]any{ + names.AttrName: "att1-index", + "hash_key": "att1", + "write_capacity": 10, + "read_capacity": 10, + "projection_type": "ALL", + "warm_throughput": []any{ + map[string]any{ + "read_units_per_second": 10, + "write_units_per_second": 10, + }}, + }, + }, + New: []any{ + map[string]any{ + names.AttrName: "att1-index", + "hash_key": "att1", + "write_capacity": 10, + "read_capacity": 10, + "projection_type": "ALL", + "warm_throughput": []any{ + map[string]any{ + "read_units_per_second": 10, + "write_units_per_second": 10, + }}, + }, + map[string]any{ + names.AttrName: "att2-index", + "hash_key": "att2", + "write_capacity": 12, + "read_capacity": 11, + "projection_type": "ALL", + "warm_throughput": []any{ + map[string]any{ + "read_units_per_second": 10, + "write_units_per_second": 10, + }, + }, + }, + }, + ExpectedUpdates: []awstypes.GlobalSecondaryIndexUpdate{ + { + Create: &awstypes.CreateGlobalSecondaryIndexAction{ + IndexName: aws.String("att2-index"), + KeySchema: []awstypes.KeySchemaElement{ + { + AttributeName: aws.String("att2"), + KeyType: awstypes.KeyTypeHash, + }, + }, + ProvisionedThroughput: &awstypes.ProvisionedThroughput{ + WriteCapacityUnits: aws.Int64(12), + ReadCapacityUnits: aws.Int64(11), + }, + WarmThroughput: &awstypes.WarmThroughput{ + ReadUnitsPerSecond: aws.Int64(10), + 
WriteUnitsPerSecond: aws.Int64(10), + }, + Projection: &awstypes.Projection{ + ProjectionType: awstypes.ProjectionTypeAll, + }, + }, + }, + }, + }, + { // Deletion Old: []any{ map[string]any{ @@ -218,6 +286,165 @@ func TestUpdateDiffGSI(t *testing.T) { }, }, + { // Update warm throughput 1: update in place + Old: []any{ + map[string]any{ + names.AttrName: "att1-index", + "hash_key": "att1", + "write_capacity": 10, + "read_capacity": 10, + "projection_type": "ALL", + "warm_throughput": []any{ + map[string]any{ + "read_units_per_second": 10, + "write_units_per_second": 10, + }, + }, + }, + }, + New: []any{ + map[string]any{ + names.AttrName: "att1-index", + "hash_key": "att1", + "write_capacity": 10, + "read_capacity": 10, + "projection_type": "ALL", + "warm_throughput": []any{ + map[string]any{ + "read_units_per_second": 11, + "write_units_per_second": 12, + }, + }, + }, + }, + ExpectedUpdates: []awstypes.GlobalSecondaryIndexUpdate{ + { + Update: &awstypes.UpdateGlobalSecondaryIndexAction{ + IndexName: aws.String("att1-index"), + WarmThroughput: &awstypes.WarmThroughput{ + ReadUnitsPerSecond: aws.Int64(11), + WriteUnitsPerSecond: aws.Int64(12), + }, + }, + }, + }, + }, + + { // Update warm throughput 2: update via recreate + Old: []any{ + map[string]any{ + names.AttrName: "att2-index", + "hash_key": "att2", + "write_capacity": 12, + "read_capacity": 11, + "projection_type": "ALL", + "warm_throughput": []any{ + map[string]any{ + "read_units_per_second": 15000, + "write_units_per_second": 5000, + }, + }, + }, + }, + New: []any{ + map[string]any{ + names.AttrName: "att2-index", + "hash_key": "att2", + "write_capacity": 12, + "read_capacity": 11, + "projection_type": "ALL", + "warm_throughput": []any{ + map[string]any{ + "read_units_per_second": 14000, + "write_units_per_second": 5000, + }, + }, + }, + }, + ExpectedUpdates: []awstypes.GlobalSecondaryIndexUpdate{ + { + Delete: &awstypes.DeleteGlobalSecondaryIndexAction{ + IndexName: aws.String("att2-index"), + }, + }, + 
{ + Create: &awstypes.CreateGlobalSecondaryIndexAction{ + IndexName: aws.String("att2-index"), + KeySchema: []awstypes.KeySchemaElement{ + { + AttributeName: aws.String("att2"), + KeyType: awstypes.KeyTypeHash, + }, + }, + ProvisionedThroughput: &awstypes.ProvisionedThroughput{ + WriteCapacityUnits: aws.Int64(12), + ReadCapacityUnits: aws.Int64(11), + }, + WarmThroughput: &awstypes.WarmThroughput{ + ReadUnitsPerSecond: aws.Int64(14000), + WriteUnitsPerSecond: aws.Int64(5000), + }, + Projection: &awstypes.Projection{ + ProjectionType: awstypes.ProjectionTypeAll, + }, + }, + }, + }, + }, + + { // Update warm throughput 3: update in place at the same moment as capacity + Old: []any{ + map[string]any{ + names.AttrName: "att1-index", + "hash_key": "att1", + "write_capacity": 10, + "read_capacity": 10, + "projection_type": "ALL", + "warm_throughput": []any{ + map[string]any{ + "read_units_per_second": 10, + "write_units_per_second": 10, + }, + }, + }, + }, + New: []any{ + map[string]any{ + names.AttrName: "att1-index", + "hash_key": "att1", + "write_capacity": 11, + "read_capacity": 12, + "projection_type": "ALL", + "warm_throughput": []any{ + map[string]any{ + "read_units_per_second": 11, + "write_units_per_second": 12, + }, + }, + }, + }, + ExpectedUpdates: []awstypes.GlobalSecondaryIndexUpdate{ + { + Update: &awstypes.UpdateGlobalSecondaryIndexAction{ + IndexName: aws.String("att1-index"), + ProvisionedThroughput: &awstypes.ProvisionedThroughput{ + ReadCapacityUnits: aws.Int64(12), + WriteCapacityUnits: aws.Int64(11), + }, + }, + }, + { + Update: &awstypes.UpdateGlobalSecondaryIndexAction{ + IndexName: aws.String("att1-index"), + WarmThroughput: &awstypes.WarmThroughput{ + ReadUnitsPerSecond: aws.Int64(11), + WriteUnitsPerSecond: aws.Int64(12), + }, + }, + }, + }, + }, + { // Update of non-capacity attributes Old: []any{ map[string]any{ @@ -279,6 +506,12 @@ func TestUpdateDiffGSI(t *testing.T) { "write_capacity": 10, "read_capacity": 10, "projection_type": "ALL", + 
"warm_throughput": []any{ + map[string]any{ + "read_units_per_second": 10, + "write_units_per_second": 10, + }, + }, }, }, New: []any{ @@ -290,6 +523,12 @@ func TestUpdateDiffGSI(t *testing.T) { "read_capacity": 12, "projection_type": "INCLUDE", "non_key_attributes": schema.NewSet(schema.HashString, []any{"RandomAttribute"}), + "warm_throughput": []any{ + map[string]any{ + "read_units_per_second": 22, + "write_units_per_second": 33, + }, + }, }, }, ExpectedUpdates: []awstypes.GlobalSecondaryIndexUpdate{ @@ -319,6 +558,10 @@ func TestUpdateDiffGSI(t *testing.T) { ProjectionType: awstypes.ProjectionTypeInclude, NonKeyAttributes: []string{"RandomAttribute"}, }, + WarmThroughput: &awstypes.WarmThroughput{ + ReadUnitsPerSecond: aws.Int64(22), + WriteUnitsPerSecond: aws.Int64(33), + }, }, }, }, @@ -2034,24 +2277,26 @@ func TestAccDynamoDBTable_Replica_multiple(t *testing.T) { ConfigStateChecks: []statecheck.StateCheck{ statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("replica"), knownvalue.SetExact([]knownvalue.Check{ knownvalue.ObjectExact(map[string]knownvalue.Check{ - names.AttrARN: tfknownvalue.RegionalARNAlternateRegionExact("dynamodb", "table/"+rName), - "consistency_mode": knownvalue.StringExact("EVENTUAL"), - names.AttrKMSKeyARN: knownvalue.StringExact(""), - "point_in_time_recovery": knownvalue.Bool(false), - names.AttrPropagateTags: knownvalue.Bool(false), - "region_name": knownvalue.StringExact(acctest.AlternateRegion()), - names.AttrStreamARN: tfknownvalue.RegionalARNAlternateRegionRegexp("dynamodb", regexache.MustCompile(`table/`+rName+`/stream/`+streamLabelRegex)), - "stream_label": knownvalue.StringRegexp(regexache.MustCompile(`^` + streamLabelRegex + `$`)), + names.AttrARN: tfknownvalue.RegionalARNAlternateRegionExact("dynamodb", "table/"+rName), + "consistency_mode": knownvalue.StringExact("EVENTUAL"), + "deletion_protection_enabled": knownvalue.Bool(false), + names.AttrKMSKeyARN: knownvalue.StringExact(""), + "point_in_time_recovery": 
knownvalue.Bool(false), + names.AttrPropagateTags: knownvalue.Bool(false), + "region_name": knownvalue.StringExact(acctest.AlternateRegion()), + names.AttrStreamARN: tfknownvalue.RegionalARNAlternateRegionRegexp("dynamodb", regexache.MustCompile(`table/`+rName+`/stream/`+streamLabelRegex)), + "stream_label": knownvalue.StringRegexp(regexache.MustCompile(`^` + streamLabelRegex + `$`)), }), knownvalue.ObjectExact(map[string]knownvalue.Check{ - names.AttrARN: tfknownvalue.RegionalARNThirdRegionExact("dynamodb", "table/"+rName), - "consistency_mode": knownvalue.StringExact("EVENTUAL"), - names.AttrKMSKeyARN: knownvalue.StringExact(""), - "point_in_time_recovery": knownvalue.Bool(false), - names.AttrPropagateTags: knownvalue.Bool(false), - "region_name": knownvalue.StringExact(acctest.ThirdRegion()), - names.AttrStreamARN: tfknownvalue.RegionalARNThirdRegionRegexp("dynamodb", regexache.MustCompile(`table/`+rName+`/stream/`+streamLabelRegex)), - "stream_label": knownvalue.StringRegexp(regexache.MustCompile(`^` + streamLabelRegex + `$`)), + names.AttrARN: tfknownvalue.RegionalARNThirdRegionExact("dynamodb", "table/"+rName), + "consistency_mode": knownvalue.StringExact("EVENTUAL"), + "deletion_protection_enabled": knownvalue.Bool(false), + names.AttrKMSKeyARN: knownvalue.StringExact(""), + "point_in_time_recovery": knownvalue.Bool(false), + names.AttrPropagateTags: knownvalue.Bool(false), + "region_name": knownvalue.StringExact(acctest.ThirdRegion()), + names.AttrStreamARN: tfknownvalue.RegionalARNThirdRegionRegexp("dynamodb", regexache.MustCompile(`table/`+rName+`/stream/`+streamLabelRegex)), + "stream_label": knownvalue.StringRegexp(regexache.MustCompile(`^` + streamLabelRegex + `$`)), }), })), }, @@ -2079,24 +2324,26 @@ func TestAccDynamoDBTable_Replica_multiple(t *testing.T) { ConfigStateChecks: []statecheck.StateCheck{ statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("replica"), knownvalue.SetExact([]knownvalue.Check{ 
knownvalue.ObjectExact(map[string]knownvalue.Check{ - names.AttrARN: tfknownvalue.RegionalARNAlternateRegionExact("dynamodb", "table/"+rName), - "consistency_mode": knownvalue.StringExact("EVENTUAL"), - names.AttrKMSKeyARN: knownvalue.StringExact(""), - "point_in_time_recovery": knownvalue.Bool(false), - names.AttrPropagateTags: knownvalue.Bool(false), - "region_name": knownvalue.StringExact(acctest.AlternateRegion()), - names.AttrStreamARN: tfknownvalue.RegionalARNAlternateRegionRegexp("dynamodb", regexache.MustCompile(`table/`+rName+`/stream/`+streamLabelRegex)), - "stream_label": knownvalue.StringRegexp(regexache.MustCompile(`^` + streamLabelRegex + `$`)), + names.AttrARN: tfknownvalue.RegionalARNAlternateRegionExact("dynamodb", "table/"+rName), + "consistency_mode": knownvalue.StringExact("EVENTUAL"), + "deletion_protection_enabled": knownvalue.Bool(false), + names.AttrKMSKeyARN: knownvalue.StringExact(""), + "point_in_time_recovery": knownvalue.Bool(false), + names.AttrPropagateTags: knownvalue.Bool(false), + "region_name": knownvalue.StringExact(acctest.AlternateRegion()), + names.AttrStreamARN: tfknownvalue.RegionalARNAlternateRegionRegexp("dynamodb", regexache.MustCompile(`table/`+rName+`/stream/`+streamLabelRegex)), + "stream_label": knownvalue.StringRegexp(regexache.MustCompile(`^` + streamLabelRegex + `$`)), }), knownvalue.ObjectExact(map[string]knownvalue.Check{ - names.AttrARN: tfknownvalue.RegionalARNThirdRegionExact("dynamodb", "table/"+rName), - "consistency_mode": knownvalue.StringExact("EVENTUAL"), - names.AttrKMSKeyARN: knownvalue.StringExact(""), - "point_in_time_recovery": knownvalue.Bool(false), - names.AttrPropagateTags: knownvalue.Bool(false), - "region_name": knownvalue.StringExact(acctest.ThirdRegion()), - names.AttrStreamARN: tfknownvalue.RegionalARNThirdRegionRegexp("dynamodb", regexache.MustCompile(`table/`+rName+`/stream/`+streamLabelRegex)), - "stream_label": knownvalue.StringRegexp(regexache.MustCompile(`^` + streamLabelRegex + 
`$`)), + names.AttrARN: tfknownvalue.RegionalARNThirdRegionExact("dynamodb", "table/"+rName), + "consistency_mode": knownvalue.StringExact("EVENTUAL"), + "deletion_protection_enabled": knownvalue.Bool(false), + names.AttrKMSKeyARN: knownvalue.StringExact(""), + "point_in_time_recovery": knownvalue.Bool(false), + names.AttrPropagateTags: knownvalue.Bool(false), + "region_name": knownvalue.StringExact(acctest.ThirdRegion()), + names.AttrStreamARN: tfknownvalue.RegionalARNThirdRegionRegexp("dynamodb", regexache.MustCompile(`table/`+rName+`/stream/`+streamLabelRegex)), + "stream_label": knownvalue.StringRegexp(regexache.MustCompile(`^` + streamLabelRegex + `$`)), }), })), }, @@ -2147,14 +2394,15 @@ func TestAccDynamoDBTable_Replica_single(t *testing.T) { ConfigStateChecks: []statecheck.StateCheck{ statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("replica"), knownvalue.SetExact([]knownvalue.Check{ knownvalue.ObjectExact(map[string]knownvalue.Check{ - names.AttrARN: tfknownvalue.RegionalARNAlternateRegionExact("dynamodb", "table/"+rName), - "consistency_mode": knownvalue.StringExact("EVENTUAL"), - names.AttrKMSKeyARN: knownvalue.StringExact(""), - "point_in_time_recovery": knownvalue.Bool(false), - names.AttrPropagateTags: knownvalue.Bool(false), - "region_name": knownvalue.StringExact(acctest.AlternateRegion()), - names.AttrStreamARN: tfknownvalue.RegionalARNAlternateRegionRegexp("dynamodb", regexache.MustCompile(`table/`+rName+`/stream/`+streamLabelRegex)), - "stream_label": knownvalue.StringRegexp(regexache.MustCompile(`^` + streamLabelRegex + `$`)), + names.AttrARN: tfknownvalue.RegionalARNAlternateRegionExact("dynamodb", "table/"+rName), + "consistency_mode": knownvalue.StringExact("EVENTUAL"), + "deletion_protection_enabled": knownvalue.Bool(false), + names.AttrKMSKeyARN: knownvalue.StringExact(""), + "point_in_time_recovery": knownvalue.Bool(false), + names.AttrPropagateTags: knownvalue.Bool(false), + "region_name": 
knownvalue.StringExact(acctest.AlternateRegion()), + names.AttrStreamARN: tfknownvalue.RegionalARNAlternateRegionRegexp("dynamodb", regexache.MustCompile(`table/`+rName+`/stream/`+streamLabelRegex)), + "stream_label": knownvalue.StringRegexp(regexache.MustCompile(`^` + streamLabelRegex + `$`)), }), })), streamLabelExpectChangeWhenRecreated.AddStateValue(resourceName, tfjsonpath.New("replica").AtSliceIndex(0).AtMapKey("stream_label")), @@ -2193,14 +2441,15 @@ func TestAccDynamoDBTable_Replica_single(t *testing.T) { ConfigStateChecks: []statecheck.StateCheck{ statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("replica"), knownvalue.SetExact([]knownvalue.Check{ knownvalue.ObjectExact(map[string]knownvalue.Check{ - names.AttrARN: tfknownvalue.RegionalARNAlternateRegionExact("dynamodb", "table/"+rName), - "consistency_mode": knownvalue.StringExact("EVENTUAL"), - names.AttrKMSKeyARN: knownvalue.StringExact(""), - "point_in_time_recovery": knownvalue.Bool(false), - names.AttrPropagateTags: knownvalue.Bool(false), - "region_name": knownvalue.StringExact(acctest.AlternateRegion()), - names.AttrStreamARN: tfknownvalue.RegionalARNAlternateRegionRegexp("dynamodb", regexache.MustCompile(`table/`+rName+`/stream/`+streamLabelRegex)), - "stream_label": knownvalue.StringRegexp(regexache.MustCompile(`^` + streamLabelRegex + `$`)), + names.AttrARN: tfknownvalue.RegionalARNAlternateRegionExact("dynamodb", "table/"+rName), + "consistency_mode": knownvalue.StringExact("EVENTUAL"), + "deletion_protection_enabled": knownvalue.Bool(false), + names.AttrKMSKeyARN: knownvalue.StringExact(""), + "point_in_time_recovery": knownvalue.Bool(false), + names.AttrPropagateTags: knownvalue.Bool(false), + "region_name": knownvalue.StringExact(acctest.AlternateRegion()), + names.AttrStreamARN: tfknownvalue.RegionalARNAlternateRegionRegexp("dynamodb", regexache.MustCompile(`table/`+rName+`/stream/`+streamLabelRegex)), + "stream_label": knownvalue.StringRegexp(regexache.MustCompile(`^` + 
streamLabelRegex + `$`)), }), })), streamLabelExpectChangeWhenRecreated.AddStateValue(resourceName, tfjsonpath.New("replica").AtSliceIndex(0).AtMapKey("stream_label")), @@ -4220,6 +4469,66 @@ func TestAccDynamoDBTable_Replica_upgradeV6_2_0(t *testing.T) { }) } +func TestAccDynamoDBTable_Replica_deletionProtection(t *testing.T) { + ctx := acctest.Context(t) + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + + var conf awstypes.TableDescription + resourceName := "aws_dynamodb_table.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckMultipleRegion(t, 3) + }, + ErrorCheck: acctest.ErrorCheck(t, names.DynamoDBServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesMultipleRegions(ctx, t, 3), + CheckDestroy: testAccCheckTableDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccTableConfig_replicaDeletionProtection(rName, true), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckInitialTableExists(ctx, resourceName, &conf), + resource.TestCheckResourceAttr(resourceName, "replica.0.deletion_protection_enabled", acctest.CtTrue), + resource.TestCheckResourceAttr(resourceName, "replica.1.deletion_protection_enabled", acctest.CtTrue), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccTableConfig_replicaDeletionProtection(rName, false), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckInitialTableExists(ctx, resourceName, &conf), + resource.TestCheckResourceAttr(resourceName, "replica.0.deletion_protection_enabled", acctest.CtFalse), + resource.TestCheckResourceAttr(resourceName, "replica.1.deletion_protection_enabled", acctest.CtFalse), + ), + }, + { + Config: testAccTableConfig_replicaDeletionProtection(rName, true), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckInitialTableExists(ctx, 
resourceName, &conf), + resource.TestCheckResourceAttr(resourceName, "replica.0.deletion_protection_enabled", acctest.CtTrue), + resource.TestCheckResourceAttr(resourceName, "replica.1.deletion_protection_enabled", acctest.CtTrue), + ), + }, + { + Config: testAccTableConfig_replicaDeletionProtection(rName, false), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckInitialTableExists(ctx, resourceName, &conf), + resource.TestCheckResourceAttr(resourceName, "replica.0.deletion_protection_enabled", acctest.CtFalse), + resource.TestCheckResourceAttr(resourceName, "replica.1.deletion_protection_enabled", acctest.CtFalse), + ), + }, + }, + }) +} + func TestAccDynamoDBTable_tableClassInfrequentAccess(t *testing.T) { ctx := acctest.Context(t) var table awstypes.TableDescription @@ -4512,26 +4821,335 @@ func TestAccDynamoDBTable_importTable(t *testing.T) { }) } -func testAccCheckTableDestroy(ctx context.Context) resource.TestCheckFunc { - return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).DynamoDBClient(ctx) - - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_dynamodb_table" { - continue - } - - _, err := tfdynamodb.FindTableByName(ctx, conn, rs.Primary.ID) - - if tfresource.NotFound(err) { - continue - } - - if err != nil { - return err - } +func TestAccDynamoDBTable_warmThroughput(t *testing.T) { + ctx := acctest.Context(t) + var conf, confDecreasedThroughput awstypes.TableDescription + resourceName := "aws_dynamodb_table.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - return fmt.Errorf("DynamoDB Table %s still exists", rs.Primary.ID) + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.DynamoDBServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckTableDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: 
testAccTableConfig_warmThroughput(rName, 5, 5, 12100, 4100), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckInitialTableExists(ctx, resourceName, &conf), + resource.TestCheckResourceAttr(resourceName, "billing_mode", string(awstypes.BillingModePayPerRequest)), + resource.TestCheckResourceAttr(resourceName, "warm_throughput.#", "1"), + resource.TestCheckResourceAttr(resourceName, "warm_throughput.0.read_units_per_second", "12100"), + resource.TestCheckResourceAttr(resourceName, "warm_throughput.0.write_units_per_second", "4100"), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + }, + }, + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccTableConfig_warmThroughput(rName, 5, 5, 12200, 4200), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckInitialTableExists(ctx, resourceName, &conf), + resource.TestCheckResourceAttr(resourceName, "billing_mode", string(awstypes.BillingModePayPerRequest)), + resource.TestCheckResourceAttr(resourceName, "warm_throughput.#", "1"), + resource.TestCheckResourceAttr(resourceName, "warm_throughput.0.read_units_per_second", "12200"), + resource.TestCheckResourceAttr(resourceName, "warm_throughput.0.write_units_per_second", "4200"), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + }, + }, + }, + { + Config: testAccTableConfig_warmThroughput(rName, 6, 6, 12300, 4300), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckInitialTableExists(ctx, resourceName, &conf), + resource.TestCheckResourceAttr(resourceName, "billing_mode", string(awstypes.BillingModePayPerRequest)), + resource.TestCheckResourceAttr(resourceName, "warm_throughput.#", "1"), + resource.TestCheckResourceAttr(resourceName, 
"warm_throughput.0.read_units_per_second", "12300"), + resource.TestCheckResourceAttr(resourceName, "warm_throughput.0.write_units_per_second", "4300"), + ), + }, + { + Config: testAccTableConfig_warmThroughput(rName, 6, 6, 12100, 4100), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckInitialTableExists(ctx, resourceName, &confDecreasedThroughput), + resource.TestCheckResourceAttr(resourceName, "billing_mode", string(awstypes.BillingModePayPerRequest)), + resource.TestCheckResourceAttr(resourceName, "warm_throughput.#", "1"), + resource.TestCheckResourceAttr(resourceName, "warm_throughput.0.read_units_per_second", "12100"), + resource.TestCheckResourceAttr(resourceName, "warm_throughput.0.write_units_per_second", "4100"), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionDestroyBeforeCreate), + }, + }, + }, + }, + }) +} + +func TestAccDynamoDBTable_warmThroughputDefault(t *testing.T) { + ctx := acctest.Context(t) + var conf awstypes.TableDescription + resourceName := "aws_dynamodb_table.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.DynamoDBServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckTableDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccTableConfig_warmThroughput(rName, 5, 5, 12000, 4100), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckInitialTableExists(ctx, resourceName, &conf), + resource.TestCheckResourceAttr(resourceName, "billing_mode", string(awstypes.BillingModePayPerRequest)), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccDynamoDBTable_gsiWarmThroughput_billingProvisioned(t *testing.T) { + ctx := 
acctest.Context(t) + var conf awstypes.TableDescription + resourceName := "aws_dynamodb_table.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.DynamoDBServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckTableDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccTableConfig_gsiWarmThroughput_billingProvisioned(rName, 1, 1, 12100, 4100), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckInitialTableExists(ctx, resourceName, &conf), + resource.TestCheckResourceAttr(resourceName, "billing_mode", string(awstypes.BillingModeProvisioned)), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "global_secondary_index.*", map[string]string{ + "warm_throughput.0.read_units_per_second": "12100", + "warm_throughput.0.write_units_per_second": "4100", + }), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccTableConfig_gsiWarmThroughput_billingProvisioned(rName, 1, 1, 12200, 4200), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckInitialTableExists(ctx, resourceName, &conf), + resource.TestCheckResourceAttr(resourceName, "billing_mode", string(awstypes.BillingModeProvisioned)), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "global_secondary_index.*", map[string]string{ + "warm_throughput.0.read_units_per_second": "12200", + "warm_throughput.0.write_units_per_second": "4200", + }), + ), + }, + { + Config: testAccTableConfig_gsiWarmThroughput_billingProvisioned(rName, 2, 2, 12300, 4300), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckInitialTableExists(ctx, resourceName, &conf), + resource.TestCheckResourceAttr(resourceName, "billing_mode", string(awstypes.BillingModeProvisioned)), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, 
"global_secondary_index.*", map[string]string{ + "warm_throughput.0.read_units_per_second": "12300", + "warm_throughput.0.write_units_per_second": "4300", + }), + ), + }, + { + Config: testAccTableConfig_gsiWarmThroughput_billingProvisioned(rName, 1, 1, 12100, 4100), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckInitialTableExists(ctx, resourceName, &conf), + resource.TestCheckResourceAttr(resourceName, "billing_mode", string(awstypes.BillingModeProvisioned)), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "global_secondary_index.*", map[string]string{ + "warm_throughput.0.read_units_per_second": "12100", + "warm_throughput.0.write_units_per_second": "4100", + }), + ), + }, + }, + }) +} + +func TestAccDynamoDBTable_gsiWarmThroughput_billingPayPerRequest(t *testing.T) { + ctx := acctest.Context(t) + var conf awstypes.TableDescription + resourceName := "aws_dynamodb_table.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.DynamoDBServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckTableDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccTableConfig_gsiWarmThroughput_billingPayPerRequest(rName, 5, 5, 12100, 4100), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckInitialTableExists(ctx, resourceName, &conf), + resource.TestCheckResourceAttr(resourceName, "billing_mode", string(awstypes.BillingModePayPerRequest)), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "global_secondary_index.*", map[string]string{ + "warm_throughput.0.read_units_per_second": "12100", + "warm_throughput.0.write_units_per_second": "4100", + }), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccTableConfig_gsiWarmThroughput_billingPayPerRequest(rName, 5, 5, 12200, 
4200), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckInitialTableExists(ctx, resourceName, &conf), + resource.TestCheckResourceAttr(resourceName, "billing_mode", string(awstypes.BillingModePayPerRequest)), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "global_secondary_index.*", map[string]string{ + "warm_throughput.0.read_units_per_second": "12200", + "warm_throughput.0.write_units_per_second": "4200", + }), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + }, + }, + }, + { + Config: testAccTableConfig_gsiWarmThroughput_billingPayPerRequest(rName, 6, 6, 12300, 4300), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckInitialTableExists(ctx, resourceName, &conf), + resource.TestCheckResourceAttr(resourceName, "billing_mode", string(awstypes.BillingModePayPerRequest)), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "global_secondary_index.*", map[string]string{ + "warm_throughput.0.read_units_per_second": "12300", + "warm_throughput.0.write_units_per_second": "4300", + }), + ), + }, + + { + Config: testAccTableConfig_gsiWarmThroughput_billingPayPerRequest(rName, 6, 6, 12100, 4100), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckInitialTableExists(ctx, resourceName, &conf), + resource.TestCheckResourceAttr(resourceName, "billing_mode", string(awstypes.BillingModePayPerRequest)), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "global_secondary_index.*", map[string]string{ + "warm_throughput.0.read_units_per_second": "12100", + "warm_throughput.0.write_units_per_second": "4100", + }), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + }, + }, + }, + }, + }) +} + +func TestAccDynamoDBTable_gsiWarmThroughput_switchBilling(t *testing.T) { + ctx := 
acctest.Context(t) + var conf awstypes.TableDescription + resourceName := "aws_dynamodb_table.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.DynamoDBServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckTableDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccTableConfig_gsiWarmThroughput_billingProvisioned(rName, 1, 1, 12100, 4100), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckInitialTableExists(ctx, resourceName, &conf), + resource.TestCheckResourceAttr(resourceName, "billing_mode", string(awstypes.BillingModeProvisioned)), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "global_secondary_index.*", map[string]string{ + "warm_throughput.0.read_units_per_second": "12100", + "warm_throughput.0.write_units_per_second": "4100", + }), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + }, + }, + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccTableConfig_gsiWarmThroughput_billingPayPerRequest(rName, 5, 5, 12200, 4200), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckInitialTableExists(ctx, resourceName, &conf), + resource.TestCheckResourceAttr(resourceName, "billing_mode", string(awstypes.BillingModePayPerRequest)), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "global_secondary_index.*", map[string]string{ + "warm_throughput.0.read_units_per_second": "12200", + "warm_throughput.0.write_units_per_second": "4200", + }), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + }, + }, + }, + { + Config: 
testAccTableConfig_gsiWarmThroughput_billingPayPerRequest(rName, 5, 5, 12200, 4200), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + }, + }, + }) +} + +func testAccCheckTableDestroy(ctx context.Context) resource.TestCheckFunc { + return func(s *terraform.State) error { + conn := acctest.Provider.Meta().(*conns.AWSClient).DynamoDBClient(ctx) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "aws_dynamodb_table" { + continue + } + + _, err := tfdynamodb.FindTableByName(ctx, conn, rs.Primary.ID) + + if tfresource.NotFound(err) { + continue + } + + if err != nil { + return err + } + + return fmt.Errorf("DynamoDB Table %s still exists", rs.Primary.ID) } return nil @@ -7021,6 +7639,38 @@ resource "aws_dynamodb_table" "test" { `, rName, streamEnabled, viewType)) } +func testAccTableConfig_replicaDeletionProtection(rName string, deletionProtection bool) string { + return acctest.ConfigCompose( + acctest.ConfigMultipleRegionProvider(3), + fmt.Sprintf(` +data "aws_region" "alternate" { + provider = "awsalternate" +} +data "aws_region" "third" { + provider = "awsthird" +} +resource "aws_dynamodb_table" "test" { + name = %[1]q + hash_key = "TestTableHashKey" + billing_mode = "PAY_PER_REQUEST" + stream_enabled = true + stream_view_type = "NEW_AND_OLD_IMAGES" + attribute { + name = "TestTableHashKey" + type = "S" + } + replica { + region_name = data.aws_region.alternate.name + deletion_protection_enabled = %[2]t + } + replica { + region_name = data.aws_region.third.name + deletion_protection_enabled = %[2]t + } +} +`, rName, deletionProtection)) +} + func testAccTableConfig_lsi(rName, lsiName string) string { return fmt.Sprintf(` resource "aws_dynamodb_table" "test" { @@ -7173,7 +7823,7 @@ resource "aws_s3_bucket" 
"test" { bucket = %[1]q } -resource "aws_s3_bucket_object" "test" { +resource "aws_s3_object" "test" { bucket = aws_s3_bucket.test.bucket key = "data/somedoc.json" content = "{\"Item\":{\"%[1]s\":{\"S\":\"test\"},\"field\":{\"S\":\"test\"}}}" @@ -7194,7 +7844,7 @@ resource "aws_dynamodb_table" "test" { input_compression_type = "NONE" input_format = "DYNAMODB_JSON" s3_bucket_source { - bucket = aws_s3_bucket.test.bucket + bucket = aws_s3_object.test.bucket key_prefix = "data" } } @@ -7284,3 +7934,87 @@ resource "aws_dynamodb_table" "test" { } `, rName, acctest.AlternateRegion(), acctest.ThirdRegion()) } + +func testAccTableConfig_warmThroughput(rName string, maxRead, maxWrite, warmRead, warmWrite int) string { + return fmt.Sprintf(` +resource "aws_dynamodb_table" "test" { + name = %[1]q + billing_mode = "PAY_PER_REQUEST" + hash_key = "TestTableHashKey" + on_demand_throughput { + max_read_request_units = %[2]d + max_write_request_units = %[3]d + } + warm_throughput { + read_units_per_second = %[4]d + write_units_per_second = %[5]d + } + attribute { + name = "TestTableHashKey" + type = "S" + } +} +`, rName, maxRead, maxWrite, warmRead, warmWrite) +} + +func testAccTableConfig_gsiWarmThroughput_billingProvisioned(rName string, readCapacity, writeCapacity, warmRead, warmWrite int) string { + return fmt.Sprintf(` +resource "aws_dynamodb_table" "test" { + billing_mode = "PROVISIONED" + hash_key = "TestTableHashKey" + name = %[1]q + read_capacity = 1 + write_capacity = 1 + global_secondary_index { + name = "att1-index" + hash_key = "att1" + projection_type = "ALL" + read_capacity = %[2]d + write_capacity = %[3]d + warm_throughput { + read_units_per_second = %[4]d + write_units_per_second = %[5]d + } + } + attribute { + name = "TestTableHashKey" + type = "S" + } + attribute { + name = "att1" + type = "S" + } +} +`, rName, readCapacity, writeCapacity, warmRead, warmWrite) +} + +func testAccTableConfig_gsiWarmThroughput_billingPayPerRequest(rName string, maxRead, maxWrite, 
warmRead, warmWrite int) string { + return fmt.Sprintf(` +resource "aws_dynamodb_table" "test" { + name = %[1]q + billing_mode = "PAY_PER_REQUEST" + hash_key = "TestTableHashKey" + on_demand_throughput { + max_read_request_units = %[2]d + max_write_request_units = %[3]d + } + global_secondary_index { + name = "att1-index" + hash_key = "att1" + projection_type = "ALL" + warm_throughput { + read_units_per_second = %[4]d + write_units_per_second = %[5]d + } + } + attribute { + name = "TestTableHashKey" + type = "S" + } + attribute { + name = "att1" + type = "S" + } +} +`, rName, maxRead, maxWrite, warmRead, warmWrite) +} diff --git a/internal/service/dynamodb/tags_gen.go b/internal/service/dynamodb/tags_gen.go index 5ae034459fbc..22650f823184 100644 --- a/internal/service/dynamodb/tags_gen.go +++ b/internal/service/dynamodb/tags_gen.go @@ -3,9 +3,9 @@ package dynamodb import ( "context" - "fmt" "time" + "github.com/YakDriver/smarterr" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/dynamodb" awstypes "github.com/aws/aws-sdk-go-v2/service/dynamodb/types" @@ -29,11 +29,11 @@ func findTag(ctx context.Context, conn *dynamodb.Client, identifier, key string, listTags, err := listTags(ctx, conn, identifier, optFns...) if err != nil { - return nil, err + return nil, smarterr.NewError(err) } if !listTags.KeyExists(key) { - return nil, tfresource.NewEmptyResultError(nil) + return nil, smarterr.NewError(tfresource.NewEmptyResultError(nil)) } return listTags.KeyValue(key), nil @@ -50,14 +50,14 @@ func listTags(ctx context.Context, conn *dynamodb.Client, identifier string, opt output, err := conn.ListTagsOfResource(ctx, &input, optFns...) 
if tfawserr.ErrCodeEquals(err, "ResourceNotFoundException") { - return nil, &retry.NotFoundError{ + return nil, smarterr.NewError(&retry.NotFoundError{ LastError: err, LastRequest: &input, - } + }) } if err != nil { - return tftags.New(ctx, nil), err + return tftags.New(ctx, nil), smarterr.NewError(err) } return keyValueTags(ctx, output.Tags), nil @@ -69,7 +69,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri tags, err := listTags(ctx, meta.(*conns.AWSClient).DynamoDBClient(ctx), identifier) if err != nil { - return err + return smarterr.NewError(err) } if inContext, ok := tftags.FromContext(ctx); ok { @@ -156,7 +156,7 @@ func updateTags(ctx context.Context, conn *dynamodb.Client, identifier string, o _, err := conn.UntagResource(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("untagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } @@ -171,13 +171,13 @@ func updateTags(ctx context.Context, conn *dynamodb.Client, identifier string, o _, err := conn.TagResource(ctx, &input, optFns...) 
if err != nil { - return fmt.Errorf("tagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } if len(removedTags) > 0 || len(updatedTags) > 0 { if err := waitTagsPropagated(ctx, conn, identifier, newTags, optFns...); err != nil { - return fmt.Errorf("waiting for resource (%s) tag propagation: %w", identifier, err) + return smarterr.NewError(err) } } @@ -206,7 +206,7 @@ func waitTagsPropagated(ctx context.Context, conn *dynamodb.Client, id string, t } if err != nil { - return false, err + return false, smarterr.NewError(err) } if inContext, ok := tftags.FromContext(ctx); ok { diff --git a/internal/service/dynamodb/testdata/ResourcePolicy/basic_v5.100.0/main_gen.tf b/internal/service/dynamodb/testdata/ResourcePolicy/basic_v5.100.0/main_gen.tf new file mode 100644 index 000000000000..6acdcd462cf7 --- /dev/null +++ b/internal/service/dynamodb/testdata/ResourcePolicy/basic_v5.100.0/main_gen.tf @@ -0,0 +1,54 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resource "aws_dynamodb_resource_policy" "test" { + resource_arn = aws_dynamodb_table.test.arn + policy = data.aws_iam_policy_document.test.json +} + +data "aws_iam_policy_document" "test" { + statement { + actions = ["dynamodb:*"] + principals { + type = "AWS" + identifiers = ["arn:${data.aws_partition.current.partition}:iam::${data.aws_caller_identity.current.account_id}:root"] + } + resources = [ + aws_dynamodb_table.test.arn, + "${aws_dynamodb_table.test.arn}/*", + ] + } +} + +data "aws_caller_identity" "current" {} +data "aws_partition" "current" {} + +# testAccTableConfig_basic + +resource "aws_dynamodb_table" "test" { + name = var.rName + read_capacity = 1 + write_capacity = 1 + hash_key = var.rName + + attribute { + name = var.rName + type = "S" + } +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "5.100.0" + } + } +} + +provider 
"aws" {} diff --git a/internal/service/dynamodb/testdata/ResourcePolicy/basic_v6.0.0/main_gen.tf b/internal/service/dynamodb/testdata/ResourcePolicy/basic_v6.0.0/main_gen.tf new file mode 100644 index 000000000000..af898b7ea4b8 --- /dev/null +++ b/internal/service/dynamodb/testdata/ResourcePolicy/basic_v6.0.0/main_gen.tf @@ -0,0 +1,54 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resource "aws_dynamodb_resource_policy" "test" { + resource_arn = aws_dynamodb_table.test.arn + policy = data.aws_iam_policy_document.test.json +} + +data "aws_iam_policy_document" "test" { + statement { + actions = ["dynamodb:*"] + principals { + type = "AWS" + identifiers = ["arn:${data.aws_partition.current.partition}:iam::${data.aws_caller_identity.current.account_id}:root"] + } + resources = [ + aws_dynamodb_table.test.arn, + "${aws_dynamodb_table.test.arn}/*", + ] + } +} + +data "aws_caller_identity" "current" {} +data "aws_partition" "current" {} + +# testAccTableConfig_basic + +resource "aws_dynamodb_table" "test" { + name = var.rName + read_capacity = 1 + write_capacity = 1 + hash_key = var.rName + + attribute { + name = var.rName + type = "S" + } +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "6.0.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/dynamodb/testdata/TableExport/basic_v5.100.0/main_gen.tf b/internal/service/dynamodb/testdata/TableExport/basic_v5.100.0/main_gen.tf new file mode 100644 index 000000000000..66f2c35e81e5 --- /dev/null +++ b/internal/service/dynamodb/testdata/TableExport/basic_v5.100.0/main_gen.tf @@ -0,0 +1,46 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_dynamodb_table_export" "test" { + s3_bucket = aws_s3_bucket.test.id + table_arn = aws_dynamodb_table.test.arn +} + +# testAccTableExportConfig_baseConfig + +resource "aws_s3_bucket" "test" { + bucket = var.rName + force_destroy = true +} + +resource "aws_dynamodb_table" "test" { + name = var.rName + read_capacity = 2 + write_capacity = 2 + hash_key = "TestTableHashKey" + + attribute { + name = "TestTableHashKey" + type = "S" + } + + point_in_time_recovery { + enabled = true + } +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "5.100.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/dynamodb/testdata/TableExport/basic_v6.0.0/main_gen.tf b/internal/service/dynamodb/testdata/TableExport/basic_v6.0.0/main_gen.tf new file mode 100644 index 000000000000..5fa1dee07b25 --- /dev/null +++ b/internal/service/dynamodb/testdata/TableExport/basic_v6.0.0/main_gen.tf @@ -0,0 +1,46 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_dynamodb_table_export" "test" { + s3_bucket = aws_s3_bucket.test.id + table_arn = aws_dynamodb_table.test.arn +} + +# testAccTableExportConfig_baseConfig + +resource "aws_s3_bucket" "test" { + bucket = var.rName + force_destroy = true +} + +resource "aws_dynamodb_table" "test" { + name = var.rName + read_capacity = 2 + write_capacity = 2 + hash_key = "TestTableHashKey" + + attribute { + name = "TestTableHashKey" + type = "S" + } + + point_in_time_recovery { + enabled = true + } +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "6.0.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/dynamodb/update_tags_for_resource_gen.go b/internal/service/dynamodb/update_tags_for_resource_gen.go index 67de53849482..16cc83dadf4f 100644 --- a/internal/service/dynamodb/update_tags_for_resource_gen.go +++ b/internal/service/dynamodb/update_tags_for_resource_gen.go @@ -3,9 +3,9 @@ package dynamodb import ( "context" - "fmt" "time" + "github.com/YakDriver/smarterr" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/dynamodb" "github.com/hashicorp/terraform-plugin-log/tflog" @@ -35,7 +35,7 @@ func updateTagsResource(ctx context.Context, conn *dynamodb.Client, identifier s _, err := conn.UntagResource(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("untagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } @@ -50,13 +50,13 @@ func updateTagsResource(ctx context.Context, conn *dynamodb.Client, identifier s _, err := conn.TagResource(ctx, &input, optFns...) 
if err != nil { - return fmt.Errorf("tagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } if len(removedTags) > 0 || len(updatedTags) > 0 { if err := waitTagsPropagedForResource(ctx, conn, identifier, newTags, optFns...); err != nil { - return fmt.Errorf("waiting for resource (%s) tag propagation: %w", identifier, err) + return smarterr.NewError(err) } } @@ -79,7 +79,7 @@ func waitTagsPropagedForResource(ctx context.Context, conn *dynamodb.Client, id } if err != nil { - return false, err + return false, smarterr.NewError(err) } if inContext, ok := tftags.FromContext(ctx); ok { diff --git a/internal/service/dynamodb/wait.go b/internal/service/dynamodb/wait.go index 6850495003f9..fe2b2ee9faff 100644 --- a/internal/service/dynamodb/wait.go +++ b/internal/service/dynamodb/wait.go @@ -30,10 +30,12 @@ const ( func waitTableActive(ctx context.Context, conn *dynamodb.Client, tableName string, timeout time.Duration) (*awstypes.TableDescription, error) { stateConf := &retry.StateChangeConf{ - Pending: enum.Slice(awstypes.TableStatusCreating, awstypes.TableStatusUpdating), - Target: enum.Slice(awstypes.TableStatusActive), - Refresh: statusTable(ctx, conn, tableName), - Timeout: max(createTableTimeout, timeout), + Pending: enum.Slice(awstypes.TableStatusCreating, awstypes.TableStatusUpdating), + Target: enum.Slice(awstypes.TableStatusActive), + Refresh: statusTable(ctx, conn, tableName), + Timeout: max(createTableTimeout, timeout), + Delay: 5 * time.Second, + ContinuousTargetOccurence: 2, } outputRaw, err := stateConf.WaitForStateContext(ctx) @@ -45,6 +47,19 @@ func waitTableActive(ctx context.Context, conn *dynamodb.Client, tableName strin return nil, err } +func waitTableWarmThroughputActive(ctx context.Context, conn *dynamodb.Client, tableName string, timeout time.Duration) error { + stateConf := &retry.StateChangeConf{ + Pending: enum.Slice(awstypes.TableStatusCreating, awstypes.TableStatusUpdating), + Target: 
enum.Slice(awstypes.TableStatusActive), + Refresh: statusTableWarmThroughput(ctx, conn, tableName), + Timeout: max(createTableTimeout, timeout), + } + + _, err := stateConf.WaitForStateContext(ctx) + + return err +} + func waitTableDeleted(ctx context.Context, conn *dynamodb.Client, tableName string, timeout time.Duration) (*awstypes.TableDescription, error) { stateConf := &retry.StateChangeConf{ Pending: enum.Slice(awstypes.TableStatusActive, awstypes.TableStatusDeleting), @@ -136,6 +151,19 @@ func waitGSIActive(ctx context.Context, conn *dynamodb.Client, tableName, indexN return nil, err } +func waitGSIWarmThroughputActive(ctx context.Context, conn *dynamodb.Client, tableName, indexName string, timeout time.Duration) error { //nolint:unparam + stateConf := &retry.StateChangeConf{ + Pending: enum.Slice(awstypes.IndexStatusCreating, awstypes.IndexStatusUpdating), + Target: enum.Slice(awstypes.IndexStatusActive), + Refresh: statusGSIWarmThroughput(ctx, conn, tableName, indexName), + Timeout: max(updateTableTimeout, timeout), + } + + _, err := stateConf.WaitForStateContext(ctx) + + return err +} + func waitGSIDeleted(ctx context.Context, conn *dynamodb.Client, tableName, indexName string, timeout time.Duration) (*awstypes.GlobalSecondaryIndexDescription, error) { stateConf := &retry.StateChangeConf{ Pending: enum.Slice(awstypes.IndexStatusActive, awstypes.IndexStatusDeleting, awstypes.IndexStatusUpdating), diff --git a/internal/service/ec2/ebs_default_kms_key_test.go b/internal/service/ec2/ebs_default_kms_key_test.go index 4b6a5e3496e5..c4468836b540 100644 --- a/internal/service/ec2/ebs_default_kms_key_test.go +++ b/internal/service/ec2/ebs_default_kms_key_test.go @@ -19,6 +19,21 @@ import ( "github.com/hashicorp/terraform-provider-aws/names" ) +func TestAccEC2EBSDefaultKMSKey_serial(t *testing.T) { + t.Parallel() + + testCases := map[string]map[string]func(t *testing.T){ + "Resource": { + acctest.CtBasic: testAccEBSDefaultKMSKey_basic, + }, + "DataSource": { + 
acctest.CtBasic: testAccEBSDefaultKMSKeyDataSource_basic, + }, + } + + acctest.RunSerialTests2Levels(t, testCases, 0) +} + func testAccEBSDefaultKMSKey_basic(t *testing.T) { ctx := acctest.Context(t) resourceName := "aws_ebs_default_kms_key.test" diff --git a/internal/service/ec2/ebs_encryption_by_default_data_source_test.go b/internal/service/ec2/ebs_encryption_by_default_data_source_test.go index 00c2c8937e85..d56a4861a1f1 100644 --- a/internal/service/ec2/ebs_encryption_by_default_data_source_test.go +++ b/internal/service/ec2/ebs_encryption_by_default_data_source_test.go @@ -18,9 +18,10 @@ import ( "github.com/hashicorp/terraform-provider-aws/names" ) -func TestAccEC2EBSEncryptionByDefaultDataSource_basic(t *testing.T) { +func testAccEBSEncryptionByDefaultDataSource_basic(t *testing.T) { ctx := acctest.Context(t) - resource.ParallelTest(t, resource.TestCase{ + + resource.Test(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -51,7 +52,7 @@ func testAccCheckEBSEncryptionByDefaultDataSource(ctx context.Context, n string) input := ec2.GetEbsEncryptionByDefaultInput{} actual, err := conn.GetEbsEncryptionByDefault(ctx, &input) if err != nil { - return fmt.Errorf("Error reading default EBS encryption toggle: %q", err) + return err } attr, _ := strconv.ParseBool(rs.Primary.Attributes[names.AttrEnabled]) diff --git a/internal/service/ec2/ebs_encryption_by_default_test.go b/internal/service/ec2/ebs_encryption_by_default_test.go index 1bbebbb5d5b4..9b0ab876fee7 100644 --- a/internal/service/ec2/ebs_encryption_by_default_test.go +++ b/internal/service/ec2/ebs_encryption_by_default_test.go @@ -17,11 +17,26 @@ import ( "github.com/hashicorp/terraform-provider-aws/names" ) -func TestAccEC2EBSEncryptionByDefault_basic(t *testing.T) { +func TestAccEC2EBSEncryptionByDefault_serial(t *testing.T) { + t.Parallel() + + testCases := 
map[string]map[string]func(t *testing.T){ + "Resource": { + acctest.CtBasic: testAccEBSEncryptionByDefault_basic, + }, + "DataSource": { + acctest.CtBasic: testAccEBSEncryptionByDefaultDataSource_basic, + }, + } + + acctest.RunSerialTests2Levels(t, testCases, 0) +} + +func testAccEBSEncryptionByDefault_basic(t *testing.T) { ctx := acctest.Context(t) resourceName := "aws_ebs_encryption_by_default.test" - resource.ParallelTest(t, resource.TestCase{ + resource.Test(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, diff --git a/internal/service/ec2/ebs_snapshot.go b/internal/service/ec2/ebs_snapshot.go index d626bb02c90c..09d6ae27148a 100644 --- a/internal/service/ec2/ebs_snapshot.go +++ b/internal/service/ec2/ebs_snapshot.go @@ -127,7 +127,7 @@ func resourceEBSSnapshotCreate(ctx context.Context, d *schema.ResourceData, meta } outputRaw, err := tfresource.RetryWhenAWSErrMessageContains(ctx, 1*time.Minute, - func() (any, error) { + func(ctx context.Context) (any, error) { return conn.CreateSnapshot(ctx, &input) }, errCodeSnapshotCreationPerVolumeRateExceeded, "The maximum per volume CreateSnapshot request rate has been exceeded") @@ -139,7 +139,7 @@ func resourceEBSSnapshotCreate(ctx context.Context, d *schema.ResourceData, meta d.SetId(aws.ToString(outputRaw.(*ec2.CreateSnapshotOutput).SnapshotId)) _, err = tfresource.RetryWhenAWSErrCodeEquals(ctx, d.Timeout(schema.TimeoutCreate), - func() (any, error) { + func(ctx context.Context) (any, error) { return waitSnapshotCompleted(ctx, conn, d.Id(), d.Timeout(schema.TimeoutCreate)) }, errCodeResourceNotReady) @@ -253,7 +253,7 @@ func resourceEBSSnapshotDelete(ctx context.Context, d *schema.ResourceData, meta input := ec2.DeleteSnapshotInput{ SnapshotId: aws.String(d.Id()), } - _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, d.Timeout(schema.TimeoutDelete), func() (any, error) { + _, err := 
tfresource.RetryWhenAWSErrCodeEquals(ctx, d.Timeout(schema.TimeoutDelete), func(ctx context.Context) (any, error) { return conn.DeleteSnapshot(ctx, &input) }, errCodeInvalidSnapshotInUse) diff --git a/internal/service/ec2/ebs_snapshot_block_public_access.go b/internal/service/ec2/ebs_snapshot_block_public_access.go index 9dc8c1aad11f..2dfde4599246 100644 --- a/internal/service/ec2/ebs_snapshot_block_public_access.go +++ b/internal/service/ec2/ebs_snapshot_block_public_access.go @@ -7,7 +7,7 @@ import ( "context" "github.com/aws/aws-sdk-go-v2/service/ec2" - "github.com/aws/aws-sdk-go-v2/service/ec2/types" + awstypes "github.com/aws/aws-sdk-go-v2/service/ec2/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -32,7 +32,7 @@ func resourceEBSSnapshotBlockPublicAccess() *schema.Resource { names.AttrState: { Type: schema.TypeString, Required: true, - ValidateDiagFunc: enum.Validate[types.SnapshotBlockPublicAccessState](), + ValidateDiagFunc: enum.Validate[awstypes.SnapshotBlockPublicAccessState](), }, }, } @@ -44,7 +44,7 @@ func resourceEBSSnapshotBlockPublicAccessPut(ctx context.Context, d *schema.Reso state := d.Get(names.AttrState).(string) input := ec2.EnableSnapshotBlockPublicAccessInput{ - State: types.SnapshotBlockPublicAccessState(state), + State: awstypes.SnapshotBlockPublicAccessState(state), } _, err := conn.EnableSnapshotBlockPublicAccess(ctx, &input) diff --git a/internal/service/ec2/ebs_snapshot_block_public_access_identity_gen_test.go b/internal/service/ec2/ebs_snapshot_block_public_access_identity_gen_test.go index 4a7fc8aab874..301cccea7c2f 100644 --- a/internal/service/ec2/ebs_snapshot_block_public_access_identity_gen_test.go +++ b/internal/service/ec2/ebs_snapshot_block_public_access_identity_gen_test.go @@ -15,6 +15,7 @@ import ( "github.com/hashicorp/terraform-plugin-testing/tfversion" 
"github.com/hashicorp/terraform-provider-aws/internal/acctest" tfknownvalue "github.com/hashicorp/terraform-provider-aws/internal/acctest/knownvalue" + tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -22,9 +23,10 @@ func testAccEC2EBSEBSSnapshotBlockPublicAccess_IdentitySerial(t *testing.T) { t.Helper() testCases := map[string]func(t *testing.T){ - acctest.CtBasic: testAccEC2EBSEBSSnapshotBlockPublicAccess_Identity_Basic, - "ExistingResource": testAccEC2EBSEBSSnapshotBlockPublicAccess_Identity_ExistingResource, - "RegionOverride": testAccEC2EBSEBSSnapshotBlockPublicAccess_Identity_RegionOverride, + acctest.CtBasic: testAccEC2EBSEBSSnapshotBlockPublicAccess_Identity_Basic, + "ExistingResource": testAccEC2EBSEBSSnapshotBlockPublicAccess_Identity_ExistingResource, + "ExistingResourceNoRefresh": testAccEC2EBSEBSSnapshotBlockPublicAccess_Identity_ExistingResource_NoRefresh_NoChange, + "RegionOverride": testAccEC2EBSEBSSnapshotBlockPublicAccess_Identity_RegionOverride, } acctest.RunSerialTests1Level(t, testCases, 0) @@ -32,9 +34,10 @@ func testAccEC2EBSEBSSnapshotBlockPublicAccess_IdentitySerial(t *testing.T) { func testAccEC2EBSEBSSnapshotBlockPublicAccess_Identity_Basic(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_ebs_snapshot_block_public_access.test" - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ TerraformVersionChecks: []tfversion.TerraformVersionCheck{ tfversion.SkipBelow(tfversion.Version1_12_0), }, @@ -105,7 +108,7 @@ func testAccEC2EBSEBSSnapshotBlockPublicAccess_Identity_RegionOverride(t *testin resourceName := "aws_ebs_snapshot_block_public_access.test" - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ TerraformVersionChecks: []tfversion.TerraformVersionCheck{ tfversion.SkipBelow(tfversion.Version1_12_0), }, @@ -209,3 +212,106 @@ func 
testAccEC2EBSEBSSnapshotBlockPublicAccess_Identity_RegionOverride(t *testin }, }) } + +func testAccEC2EBSEBSSnapshotBlockPublicAccess_Identity_ExistingResource(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_ebs_snapshot_block_public_access.test" + + acctest.Test(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), + CheckDestroy: testAccCheckEBSSnapshotBlockPublicAccessDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/EBSSnapshotBlockPublicAccess/basic_v5.100.0/"), + ConfigVariables: config.Variables{}, + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: v6.0 Identity error + { + ConfigDirectory: config.StaticDirectory("testdata/EBSSnapshotBlockPublicAccess/basic_v6.0.0/"), + ConfigVariables: config.Variables{}, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: knownvalue.Null(), + names.AttrRegion: knownvalue.Null(), + }), + }, + }, + + // Step 3: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/EBSSnapshotBlockPublicAccess/basic/"), + ConfigVariables: config.Variables{}, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + 
PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + }), + }, + }, + }, + }) +} + +func testAccEC2EBSEBSSnapshotBlockPublicAccess_Identity_ExistingResource_NoRefresh_NoChange(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_ebs_snapshot_block_public_access.test" + + acctest.Test(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), + CheckDestroy: testAccCheckEBSSnapshotBlockPublicAccessDestroy(ctx), + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/EBSSnapshotBlockPublicAccess/basic_v5.100.0/"), + ConfigVariables: config.Variables{}, + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/EBSSnapshotBlockPublicAccess/basic/"), + ConfigVariables: config.Variables{}, + }, + }, + }) +} diff --git a/internal/service/ec2/ebs_snapshot_block_public_access_test.go b/internal/service/ec2/ebs_snapshot_block_public_access_test.go index 3804057aefc9..9c4f4c392c7f 100644 --- a/internal/service/ec2/ebs_snapshot_block_public_access_test.go +++ b/internal/service/ec2/ebs_snapshot_block_public_access_test.go @@ -9,16 +9,10 @@ import ( "testing" "github.com/aws/aws-sdk-go-v2/service/ec2" - 
"github.com/aws/aws-sdk-go-v2/service/ec2/types" + awstypes "github.com/aws/aws-sdk-go-v2/service/ec2/types" "github.com/hashicorp/terraform-plugin-testing/helper/resource" - "github.com/hashicorp/terraform-plugin-testing/knownvalue" - "github.com/hashicorp/terraform-plugin-testing/plancheck" - "github.com/hashicorp/terraform-plugin-testing/statecheck" "github.com/hashicorp/terraform-plugin-testing/terraform" - "github.com/hashicorp/terraform-plugin-testing/tfversion" "github.com/hashicorp/terraform-provider-aws/internal/acctest" - tfknownvalue "github.com/hashicorp/terraform-provider-aws/internal/acctest/knownvalue" - tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -47,7 +41,7 @@ func testAccEC2EBSSnapshotBlockPublicAccess_basic(t *testing.T) { Steps: []resource.TestStep{ { ResourceName: resourceName, - Config: testAccEBSSnapshotBlockPublicAccess_basic(string(types.SnapshotBlockPublicAccessStateBlockAllSharing)), + Config: testAccEBSSnapshotBlockPublicAccess_basic(string(awstypes.SnapshotBlockPublicAccessStateBlockAllSharing)), Check: resource.ComposeTestCheckFunc( resource.TestCheckResourceAttr(resourceName, names.AttrState, "block-all-sharing"), ), @@ -59,7 +53,7 @@ func testAccEC2EBSSnapshotBlockPublicAccess_basic(t *testing.T) { }, { ResourceName: resourceName, - Config: testAccEBSSnapshotBlockPublicAccess_basic(string(types.SnapshotBlockPublicAccessStateBlockNewSharing)), + Config: testAccEBSSnapshotBlockPublicAccess_basic(string(awstypes.SnapshotBlockPublicAccessStateBlockNewSharing)), Check: resource.ComposeTestCheckFunc( resource.TestCheckResourceAttr(resourceName, names.AttrState, "block-new-sharing"), ), @@ -68,75 +62,6 @@ func testAccEC2EBSSnapshotBlockPublicAccess_basic(t *testing.T) { }) } -func testAccEC2EBSEBSSnapshotBlockPublicAccess_Identity_ExistingResource(t *testing.T) { - ctx := 
acctest.Context(t) - resourceName := "aws_ebs_snapshot_block_public_access.test" - - resource.Test(t, resource.TestCase{ - TerraformVersionChecks: []tfversion.TerraformVersionCheck{ - tfversion.SkipBelow(tfversion.Version1_12_0), - }, - PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), - CheckDestroy: testAccCheckEBSSnapshotBlockPublicAccessDestroy(ctx), - Steps: []resource.TestStep{ - { - ExternalProviders: map[string]resource.ExternalProvider{ - "aws": { - Source: "hashicorp/aws", - VersionConstraint: "5.100.0", - }, - }, - Config: testAccEBSSnapshotBlockPublicAccess_basic("block-all-sharing"), - ConfigStateChecks: []statecheck.StateCheck{ - tfstatecheck.ExpectNoIdentity(resourceName), - }, - }, - { - ExternalProviders: map[string]resource.ExternalProvider{ - "aws": { - Source: "hashicorp/aws", - VersionConstraint: "6.0.0", - }, - }, - Config: testAccEBSSnapshotBlockPublicAccess_basic("block-all-sharing"), - ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - PostApplyPostRefresh: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - }, - ConfigStateChecks: []statecheck.StateCheck{ - statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ - names.AttrAccountID: knownvalue.Null(), - names.AttrRegion: knownvalue.Null(), - }), - }, - }, - { - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - Config: testAccEBSSnapshotBlockPublicAccess_basic("block-all-sharing"), - ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - PostApplyPostRefresh: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - }, - ConfigStateChecks: []statecheck.StateCheck{ - 
statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ - names.AttrAccountID: tfknownvalue.AccountID(), - names.AttrRegion: knownvalue.StringExact(acctest.Region()), - }), - }, - }, - }, - }) -} - func testAccCheckEBSSnapshotBlockPublicAccessDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { conn := acctest.Provider.Meta().(*conns.AWSClient).EC2Client(ctx) @@ -146,8 +71,8 @@ func testAccCheckEBSSnapshotBlockPublicAccessDestroy(ctx context.Context) resour return err } - if response.State != types.SnapshotBlockPublicAccessStateUnblocked { - return fmt.Errorf("EBS encryption by default is not in expected state (%s)", types.SnapshotBlockPublicAccessStateUnblocked) + if response.State != awstypes.SnapshotBlockPublicAccessStateUnblocked { + return fmt.Errorf("EBS encryption by default is not in expected state (%s)", awstypes.SnapshotBlockPublicAccessStateUnblocked) } return nil } diff --git a/internal/service/ec2/ebs_snapshot_copy.go b/internal/service/ec2/ebs_snapshot_copy.go index c94b1526f7af..d974d337acd3 100644 --- a/internal/service/ec2/ebs_snapshot_copy.go +++ b/internal/service/ec2/ebs_snapshot_copy.go @@ -151,7 +151,7 @@ func resourceEBSSnapshotCopyCreate(ctx context.Context, d *schema.ResourceData, d.SetId(aws.ToString(output.SnapshotId)) _, err = tfresource.RetryWhenAWSErrCodeEquals(ctx, d.Timeout(schema.TimeoutCreate), - func() (any, error) { + func(ctx context.Context) (any, error) { return waitSnapshotCompleted(ctx, conn, d.Id(), d.Timeout(schema.TimeoutCreate)) }, errCodeResourceNotReady) diff --git a/internal/service/ec2/ebs_snapshot_create_volume_permission.go b/internal/service/ec2/ebs_snapshot_create_volume_permission.go index 2081e18b5b3e..6269c140cc4c 100644 --- a/internal/service/ec2/ebs_snapshot_create_volume_permission.go +++ b/internal/service/ec2/ebs_snapshot_create_volume_permission.go @@ -76,7 +76,7 @@ func resourceSnapshotCreateVolumePermissionCreate(ctx context.Context, d *schema 
d.SetId(id) - _, err = tfresource.RetryWhenNotFound(ctx, d.Timeout(schema.TimeoutCreate), func() (any, error) { + _, err = tfresource.RetryWhenNotFound(ctx, d.Timeout(schema.TimeoutCreate), func(ctx context.Context) (any, error) { return findCreateSnapshotCreateVolumePermissionByTwoPartKey(ctx, conn, snapshotID, accountID) }) @@ -140,7 +140,7 @@ func resourceSnapshotCreateVolumePermissionDelete(ctx context.Context, d *schema return sdkdiag.AppendErrorf(diags, "deleting EBS Snapshot CreateVolumePermission (%s): %s", d.Id(), err) } - _, err = tfresource.RetryUntilNotFound(ctx, d.Timeout(schema.TimeoutDelete), func() (any, error) { + _, err = tfresource.RetryUntilNotFound(ctx, d.Timeout(schema.TimeoutDelete), func(ctx context.Context) (any, error) { return findCreateSnapshotCreateVolumePermissionByTwoPartKey(ctx, conn, snapshotID, accountID) }) diff --git a/internal/service/ec2/ebs_snapshot_import.go b/internal/service/ec2/ebs_snapshot_import.go index 2ccdbade9002..ea5e63331d5e 100644 --- a/internal/service/ec2/ebs_snapshot_import.go +++ b/internal/service/ec2/ebs_snapshot_import.go @@ -5,12 +5,10 @@ package ec2 import ( "context" - "fmt" "log" "time" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/arn" "github.com/aws/aws-sdk-go-v2/service/ec2" awstypes "github.com/aws/aws-sdk-go-v2/service/ec2/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" @@ -225,7 +223,7 @@ func resourceEBSSnapshotImportCreate(ctx context.Context, d *schema.ResourceData } outputRaw, err := tfresource.RetryWhenAWSErrMessageContains(ctx, iamPropagationTimeout, - func() (any, error) { + func(ctx context.Context) (any, error) { return conn.ImportSnapshot(ctx, &input) }, errCodeInvalidParameter, "provided does not exist or does not have sufficient permissions") @@ -270,7 +268,8 @@ func resourceEBSSnapshotImportCreate(ctx context.Context, d *schema.ResourceData func resourceEBSSnapshotImportRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { 
var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).EC2Client(ctx) + c := meta.(*conns.AWSClient) + conn := c.EC2Client(ctx) snapshot, err := findSnapshotByID(ctx, conn, d.Id()) @@ -284,13 +283,7 @@ func resourceEBSSnapshotImportRead(ctx context.Context, d *schema.ResourceData, return sdkdiag.AppendErrorf(diags, "reading EBS Snapshot (%s): %s", d.Id(), err) } - arn := arn.ARN{ - Partition: meta.(*conns.AWSClient).Partition(ctx), - Service: names.EC2, - Region: meta.(*conns.AWSClient).Region(ctx), - Resource: fmt.Sprintf("snapshot/%s", d.Id()), - }.String() - d.Set(names.AttrARN, arn) + d.Set(names.AttrARN, ebsSnapshotARN(ctx, c, d.Id())) d.Set("data_encryption_key_id", snapshot.DataEncryptionKeyId) d.Set(names.AttrDescription, snapshot.Description) d.Set(names.AttrEncrypted, snapshot.Encrypted) diff --git a/internal/service/ec2/ebs_test.go b/internal/service/ec2/ebs_test.go deleted file mode 100644 index 20a9edfabc33..000000000000 --- a/internal/service/ec2/ebs_test.go +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - -package ec2_test - -import ( - "testing" - - "github.com/hashicorp/terraform-provider-aws/internal/acctest" -) - -func TestAccEC2EBSDefaultKMSKey_serial(t *testing.T) { - t.Parallel() - - testCases := map[string]map[string]func(t *testing.T){ - "Resource": { - acctest.CtBasic: testAccEBSDefaultKMSKey_basic, - }, - "DataSource": { - acctest.CtBasic: testAccEBSDefaultKMSKeyDataSource_basic, - }, - } - - acctest.RunSerialTests2Levels(t, testCases, 0) -} diff --git a/internal/service/ec2/ebs_volume.go b/internal/service/ec2/ebs_volume.go index b08f5a29c173..ed3bc026f983 100644 --- a/internal/service/ec2/ebs_volume.go +++ b/internal/service/ec2/ebs_volume.go @@ -114,13 +114,18 @@ func resourceEBSVolume() *schema.Resource { Type: schema.TypeInt, Optional: true, Computed: true, - ValidateFunc: validation.IntBetween(125, 1000), + ValidateFunc: validation.IntBetween(125, 2000), }, names.AttrType: { Type: schema.TypeString, Optional: true, Computed: true, }, + "volume_initialization_rate": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntBetween(100, 300), + }, }, } } @@ -171,6 +176,10 @@ func resourceEBSVolumeCreate(ctx context.Context, d *schema.ResourceData, meta a input.VolumeType = awstypes.VolumeType(value.(string)) } + if value, ok := d.GetOk("volume_initialization_rate"); ok { + input.VolumeInitializationRate = aws.Int32(int32(value.(int))) + } + output, err := conn.CreateVolume(ctx, &input) if err != nil { @@ -215,6 +224,7 @@ func resourceEBSVolumeRead(ctx context.Context, d *schema.ResourceData, meta any d.Set(names.AttrSnapshotID, volume.SnapshotId) d.Set(names.AttrThroughput, volume.Throughput) d.Set(names.AttrType, volume.VolumeType) + d.Set("volume_initialization_rate", volume.VolumeInitializationRate) setTagsOut(ctx, volume.Tags) @@ -282,7 +292,7 @@ func resourceEBSVolumeDelete(ctx context.Context, d *schema.ResourceData, meta a } outputRaw, err := tfresource.RetryWhenAWSErrMessageContains(ctx, 
1*time.Minute, - func() (any, error) { + func(ctx context.Context) (any, error) { return conn.CreateSnapshot(ctx, &input) }, errCodeSnapshotCreationPerVolumeRateExceeded, "The maximum per volume CreateSnapshot request rate has been exceeded") @@ -294,7 +304,7 @@ func resourceEBSVolumeDelete(ctx context.Context, d *schema.ResourceData, meta a snapshotID := aws.ToString(outputRaw.(*ec2.CreateSnapshotOutput).SnapshotId) _, err = tfresource.RetryWhenAWSErrCodeEquals(ctx, d.Timeout(schema.TimeoutDelete), - func() (any, error) { + func(ctx context.Context) (any, error) { return waitSnapshotCompleted(ctx, conn, snapshotID, d.Timeout(schema.TimeoutDelete)) }, errCodeResourceNotReady) @@ -309,7 +319,7 @@ func resourceEBSVolumeDelete(ctx context.Context, d *schema.ResourceData, meta a VolumeId: aws.String(d.Id()), } _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, d.Timeout(schema.TimeoutDelete), - func() (any, error) { + func(ctx context.Context) (any, error) { return conn.DeleteVolume(ctx, &input) }, errCodeVolumeInUse) @@ -367,6 +377,13 @@ func resourceEBSVolumeCustomizeDiff(_ context.Context, diff *schema.ResourceDiff if throughput > 0 && volumeType != awstypes.VolumeTypeGp3 { return fmt.Errorf("'throughput' must not be set when 'type' is '%s'", volumeType) } + + config := diff.GetRawConfig() + if v := config.GetAttr(names.AttrSnapshotID); v.IsKnown() && v.IsNull() { + if v := config.GetAttr("volume_initialization_rate"); v.IsKnown() && !v.IsNull() { + return fmt.Errorf("'volume_initialization_rate' must not be set unless 'snapshot_id' is set") + } + } } else { // Update. 
diff --git a/internal/service/ec2/ebs_volume_attachment.go b/internal/service/ec2/ebs_volume_attachment.go index ca5ccc7ea9f4..9e456432d843 100644 --- a/internal/service/ec2/ebs_volume_attachment.go +++ b/internal/service/ec2/ebs_volume_attachment.go @@ -6,7 +6,6 @@ package ec2 import ( "bytes" "context" - "errors" "fmt" "log" "strings" @@ -14,15 +13,12 @@ import ( "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/ec2" - awstypes "github.com/aws/aws-sdk-go-v2/service/ec2/types" "github.com/hashicorp/aws-sdk-go-base/v2/tfawserr" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/create" - "github.com/hashicorp/terraform-provider-aws/internal/enum" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/names" @@ -208,48 +204,6 @@ func volumeAttachmentID(name, volumeID, instanceID string) string { return fmt.Sprintf("vai-%d", create.StringHashcode(buf.String())) } -func findVolumeAttachment(ctx context.Context, conn *ec2.Client, volumeID, instanceID, deviceName string) (*awstypes.VolumeAttachment, error) { - input := ec2.DescribeVolumesInput{ - Filters: newAttributeFilterList(map[string]string{ - "attachment.device": deviceName, - "attachment.instance-id": instanceID, - }), - VolumeIds: []string{volumeID}, - } - - output, err := findEBSVolume(ctx, conn, &input) - - if err != nil { - return nil, err - } - - if state := output.State; state == awstypes.VolumeStateAvailable || state == awstypes.VolumeStateDeleted { - return nil, &retry.NotFoundError{ - Message: string(state), - LastRequest: input, - } - } - - // Eventual consistency check. 
- if aws.ToString(output.VolumeId) != volumeID { - return nil, &retry.NotFoundError{ - LastRequest: input, - } - } - - for _, v := range output.Attachments { - if v.State == awstypes.VolumeAttachmentStateDetached { - continue - } - - if aws.ToString(v.Device) == deviceName && aws.ToString(v.InstanceId) == instanceID { - return &v, nil - } - } - - return nil, &retry.NotFoundError{} -} - func stopVolumeAttachmentInstance(ctx context.Context, conn *ec2.Client, id string, force bool, timeout time.Duration) error { tflog.Info(ctx, "Stopping EC2 Instance", map[string]any{ "ec2_instance_id": id, @@ -271,92 +225,3 @@ func stopVolumeAttachmentInstance(ctx context.Context, conn *ec2.Client, id stri return nil } - -func waitVolumeAttachmentInstanceStopped(ctx context.Context, conn *ec2.Client, id string, timeout time.Duration) (*awstypes.Instance, error) { - stateConf := &retry.StateChangeConf{ - Pending: enum.Slice( - awstypes.InstanceStateNamePending, - awstypes.InstanceStateNameRunning, - awstypes.InstanceStateNameShuttingDown, - awstypes.InstanceStateNameStopping, - ), - Target: enum.Slice(awstypes.InstanceStateNameStopped), - Refresh: statusVolumeAttachmentInstanceState(ctx, conn, id), - Timeout: timeout, - Delay: 10 * time.Second, - MinTimeout: 3 * time.Second, - } - - outputRaw, err := stateConf.WaitForStateContext(ctx) - - if output, ok := outputRaw.(*awstypes.Instance); ok { - if stateReason := output.StateReason; stateReason != nil { - tfresource.SetLastError(err, errors.New(aws.ToString(stateReason.Message))) - } - - return output, err - } - - return nil, err -} - -func waitVolumeAttachmentInstanceReady(ctx context.Context, conn *ec2.Client, id string, timeout time.Duration) (*awstypes.Instance, error) { - stateConf := &retry.StateChangeConf{ - Pending: enum.Slice(awstypes.InstanceStateNamePending, awstypes.InstanceStateNameStopping), - Target: enum.Slice(awstypes.InstanceStateNameRunning, awstypes.InstanceStateNameStopped), - Refresh: 
statusVolumeAttachmentInstanceState(ctx, conn, id), - Timeout: timeout, - Delay: 10 * time.Second, - MinTimeout: 3 * time.Second, - } - - outputRaw, err := stateConf.WaitForStateContext(ctx) - - if output, ok := outputRaw.(*awstypes.Instance); ok { - if stateReason := output.StateReason; stateReason != nil { - tfresource.SetLastError(err, errors.New(aws.ToString(stateReason.Message))) - } - - return output, err - } - - return nil, err -} - -func waitVolumeAttachmentDeleted(ctx context.Context, conn *ec2.Client, volumeID, instanceID, deviceName string, timeout time.Duration) (*awstypes.VolumeAttachment, error) { - stateConf := &retry.StateChangeConf{ - Pending: enum.Slice(awstypes.VolumeAttachmentStateDetaching), - Target: []string{}, - Refresh: statusVolumeAttachment(ctx, conn, volumeID, instanceID, deviceName), - Timeout: timeout, - Delay: 10 * time.Second, - MinTimeout: 3 * time.Second, - } - - outputRaw, err := stateConf.WaitForStateContext(ctx) - - if output, ok := outputRaw.(*awstypes.VolumeAttachment); ok { - return output, err - } - - return nil, err -} - -func statusVolumeAttachmentInstanceState(ctx context.Context, conn *ec2.Client, id string) retry.StateRefreshFunc { - return func() (any, string, error) { - // Don't call FindInstanceByID as it maps useful status codes to NotFoundError. 
- output, err := findInstance(ctx, conn, &ec2.DescribeInstancesInput{ - InstanceIds: []string{id}, - }) - - if tfresource.NotFound(err) { - return nil, "", nil - } - - if err != nil { - return nil, "", err - } - - return output, string(output.State.Name), nil - } -} diff --git a/internal/service/ec2/ebs_volume_data_source.go b/internal/service/ec2/ebs_volume_data_source.go index 1edf48a9f007..f22f2b75e766 100644 --- a/internal/service/ec2/ebs_volume_data_source.go +++ b/internal/service/ec2/ebs_volume_data_source.go @@ -90,6 +90,10 @@ func dataSourceEBSVolume() *schema.Resource { Type: schema.TypeString, Computed: true, }, + "volume_initialization_rate": { + Type: schema.TypeInt, + Computed: true, + }, }, } } @@ -148,6 +152,7 @@ func dataSourceEBSVolumeRead(ctx context.Context, d *schema.ResourceData, meta a d.Set(names.AttrSnapshotID, volume.SnapshotId) d.Set(names.AttrThroughput, volume.Throughput) d.Set("volume_id", volume.VolumeId) + d.Set("volume_initialization_rate", volume.VolumeInitializationRate) d.Set(names.AttrVolumeType, volume.VolumeType) setTagsOut(ctx, volume.Tags) diff --git a/internal/service/ec2/ebs_volume_data_source_test.go b/internal/service/ec2/ebs_volume_data_source_test.go index d4b3bf1b388a..b0ec6079a428 100644 --- a/internal/service/ec2/ebs_volume_data_source_test.go +++ b/internal/service/ec2/ebs_volume_data_source_test.go @@ -66,6 +66,36 @@ func TestAccEC2EBSVolumeDataSource_multipleFilters(t *testing.T) { }) } +func TestAccEC2EBSVolumeDataSource_snapshotIdAndVolumeInitializationRate(t *testing.T) { + ctx := acctest.Context(t) + resourceName := "aws_ebs_volume.test" + dataSourceName := "data.aws_ebs_volume.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + Config: 
testAccEBSVolumeDataSourceConfig_snapshotIdAndVolumeInitializationRate(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckEBSVolumeIDDataSource(dataSourceName), + resource.TestCheckResourceAttrPair(dataSourceName, names.AttrARN, resourceName, names.AttrARN), + resource.TestCheckResourceAttrPair(dataSourceName, names.AttrCreateTime, resourceName, names.AttrCreateTime), + resource.TestCheckResourceAttrPair(dataSourceName, names.AttrSize, resourceName, names.AttrSize), + resource.TestCheckResourceAttrPair(dataSourceName, names.AttrTags, resourceName, names.AttrTags), + resource.TestCheckResourceAttrPair(dataSourceName, "outpost_arn", resourceName, "outpost_arn"), + resource.TestCheckResourceAttrPair(dataSourceName, "multi_attach_enabled", resourceName, "multi_attach_enabled"), + resource.TestCheckResourceAttrPair(dataSourceName, names.AttrThroughput, resourceName, names.AttrThroughput), + resource.TestCheckResourceAttrPair(dataSourceName, names.AttrSnapshotID, resourceName, names.AttrSnapshotID), + resource.TestCheckResourceAttrPair(dataSourceName, "volume_initialization_rate", resourceName, "volume_initialization_rate"), + ), + }, + }, + }) +} + func testAccCheckEBSVolumeIDDataSource(n string) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] @@ -144,3 +174,52 @@ data "aws_ebs_volume" "test" { } `, rName)) } + +func testAccEBSVolumeDataSourceConfig_snapshotIdAndVolumeInitializationRate(rName string) string { + return acctest.ConfigCompose( + acctest.ConfigAvailableAZsNoOptIn(), + fmt.Sprintf(` +resource "aws_ebs_volume" "source" { + availability_zone = data.aws_availability_zones.available.names[0] + size = 10 + + tags = { + Name = %[1]q + } +} + +resource "aws_ebs_snapshot" "test" { + volume_id = aws_ebs_volume.source.id + + tags = { + Name = %[1]q + } +} + +resource "aws_ebs_volume" "test" { + availability_zone = data.aws_availability_zones.available.names[0] + snapshot_id = aws_ebs_snapshot.test.id + 
+ volume_initialization_rate = 100 + + tags = { + Name = %[1]q + } +} + +data "aws_ebs_volume" "test" { + most_recent = true + + filter { + name = "tag:Name" + values = [%[1]q] + } + + filter { + name = "volume-type" + values = [aws_ebs_volume.test.type] + } +} + +`, rName)) +} diff --git a/internal/service/ec2/ebs_volume_test.go b/internal/service/ec2/ebs_volume_test.go index 3e0bff3bd369..76988e0f2989 100644 --- a/internal/service/ec2/ebs_volume_test.go +++ b/internal/service/ec2/ebs_volume_test.go @@ -875,6 +875,64 @@ func TestAccEC2EBSVolume_snapshotIDAndSize(t *testing.T) { }) } +func TestAccEC2EBSVolume_snapshotIDAndVolumeInitializationRate(t *testing.T) { + ctx := acctest.Context(t) + var v awstypes.Volume + resourceName := "aws_ebs_volume.test" + snapshotResourceName := "aws_ebs_snapshot.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckVolumeDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccEBSVolumeConfig_snapshotIdAndVolumeInitializationRate(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckVolumeExists(ctx, resourceName, &v), + acctest.MatchResourceAttrRegionalARN(ctx, resourceName, names.AttrARN, "ec2", regexache.MustCompile(`volume/vol-.+`)), + resource.TestCheckResourceAttr(resourceName, names.AttrEncrypted, acctest.CtFalse), + resource.TestCheckResourceAttr(resourceName, names.AttrIOPS, "100"), + resource.TestCheckResourceAttr(resourceName, names.AttrKMSKeyID, ""), + resource.TestCheckResourceAttr(resourceName, "multi_attach_enabled", acctest.CtFalse), + resource.TestCheckResourceAttr(resourceName, "outpost_arn", ""), + resource.TestCheckResourceAttrPair(resourceName, names.AttrSnapshotID, snapshotResourceName, names.AttrID), + 
resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, "1"), + resource.TestCheckResourceAttr(resourceName, "tags.Name", rName), + resource.TestCheckResourceAttr(resourceName, names.AttrThroughput, "0"), + resource.TestCheckResourceAttr(resourceName, "volume_initialization_rate", "100"), + resource.TestCheckResourceAttr(resourceName, names.AttrType, "gp2"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"final_snapshot"}, + }, + }, + }) +} + +func TestAccEC2EBSVolume_volumeInitializationRateWithoutSnapshot(t *testing.T) { + ctx := acctest.Context(t) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckVolumeDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccEBSVolumeConfig_volumeInitializationRateWithoutSnapshotId, + ExpectError: regexache.MustCompile(`'volume_initialization_rate' must not be set unless 'snapshot_id' is set`), + }, + }, + }) +} + func TestAccEC2EBSVolume_finalSnapshot(t *testing.T) { ctx := acctest.Context(t) var v awstypes.Volume @@ -1418,6 +1476,50 @@ resource "aws_ebs_volume" "test" { `, rName, size)) } +func testAccEBSVolumeConfig_snapshotIdAndVolumeInitializationRate(rName string) string { + return acctest.ConfigCompose( + acctest.ConfigAvailableAZsNoOptIn(), + fmt.Sprintf(` +resource "aws_ebs_volume" "source" { + availability_zone = data.aws_availability_zones.available.names[0] + size = 10 + + tags = { + Name = %[1]q + } +} + +resource "aws_ebs_snapshot" "test" { + volume_id = aws_ebs_volume.source.id + + tags = { + Name = %[1]q + } +} + +resource "aws_ebs_volume" "test" { + availability_zone = data.aws_availability_zones.available.names[0] + snapshot_id = aws_ebs_snapshot.test.id + + volume_initialization_rate = 100 + + tags = { + Name = %[1]q + } 
+} +`, rName)) +} + +var testAccEBSVolumeConfig_volumeInitializationRateWithoutSnapshotId = acctest.ConfigCompose(acctest.ConfigAvailableAZsNoOptIn(), ` +resource "aws_ebs_volume" "test" { + availability_zone = data.aws_availability_zones.available.names[0] + type = "gp2" + size = 1 + + volume_initialization_rate = 100 +} +`) + func testAccEBSVolumeConfig_finalSnapshot(rName string) string { return acctest.ConfigCompose(acctest.ConfigAvailableAZsNoOptIn(), fmt.Sprintf(` resource "aws_ebs_volume" "test" { diff --git a/internal/service/ec2/ec2_ami.go b/internal/service/ec2/ec2_ami.go index f895b90d43d9..c77b32b0a352 100644 --- a/internal/service/ec2/ec2_ami.go +++ b/internal/service/ec2/ec2_ami.go @@ -12,7 +12,6 @@ import ( "time" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/arn" "github.com/aws/aws-sdk-go-v2/service/ec2" awstypes "github.com/aws/aws-sdk-go-v2/service/ec2/types" "github.com/hashicorp/aws-sdk-go-base/v2/tfawserr" @@ -401,9 +400,10 @@ func resourceAMICreate(ctx context.Context, d *schema.ResourceData, meta any) di func resourceAMIRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).EC2Client(ctx) + c := meta.(*conns.AWSClient) + conn := c.EC2Client(ctx) - outputRaw, err := tfresource.RetryWhenNewResourceNotFound(ctx, ec2PropagationTimeout, func() (any, error) { + image, err := tfresource.RetryWhenNewResourceNotFound(ctx, ec2PropagationTimeout, func(ctx context.Context) (*awstypes.Image, error) { return findImageByID(ctx, conn, d.Id()) }, d.IsNewResource()) @@ -417,8 +417,6 @@ func resourceAMIRead(ctx context.Context, d *schema.ResourceData, meta any) diag return sdkdiag.AppendErrorf(diags, "reading EC2 AMI (%s): %s", d.Id(), err) } - image := outputRaw.(*awstypes.Image) - if image.State == awstypes.ImageStatePending { // This could happen if a user manually adds an image we didn't create // to the state. 
We'll wait for the image to become available @@ -433,13 +431,7 @@ func resourceAMIRead(ctx context.Context, d *schema.ResourceData, meta any) diag } d.Set("architecture", image.Architecture) - imageArn := arn.ARN{ - Partition: meta.(*conns.AWSClient).Partition(ctx), - Region: meta.(*conns.AWSClient).Region(ctx), - Resource: fmt.Sprintf("image/%s", d.Id()), - Service: names.EC2, - }.String() - d.Set(names.AttrARN, imageArn) + d.Set(names.AttrARN, amiARN(ctx, c, d.Id())) d.Set("boot_mode", image.BootMode) d.Set(names.AttrDescription, image.Description) d.Set("deprecation_time", image.DeprecationTime) @@ -572,13 +564,15 @@ func updateDescription(ctx context.Context, conn *ec2.Client, id string, descrip } _, err := conn.ModifyImageAttribute(ctx, &input) + if err != nil { - return fmt.Errorf("updating description: %s", err) + return fmt.Errorf("updating description: %w", err) } err = waitImageDescriptionUpdated(ctx, conn, id, description) + if err != nil { - return fmt.Errorf("updating description: waiting for completion: %s", err) + return fmt.Errorf("updating description: waiting for completion: %w", err) } return nil @@ -897,3 +891,6 @@ func waitImageDeprecationTimeDisabled(ctx context.Context, conn *ec2.Client, ima }, ) } +func amiARN(ctx context.Context, c *conns.AWSClient, imageID string) string { + return c.RegionalARNNoAccount(ctx, names.EC2, "image/"+imageID) +} diff --git a/internal/service/ec2/ec2_ami_data_source.go b/internal/service/ec2/ec2_ami_data_source.go index 0e63d1f28951..3c88da876bb4 100644 --- a/internal/service/ec2/ec2_ami_data_source.go +++ b/internal/service/ec2/ec2_ami_data_source.go @@ -5,13 +5,11 @@ package ec2 import ( "context" - "fmt" "slices" "time" "github.com/YakDriver/regexache" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/arn" "github.com/aws/aws-sdk-go-v2/service/ec2" awstypes "github.com/aws/aws-sdk-go-v2/service/ec2/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" @@ -243,7 +241,8 @@ func 
dataSourceAMI() *schema.Resource { func dataSourceAMIRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).EC2Client(ctx) + c := meta.(*conns.AWSClient) + conn := c.EC2Client(ctx) describeImagesInput := ec2.DescribeImagesInput{ IncludeDeprecated: aws.Bool(d.Get("include_deprecated").(bool)), @@ -306,13 +305,7 @@ func dataSourceAMIRead(ctx context.Context, d *schema.ResourceData, meta any) di d.SetId(aws.ToString(image.ImageId)) d.Set("architecture", image.Architecture) - imageArn := arn.ARN{ - Partition: meta.(*conns.AWSClient).Partition(ctx), - Region: meta.(*conns.AWSClient).Region(ctx), - Service: names.EC2, - Resource: fmt.Sprintf("image/%s", d.Id()), - }.String() - d.Set(names.AttrARN, imageArn) + d.Set(names.AttrARN, amiARN(ctx, c, d.Id())) if err := d.Set("block_device_mappings", flattenAMIBlockDeviceMappings(image.BlockDeviceMappings)); err != nil { return sdkdiag.AppendErrorf(diags, "setting block_device_mappings: %s", err) } diff --git a/internal/service/ec2/ec2_eip.go b/internal/service/ec2/ec2_eip.go index 4c5ebf6fa3f8..8dbfdcde180a 100644 --- a/internal/service/ec2/ec2_eip.go +++ b/internal/service/ec2/ec2_eip.go @@ -13,15 +13,13 @@ import ( "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/ec2" - "github.com/aws/aws-sdk-go-v2/service/ec2/types" + awstypes "github.com/aws/aws-sdk-go-v2/service/ec2/types" "github.com/hashicorp/aws-sdk-go-base/v2/tfawserr" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/enum" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" - tfslices "github.com/hashicorp/terraform-provider-aws/internal/slices" tftags 
"github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/names" @@ -86,7 +84,7 @@ func resourceEIP() *schema.Resource { ForceNew: true, Optional: true, Computed: true, - ValidateDiagFunc: enum.Validate[types.DomainType](), + ValidateDiagFunc: enum.Validate[awstypes.DomainType](), }, "instance": { Type: schema.TypeString, @@ -147,7 +145,7 @@ func resourceEIPCreate(ctx context.Context, d *schema.ResourceData, meta any) di conn := meta.(*conns.AWSClient).EC2Client(ctx) input := ec2.AllocateAddressInput{ - TagSpecifications: getTagSpecificationsIn(ctx, types.ResourceTypeElasticIp), + TagSpecifications: getTagSpecificationsIn(ctx, awstypes.ResourceTypeElasticIp), } if v, ok := d.GetOk(names.AttrAddress); ok { @@ -159,7 +157,7 @@ func resourceEIPCreate(ctx context.Context, d *schema.ResourceData, meta any) di } if v := d.Get(names.AttrDomain); v != nil && v.(string) != "" { - input.Domain = types.DomainType(v.(string)) + input.Domain = awstypes.DomainType(v.(string)) } if v, ok := d.GetOk("ipam_pool_id"); ok { @@ -182,7 +180,7 @@ func resourceEIPCreate(ctx context.Context, d *schema.ResourceData, meta any) di d.SetId(aws.ToString(output.AllocationId)) - _, err = tfresource.RetryWhenNotFound(ctx, d.Timeout(schema.TimeoutCreate), func() (any, error) { + _, err = tfresource.RetryWhenNotFound(ctx, d.Timeout(schema.TimeoutCreate), func(ctx context.Context) (any, error) { return findEIPByAllocationID(ctx, conn, d.Id()) }) @@ -192,7 +190,7 @@ func resourceEIPCreate(ctx context.Context, d *schema.ResourceData, meta any) di if instanceID, eniID := d.Get("instance").(string), d.Get("network_interface").(string); instanceID != "" || eniID != "" { _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, d.Timeout(schema.TimeoutCreate), - func() (any, error) { + func(ctx context.Context) (any, error) { return nil, associateEIP(ctx, conn, d.Id(), instanceID, eniID, 
d.Get("associate_with_private_ip").(string)) }, errCodeInvalidAllocationIDNotFound) @@ -209,10 +207,10 @@ func resourceEIPRead(ctx context.Context, d *schema.ResourceData, meta any) diag conn := meta.(*conns.AWSClient).EC2Client(ctx) if !eipID(d.Id()).IsVPC() { - return sdkdiag.AppendErrorf(diags, `with the retirement of EC2-Classic %s domain EC2 EIPs are no longer supported`, types.DomainTypeStandard) + return sdkdiag.AppendErrorf(diags, `with the retirement of EC2-Classic %s domain EC2 EIPs are no longer supported`, awstypes.DomainTypeStandard) } - outputRaw, err := tfresource.RetryWhenNewResourceNotFound(ctx, ec2PropagationTimeout, func() (any, error) { + address, err := tfresource.RetryWhenNewResourceNotFound(ctx, ec2PropagationTimeout, func(ctx context.Context) (*awstypes.Address, error) { return findEIPByAllocationID(ctx, conn, d.Id()) }, d.IsNewResource()) @@ -226,7 +224,6 @@ func resourceEIPRead(ctx context.Context, d *schema.ResourceData, meta any) diag return sdkdiag.AppendErrorf(diags, "reading EC2 EIP (%s): %s", d.Id(), err) } - address := outputRaw.(*types.Address) allocationID := aws.ToString(address.AllocationId) d.Set("allocation_id", allocationID) d.Set(names.AttrARN, eipARN(ctx, meta.(*conns.AWSClient), allocationID)) @@ -253,7 +250,7 @@ func resourceEIPRead(ctx context.Context, d *schema.ResourceData, meta any) diag // Force ID to be an Allocation ID if we're on a VPC. // This allows users to import the EIP based on the IP if they are in a VPC. 
- if address.Domain == types.DomainTypeVpc && net.ParseIP(d.Id()) != nil { + if address.Domain == awstypes.DomainTypeVpc && net.ParseIP(d.Id()) != nil { d.SetId(aws.ToString(address.AllocationId)) } @@ -304,7 +301,7 @@ func resourceEIPDelete(ctx context.Context, d *schema.ResourceData, meta any) di conn := meta.(*conns.AWSClient).EC2Client(ctx) if !eipID(d.Id()).IsVPC() { - return sdkdiag.AppendErrorf(diags, `with the retirement of EC2-Classic %s domain EC2 EIPs are no longer supported`, types.DomainTypeStandard) + return sdkdiag.AppendErrorf(diags, `with the retirement of EC2-Classic %s domain EC2 EIPs are no longer supported`, awstypes.DomainTypeStandard) } // If we are attached to an instance or interface, detach first. @@ -339,8 +336,8 @@ func resourceEIPDelete(ctx context.Context, d *schema.ResourceData, meta any) di const ( timeout = 10 * time.Minute // IPAM eventual consistency ) - _, err := tfresource.RetryUntilNotFound(ctx, timeout, func() (any, error) { - return findIPAMPoolAllocationsForEIP(ctx, conn, ipamPoolID, d.Get("allocation_id").(string)) + _, err := tfresource.RetryUntilNotFound(ctx, timeout, func(ctx context.Context) (any, error) { + return findIPAMPoolAllocationForResource(ctx, conn, ipamPoolID, d.Get("allocation_id").(string)) }) if err != nil { @@ -382,7 +379,7 @@ func associateEIP(ctx context.Context, conn *ec2.Client, allocationID, instanceI } _, err = tfresource.RetryWhen(ctx, ec2PropagationTimeout, - func() (any, error) { + func(ctx context.Context) (any, error) { return findEIPByAssociationID(ctx, conn, aws.ToString(output.AssociationId)) }, func(err error) (bool, error) { @@ -431,25 +428,3 @@ func disassociateEIP(ctx context.Context, conn *ec2.Client, associationID string func eipARN(ctx context.Context, c *conns.AWSClient, allocationID string) string { return c.RegionalARN(ctx, names.EC2, "elastic-ip/"+allocationID) } - -func findIPAMPoolAllocationsForEIP(ctx context.Context, conn *ec2.Client, ipamPoolID, eipAllocationID string) 
([]types.IpamPoolAllocation, error) { - input := ec2.GetIpamPoolAllocationsInput{ - IpamPoolId: aws.String(ipamPoolID), - } - - output, err := findIPAMPoolAllocations(ctx, conn, &input) - - if err != nil { - return nil, err - } - - output = tfslices.Filter(output, func(v types.IpamPoolAllocation) bool { - return v.ResourceType == types.IpamPoolAllocationResourceTypeEip && aws.ToString(v.ResourceId) == eipAllocationID - }) - - if len(output) == 0 { - return nil, &retry.NotFoundError{} - } - - return output, nil -} diff --git a/internal/service/ec2/ec2_eip_association.go b/internal/service/ec2/ec2_eip_association.go index 9b7e299c9c48..b5b3b5d08cdb 100644 --- a/internal/service/ec2/ec2_eip_association.go +++ b/internal/service/ec2/ec2_eip_association.go @@ -10,7 +10,7 @@ import ( "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/ec2" - "github.com/aws/aws-sdk-go-v2/service/ec2/types" + awstypes "github.com/aws/aws-sdk-go-v2/service/ec2/types" "github.com/hashicorp/aws-sdk-go-base/v2/tfawserr" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" @@ -118,7 +118,7 @@ func resourceEIPAssociationCreate(ctx context.Context, d *schema.ResourceData, m d.SetId(aws.ToString(output.AssociationId)) _, err = tfresource.RetryWhen(ctx, ec2PropagationTimeout, - func() (any, error) { + func(ctx context.Context) (any, error) { return findEIPByAssociationID(ctx, conn, d.Id()) }, func(err error) (bool, error) { @@ -147,7 +147,7 @@ func resourceEIPAssociationRead(ctx context.Context, d *schema.ResourceData, met conn := meta.(*conns.AWSClient).EC2Client(ctx) if !eipAssociationID(d.Id()).IsVPC() { - return sdkdiag.AppendErrorf(diags, `with the retirement of EC2-Classic %s domain EC2 EIPs are no longer supported`, types.DomainTypeStandard) + return sdkdiag.AppendErrorf(diags, `with the retirement of EC2-Classic %s domain EC2 EIPs are no longer supported`, awstypes.DomainTypeStandard) } address, err := 
findEIPByAssociationID(ctx, conn, d.Id()) @@ -176,7 +176,7 @@ func resourceEIPAssociationDelete(ctx context.Context, d *schema.ResourceData, m conn := meta.(*conns.AWSClient).EC2Client(ctx) if !eipAssociationID(d.Id()).IsVPC() { - return sdkdiag.AppendErrorf(diags, `with the retirement of EC2-Classic %s domain EC2 EIPs are no longer supported`, types.DomainTypeStandard) + return sdkdiag.AppendErrorf(diags, `with the retirement of EC2-Classic %s domain EC2 EIPs are no longer supported`, awstypes.DomainTypeStandard) } input := ec2.DisassociateAddressInput{ diff --git a/internal/service/ec2/ec2_eip_association_test.go b/internal/service/ec2/ec2_eip_association_test.go index 6e0e0e1250b6..7e9d9ed50696 100644 --- a/internal/service/ec2/ec2_eip_association_test.go +++ b/internal/service/ec2/ec2_eip_association_test.go @@ -9,7 +9,7 @@ import ( "testing" "github.com/YakDriver/regexache" - "github.com/aws/aws-sdk-go-v2/service/ec2/types" + awstypes "github.com/aws/aws-sdk-go-v2/service/ec2/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" @@ -22,7 +22,7 @@ import ( func TestAccEC2EIPAssociation_basic(t *testing.T) { ctx := acctest.Context(t) - var a types.Address + var a awstypes.Address resourceName := "aws_eip_association.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -50,7 +50,7 @@ func TestAccEC2EIPAssociation_basic(t *testing.T) { func TestAccEC2EIPAssociation_disappears(t *testing.T) { ctx := acctest.Context(t) - var a types.Address + var a awstypes.Address resourceName := "aws_eip_association.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -74,7 +74,7 @@ func TestAccEC2EIPAssociation_disappears(t *testing.T) { func TestAccEC2EIPAssociation_instance_basic(t *testing.T) { ctx := acctest.Context(t) - var a types.Address + var a awstypes.Address resourceName := 
"aws_eip_association.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -103,7 +103,7 @@ func TestAccEC2EIPAssociation_instance_basic(t *testing.T) { func TestAccEC2EIPAssociation_instance_publicIP(t *testing.T) { ctx := acctest.Context(t) - var a types.Address + var a awstypes.Address resourceName := "aws_eip_association.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -132,7 +132,7 @@ func TestAccEC2EIPAssociation_instance_publicIP(t *testing.T) { func TestAccEC2EIPAssociation_networkInterface(t *testing.T) { ctx := acctest.Context(t) - var a types.Address + var a awstypes.Address resourceName := "aws_eip_association.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -166,7 +166,7 @@ func TestAccEC2EIPAssociation_networkInterface(t *testing.T) { func TestAccEC2EIPAssociation_spotInstance(t *testing.T) { ctx := acctest.Context(t) - var a types.Address + var a awstypes.Address resourceName := "aws_eip_association.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) publicKey, _, err := sdkacctest.RandSSHKeyPair(acctest.DefaultEmailAddress) @@ -202,7 +202,7 @@ func TestAccEC2EIPAssociation_spotInstance(t *testing.T) { }) } -func testAccCheckEIPAssociationExists(ctx context.Context, n string, v *types.Address) resource.TestCheckFunc { +func testAccCheckEIPAssociationExists(ctx context.Context, n string, v *awstypes.Address) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] if !ok { diff --git a/internal/service/ec2/ec2_eip_data_source.go b/internal/service/ec2/ec2_eip_data_source.go index acb769f6662d..5d72e075d826 100644 --- a/internal/service/ec2/ec2_eip_data_source.go +++ b/internal/service/ec2/ec2_eip_data_source.go @@ -10,7 +10,7 @@ import ( "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/ec2" - "github.com/aws/aws-sdk-go-v2/service/ec2/types" + awstypes "github.com/aws/aws-sdk-go-v2/service/ec2/types" 
"github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -141,7 +141,7 @@ func dataSourceEIPRead(ctx context.Context, d *schema.ResourceData, meta any) di return sdkdiag.AppendFromErr(diags, tfresource.SingularDataSourceFindError("EC2 EIP", err)) } - if eip.Domain == types.DomainTypeVpc { + if eip.Domain == awstypes.DomainTypeVpc { allocationID := aws.ToString(eip.AllocationId) d.SetId(allocationID) d.Set(names.AttrARN, eipARN(ctx, meta.(*conns.AWSClient), allocationID)) diff --git a/internal/service/ec2/ec2_eip_test.go b/internal/service/ec2/ec2_eip_test.go index bd9482419894..c4643e812723 100644 --- a/internal/service/ec2/ec2_eip_test.go +++ b/internal/service/ec2/ec2_eip_test.go @@ -10,7 +10,7 @@ import ( "testing" "github.com/YakDriver/regexache" - "github.com/aws/aws-sdk-go-v2/service/ec2/types" + awstypes "github.com/aws/aws-sdk-go-v2/service/ec2/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/plancheck" @@ -24,7 +24,7 @@ import ( func TestAccEC2EIP_basic(t *testing.T) { ctx := acctest.Context(t) - var conf types.Address + var conf awstypes.Address resourceName := "aws_eip.test" resource.ParallelTest(t, resource.TestCase{ @@ -55,7 +55,7 @@ func TestAccEC2EIP_basic(t *testing.T) { func TestAccEC2EIP_disappears(t *testing.T) { ctx := acctest.Context(t) - var conf types.Address + var conf awstypes.Address resourceName := "aws_eip.test" resource.ParallelTest(t, resource.TestCase{ @@ -83,7 +83,7 @@ func TestAccEC2EIP_disappears(t *testing.T) { func TestAccEC2EIP_migrateVPCToDomain(t *testing.T) { ctx := acctest.Context(t) - var conf types.Address + var conf awstypes.Address resourceName := "aws_eip.test" resource.ParallelTest(t, resource.TestCase{ @@ -124,7 +124,7 @@ func 
TestAccEC2EIP_migrateVPCToDomain(t *testing.T) { func TestAccEC2EIP_noVPC(t *testing.T) { ctx := acctest.Context(t) - var conf types.Address + var conf awstypes.Address resourceName := "aws_eip.test" resource.ParallelTest(t, resource.TestCase{ @@ -153,7 +153,7 @@ func TestAccEC2EIP_noVPC(t *testing.T) { func TestAccEC2EIP_tags(t *testing.T) { ctx := acctest.Context(t) - var conf types.Address + var conf awstypes.Address resourceName := "aws_eip.test" resource.ParallelTest(t, resource.TestCase{ @@ -198,7 +198,7 @@ func TestAccEC2EIP_tags(t *testing.T) { func TestAccEC2EIP_instance(t *testing.T) { ctx := acctest.Context(t) - var conf types.Address + var conf awstypes.Address instanceResourceName := "aws_instance.test" resourceName := "aws_eip.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -230,7 +230,7 @@ func TestAccEC2EIP_instance(t *testing.T) { // https://github.com/hashicorp/terraform-provider-aws/issues/42) func TestAccEC2EIP_Instance_reassociate(t *testing.T) { ctx := acctest.Context(t) - var conf types.Address + var conf awstypes.Address instanceResourceName := "aws_instance.test" resourceName := "aws_eip.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -264,7 +264,7 @@ func TestAccEC2EIP_Instance_reassociate(t *testing.T) { // associated Private EIPs of two instances func TestAccEC2EIP_Instance_associatedUserPrivateIP(t *testing.T) { ctx := acctest.Context(t) - var conf types.Address + var conf awstypes.Address instance1ResourceName := "aws_instance.test.1" instance2ResourceName := "aws_instance.test.0" resourceName := "aws_eip.test" @@ -306,7 +306,7 @@ func TestAccEC2EIP_Instance_associatedUserPrivateIP(t *testing.T) { func TestAccEC2EIP_Instance_notAssociated(t *testing.T) { ctx := acctest.Context(t) - var conf types.Address + var conf awstypes.Address instanceResourceName := "aws_instance.test" resourceName := "aws_eip.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -346,7 +346,7 @@ func 
TestAccEC2EIP_Instance_notAssociated(t *testing.T) { func TestAccEC2EIP_networkInterface(t *testing.T) { ctx := acctest.Context(t) - var conf types.Address + var conf awstypes.Address resourceName := "aws_eip.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -377,7 +377,7 @@ func TestAccEC2EIP_networkInterface(t *testing.T) { func TestAccEC2EIP_NetworkInterface_twoEIPsOneInterface(t *testing.T) { ctx := acctest.Context(t) - var one, two types.Address + var one, two awstypes.Address resource1Name := "aws_eip.test.0" resource2Name := "aws_eip.test.1" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -406,7 +406,7 @@ func TestAccEC2EIP_NetworkInterface_twoEIPsOneInterface(t *testing.T) { func TestAccEC2EIP_association(t *testing.T) { ctx := acctest.Context(t) - var conf types.Address + var conf awstypes.Address instanceResourceName := "aws_instance.test" eniResourceName := "aws_network_interface.test" resourceName := "aws_eip.test" @@ -454,7 +454,7 @@ func TestAccEC2EIP_association(t *testing.T) { func TestAccEC2EIP_PublicIPv4Pool_default(t *testing.T) { ctx := acctest.Context(t) - var conf types.Address + var conf awstypes.Address resourceName := "aws_eip.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -489,7 +489,7 @@ func TestAccEC2EIP_PublicIPv4Pool_custom(t *testing.T) { t.Skipf("Environment variable %s is not set", key) } - var conf types.Address + var conf awstypes.Address resourceName := "aws_eip.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -518,7 +518,7 @@ func TestAccEC2EIP_PublicIPv4Pool_custom(t *testing.T) { func TestAccEC2EIP_PublicIPv4Pool_IPAMPoolId(t *testing.T) { ctx := acctest.Context(t) - var conf types.Address + var conf awstypes.Address resourceName := "aws_eip.test" ipamPoolDataSourceName := "aws_vpc_ipam_pool.test_pool" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -543,7 +543,7 @@ func TestAccEC2EIP_PublicIPv4Pool_IPAMPoolId(t *testing.T) { func 
TestAccEC2EIP_customerOwnedIPv4Pool(t *testing.T) { ctx := acctest.Context(t) - var conf types.Address + var conf awstypes.Address resourceName := "aws_eip.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -572,7 +572,7 @@ func TestAccEC2EIP_customerOwnedIPv4Pool(t *testing.T) { func TestAccEC2EIP_networkBorderGroup(t *testing.T) { ctx := acctest.Context(t) - var conf types.Address + var conf awstypes.Address resourceName := "aws_eip.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -602,7 +602,7 @@ func TestAccEC2EIP_networkBorderGroup(t *testing.T) { func TestAccEC2EIP_carrierIP(t *testing.T) { ctx := acctest.Context(t) - var conf types.Address + var conf awstypes.Address resourceName := "aws_eip.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -632,7 +632,7 @@ func TestAccEC2EIP_carrierIP(t *testing.T) { func TestAccEC2EIP_BYOIPAddress_default(t *testing.T) { ctx := acctest.Context(t) - var conf types.Address + var conf awstypes.Address resourceName := "aws_eip.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -661,7 +661,7 @@ func TestAccEC2EIP_BYOIPAddress_custom(t *testing.T) { t.Skipf("Environment variable %s is not set", key) } - var conf types.Address + var conf awstypes.Address resourceName := "aws_eip.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -696,7 +696,7 @@ func TestAccEC2EIP_BYOIPAddress_customWithPublicIPv4Pool(t *testing.T) { t.Skipf("Environment variable %s is not set", key) } - var conf types.Address + var conf awstypes.Address resourceName := "aws_eip.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -718,7 +718,7 @@ func TestAccEC2EIP_BYOIPAddress_customWithPublicIPv4Pool(t *testing.T) { }) } -func testAccCheckEIPExists(ctx context.Context, n string, v *types.Address) resource.TestCheckFunc { +func testAccCheckEIPExists(ctx context.Context, n string, v *awstypes.Address) resource.TestCheckFunc { return func(s 
*terraform.State) error { rs, ok := s.RootModule().Resources[n] if !ok { diff --git a/internal/service/ec2/ec2_eips_data_source.go b/internal/service/ec2/ec2_eips_data_source.go index c28793b73837..278cbd1b689d 100644 --- a/internal/service/ec2/ec2_eips_data_source.go +++ b/internal/service/ec2/ec2_eips_data_source.go @@ -9,7 +9,7 @@ import ( "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/ec2" - "github.com/aws/aws-sdk-go-v2/service/ec2/types" + awstypes "github.com/aws/aws-sdk-go-v2/service/ec2/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -77,7 +77,7 @@ func dataSourceEIPsRead(ctx context.Context, d *schema.ResourceData, meta any) d for _, v := range output { publicIPs = append(publicIPs, aws.ToString(v.PublicIp)) - if v.Domain == types.DomainTypeVpc { + if v.Domain == awstypes.DomainTypeVpc { allocationIDs = append(allocationIDs, aws.ToString(v.AllocationId)) } } diff --git a/internal/service/ec2/ec2_fleet.go b/internal/service/ec2/ec2_fleet.go index 07b8e397bd13..735f1bc6c32e 100644 --- a/internal/service/ec2/ec2_fleet.go +++ b/internal/service/ec2/ec2_fleet.go @@ -6,13 +6,11 @@ package ec2 import ( "context" "errors" - "fmt" "log" "strconv" "time" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/arn" "github.com/aws/aws-sdk-go-v2/service/ec2" awstypes "github.com/aws/aws-sdk-go-v2/service/ec2/types" "github.com/hashicorp/aws-sdk-go-base/v2/tfawserr" @@ -808,7 +806,8 @@ func resourceFleetCreate(ctx context.Context, d *schema.ResourceData, meta any) func resourceFleetRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).EC2Client(ctx) + c := meta.(*conns.AWSClient) + conn := c.EC2Client(ctx) fleet, err := findFleetByID(ctx, conn, d.Id()) @@ -822,14 +821,7 @@ func resourceFleetRead(ctx 
context.Context, d *schema.ResourceData, meta any) di return sdkdiag.AppendErrorf(diags, "reading EC2 Fleet (%s): %s", d.Id(), err) } - arn := arn.ARN{ - Partition: meta.(*conns.AWSClient).Partition(ctx), - Service: names.EC2, - Region: meta.(*conns.AWSClient).Region(ctx), - AccountID: meta.(*conns.AWSClient).AccountID(ctx), - Resource: fmt.Sprintf("fleet/%s", d.Id()), - }.String() - d.Set(names.AttrARN, arn) + d.Set(names.AttrARN, fleetARN(ctx, c, d.Id())) d.Set("context", fleet.Context) d.Set("excess_capacity_termination_policy", fleet.ExcessCapacityTerminationPolicy) if fleet.Instances != nil { @@ -1626,3 +1618,6 @@ func flattenTargetCapacitySpecification(apiObject *awstypes.TargetCapacitySpecif return tfMap } +func fleetARN(ctx context.Context, c *conns.AWSClient, fleetID string) string { + return c.RegionalARN(ctx, names.EC2, "fleet/"+fleetID) +} diff --git a/internal/service/ec2/ec2_host.go b/internal/service/ec2/ec2_host.go index aab08293a773..e5a3f31479a3 100644 --- a/internal/service/ec2/ec2_host.go +++ b/internal/service/ec2/ec2_host.go @@ -5,12 +5,10 @@ package ec2 import ( "context" - "fmt" "log" "time" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/arn" "github.com/aws/aws-sdk-go-v2/service/ec2" awstypes "github.com/aws/aws-sdk-go-v2/service/ec2/types" "github.com/hashicorp/aws-sdk-go-base/v2/tfawserr" @@ -145,7 +143,8 @@ func resourceHostCreate(ctx context.Context, d *schema.ResourceData, meta any) d func resourceHostRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).EC2Client(ctx) + c := meta.(*conns.AWSClient) + conn := c.EC2Client(ctx) host, err := findHostByID(ctx, conn, d.Id()) @@ -159,14 +158,7 @@ func resourceHostRead(ctx context.Context, d *schema.ResourceData, meta any) dia return sdkdiag.AppendErrorf(diags, "reading EC2 Host (%s): %s", d.Id(), err) } - arn := arn.ARN{ - Partition: meta.(*conns.AWSClient).Partition(ctx), - Service: 
names.EC2, - Region: meta.(*conns.AWSClient).Region(ctx), - AccountID: aws.ToString(host.OwnerId), - Resource: fmt.Sprintf("dedicated-host/%s", d.Id()), - }.String() - d.Set(names.AttrARN, arn) + d.Set(names.AttrARN, hostARN(ctx, c, aws.ToString(host.OwnerId), d.Id())) d.Set("asset_id", host.AssetId) d.Set("auto_placement", host.AutoPlacement) d.Set(names.AttrAvailabilityZone, host.AvailabilityZone) @@ -252,3 +244,6 @@ func resourceHostDelete(ctx context.Context, d *schema.ResourceData, meta any) d return diags } +func hostARN(ctx context.Context, c *conns.AWSClient, ownerID, hostID string) string { + return c.RegionalARNWithAccount(ctx, names.EC2, ownerID, "dedicated-host/"+hostID) +} diff --git a/internal/service/ec2/ec2_host_data_source.go b/internal/service/ec2/ec2_host_data_source.go index 7a54386e5bb0..d17fe7b9cda9 100644 --- a/internal/service/ec2/ec2_host_data_source.go +++ b/internal/service/ec2/ec2_host_data_source.go @@ -5,11 +5,9 @@ package ec2 import ( "context" - "fmt" "time" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/arn" "github.com/aws/aws-sdk-go-v2/service/ec2" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" @@ -93,7 +91,8 @@ func dataSourceHost() *schema.Resource { func dataSourceHostRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).EC2Client(ctx) + c := meta.(*conns.AWSClient) + conn := c.EC2Client(ctx) input := ec2.DescribeHostsInput{ Filter: newCustomFilterList(d.Get(names.AttrFilter).(*schema.Set)), @@ -115,14 +114,7 @@ func dataSourceHostRead(ctx context.Context, d *schema.ResourceData, meta any) d } d.SetId(aws.ToString(host.HostId)) - arn := arn.ARN{ - Partition: meta.(*conns.AWSClient).Partition(ctx), - Service: names.EC2, - Region: meta.(*conns.AWSClient).Region(ctx), - AccountID: aws.ToString(host.OwnerId), - Resource: fmt.Sprintf("dedicated-host/%s", 
d.Id()), - }.String() - d.Set(names.AttrARN, arn) + d.Set(names.AttrARN, hostARN(ctx, c, aws.ToString(host.OwnerId), d.Id())) d.Set("asset_id", host.AssetId) d.Set("auto_placement", host.AutoPlacement) d.Set(names.AttrAvailabilityZone, host.AvailabilityZone) diff --git a/internal/service/ec2/ec2_image_block_public_access.go b/internal/service/ec2/ec2_image_block_public_access.go index 290e7d881935..619174714a4e 100644 --- a/internal/service/ec2/ec2_image_block_public_access.go +++ b/internal/service/ec2/ec2_image_block_public_access.go @@ -10,7 +10,7 @@ import ( "time" "github.com/aws/aws-sdk-go-v2/service/ec2" - "github.com/aws/aws-sdk-go-v2/service/ec2/types" + awstypes "github.com/aws/aws-sdk-go-v2/service/ec2/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" @@ -58,7 +58,7 @@ func resourceImageBlockPublicAccessPut(ctx context.Context, d *schema.ResourceDa if slices.Contains(imageBlockPublicAccessEnabledState_Values(), state) { input := ec2.EnableImageBlockPublicAccessInput{ - ImageBlockPublicAccessState: types.ImageBlockPublicAccessEnabledState(state), + ImageBlockPublicAccessState: awstypes.ImageBlockPublicAccessEnabledState(state), } _, err := conn.EnableImageBlockPublicAccess(ctx, &input) @@ -109,11 +109,11 @@ func resourceImageBlockPublicAccessRead(ctx context.Context, d *schema.ResourceD } func imageBlockPublicAccessDisabledState_Values() []string { - return enum.Values[types.ImageBlockPublicAccessDisabledState]() + return enum.Values[awstypes.ImageBlockPublicAccessDisabledState]() } func imageBlockPublicAccessEnabledState_Values() []string { - return enum.Values[types.ImageBlockPublicAccessEnabledState]() + return enum.Values[awstypes.ImageBlockPublicAccessEnabledState]() } func imageBlockPublicAccessState_Values() []string { diff --git a/internal/service/ec2/ec2_image_block_public_access_identity_gen_test.go 
b/internal/service/ec2/ec2_image_block_public_access_identity_gen_test.go index 4b24260e248f..4b567858db27 100644 --- a/internal/service/ec2/ec2_image_block_public_access_identity_gen_test.go +++ b/internal/service/ec2/ec2_image_block_public_access_identity_gen_test.go @@ -8,11 +8,13 @@ import ( "github.com/hashicorp/terraform-plugin-testing/config" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/knownvalue" + "github.com/hashicorp/terraform-plugin-testing/plancheck" "github.com/hashicorp/terraform-plugin-testing/statecheck" "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" "github.com/hashicorp/terraform-plugin-testing/tfversion" "github.com/hashicorp/terraform-provider-aws/internal/acctest" tfknownvalue "github.com/hashicorp/terraform-provider-aws/internal/acctest/knownvalue" + tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -20,8 +22,9 @@ func testAccEC2ImageBlockPublicAccess_IdentitySerial(t *testing.T) { t.Helper() testCases := map[string]func(t *testing.T){ - acctest.CtBasic: testAccEC2ImageBlockPublicAccess_Identity_Basic, - "ExistingResource": testAccEC2ImageBlockPublicAccess_Identity_ExistingResource, + acctest.CtBasic: testAccEC2ImageBlockPublicAccess_Identity_Basic, + "ExistingResource": testAccEC2ImageBlockPublicAccess_Identity_ExistingResource, + "ExistingResourceNoRefresh": testAccEC2ImageBlockPublicAccess_Identity_ExistingResource_NoRefresh_NoChange, } acctest.RunSerialTests1Level(t, testCases, 0) @@ -29,9 +32,10 @@ func testAccEC2ImageBlockPublicAccess_IdentitySerial(t *testing.T) { func testAccEC2ImageBlockPublicAccess_Identity_Basic(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_ec2_image_block_public_access.test" - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ TerraformVersionChecks: []tfversion.TerraformVersionCheck{ 
tfversion.SkipBelow(tfversion.Version1_12_0), }, @@ -54,3 +58,104 @@ func testAccEC2ImageBlockPublicAccess_Identity_Basic(t *testing.T) { }, }) } + +func testAccEC2ImageBlockPublicAccess_Identity_ExistingResource(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_ec2_image_block_public_access.test" + + acctest.Test(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), + CheckDestroy: acctest.CheckDestroyNoop, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/ImageBlockPublicAccess/basic_v5.100.0/"), + ConfigVariables: config.Variables{}, + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: v6.0 Identity error + { + ConfigDirectory: config.StaticDirectory("testdata/ImageBlockPublicAccess/basic_v6.0.0/"), + ConfigVariables: config.Variables{}, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: knownvalue.Null(), + }), + }, + }, + + // Step 3: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/ImageBlockPublicAccess/basic/"), + ConfigVariables: config.Variables{}, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: 
[]plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + }), + }, + }, + }, + }) +} + +func testAccEC2ImageBlockPublicAccess_Identity_ExistingResource_NoRefresh_NoChange(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_ec2_image_block_public_access.test" + + acctest.Test(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), + CheckDestroy: acctest.CheckDestroyNoop, + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/ImageBlockPublicAccess/basic_v5.100.0/"), + ConfigVariables: config.Variables{}, + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/ImageBlockPublicAccess/basic/"), + ConfigVariables: config.Variables{}, + }, + }, + }) +} diff --git a/internal/service/ec2/ec2_image_block_public_access_test.go b/internal/service/ec2/ec2_image_block_public_access_test.go index a15441655620..7768d85967d2 100644 --- a/internal/service/ec2/ec2_image_block_public_access_test.go +++ b/internal/service/ec2/ec2_image_block_public_access_test.go @@ -8,13 +8,7 @@ import ( "testing" "github.com/hashicorp/terraform-plugin-testing/helper/resource" - "github.com/hashicorp/terraform-plugin-testing/knownvalue" - "github.com/hashicorp/terraform-plugin-testing/plancheck" - 
"github.com/hashicorp/terraform-plugin-testing/statecheck" - "github.com/hashicorp/terraform-plugin-testing/tfversion" "github.com/hashicorp/terraform-provider-aws/internal/acctest" - tfknownvalue "github.com/hashicorp/terraform-provider-aws/internal/acctest/knownvalue" - tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -56,73 +50,6 @@ func testAccImageBlockPublicAccess_basic(t *testing.T) { }) } -func testAccEC2ImageBlockPublicAccess_Identity_ExistingResource(t *testing.T) { - ctx := acctest.Context(t) - resourceName := "aws_ec2_image_block_public_access.test" - - resource.Test(t, resource.TestCase{ - TerraformVersionChecks: []tfversion.TerraformVersionCheck{ - tfversion.SkipBelow(tfversion.Version1_12_0), - }, - PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), - CheckDestroy: acctest.CheckDestroyNoop, - Steps: []resource.TestStep{ - { - ExternalProviders: map[string]resource.ExternalProvider{ - "aws": { - Source: "hashicorp/aws", - VersionConstraint: "5.100.0", - }, - }, - Config: testAccImageBlockPublicAccessConfig_basic("block-new-sharing"), - ConfigStateChecks: []statecheck.StateCheck{ - tfstatecheck.ExpectNoIdentity(resourceName), - }, - }, - { - ExternalProviders: map[string]resource.ExternalProvider{ - "aws": { - Source: "hashicorp/aws", - VersionConstraint: "6.0.0", - }, - }, - Config: testAccImageBlockPublicAccessConfig_basic("block-new-sharing"), - ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - PostApplyPostRefresh: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - }, - ConfigStateChecks: []statecheck.StateCheck{ - statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ - names.AttrAccountID: knownvalue.Null(), - }), - }, 
- }, - { - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - Config: testAccImageBlockPublicAccessConfig_basic("block-new-sharing"), - ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - PostApplyPostRefresh: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - }, - ConfigStateChecks: []statecheck.StateCheck{ - statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ - names.AttrAccountID: tfknownvalue.AccountID(), - }), - }, - }, - }, - }) -} - func testAccImageBlockPublicAccessConfig_basic(state string) string { return fmt.Sprintf(` resource "aws_ec2_image_block_public_access" "test" { diff --git a/internal/service/ec2/ec2_instance.go b/internal/service/ec2/ec2_instance.go index c326912e663d..0589bbd1a25e 100644 --- a/internal/service/ec2/ec2_instance.go +++ b/internal/service/ec2/ec2_instance.go @@ -19,16 +19,19 @@ import ( "github.com/YakDriver/regexache" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/arn" "github.com/aws/aws-sdk-go-v2/service/ec2" awstypes "github.com/aws/aws-sdk-go-v2/service/ec2/types" "github.com/hashicorp/aws-sdk-go-base/v2/tfawserr" "github.com/hashicorp/go-cty/cty" + frameworkdiag "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/list" + listschema "github.com/hashicorp/terraform-plugin-framework/list/schema" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/types" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" 
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/backoff" @@ -36,8 +39,12 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/create" "github.com/hashicorp/terraform-provider-aws/internal/enum" "github.com/hashicorp/terraform-provider-aws/internal/errs" + "github.com/hashicorp/terraform-provider-aws/internal/errs/fwdiag" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" "github.com/hashicorp/terraform-provider-aws/internal/flex" + "github.com/hashicorp/terraform-provider-aws/internal/framework" + fwflex "github.com/hashicorp/terraform-provider-aws/internal/framework/flex" + "github.com/hashicorp/terraform-provider-aws/internal/provider/sdkv2/importer" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" itypes "github.com/hashicorp/terraform-provider-aws/internal/types" @@ -47,9 +54,13 @@ import ( // @SDKResource("aws_instance", name="Instance") // @Tags(identifierAttribute="id") +// @IdentityAttribute("id") +// @CustomImport // @Testing(existsType="github.com/aws/aws-sdk-go-v2/service/ec2/types;awstypes;awstypes.Instance") // @Testing(importIgnore="user_data_replace_on_change") // @Testing(generator=false) +// @Testing(preIdentityVersion="v6.10.0") +// @Testing(plannableImportAction="NoOp") func resourceInstance() *schema.Resource { //lintignore:R011 return &schema.Resource{ @@ -59,7 +70,15 @@ func resourceInstance() *schema.Resource { DeleteWithoutTimeout: resourceInstanceDelete, Importer: &schema.ResourceImporter{ - StateContext: schema.ImportStatePassthroughContext, + StateContext: func(ctx context.Context, rd *schema.ResourceData, meta any) ([]*schema.ResourceData, error) { + identitySpec := importer.IdentitySpec(ctx) + if err := importer.RegionalSingleParameterized(ctx, rd, identitySpec, meta.(importer.AWSClient)); err != nil { + return nil, err + } + + 
rd.Set(names.AttrForceDestroy, false) + return []*schema.ResourceData{rd}, nil + }, }, SchemaVersion: 2, @@ -359,6 +378,11 @@ func resourceInstance() *schema.Resource { return create.StringHashcode(buf.String()) }, }, + names.AttrForceDestroy: { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, "get_password_data": { Type: schema.TypeBool, Optional: true, @@ -379,7 +403,7 @@ func resourceInstance() *schema.Resource { Type: schema.TypeString, Optional: true, Computed: true, - ConflictsWith: []string{"placement_group"}, + ConflictsWith: []string{"placement_group", "placement_group_id"}, }, "iam_instance_profile": { Type: schema.TypeString, @@ -477,7 +501,6 @@ func resourceInstance() *schema.Resource { Type: schema.TypeList, Optional: true, Computed: true, - ForceNew: true, Elem: &schema.Schema{ Type: schema.TypeString, ValidateFunc: validation.IsIPv6Address, @@ -585,7 +608,7 @@ func resourceInstance() *schema.Resource { Computed: true, }, "network_interface": { - ConflictsWith: []string{"associate_public_ip_address", "enable_primary_ipv6", names.AttrSubnetID, "private_ip", "secondary_private_ips", names.AttrVPCSecurityGroupIDs, names.AttrSecurityGroups, "ipv6_addresses", "ipv6_address_count", "source_dest_check"}, + ConflictsWith: []string{"associate_public_ip_address", "enable_primary_ipv6", "ipv6_addresses", "ipv6_address_count", "primary_network_interface", "private_ip", "secondary_private_ips", names.AttrSecurityGroups, "source_dest_check", names.AttrSubnetID, names.AttrVPCSecurityGroupIDs}, Type: schema.TypeSet, Optional: true, Computed: true, @@ -602,6 +625,7 @@ func resourceInstance() *schema.Resource { Required: true, ForceNew: true, }, + // Note: Changes to `network_interface.network_card_index` should be mirrored in `aws_spot_instance_request` "network_card_index": { Type: schema.TypeInt, Optional: true, @@ -615,6 +639,7 @@ func resourceInstance() *schema.Resource { }, }, }, + Deprecated: "network_interface is deprecated. 
To specify the primary network interface, use primary_network_interface instead. To attach additional network interfaces, use the aws_network_interface_attachment resource.", }, "outpost_arn": { Type: schema.TypeString, @@ -629,7 +654,14 @@ func resourceInstance() *schema.Resource { Optional: true, Computed: true, ForceNew: true, - ConflictsWith: []string{"host_resource_group_arn"}, + ConflictsWith: []string{"host_resource_group_arn", "placement_group_id"}, + }, + "placement_group_id": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ConflictsWith: []string{"host_resource_group_arn", "placement_group"}, }, "placement_partition_number": { Type: schema.TypeInt, @@ -641,6 +673,27 @@ func resourceInstance() *schema.Resource { Type: schema.TypeString, Computed: true, }, + "primary_network_interface": { + // Note: Changes to `primary_network_interface` should be mirrored in `aws_spot_instance_request` + ConflictsWith: []string{"associate_public_ip_address", "enable_primary_ipv6", "ipv6_addresses", "ipv6_address_count", "network_interface", "private_ip", "secondary_private_ips", names.AttrSecurityGroups, "source_dest_check", names.AttrSubnetID, names.AttrVPCSecurityGroupIDs}, + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + names.AttrDeleteOnTermination: { + Type: schema.TypeBool, + Computed: true, + }, + names.AttrNetworkInterfaceID: { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + }, + }, + }, "private_dns": { Type: schema.TypeString, Computed: true, @@ -980,6 +1033,23 @@ func resourceInstance() *schema.Resource { return true }), + customdiff.ComputedIf("ipv6_addresses", func(_ context.Context, diff *schema.ResourceDiff, meta any) bool { + return diff.HasChange("ipv6_address_count") + }), + customdiff.ForceNewIf("ipv6_addresses", func(_ context.Context, diff *schema.ResourceDiff, meta any) bool { + if 
!diff.HasChange("ipv6_addresses") { + return false + } + + // Don't force new if ipv6_address_count is also changing + // This indicates the ipv6_addresses change is computed, not user-driven + if diff.HasChange("ipv6_address_count") { + return false + } + + // Force new only for explicit user changes to ipv6_addresses + return true + }), ), } } @@ -1000,6 +1070,14 @@ func throughputDiffSuppressFunc(k, old, new string, d *schema.ResourceData) bool return strings.ToLower(v) != string(awstypes.VolumeTypeGp3) && new == "0" } +// @SDKListResource("aws_instance") +func instanceResourceAsListResource() itypes.ListResourceForSDK { + l := instanceListResource{} + l.SetResourceSchema(resourceInstance()) + + return &l +} + func resourceInstanceCreate(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { var diags diag.Diagnostics conn := meta.(*conns.AWSClient).EC2Client(ctx) @@ -1153,575 +1231,200 @@ func resourceInstanceCreate(ctx context.Context, d *schema.ResourceData, meta an return append(diags, resourceInstanceUpdate(ctx, d, meta)...) 
} -func resourceInstanceRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { +func resourceInstanceRead(ctx context.Context, rd *schema.ResourceData, meta any) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).EC2Client(ctx) + c := meta.(*conns.AWSClient) + conn := c.EC2Client(ctx) - instance, err := findInstanceByID(ctx, conn, d.Id()) + instance, err := findInstanceByID(ctx, conn, rd.Id()) - if !d.IsNewResource() && tfresource.NotFound(err) { - log.Printf("[WARN] EC2 Instance %s not found, removing from state", d.Id()) - d.SetId("") + if !rd.IsNewResource() && tfresource.NotFound(err) { + log.Printf("[WARN] EC2 Instance %s not found, removing from state", rd.Id()) + rd.SetId("") return diags } if err != nil { - return sdkdiag.AppendErrorf(diags, "reading EC2 Instance (%s): %s", d.Id(), err) + return sdkdiag.AppendErrorf(diags, "reading EC2 Instance (%s): %s", rd.Id(), err) } - instanceType := string(instance.InstanceType) - instanceTypeInfo, err := findInstanceTypeByName(ctx, conn, instanceType) + diags = append(diags, resourceInstanceFlatten(ctx, c, instance, rd)...) 
- if err != nil { - return sdkdiag.AppendErrorf(diags, "reading EC2 Instance Type (%s): %s", instanceType, err) - } + return diags +} - d.Set("instance_state", instance.State.Name) +func resourceInstanceUpdate(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { + var diags diag.Diagnostics + conn := meta.(*conns.AWSClient).EC2Client(ctx) - if v := instance.Placement; v != nil { - d.Set(names.AttrAvailabilityZone, v.AvailabilityZone) + if d.HasChange("volume_tags") && !d.IsNewResource() { + volIDs, err := getInstanceVolIDs(ctx, conn, d.Id()) + if err != nil { + return sdkdiag.AppendErrorf(diags, "updating EC2 Instance (%s): %s", d.Id(), err) + } - d.Set("placement_group", v.GroupName) + o, n := d.GetChange("volume_tags") - d.Set("host_id", v.HostId) + for _, volID := range volIDs { + if err := updateTags(ctx, conn, volID, o, n); err != nil { + return sdkdiag.AppendErrorf(diags, "updating volume_tags (%s): %s", volID, err) + } + } + } - if v := v.HostResourceGroupArn; v != nil { - d.Set("host_resource_group_arn", instance.Placement.HostResourceGroupArn) + if d.HasChange("iam_instance_profile") && !d.IsNewResource() { + input := ec2.DescribeIamInstanceProfileAssociationsInput{ + Filters: []awstypes.Filter{ + { + Name: aws.String("instance-id"), + Values: []string{d.Id()}, + }, + }, } - d.Set("placement_partition_number", v.PartitionNumber) + resp, err := conn.DescribeIamInstanceProfileAssociations(ctx, &input) + if err != nil { + return sdkdiag.AppendErrorf(diags, "updating EC2 Instance (%s): %s", d.Id(), err) + } - d.Set("tenancy", v.Tenancy) - } + // An Iam Instance Profile has been provided and is pending a change + // This means it is an association or a replacement to an association + if _, ok := d.GetOk("iam_instance_profile"); ok { + // Does not have an Iam Instance Profile associated with it, need to associate + if len(resp.IamInstanceProfileAssociations) == 0 { + if err := associateInstanceProfile(ctx, d, conn); err != nil { + return 
sdkdiag.AppendErrorf(diags, "updating EC2 Instance (%s): %s", d.Id(), err) + } + } else { + // Has an Iam Instance Profile associated with it, need to replace the association + associationId := resp.IamInstanceProfileAssociations[0].AssociationId + input := ec2.ReplaceIamInstanceProfileAssociationInput{ + AssociationId: associationId, + IamInstanceProfile: &awstypes.IamInstanceProfileSpecification{ + Name: aws.String(d.Get("iam_instance_profile").(string)), + }, + } - if err := d.Set("cpu_options", flattenCPUOptions(instance.CpuOptions)); err != nil { - return sdkdiag.AppendErrorf(diags, "setting cpu_options: %s", err) - } + // If the instance is running, we can replace the instance profile association. + // If it is stopped, the association must be removed and the new one attached separately. (GH-8262) + instanceState := awstypes.InstanceStateName(d.Get("instance_state").(string)) - if v := instance.HibernationOptions; v != nil { - d.Set("hibernation", v.Configured) - } + if instanceState != "" { + if instanceState == awstypes.InstanceStateNameStopped || instanceState == awstypes.InstanceStateNameStopping || instanceState == awstypes.InstanceStateNameShuttingDown { + if err := disassociateInstanceProfile(ctx, associationId, conn); err != nil { + return sdkdiag.AppendErrorf(diags, "updating EC2 Instance (%s): %s", d.Id(), err) + } + if err := associateInstanceProfile(ctx, d, conn); err != nil { + return sdkdiag.AppendErrorf(diags, "updating EC2 Instance (%s): %s", d.Id(), err) + } + } else { + err := tfresource.Retry(ctx, iamPropagationTimeout, func(ctx context.Context) *tfresource.RetryError { + _, err := conn.ReplaceIamInstanceProfileAssociation(ctx, &input) + if err != nil { + if tfawserr.ErrMessageContains(err, "InvalidParameterValue", "Invalid IAM Instance Profile") { + return tfresource.RetryableError(err) + } + return tfresource.NonRetryableError(err) + } + return nil + }) - if err := d.Set("enclave_options", flattenEnclaveOptions(instance.EnclaveOptions)); 
err != nil { - return sdkdiag.AppendErrorf(diags, "setting enclave_options: %s", err) - } + if err != nil { + return sdkdiag.AppendErrorf(diags, "updating EC2 Instance (%s): replacing instance profile: %s", d.Id(), err) + } + } + } + } + // An Iam Instance Profile has _not_ been provided but is pending a change. This means there is a pending removal + } else { + if len(resp.IamInstanceProfileAssociations) > 0 { + // Has an Iam Instance Profile associated with it, need to remove the association + associationId := resp.IamInstanceProfileAssociations[0].AssociationId + if err := disassociateInstanceProfile(ctx, associationId, conn); err != nil { + return sdkdiag.AppendErrorf(diags, "updating EC2 Instance (%s): %s", d.Id(), err) + } + } + } - if instance.MaintenanceOptions != nil { - if err := d.Set("maintenance_options", []any{flattenInstanceMaintenanceOptions(instance.MaintenanceOptions)}); err != nil { - return sdkdiag.AppendErrorf(diags, "setting maintenance_options: %s", err) + if _, err := waitInstanceIAMInstanceProfileUpdated(ctx, conn, d.Id(), d.Get("iam_instance_profile").(string)); err != nil { + return sdkdiag.AppendErrorf(diags, "waiting for EC2 Instance (%s) IAM Instance Profile update: %s", d.Id(), err) } - } else { - d.Set("maintenance_options", nil) } - if err := d.Set("metadata_options", flattenInstanceMetadataOptions(instance.MetadataOptions)); err != nil { - return sdkdiag.AppendErrorf(diags, "setting metadata_options: %s", err) - } + // SourceDestCheck can only be modified on an instance without manually specified network interfaces. 
+ // SourceDestCheck, in that case, is configured at the network interface level + if _, ok := d.GetOk("network_interface"); !ok { + // If we have a new resource and source_dest_check is still true, don't modify + sourceDestCheck := d.Get("source_dest_check").(bool) - if instance.PrivateDnsNameOptions != nil { - if err := d.Set("private_dns_name_options", []any{flattenPrivateDNSNameOptionsResponse(instance.PrivateDnsNameOptions)}); err != nil { - return sdkdiag.AppendErrorf(diags, "setting private_dns_name_options: %s", err) - } - } else { - d.Set("private_dns_name_options", nil) - } + // Because we're calling Update prior to Read, and the default value of `source_dest_check` is `true`, + // HasChange() thinks there is a diff between what is set on the instance and what is set in state. We need to ensure that + // if a diff has occurred, it's not because it's a new instance. + if d.HasChange("source_dest_check") && !d.IsNewResource() || d.IsNewResource() && !sourceDestCheck { + input := ec2.ModifyInstanceAttributeInput{ + InstanceId: aws.String(d.Id()), + SourceDestCheck: &awstypes.AttributeBooleanValue{ + Value: aws.Bool(sourceDestCheck), + }, + } - d.Set("ami", instance.ImageId) - d.Set(names.AttrInstanceType, instanceType) - d.Set("key_name", instance.KeyName) - d.Set("public_dns", instance.PublicDnsName) - d.Set("public_ip", instance.PublicIpAddress) - d.Set("private_dns", instance.PrivateDnsName) - d.Set("private_ip", instance.PrivateIpAddress) - d.Set("outpost_arn", instance.OutpostArn) + _, err := conn.ModifyInstanceAttribute(ctx, &input) - if instance.IamInstanceProfile != nil && instance.IamInstanceProfile.Arn != nil { - name, err := instanceProfileARNToName(aws.ToString(instance.IamInstanceProfile.Arn)) + if err != nil { + return sdkdiag.AppendErrorf(diags, "modifying EC2 Instance (%s) SourceDestCheck attribute: %s", d.Id(), err) + } + } + } + if d.HasChange("enable_primary_ipv6") && !d.IsNewResource() { + instance, err := FindInstanceByID(ctx, conn, 
d.Id()) if err != nil { - return sdkdiag.AppendErrorf(diags, "setting iam_instance_profile: %s", err) + return sdkdiag.AppendErrorf(diags, "reading EC2 Instance (%s): %s", d.Id(), err) } - d.Set("iam_instance_profile", name) - } else { - d.Set("iam_instance_profile", nil) - } + var primaryInterface *awstypes.InstanceNetworkInterface + for _, ni := range instance.NetworkInterfaces { + if aws.ToInt32(ni.Attachment.DeviceIndex) == 0 { + primaryInterface = &ni + } + } - { - launchTemplate, err := flattenInstanceLaunchTemplate(ctx, conn, d.Id(), d.Get("launch_template.0.version").(string)) + if primaryInterface == nil { + return sdkdiag.AppendErrorf(diags, "modifying EC2 Instance (%s), enable_primary_ipv6, which does not contain a primary network interface", d.Id()) + } - if err != nil { - return sdkdiag.AppendErrorf(diags, "reading EC2 Instance (%s) launch template: %s", d.Id(), err) + enablePrimaryIpv6 := d.Get("enable_primary_ipv6").(bool) + + input := ec2.ModifyNetworkInterfaceAttributeInput{ + NetworkInterfaceId: primaryInterface.NetworkInterfaceId, + EnablePrimaryIpv6: aws.Bool(enablePrimaryIpv6), } - if err := d.Set(names.AttrLaunchTemplate, launchTemplate); err != nil { - return sdkdiag.AppendErrorf(diags, "setting launch_template: %s", err) + _, err = conn.ModifyNetworkInterfaceAttribute(ctx, &input) + if err != nil { + return sdkdiag.AppendErrorf(diags, "modifying EC2 Instance (%s) primary network interface: %s", d.Id(), err) } } - // Set configured Network Interface Device Index Slice - // We only want to read, and populate state for the configured network_interface attachments. Otherwise, other - // resources have the potential to attach network interfaces to the instance, and cause a perpetual create/destroy - // diff. We should only read on changes configured for this specific resource because of this. 
- var configuredDeviceIndexes []int - if v, ok := d.GetOk("network_interface"); ok { - vL := v.(*schema.Set).List() - for _, vi := range vL { - mVi := vi.(map[string]any) - configuredDeviceIndexes = append(configuredDeviceIndexes, mVi["device_index"].(int)) + if d.HasChange("ipv6_address_count") && !d.IsNewResource() { + instance, err := findInstanceByID(ctx, conn, d.Id()) + if err != nil { + return sdkdiag.AppendErrorf(diags, "reading EC2 Instance (%s): %s", d.Id(), err) } - } - var secondaryPrivateIPs []string - var ipv6Addresses []string - if len(instance.NetworkInterfaces) > 0 { - var primaryNetworkInterface awstypes.InstanceNetworkInterface - var networkInterfaces []map[string]any - for _, iNi := range instance.NetworkInterfaces { - ni := make(map[string]any) - if aws.ToInt32(iNi.Attachment.DeviceIndex) == 0 { - primaryNetworkInterface = iNi - } - // If the attached network device is inside our configuration, refresh state with values found. - // Otherwise, assume the network device was attached via an outside resource. 
- for _, index := range configuredDeviceIndexes { - if index == int(aws.ToInt32(iNi.Attachment.DeviceIndex)) { - ni["device_index"] = aws.ToInt32(iNi.Attachment.DeviceIndex) - ni["network_card_index"] = aws.ToInt32(iNi.Attachment.NetworkCardIndex) - ni[names.AttrNetworkInterfaceID] = aws.ToString(iNi.NetworkInterfaceId) - ni[names.AttrDeleteOnTermination] = aws.ToBool(iNi.Attachment.DeleteOnTermination) - } - } - // Don't add empty network interfaces to schema - if len(ni) == 0 { - continue + var primaryInterface awstypes.InstanceNetworkInterface + for _, ni := range instance.NetworkInterfaces { + if aws.ToInt32(ni.Attachment.DeviceIndex) == 0 { + primaryInterface = ni } - networkInterfaces = append(networkInterfaces, ni) } - if err := d.Set("network_interface", networkInterfaces); err != nil { - return sdkdiag.AppendErrorf(diags, "setting network_interfaces: %v", err) + + if primaryInterface.NetworkInterfaceId == nil { + return sdkdiag.AppendErrorf(diags, "Failed to update ipv6_address_count on %q, which does not contain a primary network interface", d.Id()) } - // Set primary network interface details - // If an instance is shutting down, network interfaces are detached, and attributes may be nil, - // need to protect against nil pointer dereferences - if primaryNetworkInterface.NetworkInterfaceId != nil { - if primaryNetworkInterface.SubnetId != nil { // nosemgrep: ci.helper-schema-ResourceData-Set-extraneous-nil-check - d.Set(names.AttrSubnetID, primaryNetworkInterface.SubnetId) - } - if primaryNetworkInterface.NetworkInterfaceId != nil { // nosemgrep: ci.helper-schema-ResourceData-Set-extraneous-nil-check - d.Set("primary_network_interface_id", primaryNetworkInterface.NetworkInterfaceId) - } - d.Set("ipv6_address_count", len(primaryNetworkInterface.Ipv6Addresses)) - if primaryNetworkInterface.SourceDestCheck != nil { // nosemgrep: ci.helper-schema-ResourceData-Set-extraneous-nil-check - d.Set("source_dest_check", primaryNetworkInterface.SourceDestCheck) - } - 
- d.Set("associate_public_ip_address", primaryNetworkInterface.Association != nil) - - for _, address := range primaryNetworkInterface.PrivateIpAddresses { - if !aws.ToBool(address.Primary) { - secondaryPrivateIPs = append(secondaryPrivateIPs, aws.ToString(address.PrivateIpAddress)) - } - } - - for _, address := range primaryNetworkInterface.Ipv6Addresses { - ipv6Addresses = append(ipv6Addresses, aws.ToString(address.Ipv6Address)) - } - - if len(primaryNetworkInterface.Ipv6Addresses) > 0 { - if err := d.Set("enable_primary_ipv6", primaryNetworkInterface.Ipv6Addresses[0].IsPrimaryIpv6); err != nil { - return sdkdiag.AppendErrorf(diags, "setting enable_primary_ipv6: %s", err) - } - } - } - } else { - d.Set("associate_public_ip_address", instance.PublicIpAddress != nil) - d.Set("ipv6_address_count", 0) - d.Set("primary_network_interface_id", "") - d.Set(names.AttrSubnetID, instance.SubnetId) - } - - if err := d.Set("secondary_private_ips", secondaryPrivateIPs); err != nil { - return sdkdiag.AppendErrorf(diags, "setting private_ips for AWS Instance (%s): %s", d.Id(), err) - } - - if err := d.Set("ipv6_addresses", ipv6Addresses); err != nil { - log.Printf("[WARN] Error setting ipv6_addresses for AWS Instance (%s): %s", d.Id(), err) - } - - d.Set("ebs_optimized", instance.EbsOptimized) - if aws.ToString(instance.SubnetId) != "" { - d.Set("source_dest_check", instance.SourceDestCheck) - } - - if instance.Monitoring != nil && instance.Monitoring.State != "" { - monitoringState := instance.Monitoring.State - d.Set("monitoring", monitoringState == awstypes.MonitoringStateEnabled || monitoringState == awstypes.MonitoringStatePending) - } - - setTagsOut(ctx, instance.Tags) - - if _, ok := d.GetOk("volume_tags"); ok && !blockDeviceTagsDefined(d) { - volumeTags, err := readVolumeTags(ctx, conn, d.Id()) - if err != nil { - return sdkdiag.AppendErrorf(diags, "reading EC2 Instance (%s): %s", d.Id(), err) - } - - defaultTagsConfig := meta.(*conns.AWSClient).DefaultTagsConfig(ctx) - 
ignoreTagsConfig := meta.(*conns.AWSClient).IgnoreTagsConfig(ctx) - tags := keyValueTags(ctx, volumeTags).IgnoreAWS().IgnoreConfig(ignoreTagsConfig) - - if err := d.Set("volume_tags", tags.ResolveDuplicates(ctx, defaultTagsConfig, ignoreTagsConfig, d, "volume_tags", nil).Map()); err != nil { - return sdkdiag.AppendErrorf(diags, "setting volume_tags: %s", err) - } - } - - if err := readSecurityGroups(ctx, d, instance, conn); err != nil { - return sdkdiag.AppendErrorf(diags, "reading EC2 Instance (%s): %s", d.Id(), err) - } - - // Retrieve instance shutdown behavior - if err := readInstanceShutdownBehavior(ctx, d, conn); err != nil { - return sdkdiag.AppendErrorf(diags, "reading EC2 Instance (%s): %s", d.Id(), err) - } - - if err := readBlockDevices(ctx, d, meta, instance, false); err != nil { - return sdkdiag.AppendErrorf(diags, "reading EC2 Instance (%s): %s", d.Id(), err) - } - - if _, ok := d.GetOk("ephemeral_block_device"); !ok { - d.Set("ephemeral_block_device", []any{}) - } - - // ARN - - arn := arn.ARN{ - Partition: meta.(*conns.AWSClient).Partition(ctx), - Region: meta.(*conns.AWSClient).Region(ctx), - Service: names.EC2, - AccountID: meta.(*conns.AWSClient).AccountID(ctx), - Resource: fmt.Sprintf("instance/%s", d.Id()), - } - d.Set(names.AttrARN, arn.String()) - - // Instance attributes - { - input := ec2.DescribeInstanceAttributeInput{ - Attribute: awstypes.InstanceAttributeNameDisableApiStop, - InstanceId: aws.String(d.Id()), - } - attr, err := conn.DescribeInstanceAttribute(ctx, &input) - if err != nil && !errs.IsUnsupportedOperationInPartitionError(meta.(*conns.AWSClient).Partition(ctx), err) { - return sdkdiag.AppendErrorf(diags, "getting attribute (%s): %s", awstypes.InstanceAttributeNameDisableApiStop, err) - } - if !errs.IsUnsupportedOperationInPartitionError(meta.(*conns.AWSClient).Partition(ctx), err) { - d.Set("disable_api_stop", attr.DisableApiStop.Value) - } - } - { - if isSnowballEdgeInstance(d.Id()) { - log.Printf("[INFO] Determined deploying 
to Snowball Edge based off Instance ID %s. Skip setting the 'disable_api_termination' attribute.", d.Id()) - } else { - input := ec2.DescribeInstanceAttributeInput{ - Attribute: awstypes.InstanceAttributeNameDisableApiTermination, - InstanceId: aws.String(d.Id()), - } - output, err := conn.DescribeInstanceAttribute(ctx, &input) - - if err != nil { - return sdkdiag.AppendErrorf(diags, "getting attribute (%s): %s", awstypes.InstanceAttributeNameDisableApiTermination, err) - } - - d.Set("disable_api_termination", output.DisableApiTermination.Value) - } - } - { - input := ec2.DescribeInstanceAttributeInput{ - Attribute: awstypes.InstanceAttributeNameUserData, - InstanceId: aws.String(d.Id()), - } - attr, err := conn.DescribeInstanceAttribute(ctx, &input) - if err != nil { - return sdkdiag.AppendErrorf(diags, "getting attribute (%s): %s", awstypes.InstanceAttributeNameUserData, err) - } - if attr.UserData != nil && attr.UserData.Value != nil { - // Since user_data and user_data_base64 conflict with each other, - // we'll only set one or the other here to avoid a perma-diff. - // Since user_data_base64 was added later, we'll prefer to set - // user_data. - _, b64 := d.GetOk("user_data_base64") - if b64 { - d.Set("user_data_base64", attr.UserData.Value) - } else { - data, err := itypes.Base64Decode(aws.ToString(attr.UserData.Value)) - if err != nil { - return sdkdiag.AppendErrorf(diags, "decoding user_data: %s", err) - } - d.Set("user_data", string(data)) - } - } - } - - // AWS Standard will return InstanceCreditSpecification.NotSupported errors for EC2 Instance IDs outside T2 and T3 instance types - // Reference: https://github.com/hashicorp/terraform-provider-aws/issues/8055 - if aws.ToBool(instanceTypeInfo.BurstablePerformanceSupported) { - instanceCreditSpecification, err := findInstanceCreditSpecificationByID(ctx, conn, d.Id()) - - // Ignore UnsupportedOperation errors for AWS China and GovCloud (US). 
- // Reference: https://github.com/hashicorp/terraform-provider-aws/pull/4362. - if tfawserr.ErrCodeEquals(err, errCodeUnsupportedOperation) { - err = nil - } - - if err != nil { - return sdkdiag.AppendErrorf(diags, "reading EC2 Instance (%s) credit specification: %s", d.Id(), err) - } - - if instanceCreditSpecification != nil { - if err := d.Set("credit_specification", []any{flattenInstanceCreditSpecification(instanceCreditSpecification)}); err != nil { - return sdkdiag.AppendErrorf(diags, "setting credit_specification: %s", err) - } - } else { - d.Set("credit_specification", nil) - } - } - - if d.Get("get_password_data").(bool) { - passwordData, err := getInstancePasswordData(ctx, aws.ToString(instance.InstanceId), conn, d.Timeout(schema.TimeoutRead)) - if err != nil { - return sdkdiag.AppendErrorf(diags, "reading EC2 Instance (%s): %s", d.Id(), err) - } - d.Set("password_data", passwordData) - } else { - d.Set("get_password_data", false) - d.Set("password_data", nil) - } - - if instance.CapacityReservationSpecification != nil { - if err := d.Set("capacity_reservation_specification", []any{flattenCapacityReservationSpecificationResponse(instance.CapacityReservationSpecification)}); err != nil { - return sdkdiag.AppendErrorf(diags, "setting capacity_reservation_specification: %s", err) - } - } else { - d.Set("capacity_reservation_specification", nil) - } - - if spotInstanceRequestID := aws.ToString(instance.SpotInstanceRequestId); spotInstanceRequestID != "" && instance.InstanceLifecycle != "" { - d.Set("instance_lifecycle", instance.InstanceLifecycle) - d.Set("spot_instance_request_id", spotInstanceRequestID) - - input := ec2.DescribeSpotInstanceRequestsInput{ - SpotInstanceRequestIds: []string{spotInstanceRequestID}, - } - - apiObject, err := findSpotInstanceRequest(ctx, conn, &input) - - if err != nil { - return sdkdiag.AppendErrorf(diags, "reading EC2 Spot Instance Request (%s): %s", spotInstanceRequestID, err) - } - - tfMap := map[string]any{ - 
"instance_interruption_behavior": apiObject.InstanceInterruptionBehavior, - "spot_instance_type": apiObject.Type, - } - - if v := apiObject.SpotPrice; v != nil { - tfMap["max_price"] = aws.ToString(v) - } - - if v := apiObject.ValidUntil; v != nil { - tfMap["valid_until"] = aws.ToTime(v).Format(time.RFC3339) - } - - if err := d.Set("instance_market_options", []any{map[string]any{ - "market_type": awstypes.MarketTypeSpot, - "spot_options": []any{tfMap}, - }}); err != nil { - return sdkdiag.AppendErrorf(diags, "setting instance_market_options: %s", err) - } - } else { - d.Set("instance_lifecycle", nil) - d.Set("instance_market_options", nil) - d.Set("spot_instance_request_id", nil) - } - - return diags -} - -func resourceInstanceUpdate(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { - var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).EC2Client(ctx) - - if d.HasChange("volume_tags") && !d.IsNewResource() { - volIDs, err := getInstanceVolIDs(ctx, conn, d.Id()) - if err != nil { - return sdkdiag.AppendErrorf(diags, "updating EC2 Instance (%s): %s", d.Id(), err) - } - - o, n := d.GetChange("volume_tags") - - for _, volID := range volIDs { - if err := updateTags(ctx, conn, volID, o, n); err != nil { - return sdkdiag.AppendErrorf(diags, "updating volume_tags (%s): %s", volID, err) - } - } - } - - if d.HasChange("iam_instance_profile") && !d.IsNewResource() { - input := ec2.DescribeIamInstanceProfileAssociationsInput{ - Filters: []awstypes.Filter{ - { - Name: aws.String("instance-id"), - Values: []string{d.Id()}, - }, - }, - } - - resp, err := conn.DescribeIamInstanceProfileAssociations(ctx, &input) - if err != nil { - return sdkdiag.AppendErrorf(diags, "updating EC2 Instance (%s): %s", d.Id(), err) - } - - // An Iam Instance Profile has been provided and is pending a change - // This means it is an association or a replacement to an association - if _, ok := d.GetOk("iam_instance_profile"); ok { - // Does not have an Iam Instance 
Profile associated with it, need to associate - if len(resp.IamInstanceProfileAssociations) == 0 { - if err := associateInstanceProfile(ctx, d, conn); err != nil { - return sdkdiag.AppendErrorf(diags, "updating EC2 Instance (%s): %s", d.Id(), err) - } - } else { - // Has an Iam Instance Profile associated with it, need to replace the association - associationId := resp.IamInstanceProfileAssociations[0].AssociationId - input := ec2.ReplaceIamInstanceProfileAssociationInput{ - AssociationId: associationId, - IamInstanceProfile: &awstypes.IamInstanceProfileSpecification{ - Name: aws.String(d.Get("iam_instance_profile").(string)), - }, - } - - // If the instance is running, we can replace the instance profile association. - // If it is stopped, the association must be removed and the new one attached separately. (GH-8262) - instanceState := awstypes.InstanceStateName(d.Get("instance_state").(string)) - - if instanceState != "" { - if instanceState == awstypes.InstanceStateNameStopped || instanceState == awstypes.InstanceStateNameStopping || instanceState == awstypes.InstanceStateNameShuttingDown { - if err := disassociateInstanceProfile(ctx, associationId, conn); err != nil { - return sdkdiag.AppendErrorf(diags, "updating EC2 Instance (%s): %s", d.Id(), err) - } - if err := associateInstanceProfile(ctx, d, conn); err != nil { - return sdkdiag.AppendErrorf(diags, "updating EC2 Instance (%s): %s", d.Id(), err) - } - } else { - err := retry.RetryContext(ctx, iamPropagationTimeout, func() *retry.RetryError { - _, err := conn.ReplaceIamInstanceProfileAssociation(ctx, &input) - if err != nil { - if tfawserr.ErrMessageContains(err, "InvalidParameterValue", "Invalid IAM Instance Profile") { - return retry.RetryableError(err) - } - return retry.NonRetryableError(err) - } - return nil - }) - if tfresource.TimedOut(err) { - _, err = conn.ReplaceIamInstanceProfileAssociation(ctx, &input) - } - if err != nil { - return sdkdiag.AppendErrorf(diags, "updating EC2 Instance (%s): 
replacing instance profile: %s", d.Id(), err) - } - } - } - } - // An Iam Instance Profile has _not_ been provided but is pending a change. This means there is a pending removal - } else { - if len(resp.IamInstanceProfileAssociations) > 0 { - // Has an Iam Instance Profile associated with it, need to remove the association - associationId := resp.IamInstanceProfileAssociations[0].AssociationId - if err := disassociateInstanceProfile(ctx, associationId, conn); err != nil { - return sdkdiag.AppendErrorf(diags, "updating EC2 Instance (%s): %s", d.Id(), err) - } - } - } - - if _, err := waitInstanceIAMInstanceProfileUpdated(ctx, conn, d.Id(), d.Get("iam_instance_profile").(string)); err != nil { - return sdkdiag.AppendErrorf(diags, "waiting for EC2 Instance (%s) IAM Instance Profile update: %s", d.Id(), err) - } - } - - // SourceDestCheck can only be modified on an instance without manually specified network interfaces. - // SourceDestCheck, in that case, is configured at the network interface level - if _, ok := d.GetOk("network_interface"); !ok { - // If we have a new resource and source_dest_check is still true, don't modify - sourceDestCheck := d.Get("source_dest_check").(bool) - - // Because we're calling Update prior to Read, and the default value of `source_dest_check` is `true`, - // HasChange() thinks there is a diff between what is set on the instance and what is set in state. We need to ensure that - // if a diff has occurred, it's not because it's a new instance. 
- if d.HasChange("source_dest_check") && !d.IsNewResource() || d.IsNewResource() && !sourceDestCheck { - input := ec2.ModifyInstanceAttributeInput{ - InstanceId: aws.String(d.Id()), - SourceDestCheck: &awstypes.AttributeBooleanValue{ - Value: aws.Bool(sourceDestCheck), - }, - } - - _, err := conn.ModifyInstanceAttribute(ctx, &input) - - if err != nil { - return sdkdiag.AppendErrorf(diags, "modifying EC2 Instance (%s) SourceDestCheck attribute: %s", d.Id(), err) - } - } - } - - if d.HasChange("enable_primary_ipv6") && !d.IsNewResource() { - instance, err := FindInstanceByID(ctx, conn, d.Id()) - if err != nil { - return sdkdiag.AppendErrorf(diags, "reading EC2 Instance (%s): %s", d.Id(), err) - } - - var primaryInterface *awstypes.InstanceNetworkInterface - for _, ni := range instance.NetworkInterfaces { - if aws.ToInt32(ni.Attachment.DeviceIndex) == 0 { - primaryInterface = &ni - } - } - - if primaryInterface == nil { - return sdkdiag.AppendErrorf(diags, "modifying EC2 Instance (%s), enable_primary_ipv6, which does not contain a primary network interface", d.Id()) - } - - enablePrimaryIpv6 := d.Get("enable_primary_ipv6").(bool) - - input := ec2.ModifyNetworkInterfaceAttributeInput{ - NetworkInterfaceId: primaryInterface.NetworkInterfaceId, - EnablePrimaryIpv6: aws.Bool(enablePrimaryIpv6), - } - - _, err = conn.ModifyNetworkInterfaceAttribute(ctx, &input) - if err != nil { - return sdkdiag.AppendErrorf(diags, "modifying EC2 Instance (%s) primary network interface: %s", d.Id(), err) - } - } - - if d.HasChange("ipv6_address_count") && !d.IsNewResource() { - instance, err := findInstanceByID(ctx, conn, d.Id()) - if err != nil { - return sdkdiag.AppendErrorf(diags, "reading EC2 Instance (%s): %s", d.Id(), err) - } - - var primaryInterface awstypes.InstanceNetworkInterface - for _, ni := range instance.NetworkInterfaces { - if aws.ToInt32(ni.Attachment.DeviceIndex) == 0 { - primaryInterface = ni - } - } - - if primaryInterface.NetworkInterfaceId == nil { - return 
sdkdiag.AppendErrorf(diags, "Failed to update ipv6_address_count on %q, which does not contain a primary network interface", d.Id()) - } - - o, n := d.GetChange("ipv6_address_count") - os, ns := o.(int), n.(int) + o, n := d.GetChange("ipv6_address_count") + os, ns := o.(int), n.(int) if ns > os { // Add more to the primary NIC. @@ -2223,12 +1926,12 @@ func resourceInstanceDelete(ctx context.Context, d *schema.ResourceData, meta an var diags diag.Diagnostics conn := meta.(*conns.AWSClient).EC2Client(ctx) - if err := disableInstanceAPITermination(ctx, conn, d.Id(), false); err != nil { - log.Printf("[WARN] attempting to terminate EC2 Instance (%s) despite error disabling API termination: %s", d.Id(), err) - } + if d.Get(names.AttrForceDestroy).(bool) { + if err := disableInstanceAPITermination(ctx, conn, d.Id(), false); err != nil { + log.Printf("[WARN] attempting to terminate EC2 Instance (%s) despite error disabling API termination: %s", d.Id(), err) + } - if v, ok := d.GetOk("disable_api_stop"); ok { - if err := disableInstanceAPIStop(ctx, conn, d.Id(), v.(bool)); err != nil { + if err := disableInstanceAPIStop(ctx, conn, d.Id(), false); err != nil { log.Printf("[WARN] attempting to terminate EC2 Instance (%s) despite error disabling API stop: %s", d.Id(), err) } } @@ -2268,7 +1971,7 @@ func disableInstanceAPIStop(ctx context.Context, conn *ec2.Client, id string, di } if err != nil { - return fmt.Errorf("modifying EC2 Instance (%s) DisableApiStop attribute: %s", id, err) + return fmt.Errorf("modifying EC2 Instance (%s) DisableApiStop attribute: %w", id, err) } return nil @@ -2290,7 +1993,7 @@ func disableInstanceAPITermination(ctx context.Context, conn *ec2.Client, id str } if err != nil { - return fmt.Errorf("modifying EC2 Instance (%s) DisableApiTermination attribute: %s", id, err) + return fmt.Errorf("modifying EC2 Instance (%s) DisableApiTermination attribute: %w", id, err) } return nil @@ -2318,7 +2021,7 @@ func modifyInstanceAttributeWithStopStart(ctx 
context.Context, conn *ec2.Client, return nil } -func readBlockDevices(ctx context.Context, d *schema.ResourceData, meta any, instance *awstypes.Instance, ds bool) error { +func readBlockDevices(ctx context.Context, d *schema.ResourceData, meta *conns.AWSClient, instance *awstypes.Instance, ds bool) error { ibds, err := readBlockDevicesFromInstance(ctx, d, meta, instance, ds) if err != nil { return fmt.Errorf("reading block devices: %w", err) @@ -2368,7 +2071,7 @@ func readBlockDevices(ctx context.Context, d *schema.ResourceData, meta any, ins return nil } -func readBlockDevicesFromInstance(ctx context.Context, d *schema.ResourceData, meta any, instance *awstypes.Instance, ds bool) (map[string]any, error) { +func readBlockDevicesFromInstance(ctx context.Context, d *schema.ResourceData, meta *conns.AWSClient, instance *awstypes.Instance, ds bool) (map[string]any, error) { blockDevices := make(map[string]any) blockDevices["ebs"] = make([]map[string]any, 0) blockDevices["root"] = nil @@ -2392,7 +2095,7 @@ func readBlockDevicesFromInstance(ctx context.Context, d *schema.ResourceData, m // Need to call DescribeVolumes to get volume_size and volume_type for each // EBS block device - conn := meta.(*conns.AWSClient).EC2Client(ctx) + conn := meta.EC2Client(ctx) input := ec2.DescribeVolumesInput{ VolumeIds: volIDs, } @@ -2401,8 +2104,8 @@ func readBlockDevicesFromInstance(ctx context.Context, d *schema.ResourceData, m return nil, err } - defaultTagsConfig := meta.(*conns.AWSClient).DefaultTagsConfig(ctx) - ignoreTagsConfig := meta.(*conns.AWSClient).IgnoreTagsConfig(ctx) + defaultTagsConfig := meta.DefaultTagsConfig(ctx) + ignoreTagsConfig := meta.IgnoreTagsConfig(ctx) for _, vol := range volResp.Volumes { instanceBd := instanceBlockDevices[aws.ToString(vol.VolumeId)] @@ -2501,22 +2204,21 @@ func associateInstanceProfile(ctx context.Context, d *schema.ResourceData, conn Name: aws.String(d.Get("iam_instance_profile").(string)), }, } - err := retry.RetryContext(ctx, 
iamPropagationTimeout, func() *retry.RetryError { + err := tfresource.Retry(ctx, iamPropagationTimeout, func(ctx context.Context) *tfresource.RetryError { _, err := conn.AssociateIamInstanceProfile(ctx, &input) if err != nil { if tfawserr.ErrMessageContains(err, "InvalidParameterValue", "Invalid IAM Instance Profile") { - return retry.RetryableError(err) + return tfresource.RetryableError(err) } - return retry.NonRetryableError(err) + return tfresource.NonRetryableError(err) } return nil }) - if tfresource.TimedOut(err) { - _, err = conn.AssociateIamInstanceProfile(ctx, &input) - } + if err != nil { - return fmt.Errorf("associating instance profile: %s", err) + return fmt.Errorf("associating instance profile: %w", err) } + return nil } @@ -2578,8 +2280,9 @@ func findRootDeviceName(ctx context.Context, conn *ec2.Client, amiID string) (*s return rootDeviceName, nil } -func buildNetworkInterfaceOpts(d *schema.ResourceData, groups []string, nInterfaces any) []awstypes.InstanceNetworkInterfaceSpecification { - networkInterfaces := []awstypes.InstanceNetworkInterfaceSpecification{} +func buildNetworkInterfaceOpts(d *schema.ResourceData, groups []string, nInterfaces any, primaryNetworkInterface any) []awstypes.InstanceNetworkInterfaceSpecification { + var networkInterfaces []awstypes.InstanceNetworkInterfaceSpecification + // Get necessary items subnet, hasSubnet := d.GetOk(names.AttrSubnetID) @@ -2631,19 +2334,29 @@ func buildNetworkInterfaceOpts(d *schema.ResourceData, groups []string, nInterfa } networkInterfaces = append(networkInterfaces, ni) - } else { + } else if nInterfaces != nil && nInterfaces.(*schema.Set).Len() > 0 { // If we have manually specified network interfaces, build and attach those here. 
- vL := nInterfaces.(*schema.Set).List() - for _, v := range vL { - ini := v.(map[string]any) - ni := awstypes.InstanceNetworkInterfaceSpecification{ - DeviceIndex: aws.Int32(int32(ini["device_index"].(int))), - NetworkCardIndex: aws.Int32(int32(ini["network_card_index"].(int))), - NetworkInterfaceId: aws.String(ini[names.AttrNetworkInterfaceID].(string)), - DeleteOnTermination: aws.Bool(ini[names.AttrDeleteOnTermination].(bool)), + tfList := nInterfaces.(*schema.Set).List() + for _, tfMapRaw := range tfList { + tfMap := tfMapRaw.(map[string]any) + apiObject := awstypes.InstanceNetworkInterfaceSpecification{ + DeleteOnTermination: aws.Bool(tfMap[names.AttrDeleteOnTermination].(bool)), + DeviceIndex: aws.Int32(int32(tfMap["device_index"].(int))), + NetworkInterfaceId: aws.String(tfMap[names.AttrNetworkInterfaceID].(string)), } - networkInterfaces = append(networkInterfaces, ni) + if v, ok := tfMap["network_card_index"]; ok && v != 0 { + apiObject.NetworkCardIndex = aws.Int32(int32(v.(int))) + } + networkInterfaces = append(networkInterfaces, apiObject) } + } else { + v := primaryNetworkInterface.([]any) + ini := v[0].(map[string]any) + ni := awstypes.InstanceNetworkInterfaceSpecification{ + DeviceIndex: aws.Int32(0), + NetworkInterfaceId: aws.String(ini[names.AttrNetworkInterfaceID].(string)), + } + networkInterfaces = append(networkInterfaces, ni) } return networkInterfaces @@ -2821,7 +2534,7 @@ func readBlockDeviceMappingsFromConfig(ctx context.Context, d *schema.ResourceDa func readVolumeTags(ctx context.Context, conn *ec2.Client, instanceId string) ([]awstypes.Tag, error) { volIDs, err := getInstanceVolIDs(ctx, conn, instanceId) if err != nil { - return nil, fmt.Errorf("getting tags for volumes (%s): %s", volIDs, err) + return nil, fmt.Errorf("getting tags for volumes (%s): %w", volIDs, err) } input := ec2.DescribeTagsInput{ @@ -2831,7 +2544,7 @@ func readVolumeTags(ctx context.Context, conn *ec2.Client, instanceId string) ([ } resp, err := 
conn.DescribeTags(ctx, &input) if err != nil { - return nil, fmt.Errorf("getting tags for volumes (%s): %s", volIDs, err) + return nil, fmt.Errorf("getting tags for volumes (%s): %w", volIDs, err) } return tagsFromTagDescriptions(resp.Tags), nil @@ -2917,16 +2630,16 @@ func getInstancePasswordData(ctx context.Context, instanceID string, conn *ec2.C input := ec2.GetPasswordDataInput{ InstanceId: aws.String(instanceID), } - err := retry.RetryContext(ctx, timeout, func() *retry.RetryError { + err := tfresource.Retry(ctx, timeout, func(ctx context.Context) *tfresource.RetryError { var err error resp, err = conn.GetPasswordData(ctx, &input) if err != nil { - return retry.NonRetryableError(err) + return tfresource.NonRetryableError(err) } if resp.PasswordData == nil || aws.ToString(resp.PasswordData) == "" { - return retry.RetryableError(fmt.Errorf("Password data is blank for instance ID: %s", instanceID)) + return tfresource.RetryableError(fmt.Errorf("Password data is blank for instance ID: %s", instanceID)) } passwordData = strings.TrimSpace(aws.ToString(resp.PasswordData)) @@ -2934,16 +2647,7 @@ func getInstancePasswordData(ctx context.Context, instanceID string, conn *ec2.C log.Printf("[INFO] Password data read for instance %s", instanceID) return nil }) - if tfresource.TimedOut(err) { - resp, err = conn.GetPasswordData(ctx, &input) - if err != nil { - return "", fmt.Errorf("getting password data: %s", err) - } - if resp.PasswordData == nil || aws.ToString(resp.PasswordData) == "" { - return "", errors.New("password data is blank") - } - passwordData = strings.TrimSpace(aws.ToString(resp.PasswordData)) - } + if err != nil { return "", err } @@ -3007,7 +2711,7 @@ func buildInstanceOpts(ctx context.Context, d *schema.ResourceData, meta any) (* opts.InstanceType = awstypes.InstanceType(v.(string)) } - var instanceInterruptionBehavior string + var instanceInterruptionBehavior awstypes.InstanceInterruptionBehavior if v, ok := d.GetOk(names.AttrLaunchTemplate); ok && 
len(v.([]any)) > 0 && v.([]any)[0] != nil { launchTemplateSpecification := expandLaunchTemplateSpecification(v.([]any)[0].(map[string]any)) @@ -3020,7 +2724,7 @@ func buildInstanceOpts(ctx context.Context, d *schema.ResourceData, meta any) (* opts.LaunchTemplate = launchTemplateSpecification if launchTemplateData.InstanceMarketOptions != nil && launchTemplateData.InstanceMarketOptions.SpotOptions != nil { - instanceInterruptionBehavior = string(launchTemplateData.InstanceMarketOptions.SpotOptions.InstanceInterruptionBehavior) + instanceInterruptionBehavior = launchTemplateData.InstanceMarketOptions.SpotOptions.InstanceInterruptionBehavior } } @@ -3094,11 +2798,17 @@ func buildInstanceOpts(ctx context.Context, d *schema.ResourceData, meta any) (* AvailabilityZone: aws.String(d.Get(names.AttrAvailabilityZone).(string)), } - if v, ok := d.GetOk("placement_group"); ok && (instanceInterruptionBehavior == "" || instanceInterruptionBehavior == string(awstypes.InstanceInterruptionBehaviorTerminate)) { + if v, ok := d.GetOk("placement_group"); ok && (instanceInterruptionBehavior == "" || instanceInterruptionBehavior == awstypes.InstanceInterruptionBehaviorTerminate) { opts.Placement.GroupName = aws.String(v.(string)) opts.SpotPlacement.GroupName = aws.String(v.(string)) } + if v, ok := d.GetOk("placement_group_id"); ok && (instanceInterruptionBehavior == "" || instanceInterruptionBehavior == awstypes.InstanceInterruptionBehaviorTerminate) { + opts.Placement.GroupId = aws.String(v.(string)) + // AWS SDK missing groupID in type for spotplacement + // opts.SpotPlacement.GroupId = aws.String(v.(string)) + } + if v, ok := d.GetOk("tenancy"); ok { opts.Placement.Tenancy = awstypes.Tenancy(v.(string)) } @@ -3139,11 +2849,12 @@ func buildInstanceOpts(ctx context.Context, d *schema.ResourceData, meta any) (* _, privIP := d.GetOk("private_ip") _, secPrivIP := d.GetOk("secondary_private_ips") networkInterfaces, interfacesOk := d.GetOk("network_interface") + primaryNetworkInterface, 
primaryNetworkInterfaceOk := d.GetOk("primary_network_interface") // If setting subnet and public address, OR manual network interfaces, populate those now. - if (hasSubnet && (assocPubIPA || privIP || secPrivIP)) || interfacesOk { + if (hasSubnet && (assocPubIPA || privIP || secPrivIP)) || interfacesOk || primaryNetworkInterfaceOk { // Otherwise we're attaching (a) network interface(s) - opts.NetworkInterfaces = buildNetworkInterfaceOpts(d, groups, networkInterfaces) + opts.NetworkInterfaces = buildNetworkInterfaceOpts(d, groups, networkInterfaces, primaryNetworkInterface) } else { // If simply specifying a subnetID, privateIP, Security Groups, or VPC Security Groups, build these now if subnetID != "" { @@ -3227,7 +2938,7 @@ func startInstance(ctx context.Context, conn *ec2.Client, id string, retry bool, if retry { // Reference: https://github.com/hashicorp/terraform-provider-aws/issues/16433. _, err = tfresource.RetryWhenAWSErrMessageContains(ctx, ec2PropagationTimeout, - func() (any, error) { + func(ctx context.Context) (any, error) { return conn.StartInstances(ctx, &ec2.StartInstancesInput{ InstanceIds: []string{id}, }) @@ -3298,215 +3009,467 @@ func terminateInstance(ctx context.Context, conn *ec2.Client, id string, timeout return nil } -func waitInstanceCreated(ctx context.Context, conn *ec2.Client, id string, timeout time.Duration) (*awstypes.Instance, error) { - stateConf := &retry.StateChangeConf{ - Pending: enum.Slice(awstypes.InstanceStateNamePending), - Target: enum.Slice(awstypes.InstanceStateNameRunning), - Refresh: statusInstance(ctx, conn, id), - Timeout: timeout, - Delay: 10 * time.Second, - MinTimeout: 3 * time.Second, +func userDataHashSum(userData string) string { + // Check whether the user_data is not Base64 encoded. + // Always calculate hash of base64 decoded value since we + // check against double-encoding when setting it. 
+ v, err := itypes.Base64Decode(userData) + if err != nil { + v = []byte(userData) } - outputRaw, err := stateConf.WaitForStateContext(ctx) + hash := sha1.Sum(v) + return hex.EncodeToString(hash[:]) +} + +func getInstanceVolIDs(ctx context.Context, conn *ec2.Client, instanceId string) ([]string, error) { + volIDs := []string{} - if output, ok := outputRaw.(*awstypes.Instance); ok { - if stateReason := output.StateReason; stateReason != nil { - tfresource.SetLastError(err, errors.New(aws.ToString(stateReason.Message))) - } + input := ec2.DescribeVolumesInput{ + Filters: newAttributeFilterList(map[string]string{ + "attachment.instance-id": instanceId, + }), + } + resp, err := conn.DescribeVolumes(ctx, &input) + if err != nil { + return nil, fmt.Errorf("getting volumes: %w", err) + } - return output, err + for _, v := range resp.Volumes { + volIDs = append(volIDs, aws.ToString(v.VolumeId)) } - return nil, err + return volIDs, nil } -func waitInstanceDeleted(ctx context.Context, conn *ec2.Client, id string, timeout time.Duration) (*awstypes.Instance, error) { - stateConf := &retry.StateChangeConf{ - Pending: enum.Slice( - awstypes.InstanceStateNamePending, - awstypes.InstanceStateNameRunning, - awstypes.InstanceStateNameShuttingDown, - awstypes.InstanceStateNameStopping, - awstypes.InstanceStateNameStopped, - ), - Target: enum.Slice(awstypes.InstanceStateNameTerminated), - Refresh: statusInstance(ctx, conn, id), - Timeout: timeout, - Delay: 10 * time.Second, - MinTimeout: 3 * time.Second, +func getRootVolID(instance *awstypes.Instance) string { + volID := "" + for _, bd := range instance.BlockDeviceMappings { + if bd.Ebs != nil && blockDeviceIsRoot(bd, instance) { + if bd.Ebs.VolumeId != nil { + volID = aws.ToString(bd.Ebs.VolumeId) + } + break + } + } + + return volID +} + +func getVolIDByDeviceName(instance *awstypes.Instance, deviceName string) string { + volID := "" + for _, bd := range instance.BlockDeviceMappings { + if aws.ToString(bd.DeviceName) == deviceName { 
+ if bd.Ebs != nil { + volID = aws.ToString(bd.Ebs.VolumeId) + break + } + } } - outputRaw, err := stateConf.WaitForStateContext(ctx) + return volID +} - if output, ok := outputRaw.(*awstypes.Instance); ok { - if stateReason := output.StateReason; stateReason != nil { - tfresource.SetLastError(err, errors.New(aws.ToString(stateReason.Message))) +func blockDeviceTagsDefined(d *schema.ResourceData) bool { + if v, ok := d.GetOk("root_block_device"); ok { + vL := v.([]any) + for _, v := range vL { + bd := v.(map[string]any) + if blockDeviceTags, ok := bd[names.AttrTags].(map[string]any); ok && len(blockDeviceTags) > 0 { + return true + } } + } - return output, err + if v, ok := d.GetOk("ebs_block_device"); ok { + vL := v.(*schema.Set).List() + for _, v := range vL { + bd := v.(map[string]any) + if blockDeviceTags, ok := bd[names.AttrTags].(map[string]any); ok && len(blockDeviceTags) > 0 { + return true + } + } } - return nil, err + return false } -func waitInstanceReady(ctx context.Context, conn *ec2.Client, id string, timeout time.Duration) (*awstypes.Instance, error) { - stateConf := &retry.StateChangeConf{ - Pending: enum.Slice(awstypes.InstanceStateNamePending, awstypes.InstanceStateNameStopping), - Target: enum.Slice(awstypes.InstanceStateNameRunning, awstypes.InstanceStateNameStopped), - Refresh: statusInstance(ctx, conn, id), - Timeout: timeout, - Delay: 10 * time.Second, - MinTimeout: 3 * time.Second, +func resourceInstanceFlatten(ctx context.Context, client *conns.AWSClient, instance *awstypes.Instance, rd *schema.ResourceData) diag.Diagnostics { + var diags diag.Diagnostics + + conn := client.EC2Client(ctx) + + instanceType := string(instance.InstanceType) + instanceTypeInfo, err := findInstanceTypeByName(ctx, conn, instanceType) + + if err != nil { + return sdkdiag.AppendErrorf(diags, "reading EC2 Instance Type (%s): %s", instanceType, err) + } + + rd.Set("instance_state", instance.State.Name) + + if v := instance.Placement; v != nil { + 
rd.Set(names.AttrAvailabilityZone, v.AvailabilityZone) + rd.Set("host_id", v.HostId) + if v := v.HostResourceGroupArn; v != nil { + rd.Set("host_resource_group_arn", instance.Placement.HostResourceGroupArn) + } + rd.Set("placement_group", v.GroupName) + rd.Set("placement_group_id", v.GroupId) + rd.Set("placement_partition_number", v.PartitionNumber) + rd.Set("tenancy", v.Tenancy) + } + + if err := rd.Set("cpu_options", flattenCPUOptions(instance.CpuOptions)); err != nil { + return sdkdiag.AppendErrorf(diags, "setting cpu_options: %s", err) + } + + if v := instance.HibernationOptions; v != nil { + rd.Set("hibernation", v.Configured) + } + + if err := rd.Set("enclave_options", flattenEnclaveOptions(instance.EnclaveOptions)); err != nil { + return sdkdiag.AppendErrorf(diags, "setting enclave_options: %s", err) + } + + if instance.MaintenanceOptions != nil { + if err := rd.Set("maintenance_options", []any{flattenInstanceMaintenanceOptions(instance.MaintenanceOptions)}); err != nil { + return sdkdiag.AppendErrorf(diags, "setting maintenance_options: %s", err) + } + } else { + rd.Set("maintenance_options", nil) + } + + if err := rd.Set("metadata_options", flattenInstanceMetadataOptions(instance.MetadataOptions)); err != nil { + return sdkdiag.AppendErrorf(diags, "setting metadata_options: %s", err) + } + + if instance.PrivateDnsNameOptions != nil { + if err := rd.Set("private_dns_name_options", []any{flattenPrivateDNSNameOptionsResponse(instance.PrivateDnsNameOptions)}); err != nil { + return sdkdiag.AppendErrorf(diags, "setting private_dns_name_options: %s", err) + } + } else { + rd.Set("private_dns_name_options", nil) + } + + rd.Set("ami", instance.ImageId) + rd.Set(names.AttrInstanceType, instanceType) + rd.Set("key_name", instance.KeyName) + rd.Set("public_dns", instance.PublicDnsName) + rd.Set("public_ip", instance.PublicIpAddress) + rd.Set("private_dns", instance.PrivateDnsName) + rd.Set("private_ip", instance.PrivateIpAddress) + rd.Set("outpost_arn", 
instance.OutpostArn) + + if instance.IamInstanceProfile != nil && instance.IamInstanceProfile.Arn != nil { + name, err := instanceProfileARNToName(aws.ToString(instance.IamInstanceProfile.Arn)) + + if err != nil { + return sdkdiag.AppendErrorf(diags, "setting iam_instance_profile: %s", err) + } + + rd.Set("iam_instance_profile", name) + } else { + rd.Set("iam_instance_profile", nil) + } + + { + launchTemplate, err := flattenInstanceLaunchTemplate(ctx, conn, rd.Id(), rd.Get("launch_template.0.version").(string)) + + if err != nil { + return sdkdiag.AppendErrorf(diags, "reading EC2 Instance (%s) launch template: %s", rd.Id(), err) + } + + if err := rd.Set(names.AttrLaunchTemplate, launchTemplate); err != nil { + return sdkdiag.AppendErrorf(diags, "setting launch_template: %s", err) + } + } + + // Set configured Network Interface Device Index Slice + // We only want to read, and populate state for the configured network_interface attachments. Otherwise, other + // resources have the potential to attach network interfaces to the instance, and cause a perpetual create/destroy + // diff. We should only read on changes configured for this specific resource because of this. + var configuredDeviceIndexes []int + if v, ok := rd.GetOk("network_interface"); ok { + vL := v.(*schema.Set).List() + for _, vi := range vL { + mVi := vi.(map[string]any) + configuredDeviceIndexes = append(configuredDeviceIndexes, mVi["device_index"].(int)) + } + } + + var secondaryPrivateIPs []string + var ipv6Addresses []string + if len(instance.NetworkInterfaces) > 0 { + var primaryNetworkInterface awstypes.InstanceNetworkInterface + var networkInterfaces []map[string]any + for _, iNi := range instance.NetworkInterfaces { + ni := make(map[string]any) + if aws.ToInt32(iNi.Attachment.DeviceIndex) == 0 { + primaryNetworkInterface = iNi + } + // If the attached network device is inside our configuration, refresh state with values found. 
+ // Otherwise, assume the network device was attached via an outside resource. + for _, index := range configuredDeviceIndexes { + if index == int(aws.ToInt32(iNi.Attachment.DeviceIndex)) { + ni[names.AttrDeleteOnTermination] = aws.ToBool(iNi.Attachment.DeleteOnTermination) + ni["device_index"] = aws.ToInt32(iNi.Attachment.DeviceIndex) + ni["network_card_index"] = aws.ToInt32(iNi.Attachment.NetworkCardIndex) + ni[names.AttrNetworkInterfaceID] = aws.ToString(iNi.NetworkInterfaceId) + } + } + // Don't add empty network interfaces to schema + if len(ni) == 0 { + continue + } + networkInterfaces = append(networkInterfaces, ni) + } + if err := rd.Set("network_interface", networkInterfaces); err != nil { + return sdkdiag.AppendErrorf(diags, "setting network_interfaces: %v", err) + } + + // Set primary network interface details + // If an instance is shutting down, network interfaces are detached, and attributes may be nil, + // need to protect against nil pointer dereferences + if primaryNetworkInterface.NetworkInterfaceId != nil { + pni := map[string]any{ + names.AttrNetworkInterfaceID: aws.ToString(primaryNetworkInterface.NetworkInterfaceId), + names.AttrDeleteOnTermination: aws.ToBool(primaryNetworkInterface.Attachment.DeleteOnTermination), + } + if err := rd.Set("primary_network_interface", []any{pni}); err != nil { + return sdkdiag.AppendErrorf(diags, "setting primary_network_interface for AWS Instance (%s): %s", rd.Id(), err) + } + + rd.Set("primary_network_interface_id", primaryNetworkInterface.NetworkInterfaceId) + if primaryNetworkInterface.SubnetId != nil { // nosemgrep: ci.helper-schema-ResourceData-Set-extraneous-nil-check + rd.Set(names.AttrSubnetID, primaryNetworkInterface.SubnetId) + } + rd.Set("ipv6_address_count", len(primaryNetworkInterface.Ipv6Addresses)) + if primaryNetworkInterface.SourceDestCheck != nil { // nosemgrep: ci.helper-schema-ResourceData-Set-extraneous-nil-check + rd.Set("source_dest_check", primaryNetworkInterface.SourceDestCheck) + } + 
+ rd.Set("associate_public_ip_address", primaryNetworkInterface.Association != nil) + + for _, address := range primaryNetworkInterface.PrivateIpAddresses { + if !aws.ToBool(address.Primary) { + secondaryPrivateIPs = append(secondaryPrivateIPs, aws.ToString(address.PrivateIpAddress)) + } + } + + for _, address := range primaryNetworkInterface.Ipv6Addresses { + ipv6Addresses = append(ipv6Addresses, aws.ToString(address.Ipv6Address)) + } + + if len(primaryNetworkInterface.Ipv6Addresses) > 0 { + if err := rd.Set("enable_primary_ipv6", primaryNetworkInterface.Ipv6Addresses[0].IsPrimaryIpv6); err != nil { + return sdkdiag.AppendErrorf(diags, "setting enable_primary_ipv6: %s", err) + } + } + } + } else { + rd.Set("associate_public_ip_address", instance.PublicIpAddress != nil) + rd.Set("ipv6_address_count", 0) + rd.Set("primary_network_interface_id", "") + rd.Set(names.AttrSubnetID, instance.SubnetId) + } + + if err := rd.Set("secondary_private_ips", secondaryPrivateIPs); err != nil { + return sdkdiag.AppendErrorf(diags, "setting private_ips for AWS Instance (%s): %s", rd.Id(), err) + } + + if err := rd.Set("ipv6_addresses", ipv6Addresses); err != nil { + log.Printf("[WARN] Error setting ipv6_addresses for AWS Instance (%s): %s", rd.Id(), err) + } + + rd.Set("ebs_optimized", instance.EbsOptimized) + if aws.ToString(instance.SubnetId) != "" { + rd.Set("source_dest_check", instance.SourceDestCheck) + } + + if instance.Monitoring != nil && instance.Monitoring.State != "" { + monitoringState := instance.Monitoring.State + rd.Set("monitoring", monitoringState == awstypes.MonitoringStateEnabled || monitoringState == awstypes.MonitoringStatePending) } - outputRaw, err := stateConf.WaitForStateContext(ctx) + setTagsOut(ctx, instance.Tags) + if _, ok := rd.GetOk("volume_tags"); ok && !blockDeviceTagsDefined(rd) { + volumeTags, err := readVolumeTags(ctx, conn, rd.Id()) + if err != nil { + return sdkdiag.AppendErrorf(diags, "reading EC2 Instance (%s): %s", rd.Id(), err) + } + + 
defaultTagsConfig := client.DefaultTagsConfig(ctx) + ignoreTagsConfig := client.IgnoreTagsConfig(ctx) + tags := keyValueTags(ctx, volumeTags).IgnoreAWS().IgnoreConfig(ignoreTagsConfig) - if output, ok := outputRaw.(*awstypes.Instance); ok { - if stateReason := output.StateReason; stateReason != nil { - tfresource.SetLastError(err, errors.New(aws.ToString(stateReason.Message))) + if err := rd.Set("volume_tags", tags.ResolveDuplicates(ctx, defaultTagsConfig, ignoreTagsConfig, rd, "volume_tags", nil).Map()); err != nil { + return sdkdiag.AppendErrorf(diags, "setting volume_tags: %s", err) } + } + + if err := readSecurityGroups(ctx, rd, instance, conn); err != nil { + return sdkdiag.AppendErrorf(diags, "reading EC2 Instance (%s): %s", rd.Id(), err) + } - return output, err + // Retrieve instance shutdown behavior + if err := readInstanceShutdownBehavior(ctx, rd, conn); err != nil { + return sdkdiag.AppendErrorf(diags, "reading EC2 Instance (%s): %s", rd.Id(), err) } - return nil, err -} + if err := readBlockDevices(ctx, rd, client, instance, false); err != nil { + return sdkdiag.AppendErrorf(diags, "reading EC2 Instance (%s): %s", rd.Id(), err) + } -func waitInstanceStarted(ctx context.Context, conn *ec2.Client, id string, timeout time.Duration) (*awstypes.Instance, error) { - stateConf := &retry.StateChangeConf{ - Pending: enum.Slice(awstypes.InstanceStateNamePending, awstypes.InstanceStateNameStopped), - Target: enum.Slice(awstypes.InstanceStateNameRunning), - Refresh: statusInstance(ctx, conn, id), - Timeout: timeout, - Delay: 10 * time.Second, - MinTimeout: 3 * time.Second, + if _, ok := rd.GetOk("ephemeral_block_device"); !ok { + rd.Set("ephemeral_block_device", []any{}) } - outputRaw, err := stateConf.WaitForStateContext(ctx) + // ARN - if output, ok := outputRaw.(*awstypes.Instance); ok { - if stateReason := output.StateReason; stateReason != nil { - tfresource.SetLastError(err, errors.New(aws.ToString(stateReason.Message))) - } + rd.Set(names.AttrARN, 
instanceARN(ctx, client, rd.Id())) - return output, err + // Instance attributes + { + input := ec2.DescribeInstanceAttributeInput{ + Attribute: awstypes.InstanceAttributeNameDisableApiStop, + InstanceId: aws.String(rd.Id()), + } + attr, err := conn.DescribeInstanceAttribute(ctx, &input) + if err != nil && !errs.IsUnsupportedOperationInPartitionError(client.Partition(ctx), err) { + return sdkdiag.AppendErrorf(diags, "getting attribute (%s): %s", awstypes.InstanceAttributeNameDisableApiStop, err) + } + if !errs.IsUnsupportedOperationInPartitionError(client.Partition(ctx), err) { + rd.Set("disable_api_stop", attr.DisableApiStop.Value) + } } + { + if isSnowballEdgeInstance(rd.Id()) { + log.Printf("[INFO] Determined deploying to Snowball Edge based off Instance ID %s. Skip setting the 'disable_api_termination' attribute.", rd.Id()) + } else { + input := ec2.DescribeInstanceAttributeInput{ + Attribute: awstypes.InstanceAttributeNameDisableApiTermination, + InstanceId: aws.String(rd.Id()), + } + output, err := conn.DescribeInstanceAttribute(ctx, &input) - return nil, err -} + if err != nil { + return sdkdiag.AppendErrorf(diags, "getting attribute (%s): %s", awstypes.InstanceAttributeNameDisableApiTermination, err) + } -func waitInstanceStopped(ctx context.Context, conn *ec2.Client, id string, timeout time.Duration) (*awstypes.Instance, error) { - stateConf := &retry.StateChangeConf{ - Pending: enum.Slice( - awstypes.InstanceStateNamePending, - awstypes.InstanceStateNameRunning, - awstypes.InstanceStateNameShuttingDown, - awstypes.InstanceStateNameStopping, - ), - Target: enum.Slice(awstypes.InstanceStateNameStopped), - Refresh: statusInstance(ctx, conn, id), - Timeout: timeout, - Delay: 10 * time.Second, - MinTimeout: 3 * time.Second, + rd.Set("disable_api_termination", output.DisableApiTermination.Value) + } } - - outputRaw, err := stateConf.WaitForStateContext(ctx) - - if output, ok := outputRaw.(*awstypes.Instance); ok { - if stateReason := output.StateReason; 
stateReason != nil { - tfresource.SetLastError(err, errors.New(aws.ToString(stateReason.Message))) + { + input := ec2.DescribeInstanceAttributeInput{ + Attribute: awstypes.InstanceAttributeNameUserData, + InstanceId: aws.String(rd.Id()), + } + attr, err := conn.DescribeInstanceAttribute(ctx, &input) + if err != nil { + return sdkdiag.AppendErrorf(diags, "getting attribute (%s): %s", awstypes.InstanceAttributeNameUserData, err) + } + if attr.UserData != nil && attr.UserData.Value != nil { + // Since user_data and user_data_base64 conflict with each other, + // we'll only set one or the other here to avoid a perma-diff. + // Since user_data_base64 was added later, we'll prefer to set + // user_data. + _, b64 := rd.GetOk("user_data_base64") + if b64 { + rd.Set("user_data_base64", attr.UserData.Value) + } else { + data, err := itypes.Base64Decode(aws.ToString(attr.UserData.Value)) + if err != nil { + return sdkdiag.AppendErrorf(diags, "decoding user_data: %s", err) + } + rd.Set("user_data", string(data)) + } } - - return output, err } - return nil, err -} - -func userDataHashSum(userData string) string { - // Check whether the user_data is not Base64 encoded. - // Always calculate hash of base64 decoded value since we - // check against double-encoding when setting it. - v, err := itypes.Base64Decode(userData) - if err != nil { - v = []byte(userData) - } + // AWS Standard will return InstanceCreditSpecification.NotSupported errors for EC2 Instance IDs outside T2 and T3 instance types + // Reference: https://github.com/hashicorp/terraform-provider-aws/issues/8055 + if aws.ToBool(instanceTypeInfo.BurstablePerformanceSupported) { + instanceCreditSpecification, err := findInstanceCreditSpecificationByID(ctx, conn, rd.Id()) - hash := sha1.Sum(v) - return hex.EncodeToString(hash[:]) -} + // Ignore UnsupportedOperation errors for AWS China and GovCloud (US). + // Reference: https://github.com/hashicorp/terraform-provider-aws/pull/4362. 
+ if tfawserr.ErrCodeEquals(err, errCodeUnsupportedOperation) { + err = nil + } -func getInstanceVolIDs(ctx context.Context, conn *ec2.Client, instanceId string) ([]string, error) { - volIDs := []string{} + if err != nil { + return sdkdiag.AppendErrorf(diags, "reading EC2 Instance (%s) credit specification: %s", rd.Id(), err) + } - input := ec2.DescribeVolumesInput{ - Filters: newAttributeFilterList(map[string]string{ - "attachment.instance-id": instanceId, - }), + if instanceCreditSpecification != nil { + if err := rd.Set("credit_specification", []any{flattenInstanceCreditSpecification(instanceCreditSpecification)}); err != nil { + return sdkdiag.AppendErrorf(diags, "setting credit_specification: %s", err) + } + } else { + rd.Set("credit_specification", nil) + } } - resp, err := conn.DescribeVolumes(ctx, &input) - if err != nil { - return nil, fmt.Errorf("getting volumes: %s", err) + + if rd.Get("get_password_data").(bool) { + passwordData, err := getInstancePasswordData(ctx, aws.ToString(instance.InstanceId), conn, rd.Timeout(schema.TimeoutRead)) + if err != nil { + return sdkdiag.AppendErrorf(diags, "reading EC2 Instance (%s): %s", rd.Id(), err) + } + rd.Set("password_data", passwordData) + } else { + rd.Set("get_password_data", false) + rd.Set("password_data", nil) } - for _, v := range resp.Volumes { - volIDs = append(volIDs, aws.ToString(v.VolumeId)) + if instance.CapacityReservationSpecification != nil { + if err := rd.Set("capacity_reservation_specification", []any{flattenCapacityReservationSpecificationResponse(instance.CapacityReservationSpecification)}); err != nil { + return sdkdiag.AppendErrorf(diags, "setting capacity_reservation_specification: %s", err) + } + } else { + rd.Set("capacity_reservation_specification", nil) } - return volIDs, nil -} + if spotInstanceRequestID := aws.ToString(instance.SpotInstanceRequestId); spotInstanceRequestID != "" && instance.InstanceLifecycle != "" { + rd.Set("instance_lifecycle", instance.InstanceLifecycle) + 
rd.Set("spot_instance_request_id", spotInstanceRequestID) -func getRootVolID(instance *awstypes.Instance) string { - volID := "" - for _, bd := range instance.BlockDeviceMappings { - if bd.Ebs != nil && blockDeviceIsRoot(bd, instance) { - if bd.Ebs.VolumeId != nil { - volID = aws.ToString(bd.Ebs.VolumeId) - } - break + input := ec2.DescribeSpotInstanceRequestsInput{ + SpotInstanceRequestIds: []string{spotInstanceRequestID}, } - } - return volID -} + apiObject, err := findSpotInstanceRequest(ctx, conn, &input) -func getVolIDByDeviceName(instance *awstypes.Instance, deviceName string) string { - volID := "" - for _, bd := range instance.BlockDeviceMappings { - if aws.ToString(bd.DeviceName) == deviceName { - if bd.Ebs != nil { - volID = aws.ToString(bd.Ebs.VolumeId) - break - } + if err != nil { + return sdkdiag.AppendErrorf(diags, "reading EC2 Spot Instance Request (%s): %s", spotInstanceRequestID, err) } - } - return volID -} + tfMap := map[string]any{ + "instance_interruption_behavior": apiObject.InstanceInterruptionBehavior, + "spot_instance_type": apiObject.Type, + } -func blockDeviceTagsDefined(d *schema.ResourceData) bool { - if v, ok := d.GetOk("root_block_device"); ok { - vL := v.([]any) - for _, v := range vL { - bd := v.(map[string]any) - if blockDeviceTags, ok := bd[names.AttrTags].(map[string]any); ok && len(blockDeviceTags) > 0 { - return true - } + if v := apiObject.SpotPrice; v != nil { + tfMap["max_price"] = aws.ToString(v) } - } - if v, ok := d.GetOk("ebs_block_device"); ok { - vL := v.(*schema.Set).List() - for _, v := range vL { - bd := v.(map[string]any) - if blockDeviceTags, ok := bd[names.AttrTags].(map[string]any); ok && len(blockDeviceTags) > 0 { - return true - } + if v := apiObject.ValidUntil; v != nil { + tfMap["valid_until"] = aws.ToTime(v).Format(time.RFC3339) + } + + if err := rd.Set("instance_market_options", []any{map[string]any{ + "market_type": awstypes.MarketTypeSpot, + "spot_options": []any{tfMap}, + }}); err != nil { + return 
sdkdiag.AppendErrorf(diags, "setting instance_market_options: %s", err) } + } else { + rd.Set("instance_lifecycle", nil) + rd.Set("instance_market_options", nil) + rd.Set("spot_instance_request_id", nil) } - return false + return diags } func expandInstanceMetadataOptions(l []any) *awstypes.InstanceMetadataOptionsRequest { @@ -3653,7 +3616,7 @@ func expandCapacityReservationSpecification(tfMap map[string]any) *awstypes.Capa apiObject.CapacityReservationPreference = awstypes.CapacityReservationPreference(v) } - if v, ok := tfMap["capacity_reservation_target"].([]any); ok && len(v) > 0 { + if v, ok := tfMap["capacity_reservation_target"].([]any); ok && len(v) > 0 && v[0] != nil { apiObject.CapacityReservationTarget = expandCapacityReservationTarget(v[0].(map[string]any)) } @@ -4000,43 +3963,6 @@ func findInstanceLaunchTemplateVersion(ctx context.Context, conn *ec2.Client, id return launchTemplateVersion, nil } -func findLaunchTemplateData(ctx context.Context, conn *ec2.Client, launchTemplateSpecification *awstypes.LaunchTemplateSpecification) (*awstypes.ResponseLaunchTemplateData, error) { - input := ec2.DescribeLaunchTemplateVersionsInput{} - - if v := aws.ToString(launchTemplateSpecification.LaunchTemplateId); v != "" { - input.LaunchTemplateId = aws.String(v) - } else if v := aws.ToString(launchTemplateSpecification.LaunchTemplateName); v != "" { - input.LaunchTemplateName = aws.String(v) - } - - var latestVersion bool - - if v := aws.ToString(launchTemplateSpecification.Version); v != "" { - switch v { - case launchTemplateVersionDefault: - input.Filters = newAttributeFilterList(map[string]string{ - "is-default-version": "true", - }) - case launchTemplateVersionLatest: - latestVersion = true - default: - input.Versions = []string{v} - } - } - - output, err := findLaunchTemplateVersions(ctx, conn, &input) - - if err != nil { - return nil, fmt.Errorf("reading EC2 Launch Template versions: %w", err) - } - - if latestVersion { - return 
output[len(output)-1].LaunchTemplateData, nil - } - - return output[0].LaunchTemplateData, nil -} - // findLaunchTemplateNameAndVersions returns the specified launch template's name, default version and latest version. func findLaunchTemplateNameAndVersions(ctx context.Context, conn *ec2.Client, id string) (string, string, string, error) { lt, err := findLaunchTemplateByID(ctx, conn, id) @@ -4045,31 +3971,7 @@ func findLaunchTemplateNameAndVersions(ctx context.Context, conn *ec2.Client, id return "", "", "", err } - return aws.ToString(lt.LaunchTemplateName), strconv.FormatInt(aws.ToInt64(lt.DefaultVersionNumber), 10), strconv.FormatInt(aws.ToInt64(lt.LatestVersionNumber), 10), nil -} - -func findInstanceTagValue(ctx context.Context, conn *ec2.Client, instanceID, tagKey string) (string, error) { - input := ec2.DescribeTagsInput{ - Filters: newAttributeFilterList(map[string]string{ - "resource-id": instanceID, - names.AttrKey: tagKey, - }), - } - - output, err := conn.DescribeTags(ctx, &input) - - if err != nil { - return "", err - } - - switch count := len(output.Tags); count { - case 0: - return "", nil - case 1: - return aws.ToString(output.Tags[0].Value), nil - default: - return "", tfresource.NewTooManyResultsError(count, input) - } + return aws.ToString(lt.LaunchTemplateName), flex.Int64ToStringValue(lt.DefaultVersionNumber), flex.Int64ToStringValue(lt.LatestVersionNumber), nil } // isSnowballEdgeInstance returns whether or not the specified instance ID indicates an SBE instance. 
@@ -4121,3 +4023,177 @@ func hasCommonElement(slice1 []awstypes.ArchitectureType, slice2 []awstypes.Arch } return false } + +func instanceARN(ctx context.Context, c *conns.AWSClient, instanceID string) string { + return c.RegionalARN(ctx, names.EC2, "instance/"+instanceID) +} + +var _ list.ListResourceWithRawV5Schemas = &instanceListResource{} + +type instanceListResource struct { + framework.ResourceWithConfigure + framework.ListResourceWithSDKv2Resource + framework.ListResourceWithSDKv2Tags +} + +type instanceListResourceModel struct { + framework.WithRegionModel + Filters customListFilters `tfsdk:"filter"` + IncludeAutoScaled types.Bool `tfsdk:"include_auto_scaled"` +} + +func (l *instanceListResource) ListResourceConfigSchema(ctx context.Context, request list.ListResourceSchemaRequest, response *list.ListResourceSchemaResponse) { + response.Schema = listschema.Schema{ + Attributes: map[string]listschema.Attribute{ + "include_auto_scaled": listschema.BoolAttribute{ + Description: "Whether to include instances that are part of an Auto Scaling group. 
Auto scaled instances are excluded by default.", + Optional: true, + }, + }, + Blocks: map[string]listschema.Block{ + names.AttrFilter: customListFiltersBlock(ctx), + }, + } +} + +func (l *instanceListResource) List(ctx context.Context, request list.ListRequest, stream *list.ListResultsStream) { + awsClient := l.Meta() + conn := awsClient.EC2Client(ctx) + + var query instanceListResourceModel + if request.Config.Raw.IsKnown() && !request.Config.Raw.IsNull() { + if diags := request.Config.Get(ctx, &query); diags.HasError() { + stream.Results = list.ListResultsStreamDiagnostics(diags) + return + } + } + + var input ec2.DescribeInstancesInput + if diags := fwflex.Expand(ctx, query, &input); diags.HasError() { + stream.Results = list.ListResultsStreamDiagnostics(diags) + return + } + + // If no instance-state filter is set, default to all states except terminated and shutting-down + if !slices.ContainsFunc(input.Filters, func(i awstypes.Filter) bool { + return aws.ToString(i.Name) == "instance-state-name" || aws.ToString(i.Name) == "instance-state-code" + }) { + states := enum.Slice(slices.DeleteFunc(enum.EnumValues[awstypes.InstanceStateName](), func(s awstypes.InstanceStateName) bool { + return s == awstypes.InstanceStateNameTerminated || s == awstypes.InstanceStateNameShuttingDown + })...) 
+ input.Filters = append(input.Filters, awstypes.Filter{ + Name: aws.String("instance-state-name"), + Values: states, + }) + } + + includeAutoScaled := query.IncludeAutoScaled.ValueBool() + + stream.Results = func(yield func(list.ListResult) bool) { + result := request.NewListResult(ctx) + + for instance, err := range listInstances(ctx, conn, &input) { + if err != nil { + result = fwdiag.NewListResultErrorDiagnostic(err) + yield(result) + return + } + + tags := keyValueTags(ctx, instance.Tags) + + if !includeAutoScaled { + // Exclude Auto Scaled Instances + if v, ok := tags["aws:autoscaling:groupName"]; ok && v.ValueString() != "" { + continue + } + } + + rd := l.ResourceData() + rd.SetId(aws.ToString(instance.InstanceId)) + result.Diagnostics.Append(translateDiags(resourceInstanceFlatten(ctx, awsClient, &instance, rd))...) + if result.Diagnostics.HasError() { + yield(result) + return + } + + // set tags + err = l.SetTags(ctx, awsClient, rd) + if err != nil { + result = fwdiag.NewListResultErrorDiagnostic(err) + yield(result) + return + } + + if v, ok := tags["Name"]; ok { + result.DisplayName = fmt.Sprintf("%s (%s)", v.ValueString(), aws.ToString(instance.InstanceId)) + } else { + result.DisplayName = aws.ToString(instance.InstanceId) + } + + l.SetResult(ctx, awsClient, request.IncludeResource, &result, rd) + if result.Diagnostics.HasError() { + yield(result) + return + } + + if !yield(result) { + return + } + } + } +} + +func translateDiags(in diag.Diagnostics) frameworkdiag.Diagnostics { + out := make(frameworkdiag.Diagnostics, len(in)) + for i, diagIn := range in { + var diagOut frameworkdiag.Diagnostic + if diagIn.Severity == diag.Error { + if len(diagIn.AttributePath) == 0 { + diagOut = frameworkdiag.NewErrorDiagnostic(diagIn.Summary, diagIn.Detail) + } else { + diagOut = frameworkdiag.NewAttributeErrorDiagnostic(translatePath(diagIn.AttributePath), diagIn.Summary, diagIn.Detail) + } + } else { + if len(diagIn.AttributePath) == 0 { + diagOut = 
frameworkdiag.NewWarningDiagnostic(diagIn.Summary, diagIn.Detail) + } else { + diagOut = frameworkdiag.NewAttributeWarningDiagnostic(translatePath(diagIn.AttributePath), diagIn.Summary, diagIn.Detail) + } + } + out[i] = diagOut + } + return out +} + +func translatePath(in cty.Path) path.Path { + var out path.Path + + if len(in) == 0 { + return out + } + + step := in[0] + switch v := step.(type) { + case cty.GetAttrStep: + out = path.Root(v.Name) + } + + for i := 1; i < len(in); i++ { + step := in[i] + switch v := step.(type) { + case cty.GetAttrStep: + out = out.AtName(v.Name) + + case cty.IndexStep: + switch v.Key.Type() { + case cty.Number: + v, _ := v.Key.AsBigFloat().Int64() + out = out.AtListIndex(int(v)) + case cty.String: + out = out.AtMapKey(v.Key.AsString()) + } + } + } + + return out +} diff --git a/internal/service/ec2/ec2_instance_data_source.go b/internal/service/ec2/ec2_instance_data_source.go index 00943041fab6..630e4ec6073d 100644 --- a/internal/service/ec2/ec2_instance_data_source.go +++ b/internal/service/ec2/ec2_instance_data_source.go @@ -11,7 +11,6 @@ import ( "time" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/arn" "github.com/aws/aws-sdk-go-v2/service/ec2" awstypes "github.com/aws/aws-sdk-go-v2/service/ec2/types" "github.com/hashicorp/aws-sdk-go-base/v2/tfawserr" @@ -276,6 +275,10 @@ func dataSourceInstance() *schema.Resource { Type: schema.TypeString, Computed: true, }, + "placement_group_id": { + Type: schema.TypeString, + Computed: true, + }, "placement_partition_number": { Type: schema.TypeInt, Computed: true, @@ -408,7 +411,8 @@ func dataSourceInstance() *schema.Resource { // dataSourceInstanceRead performs the instanceID lookup func dataSourceInstanceRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).EC2Client(ctx) + c := meta.(*conns.AWSClient) + conn := c.EC2Client(ctx) // Build up search parameters input := 
ec2.DescribeInstancesInput{} @@ -451,14 +455,7 @@ func dataSourceInstanceRead(ctx context.Context, d *schema.ResourceData, meta an } // ARN - arn := arn.ARN{ - Partition: meta.(*conns.AWSClient).Partition(ctx), - Region: meta.(*conns.AWSClient).Region(ctx), - Service: names.EC2, - AccountID: meta.(*conns.AWSClient).AccountID(ctx), - Resource: fmt.Sprintf("instance/%s", d.Id()), - } - d.Set(names.AttrARN, arn.String()) + d.Set(names.AttrARN, instanceARN(ctx, c, d.Id())) return diags } @@ -477,13 +474,15 @@ func instanceDescriptionAttributes(ctx context.Context, d *schema.ResourceData, // Set the easy attributes d.Set("instance_state", instance.State.Name) - d.Set(names.AttrAvailabilityZone, instance.Placement.AvailabilityZone) - d.Set("placement_group", instance.Placement.GroupName) - d.Set("placement_partition_number", instance.Placement.PartitionNumber) - d.Set("tenancy", instance.Placement.Tenancy) - d.Set("host_id", instance.Placement.HostId) - d.Set("host_resource_group_arn", instance.Placement.HostResourceGroupArn) - + if v := instance.Placement; v != nil { + d.Set(names.AttrAvailabilityZone, v.AvailabilityZone) + d.Set("host_id", v.HostId) + d.Set("host_resource_group_arn", v.HostResourceGroupArn) + d.Set("placement_group", v.GroupName) + d.Set("placement_group_id", v.GroupId) + d.Set("placement_partition_number", v.PartitionNumber) + d.Set("tenancy", v.Tenancy) + } d.Set("ami", instance.ImageId) d.Set(names.AttrInstanceType, instanceType) d.Set("key_name", instance.KeyName) @@ -556,7 +555,7 @@ func instanceDescriptionAttributes(ctx context.Context, d *schema.ResourceData, } // Block devices - if err := readBlockDevices(ctx, d, meta, instance, true); err != nil { + if err := readBlockDevices(ctx, d, meta.(*conns.AWSClient), instance, true); err != nil { return fmt.Errorf("reading EC2 Instance (%s): %w", aws.ToString(instance.InstanceId), err) } if _, ok := d.GetOk("ephemeral_block_device"); !ok { diff --git 
a/internal/service/ec2/ec2_instance_data_source_tags_gen_test.go b/internal/service/ec2/ec2_instance_data_source_tags_gen_test.go index 97ded40f47a6..de9fef100947 100644 --- a/internal/service/ec2/ec2_instance_data_source_tags_gen_test.go +++ b/internal/service/ec2/ec2_instance_data_source_tags_gen_test.go @@ -21,9 +21,10 @@ import ( func TestAccEC2InstanceDataSource_tags(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_instance.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -47,9 +48,10 @@ func TestAccEC2InstanceDataSource_tags(t *testing.T) { func TestAccEC2InstanceDataSource_tags_NullMap(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_instance.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -69,9 +71,10 @@ func TestAccEC2InstanceDataSource_tags_NullMap(t *testing.T) { func TestAccEC2InstanceDataSource_tags_EmptyMap(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_instance.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -91,9 +94,10 @@ func TestAccEC2InstanceDataSource_tags_EmptyMap(t *testing.T) { func TestAccEC2InstanceDataSource_tags_DefaultTags_nonOverlapping(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_instance.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ 
PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), Steps: []resource.TestStep{ @@ -121,9 +125,10 @@ func TestAccEC2InstanceDataSource_tags_DefaultTags_nonOverlapping(t *testing.T) func TestAccEC2InstanceDataSource_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_instance.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), Steps: []resource.TestStep{ @@ -157,9 +162,10 @@ func TestAccEC2InstanceDataSource_tags_IgnoreTags_Overlap_DefaultTag(t *testing. func TestAccEC2InstanceDataSource_tags_IgnoreTags_Overlap_ResourceTag(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_instance.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), Steps: []resource.TestStep{ diff --git a/internal/service/ec2/ec2_instance_identity_gen_test.go b/internal/service/ec2/ec2_instance_identity_gen_test.go new file mode 100644 index 000000000000..17affa6948db --- /dev/null +++ b/internal/service/ec2/ec2_instance_identity_gen_test.go @@ -0,0 +1,290 @@ +// Code generated by internal/generate/identitytests/main.go; DO NOT EDIT. 
+ +package ec2_test + +import ( + "testing" + + awstypes "github.com/aws/aws-sdk-go-v2/service/ec2/types" + "github.com/hashicorp/terraform-plugin-testing/config" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/knownvalue" + "github.com/hashicorp/terraform-plugin-testing/plancheck" + "github.com/hashicorp/terraform-plugin-testing/statecheck" + "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" + "github.com/hashicorp/terraform-plugin-testing/tfversion" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + tfknownvalue "github.com/hashicorp/terraform-provider-aws/internal/acctest/knownvalue" + tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccEC2Instance_Identity_Basic(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.Instance + resourceName := "aws_instance.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), + CheckDestroy: testAccCheckInstanceDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + // Step 1: Setup + { + ConfigDirectory: config.StaticDirectory("testdata/Instance/basic/"), + ConfigVariables: config.Variables{}, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckInstanceExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + 
names.AttrID: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrID)), + }, + }, + + // Step 2: Import command + { + ConfigDirectory: config.StaticDirectory("testdata/Instance/basic/"), + ConfigVariables: config.Variables{}, + ImportStateKind: resource.ImportCommandWithID, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "user_data_replace_on_change", + }, + }, + + // Step 3: Import block with Import ID + { + ConfigDirectory: config.StaticDirectory("testdata/Instance/basic/"), + ConfigVariables: config.Variables{}, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + }, + }, + }, + + // Step 4: Import block with Resource Identity + { + ConfigDirectory: config.StaticDirectory("testdata/Instance/basic/"), + ConfigVariables: config.Variables{}, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithResourceIdentity, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + }, + }, + }, + }, + }) +} + +func TestAccEC2Instance_Identity_RegionOverride(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_instance.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { 
acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), + CheckDestroy: acctest.CheckDestroyNoop, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + // Step 1: Setup + { + ConfigDirectory: config.StaticDirectory("testdata/Instance/region_override/"), + ConfigVariables: config.Variables{ + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.AlternateRegion()), + names.AttrID: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrID)), + }, + }, + + // Step 2: Import command + { + ConfigDirectory: config.StaticDirectory("testdata/Instance/region_override/"), + ConfigVariables: config.Variables{ + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ImportStateKind: resource.ImportCommandWithID, + ImportStateIdFunc: acctest.CrossRegionImportStateIdFunc(resourceName), + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "user_data_replace_on_change", + }, + }, + + // Step 3: Import block with Import ID + { + ConfigDirectory: config.StaticDirectory("testdata/Instance/region_override/"), + ConfigVariables: config.Variables{ + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportStateIdFunc: acctest.CrossRegionImportStateIdFunc(resourceName), + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), 
knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + }, + + // Step 4: Import block with Resource Identity + { + ConfigDirectory: config.StaticDirectory("testdata/Instance/region_override/"), + ConfigVariables: config.Variables{ + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithResourceIdentity, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + }, + }, + }) +} + +// Resource Identity was added after v6.10.0 +func TestAccEC2Instance_Identity_ExistingResource(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.Instance + resourceName := "aws_instance.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), + CheckDestroy: testAccCheckInstanceDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/Instance/basic_v6.10.0/"), + ConfigVariables: config.Variables{}, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckInstanceExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Instance/basic/"), + ConfigVariables: config.Variables{}, + 
ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + names.AttrID: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrID)), + }, + }, + }, + }) +} + +// Resource Identity was added after v6.10.0 +func TestAccEC2Instance_Identity_ExistingResource_NoRefresh_NoChange(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.Instance + resourceName := "aws_instance.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), + CheckDestroy: testAccCheckInstanceDestroy(ctx), + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/Instance/basic_v6.10.0/"), + ConfigVariables: config.Variables{}, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckInstanceExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Instance/basic/"), + ConfigVariables: config.Variables{}, + ConfigPlanChecks: resource.ConfigPlanChecks{ + 
PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + }, + }) +} diff --git a/internal/service/ec2/ec2_instance_list_test.go b/internal/service/ec2/ec2_instance_list_test.go new file mode 100644 index 000000000000..d6dfdb7265df --- /dev/null +++ b/internal/service/ec2/ec2_instance_list_test.go @@ -0,0 +1,259 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package ec2_test + +import ( + "testing" + + "github.com/hashicorp/terraform-plugin-testing/config" + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/knownvalue" + "github.com/hashicorp/terraform-plugin-testing/querycheck" + "github.com/hashicorp/terraform-plugin-testing/statecheck" + "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" + "github.com/hashicorp/terraform-plugin-testing/tfversion" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + tfknownvalue "github.com/hashicorp/terraform-provider-aws/internal/acctest/knownvalue" + tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccEC2Instance_List_Basic(t *testing.T) { + ctx := acctest.Context(t) + + resourceName1 := "aws_instance.test[0]" + resourceName2 := "aws_instance.test[1]" + resourceName3 := "aws_instance.test[2]" + + var id1, id2, id3 string + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_14_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + 
ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), + CheckDestroy: testAccCheckInstanceDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Setup + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Instance/list_basic/"), + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttrWith("aws_instance.test.0", names.AttrID, getter(&id1)), + resource.TestCheckResourceAttrWith("aws_instance.test.1", names.AttrID, getter(&id2)), + resource.TestCheckResourceAttrWith("aws_instance.test.2", names.AttrID, getter(&id3)), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectRegionalARNFormat(resourceName1, tfjsonpath.New(names.AttrARN), "ec2", "instance/{id}"), + tfstatecheck.ExpectRegionalARNFormat(resourceName2, tfjsonpath.New(names.AttrARN), "ec2", "instance/{id}"), + tfstatecheck.ExpectRegionalARNFormat(resourceName3, tfjsonpath.New(names.AttrARN), "ec2", "instance/{id}"), + }, + }, + + // Step 2: Query + { + Query: true, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Instance/list_basic/"), + QueryResultChecks: []querycheck.QueryResultCheck{ + querycheck.ExpectIdentity("aws_instance.test", map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + names.AttrID: tfknownvalue.StringPtrExact(&id1), + }), + + querycheck.ExpectIdentity("aws_instance.test", map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + names.AttrID: tfknownvalue.StringPtrExact(&id2), + }), + + querycheck.ExpectIdentity("aws_instance.test", map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + names.AttrID: tfknownvalue.StringPtrExact(&id3), + }), + }, + }, + }, + }) +} + 
+func TestAccEC2Instance_List_RegionOverride(t *testing.T) { + ctx := acctest.Context(t) + + resourceName1 := "aws_instance.test[0]" + resourceName2 := "aws_instance.test[1]" + resourceName3 := "aws_instance.test[2]" + + var id1, id2, id3 string + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_14_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), + CheckDestroy: testAccCheckInstanceDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Setup + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Instance/list_region_override/"), + ConfigVariables: config.Variables{ + "region": config.StringVariable(acctest.AlternateRegion()), + }, + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttrWith("aws_instance.test.0", names.AttrID, getter(&id1)), + resource.TestCheckResourceAttrWith("aws_instance.test.1", names.AttrID, getter(&id2)), + resource.TestCheckResourceAttrWith("aws_instance.test.2", names.AttrID, getter(&id3)), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectRegionalARNAlternateRegionFormat(resourceName1, tfjsonpath.New(names.AttrARN), "ec2", "instance/{id}"), + tfstatecheck.ExpectRegionalARNAlternateRegionFormat(resourceName2, tfjsonpath.New(names.AttrARN), "ec2", "instance/{id}"), + tfstatecheck.ExpectRegionalARNAlternateRegionFormat(resourceName3, tfjsonpath.New(names.AttrARN), "ec2", "instance/{id}"), + }, + }, + + // Step 2: Query + { + Query: true, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Instance/list_region_override/"), + ConfigVariables: config.Variables{ + "region": config.StringVariable(acctest.AlternateRegion()), + }, + QueryResultChecks: []querycheck.QueryResultCheck{ + 
querycheck.ExpectIdentity("aws_instance.test", map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.AlternateRegion()), + names.AttrID: tfknownvalue.StringPtrExact(&id1), + }), + + querycheck.ExpectIdentity("aws_instance.test", map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.AlternateRegion()), + names.AttrID: tfknownvalue.StringPtrExact(&id2), + }), + + querycheck.ExpectIdentity("aws_instance.test", map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.AlternateRegion()), + names.AttrID: tfknownvalue.StringPtrExact(&id3), + }), + }, + }, + }, + }) +} + +func TestAccEC2Instance_List_Filtered(t *testing.T) { + ctx := acctest.Context(t) + + resourceNameExpected1 := "aws_instance.expected[0]" + resourceNameExpected2 := "aws_instance.expected[1]" + resourceNameNotExpected1 := "aws_instance.not_expected[0]" + resourceNameNotExpected2 := "aws_instance.not_expected[1]" + + var id1, id2 string + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_14_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), + CheckDestroy: testAccCheckInstanceDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Setup + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Instance/list_filtered/"), + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttrWith("aws_instance.expected.0", names.AttrID, getter(&id1)), + resource.TestCheckResourceAttrWith("aws_instance.expected.1", names.AttrID, getter(&id2)), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectRegionalARNFormat(resourceNameExpected1, 
tfjsonpath.New(names.AttrARN), "ec2", "instance/{id}"), + tfstatecheck.ExpectRegionalARNFormat(resourceNameExpected2, tfjsonpath.New(names.AttrARN), "ec2", "instance/{id}"), + tfstatecheck.ExpectRegionalARNFormat(resourceNameNotExpected1, tfjsonpath.New(names.AttrARN), "ec2", "instance/{id}"), + tfstatecheck.ExpectRegionalARNFormat(resourceNameNotExpected2, tfjsonpath.New(names.AttrARN), "ec2", "instance/{id}"), + }, + }, + + // Step 2: Query + { + Query: true, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Instance/list_filtered/"), + QueryResultChecks: []querycheck.QueryResultCheck{ + querycheck.ExpectIdentity("aws_instance.test", map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + names.AttrID: tfknownvalue.StringPtrExact(&id1), + }), + + querycheck.ExpectIdentity("aws_instance.test", map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + names.AttrID: tfknownvalue.StringPtrExact(&id2), + }), + }, + }, + }, + }) +} + +func TestAccEC2Instance_List_ExcludeAutoScaled(t *testing.T) { + t.Skip("Skipping because zero-result queries cause a failure now") + + ctx := acctest.Context(t) + + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_14_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), + CheckDestroy: testAccCheckInstanceDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Setup + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Instance/list_exclude_autoscaled/"), + ConfigVariables: config.Variables{ + acctest.CtRName: 
config.StringVariable(rName), + }, + ConfigStateChecks: []statecheck.StateCheck{}, + }, + + // Step 2: Query + { + Query: true, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Instance/list_exclude_autoscaled/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + QueryResultChecks: []querycheck.QueryResultCheck{ + querycheck.ExpectLength("aws_instance.excluded", 0), + + querycheck.ExpectLength("aws_instance.included", 1), + }, + }, + }, + }) +} + +// TODO: Temporary until there is more testing support +func getter(s *string) resource.CheckResourceAttrWithFunc { + return func(v string) error { + *s = v + return nil + } +} diff --git a/internal/service/ec2/ec2_instance_metadata_defaults.go b/internal/service/ec2/ec2_instance_metadata_defaults.go index 4a20d23bbd14..92bf42b35bbc 100644 --- a/internal/service/ec2/ec2_instance_metadata_defaults.go +++ b/internal/service/ec2/ec2_instance_metadata_defaults.go @@ -216,21 +216,6 @@ func (r *instanceMetadataDefaultsResource) Delete(ctx context.Context, request r } } -func findInstanceMetadataDefaults(ctx context.Context, conn *ec2.Client) (*awstypes.InstanceMetadataDefaultsResponse, error) { - input := ec2.GetInstanceMetadataDefaultsInput{} - output, err := conn.GetInstanceMetadataDefaults(ctx, &input) - - if err != nil { - return nil, err - } - - if output == nil || output.AccountLevel == nil { - return nil, tfresource.NewEmptyResultError(input) - } - - return output.AccountLevel, nil -} - type instanceMetadataDefaultsResourceModel struct { framework.WithRegionModel HttpEndpoint fwtypes.StringEnum[awstypes.DefaultInstanceMetadataEndpointState] `tfsdk:"http_endpoint"` diff --git a/internal/service/ec2/ec2_instance_state_test.go b/internal/service/ec2/ec2_instance_state_test.go index 65836cb9d48f..0193749f7766 100644 --- a/internal/service/ec2/ec2_instance_state_test.go +++ 
b/internal/service/ec2/ec2_instance_state_test.go @@ -29,7 +29,7 @@ func TestAccEC2InstanceState_basic(t *testing.T) { CheckDestroy: testAccCheckInstanceDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccInstanceStateConfig_basic(state, acctest.CtFalse), + Config: testAccInstanceStateConfig_basic(state, false), Check: resource.ComposeTestCheckFunc( testAccCheckInstanceStateExists(ctx, resourceName), resource.TestCheckResourceAttrSet(resourceName, names.AttrInstanceID), @@ -53,7 +53,7 @@ func TestAccEC2InstanceState_state(t *testing.T) { CheckDestroy: testAccCheckInstanceDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccInstanceStateConfig_basic(stateStopped, acctest.CtFalse), + Config: testAccInstanceStateConfig_basic(stateStopped, false), Check: resource.ComposeTestCheckFunc( testAccCheckInstanceStateExists(ctx, resourceName), resource.TestCheckResourceAttrSet(resourceName, names.AttrInstanceID), @@ -66,7 +66,7 @@ func TestAccEC2InstanceState_state(t *testing.T) { ImportStateVerify: true, }, { - Config: testAccInstanceStateConfig_basic(stateRunning, acctest.CtFalse), + Config: testAccInstanceStateConfig_basic(stateRunning, false), Check: resource.ComposeTestCheckFunc( testAccCheckInstanceStateExists(ctx, resourceName), resource.TestCheckResourceAttrSet(resourceName, names.AttrInstanceID), @@ -90,7 +90,7 @@ func TestAccEC2InstanceState_disappears_Instance(t *testing.T) { CheckDestroy: testAccCheckInstanceDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccInstanceStateConfig_basic(state, acctest.CtFalse), + Config: testAccInstanceStateConfig_basic(state, false), Check: resource.ComposeTestCheckFunc( testAccCheckInstanceStateExists(ctx, resourceName), acctest.CheckResourceDisappears(ctx, acctest.Provider, tfec2.ResourceInstance(), parentResourceName), @@ -128,7 +128,7 @@ func testAccCheckInstanceStateExists(ctx context.Context, n string) resource.Tes } } -func testAccInstanceStateConfig_basic(state string, force string) string { +func 
testAccInstanceStateConfig_basic(state string, force bool) string { return acctest.ConfigCompose( acctest.ConfigLatestAmazonLinux2HVMEBSX8664AMI(), acctest.AvailableEC2InstanceTypeForRegion("t3.micro", "t2.micro", "t1.micro", "m1.small"), @@ -141,7 +141,7 @@ resource "aws_instance" "test" { resource "aws_ec2_instance_state" "test" { instance_id = aws_instance.test.id state = %[1]q - force = %[2]s + force = %[2]t } `, state, force)) } diff --git a/internal/service/ec2/ec2_instance_tags_gen_test.go b/internal/service/ec2/ec2_instance_tags_gen_test.go index aac10fe296a5..66fae8101217 100644 --- a/internal/service/ec2/ec2_instance_tags_gen_test.go +++ b/internal/service/ec2/ec2_instance_tags_gen_test.go @@ -18,10 +18,11 @@ import ( func TestAccEC2Instance_tags(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Instance resourceName := "aws_instance.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), CheckDestroy: testAccCheckInstanceDestroy(ctx), @@ -203,10 +204,11 @@ func TestAccEC2Instance_tags(t *testing.T) { func TestAccEC2Instance_tags_null(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Instance resourceName := "aws_instance.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), CheckDestroy: testAccCheckInstanceDestroy(ctx), @@ -269,10 +271,11 @@ func TestAccEC2Instance_tags_null(t *testing.T) { func TestAccEC2Instance_tags_EmptyMap(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Instance resourceName := "aws_instance.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), CheckDestroy: 
testAccCheckInstanceDestroy(ctx), @@ -331,10 +334,11 @@ func TestAccEC2Instance_tags_EmptyMap(t *testing.T) { func TestAccEC2Instance_tags_AddOnUpdate(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Instance resourceName := "aws_instance.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), CheckDestroy: testAccCheckInstanceDestroy(ctx), @@ -411,10 +415,11 @@ func TestAccEC2Instance_tags_AddOnUpdate(t *testing.T) { func TestAccEC2Instance_tags_EmptyTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Instance resourceName := "aws_instance.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), CheckDestroy: testAccCheckInstanceDestroy(ctx), @@ -501,10 +506,11 @@ func TestAccEC2Instance_tags_EmptyTag_OnCreate(t *testing.T) { func TestAccEC2Instance_tags_EmptyTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Instance resourceName := "aws_instance.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), CheckDestroy: testAccCheckInstanceDestroy(ctx), @@ -638,10 +644,11 @@ func TestAccEC2Instance_tags_EmptyTag_OnUpdate_Add(t *testing.T) { func TestAccEC2Instance_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Instance resourceName := "aws_instance.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), CheckDestroy: testAccCheckInstanceDestroy(ctx), @@ -726,10 +733,11 @@ func 
TestAccEC2Instance_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { func TestAccEC2Instance_tags_DefaultTags_providerOnly(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Instance resourceName := "aws_instance.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), CheckDestroy: testAccCheckInstanceDestroy(ctx), @@ -910,10 +918,11 @@ func TestAccEC2Instance_tags_DefaultTags_providerOnly(t *testing.T) { func TestAccEC2Instance_tags_DefaultTags_nonOverlapping(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Instance resourceName := "aws_instance.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), CheckDestroy: testAccCheckInstanceDestroy(ctx), @@ -1072,10 +1081,11 @@ func TestAccEC2Instance_tags_DefaultTags_nonOverlapping(t *testing.T) { func TestAccEC2Instance_tags_DefaultTags_overlapping(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Instance resourceName := "aws_instance.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), CheckDestroy: testAccCheckInstanceDestroy(ctx), @@ -1250,10 +1260,11 @@ func TestAccEC2Instance_tags_DefaultTags_overlapping(t *testing.T) { func TestAccEC2Instance_tags_DefaultTags_updateToProviderOnly(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Instance resourceName := "aws_instance.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), CheckDestroy: testAccCheckInstanceDestroy(ctx), @@ -1339,10 +1350,11 @@ 
func TestAccEC2Instance_tags_DefaultTags_updateToProviderOnly(t *testing.T) { func TestAccEC2Instance_tags_DefaultTags_updateToResourceOnly(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Instance resourceName := "aws_instance.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), CheckDestroy: testAccCheckInstanceDestroy(ctx), @@ -1427,10 +1439,11 @@ func TestAccEC2Instance_tags_DefaultTags_updateToResourceOnly(t *testing.T) { func TestAccEC2Instance_tags_DefaultTags_emptyResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Instance resourceName := "aws_instance.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), CheckDestroy: testAccCheckInstanceDestroy(ctx), @@ -1492,10 +1505,11 @@ func TestAccEC2Instance_tags_DefaultTags_emptyResourceTag(t *testing.T) { func TestAccEC2Instance_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Instance resourceName := "aws_instance.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), CheckDestroy: testAccCheckInstanceDestroy(ctx), @@ -1549,10 +1563,11 @@ func TestAccEC2Instance_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T) { func TestAccEC2Instance_tags_DefaultTags_nullOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Instance resourceName := "aws_instance.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), CheckDestroy: 
testAccCheckInstanceDestroy(ctx), @@ -1611,10 +1626,11 @@ func TestAccEC2Instance_tags_DefaultTags_nullOverlappingResourceTag(t *testing.T func TestAccEC2Instance_tags_DefaultTags_nullNonOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Instance resourceName := "aws_instance.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), CheckDestroy: testAccCheckInstanceDestroy(ctx), @@ -1673,10 +1689,11 @@ func TestAccEC2Instance_tags_DefaultTags_nullNonOverlappingResourceTag(t *testin func TestAccEC2Instance_tags_ComputedTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Instance resourceName := "aws_instance.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), CheckDestroy: testAccCheckInstanceDestroy(ctx), @@ -1728,10 +1745,11 @@ func TestAccEC2Instance_tags_ComputedTag_OnCreate(t *testing.T) { func TestAccEC2Instance_tags_ComputedTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Instance resourceName := "aws_instance.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), CheckDestroy: testAccCheckInstanceDestroy(ctx), @@ -1824,10 +1842,11 @@ func TestAccEC2Instance_tags_ComputedTag_OnUpdate_Add(t *testing.T) { func TestAccEC2Instance_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Instance resourceName := "aws_instance.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, 
names.EC2ServiceID), CheckDestroy: testAccCheckInstanceDestroy(ctx), @@ -1910,10 +1929,11 @@ func TestAccEC2Instance_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { func TestAccEC2Instance_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Instance resourceName := "aws_instance.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), CheckDestroy: testAccCheckInstanceDestroy(ctx), @@ -2068,10 +2088,11 @@ func TestAccEC2Instance_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { func TestAccEC2Instance_tags_IgnoreTags_Overlap_ResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Instance resourceName := "aws_instance.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), CheckDestroy: testAccCheckInstanceDestroy(ctx), diff --git a/internal/service/ec2/ec2_instance_test.go b/internal/service/ec2/ec2_instance_test.go index f2500dd8cbc4..8a352776ea12 100644 --- a/internal/service/ec2/ec2_instance_test.go +++ b/internal/service/ec2/ec2_instance_test.go @@ -21,6 +21,7 @@ import ( awstypes "github.com/aws/aws-sdk-go-v2/service/ec2/types" "github.com/hashicorp/aws-sdk-go-base/v2/endpoints" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-testing/compare" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/knownvalue" @@ -197,6 +198,17 @@ func TestAccEC2Instance_basic(t *testing.T) { acctest.MatchResourceAttrRegionalARN(ctx, resourceName, names.AttrARN, "ec2", regexache.MustCompile(`instance/i-[0-9a-z]+`)), 
resource.TestCheckResourceAttr(resourceName, "instance_initiated_shutdown_behavior", "stop"), ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("network_interface"), knownvalue.SetExact([]knownvalue.Check{})), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("primary_network_interface_id"), knownvalue.StringRegexp(regexache.MustCompile(`^eni-[0-9a-f]+$`))), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("primary_network_interface"), knownvalue.ListExact([]knownvalue.Check{ + knownvalue.ObjectExact(map[string]knownvalue.Check{ + names.AttrDeleteOnTermination: knownvalue.Bool(true), + names.AttrNetworkInterfaceID: knownvalue.StringRegexp(regexache.MustCompile(`^eni-[0-9a-f]+$`)), + }), + })), + statecheck.CompareValuePairs(resourceName, tfjsonpath.New("primary_network_interface").AtSliceIndex(0).AtMapKey(names.AttrNetworkInterfaceID), resourceName, tfjsonpath.New("primary_network_interface_id"), compare.ValuesSame()), + }, }, { ResourceName: resourceName, @@ -257,7 +269,7 @@ func TestAccEC2Instance_inDefaultVPCBySgName(t *testing.T) { ResourceName: resourceName, ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"user_data_replace_on_change"}, + ImportStateVerifyIgnore: []string{names.AttrForceDestroy, "user_data_replace_on_change"}, }, }, }) @@ -285,7 +297,7 @@ func TestAccEC2Instance_inDefaultVPCBySgID(t *testing.T) { ResourceName: resourceName, ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"user_data_replace_on_change"}, + ImportStateVerifyIgnore: []string{names.AttrForceDestroy, "user_data_replace_on_change"}, }, }, }) @@ -318,7 +330,7 @@ func TestAccEC2Instance_atLeastOneOtherEBSVolume(t *testing.T) { ResourceName: resourceName, ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"user_data_replace_on_change"}, + ImportStateVerifyIgnore: []string{names.AttrForceDestroy, 
"user_data_replace_on_change"}, }, // We repeat the exact same test so that we can be sure // that the user data hash stuff is working without generating @@ -451,7 +463,7 @@ func TestAccEC2Instance_RootBlockDevice_kmsKeyARN(t *testing.T) { ResourceName: resourceName, ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"user_data_replace_on_change"}, + ImportStateVerifyIgnore: []string{names.AttrForceDestroy, "user_data_replace_on_change"}, }, }, }) @@ -480,7 +492,7 @@ func TestAccEC2Instance_userDataBase64(t *testing.T) { ResourceName: resourceName, ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"user_data", "user_data_replace_on_change"}, + ImportStateVerifyIgnore: []string{names.AttrForceDestroy, "user_data", "user_data_replace_on_change"}, }, }, }) @@ -515,7 +527,7 @@ func TestAccEC2Instance_userDataBase64_updateWithBashFile(t *testing.T) { ResourceName: resourceName, ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"user_data", "user_data_replace_on_change"}, + ImportStateVerifyIgnore: []string{names.AttrForceDestroy, "user_data", "user_data_replace_on_change"}, }, }, }) @@ -550,7 +562,7 @@ func TestAccEC2Instance_userDataBase64_updateWithZipFile(t *testing.T) { ResourceName: resourceName, ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"user_data", "user_data_replace_on_change"}, + ImportStateVerifyIgnore: []string{names.AttrForceDestroy, "user_data", "user_data_replace_on_change"}, }, }, }) @@ -586,7 +598,7 @@ func TestAccEC2Instance_userDataBase64_update(t *testing.T) { ResourceName: resourceName, ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"user_data", "user_data_replace_on_change"}, + ImportStateVerifyIgnore: []string{names.AttrForceDestroy, "user_data", "user_data_replace_on_change"}, }, }, }) @@ -636,7 +648,7 @@ func TestAccEC2Instance_gp2IopsDevice(t *testing.T) { ResourceName: resourceName, 
ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"user_data_replace_on_change"}, + ImportStateVerifyIgnore: []string{names.AttrForceDestroy, "user_data_replace_on_change"}, }, }, }) @@ -776,7 +788,7 @@ func TestAccEC2Instance_blockDevices(t *testing.T) { ResourceName: resourceName, ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"ephemeral_block_device", "user_data_replace_on_change"}, + ImportStateVerifyIgnore: []string{"ephemeral_block_device", names.AttrForceDestroy, "user_data_replace_on_change"}, }, }, }) @@ -807,7 +819,7 @@ func TestAccEC2Instance_rootInstanceStore(t *testing.T) { ResourceName: resourceName, ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"user_data_replace_on_change"}, + ImportStateVerifyIgnore: []string{names.AttrForceDestroy, "user_data_replace_on_change"}, }, }, }) @@ -876,7 +888,7 @@ func TestAccEC2Instance_noAMIEphemeralDevices(t *testing.T) { ResourceName: resourceName, ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"ephemeral_block_device", "user_data_replace_on_change"}, + ImportStateVerifyIgnore: []string{"ephemeral_block_device", names.AttrForceDestroy, "user_data_replace_on_change"}, }, }, }) @@ -918,7 +930,7 @@ func TestAccEC2Instance_sourceDestCheck(t *testing.T) { ResourceName: resourceName, ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"user_data_replace_on_change"}, + ImportStateVerifyIgnore: []string{names.AttrForceDestroy, "user_data_replace_on_change"}, }, { Config: testAccInstanceConfig_sourceDestEnable(rName), @@ -962,7 +974,7 @@ func TestAccEC2Instance_autoRecovery(t *testing.T) { ResourceName: resourceName, ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"user_data_replace_on_change"}, + ImportStateVerifyIgnore: []string{names.AttrForceDestroy, "user_data_replace_on_change"}, }, { Config: 
testAccInstanceConfig_autoRecovery(rName, "disabled"), @@ -999,7 +1011,7 @@ func TestAccEC2Instance_disableAPIStop(t *testing.T) { ResourceName: resourceName, ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"user_data_replace_on_change"}, + ImportStateVerifyIgnore: []string{names.AttrForceDestroy, "user_data_replace_on_change"}, }, { Config: testAccInstanceConfig_disableAPIStop(rName, false), @@ -1008,6 +1020,13 @@ func TestAccEC2Instance_disableAPIStop(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "disable_api_stop", acctest.CtFalse), ), }, + { + Config: testAccInstanceConfig_disableAPIStop(rName, true), + Check: resource.ComposeTestCheckFunc( + testAccCheckInstanceExists(ctx, resourceName, &v), + resource.TestCheckResourceAttr(resourceName, "disable_api_stop", acctest.CtTrue), + ), + }, }, }) } @@ -1035,7 +1054,7 @@ func TestAccEC2Instance_disableAPITerminationFinalFalse(t *testing.T) { ResourceName: resourceName, ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"user_data_replace_on_change"}, + ImportStateVerifyIgnore: []string{names.AttrForceDestroy, "user_data_replace_on_change"}, }, { Config: testAccInstanceConfig_disableAPITermination(rName, false), @@ -1065,13 +1084,14 @@ func TestAccEC2Instance_disableAPITerminationFinalTrue(t *testing.T) { Check: resource.ComposeTestCheckFunc( testAccCheckInstanceExists(ctx, resourceName, &v), resource.TestCheckResourceAttr(resourceName, "disable_api_termination", acctest.CtTrue), + resource.TestCheckResourceAttr(resourceName, names.AttrForceDestroy, acctest.CtTrue), ), }, { ResourceName: resourceName, ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"user_data_replace_on_change"}, + ImportStateVerifyIgnore: []string{names.AttrForceDestroy, "user_data_replace_on_change"}, }, }, }) @@ -1134,7 +1154,7 @@ func TestAccEC2Instance_outpost(t *testing.T) { ResourceName: resourceName, ImportState: true, ImportStateVerify: 
true, - ImportStateVerifyIgnore: []string{"user_data_replace_on_change"}, + ImportStateVerifyIgnore: []string{names.AttrForceDestroy, "user_data_replace_on_change"}, }, }, }) @@ -1169,6 +1189,35 @@ func TestAccEC2Instance_placementGroup(t *testing.T) { }) } +func TestAccEC2Instance_placementGroupID(t *testing.T) { + ctx := acctest.Context(t) + var v awstypes.Instance + resourceName := "aws_instance.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckInstanceDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccInstanceConfig_placementGroupID(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckInstanceExists(ctx, resourceName, &v), + resource.TestCheckResourceAttrSet(resourceName, "placement_group_id"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"user_data_replace_on_change", "user_data"}, + }, + }, + }) +} + func TestAccEC2Instance_placementPartitionNumber(t *testing.T) { ctx := acctest.Context(t) var v awstypes.Instance @@ -1222,13 +1271,13 @@ func TestAccEC2Instance_IPv6_supportAddressCount(t *testing.T) { ResourceName: resourceName, ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"user_data_replace_on_change"}, + ImportStateVerifyIgnore: []string{names.AttrForceDestroy, "user_data_replace_on_change"}, }, }, }) } -func TestAccEC2Instance_ipv6AddressCountAndSingleAddressCausesError(t *testing.T) { +func TestAccEC2Instance_IPv6AddressCountAndSingleAddressCausesError(t *testing.T) { ctx := acctest.Context(t) rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -1277,7 +1326,7 @@ func TestAccEC2Instance_IPv6_primaryEnable(t *testing.T) { ResourceName: resourceName, 
ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"user_data_replace_on_change"}, + ImportStateVerifyIgnore: []string{names.AttrForceDestroy, "user_data_replace_on_change"}, }, }, }) @@ -1314,7 +1363,7 @@ func TestAccEC2Instance_IPv6_primaryDisable(t *testing.T) { ResourceName: resourceName, ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"user_data_replace_on_change"}, + ImportStateVerifyIgnore: []string{names.AttrForceDestroy, "user_data_replace_on_change"}, }, }, }) @@ -1343,7 +1392,7 @@ func TestAccEC2Instance_IPv6_supportAddressCountWithIPv4(t *testing.T) { ResourceName: resourceName, ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"user_data_replace_on_change"}, + ImportStateVerifyIgnore: []string{names.AttrForceDestroy, "user_data_replace_on_change"}, }, }, }) @@ -1370,6 +1419,7 @@ func TestAccEC2Instance_IPv6AddressCount(t *testing.T) { Check: resource.ComposeTestCheckFunc( testAccCheckInstanceExists(ctx, resourceName, &original), resource.TestCheckResourceAttr(resourceName, "ipv6_address_count", strconv.Itoa(originalCount)), + resource.TestCheckResourceAttr(resourceName, "ipv6_addresses.#", strconv.Itoa(originalCount)), ), }, { @@ -1378,6 +1428,7 @@ func TestAccEC2Instance_IPv6AddressCount(t *testing.T) { testAccCheckInstanceExists(ctx, resourceName, &updated), testAccCheckInstanceNotRecreated(&original, &updated), resource.TestCheckResourceAttr(resourceName, "ipv6_address_count", strconv.Itoa(updatedCount)), + resource.TestCheckResourceAttr(resourceName, "ipv6_addresses.#", strconv.Itoa(updatedCount)), ), }, { @@ -1386,8 +1437,57 @@ func TestAccEC2Instance_IPv6AddressCount(t *testing.T) { testAccCheckInstanceExists(ctx, resourceName, &updated), testAccCheckInstanceNotRecreated(&original, &updated), resource.TestCheckResourceAttr(resourceName, "ipv6_address_count", strconv.Itoa(shrunkenCount)), + resource.TestCheckResourceAttr(resourceName, "ipv6_addresses.#", 
strconv.Itoa(shrunkenCount)), + ), + }, + }, + }) +} + +func TestAccEC2Instance_IPv6AddressesExplicit(t *testing.T) { + ctx := acctest.Context(t) + var original, updated awstypes.Instance + resourceName := "aws_instance.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckInstanceDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccInstance_ipv6AddressesExplicit(rName, 1), + Check: resource.ComposeTestCheckFunc( + testAccCheckInstanceExists(ctx, resourceName, &original), + resource.TestCheckResourceAttr(resourceName, "ipv6_addresses.#", "1"), + resource.TestCheckResourceAttr(resourceName, "ipv6_address_count", "1"), ), }, + { + Config: testAccInstance_ipv6AddressesExplicit(rName, 3), + Check: resource.ComposeTestCheckFunc( + testAccCheckInstanceExists(ctx, resourceName, &updated), + testAccCheckInstanceRecreated(&original, &updated), + resource.TestCheckResourceAttr(resourceName, "ipv6_addresses.#", "3"), + resource.TestCheckResourceAttr(resourceName, "ipv6_address_count", "3"), + ), + }, + { + Config: testAccInstance_ipv6AddressesExplicit(rName, 2), + Check: resource.ComposeTestCheckFunc( + testAccCheckInstanceExists(ctx, resourceName, &updated), + testAccCheckInstanceRecreated(&original, &updated), + resource.TestCheckResourceAttr(resourceName, "ipv6_addresses.#", "2"), + resource.TestCheckResourceAttr(resourceName, "ipv6_address_count", "2"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{names.AttrForceDestroy, "user_data_replace_on_change"}, + }, }, }) } @@ -1515,7 +1615,7 @@ func TestAccEC2Instance_BlockDeviceTags_volumeTags(t *testing.T) { ResourceName: resourceName, ImportState: true, ImportStateVerify: true, - 
ImportStateVerifyIgnore: []string{"ephemeral_block_device", "user_data_replace_on_change"}, + ImportStateVerifyIgnore: []string{"ephemeral_block_device", names.AttrForceDestroy, "user_data_replace_on_change"}, }, { Config: testAccInstanceConfig_blockDeviceTagsVolumeTags(rName), @@ -1648,7 +1748,7 @@ func TestAccEC2Instance_BlockDeviceTags_ebsAndRoot(t *testing.T) { ResourceName: resourceName, ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"ephemeral_block_device", "user_data_replace_on_change"}, + ImportStateVerifyIgnore: []string{"ephemeral_block_device", names.AttrForceDestroy, "user_data_replace_on_change"}, }, }, }) @@ -2007,7 +2107,7 @@ func TestAccEC2Instance_instanceProfileChange(t *testing.T) { ResourceName: resourceName, ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"user_data_replace_on_change"}, + ImportStateVerifyIgnore: []string{names.AttrForceDestroy, "user_data_replace_on_change"}, }, { Config: testAccInstanceConfig_profile(rName1), @@ -2066,7 +2166,7 @@ func TestAccEC2Instance_iamInstanceProfile(t *testing.T) { ResourceName: resourceName, ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"user_data_replace_on_change"}, + ImportStateVerifyIgnore: []string{names.AttrForceDestroy, "user_data_replace_on_change"}, }, }, }) @@ -2095,7 +2195,7 @@ func TestAccEC2Instance_iamInstanceProfilePath(t *testing.T) { ResourceName: resourceName, ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"user_data_replace_on_change"}, + ImportStateVerifyIgnore: []string{names.AttrForceDestroy, "user_data_replace_on_change"}, }, }, }) @@ -2134,7 +2234,7 @@ func TestAccEC2Instance_privateIP(t *testing.T) { ResourceName: resourceName, ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"user_data_replace_on_change"}, + ImportStateVerifyIgnore: []string{names.AttrForceDestroy, "user_data_replace_on_change"}, }, }, }) @@ 
-2173,7 +2273,7 @@ func TestAccEC2Instance_associatePublicIPAndPrivateIP(t *testing.T) { ResourceName: resourceName, ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"user_data_replace_on_change"}, + ImportStateVerifyIgnore: []string{names.AttrForceDestroy, "user_data_replace_on_change"}, }, }, }) @@ -2214,7 +2314,7 @@ func TestAccEC2Instance_Empty_privateIP(t *testing.T) { ResourceName: resourceName, ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"user_data_replace_on_change"}, + ImportStateVerifyIgnore: []string{names.AttrForceDestroy, "user_data_replace_on_change"}, }, }, }) @@ -2246,7 +2346,7 @@ func TestAccEC2Instance_PrivateDNSNameOptions_computed(t *testing.T) { ResourceName: resourceName, ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"user_data_replace_on_change"}, + ImportStateVerifyIgnore: []string{names.AttrForceDestroy, "user_data_replace_on_change"}, }, }, }) @@ -2278,7 +2378,7 @@ func TestAccEC2Instance_PrivateDNSNameOptions_configured(t *testing.T) { ResourceName: resourceName, ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"user_data_replace_on_change"}, + ImportStateVerifyIgnore: []string{names.AttrForceDestroy, "user_data_replace_on_change"}, }, { Config: testAccInstanceConfig_PrivateDNSNameOptions_configured(rName, true, true, "ip-name"), @@ -2384,7 +2484,7 @@ func TestAccEC2Instance_forceNewAndTagsDrift(t *testing.T) { ResourceName: resourceName, ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"user_data_replace_on_change"}, + ImportStateVerifyIgnore: []string{names.AttrForceDestroy, "user_data_replace_on_change"}, }, }, }) @@ -2430,7 +2530,7 @@ func TestAccEC2Instance_changeInstanceType(t *testing.T) { ResourceName: resourceName, ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"user_data_replace_on_change"}, + ImportStateVerifyIgnore: 
[]string{names.AttrForceDestroy, "user_data_replace_on_change"}, }, { Config: testAccInstanceConfig_instanceType(rName, "t2.large"), @@ -2588,7 +2688,7 @@ func TestAccEC2Instance_changeInstanceTypeAndUserData(t *testing.T) { ResourceName: resourceName, ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"user_data", "user_data_replace_on_change"}, + ImportStateVerifyIgnore: []string{names.AttrForceDestroy, "user_data", "user_data_replace_on_change"}, }, }, }) @@ -2626,7 +2726,7 @@ func TestAccEC2Instance_changeInstanceTypeAndUserDataBase64(t *testing.T) { ResourceName: resourceName, ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"user_data", "user_data_replace_on_change"}, + ImportStateVerifyIgnore: []string{names.AttrForceDestroy, "user_data", "user_data_replace_on_change"}, }, }, }) @@ -3105,7 +3205,7 @@ func TestAccEC2Instance_EBSRootDevice_multipleDynamicEBSBlockDevices(t *testing. ResourceName: resourceName, ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"user_data_replace_on_change"}, + ImportStateVerifyIgnore: []string{names.AttrForceDestroy, "user_data_replace_on_change"}, }, }, }) @@ -3152,6 +3252,48 @@ func TestAccEC2Instance_gp3RootBlockDevice(t *testing.T) { testCheck(), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{names.AttrForceDestroy, "user_data_replace_on_change"}, + }, + }, + }) +} + +func TestAccEC2Instance_PrimaryNetworkInterface_basic(t *testing.T) { + ctx := acctest.Context(t) + var instance awstypes.Instance + var eni awstypes.NetworkInterface + resourceName := "aws_instance.test" + eniResourceName := "aws_network_interface.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), + ProtoV5ProviderFactories: 
acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckInstanceDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccInstanceConfig_primaryNetworkInterface_basic(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckInstanceExists(ctx, resourceName, &instance), + testAccCheckENIExists(ctx, eniResourceName, &eni), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("network_interface"), knownvalue.SetExact([]knownvalue.Check{})), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("primary_network_interface_id"), knownvalue.StringRegexp(regexache.MustCompile(`^eni-[0-9a-f]+$`))), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("primary_network_interface"), knownvalue.ListExact([]knownvalue.Check{ + knownvalue.ObjectExact(map[string]knownvalue.Check{ + names.AttrDeleteOnTermination: knownvalue.Bool(false), + names.AttrNetworkInterfaceID: knownvalue.StringRegexp(regexache.MustCompile(`^eni-[0-9a-f]+$`)), + }), + })), + statecheck.CompareValuePairs(resourceName, tfjsonpath.New("primary_network_interface").AtSliceIndex(0).AtMapKey(names.AttrNetworkInterfaceID), resourceName, tfjsonpath.New("primary_network_interface_id"), compare.ValuesSame()), + }, + }, { ResourceName: resourceName, ImportState: true, @@ -3162,7 +3304,7 @@ func TestAccEC2Instance_gp3RootBlockDevice(t *testing.T) { }) } -func TestAccEC2Instance_primaryNetworkInterface(t *testing.T) { +func TestAccEC2Instance_NetworkInterface_primaryNetworkInterface(t *testing.T) { ctx := acctest.Context(t) var instance awstypes.Instance var eni awstypes.NetworkInterface @@ -3177,7 +3319,7 @@ func TestAccEC2Instance_primaryNetworkInterface(t *testing.T) { CheckDestroy: testAccCheckInstanceDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccInstanceConfig_primaryNetworkInterface(rName), + Config: testAccInstanceConfig_networkInterface_primaryNetworkInterface(rName), Check: resource.ComposeTestCheckFunc( 
testAccCheckInstanceExists(ctx, resourceName, &instance), testAccCheckENIExists(ctx, eniResourceName, &eni), @@ -3187,6 +3329,19 @@ func TestAccEC2Instance_primaryNetworkInterface(t *testing.T) { "network_card_index": "0", }), ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("network_interface"), knownvalue.SetExact([]knownvalue.Check{ + knownvalue.ObjectExact(map[string]knownvalue.Check{ + names.AttrDeleteOnTermination: knownvalue.Bool(false), + "device_index": knownvalue.Int64Exact(0), + "network_card_index": knownvalue.Int64Exact(0), + names.AttrNetworkInterfaceID: knownvalue.StringRegexp(regexache.MustCompile(`^eni-[0-9a-f]+$`)), + }), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("primary_network_interface_id"), knownvalue.StringRegexp(regexache.MustCompile(`^eni-[0-9a-f]+$`))), + statecheck.CompareValuePairs(resourceName, tfjsonpath.New("network_interface").AtSliceIndex(0).AtMapKey(names.AttrNetworkInterfaceID), resourceName, tfjsonpath.New("primary_network_interface_id"), compare.ValuesSame()), + statecheck.CompareValuePairs(resourceName, tfjsonpath.New("primary_network_interface_id"), eniResourceName, tfjsonpath.New(names.AttrID), compare.ValuesSame()), + }, }, { ResourceName: resourceName, @@ -3198,10 +3353,11 @@ func TestAccEC2Instance_primaryNetworkInterface(t *testing.T) { }) } -func TestAccEC2Instance_networkCardIndex(t *testing.T) { +func TestAccEC2Instance_NetworkInterface_networkCardIndex(t *testing.T) { ctx := acctest.Context(t) var instance awstypes.Instance resourceName := "aws_instance.test" + eniResourceName := "aws_network_interface.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) // https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-eni.html#network-cards. 
@@ -3214,7 +3370,7 @@ func TestAccEC2Instance_networkCardIndex(t *testing.T) { CheckDestroy: testAccCheckInstanceDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccInstanceConfig_networkCardIndex(rName), + Config: testAccInstanceConfig_networkInterface_networkCardIndex(rName), Check: resource.ComposeTestCheckFunc( testAccCheckInstanceExists(ctx, resourceName, &instance), resource.TestCheckResourceAttr(resourceName, "network_interface.#", "1"), @@ -3223,6 +3379,19 @@ func TestAccEC2Instance_networkCardIndex(t *testing.T) { "network_card_index": "0", }), ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("network_interface"), knownvalue.SetExact([]knownvalue.Check{ + knownvalue.ObjectExact(map[string]knownvalue.Check{ + names.AttrDeleteOnTermination: knownvalue.Bool(false), + "device_index": knownvalue.Int64Exact(0), + "network_card_index": knownvalue.Int64Exact(0), + names.AttrNetworkInterfaceID: knownvalue.StringRegexp(regexache.MustCompile(`^eni-[0-9a-f]+$`)), + }), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("primary_network_interface_id"), knownvalue.StringRegexp(regexache.MustCompile(`^eni-[0-9a-f]+$`))), + statecheck.CompareValuePairs(resourceName, tfjsonpath.New("network_interface").AtSliceIndex(0).AtMapKey(names.AttrNetworkInterfaceID), resourceName, tfjsonpath.New("primary_network_interface_id"), compare.ValuesSame()), + statecheck.CompareValuePairs(resourceName, tfjsonpath.New("primary_network_interface_id"), eniResourceName, tfjsonpath.New(names.AttrID), compare.ValuesSame()), + }, }, { ResourceName: resourceName, @@ -3234,7 +3403,7 @@ func TestAccEC2Instance_networkCardIndex(t *testing.T) { }) } -func TestAccEC2Instance_primaryNetworkInterfaceSourceDestCheck(t *testing.T) { +func TestAccEC2Instance_NetworkInterface_primaryNetworkInterfaceSourceDestCheck(t *testing.T) { ctx := acctest.Context(t) var instance awstypes.Instance var eni awstypes.NetworkInterface @@ 
-3249,7 +3418,7 @@ func TestAccEC2Instance_primaryNetworkInterfaceSourceDestCheck(t *testing.T) { CheckDestroy: testAccCheckInstanceDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccInstanceConfig_primaryNetworkInterfaceSourceDestCheck(rName), + Config: testAccInstanceConfig_networkInterface_primaryNetworkInterfaceSourceDestCheck(rName), Check: resource.ComposeTestCheckFunc( testAccCheckInstanceExists(ctx, resourceName, &instance), testAccCheckENIExists(ctx, eniResourceName, &eni), @@ -3266,7 +3435,7 @@ func TestAccEC2Instance_primaryNetworkInterfaceSourceDestCheck(t *testing.T) { }) } -func TestAccEC2Instance_addSecondaryInterface(t *testing.T) { +func TestAccEC2Instance_NetworkInterface_attachSecondaryInterface_inlineAttachment(t *testing.T) { ctx := acctest.Context(t) var before, after awstypes.Instance var eniPrimary awstypes.NetworkInterface @@ -3283,12 +3452,28 @@ func TestAccEC2Instance_addSecondaryInterface(t *testing.T) { CheckDestroy: testAccCheckInstanceDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccInstanceConfig_addSecondaryNetworkInterfaceBefore(rName), + Config: testAccInstanceConfig_networkInterface_attachSecondaryNetworkInterface_inlineAttachment_Setup(rName), Check: resource.ComposeTestCheckFunc( testAccCheckInstanceExists(ctx, resourceName, &before), testAccCheckENIExists(ctx, eniPrimaryResourceName, &eniPrimary), + testAccCheckENIExists(ctx, eniSecondaryResourceName, &eniSecondary), resource.TestCheckResourceAttr(resourceName, "network_interface.#", "1"), ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("network_interface"), knownvalue.SetExact([]knownvalue.Check{ + knownvalue.ObjectExact(map[string]knownvalue.Check{ + names.AttrDeleteOnTermination: knownvalue.Bool(false), + "device_index": knownvalue.Int64Exact(0), + "network_card_index": knownvalue.Int64Exact(0), + names.AttrNetworkInterfaceID: knownvalue.StringRegexp(regexache.MustCompile(`^eni-[0-9a-f]+$`)), 
+ }), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("primary_network_interface_id"), knownvalue.StringRegexp(regexache.MustCompile(`^eni-[0-9a-f]+$`))), + statecheck.CompareValuePairs(resourceName, tfjsonpath.New("network_interface").AtSliceIndex(0).AtMapKey(names.AttrNetworkInterfaceID), resourceName, tfjsonpath.New("primary_network_interface_id"), compare.ValuesSame()), + statecheck.CompareValuePairs(resourceName, tfjsonpath.New("primary_network_interface_id"), eniPrimaryResourceName, tfjsonpath.New(names.AttrID), compare.ValuesSame()), + + statecheck.ExpectKnownValue(eniSecondaryResourceName, tfjsonpath.New("attachment"), knownvalue.SetExact([]knownvalue.Check{})), + }, }, { ResourceName: resourceName, @@ -3297,12 +3482,211 @@ func TestAccEC2Instance_addSecondaryInterface(t *testing.T) { ImportStateVerifyIgnore: []string{"network_interface", "user_data_replace_on_change"}, }, { - Config: testAccInstanceConfig_addSecondaryNetworkInterfaceAfter(rName), + Config: testAccInstanceConfig_networkInterface_attachSecondaryNetworkInterface_inlineAttachment(rName), Check: resource.ComposeTestCheckFunc( testAccCheckInstanceExists(ctx, resourceName, &after), + testAccCheckENIExists(ctx, eniPrimaryResourceName, &eniPrimary), testAccCheckENIExists(ctx, eniSecondaryResourceName, &eniSecondary), resource.TestCheckResourceAttr(resourceName, "network_interface.#", "1"), ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("network_interface"), knownvalue.SetExact([]knownvalue.Check{ + knownvalue.ObjectExact(map[string]knownvalue.Check{ + names.AttrDeleteOnTermination: knownvalue.Bool(false), + "device_index": knownvalue.Int64Exact(0), + "network_card_index": knownvalue.Int64Exact(0), + names.AttrNetworkInterfaceID: knownvalue.StringRegexp(regexache.MustCompile(`^eni-[0-9a-f]+$`)), + }), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("primary_network_interface_id"), 
knownvalue.StringRegexp(regexache.MustCompile(`^eni-[0-9a-f]+$`))), + statecheck.CompareValuePairs(resourceName, tfjsonpath.New("network_interface").AtSliceIndex(0).AtMapKey(names.AttrNetworkInterfaceID), resourceName, tfjsonpath.New("primary_network_interface_id"), compare.ValuesSame()), + statecheck.CompareValuePairs(resourceName, tfjsonpath.New("primary_network_interface_id"), eniPrimaryResourceName, tfjsonpath.New(names.AttrID), compare.ValuesSame()), + + statecheck.ExpectKnownValue(eniSecondaryResourceName, tfjsonpath.New("attachment"), knownvalue.SetExact([]knownvalue.Check{ + knownvalue.ObjectExact(map[string]knownvalue.Check{ + "attachment_id": knownvalue.NotNull(), + "device_index": knownvalue.Int64Exact(1), + "instance": knownvalue.NotNull(), + "network_card_index": knownvalue.Int64Exact(0), + }), + })), + statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), eniSecondaryResourceName, tfjsonpath.New("attachment").AtSliceIndex(0).AtMapKey("instance"), compare.ValuesSame()), + }, + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"network_interface", "user_data_replace_on_change"}, + }, + }, + }) +} + +func TestAccEC2Instance_NetworkInterface_attachSecondaryInterface_attachmentResource(t *testing.T) { + ctx := acctest.Context(t) + var before, after awstypes.Instance + var eniPrimary awstypes.NetworkInterface + var eniSecondary awstypes.NetworkInterface + resourceName := "aws_instance.test" + eniPrimaryResourceName := "aws_network_interface.primary" + eniSecondaryResourceName := "aws_network_interface.secondary" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckInstanceDestroy(ctx), + Steps: []resource.TestStep{ + { 
+ Config: testAccInstanceConfig_networkInterface_attachSecondaryNetworkInterface_attachmentResource_Setup(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckInstanceExists(ctx, resourceName, &before), + testAccCheckENIExists(ctx, eniPrimaryResourceName, &eniPrimary), + testAccCheckENIExists(ctx, eniSecondaryResourceName, &eniSecondary), + resource.TestCheckResourceAttr(resourceName, "network_interface.#", "1"), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("network_interface"), knownvalue.SetExact([]knownvalue.Check{ + knownvalue.ObjectExact(map[string]knownvalue.Check{ + names.AttrDeleteOnTermination: knownvalue.Bool(false), + "device_index": knownvalue.Int64Exact(0), + "network_card_index": knownvalue.Int64Exact(0), + names.AttrNetworkInterfaceID: knownvalue.StringRegexp(regexache.MustCompile(`^eni-[0-9a-f]+$`)), + }), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("primary_network_interface_id"), knownvalue.StringRegexp(regexache.MustCompile(`^eni-[0-9a-f]+$`))), + statecheck.CompareValuePairs(resourceName, tfjsonpath.New("network_interface").AtSliceIndex(0).AtMapKey(names.AttrNetworkInterfaceID), resourceName, tfjsonpath.New("primary_network_interface_id"), compare.ValuesSame()), + statecheck.CompareValuePairs(resourceName, tfjsonpath.New("primary_network_interface_id"), eniPrimaryResourceName, tfjsonpath.New(names.AttrID), compare.ValuesSame()), + + statecheck.ExpectKnownValue(eniSecondaryResourceName, tfjsonpath.New("attachment"), knownvalue.SetExact([]knownvalue.Check{})), + }, + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"network_interface", "user_data_replace_on_change"}, + }, + { + Config: testAccInstanceConfig_networkInterface_attachSecondaryNetworkInterface_attachmentResource(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckInstanceExists(ctx, resourceName, &after), + 
testAccCheckENIExists(ctx, eniPrimaryResourceName, &eniPrimary), + testAccCheckENIExists(ctx, eniSecondaryResourceName, &eniSecondary), + resource.TestCheckResourceAttr(resourceName, "network_interface.#", "1"), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("network_interface"), knownvalue.SetExact([]knownvalue.Check{ + knownvalue.ObjectExact(map[string]knownvalue.Check{ + names.AttrDeleteOnTermination: knownvalue.Bool(false), + "device_index": knownvalue.Int64Exact(0), + "network_card_index": knownvalue.Int64Exact(0), + names.AttrNetworkInterfaceID: knownvalue.StringRegexp(regexache.MustCompile(`^eni-[0-9a-f]+$`)), + }), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("primary_network_interface_id"), knownvalue.StringRegexp(regexache.MustCompile(`^eni-[0-9a-f]+$`))), + statecheck.CompareValuePairs(resourceName, tfjsonpath.New("network_interface").AtSliceIndex(0).AtMapKey(names.AttrNetworkInterfaceID), resourceName, tfjsonpath.New("primary_network_interface_id"), compare.ValuesSame()), + statecheck.CompareValuePairs(resourceName, tfjsonpath.New("primary_network_interface_id"), eniPrimaryResourceName, tfjsonpath.New(names.AttrID), compare.ValuesSame()), + + statecheck.ExpectKnownValue("aws_network_interface_attachment.secondary", tfjsonpath.New("device_index"), knownvalue.Int64Exact(1)), + statecheck.CompareValuePairs("aws_network_interface_attachment.secondary", tfjsonpath.New(names.AttrInstanceID), resourceName, tfjsonpath.New(names.AttrID), compare.ValuesSame()), + statecheck.CompareValuePairs("aws_network_interface_attachment.secondary", tfjsonpath.New(names.AttrNetworkInterfaceID), eniSecondaryResourceName, tfjsonpath.New(names.AttrID), compare.ValuesSame()), + }, + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"network_interface", "user_data_replace_on_change"}, + }, + }, + }) +} + +func 
TestAccEC2Instance_NetworkInterface_addSecondaryInterface(t *testing.T) { + ctx := acctest.Context(t) + var before, after awstypes.Instance + var eniPrimary awstypes.NetworkInterface + var eniSecondary awstypes.NetworkInterface + resourceName := "aws_instance.test" + eniPrimaryResourceName := "aws_network_interface.primary" + eniSecondaryResourceName := "aws_network_interface.secondary" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckInstanceDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccInstanceConfig_networkInterface_addSecondaryNetworkInterfaceBefore(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckInstanceExists(ctx, resourceName, &before), + testAccCheckENIExists(ctx, eniPrimaryResourceName, &eniPrimary), + testAccCheckENIExists(ctx, eniSecondaryResourceName, &eniSecondary), + resource.TestCheckResourceAttr(resourceName, "network_interface.#", "1"), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("network_interface"), knownvalue.SetExact([]knownvalue.Check{ + knownvalue.ObjectExact(map[string]knownvalue.Check{ + names.AttrDeleteOnTermination: knownvalue.Bool(false), + "device_index": knownvalue.Int64Exact(0), + "network_card_index": knownvalue.Int64Exact(0), + names.AttrNetworkInterfaceID: knownvalue.StringRegexp(regexache.MustCompile(`^eni-[0-9a-f]+$`)), + }), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("primary_network_interface_id"), knownvalue.StringRegexp(regexache.MustCompile(`^eni-[0-9a-f]+$`))), + statecheck.CompareValuePairs(resourceName, tfjsonpath.New("network_interface").AtSliceIndex(0).AtMapKey(names.AttrNetworkInterfaceID), resourceName, 
tfjsonpath.New("primary_network_interface_id"), compare.ValuesSame()), + statecheck.CompareValuePairs(resourceName, tfjsonpath.New("primary_network_interface_id"), eniPrimaryResourceName, tfjsonpath.New(names.AttrID), compare.ValuesSame()), + }, + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"network_interface", "user_data_replace_on_change"}, + }, + { + Config: testAccInstanceConfig_networkInterface_addSecondaryNetworkInterface(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckInstanceExists(ctx, resourceName, &after), + testAccCheckENIExists(ctx, eniPrimaryResourceName, &eniPrimary), + testAccCheckENIExists(ctx, eniSecondaryResourceName, &eniSecondary), + resource.TestCheckResourceAttr(resourceName, "network_interface.#", "2"), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("network_interface"), knownvalue.SetExact([]knownvalue.Check{ + knownvalue.ObjectExact(map[string]knownvalue.Check{ + names.AttrDeleteOnTermination: knownvalue.Bool(false), + "device_index": knownvalue.Int64Exact(0), + "network_card_index": knownvalue.Int64Exact(0), + names.AttrNetworkInterfaceID: knownvalue.StringRegexp(regexache.MustCompile(`^eni-[0-9a-f]+$`)), + }), + knownvalue.ObjectExact(map[string]knownvalue.Check{ + names.AttrDeleteOnTermination: knownvalue.Bool(false), + "device_index": knownvalue.Int64Exact(1), + "network_card_index": knownvalue.Int64Exact(0), + names.AttrNetworkInterfaceID: knownvalue.StringRegexp(regexache.MustCompile(`^eni-[0-9a-f]+$`)), + }), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("primary_network_interface_id"), knownvalue.StringRegexp(regexache.MustCompile(`^eni-[0-9a-f]+$`))), + statecheck.CompareValuePairs(resourceName, tfjsonpath.New("network_interface").AtSliceIndex(0).AtMapKey(names.AttrNetworkInterfaceID), resourceName, tfjsonpath.New("primary_network_interface_id"), 
compare.ValuesSame()), + statecheck.CompareValuePairs(resourceName, tfjsonpath.New("primary_network_interface_id"), eniPrimaryResourceName, tfjsonpath.New(names.AttrID), compare.ValuesSame()), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionReplace), + }, + }, + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"network_interface", "user_data_replace_on_change"}, }, }, }) @@ -3332,7 +3716,7 @@ func TestAccEC2Instance_addSecurityGroupNetworkInterface(t *testing.T) { ResourceName: resourceName, ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"user_data_replace_on_change"}, + ImportStateVerifyIgnore: []string{names.AttrForceDestroy, "user_data_replace_on_change"}, }, { Config: testAccInstanceConfig_addSecurityGroupAfter(rName), @@ -3380,7 +3764,7 @@ func TestAccEC2Instance_NewNetworkInterface_publicIPAndSecondaryPrivateIPs(t *te ResourceName: resourceName, ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"user_data_replace_on_change"}, + ImportStateVerifyIgnore: []string{names.AttrForceDestroy, "user_data_replace_on_change"}, }, }, }) @@ -3412,7 +3796,7 @@ func TestAccEC2Instance_NewNetworkInterface_emptyPrivateIPAndSecondaryPrivateIPs ResourceName: resourceName, ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"user_data_replace_on_change"}, + ImportStateVerifyIgnore: []string{names.AttrForceDestroy, "user_data_replace_on_change"}, }, }, }) @@ -3461,7 +3845,7 @@ func TestAccEC2Instance_NewNetworkInterface_emptyPrivateIPAndSecondaryPrivateIPs ResourceName: resourceName, ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"user_data_replace_on_change"}, + ImportStateVerifyIgnore: []string{names.AttrForceDestroy, "user_data_replace_on_change"}, }, }, }) @@ -3494,7 +3878,7 
@@ func TestAccEC2Instance_NewNetworkInterface_privateIPAndSecondaryPrivateIPs(t *t ResourceName: resourceName, ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"user_data_replace_on_change"}, + ImportStateVerifyIgnore: []string{names.AttrForceDestroy, "user_data_replace_on_change"}, }, }, }) @@ -3544,7 +3928,7 @@ func TestAccEC2Instance_NewNetworkInterface_privateIPAndSecondaryPrivateIPsUpdat ResourceName: resourceName, ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"user_data_replace_on_change"}, + ImportStateVerifyIgnore: []string{names.AttrForceDestroy, "user_data_replace_on_change"}, }, }, }) @@ -3575,7 +3959,7 @@ func TestAccEC2Instance_AssociatePublic_defaultPrivate(t *testing.T) { ResourceName: resourceName, ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"user_data_replace_on_change"}, + ImportStateVerifyIgnore: []string{names.AttrForceDestroy, "user_data_replace_on_change"}, }, }, }) @@ -3606,7 +3990,7 @@ func TestAccEC2Instance_AssociatePublic_defaultPublic(t *testing.T) { ResourceName: resourceName, ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"user_data_replace_on_change"}, + ImportStateVerifyIgnore: []string{names.AttrForceDestroy, "user_data_replace_on_change"}, }, }, }) @@ -3637,7 +4021,7 @@ func TestAccEC2Instance_AssociatePublic_explicitPublic(t *testing.T) { ResourceName: resourceName, ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"user_data_replace_on_change"}, + ImportStateVerifyIgnore: []string{names.AttrForceDestroy, "user_data_replace_on_change"}, }, }, }) @@ -3668,7 +4052,7 @@ func TestAccEC2Instance_AssociatePublic_explicitPrivate(t *testing.T) { ResourceName: resourceName, ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"user_data_replace_on_change"}, + ImportStateVerifyIgnore: []string{names.AttrForceDestroy, "user_data_replace_on_change"}, }, 
}, }) @@ -3699,7 +4083,7 @@ func TestAccEC2Instance_AssociatePublic_overridePublic(t *testing.T) { ResourceName: resourceName, ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"user_data_replace_on_change"}, + ImportStateVerifyIgnore: []string{names.AttrForceDestroy, "user_data_replace_on_change"}, }, }, }) @@ -3730,7 +4114,7 @@ func TestAccEC2Instance_AssociatePublic_overridePrivate(t *testing.T) { ResourceName: resourceName, ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"user_data_replace_on_change"}, + ImportStateVerifyIgnore: []string{names.AttrForceDestroy, "user_data_replace_on_change"}, }, }, }) @@ -4032,7 +4416,7 @@ func TestAccEC2Instance_GetPasswordData_falseToTrue(t *testing.T) { ResourceName: resourceName, ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"user_data_replace_on_change"}, + ImportStateVerifyIgnore: []string{names.AttrForceDestroy, "user_data_replace_on_change"}, }, { Config: testAccInstanceConfig_getPasswordData(rName, publicKey, true), @@ -4125,7 +4509,7 @@ func TestAccEC2Instance_cpuOptionsAmdSevSnpUnspecifiedToDisabledToEnabledToUnspe ResourceName: resourceName, ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"user_data_replace_on_change"}, + ImportStateVerifyIgnore: []string{names.AttrForceDestroy, "user_data_replace_on_change"}, }, { // test DiffSuppressFunc to suppress "" to "disabled" @@ -4192,7 +4576,7 @@ func TestAccEC2Instance_cpuOptionsAmdSevSnpUnspecifiedToEnabledToDisabledToUnspe ResourceName: resourceName, ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"user_data_replace_on_change"}, + ImportStateVerifyIgnore: []string{names.AttrForceDestroy, "user_data_replace_on_change"}, }, { // expect recreation when it is enabled @@ -4257,7 +4641,7 @@ func TestAccEC2Instance_cpuOptionsAmdSevSnpEnabledToDisabled(t *testing.T) { ResourceName: resourceName, ImportState: true, 
ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"user_data_replace_on_change"}, + ImportStateVerifyIgnore: []string{names.AttrForceDestroy, "user_data_replace_on_change"}, }, { Config: testAccInstanceConfig_cpuOptionsAmdSevSnp(rName, string(awstypes.AmdSevSnpSpecificationDisabled)), @@ -4301,7 +4685,7 @@ func TestAccEC2Instance_cpuOptionsAmdSevSnpDisabledToEnabled(t *testing.T) { ResourceName: resourceName, ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"user_data_replace_on_change"}, + ImportStateVerifyIgnore: []string{names.AttrForceDestroy, "user_data_replace_on_change"}, }, { Config: testAccInstanceConfig_cpuOptionsAmdSevSnp(rName, string(awstypes.AmdSevSnpSpecificationEnabled)), @@ -4352,7 +4736,7 @@ func TestAccEC2Instance_cpuOptionsAmdSevSnpCoreThreads(t *testing.T) { ResourceName: resourceName, ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"user_data_replace_on_change"}, + ImportStateVerifyIgnore: []string{names.AttrForceDestroy, "user_data_replace_on_change"}, }, { Config: testAccInstanceConfig_cpuOptionsAmdSevSnpCoreThreads(rName, string(awstypes.AmdSevSnpSpecificationDisabled), updatedCoreCount, updatedThreadsPerCore), @@ -4399,7 +4783,7 @@ func TestAccEC2Instance_cpuOptionsCoreThreads(t *testing.T) { ResourceName: resourceName, ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"user_data_replace_on_change"}, + ImportStateVerifyIgnore: []string{names.AttrForceDestroy, "user_data_replace_on_change"}, }, { Config: testAccInstanceConfig_cpuOptionsCoreThreads(rName, updatedCoreCount, updatedThreadsPerCore), @@ -4443,7 +4827,7 @@ func TestAccEC2Instance_cpuOptionsCoreThreadsUnspecifiedToSpecified(t *testing.T ResourceName: resourceName, ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"user_data_replace_on_change"}, + ImportStateVerifyIgnore: []string{names.AttrForceDestroy, "user_data_replace_on_change"}, }, { // EC2 
instance should not be recreated @@ -4557,7 +4941,7 @@ func TestAccEC2Instance_CreditSpecificationUnspecifiedToEmpty_nonBurstable(t *te ResourceName: resourceName, ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"user_data_replace_on_change"}, + ImportStateVerifyIgnore: []string{names.AttrForceDestroy, "user_data_replace_on_change"}, }, { Config: testAccInstanceConfig_creditSpecificationEmptyNonBurstable(rName), @@ -4593,7 +4977,7 @@ func TestAccEC2Instance_CreditSpecification_unspecifiedDefaultsToStandard(t *tes ResourceName: resourceName, ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"user_data_replace_on_change"}, + ImportStateVerifyIgnore: []string{names.AttrForceDestroy, "user_data_replace_on_change"}, }, }, }) @@ -4623,7 +5007,7 @@ func TestAccEC2Instance_CreditSpecification_standardCPUCredits(t *testing.T) { ResourceName: resourceName, ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"user_data_replace_on_change"}, + ImportStateVerifyIgnore: []string{names.AttrForceDestroy, "user_data_replace_on_change"}, }, { Config: testAccInstanceConfig_creditSpecificationUnspecified(rName), @@ -4661,7 +5045,7 @@ func TestAccEC2Instance_CreditSpecification_unlimitedCPUCredits(t *testing.T) { ResourceName: resourceName, ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"user_data_replace_on_change"}, + ImportStateVerifyIgnore: []string{names.AttrForceDestroy, "user_data_replace_on_change"}, }, { Config: testAccInstanceConfig_creditSpecificationUnspecified(rName), @@ -4699,7 +5083,7 @@ func TestAccEC2Instance_CreditSpecificationUnknownCPUCredits_t2(t *testing.T) { ResourceName: resourceName, ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"user_data_replace_on_change"}, + ImportStateVerifyIgnore: []string{names.AttrForceDestroy, "user_data_replace_on_change"}, }, }, }) @@ -4729,7 +5113,7 @@ func 
TestAccEC2Instance_CreditSpecificationUnknownCPUCredits_t3(t *testing.T) { ResourceName: resourceName, ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"user_data_replace_on_change"}, + ImportStateVerifyIgnore: []string{names.AttrForceDestroy, "user_data_replace_on_change"}, }, }, }) @@ -4759,7 +5143,7 @@ func TestAccEC2Instance_CreditSpecificationUnknownCPUCredits_t3a(t *testing.T) { ResourceName: resourceName, ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"user_data_replace_on_change"}, + ImportStateVerifyIgnore: []string{names.AttrForceDestroy, "user_data_replace_on_change"}, }, }, }) @@ -4789,7 +5173,7 @@ func TestAccEC2Instance_CreditSpecificationUnknownCPUCredits_t4g(t *testing.T) { ResourceName: resourceName, ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"user_data_replace_on_change"}, + ImportStateVerifyIgnore: []string{names.AttrForceDestroy, "user_data_replace_on_change"}, }, }, }) @@ -4819,7 +5203,7 @@ func TestAccEC2Instance_CreditSpecification_updateCPUCredits(t *testing.T) { ResourceName: resourceName, ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"user_data_replace_on_change"}, + ImportStateVerifyIgnore: []string{names.AttrForceDestroy, "user_data_replace_on_change"}, }, { Config: testAccInstanceConfig_creditSpecificationUnlimitedCPUCredits(rName), @@ -4893,7 +5277,7 @@ func TestAccEC2Instance_CreditSpecificationT3_unspecifiedDefaultsToUnlimited(t * ResourceName: resourceName, ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"user_data_replace_on_change"}, + ImportStateVerifyIgnore: []string{names.AttrForceDestroy, "user_data_replace_on_change"}, }, }, }) @@ -4923,7 +5307,7 @@ func TestAccEC2Instance_CreditSpecificationT3_standardCPUCredits(t *testing.T) { ResourceName: resourceName, ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: 
[]string{"user_data_replace_on_change"}, + ImportStateVerifyIgnore: []string{names.AttrForceDestroy, "user_data_replace_on_change"}, }, { Config: testAccInstanceConfig_creditSpecificationUnspecifiedT3(rName), @@ -4961,7 +5345,7 @@ func TestAccEC2Instance_CreditSpecificationT3_unlimitedCPUCredits(t *testing.T) ResourceName: resourceName, ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"user_data_replace_on_change"}, + ImportStateVerifyIgnore: []string{names.AttrForceDestroy, "user_data_replace_on_change"}, }, { Config: testAccInstanceConfig_creditSpecificationUnspecifiedT3(rName), @@ -4999,7 +5383,7 @@ func TestAccEC2Instance_CreditSpecificationT3_updateCPUCredits(t *testing.T) { ResourceName: resourceName, ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"user_data_replace_on_change"}, + ImportStateVerifyIgnore: []string{names.AttrForceDestroy, "user_data_replace_on_change"}, }, { Config: testAccInstanceConfig_creditSpecificationUnlimitedCPUCreditsT3(rName), @@ -5045,7 +5429,7 @@ func TestAccEC2Instance_CreditSpecificationStandardCPUCredits_t2Tot3Taint(t *tes ResourceName: resourceName, ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"user_data_replace_on_change"}, + ImportStateVerifyIgnore: []string{names.AttrForceDestroy, "user_data_replace_on_change"}, }, { Config: testAccInstanceConfig_creditSpecificationStandardCPUCreditsT3(rName), @@ -5084,7 +5468,7 @@ func TestAccEC2Instance_CreditSpecificationUnlimitedCPUCredits_t2Tot3Taint(t *te ResourceName: resourceName, ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"user_data_replace_on_change"}, + ImportStateVerifyIgnore: []string{names.AttrForceDestroy, "user_data_replace_on_change"}, }, { Config: testAccInstanceConfig_creditSpecificationUnlimitedCPUCreditsT3(rName), @@ -5185,7 +5569,7 @@ func TestAccEC2Instance_UserData_basic(t *testing.T) { ResourceName: resourceName, ImportState: true, 
ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"user_data_replace_on_change"}, + ImportStateVerifyIgnore: []string{names.AttrForceDestroy, "user_data_replace_on_change"}, }, }, }) @@ -5225,7 +5609,7 @@ func TestAccEC2Instance_UserData_update(t *testing.T) { ResourceName: resourceName, ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"user_data_replace_on_change"}, + ImportStateVerifyIgnore: []string{names.AttrForceDestroy, "user_data_replace_on_change"}, }, }, }) @@ -5259,7 +5643,7 @@ func TestAccEC2Instance_UserData_stringToEncodedString(t *testing.T) { ResourceName: resourceName, ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"user_data", "user_data_replace_on_change"}, + ImportStateVerifyIgnore: []string{names.AttrForceDestroy, "user_data", "user_data_replace_on_change"}, }, }, }) @@ -5292,7 +5676,7 @@ func TestAccEC2Instance_UserData_emptyStringToUnspecified(t *testing.T) { ResourceName: resourceName, ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"user_data", "user_data_replace_on_change"}, + ImportStateVerifyIgnore: []string{names.AttrForceDestroy, "user_data", "user_data_replace_on_change"}, }, // Switching should show no difference { @@ -5337,7 +5721,7 @@ func TestAccEC2Instance_UserData_unspecifiedToEmptyString(t *testing.T) { ResourceName: resourceName, ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"user_data_replace_on_change"}, + ImportStateVerifyIgnore: []string{names.AttrForceDestroy, "user_data_replace_on_change"}, }, // Switching should show no difference { @@ -5381,7 +5765,7 @@ func TestAccEC2Instance_UserData_ReplaceOnChange_On(t *testing.T) { ResourceName: resourceName, ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"user_data_replace_on_change"}, + ImportStateVerifyIgnore: []string{names.AttrForceDestroy, "user_data_replace_on_change"}, }, // Switching should force a 
recreate { @@ -5467,7 +5851,7 @@ func TestAccEC2Instance_UserData_ReplaceOnChange_Off(t *testing.T) { ResourceName: resourceName, ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"user_data_replace_on_change"}, + ImportStateVerifyIgnore: []string{names.AttrForceDestroy, "user_data_replace_on_change"}, }, // Switching should not force a recreate { @@ -5553,7 +5937,7 @@ func TestAccEC2Instance_hibernation(t *testing.T) { ResourceName: resourceName, ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"user_data_replace_on_change"}, + ImportStateVerifyIgnore: []string{names.AttrForceDestroy, "user_data_replace_on_change"}, }, { Config: testAccInstanceConfig_hibernation(rName, false), @@ -5643,7 +6027,7 @@ func TestAccEC2Instance_metadataOptions(t *testing.T) { ResourceName: resourceName, ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"user_data_replace_on_change"}, + ImportStateVerifyIgnore: []string{names.AttrForceDestroy, "user_data_replace_on_change"}, }, }, }) @@ -5673,7 +6057,7 @@ func TestAccEC2Instance_enclaveOptions(t *testing.T) { ResourceName: resourceName, ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"user_data_replace_on_change"}, + ImportStateVerifyIgnore: []string{names.AttrForceDestroy, "user_data_replace_on_change"}, }, { Config: testAccInstanceConfig_enclaveOptions(rName, false), @@ -5718,7 +6102,7 @@ func TestAccEC2Instance_CapacityReservation_unspecifiedDefaultsToOpen(t *testing ResourceName: resourceName, ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"user_data_replace_on_change"}, + ImportStateVerifyIgnore: []string{names.AttrForceDestroy, "user_data_replace_on_change"}, }, // Adding 'open' preference should show no difference { @@ -5761,7 +6145,7 @@ func TestAccEC2Instance_CapacityReservationPreference_open(t *testing.T) { ResourceName: resourceName, ImportState: true, ImportStateVerify: 
true, - ImportStateVerifyIgnore: []string{"user_data_replace_on_change"}, + ImportStateVerifyIgnore: []string{names.AttrForceDestroy, "user_data_replace_on_change"}, }, }, }) @@ -5792,7 +6176,7 @@ func TestAccEC2Instance_CapacityReservationPreference_none(t *testing.T) { ResourceName: resourceName, ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"user_data_replace_on_change"}, + ImportStateVerifyIgnore: []string{names.AttrForceDestroy, "user_data_replace_on_change"}, }, }, }) @@ -5823,7 +6207,7 @@ func TestAccEC2Instance_CapacityReservation_targetID(t *testing.T) { ResourceName: resourceName, ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"user_data_replace_on_change"}, + ImportStateVerifyIgnore: []string{names.AttrForceDestroy, "user_data_replace_on_change"}, }, }, }) @@ -5943,7 +6327,7 @@ func TestAccEC2Instance_basicWithSpot(t *testing.T) { ResourceName: resourceName, ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"user_data_replace_on_change"}, + ImportStateVerifyIgnore: []string{names.AttrForceDestroy, "user_data_replace_on_change"}, }, }, }) @@ -6944,6 +7328,8 @@ resource "aws_instance" "test" { subnet_id = aws_subnet.test.id disable_api_stop = %[2]t + force_destroy = true + tags = { Name = %[1]q } @@ -6961,6 +7347,7 @@ resource "aws_instance" "test" { instance_type = "t2.small" subnet_id = aws_subnet.test.id disable_api_termination = %[2]t + force_destroy = true tags = { Name = %[1]q @@ -7069,6 +7456,34 @@ resource "aws_instance" "test" { `, rName)) } +func testAccInstanceConfig_placementGroupID(rName string) string { + return acctest.ConfigCompose( + acctest.ConfigLatestAmazonLinux2HVMEBSX8664AMI(), + testAccInstanceConfig_vpcBase(rName, false, 0), + fmt.Sprintf(` +resource "aws_placement_group" "test" { + name = %[1]q + strategy = "cluster" +} + +# Limitations: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/placement-groups.html#concepts-placement-groups 
+resource "aws_instance" "test" { + ami = data.aws_ami.amzn2-ami-minimal-hvm-ebs-x86_64.id + instance_type = "c5.large" + subnet_id = aws_subnet.test.id + associate_public_ip_address = true + placement_group_id = aws_placement_group.test.placement_group_id + + # pre-encoded base64 data + user_data_base64 = "3dc39dda39be1205215e776bad998da361a5955d" + + tags = { + Name = %[1]q + } +} +`, rName)) +} + func testAccInstanceConfig_placementPartitionNumber(rName string) string { return acctest.ConfigCompose( acctest.ConfigLatestAmazonLinux2HVMEBSX8664AMI(), @@ -7192,6 +7607,24 @@ resource "aws_instance" "test" { `, rName, ipv6AddressCount)) } +func testAccInstance_ipv6AddressesExplicit(rName string, addressCount int) string { + return acctest.ConfigCompose( + acctest.ConfigLatestAmazonLinux2HVMEBSX8664AMI(), + testAccInstanceConfig_vpcIPv6Base(rName), + fmt.Sprintf(` +resource "aws_instance" "test" { + ami = data.aws_ami.amzn2-ami-minimal-hvm-ebs-x86_64.id + instance_type = "t2.medium" + subnet_id = aws_subnet.test.id + ipv6_addresses = [for i in range(%[2]d) : cidrhost(aws_subnet.test.ipv6_cidr_block, i + 10)] + + tags = { + Name = %[1]q + } +} +`, rName, addressCount)) +} + func testAccInstanceConfig_ebsKMSKeyARN(rName string) string { return acctest.ConfigCompose(acctest.ConfigLatestAmazonLinux2HVMEBSX8664AMI(), fmt.Sprintf(` resource "aws_kms_key" "test" { @@ -8221,7 +8654,27 @@ resource "aws_instance" "test" { `, rName)) } -func testAccInstanceConfig_primaryNetworkInterface(rName string) string { +func testAccInstanceConfig_primaryNetworkInterface_basic(rName string) string { + return acctest.ConfigCompose( + acctest.ConfigLatestAmazonLinux2HVMEBSX8664AMI(), + testAccInstanceConfig_vpcBase(rName, false, 0), ` +resource "aws_instance" "test" { + ami = data.aws_ami.amzn2-ami-minimal-hvm-ebs-x86_64.id + instance_type = "t2.micro" + + primary_network_interface { + network_interface_id = aws_network_interface.test.id + } +} + +resource "aws_network_interface" "test" { + 
subnet_id = aws_subnet.test.id + private_ips = ["10.1.1.42"] +} +`) +} + +func testAccInstanceConfig_networkInterface_primaryNetworkInterface(rName string) string { return acctest.ConfigCompose( acctest.ConfigLatestAmazonLinux2HVMEBSX8664AMI(), testAccInstanceConfig_vpcBase(rName, false, 0), @@ -8251,7 +8704,7 @@ resource "aws_instance" "test" { `, rName)) } -func testAccInstanceConfig_networkCardIndex(rName string) string { +func testAccInstanceConfig_networkInterface_networkCardIndex(rName string) string { return acctest.ConfigCompose( acctest.ConfigLatestAmazonLinux2HVMEBSX8664AMI(), testAccInstanceConfig_vpcBase(rName, false, 0), @@ -8282,7 +8735,7 @@ resource "aws_instance" "test" { `, rName)) } -func testAccInstanceConfig_primaryNetworkInterfaceSourceDestCheck(rName string) string { +func testAccInstanceConfig_networkInterface_primaryNetworkInterfaceSourceDestCheck(rName string) string { return acctest.ConfigCompose( acctest.ConfigLatestAmazonLinux2HVMEBSX8664AMI(), testAccInstanceConfig_vpcBase(rName, false, 0), @@ -8313,7 +8766,7 @@ resource "aws_instance" "test" { `, rName)) } -func testAccInstanceConfig_addSecondaryNetworkInterfaceBefore(rName string) string { +func testAccInstanceConfig_networkInterface_attachSecondaryNetworkInterface_inlineAttachment_Setup(rName string) string { return acctest.ConfigCompose( acctest.ConfigLatestAmazonLinux2HVMEBSX8664AMI(), testAccInstanceConfig_vpcBase(rName, false, 0), @@ -8352,7 +8805,7 @@ resource "aws_instance" "test" { `, rName)) } -func testAccInstanceConfig_addSecondaryNetworkInterfaceAfter(rName string) string { +func testAccInstanceConfig_networkInterface_attachSecondaryNetworkInterface_inlineAttachment(rName string) string { return acctest.ConfigCompose( acctest.ConfigLatestAmazonLinux2HVMEBSX8664AMI(), testAccInstanceConfig_vpcBase(rName, false, 0), @@ -8397,6 +8850,173 @@ resource "aws_instance" "test" { `, rName)) } +func 
testAccInstanceConfig_networkInterface_attachSecondaryNetworkInterface_attachmentResource_Setup(rName string) string { + return acctest.ConfigCompose( + acctest.ConfigLatestAmazonLinux2HVMEBSX8664AMI(), + testAccInstanceConfig_vpcBase(rName, false, 0), + fmt.Sprintf(` +resource "aws_instance" "test" { + ami = data.aws_ami.amzn2-ami-minimal-hvm-ebs-x86_64.id + instance_type = "t2.micro" + + network_interface { + network_interface_id = aws_network_interface.primary.id + device_index = 0 + } + + tags = { + Name = %[1]q + } +} + +resource "aws_network_interface" "primary" { + subnet_id = aws_subnet.test.id + private_ips = ["10.1.1.42"] + + tags = { + Name = %[1]q + } +} + +resource "aws_network_interface" "secondary" { + subnet_id = aws_subnet.test.id + private_ips = ["10.1.1.43"] + + tags = { + Name = %[1]q + } +} +`, rName)) +} + +func testAccInstanceConfig_networkInterface_attachSecondaryNetworkInterface_attachmentResource(rName string) string { + return acctest.ConfigCompose( + acctest.ConfigLatestAmazonLinux2HVMEBSX8664AMI(), + testAccInstanceConfig_vpcBase(rName, false, 0), + fmt.Sprintf(` +resource "aws_instance" "test" { + ami = data.aws_ami.amzn2-ami-minimal-hvm-ebs-x86_64.id + instance_type = "t2.micro" + + network_interface { + network_interface_id = aws_network_interface.primary.id + device_index = 0 + } + + tags = { + Name = %[1]q + } +} + +resource "aws_network_interface" "primary" { + subnet_id = aws_subnet.test.id + private_ips = ["10.1.1.42"] + + tags = { + Name = %[1]q + } +} + +resource "aws_network_interface" "secondary" { + subnet_id = aws_subnet.test.id + private_ips = ["10.1.1.43"] + + tags = { + Name = %[1]q + } +} + +resource "aws_network_interface_attachment" "secondary" { + instance_id = aws_instance.test.id + network_interface_id = aws_network_interface.secondary.id + device_index = 1 +} +`, rName)) +} + +func testAccInstanceConfig_networkInterface_addSecondaryNetworkInterfaceBefore(rName string) string { + return acctest.ConfigCompose( + 
acctest.ConfigLatestAmazonLinux2HVMEBSX8664AMI(), + testAccInstanceConfig_vpcBase(rName, false, 0), + fmt.Sprintf(` +resource "aws_instance" "test" { + ami = data.aws_ami.amzn2-ami-minimal-hvm-ebs-x86_64.id + instance_type = "t2.micro" + + network_interface { + network_interface_id = aws_network_interface.primary.id + device_index = 0 + } + + tags = { + Name = %[1]q + } +} + +resource "aws_network_interface" "primary" { + subnet_id = aws_subnet.test.id + private_ips = ["10.1.1.42"] + + tags = { + Name = %[1]q + } +} + +resource "aws_network_interface" "secondary" { + subnet_id = aws_subnet.test.id + private_ips = ["10.1.1.43"] + + tags = { + Name = %[1]q + } +} +`, rName)) +} + +func testAccInstanceConfig_networkInterface_addSecondaryNetworkInterface(rName string) string { + return acctest.ConfigCompose( + acctest.ConfigLatestAmazonLinux2HVMEBSX8664AMI(), + testAccInstanceConfig_vpcBase(rName, false, 0), + fmt.Sprintf(` +resource "aws_instance" "test" { + ami = data.aws_ami.amzn2-ami-minimal-hvm-ebs-x86_64.id + instance_type = "t2.micro" + + network_interface { + network_interface_id = aws_network_interface.primary.id + device_index = 0 + } + + network_interface { + network_interface_id = aws_network_interface.secondary.id + device_index = 1 + } + + tags = { + Name = %[1]q + } +} + +resource "aws_network_interface" "primary" { + subnet_id = aws_subnet.test.id + private_ips = ["10.1.1.42"] + + tags = { + Name = %[1]q + } +} + +resource "aws_network_interface" "secondary" { + subnet_id = aws_subnet.test.id + private_ips = ["10.1.1.43"] + + tags = { + Name = %[1]q + } +} +`, rName)) +} + func testAccInstanceConfig_addSecurityGroupBefore(rName string) string { return acctest.ConfigCompose( acctest.ConfigLatestAmazonLinux2HVMEBSX8664AMI(), @@ -9428,7 +10048,7 @@ resource "aws_instance" "test" { resource "aws_ec2_capacity_reservation" "test" { instance_type = data.aws_ec2_instance_type_offering.available.instance_type instance_platform = %[2]q - availability_zone = 
data.aws_availability_zones.available.names[0] + availability_zone = data.aws_availability_zones.available.names[1] instance_count = 10 tags = { diff --git a/internal/service/ec2/ec2_instance_type_offering_data_source.go b/internal/service/ec2/ec2_instance_type_offering_data_source.go index 9e45c474a778..00aa9f18259e 100644 --- a/internal/service/ec2/ec2_instance_type_offering_data_source.go +++ b/internal/service/ec2/ec2_instance_type_offering_data_source.go @@ -33,6 +33,10 @@ func dataSourceInstanceTypeOffering() *schema.Resource { Type: schema.TypeString, Computed: true, }, + names.AttrLocation: { + Type: schema.TypeString, + Computed: true, + }, "location_type": { Type: schema.TypeString, Optional: true, @@ -71,44 +75,42 @@ func dataSourceInstanceTypeOfferingRead(ctx context.Context, d *schema.ResourceD return sdkdiag.AppendErrorf(diags, "no EC2 Instance Type Offerings found matching criteria; try different search") } - var foundInstanceTypes []string - - for _, instanceTypeOffering := range instanceTypeOfferings { - foundInstanceTypes = append(foundInstanceTypes, string(instanceTypeOffering.InstanceType)) - } - - var resultInstanceType string + var resultInstanceTypeOffering *awstypes.InstanceTypeOffering // Search preferred instance types in their given order and set result // instance type for first match found if v, ok := d.GetOk("preferred_instance_types"); ok { for _, v := range v.([]any) { if v, ok := v.(string); ok { - if slices.Contains(foundInstanceTypes, v) { - resultInstanceType = v + if i := slices.IndexFunc(instanceTypeOfferings, func(e awstypes.InstanceTypeOffering) bool { + return string(e.InstanceType) == v + }); i != -1 { + resultInstanceTypeOffering = &instanceTypeOfferings[i] } - if resultInstanceType != "" { + if resultInstanceTypeOffering != nil { break } } } } - if resultInstanceType == "" && len(foundInstanceTypes) > 1 { + if resultInstanceTypeOffering == nil && len(instanceTypeOfferings) > 1 { return sdkdiag.AppendErrorf(diags, 
"multiple EC2 Instance Offerings found matching criteria; try different search") } - if resultInstanceType == "" && len(foundInstanceTypes) == 1 { - resultInstanceType = foundInstanceTypes[0] + if resultInstanceTypeOffering == nil && len(instanceTypeOfferings) == 1 { + resultInstanceTypeOffering = &instanceTypeOfferings[0] } - if resultInstanceType == "" { + if resultInstanceTypeOffering == nil { return sdkdiag.AppendErrorf(diags, "no EC2 Instance Type Offerings found matching criteria; try different search") } - d.SetId(resultInstanceType) - d.Set(names.AttrInstanceType, resultInstanceType) + d.SetId(string(resultInstanceTypeOffering.InstanceType)) + d.Set(names.AttrInstanceType, string(resultInstanceTypeOffering.InstanceType)) + d.Set(names.AttrLocation, resultInstanceTypeOffering.Location) + d.Set("location_type", string(resultInstanceTypeOffering.LocationType)) return diags } diff --git a/internal/service/ec2/ec2_instance_type_offering_data_source_test.go b/internal/service/ec2/ec2_instance_type_offering_data_source_test.go index 257f372228a0..622362899f87 100644 --- a/internal/service/ec2/ec2_instance_type_offering_data_source_test.go +++ b/internal/service/ec2/ec2_instance_type_offering_data_source_test.go @@ -14,6 +14,7 @@ import ( func TestAccEC2InstanceTypeOfferingDataSource_filter(t *testing.T) { ctx := acctest.Context(t) dataSourceName := "data.aws_ec2_instance_type_offering.test" + dataSourceOfferingsName := "data.aws_ec2_instance_type_offerings.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheckInstanceTypeOfferings(ctx, t) }, @@ -24,7 +25,9 @@ func TestAccEC2InstanceTypeOfferingDataSource_filter(t *testing.T) { { Config: testAccInstanceTypeOfferingDataSourceConfig_filter(), Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttrSet(dataSourceName, names.AttrInstanceType), + resource.TestCheckResourceAttrPair(dataSourceName, names.AttrInstanceType, dataSourceOfferingsName, 
"instance_types.0"), + resource.TestCheckResourceAttrPair(dataSourceName, names.AttrLocation, dataSourceOfferingsName, "locations.0"), + resource.TestCheckResourceAttrPair(dataSourceName, "location_type", dataSourceOfferingsName, "location_types.0"), ), }, }, @@ -34,6 +37,7 @@ func TestAccEC2InstanceTypeOfferingDataSource_filter(t *testing.T) { func TestAccEC2InstanceTypeOfferingDataSource_locationType(t *testing.T) { ctx := acctest.Context(t) dataSourceName := "data.aws_ec2_instance_type_offering.test" + dataSourceOfferingsName := "data.aws_ec2_instance_type_offerings.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheckInstanceTypeOfferings(ctx, t) }, @@ -44,7 +48,9 @@ func TestAccEC2InstanceTypeOfferingDataSource_locationType(t *testing.T) { { Config: testAccInstanceTypeOfferingDataSourceConfig_location(), Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttrSet(dataSourceName, names.AttrInstanceType), + resource.TestCheckResourceAttrPair(dataSourceName, names.AttrInstanceType, dataSourceOfferingsName, "instance_types.0"), + resource.TestCheckResourceAttrPair(dataSourceName, names.AttrLocation, dataSourceOfferingsName, "locations.0"), + resource.TestCheckResourceAttrPair(dataSourceName, "location_type", dataSourceOfferingsName, "location_types.0"), ), }, }, @@ -65,6 +71,8 @@ func TestAccEC2InstanceTypeOfferingDataSource_preferredInstanceTypes(t *testing. 
Config: testAccInstanceTypeOfferingDataSourceConfig_preferreds(), Check: resource.ComposeTestCheckFunc( resource.TestCheckResourceAttr(dataSourceName, names.AttrInstanceType, "t3.micro"), + resource.TestCheckResourceAttrSet(dataSourceName, names.AttrLocation), + resource.TestCheckResourceAttrSet(dataSourceName, "location_type"), ), }, }, diff --git a/internal/service/ec2/ec2_instances_data_source.go b/internal/service/ec2/ec2_instances_data_source.go index e0ed43bdfdaa..c6dbcdedb8b1 100644 --- a/internal/service/ec2/ec2_instances_data_source.go +++ b/internal/service/ec2/ec2_instances_data_source.go @@ -94,15 +94,13 @@ func dataSourceInstancesRead(ctx context.Context, d *schema.ResourceData, meta a input.Filters = nil } - output, err := findInstances(ctx, conn, &input) - - if err != nil { - return sdkdiag.AppendErrorf(diags, "reading EC2 Instances: %s", err) - } - var instanceIDs, privateIPs, publicIPs, ipv6Addresses []string - for _, v := range output { + for v, err := range listInstances(ctx, conn, &input) { + if err != nil { + return sdkdiag.AppendErrorf(diags, "reading EC2 Instances: %s", err) + } + instanceIDs = append(instanceIDs, aws.ToString(v.InstanceId)) if privateIP := aws.ToString(v.PrivateIpAddress); privateIP != "" { privateIPs = append(privateIPs, privateIP) diff --git a/internal/service/ec2/ec2_key_pair.go b/internal/service/ec2/ec2_key_pair.go index 124b52ba7234..63370f54150b 100644 --- a/internal/service/ec2/ec2_key_pair.go +++ b/internal/service/ec2/ec2_key_pair.go @@ -10,9 +10,8 @@ import ( "strings" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/arn" "github.com/aws/aws-sdk-go-v2/service/ec2" - "github.com/aws/aws-sdk-go-v2/service/ec2/types" + awstypes "github.com/aws/aws-sdk-go-v2/service/ec2/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" @@ -104,7 +103,7 @@ func resourceKeyPairCreate(ctx 
context.Context, d *schema.ResourceData, meta any input := ec2.ImportKeyPairInput{ KeyName: aws.String(keyName), PublicKeyMaterial: []byte(d.Get(names.AttrPublicKey).(string)), - TagSpecifications: getTagSpecificationsIn(ctx, types.ResourceTypeKeyPair), + TagSpecifications: getTagSpecificationsIn(ctx, awstypes.ResourceTypeKeyPair), } output, err := conn.ImportKeyPair(ctx, &input) @@ -120,7 +119,8 @@ func resourceKeyPairCreate(ctx context.Context, d *schema.ResourceData, meta any func resourceKeyPairRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).EC2Client(ctx) + c := meta.(*conns.AWSClient) + conn := c.EC2Client(ctx) keyPair, err := findKeyPairByName(ctx, conn, d.Id()) @@ -134,14 +134,7 @@ func resourceKeyPairRead(ctx context.Context, d *schema.ResourceData, meta any) return sdkdiag.AppendErrorf(diags, "reading EC2 Key Pair (%s): %s", d.Id(), err) } - arn := arn.ARN{ - Partition: meta.(*conns.AWSClient).Partition(ctx), - Service: names.EC2, - Region: meta.(*conns.AWSClient).Region(ctx), - AccountID: meta.(*conns.AWSClient).AccountID(ctx), - Resource: "key-pair/" + d.Id(), - }.String() - d.Set(names.AttrARN, arn) + d.Set(names.AttrARN, keyPairARN(ctx, c, d.Id())) d.Set("fingerprint", keyPair.KeyFingerprint) d.Set("key_name", keyPair.KeyName) d.Set("key_name_prefix", create.NamePrefixFromName(aws.ToString(keyPair.KeyName))) @@ -195,3 +188,6 @@ func openSSHPublicKeysEqual(v1, v2 string) bool { return key1.Type() == key2.Type() && bytes.Equal(key1.Marshal(), key2.Marshal()) } +func keyPairARN(ctx context.Context, c *conns.AWSClient, keyName string) string { + return c.RegionalARN(ctx, names.EC2, "key-pair/"+keyName) +} diff --git a/internal/service/ec2/ec2_key_pair_data_source.go b/internal/service/ec2/ec2_key_pair_data_source.go index efb1b56cbf97..f1c05aa670d2 100644 --- a/internal/service/ec2/ec2_key_pair_data_source.go +++ b/internal/service/ec2/ec2_key_pair_data_source.go 
@@ -8,7 +8,6 @@ import ( "time" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/arn" "github.com/aws/aws-sdk-go-v2/service/ec2" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" @@ -72,7 +71,8 @@ func dataSourceKeyPair() *schema.Resource { func dataSourceKeyPairRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).EC2Client(ctx) + c := meta.(*conns.AWSClient) + conn := c.EC2Client(ctx) input := ec2.DescribeKeyPairsInput{} @@ -100,14 +100,7 @@ func dataSourceKeyPairRead(ctx context.Context, d *schema.ResourceData, meta any d.SetId(aws.ToString(keyPair.KeyPairId)) keyName := aws.ToString(keyPair.KeyName) - arn := arn.ARN{ - Partition: meta.(*conns.AWSClient).Partition(ctx), - Service: "ec2", - Region: meta.(*conns.AWSClient).Region(ctx), - AccountID: meta.(*conns.AWSClient).AccountID(ctx), - Resource: "key-pair/" + keyName, - }.String() - d.Set(names.AttrARN, arn) + d.Set(names.AttrARN, keyPairARN(ctx, c, keyName)) d.Set(names.AttrCreateTime, aws.ToTime(keyPair.CreateTime).Format(time.RFC3339)) d.Set("fingerprint", keyPair.KeyFingerprint) d.Set("include_public_key", input.IncludePublicKey) diff --git a/internal/service/ec2/ec2_key_pair_test.go b/internal/service/ec2/ec2_key_pair_test.go index b9b0484dc76e..b376dbac5bff 100644 --- a/internal/service/ec2/ec2_key_pair_test.go +++ b/internal/service/ec2/ec2_key_pair_test.go @@ -9,7 +9,7 @@ import ( "testing" "github.com/YakDriver/regexache" - "github.com/aws/aws-sdk-go-v2/service/ec2/types" + awstypes "github.com/aws/aws-sdk-go-v2/service/ec2/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" @@ -22,7 +22,7 @@ import ( func TestAccEC2KeyPair_basic(t *testing.T) { ctx := acctest.Context(t) - var keyPair 
types.KeyPairInfo + var keyPair awstypes.KeyPairInfo resourceName := "aws_key_pair.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -60,7 +60,7 @@ func TestAccEC2KeyPair_basic(t *testing.T) { func TestAccEC2KeyPair_tags(t *testing.T) { ctx := acctest.Context(t) - var keyPair types.KeyPairInfo + var keyPair awstypes.KeyPairInfo resourceName := "aws_key_pair.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -112,7 +112,7 @@ func TestAccEC2KeyPair_tags(t *testing.T) { func TestAccEC2KeyPair_nameGenerated(t *testing.T) { ctx := acctest.Context(t) - var keyPair types.KeyPairInfo + var keyPair awstypes.KeyPairInfo resourceName := "aws_key_pair.test" publicKey, _, err := sdkacctest.RandSSHKeyPair(acctest.DefaultEmailAddress) @@ -146,7 +146,7 @@ func TestAccEC2KeyPair_nameGenerated(t *testing.T) { func TestAccEC2KeyPair_namePrefix(t *testing.T) { ctx := acctest.Context(t) - var keyPair types.KeyPairInfo + var keyPair awstypes.KeyPairInfo resourceName := "aws_key_pair.test" publicKey, _, err := sdkacctest.RandSSHKeyPair(acctest.DefaultEmailAddress) @@ -180,7 +180,7 @@ func TestAccEC2KeyPair_namePrefix(t *testing.T) { func TestAccEC2KeyPair_disappears(t *testing.T) { ctx := acctest.Context(t) - var keyPair types.KeyPairInfo + var keyPair awstypes.KeyPairInfo resourceName := "aws_key_pair.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -233,7 +233,7 @@ func testAccCheckKeyPairDestroy(ctx context.Context) resource.TestCheckFunc { } } -func testAccCheckKeyPairExists(ctx context.Context, n string, v *types.KeyPairInfo) resource.TestCheckFunc { +func testAccCheckKeyPairExists(ctx context.Context, n string, v *awstypes.KeyPairInfo) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] if !ok { diff --git a/internal/service/ec2/ec2_launch_template.go b/internal/service/ec2/ec2_launch_template.go index e7b8760bcdd3..4edf26b716c6 100644 --- 
a/internal/service/ec2/ec2_launch_template.go +++ b/internal/service/ec2/ec2_launch_template.go @@ -10,14 +10,12 @@ import ( "time" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/arn" "github.com/aws/aws-sdk-go-v2/service/ec2" awstypes "github.com/aws/aws-sdk-go-v2/service/ec2/types" "github.com/hashicorp/aws-sdk-go-base/v2/tfawserr" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -86,7 +84,7 @@ func resourceLaunchTemplate() *schema.Resource { names.AttrKMSKeyID: { Type: schema.TypeString, Optional: true, - ValidateFunc: verify.ValidARN, + ValidateFunc: verify.ValidKMSKeyID, }, names.AttrSnapshotID: { Type: schema.TypeString, @@ -96,7 +94,7 @@ func resourceLaunchTemplate() *schema.Resource { Type: schema.TypeInt, Computed: true, Optional: true, - ValidateFunc: validation.IntBetween(125, 1000), + ValidateFunc: validation.IntBetween(125, 2000), }, "volume_initialization_rate": { Type: schema.TypeInt, @@ -898,9 +896,15 @@ func resourceLaunchTemplate() *schema.Resource { Type: schema.TypeString, Optional: true, }, + "group_id": { + Type: schema.TypeString, + Optional: true, + ConflictsWith: []string{"placement.0.group_name"}, + }, names.AttrGroupName: { - Type: schema.TypeString, - Optional: true, + Type: schema.TypeString, + Optional: true, + ConflictsWith: []string{"placement.0.group_id"}, }, "host_id": { Type: schema.TypeString, @@ -993,6 +997,15 @@ func resourceLaunchTemplate() *schema.Resource { }, }, + SchemaVersion: 1, + StateUpgraders: []schema.StateUpgrader{ + { + Type: launchTemplateSchemaV0().CoreConfigSchema().ImpliedType(), + Upgrade: 
launchTemplateStateUpgradeV0, + Version: 0, + }, + }, + // Enable downstream updates for resources referencing schema attributes // to prevent non-empty plans after "terraform apply" CustomizeDiff: customdiff.Sequence( @@ -1059,7 +1072,8 @@ func resourceLaunchTemplateCreate(ctx context.Context, d *schema.ResourceData, m func resourceLaunchTemplateRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).EC2Client(ctx) + c := meta.(*conns.AWSClient) + conn := c.EC2Client(ctx) lt, err := findLaunchTemplateByID(ctx, conn, d.Id()) @@ -1079,14 +1093,7 @@ func resourceLaunchTemplateRead(ctx context.Context, d *schema.ResourceData, met return sdkdiag.AppendErrorf(diags, "reading EC2 Launch Template (%s) Version (%s): %s", d.Id(), version, err) } - arn := arn.ARN{ - Partition: meta.(*conns.AWSClient).Partition(ctx), - Service: names.EC2, - Region: meta.(*conns.AWSClient).Region(ctx), - AccountID: meta.(*conns.AWSClient).AccountID(ctx), - Resource: fmt.Sprintf("launch-template/%s", d.Id()), - }.String() - d.Set(names.AttrARN, arn) + d.Set(names.AttrARN, launchTemplateARN(ctx, c, d.Id())) d.Set("default_version", lt.DefaultVersionNumber) d.Set(names.AttrDescription, ltv.VersionDescription) d.Set("latest_version", lt.LatestVersionNumber) @@ -1208,48 +1215,6 @@ func resourceLaunchTemplateDelete(ctx context.Context, d *schema.ResourceData, m return diags } -const ( - LaunchTemplateFound = "Found" -) - -func statusLaunchTemplate(ctx context.Context, conn *ec2.Client, id string, idIsName bool) retry.StateRefreshFunc { - return func() (any, string, error) { - var output *awstypes.LaunchTemplate - var err error - if idIsName { - output, err = findLaunchTemplateByName(ctx, conn, id) - } else { - output, err = findLaunchTemplateByID(ctx, conn, id) - } - - if tfresource.NotFound(err) { - return nil, "", nil - } - - if err != nil { - return nil, "", err - } - - return output, LaunchTemplateFound, nil - } 
-} - -func waitLaunchTemplateReady(ctx context.Context, conn *ec2.Client, id string, idIsName bool, timeout time.Duration) error { - stateConf := &retry.StateChangeConf{ - Pending: []string{""}, - Target: enum.Slice(LaunchTemplateFound), - Refresh: statusLaunchTemplate(ctx, conn, id, idIsName), - Timeout: timeout, - Delay: 5 * time.Second, - NotFoundChecks: 5, - ContinuousTargetOccurence: 3, - } - - _, err := stateConf.WaitForStateContext(ctx) - - return err -} - func expandRequestLaunchTemplateData(ctx context.Context, conn *ec2.Client, d *schema.ResourceData) (*awstypes.RequestLaunchTemplateData, error) { apiObject := &awstypes.RequestLaunchTemplateData{ // Always set at least one field. @@ -2095,6 +2060,10 @@ func expandLaunchTemplatePlacementRequest(tfMap map[string]any) *awstypes.Launch apiObject.AvailabilityZone = aws.String(v) } + if v, ok := tfMap["group_id"].(string); ok && v != "" { + apiObject.GroupId = aws.String(v) + } + if v, ok := tfMap[names.AttrGroupName].(string); ok && v != "" { apiObject.GroupName = aws.String(v) } @@ -2999,6 +2968,10 @@ func flattenLaunchTemplatePlacement(apiObject *awstypes.LaunchTemplatePlacement) tfMap[names.AttrAvailabilityZone] = aws.ToString(v) } + if v := apiObject.GroupId; v != nil { + tfMap["group_id"] = aws.ToString(v) + } + if v := apiObject.GroupName; v != nil { tfMap[names.AttrGroupName] = aws.ToString(v) } @@ -3234,3 +3207,6 @@ func flattenLaunchTemplateEnaSrdUdpSpecification(apiObject *awstypes.LaunchTempl return tfMap } +func launchTemplateARN(ctx context.Context, c *conns.AWSClient, templateID string) string { + return c.RegionalARN(ctx, names.EC2, "launch-template/"+templateID) +} diff --git a/internal/service/ec2/ec2_launch_template_data_source.go b/internal/service/ec2/ec2_launch_template_data_source.go index f2e4f96c22dd..b9fbd89ff29b 100644 --- a/internal/service/ec2/ec2_launch_template_data_source.go +++ b/internal/service/ec2/ec2_launch_template_data_source.go @@ -5,11 +5,9 @@ package ec2 import ( 
"context" - "fmt" "time" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/arn" "github.com/aws/aws-sdk-go-v2/service/ec2" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" @@ -706,6 +704,10 @@ func dataSourceLaunchTemplate() *schema.Resource { Type: schema.TypeString, Computed: true, }, + "group_id": { + Type: schema.TypeString, + Computed: true, + }, names.AttrGroupName: { Type: schema.TypeString, Computed: true, @@ -791,7 +793,8 @@ func dataSourceLaunchTemplate() *schema.Resource { func dataSourceLaunchTemplateRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).EC2Client(ctx) + c := meta.(*conns.AWSClient) + conn := c.EC2Client(ctx) input := ec2.DescribeLaunchTemplatesInput{} @@ -828,14 +831,7 @@ func dataSourceLaunchTemplateRead(ctx context.Context, d *schema.ResourceData, m return sdkdiag.AppendErrorf(diags, "reading EC2 Launch Template (%s) Version (%s): %s", d.Id(), version, err) } - arn := arn.ARN{ - Partition: meta.(*conns.AWSClient).Partition(ctx), - Service: names.EC2, - Region: meta.(*conns.AWSClient).Region(ctx), - AccountID: meta.(*conns.AWSClient).AccountID(ctx), - Resource: fmt.Sprintf("launch-template/%s", d.Id()), - }.String() - d.Set(names.AttrARN, arn) + d.Set(names.AttrARN, launchTemplateARN(ctx, c, d.Id())) d.Set("default_version", lt.DefaultVersionNumber) d.Set(names.AttrDescription, ltv.VersionDescription) d.Set("latest_version", lt.LatestVersionNumber) diff --git a/internal/service/ec2/ec2_launch_template_data_source_test.go b/internal/service/ec2/ec2_launch_template_data_source_test.go index d63c9014f15e..21a9f1fcb6ce 100644 --- a/internal/service/ec2/ec2_launch_template_data_source_test.go +++ b/internal/service/ec2/ec2_launch_template_data_source_test.go @@ -259,6 +259,32 @@ data "aws_launch_template" "test" { `, rName) } +func 
TestAccEC2LaunchTemplateDataSource_placementGroupID(t *testing.T) { + ctx := acctest.Context(t) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + dataSourceName := "data.aws_launch_template.test" + resourceName := "aws_launch_template.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckLaunchTemplateDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccLaunchTemplateDataSourceConfig_placementGroupID(rName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrPair(resourceName, names.AttrID, dataSourceName, names.AttrID), + resource.TestCheckResourceAttrPair(resourceName, names.AttrName, dataSourceName, names.AttrName), + resource.TestCheckResourceAttrPair(resourceName, "placement.#", dataSourceName, "placement.#"), + resource.TestCheckResourceAttrPair(resourceName, "placement.0.group_id", dataSourceName, "placement.0.group_id"), + resource.TestCheckResourceAttr(dataSourceName, "placement.0.group_name", ""), + ), + }, + }, + }) +} + func testAccLaunchTemplateDataSourceConfig_matchTags(rName string) string { return fmt.Sprintf(` resource "aws_launch_template" "test" { @@ -276,3 +302,24 @@ data "aws_launch_template" "test" { } `, rName) } + +func testAccLaunchTemplateDataSourceConfig_placementGroupID(rName string) string { + return fmt.Sprintf(` +resource "aws_placement_group" "test" { + name = %[1]q + strategy = "cluster" +} + +resource "aws_launch_template" "test" { + name = %[1]q + + placement { + group_id = aws_placement_group.test.placement_group_id + } +} + +data "aws_launch_template" "test" { + name = aws_launch_template.test.name +} +`, rName) +} diff --git a/internal/service/ec2/ec2_launch_template_migrate.go b/internal/service/ec2/ec2_launch_template_migrate.go new file mode 100644 index 000000000000..be52a40511f6 --- 
/dev/null +++ b/internal/service/ec2/ec2_launch_template_migrate.go @@ -0,0 +1,904 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package ec2 + +import ( + "context" + "maps" + "strings" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-aws/internal/sdkv2/types/nullable" + tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func launchTemplateSchemaV0() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + names.AttrARN: { + Type: schema.TypeString, + Computed: true, + }, + "block_device_mappings": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + names.AttrDeviceName: { + Type: schema.TypeString, + Optional: true, + }, + "ebs": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + names.AttrDeleteOnTermination: { + Type: nullable.TypeNullableBool, + Optional: true, + }, + names.AttrEncrypted: { + Type: nullable.TypeNullableBool, + Optional: true, + }, + names.AttrIOPS: { + Type: schema.TypeInt, + Computed: true, + Optional: true, + }, + names.AttrKMSKeyID: { + Type: schema.TypeString, + Optional: true, + }, + names.AttrSnapshotID: { + Type: schema.TypeString, + Optional: true, + }, + names.AttrThroughput: { + Type: schema.TypeInt, + Computed: true, + Optional: true, + }, + "volume_initialization_rate": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + }, + names.AttrVolumeSize: { + Type: schema.TypeInt, + Optional: true, + Computed: true, + }, + names.AttrVolumeType: { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + }, + }, + }, + "no_device": { + Type: schema.TypeString, + Optional: true, + }, + names.AttrVirtualName: { + Type: schema.TypeString, + Optional: true, + }, + }, + }, + }, + 
"capacity_reservation_specification": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "capacity_reservation_preference": { + Type: schema.TypeString, + Optional: true, + }, + "capacity_reservation_target": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "capacity_reservation_id": { + Type: schema.TypeString, + Optional: true, + }, + "capacity_reservation_resource_group_arn": { + Type: schema.TypeString, + Optional: true, + }, + }, + }, + }, + }, + }, + }, + "cpu_options": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "amd_sev_snp": { + Type: schema.TypeString, + Optional: true, + }, + "core_count": { + Type: schema.TypeInt, + Optional: true, + }, + "threads_per_core": { + Type: schema.TypeInt, + Optional: true, + }, + }, + }, + }, + "credit_specification": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "cpu_credits": { + Type: schema.TypeString, + Optional: true, + }, + }, + }, + }, + "default_version": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + }, + names.AttrDescription: { + Type: schema.TypeString, + Optional: true, + }, + "disable_api_stop": { + Type: schema.TypeBool, + Optional: true, + }, + "disable_api_termination": { + Type: schema.TypeBool, + Optional: true, + }, + "ebs_optimized": { + Type: nullable.TypeNullableBool, + Optional: true, + }, + "elastic_gpu_specifications": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + names.AttrType: { + Type: schema.TypeString, + Required: true, + }, + }, + }, + }, + "elastic_inference_accelerator": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + 
names.AttrType: { + Type: schema.TypeString, + Required: true, + }, + }, + }, + }, + "enclave_options": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + names.AttrEnabled: { + Type: schema.TypeBool, + Optional: true, + }, + }, + }, + }, + "hibernation_options": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "configured": { + Type: schema.TypeBool, + Required: true, + }, + }, + }, + }, + "iam_instance_profile": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + names.AttrARN: { + Type: schema.TypeString, + Optional: true, + }, + names.AttrName: { + Type: schema.TypeString, + Optional: true, + }, + }, + }, + }, + "image_id": { + Type: schema.TypeString, + Optional: true, + }, + "instance_initiated_shutdown_behavior": { + Type: schema.TypeString, + Optional: true, + }, + "instance_market_options": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "market_type": { + Type: schema.TypeString, + Optional: true, + }, + "spot_options": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "block_duration_minutes": { + Type: schema.TypeInt, + Optional: true, + }, + "instance_interruption_behavior": { + Type: schema.TypeString, + Optional: true, + }, + "max_price": { + Type: schema.TypeString, + Optional: true, + }, + "spot_instance_type": { + Type: schema.TypeString, + Optional: true, + }, + "valid_until": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + }, + }, + }, + }, + }, + }, + "instance_requirements": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "accelerator_count": { + Type: schema.TypeList, + 
Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + names.AttrMax: { + Type: schema.TypeInt, + Optional: true, + }, + names.AttrMin: { + Type: schema.TypeInt, + Optional: true, + }, + }, + }, + }, + "accelerator_manufacturers": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "accelerator_names": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "accelerator_total_memory_mib": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + names.AttrMax: { + Type: schema.TypeInt, + Optional: true, + }, + names.AttrMin: { + Type: schema.TypeInt, + Optional: true, + }, + }, + }, + }, + "accelerator_types": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "allowed_instance_types": { + Type: schema.TypeSet, + Optional: true, + MaxItems: 400, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "bare_metal": { + Type: schema.TypeString, + Optional: true, + }, + "baseline_ebs_bandwidth_mbps": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + names.AttrMax: { + Type: schema.TypeInt, + Optional: true, + }, + names.AttrMin: { + Type: schema.TypeInt, + Optional: true, + }, + }, + }, + }, + "burstable_performance": { + Type: schema.TypeString, + Optional: true, + }, + "cpu_manufacturers": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "excluded_instance_types": { + Type: schema.TypeSet, + Optional: true, + MaxItems: 400, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "instance_generations": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "local_storage": { + Type: schema.TypeString, + Optional: true, 
+ }, + "local_storage_types": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "max_spot_price_as_percentage_of_optimal_on_demand_price": { + Type: schema.TypeInt, + Optional: true, + }, + "memory_gib_per_vcpu": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + names.AttrMax: { + Type: schema.TypeFloat, + Optional: true, + }, + names.AttrMin: { + Type: schema.TypeFloat, + Optional: true, + }, + }, + }, + }, + "memory_mib": { + Type: schema.TypeList, + Required: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + names.AttrMax: { + Type: schema.TypeInt, + Optional: true, + }, + names.AttrMin: { + Type: schema.TypeInt, + Required: true, + }, + }, + }, + }, + "network_bandwidth_gbps": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + names.AttrMax: { + Type: schema.TypeFloat, + Optional: true, + }, + names.AttrMin: { + Type: schema.TypeFloat, + Optional: true, + }, + }, + }, + }, + "network_interface_count": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + names.AttrMax: { + Type: schema.TypeInt, + Optional: true, + }, + names.AttrMin: { + Type: schema.TypeInt, + Optional: true, + }, + }, + }, + }, + "on_demand_max_price_percentage_over_lowest_price": { + Type: schema.TypeInt, + Optional: true, + }, + "require_hibernate_support": { + Type: schema.TypeBool, + Optional: true, + }, + "spot_max_price_percentage_over_lowest_price": { + Type: schema.TypeInt, + Optional: true, + }, + "total_local_storage_gb": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + names.AttrMax: { + Type: schema.TypeFloat, + Optional: true, + }, + names.AttrMin: { + Type: schema.TypeFloat, + Optional: true, + }, 
+ }, + }, + }, + "vcpu_count": { + Type: schema.TypeList, + Required: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + names.AttrMax: { + Type: schema.TypeInt, + Optional: true, + }, + names.AttrMin: { + Type: schema.TypeInt, + Required: true, + }, + }, + }, + }, + }, + }, + }, + names.AttrInstanceType: { + Type: schema.TypeString, + Optional: true, + }, + "kernel_id": { + Type: schema.TypeString, + Optional: true, + }, + "key_name": { + Type: schema.TypeString, + Optional: true, + }, + "latest_version": { + Type: schema.TypeInt, + Computed: true, + }, + "license_specification": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "license_configuration_arn": { + Type: schema.TypeString, + Required: true, + }, + }, + }, + }, + "maintenance_options": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "auto_recovery": { + Type: schema.TypeString, + Optional: true, + }, + }, + }, + }, + "metadata_options": { + Type: schema.TypeList, + Optional: true, + Computed: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "http_endpoint": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + "http_protocol_ipv6": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + "http_put_response_hop_limit": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + }, + "http_tokens": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + "instance_metadata_tags": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + }, + }, + }, + "monitoring": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + names.AttrEnabled: { + Type: schema.TypeBool, + Optional: true, + }, + }, + }, + }, + names.AttrName: { + Type: schema.TypeString, + Optional: true, + 
Computed: true, + ForceNew: true, + }, + names.AttrNamePrefix: { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + "network_interfaces": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "associate_carrier_ip_address": { + Type: nullable.TypeNullableBool, + Optional: true, + }, + "associate_public_ip_address": { + Type: nullable.TypeNullableBool, + Optional: true, + }, + "connection_tracking_specification": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "tcp_established_timeout": { + Type: schema.TypeInt, + Optional: true, + }, + "udp_stream_timeout": { + Type: schema.TypeInt, + Optional: true, + }, + "udp_timeout": { + Type: schema.TypeInt, + Optional: true, + }, + }, + }, + }, + names.AttrDeleteOnTermination: { + Type: nullable.TypeNullableBool, + Optional: true, + }, + names.AttrDescription: { + Type: schema.TypeString, + Optional: true, + }, + "device_index": { + Type: schema.TypeInt, + Optional: true, + }, + "ena_srd_specification": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "ena_srd_enabled": { + Type: schema.TypeBool, + Optional: true, + }, + "ena_srd_udp_specification": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "ena_srd_udp_enabled": { + Type: schema.TypeBool, + Optional: true, + }, + }, + }, + }, + }, + }, + }, + "interface_type": { + Type: schema.TypeString, + Optional: true, + }, + "ipv4_address_count": { + Type: schema.TypeInt, + Optional: true, + }, + "ipv4_addresses": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "ipv4_prefix_count": { + Type: schema.TypeInt, + Optional: true, + }, + "ipv4_prefixes": { + Type: schema.TypeSet, + Optional: true, + Elem: 
&schema.Schema{ + Type: schema.TypeString, + }, + }, + "ipv6_address_count": { + Type: schema.TypeInt, + Optional: true, + }, + "ipv6_addresses": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "ipv6_prefix_count": { + Type: schema.TypeInt, + Optional: true, + }, + "ipv6_prefixes": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "network_card_index": { + Type: schema.TypeInt, + Optional: true, + }, + names.AttrNetworkInterfaceID: { + Type: schema.TypeString, + Optional: true, + }, + "primary_ipv6": { + Type: nullable.TypeNullableBool, + Optional: true, + }, + "private_ip_address": { + Type: schema.TypeString, + Optional: true, + }, + names.AttrSecurityGroups: { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + names.AttrSubnetID: { + Type: schema.TypeString, + Optional: true, + }, + }, + }, + }, + "placement": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "affinity": { + Type: schema.TypeString, + Optional: true, + }, + names.AttrAvailabilityZone: { + Type: schema.TypeString, + Optional: true, + }, + names.AttrGroupName: { + Type: schema.TypeString, + Optional: true, + }, + "host_id": { + Type: schema.TypeString, + Optional: true, + }, + "host_resource_group_arn": { + Type: schema.TypeString, + Optional: true, + }, + "partition_number": { + Type: schema.TypeInt, + Optional: true, + }, + "spread_domain": { + Type: schema.TypeString, + Optional: true, + }, + "tenancy": { + Type: schema.TypeString, + Optional: true, + }, + }, + }, + }, + "private_dns_name_options": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enable_resource_name_dns_aaaa_record": { + Type: schema.TypeBool, + Optional: true, + }, + "enable_resource_name_dns_a_record": { + Type: 
schema.TypeBool, + Optional: true, + }, + "hostname_type": { + Type: schema.TypeString, + Optional: true, + }, + }, + }, + }, + "ram_disk_id": { + Type: schema.TypeString, + Optional: true, + }, + "security_group_names": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "tag_specifications": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + names.AttrResourceType: { + Type: schema.TypeString, + Optional: true, + }, + names.AttrTags: tftags.TagsSchema(), + }, + }, + }, + names.AttrTags: tftags.TagsSchema(), + names.AttrTagsAll: tftags.TagsSchemaComputed(), + "update_default_version": { + Type: schema.TypeBool, + Optional: true, + }, + "user_data": { + Type: schema.TypeString, + Optional: true, + }, + names.AttrVPCSecurityGroupIDs: { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func launchTemplateStateUpgradeV0(_ context.Context, rawState map[string]any, meta any) (map[string]any, error) { + if rawState == nil { + rawState = map[string]any{} + } + + maps.DeleteFunc(rawState, func(key string, _ any) bool { + return strings.HasPrefix(key, "elastic_gpu_specifications.") || strings.HasPrefix(key, "elastic_inference_accelerator.") + }) + + return rawState, nil +} diff --git a/internal/service/ec2/ec2_launch_template_migrate_test.go b/internal/service/ec2/ec2_launch_template_migrate_test.go new file mode 100644 index 000000000000..3eee89e17cc1 --- /dev/null +++ b/internal/service/ec2/ec2_launch_template_migrate_test.go @@ -0,0 +1,85 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package ec2_test + +import ( + "testing" + + "github.com/google/go-cmp/cmp" + tfec2 "github.com/hashicorp/terraform-provider-aws/internal/service/ec2" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestLaunchTemplateStateUpgradeV0(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + rawState map[string]any + expected map[string]any + }{ + { + name: "empty rawState", + rawState: nil, + expected: map[string]any{}, + }, + { + name: "no elastic_gpu_specifications or elastic_inference_accelerator", + rawState: map[string]any{ + names.AttrName: "test", + }, + expected: map[string]any{ + names.AttrName: "test", + }, + }, + { + name: "with empty elastic_gpu_specifications", + rawState: map[string]any{ + names.AttrName: "test", + "elastic_gpu_specifications.#": "0", + }, + expected: map[string]any{ + names.AttrName: "test", + }, + }, + { + name: "with empty elastic_inference_accelerator", + rawState: map[string]any{ + names.AttrName: "test", + "elastic_inference_accelerator.#": "0", + }, + expected: map[string]any{ + names.AttrName: "test", + }, + }, + { + name: "with elastic_gpu_specifications and elastic_inference_accelerator", + rawState: map[string]any{ + names.AttrName: "test", + "elastic_gpu_specifications.#": "1", + "elastic_gpu_specifications.0.type": "test1", + "elastic_inference_accelerator.#": "2", + "elastic_inference_accelerator.0.type": "test2", + "elastic_inference_accelerator.1.type": "test3", + }, + expected: map[string]any{ + names.AttrName: "test", + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + result, err := tfec2.LaunchTemplateStateUpgradeV0(t.Context(), tt.rawState, nil) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if diff := cmp.Diff(tt.expected, result); diff != "" { + t.Errorf("unexpected result (-want +got):\n%s", diff) + } + }) + } +} diff --git a/internal/service/ec2/ec2_launch_template_test.go 
b/internal/service/ec2/ec2_launch_template_test.go index c2f1aee4e916..91d41d01f323 100644 --- a/internal/service/ec2/ec2_launch_template_test.go +++ b/internal/service/ec2/ec2_launch_template_test.go @@ -13,8 +13,13 @@ import ( awstypes "github.com/aws/aws-sdk-go-v2/service/ec2/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/knownvalue" + "github.com/hashicorp/terraform-plugin-testing/plancheck" + "github.com/hashicorp/terraform-plugin-testing/statecheck" "github.com/hashicorp/terraform-plugin-testing/terraform" + "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" "github.com/hashicorp/terraform-provider-aws/internal/acctest" + tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" "github.com/hashicorp/terraform-provider-aws/internal/conns" tfec2 "github.com/hashicorp/terraform-provider-aws/internal/service/ec2" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" @@ -1192,6 +1197,75 @@ func TestAccEC2LaunchTemplate_associateCarrierIPAddress(t *testing.T) { }) } +func TestAccEC2LaunchTemplate_Placement_groupID(t *testing.T) { + ctx := acctest.Context(t) + var template awstypes.LaunchTemplate + resourceName := "aws_launch_template.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckLaunchTemplateDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccLaunchTemplateConfig_placementGroupID(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckLaunchTemplateExists(ctx, resourceName, &template), + resource.TestCheckResourceAttr(resourceName, "placement.#", "1"), + 
resource.TestCheckResourceAttrSet(resourceName, "placement.0.group_id"), + resource.TestCheckResourceAttr(resourceName, "placement.0.group_name", ""), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccEC2LaunchTemplate_Placement_groupNameToGroupID(t *testing.T) { + ctx := acctest.Context(t) + var template awstypes.LaunchTemplate + resourceName := "aws_launch_template.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckLaunchTemplateDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccLaunchTemplateConfig_placementGroupName(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckLaunchTemplateExists(ctx, resourceName, &template), + resource.TestCheckResourceAttr(resourceName, "placement.#", "1"), + resource.TestCheckResourceAttrSet(resourceName, "placement.0.group_name"), + resource.TestCheckResourceAttr(resourceName, "placement.0.group_id", ""), + ), + }, + { + Config: testAccLaunchTemplateConfig_placementGroupID(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckLaunchTemplateExists(ctx, resourceName, &template), + resource.TestCheckResourceAttr(resourceName, "placement.#", "1"), + resource.TestCheckResourceAttrSet(resourceName, "placement.0.group_id"), + resource.TestCheckResourceAttr(resourceName, "placement.0.group_name", ""), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + func TestAccEC2LaunchTemplate_Placement_hostResourceGroupARN(t *testing.T) { ctx := acctest.Context(t) var template awstypes.LaunchTemplate @@ -3294,6 +3368,123 @@ func TestAccEC2LaunchTemplate_updateDefaultVersion(t *testing.T) { }) } +func 
TestAccEC2LaunchTemplate_upgradeFromV5(t *testing.T) { + ctx := acctest.Context(t) + var template awstypes.LaunchTemplate + resourceName := "aws_launch_template.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), + CheckDestroy: testAccCheckLaunchTemplateDestroy(ctx), + Steps: []resource.TestStep{ + { + ExternalProviders: map[string]resource.ExternalProvider{ + "aws": { + Source: "hashicorp/aws", + VersionConstraint: "5.100.0", + }, + }, + Config: testAccLaunchTemplateConfig_name(rName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckLaunchTemplateExists(ctx, resourceName, &template), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("elastic_gpu_specifications"), knownvalue.ListSizeExact(0)), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("elastic_inference_accelerator"), knownvalue.ListSizeExact(0)), + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Config: testAccLaunchTemplateConfig_name(rName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckLaunchTemplateExists(ctx, resourceName, &template), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoValue(resourceName, tfjsonpath.New("elastic_gpu_specifications")), + tfstatecheck.ExpectNoValue(resourceName, 
tfjsonpath.New("elastic_inference_accelerator")), + }, + }, + }, + }) +} + +func TestAccEC2LaunchTemplate_upgradeFromV5PlanRefreshFalse(t *testing.T) { + ctx := acctest.Context(t) + var template awstypes.LaunchTemplate + resourceName := "aws_launch_template.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), + CheckDestroy: testAccCheckLaunchTemplateDestroy(ctx), + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + { + ExternalProviders: map[string]resource.ExternalProvider{ + "aws": { + Source: "hashicorp/aws", + VersionConstraint: "5.100.0", + }, + }, + Config: testAccLaunchTemplateConfig_name(rName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckLaunchTemplateExists(ctx, resourceName, &template), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("elastic_gpu_specifications"), knownvalue.ListSizeExact(0)), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("elastic_inference_accelerator"), knownvalue.ListSizeExact(0)), + tfstatecheck.ExpectNoValue(resourceName, tfjsonpath.New(names.AttrRegion)), + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Config: testAccLaunchTemplateConfig_name(rName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckLaunchTemplateExists(ctx, resourceName, &template), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + 
plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoValue(resourceName, tfjsonpath.New("elastic_gpu_specifications")), + tfstatecheck.ExpectNoValue(resourceName, tfjsonpath.New("elastic_inference_accelerator")), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + }, + }, + }, + }) +} + func testAccCheckLaunchTemplateExists(ctx context.Context, n string, v *awstypes.LaunchTemplate) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] @@ -4119,6 +4310,40 @@ resource "aws_launch_template" "test" { `, rName) } +func testAccLaunchTemplateConfig_placementGroupID(rName string) string { + return fmt.Sprintf(` +resource "aws_placement_group" "test" { + name = %[1]q + strategy = "cluster" +} + +resource "aws_launch_template" "test" { + name = %[1]q + + placement { + group_id = aws_placement_group.test.placement_group_id + } +} +`, rName) +} + +func testAccLaunchTemplateConfig_placementGroupName(rName string) string { + return fmt.Sprintf(` +resource "aws_placement_group" "test" { + name = %[1]q + strategy = "cluster" +} + +resource "aws_launch_template" "test" { + name = %[1]q + + placement { + group_name = aws_placement_group.test.name + } +} +`, rName) +} + func testAccLaunchTemplateConfig_asgBasic(rName string) string { return acctest.ConfigCompose( acctest.ConfigLatestAmazonLinux2HVMEBSX8664AMI(), diff --git a/internal/service/ec2/ec2_placement_group.go b/internal/service/ec2/ec2_placement_group.go index d62308221e26..2b9928c705c3 100644 --- a/internal/service/ec2/ec2_placement_group.go +++ b/internal/service/ec2/ec2_placement_group.go @@ -9,7 +9,6 @@ import ( "log" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/arn" "github.com/aws/aws-sdk-go-v2/service/ec2" awstypes "github.com/aws/aws-sdk-go-v2/service/ec2/types" 
"github.com/hashicorp/aws-sdk-go-base/v2/tfawserr" @@ -119,7 +118,8 @@ func resourcePlacementGroupCreate(ctx context.Context, d *schema.ResourceData, m func resourcePlacementGroupRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).EC2Client(ctx) + c := meta.(*conns.AWSClient) + conn := c.EC2Client(ctx) pg, err := findPlacementGroupByName(ctx, conn, d.Id()) @@ -133,14 +133,7 @@ func resourcePlacementGroupRead(ctx context.Context, d *schema.ResourceData, met return sdkdiag.AppendErrorf(diags, "reading EC2 Placement Group (%s): %s", d.Id(), err) } - arn := arn.ARN{ - Partition: meta.(*conns.AWSClient).Partition(ctx), - Service: names.EC2, - Region: meta.(*conns.AWSClient).Region(ctx), - AccountID: meta.(*conns.AWSClient).AccountID(ctx), - Resource: fmt.Sprintf("placement-group/%s", d.Id()), - }.String() - d.Set(names.AttrARN, arn) + d.Set(names.AttrARN, placementGroupARN(ctx, c, d.Id())) d.Set(names.AttrName, pg.GroupName) d.Set("partition_count", pg.PartitionCount) d.Set("placement_group_id", pg.GroupId) @@ -202,3 +195,6 @@ func resourcePlacementGroupCustomizeDiff(_ context.Context, diff *schema.Resourc return nil } +func placementGroupARN(ctx context.Context, c *conns.AWSClient, groupName string) string { + return c.RegionalARN(ctx, names.EC2, "placement-group/"+groupName) +} diff --git a/internal/service/ec2/ec2_serial_console_access_data_source_test.go b/internal/service/ec2/ec2_serial_console_access_data_source_test.go index 55e6887434ed..59ca7cd25cdc 100644 --- a/internal/service/ec2/ec2_serial_console_access_data_source_test.go +++ b/internal/service/ec2/ec2_serial_console_access_data_source_test.go @@ -18,9 +18,10 @@ import ( "github.com/hashicorp/terraform-provider-aws/names" ) -func TestAccEC2SerialConsoleAccessDataSource_basic(t *testing.T) { +func testAccEC2SerialConsoleAccessDataSource_basic(t *testing.T) { ctx := acctest.Context(t) - resource.ParallelTest(t, 
resource.TestCase{ + + resource.Test(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -51,7 +52,7 @@ func testAccCheckSerialConsoleAccessDataSource(ctx context.Context, n string) re input := ec2.GetSerialConsoleAccessStatusInput{} actual, err := conn.GetSerialConsoleAccessStatus(ctx, &input) if err != nil { - return fmt.Errorf("Error reading serial console access toggle: %q", err) + return err } attr, _ := strconv.ParseBool(rs.Primary.Attributes[names.AttrEnabled]) diff --git a/internal/service/ec2/ec2_serial_console_access_identity_gen_test.go b/internal/service/ec2/ec2_serial_console_access_identity_gen_test.go index 79e088439af6..cc61b3041477 100644 --- a/internal/service/ec2/ec2_serial_console_access_identity_gen_test.go +++ b/internal/service/ec2/ec2_serial_console_access_identity_gen_test.go @@ -14,6 +14,7 @@ import ( "github.com/hashicorp/terraform-plugin-testing/tfversion" "github.com/hashicorp/terraform-provider-aws/internal/acctest" tfknownvalue "github.com/hashicorp/terraform-provider-aws/internal/acctest/knownvalue" + tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -21,8 +22,9 @@ func testAccEC2SerialConsoleAccess_IdentitySerial(t *testing.T) { t.Helper() testCases := map[string]func(t *testing.T){ - acctest.CtBasic: testAccEC2SerialConsoleAccess_Identity_Basic, - "ExistingResource": testAccEC2SerialConsoleAccess_Identity_ExistingResource, + acctest.CtBasic: testAccEC2SerialConsoleAccess_Identity_Basic, + "ExistingResource": testAccEC2SerialConsoleAccess_Identity_ExistingResource, + "ExistingResourceNoRefresh": testAccEC2SerialConsoleAccess_Identity_ExistingResource_NoRefresh_NoChange, } acctest.RunSerialTests1Level(t, testCases, 0) @@ -30,9 +32,10 @@ func testAccEC2SerialConsoleAccess_IdentitySerial(t *testing.T) { func 
testAccEC2SerialConsoleAccess_Identity_Basic(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_ec2_serial_console_access.test" - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ TerraformVersionChecks: []tfversion.TerraformVersionCheck{ tfversion.SkipBelow(tfversion.Version1_12_0), }, @@ -93,3 +96,104 @@ func testAccEC2SerialConsoleAccess_Identity_Basic(t *testing.T) { }, }) } + +func testAccEC2SerialConsoleAccess_Identity_ExistingResource(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_ec2_serial_console_access.test" + + acctest.Test(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), + CheckDestroy: testAccCheckSerialConsoleAccessDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/SerialConsoleAccess/basic_v5.100.0/"), + ConfigVariables: config.Variables{}, + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: v6.0 Identity error + { + ConfigDirectory: config.StaticDirectory("testdata/SerialConsoleAccess/basic_v6.0.0/"), + ConfigVariables: config.Variables{}, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: knownvalue.Null(), + }), + }, + }, + + // Step 3: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: 
config.StaticDirectory("testdata/SerialConsoleAccess/basic/"), + ConfigVariables: config.Variables{}, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + }), + }, + }, + }, + }) +} + +func testAccEC2SerialConsoleAccess_Identity_ExistingResource_NoRefresh_NoChange(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_ec2_serial_console_access.test" + + acctest.Test(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), + CheckDestroy: testAccCheckSerialConsoleAccessDestroy(ctx), + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/SerialConsoleAccess/basic_v5.100.0/"), + ConfigVariables: config.Variables{}, + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/SerialConsoleAccess/basic/"), + ConfigVariables: config.Variables{}, + }, + }, + }) +} diff --git a/internal/service/ec2/ec2_serial_console_access_test.go b/internal/service/ec2/ec2_serial_console_access_test.go index 16c931e8c279..4e50814d5950 100644 --- a/internal/service/ec2/ec2_serial_console_access_test.go +++ 
b/internal/service/ec2/ec2_serial_console_access_test.go @@ -11,14 +11,8 @@ import ( "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/ec2" "github.com/hashicorp/terraform-plugin-testing/helper/resource" - "github.com/hashicorp/terraform-plugin-testing/knownvalue" - "github.com/hashicorp/terraform-plugin-testing/plancheck" - "github.com/hashicorp/terraform-plugin-testing/statecheck" "github.com/hashicorp/terraform-plugin-testing/terraform" - "github.com/hashicorp/terraform-plugin-testing/tfversion" "github.com/hashicorp/terraform-provider-aws/internal/acctest" - tfknownvalue "github.com/hashicorp/terraform-provider-aws/internal/acctest/knownvalue" - tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -26,12 +20,17 @@ import ( func TestAccEC2SerialConsoleAccess_serial(t *testing.T) { t.Parallel() - testCases := map[string]func(t *testing.T){ - acctest.CtBasic: testAccEC2SerialConsoleAccess_basic, - "Identity": testAccEC2SerialConsoleAccess_IdentitySerial, + testCases := map[string]map[string]func(t *testing.T){ + "Resource": { + acctest.CtBasic: testAccEC2SerialConsoleAccess_basic, + "Identity": testAccEC2SerialConsoleAccess_IdentitySerial, + }, + "DataSource": { + acctest.CtBasic: testAccEC2SerialConsoleAccessDataSource_basic, + }, } - acctest.RunSerialTests1Level(t, testCases, 0) + acctest.RunSerialTests2Levels(t, testCases, 0) } func testAccEC2SerialConsoleAccess_basic(t *testing.T) { @@ -67,82 +66,6 @@ func testAccEC2SerialConsoleAccess_basic(t *testing.T) { }) } -func testAccEC2SerialConsoleAccess_Identity_ExistingResource(t *testing.T) { - ctx := acctest.Context(t) - resourceName := "aws_ec2_serial_console_access.test" - - resource.Test(t, resource.TestCase{ - TerraformVersionChecks: []tfversion.TerraformVersionCheck{ - tfversion.SkipBelow(tfversion.Version1_12_0), - }, - PreCheck: 
func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), - CheckDestroy: testAccCheckSerialConsoleAccessDestroy(ctx), - Steps: []resource.TestStep{ - { - ExternalProviders: map[string]resource.ExternalProvider{ - "aws": { - Source: "hashicorp/aws", - VersionConstraint: "5.100.0", - }, - }, - Config: testAccSerialConsoleAccessConfig_basic(true), - Check: resource.ComposeTestCheckFunc( - testAccCheckSerialConsoleAccess(ctx, resourceName, true), - ), - ConfigStateChecks: []statecheck.StateCheck{ - tfstatecheck.ExpectNoIdentity(resourceName), - }, - }, - { - ExternalProviders: map[string]resource.ExternalProvider{ - "aws": { - Source: "hashicorp/aws", - VersionConstraint: "6.0.0", - }, - }, - Config: testAccSerialConsoleAccessConfig_basic(true), - Check: resource.ComposeTestCheckFunc( - testAccCheckSerialConsoleAccess(ctx, resourceName, true), - ), - ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - PostApplyPostRefresh: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - }, - ConfigStateChecks: []statecheck.StateCheck{ - statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ - names.AttrAccountID: knownvalue.Null(), - }), - }, - }, - { - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - Config: testAccSerialConsoleAccessConfig_basic(true), - Check: resource.ComposeTestCheckFunc( - testAccCheckSerialConsoleAccess(ctx, resourceName, true), - ), - ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - PostApplyPostRefresh: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - }, - ConfigStateChecks: []statecheck.StateCheck{ - statecheck.ExpectIdentity(resourceName, 
map[string]knownvalue.Check{ - names.AttrAccountID: tfknownvalue.AccountID(), - }), - }, - }, - }, - }) -} - func testAccCheckSerialConsoleAccessDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { conn := acctest.Provider.Meta().(*conns.AWSClient).EC2Client(ctx) diff --git a/internal/service/ec2/ec2_spot_fleet_request.go b/internal/service/ec2/ec2_spot_fleet_request.go index dc18389932e9..9519d9fdbe67 100644 --- a/internal/service/ec2/ec2_spot_fleet_request.go +++ b/internal/service/ec2/ec2_spot_fleet_request.go @@ -8,7 +8,6 @@ import ( "context" "fmt" "log" - "strconv" "time" "github.com/aws/aws-sdk-go-v2/aws" @@ -354,9 +353,10 @@ func resourceSpotFleetRequest() *schema.Resource { Elem: &schema.Schema{Type: schema.TypeString}, }, "weighted_capacity": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, + Type: nullable.TypeNullableFloat, + Optional: true, + ForceNew: true, + ValidateFunc: nullable.ValidateTypeStringNullableFloat, }, }, }, @@ -998,7 +998,7 @@ func resourceSpotFleetRequestCreate(ctx context.Context, d *schema.ResourceData, log.Printf("[DEBUG] Creating EC2 Spot Fleet Request: %s", d.Id()) outputRaw, err := tfresource.RetryWhenAWSErrMessageContains(ctx, iamPropagationTimeout, - func() (any, error) { + func(ctx context.Context) (any, error) { return conn.RequestSpotFleet(ctx, &input) }, errCodeInvalidSpotFleetRequestConfig, "SpotFleetRequestConfig.IamFleetRole", @@ -1186,7 +1186,7 @@ func resourceSpotFleetRequestDelete(ctx context.Context, d *schema.ResourceData, return diags } - _, err = tfresource.RetryUntilNotFound(ctx, d.Timeout(schema.TimeoutDelete), func() (any, error) { + _, err = tfresource.RetryUntilNotFound(ctx, d.Timeout(schema.TimeoutDelete), func(ctx context.Context) (any, error) { input := ec2.DescribeSpotFleetInstancesInput{ SpotFleetRequestId: aws.String(d.Id()), } @@ -1268,9 +1268,10 @@ func buildSpotFleetLaunchSpecification(ctx context.Context, d map[string]any, me opts.KeyName = 
aws.String(v.(string)) } - if v, ok := d["weighted_capacity"]; ok && v != "" { - wc, _ := strconv.ParseFloat(v.(string), 64) - opts.WeightedCapacity = aws.Float64(wc) + if v, ok := d["weighted_capacity"].(string); ok { + if v, null, _ := nullable.Float(v).ValueFloat64(); !null { + opts.WeightedCapacity = aws.Float64(v) + } } var securityGroupIds []string @@ -1928,7 +1929,7 @@ func launchSpecToMap(ctx context.Context, l awstypes.SpotFleetLaunchSpecificatio m[names.AttrVPCSecurityGroupIDs] = securityGroupIds if l.WeightedCapacity != nil { - m["weighted_capacity"] = strconv.FormatFloat(*l.WeightedCapacity, 'f', 0, 64) + m["weighted_capacity"] = flex.Float64ToStringValue(l.WeightedCapacity) } if l.TagSpecifications != nil { diff --git a/internal/service/ec2/ec2_spot_instance_request.go b/internal/service/ec2/ec2_spot_instance_request.go index 45042f5d4b0d..b7d9e1875c2a 100644 --- a/internal/service/ec2/ec2_spot_instance_request.go +++ b/internal/service/ec2/ec2_spot_instance_request.go @@ -36,7 +36,11 @@ func resourceSpotInstanceRequest() *schema.Resource { DeleteWithoutTimeout: resourceSpotInstanceRequestDelete, Importer: &schema.ResourceImporter{ - StateContext: schema.ImportStatePassthroughContext, + StateContext: func(ctx context.Context, rd *schema.ResourceData, meta any) ([]*schema.ResourceData, error) { + rd.Set(names.AttrForceDestroy, false) + + return []*schema.ResourceData{rd}, nil + }, }, Timeouts: &schema.ResourceTimeout{ @@ -81,6 +85,26 @@ func resourceSpotInstanceRequest() *schema.Resource { Optional: true, ForceNew: true, } + s["network_interface"].Elem.(*schema.Resource).Schema["network_card_index"] = &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + } + s["primary_network_interface"] = &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + names.AttrDeleteOnTermination: { + Type: schema.TypeBool, + Computed: true, + }, + names.AttrNetworkInterfaceID: { + Type: 
schema.TypeString, + Computed: true, + }, + }, + }, + } s["spot_bid_status"] = &schema.Schema{ Type: schema.TypeString, Computed: true, @@ -191,7 +215,7 @@ func resourceSpotInstanceRequestCreate(ctx context.Context, d *schema.ResourceDa } outputRaw, err := tfresource.RetryWhen(ctx, iamPropagationTimeout, - func() (any, error) { + func(ctx context.Context) (any, error) { return conn.RequestSpotInstances(ctx, &input) }, func(err error) (bool, error) { @@ -229,7 +253,7 @@ func resourceSpotInstanceRequestRead(ctx context.Context, d *schema.ResourceData var diags diag.Diagnostics conn := meta.(*conns.AWSClient).EC2Client(ctx) - outputRaw, err := tfresource.RetryWhenNewResourceNotFound(ctx, ec2PropagationTimeout, func() (any, error) { + request, err := tfresource.RetryWhenNewResourceNotFound(ctx, ec2PropagationTimeout, func(ctx context.Context) (*awstypes.SpotInstanceRequest, error) { return findSpotInstanceRequestByID(ctx, conn, d.Id()) }, d.IsNewResource()) @@ -243,8 +267,6 @@ func resourceSpotInstanceRequestRead(ctx context.Context, d *schema.ResourceData return sdkdiag.AppendErrorf(diags, "reading EC2 Spot Instance Request (%s): %s", d.Id(), err) } - request := outputRaw.(*awstypes.SpotInstanceRequest) - d.Set("spot_bid_status", request.Status.Code) // Instance ID is not set if the request is still pending if request.InstanceId != nil { @@ -336,7 +358,7 @@ func readInstance(ctx context.Context, d *schema.ResourceData, meta any) diag.Di "host": *instance.PrivateIpAddress, }) } - if err := readBlockDevices(ctx, d, meta, instance, false); err != nil { + if err := readBlockDevices(ctx, d, meta.(*conns.AWSClient), instance, false); err != nil { return sdkdiag.AppendFromErr(diags, err) } @@ -349,6 +371,14 @@ func readInstance(ctx context.Context, d *schema.ResourceData, meta any) diag.Di d.Set("associate_public_ip_address", ni.Association != nil) d.Set("ipv6_address_count", len(ni.Ipv6Addresses)) + pni := map[string]any{ + names.AttrNetworkInterfaceID: 
aws.ToString(ni.NetworkInterfaceId), + names.AttrDeleteOnTermination: aws.ToBool(ni.Attachment.DeleteOnTermination), + } + if err := d.Set("primary_network_interface", []any{pni}); err != nil { + return sdkdiag.AppendErrorf(diags, "setting primary_network_interface for AWS Spot Instance (%s): %s", d.Id(), err) + } + for _, address := range ni.Ipv6Addresses { ipv6Addresses = append(ipv6Addresses, *address.Ipv6Address) } diff --git a/internal/service/ec2/ec2_spot_instance_request_test.go b/internal/service/ec2/ec2_spot_instance_request_test.go index c0cf51fc427f..9048a6c9d03d 100644 --- a/internal/service/ec2/ec2_spot_instance_request_test.go +++ b/internal/service/ec2/ec2_spot_instance_request_test.go @@ -9,11 +9,16 @@ import ( "testing" "time" + "github.com/YakDriver/regexache" "github.com/aws/aws-sdk-go-v2/aws" awstypes "github.com/aws/aws-sdk-go-v2/service/ec2/types" + "github.com/hashicorp/terraform-plugin-testing/compare" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/knownvalue" + "github.com/hashicorp/terraform-plugin-testing/statecheck" "github.com/hashicorp/terraform-plugin-testing/terraform" + "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" "github.com/hashicorp/terraform-provider-aws/internal/acctest" "github.com/hashicorp/terraform-provider-aws/internal/conns" tfec2 "github.com/hashicorp/terraform-provider-aws/internal/service/ec2" @@ -43,6 +48,17 @@ func TestAccEC2SpotInstanceRequest_basic(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "instance_interruption_behavior", "terminate"), resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, "0"), ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("network_interface"), knownvalue.SetExact([]knownvalue.Check{})), + statecheck.ExpectKnownValue(resourceName, 
tfjsonpath.New("primary_network_interface_id"), knownvalue.StringRegexp(regexache.MustCompile(`^eni-[0-9a-f]+$`))), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("primary_network_interface"), knownvalue.ListExact([]knownvalue.Check{ + knownvalue.ObjectExact(map[string]knownvalue.Check{ + names.AttrDeleteOnTermination: knownvalue.Bool(true), + names.AttrNetworkInterfaceID: knownvalue.StringRegexp(regexache.MustCompile(`^eni-[0-9a-f]+$`)), + }), + })), + statecheck.CompareValuePairs(resourceName, tfjsonpath.New("primary_network_interface").AtSliceIndex(0).AtMapKey(names.AttrNetworkInterfaceID), resourceName, tfjsonpath.New("primary_network_interface_id"), compare.ValuesSame()), + }, }, { ResourceName: resourceName, @@ -351,6 +367,38 @@ func TestAccEC2SpotInstanceRequest_networkInterfaceAttributes(t *testing.T) { }) } +func TestAccEC2SpotInstanceRequest_primaryNetworkInterface(t *testing.T) { + ctx := acctest.Context(t) + var sir awstypes.SpotInstanceRequest + resourceName := "aws_spot_instance_request.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckSpotInstanceRequestDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccSpotInstanceRequestConfig_primaryNetworkInterface(rName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSpotInstanceRequestExists(ctx, resourceName, &sir), + resource.TestCheckResourceAttr(resourceName, "network_interface.#", "1"), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "network_interface.*", map[string]string{ + "device_index": "0", + }), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"network_interface", "user_data_replace_on_change", 
"wait_for_fulfillment"}, + }, + }, + }) +} + func TestAccEC2SpotInstanceRequest_getPasswordData(t *testing.T) { ctx := acctest.Context(t) var sir awstypes.SpotInstanceRequest @@ -857,34 +905,16 @@ resource "aws_ec2_tag" "test" { func testAccSpotInstanceRequestConfig_vpc(rName string) string { return acctest.ConfigCompose( - acctest.ConfigAvailableAZsNoOptIn(), + acctest.ConfigVPCWithSubnets(rName, 1), acctest.ConfigLatestAmazonLinux2HVMEBSX8664AMI(), acctest.AvailableEC2InstanceTypeForRegion("t3.micro", "t2.micro"), fmt.Sprintf(` -resource "aws_vpc" "test" { - cidr_block = "10.1.0.0/16" - - tags = { - Name = %[1]q - } -} - -resource "aws_subnet" "test" { - availability_zone = data.aws_availability_zones.available.names[0] - cidr_block = "10.1.1.0/24" - vpc_id = aws_vpc.test.id - - tags = { - Name = %[1]q - } -} - resource "aws_spot_instance_request" "test" { ami = data.aws_ami.amzn2-ami-minimal-hvm-ebs-x86_64.id instance_type = data.aws_ec2_instance_type_offering.available.instance_type spot_price = "0.05" wait_for_fulfillment = true - subnet_id = aws_subnet.test.id + subnet_id = aws_subnet.test[0].id tags = { Name = %[1]q @@ -956,6 +986,44 @@ resource "aws_ec2_tag" "test" { `, rName)) } +func testAccSpotInstanceRequestConfig_primaryNetworkInterface(rName string) string { + return acctest.ConfigCompose( + acctest.ConfigVPCWithSubnets(rName, 1), + acctest.ConfigLatestAmazonLinux2HVMEBSX8664AMI(), + acctest.AvailableEC2InstanceTypeForRegion("t3.micro", "t2.micro"), + fmt.Sprintf(` +resource "aws_spot_instance_request" "test" { + ami = data.aws_ami.amzn2-ami-minimal-hvm-ebs-x86_64.id + instance_type = data.aws_ec2_instance_type_offering.available.instance_type + spot_price = "0.05" + wait_for_fulfillment = true + + network_interface { + network_interface_id = aws_network_interface.test.id + device_index = 0 + } + + tags = { + Name = %[1]q + } +} + +resource "aws_network_interface" "test" { + subnet_id = aws_subnet.test[0].id + + tags = { + Name = %[1]q + } +} + 
+resource "aws_ec2_tag" "test" { + resource_id = aws_spot_instance_request.test.spot_instance_id + key = "Name" + value = %[1]q +} +`, rName)) +} + func testAccSpotInstanceRequestConfig_getPasswordData(rName, publicKey string) string { return acctest.ConfigCompose( testAccLatestWindowsServer2016CoreAMIConfig(), diff --git a/internal/service/ec2/ec2_stop_instance_action.go b/internal/service/ec2/ec2_stop_instance_action.go new file mode 100644 index 000000000000..ae234daac1ac --- /dev/null +++ b/internal/service/ec2/ec2_stop_instance_action.go @@ -0,0 +1,251 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package ec2 + +import ( + "context" + "errors" + "fmt" + "time" + + "github.com/YakDriver/regexache" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/ec2" + awstypes "github.com/aws/aws-sdk-go-v2/service/ec2/types" + "github.com/hashicorp/aws-sdk-go-base/v2/tfawserr" + "github.com/hashicorp/terraform-plugin-framework-validators/int64validator" + "github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator" + "github.com/hashicorp/terraform-plugin-framework/action" + "github.com/hashicorp/terraform-plugin-framework/action/schema" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/actionwait" + "github.com/hashicorp/terraform-provider-aws/internal/framework" + "github.com/hashicorp/terraform-provider-aws/names" +) + +// stopInstancePollInterval defines polling cadence for stop instance action. 
+const stopInstancePollInterval = 10 * time.Second + +// @Action(aws_ec2_stop_instance, name="Stop Instance") +func newStopInstanceAction(_ context.Context) (action.ActionWithConfigure, error) { + return &stopInstanceAction{}, nil +} + +var ( + _ action.Action = (*stopInstanceAction)(nil) +) + +type stopInstanceAction struct { + framework.ActionWithModel[stopInstanceModel] +} + +type stopInstanceModel struct { + framework.WithRegionModel + InstanceID types.String `tfsdk:"instance_id"` + Force types.Bool `tfsdk:"force"` + Timeout types.Int64 `tfsdk:"timeout"` +} + +func (a *stopInstanceAction) Schema(ctx context.Context, req action.SchemaRequest, resp *action.SchemaResponse) { + resp.Schema = schema.Schema{ + Description: "Stops an EC2 instance. This action will gracefully stop the instance and wait for it to reach the stopped state.", + Attributes: map[string]schema.Attribute{ + names.AttrInstanceID: schema.StringAttribute{ + Description: "The ID of the EC2 instance to stop", + Required: true, + Validators: []validator.String{ + stringvalidator.RegexMatches( + regexache.MustCompile(`^i-[0-9a-f]{8,17}$`), + "must be a valid EC2 instance ID (e.g., i-1234567890abcdef0)", + ), + }, + }, + "force": schema.BoolAttribute{ + Description: "Forces the instance to stop. The instance does not have an opportunity to flush file system caches or file system metadata. If you use this option, you must perform file system check and repair procedures. 
This option is not recommended for Windows instances.", + Optional: true, + }, + names.AttrTimeout: schema.Int64Attribute{ + Description: "Timeout in seconds to wait for the instance to stop (default: 600)", + Optional: true, + Validators: []validator.Int64{ + int64validator.AtLeast(30), + int64validator.AtMost(3600), + }, + }, + }, + } +} + +func (a *stopInstanceAction) Invoke(ctx context.Context, req action.InvokeRequest, resp *action.InvokeResponse) { + var config stopInstanceModel + + // Parse configuration + resp.Diagnostics.Append(req.Config.Get(ctx, &config)...) + if resp.Diagnostics.HasError() { + return + } + + // Get AWS client + conn := a.Meta().EC2Client(ctx) + + instanceID := config.InstanceID.ValueString() + force := config.Force.ValueBool() + + // Set default timeout if not provided + timeout := 600 * time.Second + if !config.Timeout.IsNull() { + timeout = time.Duration(config.Timeout.ValueInt64()) * time.Second + } + + tflog.Info(ctx, "Starting EC2 stop instance action", map[string]any{ + names.AttrInstanceID: instanceID, + "force": force, + names.AttrTimeout: timeout.String(), + }) + + // Send initial progress update + resp.SendProgress(action.InvokeProgressEvent{ + Message: fmt.Sprintf("Starting stop operation for EC2 instance %s...", instanceID), + }) + + // Check current instance state first + instance, err := findInstanceByID(ctx, conn, instanceID) + if err != nil { + if tfawserr.ErrCodeEquals(err, errCodeInvalidInstanceIDNotFound) { + resp.Diagnostics.AddError( + "Instance Not Found", + fmt.Sprintf("EC2 instance %s was not found", instanceID), + ) + return + } + resp.Diagnostics.AddError( + "Failed to Describe Instance", + fmt.Sprintf("Could not describe EC2 instance %s: %s", instanceID, err), + ) + return + } + + currentState := string(instance.State.Name) + tflog.Debug(ctx, "Current instance state", map[string]any{ + names.AttrInstanceID: instanceID, + names.AttrState: currentState, + }) + + // Check if instance is already stopped + if 
instance.State.Name == awstypes.InstanceStateNameStopped { + resp.SendProgress(action.InvokeProgressEvent{ + Message: fmt.Sprintf("EC2 instance %s is already stopped", instanceID), + }) + tflog.Info(ctx, "Instance already stopped", map[string]any{ + names.AttrInstanceID: instanceID, + }) + return + } + + // Check if instance is in a state that can be stopped + if !canStopInstance(instance.State.Name) { + resp.Diagnostics.AddError( + "Cannot Stop Instance", + fmt.Sprintf("EC2 instance %s is in state '%s' and cannot be stopped. Instance must be in 'running' or 'stopping' state.", instanceID, currentState), + ) + return + } + + // If instance is already stopping, just wait for it + if instance.State.Name == awstypes.InstanceStateNameStopping { + resp.SendProgress(action.InvokeProgressEvent{ + Message: fmt.Sprintf("EC2 instance %s is already stopping, waiting for completion...", instanceID), + }) + } else { + // Stop the instance + resp.SendProgress(action.InvokeProgressEvent{ + Message: fmt.Sprintf("Sending stop command to EC2 instance %s...", instanceID), + }) + + input := ec2.StopInstancesInput{ + Force: aws.Bool(force), + InstanceIds: []string{instanceID}, + } + + _, err = conn.StopInstances(ctx, &input) + if err != nil { + resp.Diagnostics.AddError( + "Failed to Stop Instance", + fmt.Sprintf("Could not stop EC2 instance %s: %s", instanceID, err), + ) + return + } + + resp.SendProgress(action.InvokeProgressEvent{ + Message: fmt.Sprintf("Stop command sent to EC2 instance %s, waiting for instance to stop...", instanceID), + }) + } + + // Wait for instance to stop with periodic progress updates using actionwait + // Use fixed interval since EC2 instance state transitions are predictable and + // relatively quick - consistent polling every 10s is optimal for this operation + _, err = actionwait.WaitForStatus(ctx, func(ctx context.Context) (actionwait.FetchResult[struct{}], error) { + instance, derr := findInstanceByID(ctx, conn, instanceID) + if derr != nil { + return 
actionwait.FetchResult[struct{}]{}, fmt.Errorf("describing instance: %w", derr) + } + state := string(instance.State.Name) + return actionwait.FetchResult[struct{}]{Status: actionwait.Status(state)}, nil + }, actionwait.Options[struct{}]{ + Timeout: timeout, + Interval: actionwait.FixedInterval(stopInstancePollInterval), + ProgressInterval: 30 * time.Second, + SuccessStates: []actionwait.Status{actionwait.Status(awstypes.InstanceStateNameStopped)}, + TransitionalStates: []actionwait.Status{ + actionwait.Status(awstypes.InstanceStateNameRunning), + actionwait.Status(awstypes.InstanceStateNameStopping), + actionwait.Status(awstypes.InstanceStateNameShuttingDown), + }, + ProgressSink: func(fr actionwait.FetchResult[any], meta actionwait.ProgressMeta) { + resp.SendProgress(action.InvokeProgressEvent{Message: fmt.Sprintf("EC2 instance %s is currently in state '%s', continuing to wait for 'stopped'...", instanceID, fr.Status)}) + }, + }) + if err != nil { + var timeoutErr *actionwait.TimeoutError + var unexpectedErr *actionwait.UnexpectedStateError + if errors.As(err, &timeoutErr) { + resp.Diagnostics.AddError( + "Timeout Waiting for Instance to Stop", + fmt.Sprintf("EC2 instance %s did not stop within %s: %s", instanceID, timeout, err), + ) + } else if errors.As(err, &unexpectedErr) { + resp.Diagnostics.AddError( + "Unexpected Instance State", + fmt.Sprintf("EC2 instance %s entered unexpected state while stopping: %s", instanceID, err), + ) + } else { + resp.Diagnostics.AddError( + "Error Waiting for Instance to Stop", + fmt.Sprintf("Error while waiting for EC2 instance %s to stop: %s", instanceID, err), + ) + } + return + } + + // Final success message + resp.SendProgress(action.InvokeProgressEvent{ + Message: fmt.Sprintf("EC2 instance %s has been successfully stopped", instanceID), + }) + + tflog.Info(ctx, "EC2 stop instance action completed successfully", map[string]any{ + names.AttrInstanceID: instanceID, + }) +} + +// canStopInstance checks if an instance can be 
stopped based on its current state +func canStopInstance(state awstypes.InstanceStateName) bool { + switch state { + case awstypes.InstanceStateNameRunning, awstypes.InstanceStateNameStopping: + return true + default: + return false + } +} diff --git a/internal/service/ec2/ec2_stop_instance_action_test.go b/internal/service/ec2/ec2_stop_instance_action_test.go new file mode 100644 index 000000000000..b1d345f7d1be --- /dev/null +++ b/internal/service/ec2/ec2_stop_instance_action_test.go @@ -0,0 +1,338 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package ec2_test + +import ( + "context" + "fmt" + "testing" + + awstypes "github.com/aws/aws-sdk-go-v2/service/ec2/types" + "github.com/hashicorp/terraform-plugin-go/tfprotov5" + "github.com/hashicorp/terraform-plugin-go/tftypes" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/terraform" + "github.com/hashicorp/terraform-plugin-testing/tfversion" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + tfec2 "github.com/hashicorp/terraform-provider-aws/internal/service/ec2" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccEC2StopInstanceAction_basic(t *testing.T) { + ctx := acctest.Context(t) + var v awstypes.Instance + resourceName := "aws_instance.test" + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, names.EC2) + }, + ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_14_0), + }, + Steps: []resource.TestStep{ + { + Config: testAccStopInstanceActionConfig_force(rName), + Check: resource.ComposeTestCheckFunc( + 
testAccCheckInstanceExistsLocal(ctx, resourceName, &v), + testAccCheckInstanceState(ctx, resourceName, awstypes.InstanceStateNameRunning), + ), + }, + { + PreConfig: func() { + if v.InstanceId == nil { + t.Fatal("Instance ID is nil") + } + + if err := invokeStopInstanceAction(ctx, t, *v.InstanceId, true); err != nil { + t.Fatalf("Failed to invoke stop instance action: %v", err) + } + }, + Config: testAccStopInstanceActionConfig_force(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckInstanceState(ctx, resourceName, awstypes.InstanceStateNameStopped), + ), + }, + }, + }) +} + +func TestAccEC2StopInstanceAction_trigger(t *testing.T) { + ctx := acctest.Context(t) + var v awstypes.Instance + resourceName := "aws_instance.test" + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, names.EC2) + }, + ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_14_0), + }, + Steps: []resource.TestStep{ + { + Config: testAccStopInstanceActionConfig_trigger(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckInstanceExistsLocal(ctx, resourceName, &v), + testAccCheckInstanceState(ctx, resourceName, awstypes.InstanceStateNameStopped), + ), + }, + }, + }) +} + +func testAccCheckInstanceExistsLocal(ctx context.Context, n string, v *awstypes.Instance) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No EC2 Instance ID is set") + } + + conn := acctest.Provider.Meta().(*conns.AWSClient).EC2Client(ctx) + + instance, err := tfec2.FindInstanceByID(ctx, conn, rs.Primary.ID) + if err != nil { + return err + 
} + + *v = *instance + + return nil + } +} + +func testAccCheckInstanceState(ctx context.Context, n string, expectedState awstypes.InstanceStateName) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No EC2 Instance ID is set") + } + + conn := acctest.Provider.Meta().(*conns.AWSClient).EC2Client(ctx) + + instance, err := tfec2.FindInstanceByID(ctx, conn, rs.Primary.ID) + if err != nil { + return err + } + + if instance.State.Name != expectedState { + return fmt.Errorf("Expected instance state %s, got %s", expectedState, instance.State.Name) + } + + return nil + } +} + +func testAccStopInstanceActionConfig_force(rName string) string { + return acctest.ConfigCompose( + acctest.ConfigLatestAmazonLinux2HVMEBSX8664AMI(), + acctest.ConfigAvailableAZsNoOptIn(), + acctest.AvailableEC2InstanceTypeForAvailabilityZone("data.aws_availability_zones.available.names[0]", "t3.micro", "t2.micro"), + fmt.Sprintf(` +resource "aws_instance" "test" { + ami = data.aws_ami.amzn2-ami-minimal-hvm-ebs-x86_64.id + instance_type = data.aws_ec2_instance_type_offering.available.instance_type + + tags = { + Name = %[1]q + } +} + +action "aws_ec2_stop_instance" "test" { + config { + instance_id = aws_instance.test.id + force = true + } +} +`, rName)) +} + +func testAccStopInstanceActionConfig_trigger(rName string) string { + return acctest.ConfigCompose( + acctest.ConfigLatestAmazonLinux2HVMEBSX8664AMI(), + acctest.ConfigAvailableAZsNoOptIn(), + acctest.AvailableEC2InstanceTypeForAvailabilityZone("data.aws_availability_zones.available.names[0]", "t3.micro", "t2.micro"), + fmt.Sprintf(` +resource "aws_instance" "test" { + ami = data.aws_ami.amzn2-ami-minimal-hvm-ebs-x86_64.id + instance_type = data.aws_ec2_instance_type_offering.available.instance_type + + tags = { + Name = %[1]q + } +} + +action "aws_ec2_stop_instance" "test" { + config 
{ + instance_id = aws_instance.test.id + force = true + } +} + +resource "terraform_data" "trigger" { + input = "trigger" + lifecycle { + action_trigger { + events = [before_create, before_update] + actions = [action.aws_ec2_stop_instance.test] + } + } +} +`, rName)) +} + +// Step 1: Get the AWS provider as a ProviderServerWithActions +func providerWithActions(ctx context.Context, t *testing.T) tfprotov5.ProviderServerWithActions { //nolint:staticcheck // SA1019: Working in alpha situation + t.Helper() + + factories := acctest.ProtoV5ProviderFactories + providerFactory, exists := factories["aws"] + if !exists { + t.Fatal("AWS provider factory not found in ProtoV5ProviderFactories") + } + + providerServer, err := providerFactory() + if err != nil { + t.Fatalf("Failed to create provider server: %v", err) + } + + providerWithActions, ok := providerServer.(tfprotov5.ProviderServerWithActions) //nolint:staticcheck // SA1019: Working in alpha situation + if !ok { + t.Fatal("Provider does not implement ProviderServerWithActions") + } + + schemaResp, err := providerWithActions.GetProviderSchema(ctx, &tfprotov5.GetProviderSchemaRequest{}) + if err != nil { + t.Fatalf("Failed to get provider schema: %v", err) + } + + if len(schemaResp.ActionSchemas) == 0 { + t.Fatal("Expected to find action schemas but didn't find any!") + } + + providerConfigValue, err := buildProviderConfiguration(t, schemaResp.Provider) + if err != nil { + t.Fatalf("Failed to build provider configuration: %v", err) + } + + configureResp, err := providerWithActions.ConfigureProvider(ctx, &tfprotov5.ConfigureProviderRequest{ + TerraformVersion: "1.0.0", + Config: providerConfigValue, + }) + if err != nil { + t.Fatalf("Failed to configure provider: %v", err) + } + + if len(configureResp.Diagnostics) > 0 { + var diagMessages []string + for _, diag := range configureResp.Diagnostics { + diagMessages = append(diagMessages, fmt.Sprintf("Severity: %s, Summary: %s, Detail: %s", diag.Severity, diag.Summary, 
diag.Detail)) + } + t.Fatalf("Provider configuration failed: %v", diagMessages) + } + + return providerWithActions +} + +// buildProviderConfiguration creates a minimal provider configuration from the schema +func buildProviderConfiguration(t *testing.T, providerSchema *tfprotov5.Schema) (*tfprotov5.DynamicValue, error) { + t.Helper() + + providerType := providerSchema.Block.ValueType() + configMap := make(map[string]tftypes.Value) + + if objType, ok := providerType.(tftypes.Object); ok { + for attrName, attrType := range objType.AttributeTypes { + configMap[attrName] = tftypes.NewValue(attrType, nil) + } + } + + configValue, err := tfprotov5.NewDynamicValue( + providerType, + tftypes.NewValue(providerType, configMap), + ) + if err != nil { + return nil, fmt.Errorf("failed to create config: %w", err) + } + + return &configValue, nil +} + +// Step 2: Build action configuration +func buildStopInstanceActionConfig(instanceID string, force bool) (tftypes.Type, map[string]tftypes.Value) { + configType := tftypes.Object{ + AttributeTypes: map[string]tftypes.Type{ + names.AttrInstanceID: tftypes.String, + "force": tftypes.Bool, + names.AttrTimeout: tftypes.Number, + names.AttrRegion: tftypes.String, + }, + } + + config := map[string]tftypes.Value{ + names.AttrInstanceID: tftypes.NewValue(tftypes.String, instanceID), + "force": tftypes.NewValue(tftypes.Bool, force), + names.AttrTimeout: tftypes.NewValue(tftypes.Number, nil), + names.AttrRegion: tftypes.NewValue(tftypes.String, nil), + } + + return configType, config +} + +// Step 3: Programmatic action invocation +func invokeStopInstanceAction(ctx context.Context, t *testing.T, instanceID string, force bool) error { + t.Helper() + + p := providerWithActions(ctx, t) + configType, configMap := buildStopInstanceActionConfig(instanceID, force) + actionTypeName := "aws_ec2_stop_instance" + + testConfig, err := tfprotov5.NewDynamicValue( + configType, + tftypes.NewValue(configType, configMap), + ) + if err != nil { + return 
fmt.Errorf("failed to create config: %w", err) + } + + invokeResp, err := p.InvokeAction(ctx, &tfprotov5.InvokeActionRequest{ + ActionType: actionTypeName, + Config: &testConfig, + }) + if err != nil { + return fmt.Errorf("invoke failed: %w", err) + } + + // Process events and check for completion + for event := range invokeResp.Events { + switch eventType := event.Type.(type) { + case tfprotov5.ProgressInvokeActionEventType: + t.Logf("Progress: %s", eventType.Message) + case tfprotov5.CompletedInvokeActionEventType: + return nil + default: + // Handle any other event types or errors + t.Logf("Received event type: %T", eventType) + } + } + + return nil +} diff --git a/internal/service/ec2/exports.go b/internal/service/ec2/exports.go index b6313fd6aaa7..2024cd7c908c 100644 --- a/internal/service/ec2/exports.go +++ b/internal/service/ec2/exports.go @@ -5,31 +5,35 @@ package ec2 // Exports for use in other modules. var ( - CustomFiltersBlock = customFiltersBlock - DeleteNetworkInterface = deleteNetworkInterface - DetachNetworkInterface = detachNetworkInterface - FindImageByID = findImageByID - FindInstanceByID = findInstanceByID - FindIPAMPoolAllocationsByIPAMPoolIDAndResourceID = findIPAMPoolAllocationsByIPAMPoolIDAndResourceID - FindNetworkInterfaces = findNetworkInterfaces - FindNetworkInterfacesByAttachmentInstanceOwnerIDAndDescription = findNetworkInterfacesByAttachmentInstanceOwnerIDAndDescription - FindSecurityGroupByDescriptionAndVPCID = findSecurityGroupByDescriptionAndVPCID - FindSecurityGroupByNameAndVPCID = findSecurityGroupByNameAndVPCID - FindSecurityGroupByNameAndVPCIDAndOwnerID = findSecurityGroupByNameAndVPCIDAndOwnerID - FindSecurityGroups = findSecurityGroups - FindSubnetByID = findSubnetByID - FindVPCByID = findVPCByID - FindVPCEndpointByID = findVPCEndpointByID - NetworkInterfaceDetachedTimeout = networkInterfaceDetachedTimeout - NewCustomFilterListFramework = newCustomFilterListFramework - NewFilter = newFilter - ResourceAMI = resourceAMI - 
ResourceSecurityGroup = resourceSecurityGroup - ResourceTransitGateway = resourceTransitGateway - ResourceTransitGatewayConnectPeer = resourceTransitGatewayConnectPeer - ResourceVPC = resourceVPC - VPCEndpointCreationTimeout = vpcEndpointCreationTimeout - WaitVPCEndpointAvailable = waitVPCEndpointAvailable + CustomFiltersBlock = customFiltersBlock + DeleteNetworkInterface = deleteNetworkInterface + DetachNetworkInterface = detachNetworkInterface + FindImageByID = findImageByID + FindInstanceByID = findInstanceByID + FindIPAMPoolAllocationsByIPAMPoolIDAndResourceID = findIPAMPoolAllocationsByIPAMPoolIDAndResourceID + FindNetworkInterfaces = findNetworkInterfaces + FindNetworkInterfacesByAttachmentInstanceOwnerIDAndDescription = findNetworkInterfacesByAttachmentInstanceOwnerIDAndDescription + FindSecurityGroupByDescriptionAndVPCID = findSecurityGroupByDescriptionAndVPCID + FindSecurityGroupByNameAndVPCID = findSecurityGroupByNameAndVPCID + FindSecurityGroupByNameAndVPCIDAndOwnerID = findSecurityGroupByNameAndVPCIDAndOwnerID + FindSecurityGroups = findSecurityGroups + FindSubnetByID = findSubnetByID + FindTransitGatewayAttachmentByID = findTransitGatewayAttachmentByID + FindTransitGatewayAttachmentByTransitGatewayIDAndDirectConnectGatewayID = findTransitGatewayAttachmentByTransitGatewayIDAndDirectConnectGatewayID + FindVPCByID = findVPCByID + FindVPCEndpointByID = findVPCEndpointByID + NetworkInterfaceDetachedTimeout = networkInterfaceDetachedTimeout + NewCustomFilterListFramework = newCustomFilterListFramework + NewFilter = newFilter + ResourceAMI = resourceAMI + ResourceSecurityGroup = resourceSecurityGroup + ResourceTransitGateway = resourceTransitGateway + ResourceTransitGatewayConnectPeer = resourceTransitGatewayConnectPeer + ResourceVPC = resourceVPC + VPCEndpointCreationTimeout = vpcEndpointCreationTimeout + WaitTransitGatewayAttachmentAccepted = waitTransitGatewayAttachmentAccepted + WaitTransitGatewayAttachmentDeleted = waitTransitGatewayAttachmentDeleted + 
WaitVPCEndpointAvailable = waitVPCEndpointAvailable ) type ( diff --git a/internal/service/ec2/exports_test.go b/internal/service/ec2/exports_test.go index 50fd1306e9cb..969489f6f977 100644 --- a/internal/service/ec2/exports_test.go +++ b/internal/service/ec2/exports_test.go @@ -55,6 +55,7 @@ var ( ResourceManagedPrefixList = resourceManagedPrefixList ResourceManagedPrefixListEntry = resourceManagedPrefixListEntry ResourceNATGateway = resourceNATGateway + ResourceNATGatewayEIPAssociation = newNATGatewayEIPAssociationResource ResourceNetworkACL = resourceNetworkACL ResourceNetworkACLAssociation = resourceNetworkACLAssociation ResourceNetworkACLRule = resourceNetworkACLRule @@ -132,170 +133,174 @@ var ( ResourceVerifiedAccessTrustProvider = resourceVerifiedAccessTrustProvider ResourceVolumeAttachment = resourceVolumeAttachment - CheckMostRecentAndMissingFilters = checkMostRecentAndMissingFilters - CustomFiltersSchema = customFiltersSchema - CustomerGatewayConfigurationToTunnelInfo = customerGatewayConfigurationToTunnelInfo - ErrCodeDefaultSubnetAlreadyExistsInAvailabilityZone = errCodeDefaultSubnetAlreadyExistsInAvailabilityZone - ErrCodeInvalidSpotDatafeedNotFound = errCodeInvalidSpotDatafeedNotFound - ExpandIPPerms = expandIPPerms - FindAvailabilityZones = findAvailabilityZones - FindCapacityReservationByID = findCapacityReservationByID - FindCarrierGatewayByID = findCarrierGatewayByID - FindClientVPNAuthorizationRuleByThreePartKey = findClientVPNAuthorizationRuleByThreePartKey - FindClientVPNEndpointByID = findClientVPNEndpointByID - FindClientVPNNetworkAssociationByTwoPartKey = findClientVPNNetworkAssociationByTwoPartKey - FindClientVPNRouteByThreePartKey = findClientVPNRouteByThreePartKey - FindCreateSnapshotCreateVolumePermissionByTwoPartKey = findCreateSnapshotCreateVolumePermissionByTwoPartKey - FindCustomerGatewayByID = findCustomerGatewayByID - FindDefaultCreditSpecificationByInstanceFamily = findDefaultCreditSpecificationByInstanceFamily - 
FindDHCPOptionsByID = findDHCPOptionsByID - FindEBSVolumeAttachment = findVolumeAttachment - FindEBSVolumeByID = findEBSVolumeByID - FindEIPByAllocationID = findEIPByAllocationID - FindEIPByAssociationID = findEIPByAssociationID - FindEIPDomainNameAttributeByAllocationID = findEIPDomainNameAttributeByAllocationID - FindEgressOnlyInternetGatewayByID = findEgressOnlyInternetGatewayByID - FindFastSnapshotRestoreByTwoPartKey = findFastSnapshotRestoreByTwoPartKey - FindFleetByID = findFleetByID - FindFlowLogByID = findFlowLogByID - FindHostByID = findHostByID - FindIPAMByID = findIPAMByID - FindIPAMPoolAllocationByTwoPartKey = findIPAMPoolAllocationByTwoPartKey - FindIPAMPoolAllocationsForVPC = findIPAMPoolAllocationsForVPC - FindIPAMPoolByID = findIPAMPoolByID - FindIPAMPoolCIDRByTwoPartKey = findIPAMPoolCIDRByTwoPartKey - FindIPAMResourceDiscoveryAssociationByID = findIPAMResourceDiscoveryAssociationByID - FindIPAMResourceDiscoveryByID = findIPAMResourceDiscoveryByID - FindIPAMScopeByID = findIPAMScopeByID - FindImageLaunchPermission = findImageLaunchPermission - FindInstanceConnectEndpointByID = findInstanceConnectEndpointByID - FindInstanceMetadataDefaults = findInstanceMetadataDefaults - FindInstanceStateByID = findInstanceStateByID - FindInternetGateway = findInternetGateway - FindInternetGatewayAttachment = findInternetGatewayAttachment - FindInternetGatewayByID = findInternetGatewayByID - FindKeyPairByName = findKeyPairByName - FindLaunchTemplateByID = findLaunchTemplateByID - FindLocalGatewayRouteByTwoPartKey = findLocalGatewayRouteByTwoPartKey - FindLocalGatewayRouteTableVPCAssociationByID = findLocalGatewayRouteTableVPCAssociationByID - FindMainRouteTableAssociationByID = findMainRouteTableAssociationByID - FindManagedPrefixListByID = findManagedPrefixListByID - FindManagedPrefixListEntryByIDAndCIDR = findManagedPrefixListEntryByIDAndCIDR - FindNATGatewayByID = findNATGatewayByID - FindNetworkACLAssociationByID = findNetworkACLAssociationByID - 
FindNetworkACLByID = findNetworkACLByID - FindNetworkACLEntryByThreePartKey = findNetworkACLEntryByThreePartKey - FindNetworkInsightsAnalysisByID = findNetworkInsightsAnalysisByID - FindNetworkInsightsPathByID = findNetworkInsightsPathByID - FindNetworkInterfaceByID = findNetworkInterfaceByID - FindNetworkInterfacePermissionByID = findNetworkInterfacePermissionByID - FindNetworkInterfaceSecurityGroup = findNetworkInterfaceSecurityGroup - FindNetworkPerformanceMetricSubscriptionByFourPartKey = findNetworkPerformanceMetricSubscriptionByFourPartKey - FindPlacementGroupByName = findPlacementGroupByName - FindPublicIPv4Pools = findPublicIPv4Pools - FindRouteByIPv4Destination = findRouteByIPv4Destination - FindRouteByIPv6Destination = findRouteByIPv6Destination - FindRouteByPrefixListIDDestination = findRouteByPrefixListIDDestination - FindRouteServerByID = findRouteServerByID - FindRouteServerAssociationByTwoPartKey = findRouteServerAssociationByTwoPartKey - FindRouteServerEndpointByID = findRouteServerEndpointByID - FindRouteServerPeerByID = findRouteServerPeerByID - FindRouteServerPropagationByTwoPartKey = findRouteServerPropagationByTwoPartKey - FindRouteTableAssociationByID = findRouteTableAssociationByID - FindRouteTableByID = findRouteTableByID - FindSecurityGroupByID = findSecurityGroupByID - FindSecurityGroupEgressRuleByID = findSecurityGroupEgressRuleByID - FindSecurityGroupIngressRuleByID = findSecurityGroupIngressRuleByID - FindSecurityGroupVPCAssociationByTwoPartKey = findSecurityGroupVPCAssociationByTwoPartKey - FindSnapshot = findSnapshot - FindSnapshotByID = findSnapshotByID - FindSpotDatafeedSubscription = findSpotDatafeedSubscription - FindSpotFleetRequestByID = findSpotFleetRequestByID - FindSpotFleetRequests = findSpotFleetRequests - FindSpotInstanceRequestByID = findSpotInstanceRequestByID - FindSubnetCIDRReservationBySubnetIDAndReservationID = findSubnetCIDRReservationBySubnetIDAndReservationID - FindSubnets = findSubnets - FindTag = findTag - 
FindTrafficMirrorFilterByID = findTrafficMirrorFilterByID - FindTrafficMirrorFilterRuleByTwoPartKey = findTrafficMirrorFilterRuleByTwoPartKey - FindTrafficMirrorSessionByID = findTrafficMirrorSessionByID - FindTrafficMirrorTargetByID = findTrafficMirrorTargetByID - FindTransitGatewayByID = findTransitGatewayByID - FindTransitGatewayConnectByID = findTransitGatewayConnectByID - FindTransitGatewayConnectPeerByID = findTransitGatewayConnectPeerByID - FindTransitGatewayMulticastDomainAssociationByThreePartKey = findTransitGatewayMulticastDomainAssociationByThreePartKey - FindTransitGatewayMulticastDomainByID = findTransitGatewayMulticastDomainByID - FindTransitGatewayMulticastGroupMemberByThreePartKey = findTransitGatewayMulticastGroupMemberByThreePartKey - FindTransitGatewayMulticastGroupSourceByThreePartKey = findTransitGatewayMulticastGroupSourceByThreePartKey - FindTransitGatewayPeeringAttachmentByID = findTransitGatewayPeeringAttachmentByID - FindTransitGatewayPolicyTableAssociationByTwoPartKey = findTransitGatewayPolicyTableAssociationByTwoPartKey - FindTransitGatewayPolicyTableByID = findTransitGatewayPolicyTableByID - FindTransitGatewayPrefixListReferenceByTwoPartKey = findTransitGatewayPrefixListReferenceByTwoPartKey - FindTransitGatewayRouteTableAssociationByTwoPartKey = findTransitGatewayRouteTableAssociationByTwoPartKey - FindTransitGatewayRouteTableByID = findTransitGatewayRouteTableByID - FindTransitGatewayRouteTablePropagationByTwoPartKey = findTransitGatewayRouteTablePropagationByTwoPartKey - FindTransitGatewayStaticRoute = findTransitGatewayStaticRoute - FindTransitGatewayVPCAttachmentByID = findTransitGatewayVPCAttachmentByID - FindVPCBlockPublicAccessExclusionByID = findVPCBlockPublicAccessExclusionByID - FindVPCCIDRBlockAssociationByID = findVPCCIDRBlockAssociationByID - FindVPCDHCPOptionsAssociation = findVPCDHCPOptionsAssociation - FindVPCEndpointConnectionByServiceIDAndVPCEndpointID = findVPCEndpointConnectionByServiceIDAndVPCEndpointID - 
FindVPCEndpointConnectionNotificationByID = findVPCEndpointConnectionNotificationByID - FindVPCEndpointRouteTableAssociationExists = findVPCEndpointRouteTableAssociationExists - FindVPCEndpointSecurityGroupAssociationExists = findVPCEndpointSecurityGroupAssociationExists - FindVPCEndpointServiceConfigurationByID = findVPCEndpointServiceConfigurationByID - FindVPCEndpointServicePermission = findVPCEndpointServicePermission - FindVPCEndpointSubnetAssociationExists = findVPCEndpointSubnetAssociationExists - FindVPCIPv6CIDRBlockAssociationByID = findVPCIPv6CIDRBlockAssociationByID - FindVPCPeeringConnectionByID = findVPCPeeringConnectionByID - FindVPNConnectionByID = findVPNConnectionByID - FindVPNConnectionRouteByTwoPartKey = findVPNConnectionRouteByTwoPartKey - FindVPNGatewayByID = findVPNGatewayByID - FindVPNGatewayRoutePropagationExists = findVPNGatewayRoutePropagationExists - FindVPNGatewayVPCAttachmentByTwoPartKey = findVPNGatewayVPCAttachmentByTwoPartKey - FindVerifiedAccessEndpointByID = findVerifiedAccessEndpointByID - FindVerifiedAccessGroupByID = findVerifiedAccessGroupByID - FindVerifiedAccessInstanceByID = findVerifiedAccessInstanceByID - FindVerifiedAccessInstanceLoggingConfigurationByInstanceID = findVerifiedAccessInstanceLoggingConfigurationByInstanceID - FindVerifiedAccessInstanceTrustProviderAttachmentExists = findVerifiedAccessInstanceTrustProviderAttachmentExists - FindVerifiedAccessTrustProviderByID = findVerifiedAccessTrustProviderByID - FindVolumeAttachmentInstanceByID = findVolumeAttachmentInstanceByID - FlattenNetworkInterfacePrivateIPAddresses = flattenNetworkInterfacePrivateIPAddresses - FlattenSecurityGroups = flattenSecurityGroups - IPAMServicePrincipal = ipamServicePrincipal - InstanceMigrateState = instanceMigrateState - InstanceStateUpgradeV1 = instanceStateUpgradeV1 - InternetGatewayAttachmentParseResourceID = internetGatewayAttachmentParseResourceID - KeyPairMigrateState = keyPairMigrateState - ManagedPrefixListEntryCreateResourceID = 
managedPrefixListEntryCreateResourceID - ManagedPrefixListEntryParseResourceID = managedPrefixListEntryParseResourceID - MatchRules = matchRules - NetworkACLRuleImportIDSeparator = networkACLRuleImportIDSeparator - NewAttributeFilterList = newAttributeFilterList - NewCustomFilterList = newCustomFilterList - NewTagFilterList = newTagFilterList - OpenSSHPublicKeysEqual = openSSHPublicKeysEqual - ParseInstanceType = parseInstanceType - ProtocolForValue = protocolForValue - ProtocolStateFunc = protocolStateFunc - SecurityGroupCollapseRules = securityGroupCollapseRules - SecurityGroupExpandRules = securityGroupExpandRules - SecurityGroupIPPermGather = securityGroupIPPermGather - SecurityGroupMigrateState = securityGroupMigrateState - SecurityGroupRuleCreateID = securityGroupRuleCreateID - SecurityGroupRuleHash = securityGroupRuleHash - SecurityGroupRuleMigrateState = securityGroupRuleMigrateState - SpotFleetRequestMigrateState = spotFleetRequestMigrateState - StopEBSVolumeAttachmentInstance = stopVolumeAttachmentInstance - StopInstance = stopInstance - SubnetMigrateState = subnetMigrateState - UnsuccessfulItemError = unsuccessfulItemError - UnsuccessfulItemsError = unsuccessfulItemsError - UpdateTags = updateTags - VPCDHCPOptionsAssociationParseResourceID = vpcDHCPOptionsAssociationParseResourceID - VPCMigrateState = vpcMigrateState - VPNGatewayRoutePropagationParseID = vpnGatewayRoutePropagationParseID - WaitVolumeAttachmentCreated = waitVolumeAttachmentCreated + CheckMostRecentAndMissingFilters = checkMostRecentAndMissingFilters + CustomFiltersSchema = customFiltersSchema + CustomerGatewayConfigurationToTunnelInfo = customerGatewayConfigurationToTunnelInfo + DefaultIPv6CIDRBlockAssociation = defaultIPv6CIDRBlockAssociation + ErrCodeDefaultSubnetAlreadyExistsInAvailabilityZone = errCodeDefaultSubnetAlreadyExistsInAvailabilityZone + ErrCodeInvalidSpotDatafeedNotFound = errCodeInvalidSpotDatafeedNotFound + ExpandIPPerms = expandIPPerms + FindAvailabilityZones = 
findAvailabilityZones + FindCapacityReservationByID = findCapacityReservationByID + FindCarrierGatewayByID = findCarrierGatewayByID + FindClientVPNAuthorizationRuleByThreePartKey = findClientVPNAuthorizationRuleByThreePartKey + FindClientVPNEndpointByID = findClientVPNEndpointByID + FindClientVPNNetworkAssociationByTwoPartKey = findClientVPNNetworkAssociationByTwoPartKey + FindClientVPNRouteByThreePartKey = findClientVPNRouteByThreePartKey + FindCreateSnapshotCreateVolumePermissionByTwoPartKey = findCreateSnapshotCreateVolumePermissionByTwoPartKey + FindCustomerGatewayByID = findCustomerGatewayByID + FindDefaultCreditSpecificationByInstanceFamily = findDefaultCreditSpecificationByInstanceFamily + FindDHCPOptionsByID = findDHCPOptionsByID + FindEBSVolumeAttachment = findVolumeAttachment + FindEBSVolumeByID = findEBSVolumeByID + FindEIPByAllocationID = findEIPByAllocationID + FindEIPByAssociationID = findEIPByAssociationID + FindEIPDomainNameAttributeByAllocationID = findEIPDomainNameAttributeByAllocationID + FindEgressOnlyInternetGatewayByID = findEgressOnlyInternetGatewayByID + FindFastSnapshotRestoreByTwoPartKey = findFastSnapshotRestoreByTwoPartKey + FindFleetByID = findFleetByID + FindFlowLogByID = findFlowLogByID + FindHostByID = findHostByID + FindIPAMByID = findIPAMByID + FindIPAMPoolAllocationByTwoPartKey = findIPAMPoolAllocationByTwoPartKey + FindIPAMPoolAllocationForResource = findIPAMPoolAllocationForResource + FindIPAMPoolByID = findIPAMPoolByID + FindIPAMPoolCIDRByTwoPartKey = findIPAMPoolCIDRByTwoPartKey + FindIPAMResourceDiscoveryAssociationByID = findIPAMResourceDiscoveryAssociationByID + FindIPAMResourceDiscoveryByID = findIPAMResourceDiscoveryByID + FindIPAMScopeByID = findIPAMScopeByID + FindImageLaunchPermission = findImageLaunchPermission + FindInstanceConnectEndpointByID = findInstanceConnectEndpointByID + FindInstanceMetadataDefaults = findInstanceMetadataDefaults + FindInstanceStateByID = findInstanceStateByID + FindInternetGateway = 
findInternetGateway + FindInternetGatewayAttachment = findInternetGatewayAttachment + FindInternetGatewayByID = findInternetGatewayByID + FindKeyPairByName = findKeyPairByName + FindLaunchTemplateByID = findLaunchTemplateByID + FindLocalGatewayRouteByTwoPartKey = findLocalGatewayRouteByTwoPartKey + FindLocalGatewayRouteTableVPCAssociationByID = findLocalGatewayRouteTableVPCAssociationByID + FindMainRouteTableAssociationByID = findMainRouteTableAssociationByID + FindManagedPrefixListByID = findManagedPrefixListByID + FindManagedPrefixListEntryByIDAndCIDR = findManagedPrefixListEntryByIDAndCIDR + FindNATGatewayByID = findNATGatewayByID + FindNATGatewayAddressByNATGatewayIDAndAllocationIDSucceeded = findNATGatewayAddressByNATGatewayIDAndAllocationIDSucceeded + FindNetworkACLAssociationByID = findNetworkACLAssociationByID + FindNetworkACLByID = findNetworkACLByID + FindNetworkACLEntryByThreePartKey = findNetworkACLEntryByThreePartKey + FindNetworkInsightsAnalysisByID = findNetworkInsightsAnalysisByID + FindNetworkInsightsPathByID = findNetworkInsightsPathByID + FindNetworkInterfaceByID = findNetworkInterfaceByID + FindNetworkInterfacePermissionByID = findNetworkInterfacePermissionByID + FindNetworkInterfaceSecurityGroup = findNetworkInterfaceSecurityGroup + FindNetworkPerformanceMetricSubscriptionByFourPartKey = findNetworkPerformanceMetricSubscriptionByFourPartKey + FindPlacementGroupByName = findPlacementGroupByName + FindPublicIPv4Pools = findPublicIPv4Pools + FindRouteByIPv4Destination = findRouteByIPv4Destination + FindRouteByIPv6Destination = findRouteByIPv6Destination + FindRouteByPrefixListIDDestination = findRouteByPrefixListIDDestination + FindRouteServerByID = findRouteServerByID + FindRouteServerAssociationByTwoPartKey = findRouteServerAssociationByTwoPartKey + FindRouteServerEndpointByID = findRouteServerEndpointByID + FindRouteServerPeerByID = findRouteServerPeerByID + FindRouteServerPropagationByTwoPartKey = findRouteServerPropagationByTwoPartKey + 
FindRouteTableAssociationByID = findRouteTableAssociationByID + FindRouteTableByID = findRouteTableByID + FindSecurityGroupByID = findSecurityGroupByID + FindSecurityGroupEgressRuleByID = findSecurityGroupEgressRuleByID + FindSecurityGroupIngressRuleByID = findSecurityGroupIngressRuleByID + FindSecurityGroupVPCAssociationByTwoPartKey = findSecurityGroupVPCAssociationByTwoPartKey + FindSnapshot = findSnapshot + FindSnapshotByID = findSnapshotByID + FindSpotDatafeedSubscription = findSpotDatafeedSubscription + FindSpotFleetRequestByID = findSpotFleetRequestByID + FindSpotFleetRequests = findSpotFleetRequests + FindSpotInstanceRequestByID = findSpotInstanceRequestByID + FindSubnetCIDRReservationBySubnetIDAndReservationID = findSubnetCIDRReservationBySubnetIDAndReservationID + FindSubnets = findSubnets + FindTag = findTag + FindTrafficMirrorFilterByID = findTrafficMirrorFilterByID + FindTrafficMirrorFilterRuleByTwoPartKey = findTrafficMirrorFilterRuleByTwoPartKey + FindTrafficMirrorSessionByID = findTrafficMirrorSessionByID + FindTrafficMirrorTargetByID = findTrafficMirrorTargetByID + FindTransitGatewayByID = findTransitGatewayByID + FindTransitGatewayConnectByID = findTransitGatewayConnectByID + FindTransitGatewayConnectPeerByID = findTransitGatewayConnectPeerByID + FindTransitGatewayMulticastDomainAssociationByThreePartKey = findTransitGatewayMulticastDomainAssociationByThreePartKey + FindTransitGatewayMulticastDomainByID = findTransitGatewayMulticastDomainByID + FindTransitGatewayMulticastGroupMemberByThreePartKey = findTransitGatewayMulticastGroupMemberByThreePartKey + FindTransitGatewayMulticastGroupSourceByThreePartKey = findTransitGatewayMulticastGroupSourceByThreePartKey + FindTransitGatewayPeeringAttachmentByID = findTransitGatewayPeeringAttachmentByID + FindTransitGatewayPolicyTableAssociationByTwoPartKey = findTransitGatewayPolicyTableAssociationByTwoPartKey + FindTransitGatewayPolicyTableByID = findTransitGatewayPolicyTableByID + 
FindTransitGatewayPrefixListReferenceByTwoPartKey = findTransitGatewayPrefixListReferenceByTwoPartKey + FindTransitGatewayRouteTableAssociationByTwoPartKey = findTransitGatewayRouteTableAssociationByTwoPartKey + FindTransitGatewayRouteTableByID = findTransitGatewayRouteTableByID + FindTransitGatewayRouteTablePropagationByTwoPartKey = findTransitGatewayRouteTablePropagationByTwoPartKey + FindTransitGatewayStaticRoute = findTransitGatewayStaticRoute + FindTransitGatewayVPCAttachmentByID = findTransitGatewayVPCAttachmentByID + FindVPCBlockPublicAccessExclusionByID = findVPCBlockPublicAccessExclusionByID + FindVPCCIDRBlockAssociationByID = findVPCCIDRBlockAssociationByID + FindVPCDHCPOptionsAssociation = findVPCDHCPOptionsAssociation + FindVPCEndpointConnectionByServiceIDAndVPCEndpointID = findVPCEndpointConnectionByServiceIDAndVPCEndpointID + FindVPCEndpointConnectionNotificationByID = findVPCEndpointConnectionNotificationByID + FindVPCEndpointRouteTableAssociationExists = findVPCEndpointRouteTableAssociationExists + FindVPCEndpointSecurityGroupAssociationExists = findVPCEndpointSecurityGroupAssociationExists + FindVPCEndpointServiceConfigurationByID = findVPCEndpointServiceConfigurationByID + FindVPCEndpointServicePermission = findVPCEndpointServicePermission + FindVPCEndpointSubnetAssociationExists = findVPCEndpointSubnetAssociationExists + FindVPCIPv6CIDRBlockAssociationByID = findVPCIPv6CIDRBlockAssociationByID + FindVPCPeeringConnectionByID = findVPCPeeringConnectionByID + FindVPNConnectionByID = findVPNConnectionByID + FindVPNConnectionRouteByTwoPartKey = findVPNConnectionRouteByTwoPartKey + FindVPNGatewayByID = findVPNGatewayByID + FindVPNGatewayRoutePropagationExists = findVPNGatewayRoutePropagationExists + FindVPNGatewayVPCAttachmentByTwoPartKey = findVPNGatewayVPCAttachmentByTwoPartKey + FindVerifiedAccessEndpointByID = findVerifiedAccessEndpointByID + FindVerifiedAccessGroupByID = findVerifiedAccessGroupByID + FindVerifiedAccessInstanceByID = 
findVerifiedAccessInstanceByID + FindVerifiedAccessInstanceLoggingConfigurationByInstanceID = findVerifiedAccessInstanceLoggingConfigurationByInstanceID + FindVerifiedAccessInstanceTrustProviderAttachmentExists = findVerifiedAccessInstanceTrustProviderAttachmentExists + FindVerifiedAccessTrustProviderByID = findVerifiedAccessTrustProviderByID + FindVolumeAttachmentInstanceByID = findVolumeAttachmentInstanceByID + FlattenNetworkInterfacePrivateIPAddresses = flattenNetworkInterfacePrivateIPAddresses + FlattenSecurityGroups = flattenSecurityGroups + FlowLogStateUpgradeV0 = flowLogStateUpgradeV0 + IPAMServicePrincipal = ipamServicePrincipal + InstanceMigrateState = instanceMigrateState + InstanceStateUpgradeV1 = instanceStateUpgradeV1 + InternetGatewayAttachmentParseResourceID = internetGatewayAttachmentParseResourceID + KeyPairMigrateState = keyPairMigrateState + LaunchTemplateStateUpgradeV0 = launchTemplateStateUpgradeV0 + ManagedPrefixListEntryCreateResourceID = managedPrefixListEntryCreateResourceID + ManagedPrefixListEntryParseResourceID = managedPrefixListEntryParseResourceID + MatchRules = matchRules + NetworkACLRuleImportIDSeparator = networkACLRuleImportIDSeparator + NewAttributeFilterList = newAttributeFilterList + NewCustomFilterList = newCustomFilterList + NewTagFilterList = newTagFilterList + OpenSSHPublicKeysEqual = openSSHPublicKeysEqual + ParseInstanceType = parseInstanceType + ProtocolForValue = protocolForValue + ProtocolStateFunc = protocolStateFunc + SecurityGroupCollapseRules = securityGroupCollapseRules + SecurityGroupExpandRules = securityGroupExpandRules + SecurityGroupIPPermGather = securityGroupIPPermGather + SecurityGroupMigrateState = securityGroupMigrateState + SecurityGroupRuleCreateID = securityGroupRuleCreateID + SecurityGroupRuleHash = securityGroupRuleHash + SecurityGroupRuleMigrateState = securityGroupRuleMigrateState + SpotFleetRequestMigrateState = spotFleetRequestMigrateState + StopEBSVolumeAttachmentInstance = 
stopVolumeAttachmentInstance + StopInstance = stopInstance + SubnetMigrateState = subnetMigrateState + UnsuccessfulItemError = unsuccessfulItemError + UnsuccessfulItemsError = unsuccessfulItemsError + UpdateTags = updateTags + VPCDHCPOptionsAssociationParseResourceID = vpcDHCPOptionsAssociationParseResourceID + VPCMigrateState = vpcMigrateState + VPNGatewayRoutePropagationParseID = vpnGatewayRoutePropagationParseID + WaitVolumeAttachmentCreated = waitVolumeAttachmentCreated ) type ( diff --git a/internal/service/ec2/filters.go b/internal/service/ec2/filters.go index e488fbcc91cc..475b8930b0af 100644 --- a/internal/service/ec2/filters.go +++ b/internal/service/ec2/filters.go @@ -10,6 +10,7 @@ import ( "github.com/aws/aws-sdk-go-v2/aws" awstypes "github.com/aws/aws-sdk-go-v2/service/ec2/types" datasourceschema "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + listschema "github.com/hashicorp/terraform-plugin-framework/list/schema" "github.com/hashicorp/terraform-plugin-framework/tfsdk" "github.com/hashicorp/terraform-plugin-framework/types" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" @@ -169,6 +170,31 @@ type ( customFilters = fwtypes.SetNestedObjectValueOf[customFilterModel] ) +func customListFiltersBlock(ctx context.Context) listschema.ListNestedBlock { + return listschema.ListNestedBlock{ + CustomType: fwtypes.NewListNestedObjectTypeOf[customListFilterModel](ctx), + NestedObject: listschema.NestedBlockObject{ + Attributes: map[string]listschema.Attribute{ + names.AttrName: listschema.StringAttribute{ + Required: true, + }, + names.AttrValues: listschema.ListAttribute{ + CustomType: fwtypes.ListOfStringType, + ElementType: types.StringType, + Required: true, + }, + }, + }, + } +} + +type customListFilterModel struct { + Name types.String `tfsdk:"name"` + Values fwtypes.ListOfString `tfsdk:"values"` +} + +type customListFilters = fwtypes.ListNestedObjectValueOf[customListFilterModel] + // newCustomFilterList takes the set value 
extracted from a schema // attribute conforming to the schema returned by CustomFiltersSchema, // and transforms it into a []*ec2.Filter representing the same filter diff --git a/internal/service/ec2/find.go b/internal/service/ec2/find.go index 9e3df0cfd980..15abb78a0ab9 100644 --- a/internal/service/ec2/find.go +++ b/internal/service/ec2/find.go @@ -6,6 +6,7 @@ package ec2 import ( "context" "fmt" + "iter" "slices" "strconv" "strings" @@ -379,65 +380,79 @@ func findHost(ctx context.Context, conn *ec2.Client, input *ec2.DescribeHostsInp return tfresource.AssertSingleValueResult(output, func(v *awstypes.Host) bool { return v.HostProperties != nil }) } -func findInstanceCreditSpecifications(ctx context.Context, conn *ec2.Client, input *ec2.DescribeInstanceCreditSpecificationsInput) ([]awstypes.InstanceCreditSpecification, error) { - var output []awstypes.InstanceCreditSpecification +func findInstanceByID(ctx context.Context, conn *ec2.Client, id string) (*awstypes.Instance, error) { + input := ec2.DescribeInstancesInput{ + InstanceIds: []string{id}, + } - pages := ec2.NewDescribeInstanceCreditSpecificationsPaginator(conn, input) - for pages.HasMorePages() { - page, err := pages.NextPage(ctx) + output, err := findInstance(ctx, conn, &input) - if tfawserr.ErrCodeEquals(err, errCodeInvalidInstanceIDNotFound) { - return nil, &retry.NotFoundError{ - LastError: err, - LastRequest: &input, - } - } + if err != nil { + return nil, err + } - if err != nil { - return nil, err + if state := output.State.Name; state == awstypes.InstanceStateNameTerminated { + return nil, &retry.NotFoundError{ + Message: string(state), + LastRequest: &input, } + } - output = append(output, page.InstanceCreditSpecifications...) + // Eventual consistency check. 
+ if aws.ToString(output.InstanceId) != id { + return nil, &retry.NotFoundError{ + LastRequest: &input, + } } return output, nil } -func findInstanceCreditSpecification(ctx context.Context, conn *ec2.Client, input *ec2.DescribeInstanceCreditSpecificationsInput) (*awstypes.InstanceCreditSpecification, error) { - output, err := findInstanceCreditSpecifications(ctx, conn, input) +func findInstance(ctx context.Context, conn *ec2.Client, input *ec2.DescribeInstancesInput) (*awstypes.Instance, error) { + output, err := tfslices.CollectWithError(listInstances(ctx, conn, input)) if err != nil { return nil, err } - return tfresource.AssertSingleValueResult(output) + return tfresource.AssertSingleValueResult(output, func(v *awstypes.Instance) bool { return v.State != nil }) } -func findInstanceCreditSpecificationByID(ctx context.Context, conn *ec2.Client, id string) (*awstypes.InstanceCreditSpecification, error) { - input := ec2.DescribeInstanceCreditSpecificationsInput{ - InstanceIds: []string{id}, - } +// DescribeInstances is an "All-Or-Some" call. +func listInstances(ctx context.Context, conn *ec2.Client, input *ec2.DescribeInstancesInput) iter.Seq2[awstypes.Instance, error] { + return func(yield func(awstypes.Instance, error) bool) { + pages := ec2.NewDescribeInstancesPaginator(conn, input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) - output, err := findInstanceCreditSpecification(ctx, conn, &input) + if tfawserr.ErrCodeEquals(err, errCodeInvalidInstanceIDNotFound) { + yield(awstypes.Instance{}, &retry.NotFoundError{ + LastError: err, + LastRequest: &input, + }) + return + } - if err != nil { - return nil, err - } + if err != nil { + yield(awstypes.Instance{}, err) + return + } - // Eventual consistency check. 
- if aws.ToString(output.InstanceId) != id { - return nil, &retry.NotFoundError{ - LastRequest: &input, + for _, v := range page.Reservations { + for _, instance := range v.Instances { + if !yield(instance, nil) { + return + } + } + } } } - - return output, nil } -func findInstances(ctx context.Context, conn *ec2.Client, input *ec2.DescribeInstancesInput) ([]awstypes.Instance, error) { - var output []awstypes.Instance +func findInstanceCreditSpecifications(ctx context.Context, conn *ec2.Client, input *ec2.DescribeInstanceCreditSpecificationsInput) ([]awstypes.InstanceCreditSpecification, error) { + var output []awstypes.InstanceCreditSpecification - pages := ec2.NewDescribeInstancesPaginator(conn, input) + pages := ec2.NewDescribeInstanceCreditSpecificationsPaginator(conn, input) for pages.HasMorePages() { page, err := pages.NextPage(ctx) @@ -452,42 +467,57 @@ func findInstances(ctx context.Context, conn *ec2.Client, input *ec2.DescribeIns return nil, err } - for _, v := range page.Reservations { - output = append(output, v.Instances...) - } + output = append(output, page.InstanceCreditSpecifications...) 
} return output, nil } -func findInstance(ctx context.Context, conn *ec2.Client, input *ec2.DescribeInstancesInput) (*awstypes.Instance, error) { - output, err := findInstances(ctx, conn, input) +func findInstanceTagValue(ctx context.Context, conn *ec2.Client, instanceID, tagKey string) (string, error) { + input := ec2.DescribeTagsInput{ + Filters: newAttributeFilterList(map[string]string{ + "resource-id": instanceID, + names.AttrKey: tagKey, + }), + } + + output, err := conn.DescribeTags(ctx, &input) + + if err != nil { + return "", err + } + + switch count := len(output.Tags); count { + case 0: + return "", nil + case 1: + return aws.ToString(output.Tags[0].Value), nil + default: + return "", tfresource.NewTooManyResultsError(count, input) + } +} + +func findInstanceCreditSpecification(ctx context.Context, conn *ec2.Client, input *ec2.DescribeInstanceCreditSpecificationsInput) (*awstypes.InstanceCreditSpecification, error) { + output, err := findInstanceCreditSpecifications(ctx, conn, input) if err != nil { return nil, err } - return tfresource.AssertSingleValueResult(output, func(v *awstypes.Instance) bool { return v.State != nil }) + return tfresource.AssertSingleValueResult(output) } -func findInstanceByID(ctx context.Context, conn *ec2.Client, id string) (*awstypes.Instance, error) { - input := ec2.DescribeInstancesInput{ +func findInstanceCreditSpecificationByID(ctx context.Context, conn *ec2.Client, id string) (*awstypes.InstanceCreditSpecification, error) { + input := ec2.DescribeInstanceCreditSpecificationsInput{ InstanceIds: []string{id}, } - output, err := findInstance(ctx, conn, &input) + output, err := findInstanceCreditSpecification(ctx, conn, &input) if err != nil { return nil, err } - if state := output.State.Name; state == awstypes.InstanceStateNameTerminated { - return nil, &retry.NotFoundError{ - Message: string(state), - LastRequest: &input, - } - } - // Eventual consistency check. 
if aws.ToString(output.InstanceId) != id { return nil, &retry.NotFoundError{ @@ -498,6 +528,21 @@ func findInstanceByID(ctx context.Context, conn *ec2.Client, id string) (*awstyp return output, nil } +func findInstanceMetadataDefaults(ctx context.Context, conn *ec2.Client) (*awstypes.InstanceMetadataDefaultsResponse, error) { + input := ec2.GetInstanceMetadataDefaultsInput{} + output, err := conn.GetInstanceMetadataDefaults(ctx, &input) + + if err != nil { + return nil, err + } + + if output == nil || output.AccountLevel == nil { + return nil, tfresource.NewEmptyResultError(input) + } + + return output.AccountLevel, nil +} + func findInstanceStatus(ctx context.Context, conn *ec2.Client, input *ec2.DescribeInstanceStatusInput) (*awstypes.InstanceStatus, error) { output, err := findInstanceStatuses(ctx, conn, input) @@ -834,6 +879,43 @@ func findLaunchTemplateVersionByTwoPartKey(ctx context.Context, conn *ec2.Client return output, nil } +func findLaunchTemplateData(ctx context.Context, conn *ec2.Client, launchTemplateSpecification *awstypes.LaunchTemplateSpecification) (*awstypes.ResponseLaunchTemplateData, error) { + input := ec2.DescribeLaunchTemplateVersionsInput{} + + if v := aws.ToString(launchTemplateSpecification.LaunchTemplateId); v != "" { + input.LaunchTemplateId = aws.String(v) + } else if v := aws.ToString(launchTemplateSpecification.LaunchTemplateName); v != "" { + input.LaunchTemplateName = aws.String(v) + } + + var latestVersion bool + + if v := aws.ToString(launchTemplateSpecification.Version); v != "" { + switch v { + case launchTemplateVersionDefault: + input.Filters = newAttributeFilterList(map[string]string{ + "is-default-version": "true", + }) + case launchTemplateVersionLatest: + latestVersion = true + default: + input.Versions = []string{v} + } + } + + output, err := findLaunchTemplateVersions(ctx, conn, &input) + + if err != nil { + return nil, fmt.Errorf("reading EC2 Launch Template versions: %w", err) + } + + if latestVersion { + return 
output[len(output)-1].LaunchTemplateData, nil + } + + return output[0].LaunchTemplateData, nil +} + func findLocalGatewayRouteTable(ctx context.Context, conn *ec2.Client, input *ec2.DescribeLocalGatewayRouteTablesInput) (*awstypes.LocalGatewayRouteTable, error) { output, err := findLocalGatewayRouteTables(ctx, conn, input) @@ -1165,6 +1247,48 @@ func findPublicIPv4PoolByID(ctx context.Context, conn *ec2.Client, id string) (* return output, nil } +func findVolumeAttachment(ctx context.Context, conn *ec2.Client, volumeID, instanceID, deviceName string) (*awstypes.VolumeAttachment, error) { + input := ec2.DescribeVolumesInput{ + Filters: newAttributeFilterList(map[string]string{ + "attachment.device": deviceName, + "attachment.instance-id": instanceID, + }), + VolumeIds: []string{volumeID}, + } + + output, err := findEBSVolume(ctx, conn, &input) + + if err != nil { + return nil, err + } + + if state := output.State; state == awstypes.VolumeStateAvailable || state == awstypes.VolumeStateDeleted { + return nil, &retry.NotFoundError{ + Message: string(state), + LastRequest: input, + } + } + + // Eventual consistency check. 
+ if aws.ToString(output.VolumeId) != volumeID { + return nil, &retry.NotFoundError{ + LastRequest: input, + } + } + + for _, v := range output.Attachments { + if v.State == awstypes.VolumeAttachmentStateDetached { + continue + } + + if aws.ToString(v.Device) == deviceName && aws.ToString(v.InstanceId) == instanceID { + return &v, nil + } + } + + return nil, &retry.NotFoundError{} +} + func findVolumeAttachmentInstanceByID(ctx context.Context, conn *ec2.Client, id string) (*awstypes.Instance, error) { input := ec2.DescribeInstancesInput{ InstanceIds: []string{id}, @@ -1704,6 +1828,22 @@ func findNATGatewayAddressByNATGatewayIDAndAllocationID(ctx context.Context, con })) } +func findNATGatewayAddressByNATGatewayIDAndAllocationIDSucceeded(ctx context.Context, conn *ec2.Client, natGatewayID, allocationID string) (*awstypes.NatGatewayAddress, error) { + output, err := findNATGatewayAddressByNATGatewayIDAndAllocationID(ctx, conn, natGatewayID, allocationID) + + if err != nil { + return nil, err + } + + if v := output.Status; v != awstypes.NatGatewayAddressStatusSucceeded { + return nil, &retry.NotFoundError{ + Message: string(v), + } + } + + return output, nil +} + func findNATGatewayAddressByNATGatewayIDAndPrivateIP(ctx context.Context, conn *ec2.Client, natGatewayID, privateIP string) (*awstypes.NatGatewayAddress, error) { output, err := findNATGatewayByID(ctx, conn, natGatewayID) @@ -2278,7 +2418,17 @@ func findNetworkInterfaceByAttachmentID(ctx context.Context, conn *ec2.Client, i }), } - return findNetworkInterface(ctx, conn, &input) + output, err := findNetworkInterface(ctx, conn, &input) + + if err != nil { + return nil, err + } + + if output.Attachment == nil { + return nil, tfresource.NewEmptyResultError(input) + } + + return output, nil } func findNetworkInterfaceSecurityGroup(ctx context.Context, conn *ec2.Client, networkInterfaceID string, securityGroupID string) (*awstypes.GroupIdentifier, error) { @@ -4036,6 +4186,16 @@ func 
findIPAMPoolAllocationsByIPAMPoolIDAndResourceID(ctx context.Context, conn }), nil } +func findIPAMPoolAllocationForResource(ctx context.Context, conn *ec2.Client, ipamPoolID, resourceID string) (*awstypes.IpamPoolAllocation, error) { + output, err := findIPAMPoolAllocationsByIPAMPoolIDAndResourceID(ctx, conn, ipamPoolID, resourceID) + + if err != nil { + return nil, err + } + + return tfresource.AssertFirstValueResult(output) +} + func findIPAMPoolCIDR(ctx context.Context, conn *ec2.Client, input *ec2.GetIpamPoolCidrsInput) (*awstypes.IpamPoolCidr, error) { output, err := findIPAMPoolCIDRs(ctx, conn, input) @@ -4561,6 +4721,9 @@ func findTransitGatewayAttachmentByID(ctx context.Context, conn *ec2.Client, id return nil, err } + // Explicitly don't check for awstypes.TransitGatewayAttachmentStateDeleted. + // Caller must handle all states. + // Eventual consistency check. if aws.ToString(output.TransitGatewayAttachmentId) != id { return nil, &retry.NotFoundError{ @@ -4571,6 +4734,27 @@ func findTransitGatewayAttachmentByID(ctx context.Context, conn *ec2.Client, id return output, nil } +func findTransitGatewayAttachmentByTransitGatewayIDAndDirectConnectGatewayID(ctx context.Context, conn *ec2.Client, tgwID, dxGatewayID string) (*awstypes.TransitGatewayAttachment, error) { + input := ec2.DescribeTransitGatewayAttachmentsInput{ + Filters: []awstypes.Filter{ + { + Name: aws.String("resource-type"), + Values: enum.Slice(awstypes.TransitGatewayAttachmentResourceTypeDirectConnectGateway), + }, + { + Name: aws.String("resource-id"), + Values: []string{dxGatewayID}, + }, + { + Name: aws.String("transit-gateway-id"), + Values: []string{tgwID}, + }, + }, + } + + return findTransitGatewayAttachment(ctx, conn, &input) +} + func findTransitGatewayConnect(ctx context.Context, conn *ec2.Client, input *ec2.DescribeTransitGatewayConnectsInput) (*awstypes.TransitGatewayConnect, error) { output, err := findTransitGatewayConnects(ctx, conn, input) diff --git 
a/internal/service/ec2/outposts_local_gateway_route.go b/internal/service/ec2/outposts_local_gateway_route.go index ee135c0d380e..927163038190 100644 --- a/internal/service/ec2/outposts_local_gateway_route.go +++ b/internal/service/ec2/outposts_local_gateway_route.go @@ -90,7 +90,7 @@ func resourceLocalGatewayRouteRead(ctx context.Context, d *schema.ResourceData, const ( timeout = 1 * time.Minute ) - outputRaw, err := tfresource.RetryWhenNewResourceNotFound(ctx, timeout, func() (any, error) { + localGatewayRoute, err := tfresource.RetryWhenNewResourceNotFound(ctx, timeout, func(ctx context.Context) (*awstypes.LocalGatewayRoute, error) { return findLocalGatewayRouteByTwoPartKey(ctx, conn, localGatewayRouteTableID, destination) }, d.IsNewResource()) @@ -104,8 +104,6 @@ func resourceLocalGatewayRouteRead(ctx context.Context, d *schema.ResourceData, return sdkdiag.AppendErrorf(diags, "reading EC2 Local Gateway Route (%s): %s", d.Id(), err) } - localGatewayRoute := outputRaw.(*awstypes.LocalGatewayRoute) - d.Set("destination_cidr_block", localGatewayRoute.DestinationCidrBlock) d.Set("local_gateway_virtual_interface_group_id", localGatewayRoute.LocalGatewayVirtualInterfaceGroupId) d.Set("local_gateway_route_table_id", localGatewayRoute.LocalGatewayRouteTableId) diff --git a/internal/service/ec2/service_endpoint_resolver_gen.go b/internal/service/ec2/service_endpoint_resolver_gen.go index f4b2a033bd36..ec0d57e483af 100644 --- a/internal/service/ec2/service_endpoint_resolver_gen.go +++ b/internal/service/ec2/service_endpoint_resolver_gen.go @@ -62,7 +62,7 @@ func (r resolverV2) ResolveEndpoint(ctx context.Context, params ec2.EndpointPara }) params.UseFIPS = aws.Bool(false) } else { - err = fmt.Errorf("looking up ec2 endpoint %q: %s", hostname, err) + err = fmt.Errorf("looking up ec2 endpoint %q: %w", hostname, err) return } } else { diff --git a/internal/service/ec2/service_endpoints_gen_test.go b/internal/service/ec2/service_endpoints_gen_test.go index 
0503434eae4a..f31b15df767a 100644 --- a/internal/service/ec2/service_endpoints_gen_test.go +++ b/internal/service/ec2/service_endpoints_gen_test.go @@ -521,7 +521,7 @@ func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { ) } -var errCancelOperation = fmt.Errorf("Test: Canceling request") +var errCancelOperation = errors.New("Test: Canceling request") func addCancelRequestMiddleware() func(*middleware.Stack) error { return func(stack *middleware.Stack) error { diff --git a/internal/service/ec2/service_package.go b/internal/service/ec2/service_package.go index fe8d0f62c255..636f2cb4fe2a 100644 --- a/internal/service/ec2/service_package.go +++ b/internal/service/ec2/service_package.go @@ -10,41 +10,52 @@ import ( "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/ec2" "github.com/hashicorp/aws-sdk-go-base/v2/tfawserr" + "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/vcr" ) -func (p *servicePackage) withExtraOptions(_ context.Context, config map[string]any) []func(*ec2.Options) { +func (p *servicePackage) withExtraOptions(ctx context.Context, config map[string]any) []func(*ec2.Options) { cfg := *(config["aws_sdkv2_config"].(*aws.Config)) return []func(*ec2.Options){ func(o *ec2.Options) { - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(func(err error) aws.Ternary { - if tfawserr.ErrMessageContains(err, errCodeInvalidParameterValue, "This call cannot be completed because there are pending VPNs or Virtual Interfaces") { // AttachVpnGateway, DetachVpnGateway - return aws.TrueTernary - } - - if tfawserr.ErrCodeEquals(err, errCodeInsufficientInstanceCapacity) { // CreateCapacityReservation, RunInstances - return aws.TrueTernary - } - - if tfawserr.ErrMessageContains(err, errCodeOperationNotPermitted, "Endpoint cannot be created while another endpoint is 
being created") { // CreateClientVpnEndpoint - return aws.TrueTernary - } - - if tfawserr.ErrMessageContains(err, errCodeConcurrentMutationLimitExceeded, "Cannot initiate another change for this endpoint at this time") { // CreateClientVpnRoute, DeleteClientVpnRoute - return aws.TrueTernary - } - - if tfawserr.ErrMessageContains(err, errCodeVPNConnectionLimitExceeded, "maximum number of mutating objects has been reached") { // CreateVpnConnection - return aws.TrueTernary - } - - if tfawserr.ErrMessageContains(err, errCodeVPNGatewayLimitExceeded, "maximum number of mutating objects has been reached") { // CreateVpnGateway - return aws.TrueTernary - } - - return aws.UnknownTernary // Delegate to configured Retryer. - })) + retryables := []retry.IsErrorRetryable{ + retry.IsErrorRetryableFunc(func(err error) aws.Ternary { + if tfawserr.ErrMessageContains(err, errCodeInvalidParameterValue, "This call cannot be completed because there are pending VPNs or Virtual Interfaces") { // AttachVpnGateway, DetachVpnGateway + return aws.TrueTernary + } + + if tfawserr.ErrCodeEquals(err, errCodeInsufficientInstanceCapacity) { // CreateCapacityReservation, RunInstances + return aws.TrueTernary + } + + if tfawserr.ErrMessageContains(err, errCodeOperationNotPermitted, "Endpoint cannot be created while another endpoint is being created") { // CreateClientVpnEndpoint + return aws.TrueTernary + } + + if tfawserr.ErrMessageContains(err, errCodeConcurrentMutationLimitExceeded, "Cannot initiate another change for this endpoint at this time") { // CreateClientVpnRoute, DeleteClientVpnRoute + return aws.TrueTernary + } + + if tfawserr.ErrMessageContains(err, errCodeVPNConnectionLimitExceeded, "maximum number of mutating objects has been reached") { // CreateVpnConnection + return aws.TrueTernary + } + + if tfawserr.ErrMessageContains(err, errCodeVPNGatewayLimitExceeded, "maximum number of mutating objects has been reached") { // CreateVpnGateway + return aws.TrueTernary + } + + return 
aws.UnknownTernary // Delegate to configured Retryer. + }), + } + // Include go-vcr retryable to prevent generated client retryer from being overridden + if inContext, ok := conns.FromContext(ctx); ok && inContext.VCREnabled() { + tflog.Info(ctx, "overriding retry behavior to immediately return VCR errors") + retryables = append(retryables, vcr.InteractionNotFoundRetryableFunc) + } + + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retryables...) }, } } diff --git a/internal/service/ec2/service_package_gen.go b/internal/service/ec2/service_package_gen.go index 756d787775ec..feca3a8a5611 100644 --- a/internal/service/ec2/service_package_gen.go +++ b/internal/service/ec2/service_package_gen.go @@ -4,10 +4,11 @@ package ec2 import ( "context" + "iter" + "slices" "unique" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/ec2" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -18,6 +19,17 @@ import ( type servicePackage struct{} +func (p *servicePackage) Actions(ctx context.Context) []*inttypes.ServicePackageAction { + return []*inttypes.ServicePackageAction{ + { + Factory: newStopInstanceAction, + TypeName: "aws_ec2_stop_instance", + Name: "Stop Instance", + Region: unique.Make(inttypes.ResourceRegionDefault()), + }, + } +} + func (p *servicePackage) FrameworkDataSources(ctx context.Context) []*inttypes.ServicePackageFrameworkDataSource { return []*inttypes.ServicePackageFrameworkDataSource{ { @@ -63,6 +75,12 @@ func (p *servicePackage) FrameworkDataSources(ctx context.Context) []*inttypes.S Name: "Security Group Rules", Region: unique.Make(inttypes.ResourceRegionDefault()), }, + { + Factory: newDataSourceVPNConnection, + TypeName: "aws_vpn_connection", + Name: "VPN Connection", + Region: unique.Make(inttypes.ResourceRegionDefault()), + }, } } @@ -122,6 +140,12 @@ func (p *servicePackage) FrameworkResources(ctx 
context.Context) []*inttypes.Ser Name: "EIP Domain Name", Region: unique.Make(inttypes.ResourceRegionDefault()), }, + { + Factory: newNATGatewayEIPAssociationResource, + TypeName: "aws_nat_gateway_eip_association", + Name: "VPC NAT Gateway EIP Association", + Region: unique.Make(inttypes.ResourceRegionDefault()), + }, { Factory: newNetworkInterfacePermissionResource, TypeName: "aws_network_interface_permission", @@ -201,7 +225,11 @@ func (p *servicePackage) FrameworkResources(ctx context.Context) []*inttypes.Ser Tags: unique.Make(inttypes.ServicePackageResourceTags{ IdentifierAttribute: names.AttrID, }), - Region: unique.Make(inttypes.ResourceRegionDefault()), + Region: unique.Make(inttypes.ResourceRegionDefault()), + Identity: inttypes.RegionalSingleParameterIdentity(names.AttrID), + Import: inttypes.FrameworkImport{ + WrappedImport: true, + }, }, { Factory: newSecurityGroupIngressRuleResource, @@ -210,7 +238,11 @@ func (p *servicePackage) FrameworkResources(ctx context.Context) []*inttypes.Ser Tags: unique.Make(inttypes.ServicePackageResourceTags{ IdentifierAttribute: names.AttrID, }), - Region: unique.Make(inttypes.ResourceRegionDefault()), + Region: unique.Make(inttypes.ResourceRegionDefault()), + Identity: inttypes.RegionalSingleParameterIdentity(names.AttrID), + Import: inttypes.FrameworkImport{ + WrappedImport: true, + }, }, { Factory: newSecurityGroupVPCAssociationResource, @@ -1292,7 +1324,11 @@ func (p *servicePackage) SDKResources(ctx context.Context) []*inttypes.ServicePa Tags: unique.Make(inttypes.ServicePackageResourceTags{ IdentifierAttribute: names.AttrID, }), - Region: unique.Make(inttypes.ResourceRegionDefault()), + Region: unique.Make(inttypes.ResourceRegionDefault()), + Identity: inttypes.RegionalSingleParameterIdentity(names.AttrID), + Import: inttypes.SDKv2Import{ + CustomImport: true, + }, }, { Factory: resourceInternetGateway, @@ -1398,6 +1434,16 @@ func (p *servicePackage) SDKResources(ctx context.Context) []*inttypes.ServicePa TypeName: 
"aws_route", Name: "Route", Region: unique.Make(inttypes.ResourceRegionDefault()), + Identity: inttypes.RegionalParameterizedIdentity([]inttypes.IdentityAttribute{ + inttypes.StringIdentityAttribute("route_table_id", true), + inttypes.StringIdentityAttribute("destination_cidr_block", false), + inttypes.StringIdentityAttribute("destination_ipv6_cidr_block", false), + inttypes.StringIdentityAttribute("destination_prefix_list_id", false), + }), + Import: inttypes.SDKv2Import{ + WrappedImport: true, + ImportID: routeImportID{}, + }, }, { Factory: resourceRouteTable, @@ -1406,7 +1452,11 @@ func (p *servicePackage) SDKResources(ctx context.Context) []*inttypes.ServicePa Tags: unique.Make(inttypes.ServicePackageResourceTags{ IdentifierAttribute: names.AttrID, }), - Region: unique.Make(inttypes.ResourceRegionDefault()), + Region: unique.Make(inttypes.ResourceRegionDefault()), + Identity: inttypes.RegionalSingleParameterIdentity(names.AttrID), + Import: inttypes.SDKv2Import{ + WrappedImport: true, + }, }, { Factory: resourceRouteTableAssociation, @@ -1421,7 +1471,11 @@ func (p *servicePackage) SDKResources(ctx context.Context) []*inttypes.ServicePa Tags: unique.Make(inttypes.ServicePackageResourceTags{ IdentifierAttribute: names.AttrID, }), - Region: unique.Make(inttypes.ResourceRegionDefault()), + Region: unique.Make(inttypes.ResourceRegionDefault()), + Identity: inttypes.RegionalSingleParameterIdentity(names.AttrID), + Import: inttypes.SDKv2Import{ + WrappedImport: true, + }, }, { Factory: resourceSecurityGroupRule, @@ -1466,7 +1520,11 @@ func (p *servicePackage) SDKResources(ctx context.Context) []*inttypes.ServicePa Tags: unique.Make(inttypes.ServicePackageResourceTags{ IdentifierAttribute: names.AttrID, }), - Region: unique.Make(inttypes.ResourceRegionDefault()), + Region: unique.Make(inttypes.ResourceRegionDefault()), + Identity: inttypes.RegionalSingleParameterIdentity(names.AttrID), + Import: inttypes.SDKv2Import{ + WrappedImport: true, + }, }, { Factory: 
resourceVerifiedAccessEndpoint, @@ -1529,7 +1587,11 @@ func (p *servicePackage) SDKResources(ctx context.Context) []*inttypes.ServicePa Tags: unique.Make(inttypes.ServicePackageResourceTags{ IdentifierAttribute: names.AttrID, }), - Region: unique.Make(inttypes.ResourceRegionDefault()), + Region: unique.Make(inttypes.ResourceRegionDefault()), + Identity: inttypes.RegionalSingleParameterIdentity(names.AttrID), + Import: inttypes.SDKv2Import{ + CustomImport: true, + }, }, { Factory: resourceVPCDHCPOptions, @@ -1553,7 +1615,11 @@ func (p *servicePackage) SDKResources(ctx context.Context) []*inttypes.ServicePa Tags: unique.Make(inttypes.ServicePackageResourceTags{ IdentifierAttribute: names.AttrID, }), - Region: unique.Make(inttypes.ResourceRegionDefault()), + Region: unique.Make(inttypes.ResourceRegionDefault()), + Identity: inttypes.RegionalSingleParameterIdentity(names.AttrID), + Import: inttypes.SDKv2Import{ + WrappedImport: true, + }, }, { Factory: resourceVPCEndpointConnectionAccepter, @@ -1756,6 +1822,41 @@ func (p *servicePackage) SDKResources(ctx context.Context) []*inttypes.ServicePa } } +func (p *servicePackage) SDKListResources(ctx context.Context) iter.Seq[*inttypes.ServicePackageSDKListResource] { + return slices.Values([]*inttypes.ServicePackageSDKListResource{ + { + Factory: instanceResourceAsListResource, + TypeName: "aws_instance", + Name: "Instance", + Region: unique.Make(inttypes.ResourceRegionDefault()), + Tags: unique.Make(inttypes.ServicePackageResourceTags{ + IdentifierAttribute: names.AttrID, + }), + Identity: inttypes.RegionalSingleParameterIdentity(names.AttrID), + }, + { + Factory: subnetResourceAsListResource, + TypeName: "aws_subnet", + Name: "Subnet", + Region: unique.Make(inttypes.ResourceRegionDefault()), + Tags: unique.Make(inttypes.ServicePackageResourceTags{ + IdentifierAttribute: names.AttrID, + }), + Identity: inttypes.RegionalSingleParameterIdentity(names.AttrID), + }, + { + Factory: vpcResourceAsListResource, + TypeName: 
"aws_vpc", + Name: "VPC", + Region: unique.Make(inttypes.ResourceRegionDefault()), + Tags: unique.Make(inttypes.ServicePackageResourceTags{ + IdentifierAttribute: names.AttrID, + }), + Identity: inttypes.RegionalSingleParameterIdentity(names.AttrID), + }, + }) +} + func (p *servicePackage) ServicePackageName() string { return names.EC2 } @@ -1779,7 +1880,7 @@ func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) ( func(o *ec2.Options) { if inContext, ok := conns.FromContext(ctx); ok && inContext.VCREnabled() { tflog.Info(ctx, "overriding retry behavior to immediately return VCR errors") - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(vcr.InteractionNotFoundRetryableFunc)) + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), vcr.InteractionNotFoundRetryableFunc) } }, withExtraOptions(ctx, p, config), diff --git a/internal/service/ec2/status.go b/internal/service/ec2/status.go index 5770b004d72a..04f0f2e509f0 100644 --- a/internal/service/ec2/status.go +++ b/internal/service/ec2/status.go @@ -15,6 +15,10 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/tfresource" ) +const ( + launchTemplateFoundStatus = "Found" +) + func statusAvailabilityZoneGroupOptInStatus(ctx context.Context, conn *ec2.Client, name string) retry.StateRefreshFunc { return func() (any, string, error) { output, err := findAvailabilityZoneGroupByName(ctx, conn, name) @@ -221,6 +225,28 @@ func statusInstanceRootBlockDeviceDeleteOnTermination(ctx context.Context, conn } } +func statusLaunchTemplate(ctx context.Context, conn *ec2.Client, id string, idIsName bool) retry.StateRefreshFunc { + return func() (any, string, error) { + var output *awstypes.LaunchTemplate + var err error + if idIsName { + output, err = findLaunchTemplateByName(ctx, conn, id) + } else { + output, err = findLaunchTemplateByID(ctx, conn, id) + } + + if tfresource.NotFound(err) { + return nil, "", nil + } + + if err != 
nil { + return nil, "", err + } + + return output, launchTemplateFoundStatus, nil + } +} + func statusLocalGatewayRoute(ctx context.Context, conn *ec2.Client, localGatewayRouteTableID, destinationCIDRBlock string) retry.StateRefreshFunc { return func() (any, string, error) { output, err := findLocalGatewayRouteByTwoPartKey(ctx, conn, localGatewayRouteTableID, destinationCIDRBlock) @@ -564,6 +590,25 @@ func statusVolumeAttachment(ctx context.Context, conn *ec2.Client, volumeID, ins } } +func statusVolumeAttachmentInstanceState(ctx context.Context, conn *ec2.Client, id string) retry.StateRefreshFunc { + return func() (any, string, error) { + // Don't call FindInstanceByID as it maps useful status codes to NotFoundError. + output, err := findInstance(ctx, conn, &ec2.DescribeInstancesInput{ + InstanceIds: []string{id}, + }) + + if tfresource.NotFound(err) { + return nil, "", nil + } + + if err != nil { + return nil, "", err + } + + return output, string(output.State.Name), nil + } +} + func statusVolumeModification(ctx context.Context, conn *ec2.Client, id string) retry.StateRefreshFunc { return func() (any, string, error) { output, err := findVolumeModificationByID(ctx, conn, id) @@ -1228,6 +1273,22 @@ func statusTransitGateway(ctx context.Context, conn *ec2.Client, id string) retr } } +func statusTransitGatewayAttachment(ctx context.Context, conn *ec2.Client, id string) retry.StateRefreshFunc { + return func() (any, string, error) { + output, err := findTransitGatewayAttachmentByID(ctx, conn, id) + + if tfresource.NotFound(err) { + return nil, "", nil + } + + if err != nil { + return nil, "", err + } + + return output, string(output.State), nil + } +} + func statusTransitGatewayConnect(ctx context.Context, conn *ec2.Client, id string) retry.StateRefreshFunc { return func() (any, string, error) { output, err := findTransitGatewayConnectByID(ctx, conn, id) diff --git a/internal/service/ec2/sweep.go b/internal/service/ec2/sweep.go index 726b90eee8a1..e89f7cdbbc77 100644 
--- a/internal/service/ec2/sweep.go +++ b/internal/service/ec2/sweep.go @@ -15,12 +15,12 @@ import ( awstypes "github.com/aws/aws-sdk-go-v2/service/ec2/types" "github.com/hashicorp/aws-sdk-go-base/v2/tfawserr" "github.com/hashicorp/go-multierror" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/sweep" "github.com/hashicorp/terraform-provider-aws/internal/sweep/awsv2" "github.com/hashicorp/terraform-provider-aws/internal/sweep/framework" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -33,10 +33,7 @@ func RegisterSweepers() { }, }) - resource.AddTestSweepers("aws_ec2_capacity_reservation", &resource.Sweeper{ - Name: "aws_ec2_capacity_reservation", - F: sweepCapacityReservations, - }) + awsv2.Register("aws_ec2_capacity_reservation", sweepCapacityReservations) resource.AddTestSweepers("aws_ec2_carrier_gateway", &resource.Sweeper{ Name: "aws_ec2_carrier_gateway", @@ -473,57 +470,43 @@ func RegisterSweepers() { awsv2.Register("aws_vpc_route_server_propagation", sweepRouteServerPropagations) } -func sweepCapacityReservations(region string) error { - ctx := sweep.Context(region) - client, err := sweep.SharedRegionalSweepClient(ctx, region) - if err != nil { - return fmt.Errorf("error getting client: %s", err) - } +func sweepCapacityReservations(ctx context.Context, client *conns.AWSClient) ([]sweep.Sweepable, error) { conn := client.EC2Client(ctx) + var input ec2.DescribeCapacityReservationsInput + var sweepResources []sweep.Sweepable - input := ec2.DescribeCapacityReservationsInput{} - resp, err := conn.DescribeCapacityReservations(ctx, &input) - - if awsv2.SkipSweepError(err) { - log.Printf("[WARN] Skipping EC2 Capacity Reservation sweep for %s: %s", region, err) - return nil - } - - if err != nil { - 
return fmt.Errorf("Error retrieving EC2 Capacity Reservations: %s", err) - } - - if len(resp.CapacityReservations) == 0 { - log.Print("[DEBUG] No EC2 Capacity Reservations to sweep") - return nil - } + pages := ec2.NewDescribeCapacityReservationsPaginator(conn, &input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) - for _, r := range resp.CapacityReservations { - if r.State != awstypes.CapacityReservationStateCancelled && r.State != awstypes.CapacityReservationStateExpired { - id := aws.ToString(r.CapacityReservationId) + if err != nil { + return nil, err + } - log.Printf("[INFO] Cancelling EC2 Capacity Reservation EC2 Instance: %s", id) + for _, v := range page.CapacityReservations { + id := aws.ToString(v.CapacityReservationId) - input := ec2.CancelCapacityReservationInput{ - CapacityReservationId: aws.String(id), + if state := v.State; state == awstypes.CapacityReservationStateCancelled || state == awstypes.CapacityReservationStateExpired { + log.Printf("[INFO] Skipping EC2 Capacity Reservation %s: State=%s", id, state) + continue } - _, err := conn.CancelCapacityReservation(ctx, &input) + r := resourceCapacityReservation() + d := r.Data(nil) + d.SetId(id) - if err != nil { - log.Printf("[ERROR] Error cancelling EC2 Capacity Reservation (%s): %s", id, err) - } + sweepResources = append(sweepResources, sweep.NewSweepResource(r, d, client)) } } - return nil + return sweepResources, nil } func sweepCarrierGateways(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.EC2Client(ctx) input := ec2.DescribeCarrierGatewaysInput{} @@ -564,7 +547,7 @@ func sweepClientVPNEndpoints(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %w", err) + return 
fmt.Errorf("getting client: %w", err) } conn := client.EC2Client(ctx) input := ec2.DescribeClientVpnEndpointsInput{} @@ -605,7 +588,7 @@ func sweepClientVPNNetworkAssociations(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %w", err) + return fmt.Errorf("getting client: %w", err) } conn := client.EC2Client(ctx) input := ec2.DescribeClientVpnEndpointsInput{} @@ -667,7 +650,7 @@ func sweepFleets(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.EC2Client(ctx) @@ -713,7 +696,7 @@ func sweepEBSVolumes(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.EC2Client(ctx) input := ec2.DescribeVolumesInput{} @@ -761,7 +744,7 @@ func sweepEBSSnapshots(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } input := ec2.DescribeSnapshotsInput{ OwnerIds: []string{"self"}, @@ -804,7 +787,7 @@ func sweepEgressOnlyInternetGateways(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } input := ec2.DescribeEgressOnlyInternetGatewaysInput{} conn := client.EC2Client(ctx) @@ -845,7 +828,7 @@ func sweepEIPs(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return 
fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } // There is currently no paginator or Marker/NextToken input := ec2.DescribeAddressesInput{} @@ -860,7 +843,7 @@ func sweepEIPs(region string) error { } if err != nil { - return fmt.Errorf("error describing EC2 EIPs: %s", err) + return err } for _, v := range output.Addresses { @@ -895,7 +878,7 @@ func sweepEIPDomainNames(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.EC2Client(ctx) input := ec2.DescribeAddressesAttributeInput{ @@ -936,7 +919,7 @@ func sweepFlowLogs(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.EC2Client(ctx) input := ec2.DescribeFlowLogsInput{} @@ -977,7 +960,7 @@ func sweepHosts(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.EC2Client(ctx) input := ec2.DescribeHostsInput{} @@ -1018,7 +1001,7 @@ func sweepInstances(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.EC2Client(ctx) input := ec2.DescribeInstancesInput{} @@ -1072,7 +1055,7 @@ func sweepInternetGateways(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: 
%w", err) } conn := client.EC2Client(ctx) @@ -1152,7 +1135,7 @@ func sweepKeyPairs(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.EC2Client(ctx) input := ec2.DescribeKeyPairsInput{} @@ -1190,7 +1173,7 @@ func sweepLaunchTemplates(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.EC2Client(ctx) input := ec2.DescribeLaunchTemplatesInput{} @@ -1231,7 +1214,7 @@ func sweepNATGateways(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } input := ec2.DescribeNatGatewaysInput{} conn := client.EC2Client(ctx) @@ -1272,7 +1255,7 @@ func sweepNetworkACLs(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } input := ec2.DescribeNetworkAclsInput{} conn := client.EC2Client(ctx) @@ -1325,7 +1308,7 @@ func sweepNetworkInterfaces(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.EC2Client(ctx) input := ec2.DescribeNetworkInterfacesInput{} @@ -1373,7 +1356,7 @@ func sweepManagedPrefixLists(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: 
%s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.EC2Client(ctx) var sweepResources []sweep.Sweepable @@ -1418,7 +1401,7 @@ func sweepNetworkInsightsPaths(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.EC2Client(ctx) var sweepResources []sweep.Sweepable @@ -1459,7 +1442,7 @@ func sweepPlacementGroups(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.EC2Client(ctx) input := ec2.DescribePlacementGroupsInput{} @@ -1498,7 +1481,7 @@ func sweepRouteTables(region string) error { client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %w", err) + return fmt.Errorf("getting client: %w", err) } conn := client.EC2Client(ctx) @@ -1601,7 +1584,7 @@ func sweepSecurityGroups(region string) error { client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %w", err) + return fmt.Errorf("getting client: %w", err) } conn := client.EC2Client(ctx) @@ -1675,14 +1658,14 @@ func sweepSecurityGroups(region string) error { } // Handle EC2 eventual consistency - err := retry.RetryContext(ctx, 1*time.Minute, func() *retry.RetryError { + err := tfresource.Retry(ctx, 1*time.Minute, func(ctx context.Context) *tfresource.RetryError { _, err := conn.DeleteSecurityGroup(ctx, &input) if tfawserr.ErrCodeEquals(err, "DependencyViolation") { - return retry.RetryableError(err) + return tfresource.RetryableError(err) } if err != nil { - return retry.NonRetryableError(err) + return tfresource.NonRetryableError(err) } return nil }) @@ -1701,7 +1684,7 @@ func 
sweepSpotFleetRequests(region string) error { client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.EC2Client(ctx) @@ -1750,7 +1733,7 @@ func sweepSpotInstanceRequests(region string) error { client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.EC2Client(ctx) @@ -1800,7 +1783,14 @@ func sweepSubnets(ctx context.Context, client *conns.AWSClient) ([]sweep.Sweepab var sweepResources []sweep.Sweepable r := resourceSubnet() - input := ec2.DescribeSubnetsInput{} + input := ec2.DescribeSubnetsInput{ + Filters: []awstypes.Filter{ + { + Name: aws.String("default-for-az"), + Values: []string{"false"}, + }, + }, + } pages := ec2.NewDescribeSubnetsPaginator(conn, &input) for pages.HasMorePages() { page, err := pages.NextPage(ctx) @@ -1809,11 +1799,6 @@ func sweepSubnets(ctx context.Context, client *conns.AWSClient) ([]sweep.Sweepab } for _, v := range page.Subnets { - // Skip default subnets. 
- if aws.ToBool(v.DefaultForAz) { - continue - } - d := r.Data(nil) d.SetId(aws.ToString(v.SubnetId)) @@ -1828,7 +1813,7 @@ func sweepTrafficMirrorFilters(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.EC2Client(ctx) input := ec2.DescribeTrafficMirrorFiltersInput{} @@ -1869,7 +1854,7 @@ func sweepTrafficMirrorSessions(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.EC2Client(ctx) input := ec2.DescribeTrafficMirrorSessionsInput{} @@ -1910,7 +1895,7 @@ func sweepTrafficMirrorTargets(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.EC2Client(ctx) input := ec2.DescribeTrafficMirrorTargetsInput{} @@ -1951,7 +1936,7 @@ func sweepTransitGateways(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.EC2Client(ctx) input := ec2.DescribeTransitGatewaysInput{} @@ -1996,7 +1981,7 @@ func sweepTransitGatewayConnectPeers(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.EC2Client(ctx) input := ec2.DescribeTransitGatewayConnectPeersInput{} @@ -2041,7 +2026,7 @@ func sweepTransitGatewayConnects(region string) error { ctx 
:= sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.EC2Client(ctx) input := ec2.DescribeTransitGatewayConnectsInput{} @@ -2086,7 +2071,7 @@ func sweepTransitGatewayMulticastDomains(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %w", err) + return fmt.Errorf("getting client: %w", err) } conn := client.EC2Client(ctx) input := ec2.DescribeTransitGatewayMulticastDomainsInput{} @@ -2131,7 +2116,7 @@ func sweepTransitGatewayPeeringAttachments(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.EC2Client(ctx) input := ec2.DescribeTransitGatewayPeeringAttachmentsInput{} @@ -2176,7 +2161,7 @@ func sweepTransitGatewayVPCAttachments(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.EC2Client(ctx) input := ec2.DescribeTransitGatewayVpcAttachmentsInput{} @@ -2222,7 +2207,7 @@ func sweepVPCDHCPOptions(region string) error { client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } input := ec2.DescribeDhcpOptionsInput{} @@ -2292,7 +2277,7 @@ func sweepVPCEndpoints(region string) error { client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %w", err) + return fmt.Errorf("getting client: %w", err) } conn := client.EC2Client(ctx) @@ 
-2347,7 +2332,7 @@ func sweepVPCEndpointConnectionAccepters(region string) error { client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %w", err) + return fmt.Errorf("getting client: %w", err) } conn := client.EC2Client(ctx) @@ -2392,7 +2377,7 @@ func sweepVPCEndpointServices(region string) error { client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %w", err) + return fmt.Errorf("getting client: %w", err) } conn := client.EC2Client(ctx) @@ -2442,7 +2427,7 @@ func sweepVPCPeeringConnections(region string) error { client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } input := ec2.DescribeVpcPeeringConnectionsInput{} @@ -2485,13 +2470,20 @@ func sweepVPCs(region string) error { client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.EC2Client(ctx) - input := ec2.DescribeVpcsInput{} var sweepResources []sweep.Sweepable + input := ec2.DescribeVpcsInput{ + Filters: []awstypes.Filter{ + { + Name: aws.String("is-default"), + Values: []string{"false"}, + }, + }, + } pages := ec2.NewDescribeVpcsPaginator(conn, &input) for pages.HasMorePages() { page, err := pages.NextPage(ctx) @@ -2506,11 +2498,6 @@ func sweepVPCs(region string) error { } for _, v := range page.Vpcs { - // Skip default VPCs. 
- if aws.ToBool(v.IsDefault) { - continue - } - r := resourceVPC() d := r.Data(nil) d.SetId(aws.ToString(v.VpcId)) @@ -2533,7 +2520,7 @@ func sweepVPNConnections(region string) error { client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.EC2Client(ctx) @@ -2576,7 +2563,7 @@ func sweepVPNGateways(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.EC2Client(ctx) input := ec2.DescribeVpnGatewaysInput{} @@ -2626,7 +2613,7 @@ func sweepCustomerGateways(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.EC2Client(ctx) input := ec2.DescribeCustomerGatewaysInput{} @@ -2739,7 +2726,7 @@ func sweepAMIs(region string) error { client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } input := ec2.DescribeImagesInput{ @@ -2784,7 +2771,7 @@ func sweepNetworkPerformanceMetricSubscriptions(region string) error { client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.EC2Client(ctx) @@ -2827,7 +2814,7 @@ func sweepInstanceConnectEndpoints(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %w", err) + return fmt.Errorf("getting client: %w", err) } conn := client.EC2Client(ctx) input := 
ec2.DescribeInstanceConnectEndpointsInput{} @@ -2871,7 +2858,7 @@ func sweepVerifiedAccessEndpoints(region string) error { client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.EC2Client(ctx) @@ -2914,7 +2901,7 @@ func sweepVerifiedAccessGroups(region string) error { client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.EC2Client(ctx) @@ -2957,7 +2944,7 @@ func sweepVerifiedAccessInstances(region string) error { client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.EC2Client(ctx) @@ -3000,7 +2987,7 @@ func sweepVerifiedAccessTrustProviders(region string) error { client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.EC2Client(ctx) @@ -3043,7 +3030,7 @@ func sweepVerifiedAccessTrustProviderAttachments(region string) error { client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.EC2Client(ctx) diff --git a/internal/service/ec2/tags.go b/internal/service/ec2/tags.go index 0338b3d6e78f..96a900b0cf9d 100644 --- a/internal/service/ec2/tags.go +++ b/internal/service/ec2/tags.go @@ -25,7 +25,7 @@ func createTags(ctx context.Context, conn *ec2.Client, identifier string, tags [ newTagsMap := keyValueTags(ctx, tags) - _, err := tfresource.RetryWhenAWSErrCodeContains(ctx, eventualConsistencyTimeout, func() (any, error) { + _, err := tfresource.RetryWhenAWSErrCodeContains(ctx, 
eventualConsistencyTimeout, func(ctx context.Context) (any, error) { return nil, updateTags(ctx, conn, identifier, nil, newTagsMap, optFns...) }, ".NotFound") diff --git a/internal/service/ec2/tags_gen.go b/internal/service/ec2/tags_gen.go index cd74f2dcbbbc..db11f8f6a13b 100644 --- a/internal/service/ec2/tags_gen.go +++ b/internal/service/ec2/tags_gen.go @@ -3,8 +3,8 @@ package ec2 import ( "context" - "fmt" + "github.com/YakDriver/smarterr" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/ec2" awstypes "github.com/aws/aws-sdk-go-v2/service/ec2/types" @@ -39,13 +39,13 @@ func findTag(ctx context.Context, conn *ec2.Client, identifier, key string, optF output, err := conn.DescribeTags(ctx, &input, optFns...) if err != nil { - return nil, err + return nil, smarterr.NewError(err) } listTags := keyValueTags(ctx, output.Tags) if !listTags.KeyExists(key) { - return nil, tfresource.NewEmptyResultError(nil) + return nil, smarterr.NewError(tfresource.NewEmptyResultError(nil)) } return listTags.KeyValue(key), nil @@ -71,7 +71,7 @@ func listTags(ctx context.Context, conn *ec2.Client, identifier string, optFns . page, err := pages.NextPage(ctx, optFns...) if err != nil { - return tftags.New(ctx, nil), err + return tftags.New(ctx, nil), smarterr.NewError(err) } output = append(output, page.Tags...) @@ -86,7 +86,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri tags, err := listTags(ctx, meta.(*conns.AWSClient).EC2Client(ctx), identifier) if err != nil { - return err + return smarterr.NewError(err) } if inContext, ok := tftags.FromContext(ctx); ok { @@ -181,7 +181,7 @@ func updateTags(ctx context.Context, conn *ec2.Client, identifier string, oldTag _, err := conn.DeleteTags(ctx, &input, optFns...) 
if err != nil { - return fmt.Errorf("untagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } @@ -196,7 +196,7 @@ func updateTags(ctx context.Context, conn *ec2.Client, identifier string, oldTag _, err := conn.CreateTags(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("tagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } diff --git a/internal/service/ec2/testdata/EBSSnapshotBlockPublicAccess/basic_v5.100.0/main_gen.tf b/internal/service/ec2/testdata/EBSSnapshotBlockPublicAccess/basic_v5.100.0/main_gen.tf new file mode 100644 index 000000000000..0e643d46cb9c --- /dev/null +++ b/internal/service/ec2/testdata/EBSSnapshotBlockPublicAccess/basic_v5.100.0/main_gen.tf @@ -0,0 +1,17 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resource "aws_ebs_snapshot_block_public_access" "test" { + state = "block-all-sharing" +} + +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "5.100.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/ec2/testdata/EBSSnapshotBlockPublicAccess/basic_v6.0.0/main_gen.tf b/internal/service/ec2/testdata/EBSSnapshotBlockPublicAccess/basic_v6.0.0/main_gen.tf new file mode 100644 index 000000000000..4ae07243e95b --- /dev/null +++ b/internal/service/ec2/testdata/EBSSnapshotBlockPublicAccess/basic_v6.0.0/main_gen.tf @@ -0,0 +1,17 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_ebs_snapshot_block_public_access" "test" { + state = "block-all-sharing" +} + +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "6.0.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/ec2/testdata/ImageBlockPublicAccess/basic_v5.100.0/main_gen.tf b/internal/service/ec2/testdata/ImageBlockPublicAccess/basic_v5.100.0/main_gen.tf new file mode 100644 index 000000000000..6c0dd6a7c05c --- /dev/null +++ b/internal/service/ec2/testdata/ImageBlockPublicAccess/basic_v5.100.0/main_gen.tf @@ -0,0 +1,17 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resource "aws_ec2_image_block_public_access" "test" { + state = "block-new-sharing" +} + +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "5.100.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/ec2/testdata/ImageBlockPublicAccess/basic_v6.0.0/main_gen.tf b/internal/service/ec2/testdata/ImageBlockPublicAccess/basic_v6.0.0/main_gen.tf new file mode 100644 index 000000000000..f45bd73d1b27 --- /dev/null +++ b/internal/service/ec2/testdata/ImageBlockPublicAccess/basic_v6.0.0/main_gen.tf @@ -0,0 +1,17 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resource "aws_ec2_image_block_public_access" "test" { + state = "block-new-sharing" +} + +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "6.0.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/ec2/testdata/Instance/basic/main_gen.tf b/internal/service/ec2/testdata/Instance/basic/main_gen.tf new file mode 100644 index 000000000000..e6f13617506b --- /dev/null +++ b/internal/service/ec2/testdata/Instance/basic/main_gen.tf @@ -0,0 +1,36 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_instance" "test" { + ami = data.aws_ami.amzn2-ami-minimal-hvm-ebs-arm64.id + instance_type = "t4g.nano" + + metadata_options { + http_tokens = "required" + } +} + +# acctest.ConfigLatestAmazonLinux2HVMEBSARM64AMI + +# acctest.configLatestAmazonLinux2HVMEBSAMI("arm64") + +data "aws_ami" "amzn2-ami-minimal-hvm-ebs-arm64" { + most_recent = true + owners = ["amazon"] + + filter { + name = "name" + values = ["amzn2-ami-minimal-hvm-*"] + } + + filter { + name = "root-device-type" + values = ["ebs"] + } + + filter { + name = "architecture" + values = ["arm64"] + } +} + diff --git a/internal/service/ec2/testdata/Instance/basic_v6.10.0/main_gen.tf b/internal/service/ec2/testdata/Instance/basic_v6.10.0/main_gen.tf new file mode 100644 index 000000000000..48bf53876924 --- /dev/null +++ b/internal/service/ec2/testdata/Instance/basic_v6.10.0/main_gen.tf @@ -0,0 +1,46 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resource "aws_instance" "test" { + ami = data.aws_ami.amzn2-ami-minimal-hvm-ebs-arm64.id + instance_type = "t4g.nano" + + metadata_options { + http_tokens = "required" + } +} + +# acctest.ConfigLatestAmazonLinux2HVMEBSARM64AMI + +# acctest.configLatestAmazonLinux2HVMEBSAMI("arm64") + +data "aws_ami" "amzn2-ami-minimal-hvm-ebs-arm64" { + most_recent = true + owners = ["amazon"] + + filter { + name = "name" + values = ["amzn2-ami-minimal-hvm-*"] + } + + filter { + name = "root-device-type" + values = ["ebs"] + } + + filter { + name = "architecture" + values = ["arm64"] + } +} + +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "6.10.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/ec2/testdata/Instance/list_basic/main.tf b/internal/service/ec2/testdata/Instance/list_basic/main.tf new file mode 100644 index 000000000000..d88aa6324cfe --- /dev/null +++ b/internal/service/ec2/testdata/Instance/list_basic/main.tf @@ -0,0 +1,39 @@ +# Copyright 
(c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +provider "aws" {} + +resource "aws_instance" "test" { + count = 3 + + ami = data.aws_ami.amzn2-ami-minimal-hvm-ebs-arm64.id + instance_type = "t4g.nano" + + metadata_options { + http_tokens = "required" + } +} + +# acctest.ConfigLatestAmazonLinux2HVMEBSARM64AMI + +# acctest.configLatestAmazonLinux2HVMEBSAMI("arm64") + +data "aws_ami" "amzn2-ami-minimal-hvm-ebs-arm64" { + most_recent = true + owners = ["amazon"] + + filter { + name = "name" + values = ["amzn2-ami-minimal-hvm-*"] + } + + filter { + name = "root-device-type" + values = ["ebs"] + } + + filter { + name = "architecture" + values = ["arm64"] + } +} diff --git a/internal/service/ec2/testdata/Instance/list_basic/main.tfquery.hcl b/internal/service/ec2/testdata/Instance/list_basic/main.tfquery.hcl new file mode 100644 index 000000000000..ed585f6b87be --- /dev/null +++ b/internal/service/ec2/testdata/Instance/list_basic/main.tfquery.hcl @@ -0,0 +1,6 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +list "aws_instance" "test" { + provider = aws +} diff --git a/internal/service/ec2/testdata/Instance/list_exclude_autoscaled/main.tf b/internal/service/ec2/testdata/Instance/list_exclude_autoscaled/main.tf new file mode 100644 index 000000000000..f994bc816b2c --- /dev/null +++ b/internal/service/ec2/testdata/Instance/list_exclude_autoscaled/main.tf @@ -0,0 +1,76 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +provider "aws" {} + +resource "aws_autoscaling_group" "test" { + name = var.rName + availability_zones = [data.aws_availability_zones.available.names[0]] + + max_size = 1 + min_size = 1 + desired_capacity = 1 + + launch_template { + id = aws_launch_template.test.id + version = aws_launch_template.test.default_version + } + + tag { + key = "test-filter" + value = var.rName + propagate_at_launch = true + } +} + +resource "aws_launch_template" "test" { + name = var.rName + image_id = data.aws_ami.amzn2-ami-minimal-hvm-ebs-arm64.id + instance_type = "t4g.nano" +} + +# acctest.ConfigLatestAmazonLinux2HVMEBSARM64AMI + +# acctest.configLatestAmazonLinux2HVMEBSAMI("arm64") + +data "aws_ami" "amzn2-ami-minimal-hvm-ebs-arm64" { + most_recent = true + owners = ["amazon"] + + filter { + name = "name" + values = ["amzn2-ami-minimal-hvm-*"] + } + + filter { + name = "root-device-type" + values = ["ebs"] + } + + filter { + name = "architecture" + values = ["arm64"] + } +} + +# acctest.ConfigAvailableAZsNoOptInDefaultExclude() + +data "aws_availability_zones" "available" { + exclude_zone_ids = local.default_exclude_zone_ids + state = "available" + + filter { + name = "opt-in-status" + values = ["opt-in-not-required"] + } +} + +locals { + default_exclude_zone_ids = ["usw2-az4", "usgw1-az2"] +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} diff --git a/internal/service/ec2/testdata/Instance/list_exclude_autoscaled/main.tfquery.hcl b/internal/service/ec2/testdata/Instance/list_exclude_autoscaled/main.tfquery.hcl new file mode 100644 index 000000000000..d3efe088c97f --- /dev/null +++ b/internal/service/ec2/testdata/Instance/list_exclude_autoscaled/main.tfquery.hcl @@ -0,0 +1,25 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +list "aws_instance" "excluded" { + provider = aws + + config { + filter { + name = "tag:test-filter" + values = [var.rName] + } + } +} + +list "aws_instance" "included" { + provider = aws + + config { + filter { + name = "tag:test-filter" + values = [var.rName] + } + include_auto_scaled = true + } +} diff --git a/internal/service/ec2/testdata/Instance/list_filtered/main.tf b/internal/service/ec2/testdata/Instance/list_filtered/main.tf new file mode 100644 index 000000000000..b443362903b8 --- /dev/null +++ b/internal/service/ec2/testdata/Instance/list_filtered/main.tf @@ -0,0 +1,65 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +provider "aws" {} + +resource "aws_instance" "expected" { + count = 2 + + ami = data.aws_ami.amzn2-ami-minimal-hvm-ebs-arm64.id + instance_type = "t4g.nano" + + metadata_options { + http_tokens = "required" + } + + tags = { + Name = "expected-${count.index}" + } +} + +resource "aws_ec2_instance_state" "expected" { + count = 2 + + instance_id = aws_instance.expected[count.index].id + state = "stopped" +} + +resource "aws_instance" "not_expected" { + count = 2 + + ami = data.aws_ami.amzn2-ami-minimal-hvm-ebs-arm64.id + instance_type = "t4g.nano" + + metadata_options { + http_tokens = "required" + } + + tags = { + Name = "not-expected-${count.index}" + } +} + +# acctest.ConfigLatestAmazonLinux2HVMEBSARM64AMI + +# acctest.configLatestAmazonLinux2HVMEBSAMI("arm64") + +data "aws_ami" "amzn2-ami-minimal-hvm-ebs-arm64" { + most_recent = true + owners = ["amazon"] + + filter { + name = "name" + values = ["amzn2-ami-minimal-hvm-*"] + } + + filter { + name = "root-device-type" + values = ["ebs"] + } + + filter { + name = "architecture" + values = ["arm64"] + } +} diff --git a/internal/service/ec2/testdata/Instance/list_filtered/main.tfquery.hcl b/internal/service/ec2/testdata/Instance/list_filtered/main.tfquery.hcl new file mode 100644 index 000000000000..f4d65cfbd1de --- /dev/null +++ 
b/internal/service/ec2/testdata/Instance/list_filtered/main.tfquery.hcl @@ -0,0 +1,13 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +list "aws_instance" "test" { + provider = aws + + config { + filter { + name = "instance-state-name" + values = ["stopped"] + } + } +} diff --git a/internal/service/ec2/testdata/Instance/list_region_override/main.tf b/internal/service/ec2/testdata/Instance/list_region_override/main.tf new file mode 100644 index 000000000000..05a65a6027a7 --- /dev/null +++ b/internal/service/ec2/testdata/Instance/list_region_override/main.tf @@ -0,0 +1,49 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +provider "aws" {} + +resource "aws_instance" "test" { + count = 3 + + region = var.region + + ami = data.aws_ami.amzn2-ami-minimal-hvm-ebs-arm64.id + instance_type = "t4g.nano" + + metadata_options { + http_tokens = "required" + } +} + +# acctest.ConfigLatestAmazonLinux2HVMEBSARM64AMI + +# acctest.configLatestAmazonLinux2HVMEBSAMI("arm64") + +data "aws_ami" "amzn2-ami-minimal-hvm-ebs-arm64" { + region = var.region + + most_recent = true + owners = ["amazon"] + + filter { + name = "name" + values = ["amzn2-ami-minimal-hvm-*"] + } + + filter { + name = "root-device-type" + values = ["ebs"] + } + + filter { + name = "architecture" + values = ["arm64"] + } +} + +variable "region" { + description = "Region to deploy resource in" + type = string + nullable = false +} diff --git a/internal/service/ec2/testdata/Instance/list_region_override/main.tfquery.hcl b/internal/service/ec2/testdata/Instance/list_region_override/main.tfquery.hcl new file mode 100644 index 000000000000..aeff71aebd72 --- /dev/null +++ b/internal/service/ec2/testdata/Instance/list_region_override/main.tfquery.hcl @@ -0,0 +1,10 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +list "aws_instance" "test" { + provider = aws + + config { + region = var.region + } +} diff --git a/internal/service/ec2/testdata/Instance/region_override/main_gen.tf b/internal/service/ec2/testdata/Instance/region_override/main_gen.tf new file mode 100644 index 000000000000..84ff8e6deaa1 --- /dev/null +++ b/internal/service/ec2/testdata/Instance/region_override/main_gen.tf @@ -0,0 +1,46 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resource "aws_instance" "test" { + region = var.region + + ami = data.aws_ami.amzn2-ami-minimal-hvm-ebs-arm64.id + instance_type = "t4g.nano" + + metadata_options { + http_tokens = "required" + } +} + +# acctest.ConfigLatestAmazonLinux2HVMEBSARM64AMI + +# acctest.configLatestAmazonLinux2HVMEBSAMI("arm64") + +data "aws_ami" "amzn2-ami-minimal-hvm-ebs-arm64" { + region = var.region + + most_recent = true + owners = ["amazon"] + + filter { + name = "name" + values = ["amzn2-ami-minimal-hvm-*"] + } + + filter { + name = "root-device-type" + values = ["ebs"] + } + + filter { + name = "architecture" + values = ["arm64"] + } +} + + +variable "region" { + description = "Region to deploy resource in" + type = string + nullable = false +} diff --git a/internal/service/ec2/testdata/Route/basic/main_gen.tf b/internal/service/ec2/testdata/Route/basic/main_gen.tf new file mode 100644 index 000000000000..59feddab1f01 --- /dev/null +++ b/internal/service/ec2/testdata/Route/basic/main_gen.tf @@ -0,0 +1,21 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_route" "test" { + route_table_id = aws_route_table.test.id + destination_cidr_block = "10.3.0.0/16" + gateway_id = aws_internet_gateway.test.id +} + +resource "aws_vpc" "test" { + cidr_block = "10.1.0.0/16" +} + +resource "aws_internet_gateway" "test" { + vpc_id = aws_vpc.test.id +} + +resource "aws_route_table" "test" { + vpc_id = aws_vpc.test.id +} + diff --git a/internal/service/ec2/testdata/Route/basic_v6.10.0/main_gen.tf b/internal/service/ec2/testdata/Route/basic_v6.10.0/main_gen.tf new file mode 100644 index 000000000000..22f2106d9398 --- /dev/null +++ b/internal/service/ec2/testdata/Route/basic_v6.10.0/main_gen.tf @@ -0,0 +1,31 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resource "aws_route" "test" { + route_table_id = aws_route_table.test.id + destination_cidr_block = "10.3.0.0/16" + gateway_id = aws_internet_gateway.test.id +} + +resource "aws_vpc" "test" { + cidr_block = "10.1.0.0/16" +} + +resource "aws_internet_gateway" "test" { + vpc_id = aws_vpc.test.id +} + +resource "aws_route_table" "test" { + vpc_id = aws_vpc.test.id +} + +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "6.10.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/ec2/testdata/Route/region_override/main_gen.tf b/internal/service/ec2/testdata/Route/region_override/main_gen.tf new file mode 100644 index 000000000000..0020eb1d3811 --- /dev/null +++ b/internal/service/ec2/testdata/Route/region_override/main_gen.tf @@ -0,0 +1,35 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_route" "test" { + region = var.region + + route_table_id = aws_route_table.test.id + destination_cidr_block = "10.3.0.0/16" + gateway_id = aws_internet_gateway.test.id +} + +resource "aws_vpc" "test" { + region = var.region + + cidr_block = "10.1.0.0/16" +} + +resource "aws_internet_gateway" "test" { + region = var.region + + vpc_id = aws_vpc.test.id +} + +resource "aws_route_table" "test" { + region = var.region + + vpc_id = aws_vpc.test.id +} + + +variable "region" { + description = "Region to deploy resource in" + type = string + nullable = false +} diff --git a/internal/service/ec2/testdata/RouteTable/basic/main_gen.tf b/internal/service/ec2/testdata/RouteTable/basic/main_gen.tf new file mode 100644 index 000000000000..c4b31f642229 --- /dev/null +++ b/internal/service/ec2/testdata/RouteTable/basic/main_gen.tf @@ -0,0 +1,11 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resource "aws_route_table" "test" { + vpc_id = aws_vpc.test.id +} + +resource "aws_vpc" "test" { + cidr_block = "10.1.0.0/16" +} + diff --git a/internal/service/ec2/testdata/RouteTable/basic_v6.9.0/main_gen.tf b/internal/service/ec2/testdata/RouteTable/basic_v6.9.0/main_gen.tf new file mode 100644 index 000000000000..16206b2738fe --- /dev/null +++ b/internal/service/ec2/testdata/RouteTable/basic_v6.9.0/main_gen.tf @@ -0,0 +1,21 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_route_table" "test" { + vpc_id = aws_vpc.test.id +} + +resource "aws_vpc" "test" { + cidr_block = "10.1.0.0/16" +} + +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "6.9.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/ec2/testdata/RouteTable/region_override/main_gen.tf b/internal/service/ec2/testdata/RouteTable/region_override/main_gen.tf new file mode 100644 index 000000000000..8322514840aa --- /dev/null +++ b/internal/service/ec2/testdata/RouteTable/region_override/main_gen.tf @@ -0,0 +1,21 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resource "aws_route_table" "test" { + region = var.region + + vpc_id = aws_vpc.test.id +} + +resource "aws_vpc" "test" { + region = var.region + + cidr_block = "10.1.0.0/16" +} + + +variable "region" { + description = "Region to deploy resource in" + type = string + nullable = false +} diff --git a/internal/service/ec2/testdata/SecurityGroup/basic/main_gen.tf b/internal/service/ec2/testdata/SecurityGroup/basic/main_gen.tf new file mode 100644 index 000000000000..67d8a529a3f2 --- /dev/null +++ b/internal/service/ec2/testdata/SecurityGroup/basic/main_gen.tf @@ -0,0 +1,17 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resource "aws_security_group" "test" { + name = var.rName + vpc_id = aws_vpc.test.id +} + +resource "aws_vpc" "test" { + cidr_block = "10.1.0.0/16" +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} diff --git a/internal/service/ec2/testdata/SecurityGroup/basic_v6.7.0/main_gen.tf b/internal/service/ec2/testdata/SecurityGroup/basic_v6.7.0/main_gen.tf new file mode 100644 index 000000000000..80d9a7cdd57f --- /dev/null +++ b/internal/service/ec2/testdata/SecurityGroup/basic_v6.7.0/main_gen.tf @@ -0,0 +1,27 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_security_group" "test" { + name = var.rName + vpc_id = aws_vpc.test.id +} + +resource "aws_vpc" "test" { + cidr_block = "10.1.0.0/16" +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "6.7.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/ec2/testdata/SecurityGroup/region_override/main_gen.tf b/internal/service/ec2/testdata/SecurityGroup/region_override/main_gen.tf new file mode 100644 index 000000000000..428723a7b319 --- /dev/null +++ b/internal/service/ec2/testdata/SecurityGroup/region_override/main_gen.tf @@ -0,0 +1,27 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resource "aws_security_group" "test" { + region = var.region + + name = var.rName + vpc_id = aws_vpc.test.id +} + +resource "aws_vpc" "test" { + region = var.region + + cidr_block = "10.1.0.0/16" +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} + +variable "region" { + description = "Region to deploy resource in" + type = string + nullable = false +} diff --git a/internal/service/ec2/testdata/SecurityGroupEgressRule/basic/main_gen.tf b/internal/service/ec2/testdata/SecurityGroupEgressRule/basic/main_gen.tf new file mode 100644 index 000000000000..f748f945acba --- /dev/null +++ b/internal/service/ec2/testdata/SecurityGroupEgressRule/basic/main_gen.tf @@ -0,0 +1,26 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_vpc_security_group_egress_rule" "test" { + security_group_id = aws_security_group.test.id + + cidr_ipv4 = "10.0.0.0/8" + from_port = 80 + ip_protocol = "tcp" + to_port = 8080 +} + +resource "aws_vpc" "test" { + cidr_block = "10.0.0.0/16" +} + +resource "aws_security_group" "test" { + vpc_id = aws_vpc.test.id + name = var.rName +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} diff --git a/internal/service/ec2/testdata/SecurityGroupEgressRule/basic_v6.12.0/main_gen.tf b/internal/service/ec2/testdata/SecurityGroupEgressRule/basic_v6.12.0/main_gen.tf new file mode 100644 index 000000000000..564690b46994 --- /dev/null +++ b/internal/service/ec2/testdata/SecurityGroupEgressRule/basic_v6.12.0/main_gen.tf @@ -0,0 +1,36 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resource "aws_vpc_security_group_egress_rule" "test" { + security_group_id = aws_security_group.test.id + + cidr_ipv4 = "10.0.0.0/8" + from_port = 80 + ip_protocol = "tcp" + to_port = 8080 +} + +resource "aws_vpc" "test" { + cidr_block = "10.0.0.0/16" +} + +resource "aws_security_group" "test" { + vpc_id = aws_vpc.test.id + name = var.rName +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "6.12.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/ec2/testdata/SecurityGroupEgressRule/region_override/main_gen.tf b/internal/service/ec2/testdata/SecurityGroupEgressRule/region_override/main_gen.tf new file mode 100644 index 000000000000..8a56cdac0a16 --- /dev/null +++ b/internal/service/ec2/testdata/SecurityGroupEgressRule/region_override/main_gen.tf @@ -0,0 +1,38 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_vpc_security_group_egress_rule" "test" { + region = var.region + + security_group_id = aws_security_group.test.id + + cidr_ipv4 = "10.0.0.0/8" + from_port = 80 + ip_protocol = "tcp" + to_port = 8080 +} + +resource "aws_vpc" "test" { + region = var.region + + cidr_block = "10.0.0.0/16" +} + +resource "aws_security_group" "test" { + region = var.region + + vpc_id = aws_vpc.test.id + name = var.rName +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} + +variable "region" { + description = "Region to deploy resource in" + type = string + nullable = false +} diff --git a/internal/service/ec2/testdata/SecurityGroupIngressRule/basic/main_gen.tf b/internal/service/ec2/testdata/SecurityGroupIngressRule/basic/main_gen.tf new file mode 100644 index 000000000000..d457825a0a1b --- /dev/null +++ b/internal/service/ec2/testdata/SecurityGroupIngressRule/basic/main_gen.tf @@ -0,0 +1,26 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resource "aws_vpc_security_group_ingress_rule" "test" { + security_group_id = aws_security_group.test.id + + cidr_ipv4 = "10.0.0.0/8" + from_port = 80 + ip_protocol = "tcp" + to_port = 8080 +} + +resource "aws_vpc" "test" { + cidr_block = "10.0.0.0/16" +} + +resource "aws_security_group" "test" { + vpc_id = aws_vpc.test.id + name = var.rName +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} diff --git a/internal/service/ec2/testdata/SecurityGroupIngressRule/basic_v6.12.0/main_gen.tf b/internal/service/ec2/testdata/SecurityGroupIngressRule/basic_v6.12.0/main_gen.tf new file mode 100644 index 000000000000..84c9fb6741f6 --- /dev/null +++ b/internal/service/ec2/testdata/SecurityGroupIngressRule/basic_v6.12.0/main_gen.tf @@ -0,0 +1,36 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_vpc_security_group_ingress_rule" "test" { + security_group_id = aws_security_group.test.id + + cidr_ipv4 = "10.0.0.0/8" + from_port = 80 + ip_protocol = "tcp" + to_port = 8080 +} + +resource "aws_vpc" "test" { + cidr_block = "10.0.0.0/16" +} + +resource "aws_security_group" "test" { + vpc_id = aws_vpc.test.id + name = var.rName +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "6.12.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/ec2/testdata/SecurityGroupIngressRule/region_override/main_gen.tf b/internal/service/ec2/testdata/SecurityGroupIngressRule/region_override/main_gen.tf new file mode 100644 index 000000000000..4c387aabfe42 --- /dev/null +++ b/internal/service/ec2/testdata/SecurityGroupIngressRule/region_override/main_gen.tf @@ -0,0 +1,38 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resource "aws_vpc_security_group_ingress_rule" "test" { + region = var.region + + security_group_id = aws_security_group.test.id + + cidr_ipv4 = "10.0.0.0/8" + from_port = 80 + ip_protocol = "tcp" + to_port = 8080 +} + +resource "aws_vpc" "test" { + region = var.region + + cidr_block = "10.0.0.0/16" +} + +resource "aws_security_group" "test" { + region = var.region + + vpc_id = aws_vpc.test.id + name = var.rName +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} + +variable "region" { + description = "Region to deploy resource in" + type = string + nullable = false +} diff --git a/internal/service/ec2/testdata/SecurityGroupVPCAssociation/basic_v6.0.0/main_gen.tf b/internal/service/ec2/testdata/SecurityGroupVPCAssociation/basic_v6.0.0/main_gen.tf new file mode 100644 index 000000000000..041b2bd6d2dc --- /dev/null +++ b/internal/service/ec2/testdata/SecurityGroupVPCAssociation/basic_v6.0.0/main_gen.tf @@ 
-0,0 +1,36 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resource "aws_vpc_security_group_vpc_association" "test" { + security_group_id = aws_security_group.test.id + vpc_id = aws_vpc.target.id +} + +resource "aws_vpc" "source" { + cidr_block = "10.6.0.0/16" +} + +resource "aws_security_group" "test" { + name = var.rName + vpc_id = aws_vpc.source.id +} + +resource "aws_vpc" "target" { + cidr_block = "10.7.0.0/16" +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "6.0.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/ec2/testdata/SerialConsoleAccess/basic_v5.100.0/main_gen.tf b/internal/service/ec2/testdata/SerialConsoleAccess/basic_v5.100.0/main_gen.tf new file mode 100644 index 000000000000..559080e2b132 --- /dev/null +++ b/internal/service/ec2/testdata/SerialConsoleAccess/basic_v5.100.0/main_gen.tf @@ -0,0 +1,17 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resource "aws_ec2_serial_console_access" "test" { + enabled = true +} + +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "5.100.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/ec2/testdata/SerialConsoleAccess/basic_v6.0.0/main_gen.tf b/internal/service/ec2/testdata/SerialConsoleAccess/basic_v6.0.0/main_gen.tf new file mode 100644 index 000000000000..e8187d7d5f8e --- /dev/null +++ b/internal/service/ec2/testdata/SerialConsoleAccess/basic_v6.0.0/main_gen.tf @@ -0,0 +1,17 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_ec2_serial_console_access" "test" { + enabled = true +} + +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "6.0.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/ec2/testdata/Subnet/basic/main_gen.tf b/internal/service/ec2/testdata/Subnet/basic/main_gen.tf new file mode 100644 index 000000000000..dd708ec5350f --- /dev/null +++ b/internal/service/ec2/testdata/Subnet/basic/main_gen.tf @@ -0,0 +1,12 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resource "aws_subnet" "test" { + cidr_block = "10.1.1.0/24" + vpc_id = aws_vpc.test.id +} + +resource "aws_vpc" "test" { + cidr_block = "10.1.0.0/16" +} + diff --git a/internal/service/ec2/testdata/Subnet/basic_v6.8.0/main_gen.tf b/internal/service/ec2/testdata/Subnet/basic_v6.8.0/main_gen.tf new file mode 100644 index 000000000000..0e1bcefbdaef --- /dev/null +++ b/internal/service/ec2/testdata/Subnet/basic_v6.8.0/main_gen.tf @@ -0,0 +1,22 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resource "aws_subnet" "test" { + cidr_block = "10.1.1.0/24" + vpc_id = aws_vpc.test.id +} + +resource "aws_vpc" "test" { + cidr_block = "10.1.0.0/16" +} + +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "6.8.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/ec2/testdata/Subnet/list_basic/main.tf b/internal/service/ec2/testdata/Subnet/list_basic/main.tf new file mode 100644 index 000000000000..190822a57037 --- /dev/null +++ b/internal/service/ec2/testdata/Subnet/list_basic/main.tf @@ -0,0 +1,18 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +provider "aws" {} + +resource "aws_subnet" "test" { + count = length(aws_vpc.test) + + cidr_block = "10.1.1.0/24" + vpc_id = aws_vpc.test[count.index].id +} + +resource "aws_vpc" "test" { + count = 3 + + cidr_block = "10.1.0.0/16" +} + diff --git a/internal/service/ec2/testdata/Subnet/list_basic/main.tfquery.hcl b/internal/service/ec2/testdata/Subnet/list_basic/main.tfquery.hcl new file mode 100644 index 000000000000..8921c5babb87 --- /dev/null +++ b/internal/service/ec2/testdata/Subnet/list_basic/main.tfquery.hcl @@ -0,0 +1,6 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +list "aws_subnet" "test" { + provider = aws +} diff --git a/internal/service/ec2/testdata/Subnet/list_exclude_default/main.tf b/internal/service/ec2/testdata/Subnet/list_exclude_default/main.tf new file mode 100644 index 000000000000..3169260fdb36 --- /dev/null +++ b/internal/service/ec2/testdata/Subnet/list_exclude_default/main.tf @@ -0,0 +1,21 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +provider "aws" {} + +resource "aws_subnet" "test" { + cidr_block = cidrsubnet(data.aws_vpc.default.cidr_block, 12, 4000) + vpc_id = data.aws_vpc.default.id +} + +data "aws_vpc" "default" { + default = true +} + +# tflint-ignore: terraform_unused_declarations +data "aws_subnets" "defaults" { + filter { + name = "default-for-az" + values = ["true"] + } +} \ No newline at end of file diff --git a/internal/service/ec2/testdata/Subnet/list_exclude_default/main.tfquery.hcl b/internal/service/ec2/testdata/Subnet/list_exclude_default/main.tfquery.hcl new file mode 100644 index 000000000000..8921c5babb87 --- /dev/null +++ b/internal/service/ec2/testdata/Subnet/list_exclude_default/main.tfquery.hcl @@ -0,0 +1,6 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +list "aws_subnet" "test" { + provider = aws +} diff --git a/internal/service/ec2/testdata/Subnet/list_filtered/main.tf b/internal/service/ec2/testdata/Subnet/list_filtered/main.tf new file mode 100644 index 000000000000..a15c818ae6b4 --- /dev/null +++ b/internal/service/ec2/testdata/Subnet/list_filtered/main.tf @@ -0,0 +1,26 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +# provider "aws" {} + +resource "aws_subnet" "expected" { + count = 2 + + cidr_block = cidrsubnet(aws_vpc.expected.cidr_block, 8, count.index) + vpc_id = aws_vpc.expected.id +} + +resource "aws_vpc" "expected" { + cidr_block = "10.1.0.0/16" +} + +resource "aws_subnet" "not_expected" { + count = 2 + + cidr_block = cidrsubnet(aws_vpc.not_expected.cidr_block, 8, count.index) + vpc_id = aws_vpc.not_expected.id +} + +resource "aws_vpc" "not_expected" { + cidr_block = "10.1.0.0/16" +} diff --git a/internal/service/ec2/testdata/Subnet/list_filtered/main.tfquery.hcl b/internal/service/ec2/testdata/Subnet/list_filtered/main.tfquery.hcl new file mode 100644 index 000000000000..1d62b3758f8b --- /dev/null +++ b/internal/service/ec2/testdata/Subnet/list_filtered/main.tfquery.hcl @@ -0,0 +1,13 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +list "aws_subnet" "test" { + provider = aws + + config { + filter { + name = "vpc-id" + values = [aws_vpc.expected.id] + } + } +} diff --git a/internal/service/ec2/testdata/Subnet/list_filtered_default_for_az/main.tf b/internal/service/ec2/testdata/Subnet/list_filtered_default_for_az/main.tf new file mode 100644 index 000000000000..4e2b0706b834 --- /dev/null +++ b/internal/service/ec2/testdata/Subnet/list_filtered_default_for_az/main.tf @@ -0,0 +1,4 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +provider "aws" {} diff --git a/internal/service/ec2/testdata/Subnet/list_filtered_default_for_az/main.tfquery.hcl b/internal/service/ec2/testdata/Subnet/list_filtered_default_for_az/main.tfquery.hcl new file mode 100644 index 000000000000..984bfb36126c --- /dev/null +++ b/internal/service/ec2/testdata/Subnet/list_filtered_default_for_az/main.tfquery.hcl @@ -0,0 +1,13 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +list "aws_subnet" "test" { + provider = aws + + config { + filter { + name = "default-for-az" + values = ["false"] + } + } +} diff --git a/internal/service/ec2/testdata/Subnet/list_filtered_subnet_ids/main.tf b/internal/service/ec2/testdata/Subnet/list_filtered_subnet_ids/main.tf new file mode 100644 index 000000000000..35a857e39619 --- /dev/null +++ b/internal/service/ec2/testdata/Subnet/list_filtered_subnet_ids/main.tf @@ -0,0 +1,34 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +provider "aws" {} + +resource "aws_subnet" "expected" { + count = 2 + + cidr_block = cidrsubnet(aws_vpc.test[count.index].cidr_block, 8, 0) + vpc_id = aws_vpc.test[count.index].id + + tags = { + expected = var.rName + } +} + +resource "aws_subnet" "not_expected" { + count = 2 + + cidr_block = cidrsubnet(aws_vpc.test[count.index].cidr_block, 8, 1) + vpc_id = aws_vpc.test[count.index].id +} + +resource "aws_vpc" "test" { + count = 2 + + cidr_block = "10.1.0.0/16" +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} diff --git a/internal/service/ec2/testdata/Subnet/list_filtered_subnet_ids/main.tfquery.hcl b/internal/service/ec2/testdata/Subnet/list_filtered_subnet_ids/main.tfquery.hcl new file mode 100644 index 000000000000..c4a6eaeb40bc --- /dev/null +++ b/internal/service/ec2/testdata/Subnet/list_filtered_subnet_ids/main.tfquery.hcl @@ -0,0 +1,21 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +list "aws_subnet" "test" { + provider = aws + + config { + subnet_ids = local.subnet_ids + filter { + name = "tag:expected" + values = [var.rName] + } + } +} + +locals { + subnet_ids = concat( + aws_subnet.expected[*].id, + aws_subnet.not_expected[*].id + ) +} diff --git a/internal/service/ec2/testdata/Subnet/list_region_override/main.tf b/internal/service/ec2/testdata/Subnet/list_region_override/main.tf new file mode 100644 index 000000000000..ddb33491f01c --- /dev/null +++ b/internal/service/ec2/testdata/Subnet/list_region_override/main.tf @@ -0,0 +1,27 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +provider "aws" {} + +resource "aws_subnet" "test" { + count = length(aws_vpc.test) + + region = var.region + + cidr_block = "10.1.1.0/24" + vpc_id = aws_vpc.test[count.index].id +} + +resource "aws_vpc" "test" { + count = 3 + + region = var.region + + cidr_block = "10.1.0.0/16" +} + +variable "region" { + description = "Region to deploy resource in" + type = string + nullable = false +} diff --git a/internal/service/ec2/testdata/Subnet/list_region_override/main.tfquery.hcl b/internal/service/ec2/testdata/Subnet/list_region_override/main.tfquery.hcl new file mode 100644 index 000000000000..9ed9aac8ba42 --- /dev/null +++ b/internal/service/ec2/testdata/Subnet/list_region_override/main.tfquery.hcl @@ -0,0 +1,10 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +list "aws_subnet" "test" { + provider = aws + + config { + region = var.region + } +} diff --git a/internal/service/ec2/testdata/Subnet/list_subnet_ids/main.tf b/internal/service/ec2/testdata/Subnet/list_subnet_ids/main.tf new file mode 100644 index 000000000000..e5c3a15b4f8d --- /dev/null +++ b/internal/service/ec2/testdata/Subnet/list_subnet_ids/main.tf @@ -0,0 +1,17 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +provider "aws" {} + +resource "aws_subnet" "test" { + count = length(aws_vpc.test) * 2 + + cidr_block = cidrsubnet(aws_vpc.test[floor(count.index / 2)].cidr_block, 8, count.index) + vpc_id = aws_vpc.test[floor(count.index / 2)].id +} + +resource "aws_vpc" "test" { + count = 2 + + cidr_block = "10.1.0.0/16" +} diff --git a/internal/service/ec2/testdata/Subnet/list_subnet_ids/main.tfquery.hcl b/internal/service/ec2/testdata/Subnet/list_subnet_ids/main.tfquery.hcl new file mode 100644 index 000000000000..2ad721d9df23 --- /dev/null +++ b/internal/service/ec2/testdata/Subnet/list_subnet_ids/main.tfquery.hcl @@ -0,0 +1,14 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +list "aws_subnet" "test" { + provider = aws + + config { + subnet_ids = local.subnet_ids + } +} + +locals { + subnet_ids = aws_subnet.test[*].id +} diff --git a/internal/service/ec2/testdata/Subnet/region_override/main_gen.tf b/internal/service/ec2/testdata/Subnet/region_override/main_gen.tf new file mode 100644 index 000000000000..9fb8bc73c9e2 --- /dev/null +++ b/internal/service/ec2/testdata/Subnet/region_override/main_gen.tf @@ -0,0 +1,22 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resource "aws_subnet" "test" { + region = var.region + + cidr_block = "10.1.1.0/24" + vpc_id = aws_vpc.test.id +} + +resource "aws_vpc" "test" { + region = var.region + + cidr_block = "10.1.0.0/16" +} + + +variable "region" { + description = "Region to deploy resource in" + type = string + nullable = false +} diff --git a/internal/service/ec2/testdata/VPC/basic/main_gen.tf b/internal/service/ec2/testdata/VPC/basic/main_gen.tf new file mode 100644 index 000000000000..655d2e1e6ede --- /dev/null +++ b/internal/service/ec2/testdata/VPC/basic/main_gen.tf @@ -0,0 +1,7 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_vpc" "test" { + cidr_block = "10.1.0.0/16" +} + diff --git a/internal/service/ec2/testdata/VPC/basic_v6.15.0/main_gen.tf b/internal/service/ec2/testdata/VPC/basic_v6.15.0/main_gen.tf new file mode 100644 index 000000000000..813ba6a19c43 --- /dev/null +++ b/internal/service/ec2/testdata/VPC/basic_v6.15.0/main_gen.tf @@ -0,0 +1,17 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resource "aws_vpc" "test" { + cidr_block = "10.1.0.0/16" +} + +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "6.15.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/ec2/testdata/VPC/list_basic/main.tf b/internal/service/ec2/testdata/VPC/list_basic/main.tf new file mode 100644 index 000000000000..0799f343f274 --- /dev/null +++ b/internal/service/ec2/testdata/VPC/list_basic/main.tf @@ -0,0 +1,10 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +provider "aws" {} + +resource "aws_vpc" "test" { + count = 3 + + cidr_block = "10.1.0.0/16" +} diff --git a/internal/service/ec2/testdata/VPC/list_basic/main.tfquery.hcl b/internal/service/ec2/testdata/VPC/list_basic/main.tfquery.hcl new file mode 100644 index 000000000000..6b042e21a0d7 --- /dev/null +++ b/internal/service/ec2/testdata/VPC/list_basic/main.tfquery.hcl @@ -0,0 +1,6 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +list "aws_vpc" "test" { + provider = aws +} diff --git a/internal/service/ec2/testdata/VPC/list_exclude_default/main.tf b/internal/service/ec2/testdata/VPC/list_exclude_default/main.tf new file mode 100644 index 000000000000..b414b3bee713 --- /dev/null +++ b/internal/service/ec2/testdata/VPC/list_exclude_default/main.tf @@ -0,0 +1,13 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +provider "aws" {} + +resource "aws_vpc" "test" { + cidr_block = "10.1.0.0/16" +} + +# tflint-ignore: terraform_unused_declarations +data "aws_vpc" "default" { + default = true +} diff --git a/internal/service/ec2/testdata/VPC/list_exclude_default/main.tfquery.hcl b/internal/service/ec2/testdata/VPC/list_exclude_default/main.tfquery.hcl new file mode 100644 index 000000000000..6b042e21a0d7 --- /dev/null +++ b/internal/service/ec2/testdata/VPC/list_exclude_default/main.tfquery.hcl @@ -0,0 +1,6 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +list "aws_vpc" "test" { + provider = aws +} diff --git a/internal/service/ec2/testdata/VPC/list_filtered/main.tf b/internal/service/ec2/testdata/VPC/list_filtered/main.tf new file mode 100644 index 000000000000..9b481e7ac7a3 --- /dev/null +++ b/internal/service/ec2/testdata/VPC/list_filtered/main.tf @@ -0,0 +1,26 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +provider "aws" {} + +resource "aws_vpc" "expected" { + count = 2 + + cidr_block = "10.1.0.0/16" + + tags = { + expected = var.rName + } +} + +resource "aws_vpc" "not_expected" { + count = 2 + + cidr_block = "10.1.0.0/16" +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} diff --git a/internal/service/ec2/testdata/VPC/list_filtered/main.tfquery.hcl b/internal/service/ec2/testdata/VPC/list_filtered/main.tfquery.hcl new file mode 100644 index 000000000000..860171d2627c --- /dev/null +++ b/internal/service/ec2/testdata/VPC/list_filtered/main.tfquery.hcl @@ -0,0 +1,13 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +list "aws_vpc" "test" { + provider = aws + + config { + filter { + name = "tag:expected" + values = [var.rName] + } + } +} diff --git a/internal/service/ec2/testdata/VPC/list_filtered_is_default/main.tf b/internal/service/ec2/testdata/VPC/list_filtered_is_default/main.tf new file mode 100644 index 000000000000..4e2b0706b834 --- /dev/null +++ b/internal/service/ec2/testdata/VPC/list_filtered_is_default/main.tf @@ -0,0 +1,4 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +provider "aws" {} diff --git a/internal/service/ec2/testdata/VPC/list_filtered_is_default/main.tfquery.hcl b/internal/service/ec2/testdata/VPC/list_filtered_is_default/main.tfquery.hcl new file mode 100644 index 000000000000..d91dd8d70eb6 --- /dev/null +++ b/internal/service/ec2/testdata/VPC/list_filtered_is_default/main.tfquery.hcl @@ -0,0 +1,13 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +list "aws_vpc" "test" { + provider = aws + + config { + filter { + name = "is-default" + values = ["false"] + } + } +} diff --git a/internal/service/ec2/testdata/VPC/list_filtered_vpc_ids/main.tf b/internal/service/ec2/testdata/VPC/list_filtered_vpc_ids/main.tf new file mode 100644 index 000000000000..9b481e7ac7a3 --- /dev/null +++ b/internal/service/ec2/testdata/VPC/list_filtered_vpc_ids/main.tf @@ -0,0 +1,26 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +provider "aws" {} + +resource "aws_vpc" "expected" { + count = 2 + + cidr_block = "10.1.0.0/16" + + tags = { + expected = var.rName + } +} + +resource "aws_vpc" "not_expected" { + count = 2 + + cidr_block = "10.1.0.0/16" +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} diff --git a/internal/service/ec2/testdata/VPC/list_filtered_vpc_ids/main.tfquery.hcl b/internal/service/ec2/testdata/VPC/list_filtered_vpc_ids/main.tfquery.hcl new file mode 100644 index 000000000000..045ce9ab63c2 --- /dev/null +++ b/internal/service/ec2/testdata/VPC/list_filtered_vpc_ids/main.tfquery.hcl @@ -0,0 +1,21 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +list "aws_vpc" "test" { + provider = aws + + config { + vpc_ids = local.vpc_ids + filter { + name = "tag:expected" + values = [var.rName] + } + } +} + +locals { + vpc_ids = concat( + aws_vpc.expected[*].id, + aws_vpc.not_expected[*].id, + ) +} diff --git a/internal/service/ec2/testdata/VPC/list_region_override/main.tf b/internal/service/ec2/testdata/VPC/list_region_override/main.tf new file mode 100644 index 000000000000..856800b3188f --- /dev/null +++ b/internal/service/ec2/testdata/VPC/list_region_override/main.tf @@ -0,0 +1,18 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +provider "aws" {} + +resource "aws_vpc" "test" { + count = 3 + + region = var.region + + cidr_block = "10.1.0.0/16" +} + +variable "region" { + description = "Region to deploy resource in" + type = string + nullable = false +} diff --git a/internal/service/ec2/testdata/VPC/list_region_override/main.tfquery.hcl b/internal/service/ec2/testdata/VPC/list_region_override/main.tfquery.hcl new file mode 100644 index 000000000000..f2c1ddce2fda --- /dev/null +++ b/internal/service/ec2/testdata/VPC/list_region_override/main.tfquery.hcl @@ -0,0 +1,10 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +list "aws_vpc" "test" { + provider = aws + + config { + region = var.region + } +} diff --git a/internal/service/ec2/testdata/VPC/list_vpc_ids/main.tf b/internal/service/ec2/testdata/VPC/list_vpc_ids/main.tf new file mode 100644 index 000000000000..0799f343f274 --- /dev/null +++ b/internal/service/ec2/testdata/VPC/list_vpc_ids/main.tf @@ -0,0 +1,10 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +provider "aws" {} + +resource "aws_vpc" "test" { + count = 3 + + cidr_block = "10.1.0.0/16" +} diff --git a/internal/service/ec2/testdata/VPC/list_vpc_ids/main.tfquery.hcl b/internal/service/ec2/testdata/VPC/list_vpc_ids/main.tfquery.hcl new file mode 100644 index 000000000000..8175671f3584 --- /dev/null +++ b/internal/service/ec2/testdata/VPC/list_vpc_ids/main.tfquery.hcl @@ -0,0 +1,14 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +list "aws_vpc" "test" { + provider = aws + + config { + vpc_ids = local.vpc_ids + } +} + +locals { + vpc_ids = aws_vpc.test[*].id +} diff --git a/internal/service/ec2/testdata/VPC/region_override/main_gen.tf b/internal/service/ec2/testdata/VPC/region_override/main_gen.tf new file mode 100644 index 000000000000..81d85f59e009 --- /dev/null +++ b/internal/service/ec2/testdata/VPC/region_override/main_gen.tf @@ -0,0 +1,15 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resource "aws_vpc" "test" { + region = var.region + + cidr_block = "10.1.0.0/16" +} + + +variable "region" { + description = "Region to deploy resource in" + type = string + nullable = false +} diff --git a/internal/service/ec2/testdata/VPCEndpoint/basic/main_gen.tf b/internal/service/ec2/testdata/VPCEndpoint/basic/main_gen.tf new file mode 100644 index 000000000000..3aaad7927da6 --- /dev/null +++ b/internal/service/ec2/testdata/VPCEndpoint/basic/main_gen.tf @@ -0,0 +1,24 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_vpc_endpoint" "test" { + vpc_id = aws_vpc.test.id + service_name = "com.amazonaws.${data.aws_region.current.region}.s3" +} + +resource "aws_vpc" "test" { + cidr_block = "10.0.0.0/16" + + tags = { + Name = var.rName + } +} + +data "aws_region" "current" { +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} diff --git a/internal/service/ec2/testdata/VPCEndpoint/basic_v6.12.0/main_gen.tf b/internal/service/ec2/testdata/VPCEndpoint/basic_v6.12.0/main_gen.tf new file mode 100644 index 000000000000..e3f815e16db6 --- /dev/null +++ b/internal/service/ec2/testdata/VPCEndpoint/basic_v6.12.0/main_gen.tf @@ -0,0 +1,34 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resource "aws_vpc_endpoint" "test" { + vpc_id = aws_vpc.test.id + service_name = "com.amazonaws.${data.aws_region.current.region}.s3" +} + +resource "aws_vpc" "test" { + cidr_block = "10.0.0.0/16" + + tags = { + Name = var.rName + } +} + +data "aws_region" "current" { +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "6.12.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/ec2/testdata/VPCEndpoint/region_override/main_gen.tf b/internal/service/ec2/testdata/VPCEndpoint/region_override/main_gen.tf new file mode 100644 index 000000000000..4bbb3ee181c2 --- /dev/null +++ b/internal/service/ec2/testdata/VPCEndpoint/region_override/main_gen.tf @@ -0,0 +1,36 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_vpc_endpoint" "test" { + region = var.region + + vpc_id = aws_vpc.test.id + service_name = "com.amazonaws.${data.aws_region.current.region}.s3" +} + +resource "aws_vpc" "test" { + region = var.region + + cidr_block = "10.0.0.0/16" + + tags = { + Name = var.rName + } +} + +data "aws_region" "current" { + region = var.region + +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} + +variable "region" { + description = "Region to deploy resource in" + type = string + nullable = false +} diff --git a/internal/service/ec2/testdata/tmpl/ec2_instance_tags.gtpl b/internal/service/ec2/testdata/tmpl/ec2_instance_tags.gtpl index 6faefb2a830b..e16b850c47f9 100644 --- a/internal/service/ec2/testdata/tmpl/ec2_instance_tags.gtpl +++ b/internal/service/ec2/testdata/tmpl/ec2_instance_tags.gtpl @@ -1,4 +1,5 @@ resource "aws_instance" "test" { +{{- template "region" }} ami = data.aws_ami.amzn2-ami-minimal-hvm-ebs-arm64.id instance_type = "t4g.nano" diff --git a/internal/service/ec2/testdata/tmpl/vpc_endpoint_tags.gtpl b/internal/service/ec2/testdata/tmpl/vpc_endpoint_tags.gtpl new file mode 100644 index 000000000000..27856e2e2500 --- /dev/null +++ b/internal/service/ec2/testdata/tmpl/vpc_endpoint_tags.gtpl @@ -0,0 +1,19 @@ +resource "aws_vpc_endpoint" "test" { +{{- template "region" }} + vpc_id = aws_vpc.test.id + service_name = "com.amazonaws.${data.aws_region.current.region}.s3" +{{- template "tags" }} +} + +resource "aws_vpc" "test" { +{{- template "region" }} + cidr_block = "10.0.0.0/16" + + tags = { + Name = var.rName + } +} + +data "aws_region" "current" { +{{- template "region" }} +} diff --git a/internal/service/ec2/testdata/tmpl/vpc_route_basic.gtpl b/internal/service/ec2/testdata/tmpl/vpc_route_basic.gtpl new file mode 100644 index 000000000000..673e7be52995 --- /dev/null +++ b/internal/service/ec2/testdata/tmpl/vpc_route_basic.gtpl @@ -0,0 +1,21 @@ +resource "aws_route" "test" 
{ +{{- template "region" }} + route_table_id = aws_route_table.test.id + destination_cidr_block = "10.3.0.0/16" + gateway_id = aws_internet_gateway.test.id +} + +resource "aws_vpc" "test" { +{{- template "region" }} + cidr_block = "10.1.0.0/16" +} + +resource "aws_internet_gateway" "test" { +{{- template "region" }} + vpc_id = aws_vpc.test.id +} + +resource "aws_route_table" "test" { +{{- template "region" }} + vpc_id = aws_vpc.test.id +} diff --git a/internal/service/ec2/testdata/tmpl/vpc_route_table_tags.gtpl b/internal/service/ec2/testdata/tmpl/vpc_route_table_tags.gtpl index bf1e66e1abe5..909ac3392652 100644 --- a/internal/service/ec2/testdata/tmpl/vpc_route_table_tags.gtpl +++ b/internal/service/ec2/testdata/tmpl/vpc_route_table_tags.gtpl @@ -1,9 +1,11 @@ resource "aws_route_table" "test" { +{{- template "region" }} vpc_id = aws_vpc.test.id {{- template "tags" . }} } resource "aws_vpc" "test" { +{{- template "region" }} cidr_block = "10.1.0.0/16" } diff --git a/internal/service/ec2/testdata/tmpl/vpc_security_group_egress_rule_tags.gtpl b/internal/service/ec2/testdata/tmpl/vpc_security_group_egress_rule_tags.gtpl index c76ecc6d331a..495b77c4643c 100644 --- a/internal/service/ec2/testdata/tmpl/vpc_security_group_egress_rule_tags.gtpl +++ b/internal/service/ec2/testdata/tmpl/vpc_security_group_egress_rule_tags.gtpl @@ -1,4 +1,5 @@ resource "aws_vpc_security_group_egress_rule" "test" { +{{- template "region" }} security_group_id = aws_security_group.test.id cidr_ipv4 = "10.0.0.0/8" @@ -10,10 +11,12 @@ resource "aws_vpc_security_group_egress_rule" "test" { } resource "aws_vpc" "test" { +{{- template "region" }} cidr_block = "10.0.0.0/16" } resource "aws_security_group" "test" { +{{- template "region" }} vpc_id = aws_vpc.test.id name = var.rName } diff --git a/internal/service/ec2/testdata/tmpl/vpc_security_group_ingress_rule_tags.gtpl b/internal/service/ec2/testdata/tmpl/vpc_security_group_ingress_rule_tags.gtpl index 0eaeff4769f2..4e5bb5192883 100644 --- 
a/internal/service/ec2/testdata/tmpl/vpc_security_group_ingress_rule_tags.gtpl +++ b/internal/service/ec2/testdata/tmpl/vpc_security_group_ingress_rule_tags.gtpl @@ -1,4 +1,5 @@ resource "aws_vpc_security_group_ingress_rule" "test" { +{{- template "region" }} security_group_id = aws_security_group.test.id cidr_ipv4 = "10.0.0.0/8" @@ -10,10 +11,12 @@ resource "aws_vpc_security_group_ingress_rule" "test" { } resource "aws_vpc" "test" { +{{- template "region" }} cidr_block = "10.0.0.0/16" } resource "aws_security_group" "test" { +{{- template "region" }} vpc_id = aws_vpc.test.id name = var.rName } diff --git a/internal/service/ec2/testdata/tmpl/vpc_security_group_tags.gtpl b/internal/service/ec2/testdata/tmpl/vpc_security_group_tags.gtpl index 90532b2d8085..47e49a3cb92c 100644 --- a/internal/service/ec2/testdata/tmpl/vpc_security_group_tags.gtpl +++ b/internal/service/ec2/testdata/tmpl/vpc_security_group_tags.gtpl @@ -1,4 +1,5 @@ resource "aws_security_group" "test" { +{{- template "region" }} name = var.rName vpc_id = aws_vpc.test.id @@ -6,5 +7,6 @@ resource "aws_security_group" "test" { } resource "aws_vpc" "test" { +{{- template "region" }} cidr_block = "10.1.0.0/16" } diff --git a/internal/service/ec2/testdata/tmpl/vpc_subnet_tags.gtpl b/internal/service/ec2/testdata/tmpl/vpc_subnet_tags.gtpl index 4be3eb970274..9b919eaa41d1 100644 --- a/internal/service/ec2/testdata/tmpl/vpc_subnet_tags.gtpl +++ b/internal/service/ec2/testdata/tmpl/vpc_subnet_tags.gtpl @@ -1,4 +1,5 @@ resource "aws_subnet" "test" { +{{- template "region" }} cidr_block = "10.1.1.0/24" vpc_id = aws_vpc.test.id @@ -6,5 +7,6 @@ resource "aws_subnet" "test" { } resource "aws_vpc" "test" { +{{- template "region" }} cidr_block = "10.1.0.0/16" } diff --git a/internal/service/ec2/testdata/tmpl/vpc_tags.gtpl b/internal/service/ec2/testdata/tmpl/vpc_tags.gtpl index fb29122a55c5..2834e74ed629 100644 --- a/internal/service/ec2/testdata/tmpl/vpc_tags.gtpl +++ b/internal/service/ec2/testdata/tmpl/vpc_tags.gtpl 
@@ -1,4 +1,5 @@ resource "aws_vpc" "test" { +{{- template "region" }} cidr_block = "10.1.0.0/16" {{- template "tags" . }} diff --git a/internal/service/ec2/transitgateway_.go b/internal/service/ec2/transitgateway_.go index b793f75b89d8..0a761d5d81fb 100644 --- a/internal/service/ec2/transitgateway_.go +++ b/internal/service/ec2/transitgateway_.go @@ -304,7 +304,7 @@ func resourceTransitGatewayDelete(ctx context.Context, d *schema.ResourceData, m const ( timeout = 5 * time.Minute ) - _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, timeout, func() (any, error) { + _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, timeout, func(ctx context.Context) (any, error) { return conn.DeleteTransitGateway(ctx, &ec2.DeleteTransitGatewayInput{ TransitGatewayId: aws.String(d.Id()), }) diff --git a/internal/service/ec2/transitgateway_attachment_data_source.go b/internal/service/ec2/transitgateway_attachment_data_source.go index a7b9e14b0562..a3148c941049 100644 --- a/internal/service/ec2/transitgateway_attachment_data_source.go +++ b/internal/service/ec2/transitgateway_attachment_data_source.go @@ -5,10 +5,8 @@ package ec2 import ( "context" - "fmt" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/arn" "github.com/aws/aws-sdk-go-v2/service/ec2" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" @@ -76,7 +74,8 @@ func dataSourceTransitGatewayAttachment() *schema.Resource { func dataSourceTransitGatewayAttachmentRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).EC2Client(ctx) + c := meta.(*conns.AWSClient) + conn := c.EC2Client(ctx) input := &ec2.DescribeTransitGatewayAttachmentsInput{} @@ -103,14 +102,7 @@ func dataSourceTransitGatewayAttachmentRead(ctx context.Context, d *schema.Resou d.SetId(transitGatewayAttachmentID) resourceOwnerID := aws.ToString(transitGatewayAttachment.ResourceOwnerId) - arn := 
arn.ARN{ - Partition: meta.(*conns.AWSClient).Partition(ctx), - Service: names.EC2, - Region: meta.(*conns.AWSClient).Region(ctx), - AccountID: resourceOwnerID, - Resource: fmt.Sprintf("transit-gateway-attachment/%s", d.Id()), - }.String() - d.Set(names.AttrARN, arn) + d.Set(names.AttrARN, transitGatewayAttachmentARN(ctx, c, resourceOwnerID, d.Id())) if v := transitGatewayAttachment.Association; v != nil { d.Set("association_state", v.State) d.Set("association_transit_gateway_route_table_id", v.TransitGatewayRouteTableId) diff --git a/internal/service/ec2/transitgateway_connect_peer.go b/internal/service/ec2/transitgateway_connect_peer.go index aec2ae4ce636..9bf281c828b5 100644 --- a/internal/service/ec2/transitgateway_connect_peer.go +++ b/internal/service/ec2/transitgateway_connect_peer.go @@ -5,14 +5,12 @@ package ec2 import ( "context" - "fmt" "log" "strconv" "time" "github.com/YakDriver/regexache" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/arn" "github.com/aws/aws-sdk-go-v2/service/ec2" awstypes "github.com/aws/aws-sdk-go-v2/service/ec2/types" "github.com/hashicorp/aws-sdk-go-base/v2/tfawserr" @@ -161,7 +159,8 @@ func resourceTransitGatewayConnectPeerCreate(ctx context.Context, d *schema.Reso func resourceTransitGatewayConnectPeerRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).EC2Client(ctx) + c := meta.(*conns.AWSClient) + conn := c.EC2Client(ctx) transitGatewayConnectPeer, err := findTransitGatewayConnectPeerByID(ctx, conn, d.Id()) @@ -175,15 +174,8 @@ func resourceTransitGatewayConnectPeerRead(ctx context.Context, d *schema.Resour return sdkdiag.AppendErrorf(diags, "reading EC2 Transit Gateway Connect Peer (%s): %s", d.Id(), err) } - arn := arn.ARN{ - Partition: meta.(*conns.AWSClient).Partition(ctx), - Service: names.EC2, - Region: meta.(*conns.AWSClient).Region(ctx), - AccountID: meta.(*conns.AWSClient).AccountID(ctx), - Resource: 
fmt.Sprintf("transit-gateway-connect-peer/%s", d.Id()), - }.String() bgpConfigurations := transitGatewayConnectPeer.ConnectPeerConfiguration.BgpConfigurations - d.Set(names.AttrARN, arn) + d.Set(names.AttrARN, transitGatewayConnectPeerARN(ctx, c, d.Id())) d.Set("bgp_asn", strconv.FormatInt(aws.ToInt64(bgpConfigurations[0].PeerAsn), 10)) d.Set("bgp_peer_address", bgpConfigurations[0].PeerAddress) d.Set("bgp_transit_gateway_addresses", slices.ApplyToAll(bgpConfigurations, func(v awstypes.TransitGatewayAttachmentBgpConfiguration) string { @@ -229,3 +221,7 @@ func resourceTransitGatewayConnectPeerDelete(ctx context.Context, d *schema.Reso return diags } + +func transitGatewayConnectPeerARN(ctx context.Context, c *conns.AWSClient, connectPeerID string) string { + return c.RegionalARN(ctx, names.EC2, "transit-gateway-connect-peer/"+connectPeerID) +} diff --git a/internal/service/ec2/transitgateway_connect_peer_data_source.go b/internal/service/ec2/transitgateway_connect_peer_data_source.go index 36bb52ad70f2..aa11772ca687 100644 --- a/internal/service/ec2/transitgateway_connect_peer_data_source.go +++ b/internal/service/ec2/transitgateway_connect_peer_data_source.go @@ -5,12 +5,10 @@ package ec2 import ( "context" - "fmt" "strconv" "time" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/arn" "github.com/aws/aws-sdk-go-v2/service/ec2" awstypes "github.com/aws/aws-sdk-go-v2/service/ec2/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" @@ -82,7 +80,8 @@ func dataSourceTransitGatewayConnectPeer() *schema.Resource { func dataSourceTransitGatewayConnectPeerRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).EC2Client(ctx) + c := meta.(*conns.AWSClient) + conn := c.EC2Client(ctx) input := &ec2.DescribeTransitGatewayConnectPeersInput{} @@ -102,15 +101,8 @@ func dataSourceTransitGatewayConnectPeerRead(ctx context.Context, d *schema.Reso 
d.SetId(aws.ToString(transitGatewayConnectPeer.TransitGatewayConnectPeerId)) - arn := arn.ARN{ - Partition: meta.(*conns.AWSClient).Partition(ctx), - Service: names.EC2, - Region: meta.(*conns.AWSClient).Region(ctx), - AccountID: meta.(*conns.AWSClient).AccountID(ctx), - Resource: fmt.Sprintf("transit-gateway-connect-peer/%s", d.Id()), - }.String() bgpConfigurations := transitGatewayConnectPeer.ConnectPeerConfiguration.BgpConfigurations - d.Set(names.AttrARN, arn) + d.Set(names.AttrARN, transitGatewayConnectPeerARN(ctx, c, d.Id())) d.Set("bgp_asn", strconv.FormatInt(aws.ToInt64(bgpConfigurations[0].PeerAsn), 10)) d.Set("bgp_peer_address", bgpConfigurations[0].PeerAddress) d.Set("bgp_transit_gateway_addresses", slices.ApplyToAll(bgpConfigurations, func(v awstypes.TransitGatewayAttachmentBgpConfiguration) string { diff --git a/internal/service/ec2/transitgateway_dx_gateway_attachment_data_source.go b/internal/service/ec2/transitgateway_dx_gateway_attachment_data_source.go index 4fadcac83cee..b7340fcf059c 100644 --- a/internal/service/ec2/transitgateway_dx_gateway_attachment_data_source.go +++ b/internal/service/ec2/transitgateway_dx_gateway_attachment_data_source.go @@ -5,11 +5,9 @@ package ec2 import ( "context" - "fmt" "time" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/arn" "github.com/aws/aws-sdk-go-v2/service/ec2" awstypes "github.com/aws/aws-sdk-go-v2/service/ec2/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" @@ -53,7 +51,8 @@ func dataSourceTransitGatewayDxGatewayAttachment() *schema.Resource { func dataSourceTransitGatewayDxGatewayAttachmentRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).EC2Client(ctx) + c := meta.(*conns.AWSClient) + conn := c.EC2Client(ctx) input := &ec2.DescribeTransitGatewayAttachmentsInput{ Filters: newAttributeFilterList(map[string]string{ @@ -92,14 +91,7 @@ func 
dataSourceTransitGatewayDxGatewayAttachmentRead(ctx context.Context, d *sch d.SetId(aws.ToString(transitGatewayAttachment.TransitGatewayAttachmentId)) resourceOwnerID := aws.ToString(transitGatewayAttachment.ResourceOwnerId) - arn := arn.ARN{ - Partition: meta.(*conns.AWSClient).Partition(ctx), - Service: names.EC2, - Region: meta.(*conns.AWSClient).Region(ctx), - AccountID: resourceOwnerID, - Resource: fmt.Sprintf("transit-gateway-attachment/%s", d.Id()), - }.String() - d.Set(names.AttrARN, arn) + d.Set(names.AttrARN, transitGatewayAttachmentARN(ctx, c, resourceOwnerID, d.Id())) d.Set("dx_gateway_id", transitGatewayAttachment.ResourceId) d.Set(names.AttrTransitGatewayID, transitGatewayAttachment.TransitGatewayId) diff --git a/internal/service/ec2/transitgateway_multicast_group_member.go b/internal/service/ec2/transitgateway_multicast_group_member.go index 69ccfcf0c456..89396d327056 100644 --- a/internal/service/ec2/transitgateway_multicast_group_member.go +++ b/internal/service/ec2/transitgateway_multicast_group_member.go @@ -85,7 +85,7 @@ func resourceTransitGatewayMulticastGroupMemberRead(ctx context.Context, d *sche return sdkdiag.AppendFromErr(diags, err) } - outputRaw, err := tfresource.RetryWhenNewResourceNotFound(ctx, ec2PropagationTimeout, func() (any, error) { + multicastGroup, err := tfresource.RetryWhenNewResourceNotFound(ctx, ec2PropagationTimeout, func(ctx context.Context) (*awstypes.TransitGatewayMulticastGroup, error) { return findTransitGatewayMulticastGroupMemberByThreePartKey(ctx, conn, multicastDomainID, groupIPAddress, eniID) }, d.IsNewResource()) @@ -99,8 +99,6 @@ func resourceTransitGatewayMulticastGroupMemberRead(ctx context.Context, d *sche return sdkdiag.AppendErrorf(diags, "reading EC2 Transit Gateway Multicast Group Member (%s): %s", d.Id(), err) } - multicastGroup := outputRaw.(*awstypes.TransitGatewayMulticastGroup) - d.Set("group_ip_address", multicastGroup.GroupIpAddress) d.Set(names.AttrNetworkInterfaceID, 
multicastGroup.NetworkInterfaceId) d.Set("transit_gateway_multicast_domain_id", multicastDomainID) @@ -149,7 +147,7 @@ func deregisterTransitGatewayMulticastGroupMember(ctx context.Context, conn *ec2 return fmt.Errorf("deleting EC2 Transit Gateway Multicast Group Member (%s): %w", id, err) } - _, err = tfresource.RetryUntilNotFound(ctx, ec2PropagationTimeout, func() (any, error) { + _, err = tfresource.RetryUntilNotFound(ctx, ec2PropagationTimeout, func(ctx context.Context) (any, error) { return findTransitGatewayMulticastGroupMemberByThreePartKey(ctx, conn, multicastDomainID, groupIPAddress, eniID) }) diff --git a/internal/service/ec2/transitgateway_multicast_group_source.go b/internal/service/ec2/transitgateway_multicast_group_source.go index 335f753aea6a..44eb3a50302a 100644 --- a/internal/service/ec2/transitgateway_multicast_group_source.go +++ b/internal/service/ec2/transitgateway_multicast_group_source.go @@ -87,7 +87,7 @@ func resourceTransitGatewayMulticastGroupSourceRead(ctx context.Context, d *sche return sdkdiag.AppendFromErr(diags, err) } - outputRaw, err := tfresource.RetryWhenNewResourceNotFound(ctx, ec2PropagationTimeout, func() (any, error) { + multicastGroup, err := tfresource.RetryWhenNewResourceNotFound(ctx, ec2PropagationTimeout, func(ctx context.Context) (*awstypes.TransitGatewayMulticastGroup, error) { return findTransitGatewayMulticastGroupSourceByThreePartKey(ctx, conn, multicastDomainID, groupIPAddress, eniID) }, d.IsNewResource()) @@ -101,8 +101,6 @@ func resourceTransitGatewayMulticastGroupSourceRead(ctx context.Context, d *sche return sdkdiag.AppendErrorf(diags, "reading EC2 Transit Gateway Multicast Group Source (%s): %s", d.Id(), err) } - multicastGroup := outputRaw.(*awstypes.TransitGatewayMulticastGroup) - d.Set("group_ip_address", multicastGroup.GroupIpAddress) d.Set(names.AttrNetworkInterfaceID, multicastGroup.NetworkInterfaceId) d.Set("transit_gateway_multicast_domain_id", multicastDomainID) @@ -152,7 +150,7 @@ func 
deregisterTransitGatewayMulticastGroupSource(ctx context.Context, conn *ec2 return fmt.Errorf("deleting EC2 Transit Gateway Multicast Group Source (%s): %w", id, err) } - _, err = tfresource.RetryUntilNotFound(ctx, ec2PropagationTimeout, func() (any, error) { + _, err = tfresource.RetryUntilNotFound(ctx, ec2PropagationTimeout, func(ctx context.Context) (any, error) { return findTransitGatewayMulticastGroupSourceByThreePartKey(ctx, conn, multicastDomainID, groupIPAddress, eniID) }) diff --git a/internal/service/ec2/transitgateway_peering_attachment.go b/internal/service/ec2/transitgateway_peering_attachment.go index 7edac317781c..c3c6846d8a5d 100644 --- a/internal/service/ec2/transitgateway_peering_attachment.go +++ b/internal/service/ec2/transitgateway_peering_attachment.go @@ -5,11 +5,9 @@ package ec2 import ( "context" - "fmt" "log" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/arn" "github.com/aws/aws-sdk-go-v2/service/ec2" awstypes "github.com/aws/aws-sdk-go-v2/service/ec2/types" "github.com/hashicorp/aws-sdk-go-base/v2/tfawserr" @@ -128,7 +126,8 @@ func resourceTransitGatewayPeeringAttachmentCreate(ctx context.Context, d *schem func resourceTransitGatewayPeeringAttachmentRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).EC2Client(ctx) + c := meta.(*conns.AWSClient) + conn := c.EC2Client(ctx) transitGatewayPeeringAttachment, err := findTransitGatewayPeeringAttachmentByID(ctx, conn, d.Id()) @@ -143,14 +142,7 @@ func resourceTransitGatewayPeeringAttachmentRead(ctx context.Context, d *schema. 
} resourceOwnerID := aws.ToString(transitGatewayPeeringAttachment.RequesterTgwInfo.OwnerId) - arn := arn.ARN{ - Partition: meta.(*conns.AWSClient).Partition(ctx), - Service: names.EC2, - Region: meta.(*conns.AWSClient).Region(ctx), - AccountID: resourceOwnerID, - Resource: fmt.Sprintf("transit-gateway-attachment/%s", d.Id()), - }.String() - d.Set(names.AttrARN, arn) + d.Set(names.AttrARN, transitGatewayAttachmentARN(ctx, c, resourceOwnerID, d.Id())) d.Set("peer_account_id", transitGatewayPeeringAttachment.AccepterTgwInfo.OwnerId) d.Set("peer_region", transitGatewayPeeringAttachment.AccepterTgwInfo.Region) d.Set("peer_transit_gateway_id", transitGatewayPeeringAttachment.AccepterTgwInfo.TransitGatewayId) @@ -192,7 +184,7 @@ func resourceTransitGatewayPeeringAttachmentDelete(ctx context.Context, d *schem return sdkdiag.AppendErrorf(diags, "deleting EC2 Transit Gateway Peering Attachment (%s): %s", d.Id(), err) } - if err := waitTransitGatewayPeeringAttachmentDeleted(ctx, conn, d.Id()); err != nil { + if _, err := waitTransitGatewayPeeringAttachmentDeleted(ctx, conn, d.Id()); err != nil { return sdkdiag.AppendErrorf(diags, "waiting for EC2 Transit Gateway Peering Attachment (%s) delete: %s", d.Id(), err) } diff --git a/internal/service/ec2/transitgateway_peering_attachment_accepter.go b/internal/service/ec2/transitgateway_peering_attachment_accepter.go index 311d5a21ba16..561a7f826e00 100644 --- a/internal/service/ec2/transitgateway_peering_attachment_accepter.go +++ b/internal/service/ec2/transitgateway_peering_attachment_accepter.go @@ -150,7 +150,7 @@ func resourceTransitGatewayPeeringAttachmentAccepterDelete(ctx context.Context, return sdkdiag.AppendErrorf(diags, "deleting EC2 Transit Gateway Peering Attachment (%s): %s", d.Id(), err) } - if err := waitTransitGatewayPeeringAttachmentDeleted(ctx, conn, d.Id()); err != nil { + if _, err := waitTransitGatewayPeeringAttachmentDeleted(ctx, conn, d.Id()); err != nil { return sdkdiag.AppendErrorf(diags, "waiting for EC2 
Transit Gateway Peering Attachment (%s) delete: %s", d.Id(), err) } diff --git a/internal/service/ec2/transitgateway_peering_attachment_data_source.go b/internal/service/ec2/transitgateway_peering_attachment_data_source.go index f5e6ada76b9d..e68222c908c0 100644 --- a/internal/service/ec2/transitgateway_peering_attachment_data_source.go +++ b/internal/service/ec2/transitgateway_peering_attachment_data_source.go @@ -5,11 +5,9 @@ package ec2 import ( "context" - "fmt" "time" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/arn" "github.com/aws/aws-sdk-go-v2/service/ec2" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" @@ -69,7 +67,8 @@ func dataSourceTransitGatewayPeeringAttachment() *schema.Resource { func dataSourceTransitGatewayPeeringAttachmentRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).EC2Client(ctx) + c := meta.(*conns.AWSClient) + conn := c.EC2Client(ctx) input := &ec2.DescribeTransitGatewayPeeringAttachmentsInput{} @@ -109,14 +108,7 @@ func dataSourceTransitGatewayPeeringAttachmentRead(ctx context.Context, d *schem } resourceOwnerID := aws.ToString(local.OwnerId) - arn := arn.ARN{ - Partition: meta.(*conns.AWSClient).Partition(ctx), - Service: names.EC2, - Region: meta.(*conns.AWSClient).Region(ctx), - AccountID: resourceOwnerID, - Resource: fmt.Sprintf("transit-gateway-attachment/%s", d.Id()), - }.String() - d.Set(names.AttrARN, arn) + d.Set(names.AttrARN, transitGatewayAttachmentARN(ctx, c, resourceOwnerID, d.Id())) d.Set("peer_account_id", peer.OwnerId) d.Set("peer_region", peer.Region) d.Set("peer_transit_gateway_id", peer.TransitGatewayId) diff --git a/internal/service/ec2/transitgateway_policy_table.go b/internal/service/ec2/transitgateway_policy_table.go index dd3569983c3d..3ceacb773405 100644 --- a/internal/service/ec2/transitgateway_policy_table.go +++ 
b/internal/service/ec2/transitgateway_policy_table.go @@ -5,11 +5,9 @@ package ec2 import ( "context" - "fmt" "log" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/arn" "github.com/aws/aws-sdk-go-v2/service/ec2" awstypes "github.com/aws/aws-sdk-go-v2/service/ec2/types" "github.com/hashicorp/aws-sdk-go-base/v2/tfawserr" @@ -86,7 +84,8 @@ func resourceTransitGatewayPolicyTableCreate(ctx context.Context, d *schema.Reso func resourceTransitGatewayPolicyTableRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).EC2Client(ctx) + c := meta.(*conns.AWSClient) + conn := c.EC2Client(ctx) transitGatewayPolicyTable, err := findTransitGatewayPolicyTableByID(ctx, conn, d.Id()) @@ -100,14 +99,7 @@ func resourceTransitGatewayPolicyTableRead(ctx context.Context, d *schema.Resour return sdkdiag.AppendErrorf(diags, "reading EC2 Transit Gateway Policy Table (%s): %s", d.Id(), err) } - arn := arn.ARN{ - Partition: meta.(*conns.AWSClient).Partition(ctx), - Service: names.EC2, - Region: meta.(*conns.AWSClient).Region(ctx), - AccountID: meta.(*conns.AWSClient).AccountID(ctx), - Resource: fmt.Sprintf("transit-gateway-policy-table/%s", d.Id()), - }.String() - d.Set(names.AttrARN, arn) + d.Set(names.AttrARN, transitGatewayPolicyTableARN(ctx, c, d.Id())) d.Set(names.AttrState, transitGatewayPolicyTable.State) d.Set(names.AttrTransitGatewayID, transitGatewayPolicyTable.TransitGatewayId) @@ -148,3 +140,7 @@ func resourceTransitGatewayPolicyTableDelete(ctx context.Context, d *schema.Reso return diags } + +func transitGatewayPolicyTableARN(ctx context.Context, c *conns.AWSClient, policyTableID string) string { + return c.RegionalARN(ctx, names.EC2, "transit-gateway-policy-table/"+policyTableID) +} diff --git a/internal/service/ec2/transitgateway_route.go b/internal/service/ec2/transitgateway_route.go index 325c3ca4520e..c0d306c0d545 100644 --- a/internal/service/ec2/transitgateway_route.go 
+++ b/internal/service/ec2/transitgateway_route.go @@ -102,7 +102,7 @@ func resourceTransitGatewayRouteRead(ctx context.Context, d *schema.ResourceData return sdkdiag.AppendFromErr(diags, err) } - outputRaw, err := tfresource.RetryWhenNewResourceNotFound(ctx, ec2PropagationTimeout, func() (any, error) { + transitGatewayRoute, err := tfresource.RetryWhenNewResourceNotFound(ctx, ec2PropagationTimeout, func(ctx context.Context) (*awstypes.TransitGatewayRoute, error) { return findTransitGatewayStaticRoute(ctx, conn, transitGatewayRouteTableID, destination) }, d.IsNewResource()) @@ -116,8 +116,6 @@ func resourceTransitGatewayRouteRead(ctx context.Context, d *schema.ResourceData return sdkdiag.AppendErrorf(diags, "reading EC2 Transit Gateway Route (%s): %s", d.Id(), err) } - transitGatewayRoute := outputRaw.(*awstypes.TransitGatewayRoute) - d.Set("destination_cidr_block", transitGatewayRoute.DestinationCidrBlock) if len(transitGatewayRoute.TransitGatewayAttachments) > 0 { d.Set(names.AttrTransitGatewayAttachmentID, transitGatewayRoute.TransitGatewayAttachments[0].TransitGatewayAttachmentId) diff --git a/internal/service/ec2/transitgateway_route_table.go b/internal/service/ec2/transitgateway_route_table.go index aa66c21904ba..6eb706e5cd18 100644 --- a/internal/service/ec2/transitgateway_route_table.go +++ b/internal/service/ec2/transitgateway_route_table.go @@ -5,11 +5,9 @@ package ec2 import ( "context" - "fmt" "log" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/arn" "github.com/aws/aws-sdk-go-v2/service/ec2" awstypes "github.com/aws/aws-sdk-go-v2/service/ec2/types" "github.com/hashicorp/aws-sdk-go-base/v2/tfawserr" @@ -89,7 +87,8 @@ func resourceTransitGatewayRouteTableCreate(ctx context.Context, d *schema.Resou func resourceTransitGatewayRouteTableRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).EC2Client(ctx) + c := meta.(*conns.AWSClient) + conn := 
c.EC2Client(ctx) transitGatewayRouteTable, err := findTransitGatewayRouteTableByID(ctx, conn, d.Id()) @@ -103,14 +102,7 @@ func resourceTransitGatewayRouteTableRead(ctx context.Context, d *schema.Resourc return sdkdiag.AppendErrorf(diags, "reading EC2 Transit Gateway Route Table (%s): %s", d.Id(), err) } - arn := arn.ARN{ - Partition: meta.(*conns.AWSClient).Partition(ctx), - Service: names.EC2, - Region: meta.(*conns.AWSClient).Region(ctx), - AccountID: meta.(*conns.AWSClient).AccountID(ctx), - Resource: fmt.Sprintf("transit-gateway-route-table/%s", d.Id()), - }.String() - d.Set(names.AttrARN, arn) + d.Set(names.AttrARN, transitGatewayRouteTableARN(ctx, c, d.Id())) d.Set("default_association_route_table", transitGatewayRouteTable.DefaultAssociationRouteTable) d.Set("default_propagation_route_table", transitGatewayRouteTable.DefaultPropagationRouteTable) d.Set(names.AttrTransitGatewayID, transitGatewayRouteTable.TransitGatewayId) @@ -152,3 +144,7 @@ func resourceTransitGatewayRouteTableDelete(ctx context.Context, d *schema.Resou return diags } + +func transitGatewayRouteTableARN(ctx context.Context, c *conns.AWSClient, routeTableID string) string { + return c.RegionalARN(ctx, names.EC2, "transit-gateway-route-table/"+routeTableID) +} diff --git a/internal/service/ec2/transitgateway_route_table_association.go b/internal/service/ec2/transitgateway_route_table_association.go index da30d5c815f4..252c0160466c 100644 --- a/internal/service/ec2/transitgateway_route_table_association.go +++ b/internal/service/ec2/transitgateway_route_table_association.go @@ -14,7 +14,6 @@ import ( awstypes "github.com/aws/aws-sdk-go-v2/service/ec2/types" "github.com/hashicorp/aws-sdk-go-base/v2/tfawserr" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" 
"github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -52,6 +51,7 @@ func resourceTransitGatewayRouteTableAssociation() *schema.Resource { names.AttrTransitGatewayAttachmentID: { Type: schema.TypeString, Required: true, + ForceNew: true, ValidateFunc: validation.NoZeroValues, }, "transit_gateway_route_table_id": { @@ -61,43 +61,6 @@ func resourceTransitGatewayRouteTableAssociation() *schema.Resource { ValidateFunc: validation.NoZeroValues, }, }, - - CustomizeDiff: customdiff.Sequence( - func(_ context.Context, d *schema.ResourceDiff, meta any) error { - if !d.HasChange(names.AttrTransitGatewayAttachmentID) { - return nil - } - - // See https://github.com/hashicorp/terraform-provider-aws/issues/30085 - // In all cases, changes should force new except: - // o is not empty string AND - // n is empty string AND - // plan is unknown AND - // state is known - o, n := d.GetChange(names.AttrTransitGatewayAttachmentID) - if o.(string) == "" || n.(string) != "" { - return d.ForceNew(names.AttrTransitGatewayAttachmentID) - } - - rawPlan := d.GetRawPlan() - plan := rawPlan.GetAttr(names.AttrTransitGatewayAttachmentID) - if plan.IsKnown() { - return d.ForceNew(names.AttrTransitGatewayAttachmentID) - } - - rawState := d.GetRawState() - if rawState.IsNull() || !rawState.IsKnown() { - return d.ForceNew(names.AttrTransitGatewayAttachmentID) - } - - state := rawState.GetAttr(names.AttrTransitGatewayAttachmentID) - if !state.IsKnown() { - return d.ForceNew(names.AttrTransitGatewayAttachmentID) - } - - return nil - }, - ), } } diff --git a/internal/service/ec2/transitgateway_route_table_association_test.go b/internal/service/ec2/transitgateway_route_table_association_test.go index 454a82b910e3..9127862709ff 100644 --- a/internal/service/ec2/transitgateway_route_table_association_test.go +++ b/internal/service/ec2/transitgateway_route_table_association_test.go @@ -97,6 +97,49 @@ func testAccTransitGatewayRouteTableAssociation_disappears(t *testing.T, semapho }) } +func 
testAccTransitGatewayRouteTableAssociation_attachmentChange(t *testing.T, semaphore tfsync.Semaphore) { + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + + ctx := acctest.Context(t) + var v awstypes.TransitGatewayRouteTableAssociation + resourceName := "aws_ec2_transit_gateway_route_table_association.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheckTransitGatewaySynchronize(t, semaphore) + acctest.PreCheck(ctx, t) + testAccPreCheckTransitGateway(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckTransitGatewayRouteTableAssociationDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccTransitGatewayRouteTableAssociationConfig_attachmentChange(rName, 0), + Check: resource.ComposeTestCheckFunc( + testAccCheckTransitGatewayRouteTableAssociationExists(ctx, resourceName, &v), + resource.TestCheckResourceAttrPair(resourceName, names.AttrTransitGatewayAttachmentID, "aws_ec2_transit_gateway_vpc_attachment.test.0", names.AttrID), + ), + }, + { + Config: testAccTransitGatewayRouteTableAssociationConfig_attachmentChange(rName, 1), + Check: resource.ComposeTestCheckFunc( + testAccCheckTransitGatewayRouteTableAssociationExists(ctx, resourceName, &v), + resource.TestCheckResourceAttrPair(resourceName, names.AttrTransitGatewayAttachmentID, "aws_ec2_transit_gateway_vpc_attachment.test.1", names.AttrID), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionReplace), + }, + }, + }, + }, + }) +} + func testAccTransitGatewayRouteTableAssociation_replaceExistingAssociation(t *testing.T, semaphore tfsync.Semaphore) { if testing.Short() { t.Skip("skipping long-running test in short mode") @@ -142,7 +185,7 @@ func 
testAccTransitGatewayRouteTableAssociation_replaceExistingAssociation(t *te }) } -func testAccTransitGatewayRouteTableAssociation_notRecreatedDXGateway(t *testing.T, semaphore tfsync.Semaphore) { +func testAccTransitGatewayRouteTableAssociation_recreatedDXGateway(t *testing.T, semaphore tfsync.Semaphore) { if testing.Short() { t.Skip("skipping long-running test in short mode") } @@ -174,11 +217,9 @@ func testAccTransitGatewayRouteTableAssociation_notRecreatedDXGateway(t *testing Check: resource.ComposeTestCheckFunc( testAccCheckTransitGatewayRouteTableAssociationExists(ctx, resourceName, &a), ), - // Calling a NotRecreated function, such as testAccCheckRouteTableAssociationNotRecreated, as is typical, - // won't work here because the recreated resource ID will be the same, because it's two IDs pegged together. ConfigPlanChecks: resource.ConfigPlanChecks{ PreApply: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionReplace), }, }, }, @@ -344,3 +385,70 @@ resource "aws_ec2_transit_gateway_route_table_propagation" "test" { } `, rName, rBGPASN, strings.Join(allowedPrefixes, `", "`)) } + +func testAccTransitGatewayRouteTableAssociationConfig_attachmentChange(rName string, attachmentIndex int) string { + return fmt.Sprintf(` +resource "aws_vpc" "test" { + count = 2 + + cidr_block = "10.${count.index}.0.0/16" + + tags = { + Name = "%[1]s-${count.index}" + } +} + +resource "aws_subnet" "test" { + count = 2 + + vpc_id = aws_vpc.test[count.index].id + cidr_block = "10.${count.index}.0.0/24" + availability_zone = data.aws_availability_zones.available.names[0] + + tags = { + Name = "%[1]s-${count.index}" + } +} + +data "aws_availability_zones" "available" { + state = "available" + + filter { + name = "opt-in-status" + values = ["opt-in-not-required"] + } +} + +resource "aws_ec2_transit_gateway" "test" { + tags = { + Name = %[1]q + } +} + +resource 
"aws_ec2_transit_gateway_vpc_attachment" "test" { + count = 2 + + subnet_ids = [aws_subnet.test[count.index].id] + transit_gateway_default_route_table_association = false + transit_gateway_id = aws_ec2_transit_gateway.test.id + vpc_id = aws_vpc.test[count.index].id + + tags = { + Name = "%[1]s-${count.index}" + } +} + +resource "aws_ec2_transit_gateway_route_table" "test" { + transit_gateway_id = aws_ec2_transit_gateway.test.id + + tags = { + Name = %[1]q + } +} + +resource "aws_ec2_transit_gateway_route_table_association" "test" { + transit_gateway_attachment_id = aws_ec2_transit_gateway_vpc_attachment.test[%[2]d].id + transit_gateway_route_table_id = aws_ec2_transit_gateway_route_table.test.id +} +`, rName, attachmentIndex) +} diff --git a/internal/service/ec2/transitgateway_route_table_data_source.go b/internal/service/ec2/transitgateway_route_table_data_source.go index 27be56b4ccd0..ce28b4d0d566 100644 --- a/internal/service/ec2/transitgateway_route_table_data_source.go +++ b/internal/service/ec2/transitgateway_route_table_data_source.go @@ -5,11 +5,9 @@ package ec2 import ( "context" - "fmt" "time" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/arn" "github.com/aws/aws-sdk-go-v2/service/ec2" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" @@ -61,7 +59,8 @@ func dataSourceTransitGatewayRouteTable() *schema.Resource { func dataSourceTransitGatewayRouteTableRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).EC2Client(ctx) + c := meta.(*conns.AWSClient) + conn := c.EC2Client(ctx) input := &ec2.DescribeTransitGatewayRouteTablesInput{} @@ -85,14 +84,7 @@ func dataSourceTransitGatewayRouteTableRead(ctx context.Context, d *schema.Resou } d.SetId(aws.ToString(transitGatewayRouteTable.TransitGatewayRouteTableId)) - arn := arn.ARN{ - Partition: meta.(*conns.AWSClient).Partition(ctx), - Service: 
names.EC2, - Region: meta.(*conns.AWSClient).Region(ctx), - AccountID: meta.(*conns.AWSClient).AccountID(ctx), - Resource: fmt.Sprintf("transit-gateway-route-table/%s", d.Id()), - }.String() - d.Set(names.AttrARN, arn) + d.Set(names.AttrARN, transitGatewayRouteTableARN(ctx, c, d.Id())) d.Set("default_association_route_table", transitGatewayRouteTable.DefaultAssociationRouteTable) d.Set("default_propagation_route_table", transitGatewayRouteTable.DefaultPropagationRouteTable) d.Set(names.AttrTransitGatewayID, transitGatewayRouteTable.TransitGatewayId) diff --git a/internal/service/ec2/transitgateway_route_table_propagation_test.go b/internal/service/ec2/transitgateway_route_table_propagation_test.go index 687560a38a49..fa04407bf59c 100644 --- a/internal/service/ec2/transitgateway_route_table_propagation_test.go +++ b/internal/service/ec2/transitgateway_route_table_propagation_test.go @@ -6,11 +6,13 @@ package ec2_test import ( "context" "fmt" + "strings" "testing" awstypes "github.com/aws/aws-sdk-go-v2/service/ec2/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/plancheck" "github.com/hashicorp/terraform-plugin-testing/terraform" "github.com/hashicorp/terraform-provider-aws/internal/acctest" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -85,6 +87,87 @@ func testAccTransitGatewayRouteTablePropagation_disappears(t *testing.T, semapho }) } +func testAccTransitGatewayRouteTablePropagation_attachmentChange(t *testing.T, semaphore tfsync.Semaphore) { + ctx := acctest.Context(t) + var v awstypes.TransitGatewayRouteTablePropagation + resourceName := "aws_ec2_transit_gateway_route_table_propagation.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheckTransitGatewaySynchronize(t, semaphore) + acctest.PreCheck(ctx, t) 
+ testAccPreCheckTransitGateway(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckTransitGatewayRouteTablePropagationDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccTransitGatewayRouteTablePropagationConfig_attachmentChange(rName, 0), + Check: resource.ComposeTestCheckFunc( + testAccCheckTransitGatewayRouteTablePropagationExists(ctx, resourceName, &v), + resource.TestCheckResourceAttrPair(resourceName, names.AttrTransitGatewayAttachmentID, "aws_ec2_transit_gateway_vpc_attachment.test.0", names.AttrID), + ), + }, + { + Config: testAccTransitGatewayRouteTablePropagationConfig_attachmentChange(rName, 1), + Check: resource.ComposeTestCheckFunc( + testAccCheckTransitGatewayRouteTablePropagationExists(ctx, resourceName, &v), + resource.TestCheckResourceAttrPair(resourceName, names.AttrTransitGatewayAttachmentID, "aws_ec2_transit_gateway_vpc_attachment.test.1", names.AttrID), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionReplace), + }, + }, + }, + }, + }) +} + +func testAccTransitGatewayRouteTablePropagtion_recreatedDXGateway(t *testing.T, semaphore tfsync.Semaphore) { + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + + ctx := acctest.Context(t) + var a awstypes.TransitGatewayRouteTablePropagation + resourceName := "aws_ec2_transit_gateway_route_table_propagation.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rBGPASN := sdkacctest.RandIntRange(4200000000, 4294967294) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheckTransitGatewaySynchronize(t, semaphore) + acctest.PreCheck(ctx, t) + testAccPreCheckTransitGateway(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + 
CheckDestroy: testAccCheckTransitGatewayRouteTablePropagationDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccTransitGatewayRouteTablePropagationConfig_recreationByDXGateway(rName, rBGPASN, []string{"10.255.255.0/30"}), + Check: resource.ComposeTestCheckFunc( + testAccCheckTransitGatewayRouteTablePropagationExists(ctx, resourceName, &a), + ), + }, + { + Config: testAccTransitGatewayRouteTablePropagationConfig_recreationByDXGateway(rName, rBGPASN, []string{"10.255.255.0/30", "10.255.255.8/30"}), + Check: resource.ComposeTestCheckFunc( + testAccCheckTransitGatewayRouteTablePropagationExists(ctx, resourceName, &a), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionReplace), + }, + }, + }, + }, + }) +} + func testAccCheckTransitGatewayRouteTablePropagationExists(ctx context.Context, n string, v *awstypes.TransitGatewayRouteTablePropagation) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] @@ -168,3 +251,117 @@ resource "aws_ec2_transit_gateway_route_table_propagation" "test" { } `) } + +func testAccTransitGatewayRouteTablePropagationConfig_recreationByDXGateway(rName string, rBGPASN int, allowedPrefixes []string) string { + return fmt.Sprintf(` +resource "aws_dx_gateway" "test" { + amazon_side_asn = "%[2]d" + name = %[1]q +} + +resource "aws_ec2_transit_gateway" "test" { + default_route_table_association = "disable" + default_route_table_propagation = "disable" + + tags = { + Name = %[1]q + } +} + +resource "aws_ec2_transit_gateway_route_table" "test" { + transit_gateway_id = aws_ec2_transit_gateway.test.id + + tags = { + Name = %[1]q + } +} + +resource "aws_dx_gateway_association" "test" { + dx_gateway_id = aws_dx_gateway.test.id + associated_gateway_id = aws_ec2_transit_gateway.test.id + + allowed_prefixes = ["%[3]s"] +} + +data "aws_ec2_transit_gateway_dx_gateway_attachment" "test" { + 
transit_gateway_id = aws_dx_gateway_association.test.associated_gateway_id + dx_gateway_id = aws_dx_gateway_association.test.dx_gateway_id +} + +resource "aws_ec2_transit_gateway_route_table_association" "test" { + transit_gateway_attachment_id = data.aws_ec2_transit_gateway_dx_gateway_attachment.test.id + transit_gateway_route_table_id = aws_ec2_transit_gateway_route_table.test.id +} + +resource "aws_ec2_transit_gateway_route_table_propagation" "test" { + transit_gateway_attachment_id = data.aws_ec2_transit_gateway_dx_gateway_attachment.test.id + transit_gateway_route_table_id = aws_ec2_transit_gateway_route_table.test.id +} +`, rName, rBGPASN, strings.Join(allowedPrefixes, `", "`)) +} + +func testAccTransitGatewayRouteTablePropagationConfig_attachmentChange(rName string, attachmentIndex int) string { + return fmt.Sprintf(` +resource "aws_vpc" "test" { + count = 2 + + cidr_block = "10.${count.index}.0.0/16" + + tags = { + Name = "%[1]s-${count.index}" + } +} + +resource "aws_subnet" "test" { + count = 2 + + vpc_id = aws_vpc.test[count.index].id + cidr_block = "10.${count.index}.0.0/24" + availability_zone = data.aws_availability_zones.available.names[0] + + tags = { + Name = "%[1]s-${count.index}" + } +} + +data "aws_availability_zones" "available" { + state = "available" + + filter { + name = "opt-in-status" + values = ["opt-in-not-required"] + } +} + +resource "aws_ec2_transit_gateway" "test" { + tags = { + Name = %[1]q + } +} + +resource "aws_ec2_transit_gateway_vpc_attachment" "test" { + count = 2 + + subnet_ids = [aws_subnet.test[count.index].id] + transit_gateway_id = aws_ec2_transit_gateway.test.id + vpc_id = aws_vpc.test[count.index].id + + tags = { + Name = "%[1]s-${count.index}" + } +} + +resource "aws_ec2_transit_gateway_route_table" "test" { + transit_gateway_id = aws_ec2_transit_gateway.test.id + + tags = { + Name = %[1]q + } +} + +resource "aws_ec2_transit_gateway_route_table_propagation" "test" { + transit_gateway_attachment_id = 
aws_ec2_transit_gateway_vpc_attachment.test[%[2]d].id + transit_gateway_route_table_id = aws_ec2_transit_gateway_route_table.test.id +} +`, rName, attachmentIndex) +} diff --git a/internal/service/ec2/transitgateway_test.go b/internal/service/ec2/transitgateway_test.go index 385f8efa5127..742315607124 100644 --- a/internal/service/ec2/transitgateway_test.go +++ b/internal/service/ec2/transitgateway_test.go @@ -145,11 +145,14 @@ func TestAccTransitGateway_serial(t *testing.T) { acctest.CtBasic: testAccTransitGatewayRouteTableAssociation_basic, acctest.CtDisappears: testAccTransitGatewayRouteTableAssociation_disappears, "replaceExistingAssociation": testAccTransitGatewayRouteTableAssociation_replaceExistingAssociation, - "notRecreatedDXGateway": testAccTransitGatewayRouteTableAssociation_notRecreatedDXGateway, + "attachmentChange": testAccTransitGatewayRouteTableAssociation_attachmentChange, + "recreatedDXGateway": testAccTransitGatewayRouteTableAssociation_recreatedDXGateway, }, "RouteTablePropagation": { acctest.CtBasic: testAccTransitGatewayRouteTablePropagation_basic, acctest.CtDisappears: testAccTransitGatewayRouteTablePropagation_disappears, + "attachmentChange": testAccTransitGatewayRouteTablePropagation_attachmentChange, + "recreatedDXGateway": testAccTransitGatewayRouteTablePropagtion_recreatedDXGateway, }, "VPCAttachment": { acctest.CtBasic: testAccTransitGatewayVPCAttachment_basic, diff --git a/internal/service/ec2/transitgateway_vpc_attachment.go b/internal/service/ec2/transitgateway_vpc_attachment.go index daee00b93d17..25a5cb859a4e 100644 --- a/internal/service/ec2/transitgateway_vpc_attachment.go +++ b/internal/service/ec2/transitgateway_vpc_attachment.go @@ -5,11 +5,9 @@ package ec2 import ( "context" - "fmt" "log" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/arn" "github.com/aws/aws-sdk-go-v2/service/ec2" awstypes "github.com/aws/aws-sdk-go-v2/service/ec2/types" "github.com/hashicorp/aws-sdk-go-base/v2/tfawserr" @@ -172,7 
+170,8 @@ func resourceTransitGatewayVPCAttachmentCreate(ctx context.Context, d *schema.Re func resourceTransitGatewayVPCAttachmentRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).EC2Client(ctx) + c := meta.(*conns.AWSClient) + conn := c.EC2Client(ctx) transitGatewayVPCAttachment, err := findTransitGatewayVPCAttachmentByID(ctx, conn, d.Id()) @@ -225,14 +224,7 @@ func resourceTransitGatewayVPCAttachmentRead(ctx context.Context, d *schema.Reso d.Set("appliance_mode_support", transitGatewayVPCAttachment.Options.ApplianceModeSupport) vpcOwnerID := aws.ToString(transitGatewayVPCAttachment.VpcOwnerId) - arn := arn.ARN{ - Partition: meta.(*conns.AWSClient).Partition(ctx), - Service: names.EC2, - Region: meta.(*conns.AWSClient).Region(ctx), - AccountID: vpcOwnerID, - Resource: fmt.Sprintf("transit-gateway-attachment/%s", d.Id()), - }.String() - d.Set(names.AttrARN, arn) + d.Set(names.AttrARN, transitGatewayAttachmentARN(ctx, c, vpcOwnerID, d.Id())) d.Set("dns_support", transitGatewayVPCAttachment.Options.DnsSupport) d.Set("ipv6_support", transitGatewayVPCAttachment.Options.Ipv6Support) d.Set("security_group_referencing_support", transitGatewayVPCAttachment.Options.SecurityGroupReferencingSupport) @@ -332,3 +324,7 @@ func resourceTransitGatewayVPCAttachmentDelete(ctx context.Context, d *schema.Re return diags } + +func transitGatewayAttachmentARN(ctx context.Context, c *conns.AWSClient, accountID, attachmentID string) string { + return c.RegionalARNWithAccount(ctx, names.EC2, accountID, "transit-gateway-attachment/"+attachmentID) +} diff --git a/internal/service/ec2/transitgateway_vpc_attachment_data_source.go b/internal/service/ec2/transitgateway_vpc_attachment_data_source.go index 2575cc586276..4ad20d103a39 100644 --- a/internal/service/ec2/transitgateway_vpc_attachment_data_source.go +++ b/internal/service/ec2/transitgateway_vpc_attachment_data_source.go @@ -5,11 +5,9 @@ package 
ec2 import ( "context" - "fmt" "time" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/arn" "github.com/aws/aws-sdk-go-v2/service/ec2" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" @@ -82,7 +80,8 @@ func dataSourceTransitGatewayVPCAttachment() *schema.Resource { func dataSourceTransitGatewayVPCAttachmentRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).EC2Client(ctx) + c := meta.(*conns.AWSClient) + conn := c.EC2Client(ctx) input := &ec2.DescribeTransitGatewayVpcAttachmentsInput{} @@ -108,14 +107,7 @@ func dataSourceTransitGatewayVPCAttachmentRead(ctx context.Context, d *schema.Re d.SetId(aws.ToString(transitGatewayVPCAttachment.TransitGatewayAttachmentId)) d.Set("appliance_mode_support", transitGatewayVPCAttachment.Options.ApplianceModeSupport) vpcOwnerID := aws.ToString(transitGatewayVPCAttachment.VpcOwnerId) - arn := arn.ARN{ - Partition: meta.(*conns.AWSClient).Partition(ctx), - Service: names.EC2, - Region: meta.(*conns.AWSClient).Region(ctx), - AccountID: vpcOwnerID, - Resource: fmt.Sprintf("transit-gateway-attachment/%s", d.Id()), - }.String() - d.Set(names.AttrARN, arn) + d.Set(names.AttrARN, transitGatewayAttachmentARN(ctx, c, vpcOwnerID, d.Id())) d.Set("dns_support", transitGatewayVPCAttachment.Options.DnsSupport) d.Set("ipv6_support", transitGatewayVPCAttachment.Options.Ipv6Support) d.Set("security_group_referencing_support", transitGatewayVPCAttachment.Options.SecurityGroupReferencingSupport) diff --git a/internal/service/ec2/verifiedaccess_endpoint.go b/internal/service/ec2/verifiedaccess_endpoint.go index 53acef40e740..6c8d9c8e49cc 100644 --- a/internal/service/ec2/verifiedaccess_endpoint.go +++ b/internal/service/ec2/verifiedaccess_endpoint.go @@ -10,7 +10,7 @@ import ( "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/ec2" - 
"github.com/aws/aws-sdk-go-v2/service/ec2/types" + awstypes "github.com/aws/aws-sdk-go-v2/service/ec2/types" "github.com/hashicorp/aws-sdk-go-base/v2/tfawserr" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" sdkid "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" @@ -57,7 +57,7 @@ func resourceVerifiedAccessEndpoint() *schema.Resource { Type: schema.TypeString, Required: true, ForceNew: true, - ValidateDiagFunc: enum.Validate[types.VerifiedAccessEndpointAttachmentType](), + ValidateDiagFunc: enum.Validate[awstypes.VerifiedAccessEndpointAttachmentType](), }, "cidr_options": { Type: schema.TypeList, @@ -93,7 +93,7 @@ func resourceVerifiedAccessEndpoint() *schema.Resource { Type: schema.TypeString, ForceNew: true, Optional: true, - ValidateFunc: validation.StringInSlice(enum.Slice(types.VerifiedAccessEndpointProtocolTcp), false), + ValidateFunc: validation.StringInSlice(enum.Slice(awstypes.VerifiedAccessEndpointProtocolTcp), false), }, names.AttrSubnetIDs: { Type: schema.TypeSet, @@ -131,7 +131,7 @@ func resourceVerifiedAccessEndpoint() *schema.Resource { Type: schema.TypeString, Required: true, ForceNew: true, - ValidateDiagFunc: enum.Validate[types.VerifiedAccessEndpointType](), + ValidateDiagFunc: enum.Validate[awstypes.VerifiedAccessEndpointType](), }, "load_balancer_options": { Type: schema.TypeList, @@ -171,7 +171,7 @@ func resourceVerifiedAccessEndpoint() *schema.Resource { names.AttrProtocol: { Type: schema.TypeString, Optional: true, - ValidateDiagFunc: enum.Validate[types.VerifiedAccessEndpointProtocol](), + ValidateDiagFunc: enum.Validate[awstypes.VerifiedAccessEndpointProtocol](), }, names.AttrSubnetIDs: { Type: schema.TypeSet, @@ -218,7 +218,7 @@ func resourceVerifiedAccessEndpoint() *schema.Resource { names.AttrProtocol: { Type: schema.TypeString, Optional: true, - ValidateDiagFunc: enum.Validate[types.VerifiedAccessEndpointProtocol](), + ValidateDiagFunc: enum.Validate[awstypes.VerifiedAccessEndpointProtocol](), }, }, }, @@ -241,7 +241,7 @@ 
func resourceVerifiedAccessEndpoint() *schema.Resource { names.AttrProtocol: { Type: schema.TypeString, Optional: true, - ValidateFunc: validation.StringInSlice(enum.Slice(types.VerifiedAccessEndpointProtocolTcp), false), + ValidateFunc: validation.StringInSlice(enum.Slice(awstypes.VerifiedAccessEndpointProtocolTcp), false), }, "rds_db_cluster_arn": { Type: schema.TypeString, @@ -318,10 +318,10 @@ func resourceVerifiedAccessEndpointCreate(ctx context.Context, d *schema.Resourc conn := meta.(*conns.AWSClient).EC2Client(ctx) input := ec2.CreateVerifiedAccessEndpointInput{ - AttachmentType: types.VerifiedAccessEndpointAttachmentType(d.Get("attachment_type").(string)), + AttachmentType: awstypes.VerifiedAccessEndpointAttachmentType(d.Get("attachment_type").(string)), ClientToken: aws.String(sdkid.UniqueId()), - EndpointType: types.VerifiedAccessEndpointType(d.Get(names.AttrEndpointType).(string)), - TagSpecifications: getTagSpecificationsIn(ctx, types.ResourceTypeVerifiedAccessEndpoint), + EndpointType: awstypes.VerifiedAccessEndpointType(d.Get(names.AttrEndpointType).(string)), + TagSpecifications: getTagSpecificationsIn(ctx, awstypes.ResourceTypeVerifiedAccessEndpoint), VerifiedAccessGroupId: aws.String(d.Get("verified_access_group_id").(string)), } @@ -538,7 +538,7 @@ func resourceVerifiedAccessEndpointDelete(ctx context.Context, d *schema.Resourc return diags } -func flattenVerifiedAccessEndpointPortRanges(apiObjects []types.VerifiedAccessEndpointPortRange) []any { +func flattenVerifiedAccessEndpointPortRanges(apiObjects []awstypes.VerifiedAccessEndpointPortRange) []any { if len(apiObjects) == 0 { return nil } @@ -562,7 +562,7 @@ func flattenVerifiedAccessEndpointPortRanges(apiObjects []types.VerifiedAccessEn return tfList } -func flattenVerifiedAccessEndpointCIDROptions(apiObject *types.VerifiedAccessEndpointCidrOptions) []any { +func flattenVerifiedAccessEndpointCIDROptions(apiObject *awstypes.VerifiedAccessEndpointCidrOptions) []any { if apiObject == nil { 
return nil } @@ -588,7 +588,7 @@ func flattenVerifiedAccessEndpointCIDROptions(apiObject *types.VerifiedAccessEnd return []any{tfMap} } -func flattenVerifiedAccessEndpointLoadBalancerOptions(apiObject *types.VerifiedAccessEndpointLoadBalancerOptions) []any { +func flattenVerifiedAccessEndpointLoadBalancerOptions(apiObject *awstypes.VerifiedAccessEndpointLoadBalancerOptions) []any { if apiObject == nil { return nil } @@ -618,7 +618,7 @@ func flattenVerifiedAccessEndpointLoadBalancerOptions(apiObject *types.VerifiedA return []any{tfMap} } -func flattenVerifiedAccessEndpointENIOptions(apiObject *types.VerifiedAccessEndpointEniOptions) []any { +func flattenVerifiedAccessEndpointENIOptions(apiObject *awstypes.VerifiedAccessEndpointEniOptions) []any { if apiObject == nil { return nil } @@ -644,7 +644,7 @@ func flattenVerifiedAccessEndpointENIOptions(apiObject *types.VerifiedAccessEndp return []any{tfMap} } -func flattenVerifiedAccessEndpointRDSOptions(apiObject *types.VerifiedAccessEndpointRdsOptions) []any { +func flattenVerifiedAccessEndpointRDSOptions(apiObject *awstypes.VerifiedAccessEndpointRdsOptions) []any { if apiObject == nil { return nil } @@ -682,7 +682,7 @@ func flattenVerifiedAccessEndpointRDSOptions(apiObject *types.VerifiedAccessEndp return []any{tfMap} } -func flattenVerifiedAccessSSESpecificationResponse(apiObject *types.VerifiedAccessSseSpecificationResponse) []any { +func flattenVerifiedAccessSSESpecificationResponse(apiObject *awstypes.VerifiedAccessSseSpecificationResponse) []any { if apiObject == nil { return nil } @@ -700,12 +700,12 @@ func flattenVerifiedAccessSSESpecificationResponse(apiObject *types.VerifiedAcce return []any{tfMap} } -func expandCreateVerifiedAccessEndpointCIDROptions(tfMap map[string]any) *types.CreateVerifiedAccessEndpointCidrOptions { +func expandCreateVerifiedAccessEndpointCIDROptions(tfMap map[string]any) *awstypes.CreateVerifiedAccessEndpointCidrOptions { if tfMap == nil { return nil } - apiObject := 
&types.CreateVerifiedAccessEndpointCidrOptions{} + apiObject := &awstypes.CreateVerifiedAccessEndpointCidrOptions{} if v, ok := tfMap["cidr"].(string); ok && v != "" { apiObject.Cidr = aws.String(v) @@ -716,7 +716,7 @@ func expandCreateVerifiedAccessEndpointCIDROptions(tfMap map[string]any) *types. } if v, ok := tfMap[names.AttrProtocol].(string); ok && v != "" { - apiObject.Protocol = types.VerifiedAccessEndpointProtocol(v) + apiObject.Protocol = awstypes.VerifiedAccessEndpointProtocol(v) } if v, ok := tfMap[names.AttrSubnetIDs].(*schema.Set); ok && v.Len() > 0 { @@ -726,19 +726,19 @@ func expandCreateVerifiedAccessEndpointCIDROptions(tfMap map[string]any) *types. return apiObject } -func expandCreateVerifiedAccessEndpointRDSOptions(tfMap map[string]any) *types.CreateVerifiedAccessEndpointRdsOptions { +func expandCreateVerifiedAccessEndpointRDSOptions(tfMap map[string]any) *awstypes.CreateVerifiedAccessEndpointRdsOptions { if tfMap == nil { return nil } - apiObject := &types.CreateVerifiedAccessEndpointRdsOptions{} + apiObject := &awstypes.CreateVerifiedAccessEndpointRdsOptions{} if v, ok := tfMap[names.AttrPort].(int); ok { apiObject.Port = aws.Int32(int32(v)) } if v, ok := tfMap[names.AttrProtocol].(string); ok && v != "" { - apiObject.Protocol = types.VerifiedAccessEndpointProtocol(v) + apiObject.Protocol = awstypes.VerifiedAccessEndpointProtocol(v) } if v, ok := tfMap["rds_db_cluster_arn"].(string); ok && v != "" { @@ -764,16 +764,16 @@ func expandCreateVerifiedAccessEndpointRDSOptions(tfMap map[string]any) *types.C return apiObject } -func expandVerifiedAccessEndpointPortRanges(tfList []any) []types.VerifiedAccessEndpointPortRange { +func expandVerifiedAccessEndpointPortRanges(tfList []any) []awstypes.VerifiedAccessEndpointPortRange { if len(tfList) == 0 || tfList[0] == nil { return nil } - apiObjects := make([]types.VerifiedAccessEndpointPortRange, len(tfList)) + apiObjects := make([]awstypes.VerifiedAccessEndpointPortRange, len(tfList)) for i, tfElem := 
range tfList { tfMap := tfElem.(map[string]any) - apiObjects[i] = types.VerifiedAccessEndpointPortRange{ + apiObjects[i] = awstypes.VerifiedAccessEndpointPortRange{ FromPort: aws.Int32(int32(tfMap["from_port"].(int))), ToPort: aws.Int32(int32(tfMap["to_port"].(int))), } @@ -782,42 +782,42 @@ func expandVerifiedAccessEndpointPortRanges(tfList []any) []types.VerifiedAccess return apiObjects } -func expandCreateVerifiedAccessEndpointPortRanges(tfList []any) []types.CreateVerifiedAccessEndpointPortRange { +func expandCreateVerifiedAccessEndpointPortRanges(tfList []any) []awstypes.CreateVerifiedAccessEndpointPortRange { apiObjects := expandVerifiedAccessEndpointPortRanges(tfList) if apiObjects == nil { return nil } - return tfslices.ApplyToAll(apiObjects, func(v types.VerifiedAccessEndpointPortRange) types.CreateVerifiedAccessEndpointPortRange { - return types.CreateVerifiedAccessEndpointPortRange{ + return tfslices.ApplyToAll(apiObjects, func(v awstypes.VerifiedAccessEndpointPortRange) awstypes.CreateVerifiedAccessEndpointPortRange { + return awstypes.CreateVerifiedAccessEndpointPortRange{ FromPort: v.FromPort, ToPort: v.ToPort, } }) } -func expandModifyVerifiedAccessEndpointPortRanges(tfList []any) []types.ModifyVerifiedAccessEndpointPortRange { +func expandModifyVerifiedAccessEndpointPortRanges(tfList []any) []awstypes.ModifyVerifiedAccessEndpointPortRange { apiObjects := expandVerifiedAccessEndpointPortRanges(tfList) if apiObjects == nil { return nil } - return tfslices.ApplyToAll(apiObjects, func(v types.VerifiedAccessEndpointPortRange) types.ModifyVerifiedAccessEndpointPortRange { - return types.ModifyVerifiedAccessEndpointPortRange{ + return tfslices.ApplyToAll(apiObjects, func(v awstypes.VerifiedAccessEndpointPortRange) awstypes.ModifyVerifiedAccessEndpointPortRange { + return awstypes.ModifyVerifiedAccessEndpointPortRange{ FromPort: v.FromPort, ToPort: v.ToPort, } }) } -func expandCreateVerifiedAccessEndpointLoadBalancerOptions(tfMap map[string]any) 
*types.CreateVerifiedAccessEndpointLoadBalancerOptions { +func expandCreateVerifiedAccessEndpointLoadBalancerOptions(tfMap map[string]any) *awstypes.CreateVerifiedAccessEndpointLoadBalancerOptions { if tfMap == nil { return nil } - apiObject := &types.CreateVerifiedAccessEndpointLoadBalancerOptions{} + apiObject := &awstypes.CreateVerifiedAccessEndpointLoadBalancerOptions{} if v, ok := tfMap["load_balancer_arn"].(string); ok && v != "" { apiObject.LoadBalancerArn = aws.String(v) @@ -832,7 +832,7 @@ func expandCreateVerifiedAccessEndpointLoadBalancerOptions(tfMap map[string]any) } if v, ok := tfMap[names.AttrProtocol].(string); ok && v != "" { - apiObject.Protocol = types.VerifiedAccessEndpointProtocol(v) + apiObject.Protocol = awstypes.VerifiedAccessEndpointProtocol(v) } if v, ok := tfMap[names.AttrSubnetIDs].(*schema.Set); ok && v.Len() > 0 { @@ -842,12 +842,12 @@ func expandCreateVerifiedAccessEndpointLoadBalancerOptions(tfMap map[string]any) return apiObject } -func expandCreateVerifiedAccessEndpointENIOptions(tfMap map[string]any) *types.CreateVerifiedAccessEndpointEniOptions { +func expandCreateVerifiedAccessEndpointENIOptions(tfMap map[string]any) *awstypes.CreateVerifiedAccessEndpointEniOptions { if tfMap == nil { return nil } - apiObject := &types.CreateVerifiedAccessEndpointEniOptions{} + apiObject := &awstypes.CreateVerifiedAccessEndpointEniOptions{} if v, ok := tfMap[names.AttrNetworkInterfaceID].(string); ok && v != "" { apiObject.NetworkInterfaceId = aws.String(v) @@ -862,17 +862,17 @@ func expandCreateVerifiedAccessEndpointENIOptions(tfMap map[string]any) *types.C } if v, ok := tfMap[names.AttrProtocol].(string); ok && v != "" { - apiObject.Protocol = types.VerifiedAccessEndpointProtocol(v) + apiObject.Protocol = awstypes.VerifiedAccessEndpointProtocol(v) } return apiObject } -func expandModifyVerifiedAccessEndpointCIDROptions(tfMap map[string]any) *types.ModifyVerifiedAccessEndpointCidrOptions { +func 
expandModifyVerifiedAccessEndpointCIDROptions(tfMap map[string]any) *awstypes.ModifyVerifiedAccessEndpointCidrOptions { if tfMap == nil { return nil } - apiObject := &types.ModifyVerifiedAccessEndpointCidrOptions{} + apiObject := &awstypes.ModifyVerifiedAccessEndpointCidrOptions{} if v, ok := tfMap["port_range"].(*schema.Set); ok { apiObject.PortRanges = expandModifyVerifiedAccessEndpointPortRanges(v.List()) @@ -881,12 +881,12 @@ func expandModifyVerifiedAccessEndpointCIDROptions(tfMap map[string]any) *types. return apiObject } -func expandModifyVerifiedAccessEndpointRDSOptions(tfMap map[string]any) *types.ModifyVerifiedAccessEndpointRdsOptions { +func expandModifyVerifiedAccessEndpointRDSOptions(tfMap map[string]any) *awstypes.ModifyVerifiedAccessEndpointRdsOptions { if tfMap == nil { return nil } - apiObject := &types.ModifyVerifiedAccessEndpointRdsOptions{} + apiObject := &awstypes.ModifyVerifiedAccessEndpointRdsOptions{} if v, ok := tfMap[names.AttrPort].(int); ok { apiObject.Port = aws.Int32(int32(v)) @@ -903,12 +903,12 @@ func expandModifyVerifiedAccessEndpointRDSOptions(tfMap map[string]any) *types.M return apiObject } -func expandModifyVerifiedAccessEndpointLoadBalancerOptions(tfMap map[string]any) *types.ModifyVerifiedAccessEndpointLoadBalancerOptions { +func expandModifyVerifiedAccessEndpointLoadBalancerOptions(tfMap map[string]any) *awstypes.ModifyVerifiedAccessEndpointLoadBalancerOptions { if tfMap == nil { return nil } - apiObject := &types.ModifyVerifiedAccessEndpointLoadBalancerOptions{} + apiObject := &awstypes.ModifyVerifiedAccessEndpointLoadBalancerOptions{} if v, ok := tfMap[names.AttrPort].(int); ok && v != 0 { apiObject.Port = aws.Int32(int32(v)) @@ -919,7 +919,7 @@ func expandModifyVerifiedAccessEndpointLoadBalancerOptions(tfMap map[string]any) } if v, ok := tfMap[names.AttrProtocol].(string); ok && v != "" { - apiObject.Protocol = types.VerifiedAccessEndpointProtocol(v) + apiObject.Protocol = awstypes.VerifiedAccessEndpointProtocol(v) } if v, 
ok := tfMap[names.AttrSubnetIDs].(*schema.Set); ok && v.Len() > 0 { @@ -929,12 +929,12 @@ func expandModifyVerifiedAccessEndpointLoadBalancerOptions(tfMap map[string]any) return apiObject } -func expandModifyVerifiedAccessEndpointENIOptions(tfMap map[string]any) *types.ModifyVerifiedAccessEndpointEniOptions { +func expandModifyVerifiedAccessEndpointENIOptions(tfMap map[string]any) *awstypes.ModifyVerifiedAccessEndpointEniOptions { if tfMap == nil { return nil } - apiObject := &types.ModifyVerifiedAccessEndpointEniOptions{} + apiObject := &awstypes.ModifyVerifiedAccessEndpointEniOptions{} if v, ok := tfMap[names.AttrPort].(int); ok { apiObject.Port = aws.Int32(int32(v)) @@ -945,18 +945,18 @@ func expandModifyVerifiedAccessEndpointENIOptions(tfMap map[string]any) *types.M } if v, ok := tfMap[names.AttrProtocol].(string); ok && v != "" { - apiObject.Protocol = types.VerifiedAccessEndpointProtocol(v) + apiObject.Protocol = awstypes.VerifiedAccessEndpointProtocol(v) } return apiObject } -func expandVerifiedAccessSSESpecificationRequest(tfMap map[string]any) *types.VerifiedAccessSseSpecificationRequest { +func expandVerifiedAccessSSESpecificationRequest(tfMap map[string]any) *awstypes.VerifiedAccessSseSpecificationRequest { if tfMap == nil { return nil } - apiObject := &types.VerifiedAccessSseSpecificationRequest{} + apiObject := &awstypes.VerifiedAccessSseSpecificationRequest{} if v, ok := tfMap["customer_managed_key_enabled"].(bool); ok { apiObject.CustomerManagedKeyEnabled = aws.Bool(v) diff --git a/internal/service/ec2/verifiedaccess_endpoint_test.go b/internal/service/ec2/verifiedaccess_endpoint_test.go index e77ba1b7de49..a170b3d53109 100644 --- a/internal/service/ec2/verifiedaccess_endpoint_test.go +++ b/internal/service/ec2/verifiedaccess_endpoint_test.go @@ -8,7 +8,7 @@ import ( "fmt" "testing" - "github.com/aws/aws-sdk-go-v2/service/ec2/types" + awstypes "github.com/aws/aws-sdk-go-v2/service/ec2/types" sdkacctest 
"github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" @@ -22,7 +22,7 @@ import ( func testAccVerifiedAccessEndpoint_basic(t *testing.T, semaphore tfsync.Semaphore) { ctx := acctest.Context(t) - var v types.VerifiedAccessEndpoint + var v awstypes.VerifiedAccessEndpoint resourceName := "aws_verifiedaccess_endpoint.test" key := acctest.TLSRSAPrivateKeyPEM(t, 2048) certificate := acctest.TLSRSAX509SelfSignedCertificatePEM(t, key, "example.com") @@ -73,7 +73,7 @@ func testAccVerifiedAccessEndpoint_basic(t *testing.T, semaphore tfsync.Semaphor func testAccVerifiedAccessEndpoint_networkInterface(t *testing.T, semaphore tfsync.Semaphore) { ctx := acctest.Context(t) - var v types.VerifiedAccessEndpoint + var v awstypes.VerifiedAccessEndpoint resourceName := "aws_verifiedaccess_endpoint.test" key := acctest.TLSRSAPrivateKeyPEM(t, 2048) certificate := acctest.TLSRSAX509SelfSignedCertificatePEM(t, key, "example.com") @@ -120,7 +120,7 @@ func testAccVerifiedAccessEndpoint_networkInterface(t *testing.T, semaphore tfsy func testAccVerifiedAccessEndpoint_tags(t *testing.T, semaphore tfsync.Semaphore) { ctx := acctest.Context(t) - var v types.VerifiedAccessEndpoint + var v awstypes.VerifiedAccessEndpoint resourceName := "aws_verifiedaccess_endpoint.test" key := acctest.TLSRSAPrivateKeyPEM(t, 2048) certificate := acctest.TLSRSAX509SelfSignedCertificatePEM(t, key, "example.com") @@ -175,7 +175,7 @@ func testAccVerifiedAccessEndpoint_tags(t *testing.T, semaphore tfsync.Semaphore func testAccVerifiedAccessEndpoint_disappears(t *testing.T, semaphore tfsync.Semaphore) { ctx := acctest.Context(t) - var v types.VerifiedAccessEndpoint + var v awstypes.VerifiedAccessEndpoint resourceName := "aws_verifiedaccess_endpoint.test" key := acctest.TLSRSAPrivateKeyPEM(t, 2048) certificate := acctest.TLSRSAX509SelfSignedCertificatePEM(t, key, "example.com") @@ -205,7 
+205,7 @@ func testAccVerifiedAccessEndpoint_disappears(t *testing.T, semaphore tfsync.Sem func testAccVerifiedAccessEndpoint_policyDocument(t *testing.T, semaphore tfsync.Semaphore) { ctx := acctest.Context(t) - var v types.VerifiedAccessEndpoint + var v awstypes.VerifiedAccessEndpoint resourceName := "aws_verifiedaccess_endpoint.test" key := acctest.TLSRSAPrivateKeyPEM(t, 2048) certificate := acctest.TLSRSAX509SelfSignedCertificatePEM(t, key, "example.com") @@ -257,7 +257,7 @@ func testAccVerifiedAccessEndpoint_policyDocument(t *testing.T, semaphore tfsync // Ref: https://github.com/hashicorp/terraform-provider-aws/issues/39186 func testAccVerifiedAccessEndpoint_subnetIDs(t *testing.T, semaphore tfsync.Semaphore) { ctx := acctest.Context(t) - var v types.VerifiedAccessEndpoint + var v awstypes.VerifiedAccessEndpoint resourceName := "aws_verifiedaccess_endpoint.test" key := acctest.TLSRSAPrivateKeyPEM(t, 2048) certificate := acctest.TLSRSAX509SelfSignedCertificatePEM(t, key, "example.com") @@ -303,7 +303,7 @@ func testAccVerifiedAccessEndpoint_subnetIDs(t *testing.T, semaphore tfsync.Sema func testAccVerifiedAccessEndpoint_cidr(t *testing.T, semaphore tfsync.Semaphore) { ctx := acctest.Context(t) - var v types.VerifiedAccessEndpoint + var v awstypes.VerifiedAccessEndpoint resourceName := "aws_verifiedaccess_endpoint.test" key := acctest.TLSRSAPrivateKeyPEM(t, 2048) certificate := acctest.TLSRSAX509SelfSignedCertificatePEM(t, key, "example.com") @@ -349,7 +349,7 @@ func testAccVerifiedAccessEndpoint_cidr(t *testing.T, semaphore tfsync.Semaphore func testAccVerifiedAccessEndpoint_rds(t *testing.T, semaphore tfsync.Semaphore) { ctx := acctest.Context(t) - var v types.VerifiedAccessEndpoint + var v awstypes.VerifiedAccessEndpoint resourceName := "aws_verifiedaccess_endpoint.test" key := acctest.TLSRSAPrivateKeyPEM(t, 2048) certificate := acctest.TLSRSAX509SelfSignedCertificatePEM(t, key, "example.com") @@ -395,7 +395,7 @@ func testAccVerifiedAccessEndpoint_rds(t 
*testing.T, semaphore tfsync.Semaphore) func testAccVerifiedAccessEndpoint_portRangeTCP(t *testing.T, semaphore tfsync.Semaphore) { ctx := acctest.Context(t) - var v types.VerifiedAccessEndpoint + var v awstypes.VerifiedAccessEndpoint resourceName := "aws_verifiedaccess_endpoint.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -432,7 +432,7 @@ func testAccVerifiedAccessEndpoint_portRangeTCP(t *testing.T, semaphore tfsync.S func testAccVerifiedAccessEndpoint_portTCP(t *testing.T, semaphore tfsync.Semaphore) { ctx := acctest.Context(t) - var v types.VerifiedAccessEndpoint + var v awstypes.VerifiedAccessEndpoint resourceName := "aws_verifiedaccess_endpoint.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -469,7 +469,7 @@ func testAccVerifiedAccessEndpoint_portTCP(t *testing.T, semaphore tfsync.Semaph func testAccVerifiedAccessEndpoint_portHTTP(t *testing.T, semaphore tfsync.Semaphore) { ctx := acctest.Context(t) - var v types.VerifiedAccessEndpoint + var v awstypes.VerifiedAccessEndpoint resourceName := "aws_verifiedaccess_endpoint.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) key := acctest.TLSRSAPrivateKeyPEM(t, 2048) @@ -509,7 +509,7 @@ func testAccVerifiedAccessEndpoint_portHTTP(t *testing.T, semaphore tfsync.Semap func testAccVerifiedAccessEndpoint_portHTTPS(t *testing.T, semaphore tfsync.Semaphore) { ctx := acctest.Context(t) - var v types.VerifiedAccessEndpoint + var v awstypes.VerifiedAccessEndpoint resourceName := "aws_verifiedaccess_endpoint.test" key := acctest.TLSRSAPrivateKeyPEM(t, 2048) cert := acctest.TLSRSAX509SelfSignedCertificatePEM(t, key, "example.com") @@ -573,7 +573,7 @@ func testAccCheckVerifiedAccessEndpointDestroy(ctx context.Context) resource.Tes } } -func testAccCheckVerifiedAccessEndpointExists(ctx context.Context, n string, v *types.VerifiedAccessEndpoint) resource.TestCheckFunc { +func testAccCheckVerifiedAccessEndpointExists(ctx context.Context, n string, v 
*awstypes.VerifiedAccessEndpoint) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] if !ok { diff --git a/internal/service/ec2/verifiedaccess_group.go b/internal/service/ec2/verifiedaccess_group.go index af4c55d1c45c..0a93eabd8487 100644 --- a/internal/service/ec2/verifiedaccess_group.go +++ b/internal/service/ec2/verifiedaccess_group.go @@ -9,7 +9,7 @@ import ( "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/ec2" - "github.com/aws/aws-sdk-go-v2/service/ec2/types" + awstypes "github.com/aws/aws-sdk-go-v2/service/ec2/types" "github.com/hashicorp/aws-sdk-go-base/v2/tfawserr" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" @@ -105,7 +105,7 @@ func resourceVerifiedAccessGroupCreate(ctx context.Context, d *schema.ResourceDa input := &ec2.CreateVerifiedAccessGroupInput{ ClientToken: aws.String(id.UniqueId()), - TagSpecifications: getTagSpecificationsIn(ctx, types.ResourceTypeVerifiedAccessGroup), + TagSpecifications: getTagSpecificationsIn(ctx, awstypes.ResourceTypeVerifiedAccessGroup), VerifiedAccessInstanceId: aws.String(d.Get("verifiedaccess_instance_id").(string)), } @@ -257,12 +257,12 @@ func resourceVerifiedAccessGroupDelete(ctx context.Context, d *schema.ResourceDa return diags } -func expandVerifiedAccessSseSpecificationRequest(tfMap map[string]any) *types.VerifiedAccessSseSpecificationRequest { +func expandVerifiedAccessSseSpecificationRequest(tfMap map[string]any) *awstypes.VerifiedAccessSseSpecificationRequest { if tfMap == nil { return nil } - apiObject := &types.VerifiedAccessSseSpecificationRequest{} + apiObject := &awstypes.VerifiedAccessSseSpecificationRequest{} if v, ok := tfMap[names.AttrKMSKeyARN].(string); ok && v != "" { apiObject.KmsKeyArn = aws.String(v) @@ -275,7 +275,7 @@ func expandVerifiedAccessSseSpecificationRequest(tfMap map[string]any) *types.Ve return apiObject } -func 
flattenVerifiedAccessSseSpecificationResponse(apiObject *types.VerifiedAccessSseSpecificationResponse) []any { +func flattenVerifiedAccessSseSpecificationResponse(apiObject *awstypes.VerifiedAccessSseSpecificationResponse) []any { if apiObject == nil { return nil } diff --git a/internal/service/ec2/verifiedaccess_group_test.go b/internal/service/ec2/verifiedaccess_group_test.go index 6add55bad01c..738cf2e00608 100644 --- a/internal/service/ec2/verifiedaccess_group_test.go +++ b/internal/service/ec2/verifiedaccess_group_test.go @@ -8,7 +8,7 @@ import ( "fmt" "testing" - "github.com/aws/aws-sdk-go-v2/service/ec2/types" + awstypes "github.com/aws/aws-sdk-go-v2/service/ec2/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" @@ -22,7 +22,7 @@ import ( func testAccVerifiedAccessGroup_basic(t *testing.T, semaphore tfsync.Semaphore) { ctx := acctest.Context(t) - var v types.VerifiedAccessGroup + var v awstypes.VerifiedAccessGroup resourceName := "aws_verifiedaccess_group.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -65,7 +65,7 @@ func testAccVerifiedAccessGroup_basic(t *testing.T, semaphore tfsync.Semaphore) func testAccVerifiedAccessGroup_kms(t *testing.T, semaphore tfsync.Semaphore) { ctx := acctest.Context(t) - var v types.VerifiedAccessGroup + var v awstypes.VerifiedAccessGroup resourceName := "aws_verifiedaccess_group.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) policyDoc := "permit(principal, action, resource) \nwhen {\ncontext.http_request.method == \"GET\"\n};" @@ -101,7 +101,7 @@ func testAccVerifiedAccessGroup_kms(t *testing.T, semaphore tfsync.Semaphore) { func testAccVerifiedAccessGroup_updateKMS(t *testing.T, semaphore tfsync.Semaphore) { ctx := acctest.Context(t) - var v types.VerifiedAccessGroup + var v awstypes.VerifiedAccessGroup resourceName := 
"aws_verifiedaccess_group.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) policyDoc := "permit(principal, action, resource) \nwhen {\ncontext.http_request.method == \"GET\"\n};" @@ -161,7 +161,7 @@ func testAccVerifiedAccessGroup_updateKMS(t *testing.T, semaphore tfsync.Semapho func testAccVerifiedAccessGroup_disappears(t *testing.T, semaphore tfsync.Semaphore) { ctx := acctest.Context(t) - var v types.VerifiedAccessGroup + var v awstypes.VerifiedAccessGroup resourceName := "aws_verifiedaccess_group.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -189,7 +189,7 @@ func testAccVerifiedAccessGroup_disappears(t *testing.T, semaphore tfsync.Semaph func testAccVerifiedAccessGroup_tags(t *testing.T, semaphore tfsync.Semaphore) { ctx := acctest.Context(t) - var v types.VerifiedAccessGroup + var v awstypes.VerifiedAccessGroup resourceName := "aws_verifiedaccess_group.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -240,7 +240,7 @@ func testAccVerifiedAccessGroup_tags(t *testing.T, semaphore tfsync.Semaphore) { func testAccVerifiedAccessGroup_policy(t *testing.T, semaphore tfsync.Semaphore) { ctx := acctest.Context(t) - var v types.VerifiedAccessGroup + var v awstypes.VerifiedAccessGroup resourceName := "aws_verifiedaccess_group.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) description := sdkacctest.RandString(100) @@ -276,7 +276,7 @@ func testAccVerifiedAccessGroup_policy(t *testing.T, semaphore tfsync.Semaphore) func testAccVerifiedAccessGroup_updatePolicy(t *testing.T, semaphore tfsync.Semaphore) { ctx := acctest.Context(t) - var v types.VerifiedAccessGroup + var v awstypes.VerifiedAccessGroup resourceName := "aws_verifiedaccess_group.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) description := sdkacctest.RandString(100) @@ -326,7 +326,7 @@ func testAccVerifiedAccessGroup_updatePolicy(t *testing.T, semaphore tfsync.Sema } func testAccVerifiedAccessGroup_setPolicy(t 
*testing.T, semaphore tfsync.Semaphore) { ctx := acctest.Context(t) - var v types.VerifiedAccessGroup + var v awstypes.VerifiedAccessGroup resourceName := "aws_verifiedaccess_group.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) description := sdkacctest.RandString(100) @@ -382,7 +382,7 @@ func testAccVerifiedAccessGroup_setPolicy(t *testing.T, semaphore tfsync.Semapho }) } -func testAccCheckVerifiedAccessGroupExists(ctx context.Context, n string, v *types.VerifiedAccessGroup) resource.TestCheckFunc { +func testAccCheckVerifiedAccessGroupExists(ctx context.Context, n string, v *awstypes.VerifiedAccessGroup) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] if !ok { diff --git a/internal/service/ec2/verifiedaccess_instance.go b/internal/service/ec2/verifiedaccess_instance.go index 84c706d75825..e12732025b81 100644 --- a/internal/service/ec2/verifiedaccess_instance.go +++ b/internal/service/ec2/verifiedaccess_instance.go @@ -9,7 +9,7 @@ import ( "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/ec2" - "github.com/aws/aws-sdk-go-v2/service/ec2/types" + awstypes "github.com/aws/aws-sdk-go-v2/service/ec2/types" "github.com/hashicorp/aws-sdk-go-base/v2/tfawserr" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" sdkid "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" @@ -102,7 +102,7 @@ func resourceVerifiedAccessInstanceCreate(ctx context.Context, d *schema.Resourc input := ec2.CreateVerifiedAccessInstanceInput{ ClientToken: aws.String(sdkid.UniqueId()), - TagSpecifications: getTagSpecificationsIn(ctx, types.ResourceTypeVerifiedAccessInstance), + TagSpecifications: getTagSpecificationsIn(ctx, awstypes.ResourceTypeVerifiedAccessInstance), } if v, ok := d.GetOk("cidr_endpoints_custom_subdomain"); ok { @@ -215,7 +215,7 @@ func resourceVerifiedAccessInstanceDelete(ctx context.Context, d *schema.Resourc return diags } -func flattenVerifiedAccessTrustProviders(apiObjects 
[]types.VerifiedAccessTrustProviderCondensed) []any { +func flattenVerifiedAccessTrustProviders(apiObjects []awstypes.VerifiedAccessTrustProviderCondensed) []any { if len(apiObjects) == 0 { return nil } @@ -233,7 +233,7 @@ func flattenVerifiedAccessTrustProviders(apiObjects []types.VerifiedAccessTrustP return tfList } -func flattenVerifiedAccessTrustProvider(apiObject types.VerifiedAccessTrustProviderCondensed) map[string]any { +func flattenVerifiedAccessTrustProvider(apiObject awstypes.VerifiedAccessTrustProviderCondensed) map[string]any { tfMap := map[string]any{ "device_trust_provider_type": apiObject.DeviceTrustProviderType, "trust_provider_type": apiObject.TrustProviderType, diff --git a/internal/service/ec2/verifiedaccess_instance_logging_configuration.go b/internal/service/ec2/verifiedaccess_instance_logging_configuration.go index e72f598a80e7..f07fdf714b5e 100644 --- a/internal/service/ec2/verifiedaccess_instance_logging_configuration.go +++ b/internal/service/ec2/verifiedaccess_instance_logging_configuration.go @@ -9,7 +9,7 @@ import ( "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/ec2" - "github.com/aws/aws-sdk-go-v2/service/ec2/types" + awstypes "github.com/aws/aws-sdk-go-v2/service/ec2/types" "github.com/hashicorp/aws-sdk-go-base/v2/tfawserr" "github.com/hashicorp/go-uuid" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" @@ -223,14 +223,14 @@ func resourceVerifiedAccessInstanceLoggingConfigurationDelete(ctx context.Contex vaiID := d.Id() // create structure for reset - resetObject := &types.VerifiedAccessLogOptions{ - CloudWatchLogs: &types.VerifiedAccessLogCloudWatchLogsDestinationOptions{ + resetObject := &awstypes.VerifiedAccessLogOptions{ + CloudWatchLogs: &awstypes.VerifiedAccessLogCloudWatchLogsDestinationOptions{ Enabled: aws.Bool(false), }, - KinesisDataFirehose: &types.VerifiedAccessLogKinesisDataFirehoseDestinationOptions{ + KinesisDataFirehose: &awstypes.VerifiedAccessLogKinesisDataFirehoseDestinationOptions{ 
Enabled: aws.Bool(false), }, - S3: &types.VerifiedAccessLogS3DestinationOptions{ + S3: &awstypes.VerifiedAccessLogS3DestinationOptions{ Enabled: aws.Bool(false), }, IncludeTrustContext: aws.Bool(false), @@ -265,7 +265,7 @@ func resourceVerifiedAccessInstanceLoggingConfigurationDelete(ctx context.Contex return diags } -func expandVerifiedAccessInstanceAccessLogs(accessLogs []any) *types.VerifiedAccessLogOptions { +func expandVerifiedAccessInstanceAccessLogs(accessLogs []any) *awstypes.VerifiedAccessLogOptions { if len(accessLogs) == 0 || accessLogs[0] == nil { return nil } @@ -275,7 +275,7 @@ func expandVerifiedAccessInstanceAccessLogs(accessLogs []any) *types.VerifiedAcc return nil } - result := &types.VerifiedAccessLogOptions{} + result := &awstypes.VerifiedAccessLogOptions{} if v, ok := tfMap[names.AttrCloudWatchLogs].([]any); ok && len(v) > 0 { result.CloudWatchLogs = expandVerifiedAccessLogCloudWatchLogs(v) @@ -300,7 +300,7 @@ func expandVerifiedAccessInstanceAccessLogs(accessLogs []any) *types.VerifiedAcc return result } -func expandVerifiedAccessLogCloudWatchLogs(cloudWatchLogs []any) *types.VerifiedAccessLogCloudWatchLogsDestinationOptions { +func expandVerifiedAccessLogCloudWatchLogs(cloudWatchLogs []any) *awstypes.VerifiedAccessLogCloudWatchLogsDestinationOptions { if len(cloudWatchLogs) == 0 || cloudWatchLogs[0] == nil { return nil } @@ -310,7 +310,7 @@ func expandVerifiedAccessLogCloudWatchLogs(cloudWatchLogs []any) *types.Verified return nil } - result := &types.VerifiedAccessLogCloudWatchLogsDestinationOptions{ + result := &awstypes.VerifiedAccessLogCloudWatchLogsDestinationOptions{ Enabled: aws.Bool(tfMap[names.AttrEnabled].(bool)), } @@ -321,7 +321,7 @@ func expandVerifiedAccessLogCloudWatchLogs(cloudWatchLogs []any) *types.Verified return result } -func expandVerifiedAccessLogKinesisDataFirehose(kinesisDataFirehose []any) *types.VerifiedAccessLogKinesisDataFirehoseDestinationOptions { +func 
expandVerifiedAccessLogKinesisDataFirehose(kinesisDataFirehose []any) *awstypes.VerifiedAccessLogKinesisDataFirehoseDestinationOptions { if len(kinesisDataFirehose) == 0 || kinesisDataFirehose[0] == nil { return nil } @@ -331,7 +331,7 @@ func expandVerifiedAccessLogKinesisDataFirehose(kinesisDataFirehose []any) *type return nil } - result := &types.VerifiedAccessLogKinesisDataFirehoseDestinationOptions{ + result := &awstypes.VerifiedAccessLogKinesisDataFirehoseDestinationOptions{ Enabled: aws.Bool(tfMap[names.AttrEnabled].(bool)), } @@ -342,7 +342,7 @@ func expandVerifiedAccessLogKinesisDataFirehose(kinesisDataFirehose []any) *type return result } -func expandVerifiedAccessLogS3(s3 []any) *types.VerifiedAccessLogS3DestinationOptions { +func expandVerifiedAccessLogS3(s3 []any) *awstypes.VerifiedAccessLogS3DestinationOptions { if len(s3) == 0 || s3[0] == nil { return nil } @@ -352,7 +352,7 @@ func expandVerifiedAccessLogS3(s3 []any) *types.VerifiedAccessLogS3DestinationOp return nil } - result := &types.VerifiedAccessLogS3DestinationOptions{ + result := &awstypes.VerifiedAccessLogS3DestinationOptions{ Enabled: aws.Bool(tfMap[names.AttrEnabled].(bool)), } @@ -375,7 +375,7 @@ func expandVerifiedAccessLogS3(s3 []any) *types.VerifiedAccessLogS3DestinationOp return result } -func flattenVerifiedAccessInstanceAccessLogs(apiObject *types.VerifiedAccessLogs) []any { +func flattenVerifiedAccessInstanceAccessLogs(apiObject *awstypes.VerifiedAccessLogs) []any { tfMap := map[string]any{} if v := apiObject.CloudWatchLogs; v != nil { @@ -401,7 +401,7 @@ func flattenVerifiedAccessInstanceAccessLogs(apiObject *types.VerifiedAccessLogs return []any{tfMap} } -func flattenVerifiedAccessLogCloudWatchLogs(apiObject *types.VerifiedAccessLogCloudWatchLogsDestination) []any { +func flattenVerifiedAccessLogCloudWatchLogs(apiObject *awstypes.VerifiedAccessLogCloudWatchLogsDestination) []any { tfMap := map[string]any{ names.AttrEnabled: apiObject.Enabled, } @@ -413,7 +413,7 @@ func 
flattenVerifiedAccessLogCloudWatchLogs(apiObject *types.VerifiedAccessLogCl return []any{tfMap} } -func flattenVerifiedAccessLogKinesisDataFirehose(apiObject *types.VerifiedAccessLogKinesisDataFirehoseDestination) []any { +func flattenVerifiedAccessLogKinesisDataFirehose(apiObject *awstypes.VerifiedAccessLogKinesisDataFirehoseDestination) []any { tfMap := map[string]any{ names.AttrEnabled: apiObject.Enabled, } @@ -425,7 +425,7 @@ func flattenVerifiedAccessLogKinesisDataFirehose(apiObject *types.VerifiedAccess return []any{tfMap} } -func flattenVerifiedAccessLogS3(apiObject *types.VerifiedAccessLogS3Destination) []any { +func flattenVerifiedAccessLogS3(apiObject *awstypes.VerifiedAccessLogS3Destination) []any { tfMap := map[string]any{ names.AttrEnabled: apiObject.Enabled, } diff --git a/internal/service/ec2/verifiedaccess_instance_logging_configuration_test.go b/internal/service/ec2/verifiedaccess_instance_logging_configuration_test.go index 5047831de909..5fc732f42c46 100644 --- a/internal/service/ec2/verifiedaccess_instance_logging_configuration_test.go +++ b/internal/service/ec2/verifiedaccess_instance_logging_configuration_test.go @@ -10,7 +10,7 @@ import ( "testing" "github.com/aws/aws-sdk-go-v2/service/ec2" - "github.com/aws/aws-sdk-go-v2/service/ec2/types" + awstypes "github.com/aws/aws-sdk-go-v2/service/ec2/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" @@ -24,7 +24,7 @@ import ( func testAccVerifiedAccessInstanceLoggingConfiguration_accessLogsIncludeTrustContext(t *testing.T, semaphore tfsync.Semaphore) { ctx := acctest.Context(t) - var v types.VerifiedAccessInstanceLoggingConfiguration + var v awstypes.VerifiedAccessInstanceLoggingConfiguration resourceName := "aws_verifiedaccess_instance_logging_configuration.test" instanceResourceName := "aws_verifiedaccess_instance.test" 
include_trust_context_original := true @@ -70,7 +70,7 @@ func testAccVerifiedAccessInstanceLoggingConfiguration_accessLogsIncludeTrustCon func testAccVerifiedAccessInstanceLoggingConfiguration_accessLogsLogVersion(t *testing.T, semaphore tfsync.Semaphore) { ctx := acctest.Context(t) - var v types.VerifiedAccessInstanceLoggingConfiguration + var v awstypes.VerifiedAccessInstanceLoggingConfiguration resourceName := "aws_verifiedaccess_instance_logging_configuration.test" instanceResourceName := "aws_verifiedaccess_instance.test" log_version_original := "ocsf-0.1" @@ -116,7 +116,7 @@ func testAccVerifiedAccessInstanceLoggingConfiguration_accessLogsLogVersion(t *t func testAccVerifiedAccessInstanceLoggingConfiguration_accessLogsCloudWatchLogs(t *testing.T, semaphore tfsync.Semaphore) { ctx := acctest.Context(t) - var v types.VerifiedAccessInstanceLoggingConfiguration + var v awstypes.VerifiedAccessInstanceLoggingConfiguration resourceName := "aws_verifiedaccess_instance_logging_configuration.test" instanceResourceName := "aws_verifiedaccess_instance.test" logGroupName := "aws_cloudwatch_log_group.test" @@ -166,7 +166,7 @@ func testAccVerifiedAccessInstanceLoggingConfiguration_accessLogsCloudWatchLogs( func testAccVerifiedAccessInstanceLoggingConfiguration_accessLogsKinesisDataFirehose(t *testing.T, semaphore tfsync.Semaphore) { ctx := acctest.Context(t) - var v types.VerifiedAccessInstanceLoggingConfiguration + var v awstypes.VerifiedAccessInstanceLoggingConfiguration resourceName := "aws_verifiedaccess_instance_logging_configuration.test" instanceResourceName := "aws_verifiedaccess_instance.test" kinesisStreamName := "aws_kinesis_firehose_delivery_stream.test" @@ -219,7 +219,7 @@ func testAccVerifiedAccessInstanceLoggingConfiguration_accessLogsKinesisDataFire func testAccVerifiedAccessInstanceLoggingConfiguration_accessLogsS3(t *testing.T, semaphore tfsync.Semaphore) { ctx := acctest.Context(t) - var v types.VerifiedAccessInstanceLoggingConfiguration + var v 
awstypes.VerifiedAccessInstanceLoggingConfiguration resourceName := "aws_verifiedaccess_instance_logging_configuration.test" instanceResourceName := "aws_verifiedaccess_instance.test" bucketName := "aws_s3_bucket.test" @@ -277,7 +277,7 @@ func testAccVerifiedAccessInstanceLoggingConfiguration_accessLogsS3(t *testing.T func testAccVerifiedAccessInstanceLoggingConfiguration_accessLogsCloudWatchLogsKinesisDataFirehoseS3(t *testing.T, semaphore tfsync.Semaphore) { ctx := acctest.Context(t) - var v types.VerifiedAccessInstanceLoggingConfiguration + var v awstypes.VerifiedAccessInstanceLoggingConfiguration resourceName := "aws_verifiedaccess_instance_logging_configuration.test" instanceResourceName := "aws_verifiedaccess_instance.test" logGroupName := "aws_cloudwatch_log_group.test" @@ -380,7 +380,7 @@ func testAccVerifiedAccessInstanceLoggingConfiguration_disappears(t *testing.T, // note: disappears test does not test the logging configuration since the instance is deleted // the logging configuration cannot be deleted, rather, the boolean flags and logging version are reset to the default values ctx := acctest.Context(t) - var v types.VerifiedAccessInstanceLoggingConfiguration + var v awstypes.VerifiedAccessInstanceLoggingConfiguration resourceName := "aws_verifiedaccess_instance.test" resource.ParallelTest(t, resource.TestCase{ @@ -405,7 +405,7 @@ func testAccVerifiedAccessInstanceLoggingConfiguration_disappears(t *testing.T, }) } -func testAccCheckVerifiedAccessInstanceLoggingConfigurationExists(ctx context.Context, n string, v *types.VerifiedAccessInstanceLoggingConfiguration) resource.TestCheckFunc { +func testAccCheckVerifiedAccessInstanceLoggingConfigurationExists(ctx context.Context, n string, v *awstypes.VerifiedAccessInstanceLoggingConfiguration) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] if !ok { diff --git a/internal/service/ec2/verifiedaccess_instance_test.go 
b/internal/service/ec2/verifiedaccess_instance_test.go index 6dcc9a93311b..58d36dce4ba7 100644 --- a/internal/service/ec2/verifiedaccess_instance_test.go +++ b/internal/service/ec2/verifiedaccess_instance_test.go @@ -11,7 +11,7 @@ import ( "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/ec2" - "github.com/aws/aws-sdk-go-v2/service/ec2/types" + awstypes "github.com/aws/aws-sdk-go-v2/service/ec2/types" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" "github.com/hashicorp/terraform-provider-aws/internal/acctest" @@ -24,7 +24,7 @@ import ( func testAccVerifiedAccessInstance_basic(t *testing.T, semaphore tfsync.Semaphore) { ctx := acctest.Context(t) - var v types.VerifiedAccessInstance + var v awstypes.VerifiedAccessInstance resourceName := "aws_verifiedaccess_instance.test" resource.ParallelTest(t, resource.TestCase{ @@ -58,7 +58,7 @@ func testAccVerifiedAccessInstance_basic(t *testing.T, semaphore tfsync.Semaphor func testAccVerifiedAccessInstance_description(t *testing.T, semaphore tfsync.Semaphore) { ctx := acctest.Context(t) - var v1, v2 types.VerifiedAccessInstance + var v1, v2 awstypes.VerifiedAccessInstance resourceName := "aws_verifiedaccess_instance.test" originalDescription := "original description" updatedDescription := "updated description" @@ -100,7 +100,7 @@ func testAccVerifiedAccessInstance_description(t *testing.T, semaphore tfsync.Se func testAccVerifiedAccessInstance_fipsEnabled(t *testing.T, semaphore tfsync.Semaphore) { ctx := acctest.Context(t) - var v1, v2 types.VerifiedAccessInstance + var v1, v2 awstypes.VerifiedAccessInstance resourceName := "aws_verifiedaccess_instance.test" originalFipsEnabled := true updatedFipsEnabled := false @@ -142,7 +142,7 @@ func testAccVerifiedAccessInstance_fipsEnabled(t *testing.T, semaphore tfsync.Se func testAccVerifiedAccessInstance_disappears(t *testing.T, semaphore tfsync.Semaphore) { ctx := acctest.Context(t) - 
var v types.VerifiedAccessInstance + var v awstypes.VerifiedAccessInstance resourceName := "aws_verifiedaccess_instance.test" resource.ParallelTest(t, resource.TestCase{ @@ -169,7 +169,7 @@ func testAccVerifiedAccessInstance_disappears(t *testing.T, semaphore tfsync.Sem func testAccVerifiedAccessInstance_tags(t *testing.T, semaphore tfsync.Semaphore) { ctx := acctest.Context(t) - var v1, v2, v3 types.VerifiedAccessInstance + var v1, v2, v3 awstypes.VerifiedAccessInstance resourceName := "aws_verifiedaccess_instance.test" resource.ParallelTest(t, resource.TestCase{ @@ -221,7 +221,7 @@ func testAccVerifiedAccessInstance_tags(t *testing.T, semaphore tfsync.Semaphore func testAccVerifiedAccessInstance_cidrEndpointsCustomSubDomain(t *testing.T, semaphore tfsync.Semaphore) { ctx := acctest.Context(t) - var v1 types.VerifiedAccessInstance + var v1 awstypes.VerifiedAccessInstance resourceName := "aws_verifiedaccess_instance.test" subDomainName := "test.demo.com" rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) @@ -252,7 +252,7 @@ func testAccVerifiedAccessInstance_cidrEndpointsCustomSubDomain(t *testing.T, se }) } -func testAccCheckVerifiedAccessInstanceNotRecreated(before, after *types.VerifiedAccessInstance) resource.TestCheckFunc { +func testAccCheckVerifiedAccessInstanceNotRecreated(before, after *awstypes.VerifiedAccessInstance) resource.TestCheckFunc { return func(s *terraform.State) error { if before, after := aws.ToString(before.VerifiedAccessInstanceId), aws.ToString(after.VerifiedAccessInstanceId); before != after { return fmt.Errorf("Verified Access Instance (%s/%s) recreated", before, after) @@ -262,7 +262,7 @@ func testAccCheckVerifiedAccessInstanceNotRecreated(before, after *types.Verifie } } -func testAccCheckVerifiedAccessInstanceRecreated(before, after *types.VerifiedAccessInstance) resource.TestCheckFunc { +func testAccCheckVerifiedAccessInstanceRecreated(before, after *awstypes.VerifiedAccessInstance) resource.TestCheckFunc { return func(s 
*terraform.State) error { if before, after := aws.ToString(before.VerifiedAccessInstanceId), aws.ToString(after.VerifiedAccessInstanceId); before == after { return fmt.Errorf("Verified Access Instance (%s) not recreated", before) @@ -272,7 +272,7 @@ func testAccCheckVerifiedAccessInstanceRecreated(before, after *types.VerifiedAc } } -func testAccCheckVerifiedAccessInstanceExists(ctx context.Context, n string, v *types.VerifiedAccessInstance) resource.TestCheckFunc { +func testAccCheckVerifiedAccessInstanceExists(ctx context.Context, n string, v *awstypes.VerifiedAccessInstance) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] if !ok { diff --git a/internal/service/ec2/verifiedaccess_trust_provider.go b/internal/service/ec2/verifiedaccess_trust_provider.go index 613da5d3f236..d92b905c7657 100644 --- a/internal/service/ec2/verifiedaccess_trust_provider.go +++ b/internal/service/ec2/verifiedaccess_trust_provider.go @@ -10,7 +10,7 @@ import ( "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/ec2" - "github.com/aws/aws-sdk-go-v2/service/ec2/types" + awstypes "github.com/aws/aws-sdk-go-v2/service/ec2/types" "github.com/hashicorp/aws-sdk-go-base/v2/tfawserr" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" sdkid "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" @@ -68,7 +68,7 @@ func resourceVerifiedAccessTrustProvider() *schema.Resource { Type: schema.TypeString, ForceNew: true, Optional: true, - ValidateDiagFunc: enum.Validate[types.DeviceTrustProviderType](), + ValidateDiagFunc: enum.Validate[awstypes.DeviceTrustProviderType](), }, "native_application_oidc_options": { Type: schema.TypeList, @@ -200,13 +200,13 @@ func resourceVerifiedAccessTrustProvider() *schema.Resource { Type: schema.TypeString, ForceNew: true, Required: true, - ValidateDiagFunc: enum.Validate[types.TrustProviderType](), + ValidateDiagFunc: enum.Validate[awstypes.TrustProviderType](), }, "user_trust_provider_type": 
{ Type: schema.TypeString, ForceNew: true, Optional: true, - ValidateDiagFunc: enum.Validate[types.UserTrustProviderType](), + ValidateDiagFunc: enum.Validate[awstypes.UserTrustProviderType](), }, }, } @@ -219,8 +219,8 @@ func resourceVerifiedAccessTrustProviderCreate(ctx context.Context, d *schema.Re input := ec2.CreateVerifiedAccessTrustProviderInput{ ClientToken: aws.String(sdkid.UniqueId()), PolicyReferenceName: aws.String(d.Get("policy_reference_name").(string)), - TagSpecifications: getTagSpecificationsIn(ctx, types.ResourceTypeVerifiedAccessTrustProvider), - TrustProviderType: types.TrustProviderType(d.Get("trust_provider_type").(string)), + TagSpecifications: getTagSpecificationsIn(ctx, awstypes.ResourceTypeVerifiedAccessTrustProvider), + TrustProviderType: awstypes.TrustProviderType(d.Get("trust_provider_type").(string)), } if v, ok := d.GetOk(names.AttrDescription); ok { @@ -232,7 +232,7 @@ func resourceVerifiedAccessTrustProviderCreate(ctx context.Context, d *schema.Re } if v, ok := d.GetOk("device_trust_provider_type"); ok { - input.DeviceTrustProviderType = types.DeviceTrustProviderType(v.(string)) + input.DeviceTrustProviderType = awstypes.DeviceTrustProviderType(v.(string)) } if v, ok := d.GetOk("native_application_oidc_options"); ok && len(v.([]any)) > 0 && v.([]any)[0] != nil { @@ -248,7 +248,7 @@ func resourceVerifiedAccessTrustProviderCreate(ctx context.Context, d *schema.Re } if v, ok := d.GetOk("user_trust_provider_type"); ok { - input.UserTrustProviderType = types.UserTrustProviderType(v.(string)) + input.UserTrustProviderType = awstypes.UserTrustProviderType(v.(string)) } output, err := conn.CreateVerifiedAccessTrustProvider(ctx, &input) @@ -372,7 +372,7 @@ func resourceVerifiedAccessTrustProviderDelete(ctx context.Context, d *schema.Re return diags } -func flattenDeviceOptions(apiObject *types.DeviceOptions) []any { +func flattenDeviceOptions(apiObject *awstypes.DeviceOptions) []any { if apiObject == nil { return nil } @@ -386,7 +386,7 @@ 
func flattenDeviceOptions(apiObject *types.DeviceOptions) []any { return []any{tfMap} } -func flattenNativeApplicationOIDCOptions(apiObject *types.NativeApplicationOidcOptions, clientSecret string) []any { +func flattenNativeApplicationOIDCOptions(apiObject *awstypes.NativeApplicationOidcOptions, clientSecret string) []any { if apiObject == nil { return nil } @@ -426,7 +426,7 @@ func flattenNativeApplicationOIDCOptions(apiObject *types.NativeApplicationOidcO return []any{tfMap} } -func flattenOIDCOptions(apiObject *types.OidcOptions, clientSecret string) []any { +func flattenOIDCOptions(apiObject *awstypes.OidcOptions, clientSecret string) []any { if apiObject == nil { return nil } @@ -462,12 +462,12 @@ func flattenOIDCOptions(apiObject *types.OidcOptions, clientSecret string) []any return []any{tfMap} } -func expandCreateVerifiedAccessTrustProviderDeviceOptions(tfMap map[string]any) *types.CreateVerifiedAccessTrustProviderDeviceOptions { +func expandCreateVerifiedAccessTrustProviderDeviceOptions(tfMap map[string]any) *awstypes.CreateVerifiedAccessTrustProviderDeviceOptions { if tfMap == nil { return nil } - apiObject := &types.CreateVerifiedAccessTrustProviderDeviceOptions{} + apiObject := &awstypes.CreateVerifiedAccessTrustProviderDeviceOptions{} if v, ok := tfMap["tenant_id"].(string); ok && v != "" { apiObject.TenantId = aws.String(v) @@ -476,12 +476,12 @@ func expandCreateVerifiedAccessTrustProviderDeviceOptions(tfMap map[string]any) return apiObject } -func expandCreateVerifiedAccessTrustProviderOIDCOptions(tfMap map[string]any) *types.CreateVerifiedAccessTrustProviderOidcOptions { +func expandCreateVerifiedAccessTrustProviderOIDCOptions(tfMap map[string]any) *awstypes.CreateVerifiedAccessTrustProviderOidcOptions { if tfMap == nil { return nil } - apiObject := &types.CreateVerifiedAccessTrustProviderOidcOptions{} + apiObject := &awstypes.CreateVerifiedAccessTrustProviderOidcOptions{} if v, ok := tfMap["authorization_endpoint"].(string); ok && v != "" { 
apiObject.AuthorizationEndpoint = aws.String(v) @@ -514,12 +514,12 @@ func expandCreateVerifiedAccessTrustProviderOIDCOptions(tfMap map[string]any) *t return apiObject } -func expandCreateVerifiedAccessTrustProviderNativeApplicationOIDCOptions(tfMap map[string]any) *types.CreateVerifiedAccessNativeApplicationOidcOptions { +func expandCreateVerifiedAccessTrustProviderNativeApplicationOIDCOptions(tfMap map[string]any) *awstypes.CreateVerifiedAccessNativeApplicationOidcOptions { if tfMap == nil { return nil } - apiObject := &types.CreateVerifiedAccessNativeApplicationOidcOptions{} + apiObject := &awstypes.CreateVerifiedAccessNativeApplicationOidcOptions{} if v, ok := tfMap["authorization_endpoint"].(string); ok && v != "" { apiObject.AuthorizationEndpoint = aws.String(v) @@ -556,12 +556,12 @@ func expandCreateVerifiedAccessTrustProviderNativeApplicationOIDCOptions(tfMap m return apiObject } -func expandModifyVerifiedAccessTrustProviderNativeApplicationOIDCOptions(tfMap map[string]any) *types.ModifyVerifiedAccessNativeApplicationOidcOptions { +func expandModifyVerifiedAccessTrustProviderNativeApplicationOIDCOptions(tfMap map[string]any) *awstypes.ModifyVerifiedAccessNativeApplicationOidcOptions { if tfMap == nil { return nil } - apiObject := &types.ModifyVerifiedAccessNativeApplicationOidcOptions{} + apiObject := &awstypes.ModifyVerifiedAccessNativeApplicationOidcOptions{} if v, ok := tfMap[names.AttrScope].(string); ok && v != "" { apiObject.Scope = aws.String(v) @@ -570,12 +570,12 @@ func expandModifyVerifiedAccessTrustProviderNativeApplicationOIDCOptions(tfMap m return apiObject } -func expandModifyVerifiedAccessTrustProviderOIDCOptions(tfMap map[string]any) *types.ModifyVerifiedAccessTrustProviderOidcOptions { +func expandModifyVerifiedAccessTrustProviderOIDCOptions(tfMap map[string]any) *awstypes.ModifyVerifiedAccessTrustProviderOidcOptions { if tfMap == nil { return nil } - apiObject := &types.ModifyVerifiedAccessTrustProviderOidcOptions{} + apiObject := 
&awstypes.ModifyVerifiedAccessTrustProviderOidcOptions{} if v, ok := tfMap[names.AttrScope].(string); ok && v != "" { apiObject.Scope = aws.String(v) diff --git a/internal/service/ec2/verifiedaccess_trust_provider_test.go b/internal/service/ec2/verifiedaccess_trust_provider_test.go index 7c3b42af458e..3eadd9f2646a 100644 --- a/internal/service/ec2/verifiedaccess_trust_provider_test.go +++ b/internal/service/ec2/verifiedaccess_trust_provider_test.go @@ -9,7 +9,7 @@ import ( "testing" "github.com/aws/aws-sdk-go-v2/service/ec2" - "github.com/aws/aws-sdk-go-v2/service/ec2/types" + awstypes "github.com/aws/aws-sdk-go-v2/service/ec2/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" @@ -22,7 +22,7 @@ import ( func TestAccVerifiedAccessTrustProvider_basic(t *testing.T) { ctx := acctest.Context(t) - var v types.VerifiedAccessTrustProvider + var v awstypes.VerifiedAccessTrustProvider resourceName := "aws_verifiedaccess_trust_provider.test" trustProviderType := "user" @@ -62,7 +62,7 @@ func TestAccVerifiedAccessTrustProvider_basic(t *testing.T) { func TestAccVerifiedAccessTrustProvider_deviceOptions(t *testing.T) { ctx := acctest.Context(t) - var v types.VerifiedAccessTrustProvider + var v awstypes.VerifiedAccessTrustProvider resourceName := "aws_verifiedaccess_trust_provider.test" trustProviderType := "device" @@ -101,7 +101,7 @@ func TestAccVerifiedAccessTrustProvider_deviceOptions(t *testing.T) { func TestAccVerifiedAccessTrustProvider_disappears(t *testing.T) { ctx := acctest.Context(t) - var v types.VerifiedAccessTrustProvider + var v awstypes.VerifiedAccessTrustProvider resourceName := "aws_verifiedaccess_trust_provider.test" trustProviderType := "user" @@ -133,7 +133,7 @@ func TestAccVerifiedAccessTrustProvider_disappears(t *testing.T) { func TestAccVerifiedAccessTrustProvider_oidcOptions(t *testing.T) { ctx := 
acctest.Context(t) - var v types.VerifiedAccessTrustProvider + var v awstypes.VerifiedAccessTrustProvider resourceName := "aws_verifiedaccess_trust_provider.test" trustProviderType := "user" @@ -184,7 +184,7 @@ func TestAccVerifiedAccessTrustProvider_oidcOptions(t *testing.T) { func TestAccVerifiedAccessTrustProvider_tags(t *testing.T) { ctx := acctest.Context(t) - var v types.VerifiedAccessTrustProvider + var v awstypes.VerifiedAccessTrustProvider resourceName := "aws_verifiedaccess_trust_provider.test" trustProviderType := "user" @@ -236,7 +236,7 @@ func TestAccVerifiedAccessTrustProvider_tags(t *testing.T) { }) } -func testAccCheckVerifiedAccessTrustProviderExists(ctx context.Context, n string, v *types.VerifiedAccessTrustProvider) resource.TestCheckFunc { +func testAccCheckVerifiedAccessTrustProviderExists(ctx context.Context, n string, v *awstypes.VerifiedAccessTrustProvider) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] if !ok { diff --git a/internal/service/ec2/vpc_.go b/internal/service/ec2/vpc_.go index 5abecdc1b341..03674b08cde4 100644 --- a/internal/service/ec2/vpc_.go +++ b/internal/service/ec2/vpc_.go @@ -12,22 +12,36 @@ import ( "time" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/arn" "github.com/aws/aws-sdk-go-v2/service/ec2" - "github.com/aws/aws-sdk-go-v2/service/ec2/types" + awstypes "github.com/aws/aws-sdk-go-v2/service/ec2/types" "github.com/hashicorp/aws-sdk-go-base/v2/tfawserr" + fdiag "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/list" + listschema "github.com/hashicorp/terraform-plugin-framework/list/schema" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" 
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/enum" + "github.com/hashicorp/terraform-provider-aws/internal/errs/fwdiag" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" + "github.com/hashicorp/terraform-provider-aws/internal/framework" + fwflex "github.com/hashicorp/terraform-provider-aws/internal/framework/flex" + fwtypes "github.com/hashicorp/terraform-provider-aws/internal/framework/types" + "github.com/hashicorp/terraform-provider-aws/internal/logging" + "github.com/hashicorp/terraform-provider-aws/internal/provider/sdkv2/importer" + "github.com/hashicorp/terraform-provider-aws/internal/retry" tfslices "github.com/hashicorp/terraform-provider-aws/internal/slices" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + inttypes "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/internal/verify" "github.com/hashicorp/terraform-provider-aws/names" + "go.opentelemetry.io/contrib/instrumentation/github.com/aws/aws-sdk-go-v2/otelaws" + "go.opentelemetry.io/otel/attribute" ) const ( @@ -50,8 +64,11 @@ var ( // @SDKResource("aws_vpc", name="VPC") // @Tags(identifierAttribute="id") +// @IdentityAttribute("id") +// @CustomImport // @Testing(existsType="github.com/aws/aws-sdk-go-v2/service/ec2/types;awstypes;awstypes.Vpc") // @Testing(generator=false) +// @Testing(preIdentityVersion="v6.15.0") func resourceVPC() *schema.Resource { //lintignore:R011 return &schema.Resource{ @@ -123,8 +140,8 @@ func resourceVPC() *schema.Resource { "instance_tenancy": { Type: schema.TypeString, Optional: true, - Default: types.TenancyDefault, - ValidateFunc: validation.StringInSlice(enum.Slice(types.TenancyDefault, types.TenancyDedicated), false), 
+ Default: awstypes.TenancyDefault, + ValidateFunc: validation.StringInSlice(enum.Slice(awstypes.TenancyDefault, awstypes.TenancyDedicated), false), }, "ipv4_ipam_pool_id": { Type: schema.TypeString, @@ -183,14 +200,22 @@ func resourceVPC() *schema.Resource { } } +// @SDKListResource("aws_vpc") +func vpcResourceAsListResource() inttypes.ListResourceForSDK { + l := vpcListResource{} + l.SetResourceSchema(resourceVPC()) + + return &l +} + func resourceVPCCreate(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { var diags diag.Diagnostics conn := meta.(*conns.AWSClient).EC2Client(ctx) input := &ec2.CreateVpcInput{ AmazonProvidedIpv6CidrBlock: aws.Bool(d.Get("assign_generated_ipv6_cidr_block").(bool)), - InstanceTenancy: types.Tenancy(d.Get("instance_tenancy").(string)), - TagSpecifications: getTagSpecificationsIn(ctx, types.ResourceTypeVpc), + InstanceTenancy: awstypes.Tenancy(d.Get("instance_tenancy").(string)), + TagSpecifications: getTagSpecificationsIn(ctx, awstypes.ResourceTypeVpc), } if v, ok := d.GetOk(names.AttrCIDRBlock); ok { @@ -222,7 +247,7 @@ func resourceVPCCreate(ctx context.Context, d *schema.ResourceData, meta any) di } // "UnsupportedOperation: The operation AllocateIpamPoolCidr is not supported. Account 123456789012 is not monitored by IPAM ipam-07b079e3392782a55." 
- outputRaw, err := tfresource.RetryWhenAWSErrMessageContains(ctx, ec2PropagationTimeout, func() (any, error) { + outputRaw, err := tfresource.RetryWhenAWSErrMessageContains(ctx, ec2PropagationTimeout, func(ctx context.Context) (any, error) { return conn.CreateVpc(ctx, input) }, errCodeUnsupportedOperation, "is not monitored by IPAM") @@ -264,9 +289,10 @@ func resourceVPCCreate(ctx context.Context, d *schema.ResourceData, meta any) di func resourceVPCRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).EC2Client(ctx) + c := meta.(*conns.AWSClient) + conn := c.EC2Client(ctx) - outputRaw, err := tfresource.RetryWhenNewResourceNotFound(ctx, ec2PropagationTimeout, func() (any, error) { + vpc, err := tfresource.RetryWhenNewResourceNotFound(ctx, ec2PropagationTimeout, func(ctx context.Context) (*awstypes.Vpc, error) { return findVPCByID(ctx, conn, d.Id()) }, d.IsNewResource()) @@ -280,109 +306,10 @@ func resourceVPCRead(ctx context.Context, d *schema.ResourceData, meta any) diag return sdkdiag.AppendErrorf(diags, "reading EC2 VPC (%s): %s", d.Id(), err) } - vpc := outputRaw.(*types.Vpc) - - ownerID := aws.ToString(vpc.OwnerId) - arn := arn.ARN{ - Partition: meta.(*conns.AWSClient).Partition(ctx), - Service: names.EC2, - Region: meta.(*conns.AWSClient).Region(ctx), - AccountID: ownerID, - Resource: fmt.Sprintf("vpc/%s", d.Id()), - }.String() - d.Set(names.AttrARN, arn) - d.Set(names.AttrCIDRBlock, vpc.CidrBlock) - d.Set("dhcp_options_id", vpc.DhcpOptionsId) - d.Set("instance_tenancy", vpc.InstanceTenancy) - d.Set(names.AttrOwnerID, ownerID) - - if v, err := tfresource.RetryWhenNewResourceNotFound(ctx, ec2PropagationTimeout, func() (any, error) { - return findVPCAttribute(ctx, conn, d.Id(), types.VpcAttributeNameEnableDnsHostnames) - }, d.IsNewResource()); err != nil { - return sdkdiag.AppendErrorf(diags, "reading EC2 VPC (%s) Attribute (%s): %s", d.Id(), 
types.VpcAttributeNameEnableDnsHostnames, err) - } else { - d.Set("enable_dns_hostnames", v) - } - - if v, err := tfresource.RetryWhenNewResourceNotFound(ctx, ec2PropagationTimeout, func() (any, error) { - return findVPCAttribute(ctx, conn, d.Id(), types.VpcAttributeNameEnableDnsSupport) - }, d.IsNewResource()); err != nil { - return sdkdiag.AppendErrorf(diags, "reading EC2 VPC (%s) Attribute (%s): %s", d.Id(), types.VpcAttributeNameEnableDnsSupport, err) - } else { - d.Set("enable_dns_support", v) - } - - if v, err := tfresource.RetryWhenNewResourceNotFound(ctx, ec2PropagationTimeout, func() (any, error) { - return findVPCAttribute(ctx, conn, d.Id(), types.VpcAttributeNameEnableNetworkAddressUsageMetrics) - }, d.IsNewResource()); err != nil { - return sdkdiag.AppendErrorf(diags, "reading EC2 VPC (%s) Attribute (%s): %s", d.Id(), types.VpcAttributeNameEnableNetworkAddressUsageMetrics, err) - } else { - d.Set("enable_network_address_usage_metrics", v) - } - - if v, err := findVPCDefaultNetworkACL(ctx, conn, d.Id()); err != nil { - log.Printf("[WARN] Error reading EC2 VPC (%s) default NACL: %s", d.Id(), err) - } else { - d.Set("default_network_acl_id", v.NetworkAclId) - } - - if v, err := findVPCMainRouteTable(ctx, conn, d.Id()); err != nil { - log.Printf("[WARN] Error reading EC2 VPC (%s) main Route Table: %s", d.Id(), err) - d.Set("default_route_table_id", nil) - d.Set("main_route_table_id", nil) - } else { - d.Set("default_route_table_id", v.RouteTableId) - d.Set("main_route_table_id", v.RouteTableId) - } - - if v, err := findVPCDefaultSecurityGroup(ctx, conn, d.Id()); err != nil { - log.Printf("[WARN] Error reading EC2 VPC (%s) default Security Group: %s", d.Id(), err) - d.Set("default_security_group_id", nil) - } else { - d.Set("default_security_group_id", v.GroupId) - } - - if ipv6CIDRBlockAssociation := defaultIPv6CIDRBlockAssociation(vpc, d.Get("ipv6_association_id").(string)); ipv6CIDRBlockAssociation == nil { - d.Set("assign_generated_ipv6_cidr_block", nil) 
- d.Set("ipv6_association_id", nil) - d.Set("ipv6_cidr_block", nil) - d.Set("ipv6_cidr_block_network_border_group", nil) - d.Set("ipv6_ipam_pool_id", nil) - d.Set("ipv6_netmask_length", nil) - } else { - cidrBlock := aws.ToString(ipv6CIDRBlockAssociation.Ipv6CidrBlock) - ipv6PoolID := aws.ToString(ipv6CIDRBlockAssociation.Ipv6Pool) - isAmazonIPv6Pool := ipv6PoolID == amazonIPv6PoolID - d.Set("assign_generated_ipv6_cidr_block", isAmazonIPv6Pool) - d.Set("ipv6_association_id", ipv6CIDRBlockAssociation.AssociationId) - d.Set("ipv6_cidr_block", cidrBlock) - d.Set("ipv6_cidr_block_network_border_group", ipv6CIDRBlockAssociation.NetworkBorderGroup) - if isAmazonIPv6Pool { - d.Set("ipv6_ipam_pool_id", nil) - } else { - if ipv6PoolID == ipamManagedIPv6PoolID { - d.Set("ipv6_ipam_pool_id", d.Get("ipv6_ipam_pool_id")) - } else { - d.Set("ipv6_ipam_pool_id", ipv6PoolID) - } - } - d.Set("ipv6_netmask_length", nil) - if ipv6PoolID != "" && !isAmazonIPv6Pool { - parts := strings.Split(cidrBlock, "/") - if len(parts) == 2 { - if v, err := strconv.Atoi(parts[1]); err == nil { - d.Set("ipv6_netmask_length", v) - } else { - log.Printf("[WARN] Unable to parse CIDR (%s) netmask length: %s", cidrBlock, err) - } - } else { - log.Printf("[WARN] Invalid CIDR block format: %s", cidrBlock) - } - } + if err := resourceVPCFlatten(ctx, c, vpc, d); err != nil { + diags = sdkdiag.AppendFromErr(diags, err) } - setTagsOut(ctx, vpc.Tags) - return diags } @@ -458,7 +385,7 @@ func resourceVPCDelete(ctx context.Context, d *schema.ResourceData, meta any) di } log.Printf("[INFO] Deleting EC2 VPC: %s", d.Id()) - _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, d.Timeout(schema.TimeoutDelete), func() (any, error) { + _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, d.Timeout(schema.TimeoutDelete), func(ctx context.Context) (any, error) { return conn.DeleteVpc(ctx, input) }, errCodeDependencyViolation) @@ -470,7 +397,7 @@ func resourceVPCDelete(ctx context.Context, d *schema.ResourceData, meta any) 
di return sdkdiag.AppendErrorf(diags, "deleting EC2 VPC (%s): %s", d.Id(), err) } - _, err = tfresource.RetryUntilNotFound(ctx, d.Timeout(schema.TimeoutDelete), func() (any, error) { + _, err = tfresource.RetryUntilNotFound(ctx, d.Timeout(schema.TimeoutDelete), func(ctx context.Context) (any, error) { return findVPCByID(ctx, conn, d.Id()) }) @@ -492,8 +419,8 @@ func resourceVPCDelete(ctx context.Context, d *schema.ResourceData, meta any) di const ( timeout = 35 * time.Minute // IPAM eventual consistency. It can take ~30 min to release allocations. ) - _, err := tfresource.RetryUntilNotFound(ctx, timeout, func() (any, error) { - return findIPAMPoolAllocationsForVPC(ctx, conn, ipamPoolID, d.Id()) + _, err := tfresource.RetryUntilNotFound(ctx, timeout, func(ctx context.Context) (any, error) { + return findIPAMPoolAllocationForResource(ctx, conn, ipamPoolID, d.Id()) }) if err != nil { @@ -505,23 +432,29 @@ func resourceVPCDelete(ctx context.Context, d *schema.ResourceData, meta any) di } func resourceVPCImport(ctx context.Context, d *schema.ResourceData, meta any) ([]*schema.ResourceData, error) { + identitySpec := importer.IdentitySpec(ctx) + if err := importer.RegionalSingleParameterized(ctx, d, identitySpec, meta.(importer.AWSClient)); err != nil { + return nil, err + } + d.Set("assign_generated_ipv6_cidr_block", false) + return []*schema.ResourceData{d}, nil } func resourceVPCCustomizeDiff(_ context.Context, diff *schema.ResourceDiff, v any) error { if diff.HasChange("assign_generated_ipv6_cidr_block") { if err := diff.SetNewComputed("ipv6_association_id"); err != nil { - return fmt.Errorf("setting ipv6_association_id to computed: %s", err) + return fmt.Errorf("setting ipv6_association_id to computed: %w", err) } if err := diff.SetNewComputed("ipv6_cidr_block"); err != nil { - return fmt.Errorf("setting ipv6_cidr_block to computed: %s", err) + return fmt.Errorf("setting ipv6_cidr_block to computed: %w", err) } } if diff.HasChange("instance_tenancy") { old, new := 
diff.GetChange("instance_tenancy") - if old.(string) != string(types.TenancyDedicated) || new.(string) != string(types.TenancyDefault) { + if old.(string) != string(awstypes.TenancyDedicated) || new.(string) != string(awstypes.TenancyDefault) { diff.ForceNew("instance_tenancy") } } @@ -541,22 +474,23 @@ func resourceVPCCustomizeDiff(_ context.Context, diff *schema.ResourceDiff, v an // defaultIPv6CIDRBlockAssociation returns the "default" IPv6 CIDR block. // Try and find IPv6 CIDR block information, first by any stored association ID. // Then if no IPv6 CIDR block information is available, use the first associated IPv6 CIDR block. -func defaultIPv6CIDRBlockAssociation(vpc *types.Vpc, associationID string) *types.VpcIpv6CidrBlockAssociation { - var ipv6CIDRBlockAssociation types.VpcIpv6CidrBlockAssociation +func defaultIPv6CIDRBlockAssociation(vpc *awstypes.Vpc, associationID string) *awstypes.VpcIpv6CidrBlockAssociation { + var ipv6CIDRBlockAssociation awstypes.VpcIpv6CidrBlockAssociation if associationID != "" { for _, v := range vpc.Ipv6CidrBlockAssociationSet { - if state := v.Ipv6CidrBlockState.State; state == types.VpcCidrBlockStateCodeAssociated && aws.ToString(v.AssociationId) == associationID { + if state := v.Ipv6CidrBlockState.State; state == awstypes.VpcCidrBlockStateCodeAssociated && aws.ToString(v.AssociationId) == associationID { ipv6CIDRBlockAssociation = v break } } } - if ipv6CIDRBlockAssociation == (types.VpcIpv6CidrBlockAssociation{}) { + if ipv6CIDRBlockAssociation == (awstypes.VpcIpv6CidrBlockAssociation{}) { for _, v := range vpc.Ipv6CidrBlockAssociationSet { - if v.Ipv6CidrBlockState.State == types.VpcCidrBlockStateCodeAssociated { + if v.Ipv6CidrBlockState.State == awstypes.VpcCidrBlockStateCodeAssociated { ipv6CIDRBlockAssociation = v + break } } } @@ -565,7 +499,7 @@ func defaultIPv6CIDRBlockAssociation(vpc *types.Vpc, associationID string) *type } type vpcInfo struct { - vpc *types.Vpc + vpc *awstypes.Vpc enableDnsHostnames bool 
enableDnsSupport bool enableNetworkAddressUsageMetrics bool @@ -597,7 +531,7 @@ func modifyVPCAttributesOnCreate(ctx context.Context, conn *ec2.Client, d *schem func modifyVPCDNSHostnames(ctx context.Context, conn *ec2.Client, vpcID string, v bool) error { input := &ec2.ModifyVpcAttributeInput{ - EnableDnsHostnames: &types.AttributeBooleanValue{ + EnableDnsHostnames: &awstypes.AttributeBooleanValue{ Value: aws.Bool(v), }, VpcId: aws.String(vpcID), @@ -607,7 +541,7 @@ func modifyVPCDNSHostnames(ctx context.Context, conn *ec2.Client, vpcID string, return fmt.Errorf("modifying EnableDnsHostnames: %w", err) } - if _, err := waitVPCAttributeUpdated(ctx, conn, vpcID, types.VpcAttributeNameEnableDnsHostnames, v); err != nil { + if _, err := waitVPCAttributeUpdated(ctx, conn, vpcID, awstypes.VpcAttributeNameEnableDnsHostnames, v); err != nil { return fmt.Errorf("modifying EnableDnsHostnames: waiting for completion: %w", err) } @@ -616,7 +550,7 @@ func modifyVPCDNSHostnames(ctx context.Context, conn *ec2.Client, vpcID string, func modifyVPCDNSSupport(ctx context.Context, conn *ec2.Client, vpcID string, v bool) error { input := &ec2.ModifyVpcAttributeInput{ - EnableDnsSupport: &types.AttributeBooleanValue{ + EnableDnsSupport: &awstypes.AttributeBooleanValue{ Value: aws.Bool(v), }, VpcId: aws.String(vpcID), @@ -626,7 +560,7 @@ func modifyVPCDNSSupport(ctx context.Context, conn *ec2.Client, vpcID string, v return fmt.Errorf("modifying EnableDnsSupport: %w", err) } - if _, err := waitVPCAttributeUpdated(ctx, conn, vpcID, types.VpcAttributeNameEnableDnsSupport, v); err != nil { + if _, err := waitVPCAttributeUpdated(ctx, conn, vpcID, awstypes.VpcAttributeNameEnableDnsSupport, v); err != nil { return fmt.Errorf("modifying EnableDnsSupport: waiting for completion: %w", err) } @@ -635,7 +569,7 @@ func modifyVPCDNSSupport(ctx context.Context, conn *ec2.Client, vpcID string, v func modifyVPCNetworkAddressUsageMetrics(ctx context.Context, conn *ec2.Client, vpcID string, v bool) error 
{ input := &ec2.ModifyVpcAttributeInput{ - EnableNetworkAddressUsageMetrics: &types.AttributeBooleanValue{ + EnableNetworkAddressUsageMetrics: &awstypes.AttributeBooleanValue{ Value: aws.Bool(v), }, VpcId: aws.String(vpcID), @@ -645,7 +579,7 @@ func modifyVPCNetworkAddressUsageMetrics(ctx context.Context, conn *ec2.Client, return fmt.Errorf("modifying EnableNetworkAddressUsageMetrics: %w", err) } - if _, err := waitVPCAttributeUpdated(ctx, conn, vpcID, types.VpcAttributeNameEnableNetworkAddressUsageMetrics, v); err != nil { + if _, err := waitVPCAttributeUpdated(ctx, conn, vpcID, awstypes.VpcAttributeNameEnableNetworkAddressUsageMetrics, v); err != nil { return fmt.Errorf("modifying EnableNetworkAddressUsageMetrics: waiting for completion: %w", err) } @@ -714,7 +648,7 @@ func modifyVPCIPv6CIDRBlockAssociation(ctx context.Context, conn *ec2.Client, vp func modifyVPCTenancy(ctx context.Context, conn *ec2.Client, vpcID string, v string) error { input := &ec2.ModifyVpcTenancyInput{ - InstanceTenancy: types.VpcTenancy(v), + InstanceTenancy: awstypes.VpcTenancy(v), VpcId: aws.String(vpcID), } @@ -725,24 +659,268 @@ func modifyVPCTenancy(ctx context.Context, conn *ec2.Client, vpcID string, v str return nil } -func findIPAMPoolAllocationsForVPC(ctx context.Context, conn *ec2.Client, poolID, vpcID string) ([]types.IpamPoolAllocation, error) { - input := &ec2.GetIpamPoolAllocationsInput{ - IpamPoolId: aws.String(poolID), +func resourceVPCFlatten(ctx context.Context, client *conns.AWSClient, vpc *awstypes.Vpc, d *schema.ResourceData) error { + conn := client.EC2Client(ctx) + ownerID := aws.ToString(vpc.OwnerId) + d.Set(names.AttrARN, vpcARN(ctx, client, ownerID, d.Id())) + d.Set(names.AttrCIDRBlock, vpc.CidrBlock) + d.Set("dhcp_options_id", vpc.DhcpOptionsId) + d.Set("instance_tenancy", vpc.InstanceTenancy) + d.Set(names.AttrOwnerID, ownerID) + + if v, err := tfresource.RetryWhenNewResourceNotFound(ctx, ec2PropagationTimeout, func(ctx context.Context) (bool, error) { + return 
findVPCAttribute(ctx, conn, d.Id(), awstypes.VpcAttributeNameEnableDnsHostnames) + }, d.IsNewResource()); err != nil { + return fmt.Errorf("reading EC2 VPC (%s) Attribute (%s): %w", d.Id(), awstypes.VpcAttributeNameEnableDnsHostnames, err) + } else { + d.Set("enable_dns_hostnames", v) } - output, err := findIPAMPoolAllocations(ctx, conn, input) + if v, err := tfresource.RetryWhenNewResourceNotFound(ctx, ec2PropagationTimeout, func(ctx context.Context) (bool, error) { + return findVPCAttribute(ctx, conn, d.Id(), awstypes.VpcAttributeNameEnableDnsSupport) + }, d.IsNewResource()); err != nil { + return fmt.Errorf("reading EC2 VPC (%s) Attribute (%s): %w", d.Id(), awstypes.VpcAttributeNameEnableDnsSupport, err) + } else { + d.Set("enable_dns_support", v) + } - if err != nil { - return nil, err + if v, err := tfresource.RetryWhenNewResourceNotFound(ctx, ec2PropagationTimeout, func(ctx context.Context) (bool, error) { + return findVPCAttribute(ctx, conn, d.Id(), awstypes.VpcAttributeNameEnableNetworkAddressUsageMetrics) + }, d.IsNewResource()); err != nil { + return fmt.Errorf("reading EC2 VPC (%s) Attribute (%s): %w", d.Id(), awstypes.VpcAttributeNameEnableNetworkAddressUsageMetrics, err) + } else { + d.Set("enable_network_address_usage_metrics", v) } - output = tfslices.Filter(output, func(v types.IpamPoolAllocation) bool { - return v.ResourceType == types.IpamPoolAllocationResourceTypeVpc && aws.ToString(v.ResourceId) == vpcID - }) + if v, err := findVPCDefaultNetworkACL(ctx, conn, d.Id()); err != nil { + return fmt.Errorf("reading EC2 VPC (%s) default NACL: %w", d.Id(), err) + } else { + d.Set("default_network_acl_id", v.NetworkAclId) + } + + if v, err := findVPCMainRouteTable(ctx, conn, d.Id()); err != nil { + return fmt.Errorf("reading EC2 VPC (%s) main Route Table: %w", d.Id(), err) + } else { + d.Set("default_route_table_id", v.RouteTableId) + d.Set("main_route_table_id", v.RouteTableId) + } + + if v, err := findVPCDefaultSecurityGroup(ctx, conn, d.Id()); err != 
nil { + return fmt.Errorf("reading EC2 VPC (%s) default Security Group: %w", d.Id(), err) + } else { + d.Set("default_security_group_id", v.GroupId) + } + + if ipv6CIDRBlockAssociation := defaultIPv6CIDRBlockAssociation(vpc, d.Get("ipv6_association_id").(string)); ipv6CIDRBlockAssociation == nil { + d.Set("assign_generated_ipv6_cidr_block", nil) + d.Set("ipv6_association_id", nil) + d.Set("ipv6_cidr_block", nil) + d.Set("ipv6_cidr_block_network_border_group", nil) + d.Set("ipv6_ipam_pool_id", nil) + d.Set("ipv6_netmask_length", nil) + } else { + cidrBlock := aws.ToString(ipv6CIDRBlockAssociation.Ipv6CidrBlock) + ipv6PoolID := aws.ToString(ipv6CIDRBlockAssociation.Ipv6Pool) + isAmazonIPv6Pool := ipv6PoolID == amazonIPv6PoolID + d.Set("assign_generated_ipv6_cidr_block", isAmazonIPv6Pool) + d.Set("ipv6_association_id", ipv6CIDRBlockAssociation.AssociationId) + d.Set("ipv6_cidr_block", cidrBlock) + d.Set("ipv6_cidr_block_network_border_group", ipv6CIDRBlockAssociation.NetworkBorderGroup) + if isAmazonIPv6Pool { + d.Set("ipv6_ipam_pool_id", nil) + } else { + if ipv6PoolID == ipamManagedIPv6PoolID { + d.Set("ipv6_ipam_pool_id", d.Get("ipv6_ipam_pool_id")) + } else { + d.Set("ipv6_ipam_pool_id", ipv6PoolID) + } + } + d.Set("ipv6_netmask_length", nil) + if ipv6PoolID != "" && !isAmazonIPv6Pool { + parts := strings.Split(cidrBlock, "/") + if len(parts) == 2 { + if v, err := strconv.Atoi(parts[1]); err == nil { + d.Set("ipv6_netmask_length", v) + } else { + log.Printf("[WARN] Unable to parse CIDR (%s) netmask length: %s", cidrBlock, err) + } + } else { + log.Printf("[WARN] Invalid CIDR block format: %s", cidrBlock) + } + } + } + + setTagsOut(ctx, vpc.Tags) + + return nil +} + +func vpcARN(ctx context.Context, c *conns.AWSClient, accountID, vpcID string) string { + return c.RegionalARNWithAccount(ctx, names.EC2, accountID, "vpc/"+vpcID) +} + +var _ list.ListResourceWithRawV5Schemas = &vpcListResource{} + +type vpcListResource struct { + framework.ResourceWithConfigure + 
framework.ListResourceWithSDKv2Resource + framework.ListResourceWithSDKv2Tags +} + +type vpcListResourceModel struct { + framework.WithRegionModel + VPCIDs fwtypes.ListValueOf[types.String] `tfsdk:"vpc_ids"` + Filters customListFilters `tfsdk:"filter"` +} + +func (l *vpcListResource) ListResourceConfigSchema(ctx context.Context, request list.ListResourceSchemaRequest, response *list.ListResourceSchemaResponse) { + response.Schema = listschema.Schema{ + Attributes: map[string]listschema.Attribute{ + "vpc_ids": listschema.ListAttribute{ + CustomType: fwtypes.ListOfStringType, + ElementType: types.StringType, + Optional: true, + }, + }, + Blocks: map[string]listschema.Block{ + names.AttrFilter: listschema.ListNestedBlock{ + CustomType: fwtypes.NewListNestedObjectTypeOf[customListFilterModel](ctx), + NestedObject: listschema.NestedBlockObject{ + Attributes: map[string]listschema.Attribute{ + names.AttrName: listschema.StringAttribute{ + Required: true, + Validators: []validator.String{ + notIsDefaultValidator{}, + }, + }, + names.AttrValues: listschema.ListAttribute{ + CustomType: fwtypes.ListOfStringType, + ElementType: types.StringType, + Required: true, + }, + }, + }, + }, + }, + } +} + +var _ validator.String = notIsDefaultValidator{} - if len(output) == 0 { - return nil, &retry.NotFoundError{} +type notIsDefaultValidator struct{} + +func (v notIsDefaultValidator) Description(ctx context.Context) string { + return v.MarkdownDescription(ctx) +} + +func (v notIsDefaultValidator) MarkdownDescription(_ context.Context) string { + return "" +} + +func (v notIsDefaultValidator) ValidateString(ctx context.Context, request validator.StringRequest, response *validator.StringResponse) { + if request.ConfigValue.IsNull() || request.ConfigValue.IsUnknown() { + return + } + + value := request.ConfigValue + + if value.ValueString() == "is-default" { + response.Diagnostics.Append(fdiag.NewAttributeErrorDiagnostic( + request.Path, + "Invalid Attribute Value", + `The filter 
"is-default" is not supported. To list default VPCs, use the resource type "aws_default_vpc".`, + )) } +} - return output, nil +func (l *vpcListResource) List(ctx context.Context, request list.ListRequest, stream *list.ListResultsStream) { + awsClient := l.Meta() + conn := awsClient.EC2Client(ctx) + + attributes := []attribute.KeyValue{ + otelaws.RegionAttr(awsClient.Region(ctx)), + } + for _, attribute := range attributes { + ctx = tflog.SetField(ctx, string(attribute.Key), attribute.Value.AsInterface()) + } + + var query vpcListResourceModel + if request.Config.Raw.IsKnown() && !request.Config.Raw.IsNull() { + if diags := request.Config.Get(ctx, &query); diags.HasError() { + stream.Results = list.ListResultsStreamDiagnostics(diags) + return + } + } + + var input ec2.DescribeVpcsInput + if diags := fwflex.Expand(ctx, query, &input); diags.HasError() { + stream.Results = list.ListResultsStreamDiagnostics(diags) + return + } + + input.Filters = append(input.Filters, awstypes.Filter{ + Name: aws.String("is-default"), + Values: []string{"false"}, + }) + + tflog.Info(ctx, "Listing resources") + + stream.Results = func(yield func(list.ListResult) bool) { + pages := ec2.NewDescribeVpcsPaginator(conn, &input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + if err != nil { + result := fwdiag.NewListResultErrorDiagnostic(err) + yield(result) + return + } + + for _, vpc := range page.Vpcs { + ctx := tflog.SetField(ctx, logging.ResourceAttributeKey(names.AttrID), aws.ToString(vpc.VpcId)) + + result := request.NewListResult(ctx) + + tags := keyValueTags(ctx, vpc.Tags) + + rd := l.ResourceData() + rd.SetId(aws.ToString(vpc.VpcId)) + + tflog.Info(ctx, "Reading resource") + err := resourceVPCFlatten(ctx, awsClient, &vpc, rd) + if retry.NotFound(err) { + tflog.Warn(ctx, "Resource disappeared during listing, skipping") + continue + } + if err != nil { + result = fwdiag.NewListResultErrorDiagnostic(err) + yield(result) + return + } + + // set tags + err = 
l.SetTags(ctx, awsClient, rd) + if err != nil { + result = fwdiag.NewListResultErrorDiagnostic(err) + yield(result) + return + } + + if v, ok := tags["Name"]; ok { + result.DisplayName = fmt.Sprintf("%s (%s)", v.ValueString(), aws.ToString(vpc.VpcId)) + } else { + result.DisplayName = aws.ToString(vpc.VpcId) + } + + l.SetResult(ctx, awsClient, request.IncludeResource, &result, rd) + if result.Diagnostics.HasError() { + yield(result) + return + } + + if !yield(result) { + return + } + } + } + } } diff --git a/internal/service/ec2/vpc_block_public_access_exclusion_tags_gen_test.go b/internal/service/ec2/vpc_block_public_access_exclusion_tags_gen_test.go index 2cdd8ffb8e6d..da79481de5d3 100644 --- a/internal/service/ec2/vpc_block_public_access_exclusion_tags_gen_test.go +++ b/internal/service/ec2/vpc_block_public_access_exclusion_tags_gen_test.go @@ -17,9 +17,10 @@ import ( func TestAccVPCBlockPublicAccessExclusion_tags(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_vpc_block_public_access_exclusion.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), CheckDestroy: testAccCheckBlockPublicAccessExclusionDestroy(ctx), @@ -189,9 +190,10 @@ func TestAccVPCBlockPublicAccessExclusion_tags(t *testing.T) { func TestAccVPCBlockPublicAccessExclusion_tags_null(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_vpc_block_public_access_exclusion.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), CheckDestroy: testAccCheckBlockPublicAccessExclusionDestroy(ctx), @@ -247,9 +249,10 @@ func TestAccVPCBlockPublicAccessExclusion_tags_null(t *testing.T) { func TestAccVPCBlockPublicAccessExclusion_tags_EmptyMap(t *testing.T) { ctx := acctest.Context(t) + 
resourceName := "aws_vpc_block_public_access_exclusion.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), CheckDestroy: testAccCheckBlockPublicAccessExclusionDestroy(ctx), @@ -293,9 +296,10 @@ func TestAccVPCBlockPublicAccessExclusion_tags_EmptyMap(t *testing.T) { func TestAccVPCBlockPublicAccessExclusion_tags_AddOnUpdate(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_vpc_block_public_access_exclusion.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), CheckDestroy: testAccCheckBlockPublicAccessExclusionDestroy(ctx), @@ -368,9 +372,10 @@ func TestAccVPCBlockPublicAccessExclusion_tags_AddOnUpdate(t *testing.T) { func TestAccVPCBlockPublicAccessExclusion_tags_EmptyTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_vpc_block_public_access_exclusion.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), CheckDestroy: testAccCheckBlockPublicAccessExclusionDestroy(ctx), @@ -452,9 +457,10 @@ func TestAccVPCBlockPublicAccessExclusion_tags_EmptyTag_OnCreate(t *testing.T) { func TestAccVPCBlockPublicAccessExclusion_tags_EmptyTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_vpc_block_public_access_exclusion.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), CheckDestroy: testAccCheckBlockPublicAccessExclusionDestroy(ctx), @@ -584,9 +590,10 @@ func 
TestAccVPCBlockPublicAccessExclusion_tags_EmptyTag_OnUpdate_Add(t *testing. func TestAccVPCBlockPublicAccessExclusion_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_vpc_block_public_access_exclusion.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), CheckDestroy: testAccCheckBlockPublicAccessExclusionDestroy(ctx), @@ -669,9 +676,10 @@ func TestAccVPCBlockPublicAccessExclusion_tags_EmptyTag_OnUpdate_Replace(t *test func TestAccVPCBlockPublicAccessExclusion_tags_DefaultTags_providerOnly(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_vpc_block_public_access_exclusion.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), CheckDestroy: testAccCheckBlockPublicAccessExclusionDestroy(ctx), @@ -840,9 +848,10 @@ func TestAccVPCBlockPublicAccessExclusion_tags_DefaultTags_providerOnly(t *testi func TestAccVPCBlockPublicAccessExclusion_tags_DefaultTags_nonOverlapping(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_vpc_block_public_access_exclusion.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), CheckDestroy: testAccCheckBlockPublicAccessExclusionDestroy(ctx), @@ -992,9 +1001,10 @@ func TestAccVPCBlockPublicAccessExclusion_tags_DefaultTags_nonOverlapping(t *tes func TestAccVPCBlockPublicAccessExclusion_tags_DefaultTags_overlapping(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_vpc_block_public_access_exclusion.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { 
acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), CheckDestroy: testAccCheckBlockPublicAccessExclusionDestroy(ctx), @@ -1160,9 +1170,10 @@ func TestAccVPCBlockPublicAccessExclusion_tags_DefaultTags_overlapping(t *testin func TestAccVPCBlockPublicAccessExclusion_tags_DefaultTags_updateToProviderOnly(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_vpc_block_public_access_exclusion.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), CheckDestroy: testAccCheckBlockPublicAccessExclusionDestroy(ctx), @@ -1245,9 +1256,10 @@ func TestAccVPCBlockPublicAccessExclusion_tags_DefaultTags_updateToProviderOnly( func TestAccVPCBlockPublicAccessExclusion_tags_DefaultTags_updateToResourceOnly(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_vpc_block_public_access_exclusion.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), CheckDestroy: testAccCheckBlockPublicAccessExclusionDestroy(ctx), @@ -1329,9 +1341,10 @@ func TestAccVPCBlockPublicAccessExclusion_tags_DefaultTags_updateToResourceOnly( func TestAccVPCBlockPublicAccessExclusion_tags_DefaultTags_emptyResourceTag(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_vpc_block_public_access_exclusion.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), CheckDestroy: testAccCheckBlockPublicAccessExclusionDestroy(ctx), @@ -1391,9 +1404,10 @@ func TestAccVPCBlockPublicAccessExclusion_tags_DefaultTags_emptyResourceTag(t *t func TestAccVPCBlockPublicAccessExclusion_tags_DefaultTags_emptyProviderOnlyTag(t 
*testing.T) { ctx := acctest.Context(t) + resourceName := "aws_vpc_block_public_access_exclusion.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), CheckDestroy: testAccCheckBlockPublicAccessExclusionDestroy(ctx), @@ -1445,9 +1459,10 @@ func TestAccVPCBlockPublicAccessExclusion_tags_DefaultTags_emptyProviderOnlyTag( func TestAccVPCBlockPublicAccessExclusion_tags_DefaultTags_nullOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_vpc_block_public_access_exclusion.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), CheckDestroy: testAccCheckBlockPublicAccessExclusionDestroy(ctx), @@ -1510,9 +1525,10 @@ func TestAccVPCBlockPublicAccessExclusion_tags_DefaultTags_nullOverlappingResour func TestAccVPCBlockPublicAccessExclusion_tags_DefaultTags_nullNonOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_vpc_block_public_access_exclusion.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), CheckDestroy: testAccCheckBlockPublicAccessExclusionDestroy(ctx), @@ -1577,9 +1593,10 @@ func TestAccVPCBlockPublicAccessExclusion_tags_DefaultTags_nullNonOverlappingRes func TestAccVPCBlockPublicAccessExclusion_tags_ComputedTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_vpc_block_public_access_exclusion.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), CheckDestroy: 
testAccCheckBlockPublicAccessExclusionDestroy(ctx), @@ -1628,9 +1645,10 @@ func TestAccVPCBlockPublicAccessExclusion_tags_ComputedTag_OnCreate(t *testing.T func TestAccVPCBlockPublicAccessExclusion_tags_ComputedTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_vpc_block_public_access_exclusion.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), CheckDestroy: testAccCheckBlockPublicAccessExclusionDestroy(ctx), @@ -1720,9 +1738,10 @@ func TestAccVPCBlockPublicAccessExclusion_tags_ComputedTag_OnUpdate_Add(t *testi func TestAccVPCBlockPublicAccessExclusion_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_vpc_block_public_access_exclusion.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), CheckDestroy: testAccCheckBlockPublicAccessExclusionDestroy(ctx), @@ -1802,9 +1821,10 @@ func TestAccVPCBlockPublicAccessExclusion_tags_ComputedTag_OnUpdate_Replace(t *t func TestAccVPCBlockPublicAccessExclusion_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_vpc_block_public_access_exclusion.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), CheckDestroy: testAccCheckBlockPublicAccessExclusionDestroy(ctx), @@ -1959,9 +1979,10 @@ func TestAccVPCBlockPublicAccessExclusion_tags_IgnoreTags_Overlap_DefaultTag(t * func TestAccVPCBlockPublicAccessExclusion_tags_IgnoreTags_Overlap_ResourceTag(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_vpc_block_public_access_exclusion.test" - 
resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), CheckDestroy: testAccCheckBlockPublicAccessExclusionDestroy(ctx), diff --git a/internal/service/ec2/vpc_data_source.go b/internal/service/ec2/vpc_data_source.go index aac6f93ec5a6..6e9344039256 100644 --- a/internal/service/ec2/vpc_data_source.go +++ b/internal/service/ec2/vpc_data_source.go @@ -9,9 +9,8 @@ import ( "time" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/arn" "github.com/aws/aws-sdk-go-v2/service/ec2" - "github.com/aws/aws-sdk-go-v2/service/ec2/types" + awstypes "github.com/aws/aws-sdk-go-v2/service/ec2/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -123,7 +122,8 @@ func dataSourceVPC() *schema.Resource { func dataSourceVPCRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).EC2Client(ctx) + c := meta.(*conns.AWSClient) + conn := c.EC2Client(ctx) // We specify "default" as boolean, but EC2 filters want // it to be serialized as a string. 
Note that setting it to @@ -164,36 +164,28 @@ func dataSourceVPCRead(ctx context.Context, d *schema.ResourceData, meta any) di } d.SetId(aws.ToString(vpc.VpcId)) - ownerID := aws.String(aws.ToString(vpc.OwnerId)) - arn := arn.ARN{ - Partition: meta.(*conns.AWSClient).Partition(ctx), - Service: names.EC2, - Region: meta.(*conns.AWSClient).Region(ctx), - AccountID: aws.ToString(ownerID), - Resource: "vpc/" + d.Id(), - }.String() - d.Set(names.AttrARN, arn) + d.Set(names.AttrARN, vpcARN(ctx, c, aws.ToString(ownerID), d.Id())) d.Set(names.AttrCIDRBlock, vpc.CidrBlock) d.Set("default", vpc.IsDefault) d.Set("dhcp_options_id", vpc.DhcpOptionsId) d.Set("instance_tenancy", vpc.InstanceTenancy) d.Set(names.AttrOwnerID, ownerID) - if v, err := findVPCAttribute(ctx, conn, d.Id(), types.VpcAttributeNameEnableDnsHostnames); err != nil { - return sdkdiag.AppendErrorf(diags, "reading EC2 VPC (%s) Attribute (%s): %s", d.Id(), types.VpcAttributeNameEnableDnsHostnames, err) + if v, err := findVPCAttribute(ctx, conn, d.Id(), awstypes.VpcAttributeNameEnableDnsHostnames); err != nil { + return sdkdiag.AppendErrorf(diags, "reading EC2 VPC (%s) Attribute (%s): %s", d.Id(), awstypes.VpcAttributeNameEnableDnsHostnames, err) } else { d.Set("enable_dns_hostnames", v) } - if v, err := findVPCAttribute(ctx, conn, d.Id(), types.VpcAttributeNameEnableDnsSupport); err != nil { - return sdkdiag.AppendErrorf(diags, "reading EC2 VPC (%s) Attribute (%s): %s", d.Id(), types.VpcAttributeNameEnableDnsSupport, err) + if v, err := findVPCAttribute(ctx, conn, d.Id(), awstypes.VpcAttributeNameEnableDnsSupport); err != nil { + return sdkdiag.AppendErrorf(diags, "reading EC2 VPC (%s) Attribute (%s): %s", d.Id(), awstypes.VpcAttributeNameEnableDnsSupport, err) } else { d.Set("enable_dns_support", v) } - if v, err := findVPCAttribute(ctx, conn, d.Id(), types.VpcAttributeNameEnableNetworkAddressUsageMetrics); err != nil { - return sdkdiag.AppendErrorf(diags, "reading EC2 VPC (%s) Attribute (%s): %s", d.Id(), 
types.VpcAttributeNameEnableNetworkAddressUsageMetrics, err) + if v, err := findVPCAttribute(ctx, conn, d.Id(), awstypes.VpcAttributeNameEnableNetworkAddressUsageMetrics); err != nil { + return sdkdiag.AppendErrorf(diags, "reading EC2 VPC (%s) Attribute (%s): %s", d.Id(), awstypes.VpcAttributeNameEnableNetworkAddressUsageMetrics, err) } else { d.Set("enable_network_address_usage_metrics", v) } diff --git a/internal/service/ec2/vpc_data_source_tags_gen_test.go b/internal/service/ec2/vpc_data_source_tags_gen_test.go index 746785351904..42957019ec34 100644 --- a/internal/service/ec2/vpc_data_source_tags_gen_test.go +++ b/internal/service/ec2/vpc_data_source_tags_gen_test.go @@ -19,11 +19,12 @@ import ( "github.com/hashicorp/terraform-provider-aws/names" ) -func TestAccVPCVPCDataSource_tags(t *testing.T) { +func TestAccVPCDataSource_tags(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_vpc.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -45,11 +46,12 @@ func TestAccVPCVPCDataSource_tags(t *testing.T) { }) } -func TestAccVPCVPCDataSource_tags_NullMap(t *testing.T) { +func TestAccVPCDataSource_tags_NullMap(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_vpc.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -67,11 +69,12 @@ func TestAccVPCVPCDataSource_tags_NullMap(t *testing.T) { }) } -func TestAccVPCVPCDataSource_tags_EmptyMap(t *testing.T) { +func TestAccVPCDataSource_tags_EmptyMap(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_vpc.test" - resource.ParallelTest(t, 
resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -89,11 +92,12 @@ func TestAccVPCVPCDataSource_tags_EmptyMap(t *testing.T) { }) } -func TestAccVPCVPCDataSource_tags_DefaultTags_nonOverlapping(t *testing.T) { +func TestAccVPCDataSource_tags_DefaultTags_nonOverlapping(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_vpc.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), Steps: []resource.TestStep{ @@ -119,11 +123,12 @@ func TestAccVPCVPCDataSource_tags_DefaultTags_nonOverlapping(t *testing.T) { }) } -func TestAccVPCVPCDataSource_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { +func TestAccVPCDataSource_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_vpc.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), Steps: []resource.TestStep{ @@ -155,11 +160,12 @@ func TestAccVPCVPCDataSource_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { }) } -func TestAccVPCVPCDataSource_tags_IgnoreTags_Overlap_ResourceTag(t *testing.T) { +func TestAccVPCDataSource_tags_IgnoreTags_Overlap_ResourceTag(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_vpc.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), Steps: []resource.TestStep{ diff --git a/internal/service/ec2/vpc_default_network_acl_test.go b/internal/service/ec2/vpc_default_network_acl_test.go 
index 21ddf315fe12..e3b638351456 100644 --- a/internal/service/ec2/vpc_default_network_acl_test.go +++ b/internal/service/ec2/vpc_default_network_acl_test.go @@ -10,7 +10,7 @@ import ( "github.com/YakDriver/regexache" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/service/ec2/types" + awstypes "github.com/aws/aws-sdk-go-v2/service/ec2/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" @@ -22,7 +22,7 @@ import ( func TestAccVPCDefaultNetworkACL_basic(t *testing.T) { ctx := acctest.Context(t) - var v types.NetworkAcl + var v awstypes.NetworkAcl resourceName := "aws_default_network_acl.test" vpcResourceName := "aws_vpc.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -57,7 +57,7 @@ func TestAccVPCDefaultNetworkACL_basic(t *testing.T) { func TestAccVPCDefaultNetworkACL_basicIPv6VPC(t *testing.T) { ctx := acctest.Context(t) - var v types.NetworkAcl + var v awstypes.NetworkAcl resourceName := "aws_default_network_acl.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -86,7 +86,7 @@ func TestAccVPCDefaultNetworkACL_basicIPv6VPC(t *testing.T) { func TestAccVPCDefaultNetworkACL_tags(t *testing.T) { ctx := acctest.Context(t) - var v types.NetworkAcl + var v awstypes.NetworkAcl resourceName := "aws_default_network_acl.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -132,7 +132,7 @@ func TestAccVPCDefaultNetworkACL_tags(t *testing.T) { func TestAccVPCDefaultNetworkACL_Deny_ingress(t *testing.T) { ctx := acctest.Context(t) - var v types.NetworkAcl + var v awstypes.NetworkAcl resourceName := "aws_default_network_acl.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -164,7 +164,7 @@ func TestAccVPCDefaultNetworkACL_Deny_ingress(t *testing.T) { func TestAccVPCDefaultNetworkACL_withIPv6Ingress(t *testing.T) { ctx := 
acctest.Context(t) - var v types.NetworkAcl + var v awstypes.NetworkAcl resourceName := "aws_default_network_acl.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -196,7 +196,7 @@ func TestAccVPCDefaultNetworkACL_withIPv6Ingress(t *testing.T) { func TestAccVPCDefaultNetworkACL_subnetRemoval(t *testing.T) { ctx := acctest.Context(t) - var v types.NetworkAcl + var v awstypes.NetworkAcl resourceName := "aws_default_network_acl.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -231,7 +231,7 @@ func TestAccVPCDefaultNetworkACL_subnetRemoval(t *testing.T) { func TestAccVPCDefaultNetworkACL_subnetReassign(t *testing.T) { ctx := acctest.Context(t) - var v types.NetworkAcl + var v awstypes.NetworkAcl resourceName := "aws_default_network_acl.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -288,7 +288,7 @@ func testAccCheckDefaultNetworkACLDestroy(s *terraform.State) error { return nil } -func testAccCheckDefaultNetworkACLExists(ctx context.Context, n string, v *types.NetworkAcl) resource.TestCheckFunc { +func testAccCheckDefaultNetworkACLExists(ctx context.Context, n string, v *awstypes.NetworkAcl) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] if !ok { diff --git a/internal/service/ec2/vpc_dhcp_options.go b/internal/service/ec2/vpc_dhcp_options.go index 5b98fba1b099..696457271ee4 100644 --- a/internal/service/ec2/vpc_dhcp_options.go +++ b/internal/service/ec2/vpc_dhcp_options.go @@ -9,7 +9,6 @@ import ( "log" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/arn" "github.com/aws/aws-sdk-go-v2/service/ec2" awstypes "github.com/aws/aws-sdk-go-v2/service/ec2/types" "github.com/hashicorp/aws-sdk-go-base/v2/tfawserr" @@ -131,9 +130,10 @@ func resourceVPCDHCPOptionsCreate(ctx context.Context, d *schema.ResourceData, m func resourceVPCDHCPOptionsRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { var diags 
diag.Diagnostics - conn := meta.(*conns.AWSClient).EC2Client(ctx) + c := meta.(*conns.AWSClient) + conn := c.EC2Client(ctx) - outputRaw, err := tfresource.RetryWhenNewResourceNotFound(ctx, ec2PropagationTimeout, func() (any, error) { + opts, err := tfresource.RetryWhenNewResourceNotFound(ctx, ec2PropagationTimeout, func(ctx context.Context) (*awstypes.DhcpOptions, error) { return findDHCPOptionsByID(ctx, conn, d.Id()) }, d.IsNewResource()) @@ -147,17 +147,8 @@ func resourceVPCDHCPOptionsRead(ctx context.Context, d *schema.ResourceData, met return sdkdiag.AppendErrorf(diags, "reading EC2 DHCP Options (%s): %s", d.Id(), err) } - opts := outputRaw.(*awstypes.DhcpOptions) - ownerID := aws.ToString(opts.OwnerId) - arn := arn.ARN{ - Partition: meta.(*conns.AWSClient).Partition(ctx), - Service: names.EC2, - Region: meta.(*conns.AWSClient).Region(ctx), - AccountID: ownerID, - Resource: fmt.Sprintf("dhcp-options/%s", d.Id()), - }.String() - d.Set(names.AttrARN, arn) + d.Set(names.AttrARN, dhcpOptionsARN(ctx, c, ownerID, d.Id())) d.Set(names.AttrOwnerID, ownerID) err = optionsMap.dhcpConfigurationsToResourceData(opts.DhcpConfigurations, d) @@ -217,7 +208,7 @@ func resourceVPCDHCPOptionsDelete(ctx context.Context, d *schema.ResourceData, m } log.Printf("[INFO] Deleting EC2 DHCP Options Set: %s", d.Id()) - _, err = tfresource.RetryWhenAWSErrCodeEquals(ctx, d.Timeout(schema.TimeoutDelete), func() (any, error) { + _, err = tfresource.RetryWhenAWSErrCodeEquals(ctx, d.Timeout(schema.TimeoutDelete), func(ctx context.Context) (any, error) { return conn.DeleteDhcpOptions(ctx, input) }, errCodeDependencyViolation) @@ -320,3 +311,7 @@ func (m *dhcpOptionsMap) resourceDataToDHCPConfigurations(d *schema.ResourceData return output, nil } + +func dhcpOptionsARN(ctx context.Context, c *conns.AWSClient, accountID, dhcpOptionsID string) string { + return c.RegionalARNWithAccount(ctx, names.EC2, accountID, "dhcp-options/"+dhcpOptionsID) +} diff --git 
a/internal/service/ec2/vpc_dhcp_options_association.go b/internal/service/ec2/vpc_dhcp_options_association.go index 7e4742fc1c42..3683c7238e04 100644 --- a/internal/service/ec2/vpc_dhcp_options_association.go +++ b/internal/service/ec2/vpc_dhcp_options_association.go @@ -80,7 +80,7 @@ func resourceVPCDHCPOptionsAssociationRead(ctx context.Context, d *schema.Resour return sdkdiag.AppendErrorf(diags, "reading EC2 VPC DHCP Options Set Association (%s): %s", d.Id(), err) } - _, err = tfresource.RetryWhenNewResourceNotFound(ctx, ec2PropagationTimeout, func() (any, error) { + _, err = tfresource.RetryWhenNewResourceNotFound(ctx, ec2PropagationTimeout, func(ctx context.Context) (any, error) { return nil, findVPCDHCPOptionsAssociation(ctx, conn, vpcID, dhcpOptionsID) }, d.IsNewResource()) diff --git a/internal/service/ec2/vpc_dhcp_options_data_source.go b/internal/service/ec2/vpc_dhcp_options_data_source.go index 7141b61af765..13df940762c8 100644 --- a/internal/service/ec2/vpc_dhcp_options_data_source.go +++ b/internal/service/ec2/vpc_dhcp_options_data_source.go @@ -5,11 +5,9 @@ package ec2 import ( "context" - "fmt" "time" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/arn" "github.com/aws/aws-sdk-go-v2/service/ec2" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" @@ -78,7 +76,8 @@ func dataSourceVPCDHCPOptions() *schema.Resource { func dataSourceVPCDHCPOptionsRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).EC2Client(ctx) + c := meta.(*conns.AWSClient) + conn := c.EC2Client(ctx) ignoreTagsConfig := meta.(*conns.AWSClient).IgnoreTagsConfig(ctx) input := &ec2.DescribeDhcpOptionsInput{} @@ -104,14 +103,7 @@ func dataSourceVPCDHCPOptionsRead(ctx context.Context, d *schema.ResourceData, m d.SetId(aws.ToString(opts.DhcpOptionsId)) ownerID := aws.ToString(opts.OwnerId) - arn := arn.ARN{ - Partition: 
meta.(*conns.AWSClient).Partition(ctx), - Service: names.EC2, - Region: meta.(*conns.AWSClient).Region(ctx), - AccountID: ownerID, - Resource: fmt.Sprintf("dhcp-options/%s", d.Id()), - }.String() - d.Set(names.AttrARN, arn) + d.Set(names.AttrARN, dhcpOptionsARN(ctx, c, ownerID, d.Id())) d.Set("dhcp_options_id", d.Id()) d.Set(names.AttrOwnerID, ownerID) diff --git a/internal/service/ec2/vpc_egress_only_internet_gateway.go b/internal/service/ec2/vpc_egress_only_internet_gateway.go index cf7cf5f1a232..db1e16538f61 100644 --- a/internal/service/ec2/vpc_egress_only_internet_gateway.go +++ b/internal/service/ec2/vpc_egress_only_internet_gateway.go @@ -72,7 +72,7 @@ func resourceEgressOnlyInternetGatewayRead(ctx context.Context, d *schema.Resour var diags diag.Diagnostics conn := meta.(*conns.AWSClient).EC2Client(ctx) - outputRaw, err := tfresource.RetryWhenNewResourceNotFound(ctx, ec2PropagationTimeout, func() (any, error) { + ig, err := tfresource.RetryWhenNewResourceNotFound(ctx, ec2PropagationTimeout, func(ctx context.Context) (*awstypes.EgressOnlyInternetGateway, error) { return findEgressOnlyInternetGatewayByID(ctx, conn, d.Id()) }, d.IsNewResource()) @@ -86,8 +86,6 @@ func resourceEgressOnlyInternetGatewayRead(ctx context.Context, d *schema.Resour return sdkdiag.AppendErrorf(diags, "reading EC2 Egress-only Internet Gateway (%s): %s", d.Id(), err) } - ig := outputRaw.(*awstypes.EgressOnlyInternetGateway) - if len(ig.Attachments) == 1 && ig.Attachments[0].State == awstypes.AttachmentStatusAttached { d.Set(names.AttrVPCID, ig.Attachments[0].VpcId) } else { diff --git a/internal/service/ec2/vpc_endpoint.go b/internal/service/ec2/vpc_endpoint.go index cf35bf9fe2d9..e960ad203670 100644 --- a/internal/service/ec2/vpc_endpoint.go +++ b/internal/service/ec2/vpc_endpoint.go @@ -40,6 +40,9 @@ const ( // @SDKResource("aws_vpc_endpoint", name="VPC Endpoint") // @Tags(identifierAttribute="id") // @Testing(tagsTest=false) +// @IdentityAttribute("id") +// 
@Testing(existsType="github.com/aws/aws-sdk-go-v2/service/ec2/types;types.VpcEndpoint") +// @Testing(preIdentityVersion="v6.12.0") func resourceVPCEndpoint() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceVPCEndpointCreate, @@ -47,10 +50,6 @@ func resourceVPCEndpoint() *schema.Resource { UpdateWithoutTimeout: resourceVPCEndpointUpdate, DeleteWithoutTimeout: resourceVPCEndpointDelete, - Importer: &schema.ResourceImporter{ - StateContext: schema.ImportStatePassthroughContext, - }, - Schema: map[string]*schema.Schema{ names.AttrARN: { Type: schema.TypeString, diff --git a/internal/service/ec2/vpc_endpoint_identity_gen_test.go b/internal/service/ec2/vpc_endpoint_identity_gen_test.go new file mode 100644 index 000000000000..4f813fbe4489 --- /dev/null +++ b/internal/service/ec2/vpc_endpoint_identity_gen_test.go @@ -0,0 +1,309 @@ +// Code generated by internal/generate/identitytests/main.go; DO NOT EDIT. + +package ec2_test + +import ( + "testing" + + "github.com/aws/aws-sdk-go-v2/service/ec2/types" + "github.com/hashicorp/terraform-plugin-testing/config" + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/knownvalue" + "github.com/hashicorp/terraform-plugin-testing/plancheck" + "github.com/hashicorp/terraform-plugin-testing/statecheck" + "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" + "github.com/hashicorp/terraform-plugin-testing/tfversion" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + tfknownvalue "github.com/hashicorp/terraform-provider-aws/internal/acctest/knownvalue" + tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccVPCVPCEndpoint_Identity_Basic(t *testing.T) { + ctx := acctest.Context(t) + + var v types.VpcEndpoint + resourceName := "aws_vpc_endpoint.test" + 
rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), + CheckDestroy: testAccCheckVPCEndpointDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + // Step 1: Setup + { + ConfigDirectory: config.StaticDirectory("testdata/VPCEndpoint/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckVPCEndpointExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + names.AttrID: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrID)), + }, + }, + + // Step 2: Import command + { + ConfigDirectory: config.StaticDirectory("testdata/VPCEndpoint/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ImportStateKind: resource.ImportCommandWithID, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + + // Step 3: Import block with Import ID + { + ConfigDirectory: config.StaticDirectory("testdata/VPCEndpoint/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + 
plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + }, + }, + }, + + // Step 4: Import block with Resource Identity + { + ConfigDirectory: config.StaticDirectory("testdata/VPCEndpoint/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithResourceIdentity, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + }, + }, + }, + }, + }) +} + +func TestAccVPCVPCEndpoint_Identity_RegionOverride(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_vpc_endpoint.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), + CheckDestroy: acctest.CheckDestroyNoop, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + // Step 1: Setup + { + ConfigDirectory: config.StaticDirectory("testdata/VPCEndpoint/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + statecheck.ExpectIdentity(resourceName, 
map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.AlternateRegion()), + names.AttrID: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrID)), + }, + }, + + // Step 2: Import command + { + ConfigDirectory: config.StaticDirectory("testdata/VPCEndpoint/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ImportStateKind: resource.ImportCommandWithID, + ImportStateIdFunc: acctest.CrossRegionImportStateIdFunc(resourceName), + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + + // Step 3: Import block with Import ID + { + ConfigDirectory: config.StaticDirectory("testdata/VPCEndpoint/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportStateIdFunc: acctest.CrossRegionImportStateIdFunc(resourceName), + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + }, + + // Step 4: Import block with Resource Identity + { + ConfigDirectory: config.StaticDirectory("testdata/VPCEndpoint/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithResourceIdentity, + ImportPlanChecks: resource.ImportPlanChecks{ + 
PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + }, + }, + }) +} + +// Resource Identity was added after v6.12.0 +func TestAccVPCVPCEndpoint_Identity_ExistingResource(t *testing.T) { + ctx := acctest.Context(t) + + var v types.VpcEndpoint + resourceName := "aws_vpc_endpoint.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), + CheckDestroy: testAccCheckVPCEndpointDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/VPCEndpoint/basic_v6.12.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckVPCEndpointExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/VPCEndpoint/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, 
map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + names.AttrID: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrID)), + }, + }, + }, + }) +} + +// Resource Identity was added after v6.12.0 +func TestAccVPCVPCEndpoint_Identity_ExistingResource_NoRefresh_NoChange(t *testing.T) { + ctx := acctest.Context(t) + + var v types.VpcEndpoint + resourceName := "aws_vpc_endpoint.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), + CheckDestroy: testAccCheckVPCEndpointDestroy(ctx), + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/VPCEndpoint/basic_v6.12.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckVPCEndpointExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/VPCEndpoint/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + 
plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + }, + }) +} diff --git a/internal/service/ec2/vpc_endpoint_route_table_association.go b/internal/service/ec2/vpc_endpoint_route_table_association.go index 5f29bcf47139..c2995cff4bc3 100644 --- a/internal/service/ec2/vpc_endpoint_route_table_association.go +++ b/internal/service/ec2/vpc_endpoint_route_table_association.go @@ -85,7 +85,7 @@ func resourceVPCEndpointRouteTableAssociationRead(ctx context.Context, d *schema // Human friendly ID for error messages since d.Id() is non-descriptive id := fmt.Sprintf("%s/%s", endpointID, routeTableID) - _, err := tfresource.RetryWhenNewResourceNotFound(ctx, ec2PropagationTimeout, func() (any, error) { + _, err := tfresource.RetryWhenNewResourceNotFound(ctx, ec2PropagationTimeout, func(ctx context.Context) (any, error) { return nil, findVPCEndpointRouteTableAssociationExists(ctx, conn, endpointID, routeTableID) }, d.IsNewResource()) diff --git a/internal/service/ec2/vpc_endpoint_service.go b/internal/service/ec2/vpc_endpoint_service.go index 52179cba00d8..e6f45cfb6fae 100644 --- a/internal/service/ec2/vpc_endpoint_service.go +++ b/internal/service/ec2/vpc_endpoint_service.go @@ -5,12 +5,10 @@ package ec2 import ( "context" - "fmt" "log" "time" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/arn" "github.com/aws/aws-sdk-go-v2/service/ec2" awstypes "github.com/aws/aws-sdk-go-v2/service/ec2/types" "github.com/hashicorp/aws-sdk-go-base/v2/tfawserr" @@ -217,7 +215,8 @@ func resourceVPCEndpointServiceCreate(ctx context.Context, d *schema.ResourceDat func resourceVPCEndpointServiceRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).EC2Client(ctx) + c := meta.(*conns.AWSClient) + conn := c.EC2Client(ctx) svcCfg, err := 
findVPCEndpointServiceConfigurationByID(ctx, conn, d.Id()) @@ -232,14 +231,7 @@ func resourceVPCEndpointServiceRead(ctx context.Context, d *schema.ResourceData, } d.Set("acceptance_required", svcCfg.AcceptanceRequired) - arn := arn.ARN{ - Partition: meta.(*conns.AWSClient).Partition(ctx), - Service: names.EC2, - Region: meta.(*conns.AWSClient).Region(ctx), - AccountID: meta.(*conns.AWSClient).AccountID(ctx), - Resource: fmt.Sprintf("vpc-endpoint-service/%s", d.Id()), - }.String() - d.Set(names.AttrARN, arn) + d.Set(names.AttrARN, vpcEndpointServiceARN(ctx, c, d.Id())) d.Set(names.AttrAvailabilityZones, svcCfg.AvailabilityZones) d.Set("base_endpoint_dns_names", svcCfg.BaseEndpointDnsNames) d.Set("gateway_load_balancer_arns", svcCfg.GatewayLoadBalancerArns) @@ -404,3 +396,11 @@ func flattenSupportedRegionDetails(apiObjects []awstypes.SupportedRegionDetail) return aws.ToString(v.Region) }) } + +func vpcEndpointServiceARN(ctx context.Context, c *conns.AWSClient, serviceID string) string { + return c.RegionalARN(ctx, names.EC2, "vpc-endpoint-service/"+serviceID) +} + +func vpcEndpointServiceARNWithRegion(ctx context.Context, c *conns.AWSClient, region, serviceID string) string { + return c.RegionalARNWithRegion(ctx, names.EC2, region, "vpc-endpoint-service/"+serviceID) +} diff --git a/internal/service/ec2/vpc_endpoint_service_data_source.go b/internal/service/ec2/vpc_endpoint_service_data_source.go index 7b126f99916a..4d6895d06cc6 100644 --- a/internal/service/ec2/vpc_endpoint_service_data_source.go +++ b/internal/service/ec2/vpc_endpoint_service_data_source.go @@ -10,7 +10,6 @@ import ( "time" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/arn" "github.com/aws/aws-sdk-go-v2/service/ec2" awstypes "github.com/aws/aws-sdk-go-v2/service/ec2/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" @@ -125,7 +124,8 @@ func dataSourceVPCEndpointService() *schema.Resource { func dataSourceVPCEndpointServiceRead(ctx context.Context, d 
*schema.ResourceData, meta any) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).EC2Client(ctx) + c := meta.(*conns.AWSClient) + conn := c.EC2Client(ctx) input := &ec2.DescribeVpcEndpointServicesInput{ Filters: newAttributeFilterList( @@ -199,16 +199,8 @@ func dataSourceVPCEndpointServiceRead(ctx context.Context, d *schema.ResourceDat serviceRegion := aws.ToString(sd.ServiceRegion) d.SetId(strconv.Itoa(create.StringHashcode(serviceName))) - d.Set("acceptance_required", sd.AcceptanceRequired) - arn := arn.ARN{ - Partition: meta.(*conns.AWSClient).Partition(ctx), - Service: names.EC2, - Region: serviceRegion, - AccountID: meta.(*conns.AWSClient).AccountID(ctx), - Resource: fmt.Sprintf("vpc-endpoint-service/%s", serviceID), - }.String() - d.Set(names.AttrARN, arn) + d.Set(names.AttrARN, vpcEndpointServiceARNWithRegion(ctx, c, serviceRegion, serviceID)) d.Set(names.AttrAvailabilityZones, sd.AvailabilityZones) d.Set("base_endpoint_dns_names", sd.BaseEndpointDnsNames) d.Set("manages_vpc_endpoints", sd.ManagesVpcEndpoints) diff --git a/internal/service/ec2/vpc_endpoint_subnet_association.go b/internal/service/ec2/vpc_endpoint_subnet_association.go index 36d0e04b7596..7d41e956b220 100644 --- a/internal/service/ec2/vpc_endpoint_subnet_association.go +++ b/internal/service/ec2/vpc_endpoint_subnet_association.go @@ -15,11 +15,11 @@ import ( "github.com/aws/aws-sdk-go-v2/service/ec2" "github.com/hashicorp/aws-sdk-go-base/v2/tfawserr" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" "github.com/hashicorp/terraform-provider-aws/internal/flex" + "github.com/hashicorp/terraform-provider-aws/internal/retry" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" 
"github.com/hashicorp/terraform-provider-aws/names" ) @@ -162,7 +162,7 @@ func modifyVPCEndpointExclusive(ctx context.Context, conn *ec2.Client, input *ec Delay: 1 * time.Minute, Timeout: 3 * time.Minute, Target: []string{strconv.FormatBool(true)}, - Refresh: func() (any, string, error) { + Refresh: func(ctx context.Context) (any, string, error) { output, err := conn.ModifyVpcEndpoint(ctx, input) if err != nil { diff --git a/internal/service/ec2/vpc_flow_log.go b/internal/service/ec2/vpc_flow_log.go index 536d00f817b1..384a2a05e378 100644 --- a/internal/service/ec2/vpc_flow_log.go +++ b/internal/service/ec2/vpc_flow_log.go @@ -5,12 +5,10 @@ package ec2 import ( "context" - "fmt" "log" "strings" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/arn" "github.com/aws/aws-sdk-go-v2/service/ec2" awstypes "github.com/aws/aws-sdk-go-v2/service/ec2/types" "github.com/hashicorp/aws-sdk-go-base/v2/tfawserr" @@ -154,6 +152,15 @@ func resourceFlowLog() *schema.Resource { ExactlyOneOf: []string{"eni_id", names.AttrSubnetID, names.AttrVPCID, names.AttrTransitGatewayID, names.AttrTransitGatewayAttachmentID}, }, }, + + SchemaVersion: 1, + StateUpgraders: []schema.StateUpgrader{ + { + Type: flowLogSchemaV0().CoreConfigSchema().ImpliedType(), + Upgrade: flowLogStateUpgradeV0, + Version: 0, + }, + }, } } @@ -233,7 +240,7 @@ func resourceLogFlowCreate(ctx context.Context, d *schema.ResourceData, meta any input.MaxAggregationInterval = aws.Int32(int32(v.(int))) } - outputRaw, err := tfresource.RetryWhenAWSErrMessageContains(ctx, iamPropagationTimeout, func() (any, error) { + outputRaw, err := tfresource.RetryWhenAWSErrMessageContains(ctx, iamPropagationTimeout, func(ctx context.Context) (any, error) { return conn.CreateFlowLogs(ctx, input) }, errCodeInvalidParameter, "Unable to assume given IAM role") @@ -252,7 +259,8 @@ func resourceLogFlowCreate(ctx context.Context, d *schema.ResourceData, meta any func resourceLogFlowRead(ctx context.Context, d 
*schema.ResourceData, meta any) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).EC2Client(ctx) + c := meta.(*conns.AWSClient) + conn := c.EC2Client(ctx) fl, err := findFlowLogByID(ctx, conn, d.Id()) @@ -266,14 +274,7 @@ func resourceLogFlowRead(ctx context.Context, d *schema.ResourceData, meta any) return sdkdiag.AppendErrorf(diags, "reading Flow Log (%s): %s", d.Id(), err) } - arn := arn.ARN{ - Partition: meta.(*conns.AWSClient).Partition(ctx), - Service: names.EC2, - Region: meta.(*conns.AWSClient).Region(ctx), - AccountID: meta.(*conns.AWSClient).AccountID(ctx), - Resource: fmt.Sprintf("vpc-flow-log/%s", d.Id()), - }.String() - d.Set(names.AttrARN, arn) + d.Set(names.AttrARN, flowLogARN(ctx, c, d.Id())) d.Set("deliver_cross_account_role", fl.DeliverCrossAccountRole) if fl.DestinationOptions != nil { if err := d.Set("destination_options", []any{flattenDestinationOptionsResponse(fl.DestinationOptions)}); err != nil { @@ -380,3 +381,7 @@ func flattenDestinationOptionsResponse(apiObject *awstypes.DestinationOptionsRes return tfMap } + +func flowLogARN(ctx context.Context, c *conns.AWSClient, flowLogID string) string { + return c.RegionalARN(ctx, names.EC2, "vpc-flow-log/"+flowLogID) +} diff --git a/internal/service/ec2/vpc_flow_log_migrate.go b/internal/service/ec2/vpc_flow_log_migrate.go new file mode 100644 index 000000000000..73737b2ed6ed --- /dev/null +++ b/internal/service/ec2/vpc_flow_log_migrate.go @@ -0,0 +1,128 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package ec2 + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func flowLogSchemaV0() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + names.AttrARN: { + Type: schema.TypeString, + Computed: true, + }, + "deliver_cross_account_role": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "destination_options": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "file_format": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "hive_compatible_partitions": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + }, + "per_hour_partition": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + }, + }, + }, + }, + "eni_id": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + names.AttrIAMRoleARN: { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "log_destination": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + "log_destination_type": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "log_format": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + }, + names.AttrLogGroupName: { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + "max_aggregation_interval": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + }, + names.AttrSubnetID: { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + names.AttrTags: tftags.TagsSchema(), + names.AttrTagsAll: tftags.TagsSchemaComputed(), + "traffic_type": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + 
names.AttrTransitGatewayAttachmentID: { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + names.AttrTransitGatewayID: { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + names.AttrVPCID: { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + }, + } +} + +func flowLogStateUpgradeV0(_ context.Context, rawState map[string]any, meta any) (map[string]any, error) { + if rawState == nil { + rawState = map[string]any{} + } + + delete(rawState, names.AttrLogGroupName) + + return rawState, nil +} diff --git a/internal/service/ec2/vpc_flow_log_migrate_test.go b/internal/service/ec2/vpc_flow_log_migrate_test.go new file mode 100644 index 000000000000..8574a646ad50 --- /dev/null +++ b/internal/service/ec2/vpc_flow_log_migrate_test.go @@ -0,0 +1,61 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package ec2_test + +import ( + "testing" + + "github.com/google/go-cmp/cmp" + tfec2 "github.com/hashicorp/terraform-provider-aws/internal/service/ec2" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestFlowLogStateUpgradeV0(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + rawState map[string]any + expected map[string]any + }{ + { + name: "empty rawState", + rawState: nil, + expected: map[string]any{}, + }, + { + name: "no log_group_name", + rawState: map[string]any{ + names.AttrSubnetID: "sn-12345678", + }, + expected: map[string]any{ + names.AttrSubnetID: "sn-12345678", + }, + }, + { + name: "with log_group_name", + rawState: map[string]any{ + names.AttrLogGroupName: "log-group-name", + names.AttrSubnetID: "sn-12345678", + }, + expected: map[string]any{ + names.AttrSubnetID: "sn-12345678", + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + result, err := tfec2.FlowLogStateUpgradeV0(t.Context(), tt.rawState, nil) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if diff := cmp.Diff(tt.expected, 
result); diff != "" { + t.Errorf("unexpected result (-want +got):\n%s", diff) + } + }) + } +} diff --git a/internal/service/ec2/vpc_flow_log_test.go b/internal/service/ec2/vpc_flow_log_test.go index a963c8b7fbe0..0a1ec87b79c4 100644 --- a/internal/service/ec2/vpc_flow_log_test.go +++ b/internal/service/ec2/vpc_flow_log_test.go @@ -12,8 +12,13 @@ import ( awstypes "github.com/aws/aws-sdk-go-v2/service/ec2/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/knownvalue" + "github.com/hashicorp/terraform-plugin-testing/plancheck" + "github.com/hashicorp/terraform-plugin-testing/statecheck" "github.com/hashicorp/terraform-plugin-testing/terraform" + "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" "github.com/hashicorp/terraform-provider-aws/internal/acctest" + tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" "github.com/hashicorp/terraform-provider-aws/internal/conns" tfec2 "github.com/hashicorp/terraform-provider-aws/internal/service/ec2" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" @@ -564,6 +569,119 @@ func TestAccVPCFlowLog_disappears(t *testing.T) { }) } +func TestAccVPCFlowLog_upgradeFromV5(t *testing.T) { + ctx := acctest.Context(t) + var flowLog awstypes.FlowLog + resourceName := "aws_flow_log.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), + CheckDestroy: testAccCheckFlowLogDestroy(ctx), + Steps: []resource.TestStep{ + { + ExternalProviders: map[string]resource.ExternalProvider{ + "aws": { + Source: "hashicorp/aws", + VersionConstraint: "5.100.0", + }, + }, + Config: testAccVPCFlowLogConfig_destinationTypeCloudWatchLogs(rName), + Check: resource.ComposeTestCheckFunc( + 
testAccCheckFlowLogExists(ctx, resourceName, &flowLog), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrLogGroupName), knownvalue.NotNull()), + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Config: testAccVPCFlowLogConfig_destinationTypeCloudWatchLogs(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckFlowLogExists(ctx, resourceName, &flowLog), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoValue(resourceName, tfjsonpath.New(names.AttrLogGroupName)), + }, + }, + }, + }) +} + +func TestAccVPCFlowLog_upgradeFromV5PlanRefreshFalse(t *testing.T) { + ctx := acctest.Context(t) + var flowLog awstypes.FlowLog + resourceName := "aws_flow_log.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), + CheckDestroy: testAccCheckFlowLogDestroy(ctx), + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + { + ExternalProviders: map[string]resource.ExternalProvider{ + "aws": { + Source: "hashicorp/aws", + VersionConstraint: "5.100.0", + }, + }, + Config: testAccVPCFlowLogConfig_destinationTypeCloudWatchLogs(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckFlowLogExists(ctx, resourceName, &flowLog), + ), + 
ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrLogGroupName), knownvalue.NotNull()), + tfstatecheck.ExpectNoValue(resourceName, tfjsonpath.New(names.AttrRegion)), + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Config: testAccVPCFlowLogConfig_destinationTypeCloudWatchLogs(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckFlowLogExists(ctx, resourceName, &flowLog), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoValue(resourceName, tfjsonpath.New(names.AttrLogGroupName)), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + }, + }, + }, + }) +} + func testAccCheckFlowLogExists(ctx context.Context, n string, v *awstypes.FlowLog) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] diff --git a/internal/service/ec2/vpc_identity_gen_test.go b/internal/service/ec2/vpc_identity_gen_test.go new file mode 100644 index 000000000000..569ecd517c00 --- /dev/null +++ b/internal/service/ec2/vpc_identity_gen_test.go @@ -0,0 +1,284 @@ +// Code generated by internal/generate/identitytests/main.go; DO NOT EDIT. 
+ +package ec2_test + +import ( + "testing" + + awstypes "github.com/aws/aws-sdk-go-v2/service/ec2/types" + "github.com/hashicorp/terraform-plugin-testing/config" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/knownvalue" + "github.com/hashicorp/terraform-plugin-testing/plancheck" + "github.com/hashicorp/terraform-plugin-testing/statecheck" + "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" + "github.com/hashicorp/terraform-plugin-testing/tfversion" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + tfknownvalue "github.com/hashicorp/terraform-provider-aws/internal/acctest/knownvalue" + tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccVPC_Identity_Basic(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.Vpc + resourceName := "aws_vpc.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), + CheckDestroy: testAccCheckVPCDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + // Step 1: Setup + { + ConfigDirectory: config.StaticDirectory("testdata/VPC/basic/"), + ConfigVariables: config.Variables{}, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckVPCExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + names.AttrID: knownvalue.NotNull(), + }), + 
statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrID)), + }, + }, + + // Step 2: Import command + { + ConfigDirectory: config.StaticDirectory("testdata/VPC/basic/"), + ConfigVariables: config.Variables{}, + ImportStateKind: resource.ImportCommandWithID, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + + // Step 3: Import block with Import ID + { + ConfigDirectory: config.StaticDirectory("testdata/VPC/basic/"), + ConfigVariables: config.Variables{}, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + }, + }, + }, + + // Step 4: Import block with Resource Identity + { + ConfigDirectory: config.StaticDirectory("testdata/VPC/basic/"), + ConfigVariables: config.Variables{}, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithResourceIdentity, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + }, + }, + }, + }, + }) +} + +func TestAccVPC_Identity_RegionOverride(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_vpc.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), + CheckDestroy: acctest.CheckDestroyNoop, + ProtoV5ProviderFactories: 
acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + // Step 1: Setup + { + ConfigDirectory: config.StaticDirectory("testdata/VPC/region_override/"), + ConfigVariables: config.Variables{ + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.AlternateRegion()), + names.AttrID: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrID)), + }, + }, + + // Step 2: Import command + { + ConfigDirectory: config.StaticDirectory("testdata/VPC/region_override/"), + ConfigVariables: config.Variables{ + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ImportStateKind: resource.ImportCommandWithID, + ImportStateIdFunc: acctest.CrossRegionImportStateIdFunc(resourceName), + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + + // Step 3: Import block with Import ID + { + ConfigDirectory: config.StaticDirectory("testdata/VPC/region_override/"), + ConfigVariables: config.Variables{ + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportStateIdFunc: acctest.CrossRegionImportStateIdFunc(resourceName), + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + }, + + // Step 4: Import block with Resource Identity + { + ConfigDirectory: 
config.StaticDirectory("testdata/VPC/region_override/"), + ConfigVariables: config.Variables{ + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithResourceIdentity, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + }, + }, + }) +} + +// Resource Identity was added after v6.15.0 +func TestAccVPC_Identity_ExistingResource(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.Vpc + resourceName := "aws_vpc.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), + CheckDestroy: testAccCheckVPCDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/VPC/basic_v6.15.0/"), + ConfigVariables: config.Variables{}, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckVPCExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/VPC/basic/"), + ConfigVariables: config.Variables{}, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + 
}, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + names.AttrID: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrID)), + }, + }, + }, + }) +} + +// Resource Identity was added after v6.15.0 +func TestAccVPC_Identity_ExistingResource_NoRefresh_NoChange(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.Vpc + resourceName := "aws_vpc.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), + CheckDestroy: testAccCheckVPCDestroy(ctx), + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/VPC/basic_v6.15.0/"), + ConfigVariables: config.Variables{}, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckVPCExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/VPC/basic/"), + ConfigVariables: config.Variables{}, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + 
tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + }, + }) +} diff --git a/internal/service/ec2/vpc_internet_gateway.go b/internal/service/ec2/vpc_internet_gateway.go index 8e19d0f7d43a..4388c7e7fca0 100644 --- a/internal/service/ec2/vpc_internet_gateway.go +++ b/internal/service/ec2/vpc_internet_gateway.go @@ -10,7 +10,6 @@ import ( "time" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/arn" "github.com/aws/aws-sdk-go-v2/service/ec2" awstypes "github.com/aws/aws-sdk-go-v2/service/ec2/types" "github.com/hashicorp/aws-sdk-go-base/v2/tfawserr" @@ -92,9 +91,10 @@ func resourceInternetGatewayCreate(ctx context.Context, d *schema.ResourceData, func resourceInternetGatewayRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).EC2Client(ctx) + c := meta.(*conns.AWSClient) + conn := c.EC2Client(ctx) - outputRaw, err := tfresource.RetryWhenNewResourceNotFound(ctx, ec2PropagationTimeout, func() (any, error) { + ig, err := tfresource.RetryWhenNewResourceNotFound(ctx, ec2PropagationTimeout, func(ctx context.Context) (*awstypes.InternetGateway, error) { return findInternetGatewayByID(ctx, conn, d.Id()) }, d.IsNewResource()) @@ -108,17 +108,8 @@ func resourceInternetGatewayRead(ctx context.Context, d *schema.ResourceData, me return sdkdiag.AppendErrorf(diags, "reading EC2 Internet Gateway (%s): %s", d.Id(), err) } - ig := outputRaw.(*awstypes.InternetGateway) - ownerID := aws.ToString(ig.OwnerId) - arn := arn.ARN{ - Partition: meta.(*conns.AWSClient).Partition(ctx), - Service: names.EC2, - Region: meta.(*conns.AWSClient).Region(ctx), - AccountID: ownerID, - Resource: fmt.Sprintf("internet-gateway/%s", d.Id()), - }.String() - d.Set(names.AttrARN, arn) + d.Set(names.AttrARN, internetGatewayARN(ctx, c, ownerID, d.Id())) d.Set(names.AttrOwnerID, ownerID) if len(ig.Attachments) == 0 { // Gateway exists but not attached to the VPC. 
@@ -173,7 +164,7 @@ func resourceInternetGatewayDelete(ctx context.Context, d *schema.ResourceData, } log.Printf("[INFO] Deleting Internet Gateway: %s", d.Id()) - _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, d.Timeout(schema.TimeoutDelete), func() (any, error) { + _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, d.Timeout(schema.TimeoutDelete), func(ctx context.Context) (any, error) { return conn.DeleteInternetGateway(ctx, input) }, errCodeDependencyViolation) @@ -195,7 +186,7 @@ func attachInternetGateway(ctx context.Context, conn *ec2.Client, internetGatewa } log.Printf("[INFO] Attaching EC2 Internet Gateway: %#v", input) - _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, timeout, func() (any, error) { + _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, timeout, func(ctx context.Context) (any, error) { return conn.AttachInternetGateway(ctx, input) }, errCodeInvalidInternetGatewayIDNotFound) @@ -219,7 +210,7 @@ func detachInternetGateway(ctx context.Context, conn *ec2.Client, internetGatewa } log.Printf("[INFO] Detaching EC2 Internet Gateway: %#v", input) - _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, timeout, func() (any, error) { + _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, timeout, func(ctx context.Context) (any, error) { return conn.DetachInternetGateway(ctx, input) }, errCodeDependencyViolation) @@ -242,3 +233,7 @@ func detachInternetGateway(ctx context.Context, conn *ec2.Client, internetGatewa return nil } + +func internetGatewayARN(ctx context.Context, c *conns.AWSClient, accountID, internetGatewayID string) string { + return c.RegionalARNWithAccount(ctx, names.EC2, accountID, "internet-gateway/"+internetGatewayID) +} diff --git a/internal/service/ec2/vpc_internet_gateway_attachment.go b/internal/service/ec2/vpc_internet_gateway_attachment.go index 133ae63b933f..b92b333d15e0 100644 --- a/internal/service/ec2/vpc_internet_gateway_attachment.go +++ b/internal/service/ec2/vpc_internet_gateway_attachment.go @@ -76,7 +76,7 @@ func 
resourceInternetGatewayAttachmentRead(ctx context.Context, d *schema.Resour return sdkdiag.AppendFromErr(diags, err) } - outputRaw, err := tfresource.RetryWhenNewResourceNotFound(ctx, ec2PropagationTimeout, func() (any, error) { + igw, err := tfresource.RetryWhenNewResourceNotFound(ctx, ec2PropagationTimeout, func(ctx context.Context) (*awstypes.InternetGatewayAttachment, error) { return findInternetGatewayAttachment(ctx, conn, igwID, vpcID) }, d.IsNewResource()) @@ -90,8 +90,6 @@ func resourceInternetGatewayAttachmentRead(ctx context.Context, d *schema.Resour return sdkdiag.AppendErrorf(diags, "reading EC2 Internet Gateway Attachment (%s): %s", d.Id(), err) } - igw := outputRaw.(*awstypes.InternetGatewayAttachment) - d.Set("internet_gateway_id", igwID) d.Set(names.AttrVPCID, igw.VpcId) diff --git a/internal/service/ec2/vpc_internet_gateway_data_source.go b/internal/service/ec2/vpc_internet_gateway_data_source.go index 767b4e46a223..3de3be6dec8e 100644 --- a/internal/service/ec2/vpc_internet_gateway_data_source.go +++ b/internal/service/ec2/vpc_internet_gateway_data_source.go @@ -5,11 +5,9 @@ package ec2 import ( "context" - "fmt" "time" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/arn" "github.com/aws/aws-sdk-go-v2/service/ec2" awstypes "github.com/aws/aws-sdk-go-v2/service/ec2/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" @@ -68,7 +66,8 @@ func dataSourceInternetGateway() *schema.Resource { func dataSourceInternetGatewayRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).EC2Client(ctx) + c := meta.(*conns.AWSClient) + conn := c.EC2Client(ctx) ignoreTagsConfig := meta.(*conns.AWSClient).IgnoreTagsConfig(ctx) internetGatewayId, internetGatewayIdOk := d.GetOk("internet_gateway_id") @@ -99,14 +98,7 @@ func dataSourceInternetGatewayRead(ctx context.Context, d *schema.ResourceData, d.SetId(aws.ToString(igw.InternetGatewayId)) ownerID := 
aws.ToString(igw.OwnerId) - arn := arn.ARN{ - Partition: meta.(*conns.AWSClient).Partition(ctx), - Service: names.EC2, - Region: meta.(*conns.AWSClient).Region(ctx), - AccountID: ownerID, - Resource: fmt.Sprintf("internet-gateway/%s", d.Id()), - }.String() - d.Set(names.AttrARN, arn) + d.Set(names.AttrARN, internetGatewayARN(ctx, c, ownerID, d.Id())) if err := d.Set("attachments", flattenInternetGatewayAttachments(igw.Attachments)); err != nil { return sdkdiag.AppendErrorf(diags, "setting attachments: %s", err) diff --git a/internal/service/ec2/vpc_ipam.go b/internal/service/ec2/vpc_ipam.go index ed4aeeffb045..5bed7b3e516e 100644 --- a/internal/service/ec2/vpc_ipam.go +++ b/internal/service/ec2/vpc_ipam.go @@ -72,6 +72,12 @@ func resourceIPAM() *schema.Resource { Optional: true, Default: false, }, + "metered_account": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateDiagFunc: enum.Validate[awstypes.IpamMeteredAccount](), + }, "operating_regions": { Type: schema.TypeSet, Required: true, @@ -145,6 +151,10 @@ func resourceIPAMCreate(ctx context.Context, d *schema.ResourceData, meta any) d input.EnablePrivateGua = aws.Bool(v.(bool)) } + if v, ok := d.GetOk("metered_account"); ok { + input.MeteredAccount = awstypes.IpamMeteredAccount(v.(string)) + } + if v, ok := d.GetOk("tier"); ok { input.Tier = awstypes.IpamTier(v.(string)) } @@ -185,6 +195,7 @@ func resourceIPAMRead(ctx context.Context, d *schema.ResourceData, meta any) dia d.Set("default_resource_discovery_id", ipam.DefaultResourceDiscoveryId) d.Set(names.AttrDescription, ipam.Description) d.Set("enable_private_gua", ipam.EnablePrivateGua) + d.Set("metered_account", ipam.MeteredAccount) if err := d.Set("operating_regions", flattenIPAMOperatingRegions(ipam.OperatingRegions)); err != nil { return sdkdiag.AppendErrorf(diags, "setting operating_regions: %s", err) } @@ -215,6 +226,10 @@ func resourceIPAMUpdate(ctx context.Context, d *schema.ResourceData, meta any) d input.EnablePrivateGua = 
aws.Bool(d.Get("enable_private_gua").(bool)) } + if d.HasChange("metered_account") { + input.MeteredAccount = awstypes.IpamMeteredAccount(d.Get("metered_account").(string)) + } + if d.HasChange("operating_regions") { o, n := d.GetChange("operating_regions") if o == nil { diff --git a/internal/service/ec2/vpc_ipam_data_source.go b/internal/service/ec2/vpc_ipam_data_source.go index 0258379609df..c1f55147a41f 100644 --- a/internal/service/ec2/vpc_ipam_data_source.go +++ b/internal/service/ec2/vpc_ipam_data_source.go @@ -45,6 +45,9 @@ func (d *ipamDataSource) Schema(ctx context.Context, request datasource.SchemaRe "enable_private_gua": schema.BoolAttribute{ Computed: true, }, + "metered_account": schema.StringAttribute{ + Computed: true, + }, names.AttrID: schema.StringAttribute{ Required: true, }, @@ -124,6 +127,7 @@ type ipamModel struct { IpamARN types.String `tfsdk:"arn"` IpamID types.String `tfsdk:"id"` IpamRegion types.String `tfsdk:"ipam_region"` + MeteredAccount types.String `tfsdk:"metered_account"` OperatingRegions fwtypes.ListNestedObjectValueOf[ipamOperatingRegionModel] `tfsdk:"operating_regions"` OwnerID types.String `tfsdk:"owner_id"` PrivateDefaultScopeID types.String `tfsdk:"private_default_scope_id"` diff --git a/internal/service/ec2/vpc_ipam_data_source_test.go b/internal/service/ec2/vpc_ipam_data_source_test.go index 107a231a24ed..c57e432af636 100644 --- a/internal/service/ec2/vpc_ipam_data_source_test.go +++ b/internal/service/ec2/vpc_ipam_data_source_test.go @@ -33,6 +33,7 @@ func TestAccIPAMDataSource_basic(t *testing.T) { // nosemgrep:ci.vpc-in-test-nam resource.TestCheckResourceAttrPair(dataSourceName, "default_resource_discovery_id", resourceName, "default_resource_discovery_id"), resource.TestCheckResourceAttrPair(dataSourceName, "default_resource_discovery_association_id", resourceName, "default_resource_discovery_association_id"), resource.TestCheckResourceAttrPair(dataSourceName, "enable_private_gua", resourceName, "enable_private_gua"), + 
resource.TestCheckResourceAttrPair(dataSourceName, "metered_account", resourceName, "metered_account"), resource.TestCheckResourceAttrPair(dataSourceName, "operating_regions.0.region_name", resourceName, "operating_regions.0.region_name"), resource.TestCheckResourceAttrPair(dataSourceName, "private_default_scope_id", resourceName, "private_default_scope_id"), resource.TestCheckResourceAttrPair(dataSourceName, "public_default_scope_id", resourceName, "public_default_scope_id"), diff --git a/internal/service/ec2/vpc_ipam_pool_cidr.go b/internal/service/ec2/vpc_ipam_pool_cidr.go index f158abf2db67..1883fa77000d 100644 --- a/internal/service/ec2/vpc_ipam_pool_cidr.go +++ b/internal/service/ec2/vpc_ipam_pool_cidr.go @@ -88,16 +88,10 @@ func resourceIPAMPoolCIDR() *schema.Resource { "netmask_length": { Type: schema.TypeInt, Optional: true, + Computed: true, ForceNew: true, ValidateFunc: validation.IntBetween(0, 128), ConflictsWith: []string{"cidr"}, - // NetmaskLength is not outputted by GetIpamPoolCidrsOutput - DiffSuppressFunc: func(k, o, n string, d *schema.ResourceData) bool { - if o != "0" && n == "0" { - return true - } - return false - }, }, }, } @@ -171,6 +165,7 @@ func resourceIPAMPoolCIDRRead(ctx context.Context, d *schema.ResourceData, meta d.Set("cidr", output.Cidr) d.Set("ipam_pool_cidr_id", output.IpamPoolCidrId) d.Set("ipam_pool_id", poolID) + d.Set("netmask_length", output.NetmaskLength) return diags } diff --git a/internal/service/ec2/vpc_ipam_pool_cidr_allocation.go b/internal/service/ec2/vpc_ipam_pool_cidr_allocation.go index 86b0ed88a0fd..ad2b5bc479ce 100644 --- a/internal/service/ec2/vpc_ipam_pool_cidr_allocation.go +++ b/internal/service/ec2/vpc_ipam_pool_cidr_allocation.go @@ -134,7 +134,7 @@ func resourceIPAMPoolCIDRAllocationCreate(ctx context.Context, d *schema.Resourc allocationID := aws.ToString(output.IpamPoolAllocation.IpamPoolAllocationId) d.SetId(ipamPoolCIDRAllocationCreateResourceID(allocationID, ipamPoolID)) - _, err = 
tfresource.RetryWhenNotFound(ctx, d.Timeout(schema.TimeoutCreate), func() (any, error) { + _, err = tfresource.RetryWhenNotFound(ctx, d.Timeout(schema.TimeoutCreate), func(ctx context.Context) (any, error) { return findIPAMPoolAllocationByTwoPartKey(ctx, conn, allocationID, ipamPoolID) }) diff --git a/internal/service/ec2/vpc_ipam_pool_cidr_test.go b/internal/service/ec2/vpc_ipam_pool_cidr_test.go index 0d84f35fed7d..a77e71734658 100644 --- a/internal/service/ec2/vpc_ipam_pool_cidr_test.go +++ b/internal/service/ec2/vpc_ipam_pool_cidr_test.go @@ -44,9 +44,6 @@ func TestAccIPAMPoolCIDR_basic(t *testing.T) { // nosemgrep:ci.vpc-in-test-name ResourceName: resourceName, ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{ - "netmask_length", - }, }, }, }) @@ -77,9 +74,6 @@ func TestAccIPAMPoolCIDR_basicNetmaskLength(t *testing.T) { // nosemgrep:ci.vpc- ResourceName: resourceName, ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{ - "netmask_length", - }, }, }, }) diff --git a/internal/service/ec2/vpc_ipam_test.go b/internal/service/ec2/vpc_ipam_test.go index e78c454693c1..6c639fd5464b 100644 --- a/internal/service/ec2/vpc_ipam_test.go +++ b/internal/service/ec2/vpc_ipam_test.go @@ -14,6 +14,7 @@ import ( awstypes "github.com/aws/aws-sdk-go-v2/service/ec2/types" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/plancheck" "github.com/hashicorp/terraform-plugin-testing/terraform" "github.com/hashicorp/terraform-provider-aws/internal/acctest" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -40,6 +41,7 @@ func TestAccIPAM_basic(t *testing.T) { // nosemgrep:ci.vpc-in-test-name acctest.CheckResourceAttrGlobalARNFormat(ctx, resourceName, names.AttrARN, "ec2", "ipam/{id}"), resource.TestCheckResourceAttr(resourceName, names.AttrDescription, ""), 
resource.TestCheckResourceAttr(resourceName, "enable_private_gua", acctest.CtFalse), + resource.TestCheckResourceAttr(resourceName, "metered_account", string(awstypes.IpamMeteredAccountIpamOwner)), resource.TestCheckResourceAttr(resourceName, "operating_regions.#", "1"), resource.TestCheckResourceAttr(resourceName, "scope_count", "2"), resource.TestMatchResourceAttr(resourceName, "private_default_scope_id", regexache.MustCompile(`^ipam-scope-[0-9a-f]+`)), @@ -297,6 +299,45 @@ func TestAccIPAM_enablePrivateGUA(t *testing.T) { // nosemgrep:ci.vpc-in-test-na }) } +func TestAccIPAM_meteredAccount(t *testing.T) { // nosemgrep:ci.vpc-in-test-name + ctx := acctest.Context(t) + var ipam awstypes.Ipam + resourceName := "aws_vpc_ipam.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckIPAMDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccIPAMConfig_meteredAccount(string(awstypes.IpamMeteredAccountIpamOwner)), + Check: resource.ComposeTestCheckFunc( + testAccCheckIPAMExists(ctx, resourceName, &ipam), + resource.TestCheckResourceAttr(resourceName, "metered_account", string(awstypes.IpamMeteredAccountIpamOwner)), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccIPAMConfig_meteredAccount(string(awstypes.IpamMeteredAccountResourceOwner)), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + }, + }, + Check: resource.ComposeTestCheckFunc( + testAccCheckIPAMExists(ctx, resourceName, &ipam), + resource.TestCheckResourceAttr(resourceName, "metered_account", string(awstypes.IpamMeteredAccountResourceOwner)), + ), + }, + }, + }) +} + func testAccCheckIPAMExists(ctx context.Context, n string, v 
*awstypes.Ipam) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] @@ -472,3 +513,16 @@ resource "aws_vpc_ipam" "test" { } `, enablePrivateGUA) } + +func testAccIPAMConfig_meteredAccount(meteredAccount string) string { + return fmt.Sprintf(` +data "aws_region" "current" {} + +resource "aws_vpc_ipam" "test" { + operating_regions { + region_name = data.aws_region.current.region + } + metered_account = %[1]q +} +`, meteredAccount) +} diff --git a/internal/service/ec2/vpc_ipv4_cidr_block_association.go b/internal/service/ec2/vpc_ipv4_cidr_block_association.go index 93d71622a4e7..9d24f7de25a0 100644 --- a/internal/service/ec2/vpc_ipv4_cidr_block_association.go +++ b/internal/service/ec2/vpc_ipv4_cidr_block_association.go @@ -163,7 +163,7 @@ func resourceVPCIPv4CIDRBlockAssociationDelete(ctx context.Context, d *schema.Re } _, err := conn.DisassociateVpcCidrBlock(ctx, &input) - if tfawserr.ErrCodeEquals(err, errCodeInvalidVPCCIDRBlockAssociationIDNotFound) { + if tfawserr.ErrCodeEquals(err, errCodeInvalidVPCCIDRBlockAssociationIDNotFound, errCodeInvalidVPCIDNotFound) { return diags } diff --git a/internal/service/ec2/vpc_ipv4_cidr_block_association_test.go b/internal/service/ec2/vpc_ipv4_cidr_block_association_test.go index 70e2e1ce6995..7ce8e52d5350 100644 --- a/internal/service/ec2/vpc_ipv4_cidr_block_association_test.go +++ b/internal/service/ec2/vpc_ipv4_cidr_block_association_test.go @@ -276,8 +276,8 @@ func testAccCheckVPCIPv4CIDRBlockAssociationWaitVPCIPAMPoolAllocationDeleted(ctx const ( timeout = 35 * time.Minute // IPAM eventual consistency. It can take ~30 min to release allocations. 
) - _, err := tfresource.RetryUntilNotFound(ctx, timeout, func() (any, error) { - return tfec2.FindIPAMPoolAllocationsForVPC(ctx, conn, rsIPAMPool.Primary.ID, rsVPC.Primary.ID) + _, err := tfresource.RetryUntilNotFound(ctx, timeout, func(ctx context.Context) (any, error) { + return tfec2.FindIPAMPoolAllocationForResource(ctx, conn, rsIPAMPool.Primary.ID, rsVPC.Primary.ID) }) return err diff --git a/internal/service/ec2/vpc_list_test.go b/internal/service/ec2/vpc_list_test.go new file mode 100644 index 000000000000..f8d878a55577 --- /dev/null +++ b/internal/service/ec2/vpc_list_test.go @@ -0,0 +1,470 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package ec2_test + +import ( + "testing" + + "github.com/YakDriver/regexache" + "github.com/hashicorp/terraform-plugin-testing/config" + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/knownvalue" + "github.com/hashicorp/terraform-plugin-testing/querycheck" + "github.com/hashicorp/terraform-plugin-testing/statecheck" + "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" + "github.com/hashicorp/terraform-plugin-testing/tfversion" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + tfknownvalue "github.com/hashicorp/terraform-provider-aws/internal/acctest/knownvalue" + tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccVPC_List_Basic(t *testing.T) { + ctx := acctest.Context(t) + + resourceName1 := "aws_vpc.test[0]" + resourceName2 := "aws_vpc.test[1]" + resourceName3 := "aws_vpc.test[2]" + + id1 := tfstatecheck.StateValue() + id2 := tfstatecheck.StateValue() + id3 := tfstatecheck.StateValue() + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + 
tfversion.SkipBelow(tfversion.Version1_14_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), + CheckDestroy: testAccCheckVPCDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Setup + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/VPC/list_basic"), + ConfigStateChecks: []statecheck.StateCheck{ + id1.GetStateValue(resourceName1, tfjsonpath.New(names.AttrID)), + tfstatecheck.ExpectRegionalARNFormat(resourceName1, tfjsonpath.New(names.AttrARN), "ec2", "vpc/{id}"), + + id2.GetStateValue(resourceName2, tfjsonpath.New(names.AttrID)), + tfstatecheck.ExpectRegionalARNFormat(resourceName2, tfjsonpath.New(names.AttrARN), "ec2", "vpc/{id}"), + + id3.GetStateValue(resourceName3, tfjsonpath.New(names.AttrID)), + tfstatecheck.ExpectRegionalARNFormat(resourceName3, tfjsonpath.New(names.AttrARN), "ec2", "vpc/{id}"), + }, + }, + + // Step 2: Query + { + Query: true, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/VPC/list_basic"), + QueryResultChecks: []querycheck.QueryResultCheck{ + querycheck.ExpectIdentity("aws_vpc.test", map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + names.AttrID: id1.Value(), + }), + + querycheck.ExpectIdentity("aws_vpc.test", map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + names.AttrID: id2.Value(), + }), + + querycheck.ExpectIdentity("aws_vpc.test", map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + names.AttrID: id3.Value(), + }), + }, + }, + }, + }) +} + +func TestAccVPC_List_RegionOverride(t *testing.T) { + ctx := acctest.Context(t) + + resourceName1 := "aws_vpc.test[0]" + 
resourceName2 := "aws_vpc.test[1]" + resourceName3 := "aws_vpc.test[2]" + + id1 := tfstatecheck.StateValue() + id2 := tfstatecheck.StateValue() + id3 := tfstatecheck.StateValue() + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_14_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), + CheckDestroy: testAccCheckVPCDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Setup + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/VPC/list_region_override/"), + ConfigVariables: config.Variables{ + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ConfigStateChecks: []statecheck.StateCheck{ + id1.GetStateValue(resourceName1, tfjsonpath.New(names.AttrID)), + tfstatecheck.ExpectRegionalARNAlternateRegionFormat(resourceName1, tfjsonpath.New(names.AttrARN), "ec2", "vpc/{id}"), + + id2.GetStateValue(resourceName2, tfjsonpath.New(names.AttrID)), + tfstatecheck.ExpectRegionalARNAlternateRegionFormat(resourceName2, tfjsonpath.New(names.AttrARN), "ec2", "vpc/{id}"), + + id3.GetStateValue(resourceName3, tfjsonpath.New(names.AttrID)), + tfstatecheck.ExpectRegionalARNAlternateRegionFormat(resourceName3, tfjsonpath.New(names.AttrARN), "ec2", "vpc/{id}"), + }, + }, + + // Step 2: Query + { + Query: true, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/VPC/list_region_override/"), + ConfigVariables: config.Variables{ + "region": config.StringVariable(acctest.AlternateRegion()), + }, + QueryResultChecks: []querycheck.QueryResultCheck{ + querycheck.ExpectIdentity("aws_vpc.test", map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.AlternateRegion()), + names.AttrID: id1.Value(), + }), + + 
querycheck.ExpectIdentity("aws_vpc.test", map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.AlternateRegion()), + names.AttrID: id2.Value(), + }), + + querycheck.ExpectIdentity("aws_vpc.test", map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.AlternateRegion()), + names.AttrID: id3.Value(), + }), + }, + }, + }, + }) +} + +func TestAccVPC_List_Filtered(t *testing.T) { + ctx := acctest.Context(t) + + resourceNameExpected1 := "aws_vpc.expected[0]" + resourceNameExpected2 := "aws_vpc.expected[1]" + resourceNameNotExpected1 := "aws_vpc.not_expected[0]" + resourceNameNotExpected2 := "aws_vpc.not_expected[1]" + + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + expected1 := tfstatecheck.StateValue() + expected2 := tfstatecheck.StateValue() + notExpected1 := tfstatecheck.StateValue() + notExpected2 := tfstatecheck.StateValue() + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_14_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), + CheckDestroy: testAccCheckVPCDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Setup + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/VPC/list_filtered/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigStateChecks: []statecheck.StateCheck{ + expected1.GetStateValue(resourceNameExpected1, tfjsonpath.New(names.AttrID)), + tfstatecheck.ExpectRegionalARNFormat(resourceNameExpected1, tfjsonpath.New(names.AttrARN), "ec2", "vpc/{id}"), + + expected2.GetStateValue(resourceNameExpected2, tfjsonpath.New(names.AttrID)), + tfstatecheck.ExpectRegionalARNFormat(resourceNameExpected2, 
tfjsonpath.New(names.AttrARN), "ec2", "vpc/{id}"), + + notExpected1.GetStateValue(resourceNameNotExpected1, tfjsonpath.New(names.AttrID)), + tfstatecheck.ExpectRegionalARNFormat(resourceNameNotExpected1, tfjsonpath.New(names.AttrARN), "ec2", "vpc/{id}"), + + notExpected2.GetStateValue(resourceNameNotExpected2, tfjsonpath.New(names.AttrID)), + tfstatecheck.ExpectRegionalARNFormat(resourceNameNotExpected2, tfjsonpath.New(names.AttrARN), "ec2", "vpc/{id}"), + }, + }, + + // Step 2: Query + { + Query: true, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/VPC/list_filtered/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + QueryResultChecks: []querycheck.QueryResultCheck{ + querycheck.ExpectIdentity("aws_vpc.test", map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + names.AttrID: expected1.Value(), + }), + + querycheck.ExpectIdentity("aws_vpc.test", map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + names.AttrID: expected2.Value(), + }), + + querycheck.ExpectNoIdentity("aws_vpc.test", map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + names.AttrID: notExpected1.Value(), + }), + + querycheck.ExpectNoIdentity("aws_vpc.test", map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + names.AttrID: notExpected2.Value(), + }), + }, + }, + }, + }) +} + +func TestAccVPC_List_DefaultVPC_Exclude(t *testing.T) { + ctx := acctest.Context(t) + + id := tfstatecheck.StateValue() + defaultVPCID := tfstatecheck.StateValue() + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: 
[]tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_14_0), + }, + PreCheck: func() { + acctest.PreCheck(ctx, t) + testAccPreCheckDefaultVPCExists(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), + CheckDestroy: acctest.CheckDestroyNoop, + Steps: []resource.TestStep{ + // Step 1: Setup + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/VPC/list_exclude_default"), + ConfigStateChecks: []statecheck.StateCheck{ + id.GetStateValue("aws_vpc.test", tfjsonpath.New(names.AttrID)), + defaultVPCID.GetStateValue("data.aws_vpc.default", tfjsonpath.New(names.AttrID)), + }, + }, + + // Step 2: Query + { + Query: true, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/VPC/list_exclude_default"), + QueryResultChecks: []querycheck.QueryResultCheck{ + querycheck.ExpectIdentity("aws_vpc.test", map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + names.AttrID: id.Value(), + }), + + querycheck.ExpectNoIdentity("aws_vpc.test", map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + names.AttrID: defaultVPCID.Value(), + }), + }, + }, + }, + }) +} + +func TestAccVPC_List_VPCIDs(t *testing.T) { + ctx := acctest.Context(t) + + resourceName1 := "aws_vpc.test[0]" + resourceName2 := "aws_vpc.test[1]" + resourceName3 := "aws_vpc.test[2]" + + id1 := tfstatecheck.StateValue() + id2 := tfstatecheck.StateValue() + id3 := tfstatecheck.StateValue() + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_14_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), + CheckDestroy: 
testAccCheckVPCDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Setup + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/VPC/list_vpc_ids"), + ConfigStateChecks: []statecheck.StateCheck{ + id1.GetStateValue(resourceName1, tfjsonpath.New(names.AttrID)), + tfstatecheck.ExpectRegionalARNFormat(resourceName1, tfjsonpath.New(names.AttrARN), "ec2", "vpc/{id}"), + + id2.GetStateValue(resourceName2, tfjsonpath.New(names.AttrID)), + tfstatecheck.ExpectRegionalARNFormat(resourceName2, tfjsonpath.New(names.AttrARN), "ec2", "vpc/{id}"), + + id3.GetStateValue(resourceName3, tfjsonpath.New(names.AttrID)), + tfstatecheck.ExpectRegionalARNFormat(resourceName3, tfjsonpath.New(names.AttrARN), "ec2", "vpc/{id}"), + }, + }, + + // Step 2: Query + { + Query: true, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/VPC/list_vpc_ids"), + QueryResultChecks: []querycheck.QueryResultCheck{ + querycheck.ExpectLength("aws_vpc.test", 3), + + querycheck.ExpectIdentity("aws_vpc.test", map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + names.AttrID: id1.Value(), + }), + + querycheck.ExpectIdentity("aws_vpc.test", map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + names.AttrID: id2.Value(), + }), + + querycheck.ExpectIdentity("aws_vpc.test", map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + names.AttrID: id3.Value(), + }), + }, + }, + }, + }) +} + +func TestAccVPC_List_FilteredVPCIDs(t *testing.T) { + ctx := acctest.Context(t) + + resourceNameExpected1 := "aws_vpc.expected[0]" + resourceNameExpected2 := "aws_vpc.expected[1]" + resourceNameNotExpected1 := "aws_vpc.not_expected[0]" + 
resourceNameNotExpected2 := "aws_vpc.not_expected[1]" + + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + expected1 := tfstatecheck.StateValue() + expected2 := tfstatecheck.StateValue() + notExpected1 := tfstatecheck.StateValue() + notExpected2 := tfstatecheck.StateValue() + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_14_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), + CheckDestroy: testAccCheckVPCDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Setup + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/VPC/list_filtered_vpc_ids/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigStateChecks: []statecheck.StateCheck{ + expected1.GetStateValue(resourceNameExpected1, tfjsonpath.New(names.AttrID)), + tfstatecheck.ExpectRegionalARNFormat(resourceNameExpected1, tfjsonpath.New(names.AttrARN), "ec2", "vpc/{id}"), + + expected2.GetStateValue(resourceNameExpected2, tfjsonpath.New(names.AttrID)), + tfstatecheck.ExpectRegionalARNFormat(resourceNameExpected2, tfjsonpath.New(names.AttrARN), "ec2", "vpc/{id}"), + + notExpected1.GetStateValue(resourceNameNotExpected1, tfjsonpath.New(names.AttrID)), + tfstatecheck.ExpectRegionalARNFormat(resourceNameNotExpected1, tfjsonpath.New(names.AttrARN), "ec2", "vpc/{id}"), + + notExpected2.GetStateValue(resourceNameNotExpected2, tfjsonpath.New(names.AttrID)), + tfstatecheck.ExpectRegionalARNFormat(resourceNameNotExpected2, tfjsonpath.New(names.AttrARN), "ec2", "vpc/{id}"), + }, + }, + + // Step 2: Query + { + Query: true, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/VPC/list_filtered_vpc_ids/"), + ConfigVariables: config.Variables{ + acctest.CtRName: 
config.StringVariable(rName), + }, + QueryResultChecks: []querycheck.QueryResultCheck{ + querycheck.ExpectLength("aws_vpc.test", 2), + + querycheck.ExpectIdentity("aws_vpc.test", map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + names.AttrID: expected1.Value(), + }), + + querycheck.ExpectIdentity("aws_vpc.test", map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + names.AttrID: expected2.Value(), + }), + + querycheck.ExpectNoIdentity("aws_vpc.test", map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + names.AttrID: notExpected1.Value(), + }), + + querycheck.ExpectNoIdentity("aws_vpc.test", map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + names.AttrID: notExpected2.Value(), + }), + }, + }, + }, + }) +} + +func TestAccVPC_List_Filtered_IsDefault(t *testing.T) { + t.Skip("Skipping because ExpectError is not currently supported for Query mode") + + ctx := acctest.Context(t) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_14_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), + CheckDestroy: testAccCheckVPCDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Setup + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/VPC/list_filtered_is_default"), + }, + + // Step 2: Query + { + Query: true, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/VPC/list_filtered_is_default/"), + ExpectError: regexache.MustCompile(`The 
filter "is-default" is not supported. To list default VPCs, use the resource type "aws_default_vpc".`), + }, + }, + }) +} diff --git a/internal/service/ec2/vpc_managed_prefix_list.go b/internal/service/ec2/vpc_managed_prefix_list.go index 6d1eb93d0fbf..0fbec311b375 100644 --- a/internal/service/ec2/vpc_managed_prefix_list.go +++ b/internal/service/ec2/vpc_managed_prefix_list.go @@ -248,12 +248,12 @@ func resourceManagedPrefixListUpdate(ctx context.Context, d *schema.ResourceData } if len(descriptionOnlyRemovals) > 0 { - input := ec2.ModifyManagedPrefixListInput{ + removeInput := ec2.ModifyManagedPrefixListInput{ CurrentVersion: input.CurrentVersion, PrefixListId: aws.String(d.Id()), RemoveEntries: descriptionOnlyRemovals, } - _, err := conn.ModifyManagedPrefixList(ctx, &input) + _, err := conn.ModifyManagedPrefixList(ctx, &removeInput) if err != nil { return sdkdiag.AppendErrorf(diags, "updating EC2 Managed Prefix List (%s): %s", d.Id(), err) @@ -335,13 +335,13 @@ func updateMaxEntry(ctx context.Context, conn *ec2.Client, id string, maxEntries _, err := conn.ModifyManagedPrefixList(ctx, &input) if err != nil { - return fmt.Errorf("updating MaxEntries for EC2 Managed Prefix List (%s): %s", id, err) + return fmt.Errorf("updating MaxEntries for EC2 Managed Prefix List (%s): %w", id, err) } _, err = waitManagedPrefixListModified(ctx, conn, id) if err != nil { - return fmt.Errorf("waiting for EC2 Managed Prefix List (%s) MaxEntries update: %s", id, err) + return fmt.Errorf("waiting for EC2 Managed Prefix List (%s) MaxEntries update: %w", id, err) } return nil diff --git a/internal/service/ec2/vpc_managed_prefix_list_entry.go b/internal/service/ec2/vpc_managed_prefix_list_entry.go index dcb602411447..99560a11decd 100644 --- a/internal/service/ec2/vpc_managed_prefix_list_entry.go +++ b/internal/service/ec2/vpc_managed_prefix_list_entry.go @@ -71,7 +71,7 @@ func resourceManagedPrefixListEntryCreate(ctx context.Context, d *schema.Resourc addPrefixListEntry.Description = 
aws.String(v.(string)) } - _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, d.Timeout(schema.TimeoutCreate), func() (any, error) { + _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, d.Timeout(schema.TimeoutCreate), func(ctx context.Context) (any, error) { mutexKey := fmt.Sprintf("vpc-managed-prefix-list-%s", plID) conns.GlobalMutexKV.Lock(mutexKey) defer conns.GlobalMutexKV.Unlock(mutexKey) @@ -115,7 +115,7 @@ func resourceManagedPrefixListEntryRead(ctx context.Context, d *schema.ResourceD return sdkdiag.AppendFromErr(diags, err) } - outputRaw, err := tfresource.RetryWhenNewResourceNotFound(ctx, managedPrefixListEntryCreateTimeout, func() (any, error) { + entry, err := tfresource.RetryWhenNewResourceNotFound(ctx, managedPrefixListEntryCreateTimeout, func(ctx context.Context) (*awstypes.PrefixListEntry, error) { return findManagedPrefixListEntryByIDAndCIDR(ctx, conn, plID, cidr) }, d.IsNewResource()) @@ -129,8 +129,6 @@ func resourceManagedPrefixListEntryRead(ctx context.Context, d *schema.ResourceD return sdkdiag.AppendErrorf(diags, "reading VPC Managed Prefix List Entry (%s): %s", d.Id(), err) } - entry := outputRaw.(*awstypes.PrefixListEntry) - d.Set("cidr", entry.Cidr) d.Set(names.AttrDescription, entry.Description) @@ -148,7 +146,7 @@ func resourceManagedPrefixListEntryDelete(ctx context.Context, d *schema.Resourc return sdkdiag.AppendFromErr(diags, err) } - _, err = tfresource.RetryWhenAWSErrCodeEquals(ctx, d.Timeout(schema.TimeoutCreate), func() (any, error) { + _, err = tfresource.RetryWhenAWSErrCodeEquals(ctx, d.Timeout(schema.TimeoutCreate), func(ctx context.Context) (any, error) { mutexKey := fmt.Sprintf("vpc-managed-prefix-list-%s", plID) conns.GlobalMutexKV.Lock(mutexKey) defer conns.GlobalMutexKV.Unlock(mutexKey) diff --git a/internal/service/ec2/vpc_managed_prefix_list_test.go b/internal/service/ec2/vpc_managed_prefix_list_test.go index 6f4bb6894295..20b387018798 100644 --- a/internal/service/ec2/vpc_managed_prefix_list_test.go +++ 
b/internal/service/ec2/vpc_managed_prefix_list_test.go @@ -358,6 +358,41 @@ func TestAccVPCManagedPrefixList_tags(t *testing.T) { }) } +func TestAccVPCManagedPrefixList_descriptionOnlyChange(t *testing.T) { + ctx := acctest.Context(t) + resourceName := "aws_ec2_managed_prefix_list.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheckManagedPrefixList(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckManagedPrefixListDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccVPCManagedPrefixListConfig_simpleDescriptionChange(rName, "old description"), + Check: resource.ComposeAggregateTestCheckFunc( + testAccManagedPrefixListExists(ctx, resourceName), + resource.TestCheckResourceAttr(resourceName, "entry.#", "1"), + ), + }, + { + // This reproduces the bug: change ONLY the description + // Before the fix, this would fail with "PrefixListVersionMismatch" + Config: testAccVPCManagedPrefixListConfig_simpleDescriptionChange(rName, "new description"), + Check: resource.ComposeAggregateTestCheckFunc( + testAccManagedPrefixListExists(ctx, resourceName), + resource.TestCheckResourceAttr(resourceName, "entry.#", "1"), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "entry.*", map[string]string{ + "cidr": "1.0.0.0/8", + names.AttrDescription: "new description", + }), + ), + }, + }, + }) +} + func testAccCheckManagedPrefixListDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { conn := acctest.Provider.Meta().(*conns.AWSClient).EC2Client(ctx) @@ -556,3 +591,18 @@ resource "aws_ec2_managed_prefix_list" "test" { } `, rName, tagKey1, tagValue1, tagKey2, tagValue2) } + +func testAccVPCManagedPrefixListConfig_simpleDescriptionChange(rName string, description string) string { + return fmt.Sprintf(` 
+resource "aws_ec2_managed_prefix_list" "test" { + address_family = "IPv4" + max_entries = 1 + name = %[1]q + + entry { + cidr = "1.0.0.0/8" + description = %[2]q + } +} +`, rName, description) +} diff --git a/internal/service/ec2/vpc_nat_gateway.go b/internal/service/ec2/vpc_nat_gateway.go index 5e0b542b81eb..dc697d72cbef 100644 --- a/internal/service/ec2/vpc_nat_gateway.go +++ b/internal/service/ec2/vpc_nat_gateway.go @@ -82,6 +82,7 @@ func resourceNATGateway() *schema.Resource { "secondary_allocation_ids": { Type: schema.TypeSet, Optional: true, + Computed: true, Elem: &schema.Schema{Type: schema.TypeString}, }, "secondary_private_ip_address_count": { @@ -258,7 +259,7 @@ func resourceNATGatewayUpdate(ctx context.Context, d *schema.ResourceData, meta } } case awstypes.ConnectivityTypePublic: - if d.HasChanges("secondary_allocation_ids") { + if !d.GetRawConfig().GetAttr("secondary_allocation_ids").IsNull() && d.HasChanges("secondary_allocation_ids") { o, n := d.GetChange("secondary_allocation_ids") os, ns := o.(*schema.Set), n.(*schema.Set) @@ -368,14 +369,14 @@ func resourceNATGatewayCustomizeDiff(ctx context.Context, diff *schema.ResourceD if diff.Id() != "" && diff.HasChange("secondary_private_ip_address_count") { if v := diff.GetRawConfig().GetAttr("secondary_private_ip_address_count"); v.IsKnown() && !v.IsNull() { if err := diff.ForceNew("secondary_private_ip_address_count"); err != nil { - return fmt.Errorf("setting secondary_private_ip_address_count to ForceNew: %s", err) + return fmt.Errorf("setting secondary_private_ip_address_count to ForceNew: %w", err) } } } if diff.Id() != "" && diff.HasChange("secondary_private_ip_addresses") { if err := diff.SetNewComputed("secondary_private_ip_address_count"); err != nil { - return fmt.Errorf("setting secondary_private_ip_address_count to computed: %s", err) + return fmt.Errorf("setting secondary_private_ip_address_count to Computed: %w", err) } } case awstypes.ConnectivityTypePublic: @@ -383,14 +384,16 @@ func 
resourceNATGatewayCustomizeDiff(ctx context.Context, diff *schema.ResourceD return fmt.Errorf(`secondary_private_ip_address_count is not supported with connectivity_type = "%s"`, connectivityType) } - if diff.Id() != "" && diff.HasChange("secondary_allocation_ids") { - if err := diff.SetNewComputed("secondary_private_ip_address_count"); err != nil { - return fmt.Errorf("setting secondary_private_ip_address_count to computed: %s", err) - } + if diff.Id() != "" { + if v := diff.GetRawConfig().GetAttr("secondary_allocation_ids"); diff.HasChange("secondary_allocation_ids") || !v.IsWhollyKnown() { + if err := diff.SetNewComputed("secondary_private_ip_address_count"); err != nil { + return fmt.Errorf("setting secondary_private_ip_address_count to Computed: %w", err) + } - if v := diff.GetRawConfig().GetAttr("secondary_private_ip_addresses"); !v.IsKnown() || v.IsNull() { - if err := diff.SetNewComputed("secondary_private_ip_addresses"); err != nil { - return fmt.Errorf("setting secondary_private_ip_addresses to computed: %s", err) + if v := diff.GetRawConfig().GetAttr("secondary_private_ip_addresses"); !v.IsKnown() || v.IsNull() { + if err := diff.SetNewComputed("secondary_private_ip_addresses"); err != nil { + return fmt.Errorf("setting secondary_private_ip_addresses to Computed: %w", err) + } } } } diff --git a/internal/service/ec2/vpc_nat_gateway_eip_association.go b/internal/service/ec2/vpc_nat_gateway_eip_association.go new file mode 100644 index 000000000000..6119ad9464cd --- /dev/null +++ b/internal/service/ec2/vpc_nat_gateway_eip_association.go @@ -0,0 +1,196 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package ec2 + +import ( + "context" + "fmt" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/ec2" + "github.com/hashicorp/aws-sdk-go-base/v2/tfawserr" + "github.com/hashicorp/terraform-plugin-framework-timeouts/resource/timeouts" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-provider-aws/internal/errs/fwdiag" + intflex "github.com/hashicorp/terraform-provider-aws/internal/flex" + "github.com/hashicorp/terraform-provider-aws/internal/framework" + fwflex "github.com/hashicorp/terraform-provider-aws/internal/framework/flex" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + "github.com/hashicorp/terraform-provider-aws/names" +) + +// @FrameworkResource("aws_nat_gateway_eip_association", name="VPC NAT Gateway EIP Association") +func newNATGatewayEIPAssociationResource(_ context.Context) (resource.ResourceWithConfigure, error) { + r := &natGatewayEIPAssociationResource{} + + r.SetDefaultCreateTimeout(10 * time.Minute) + r.SetDefaultDeleteTimeout(30 * time.Minute) + + return r, nil +} + +type natGatewayEIPAssociationResource struct { + framework.ResourceWithModel[natGatewayEIPAssociationResourceModel] + framework.WithNoUpdate + framework.WithTimeouts +} + +func (r *natGatewayEIPAssociationResource) Schema(ctx context.Context, request resource.SchemaRequest, response *resource.SchemaResponse) { + response.Schema = schema.Schema{ + Attributes: map[string]schema.Attribute{ + "allocation_id": schema.StringAttribute{ + Required: true, + PlanModifiers: []planmodifier.String{ + 
stringplanmodifier.RequiresReplace(), + }, + }, + names.AttrAssociationID: schema.StringAttribute{ + Computed: true, + }, + "nat_gateway_id": schema.StringAttribute{ + Required: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + }, + }, + Blocks: map[string]schema.Block{ + names.AttrTimeouts: timeouts.Block(ctx, timeouts.Opts{ + Create: true, + Delete: true, + }), + }, + } +} + +func (r *natGatewayEIPAssociationResource) Create(ctx context.Context, request resource.CreateRequest, response *resource.CreateResponse) { + var data natGatewayEIPAssociationResourceModel + response.Diagnostics.Append(request.Plan.Get(ctx, &data)...) + if response.Diagnostics.HasError() { + return + } + + conn := r.Meta().EC2Client(ctx) + + natGatewayID, allocationID := fwflex.StringValueFromFramework(ctx, data.NATGatewayID), fwflex.StringValueFromFramework(ctx, data.AllocationID) + input := ec2.AssociateNatGatewayAddressInput{ + AllocationIds: []string{allocationID}, + NatGatewayId: aws.String(natGatewayID), + } + + _, err := conn.AssociateNatGatewayAddress(ctx, &input) + + if err != nil { + response.Diagnostics.AddError(fmt.Sprintf("creating VPC NAT Gateway (%s) EIP (%s) Association", natGatewayID, allocationID), err.Error()) + + return + } + + output, err := waitNATGatewayAddressAssociated(ctx, conn, natGatewayID, allocationID, r.CreateTimeout(ctx, data.Timeouts)) + + if err != nil { + response.Diagnostics.AddError(fmt.Sprintf("waiting for VPC NAT Gateway (%s) EIP (%s) Association create", natGatewayID, allocationID), err.Error()) + + return + } + + // Set values for unknowns. + data.AssociationID = fwflex.StringToFramework(ctx, output.AssociationId) + + response.Diagnostics.Append(response.State.Set(ctx, data)...) 
+} + +func (r *natGatewayEIPAssociationResource) Read(ctx context.Context, request resource.ReadRequest, response *resource.ReadResponse) { + var data natGatewayEIPAssociationResourceModel + response.Diagnostics.Append(request.State.Get(ctx, &data)...) + if response.Diagnostics.HasError() { + return + } + + conn := r.Meta().EC2Client(ctx) + + natGatewayID, allocationID := fwflex.StringValueFromFramework(ctx, data.NATGatewayID), fwflex.StringValueFromFramework(ctx, data.AllocationID) + output, err := findNATGatewayAddressByNATGatewayIDAndAllocationIDSucceeded(ctx, conn, natGatewayID, allocationID) + + if tfresource.NotFound(err) { + response.Diagnostics.Append(fwdiag.NewResourceNotFoundWarningDiagnostic(err)) + response.State.RemoveResource(ctx) + + return + } + + if err != nil { + response.Diagnostics.AddError(fmt.Sprintf("reading VPC NAT Gateway (%s) EIP (%s) Association", natGatewayID, allocationID), err.Error()) + + return + } + + // Set attributes for import. + data.AssociationID = fwflex.StringToFramework(ctx, output.AssociationId) + + response.Diagnostics.Append(response.State.Set(ctx, &data)...) +} + +func (r *natGatewayEIPAssociationResource) Delete(ctx context.Context, request resource.DeleteRequest, response *resource.DeleteResponse) { + var data natGatewayEIPAssociationResourceModel + response.Diagnostics.Append(request.State.Get(ctx, &data)...) 
+ if response.Diagnostics.HasError() { + return + } + + conn := r.Meta().EC2Client(ctx) + + natGatewayID, allocationID, associationID := fwflex.StringValueFromFramework(ctx, data.NATGatewayID), fwflex.StringValueFromFramework(ctx, data.AllocationID), fwflex.StringValueFromFramework(ctx, data.AssociationID) + input := ec2.DisassociateNatGatewayAddressInput{ + AssociationIds: []string{associationID}, + NatGatewayId: aws.String(natGatewayID), + } + _, err := conn.DisassociateNatGatewayAddress(ctx, &input) + + if tfawserr.ErrCodeEquals(err, errCodeInvalidParameter) { + return + } + + if err != nil { + response.Diagnostics.AddError(fmt.Sprintf("deleting VPC NAT Gateway (%s) EIP (%s) Association (%s)", natGatewayID, allocationID, associationID), err.Error()) + + return + } + + if _, err := waitNATGatewayAddressDisassociated(ctx, conn, data.NATGatewayID.ValueString(), data.AllocationID.ValueString(), r.DeleteTimeout(ctx, data.Timeouts)); err != nil { + response.Diagnostics.AddError(fmt.Sprintf("waiting for VPC NAT Gateway (%s) EIP (%s) Association delete", natGatewayID, allocationID), err.Error()) + + return + } +} + +func (r *natGatewayEIPAssociationResource) ImportState(ctx context.Context, request resource.ImportStateRequest, response *resource.ImportStateResponse) { + const ( + natGatewayEIPAssociationIDParts = 2 + ) + parts, err := intflex.ExpandResourceId(request.ID, natGatewayEIPAssociationIDParts, true) + + if err != nil { + response.Diagnostics.Append(fwdiag.NewParsingResourceIDErrorDiagnostic(err)) + + return + } + + response.Diagnostics.Append(response.State.SetAttribute(ctx, path.Root("nat_gateway_id"), parts[0])...) + response.Diagnostics.Append(response.State.SetAttribute(ctx, path.Root("allocation_id"), parts[1])...) 
+} + +type natGatewayEIPAssociationResourceModel struct { + framework.WithRegionModel + AllocationID types.String `tfsdk:"allocation_id"` + AssociationID types.String `tfsdk:"association_id"` + NATGatewayID types.String `tfsdk:"nat_gateway_id"` + Timeouts timeouts.Value `tfsdk:"timeouts"` +} diff --git a/internal/service/ec2/vpc_nat_gateway_eip_association_test.go b/internal/service/ec2/vpc_nat_gateway_eip_association_test.go new file mode 100644 index 000000000000..7ae72f0eff60 --- /dev/null +++ b/internal/service/ec2/vpc_nat_gateway_eip_association_test.go @@ -0,0 +1,166 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package ec2_test + +import ( + "context" + "fmt" + "testing" + + "github.com/aws/aws-sdk-go-v2/service/ec2/types" + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/knownvalue" + "github.com/hashicorp/terraform-plugin-testing/plancheck" + "github.com/hashicorp/terraform-plugin-testing/statecheck" + "github.com/hashicorp/terraform-plugin-testing/terraform" + "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + tfec2 "github.com/hashicorp/terraform-provider-aws/internal/service/ec2" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccVPCNATGatewayEIPAssociation_basic(t *testing.T) { + ctx := acctest.Context(t) + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + var v types.NatGatewayAddress + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_nat_gateway_eip_association.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: 
acctest.ErrorCheck(t, names.EC2ServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckVPCNATGatewayEIPAssociationDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccVPCNATGatewayEIPAssociationConfig_basic(rName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckVPCNATGatewayEIPAssociationExists(ctx, resourceName, &v), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrAssociationID), knownvalue.NotNull()), + }, + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "nat_gateway_id", + ImportStateIdFunc: testAccVPCNATGatewayEIPAssociationImportStateIDFunc(resourceName), + }, + }, + }) +} + +func TestAccVPCNATGatewayEIPAssociation_disappears(t *testing.T) { + ctx := acctest.Context(t) + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + var v types.NatGatewayAddress + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_nat_gateway_eip_association.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckVPCNATGatewayEIPAssociationDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccVPCNATGatewayEIPAssociationConfig_basic(rName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckVPCNATGatewayEIPAssociationExists(ctx, resourceName, &v), + acctest.CheckFrameworkResourceDisappears(ctx, acctest.Provider, tfec2.ResourceNATGatewayEIPAssociation, resourceName), + ), + ExpectNonEmptyPlan: true, + }, + }, + }) +} + +func 
testAccCheckVPCNATGatewayEIPAssociationDestroy(ctx context.Context) resource.TestCheckFunc { + return func(s *terraform.State) error { + conn := acctest.Provider.Meta().(*conns.AWSClient).EC2Client(ctx) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "aws_nat_gateway_eip_association" { + continue + } + + _, err := tfec2.FindNATGatewayAddressByNATGatewayIDAndAllocationIDSucceeded(ctx, conn, rs.Primary.Attributes["nat_gateway_id"], rs.Primary.Attributes["allocation_id"]) + + if tfresource.NotFound(err) { + continue + } + + if err != nil { + return err + } + + return fmt.Errorf("VPC NAT Gateway %s EIP %s Association still exists", rs.Primary.Attributes["nat_gateway_id"], rs.Primary.Attributes["allocation_id"]) + } + + return nil + } +} + +func testAccCheckVPCNATGatewayEIPAssociationExists(ctx context.Context, n string, v *types.NatGatewayAddress) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + conn := acctest.Provider.Meta().(*conns.AWSClient).EC2Client(ctx) + + output, err := tfec2.FindNATGatewayAddressByNATGatewayIDAndAllocationIDSucceeded(ctx, conn, rs.Primary.Attributes["nat_gateway_id"], rs.Primary.Attributes["allocation_id"]) + + if err != nil { + return err + } + + *v = *output + + return nil + } +} + +func testAccVPCNATGatewayEIPAssociationImportStateIDFunc(n string) resource.ImportStateIdFunc { + return func(s *terraform.State) (string, error) { + rs, ok := s.RootModule().Resources[n] + if !ok { + return "", fmt.Errorf("Not Found: %s", n) + } + + return fmt.Sprintf("%s,%s", rs.Primary.Attributes["nat_gateway_id"], rs.Primary.Attributes["allocation_id"]), nil + } +} + +func testAccVPCNATGatewayEIPAssociationConfig_basic(rName string) string { + return acctest.ConfigCompose(testAccVPCNATGatewayConfig_basic(rName), fmt.Sprintf(` +resource "aws_eip" "secondary" { + domain = "vpc" + + tags = { + Name = %[1]q + } +} + +resource 
"aws_nat_gateway_eip_association" "test" { + allocation_id = aws_eip.secondary.id + nat_gateway_id = aws_nat_gateway.test.id +} +`, rName)) +} diff --git a/internal/service/ec2/vpc_nat_gateway_test.go b/internal/service/ec2/vpc_nat_gateway_test.go index eb2b2950c71c..00fec123e5eb 100644 --- a/internal/service/ec2/vpc_nat_gateway_test.go +++ b/internal/service/ec2/vpc_nat_gateway_test.go @@ -281,6 +281,66 @@ func TestAccVPCNATGateway_secondaryAllocationIDs(t *testing.T) { }) } +func TestAccVPCNATGateway_addSecondaryAllocationIDs(t *testing.T) { + ctx := acctest.Context(t) + var natGateway awstypes.NatGateway + resourceName := "aws_nat_gateway.test" + eipResourceName := "aws_eip.secondary" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckNATGatewayDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccVPCNATGatewayConfig_basic(rName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckNATGatewayExists(ctx, resourceName, &natGateway), + resource.TestCheckResourceAttr(resourceName, "secondary_allocation_ids.#", "0"), + resource.TestCheckResourceAttr(resourceName, "secondary_private_ip_address_count", "0"), + resource.TestCheckResourceAttr(resourceName, "secondary_private_ip_addresses.#", "0"), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + }, + }, + }, + { + Config: testAccVPCNATGatewayConfig_secondaryAllocationIDs(rName, true), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckNATGatewayExists(ctx, resourceName, &natGateway), + resource.TestCheckResourceAttr(resourceName, "secondary_allocation_ids.#", "1"), + resource.TestCheckTypeSetElemAttrPair(resourceName, 
"secondary_allocation_ids.*", eipResourceName, names.AttrID), + resource.TestCheckResourceAttr(resourceName, "secondary_private_ip_address_count", "1"), + resource.TestCheckResourceAttr(resourceName, "secondary_private_ip_addresses.#", "1"), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + }, + }, + }, + { + Config: testAccVPCNATGatewayConfig_secondaryAllocationIDs(rName, false), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckNATGatewayExists(ctx, resourceName, &natGateway), + resource.TestCheckResourceAttr(resourceName, "secondary_allocation_ids.#", "0"), + resource.TestCheckResourceAttr(resourceName, "secondary_private_ip_address_count", "0"), + resource.TestCheckResourceAttr(resourceName, "secondary_private_ip_addresses.#", "0"), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + }, + }, + }, + }, + }) +} + func TestAccVPCNATGateway_secondaryPrivateIPAddressCount(t *testing.T) { ctx := acctest.Context(t) var natGateway awstypes.NatGateway @@ -691,7 +751,7 @@ resource "aws_eip" "secondary" { resource "aws_nat_gateway" "test" { allocation_id = aws_eip.test.id subnet_id = aws_subnet.public.id - secondary_allocation_ids = %[2]t ? [aws_eip.secondary.id] : null + secondary_allocation_ids = %[2]t ? [aws_eip.secondary.id] : [] tags = { Name = %[1]q @@ -731,8 +791,8 @@ resource "aws_eip" "secondary" { resource "aws_nat_gateway" "test" { allocation_id = aws_eip.test.id subnet_id = aws_subnet.private.id - secondary_allocation_ids = %[2]t ? [aws_eip.secondary.id] : null - secondary_private_ip_addresses = %[2]t ? ["10.0.1.5"] : null + secondary_allocation_ids = %[2]t ? [aws_eip.secondary.id] : [] + secondary_private_ip_addresses = %[2]t ? 
["10.0.1.5"] : [] tags = { Name = %[1]q diff --git a/internal/service/ec2/vpc_network_acl.go b/internal/service/ec2/vpc_network_acl.go index e96e4c8b4f7a..4c59f0b906a0 100644 --- a/internal/service/ec2/vpc_network_acl.go +++ b/internal/service/ec2/vpc_network_acl.go @@ -12,7 +12,6 @@ import ( "strings" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/arn" "github.com/aws/aws-sdk-go-v2/service/ec2" awstypes "github.com/aws/aws-sdk-go-v2/service/ec2/types" "github.com/hashicorp/aws-sdk-go-base/v2/tfawserr" @@ -192,9 +191,10 @@ func resourceNetworkACLCreate(ctx context.Context, d *schema.ResourceData, meta func resourceNetworkACLRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).EC2Client(ctx) + c := meta.(*conns.AWSClient) + conn := c.EC2Client(ctx) - outputRaw, err := tfresource.RetryWhenNewResourceNotFound(ctx, ec2PropagationTimeout, func() (any, error) { + nacl, err := tfresource.RetryWhenNewResourceNotFound(ctx, ec2PropagationTimeout, func(ctx context.Context) (*awstypes.NetworkAcl, error) { return findNetworkACLByID(ctx, conn, d.Id()) }, d.IsNewResource()) @@ -208,17 +208,8 @@ func resourceNetworkACLRead(ctx context.Context, d *schema.ResourceData, meta an return sdkdiag.AppendErrorf(diags, "reading EC2 Network ACL (%s): %s", d.Id(), err) } - nacl := outputRaw.(*awstypes.NetworkAcl) - ownerID := aws.ToString(nacl.OwnerId) - arn := arn.ARN{ - Partition: meta.(*conns.AWSClient).Partition(ctx), - Service: names.EC2, - Region: meta.(*conns.AWSClient).Region(ctx), - AccountID: ownerID, - Resource: fmt.Sprintf("network-acl/%s", d.Id()), - }.String() - d.Set(names.AttrARN, arn) + d.Set(names.AttrARN, networkACLARN(ctx, c, ownerID, d.Id())) d.Set(names.AttrOwnerID, ownerID) var subnetIDs []string @@ -297,7 +288,7 @@ func resourceNetworkACLDelete(ctx context.Context, d *schema.ResourceData, meta } log.Printf("[INFO] Deleting EC2 Network ACL: %s", d.Id()) - _, 
err = tfresource.RetryWhenAWSErrCodeEquals(ctx, ec2PropagationTimeout, func() (any, error) { + _, err = tfresource.RetryWhenAWSErrCodeEquals(ctx, ec2PropagationTimeout, func(ctx context.Context) (any, error) { return conn.DeleteNetworkAcl(ctx, input) }, errCodeDependencyViolation) @@ -831,3 +822,7 @@ var ( }) ianaProtocolIToA = ianaProtocolAToI.invert() ) + +func networkACLARN(ctx context.Context, c *conns.AWSClient, accountID, networkACLID string) string { + return c.RegionalARNWithAccount(ctx, names.EC2, accountID, "network-acl/"+networkACLID) +} diff --git a/internal/service/ec2/vpc_network_acl_association.go b/internal/service/ec2/vpc_network_acl_association.go index 68259f4af755..4122ab09eded 100644 --- a/internal/service/ec2/vpc_network_acl_association.go +++ b/internal/service/ec2/vpc_network_acl_association.go @@ -64,7 +64,7 @@ func resourceNetworkACLAssociationRead(ctx context.Context, d *schema.ResourceDa var diags diag.Diagnostics conn := meta.(*conns.AWSClient).EC2Client(ctx) - outputRaw, err := tfresource.RetryWhenNewResourceNotFound(ctx, ec2PropagationTimeout, func() (any, error) { + association, err := tfresource.RetryWhenNewResourceNotFound(ctx, ec2PropagationTimeout, func(ctx context.Context) (*awstypes.NetworkAclAssociation, error) { return findNetworkACLAssociationByID(ctx, conn, d.Id()) }, d.IsNewResource()) @@ -78,8 +78,6 @@ func resourceNetworkACLAssociationRead(ctx context.Context, d *schema.ResourceDa return sdkdiag.AppendErrorf(diags, "reading EC2 Network ACL Association (%s): %s", d.Id(), err) } - association := outputRaw.(*awstypes.NetworkAclAssociation) - d.Set("network_acl_id", association.NetworkAclId) d.Set(names.AttrSubnetID, association.SubnetId) @@ -135,7 +133,7 @@ func networkACLAssociationCreate(ctx context.Context, conn *ec2.Client, naclID, } log.Printf("[DEBUG] Creating EC2 Network ACL Association: %#v", input) - outputRaw, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, ec2PropagationTimeout, func() (any, error) { + 
outputRaw, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, ec2PropagationTimeout, func(ctx context.Context) (any, error) { return conn.ReplaceNetworkAclAssociation(ctx, input) }, errCodeInvalidAssociationIDNotFound) diff --git a/internal/service/ec2/vpc_network_acl_rule.go b/internal/service/ec2/vpc_network_acl_rule.go index e3301bcb99f8..0976af3a04e6 100644 --- a/internal/service/ec2/vpc_network_acl_rule.go +++ b/internal/service/ec2/vpc_network_acl_rule.go @@ -195,7 +195,7 @@ func resourceNetworkACLRuleRead(ctx context.Context, d *schema.ResourceData, met naclID := d.Get("network_acl_id").(string) ruleNumber := d.Get("rule_number").(int) - outputRaw, err := tfresource.RetryWhenNewResourceNotFound(ctx, ec2PropagationTimeout, func() (any, error) { + naclEntry, err := tfresource.RetryWhenNewResourceNotFound(ctx, ec2PropagationTimeout, func(ctx context.Context) (*awstypes.NetworkAclEntry, error) { return findNetworkACLEntryByThreePartKey(ctx, conn, naclID, egress, ruleNumber) }, d.IsNewResource()) @@ -209,8 +209,6 @@ func resourceNetworkACLRuleRead(ctx context.Context, d *schema.ResourceData, met return sdkdiag.AppendErrorf(diags, "reading EC2 Network ACL Rule (%s): %s", d.Id(), err) } - naclEntry := outputRaw.(*awstypes.NetworkAclEntry) - d.Set(names.AttrCIDRBlock, naclEntry.CidrBlock) d.Set("egress", naclEntry.Egress) d.Set("ipv6_cidr_block", naclEntry.Ipv6CidrBlock) diff --git a/internal/service/ec2/vpc_network_insights_path.go b/internal/service/ec2/vpc_network_insights_path.go index 83ce6f4cb29c..63b9561e418c 100644 --- a/internal/service/ec2/vpc_network_insights_path.go +++ b/internal/service/ec2/vpc_network_insights_path.go @@ -265,7 +265,7 @@ func resourceNetworkInsightsPathDelete(ctx context.Context, d *schema.ResourceDa input := ec2.DeleteNetworkInsightsPathInput{ NetworkInsightsPathId: aws.String(d.Id()), } - _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, ec2PropagationTimeout, func() (any, error) { + _, err := 
tfresource.RetryWhenAWSErrCodeEquals(ctx, ec2PropagationTimeout, func(ctx context.Context) (any, error) { return conn.DeleteNetworkInsightsPath(ctx, &input) }, errCodeAnalysisExistsForNetworkInsightsPath) diff --git a/internal/service/ec2/vpc_network_interface.go b/internal/service/ec2/vpc_network_interface.go index 06cd0944dbe3..0a91573d0942 100644 --- a/internal/service/ec2/vpc_network_interface.go +++ b/internal/service/ec2/vpc_network_interface.go @@ -11,15 +11,14 @@ import ( "time" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/arn" "github.com/aws/aws-sdk-go-v2/service/ec2" - "github.com/aws/aws-sdk-go-v2/service/ec2/types" + awstypes "github.com/aws/aws-sdk-go-v2/service/ec2/types" "github.com/hashicorp/aws-sdk-go-base/v2/tfawserr" multierror "github.com/hashicorp/go-multierror" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" + sdkid "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -69,6 +68,11 @@ func resourceNetworkInterface() *schema.Resource { Type: schema.TypeString, Required: true, }, + "network_card_index": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + }, }, }, }, @@ -86,7 +90,7 @@ func resourceNetworkInterface() *schema.Resource { Optional: true, Computed: true, ForceNew: true, - ValidateDiagFunc: enum.Validate[types.NetworkInterfaceCreationType](), + ValidateDiagFunc: enum.Validate[awstypes.NetworkInterfaceCreationType](), }, "ipv4_prefixes": { Type: schema.TypeSet, @@ -349,9 +353,8 @@ func resourceNetworkInterfaceCreate(ctx context.Context, d *schema.ResourceData, ipv4PrefixesSpecified := false ipv6PrefixesSpecified := false - - input := 
&ec2.CreateNetworkInterfaceInput{ - ClientToken: aws.String(id.UniqueId()), + input := ec2.CreateNetworkInterfaceInput{ + ClientToken: aws.String(sdkid.UniqueId()), SubnetId: aws.String(d.Get(names.AttrSubnetID).(string)), } @@ -364,7 +367,7 @@ func resourceNetworkInterfaceCreate(ctx context.Context, d *schema.ResourceData, } if v, ok := d.GetOk("interface_type"); ok { - input.InterfaceType = types.NetworkInterfaceCreationType(v.(string)) + input.InterfaceType = awstypes.NetworkInterfaceCreationType(v.(string)) } if v, ok := d.GetOk("ipv4_prefixes"); ok && v.(*schema.Set).Len() > 0 { @@ -432,10 +435,10 @@ func resourceNetworkInterfaceCreate(ctx context.Context, d *schema.ResourceData, // If IPv4 or IPv6 prefixes are specified, tag after create. // Otherwise "An error occurred (InternalError) when calling the CreateNetworkInterface operation". if !(ipv4PrefixesSpecified || ipv6PrefixesSpecified) { - input.TagSpecifications = getTagSpecificationsIn(ctx, types.ResourceTypeNetworkInterface) + input.TagSpecifications = getTagSpecificationsIn(ctx, awstypes.ResourceTypeNetworkInterface) } - output, err := conn.CreateNetworkInterface(ctx, input) + output, err := conn.CreateNetworkInterface(ctx, &input) if err != nil { return sdkdiag.AppendErrorf(diags, "creating EC2 Network Interface: %s", err) @@ -453,12 +456,12 @@ func resourceNetworkInterfaceCreate(ctx context.Context, d *schema.ResourceData, totalPrivateIPs := v.(*schema.Set).Len() if privateIPsCount, ok := d.GetOk("private_ips_count"); ok { if privateIPsCount.(int)+1 > totalPrivateIPs { - input := &ec2.AssignPrivateIpAddressesInput{ + input := ec2.AssignPrivateIpAddressesInput{ NetworkInterfaceId: aws.String(d.Id()), SecondaryPrivateIpAddressCount: aws.Int32(int32(privateIPsCount.(int) + 1 - totalPrivateIPs)), } - _, err := conn.AssignPrivateIpAddresses(ctx, input) + _, err := conn.AssignPrivateIpAddresses(ctx, &input) if err != nil { return sdkdiag.AppendErrorf(diags, "assigning EC2 Network Interface (%s) private 
IPv4 addresses: %s", d.Id(), err) @@ -476,12 +479,12 @@ func resourceNetworkInterfaceCreate(ctx context.Context, d *schema.ResourceData, // Default value is enabled. if !d.Get("source_dest_check").(bool) { - input := &ec2.ModifyNetworkInterfaceAttributeInput{ + input := ec2.ModifyNetworkInterfaceAttributeInput{ NetworkInterfaceId: aws.String(d.Id()), - SourceDestCheck: &types.AttributeBooleanValue{Value: aws.Bool(false)}, + SourceDestCheck: &awstypes.AttributeBooleanValue{Value: aws.Bool(false)}, } - _, err := conn.ModifyNetworkInterfaceAttribute(ctx, input) + _, err := conn.ModifyNetworkInterfaceAttribute(ctx, &input) if err != nil { return sdkdiag.AppendErrorf(diags, "modifying EC2 Network Interface (%s) SourceDestCheck: %s", d.Id(), err) @@ -490,8 +493,19 @@ func resourceNetworkInterfaceCreate(ctx context.Context, d *schema.ResourceData, if v, ok := d.GetOk("attachment"); ok && v.(*schema.Set).Len() > 0 { attachment := v.(*schema.Set).List()[0].(map[string]any) + input := ec2.AttachNetworkInterfaceInput{ + NetworkInterfaceId: aws.String(d.Id()), + InstanceId: aws.String(attachment["instance"].(string)), + DeviceIndex: aws.Int32(int32(attachment["device_index"].(int))), + } + + if v, ok := attachment["network_card_index"]; ok { + if v, ok := v.(int); ok { + input.NetworkCardIndex = aws.Int32(int32(v)) + } + } - _, err := attachNetworkInterface(ctx, conn, d.Id(), attachment["instance"].(string), attachment["device_index"].(int), networkInterfaceAttachedTimeout) + _, err := attachNetworkInterface(ctx, conn, &input) if err != nil { return sdkdiag.AppendFromErr(diags, err) @@ -503,9 +517,10 @@ func resourceNetworkInterfaceCreate(ctx context.Context, d *schema.ResourceData, func resourceNetworkInterfaceRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).EC2Client(ctx) + c := meta.(*conns.AWSClient) + conn := c.EC2Client(ctx) - outputRaw, err := 
tfresource.RetryWhenNewResourceNotFound(ctx, ec2PropagationTimeout, func() (any, error) { + eni, err := tfresource.RetryWhenNewResourceNotFound(ctx, ec2PropagationTimeout, func(ctx context.Context) (*awstypes.NetworkInterface, error) { return findNetworkInterfaceByID(ctx, conn, d.Id()) }, d.IsNewResource()) @@ -519,17 +534,8 @@ func resourceNetworkInterfaceRead(ctx context.Context, d *schema.ResourceData, m return sdkdiag.AppendErrorf(diags, "reading EC2 Network Interface (%s): %s", d.Id(), err) } - eni := outputRaw.(*types.NetworkInterface) - ownerID := aws.ToString(eni.OwnerId) - arn := arn.ARN{ - Partition: meta.(*conns.AWSClient).Partition(ctx), - Service: "ec2", - Region: meta.(*conns.AWSClient).Region(ctx), - AccountID: ownerID, - Resource: "network-interface/" + d.Id(), - }.String() - d.Set(names.AttrARN, arn) + d.Set(names.AttrARN, networkInterfaceARN(ctx, c, ownerID, d.Id())) if eni.Attachment != nil { if err := d.Set("attachment", []any{flattenNetworkInterfaceAttachment(eni.Attachment)}); err != nil { return sdkdiag.AppendErrorf(diags, "setting attachment: %s", err) @@ -600,8 +606,18 @@ func resourceNetworkInterfaceUpdate(ctx context.Context, d *schema.ResourceData, if na != nil && na.(*schema.Set).Len() > 0 { attachment := na.(*schema.Set).List()[0].(map[string]any) + input := ec2.AttachNetworkInterfaceInput{ + NetworkInterfaceId: aws.String(d.Id()), + InstanceId: aws.String(attachment["instance"].(string)), + DeviceIndex: aws.Int32(int32(attachment["device_index"].(int))), + } + if v, ok := attachment["network_card_index"]; ok { + if v, ok := v.(int); ok { + input.NetworkCardIndex = aws.Int32(int32(v)) + } + } - if _, err := attachNetworkInterface(ctx, conn, d.Id(), attachment["instance"].(string), attachment["device_index"].(int), networkInterfaceAttachedTimeout); err != nil { + if _, err := attachNetworkInterface(ctx, conn, &input); err != nil { return sdkdiag.AppendFromErr(diags, err) } } @@ -615,19 +631,18 @@ func resourceNetworkInterfaceUpdate(ctx 
context.Context, d *schema.ResourceData, if n == nil { n = new(schema.Set) } - os := o.(*schema.Set) ns := n.(*schema.Set) // Unassign old IP addresses. unassignIPs := os.Difference(ns) if unassignIPs.Len() != 0 { - input := &ec2.UnassignPrivateIpAddressesInput{ + input := ec2.UnassignPrivateIpAddressesInput{ NetworkInterfaceId: aws.String(d.Id()), PrivateIpAddresses: flex.ExpandStringValueSet(unassignIPs), } - _, err := conn.UnassignPrivateIpAddresses(ctx, input) + _, err := conn.UnassignPrivateIpAddresses(ctx, &input) if err != nil { return sdkdiag.AppendErrorf(diags, "unassigning EC2 Network Interface (%s) private IPv4 addresses: %s", d.Id(), err) @@ -639,12 +654,12 @@ func resourceNetworkInterfaceUpdate(ctx context.Context, d *schema.ResourceData, // Assign new IP addresses. assignIPs := ns.Difference(os) if assignIPs.Len() != 0 { - input := &ec2.AssignPrivateIpAddressesInput{ + input := ec2.AssignPrivateIpAddressesInput{ NetworkInterfaceId: aws.String(d.Id()), PrivateIpAddresses: flex.ExpandStringValueSet(assignIPs), } - _, err := conn.AssignPrivateIpAddresses(ctx, input) + _, err := conn.AssignPrivateIpAddresses(ctx, &input) if err != nil { return sdkdiag.AppendErrorf(diags, "assigning EC2 Network Interface (%s) private IPv4 addresses: %s", d.Id(), err) @@ -661,6 +676,7 @@ func resourceNetworkInterfaceUpdate(ctx context.Context, d *schema.ResourceData, if n == nil { n = make([]string, 0) } + if len(o.([]any))-1 > 0 { privateIPsToUnassign := make([]any, len(o.([]any))-1) idx := 0 @@ -674,12 +690,12 @@ func resourceNetworkInterfaceUpdate(ctx context.Context, d *schema.ResourceData, } // Unassign the secondary IP addresses - input := &ec2.UnassignPrivateIpAddressesInput{ + input := ec2.UnassignPrivateIpAddressesInput{ NetworkInterfaceId: aws.String(d.Id()), PrivateIpAddresses: flex.ExpandStringValueList(privateIPsToUnassign), } - _, err := conn.UnassignPrivateIpAddresses(ctx, input) + _, err := conn.UnassignPrivateIpAddresses(ctx, &input) if err != nil { return 
sdkdiag.AppendErrorf(diags, "unassigning EC2 Network Interface (%s) private IPv4 addresses: %s", d.Id(), err) @@ -694,12 +710,12 @@ func resourceNetworkInterfaceUpdate(ctx context.Context, d *schema.ResourceData, } privateIPToAssign := []any{ip} - input := &ec2.AssignPrivateIpAddressesInput{ + input := ec2.AssignPrivateIpAddressesInput{ NetworkInterfaceId: aws.String(d.Id()), PrivateIpAddresses: flex.ExpandStringValueList(privateIPToAssign), } - _, err := conn.AssignPrivateIpAddresses(ctx, input) + _, err := conn.AssignPrivateIpAddresses(ctx, &input) if err != nil { return sdkdiag.AppendErrorf(diags, "assigning EC2 Network Interface (%s) private IPv4 addresses: %s", d.Id(), err) @@ -721,23 +737,23 @@ func resourceNetworkInterfaceUpdate(ctx context.Context, d *schema.ResourceData, if o != nil && n != nil && n != len(privateIPsFiltered) { if diff := n.(int) - o.(int) - privateIPsNetChange; diff > 0 { - input := &ec2.AssignPrivateIpAddressesInput{ + input := ec2.AssignPrivateIpAddressesInput{ NetworkInterfaceId: aws.String(d.Id()), SecondaryPrivateIpAddressCount: aws.Int32(int32(diff)), } - _, err := conn.AssignPrivateIpAddresses(ctx, input) + _, err := conn.AssignPrivateIpAddresses(ctx, &input) if err != nil { return sdkdiag.AppendErrorf(diags, "assigning EC2 Network Interface (%s) private IPv4 addresses: %s", d.Id(), err) } } else if diff < 0 { - input := &ec2.UnassignPrivateIpAddressesInput{ + input := ec2.UnassignPrivateIpAddressesInput{ NetworkInterfaceId: aws.String(d.Id()), PrivateIpAddresses: flex.ExpandStringValueList(privateIPsFiltered[0:-diff]), } - _, err := conn.UnassignPrivateIpAddresses(ctx, input) + _, err := conn.UnassignPrivateIpAddresses(ctx, &input) if err != nil { return sdkdiag.AppendErrorf(diags, "unassigning EC2 Network Interface (%s) private IPv4 addresses: %s", d.Id(), err) @@ -752,23 +768,23 @@ func resourceNetworkInterfaceUpdate(ctx context.Context, d *schema.ResourceData, if o, n := o.(int), n.(int); n != len(ipv4Prefixes) { if diff := n - 
o; diff > 0 { - input := &ec2.AssignPrivateIpAddressesInput{ + input := ec2.AssignPrivateIpAddressesInput{ NetworkInterfaceId: aws.String(d.Id()), Ipv4PrefixCount: aws.Int32(int32(diff)), } - _, err := conn.AssignPrivateIpAddresses(ctx, input) + _, err := conn.AssignPrivateIpAddresses(ctx, &input) if err != nil { return sdkdiag.AppendErrorf(diags, "assigning EC2 Network Interface (%s) private IPv4 addresses: %s", d.Id(), err) } } else if diff < 0 { - input := &ec2.UnassignPrivateIpAddressesInput{ + input := ec2.UnassignPrivateIpAddressesInput{ NetworkInterfaceId: aws.String(d.Id()), Ipv4Prefixes: flex.ExpandStringValueList(ipv4Prefixes[0:-diff]), } - _, err := conn.UnassignPrivateIpAddresses(ctx, input) + _, err := conn.UnassignPrivateIpAddresses(ctx, &input) if err != nil { return sdkdiag.AppendErrorf(diags, "unassigning EC2 Network Interface (%s) private IPv4 addresses: %s", d.Id(), err) @@ -785,19 +801,18 @@ func resourceNetworkInterfaceUpdate(ctx context.Context, d *schema.ResourceData, if n == nil { n = new(schema.Set) } - os := o.(*schema.Set) ns := n.(*schema.Set) // Unassign old IPV4 prefixes. 
unassignPrefixes := os.Difference(ns) if unassignPrefixes.Len() != 0 { - input := &ec2.UnassignPrivateIpAddressesInput{ + input := ec2.UnassignPrivateIpAddressesInput{ NetworkInterfaceId: aws.String(d.Id()), Ipv4Prefixes: flex.ExpandStringValueSet(unassignPrefixes), } - _, err := conn.UnassignPrivateIpAddresses(ctx, input) + _, err := conn.UnassignPrivateIpAddresses(ctx, &input) if err != nil { return sdkdiag.AppendErrorf(diags, "unassigning EC2 Network Interface (%s) private IPv4 addresses: %s", d.Id(), err) @@ -807,12 +822,12 @@ func resourceNetworkInterfaceUpdate(ctx context.Context, d *schema.ResourceData, // Assign new IPV4 prefixes, assignPrefixes := ns.Difference(os) if assignPrefixes.Len() != 0 { - input := &ec2.AssignPrivateIpAddressesInput{ + input := ec2.AssignPrivateIpAddressesInput{ NetworkInterfaceId: aws.String(d.Id()), Ipv4Prefixes: flex.ExpandStringValueSet(assignPrefixes), } - _, err := conn.AssignPrivateIpAddresses(ctx, input) + _, err := conn.AssignPrivateIpAddresses(ctx, &input) if err != nil { return sdkdiag.AppendErrorf(diags, "assigning EC2 Network Interface (%s) private IPv4 addresses: %s", d.Id(), err) @@ -821,12 +836,13 @@ func resourceNetworkInterfaceUpdate(ctx context.Context, d *schema.ResourceData, } if d.HasChange("enable_primary_ipv6") { - input := &ec2.ModifyNetworkInterfaceAttributeInput{ + input := ec2.ModifyNetworkInterfaceAttributeInput{ NetworkInterfaceId: aws.String(d.Id()), EnablePrimaryIpv6: aws.Bool(d.Get("enable_primary_ipv6").(bool)), } - _, err := conn.ModifyNetworkInterfaceAttribute(ctx, input) + _, err := conn.ModifyNetworkInterfaceAttribute(ctx, &input) + if err != nil { return sdkdiag.AppendErrorf(diags, "modifying EC2 Network Interface (%s) enable primary IPv6: %s", d.Id(), err) } @@ -840,19 +856,18 @@ func resourceNetworkInterfaceUpdate(ctx context.Context, d *schema.ResourceData, if n == nil { n = new(schema.Set) } - os := o.(*schema.Set) ns := n.(*schema.Set) // Unassign old IPV6 addresses. 
unassignIPs := os.Difference(ns) if unassignIPs.Len() != 0 { - input := &ec2.UnassignIpv6AddressesInput{ + input := ec2.UnassignIpv6AddressesInput{ NetworkInterfaceId: aws.String(d.Id()), Ipv6Addresses: flex.ExpandStringValueSet(unassignIPs), } - _, err := conn.UnassignIpv6Addresses(ctx, input) + _, err := conn.UnassignIpv6Addresses(ctx, &input) if err != nil { return sdkdiag.AppendErrorf(diags, "unassigning EC2 Network Interface (%s) IPv6 addresses: %s", d.Id(), err) @@ -862,12 +877,12 @@ func resourceNetworkInterfaceUpdate(ctx context.Context, d *schema.ResourceData, // Assign new IPV6 addresses, assignIPs := ns.Difference(os) if assignIPs.Len() != 0 { - input := &ec2.AssignIpv6AddressesInput{ + input := ec2.AssignIpv6AddressesInput{ NetworkInterfaceId: aws.String(d.Id()), Ipv6Addresses: flex.ExpandStringValueSet(assignIPs), } - _, err := conn.AssignIpv6Addresses(ctx, input) + _, err := conn.AssignIpv6Addresses(ctx, &input) if err != nil { return sdkdiag.AppendErrorf(diags, "assigning EC2 Network Interface (%s) IPv6 addresses: %s", d.Id(), err) @@ -881,23 +896,23 @@ func resourceNetworkInterfaceUpdate(ctx context.Context, d *schema.ResourceData, if o != nil && n != nil && n != len(ipv6Addresses) { if diff := n.(int) - o.(int); diff > 0 { - input := &ec2.AssignIpv6AddressesInput{ + input := ec2.AssignIpv6AddressesInput{ NetworkInterfaceId: aws.String(d.Id()), Ipv6AddressCount: aws.Int32(int32(diff)), } - _, err := conn.AssignIpv6Addresses(ctx, input) + _, err := conn.AssignIpv6Addresses(ctx, &input) if err != nil { return sdkdiag.AppendErrorf(diags, "assigning EC2 Network Interface (%s) IPv6 addresses: %s", d.Id(), err) } } else if diff < 0 { - input := &ec2.UnassignIpv6AddressesInput{ + input := ec2.UnassignIpv6AddressesInput{ NetworkInterfaceId: aws.String(d.Id()), Ipv6Addresses: flex.ExpandStringValueList(ipv6Addresses[0:-diff]), } - _, err := conn.UnassignIpv6Addresses(ctx, input) + _, err := conn.UnassignIpv6Addresses(ctx, &input) if err != nil { return 
sdkdiag.AppendErrorf(diags, "unassigning EC2 Network Interface (%s) IPv6 addresses: %s", d.Id(), err) @@ -920,12 +935,12 @@ func resourceNetworkInterfaceUpdate(ctx context.Context, d *schema.ResourceData, unassignIPs := make([]any, len(o.([]any))) copy(unassignIPs, o.([]any)) - input := &ec2.UnassignIpv6AddressesInput{ + input := ec2.UnassignIpv6AddressesInput{ NetworkInterfaceId: aws.String(d.Id()), Ipv6Addresses: flex.ExpandStringValueList(unassignIPs), } - _, err := conn.UnassignIpv6Addresses(ctx, input) + _, err := conn.UnassignIpv6Addresses(ctx, &input) if err != nil { return sdkdiag.AppendErrorf(diags, "unassigning EC2 Network Interface (%s) private IPv6 addresses: %s", d.Id(), err) @@ -936,12 +951,12 @@ func resourceNetworkInterfaceUpdate(ctx context.Context, d *schema.ResourceData, for _, ip := range n.([]any) { privateIPToAssign := []any{ip} - input := &ec2.AssignIpv6AddressesInput{ + input := ec2.AssignIpv6AddressesInput{ NetworkInterfaceId: aws.String(d.Id()), Ipv6Addresses: flex.ExpandStringValueList(privateIPToAssign), } - _, err := conn.AssignIpv6Addresses(ctx, input) + _, err := conn.AssignIpv6Addresses(ctx, &input) if err != nil { return sdkdiag.AppendErrorf(diags, "assigning EC2 Network Interface (%s) private IPv6 addresses: %s", d.Id(), err) @@ -964,12 +979,12 @@ func resourceNetworkInterfaceUpdate(ctx context.Context, d *schema.ResourceData, // Unassign old IPV6 prefixes. 
unassignPrefixes := os.Difference(ns) if unassignPrefixes.Len() != 0 { - input := &ec2.UnassignIpv6AddressesInput{ + input := ec2.UnassignIpv6AddressesInput{ NetworkInterfaceId: aws.String(d.Id()), Ipv6Prefixes: flex.ExpandStringValueSet(unassignPrefixes), } - _, err := conn.UnassignIpv6Addresses(ctx, input) + _, err := conn.UnassignIpv6Addresses(ctx, &input) if err != nil { return sdkdiag.AppendErrorf(diags, "unassigning EC2 Network Interface (%s) IPv6 addresses: %s", d.Id(), err) @@ -979,12 +994,12 @@ func resourceNetworkInterfaceUpdate(ctx context.Context, d *schema.ResourceData, // Assign new IPV6 prefixes, assignPrefixes := ns.Difference(os) if assignPrefixes.Len() != 0 { - input := &ec2.AssignIpv6AddressesInput{ + input := ec2.AssignIpv6AddressesInput{ NetworkInterfaceId: aws.String(d.Id()), Ipv6Prefixes: flex.ExpandStringValueSet(assignPrefixes), } - _, err := conn.AssignIpv6Addresses(ctx, input) + _, err := conn.AssignIpv6Addresses(ctx, &input) if err != nil { return sdkdiag.AppendErrorf(diags, "assigning EC2 Network Interface (%s) IPv6 addresses: %s", d.Id(), err) @@ -998,23 +1013,23 @@ func resourceNetworkInterfaceUpdate(ctx context.Context, d *schema.ResourceData, if o, n := o.(int), n.(int); n != len(ipv6Prefixes) { if diff := n - o; diff > 0 { - input := &ec2.AssignIpv6AddressesInput{ + input := ec2.AssignIpv6AddressesInput{ NetworkInterfaceId: aws.String(d.Id()), Ipv6PrefixCount: aws.Int32(int32(diff)), } - _, err := conn.AssignIpv6Addresses(ctx, input) + _, err := conn.AssignIpv6Addresses(ctx, &input) if err != nil { return sdkdiag.AppendErrorf(diags, "assigning EC2 Network Interface (%s) IPv6 addresses: %s", d.Id(), err) } } else if diff < 0 { - input := &ec2.UnassignIpv6AddressesInput{ + input := ec2.UnassignIpv6AddressesInput{ NetworkInterfaceId: aws.String(d.Id()), Ipv6Prefixes: flex.ExpandStringValueList(ipv6Prefixes[0:-diff]), } - _, err := conn.UnassignIpv6Addresses(ctx, input) + _, err := conn.UnassignIpv6Addresses(ctx, &input) if err != nil 
{ return sdkdiag.AppendErrorf(diags, "unassigning EC2 Network Interface (%s) IPv6 addresses: %s", d.Id(), err) @@ -1024,12 +1039,12 @@ func resourceNetworkInterfaceUpdate(ctx context.Context, d *schema.ResourceData, } if d.HasChange("source_dest_check") { - input := &ec2.ModifyNetworkInterfaceAttributeInput{ + input := ec2.ModifyNetworkInterfaceAttributeInput{ NetworkInterfaceId: aws.String(d.Id()), - SourceDestCheck: &types.AttributeBooleanValue{Value: aws.Bool(d.Get("source_dest_check").(bool))}, + SourceDestCheck: &awstypes.AttributeBooleanValue{Value: aws.Bool(d.Get("source_dest_check").(bool))}, } - _, err := conn.ModifyNetworkInterfaceAttribute(ctx, input) + _, err := conn.ModifyNetworkInterfaceAttribute(ctx, &input) if err != nil { return sdkdiag.AppendErrorf(diags, "modifying EC2 Network Interface (%s) SourceDestCheck: %s", d.Id(), err) @@ -1037,12 +1052,12 @@ func resourceNetworkInterfaceUpdate(ctx context.Context, d *schema.ResourceData, } if d.HasChange(names.AttrSecurityGroups) { - input := &ec2.ModifyNetworkInterfaceAttributeInput{ + input := ec2.ModifyNetworkInterfaceAttributeInput{ NetworkInterfaceId: aws.String(d.Id()), Groups: flex.ExpandStringValueSet(d.Get(names.AttrSecurityGroups).(*schema.Set)), } - _, err := conn.ModifyNetworkInterfaceAttribute(ctx, input) + _, err := conn.ModifyNetworkInterfaceAttribute(ctx, &input) if err != nil { return sdkdiag.AppendErrorf(diags, "modifying EC2 Network Interface (%s) Groups: %s", d.Id(), err) @@ -1050,12 +1065,12 @@ func resourceNetworkInterfaceUpdate(ctx context.Context, d *schema.ResourceData, } if d.HasChange(names.AttrDescription) { - input := &ec2.ModifyNetworkInterfaceAttributeInput{ + input := ec2.ModifyNetworkInterfaceAttributeInput{ NetworkInterfaceId: aws.String(d.Id()), - Description: &types.AttributeValue{Value: aws.String(d.Get(names.AttrDescription).(string))}, + Description: &awstypes.AttributeValue{Value: aws.String(d.Get(names.AttrDescription).(string))}, } - _, err := 
conn.ModifyNetworkInterfaceAttribute(ctx, input) + _, err := conn.ModifyNetworkInterfaceAttribute(ctx, &input) if err != nil { return sdkdiag.AppendErrorf(diags, "modifying EC2 Network Interface (%s) Description: %s", d.Id(), err) @@ -1080,26 +1095,21 @@ func resourceNetworkInterfaceDelete(ctx context.Context, d *schema.ResourceData, if err := deleteNetworkInterface(ctx, conn, d.Id()); err != nil { return sdkdiag.AppendFromErr(diags, err) } + return diags } -func attachNetworkInterface(ctx context.Context, conn *ec2.Client, networkInterfaceID, instanceID string, deviceIndex int, timeout time.Duration) (string, error) { - input := &ec2.AttachNetworkInterfaceInput{ - DeviceIndex: aws.Int32(int32(deviceIndex)), - InstanceId: aws.String(instanceID), - NetworkInterfaceId: aws.String(networkInterfaceID), - } - +func attachNetworkInterface(ctx context.Context, conn *ec2.Client, input *ec2.AttachNetworkInterfaceInput) (string, error) { output, err := conn.AttachNetworkInterface(ctx, input) if err != nil { - return "", fmt.Errorf("attaching EC2 Network Interface (%s/%s): %w", networkInterfaceID, instanceID, err) + return "", fmt.Errorf("attaching EC2 Network Interface (%s/%s): %w", aws.ToString(input.NetworkInterfaceId), aws.ToString(input.InstanceId), err) } attachmentID := aws.ToString(output.AttachmentId) - if _, err := waitNetworkInterfaceAttached(ctx, conn, attachmentID, timeout); err != nil { - return "", fmt.Errorf("waiting for EC2 Network Interface (%s/%s) attach: %w", networkInterfaceID, instanceID, err) + if _, err := waitNetworkInterfaceAttached(ctx, conn, attachmentID, networkInterfaceAttachedTimeout); err != nil { + return "", fmt.Errorf("waiting for EC2 Network Interface (%s/%s) attach: %w", aws.ToString(input.NetworkInterfaceId), aws.ToString(input.InstanceId), err) } return attachmentID, nil @@ -1156,7 +1166,7 @@ func detachNetworkInterface(ctx context.Context, conn *ec2.Client, networkInterf return nil } -func flattenNetworkInterfaceAssociation(apiObject 
*types.NetworkInterfaceAssociation) map[string]any { +func flattenNetworkInterfaceAssociation(apiObject *awstypes.NetworkInterfaceAssociation) map[string]any { if apiObject == nil { return nil } @@ -1194,7 +1204,7 @@ func flattenNetworkInterfaceAssociation(apiObject *types.NetworkInterfaceAssocia return tfMap } -func flattenNetworkInterfaceAttachment(apiObject *types.NetworkInterfaceAttachment) map[string]any { +func flattenNetworkInterfaceAttachment(apiObject *awstypes.NetworkInterfaceAttachment) map[string]any { if apiObject == nil { return nil } @@ -1213,27 +1223,31 @@ func flattenNetworkInterfaceAttachment(apiObject *types.NetworkInterfaceAttachme tfMap["instance"] = aws.ToString(v) } + if v := apiObject.NetworkCardIndex; v != nil { + tfMap["network_card_index"] = aws.ToInt32(v) + } + return tfMap } -func expandPrivateIPAddressSpecification(tfString string) *types.PrivateIpAddressSpecification { +func expandPrivateIPAddressSpecification(tfString string) *awstypes.PrivateIpAddressSpecification { if tfString == "" { return nil } - apiObject := &types.PrivateIpAddressSpecification{ + apiObject := &awstypes.PrivateIpAddressSpecification{ PrivateIpAddress: aws.String(tfString), } return apiObject } -func expandPrivateIPAddressSpecifications(tfList []any) []types.PrivateIpAddressSpecification { +func expandPrivateIPAddressSpecifications(tfList []any) []awstypes.PrivateIpAddressSpecification { if len(tfList) == 0 { return nil } - var apiObjects []types.PrivateIpAddressSpecification + var apiObjects []awstypes.PrivateIpAddressSpecification for i, tfMapRaw := range tfList { tfString, ok := tfMapRaw.(string) @@ -1258,24 +1272,24 @@ func expandPrivateIPAddressSpecifications(tfList []any) []types.PrivateIpAddress return apiObjects } -func expandInstanceIPv6Address(tfString string) *types.InstanceIpv6Address { +func expandInstanceIPv6Address(tfString string) *awstypes.InstanceIpv6Address { if tfString == "" { return nil } - apiObject := &types.InstanceIpv6Address{ + 
apiObject := &awstypes.InstanceIpv6Address{ Ipv6Address: aws.String(tfString), } return apiObject } -func expandInstanceIPv6Addresses(tfList []any) []types.InstanceIpv6Address { +func expandInstanceIPv6Addresses(tfList []any) []awstypes.InstanceIpv6Address { if len(tfList) == 0 { return nil } - var apiObjects []types.InstanceIpv6Address + var apiObjects []awstypes.InstanceIpv6Address for _, tfMapRaw := range tfList { tfString, ok := tfMapRaw.(string) @@ -1296,7 +1310,7 @@ func expandInstanceIPv6Addresses(tfList []any) []types.InstanceIpv6Address { return apiObjects } -func flattenNetworkInterfacePrivateIPAddress(apiObject *types.NetworkInterfacePrivateIpAddress) string { +func flattenNetworkInterfacePrivateIPAddress(apiObject *awstypes.NetworkInterfacePrivateIpAddress) string { if apiObject == nil { return "" } @@ -1310,7 +1324,7 @@ func flattenNetworkInterfacePrivateIPAddress(apiObject *types.NetworkInterfacePr return tfString } -func flattenNetworkInterfacePrivateIPAddresses(apiObjects []types.NetworkInterfacePrivateIpAddress) []string { +func flattenNetworkInterfacePrivateIPAddresses(apiObjects []awstypes.NetworkInterfacePrivateIpAddress) []string { if len(apiObjects) == 0 { return nil } @@ -1324,7 +1338,7 @@ func flattenNetworkInterfacePrivateIPAddresses(apiObjects []types.NetworkInterfa return tfList } -func flattenNetworkInterfaceIPv6Address(apiObject *types.NetworkInterfaceIpv6Address) string { +func flattenNetworkInterfaceIPv6Address(apiObject *awstypes.NetworkInterfaceIpv6Address) string { if apiObject == nil { return "" } @@ -1338,7 +1352,7 @@ func flattenNetworkInterfaceIPv6Address(apiObject *types.NetworkInterfaceIpv6Add return tfString } -func flattenNetworkInterfaceIPv6Addresses(apiObjects []types.NetworkInterfaceIpv6Address) []string { +func flattenNetworkInterfaceIPv6Addresses(apiObjects []awstypes.NetworkInterfaceIpv6Address) []string { if len(apiObjects) == 0 { return nil } @@ -1352,24 +1366,24 @@ func 
flattenNetworkInterfaceIPv6Addresses(apiObjects []types.NetworkInterfaceIpv return tfList } -func expandIPv4PrefixSpecificationRequest(tfString string) *types.Ipv4PrefixSpecificationRequest { +func expandIPv4PrefixSpecificationRequest(tfString string) *awstypes.Ipv4PrefixSpecificationRequest { if tfString == "" { return nil } - apiObject := &types.Ipv4PrefixSpecificationRequest{ + apiObject := &awstypes.Ipv4PrefixSpecificationRequest{ Ipv4Prefix: aws.String(tfString), } return apiObject } -func expandIPv4PrefixSpecificationRequests(tfList []any) []types.Ipv4PrefixSpecificationRequest { +func expandIPv4PrefixSpecificationRequests(tfList []any) []awstypes.Ipv4PrefixSpecificationRequest { if len(tfList) == 0 { return nil } - var apiObjects []types.Ipv4PrefixSpecificationRequest + var apiObjects []awstypes.Ipv4PrefixSpecificationRequest for _, tfMapRaw := range tfList { tfString, ok := tfMapRaw.(string) @@ -1390,24 +1404,24 @@ func expandIPv4PrefixSpecificationRequests(tfList []any) []types.Ipv4PrefixSpeci return apiObjects } -func expandIPv6PrefixSpecificationRequest(tfString string) *types.Ipv6PrefixSpecificationRequest { +func expandIPv6PrefixSpecificationRequest(tfString string) *awstypes.Ipv6PrefixSpecificationRequest { if tfString == "" { return nil } - apiObject := &types.Ipv6PrefixSpecificationRequest{ + apiObject := &awstypes.Ipv6PrefixSpecificationRequest{ Ipv6Prefix: aws.String(tfString), } return apiObject } -func expandIPv6PrefixSpecificationRequests(tfList []any) []types.Ipv6PrefixSpecificationRequest { +func expandIPv6PrefixSpecificationRequests(tfList []any) []awstypes.Ipv6PrefixSpecificationRequest { if len(tfList) == 0 { return nil } - var apiObjects []types.Ipv6PrefixSpecificationRequest + var apiObjects []awstypes.Ipv6PrefixSpecificationRequest for _, tfMapRaw := range tfList { tfString, ok := tfMapRaw.(string) @@ -1428,7 +1442,7 @@ func expandIPv6PrefixSpecificationRequests(tfList []any) []types.Ipv6PrefixSpeci return apiObjects } -func 
flattenIPv4PrefixSpecification(apiObject *types.Ipv4PrefixSpecification) string { +func flattenIPv4PrefixSpecification(apiObject *awstypes.Ipv4PrefixSpecification) string { if apiObject == nil { return "" } @@ -1442,7 +1456,7 @@ func flattenIPv4PrefixSpecification(apiObject *types.Ipv4PrefixSpecification) st return tfString } -func flattenIPv4PrefixSpecifications(apiObjects []types.Ipv4PrefixSpecification) []string { +func flattenIPv4PrefixSpecifications(apiObjects []awstypes.Ipv4PrefixSpecification) []string { if len(apiObjects) == 0 { return nil } @@ -1456,7 +1470,7 @@ func flattenIPv4PrefixSpecifications(apiObjects []types.Ipv4PrefixSpecification) return tfList } -func flattenIPv6PrefixSpecification(apiObject *types.Ipv6PrefixSpecification) string { +func flattenIPv6PrefixSpecification(apiObject *awstypes.Ipv6PrefixSpecification) string { if apiObject == nil { return "" } @@ -1470,7 +1484,7 @@ func flattenIPv6PrefixSpecification(apiObject *types.Ipv6PrefixSpecification) st return tfString } -func flattenIPv6PrefixSpecifications(apiObjects []types.Ipv6PrefixSpecification) []string { +func flattenIPv6PrefixSpecifications(apiObjects []awstypes.Ipv6PrefixSpecification) []string { if len(apiObjects) == 0 { return nil } @@ -1519,7 +1533,7 @@ func deleteLingeringENIs(ctx context.Context, conn *ec2.Client, filterName, reso return g.Wait().ErrorOrNil() } -func deleteLingeringLambdaENI(ctx context.Context, g *multierror.Group, conn *ec2.Client, eni *types.NetworkInterface, timeout time.Duration) bool { +func deleteLingeringLambdaENI(ctx context.Context, g *multierror.Group, conn *ec2.Client, eni *awstypes.NetworkInterface, timeout time.Duration) bool { // AWS Lambda service team confirms P99 deletion time of ~35 minutes. Buffer for safety. 
if minimumTimeout := 45 * time.Minute; timeout < minimumTimeout { timeout = minimumTimeout @@ -1560,7 +1574,7 @@ func deleteLingeringLambdaENI(ctx context.Context, g *multierror.Group, conn *ec return true } -func deleteLingeringComprehendENI(ctx context.Context, g *multierror.Group, conn *ec2.Client, eni *types.NetworkInterface, timeout time.Duration) bool { +func deleteLingeringComprehendENI(ctx context.Context, g *multierror.Group, conn *ec2.Client, eni *awstypes.NetworkInterface, timeout time.Duration) bool { // Deletion appears to take approximately 5 minutes if minimumTimeout := 10 * time.Minute; timeout < minimumTimeout { timeout = minimumTimeout @@ -1589,7 +1603,7 @@ func deleteLingeringComprehendENI(ctx context.Context, g *multierror.Group, conn return true } -func deleteLingeringDMSENI(ctx context.Context, g *multierror.Group, conn *ec2.Client, v *types.NetworkInterface, timeout time.Duration) bool { +func deleteLingeringDMSENI(ctx context.Context, g *multierror.Group, conn *ec2.Client, v *awstypes.NetworkInterface, timeout time.Duration) bool { // Deletion appears to take approximately 5 minutes if minimumTimeout := 10 * time.Minute; timeout < minimumTimeout { timeout = minimumTimeout @@ -1618,7 +1632,7 @@ func deleteLingeringDMSENI(ctx context.Context, g *multierror.Group, conn *ec2.C return true } -func deleteLingeringRDSENI(ctx context.Context, g *multierror.Group, conn *ec2.Client, v *types.NetworkInterface, timeout time.Duration) bool { +func deleteLingeringRDSENI(ctx context.Context, g *multierror.Group, conn *ec2.Client, v *awstypes.NetworkInterface, timeout time.Duration) bool { // Deletion appears to take approximately 5 minutes if minimumTimeout := 10 * time.Minute; timeout < minimumTimeout { timeout = minimumTimeout @@ -1647,7 +1661,7 @@ func deleteLingeringRDSENI(ctx context.Context, g *multierror.Group, conn *ec2.C return true } -func deleteLingeringQuickSightENI(ctx context.Context, g *multierror.Group, conn *ec2.Client, v 
*types.NetworkInterface, timeout time.Duration) bool { +func deleteLingeringQuickSightENI(ctx context.Context, g *multierror.Group, conn *ec2.Client, v *awstypes.NetworkInterface, timeout time.Duration) bool { // Deletion appears to take approximately 5 minutes if minimumTimeout := 10 * time.Minute; timeout < minimumTimeout { timeout = minimumTimeout @@ -1677,7 +1691,7 @@ func deleteLingeringQuickSightENI(ctx context.Context, g *multierror.Group, conn } // Flattens security group identifiers into a []string, where the elements returned are the GroupIDs -func flattenGroupIdentifiers(dtos []types.GroupIdentifier) []string { +func flattenGroupIdentifiers(dtos []awstypes.GroupIdentifier) []string { ids := make([]string, 0, len(dtos)) for _, v := range dtos { group_id := aws.ToString(v.GroupId) @@ -1685,3 +1699,7 @@ func flattenGroupIdentifiers(dtos []types.GroupIdentifier) []string { } return ids } + +func networkInterfaceARN(ctx context.Context, c *conns.AWSClient, accountID, networkInterfaceID string) string { + return c.RegionalARNWithAccount(ctx, names.EC2, accountID, "network-interface/"+networkInterfaceID) +} diff --git a/internal/service/ec2/vpc_network_interface_attachment.go b/internal/service/ec2/vpc_network_interface_attachment.go index 332eef7f9b1d..4fb99dee543f 100644 --- a/internal/service/ec2/vpc_network_interface_attachment.go +++ b/internal/service/ec2/vpc_network_interface_attachment.go @@ -7,6 +7,8 @@ import ( "context" "log" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/ec2" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -21,6 +23,7 @@ func resourceNetworkInterfaceAttachment() *schema.Resource { CreateWithoutTimeout: resourceNetworkInterfaceAttachmentCreate, ReadWithoutTimeout: resourceNetworkInterfaceAttachmentRead, DeleteWithoutTimeout: resourceNetworkInterfaceAttachmentDelete, + Importer: 
&schema.ResourceImporter{ StateContext: schema.ImportStatePassthroughContext, }, @@ -40,6 +43,12 @@ func resourceNetworkInterfaceAttachment() *schema.Resource { Required: true, ForceNew: true, }, + "network_card_index": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + ForceNew: true, + }, names.AttrNetworkInterfaceID: { Type: schema.TypeString, Required: true, @@ -57,12 +66,19 @@ func resourceNetworkInterfaceAttachmentCreate(ctx context.Context, d *schema.Res var diags diag.Diagnostics conn := meta.(*conns.AWSClient).EC2Client(ctx) - attachmentID, err := attachNetworkInterface(ctx, conn, - d.Get(names.AttrNetworkInterfaceID).(string), - d.Get(names.AttrInstanceID).(string), - d.Get("device_index").(int), - networkInterfaceAttachedTimeout, - ) + input := ec2.AttachNetworkInterfaceInput{ + NetworkInterfaceId: aws.String(d.Get(names.AttrNetworkInterfaceID).(string)), + InstanceId: aws.String(d.Get(names.AttrInstanceID).(string)), + DeviceIndex: aws.Int32(int32(d.Get("device_index").(int))), + } + + if v, ok := d.GetOk("network_card_index"); ok { + if v, ok := v.(int); ok { + input.NetworkCardIndex = aws.Int32(int32(v)) + } + } + + attachmentID, err := attachNetworkInterface(ctx, conn, &input) if err != nil { return sdkdiag.AppendFromErr(diags, err) @@ -79,7 +95,7 @@ func resourceNetworkInterfaceAttachmentRead(ctx context.Context, d *schema.Resou var diags diag.Diagnostics conn := meta.(*conns.AWSClient).EC2Client(ctx) - network_interface, err := findNetworkInterfaceByAttachmentID(ctx, conn, d.Id()) + eni, err := findNetworkInterfaceByAttachmentID(ctx, conn, d.Id()) if !d.IsNewResource() && tfresource.NotFound(err) { log.Printf("[WARN] EC2 Network Interface Attachment (%s) not found, removing from state", d.Id()) @@ -91,11 +107,13 @@ func resourceNetworkInterfaceAttachmentRead(ctx context.Context, d *schema.Resou return sdkdiag.AppendErrorf(diags, "reading EC2 Network Interface Attachment (%s): %s", d.Id(), err) } - d.Set(names.AttrNetworkInterfaceID, 
network_interface.NetworkInterfaceId) - d.Set("attachment_id", network_interface.Attachment.AttachmentId) - d.Set("device_index", network_interface.Attachment.DeviceIndex) - d.Set(names.AttrInstanceID, network_interface.Attachment.InstanceId) - d.Set(names.AttrStatus, network_interface.Attachment.Status) + attachment := eni.Attachment + d.Set("attachment_id", attachment.AttachmentId) + d.Set("device_index", attachment.DeviceIndex) + d.Set(names.AttrInstanceID, attachment.InstanceId) + d.Set("network_card_index", attachment.NetworkCardIndex) + d.Set(names.AttrNetworkInterfaceID, eni.NetworkInterfaceId) + d.Set(names.AttrStatus, eni.Attachment.Status) return diags } diff --git a/internal/service/ec2/vpc_network_interface_attachment_test.go b/internal/service/ec2/vpc_network_interface_attachment_test.go index 883c422fe78f..a592e63a6e1c 100644 --- a/internal/service/ec2/vpc_network_interface_attachment_test.go +++ b/internal/service/ec2/vpc_network_interface_attachment_test.go @@ -33,6 +33,44 @@ func TestAccVPCNetworkInterfaceAttachment_basic(t *testing.T) { resource.TestCheckResourceAttrSet(resourceName, "attachment_id"), resource.TestCheckResourceAttr(resourceName, "device_index", "1"), resource.TestCheckResourceAttrSet(resourceName, names.AttrInstanceID), + resource.TestCheckResourceAttr(resourceName, "network_card_index", "0"), + resource.TestCheckResourceAttrSet(resourceName, names.AttrNetworkInterfaceID), + resource.TestCheckResourceAttrSet(resourceName, names.AttrStatus), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +// https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-eni.html#network-cards. +// This test requires an expensive instance type that supports multiple network cards, such as "c6in.32xlarge" or "c6in.metal". +// Set the environment variable `VPC_NETWORK_INTERFACE_TEST_MULTIPLE_NETWORK_CARDS` to run this test. 
+func TestAccVPCNetworkInterfaceAttachment_networkCardIndex(t *testing.T) { + acctest.SkipIfEnvVarNotSet(t, "VPC_NETWORK_INTERFACE_TEST_MULTIPLE_NETWORK_CARDS") + ctx := acctest.Context(t) + var conf awstypes.NetworkInterface + resourceName := "aws_network_interface_attachment.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckENIDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccVPCNetworkInterfaceAttachmentConfig_networkCardIndex(rName, 1), + Check: resource.ComposeTestCheckFunc( + testAccCheckENIExists(ctx, "aws_network_interface.test", &conf), + resource.TestCheckResourceAttrSet(resourceName, "attachment_id"), + resource.TestCheckResourceAttr(resourceName, "device_index", "1"), + resource.TestCheckResourceAttrSet(resourceName, names.AttrInstanceID), + resource.TestCheckResourceAttr(resourceName, "network_card_index", "1"), resource.TestCheckResourceAttrSet(resourceName, names.AttrNetworkInterfaceID), resource.TestCheckResourceAttrSet(resourceName, names.AttrStatus), ), @@ -109,3 +147,68 @@ resource "aws_network_interface_attachment" "test" { } `, rName)) } + +func testAccVPCNetworkInterfaceAttachmentConfig_networkCardIndex(rName string, networkCardIndex int) string { + return acctest.ConfigCompose( + acctest.ConfigLatestAmazonLinux2HVMEBSX8664AMI(), + acctest.AvailableEC2InstanceTypeForRegion("c6in.32xlarge", "c6in.metal"), + acctest.ConfigAvailableAZsNoOptIn(), + fmt.Sprintf(` +resource "aws_vpc" "test" { + cidr_block = "172.16.0.0/16" + + tags = { + Name = %[1]q + } +} + +resource "aws_subnet" "test" { + vpc_id = aws_vpc.test.id + cidr_block = "172.16.10.0/24" + availability_zone = data.aws_availability_zones.available.names[0] + + tags = { + Name = %[1]q + } +} + +resource 
"aws_security_group" "test" { + vpc_id = aws_vpc.test.id + name = %[1]q + + egress { + from_port = 0 + to_port = 0 + protocol = "tcp" + cidr_blocks = ["10.0.0.0/16"] + } +} + +resource "aws_network_interface" "test" { + subnet_id = aws_subnet.test.id + private_ips = ["172.16.10.100"] + security_groups = [aws_security_group.test.id] + + tags = { + Name = %[1]q + } +} + +resource "aws_instance" "test" { + ami = data.aws_ami.amzn2-ami-minimal-hvm-ebs-x86_64.id + instance_type = data.aws_ec2_instance_type_offering.available.instance_type + subnet_id = aws_subnet.test.id + + tags = { + Name = %[1]q + } +} + +resource "aws_network_interface_attachment" "test" { + device_index = 1 + network_card_index = %[2]d + instance_id = aws_instance.test.id + network_interface_id = aws_network_interface.test.id +} +`, rName, networkCardIndex)) +} diff --git a/internal/service/ec2/vpc_network_interface_data_source.go b/internal/service/ec2/vpc_network_interface_data_source.go index 2902e9b294b3..c5e085d7adf4 100644 --- a/internal/service/ec2/vpc_network_interface_data_source.go +++ b/internal/service/ec2/vpc_network_interface_data_source.go @@ -8,9 +8,8 @@ import ( "time" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/arn" "github.com/aws/aws-sdk-go-v2/service/ec2" - "github.com/aws/aws-sdk-go-v2/service/ec2/types" + awstypes "github.com/aws/aws-sdk-go-v2/service/ec2/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -93,6 +92,10 @@ func dataSourceNetworkInterface() *schema.Resource { Type: schema.TypeString, Computed: true, }, + "network_card_index": { + Type: schema.TypeInt, + Computed: true, + }, }, }, }, @@ -168,7 +171,8 @@ func dataSourceNetworkInterface() *schema.Resource { func dataSourceNetworkInterfaceRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { var diags diag.Diagnostics - conn := 
meta.(*conns.AWSClient).EC2Client(ctx) + c := meta.(*conns.AWSClient) + conn := c.EC2Client(ctx) input := &ec2.DescribeNetworkInterfacesInput{} @@ -188,14 +192,7 @@ func dataSourceNetworkInterfaceRead(ctx context.Context, d *schema.ResourceData, d.SetId(aws.ToString(eni.NetworkInterfaceId)) ownerID := aws.ToString(eni.OwnerId) - arn := arn.ARN{ - Partition: meta.(*conns.AWSClient).Partition(ctx), - Service: "ec2", - Region: meta.(*conns.AWSClient).Region(ctx), - AccountID: ownerID, - Resource: "network-interface/" + d.Id(), - }.String() - d.Set(names.AttrARN, arn) + d.Set(names.AttrARN, networkInterfaceARN(ctx, c, ownerID, d.Id())) if eni.Association != nil { if err := d.Set("association", []any{flattenNetworkInterfaceAssociation(eni.Association)}); err != nil { return sdkdiag.AppendErrorf(diags, "setting association: %s", err) @@ -230,7 +227,7 @@ func dataSourceNetworkInterfaceRead(ctx context.Context, d *schema.ResourceData, return diags } -func flattenNetworkInterfaceAttachmentForDataSource(apiObject *types.NetworkInterfaceAttachment) map[string]any { +func flattenNetworkInterfaceAttachmentForDataSource(apiObject *awstypes.NetworkInterfaceAttachment) map[string]any { if apiObject == nil { return nil } @@ -253,5 +250,9 @@ func flattenNetworkInterfaceAttachmentForDataSource(apiObject *types.NetworkInte tfMap["instance_owner_id"] = aws.ToString(v) } + if v := apiObject.NetworkCardIndex; v != nil { + tfMap["network_card_index"] = aws.ToInt32(v) + } + return tfMap } diff --git a/internal/service/ec2/vpc_network_interface_data_source_test.go b/internal/service/ec2/vpc_network_interface_data_source_test.go index 1014bb9b20fe..fc2ec4f6266f 100644 --- a/internal/service/ec2/vpc_network_interface_data_source_test.go +++ b/internal/service/ec2/vpc_network_interface_data_source_test.go @@ -185,6 +185,7 @@ func TestAccVPCNetworkInterfaceDataSource_attachment(t *testing.T) { resource.TestCheckResourceAttr(datasourceName, "attachment.0.device_index", "1"), 
resource.TestCheckResourceAttrPair(datasourceName, "attachment.0.instance_id", instanceResourceName, names.AttrID), acctest.CheckResourceAttrAccountID(ctx, datasourceName, "attachment.0.instance_owner_id"), + resource.TestCheckResourceAttr(datasourceName, "attachment.0.network_card_index", "0"), resource.TestCheckResourceAttrSet(datasourceName, names.AttrAvailabilityZone), resource.TestCheckResourceAttrPair(datasourceName, names.AttrDescription, resourceName, names.AttrDescription), resource.TestCheckResourceAttr(datasourceName, "interface_type", "interface"), diff --git a/internal/service/ec2/vpc_network_interface_sg_attachment.go b/internal/service/ec2/vpc_network_interface_sg_attachment.go index b75f7e6e9e3d..95938b4f2b66 100644 --- a/internal/service/ec2/vpc_network_interface_sg_attachment.go +++ b/internal/service/ec2/vpc_network_interface_sg_attachment.go @@ -105,7 +105,7 @@ func resourceNetworkInterfaceSGAttachmentRead(ctx context.Context, d *schema.Res networkInterfaceID := d.Get(names.AttrNetworkInterfaceID).(string) sgID := d.Get("security_group_id").(string) - outputRaw, err := tfresource.RetryWhenNewResourceNotFound(ctx, maxDuration(ec2PropagationTimeout, d.Timeout(schema.TimeoutRead)), func() (any, error) { + groupIdentifier, err := tfresource.RetryWhenNewResourceNotFound(ctx, max(ec2PropagationTimeout, d.Timeout(schema.TimeoutRead)), func(ctx context.Context) (*awstypes.GroupIdentifier, error) { return findNetworkInterfaceSecurityGroup(ctx, conn, networkInterfaceID, sgID) }, d.IsNewResource()) @@ -119,22 +119,12 @@ func resourceNetworkInterfaceSGAttachmentRead(ctx context.Context, d *schema.Res return sdkdiag.AppendErrorf(diags, "reading EC2 Network Interface (%s) Security Group (%s) Attachment: %s", networkInterfaceID, sgID, err) } - groupIdentifier := outputRaw.(*awstypes.GroupIdentifier) - d.Set(names.AttrNetworkInterfaceID, networkInterfaceID) d.Set("security_group_id", groupIdentifier.GroupId) return diags } -func maxDuration(a, b time.Duration) 
time.Duration { - if a >= b { - return a - } - - return b -} - func resourceNetworkInterfaceSGAttachmentDelete(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { var diags diag.Diagnostics conn := meta.(*conns.AWSClient).EC2Client(ctx) diff --git a/internal/service/ec2/vpc_network_interface_test.go b/internal/service/ec2/vpc_network_interface_test.go index 24b169c4839a..48b48e53e553 100644 --- a/internal/service/ec2/vpc_network_interface_test.go +++ b/internal/service/ec2/vpc_network_interface_test.go @@ -14,7 +14,7 @@ import ( "github.com/YakDriver/regexache" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/ec2" - "github.com/aws/aws-sdk-go-v2/service/ec2/types" + awstypes "github.com/aws/aws-sdk-go-v2/service/ec2/types" "github.com/hashicorp/aws-sdk-go-base/v2/endpoints" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" @@ -28,7 +28,7 @@ import ( func TestAccVPCNetworkInterface_basic(t *testing.T) { ctx := acctest.Context(t) - var conf types.NetworkInterface + var conf awstypes.NetworkInterface resourceName := "aws_network_interface.test" subnetResourceName := "aws_subnet.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -73,7 +73,7 @@ func TestAccVPCNetworkInterface_basic(t *testing.T) { func TestAccVPCNetworkInterface_ipv6(t *testing.T) { ctx := acctest.Context(t) - var conf types.NetworkInterface + var conf awstypes.NetworkInterface resourceName := "aws_network_interface.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -119,7 +119,7 @@ func TestAccVPCNetworkInterface_ipv6(t *testing.T) { func TestAccVPCNetworkInterface_ipv6Primary(t *testing.T) { ctx := acctest.Context(t) - var conf types.NetworkInterface + var conf awstypes.NetworkInterface resourceName := "aws_network_interface.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -143,7 +143,7 @@ func 
TestAccVPCNetworkInterface_ipv6Primary(t *testing.T) { func TestAccVPCNetworkInterface_ipv6PrimaryEnable(t *testing.T) { ctx := acctest.Context(t) - var conf types.NetworkInterface + var conf awstypes.NetworkInterface resourceName := "aws_network_interface.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -175,7 +175,7 @@ func TestAccVPCNetworkInterface_ipv6PrimaryEnable(t *testing.T) { func TestAccVPCNetworkInterface_ipv6PrimaryDisable(t *testing.T) { ctx := acctest.Context(t) - var conf types.NetworkInterface + var conf awstypes.NetworkInterface resourceName := "aws_network_interface.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -209,7 +209,7 @@ func TestAccVPCNetworkInterface_tags(t *testing.T) { ctx := acctest.Context(t) resourceName := "aws_network_interface.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - var conf types.NetworkInterface + var conf awstypes.NetworkInterface resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, @@ -254,7 +254,7 @@ func TestAccVPCNetworkInterface_tags(t *testing.T) { func TestAccVPCNetworkInterface_ipv6Count(t *testing.T) { ctx := acctest.Context(t) - var conf types.NetworkInterface + var conf awstypes.NetworkInterface resourceName := "aws_network_interface.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -304,7 +304,7 @@ func TestAccVPCNetworkInterface_ipv6Count(t *testing.T) { func TestAccVPCNetworkInterface_disappears(t *testing.T) { ctx := acctest.Context(t) - var networkInterface types.NetworkInterface + var networkInterface awstypes.NetworkInterface resourceName := "aws_network_interface.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -328,7 +328,7 @@ func TestAccVPCNetworkInterface_disappears(t *testing.T) { func TestAccVPCNetworkInterface_description(t *testing.T) { ctx := acctest.Context(t) - var conf types.NetworkInterface + var conf awstypes.NetworkInterface resourceName := 
"aws_network_interface.test" subnetResourceName := "aws_subnet.test" securityGroupResourceName := "aws_security_group.test" @@ -402,7 +402,7 @@ func TestAccVPCNetworkInterface_attachment(t *testing.T) { } ctx := acctest.Context(t) - var conf types.NetworkInterface + var conf awstypes.NetworkInterface resourceName := "aws_network_interface.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -418,7 +418,52 @@ func TestAccVPCNetworkInterface_attachment(t *testing.T) { testAccCheckENIExists(ctx, resourceName, &conf), resource.TestCheckResourceAttr(resourceName, "attachment.#", "1"), resource.TestCheckTypeSetElemNestedAttrs(resourceName, "attachment.*", map[string]string{ - "device_index": "1", + "device_index": "1", + "network_card_index": "0", + }), + resource.TestCheckResourceAttr(resourceName, "private_ip", "172.16.10.100"), + resource.TestCheckResourceAttr(resourceName, "private_ips.#", "1"), + resource.TestCheckTypeSetElemAttr(resourceName, "private_ips.*", "172.16.10.100"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"private_ip_list_enabled", "ipv6_address_list_enabled"}, + }, + }, + }) +} + +// https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-eni.html#network-cards. +// This test requires an expensive instance type that supports multiple network cards, such as "c6in.32xlarge" or "c6in.metal". +// Set the environment variable `VPC_NETWORK_INTERFACE_TEST_MULTIPLE_NETWORK_CARDS` to run this test. 
+func TestAccVPCNetworkInterface_attachmentNetworkCardIndex(t *testing.T) { + acctest.SkipIfEnvVarNotSet(t, "VPC_NETWORK_INTERFACE_TEST_MULTIPLE_NETWORK_CARDS") + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + + ctx := acctest.Context(t) + var conf awstypes.NetworkInterface + resourceName := "aws_network_interface.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckENIDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccVPCNetworkInterfaceConfig_attachmentNetworkCardIndex(rName, 1), + Check: resource.ComposeTestCheckFunc( + testAccCheckENIExists(ctx, resourceName, &conf), + resource.TestCheckResourceAttr(resourceName, "attachment.#", "1"), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "attachment.*", map[string]string{ + "device_index": "1", + "network_card_index": "1", }), resource.TestCheckResourceAttr(resourceName, "private_ip", "172.16.10.100"), resource.TestCheckResourceAttr(resourceName, "private_ips.#", "1"), @@ -437,7 +482,7 @@ func TestAccVPCNetworkInterface_attachment(t *testing.T) { func TestAccVPCNetworkInterface_ignoreExternalAttachment(t *testing.T) { ctx := acctest.Context(t) - var conf types.NetworkInterface + var conf awstypes.NetworkInterface var attachmentId string resourceName := "aws_network_interface.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -478,7 +523,7 @@ func TestAccVPCNetworkInterface_ignoreExternalAttachment(t *testing.T) { func TestAccVPCNetworkInterface_sourceDestCheck(t *testing.T) { ctx := acctest.Context(t) - var conf types.NetworkInterface + var conf awstypes.NetworkInterface resourceName := "aws_network_interface.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -521,7 
+566,7 @@ func TestAccVPCNetworkInterface_sourceDestCheck(t *testing.T) { func TestAccVPCNetworkInterface_privateIPsCount(t *testing.T) { ctx := acctest.Context(t) - var conf types.NetworkInterface + var conf awstypes.NetworkInterface resourceName := "aws_network_interface.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -589,7 +634,7 @@ func TestAccVPCNetworkInterface_privateIPsCount(t *testing.T) { func TestAccVPCNetworkInterface_ENIInterfaceType_efa(t *testing.T) { ctx := acctest.Context(t) - var conf types.NetworkInterface + var conf awstypes.NetworkInterface resourceName := "aws_network_interface.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -618,7 +663,7 @@ func TestAccVPCNetworkInterface_ENIInterfaceType_efa(t *testing.T) { func TestAccVPCNetworkInterface_ENI_ipv4Prefix(t *testing.T) { ctx := acctest.Context(t) - var conf types.NetworkInterface + var conf awstypes.NetworkInterface resourceName := "aws_network_interface.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -664,7 +709,7 @@ func TestAccVPCNetworkInterface_ENI_ipv4Prefix(t *testing.T) { func TestAccVPCNetworkInterface_ENI_ipv4PrefixCount(t *testing.T) { ctx := acctest.Context(t) - var conf types.NetworkInterface + var conf awstypes.NetworkInterface resourceName := "aws_network_interface.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -714,7 +759,7 @@ func TestAccVPCNetworkInterface_ENI_ipv4PrefixCount(t *testing.T) { func TestAccVPCNetworkInterface_ENI_ipv6Prefix(t *testing.T) { ctx := acctest.Context(t) - var conf types.NetworkInterface + var conf awstypes.NetworkInterface resourceName := "aws_network_interface.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -760,7 +805,7 @@ func TestAccVPCNetworkInterface_ENI_ipv6Prefix(t *testing.T) { func TestAccVPCNetworkInterface_ENI_ipv6PrefixCount(t *testing.T) { ctx := acctest.Context(t) - var conf types.NetworkInterface + var conf 
awstypes.NetworkInterface resourceName := "aws_network_interface.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -810,7 +855,7 @@ func TestAccVPCNetworkInterface_ENI_ipv6PrefixCount(t *testing.T) { func TestAccVPCNetworkInterface_privateIPSet(t *testing.T) { ctx := acctest.Context(t) - var networkInterface, lastInterface types.NetworkInterface + var networkInterface, lastInterface awstypes.NetworkInterface resourceName := "aws_network_interface.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -906,7 +951,7 @@ func TestAccVPCNetworkInterface_privateIPList(t *testing.T) { } ctx := acctest.Context(t) - var networkInterface, lastInterface types.NetworkInterface + var networkInterface, lastInterface awstypes.NetworkInterface resourceName := "aws_network_interface.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -1073,7 +1118,7 @@ func regionalPrivateDNSSuffix(region string) string { return fmt.Sprintf("%s.compute.internal", region) } -func testAccCheckENIExists(ctx context.Context, n string, v *types.NetworkInterface) resource.TestCheckFunc { +func testAccCheckENIExists(ctx context.Context, n string, v *awstypes.NetworkInterface) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] if !ok { @@ -1124,7 +1169,7 @@ func testAccCheckENIDestroy(ctx context.Context) resource.TestCheckFunc { } } -func testAccCheckENIMakeExternalAttachment(ctx context.Context, n string, networkInterface *types.NetworkInterface, attachmentId *string) resource.TestCheckFunc { +func testAccCheckENIMakeExternalAttachment(ctx context.Context, n string, networkInterface *awstypes.NetworkInterface, attachmentId *string) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] if !ok || rs.Primary.ID == "" { @@ -1162,7 +1207,7 @@ func testAccCheckENIRemoveExternalAttachment(ctx context.Context, attachmentId * } } -func 
testAccCheckENIPrivateIPSet(ips []string, iface *types.NetworkInterface) resource.TestCheckFunc { +func testAccCheckENIPrivateIPSet(ips []string, iface *awstypes.NetworkInterface) resource.TestCheckFunc { return func(s *terraform.State) error { iIPs := tfec2.FlattenNetworkInterfacePrivateIPAddresses(iface.PrivateIpAddresses) @@ -1174,7 +1219,7 @@ func testAccCheckENIPrivateIPSet(ips []string, iface *types.NetworkInterface) re } } -func testAccCheckENIPrivateIPList(ips []string, iface *types.NetworkInterface) resource.TestCheckFunc { +func testAccCheckENIPrivateIPList(ips []string, iface *awstypes.NetworkInterface) resource.TestCheckFunc { return func(s *terraform.State) error { iIPs := tfec2.FlattenNetworkInterfacePrivateIPAddresses(iface.PrivateIpAddresses) @@ -1205,7 +1250,7 @@ func stringSlicesEqual(s1, s2 []string) bool { return reflect.DeepEqual(s1, s2) } -func testAccCheckENISame(iface1 *types.NetworkInterface, iface2 *types.NetworkInterface) resource.TestCheckFunc { +func testAccCheckENISame(iface1 *awstypes.NetworkInterface, iface2 *awstypes.NetworkInterface) resource.TestCheckFunc { return func(s *terraform.State) error { if aws.ToString(iface1.NetworkInterfaceId) != aws.ToString(iface2.NetworkInterfaceId) { return fmt.Errorf("interface %s should not have been replaced with %s", aws.ToString(iface1.NetworkInterfaceId), aws.ToString(iface2.NetworkInterfaceId)) @@ -1214,7 +1259,7 @@ func testAccCheckENISame(iface1 *types.NetworkInterface, iface2 *types.NetworkIn } } -func testAccCheckENIDifferent(iface1 *types.NetworkInterface, iface2 *types.NetworkInterface) resource.TestCheckFunc { +func testAccCheckENIDifferent(iface1 *awstypes.NetworkInterface, iface2 *awstypes.NetworkInterface) resource.TestCheckFunc { return func(s *terraform.State) error { if aws.ToString(iface1.NetworkInterfaceId) == aws.ToString(iface2.NetworkInterfaceId) { return fmt.Errorf("interface %s should have been replaced, have %s", aws.ToString(iface1.NetworkInterfaceId), 
aws.ToString(iface2.NetworkInterfaceId)) @@ -1446,6 +1491,52 @@ resource "aws_network_interface" "test" { `, rName)) } +func testAccVPCNetworkInterfaceConfig_attachmentNetworkCardIndex(rName string, networkCardIndex int) string { + return acctest.ConfigCompose( + acctest.ConfigLatestAmazonLinux2HVMEBSX8664AMI(), + acctest.AvailableEC2InstanceTypeForRegion("c6in.32xlarge", "c6in.metal"), + testAccVPCNetworkInterfaceConfig_baseIPV4(rName), + fmt.Sprintf(` +resource "aws_subnet" "test2" { + vpc_id = aws_vpc.test.id + cidr_block = "172.16.11.0/24" + availability_zone = data.aws_availability_zones.available.names[0] + + tags = { + Name = %[1]q + } +} + +resource "aws_instance" "test" { + ami = data.aws_ami.amzn2-ami-minimal-hvm-ebs-x86_64.id + instance_type = data.aws_ec2_instance_type_offering.available.instance_type + subnet_id = aws_subnet.test2.id + associate_public_ip_address = false + private_ip = "172.16.11.50" + + tags = { + Name = %[1]q + } +} + +resource "aws_network_interface" "test" { + subnet_id = aws_subnet.test.id + private_ips = ["172.16.10.100"] + security_groups = [aws_security_group.test.id] + + attachment { + instance = aws_instance.test.id + device_index = 1 + network_card_index = %[2]d + } + + tags = { + Name = %[1]q + } +} +`, rName, networkCardIndex)) +} + func testAccVPCNetworkInterfaceConfig_externalAttachment(rName string) string { return acctest.ConfigCompose( acctest.ConfigLatestAmazonLinux2HVMEBSX8664AMI(), diff --git a/internal/service/ec2/vpc_network_performance_metric_subscription.go b/internal/service/ec2/vpc_network_performance_metric_subscription.go index a97fbf7841b9..bf753beb78b1 100644 --- a/internal/service/ec2/vpc_network_performance_metric_subscription.go +++ b/internal/service/ec2/vpc_network_performance_metric_subscription.go @@ -11,7 +11,7 @@ import ( "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/ec2" - "github.com/aws/aws-sdk-go-v2/service/ec2/types" + awstypes 
"github.com/aws/aws-sdk-go-v2/service/ec2/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -38,8 +38,8 @@ func resourceNetworkPerformanceMetricSubscription() *schema.Resource { Type: schema.TypeString, Optional: true, ForceNew: true, - Default: types.MetricTypeAggregateLatency, - ValidateDiagFunc: enum.Validate[types.MetricType](), + Default: awstypes.MetricTypeAggregateLatency, + ValidateDiagFunc: enum.Validate[awstypes.MetricType](), }, "period": { Type: schema.TypeString, @@ -54,8 +54,8 @@ func resourceNetworkPerformanceMetricSubscription() *schema.Resource { Type: schema.TypeString, Optional: true, ForceNew: true, - Default: types.StatisticTypeP50, - ValidateDiagFunc: enum.Validate[types.StatisticType](), + Default: awstypes.StatisticTypeP50, + ValidateDiagFunc: enum.Validate[awstypes.StatisticType](), }, }, } @@ -72,9 +72,9 @@ func resourceNetworkPerformanceMetricSubscriptionCreate(ctx context.Context, d * id := networkPerformanceMetricSubscriptionCreateResourceID(source, destination, metric, statistic) input := &ec2.EnableAwsNetworkPerformanceMetricSubscriptionInput{ Destination: aws.String(destination), - Metric: types.MetricType(metric), + Metric: awstypes.MetricType(metric), Source: aws.String(source), - Statistic: types.StatisticType(statistic), + Statistic: awstypes.StatisticType(statistic), } _, err := conn.EnableAwsNetworkPerformanceMetricSubscription(ctx, input) @@ -130,9 +130,9 @@ func resourceNetworkPerformanceMetricSubscriptionDelete(ctx context.Context, d * log.Printf("[DEBUG] Deleting EC2 AWS Network Performance Metric Subscriptione: %s", d.Id()) input := ec2.DisableAwsNetworkPerformanceMetricSubscriptionInput{ Destination: aws.String(destination), - Metric: types.MetricType(metric), + Metric: awstypes.MetricType(metric), Source: aws.String(source), - Statistic: types.StatisticType(statistic), + Statistic: 
awstypes.StatisticType(statistic), } _, err = conn.DisableAwsNetworkPerformanceMetricSubscription(ctx, &input) diff --git a/internal/service/ec2/vpc_peering_connection.go b/internal/service/ec2/vpc_peering_connection.go index 8223b715c999..029719b7d2a9 100644 --- a/internal/service/ec2/vpc_peering_connection.go +++ b/internal/service/ec2/vpc_peering_connection.go @@ -15,7 +15,6 @@ import ( awstypes "github.com/aws/aws-sdk-go-v2/service/ec2/types" "github.com/hashicorp/aws-sdk-go-base/v2/tfawserr" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" @@ -325,22 +324,22 @@ func modifyVPCPeeringConnectionOptions(ctx context.Context, conn *ec2.Client, d // Retry reading back the modified options to deal with eventual consistency. // Often this is to do with a delay transitioning from pending-acceptance to active. 
- err := retry.RetryContext(ctx, ec2PropagationTimeout, func() *retry.RetryError { // nosemgrep:ci.helper-schema-retry-RetryContext-without-TimeoutError-check + err := tfresource.Retry(ctx, ec2PropagationTimeout, func(ctx context.Context) *tfresource.RetryError { vpcPeeringConnection, err := findVPCPeeringConnectionByID(ctx, conn, d.Id()) if err != nil { - return retry.NonRetryableError(err) + return tfresource.NonRetryableError(err) } if v := vpcPeeringConnection.AccepterVpcInfo; v != nil && v.PeeringOptions != nil && accepterPeeringConnectionOptions != nil { if !vpcPeeringConnectionOptionsEqual(v.PeeringOptions, accepterPeeringConnectionOptions) { - return retry.RetryableError(errors.New("Accepter Options not stable")) + return tfresource.RetryableError(errors.New("Accepter Options not stable")) } } if v := vpcPeeringConnection.RequesterVpcInfo; v != nil && v.PeeringOptions != nil && requesterPeeringConnectionOptions != nil { if !vpcPeeringConnectionOptionsEqual(v.PeeringOptions, requesterPeeringConnectionOptions) { - return retry.RetryableError(errors.New("Requester Options not stable")) + return tfresource.RetryableError(errors.New("Requester Options not stable")) } } diff --git a/internal/service/ec2/vpc_route.go b/internal/service/ec2/vpc_route.go index 2639f97e346e..80bc66660632 100644 --- a/internal/service/ec2/vpc_route.go +++ b/internal/service/ec2/vpc_route.go @@ -19,6 +19,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + inttypes "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/internal/verify" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -49,6 +50,15 @@ var routeValidTargets = []string{ } // @SDKResource("aws_route", name="Route") +// @IdentityAttribute("route_table_id") +// @IdentityAttribute("destination_cidr_block", 
optional="true", testNotNull="true") +// @IdentityAttribute("destination_ipv6_cidr_block", optional="true") +// @IdentityAttribute("destination_prefix_list_id", optional="true") +// @ImportIDHandler("routeImportID") +// @Testing(preIdentityVersion="6.10.0") +// @Testing(importStateIdFunc="testAccRouteImportStateIdFunc") +// @Testing(existsType="github.com/aws/aws-sdk-go-v2/service/ec2/types;types.Route") +// @Testing(generator=false) func resourceRoute() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceRouteCreate, @@ -56,10 +66,6 @@ func resourceRoute() *schema.Resource { UpdateWithoutTimeout: resourceRouteUpdate, DeleteWithoutTimeout: resourceRouteDelete, - Importer: &schema.ResourceImporter{ - StateContext: resourceRouteImport, - }, - Timeouts: &schema.ResourceTimeout{ Create: schema.DefaultTimeout(5 * time.Minute), Update: schema.DefaultTimeout(2 * time.Minute), @@ -254,7 +260,7 @@ func resourceRouteCreate(ctx context.Context, d *schema.ResourceData, meta any) } _, err = tfresource.RetryWhenAWSErrCodeEquals(ctx, d.Timeout(schema.TimeoutCreate), - func() (any, error) { + func(ctx context.Context) (any, error) { return conn.CreateRoute(ctx, input) }, errCodeInvalidParameterException, @@ -302,7 +308,7 @@ func resourceRouteRead(ctx context.Context, d *schema.ResourceData, meta any) di } routeTableID := d.Get("route_table_id").(string) - outputRaw, err := tfresource.RetryWhenNewResourceNotFound(ctx, ec2PropagationTimeout, func() (any, error) { + route, err := tfresource.RetryWhenNewResourceNotFound(ctx, ec2PropagationTimeout, func(ctx context.Context) (*awstypes.Route, error) { return routeFinder(ctx, conn, routeTableID, destination) }, d.IsNewResource()) @@ -316,7 +322,6 @@ func resourceRouteRead(ctx context.Context, d *schema.ResourceData, meta any) di return sdkdiag.AppendErrorf(diags, "reading Route in Route Table (%s) with destination (%s): %s", routeTableID, destination, err) } - route := outputRaw.(*awstypes.Route) 
d.Set("carrier_gateway_id", route.CarrierGatewayId) d.Set("core_network_arn", route.CoreNetworkArn) d.Set(routeDestinationCIDRBlock, route.DestinationCidrBlock) @@ -458,7 +463,7 @@ func resourceRouteDelete(ctx context.Context, d *schema.ResourceData, meta any) log.Printf("[DEBUG] Deleting Route: %v", input) _, err = tfresource.RetryWhenAWSErrCodeEquals(ctx, d.Timeout(schema.TimeoutDelete), - func() (any, error) { + func(ctx context.Context) (any, error) { return conn.DeleteRoute(ctx, input) }, errCodeInvalidParameterException, @@ -484,28 +489,6 @@ func resourceRouteDelete(ctx context.Context, d *schema.ResourceData, meta any) return diags } -func resourceRouteImport(ctx context.Context, d *schema.ResourceData, meta any) ([]*schema.ResourceData, error) { - idParts := strings.Split(d.Id(), "_") - if len(idParts) != 2 || idParts[0] == "" || idParts[1] == "" { - return nil, fmt.Errorf("unexpected format of ID (%q), expected ROUTETABLEID_DESTINATION", d.Id()) - } - - routeTableID := idParts[0] - destination := idParts[1] - d.Set("route_table_id", routeTableID) - if strings.Contains(destination, ":") { - d.Set(routeDestinationIPv6CIDRBlock, destination) - } else if strings.Contains(destination, ".") { - d.Set(routeDestinationCIDRBlock, destination) - } else { - d.Set(routeDestinationPrefixListID, destination) - } - - d.SetId(routeCreateID(routeTableID, destination)) - - return []*schema.ResourceData{d}, nil -} - // routeDestinationAttribute returns the attribute key and value of the route's destination. 
func routeDestinationAttribute(d *schema.ResourceData) (string, string, error) { for _, key := range routeValidDestinations { @@ -528,3 +511,35 @@ func routeTargetAttribute(d *schema.ResourceData) (string, string, error) { return "", "", fmt.Errorf("route target attribute not specified") } + +var _ inttypes.SDKv2ImportID = routeImportID{} + +type routeImportID struct{} + +func (routeImportID) Create(d *schema.ResourceData) string { + _, destination, _ := routeDestinationAttribute(d) + routeTableID := d.Get("route_table_id").(string) + return routeCreateID(routeTableID, destination) +} + +func (routeImportID) Parse(id string) (string, map[string]string, error) { + parts := strings.Split(id, "_") + if len(parts) != 2 || parts[0] == "" || parts[1] == "" { + return "", nil, fmt.Errorf("unexpected format of ID (%q), expected ROUTETABLEID_DESTINATION", id) + } + + routeTableID := parts[0] + destination := parts[1] + result := map[string]string{ + "route_table_id": routeTableID, + } + if strings.Contains(destination, ":") { + result[routeDestinationIPv6CIDRBlock] = destination + } else if strings.Contains(destination, ".") { + result[routeDestinationCIDRBlock] = destination + } else { + result[routeDestinationPrefixListID] = destination + } + + return routeCreateID(routeTableID, destination), result, nil +} diff --git a/internal/service/ec2/vpc_route_identity_gen_test.go b/internal/service/ec2/vpc_route_identity_gen_test.go new file mode 100644 index 000000000000..1182215b72d4 --- /dev/null +++ b/internal/service/ec2/vpc_route_identity_gen_test.go @@ -0,0 +1,307 @@ +// Code generated by internal/generate/identitytests/main.go; DO NOT EDIT. 
+ +package ec2_test + +import ( + "testing" + + "github.com/aws/aws-sdk-go-v2/service/ec2/types" + "github.com/hashicorp/terraform-plugin-testing/config" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/knownvalue" + "github.com/hashicorp/terraform-plugin-testing/plancheck" + "github.com/hashicorp/terraform-plugin-testing/statecheck" + "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" + "github.com/hashicorp/terraform-plugin-testing/tfversion" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + tfknownvalue "github.com/hashicorp/terraform-provider-aws/internal/acctest/knownvalue" + tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccVPCRoute_Identity_Basic(t *testing.T) { + ctx := acctest.Context(t) + + var v types.Route + resourceName := "aws_route.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), + CheckDestroy: testAccCheckRouteDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + // Step 1: Setup + { + ConfigDirectory: config.StaticDirectory("testdata/Route/basic/"), + ConfigVariables: config.Variables{}, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckRouteExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + "route_table_id": knownvalue.NotNull(), 
+ "destination_cidr_block": knownvalue.NotNull(), + "destination_ipv6_cidr_block": knownvalue.Null(), + "destination_prefix_list_id": knownvalue.Null(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New("route_table_id")), + }, + }, + + // Step 2: Import command + { + ConfigDirectory: config.StaticDirectory("testdata/Route/basic/"), + ConfigVariables: config.Variables{}, + ImportStateKind: resource.ImportCommandWithID, + ImportStateIdFunc: testAccRouteImportStateIdFunc(resourceName), + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + + // Step 3: Import block with Import ID + { + ConfigDirectory: config.StaticDirectory("testdata/Route/basic/"), + ConfigVariables: config.Variables{}, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportStateIdFunc: testAccRouteImportStateIdFunc(resourceName), + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New("route_table_id"), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New("destination_cidr_block"), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New("destination_ipv6_cidr_block"), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New("destination_prefix_list_id"), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + }, + }, + }, + + // Step 4: Import block with Resource Identity + { + ConfigDirectory: config.StaticDirectory("testdata/Route/basic/"), + ConfigVariables: config.Variables{}, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithResourceIdentity, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, 
tfjsonpath.New("route_table_id"), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New("destination_cidr_block"), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New("destination_ipv6_cidr_block"), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New("destination_prefix_list_id"), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + }, + }, + }, + }, + }) +} + +func TestAccVPCRoute_Identity_RegionOverride(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_route.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), + CheckDestroy: acctest.CheckDestroyNoop, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + // Step 1: Setup + { + ConfigDirectory: config.StaticDirectory("testdata/Route/region_override/"), + ConfigVariables: config.Variables{ + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.AlternateRegion()), + "route_table_id": knownvalue.NotNull(), + "destination_cidr_block": knownvalue.NotNull(), + "destination_ipv6_cidr_block": knownvalue.Null(), + "destination_prefix_list_id": knownvalue.Null(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New("route_table_id")), + }, + }, + + // Step 2: Import command + { + 
ConfigDirectory: config.StaticDirectory("testdata/Route/region_override/"), + ConfigVariables: config.Variables{ + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ImportStateKind: resource.ImportCommandWithID, + ImportStateIdFunc: acctest.CrossRegionImportStateIdFuncAdapter(resourceName, testAccRouteImportStateIdFunc), + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + + // Step 3: Import block with Import ID + { + ConfigDirectory: config.StaticDirectory("testdata/Route/region_override/"), + ConfigVariables: config.Variables{ + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportStateIdFunc: acctest.CrossRegionImportStateIdFuncAdapter(resourceName, testAccRouteImportStateIdFunc), + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New("route_table_id"), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New("destination_cidr_block"), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New("destination_ipv6_cidr_block"), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New("destination_prefix_list_id"), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + }, + + // Step 4: Import block with Resource Identity + { + ConfigDirectory: config.StaticDirectory("testdata/Route/region_override/"), + ConfigVariables: config.Variables{ + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithResourceIdentity, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, 
tfjsonpath.New("route_table_id"), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New("destination_cidr_block"), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New("destination_ipv6_cidr_block"), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New("destination_prefix_list_id"), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + }, + }, + }) +} + +// Resource Identity was added after v6.10.0 +func TestAccVPCRoute_Identity_ExistingResource(t *testing.T) { + ctx := acctest.Context(t) + + var v types.Route + resourceName := "aws_route.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), + CheckDestroy: testAccCheckRouteDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/Route/basic_v6.10.0/"), + ConfigVariables: config.Variables{}, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckRouteExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Route/basic/"), + ConfigVariables: config.Variables{}, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: 
[]statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + "route_table_id": knownvalue.NotNull(), + "destination_cidr_block": knownvalue.NotNull(), + "destination_ipv6_cidr_block": knownvalue.Null(), + "destination_prefix_list_id": knownvalue.Null(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New("route_table_id")), + }, + }, + }, + }) +} + +// Resource Identity was added after v6.10.0 +func TestAccVPCRoute_Identity_ExistingResource_NoRefresh_NoChange(t *testing.T) { + ctx := acctest.Context(t) + + var v types.Route + resourceName := "aws_route.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), + CheckDestroy: testAccCheckRouteDestroy(ctx), + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/Route/basic_v6.10.0/"), + ConfigVariables: config.Variables{}, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckRouteExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Route/basic/"), + ConfigVariables: config.Variables{}, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + 
plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + }, + }) +} diff --git a/internal/service/ec2/vpc_route_table.go b/internal/service/ec2/vpc_route_table.go index 7716787f6b4c..0620f8d0d5ba 100644 --- a/internal/service/ec2/vpc_route_table.go +++ b/internal/service/ec2/vpc_route_table.go @@ -12,7 +12,6 @@ import ( "time" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/arn" "github.com/aws/aws-sdk-go-v2/service/ec2" awstypes "github.com/aws/aws-sdk-go-v2/service/ec2/types" "github.com/hashicorp/aws-sdk-go-base/v2/tfawserr" @@ -52,6 +51,8 @@ var routeTableValidTargets = []string{ // @Tags(identifierAttribute="id") // @Testing(existsType="github.com/aws/aws-sdk-go-v2/service/ec2/types;awstypes;awstypes.RouteTable") // @Testing(generator=false) +// @IdentityAttribute("id") +// @Testing(preIdentityVersion="v6.9.0") func resourceRouteTable() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceRouteTableCreate, @@ -59,10 +60,6 @@ func resourceRouteTable() *schema.Resource { UpdateWithoutTimeout: resourceRouteTableUpdate, DeleteWithoutTimeout: resourceRouteTableDelete, - Importer: &schema.ResourceImporter{ - StateContext: schema.ImportStatePassthroughContext, - }, - Timeouts: &schema.ResourceTimeout{ Create: schema.DefaultTimeout(5 * time.Minute), Update: schema.DefaultTimeout(2 * time.Minute), @@ -213,9 +210,10 @@ func resourceRouteTableCreate(ctx context.Context, d *schema.ResourceData, meta func resourceRouteTableRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).EC2Client(ctx) + c := meta.(*conns.AWSClient) + conn := c.EC2Client(ctx) - outputRaw, err := tfresource.RetryWhenNewResourceNotFound(ctx, ec2PropagationTimeout, func() (any, error) { + routeTable, err := 
tfresource.RetryWhenNewResourceNotFound(ctx, ec2PropagationTimeout, func(ctx context.Context) (*awstypes.RouteTable, error) { return findRouteTableByID(ctx, conn, d.Id()) }, d.IsNewResource()) @@ -229,16 +227,8 @@ func resourceRouteTableRead(ctx context.Context, d *schema.ResourceData, meta an return sdkdiag.AppendErrorf(diags, "reading Route Table (%s): %s", d.Id(), err) } - routeTable := outputRaw.(*awstypes.RouteTable) ownerID := aws.ToString(routeTable.OwnerId) - arn := arn.ARN{ - Partition: meta.(*conns.AWSClient).Partition(ctx), - Service: names.EC2, - Region: meta.(*conns.AWSClient).Region(ctx), - AccountID: ownerID, - Resource: fmt.Sprintf("route-table/%s", d.Id()), - }.String() - d.Set(names.AttrARN, arn) + d.Set(names.AttrARN, routeTableARN(ctx, c, ownerID, d.Id())) d.Set(names.AttrOwnerID, ownerID) propagatingVGWs := make([]string, 0, len(routeTable.PropagatingVgws)) for _, v := range routeTable.PropagatingVgws { @@ -495,7 +485,7 @@ func routeTableAddRoute(ctx context.Context, conn *ec2.Client, routeTableID stri // created by AWS so probably doesn't need a retry but just to be sure // we provide a small one _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, time.Second*15, - func() (any, error) { + func(ctx context.Context) (any, error) { return routeFinder(ctx, conn, routeTableID, destination) }, errCodeInvalidRouteNotFound, @@ -513,7 +503,7 @@ func routeTableAddRoute(ctx context.Context, conn *ec2.Client, routeTableID stri } _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, timeout, - func() (any, error) { + func(ctx context.Context) (any, error) { return conn.CreateRoute(ctx, input) }, errCodeInvalidParameterException, @@ -644,7 +634,7 @@ func routeTableEnableVGWRoutePropagation(ctx context.Context, conn *ec2.Client, } _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, timeout, - func() (any, error) { + func(ctx context.Context) (any, error) { return conn.EnableVgwRoutePropagation(ctx, input) }, errCodeGatewayNotAttached, @@ -939,3 +929,7 @@ func 
routeTableRouteTargetAttribute(m map[string]any) (string, string) { //nolin return "", "" } + +func routeTableARN(ctx context.Context, c *conns.AWSClient, accountID, routeTableID string) string { + return c.RegionalARNWithAccount(ctx, names.EC2, accountID, "route-table/"+routeTableID) +} diff --git a/internal/service/ec2/vpc_route_table_association.go b/internal/service/ec2/vpc_route_table_association.go index bf28fe29d867..87de94529aaf 100644 --- a/internal/service/ec2/vpc_route_table_association.go +++ b/internal/service/ec2/vpc_route_table_association.go @@ -78,7 +78,7 @@ func resourceRouteTableAssociationCreate(ctx context.Context, d *schema.Resource } output, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, ec2PropagationTimeout, - func() (any, error) { + func(ctx context.Context) (any, error) { return conn.AssociateRouteTable(ctx, input) }, errCodeInvalidRouteTableIDNotFound, @@ -101,7 +101,7 @@ func resourceRouteTableAssociationRead(ctx context.Context, d *schema.ResourceDa var diags diag.Diagnostics conn := meta.(*conns.AWSClient).EC2Client(ctx) - outputRaw, err := tfresource.RetryWhenNewResourceNotFound(ctx, ec2PropagationTimeout, func() (any, error) { + association, err := tfresource.RetryWhenNewResourceNotFound(ctx, ec2PropagationTimeout, func(ctx context.Context) (*awstypes.RouteTableAssociation, error) { return findRouteTableAssociationByID(ctx, conn, d.Id()) }, d.IsNewResource()) @@ -115,8 +115,6 @@ func resourceRouteTableAssociationRead(ctx context.Context, d *schema.ResourceDa return sdkdiag.AppendErrorf(diags, "reading Route Table Association (%s): %s", d.Id(), err) } - association := outputRaw.(*awstypes.RouteTableAssociation) - d.Set("gateway_id", association.GatewayId) d.Set("route_table_id", association.RouteTableId) d.Set(names.AttrSubnetID, association.SubnetId) diff --git a/internal/service/ec2/vpc_route_table_data_source.go b/internal/service/ec2/vpc_route_table_data_source.go index 5fe313d096c3..f9bf3bc9699e 100644 --- 
a/internal/service/ec2/vpc_route_table_data_source.go +++ b/internal/service/ec2/vpc_route_table_data_source.go @@ -5,13 +5,11 @@ package ec2 import ( "context" - "fmt" "log" "strings" "time" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/arn" "github.com/aws/aws-sdk-go-v2/service/ec2" awstypes "github.com/aws/aws-sdk-go-v2/service/ec2/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" @@ -188,7 +186,8 @@ func dataSourceRouteTable() *schema.Resource { func dataSourceRouteTableRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).EC2Client(ctx) + c := meta.(*conns.AWSClient) + conn := c.EC2Client(ctx) ignoreTagsConfig := meta.(*conns.AWSClient).IgnoreTagsConfig(ctx) req := &ec2.DescribeRouteTablesInput{} @@ -233,14 +232,7 @@ func dataSourceRouteTableRead(ctx context.Context, d *schema.ResourceData, meta d.SetId(aws.ToString(rt.RouteTableId)) ownerID := aws.ToString(rt.OwnerId) - arn := arn.ARN{ - Partition: meta.(*conns.AWSClient).Partition(ctx), - Service: names.EC2, - Region: meta.(*conns.AWSClient).Region(ctx), - AccountID: ownerID, - Resource: fmt.Sprintf("route-table/%s", d.Id()), - }.String() - d.Set(names.AttrARN, arn) + d.Set(names.AttrARN, routeTableARN(ctx, c, ownerID, d.Id())) d.Set(names.AttrOwnerID, ownerID) d.Set("route_table_id", rt.RouteTableId) diff --git a/internal/service/ec2/vpc_route_table_identity_gen_test.go b/internal/service/ec2/vpc_route_table_identity_gen_test.go new file mode 100644 index 000000000000..dac6d04b9c13 --- /dev/null +++ b/internal/service/ec2/vpc_route_table_identity_gen_test.go @@ -0,0 +1,284 @@ +// Code generated by internal/generate/identitytests/main.go; DO NOT EDIT. 
+ +package ec2_test + +import ( + "testing" + + awstypes "github.com/aws/aws-sdk-go-v2/service/ec2/types" + "github.com/hashicorp/terraform-plugin-testing/config" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/knownvalue" + "github.com/hashicorp/terraform-plugin-testing/plancheck" + "github.com/hashicorp/terraform-plugin-testing/statecheck" + "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" + "github.com/hashicorp/terraform-plugin-testing/tfversion" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + tfknownvalue "github.com/hashicorp/terraform-provider-aws/internal/acctest/knownvalue" + tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccVPCRouteTable_Identity_Basic(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.RouteTable + resourceName := "aws_route_table.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), + CheckDestroy: testAccCheckRouteTableDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + // Step 1: Setup + { + ConfigDirectory: config.StaticDirectory("testdata/RouteTable/basic/"), + ConfigVariables: config.Variables{}, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckRouteTableExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.Region()), 
+ names.AttrID: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrID)), + }, + }, + + // Step 2: Import command + { + ConfigDirectory: config.StaticDirectory("testdata/RouteTable/basic/"), + ConfigVariables: config.Variables{}, + ImportStateKind: resource.ImportCommandWithID, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + + // Step 3: Import block with Import ID + { + ConfigDirectory: config.StaticDirectory("testdata/RouteTable/basic/"), + ConfigVariables: config.Variables{}, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + }, + }, + }, + + // Step 4: Import block with Resource Identity + { + ConfigDirectory: config.StaticDirectory("testdata/RouteTable/basic/"), + ConfigVariables: config.Variables{}, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithResourceIdentity, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + }, + }, + }, + }, + }) +} + +func TestAccVPCRouteTable_Identity_RegionOverride(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_route_table.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, 
names.EC2ServiceID), + CheckDestroy: acctest.CheckDestroyNoop, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + // Step 1: Setup + { + ConfigDirectory: config.StaticDirectory("testdata/RouteTable/region_override/"), + ConfigVariables: config.Variables{ + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.AlternateRegion()), + names.AttrID: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrID)), + }, + }, + + // Step 2: Import command + { + ConfigDirectory: config.StaticDirectory("testdata/RouteTable/region_override/"), + ConfigVariables: config.Variables{ + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ImportStateKind: resource.ImportCommandWithID, + ImportStateIdFunc: acctest.CrossRegionImportStateIdFunc(resourceName), + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + + // Step 3: Import block with Import ID + { + ConfigDirectory: config.StaticDirectory("testdata/RouteTable/region_override/"), + ConfigVariables: config.Variables{ + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportStateIdFunc: acctest.CrossRegionImportStateIdFunc(resourceName), + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), 
knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + }, + + // Step 4: Import block with Resource Identity + { + ConfigDirectory: config.StaticDirectory("testdata/RouteTable/region_override/"), + ConfigVariables: config.Variables{ + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithResourceIdentity, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + }, + }, + }) +} + +// Resource Identity was added after v6.9.0 +func TestAccVPCRouteTable_Identity_ExistingResource(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.RouteTable + resourceName := "aws_route_table.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), + CheckDestroy: testAccCheckRouteTableDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/RouteTable/basic_v6.9.0/"), + ConfigVariables: config.Variables{}, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckRouteTableExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/RouteTable/basic/"), + ConfigVariables: config.Variables{}, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + 
plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + names.AttrID: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrID)), + }, + }, + }, + }) +} + +// Resource Identity was added after v6.9.0 +func TestAccVPCRouteTable_Identity_ExistingResource_NoRefresh_NoChange(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.RouteTable + resourceName := "aws_route_table.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), + CheckDestroy: testAccCheckRouteTableDestroy(ctx), + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/RouteTable/basic_v6.9.0/"), + ConfigVariables: config.Variables{}, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckRouteTableExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/RouteTable/basic/"), + ConfigVariables: config.Variables{}, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + 
plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + }, + }) +} diff --git a/internal/service/ec2/vpc_route_table_tags_gen_test.go b/internal/service/ec2/vpc_route_table_tags_gen_test.go index 62765eb10e1e..bc567cabfee1 100644 --- a/internal/service/ec2/vpc_route_table_tags_gen_test.go +++ b/internal/service/ec2/vpc_route_table_tags_gen_test.go @@ -18,10 +18,11 @@ import ( func TestAccVPCRouteTable_tags(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.RouteTable resourceName := "aws_route_table.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), CheckDestroy: testAccCheckRouteTableDestroy(ctx), @@ -191,10 +192,11 @@ func TestAccVPCRouteTable_tags(t *testing.T) { func TestAccVPCRouteTable_tags_null(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.RouteTable resourceName := "aws_route_table.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), CheckDestroy: testAccCheckRouteTableDestroy(ctx), @@ -254,10 +256,11 @@ func TestAccVPCRouteTable_tags_null(t *testing.T) { func TestAccVPCRouteTable_tags_EmptyMap(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.RouteTable resourceName := "aws_route_table.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), CheckDestroy: testAccCheckRouteTableDestroy(ctx), @@ -313,10 +316,11 @@ func 
TestAccVPCRouteTable_tags_EmptyMap(t *testing.T) { func TestAccVPCRouteTable_tags_AddOnUpdate(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.RouteTable resourceName := "aws_route_table.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), CheckDestroy: testAccCheckRouteTableDestroy(ctx), @@ -390,10 +394,11 @@ func TestAccVPCRouteTable_tags_AddOnUpdate(t *testing.T) { func TestAccVPCRouteTable_tags_EmptyTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.RouteTable resourceName := "aws_route_table.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), CheckDestroy: testAccCheckRouteTableDestroy(ctx), @@ -474,10 +479,11 @@ func TestAccVPCRouteTable_tags_EmptyTag_OnCreate(t *testing.T) { func TestAccVPCRouteTable_tags_EmptyTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.RouteTable resourceName := "aws_route_table.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), CheckDestroy: testAccCheckRouteTableDestroy(ctx), @@ -605,10 +611,11 @@ func TestAccVPCRouteTable_tags_EmptyTag_OnUpdate_Add(t *testing.T) { func TestAccVPCRouteTable_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.RouteTable resourceName := "aws_route_table.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), CheckDestroy: testAccCheckRouteTableDestroy(ctx), @@ -690,10 +697,11 @@ func 
TestAccVPCRouteTable_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { func TestAccVPCRouteTable_tags_DefaultTags_providerOnly(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.RouteTable resourceName := "aws_route_table.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), CheckDestroy: testAccCheckRouteTableDestroy(ctx), @@ -862,10 +870,11 @@ func TestAccVPCRouteTable_tags_DefaultTags_providerOnly(t *testing.T) { func TestAccVPCRouteTable_tags_DefaultTags_nonOverlapping(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.RouteTable resourceName := "aws_route_table.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), CheckDestroy: testAccCheckRouteTableDestroy(ctx), @@ -1015,10 +1024,11 @@ func TestAccVPCRouteTable_tags_DefaultTags_nonOverlapping(t *testing.T) { func TestAccVPCRouteTable_tags_DefaultTags_overlapping(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.RouteTable resourceName := "aws_route_table.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), CheckDestroy: testAccCheckRouteTableDestroy(ctx), @@ -1184,10 +1194,11 @@ func TestAccVPCRouteTable_tags_DefaultTags_overlapping(t *testing.T) { func TestAccVPCRouteTable_tags_DefaultTags_updateToProviderOnly(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.RouteTable resourceName := "aws_route_table.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), CheckDestroy: 
testAccCheckRouteTableDestroy(ctx), @@ -1270,10 +1281,11 @@ func TestAccVPCRouteTable_tags_DefaultTags_updateToProviderOnly(t *testing.T) { func TestAccVPCRouteTable_tags_DefaultTags_updateToResourceOnly(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.RouteTable resourceName := "aws_route_table.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), CheckDestroy: testAccCheckRouteTableDestroy(ctx), @@ -1355,10 +1367,11 @@ func TestAccVPCRouteTable_tags_DefaultTags_updateToResourceOnly(t *testing.T) { func TestAccVPCRouteTable_tags_DefaultTags_emptyResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.RouteTable resourceName := "aws_route_table.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), CheckDestroy: testAccCheckRouteTableDestroy(ctx), @@ -1417,10 +1430,11 @@ func TestAccVPCRouteTable_tags_DefaultTags_emptyResourceTag(t *testing.T) { func TestAccVPCRouteTable_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.RouteTable resourceName := "aws_route_table.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), CheckDestroy: testAccCheckRouteTableDestroy(ctx), @@ -1471,10 +1485,11 @@ func TestAccVPCRouteTable_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T) { func TestAccVPCRouteTable_tags_DefaultTags_nullOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.RouteTable resourceName := "aws_route_table.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() 
{ acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), CheckDestroy: testAccCheckRouteTableDestroy(ctx), @@ -1530,10 +1545,11 @@ func TestAccVPCRouteTable_tags_DefaultTags_nullOverlappingResourceTag(t *testing func TestAccVPCRouteTable_tags_DefaultTags_nullNonOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.RouteTable resourceName := "aws_route_table.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), CheckDestroy: testAccCheckRouteTableDestroy(ctx), @@ -1589,10 +1605,11 @@ func TestAccVPCRouteTable_tags_DefaultTags_nullNonOverlappingResourceTag(t *test func TestAccVPCRouteTable_tags_ComputedTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.RouteTable resourceName := "aws_route_table.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), CheckDestroy: testAccCheckRouteTableDestroy(ctx), @@ -1641,10 +1658,11 @@ func TestAccVPCRouteTable_tags_ComputedTag_OnCreate(t *testing.T) { func TestAccVPCRouteTable_tags_ComputedTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.RouteTable resourceName := "aws_route_table.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), CheckDestroy: testAccCheckRouteTableDestroy(ctx), @@ -1734,10 +1752,11 @@ func TestAccVPCRouteTable_tags_ComputedTag_OnUpdate_Add(t *testing.T) { func TestAccVPCRouteTable_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.RouteTable resourceName := "aws_route_table.test" - resource.ParallelTest(t, resource.TestCase{ 
+ acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), CheckDestroy: testAccCheckRouteTableDestroy(ctx), @@ -1817,10 +1836,11 @@ func TestAccVPCRouteTable_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { func TestAccVPCRouteTable_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.RouteTable resourceName := "aws_route_table.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), CheckDestroy: testAccCheckRouteTableDestroy(ctx), @@ -1975,10 +1995,11 @@ func TestAccVPCRouteTable_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { func TestAccVPCRouteTable_tags_IgnoreTags_Overlap_ResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.RouteTable resourceName := "aws_route_table.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), CheckDestroy: testAccCheckRouteTableDestroy(ctx), diff --git a/internal/service/ec2/vpc_route_table_test.go b/internal/service/ec2/vpc_route_table_test.go index 09fa726147e2..a255cad82aca 100644 --- a/internal/service/ec2/vpc_route_table_test.go +++ b/internal/service/ec2/vpc_route_table_test.go @@ -13,7 +13,6 @@ import ( "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/ec2" awstypes "github.com/aws/aws-sdk-go-v2/service/ec2/types" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/plancheck" @@ -1312,16 +1311,16 @@ func testAccCheckRouteTableWaitForVPCEndpointRoute(ctx 
context.Context, routeTab plId := aws.ToString(resp.PrefixLists[0].PrefixListId) - err = retry.RetryContext(ctx, 3*time.Minute, func() *retry.RetryError { + err = tfresource.Retry(ctx, 3*time.Minute, func(ctx context.Context) *tfresource.RetryError { input := ec2.DescribeRouteTablesInput{ RouteTableIds: []string{aws.ToString(routeTable.RouteTableId)}, } resp, err := conn.DescribeRouteTables(ctx, &input) if err != nil { - return retry.NonRetryableError(err) + return tfresource.NonRetryableError(err) } if resp == nil || len(resp.RouteTables) == 0 { - return retry.NonRetryableError(fmt.Errorf("Route Table not found")) + return tfresource.NonRetryableError(fmt.Errorf("Route Table not found")) } for _, route := range resp.RouteTables[0].Routes { @@ -1330,7 +1329,7 @@ func testAccCheckRouteTableWaitForVPCEndpointRoute(ctx context.Context, routeTab } } - return retry.RetryableError(fmt.Errorf("Route not found")) + return tfresource.RetryableError(fmt.Errorf("Route not found")) }) return err diff --git a/internal/service/ec2/vpc_security_group.go b/internal/service/ec2/vpc_security_group.go index 02aebaf72814..b335b96d4476 100644 --- a/internal/service/ec2/vpc_security_group.go +++ b/internal/service/ec2/vpc_security_group.go @@ -15,7 +15,6 @@ import ( "github.com/YakDriver/regexache" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/arn" "github.com/aws/aws-sdk-go-v2/service/ec2" awstypes "github.com/aws/aws-sdk-go-v2/service/ec2/types" "github.com/hashicorp/aws-sdk-go-base/v2/tfawserr" @@ -36,8 +35,11 @@ import ( // @SDKResource("aws_security_group", name="Security Group") // @Tags(identifierAttribute="id") -// @Testing(existsType="github.com/aws/aws-sdk-go-v2/service/ec2/types;types.SecurityGroup") +// @Testing(existsType="github.com/aws/aws-sdk-go-v2/service/ec2/types;awstypes;awstypes.SecurityGroup") // @Testing(importIgnore="revoke_rules_on_delete") +// @IdentityAttribute("id") +// @Testing(preIdentityVersion="v6.7.0") +// 
@Testing(plannableImportAction="NoOp") func resourceSecurityGroup() *schema.Resource { //lintignore:R011 return &schema.Resource{ @@ -46,10 +48,6 @@ func resourceSecurityGroup() *schema.Resource { UpdateWithoutTimeout: resourceSecurityGroupUpdate, DeleteWithoutTimeout: resourceSecurityGroupDelete, - Importer: &schema.ResourceImporter{ - StateContext: schema.ImportStatePassthroughContext, - }, - Timeouts: &schema.ResourceTimeout{ Create: schema.DefaultTimeout(10 * time.Minute), Delete: schema.DefaultTimeout(15 * time.Minute), @@ -187,7 +185,6 @@ var ( func resourceSecurityGroupCreate(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).EC2Client(ctx) name := create.Name(d.Get(names.AttrName).(string), d.Get(names.AttrNamePrefix).(string)) @@ -267,8 +264,8 @@ func resourceSecurityGroupCreate(ctx context.Context, d *schema.ResourceData, me func resourceSecurityGroupRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { var diags diag.Diagnostics - - conn := meta.(*conns.AWSClient).EC2Client(ctx) + c := meta.(*conns.AWSClient) + conn := c.EC2Client(ctx) sg, err := findSecurityGroupByID(ctx, conn, d.Id()) @@ -294,14 +291,7 @@ func resourceSecurityGroupRead(ctx context.Context, d *schema.ResourceData, meta egressRules := matchRules("egress", localEgressRules, remoteEgressRules) ownerID := aws.ToString(sg.OwnerId) - arn := arn.ARN{ - Partition: meta.(*conns.AWSClient).Partition(ctx), - Service: names.EC2, - Region: meta.(*conns.AWSClient).Region(ctx), - AccountID: ownerID, - Resource: fmt.Sprintf("security-group/%s", d.Id()), - } - d.Set(names.AttrARN, arn.String()) + d.Set(names.AttrARN, securityGroupARN(ctx, c, ownerID, d.Id())) d.Set(names.AttrDescription, sg.Description) d.Set(names.AttrName, sg.GroupName) d.Set(names.AttrNamePrefix, create.NamePrefixFromName(aws.ToString(sg.GroupName))) @@ -323,7 +313,6 @@ func resourceSecurityGroupRead(ctx context.Context, d 
*schema.ResourceData, meta func resourceSecurityGroupUpdate(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).EC2Client(ctx) group, err := findSecurityGroupByID(ctx, conn, d.Id()) @@ -354,7 +343,7 @@ func resourceSecurityGroupDelete(ctx context.Context, d *schema.ResourceData, me ctx = tflog.SetField(ctx, logging.KeyResourceId, d.Id()) ctx = tflog.SetField(ctx, names.AttrVPCID, d.Get(names.AttrVPCID)) - if err := deleteLingeringENIs(ctx, meta.(*conns.AWSClient).EC2Client(ctx), "group-id", d.Id(), d.Timeout(schema.TimeoutDelete)); err != nil { + if err := deleteLingeringENIs(ctx, conn, "group-id", d.Id(), d.Timeout(schema.TimeoutDelete)); err != nil { return sdkdiag.AppendErrorf(diags, "deleting ENIs using Security Group (%s): %s", d.Id(), err) } @@ -378,7 +367,7 @@ func resourceSecurityGroupDelete(ctx context.Context, d *schema.ResourceData, me _, err := tfresource.RetryWhenAWSErrCodeEquals( ctx, firstShortRetry, // short initial attempt followed by full length attempt - func() (any, error) { + func(ctx context.Context) (any, error) { return conn.DeleteSecurityGroup(ctx, &ec2.DeleteSecurityGroupInput{ GroupId: aws.String(d.Id()), }) @@ -398,7 +387,7 @@ func resourceSecurityGroupDelete(ctx context.Context, d *schema.ResourceData, me _, err = tfresource.RetryWhenAWSErrCodeEquals( ctx, remainingRetry, - func() (any, error) { + func(ctx context.Context) (any, error) { return conn.DeleteSecurityGroup(ctx, &ec2.DeleteSecurityGroupInput{ GroupId: aws.String(d.Id()), }) @@ -415,7 +404,7 @@ func resourceSecurityGroupDelete(ctx context.Context, d *schema.ResourceData, me return sdkdiag.AppendErrorf(diags, "deleting Security Group (%s): %s", d.Id(), err) } - _, err = tfresource.RetryUntilNotFound(ctx, ec2PropagationTimeout, func() (any, error) { + _, err = tfresource.RetryUntilNotFound(ctx, ec2PropagationTimeout, func(ctx context.Context) (any, error) { return findSecurityGroupByID(ctx, conn, 
d.Id()) }) @@ -426,6 +415,10 @@ func resourceSecurityGroupDelete(ctx context.Context, d *schema.ResourceData, me return diags } +func securityGroupARN(ctx context.Context, c *conns.AWSClient, accountID, sgID string) string { + return c.RegionalARNWithAccount(ctx, names.EC2, accountID, "security-group/"+sgID) +} + // forceRevokeSecurityGroupRules revokes all of the security group's ingress & egress rules // AND rules in other security groups that depend on this security group. Trying to delete // this security group with rules that originate in other groups but point here, will cause @@ -438,7 +431,7 @@ func forceRevokeSecurityGroupRules(ctx context.Context, conn *ec2.Client, id str rules, err := rulesInSGsTouchingThis(ctx, conn, id, searchAll) if err != nil { - return fmt.Errorf("describing security group rules: %s", err) + return fmt.Errorf("describing security group rules: %w", err) } for _, rule := range rules { @@ -502,7 +495,7 @@ func rulesInSGsTouchingThis(ctx context.Context, conn *ec2.Client, id string, se } else { sgs, err := relatedSGs(ctx, conn, id) if err != nil { - return nil, fmt.Errorf("describing security group rules: %s", err) + return nil, fmt.Errorf("describing security group rules: %w", err) } input = &ec2.DescribeSecurityGroupRulesInput{ diff --git a/internal/service/ec2/vpc_security_group_data_source.go b/internal/service/ec2/vpc_security_group_data_source.go index 7776b22e4896..df46c2228ccc 100644 --- a/internal/service/ec2/vpc_security_group_data_source.go +++ b/internal/service/ec2/vpc_security_group_data_source.go @@ -5,11 +5,9 @@ package ec2 import ( "context" - "fmt" "time" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/arn" "github.com/aws/aws-sdk-go-v2/service/ec2" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" @@ -63,8 +61,8 @@ func dataSourceSecurityGroup() *schema.Resource { func dataSourceSecurityGroupRead(ctx context.Context, d 
*schema.ResourceData, meta any) diag.Diagnostics { var diags diag.Diagnostics - - conn := meta.(*conns.AWSClient).EC2Client(ctx) + c := meta.(*conns.AWSClient) + conn := c.EC2Client(ctx) input := &ec2.DescribeSecurityGroupsInput{ Filters: newAttributeFilterList( @@ -99,15 +97,8 @@ func dataSourceSecurityGroupRead(ctx context.Context, d *schema.ResourceData, me } d.SetId(aws.ToString(sg.GroupId)) - - arn := arn.ARN{ - Partition: meta.(*conns.AWSClient).Partition(ctx), - Service: names.EC2, - Region: meta.(*conns.AWSClient).Region(ctx), - AccountID: *sg.OwnerId, - Resource: fmt.Sprintf("security-group/%s", *sg.GroupId), - }.String() - d.Set(names.AttrARN, arn) + ownerID := aws.ToString(sg.OwnerId) + d.Set(names.AttrARN, securityGroupARN(ctx, c, ownerID, d.Id())) d.Set(names.AttrDescription, sg.Description) d.Set(names.AttrName, sg.GroupName) d.Set(names.AttrVPCID, sg.VpcId) diff --git a/internal/service/ec2/vpc_security_group_data_source_tags_gen_test.go b/internal/service/ec2/vpc_security_group_data_source_tags_gen_test.go index c50e15c627f3..9440862299fc 100644 --- a/internal/service/ec2/vpc_security_group_data_source_tags_gen_test.go +++ b/internal/service/ec2/vpc_security_group_data_source_tags_gen_test.go @@ -8,7 +8,6 @@ import ( "unique" "github.com/hashicorp/terraform-plugin-testing/config" - sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/knownvalue" "github.com/hashicorp/terraform-plugin-testing/statecheck" @@ -22,10 +21,11 @@ import ( func TestAccVPCSecurityGroupDataSource_tags(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_security_group.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { 
acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -50,10 +50,11 @@ func TestAccVPCSecurityGroupDataSource_tags(t *testing.T) { func TestAccVPCSecurityGroupDataSource_tags_NullMap(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_security_group.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -74,10 +75,11 @@ func TestAccVPCSecurityGroupDataSource_tags_NullMap(t *testing.T) { func TestAccVPCSecurityGroupDataSource_tags_EmptyMap(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_security_group.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -98,10 +100,11 @@ func TestAccVPCSecurityGroupDataSource_tags_EmptyMap(t *testing.T) { func TestAccVPCSecurityGroupDataSource_tags_DefaultTags_nonOverlapping(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_security_group.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), Steps: []resource.TestStep{ @@ -130,10 +133,11 @@ 
func TestAccVPCSecurityGroupDataSource_tags_DefaultTags_nonOverlapping(t *testin func TestAccVPCSecurityGroupDataSource_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_security_group.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), Steps: []resource.TestStep{ @@ -168,10 +172,11 @@ func TestAccVPCSecurityGroupDataSource_tags_IgnoreTags_Overlap_DefaultTag(t *tes func TestAccVPCSecurityGroupDataSource_tags_IgnoreTags_Overlap_ResourceTag(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_security_group.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), Steps: []resource.TestStep{ diff --git a/internal/service/ec2/vpc_security_group_egress_rule.go b/internal/service/ec2/vpc_security_group_egress_rule.go index 78ddd3798d76..be44e0d5c027 100644 --- a/internal/service/ec2/vpc_security_group_egress_rule.go +++ b/internal/service/ec2/vpc_security_group_egress_rule.go @@ -15,7 +15,10 @@ import ( // @FrameworkResource("aws_vpc_security_group_egress_rule", name="Security Group Egress Rule") // @Tags(identifierAttribute="id") -// @Testing(existsType="github.com/aws/aws-sdk-go-v2/service/ec2/types;types.SecurityGroupRule") +// @IdentityAttribute("id") +// @Testing(existsType="github.com/aws/aws-sdk-go-v2/service/ec2/types;awstypes;awstypes.SecurityGroupRule") +// @Testing(idAttrDuplicates="security_group_rule_id") +// 
@Testing(preIdentityVersion="v6.12.0") func newSecurityGroupEgressRuleResource(context.Context) (resource.ResourceWithConfigure, error) { r := &securityGroupEgressRuleResource{} r.securityGroupRule = r diff --git a/internal/service/ec2/vpc_security_group_egress_rule_identity_gen_test.go b/internal/service/ec2/vpc_security_group_egress_rule_identity_gen_test.go new file mode 100644 index 000000000000..2cdf51e07c77 --- /dev/null +++ b/internal/service/ec2/vpc_security_group_egress_rule_identity_gen_test.go @@ -0,0 +1,314 @@ +// Code generated by internal/generate/identitytests/main.go; DO NOT EDIT. + +package ec2_test + +import ( + "testing" + + awstypes "github.com/aws/aws-sdk-go-v2/service/ec2/types" + "github.com/hashicorp/terraform-plugin-testing/compare" + "github.com/hashicorp/terraform-plugin-testing/config" + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/knownvalue" + "github.com/hashicorp/terraform-plugin-testing/plancheck" + "github.com/hashicorp/terraform-plugin-testing/statecheck" + "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" + "github.com/hashicorp/terraform-plugin-testing/tfversion" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + tfknownvalue "github.com/hashicorp/terraform-provider-aws/internal/acctest/knownvalue" + tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccVPCSecurityGroupEgressRule_Identity_Basic(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.SecurityGroupRule + resourceName := "aws_vpc_security_group_egress_rule.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + 
PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), + CheckDestroy: testAccCheckSecurityGroupEgressRuleDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + // Step 1: Setup + { + ConfigDirectory: config.StaticDirectory("testdata/SecurityGroupEgressRule/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSecurityGroupEgressRuleExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New("security_group_rule_id"), compare.ValuesSame()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + names.AttrID: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrID)), + }, + }, + + // Step 2: Import command + { + ConfigDirectory: config.StaticDirectory("testdata/SecurityGroupEgressRule/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ImportStateKind: resource.ImportCommandWithID, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + + // Step 3: Import block with Import ID + { + ConfigDirectory: config.StaticDirectory("testdata/SecurityGroupEgressRule/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + 
plancheck.ExpectKnownValue(resourceName, tfjsonpath.New("security_group_rule_id"), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + }, + }, + }, + + // Step 4: Import block with Resource Identity + { + ConfigDirectory: config.StaticDirectory("testdata/SecurityGroupEgressRule/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithResourceIdentity, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New("security_group_rule_id"), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + }, + }, + }, + }, + }) +} + +func TestAccVPCSecurityGroupEgressRule_Identity_RegionOverride(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_vpc_security_group_egress_rule.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), + CheckDestroy: acctest.CheckDestroyNoop, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + // Step 1: Setup + { + ConfigDirectory: config.StaticDirectory("testdata/SecurityGroupEgressRule/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": 
config.StringVariable(acctest.AlternateRegion()), + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New("security_group_rule_id"), compare.ValuesSame()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.AlternateRegion()), + names.AttrID: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrID)), + }, + }, + + // Step 2: Import command + { + ConfigDirectory: config.StaticDirectory("testdata/SecurityGroupEgressRule/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ImportStateKind: resource.ImportCommandWithID, + ImportStateIdFunc: acctest.CrossRegionImportStateIdFunc(resourceName), + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + + // Step 3: Import block with Import ID + { + ConfigDirectory: config.StaticDirectory("testdata/SecurityGroupEgressRule/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportStateIdFunc: acctest.CrossRegionImportStateIdFunc(resourceName), + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + }, + 
+ // Step 4: Import block with Resource Identity + { + ConfigDirectory: config.StaticDirectory("testdata/SecurityGroupEgressRule/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithResourceIdentity, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + }, + }, + }) +} + +// Resource Identity was added after v6.12.0 +func TestAccVPCSecurityGroupEgressRule_Identity_ExistingResource(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.SecurityGroupRule + resourceName := "aws_vpc_security_group_egress_rule.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), + CheckDestroy: testAccCheckSecurityGroupEgressRuleDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/SecurityGroupEgressRule/basic_v6.12.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSecurityGroupEgressRuleExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: 
config.StaticDirectory("testdata/SecurityGroupEgressRule/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + names.AttrID: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrID)), + }, + }, + }, + }) +} + +// Resource Identity was added after v6.12.0 +func TestAccVPCSecurityGroupEgressRule_Identity_ExistingResource_NoRefresh_NoChange(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.SecurityGroupRule + resourceName := "aws_vpc_security_group_egress_rule.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), + CheckDestroy: testAccCheckSecurityGroupEgressRuleDestroy(ctx), + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/SecurityGroupEgressRule/basic_v6.12.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSecurityGroupEgressRuleExists(ctx, resourceName, 
&v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/SecurityGroupEgressRule/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + }, + }) +} diff --git a/internal/service/ec2/vpc_security_group_egress_rule_tags_gen_test.go b/internal/service/ec2/vpc_security_group_egress_rule_tags_gen_test.go index d2d229fbef95..d25fcd085f7e 100644 --- a/internal/service/ec2/vpc_security_group_egress_rule_tags_gen_test.go +++ b/internal/service/ec2/vpc_security_group_egress_rule_tags_gen_test.go @@ -5,9 +5,8 @@ package ec2_test import ( "testing" - "github.com/aws/aws-sdk-go-v2/service/ec2/types" + awstypes "github.com/aws/aws-sdk-go-v2/service/ec2/types" "github.com/hashicorp/terraform-plugin-testing/config" - sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/knownvalue" "github.com/hashicorp/terraform-plugin-testing/plancheck" @@ -19,11 +18,12 @@ import ( func TestAccVPCSecurityGroupEgressRule_tags(t *testing.T) { ctx := acctest.Context(t) - var v types.SecurityGroupRule + + var v awstypes.SecurityGroupRule resourceName := "aws_vpc_security_group_egress_rule.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - 
resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), CheckDestroy: testAccCheckSecurityGroupEgressRuleDestroy(ctx), @@ -201,11 +201,12 @@ func TestAccVPCSecurityGroupEgressRule_tags(t *testing.T) { func TestAccVPCSecurityGroupEgressRule_tags_null(t *testing.T) { ctx := acctest.Context(t) - var v types.SecurityGroupRule + + var v awstypes.SecurityGroupRule resourceName := "aws_vpc_security_group_egress_rule.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), CheckDestroy: testAccCheckSecurityGroupEgressRuleDestroy(ctx), @@ -263,11 +264,12 @@ func TestAccVPCSecurityGroupEgressRule_tags_null(t *testing.T) { func TestAccVPCSecurityGroupEgressRule_tags_EmptyMap(t *testing.T) { ctx := acctest.Context(t) - var v types.SecurityGroupRule + + var v awstypes.SecurityGroupRule resourceName := "aws_vpc_security_group_egress_rule.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), CheckDestroy: testAccCheckSecurityGroupEgressRuleDestroy(ctx), @@ -313,11 +315,12 @@ func TestAccVPCSecurityGroupEgressRule_tags_EmptyMap(t *testing.T) { func TestAccVPCSecurityGroupEgressRule_tags_AddOnUpdate(t *testing.T) { ctx := acctest.Context(t) - var v types.SecurityGroupRule + + var v awstypes.SecurityGroupRule resourceName := "aws_vpc_security_group_egress_rule.test" - rName := 
sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), CheckDestroy: testAccCheckSecurityGroupEgressRuleDestroy(ctx), @@ -393,11 +396,12 @@ func TestAccVPCSecurityGroupEgressRule_tags_AddOnUpdate(t *testing.T) { func TestAccVPCSecurityGroupEgressRule_tags_EmptyTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) - var v types.SecurityGroupRule + + var v awstypes.SecurityGroupRule resourceName := "aws_vpc_security_group_egress_rule.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), CheckDestroy: testAccCheckSecurityGroupEgressRuleDestroy(ctx), @@ -483,11 +487,12 @@ func TestAccVPCSecurityGroupEgressRule_tags_EmptyTag_OnCreate(t *testing.T) { func TestAccVPCSecurityGroupEgressRule_tags_EmptyTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) - var v types.SecurityGroupRule + + var v awstypes.SecurityGroupRule resourceName := "aws_vpc_security_group_egress_rule.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), CheckDestroy: testAccCheckSecurityGroupEgressRuleDestroy(ctx), @@ -622,11 +627,12 @@ func TestAccVPCSecurityGroupEgressRule_tags_EmptyTag_OnUpdate_Add(t *testing.T) func TestAccVPCSecurityGroupEgressRule_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { 
ctx := acctest.Context(t) - var v types.SecurityGroupRule + + var v awstypes.SecurityGroupRule resourceName := "aws_vpc_security_group_egress_rule.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), CheckDestroy: testAccCheckSecurityGroupEgressRuleDestroy(ctx), @@ -712,11 +718,12 @@ func TestAccVPCSecurityGroupEgressRule_tags_EmptyTag_OnUpdate_Replace(t *testing func TestAccVPCSecurityGroupEgressRule_tags_DefaultTags_providerOnly(t *testing.T) { ctx := acctest.Context(t) - var v types.SecurityGroupRule + + var v awstypes.SecurityGroupRule resourceName := "aws_vpc_security_group_egress_rule.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), CheckDestroy: testAccCheckSecurityGroupEgressRuleDestroy(ctx), @@ -893,11 +900,12 @@ func TestAccVPCSecurityGroupEgressRule_tags_DefaultTags_providerOnly(t *testing. 
func TestAccVPCSecurityGroupEgressRule_tags_DefaultTags_nonOverlapping(t *testing.T) { ctx := acctest.Context(t) - var v types.SecurityGroupRule + + var v awstypes.SecurityGroupRule resourceName := "aws_vpc_security_group_egress_rule.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), CheckDestroy: testAccCheckSecurityGroupEgressRuleDestroy(ctx), @@ -1053,11 +1061,12 @@ func TestAccVPCSecurityGroupEgressRule_tags_DefaultTags_nonOverlapping(t *testin func TestAccVPCSecurityGroupEgressRule_tags_DefaultTags_overlapping(t *testing.T) { ctx := acctest.Context(t) - var v types.SecurityGroupRule + + var v awstypes.SecurityGroupRule resourceName := "aws_vpc_security_group_egress_rule.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), CheckDestroy: testAccCheckSecurityGroupEgressRuleDestroy(ctx), @@ -1229,11 +1238,12 @@ func TestAccVPCSecurityGroupEgressRule_tags_DefaultTags_overlapping(t *testing.T func TestAccVPCSecurityGroupEgressRule_tags_DefaultTags_updateToProviderOnly(t *testing.T) { ctx := acctest.Context(t) - var v types.SecurityGroupRule + + var v awstypes.SecurityGroupRule resourceName := "aws_vpc_security_group_egress_rule.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: 
acctest.ErrorCheck(t, names.EC2ServiceID), CheckDestroy: testAccCheckSecurityGroupEgressRuleDestroy(ctx), @@ -1319,11 +1329,12 @@ func TestAccVPCSecurityGroupEgressRule_tags_DefaultTags_updateToProviderOnly(t * func TestAccVPCSecurityGroupEgressRule_tags_DefaultTags_updateToResourceOnly(t *testing.T) { ctx := acctest.Context(t) - var v types.SecurityGroupRule + + var v awstypes.SecurityGroupRule resourceName := "aws_vpc_security_group_egress_rule.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), CheckDestroy: testAccCheckSecurityGroupEgressRuleDestroy(ctx), @@ -1408,11 +1419,12 @@ func TestAccVPCSecurityGroupEgressRule_tags_DefaultTags_updateToResourceOnly(t * func TestAccVPCSecurityGroupEgressRule_tags_DefaultTags_emptyResourceTag(t *testing.T) { ctx := acctest.Context(t) - var v types.SecurityGroupRule + + var v awstypes.SecurityGroupRule resourceName := "aws_vpc_security_group_egress_rule.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), CheckDestroy: testAccCheckSecurityGroupEgressRuleDestroy(ctx), @@ -1474,11 +1486,12 @@ func TestAccVPCSecurityGroupEgressRule_tags_DefaultTags_emptyResourceTag(t *test func TestAccVPCSecurityGroupEgressRule_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T) { ctx := acctest.Context(t) - var v types.SecurityGroupRule + + var v awstypes.SecurityGroupRule resourceName := "aws_vpc_security_group_egress_rule.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName 
:= acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), CheckDestroy: testAccCheckSecurityGroupEgressRuleDestroy(ctx), @@ -1532,11 +1545,12 @@ func TestAccVPCSecurityGroupEgressRule_tags_DefaultTags_emptyProviderOnlyTag(t * func TestAccVPCSecurityGroupEgressRule_tags_DefaultTags_nullOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) - var v types.SecurityGroupRule + + var v awstypes.SecurityGroupRule resourceName := "aws_vpc_security_group_egress_rule.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), CheckDestroy: testAccCheckSecurityGroupEgressRuleDestroy(ctx), @@ -1601,11 +1615,12 @@ func TestAccVPCSecurityGroupEgressRule_tags_DefaultTags_nullOverlappingResourceT func TestAccVPCSecurityGroupEgressRule_tags_DefaultTags_nullNonOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) - var v types.SecurityGroupRule + + var v awstypes.SecurityGroupRule resourceName := "aws_vpc_security_group_egress_rule.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), CheckDestroy: testAccCheckSecurityGroupEgressRuleDestroy(ctx), @@ -1672,11 +1687,12 @@ func TestAccVPCSecurityGroupEgressRule_tags_DefaultTags_nullNonOverlappingResour func TestAccVPCSecurityGroupEgressRule_tags_ComputedTag_OnCreate(t *testing.T) { ctx := 
acctest.Context(t) - var v types.SecurityGroupRule + + var v awstypes.SecurityGroupRule resourceName := "aws_vpc_security_group_egress_rule.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), CheckDestroy: testAccCheckSecurityGroupEgressRuleDestroy(ctx), @@ -1727,11 +1743,12 @@ func TestAccVPCSecurityGroupEgressRule_tags_ComputedTag_OnCreate(t *testing.T) { func TestAccVPCSecurityGroupEgressRule_tags_ComputedTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) - var v types.SecurityGroupRule + + var v awstypes.SecurityGroupRule resourceName := "aws_vpc_security_group_egress_rule.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), CheckDestroy: testAccCheckSecurityGroupEgressRuleDestroy(ctx), @@ -1824,11 +1841,12 @@ func TestAccVPCSecurityGroupEgressRule_tags_ComputedTag_OnUpdate_Add(t *testing. 
func TestAccVPCSecurityGroupEgressRule_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) - var v types.SecurityGroupRule + + var v awstypes.SecurityGroupRule resourceName := "aws_vpc_security_group_egress_rule.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), CheckDestroy: testAccCheckSecurityGroupEgressRuleDestroy(ctx), @@ -1911,11 +1929,12 @@ func TestAccVPCSecurityGroupEgressRule_tags_ComputedTag_OnUpdate_Replace(t *test func TestAccVPCSecurityGroupEgressRule_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { ctx := acctest.Context(t) - var v types.SecurityGroupRule + + var v awstypes.SecurityGroupRule resourceName := "aws_vpc_security_group_egress_rule.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), CheckDestroy: testAccCheckSecurityGroupEgressRuleDestroy(ctx), @@ -2073,11 +2092,12 @@ func TestAccVPCSecurityGroupEgressRule_tags_IgnoreTags_Overlap_DefaultTag(t *tes func TestAccVPCSecurityGroupEgressRule_tags_IgnoreTags_Overlap_ResourceTag(t *testing.T) { ctx := acctest.Context(t) - var v types.SecurityGroupRule + + var v awstypes.SecurityGroupRule resourceName := "aws_vpc_security_group_egress_rule.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: 
acctest.ErrorCheck(t, names.EC2ServiceID), CheckDestroy: testAccCheckSecurityGroupEgressRuleDestroy(ctx), diff --git a/internal/service/ec2/vpc_security_group_identity_gen_test.go b/internal/service/ec2/vpc_security_group_identity_gen_test.go new file mode 100644 index 000000000000..400398c9db51 --- /dev/null +++ b/internal/service/ec2/vpc_security_group_identity_gen_test.go @@ -0,0 +1,315 @@ +// Code generated by internal/generate/identitytests/main.go; DO NOT EDIT. + +package ec2_test + +import ( + "testing" + + awstypes "github.com/aws/aws-sdk-go-v2/service/ec2/types" + "github.com/hashicorp/terraform-plugin-testing/config" + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/knownvalue" + "github.com/hashicorp/terraform-plugin-testing/plancheck" + "github.com/hashicorp/terraform-plugin-testing/statecheck" + "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" + "github.com/hashicorp/terraform-plugin-testing/tfversion" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + tfknownvalue "github.com/hashicorp/terraform-provider-aws/internal/acctest/knownvalue" + tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccVPCSecurityGroup_Identity_Basic(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.SecurityGroup + resourceName := "aws_security_group.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), + CheckDestroy: testAccCheckSecurityGroupDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, 
+ Steps: []resource.TestStep{ + // Step 1: Setup + { + ConfigDirectory: config.StaticDirectory("testdata/SecurityGroup/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSecurityGroupExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + names.AttrID: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrID)), + }, + }, + + // Step 2: Import command + { + ConfigDirectory: config.StaticDirectory("testdata/SecurityGroup/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ImportStateKind: resource.ImportCommandWithID, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "revoke_rules_on_delete", + }, + }, + + // Step 3: Import block with Import ID + { + ConfigDirectory: config.StaticDirectory("testdata/SecurityGroup/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + }, + }, + }, + + // Step 4: Import block with Resource Identity + { + ConfigDirectory: config.StaticDirectory("testdata/SecurityGroup/basic/"), + 
ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithResourceIdentity, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + }, + }, + }, + }, + }) +} + +func TestAccVPCSecurityGroup_Identity_RegionOverride(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_security_group.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), + CheckDestroy: acctest.CheckDestroyNoop, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + // Step 1: Setup + { + ConfigDirectory: config.StaticDirectory("testdata/SecurityGroup/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.AlternateRegion()), + names.AttrID: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrID)), + }, + }, + + // Step 2: Import command + { + ConfigDirectory: 
config.StaticDirectory("testdata/SecurityGroup/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ImportStateKind: resource.ImportCommandWithID, + ImportStateIdFunc: acctest.CrossRegionImportStateIdFunc(resourceName), + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "revoke_rules_on_delete", + }, + }, + + // Step 3: Import block with Import ID + { + ConfigDirectory: config.StaticDirectory("testdata/SecurityGroup/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportStateIdFunc: acctest.CrossRegionImportStateIdFunc(resourceName), + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + }, + + // Step 4: Import block with Resource Identity + { + ConfigDirectory: config.StaticDirectory("testdata/SecurityGroup/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithResourceIdentity, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + 
}, + }, + }) +} + +// Resource Identity was added after v6.7.0 +func TestAccVPCSecurityGroup_Identity_ExistingResource(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.SecurityGroup + resourceName := "aws_security_group.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), + CheckDestroy: testAccCheckSecurityGroupDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/SecurityGroup/basic_v6.7.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSecurityGroupExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/SecurityGroup/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + names.AttrID: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, 
tfjsonpath.New(names.AttrID)), + }, + }, + }, + }) +} + +// Resource Identity was added after v6.7.0 +func TestAccVPCSecurityGroup_Identity_ExistingResource_NoRefresh_NoChange(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.SecurityGroup + resourceName := "aws_security_group.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), + CheckDestroy: testAccCheckSecurityGroupDestroy(ctx), + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/SecurityGroup/basic_v6.7.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSecurityGroupExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/SecurityGroup/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + }, + }) +} diff --git 
a/internal/service/ec2/vpc_security_group_ingress_rule.go b/internal/service/ec2/vpc_security_group_ingress_rule.go index ab2b26ec5cbd..9832f31ab1f1 100644 --- a/internal/service/ec2/vpc_security_group_ingress_rule.go +++ b/internal/service/ec2/vpc_security_group_ingress_rule.go @@ -43,7 +43,10 @@ import ( // @FrameworkResource("aws_vpc_security_group_ingress_rule", name="Security Group Ingress Rule") // @Tags(identifierAttribute="id") -// @Testing(existsType="github.com/aws/aws-sdk-go-v2/service/ec2/types;types.SecurityGroupRule") +// @IdentityAttribute("id") +// @Testing(existsType="github.com/aws/aws-sdk-go-v2/service/ec2/types;awstypes;awstypes.SecurityGroupRule") +// @Testing(idAttrDuplicates="security_group_rule_id") +// @Testing(preIdentityVersion="v6.12.0") func newSecurityGroupIngressRuleResource(context.Context) (resource.ResourceWithConfigure, error) { r := &securityGroupIngressRuleResource{} r.securityGroupRule = r @@ -168,7 +171,7 @@ type securityGroupRule interface { type securityGroupRuleResource struct { securityGroupRule framework.ResourceWithModel[securityGroupRuleResourceModel] - framework.WithImportByID + framework.WithImportByIdentity } func (r *securityGroupRuleResource) Schema(ctx context.Context, request resource.SchemaRequest, response *resource.SchemaResponse) { diff --git a/internal/service/ec2/vpc_security_group_ingress_rule_identity_gen_test.go b/internal/service/ec2/vpc_security_group_ingress_rule_identity_gen_test.go new file mode 100644 index 000000000000..dd82b6d841f6 --- /dev/null +++ b/internal/service/ec2/vpc_security_group_ingress_rule_identity_gen_test.go @@ -0,0 +1,314 @@ +// Code generated by internal/generate/identitytests/main.go; DO NOT EDIT. 
+ +package ec2_test + +import ( + "testing" + + awstypes "github.com/aws/aws-sdk-go-v2/service/ec2/types" + "github.com/hashicorp/terraform-plugin-testing/compare" + "github.com/hashicorp/terraform-plugin-testing/config" + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/knownvalue" + "github.com/hashicorp/terraform-plugin-testing/plancheck" + "github.com/hashicorp/terraform-plugin-testing/statecheck" + "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" + "github.com/hashicorp/terraform-plugin-testing/tfversion" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + tfknownvalue "github.com/hashicorp/terraform-provider-aws/internal/acctest/knownvalue" + tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccVPCSecurityGroupIngressRule_Identity_Basic(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.SecurityGroupRule + resourceName := "aws_vpc_security_group_ingress_rule.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), + CheckDestroy: testAccCheckSecurityGroupIngressRuleDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + // Step 1: Setup + { + ConfigDirectory: config.StaticDirectory("testdata/SecurityGroupIngressRule/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSecurityGroupIngressRuleExists(ctx, resourceName, &v), + ), + 
ConfigStateChecks: []statecheck.StateCheck{ + statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New("security_group_rule_id"), compare.ValuesSame()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + names.AttrID: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrID)), + }, + }, + + // Step 2: Import command + { + ConfigDirectory: config.StaticDirectory("testdata/SecurityGroupIngressRule/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ImportStateKind: resource.ImportCommandWithID, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + + // Step 3: Import block with Import ID + { + ConfigDirectory: config.StaticDirectory("testdata/SecurityGroupIngressRule/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New("security_group_rule_id"), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + }, + }, + }, + + // Step 4: Import block with Resource Identity + { + ConfigDirectory: config.StaticDirectory("testdata/SecurityGroupIngressRule/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ResourceName: resourceName, + 
ImportState: true, + ImportStateKind: resource.ImportBlockWithResourceIdentity, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New("security_group_rule_id"), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + }, + }, + }, + }, + }) +} + +func TestAccVPCSecurityGroupIngressRule_Identity_RegionOverride(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_vpc_security_group_ingress_rule.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), + CheckDestroy: acctest.CheckDestroyNoop, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + // Step 1: Setup + { + ConfigDirectory: config.StaticDirectory("testdata/SecurityGroupIngressRule/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New("security_group_rule_id"), compare.ValuesSame()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.AlternateRegion()), + names.AttrID: knownvalue.NotNull(), + }), + 
statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrID)), + }, + }, + + // Step 2: Import command + { + ConfigDirectory: config.StaticDirectory("testdata/SecurityGroupIngressRule/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ImportStateKind: resource.ImportCommandWithID, + ImportStateIdFunc: acctest.CrossRegionImportStateIdFunc(resourceName), + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + + // Step 3: Import block with Import ID + { + ConfigDirectory: config.StaticDirectory("testdata/SecurityGroupIngressRule/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportStateIdFunc: acctest.CrossRegionImportStateIdFunc(resourceName), + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + }, + + // Step 4: Import block with Resource Identity + { + ConfigDirectory: config.StaticDirectory("testdata/SecurityGroupIngressRule/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithResourceIdentity, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + 
plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + }, + }, + }) +} + +// Resource Identity was added after v6.12.0 +func TestAccVPCSecurityGroupIngressRule_Identity_ExistingResource(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.SecurityGroupRule + resourceName := "aws_vpc_security_group_ingress_rule.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), + CheckDestroy: testAccCheckSecurityGroupIngressRuleDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/SecurityGroupIngressRule/basic_v6.12.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSecurityGroupIngressRuleExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/SecurityGroupIngressRule/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + 
names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + names.AttrID: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrID)), + }, + }, + }, + }) +} + +// Resource Identity was added after v6.12.0 +func TestAccVPCSecurityGroupIngressRule_Identity_ExistingResource_NoRefresh_NoChange(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.SecurityGroupRule + resourceName := "aws_vpc_security_group_ingress_rule.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), + CheckDestroy: testAccCheckSecurityGroupIngressRuleDestroy(ctx), + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/SecurityGroupIngressRule/basic_v6.12.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSecurityGroupIngressRuleExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/SecurityGroupIngressRule/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + 
PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + }, + }) +} diff --git a/internal/service/ec2/vpc_security_group_ingress_rule_tags_gen_test.go b/internal/service/ec2/vpc_security_group_ingress_rule_tags_gen_test.go index 4e16c3ed2982..db84211e097d 100644 --- a/internal/service/ec2/vpc_security_group_ingress_rule_tags_gen_test.go +++ b/internal/service/ec2/vpc_security_group_ingress_rule_tags_gen_test.go @@ -5,9 +5,8 @@ package ec2_test import ( "testing" - "github.com/aws/aws-sdk-go-v2/service/ec2/types" + awstypes "github.com/aws/aws-sdk-go-v2/service/ec2/types" "github.com/hashicorp/terraform-plugin-testing/config" - sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/knownvalue" "github.com/hashicorp/terraform-plugin-testing/plancheck" @@ -19,11 +18,12 @@ import ( func TestAccVPCSecurityGroupIngressRule_tags(t *testing.T) { ctx := acctest.Context(t) - var v types.SecurityGroupRule + + var v awstypes.SecurityGroupRule resourceName := "aws_vpc_security_group_ingress_rule.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), CheckDestroy: testAccCheckSecurityGroupIngressRuleDestroy(ctx), @@ -201,11 +201,12 @@ func TestAccVPCSecurityGroupIngressRule_tags(t *testing.T) { func TestAccVPCSecurityGroupIngressRule_tags_null(t *testing.T) { ctx := acctest.Context(t) - var v types.SecurityGroupRule + + var v awstypes.SecurityGroupRule resourceName := 
"aws_vpc_security_group_ingress_rule.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), CheckDestroy: testAccCheckSecurityGroupIngressRuleDestroy(ctx), @@ -263,11 +264,12 @@ func TestAccVPCSecurityGroupIngressRule_tags_null(t *testing.T) { func TestAccVPCSecurityGroupIngressRule_tags_EmptyMap(t *testing.T) { ctx := acctest.Context(t) - var v types.SecurityGroupRule + + var v awstypes.SecurityGroupRule resourceName := "aws_vpc_security_group_ingress_rule.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), CheckDestroy: testAccCheckSecurityGroupIngressRuleDestroy(ctx), @@ -313,11 +315,12 @@ func TestAccVPCSecurityGroupIngressRule_tags_EmptyMap(t *testing.T) { func TestAccVPCSecurityGroupIngressRule_tags_AddOnUpdate(t *testing.T) { ctx := acctest.Context(t) - var v types.SecurityGroupRule + + var v awstypes.SecurityGroupRule resourceName := "aws_vpc_security_group_ingress_rule.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), CheckDestroy: testAccCheckSecurityGroupIngressRuleDestroy(ctx), @@ -393,11 +396,12 @@ func TestAccVPCSecurityGroupIngressRule_tags_AddOnUpdate(t *testing.T) { func TestAccVPCSecurityGroupIngressRule_tags_EmptyTag_OnCreate(t 
*testing.T) { ctx := acctest.Context(t) - var v types.SecurityGroupRule + + var v awstypes.SecurityGroupRule resourceName := "aws_vpc_security_group_ingress_rule.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), CheckDestroy: testAccCheckSecurityGroupIngressRuleDestroy(ctx), @@ -483,11 +487,12 @@ func TestAccVPCSecurityGroupIngressRule_tags_EmptyTag_OnCreate(t *testing.T) { func TestAccVPCSecurityGroupIngressRule_tags_EmptyTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) - var v types.SecurityGroupRule + + var v awstypes.SecurityGroupRule resourceName := "aws_vpc_security_group_ingress_rule.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), CheckDestroy: testAccCheckSecurityGroupIngressRuleDestroy(ctx), @@ -622,11 +627,12 @@ func TestAccVPCSecurityGroupIngressRule_tags_EmptyTag_OnUpdate_Add(t *testing.T) func TestAccVPCSecurityGroupIngressRule_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) - var v types.SecurityGroupRule + + var v awstypes.SecurityGroupRule resourceName := "aws_vpc_security_group_ingress_rule.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), CheckDestroy: 
testAccCheckSecurityGroupIngressRuleDestroy(ctx), @@ -712,11 +718,12 @@ func TestAccVPCSecurityGroupIngressRule_tags_EmptyTag_OnUpdate_Replace(t *testin func TestAccVPCSecurityGroupIngressRule_tags_DefaultTags_providerOnly(t *testing.T) { ctx := acctest.Context(t) - var v types.SecurityGroupRule + + var v awstypes.SecurityGroupRule resourceName := "aws_vpc_security_group_ingress_rule.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), CheckDestroy: testAccCheckSecurityGroupIngressRuleDestroy(ctx), @@ -893,11 +900,12 @@ func TestAccVPCSecurityGroupIngressRule_tags_DefaultTags_providerOnly(t *testing func TestAccVPCSecurityGroupIngressRule_tags_DefaultTags_nonOverlapping(t *testing.T) { ctx := acctest.Context(t) - var v types.SecurityGroupRule + + var v awstypes.SecurityGroupRule resourceName := "aws_vpc_security_group_ingress_rule.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), CheckDestroy: testAccCheckSecurityGroupIngressRuleDestroy(ctx), @@ -1053,11 +1061,12 @@ func TestAccVPCSecurityGroupIngressRule_tags_DefaultTags_nonOverlapping(t *testi func TestAccVPCSecurityGroupIngressRule_tags_DefaultTags_overlapping(t *testing.T) { ctx := acctest.Context(t) - var v types.SecurityGroupRule + + var v awstypes.SecurityGroupRule resourceName := "aws_vpc_security_group_ingress_rule.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - 
resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), CheckDestroy: testAccCheckSecurityGroupIngressRuleDestroy(ctx), @@ -1229,11 +1238,12 @@ func TestAccVPCSecurityGroupIngressRule_tags_DefaultTags_overlapping(t *testing. func TestAccVPCSecurityGroupIngressRule_tags_DefaultTags_updateToProviderOnly(t *testing.T) { ctx := acctest.Context(t) - var v types.SecurityGroupRule + + var v awstypes.SecurityGroupRule resourceName := "aws_vpc_security_group_ingress_rule.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), CheckDestroy: testAccCheckSecurityGroupIngressRuleDestroy(ctx), @@ -1319,11 +1329,12 @@ func TestAccVPCSecurityGroupIngressRule_tags_DefaultTags_updateToProviderOnly(t func TestAccVPCSecurityGroupIngressRule_tags_DefaultTags_updateToResourceOnly(t *testing.T) { ctx := acctest.Context(t) - var v types.SecurityGroupRule + + var v awstypes.SecurityGroupRule resourceName := "aws_vpc_security_group_ingress_rule.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), CheckDestroy: testAccCheckSecurityGroupIngressRuleDestroy(ctx), @@ -1408,11 +1419,12 @@ func TestAccVPCSecurityGroupIngressRule_tags_DefaultTags_updateToResourceOnly(t func TestAccVPCSecurityGroupIngressRule_tags_DefaultTags_emptyResourceTag(t *testing.T) { ctx := acctest.Context(t) - var v types.SecurityGroupRule + + 
var v awstypes.SecurityGroupRule resourceName := "aws_vpc_security_group_ingress_rule.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), CheckDestroy: testAccCheckSecurityGroupIngressRuleDestroy(ctx), @@ -1474,11 +1486,12 @@ func TestAccVPCSecurityGroupIngressRule_tags_DefaultTags_emptyResourceTag(t *tes func TestAccVPCSecurityGroupIngressRule_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T) { ctx := acctest.Context(t) - var v types.SecurityGroupRule + + var v awstypes.SecurityGroupRule resourceName := "aws_vpc_security_group_ingress_rule.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), CheckDestroy: testAccCheckSecurityGroupIngressRuleDestroy(ctx), @@ -1532,11 +1545,12 @@ func TestAccVPCSecurityGroupIngressRule_tags_DefaultTags_emptyProviderOnlyTag(t func TestAccVPCSecurityGroupIngressRule_tags_DefaultTags_nullOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) - var v types.SecurityGroupRule + + var v awstypes.SecurityGroupRule resourceName := "aws_vpc_security_group_ingress_rule.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), CheckDestroy: testAccCheckSecurityGroupIngressRuleDestroy(ctx), @@ -1601,11 +1615,12 @@ func 
TestAccVPCSecurityGroupIngressRule_tags_DefaultTags_nullOverlappingResource func TestAccVPCSecurityGroupIngressRule_tags_DefaultTags_nullNonOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) - var v types.SecurityGroupRule + + var v awstypes.SecurityGroupRule resourceName := "aws_vpc_security_group_ingress_rule.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), CheckDestroy: testAccCheckSecurityGroupIngressRuleDestroy(ctx), @@ -1672,11 +1687,12 @@ func TestAccVPCSecurityGroupIngressRule_tags_DefaultTags_nullNonOverlappingResou func TestAccVPCSecurityGroupIngressRule_tags_ComputedTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) - var v types.SecurityGroupRule + + var v awstypes.SecurityGroupRule resourceName := "aws_vpc_security_group_ingress_rule.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), CheckDestroy: testAccCheckSecurityGroupIngressRuleDestroy(ctx), @@ -1727,11 +1743,12 @@ func TestAccVPCSecurityGroupIngressRule_tags_ComputedTag_OnCreate(t *testing.T) func TestAccVPCSecurityGroupIngressRule_tags_ComputedTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) - var v types.SecurityGroupRule + + var v awstypes.SecurityGroupRule resourceName := "aws_vpc_security_group_ingress_rule.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, 
resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), CheckDestroy: testAccCheckSecurityGroupIngressRuleDestroy(ctx), @@ -1824,11 +1841,12 @@ func TestAccVPCSecurityGroupIngressRule_tags_ComputedTag_OnUpdate_Add(t *testing func TestAccVPCSecurityGroupIngressRule_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) - var v types.SecurityGroupRule + + var v awstypes.SecurityGroupRule resourceName := "aws_vpc_security_group_ingress_rule.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), CheckDestroy: testAccCheckSecurityGroupIngressRuleDestroy(ctx), @@ -1911,11 +1929,12 @@ func TestAccVPCSecurityGroupIngressRule_tags_ComputedTag_OnUpdate_Replace(t *tes func TestAccVPCSecurityGroupIngressRule_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { ctx := acctest.Context(t) - var v types.SecurityGroupRule + + var v awstypes.SecurityGroupRule resourceName := "aws_vpc_security_group_ingress_rule.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), CheckDestroy: testAccCheckSecurityGroupIngressRuleDestroy(ctx), @@ -2073,11 +2092,12 @@ func TestAccVPCSecurityGroupIngressRule_tags_IgnoreTags_Overlap_DefaultTag(t *te func TestAccVPCSecurityGroupIngressRule_tags_IgnoreTags_Overlap_ResourceTag(t *testing.T) { ctx := acctest.Context(t) - var v types.SecurityGroupRule + + var v awstypes.SecurityGroupRule resourceName := 
"aws_vpc_security_group_ingress_rule.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), CheckDestroy: testAccCheckSecurityGroupIngressRuleDestroy(ctx), diff --git a/internal/service/ec2/vpc_security_group_rule.go b/internal/service/ec2/vpc_security_group_rule.go index ee8269fe24eb..a84ec918a3cb 100644 --- a/internal/service/ec2/vpc_security_group_rule.go +++ b/internal/service/ec2/vpc_security_group_rule.go @@ -227,7 +227,7 @@ information and instructions for recovery. Error: %s`, securityGroupID, err) return sdkdiag.AppendErrorf(diags, "authorizing Security Group (%s) Rule (%s): %s", securityGroupID, id, err) } - _, err = tfresource.RetryWhenNotFound(ctx, d.Timeout(schema.TimeoutCreate), func() (any, error) { + _, err = tfresource.RetryWhenNotFound(ctx, d.Timeout(schema.TimeoutCreate), func(ctx context.Context) (any, error) { sg, err := findSecurityGroupByID(ctx, conn, securityGroupID) if err != nil { diff --git a/internal/service/ec2/vpc_security_group_tags_gen_test.go b/internal/service/ec2/vpc_security_group_tags_gen_test.go index ac69432d495c..8a43af9fee18 100644 --- a/internal/service/ec2/vpc_security_group_tags_gen_test.go +++ b/internal/service/ec2/vpc_security_group_tags_gen_test.go @@ -5,9 +5,8 @@ package ec2_test import ( "testing" - "github.com/aws/aws-sdk-go-v2/service/ec2/types" + awstypes "github.com/aws/aws-sdk-go-v2/service/ec2/types" "github.com/hashicorp/terraform-plugin-testing/config" - sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/knownvalue" "github.com/hashicorp/terraform-plugin-testing/plancheck" @@ -19,11 +18,12 @@ import ( func 
TestAccVPCSecurityGroup_tags(t *testing.T) { ctx := acctest.Context(t) - var v types.SecurityGroup + + var v awstypes.SecurityGroup resourceName := "aws_security_group.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), CheckDestroy: testAccCheckSecurityGroupDestroy(ctx), @@ -213,11 +213,12 @@ func TestAccVPCSecurityGroup_tags(t *testing.T) { func TestAccVPCSecurityGroup_tags_null(t *testing.T) { ctx := acctest.Context(t) - var v types.SecurityGroup + + var v awstypes.SecurityGroup resourceName := "aws_security_group.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), CheckDestroy: testAccCheckSecurityGroupDestroy(ctx), @@ -283,11 +284,12 @@ func TestAccVPCSecurityGroup_tags_null(t *testing.T) { func TestAccVPCSecurityGroup_tags_EmptyMap(t *testing.T) { ctx := acctest.Context(t) - var v types.SecurityGroup + + var v awstypes.SecurityGroup resourceName := "aws_security_group.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), CheckDestroy: testAccCheckSecurityGroupDestroy(ctx), @@ -349,11 +351,12 @@ func TestAccVPCSecurityGroup_tags_EmptyMap(t *testing.T) { func TestAccVPCSecurityGroup_tags_AddOnUpdate(t *testing.T) { ctx := acctest.Context(t) - 
var v types.SecurityGroup + + var v awstypes.SecurityGroup resourceName := "aws_security_group.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), CheckDestroy: testAccCheckSecurityGroupDestroy(ctx), @@ -433,11 +436,12 @@ func TestAccVPCSecurityGroup_tags_AddOnUpdate(t *testing.T) { func TestAccVPCSecurityGroup_tags_EmptyTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) - var v types.SecurityGroup + + var v awstypes.SecurityGroup resourceName := "aws_security_group.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), CheckDestroy: testAccCheckSecurityGroupDestroy(ctx), @@ -528,11 +532,12 @@ func TestAccVPCSecurityGroup_tags_EmptyTag_OnCreate(t *testing.T) { func TestAccVPCSecurityGroup_tags_EmptyTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) - var v types.SecurityGroup + + var v awstypes.SecurityGroup resourceName := "aws_security_group.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), CheckDestroy: testAccCheckSecurityGroupDestroy(ctx), @@ -671,11 +676,12 @@ func TestAccVPCSecurityGroup_tags_EmptyTag_OnUpdate_Add(t *testing.T) { func TestAccVPCSecurityGroup_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) 
- var v types.SecurityGroup + + var v awstypes.SecurityGroup resourceName := "aws_security_group.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), CheckDestroy: testAccCheckSecurityGroupDestroy(ctx), @@ -763,11 +769,12 @@ func TestAccVPCSecurityGroup_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { func TestAccVPCSecurityGroup_tags_DefaultTags_providerOnly(t *testing.T) { ctx := acctest.Context(t) - var v types.SecurityGroup + + var v awstypes.SecurityGroup resourceName := "aws_security_group.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), CheckDestroy: testAccCheckSecurityGroupDestroy(ctx), @@ -956,11 +963,12 @@ func TestAccVPCSecurityGroup_tags_DefaultTags_providerOnly(t *testing.T) { func TestAccVPCSecurityGroup_tags_DefaultTags_nonOverlapping(t *testing.T) { ctx := acctest.Context(t) - var v types.SecurityGroup + + var v awstypes.SecurityGroup resourceName := "aws_security_group.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), CheckDestroy: testAccCheckSecurityGroupDestroy(ctx), @@ -1125,11 +1133,12 @@ func TestAccVPCSecurityGroup_tags_DefaultTags_nonOverlapping(t *testing.T) { func TestAccVPCSecurityGroup_tags_DefaultTags_overlapping(t 
*testing.T) { ctx := acctest.Context(t) - var v types.SecurityGroup + + var v awstypes.SecurityGroup resourceName := "aws_security_group.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), CheckDestroy: testAccCheckSecurityGroupDestroy(ctx), @@ -1310,11 +1319,12 @@ func TestAccVPCSecurityGroup_tags_DefaultTags_overlapping(t *testing.T) { func TestAccVPCSecurityGroup_tags_DefaultTags_updateToProviderOnly(t *testing.T) { ctx := acctest.Context(t) - var v types.SecurityGroup + + var v awstypes.SecurityGroup resourceName := "aws_security_group.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), CheckDestroy: testAccCheckSecurityGroupDestroy(ctx), @@ -1403,11 +1413,12 @@ func TestAccVPCSecurityGroup_tags_DefaultTags_updateToProviderOnly(t *testing.T) func TestAccVPCSecurityGroup_tags_DefaultTags_updateToResourceOnly(t *testing.T) { ctx := acctest.Context(t) - var v types.SecurityGroup + + var v awstypes.SecurityGroup resourceName := "aws_security_group.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), CheckDestroy: testAccCheckSecurityGroupDestroy(ctx), @@ -1495,11 +1506,12 @@ func TestAccVPCSecurityGroup_tags_DefaultTags_updateToResourceOnly(t 
*testing.T) func TestAccVPCSecurityGroup_tags_DefaultTags_emptyResourceTag(t *testing.T) { ctx := acctest.Context(t) - var v types.SecurityGroup + + var v awstypes.SecurityGroup resourceName := "aws_security_group.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), CheckDestroy: testAccCheckSecurityGroupDestroy(ctx), @@ -1563,11 +1575,12 @@ func TestAccVPCSecurityGroup_tags_DefaultTags_emptyResourceTag(t *testing.T) { func TestAccVPCSecurityGroup_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T) { ctx := acctest.Context(t) - var v types.SecurityGroup + + var v awstypes.SecurityGroup resourceName := "aws_security_group.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), CheckDestroy: testAccCheckSecurityGroupDestroy(ctx), @@ -1623,11 +1636,12 @@ func TestAccVPCSecurityGroup_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T) func TestAccVPCSecurityGroup_tags_DefaultTags_nullOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) - var v types.SecurityGroup + + var v awstypes.SecurityGroup resourceName := "aws_security_group.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), CheckDestroy: testAccCheckSecurityGroupDestroy(ctx), @@ 
-1688,11 +1702,12 @@ func TestAccVPCSecurityGroup_tags_DefaultTags_nullOverlappingResourceTag(t *test func TestAccVPCSecurityGroup_tags_DefaultTags_nullNonOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) - var v types.SecurityGroup + + var v awstypes.SecurityGroup resourceName := "aws_security_group.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), CheckDestroy: testAccCheckSecurityGroupDestroy(ctx), @@ -1753,11 +1768,12 @@ func TestAccVPCSecurityGroup_tags_DefaultTags_nullNonOverlappingResourceTag(t *t func TestAccVPCSecurityGroup_tags_ComputedTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) - var v types.SecurityGroup + + var v awstypes.SecurityGroup resourceName := "aws_security_group.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), CheckDestroy: testAccCheckSecurityGroupDestroy(ctx), @@ -1811,11 +1827,12 @@ func TestAccVPCSecurityGroup_tags_ComputedTag_OnCreate(t *testing.T) { func TestAccVPCSecurityGroup_tags_ComputedTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) - var v types.SecurityGroup + + var v awstypes.SecurityGroup resourceName := "aws_security_group.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, 
names.EC2ServiceID), CheckDestroy: testAccCheckSecurityGroupDestroy(ctx), @@ -1911,11 +1928,12 @@ func TestAccVPCSecurityGroup_tags_ComputedTag_OnUpdate_Add(t *testing.T) { func TestAccVPCSecurityGroup_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) - var v types.SecurityGroup + + var v awstypes.SecurityGroup resourceName := "aws_security_group.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), CheckDestroy: testAccCheckSecurityGroupDestroy(ctx), @@ -2001,11 +2019,12 @@ func TestAccVPCSecurityGroup_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { func TestAccVPCSecurityGroup_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { ctx := acctest.Context(t) - var v types.SecurityGroup + + var v awstypes.SecurityGroup resourceName := "aws_security_group.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), CheckDestroy: testAccCheckSecurityGroupDestroy(ctx), @@ -2163,11 +2182,12 @@ func TestAccVPCSecurityGroup_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { func TestAccVPCSecurityGroup_tags_IgnoreTags_Overlap_ResourceTag(t *testing.T) { ctx := acctest.Context(t) - var v types.SecurityGroup + + var v awstypes.SecurityGroup resourceName := "aws_security_group.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() 
{ acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), CheckDestroy: testAccCheckSecurityGroupDestroy(ctx), diff --git a/internal/service/ec2/vpc_security_group_test.go b/internal/service/ec2/vpc_security_group_test.go index 7f8ef93755fd..cf36fb337cc6 100644 --- a/internal/service/ec2/vpc_security_group_test.go +++ b/internal/service/ec2/vpc_security_group_test.go @@ -1137,7 +1137,7 @@ func TestAccVPCSecurityGroup_allowAll(t *testing.T) { ResourceName: resourceName, ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"revoke_rules_on_delete"}, + ImportStateVerifyIgnore: []string{"revoke_rules_on_delete", "ingress"}, }, }, }) @@ -1193,7 +1193,7 @@ func TestAccVPCSecurityGroup_ipRangeAndSecurityGroupWithSameRules(t *testing.T) ResourceName: resourceName, ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"revoke_rules_on_delete"}, + ImportStateVerifyIgnore: []string{"revoke_rules_on_delete", "ingress"}, }, }, }) @@ -1221,7 +1221,7 @@ func TestAccVPCSecurityGroup_ipRangesWithSameRules(t *testing.T) { ResourceName: resourceName, ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"revoke_rules_on_delete"}, + ImportStateVerifyIgnore: []string{"revoke_rules_on_delete", "ingress"}, }, }, }) @@ -2862,7 +2862,7 @@ func testAccCheckSecurityGroupRuleLimit(n string, v *int) resource.TestCheckFunc limit, err := strconv.Atoi(rs.Primary.Attributes[names.AttrValue]) if err != nil { - return fmt.Errorf("converting value to int: %s", err) + return fmt.Errorf("converting value to int: %w", err) } *v = limit diff --git a/internal/service/ec2/vpc_security_group_vpc_association.go b/internal/service/ec2/vpc_security_group_vpc_association.go index 420f8fb8af1e..1895aa1541cc 100644 --- a/internal/service/ec2/vpc_security_group_vpc_association.go +++ b/internal/service/ec2/vpc_security_group_vpc_association.go @@ -35,6 +35,7 @@ import ( // 
@Testing(existsType="github.com/aws/aws-sdk-go-v2/service/ec2/types;awstypes;awstypes.SecurityGroupVpcAssociation") // @Testing(importStateIdFunc=testAccSecurityGroupVPCAssociationImportStateIDFunc) // @Testing(importStateIdAttribute="vpc_id") +// @Testing(preIdentityVersion="6.0.0") func newSecurityGroupVPCAssociationResource(context.Context) (resource.ResourceWithConfigure, error) { r := &securityGroupVPCAssociationResource{} diff --git a/internal/service/ec2/vpc_security_group_vpc_association_identity_gen_test.go b/internal/service/ec2/vpc_security_group_vpc_association_identity_gen_test.go index 5ab88f633557..1ffc7cf4dc87 100644 --- a/internal/service/ec2/vpc_security_group_vpc_association_identity_gen_test.go +++ b/internal/service/ec2/vpc_security_group_vpc_association_identity_gen_test.go @@ -16,6 +16,7 @@ import ( "github.com/hashicorp/terraform-plugin-testing/tfversion" "github.com/hashicorp/terraform-provider-aws/internal/acctest" tfknownvalue "github.com/hashicorp/terraform-provider-aws/internal/acctest/knownvalue" + tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -26,7 +27,7 @@ func TestAccVPCSecurityGroupVPCAssociation_Identity_Basic(t *testing.T) { resourceName := "aws_vpc_security_group_vpc_association.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ TerraformVersionChecks: []tfversion.TerraformVersionCheck{ tfversion.SkipBelow(tfversion.Version1_12_0), }, @@ -117,7 +118,7 @@ func TestAccVPCSecurityGroupVPCAssociation_Identity_RegionOverride(t *testing.T) resourceName := "aws_vpc_security_group_vpc_association.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ TerraformVersionChecks: []tfversion.TerraformVersionCheck{ 
tfversion.SkipBelow(tfversion.Version1_12_0), }, @@ -202,3 +203,121 @@ func TestAccVPCSecurityGroupVPCAssociation_Identity_RegionOverride(t *testing.T) }, }) } + +// Resource Identity was added after v6.0.0 +func TestAccVPCSecurityGroupVPCAssociation_Identity_ExistingResource(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.SecurityGroupVpcAssociation + resourceName := "aws_vpc_security_group_vpc_association.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), + CheckDestroy: testAccCheckSecurityGroupVPCAssociationDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/SecurityGroupVPCAssociation/basic_v6.0.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSecurityGroupVPCAssociationExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/SecurityGroupVPCAssociation/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, 
map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + names.AttrVPCID: knownvalue.NotNull(), + "security_group_id": knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrVPCID)), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New("security_group_id")), + }, + }, + }, + }) +} + +// Resource Identity was added after v6.0.0 +func TestAccVPCSecurityGroupVPCAssociation_Identity_ExistingResource_NoRefresh_NoChange(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.SecurityGroupVpcAssociation + resourceName := "aws_vpc_security_group_vpc_association.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), + CheckDestroy: testAccCheckSecurityGroupVPCAssociationDestroy(ctx), + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/SecurityGroupVPCAssociation/basic_v6.0.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSecurityGroupVPCAssociationExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/SecurityGroupVPCAssociation/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: 
config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + }, + }) +} diff --git a/internal/service/ec2/vpc_security_group_vpc_association_test.go b/internal/service/ec2/vpc_security_group_vpc_association_test.go index 6e0114eca424..e68c11516062 100644 --- a/internal/service/ec2/vpc_security_group_vpc_association_test.go +++ b/internal/service/ec2/vpc_security_group_vpc_association_test.go @@ -11,15 +11,8 @@ import ( awstypes "github.com/aws/aws-sdk-go-v2/service/ec2/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" - "github.com/hashicorp/terraform-plugin-testing/knownvalue" - "github.com/hashicorp/terraform-plugin-testing/plancheck" - "github.com/hashicorp/terraform-plugin-testing/statecheck" "github.com/hashicorp/terraform-plugin-testing/terraform" - "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" - "github.com/hashicorp/terraform-plugin-testing/tfversion" "github.com/hashicorp/terraform-provider-aws/internal/acctest" - tfknownvalue "github.com/hashicorp/terraform-provider-aws/internal/acctest/knownvalue" - tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" "github.com/hashicorp/terraform-provider-aws/internal/conns" tfec2 "github.com/hashicorp/terraform-provider-aws/internal/service/ec2" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" @@ -143,66 +136,6 @@ func TestAccVPCSecurityGroupVPCAssociation_disappears_VPC(t *testing.T) { }) } -// Resource Identity was added in v6.1 -func 
TestAccVPCSecurityGroupVPCAssociation_Identity_ExistingResource(t *testing.T) { - ctx := acctest.Context(t) - - var v awstypes.SecurityGroupVpcAssociation - resourceName := "aws_vpc_security_group_vpc_association.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - - resource.ParallelTest(t, resource.TestCase{ - TerraformVersionChecks: []tfversion.TerraformVersionCheck{ - tfversion.SkipBelow(tfversion.Version1_12_0), - }, - PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), - CheckDestroy: testAccCheckSecurityGroupVPCAssociationDestroy(ctx), - Steps: []resource.TestStep{ - { - ExternalProviders: map[string]resource.ExternalProvider{ - "aws": { - Source: "hashicorp/aws", - VersionConstraint: "6.0.0", - }, - }, - Config: testAccVPCSecurityGroupVPCAssociationConfig_basic(rName), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckSecurityGroupVPCAssociationExists(ctx, resourceName, &v), - ), - ConfigStateChecks: []statecheck.StateCheck{ - tfstatecheck.ExpectNoIdentity(resourceName), - }, - }, - { - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - Config: testAccVPCSecurityGroupVPCAssociationConfig_basic(rName), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckSecurityGroupVPCAssociationExists(ctx, resourceName, &v), - ), - ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - PostApplyPostRefresh: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - }, - ConfigStateChecks: []statecheck.StateCheck{ - statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ - names.AttrAccountID: tfknownvalue.AccountID(), - names.AttrRegion: knownvalue.StringExact(acctest.Region()), - names.AttrVPCID: knownvalue.NotNull(), - "security_group_id": knownvalue.NotNull(), - }), - 
statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrVPCID)), - statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New("security_group_id")), - }, - }, - }, - }) -} - func testAccCheckSecurityGroupVPCAssociationDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { conn := acctest.Provider.Meta().(*conns.AWSClient).EC2Client(ctx) diff --git a/internal/service/ec2/vpc_security_groups_data_source.go b/internal/service/ec2/vpc_security_groups_data_source.go index 96186c844ed4..4e8e27cf4020 100644 --- a/internal/service/ec2/vpc_security_groups_data_source.go +++ b/internal/service/ec2/vpc_security_groups_data_source.go @@ -5,11 +5,9 @@ package ec2 import ( "context" - "fmt" "time" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/arn" "github.com/aws/aws-sdk-go-v2/service/ec2" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" @@ -52,8 +50,8 @@ func dataSourceSecurityGroups() *schema.Resource { func dataSourceSecurityGroupsRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { var diags diag.Diagnostics - - conn := meta.(*conns.AWSClient).EC2Client(ctx) + c := meta.(*conns.AWSClient) + conn := c.EC2Client(ctx) input := &ec2.DescribeSecurityGroupsInput{} @@ -78,19 +76,13 @@ func dataSourceSecurityGroupsRead(ctx context.Context, d *schema.ResourceData, m var arns, securityGroupIDs, vpcIDs []string for _, v := range output { - arn := arn.ARN{ - Partition: meta.(*conns.AWSClient).Partition(ctx), - Service: names.EC2, - Region: meta.(*conns.AWSClient).Region(ctx), - AccountID: aws.ToString(v.OwnerId), - Resource: fmt.Sprintf("security-group/%s", aws.ToString(v.GroupId)), - }.String() - arns = append(arns, arn) - securityGroupIDs = append(securityGroupIDs, aws.ToString(v.GroupId)) + ownerID, sgID := aws.ToString(v.OwnerId), aws.ToString(v.GroupId) + arns = append(arns, 
securityGroupARN(ctx, c, ownerID, sgID)) + securityGroupIDs = append(securityGroupIDs, sgID) vpcIDs = append(vpcIDs, aws.ToString(v.VpcId)) } - d.SetId(meta.(*conns.AWSClient).Region(ctx)) + d.SetId(c.Region(ctx)) d.Set(names.AttrARNs, arns) d.Set(names.AttrIDs, securityGroupIDs) d.Set("vpc_ids", vpcIDs) diff --git a/internal/service/ec2/vpc_subnet.go b/internal/service/ec2/vpc_subnet.go index 82c33b50df9b..e088d1036e3d 100644 --- a/internal/service/ec2/vpc_subnet.go +++ b/internal/service/ec2/vpc_subnet.go @@ -13,24 +13,38 @@ import ( "github.com/aws/aws-sdk-go-v2/service/ec2" awstypes "github.com/aws/aws-sdk-go-v2/service/ec2/types" "github.com/hashicorp/aws-sdk-go-base/v2/tfawserr" + fdiag "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/list" + listschema "github.com/hashicorp/terraform-plugin-framework/list/schema" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/types" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/enum" + "github.com/hashicorp/terraform-provider-aws/internal/errs/fwdiag" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" + "github.com/hashicorp/terraform-provider-aws/internal/framework" + fwflex "github.com/hashicorp/terraform-provider-aws/internal/framework/flex" + fwtypes "github.com/hashicorp/terraform-provider-aws/internal/framework/types" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + itypes "github.com/hashicorp/terraform-provider-aws/internal/types" 
"github.com/hashicorp/terraform-provider-aws/internal/verify" "github.com/hashicorp/terraform-provider-aws/names" + "go.opentelemetry.io/contrib/instrumentation/github.com/aws/aws-sdk-go-v2/otelaws" + "go.opentelemetry.io/otel/attribute" ) // @SDKResource("aws_subnet", name="Subnet") // @Tags(identifierAttribute="id") -// @Testing(existsType="github.com/aws/aws-sdk-go-v2/service/ec2/types;types.Subnet") +// @IdentityAttribute("id") +// @Testing(existsType="github.com/aws/aws-sdk-go-v2/service/ec2/types;awstypes;awstypes.Subnet") // @Testing(generator=false) +// @Testing(preIdentityVersion="v6.8.0") func resourceSubnet() *schema.Resource { //lintignore:R011 return &schema.Resource{ @@ -38,9 +52,6 @@ func resourceSubnet() *schema.Resource { ReadWithoutTimeout: resourceSubnetRead, UpdateWithoutTimeout: resourceSubnetUpdate, DeleteWithoutTimeout: resourceSubnetDelete, - Importer: &schema.ResourceImporter{ - StateContext: schema.ImportStatePassthroughContext, - }, Timeouts: &schema.ResourceTimeout{ Create: schema.DefaultTimeout(10 * time.Minute), @@ -159,6 +170,14 @@ func resourceSubnet() *schema.Resource { } } +// @SDKListResource("aws_subnet") +func subnetResourceAsListResource() itypes.ListResourceForSDK { + l := subnetListResource{} + l.SetResourceSchema(resourceSubnet()) + + return &l +} + func resourceSubnetCreate(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { var diags diag.Diagnostics conn := meta.(*conns.AWSClient).EC2Client(ctx) @@ -231,7 +250,7 @@ func resourceSubnetRead(ctx context.Context, d *schema.ResourceData, meta any) d var diags diag.Diagnostics conn := meta.(*conns.AWSClient).EC2Client(ctx) - outputRaw, err := tfresource.RetryWhenNewResourceNotFound(ctx, ec2PropagationTimeout, func() (any, error) { + subnet, err := tfresource.RetryWhenNewResourceNotFound(ctx, ec2PropagationTimeout, func(ctx context.Context) (*awstypes.Subnet, error) { return findSubnetByID(ctx, conn, d.Id()) }, d.IsNewResource()) @@ -245,46 +264,7 @@ func 
resourceSubnetRead(ctx context.Context, d *schema.ResourceData, meta any) d return sdkdiag.AppendErrorf(diags, "reading EC2 Subnet (%s): %s", d.Id(), err) } - subnet := outputRaw.(*awstypes.Subnet) - - d.Set(names.AttrARN, subnet.SubnetArn) - d.Set("assign_ipv6_address_on_creation", subnet.AssignIpv6AddressOnCreation) - d.Set(names.AttrAvailabilityZone, subnet.AvailabilityZone) - d.Set("availability_zone_id", subnet.AvailabilityZoneId) - d.Set(names.AttrCIDRBlock, subnet.CidrBlock) - d.Set("customer_owned_ipv4_pool", subnet.CustomerOwnedIpv4Pool) - d.Set("enable_dns64", subnet.EnableDns64) - d.Set("enable_lni_at_device_index", subnet.EnableLniAtDeviceIndex) - d.Set("ipv6_native", subnet.Ipv6Native) - d.Set("map_customer_owned_ip_on_launch", subnet.MapCustomerOwnedIpOnLaunch) - d.Set("map_public_ip_on_launch", subnet.MapPublicIpOnLaunch) - d.Set("outpost_arn", subnet.OutpostArn) - d.Set(names.AttrOwnerID, subnet.OwnerId) - d.Set(names.AttrVPCID, subnet.VpcId) - - // Make sure those values are set, if an IPv6 block exists it'll be set in the loop. 
- d.Set("ipv6_cidr_block_association_id", nil) - d.Set("ipv6_cidr_block", nil) - - for _, v := range subnet.Ipv6CidrBlockAssociationSet { - if v.Ipv6CidrBlockState.State == awstypes.SubnetCidrBlockStateCodeAssociated { //we can only ever have 1 IPv6 block associated at once - d.Set("ipv6_cidr_block_association_id", v.AssociationId) - d.Set("ipv6_cidr_block", v.Ipv6CidrBlock) - break - } - } - - if subnet.PrivateDnsNameOptionsOnLaunch != nil { - d.Set("enable_resource_name_dns_aaaa_record_on_launch", subnet.PrivateDnsNameOptionsOnLaunch.EnableResourceNameDnsAAAARecord) - d.Set("enable_resource_name_dns_a_record_on_launch", subnet.PrivateDnsNameOptionsOnLaunch.EnableResourceNameDnsARecord) - d.Set("private_dns_hostname_type_on_launch", subnet.PrivateDnsNameOptionsOnLaunch.HostnameType) - } else { - d.Set("enable_resource_name_dns_aaaa_record_on_launch", nil) - d.Set("enable_resource_name_dns_a_record_on_launch", nil) - d.Set("private_dns_hostname_type_on_launch", nil) - } - - setTagsOut(ctx, subnet.Tags) + resourceSubnetFlatten(ctx, subnet, d) return diags } @@ -375,7 +355,7 @@ func resourceSubnetDelete(ctx context.Context, d *schema.ResourceData, meta any) return sdkdiag.AppendErrorf(diags, "deleting ENIs for EC2 Subnet (%s): %s", d.Id(), err) } - _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, d.Timeout(schema.TimeoutDelete), func() (any, error) { + _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, d.Timeout(schema.TimeoutDelete), func(ctx context.Context) (any, error) { return conn.DeleteSubnet(ctx, &ec2.DeleteSubnetInput{ SubnetId: aws.String(d.Id()), }) @@ -684,3 +664,201 @@ func modifySubnetPrivateDNSHostnameTypeOnLaunch(ctx context.Context, conn *ec2.C return nil } + +func resourceSubnetFlatten(ctx context.Context, subnet *awstypes.Subnet, rd *schema.ResourceData) { + rd.Set(names.AttrARN, subnet.SubnetArn) + rd.Set("assign_ipv6_address_on_creation", subnet.AssignIpv6AddressOnCreation) + rd.Set(names.AttrAvailabilityZone, subnet.AvailabilityZone) + 
rd.Set("availability_zone_id", subnet.AvailabilityZoneId) + rd.Set(names.AttrCIDRBlock, subnet.CidrBlock) + rd.Set("customer_owned_ipv4_pool", subnet.CustomerOwnedIpv4Pool) + rd.Set("enable_dns64", subnet.EnableDns64) + rd.Set("enable_lni_at_device_index", subnet.EnableLniAtDeviceIndex) + rd.Set("ipv6_native", subnet.Ipv6Native) + rd.Set("map_customer_owned_ip_on_launch", subnet.MapCustomerOwnedIpOnLaunch) + rd.Set("map_public_ip_on_launch", subnet.MapPublicIpOnLaunch) + rd.Set("outpost_arn", subnet.OutpostArn) + rd.Set(names.AttrOwnerID, subnet.OwnerId) + rd.Set(names.AttrVPCID, subnet.VpcId) + + // Make sure those values are set, if an IPv6 block exists it'll be set in the loop. + rd.Set("ipv6_cidr_block_association_id", nil) + rd.Set("ipv6_cidr_block", nil) + + for _, v := range subnet.Ipv6CidrBlockAssociationSet { + if v.Ipv6CidrBlockState.State == awstypes.SubnetCidrBlockStateCodeAssociated { //we can only ever have 1 IPv6 block associated at once + rd.Set("ipv6_cidr_block_association_id", v.AssociationId) + rd.Set("ipv6_cidr_block", v.Ipv6CidrBlock) + break + } + } + + if subnet.PrivateDnsNameOptionsOnLaunch != nil { + rd.Set("enable_resource_name_dns_aaaa_record_on_launch", subnet.PrivateDnsNameOptionsOnLaunch.EnableResourceNameDnsAAAARecord) + rd.Set("enable_resource_name_dns_a_record_on_launch", subnet.PrivateDnsNameOptionsOnLaunch.EnableResourceNameDnsARecord) + rd.Set("private_dns_hostname_type_on_launch", subnet.PrivateDnsNameOptionsOnLaunch.HostnameType) + } else { + rd.Set("enable_resource_name_dns_aaaa_record_on_launch", nil) + rd.Set("enable_resource_name_dns_a_record_on_launch", nil) + rd.Set("private_dns_hostname_type_on_launch", nil) + } + + setTagsOut(ctx, subnet.Tags) +} + +var _ list.ListResourceWithRawV5Schemas = &subnetListResource{} + +type subnetListResource struct { + framework.ResourceWithConfigure + framework.ListResourceWithSDKv2Resource + framework.ListResourceWithSDKv2Tags +} + +type subnetListResourceModel struct { + 
framework.WithRegionModel + SubnetIDs fwtypes.ListValueOf[types.String] `tfsdk:"subnet_ids"` + Filters customListFilters `tfsdk:"filter"` +} + +func (l *subnetListResource) ListResourceConfigSchema(ctx context.Context, request list.ListResourceSchemaRequest, response *list.ListResourceSchemaResponse) { + response.Schema = listschema.Schema{ + Attributes: map[string]listschema.Attribute{ + names.AttrSubnetIDs: listschema.ListAttribute{ + CustomType: fwtypes.ListOfStringType, + ElementType: types.StringType, + Optional: true, + }, + }, + Blocks: map[string]listschema.Block{ + names.AttrFilter: listschema.ListNestedBlock{ + CustomType: fwtypes.NewListNestedObjectTypeOf[customListFilterModel](ctx), + NestedObject: listschema.NestedBlockObject{ + Attributes: map[string]listschema.Attribute{ + names.AttrName: listschema.StringAttribute{ + Required: true, + Validators: []validator.String{ + notDefaultForAZValidator{}, + }, + }, + names.AttrValues: listschema.ListAttribute{ + CustomType: fwtypes.ListOfStringType, + ElementType: types.StringType, + Required: true, + }, + }, + }, + }, + }, + } +} + +var _ validator.String = notDefaultForAZValidator{} + +type notDefaultForAZValidator struct{} + +func (v notDefaultForAZValidator) Description(ctx context.Context) string { + return v.MarkdownDescription(ctx) +} + +func (v notDefaultForAZValidator) MarkdownDescription(_ context.Context) string { + return "" +} + +func (v notDefaultForAZValidator) ValidateString(ctx context.Context, request validator.StringRequest, response *validator.StringResponse) { + if request.ConfigValue.IsNull() || request.ConfigValue.IsUnknown() { + return + } + + value := request.ConfigValue + + if value.ValueString() == "default-for-az" { + response.Diagnostics.Append(fdiag.NewAttributeErrorDiagnostic( + request.Path, + "Invalid Attribute Value", + `The filter "default-for-az" is not supported. 
To list default Subnets, use the resource type "aws_default_subnet".`, + )) + } +} + +func (l *subnetListResource) List(ctx context.Context, request list.ListRequest, stream *list.ListResultsStream) { + awsClient := l.Meta() + conn := awsClient.EC2Client(ctx) + + attributes := []attribute.KeyValue{ + otelaws.RegionAttr(awsClient.Region(ctx)), + } + for _, attribute := range attributes { + ctx = tflog.SetField(ctx, string(attribute.Key), attribute.Value.AsInterface()) + } + + var query subnetListResourceModel + if request.Config.Raw.IsKnown() && !request.Config.Raw.IsNull() { + if diags := request.Config.Get(ctx, &query); diags.HasError() { + stream.Results = list.ListResultsStreamDiagnostics(diags) + return + } + } + + var input ec2.DescribeSubnetsInput + if diags := fwflex.Expand(ctx, query, &input); diags.HasError() { + stream.Results = list.ListResultsStreamDiagnostics(diags) + return + } + + input.Filters = append(input.Filters, awstypes.Filter{ + Name: aws.String("default-for-az"), + Values: []string{"false"}, + }) + + tflog.Info(ctx, "Listing resources") + + stream.Results = func(yield func(list.ListResult) bool) { + pages := ec2.NewDescribeSubnetsPaginator(conn, &input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + if err != nil { + result := fwdiag.NewListResultErrorDiagnostic(err) + yield(result) + return + } + + for _, subnet := range page.Subnets { + ctx := tflog.SetField(ctx, logging.ResourceAttributeKey(names.AttrID), aws.ToString(subnet.SubnetId)) + + result := request.NewListResult(ctx) + + tags := keyValueTags(ctx, subnet.Tags) + + rd := l.ResourceData() + rd.SetId(aws.ToString(subnet.SubnetId)) + + tflog.Info(ctx, "Reading resource") + resourceSubnetFlatten(ctx, &subnet, rd) + + // set tags + err = l.SetTags(ctx, awsClient, rd) + if err != nil { + result = fwdiag.NewListResultErrorDiagnostic(err) + yield(result) + return + } + + if v, ok := tags["Name"]; ok { + result.DisplayName = fmt.Sprintf("%s (%s)", v.ValueString(), 
aws.ToString(subnet.SubnetId)) + } else { + result.DisplayName = aws.ToString(subnet.SubnetId) + } + + l.SetResult(ctx, awsClient, request.IncludeResource, &result, rd) + if result.Diagnostics.HasError() { + yield(result) + return + } + + if !yield(result) { + return + } + } + } + } +} diff --git a/internal/service/ec2/vpc_subnet_data_source_tags_gen_test.go b/internal/service/ec2/vpc_subnet_data_source_tags_gen_test.go index 0e5d6ce629ff..eb12479efeb2 100644 --- a/internal/service/ec2/vpc_subnet_data_source_tags_gen_test.go +++ b/internal/service/ec2/vpc_subnet_data_source_tags_gen_test.go @@ -21,9 +21,10 @@ import ( func TestAccVPCSubnetDataSource_tags(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_subnet.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -47,9 +48,10 @@ func TestAccVPCSubnetDataSource_tags(t *testing.T) { func TestAccVPCSubnetDataSource_tags_NullMap(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_subnet.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -69,9 +71,10 @@ func TestAccVPCSubnetDataSource_tags_NullMap(t *testing.T) { func TestAccVPCSubnetDataSource_tags_EmptyMap(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_subnet.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -91,9 +94,10 @@ func 
TestAccVPCSubnetDataSource_tags_EmptyMap(t *testing.T) { func TestAccVPCSubnetDataSource_tags_DefaultTags_nonOverlapping(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_subnet.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), Steps: []resource.TestStep{ @@ -121,9 +125,10 @@ func TestAccVPCSubnetDataSource_tags_DefaultTags_nonOverlapping(t *testing.T) { func TestAccVPCSubnetDataSource_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_subnet.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), Steps: []resource.TestStep{ @@ -157,9 +162,10 @@ func TestAccVPCSubnetDataSource_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) func TestAccVPCSubnetDataSource_tags_IgnoreTags_Overlap_ResourceTag(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_subnet.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), Steps: []resource.TestStep{ diff --git a/internal/service/ec2/vpc_subnet_identity_gen_test.go b/internal/service/ec2/vpc_subnet_identity_gen_test.go new file mode 100644 index 000000000000..b501fcf4259e --- /dev/null +++ b/internal/service/ec2/vpc_subnet_identity_gen_test.go @@ -0,0 +1,284 @@ +// Code generated by internal/generate/identitytests/main.go; DO NOT EDIT. 
+ +package ec2_test + +import ( + "testing" + + awstypes "github.com/aws/aws-sdk-go-v2/service/ec2/types" + "github.com/hashicorp/terraform-plugin-testing/config" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/knownvalue" + "github.com/hashicorp/terraform-plugin-testing/plancheck" + "github.com/hashicorp/terraform-plugin-testing/statecheck" + "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" + "github.com/hashicorp/terraform-plugin-testing/tfversion" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + tfknownvalue "github.com/hashicorp/terraform-provider-aws/internal/acctest/knownvalue" + tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccVPCSubnet_Identity_Basic(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.Subnet + resourceName := "aws_subnet.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), + CheckDestroy: testAccCheckSubnetDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + // Step 1: Setup + { + ConfigDirectory: config.StaticDirectory("testdata/Subnet/basic/"), + ConfigVariables: config.Variables{}, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSubnetExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + names.AttrID: 
knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrID)), + }, + }, + + // Step 2: Import command + { + ConfigDirectory: config.StaticDirectory("testdata/Subnet/basic/"), + ConfigVariables: config.Variables{}, + ImportStateKind: resource.ImportCommandWithID, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + + // Step 3: Import block with Import ID + { + ConfigDirectory: config.StaticDirectory("testdata/Subnet/basic/"), + ConfigVariables: config.Variables{}, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + }, + }, + }, + + // Step 4: Import block with Resource Identity + { + ConfigDirectory: config.StaticDirectory("testdata/Subnet/basic/"), + ConfigVariables: config.Variables{}, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithResourceIdentity, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + }, + }, + }, + }, + }) +} + +func TestAccVPCSubnet_Identity_RegionOverride(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_subnet.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), + CheckDestroy: 
acctest.CheckDestroyNoop, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + // Step 1: Setup + { + ConfigDirectory: config.StaticDirectory("testdata/Subnet/region_override/"), + ConfigVariables: config.Variables{ + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.AlternateRegion()), + names.AttrID: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrID)), + }, + }, + + // Step 2: Import command + { + ConfigDirectory: config.StaticDirectory("testdata/Subnet/region_override/"), + ConfigVariables: config.Variables{ + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ImportStateKind: resource.ImportCommandWithID, + ImportStateIdFunc: acctest.CrossRegionImportStateIdFunc(resourceName), + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + + // Step 3: Import block with Import ID + { + ConfigDirectory: config.StaticDirectory("testdata/Subnet/region_override/"), + ConfigVariables: config.Variables{ + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportStateIdFunc: acctest.CrossRegionImportStateIdFunc(resourceName), + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + }, + + // Step 4: 
Import block with Resource Identity + { + ConfigDirectory: config.StaticDirectory("testdata/Subnet/region_override/"), + ConfigVariables: config.Variables{ + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithResourceIdentity, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + }, + }, + }) +} + +// Resource Identity was added after v6.8.0 +func TestAccVPCSubnet_Identity_ExistingResource(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.Subnet + resourceName := "aws_subnet.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), + CheckDestroy: testAccCheckSubnetDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/Subnet/basic_v6.8.0/"), + ConfigVariables: config.Variables{}, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSubnetExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Subnet/basic/"), + ConfigVariables: config.Variables{}, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ 
+ plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + names.AttrID: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrID)), + }, + }, + }, + }) +} + +// Resource Identity was added after v6.8.0 +func TestAccVPCSubnet_Identity_ExistingResource_NoRefresh_NoChange(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.Subnet + resourceName := "aws_subnet.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), + CheckDestroy: testAccCheckSubnetDestroy(ctx), + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/Subnet/basic_v6.8.0/"), + ConfigVariables: config.Variables{}, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSubnetExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Subnet/basic/"), + ConfigVariables: config.Variables{}, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, 
plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + }, + }) +} diff --git a/internal/service/ec2/vpc_subnet_list_test.go b/internal/service/ec2/vpc_subnet_list_test.go new file mode 100644 index 000000000000..17ee7fd45048 --- /dev/null +++ b/internal/service/ec2/vpc_subnet_list_test.go @@ -0,0 +1,465 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package ec2_test + +import ( + "testing" + + "github.com/YakDriver/regexache" + "github.com/hashicorp/terraform-plugin-testing/config" + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/knownvalue" + "github.com/hashicorp/terraform-plugin-testing/querycheck" + "github.com/hashicorp/terraform-plugin-testing/statecheck" + "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" + "github.com/hashicorp/terraform-plugin-testing/tfversion" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + tfknownvalue "github.com/hashicorp/terraform-provider-aws/internal/acctest/knownvalue" + tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccVPCSubnet_List_Basic(t *testing.T) { + ctx := acctest.Context(t) + + resourceName1 := "aws_subnet.test[0]" + resourceName2 := "aws_subnet.test[1]" + resourceName3 := "aws_subnet.test[2]" + + id1 := tfstatecheck.StateValue() + id2 := tfstatecheck.StateValue() + id3 := tfstatecheck.StateValue() + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_14_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), + CheckDestroy: testAccCheckInstanceDestroy(ctx), + Steps: 
[]resource.TestStep{ + // Step 1: Setup + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Subnet/list_basic/"), + ConfigStateChecks: []statecheck.StateCheck{ + id1.GetStateValue(resourceName1, tfjsonpath.New(names.AttrID)), + tfstatecheck.ExpectRegionalARNFormat(resourceName1, tfjsonpath.New(names.AttrARN), "ec2", "subnet/{id}"), + + id2.GetStateValue(resourceName2, tfjsonpath.New(names.AttrID)), + tfstatecheck.ExpectRegionalARNFormat(resourceName2, tfjsonpath.New(names.AttrARN), "ec2", "subnet/{id}"), + + id3.GetStateValue(resourceName3, tfjsonpath.New(names.AttrID)), + tfstatecheck.ExpectRegionalARNFormat(resourceName3, tfjsonpath.New(names.AttrARN), "ec2", "subnet/{id}"), + }, + }, + + // Step 2: Query + { + Query: true, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Subnet/list_basic/"), + QueryResultChecks: []querycheck.QueryResultCheck{ + querycheck.ExpectIdentity("aws_subnet.test", map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + names.AttrID: id1.Value(), + }), + + querycheck.ExpectIdentity("aws_subnet.test", map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + names.AttrID: id2.Value(), + }), + + querycheck.ExpectIdentity("aws_subnet.test", map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + names.AttrID: id3.Value(), + }), + }, + }, + }, + }) +} + +func TestAccVPCSubnet_List_RegionOverride(t *testing.T) { + ctx := acctest.Context(t) + + resourceName1 := "aws_subnet.test[0]" + resourceName2 := "aws_subnet.test[1]" + resourceName3 := "aws_subnet.test[2]" + + id1 := tfstatecheck.StateValue() + id2 := tfstatecheck.StateValue() + id3 := 
tfstatecheck.StateValue() + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_14_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), + CheckDestroy: testAccCheckInstanceDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Setup + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Subnet/list_region_override/"), + ConfigVariables: config.Variables{ + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ConfigStateChecks: []statecheck.StateCheck{ + id1.GetStateValue(resourceName1, tfjsonpath.New(names.AttrID)), + tfstatecheck.ExpectRegionalARNAlternateRegionFormat(resourceName1, tfjsonpath.New(names.AttrARN), "ec2", "subnet/{id}"), + + id2.GetStateValue(resourceName2, tfjsonpath.New(names.AttrID)), + tfstatecheck.ExpectRegionalARNAlternateRegionFormat(resourceName2, tfjsonpath.New(names.AttrARN), "ec2", "subnet/{id}"), + + id3.GetStateValue(resourceName3, tfjsonpath.New(names.AttrID)), + tfstatecheck.ExpectRegionalARNAlternateRegionFormat(resourceName3, tfjsonpath.New(names.AttrARN), "ec2", "subnet/{id}"), + }, + }, + + // Step 2: Query + { + Query: true, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Subnet/list_region_override/"), + ConfigVariables: config.Variables{ + "region": config.StringVariable(acctest.AlternateRegion()), + }, + QueryResultChecks: []querycheck.QueryResultCheck{ + querycheck.ExpectIdentity("aws_subnet.test", map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.AlternateRegion()), + names.AttrID: id1.Value(), + }), + + querycheck.ExpectIdentity("aws_subnet.test", map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: 
knownvalue.StringExact(acctest.AlternateRegion()), + names.AttrID: id2.Value(), + }), + + querycheck.ExpectIdentity("aws_subnet.test", map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.AlternateRegion()), + names.AttrID: id3.Value(), + }), + }, + }, + }, + }) +} + +func TestAccVPCSubnet_List_Filtered(t *testing.T) { + ctx := acctest.Context(t) + + resourceNameExpected1 := "aws_subnet.expected[0]" + resourceNameExpected2 := "aws_subnet.expected[1]" + resourceNameNotExpected1 := "aws_subnet.not_expected[0]" + resourceNameNotExpected2 := "aws_subnet.not_expected[1]" + + expected1 := tfstatecheck.StateValue() + expected2 := tfstatecheck.StateValue() + notExpected1 := tfstatecheck.StateValue() + notExpected2 := tfstatecheck.StateValue() + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_14_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), + CheckDestroy: testAccCheckInstanceDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Setup + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Subnet/list_filtered/"), + ConfigStateChecks: []statecheck.StateCheck{ + expected1.GetStateValue(resourceNameExpected1, tfjsonpath.New(names.AttrID)), + tfstatecheck.ExpectRegionalARNFormat(resourceNameExpected1, tfjsonpath.New(names.AttrARN), "ec2", "subnet/{id}"), + + expected2.GetStateValue(resourceNameExpected2, tfjsonpath.New(names.AttrID)), + tfstatecheck.ExpectRegionalARNFormat(resourceNameExpected2, tfjsonpath.New(names.AttrARN), "ec2", "subnet/{id}"), + + notExpected1.GetStateValue(resourceNameNotExpected1, tfjsonpath.New(names.AttrID)), + tfstatecheck.ExpectRegionalARNFormat(resourceNameNotExpected1, tfjsonpath.New(names.AttrARN), "ec2", "subnet/{id}"), + + 
notExpected2.GetStateValue(resourceNameNotExpected2, tfjsonpath.New(names.AttrID)), + tfstatecheck.ExpectRegionalARNFormat(resourceNameNotExpected2, tfjsonpath.New(names.AttrARN), "ec2", "subnet/{id}"), + }, + }, + + // Step 2: Query + { + Query: true, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Subnet/list_filtered/"), + QueryResultChecks: []querycheck.QueryResultCheck{ + querycheck.ExpectIdentity("aws_subnet.test", map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + names.AttrID: expected1.Value(), + }), + + querycheck.ExpectIdentity("aws_subnet.test", map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + names.AttrID: expected2.Value(), + }), + + querycheck.ExpectNoIdentity("aws_subnet.test", map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + names.AttrID: notExpected1.Value(), + }), + + querycheck.ExpectNoIdentity("aws_subnet.test", map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + names.AttrID: notExpected2.Value(), + }), + }, + }, + }, + }) +} + +func TestAccVPCSubnet_List_ExcludeDefaultSubnets(t *testing.T) { + ctx := acctest.Context(t) + + id := tfstatecheck.StateValue() + defaultSubnetID0 := tfstatecheck.StateValue() + defaultSubnetID1 := tfstatecheck.StateValue() + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_14_0), + }, + PreCheck: func() { + acctest.PreCheck(ctx, t) + testAccPreCheckDefaultSubnetExists(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), + CheckDestroy: testAccCheckInstanceDestroy(ctx), + 
Steps: []resource.TestStep{ + // Step 1: Setup + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Subnet/list_exclude_default/"), + ConfigStateChecks: []statecheck.StateCheck{ + id.GetStateValue("aws_subnet.test", tfjsonpath.New(names.AttrID)), + + defaultSubnetID0.GetStateValue("data.aws_subnets.defaults", tfjsonpath.New(names.AttrIDs).AtSliceIndex(0)), + defaultSubnetID1.GetStateValue("data.aws_subnets.defaults", tfjsonpath.New(names.AttrIDs).AtSliceIndex(1)), + }, + }, + + // Step 2: Query + { + Query: true, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Subnet/list_exclude_default/"), + QueryResultChecks: []querycheck.QueryResultCheck{ + querycheck.ExpectIdentity("aws_subnet.test", map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + names.AttrID: id.Value(), + }), + + querycheck.ExpectNoIdentity("aws_subnet.test", map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + names.AttrID: defaultSubnetID0.Value(), + }), + querycheck.ExpectNoIdentity("aws_subnet.test", map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + names.AttrID: defaultSubnetID1.Value(), + }), + }, + }, + }, + }) +} + +func TestAccVPCSubnet_List_SubnetIDs(t *testing.T) { + ctx := acctest.Context(t) + + resourceName1 := "aws_subnet.test[0]" + resourceName2 := "aws_subnet.test[1]" + resourceName3 := "aws_subnet.test[2]" + resourceName4 := "aws_subnet.test[3]" + + id1 := tfstatecheck.StateValue() + id2 := tfstatecheck.StateValue() + id3 := tfstatecheck.StateValue() + id4 := tfstatecheck.StateValue() + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + 
tfversion.SkipBelow(tfversion.Version1_14_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), + CheckDestroy: testAccCheckInstanceDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Setup + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Subnet/list_subnet_ids/"), + ConfigStateChecks: []statecheck.StateCheck{ + id1.GetStateValue(resourceName1, tfjsonpath.New(names.AttrID)), + id2.GetStateValue(resourceName2, tfjsonpath.New(names.AttrID)), + id3.GetStateValue(resourceName3, tfjsonpath.New(names.AttrID)), + id4.GetStateValue(resourceName4, tfjsonpath.New(names.AttrID)), + }, + }, + + // Step 2: Query + { + Query: true, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Subnet/list_subnet_ids/"), + QueryResultChecks: []querycheck.QueryResultCheck{ + querycheck.ExpectLength("aws_subnet.test", 4), + + querycheck.ExpectIdentity("aws_subnet.test", map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + names.AttrID: id1.Value(), + }), + + querycheck.ExpectIdentity("aws_subnet.test", map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + names.AttrID: id2.Value(), + }), + + querycheck.ExpectIdentity("aws_subnet.test", map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + names.AttrID: id3.Value(), + }), + + querycheck.ExpectIdentity("aws_subnet.test", map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + names.AttrID: id4.Value(), + }), + }, + }, + }, + }) +} + +func TestAccVPCSubnet_List_FilteredSubnetIDs(t *testing.T) { + ctx := 
acctest.Context(t) + + resourceNameExpected1 := "aws_subnet.expected[0]" + resourceNameExpected2 := "aws_subnet.expected[1]" + resourceNameNotExpected1 := "aws_subnet.not_expected[0]" + resourceNameNotExpected2 := "aws_subnet.not_expected[1]" + + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + expected1 := tfstatecheck.StateValue() + expected2 := tfstatecheck.StateValue() + notExpected1 := tfstatecheck.StateValue() + notExpected2 := tfstatecheck.StateValue() + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_14_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), + CheckDestroy: testAccCheckInstanceDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Setup + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Subnet/list_filtered_subnet_ids/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigStateChecks: []statecheck.StateCheck{ + expected1.GetStateValue(resourceNameExpected1, tfjsonpath.New(names.AttrID)), + expected2.GetStateValue(resourceNameExpected2, tfjsonpath.New(names.AttrID)), + notExpected1.GetStateValue(resourceNameNotExpected1, tfjsonpath.New(names.AttrID)), + notExpected2.GetStateValue(resourceNameNotExpected2, tfjsonpath.New(names.AttrID)), + }, + }, + + // Step 2: Query + { + Query: true, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Subnet/list_filtered_subnet_ids/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + QueryResultChecks: []querycheck.QueryResultCheck{ + querycheck.ExpectIdentity("aws_subnet.test", map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: 
knownvalue.StringExact(acctest.Region()), + names.AttrID: expected1.Value(), + }), + + querycheck.ExpectIdentity("aws_subnet.test", map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + names.AttrID: expected2.Value(), + }), + + querycheck.ExpectNoIdentity("aws_subnet.test", map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + names.AttrID: notExpected1.Value(), + }), + + querycheck.ExpectNoIdentity("aws_subnet.test", map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + names.AttrID: notExpected2.Value(), + }), + }, + }, + }, + }) +} + +func TestAccVPCSubnet_List_Filtered_DefaultForAZ(t *testing.T) { + t.Skip("Skipping because ExpectError is not currently supported for Query mode") + + ctx := acctest.Context(t) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_14_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), + CheckDestroy: testAccCheckVPCDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Setup + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Subnet/list_filtered_default_for_az/"), + }, + + // Step 2: Query + { + Query: true, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Subnet/list_filtered_default_for_az/"), + ExpectError: regexache.MustCompile(`The filter "default-for-az" is not supported. 
To list default Subnets, use the resource type "aws_default_subnet".`), + }, + }, + }) +} diff --git a/internal/service/ec2/vpc_subnet_tags_gen_test.go b/internal/service/ec2/vpc_subnet_tags_gen_test.go index 0e79bfa4c433..9d841dce2ce6 100644 --- a/internal/service/ec2/vpc_subnet_tags_gen_test.go +++ b/internal/service/ec2/vpc_subnet_tags_gen_test.go @@ -5,7 +5,7 @@ package ec2_test import ( "testing" - "github.com/aws/aws-sdk-go-v2/service/ec2/types" + awstypes "github.com/aws/aws-sdk-go-v2/service/ec2/types" "github.com/hashicorp/terraform-plugin-testing/config" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/knownvalue" @@ -18,10 +18,11 @@ import ( func TestAccVPCSubnet_tags(t *testing.T) { ctx := acctest.Context(t) - var v types.Subnet + + var v awstypes.Subnet resourceName := "aws_subnet.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), CheckDestroy: testAccCheckSubnetDestroy(ctx), @@ -191,10 +192,11 @@ func TestAccVPCSubnet_tags(t *testing.T) { func TestAccVPCSubnet_tags_null(t *testing.T) { ctx := acctest.Context(t) - var v types.Subnet + + var v awstypes.Subnet resourceName := "aws_subnet.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), CheckDestroy: testAccCheckSubnetDestroy(ctx), @@ -254,10 +256,11 @@ func TestAccVPCSubnet_tags_null(t *testing.T) { func TestAccVPCSubnet_tags_EmptyMap(t *testing.T) { ctx := acctest.Context(t) - var v types.Subnet + + var v awstypes.Subnet resourceName := "aws_subnet.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, 
names.EC2ServiceID), CheckDestroy: testAccCheckSubnetDestroy(ctx), @@ -313,10 +316,11 @@ func TestAccVPCSubnet_tags_EmptyMap(t *testing.T) { func TestAccVPCSubnet_tags_AddOnUpdate(t *testing.T) { ctx := acctest.Context(t) - var v types.Subnet + + var v awstypes.Subnet resourceName := "aws_subnet.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), CheckDestroy: testAccCheckSubnetDestroy(ctx), @@ -390,10 +394,11 @@ func TestAccVPCSubnet_tags_AddOnUpdate(t *testing.T) { func TestAccVPCSubnet_tags_EmptyTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) - var v types.Subnet + + var v awstypes.Subnet resourceName := "aws_subnet.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), CheckDestroy: testAccCheckSubnetDestroy(ctx), @@ -474,10 +479,11 @@ func TestAccVPCSubnet_tags_EmptyTag_OnCreate(t *testing.T) { func TestAccVPCSubnet_tags_EmptyTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) - var v types.Subnet + + var v awstypes.Subnet resourceName := "aws_subnet.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), CheckDestroy: testAccCheckSubnetDestroy(ctx), @@ -605,10 +611,11 @@ func TestAccVPCSubnet_tags_EmptyTag_OnUpdate_Add(t *testing.T) { func TestAccVPCSubnet_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) - var v types.Subnet + + var v awstypes.Subnet resourceName := "aws_subnet.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), 
CheckDestroy: testAccCheckSubnetDestroy(ctx), @@ -690,10 +697,11 @@ func TestAccVPCSubnet_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { func TestAccVPCSubnet_tags_DefaultTags_providerOnly(t *testing.T) { ctx := acctest.Context(t) - var v types.Subnet + + var v awstypes.Subnet resourceName := "aws_subnet.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), CheckDestroy: testAccCheckSubnetDestroy(ctx), @@ -862,10 +870,11 @@ func TestAccVPCSubnet_tags_DefaultTags_providerOnly(t *testing.T) { func TestAccVPCSubnet_tags_DefaultTags_nonOverlapping(t *testing.T) { ctx := acctest.Context(t) - var v types.Subnet + + var v awstypes.Subnet resourceName := "aws_subnet.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), CheckDestroy: testAccCheckSubnetDestroy(ctx), @@ -1015,10 +1024,11 @@ func TestAccVPCSubnet_tags_DefaultTags_nonOverlapping(t *testing.T) { func TestAccVPCSubnet_tags_DefaultTags_overlapping(t *testing.T) { ctx := acctest.Context(t) - var v types.Subnet + + var v awstypes.Subnet resourceName := "aws_subnet.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), CheckDestroy: testAccCheckSubnetDestroy(ctx), @@ -1184,10 +1194,11 @@ func TestAccVPCSubnet_tags_DefaultTags_overlapping(t *testing.T) { func TestAccVPCSubnet_tags_DefaultTags_updateToProviderOnly(t *testing.T) { ctx := acctest.Context(t) - var v types.Subnet + + var v awstypes.Subnet resourceName := "aws_subnet.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, 
ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), CheckDestroy: testAccCheckSubnetDestroy(ctx), @@ -1270,10 +1281,11 @@ func TestAccVPCSubnet_tags_DefaultTags_updateToProviderOnly(t *testing.T) { func TestAccVPCSubnet_tags_DefaultTags_updateToResourceOnly(t *testing.T) { ctx := acctest.Context(t) - var v types.Subnet + + var v awstypes.Subnet resourceName := "aws_subnet.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), CheckDestroy: testAccCheckSubnetDestroy(ctx), @@ -1355,10 +1367,11 @@ func TestAccVPCSubnet_tags_DefaultTags_updateToResourceOnly(t *testing.T) { func TestAccVPCSubnet_tags_DefaultTags_emptyResourceTag(t *testing.T) { ctx := acctest.Context(t) - var v types.Subnet + + var v awstypes.Subnet resourceName := "aws_subnet.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), CheckDestroy: testAccCheckSubnetDestroy(ctx), @@ -1417,10 +1430,11 @@ func TestAccVPCSubnet_tags_DefaultTags_emptyResourceTag(t *testing.T) { func TestAccVPCSubnet_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T) { ctx := acctest.Context(t) - var v types.Subnet + + var v awstypes.Subnet resourceName := "aws_subnet.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), CheckDestroy: testAccCheckSubnetDestroy(ctx), @@ -1471,10 +1485,11 @@ func TestAccVPCSubnet_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T) { func TestAccVPCSubnet_tags_DefaultTags_nullOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) - var v types.Subnet + + var v awstypes.Subnet resourceName := "aws_subnet.test" - resource.ParallelTest(t, 
resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), CheckDestroy: testAccCheckSubnetDestroy(ctx), @@ -1530,10 +1545,11 @@ func TestAccVPCSubnet_tags_DefaultTags_nullOverlappingResourceTag(t *testing.T) func TestAccVPCSubnet_tags_DefaultTags_nullNonOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) - var v types.Subnet + + var v awstypes.Subnet resourceName := "aws_subnet.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), CheckDestroy: testAccCheckSubnetDestroy(ctx), @@ -1589,10 +1605,11 @@ func TestAccVPCSubnet_tags_DefaultTags_nullNonOverlappingResourceTag(t *testing. func TestAccVPCSubnet_tags_ComputedTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) - var v types.Subnet + + var v awstypes.Subnet resourceName := "aws_subnet.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), CheckDestroy: testAccCheckSubnetDestroy(ctx), @@ -1641,10 +1658,11 @@ func TestAccVPCSubnet_tags_ComputedTag_OnCreate(t *testing.T) { func TestAccVPCSubnet_tags_ComputedTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) - var v types.Subnet + + var v awstypes.Subnet resourceName := "aws_subnet.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), CheckDestroy: testAccCheckSubnetDestroy(ctx), @@ -1734,10 +1752,11 @@ func TestAccVPCSubnet_tags_ComputedTag_OnUpdate_Add(t *testing.T) { func TestAccVPCSubnet_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) - var v types.Subnet + + 
var v awstypes.Subnet resourceName := "aws_subnet.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), CheckDestroy: testAccCheckSubnetDestroy(ctx), @@ -1817,10 +1836,11 @@ func TestAccVPCSubnet_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { func TestAccVPCSubnet_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { ctx := acctest.Context(t) - var v types.Subnet + + var v awstypes.Subnet resourceName := "aws_subnet.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), CheckDestroy: testAccCheckSubnetDestroy(ctx), @@ -1975,10 +1995,11 @@ func TestAccVPCSubnet_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { func TestAccVPCSubnet_tags_IgnoreTags_Overlap_ResourceTag(t *testing.T) { ctx := acctest.Context(t) - var v types.Subnet + + var v awstypes.Subnet resourceName := "aws_subnet.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), CheckDestroy: testAccCheckSubnetDestroy(ctx), diff --git a/internal/service/ec2/vpc_tags_gen_test.go b/internal/service/ec2/vpc_tags_gen_test.go index 6d0c1737b50e..fbe9187a9824 100644 --- a/internal/service/ec2/vpc_tags_gen_test.go +++ b/internal/service/ec2/vpc_tags_gen_test.go @@ -16,12 +16,13 @@ import ( "github.com/hashicorp/terraform-provider-aws/names" ) -func TestAccVPCVPC_tags(t *testing.T) { +func TestAccVPC_tags(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Vpc resourceName := "aws_vpc.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, 
names.EC2ServiceID), CheckDestroy: testAccCheckVPCDestroy(ctx), @@ -189,12 +190,13 @@ func TestAccVPCVPC_tags(t *testing.T) { }) } -func TestAccVPCVPC_tags_null(t *testing.T) { +func TestAccVPC_tags_null(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Vpc resourceName := "aws_vpc.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), CheckDestroy: testAccCheckVPCDestroy(ctx), @@ -252,12 +254,13 @@ func TestAccVPCVPC_tags_null(t *testing.T) { }) } -func TestAccVPCVPC_tags_EmptyMap(t *testing.T) { +func TestAccVPC_tags_EmptyMap(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Vpc resourceName := "aws_vpc.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), CheckDestroy: testAccCheckVPCDestroy(ctx), @@ -311,12 +314,13 @@ func TestAccVPCVPC_tags_EmptyMap(t *testing.T) { }) } -func TestAccVPCVPC_tags_AddOnUpdate(t *testing.T) { +func TestAccVPC_tags_AddOnUpdate(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Vpc resourceName := "aws_vpc.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), CheckDestroy: testAccCheckVPCDestroy(ctx), @@ -388,12 +392,13 @@ func TestAccVPCVPC_tags_AddOnUpdate(t *testing.T) { }) } -func TestAccVPCVPC_tags_EmptyTag_OnCreate(t *testing.T) { +func TestAccVPC_tags_EmptyTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Vpc resourceName := "aws_vpc.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), 
CheckDestroy: testAccCheckVPCDestroy(ctx), @@ -472,12 +477,13 @@ func TestAccVPCVPC_tags_EmptyTag_OnCreate(t *testing.T) { }) } -func TestAccVPCVPC_tags_EmptyTag_OnUpdate_Add(t *testing.T) { +func TestAccVPC_tags_EmptyTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Vpc resourceName := "aws_vpc.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), CheckDestroy: testAccCheckVPCDestroy(ctx), @@ -603,12 +609,13 @@ func TestAccVPCVPC_tags_EmptyTag_OnUpdate_Add(t *testing.T) { }) } -func TestAccVPCVPC_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { +func TestAccVPC_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Vpc resourceName := "aws_vpc.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), CheckDestroy: testAccCheckVPCDestroy(ctx), @@ -688,12 +695,13 @@ func TestAccVPCVPC_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { }) } -func TestAccVPCVPC_tags_DefaultTags_providerOnly(t *testing.T) { +func TestAccVPC_tags_DefaultTags_providerOnly(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Vpc resourceName := "aws_vpc.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), CheckDestroy: testAccCheckVPCDestroy(ctx), @@ -860,12 +868,13 @@ func TestAccVPCVPC_tags_DefaultTags_providerOnly(t *testing.T) { }) } -func TestAccVPCVPC_tags_DefaultTags_nonOverlapping(t *testing.T) { +func TestAccVPC_tags_DefaultTags_nonOverlapping(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Vpc resourceName := "aws_vpc.test" - resource.ParallelTest(t, resource.TestCase{ 
+ acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), CheckDestroy: testAccCheckVPCDestroy(ctx), @@ -1013,12 +1022,13 @@ func TestAccVPCVPC_tags_DefaultTags_nonOverlapping(t *testing.T) { }) } -func TestAccVPCVPC_tags_DefaultTags_overlapping(t *testing.T) { +func TestAccVPC_tags_DefaultTags_overlapping(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Vpc resourceName := "aws_vpc.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), CheckDestroy: testAccCheckVPCDestroy(ctx), @@ -1182,12 +1192,13 @@ func TestAccVPCVPC_tags_DefaultTags_overlapping(t *testing.T) { }) } -func TestAccVPCVPC_tags_DefaultTags_updateToProviderOnly(t *testing.T) { +func TestAccVPC_tags_DefaultTags_updateToProviderOnly(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Vpc resourceName := "aws_vpc.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), CheckDestroy: testAccCheckVPCDestroy(ctx), @@ -1268,12 +1279,13 @@ func TestAccVPCVPC_tags_DefaultTags_updateToProviderOnly(t *testing.T) { }) } -func TestAccVPCVPC_tags_DefaultTags_updateToResourceOnly(t *testing.T) { +func TestAccVPC_tags_DefaultTags_updateToResourceOnly(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Vpc resourceName := "aws_vpc.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), CheckDestroy: testAccCheckVPCDestroy(ctx), @@ -1353,12 +1365,13 @@ func TestAccVPCVPC_tags_DefaultTags_updateToResourceOnly(t *testing.T) { }) } -func 
TestAccVPCVPC_tags_DefaultTags_emptyResourceTag(t *testing.T) { +func TestAccVPC_tags_DefaultTags_emptyResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Vpc resourceName := "aws_vpc.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), CheckDestroy: testAccCheckVPCDestroy(ctx), @@ -1415,12 +1428,13 @@ func TestAccVPCVPC_tags_DefaultTags_emptyResourceTag(t *testing.T) { }) } -func TestAccVPCVPC_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T) { +func TestAccVPC_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Vpc resourceName := "aws_vpc.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), CheckDestroy: testAccCheckVPCDestroy(ctx), @@ -1469,12 +1483,13 @@ func TestAccVPCVPC_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T) { }) } -func TestAccVPCVPC_tags_DefaultTags_nullOverlappingResourceTag(t *testing.T) { +func TestAccVPC_tags_DefaultTags_nullOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Vpc resourceName := "aws_vpc.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), CheckDestroy: testAccCheckVPCDestroy(ctx), @@ -1528,12 +1543,13 @@ func TestAccVPCVPC_tags_DefaultTags_nullOverlappingResourceTag(t *testing.T) { }) } -func TestAccVPCVPC_tags_DefaultTags_nullNonOverlappingResourceTag(t *testing.T) { +func TestAccVPC_tags_DefaultTags_nullNonOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Vpc resourceName := "aws_vpc.test" - resource.ParallelTest(t, resource.TestCase{ + 
acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), CheckDestroy: testAccCheckVPCDestroy(ctx), @@ -1587,12 +1603,13 @@ func TestAccVPCVPC_tags_DefaultTags_nullNonOverlappingResourceTag(t *testing.T) }) } -func TestAccVPCVPC_tags_ComputedTag_OnCreate(t *testing.T) { +func TestAccVPC_tags_ComputedTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Vpc resourceName := "aws_vpc.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), CheckDestroy: testAccCheckVPCDestroy(ctx), @@ -1639,12 +1656,13 @@ func TestAccVPCVPC_tags_ComputedTag_OnCreate(t *testing.T) { }) } -func TestAccVPCVPC_tags_ComputedTag_OnUpdate_Add(t *testing.T) { +func TestAccVPC_tags_ComputedTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Vpc resourceName := "aws_vpc.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), CheckDestroy: testAccCheckVPCDestroy(ctx), @@ -1732,12 +1750,13 @@ func TestAccVPCVPC_tags_ComputedTag_OnUpdate_Add(t *testing.T) { }) } -func TestAccVPCVPC_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { +func TestAccVPC_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Vpc resourceName := "aws_vpc.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), CheckDestroy: testAccCheckVPCDestroy(ctx), @@ -1815,12 +1834,13 @@ func TestAccVPCVPC_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { }) } -func TestAccVPCVPC_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { 
+func TestAccVPC_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Vpc resourceName := "aws_vpc.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), CheckDestroy: testAccCheckVPCDestroy(ctx), @@ -1973,12 +1993,13 @@ func TestAccVPCVPC_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { }) } -func TestAccVPCVPC_tags_IgnoreTags_Overlap_ResourceTag(t *testing.T) { +func TestAccVPC_tags_IgnoreTags_Overlap_ResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Vpc resourceName := "aws_vpc.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), CheckDestroy: testAccCheckVPCDestroy(ctx), diff --git a/internal/service/ec2/vpc_test.go b/internal/service/ec2/vpc_test.go index c964fcd93e2b..1c3acc12a6e2 100644 --- a/internal/service/ec2/vpc_test.go +++ b/internal/service/ec2/vpc_test.go @@ -29,6 +29,22 @@ import ( "github.com/hashicorp/terraform-provider-aws/names" ) +func TestDefaultIPv6CIDRBlockAssociation(t *testing.T) { + t.Parallel() + + vpc := awstypes.Vpc{ + Ipv6CidrBlockAssociationSet: []awstypes.VpcIpv6CidrBlockAssociation{ + {AssociationId: aws.String("default_cidr"), Ipv6CidrBlock: aws.String("fd00:1::/64"), Ipv6CidrBlockState: &awstypes.VpcCidrBlockState{State: awstypes.VpcCidrBlockStateCodeAssociated}}, + {AssociationId: aws.String("some_other_cidr"), Ipv6CidrBlock: aws.String("fd00:2::/64"), Ipv6CidrBlockState: &awstypes.VpcCidrBlockState{State: awstypes.VpcCidrBlockStateCodeAssociated}}, + }, + } + if v := tfec2.DefaultIPv6CIDRBlockAssociation(&vpc, ""); v == nil { + t.Errorf("defaultIPv6CIDRBlockAssociation() got nil") + } else if got, want := aws.ToString(v.AssociationId), "default_cidr"; got != want { + 
t.Errorf("defaultIPv6CIDRBlockAssociation() = %v, want = %v", got, want) + } +} + func TestAccVPC_basic(t *testing.T) { ctx := acctest.Context(t) var vpc awstypes.Vpc @@ -94,6 +110,11 @@ func TestAccVPC_disappears(t *testing.T) { acctest.CheckResourceDisappears(ctx, acctest.Provider, tfec2.ResourceVPC(), resourceName), ), ExpectNonEmptyPlan: true, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + }, + }, }, }, }) @@ -736,6 +757,255 @@ func TestAccVPC_upgradeFromV5(t *testing.T) { }) } +func TestAccVPC_upgradeFromV5PlanRefreshFalse(t *testing.T) { + ctx := acctest.Context(t) + var vpc awstypes.Vpc + resourceName := "aws_vpc.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t), + CheckDestroy: testAccCheckVPCDestroy(ctx), + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + { + ExternalProviders: map[string]resource.ExternalProvider{ + "aws": { + Source: "hashicorp/aws", + VersionConstraint: "5.100.0", + }, + }, + Config: testAccVPCConfig_basic, + Check: resource.ComposeAggregateTestCheckFunc( + acctest.CheckVPCExists(ctx, resourceName, &vpc), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoValue(resourceName, tfjsonpath.New(names.AttrRegion)), + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Config: testAccVPCConfig_basic, + Check: resource.ComposeAggregateTestCheckFunc( + acctest.CheckVPCExists(ctx, resourceName, &vpc), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + 
plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + }, + }, + }, + }) +} + +func TestAccVPC_upgradeFromV5WithUpdatePlanRefreshFalse(t *testing.T) { + ctx := acctest.Context(t) + var vpc awstypes.Vpc + resourceName := "aws_vpc.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t), + CheckDestroy: testAccCheckVPCDestroy(ctx), + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + { + ExternalProviders: map[string]resource.ExternalProvider{ + "aws": { + Source: "hashicorp/aws", + VersionConstraint: "5.100.0", + }, + }, + Config: testAccVPCConfig_tags1(acctest.CtKey1, acctest.CtValue1), + Check: resource.ComposeAggregateTestCheckFunc( + acctest.CheckVPCExists(ctx, resourceName, &vpc), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoValue(resourceName, tfjsonpath.New(names.AttrRegion)), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Config: testAccVPCConfig_tags1(acctest.CtKey1, acctest.CtValue1Updated), + Check: resource.ComposeAggregateTestCheckFunc( + acctest.CheckVPCExists(ctx, resourceName, &vpc), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + 
PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1Updated), + })), + }, + }, + }, + }) +} + +func TestAccVPC_upgradeFromV5WithDefaultRegionRefreshFalse(t *testing.T) { + ctx := acctest.Context(t) + var vpc awstypes.Vpc + resourceName := "aws_vpc.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t), + CheckDestroy: testAccCheckVPCDestroy(ctx), + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + { + ExternalProviders: map[string]resource.ExternalProvider{ + "aws": { + Source: "hashicorp/aws", + VersionConstraint: "5.100.0", + }, + }, + Config: testAccVPCConfig_tags1("Name", rName), + Check: resource.ComposeAggregateTestCheckFunc( + acctest.CheckVPCExists(ctx, resourceName, &vpc), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoValue(resourceName, tfjsonpath.New(names.AttrRegion)), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + "Name": knownvalue.StringExact(rName), + })), + }, + }, + { + ProtoV5ProviderFactories: 
acctest.ProtoV5ProviderFactories, + Config: testAccVPCConfig_region(rName, acctest.Region()), + Check: resource.ComposeAggregateTestCheckFunc( + acctest.CheckVPCExists(ctx, resourceName, &vpc), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + "Name": knownvalue.StringExact(rName), + })), + }, + }, + }, + }) +} + +func TestAccVPC_upgradeFromV5WithNewRegionRefreshFalse(t *testing.T) { + ctx := acctest.Context(t) + var vpc awstypes.Vpc + resourceName := "aws_vpc.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t), + CheckDestroy: testAccCheckVPCDestroy(ctx), + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + { + ExternalProviders: map[string]resource.ExternalProvider{ + "aws": { + Source: "hashicorp/aws", + VersionConstraint: "5.100.0", + }, + }, + Config: testAccVPCConfig_tags1("Name", rName), + Check: resource.ComposeAggregateTestCheckFunc( + acctest.CheckVPCExists(ctx, resourceName, &vpc), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoValue(resourceName, 
tfjsonpath.New(names.AttrRegion)), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + "Name": knownvalue.StringExact(rName), + })), + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Config: testAccVPCConfig_region(rName, acctest.AlternateRegion()), + // Can't call 'acctest.CheckVPCExists' as the VPC's in the alternate Region. + // Check: resource.ComposeAggregateTestCheckFunc( + // acctest.CheckVPCExists(ctx, resourceName, &vpc), + // ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionReplace), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + "Name": knownvalue.StringExact(rName), + })), + }, + }, + }, + }) +} + func TestAccVPC_regionCreateNull(t *testing.T) { ctx := acctest.Context(t) var vpc awstypes.Vpc diff --git a/internal/service/ec2/vpc_traffic_mirror_filter.go b/internal/service/ec2/vpc_traffic_mirror_filter.go index c4a193677643..7f699c8337a8 100644 --- a/internal/service/ec2/vpc_traffic_mirror_filter.go +++ b/internal/service/ec2/vpc_traffic_mirror_filter.go @@ -8,7 +8,6 @@ import ( "log" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/arn" "github.com/aws/aws-sdk-go-v2/service/ec2" awstypes "github.com/aws/aws-sdk-go-v2/service/ec2/types" "github.com/hashicorp/aws-sdk-go-base/v2/tfawserr" @@ -105,7 +104,8 @@ func resourceTrafficMirrorFilterCreate(ctx context.Context, d *schema.ResourceDa func resourceTrafficMirrorFilterRead(ctx 
context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).EC2Client(ctx) + c := meta.(*conns.AWSClient) + conn := c.EC2Client(ctx) trafficMirrorFilter, err := findTrafficMirrorFilterByID(ctx, conn, d.Id()) @@ -119,14 +119,7 @@ func resourceTrafficMirrorFilterRead(ctx context.Context, d *schema.ResourceData return sdkdiag.AppendErrorf(diags, "reading EC2 Traffic Mirror Filter (%s): %s", d.Id(), err) } - arn := arn.ARN{ - Partition: meta.(*conns.AWSClient).Partition(ctx), - Service: "ec2", - Region: meta.(*conns.AWSClient).Region(ctx), - AccountID: meta.(*conns.AWSClient).AccountID(ctx), - Resource: "traffic-mirror-filter/" + d.Id(), - }.String() - d.Set(names.AttrARN, arn) + d.Set(names.AttrARN, trafficMirrorFilterARN(ctx, c, d.Id())) d.Set(names.AttrDescription, trafficMirrorFilter.Description) d.Set("network_services", trafficMirrorFilter.NetworkServices) @@ -184,3 +177,7 @@ func resourceTrafficMirrorFilterDelete(ctx context.Context, d *schema.ResourceDa return diags } + +func trafficMirrorFilterARN(ctx context.Context, c *conns.AWSClient, trafficMirrorFilterID string) string { + return c.RegionalARN(ctx, names.EC2, "traffic-mirror-filter/"+trafficMirrorFilterID) +} diff --git a/internal/service/ec2/vpc_traffic_mirror_filter_rule.go b/internal/service/ec2/vpc_traffic_mirror_filter_rule.go index 5926fc0bcae9..f900dd2910fd 100644 --- a/internal/service/ec2/vpc_traffic_mirror_filter_rule.go +++ b/internal/service/ec2/vpc_traffic_mirror_filter_rule.go @@ -10,7 +10,6 @@ import ( "strings" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/arn" "github.com/aws/aws-sdk-go-v2/service/ec2" awstypes "github.com/aws/aws-sdk-go-v2/service/ec2/types" "github.com/hashicorp/aws-sdk-go-base/v2/tfawserr" @@ -165,7 +164,8 @@ func resourceTrafficMirrorFilterRuleCreate(ctx context.Context, d *schema.Resour func resourceTrafficMirrorFilterRuleRead(ctx context.Context, d *schema.ResourceData, 
meta any) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).EC2Client(ctx) + c := meta.(*conns.AWSClient) + conn := c.EC2Client(ctx) rule, err := findTrafficMirrorFilterRuleByTwoPartKey(ctx, conn, d.Get("traffic_mirror_filter_id").(string), d.Id()) @@ -179,14 +179,7 @@ func resourceTrafficMirrorFilterRuleRead(ctx context.Context, d *schema.Resource return sdkdiag.AppendErrorf(diags, "reading EC2 Traffic Mirror Filter Rule (%s): %s", d.Id(), err) } - arn := arn.ARN{ - Partition: meta.(*conns.AWSClient).Partition(ctx), - Service: "ec2", - Region: meta.(*conns.AWSClient).Region(ctx), - AccountID: meta.(*conns.AWSClient).AccountID(ctx), - Resource: "traffic-mirror-filter-rule/" + d.Id(), - }.String() - d.Set(names.AttrARN, arn) + d.Set(names.AttrARN, trafficMirrorFilterRuleARN(ctx, c, d.Id())) d.Set(names.AttrDescription, rule.Description) d.Set("destination_cidr_block", rule.DestinationCidrBlock) if rule.DestinationPortRange != nil { @@ -325,6 +318,10 @@ func resourceTrafficMirrorFilterRuleImport(ctx context.Context, d *schema.Resour return []*schema.ResourceData{d}, nil } +func trafficMirrorFilterRuleARN(ctx context.Context, c *conns.AWSClient, trafficMirrorFilterRuleID string) string { + return c.RegionalARN(ctx, names.EC2, "traffic-mirror-filter-rule/"+trafficMirrorFilterRuleID) +} + func expandTrafficMirrorPortRangeRequest(tfMap map[string]any) *awstypes.TrafficMirrorPortRangeRequest { if tfMap == nil { return nil diff --git a/internal/service/ec2/vpc_traffic_mirror_session.go b/internal/service/ec2/vpc_traffic_mirror_session.go index 2257e70a95cb..ed6f3baae7ae 100644 --- a/internal/service/ec2/vpc_traffic_mirror_session.go +++ b/internal/service/ec2/vpc_traffic_mirror_session.go @@ -8,7 +8,6 @@ import ( "log" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/arn" "github.com/aws/aws-sdk-go-v2/service/ec2" awstypes "github.com/aws/aws-sdk-go-v2/service/ec2/types" "github.com/hashicorp/aws-sdk-go-base/v2/tfawserr" 
@@ -126,7 +125,8 @@ func resourceTrafficMirrorSessionCreate(ctx context.Context, d *schema.ResourceD func resourceTrafficMirrorSessionRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).EC2Client(ctx) + c := meta.(*conns.AWSClient) + conn := c.EC2Client(ctx) session, err := findTrafficMirrorSessionByID(ctx, conn, d.Id()) @@ -141,14 +141,7 @@ func resourceTrafficMirrorSessionRead(ctx context.Context, d *schema.ResourceDat } ownerID := aws.ToString(session.OwnerId) - arn := arn.ARN{ - Partition: meta.(*conns.AWSClient).Partition(ctx), - Service: "ec2", - Region: meta.(*conns.AWSClient).Region(ctx), - AccountID: ownerID, - Resource: "traffic-mirror-session/" + d.Id(), - }.String() - d.Set(names.AttrARN, arn) + d.Set(names.AttrARN, trafficMirrorSessionARN(ctx, c, ownerID, d.Id())) d.Set(names.AttrDescription, session.Description) d.Set(names.AttrNetworkInterfaceID, session.NetworkInterfaceId) d.Set(names.AttrOwnerID, ownerID) @@ -244,3 +237,7 @@ func resourceTrafficMirrorSessionDelete(ctx context.Context, d *schema.ResourceD return diags } + +func trafficMirrorSessionARN(ctx context.Context, c *conns.AWSClient, accountID, trafficMirrorSessionID string) string { + return c.RegionalARNWithAccount(ctx, names.EC2, accountID, "traffic-mirror-session/"+trafficMirrorSessionID) +} diff --git a/internal/service/ec2/vpc_traffic_mirror_target.go b/internal/service/ec2/vpc_traffic_mirror_target.go index c39f34040db2..0373400d425a 100644 --- a/internal/service/ec2/vpc_traffic_mirror_target.go +++ b/internal/service/ec2/vpc_traffic_mirror_target.go @@ -5,11 +5,9 @@ package ec2 import ( "context" - "fmt" "log" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/arn" "github.com/aws/aws-sdk-go-v2/service/ec2" awstypes "github.com/aws/aws-sdk-go-v2/service/ec2/types" "github.com/hashicorp/aws-sdk-go-base/v2/tfawserr" @@ -127,7 +125,8 @@ func resourceTrafficMirrorTargetCreate(ctx 
context.Context, d *schema.ResourceDa func resourceTrafficMirrorTargetRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).EC2Client(ctx) + c := meta.(*conns.AWSClient) + conn := c.EC2Client(ctx) target, err := findTrafficMirrorTargetByID(ctx, conn, d.Id()) @@ -142,14 +141,7 @@ func resourceTrafficMirrorTargetRead(ctx context.Context, d *schema.ResourceData } ownerID := aws.ToString(target.OwnerId) - arn := arn.ARN{ - Partition: meta.(*conns.AWSClient).Partition(ctx), - Service: names.EC2, - Region: meta.(*conns.AWSClient).Region(ctx), - AccountID: ownerID, - Resource: fmt.Sprintf("traffic-mirror-target/%s", d.Id()), - }.String() - d.Set(names.AttrARN, arn) + d.Set(names.AttrARN, trafficMirrorTargetARN(ctx, c, ownerID, d.Id())) d.Set(names.AttrDescription, target.Description) d.Set("gateway_load_balancer_endpoint_id", target.GatewayLoadBalancerEndpointId) d.Set(names.AttrNetworkInterfaceID, target.NetworkInterfaceId) @@ -189,3 +181,7 @@ func resourceTrafficMirrorTargetDelete(ctx context.Context, d *schema.ResourceDa return diags } + +func trafficMirrorTargetARN(ctx context.Context, c *conns.AWSClient, accountID, trafficMirrorTargetID string) string { + return c.RegionalARNWithAccount(ctx, names.EC2, accountID, "traffic-mirror-target/"+trafficMirrorTargetID) +} diff --git a/internal/service/ec2/vpc_traffic_mirror_target_test.go b/internal/service/ec2/vpc_traffic_mirror_target_test.go index f528c219b1d9..c478603d58b7 100644 --- a/internal/service/ec2/vpc_traffic_mirror_target_test.go +++ b/internal/service/ec2/vpc_traffic_mirror_target_test.go @@ -78,7 +78,7 @@ func TestAccVPCTrafficMirrorTarget_eni(t *testing.T) { Check: resource.ComposeTestCheckFunc( testAccCheckTrafficMirrorTargetExists(ctx, resourceName, &v), resource.TestCheckResourceAttr(resourceName, names.AttrDescription, description), - resource.TestMatchResourceAttr(resourceName, names.AttrNetworkInterfaceID, 
regexache.MustCompile("eni-.*")), + resource.TestMatchResourceAttr(resourceName, names.AttrNetworkInterfaceID, regexache.MustCompile(`^eni-[0-9a-f]+$`)), ), }, { diff --git a/internal/service/ec2/vpnclient_endpoint.go b/internal/service/ec2/vpnclient_endpoint.go index 60d72ce25d3e..679f48f35945 100644 --- a/internal/service/ec2/vpnclient_endpoint.go +++ b/internal/service/ec2/vpnclient_endpoint.go @@ -5,11 +5,9 @@ package ec2 import ( "context" - "fmt" "log" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/arn" "github.com/aws/aws-sdk-go-v2/service/ec2" awstypes "github.com/aws/aws-sdk-go-v2/service/ec2/types" "github.com/hashicorp/aws-sdk-go-base/v2/tfawserr" @@ -87,7 +85,7 @@ func resourceClientVPNEndpoint() *schema.Resource { }, "client_cidr_block": { Type: schema.TypeString, - Required: true, + Optional: true, ForceNew: true, ValidateFunc: validation.IsCIDR, }, @@ -188,6 +186,13 @@ func resourceClientVPNEndpoint() *schema.Resource { Optional: true, Elem: &schema.Schema{Type: schema.TypeString}, }, + "endpoint_ip_address_type": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ValidateDiagFunc: enum.Validate[awstypes.EndpointIpAddressType](), + }, names.AttrSecurityGroupIDs: { Type: schema.TypeSet, MinItems: 1, @@ -224,6 +229,13 @@ func resourceClientVPNEndpoint() *schema.Resource { }, names.AttrTags: tftags.TagsSchema(), names.AttrTagsAll: tftags.TagsSchemaComputed(), + "traffic_ip_address_type": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ValidateDiagFunc: enum.Validate[awstypes.TrafficIpAddressType](), + }, "transport_protocol": { Type: schema.TypeString, Optional: true, @@ -254,7 +266,6 @@ func resourceClientVPNEndpointCreate(ctx context.Context, d *schema.ResourceData conn := meta.(*conns.AWSClient).EC2Client(ctx) input := &ec2.CreateClientVpnEndpointInput{ - ClientCidrBlock: aws.String(d.Get("client_cidr_block").(string)), ClientToken: 
aws.String(id.UniqueId()), ServerCertificateArn: aws.String(d.Get("server_certificate_arn").(string)), SplitTunnel: aws.Bool(d.Get("split_tunnel").(bool)), @@ -267,6 +278,10 @@ func resourceClientVPNEndpointCreate(ctx context.Context, d *schema.ResourceData input.AuthenticationOptions = expandClientVPNAuthenticationRequests(v.(*schema.Set).List()) } + if v, ok := d.GetOk("client_cidr_block"); ok { + input.ClientCidrBlock = aws.String(v.(string)) + } + if v, ok := d.GetOk("client_connect_options"); ok && len(v.([]any)) > 0 && v.([]any)[0] != nil { input.ClientConnectOptions = expandClientConnectOptions(v.([]any)[0].(map[string]any)) } @@ -295,6 +310,10 @@ func resourceClientVPNEndpointCreate(ctx context.Context, d *schema.ResourceData input.DnsServers = flex.ExpandStringValueList(v.([]any)) } + if v, ok := d.GetOk("endpoint_ip_address_type"); ok { + input.EndpointIpAddressType = awstypes.EndpointIpAddressType(v.(string)) + } + if v, ok := d.GetOk(names.AttrSecurityGroupIDs); ok { input.SecurityGroupIds = flex.ExpandStringValueSet(v.(*schema.Set)) } @@ -307,6 +326,10 @@ func resourceClientVPNEndpointCreate(ctx context.Context, d *schema.ResourceData input.SessionTimeoutHours = aws.Int32(int32(v.(int))) } + if v, ok := d.GetOk("traffic_ip_address_type"); ok { + input.TrafficIpAddressType = awstypes.TrafficIpAddressType(v.(string)) + } + if v, ok := d.GetOk(names.AttrVPCID); ok { input.VpcId = aws.String(v.(string)) } @@ -324,7 +347,8 @@ func resourceClientVPNEndpointCreate(ctx context.Context, d *schema.ResourceData func resourceClientVPNEndpointRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).EC2Client(ctx) + c := meta.(*conns.AWSClient) + conn := c.EC2Client(ctx) ep, err := findClientVPNEndpointByID(ctx, conn, d.Id()) @@ -338,14 +362,7 @@ func resourceClientVPNEndpointRead(ctx context.Context, d *schema.ResourceData, return sdkdiag.AppendErrorf(diags, "reading EC2 Client VPN 
Endpoint (%s): %s", d.Id(), err) } - arn := arn.ARN{ - Partition: meta.(*conns.AWSClient).Partition(ctx), - Service: names.EC2, - Region: meta.(*conns.AWSClient).Region(ctx), - AccountID: meta.(*conns.AWSClient).AccountID(ctx), - Resource: fmt.Sprintf("client-vpn-endpoint/%s", d.Id()), - }.String() - d.Set(names.AttrARN, arn) + d.Set(names.AttrARN, clientVPNEndpointARN(ctx, c, d.Id())) if err := d.Set("authentication_options", flattenClientVPNAuthentications(ep.AuthenticationOptions)); err != nil { return sdkdiag.AppendErrorf(diags, "setting authentication_options: %s", err) } @@ -381,8 +398,9 @@ func resourceClientVPNEndpointRead(ctx context.Context, d *schema.ResourceData, d.Set(names.AttrDescription, ep.Description) d.Set("disconnect_on_session_timeout", ep.DisconnectOnSessionTimeout) d.Set(names.AttrDNSName, ep.DnsName) - d.Set("dns_servers", aws.StringSlice(ep.DnsServers)) - d.Set(names.AttrSecurityGroupIDs, aws.StringSlice(ep.SecurityGroupIds)) + d.Set("dns_servers", ep.DnsServers) + d.Set("endpoint_ip_address_type", ep.EndpointIpAddressType) + d.Set(names.AttrSecurityGroupIDs, ep.SecurityGroupIds) if aws.ToString(ep.SelfServicePortalUrl) != "" { d.Set("self_service_portal", awstypes.SelfServicePortalEnabled) } else { @@ -392,6 +410,7 @@ func resourceClientVPNEndpointRead(ctx context.Context, d *schema.ResourceData, d.Set("server_certificate_arn", ep.ServerCertificateArn) d.Set("session_timeout_hours", ep.SessionTimeoutHours) d.Set("split_tunnel", ep.SplitTunnel) + d.Set("traffic_ip_address_type", ep.TrafficIpAddressType) d.Set("transport_protocol", ep.TransportProtocol) d.Set(names.AttrVPCID, ep.VpcId) d.Set("vpn_port", ep.VpnPort) @@ -792,3 +811,7 @@ func flattenClientRouteEnforcementOptions(apiObject *awstypes.ClientRouteEnforce return tfMap } + +func clientVPNEndpointARN(ctx context.Context, c *conns.AWSClient, clientVPNEndpointID string) string { + return c.RegionalARN(ctx, names.EC2, "client-vpn-endpoint/"+clientVPNEndpointID) +} diff --git 
a/internal/service/ec2/vpnclient_endpoint_data_source.go b/internal/service/ec2/vpnclient_endpoint_data_source.go index be33b58f92a2..4426b0fc7b5f 100644 --- a/internal/service/ec2/vpnclient_endpoint_data_source.go +++ b/internal/service/ec2/vpnclient_endpoint_data_source.go @@ -5,11 +5,9 @@ package ec2 import ( "context" - "fmt" "time" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/arn" "github.com/aws/aws-sdk-go-v2/service/ec2" awstypes "github.com/aws/aws-sdk-go-v2/service/ec2/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" @@ -151,6 +149,10 @@ func dataSourceClientVPNEndpoint() *schema.Resource { Computed: true, Elem: &schema.Schema{Type: schema.TypeString}, }, + "endpoint_ip_address_type": { + Type: schema.TypeString, + Computed: true, + }, names.AttrFilter: customFiltersSchema(), names.AttrSecurityGroupIDs: { Type: schema.TypeList, @@ -178,6 +180,10 @@ func dataSourceClientVPNEndpoint() *schema.Resource { Computed: true, }, names.AttrTags: tftags.TagsSchemaComputed(), + "traffic_ip_address_type": { + Type: schema.TypeString, + Computed: true, + }, "transport_protocol": { Type: schema.TypeString, Computed: true, @@ -196,7 +202,8 @@ func dataSourceClientVPNEndpoint() *schema.Resource { func dataSourceClientVPNEndpointRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).EC2Client(ctx) + c := meta.(*conns.AWSClient) + conn := c.EC2Client(ctx) input := &ec2.DescribeClientVpnEndpointsInput{} @@ -223,14 +230,7 @@ func dataSourceClientVPNEndpointRead(ctx context.Context, d *schema.ResourceData } d.SetId(aws.ToString(ep.ClientVpnEndpointId)) - arn := arn.ARN{ - Partition: meta.(*conns.AWSClient).Partition(ctx), - Service: names.EC2, - Region: meta.(*conns.AWSClient).Region(ctx), - AccountID: meta.(*conns.AWSClient).AccountID(ctx), - Resource: fmt.Sprintf("client-vpn-endpoint/%s", d.Id()), - }.String() - d.Set(names.AttrARN, arn) + 
d.Set(names.AttrARN, clientVPNEndpointARN(ctx, c, d.Id())) if err := d.Set("authentication_options", flattenClientVPNAuthentications(ep.AuthenticationOptions)); err != nil { return sdkdiag.AppendErrorf(diags, "setting authentication_options: %s", err) } @@ -266,8 +266,9 @@ func dataSourceClientVPNEndpointRead(ctx context.Context, d *schema.ResourceData } d.Set(names.AttrDescription, ep.Description) d.Set(names.AttrDNSName, ep.DnsName) - d.Set("dns_servers", aws.StringSlice(ep.DnsServers)) - d.Set(names.AttrSecurityGroupIDs, aws.StringSlice(ep.SecurityGroupIds)) + d.Set("dns_servers", ep.DnsServers) + d.Set("endpoint_ip_address_type", ep.EndpointIpAddressType) + d.Set(names.AttrSecurityGroupIDs, ep.SecurityGroupIds) if aws.ToString(ep.SelfServicePortalUrl) != "" { d.Set("self_service_portal", awstypes.SelfServicePortalEnabled) } else { @@ -278,6 +279,7 @@ func dataSourceClientVPNEndpointRead(ctx context.Context, d *schema.ResourceData d.Set("session_timeout_hours", ep.SessionTimeoutHours) d.Set("split_tunnel", ep.SplitTunnel) d.Set("transport_protocol", ep.TransportProtocol) + d.Set("traffic_ip_address_type", ep.TrafficIpAddressType) d.Set(names.AttrVPCID, ep.VpcId) d.Set("vpn_port", ep.VpnPort) diff --git a/internal/service/ec2/vpnclient_endpoint_data_source_test.go b/internal/service/ec2/vpnclient_endpoint_data_source_test.go index 2b3fb0d01e0a..e042caedd8d0 100644 --- a/internal/service/ec2/vpnclient_endpoint_data_source_test.go +++ b/internal/service/ec2/vpnclient_endpoint_data_source_test.go @@ -43,6 +43,7 @@ func testAccClientVPNEndpointDataSource_basic(t *testing.T, semaphore tfsync.Sem resource.TestCheckResourceAttrPair(datasource1Name, names.AttrDescription, resourceName, names.AttrDescription), resource.TestCheckResourceAttrPair(datasource1Name, names.AttrDNSName, resourceName, names.AttrDNSName), resource.TestCheckResourceAttrPair(datasource1Name, "dns_servers.#", resourceName, "dns_servers.#"), + resource.TestCheckResourceAttrPair(datasource1Name, 
"endpoint_ip_address_type", resourceName, "endpoint_ip_address_type"), resource.TestCheckResourceAttrPair(datasource1Name, "security_group_ids.#", resourceName, "security_group_ids.#"), resource.TestCheckResourceAttrPair(datasource1Name, "self_service_portal", resourceName, "self_service_portal"), resource.TestCheckResourceAttrPair(datasource1Name, "self_service_portal_url", resourceName, "self_service_portal_url"), @@ -50,6 +51,7 @@ func testAccClientVPNEndpointDataSource_basic(t *testing.T, semaphore tfsync.Sem resource.TestCheckResourceAttrPair(datasource1Name, "session_timeout_hours", resourceName, "session_timeout_hours"), resource.TestCheckResourceAttrPair(datasource1Name, "split_tunnel", resourceName, "split_tunnel"), resource.TestCheckResourceAttrPair(datasource1Name, acctest.CtTagsPercent, resourceName, acctest.CtTagsPercent), + resource.TestCheckResourceAttrPair(datasource1Name, "traffic_ip_address_type", resourceName, "traffic_ip_address_type"), resource.TestCheckResourceAttrPair(datasource1Name, "transport_protocol", resourceName, "transport_protocol"), resource.TestCheckResourceAttrPair(datasource1Name, names.AttrVPCID, resourceName, names.AttrVPCID), resource.TestCheckResourceAttrPair(datasource1Name, "vpn_port", resourceName, "vpn_port"), @@ -64,6 +66,7 @@ func testAccClientVPNEndpointDataSource_basic(t *testing.T, semaphore tfsync.Sem resource.TestCheckResourceAttrPair(datasource2Name, names.AttrDescription, resourceName, names.AttrDescription), resource.TestCheckResourceAttrPair(datasource2Name, names.AttrDNSName, resourceName, names.AttrDNSName), resource.TestCheckResourceAttrPair(datasource2Name, "dns_servers.#", resourceName, "dns_servers.#"), + resource.TestCheckResourceAttrPair(datasource2Name, "endpoint_ip_address_type", resourceName, "endpoint_ip_address_type"), resource.TestCheckResourceAttrPair(datasource2Name, "security_group_ids.#", resourceName, "security_group_ids.#"), resource.TestCheckResourceAttrPair(datasource2Name, 
"self_service_portal_url", resourceName, "self_service_portal_url"), resource.TestCheckResourceAttrPair(datasource2Name, "self_service_portal", resourceName, "self_service_portal"), @@ -71,6 +74,7 @@ func testAccClientVPNEndpointDataSource_basic(t *testing.T, semaphore tfsync.Sem resource.TestCheckResourceAttrPair(datasource2Name, "session_timeout_hours", resourceName, "session_timeout_hours"), resource.TestCheckResourceAttrPair(datasource2Name, "split_tunnel", resourceName, "split_tunnel"), resource.TestCheckResourceAttrPair(datasource2Name, acctest.CtTagsPercent, resourceName, acctest.CtTagsPercent), + resource.TestCheckResourceAttrPair(datasource2Name, "traffic_ip_address_type", resourceName, "traffic_ip_address_type"), resource.TestCheckResourceAttrPair(datasource2Name, "transport_protocol", resourceName, "transport_protocol"), resource.TestCheckResourceAttrPair(datasource2Name, names.AttrVPCID, resourceName, names.AttrVPCID), resource.TestCheckResourceAttrPair(datasource2Name, "vpn_port", resourceName, "vpn_port"), @@ -86,6 +90,7 @@ func testAccClientVPNEndpointDataSource_basic(t *testing.T, semaphore tfsync.Sem resource.TestCheckResourceAttrPair(datasource3Name, names.AttrDescription, resourceName, names.AttrDescription), resource.TestCheckResourceAttrPair(datasource3Name, names.AttrDNSName, resourceName, names.AttrDNSName), resource.TestCheckResourceAttrPair(datasource3Name, "dns_servers.#", resourceName, "dns_servers.#"), + resource.TestCheckResourceAttrPair(datasource3Name, "endpoint_ip_address_type", resourceName, "endpoint_ip_address_type"), resource.TestCheckResourceAttrPair(datasource3Name, "security_group_ids.#", resourceName, "security_group_ids.#"), resource.TestCheckResourceAttrPair(datasource2Name, "self_service_portal_url", resourceName, "self_service_portal_url"), resource.TestCheckResourceAttrPair(datasource3Name, "self_service_portal", resourceName, "self_service_portal"), @@ -93,6 +98,7 @@ func testAccClientVPNEndpointDataSource_basic(t 
*testing.T, semaphore tfsync.Sem resource.TestCheckResourceAttrPair(datasource3Name, "session_timeout_hours", resourceName, "session_timeout_hours"), resource.TestCheckResourceAttrPair(datasource3Name, "split_tunnel", resourceName, "split_tunnel"), resource.TestCheckResourceAttrPair(datasource3Name, acctest.CtTagsPercent, resourceName, acctest.CtTagsPercent), + resource.TestCheckResourceAttrPair(datasource3Name, "traffic_ip_address_type", resourceName, "traffic_ip_address_type"), resource.TestCheckResourceAttrPair(datasource3Name, "transport_protocol", resourceName, "transport_protocol"), resource.TestCheckResourceAttrPair(datasource3Name, names.AttrVPCID, resourceName, names.AttrVPCID), resource.TestCheckResourceAttrPair(datasource3Name, "vpn_port", resourceName, "vpn_port"), diff --git a/internal/service/ec2/vpnclient_endpoint_test.go b/internal/service/ec2/vpnclient_endpoint_test.go index 1cd7ca15434d..4002344b4ff6 100644 --- a/internal/service/ec2/vpnclient_endpoint_test.go +++ b/internal/service/ec2/vpnclient_endpoint_test.go @@ -12,6 +12,7 @@ import ( awstypes "github.com/aws/aws-sdk-go-v2/service/ec2/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/plancheck" "github.com/hashicorp/terraform-plugin-testing/terraform" "github.com/hashicorp/terraform-provider-aws/internal/acctest" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -59,6 +60,7 @@ func testAccClientVPNEndpoint_basic(t *testing.T, semaphore tfsync.Semaphore) { resource.TestCheckResourceAttrSet(resourceName, "disconnect_on_session_timeout"), resource.TestCheckResourceAttrSet(resourceName, names.AttrDNSName), resource.TestCheckResourceAttr(resourceName, "dns_servers.#", "0"), + resource.TestCheckResourceAttr(resourceName, "endpoint_ip_address_type", string(awstypes.EndpointIpAddressTypeIpv4)), resource.TestCheckResourceAttr(resourceName, 
"security_group_ids.#", "0"), resource.TestCheckResourceAttr(resourceName, "self_service_portal", "disabled"), resource.TestCheckResourceAttr(resourceName, "self_service_portal_url", ""), @@ -67,6 +69,7 @@ func testAccClientVPNEndpoint_basic(t *testing.T, semaphore tfsync.Semaphore) { resource.TestCheckResourceAttr(resourceName, "split_tunnel", acctest.CtFalse), resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, "1"), resource.TestCheckResourceAttr(resourceName, "tags.Name", rName), + resource.TestCheckResourceAttr(resourceName, "traffic_ip_address_type", string(awstypes.TrafficIpAddressTypeIpv4)), resource.TestCheckResourceAttr(resourceName, "transport_protocol", "udp"), resource.TestCheckResourceAttr(resourceName, names.AttrVPCID, ""), resource.TestCheckResourceAttr(resourceName, "vpn_port", "443"), @@ -740,6 +743,116 @@ func testAccClientVPNEndpoint_vpcSecurityGroups(t *testing.T, semaphore tfsync.S }) } +func testAccClientVPNEndpoint_endpointIPAddressType(t *testing.T, semaphore tfsync.Semaphore) { + ctx := acctest.Context(t) + var v awstypes.ClientVpnEndpoint + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_ec2_client_vpn_endpoint.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheckClientVPNSyncronize(t, semaphore) + acctest.PreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckClientVPNEndpointDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccClientVPNEndpointConfig_endpointIPAddressType(t, rName, string(awstypes.EndpointIpAddressTypeIpv6)), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + }, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckClientVPNEndpointExists(ctx, resourceName, &v), + 
acctest.MatchResourceAttrRegionalARN(ctx, resourceName, names.AttrARN, "ec2", regexache.MustCompile(`client-vpn-endpoint/cvpn-endpoint-.+`)), + resource.TestCheckResourceAttr(resourceName, "endpoint_ip_address_type", string(awstypes.EndpointIpAddressTypeIpv6)), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccClientVPNEndpointConfig_endpointIPAddressType(t, rName, string(awstypes.EndpointIpAddressTypeDualStack)), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionDestroyBeforeCreate), + }, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckClientVPNEndpointExists(ctx, resourceName, &v), + acctest.MatchResourceAttrRegionalARN(ctx, resourceName, names.AttrARN, "ec2", regexache.MustCompile(`client-vpn-endpoint/cvpn-endpoint-.+`)), + resource.TestCheckResourceAttr(resourceName, "endpoint_ip_address_type", string(awstypes.EndpointIpAddressTypeDualStack)), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccClientVPNEndpoint_trafficIPAddressType(t *testing.T, semaphore tfsync.Semaphore) { + ctx := acctest.Context(t) + var v awstypes.ClientVpnEndpoint + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_ec2_client_vpn_endpoint.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheckClientVPNSyncronize(t, semaphore) + acctest.PreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckClientVPNEndpointDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccClientVPNEndpointConfig_trafficIPAddressTypeIPv6(t, rName), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + 
plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + }, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckClientVPNEndpointExists(ctx, resourceName, &v), + acctest.MatchResourceAttrRegionalARN(ctx, resourceName, names.AttrARN, "ec2", regexache.MustCompile(`client-vpn-endpoint/cvpn-endpoint-.+`)), + resource.TestCheckResourceAttr(resourceName, "traffic_ip_address_type", string(awstypes.TrafficIpAddressTypeIpv6)), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccClientVPNEndpointConfig_trafficIPAddressTypeDualStack(t, rName), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionDestroyBeforeCreate), + }, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckClientVPNEndpointExists(ctx, resourceName, &v), + acctest.MatchResourceAttrRegionalARN(ctx, resourceName, names.AttrARN, "ec2", regexache.MustCompile(`client-vpn-endpoint/cvpn-endpoint-.+`)), + resource.TestCheckResourceAttr(resourceName, "traffic_ip_address_type", string(awstypes.TrafficIpAddressTypeDualStack)), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + func testAccCheckClientVPNEndpointDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { conn := acctest.Provider.Meta().(*conns.AWSClient).EC2Client(ctx) @@ -1342,3 +1455,74 @@ resource "aws_ec2_client_vpn_endpoint" "test" { } `, rName, nSecurityGroups)) } + +func testAccClientVPNEndpointConfig_endpointIPAddressType(t *testing.T, rName, endpointIPAddressType string) string { + return acctest.ConfigCompose(testAccClientVPNEndpointConfig_acmCertificateBase(t, "test"), fmt.Sprintf(` +resource "aws_ec2_client_vpn_endpoint" "test" { + server_certificate_arn = aws_acm_certificate.test.arn + client_cidr_block = "10.0.0.0/16" + + 
authentication_options { + type = "certificate-authentication" + root_certificate_chain_arn = aws_acm_certificate.test.arn + } + + connection_log_options { + enabled = false + } + + endpoint_ip_address_type = %[2]q + + tags = { + Name = %[1]q + } +} +`, rName, endpointIPAddressType)) +} + +func testAccClientVPNEndpointConfig_trafficIPAddressTypeIPv6(t *testing.T, rName string) string { + return acctest.ConfigCompose(testAccClientVPNEndpointConfig_acmCertificateBase(t, "test"), fmt.Sprintf(` +resource "aws_ec2_client_vpn_endpoint" "test" { + server_certificate_arn = aws_acm_certificate.test.arn + + authentication_options { + type = "certificate-authentication" + root_certificate_chain_arn = aws_acm_certificate.test.arn + } + + connection_log_options { + enabled = false + } + + traffic_ip_address_type = "ipv6" + + tags = { + Name = %[1]q + } +} +`, rName)) +} + +func testAccClientVPNEndpointConfig_trafficIPAddressTypeDualStack(t *testing.T, rName string) string { + return acctest.ConfigCompose(testAccClientVPNEndpointConfig_acmCertificateBase(t, "test"), fmt.Sprintf(` +resource "aws_ec2_client_vpn_endpoint" "test" { + server_certificate_arn = aws_acm_certificate.test.arn + client_cidr_block = "10.0.0.0/16" + + authentication_options { + type = "certificate-authentication" + root_certificate_chain_arn = aws_acm_certificate.test.arn + } + + connection_log_options { + enabled = false + } + + traffic_ip_address_type = "dual-stack" + + tags = { + Name = %[1]q + } +} +`, rName)) +} diff --git a/internal/service/ec2/vpnclient_route.go b/internal/service/ec2/vpnclient_route.go index 5a359ec8f715..c1223f114dfc 100644 --- a/internal/service/ec2/vpnclient_route.go +++ b/internal/service/ec2/vpnclient_route.go @@ -92,7 +92,7 @@ func resourceClientVPNRouteCreate(ctx context.Context, d *schema.ResourceData, m input.Description = aws.String(v.(string)) } - _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, ec2PropagationTimeout, func() (any, error) { + _, err := 
tfresource.RetryWhenAWSErrCodeEquals(ctx, ec2PropagationTimeout, func(ctx context.Context) (any, error) { return conn.CreateClientVpnRoute(ctx, input) }, errCodeInvalidClientVPNActiveAssociationNotFound) diff --git a/internal/service/ec2/vpnclient_test.go b/internal/service/ec2/vpnclient_test.go index 536a9dc01913..e4c39ef664dd 100644 --- a/internal/service/ec2/vpnclient_test.go +++ b/internal/service/ec2/vpnclient_test.go @@ -37,6 +37,8 @@ func TestAccClientVPNEndpoint_serial(t *testing.T) { "selfServicePortal": testAccClientVPNEndpoint_selfServicePortal, "vpcNoSecurityGroups": testAccClientVPNEndpoint_vpcNoSecurityGroups, "vpcSecurityGroups": testAccClientVPNEndpoint_vpcSecurityGroups, + "endpointIpAddressType": testAccClientVPNEndpoint_endpointIPAddressType, + "trafficIpAddressType": testAccClientVPNEndpoint_trafficIPAddressType, "basicDataSource": testAccClientVPNEndpointDataSource_basic, }, "AuthorizationRule": { diff --git a/internal/service/ec2/vpnsite_connection_data_source.go b/internal/service/ec2/vpnsite_connection_data_source.go new file mode 100644 index 000000000000..d25fa5ecf1f0 --- /dev/null +++ b/internal/service/ec2/vpnsite_connection_data_source.go @@ -0,0 +1,159 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package ec2 + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/service/ec2" + awstypes "github.com/aws/aws-sdk-go-v2/service/ec2/types" + "github.com/hashicorp/terraform-plugin-framework-timetypes/timetypes" + "github.com/hashicorp/terraform-plugin-framework-validators/datasourcevalidator" + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-provider-aws/internal/framework" + "github.com/hashicorp/terraform-provider-aws/internal/framework/flex" + fwtypes "github.com/hashicorp/terraform-provider-aws/internal/framework/types" + "github.com/hashicorp/terraform-provider-aws/internal/smerr" + tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" + "github.com/hashicorp/terraform-provider-aws/names" +) + +// @FrameworkDataSource("aws_vpn_connection", name="VPN Connection") +func newDataSourceVPNConnection(context.Context) (datasource.DataSourceWithConfigure, error) { + return &dataSourceVPNConnection{}, nil +} + +const ( + DSNameVPNConnection = "VPN Connection Data Source" +) + +type dataSourceVPNConnection struct { + framework.DataSourceWithModel[dataSourceVPNConnectionModel] +} + +func (d *dataSourceVPNConnection) Schema(ctx context.Context, req datasource.SchemaRequest, resp *datasource.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: map[string]schema.Attribute{ + "category": schema.StringAttribute{ + Computed: true, + }, + "core_network_arn": schema.StringAttribute{ + Computed: true, + }, + "core_network_attachment_arn": schema.StringAttribute{ + Computed: true, + }, + "customer_gateway_configuration": schema.StringAttribute{ + Computed: true, + }, + "customer_gateway_id": schema.StringAttribute{ + Computed: true, + }, + "gateway_association_state": 
schema.StringAttribute{ + CustomType: fwtypes.StringEnumType[awstypes.GatewayAssociationState](), + Computed: true, + }, + "pre_shared_key_arn": schema.StringAttribute{ + Computed: true, + }, + "routes": framework.DataSourceComputedListOfObjectAttribute[routeModel](ctx), + names.AttrState: schema.StringAttribute{ + CustomType: fwtypes.StringEnumType[awstypes.VpnState](), + Computed: true, + }, + names.AttrTags: tftags.TagsAttributeComputedOnly(), + names.AttrTransitGatewayID: schema.StringAttribute{ + Computed: true, + }, + names.AttrType: schema.StringAttribute{ + Computed: true, + }, + "vgw_telemetries": framework.DataSourceComputedListOfObjectAttribute[vgwTelemetryModel](ctx), + "vpn_connection_id": schema.StringAttribute{ + Optional: true, + Computed: true, + }, + "vpn_gateway_id": schema.StringAttribute{ + Computed: true, + }, + }, + Blocks: map[string]schema.Block{ + names.AttrFilter: customFiltersBlock(ctx), + }, + } +} + +func (d *dataSourceVPNConnection) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + conn := d.Meta().EC2Client(ctx) + var data dataSourceVPNConnectionModel + smerr.EnrichAppend(ctx, &resp.Diagnostics, req.Config.Get(ctx, &data)) + if resp.Diagnostics.HasError() { + return + } + + input := ec2.DescribeVpnConnectionsInput{} + smerr.EnrichAppend(ctx, &resp.Diagnostics, flex.Expand(ctx, data, &input, flex.WithIgnoredFieldNamesAppend("VpnConnectionId")), smerr.ID) + + if !data.VpnConnectionId.IsNull() && !data.VpnConnectionId.IsUnknown() { + input.VpnConnectionIds = []string{data.VpnConnectionId.ValueString()} + } + + out, err := findVPNConnection(ctx, conn, &input) + if err != nil { + smerr.AddError(ctx, &resp.Diagnostics, err, smerr.ID) + return + } + + smerr.EnrichAppend(ctx, &resp.Diagnostics, flex.Flatten(ctx, out, &data), smerr.ID, data.VpnConnectionId.String()) + if resp.Diagnostics.HasError() { + return + } + smerr.EnrichAppend(ctx, &resp.Diagnostics, resp.State.Set(ctx, &data), smerr.ID, 
data.VpnConnectionId.String()) +} + +func (d *dataSourceVPNConnection) ConfigValidators(_ context.Context) []datasource.ConfigValidator { + return []datasource.ConfigValidator{ + datasourcevalidator.AtLeastOneOf( + path.MatchRoot("vpn_connection_id"), + path.MatchRoot(names.AttrFilter), + ), + } +} + +type dataSourceVPNConnectionModel struct { + framework.WithRegionModel + Category types.String `tfsdk:"category"` + CoreNetworkArn types.String `tfsdk:"core_network_arn"` + CoreNetworkAttachmentArn types.String `tfsdk:"core_network_attachment_arn"` + CustomerGatewayConfiguration types.String `tfsdk:"customer_gateway_configuration"` + CustomerGatewayID types.String `tfsdk:"customer_gateway_id"` + Filters customFilters `tfsdk:"filter"` + GatewayAssociationState fwtypes.StringEnum[awstypes.GatewayAssociationState] `tfsdk:"gateway_association_state"` + PreSharedKeyArn types.String `tfsdk:"pre_shared_key_arn"` + State fwtypes.StringEnum[awstypes.VpnState] `tfsdk:"state"` + TransitGatewayId types.String `tfsdk:"transit_gateway_id"` + Type types.String `tfsdk:"type"` + VpnGatewayId types.String `tfsdk:"vpn_gateway_id"` + Routes fwtypes.ListNestedObjectValueOf[routeModel] `tfsdk:"routes"` + VgwTelemetries fwtypes.ListNestedObjectValueOf[vgwTelemetryModel] `tfsdk:"vgw_telemetries"` + VpnConnectionId types.String `tfsdk:"vpn_connection_id"` + Tags tftags.Map `tfsdk:"tags"` +} + +type routeModel struct { + DestinationCidrBlock types.String `tfsdk:"destination_cidr_block"` + Source types.String `tfsdk:"source"` + State fwtypes.StringEnum[awstypes.VpnState] `tfsdk:"state"` +} + +type vgwTelemetryModel struct { + AcceptedRouteCount types.Int64 `tfsdk:"accepted_route_count"` + LastStatusChange timetypes.RFC3339 `tfsdk:"last_status_change"` + Status fwtypes.StringEnum[awstypes.TelemetryStatus] `tfsdk:"status"` + StatusMessage types.String `tfsdk:"status_message"` + OutsideIpAddress types.String `tfsdk:"outside_ip_address"` +} diff --git 
a/internal/service/ec2/vpnsite_connection_data_source_test.go b/internal/service/ec2/vpnsite_connection_data_source_test.go new file mode 100644 index 000000000000..305ded782967 --- /dev/null +++ b/internal/service/ec2/vpnsite_connection_data_source_test.go @@ -0,0 +1,178 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package ec2_test + +import ( + "fmt" + "testing" + + "github.com/YakDriver/regexache" + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccSiteVPNConnectionDataSource_basic(t *testing.T) { + ctx := acctest.Context(t) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rBgpAsn := sdkacctest.RandIntRange(65501, 65534) + dataSourceName := "data.aws_vpn_connection.test" + resourceName := "aws_vpn_connection.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckVPNConnectionDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccVPNConnectionDataSourceConfig_byId(rName, rBgpAsn), + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttrPair(dataSourceName, "vpn_connection_id", resourceName, names.AttrID), + resource.TestCheckResourceAttrPair(dataSourceName, "customer_gateway_id", resourceName, "customer_gateway_id"), + resource.TestCheckResourceAttrPair(dataSourceName, "vpn_gateway_id", resourceName, "vpn_gateway_id"), + resource.TestCheckResourceAttrPair(dataSourceName, names.AttrType, resourceName, names.AttrType), + resource.TestCheckResourceAttrSet(dataSourceName, names.AttrState), + resource.TestCheckResourceAttrSet(dataSourceName, 
"customer_gateway_configuration"), + resource.TestCheckResourceAttrSet(dataSourceName, "category"), + resource.TestCheckResourceAttr(dataSourceName, "routes.#", "0"), + resource.TestCheckResourceAttr(dataSourceName, "vgw_telemetries.#", "2"), + ), + }, + }, + }) +} + +func TestAccSiteVPNConnectionDataSource_byFilter(t *testing.T) { + ctx := acctest.Context(t) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rBgpAsn := sdkacctest.RandIntRange(65501, 65534) + dataSourceName := "data.aws_vpn_connection.test" + resourceName := "aws_vpn_connection.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckVPNConnectionDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccVPNConnectionDataSourceConfig_byFilter(rName, rBgpAsn), + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttrPair(dataSourceName, "vpn_connection_id", resourceName, names.AttrID), + resource.TestCheckResourceAttrPair(dataSourceName, "customer_gateway_id", resourceName, "customer_gateway_id"), + resource.TestCheckResourceAttrPair(dataSourceName, "vpn_gateway_id", resourceName, "vpn_gateway_id"), + resource.TestCheckResourceAttrPair(dataSourceName, names.AttrType, resourceName, names.AttrType), + resource.TestCheckResourceAttrSet(dataSourceName, names.AttrState), + resource.TestCheckResourceAttrSet(dataSourceName, "customer_gateway_configuration"), + ), + }, + }, + }) +} + +func TestAccSiteVPNConnectionDataSource_nonExistentId(t *testing.T) { + ctx := acctest.Context(t) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + Config: 
testAccVPNConnectionDataSourceConfig_nonExistentId(), + ExpectError: regexache.MustCompile(`couldn't find resource`), + }, + }, + }) +} + +func TestAccSiteVPNConnectionDataSource_noInput(t *testing.T) { + ctx := acctest.Context(t) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + Config: testAccVPNConnectionDataSourceConfig_noInput(), + ExpectError: regexache.MustCompile(`Missing Attribute Configuration`), + }, + }, + }) +} + +func testAccVPNConnectionDataSourceConfigBase(rName string, rBgpAsn int) string { + return fmt.Sprintf(` +resource "aws_vpn_gateway" "test" { + tags = { + Name = %[1]q + } +} + +resource "aws_customer_gateway" "test" { + bgp_asn = %[2]d + ip_address = "178.0.0.1" + type = "ipsec.1" + + tags = { + Name = %[1]q + } +} + +resource "aws_vpn_connection" "test" { + vpn_gateway_id = aws_vpn_gateway.test.id + customer_gateway_id = aws_customer_gateway.test.id + type = "ipsec.1" +} +`, rName, rBgpAsn) +} + +func testAccVPNConnectionDataSourceConfig_byId(rName string, rBgpAsn int) string { + return acctest.ConfigCompose( + testAccVPNConnectionDataSourceConfigBase(rName, rBgpAsn), + ` +data "aws_vpn_connection" "test" { + vpn_connection_id = aws_vpn_connection.test.id +} +`) +} + +func testAccVPNConnectionDataSourceConfig_byFilter(rName string, rBgpAsn int) string { + return acctest.ConfigCompose( + testAccVPNConnectionDataSourceConfigBase(rName, rBgpAsn), + ` +data "aws_vpn_connection" "test" { + filter { + name = "customer-gateway-id" + values = [aws_customer_gateway.test.id] + } + + filter { + name = "vpn-gateway-id" + values = [aws_vpn_gateway.test.id] + } + + depends_on = [aws_vpn_connection.test] +} +`) +} + +func testAccVPNConnectionDataSourceConfig_nonExistentId() string { + return ` +data "aws_vpn_connection" "test" { + vpn_connection_id = 
"vpn-12345678901234567" +} +` +} + +func testAccVPNConnectionDataSourceConfig_noInput() string { + return ` +data "aws_vpn_connection" "test" { + # No vpn_connection_id or filter specified +} +` +} diff --git a/internal/service/ec2/vpnsite_customer_gateway.go b/internal/service/ec2/vpnsite_customer_gateway.go index af9e9adfbcd5..259dbc334363 100644 --- a/internal/service/ec2/vpnsite_customer_gateway.go +++ b/internal/service/ec2/vpnsite_customer_gateway.go @@ -5,12 +5,10 @@ package ec2 import ( "context" - "fmt" "log" "strconv" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/arn" "github.com/aws/aws-sdk-go-v2/service/ec2" awstypes "github.com/aws/aws-sdk-go-v2/service/ec2/types" "github.com/hashicorp/aws-sdk-go-base/v2/tfawserr" @@ -147,7 +145,8 @@ func resourceCustomerGatewayCreate(ctx context.Context, d *schema.ResourceData, func resourceCustomerGatewayRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).EC2Client(ctx) + c := meta.(*conns.AWSClient) + conn := c.EC2Client(ctx) customerGateway, err := findCustomerGatewayByID(ctx, conn, d.Id()) @@ -161,14 +160,7 @@ func resourceCustomerGatewayRead(ctx context.Context, d *schema.ResourceData, me return sdkdiag.AppendErrorf(diags, "reading EC2 Customer Gateway (%s): %s", d.Id(), err) } - arn := arn.ARN{ - Partition: meta.(*conns.AWSClient).Partition(ctx), - Service: names.EC2, - Region: meta.(*conns.AWSClient).Region(ctx), - AccountID: meta.(*conns.AWSClient).AccountID(ctx), - Resource: fmt.Sprintf("customer-gateway/%s", d.Id()), - }.String() - d.Set(names.AttrARN, arn) + d.Set(names.AttrARN, customerGatewayARN(ctx, c, d.Id())) d.Set("bgp_asn", customerGateway.BgpAsn) d.Set("bgp_asn_extended", customerGateway.BgpAsnExtended) d.Set(names.AttrCertificateARN, customerGateway.CertificateArn) @@ -210,3 +202,7 @@ func resourceCustomerGatewayDelete(ctx context.Context, d *schema.ResourceData, return diags } + +func 
customerGatewayARN(ctx context.Context, c *conns.AWSClient, customerGatewayID string) string { + return c.RegionalARN(ctx, names.EC2, "customer-gateway/"+customerGatewayID) +} diff --git a/internal/service/ec2/vpnsite_customer_gateway_data_source.go b/internal/service/ec2/vpnsite_customer_gateway_data_source.go index 4c33af33c744..fd33a3f5eb12 100644 --- a/internal/service/ec2/vpnsite_customer_gateway_data_source.go +++ b/internal/service/ec2/vpnsite_customer_gateway_data_source.go @@ -5,12 +5,10 @@ package ec2 import ( "context" - "fmt" "strconv" "time" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/arn" "github.com/aws/aws-sdk-go-v2/service/ec2" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" @@ -74,7 +72,8 @@ func dataSourceCustomerGateway() *schema.Resource { func dataSourceCustomerGatewayRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).EC2Client(ctx) + c := meta.(*conns.AWSClient) + conn := c.EC2Client(ctx) input := ec2.DescribeCustomerGatewaysInput{} @@ -93,15 +92,7 @@ func dataSourceCustomerGatewayRead(ctx context.Context, d *schema.ResourceData, } d.SetId(aws.ToString(cgw.CustomerGatewayId)) - - arn := arn.ARN{ - Partition: meta.(*conns.AWSClient).Partition(ctx), - Service: names.EC2, - Region: meta.(*conns.AWSClient).Region(ctx), - AccountID: meta.(*conns.AWSClient).AccountID(ctx), - Resource: fmt.Sprintf("customer-gateway/%s", d.Id()), - }.String() - d.Set(names.AttrARN, arn) + d.Set(names.AttrARN, customerGatewayARN(ctx, c, d.Id())) if v := aws.ToString(cgw.BgpAsn); v != "" { v, err := strconv.ParseInt(v, 0, 0) diff --git a/internal/service/ec2/vpnsite_gateway.go b/internal/service/ec2/vpnsite_gateway.go index 2cf9fbb3c73a..3a06aca9f0c0 100644 --- a/internal/service/ec2/vpnsite_gateway.go +++ b/internal/service/ec2/vpnsite_gateway.go @@ -10,7 +10,6 @@ import ( "time" 
"github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/arn" "github.com/aws/aws-sdk-go-v2/service/ec2" awstypes "github.com/aws/aws-sdk-go-v2/service/ec2/types" "github.com/hashicorp/aws-sdk-go-base/v2/tfawserr" @@ -104,9 +103,10 @@ func resourceVPNGatewayCreate(ctx context.Context, d *schema.ResourceData, meta func resourceVPNGatewayRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).EC2Client(ctx) + c := meta.(*conns.AWSClient) + conn := c.EC2Client(ctx) - outputRaw, err := tfresource.RetryWhenNewResourceNotFound(ctx, ec2PropagationTimeout, func() (any, error) { + vpnGateway, err := tfresource.RetryWhenNewResourceNotFound(ctx, ec2PropagationTimeout, func(ctx context.Context) (*awstypes.VpnGateway, error) { return findVPNGatewayByID(ctx, conn, d.Id()) }, d.IsNewResource()) @@ -120,17 +120,8 @@ func resourceVPNGatewayRead(ctx context.Context, d *schema.ResourceData, meta an return sdkdiag.AppendErrorf(diags, "reading EC2 VPN Gateway (%s): %s", d.Id(), err) } - vpnGateway := outputRaw.(*awstypes.VpnGateway) - d.Set("amazon_side_asn", flex.Int64ToStringValue(vpnGateway.AmazonSideAsn)) - arn := arn.ARN{ - Partition: meta.(*conns.AWSClient).Partition(ctx), - Service: names.EC2, - Region: meta.(*conns.AWSClient).Region(ctx), - AccountID: meta.(*conns.AWSClient).AccountID(ctx), - Resource: fmt.Sprintf("vpn-gateway/%s", d.Id()), - }.String() - d.Set(names.AttrARN, arn) + d.Set(names.AttrARN, vpnGatewayARN(ctx, c, d.Id())) if aws.ToString(vpnGateway.AvailabilityZone) != "" { d.Set(names.AttrAvailabilityZone, vpnGateway.AvailabilityZone) } @@ -183,7 +174,7 @@ func resourceVPNGatewayDelete(ctx context.Context, d *schema.ResourceData, meta const ( timeout = 5 * time.Minute ) - _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, timeout, func() (any, error) { + _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, timeout, func(ctx context.Context) (any, error) { return 
conn.DeleteVpnGateway(ctx, &ec2.DeleteVpnGatewayInput{ VpnGatewayId: aws.String(d.Id()), }) @@ -210,7 +201,7 @@ func attachVPNGatewayToVPC(ctx context.Context, conn *ec2.Client, vpnGatewayID, VpnGatewayId: aws.String(vpnGatewayID), } - _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, ec2PropagationTimeout, func() (any, error) { + _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, ec2PropagationTimeout, func(ctx context.Context) (any, error) { return conn.AttachVpnGateway(ctx, &input) }, errCodeInvalidVPNGatewayIDNotFound) @@ -247,3 +238,7 @@ func detachVPNGatewayFromVPC(ctx context.Context, conn *ec2.Client, vpnGatewayID return nil } + +func vpnGatewayARN(ctx context.Context, c *conns.AWSClient, vpnGatewayID string) string { + return c.RegionalARN(ctx, names.EC2, "vpn-gateway/"+vpnGatewayID) +} diff --git a/internal/service/ec2/vpnsite_gateway_data_source.go b/internal/service/ec2/vpnsite_gateway_data_source.go index 54b8ec3bf892..27144a5da1f7 100644 --- a/internal/service/ec2/vpnsite_gateway_data_source.go +++ b/internal/service/ec2/vpnsite_gateway_data_source.go @@ -5,12 +5,10 @@ package ec2 import ( "context" - "fmt" "strconv" "time" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/arn" "github.com/aws/aws-sdk-go-v2/service/ec2" awstypes "github.com/aws/aws-sdk-go-v2/service/ec2/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" @@ -71,7 +69,8 @@ func dataSourceVPNGateway() *schema.Resource { func dataSourceVPNGatewayRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).EC2Client(ctx) + c := meta.(*conns.AWSClient) + conn := c.EC2Client(ctx) input := ec2.DescribeVpnGatewaysInput{} @@ -118,16 +117,8 @@ func dataSourceVPNGatewayRead(ctx context.Context, d *schema.ResourceData, meta } d.SetId(aws.ToString(vgw.VpnGatewayId)) - d.Set("amazon_side_asn", strconv.FormatInt(aws.ToInt64(vgw.AmazonSideAsn), 10)) - arn := arn.ARN{ - Partition: 
meta.(*conns.AWSClient).Partition(ctx), - Service: names.EC2, - Region: meta.(*conns.AWSClient).Region(ctx), - AccountID: meta.(*conns.AWSClient).AccountID(ctx), - Resource: fmt.Sprintf("vpn-gateway/%s", d.Id()), - }.String() - d.Set(names.AttrARN, arn) + d.Set(names.AttrARN, vpnGatewayARN(ctx, c, d.Id())) for _, attachment := range vgw.VpcAttachments { if attachment.State == awstypes.AttachmentStatusAttached { d.Set("attached_vpc_id", attachment.VpcId) diff --git a/internal/service/ec2/wait.go b/internal/service/ec2/wait.go index 7c090f2a46fb..3a2ba186485d 100644 --- a/internal/service/ec2/wait.go +++ b/internal/service/ec2/wait.go @@ -652,6 +652,132 @@ func waitImageDeleted(ctx context.Context, conn *ec2.Client, id string, timeout return nil, err } +func waitInstanceCreated(ctx context.Context, conn *ec2.Client, id string, timeout time.Duration) (*awstypes.Instance, error) { + stateConf := &retry.StateChangeConf{ + Pending: enum.Slice(awstypes.InstanceStateNamePending), + Target: enum.Slice(awstypes.InstanceStateNameRunning), + Refresh: statusInstance(ctx, conn, id), + Timeout: timeout, + Delay: 10 * time.Second, + MinTimeout: 3 * time.Second, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + + if output, ok := outputRaw.(*awstypes.Instance); ok { + if stateReason := output.StateReason; stateReason != nil { + tfresource.SetLastError(err, errors.New(aws.ToString(stateReason.Message))) + } + + return output, err + } + + return nil, err +} + +func waitInstanceDeleted(ctx context.Context, conn *ec2.Client, id string, timeout time.Duration) (*awstypes.Instance, error) { + stateConf := &retry.StateChangeConf{ + Pending: enum.Slice( + awstypes.InstanceStateNamePending, + awstypes.InstanceStateNameRunning, + awstypes.InstanceStateNameShuttingDown, + awstypes.InstanceStateNameStopping, + awstypes.InstanceStateNameStopped, + ), + Target: enum.Slice(awstypes.InstanceStateNameTerminated), + Refresh: statusInstance(ctx, conn, id), + Timeout: timeout, + Delay: 10 
* time.Second, + MinTimeout: 3 * time.Second, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + + if output, ok := outputRaw.(*awstypes.Instance); ok { + if stateReason := output.StateReason; stateReason != nil { + tfresource.SetLastError(err, errors.New(aws.ToString(stateReason.Message))) + } + + return output, err + } + + return nil, err +} + +func waitInstanceReady(ctx context.Context, conn *ec2.Client, id string, timeout time.Duration) (*awstypes.Instance, error) { + stateConf := &retry.StateChangeConf{ + Pending: enum.Slice(awstypes.InstanceStateNamePending, awstypes.InstanceStateNameStopping), + Target: enum.Slice(awstypes.InstanceStateNameRunning, awstypes.InstanceStateNameStopped), + Refresh: statusInstance(ctx, conn, id), + Timeout: timeout, + Delay: 10 * time.Second, + MinTimeout: 3 * time.Second, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + + if output, ok := outputRaw.(*awstypes.Instance); ok { + if stateReason := output.StateReason; stateReason != nil { + tfresource.SetLastError(err, errors.New(aws.ToString(stateReason.Message))) + } + + return output, err + } + + return nil, err +} + +func waitInstanceStarted(ctx context.Context, conn *ec2.Client, id string, timeout time.Duration) (*awstypes.Instance, error) { + stateConf := &retry.StateChangeConf{ + Pending: enum.Slice(awstypes.InstanceStateNamePending, awstypes.InstanceStateNameStopped), + Target: enum.Slice(awstypes.InstanceStateNameRunning), + Refresh: statusInstance(ctx, conn, id), + Timeout: timeout, + Delay: 10 * time.Second, + MinTimeout: 3 * time.Second, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + + if output, ok := outputRaw.(*awstypes.Instance); ok { + if stateReason := output.StateReason; stateReason != nil { + tfresource.SetLastError(err, errors.New(aws.ToString(stateReason.Message))) + } + + return output, err + } + + return nil, err +} + +func waitInstanceStopped(ctx context.Context, conn *ec2.Client, id string, timeout time.Duration) 
(*awstypes.Instance, error) { + stateConf := &retry.StateChangeConf{ + Pending: enum.Slice( + awstypes.InstanceStateNamePending, + awstypes.InstanceStateNameRunning, + awstypes.InstanceStateNameShuttingDown, + awstypes.InstanceStateNameStopping, + ), + Target: enum.Slice(awstypes.InstanceStateNameStopped), + Refresh: statusInstance(ctx, conn, id), + Timeout: timeout, + Delay: 10 * time.Second, + MinTimeout: 3 * time.Second, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + + if output, ok := outputRaw.(*awstypes.Instance); ok { + if stateReason := output.StateReason; stateReason != nil { + tfresource.SetLastError(err, errors.New(aws.ToString(stateReason.Message))) + } + + return output, err + } + + return nil, err +} + func waitInstanceCapacityReservationSpecificationUpdated(ctx context.Context, conn *ec2.Client, instanceID string, expectedValue *awstypes.CapacityReservationSpecification) (*awstypes.Instance, error) { stateConf := &retry.StateChangeConf{ Target: enum.Slice(strconv.FormatBool(true)), @@ -1125,6 +1251,22 @@ func waitIPAMUpdated(ctx context.Context, conn *ec2.Client, id string, timeout t return nil, err } +func waitLaunchTemplateReady(ctx context.Context, conn *ec2.Client, id string, idIsName bool, timeout time.Duration) error { + stateConf := &retry.StateChangeConf{ + Pending: []string{""}, + Target: enum.Slice(launchTemplateFoundStatus), + Refresh: statusLaunchTemplate(ctx, conn, id, idIsName), + Timeout: timeout, + Delay: 5 * time.Second, + NotFoundChecks: 5, + ContinuousTargetOccurence: 3, + } + + _, err := stateConf.WaitForStateContext(ctx) + + return err +} + func waitLocalGatewayRouteDeleted(ctx context.Context, conn *ec2.Client, localGatewayRouteTableID, destinationCIDRBlock string) (*awstypes.LocalGatewayRoute, error) { const ( timeout = 5 * time.Minute @@ -1271,10 +1413,11 @@ func waitNATGatewayAddressAssigned(ctx context.Context, conn *ec2.Client, natGat func waitNATGatewayAddressAssociated(ctx context.Context, conn 
*ec2.Client, natGatewayID, allocationID string, timeout time.Duration) (*awstypes.NatGatewayAddress, error) { stateConf := &retry.StateChangeConf{ - Pending: enum.Slice(awstypes.NatGatewayAddressStatusAssociating), - Target: enum.Slice(awstypes.NatGatewayAddressStatusSucceeded), - Refresh: statusNATGatewayAddressByNATGatewayIDAndAllocationID(ctx, conn, natGatewayID, allocationID), - Timeout: timeout, + Pending: enum.Slice(awstypes.NatGatewayAddressStatusAssociating), + Target: enum.Slice(awstypes.NatGatewayAddressStatusSucceeded), + Refresh: statusNATGatewayAddressByNATGatewayIDAndAllocationID(ctx, conn, natGatewayID, allocationID), + Timeout: timeout, + ContinuousTargetOccurence: 5, } outputRaw, err := stateConf.WaitForStateContext(ctx) @@ -1290,12 +1433,13 @@ func waitNATGatewayAddressAssociated(ctx context.Context, conn *ec2.Client, natG return nil, err } -func waitNATGatewayAddressDisassociated(ctx context.Context, conn *ec2.Client, natGatewayID, allocationID string, timeout time.Duration) (*awstypes.NatGatewayAddress, error) { +func waitNATGatewayAddressDisassociated(ctx context.Context, conn *ec2.Client, natGatewayID, allocationID string, timeout time.Duration) (*awstypes.NatGatewayAddress, error) { //nolint:unparam stateConf := &retry.StateChangeConf{ - Pending: enum.Slice(awstypes.NatGatewayAddressStatusSucceeded, awstypes.NatGatewayAddressStatusDisassociating), - Target: []string{}, - Refresh: statusNATGatewayAddressByNATGatewayIDAndAllocationID(ctx, conn, natGatewayID, allocationID), - Timeout: timeout, + Pending: enum.Slice(awstypes.NatGatewayAddressStatusSucceeded, awstypes.NatGatewayAddressStatusDisassociating), + Target: []string{}, + Refresh: statusNATGatewayAddressByNATGatewayIDAndAllocationID(ctx, conn, natGatewayID, allocationID), + Timeout: timeout, + ContinuousTargetOccurence: 5, } outputRaw, err := stateConf.WaitForStateContext(ctx) @@ -2047,6 +2191,45 @@ func waitSubnetPrivateDNSHostnameTypeOnLaunchUpdated(ctx context.Context, conn * return 
nil, err } +func waitTransitGatewayAttachmentAccepted(ctx context.Context, conn *ec2.Client, id string, timeout time.Duration) (*awstypes.TransitGatewayAttachment, error) { + stateConf := &retry.StateChangeConf{ + Pending: enum.Slice(awstypes.TransitGatewayAttachmentStatePending, awstypes.TransitGatewayAttachmentStatePendingAcceptance), + Target: enum.Slice(awstypes.TransitGatewayAttachmentStateAvailable), + Timeout: timeout, + Refresh: statusTransitGatewayAttachment(ctx, conn, id), + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + + if output, ok := outputRaw.(*awstypes.TransitGatewayAttachment); ok { + return output, err + } + + return nil, err +} + +func waitTransitGatewayAttachmentDeleted(ctx context.Context, conn *ec2.Client, id string, timeout time.Duration) (*awstypes.TransitGatewayAttachment, error) { + stateConf := &retry.StateChangeConf{ + Pending: enum.Slice( + awstypes.TransitGatewayAttachmentStateAvailable, + awstypes.TransitGatewayAttachmentStateDeleting, + awstypes.TransitGatewayAttachmentStatePendingAcceptance, + awstypes.TransitGatewayAttachmentStateRejecting, + ), + Target: enum.Slice(awstypes.TransitGatewayAttachmentStateDeleted), + Timeout: timeout, + Refresh: statusTransitGatewayAttachment(ctx, conn, id), + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + + if output, ok := outputRaw.(*awstypes.TransitGatewayAttachment); ok { + return output, err + } + + return nil, err +} + func waitTransitGatewayConnectCreated(ctx context.Context, conn *ec2.Client, id string, timeout time.Duration) (*awstypes.TransitGatewayConnect, error) { stateConf := &retry.StateChangeConf{ Pending: enum.Slice(awstypes.TransitGatewayAttachmentStatePending), @@ -2139,6 +2322,8 @@ func waitTransitGatewayDeleted(ctx context.Context, conn *ec2.Client, id string, Target: []string{}, Refresh: statusTransitGateway(ctx, conn, id), Timeout: timeout, + Delay: 2 * time.Minute, + MinTimeout: 10 * time.Second, NotFoundChecks: 1, } @@ -2261,7 +2446,7 @@ func 
waitTransitGatewayPeeringAttachmentCreated(ctx context.Context, conn *ec2.C return nil, err } -func waitTransitGatewayPeeringAttachmentDeleted(ctx context.Context, conn *ec2.Client, id string) error { +func waitTransitGatewayPeeringAttachmentDeleted(ctx context.Context, conn *ec2.Client, id string) (*awstypes.TransitGatewayPeeringAttachment, error) { //nolint:unparam stateConf := &retry.StateChangeConf{ Pending: enum.Slice( awstypes.TransitGatewayAttachmentStateAvailable, @@ -2280,9 +2465,11 @@ func waitTransitGatewayPeeringAttachmentDeleted(ctx context.Context, conn *ec2.C if status := output.Status; status != nil { tfresource.SetLastError(err, fmt.Errorf("%s: %s", aws.ToString(status.Code), aws.ToString(status.Message))) } + + return output, err } - return err + return nil, err } func waitTransitGatewayPrefixListReferenceStateCreated(ctx context.Context, conn *ec2.Client, transitGatewayRouteTableID string, prefixListID string) (*awstypes.TransitGatewayPrefixListReference, error) { @@ -2693,6 +2880,76 @@ func waitVolumeAttachmentCreated(ctx context.Context, conn *ec2.Client, volumeID return nil, err } +func waitVolumeAttachmentDeleted(ctx context.Context, conn *ec2.Client, volumeID, instanceID, deviceName string, timeout time.Duration) (*awstypes.VolumeAttachment, error) { + stateConf := &retry.StateChangeConf{ + Pending: enum.Slice(awstypes.VolumeAttachmentStateDetaching), + Target: []string{}, + Refresh: statusVolumeAttachment(ctx, conn, volumeID, instanceID, deviceName), + Timeout: timeout, + Delay: 10 * time.Second, + MinTimeout: 3 * time.Second, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + + if output, ok := outputRaw.(*awstypes.VolumeAttachment); ok { + return output, err + } + + return nil, err +} + +func waitVolumeAttachmentInstanceReady(ctx context.Context, conn *ec2.Client, id string, timeout time.Duration) (*awstypes.Instance, error) { + stateConf := &retry.StateChangeConf{ + Pending: enum.Slice(awstypes.InstanceStateNamePending, 
awstypes.InstanceStateNameStopping), + Target: enum.Slice(awstypes.InstanceStateNameRunning, awstypes.InstanceStateNameStopped), + Refresh: statusVolumeAttachmentInstanceState(ctx, conn, id), + Timeout: timeout, + Delay: 10 * time.Second, + MinTimeout: 3 * time.Second, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + + if output, ok := outputRaw.(*awstypes.Instance); ok { + if stateReason := output.StateReason; stateReason != nil { + tfresource.SetLastError(err, errors.New(aws.ToString(stateReason.Message))) + } + + return output, err + } + + return nil, err +} + +func waitVolumeAttachmentInstanceStopped(ctx context.Context, conn *ec2.Client, id string, timeout time.Duration) (*awstypes.Instance, error) { + stateConf := &retry.StateChangeConf{ + Pending: enum.Slice( + awstypes.InstanceStateNamePending, + awstypes.InstanceStateNameRunning, + awstypes.InstanceStateNameShuttingDown, + awstypes.InstanceStateNameStopping, + ), + Target: enum.Slice(awstypes.InstanceStateNameStopped), + Refresh: statusVolumeAttachmentInstanceState(ctx, conn, id), + Timeout: timeout, + Delay: 10 * time.Second, + MinTimeout: 3 * time.Second, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + + if output, ok := outputRaw.(*awstypes.Instance); ok { + if stateReason := output.StateReason; stateReason != nil { + tfresource.SetLastError(err, errors.New(aws.ToString(stateReason.Message))) + } + + return output, err + } + + return nil, err +} + func waitVolumeCreated(ctx context.Context, conn *ec2.Client, id string, timeout time.Duration) (*awstypes.Volume, error) { stateConf := &retry.StateChangeConf{ Pending: enum.Slice(awstypes.VolumeStateCreating), diff --git a/internal/service/ec2/wavelength_carrier_gateway.go b/internal/service/ec2/wavelength_carrier_gateway.go index ea38c98eca3c..fe33a7897aa1 100644 --- a/internal/service/ec2/wavelength_carrier_gateway.go +++ b/internal/service/ec2/wavelength_carrier_gateway.go @@ -5,11 +5,9 @@ package ec2 import ( "context" - 
"fmt" "log" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/arn" "github.com/aws/aws-sdk-go-v2/service/ec2" awstypes "github.com/aws/aws-sdk-go-v2/service/ec2/types" "github.com/hashicorp/aws-sdk-go-base/v2/tfawserr" @@ -84,7 +82,8 @@ func resourceCarrierGatewayCreate(ctx context.Context, d *schema.ResourceData, m func resourceCarrierGatewayRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).EC2Client(ctx) + c := meta.(*conns.AWSClient) + conn := c.EC2Client(ctx) carrierGateway, err := findCarrierGatewayByID(ctx, conn, d.Id()) @@ -99,14 +98,7 @@ func resourceCarrierGatewayRead(ctx context.Context, d *schema.ResourceData, met } ownerID := aws.ToString(carrierGateway.OwnerId) - arn := arn.ARN{ - Partition: meta.(*conns.AWSClient).Partition(ctx), - Service: names.EC2, - Region: meta.(*conns.AWSClient).Region(ctx), - AccountID: ownerID, - Resource: fmt.Sprintf("carrier-gateway/%s", d.Id()), - }.String() - d.Set(names.AttrARN, arn) + d.Set(names.AttrARN, carrierGatewayARN(ctx, c, ownerID, d.Id())) d.Set(names.AttrOwnerID, ownerID) d.Set(names.AttrVPCID, carrierGateway.VpcId) @@ -147,3 +139,7 @@ func resourceCarrierGatewayDelete(ctx context.Context, d *schema.ResourceData, m return diags } + +func carrierGatewayARN(ctx context.Context, c *conns.AWSClient, accountID, carrierGatewayID string) string { + return c.RegionalARNWithAccount(ctx, names.EC2, accountID, "carrier-gateway/"+carrierGatewayID) +} diff --git a/internal/service/ecr/generate.go b/internal/service/ecr/generate.go index c56b40a0d164..9000de50d094 100644 --- a/internal/service/ecr/generate.go +++ b/internal/service/ecr/generate.go @@ -3,6 +3,7 @@ //go:generate go run ../../generate/tags/main.go -ListTags -ServiceTagsSlice -UpdateTags -CreateTags //go:generate go run ../../generate/servicepackage/main.go +//go:generate go run ../../generate/identitytests/main.go // ONLY generate directives and 
package declaration! Do not add anything else to this file. package ecr diff --git a/internal/service/ecr/images_data_source.go b/internal/service/ecr/images_data_source.go new file mode 100644 index 000000000000..afa3cc9cb6f2 --- /dev/null +++ b/internal/service/ecr/images_data_source.go @@ -0,0 +1,110 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package ecr + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/service/ecr" + awstypes "github.com/aws/aws-sdk-go-v2/service/ecr/types" + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" + "github.com/hashicorp/terraform-provider-aws/internal/errs" + "github.com/hashicorp/terraform-provider-aws/internal/framework" + fwflex "github.com/hashicorp/terraform-provider-aws/internal/framework/flex" + fwtypes "github.com/hashicorp/terraform-provider-aws/internal/framework/types" + "github.com/hashicorp/terraform-provider-aws/names" +) + +// @FrameworkDataSource("aws_ecr_images", name="Images") +func newImagesDataSource(context.Context) (datasource.DataSourceWithConfigure, error) { + return &imagesDataSource{}, nil +} + +type imagesDataSource struct { + framework.DataSourceWithModel[imagesDataSourceModel] +} + +func (d *imagesDataSource) Schema(ctx context.Context, request datasource.SchemaRequest, response *datasource.SchemaResponse) { + response.Schema = schema.Schema{ + Attributes: map[string]schema.Attribute{ + "image_ids": framework.DataSourceComputedListOfObjectAttribute[imagesIDsModel](ctx), + "registry_id": schema.StringAttribute{ + Optional: true, + Description: "ID of the registry (AWS account ID)", + }, + names.AttrRepositoryName: schema.StringAttribute{ + Required: true, + Description: "Name of the repository", + }, + }, + } +} + +func (d *imagesDataSource) Read(ctx context.Context, 
req datasource.ReadRequest, resp *datasource.ReadResponse) { + var data imagesDataSourceModel + resp.Diagnostics.Append(req.Config.Get(ctx, &data)...) + if resp.Diagnostics.HasError() { + return + } + + conn := d.Meta().ECRClient(ctx) + + var input ecr.ListImagesInput + resp.Diagnostics.Append(fwflex.Expand(ctx, &data, &input)...) + if resp.Diagnostics.HasError() { + return + } + + output, err := findImages(ctx, conn, &input) + if err != nil { + resp.Diagnostics.AddError("reading ECR Images", err.Error()) + return + } + + resp.Diagnostics.Append(fwflex.Flatten(ctx, output, &data.ImageIDs)...) + if resp.Diagnostics.HasError() { + return + } + + resp.Diagnostics.Append(resp.State.Set(ctx, &data)...) +} + +func findImages(ctx context.Context, conn *ecr.Client, input *ecr.ListImagesInput) ([]awstypes.ImageIdentifier, error) { + var output []awstypes.ImageIdentifier + + paginator := ecr.NewListImagesPaginator(conn, input) + for paginator.HasMorePages() { + page, err := paginator.NextPage(ctx) + + if errs.IsA[*awstypes.RepositoryNotFoundException](err) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, + } + } + + if err != nil { + return nil, err + } + + output = append(output, page.ImageIds...) + } + + return output, nil +} + +type imagesDataSourceModel struct { + framework.WithRegionModel + ImageIDs fwtypes.ListNestedObjectValueOf[imagesIDsModel] `tfsdk:"image_ids"` + RegistryID types.String `tfsdk:"registry_id"` + RepositoryName types.String `tfsdk:"repository_name"` +} + +type imagesIDsModel struct { + ImageDigest types.String `tfsdk:"image_digest"` + ImageTag types.String `tfsdk:"image_tag"` +} diff --git a/internal/service/ecr/images_data_source_test.go b/internal/service/ecr/images_data_source_test.go new file mode 100644 index 000000000000..d97f2cd2efbb --- /dev/null +++ b/internal/service/ecr/images_data_source_test.go @@ -0,0 +1,112 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package ecr_test + +import ( + "fmt" + "testing" + + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccECRImagesDataSource_basic(t *testing.T) { + ctx := acctest.Context(t) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + dataSourceName := "data.aws_ecr_images.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.ECRServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + Config: testAccImagesDataSourceConfig_basic(rName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr(dataSourceName, names.AttrRepositoryName, rName), + resource.TestCheckResourceAttr(dataSourceName, "image_ids.#", "0"), + ), + }, + }, + }) +} + +func TestAccECRImagesDataSource_registryID(t *testing.T) { + ctx := acctest.Context(t) + registryID := "137112412989" + repositoryName := "amazonlinux" + dataSourceName := "data.aws_ecr_images.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.ECRServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + Config: testAccImagesDataSourceConfig_registryID(registryID, repositoryName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr(dataSourceName, names.AttrRepositoryName, repositoryName), + resource.TestCheckResourceAttr(dataSourceName, "registry_id", registryID), + resource.TestCheckResourceAttrSet(dataSourceName, "image_ids.#"), + // Check that we have at least one image with the "latest" tag + 
resource.TestCheckTypeSetElemNestedAttrs(dataSourceName, "image_ids.*", map[string]string{ + "image_tag": "latest", + }), + ), + }, + }, + }) +} + +func testAccImagesDataSourceConfig_basic(rName string) string { + return fmt.Sprintf(` +resource "aws_ecr_repository" "test" { + name = %[1]q +} + +resource "aws_ecr_repository_policy" "test" { + repository = aws_ecr_repository.test.name + + policy = jsonencode({ + Version = "2008-10-17" + Statement = [{ + Sid = "new policy" + Effect = "Allow" + Principal = "*" + Action = [ + "ecr:GetDownloadUrlForLayer", + "ecr:BatchGetImage", + "ecr:BatchCheckLayerAvailability", + "ecr:PutImage", + "ecr:InitiateLayerUpload", + "ecr:UploadLayerPart", + "ecr:CompleteLayerUpload", + "ecr:DescribeRepositories", + "ecr:GetRepositoryPolicy", + "ecr:ListImages", + "ecr:DeleteRepository", + "ecr:BatchDeleteImage", + "ecr:SetRepositoryPolicy", + "ecr:DeleteRepositoryPolicy" + ] + }] + }) +} + +data "aws_ecr_images" "test" { + repository_name = aws_ecr_repository.test.name +} +`, rName) +} + +func testAccImagesDataSourceConfig_registryID(registryID, repositoryName string) string { + return fmt.Sprintf(` +data "aws_ecr_images" "test" { + registry_id = %[1]q + repository_name = %[2]q +} +`, registryID, repositoryName) +} diff --git a/internal/service/ecr/lifecycle_policy.go b/internal/service/ecr/lifecycle_policy.go index c2bf61f7fdfe..4105a3ca4cfa 100644 --- a/internal/service/ecr/lifecycle_policy.go +++ b/internal/service/ecr/lifecycle_policy.go @@ -28,16 +28,15 @@ import ( ) // @SDKResource("aws_ecr_lifecycle_policy", name="Lifecycle Policy") +// @IdentityAttribute("repository") +// @Testing(preIdentityVersion="v6.10.0") +// @Testing(idAttrDuplicates="repository") func resourceLifecyclePolicy() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceLifecyclePolicyCreate, ReadWithoutTimeout: resourceLifecyclePolicyRead, DeleteWithoutTimeout: resourceLifecyclePolicyDelete, - Importer: &schema.ResourceImporter{ - StateContext: 
schema.ImportStatePassthroughContext, - }, - Schema: map[string]*schema.Schema{ names.AttrPolicy: { Type: schema.TypeString, @@ -94,7 +93,7 @@ func resourceLifecyclePolicyRead(ctx context.Context, d *schema.ResourceData, me var diags diag.Diagnostics conn := meta.(*conns.AWSClient).ECRClient(ctx) - outputRaw, err := tfresource.RetryWhenNewResourceNotFound(ctx, propagationTimeout, func() (any, error) { + output, err := tfresource.RetryWhenNewResourceNotFound(ctx, propagationTimeout, func(ctx context.Context) (*ecr.GetLifecyclePolicyOutput, error) { return findLifecyclePolicyByRepositoryName(ctx, conn, d.Id()) }, d.IsNewResource()) @@ -108,8 +107,6 @@ func resourceLifecyclePolicyRead(ctx context.Context, d *schema.ResourceData, me return sdkdiag.AppendErrorf(diags, "reading ECR Lifecycle Policy (%s): %s", d.Id(), err) } - output := outputRaw.(*ecr.GetLifecyclePolicyOutput) - if equivalent, err := equivalentLifecyclePolicyJSON(d.Get(names.AttrPolicy).(string), aws.ToString(output.LifecyclePolicyText)); err != nil { return sdkdiag.AppendFromErr(diags, err) } else if !equivalent { diff --git a/internal/service/ecr/lifecycle_policy_identity_gen_test.go b/internal/service/ecr/lifecycle_policy_identity_gen_test.go new file mode 100644 index 000000000000..0317708264ca --- /dev/null +++ b/internal/service/ecr/lifecycle_policy_identity_gen_test.go @@ -0,0 +1,310 @@ +// Code generated by internal/generate/identitytests/main.go; DO NOT EDIT. 
+ +package ecr_test + +import ( + "testing" + + "github.com/hashicorp/terraform-plugin-testing/compare" + "github.com/hashicorp/terraform-plugin-testing/config" + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/knownvalue" + "github.com/hashicorp/terraform-plugin-testing/plancheck" + "github.com/hashicorp/terraform-plugin-testing/statecheck" + "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" + "github.com/hashicorp/terraform-plugin-testing/tfversion" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + tfknownvalue "github.com/hashicorp/terraform-provider-aws/internal/acctest/knownvalue" + tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccECRLifecyclePolicy_Identity_Basic(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_ecr_lifecycle_policy.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.ECRServiceID), + CheckDestroy: testAccCheckLifecyclePolicyDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + // Step 1: Setup + { + ConfigDirectory: config.StaticDirectory("testdata/LifecyclePolicy/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckLifecyclePolicyExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, 
tfjsonpath.New("repository"), compare.ValuesSame()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + "repository": knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New("repository")), + }, + }, + + // Step 2: Import command + { + ConfigDirectory: config.StaticDirectory("testdata/LifecyclePolicy/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ImportStateKind: resource.ImportCommandWithID, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + + // Step 3: Import block with Import ID + { + ConfigDirectory: config.StaticDirectory("testdata/LifecyclePolicy/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New("repository"), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + }, + }, + }, + + // Step 4: Import block with Resource Identity + { + ConfigDirectory: config.StaticDirectory("testdata/LifecyclePolicy/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithResourceIdentity, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + 
plancheck.ExpectKnownValue(resourceName, tfjsonpath.New("repository"), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + }, + }, + }, + }, + }) +} + +func TestAccECRLifecyclePolicy_Identity_RegionOverride(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_ecr_lifecycle_policy.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.ECRServiceID), + CheckDestroy: acctest.CheckDestroyNoop, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + // Step 1: Setup + { + ConfigDirectory: config.StaticDirectory("testdata/LifecyclePolicy/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New("repository"), compare.ValuesSame()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.AlternateRegion()), + "repository": knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New("repository")), + }, + }, + + // Step 2: Import command + { + ConfigDirectory: config.StaticDirectory("testdata/LifecyclePolicy/region_override/"), 
+ ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ImportStateKind: resource.ImportCommandWithID, + ImportStateIdFunc: acctest.CrossRegionImportStateIdFunc(resourceName), + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + + // Step 3: Import block with Import ID + { + ConfigDirectory: config.StaticDirectory("testdata/LifecyclePolicy/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportStateIdFunc: acctest.CrossRegionImportStateIdFunc(resourceName), + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New("repository"), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + }, + + // Step 4: Import block with Resource Identity + { + ConfigDirectory: config.StaticDirectory("testdata/LifecyclePolicy/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithResourceIdentity, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New("repository"), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + }, + }, + }) +} + +// Resource Identity was added after v6.10.0 +func TestAccECRLifecyclePolicy_Identity_ExistingResource(t 
*testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_ecr_lifecycle_policy.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.ECRServiceID), + CheckDestroy: testAccCheckLifecyclePolicyDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/LifecyclePolicy/basic_v6.10.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckLifecyclePolicyExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/LifecyclePolicy/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + "repository": knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New("repository")), + }, + }, + }, + }) +} + +// Resource Identity was added after v6.10.0 +func 
TestAccECRLifecyclePolicy_Identity_ExistingResource_NoRefresh_NoChange(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_ecr_lifecycle_policy.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.ECRServiceID), + CheckDestroy: testAccCheckLifecyclePolicyDestroy(ctx), + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/LifecyclePolicy/basic_v6.10.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckLifecyclePolicyExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/LifecyclePolicy/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + }, + }) +} diff --git a/internal/service/ecr/repository.go b/internal/service/ecr/repository.go index fa1aea983358..c113b7d3cb6c 100644 --- a/internal/service/ecr/repository.go +++ 
b/internal/service/ecr/repository.go @@ -5,15 +5,20 @@ package ecr import ( "context" + "fmt" "log" + "strings" "time" + "github.com/YakDriver/regexache" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/ecr" "github.com/aws/aws-sdk-go-v2/service/ecr/types" + "github.com/hashicorp/go-cty/cty" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/enum" "github.com/hashicorp/terraform-provider-aws/internal/errs" @@ -26,6 +31,10 @@ import ( // @SDKResource("aws_ecr_repository", name="Repository") // @Tags(identifierAttribute="arn") +// @IdentityAttribute("name") +// @Testing(existsType="github.com/aws/aws-sdk-go-v2/service/ecr/types;types.Repository") +// @Testing(preIdentityVersion="v6.10.0") +// @Testing(idAttrDuplicates="name") func resourceRepository() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceRepositoryCreate, @@ -33,14 +42,12 @@ func resourceRepository() *schema.Resource { UpdateWithoutTimeout: resourceRepositoryUpdate, DeleteWithoutTimeout: resourceRepositoryDelete, - Importer: &schema.ResourceImporter{ - StateContext: schema.ImportStatePassthroughContext, - }, - Timeouts: &schema.ResourceTimeout{ Delete: schema.DefaultTimeout(20 * time.Minute), }, + CustomizeDiff: validateImageTagMutabilityExclusionFilterUsage, + Schema: map[string]*schema.Schema{ names.AttrARN: { Type: schema.TypeString, @@ -93,6 +100,32 @@ func resourceRepository() *schema.Resource { Default: types.ImageTagMutabilityMutable, ValidateDiagFunc: enum.Validate[types.ImageTagMutability](), }, + "image_tag_mutability_exclusion_filter": { + Type: schema.TypeList, + Optional: true, + MaxItems: 5, + Elem: &schema.Resource{ + Schema: 
map[string]*schema.Schema{ + names.AttrFilter: { + Type: schema.TypeString, + Required: true, + ValidateDiagFunc: validation.AllDiag( + validation.ToDiagFunc(validation.StringLenBetween(1, 128)), + validation.ToDiagFunc(validation.StringMatch( + regexache.MustCompile(`^[a-zA-Z0-9._*-]+$`), + "must contain only letters, numbers, and special characters (._*-)", + )), + validateImageTagMutabilityExclusionFilter(), + ), + }, + "filter_type": { + Type: schema.TypeString, + Required: true, + ValidateDiagFunc: enum.Validate[types.ImageTagMutabilityExclusionFilterType](), + }, + }, + }, + }, names.AttrName: { Type: schema.TypeString, Required: true, @@ -131,6 +164,10 @@ func resourceRepositoryCreate(ctx context.Context, d *schema.ResourceData, meta } } + if v, ok := d.GetOk("image_tag_mutability_exclusion_filter"); ok && len(v.([]any)) > 0 { + input.ImageTagMutabilityExclusionFilters = expandImageTagMutabilityExclusionFilters(v.([]any)) + } + output, err := conn.CreateRepository(ctx, input) // Some partitions (e.g. ISO) may not support tag-on-create. 
@@ -167,7 +204,7 @@ func resourceRepositoryRead(ctx context.Context, d *schema.ResourceData, meta an var diags diag.Diagnostics conn := meta.(*conns.AWSClient).ECRClient(ctx) - outputRaw, err := tfresource.RetryWhenNewResourceNotFound(ctx, propagationTimeout, func() (any, error) { + repository, err := tfresource.RetryWhenNewResourceNotFound(ctx, propagationTimeout, func(ctx context.Context) (*types.Repository, error) { return findRepositoryByName(ctx, conn, d.Id()) }, d.IsNewResource()) @@ -181,8 +218,6 @@ func resourceRepositoryRead(ctx context.Context, d *schema.ResourceData, meta an return sdkdiag.AppendErrorf(diags, "reading ECR Repository (%s): %s", d.Id(), err) } - repository := outputRaw.(*types.Repository) - d.Set(names.AttrARN, repository.RepositoryArn) if err := d.Set(names.AttrEncryptionConfiguration, flattenRepositoryEncryptionConfiguration(repository.EncryptionConfiguration)); err != nil { return sdkdiag.AppendErrorf(diags, "setting encryption_configuration: %s", err) @@ -191,6 +226,9 @@ func resourceRepositoryRead(ctx context.Context, d *schema.ResourceData, meta an return sdkdiag.AppendErrorf(diags, "setting image_scanning_configuration: %s", err) } d.Set("image_tag_mutability", repository.ImageTagMutability) + if err := d.Set("image_tag_mutability_exclusion_filter", flattenImageTagMutabilityExclusionFilters(repository.ImageTagMutabilityExclusionFilters)); err != nil { + return sdkdiag.AppendErrorf(diags, "setting image_tag_mutability_exclusion_filter: %s", err) + } d.Set(names.AttrName, repository.RepositoryName) d.Set("registry_id", repository.RegistryId) d.Set("repository_url", repository.RepositoryUri) @@ -202,13 +240,17 @@ func resourceRepositoryUpdate(ctx context.Context, d *schema.ResourceData, meta var diags diag.Diagnostics conn := meta.(*conns.AWSClient).ECRClient(ctx) - if d.HasChange("image_tag_mutability") { + if d.HasChanges("image_tag_mutability", "image_tag_mutability_exclusion_filter") { input := &ecr.PutImageTagMutabilityInput{ 
ImageTagMutability: types.ImageTagMutability((d.Get("image_tag_mutability").(string))), RegistryId: aws.String(d.Get("registry_id").(string)), RepositoryName: aws.String(d.Id()), } + if v, ok := d.GetOk("image_tag_mutability_exclusion_filter"); ok && len(v.([]any)) > 0 { + input.ImageTagMutabilityExclusionFilters = expandImageTagMutabilityExclusionFilters(v.([]any)) + } + _, err := conn.PutImageTagMutability(ctx, input) if err != nil { @@ -259,7 +301,7 @@ func resourceRepositoryDelete(ctx context.Context, d *schema.ResourceData, meta return sdkdiag.AppendErrorf(diags, "deleting ECR Repository (%s): %s", d.Id(), err) } - _, err = tfresource.RetryUntilNotFound(ctx, d.Timeout(schema.TimeoutDelete), func() (any, error) { + _, err = tfresource.RetryUntilNotFound(ctx, d.Timeout(schema.TimeoutDelete), func(ctx context.Context) (any, error) { return findRepositoryByName(ctx, conn, d.Id()) }) @@ -345,3 +387,67 @@ func flattenRepositoryEncryptionConfiguration(ec *types.EncryptionConfiguration) config, } } + +func expandImageTagMutabilityExclusionFilters(data []any) []types.ImageTagMutabilityExclusionFilter { + if len(data) == 0 { + return nil + } + + var filters []types.ImageTagMutabilityExclusionFilter + for _, v := range data { + tfMap := v.(map[string]any) + filter := types.ImageTagMutabilityExclusionFilter{ + Filter: aws.String(tfMap[names.AttrFilter].(string)), + FilterType: types.ImageTagMutabilityExclusionFilterType(tfMap["filter_type"].(string)), + } + filters = append(filters, filter) + } + + return filters +} + +func flattenImageTagMutabilityExclusionFilters(filters []types.ImageTagMutabilityExclusionFilter) []any { + if len(filters) == 0 { + return nil + } + + var tfList []any + for _, filter := range filters { + tfMap := map[string]any{ + names.AttrFilter: aws.ToString(filter.Filter), + "filter_type": string(filter.FilterType), + } + tfList = append(tfList, tfMap) + } + + return tfList +} + +func validateImageTagMutabilityExclusionFilter() 
schema.SchemaValidateDiagFunc { + return func(v any, path cty.Path) diag.Diagnostics { + var diags diag.Diagnostics + value := v.(string) + + wildcardCount := strings.Count(value, "*") + if wildcardCount > 2 { + diags = append(diags, diag.Diagnostic{ + Severity: diag.Error, + Summary: "Invalid filter pattern", + Detail: "Image tag mutability exclusion filter can contain a maximum of 2 wildcards (*)", + }) + } + + return diags + } +} + +func validateImageTagMutabilityExclusionFilterUsage(_ context.Context, d *schema.ResourceDiff, meta any) error { + mutability := d.Get("image_tag_mutability").(string) + filters := d.Get("image_tag_mutability_exclusion_filter").([]any) + + if len(filters) > 0 && mutability != string(types.ImageTagMutabilityImmutableWithExclusion) && mutability != string(types.ImageTagMutabilityMutableWithExclusion) { + return fmt.Errorf("image_tag_mutability_exclusion_filter can only be used when image_tag_mutability is set to IMMUTABLE_WITH_EXCLUSION or MUTABLE_WITH_EXCLUSION") + } + + return nil +} diff --git a/internal/service/ecr/repository_creation_template.go b/internal/service/ecr/repository_creation_template.go index e720b08bd987..6f206b0753f7 100644 --- a/internal/service/ecr/repository_creation_template.go +++ b/internal/service/ecr/repository_creation_template.go @@ -86,6 +86,32 @@ func resourceRepositoryCreationTemplate() *schema.Resource { Default: types.ImageTagMutabilityMutable, ValidateDiagFunc: enum.Validate[types.ImageTagMutability](), }, + "image_tag_mutability_exclusion_filter": { + Type: schema.TypeList, + Optional: true, + MaxItems: 5, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + names.AttrFilter: { + Type: schema.TypeString, + Required: true, + ValidateDiagFunc: validation.AllDiag( + validation.ToDiagFunc(validation.StringLenBetween(1, 128)), + validation.ToDiagFunc(validation.StringMatch( + regexache.MustCompile(`^[a-zA-Z0-9._*-]+$`), + "must contain only letters, numbers, and special characters (._*-)", + 
)), + validateImageTagMutabilityExclusionFilter(), + ), + }, + "filter_type": { + Type: schema.TypeString, + Required: true, + ValidateDiagFunc: enum.Validate[types.ImageTagMutabilityExclusionFilterType](), + }, + }, + }, + }, "lifecycle_policy": { Type: schema.TypeString, Optional: true, @@ -141,6 +167,10 @@ func resourceRepositoryCreationTemplateCreate(ctx context.Context, d *schema.Res input.Description = aws.String(v.(string)) } + if v, ok := d.GetOk("image_tag_mutability_exclusion_filter"); ok && len(v.([]any)) > 0 && v.([]any)[0] != nil { + input.ImageTagMutabilityExclusionFilters = expandImageTagMutabilityExclusionFilters(v.([]any)) + } + if v, ok := d.GetOk("lifecycle_policy"); ok { policy, err := structure.NormalizeJsonString(v.(string)) if err != nil { @@ -198,6 +228,10 @@ func resourceRepositoryCreationTemplateRead(ctx context.Context, d *schema.Resou } d.Set("image_tag_mutability", rct.ImageTagMutability) + if err := d.Set("image_tag_mutability_exclusion_filter", flattenImageTagMutabilityExclusionFilters(rct.ImageTagMutabilityExclusionFilters)); err != nil { + return sdkdiag.AppendErrorf(diags, "setting image_tag_mutability_exclusion_filter: %s", err) + } + if _, err := equivalentLifecyclePolicyJSON(d.Get("lifecycle_policy").(string), aws.ToString(rct.LifecyclePolicy)); err != nil { return sdkdiag.AppendFromErr(diags, err) } @@ -258,6 +292,12 @@ func resourceRepositoryCreationTemplateUpdate(ctx context.Context, d *schema.Res input.ImageTagMutability = types.ImageTagMutability((d.Get("image_tag_mutability").(string))) } + if d.HasChange("image_tag_mutability_exclusion_filter") { + // To use image_tag_mutability_exclusion_filter, image_tag_mutability must be set + input.ImageTagMutability = types.ImageTagMutability((d.Get("image_tag_mutability").(string))) + input.ImageTagMutabilityExclusionFilters = expandImageTagMutabilityExclusionFilters(d.Get("image_tag_mutability_exclusion_filter").([]any)) + } + if d.HasChange("lifecycle_policy") { policy, err := 
structure.NormalizeJsonString(d.Get("lifecycle_policy").(string)) if err != nil { diff --git a/internal/service/ecr/repository_creation_template_data_source.go b/internal/service/ecr/repository_creation_template_data_source.go index c7abfc1124c4..efd933252529 100644 --- a/internal/service/ecr/repository_creation_template_data_source.go +++ b/internal/service/ecr/repository_creation_template_data_source.go @@ -59,6 +59,22 @@ func dataSourceRepositoryCreationTemplate() *schema.Resource { Type: schema.TypeString, Computed: true, }, + "image_tag_mutability_exclusion_filter": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + names.AttrFilter: { + Type: schema.TypeString, + Computed: true, + }, + "filter_type": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, "lifecycle_policy": { Type: schema.TypeString, Computed: true, @@ -106,6 +122,9 @@ func dataSourceRepositoryCreationTemplateRead(ctx context.Context, d *schema.Res return sdkdiag.AppendErrorf(diags, "setting encryption_configuration: %s", err) } d.Set("image_tag_mutability", rct.ImageTagMutability) + if err := d.Set("image_tag_mutability_exclusion_filter", flattenImageTagMutabilityExclusionFilters(rct.ImageTagMutabilityExclusionFilters)); err != nil { + return sdkdiag.AppendErrorf(diags, "setting image_tag_mutability_exclusion_filter: %s", err) + } policy, err := structure.NormalizeJsonString(aws.ToString(rct.LifecyclePolicy)) if err != nil { diff --git a/internal/service/ecr/repository_creation_template_data_source_test.go b/internal/service/ecr/repository_creation_template_data_source_test.go index c404088f4a0f..e95c5678aa3e 100644 --- a/internal/service/ecr/repository_creation_template_data_source_test.go +++ b/internal/service/ecr/repository_creation_template_data_source_test.go @@ -66,6 +66,31 @@ func TestAccECRRepositoryCreationTemplateDataSource_root(t *testing.T) { }) } +func 
TestAccECRRepositoryCreationTemplateDataSource_mutabilityWithExclusion(t *testing.T) { + ctx := acctest.Context(t) + repositoryPrefix := "tf-test-" + sdkacctest.RandString(8) + dataSource := "data.aws_ecr_repository_creation_template.root" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.ECRServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + Config: testAccRepositoryCreationTemplateDataSourceConfig_mutabilityWithExclusion(repositoryPrefix, "latest*", "prod-*"), + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttr(dataSource, "image_tag_mutability", string(types.ImageTagMutabilityMutableWithExclusion)), + resource.TestCheckResourceAttr(dataSource, "image_tag_mutability_exclusion_filter.#", "2"), + resource.TestCheckResourceAttr(dataSource, "image_tag_mutability_exclusion_filter.0.filter", "latest*"), + resource.TestCheckResourceAttr(dataSource, "image_tag_mutability_exclusion_filter.0.filter_type", string(types.ImageTagMutabilityExclusionFilterTypeWildcard)), + resource.TestCheckResourceAttr(dataSource, "image_tag_mutability_exclusion_filter.1.filter", "prod-*"), + resource.TestCheckResourceAttr(dataSource, "image_tag_mutability_exclusion_filter.1.filter_type", string(types.ImageTagMutabilityExclusionFilterTypeWildcard)), + ), + }, + }, + }) +} + func testAccRepositoryCreationTemplateDataSourceConfig_basic(repositoryPrefix string) string { return fmt.Sprintf(` resource "aws_ecr_repository_creation_template" "test" { @@ -101,3 +126,35 @@ data "aws_ecr_repository_creation_template" "root" { } ` } + +func testAccRepositoryCreationTemplateDataSourceConfig_mutabilityWithExclusion(repositoryPrefix, filter1, filter2 string) string { + return fmt.Sprintf(` +resource "aws_ecr_repository_creation_template" "test" { + prefix = %[1]q + + applied_for = [ + "PULL_THROUGH_CACHE", + "REPLICATION", + ] + + 
resource_tags = { + Foo = "Bar" + } + + image_tag_mutability = "MUTABLE_WITH_EXCLUSION" + + image_tag_mutability_exclusion_filter { + filter = %[2]q + filter_type = "WILDCARD" + } + + image_tag_mutability_exclusion_filter { + filter = %[3]q + filter_type = "WILDCARD" + } +} +data "aws_ecr_repository_creation_template" "root" { + prefix = aws_ecr_repository_creation_template.test.prefix +} +`, repositoryPrefix, filter1, filter2) +} diff --git a/internal/service/ecr/repository_creation_template_test.go b/internal/service/ecr/repository_creation_template_test.go index e9480a258e5b..bfb633d49edf 100644 --- a/internal/service/ecr/repository_creation_template_test.go +++ b/internal/service/ecr/repository_creation_template_test.go @@ -197,6 +197,50 @@ func TestAccECRRepositoryCreationTemplate_root(t *testing.T) { }) } +func TestAccECRRepositoryCreationTemplate_mutabilityWithExclusion(t *testing.T) { + ctx := acctest.Context(t) + repositoryPrefix := "tf-test-" + sdkacctest.RandString(8) + resourceName := "aws_ecr_repository_creation_template.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.ECRServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckRepositoryCreationTemplateDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccRepositoryCreationTemplateConfig_mutabilityWithExclusion(repositoryPrefix, "latest*", "prod-*"), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckRepositoryCreationTemplateExists(ctx, resourceName), + resource.TestCheckResourceAttr(resourceName, "image_tag_mutability", string(types.ImageTagMutabilityMutableWithExclusion)), + resource.TestCheckResourceAttr(resourceName, "image_tag_mutability_exclusion_filter.#", "2"), + resource.TestCheckResourceAttr(resourceName, "image_tag_mutability_exclusion_filter.0.filter", "latest*"), + resource.TestCheckResourceAttr(resourceName, 
"image_tag_mutability_exclusion_filter.0.filter_type", string(types.ImageTagMutabilityExclusionFilterTypeWildcard)), + resource.TestCheckResourceAttr(resourceName, "image_tag_mutability_exclusion_filter.1.filter", "prod-*"), + resource.TestCheckResourceAttr(resourceName, "image_tag_mutability_exclusion_filter.1.filter_type", string(types.ImageTagMutabilityExclusionFilterTypeWildcard)), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccRepositoryCreationTemplateConfig_mutabilityWithExclusion(repositoryPrefix, "prod-*", "latest*"), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckRepositoryCreationTemplateExists(ctx, resourceName), + resource.TestCheckResourceAttr(resourceName, "image_tag_mutability", string(types.ImageTagMutabilityMutableWithExclusion)), + resource.TestCheckResourceAttr(resourceName, "image_tag_mutability_exclusion_filter.#", "2"), + resource.TestCheckResourceAttr(resourceName, "image_tag_mutability_exclusion_filter.0.filter", "prod-*"), + resource.TestCheckResourceAttr(resourceName, "image_tag_mutability_exclusion_filter.0.filter_type", string(types.ImageTagMutabilityExclusionFilterTypeWildcard)), + resource.TestCheckResourceAttr(resourceName, "image_tag_mutability_exclusion_filter.1.filter", "latest*"), + resource.TestCheckResourceAttr(resourceName, "image_tag_mutability_exclusion_filter.1.filter_type", string(types.ImageTagMutabilityExclusionFilterTypeWildcard)), + ), + }, + }, + }) +} + func testAccCheckRepositoryCreationTemplateDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { conn := acctest.Provider.Meta().(*conns.AWSClient).ECRClient(ctx) @@ -435,3 +479,32 @@ resource "aws_ecr_repository_creation_template" "root" { } ` } + +func testAccRepositoryCreationTemplateConfig_mutabilityWithExclusion(repositoryPrefix, filter1, filter2 string) string { + return fmt.Sprintf(` +resource "aws_ecr_repository_creation_template" 
"test" { + prefix = %[1]q + + applied_for = [ + "PULL_THROUGH_CACHE", + "REPLICATION", + ] + + resource_tags = { + Foo = "Bar" + } + + image_tag_mutability = "MUTABLE_WITH_EXCLUSION" + + image_tag_mutability_exclusion_filter { + filter = %[2]q + filter_type = "WILDCARD" + } + + image_tag_mutability_exclusion_filter { + filter = %[3]q + filter_type = "WILDCARD" + } +} +`, repositoryPrefix, filter1, filter2) +} diff --git a/internal/service/ecr/repository_data_source.go b/internal/service/ecr/repository_data_source.go index 542890418099..e18ebf7ee708 100644 --- a/internal/service/ecr/repository_data_source.go +++ b/internal/service/ecr/repository_data_source.go @@ -61,6 +61,22 @@ func dataSourceRepository() *schema.Resource { Type: schema.TypeString, Computed: true, }, + "image_tag_mutability_exclusion_filter": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + names.AttrFilter: { + Type: schema.TypeString, + Computed: true, + }, + "filter_type": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, "most_recent_image_tags": { Type: schema.TypeList, Computed: true, @@ -113,6 +129,9 @@ func dataSourceRepositoryRead(ctx context.Context, d *schema.ResourceData, meta return sdkdiag.AppendErrorf(diags, "setting image_scanning_configuration: %s", err) } d.Set("image_tag_mutability", repository.ImageTagMutability) + if err := d.Set("image_tag_mutability_exclusion_filter", flattenImageTagMutabilityExclusionFilters(repository.ImageTagMutabilityExclusionFilters)); err != nil { + return sdkdiag.AppendErrorf(diags, "setting image_tag_mutability_exclusion_filter: %s", err) + } d.Set(names.AttrName, repository.RepositoryName) d.Set("registry_id", repository.RegistryId) d.Set("repository_url", repository.RepositoryUri) diff --git a/internal/service/ecr/repository_data_source_test.go b/internal/service/ecr/repository_data_source_test.go index 6dc0690aac60..636130f3122c 100644 --- 
a/internal/service/ecr/repository_data_source_test.go +++ b/internal/service/ecr/repository_data_source_test.go @@ -71,6 +71,29 @@ func TestAccECRRepositoryDataSource_encryption(t *testing.T) { }) } +func TestAccECRRepositoryDataSource_mutabilityWithExclusion(t *testing.T) { + ctx := acctest.Context(t) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_ecr_repository.test" + dataSourceName := "data.aws_ecr_repository.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.ECRServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + Config: testAccRepositoryDataSourceConfig_mutabilityWithExclusion(rName, "test*"), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr(dataSourceName, "image_tag_mutability_exclusion_filter.#", "1"), + resource.TestCheckResourceAttrPair(resourceName, "image_tag_mutability_exclusion_filter.0.filter", dataSourceName, "image_tag_mutability_exclusion_filter.0.filter"), + resource.TestCheckResourceAttrPair(resourceName, "image_tag_mutability_exclusion_filter.0.filter_type", dataSourceName, "image_tag_mutability_exclusion_filter.0.filter_type"), + ), + }, + }, + }) +} + func TestAccECRRepositoryDataSource_nonExistent(t *testing.T) { ctx := acctest.Context(t) resource.ParallelTest(t, resource.TestCase{ @@ -130,3 +153,20 @@ data "aws_ecr_repository" "test" { } `, rName) } + +func testAccRepositoryDataSourceConfig_mutabilityWithExclusion(rName, filter string) string { + return fmt.Sprintf(` +resource "aws_ecr_repository" "test" { + name = %[1]q + image_tag_mutability = "MUTABLE_WITH_EXCLUSION" + + image_tag_mutability_exclusion_filter { + filter = %[2]q + filter_type = "WILDCARD" + } +} +data "aws_ecr_repository" "test" { + name = aws_ecr_repository.test.name +} +`, rName, filter) +} diff --git a/internal/service/ecr/repository_identity_gen_test.go 
b/internal/service/ecr/repository_identity_gen_test.go new file mode 100644 index 000000000000..bdac34cd9438 --- /dev/null +++ b/internal/service/ecr/repository_identity_gen_test.go @@ -0,0 +1,314 @@ +// Code generated by internal/generate/identitytests/main.go; DO NOT EDIT. + +package ecr_test + +import ( + "testing" + + "github.com/aws/aws-sdk-go-v2/service/ecr/types" + "github.com/hashicorp/terraform-plugin-testing/compare" + "github.com/hashicorp/terraform-plugin-testing/config" + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/knownvalue" + "github.com/hashicorp/terraform-plugin-testing/plancheck" + "github.com/hashicorp/terraform-plugin-testing/statecheck" + "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" + "github.com/hashicorp/terraform-plugin-testing/tfversion" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + tfknownvalue "github.com/hashicorp/terraform-provider-aws/internal/acctest/knownvalue" + tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccECRRepository_Identity_Basic(t *testing.T) { + ctx := acctest.Context(t) + + var v types.Repository + resourceName := "aws_ecr_repository.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.ECRServiceID), + CheckDestroy: testAccCheckRepositoryDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + // Step 1: Setup + { + ConfigDirectory: config.StaticDirectory("testdata/Repository/basic/"), + ConfigVariables: 
config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckRepositoryExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New(names.AttrName), compare.ValuesSame()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + names.AttrName: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrName)), + }, + }, + + // Step 2: Import command + { + ConfigDirectory: config.StaticDirectory("testdata/Repository/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ImportStateKind: resource.ImportCommandWithID, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + + // Step 3: Import block with Import ID + { + ConfigDirectory: config.StaticDirectory("testdata/Repository/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrName), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + }, + }, + }, + + // Step 4: Import block with Resource Identity + { + ConfigDirectory: config.StaticDirectory("testdata/Repository/basic/"), 
+ ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithResourceIdentity, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrName), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + }, + }, + }, + }, + }) +} + +func TestAccECRRepository_Identity_RegionOverride(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_ecr_repository.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.ECRServiceID), + CheckDestroy: acctest.CheckDestroyNoop, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + // Step 1: Setup + { + ConfigDirectory: config.StaticDirectory("testdata/Repository/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New(names.AttrName), compare.ValuesSame()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: 
knownvalue.StringExact(acctest.AlternateRegion()), + names.AttrName: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrName)), + }, + }, + + // Step 2: Import command + { + ConfigDirectory: config.StaticDirectory("testdata/Repository/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ImportStateKind: resource.ImportCommandWithID, + ImportStateIdFunc: acctest.CrossRegionImportStateIdFunc(resourceName), + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + + // Step 3: Import block with Import ID + { + ConfigDirectory: config.StaticDirectory("testdata/Repository/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportStateIdFunc: acctest.CrossRegionImportStateIdFunc(resourceName), + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrName), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + }, + + // Step 4: Import block with Resource Identity + { + ConfigDirectory: config.StaticDirectory("testdata/Repository/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithResourceIdentity, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, 
tfjsonpath.New(names.AttrName), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + }, + }, + }) +} + +// Resource Identity was added after v6.10.0 +func TestAccECRRepository_Identity_ExistingResource(t *testing.T) { + ctx := acctest.Context(t) + + var v types.Repository + resourceName := "aws_ecr_repository.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.ECRServiceID), + CheckDestroy: testAccCheckRepositoryDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/Repository/basic_v6.10.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckRepositoryExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Repository/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + 
names.AttrRegion: knownvalue.StringExact(acctest.Region()), + names.AttrName: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrName)), + }, + }, + }, + }) +} + +// Resource Identity was added after v6.10.0 +func TestAccECRRepository_Identity_ExistingResource_NoRefresh_NoChange(t *testing.T) { + ctx := acctest.Context(t) + + var v types.Repository + resourceName := "aws_ecr_repository.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.ECRServiceID), + CheckDestroy: testAccCheckRepositoryDestroy(ctx), + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/Repository/basic_v6.10.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckRepositoryExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Repository/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: 
[]statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + }, + }) +} diff --git a/internal/service/ecr/repository_policy.go b/internal/service/ecr/repository_policy.go index fc7341a76420..8358c243cb71 100644 --- a/internal/service/ecr/repository_policy.go +++ b/internal/service/ecr/repository_policy.go @@ -23,7 +23,10 @@ import ( "github.com/hashicorp/terraform-provider-aws/names" ) -// @SDKResource("aws_ecr_repository_policy", name="Repsitory Policy") +// @SDKResource("aws_ecr_repository_policy", name="Repository Policy") +// @IdentityAttribute("repository") +// @Testing(preIdentityVersion="v6.10.0") +// @Testing(idAttrDuplicates="repository") func resourceRepositoryPolicy() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceRepositoryPolicyPut, @@ -31,10 +34,6 @@ func resourceRepositoryPolicy() *schema.Resource { UpdateWithoutTimeout: resourceRepositoryPolicyPut, DeleteWithoutTimeout: resourceRepositoryPolicyDelete, - Importer: &schema.ResourceImporter{ - StateContext: schema.ImportStatePassthroughContext, - }, - Schema: map[string]*schema.Schema{ names.AttrPolicy: sdkv2.IAMPolicyDocumentSchemaRequired(), "registry_id": { @@ -65,7 +64,7 @@ func resourceRepositoryPolicyPut(ctx context.Context, d *schema.ResourceData, me RepositoryName: aws.String(repositoryName), } - _, err = tfresource.RetryWhenIsAErrorMessageContains[*types.InvalidParameterException](ctx, propagationTimeout, func() (any, error) { + _, err = tfresource.RetryWhenIsAErrorMessageContains[any, *types.InvalidParameterException](ctx, propagationTimeout, func(ctx context.Context) (any, error) { return conn.SetRepositoryPolicy(ctx, input) }, "Principal not found") @@ -84,7 +83,7 @@ func resourceRepositoryPolicyRead(ctx context.Context, d *schema.ResourceData, m var diags diag.Diagnostics conn := meta.(*conns.AWSClient).ECRClient(ctx) - outputRaw, err := tfresource.RetryWhenNewResourceNotFound(ctx, propagationTimeout, func() (any, error) { + output, err := 
tfresource.RetryWhenNewResourceNotFound(ctx, propagationTimeout, func(ctx context.Context) (*ecr.GetRepositoryPolicyOutput, error) { return findRepositoryPolicyByRepositoryName(ctx, conn, d.Id()) }, d.IsNewResource()) @@ -98,8 +97,6 @@ func resourceRepositoryPolicyRead(ctx context.Context, d *schema.ResourceData, m return sdkdiag.AppendErrorf(diags, "reading ECR Repository Policy (%s): %s", d.Id(), err) } - output := outputRaw.(*ecr.GetRepositoryPolicyOutput) - policyToSet, err := verify.SecondJSONUnlessEquivalent(d.Get(names.AttrPolicy).(string), aws.ToString(output.PolicyText)) if err != nil { return sdkdiag.AppendFromErr(diags, err) diff --git a/internal/service/ecr/repository_policy_identity_gen_test.go b/internal/service/ecr/repository_policy_identity_gen_test.go new file mode 100644 index 000000000000..b520ee88eaf4 --- /dev/null +++ b/internal/service/ecr/repository_policy_identity_gen_test.go @@ -0,0 +1,310 @@ +// Code generated by internal/generate/identitytests/main.go; DO NOT EDIT. 
+ +package ecr_test + +import ( + "testing" + + "github.com/hashicorp/terraform-plugin-testing/compare" + "github.com/hashicorp/terraform-plugin-testing/config" + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/knownvalue" + "github.com/hashicorp/terraform-plugin-testing/plancheck" + "github.com/hashicorp/terraform-plugin-testing/statecheck" + "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" + "github.com/hashicorp/terraform-plugin-testing/tfversion" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + tfknownvalue "github.com/hashicorp/terraform-provider-aws/internal/acctest/knownvalue" + tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccECRRepositoryPolicy_Identity_Basic(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_ecr_repository_policy.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.ECRServiceID), + CheckDestroy: testAccCheckRepositoryPolicyDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + // Step 1: Setup + { + ConfigDirectory: config.StaticDirectory("testdata/RepositoryPolicy/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckRepositoryPolicyExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, 
tfjsonpath.New("repository"), compare.ValuesSame()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + "repository": knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New("repository")), + }, + }, + + // Step 2: Import command + { + ConfigDirectory: config.StaticDirectory("testdata/RepositoryPolicy/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ImportStateKind: resource.ImportCommandWithID, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + + // Step 3: Import block with Import ID + { + ConfigDirectory: config.StaticDirectory("testdata/RepositoryPolicy/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New("repository"), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + }, + }, + }, + + // Step 4: Import block with Resource Identity + { + ConfigDirectory: config.StaticDirectory("testdata/RepositoryPolicy/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithResourceIdentity, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + 
plancheck.ExpectKnownValue(resourceName, tfjsonpath.New("repository"), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + }, + }, + }, + }, + }) +} + +func TestAccECRRepositoryPolicy_Identity_RegionOverride(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_ecr_repository_policy.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.ECRServiceID), + CheckDestroy: acctest.CheckDestroyNoop, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + // Step 1: Setup + { + ConfigDirectory: config.StaticDirectory("testdata/RepositoryPolicy/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New("repository"), compare.ValuesSame()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.AlternateRegion()), + "repository": knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New("repository")), + }, + }, + + // Step 2: Import command + { + ConfigDirectory: 
config.StaticDirectory("testdata/RepositoryPolicy/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ImportStateKind: resource.ImportCommandWithID, + ImportStateIdFunc: acctest.CrossRegionImportStateIdFunc(resourceName), + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + + // Step 3: Import block with Import ID + { + ConfigDirectory: config.StaticDirectory("testdata/RepositoryPolicy/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportStateIdFunc: acctest.CrossRegionImportStateIdFunc(resourceName), + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New("repository"), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + }, + + // Step 4: Import block with Resource Identity + { + ConfigDirectory: config.StaticDirectory("testdata/RepositoryPolicy/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithResourceIdentity, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New("repository"), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + }, + }, + }) +} + +// Resource Identity was added after 
v6.10.0 +func TestAccECRRepositoryPolicy_Identity_ExistingResource(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_ecr_repository_policy.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.ECRServiceID), + CheckDestroy: testAccCheckRepositoryPolicyDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/RepositoryPolicy/basic_v6.10.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckRepositoryPolicyExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/RepositoryPolicy/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + "repository": knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New("repository")), + }, + }, + }, + }) +} + +// Resource Identity was 
added after v6.10.0 +func TestAccECRRepositoryPolicy_Identity_ExistingResource_NoRefresh_NoChange(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_ecr_repository_policy.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.ECRServiceID), + CheckDestroy: testAccCheckRepositoryPolicyDestroy(ctx), + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/RepositoryPolicy/basic_v6.10.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckRepositoryPolicyExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/RepositoryPolicy/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + }, + }) +} diff --git a/internal/service/ecr/repository_test.go b/internal/service/ecr/repository_test.go index e0f9b66b24ac..39ad38995831 100644 --- 
a/internal/service/ecr/repository_test.go +++ b/internal/service/ecr/repository_test.go @@ -6,6 +6,7 @@ package ecr_test import ( "context" "fmt" + "strings" "testing" "github.com/YakDriver/regexache" @@ -156,6 +157,128 @@ func TestAccECRRepository_immutability(t *testing.T) { }) } +func TestAccECRRepository_immutabilityWithExclusion(t *testing.T) { + ctx := acctest.Context(t) + var v types.Repository + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_ecr_repository.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.ECRServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckRepositoryDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccRepositoryConfig_immutabilityWithExclusion(rName, "latest*"), + Check: resource.ComposeTestCheckFunc( + testAccCheckRepositoryExists(ctx, resourceName, &v), + resource.TestCheckResourceAttr(resourceName, names.AttrName, rName), + resource.TestCheckResourceAttr(resourceName, "image_tag_mutability", string(types.ImageTagMutabilityImmutableWithExclusion)), + resource.TestCheckResourceAttr(resourceName, "image_tag_mutability_exclusion_filter.#", "1"), + resource.TestCheckResourceAttr(resourceName, "image_tag_mutability_exclusion_filter.0.filter", "latest*"), + resource.TestCheckResourceAttr(resourceName, "image_tag_mutability_exclusion_filter.0.filter_type", string(types.ImageTagMutabilityExclusionFilterTypeWildcard)), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccRepositoryConfig_immutabilityWithExclusion(rName, "dev-*"), + Check: resource.ComposeTestCheckFunc( + testAccCheckRepositoryExists(ctx, resourceName, &v), + resource.TestCheckResourceAttr(resourceName, "image_tag_mutability_exclusion_filter.0.filter", "dev-*"), + ), + }, + }, + }) +} + +func 
TestAccECRRepository_mutabilityWithExclusion(t *testing.T) { + ctx := acctest.Context(t) + var v types.Repository + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_ecr_repository.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.ECRServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckRepositoryDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccRepositoryConfig_mutabilityWithExclusion(rName, "prod-*"), + Check: resource.ComposeTestCheckFunc( + testAccCheckRepositoryExists(ctx, resourceName, &v), + resource.TestCheckResourceAttr(resourceName, names.AttrName, rName), + resource.TestCheckResourceAttr(resourceName, "image_tag_mutability", string(types.ImageTagMutabilityMutableWithExclusion)), + resource.TestCheckResourceAttr(resourceName, "image_tag_mutability_exclusion_filter.#", "1"), + resource.TestCheckResourceAttr(resourceName, "image_tag_mutability_exclusion_filter.0.filter", "prod-*"), + resource.TestCheckResourceAttr(resourceName, "image_tag_mutability_exclusion_filter.0.filter_type", string(types.ImageTagMutabilityExclusionFilterTypeWildcard)), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccRepositoryConfig_mutabilityWithExclusion(rName, "release-*"), + Check: resource.ComposeTestCheckFunc( + testAccCheckRepositoryExists(ctx, resourceName, &v), + resource.TestCheckResourceAttr(resourceName, "image_tag_mutability_exclusion_filter.0.filter", "release-*"), + ), + }, + }, + }) +} + +func TestAccECRRepository_immutabilityWithExclusion_validation(t *testing.T) { + ctx := acctest.Context(t) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.ECRServiceID), + 
ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckRepositoryDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccRepositoryConfig_immutabilityWithExclusion(rName, "invalid!@#$"), + ExpectError: regexache.MustCompile(`must contain only letters, numbers, and special characters`), + }, + { + Config: testAccRepositoryConfig_immutabilityWithExclusion(rName, "a*b*c*d"), + ExpectError: regexache.MustCompile(`Image tag mutability exclusion filter can contain a maximum of 2 wildcards`), + }, + { + Config: testAccRepositoryConfig_immutabilityWithExclusion(rName, strings.Repeat("a", 129)), + ExpectError: regexache.MustCompile(`expected length of.*to be in the range.*128`), + }, + }, + }) +} + +func TestAccECRRepository_immutabilityWithExclusion_crossValidation(t *testing.T) { + ctx := acctest.Context(t) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.ECRServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckRepositoryDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccRepositoryConfig_immutabilityWithExclusionInvalid(rName), + ExpectError: regexache.MustCompile(`image_tag_mutability_exclusion_filter can only be used when image_tag_mutability is set to IMMUTABLE_WITH_EXCLUSION`), + }, + }, + }) +} + func TestAccECRRepository_Image_scanning(t *testing.T) { ctx := acctest.Context(t) var v1, v2 types.Repository @@ -448,6 +571,48 @@ resource "aws_ecr_repository" "test" { `, rName) } +func testAccRepositoryConfig_immutabilityWithExclusion(rName, filter string) string { + return fmt.Sprintf(` +resource "aws_ecr_repository" "test" { + name = %[1]q + image_tag_mutability = "IMMUTABLE_WITH_EXCLUSION" + + image_tag_mutability_exclusion_filter { + filter = %[2]q + filter_type = "WILDCARD" + } +} +`, rName, filter) +} + +func 
testAccRepositoryConfig_mutabilityWithExclusion(rName, filter string) string { + return fmt.Sprintf(` +resource "aws_ecr_repository" "test" { + name = %[1]q + image_tag_mutability = "MUTABLE_WITH_EXCLUSION" + + image_tag_mutability_exclusion_filter { + filter = %[2]q + filter_type = "WILDCARD" + } +} +`, rName, filter) +} + +func testAccRepositoryConfig_immutabilityWithExclusionInvalid(rName string) string { + return fmt.Sprintf(` +resource "aws_ecr_repository" "test" { + name = %[1]q + image_tag_mutability = "MUTABLE" + + image_tag_mutability_exclusion_filter { + filter = "latest*" + filter_type = "WILDCARD" + } +} +`, rName) +} + func testAccRepositoryConfig_imageScanningConfiguration(rName string, scanOnPush bool) string { return fmt.Sprintf(` resource "aws_ecr_repository" "test" { diff --git a/internal/service/ecr/service_endpoint_resolver_gen.go b/internal/service/ecr/service_endpoint_resolver_gen.go index 6972fecf3f7c..86e077b7736c 100644 --- a/internal/service/ecr/service_endpoint_resolver_gen.go +++ b/internal/service/ecr/service_endpoint_resolver_gen.go @@ -62,7 +62,7 @@ func (r resolverV2) ResolveEndpoint(ctx context.Context, params ecr.EndpointPara }) params.UseFIPS = aws.Bool(false) } else { - err = fmt.Errorf("looking up ecr endpoint %q: %s", hostname, err) + err = fmt.Errorf("looking up ecr endpoint %q: %w", hostname, err) return } } else { diff --git a/internal/service/ecr/service_endpoints_gen_test.go b/internal/service/ecr/service_endpoints_gen_test.go index f875c789ca76..2e7bdf8d8edb 100644 --- a/internal/service/ecr/service_endpoints_gen_test.go +++ b/internal/service/ecr/service_endpoints_gen_test.go @@ -521,7 +521,7 @@ func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { ) } -var errCancelOperation = fmt.Errorf("Test: Canceling request") +var errCancelOperation = errors.New("Test: Canceling request") func addCancelRequestMiddleware() func(*middleware.Stack) error { return func(stack *middleware.Stack) error { diff 
--git a/internal/service/ecr/service_package_gen.go b/internal/service/ecr/service_package_gen.go index b43812acda8a..51a11009b9c7 100644 --- a/internal/service/ecr/service_package_gen.go +++ b/internal/service/ecr/service_package_gen.go @@ -7,7 +7,6 @@ import ( "unique" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/ecr" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -20,6 +19,12 @@ type servicePackage struct{} func (p *servicePackage) FrameworkDataSources(ctx context.Context) []*inttypes.ServicePackageFrameworkDataSource { return []*inttypes.ServicePackageFrameworkDataSource{ + { + Factory: newImagesDataSource, + TypeName: "aws_ecr_images", + Name: "Images", + Region: unique.Make(inttypes.ResourceRegionDefault()), + }, { Factory: newLifecyclePolicyDocumentDataSource, TypeName: "aws_ecr_lifecycle_policy_document", @@ -91,6 +96,10 @@ func (p *servicePackage) SDKResources(ctx context.Context) []*inttypes.ServicePa TypeName: "aws_ecr_lifecycle_policy", Name: "Lifecycle Policy", Region: unique.Make(inttypes.ResourceRegionDefault()), + Identity: inttypes.RegionalSingleParameterIdentity("repository"), + Import: inttypes.SDKv2Import{ + WrappedImport: true, + }, }, { Factory: resourcePullThroughCacheRule, @@ -123,7 +132,11 @@ func (p *servicePackage) SDKResources(ctx context.Context) []*inttypes.ServicePa Tags: unique.Make(inttypes.ServicePackageResourceTags{ IdentifierAttribute: names.AttrARN, }), - Region: unique.Make(inttypes.ResourceRegionDefault()), + Region: unique.Make(inttypes.ResourceRegionDefault()), + Identity: inttypes.RegionalSingleParameterIdentity(names.AttrName), + Import: inttypes.SDKv2Import{ + WrappedImport: true, + }, }, { Factory: resourceRepositoryCreationTemplate, @@ -134,8 +147,12 @@ func (p *servicePackage) SDKResources(ctx context.Context) []*inttypes.ServicePa { Factory: resourceRepositoryPolicy, TypeName: 
"aws_ecr_repository_policy", - Name: "Repsitory Policy", + Name: "Repository Policy", Region: unique.Make(inttypes.ResourceRegionDefault()), + Identity: inttypes.RegionalSingleParameterIdentity("repository"), + Import: inttypes.SDKv2Import{ + WrappedImport: true, + }, }, } } @@ -163,7 +180,7 @@ func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) ( func(o *ecr.Options) { if inContext, ok := conns.FromContext(ctx); ok && inContext.VCREnabled() { tflog.Info(ctx, "overriding retry behavior to immediately return VCR errors") - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(vcr.InteractionNotFoundRetryableFunc)) + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), vcr.InteractionNotFoundRetryableFunc) } }, withExtraOptions(ctx, p, config), diff --git a/internal/service/ecr/sweep.go b/internal/service/ecr/sweep.go index 623ceb1d576a..19b3d3c875db 100644 --- a/internal/service/ecr/sweep.go +++ b/internal/service/ecr/sweep.go @@ -26,7 +26,7 @@ func sweepRepositories(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %w", err) + return fmt.Errorf("getting client: %w", err) } conn := client.ECRClient(ctx) input := &ecr.DescribeRepositoriesInput{} diff --git a/internal/service/ecr/tags_gen.go b/internal/service/ecr/tags_gen.go index 68602eed9e71..9dcdfe9a863d 100644 --- a/internal/service/ecr/tags_gen.go +++ b/internal/service/ecr/tags_gen.go @@ -3,8 +3,8 @@ package ecr import ( "context" - "fmt" + "github.com/YakDriver/smarterr" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/ecr" awstypes "github.com/aws/aws-sdk-go-v2/service/ecr/types" @@ -27,7 +27,7 @@ func listTags(ctx context.Context, conn *ecr.Client, identifier string, optFns . output, err := conn.ListTagsForResource(ctx, &input, optFns...) 
if err != nil { - return tftags.New(ctx, nil), err + return tftags.New(ctx, nil), smarterr.NewError(err) } return keyValueTags(ctx, output.Tags), nil @@ -39,7 +39,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri tags, err := listTags(ctx, meta.(*conns.AWSClient).ECRClient(ctx), identifier) if err != nil { - return err + return smarterr.NewError(err) } if inContext, ok := tftags.FromContext(ctx); ok { @@ -126,7 +126,7 @@ func updateTags(ctx context.Context, conn *ecr.Client, identifier string, oldTag _, err := conn.UntagResource(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("untagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } @@ -141,7 +141,7 @@ func updateTags(ctx context.Context, conn *ecr.Client, identifier string, oldTag _, err := conn.TagResource(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("tagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } diff --git a/internal/service/ecr/testdata/LifecyclePolicy/basic/main_gen.tf b/internal/service/ecr/testdata/LifecyclePolicy/basic/main_gen.tf new file mode 100644 index 000000000000..9fe56751c5ab --- /dev/null +++ b/internal/service/ecr/testdata/LifecyclePolicy/basic/main_gen.tf @@ -0,0 +1,36 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_ecr_lifecycle_policy" "test" { + repository = aws_ecr_repository.test.name + + policy = < 0 && len(managedProvider) > 0 { + return errors.New("only one of auto_scaling_group_provider or managed_instances_provider must be specified") + } + + // Validate cluster field requirements + if len(managedProvider) > 0 { + // cluster is required for Managed Instances CP + if clusterName == "" { + return errors.New("cluster is required when using managed_instances_provider") + } + } else if len(asgProvider) > 0 { + // cluster must not be set for ASG CP + if clusterName != "" { + return errors.New("cluster must not be set when using auto_scaling_group_provider") + } + } + + return nil + }, + Schema: map[string]*schema.Schema{ names.AttrARN: { Type: schema.TypeString, Computed: true, }, + "cluster": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: validateClusterName, + }, "auto_scaling_group_provider": { Type: schema.TypeList, MaxItems: 1, - Required: true, + Optional: true, ForceNew: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ @@ -111,6 +147,368 @@ func resourceCapacityProvider() *schema.Resource { }, }, }, + "managed_instances_provider": { + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + ForceNew: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "infrastructure_role_arn": { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidARN, + }, + "instance_launch_template": { + Type: schema.TypeList, + MaxItems: 1, + Required: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "ec2_instance_profile_arn": { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidARN, + }, + "instance_requirements": { + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "accelerator_count": { + Type: schema.TypeList, + MaxItems: 1, + 
Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + names.AttrMax: { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntAtLeast(0), + }, + names.AttrMin: { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntAtLeast(0), + }, + }, + }, + }, + "accelerator_manufacturers": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateDiagFunc: enum.Validate[awstypes.AcceleratorManufacturer](), + }, + }, + "accelerator_names": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateDiagFunc: enum.Validate[awstypes.AcceleratorName](), + }, + }, + "accelerator_total_memory_mib": { + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + names.AttrMax: { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntAtLeast(0), + }, + names.AttrMin: { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntAtLeast(0), + }, + }, + }, + }, + "accelerator_types": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateDiagFunc: enum.Validate[awstypes.AcceleratorType](), + }, + }, + "allowed_instance_types": { + Type: schema.TypeSet, + Optional: true, + MaxItems: 400, + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateFunc: validation.All( + validation.StringLenBetween(1, 30), + validation.StringMatch(regexache.MustCompile(`^[a-zA-Z0-9\.\*\-]+$`), "must contain only alphanumeric characters, dots, asterisks, and hyphens"), + ), + }, + }, + "bare_metal": { + Type: schema.TypeString, + Optional: true, + ValidateDiagFunc: enum.Validate[awstypes.BareMetal](), + }, + "baseline_ebs_bandwidth_mbps": { + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + names.AttrMax: { + Type: schema.TypeInt, + Optional: 
true, + ValidateFunc: validation.IntAtLeast(0), + }, + names.AttrMin: { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntAtLeast(0), + }, + }, + }, + }, + "burstable_performance": { + Type: schema.TypeString, + Optional: true, + ValidateDiagFunc: enum.Validate[awstypes.BurstablePerformance](), + }, + "cpu_manufacturers": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateDiagFunc: enum.Validate[awstypes.CpuManufacturer](), + }, + }, + "excluded_instance_types": { + Type: schema.TypeSet, + Optional: true, + MaxItems: 400, + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateFunc: validation.All( + validation.StringLenBetween(1, 30), + validation.StringMatch(regexache.MustCompile(`^[a-zA-Z0-9\.\*\-]+$`), "must contain only alphanumeric characters, dots, asterisks, and hyphens"), + ), + }, + }, + "instance_generations": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateDiagFunc: enum.Validate[awstypes.InstanceGeneration](), + }, + }, + "local_storage": { + Type: schema.TypeString, + Optional: true, + ValidateDiagFunc: enum.Validate[awstypes.LocalStorage](), + }, + "local_storage_types": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateDiagFunc: enum.Validate[awstypes.LocalStorageType](), + }, + }, + "max_spot_price_as_percentage_of_optimal_on_demand_price": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntAtLeast(0), + }, + "memory_gib_per_vcpu": { + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + names.AttrMax: { + Type: schema.TypeFloat, + Optional: true, + ValidateFunc: validation.FloatAtLeast(0), + }, + names.AttrMin: { + Type: schema.TypeFloat, + Optional: true, + ValidateFunc: validation.FloatAtLeast(0), + }, + }, + }, + }, + "memory_mib": { + Type: schema.TypeList, + 
MaxItems: 1, + Required: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + names.AttrMax: { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntAtLeast(1), + }, + names.AttrMin: { + Type: schema.TypeInt, + Required: true, + ValidateFunc: validation.IntAtLeast(1), + }, + }, + }, + }, + "network_bandwidth_gbps": { + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + names.AttrMax: { + Type: schema.TypeFloat, + Optional: true, + ValidateFunc: validation.FloatAtLeast(0), + }, + names.AttrMin: { + Type: schema.TypeFloat, + Optional: true, + ValidateFunc: validation.FloatAtLeast(0), + }, + }, + }, + }, + "network_interface_count": { + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + names.AttrMax: { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntAtLeast(1), + }, + names.AttrMin: { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntAtLeast(1), + }, + }, + }, + }, + "on_demand_max_price_percentage_over_lowest_price": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntAtLeast(0), + }, + "require_hibernate_support": { + Type: schema.TypeBool, + Optional: true, + }, + "spot_max_price_percentage_over_lowest_price": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntAtLeast(0), + }, + "total_local_storage_gb": { + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + names.AttrMax: { + Type: schema.TypeFloat, + Optional: true, + ValidateFunc: validation.FloatAtLeast(0), + }, + names.AttrMin: { + Type: schema.TypeFloat, + Optional: true, + ValidateFunc: validation.FloatAtLeast(0), + }, + }, + }, + }, + "vcpu_count": { + Type: schema.TypeList, + MaxItems: 1, + Required: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + 
names.AttrMax: { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntAtLeast(1), + }, + names.AttrMin: { + Type: schema.TypeInt, + Required: true, + ValidateFunc: validation.IntAtLeast(1), + }, + }, + }, + }, + }, + }, + }, + "monitoring": { + Type: schema.TypeString, + Optional: true, + ValidateDiagFunc: enum.Validate[awstypes.ManagedInstancesMonitoringOptions](), + }, + names.AttrNetworkConfiguration: { + Type: schema.TypeList, + MaxItems: 1, + Required: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + names.AttrSecurityGroups: { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + names.AttrSubnets: { + Type: schema.TypeSet, + Required: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, + }, + "storage_configuration": { + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "storage_size_gib": { + Type: schema.TypeInt, + Required: true, + ValidateFunc: validation.IntAtLeast(1), + }, + }, + }, + }, + }, + }, + }, + names.AttrPropagateTags: { + Type: schema.TypeString, + Optional: true, + ValidateDiagFunc: enum.Validate[awstypes.PropagateMITags](), + }, + }, + }, + }, names.AttrName: { Type: schema.TypeString, Required: true, @@ -130,10 +528,15 @@ func resourceCapacityProviderCreate(ctx context.Context, d *schema.ResourceData, name := d.Get(names.AttrName).(string) input := ecs.CreateCapacityProviderInput{ AutoScalingGroupProvider: expandAutoScalingGroupProviderCreate(d.Get("auto_scaling_group_provider")), + ManagedInstancesProvider: expandManagedInstancesProviderCreate(d.Get("managed_instances_provider")), Name: aws.String(name), Tags: getTagsIn(ctx), } + if v, ok := d.GetOk("cluster"); ok { + input.Cluster = aws.String(v.(string)) + } + output, err := conn.CreateCapacityProvider(ctx, &input) // Some partitions (e.g. ISO) may not support tag-on-create. 
@@ -186,6 +589,10 @@ func resourceCapacityProviderRead(ctx context.Context, d *schema.ResourceData, m if err := d.Set("auto_scaling_group_provider", flattenAutoScalingGroupProvider(output.AutoScalingGroupProvider)); err != nil { return sdkdiag.AppendErrorf(diags, "setting auto_scaling_group_provider: %s", err) } + d.Set("cluster", output.Cluster) + if err := d.Set("managed_instances_provider", flattenManagedInstancesProvider(output.ManagedInstancesProvider)); err != nil { + return sdkdiag.AppendErrorf(diags, "setting managed_instances_provider: %s", err) + } d.Set(names.AttrName, output.Name) setTagsOut(ctx, output.Tags) @@ -198,16 +605,21 @@ func resourceCapacityProviderUpdate(ctx context.Context, d *schema.ResourceData, conn := meta.(*conns.AWSClient).ECSClient(ctx) if d.HasChangesExcept(names.AttrTags, names.AttrTagsAll) { - input := &ecs.UpdateCapacityProviderInput{ + input := ecs.UpdateCapacityProviderInput{ AutoScalingGroupProvider: expandAutoScalingGroupProviderUpdate(d.Get("auto_scaling_group_provider")), + ManagedInstancesProvider: expandManagedInstancesProviderUpdate(d.Get("managed_instances_provider")), Name: aws.String(d.Get(names.AttrName).(string)), } + if v, ok := d.GetOk("cluster"); ok { + input.Cluster = aws.String(v.(string)) + } + const ( timeout = 10 * time.Minute ) - _, err := tfresource.RetryWhenIsA[*awstypes.UpdateInProgressException](ctx, timeout, func() (any, error) { - return conn.UpdateCapacityProvider(ctx, input) + _, err := tfresource.RetryWhenIsA[any, *awstypes.UpdateInProgressException](ctx, timeout, func(ctx context.Context) (any, error) { + return conn.UpdateCapacityProvider(ctx, &input) }) if err != nil { @@ -227,9 +639,10 @@ func resourceCapacityProviderDelete(ctx context.Context, d *schema.ResourceData, conn := meta.(*conns.AWSClient).ECSClient(ctx) log.Printf("[DEBUG] Deleting ECS Capacity Provider: %s", d.Id()) - _, err := conn.DeleteCapacityProvider(ctx, &ecs.DeleteCapacityProviderInput{ + input := 
ecs.DeleteCapacityProviderInput{ CapacityProvider: aws.String(d.Id()), - }) + } + _, err := conn.DeleteCapacityProvider(ctx, &input) // "An error occurred (ClientException) when calling the DeleteCapacityProvider operation: The specified capacity provider does not exist. Specify a valid name or ARN and try again." if errs.IsAErrorMessageContains[*awstypes.ClientException](err, "capacity provider does not exist") { @@ -285,18 +698,18 @@ func findCapacityProviders(ctx context.Context, conn *ecs.Client, input *ecs.Des } func findCapacityProviderByARN(ctx context.Context, conn *ecs.Client, arn string) (*awstypes.CapacityProvider, error) { - input := &ecs.DescribeCapacityProvidersInput{ + input := ecs.DescribeCapacityProvidersInput{ CapacityProviders: []string{arn}, Include: []awstypes.CapacityProviderField{awstypes.CapacityProviderFieldTags}, } - output, err := findCapacityProvider(ctx, conn, input) + output, err := findCapacityProvider(ctx, conn, &input) // Some partitions (i.e., ISO) may not support tagging, giving error. 
if errs.IsUnsupportedOperationInPartitionError(partitionFromConn(conn), err) { input.Include = nil - output, err = findCapacityProvider(ctx, conn, input) + output, err = findCapacityProvider(ctx, conn, &input) } if err != nil { @@ -366,7 +779,7 @@ func waitCapacityProviderUpdated(ctx context.Context, conn *ecs.Client, arn stri func waitCapacityProviderDeleted(ctx context.Context, conn *ecs.Client, arn string, timeout time.Duration) (*awstypes.CapacityProvider, error) { stateConf := &retry.StateChangeConf{ - Pending: enum.Slice(awstypes.CapacityProviderStatusActive), + Pending: enum.Slice(awstypes.CapacityProviderStatusActive, awstypes.CapacityProviderStatusDeprovisioning), Target: []string{}, Refresh: statusCapacityProvider(ctx, conn, arn), Timeout: timeout, @@ -386,7 +799,7 @@ func expandAutoScalingGroupProviderCreate(configured any) *awstypes.AutoScalingG return nil } - if configured.([]any) == nil || len(configured.([]any)) == 0 { + if len(configured.([]any)) == 0 { return nil } @@ -413,7 +826,7 @@ func expandAutoScalingGroupProviderUpdate(configured any) *awstypes.AutoScalingG return nil } - if configured.([]any) == nil || len(configured.([]any)) == 0 { + if len(configured.([]any)) == 0 { return nil } @@ -438,7 +851,7 @@ func expandManagedScaling(configured any) *awstypes.ManagedScaling { return nil } - if configured.([]any) == nil || len(configured.([]any)) == 0 { + if len(configured.([]any)) == 0 { return nil } @@ -492,3 +905,594 @@ func flattenAutoScalingGroupProvider(provider *awstypes.AutoScalingGroupProvider result := []map[string]any{p} return result } + +func expandManagedInstancesProviderCreate(configured any) *awstypes.CreateManagedInstancesProviderConfiguration { + if configured == nil { + return nil + } + + if len(configured.([]any)) == 0 { + return nil + } + + tfMap := configured.([]any)[0].(map[string]any) + apiObject := &awstypes.CreateManagedInstancesProviderConfiguration{} + + if v, ok := tfMap["infrastructure_role_arn"].(string); ok && v != "" 
{ + apiObject.InfrastructureRoleArn = aws.String(v) + } + + if v, ok := tfMap["instance_launch_template"].([]any); ok && len(v) > 0 { + apiObject.InstanceLaunchTemplate = expandInstanceLaunchTemplateCreate(v) + } + + if v, ok := tfMap[names.AttrPropagateTags].(string); ok && v != "" { + apiObject.PropagateTags = awstypes.PropagateMITags(v) + } + + return apiObject +} + +func expandManagedInstancesProviderUpdate(configured any) *awstypes.UpdateManagedInstancesProviderConfiguration { + if configured == nil { + return nil + } + + if len(configured.([]any)) == 0 { + return nil + } + + tfMap := configured.([]any)[0].(map[string]any) + apiObject := &awstypes.UpdateManagedInstancesProviderConfiguration{} + + if v, ok := tfMap["infrastructure_role_arn"].(string); ok && v != "" { + apiObject.InfrastructureRoleArn = aws.String(v) + } + + if v, ok := tfMap["instance_launch_template"].([]any); ok && len(v) > 0 { + apiObject.InstanceLaunchTemplate = expandInstanceLaunchTemplateUpdate(v) + } + + if v, ok := tfMap[names.AttrPropagateTags].(string); ok && v != "" { + apiObject.PropagateTags = awstypes.PropagateMITags(v) + } + + return apiObject +} + +func expandInstanceLaunchTemplateCreate(tfList []any) *awstypes.InstanceLaunchTemplate { + if len(tfList) == 0 || tfList[0] == nil { + return nil + } + + tfMap := tfList[0].(map[string]any) + apiObject := &awstypes.InstanceLaunchTemplate{} + + if v, ok := tfMap["ec2_instance_profile_arn"].(string); ok && v != "" { + apiObject.Ec2InstanceProfileArn = aws.String(v) + } + + if v, ok := tfMap["instance_requirements"].([]any); ok && len(v) > 0 { + apiObject.InstanceRequirements = expandInstanceRequirementsRequest(v) + } + + if v, ok := tfMap["monitoring"].(string); ok && v != "" { + apiObject.Monitoring = awstypes.ManagedInstancesMonitoringOptions(v) + } + + if v, ok := tfMap[names.AttrNetworkConfiguration].([]any); ok && len(v) > 0 { + apiObject.NetworkConfiguration = expandManagedInstancesNetworkConfiguration(v) + } + + if v, ok := 
tfMap["storage_configuration"].([]any); ok && len(v) > 0 { + apiObject.StorageConfiguration = expandManagedInstancesStorageConfiguration(v) + } + + return apiObject +} + +func expandInstanceLaunchTemplateUpdate(tfList []any) *awstypes.InstanceLaunchTemplateUpdate { + if len(tfList) == 0 || tfList[0] == nil { + return nil + } + + tfMap := tfList[0].(map[string]any) + apiObject := &awstypes.InstanceLaunchTemplateUpdate{} + + if v, ok := tfMap["ec2_instance_profile_arn"].(string); ok && v != "" { + apiObject.Ec2InstanceProfileArn = aws.String(v) + } + + if v, ok := tfMap["instance_requirements"].([]any); ok && len(v) > 0 { + apiObject.InstanceRequirements = expandInstanceRequirementsRequest(v) + } + + if v, ok := tfMap["monitoring"].(string); ok && v != "" { + apiObject.Monitoring = awstypes.ManagedInstancesMonitoringOptions(v) + } + + if v, ok := tfMap[names.AttrNetworkConfiguration].([]any); ok && len(v) > 0 { + apiObject.NetworkConfiguration = expandManagedInstancesNetworkConfiguration(v) + } + + if v, ok := tfMap["storage_configuration"].([]any); ok && len(v) > 0 { + apiObject.StorageConfiguration = expandManagedInstancesStorageConfiguration(v) + } + + return apiObject +} + +func expandManagedInstancesNetworkConfiguration(tfList []any) *awstypes.ManagedInstancesNetworkConfiguration { + if len(tfList) == 0 || tfList[0] == nil { + return nil + } + + tfMap := tfList[0].(map[string]any) + apiObject := &awstypes.ManagedInstancesNetworkConfiguration{} + + if v, ok := tfMap[names.AttrSecurityGroups].(*schema.Set); ok && v.Len() > 0 { + apiObject.SecurityGroups = flex.ExpandStringValueSet(v) + } + + if v, ok := tfMap[names.AttrSubnets].(*schema.Set); ok && v.Len() > 0 { + apiObject.Subnets = flex.ExpandStringValueSet(v) + } + + return apiObject +} + +func expandManagedInstancesStorageConfiguration(tfList []any) *awstypes.ManagedInstancesStorageConfiguration { + if len(tfList) == 0 || tfList[0] == nil { + return nil + } + + tfMap := tfList[0].(map[string]any) + apiObject 
:= &awstypes.ManagedInstancesStorageConfiguration{} + + if v, ok := tfMap["storage_size_gib"].(int); ok && v > 0 { + apiObject.StorageSizeGiB = aws.Int32(int32(v)) + } + + return apiObject +} + +func expandInstanceRequirementsRequest(tfList []any) *awstypes.InstanceRequirementsRequest { + if len(tfList) == 0 || tfList[0] == nil { + return nil + } + + tfMap := tfList[0].(map[string]any) + apiObject := &awstypes.InstanceRequirementsRequest{} + + if v, ok := tfMap["accelerator_count"].([]any); ok && len(v) > 0 { + apiObject.AcceleratorCount = expandAcceleratorCountRequest(v) + } + + if v, ok := tfMap["accelerator_manufacturers"].(*schema.Set); ok && v.Len() > 0 { + apiObject.AcceleratorManufacturers = flex.ExpandStringyValueSet[awstypes.AcceleratorManufacturer](v) + } + + if v, ok := tfMap["accelerator_names"].(*schema.Set); ok && v.Len() > 0 { + apiObject.AcceleratorNames = flex.ExpandStringyValueSet[awstypes.AcceleratorName](v) + } + + if v, ok := tfMap["accelerator_total_memory_mib"].([]any); ok && len(v) > 0 { + apiObject.AcceleratorTotalMemoryMiB = expandAcceleratorTotalMemoryMiBRequest(v) + } + + if v, ok := tfMap["accelerator_types"].(*schema.Set); ok && v.Len() > 0 { + apiObject.AcceleratorTypes = flex.ExpandStringyValueSet[awstypes.AcceleratorType](v) + } + + if v, ok := tfMap["allowed_instance_types"].(*schema.Set); ok && v.Len() > 0 { + apiObject.AllowedInstanceTypes = flex.ExpandStringValueSet(v) + } + + if v, ok := tfMap["bare_metal"].(string); ok && v != "" { + apiObject.BareMetal = awstypes.BareMetal(v) + } + + if v, ok := tfMap["baseline_ebs_bandwidth_mbps"].([]any); ok && len(v) > 0 { + apiObject.BaselineEbsBandwidthMbps = expandBaselineEBSBandwidthMbpsRequest(v) + } + + if v, ok := tfMap["burstable_performance"].(string); ok && v != "" { + apiObject.BurstablePerformance = awstypes.BurstablePerformance(v) + } + + if v, ok := tfMap["cpu_manufacturers"].(*schema.Set); ok && v.Len() > 0 { + apiObject.CpuManufacturers = 
flex.ExpandStringyValueSet[awstypes.CpuManufacturer](v) + } + + if v, ok := tfMap["excluded_instance_types"].(*schema.Set); ok && v.Len() > 0 { + apiObject.ExcludedInstanceTypes = flex.ExpandStringValueSet(v) + } + + if v, ok := tfMap["instance_generations"].(*schema.Set); ok && v.Len() > 0 { + apiObject.InstanceGenerations = flex.ExpandStringyValueSet[awstypes.InstanceGeneration](v) + } + + if v, ok := tfMap["local_storage"].(string); ok && v != "" { + apiObject.LocalStorage = awstypes.LocalStorage(v) + } + + if v, ok := tfMap["local_storage_types"].(*schema.Set); ok && v.Len() > 0 { + apiObject.LocalStorageTypes = flex.ExpandStringyValueSet[awstypes.LocalStorageType](v) + } + + if v, ok := tfMap["max_spot_price_as_percentage_of_optimal_on_demand_price"].(int); ok && v > 0 { + apiObject.MaxSpotPriceAsPercentageOfOptimalOnDemandPrice = aws.Int32(int32(v)) + } + + if v, ok := tfMap["memory_gib_per_vcpu"].([]any); ok && len(v) > 0 { + apiObject.MemoryGiBPerVCpu = expandMemoryGiBPerVCPURequest(v) + } + + if v, ok := tfMap["memory_mib"].([]any); ok && len(v) > 0 { + apiObject.MemoryMiB = expandMemoryMiBRequest(v) + } + + if v, ok := tfMap["network_bandwidth_gbps"].([]any); ok && len(v) > 0 { + apiObject.NetworkBandwidthGbps = expandNetworkBandwidthGbpsRequest(v) + } + + if v, ok := tfMap["network_interface_count"].([]any); ok && len(v) > 0 { + apiObject.NetworkInterfaceCount = expandNetworkInterfaceCountRequest(v) + } + + if v, ok := tfMap["on_demand_max_price_percentage_over_lowest_price"].(int); ok && v > 0 { + apiObject.OnDemandMaxPricePercentageOverLowestPrice = aws.Int32(int32(v)) + } + + if v, ok := tfMap["require_hibernate_support"].(bool); ok { + apiObject.RequireHibernateSupport = aws.Bool(v) + } + + if v, ok := tfMap["spot_max_price_percentage_over_lowest_price"].(int); ok && v > 0 { + apiObject.SpotMaxPricePercentageOverLowestPrice = aws.Int32(int32(v)) + } + + if v, ok := tfMap["total_local_storage_gb"].([]any); ok && len(v) > 0 { + 
apiObject.TotalLocalStorageGB = expandTotalLocalStorageGBRequest(v) + } + + if v, ok := tfMap["vcpu_count"].([]any); ok && len(v) > 0 { + apiObject.VCpuCount = expandVCPUCountRangeRequest(v) + } + + return apiObject +} + +func expandVCPUCountRangeRequest(tfList []any) *awstypes.VCpuCountRangeRequest { + if len(tfList) == 0 || tfList[0] == nil { + return nil + } + + tfMap := tfList[0].(map[string]any) + apiObject := &awstypes.VCpuCountRangeRequest{} + + if v, ok := tfMap[names.AttrMin].(int); ok && v > 0 { + apiObject.Min = aws.Int32(int32(v)) + } + + if v, ok := tfMap[names.AttrMax].(int); ok && v > 0 { + apiObject.Max = aws.Int32(int32(v)) + } + + return apiObject +} + +func expandMemoryMiBRequest(tfList []any) *awstypes.MemoryMiBRequest { + if len(tfList) == 0 || tfList[0] == nil { + return nil + } + + tfMap := tfList[0].(map[string]any) + apiObject := &awstypes.MemoryMiBRequest{} + + if v, ok := tfMap[names.AttrMin].(int); ok && v > 0 { + apiObject.Min = aws.Int32(int32(v)) + } + + if v, ok := tfMap[names.AttrMax].(int); ok && v > 0 { + apiObject.Max = aws.Int32(int32(v)) + } + + return apiObject +} + +func expandMemoryGiBPerVCPURequest(tfList []any) *awstypes.MemoryGiBPerVCpuRequest { + if len(tfList) == 0 || tfList[0] == nil { + return nil + } + + tfMap := tfList[0].(map[string]any) + apiObject := &awstypes.MemoryGiBPerVCpuRequest{} + + if v, ok := tfMap[names.AttrMin].(float64); ok && v > 0 { + apiObject.Min = aws.Float64(v) + } + + if v, ok := tfMap[names.AttrMax].(float64); ok && v > 0 { + apiObject.Max = aws.Float64(v) + } + + return apiObject +} + +func expandNetworkBandwidthGbpsRequest(tfList []any) *awstypes.NetworkBandwidthGbpsRequest { + if len(tfList) == 0 || tfList[0] == nil { + return nil + } + + tfMap := tfList[0].(map[string]any) + apiObject := &awstypes.NetworkBandwidthGbpsRequest{} + + if v, ok := tfMap[names.AttrMin].(float64); ok && v > 0 { + apiObject.Min = aws.Float64(v) + } + + if v, ok := tfMap[names.AttrMax].(float64); ok && v > 0 { + 
apiObject.Max = aws.Float64(v) + } + + return apiObject +} + +func expandNetworkInterfaceCountRequest(tfList []any) *awstypes.NetworkInterfaceCountRequest { + if len(tfList) == 0 || tfList[0] == nil { + return nil + } + + tfMap := tfList[0].(map[string]any) + apiObject := &awstypes.NetworkInterfaceCountRequest{} + + if v, ok := tfMap[names.AttrMin].(int); ok && v > 0 { + apiObject.Min = aws.Int32(int32(v)) + } + + if v, ok := tfMap[names.AttrMax].(int); ok && v > 0 { + apiObject.Max = aws.Int32(int32(v)) + } + + return apiObject +} + +func expandTotalLocalStorageGBRequest(tfList []any) *awstypes.TotalLocalStorageGBRequest { + if len(tfList) == 0 || tfList[0] == nil { + return nil + } + + tfMap := tfList[0].(map[string]any) + apiObject := &awstypes.TotalLocalStorageGBRequest{} + + if v, ok := tfMap[names.AttrMin].(float64); ok && v > 0 { + apiObject.Min = aws.Float64(v) + } + + if v, ok := tfMap[names.AttrMax].(float64); ok && v > 0 { + apiObject.Max = aws.Float64(v) + } + + return apiObject +} + +func expandBaselineEBSBandwidthMbpsRequest(tfList []any) *awstypes.BaselineEbsBandwidthMbpsRequest { + if len(tfList) == 0 || tfList[0] == nil { + return nil + } + + tfMap := tfList[0].(map[string]any) + apiObject := &awstypes.BaselineEbsBandwidthMbpsRequest{} + + if v, ok := tfMap[names.AttrMin].(int); ok && v > 0 { + apiObject.Min = aws.Int32(int32(v)) + } + + if v, ok := tfMap[names.AttrMax].(int); ok && v > 0 { + apiObject.Max = aws.Int32(int32(v)) + } + + return apiObject +} + +func expandAcceleratorCountRequest(tfList []any) *awstypes.AcceleratorCountRequest { + if len(tfList) == 0 || tfList[0] == nil { + return nil + } + + tfMap := tfList[0].(map[string]any) + apiObject := &awstypes.AcceleratorCountRequest{} + + if v, ok := tfMap[names.AttrMin].(int); ok && v >= 0 { + apiObject.Min = aws.Int32(int32(v)) + } + + if v, ok := tfMap[names.AttrMax].(int); ok && v >= 0 { + apiObject.Max = aws.Int32(int32(v)) + } + + return apiObject +} + +func 
expandAcceleratorTotalMemoryMiBRequest(tfList []any) *awstypes.AcceleratorTotalMemoryMiBRequest { + if len(tfList) == 0 || tfList[0] == nil { + return nil + } + + tfMap := tfList[0].(map[string]any) + apiObject := &awstypes.AcceleratorTotalMemoryMiBRequest{} + + if v, ok := tfMap[names.AttrMin].(int); ok && v >= 0 { + apiObject.Min = aws.Int32(int32(v)) + } + + if v, ok := tfMap[names.AttrMax].(int); ok && v >= 0 { + apiObject.Max = aws.Int32(int32(v)) + } + + return apiObject +} + +func flattenManagedInstancesProvider(provider *awstypes.ManagedInstancesProvider) []map[string]any { + if provider == nil { + return nil + } + + tfMap := map[string]any{ + "infrastructure_role_arn": aws.ToString(provider.InfrastructureRoleArn), + names.AttrPropagateTags: string(provider.PropagateTags), + } + + if provider.InstanceLaunchTemplate != nil { + tfMap["instance_launch_template"] = flattenInstanceLaunchTemplate(provider.InstanceLaunchTemplate) + } + + return []map[string]any{tfMap} +} + +func flattenInstanceLaunchTemplate(template *awstypes.InstanceLaunchTemplate) []map[string]any { + if template == nil { + return nil + } + + tfMap := map[string]any{ + "ec2_instance_profile_arn": aws.ToString(template.Ec2InstanceProfileArn), + "monitoring": string(template.Monitoring), + } + + if template.InstanceRequirements != nil { + tfMap["instance_requirements"] = flattenInstanceRequirementsRequest(template.InstanceRequirements) + } + + if template.NetworkConfiguration != nil { + networkConfig := map[string]any{ + names.AttrSubnets: template.NetworkConfiguration.Subnets, + } + if template.NetworkConfiguration.SecurityGroups != nil { + networkConfig[names.AttrSecurityGroups] = template.NetworkConfiguration.SecurityGroups + } + tfMap[names.AttrNetworkConfiguration] = []map[string]any{networkConfig} + } + + if template.StorageConfiguration != nil { + tfMap["storage_configuration"] = []map[string]any{{ + "storage_size_gib": aws.ToInt32(template.StorageConfiguration.StorageSizeGiB), + }} + } + 
+ return []map[string]any{tfMap} +} + +func flattenInstanceRequirementsRequest(req *awstypes.InstanceRequirementsRequest) []map[string]any { + if req == nil { + return nil + } + + tfMap := map[string]any{ + "bare_metal": string(req.BareMetal), + "burstable_performance": string(req.BurstablePerformance), + "local_storage": string(req.LocalStorage), + "max_spot_price_as_percentage_of_optimal_on_demand_price": aws.ToInt32(req.MaxSpotPriceAsPercentageOfOptimalOnDemandPrice), + "on_demand_max_price_percentage_over_lowest_price": aws.ToInt32(req.OnDemandMaxPricePercentageOverLowestPrice), + "require_hibernate_support": aws.ToBool(req.RequireHibernateSupport), + "spot_max_price_percentage_over_lowest_price": aws.ToInt32(req.SpotMaxPricePercentageOverLowestPrice), + } + + if req.AcceleratorCount != nil { + tfMap["accelerator_count"] = []map[string]any{{ + names.AttrMin: aws.ToInt32(req.AcceleratorCount.Min), + names.AttrMax: aws.ToInt32(req.AcceleratorCount.Max), + }} + } + + if req.AcceleratorManufacturers != nil { + tfMap["accelerator_manufacturers"] = req.AcceleratorManufacturers + } + + if req.AcceleratorNames != nil { + tfMap["accelerator_names"] = req.AcceleratorNames + } + + if req.AcceleratorTotalMemoryMiB != nil { + tfMap["accelerator_total_memory_mib"] = []map[string]any{{ + names.AttrMin: aws.ToInt32(req.AcceleratorTotalMemoryMiB.Min), + names.AttrMax: aws.ToInt32(req.AcceleratorTotalMemoryMiB.Max), + }} + } + + if req.AcceleratorTypes != nil { + tfMap["accelerator_types"] = req.AcceleratorTypes + } + + if req.AllowedInstanceTypes != nil { + tfMap["allowed_instance_types"] = req.AllowedInstanceTypes + } + + if req.BaselineEbsBandwidthMbps != nil { + tfMap["baseline_ebs_bandwidth_mbps"] = []map[string]any{{ + names.AttrMin: aws.ToInt32(req.BaselineEbsBandwidthMbps.Min), + names.AttrMax: aws.ToInt32(req.BaselineEbsBandwidthMbps.Max), + }} + } + + if req.CpuManufacturers != nil { + tfMap["cpu_manufacturers"] = req.CpuManufacturers + } + + if 
req.ExcludedInstanceTypes != nil { + tfMap["excluded_instance_types"] = req.ExcludedInstanceTypes + } + + if req.InstanceGenerations != nil { + tfMap["instance_generations"] = req.InstanceGenerations + } + + if req.LocalStorageTypes != nil { + tfMap["local_storage_types"] = req.LocalStorageTypes + } + + if req.MemoryGiBPerVCpu != nil { + tfMap["memory_gib_per_vcpu"] = []map[string]any{{ + names.AttrMin: aws.ToFloat64(req.MemoryGiBPerVCpu.Min), + names.AttrMax: aws.ToFloat64(req.MemoryGiBPerVCpu.Max), + }} + } + + if req.MemoryMiB != nil { + tfMap["memory_mib"] = []map[string]any{{ + names.AttrMin: aws.ToInt32(req.MemoryMiB.Min), + names.AttrMax: aws.ToInt32(req.MemoryMiB.Max), + }} + } + + if req.NetworkBandwidthGbps != nil { + tfMap["network_bandwidth_gbps"] = []map[string]any{{ + names.AttrMin: aws.ToFloat64(req.NetworkBandwidthGbps.Min), + names.AttrMax: aws.ToFloat64(req.NetworkBandwidthGbps.Max), + }} + } + + if req.NetworkInterfaceCount != nil { + tfMap["network_interface_count"] = []map[string]any{{ + names.AttrMin: aws.ToInt32(req.NetworkInterfaceCount.Min), + names.AttrMax: aws.ToInt32(req.NetworkInterfaceCount.Max), + }} + } + + if req.TotalLocalStorageGB != nil { + tfMap["total_local_storage_gb"] = []map[string]any{{ + names.AttrMin: aws.ToFloat64(req.TotalLocalStorageGB.Min), + names.AttrMax: aws.ToFloat64(req.TotalLocalStorageGB.Max), + }} + } + + if req.VCpuCount != nil { + tfMap["vcpu_count"] = []map[string]any{{ + names.AttrMin: aws.ToInt32(req.VCpuCount.Min), + names.AttrMax: aws.ToInt32(req.VCpuCount.Max), + }} + } + + return []map[string]any{tfMap} +} diff --git a/internal/service/ecs/capacity_provider_identity_gen_test.go b/internal/service/ecs/capacity_provider_identity_gen_test.go index d30de9f063ec..4fb8e08511be 100644 --- a/internal/service/ecs/capacity_provider_identity_gen_test.go +++ b/internal/service/ecs/capacity_provider_identity_gen_test.go @@ -27,7 +27,7 @@ func TestAccECSCapacityProvider_Identity_Basic(t *testing.T) { resourceName 
:= "aws_ecs_capacity_provider.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ TerraformVersionChecks: []tfversion.TerraformVersionCheck{ tfversion.SkipBelow(tfversion.Version1_12_0), }, @@ -49,6 +49,9 @@ func TestAccECSCapacityProvider_Identity_Basic(t *testing.T) { tfstatecheck.ExpectRegionalARNFormat(resourceName, tfjsonpath.New(names.AttrARN), "ecs", "capacity-provider/{name}"), statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New(names.AttrARN), compare.ValuesSame()), statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), }, }, @@ -110,7 +113,7 @@ func TestAccECSCapacityProvider_Identity_RegionOverride(t *testing.T) { resourceName := "aws_ecs_capacity_provider.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ TerraformVersionChecks: []tfversion.TerraformVersionCheck{ tfversion.SkipBelow(tfversion.Version1_12_0), }, @@ -130,6 +133,9 @@ func TestAccECSCapacityProvider_Identity_RegionOverride(t *testing.T) { tfstatecheck.ExpectRegionalARNAlternateRegionFormat(resourceName, tfjsonpath.New(names.AttrARN), "ecs", "capacity-provider/{name}"), statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New(names.AttrARN), compare.ValuesSame()), statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), 
statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), }, }, @@ -221,3 +227,131 @@ func TestAccECSCapacityProvider_Identity_RegionOverride(t *testing.T) { }, }) } + +func TestAccECSCapacityProvider_Identity_ExistingResource(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.CapacityProvider + resourceName := "aws_ecs_capacity_provider.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.ECSServiceID), + CheckDestroy: testAccCheckCapacityProviderDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/CapacityProvider/basic_v5.100.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckCapacityProviderExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: v6.0 Identity error + { + ConfigDirectory: config.StaticDirectory("testdata/CapacityProvider/basic_v6.0.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckCapacityProviderExists(ctx, resourceName, &v), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + 
names.AttrARN: knownvalue.Null(), + }), + }, + }, + + // Step 3: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/CapacityProvider/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + }, + }) +} + +func TestAccECSCapacityProvider_Identity_ExistingResource_NoRefresh_NoChange(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.CapacityProvider + resourceName := "aws_ecs_capacity_provider.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.ECSServiceID), + CheckDestroy: testAccCheckCapacityProviderDestroy(ctx), + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/CapacityProvider/basic_v5.100.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckCapacityProviderExists(ctx, resourceName, &v), + ), + ConfigStateChecks: 
[]statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/CapacityProvider/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckCapacityProviderExists(ctx, resourceName, &v), + ), + }, + }, + }) +} diff --git a/internal/service/ecs/capacity_provider_test.go b/internal/service/ecs/capacity_provider_test.go index d1c430525041..254f5919f907 100644 --- a/internal/service/ecs/capacity_provider_test.go +++ b/internal/service/ecs/capacity_provider_test.go @@ -12,14 +12,8 @@ import ( awstypes "github.com/aws/aws-sdk-go-v2/service/ecs/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" - "github.com/hashicorp/terraform-plugin-testing/knownvalue" - "github.com/hashicorp/terraform-plugin-testing/plancheck" - "github.com/hashicorp/terraform-plugin-testing/statecheck" "github.com/hashicorp/terraform-plugin-testing/terraform" - "github.com/hashicorp/terraform-plugin-testing/tfversion" "github.com/hashicorp/terraform-provider-aws/internal/acctest" - tfknownvalue "github.com/hashicorp/terraform-provider-aws/internal/acctest/knownvalue" - tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" "github.com/hashicorp/terraform-provider-aws/internal/conns" tfecs "github.com/hashicorp/terraform-provider-aws/internal/service/ecs" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" @@ -236,79 +230,187 @@ func TestAccECSCapacityProvider_tags(t *testing.T) { }) } -func TestAccECSCapacityProvider_Identity_ExistingResource(t *testing.T) { +func TestAccECSCapacityProvider_clusterFieldValidations(t *testing.T) { + ctx := acctest.Context(t) + rName := 
sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.ECSServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckCapacityProviderDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccCapacityProviderConfig_autoScalingGroups_withCluster(rName), + ExpectError: regexache.MustCompile(`cluster must not be set when using auto_scaling_group_provider`), + }, + { + Config: testAccCapacityProviderConfig_managedInstances_withoutCluster(rName), + ExpectError: regexache.MustCompile(`cluster is required when using managed_instances_provider`), + }, + }, + }) +} + +func TestAccECSCapacityProvider_mutualExclusivity(t *testing.T) { + ctx := acctest.Context(t) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.ECSServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckCapacityProviderDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccCapacityProviderConfig_bothProviders(rName), + ExpectError: regexache.MustCompile(`only one of auto_scaling_group_provider or managed_instances_provider must be specified`), + }, + { + Config: testAccCapacityProviderConfig_noProviders(rName), + ExpectError: regexache.MustCompile(`exactly one of auto_scaling_group_provider or managed_instances_provider must be specified`), + }, + }, + }) +} + +func TestAccECSCapacityProvider_createManagedInstancesProvider_basic(t *testing.T) { ctx := acctest.Context(t) var provider awstypes.CapacityProvider rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_ecs_capacity_provider.test" resource.ParallelTest(t, resource.TestCase{ - TerraformVersionChecks: []tfversion.TerraformVersionCheck{ - 
tfversion.SkipBelow(tfversion.Version1_12_0), - }, - PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, names.ECSServiceID), - CheckDestroy: testAccCheckCapacityProviderDestroy(ctx), + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.ECSServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckCapacityProviderDestroy(ctx), Steps: []resource.TestStep{ { - ExternalProviders: map[string]resource.ExternalProvider{ - "aws": { - Source: "hashicorp/aws", - VersionConstraint: "5.100.0", - }, - }, - Config: testAccCapacityProviderConfig_basic(rName), + Config: testAccCapacityProviderConfig_managedInstancesProvider_basic(rName), Check: resource.ComposeAggregateTestCheckFunc( testAccCheckCapacityProviderExists(ctx, resourceName, &provider), + resource.TestCheckResourceAttr(resourceName, "managed_instances_provider.#", "1"), + resource.TestCheckResourceAttrPair(resourceName, "managed_instances_provider.0.infrastructure_role_arn", "aws_iam_role.test", names.AttrARN), + resource.TestCheckResourceAttr(resourceName, "managed_instances_provider.0.instance_launch_template.#", "1"), + resource.TestCheckResourceAttrPair(resourceName, "managed_instances_provider.0.instance_launch_template.0.ec2_instance_profile_arn", "aws_iam_instance_profile.test", names.AttrARN), + resource.TestCheckResourceAttr(resourceName, "managed_instances_provider.0.instance_launch_template.0.network_configuration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "managed_instances_provider.0.instance_launch_template.0.network_configuration.0.subnets.#", "2"), + acctest.CheckResourceAttrRegionalARN(ctx, resourceName, names.AttrID, "ecs", fmt.Sprintf("capacity-provider/%s", rName)), + resource.TestCheckResourceAttrPair(resourceName, names.AttrID, resourceName, names.AttrARN), + resource.TestCheckResourceAttr(resourceName, names.AttrName, rName), + resource.TestCheckResourceAttr(resourceName, 
"cluster", rName), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, "0"), ), - ConfigStateChecks: []statecheck.StateCheck{ - tfstatecheck.ExpectNoIdentity(resourceName), - }, }, { - ExternalProviders: map[string]resource.ExternalProvider{ - "aws": { - Source: "hashicorp/aws", - VersionConstraint: "6.0.0", - }, - }, - Config: testAccCapacityProviderConfig_basic(rName), + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccECSCapacityProvider_createManagedInstancesProvider_withInstanceRequirements(t *testing.T) { + ctx := acctest.Context(t) + var provider awstypes.CapacityProvider + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_ecs_capacity_provider.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.ECSServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckCapacityProviderDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccCapacityProviderConfig_managedInstancesProvider_withInstanceRequirements(rName), Check: resource.ComposeAggregateTestCheckFunc( testAccCheckCapacityProviderExists(ctx, resourceName, &provider), + resource.TestCheckResourceAttr(resourceName, "managed_instances_provider.#", "1"), + resource.TestCheckResourceAttr(resourceName, "managed_instances_provider.0.instance_launch_template.0.instance_requirements.#", "1"), + resource.TestCheckResourceAttr(resourceName, "managed_instances_provider.0.instance_launch_template.0.instance_requirements.0.vcpu_count.#", "1"), + resource.TestCheckResourceAttr(resourceName, "managed_instances_provider.0.instance_launch_template.0.instance_requirements.0.vcpu_count.0.min", "2"), + resource.TestCheckResourceAttr(resourceName, "managed_instances_provider.0.instance_launch_template.0.instance_requirements.0.vcpu_count.0.max", "8"), + 
resource.TestCheckResourceAttr(resourceName, "managed_instances_provider.0.instance_launch_template.0.instance_requirements.0.memory_mib.#", "1"), + resource.TestCheckResourceAttr(resourceName, "managed_instances_provider.0.instance_launch_template.0.instance_requirements.0.memory_mib.0.min", "2048"), + resource.TestCheckResourceAttr(resourceName, "managed_instances_provider.0.instance_launch_template.0.instance_requirements.0.memory_mib.0.max", "16384"), + resource.TestCheckResourceAttr(resourceName, "managed_instances_provider.0.instance_launch_template.0.instance_requirements.0.cpu_manufacturers.#", "2"), + resource.TestCheckResourceAttr(resourceName, "managed_instances_provider.0.instance_launch_template.0.instance_requirements.0.instance_generations.#", "1"), + resource.TestCheckResourceAttr(resourceName, "managed_instances_provider.0.instance_launch_template.0.instance_requirements.0.burstable_performance", "excluded"), + resource.TestCheckResourceAttr(resourceName, "managed_instances_provider.0.propagate_tags", "NONE"), ), - ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - PostApplyPostRefresh: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - }, - ConfigStateChecks: []statecheck.StateCheck{ - statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ - names.AttrARN: knownvalue.Null(), - }), - }, }, { - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - Config: testAccCapacityProviderConfig_basic(rName), + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccECSCapacityProvider_createManagedInstancesProvider_withStorageConfiguration(t *testing.T) { + ctx := acctest.Context(t) + var provider awstypes.CapacityProvider + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := 
"aws_ecs_capacity_provider.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.ECSServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckCapacityProviderDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccCapacityProviderConfig_managedInstancesProvider_withStorageConfiguration(rName), Check: resource.ComposeAggregateTestCheckFunc( testAccCheckCapacityProviderExists(ctx, resourceName, &provider), + resource.TestCheckResourceAttr(resourceName, "managed_instances_provider.#", "1"), + resource.TestCheckResourceAttr(resourceName, "managed_instances_provider.0.instance_launch_template.0.storage_configuration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "managed_instances_provider.0.instance_launch_template.0.storage_configuration.0.storage_size_gib", "50"), ), - ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - PostApplyPostRefresh: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - }, - ConfigStateChecks: []statecheck.StateCheck{ - statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ - names.AttrARN: tfknownvalue.RegionalARNRegexp("ecs", regexache.MustCompile(`capacity-provider/.+`)), - }), - }, + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccECSCapacityProvider_updateManagedInstancesProvider(t *testing.T) { + ctx := acctest.Context(t) + var provider awstypes.CapacityProvider + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_ecs_capacity_provider.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.ECSServiceID), + 
ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckCapacityProviderDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccCapacityProviderConfig_managedInstancesProvider_basic(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckCapacityProviderExists(ctx, resourceName, &provider), + resource.TestCheckResourceAttr(resourceName, "managed_instances_provider.0.propagate_tags", ""), + ), + }, + { + Config: testAccCapacityProviderConfig_updateManagedInstancesProvider(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckCapacityProviderExists(ctx, resourceName, &provider), + resource.TestCheckResourceAttr(resourceName, "managed_instances_provider.0.propagate_tags", "NONE"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, }, }, }) @@ -480,3 +582,305 @@ resource "aws_ecs_capacity_provider" "test" { } `, rName, tag1Key, tag1Value, tag2Key, tag2Value)) } + +func testAccCapacityProviderConfig_bothProviders(rName string) string { + return fmt.Sprintf(` +data "aws_partition" "current" {} +data "aws_region" "current" {} + +resource "aws_ecs_capacity_provider" "test" { + name = %[1]q + cluster = "dummy" + + auto_scaling_group_provider { + auto_scaling_group_arn = "arn:${data.aws_partition.current.partition}:autoscaling:${data.aws_region.current.region}:000000000000:autoScalingGroup:a4536b1a-b122-49ef-918f-bfaed967ccfa:autoScalingGroupName/dummy" + } + + managed_instances_provider { + infrastructure_role_arn = "arn:${data.aws_partition.current.partition}:iam::000000000000:role/dummy" + + instance_launch_template { + ec2_instance_profile_arn = "arn:${data.aws_partition.current.partition}:iam::000000000000:instance-profile/dummy" + + network_configuration { + subnets = ["subnet-0b48066557a0e97ac"] + } + + instance_requirements { + vcpu_count { + min = 1 + } + + memory_mib { + min = 1024 + } + } + } + } +} +`, rName) +} + +func 
testAccCapacityProviderConfig_autoScalingGroups_withCluster(rName string) string { + return fmt.Sprintf(` +data "aws_partition" "current" {} +data "aws_region" "current" {} + +resource "aws_ecs_capacity_provider" "test" { + name = %[1]q + cluster = "dummy" + + auto_scaling_group_provider { + auto_scaling_group_arn = "arn:${data.aws_partition.current.partition}:autoscaling:${data.aws_region.current.region}:000000000000:autoScalingGroup:a4536b1a-b122-49ef-918f-bfaed967ccfa:autoScalingGroupName/dummy" + } +} +`, rName) +} + +func testAccCapacityProviderConfig_managedInstances_withoutCluster(rName string) string { + return fmt.Sprintf(` +data "aws_partition" "current" {} + +resource "aws_ecs_capacity_provider" "test" { + name = %[1]q + + managed_instances_provider { + infrastructure_role_arn = "arn:${data.aws_partition.current.partition}:iam::000000000000:role/dummy" + + instance_launch_template { + ec2_instance_profile_arn = "arn:${data.aws_partition.current.partition}:iam::000000000000:instance-profile/dummy" + + network_configuration { + subnets = ["subnet-0b48066557a0e97ac"] + } + + instance_requirements { + vcpu_count { + min = 1 + } + + memory_mib { + min = 1024 + } + } + } + } +} +`, rName) +} + +func testAccCapacityProviderConfig_noProviders(rName string) string { + return fmt.Sprintf(` +resource "aws_ecs_capacity_provider" "test" { + name = %[1]q +} +`, rName) +} + +func testAccCapacityProviderConfig_managedInstancesProvider_base(rName string) string { + return acctest.ConfigCompose( + acctest.ConfigAvailableAZsNoOptInDefaultExclude(), + fmt.Sprintf(` +data "aws_partition" "current" {} + +resource "aws_vpc" "test" { + cidr_block = "10.0.0.0/16" + enable_dns_hostnames = false + enable_dns_support = false + + tags = { + Name = %[1]q + } +} + +resource "aws_subnet" "test" { + count = 2 + + availability_zone = data.aws_availability_zones.available.names[count.index] + cidr_block = "10.0.${count.index}.0/24" + vpc_id = aws_vpc.test.id + + tags = { + Name = 
"%[1]s-${count.index}" + } +} + +resource "aws_ecs_cluster" "test" { + name = %[1]q +} + +data "aws_iam_policy_document" "test_assume_role_policy" { + statement { + actions = ["sts:AssumeRole"] + + principals { + type = "Service" + identifiers = ["ecs.${data.aws_partition.current.dns_suffix}"] + } + } +} + +resource "aws_iam_role" "test" { + name = %[1]q + assume_role_policy = data.aws_iam_policy_document.test_assume_role_policy.json +} + +resource "aws_iam_role_policy_attachment" "test" { + role = aws_iam_role.test.name + policy_arn = "arn:${data.aws_partition.current.partition}:iam::aws:policy/AdministratorAccess" +} + +data "aws_iam_policy_document" "test_instance_assume_role_policy" { + statement { + actions = ["sts:AssumeRole"] + + principals { + type = "Service" + identifiers = ["ec2.${data.aws_partition.current.dns_suffix}"] + } + } +} + +resource "aws_iam_role" "test_instance" { + name = "%[1]s-instance" + assume_role_policy = data.aws_iam_policy_document.test_instance_assume_role_policy.json +} + +resource "aws_iam_role_policy_attachment" "test_instance" { + role = aws_iam_role.test_instance.name + policy_arn = "arn:${data.aws_partition.current.partition}:iam::aws:policy/service-role/AmazonEC2ContainerServiceforEC2Role" +} + +resource "aws_iam_instance_profile" "test" { + name = %[1]q + role = aws_iam_role.test_instance.name +} +`, rName)) +} + +func testAccCapacityProviderConfig_managedInstancesProvider_basic(rName string) string { + return acctest.ConfigCompose(testAccCapacityProviderConfig_managedInstancesProvider_base(rName), fmt.Sprintf(` +resource "aws_ecs_capacity_provider" "test" { + name = %[1]q + cluster = aws_ecs_cluster.test.name + + managed_instances_provider { + infrastructure_role_arn = aws_iam_role.test.arn + + instance_launch_template { + ec2_instance_profile_arn = aws_iam_instance_profile.test.arn + + network_configuration { + subnets = aws_subnet.test[*].id + } + } + } +} +`, rName)) +} + +func 
testAccCapacityProviderConfig_managedInstancesProvider_withInstanceRequirements(rName string) string { + return acctest.ConfigCompose(testAccCapacityProviderConfig_managedInstancesProvider_base(rName), fmt.Sprintf(` +resource "aws_ecs_capacity_provider" "test" { + name = %[1]q + cluster = aws_ecs_cluster.test.name + + managed_instances_provider { + infrastructure_role_arn = aws_iam_role.test.arn + propagate_tags = "NONE" + + instance_launch_template { + ec2_instance_profile_arn = aws_iam_instance_profile.test.arn + + network_configuration { + subnets = aws_subnet.test[*].id + } + + instance_requirements { + vcpu_count { + min = 2 + max = 8 + } + + memory_mib { + min = 2048 + max = 16384 + } + + cpu_manufacturers = ["intel", "amd"] + instance_generations = ["current"] + burstable_performance = "excluded" + } + } + } +} +`, rName)) +} + +func testAccCapacityProviderConfig_managedInstancesProvider_withStorageConfiguration(rName string) string { + return acctest.ConfigCompose(testAccCapacityProviderConfig_managedInstancesProvider_base(rName), fmt.Sprintf(` +resource "aws_ecs_capacity_provider" "test" { + name = %[1]q + cluster = aws_ecs_cluster.test.name + + managed_instances_provider { + infrastructure_role_arn = aws_iam_role.test.arn + propagate_tags = "CAPACITY_PROVIDER" + + instance_launch_template { + ec2_instance_profile_arn = aws_iam_instance_profile.test.arn + + network_configuration { + subnets = aws_subnet.test[*].id + } + + storage_configuration { + storage_size_gib = 50 + } + + instance_requirements { + vcpu_count { + min = 1 + } + + memory_mib { + min = 1024 + } + } + } + } +} +`, rName)) +} + +func testAccCapacityProviderConfig_updateManagedInstancesProvider(rName string) string { + return acctest.ConfigCompose(testAccCapacityProviderConfig_managedInstancesProvider_base(rName), fmt.Sprintf(` +resource "aws_ecs_capacity_provider" "test" { + name = %[1]q + cluster = aws_ecs_cluster.test.name + + managed_instances_provider { + infrastructure_role_arn = 
aws_iam_role.test.arn + propagate_tags = "NONE" + + instance_launch_template { + ec2_instance_profile_arn = aws_iam_instance_profile.test.arn + + network_configuration { + subnets = aws_subnet.test[*].id + } + + instance_requirements { + vcpu_count { + min = 1 + } + + memory_mib { + min = 1024 + } + } + } + } +} +`, rName)) +} diff --git a/internal/service/ecs/cluster.go b/internal/service/ecs/cluster.go index 7f3626e80a85..dcda35bee761 100644 --- a/internal/service/ecs/cluster.go +++ b/internal/service/ecs/cluster.go @@ -230,7 +230,7 @@ func resourceClusterRead(ctx context.Context, d *schema.ResourceData, meta any) const ( timeout = 2 * time.Second ) - outputRaw, err := tfresource.RetryWhenNewResourceNotFound(ctx, timeout, func() (any, error) { + cluster, err := tfresource.RetryWhenNewResourceNotFound(ctx, timeout, func(ctx context.Context) (*awstypes.Cluster, error) { return findClusterByNameOrARN(ctx, conn, d.Id()) }, d.IsNewResource()) @@ -244,7 +244,6 @@ func resourceClusterRead(ctx context.Context, d *schema.ResourceData, meta any) return sdkdiag.AppendErrorf(diags, "reading ECS Cluster (%s): %s", d.Id(), err) } - cluster := outputRaw.(*awstypes.Cluster) d.Set(names.AttrARN, cluster.ClusterArn) if cluster.Configuration != nil { if err := d.Set(names.AttrConfiguration, flattenClusterConfiguration(cluster.Configuration)); err != nil { @@ -311,7 +310,7 @@ func resourceClusterDelete(ctx context.Context, d *schema.ResourceData, meta any const ( timeout = 10 * time.Minute ) - _, err := tfresource.RetryWhenIsOneOf4[*awstypes.ClusterContainsContainerInstancesException, *awstypes.ClusterContainsServicesException, *awstypes.ClusterContainsTasksException, *awstypes.UpdateInProgressException](ctx, timeout, func() (any, error) { + _, err := tfresource.RetryWhenIsOneOf4[any, *awstypes.ClusterContainsContainerInstancesException, *awstypes.ClusterContainsServicesException, *awstypes.ClusterContainsTasksException, *awstypes.UpdateInProgressException](ctx, timeout, func(ctx 
context.Context) (any, error) { return conn.DeleteCluster(ctx, &ecs.DeleteClusterInput{ Cluster: aws.String(d.Id()), }) @@ -344,7 +343,7 @@ func resourceClusterImport(ctx context.Context, d *schema.ResourceData, meta any } func retryClusterCreate(ctx context.Context, conn *ecs.Client, input *ecs.CreateClusterInput) (*ecs.CreateClusterOutput, error) { - outputRaw, err := tfresource.RetryWhenIsAErrorMessageContains[*awstypes.InvalidParameterException](ctx, propagationTimeout, func() (any, error) { + outputRaw, err := tfresource.RetryWhenIsAErrorMessageContains[any, *awstypes.InvalidParameterException](ctx, propagationTimeout, func(ctx context.Context) (any, error) { return conn.CreateCluster(ctx, input) }, "Unable to assume the service linked role") diff --git a/internal/service/ecs/cluster_capacity_providers.go b/internal/service/ecs/cluster_capacity_providers.go index a330ed0a5feb..c2fec8cac0b7 100644 --- a/internal/service/ecs/cluster_capacity_providers.go +++ b/internal/service/ecs/cluster_capacity_providers.go @@ -168,7 +168,7 @@ func retryClusterCapacityProvidersPut(ctx context.Context, conn *ecs.Client, inp timeout = 10 * time.Minute ) _, err := tfresource.RetryWhen(ctx, timeout, - func() (any, error) { + func(ctx context.Context) (any, error) { return conn.PutClusterCapacityProviders(ctx, input) }, func(err error) (bool, error) { diff --git a/internal/service/ecs/exports_test.go b/internal/service/ecs/exports_test.go index 57f09eff6ff8..6cfba8342be2 100644 --- a/internal/service/ecs/exports_test.go +++ b/internal/service/ecs/exports_test.go @@ -23,6 +23,7 @@ var ( FindTaskDefinitionByFamilyOrARN = findTaskDefinitionByFamilyOrARN FindTaskSetNoTagsByThreePartKey = findTaskSetNoTagsByThreePartKey RoleNameFromARN = roleNameFromARN + ServiceNameFromARN = serviceNameFromARN TaskDefinitionARNStripRevision = taskDefinitionARNStripRevision ValidTaskDefinitionContainerDefinitions = validTaskDefinitionContainerDefinitions ) diff --git a/internal/service/ecs/flex.go 
b/internal/service/ecs/flex.go index d53b331cca79..909cd37444df 100644 --- a/internal/service/ecs/flex.go +++ b/internal/service/ecs/flex.go @@ -56,54 +56,6 @@ func flattenCapacityProviderStrategyItems(apiObjects []awstypes.CapacityProvider return tfList } -func expandLoadBalancers(tfList []any) []awstypes.LoadBalancer { - apiObjects := make([]awstypes.LoadBalancer, 0, len(tfList)) - - for _, tfMapRaw := range tfList { - tfMap := tfMapRaw.(map[string]any) - - apiObject := awstypes.LoadBalancer{ - ContainerName: aws.String(tfMap["container_name"].(string)), - ContainerPort: aws.Int32(int32(tfMap["container_port"].(int))), - } - - if v, ok := tfMap["elb_name"]; ok && v.(string) != "" { - apiObject.LoadBalancerName = aws.String(v.(string)) - } - - if v, ok := tfMap["target_group_arn"]; ok && v.(string) != "" { - apiObject.TargetGroupArn = aws.String(v.(string)) - } - - apiObjects = append(apiObjects, apiObject) - } - - return apiObjects -} - -func flattenLoadBalancers(apiObjects []awstypes.LoadBalancer) []any { - tfList := make([]any, 0, len(apiObjects)) - - for _, apiObject := range apiObjects { - tfMap := map[string]any{ - "container_name": aws.ToString(apiObject.ContainerName), - "container_port": aws.ToInt32(apiObject.ContainerPort), - } - - if apiObject.LoadBalancerName != nil { - tfMap["elb_name"] = aws.ToString(apiObject.LoadBalancerName) - } - - if apiObject.TargetGroupArn != nil { - tfMap["target_group_arn"] = aws.ToString(apiObject.TargetGroupArn) - } - - tfList = append(tfList, tfMap) - } - - return tfList -} - func expandTaskSetLoadBalancers(tfList []any) []awstypes.LoadBalancer { if len(tfList) == 0 || tfList[0] == nil { return nil diff --git a/internal/service/ecs/service.go b/internal/service/ecs/service.go index 5ef3cd77d772..7af6037fac96 100644 --- a/internal/service/ecs/service.go +++ b/internal/service/ecs/service.go @@ -5,17 +5,22 @@ package ecs import ( "context" + "encoding/json" + "errors" "fmt" "log" "math" + "slices" "strconv" "strings" + 
"sync" "time" "github.com/YakDriver/regexache" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/aws/arn" "github.com/aws/aws-sdk-go-v2/service/ecs" + "github.com/aws/aws-sdk-go-v2/service/ecs/document" awstypes "github.com/aws/aws-sdk-go-v2/service/ecs/types" "github.com/hashicorp/aws-sdk-go-base/v2/tfawserr" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" @@ -30,6 +35,8 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" "github.com/hashicorp/terraform-provider-aws/internal/flex" "github.com/hashicorp/terraform-provider-aws/internal/sdkv2" + "github.com/hashicorp/terraform-provider-aws/internal/sdkv2/types/nullable" + "github.com/hashicorp/terraform-provider-aws/internal/smithy" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/internal/verify" @@ -117,7 +124,6 @@ func resourceService() *schema.Resource { Schema: map[string]*schema.Schema{ names.AttrType: { Type: schema.TypeString, - ForceNew: true, Optional: true, Default: awstypes.DeploymentControllerTypeEcs, }, @@ -561,7 +567,7 @@ func resourceService() *schema.Resource { "availability_zone_rebalancing": { Type: schema.TypeString, Optional: true, - Default: awstypes.AvailabilityZoneRebalancingDisabled, + Computed: true, ValidateDiagFunc: enum.Validate[awstypes.AvailabilityZoneRebalancing](), }, names.AttrCapacityProviderStrategy: { @@ -610,6 +616,60 @@ func resourceService() *schema.Resource { }, }, }, + "deployment_configuration": { + Type: schema.TypeList, + Optional: true, + Computed: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "bake_time_in_minutes": { + Type: nullable.TypeNullableInt, + Optional: true, + Computed: true, + ValidateFunc: nullable.ValidateTypeStringNullableIntBetween(0, 1440), + }, + "lifecycle_hook": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Resource{ + 
Schema: map[string]*schema.Schema{ + "hook_target_arn": { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidARN, + }, + "lifecycle_stages": { + Type: schema.TypeList, + Required: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateDiagFunc: enum.Validate[awstypes.DeploymentLifecycleHookStage](), + }, + }, + names.AttrRoleARN: { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidARN, + }, + "hook_details": { + Type: schema.TypeString, + Optional: true, + DiffSuppressFunc: verify.SuppressEquivalentJSONDiffs, + ValidateFunc: verify.ValidStringIsJSONOrYAML, + }, + }, + }, + }, + "strategy": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateDiagFunc: enum.Validate[awstypes.DeploymentStrategy](), + }, + }, + }, + }, "deployment_controller": { Type: schema.TypeList, Optional: true, @@ -619,7 +679,6 @@ func resourceService() *schema.Resource { Schema: map[string]*schema.Schema{ names.AttrType: { Type: schema.TypeString, - ForceNew: true, Optional: true, Default: awstypes.DeploymentControllerTypeEcs, ValidateDiagFunc: enum.Validate[awstypes.DeploymentControllerType](), @@ -715,6 +774,35 @@ func resourceService() *schema.Resource { Optional: true, ValidateFunc: verify.ValidARN, }, + "advanced_configuration": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "alternate_target_group_arn": { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidARN, + }, + "production_listener_rule": { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidARN, + }, + "test_listener_rule": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidARN, + }, + names.AttrRoleARN: { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidARN, + }, + }, + }, + }, }, }, }, @@ -886,6 +974,40 @@ func resourceService() *schema.Resource { Required: true, ValidateFunc: 
validation.IntBetween(0, 65535), }, + "test_traffic_rules": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + names.AttrHeader: { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + names.AttrName: { + Type: schema.TypeString, + Required: true, + }, + names.AttrValue: { + Type: schema.TypeList, + Required: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "exact": { + Type: schema.TypeString, + Required: true, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, }, }, }, @@ -988,6 +1110,10 @@ func resourceService() *schema.Resource { }, }, }, + "sigint_rollback": { + Type: schema.TypeBool, + Optional: true, + }, names.AttrTags: tftags.TagsSchema(), names.AttrTagsAll: tftags.TagsSchemaComputed(), "task_definition": { @@ -1214,6 +1340,30 @@ func resourceServiceCreate(ctx context.Context, d *schema.ResourceData, meta any input.DeploymentConfiguration.DeploymentCircuitBreaker = expandDeploymentCircuitBreaker(v.([]any)[0].(map[string]any)) } + if v, ok := d.GetOk("deployment_configuration"); ok && len(v.([]any)) > 0 && v.([]any)[0] != nil { + config := v.([]any)[0].(map[string]any) + + if strategy, ok := config["strategy"].(string); ok && strategy != "" { + input.DeploymentConfiguration.Strategy = awstypes.DeploymentStrategy(strategy) + + if awstypes.DeploymentStrategy(strategy) == awstypes.DeploymentStrategyBlueGreen { + if v, ok := config["bake_time_in_minutes"].(string); ok { + bakeTime := nullable.Int(v) + if !bakeTime.IsNull() { + value, _, err := bakeTime.ValueInt32() + if err != nil { + return sdkdiag.AppendFromErr(diags, err) + } + input.DeploymentConfiguration.BakeTimeInMinutes = aws.Int32(value) + } + } + } + if hooks := config["lifecycle_hook"].(*schema.Set).List(); len(hooks) > 0 { + input.DeploymentConfiguration.LifecycleHooks = expandLifecycleHooks(hooks) + } + } + } + if v, ok := 
d.GetOk("health_check_grace_period_seconds"); ok { input.HealthCheckGracePeriodSeconds = aws.Int32(int32(v.(int))) } @@ -1233,7 +1383,7 @@ func resourceServiceCreate(ctx context.Context, d *schema.ResourceData, meta any } } - if v := expandLoadBalancers(d.Get("load_balancer").(*schema.Set).List()); len(v) > 0 { + if v := expandServiceLoadBalancers(d.Get("load_balancer").(*schema.Set).List()); len(v) > 0 { input.LoadBalancers = v } @@ -1279,6 +1429,7 @@ func resourceServiceCreate(ctx context.Context, d *schema.ResourceData, meta any input.VolumeConfigurations = expandServiceVolumeConfigurations(ctx, v.([]any)) } + operationTime := time.Now().UTC() output, err := retryServiceCreate(ctx, conn, &input) // Some partitions (e.g. ISO) may not support tag-on-create. @@ -1295,11 +1446,11 @@ func resourceServiceCreate(ctx context.Context, d *schema.ResourceData, meta any d.SetId(aws.ToString(output.Service.ServiceArn)) d.Set(names.AttrARN, output.Service.ServiceArn) - fn := waitServiceActive if d.Get("wait_for_steady_state").(bool) { - fn = waitServiceStable - } - if _, err := fn(ctx, conn, d.Id(), d.Get("cluster").(string), d.Timeout(schema.TimeoutCreate)); err != nil { + if _, err := waitServiceStable(ctx, conn, d.Id(), d.Get("cluster").(string), operationTime, d.Get("sigint_rollback").(bool), d.Timeout(schema.TimeoutCreate)); err != nil { + return sdkdiag.AppendErrorf(diags, "waiting for ECS Service (%s) create: %s", d.Id(), err) + } + } else if _, err := waitServiceActive(ctx, conn, d.Id(), d.Get("cluster").(string), d.Timeout(schema.TimeoutCreate)); err != nil { return sdkdiag.AppendErrorf(diags, "waiting for ECS Service (%s) create: %s", d.Id(), err) } @@ -1337,6 +1488,7 @@ func resourceServiceRead(ctx context.Context, d *schema.ResourceData, meta any) return sdkdiag.AppendErrorf(diags, "reading ECS Service (%s): %s", d.Id(), err) } + d.Set(names.AttrARN, service.ServiceArn) d.Set("availability_zone_rebalancing", service.AvailabilityZoneRebalancing) if err := 
d.Set(names.AttrCapacityProviderStrategy, flattenCapacityProviderStrategyItems(service.CapacityProviderStrategy)); err != nil { return sdkdiag.AppendErrorf(diags, "setting capacity_provider_strategy: %s", err) @@ -1366,6 +1518,10 @@ func resourceServiceRead(ctx context.Context, d *schema.ResourceData, meta any) } else { d.Set("deployment_circuit_breaker", nil) } + + if err := d.Set("deployment_configuration", flattenDeploymentConfiguration(service.DeploymentConfiguration)); err != nil { + return sdkdiag.AppendErrorf(diags, "setting deployment_configuration: %s", err) + } } if err := d.Set("deployment_controller", flattenDeploymentController(service.DeploymentController)); err != nil { return sdkdiag.AppendErrorf(diags, "setting deployment_controller: %s", err) @@ -1384,7 +1540,7 @@ func resourceServiceRead(ctx context.Context, d *schema.ResourceData, meta any) } d.Set("launch_type", service.LaunchType) if service.LoadBalancers != nil { - if err := d.Set("load_balancer", flattenLoadBalancers(service.LoadBalancers)); err != nil { + if err := d.Set("load_balancer", flattenServiceLoadBalancers(service.LoadBalancers)); err != nil { return sdkdiag.AppendErrorf(diags, "setting load_balancer: %s", err) } } @@ -1422,6 +1578,8 @@ func resourceServiceRead(ctx context.Context, d *schema.ResourceData, meta any) if err := d.Set("service_connect_configuration", flattenServiceConnectConfiguration(v)); err != nil { return sdkdiag.AppendErrorf(diags, "setting service_connect_configuration: %s", err) } + } else { + d.Set("service_connect_configuration", nil) } if v := deployment.VolumeConfigurations; len(v) > 0 { if err := d.Set("volume_configuration", flattenServiceVolumeConfigurations(ctx, v)); err != nil { @@ -1467,6 +1625,46 @@ func resourceServiceUpdate(ctx context.Context, d *schema.ResourceData, meta any } } + if d.HasChange("deployment_controller") { + if input.DeploymentController == nil { + input.DeploymentController = &awstypes.DeploymentController{} + } + + if v, ok := 
d.GetOk("deployment_controller"); ok { + input.DeploymentController = expandDeploymentController(v.([]any)) + } + } + + if d.HasChange("deployment_configuration") { + if input.DeploymentConfiguration == nil { + input.DeploymentConfiguration = &awstypes.DeploymentConfiguration{} + } + + if v, ok := d.GetOk("deployment_configuration"); ok && len(v.([]any)) > 0 && v.([]any)[0] != nil { + config := v.([]any)[0].(map[string]any) + + if strategy, ok := config["strategy"].(string); ok && strategy != "" { + input.DeploymentConfiguration.Strategy = awstypes.DeploymentStrategy(strategy) + + if awstypes.DeploymentStrategy(strategy) == awstypes.DeploymentStrategyBlueGreen { + if v, ok := config["bake_time_in_minutes"].(string); ok { + bakeTime := nullable.Int(v) + if !bakeTime.IsNull() { + value, _, err := bakeTime.ValueInt32() + if err != nil { + return sdkdiag.AppendFromErr(diags, err) + } + input.DeploymentConfiguration.BakeTimeInMinutes = aws.Int32(value) + } + } + } + } + if hooks := config["lifecycle_hook"].(*schema.Set).List(); len(hooks) > 0 { + input.DeploymentConfiguration.LifecycleHooks = expandLifecycleHooks(hooks) + } + } + } + if d.HasChange(names.AttrCapacityProviderStrategy) { input.CapacityProviderStrategy = expandCapacityProviderStrategyItems(d.Get(names.AttrCapacityProviderStrategy).(*schema.Set)) } @@ -1522,7 +1720,7 @@ func resourceServiceUpdate(ctx context.Context, d *schema.ResourceData, meta any if d.HasChange("load_balancer") { if v, ok := d.Get("load_balancer").(*schema.Set); ok && v != nil { - input.LoadBalancers = expandLoadBalancers(v.List()) + input.LoadBalancers = expandServiceLoadBalancers(v.List()) } } @@ -1593,8 +1791,9 @@ func resourceServiceUpdate(ctx context.Context, d *schema.ResourceData, meta any serviceUpdateTimeout = 2 * time.Minute timeout = propagationTimeout + serviceUpdateTimeout ) + operationTime := time.Now().UTC() _, err := tfresource.RetryWhen(ctx, timeout, - func() (any, error) { + func(ctx context.Context) (any, error) { 
return conn.UpdateService(ctx, &input) }, func(err error) (bool, error) { @@ -1609,16 +1808,15 @@ func resourceServiceUpdate(ctx context.Context, d *schema.ResourceData, meta any return false, err }, ) - if err != nil { return sdkdiag.AppendErrorf(diags, "updating ECS Service (%s): %s", d.Id(), err) } - fn := waitServiceActive if d.Get("wait_for_steady_state").(bool) { - fn = waitServiceStable - } - if _, err := fn(ctx, conn, d.Id(), cluster, d.Timeout(schema.TimeoutUpdate)); err != nil { + if _, err := waitServiceStable(ctx, conn, d.Id(), cluster, operationTime, d.Get("sigint_rollback").(bool), d.Timeout(schema.TimeoutUpdate)); err != nil { + return sdkdiag.AppendErrorf(diags, "waiting for ECS Service (%s) update: %s", d.Id(), err) + } + } else if _, err := waitServiceActive(ctx, conn, d.Id(), cluster, d.Timeout(schema.TimeoutUpdate)); err != nil { return sdkdiag.AppendErrorf(diags, "waiting for ECS Service (%s) update: %s", d.Id(), err) } } @@ -1657,7 +1855,6 @@ func resourceServiceDelete(ctx context.Context, d *schema.ResourceData, meta any } _, err := conn.UpdateService(ctx, input) - if err != nil { return sdkdiag.AppendErrorf(diags, "draining ECS Service (%s): %s", d.Id(), err) } @@ -1665,7 +1862,7 @@ func resourceServiceDelete(ctx context.Context, d *schema.ResourceData, meta any log.Printf("[DEBUG] Deleting ECS Service: %s", d.Id()) _, err = tfresource.RetryWhen(ctx, d.Timeout(schema.TimeoutDelete), - func() (any, error) { + func(ctx context.Context) (any, error) { return conn.DeleteService(ctx, &ecs.DeleteServiceInput{ Cluster: aws.String(cluster), Force: aws.Bool(forceDelete), @@ -1684,7 +1881,6 @@ func resourceServiceDelete(ctx context.Context, d *schema.ResourceData, meta any return false, err }, ) - if err != nil { return sdkdiag.AppendErrorf(diags, "deleting ECS Service (%s): %s", d.Id(), err) } @@ -1735,7 +1931,7 @@ func retryServiceCreate(ctx context.Context, conn *ecs.Client, input *ecs.Create timeout = propagationTimeout + serviceCreateTimeout ) 
outputRaw, err := tfresource.RetryWhen(ctx, timeout, - func() (any, error) { + func(ctx context.Context) (any, error) { return conn.CreateService(ctx, input) }, func(err error) (bool, error) { @@ -1758,7 +1954,6 @@ func retryServiceCreate(ctx context.Context, conn *ecs.Client, input *ecs.Create return false, err }, ) - if err != nil { return nil, err } @@ -1768,7 +1963,6 @@ func retryServiceCreate(ctx context.Context, conn *ecs.Client, input *ecs.Create func findService(ctx context.Context, conn *ecs.Client, input *ecs.DescribeServicesInput) (*awstypes.Service, error) { output, err := findServices(ctx, conn, input) - if err != nil { return nil, err } @@ -1858,34 +2052,30 @@ func (e *expectServiceActiveError) Error() string { func findServiceByTwoPartKeyWaitForActive(ctx context.Context, conn *ecs.Client, serviceName, clusterNameOrARN string) (*awstypes.Service, error) { var service *awstypes.Service - // Use the retry.RetryContext function instead of WaitForState() because we don't want the timeout error, if any. + // Use the tfresource.Retry function instead of WaitForState() because we don't want the timeout error, if any. 
const ( timeout = 2 * time.Minute ) - err := retry.RetryContext(ctx, timeout, func() *retry.RetryError { + err := tfresource.Retry(ctx, timeout, func(ctx context.Context) *tfresource.RetryError { var err error service, err = findServiceByTwoPartKey(ctx, conn, serviceName, clusterNameOrARN) if tfresource.NotFound(err) { - return retry.RetryableError(err) + return tfresource.RetryableError(err) } if err != nil { - return retry.NonRetryableError(err) + return tfresource.NonRetryableError(err) } if status := aws.ToString(service.Status); status != serviceStatusActive { - return retry.RetryableError(newExpectServiceActiveError(status)) + return tfresource.RetryableError(newExpectServiceActiveError(status)) } return nil }) - if tfresource.TimedOut(err) { - service, err = findServiceByTwoPartKey(ctx, conn, serviceName, clusterNameOrARN) - } - if errs.IsA[*expectServiceActiveError](err) { return nil, &retry.NotFoundError{ LastError: err, @@ -1905,6 +2095,13 @@ const ( serviceStatusStable = "tfSTABLE" ) +var deploymentTerminalStates = enum.Slice( + awstypes.ServiceDeploymentStatusSuccessful, + awstypes.ServiceDeploymentStatusStopped, + awstypes.ServiceDeploymentStatusRollbackFailed, + awstypes.ServiceDeploymentStatusRollbackSuccessful, +) + func statusService(ctx context.Context, conn *ecs.Client, serviceName, clusterNameOrARN string) retry.StateRefreshFunc { return func() (any, string, error) { output, err := findServiceNoTagsByTwoPartKey(ctx, conn, serviceName, clusterNameOrARN) @@ -1921,42 +2118,244 @@ func statusService(ctx context.Context, conn *ecs.Client, serviceName, clusterNa } } -func statusServiceWaitForStable(ctx context.Context, conn *ecs.Client, serviceName, clusterNameOrARN string) retry.StateRefreshFunc { - return func() (any, string, error) { - outputRaw, status, err := statusService(ctx, conn, serviceName, clusterNameOrARN)() +func statusServiceWaitForStable(ctx context.Context, conn *ecs.Client, serviceName, clusterNameOrARN string, sigintConfig 
*rollbackState, operationTime time.Time) retry.StateRefreshFunc { + var primaryTaskSet *awstypes.Deployment + var primaryDeploymentArn *string + var isNewPrimaryDeployment bool + return func() (any, string, error) { + outputRaw, serviceStatus, err := statusService(ctx, conn, serviceName, clusterNameOrARN)() if err != nil { return nil, "", err } - if status != serviceStatusActive { - return outputRaw, status, nil + if serviceStatus != serviceStatusActive { + return outputRaw, serviceStatus, nil } output := outputRaw.(*awstypes.Service) + if primaryTaskSet == nil { + primaryTaskSet = findPrimaryTaskSet(output.Deployments) + + if primaryTaskSet != nil && primaryTaskSet.CreatedAt != nil { + createdAtUTC := primaryTaskSet.CreatedAt.UTC() + isNewPrimaryDeployment = createdAtUTC.After(operationTime) + } + } + + isNewECSDeployment := output.DeploymentController != nil && + output.DeploymentController.Type == awstypes.DeploymentControllerTypeEcs && + isNewPrimaryDeployment + + // For new deployments with ECS deployment controller, check the deployment status + if isNewECSDeployment { + if primaryDeploymentArn == nil { + serviceArn := aws.ToString(output.ServiceArn) + + var err error + primaryDeploymentArn, err = findPrimaryDeploymentARN(ctx, conn, primaryTaskSet, serviceArn, clusterNameOrARN, operationTime) + if err != nil { + return nil, "", err + } + if primaryDeploymentArn == nil { + return output, serviceStatusPending, nil + } + } + + if sigintConfig.rollbackConfigured && !sigintConfig.rollbackRoutineStarted { + sigintConfig.waitGroup.Add(1) + go rollbackRoutine(ctx, conn, sigintConfig, primaryDeploymentArn) + sigintConfig.rollbackRoutineStarted = true + } + + deploymentStatus, err := findDeploymentStatus(ctx, conn, *primaryDeploymentArn) + if err != nil { + return nil, "", err + } + return output, deploymentStatus, nil + } + + // For other deployment controllers or in-place updates, check based on desired count if n, dc, rc := len(output.Deployments), 
output.DesiredCount, output.RunningCount; n == 1 && dc == rc { - status = serviceStatusStable + serviceStatus = serviceStatusStable } else { - status = serviceStatusPending + serviceStatus = serviceStatusPending } - return output, status, nil + return output, serviceStatus, nil } } +func findPrimaryTaskSet(deployments []awstypes.Deployment) *awstypes.Deployment { + for _, deployment := range deployments { + if aws.ToString(deployment.Status) == taskSetStatusPrimary { + return &deployment + } + } + return nil +} + +func findPrimaryDeploymentARN(ctx context.Context, conn *ecs.Client, primaryTaskSet *awstypes.Deployment, serviceNameOrARN, clusterNameOrARN string, operationTime time.Time) (*string, error) { + parts := strings.Split(aws.ToString(primaryTaskSet.Id), "/") + if len(parts) < 2 { + return nil, fmt.Errorf("invalid primary task set ID format: %s", aws.ToString(primaryTaskSet.Id)) + } + taskSetID := parts[1] + + input := &ecs.ListServiceDeploymentsInput{ + Cluster: aws.String(clusterNameOrARN), + Service: aws.String(serviceNameFromARN(serviceNameOrARN)), + CreatedAt: &awstypes.CreatedAt{ + After: &operationTime, + }, + } + + output, err := conn.ListServiceDeployments(ctx, input) + if err != nil { + return nil, err + } + + // Find deployment matching task set + for _, deployment := range output.ServiceDeployments { + if strings.Contains(aws.ToString(deployment.TargetServiceRevisionArn), taskSetID) { + return deployment.ServiceDeploymentArn, nil + } + } + + return nil, nil +} + +func findDeploymentStatus(ctx context.Context, conn *ecs.Client, deploymentArn string) (string, error) { + input := ecs.DescribeServiceDeploymentsInput{ + ServiceDeploymentArns: []string{deploymentArn}, + } + + output, err := conn.DescribeServiceDeployments(ctx, &input) + if err != nil { + return "", err + } + + if len(output.ServiceDeployments) == 0 { + return serviceStatusPending, nil + } + + deployment := output.ServiceDeployments[0] + + switch deployment.Status { + case 
awstypes.ServiceDeploymentStatusSuccessful: + return serviceStatusStable, nil + case awstypes.ServiceDeploymentStatusInProgress: + return serviceStatusPending, nil + case awstypes.ServiceDeploymentStatusStopped, + awstypes.ServiceDeploymentStatusRollbackFailed, + awstypes.ServiceDeploymentStatusRollbackSuccessful: + message := "Deployment failed" + if deployment.StatusReason != nil { + message = aws.ToString(deployment.StatusReason) + } + return "", errors.New(message) + default: + return serviceStatusPending, nil + } +} + +type rollbackState struct { + rollbackConfigured bool + rollbackRoutineStarted bool + rollbackRoutineStopped chan struct{} + waitGroup sync.WaitGroup +} + +func rollbackRoutine(ctx context.Context, conn *ecs.Client, rollbackState *rollbackState, primaryDeploymentArn *string) { + defer rollbackState.waitGroup.Done() + + select { + case <-ctx.Done(): + log.Printf("[INFO] SIGINT detected. Initiating rollback for deployment: %s", *primaryDeploymentArn) + ctx, cancel := context.WithTimeout(context.Background(), (1 * time.Hour)) // Maximum time before SIGKILL + defer cancel() + + if err := rollbackDeployment(ctx, conn, primaryDeploymentArn); err != nil { //nolint:contextcheck // Original Context has been cancelled + log.Printf("[ERROR] Failed to rollback deployment: %s. Err: %s", *primaryDeploymentArn, err) + } else { + log.Printf("[INFO] Deployment: %s rolled back successfully.", *primaryDeploymentArn) + } + + case <-rollbackState.rollbackRoutineStopped: + return + } +} + +func rollbackDeployment(ctx context.Context, conn *ecs.Client, primaryDeploymentArn *string) error { + // Check if deployment is already in terminal state, meaning rollback is not needed + deploymentStatus, err := findDeploymentStatus(ctx, conn, *primaryDeploymentArn) + if err != nil { + return err + } + if slices.Contains(deploymentTerminalStates, deploymentStatus) { + return nil + } + + log.Printf("[INFO] Rolling back deployment %s. 
This may take a few minutes...", *primaryDeploymentArn) + + input := &ecs.StopServiceDeploymentInput{ + ServiceDeploymentArn: primaryDeploymentArn, + StopType: awstypes.StopServiceDeploymentStopTypeRollback, + } + + _, err = conn.StopServiceDeployment(ctx, input) + if err != nil { + return err + } + + return waitForDeploymentTerminalStatus(ctx, conn, *primaryDeploymentArn) +} + +func waitForDeploymentTerminalStatus(ctx context.Context, conn *ecs.Client, primaryDeploymentArn string) error { + stateConf := &retry.StateChangeConf{ + Pending: enum.Slice( + awstypes.ServiceDeploymentStatusPending, + awstypes.ServiceDeploymentStatusInProgress, + awstypes.ServiceDeploymentStatusRollbackRequested, + awstypes.ServiceDeploymentStatusRollbackInProgress, + ), + Target: deploymentTerminalStates, + Refresh: func() (any, string, error) { + status, err := findDeploymentStatus(ctx, conn, primaryDeploymentArn) + return nil, status, err + }, + Timeout: 1 * time.Hour, // Maximum time before SIGKILL + } + + _, err := stateConf.WaitForStateContext(ctx) + return err +} + // waitServiceStable waits for an ECS Service to reach the status "ACTIVE" and have all desired tasks running. // Does not return tags. 
-func waitServiceStable(ctx context.Context, conn *ecs.Client, serviceName, clusterNameOrARN string, timeout time.Duration) (*awstypes.Service, error) { +func waitServiceStable(ctx context.Context, conn *ecs.Client, serviceName, clusterNameOrARN string, operationTime time.Time, sigintCancellation bool, timeout time.Duration) (*awstypes.Service, error) { //nolint:unparam + sigintConfig := &rollbackState{ + rollbackConfigured: sigintCancellation, + rollbackRoutineStarted: false, + rollbackRoutineStopped: make(chan struct{}), + waitGroup: sync.WaitGroup{}, + } + stateConf := &retry.StateChangeConf{ Pending: []string{serviceStatusInactive, serviceStatusDraining, serviceStatusPending}, Target: []string{serviceStatusStable}, - Refresh: statusServiceWaitForStable(ctx, conn, serviceName, clusterNameOrARN), + Refresh: statusServiceWaitForStable(ctx, conn, serviceName, clusterNameOrARN, sigintConfig, operationTime), Timeout: timeout, } outputRaw, err := stateConf.WaitForStateContext(ctx) + if sigintConfig.rollbackRoutineStarted { + close(sigintConfig.rollbackRoutineStopped) + sigintConfig.waitGroup.Wait() + } + if output, ok := outputRaw.(*awstypes.Service); ok { return output, err } @@ -1965,7 +2364,7 @@ func waitServiceStable(ctx context.Context, conn *ecs.Client, serviceName, clust } // Does not return tags. 
-func waitServiceActive(ctx context.Context, conn *ecs.Client, serviceName, clusterNameOrARN string, timeout time.Duration) (*awstypes.Service, error) { +func waitServiceActive(ctx context.Context, conn *ecs.Client, serviceName, clusterNameOrARN string, timeout time.Duration) (*awstypes.Service, error) { //nolint:unparam stateConf := &retry.StateChangeConf{ Pending: []string{serviceStatusInactive, serviceStatusDraining}, Target: []string{serviceStatusActive}, @@ -2025,31 +2424,11 @@ func triggersCustomizeDiff(_ context.Context, d *schema.ResourceDiff, meta any) } func capacityProviderStrategyCustomizeDiff(_ context.Context, d *schema.ResourceDiff, meta any) error { - // to be backward compatible, should ForceNew almost always (previous behavior), unless: - // force_new_deployment is true and - // neither the old set nor new set is 0 length - if v := d.Get("force_new_deployment").(bool); !v { - return capacityProviderStrategyForceNew(d) - } - - old, new := d.GetChange(names.AttrCapacityProviderStrategy) - - ol := old.(*schema.Set).Len() - nl := new.(*schema.Set).Len() - - if (ol == 0 && nl > 0) || (ol > 0 && nl == 0) { - return capacityProviderStrategyForceNew(d) - } - - return nil -} - -func capacityProviderStrategyForceNew(d *schema.ResourceDiff) error { - for _, key := range d.GetChangedKeysPrefix(names.AttrCapacityProviderStrategy) { - if d.HasChange(key) { - if err := d.ForceNew(key); err != nil { - return fmt.Errorf("while attempting to force a new ECS service for capacity_provider_strategy: %w", err) - } + // This if-statement is true only when the resource is being updated. + // d.Id() != "" means the resource (ecs service) already exists. 
+ if d.Id() != "" && d.HasChange(names.AttrCapacityProviderStrategy) { + if v := d.Get("force_new_deployment").(bool); !v { + return fmt.Errorf("force_new_deployment should be true when capacity_provider_strategy is being updated") } } return nil @@ -2149,6 +2528,113 @@ func flattenDeploymentCircuitBreaker(apiObject *awstypes.DeploymentCircuitBreake return tfMap } +func flattenDeploymentConfiguration(apiObject *awstypes.DeploymentConfiguration) []any { + if apiObject == nil { + return nil + } + + tfMap := map[string]any{} + + if v := apiObject.BakeTimeInMinutes; v != nil { + tfMap["bake_time_in_minutes"] = flex.Int32ToStringValue(v) + } + + if v := apiObject.LifecycleHooks; len(v) > 0 { + tfMap["lifecycle_hook"] = flattenLifecycleHooks(v) + } + + if v := apiObject.Strategy; v != "" { + tfMap["strategy"] = v + } + + if len(tfMap) == 0 { + return nil + } + + return []any{tfMap} +} + +func flattenLifecycleHooks(apiObjects []awstypes.DeploymentLifecycleHook) []any { + if len(apiObjects) == 0 { + return nil + } + + tfList := make([]any, 0, len(apiObjects)) + + for _, apiObject := range apiObjects { + tfMap := map[string]any{} + + if v := apiObject.HookTargetArn; v != nil { + tfMap["hook_target_arn"] = aws.ToString(v) + } + + if v := apiObject.RoleArn; v != nil { + tfMap[names.AttrRoleARN] = aws.ToString(v) + } + + if v := apiObject.LifecycleStages; len(v) > 0 { + stages := make([]string, 0, len(v)) + for _, stage := range v { + stages = append(stages, string(stage)) + } + tfMap["lifecycle_stages"] = stages + } + + if v := apiObject.HookDetails; v != nil { + if jsonString, err := smithy.DocumentToJSONString(v); err == nil { + tfMap["hook_details"] = jsonString + } + } + + tfList = append(tfList, tfMap) + } + + return tfList +} + +func expandLifecycleHooks(tfList []any) []awstypes.DeploymentLifecycleHook { + apiObject := make([]awstypes.DeploymentLifecycleHook, 0, len(tfList)) + + for _, tfMapRaw := range tfList { + if tfMapRaw == nil { + continue + } + + tfMap := 
tfMapRaw.(map[string]any) + + hook := awstypes.DeploymentLifecycleHook{} + + if v, ok := tfMap["hook_target_arn"].(string); ok && v != "" { + hook.HookTargetArn = aws.String(v) + } + + if v, ok := tfMap[names.AttrRoleARN].(string); ok && v != "" { + hook.RoleArn = aws.String(v) + } + + if v, ok := tfMap["lifecycle_stages"].([]any); ok && len(v) > 0 { + stages := make([]awstypes.DeploymentLifecycleHookStage, 0, len(v)) + for _, stage := range v { + if stageStr, ok := stage.(string); ok && stageStr != "" { + stages = append(stages, awstypes.DeploymentLifecycleHookStage(stageStr)) + } + } + hook.LifecycleStages = stages + } + + if v, ok := tfMap["hook_details"].(string); ok && v != "" { + var jsonValue any + if err := json.Unmarshal([]byte(v), &jsonValue); err == nil { + hook.HookDetails = document.NewLazyDocument(jsonValue) + } + } + + apiObject = append(apiObject, hook) + } + + return apiObject +} + func flattenNetworkConfiguration(nc *awstypes.NetworkConfiguration) []any { if nc == nil { return nil @@ -2471,6 +2957,79 @@ func flattenSecrets(apiObjects []awstypes.Secret) []any { return tfList } +func expandServiceLoadBalancers(tfList []any) []awstypes.LoadBalancer { + apiObjects := make([]awstypes.LoadBalancer, 0, len(tfList)) + + for _, tfMapRaw := range tfList { + tfMap := tfMapRaw.(map[string]any) + + apiObject := awstypes.LoadBalancer{ + ContainerName: aws.String(tfMap["container_name"].(string)), + ContainerPort: aws.Int32(int32(tfMap["container_port"].(int))), + } + + if v, ok := tfMap["elb_name"]; ok && v.(string) != "" { + apiObject.LoadBalancerName = aws.String(v.(string)) + } + + if v, ok := tfMap["target_group_arn"]; ok && v.(string) != "" { + apiObject.TargetGroupArn = aws.String(v.(string)) + } + + if advConfig, ok := tfMap["advanced_configuration"].([]any); ok && len(advConfig) > 0 && advConfig[0] != nil { + config := advConfig[0].(map[string]any) + apiObject.AdvancedConfiguration = &awstypes.AdvancedConfiguration{ + AlternateTargetGroupArn: 
aws.String(config["alternate_target_group_arn"].(string)), + ProductionListenerRule: aws.String(config["production_listener_rule"].(string)), + RoleArn: aws.String(config[names.AttrRoleARN].(string)), + } + if v, ok := config["test_listener_rule"].(string); ok && v != "" { + apiObject.AdvancedConfiguration.TestListenerRule = aws.String(v) + } + } + + apiObjects = append(apiObjects, apiObject) + } + + return apiObjects +} + +func flattenServiceLoadBalancers(apiObjects []awstypes.LoadBalancer) []any { + tfList := make([]any, 0, len(apiObjects)) + + for _, apiObject := range apiObjects { + tfMap := map[string]any{ + "container_name": aws.ToString(apiObject.ContainerName), + "container_port": aws.ToInt32(apiObject.ContainerPort), + } + + if apiObject.LoadBalancerName != nil { + tfMap["elb_name"] = aws.ToString(apiObject.LoadBalancerName) + } + + if apiObject.TargetGroupArn != nil { + tfMap["target_group_arn"] = aws.ToString(apiObject.TargetGroupArn) + } + + if apiObject.AdvancedConfiguration != nil { + tfMap["advanced_configuration"] = []any{ + map[string]any{ + "alternate_target_group_arn": aws.ToString(apiObject.AdvancedConfiguration.AlternateTargetGroupArn), + "production_listener_rule": aws.ToString(apiObject.AdvancedConfiguration.ProductionListenerRule), + names.AttrRoleARN: aws.ToString(apiObject.AdvancedConfiguration.RoleArn), + }, + } + if apiObject.AdvancedConfiguration.TestListenerRule != nil { + tfMap["advanced_configuration"].([]any)[0].(map[string]any)["test_listener_rule"] = aws.ToString(apiObject.AdvancedConfiguration.TestListenerRule) + } + } + + tfList = append(tfList, tfMap) + } + + return tfList +} + func expandServiceVolumeConfigurations(ctx context.Context, tfList []any) []awstypes.ServiceVolumeConfiguration { if len(tfList) == 0 { return nil @@ -2729,6 +3288,35 @@ func flattenServiceConnectServices(apiObjects []awstypes.ServiceConnectService) return tfList } +func expandServiceConnectTestTrafficRules(tfList []any) 
*awstypes.ServiceConnectTestTrafficRules { + if len(tfList) == 0 { + return nil + } + + tfMap := tfList[0].(map[string]any) + apiObject := &awstypes.ServiceConnectTestTrafficRules{} + + if v, ok := tfMap[names.AttrHeader].([]any); ok && len(v) > 0 && v[0] != nil { + apiObject.Header = expandServiceConnectHeader(v) + } + + return apiObject +} + +func flattenServiceConnectTestTrafficRules(apiObject *awstypes.ServiceConnectTestTrafficRules) []any { + if apiObject == nil { + return nil + } + + tfMap := map[string]any{} + + if apiObject.Header != nil { + tfMap[names.AttrHeader] = flattenServiceConnectHeader(apiObject.Header) + } + + return []any{tfMap} +} + func expandTimeout(timeout []any) *awstypes.TimeoutConfiguration { if len(timeout) == 0 { return nil @@ -2845,6 +3433,9 @@ func expandClientAliases(srv []any) []awstypes.ServiceConnectClientAlias { if v, ok := raw[names.AttrDNSName].(string); ok && v != "" { config.DnsName = aws.String(v) } + if v, ok := raw["test_traffic_rules"].([]any); ok && len(v) > 0 && v[0] != nil { + config.TestTrafficRules = expandServiceConnectTestTrafficRules(v) + } out = append(out, config) } @@ -2864,6 +3455,9 @@ func flattenServiceConnectClientAliases(apiObjects []awstypes.ServiceConnectClie if v := apiObject.Port; v != nil { tfMap[names.AttrPort] = aws.ToInt32(v) } + if v := apiObject.TestTrafficRules; v != nil { + tfMap["test_traffic_rules"] = flattenServiceConnectTestTrafficRules(v) + } tfList = append(tfList, tfMap) } @@ -2871,6 +3465,70 @@ func flattenServiceConnectClientAliases(apiObjects []awstypes.ServiceConnectClie return tfList } +func expandServiceConnectHeader(tfList []any) *awstypes.ServiceConnectTestTrafficHeaderRules { + if len(tfList) == 0 { + return nil + } + + tfMap := tfList[0].(map[string]any) + apiObject := &awstypes.ServiceConnectTestTrafficHeaderRules{} + + if v, ok := tfMap[names.AttrName].(string); ok && v != "" { + apiObject.Name = aws.String(v) + } + if v, ok := tfMap[names.AttrValue].([]any); ok && len(v) > 0 
&& v[0] != nil { + apiObject.Value = expandServiceConnectHeaderValue(v) + } + + return apiObject +} + +func flattenServiceConnectHeader(apiObject *awstypes.ServiceConnectTestTrafficHeaderRules) []any { + if apiObject == nil { + return nil + } + + tfMap := map[string]any{} + + if v := apiObject.Name; v != nil { + tfMap[names.AttrName] = aws.ToString(v) + } + if v := apiObject.Value; v != nil { + tfMap[names.AttrValue] = flattenServiceConnectHeaderValue(v) + } + + return []any{tfMap} +} + +func expandServiceConnectHeaderValue(tfList []any) *awstypes.ServiceConnectTestTrafficHeaderMatchRules { + if len(tfList) == 0 { + return nil + } + + tfMap := tfList[0].(map[string]any) + apiObject := &awstypes.ServiceConnectTestTrafficHeaderMatchRules{} + + if v, ok := tfMap["exact"].(string); ok && v != "" { + apiObject.Exact = aws.String(v) + } + + return apiObject +} + +func flattenServiceConnectHeaderValue(apiObject *awstypes.ServiceConnectTestTrafficHeaderMatchRules) []any { + if apiObject == nil { + return nil + } + + tfMap := map[string]any{} + + if apiObject.Exact != nil { + tfMap["exact"] = aws.ToString(apiObject.Exact) + } + + return []any{tfMap} +} + func flattenServiceRegistries(apiObjects []awstypes.ServiceRegistry) []any { if len(apiObjects) == 0 { return nil @@ -2933,3 +3591,22 @@ func clusterNameFromARN(arn string) string { } return "" } + +func serviceNameFromARN(s string) string { + a, err := arn.Parse(s) + if err != nil { + return "" + } + + resParts := strings.Split(a.Resource, "/") + switch len(resParts) { + case 3: + // long arn format arn:aws:ecs:us-west-2:123456789:service/cluster_name/service_name + return resParts[2] + case 2: + // short arn format arn:aws:ecs:us-west-2:123456789:service/service_name + return resParts[1] + default: + return "" + } +} diff --git a/internal/service/ecs/service_data_source.go b/internal/service/ecs/service_data_source.go index f57a98b1a414..a996df9271ed 100644 --- a/internal/service/ecs/service_data_source.go +++ 
b/internal/service/ecs/service_data_source.go @@ -43,6 +43,54 @@ func dataSourceService() *schema.Resource { Type: schema.TypeString, Computed: true, }, + "load_balancer": { + Type: schema.TypeSet, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "container_name": { + Type: schema.TypeString, + Computed: true, + }, + "container_port": { + Type: schema.TypeInt, + Computed: true, + }, + "elb_name": { + Type: schema.TypeString, + Computed: true, + }, + "target_group_arn": { + Type: schema.TypeString, + Computed: true, + }, + "advanced_configuration": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "alternate_target_group_arn": { + Type: schema.TypeString, + Computed: true, + }, + "production_listener_rule": { + Type: schema.TypeString, + Computed: true, + }, + "test_listener_rule": { + Type: schema.TypeString, + Computed: true, + }, + names.AttrRoleARN: { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + }, + }, + }, "scheduling_strategy": { Type: schema.TypeString, Computed: true, @@ -77,6 +125,11 @@ func dataSourceServiceRead(ctx context.Context, d *schema.ResourceData, meta any d.Set("cluster_arn", service.ClusterArn) d.Set("desired_count", service.DesiredCount) d.Set("launch_type", service.LaunchType) + if service.LoadBalancers != nil { + if err := d.Set("load_balancer", flattenServiceLoadBalancers(service.LoadBalancers)); err != nil { + return sdkdiag.AppendErrorf(diags, "setting load_balancer: %s", err) + } + } d.Set("scheduling_strategy", service.SchedulingStrategy) d.Set(names.AttrServiceName, service.ServiceName) d.Set("task_definition", service.TaskDefinition) diff --git a/internal/service/ecs/service_data_source_test.go b/internal/service/ecs/service_data_source_test.go index 7c25c05ec2af..a3e4c009ed58 100644 --- a/internal/service/ecs/service_data_source_test.go +++ b/internal/service/ecs/service_data_source_test.go @@ -42,6 +42,35 @@ func 
TestAccECSServiceDataSource_basic(t *testing.T) { }) } +func TestAccECSServiceDataSource_loadBalancer(t *testing.T) { + ctx := acctest.Context(t) + dataSourceName := "data.aws_ecs_service.test" + resourceName := "aws_ecs_service.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix)[:16] + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.ECSServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + Config: testAccServiceDataSourceConfig_loadBalancer(rName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrPair(resourceName, names.AttrID, dataSourceName, names.AttrARN), + resource.TestCheckResourceAttrPair(resourceName, "availability_zone_rebalancing", dataSourceName, "availability_zone_rebalancing"), + resource.TestCheckResourceAttrPair(resourceName, "desired_count", dataSourceName, "desired_count"), + resource.TestCheckResourceAttrPair(resourceName, "launch_type", dataSourceName, "launch_type"), + resource.TestCheckResourceAttrPair(resourceName, "load_balancer", dataSourceName, "load_balancer"), + resource.TestCheckResourceAttrPair(resourceName, "scheduling_strategy", dataSourceName, "scheduling_strategy"), + resource.TestCheckResourceAttrPair(resourceName, names.AttrName, dataSourceName, names.AttrServiceName), + resource.TestCheckResourceAttrPair(resourceName, "task_definition", dataSourceName, "task_definition"), + resource.TestCheckResourceAttrPair(resourceName, acctest.CtTagsPercent, dataSourceName, acctest.CtTagsPercent), + ), + }, + }, + }) +} + func testAccServiceDataSourceConfig_basic(rName string) string { return fmt.Sprintf(` resource "aws_ecs_cluster" "test" { @@ -81,3 +110,14 @@ data "aws_ecs_service" "test" { } `, rName) } + +func testAccServiceDataSourceConfig_loadBalancer(rName string) string { + return acctest.ConfigCompose( + 
testAccServiceConfig_blueGreenDeployment_basic(rName, false), + ` +data "aws_ecs_service" "test" { + service_name = aws_ecs_service.test.name + cluster_arn = aws_ecs_cluster.main.arn +} +`) +} diff --git a/internal/service/ecs/service_endpoint_resolver_gen.go b/internal/service/ecs/service_endpoint_resolver_gen.go index f880ad4df381..2830773ef1df 100644 --- a/internal/service/ecs/service_endpoint_resolver_gen.go +++ b/internal/service/ecs/service_endpoint_resolver_gen.go @@ -62,7 +62,7 @@ func (r resolverV2) ResolveEndpoint(ctx context.Context, params ecs.EndpointPara }) params.UseFIPS = aws.Bool(false) } else { - err = fmt.Errorf("looking up ecs endpoint %q: %s", hostname, err) + err = fmt.Errorf("looking up ecs endpoint %q: %w", hostname, err) return } } else { diff --git a/internal/service/ecs/service_endpoints_gen_test.go b/internal/service/ecs/service_endpoints_gen_test.go index 376b9d531759..e44938277abc 100644 --- a/internal/service/ecs/service_endpoints_gen_test.go +++ b/internal/service/ecs/service_endpoints_gen_test.go @@ -521,7 +521,7 @@ func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { ) } -var errCancelOperation = fmt.Errorf("Test: Canceling request") +var errCancelOperation = errors.New("Test: Canceling request") func addCancelRequestMiddleware() func(*middleware.Stack) error { return func(stack *middleware.Stack) error { diff --git a/internal/service/ecs/service_package_gen.go b/internal/service/ecs/service_package_gen.go index 0e5e6c6adc0b..9de057aa28a7 100644 --- a/internal/service/ecs/service_package_gen.go +++ b/internal/service/ecs/service_package_gen.go @@ -7,7 +7,6 @@ import ( "unique" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/ecs" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -168,7 +167,7 @@ func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) ( 
func(o *ecs.Options) { if inContext, ok := conns.FromContext(ctx); ok && inContext.VCREnabled() { tflog.Info(ctx, "overriding retry behavior to immediately return VCR errors") - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(vcr.InteractionNotFoundRetryableFunc)) + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), vcr.InteractionNotFoundRetryableFunc) } }, withExtraOptions(ctx, p, config), diff --git a/internal/service/ecs/service_test.go b/internal/service/ecs/service_test.go index 6b290c0c7d1c..6da4125b1eea 100644 --- a/internal/service/ecs/service_test.go +++ b/internal/service/ecs/service_test.go @@ -7,13 +7,15 @@ import ( "context" "fmt" "math" + "os/exec" + "strconv" "testing" "time" "github.com/YakDriver/regexache" "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/ecs" awstypes "github.com/aws/aws-sdk-go-v2/service/ecs/types" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-testing/compare" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" @@ -41,17 +43,17 @@ func Test_GetRoleNameFromARN(t *testing.T) { {"empty", "", ""}, { names.AttrRole, - "arn:aws:iam::0123456789:role/EcsService", //lintignore:AWSAT005 + "arn:aws:iam::0123456789:role/EcsService", // lintignore:AWSAT005 "EcsService", }, { "role with path", - "arn:aws:iam::0123456789:role/group/EcsService", //lintignore:AWSAT005 + "arn:aws:iam::0123456789:role/group/EcsService", // lintignore:AWSAT005 "/group/EcsService", }, { "role with complex path", - "arn:aws:iam::0123456789:role/group/subgroup/my-role", //lintignore:AWSAT005 + "arn:aws:iam::0123456789:role/group/subgroup/my-role", // lintignore:AWSAT005 "/group/subgroup/my-role", }, } @@ -76,7 +78,7 @@ func TestClustereNameFromARN(t *testing.T) { {"empty", "", ""}, { "cluster", - 
"arn:aws:ecs:us-west-2:0123456789:cluster/my-cluster", //lintignore:AWSAT003,AWSAT005 + "arn:aws:ecs:us-west-2:0123456789:cluster/my-cluster", // lintignore:AWSAT003,AWSAT005 "my-cluster", }, } @@ -90,6 +92,52 @@ func TestClustereNameFromARN(t *testing.T) { } } +func TestServiceNameFromARN(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + arn string + expected string + }{ + { + name: "empty ARN", + arn: "", + expected: "", + }, + { + name: "invalid ARN", + arn: "invalid", + expected: "", + }, + { + name: "short ARN format", + arn: "arn:aws:ecs:us-west-2:123456789:service/service_name", // lintignore:AWSAT003,AWSAT005 + expected: names.AttrServiceName, + }, + { + name: "long ARN format", + arn: "arn:aws:ecs:us-west-2:123456789:service/cluster_name/service_name", // lintignore:AWSAT003,AWSAT005 + expected: names.AttrServiceName, + }, + { + name: "ARN with special characters", + arn: "arn:aws:ecs:us-west-2:123456789:service/cluster-name/service-name-123", // lintignore:AWSAT003,AWSAT005 + expected: "service-name-123", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + actual := tfecs.ServiceNameFromARN(tt.arn) + if actual != tt.expected { + t.Errorf("Expected: %s, Got: %s", tt.expected, actual) + } + }) + } +} + func TestAccECSService_basic(t *testing.T) { ctx := acctest.Context(t) var service awstypes.Service @@ -109,11 +157,11 @@ func TestAccECSService_basic(t *testing.T) { testAccCheckServiceExists(ctx, resourceName, &service), resource.TestCheckResourceAttr(resourceName, "alarms.#", "0"), acctest.CheckResourceAttrRegionalARN(ctx, resourceName, names.AttrARN, "ecs", fmt.Sprintf("service/%s/%s", clusterName, rName)), + resource.TestCheckResourceAttr(resourceName, "availability_zone_rebalancing", "ENABLED"), resource.TestCheckResourceAttrPair(resourceName, "cluster", "aws_ecs_cluster.test", names.AttrARN), resource.TestCheckResourceAttr(resourceName, "service_registries.#", "0"), 
resource.TestCheckResourceAttr(resourceName, "scheduling_strategy", "REPLICA"), resource.TestCheckResourceAttrPair(resourceName, "task_definition", "aws_ecs_task_definition.test", names.AttrARN), - resource.TestCheckResourceAttr(resourceName, "availability_zone_rebalancing", "DISABLED"), resource.TestCheckResourceAttr(resourceName, "vpc_lattice_configuration.#", "0"), ), ConfigPlanChecks: resource.ConfigPlanChecks{ @@ -138,9 +186,9 @@ func TestAccECSService_basic(t *testing.T) { testAccCheckServiceExists(ctx, resourceName, &service), resource.TestCheckResourceAttr(resourceName, "alarms.#", "0"), acctest.CheckResourceAttrRegionalARN(ctx, resourceName, names.AttrARN, "ecs", fmt.Sprintf("service/%s/%s", clusterName, rName)), + resource.TestCheckResourceAttr(resourceName, "availability_zone_rebalancing", "ENABLED"), resource.TestCheckResourceAttr(resourceName, "service_registries.#", "0"), resource.TestCheckResourceAttr(resourceName, "scheduling_strategy", "REPLICA"), - resource.TestCheckResourceAttr(resourceName, "availability_zone_rebalancing", "DISABLED"), resource.TestCheckResourceAttr(resourceName, "vpc_lattice_configuration.#", "0"), ), ConfigPlanChecks: resource.ConfigPlanChecks{ @@ -332,13 +380,33 @@ func TestAccECSService_CapacityProviderStrategy_basic(t *testing.T) { Config: testAccServiceConfig_capacityProviderStrategy(rName, 1, 0, false), Check: resource.ComposeTestCheckFunc( testAccCheckServiceExists(ctx, resourceName, &service), + resource.TestCheckResourceAttr(resourceName, "capacity_provider_strategy.#", "1"), + resource.TestCheckResourceAttr(resourceName, "capacity_provider_strategy.0.weight", "1"), + resource.TestCheckResourceAttr(resourceName, "capacity_provider_strategy.0.base", "0"), ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + }, + }, + }, + { + Config: testAccServiceConfig_capacityProviderStrategy(rName, 10, 1, false), + 
ExpectError: regexache.MustCompile(`force_new_deployment should be true when capacity_provider_strategy is being updated`), }, { - Config: testAccServiceConfig_capacityProviderStrategy(rName, 10, 1, false), + Config: testAccServiceConfig_capacityProviderStrategy(rName, 10, 1, true), Check: resource.ComposeTestCheckFunc( testAccCheckServiceExists(ctx, resourceName, &service), + resource.TestCheckResourceAttr(resourceName, "capacity_provider_strategy.#", "1"), + resource.TestCheckResourceAttr(resourceName, "capacity_provider_strategy.0.weight", "10"), + resource.TestCheckResourceAttr(resourceName, "capacity_provider_strategy.0.base", "1"), ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + }, + }, }, }, }) @@ -360,6 +428,9 @@ func TestAccECSService_CapacityProviderStrategy_forceNewDeployment(t *testing.T) Config: testAccServiceConfig_capacityProviderStrategy(rName, 1, 0, true), Check: resource.ComposeTestCheckFunc( testAccCheckServiceExists(ctx, resourceName, &service), + resource.TestCheckResourceAttr(resourceName, "capacity_provider_strategy.#", "1"), + resource.TestCheckResourceAttr(resourceName, "capacity_provider_strategy.0.weight", "1"), + resource.TestCheckResourceAttr(resourceName, "capacity_provider_strategy.0.base", "0"), ), ConfigPlanChecks: resource.ConfigPlanChecks{ PreApply: []plancheck.PlanCheck{ @@ -371,6 +442,9 @@ func TestAccECSService_CapacityProviderStrategy_forceNewDeployment(t *testing.T) Config: testAccServiceConfig_capacityProviderStrategy(rName, 10, 1, true), Check: resource.ComposeTestCheckFunc( testAccCheckServiceExists(ctx, resourceName, &service), + resource.TestCheckResourceAttr(resourceName, "capacity_provider_strategy.#", "1"), + resource.TestCheckResourceAttr(resourceName, "capacity_provider_strategy.0.weight", "10"), + resource.TestCheckResourceAttr(resourceName, "capacity_provider_strategy.0.base", "1"), ), 
ConfigPlanChecks: resource.ConfigPlanChecks{ PreApply: []plancheck.PlanCheck{ @@ -409,10 +483,13 @@ func TestAccECSService_CapacityProviderStrategy_update(t *testing.T) { Config: testAccServiceConfig_updateCapacityProviderStrategy(rName, 1, "FARGATE"), Check: resource.ComposeTestCheckFunc( testAccCheckServiceExists(ctx, resourceName, &service), + resource.TestCheckResourceAttr(resourceName, "capacity_provider_strategy.#", "1"), + resource.TestCheckResourceAttr(resourceName, "capacity_provider_strategy.0.capacity_provider", "FARGATE"), + resource.TestCheckResourceAttr(resourceName, "capacity_provider_strategy.0.weight", "1"), ), ConfigPlanChecks: resource.ConfigPlanChecks{ PreApply: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionReplace), + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), }, }, }, @@ -420,6 +497,9 @@ func TestAccECSService_CapacityProviderStrategy_update(t *testing.T) { Config: testAccServiceConfig_updateCapacityProviderStrategy(rName, 1, "FARGATE_SPOT"), Check: resource.ComposeTestCheckFunc( testAccCheckServiceExists(ctx, resourceName, &service), + resource.TestCheckResourceAttr(resourceName, "capacity_provider_strategy.#", "1"), + resource.TestCheckResourceAttr(resourceName, "capacity_provider_strategy.0.capacity_provider", "FARGATE_SPOT"), + resource.TestCheckResourceAttr(resourceName, "capacity_provider_strategy.0.weight", "1"), ), ConfigPlanChecks: resource.ConfigPlanChecks{ PreApply: []plancheck.PlanCheck{ @@ -431,10 +511,11 @@ func TestAccECSService_CapacityProviderStrategy_update(t *testing.T) { Config: testAccServiceConfig_updateCapacityProviderStrategyRemove(rName), Check: resource.ComposeTestCheckFunc( testAccCheckServiceExists(ctx, resourceName, &service), + resource.TestCheckResourceAttr(resourceName, "capacity_provider_strategy.#", "0"), ), ConfigPlanChecks: resource.ConfigPlanChecks{ PreApply: []plancheck.PlanCheck{ - 
plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionReplace), + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), }, }, }, @@ -749,7 +830,7 @@ func TestAccECSService_DeploymentControllerType_codeDeploy(t *testing.T) { CheckDestroy: testAccCheckServiceDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccServiceConfig_deploymentControllerTypeCodeDeploy(rName), + Config: testAccServiceConfig_deploymentControllerType(rName, string(awstypes.DeploymentControllerTypeCodeDeploy)), Check: resource.ComposeTestCheckFunc( testAccCheckServiceExists(ctx, resourceName, &service), resource.TestCheckResourceAttr(resourceName, "deployment_controller.#", "1"), @@ -844,6 +925,40 @@ func TestAccECSService_DeploymentControllerType_external(t *testing.T) { }) } +func TestAccECSService_DeploymentControllerMutability_codeDeployToECS(t *testing.T) { + ctx := acctest.Context(t) + var service awstypes.Service + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_ecs_service.test" + + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.ECSServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckServiceDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccServiceConfig_deploymentControllerType(rName, string(awstypes.DeploymentControllerTypeCodeDeploy)), + Check: resource.ComposeTestCheckFunc( + testAccCheckServiceExists(ctx, resourceName, &service), + resource.TestCheckResourceAttr(resourceName, "deployment_controller.0.type", string(awstypes.DeploymentControllerTypeCodeDeploy)), + ), + }, + { + Config: testAccServiceConfig_deploymentControllerType(rName, string(awstypes.DeploymentControllerTypeEcs)), + Check: resource.ComposeTestCheckFunc( + testAccCheckServiceExists(ctx, resourceName, 
&service), + resource.TestCheckResourceAttr(resourceName, "deployment_controller.0.type", string(awstypes.DeploymentControllerTypeEcs)), + ), + }, + }, + }) +} + func TestAccECSService_alarmsAdd(t *testing.T) { ctx := acctest.Context(t) var service awstypes.Service @@ -907,10 +1022,10 @@ func TestAccECSService_alarmsUpdate(t *testing.T) { }) } -func TestAccECSService_DeploymentValues_basic(t *testing.T) { +func TestAccECSService_BlueGreenDeployment_basic(t *testing.T) { ctx := acctest.Context(t) var service awstypes.Service - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix)[:16] // Use shorter name to avoid target group name length issues resourceName := "aws_ecs_service.test" resource.ParallelTest(t, resource.TestCase{ @@ -920,22 +1035,102 @@ func TestAccECSService_DeploymentValues_basic(t *testing.T) { CheckDestroy: testAccCheckServiceDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccServiceConfig_deploymentValues(rName), + Config: testAccServiceConfig_blueGreenDeployment_basic(rName, true), Check: resource.ComposeTestCheckFunc( testAccCheckServiceExists(ctx, resourceName, &service), - resource.TestCheckResourceAttr(resourceName, "deployment_maximum_percent", "200"), - resource.TestCheckResourceAttr(resourceName, "deployment_minimum_healthy_percent", "100"), + resource.TestCheckResourceAttr(resourceName, "deployment_configuration.0.strategy", "BLUE_GREEN"), + resource.TestCheckResourceAttr(resourceName, "deployment_configuration.0.bake_time_in_minutes", "2"), + // Lifecycle hooks configuration checks + resource.TestCheckResourceAttr(resourceName, "deployment_configuration.0.lifecycle_hook.#", "2"), + resource.TestCheckTypeSetElemAttrPair(resourceName, "deployment_configuration.0.lifecycle_hook.*.hook_target_arn", "aws_lambda_function.hook_success", names.AttrARN), + resource.TestCheckTypeSetElemAttrPair(resourceName, "deployment_configuration.0.lifecycle_hook.*.role_arn", 
"aws_iam_role.global", names.AttrARN), + resource.TestCheckTypeSetElemAttr(resourceName, "deployment_configuration.0.lifecycle_hook.*.lifecycle_stages.*", "POST_SCALE_UP"), + resource.TestCheckTypeSetElemAttr(resourceName, "deployment_configuration.0.lifecycle_hook.*.lifecycle_stages.*", "POST_TEST_TRAFFIC_SHIFT"), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "deployment_configuration.0.lifecycle_hook.*", map[string]string{ + "hook_details": "[1,\"2\",true]", + "lifecycle_stages.0": "POST_SCALE_UP", + }), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "deployment_configuration.0.lifecycle_hook.*", map[string]string{ + "hook_details": "3.14", + "lifecycle_stages.0": "TEST_TRAFFIC_SHIFT", + }), + // Load balancer advanced configuration checks + resource.TestCheckResourceAttr(resourceName, "load_balancer.0.advanced_configuration.#", "1"), + resource.TestCheckResourceAttrSet(resourceName, "load_balancer.0.advanced_configuration.0.alternate_target_group_arn"), + resource.TestCheckResourceAttrSet(resourceName, "load_balancer.0.advanced_configuration.0.production_listener_rule"), + resource.TestCheckResourceAttrSet(resourceName, "load_balancer.0.advanced_configuration.0.test_listener_rule"), + resource.TestCheckResourceAttrSet(resourceName, "load_balancer.0.advanced_configuration.0.role_arn"), + // Service Connect test traffic rules checks + resource.TestCheckResourceAttr(resourceName, "service_connect_configuration.0.service.0.client_alias.0.test_traffic_rules.#", "1"), + resource.TestCheckResourceAttr(resourceName, "service_connect_configuration.0.service.0.client_alias.0.test_traffic_rules.0.header.#", "1"), + resource.TestCheckResourceAttr(resourceName, "service_connect_configuration.0.service.0.client_alias.0.test_traffic_rules.0.header.0.name", "x-test-header"), + resource.TestCheckResourceAttr(resourceName, "service_connect_configuration.0.service.0.client_alias.0.test_traffic_rules.0.header.0.value.#", "1"), + 
resource.TestCheckResourceAttr(resourceName, "service_connect_configuration.0.service.0.client_alias.0.test_traffic_rules.0.header.0.value.0.exact", "test-value"), + ), + }, + { + Config: testAccServiceConfig_blueGreenDeployment_withHookBehavior(rName, false), + Check: resource.ComposeTestCheckFunc( + testAccCheckServiceExists(ctx, resourceName, &service), + resource.TestCheckResourceAttr(resourceName, "deployment_configuration.0.strategy", "BLUE_GREEN"), + resource.TestCheckResourceAttr(resourceName, "deployment_configuration.0.bake_time_in_minutes", "3"), + // Lifecycle hooks configuration checks + resource.TestCheckResourceAttr(resourceName, "deployment_configuration.0.lifecycle_hook.#", "1"), + resource.TestCheckTypeSetElemAttr(resourceName, "deployment_configuration.0.lifecycle_hook.*.lifecycle_stages.*", "PRE_SCALE_UP"), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "deployment_configuration.0.lifecycle_hook.*", map[string]string{ + "hook_details": "{\"bool_key\":true,\"int_key\":10,\"list_key\":[1,\"2\",true],\"object_key\":{\"bool_key\":true,\"int_key\":10,\"list_key\":[1,\"2\",true],\"string_key\":\"string_val\"},\"string_key\":\"string_val\"}", + "lifecycle_stages.0": "PRE_SCALE_UP", + }), + // Service Connect test traffic rules checks + resource.TestCheckResourceAttr(resourceName, "service_connect_configuration.0.service.0.client_alias.0.test_traffic_rules.0.header.0.name", "x-test-header-2"), + resource.TestCheckResourceAttr(resourceName, "service_connect_configuration.0.service.0.client_alias.0.test_traffic_rules.0.header.0.value.0.exact", "test-value-2"), ), }, }, }) } -// Regression for https://github.com/hashicorp/terraform-provider-aws/issues/6315 -func TestAccECSService_DeploymentValues_minZeroMaxOneHundred(t *testing.T) { +func TestAccECSService_BlueGreenDeployment_outOfBandRemoval(t *testing.T) { ctx := acctest.Context(t) var service awstypes.Service - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := 
sdkacctest.RandomWithPrefix(acctest.ResourcePrefix)[:16] // Use shorter name to avoid target group name length issues + resourceName := "aws_ecs_service.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.ECSServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + Config: testAccServiceConfig_blueGreenDeployment_basic(rName, true), + Check: resource.ComposeTestCheckFunc( + testAccCheckServiceExists(ctx, resourceName, &service), + resource.TestCheckResourceAttr(resourceName, "deployment_configuration.0.strategy", "BLUE_GREEN"), + resource.TestCheckResourceAttr(resourceName, "deployment_configuration.0.bake_time_in_minutes", "2"), + resource.TestCheckResourceAttr(resourceName, "deployment_configuration.0.lifecycle_hook.#", "2"), + testAccCheckServiceRemoveBlueGreenDeploymentConfigurations(ctx, &service), + ), + ExpectNonEmptyPlan: true, + }, + { + Config: testAccServiceConfig_blueGreenDeployment_basic(rName, false), + Check: resource.ComposeTestCheckFunc( + testAccCheckServiceExists(ctx, resourceName, &service), + resource.TestCheckResourceAttr(resourceName, "deployment_configuration.0.strategy", "BLUE_GREEN"), + resource.TestCheckResourceAttr(resourceName, "deployment_configuration.0.bake_time_in_minutes", "2"), + resource.TestCheckResourceAttr(resourceName, "deployment_configuration.0.lifecycle_hook.#", "2"), + ), + }, + }, + }) +} + +func TestAccECSService_BlueGreenDeployment_sigintRollback(t *testing.T) { + acctest.Skip(t, "SIGINT handling can't reliably be tested in CI") + + ctx := acctest.Context(t) + var service awstypes.Service + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix)[:16] // Use shorter name to avoid target group name length issues resourceName := "aws_ecs_service.test" resource.ParallelTest(t, resource.TestCase{ @@ -945,21 +1140,37 @@ func 
TestAccECSService_DeploymentValues_minZeroMaxOneHundred(t *testing.T) { CheckDestroy: testAccCheckServiceDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccServiceConfig_deploymentPercents(rName, 0, 100), + Config: testAccServiceConfig_blueGreenDeployment_basic(rName, true), Check: resource.ComposeTestCheckFunc( testAccCheckServiceExists(ctx, resourceName, &service), - resource.TestCheckResourceAttr(resourceName, "deployment_maximum_percent", "100"), - resource.TestCheckResourceAttr(resourceName, "deployment_minimum_healthy_percent", "0"), + resource.TestCheckResourceAttrPair(resourceName, "task_definition", "aws_ecs_task_definition.test", names.AttrARN), + ), + }, + { + Config: testAccServiceConfig_blueGreenDeployment_withHookBehavior(rName, false), + PreConfig: func() { + go func() { + _ = exec.Command("go", "run", "test-fixtures/sigint_helper.go", "30").Start() // lintignore:XR007 + }() + }, + ExpectError: regexache.MustCompile("execution halted|context canceled"), + }, + { + RefreshState: true, + Check: resource.ComposeTestCheckFunc( + testAccCheckServiceExists(ctx, resourceName, &service), + resource.TestCheckResourceAttrPair(resourceName, "task_definition", "aws_ecs_task_definition.test", names.AttrARN), ), + ExpectNonEmptyPlan: true, }, }, }) } -func TestAccECSService_deploymentCircuitBreaker(t *testing.T) { +func TestAccECSService_BlueGreenDeployment_circuitBreakerRollback(t *testing.T) { ctx := acctest.Context(t) var service awstypes.Service - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix)[:16] // Use shorter name to avoid target group name length issues resourceName := "aws_ecs_service.test" resource.ParallelTest(t, resource.TestCase{ @@ -969,23 +1180,46 @@ func TestAccECSService_deploymentCircuitBreaker(t *testing.T) { CheckDestroy: testAccCheckServiceDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccServiceConfig_deploymentCircuitBreaker(rName), + Config: 
testAccServiceConfig_blueGreenDeployment_withCircuitBreaker(rName), + ExpectError: regexache.MustCompile(`No rollback candidate was found to run the rollback`), + }, + { + Config: testAccServiceConfig_blueGreenDeployment_basic(rName, true), Check: resource.ComposeTestCheckFunc( testAccCheckServiceExists(ctx, resourceName, &service), - resource.TestCheckResourceAttr(resourceName, "deployment_circuit_breaker.#", "1"), - resource.TestCheckResourceAttr(resourceName, "deployment_circuit_breaker.0.enable", acctest.CtTrue), - resource.TestCheckResourceAttr(resourceName, "deployment_circuit_breaker.0.rollback", acctest.CtTrue), + resource.TestCheckResourceAttr(resourceName, "deployment_configuration.0.strategy", "BLUE_GREEN"), ), }, + { + Config: testAccServiceConfig_blueGreenDeployment_withCircuitBreaker(rName), + ExpectError: regexache.MustCompile(`Service deployment rolled back because the circuit breaker threshold was exceeded.`), + }, }, }) } -// Regression for https://github.com/hashicorp/terraform/issues/3444 -func TestAccECSService_loadBalancerChanges(t *testing.T) { +func TestAccECSService_BlueGreenDeployment_createFailure(t *testing.T) { + ctx := acctest.Context(t) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix)[:16] // Use shorter name to avoid target group name length issues + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.ECSServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckServiceDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccServiceConfig_blueGreenDeployment_withHookBehavior(rName, true), + ExpectError: regexache.MustCompile(`No rollback candidate was found`), + }, + }, + }) +} + +func TestAccECSService_BlueGreenDeployment_changeStrategy(t *testing.T) { ctx := acctest.Context(t) var service awstypes.Service - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := 
sdkacctest.RandomWithPrefix(acctest.ResourcePrefix)[:16] // Use shorter name to avoid target group name length issues resourceName := "aws_ecs_service.test" resource.ParallelTest(t, resource.TestCase{ @@ -995,36 +1229,42 @@ func TestAccECSService_loadBalancerChanges(t *testing.T) { CheckDestroy: testAccCheckServiceDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccServiceConfig_lbChanges(rName), + Config: testAccServiceConfig_blueGreenDeployment_zeroBakeTime(rName, true), Check: resource.ComposeTestCheckFunc( testAccCheckServiceExists(ctx, resourceName, &service), + resource.TestCheckResourceAttr(resourceName, "deployment_configuration.0.strategy", "BLUE_GREEN"), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "deployment_configuration.0.lifecycle_hook.*", map[string]string{ + "hook_details": acctest.CtTrue, + "lifecycle_stages.0": "POST_SCALE_UP", + }), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "deployment_configuration.0.lifecycle_hook.*", map[string]string{ + "hook_details": "\"Test string\"", + "lifecycle_stages.0": "TEST_TRAFFIC_SHIFT", + }), ), - ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), - }, - }, }, { - Config: testAccServiceConfig_lbChangesModified(rName), + Config: testAccServiceConfig_blueGreenDeployment_switchToRolling(rName), Check: resource.ComposeTestCheckFunc( testAccCheckServiceExists(ctx, resourceName, &service), + resource.TestCheckResourceAttr(resourceName, "deployment_configuration.0.strategy", "ROLLING"), + ), + }, + { + Config: testAccServiceConfig_blueGreenDeployment_basic(rName, true), + Check: resource.ComposeTestCheckFunc( + testAccCheckServiceExists(ctx, resourceName, &service), + resource.TestCheckResourceAttr(resourceName, "deployment_configuration.0.strategy", "BLUE_GREEN"), ), - ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{ - 
plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), - }, - }, }, }, }) } -// Regression for https://github.com/hashicorp/terraform/issues/3361 -func TestAccECSService_clusterName(t *testing.T) { +func TestAccECSService_BlueGreenDeployment_updateFailure(t *testing.T) { ctx := acctest.Context(t) var service awstypes.Service - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix)[:16] // Use shorter name to avoid target group name length issues resourceName := "aws_ecs_service.test" resource.ParallelTest(t, resource.TestCase{ @@ -1034,20 +1274,25 @@ func TestAccECSService_clusterName(t *testing.T) { CheckDestroy: testAccCheckServiceDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccServiceConfig_clusterName(rName), + Config: testAccServiceConfig_blueGreenDeployment_basic(rName, true), Check: resource.ComposeTestCheckFunc( testAccCheckServiceExists(ctx, resourceName, &service), - resource.TestCheckResourceAttr(resourceName, "cluster", rName), + resource.TestCheckResourceAttr(resourceName, "deployment_configuration.0.strategy", "BLUE_GREEN"), + resource.TestCheckResourceAttr(resourceName, "deployment_configuration.0.lifecycle_hook.#", "2"), ), }, + { + Config: testAccServiceConfig_blueGreenDeployment_withHookBehavior(rName, true), + ExpectError: regexache.MustCompile(`Service deployment rolled back`), + }, }, }) } -func TestAccECSService_alb(t *testing.T) { +func TestAccECSService_BlueGreenDeployment_updateInPlace(t *testing.T) { ctx := acctest.Context(t) var service awstypes.Service - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix)[:16] // Use shorter name to avoid target group name length issues resourceName := "aws_ecs_service.test" resource.ParallelTest(t, resource.TestCase{ @@ -1057,20 +1302,29 @@ func TestAccECSService_alb(t *testing.T) { CheckDestroy: testAccCheckServiceDestroy(ctx), 
Steps: []resource.TestStep{ { - Config: testAccServiceConfig_alb(rName), + Config: testAccServiceConfig_blueGreenDeployment_basic(rName, true), Check: resource.ComposeTestCheckFunc( testAccCheckServiceExists(ctx, resourceName, &service), - resource.TestCheckResourceAttr(resourceName, "load_balancer.#", "1"), + resource.TestCheckResourceAttr(resourceName, "deployment_configuration.0.strategy", "BLUE_GREEN"), + resource.TestCheckResourceAttr(resourceName, "desired_count", "1"), + ), + }, + { + Config: testAccServiceConfig_blueGreenDeployment_zeroBakeTime(rName, true), + Check: resource.ComposeTestCheckFunc( + testAccCheckServiceExists(ctx, resourceName, &service), + resource.TestCheckResourceAttr(resourceName, "deployment_configuration.0.strategy", "BLUE_GREEN"), + resource.TestCheckResourceAttr(resourceName, "desired_count", "2"), ), }, }, }) } -func TestAccECSService_multipleTargetGroups(t *testing.T) { +func TestAccECSService_BlueGreenDeployment_waitServiceActive(t *testing.T) { ctx := acctest.Context(t) var service awstypes.Service - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix)[:16] // Use shorter name to avoid target group name length issues resourceName := "aws_ecs_service.test" resource.ParallelTest(t, resource.TestCase{ @@ -1080,21 +1334,20 @@ func TestAccECSService_multipleTargetGroups(t *testing.T) { CheckDestroy: testAccCheckServiceDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccServiceConfig_multipleTargetGroups(rName), + Config: testAccServiceConfig_blueGreenDeployment_basic(rName, false), Check: resource.ComposeTestCheckFunc( testAccCheckServiceExists(ctx, resourceName, &service), - resource.TestCheckResourceAttr(resourceName, "load_balancer.#", "2"), + resource.TestCheckResourceAttr(resourceName, "deployment_configuration.0.strategy", "BLUE_GREEN"), ), }, }, }) } -func TestAccECSService_forceNewDeployment(t *testing.T) { +func 
TestAccECSService_BlueGreenDeployment_withoutTestListenerRule(t *testing.T) { ctx := acctest.Context(t) var service awstypes.Service - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - clusterName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix)[:16] // Use shorter name to avoid target group name length issues resourceName := "aws_ecs_service.test" resource.ParallelTest(t, resource.TestCase{ @@ -1104,40 +1357,52 @@ func TestAccECSService_forceNewDeployment(t *testing.T) { CheckDestroy: testAccCheckServiceDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccServiceConfig_basic(rName, clusterName), + Config: testAccServiceConfig_blueGreenDeployment_withoutTestListenerRule(rName, true), Check: resource.ComposeTestCheckFunc( testAccCheckServiceExists(ctx, resourceName, &service), - resource.TestCheckResourceAttr(resourceName, "ordered_placement_strategy.#", "0"), + resource.TestCheckResourceAttr(resourceName, "deployment_configuration.0.strategy", "BLUE_GREEN"), + resource.TestCheckResourceAttr(resourceName, "load_balancer.0.advanced_configuration.#", "1"), + resource.TestCheckResourceAttrSet(resourceName, "load_balancer.0.advanced_configuration.0.alternate_target_group_arn"), + resource.TestCheckResourceAttrSet(resourceName, "load_balancer.0.advanced_configuration.0.production_listener_rule"), + resource.TestCheckResourceAttr(resourceName, "load_balancer.0.advanced_configuration.0.test_listener_rule", ""), + resource.TestCheckResourceAttrSet(resourceName, "load_balancer.0.advanced_configuration.0.role_arn"), ), - ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), - }, - }, }, { - Config: testAccServiceConfig_forceNewDeployment(rName, clusterName), + // Set test_listener_rule + Config: testAccServiceConfig_blueGreenDeployment_basic(rName, true), Check: 
resource.ComposeTestCheckFunc( testAccCheckServiceExists(ctx, resourceName, &service), - resource.TestCheckResourceAttr(resourceName, "ordered_placement_strategy.#", "1"), - resource.TestCheckResourceAttr(resourceName, "ordered_placement_strategy.0.type", "binpack"), - resource.TestCheckResourceAttr(resourceName, "ordered_placement_strategy.0.field", "memory"), + resource.TestCheckResourceAttr(resourceName, "deployment_configuration.0.strategy", "BLUE_GREEN"), + resource.TestCheckResourceAttr(resourceName, "load_balancer.0.advanced_configuration.#", "1"), + resource.TestCheckResourceAttrSet(resourceName, "load_balancer.0.advanced_configuration.0.alternate_target_group_arn"), + resource.TestCheckResourceAttrSet(resourceName, "load_balancer.0.advanced_configuration.0.production_listener_rule"), + resource.TestCheckResourceAttrSet(resourceName, "load_balancer.0.advanced_configuration.0.test_listener_rule"), + resource.TestCheckResourceAttrSet(resourceName, "load_balancer.0.advanced_configuration.0.role_arn"), + ), + }, + { + // Remove test_listener_rule again + Config: testAccServiceConfig_blueGreenDeployment_withoutTestListenerRule(rName, true), + Check: resource.ComposeTestCheckFunc( + testAccCheckServiceExists(ctx, resourceName, &service), + resource.TestCheckResourceAttr(resourceName, "deployment_configuration.0.strategy", "BLUE_GREEN"), + resource.TestCheckResourceAttr(resourceName, "load_balancer.0.advanced_configuration.#", "1"), + resource.TestCheckResourceAttrSet(resourceName, "load_balancer.0.advanced_configuration.0.alternate_target_group_arn"), + resource.TestCheckResourceAttrSet(resourceName, "load_balancer.0.advanced_configuration.0.production_listener_rule"), + resource.TestCheckResourceAttr(resourceName, "load_balancer.0.advanced_configuration.0.test_listener_rule", ""), + resource.TestCheckResourceAttrSet(resourceName, "load_balancer.0.advanced_configuration.0.role_arn"), ), - ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: 
[]plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), - }, - }, }, }, }) } -func TestAccECSService_forceNewDeploymentTriggers(t *testing.T) { +func TestAccECSService_DeploymentConfiguration_strategy(t *testing.T) { + // Test for deployment configuration strategy ctx := acctest.Context(t) var service awstypes.Service rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - clusterName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_ecs_service.test" resource.ParallelTest(t, resource.TestCase{ @@ -1147,44 +1412,34 @@ func TestAccECSService_forceNewDeploymentTriggers(t *testing.T) { CheckDestroy: testAccCheckServiceDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccServiceConfig_forceNewDeployment(rName, clusterName), + Config: testAccServiceConfig_deploymentConfiguration_strategy(rName, "ROLLING"), Check: resource.ComposeTestCheckFunc( testAccCheckServiceExists(ctx, resourceName, &service), - resource.TestCheckResourceAttr(resourceName, "ordered_placement_strategy.#", "1"), - resource.TestCheckResourceAttr(resourceName, "ordered_placement_strategy.0.type", "binpack"), - resource.TestCheckResourceAttr(resourceName, "ordered_placement_strategy.0.field", "memory"), + resource.TestCheckResourceAttr(resourceName, "deployment_configuration.0.strategy", "ROLLING"), ), - ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), - }, - }, }, { - Config: testAccServiceConfig_forceNewDeploymentTriggers(rName, clusterName), + Config: testAccServiceConfig_deploymentConfiguration_strategy(rName, "BLUE_GREEN"), Check: resource.ComposeTestCheckFunc( testAccCheckServiceExists(ctx, resourceName, &service), - resource.TestCheckResourceAttr(resourceName, "force_new_deployment", acctest.CtTrue), - resource.TestCheckResourceAttrSet(resourceName, "triggers.update"), - 
resource.TestCheckResourceAttr(resourceName, "ordered_placement_strategy.#", "1"), - resource.TestCheckResourceAttr(resourceName, "ordered_placement_strategy.0.type", "binpack"), - resource.TestCheckResourceAttr(resourceName, "ordered_placement_strategy.0.field", "memory"), + resource.TestCheckResourceAttr(resourceName, "deployment_configuration.0.strategy", "BLUE_GREEN"), + ), + }, + { + Config: testAccServiceConfig_deploymentConfiguration_strategy(rName, "ROLLING"), + Check: resource.ComposeTestCheckFunc( + testAccCheckServiceExists(ctx, resourceName, &service), + resource.TestCheckResourceAttr(resourceName, "deployment_configuration.0.strategy", "ROLLING"), ), - ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), - }, - }, }, }, }) } -func TestAccECSService_PlacementStrategy_basic(t *testing.T) { +func TestAccECSService_DeploymentValues_basic(t *testing.T) { ctx := acctest.Context(t) var service awstypes.Service rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - clusterName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_ecs_service.test" resource.ParallelTest(t, resource.TestCase{ @@ -1194,69 +1449,47 @@ func TestAccECSService_PlacementStrategy_basic(t *testing.T) { CheckDestroy: testAccCheckServiceDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccServiceConfig_basic(rName, clusterName), + Config: testAccServiceConfig_deploymentValues(rName), Check: resource.ComposeTestCheckFunc( testAccCheckServiceExists(ctx, resourceName, &service), - resource.TestCheckResourceAttr(resourceName, "ordered_placement_strategy.#", "0"), + resource.TestCheckResourceAttr(resourceName, "deployment_maximum_percent", "200"), + resource.TestCheckResourceAttr(resourceName, "deployment_minimum_healthy_percent", "100"), ), - ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{ - 
plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), - }, - }, - }, - { - Config: testAccServiceConfig_placementStrategy(rName, clusterName), - Check: resource.ComposeTestCheckFunc( - testAccCheckServiceExists(ctx, resourceName, &service), - resource.TestCheckResourceAttr(resourceName, "ordered_placement_strategy.#", "1"), - resource.TestCheckResourceAttr(resourceName, "ordered_placement_strategy.0.type", "binpack"), - resource.TestCheckResourceAttr(resourceName, "ordered_placement_strategy.0.field", "memory"), - ), - ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), - }, - }, - }, - { - Config: testAccServiceConfig_randomPlacementStrategy(rName, clusterName), - Check: resource.ComposeTestCheckFunc( - testAccCheckServiceExists(ctx, resourceName, &service), - resource.TestCheckResourceAttr(resourceName, "ordered_placement_strategy.#", "1"), - resource.TestCheckResourceAttr(resourceName, "ordered_placement_strategy.0.type", "random"), - resource.TestCheckResourceAttr(resourceName, "ordered_placement_strategy.0.field", ""), - ), - ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), - }, - }, }, + }, + }) +} + +// Regression for https://github.com/hashicorp/terraform-provider-aws/issues/6315 +func TestAccECSService_DeploymentValues_minZeroMaxOneHundred(t *testing.T) { + ctx := acctest.Context(t) + var service awstypes.Service + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_ecs_service.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.ECSServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckServiceDestroy(ctx), + Steps: []resource.TestStep{ { - Config: 
testAccServiceConfig_multiplacementStrategy(rName, clusterName), + Config: testAccServiceConfig_deploymentPercents(rName, 0, 100), Check: resource.ComposeTestCheckFunc( testAccCheckServiceExists(ctx, resourceName, &service), - resource.TestCheckResourceAttr(resourceName, "ordered_placement_strategy.#", "2"), - resource.TestCheckResourceAttr(resourceName, "ordered_placement_strategy.0.type", "binpack"), - resource.TestCheckResourceAttr(resourceName, "ordered_placement_strategy.0.field", "memory"), - resource.TestCheckResourceAttr(resourceName, "ordered_placement_strategy.1.type", "spread"), - resource.TestCheckResourceAttr(resourceName, "ordered_placement_strategy.1.field", "instanceId"), + resource.TestCheckResourceAttr(resourceName, "deployment_maximum_percent", "100"), + resource.TestCheckResourceAttr(resourceName, "deployment_minimum_healthy_percent", "0"), ), - ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), - }, - }, }, }, }) } -// Reference: https://github.com/hashicorp/terraform-provider-aws/issues/13146 -func TestAccECSService_PlacementStrategy_missing(t *testing.T) { +func TestAccECSService_deploymentCircuitBreaker(t *testing.T) { ctx := acctest.Context(t) + var service awstypes.Service rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_ecs_service.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, @@ -1265,14 +1498,20 @@ func TestAccECSService_PlacementStrategy_missing(t *testing.T) { CheckDestroy: testAccCheckServiceDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccServiceConfig_placementStrategyType(rName, ""), - ExpectError: regexache.MustCompile(`expected type to be one of`), + Config: testAccServiceConfig_deploymentCircuitBreaker(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckServiceExists(ctx, resourceName, &service), + 
resource.TestCheckResourceAttr(resourceName, "deployment_circuit_breaker.#", "1"), + resource.TestCheckResourceAttr(resourceName, "deployment_circuit_breaker.0.enable", acctest.CtTrue), + resource.TestCheckResourceAttr(resourceName, "deployment_circuit_breaker.0.rollback", acctest.CtTrue), + ), }, }, }) } -func TestAccECSService_PlacementConstraints_basic(t *testing.T) { +// Regression for https://github.com/hashicorp/terraform/issues/3444 +func TestAccECSService_loadBalancerChanges(t *testing.T) { ctx := acctest.Context(t) var service awstypes.Service rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -1285,10 +1524,9 @@ func TestAccECSService_PlacementConstraints_basic(t *testing.T) { CheckDestroy: testAccCheckServiceDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccServiceConfig_placementConstraint(rName), + Config: testAccServiceConfig_lbChanges(rName), Check: resource.ComposeTestCheckFunc( testAccCheckServiceExists(ctx, resourceName, &service), - resource.TestCheckResourceAttr(resourceName, "placement_constraints.#", "1"), ), ConfigPlanChecks: resource.ConfigPlanChecks{ PreApply: []plancheck.PlanCheck{ @@ -1297,10 +1535,9 @@ func TestAccECSService_PlacementConstraints_basic(t *testing.T) { }, }, { - Config: testAccServiceConfig_placementConstraintEmptyExpression(rName), + Config: testAccServiceConfig_lbChangesModified(rName), Check: resource.ComposeTestCheckFunc( testAccCheckServiceExists(ctx, resourceName, &service), - resource.TestCheckResourceAttr(resourceName, "placement_constraints.#", "1"), ), ConfigPlanChecks: resource.ConfigPlanChecks{ PreApply: []plancheck.PlanCheck{ @@ -1312,7 +1549,8 @@ func TestAccECSService_PlacementConstraints_basic(t *testing.T) { }) } -func TestAccECSService_PlacementConstraints_emptyExpression(t *testing.T) { +// Regression for https://github.com/hashicorp/terraform/issues/3361 +func TestAccECSService_clusterName(t *testing.T) { ctx := acctest.Context(t) var service awstypes.Service rName := 
sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -1325,17 +1563,17 @@ func TestAccECSService_PlacementConstraints_emptyExpression(t *testing.T) { CheckDestroy: testAccCheckServiceDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccServiceConfig_placementConstraintEmptyExpression(rName), + Config: testAccServiceConfig_clusterName(rName), Check: resource.ComposeTestCheckFunc( testAccCheckServiceExists(ctx, resourceName, &service), - resource.TestCheckResourceAttr(resourceName, "placement_constraints.#", "1"), + resource.TestCheckResourceAttr(resourceName, "cluster", rName), ), }, }, }) } -func TestAccECSService_LaunchTypeFargate_basic(t *testing.T) { +func TestAccECSService_alb(t *testing.T) { ctx := acctest.Context(t) var service awstypes.Service rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -1348,35 +1586,17 @@ func TestAccECSService_LaunchTypeFargate_basic(t *testing.T) { CheckDestroy: testAccCheckServiceDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccServiceConfig_launchTypeFargate(rName, false), - Check: resource.ComposeTestCheckFunc( - testAccCheckServiceExists(ctx, resourceName, &service), - resource.TestCheckResourceAttr(resourceName, "launch_type", "FARGATE"), - resource.TestCheckResourceAttr(resourceName, "network_configuration.0.assign_public_ip", acctest.CtFalse), - resource.TestCheckResourceAttr(resourceName, "network_configuration.0.security_groups.#", "2"), - resource.TestCheckResourceAttr(resourceName, "network_configuration.0.subnets.#", "2"), - resource.TestCheckResourceAttr(resourceName, "platform_version", "LATEST"), - ), - }, - { - Config: testAccServiceConfig_launchTypeFargate(rName, true), - Check: resource.ComposeTestCheckFunc( - testAccCheckServiceExists(ctx, resourceName, &service), - resource.TestCheckResourceAttr(resourceName, "network_configuration.0.assign_public_ip", acctest.CtTrue), - ), - }, - { - Config: testAccServiceConfig_launchTypeFargate(rName, false), + Config: 
testAccServiceConfig_alb(rName), Check: resource.ComposeTestCheckFunc( testAccCheckServiceExists(ctx, resourceName, &service), - resource.TestCheckResourceAttr(resourceName, "network_configuration.0.assign_public_ip", acctest.CtFalse), + resource.TestCheckResourceAttr(resourceName, "load_balancer.#", "1"), ), }, }, }) } -func TestAccECSService_LaunchTypeFargate_platformVersion(t *testing.T) { +func TestAccECSService_multipleTargetGroups(t *testing.T) { ctx := acctest.Context(t) var service awstypes.Service rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -1389,34 +1609,21 @@ func TestAccECSService_LaunchTypeFargate_platformVersion(t *testing.T) { CheckDestroy: testAccCheckServiceDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccServiceConfig_launchTypeFargateAndPlatformVersion(rName, "1.3.0"), - Check: resource.ComposeTestCheckFunc( - testAccCheckServiceExists(ctx, resourceName, &service), - resource.TestCheckResourceAttr(resourceName, "platform_version", "1.3.0"), - ), - }, - { - Config: testAccServiceConfig_launchTypeFargateAndPlatformVersion(rName, "LATEST"), - Check: resource.ComposeTestCheckFunc( - testAccCheckServiceExists(ctx, resourceName, &service), - resource.TestCheckResourceAttr(resourceName, "platform_version", "LATEST"), - ), - }, - { - Config: testAccServiceConfig_launchTypeFargateAndPlatformVersion(rName, "1.4.0"), + Config: testAccServiceConfig_multipleTargetGroups(rName), Check: resource.ComposeTestCheckFunc( testAccCheckServiceExists(ctx, resourceName, &service), - resource.TestCheckResourceAttr(resourceName, "platform_version", "1.4.0"), + resource.TestCheckResourceAttr(resourceName, "load_balancer.#", "2"), ), }, }, }) } -func TestAccECSService_LaunchTypeFargate_waitForSteadyState(t *testing.T) { +func TestAccECSService_forceNewDeployment(t *testing.T) { ctx := acctest.Context(t) var service awstypes.Service rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + clusterName := 
sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_ecs_service.test" resource.ParallelTest(t, resource.TestCase{ @@ -1426,31 +1633,40 @@ func TestAccECSService_LaunchTypeFargate_waitForSteadyState(t *testing.T) { CheckDestroy: testAccCheckServiceDestroy(ctx), Steps: []resource.TestStep{ { - // Wait for the ECS Cluster to reach a steady state w/specified count - Config: testAccServiceConfig_launchTypeFargateAndWait(rName, 1, true), + Config: testAccServiceConfig_basic(rName, clusterName), Check: resource.ComposeTestCheckFunc( testAccCheckServiceExists(ctx, resourceName, &service), - resource.TestCheckResourceAttr(resourceName, "desired_count", "1"), - resource.TestCheckResourceAttr(resourceName, "wait_for_steady_state", acctest.CtTrue), + resource.TestCheckResourceAttr(resourceName, "ordered_placement_strategy.#", "0"), ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + }, + }, }, { - ResourceName: resourceName, - ImportStateId: fmt.Sprintf("%s/%s", rName, rName), - ImportState: true, - ImportStateVerify: true, - // Resource currently defaults to importing task_definition as family:revision - // and wait_for_steady_state is not read from API - ImportStateVerifyIgnore: []string{"task_definition", "wait_for_steady_state"}, + Config: testAccServiceConfig_forceNewDeployment(rName, clusterName), + Check: resource.ComposeTestCheckFunc( + testAccCheckServiceExists(ctx, resourceName, &service), + resource.TestCheckResourceAttr(resourceName, "ordered_placement_strategy.#", "1"), + resource.TestCheckResourceAttr(resourceName, "ordered_placement_strategy.0.type", "binpack"), + resource.TestCheckResourceAttr(resourceName, "ordered_placement_strategy.0.field", "memory"), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + }, + 
}, }, }, }) } -func TestAccECSService_LaunchTypeFargate_updateWaitForSteadyState(t *testing.T) { +func TestAccECSService_forceNewDeploymentTriggers(t *testing.T) { ctx := acctest.Context(t) var service awstypes.Service rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + clusterName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_ecs_service.test" resource.ParallelTest(t, resource.TestCase{ @@ -1460,39 +1676,44 @@ func TestAccECSService_LaunchTypeFargate_updateWaitForSteadyState(t *testing.T) CheckDestroy: testAccCheckServiceDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccServiceConfig_launchTypeFargateNoWait(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckServiceExists(ctx, resourceName, &service), - resource.TestCheckResourceAttr(resourceName, "desired_count", "1"), - resource.TestCheckResourceAttr(resourceName, "wait_for_steady_state", acctest.CtFalse), - ), - }, - { - // Modify desired count and wait for the ECS Cluster to reach steady state - Config: testAccServiceConfig_launchTypeFargateAndWait(rName, 2, true), + Config: testAccServiceConfig_forceNewDeployment(rName, clusterName), Check: resource.ComposeTestCheckFunc( testAccCheckServiceExists(ctx, resourceName, &service), - resource.TestCheckResourceAttr(resourceName, "desired_count", "2"), - resource.TestCheckResourceAttr(resourceName, "wait_for_steady_state", acctest.CtTrue), + resource.TestCheckResourceAttr(resourceName, "ordered_placement_strategy.#", "1"), + resource.TestCheckResourceAttr(resourceName, "ordered_placement_strategy.0.type", "binpack"), + resource.TestCheckResourceAttr(resourceName, "ordered_placement_strategy.0.field", "memory"), ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + }, + }, }, { - // Modify desired count without wait - Config: testAccServiceConfig_launchTypeFargateAndWait(rName, 1, false), + 
Config: testAccServiceConfig_forceNewDeploymentTriggers(rName, clusterName), Check: resource.ComposeTestCheckFunc( testAccCheckServiceExists(ctx, resourceName, &service), - resource.TestCheckResourceAttr(resourceName, "desired_count", "1"), - resource.TestCheckResourceAttr(resourceName, "wait_for_steady_state", acctest.CtFalse), + resource.TestCheckResourceAttr(resourceName, "force_new_deployment", acctest.CtTrue), + resource.TestCheckResourceAttrSet(resourceName, "triggers.update"), + resource.TestCheckResourceAttr(resourceName, "ordered_placement_strategy.#", "1"), + resource.TestCheckResourceAttr(resourceName, "ordered_placement_strategy.0.type", "binpack"), + resource.TestCheckResourceAttr(resourceName, "ordered_placement_strategy.0.field", "memory"), ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + }, + }, }, }, }) } -func TestAccECSService_LaunchTypeEC2_network(t *testing.T) { +func TestAccECSService_PlacementStrategy_basic(t *testing.T) { ctx := acctest.Context(t) var service awstypes.Service rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + clusterName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_ecs_service.test" resource.ParallelTest(t, resource.TestCase{ @@ -1502,78 +1723,69 @@ func TestAccECSService_LaunchTypeEC2_network(t *testing.T) { CheckDestroy: testAccCheckServiceDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccServiceConfig_networkConfiguration(rName), + Config: testAccServiceConfig_basic(rName, clusterName), Check: resource.ComposeTestCheckFunc( testAccCheckServiceExists(ctx, resourceName, &service), - resource.TestCheckResourceAttr(resourceName, "network_configuration.0.assign_public_ip", acctest.CtFalse), - resource.TestCheckResourceAttr(resourceName, "network_configuration.0.security_groups.#", "2"), - resource.TestCheckResourceAttr(resourceName, 
"network_configuration.0.subnets.#", "2"), + resource.TestCheckResourceAttr(resourceName, "ordered_placement_strategy.#", "0"), ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + }, + }, }, { - Config: testAccServiceConfig_networkConfigurationModified(rName), + Config: testAccServiceConfig_placementStrategy(rName, clusterName), Check: resource.ComposeTestCheckFunc( testAccCheckServiceExists(ctx, resourceName, &service), - resource.TestCheckResourceAttr(resourceName, "network_configuration.0.assign_public_ip", acctest.CtFalse), - resource.TestCheckResourceAttr(resourceName, "network_configuration.0.security_groups.#", "1"), - resource.TestCheckResourceAttr(resourceName, "network_configuration.0.subnets.#", "2"), + resource.TestCheckResourceAttr(resourceName, "ordered_placement_strategy.#", "1"), + resource.TestCheckResourceAttr(resourceName, "ordered_placement_strategy.0.type", "binpack"), + resource.TestCheckResourceAttr(resourceName, "ordered_placement_strategy.0.field", "memory"), ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + }, + }, }, - }, - }) -} - -func TestAccECSService_DaemonSchedulingStrategy_basic(t *testing.T) { - ctx := acctest.Context(t) - var service awstypes.Service - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resourceName := "aws_ecs_service.test" - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, names.ECSServiceID), - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckServiceDestroy(ctx), - Steps: []resource.TestStep{ { - Config: testAccServiceConfig_daemonSchedulingStrategy(rName), + Config: testAccServiceConfig_randomPlacementStrategy(rName, clusterName), Check: 
resource.ComposeTestCheckFunc( testAccCheckServiceExists(ctx, resourceName, &service), - resource.TestCheckResourceAttr(resourceName, "scheduling_strategy", "DAEMON"), + resource.TestCheckResourceAttr(resourceName, "ordered_placement_strategy.#", "1"), + resource.TestCheckResourceAttr(resourceName, "ordered_placement_strategy.0.type", "random"), + resource.TestCheckResourceAttr(resourceName, "ordered_placement_strategy.0.field", ""), ), - }, - }, - }) -} - -func TestAccECSService_DaemonSchedulingStrategy_setDeploymentMinimum(t *testing.T) { - ctx := acctest.Context(t) - var service awstypes.Service - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resourceName := "aws_ecs_service.test" - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, names.ECSServiceID), - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckServiceDestroy(ctx), - Steps: []resource.TestStep{ + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + }, + }, + }, { - Config: testAccServiceConfig_daemonSchedulingStrategySetDeploymentMinimum(rName), + Config: testAccServiceConfig_multiplacementStrategy(rName, clusterName), Check: resource.ComposeTestCheckFunc( testAccCheckServiceExists(ctx, resourceName, &service), - resource.TestCheckResourceAttr(resourceName, "scheduling_strategy", "DAEMON"), + resource.TestCheckResourceAttr(resourceName, "ordered_placement_strategy.#", "2"), + resource.TestCheckResourceAttr(resourceName, "ordered_placement_strategy.0.type", "binpack"), + resource.TestCheckResourceAttr(resourceName, "ordered_placement_strategy.0.field", "memory"), + resource.TestCheckResourceAttr(resourceName, "ordered_placement_strategy.1.type", "spread"), + resource.TestCheckResourceAttr(resourceName, "ordered_placement_strategy.1.field", "instanceId"), ), + 
ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + }, + }, }, }, }) } -func TestAccECSService_replicaSchedulingStrategy(t *testing.T) { +// Reference: https://github.com/hashicorp/terraform-provider-aws/issues/13146 +func TestAccECSService_PlacementStrategy_missing(t *testing.T) { ctx := acctest.Context(t) - var service awstypes.Service rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resourceName := "aws_ecs_service.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, @@ -1582,146 +1794,155 @@ func TestAccECSService_replicaSchedulingStrategy(t *testing.T) { CheckDestroy: testAccCheckServiceDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccServiceConfig_replicaSchedulingStrategy(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckServiceExists(ctx, resourceName, &service), - resource.TestCheckResourceAttr(resourceName, "scheduling_strategy", "REPLICA"), - ), + Config: testAccServiceConfig_placementStrategyType(rName, ""), + ExpectError: regexache.MustCompile(`expected type to be one of`), }, }, }) } -func TestAccECSService_ServiceRegistries_basic(t *testing.T) { +func TestAccECSService_PlacementConstraints_basic(t *testing.T) { ctx := acctest.Context(t) var service awstypes.Service rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_ecs_service.test" resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { - acctest.PreCheck(ctx, t) - acctest.PreCheckPartitionHasService(t, names.ServiceDiscoveryEndpointID) - }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ECSServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckServiceDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccServiceConfig_registries(rName), + Config: 
testAccServiceConfig_placementConstraint(rName), Check: resource.ComposeTestCheckFunc( testAccCheckServiceExists(ctx, resourceName, &service), - resource.TestCheckResourceAttr(resourceName, "service_registries.#", "1"), + resource.TestCheckResourceAttr(resourceName, "placement_constraints.#", "1"), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + }, + }, + }, + { + Config: testAccServiceConfig_placementConstraintEmptyExpression(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckServiceExists(ctx, resourceName, &service), + resource.TestCheckResourceAttr(resourceName, "placement_constraints.#", "1"), ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + }, + }, }, }, }) } -func TestAccECSService_ServiceRegistries_container(t *testing.T) { +func TestAccECSService_PlacementConstraints_emptyExpression(t *testing.T) { ctx := acctest.Context(t) var service awstypes.Service rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_ecs_service.test" resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { - acctest.PreCheck(ctx, t) - acctest.PreCheckPartitionHasService(t, names.ServiceDiscoveryEndpointID) - }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ECSServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckServiceDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccServiceConfig_registriesContainer(rName), + Config: testAccServiceConfig_placementConstraintEmptyExpression(rName), Check: resource.ComposeTestCheckFunc( testAccCheckServiceExists(ctx, resourceName, &service), - resource.TestCheckResourceAttr(resourceName, "service_registries.#", "1"), + resource.TestCheckResourceAttr(resourceName, 
"placement_constraints.#", "1"), ), }, }, }) } -func TestAccECSService_ServiceRegistries_changes(t *testing.T) { +func TestAccECSService_LaunchTypeFargate_basic(t *testing.T) { ctx := acctest.Context(t) var service awstypes.Service rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - serviceDiscoveryName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - updatedServiceDiscoveryName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_ecs_service.test" - if testing.Short() { - t.Skip("skipping long-running test in short mode") - } - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { - acctest.PreCheck(ctx, t) - acctest.PreCheckPartitionHasService(t, names.ServiceDiscoveryEndpointID) - }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ECSServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckServiceDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccServiceConfig_registriesChanges(rName, serviceDiscoveryName), + Config: testAccServiceConfig_launchTypeFargate(rName, false), Check: resource.ComposeTestCheckFunc( testAccCheckServiceExists(ctx, resourceName, &service), - resource.TestCheckResourceAttr(resourceName, "service_registries.#", "1"), + resource.TestCheckResourceAttr(resourceName, "launch_type", "FARGATE"), + resource.TestCheckResourceAttr(resourceName, "network_configuration.0.assign_public_ip", acctest.CtFalse), + resource.TestCheckResourceAttr(resourceName, "network_configuration.0.security_groups.#", "2"), + resource.TestCheckResourceAttr(resourceName, "network_configuration.0.subnets.#", "2"), + resource.TestCheckResourceAttr(resourceName, "platform_version", "LATEST"), ), }, { - Config: testAccServiceConfig_registriesChanges(rName, updatedServiceDiscoveryName), + Config: testAccServiceConfig_launchTypeFargate(rName, true), Check: resource.ComposeTestCheckFunc( testAccCheckServiceExists(ctx, resourceName, 
&service), - resource.TestCheckResourceAttr(resourceName, "service_registries.#", "1"), + resource.TestCheckResourceAttr(resourceName, "network_configuration.0.assign_public_ip", acctest.CtTrue), + ), + }, + { + Config: testAccServiceConfig_launchTypeFargate(rName, false), + Check: resource.ComposeTestCheckFunc( + testAccCheckServiceExists(ctx, resourceName, &service), + resource.TestCheckResourceAttr(resourceName, "network_configuration.0.assign_public_ip", acctest.CtFalse), ), }, }, }) } -func TestAccECSService_ServiceRegistries_removal(t *testing.T) { +func TestAccECSService_LaunchTypeFargate_platformVersion(t *testing.T) { ctx := acctest.Context(t) var service awstypes.Service rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - serviceDiscoveryName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_ecs_service.test" - if testing.Short() { - t.Skip("skipping long-running test in short mode") - } - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { - acctest.PreCheck(ctx, t) - acctest.PreCheckPartitionHasService(t, names.ServiceDiscoveryEndpointID) - }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ECSServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckServiceDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccServiceConfig_registriesRemoval(rName, serviceDiscoveryName, false), + Config: testAccServiceConfig_launchTypeFargateAndPlatformVersion(rName, "1.3.0"), Check: resource.ComposeTestCheckFunc( testAccCheckServiceExists(ctx, resourceName, &service), - resource.TestCheckResourceAttr(resourceName, "service_registries.#", "1"), + resource.TestCheckResourceAttr(resourceName, "platform_version", "1.3.0"), ), }, { - Config: testAccServiceConfig_registriesRemoval(rName, serviceDiscoveryName, true), + Config: testAccServiceConfig_launchTypeFargateAndPlatformVersion(rName, "LATEST"), Check: resource.ComposeTestCheckFunc( 
testAccCheckServiceExists(ctx, resourceName, &service), - resource.TestCheckResourceAttr(resourceName, "service_registries.#", "0"), + resource.TestCheckResourceAttr(resourceName, "platform_version", "LATEST"), + ), + }, + { + Config: testAccServiceConfig_launchTypeFargateAndPlatformVersion(rName, "1.4.0"), + Check: resource.ComposeTestCheckFunc( + testAccCheckServiceExists(ctx, resourceName, &service), + resource.TestCheckResourceAttr(resourceName, "platform_version", "1.4.0"), ), }, }, }) } -func TestAccECSService_ServiceConnect_basic(t *testing.T) { +func TestAccECSService_LaunchTypeFargate_waitForSteadyState(t *testing.T) { ctx := acctest.Context(t) var service awstypes.Service rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -1734,16 +1955,18 @@ func TestAccECSService_ServiceConnect_basic(t *testing.T) { CheckDestroy: testAccCheckServiceDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccServiceConfig_serviceConnectBasic(rName), + // Wait for the ECS Cluster to reach a steady state w/specified count + Config: testAccServiceConfig_launchTypeFargateAndWait(rName, 1, true), Check: resource.ComposeTestCheckFunc( testAccCheckServiceExists(ctx, resourceName, &service), - resource.TestCheckResourceAttr(resourceName, "service_connect_configuration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "desired_count", "1"), + resource.TestCheckResourceAttr(resourceName, "wait_for_steady_state", acctest.CtTrue), ), }, { ResourceName: resourceName, - ImportState: true, ImportStateId: fmt.Sprintf("%s/%s", rName, rName), + ImportState: true, ImportStateVerify: true, // Resource currently defaults to importing task_definition as family:revision // and wait_for_steady_state is not read from API @@ -1753,7 +1976,7 @@ func TestAccECSService_ServiceConnect_basic(t *testing.T) { }) } -func TestAccECSService_ServiceConnect_full(t *testing.T) { +func TestAccECSService_LaunchTypeFargate_updateWaitForSteadyState(t *testing.T) { ctx := acctest.Context(t) 
var service awstypes.Service rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -1766,17 +1989,36 @@ func TestAccECSService_ServiceConnect_full(t *testing.T) { CheckDestroy: testAccCheckServiceDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccServiceConfig_serviceConnectAllAttributes(rName), + Config: testAccServiceConfig_launchTypeFargateNoWait(rName), Check: resource.ComposeTestCheckFunc( testAccCheckServiceExists(ctx, resourceName, &service), - resource.TestCheckResourceAttr(resourceName, "service_connect_configuration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "desired_count", "1"), + resource.TestCheckResourceAttr(resourceName, "wait_for_steady_state", acctest.CtFalse), + ), + }, + { + // Modify desired count and wait for the ECS Cluster to reach steady state + Config: testAccServiceConfig_launchTypeFargateAndWait(rName, 2, true), + Check: resource.ComposeTestCheckFunc( + testAccCheckServiceExists(ctx, resourceName, &service), + resource.TestCheckResourceAttr(resourceName, "desired_count", "2"), + resource.TestCheckResourceAttr(resourceName, "wait_for_steady_state", acctest.CtTrue), + ), + }, + { + // Modify desired count without wait + Config: testAccServiceConfig_launchTypeFargateAndWait(rName, 1, false), + Check: resource.ComposeTestCheckFunc( + testAccCheckServiceExists(ctx, resourceName, &service), + resource.TestCheckResourceAttr(resourceName, "desired_count", "1"), + resource.TestCheckResourceAttr(resourceName, "wait_for_steady_state", acctest.CtFalse), ), }, }, }) } -func TestAccECSService_ServiceConnect_tls_with_empty_timeout(t *testing.T) { +func TestAccECSService_LaunchTypeEC2_network(t *testing.T) { ctx := acctest.Context(t) var service awstypes.Service rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -1789,17 +2031,28 @@ func TestAccECSService_ServiceConnect_tls_with_empty_timeout(t *testing.T) { CheckDestroy: testAccCheckServiceDestroy(ctx), Steps: []resource.TestStep{ { - Config: 
testAccServiceConfig_serviceConnect_tls_with_empty_timeout_block(rName), + Config: testAccServiceConfig_networkConfiguration(rName), Check: resource.ComposeTestCheckFunc( testAccCheckServiceExists(ctx, resourceName, &service), - resource.TestCheckResourceAttr(resourceName, "service_connect_configuration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "network_configuration.0.assign_public_ip", acctest.CtFalse), + resource.TestCheckResourceAttr(resourceName, "network_configuration.0.security_groups.#", "2"), + resource.TestCheckResourceAttr(resourceName, "network_configuration.0.subnets.#", "2"), + ), + }, + { + Config: testAccServiceConfig_networkConfigurationModified(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckServiceExists(ctx, resourceName, &service), + resource.TestCheckResourceAttr(resourceName, "network_configuration.0.assign_public_ip", acctest.CtFalse), + resource.TestCheckResourceAttr(resourceName, "network_configuration.0.security_groups.#", "1"), + resource.TestCheckResourceAttr(resourceName, "network_configuration.0.subnets.#", "2"), ), }, }, }) } -func TestAccECSService_ServiceConnect_ingressPortOverride(t *testing.T) { +func TestAccECSService_DaemonSchedulingStrategy_basic(t *testing.T) { ctx := acctest.Context(t) var service awstypes.Service rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -1812,27 +2065,17 @@ func TestAccECSService_ServiceConnect_ingressPortOverride(t *testing.T) { CheckDestroy: testAccCheckServiceDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccServiceConfig_serviceConnectIngressPortOverride(rName), - Check: resource.ComposeAggregateTestCheckFunc( + Config: testAccServiceConfig_daemonSchedulingStrategy(rName), + Check: resource.ComposeTestCheckFunc( testAccCheckServiceExists(ctx, resourceName, &service), - resource.TestCheckResourceAttr(resourceName, "service_connect_configuration.#", "1"), - resource.TestCheckResourceAttr(resourceName, "service_connect_configuration.0.enabled", 
acctest.CtTrue), - resource.TestCheckResourceAttr(resourceName, "service_connect_configuration.0.log_configuration.#", "0"), - resource.TestCheckResourceAttrSet(resourceName, "service_connect_configuration.0.namespace"), - resource.TestCheckResourceAttr(resourceName, "service_connect_configuration.0.service.#", "1"), - resource.TestCheckResourceAttr(resourceName, "service_connect_configuration.0.service.0.client_alias.#", "1"), - resource.TestCheckResourceAttr(resourceName, "service_connect_configuration.0.service.0.client_alias.0.dns_name", "nginx-http."+rName), - resource.TestCheckResourceAttr(resourceName, "service_connect_configuration.0.service.0.client_alias.0.port", "8080"), - resource.TestCheckResourceAttr(resourceName, "service_connect_configuration.0.service.0.discovery_name", "nginx-http"), - resource.TestCheckResourceAttr(resourceName, "service_connect_configuration.0.service.0.ingress_port_override", "0"), - resource.TestCheckResourceAttr(resourceName, "service_connect_configuration.0.service.0.port_name", "nginx-http"), + resource.TestCheckResourceAttr(resourceName, "scheduling_strategy", "DAEMON"), ), }, }, }) } -func TestAccECSService_ServiceConnect_remove(t *testing.T) { +func TestAccECSService_DaemonSchedulingStrategy_setDeploymentMinimum(t *testing.T) { ctx := acctest.Context(t) var service awstypes.Service rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -1845,101 +2088,171 @@ func TestAccECSService_ServiceConnect_remove(t *testing.T) { CheckDestroy: testAccCheckServiceDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccServiceConfig_serviceConnectBasic(rName), + Config: testAccServiceConfig_daemonSchedulingStrategySetDeploymentMinimum(rName), Check: resource.ComposeTestCheckFunc( testAccCheckServiceExists(ctx, resourceName, &service), - resource.TestCheckResourceAttr(resourceName, "service_connect_configuration.#", "1"), - resource.TestCheckResourceAttr(resourceName, "service_connect_configuration.0.enabled", 
acctest.CtTrue), + resource.TestCheckResourceAttr(resourceName, "scheduling_strategy", "DAEMON"), ), }, + }, + }) +} + +func TestAccECSService_replicaSchedulingStrategy(t *testing.T) { + ctx := acctest.Context(t) + var service awstypes.Service + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_ecs_service.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.ECSServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckServiceDestroy(ctx), + Steps: []resource.TestStep{ { - Config: testAccServiceConfig_serviceConnectRemoved(rName), + Config: testAccServiceConfig_replicaSchedulingStrategy(rName), Check: resource.ComposeTestCheckFunc( testAccCheckServiceExists(ctx, resourceName, &service), - resource.TestCheckResourceAttr(resourceName, "service_connect_configuration.#", "0"), + resource.TestCheckResourceAttr(resourceName, "scheduling_strategy", "REPLICA"), ), }, }, }) } -func TestAccECSService_Tags_basic(t *testing.T) { +func TestAccECSService_ServiceRegistries_basic(t *testing.T) { ctx := acctest.Context(t) var service awstypes.Service rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_ecs_service.test" resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t) }, + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, names.ServiceDiscoveryEndpointID) + }, ErrorCheck: acctest.ErrorCheck(t, names.ECSServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckServiceDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccServiceConfig_tags1(rName, acctest.CtKey1, acctest.CtValue1), + Config: testAccServiceConfig_registries(rName), Check: resource.ComposeTestCheckFunc( testAccCheckServiceExists(ctx, resourceName, &service), - 
resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, "1"), - resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey1, acctest.CtValue1), + resource.TestCheckResourceAttr(resourceName, "service_registries.#", "1"), ), }, - { - ResourceName: resourceName, - ImportStateId: fmt.Sprintf("%s/%s", rName, rName), - ImportState: true, - ImportStateVerify: true, - // Resource currently defaults to importing task_definition as family:revision - // and wait_for_steady_state is not read from API - ImportStateVerifyIgnore: []string{"task_definition", "wait_for_steady_state"}, + }, + }) +} + +func TestAccECSService_ServiceRegistries_container(t *testing.T) { + ctx := acctest.Context(t) + var service awstypes.Service + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_ecs_service.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, names.ServiceDiscoveryEndpointID) + }, + ErrorCheck: acctest.ErrorCheck(t, names.ECSServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckServiceDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccServiceConfig_registriesContainer(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckServiceExists(ctx, resourceName, &service), + resource.TestCheckResourceAttr(resourceName, "service_registries.#", "1"), + ), }, + }, + }) +} + +func TestAccECSService_ServiceRegistries_changes(t *testing.T) { + ctx := acctest.Context(t) + var service awstypes.Service + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + serviceDiscoveryName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + updatedServiceDiscoveryName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_ecs_service.test" + + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + + resource.ParallelTest(t, resource.TestCase{ + 
PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, names.ServiceDiscoveryEndpointID) + }, + ErrorCheck: acctest.ErrorCheck(t, names.ECSServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckServiceDestroy(ctx), + Steps: []resource.TestStep{ { - Config: testAccServiceConfig_tags2(rName, acctest.CtKey1, acctest.CtValue1Updated, acctest.CtKey2, acctest.CtValue2), + Config: testAccServiceConfig_registriesChanges(rName, serviceDiscoveryName), Check: resource.ComposeTestCheckFunc( testAccCheckServiceExists(ctx, resourceName, &service), - resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, "2"), - resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey1, acctest.CtValue1Updated), - resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey2, acctest.CtValue2), + resource.TestCheckResourceAttr(resourceName, "service_registries.#", "1"), ), }, { - Config: testAccServiceConfig_tags1(rName, acctest.CtKey2, acctest.CtValue2), + Config: testAccServiceConfig_registriesChanges(rName, updatedServiceDiscoveryName), Check: resource.ComposeTestCheckFunc( testAccCheckServiceExists(ctx, resourceName, &service), - resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, "1"), - resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey2, acctest.CtValue2), + resource.TestCheckResourceAttr(resourceName, "service_registries.#", "1"), ), }, }, }) } -func TestAccECSService_Tags_managed(t *testing.T) { +func TestAccECSService_ServiceRegistries_removal(t *testing.T) { ctx := acctest.Context(t) var service awstypes.Service rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + serviceDiscoveryName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_ecs_service.test" + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t) }, + 
PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, names.ServiceDiscoveryEndpointID) + }, ErrorCheck: acctest.ErrorCheck(t, names.ECSServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckServiceDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccServiceConfig_managedTags(rName), + Config: testAccServiceConfig_registriesRemoval(rName, serviceDiscoveryName, false), Check: resource.ComposeTestCheckFunc( testAccCheckServiceExists(ctx, resourceName, &service), - resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, "1"), - resource.TestCheckResourceAttr(resourceName, "enable_ecs_managed_tags", acctest.CtTrue), + resource.TestCheckResourceAttr(resourceName, "service_registries.#", "1"), + ), + }, + { + Config: testAccServiceConfig_registriesRemoval(rName, serviceDiscoveryName, true), + Check: resource.ComposeTestCheckFunc( + testAccCheckServiceExists(ctx, resourceName, &service), + resource.TestCheckResourceAttr(resourceName, "service_registries.#", "0"), ), }, }, }) } -func TestAccECSService_Tags_propagate(t *testing.T) { +func TestAccECSService_ServiceConnect_basic(t *testing.T) { ctx := acctest.Context(t) - var first, second, third awstypes.Service + var service awstypes.Service rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_ecs_service.test" @@ -1950,32 +2263,72 @@ func TestAccECSService_Tags_propagate(t *testing.T) { CheckDestroy: testAccCheckServiceDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccServiceConfig_propagateTags(rName, "SERVICE"), + Config: testAccServiceConfig_serviceConnectBasic(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckServiceExists(ctx, resourceName, &first), - resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, "1"), - resource.TestCheckResourceAttr(resourceName, names.AttrPropagateTags, string(awstypes.PropagateTagsService)), + testAccCheckServiceExists(ctx, 
resourceName, &service), + resource.TestCheckResourceAttr(resourceName, "service_connect_configuration.#", "1"), ), }, { - Config: testAccServiceConfig_propagateTags(rName, "TASK_DEFINITION"), + ResourceName: resourceName, + ImportState: true, + ImportStateId: fmt.Sprintf("%s/%s", rName, rName), + ImportStateVerify: true, + // Resource currently defaults to importing task_definition as family:revision + // and wait_for_steady_state is not read from API + ImportStateVerifyIgnore: []string{"task_definition", "wait_for_steady_state"}, + }, + }, + }) +} + +func TestAccECSService_ServiceConnect_full(t *testing.T) { + ctx := acctest.Context(t) + var service awstypes.Service + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_ecs_service.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.ECSServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckServiceDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccServiceConfig_serviceConnectAllAttributes(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckServiceExists(ctx, resourceName, &second), - resource.TestCheckResourceAttr(resourceName, names.AttrPropagateTags, string(awstypes.PropagateTagsTaskDefinition)), + testAccCheckServiceExists(ctx, resourceName, &service), + resource.TestCheckResourceAttr(resourceName, "service_connect_configuration.#", "1"), ), }, + }, + }) +} + +func TestAccECSService_ServiceConnect_tls_with_empty_timeout(t *testing.T) { + ctx := acctest.Context(t) + var service awstypes.Service + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_ecs_service.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.ECSServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + 
CheckDestroy: testAccCheckServiceDestroy(ctx), + Steps: []resource.TestStep{ { - Config: testAccServiceConfig_propagateTags(rName, "NONE"), + Config: testAccServiceConfig_serviceConnect_tls_with_empty_timeout_block(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckServiceExists(ctx, resourceName, &third), - resource.TestCheckResourceAttr(resourceName, names.AttrPropagateTags, string(awstypes.PropagateTagsNone)), + testAccCheckServiceExists(ctx, resourceName, &service), + resource.TestCheckResourceAttr(resourceName, "service_connect_configuration.#", "1"), ), }, }, }) } -func TestAccECSService_executeCommand(t *testing.T) { +func TestAccECSService_ServiceConnect_ingressPortOverride(t *testing.T) { ctx := acctest.Context(t) var service awstypes.Service rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -1988,24 +2341,59 @@ func TestAccECSService_executeCommand(t *testing.T) { CheckDestroy: testAccCheckServiceDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccServiceConfig_executeCommand(rName, true), + Config: testAccServiceConfig_serviceConnectIngressPortOverride(rName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckServiceExists(ctx, resourceName, &service), + resource.TestCheckResourceAttr(resourceName, "service_connect_configuration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "service_connect_configuration.0.enabled", acctest.CtTrue), + resource.TestCheckResourceAttr(resourceName, "service_connect_configuration.0.log_configuration.#", "0"), + resource.TestCheckResourceAttrSet(resourceName, "service_connect_configuration.0.namespace"), + resource.TestCheckResourceAttr(resourceName, "service_connect_configuration.0.service.#", "1"), + resource.TestCheckResourceAttr(resourceName, "service_connect_configuration.0.service.0.client_alias.#", "1"), + resource.TestCheckResourceAttr(resourceName, "service_connect_configuration.0.service.0.client_alias.0.dns_name", "nginx-http."+rName), + 
resource.TestCheckResourceAttr(resourceName, "service_connect_configuration.0.service.0.client_alias.0.port", "8080"), + resource.TestCheckResourceAttr(resourceName, "service_connect_configuration.0.service.0.discovery_name", "nginx-http"), + resource.TestCheckResourceAttr(resourceName, "service_connect_configuration.0.service.0.ingress_port_override", "0"), + resource.TestCheckResourceAttr(resourceName, "service_connect_configuration.0.service.0.port_name", "nginx-http"), + ), + }, + }, + }) +} + +func TestAccECSService_ServiceConnect_remove(t *testing.T) { + ctx := acctest.Context(t) + var service awstypes.Service + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_ecs_service.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.ECSServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckServiceDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccServiceConfig_serviceConnectBasic(rName), Check: resource.ComposeTestCheckFunc( testAccCheckServiceExists(ctx, resourceName, &service), - resource.TestCheckResourceAttr(resourceName, "enable_execute_command", acctest.CtTrue), + resource.TestCheckResourceAttr(resourceName, "service_connect_configuration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "service_connect_configuration.0.enabled", acctest.CtTrue), ), }, { - Config: testAccServiceConfig_executeCommand(rName, false), + Config: testAccServiceConfig_serviceConnectRemoved(rName), Check: resource.ComposeTestCheckFunc( testAccCheckServiceExists(ctx, resourceName, &service), - resource.TestCheckResourceAttr(resourceName, "enable_execute_command", acctest.CtFalse), + resource.TestCheckResourceAttr(resourceName, "service_connect_configuration.#", "0"), ), }, }, }) } -func TestAccECSService_AvailabilityZoneRebalancing(t *testing.T) { +// Regression for 
https://github.com/hashicorp/terraform-provider-aws/issues/42818 +func TestAccECSService_ServiceConnect_outOfBandRemoval(t *testing.T) { ctx := acctest.Context(t) var service awstypes.Service rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -2015,359 +2403,1668 @@ func TestAccECSService_AvailabilityZoneRebalancing(t *testing.T) { PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ECSServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckServiceDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccServiceConfig_availabilityZoneRebalancing(rName, "ENABLED"), + Config: testAccServiceConfig_serviceConnectBasic(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckServiceExists(ctx, resourceName, &service), + testAccCheckServiceDisableServiceConnect(ctx, &service), + ), + ExpectNonEmptyPlan: true, + }, + { + Config: testAccServiceConfig_serviceConnectBasic(rName), Check: resource.ComposeTestCheckFunc( testAccCheckServiceExists(ctx, resourceName, &service), - resource.TestCheckResourceAttr(resourceName, "availability_zone_rebalancing", string(awstypes.AvailabilityZoneRebalancingEnabled)), + resource.TestCheckResourceAttr(resourceName, "service_connect_configuration.0.enabled", acctest.CtTrue), ), }, + }, + }) +} + +func TestAccECSService_Tags_basic(t *testing.T) { + ctx := acctest.Context(t) + var service awstypes.Service + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_ecs_service.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.ECSServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckServiceDestroy(ctx), + Steps: []resource.TestStep{ { - Config: testAccServiceConfig_availabilityZoneRebalancing_nullUpdate(rName), + Config: testAccServiceConfig_tags1(rName, acctest.CtKey1, 
acctest.CtValue1), Check: resource.ComposeTestCheckFunc( testAccCheckServiceExists(ctx, resourceName, &service), - resource.TestCheckResourceAttr(resourceName, "availability_zone_rebalancing", string(awstypes.AvailabilityZoneRebalancingDisabled)), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, "1"), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey1, acctest.CtValue1), ), }, { - Config: testAccServiceConfig_availabilityZoneRebalancing(rName, "DISABLED"), + ResourceName: resourceName, + ImportStateId: fmt.Sprintf("%s/%s", rName, rName), + ImportState: true, + ImportStateVerify: true, + // Resource currently defaults to importing task_definition as family:revision + // and wait_for_steady_state is not read from API + ImportStateVerifyIgnore: []string{"task_definition", "wait_for_steady_state"}, + }, + { + Config: testAccServiceConfig_tags2(rName, acctest.CtKey1, acctest.CtValue1Updated, acctest.CtKey2, acctest.CtValue2), Check: resource.ComposeTestCheckFunc( testAccCheckServiceExists(ctx, resourceName, &service), - resource.TestCheckResourceAttr(resourceName, "availability_zone_rebalancing", string(awstypes.AvailabilityZoneRebalancingDisabled)), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, "2"), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey1, acctest.CtValue1Updated), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey2, acctest.CtValue2), ), }, { - Config: testAccServiceConfig_availabilityZoneRebalancing(rName, "ENABLED"), + Config: testAccServiceConfig_tags1(rName, acctest.CtKey2, acctest.CtValue2), Check: resource.ComposeTestCheckFunc( testAccCheckServiceExists(ctx, resourceName, &service), - resource.TestCheckResourceAttr(resourceName, "availability_zone_rebalancing", string(awstypes.AvailabilityZoneRebalancingEnabled)), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, "1"), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey2, 
acctest.CtValue2), ), }, }, }) } -func testAccCheckServiceDestroy(ctx context.Context) resource.TestCheckFunc { - return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).ECSClient(ctx) +func TestAccECSService_Tags_managed(t *testing.T) { + ctx := acctest.Context(t) + var service awstypes.Service + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_ecs_service.test" - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_ecs_service" { - continue - } + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.ECSServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckServiceDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccServiceConfig_managedTags(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckServiceExists(ctx, resourceName, &service), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, "1"), + resource.TestCheckResourceAttr(resourceName, "enable_ecs_managed_tags", acctest.CtTrue), + ), + }, + }, + }) +} + +func TestAccECSService_Tags_UpgradeFromV5_100_0(t *testing.T) { + ctx := acctest.Context(t) + var service awstypes.Service + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_ecs_service.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.ECSServiceID), + CheckDestroy: testAccCheckServiceDestroy(ctx), + Steps: []resource.TestStep{ + { + ExternalProviders: map[string]resource.ExternalProvider{ + "aws": { + Source: "hashicorp/aws", + VersionConstraint: "5.100.0", + }, + }, + Config: testAccServiceConfig_tags1(rName, acctest.CtKey1, acctest.CtValue1), + Check: resource.ComposeTestCheckFunc( + testAccCheckServiceExists(ctx, resourceName, &service), + 
resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, "1"), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey1, acctest.CtValue1), + ), + }, + { + // Just only upgrading to the latest provider version + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Config: testAccServiceConfig_tags1(rName, acctest.CtKey1, acctest.CtValue1), + Check: resource.ComposeTestCheckFunc( + testAccCheckServiceExists(ctx, resourceName, &service), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, "1"), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey1, acctest.CtValue1), + ), + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Config: testAccServiceConfig_tags2(rName, acctest.CtKey1, acctest.CtValue1Updated, acctest.CtKey2, acctest.CtValue2), + Check: resource.ComposeTestCheckFunc( + testAccCheckServiceExists(ctx, resourceName, &service), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, "2"), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey1, acctest.CtValue1Updated), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey2, acctest.CtValue2), + ), + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Config: testAccServiceConfig_tags1(rName, acctest.CtKey2, acctest.CtValue2), + Check: resource.ComposeTestCheckFunc( + testAccCheckServiceExists(ctx, resourceName, &service), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, "1"), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey2, acctest.CtValue2), + ), + }, + }, + }) +} + +func TestAccECSService_Tags_UpgradeFromV5_100_0ThroughV6_08_0(t *testing.T) { + ctx := acctest.Context(t) + var service awstypes.Service + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_ecs_service.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, 
names.ECSServiceID), + CheckDestroy: testAccCheckServiceDestroy(ctx), + Steps: []resource.TestStep{ + { + ExternalProviders: map[string]resource.ExternalProvider{ + "aws": { + Source: "hashicorp/aws", + VersionConstraint: "5.100.0", + }, + }, + Config: testAccServiceConfig_tags1(rName, acctest.CtKey1, acctest.CtValue1), + Check: resource.ComposeTestCheckFunc( + testAccCheckServiceExists(ctx, resourceName, &service), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, "1"), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey1, acctest.CtValue1), + ), + }, + { + ExternalProviders: map[string]resource.ExternalProvider{ + "aws": { + Source: "hashicorp/aws", + VersionConstraint: "6.8.0", + }, + }, + Config: testAccServiceConfig_tags1(rName, acctest.CtKey1, acctest.CtValue1), + Check: resource.ComposeTestCheckFunc( + testAccCheckServiceExists(ctx, resourceName, &service), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, "1"), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey1, acctest.CtValue1), + ), + }, + { + // Just only upgrading to the latest provider version + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Config: testAccServiceConfig_tags1(rName, acctest.CtKey1, acctest.CtValue1), + Check: resource.ComposeTestCheckFunc( + testAccCheckServiceExists(ctx, resourceName, &service), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, "1"), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey1, acctest.CtValue1), + ), + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Config: testAccServiceConfig_tags2(rName, acctest.CtKey1, acctest.CtValue1Updated, acctest.CtKey2, acctest.CtValue2), + Check: resource.ComposeTestCheckFunc( + testAccCheckServiceExists(ctx, resourceName, &service), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, "2"), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey1, 
acctest.CtValue1Updated), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey2, acctest.CtValue2), + ), + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Config: testAccServiceConfig_tags1(rName, acctest.CtKey2, acctest.CtValue2), + Check: resource.ComposeTestCheckFunc( + testAccCheckServiceExists(ctx, resourceName, &service), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, "1"), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey2, acctest.CtValue2), + ), + }, + }, + }) +} + +func TestAccECSService_Tags_propagate(t *testing.T) { + ctx := acctest.Context(t) + var first, second, third awstypes.Service + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_ecs_service.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.ECSServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckServiceDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccServiceConfig_propagateTags(rName, "SERVICE"), + Check: resource.ComposeTestCheckFunc( + testAccCheckServiceExists(ctx, resourceName, &first), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, "1"), + resource.TestCheckResourceAttr(resourceName, names.AttrPropagateTags, string(awstypes.PropagateTagsService)), + ), + }, + { + Config: testAccServiceConfig_propagateTags(rName, "TASK_DEFINITION"), + Check: resource.ComposeTestCheckFunc( + testAccCheckServiceExists(ctx, resourceName, &second), + resource.TestCheckResourceAttr(resourceName, names.AttrPropagateTags, string(awstypes.PropagateTagsTaskDefinition)), + ), + }, + { + Config: testAccServiceConfig_propagateTags(rName, "NONE"), + Check: resource.ComposeTestCheckFunc( + testAccCheckServiceExists(ctx, resourceName, &third), + resource.TestCheckResourceAttr(resourceName, names.AttrPropagateTags, 
string(awstypes.PropagateTagsNone)), + ), + }, + }, + }) +} + +func TestAccECSService_executeCommand(t *testing.T) { + ctx := acctest.Context(t) + var service awstypes.Service + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_ecs_service.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.ECSServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckServiceDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccServiceConfig_executeCommand(rName, true), + Check: resource.ComposeTestCheckFunc( + testAccCheckServiceExists(ctx, resourceName, &service), + resource.TestCheckResourceAttr(resourceName, "enable_execute_command", acctest.CtTrue), + ), + }, + { + Config: testAccServiceConfig_executeCommand(rName, false), + Check: resource.ComposeTestCheckFunc( + testAccCheckServiceExists(ctx, resourceName, &service), + resource.TestCheckResourceAttr(resourceName, "enable_execute_command", acctest.CtFalse), + ), + }, + }, + }) +} + +func TestAccECSService_AvailabilityZoneRebalancing(t *testing.T) { + ctx := acctest.Context(t) + var service awstypes.Service + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_ecs_service.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.ECSServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckServiceDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccServiceConfig_availabilityZoneRebalancing(rName, "null"), + Check: resource.ComposeTestCheckFunc( + testAccCheckServiceExists(ctx, resourceName, &service), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + }, + }, + 
ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("availability_zone_rebalancing"), tfknownvalue.StringExact(awstypes.AvailabilityZoneRebalancingEnabled)), + }, + }, + { + Config: testAccServiceConfig_availabilityZoneRebalancing(rName, awstypes.AvailabilityZoneRebalancingEnabled), + Check: resource.ComposeTestCheckFunc( + testAccCheckServiceExists(ctx, resourceName, &service), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("availability_zone_rebalancing"), tfknownvalue.StringExact(awstypes.AvailabilityZoneRebalancingEnabled)), + }, + }, + { + Config: testAccServiceConfig_availabilityZoneRebalancing(rName, awstypes.AvailabilityZoneRebalancingDisabled), + Check: resource.ComposeTestCheckFunc( + testAccCheckServiceExists(ctx, resourceName, &service), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("availability_zone_rebalancing"), tfknownvalue.StringExact(awstypes.AvailabilityZoneRebalancingDisabled)), + }, + }, + { + Config: testAccServiceConfig_availabilityZoneRebalancing(rName, "null"), + Check: resource.ComposeTestCheckFunc( + testAccCheckServiceExists(ctx, resourceName, &service), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("availability_zone_rebalancing"), 
tfknownvalue.StringExact(awstypes.AvailabilityZoneRebalancingDisabled)), + }, + }, + { + Config: testAccServiceConfig_availabilityZoneRebalancing(rName, awstypes.AvailabilityZoneRebalancingEnabled), + Check: resource.ComposeTestCheckFunc( + testAccCheckServiceExists(ctx, resourceName, &service), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("availability_zone_rebalancing"), tfknownvalue.StringExact(awstypes.AvailabilityZoneRebalancingEnabled)), + }, + }, + }, + }) +} + +func testAccCheckServiceDestroy(ctx context.Context) resource.TestCheckFunc { + return func(s *terraform.State) error { + conn := acctest.Provider.Meta().(*conns.AWSClient).ECSClient(ctx) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "aws_ecs_service" { + continue + } + + output, err := tfecs.FindServiceNoTagsByTwoPartKey(ctx, conn, rs.Primary.ID, rs.Primary.Attributes["cluster"]) + + if tfresource.NotFound(err) { + return nil + } + + if err != nil { + return err + } + + if aws.ToString(output.Status) == "INACTIVE" { + return nil + } + + return fmt.Errorf("ECS Service %s still exists", rs.Primary.ID) + } + + return nil + } +} + +func testAccCheckServiceExists(ctx context.Context, name string, service *awstypes.Service) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[name] + if !ok { + return fmt.Errorf("Not found: %s", name) + } + + conn := acctest.Provider.Meta().(*conns.AWSClient).ECSClient(ctx) + + var output *awstypes.Service + err := tfresource.Retry(ctx, 1*time.Minute, func(ctx context.Context) *tfresource.RetryError { + var err error + output, err = tfecs.FindServiceNoTagsByTwoPartKey(ctx, conn, rs.Primary.ID, rs.Primary.Attributes["cluster"]) + + if tfresource.NotFound(err) { + return 
tfresource.RetryableError(err) + } + + if err != nil { + return tfresource.NonRetryableError(err) + } + + return nil + }) + if err != nil { + return err + } + + *service = *output + + return nil + } +} + +func testAccCheckServiceDisableServiceConnect(ctx context.Context, service *awstypes.Service) resource.TestCheckFunc { + return func(s *terraform.State) error { + conn := acctest.Provider.Meta().(*conns.AWSClient).ECSClient(ctx) + + input := &ecs.UpdateServiceInput{ + Cluster: service.ClusterArn, + Service: service.ServiceName, + ServiceConnectConfiguration: &awstypes.ServiceConnectConfiguration{ + Enabled: false, + }, + } + + _, err := conn.UpdateService(ctx, input) + return err + } +} + +func testAccCheckServiceRemoveBlueGreenDeploymentConfigurations(ctx context.Context, service *awstypes.Service) resource.TestCheckFunc { + return func(s *terraform.State) error { + conn := acctest.Provider.Meta().(*conns.AWSClient).ECSClient(ctx) + + input := &ecs.UpdateServiceInput{ + Cluster: service.ClusterArn, + Service: service.ServiceName, + DeploymentConfiguration: &awstypes.DeploymentConfiguration{ + Strategy: awstypes.DeploymentStrategyRolling, + BakeTimeInMinutes: aws.Int32(0), + LifecycleHooks: []awstypes.DeploymentLifecycleHook{}, + }, + } + + _, err := conn.UpdateService(ctx, input) + return err + } +} + +func testAccServiceConfig_basic(rName, clusterName string) string { + return fmt.Sprintf(` +resource "aws_ecs_cluster" "test" { + name = %[2]q +} + +resource "aws_ecs_task_definition" "test" { + family = %[2]q + + container_definitions = < /var/www/my-vol/date; sleep 1; done\"" + ] + environment = [ + { + name = "test_name" + value = "test_val" + } + ] + portMappings = [ + { + containerPort = 80 + hostPort = 80 + protocol = "tcp" + name = "http" + appProtocol = "http" + } + ] + } + ]) } + resource "aws_ecs_service" "test" { name = %[1]q - cluster = aws_ecs_cluster.test.id - task_definition = aws_ecs_task_definition.test.arn - desired_count = 2 -} -`, rName, 
clusterName) -} + cluster = aws_ecs_cluster.main.id + task_definition = aws_ecs_task_definition.should_fail.arn + desired_count = 1 + launch_type = "FARGATE" -func testAccServiceConfig_launchTypeFargateBase(rName string) string { - return acctest.ConfigCompose(acctest.ConfigVPCWithSubnets(rName, 2), fmt.Sprintf(` -resource "aws_internet_gateway" "test" { - vpc_id = aws_vpc.test.id + deployment_configuration { + strategy = "BLUE_GREEN" + bake_time_in_minutes = 1 - tags = { - Name = %[1]q + lifecycle_hook { + hook_target_arn = aws_lambda_function.hook_success.arn + role_arn = aws_iam_role.global.arn + lifecycle_stages = ["PRE_SCALE_UP"] + } } -} -resource "aws_route_table" "test" { - vpc_id = aws_vpc.test.id + deployment_circuit_breaker { + enable = true + rollback = true + } - route { - cidr_block = "0.0.0.0/0" - gateway_id = aws_internet_gateway.test.id + service_connect_configuration { + enabled = true + namespace = aws_service_discovery_http_namespace.test.arn + + service { + client_alias { + dns_name = "test-service.local" + port = 8080 + + test_traffic_rules { + header { + name = "x-test-header-2" + value { + exact = "test-value-2" + } + } + } + } + discovery_name = "test-service" + port_name = "http" + } } - tags = { - Name = %[1]q + network_configuration { + security_groups = [aws_security_group.test.id] + subnets = aws_subnet.test[*].id + assign_public_ip = true + } + + load_balancer { + target_group_arn = aws_lb_target_group.primary.arn + container_name = "should_fail" + container_port = 80 + + advanced_configuration { + alternate_target_group_arn = aws_lb_target_group.alternate.arn + production_listener_rule = aws_lb_listener_rule.production.arn + test_listener_rule = aws_lb_listener_rule.test.arn + role_arn = aws_iam_role.global.arn + } } -} -resource "aws_route_table_association" "test" { - count = 2 - subnet_id = element(aws_subnet.test[*].id, count.index) - route_table_id = aws_route_table.test.id + wait_for_steady_state = true + + depends_on = [ + 
aws_iam_role_policy_attachment.global_admin_attach, + aws_iam_role_policy.ecs_elb_permissions, + aws_iam_role_policy_attachment.ecs_service_role + ] +} +`, rName)) } -resource "aws_security_group" "test" { - count = 2 +func testAccServiceConfig_blueGreenDeployment_withHookBehavior(rName string, shouldFail bool) string { + var hookTargetArn string + if shouldFail { + hookTargetArn = "aws_lambda_function.hook_failure.arn" + } else { + hookTargetArn = "aws_lambda_function.hook_success.arn" + } - name = "%[1]s-${count.index}" - description = "Allow all traffic" - vpc_id = aws_vpc.test.id + return acctest.ConfigCompose(testAccServiceConfig_blueGreenDeploymentBase(rName), fmt.Sprintf(` - ingress { - protocol = "6" - from_port = 80 - to_port = 8000 - cidr_blocks = [aws_vpc.test.cidr_block] +resource "aws_ecs_task_definition" "test2" { + family = "%[1]s-test2" + requires_compatibilities = ["FARGATE"] + network_mode = "awsvpc" + cpu = 256 + memory = 512 + lifecycle { + create_before_destroy = true } - egress { - from_port = 0 - to_port = 0 - protocol = "-1" + container_definitions = jsonencode([ + { + name = "test" + image = "nginx:latest" + cpu = 256 + memory = 512 + essential = true + environment = [ + { + name = "test_name_2" + value = "test_val_2" + } + ] + portMappings = [ + { + containerPort = 80 + hostPort = 80 + protocol = "tcp" + name = "http" + appProtocol = "http" + } + ] + } + ]) +} - cidr_blocks = [ - "0.0.0.0/0", - ] +resource "aws_ecs_service" "test" { + name = %[1]q + cluster = aws_ecs_cluster.main.id + task_definition = aws_ecs_task_definition.test2.arn + desired_count = 1 + launch_type = "FARGATE" + + deployment_configuration { + strategy = "BLUE_GREEN" + bake_time_in_minutes = 3 + + lifecycle_hook { + hook_target_arn = %[2]s + role_arn = aws_iam_role.global.arn + lifecycle_stages = ["PRE_SCALE_UP"] + hook_details = jsonencode({ "bool_key" : true, "string_key" : "string_val", "int_key" : 10, "list_key" : [1, "2", true], "object_key" : { + "bool_key" : 
true, + "string_key" : "string_val", + "int_key" : 10, + "list_key" : [1, "2", true] + } }) + } } - tags = { - Name = %[1]q + service_connect_configuration { + enabled = true + namespace = aws_service_discovery_http_namespace.test.arn + + service { + client_alias { + dns_name = "test-service.local" + port = 8080 + + test_traffic_rules { + header { + name = "x-test-header-2" + value { + exact = "test-value-2" + } + } + } + } + discovery_name = "test-service" + port_name = "http" + } } -} -resource "aws_ecs_cluster" "test" { - name = %[1]q -} + network_configuration { + security_groups = [aws_security_group.test.id] + subnets = aws_subnet.test[*].id + assign_public_ip = true + } -resource "aws_ecs_task_definition" "test" { - family = %[1]q - network_mode = "awsvpc" - requires_compatibilities = ["FARGATE"] - cpu = "256" - memory = "512" + load_balancer { + target_group_arn = aws_lb_target_group.primary.arn + container_name = "test" + container_port = 80 - container_definitions = < { + console.log('Event received:', JSON.stringify(event)); + return { hookStatus: 'FAILED' }; +}; \ No newline at end of file diff --git a/internal/service/ecs/test-fixtures/failure_lambda_func.zip b/internal/service/ecs/test-fixtures/failure_lambda_func.zip new file mode 100644 index 000000000000..d92fe373f9d1 Binary files /dev/null and b/internal/service/ecs/test-fixtures/failure_lambda_func.zip differ diff --git a/internal/service/ecs/test-fixtures/sigint_helper.go b/internal/service/ecs/test-fixtures/sigint_helper.go new file mode 100644 index 000000000000..b4d42bb56d20 --- /dev/null +++ b/internal/service/ecs/test-fixtures/sigint_helper.go @@ -0,0 +1,60 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package main + +import ( + "fmt" + "os" + "os/exec" + "strconv" + "strings" + "syscall" + "time" + + "github.com/YakDriver/regexache" +) + +func main() { + if len(os.Args) < 2 { + fmt.Println("Usage: go run sigint_helper.go ") // nosemgrep:ci.calling-fmt.Print-and-variants + os.Exit(1) + } + + delay, err := strconv.Atoi(os.Args[1]) + if err != nil { + fmt.Printf("Invalid delay: %v\n", err) // nosemgrep:ci.calling-fmt.Print-and-variants + os.Exit(1) + } + + time.Sleep(time.Duration(delay) * time.Second) + + // Find terraform process doing apply + cmd := exec.Command("ps", "aux") //lintignore:XR007 + output, err := cmd.Output() + if err != nil { + fmt.Printf("Error running ps: %v\n", err) // nosemgrep:ci.calling-fmt.Print-and-variants + os.Exit(1) + } + + lines := strings.Split(string(output), "\n") + re := regexache.MustCompile(`/opt/homebrew/bin/terraform apply.*-auto-approve`) + + for _, line := range lines { + if re.MatchString(line) && !strings.Contains(line, "sigint_helper") { + fields := strings.Fields(line) + if len(fields) > 1 { + pid, err := strconv.Atoi(fields[1]) + if err != nil { + continue + } + + fmt.Printf("Sending SIGINT to PID %d: %s\n", pid, line) // nosemgrep:ci.calling-fmt.Print-and-variants + _ = syscall.Kill(pid, syscall.SIGINT) + return + } + } + } + + fmt.Println("No matching terraform process found") // nosemgrep:ci.calling-fmt.Print-and-variants +} diff --git a/internal/service/ecs/test-fixtures/success_lambda_func.js b/internal/service/ecs/test-fixtures/success_lambda_func.js new file mode 100644 index 000000000000..1fd6ae562cc8 --- /dev/null +++ b/internal/service/ecs/test-fixtures/success_lambda_func.js @@ -0,0 +1,10 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: MPL-2.0 + */ + + +exports.handler = async (event, context) => { + console.log('Event received:', JSON.stringify(event)); + return { hookStatus: 'SUCCEEDED' }; + }; \ No newline at end of file diff --git a/internal/service/ecs/test-fixtures/success_lambda_func.zip b/internal/service/ecs/test-fixtures/success_lambda_func.zip new file mode 100644 index 000000000000..6c2732a6416d Binary files /dev/null and b/internal/service/ecs/test-fixtures/success_lambda_func.zip differ diff --git a/internal/service/ecs/testdata/CapacityProvider/basic_v5.100.0/main_gen.tf b/internal/service/ecs/testdata/CapacityProvider/basic_v5.100.0/main_gen.tf new file mode 100644 index 000000000000..3e311c434657 --- /dev/null +++ b/internal/service/ecs/testdata/CapacityProvider/basic_v5.100.0/main_gen.tf @@ -0,0 +1,98 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resource "aws_ecs_capacity_provider" "test" { + name = var.rName + + auto_scaling_group_provider { + auto_scaling_group_arn = aws_autoscaling_group.test.arn + } +} + +# testAccCapacityProviderConfig_base + +resource "aws_launch_template" "test" { + image_id = data.aws_ami.amzn2-ami-minimal-hvm-ebs-x86_64.id + instance_type = "t3.micro" + name = var.rName +} + +resource "aws_autoscaling_group" "test" { + availability_zones = data.aws_availability_zones.available.names + desired_capacity = 0 + max_size = 0 + min_size = 0 + name = var.rName + + launch_template { + id = aws_launch_template.test.id + } + + tag { + key = "Name" + value = var.rName + propagate_at_launch = true + } + + lifecycle { + ignore_changes = [ + tag, + ] + } +} + +# acctest.ConfigAvailableAZsNoOptInDefaultExclude + +data "aws_availability_zones" "available" { + exclude_zone_ids = local.default_exclude_zone_ids + state = "available" + + filter { + name = "opt-in-status" + values = ["opt-in-not-required"] + } +} + +locals { + default_exclude_zone_ids = ["usw2-az4", "usgw1-az2"] +} + +# 
acctest.ConfigLatestAmazonLinux2HVMEBSX8664AMI + +# acctest.configLatestAmazonLinux2HVMEBSAMI("x86_64") + +data "aws_ami" "amzn2-ami-minimal-hvm-ebs-x86_64" { + most_recent = true + owners = ["amazon"] + + filter { + name = "name" + values = ["amzn2-ami-minimal-hvm-*"] + } + + filter { + name = "root-device-type" + values = ["ebs"] + } + + filter { + name = "architecture" + values = ["x86_64"] + } +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "5.100.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/ecs/testdata/CapacityProvider/basic_v6.0.0/main_gen.tf b/internal/service/ecs/testdata/CapacityProvider/basic_v6.0.0/main_gen.tf new file mode 100644 index 000000000000..770d090e0cbe --- /dev/null +++ b/internal/service/ecs/testdata/CapacityProvider/basic_v6.0.0/main_gen.tf @@ -0,0 +1,98 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resource "aws_ecs_capacity_provider" "test" { + name = var.rName + + auto_scaling_group_provider { + auto_scaling_group_arn = aws_autoscaling_group.test.arn + } +} + +# testAccCapacityProviderConfig_base + +resource "aws_launch_template" "test" { + image_id = data.aws_ami.amzn2-ami-minimal-hvm-ebs-x86_64.id + instance_type = "t3.micro" + name = var.rName +} + +resource "aws_autoscaling_group" "test" { + availability_zones = data.aws_availability_zones.available.names + desired_capacity = 0 + max_size = 0 + min_size = 0 + name = var.rName + + launch_template { + id = aws_launch_template.test.id + } + + tag { + key = "Name" + value = var.rName + propagate_at_launch = true + } + + lifecycle { + ignore_changes = [ + tag, + ] + } +} + +# acctest.ConfigAvailableAZsNoOptInDefaultExclude + +data "aws_availability_zones" "available" { + exclude_zone_ids = local.default_exclude_zone_ids + state = "available" + + filter { + name = "opt-in-status" + values = 
["opt-in-not-required"] + } +} + +locals { + default_exclude_zone_ids = ["usw2-az4", "usgw1-az2"] +} + +# acctest.ConfigLatestAmazonLinux2HVMEBSX8664AMI + +# acctest.configLatestAmazonLinux2HVMEBSAMI("x86_64") + +data "aws_ami" "amzn2-ami-minimal-hvm-ebs-x86_64" { + most_recent = true + owners = ["amazon"] + + filter { + name = "name" + values = ["amzn2-ami-minimal-hvm-*"] + } + + filter { + name = "root-device-type" + values = ["ebs"] + } + + filter { + name = "architecture" + values = ["x86_64"] + } +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "6.0.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/efs/file_system_policy.go b/internal/service/efs/file_system_policy.go index a7ef616f6694..0e7a9f01011d 100644 --- a/internal/service/efs/file_system_policy.go +++ b/internal/service/efs/file_system_policy.go @@ -67,7 +67,7 @@ func resourceFileSystemPolicyPut(ctx context.Context, d *schema.ResourceData, me Policy: aws.String(policy), } - _, err = tfresource.RetryWhenIsAErrorMessageContains[*awstypes.InvalidPolicyException](ctx, propagationTimeout, func() (any, error) { + _, err = tfresource.RetryWhenIsAErrorMessageContains[any, *awstypes.InvalidPolicyException](ctx, propagationTimeout, func(ctx context.Context) (any, error) { return conn.PutFileSystemPolicy(ctx, input) }, "Policy contains invalid Principal block") diff --git a/internal/service/efs/mount_target.go b/internal/service/efs/mount_target.go index 2f44be00a39d..91ff2bdc3240 100644 --- a/internal/service/efs/mount_target.go +++ b/internal/service/efs/mount_target.go @@ -75,6 +75,20 @@ func resourceMountTarget() *schema.Resource { ForceNew: true, ValidateFunc: validation.IsIPv4Address, }, + names.AttrIPAddressType: { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ValidateDiagFunc: enum.Validate[awstypes.IpAddressType](), + }, 
+ "ipv6_address": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ValidateFunc: validation.IsIPv6Address, + }, "mount_target_dns_name": { Type: schema.TypeString, Computed: true, @@ -131,6 +145,14 @@ func resourceMountTargetCreate(ctx context.Context, d *schema.ResourceData, meta input.IpAddress = aws.String(v.(string)) } + if v, ok := d.GetOk(names.AttrIPAddressType); ok { + input.IpAddressType = awstypes.IpAddressType(v.(string)) + } + + if v, ok := d.GetOk("ipv6_address"); ok { + input.Ipv6Address = aws.String(v.(string)) + } + if v, ok := d.GetOk(names.AttrSecurityGroups); ok { input.SecurityGroups = flex.ExpandStringValueSet(v.(*schema.Set)) } @@ -180,6 +202,16 @@ func resourceMountTargetRead(ctx context.Context, d *schema.ResourceData, meta a d.Set("file_system_arn", fsARN) d.Set(names.AttrFileSystemID, fsID) d.Set(names.AttrIPAddress, mt.IpAddress) + if mt.IpAddress != nil && mt.Ipv6Address != nil { + d.Set(names.AttrIPAddressType, awstypes.IpAddressTypeDualStack) + } else if mt.IpAddress != nil { + d.Set(names.AttrIPAddressType, awstypes.IpAddressTypeIpv4Only) + } else if mt.Ipv6Address != nil { + d.Set(names.AttrIPAddressType, awstypes.IpAddressTypeIpv6Only) + } else { + d.Set(names.AttrIPAddressType, nil) + } + d.Set("ipv6_address", mt.Ipv6Address) d.Set("mount_target_dns_name", meta.(*conns.AWSClient).RegionalHostname(ctx, fmt.Sprintf("%s.%s.efs", aws.ToString(mt.AvailabilityZoneName), aws.ToString(mt.FileSystemId)))) d.Set(names.AttrNetworkInterfaceID, mt.NetworkInterfaceId) d.Set(names.AttrOwnerID, mt.OwnerId) diff --git a/internal/service/efs/mount_target_data_source.go b/internal/service/efs/mount_target_data_source.go index 7f6de5f24ae5..894b9cea23d8 100644 --- a/internal/service/efs/mount_target_data_source.go +++ b/internal/service/efs/mount_target_data_source.go @@ -54,6 +54,14 @@ func dataSourceMountTarget() *schema.Resource { Type: schema.TypeString, Computed: true, }, + names.AttrIPAddressType: { + Type: 
schema.TypeString, + Computed: true, + }, + "ipv6_address": { + Type: schema.TypeString, + Computed: true, + }, "mount_target_id": { Type: schema.TypeString, Optional: true, @@ -123,6 +131,16 @@ func dataSourceMountTargetRead(ctx context.Context, d *schema.ResourceData, meta d.Set("file_system_arn", fsARN) d.Set(names.AttrFileSystemID, fsID) d.Set(names.AttrIPAddress, mt.IpAddress) + if mt.IpAddress != nil && mt.Ipv6Address != nil { + d.Set(names.AttrIPAddressType, awstypes.IpAddressTypeDualStack) + } else if mt.IpAddress != nil { + d.Set(names.AttrIPAddressType, awstypes.IpAddressTypeIpv4Only) + } else if mt.Ipv6Address != nil { + d.Set(names.AttrIPAddressType, awstypes.IpAddressTypeIpv6Only) + } else { + d.Set(names.AttrIPAddressType, nil) + } + d.Set("ipv6_address", mt.Ipv6Address) d.Set("mount_target_dns_name", meta.(*conns.AWSClient).RegionalHostname(ctx, fmt.Sprintf("%s.%s.efs", aws.ToString(mt.AvailabilityZoneName), aws.ToString(mt.FileSystemId)))) d.Set("mount_target_id", mt.MountTargetId) d.Set(names.AttrNetworkInterfaceID, mt.NetworkInterfaceId) diff --git a/internal/service/efs/mount_target_data_source_test.go b/internal/service/efs/mount_target_data_source_test.go index 774c3f84e8b0..78f497822377 100644 --- a/internal/service/efs/mount_target_data_source_test.go +++ b/internal/service/efs/mount_target_data_source_test.go @@ -30,6 +30,8 @@ func TestAccEFSMountTargetDataSource_basic(t *testing.T) { resource.TestCheckResourceAttrPair(dataSourceName, "file_system_arn", resourceName, "file_system_arn"), resource.TestCheckResourceAttrPair(dataSourceName, names.AttrFileSystemID, resourceName, names.AttrFileSystemID), resource.TestCheckResourceAttrPair(dataSourceName, names.AttrIPAddress, resourceName, names.AttrIPAddress), + resource.TestCheckResourceAttrPair(dataSourceName, names.AttrIPAddressType, resourceName, names.AttrIPAddressType), + resource.TestCheckResourceAttrPair(dataSourceName, "ipv6_address", resourceName, "ipv6_address"), 
resource.TestCheckResourceAttrPair(dataSourceName, names.AttrSubnetID, resourceName, names.AttrSubnetID), resource.TestCheckResourceAttrPair(dataSourceName, names.AttrNetworkInterfaceID, resourceName, names.AttrNetworkInterfaceID), resource.TestCheckResourceAttrPair(dataSourceName, names.AttrDNSName, resourceName, names.AttrDNSName), @@ -61,6 +63,8 @@ func TestAccEFSMountTargetDataSource_byAccessPointID(t *testing.T) { resource.TestCheckResourceAttrPair(dataSourceName, "file_system_arn", resourceName, "file_system_arn"), resource.TestCheckResourceAttrPair(dataSourceName, names.AttrFileSystemID, resourceName, names.AttrFileSystemID), resource.TestCheckResourceAttrPair(dataSourceName, names.AttrIPAddress, resourceName, names.AttrIPAddress), + resource.TestCheckResourceAttrPair(dataSourceName, names.AttrIPAddressType, resourceName, names.AttrIPAddressType), + resource.TestCheckResourceAttrPair(dataSourceName, "ipv6_address", resourceName, "ipv6_address"), resource.TestCheckResourceAttrPair(dataSourceName, names.AttrSubnetID, resourceName, names.AttrSubnetID), resource.TestCheckResourceAttrPair(dataSourceName, names.AttrNetworkInterfaceID, resourceName, names.AttrNetworkInterfaceID), resource.TestCheckResourceAttrPair(dataSourceName, names.AttrDNSName, resourceName, names.AttrDNSName), @@ -92,6 +96,8 @@ func TestAccEFSMountTargetDataSource_byFileSystemID(t *testing.T) { resource.TestCheckResourceAttrPair(dataSourceName, "file_system_arn", resourceName, "file_system_arn"), resource.TestCheckResourceAttrPair(dataSourceName, names.AttrFileSystemID, resourceName, names.AttrFileSystemID), resource.TestCheckResourceAttrPair(dataSourceName, names.AttrIPAddress, resourceName, names.AttrIPAddress), + resource.TestCheckResourceAttrPair(dataSourceName, names.AttrIPAddressType, resourceName, names.AttrIPAddressType), + resource.TestCheckResourceAttrPair(dataSourceName, "ipv6_address", resourceName, "ipv6_address"), resource.TestCheckResourceAttrPair(dataSourceName, 
names.AttrSubnetID, resourceName, names.AttrSubnetID), resource.TestCheckResourceAttrPair(dataSourceName, names.AttrNetworkInterfaceID, resourceName, names.AttrNetworkInterfaceID), resource.TestCheckResourceAttrPair(dataSourceName, names.AttrDNSName, resourceName, names.AttrDNSName), diff --git a/internal/service/efs/mount_target_test.go b/internal/service/efs/mount_target_test.go index 9aea3dbd8941..4e2e63adfaf3 100644 --- a/internal/service/efs/mount_target_test.go +++ b/internal/service/efs/mount_target_test.go @@ -42,6 +42,8 @@ func TestAccEFSMountTarget_basic(t *testing.T) { acctest.MatchResourceAttrRegionalHostname(resourceName, names.AttrDNSName, "efs", regexache.MustCompile(`fs-[^.]+`)), acctest.MatchResourceAttrRegionalARN(ctx, resourceName, "file_system_arn", "elasticfilesystem", regexache.MustCompile(`file-system/fs-.+`)), resource.TestMatchResourceAttr(resourceName, names.AttrIPAddress, regexache.MustCompile(`\d+\.\d+\.\d+\.\d+`)), + resource.TestCheckResourceAttr(resourceName, names.AttrIPAddressType, string(awstypes.IpAddressTypeIpv4Only)), + resource.TestCheckResourceAttr(resourceName, "ipv6_address", ""), resource.TestCheckResourceAttrSet(resourceName, "mount_target_dns_name"), resource.TestCheckResourceAttrSet(resourceName, names.AttrNetworkInterfaceID), acctest.CheckResourceAttrAccountID(ctx, resourceName, names.AttrOwnerID), @@ -117,6 +119,126 @@ func TestAccEFSMountTarget_ipAddress(t *testing.T) { }) } +func TestAccEFSMountTarget_ipAddressTypeIPv6Only(t *testing.T) { + ctx := acctest.Context(t) + var mount awstypes.MountTargetDescription + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_efs_mount_target.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.EFSServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckMountTargetDestroy(ctx), + Steps: []resource.TestStep{ + { + 
Config: testAccMountTargetConfig_ipAddressTypeIPv6Only(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckMountTargetExists(ctx, resourceName, &mount), + resource.TestCheckResourceAttr(resourceName, names.AttrIPAddress, ""), + resource.TestCheckResourceAttr(resourceName, names.AttrIPAddressType, string(awstypes.IpAddressTypeIpv6Only)), + resource.TestCheckResourceAttrSet(resourceName, "ipv6_address"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccEFSMountTarget_ipAddressTypeIPv6OnlyWithIPv6Address(t *testing.T) { + ctx := acctest.Context(t) + var mount awstypes.MountTargetDescription + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_efs_mount_target.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.EFSServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckMountTargetDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccMountTargetConfig_ipAddressTypeIPv6OnlyWithIPv6Address(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckMountTargetExists(ctx, resourceName, &mount), + resource.TestCheckResourceAttr(resourceName, names.AttrIPAddress, ""), + resource.TestCheckResourceAttr(resourceName, names.AttrIPAddressType, string(awstypes.IpAddressTypeIpv6Only)), + resource.TestCheckResourceAttrSet(resourceName, "ipv6_address"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccEFSMountTarget_ipAddressTypeDualStack(t *testing.T) { + ctx := acctest.Context(t) + var mount awstypes.MountTargetDescription + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_efs_mount_target.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: 
acctest.ErrorCheck(t, names.EFSServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckMountTargetDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccMountTargetConfig_ipAddressTypeDualStack(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckMountTargetExists(ctx, resourceName, &mount), + resource.TestCheckResourceAttrSet(resourceName, names.AttrIPAddress), + resource.TestCheckResourceAttr(resourceName, names.AttrIPAddressType, string(awstypes.IpAddressTypeDualStack)), + resource.TestCheckResourceAttrSet(resourceName, "ipv6_address"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccEFSMountTarget_ipAddressTypeDualStackWithIPv6Address(t *testing.T) { + ctx := acctest.Context(t) + var mount awstypes.MountTargetDescription + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_efs_mount_target.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.EFSServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckMountTargetDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccMountTargetConfig_ipAddressTypeDualStackWithIPv6Address(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckMountTargetExists(ctx, resourceName, &mount), + resource.TestCheckResourceAttrSet(resourceName, names.AttrIPAddress), + resource.TestCheckResourceAttr(resourceName, names.AttrIPAddressType, string(awstypes.IpAddressTypeDualStack)), + resource.TestCheckResourceAttrSet(resourceName, "ipv6_address"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + // Reference: https://github.com/hashicorp/terraform-provider-aws/issues/13845 func TestAccEFSMountTarget_IPAddress_emptyString(t *testing.T) { ctx := 
acctest.Context(t) @@ -202,6 +324,33 @@ resource "aws_efs_file_system" "test" { `, rName)) } +func testAccMountTargetConfig_withDualStackSubnet(rName string) string { + return acctest.ConfigCompose(acctest.ConfigVPCWithSubnetsIPv6(rName, 2), fmt.Sprintf(` +resource "aws_subnet" "test_ipv6_only" { + vpc_id = aws_vpc.test.id + availability_zone = data.aws_availability_zones.available.names[0] + + ipv6_cidr_block = cidrsubnet(aws_vpc.test.ipv6_cidr_block, 8, 7) + + enable_resource_name_dns_aaaa_record_on_launch = true + + assign_ipv6_address_on_creation = true + + ipv6_native = true + + tags = { + Name = %[1]q + } +} + +resource "aws_efs_file_system" "test" { + tags = { + Name = %[1]q + } +} +`, rName)) +} + func testAccMountTargetConfig_basic(rName string) string { return acctest.ConfigCompose(testAccMountTargetConfig_base(rName), ` resource "aws_efs_mount_target" "test" { @@ -244,3 +393,45 @@ resource "aws_efs_mount_target" "test" { } `) } + +func testAccMountTargetConfig_ipAddressTypeIPv6Only(rName string) string { + return acctest.ConfigCompose(testAccMountTargetConfig_withDualStackSubnet(rName), ` +resource "aws_efs_mount_target" "test" { + file_system_id = aws_efs_file_system.test.id + ip_address_type = "IPV6_ONLY" + subnet_id = aws_subnet.test_ipv6_only.id +} +`) +} + +func testAccMountTargetConfig_ipAddressTypeIPv6OnlyWithIPv6Address(rName string) string { + return acctest.ConfigCompose(testAccMountTargetConfig_withDualStackSubnet(rName), ` +resource "aws_efs_mount_target" "test" { + file_system_id = aws_efs_file_system.test.id + ip_address_type = "IPV6_ONLY" + ipv6_address = cidrhost(aws_subnet.test_ipv6_only.ipv6_cidr_block, 10) + subnet_id = aws_subnet.test_ipv6_only.id +} +`) +} + +func testAccMountTargetConfig_ipAddressTypeDualStack(rName string) string { + return acctest.ConfigCompose(testAccMountTargetConfig_withDualStackSubnet(rName), ` +resource "aws_efs_mount_target" "test" { + file_system_id = aws_efs_file_system.test.id + ip_address_type = 
"DUAL_STACK" + subnet_id = aws_subnet.test[0].id +} +`) +} + +func testAccMountTargetConfig_ipAddressTypeDualStackWithIPv6Address(rName string) string { + return acctest.ConfigCompose(testAccMountTargetConfig_withDualStackSubnet(rName), ` +resource "aws_efs_mount_target" "test" { + file_system_id = aws_efs_file_system.test.id + ip_address_type = "DUAL_STACK" + ipv6_address = cidrhost(aws_subnet.test[0].ipv6_cidr_block, 10) + subnet_id = aws_subnet.test[0].id +} +`) +} diff --git a/internal/service/efs/service_endpoint_resolver_gen.go b/internal/service/efs/service_endpoint_resolver_gen.go index b93f94c9ed2f..737f31c3a4ed 100644 --- a/internal/service/efs/service_endpoint_resolver_gen.go +++ b/internal/service/efs/service_endpoint_resolver_gen.go @@ -62,7 +62,7 @@ func (r resolverV2) ResolveEndpoint(ctx context.Context, params efs.EndpointPara }) params.UseFIPS = aws.Bool(false) } else { - err = fmt.Errorf("looking up efs endpoint %q: %s", hostname, err) + err = fmt.Errorf("looking up efs endpoint %q: %w", hostname, err) return } } else { diff --git a/internal/service/efs/service_endpoints_gen_test.go b/internal/service/efs/service_endpoints_gen_test.go index ecd652f495bb..596aae6d49c3 100644 --- a/internal/service/efs/service_endpoints_gen_test.go +++ b/internal/service/efs/service_endpoints_gen_test.go @@ -521,7 +521,7 @@ func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { ) } -var errCancelOperation = fmt.Errorf("Test: Canceling request") +var errCancelOperation = errors.New("Test: Canceling request") func addCancelRequestMiddleware() func(*middleware.Stack) error { return func(stack *middleware.Stack) error { diff --git a/internal/service/efs/service_package_gen.go b/internal/service/efs/service_package_gen.go index 184e40a90307..7aedda912d75 100644 --- a/internal/service/efs/service_package_gen.go +++ b/internal/service/efs/service_package_gen.go @@ -7,7 +7,6 @@ import ( "unique" "github.com/aws/aws-sdk-go-v2/aws" - 
"github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/efs" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -127,7 +126,7 @@ func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) ( func(o *efs.Options) { if inContext, ok := conns.FromContext(ctx); ok && inContext.VCREnabled() { tflog.Info(ctx, "overriding retry behavior to immediately return VCR errors") - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(vcr.InteractionNotFoundRetryableFunc)) + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), vcr.InteractionNotFoundRetryableFunc) } }, withExtraOptions(ctx, p, config), diff --git a/internal/service/efs/sweep.go b/internal/service/efs/sweep.go index f49d5989f2c8..1aafffe74887 100644 --- a/internal/service/efs/sweep.go +++ b/internal/service/efs/sweep.go @@ -40,7 +40,7 @@ func sweepAccessPoints(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %w", err) + return fmt.Errorf("getting client: %w", err) } conn := client.EFSClient(ctx) input := &efs.DescribeFileSystemsInput{} @@ -96,7 +96,7 @@ func sweepFileSystems(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %w", err) + return fmt.Errorf("getting client: %w", err) } conn := client.EFSClient(ctx) input := &efs.DescribeFileSystemsInput{} @@ -137,7 +137,7 @@ func sweepMountTargets(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %w", err) + return fmt.Errorf("getting client: %w", err) } conn := client.EFSClient(ctx) input := &efs.DescribeFileSystemsInput{} diff --git 
a/internal/service/efs/tags_gen.go b/internal/service/efs/tags_gen.go index 0267ffdec7de..e75d1a405e14 100644 --- a/internal/service/efs/tags_gen.go +++ b/internal/service/efs/tags_gen.go @@ -3,8 +3,8 @@ package efs import ( "context" - "fmt" + "github.com/YakDriver/smarterr" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/efs" awstypes "github.com/aws/aws-sdk-go-v2/service/efs/types" @@ -31,7 +31,7 @@ func listTags(ctx context.Context, conn *efs.Client, identifier string, optFns . page, err := pages.NextPage(ctx, optFns...) if err != nil { - return tftags.New(ctx, nil), err + return tftags.New(ctx, nil), smarterr.NewError(err) } output = append(output, page.Tags...) @@ -46,7 +46,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri tags, err := listTags(ctx, meta.(*conns.AWSClient).EFSClient(ctx), identifier) if err != nil { - return err + return smarterr.NewError(err) } if inContext, ok := tftags.FromContext(ctx); ok { @@ -124,7 +124,7 @@ func updateTags(ctx context.Context, conn *efs.Client, identifier string, oldTag _, err := conn.UntagResource(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("untagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } @@ -139,7 +139,7 @@ func updateTags(ctx context.Context, conn *efs.Client, identifier string, oldTag _, err := conn.TagResource(ctx, &input, optFns...) 
if err != nil { - return fmt.Errorf("tagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } diff --git a/internal/service/eks/access_entry.go b/internal/service/eks/access_entry.go index 3cc94d66be35..77ad348e7d23 100644 --- a/internal/service/eks/access_entry.go +++ b/internal/service/eks/access_entry.go @@ -118,7 +118,7 @@ func resourceAccessEntryCreate(ctx context.Context, d *schema.ResourceData, meta input.Username = aws.String(v.(string)) } - _, err := tfresource.RetryWhenIsAErrorMessageContains[*types.InvalidParameterException](ctx, propagationTimeout, func() (any, error) { + _, err := tfresource.RetryWhenIsAErrorMessageContains[any, *types.InvalidParameterException](ctx, propagationTimeout, func(ctx context.Context) (any, error) { return conn.CreateAccessEntry(ctx, input) }, "The specified principalArn is invalid: invalid principal") diff --git a/internal/service/eks/access_entry_test.go b/internal/service/eks/access_entry_test.go index d5f013dd368d..db192c6cb463 100644 --- a/internal/service/eks/access_entry_test.go +++ b/internal/service/eks/access_entry_test.go @@ -349,6 +349,10 @@ func testAccAccessEntryConfig_base(rName string) string { return acctest.ConfigCompose(acctest.ConfigAvailableAZsNoOptIn(), fmt.Sprintf(` data "aws_partition" "current" {} +data "aws_service_principal" "eks" { + service_name = "eks" +} + resource "aws_iam_role" "test" { name = %[1]q @@ -359,7 +363,7 @@ resource "aws_iam_role" "test" { { "Effect": "Allow", "Principal": { - "Service": "eks.${data.aws_partition.current.dns_suffix}" + "Service": "${data.aws_service_principal.eks.name}" }, "Action": "sts:AssumeRole" } @@ -472,7 +476,7 @@ resource "aws_iam_role" "test2" { { "Effect": "Allow", "Principal": { - "Service": "eks.${data.aws_partition.current.dns_suffix}" + "Service": "${data.aws_service_principal.eks.name}" }, "Action": "sts:AssumeRole" } @@ -502,7 +506,7 @@ resource "aws_iam_role" "test2" { { "Effect": "Allow", "Principal": { - "Service": 
"eks.${data.aws_partition.current.dns_suffix}" + "Service": "${data.aws_service_principal.eks.name}" }, "Action": "sts:AssumeRole" } diff --git a/internal/service/eks/access_policy_association.go b/internal/service/eks/access_policy_association.go index 1be66b1cdca4..c103df304ae6 100644 --- a/internal/service/eks/access_policy_association.go +++ b/internal/service/eks/access_policy_association.go @@ -112,7 +112,7 @@ func resourceAccessPolicyAssociationCreate(ctx context.Context, d *schema.Resour PrincipalArn: aws.String(principalARN), } - _, err := tfresource.RetryWhenIsAErrorMessageContains[*types.ResourceNotFoundException](ctx, propagationTimeout, func() (any, error) { + _, err := tfresource.RetryWhenIsAErrorMessageContains[any, *types.ResourceNotFoundException](ctx, propagationTimeout, func(ctx context.Context) (any, error) { return conn.AssociateAccessPolicy(ctx, input) }, "The specified principalArn could not be found") diff --git a/internal/service/eks/access_policy_association_test.go b/internal/service/eks/access_policy_association_test.go index 265328b9d666..732986a8e261 100644 --- a/internal/service/eks/access_policy_association_test.go +++ b/internal/service/eks/access_policy_association_test.go @@ -172,6 +172,10 @@ func testAccAccessPolicyAssociationConfig_base(rName string) string { return acctest.ConfigCompose(acctest.ConfigAvailableAZsNoOptIn(), fmt.Sprintf(` data "aws_partition" "current" {} +data "aws_service_principal" "eks" { + service_name = "eks" +} + resource "aws_iam_role" "test" { name = %[1]q @@ -182,7 +186,7 @@ resource "aws_iam_role" "test" { { "Effect": "Allow", "Principal": { - "Service": "eks.${data.aws_partition.current.dns_suffix}" + "Service": "${data.aws_service_principal.eks.name}" }, "Action": "sts:AssumeRole" } diff --git a/internal/service/eks/addon.go b/internal/service/eks/addon.go index 92f8c8484856..f48bfde08529 100644 --- a/internal/service/eks/addon.go +++ b/internal/service/eks/addon.go @@ -168,7 +168,7 @@ func 
resourceAddonCreate(ctx context.Context, d *schema.ResourceData, meta any) } _, err := tfresource.RetryWhen(ctx, propagationTimeout, - func() (any, error) { + func(ctx context.Context) (any, error) { return conn.CreateAddon(ctx, input) }, func(err error) (bool, error) { diff --git a/internal/service/eks/addon_data_source_test.go b/internal/service/eks/addon_data_source_test.go index ade79863f9a6..d31528210f5e 100644 --- a/internal/service/eks/addon_data_source_test.go +++ b/internal/service/eks/addon_data_source_test.go @@ -97,11 +97,11 @@ data "aws_eks_addon" "test" { func testAccAddonDataSourceConfig_configurationValues(rName, addonName, addonVersion, configurationValues, resolveConflicts string) string { return acctest.ConfigCompose(testAccAddonConfig_base(rName), fmt.Sprintf(` resource "aws_eks_addon" "test" { - cluster_name = aws_eks_cluster.test.name - addon_name = %[2]q - addon_version = %[3]q - configuration_values = %[4]q - resolve_conflicts = %[5]q + cluster_name = aws_eks_cluster.test.name + addon_name = %[2]q + addon_version = %[3]q + configuration_values = %[4]q + resolve_conflicts_on_create = %[5]q } data "aws_eks_addon" "test" { diff --git a/internal/service/eks/addon_test.go b/internal/service/eks/addon_test.go index 3f9a34873afc..a32f838936b9 100644 --- a/internal/service/eks/addon_test.go +++ b/internal/service/eks/addon_test.go @@ -480,6 +480,10 @@ func testAccAddonConfig_base(rName string) string { return acctest.ConfigCompose(acctest.ConfigAvailableAZsNoOptIn(), fmt.Sprintf(` data "aws_partition" "current" {} +data "aws_service_principal" "eks" { + service_name = "eks" +} + resource "aws_iam_role" "test" { name = %[1]q @@ -490,7 +494,7 @@ resource "aws_iam_role" "test" { { "Effect": "Allow", "Principal": { - "Service": "eks.${data.aws_partition.current.dns_suffix}" + "Service": "${data.aws_service_principal.eks.name}" }, "Action": "sts:AssumeRole" } diff --git a/internal/service/eks/addon_version_data_source_test.go 
b/internal/service/eks/addon_version_data_source_test.go index 37402e60fad2..38367abdde4a 100644 --- a/internal/service/eks/addon_version_data_source_test.go +++ b/internal/service/eks/addon_version_data_source_test.go @@ -54,11 +54,10 @@ data "aws_eks_addon_version" "test" { } resource "aws_eks_addon" "test" { - addon_name = %[2]q - cluster_name = aws_eks_cluster.test.name - addon_version = data.aws_eks_addon_version.test.version - - resolve_conflicts = "OVERWRITE" + addon_name = %[2]q + cluster_name = aws_eks_cluster.test.name + addon_version = data.aws_eks_addon_version.test.version + resolve_conflicts_on_create = "OVERWRITE" } data "aws_eks_addon" "test" { diff --git a/internal/service/eks/cluster.go b/internal/service/eks/cluster.go index 8a7d28be915f..fe559e43665a 100644 --- a/internal/service/eks/cluster.go +++ b/internal/service/eks/cluster.go @@ -54,34 +54,11 @@ func resourceCluster() *schema.Resource { CustomizeDiff: customdiff.Sequence( validateAutoModeCustomizeDiff, + validateAutoModeComputeConfigCustomizeDiff, customdiff.ForceNewIfChange("encryption_config", func(_ context.Context, old, new, meta any) bool { // You cannot disable envelope encryption after enabling it. This action is irreversible. 
return len(old.([]any)) == 1 && len(new.([]any)) == 0 }), - func(ctx context.Context, rd *schema.ResourceDiff, meta any) error { - if rd.Id() == "" { - return nil - } - oldValue, newValue := rd.GetChange("compute_config") - - oldComputeConfig := expandComputeConfigRequest(oldValue.([]any)) - newComputeConfig := expandComputeConfigRequest(newValue.([]any)) - - if newComputeConfig == nil || oldComputeConfig == nil { - return nil - } - - oldRoleARN := aws.ToString(oldComputeConfig.NodeRoleArn) - newRoleARN := aws.ToString(newComputeConfig.NodeRoleArn) - - // only force new if an existing role has changed, not if a new role is added - if oldRoleARN != "" && oldRoleARN != newRoleARN { - if err := rd.ForceNew("compute_config.0.node_role_arn"); err != nil { - return err - } - } - return nil - }, ), Timeouts: &schema.ResourceTimeout{ @@ -141,12 +118,14 @@ func resourceCluster() *schema.Resource { "compute_config": { Type: schema.TypeList, Optional: true, + Computed: true, MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ names.AttrEnabled: { Type: schema.TypeBool, Optional: true, + Computed: true, }, "node_pools": { Type: schema.TypeSet, @@ -168,6 +147,11 @@ func resourceCluster() *schema.Resource { Type: schema.TypeString, Computed: true, }, + names.AttrDeletionProtection: { + Type: schema.TypeBool, + Optional: true, + Computed: true, + }, "enabled_cluster_log_types": { Type: schema.TypeSet, Optional: true, @@ -253,6 +237,7 @@ func resourceCluster() *schema.Resource { names.AttrEnabled: { Type: schema.TypeBool, Optional: true, + Computed: true, }, }, }, @@ -331,7 +316,6 @@ func resourceCluster() *schema.Resource { "remote_network_config": { Type: schema.TypeList, Optional: true, - ForceNew: true, MaxItems: 1, ConflictsWith: []string{"outpost_config"}, Elem: &schema.Resource{ @@ -346,7 +330,6 @@ func resourceCluster() *schema.Resource { "cidrs": { Type: schema.TypeSet, Optional: true, - ForceNew: true, MinItems: 1, Elem: &schema.Schema{ Type: 
schema.TypeString, @@ -362,14 +345,12 @@ func resourceCluster() *schema.Resource { "remote_pod_networks": { Type: schema.TypeList, Optional: true, - Computed: true, MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "cidrs": { Type: schema.TypeSet, Optional: true, - ForceNew: true, MinItems: 1, Elem: &schema.Schema{ Type: schema.TypeString, @@ -398,6 +379,7 @@ func resourceCluster() *schema.Resource { "storage_config": { Type: schema.TypeList, Optional: true, + Computed: true, MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ @@ -410,6 +392,7 @@ func resourceCluster() *schema.Resource { names.AttrEnabled: { Type: schema.TypeBool, Optional: true, + Computed: true, }, }, }, @@ -507,30 +490,28 @@ func resourceCluster() *schema.Resource { func resourceClusterCreate(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).EKSClient(ctx) name := d.Get(names.AttrName).(string) - input := &eks.CreateClusterInput{ + input := eks.CreateClusterInput{ BootstrapSelfManagedAddons: aws.Bool(d.Get("bootstrap_self_managed_addons").(bool)), + ComputeConfig: expandComputeConfigRequest(d.Get("compute_config").([]any)), EncryptionConfig: expandEncryptionConfig(d.Get("encryption_config").([]any)), + KubernetesNetworkConfig: expandKubernetesNetworkConfigRequest(d.Get("kubernetes_network_config").([]any)), Logging: expandLogging(d.Get("enabled_cluster_log_types").(*schema.Set)), Name: aws.String(name), ResourcesVpcConfig: expandVpcConfigRequest(d.Get(names.AttrVPCConfig).([]any)), RoleArn: aws.String(d.Get(names.AttrRoleARN).(string)), + StorageConfig: expandStorageConfigRequest(d.Get("storage_config").([]any)), Tags: getTagsIn(ctx), } - if v, ok := d.GetOk("compute_config"); ok { - input.ComputeConfig = expandComputeConfigRequest(v.([]any)) - } - if v, ok := d.GetOk("access_config"); ok { input.AccessConfig = expandCreateAccessConfigRequest(v.([]any)) } - if v, ok := 
d.GetOk("kubernetes_network_config"); ok { - input.KubernetesNetworkConfig = expandKubernetesNetworkConfigRequest(v.([]any)) + if v, ok := d.GetOk(names.AttrDeletionProtection); ok { + input.DeletionProtection = aws.Bool(v.(bool)) } if v, ok := d.GetOk("outpost_config"); ok { @@ -538,11 +519,7 @@ func resourceClusterCreate(ctx context.Context, d *schema.ResourceData, meta any } if v, ok := d.GetOk("remote_network_config"); ok { - input.RemoteNetworkConfig = expandRemoteNetworkConfigRequest(v.([]any)) - } - - if v, ok := d.GetOk("storage_config"); ok { - input.StorageConfig = expandStorageConfigRequest(v.([]any)) + input.RemoteNetworkConfig = expandCreateRemoteNetworkConfigRequest(v.([]any)) } if v, ok := d.GetOk("upgrade_policy"); ok { @@ -557,9 +534,9 @@ func resourceClusterCreate(ctx context.Context, d *schema.ResourceData, meta any input.ZonalShiftConfig = expandZonalShiftConfig(v.([]any)) } - outputRaw, err := tfresource.RetryWhen(ctx, propagationTimeout, - func() (any, error) { - return conn.CreateCluster(ctx, input) + output, err := tfresource.RetryWhen(ctx, propagationTimeout, + func(ctx context.Context) (*eks.CreateClusterOutput, error) { + return conn.CreateCluster(ctx, &input) }, func(err error) (bool, error) { // InvalidParameterException: roleArn, arn:aws:iam::123456789012:role/XXX, does not exist @@ -594,7 +571,7 @@ func resourceClusterCreate(ctx context.Context, d *schema.ResourceData, meta any return sdkdiag.AppendErrorf(diags, "creating EKS Cluster (%s): %s", name, err) } - d.SetId(aws.ToString(outputRaw.(*eks.CreateClusterOutput).Cluster.Name)) + d.SetId(aws.ToString(output.Cluster.Name)) if _, err := waitClusterCreated(ctx, conn, d.Id(), d.Timeout(schema.TimeoutCreate)); err != nil { return sdkdiag.AppendErrorf(diags, "waiting for EKS Cluster (%s) create: %s", d.Id(), err) @@ -643,6 +620,7 @@ func resourceClusterRead(ctx context.Context, d *schema.ResourceData, meta any) return sdkdiag.AppendErrorf(diags, "setting compute_config: %s", err) } 
d.Set(names.AttrCreatedAt, cluster.CreatedAt.Format(time.RFC3339)) + d.Set(names.AttrDeletionProtection, cluster.DeletionProtection) if err := d.Set("enabled_cluster_log_types", flattenLogging(cluster.Logging)); err != nil { return sdkdiag.AppendErrorf(diags, "setting enabled_cluster_log_types: %s", err) } @@ -687,12 +665,11 @@ func resourceClusterRead(ctx context.Context, d *schema.ResourceData, meta any) func resourceClusterUpdate(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).EKSClient(ctx) // Do any version update first. if d.HasChange(names.AttrVersion) { - input := &eks.UpdateClusterVersionInput{ + input := eks.UpdateClusterVersionInput{ Name: aws.String(d.Id()), Version: aws.String(d.Get(names.AttrVersion).(string)), } @@ -701,7 +678,7 @@ func resourceClusterUpdate(ctx context.Context, d *schema.ResourceData, meta any input.Force = v.(bool) } - output, err := conn.UpdateClusterVersion(ctx, input) + output, err := conn.UpdateClusterVersion(ctx, &input) if err != nil { return sdkdiag.AppendErrorf(diags, "updating EKS Cluster (%s) version: %s", d.Id(), err) @@ -716,12 +693,12 @@ func resourceClusterUpdate(ctx context.Context, d *schema.ResourceData, meta any if d.HasChange("access_config") { if v, ok := d.GetOk("access_config"); ok { - input := &eks.UpdateClusterConfigInput{ + input := eks.UpdateClusterConfigInput{ AccessConfig: expandUpdateAccessConfigRequest(v.([]any)), Name: aws.String(d.Id()), } - output, err := conn.UpdateClusterConfig(ctx, input) + output, err := conn.UpdateClusterConfig(ctx, &input) if err != nil { return sdkdiag.AppendErrorf(diags, "updating EKS Cluster (%s) access configuration: %s", d.Id(), err) @@ -737,41 +714,45 @@ func resourceClusterUpdate(ctx context.Context, d *schema.ResourceData, meta any } } + // All three fields are required to enable/disable Auto Mode or else you receive the error: + // InvalidParameterException: For EKS Auto Mode, 
please ensure that all required configs, + // including computeConfig, kubernetesNetworkConfig, and blockStorage are all either fully enabled or fully disabled. + // In addition, when updating other Auto Mode arguments (i.e. - computeConfig.nodePools/nodeRoleARN), all 3 fields are required. if d.HasChanges("compute_config", "kubernetes_network_config", "storage_config") { - computeConfig := expandComputeConfigRequest(d.Get("compute_config").([]any)) - kubernetesNetworkConfig := expandKubernetesNetworkConfigRequest(d.Get("kubernetes_network_config").([]any)) - storageConfig := expandStorageConfigRequest(d.Get("storage_config").([]any)) - - input := &eks.UpdateClusterConfigInput{ + input := eks.UpdateClusterConfigInput{ + ComputeConfig: expandComputeConfigRequest(d.Get("compute_config").([]any)), + KubernetesNetworkConfig: expandKubernetesNetworkConfigRequest(d.Get("kubernetes_network_config").([]any)), Name: aws.String(d.Id()), - ComputeConfig: computeConfig, - KubernetesNetworkConfig: kubernetesNetworkConfig, - StorageConfig: storageConfig, + StorageConfig: expandStorageConfigRequest(d.Get("storage_config").([]any)), } - output, err := conn.UpdateClusterConfig(ctx, input) + output, err := conn.UpdateClusterConfig(ctx, &input) if err != nil { - return sdkdiag.AppendErrorf(diags, "updating EKS Cluster (%s) compute config: %s", d.Id(), err) + return sdkdiag.AppendErrorf(diags, "updating EKS Cluster (%s) Auto Mode settings: %s", d.Id(), err) } updateID := aws.ToString(output.Update.Id) if _, err = waitClusterUpdateSuccessful(ctx, conn, d.Id(), updateID, d.Timeout(schema.TimeoutUpdate)); err != nil { - return sdkdiag.AppendErrorf(diags, "waiting for EKS Cluster (%s) compute config update (%s): %s", d.Id(), updateID, err) + return sdkdiag.AppendErrorf(diags, "waiting for EKS Cluster (%s) Auto Mode settings update (%s): %s", d.Id(), updateID, err) } } - if d.HasChange("encryption_config") { - o, n := d.GetChange("encryption_config") + if 
d.HasChange(names.AttrDeletionProtection) { + if err := updateClusterDeletionProtection(ctx, conn, d.Id(), d.Get(names.AttrDeletionProtection).(bool), d.Timeout(schema.TimeoutUpdate)); err != nil { + return sdkdiag.AppendFromErr(diags, err) + } + } - if len(o.([]any)) == 0 && len(n.([]any)) == 1 { - input := &eks.AssociateEncryptionConfigInput{ + if d.HasChange("encryption_config") { + if o, n := d.GetChange("encryption_config"); len(o.([]any)) == 0 && len(n.([]any)) == 1 { + input := eks.AssociateEncryptionConfigInput{ ClusterName: aws.String(d.Id()), EncryptionConfig: expandEncryptionConfig(d.Get("encryption_config").([]any)), } - output, err := conn.AssociateEncryptionConfig(ctx, input) + output, err := conn.AssociateEncryptionConfig(ctx, &input) if err != nil { return sdkdiag.AppendErrorf(diags, "associating EKS Cluster (%s) encryption config: %s", d.Id(), err) @@ -786,12 +767,12 @@ func resourceClusterUpdate(ctx context.Context, d *schema.ResourceData, meta any } if d.HasChange("enabled_cluster_log_types") { - input := &eks.UpdateClusterConfigInput{ + input := eks.UpdateClusterConfigInput{ Logging: expandLogging(d.Get("enabled_cluster_log_types").(*schema.Set)), Name: aws.String(d.Id()), } - output, err := conn.UpdateClusterConfig(ctx, input) + output, err := conn.UpdateClusterConfig(ctx, &input) if err != nil { return sdkdiag.AppendErrorf(diags, "updating EKS Cluster (%s) logging: %s", d.Id(), err) @@ -804,13 +785,32 @@ func resourceClusterUpdate(ctx context.Context, d *schema.ResourceData, meta any } } + if d.HasChanges("remote_network_config.0.remote_node_networks", "remote_network_config.0.remote_pod_networks") { + input := eks.UpdateClusterConfigInput{ + Name: aws.String(d.Id()), + RemoteNetworkConfig: expandUpdateRemoteNetworkConfigRequest(d.Get("remote_network_config").([]any)), + } + + output, err := conn.UpdateClusterConfig(ctx, &input) + + if err != nil { + return sdkdiag.AppendErrorf(diags, "updating EKS Cluster (%s) remote network config: %s", 
d.Id(), err) + } + + updateID := aws.ToString(output.Update.Id) + + if _, err := waitClusterUpdateSuccessful(ctx, conn, d.Id(), updateID, d.Timeout(schema.TimeoutUpdate)); err != nil { + return sdkdiag.AppendErrorf(diags, "waiting for EKS Cluster (%s) remote network config update (%s): %s", d.Id(), updateID, err) + } + } + if d.HasChange("upgrade_policy") { - input := &eks.UpdateClusterConfigInput{ + input := eks.UpdateClusterConfigInput{ Name: aws.String(d.Id()), UpgradePolicy: expandUpgradePolicy(d.Get("upgrade_policy").([]any)), } - output, err := conn.UpdateClusterConfig(ctx, input) + output, err := conn.UpdateClusterConfig(ctx, &input) if err != nil { return sdkdiag.AppendErrorf(diags, "updating EKS Cluster (%s) upgrade policy: %s", d.Id(), err) @@ -824,7 +824,7 @@ func resourceClusterUpdate(ctx context.Context, d *schema.ResourceData, meta any } if d.HasChanges("vpc_config.0.endpoint_private_access", "vpc_config.0.endpoint_public_access", "vpc_config.0.public_access_cidrs") { - config := &types.VpcConfigRequest{ + config := types.VpcConfigRequest{ EndpointPrivateAccess: aws.Bool(d.Get("vpc_config.0.endpoint_private_access").(bool)), EndpointPublicAccess: aws.Bool(d.Get("vpc_config.0.endpoint_public_access").(bool)), } @@ -833,39 +833,39 @@ func resourceClusterUpdate(ctx context.Context, d *schema.ResourceData, meta any config.PublicAccessCidrs = flex.ExpandStringValueSet(v.(*schema.Set)) } - if err := updateVPCConfig(ctx, conn, d.Id(), config, d.Timeout(schema.TimeoutUpdate)); err != nil { + if err := updateClusterVPCConfig(ctx, conn, d.Id(), &config, d.Timeout(schema.TimeoutUpdate)); err != nil { return sdkdiag.AppendFromErr(diags, err) } } // API only allows one type of update at at time. 
if d.HasChange("vpc_config.0.subnet_ids") { - config := &types.VpcConfigRequest{ + config := types.VpcConfigRequest{ SubnetIds: flex.ExpandStringValueSet(d.Get("vpc_config.0.subnet_ids").(*schema.Set)), } - if err := updateVPCConfig(ctx, conn, d.Id(), config, d.Timeout(schema.TimeoutUpdate)); err != nil { + if err := updateClusterVPCConfig(ctx, conn, d.Id(), &config, d.Timeout(schema.TimeoutUpdate)); err != nil { return sdkdiag.AppendFromErr(diags, err) } } if d.HasChange("vpc_config.0.security_group_ids") { - config := &types.VpcConfigRequest{ + config := types.VpcConfigRequest{ SecurityGroupIds: flex.ExpandStringValueSet(d.Get("vpc_config.0.security_group_ids").(*schema.Set)), } - if err := updateVPCConfig(ctx, conn, d.Id(), config, d.Timeout(schema.TimeoutUpdate)); err != nil { + if err := updateClusterVPCConfig(ctx, conn, d.Id(), &config, d.Timeout(schema.TimeoutUpdate)); err != nil { return sdkdiag.AppendFromErr(diags, err) } } if d.HasChange("zonal_shift_config") { - input := &eks.UpdateClusterConfigInput{ + input := eks.UpdateClusterConfigInput{ Name: aws.String(d.Id()), ZonalShiftConfig: expandZonalShiftConfig(d.Get("zonal_shift_config").([]any)), } - output, err := conn.UpdateClusterConfig(ctx, input) + output, err := conn.UpdateClusterConfig(ctx, &input) if err != nil { return sdkdiag.AppendErrorf(diags, "updating EKS Cluster (%s) zonal shift config: %s", d.Id(), err) @@ -883,39 +883,31 @@ func resourceClusterUpdate(ctx context.Context, d *schema.ResourceData, meta any func resourceClusterDelete(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).EKSClient(ctx) - input := &eks.DeleteClusterInput{ - Name: aws.String(d.Id()), - } - // If a cluster is scaling up due to load a delete request will fail // This is a temporary workaround until EKS supports multiple parallel mutating operations const ( timeout = 60 * time.Minute ) log.Printf("[DEBUG] Deleting EKS Cluster: %s", 
d.Id()) - err := tfresource.Retry(ctx, timeout, func() *retry.RetryError { - var err error - - _, err = conn.DeleteCluster(ctx, input) + input := eks.DeleteClusterInput{ + Name: aws.String(d.Id()), + } + err := tfresource.Retry(ctx, timeout, func(ctx context.Context) *tfresource.RetryError { + _, err := conn.DeleteCluster(ctx, &input) if errs.IsAErrorMessageContains[*types.ResourceInUseException](err, "in progress") { - return retry.RetryableError(err) + return tfresource.RetryableError(err) } if err != nil { - return retry.NonRetryableError(err) + return tfresource.NonRetryableError(err) } return nil }, tfresource.WithDelayRand(1*time.Minute), tfresource.WithPollInterval(30*time.Second)) - if tfresource.TimedOut(err) { - _, err = conn.DeleteCluster(ctx, input) - } - if errs.IsA[*types.ResourceNotFoundException](err) { return diags } @@ -938,10 +930,14 @@ func resourceClusterDelete(ctx context.Context, d *schema.ResourceData, meta any } func findClusterByName(ctx context.Context, conn *eks.Client, name string) (*types.Cluster, error) { - input := &eks.DescribeClusterInput{ + input := eks.DescribeClusterInput{ Name: aws.String(name), } + return findCluster(ctx, conn, &input) +} + +func findCluster(ctx context.Context, conn *eks.Client, input *eks.DescribeClusterInput) (*types.Cluster, error) { output, err := conn.DescribeCluster(ctx, input) // Sometimes the EKS API returns the ResourceNotFound error in this form: @@ -964,33 +960,58 @@ func findClusterByName(ctx context.Context, conn *eks.Client, name string) (*typ return output.Cluster, nil } -func updateVPCConfig(ctx context.Context, conn *eks.Client, name string, vpcConfig *types.VpcConfigRequest, timeout time.Duration) error { - input := &eks.UpdateClusterConfigInput{ +func updateClusterDeletionProtection(ctx context.Context, conn *eks.Client, name string, deletionProtection bool, timeout time.Duration) error { + input := eks.UpdateClusterConfigInput{ + DeletionProtection: aws.Bool(deletionProtection), + Name: 
aws.String(name), + } + + output, err := conn.UpdateClusterConfig(ctx, &input) + + if err != nil { + return fmt.Errorf("updating EKS Cluster (%s) deletion protection (%t): %w", name, deletionProtection, err) + } + + updateID := aws.ToString(output.Update.Id) + + if _, err := waitClusterUpdateSuccessful(ctx, conn, name, updateID, timeout); err != nil { + return fmt.Errorf("waiting for EKS Cluster (%s) deletion protection update (%s): %w", name, updateID, err) + } + + return nil +} + +func updateClusterVPCConfig(ctx context.Context, conn *eks.Client, name string, vpcConfig *types.VpcConfigRequest, timeout time.Duration) error { + input := eks.UpdateClusterConfigInput{ Name: aws.String(name), ResourcesVpcConfig: vpcConfig, } - output, err := conn.UpdateClusterConfig(ctx, input) + output, err := conn.UpdateClusterConfig(ctx, &input) if err != nil { - return fmt.Errorf("updating EKS Cluster (%s) VPC configuration: %s", name, err) + return fmt.Errorf("updating EKS Cluster (%s) VPC configuration: %w", name, err) } updateID := aws.ToString(output.Update.Id) if _, err := waitClusterUpdateSuccessful(ctx, conn, name, updateID, timeout); err != nil { - return fmt.Errorf("waiting for EKS Cluster (%s) VPC configuration update (%s): %s", name, updateID, err) + return fmt.Errorf("waiting for EKS Cluster (%s) VPC configuration update (%s): %w", name, updateID, err) } return nil } -func findClusterUpdateByTwoPartKey(ctx context.Context, conn *eks.Client, name, id string) (*types.Update, error) { - input := &eks.DescribeUpdateInput{ +func findUpdateByTwoPartKey(ctx context.Context, conn *eks.Client, name, id string) (*types.Update, error) { + input := eks.DescribeUpdateInput{ Name: aws.String(name), UpdateId: aws.String(id), } + return findUpdate(ctx, conn, &input) +} + +func findUpdate(ctx context.Context, conn *eks.Client, input *eks.DescribeUpdateInput) (*types.Update, error) { output, err := conn.DescribeUpdate(ctx, input) if errs.IsA[*types.ResourceNotFoundException](err) { @@ 
-1027,9 +1048,9 @@ func statusCluster(ctx context.Context, conn *eks.Client, name string) retry.Sta } } -func statusClusterUpdate(ctx context.Context, conn *eks.Client, name, id string) retry.StateRefreshFunc { +func statusUpdate(ctx context.Context, conn *eks.Client, name, id string) retry.StateRefreshFunc { return func() (any, string, error) { - output, err := findClusterUpdateByTwoPartKey(ctx, conn, name, id) + output, err := findUpdateByTwoPartKey(ctx, conn, name, id) if tfresource.NotFound(err) { return nil, "", nil @@ -1085,7 +1106,7 @@ func waitClusterUpdateSuccessful(ctx context.Context, conn *eks.Client, name, id stateConf := &retry.StateChangeConf{ Pending: enum.Slice(types.UpdateStatusInProgress), Target: enum.Slice(types.UpdateStatusSuccessful), - Refresh: statusClusterUpdate(ctx, conn, name, id), + Refresh: statusUpdate(ctx, conn, name, id), Timeout: timeout, } @@ -1145,8 +1166,15 @@ func expandUpdateAccessConfigRequest(tfList []any) *types.UpdateAccessConfigRequ } func expandComputeConfigRequest(tfList []any) *types.ComputeConfigRequest { + apiObject := &types.ComputeConfigRequest{} + if len(tfList) == 0 { - return nil + // Ensure this is always present to avoid the error: + // InvalidParameterException: The type for cluster update was not provided. + // when the field is removed (nil). 
+ apiObject.Enabled = aws.Bool(false) + + return apiObject } tfMap, ok := tfList[0].(map[string]any) @@ -1154,8 +1182,6 @@ func expandComputeConfigRequest(tfList []any) *types.ComputeConfigRequest { return nil } - apiObject := &types.ComputeConfigRequest{} - if v, ok := tfMap[names.AttrEnabled].(bool); ok { apiObject.Enabled = aws.Bool(v) } @@ -1218,8 +1244,17 @@ func expandProvider(tfList []any) *types.Provider { } func expandStorageConfigRequest(tfList []any) *types.StorageConfigRequest { + apiObject := &types.StorageConfigRequest{} + if len(tfList) == 0 { - return nil + // Ensure this is always present to avoid the error: + // InvalidParameterException: The type for cluster update was not provided. + // when the field is removed (nil). + apiObject.BlockStorage = &types.BlockStorage{ + Enabled: aws.Bool(false), + } + + return apiObject } tfMap, ok := tfList[0].(map[string]any) @@ -1227,8 +1262,6 @@ func expandStorageConfigRequest(tfList []any) *types.StorageConfigRequest { return nil } - apiObject := &types.StorageConfigRequest{} - if v, ok := tfMap["block_storage"].([]any); ok { apiObject.BlockStorage = expandBlockStorage(v) } @@ -1326,8 +1359,18 @@ func expandVpcConfigRequest(tfList []any) *types.VpcConfigRequest { // nosemgrep } func expandKubernetesNetworkConfigRequest(tfList []any) *types.KubernetesNetworkConfigRequest { + apiObject := &types.KubernetesNetworkConfigRequest{} + if len(tfList) == 0 { - return nil + // Required to avoid the error: + // InvalidParameterException: For EKS Auto Mode, please ensure that all required configs, + // including computeConfig, kubernetesNetworkConfig, and blockStorage are all either fully enabled or fully disabled. + // since the other two fields have been injected with `enabled: false` when the field is not present. 
+ apiObject.ElasticLoadBalancing = &types.ElasticLoadBalancing{ + Enabled: aws.Bool(false), + } + + return apiObject } tfMap, ok := tfList[0].(map[string]any) @@ -1335,8 +1378,6 @@ func expandKubernetesNetworkConfigRequest(tfList []any) *types.KubernetesNetwork return nil } - apiObject := &types.KubernetesNetworkConfigRequest{} - if v, ok := tfMap["elastic_load_balancing"].([]any); ok { apiObject.ElasticLoadBalancing = expandKubernetesNetworkConfigElasticLoadBalancing(v) } @@ -1371,7 +1412,7 @@ func expandKubernetesNetworkConfigElasticLoadBalancing(tfList []any) *types.Elas return apiObject } -func expandRemoteNetworkConfigRequest(tfList []any) *types.RemoteNetworkConfigRequest { +func expandCreateRemoteNetworkConfigRequest(tfList []any) *types.RemoteNetworkConfigRequest { if len(tfList) == 0 { return nil } @@ -1385,6 +1426,30 @@ func expandRemoteNetworkConfigRequest(tfList []any) *types.RemoteNetworkConfigRe RemoteNodeNetworks: expandRemoteNodeNetworks(tfMap["remote_node_networks"].([]any)), } + if v, ok := tfMap["remote_pod_networks"].([]any); ok && len(v) > 0 { + apiObject.RemotePodNetworks = expandRemotePodNetworks(v) + } + + return apiObject +} + +func expandUpdateRemoteNetworkConfigRequest(tfList []any) *types.RemoteNetworkConfigRequest { + apiObject := &types.RemoteNetworkConfigRequest{ + RemoteNodeNetworks: []types.RemoteNodeNetwork{}, + RemotePodNetworks: []types.RemotePodNetwork{}, + } + + if len(tfList) == 0 { + return apiObject + } + + tfMap, ok := tfList[0].(map[string]any) + if !ok { + return apiObject + } + + apiObject.RemoteNodeNetworks = expandRemoteNodeNetworks(tfMap["remote_node_networks"].([]any)) + if v, ok := tfMap["remote_pod_networks"].([]any); ok { apiObject.RemotePodNetworks = expandRemotePodNetworks(v) } @@ -1393,12 +1458,12 @@ func expandRemoteNetworkConfigRequest(tfList []any) *types.RemoteNetworkConfigRe } func expandRemoteNodeNetworks(tfList []any) []types.RemoteNodeNetwork { + var apiObjects = []types.RemoteNodeNetwork{} + if 
len(tfList) == 0 { - return nil + return apiObjects } - var apiObjects []types.RemoteNodeNetwork - for _, tfMapRaw := range tfList { tfMap, ok := tfMapRaw.(map[string]any) if !ok { @@ -1416,12 +1481,12 @@ func expandRemoteNodeNetworks(tfList []any) []types.RemoteNodeNetwork { } func expandRemotePodNetworks(tfList []any) []types.RemotePodNetwork { + var apiObjects = []types.RemotePodNetwork{} + if len(tfList) == 0 { - return nil + return apiObjects } - var apiObjects []types.RemotePodNetwork - for _, tfMapRaw := range tfList { tfMap, ok := tfMapRaw.(map[string]any) if !ok { @@ -1494,16 +1559,16 @@ func expandZonalShiftConfig(tfList []any) *types.ZonalShiftConfigRequest { return ZonalShiftConfigRequest } -func flattenCertificate(certificate *types.Certificate) []map[string]any { - if certificate == nil { +func flattenCertificate(apiObject *types.Certificate) []map[string]any { + if apiObject == nil { return []map[string]any{} } - m := map[string]any{ - "data": aws.ToString(certificate.Data), + tfMap := map[string]any{ + "data": aws.ToString(apiObject.Data), } - return []map[string]any{m} + return []map[string]any{tfMap} } func flattenComputeConfigResponse(apiObject *types.ComputeConfigResponse) []map[string]any { @@ -1511,37 +1576,37 @@ func flattenComputeConfigResponse(apiObject *types.ComputeConfigResponse) []map[ return []map[string]any{} } - m := map[string]any{ + tfMap := map[string]any{ names.AttrEnabled: aws.ToBool(apiObject.Enabled), - "node_pools": flex.FlattenStringValueList(apiObject.NodePools), + "node_pools": apiObject.NodePools, "node_role_arn": aws.ToString(apiObject.NodeRoleArn), } - return []map[string]any{m} + return []map[string]any{tfMap} } -func flattenIdentity(identity *types.Identity) []map[string]any { - if identity == nil { +func flattenIdentity(apiObject *types.Identity) []map[string]any { + if apiObject == nil { return []map[string]any{} } - m := map[string]any{ - "oidc": flattenOIDC(identity.Oidc), + tfMap := map[string]any{ + "oidc": 
flattenOIDC(apiObject.Oidc), } - return []map[string]any{m} + return []map[string]any{tfMap} } -func flattenOIDC(oidc *types.OIDC) []map[string]any { - if oidc == nil { +func flattenOIDC(apiObject *types.OIDC) []map[string]any { + if apiObject == nil { return []map[string]any{} } - m := map[string]any{ - names.AttrIssuer: aws.ToString(oidc.Issuer), + tfMap := map[string]any{ + names.AttrIssuer: aws.ToString(apiObject.Issuer), } - return []map[string]any{m} + return []map[string]any{tfMap} } func flattenAccessConfigResponse(apiObject *types.AccessConfigResponse, bootstrapClusterCreatorAdminPermissions *bool) []any { @@ -1594,30 +1659,29 @@ func flattenProvider(apiObject *types.Provider) []any { return []any{tfMap} } -func flattenVPCConfigResponse(vpcConfig *types.VpcConfigResponse) []map[string]any { // nosemgrep:ci.caps5-in-func-name - if vpcConfig == nil { +func flattenVPCConfigResponse(apiObject *types.VpcConfigResponse) []map[string]any { // nosemgrep:ci.caps5-in-func-name + if apiObject == nil { return []map[string]any{} } - m := map[string]any{ - "cluster_security_group_id": aws.ToString(vpcConfig.ClusterSecurityGroupId), - "endpoint_private_access": vpcConfig.EndpointPrivateAccess, - "endpoint_public_access": vpcConfig.EndpointPublicAccess, - names.AttrSecurityGroupIDs: vpcConfig.SecurityGroupIds, - names.AttrSubnetIDs: vpcConfig.SubnetIds, - "public_access_cidrs": vpcConfig.PublicAccessCidrs, - names.AttrVPCID: aws.ToString(vpcConfig.VpcId), + tfMap := map[string]any{ + "cluster_security_group_id": aws.ToString(apiObject.ClusterSecurityGroupId), + "endpoint_private_access": apiObject.EndpointPrivateAccess, + "endpoint_public_access": apiObject.EndpointPublicAccess, + names.AttrSecurityGroupIDs: apiObject.SecurityGroupIds, + names.AttrSubnetIDs: apiObject.SubnetIds, + "public_access_cidrs": apiObject.PublicAccessCidrs, + names.AttrVPCID: aws.ToString(apiObject.VpcId), } - return []map[string]any{m} + return []map[string]any{tfMap} } -func 
flattenLogging(logging *types.Logging) []string { +func flattenLogging(apiObject *types.Logging) []string { enabledLogTypes := []types.LogType{} - if logging != nil { - logSetups := logging.ClusterLogging - for _, logSetup := range logSetups { + if apiObject != nil { + for _, logSetup := range apiObject.ClusterLogging { if !aws.ToBool(logSetup.Enabled) { continue } @@ -1644,13 +1708,13 @@ func flattenKubernetesNetworkConfigResponse(apiObject *types.KubernetesNetworkCo return []any{tfMap} } -func flattenKubernetesNetworkConfigElasticLoadBalancing(apiObjects *types.ElasticLoadBalancing) []any { - if apiObjects == nil { +func flattenKubernetesNetworkConfigElasticLoadBalancing(apiObject *types.ElasticLoadBalancing) []any { + if apiObject == nil { return nil } tfMap := map[string]any{ - names.AttrEnabled: aws.ToBool(apiObjects.Enabled), + names.AttrEnabled: aws.ToBool(apiObject.Enabled), } return []any{tfMap} @@ -1692,7 +1756,7 @@ func flattenRemoteNodeNetwork(apiObjects []types.RemoteNodeNetwork) []any { for _, apiObject := range apiObjects { tfMap := map[string]any{ - "cidrs": flex.FlattenStringValueList(apiObject.Cidrs), + "cidrs": apiObject.Cidrs, } tfList = append(tfList, tfMap) @@ -1710,7 +1774,7 @@ func flattenRemotePodNetwork(apiObjects []types.RemotePodNetwork) []any { for _, apiObject := range apiObjects { tfMap := map[string]any{ - "cidrs": flex.FlattenStringValueList(apiObject.Cidrs), + "cidrs": apiObject.Cidrs, } tfList = append(tfList, tfMap) @@ -1798,3 +1862,44 @@ func validateAutoModeCustomizeDiff(_ context.Context, d *schema.ResourceDiff, _ return nil } + +// Allow setting `compute_config.node_role_arn` to `null` when disabling auto mode or +// built-in node pools without forcing re-creation of the cluster +func validateAutoModeComputeConfigCustomizeDiff(_ context.Context, diff *schema.ResourceDiff, _ any) error { + if diff.Id() == "" { + return nil + } + + oldValue, newValue := diff.GetChange("compute_config") + + oldComputeConfig := 
expandComputeConfigRequest(oldValue.([]any)) + newComputeConfig := expandComputeConfigRequest(newValue.([]any)) + + if newComputeConfig == nil || oldComputeConfig == nil { + return nil + } + + oldRoleARN := aws.ToString(oldComputeConfig.NodeRoleArn) + newRoleARN := aws.ToString(newComputeConfig.NodeRoleArn) + + newComputeConfigEnabled := aws.ToBool(newComputeConfig.Enabled) + + // Do not force new if auto mode is disabled in new config and role ARN is unset + if !newComputeConfigEnabled && newRoleARN == "" { + return nil + } + + // Do not force new if built-in node pools are zeroed in new config and role ARN is unset + if len(newComputeConfig.NodePools) == 0 && newRoleARN == "" { + return nil + } + + // only force new if an existing role has changed, not if a new role is added + if oldRoleARN != "" && oldRoleARN != newRoleARN { + if err := diff.ForceNew("compute_config.0.node_role_arn"); err != nil { + return err + } + } + + return nil +} diff --git a/internal/service/eks/cluster_auth_data_source_test.go b/internal/service/eks/cluster_auth_data_source_test.go index 4a60fa0bef34..b6ebc35f534f 100644 --- a/internal/service/eks/cluster_auth_data_source_test.go +++ b/internal/service/eks/cluster_auth_data_source_test.go @@ -51,7 +51,7 @@ func testAccCheckClusterAuthToken(n string) resource.TestCheckFunc { verifier := tfeks.NewVerifier(name) identity, err := verifier.Verify(tok) if err != nil { - return fmt.Errorf("Error verifying token for cluster %q: %v", name, err) + return fmt.Errorf("Error verifying token for cluster %q: %w", name, err) } if identity.ARN == "" { return fmt.Errorf("Unexpected blank ARN for token identity") diff --git a/internal/service/eks/cluster_data_source.go b/internal/service/eks/cluster_data_source.go index c5db44de1e73..d49aeb66e3c3 100644 --- a/internal/service/eks/cluster_data_source.go +++ b/internal/service/eks/cluster_data_source.go @@ -16,6 +16,7 @@ import ( ) // @SDKDataSource("aws_eks_cluster", name="Cluster") +// @Tags func 
dataSourceCluster() *schema.Resource { return &schema.Resource{ ReadWithoutTimeout: dataSourceClusterRead, @@ -84,6 +85,10 @@ func dataSourceCluster() *schema.Resource { Type: schema.TypeString, Computed: true, }, + names.AttrDeletionProtection: { + Type: schema.TypeBool, + Computed: true, + }, "enabled_cluster_log_types": { Type: schema.TypeSet, Computed: true, @@ -321,9 +326,7 @@ func dataSourceCluster() *schema.Resource { func dataSourceClusterRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).EKSClient(ctx) - ignoreTagsConfig := meta.(*conns.AWSClient).IgnoreTagsConfig(ctx) name := d.Get(names.AttrName).(string) cluster, err := findClusterByName(ctx, conn, name) @@ -348,6 +351,7 @@ func dataSourceClusterRead(ctx context.Context, d *schema.ResourceData, meta any return sdkdiag.AppendErrorf(diags, "setting compute_config: %s", err) } d.Set(names.AttrCreatedAt, cluster.CreatedAt.Format(time.RFC3339)) + d.Set(names.AttrDeletionProtection, cluster.DeletionProtection) if err := d.Set("enabled_cluster_log_types", flattenLogging(cluster.Logging)); err != nil { return sdkdiag.AppendErrorf(diags, "setting enabled_cluster_log_types: %s", err) } @@ -382,9 +386,7 @@ func dataSourceClusterRead(ctx context.Context, d *schema.ResourceData, meta any return sdkdiag.AppendErrorf(diags, "setting zonal_shift_config: %s", err) } - if err := d.Set(names.AttrTags, keyValueTags(ctx, cluster.Tags).IgnoreAWS().IgnoreConfig(ignoreTagsConfig).Map()); err != nil { - return sdkdiag.AppendErrorf(diags, "setting tags: %s", err) - } + setTagsOut(ctx, cluster.Tags) return diags } diff --git a/internal/service/eks/cluster_data_source_test.go b/internal/service/eks/cluster_data_source_test.go index 66aec92b5ddb..0ae617b1b543 100644 --- a/internal/service/eks/cluster_data_source_test.go +++ b/internal/service/eks/cluster_data_source_test.go @@ -35,6 +35,7 @@ func TestAccEKSClusterDataSource_basic(t *testing.T) 
{ resource.TestCheckNoResourceAttr(dataSourceResourceName, "cluster_id"), resource.TestCheckResourceAttr(resourceName, "compute_config.#", "0"), resource.TestCheckResourceAttrPair(resourceName, names.AttrCreatedAt, dataSourceResourceName, names.AttrCreatedAt), + resource.TestCheckResourceAttrPair(resourceName, names.AttrDeletionProtection, dataSourceResourceName, names.AttrDeletionProtection), resource.TestCheckResourceAttr(dataSourceResourceName, "enabled_cluster_log_types.#", "2"), resource.TestCheckTypeSetElemAttr(dataSourceResourceName, "enabled_cluster_log_types.*", "api"), resource.TestCheckTypeSetElemAttr(dataSourceResourceName, "enabled_cluster_log_types.*", "audit"), diff --git a/internal/service/eks/cluster_test.go b/internal/service/eks/cluster_test.go index 13b2310e8bba..812578fff82f 100644 --- a/internal/service/eks/cluster_test.go +++ b/internal/service/eks/cluster_test.go @@ -16,8 +16,11 @@ import ( "github.com/aws/aws-sdk-go-v2/service/eks/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/knownvalue" "github.com/hashicorp/terraform-plugin-testing/plancheck" + "github.com/hashicorp/terraform-plugin-testing/statecheck" "github.com/hashicorp/terraform-plugin-testing/terraform" + "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" "github.com/hashicorp/terraform-provider-aws/internal/acctest" "github.com/hashicorp/terraform-provider-aws/internal/conns" tfeks "github.com/hashicorp/terraform-provider-aws/internal/service/eks" @@ -26,11 +29,17 @@ import ( ) const ( - clusterVersionUpgradeInitial = "1.27" - clusterVersionUpgradeUpdated = "1.28" + clusterVersion130 = "1.30" + clusterVersion131 = "1.31" + clusterVersion132 = "1.32" - clusterVersionUpgradeForceInitial = "1.30" - clusterVersionUpgradeForceUpdated = "1.31" + clusterDefaultVersion = clusterVersion132 + + clusterVersionUpgradeInitial = 
clusterVersion130 + clusterVersionUpgradeUpdated = clusterVersion131 + + clusterVersionUpgradeForceInitial = clusterVersion130 + clusterVersionUpgradeForceUpdated = clusterVersion131 ) func TestAccEKSCluster_basic(t *testing.T) { @@ -57,8 +66,9 @@ func TestAccEKSCluster_basic(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "certificate_authority.#", "1"), resource.TestCheckResourceAttrSet(resourceName, "certificate_authority.0.data"), resource.TestCheckNoResourceAttr(resourceName, "cluster_id"), - resource.TestCheckResourceAttr(resourceName, "compute_config.#", "0"), + resource.TestCheckResourceAttr(resourceName, "compute_config.#", "1"), acctest.CheckResourceAttrRFC3339(resourceName, names.AttrCreatedAt), + resource.TestCheckResourceAttr(resourceName, names.AttrDeletionProtection, acctest.CtFalse), resource.TestCheckResourceAttr(resourceName, "enabled_cluster_log_types.#", "0"), resource.TestCheckResourceAttr(resourceName, "encryption_config.#", "0"), resource.TestMatchResourceAttr(resourceName, names.AttrEndpoint, regexache.MustCompile(`^https://`)), @@ -76,7 +86,7 @@ func TestAccEKSCluster_basic(t *testing.T) { resource.TestMatchResourceAttr(resourceName, "platform_version", regexache.MustCompile(`^eks\.\d+$`)), resource.TestCheckResourceAttrPair(resourceName, names.AttrRoleARN, "aws_iam_role.cluster", names.AttrARN), resource.TestCheckResourceAttr(resourceName, names.AttrStatus, string(types.ClusterStatusActive)), - resource.TestCheckResourceAttr(resourceName, "storage_config.#", "0"), + resource.TestCheckResourceAttr(resourceName, "storage_config.#", "1"), resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, "0"), resource.TestMatchResourceAttr(resourceName, names.AttrVersion, regexache.MustCompile(`^\d+\.\d+$`)), resource.TestCheckResourceAttr(resourceName, "upgrade_policy.#", "1"), @@ -271,13 +281,13 @@ func TestAccEKSCluster_BootstrapSelfManagedAddons_migrate(t *testing.T) { ExternalProviders: 
map[string]resource.ExternalProvider{ "aws": { Source: "hashicorp/aws", - VersionConstraint: "5.56.1", + VersionConstraint: "6.9.0", }, }, Config: testAccClusterConfig_basic(rName), Check: resource.ComposeAggregateTestCheckFunc( testAccCheckClusterExists(ctx, resourceName, &cluster1), - resource.TestCheckNoResourceAttr(resourceName, "bootstrap_self_managed_addons"), + resource.TestCheckResourceAttr(resourceName, "bootstrap_self_managed_addons", acctest.CtTrue), ), }, { @@ -370,7 +380,7 @@ func TestAccEKSCluster_ComputeConfig_OnCreate(t *testing.T) { func TestAccEKSCluster_ComputeConfig_OnUpdate(t *testing.T) { ctx := acctest.Context(t) - var cluster1, cluster2 types.Cluster + var cluster types.Cluster rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_eks_cluster.test" @@ -383,15 +393,43 @@ func TestAccEKSCluster_ComputeConfig_OnUpdate(t *testing.T) { { Config: testAccClusterConfig_computeConfig_onUpdateSetup(rName), Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckClusterExists(ctx, resourceName, &cluster1), - resource.TestCheckResourceAttr(resourceName, "compute_config.#", "0"), + testAccCheckClusterExists(ctx, resourceName, &cluster), + resource.TestCheckResourceAttr(resourceName, "compute_config.#", "1"), + resource.TestCheckResourceAttr(resourceName, "compute_config.0.enabled", acctest.CtFalse), + resource.TestCheckResourceAttr(resourceName, "compute_config.0.node_pools.#", "0"), resource.TestCheckResourceAttr(resourceName, "kubernetes_network_config.#", "1"), resource.TestCheckResourceAttr(resourceName, "kubernetes_network_config.0.elastic_load_balancing.#", "1"), resource.TestCheckResourceAttr(resourceName, "kubernetes_network_config.0.elastic_load_balancing.0.enabled", acctest.CtFalse), resource.TestCheckResourceAttr(resourceName, "kubernetes_network_config.0.ip_family", "ipv4"), resource.TestCheckResourceAttr(resourceName, "kubernetes_network_config.0.service_ipv4_cidr", "172.20.0.0/16"), 
resource.TestCheckResourceAttr(resourceName, "kubernetes_network_config.0.service_ipv6_cidr", ""), - resource.TestCheckResourceAttr(resourceName, "storage_config.#", "0"), + resource.TestCheckResourceAttr(resourceName, "storage_config.#", "1"), + resource.TestCheckResourceAttr(resourceName, "storage_config.0.block_storage.#", "1"), + resource.TestCheckResourceAttr(resourceName, "storage_config.0.block_storage.0.enabled", acctest.CtFalse), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"bootstrap_self_managed_addons"}, + }, + { + Config: testAccClusterConfig_computeConfig_nodePoolsSetup(rName, false), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckClusterExists(ctx, resourceName, &cluster), + resource.TestCheckResourceAttr(resourceName, "compute_config.#", "1"), + resource.TestCheckResourceAttr(resourceName, "compute_config.0.enabled", acctest.CtFalse), + resource.TestCheckResourceAttr(resourceName, "compute_config.0.node_pools.#", "0"), + resource.TestCheckResourceAttr(resourceName, "kubernetes_network_config.#", "1"), + resource.TestCheckResourceAttr(resourceName, "kubernetes_network_config.0.elastic_load_balancing.#", "1"), + resource.TestCheckResourceAttr(resourceName, "kubernetes_network_config.0.elastic_load_balancing.0.enabled", acctest.CtFalse), + resource.TestCheckResourceAttr(resourceName, "kubernetes_network_config.0.ip_family", "ipv4"), + resource.TestCheckResourceAttr(resourceName, "kubernetes_network_config.0.service_ipv4_cidr", "172.20.0.0/16"), + resource.TestCheckResourceAttr(resourceName, "kubernetes_network_config.0.service_ipv6_cidr", ""), + resource.TestCheckResourceAttr(resourceName, "storage_config.#", "1"), + resource.TestCheckResourceAttr(resourceName, "storage_config.0.block_storage.#", "1"), + resource.TestCheckResourceAttr(resourceName, "storage_config.0.block_storage.0.enabled", acctest.CtFalse), ), }, { @@ -402,9 +440,13 @@ func 
TestAccEKSCluster_ComputeConfig_OnUpdate(t *testing.T) { }, { Config: testAccClusterConfig_computeConfig(rName, true, "aws_iam_role.node.arn"), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + }, + }, Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckClusterExists(ctx, resourceName, &cluster2), - testAccCheckClusterNotRecreated(&cluster1, &cluster2), + testAccCheckClusterExists(ctx, resourceName, &cluster), resource.TestCheckResourceAttr(resourceName, "compute_config.#", "1"), resource.TestCheckResourceAttr(resourceName, "compute_config.0.enabled", acctest.CtTrue), resource.TestCheckResourceAttr(resourceName, "compute_config.0.node_pools.#", "1"), @@ -427,6 +469,35 @@ func TestAccEKSCluster_ComputeConfig_OnUpdate(t *testing.T) { ImportStateVerify: true, ImportStateVerifyIgnore: []string{"bootstrap_self_managed_addons"}, }, + { + Config: testAccClusterConfig_computeConfig_nodePoolsSetup(rName, false), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + }, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckClusterExists(ctx, resourceName, &cluster), + resource.TestCheckResourceAttr(resourceName, "compute_config.#", "1"), + resource.TestCheckResourceAttr(resourceName, "compute_config.0.enabled", acctest.CtFalse), + resource.TestCheckResourceAttr(resourceName, "compute_config.0.node_pools.#", "0"), + resource.TestCheckResourceAttr(resourceName, "kubernetes_network_config.#", "1"), + resource.TestCheckResourceAttr(resourceName, "kubernetes_network_config.0.elastic_load_balancing.#", "1"), + resource.TestCheckResourceAttr(resourceName, "kubernetes_network_config.0.elastic_load_balancing.0.enabled", acctest.CtFalse), + resource.TestCheckResourceAttr(resourceName, "kubernetes_network_config.0.ip_family", "ipv4"), + 
resource.TestCheckResourceAttr(resourceName, "kubernetes_network_config.0.service_ipv4_cidr", "172.20.0.0/16"), + resource.TestCheckResourceAttr(resourceName, "kubernetes_network_config.0.service_ipv6_cidr", ""), + resource.TestCheckResourceAttr(resourceName, "storage_config.#", "1"), + resource.TestCheckResourceAttr(resourceName, "storage_config.0.block_storage.#", "1"), + resource.TestCheckResourceAttr(resourceName, "storage_config.0.block_storage.0.enabled", acctest.CtFalse), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"bootstrap_self_managed_addons"}, + }, }, }) } @@ -1274,7 +1345,7 @@ func TestAccEKSCluster_Outpost_placement(t *testing.T) { }) } -func TestAccEKSCluster_RemoteNetwork_Node(t *testing.T) { +func TestAccEKSCluster_RemoteNetwork_Node_OnCreate(t *testing.T) { ctx := acctest.Context(t) var cluster types.Cluster rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -1308,7 +1379,69 @@ func TestAccEKSCluster_RemoteNetwork_Node(t *testing.T) { }) } -func TestAccEKSCluster_RemoteNetwork_Pod(t *testing.T) { +func TestAccEKSCluster_RemoteNetwork_Node_OnUpdate(t *testing.T) { + ctx := acctest.Context(t) + var cluster1, cluster2, cluster3 types.Cluster + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_eks_cluster.test" + remoteNodeCIDR := "10.90.0.0/22" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.EKSServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckClusterDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccClusterConfig_accessConfig(rName, types.AuthenticationModeApi), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckClusterExists(ctx, resourceName, &cluster1), + resource.TestCheckResourceAttr(resourceName, "remote_network_config.#", 
"0"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"bootstrap_self_managed_addons"}, + }, + { + Config: testAccClusterConfig_remoteNodeNetwork(rName, remoteNodeCIDR), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckClusterExists(ctx, resourceName, &cluster2), + testAccCheckClusterNotRecreated(&cluster1, &cluster2), + resource.TestCheckResourceAttr(resourceName, "remote_network_config.#", "1"), + resource.TestCheckResourceAttr(resourceName, "remote_network_config.0.remote_node_networks.#", "1"), + resource.TestCheckResourceAttr(resourceName, "remote_network_config.0.remote_node_networks.0.cidrs.#", "1"), + resource.TestCheckResourceAttr(resourceName, "remote_network_config.0.remote_node_networks.0.cidrs.0", remoteNodeCIDR), + resource.TestCheckResourceAttr(resourceName, "remote_network_config.0.remote_pod_networks.#", "0"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"bootstrap_self_managed_addons"}, + }, + { + Config: testAccClusterConfig_accessConfig(rName, types.AuthenticationModeApi), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckClusterExists(ctx, resourceName, &cluster3), + testAccCheckClusterNotRecreated(&cluster2, &cluster3), + resource.TestCheckResourceAttr(resourceName, "remote_network_config.#", "0"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"bootstrap_self_managed_addons"}, + }, + }, + }) +} + +func TestAccEKSCluster_RemoteNetwork_Pod_OnCreate(t *testing.T) { ctx := acctest.Context(t) var cluster types.Cluster rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -1345,6 +1478,75 @@ func TestAccEKSCluster_RemoteNetwork_Pod(t *testing.T) { }) } +func TestAccEKSCluster_RemoteNetwork_Pod_OnUpdate(t *testing.T) { + ctx := acctest.Context(t) + var cluster1, cluster2, 
cluster3 types.Cluster + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_eks_cluster.test" + remoteNodeCIDR := "10.90.0.0/22" + remotePodCIDR := "10.80.0.0/22" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.EKSServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckClusterDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccClusterConfig_accessConfig(rName, types.AuthenticationModeApi), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckClusterExists(ctx, resourceName, &cluster1), + resource.TestCheckResourceAttr(resourceName, "remote_network_config.#", "0"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"bootstrap_self_managed_addons"}, + }, + { + Config: testAccClusterConfig_remotePodNetwork(rName, remoteNodeCIDR, remotePodCIDR), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckClusterExists(ctx, resourceName, &cluster2), + testAccCheckClusterNotRecreated(&cluster1, &cluster2), + resource.TestCheckResourceAttr(resourceName, "remote_network_config.#", "1"), + resource.TestCheckResourceAttr(resourceName, "remote_network_config.0.remote_node_networks.#", "1"), + resource.TestCheckResourceAttr(resourceName, "remote_network_config.0.remote_node_networks.0.cidrs.#", "1"), + resource.TestCheckResourceAttr(resourceName, "remote_network_config.0.remote_node_networks.0.cidrs.0", remoteNodeCIDR), + resource.TestCheckResourceAttr(resourceName, "remote_network_config.0.remote_pod_networks.#", "1"), + resource.TestCheckResourceAttr(resourceName, "remote_network_config.0.remote_pod_networks.0.cidrs.#", "1"), + resource.TestCheckResourceAttr(resourceName, "remote_network_config.0.remote_pod_networks.0.cidrs.0", remotePodCIDR), + ), + }, + { + ResourceName: 
resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"bootstrap_self_managed_addons"}, + }, + { + Config: testAccClusterConfig_remoteNodeNetwork(rName, remoteNodeCIDR), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckClusterExists(ctx, resourceName, &cluster3), + testAccCheckClusterNotRecreated(&cluster2, &cluster3), + resource.TestCheckResourceAttr(resourceName, "remote_network_config.#", "1"), + resource.TestCheckResourceAttr(resourceName, "remote_network_config.0.remote_node_networks.#", "1"), + resource.TestCheckResourceAttr(resourceName, "remote_network_config.0.remote_node_networks.0.cidrs.#", "1"), + resource.TestCheckResourceAttr(resourceName, "remote_network_config.0.remote_node_networks.0.cidrs.0", remoteNodeCIDR), + resource.TestCheckResourceAttr(resourceName, "remote_network_config.0.remote_pod_networks.#", "0"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"bootstrap_self_managed_addons"}, + }, + }, + }) +} + func TestAccEKSCluster_upgradePolicy(t *testing.T) { ctx := acctest.Context(t) var cluster types.Cluster @@ -1358,11 +1560,11 @@ func TestAccEKSCluster_upgradePolicy(t *testing.T) { CheckDestroy: testAccCheckClusterDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccClusterConfig_upgradePolicy(rName, "STANDARD"), + Config: testAccClusterConfig_upgradePolicy(rName, "EXTENDED"), Check: resource.ComposeAggregateTestCheckFunc( testAccCheckClusterExists(ctx, resourceName, &cluster), resource.TestCheckResourceAttr(resourceName, "upgrade_policy.#", "1"), - resource.TestCheckResourceAttr(resourceName, "upgrade_policy.0.support_type", "STANDARD"), + resource.TestCheckResourceAttr(resourceName, "upgrade_policy.0.support_type", "EXTENDED"), ), }, { @@ -1372,11 +1574,11 @@ func TestAccEKSCluster_upgradePolicy(t *testing.T) { ImportStateVerifyIgnore: []string{"bootstrap_self_managed_addons"}, }, { - Config: 
testAccClusterConfig_upgradePolicy(rName, "EXTENDED"), + Config: testAccClusterConfig_upgradePolicy(rName, "STANDARD"), Check: resource.ComposeAggregateTestCheckFunc( testAccCheckClusterExists(ctx, resourceName, &cluster), resource.TestCheckResourceAttr(resourceName, "upgrade_policy.#", "1"), - resource.TestCheckResourceAttr(resourceName, "upgrade_policy.0.support_type", "EXTENDED"), + resource.TestCheckResourceAttr(resourceName, "upgrade_policy.0.support_type", "STANDARD"), ), }, { @@ -1427,6 +1629,56 @@ func TestAccEKSCluster_zonalShiftConfig(t *testing.T) { }) } +func TestAccEKSCluster_deletionProtection(t *testing.T) { + ctx := acctest.Context(t) + var cluster types.Cluster + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_eks_cluster.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.EKSServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckClusterDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccClusterConfig_deletionProtection(rName, true), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckClusterExists(ctx, resourceName, &cluster), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrDeletionProtection), knownvalue.Bool(true)), + }, + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"bootstrap_self_managed_addons"}, + }, + { + Config: testAccClusterConfig_deletionProtection(rName, false), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckClusterExists(ctx, resourceName, &cluster), + ), + ConfigPlanChecks: 
resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrDeletionProtection), knownvalue.Bool(false)), + }, + }, + }, + }) +} + func testAccCheckClusterExists(ctx context.Context, n string, v *types.Cluster) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] @@ -1504,6 +1756,10 @@ func testAccClusterConfig_base(rName string) string { return acctest.ConfigCompose(acctest.ConfigAvailableAZsNoOptIn(), fmt.Sprintf(` data "aws_partition" "current" {} +data "aws_service_principal" "eks" { + service_name = "eks" +} + resource "aws_iam_role" "cluster" { name = %[1]q @@ -1514,7 +1770,7 @@ resource "aws_iam_role" "cluster" { { "Effect": "Allow", "Principal": { - "Service": "eks.${data.aws_partition.current.dns_suffix}" + "Service": "${data.aws_service_principal.eks.name}" }, "Action": "sts:AssumeRole" } @@ -1612,6 +1868,10 @@ func testAccClusterConfig_computeConfigBase(rName string) string { return acctest.ConfigCompose(acctest.ConfigAvailableAZsNoOptIn(), fmt.Sprintf(` data "aws_partition" "current" {} +data "aws_service_principal" "eks" { + service_name = "eks" +} + resource "aws_iam_role" "cluster" { name = %[1]q @@ -1622,7 +1882,7 @@ resource "aws_iam_role" "cluster" { { "Effect": "Allow", "Principal": { - "Service": "eks.${data.aws_partition.current.dns_suffix}" + "Service": "${data.aws_service_principal.eks.name}" }, "Action": [ "sts:AssumeRole", @@ -1659,6 +1919,10 @@ resource "aws_iam_role_policy_attachment" "cluster_AmazonEKSNetworkingPolicy" { role = aws_iam_role.cluster.name } +data "aws_service_principal" "ec2" { + service_name = "ec2" +} + resource "aws_iam_role" "node" { name = "%[1]s-node" @@ -1669,7 +1933,7 @@ resource "aws_iam_role" "node" { { "Effect": "Allow", "Principal": { - "Service": 
"ec2.${data.aws_partition.current.dns_suffix}" + "Service": "${data.aws_service_principal.ec2.name}" }, "Action": ["sts:AssumeRole"] } @@ -1698,7 +1962,7 @@ resource "aws_iam_role" "node2" { { "Effect": "Allow", "Principal": { - "Service": "ec2.${data.aws_partition.current.dns_suffix}" + "Service": "${data.aws_service_principal.ec2.name}" }, "Action": ["sts:AssumeRole"] } @@ -2366,3 +2630,20 @@ resource "aws_eks_cluster" "test" { } `, rName, enabled)) } + +func testAccClusterConfig_deletionProtection(rName string, deletionProtection bool) string { + return acctest.ConfigCompose(testAccClusterConfig_base(rName), fmt.Sprintf(` +resource "aws_eks_cluster" "test" { + name = %[1]q + role_arn = aws_iam_role.cluster.arn + + vpc_config { + subnet_ids = aws_subnet.test[*].id + } + + deletion_protection = %[2]t + + depends_on = [aws_iam_role_policy_attachment.cluster_AmazonEKSClusterPolicy] +} +`, rName, deletionProtection)) +} diff --git a/internal/service/eks/fargate_profile.go b/internal/service/eks/fargate_profile.go index 5f66b54d27f9..0c63533d186f 100644 --- a/internal/service/eks/fargate_profile.go +++ b/internal/service/eks/fargate_profile.go @@ -131,7 +131,7 @@ func resourceFargateProfileCreate(ctx context.Context, d *schema.ResourceData, m // Retry for IAM eventual consistency on error: // InvalidParameterException: Misconfigured PodExecutionRole Trust Policy; Please add the eks-fargate-pods.amazonaws.com Service Principal - _, err := tfresource.RetryWhenIsAErrorMessageContains[*types.InvalidParameterException](ctx, propagationTimeout, func() (any, error) { + _, err := tfresource.RetryWhenIsAErrorMessageContains[any, *types.InvalidParameterException](ctx, propagationTimeout, func(ctx context.Context) (any, error) { return conn.CreateFargateProfile(ctx, input) }, "Misconfigured PodExecutionRole Trust Policy") diff --git a/internal/service/eks/fargate_profile_test.go b/internal/service/eks/fargate_profile_test.go index 3f9eb41aead9..046b93b1e62b 100644 --- 
a/internal/service/eks/fargate_profile_test.go +++ b/internal/service/eks/fargate_profile_test.go @@ -273,6 +273,10 @@ data "aws_availability_zones" "available" { data "aws_partition" "current" {} +data "aws_service_principal" "eks" { + service_name = "eks" +} + resource "aws_iam_role" "cluster" { name = "%[1]s-cluster" @@ -281,7 +285,7 @@ resource "aws_iam_role" "cluster" { Action = "sts:AssumeRole" Effect = "Allow" Principal = { - Service = "eks.${data.aws_partition.current.dns_suffix}" + Service = data.aws_service_principal.eks.name } }] Version = "2012-10-17" @@ -293,6 +297,10 @@ resource "aws_iam_role_policy_attachment" "cluster-AmazonEKSClusterPolicy" { role = aws_iam_role.cluster.name } +data "aws_service_principal" "eks_fargate_pods" { + service_name = "eks-fargate-pods" +} + resource "aws_iam_role" "pod" { name = "%[1]s-pod" @@ -301,7 +309,7 @@ resource "aws_iam_role" "pod" { Action = "sts:AssumeRole" Effect = "Allow" Principal = { - Service = "eks-fargate-pods.${data.aws_partition.current.dns_suffix}" + Service = data.aws_service_principal.eks_fargate_pods.name } }] Version = "2012-10-17" diff --git a/internal/service/eks/identity_provider_config_test.go b/internal/service/eks/identity_provider_config_test.go index 2571c7368fb2..58fefa62a910 100644 --- a/internal/service/eks/identity_provider_config_test.go +++ b/internal/service/eks/identity_provider_config_test.go @@ -233,6 +233,10 @@ func testAccIdentityProviderBaseConfig(rName string) string { return acctest.ConfigCompose(acctest.ConfigAvailableAZsNoOptIn(), fmt.Sprintf(` data "aws_partition" "current" {} +data "aws_service_principal" "eks" { + service_name = "eks" +} + resource "aws_iam_role" "test" { name = %[1]q @@ -241,7 +245,7 @@ resource "aws_iam_role" "test" { Action = "sts:AssumeRole" Effect = "Allow" Principal = { - Service = "eks.${data.aws_partition.current.dns_suffix}" + Service = data.aws_service_principal.eks.name } }] Version = "2012-10-17" diff --git 
a/internal/service/eks/node_group_test.go b/internal/service/eks/node_group_test.go index 74cb82e2580d..a16bd5d91040 100644 --- a/internal/service/eks/node_group_test.go +++ b/internal/service/eks/node_group_test.go @@ -549,7 +549,7 @@ func TestAccEKSNodeGroup_releaseVersion(t *testing.T) { CheckDestroy: testAccCheckNodeGroupDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccNodeGroupConfig_releaseVersion(rName, "1.27"), + Config: testAccNodeGroupConfig_releaseVersion(rName, clusterVersion130), Check: resource.ComposeTestCheckFunc( testAccCheckNodeGroupExists(ctx, resourceName, &nodeGroup1), resource.TestCheckResourceAttrPair(resourceName, "release_version", ssmParameterDataSourceName, names.AttrValue), @@ -561,7 +561,7 @@ func TestAccEKSNodeGroup_releaseVersion(t *testing.T) { ImportStateVerify: true, }, { - Config: testAccNodeGroupConfig_releaseVersion(rName, "1.28"), + Config: testAccNodeGroupConfig_releaseVersion(rName, clusterVersion131), Check: resource.ComposeTestCheckFunc( testAccCheckNodeGroupExists(ctx, resourceName, &nodeGroup2), testAccCheckNodeGroupNotRecreated(&nodeGroup1, &nodeGroup2), @@ -1092,6 +1092,14 @@ func testAccNodeGroupConfig_iamAndVPCBase(rName string) string { return acctest.ConfigCompose(acctest.ConfigAvailableAZsNoOptIn(), fmt.Sprintf(` data "aws_partition" "current" {} +data "aws_service_principal" "eks" { + service_name = "eks" +} + +data "aws_service_principal" "eks_nodegroup" { + service_name = "eks-nodegroup" +} + resource "aws_iam_role" "cluster" { name = "%[1]s-cluster" @@ -1101,8 +1109,8 @@ resource "aws_iam_role" "cluster" { Effect = "Allow" Principal = { Service = [ - "eks.${data.aws_partition.current.dns_suffix}", - "eks-nodegroup.${data.aws_partition.current.dns_suffix}", + data.aws_service_principal.eks.name, + data.aws_service_principal.eks_nodegroup.name, ] } }] @@ -1115,6 +1123,10 @@ resource "aws_iam_role_policy_attachment" "cluster-AmazonEKSClusterPolicy" { role = aws_iam_role.cluster.name } +data 
"aws_service_principal" "ec2" { + service_name = "ec2" +} + resource "aws_iam_role" "node" { name = "%[1]s-node" @@ -1123,7 +1135,7 @@ resource "aws_iam_role" "node" { Action = "sts:AssumeRole" Effect = "Allow" Principal = { - Service = "ec2.${data.aws_partition.current.dns_suffix}" + Service = data.aws_service_principal.ec2.name } }] Version = "2012-10-17" @@ -1145,6 +1157,11 @@ resource "aws_iam_role_policy_attachment" "node-AmazonEC2ContainerRegistryReadOn role = aws_iam_role.node.name } +resource "aws_iam_role_policy_attachment" "node-AmazonEKSWorkerNodeMinimalPolicy" { + policy_arn = "arn:${data.aws_partition.current.partition}:iam::aws:policy/AmazonEKSWorkerNodeMinimalPolicy" + role = aws_iam_role.node.name +} + resource "aws_vpc" "test" { cidr_block = "10.0.0.0/16" enable_dns_hostnames = true @@ -1228,9 +1245,11 @@ func testAccNodeGroupConfig_base(rName string) string { resource "aws_eks_cluster" "test" { name = %[1]q role_arn = aws_iam_role.cluster.arn + version = %[2]q vpc_config { - subnet_ids = aws_subnet.test[*].id + subnet_ids = aws_subnet.test[*].id + security_group_ids = [aws_security_group.test.id] } depends_on = [ @@ -1238,7 +1257,7 @@ resource "aws_eks_cluster" "test" { aws_main_route_table_association.test, ] } -`, rName)) +`, rName, clusterDefaultVersion)) } func testAccNodeGroupConfig_versionBase(rName string, version string) string { @@ -1280,6 +1299,7 @@ resource "aws_eks_node_group" "test" { aws_iam_role_policy_attachment.node-AmazonEKSWorkerNodePolicy, aws_iam_role_policy_attachment.node-AmazonEKS_CNI_Policy, aws_iam_role_policy_attachment.node-AmazonEC2ContainerRegistryReadOnly, + aws_iam_role_policy_attachment.node-AmazonEKSWorkerNodeMinimalPolicy, ] } `, rName)) @@ -1302,6 +1322,7 @@ resource "aws_eks_node_group" "test" { aws_iam_role_policy_attachment.node-AmazonEKSWorkerNodePolicy, aws_iam_role_policy_attachment.node-AmazonEKS_CNI_Policy, aws_iam_role_policy_attachment.node-AmazonEC2ContainerRegistryReadOnly, + 
aws_iam_role_policy_attachment.node-AmazonEKSWorkerNodeMinimalPolicy, ] } `) @@ -1325,6 +1346,7 @@ resource "aws_eks_node_group" "test" { "aws_iam_role_policy_attachment.node-AmazonEKSWorkerNodePolicy", "aws_iam_role_policy_attachment.node-AmazonEKS_CNI_Policy", "aws_iam_role_policy_attachment.node-AmazonEC2ContainerRegistryReadOnly", + "aws_iam_role_policy_attachment.node-AmazonEKSWorkerNodeMinimalPolicy", ] } `, namePrefix)) @@ -1349,6 +1371,7 @@ resource "aws_eks_node_group" "test" { aws_iam_role_policy_attachment.node-AmazonEKSWorkerNodePolicy, aws_iam_role_policy_attachment.node-AmazonEKS_CNI_Policy, aws_iam_role_policy_attachment.node-AmazonEC2ContainerRegistryReadOnly, + aws_iam_role_policy_attachment.node-AmazonEKSWorkerNodeMinimalPolicy, ] } `, rName, amiType)) @@ -1373,6 +1396,7 @@ resource "aws_eks_node_group" "test" { aws_iam_role_policy_attachment.node-AmazonEKSWorkerNodePolicy, aws_iam_role_policy_attachment.node-AmazonEKS_CNI_Policy, aws_iam_role_policy_attachment.node-AmazonEC2ContainerRegistryReadOnly, + aws_iam_role_policy_attachment.node-AmazonEKSWorkerNodeMinimalPolicy, ] } `, rName, capacityType)) @@ -1397,6 +1421,7 @@ resource "aws_eks_node_group" "test" { aws_iam_role_policy_attachment.node-AmazonEKSWorkerNodePolicy, aws_iam_role_policy_attachment.node-AmazonEKS_CNI_Policy, aws_iam_role_policy_attachment.node-AmazonEC2ContainerRegistryReadOnly, + aws_iam_role_policy_attachment.node-AmazonEKSWorkerNodeMinimalPolicy, ] } `, rName, diskSize)) @@ -1422,6 +1447,7 @@ resource "aws_eks_node_group" "test" { aws_iam_role_policy_attachment.node-AmazonEKSWorkerNodePolicy, aws_iam_role_policy_attachment.node-AmazonEKS_CNI_Policy, aws_iam_role_policy_attachment.node-AmazonEC2ContainerRegistryReadOnly, + aws_iam_role_policy_attachment.node-AmazonEKSWorkerNodeMinimalPolicy, ] } `, rName)) @@ -1450,6 +1476,7 @@ resource "aws_eks_node_group" "test" { aws_iam_role_policy_attachment.node-AmazonEKSWorkerNodePolicy, 
aws_iam_role_policy_attachment.node-AmazonEKS_CNI_Policy, aws_iam_role_policy_attachment.node-AmazonEC2ContainerRegistryReadOnly, + aws_iam_role_policy_attachment.node-AmazonEKSWorkerNodeMinimalPolicy, ] } `, instanceTypes, rName)) @@ -1485,6 +1512,7 @@ resource "aws_eks_node_group" "test" { aws_iam_role_policy_attachment.node-AmazonEKSWorkerNodePolicy, aws_iam_role_policy_attachment.node-AmazonEKS_CNI_Policy, aws_iam_role_policy_attachment.node-AmazonEC2ContainerRegistryReadOnly, + aws_iam_role_policy_attachment.node-AmazonEKSWorkerNodeMinimalPolicy, ] } `, rName)) @@ -1512,6 +1540,7 @@ resource "aws_eks_node_group" "test" { aws_iam_role_policy_attachment.node-AmazonEKSWorkerNodePolicy, aws_iam_role_policy_attachment.node-AmazonEKS_CNI_Policy, aws_iam_role_policy_attachment.node-AmazonEC2ContainerRegistryReadOnly, + aws_iam_role_policy_attachment.node-AmazonEKSWorkerNodeMinimalPolicy, ] } `, rName, labelKey1, labelValue1)) @@ -1540,6 +1569,7 @@ resource "aws_eks_node_group" "test" { aws_iam_role_policy_attachment.node-AmazonEKSWorkerNodePolicy, aws_iam_role_policy_attachment.node-AmazonEKS_CNI_Policy, aws_iam_role_policy_attachment.node-AmazonEC2ContainerRegistryReadOnly, + aws_iam_role_policy_attachment.node-AmazonEKSWorkerNodeMinimalPolicy, ] } `, rName, labelKey1, labelValue1, labelKey2, labelValue2)) @@ -1558,6 +1588,8 @@ resource "aws_launch_template" "test1" { instance_type = "t3.medium" name = "%[1]s-1" user_data = base64encode(templatefile("testdata/node-group-launch-template-user-data.sh.tmpl", { cluster_name = aws_eks_cluster.test.name })) + + vpc_security_group_ids = [aws_security_group.test.id] } resource "aws_launch_template" "test2" { @@ -1565,6 +1597,8 @@ resource "aws_launch_template" "test2" { instance_type = "t3.medium" name = "%[1]s-2" user_data = base64encode(templatefile("testdata/node-group-launch-template-user-data.sh.tmpl", { cluster_name = aws_eks_cluster.test.name })) + + vpc_security_group_ids = [aws_security_group.test.id] } resource 
"aws_eks_node_group" "test" { @@ -1588,6 +1622,7 @@ resource "aws_eks_node_group" "test" { aws_iam_role_policy_attachment.node-AmazonEKSWorkerNodePolicy, aws_iam_role_policy_attachment.node-AmazonEKS_CNI_Policy, aws_iam_role_policy_attachment.node-AmazonEC2ContainerRegistryReadOnly, + aws_iam_role_policy_attachment.node-AmazonEKSWorkerNodeMinimalPolicy, ] } `, rName)) @@ -1606,6 +1641,8 @@ resource "aws_launch_template" "test1" { instance_type = "t3.medium" name = "%[1]s-1" user_data = base64encode(templatefile("testdata/node-group-launch-template-user-data.sh.tmpl", { cluster_name = aws_eks_cluster.test.name })) + + vpc_security_group_ids = [aws_security_group.test.id] } resource "aws_launch_template" "test2" { @@ -1613,6 +1650,8 @@ resource "aws_launch_template" "test2" { instance_type = "t3.medium" name = "%[1]s-2" user_data = base64encode(templatefile("testdata/node-group-launch-template-user-data.sh.tmpl", { cluster_name = aws_eks_cluster.test.name })) + + vpc_security_group_ids = [aws_security_group.test.id] } resource "aws_eks_node_group" "test" { @@ -1636,6 +1675,7 @@ resource "aws_eks_node_group" "test" { aws_iam_role_policy_attachment.node-AmazonEKSWorkerNodePolicy, aws_iam_role_policy_attachment.node-AmazonEKS_CNI_Policy, aws_iam_role_policy_attachment.node-AmazonEC2ContainerRegistryReadOnly, + aws_iam_role_policy_attachment.node-AmazonEKSWorkerNodeMinimalPolicy, ] } `, rName)) @@ -1654,6 +1694,8 @@ resource "aws_launch_template" "test1" { instance_type = "t3.medium" name = "%[1]s-1" user_data = base64encode(templatefile("testdata/node-group-launch-template-user-data.sh.tmpl", { cluster_name = aws_eks_cluster.test.name })) + + vpc_security_group_ids = [aws_security_group.test.id] } resource "aws_launch_template" "test2" { @@ -1661,6 +1703,8 @@ resource "aws_launch_template" "test2" { instance_type = "t3.medium" name = "%[1]s-2" user_data = base64encode(templatefile("testdata/node-group-launch-template-user-data.sh.tmpl", { cluster_name = 
aws_eks_cluster.test.name })) + + vpc_security_group_ids = [aws_security_group.test.id] } resource "aws_eks_node_group" "test" { @@ -1684,6 +1728,7 @@ resource "aws_eks_node_group" "test" { aws_iam_role_policy_attachment.node-AmazonEKSWorkerNodePolicy, aws_iam_role_policy_attachment.node-AmazonEKS_CNI_Policy, aws_iam_role_policy_attachment.node-AmazonEC2ContainerRegistryReadOnly, + aws_iam_role_policy_attachment.node-AmazonEKSWorkerNodeMinimalPolicy, ] } `, rName)) @@ -1702,6 +1747,8 @@ resource "aws_launch_template" "test1" { instance_type = "t3.medium" name = "%[1]s-1" user_data = base64encode(templatefile("testdata/node-group-launch-template-user-data.sh.tmpl", { cluster_name = aws_eks_cluster.test.name })) + + vpc_security_group_ids = [aws_security_group.test.id] } resource "aws_launch_template" "test2" { @@ -1709,6 +1756,8 @@ resource "aws_launch_template" "test2" { instance_type = "t3.medium" name = "%[1]s-2" user_data = base64encode(templatefile("testdata/node-group-launch-template-user-data.sh.tmpl", { cluster_name = aws_eks_cluster.test.name })) + + vpc_security_group_ids = [aws_security_group.test.id] } resource "aws_eks_node_group" "test" { @@ -1732,6 +1781,7 @@ resource "aws_eks_node_group" "test" { aws_iam_role_policy_attachment.node-AmazonEKSWorkerNodePolicy, aws_iam_role_policy_attachment.node-AmazonEKS_CNI_Policy, aws_iam_role_policy_attachment.node-AmazonEC2ContainerRegistryReadOnly, + aws_iam_role_policy_attachment.node-AmazonEKSWorkerNodeMinimalPolicy, ] } `, rName)) @@ -1774,6 +1824,7 @@ resource "aws_eks_node_group" "test" { aws_iam_role_policy_attachment.node-AmazonEKSWorkerNodePolicy, aws_iam_role_policy_attachment.node-AmazonEKS_CNI_Policy, aws_iam_role_policy_attachment.node-AmazonEC2ContainerRegistryReadOnly, + aws_iam_role_policy_attachment.node-AmazonEKSWorkerNodeMinimalPolicy, ] } `, rName)) @@ -1816,6 +1867,7 @@ resource "aws_eks_node_group" "test" { aws_iam_role_policy_attachment.node-AmazonEKSWorkerNodePolicy, 
aws_iam_role_policy_attachment.node-AmazonEKS_CNI_Policy, aws_iam_role_policy_attachment.node-AmazonEC2ContainerRegistryReadOnly, + aws_iam_role_policy_attachment.node-AmazonEKSWorkerNodeMinimalPolicy, ] } `, rName)) @@ -1845,6 +1897,7 @@ resource "aws_eks_node_group" "test" { aws_iam_role_policy_attachment.node-AmazonEKSWorkerNodePolicy, aws_iam_role_policy_attachment.node-AmazonEKS_CNI_Policy, aws_iam_role_policy_attachment.node-AmazonEC2ContainerRegistryReadOnly, + aws_iam_role_policy_attachment.node-AmazonEKSWorkerNodeMinimalPolicy, ] } `, rName)) @@ -1877,6 +1930,7 @@ resource "aws_eks_node_group" "test" { aws_iam_role_policy_attachment.node-AmazonEKSWorkerNodePolicy, aws_iam_role_policy_attachment.node-AmazonEKS_CNI_Policy, aws_iam_role_policy_attachment.node-AmazonEC2ContainerRegistryReadOnly, + aws_iam_role_policy_attachment.node-AmazonEKSWorkerNodeMinimalPolicy, ] } `, rName, publicKey)) @@ -1910,6 +1964,7 @@ resource "aws_eks_node_group" "test" { aws_iam_role_policy_attachment.node-AmazonEKSWorkerNodePolicy, aws_iam_role_policy_attachment.node-AmazonEKS_CNI_Policy, aws_iam_role_policy_attachment.node-AmazonEC2ContainerRegistryReadOnly, + aws_iam_role_policy_attachment.node-AmazonEKSWorkerNodeMinimalPolicy, ] } `, rName, publicKey)) @@ -1933,6 +1988,7 @@ resource "aws_eks_node_group" "test" { aws_iam_role_policy_attachment.node-AmazonEKSWorkerNodePolicy, aws_iam_role_policy_attachment.node-AmazonEKS_CNI_Policy, aws_iam_role_policy_attachment.node-AmazonEC2ContainerRegistryReadOnly, + aws_iam_role_policy_attachment.node-AmazonEKSWorkerNodeMinimalPolicy, ] } `, rName, desiredSize, maxSize, minSize)) @@ -1960,6 +2016,7 @@ resource "aws_eks_node_group" "test" { aws_iam_role_policy_attachment.node-AmazonEKSWorkerNodePolicy, aws_iam_role_policy_attachment.node-AmazonEKS_CNI_Policy, aws_iam_role_policy_attachment.node-AmazonEC2ContainerRegistryReadOnly, + aws_iam_role_policy_attachment.node-AmazonEKSWorkerNodeMinimalPolicy, ] } `, rName, tagKey1, tagValue1)) @@ 
-1988,6 +2045,7 @@ resource "aws_eks_node_group" "test" { aws_iam_role_policy_attachment.node-AmazonEKSWorkerNodePolicy, aws_iam_role_policy_attachment.node-AmazonEKS_CNI_Policy, aws_iam_role_policy_attachment.node-AmazonEC2ContainerRegistryReadOnly, + aws_iam_role_policy_attachment.node-AmazonEKSWorkerNodeMinimalPolicy, ] } `, rName, tagKey1, tagValue1, tagKey2, tagValue2)) @@ -2017,6 +2075,7 @@ resource "aws_eks_node_group" "test" { aws_iam_role_policy_attachment.node-AmazonEKSWorkerNodePolicy, aws_iam_role_policy_attachment.node-AmazonEKS_CNI_Policy, aws_iam_role_policy_attachment.node-AmazonEC2ContainerRegistryReadOnly, + aws_iam_role_policy_attachment.node-AmazonEKSWorkerNodeMinimalPolicy, ] } `, rName, taintKey1, taintValue1, taintEffect1)) @@ -2052,6 +2111,7 @@ resource "aws_eks_node_group" "test" { aws_iam_role_policy_attachment.node-AmazonEKSWorkerNodePolicy, aws_iam_role_policy_attachment.node-AmazonEKS_CNI_Policy, aws_iam_role_policy_attachment.node-AmazonEC2ContainerRegistryReadOnly, + aws_iam_role_policy_attachment.node-AmazonEKSWorkerNodeMinimalPolicy, ] } `, rName, taintKey1, taintValue1, taintEffect1, taintKey2, taintValue2, taintEffect2)) @@ -2079,6 +2139,7 @@ resource "aws_eks_node_group" "test" { aws_iam_role_policy_attachment.node-AmazonEKSWorkerNodePolicy, aws_iam_role_policy_attachment.node-AmazonEKS_CNI_Policy, aws_iam_role_policy_attachment.node-AmazonEC2ContainerRegistryReadOnly, + aws_iam_role_policy_attachment.node-AmazonEKSWorkerNodeMinimalPolicy, ] } `, rName)) @@ -2106,6 +2167,7 @@ resource "aws_eks_node_group" "test" { aws_iam_role_policy_attachment.node-AmazonEKSWorkerNodePolicy, aws_iam_role_policy_attachment.node-AmazonEKS_CNI_Policy, aws_iam_role_policy_attachment.node-AmazonEC2ContainerRegistryReadOnly, + aws_iam_role_policy_attachment.node-AmazonEKSWorkerNodeMinimalPolicy, ] } `, rName)) @@ -2133,6 +2195,7 @@ resource "aws_eks_node_group" "test" { aws_iam_role_policy_attachment.node-AmazonEKSWorkerNodePolicy, 
aws_iam_role_policy_attachment.node-AmazonEKS_CNI_Policy, aws_iam_role_policy_attachment.node-AmazonEC2ContainerRegistryReadOnly, + aws_iam_role_policy_attachment.node-AmazonEKSWorkerNodeMinimalPolicy, ] } `, rName)) @@ -2157,6 +2220,7 @@ resource "aws_eks_node_group" "test" { aws_iam_role_policy_attachment.node-AmazonEKSWorkerNodePolicy, aws_iam_role_policy_attachment.node-AmazonEKS_CNI_Policy, aws_iam_role_policy_attachment.node-AmazonEC2ContainerRegistryReadOnly, + aws_iam_role_policy_attachment.node-AmazonEKSWorkerNodeMinimalPolicy, ] } `, rName)) diff --git a/internal/service/eks/pod_identity_association.go b/internal/service/eks/pod_identity_association.go index 01c693ffe0d8..275d55e4b66c 100644 --- a/internal/service/eks/pod_identity_association.go +++ b/internal/service/eks/pod_identity_association.go @@ -133,7 +133,7 @@ func (r *podIdentityAssociationResource) Create(ctx context.Context, request res input.ClientRequestToken = aws.String(sdkid.UniqueId()) input.Tags = getTagsIn(ctx) - outputRaw, err := tfresource.RetryWhenIsAErrorMessageContains[*awstypes.InvalidParameterException](ctx, propagationTimeout, func() (any, error) { + outputRaw, err := tfresource.RetryWhenIsAErrorMessageContains[any, *awstypes.InvalidParameterException](ctx, propagationTimeout, func(ctx context.Context) (any, error) { return conn.CreatePodIdentityAssociation(ctx, &input) }, "Role provided in the request does not exist") @@ -213,7 +213,7 @@ func (r *podIdentityAssociationResource) Update(ctx context.Context, request res // Set values for unknowns. 
input.ClientRequestToken = aws.String(sdkid.UniqueId()) - outputRaw, err := tfresource.RetryWhenIsAErrorMessageContains[*awstypes.InvalidParameterException](ctx, propagationTimeout, func() (any, error) { + outputRaw, err := tfresource.RetryWhenIsAErrorMessageContains[any, *awstypes.InvalidParameterException](ctx, propagationTimeout, func(ctx context.Context) (any, error) { return conn.UpdatePodIdentityAssociation(ctx, &input) }, "Role provided in the request does not exist") diff --git a/internal/service/eks/service_endpoint_resolver_gen.go b/internal/service/eks/service_endpoint_resolver_gen.go index d215dccbe68a..372cebcfaa0f 100644 --- a/internal/service/eks/service_endpoint_resolver_gen.go +++ b/internal/service/eks/service_endpoint_resolver_gen.go @@ -62,7 +62,7 @@ func (r resolverV2) ResolveEndpoint(ctx context.Context, params eks.EndpointPara }) params.UseFIPS = aws.Bool(false) } else { - err = fmt.Errorf("looking up eks endpoint %q: %s", hostname, err) + err = fmt.Errorf("looking up eks endpoint %q: %w", hostname, err) return } } else { diff --git a/internal/service/eks/service_endpoints_gen_test.go b/internal/service/eks/service_endpoints_gen_test.go index 5d220a622583..4d125b13c040 100644 --- a/internal/service/eks/service_endpoints_gen_test.go +++ b/internal/service/eks/service_endpoints_gen_test.go @@ -521,7 +521,7 @@ func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { ) } -var errCancelOperation = fmt.Errorf("Test: Canceling request") +var errCancelOperation = errors.New("Test: Canceling request") func addCancelRequestMiddleware() func(*middleware.Stack) error { return func(stack *middleware.Stack) error { diff --git a/internal/service/eks/service_package_gen.go b/internal/service/eks/service_package_gen.go index d6cfeed8635c..be71f63e85ba 100644 --- a/internal/service/eks/service_package_gen.go +++ b/internal/service/eks/service_package_gen.go @@ -7,7 +7,6 @@ import ( "unique" "github.com/aws/aws-sdk-go-v2/aws" - 
"github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/eks" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -78,6 +77,7 @@ func (p *servicePackage) SDKDataSources(ctx context.Context) []*inttypes.Service Factory: dataSourceCluster, TypeName: "aws_eks_cluster", Name: "Cluster", + Tags: unique.Make(inttypes.ServicePackageResourceTags{}), Region: unique.Make(inttypes.ResourceRegionDefault()), }, { @@ -195,7 +195,7 @@ func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) ( func(o *eks.Options) { if inContext, ok := conns.FromContext(ctx); ok && inContext.VCREnabled() { tflog.Info(ctx, "overriding retry behavior to immediately return VCR errors") - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(vcr.InteractionNotFoundRetryableFunc)) + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), vcr.InteractionNotFoundRetryableFunc) } }, withExtraOptions(ctx, p, config), diff --git a/internal/service/eks/sweep.go b/internal/service/eks/sweep.go index f3730517dc37..a5da5dcc3e6d 100644 --- a/internal/service/eks/sweep.go +++ b/internal/service/eks/sweep.go @@ -6,10 +6,11 @@ package eks import ( "fmt" "log" + "time" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/eks" - "github.com/aws/aws-sdk-go-v2/service/eks/types" + awstypes "github.com/aws/aws-sdk-go-v2/service/eks/types" multierror "github.com/hashicorp/go-multierror" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-provider-aws/internal/errs" @@ -55,7 +56,7 @@ func sweepAddons(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %w", err) + return fmt.Errorf("getting client: %w", err) } conn := client.EKSClient(ctx) input := &eks.ListClustersInput{} @@ -91,7 
+92,7 @@ func sweepAddons(region string) error { // There are EKS clusters that are listed (and are in the AWS Console) but can't be found. // ¯\_(ツ)_/¯ - if errs.IsA[*types.ResourceNotFoundException](err) { + if errs.IsA[*awstypes.ResourceNotFoundException](err) { break } @@ -124,7 +125,7 @@ func sweepClusters(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.EKSClient(ctx) input := &eks.ListClustersInput{} @@ -144,6 +145,21 @@ func sweepClusters(region string) error { } for _, v := range page.Clusters { + const ( + timeout = 15 * time.Minute + ) + err := updateClusterDeletionProtection(ctx, conn, v, false, timeout) + + // There are EKS clusters that are listed (and are in the AWS Console) but can't be found. + // ¯\_(ツ)_/¯ + if errs.IsA[*awstypes.ResourceNotFoundException](err) { + continue + } + + if err != nil { + log.Printf("[WARN] Setting EKS Cluster %s DeletionProtection=false: %s", v, err) + } + r := resourceCluster() d := r.Data(nil) d.SetId(v) @@ -165,7 +181,7 @@ func sweepFargateProfiles(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %w", err) + return fmt.Errorf("getting client: %w", err) } conn := client.EKSClient(ctx) input := &eks.ListClustersInput{} @@ -201,7 +217,7 @@ func sweepFargateProfiles(region string) error { // There are EKS clusters that are listed (and are in the AWS Console) but can't be found. 
// ¯\_(ツ)_/¯ - if errs.IsA[*types.ResourceNotFoundException](err) { + if errs.IsA[*awstypes.ResourceNotFoundException](err) { break } @@ -234,7 +250,7 @@ func sweepIdentityProvidersConfig(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %w", err) + return fmt.Errorf("getting client: %w", err) } conn := client.EKSClient(ctx) input := &eks.ListClustersInput{} @@ -270,7 +286,7 @@ func sweepIdentityProvidersConfig(region string) error { // There are EKS clusters that are listed (and are in the AWS Console) but can't be found. // ¯\_(ツ)_/¯ - if errs.IsA[*types.ResourceNotFoundException](err) { + if errs.IsA[*awstypes.ResourceNotFoundException](err) { break } @@ -303,7 +319,7 @@ func sweepNodeGroups(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %w", err) + return fmt.Errorf("getting client: %w", err) } conn := client.EKSClient(ctx) input := &eks.ListClustersInput{} @@ -339,7 +355,7 @@ func sweepNodeGroups(region string) error { // There are EKS clusters that are listed (and are in the AWS Console) but can't be found. // ¯\_(ツ)_/¯ - if errs.IsA[*types.ResourceNotFoundException](err) { + if errs.IsA[*awstypes.ResourceNotFoundException](err) { break } diff --git a/internal/service/eks/tags_gen.go b/internal/service/eks/tags_gen.go index 879aecd7f975..a6cfae3447df 100644 --- a/internal/service/eks/tags_gen.go +++ b/internal/service/eks/tags_gen.go @@ -3,8 +3,8 @@ package eks import ( "context" - "fmt" + "github.com/YakDriver/smarterr" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/eks" "github.com/hashicorp/terraform-plugin-log/tflog" @@ -26,7 +26,7 @@ func listTags(ctx context.Context, conn *eks.Client, identifier string, optFns . output, err := conn.ListTagsForResource(ctx, &input, optFns...) 
if err != nil { - return tftags.New(ctx, nil), err + return tftags.New(ctx, nil), smarterr.NewError(err) } return keyValueTags(ctx, output.Tags), nil @@ -38,7 +38,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri tags, err := listTags(ctx, meta.(*conns.AWSClient).EKSClient(ctx), identifier) if err != nil { - return err + return smarterr.NewError(err) } if inContext, ok := tftags.FromContext(ctx); ok { @@ -99,7 +99,7 @@ func updateTags(ctx context.Context, conn *eks.Client, identifier string, oldTag _, err := conn.UntagResource(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("untagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } @@ -114,7 +114,7 @@ func updateTags(ctx context.Context, conn *eks.Client, identifier string, oldTag _, err := conn.TagResource(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("tagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } diff --git a/internal/service/elasticache/cluster.go b/internal/service/elasticache/cluster.go index 44dc99f7b3ed..dfcc1227337a 100644 --- a/internal/service/elasticache/cluster.go +++ b/internal/service/elasticache/cluster.go @@ -20,7 +20,6 @@ import ( awstypes "github.com/aws/aws-sdk-go-v2/service/elasticache/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" - sdkretry "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -770,28 +769,24 @@ func deleteCacheCluster(ctx context.Context, conn *elasticache.Client, cacheClus input.FinalSnapshotIdentifier = aws.String(finalSnapshotID) } - // TODO: Migrate to retry.Operation log.Printf("[DEBUG] Deleting ElastiCache Cache Cluster: %s", cacheClusterID) - err := sdkretry.RetryContext(ctx, 
5*time.Minute, func() *sdkretry.RetryError { + err := tfresource.Retry(ctx, 5*time.Minute, func(ctx context.Context) *tfresource.RetryError { _, err := conn.DeleteCacheCluster(ctx, input) if err != nil { if errs.IsAErrorMessageContains[*awstypes.InvalidCacheClusterStateFault](err, "serving as primary") { - return sdkretry.NonRetryableError(err) + return tfresource.NonRetryableError(err) } if errs.IsAErrorMessageContains[*awstypes.InvalidCacheClusterStateFault](err, "only member of a replication group") { - return sdkretry.NonRetryableError(err) + return tfresource.NonRetryableError(err) } // The cluster may be just snapshotting, so we retry until it's ready for deletion if errs.IsA[*awstypes.InvalidCacheClusterStateFault](err) { - return sdkretry.RetryableError(err) + return tfresource.RetryableError(err) } - return sdkretry.NonRetryableError(err) + return tfresource.NonRetryableError(err) } return nil }) - if tfresource.TimedOut(err) { - _, err = conn.DeleteCacheCluster(ctx, input) - } return err } diff --git a/internal/service/elasticache/engine_version.go b/internal/service/elasticache/engine_version.go index 412413e6ccd0..4ce8e8cdb720 100644 --- a/internal/service/elasticache/engine_version.go +++ b/internal/service/elasticache/engine_version.go @@ -13,6 +13,7 @@ import ( "github.com/YakDriver/regexache" "github.com/aws/aws-sdk-go-v2/aws" gversion "github.com/hashicorp/go-version" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -109,6 +110,22 @@ func customizeDiffEngineVersionForceNewOnDowngrade(_ context.Context, diff *sche return engineVersionForceNewOnDowngrade(diff) } +func customizeDiffEngineForceNewOnDowngrade() schema.CustomizeDiffFunc { + return customdiff.ForceNewIf(names.AttrEngine, func(_ context.Context, diff *schema.ResourceDiff, meta any) bool { + if _, is_global := diff.GetOk("global_replication_group_id"); 
is_global { + return false + } + + if !diff.HasChange(names.AttrEngine) { + return false + } + if old, new := diff.GetChange(names.AttrEngine); old.(string) == engineRedis && new.(string) == engineValkey { + return false + } + return true + }) +} + type getChangeDiffer interface { Get(key string) any GetChange(key string) (any, any) @@ -151,12 +168,17 @@ func engineVersionIsDowngrade(diff getChangeDiffer) (bool, error) { type forceNewDiffer interface { Id() string Get(key string) any + GetOk(key string) (any, bool) GetChange(key string) (any, any) HasChange(key string) bool ForceNew(key string) error } func engineVersionForceNewOnDowngrade(diff forceNewDiffer) error { + if _, is_global := diff.GetOk("global_replication_group_id"); is_global { + return nil + } + if diff.Id() == "" || !diff.HasChange(names.AttrEngineVersion) { return nil } diff --git a/internal/service/elasticache/engine_version_test.go b/internal/service/elasticache/engine_version_test.go index b9101f353c09..cd866181df73 100644 --- a/internal/service/elasticache/engine_version_test.go +++ b/internal/service/elasticache/engine_version_test.go @@ -513,6 +513,10 @@ func (d *mockForceNewDiffer) Get(key string) any { return d.old } +func (d *mockForceNewDiffer) GetOk(key string) (any, bool) { + return "", false +} + func (d *mockForceNewDiffer) HasChange(key string) bool { return d.hasChange || d.old != d.new } @@ -815,7 +819,7 @@ func (d *mockChangesDiffer) GetChange(key string) (any, any) { return d.values[key].GetChange() } -func TestParamGroupNameRequiresMajorVersionUpgrade(t *testing.T) { +func TestParamGroupNameRequiresEngineOrMajorVersionUpgrade(t *testing.T) { t.Parallel() testcases := map[string]struct { @@ -914,7 +918,7 @@ func TestParamGroupNameRequiresMajorVersionUpgrade(t *testing.T) { diff.id = "some id" } - err := tfelasticache.ParamGroupNameRequiresMajorVersionUpgrade(diff) + err := tfelasticache.ParamGroupNameRequiresEngineOrMajorVersionUpgrade(diff) if testcase.expectError == nil { if 
err != nil { diff --git a/internal/service/elasticache/exports_test.go b/internal/service/elasticache/exports_test.go index 0a2778624848..e366fa7c900a 100644 --- a/internal/service/elasticache/exports_test.go +++ b/internal/service/elasticache/exports_test.go @@ -31,21 +31,21 @@ var ( WaitCacheClusterDeleted = waitCacheClusterDeleted WaitReplicationGroupAvailable = waitReplicationGroupAvailable - DeleteCacheCluster = deleteCacheCluster - DiffVersion = diffVersion - EmptyDescription = emptyDescription - EngineMemcached = engineMemcached - EngineRedis = engineRedis - EngineValkey = engineValkey - EngineVersionForceNewOnDowngrade = engineVersionForceNewOnDowngrade - EngineVersionIsDowngrade = engineVersionIsDowngrade - GlobalReplicationGroupRegionPrefixFormat = globalReplicationGroupRegionPrefixFormat - NormalizeEngineVersion = normalizeEngineVersion - ParamGroupNameRequiresMajorVersionUpgrade = paramGroupNameRequiresMajorVersionUpgrade - ValidateClusterEngineVersion = validateClusterEngineVersion - ValidMemcachedVersionString = validMemcachedVersionString - ValidRedisVersionString = validRedisVersionString - ValidValkeyVersionString = validValkeyVersionString + DeleteCacheCluster = deleteCacheCluster + DiffVersion = diffVersion + EmptyDescription = emptyDescription + EngineMemcached = engineMemcached + EngineRedis = engineRedis + EngineValkey = engineValkey + EngineVersionForceNewOnDowngrade = engineVersionForceNewOnDowngrade + EngineVersionIsDowngrade = engineVersionIsDowngrade + GlobalReplicationGroupRegionPrefixFormat = globalReplicationGroupRegionPrefixFormat + NormalizeEngineVersion = normalizeEngineVersion + ParamGroupNameRequiresEngineOrMajorVersionUpgrade = paramGroupNameRequiresEngineOrMajorVersionUpgrade + ValidateClusterEngineVersion = validateClusterEngineVersion + ValidMemcachedVersionString = validMemcachedVersionString + ValidRedisVersionString = validRedisVersionString + ValidValkeyVersionString = validValkeyVersionString ) type ( diff --git 
a/internal/service/elasticache/global_replication_group.go b/internal/service/elasticache/global_replication_group.go index b2e805f88134..9e46067fdb5a 100644 --- a/internal/service/elasticache/global_replication_group.go +++ b/internal/service/elasticache/global_replication_group.go @@ -90,8 +90,10 @@ func resourceGlobalReplicationGroup() *schema.Resource { Computed: true, }, names.AttrEngine: { - Type: schema.TypeString, - Computed: true, + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: validation.StringInSlice([]string{engineRedis, engineValkey}, true), }, names.AttrEngineVersion: { Type: schema.TypeString, @@ -210,7 +212,8 @@ func resourceGlobalReplicationGroup() *schema.Resource { CustomizeDiff: customdiff.All( customizeDiffGlobalReplicationGroupEngineVersionErrorOnDowngrade, - customizeDiffGlobalReplicationGroupParamGroupNameRequiresMajorVersionUpgrade, + customizeDiffEngineForceNewOnDowngrade(), + customizeDiffGlobalReplicationGroupParamGroupNameRequiresEngineOrMajorVersionUpgrade, customdiff.ComputedIf("global_node_groups", diffHasChange("num_node_groups")), ), } @@ -233,18 +236,23 @@ of the Global Replication Group and all Replication Group members. 
The AWS provi Please use the "-replace" option on the terraform plan and apply commands (see https://www.terraform.io/cli/commands/plan#replace-address).`, diff.Id()) } -func customizeDiffGlobalReplicationGroupParamGroupNameRequiresMajorVersionUpgrade(_ context.Context, diff *schema.ResourceDiff, _ any) error { - return paramGroupNameRequiresMajorVersionUpgrade(diff) +func customizeDiffGlobalReplicationGroupParamGroupNameRequiresEngineOrMajorVersionUpgrade(_ context.Context, diff *schema.ResourceDiff, _ any) error { + return paramGroupNameRequiresEngineOrMajorVersionUpgrade(diff) } // parameter_group_name can only be set when doing a major update, // but we also should allow it to stay set afterwards -func paramGroupNameRequiresMajorVersionUpgrade(diff sdkv2.ResourceDiffer) error { +func paramGroupNameRequiresEngineOrMajorVersionUpgrade(diff sdkv2.ResourceDiffer) error { o, n := diff.GetChange(names.AttrParameterGroupName) if o.(string) == n.(string) { return nil } + // param group must be able to change on Redis 7.1 to Valkey 7.2 upgrade + if diff.HasChange(names.AttrEngine) { + return nil + } + if diff.Id() == "" { if !diff.HasChange(names.AttrEngineVersion) { return errors.New("cannot change parameter group name without upgrading major engine version") @@ -262,7 +270,7 @@ func paramGroupNameRequiresMajorVersionUpgrade(diff sdkv2.ResourceDiffer) error if vDiff[0] == 0 && vDiff[1] == 0 { return errors.New("cannot change parameter group name without upgrading major engine version") } - if vDiff[0] != 1 { + if vDiff[0] == 0 { return fmt.Errorf("cannot change parameter group name on minor engine version upgrade, upgrading from %s to %s", oldVersion.String(), newVersion.String()) } } @@ -318,9 +326,25 @@ func resourceGlobalReplicationGroupCreate(ctx context.Context, d *schema.Resourc } } - if v, ok := d.GetOk(names.AttrEngineVersion); ok { + if e, ok := d.GetOk(names.AttrEngine); ok { + if e.(string) == aws.ToString(globalReplicationGroup.Engine) { + 
log.Printf("[DEBUG] Not updating ElastiCache Global Replication Group (%s) engine: no change from %q", d.Id(), e) + } else { + version := d.Get(names.AttrEngineVersion).(string) + p := d.Get(names.AttrParameterGroupName).(string) + if err := updateGlobalReplicationGroup(ctx, conn, d.Id(), globalReplicationGroupEngineVersionMajorUpdater(e.(string), version, p), names.AttrEngine, d.Timeout(schema.TimeoutUpdate)); err != nil { + return sdkdiag.AppendFromErr(diags, err) + } + } + } else if v, ok := d.GetOk(names.AttrEngineVersion); ok { requestedVersion, _ := normalizeEngineVersion(v.(string)) + // backwards-compatibility; imply redis engine if just given engine version + engine, ok := d.GetOk(names.AttrEngine) + if !ok { + engine = engineRedis + } + engineVersion, err := gversion.NewVersion(aws.ToString(globalReplicationGroup.EngineVersion)) if err != nil { return sdkdiag.AppendErrorf(diags, "updating ElastiCache Global Replication Group (%s) engine version on creation: error reading engine version: %s", d.Id(), err) @@ -335,7 +359,7 @@ func resourceGlobalReplicationGroupCreate(ctx context.Context, d *schema.Resourc p := d.Get(names.AttrParameterGroupName).(string) if diff[0] == 1 { - if err := updateGlobalReplicationGroup(ctx, conn, d.Id(), globalReplicationGroupEngineVersionMajorUpdater(v.(string), p), "engine version (major)", d.Timeout(schema.TimeoutCreate)); err != nil { + if err := updateGlobalReplicationGroup(ctx, conn, d.Id(), globalReplicationGroupEngineVersionMajorUpdater(engine.(string), v.(string), p), "engine version (major)", d.Timeout(schema.TimeoutCreate)); err != nil { return sdkdiag.AppendFromErr(diags, err) } } else if diff[1] == 1 { @@ -343,7 +367,7 @@ func resourceGlobalReplicationGroupCreate(ctx context.Context, d *schema.Resourc return sdkdiag.AppendErrorf(diags, "cannot change parameter group name on minor engine version upgrade, upgrading from %s to %s", engineVersion.String(), requestedVersion.String()) } if t, _ := 
regexp.MatchString(`[6-9]\.x`, v.(string)); !t { - if err := updateGlobalReplicationGroup(ctx, conn, d.Id(), globalReplicationGroupEngineVersionMinorUpdater(v.(string)), "engine version (minor)", d.Timeout(schema.TimeoutCreate)); err != nil { + if err := updateGlobalReplicationGroup(ctx, conn, d.Id(), globalReplicationGroupEngineVersionMinorUpdater(engine.(string), v.(string)), "engine version (minor)", d.Timeout(schema.TimeoutCreate)); err != nil { return sdkdiag.AppendFromErr(diags, err) } } @@ -440,20 +464,32 @@ func resourceGlobalReplicationGroupUpdate(ctx context.Context, d *schema.Resourc } } - if d.HasChange(names.AttrEngineVersion) { + if d.HasChange(names.AttrEngine) { + engine := d.Get(names.AttrEngine).(string) + version := d.Get(names.AttrEngineVersion).(string) + p := d.Get(names.AttrParameterGroupName).(string) + if err := updateGlobalReplicationGroup(ctx, conn, d.Id(), globalReplicationGroupEngineVersionMajorUpdater(engine, version, p), "engine version (major)", d.Timeout(schema.TimeoutUpdate)); err != nil { + return sdkdiag.AppendFromErr(diags, err) + } + } else if d.HasChange(names.AttrEngineVersion) { o, n := d.GetChange(names.AttrEngineVersion) newVersion, _ := normalizeEngineVersion(n.(string)) oldVersion, _ := gversion.NewVersion(o.(string)) + // backwards-compatibility; imply redis engine if just given engine version + engine, ok := d.GetOk(names.AttrEngine) + if !ok { + engine = engineRedis + } diff := diffVersion(newVersion, oldVersion) if diff[0] == 1 { p := d.Get(names.AttrParameterGroupName).(string) - if err := updateGlobalReplicationGroup(ctx, conn, d.Id(), globalReplicationGroupEngineVersionMajorUpdater(n.(string), p), "engine version (major)", d.Timeout(schema.TimeoutUpdate)); err != nil { + if err := updateGlobalReplicationGroup(ctx, conn, d.Id(), globalReplicationGroupEngineVersionMajorUpdater(engine.(string), n.(string), p), "engine version (major)", d.Timeout(schema.TimeoutUpdate)); err != nil { return sdkdiag.AppendFromErr(diags, 
err) } } else if diff[1] == 1 { - if err := updateGlobalReplicationGroup(ctx, conn, d.Id(), globalReplicationGroupEngineVersionMinorUpdater(n.(string)), "engine version (minor)", d.Timeout(schema.TimeoutUpdate)); err != nil { + if err := updateGlobalReplicationGroup(ctx, conn, d.Id(), globalReplicationGroupEngineVersionMinorUpdater(engine.(string), n.(string)), "engine version (minor)", d.Timeout(schema.TimeoutUpdate)); err != nil { return sdkdiag.AppendFromErr(diags, err) } } @@ -509,14 +545,16 @@ func globalReplicationGroupDescriptionUpdater(description string) globalReplicat } } -func globalReplicationGroupEngineVersionMinorUpdater(version string) globalReplicationGroupUpdater { +func globalReplicationGroupEngineVersionMinorUpdater(engine, version string) globalReplicationGroupUpdater { return func(input *elasticache.ModifyGlobalReplicationGroupInput) { + input.Engine = aws.String(engine) input.EngineVersion = aws.String(version) } } -func globalReplicationGroupEngineVersionMajorUpdater(version, paramGroupName string) globalReplicationGroupUpdater { +func globalReplicationGroupEngineVersionMajorUpdater(engine, version, paramGroupName string) globalReplicationGroupUpdater { return func(input *elasticache.ModifyGlobalReplicationGroupInput) { + input.Engine = aws.String(engine) input.EngineVersion = aws.String(version) input.CacheParameterGroupName = aws.String(paramGroupName) } @@ -610,7 +648,7 @@ func deleteGlobalReplicationGroup(ctx context.Context, conn *elasticache.Client, RetainPrimaryReplicationGroup: aws.Bool(true), } - _, err := tfresource.RetryWhenIsA[*awstypes.InvalidGlobalReplicationGroupStateFault](ctx, readyTimeout, func() (any, error) { + _, err := tfresource.RetryWhenIsA[any, *awstypes.InvalidGlobalReplicationGroupStateFault](ctx, readyTimeout, func(ctx context.Context) (any, error) { return conn.DeleteGlobalReplicationGroup(ctx, input) }) diff --git a/internal/service/elasticache/global_replication_group_test.go 
b/internal/service/elasticache/global_replication_group_test.go index 5702d8ee3443..99034dd6df32 100644 --- a/internal/service/elasticache/global_replication_group_test.go +++ b/internal/service/elasticache/global_replication_group_test.go @@ -1229,7 +1229,7 @@ func TestAccElastiCacheGlobalReplicationGroup_SetEngineVersionOnUpdate_MinorUpgr CheckDestroy: testAccCheckGlobalReplicationGroupDestroy(ctx, t), Steps: []resource.TestStep{ { - Config: testAccGlobalReplicationGroupConfig_engineVersionInherit(rName, primaryReplicationGroupId, "6.0"), + Config: testAccGlobalReplicationGroupConfig_Redis_engineVersionInherit(rName, primaryReplicationGroupId, "6.0"), Check: resource.ComposeAggregateTestCheckFunc( testAccCheckGlobalReplicationGroupExists(ctx, t, resourceName, &globalReplicationGroup), resource.TestCheckResourceAttrPair(resourceName, "engine_version_actual", primaryReplicationGroupResourceName, "engine_version_actual"), @@ -1266,7 +1266,7 @@ func TestAccElastiCacheGlobalReplicationGroup_SetEngineVersionOnUpdate_MinorUpgr CheckDestroy: testAccCheckGlobalReplicationGroupDestroy(ctx, t), Steps: []resource.TestStep{ { - Config: testAccGlobalReplicationGroupConfig_engineVersionInherit(rName, primaryReplicationGroupId, "6.0"), + Config: testAccGlobalReplicationGroupConfig_Redis_engineVersionInherit(rName, primaryReplicationGroupId, "6.0"), Check: resource.ComposeAggregateTestCheckFunc( testAccCheckGlobalReplicationGroupExists(ctx, t, resourceName, &globalReplicationGroup), resource.TestCheckResourceAttrPair(resourceName, "engine_version_actual", primaryReplicationGroupResourceName, "engine_version_actual"), @@ -1312,7 +1312,7 @@ func TestAccElastiCacheGlobalReplicationGroup_SetEngineVersionOnUpdate_MinorDown CheckDestroy: testAccCheckGlobalReplicationGroupDestroy(ctx, t), Steps: []resource.TestStep{ { - Config: testAccGlobalReplicationGroupConfig_engineVersionInherit(rName, primaryReplicationGroupId, "6.2"), + Config: 
testAccGlobalReplicationGroupConfig_Redis_engineVersionInherit(rName, primaryReplicationGroupId, "6.2"), Check: resource.ComposeAggregateTestCheckFunc( testAccCheckGlobalReplicationGroupExists(ctx, t, resourceName, &globalReplicationGroup), resource.TestCheckResourceAttrPair(resourceName, "engine_version_actual", primaryReplicationGroupResourceName, "engine_version_actual"), @@ -1359,7 +1359,7 @@ func TestAccElastiCacheGlobalReplicationGroup_SetEngineVersionOnUpdate_MajorUpgr CheckDestroy: testAccCheckGlobalReplicationGroupDestroy(ctx, t), Steps: []resource.TestStep{ { - Config: testAccGlobalReplicationGroupConfig_engineVersionInherit(rName, primaryReplicationGroupId, "5.0.6"), + Config: testAccGlobalReplicationGroupConfig_Redis_engineVersionInherit(rName, primaryReplicationGroupId, "5.0.6"), Check: resource.ComposeAggregateTestCheckFunc( testAccCheckGlobalReplicationGroupExists(ctx, t, resourceName, &globalReplicationGroup), resource.TestCheckResourceAttrPair(resourceName, "engine_version_actual", primaryReplicationGroupResourceName, "engine_version_actual"), @@ -1401,7 +1401,7 @@ func TestAccElastiCacheGlobalReplicationGroup_SetEngineVersionOnUpdate_MajorUpgr CheckDestroy: testAccCheckGlobalReplicationGroupDestroy(ctx, t), Steps: []resource.TestStep{ { - Config: testAccGlobalReplicationGroupConfig_engineVersionInherit(rName, primaryReplicationGroupId, "5.0.6"), + Config: testAccGlobalReplicationGroupConfig_Redis_engineVersionInherit(rName, primaryReplicationGroupId, "5.0.6"), Check: resource.ComposeAggregateTestCheckFunc( testAccCheckGlobalReplicationGroupExists(ctx, t, resourceName, &globalReplicationGroup), resource.TestCheckResourceAttrPair(resourceName, "engine_version_actual", primaryReplicationGroupResourceName, "engine_version_actual"), @@ -1444,7 +1444,7 @@ func TestAccElastiCacheGlobalReplicationGroup_SetParameterGroupOnUpdate_NoVersio CheckDestroy: testAccCheckGlobalReplicationGroupDestroy(ctx, t), Steps: []resource.TestStep{ { - Config: 
testAccGlobalReplicationGroupConfig_engineVersionInherit(rName, primaryReplicationGroupId, "6.2"), + Config: testAccGlobalReplicationGroupConfig_Redis_engineVersionInherit(rName, primaryReplicationGroupId, "6.2"), Check: resource.ComposeAggregateTestCheckFunc( testAccCheckGlobalReplicationGroupExists(ctx, t, resourceName, &globalReplicationGroup), resource.TestMatchResourceAttr(resourceName, "engine_version_actual", regexache.MustCompile(`^6\.2\.[[:digit:]]+$`)), @@ -1478,7 +1478,7 @@ func TestAccElastiCacheGlobalReplicationGroup_SetParameterGroupOnUpdate_MinorUpg CheckDestroy: testAccCheckGlobalReplicationGroupDestroy(ctx, t), Steps: []resource.TestStep{ { - Config: testAccGlobalReplicationGroupConfig_engineVersionInherit(rName, primaryReplicationGroupId, "6.0"), + Config: testAccGlobalReplicationGroupConfig_Redis_engineVersionInherit(rName, primaryReplicationGroupId, "6.0"), Check: resource.ComposeAggregateTestCheckFunc( testAccCheckGlobalReplicationGroupExists(ctx, t, resourceName, &globalReplicationGroup), resource.TestMatchResourceAttr(resourceName, "engine_version_actual", regexache.MustCompile(`^6\.0\.[[:digit:]]+$`)), @@ -1527,6 +1527,179 @@ func TestAccElastiCacheGlobalReplicationGroup_UpdateParameterGroupName(t *testin }) } +func TestAccElastiCacheGlobalReplicationGroup_SetEngineOnCreate_ValkeyUpgrade(t *testing.T) { + ctx := acctest.Context(t) + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + + var globalReplicationGroup awstypes.GlobalReplicationGroup + + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + primaryReplicationGroupId := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + resourceName := "aws_elasticache_global_replication_group.test" + primaryReplicationGroupResourceName := "aws_elasticache_replication_group.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheckGlobalReplicationGroup(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, 
names.ElastiCacheServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckGlobalReplicationGroupDestroy(ctx, t), + Steps: []resource.TestStep{ + { + // create global datastore with valkey 8.0 engine version from a redis 7.1 primary replication group + Config: testAccGlobalReplicationGroupConfig_engineParam(rName, primaryReplicationGroupId, "redis", "7.1", "valkey", "8.0", "default.valkey8"), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckGlobalReplicationGroupExists(ctx, t, resourceName, &globalReplicationGroup), + resource.TestMatchResourceAttr(resourceName, "engine_version_actual", regexache.MustCompile(`^8\.0\.[[:digit:]]+$`)), + ), + }, + { + // check import of global datastore + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{names.AttrParameterGroupName}, + }, + { + // refresh primary replication group after being upgraded by the global datastore + RefreshState: true, + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttr(primaryReplicationGroupResourceName, names.AttrEngine, "valkey"), + resource.TestCheckResourceAttr(primaryReplicationGroupResourceName, names.AttrEngineVersion, "8.0"), + resource.TestMatchResourceAttr(primaryReplicationGroupResourceName, names.AttrParameterGroupName, regexache.MustCompile(`^global-datastore-.+$`)), + resource.TestCheckResourceAttrPair(primaryReplicationGroupResourceName, "global_replication_group_id", resourceName, "global_replication_group_id"), + ), + }, + }, + }) +} + +func TestAccElastiCacheGlobalReplicationGroup_SetEngineOnUpdate_ValkeyUpgrade(t *testing.T) { + ctx := acctest.Context(t) + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + + var globalReplicationGroup awstypes.GlobalReplicationGroup + + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + primaryReplicationGroupId := acctest.RandomWithPrefix(t, 
acctest.ResourcePrefix) + resourceName := "aws_elasticache_global_replication_group.test" + primaryReplicationGroupResourceName := "aws_elasticache_replication_group.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheckGlobalReplicationGroup(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.ElastiCacheServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckGlobalReplicationGroupDestroy(ctx, t), + Steps: []resource.TestStep{ + { + // create global datastore using redis 7.1 primary replication group engine version + Config: testAccGlobalReplicationGroupConfig_Redis_engineVersionInherit(rName, primaryReplicationGroupId, "7.1"), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckGlobalReplicationGroupExists(ctx, t, resourceName, &globalReplicationGroup), + resource.TestMatchResourceAttr(resourceName, "engine_version_actual", regexache.MustCompile(`^7\.1\.[[:digit:]]+$`)), + ), + }, + { + // check import of global datastore + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{names.AttrParameterGroupName}, + }, + { + // refresh primary replication group after being associated to global datastore + RefreshState: true, + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttr(primaryReplicationGroupResourceName, names.AttrEngine, "redis"), + resource.TestCheckResourceAttr(primaryReplicationGroupResourceName, names.AttrEngineVersion, "7.1"), + resource.TestMatchResourceAttr(primaryReplicationGroupResourceName, names.AttrParameterGroupName, regexache.MustCompile(`^global-datastore-.+$`)), + resource.TestCheckResourceAttrPair(primaryReplicationGroupResourceName, "global_replication_group_id", resourceName, "global_replication_group_id"), + ), + }, + { + // upgrade engine and version on global datastore + Config: testAccGlobalReplicationGroupConfig_engineParam(rName, 
primaryReplicationGroupId, "redis", "7.1", "valkey", "7.2", "default.valkey7"), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckGlobalReplicationGroupExists(ctx, t, resourceName, &globalReplicationGroup), + resource.TestMatchResourceAttr(resourceName, "engine_version_actual", regexache.MustCompile(`^7\.2\.[[:digit:]]+$`)), + ), + }, + { + // check import of global datastore + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{names.AttrParameterGroupName}, + }, + { + // refresh primary replication group after being upgraded by the global datastore + RefreshState: true, + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttr(primaryReplicationGroupResourceName, names.AttrEngine, "valkey"), + resource.TestCheckResourceAttr(primaryReplicationGroupResourceName, names.AttrEngineVersion, "7.2"), + resource.TestMatchResourceAttr(primaryReplicationGroupResourceName, names.AttrParameterGroupName, regexache.MustCompile(`^global-datastore-.+$`)), + resource.TestCheckResourceAttrPair(primaryReplicationGroupResourceName, "global_replication_group_id", resourceName, "global_replication_group_id"), + ), + }, + }, + }) +} + +func TestAccElastiCacheGlobalReplicationGroup_InheritValkeyEngine_SecondaryReplicationGroup(t *testing.T) { + ctx := acctest.Context(t) + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + + var globalReplicationGroup awstypes.GlobalReplicationGroup + + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + primaryReplicationGroupId := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + secondaryReplicationGroupId := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + resourceName := "aws_elasticache_global_replication_group.test" + primaryReplicationGroupResourceName := "aws_elasticache_replication_group.test" + secondaryReplicationGroupResourceName := "aws_elasticache_replication_group.secondary" + + 
resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckMultipleRegion(t, 2) + testAccPreCheckGlobalReplicationGroup(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.ElastiCacheServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesMultipleRegions(ctx, t, 2), + CheckDestroy: testAccCheckGlobalReplicationGroupDestroy(ctx, t), + Steps: []resource.TestStep{ + { + // create global datastore using Valkey 8.0 primary replication group and add secondary replication group + Config: testAccGlobalReplicationGroupConfig_Valkey_inheritEngine_secondaryReplicationGroup(rName, primaryReplicationGroupId, secondaryReplicationGroupId), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckGlobalReplicationGroupExists(ctx, t, resourceName, &globalReplicationGroup), + resource.TestMatchResourceAttr(resourceName, "engine_version_actual", regexache.MustCompile(`^8\.0\.[[:digit:]]+$`)), + ), + }, + { + // refresh replication groups to pick up all engine and version computed changes + RefreshState: true, + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttr(primaryReplicationGroupResourceName, names.AttrEngine, "valkey"), + resource.TestCheckResourceAttr(primaryReplicationGroupResourceName, names.AttrEngineVersion, "8.0"), + resource.TestMatchResourceAttr(primaryReplicationGroupResourceName, names.AttrParameterGroupName, regexache.MustCompile(`^global-datastore-.+$`)), + resource.TestCheckResourceAttrPair(primaryReplicationGroupResourceName, "global_replication_group_id", resourceName, "global_replication_group_id"), + + resource.TestCheckResourceAttr(secondaryReplicationGroupResourceName, names.AttrEngine, "valkey"), + resource.TestCheckResourceAttr(secondaryReplicationGroupResourceName, names.AttrEngineVersion, "8.0"), + resource.TestMatchResourceAttr(secondaryReplicationGroupResourceName, names.AttrParameterGroupName, regexache.MustCompile(`^global-datastore-.+$`)), + 
resource.TestCheckResourceAttrPair(secondaryReplicationGroupResourceName, "global_replication_group_id", resourceName, "global_replication_group_id"), + ), + }, + }, + }) +} + func testAccCheckGlobalReplicationGroupExists(ctx context.Context, t *testing.T, resourceName string, v *awstypes.GlobalReplicationGroup) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[resourceName] @@ -2069,7 +2242,7 @@ resource "aws_elasticache_replication_group" "test" { `, rName, numNodeGroups, globalNumNodeGroups) } -func testAccGlobalReplicationGroupConfig_engineVersionInherit(rName, primaryReplicationGroupId, repGroupEngineVersion string) string { +func testAccGlobalReplicationGroupConfig_Redis_engineVersionInherit(rName, primaryReplicationGroupId, repGroupEngineVersion string) string { return fmt.Sprintf(` resource "aws_elasticache_global_replication_group" "test" { global_replication_group_id_suffix = %[1]q @@ -2105,10 +2278,6 @@ resource "aws_elasticache_replication_group" "test" { engine_version = %[3]q node_type = "cache.m5.large" num_cache_clusters = 1 - - lifecycle { - ignore_changes = [engine_version] - } } `, rName, primaryReplicationGroupId, repGroupEngineVersion, globalEngineVersion) } @@ -2130,10 +2299,6 @@ resource "aws_elasticache_replication_group" "test" { engine_version = %[3]q node_type = "cache.m5.large" num_cache_clusters = 1 - - lifecycle { - ignore_changes = [engine_version] - } } `, rName, primaryReplicationGroupId, repGroupEngineVersion, globalEngineVersion) } @@ -2156,14 +2321,58 @@ resource "aws_elasticache_replication_group" "test" { engine_version = %[3]q node_type = "cache.m5.large" num_cache_clusters = 1 - - lifecycle { - ignore_changes = [engine_version] - } } `, rName, primaryReplicationGroupId, repGroupEngineVersion, globalEngineVersion, parameterGroup) } +func testAccGlobalReplicationGroupConfig_engineParam(rName, primaryReplicationGroupId, repGroupEngine, repGroupEngineVersion, globalEngine, 
globalEngineVersion, globalParamGroup string) string { + return fmt.Sprintf(` +resource "aws_elasticache_global_replication_group" "test" { + global_replication_group_id_suffix = %[1]q + primary_replication_group_id = aws_elasticache_replication_group.test.id + + engine = %[5]q + engine_version = %[6]q + parameter_group_name = %[7]q +} + +resource "aws_elasticache_replication_group" "test" { + replication_group_id = %[2]q + description = "test" + engine = %[3]q + engine_version = %[4]q + node_type = "cache.m5.large" + num_cache_clusters = 1 +} +`, rName, primaryReplicationGroupId, repGroupEngine, repGroupEngineVersion, globalEngine, globalEngineVersion, globalParamGroup) +} + +func testAccGlobalReplicationGroupConfig_Valkey_inheritEngine_secondaryReplicationGroup(rName, primaryReplicationGroupId, secondaryReplicationGroupId string) string { + return acctest.ConfigCompose(acctest.ConfigMultipleRegionProvider(2), fmt.Sprintf(` +resource "aws_elasticache_global_replication_group" "test" { + global_replication_group_id_suffix = %[1]q + primary_replication_group_id = aws_elasticache_replication_group.test.id +} + +resource "aws_elasticache_replication_group" "test" { + replication_group_id = %[2]q + description = "test" + engine = "valkey" + engine_version = "8.0" + node_type = "cache.m5.large" + num_cache_clusters = 1 +} + +resource "aws_elasticache_replication_group" "secondary" { + provider = awsalternate + + replication_group_id = %[3]q + description = "test secondary" + global_replication_group_id = aws_elasticache_global_replication_group.test.id +} +`, rName, primaryReplicationGroupId, secondaryReplicationGroupId)) +} + func testAccGlobalReplicationGroupConfig_engineVersionCustomParam(rName, primaryReplicationGroupId, repGroupEngineVersion, globalEngineVersion, parameterGroupName, parameterGroupFamily string) string { return fmt.Sprintf(` resource "aws_elasticache_global_replication_group" "test" { @@ -2182,10 +2391,6 @@ resource 
"aws_elasticache_replication_group" "test" { engine_version = %[3]q node_type = "cache.m5.large" num_cache_clusters = 1 - - lifecycle { - ignore_changes = [engine_version] - } } resource "aws_elasticache_parameter_group" "test" { @@ -2213,10 +2418,6 @@ resource "aws_elasticache_replication_group" "test" { engine_version = %[3]q node_type = "cache.m5.large" num_cache_clusters = 1 - - lifecycle { - ignore_changes = [engine_version] - } } `, rName, primaryReplicationGroupId, repGroupEngineVersion, parameterGroup) } diff --git a/internal/service/elasticache/parameter_group.go b/internal/service/elasticache/parameter_group.go index 6111d7314859..4e6a87148dca 100644 --- a/internal/service/elasticache/parameter_group.go +++ b/internal/service/elasticache/parameter_group.go @@ -15,7 +15,6 @@ import ( "github.com/aws/aws-sdk-go-v2/service/elasticache" awstypes "github.com/aws/aws-sdk-go-v2/service/elasticache/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" - sdkretry "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/errs" @@ -293,7 +292,7 @@ func deleteParameterGroup(ctx context.Context, conn *elasticache.Client, name st const ( timeout = 3 * time.Minute ) - _, err := tfresource.RetryWhenIsA[*awstypes.InvalidCacheParameterGroupStateFault](ctx, timeout, func() (any, error) { + _, err := tfresource.RetryWhenIsA[any, *awstypes.InvalidCacheParameterGroupStateFault](ctx, timeout, func(ctx context.Context) (any, error) { return conn.DeleteCacheParameterGroup(ctx, &elasticache.DeleteCacheParameterGroupInput{ CacheParameterGroupName: aws.String(name), }) @@ -304,7 +303,7 @@ func deleteParameterGroup(ctx context.Context, conn *elasticache.Client, name st } if err != nil { - return fmt.Errorf("deleting ElastiCache Parameter Group (%s): %s", name, err) + return fmt.Errorf("deleting 
ElastiCache Parameter Group (%s): %w", name, err) } return err @@ -361,14 +360,13 @@ func resourceResetParameterGroup(ctx context.Context, conn *elasticache.Client, ParameterNameValues: tfslices.Values(parameters), } - // TODO: Migrate to retry.Operation - return sdkretry.RetryContext(ctx, 30*time.Second, func() *sdkretry.RetryError { + return tfresource.Retry(ctx, 30*time.Second, func(ctx context.Context) *tfresource.RetryError { _, err := conn.ResetCacheParameterGroup(ctx, &input) if err != nil { if errs.IsAErrorMessageContains[*awstypes.InvalidCacheParameterGroupStateFault](err, " has pending changes") { - return sdkretry.RetryableError(err) + return tfresource.RetryableError(err) } - return sdkretry.NonRetryableError(err) + return tfresource.NonRetryableError(err) } return nil }) diff --git a/internal/service/elasticache/replication_group.go b/internal/service/elasticache/replication_group.go index ae9ab41ca949..cf3df01e5a42 100644 --- a/internal/service/elasticache/replication_group.go +++ b/internal/service/elasticache/replication_group.go @@ -126,7 +126,7 @@ func resourceReplicationGroup() *schema.Resource { names.AttrEngine: { Type: schema.TypeString, Optional: true, - Default: engineRedis, + Computed: true, ValidateDiagFunc: validation.AllDiag( validation.ToDiagFunc(validation.StringInSlice([]string{engineRedis, engineValkey}, true)), // While the existing validator makes it technically possible to provide an @@ -136,6 +136,7 @@ func resourceReplicationGroup() *schema.Resource { // practitioners that stricter validation will be enforced in v7.0.0. 
verify.CaseInsensitiveMatchDeprecation([]string{engineRedis, engineValkey}), ), + DiffSuppressFunc: suppressDiffIfBelongsToGlobalReplicationGroup, }, names.AttrEngineVersion: { Type: schema.TypeString, @@ -145,6 +146,7 @@ func resourceReplicationGroup() *schema.Resource { validRedisVersionString, validValkeyVersionString, ), + DiffSuppressFunc: suppressDiffIfBelongsToGlobalReplicationGroup, }, "engine_version_actual": { Type: schema.TypeString, @@ -266,7 +268,7 @@ func resourceReplicationGroup() *schema.Resource { Optional: true, Computed: true, DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool { - return strings.HasPrefix(old, "global-datastore-") + return suppressDiffIfBelongsToGlobalReplicationGroup(k, old, new, d) }, }, names.AttrPort: { @@ -415,15 +417,7 @@ func resourceReplicationGroup() *schema.Resource { CustomizeDiff: customdiff.All( replicationGroupValidateMultiAZAutomaticFailover, customizeDiffEngineVersionForceNewOnDowngrade, - customdiff.ForceNewIf(names.AttrEngine, func(_ context.Context, diff *schema.ResourceDiff, meta any) bool { - if !diff.HasChange(names.AttrEngine) { - return false - } - if old, new := diff.GetChange(names.AttrEngine); old.(string) == engineRedis && new.(string) == engineValkey { - return false - } - return true - }), + customizeDiffEngineForceNewOnDowngrade(), customdiff.ComputedIf("member_clusters", func(ctx context.Context, diff *schema.ResourceDiff, meta any) bool { return diff.HasChange("num_cache_clusters") || diff.HasChange("num_node_groups") || @@ -492,8 +486,14 @@ func resourceReplicationGroupCreate(ctx context.Context, d *schema.ResourceData, } input.AutomaticFailoverEnabled = aws.Bool(d.Get("automatic_failover_enabled").(bool)) input.CacheNodeType = aws.String(nodeType) - input.Engine = aws.String(d.Get(names.AttrEngine).(string)) input.TransitEncryptionEnabled = aws.Bool(d.Get("transit_encryption_enabled").(bool)) + + // backwards-compatibility; imply redis engine if empty and not part of global 
replication group + if e, ok := d.GetOk(names.AttrEngine); ok { + input.Engine = aws.String(e.(string)) + } else { + input.Engine = aws.String(engineRedis) + } } if v, ok := d.GetOk("ip_discovery"); ok { @@ -682,8 +682,6 @@ func resourceReplicationGroupRead(ctx context.Context, d *schema.ResourceData, m d.Set("global_replication_group_id", rgp.GlobalReplicationGroupInfo.GlobalReplicationGroupId) } - d.Set(names.AttrEngine, rgp.Engine) - switch rgp.AutomaticFailover { case awstypes.AutomaticFailoverStatusDisabled, awstypes.AutomaticFailoverStatusDisabling: d.Set("automatic_failover_enabled", false) @@ -862,7 +860,7 @@ func resourceReplicationGroupUpdate(ctx context.Context, d *schema.ResourceData, requestUpdate = true } - if old, new := d.GetChange(names.AttrEngine); old.(string) == engineRedis && new.(string) == engineValkey { + if old, new := d.GetChange(names.AttrEngine); old.(string) != new.(string) && new.(string) == engineValkey { if !d.HasChange(names.AttrEngineVersion) { return sdkdiag.AppendErrorf(diags, "must explicitly set '%s' attribute for Replication Group (%s) when updating engine to 'valkey'", names.AttrEngineVersion, d.Id()) } @@ -872,6 +870,14 @@ func resourceReplicationGroupUpdate(ctx context.Context, d *schema.ResourceData, if d.HasChange(names.AttrEngineVersion) { input.EngineVersion = aws.String(d.Get(names.AttrEngineVersion).(string)) + if input.Engine == nil { + // backwards-compatibility; imply redis engine if just given engine version + if e, ok := d.GetOk(names.AttrEngine); ok { + input.Engine = aws.String(e.(string)) + } else { + input.Engine = aws.String(engineRedis) + } + } requestUpdate = true } @@ -1000,7 +1006,7 @@ func resourceReplicationGroupUpdate(ctx context.Context, d *schema.ResourceData, } if err != nil { - return fmt.Errorf("modifying ElastiCache Replication Group (%s): %s", d.Id(), err) + return fmt.Errorf("modifying ElastiCache Replication Group (%s): %w", d.Id(), err) } return nil }) @@ -1022,7 +1028,7 @@ func 
resourceReplicationGroupUpdate(ctx context.Context, d *schema.ResourceData, } if err != nil { - return fmt.Errorf("modifying ElastiCache Replication Group (%s) authentication: %s", d.Id(), err) + return fmt.Errorf("modifying ElastiCache Replication Group (%s) authentication: %w", d.Id(), err) } return nil }) @@ -1082,7 +1088,7 @@ func resourceReplicationGroupDelete(ctx context.Context, d *schema.ResourceData, timeout = 10 * time.Minute // 10 minutes should give any creating/deleting cache clusters or snapshots time to complete. ) log.Printf("[INFO] Deleting ElastiCache Replication Group: %s", d.Id()) - _, err := tfresource.RetryWhenIsA[*awstypes.InvalidReplicationGroupStateFault](ctx, timeout, func() (any, error) { + _, err := tfresource.RetryWhenIsA[any, *awstypes.InvalidReplicationGroupStateFault](ctx, timeout, func(ctx context.Context) (any, error) { return conn.DeleteReplicationGroup(ctx, input) }) @@ -1114,7 +1120,7 @@ func disassociateReplicationGroup(ctx context.Context, conn *elasticache.Client, ReplicationGroupRegion: aws.String(region), } - _, err := tfresource.RetryWhenIsA[*awstypes.InvalidGlobalReplicationGroupStateFault](ctx, timeout, func() (any, error) { + _, err := tfresource.RetryWhenIsA[any, *awstypes.InvalidGlobalReplicationGroupStateFault](ctx, timeout, func(ctx context.Context) (any, error) { return conn.DisassociateGlobalReplicationGroup(ctx, input) }) @@ -1499,3 +1505,8 @@ func replicationGroupValidateAutomaticFailoverNumCacheClusters(_ context.Context } return errors.New(`"num_cache_clusters": must be at least 2 if automatic_failover_enabled is true`) } + +func suppressDiffIfBelongsToGlobalReplicationGroup(k, old, new string, d *schema.ResourceData) bool { + _, has_global_replication_group := d.GetOk("global_replication_group_id") + return has_global_replication_group && !d.IsNewResource() +} diff --git a/internal/service/elasticache/serverless_cache.go b/internal/service/elasticache/serverless_cache.go index 2660b645b23c..b44e3b90efef 
100644 --- a/internal/service/elasticache/serverless_cache.go +++ b/internal/service/elasticache/serverless_cache.go @@ -200,9 +200,20 @@ func (r *serverlessCacheResource) Schema(ctx context.Context, request resource.S Attributes: map[string]schema.Attribute{ "maximum": schema.Int64Attribute{ Optional: true, + Computed: true, + PlanModifiers: []planmodifier.Int64{ + int64planmodifier.UseStateForUnknown(), + }, }, "minimum": schema.Int64Attribute{ Optional: true, + Computed: true, + Validators: []validator.Int64{ + int64validator.Between(1, 5000), + }, + PlanModifiers: []planmodifier.Int64{ + int64planmodifier.UseStateForUnknown(), + }, }, names.AttrUnit: schema.StringAttribute{ CustomType: fwtypes.StringEnumType[awstypes.DataStorageUnit](), @@ -220,15 +231,23 @@ func (r *serverlessCacheResource) Schema(ctx context.Context, request resource.S Attributes: map[string]schema.Attribute{ "maximum": schema.Int64Attribute{ Optional: true, + Computed: true, Validators: []validator.Int64{ int64validator.Between(1000, 15000000), }, + PlanModifiers: []planmodifier.Int64{ + int64planmodifier.UseStateForUnknown(), + }, }, "minimum": schema.Int64Attribute{ Optional: true, + Computed: true, Validators: []validator.Int64{ int64validator.Between(1000, 15000000), }, + PlanModifiers: []planmodifier.Int64{ + int64planmodifier.UseStateForUnknown(), + }, }, }, }, @@ -405,7 +424,7 @@ func (r *serverlessCacheResource) Delete(ctx context.Context, request resource.D FinalSnapshotName: nil, } - _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, 5*time.Minute, func() (any, error) { + _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, 5*time.Minute, func(ctx context.Context) (any, error) { return conn.DeleteServerlessCache(ctx, input) }, errCodeDependencyViolation) diff --git a/internal/service/elasticache/service_endpoint_resolver_gen.go b/internal/service/elasticache/service_endpoint_resolver_gen.go index 0107c53b19f2..771c7affed1b 100644 --- 
a/internal/service/elasticache/service_endpoint_resolver_gen.go +++ b/internal/service/elasticache/service_endpoint_resolver_gen.go @@ -62,7 +62,7 @@ func (r resolverV2) ResolveEndpoint(ctx context.Context, params elasticache.Endp }) params.UseFIPS = aws.Bool(false) } else { - err = fmt.Errorf("looking up elasticache endpoint %q: %s", hostname, err) + err = fmt.Errorf("looking up elasticache endpoint %q: %w", hostname, err) return } } else { diff --git a/internal/service/elasticache/service_endpoints_gen_test.go b/internal/service/elasticache/service_endpoints_gen_test.go index 423d00fa9f7b..e0d2e0ed9ea0 100644 --- a/internal/service/elasticache/service_endpoints_gen_test.go +++ b/internal/service/elasticache/service_endpoints_gen_test.go @@ -521,7 +521,7 @@ func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { ) } -var errCancelOperation = fmt.Errorf("Test: Canceling request") +var errCancelOperation = errors.New("Test: Canceling request") func addCancelRequestMiddleware() func(*middleware.Stack) error { return func(stack *middleware.Stack) error { diff --git a/internal/service/elasticache/service_package_gen.go b/internal/service/elasticache/service_package_gen.go index 7dcd5509367f..ad3affdb2a1c 100644 --- a/internal/service/elasticache/service_package_gen.go +++ b/internal/service/elasticache/service_package_gen.go @@ -7,7 +7,6 @@ import ( "unique" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/elasticache" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -181,7 +180,7 @@ func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) ( func(o *elasticache.Options) { if inContext, ok := conns.FromContext(ctx); ok && inContext.VCREnabled() { tflog.Info(ctx, "overriding retry behavior to immediately return VCR errors") - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), 
retry.IsErrorRetryableFunc(vcr.InteractionNotFoundRetryableFunc)) + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), vcr.InteractionNotFoundRetryableFunc) } }, withExtraOptions(ctx, p, config), diff --git a/internal/service/elasticache/subnet_group.go b/internal/service/elasticache/subnet_group.go index d9fe8d98dba3..e733ba52e0f4 100644 --- a/internal/service/elasticache/subnet_group.go +++ b/internal/service/elasticache/subnet_group.go @@ -178,7 +178,7 @@ func resourceSubnetGroupDelete(ctx context.Context, d *schema.ResourceData, meta conn := meta.(*conns.AWSClient).ElastiCacheClient(ctx) log.Printf("[DEBUG] Deleting ElastiCache Subnet Group: %s", d.Id()) - _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, 5*time.Minute, func() (any, error) { + _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, 5*time.Minute, func(ctx context.Context) (any, error) { return conn.DeleteCacheSubnetGroup(ctx, &elasticache.DeleteCacheSubnetGroupInput{ CacheSubnetGroupName: aws.String(d.Id()), }) diff --git a/internal/service/elasticache/sweep.go b/internal/service/elasticache/sweep.go index ef6f111aa6c5..7036c47f56aa 100644 --- a/internal/service/elasticache/sweep.go +++ b/internal/service/elasticache/sweep.go @@ -90,7 +90,7 @@ func sweepClusters(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %w", err) + return fmt.Errorf("getting client: %w", err) } input := &elasticache.DescribeCacheClustersInput{ ShowCacheClustersNotInReplicationGroups: aws.Bool(true), @@ -139,7 +139,7 @@ func sweepGlobalReplicationGroups(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %w", err) + return fmt.Errorf("getting client: %w", err) } input := &elasticache.DescribeGlobalReplicationGroupsInput{ ShowMemberInfo: aws.Bool(true), @@ -188,7 +188,7 
@@ func sweepParameterGroups(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %w", err) + return fmt.Errorf("getting client: %w", err) } input := &elasticache.DescribeCacheParameterGroupsInput{} conn := client.ElastiCacheClient(ctx) @@ -236,7 +236,7 @@ func sweepReplicationGroups(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %w", err) + return fmt.Errorf("getting client: %w", err) } input := &elasticache.DescribeReplicationGroupsInput{} conn := client.ElastiCacheClient(ctx) @@ -280,7 +280,7 @@ func sweepServerlessCaches(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %w", err) + return fmt.Errorf("getting client: %w", err) } conn := client.ElastiCacheClient(ctx) input := &elasticache.DescribeServerlessCachesInput{} @@ -319,7 +319,7 @@ func sweepSubnetGroups(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %w", err) + return fmt.Errorf("getting client: %w", err) } conn := client.ElastiCacheClient(ctx) input := &elasticache.DescribeCacheSubnetGroupsInput{} @@ -367,7 +367,7 @@ func sweepUsers(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %w", err) + return fmt.Errorf("getting client: %w", err) } conn := client.ElastiCacheClient(ctx) input := &elasticache.DescribeUsersInput{} @@ -415,7 +415,7 @@ func sweepUserGroups(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return 
fmt.Errorf("error getting client: %w", err) + return fmt.Errorf("getting client: %w", err) } conn := client.ElastiCacheClient(ctx) input := &elasticache.DescribeUserGroupsInput{} diff --git a/internal/service/elasticache/tags_gen.go b/internal/service/elasticache/tags_gen.go index 41dd1e9aa0d7..b752dbc54380 100644 --- a/internal/service/elasticache/tags_gen.go +++ b/internal/service/elasticache/tags_gen.go @@ -3,9 +3,9 @@ package elasticache import ( "context" - "fmt" "time" + "github.com/YakDriver/smarterr" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/elasticache" awstypes "github.com/aws/aws-sdk-go-v2/service/elasticache/types" @@ -26,15 +26,15 @@ func listTags(ctx context.Context, conn *elasticache.Client, identifier string, ResourceName: aws.String(identifier), } - output, err := tfresource.RetryGWhenIsAErrorMessageContains[*elasticache.ListTagsForResourceOutput, *awstypes.InvalidReplicationGroupStateFault](ctx, 15*time.Minute, - func() (*elasticache.ListTagsForResourceOutput, error) { + output, err := tfresource.RetryWhenIsAErrorMessageContains[*elasticache.ListTagsForResourceOutput, *awstypes.InvalidReplicationGroupStateFault](ctx, 15*time.Minute, + func(ctx context.Context) (*elasticache.ListTagsForResourceOutput, error) { return conn.ListTagsForResource(ctx, &input, optFns...) 
}, "not in available state", ) if err != nil { - return tftags.New(ctx, nil), err + return tftags.New(ctx, nil), smarterr.NewError(err) } return keyValueTags(ctx, output.TagList), nil @@ -46,7 +46,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri tags, err := listTags(ctx, meta.(*conns.AWSClient).ElastiCacheClient(ctx), identifier) if err != nil { - return err + return smarterr.NewError(err) } if inContext, ok := tftags.FromContext(ctx); ok { @@ -130,15 +130,15 @@ func updateTags(ctx context.Context, conn *elasticache.Client, identifier string TagKeys: removedTags.Keys(), } - _, err := tfresource.RetryWhenIsAErrorMessageContains[*awstypes.InvalidReplicationGroupStateFault](ctx, 15*time.Minute, - func() (any, error) { + _, err := tfresource.RetryWhenIsAErrorMessageContains[any, *awstypes.InvalidReplicationGroupStateFault](ctx, 15*time.Minute, + func(ctx context.Context) (any, error) { return conn.RemoveTagsFromResource(ctx, &input, optFns...) }, "not in available state", ) if err != nil { - return fmt.Errorf("untagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } @@ -150,15 +150,15 @@ func updateTags(ctx context.Context, conn *elasticache.Client, identifier string Tags: svcTags(updatedTags), } - _, err := tfresource.RetryWhenIsAErrorMessageContains[*awstypes.InvalidReplicationGroupStateFault](ctx, 15*time.Minute, - func() (any, error) { + _, err := tfresource.RetryWhenIsAErrorMessageContains[any, *awstypes.InvalidReplicationGroupStateFault](ctx, 15*time.Minute, + func(ctx context.Context) (any, error) { return conn.AddTagsToResource(ctx, &input, optFns...) 
}, "not in available state", ) if err != nil { - return fmt.Errorf("tagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } diff --git a/internal/service/elasticache/user_group.go b/internal/service/elasticache/user_group.go index c55bd6b8b993..a5413c1df313 100644 --- a/internal/service/elasticache/user_group.go +++ b/internal/service/elasticache/user_group.go @@ -173,7 +173,7 @@ func resourceUserGroupUpdate(ctx context.Context, d *schema.ResourceData, meta a _, err := conn.ModifyUserGroup(ctx, input) - if err != nil { + if err != nil && !errs.IsAErrorMessageContains[*awstypes.InvalidParameterValueException](err, "is not a member of user group") { return sdkdiag.AppendErrorf(diags, "updating ElastiCache User Group (%q): %s", d.Id(), err) } diff --git a/internal/service/elasticache/user_group_association.go b/internal/service/elasticache/user_group_association.go index bf322c7807e2..3c2244ce3431 100644 --- a/internal/service/elasticache/user_group_association.go +++ b/internal/service/elasticache/user_group_association.go @@ -72,7 +72,7 @@ func resourceUserGroupAssociationCreate(ctx context.Context, d *schema.ResourceD UserIdsToAdd: []string{userID}, } - if _, err := tfresource.RetryWhenIsA[*awstypes.InvalidUserGroupStateFault](ctx, d.Timeout(schema.TimeoutCreate), func() (any, error) { + if _, err := tfresource.RetryWhenIsA[any, *awstypes.InvalidUserGroupStateFault](ctx, d.Timeout(schema.TimeoutCreate), func(ctx context.Context) (any, error) { return conn.ModifyUserGroup(ctx, input) }); err != nil { return sdkdiag.AppendErrorf(diags, "creating ElastiCache User Group Association (%s): %s", id, err) @@ -126,7 +126,7 @@ func resourceUserGroupAssociationDelete(ctx context.Context, d *schema.ResourceD userGroupID, userID := parts[0], parts[1] log.Printf("[INFO] Deleting ElastiCache User Group Association: %s", d.Id()) - _, err = tfresource.RetryWhenIsA[*awstypes.InvalidUserGroupStateFault](ctx, d.Timeout(schema.TimeoutDelete), func() (any, 
error) { + _, err = tfresource.RetryWhenIsA[any, *awstypes.InvalidUserGroupStateFault](ctx, d.Timeout(schema.TimeoutDelete), func(ctx context.Context) (any, error) { return conn.ModifyUserGroup(ctx, &elasticache.ModifyUserGroupInput{ UserGroupId: aws.String(userGroupID), UserIdsToRemove: []string{userID}, diff --git a/internal/service/elasticache/user_group_test.go b/internal/service/elasticache/user_group_test.go index ee676f46ddc3..cabba0a4f4ba 100644 --- a/internal/service/elasticache/user_group_test.go +++ b/internal/service/elasticache/user_group_test.go @@ -10,6 +10,7 @@ import ( awstypes "github.com/aws/aws-sdk-go-v2/service/elasticache/types" "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/plancheck" "github.com/hashicorp/terraform-plugin-testing/terraform" "github.com/hashicorp/terraform-provider-aws/internal/acctest" "github.com/hashicorp/terraform-provider-aws/internal/retry" @@ -52,6 +53,8 @@ func TestAccElastiCacheUserGroup_update(t *testing.T) { var userGroup awstypes.UserGroup rName := acctest.RandomWithPrefix(t, "tf-acc") resourceName := "aws_elasticache_user_group.test" + user1ResourceName := "aws_elasticache_user.test1" + user2ResourceName := "aws_elasticache_user.test2" acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, @@ -76,7 +79,105 @@ func TestAccElastiCacheUserGroup_update(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "user_group_id", rName), resource.TestCheckResourceAttr(resourceName, names.AttrEngine, "redis"), ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectResourceAction(user1ResourceName, plancheck.ResourceActionNoop), + plancheck.ExpectResourceAction(user2ResourceName, plancheck.ResourceActionNoop), + }, + }, }, + { + Config: testAccUserGroupConfig_basic(rName), + Check: 
resource.ComposeTestCheckFunc( + testAccCheckUserGroupExists(ctx, t, resourceName, &userGroup), + resource.TestCheckResourceAttr(resourceName, "user_ids.#", "1"), + resource.TestCheckResourceAttr(resourceName, "user_group_id", rName), + resource.TestCheckResourceAttr(resourceName, names.AttrEngine, "redis"), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectResourceAction(user1ResourceName, plancheck.ResourceActionNoop), + plancheck.ExpectResourceAction(user2ResourceName, plancheck.ResourceActionNoop), + }, + }, + }, + }, + }) +} + +func TestAccElastiCacheUserGroup_rotate(t *testing.T) { + ctx := acctest.Context(t) + var userGroup awstypes.UserGroup + rName := acctest.RandomWithPrefix(t, "tf-acc") + resourceName := "aws_elasticache_user_group.test" + user1ResourceName := "aws_elasticache_user.test1" + user2ResourceName := "aws_elasticache_user.test2" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.ElastiCacheServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckUserGroupDestroy(ctx, t), + Steps: []resource.TestStep{ + { + Config: testAccUserGroupConfig_basic(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckUserGroupExists(ctx, t, resourceName, &userGroup), + resource.TestCheckResourceAttr(resourceName, "user_ids.#", "1"), + resource.TestCheckResourceAttr(resourceName, "user_group_id", rName), + resource.TestCheckResourceAttr(resourceName, names.AttrEngine, "redis"), + ), + }, + { + Config: testAccUserGroupConfig_rotate(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckUserGroupExists(ctx, t, resourceName, &userGroup), + resource.TestCheckResourceAttr(resourceName, "user_ids.#", "2"), + resource.TestCheckResourceAttr(resourceName, "user_group_id", rName), + 
resource.TestCheckResourceAttr(resourceName, names.AttrEngine, "redis"), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectResourceAction(user1ResourceName, plancheck.ResourceActionNoop), + plancheck.ExpectResourceAction(user2ResourceName, plancheck.ResourceActionReplace), + }, + }, + }, + { + Config: testAccUserGroupConfig_basic(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckUserGroupExists(ctx, t, resourceName, &userGroup), + resource.TestCheckResourceAttr(resourceName, "user_ids.#", "1"), + resource.TestCheckResourceAttr(resourceName, "user_group_id", rName), + resource.TestCheckResourceAttr(resourceName, names.AttrEngine, "redis"), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectResourceAction(user1ResourceName, plancheck.ResourceActionNoop), + plancheck.ExpectResourceAction(user2ResourceName, plancheck.ResourceActionReplace), + }, + }, + }, + }, + }) +} + +func TestAccElastiCacheUserGroup_engineValkey(t *testing.T) { + ctx := acctest.Context(t) + var userGroup awstypes.UserGroup + rName := acctest.RandomWithPrefix(t, "tf-acc") + resourceName := "aws_elasticache_user_group.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.ElastiCacheServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckUserGroupDestroy(ctx, t), + Steps: []resource.TestStep{ { Config: testAccUserGroupConfig_basic(rName), Check: resource.ComposeTestCheckFunc( @@ -168,6 +269,11 @@ func TestAccElastiCacheUserGroup_disappears(t *testing.T) { acctest.CheckResourceDisappears(ctx, acctest.Provider, tfelasticache.ResourceUserGroup(), resourceName), ), ExpectNonEmptyPlan: true, 
+ ConfigPlanChecks: resource.ConfigPlanChecks{ + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + }, + }, }, }, }) @@ -246,6 +352,32 @@ resource "aws_elasticache_user_group" "test" { `, rName)) } +func testAccUserGroupConfig_rotate(rName string) string { + return acctest.ConfigCompose(acctest.ConfigAvailableAZsNoOptIn(), fmt.Sprintf(` +resource "aws_elasticache_user" "test1" { + user_id = "%[1]s-1" + user_name = "default" + access_string = "on ~app::* -@all +@read +@hash +@bitmap +@geo -setbit -bitfield -hset -hsetnx -hmset -hincrby -hincrbyfloat -hdel -bitop -geoadd -georadius -georadiusbymember" + engine = "REDIS" + passwords = ["password123456789"] +} + +resource "aws_elasticache_user" "test2" { + user_id = "%[1]s-3" + user_name = "username1" + access_string = "on ~app::* -@all +@read +@hash +@bitmap +@geo -setbit -bitfield -hset -hsetnx -hmset -hincrby -hincrbyfloat -hdel -bitop -geoadd -georadius -georadiusbymember" + engine = "REDIS" + passwords = ["password123456789"] +} + +resource "aws_elasticache_user_group" "test" { + user_group_id = %[1]q + engine = "REDIS" + user_ids = [aws_elasticache_user.test1.user_id, aws_elasticache_user.test2.user_id] +} +`, rName)) +} + func testAccUserGroupConfig_multiple(rName string) string { return acctest.ConfigCompose(acctest.ConfigAvailableAZsNoOptIn(), fmt.Sprintf(` resource "aws_elasticache_user" "test1" { @@ -282,6 +414,14 @@ resource "aws_elasticache_user" "test1" { passwords = ["password123456789"] } +resource "aws_elasticache_user" "test2" { + user_id = "%[1]s-2" + user_name = "username1" + access_string = "on ~app::* -@all +@read +@hash +@bitmap +@geo -setbit -bitfield -hset -hsetnx -hmset -hincrby -hincrbyfloat -hdel -bitop -geoadd -georadius -georadiusbymember" + engine = "REDIS" + passwords = ["password123456789"] +} + resource "aws_elasticache_user_group" "test" { user_group_id = %[1]q engine = "VALKEY" diff --git 
a/internal/service/elasticbeanstalk/application.go b/internal/service/elasticbeanstalk/application.go index b3973fab7fd9..35fac41f3a20 100644 --- a/internal/service/elasticbeanstalk/application.go +++ b/internal/service/elasticbeanstalk/application.go @@ -104,7 +104,7 @@ func resourceApplicationCreate(ctx context.Context, d *schema.ResourceData, meta const ( timeout = 30 * time.Second ) - _, err = tfresource.RetryWhenNotFound(ctx, timeout, func() (any, error) { + _, err = tfresource.RetryWhenNotFound(ctx, timeout, func(ctx context.Context) (any, error) { return findApplicationByName(ctx, conn, d.Id()) }) @@ -211,7 +211,7 @@ func resourceApplicationDelete(ctx context.Context, d *schema.ResourceData, meta const ( timeout = 10 * time.Second ) - _, err = tfresource.RetryUntilNotFound(ctx, timeout, func() (any, error) { + _, err = tfresource.RetryUntilNotFound(ctx, timeout, func(ctx context.Context) (any, error) { return findApplicationByName(ctx, conn, d.Id()) }) diff --git a/internal/service/elasticbeanstalk/application_version_test.go b/internal/service/elasticbeanstalk/application_version_test.go index 01e0d5f92821..1961677a8294 100644 --- a/internal/service/elasticbeanstalk/application_version_test.go +++ b/internal/service/elasticbeanstalk/application_version_test.go @@ -220,8 +220,8 @@ resource "aws_elastic_beanstalk_application" "default" { resource "aws_elastic_beanstalk_application_version" "default" { application = aws_elastic_beanstalk_application.default.name name = "tf-test-version-label-%d" - bucket = aws_s3_bucket.default.id - key = aws_s3_object.default.id + bucket = aws_s3_object.default.bucket + key = aws_s3_object.default.key } `, randInt, randInt, randInt) } @@ -246,8 +246,8 @@ resource "aws_elastic_beanstalk_application" "first" { resource "aws_elastic_beanstalk_application_version" "first" { application = aws_elastic_beanstalk_application.first.name name = "tf-test-version-label-%d" - bucket = aws_s3_bucket.default.id - key = 
aws_s3_object.default.id + bucket = aws_s3_object.default.bucket + key = aws_s3_object.default.key } resource "aws_elastic_beanstalk_application" "second" { @@ -258,8 +258,8 @@ resource "aws_elastic_beanstalk_application" "second" { resource "aws_elastic_beanstalk_application_version" "second" { application = aws_elastic_beanstalk_application.second.name name = "tf-test-version-label-%d" - bucket = aws_s3_bucket.default.id - key = aws_s3_object.default.id + bucket = aws_s3_object.default.bucket + key = aws_s3_object.default.key } `, randInt, randInt, randInt, randInt, randInt) } @@ -284,8 +284,8 @@ resource "aws_elastic_beanstalk_application" "default" { resource "aws_elastic_beanstalk_application_version" "default" { application = aws_elastic_beanstalk_application.default.name name = "tf-test-version-label-%[1]d" - bucket = aws_s3_bucket.default.id - key = aws_s3_object.default.id + bucket = aws_s3_object.default.bucket + key = aws_s3_object.default.key tags = { firstTag = "%[2]s" @@ -315,8 +315,8 @@ resource "aws_elastic_beanstalk_application" "default" { resource "aws_elastic_beanstalk_application_version" "default" { application = aws_elastic_beanstalk_application.default.name name = "tf-test-version-label-%[1]d" - bucket = aws_s3_bucket.default.id - key = aws_s3_object.default.id + bucket = aws_s3_object.default.bucket + key = aws_s3_object.default.key tags = { firstTag = "%[2]s" @@ -347,8 +347,8 @@ resource "aws_elastic_beanstalk_application" "default" { resource "aws_elastic_beanstalk_application_version" "default" { application = aws_elastic_beanstalk_application.default.name name = "tf-test-version-label-%d" - bucket = aws_s3_bucket.default.id - key = aws_s3_object.default.id + bucket = aws_s3_object.default.bucket + key = aws_s3_object.default.key process = %s } `, randInt, randInt, randInt, process) diff --git a/internal/service/elasticbeanstalk/configuration_template_test.go b/internal/service/elasticbeanstalk/configuration_template_test.go index 
156a9ede902d..c5147fe795f8 100644 --- a/internal/service/elasticbeanstalk/configuration_template_test.go +++ b/internal/service/elasticbeanstalk/configuration_template_test.go @@ -11,6 +11,7 @@ import ( awstypes "github.com/aws/aws-sdk-go-v2/service/elasticbeanstalk/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/plancheck" "github.com/hashicorp/terraform-plugin-testing/terraform" "github.com/hashicorp/terraform-provider-aws/internal/acctest" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -137,6 +138,51 @@ func TestAccElasticBeanstalkConfigurationTemplate_settings(t *testing.T) { }) } +func TestAccElasticBeanstalkConfigurationTemplate_migrate_settingsResourceDefault(t *testing.T) { + ctx := acctest.Context(t) + var config awstypes.ConfigurationSettingsDescription + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_elastic_beanstalk_configuration_template.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.ElasticBeanstalkServiceID), + CheckDestroy: testAccCheckConfigurationTemplateDestroy(ctx), + Steps: []resource.TestStep{ + { + ExternalProviders: map[string]resource.ExternalProvider{ + "aws": { + Source: "hashicorp/aws", + VersionConstraint: "6.14.1", + }, + }, + Config: testAccConfigurationTemplateConfig_setting(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckConfigurationTemplateExists(ctx, resourceName, &config), + ), + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Config: testAccConfigurationTemplateConfig_setting(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckConfigurationTemplateExists(ctx, resourceName, &config), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + 
plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + }, + }, + }) +} + func testAccCheckConfigurationTemplateDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { conn := acctest.Provider.Meta().(*conns.AWSClient).ElasticBeanstalkClient(ctx) diff --git a/internal/service/elasticbeanstalk/environment.go b/internal/service/elasticbeanstalk/environment.go index d3a799eeb882..713c2f0c61a6 100644 --- a/internal/service/elasticbeanstalk/environment.go +++ b/internal/service/elasticbeanstalk/environment.go @@ -50,6 +50,7 @@ func settingSchema() *schema.Resource { "resource": { Type: schema.TypeString, Optional: true, + Default: "", // This default is required to work around an error seen is some situations with Unknown values }, names.AttrValue: { Type: schema.TypeString, @@ -95,7 +96,7 @@ func resourceEnvironment() *schema.Resource { }, SchemaVersion: 1, - MigrateState: EnvironmentMigrateState, + MigrateState: environmentMigrateState, SchemaFunc: func() map[string]*schema.Schema { return map[string]*schema.Schema{ @@ -334,19 +335,17 @@ func resourceEnvironmentRead(ctx context.Context, d *schema.ResourceData, meta a return sdkdiag.AppendErrorf(diags, "reading Elastic Beanstalk Environment (%s) resources: %s", d.Id(), err) } - applicationName := aws.ToString(env.ApplicationName) - environmentName := aws.ToString(env.EnvironmentName) - input := &elasticbeanstalk.DescribeConfigurationSettingsInput{ - ApplicationName: aws.String(applicationName), - EnvironmentName: aws.String(environmentName), + input := elasticbeanstalk.DescribeConfigurationSettingsInput{ + ApplicationName: env.ApplicationName, + EnvironmentName: env.EnvironmentName, } - 
configurationSettings, err := findConfigurationSettings(ctx, conn, input) + configurationSettings, err := findConfigurationSettings(ctx, conn, &input) if err != nil { return sdkdiag.AppendErrorf(diags, "reading Elastic Beanstalk Environment (%s) configuration settings: %s", d.Id(), err) } - d.Set("application", applicationName) + d.Set("application", env.ApplicationName) d.Set(names.AttrARN, env.EnvironmentArn) if err := d.Set("autoscaling_groups", flattenAutoScalingGroups(resources.EnvironmentResources.AutoScalingGroups)); err != nil { return sdkdiag.AppendErrorf(diags, "setting autoscaling_groups: %s", err) @@ -375,7 +374,7 @@ func resourceEnvironmentRead(ctx context.Context, d *schema.ResourceData, meta a if err := d.Set("load_balancers", flattenLoadBalancers(resources.EnvironmentResources.LoadBalancers)); err != nil { return sdkdiag.AppendErrorf(diags, "setting load_balancers: %s", err) } - d.Set(names.AttrName, environmentName) + d.Set(names.AttrName, env.EnvironmentName) d.Set("platform_arn", env.PlatformArn) if err := d.Set("queues", flattenQueues(resources.EnvironmentResources.Queues)); err != nil { return sdkdiag.AppendErrorf(diags, "setting queues: %s", err) diff --git a/internal/service/elasticbeanstalk/environment_migrate.go b/internal/service/elasticbeanstalk/environment_migrate.go index 4caf838542a9..b08592449fec 100644 --- a/internal/service/elasticbeanstalk/environment_migrate.go +++ b/internal/service/elasticbeanstalk/environment_migrate.go @@ -10,7 +10,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" ) -func EnvironmentMigrateState(v int, is *terraform.InstanceState, meta any) (*terraform.InstanceState, error) { +func environmentMigrateState(v int, is *terraform.InstanceState, meta any) (*terraform.InstanceState, error) { switch v { case 0: log.Println("[INFO] Found AWS Elastic Beanstalk Environment State v0; migrating to v1") diff --git a/internal/service/elasticbeanstalk/environment_test.go 
b/internal/service/elasticbeanstalk/environment_test.go index a5af0a9eb5a3..200a3cce8a90 100644 --- a/internal/service/elasticbeanstalk/environment_test.go +++ b/internal/service/elasticbeanstalk/environment_test.go @@ -18,7 +18,11 @@ import ( awstypes "github.com/aws/aws-sdk-go-v2/service/elasticbeanstalk/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/knownvalue" + "github.com/hashicorp/terraform-plugin-testing/plancheck" + "github.com/hashicorp/terraform-plugin-testing/statecheck" "github.com/hashicorp/terraform-plugin-testing/terraform" + "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" "github.com/hashicorp/terraform-provider-aws/internal/acctest" "github.com/hashicorp/terraform-provider-aws/internal/conns" tfelasticbeanstalk "github.com/hashicorp/terraform-provider-aws/internal/service/elasticbeanstalk" @@ -47,13 +51,17 @@ func TestAccElasticBeanstalkEnvironment_basic(t *testing.T) { Config: testAccEnvironmentConfig_basic(rName), Check: resource.ComposeTestCheckFunc( testAccCheckEnvironmentExists(ctx, resourceName, &app), - acctest.CheckResourceAttrRegionalARN(ctx, resourceName, names.AttrARN, "elasticbeanstalk", fmt.Sprintf("environment/%s/%s", rName, rName)), + acctest.CheckResourceAttrRegionalARNFormat(ctx, resourceName, names.AttrARN, "elasticbeanstalk", "environment/{application}/{name}"), resource.TestMatchResourceAttr(resourceName, "autoscaling_groups.0", beanstalkAsgNameRegexp), resource.TestMatchResourceAttr(resourceName, "endpoint_url", beanstalkEndpointURL), resource.TestMatchResourceAttr(resourceName, "instances.0", beanstalkInstancesNameRegexp), resource.TestMatchResourceAttr(resourceName, "launch_configurations.0", beanstalkLcNameRegexp), resource.TestMatchResourceAttr(resourceName, "load_balancers.0", beanstalkElbNameRegexp), ), + ConfigStateChecks: []statecheck.StateCheck{ + 
statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("setting"), knownvalue.SetExact(settingsChecks_basic())), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("all_settings"), knownvalue.SetPartial(settingsChecks_basic())), + }, }, { ResourceName: resourceName, @@ -64,6 +72,17 @@ func TestAccElasticBeanstalkEnvironment_basic(t *testing.T) { "wait_for_ready_timeout", }, }, + { + Config: testAccEnvironmentConfig_basic(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckEnvironmentExists(ctx, resourceName, &app), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + }, }, }) } @@ -445,10 +464,10 @@ func TestAccElasticBeanstalkEnvironment_platformARN(t *testing.T) { var app awstypes.EnvironmentDescription rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_elastic_beanstalk_environment.test" - platformNameWithVersion1 := "Python 3.9 running on 64bit Amazon Linux 2023/4.0.9" + platformNameWithVersion1 := "Python 3.12 running on 64bit Amazon Linux 2023/4.7.2" rValue1 := sdkacctest.RandIntRange(1000, 2000) rValue1Str := strconv.Itoa(rValue1) - platformNameWithVersion2 := "Python 3.11 running on 64bit Amazon Linux 2023/4.1.3" + platformNameWithVersion2 := "Python 3.13 running on 64bit Amazon Linux 2023/4.7.2" rValue2 := sdkacctest.RandIntRange(3000, 4000) rValue2Str := strconv.Itoa(rValue2) @@ -497,6 +516,274 @@ func TestAccElasticBeanstalkEnvironment_platformARN(t *testing.T) { }) } +func TestAccElasticBeanstalkEnvironment_migrate_settingsResourceDefault(t *testing.T) { + ctx := acctest.Context(t) + var app awstypes.EnvironmentDescription + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_elastic_beanstalk_environment.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, 
names.ElasticBeanstalkServiceID), + CheckDestroy: testAccCheckEnvironmentDestroy(ctx), + Steps: []resource.TestStep{ + { + ExternalProviders: map[string]resource.ExternalProvider{ + "aws": { + Source: "hashicorp/aws", + VersionConstraint: "6.14.1", + }, + }, + Config: testAccEnvironmentConfig_basic(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckEnvironmentExists(ctx, resourceName, &app), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("setting"), knownvalue.SetExact(settingsChecks_basic())), + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Config: testAccEnvironmentConfig_basic(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckEnvironmentExists(ctx, resourceName, &app), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + }, + }, + }) +} + +func TestAccElasticBeanstalkEnvironment_taint(t *testing.T) { + ctx := acctest.Context(t) + var app awstypes.EnvironmentDescription + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + value1 := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + value2 := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_elastic_beanstalk_environment.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.ElasticBeanstalkServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckEnvironmentDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: 
testAccEnvironmentConfig_setting_ComputedValue(rName, value1), + Check: resource.ComposeTestCheckFunc( + testAccCheckEnvironmentExists(ctx, resourceName, &app), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("setting"), knownvalue.SetExact(settingsChecks_ValueChanged(value1))), + }, + }, + { + Taint: []string{"terraform_data.test"}, + Config: testAccEnvironmentConfig_setting_ComputedValue(rName, value2), + Check: resource.ComposeTestCheckFunc( + testAccCheckEnvironmentExists(ctx, resourceName, &app), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectResourceAction("terraform_data.test", plancheck.ResourceActionReplace), + + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New("setting"), knownvalue.SetExact([]knownvalue.Check{ + knownvalue.ObjectExact(map[string]knownvalue.Check{ + names.AttrNamespace: knownvalue.StringExact("aws:ec2:vpc"), + names.AttrName: knownvalue.StringExact("Subnets"), + "resource": knownvalue.StringExact(""), + // "value": Unknown value, + }), + knownvalue.ObjectExact(map[string]knownvalue.Check{ + names.AttrNamespace: knownvalue.StringExact("aws:ec2:vpc"), + names.AttrName: knownvalue.StringExact("AssociatePublicIpAddress"), + "resource": knownvalue.StringExact(""), + names.AttrValue: knownvalue.StringExact(acctest.CtTrue), + }), + knownvalue.ObjectExact(map[string]knownvalue.Check{ + names.AttrNamespace: knownvalue.StringExact("aws:autoscaling:launchconfiguration"), + names.AttrName: knownvalue.StringExact("IamInstanceProfile"), + "resource": knownvalue.StringExact(""), + names.AttrValue: knownvalue.NotNull(), // Pair: aws_iam_instance_profile.test.name + }), + knownvalue.ObjectExact(map[string]knownvalue.Check{ + names.AttrNamespace: knownvalue.StringExact("aws:elasticbeanstalk:application:environment"), + names.AttrName: 
knownvalue.StringExact("ENV_TEST"), + "resource": knownvalue.StringExact(""), + // "value": Unknown value, + }), + })), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("setting"), knownvalue.SetExact(settingsChecks_ValueChanged(value2))), + }, + }, + }, + }) +} + +func TestAccElasticBeanstalkEnvironment_setting_ComputedValue(t *testing.T) { + ctx := acctest.Context(t) + var app awstypes.EnvironmentDescription + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + value1 := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + value2 := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_elastic_beanstalk_environment.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.ElasticBeanstalkServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckEnvironmentDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccEnvironmentConfig_setting_ComputedValue(rName, value1), + Check: resource.ComposeTestCheckFunc( + testAccCheckEnvironmentExists(ctx, resourceName, &app), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("setting"), knownvalue.SetExact(settingsChecks_ValueChanged(value1))), + }, + }, + { + Config: testAccEnvironmentConfig_setting_ComputedValue(rName, value2), + Check: resource.ComposeTestCheckFunc( + testAccCheckEnvironmentExists(ctx, resourceName, &app), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectResourceAction("terraform_data.test", plancheck.ResourceActionUpdate), + + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New("setting"), knownvalue.SetExact([]knownvalue.Check{ + 
knownvalue.ObjectExact(map[string]knownvalue.Check{ + names.AttrNamespace: knownvalue.StringExact("aws:ec2:vpc"), + names.AttrName: knownvalue.StringExact("Subnets"), + "resource": knownvalue.StringExact(""), + // "value": Unknown value, + }), + knownvalue.ObjectExact(map[string]knownvalue.Check{ + names.AttrNamespace: knownvalue.StringExact("aws:ec2:vpc"), + names.AttrName: knownvalue.StringExact("AssociatePublicIpAddress"), + "resource": knownvalue.StringExact(""), + names.AttrValue: knownvalue.StringExact(acctest.CtTrue), + }), + knownvalue.ObjectExact(map[string]knownvalue.Check{ + names.AttrNamespace: knownvalue.StringExact("aws:autoscaling:launchconfiguration"), + names.AttrName: knownvalue.StringExact("IamInstanceProfile"), + "resource": knownvalue.StringExact(""), + names.AttrValue: knownvalue.NotNull(), // Pair: aws_iam_instance_profile.test.name + }), + knownvalue.ObjectExact(map[string]knownvalue.Check{ + names.AttrNamespace: knownvalue.StringExact("aws:elasticbeanstalk:application:environment"), + names.AttrName: knownvalue.StringExact("ENV_TEST"), + "resource": knownvalue.StringExact(""), + // "value": Unknown value, + }), + })), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("setting"), knownvalue.SetExact(settingsChecks_ValueChanged(value2))), + }, + }, + }, + }) +} + +func TestAccElasticBeanstalkEnvironment_setting_ForceNew(t *testing.T) { + ctx := acctest.Context(t) + var app awstypes.EnvironmentDescription + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + value1 := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + value2 := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := 
"aws_elastic_beanstalk_environment.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.ElasticBeanstalkServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckEnvironmentDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccEnvironmentConfig_setting_ForceNew(rName, value1), + Check: resource.ComposeTestCheckFunc( + testAccCheckEnvironmentExists(ctx, resourceName, &app), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("setting"), knownvalue.SetExact(settingsChecks_ValueChanged(value1))), + }, + }, + { + Config: testAccEnvironmentConfig_setting_ForceNew(rName, value2), + Check: resource.ComposeTestCheckFunc( + testAccCheckEnvironmentExists(ctx, resourceName, &app), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectResourceAction("terraform_data.test", plancheck.ResourceActionReplace), + + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New("setting"), knownvalue.SetExact([]knownvalue.Check{ + knownvalue.ObjectExact(map[string]knownvalue.Check{ + names.AttrNamespace: knownvalue.StringExact("aws:ec2:vpc"), + names.AttrName: knownvalue.StringExact("Subnets"), + "resource": knownvalue.StringExact(""), + // "value": Unknown value, + }), + knownvalue.ObjectExact(map[string]knownvalue.Check{ + names.AttrNamespace: knownvalue.StringExact("aws:ec2:vpc"), + names.AttrName: knownvalue.StringExact("AssociatePublicIpAddress"), + "resource": knownvalue.StringExact(""), + names.AttrValue: knownvalue.StringExact(acctest.CtTrue), + }), + knownvalue.ObjectExact(map[string]knownvalue.Check{ + names.AttrNamespace: knownvalue.StringExact("aws:autoscaling:launchconfiguration"), + names.AttrName: knownvalue.StringExact("IamInstanceProfile"), + 
"resource": knownvalue.StringExact(""), + names.AttrValue: knownvalue.NotNull(), // Pair: aws_iam_instance_profile.test.name + }), + knownvalue.ObjectExact(map[string]knownvalue.Check{ + names.AttrNamespace: knownvalue.StringExact("aws:elasticbeanstalk:application:environment"), + names.AttrName: knownvalue.StringExact("ENV_TEST"), + "resource": knownvalue.StringExact(""), + // "value": Unknown value, + }), + })), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("setting"), knownvalue.SetExact(settingsChecks_ValueChanged(value2))), + }, + }, + }, + }) +} + func testAccCheckEnvironmentDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { conn := acctest.Provider.Meta().(*conns.AWSClient).ElasticBeanstalkClient(ctx) @@ -561,7 +848,7 @@ func testAccVerifyConfig(ctx context.Context, env *awstypes.EnvironmentDescripti }) if err != nil { - return fmt.Errorf("Error describing config settings in testAccVerifyConfig: %s", err) + return fmt.Errorf("Error describing config settings in testAccVerifyConfig: %w", err) } // should only be 1 environment @@ -800,6 +1087,52 @@ resource "aws_elastic_beanstalk_environment" "test" { `, rName)) } +func settingsChecks_basic() []knownvalue.Check { + return []knownvalue.Check{ + knownvalue.ObjectExact(map[string]knownvalue.Check{ + names.AttrNamespace: knownvalue.StringExact("aws:ec2:vpc"), + names.AttrName: knownvalue.StringExact("VPCId"), + "resource": knownvalue.StringExact(""), + names.AttrValue: knownvalue.NotNull(), // Pair: aws_vpc.test.id + }), + + knownvalue.ObjectExact(map[string]knownvalue.Check{ + names.AttrNamespace: knownvalue.StringExact("aws:ec2:vpc"), + 
names.AttrName: knownvalue.StringExact("Subnets"), + "resource": knownvalue.StringExact(""), + names.AttrValue: knownvalue.NotNull(), // Pair: aws_subnet.test[0].id + }), + + knownvalue.ObjectExact(map[string]knownvalue.Check{ + names.AttrNamespace: knownvalue.StringExact("aws:ec2:vpc"), + names.AttrName: knownvalue.StringExact("AssociatePublicIpAddress"), + "resource": knownvalue.StringExact(""), + names.AttrValue: knownvalue.StringExact(acctest.CtTrue), + }), + + knownvalue.ObjectExact(map[string]knownvalue.Check{ + names.AttrNamespace: knownvalue.StringExact("aws:autoscaling:launchconfiguration"), + names.AttrName: knownvalue.StringExact("SecurityGroups"), + "resource": knownvalue.StringExact(""), + names.AttrValue: knownvalue.NotNull(), // Pair: aws_security_group.test.id + }), + + knownvalue.ObjectExact(map[string]knownvalue.Check{ + names.AttrNamespace: knownvalue.StringExact("aws:autoscaling:launchconfiguration"), + names.AttrName: knownvalue.StringExact("IamInstanceProfile"), + "resource": knownvalue.StringExact(""), + names.AttrValue: knownvalue.NotNull(), // Pair: aws_iam_instance_profile.test.name + }), + + knownvalue.ObjectExact(map[string]knownvalue.Check{ + names.AttrNamespace: knownvalue.StringExact("aws:elasticbeanstalk:environment"), + names.AttrName: knownvalue.StringExact("ServiceRole"), + "resource": knownvalue.StringExact(""), + names.AttrValue: knownvalue.NotNull(), // Pair: aws_iam_role.service_role.name + }), + } +} + func testAccEnvironmentConfig_platformARN(rName, platformNameWithVersion string, rValue int) string { return acctest.ConfigCompose(testAccEnvironmentConfig_base(rName), fmt.Sprintf(` resource "aws_elastic_beanstalk_environment" "test" { @@ -1377,8 +1710,8 @@ resource "aws_s3_object" "test" { resource "aws_elastic_beanstalk_application_version" "test" { application = aws_elastic_beanstalk_application.test.name - bucket = aws_s3_bucket.test.id - key = aws_s3_object.test.id + bucket = aws_s3_object.test.bucket + key = 
aws_s3_object.test.key name = "%[1]s-1" } @@ -1441,8 +1774,8 @@ resource "aws_s3_object" "test" { resource "aws_elastic_beanstalk_application_version" "test" { application = aws_elastic_beanstalk_application.test.name - bucket = aws_s3_bucket.test.id - key = aws_s3_object.test.id + bucket = aws_s3_object.test.bucket + key = aws_s3_object.test.key name = "%[1]s-2" } @@ -1675,3 +2008,94 @@ EOF } `, rName, publicKey, email)) } + +func testAccEnvironmentConfig_setting_ComputedValue(rName, value string) string { + return acctest.ConfigCompose( + testAccEnvironmentConfig_setting_ValueChange(rName), + fmt.Sprintf(` +resource "terraform_data" "test" { + input = %[1]q +} +`, value)) +} + +func testAccEnvironmentConfig_setting_ForceNew(rName, value string) string { + return acctest.ConfigCompose( + testAccEnvironmentConfig_setting_ValueChange(rName), + fmt.Sprintf(` +resource "terraform_data" "test" { + input = %[2]q + triggers_replace = [%[2]q] +} +`, rName, value)) +} + +func testAccEnvironmentConfig_setting_ValueChange(rName string) string { + return acctest.ConfigCompose( + testAccEnvironmentConfig_base(rName), + fmt.Sprintf(` +resource "aws_elastic_beanstalk_environment" "test" { + application = aws_elastic_beanstalk_application.test.name + name = %[1]q + solution_stack_name = data.aws_elastic_beanstalk_solution_stack.test.name + + setting { + namespace = "aws:ec2:vpc" + name = "Subnets" + # This contrived example is a simple way to trigger the error with computed values. + # It should not be used in production configurations. 
+ value = replace("${aws_subnet.test[0].id}${terraform_data.test.output}", terraform_data.test.output, "") + } + + setting { + namespace = "aws:ec2:vpc" + name = "AssociatePublicIpAddress" + value = "true" + } + + setting { + namespace = "aws:autoscaling:launchconfiguration" + name = "IamInstanceProfile" + value = aws_iam_instance_profile.test.name + } + + setting { + namespace = "aws:elasticbeanstalk:application:environment" + name = "ENV_TEST" + value = terraform_data.test.output + } +} +`, rName)) +} + +func settingsChecks_ValueChanged(envVal string) []knownvalue.Check { + return []knownvalue.Check{ + knownvalue.ObjectExact(map[string]knownvalue.Check{ + names.AttrNamespace: knownvalue.StringExact("aws:ec2:vpc"), + names.AttrName: knownvalue.StringExact("Subnets"), + "resource": knownvalue.StringExact(""), + names.AttrValue: knownvalue.NotNull(), // Pair: aws_subnet.test[0].id + }), + + knownvalue.ObjectExact(map[string]knownvalue.Check{ + names.AttrNamespace: knownvalue.StringExact("aws:ec2:vpc"), + names.AttrName: knownvalue.StringExact("AssociatePublicIpAddress"), + "resource": knownvalue.StringExact(""), + names.AttrValue: knownvalue.StringExact(acctest.CtTrue), + }), + + knownvalue.ObjectExact(map[string]knownvalue.Check{ + names.AttrNamespace: knownvalue.StringExact("aws:autoscaling:launchconfiguration"), + names.AttrName: knownvalue.StringExact("IamInstanceProfile"), + "resource": knownvalue.StringExact(""), + names.AttrValue: knownvalue.NotNull(), // Pair: aws_iam_instance_profile.test.name + }), + + knownvalue.ObjectExact(map[string]knownvalue.Check{ + names.AttrNamespace: knownvalue.StringExact("aws:elasticbeanstalk:application:environment"), + names.AttrName: knownvalue.StringExact("ENV_TEST"), + "resource": knownvalue.StringExact(""), + names.AttrValue: knownvalue.StringExact(envVal), + }), + } +} diff --git a/internal/service/elasticbeanstalk/exports_test.go b/internal/service/elasticbeanstalk/exports_test.go index 07aef7be767f..c72489ced556 100644 
--- a/internal/service/elasticbeanstalk/exports_test.go +++ b/internal/service/elasticbeanstalk/exports_test.go @@ -15,4 +15,6 @@ var ( FindConfigurationSettingsByTwoPartKey = findConfigurationSettingsByTwoPartKey FindEnvironmentByID = findEnvironmentByID HostedZoneIDs = hostedZoneIDs + + EnvironmentMigrateState = environmentMigrateState ) diff --git a/internal/service/elasticbeanstalk/hosted_zone_data_source.go b/internal/service/elasticbeanstalk/hosted_zone_data_source.go index 74046908d2d6..af852e779fc7 100644 --- a/internal/service/elasticbeanstalk/hosted_zone_data_source.go +++ b/internal/service/elasticbeanstalk/hosted_zone_data_source.go @@ -17,32 +17,44 @@ import ( // See https://docs.aws.amazon.com/general/latest/gr/elasticbeanstalk.html var hostedZoneIDs = map[string]string{ - endpoints.AfSouth1RegionID: "Z1EI3BVKMKK4AM", - endpoints.ApSoutheast1RegionID: "Z16FZ9L249IFLT", - endpoints.ApSoutheast2RegionID: "Z2PCDNR3VC2G1N", - endpoints.ApSoutheast3RegionID: "Z05913172VM7EAZB40TA8", - endpoints.ApEast1RegionID: "ZPWYUBWRU171A", + endpoints.AfSouth1RegionID: "Z1EI3BVKMKK4AM", + endpoints.ApEast1RegionID: "ZPWYUBWRU171A", + // endpoints.ApEast2RegionID: "", endpoints.ApNortheast1RegionID: "Z1R25G3KIG2GBW", endpoints.ApNortheast2RegionID: "Z3JE5OI70TWKCP", endpoints.ApNortheast3RegionID: "ZNE5GEY1TIAGY", endpoints.ApSouth1RegionID: "Z18NTBI3Y7N9TZ", + // endpoints.ApSouth2RegionID: "", + endpoints.ApSoutheast1RegionID: "Z16FZ9L249IFLT", + endpoints.ApSoutheast2RegionID: "Z2PCDNR3VC2G1N", + endpoints.ApSoutheast3RegionID: "Z05913172VM7EAZB40TA8", + // endpoints.ApSoutheast4RegionID: "", + endpoints.ApSoutheast5RegionID: "Z18NTBI3Y7N9TZ", + // endpoints.ApSoutheast6RegionID: "", + endpoints.ApSoutheast7RegionID: "Z1R25G3KIG2GBW", endpoints.CaCentral1RegionID: "ZJFCZL7SSZB5I", - endpoints.EuCentral1RegionID: "Z1FRNW7UH4DEZJ", - endpoints.EuNorth1RegionID: "Z23GO28BZ5AETM", - endpoints.EuSouth1RegionID: "Z10VDYYOA2JFKM", - endpoints.EuWest1RegionID: 
"Z2NYPWQ7DFZAZH", - endpoints.EuWest2RegionID: "Z1GKAAAUGATPF1", - endpoints.EuWest3RegionID: "Z5WN6GAYWG5OB", - endpoints.IlCentral1RegionID: "Z02941091PERNCB1MI5H7", - // endpoints.MeCentral1RegionID: "", + // endpoints.CaWest1RegionID: "", + // endpoints.CnNorth1RegionID: "", + // endpoints.CnNorthwest1RegionID: "", + endpoints.EuCentral1RegionID: "Z1FRNW7UH4DEZJ", + // endpoints.EuCentral2RegionID: "", + endpoints.EuNorth1RegionID: "Z23GO28BZ5AETM", + endpoints.EuSouth1RegionID: "Z10VDYYOA2JFKM", + endpoints.EuSouth2RegionID: "Z23GO28BZ5AETM", + endpoints.EuWest1RegionID: "Z2NYPWQ7DFZAZH", + endpoints.EuWest2RegionID: "Z1GKAAAUGATPF1", + endpoints.EuWest3RegionID: "Z5WN6GAYWG5OB", + endpoints.IlCentral1RegionID: "Z02941091PERNCB1MI5H7", + endpoints.MeCentral1RegionID: "Z10X7K2B4QSOFV", endpoints.MeSouth1RegionID: "Z2BBTEKR2I36N2", + // endpoints.MxCentral1RegionID: "", endpoints.SaEast1RegionID: "Z10X7K2B4QSOFV", endpoints.UsEast1RegionID: "Z117KPS5GTRQ2G", endpoints.UsEast2RegionID: "Z14LCN19Q5QHIC", - endpoints.UsWest1RegionID: "Z1LQECGX5PH1X", - endpoints.UsWest2RegionID: "Z38NKT9BP95V3O", endpoints.UsGovEast1RegionID: "Z35TSARG0EJ4VU", endpoints.UsGovWest1RegionID: "Z4KAURWC4UUUG", + endpoints.UsWest1RegionID: "Z1LQECGX5PH1X", + endpoints.UsWest2RegionID: "Z38NKT9BP95V3O", } // @SDKDataSource("aws_elastic_beanstalk_hosted_zone", name="Hosted Zone") diff --git a/internal/service/elasticbeanstalk/service_endpoint_resolver_gen.go b/internal/service/elasticbeanstalk/service_endpoint_resolver_gen.go index 5d2f46016fab..72ff1e92221e 100644 --- a/internal/service/elasticbeanstalk/service_endpoint_resolver_gen.go +++ b/internal/service/elasticbeanstalk/service_endpoint_resolver_gen.go @@ -62,7 +62,7 @@ func (r resolverV2) ResolveEndpoint(ctx context.Context, params elasticbeanstalk }) params.UseFIPS = aws.Bool(false) } else { - err = fmt.Errorf("looking up elasticbeanstalk endpoint %q: %s", hostname, err) + err = fmt.Errorf("looking up elasticbeanstalk endpoint %q: 
%w", hostname, err) return } } else { diff --git a/internal/service/elasticbeanstalk/service_endpoints_gen_test.go b/internal/service/elasticbeanstalk/service_endpoints_gen_test.go index 48386baf9f63..dff0e84a0d46 100644 --- a/internal/service/elasticbeanstalk/service_endpoints_gen_test.go +++ b/internal/service/elasticbeanstalk/service_endpoints_gen_test.go @@ -601,7 +601,7 @@ func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { ) } -var errCancelOperation = fmt.Errorf("Test: Canceling request") +var errCancelOperation = errors.New("Test: Canceling request") func addCancelRequestMiddleware() func(*middleware.Stack) error { return func(stack *middleware.Stack) error { diff --git a/internal/service/elasticbeanstalk/service_package_gen.go b/internal/service/elasticbeanstalk/service_package_gen.go index e70854e8bbf3..cdf4a14adf24 100644 --- a/internal/service/elasticbeanstalk/service_package_gen.go +++ b/internal/service/elasticbeanstalk/service_package_gen.go @@ -7,7 +7,6 @@ import ( "unique" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/elasticbeanstalk" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -113,7 +112,7 @@ func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) ( func(o *elasticbeanstalk.Options) { if inContext, ok := conns.FromContext(ctx); ok && inContext.VCREnabled() { tflog.Info(ctx, "overriding retry behavior to immediately return VCR errors") - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(vcr.InteractionNotFoundRetryableFunc)) + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), vcr.InteractionNotFoundRetryableFunc) } }, withExtraOptions(ctx, p, config), diff --git a/internal/service/elasticbeanstalk/sweep.go b/internal/service/elasticbeanstalk/sweep.go index ece174aee5b0..b2467aaa78ee 100644 --- 
a/internal/service/elasticbeanstalk/sweep.go +++ b/internal/service/elasticbeanstalk/sweep.go @@ -31,7 +31,7 @@ func sweepApplications(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.ElasticBeanstalkClient(ctx) input := &elasticbeanstalk.DescribeApplicationsInput{} @@ -69,7 +69,7 @@ func sweepEnvironments(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %w", err) + return fmt.Errorf("getting client: %w", err) } conn := client.ElasticBeanstalkClient(ctx) input := &elasticbeanstalk.DescribeEnvironmentsInput{ diff --git a/internal/service/elasticbeanstalk/tags_gen.go b/internal/service/elasticbeanstalk/tags_gen.go index 1241b03cb3e5..1bdb0eed2a8a 100644 --- a/internal/service/elasticbeanstalk/tags_gen.go +++ b/internal/service/elasticbeanstalk/tags_gen.go @@ -3,8 +3,8 @@ package elasticbeanstalk import ( "context" - "fmt" + "github.com/YakDriver/smarterr" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/elasticbeanstalk" awstypes "github.com/aws/aws-sdk-go-v2/service/elasticbeanstalk/types" @@ -27,7 +27,7 @@ func listTags(ctx context.Context, conn *elasticbeanstalk.Client, identifier str output, err := conn.ListTagsForResource(ctx, &input, optFns...) 
if err != nil { - return tftags.New(ctx, nil), err + return tftags.New(ctx, nil), smarterr.NewError(err) } return keyValueTags(ctx, output.ResourceTags), nil @@ -39,7 +39,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri tags, err := listTags(ctx, meta.(*conns.AWSClient).ElasticBeanstalkClient(ctx), identifier) if err != nil { - return err + return smarterr.NewError(err) } if inContext, ok := tftags.FromContext(ctx); ok { @@ -131,7 +131,7 @@ func updateTags(ctx context.Context, conn *elasticbeanstalk.Client, identifier s _, err := conn.UpdateTagsForResource(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("tagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } return nil diff --git a/internal/service/elasticsearch/acc_test.go b/internal/service/elasticsearch/acc_test.go index 2424584bdaf1..0825803c6af3 100644 --- a/internal/service/elasticsearch/acc_test.go +++ b/internal/service/elasticsearch/acc_test.go @@ -29,7 +29,7 @@ func testAccCheckPolicyMatch(resource, attr, expectedPolicy string) resource.Tes areEquivalent, err := awspolicy.PoliciesAreEquivalent(given, expectedPolicy) if err != nil { - return fmt.Errorf("Comparing AWS Policies failed: %s", err) + return fmt.Errorf("Comparing AWS Policies failed: %w", err) } if !areEquivalent { diff --git a/internal/service/elasticsearch/domain.go b/internal/service/elasticsearch/domain.go index 3e6afac48bfd..010e1dc5da85 100644 --- a/internal/service/elasticsearch/domain.go +++ b/internal/service/elasticsearch/domain.go @@ -653,7 +653,7 @@ func resourceDomainCreate(ctx context.Context, d *schema.ResourceData, meta any) } outputRaw, err := tfresource.RetryWhen(ctx, propagationTimeout, - func() (any, error) { + func(ctx context.Context) (any, error) { return conn.CreateElasticsearchDomain(ctx, &input) }, func(err error) (bool, error) { diff --git a/internal/service/elasticsearch/domain_policy.go b/internal/service/elasticsearch/domain_policy.go index 
59a047e23abf..2081c0cfb7ca 100644 --- a/internal/service/elasticsearch/domain_policy.go +++ b/internal/service/elasticsearch/domain_policy.go @@ -71,8 +71,8 @@ func resourceDomainPolicyUpsert(ctx context.Context, d *schema.ResourceData, met DomainName: aws.String(domainName), } - _, err = tfresource.RetryWhenIsAErrorMessageContains[*awstypes.ValidationException](ctx, propagationTimeout, - func() (any, error) { + _, err = tfresource.RetryWhenIsAErrorMessageContains[any, *awstypes.ValidationException](ctx, propagationTimeout, + func(ctx context.Context) (any, error) { return conn.UpdateElasticsearchDomainConfig(ctx, input) }, "A change/update is in progress") diff --git a/internal/service/elasticsearch/domain_saml_options.go b/internal/service/elasticsearch/domain_saml_options.go index 70554bbd719a..f81c07666e60 100644 --- a/internal/service/elasticsearch/domain_saml_options.go +++ b/internal/service/elasticsearch/domain_saml_options.go @@ -120,8 +120,8 @@ func resourceDomainSAMLOptionsPut(ctx context.Context, d *schema.ResourceData, m DomainName: aws.String(domainName), } - _, err := tfresource.RetryWhenIsAErrorMessageContains[*awstypes.ValidationException](ctx, propagationTimeout, - func() (any, error) { + _, err := tfresource.RetryWhenIsAErrorMessageContains[any, *awstypes.ValidationException](ctx, propagationTimeout, + func(ctx context.Context) (any, error) { return conn.UpdateElasticsearchDomainConfig(ctx, input) }, "A change/update is in progress") diff --git a/internal/service/elasticsearch/service_endpoint_resolver_gen.go b/internal/service/elasticsearch/service_endpoint_resolver_gen.go index 91b9cc230e39..05ab3e9da32e 100644 --- a/internal/service/elasticsearch/service_endpoint_resolver_gen.go +++ b/internal/service/elasticsearch/service_endpoint_resolver_gen.go @@ -62,7 +62,7 @@ func (r resolverV2) ResolveEndpoint(ctx context.Context, params elasticsearchser }) params.UseFIPS = aws.Bool(false) } else { - err = fmt.Errorf("looking up elasticsearchservice 
endpoint %q: %s", hostname, err) + err = fmt.Errorf("looking up elasticsearchservice endpoint %q: %w", hostname, err) return } } else { diff --git a/internal/service/elasticsearch/service_endpoints_gen_test.go b/internal/service/elasticsearch/service_endpoints_gen_test.go index bc1e2e5ee445..7d0cc7fab10f 100644 --- a/internal/service/elasticsearch/service_endpoints_gen_test.go +++ b/internal/service/elasticsearch/service_endpoints_gen_test.go @@ -678,7 +678,7 @@ func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { ) } -var errCancelOperation = fmt.Errorf("Test: Canceling request") +var errCancelOperation = errors.New("Test: Canceling request") func addCancelRequestMiddleware() func(*middleware.Stack) error { return func(stack *middleware.Stack) error { diff --git a/internal/service/elasticsearch/service_package_gen.go b/internal/service/elasticsearch/service_package_gen.go index 96de89afe318..00343041e74a 100644 --- a/internal/service/elasticsearch/service_package_gen.go +++ b/internal/service/elasticsearch/service_package_gen.go @@ -7,7 +7,6 @@ import ( "unique" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/elasticsearchservice" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -92,7 +91,7 @@ func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) ( func(o *elasticsearchservice.Options) { if inContext, ok := conns.FromContext(ctx); ok && inContext.VCREnabled() { tflog.Info(ctx, "overriding retry behavior to immediately return VCR errors") - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(vcr.InteractionNotFoundRetryableFunc)) + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), vcr.InteractionNotFoundRetryableFunc) } }, withExtraOptions(ctx, p, config), diff --git a/internal/service/elasticsearch/sweep.go 
b/internal/service/elasticsearch/sweep.go index 2680f4e1db00..18a3c294fec6 100644 --- a/internal/service/elasticsearch/sweep.go +++ b/internal/service/elasticsearch/sweep.go @@ -27,7 +27,7 @@ func sweepDomains(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %w", err) + return fmt.Errorf("getting client: %w", err) } input := &elasticsearchservice.ListDomainNamesInput{ EngineType: awstypes.EngineTypeElasticsearch, diff --git a/internal/service/elasticsearch/tags_gen.go b/internal/service/elasticsearch/tags_gen.go index b05acc949edf..75500197a07e 100644 --- a/internal/service/elasticsearch/tags_gen.go +++ b/internal/service/elasticsearch/tags_gen.go @@ -3,8 +3,8 @@ package elasticsearch import ( "context" - "fmt" + "github.com/YakDriver/smarterr" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/elasticsearchservice" awstypes "github.com/aws/aws-sdk-go-v2/service/elasticsearchservice/types" @@ -27,7 +27,7 @@ func listTags(ctx context.Context, conn *elasticsearchservice.Client, identifier output, err := conn.ListTags(ctx, &input, optFns...) if err != nil { - return tftags.New(ctx, nil), err + return tftags.New(ctx, nil), smarterr.NewError(err) } return keyValueTags(ctx, output.TagList), nil @@ -39,7 +39,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri tags, err := listTags(ctx, meta.(*conns.AWSClient).ElasticsearchClient(ctx), identifier) if err != nil { - return err + return smarterr.NewError(err) } if inContext, ok := tftags.FromContext(ctx); ok { @@ -117,7 +117,7 @@ func updateTags(ctx context.Context, conn *elasticsearchservice.Client, identifi _, err := conn.RemoveTags(ctx, &input, optFns...) 
if err != nil { - return fmt.Errorf("untagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } @@ -132,7 +132,7 @@ func updateTags(ctx context.Context, conn *elasticsearchservice.Client, identifi _, err := conn.AddTags(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("tagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } diff --git a/internal/service/elastictranscoder/pipeline_test.go b/internal/service/elastictranscoder/pipeline_test.go index 152374c4cfd2..893f955a6986 100644 --- a/internal/service/elastictranscoder/pipeline_test.go +++ b/internal/service/elastictranscoder/pipeline_test.go @@ -14,11 +14,9 @@ import ( "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/elastictranscoder" awstypes "github.com/aws/aws-sdk-go-v2/service/elastictranscoder/types" - sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" "github.com/hashicorp/terraform-provider-aws/internal/acctest" - "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/errs" tfelastictranscoder "github.com/hashicorp/terraform-provider-aws/internal/service/elastictranscoder" "github.com/hashicorp/terraform-provider-aws/names" @@ -28,18 +26,18 @@ func TestAccElasticTranscoderPipeline_basic(t *testing.T) { ctx := acctest.Context(t) pipeline := &awstypes.Pipeline{} resourceName := "aws_elastictranscoder_pipeline.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ElasticTranscoderServiceID), ProtoV5ProviderFactories: 
acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckPipelineDestroy(ctx), + CheckDestroy: testAccCheckPipelineDestroy(ctx, t), Steps: []resource.TestStep{ { Config: testAccPipelineConfig_basic(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckPipelineExists(ctx, resourceName, pipeline), + testAccCheckPipelineExists(ctx, t, resourceName, pipeline), acctest.MatchResourceAttrRegionalARN(ctx, resourceName, names.AttrARN, "elastictranscoder", regexache.MustCompile(`pipeline/.+`)), ), }, @@ -56,19 +54,19 @@ func TestAccElasticTranscoderPipeline_kmsKey(t *testing.T) { ctx := acctest.Context(t) pipeline := &awstypes.Pipeline{} resourceName := "aws_elastictranscoder_pipeline.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) keyResourceName := "aws_kms_key.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ElasticTranscoderServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckPipelineDestroy(ctx), + CheckDestroy: testAccCheckPipelineDestroy(ctx, t), Steps: []resource.TestStep{ { Config: testAccPipelineConfig_kmsKey(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckPipelineExists(ctx, resourceName, pipeline), + testAccCheckPipelineExists(ctx, t, resourceName, pipeline), resource.TestCheckResourceAttrPair(resourceName, "aws_kms_key_arn", keyResourceName, names.AttrARN), ), }, @@ -86,18 +84,18 @@ func TestAccElasticTranscoderPipeline_notifications(t *testing.T) { pipeline := awstypes.Pipeline{} resourceName := "aws_elastictranscoder_pipeline.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, 
resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ElasticTranscoderServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckPipelineDestroy(ctx), + CheckDestroy: testAccCheckPipelineDestroy(ctx, t), Steps: []resource.TestStep{ { Config: testAccPipelineConfig_notifications(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckPipelineExists(ctx, resourceName, &pipeline), + testAccCheckPipelineExists(ctx, t, resourceName, &pipeline), testAccCheckPipeline_notifications(&pipeline, []string{"warning", "completed"}), ), }, @@ -110,7 +108,7 @@ func TestAccElasticTranscoderPipeline_notifications(t *testing.T) { { Config: testAccPipelineConfig_notificationsUpdate(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckPipelineExists(ctx, resourceName, &pipeline), + testAccCheckPipelineExists(ctx, t, resourceName, &pipeline), testAccCheckPipeline_notifications(&pipeline, []string{"completed"}), ), }, @@ -156,18 +154,18 @@ func TestAccElasticTranscoderPipeline_withContent(t *testing.T) { pipeline := &awstypes.Pipeline{} resourceName := "aws_elastictranscoder_pipeline.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ElasticTranscoderServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckPipelineDestroy(ctx), + CheckDestroy: testAccCheckPipelineDestroy(ctx, t), Steps: []resource.TestStep{ { Config: testAccPipelineConfig_content(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckPipelineExists(ctx, resourceName, pipeline), + testAccCheckPipelineExists(ctx, t, resourceName, pipeline), ), }, { @@ -178,7 +176,7 
@@ func TestAccElasticTranscoderPipeline_withContent(t *testing.T) { { Config: testAccPipelineConfig_contentUpdate(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckPipelineExists(ctx, resourceName, pipeline), + testAccCheckPipelineExists(ctx, t, resourceName, pipeline), ), }, }, @@ -190,18 +188,18 @@ func TestAccElasticTranscoderPipeline_withPermissions(t *testing.T) { pipeline := &awstypes.Pipeline{} resourceName := "aws_elastictranscoder_pipeline.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ElasticTranscoderServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckPipelineDestroy(ctx), + CheckDestroy: testAccCheckPipelineDestroy(ctx, t), Steps: []resource.TestStep{ { Config: testAccPipelineConfig_perms(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckPipelineExists(ctx, resourceName, pipeline), + testAccCheckPipelineExists(ctx, t, resourceName, pipeline), ), }, { @@ -217,18 +215,18 @@ func TestAccElasticTranscoderPipeline_disappears(t *testing.T) { ctx := acctest.Context(t) pipeline := &awstypes.Pipeline{} resourceName := "aws_elastictranscoder_pipeline.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ElasticTranscoderServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckPipelineDestroy(ctx), + CheckDestroy: testAccCheckPipelineDestroy(ctx, t), Steps: []resource.TestStep{ { 
Config: testAccPipelineConfig_basic(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckPipelineExists(ctx, resourceName, pipeline), + testAccCheckPipelineExists(ctx, t, resourceName, pipeline), acctest.CheckResourceDisappears(ctx, acctest.Provider, tfelastictranscoder.ResourcePipeline(), resourceName), ), ExpectNonEmptyPlan: true, @@ -237,7 +235,7 @@ func TestAccElasticTranscoderPipeline_disappears(t *testing.T) { }) } -func testAccCheckPipelineExists(ctx context.Context, n string, res *awstypes.Pipeline) resource.TestCheckFunc { +func testAccCheckPipelineExists(ctx context.Context, t *testing.T, n string, res *awstypes.Pipeline) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] if !ok { @@ -248,7 +246,7 @@ func testAccCheckPipelineExists(ctx context.Context, n string, res *awstypes.Pip return fmt.Errorf("No Pipeline ID is set") } - conn := acctest.Provider.Meta().(*conns.AWSClient).ElasticTranscoderClient(ctx) + conn := acctest.ProviderMeta(ctx, t).ElasticTranscoderClient(ctx) out, err := conn.ReadPipeline(ctx, &elastictranscoder.ReadPipelineInput{ Id: aws.String(rs.Primary.ID), @@ -264,9 +262,9 @@ func testAccCheckPipelineExists(ctx context.Context, n string, res *awstypes.Pip } } -func testAccCheckPipelineDestroy(ctx context.Context) resource.TestCheckFunc { +func testAccCheckPipelineDestroy(ctx context.Context, t *testing.T) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).ElasticTranscoderClient(ctx) + conn := acctest.ProviderMeta(ctx, t).ElasticTranscoderClient(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != "aws_elastictranscoder_pipline" { @@ -292,7 +290,7 @@ func testAccCheckPipelineDestroy(ctx context.Context) resource.TestCheckFunc { } func testAccPreCheck(ctx context.Context, t *testing.T) { - conn := acctest.Provider.Meta().(*conns.AWSClient).ElasticTranscoderClient(ctx) + conn := acctest.ProviderMeta(ctx, 
t).ElasticTranscoderClient(ctx) input := &elastictranscoder.ListPipelinesInput{} diff --git a/internal/service/elastictranscoder/preset_test.go b/internal/service/elastictranscoder/preset_test.go index 627aa94b6a95..78ce6fb8e918 100644 --- a/internal/service/elastictranscoder/preset_test.go +++ b/internal/service/elastictranscoder/preset_test.go @@ -12,11 +12,9 @@ import ( "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/elastictranscoder" awstypes "github.com/aws/aws-sdk-go-v2/service/elastictranscoder/types" - sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" "github.com/hashicorp/terraform-provider-aws/internal/acctest" - "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/errs" tfet "github.com/hashicorp/terraform-provider-aws/internal/service/elastictranscoder" "github.com/hashicorp/terraform-provider-aws/names" @@ -26,18 +24,18 @@ func TestAccElasticTranscoderPreset_basic(t *testing.T) { ctx := acctest.Context(t) var preset awstypes.Preset resourceName := "aws_elastictranscoder_preset.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ElasticTranscoderServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckPresetDestroy(ctx), + CheckDestroy: testAccCheckPresetDestroy(ctx, t), Steps: []resource.TestStep{ { Config: testAccPresetConfig_basic(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckPresetExists(ctx, resourceName, &preset), + testAccCheckPresetExists(ctx, t, resourceName, &preset), 
acctest.MatchResourceAttrRegionalARN(ctx, resourceName, names.AttrARN, "elastictranscoder", regexache.MustCompile(`preset/.+`)), ), }, @@ -54,18 +52,18 @@ func TestAccElasticTranscoderPreset_video_noCodec(t *testing.T) { ctx := acctest.Context(t) var preset awstypes.Preset resourceName := "aws_elastictranscoder_preset.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ElasticTranscoderServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckPresetDestroy(ctx), + CheckDestroy: testAccCheckPresetDestroy(ctx, t), Steps: []resource.TestStep{ { Config: testAccPresetConfig_videoNoCodec(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckPresetExists(ctx, resourceName, &preset), + testAccCheckPresetExists(ctx, t, resourceName, &preset), ), }, { @@ -82,18 +80,18 @@ func TestAccElasticTranscoderPreset_audio_noBitRate(t *testing.T) { ctx := acctest.Context(t) var preset awstypes.Preset resourceName := "aws_elastictranscoder_preset.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ElasticTranscoderServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckPresetDestroy(ctx), + CheckDestroy: testAccCheckPresetDestroy(ctx, t), Steps: []resource.TestStep{ { Config: testAccPresetConfig_noBitRate(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckPresetExists(ctx, resourceName, &preset), + 
testAccCheckPresetExists(ctx, t, resourceName, &preset), ), }, { @@ -109,18 +107,18 @@ func TestAccElasticTranscoderPreset_disappears(t *testing.T) { ctx := acctest.Context(t) var preset awstypes.Preset resourceName := "aws_elastictranscoder_preset.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ElasticTranscoderServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckPresetDestroy(ctx), + CheckDestroy: testAccCheckPresetDestroy(ctx, t), Steps: []resource.TestStep{ { Config: testAccPresetConfig_basic(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckPresetExists(ctx, resourceName, &preset), + testAccCheckPresetExists(ctx, t, resourceName, &preset), acctest.CheckResourceDisappears(ctx, acctest.Provider, tfet.ResourcePreset(), resourceName), ), ExpectNonEmptyPlan: true, @@ -134,18 +132,18 @@ func TestAccElasticTranscoderPreset_AudioCodecOptions_empty(t *testing.T) { ctx := acctest.Context(t) var preset awstypes.Preset resourceName := "aws_elastictranscoder_preset.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ElasticTranscoderServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckPresetDestroy(ctx), + CheckDestroy: testAccCheckPresetDestroy(ctx, t), Steps: []resource.TestStep{ { Config: testAccPresetConfig_audioCodecOptionsEmpty(rName), Check: resource.ComposeTestCheckFunc( - 
testAccCheckPresetExists(ctx, resourceName, &preset), + testAccCheckPresetExists(ctx, t, resourceName, &preset), ), }, { @@ -162,18 +160,18 @@ func TestAccElasticTranscoderPreset_description(t *testing.T) { ctx := acctest.Context(t) var preset awstypes.Preset resourceName := "aws_elastictranscoder_preset.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ElasticTranscoderServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckPresetDestroy(ctx), + CheckDestroy: testAccCheckPresetDestroy(ctx, t), Steps: []resource.TestStep{ { Config: testAccPresetConfig_description(rName, "description1"), Check: resource.ComposeTestCheckFunc( - testAccCheckPresetExists(ctx, resourceName, &preset), + testAccCheckPresetExists(ctx, t, resourceName, &preset), resource.TestCheckResourceAttr(resourceName, names.AttrDescription, "description1"), ), }, @@ -191,18 +189,18 @@ func TestAccElasticTranscoderPreset_full(t *testing.T) { ctx := acctest.Context(t) var preset awstypes.Preset resourceName := "aws_elastictranscoder_preset.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ElasticTranscoderServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckPresetDestroy(ctx), + CheckDestroy: testAccCheckPresetDestroy(ctx, t), Steps: []resource.TestStep{ { Config: testAccPresetConfig_full1(rName), Check: resource.ComposeTestCheckFunc( - 
testAccCheckPresetExists(ctx, resourceName, &preset), + testAccCheckPresetExists(ctx, t, resourceName, &preset), resource.TestCheckResourceAttr(resourceName, "audio.#", "1"), resource.TestCheckResourceAttr(resourceName, "audio_codec_options.#", "1"), resource.TestCheckResourceAttr(resourceName, "thumbnails.#", "1"), @@ -219,7 +217,7 @@ func TestAccElasticTranscoderPreset_full(t *testing.T) { { Config: testAccPresetConfig_full2(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckPresetExists(ctx, resourceName, &preset), + testAccCheckPresetExists(ctx, t, resourceName, &preset), resource.TestCheckResourceAttr(resourceName, "audio.#", "1"), resource.TestCheckResourceAttr(resourceName, "audio_codec_options.#", "1"), resource.TestCheckResourceAttr(resourceName, "thumbnails.#", "1"), @@ -242,18 +240,18 @@ func TestAccElasticTranscoderPreset_Video_frameRate(t *testing.T) { ctx := acctest.Context(t) var preset awstypes.Preset resourceName := "aws_elastictranscoder_preset.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ElasticTranscoderServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckPresetDestroy(ctx), + CheckDestroy: testAccCheckPresetDestroy(ctx, t), Steps: []resource.TestStep{ { Config: testAccPresetConfig_videoFrameRate(rName, "29.97"), Check: resource.ComposeTestCheckFunc( - testAccCheckPresetExists(ctx, resourceName, &preset), + testAccCheckPresetExists(ctx, t, resourceName, &preset), resource.TestCheckResourceAttr(resourceName, "video.0.frame_rate", "29.97"), ), }, @@ -266,9 +264,9 @@ func TestAccElasticTranscoderPreset_Video_frameRate(t *testing.T) { }) } -func testAccCheckPresetExists(ctx context.Context, name string, 
preset *awstypes.Preset) resource.TestCheckFunc { +func testAccCheckPresetExists(ctx context.Context, t *testing.T, name string, preset *awstypes.Preset) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).ElasticTranscoderClient(ctx) + conn := acctest.ProviderMeta(ctx, t).ElasticTranscoderClient(ctx) rs, ok := s.RootModule().Resources[name] if !ok { @@ -292,9 +290,9 @@ func testAccCheckPresetExists(ctx context.Context, name string, preset *awstypes } } -func testAccCheckPresetDestroy(ctx context.Context) resource.TestCheckFunc { +func testAccCheckPresetDestroy(ctx context.Context, t *testing.T) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).ElasticTranscoderClient(ctx) + conn := acctest.ProviderMeta(ctx, t).ElasticTranscoderClient(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != "aws_elastictranscoder_preset" { @@ -312,7 +310,7 @@ func testAccCheckPresetDestroy(ctx context.Context) resource.TestCheckFunc { } if !errs.IsA[*awstypes.ResourceNotFoundException](err) { - return fmt.Errorf("unexpected error: %s", err) + return fmt.Errorf("unexpected error: %w", err) } } return nil diff --git a/internal/service/elastictranscoder/service_endpoint_resolver_gen.go b/internal/service/elastictranscoder/service_endpoint_resolver_gen.go index 71db42be00dd..419f022a9b1e 100644 --- a/internal/service/elastictranscoder/service_endpoint_resolver_gen.go +++ b/internal/service/elastictranscoder/service_endpoint_resolver_gen.go @@ -62,7 +62,7 @@ func (r resolverV2) ResolveEndpoint(ctx context.Context, params elastictranscode }) params.UseFIPS = aws.Bool(false) } else { - err = fmt.Errorf("looking up elastictranscoder endpoint %q: %s", hostname, err) + err = fmt.Errorf("looking up elastictranscoder endpoint %q: %w", hostname, err) return } } else { diff --git a/internal/service/elastictranscoder/service_endpoints_gen_test.go 
b/internal/service/elastictranscoder/service_endpoints_gen_test.go index 54e8886d3bc9..8cff00b6b376 100644 --- a/internal/service/elastictranscoder/service_endpoints_gen_test.go +++ b/internal/service/elastictranscoder/service_endpoints_gen_test.go @@ -521,7 +521,7 @@ func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { ) } -var errCancelOperation = fmt.Errorf("Test: Canceling request") +var errCancelOperation = errors.New("Test: Canceling request") func addCancelRequestMiddleware() func(*middleware.Stack) error { return func(stack *middleware.Stack) error { diff --git a/internal/service/elastictranscoder/service_package_gen.go b/internal/service/elastictranscoder/service_package_gen.go index d1e0fc1c74b9..5b98d904c6a3 100644 --- a/internal/service/elastictranscoder/service_package_gen.go +++ b/internal/service/elastictranscoder/service_package_gen.go @@ -7,7 +7,6 @@ import ( "unique" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/elastictranscoder" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -70,7 +69,7 @@ func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) ( func(o *elastictranscoder.Options) { if inContext, ok := conns.FromContext(ctx); ok && inContext.VCREnabled() { tflog.Info(ctx, "overriding retry behavior to immediately return VCR errors") - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(vcr.InteractionNotFoundRetryableFunc)) + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), vcr.InteractionNotFoundRetryableFunc) } }, withExtraOptions(ctx, p, config), diff --git a/internal/service/elb/attachment.go b/internal/service/elb/attachment.go index 70360dc61cd3..394a39399823 100644 --- a/internal/service/elb/attachment.go +++ b/internal/service/elb/attachment.go @@ -58,7 +58,7 @@ func 
resourceAttachmentCreate(ctx context.Context, d *schema.ResourceData, meta const ( timeout = 10 * time.Minute ) - _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, timeout, func() (any, error) { + _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, timeout, func(ctx context.Context) (any, error) { return conn.RegisterInstancesWithLoadBalancer(ctx, input) }, errCodeInvalidTarget) diff --git a/internal/service/elb/hosted_zone_id_data_source.go b/internal/service/elb/hosted_zone_id_data_source.go index b8ed8b87105a..cd67a916981a 100644 --- a/internal/service/elb/hosted_zone_id_data_source.go +++ b/internal/service/elb/hosted_zone_id_data_source.go @@ -28,6 +28,7 @@ var hostedZoneIDPerRegionMap = map[string]string{ endpoints.ApSoutheast3RegionID: "Z08888821HLRG5A9ZRTER", endpoints.ApSoutheast4RegionID: "Z09517862IB2WZLPXG76F", endpoints.ApSoutheast5RegionID: "Z06010284QMVVW7WO5J", + endpoints.ApSoutheast6RegionID: "Z023301818UFJ50CIO0MV", endpoints.ApSoutheast7RegionID: "Z0390008CMBRTHFGWBCB", endpoints.CaCentral1RegionID: "ZQSVJUPU6J1EY", endpoints.CaWest1RegionID: "Z06473681N0SF6OS049SD", diff --git a/internal/service/elb/load_balancer.go b/internal/service/elb/load_balancer.go index 51a6f246674e..f3ab56515621 100644 --- a/internal/service/elb/load_balancer.go +++ b/internal/service/elb/load_balancer.go @@ -304,7 +304,7 @@ func resourceLoadBalancerCreate(ctx context.Context, d *schema.ResourceData, met input.Subnets = flex.ExpandStringValueSet(v.(*schema.Set)) } - _, err = tfresource.RetryWhenIsA[*awstypes.CertificateNotFoundException](ctx, d.Timeout(schema.TimeoutCreate), func() (any, error) { + _, err = tfresource.RetryWhenIsA[any, *awstypes.CertificateNotFoundException](ctx, d.Timeout(schema.TimeoutCreate), func(ctx context.Context) (any, error) { return conn.CreateLoadBalancer(ctx, input) }) @@ -469,7 +469,7 @@ func resourceLoadBalancerUpdate(ctx context.Context, d *schema.ResourceData, met // Occasionally AWS will error with a 'duplicate listener', without any 
// other listeners on the ELB. Retry here to eliminate that. _, err := tfresource.RetryWhen(ctx, d.Timeout(schema.TimeoutUpdate), - func() (any, error) { + func(ctx context.Context) (any, error) { return conn.CreateLoadBalancerListeners(ctx, input) }, func(err error) (bool, error) { @@ -699,7 +699,7 @@ func resourceLoadBalancerUpdate(ctx context.Context, d *schema.ResourceData, met Subnets: add, } - _, err := tfresource.RetryWhenIsAErrorMessageContains[*awstypes.InvalidConfigurationRequestException](ctx, d.Timeout(schema.TimeoutUpdate), func() (any, error) { + _, err := tfresource.RetryWhenIsAErrorMessageContains[any, *awstypes.InvalidConfigurationRequestException](ctx, d.Timeout(schema.TimeoutUpdate), func(ctx context.Context) (any, error) { return conn.AttachLoadBalancerToSubnets(ctx, input) }, "cannot be attached to multiple subnets in the same AZ") diff --git a/internal/service/elb/service_endpoint_resolver_gen.go b/internal/service/elb/service_endpoint_resolver_gen.go index 4e13fe085f51..c11861da52d5 100644 --- a/internal/service/elb/service_endpoint_resolver_gen.go +++ b/internal/service/elb/service_endpoint_resolver_gen.go @@ -62,7 +62,7 @@ func (r resolverV2) ResolveEndpoint(ctx context.Context, params elasticloadbalan }) params.UseFIPS = aws.Bool(false) } else { - err = fmt.Errorf("looking up elasticloadbalancing endpoint %q: %s", hostname, err) + err = fmt.Errorf("looking up elasticloadbalancing endpoint %q: %w", hostname, err) return } } else { diff --git a/internal/service/elb/service_endpoints_gen_test.go b/internal/service/elb/service_endpoints_gen_test.go index 75ac36554e0b..d47362aaa972 100644 --- a/internal/service/elb/service_endpoints_gen_test.go +++ b/internal/service/elb/service_endpoints_gen_test.go @@ -601,7 +601,7 @@ func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { ) } -var errCancelOperation = fmt.Errorf("Test: Canceling request") +var errCancelOperation = errors.New("Test: Canceling request") func 
addCancelRequestMiddleware() func(*middleware.Stack) error { return func(stack *middleware.Stack) error { diff --git a/internal/service/elb/service_package_gen.go b/internal/service/elb/service_package_gen.go index 278bb75d66bb..ea5231049024 100644 --- a/internal/service/elb/service_package_gen.go +++ b/internal/service/elb/service_package_gen.go @@ -7,7 +7,6 @@ import ( "unique" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/elasticloadbalancing" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -140,7 +139,7 @@ func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) ( func(o *elasticloadbalancing.Options) { if inContext, ok := conns.FromContext(ctx); ok && inContext.VCREnabled() { tflog.Info(ctx, "overriding retry behavior to immediately return VCR errors") - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(vcr.InteractionNotFoundRetryableFunc)) + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), vcr.InteractionNotFoundRetryableFunc) } }, withExtraOptions(ctx, p, config), diff --git a/internal/service/elb/sweep.go b/internal/service/elb/sweep.go index eb8a3acdb3a5..0e0e6038c8ab 100644 --- a/internal/service/elb/sweep.go +++ b/internal/service/elb/sweep.go @@ -25,7 +25,7 @@ func sweepLoadBalancers(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.ELBClient(ctx) input := &elasticloadbalancing.DescribeLoadBalancersInput{} diff --git a/internal/service/elb/tags_gen.go b/internal/service/elb/tags_gen.go index 1fc21fae4cf8..2fd370ad7578 100644 --- a/internal/service/elb/tags_gen.go +++ b/internal/service/elb/tags_gen.go @@ -3,8 +3,8 @@ package elb import ( 
"context" - "fmt" + "github.com/YakDriver/smarterr" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/elasticloadbalancing" awstypes "github.com/aws/aws-sdk-go-v2/service/elasticloadbalancing/types" @@ -27,7 +27,7 @@ func listTags(ctx context.Context, conn *elasticloadbalancing.Client, identifier output, err := conn.DescribeTags(ctx, &input, optFns...) if err != nil { - return tftags.New(ctx, nil), err + return tftags.New(ctx, nil), smarterr.NewError(err) } return keyValueTags(ctx, output.TagDescriptions[0].Tags), nil @@ -39,7 +39,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri tags, err := listTags(ctx, meta.(*conns.AWSClient).ELBClient(ctx), identifier) if err != nil { - return err + return smarterr.NewError(err) } if inContext, ok := tftags.FromContext(ctx); ok { @@ -132,7 +132,7 @@ func updateTags(ctx context.Context, conn *elasticloadbalancing.Client, identifi _, err := conn.RemoveTags(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("untagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } @@ -147,7 +147,7 @@ func updateTags(ctx context.Context, conn *elasticloadbalancing.Client, identifi _, err := conn.AddTags(ctx, &input, optFns...) 
if err != nil { - return fmt.Errorf("tagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } diff --git a/internal/service/elbv2/const.go b/internal/service/elbv2/const.go index 6791aa1d04e2..a536f3f2d40f 100644 --- a/internal/service/elbv2/const.go +++ b/internal/service/elbv2/const.go @@ -44,7 +44,8 @@ const ( loadBalancerAttributeZonalShiftConfigEnabled = "zonal_shift.config.enabled" // The following attributes are supported by only Network Load Balancers: - loadBalancerAttributeDNSRecordClientRoutingPolicy = "dns_record.client_routing_policy" + loadBalancerAttributeDNSRecordClientRoutingPolicy = "dns_record.client_routing_policy" + loadBalancerAttributeSecondaryIPsAutoAssignedPerSubnet = "secondary_ips.auto_assigned.per_subnet" ) const ( @@ -201,7 +202,7 @@ const ( ) func healthCheckProtocolEnumValues() []string { - return enum.Slice[awstypes.ProtocolEnum]( + return enum.Slice( awstypes.ProtocolEnumHttp, awstypes.ProtocolEnumHttps, awstypes.ProtocolEnumTcp, diff --git a/internal/service/elbv2/hosted_zone_id_data_source.go b/internal/service/elbv2/hosted_zone_id_data_source.go index d770a9f07925..87fbec68b3e3 100644 --- a/internal/service/elbv2/hosted_zone_id_data_source.go +++ b/internal/service/elbv2/hosted_zone_id_data_source.go @@ -31,6 +31,7 @@ var hostedZoneIDPerRegionALBMap = map[string]string{ endpoints.ApSoutheast3RegionID: "Z08888821HLRG5A9ZRTER", endpoints.ApSoutheast4RegionID: "Z09517862IB2WZLPXG76F", endpoints.ApSoutheast5RegionID: "Z06010284QMVVW7WO5J", + endpoints.ApSoutheast6RegionID: "Z023301818UFJ50CIO0MV", endpoints.ApSoutheast7RegionID: "Z0390008CMBRTHFGWBCB", endpoints.CaCentral1RegionID: "ZQSVJUPU6J1EY", endpoints.CaWest1RegionID: "Z06473681N0SF6OS049SD", @@ -72,6 +73,7 @@ var hostedZoneIDPerRegionNLBMap = map[string]string{ endpoints.ApSoutheast3RegionID: "Z01971771FYVNCOVWJU1G", endpoints.ApSoutheast4RegionID: "Z01156963G8MIIL7X90IV", endpoints.ApSoutheast5RegionID: "Z026317210H9ACVTRO6FB", + 
endpoints.ApSoutheast6RegionID: "Z01392953RKV2Q3RBP0KU", endpoints.ApSoutheast7RegionID: "Z054363131YWATEMWRG5L", endpoints.CaCentral1RegionID: "Z2EPGBW3API2WT", endpoints.CaWest1RegionID: "Z02754302KBB00W2LKWZ9", diff --git a/internal/service/elbv2/listener.go b/internal/service/elbv2/listener.go index d059a0ccfa1e..d8e20f9233a3 100644 --- a/internal/service/elbv2/listener.go +++ b/internal/service/elbv2/listener.go @@ -42,6 +42,8 @@ import ( // @Tags(identifierAttribute="arn") // @Testing(existsType="github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2/types;awstypes;awstypes.Listener") // @Testing(importIgnore="default_action.0.forward") +// @ArnIdentity +// @Testing(preIdentityVersion="v6.3.0") func resourceListener() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceListenerCreate, @@ -49,10 +51,6 @@ func resourceListener() *schema.Resource { UpdateWithoutTimeout: resourceListenerUpdate, DeleteWithoutTimeout: resourceListenerDelete, - Importer: &schema.ResourceImporter{ - StateContext: schema.ImportStatePassthroughContext, - }, - Timeouts: &schema.ResourceTimeout{ Create: schema.DefaultTimeout(5 * time.Minute), Update: schema.DefaultTimeout(5 * time.Minute), @@ -641,7 +639,7 @@ func resourceListenerCreate(ctx context.Context, d *schema.ResourceData, meta an d.SetId(aws.ToString(output.Listeners[0].ListenerArn)) - _, err = tfresource.RetryWhenNotFound(ctx, d.Timeout(schema.TimeoutCreate), func() (any, error) { + _, err = tfresource.RetryWhenNotFound(ctx, d.Timeout(schema.TimeoutCreate), func(ctx context.Context) (any, error) { return findListenerByARN(ctx, conn, d.Id()) }) @@ -767,7 +765,7 @@ func resourceListenerUpdate(ctx context.Context, d *schema.ResourceData, meta an input.SslPolicy = aws.String(v.(string)) } - _, err := tfresource.RetryWhenIsA[*awstypes.CertificateNotFoundException](ctx, d.Timeout(schema.TimeoutUpdate), func() (any, error) { + _, err := tfresource.RetryWhenIsA[any, 
*awstypes.CertificateNotFoundException](ctx, d.Timeout(schema.TimeoutUpdate), func(ctx context.Context) (any, error) { return conn.ModifyListener(ctx, input) }) @@ -1038,7 +1036,7 @@ func (m listenerAttributeMap) flatten(d *schema.ResourceData, apiObjects []awsty } func retryListenerCreate(ctx context.Context, conn *elasticloadbalancingv2.Client, input *elasticloadbalancingv2.CreateListenerInput, timeout time.Duration) (*elasticloadbalancingv2.CreateListenerOutput, error) { - outputRaw, err := tfresource.RetryWhenIsA[*awstypes.CertificateNotFoundException](ctx, timeout, func() (any, error) { + outputRaw, err := tfresource.RetryWhenIsA[any, *awstypes.CertificateNotFoundException](ctx, timeout, func(ctx context.Context) (any, error) { return conn.CreateListener(ctx, input) }) diff --git a/internal/service/elbv2/listener_certificate.go b/internal/service/elbv2/listener_certificate.go index 1d7f019efe72..4f64bee58ac8 100644 --- a/internal/service/elbv2/listener_certificate.go +++ b/internal/service/elbv2/listener_certificate.go @@ -67,7 +67,7 @@ func resourceListenerCertificateCreate(ctx context.Context, d *schema.ResourceDa ListenerArn: aws.String(listenerARN), } - _, err := tfresource.RetryWhenIsA[*awstypes.CertificateNotFoundException](ctx, iamPropagationTimeout, func() (any, error) { + _, err := tfresource.RetryWhenIsA[any, *awstypes.CertificateNotFoundException](ctx, iamPropagationTimeout, func(ctx context.Context) (any, error) { return conn.AddListenerCertificates(ctx, input) }) @@ -89,7 +89,7 @@ func resourceListenerCertificateRead(ctx context.Context, d *schema.ResourceData return sdkdiag.AppendFromErr(diags, err) } - _, err = tfresource.RetryWhenNewResourceNotFound(ctx, elbv2PropagationTimeout, func() (any, error) { + _, err = tfresource.RetryWhenNewResourceNotFound(ctx, elbv2PropagationTimeout, func(ctx context.Context) (any, error) { return findListenerCertificateByTwoPartKey(ctx, conn, listenerARN, certificateARN) }, d.IsNewResource()) diff --git 
a/internal/service/elbv2/listener_data_source_tags_gen_test.go b/internal/service/elbv2/listener_data_source_tags_gen_test.go index ea4316d1ad52..a26bad7bc195 100644 --- a/internal/service/elbv2/listener_data_source_tags_gen_test.go +++ b/internal/service/elbv2/listener_data_source_tags_gen_test.go @@ -6,7 +6,6 @@ import ( "testing" "github.com/hashicorp/terraform-plugin-testing/config" - sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/knownvalue" "github.com/hashicorp/terraform-plugin-testing/statecheck" @@ -17,10 +16,11 @@ import ( func TestAccELBV2ListenerDataSource_tags(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_lb_listener.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ELBV2ServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -45,10 +45,11 @@ func TestAccELBV2ListenerDataSource_tags(t *testing.T) { func TestAccELBV2ListenerDataSource_tags_NullMap(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_lb_listener.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ELBV2ServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -69,10 +70,11 @@ func TestAccELBV2ListenerDataSource_tags_NullMap(t *testing.T) { func TestAccELBV2ListenerDataSource_tags_EmptyMap(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := 
"data.aws_lb_listener.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ELBV2ServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -93,10 +95,11 @@ func TestAccELBV2ListenerDataSource_tags_EmptyMap(t *testing.T) { func TestAccELBV2ListenerDataSource_tags_DefaultTags_nonOverlapping(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_lb_listener.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ELBV2ServiceID), Steps: []resource.TestStep{ @@ -125,10 +128,11 @@ func TestAccELBV2ListenerDataSource_tags_DefaultTags_nonOverlapping(t *testing.T func TestAccELBV2ListenerDataSource_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_lb_listener.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ELBV2ServiceID), Steps: []resource.TestStep{ @@ -163,10 +167,11 @@ func TestAccELBV2ListenerDataSource_tags_IgnoreTags_Overlap_DefaultTag(t *testin func TestAccELBV2ListenerDataSource_tags_IgnoreTags_Overlap_ResourceTag(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_lb_listener.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, 
acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ELBV2ServiceID), Steps: []resource.TestStep{ diff --git a/internal/service/elbv2/listener_identity_gen_test.go b/internal/service/elbv2/listener_identity_gen_test.go new file mode 100644 index 000000000000..3293d4858d30 --- /dev/null +++ b/internal/service/elbv2/listener_identity_gen_test.go @@ -0,0 +1,360 @@ +// Code generated by internal/generate/identitytests/main.go; DO NOT EDIT. + +package elbv2_test + +import ( + "testing" + + awstypes "github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2/types" + "github.com/hashicorp/terraform-plugin-testing/compare" + "github.com/hashicorp/terraform-plugin-testing/config" + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/knownvalue" + "github.com/hashicorp/terraform-plugin-testing/plancheck" + "github.com/hashicorp/terraform-plugin-testing/statecheck" + "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" + "github.com/hashicorp/terraform-plugin-testing/tfversion" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccELBV2Listener_Identity_Basic(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.Listener + resourceName := "aws_lb_listener.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.ELBV2ServiceID), + 
CheckDestroy: testAccCheckListenerDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + // Step 1: Setup + { + ConfigDirectory: config.StaticDirectory("testdata/Listener/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckListenerExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New(names.AttrARN), compare.ValuesSame()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + + // Step 2: Import command + { + ConfigDirectory: config.StaticDirectory("testdata/Listener/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ImportStateKind: resource.ImportCommandWithID, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "default_action.0.forward", + }, + }, + + // Step 3: Import block with Import ID + { + ConfigDirectory: config.StaticDirectory("testdata/Listener/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), 
knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + }, + }, + ExpectNonEmptyPlan: true, + }, + + // Step 4: Import block with Resource Identity + { + ConfigDirectory: config.StaticDirectory("testdata/Listener/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithResourceIdentity, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + }, + }, + ExpectNonEmptyPlan: true, + }, + }, + }) +} + +func TestAccELBV2Listener_Identity_RegionOverride(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_lb_listener.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.ELBV2ServiceID), + CheckDestroy: acctest.CheckDestroyNoop, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + // Step 1: Setup + { + ConfigDirectory: config.StaticDirectory("testdata/Listener/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ConfigStateChecks: []statecheck.StateCheck{ + 
statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New(names.AttrARN), compare.ValuesSame()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + + // Step 2: Import command with appended "@" + { + ConfigDirectory: config.StaticDirectory("testdata/Listener/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ImportStateKind: resource.ImportCommandWithID, + ImportStateIdFunc: acctest.CrossRegionImportStateIdFunc(resourceName), + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "default_action.0.forward", + }, + }, + + // Step 3: Import command without appended "@" + { + ConfigDirectory: config.StaticDirectory("testdata/Listener/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ImportStateKind: resource.ImportCommandWithID, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "default_action.0.forward", + }, + }, + + // Step 4: Import block with Import ID and appended "@" + { + ConfigDirectory: config.StaticDirectory("testdata/Listener/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportStateIdFunc: 
acctest.CrossRegionImportStateIdFunc(resourceName), + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + ExpectNonEmptyPlan: true, + }, + + // Step 5: Import block with Import ID and no appended "@" + { + ConfigDirectory: config.StaticDirectory("testdata/Listener/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + ExpectNonEmptyPlan: true, + }, + + // Step 6: Import block with Resource Identity + { + ConfigDirectory: config.StaticDirectory("testdata/Listener/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithResourceIdentity, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + 
plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + ExpectNonEmptyPlan: true, + }, + }, + }) +} + +// Resource Identity was added after v6.3.0 +func TestAccELBV2Listener_Identity_ExistingResource(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.Listener + resourceName := "aws_lb_listener.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.ELBV2ServiceID), + CheckDestroy: testAccCheckListenerDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/Listener/basic_v6.3.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckListenerExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Listener/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + 
plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + }, + }) +} + +// Resource Identity was added after v6.3.0 +func TestAccELBV2Listener_Identity_ExistingResource_NoRefresh_NoChange(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.Listener + resourceName := "aws_lb_listener.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.ELBV2ServiceID), + CheckDestroy: testAccCheckListenerDestroy(ctx), + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/Listener/basic_v6.3.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckListenerExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Listener/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: 
[]plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + }, + }) +} diff --git a/internal/service/elbv2/listener_rule.go b/internal/service/elbv2/listener_rule.go index 1345348ba1ca..9f26aca54656 100644 --- a/internal/service/elbv2/listener_rule.go +++ b/internal/service/elbv2/listener_rule.go @@ -49,6 +49,9 @@ const ( // @Tags(identifierAttribute="arn") // @Testing(existsType="github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2/types;awstypes;awstypes.Rule") // @Testing(importIgnore="action.0.forward") +// @Testing(plannableImportAction="NoOp") +// @ArnIdentity +// @Testing(preIdentityVersion="v6.3.0") func resourceListenerRule() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceListenerRuleCreate, @@ -56,10 +59,6 @@ func resourceListenerRule() *schema.Resource { UpdateWithoutTimeout: resourceListenerRuleUpdate, DeleteWithoutTimeout: resourceListenerRuleDelete, - Importer: &schema.ResourceImporter{ - StateContext: schema.ImportStatePassthroughContext, - }, - Schema: map[string]*schema.Schema{ names.AttrARN: { Type: schema.TypeString, @@ -544,7 +543,7 @@ func resourceListenerRuleRead(ctx context.Context, d *schema.ResourceData, meta var diags diag.Diagnostics conn := meta.(*conns.AWSClient).ELBV2Client(ctx) - outputRaw, err := tfresource.RetryWhenNewResourceNotFound(ctx, elbv2PropagationTimeout, func() (any, error) { + rule, err := tfresource.RetryWhenNewResourceNotFound(ctx, elbv2PropagationTimeout, func(ctx context.Context) (*awstypes.Rule, error) { return findListenerRuleByARN(ctx, conn, d.Id()) }, d.IsNewResource()) @@ -558,8 +557,6 @@ func resourceListenerRuleRead(ctx context.Context, d *schema.ResourceData, meta return sdkdiag.AppendErrorf(diags, "reading ELBv2 Listener Rule (%s): %s", d.Id(), err) } - rule := outputRaw.(*awstypes.Rule) - d.Set(names.AttrARN, 
rule.RuleArn) // The listener arn isn't in the response but can be derived from the rule arn @@ -732,7 +729,7 @@ func retryListenerRuleCreate(ctx context.Context, conn *elasticloadbalancingv2.C const ( timeout = 5 * time.Minute ) - outputRaw, err := tfresource.RetryWhenIsA[*awstypes.PriorityInUseException](ctx, timeout, func() (any, error) { + outputRaw, err := tfresource.RetryWhenIsA[any, *awstypes.PriorityInUseException](ctx, timeout, func(ctx context.Context) (any, error) { priority, err := highestListenerRulePriority(ctx, conn, listenerARN) if err != nil { return nil, err diff --git a/internal/service/elbv2/listener_rule_data_source_tags_gen_test.go b/internal/service/elbv2/listener_rule_data_source_tags_gen_test.go index 006745ab4db6..f0f33431e45b 100644 --- a/internal/service/elbv2/listener_rule_data_source_tags_gen_test.go +++ b/internal/service/elbv2/listener_rule_data_source_tags_gen_test.go @@ -6,7 +6,6 @@ import ( "testing" "github.com/hashicorp/terraform-plugin-testing/config" - sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/knownvalue" "github.com/hashicorp/terraform-plugin-testing/statecheck" @@ -17,10 +16,11 @@ import ( func TestAccELBV2ListenerRuleDataSource_tags(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_lb_listener_rule.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ELBV2ServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -45,10 +45,11 @@ func TestAccELBV2ListenerRuleDataSource_tags(t *testing.T) { func TestAccELBV2ListenerRuleDataSource_tags_NullMap(t *testing.T) { ctx := acctest.Context(t) + 
dataSourceName := "data.aws_lb_listener_rule.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ELBV2ServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -69,10 +70,11 @@ func TestAccELBV2ListenerRuleDataSource_tags_NullMap(t *testing.T) { func TestAccELBV2ListenerRuleDataSource_tags_EmptyMap(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_lb_listener_rule.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ELBV2ServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -93,10 +95,11 @@ func TestAccELBV2ListenerRuleDataSource_tags_EmptyMap(t *testing.T) { func TestAccELBV2ListenerRuleDataSource_tags_DefaultTags_nonOverlapping(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_lb_listener_rule.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ELBV2ServiceID), Steps: []resource.TestStep{ @@ -125,10 +128,11 @@ func TestAccELBV2ListenerRuleDataSource_tags_DefaultTags_nonOverlapping(t *testi func TestAccELBV2ListenerRuleDataSource_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_lb_listener_rule.test" - rName := 
sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ELBV2ServiceID), Steps: []resource.TestStep{ @@ -163,10 +167,11 @@ func TestAccELBV2ListenerRuleDataSource_tags_IgnoreTags_Overlap_DefaultTag(t *te func TestAccELBV2ListenerRuleDataSource_tags_IgnoreTags_Overlap_ResourceTag(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_lb_listener_rule.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ELBV2ServiceID), Steps: []resource.TestStep{ diff --git a/internal/service/elbv2/listener_rule_identity_gen_test.go b/internal/service/elbv2/listener_rule_identity_gen_test.go new file mode 100644 index 000000000000..98195a5e2f9e --- /dev/null +++ b/internal/service/elbv2/listener_rule_identity_gen_test.go @@ -0,0 +1,350 @@ +// Code generated by internal/generate/identitytests/main.go; DO NOT EDIT. 
+ +package elbv2_test + +import ( + "testing" + + awstypes "github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2/types" + "github.com/hashicorp/terraform-plugin-testing/compare" + "github.com/hashicorp/terraform-plugin-testing/config" + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/knownvalue" + "github.com/hashicorp/terraform-plugin-testing/plancheck" + "github.com/hashicorp/terraform-plugin-testing/statecheck" + "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" + "github.com/hashicorp/terraform-plugin-testing/tfversion" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccELBV2ListenerRule_Identity_Basic(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.Rule + resourceName := "aws_lb_listener_rule.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.ELBV2ServiceID), + CheckDestroy: testAccCheckListenerRuleDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + // Step 1: Setup + { + ConfigDirectory: config.StaticDirectory("testdata/ListenerRule/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckListenerRuleExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, 
tfjsonpath.New(names.AttrARN), compare.ValuesSame()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + + // Step 2: Import command + { + ConfigDirectory: config.StaticDirectory("testdata/ListenerRule/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ImportStateKind: resource.ImportCommandWithID, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "action.0.forward", + }, + }, + + // Step 3: Import block with Import ID + { + ConfigDirectory: config.StaticDirectory("testdata/ListenerRule/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + }, + }, + }, + + // Step 4: Import block with Resource Identity + { + ConfigDirectory: config.StaticDirectory("testdata/ListenerRule/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithResourceIdentity, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), 
knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + }, + }, + }, + }, + }) +} + +func TestAccELBV2ListenerRule_Identity_RegionOverride(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_lb_listener_rule.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.ELBV2ServiceID), + CheckDestroy: acctest.CheckDestroyNoop, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + // Step 1: Setup + { + ConfigDirectory: config.StaticDirectory("testdata/ListenerRule/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New(names.AttrARN), compare.ValuesSame()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + + // Step 2: Import command with appended "@" + { + ConfigDirectory: config.StaticDirectory("testdata/ListenerRule/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ImportStateKind: 
resource.ImportCommandWithID, + ImportStateIdFunc: acctest.CrossRegionImportStateIdFunc(resourceName), + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "action.0.forward", + }, + }, + + // Step 3: Import command without appended "@" + { + ConfigDirectory: config.StaticDirectory("testdata/ListenerRule/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ImportStateKind: resource.ImportCommandWithID, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "action.0.forward", + }, + }, + + // Step 4: Import block with Import ID and appended "@" + { + ConfigDirectory: config.StaticDirectory("testdata/ListenerRule/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportStateIdFunc: acctest.CrossRegionImportStateIdFunc(resourceName), + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + }, + + // Step 5: Import block with Import ID and no appended "@" + { + ConfigDirectory: config.StaticDirectory("testdata/ListenerRule/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + 
ImportStateKind: resource.ImportBlockWithID, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + }, + + // Step 6: Import block with Resource Identity + { + ConfigDirectory: config.StaticDirectory("testdata/ListenerRule/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithResourceIdentity, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + }, + }, + }) +} + +// Resource Identity was added after v6.3.0 +func TestAccELBV2ListenerRule_Identity_ExistingResource(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.Rule + resourceName := "aws_lb_listener_rule.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.ELBV2ServiceID), + CheckDestroy: testAccCheckListenerRuleDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: 
config.StaticDirectory("testdata/ListenerRule/basic_v6.3.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckListenerRuleExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/ListenerRule/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + }, + }) +} + +// Resource Identity was added after v6.3.0 +func TestAccELBV2ListenerRule_Identity_ExistingResource_NoRefresh_NoChange(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.Rule + resourceName := "aws_lb_listener_rule.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.ELBV2ServiceID), + CheckDestroy: testAccCheckListenerRuleDestroy(ctx), + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + // Step 1: Create 
pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/ListenerRule/basic_v6.3.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckListenerRuleExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/ListenerRule/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + }, + }) +} diff --git a/internal/service/elbv2/listener_rule_tags_gen_test.go b/internal/service/elbv2/listener_rule_tags_gen_test.go index 9c9d1fddfd35..096b888ba999 100644 --- a/internal/service/elbv2/listener_rule_tags_gen_test.go +++ b/internal/service/elbv2/listener_rule_tags_gen_test.go @@ -7,7 +7,6 @@ import ( awstypes "github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2/types" "github.com/hashicorp/terraform-plugin-testing/config" - sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/knownvalue" "github.com/hashicorp/terraform-plugin-testing/plancheck" @@ -19,11 +18,12 @@ import ( func TestAccELBV2ListenerRule_tags(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Rule resourceName := "aws_lb_listener_rule.test" - rName := 
sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ELBV2ServiceID), CheckDestroy: testAccCheckListenerRuleDestroy(ctx), @@ -213,11 +213,12 @@ func TestAccELBV2ListenerRule_tags(t *testing.T) { func TestAccELBV2ListenerRule_tags_null(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Rule resourceName := "aws_lb_listener_rule.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ELBV2ServiceID), CheckDestroy: testAccCheckListenerRuleDestroy(ctx), @@ -283,11 +284,12 @@ func TestAccELBV2ListenerRule_tags_null(t *testing.T) { func TestAccELBV2ListenerRule_tags_EmptyMap(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Rule resourceName := "aws_lb_listener_rule.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ELBV2ServiceID), CheckDestroy: testAccCheckListenerRuleDestroy(ctx), @@ -349,11 +351,12 @@ func TestAccELBV2ListenerRule_tags_EmptyMap(t *testing.T) { func TestAccELBV2ListenerRule_tags_AddOnUpdate(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Rule resourceName := "aws_lb_listener_rule.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + 
acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ELBV2ServiceID), CheckDestroy: testAccCheckListenerRuleDestroy(ctx), @@ -433,11 +436,12 @@ func TestAccELBV2ListenerRule_tags_AddOnUpdate(t *testing.T) { func TestAccELBV2ListenerRule_tags_EmptyTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Rule resourceName := "aws_lb_listener_rule.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ELBV2ServiceID), CheckDestroy: testAccCheckListenerRuleDestroy(ctx), @@ -528,11 +532,12 @@ func TestAccELBV2ListenerRule_tags_EmptyTag_OnCreate(t *testing.T) { func TestAccELBV2ListenerRule_tags_EmptyTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Rule resourceName := "aws_lb_listener_rule.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ELBV2ServiceID), CheckDestroy: testAccCheckListenerRuleDestroy(ctx), @@ -671,11 +676,12 @@ func TestAccELBV2ListenerRule_tags_EmptyTag_OnUpdate_Add(t *testing.T) { func TestAccELBV2ListenerRule_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Rule resourceName := "aws_lb_listener_rule.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, 
ErrorCheck: acctest.ErrorCheck(t, names.ELBV2ServiceID), CheckDestroy: testAccCheckListenerRuleDestroy(ctx), @@ -763,11 +769,12 @@ func TestAccELBV2ListenerRule_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { func TestAccELBV2ListenerRule_tags_DefaultTags_providerOnly(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Rule resourceName := "aws_lb_listener_rule.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ELBV2ServiceID), CheckDestroy: testAccCheckListenerRuleDestroy(ctx), @@ -956,11 +963,12 @@ func TestAccELBV2ListenerRule_tags_DefaultTags_providerOnly(t *testing.T) { func TestAccELBV2ListenerRule_tags_DefaultTags_nonOverlapping(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Rule resourceName := "aws_lb_listener_rule.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ELBV2ServiceID), CheckDestroy: testAccCheckListenerRuleDestroy(ctx), @@ -1125,11 +1133,12 @@ func TestAccELBV2ListenerRule_tags_DefaultTags_nonOverlapping(t *testing.T) { func TestAccELBV2ListenerRule_tags_DefaultTags_overlapping(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Rule resourceName := "aws_lb_listener_rule.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ELBV2ServiceID), 
CheckDestroy: testAccCheckListenerRuleDestroy(ctx), @@ -1310,11 +1319,12 @@ func TestAccELBV2ListenerRule_tags_DefaultTags_overlapping(t *testing.T) { func TestAccELBV2ListenerRule_tags_DefaultTags_updateToProviderOnly(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Rule resourceName := "aws_lb_listener_rule.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ELBV2ServiceID), CheckDestroy: testAccCheckListenerRuleDestroy(ctx), @@ -1403,11 +1413,12 @@ func TestAccELBV2ListenerRule_tags_DefaultTags_updateToProviderOnly(t *testing.T func TestAccELBV2ListenerRule_tags_DefaultTags_updateToResourceOnly(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Rule resourceName := "aws_lb_listener_rule.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ELBV2ServiceID), CheckDestroy: testAccCheckListenerRuleDestroy(ctx), @@ -1495,11 +1506,12 @@ func TestAccELBV2ListenerRule_tags_DefaultTags_updateToResourceOnly(t *testing.T func TestAccELBV2ListenerRule_tags_DefaultTags_emptyResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Rule resourceName := "aws_lb_listener_rule.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ELBV2ServiceID), CheckDestroy: 
testAccCheckListenerRuleDestroy(ctx), @@ -1563,11 +1575,12 @@ func TestAccELBV2ListenerRule_tags_DefaultTags_emptyResourceTag(t *testing.T) { func TestAccELBV2ListenerRule_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Rule resourceName := "aws_lb_listener_rule.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ELBV2ServiceID), CheckDestroy: testAccCheckListenerRuleDestroy(ctx), @@ -1623,11 +1636,12 @@ func TestAccELBV2ListenerRule_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T func TestAccELBV2ListenerRule_tags_DefaultTags_nullOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Rule resourceName := "aws_lb_listener_rule.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ELBV2ServiceID), CheckDestroy: testAccCheckListenerRuleDestroy(ctx), @@ -1688,11 +1702,12 @@ func TestAccELBV2ListenerRule_tags_DefaultTags_nullOverlappingResourceTag(t *tes func TestAccELBV2ListenerRule_tags_DefaultTags_nullNonOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Rule resourceName := "aws_lb_listener_rule.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ELBV2ServiceID), CheckDestroy: 
testAccCheckListenerRuleDestroy(ctx), @@ -1753,11 +1768,12 @@ func TestAccELBV2ListenerRule_tags_DefaultTags_nullNonOverlappingResourceTag(t * func TestAccELBV2ListenerRule_tags_ComputedTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Rule resourceName := "aws_lb_listener_rule.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ELBV2ServiceID), CheckDestroy: testAccCheckListenerRuleDestroy(ctx), @@ -1811,11 +1827,12 @@ func TestAccELBV2ListenerRule_tags_ComputedTag_OnCreate(t *testing.T) { func TestAccELBV2ListenerRule_tags_ComputedTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Rule resourceName := "aws_lb_listener_rule.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ELBV2ServiceID), CheckDestroy: testAccCheckListenerRuleDestroy(ctx), @@ -1911,11 +1928,12 @@ func TestAccELBV2ListenerRule_tags_ComputedTag_OnUpdate_Add(t *testing.T) { func TestAccELBV2ListenerRule_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Rule resourceName := "aws_lb_listener_rule.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ELBV2ServiceID), CheckDestroy: testAccCheckListenerRuleDestroy(ctx), @@ -2001,11 +2019,12 
@@ func TestAccELBV2ListenerRule_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { func TestAccELBV2ListenerRule_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Rule resourceName := "aws_lb_listener_rule.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ELBV2ServiceID), CheckDestroy: testAccCheckListenerRuleDestroy(ctx), @@ -2163,11 +2182,12 @@ func TestAccELBV2ListenerRule_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { func TestAccELBV2ListenerRule_tags_IgnoreTags_Overlap_ResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Rule resourceName := "aws_lb_listener_rule.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ELBV2ServiceID), CheckDestroy: testAccCheckListenerRuleDestroy(ctx), diff --git a/internal/service/elbv2/listener_tags_gen_test.go b/internal/service/elbv2/listener_tags_gen_test.go index 69745529831b..2ff29152d0de 100644 --- a/internal/service/elbv2/listener_tags_gen_test.go +++ b/internal/service/elbv2/listener_tags_gen_test.go @@ -7,7 +7,6 @@ import ( awstypes "github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2/types" "github.com/hashicorp/terraform-plugin-testing/config" - sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/knownvalue" "github.com/hashicorp/terraform-plugin-testing/plancheck" @@ -19,11 +18,12 @@ import ( 
func TestAccELBV2Listener_tags(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Listener resourceName := "aws_lb_listener.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ELBV2ServiceID), CheckDestroy: testAccCheckListenerDestroy(ctx), @@ -213,11 +213,12 @@ func TestAccELBV2Listener_tags(t *testing.T) { func TestAccELBV2Listener_tags_null(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Listener resourceName := "aws_lb_listener.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ELBV2ServiceID), CheckDestroy: testAccCheckListenerDestroy(ctx), @@ -283,11 +284,12 @@ func TestAccELBV2Listener_tags_null(t *testing.T) { func TestAccELBV2Listener_tags_EmptyMap(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Listener resourceName := "aws_lb_listener.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ELBV2ServiceID), CheckDestroy: testAccCheckListenerDestroy(ctx), @@ -349,11 +351,12 @@ func TestAccELBV2Listener_tags_EmptyMap(t *testing.T) { func TestAccELBV2Listener_tags_AddOnUpdate(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Listener resourceName := "aws_lb_listener.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := 
acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ELBV2ServiceID), CheckDestroy: testAccCheckListenerDestroy(ctx), @@ -433,11 +436,12 @@ func TestAccELBV2Listener_tags_AddOnUpdate(t *testing.T) { func TestAccELBV2Listener_tags_EmptyTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Listener resourceName := "aws_lb_listener.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ELBV2ServiceID), CheckDestroy: testAccCheckListenerDestroy(ctx), @@ -528,11 +532,12 @@ func TestAccELBV2Listener_tags_EmptyTag_OnCreate(t *testing.T) { func TestAccELBV2Listener_tags_EmptyTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Listener resourceName := "aws_lb_listener.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ELBV2ServiceID), CheckDestroy: testAccCheckListenerDestroy(ctx), @@ -671,11 +676,12 @@ func TestAccELBV2Listener_tags_EmptyTag_OnUpdate_Add(t *testing.T) { func TestAccELBV2Listener_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Listener resourceName := "aws_lb_listener.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, 
resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ELBV2ServiceID), CheckDestroy: testAccCheckListenerDestroy(ctx), @@ -763,11 +769,12 @@ func TestAccELBV2Listener_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { func TestAccELBV2Listener_tags_DefaultTags_providerOnly(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Listener resourceName := "aws_lb_listener.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ELBV2ServiceID), CheckDestroy: testAccCheckListenerDestroy(ctx), @@ -956,11 +963,12 @@ func TestAccELBV2Listener_tags_DefaultTags_providerOnly(t *testing.T) { func TestAccELBV2Listener_tags_DefaultTags_nonOverlapping(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Listener resourceName := "aws_lb_listener.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ELBV2ServiceID), CheckDestroy: testAccCheckListenerDestroy(ctx), @@ -1125,11 +1133,12 @@ func TestAccELBV2Listener_tags_DefaultTags_nonOverlapping(t *testing.T) { func TestAccELBV2Listener_tags_DefaultTags_overlapping(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Listener resourceName := "aws_lb_listener.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, 
names.ELBV2ServiceID), CheckDestroy: testAccCheckListenerDestroy(ctx), @@ -1310,11 +1319,12 @@ func TestAccELBV2Listener_tags_DefaultTags_overlapping(t *testing.T) { func TestAccELBV2Listener_tags_DefaultTags_updateToProviderOnly(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Listener resourceName := "aws_lb_listener.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ELBV2ServiceID), CheckDestroy: testAccCheckListenerDestroy(ctx), @@ -1403,11 +1413,12 @@ func TestAccELBV2Listener_tags_DefaultTags_updateToProviderOnly(t *testing.T) { func TestAccELBV2Listener_tags_DefaultTags_updateToResourceOnly(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Listener resourceName := "aws_lb_listener.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ELBV2ServiceID), CheckDestroy: testAccCheckListenerDestroy(ctx), @@ -1495,11 +1506,12 @@ func TestAccELBV2Listener_tags_DefaultTags_updateToResourceOnly(t *testing.T) { func TestAccELBV2Listener_tags_DefaultTags_emptyResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Listener resourceName := "aws_lb_listener.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ELBV2ServiceID), CheckDestroy: 
testAccCheckListenerDestroy(ctx), @@ -1563,11 +1575,12 @@ func TestAccELBV2Listener_tags_DefaultTags_emptyResourceTag(t *testing.T) { func TestAccELBV2Listener_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Listener resourceName := "aws_lb_listener.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ELBV2ServiceID), CheckDestroy: testAccCheckListenerDestroy(ctx), @@ -1623,11 +1636,12 @@ func TestAccELBV2Listener_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T) { func TestAccELBV2Listener_tags_DefaultTags_nullOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Listener resourceName := "aws_lb_listener.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ELBV2ServiceID), CheckDestroy: testAccCheckListenerDestroy(ctx), @@ -1688,11 +1702,12 @@ func TestAccELBV2Listener_tags_DefaultTags_nullOverlappingResourceTag(t *testing func TestAccELBV2Listener_tags_DefaultTags_nullNonOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Listener resourceName := "aws_lb_listener.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ELBV2ServiceID), CheckDestroy: testAccCheckListenerDestroy(ctx), @@ 
-1753,11 +1768,12 @@ func TestAccELBV2Listener_tags_DefaultTags_nullNonOverlappingResourceTag(t *test func TestAccELBV2Listener_tags_ComputedTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Listener resourceName := "aws_lb_listener.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ELBV2ServiceID), CheckDestroy: testAccCheckListenerDestroy(ctx), @@ -1811,11 +1827,12 @@ func TestAccELBV2Listener_tags_ComputedTag_OnCreate(t *testing.T) { func TestAccELBV2Listener_tags_ComputedTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Listener resourceName := "aws_lb_listener.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ELBV2ServiceID), CheckDestroy: testAccCheckListenerDestroy(ctx), @@ -1911,11 +1928,12 @@ func TestAccELBV2Listener_tags_ComputedTag_OnUpdate_Add(t *testing.T) { func TestAccELBV2Listener_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Listener resourceName := "aws_lb_listener.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ELBV2ServiceID), CheckDestroy: testAccCheckListenerDestroy(ctx), @@ -2001,11 +2019,12 @@ func TestAccELBV2Listener_tags_ComputedTag_OnUpdate_Replace(t *testing.T) 
{ func TestAccELBV2Listener_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Listener resourceName := "aws_lb_listener.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ELBV2ServiceID), CheckDestroy: testAccCheckListenerDestroy(ctx), @@ -2163,11 +2182,12 @@ func TestAccELBV2Listener_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { func TestAccELBV2Listener_tags_IgnoreTags_Overlap_ResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Listener resourceName := "aws_lb_listener.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ELBV2ServiceID), CheckDestroy: testAccCheckListenerDestroy(ctx), diff --git a/internal/service/elbv2/load_balancer.go b/internal/service/elbv2/load_balancer.go index 055399e40c02..a67f19064066 100644 --- a/internal/service/elbv2/load_balancer.go +++ b/internal/service/elbv2/load_balancer.go @@ -287,6 +287,13 @@ func resourceLoadBalancer() *schema.Resource { Default: false, DiffSuppressFunc: suppressIfLBTypeNot(awstypes.LoadBalancerTypeEnumApplication), }, + "secondary_ips_auto_assigned_per_subnet": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + DiffSuppressFunc: suppressIfLBTypeNot(awstypes.LoadBalancerTypeEnumNetwork), + ValidateFunc: validation.IntBetween(0, 7), + }, names.AttrSecurityGroups: { Type: schema.TypeSet, Optional: true, @@ -899,6 +906,11 @@ var loadBalancerAttributes = loadBalancerAttributeMap(map[string]loadBalancerAtt tfType: 
schema.TypeBool, loadBalancerTypesSupported: []awstypes.LoadBalancerTypeEnum{awstypes.LoadBalancerTypeEnumApplication}, }, + "secondary_ips_auto_assigned_per_subnet": { + apiAttributeKey: loadBalancerAttributeSecondaryIPsAutoAssignedPerSubnet, + tfType: schema.TypeInt, + loadBalancerTypesSupported: []awstypes.LoadBalancerTypeEnum{awstypes.LoadBalancerTypeEnumNetwork}, + }, "xff_header_processing_mode": { apiAttributeKey: loadBalancerAttributeRoutingHTTPXFFHeaderProcessingMode, tfType: schema.TypeString, @@ -1154,7 +1166,7 @@ func waitForALBNetworkInterfacesToDetach(ctx context.Context, conn *ec2.Client, } if ipv4IPAMPoolID != "" { - if _, err := tfresource.RetryUntilNotFound(ctx, timeout, func() (any, error) { + if _, err := tfresource.RetryUntilNotFound(ctx, timeout, func(ctx context.Context) (any, error) { output, err := tfec2.FindIPAMPoolAllocationsByIPAMPoolIDAndResourceID(ctx, conn, aws.ToString(v.Association.AllocationId), ipv4IPAMPoolID) if err != nil { return nil, err @@ -1280,6 +1292,7 @@ func customizeDiffLoadBalancerNLB(_ context.Context, diff *schema.ResourceDiff, // - there are subnet removals // OR security groups are being added where none currently exist // OR all security groups are being removed + // OR secondary IPv4 addresses are being decreased // // Any other combination should be treated as normal. 
At this time, subnet // handling is the only known difference between Network Load Balancers and @@ -1360,6 +1373,21 @@ func customizeDiffLoadBalancerNLB(_ context.Context, diff *schema.ResourceDiff, } } + // Get diff for secondary IPv4 addresses + if diff.HasChange("secondary_ips_auto_assigned_per_subnet") { + if v := config.GetAttr("secondary_ips_auto_assigned_per_subnet"); v.IsWhollyKnown() { + o, n := diff.GetChange("secondary_ips_auto_assigned_per_subnet") + oldCount, newCount := o.(int), n.(int) + + // Force new if secondary IPv4 address count is decreased + if newCount < oldCount { + if err := diff.ForceNew("secondary_ips_auto_assigned_per_subnet"); err != nil { + return err + } + } + } + } + return nil } diff --git a/internal/service/elbv2/load_balancer_data_source.go b/internal/service/elbv2/load_balancer_data_source.go index b1c5b642f6ec..fda02152ec33 100644 --- a/internal/service/elbv2/load_balancer_data_source.go +++ b/internal/service/elbv2/load_balancer_data_source.go @@ -175,6 +175,10 @@ func dataSourceLoadBalancer() *schema.Resource { Type: schema.TypeBool, Computed: true, }, + "secondary_ips_auto_assigned_per_subnet": { + Type: schema.TypeInt, + Computed: true, + }, names.AttrSecurityGroups: { Type: schema.TypeSet, Elem: &schema.Schema{Type: schema.TypeString}, diff --git a/internal/service/elbv2/load_balancer_data_source_tags_gen_test.go b/internal/service/elbv2/load_balancer_data_source_tags_gen_test.go index 69a1582c2246..2badaf98faf7 100644 --- a/internal/service/elbv2/load_balancer_data_source_tags_gen_test.go +++ b/internal/service/elbv2/load_balancer_data_source_tags_gen_test.go @@ -6,7 +6,6 @@ import ( "testing" "github.com/hashicorp/terraform-plugin-testing/config" - sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/knownvalue" "github.com/hashicorp/terraform-plugin-testing/statecheck" @@ -17,10 +16,11 @@ 
import ( func TestAccELBV2LoadBalancerDataSource_tags(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_lb.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ELBV2ServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -45,10 +45,11 @@ func TestAccELBV2LoadBalancerDataSource_tags(t *testing.T) { func TestAccELBV2LoadBalancerDataSource_tags_NullMap(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_lb.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ELBV2ServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -69,10 +70,11 @@ func TestAccELBV2LoadBalancerDataSource_tags_NullMap(t *testing.T) { func TestAccELBV2LoadBalancerDataSource_tags_EmptyMap(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_lb.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ELBV2ServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -93,10 +95,11 @@ func TestAccELBV2LoadBalancerDataSource_tags_EmptyMap(t *testing.T) { func TestAccELBV2LoadBalancerDataSource_tags_DefaultTags_nonOverlapping(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_lb.test" - rName := 
sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ELBV2ServiceID), Steps: []resource.TestStep{ @@ -125,10 +128,11 @@ func TestAccELBV2LoadBalancerDataSource_tags_DefaultTags_nonOverlapping(t *testi func TestAccELBV2LoadBalancerDataSource_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_lb.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ELBV2ServiceID), Steps: []resource.TestStep{ @@ -163,10 +167,11 @@ func TestAccELBV2LoadBalancerDataSource_tags_IgnoreTags_Overlap_DefaultTag(t *te func TestAccELBV2LoadBalancerDataSource_tags_IgnoreTags_Overlap_ResourceTag(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_lb.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ELBV2ServiceID), Steps: []resource.TestStep{ diff --git a/internal/service/elbv2/load_balancer_data_source_test.go b/internal/service/elbv2/load_balancer_data_source_test.go index 497ec8c99bc1..dc2763ae59ce 100644 --- a/internal/service/elbv2/load_balancer_data_source_test.go +++ b/internal/service/elbv2/load_balancer_data_source_test.go @@ -216,6 +216,28 @@ func TestAccELBV2LoadBalancerDataSource_backwardsCompatibility(t *testing.T) { }) } +func 
TestAccELBV2LoadBalancerDataSource_nlbSecondaryIPAddresses(t *testing.T) { + ctx := acctest.Context(t) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + dataSourceName := "data.aws_lb.nlb_test_with_arn" + resourceName := "aws_lb.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.ELBV2ServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + Config: testAccLoadBalancerDataSourceConfig_nlbSecondaryIPAddresses(rName, 3, 3), + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttrPair(dataSourceName, names.AttrName, resourceName, names.AttrName), + resource.TestCheckResourceAttrPair(dataSourceName, "secondary_ips_auto_assigned_per_subnet", resourceName, "secondary_ips_auto_assigned_per_subnet"), + ), + }, + }, + }) +} + func testAccLoadBalancerDataSourceConfig_basic(rName string) string { return acctest.ConfigCompose(acctest.ConfigVPCWithSubnets(rName, 2), fmt.Sprintf(` resource "aws_lb" "test" { @@ -397,3 +419,11 @@ data "aws_alb" "alb_test_with_tags" { } `, rName)) } + +func testAccLoadBalancerDataSourceConfig_nlbSecondaryIPAddresses(rName string, subnetCount, addressCount int) string { + return acctest.ConfigCompose(testAccLoadBalancerConfig_nlbSecondaryIPAddresses(rName, subnetCount, addressCount), ` +data "aws_lb" "nlb_test_with_arn" { + arn = aws_lb.test.arn +} +`) +} diff --git a/internal/service/elbv2/load_balancer_identity_gen_test.go b/internal/service/elbv2/load_balancer_identity_gen_test.go index 69b1b9cb2515..412f8a9e3f3b 100644 --- a/internal/service/elbv2/load_balancer_identity_gen_test.go +++ b/internal/service/elbv2/load_balancer_identity_gen_test.go @@ -16,6 +16,7 @@ import ( "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" "github.com/hashicorp/terraform-plugin-testing/tfversion" "github.com/hashicorp/terraform-provider-aws/internal/acctest" + tfstatecheck 
"github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -26,7 +27,7 @@ func TestAccELBV2LoadBalancer_Identity_Basic(t *testing.T) { resourceName := "aws_lb.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ TerraformVersionChecks: []tfversion.TerraformVersionCheck{ tfversion.SkipBelow(tfversion.Version1_12_0), }, @@ -47,6 +48,9 @@ func TestAccELBV2LoadBalancer_Identity_Basic(t *testing.T) { ConfigStateChecks: []statecheck.StateCheck{ statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New(names.AttrARN), compare.ValuesSame()), statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), }, }, @@ -108,7 +112,7 @@ func TestAccELBV2LoadBalancer_Identity_RegionOverride(t *testing.T) { resourceName := "aws_lb.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ TerraformVersionChecks: []tfversion.TerraformVersionCheck{ tfversion.SkipBelow(tfversion.Version1_12_0), }, @@ -127,6 +131,9 @@ func TestAccELBV2LoadBalancer_Identity_RegionOverride(t *testing.T) { ConfigStateChecks: []statecheck.StateCheck{ statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New(names.AttrARN), compare.ValuesSame()), statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), 
statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), }, }, @@ -218,3 +225,131 @@ func TestAccELBV2LoadBalancer_Identity_RegionOverride(t *testing.T) { }, }) } + +func TestAccELBV2LoadBalancer_Identity_ExistingResource(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.LoadBalancer + resourceName := "aws_lb.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.ELBV2ServiceID), + CheckDestroy: testAccCheckLoadBalancerDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/LoadBalancer/basic_v5.100.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckLoadBalancerExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: v6.0 Identity error + { + ConfigDirectory: config.StaticDirectory("testdata/LoadBalancer/basic_v6.0.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckLoadBalancerExists(ctx, resourceName, &v), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.Null(), + }), + }, + 
}, + + // Step 3: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/LoadBalancer/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + }, + }) +} + +func TestAccELBV2LoadBalancer_Identity_ExistingResource_NoRefresh_NoChange(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.LoadBalancer + resourceName := "aws_lb.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.ELBV2ServiceID), + CheckDestroy: testAccCheckLoadBalancerDestroy(ctx), + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/LoadBalancer/basic_v5.100.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckLoadBalancerExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // 
Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/LoadBalancer/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckLoadBalancerExists(ctx, resourceName, &v), + ), + }, + }, + }) +} diff --git a/internal/service/elbv2/load_balancer_tags_gen_test.go b/internal/service/elbv2/load_balancer_tags_gen_test.go index 7a96814e3d21..6db23afc916d 100644 --- a/internal/service/elbv2/load_balancer_tags_gen_test.go +++ b/internal/service/elbv2/load_balancer_tags_gen_test.go @@ -7,7 +7,6 @@ import ( awstypes "github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2/types" "github.com/hashicorp/terraform-plugin-testing/config" - sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/knownvalue" "github.com/hashicorp/terraform-plugin-testing/plancheck" @@ -19,11 +18,12 @@ import ( func TestAccELBV2LoadBalancer_tags(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.LoadBalancer resourceName := "aws_lb.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ELBV2ServiceID), CheckDestroy: testAccCheckLoadBalancerDestroy(ctx), @@ -201,11 +201,12 @@ func TestAccELBV2LoadBalancer_tags(t *testing.T) { func TestAccELBV2LoadBalancer_tags_null(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.LoadBalancer resourceName := "aws_lb.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, 
resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ELBV2ServiceID), CheckDestroy: testAccCheckLoadBalancerDestroy(ctx), @@ -268,11 +269,12 @@ func TestAccELBV2LoadBalancer_tags_null(t *testing.T) { func TestAccELBV2LoadBalancer_tags_EmptyMap(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.LoadBalancer resourceName := "aws_lb.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ELBV2ServiceID), CheckDestroy: testAccCheckLoadBalancerDestroy(ctx), @@ -331,11 +333,12 @@ func TestAccELBV2LoadBalancer_tags_EmptyMap(t *testing.T) { func TestAccELBV2LoadBalancer_tags_AddOnUpdate(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.LoadBalancer resourceName := "aws_lb.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ELBV2ServiceID), CheckDestroy: testAccCheckLoadBalancerDestroy(ctx), @@ -412,11 +415,12 @@ func TestAccELBV2LoadBalancer_tags_AddOnUpdate(t *testing.T) { func TestAccELBV2LoadBalancer_tags_EmptyTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.LoadBalancer resourceName := "aws_lb.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, 
names.ELBV2ServiceID), CheckDestroy: testAccCheckLoadBalancerDestroy(ctx), @@ -501,11 +505,12 @@ func TestAccELBV2LoadBalancer_tags_EmptyTag_OnCreate(t *testing.T) { func TestAccELBV2LoadBalancer_tags_EmptyTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.LoadBalancer resourceName := "aws_lb.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ELBV2ServiceID), CheckDestroy: testAccCheckLoadBalancerDestroy(ctx), @@ -638,11 +643,12 @@ func TestAccELBV2LoadBalancer_tags_EmptyTag_OnUpdate_Add(t *testing.T) { func TestAccELBV2LoadBalancer_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.LoadBalancer resourceName := "aws_lb.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ELBV2ServiceID), CheckDestroy: testAccCheckLoadBalancerDestroy(ctx), @@ -727,11 +733,12 @@ func TestAccELBV2LoadBalancer_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { func TestAccELBV2LoadBalancer_tags_DefaultTags_providerOnly(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.LoadBalancer resourceName := "aws_lb.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ELBV2ServiceID), CheckDestroy: testAccCheckLoadBalancerDestroy(ctx), @@ -908,11 +915,12 @@ 
func TestAccELBV2LoadBalancer_tags_DefaultTags_providerOnly(t *testing.T) { func TestAccELBV2LoadBalancer_tags_DefaultTags_nonOverlapping(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.LoadBalancer resourceName := "aws_lb.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ELBV2ServiceID), CheckDestroy: testAccCheckLoadBalancerDestroy(ctx), @@ -1068,11 +1076,12 @@ func TestAccELBV2LoadBalancer_tags_DefaultTags_nonOverlapping(t *testing.T) { func TestAccELBV2LoadBalancer_tags_DefaultTags_overlapping(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.LoadBalancer resourceName := "aws_lb.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ELBV2ServiceID), CheckDestroy: testAccCheckLoadBalancerDestroy(ctx), @@ -1244,11 +1253,12 @@ func TestAccELBV2LoadBalancer_tags_DefaultTags_overlapping(t *testing.T) { func TestAccELBV2LoadBalancer_tags_DefaultTags_updateToProviderOnly(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.LoadBalancer resourceName := "aws_lb.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ELBV2ServiceID), CheckDestroy: testAccCheckLoadBalancerDestroy(ctx), @@ -1334,11 +1344,12 @@ func TestAccELBV2LoadBalancer_tags_DefaultTags_updateToProviderOnly(t 
*testing.T func TestAccELBV2LoadBalancer_tags_DefaultTags_updateToResourceOnly(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.LoadBalancer resourceName := "aws_lb.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ELBV2ServiceID), CheckDestroy: testAccCheckLoadBalancerDestroy(ctx), @@ -1423,11 +1434,12 @@ func TestAccELBV2LoadBalancer_tags_DefaultTags_updateToResourceOnly(t *testing.T func TestAccELBV2LoadBalancer_tags_DefaultTags_emptyResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.LoadBalancer resourceName := "aws_lb.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ELBV2ServiceID), CheckDestroy: testAccCheckLoadBalancerDestroy(ctx), @@ -1488,11 +1500,12 @@ func TestAccELBV2LoadBalancer_tags_DefaultTags_emptyResourceTag(t *testing.T) { func TestAccELBV2LoadBalancer_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.LoadBalancer resourceName := "aws_lb.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ELBV2ServiceID), CheckDestroy: testAccCheckLoadBalancerDestroy(ctx), @@ -1545,11 +1558,12 @@ func TestAccELBV2LoadBalancer_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T func 
TestAccELBV2LoadBalancer_tags_DefaultTags_nullOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.LoadBalancer resourceName := "aws_lb.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ELBV2ServiceID), CheckDestroy: testAccCheckLoadBalancerDestroy(ctx), @@ -1607,11 +1621,12 @@ func TestAccELBV2LoadBalancer_tags_DefaultTags_nullOverlappingResourceTag(t *tes func TestAccELBV2LoadBalancer_tags_DefaultTags_nullNonOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.LoadBalancer resourceName := "aws_lb.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ELBV2ServiceID), CheckDestroy: testAccCheckLoadBalancerDestroy(ctx), @@ -1669,11 +1684,12 @@ func TestAccELBV2LoadBalancer_tags_DefaultTags_nullNonOverlappingResourceTag(t * func TestAccELBV2LoadBalancer_tags_ComputedTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.LoadBalancer resourceName := "aws_lb.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ELBV2ServiceID), CheckDestroy: testAccCheckLoadBalancerDestroy(ctx), @@ -1724,11 +1740,12 @@ func TestAccELBV2LoadBalancer_tags_ComputedTag_OnCreate(t *testing.T) { func 
TestAccELBV2LoadBalancer_tags_ComputedTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.LoadBalancer resourceName := "aws_lb.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ELBV2ServiceID), CheckDestroy: testAccCheckLoadBalancerDestroy(ctx), @@ -1821,11 +1838,12 @@ func TestAccELBV2LoadBalancer_tags_ComputedTag_OnUpdate_Add(t *testing.T) { func TestAccELBV2LoadBalancer_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.LoadBalancer resourceName := "aws_lb.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ELBV2ServiceID), CheckDestroy: testAccCheckLoadBalancerDestroy(ctx), @@ -1908,11 +1926,12 @@ func TestAccELBV2LoadBalancer_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { func TestAccELBV2LoadBalancer_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.LoadBalancer resourceName := "aws_lb.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ELBV2ServiceID), CheckDestroy: testAccCheckLoadBalancerDestroy(ctx), @@ -2070,11 +2089,12 @@ func TestAccELBV2LoadBalancer_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { func 
TestAccELBV2LoadBalancer_tags_IgnoreTags_Overlap_ResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.LoadBalancer resourceName := "aws_lb.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ELBV2ServiceID), CheckDestroy: testAccCheckLoadBalancerDestroy(ctx), diff --git a/internal/service/elbv2/load_balancer_test.go b/internal/service/elbv2/load_balancer_test.go index 9abf8d09aa39..211a2e7bbbed 100644 --- a/internal/service/elbv2/load_balancer_test.go +++ b/internal/service/elbv2/load_balancer_test.go @@ -17,14 +17,8 @@ import ( "github.com/hashicorp/aws-sdk-go-base/v2/endpoints" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" - "github.com/hashicorp/terraform-plugin-testing/knownvalue" - "github.com/hashicorp/terraform-plugin-testing/plancheck" - "github.com/hashicorp/terraform-plugin-testing/statecheck" "github.com/hashicorp/terraform-plugin-testing/terraform" - "github.com/hashicorp/terraform-plugin-testing/tfversion" "github.com/hashicorp/terraform-provider-aws/internal/acctest" - tfknownvalue "github.com/hashicorp/terraform-provider-aws/internal/acctest/knownvalue" - tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" "github.com/hashicorp/terraform-provider-aws/internal/conns" tfelbv2 "github.com/hashicorp/terraform-provider-aws/internal/service/elbv2" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" @@ -2065,6 +2059,56 @@ func TestAccELBV2LoadBalancer_NetworkLoadBalancer_deleteSubnetMapping(t *testing }) } +func TestAccELBV2LoadBalancer_NetworkLoadBalancer_secondaryIPAddresses(t *testing.T) { + ctx := acctest.Context(t) + var pre, mid, post 
awstypes.LoadBalancer + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_lb.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + // GovCloud Regions don't always have 3 AZs. + acctest.PreCheckPartitionNot(t, endpoints.AwsUsGovPartitionID) + }, + ErrorCheck: acctest.ErrorCheck(t, names.ELBV2ServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckLoadBalancerDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccLoadBalancerConfig_nlbBasic(rName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckLoadBalancerExists(ctx, resourceName, &pre), + resource.TestCheckResourceAttr(resourceName, "secondary_ips_auto_assigned_per_subnet", "0"), + ), + }, + { + Config: testAccLoadBalancerConfig_nlbSecondaryIPAddresses(rName, 3, 7), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckLoadBalancerExists(ctx, resourceName, &mid), + // Increasing secondary IP count should not force recreation + testAccCheckLoadBalancerNotRecreated(&pre, &mid), + resource.TestCheckResourceAttr(resourceName, "secondary_ips_auto_assigned_per_subnet", "7"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccLoadBalancerConfig_nlbSecondaryIPAddresses(rName, 3, 3), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckLoadBalancerExists(ctx, resourceName, &post), + // Decreasing secondary IP count should force recreation + testAccCheckLoadBalancerRecreated(&mid, &post), + resource.TestCheckResourceAttr(resourceName, "secondary_ips_auto_assigned_per_subnet", "3"), + ), + }, + }, + }) +} + func TestAccELBV2LoadBalancer_updateDesyncMitigationMode(t *testing.T) { ctx := acctest.Context(t) var pre, mid, post awstypes.LoadBalancer @@ -2281,84 +2325,6 @@ func TestAccELBV2LoadBalancer_updateCapacityReservation(t *testing.T) { }) } -func 
TestAccELBV2LoadBalancer_Identity_ExistingResource(t *testing.T) { - ctx := acctest.Context(t) - var conf awstypes.LoadBalancer - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resourceName := "aws_lb.test" - - resource.ParallelTest(t, resource.TestCase{ - TerraformVersionChecks: []tfversion.TerraformVersionCheck{ - tfversion.SkipBelow(tfversion.Version1_12_0), - }, - PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, names.ELBV2ServiceID), - CheckDestroy: testAccCheckLoadBalancerDestroy(ctx), - Steps: []resource.TestStep{ - { - ExternalProviders: map[string]resource.ExternalProvider{ - "aws": { - Source: "hashicorp/aws", - VersionConstraint: "5.100.0", - }, - }, - Config: testAccLoadBalancerConfig_basic(rName), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckLoadBalancerExists(ctx, resourceName, &conf), - ), - ConfigStateChecks: []statecheck.StateCheck{ - tfstatecheck.ExpectNoIdentity(resourceName), - }, - }, - { - ExternalProviders: map[string]resource.ExternalProvider{ - "aws": { - Source: "hashicorp/aws", - VersionConstraint: "6.0.0", - }, - }, - Config: testAccLoadBalancerConfig_basic(rName), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckLoadBalancerExists(ctx, resourceName, &conf), - ), - ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - PostApplyPostRefresh: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - }, - ConfigStateChecks: []statecheck.StateCheck{ - statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ - names.AttrARN: knownvalue.Null(), - }), - }, - }, - { - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - Config: testAccLoadBalancerConfig_basic(rName), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckLoadBalancerExists(ctx, resourceName, &conf), - ), - 
ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - PostApplyPostRefresh: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - }, - ConfigStateChecks: []statecheck.StateCheck{ - statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ - names.AttrARN: tfknownvalue.RegionalARNRegexp("elasticloadbalancing", regexache.MustCompile(`loadbalancer/.+`)), - }), - }, - }, - }, - }) -} - func testAccCheckLoadBalancerNotRecreated(i, j *awstypes.LoadBalancer) resource.TestCheckFunc { return func(s *terraform.State) error { if aws.ToString(i.LoadBalancerArn) != aws.ToString(j.LoadBalancerArn) { @@ -3091,6 +3057,30 @@ func testAccLoadBalancerConfig_nlbZonalShift(rName string, zs bool) string { return testAccLoadBalancerConfig_nlbSubnetMappingCount(rName, true, zs, 1) } +func testAccLoadBalancerConfig_nlbSecondaryIPAddresses(rName string, subnetCount, addressCount int) string { + return acctest.ConfigCompose(acctest.ConfigVPCWithSubnets(rName, subnetCount), fmt.Sprintf(` +resource "aws_lb" "test" { + name = %[1]q + internal = true + load_balancer_type = "network" + + enable_deletion_protection = false + secondary_ips_auto_assigned_per_subnet = %[2]d + + dynamic "subnet_mapping" { + for_each = aws_subnet.test[*] + content { + subnet_id = subnet_mapping.value.id + } + } + + tags = { + Name = %[1]q + } +} +`, rName, addressCount)) +} + func testAccLoadBalancerConfig_nlbSubnetMappingCount(rName string, cz, zs bool, subnetCount int) string { return acctest.ConfigCompose(acctest.ConfigVPCWithSubnets(rName, subnetCount), fmt.Sprintf(` resource "aws_lb" "test" { diff --git a/internal/service/elbv2/service_endpoint_resolver_gen.go b/internal/service/elbv2/service_endpoint_resolver_gen.go index aef72ee0d25e..c59a5bea6b83 100644 --- a/internal/service/elbv2/service_endpoint_resolver_gen.go +++ 
b/internal/service/elbv2/service_endpoint_resolver_gen.go @@ -62,7 +62,7 @@ func (r resolverV2) ResolveEndpoint(ctx context.Context, params elasticloadbalan }) params.UseFIPS = aws.Bool(false) } else { - err = fmt.Errorf("looking up elasticloadbalancingv2 endpoint %q: %s", hostname, err) + err = fmt.Errorf("looking up elasticloadbalancingv2 endpoint %q: %w", hostname, err) return } } else { diff --git a/internal/service/elbv2/service_endpoints_gen_test.go b/internal/service/elbv2/service_endpoints_gen_test.go index 63199ce5fba4..ea274b8452b1 100644 --- a/internal/service/elbv2/service_endpoints_gen_test.go +++ b/internal/service/elbv2/service_endpoints_gen_test.go @@ -601,7 +601,7 @@ func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { ) } -var errCancelOperation = fmt.Errorf("Test: Canceling request") +var errCancelOperation = errors.New("Test: Canceling request") func addCancelRequestMiddleware() func(*middleware.Stack) error { return func(stack *middleware.Stack) error { diff --git a/internal/service/elbv2/service_package_gen.go b/internal/service/elbv2/service_package_gen.go index e0a93bcb7274..7bcc7b8f4cab 100644 --- a/internal/service/elbv2/service_package_gen.go +++ b/internal/service/elbv2/service_package_gen.go @@ -7,7 +7,6 @@ import ( "unique" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -142,6 +141,12 @@ func (p *servicePackage) SDKResources(ctx context.Context) []*inttypes.ServicePa IdentifierAttribute: names.AttrARN, }), Region: unique.Make(inttypes.ResourceRegionDefault()), + Identity: inttypes.RegionalARNIdentity( + inttypes.WithIdentityDuplicateAttrs(names.AttrID), + ), + Import: inttypes.SDKv2Import{ + WrappedImport: true, + }, }, { Factory: resourceListenerCertificate, @@ -157,6 +162,12 @@ func (p *servicePackage) 
SDKResources(ctx context.Context) []*inttypes.ServicePa IdentifierAttribute: names.AttrARN, }), Region: unique.Make(inttypes.ResourceRegionDefault()), + Identity: inttypes.RegionalARNIdentity( + inttypes.WithIdentityDuplicateAttrs(names.AttrID), + ), + Import: inttypes.SDKv2Import{ + WrappedImport: true, + }, }, { Factory: resourceTargetGroup, @@ -166,6 +177,12 @@ func (p *servicePackage) SDKResources(ctx context.Context) []*inttypes.ServicePa IdentifierAttribute: names.AttrARN, }), Region: unique.Make(inttypes.ResourceRegionDefault()), + Identity: inttypes.RegionalARNIdentity( + inttypes.WithIdentityDuplicateAttrs(names.AttrID), + ), + Import: inttypes.SDKv2Import{ + WrappedImport: true, + }, }, { Factory: resourceTargetGroupAttachment, @@ -197,6 +214,12 @@ func (p *servicePackage) SDKResources(ctx context.Context) []*inttypes.ServicePa IdentifierAttribute: names.AttrARN, }), Region: unique.Make(inttypes.ResourceRegionDefault()), + Identity: inttypes.RegionalARNIdentity( + inttypes.WithIdentityDuplicateAttrs(names.AttrID), + ), + Import: inttypes.SDKv2Import{ + WrappedImport: true, + }, }, { Factory: resourceListenerCertificate, @@ -212,6 +235,12 @@ func (p *servicePackage) SDKResources(ctx context.Context) []*inttypes.ServicePa IdentifierAttribute: names.AttrARN, }), Region: unique.Make(inttypes.ResourceRegionDefault()), + Identity: inttypes.RegionalARNIdentity( + inttypes.WithIdentityDuplicateAttrs(names.AttrID), + ), + Import: inttypes.SDKv2Import{ + WrappedImport: true, + }, }, { Factory: resourceTargetGroup, @@ -221,6 +250,12 @@ func (p *servicePackage) SDKResources(ctx context.Context) []*inttypes.ServicePa IdentifierAttribute: names.AttrARN, }), Region: unique.Make(inttypes.ResourceRegionDefault()), + Identity: inttypes.RegionalARNIdentity( + inttypes.WithIdentityDuplicateAttrs(names.AttrID), + ), + Import: inttypes.SDKv2Import{ + WrappedImport: true, + }, }, { Factory: resourceTargetGroupAttachment, @@ -236,6 +271,12 @@ func (p *servicePackage) 
SDKResources(ctx context.Context) []*inttypes.ServicePa IdentifierAttribute: names.AttrID, }), Region: unique.Make(inttypes.ResourceRegionDefault()), + Identity: inttypes.RegionalARNIdentity( + inttypes.WithIdentityDuplicateAttrs(names.AttrID), + ), + Import: inttypes.SDKv2Import{ + WrappedImport: true, + }, }, { Factory: resourceTrustStoreRevocation, @@ -269,7 +310,7 @@ func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) ( func(o *elasticloadbalancingv2.Options) { if inContext, ok := conns.FromContext(ctx); ok && inContext.VCREnabled() { tflog.Info(ctx, "overriding retry behavior to immediately return VCR errors") - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(vcr.InteractionNotFoundRetryableFunc)) + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), vcr.InteractionNotFoundRetryableFunc) } }, withExtraOptions(ctx, p, config), diff --git a/internal/service/elbv2/sweep.go b/internal/service/elbv2/sweep.go index 27047ba44d25..967838761020 100644 --- a/internal/service/elbv2/sweep.go +++ b/internal/service/elbv2/sweep.go @@ -48,7 +48,7 @@ func sweepLoadBalancers(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } input := &elasticloadbalancingv2.DescribeLoadBalancersInput{} conn := client.ELBV2Client(ctx) @@ -130,7 +130,7 @@ func sweepListeners(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } input := &elasticloadbalancingv2.DescribeLoadBalancersInput{} conn := client.ELBV2Client(ctx) diff --git a/internal/service/elbv2/tags_gen.go b/internal/service/elbv2/tags_gen.go index 8154729dbddd..7ce05ab5a322 100644 --- 
a/internal/service/elbv2/tags_gen.go +++ b/internal/service/elbv2/tags_gen.go @@ -3,8 +3,8 @@ package elbv2 import ( "context" - "fmt" + "github.com/YakDriver/smarterr" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2" awstypes "github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2/types" @@ -27,7 +27,7 @@ func listTags(ctx context.Context, conn *elasticloadbalancingv2.Client, identifi output, err := conn.DescribeTags(ctx, &input, optFns...) if err != nil { - return tftags.New(ctx, nil), err + return tftags.New(ctx, nil), smarterr.NewError(err) } return keyValueTags(ctx, output.TagDescriptions[0].Tags), nil @@ -39,7 +39,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri tags, err := listTags(ctx, meta.(*conns.AWSClient).ELBV2Client(ctx), identifier) if err != nil { - return err + return smarterr.NewError(err) } if inContext, ok := tftags.FromContext(ctx); ok { @@ -126,7 +126,7 @@ func updateTags(ctx context.Context, conn *elasticloadbalancingv2.Client, identi _, err := conn.RemoveTags(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("untagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } @@ -141,7 +141,7 @@ func updateTags(ctx context.Context, conn *elasticloadbalancingv2.Client, identi _, err := conn.AddTags(ctx, &input, optFns...) 
if err != nil { - return fmt.Errorf("tagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } diff --git a/internal/service/elbv2/target_group.go b/internal/service/elbv2/target_group.go index 1e7901cf9397..d5b051f0e2d5 100644 --- a/internal/service/elbv2/target_group.go +++ b/internal/service/elbv2/target_group.go @@ -41,6 +41,9 @@ import ( // @Tags(identifierAttribute="arn") // @Testing(existsType="github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2/types;types.TargetGroup") // @Testing(importIgnore="lambda_multi_value_headers_enabled;proxy_protocol_v2") +// @Testing(plannableImportAction="NoOp") +// @ArnIdentity +// @Testing(preIdentityVersion="v6.3.0") func resourceTargetGroup() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceTargetGroupCreate, @@ -48,10 +51,6 @@ func resourceTargetGroup() *schema.Resource { UpdateWithoutTimeout: resourceTargetGroupUpdate, DeleteWithoutTimeout: resourceTargetGroupDelete, - Importer: &schema.ResourceImporter{ - StateContext: schema.ImportStatePassthroughContext, - }, - CustomizeDiff: customdiff.Sequence( resourceTargetGroupCustomizeDiff, customizeDiffTargetGroupTargetTypeLambda, @@ -520,7 +519,7 @@ func resourceTargetGroupCreate(ctx context.Context, d *schema.ResourceData, meta d.SetId(aws.ToString(output.TargetGroups[0].TargetGroupArn)) - _, err = tfresource.RetryWhenNotFound(ctx, elbv2PropagationTimeout, func() (any, error) { + _, err = tfresource.RetryWhenNotFound(ctx, elbv2PropagationTimeout, func(ctx context.Context) (any, error) { return findTargetGroupByARN(ctx, conn, d.Id()) }) @@ -774,7 +773,7 @@ func resourceTargetGroupDelete(ctx context.Context, d *schema.ResourceData, meta const ( timeout = 2 * time.Minute ) - _, err := tfresource.RetryWhenIsAErrorMessageContains[*awstypes.ResourceInUseException](ctx, timeout, func() (any, error) { + _, err := tfresource.RetryWhenIsAErrorMessageContains[any, *awstypes.ResourceInUseException](ctx, timeout, func(ctx 
context.Context) (any, error) { return conn.DeleteTargetGroup(ctx, &elasticloadbalancingv2.DeleteTargetGroupInput{ TargetGroupArn: aws.String(d.Id()), }) diff --git a/internal/service/elbv2/target_group_attachment.go b/internal/service/elbv2/target_group_attachment.go index ff95ecf365fd..2cb45a2ad2e9 100644 --- a/internal/service/elbv2/target_group_attachment.go +++ b/internal/service/elbv2/target_group_attachment.go @@ -79,7 +79,7 @@ func resourceAttachmentCreate(ctx context.Context, d *schema.ResourceData, meta const ( timeout = 10 * time.Minute ) - _, err := tfresource.RetryWhenIsA[*awstypes.InvalidTargetException](ctx, timeout, func() (any, error) { + _, err := tfresource.RetryWhenIsA[any, *awstypes.InvalidTargetException](ctx, timeout, func(ctx context.Context) (any, error) { return conn.RegisterTargets(ctx, input) }) diff --git a/internal/service/elbv2/target_group_data_source_tags_gen_test.go b/internal/service/elbv2/target_group_data_source_tags_gen_test.go index b658774ebe22..2ef2c120cedd 100644 --- a/internal/service/elbv2/target_group_data_source_tags_gen_test.go +++ b/internal/service/elbv2/target_group_data_source_tags_gen_test.go @@ -6,7 +6,6 @@ import ( "testing" "github.com/hashicorp/terraform-plugin-testing/config" - sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/knownvalue" "github.com/hashicorp/terraform-plugin-testing/statecheck" @@ -17,10 +16,11 @@ import ( func TestAccELBV2TargetGroupDataSource_tags(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_lb_target_group.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, 
names.ELBV2ServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -45,10 +45,11 @@ func TestAccELBV2TargetGroupDataSource_tags(t *testing.T) { func TestAccELBV2TargetGroupDataSource_tags_NullMap(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_lb_target_group.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ELBV2ServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -69,10 +70,11 @@ func TestAccELBV2TargetGroupDataSource_tags_NullMap(t *testing.T) { func TestAccELBV2TargetGroupDataSource_tags_EmptyMap(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_lb_target_group.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ELBV2ServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -93,10 +95,11 @@ func TestAccELBV2TargetGroupDataSource_tags_EmptyMap(t *testing.T) { func TestAccELBV2TargetGroupDataSource_tags_DefaultTags_nonOverlapping(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_lb_target_group.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ELBV2ServiceID), Steps: []resource.TestStep{ @@ -125,10 +128,11 @@ func 
TestAccELBV2TargetGroupDataSource_tags_DefaultTags_nonOverlapping(t *testin func TestAccELBV2TargetGroupDataSource_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_lb_target_group.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ELBV2ServiceID), Steps: []resource.TestStep{ @@ -163,10 +167,11 @@ func TestAccELBV2TargetGroupDataSource_tags_IgnoreTags_Overlap_DefaultTag(t *tes func TestAccELBV2TargetGroupDataSource_tags_IgnoreTags_Overlap_ResourceTag(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_lb_target_group.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ELBV2ServiceID), Steps: []resource.TestStep{ diff --git a/internal/service/elbv2/target_group_identity_gen_test.go b/internal/service/elbv2/target_group_identity_gen_test.go new file mode 100644 index 000000000000..fc3471bc96d0 --- /dev/null +++ b/internal/service/elbv2/target_group_identity_gen_test.go @@ -0,0 +1,350 @@ +// Code generated by internal/generate/identitytests/main.go; DO NOT EDIT. 
+ +package elbv2_test + +import ( + "testing" + + "github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2/types" + "github.com/hashicorp/terraform-plugin-testing/compare" + "github.com/hashicorp/terraform-plugin-testing/config" + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/knownvalue" + "github.com/hashicorp/terraform-plugin-testing/plancheck" + "github.com/hashicorp/terraform-plugin-testing/statecheck" + "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" + "github.com/hashicorp/terraform-plugin-testing/tfversion" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccELBV2TargetGroup_Identity_Basic(t *testing.T) { + ctx := acctest.Context(t) + + var v types.TargetGroup + resourceName := "aws_lb_target_group.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.ELBV2ServiceID), + CheckDestroy: testAccCheckTargetGroupDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + // Step 1: Setup + { + ConfigDirectory: config.StaticDirectory("testdata/TargetGroup/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTargetGroupExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New(names.AttrARN), 
compare.ValuesSame()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + + // Step 2: Import command + { + ConfigDirectory: config.StaticDirectory("testdata/TargetGroup/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ImportStateKind: resource.ImportCommandWithID, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "lambda_multi_value_headers_enabled", "proxy_protocol_v2", + }, + }, + + // Step 3: Import block with Import ID + { + ConfigDirectory: config.StaticDirectory("testdata/TargetGroup/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + }, + }, + }, + + // Step 4: Import block with Resource Identity + { + ConfigDirectory: config.StaticDirectory("testdata/TargetGroup/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithResourceIdentity, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, 
tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + }, + }, + }, + }, + }) +} + +func TestAccELBV2TargetGroup_Identity_RegionOverride(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_lb_target_group.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.ELBV2ServiceID), + CheckDestroy: acctest.CheckDestroyNoop, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + // Step 1: Setup + { + ConfigDirectory: config.StaticDirectory("testdata/TargetGroup/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New(names.AttrARN), compare.ValuesSame()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + + // Step 2: Import command with appended "@" + { + ConfigDirectory: config.StaticDirectory("testdata/TargetGroup/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + 
}, + ImportStateKind: resource.ImportCommandWithID, + ImportStateIdFunc: acctest.CrossRegionImportStateIdFunc(resourceName), + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "lambda_multi_value_headers_enabled", "proxy_protocol_v2", + }, + }, + + // Step 3: Import command without appended "@" + { + ConfigDirectory: config.StaticDirectory("testdata/TargetGroup/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ImportStateKind: resource.ImportCommandWithID, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "lambda_multi_value_headers_enabled", "proxy_protocol_v2", + }, + }, + + // Step 4: Import block with Import ID and appended "@" + { + ConfigDirectory: config.StaticDirectory("testdata/TargetGroup/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportStateIdFunc: acctest.CrossRegionImportStateIdFunc(resourceName), + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + }, + + // Step 5: Import block with Import ID and no appended "@" + { + ConfigDirectory: config.StaticDirectory("testdata/TargetGroup/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": 
config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + }, + + // Step 6: Import block with Resource Identity + { + ConfigDirectory: config.StaticDirectory("testdata/TargetGroup/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithResourceIdentity, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + }, + }, + }) +} + +// Resource Identity was added after v6.3.0 +func TestAccELBV2TargetGroup_Identity_ExistingResource(t *testing.T) { + ctx := acctest.Context(t) + + var v types.TargetGroup + resourceName := "aws_lb_target_group.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.ELBV2ServiceID), + CheckDestroy: 
testAccCheckTargetGroupDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/TargetGroup/basic_v6.3.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTargetGroupExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/TargetGroup/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + }, + }) +} + +// Resource Identity was added after v6.3.0 +func TestAccELBV2TargetGroup_Identity_ExistingResource_NoRefresh_NoChange(t *testing.T) { + ctx := acctest.Context(t) + + var v types.TargetGroup + resourceName := "aws_lb_target_group.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.ELBV2ServiceID), + CheckDestroy: testAccCheckTargetGroupDestroy(ctx), + AdditionalCLIOptions: 
&resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/TargetGroup/basic_v6.3.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTargetGroupExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/TargetGroup/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + }, + }) +} diff --git a/internal/service/elbv2/target_group_tags_gen_test.go b/internal/service/elbv2/target_group_tags_gen_test.go index ea823a2b5ad8..b01c2780afb6 100644 --- a/internal/service/elbv2/target_group_tags_gen_test.go +++ b/internal/service/elbv2/target_group_tags_gen_test.go @@ -7,7 +7,6 @@ import ( "github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2/types" "github.com/hashicorp/terraform-plugin-testing/config" - sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/knownvalue" "github.com/hashicorp/terraform-plugin-testing/plancheck" @@ -19,11 +18,12 @@ import ( func TestAccELBV2TargetGroup_tags(t *testing.T) { ctx := 
acctest.Context(t) + var v types.TargetGroup resourceName := "aws_lb_target_group.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ELBV2ServiceID), CheckDestroy: testAccCheckTargetGroupDestroy(ctx), @@ -213,11 +213,12 @@ func TestAccELBV2TargetGroup_tags(t *testing.T) { func TestAccELBV2TargetGroup_tags_null(t *testing.T) { ctx := acctest.Context(t) + var v types.TargetGroup resourceName := "aws_lb_target_group.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ELBV2ServiceID), CheckDestroy: testAccCheckTargetGroupDestroy(ctx), @@ -283,11 +284,12 @@ func TestAccELBV2TargetGroup_tags_null(t *testing.T) { func TestAccELBV2TargetGroup_tags_EmptyMap(t *testing.T) { ctx := acctest.Context(t) + var v types.TargetGroup resourceName := "aws_lb_target_group.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ELBV2ServiceID), CheckDestroy: testAccCheckTargetGroupDestroy(ctx), @@ -349,11 +351,12 @@ func TestAccELBV2TargetGroup_tags_EmptyMap(t *testing.T) { func TestAccELBV2TargetGroup_tags_AddOnUpdate(t *testing.T) { ctx := acctest.Context(t) + var v types.TargetGroup resourceName := "aws_lb_target_group.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := 
acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ELBV2ServiceID), CheckDestroy: testAccCheckTargetGroupDestroy(ctx), @@ -433,11 +436,12 @@ func TestAccELBV2TargetGroup_tags_AddOnUpdate(t *testing.T) { func TestAccELBV2TargetGroup_tags_EmptyTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) + var v types.TargetGroup resourceName := "aws_lb_target_group.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ELBV2ServiceID), CheckDestroy: testAccCheckTargetGroupDestroy(ctx), @@ -528,11 +532,12 @@ func TestAccELBV2TargetGroup_tags_EmptyTag_OnCreate(t *testing.T) { func TestAccELBV2TargetGroup_tags_EmptyTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + var v types.TargetGroup resourceName := "aws_lb_target_group.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ELBV2ServiceID), CheckDestroy: testAccCheckTargetGroupDestroy(ctx), @@ -671,11 +676,12 @@ func TestAccELBV2TargetGroup_tags_EmptyTag_OnUpdate_Add(t *testing.T) { func TestAccELBV2TargetGroup_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + var v types.TargetGroup resourceName := "aws_lb_target_group.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + 
acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ELBV2ServiceID), CheckDestroy: testAccCheckTargetGroupDestroy(ctx), @@ -763,11 +769,12 @@ func TestAccELBV2TargetGroup_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { func TestAccELBV2TargetGroup_tags_DefaultTags_providerOnly(t *testing.T) { ctx := acctest.Context(t) + var v types.TargetGroup resourceName := "aws_lb_target_group.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ELBV2ServiceID), CheckDestroy: testAccCheckTargetGroupDestroy(ctx), @@ -956,11 +963,12 @@ func TestAccELBV2TargetGroup_tags_DefaultTags_providerOnly(t *testing.T) { func TestAccELBV2TargetGroup_tags_DefaultTags_nonOverlapping(t *testing.T) { ctx := acctest.Context(t) + var v types.TargetGroup resourceName := "aws_lb_target_group.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ELBV2ServiceID), CheckDestroy: testAccCheckTargetGroupDestroy(ctx), @@ -1125,11 +1133,12 @@ func TestAccELBV2TargetGroup_tags_DefaultTags_nonOverlapping(t *testing.T) { func TestAccELBV2TargetGroup_tags_DefaultTags_overlapping(t *testing.T) { ctx := acctest.Context(t) + var v types.TargetGroup resourceName := "aws_lb_target_group.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: 
func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ELBV2ServiceID), CheckDestroy: testAccCheckTargetGroupDestroy(ctx), @@ -1310,11 +1319,12 @@ func TestAccELBV2TargetGroup_tags_DefaultTags_overlapping(t *testing.T) { func TestAccELBV2TargetGroup_tags_DefaultTags_updateToProviderOnly(t *testing.T) { ctx := acctest.Context(t) + var v types.TargetGroup resourceName := "aws_lb_target_group.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ELBV2ServiceID), CheckDestroy: testAccCheckTargetGroupDestroy(ctx), @@ -1403,11 +1413,12 @@ func TestAccELBV2TargetGroup_tags_DefaultTags_updateToProviderOnly(t *testing.T) func TestAccELBV2TargetGroup_tags_DefaultTags_updateToResourceOnly(t *testing.T) { ctx := acctest.Context(t) + var v types.TargetGroup resourceName := "aws_lb_target_group.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ELBV2ServiceID), CheckDestroy: testAccCheckTargetGroupDestroy(ctx), @@ -1495,11 +1506,12 @@ func TestAccELBV2TargetGroup_tags_DefaultTags_updateToResourceOnly(t *testing.T) func TestAccELBV2TargetGroup_tags_DefaultTags_emptyResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v types.TargetGroup resourceName := "aws_lb_target_group.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, 
t) }, ErrorCheck: acctest.ErrorCheck(t, names.ELBV2ServiceID), CheckDestroy: testAccCheckTargetGroupDestroy(ctx), @@ -1563,11 +1575,12 @@ func TestAccELBV2TargetGroup_tags_DefaultTags_emptyResourceTag(t *testing.T) { func TestAccELBV2TargetGroup_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T) { ctx := acctest.Context(t) + var v types.TargetGroup resourceName := "aws_lb_target_group.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ELBV2ServiceID), CheckDestroy: testAccCheckTargetGroupDestroy(ctx), @@ -1623,11 +1636,12 @@ func TestAccELBV2TargetGroup_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T) func TestAccELBV2TargetGroup_tags_DefaultTags_nullOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v types.TargetGroup resourceName := "aws_lb_target_group.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ELBV2ServiceID), CheckDestroy: testAccCheckTargetGroupDestroy(ctx), @@ -1688,11 +1702,12 @@ func TestAccELBV2TargetGroup_tags_DefaultTags_nullOverlappingResourceTag(t *test func TestAccELBV2TargetGroup_tags_DefaultTags_nullNonOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v types.TargetGroup resourceName := "aws_lb_target_group.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, 
ErrorCheck: acctest.ErrorCheck(t, names.ELBV2ServiceID), CheckDestroy: testAccCheckTargetGroupDestroy(ctx), @@ -1753,11 +1768,12 @@ func TestAccELBV2TargetGroup_tags_DefaultTags_nullNonOverlappingResourceTag(t *t func TestAccELBV2TargetGroup_tags_ComputedTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) + var v types.TargetGroup resourceName := "aws_lb_target_group.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ELBV2ServiceID), CheckDestroy: testAccCheckTargetGroupDestroy(ctx), @@ -1811,11 +1827,12 @@ func TestAccELBV2TargetGroup_tags_ComputedTag_OnCreate(t *testing.T) { func TestAccELBV2TargetGroup_tags_ComputedTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + var v types.TargetGroup resourceName := "aws_lb_target_group.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ELBV2ServiceID), CheckDestroy: testAccCheckTargetGroupDestroy(ctx), @@ -1911,11 +1928,12 @@ func TestAccELBV2TargetGroup_tags_ComputedTag_OnUpdate_Add(t *testing.T) { func TestAccELBV2TargetGroup_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + var v types.TargetGroup resourceName := "aws_lb_target_group.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ELBV2ServiceID), 
CheckDestroy: testAccCheckTargetGroupDestroy(ctx), @@ -2001,11 +2019,12 @@ func TestAccELBV2TargetGroup_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { func TestAccELBV2TargetGroup_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { ctx := acctest.Context(t) + var v types.TargetGroup resourceName := "aws_lb_target_group.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ELBV2ServiceID), CheckDestroy: testAccCheckTargetGroupDestroy(ctx), @@ -2163,11 +2182,12 @@ func TestAccELBV2TargetGroup_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { func TestAccELBV2TargetGroup_tags_IgnoreTags_Overlap_ResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v types.TargetGroup resourceName := "aws_lb_target_group.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ELBV2ServiceID), CheckDestroy: testAccCheckTargetGroupDestroy(ctx), diff --git a/internal/service/elbv2/testdata/Listener/basic/main_gen.tf b/internal/service/elbv2/testdata/Listener/basic/main_gen.tf new file mode 100644 index 000000000000..aadf985fb127 --- /dev/null +++ b/internal/service/elbv2/testdata/Listener/basic/main_gen.tf @@ -0,0 +1,97 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_lb_listener" "test" { + load_balancer_arn = aws_lb.test.id + protocol = "HTTP" + port = "80" + + default_action { + target_group_arn = aws_lb_target_group.test.id + type = "forward" + } +} + +resource "aws_lb" "test" { + name = var.rName + internal = true + security_groups = [aws_security_group.test.id] + subnets = aws_subnet.test[*].id + + idle_timeout = 30 + enable_deletion_protection = false +} + +resource "aws_lb_target_group" "test" { + name = var.rName + port = 8080 + protocol = "HTTP" + vpc_id = aws_vpc.test.id + + health_check { + path = "/health" + interval = 60 + port = 8081 + protocol = "HTTP" + timeout = 3 + healthy_threshold = 3 + unhealthy_threshold = 3 + matcher = "200-299" + } +} + +resource "aws_security_group" "test" { + name = var.rName + description = "Used for ALB Testing" + vpc_id = aws_vpc.test.id + + ingress { + from_port = 0 + to_port = 0 + protocol = "-1" + cidr_blocks = ["0.0.0.0/0"] + } + + egress { + from_port = 0 + to_port = 0 + protocol = "-1" + cidr_blocks = ["0.0.0.0/0"] + } +} + +# acctest.ConfigVPCWithSubnets(rName, 2) + +resource "aws_vpc" "test" { + cidr_block = "10.0.0.0/16" +} + +resource "aws_subnet" "test" { + count = 2 + + vpc_id = aws_vpc.test.id + availability_zone = data.aws_availability_zones.available.names[count.index] + cidr_block = cidrsubnet(aws_vpc.test.cidr_block, 8, count.index) +} + +# acctest.ConfigAvailableAZsNoOptInDefaultExclude + +data "aws_availability_zones" "available" { + exclude_zone_ids = local.default_exclude_zone_ids + state = "available" + + filter { + name = "opt-in-status" + values = ["opt-in-not-required"] + } +} + +locals { + default_exclude_zone_ids = ["usw2-az4", "usgw1-az2"] +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} diff --git a/internal/service/elbv2/testdata/Listener/basic_v6.3.0/main_gen.tf b/internal/service/elbv2/testdata/Listener/basic_v6.3.0/main_gen.tf new file mode 100644 
index 000000000000..1e0bb326ab3c --- /dev/null +++ b/internal/service/elbv2/testdata/Listener/basic_v6.3.0/main_gen.tf @@ -0,0 +1,107 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resource "aws_lb_listener" "test" { + load_balancer_arn = aws_lb.test.id + protocol = "HTTP" + port = "80" + + default_action { + target_group_arn = aws_lb_target_group.test.id + type = "forward" + } +} + +resource "aws_lb" "test" { + name = var.rName + internal = true + security_groups = [aws_security_group.test.id] + subnets = aws_subnet.test[*].id + + idle_timeout = 30 + enable_deletion_protection = false +} + +resource "aws_lb_target_group" "test" { + name = var.rName + port = 8080 + protocol = "HTTP" + vpc_id = aws_vpc.test.id + + health_check { + path = "/health" + interval = 60 + port = 8081 + protocol = "HTTP" + timeout = 3 + healthy_threshold = 3 + unhealthy_threshold = 3 + matcher = "200-299" + } +} + +resource "aws_security_group" "test" { + name = var.rName + description = "Used for ALB Testing" + vpc_id = aws_vpc.test.id + + ingress { + from_port = 0 + to_port = 0 + protocol = "-1" + cidr_blocks = ["0.0.0.0/0"] + } + + egress { + from_port = 0 + to_port = 0 + protocol = "-1" + cidr_blocks = ["0.0.0.0/0"] + } +} + +# acctest.ConfigVPCWithSubnets(rName, 2) + +resource "aws_vpc" "test" { + cidr_block = "10.0.0.0/16" +} + +resource "aws_subnet" "test" { + count = 2 + + vpc_id = aws_vpc.test.id + availability_zone = data.aws_availability_zones.available.names[count.index] + cidr_block = cidrsubnet(aws_vpc.test.cidr_block, 8, count.index) +} + +# acctest.ConfigAvailableAZsNoOptInDefaultExclude + +data "aws_availability_zones" "available" { + exclude_zone_ids = local.default_exclude_zone_ids + state = "available" + + filter { + name = "opt-in-status" + values = ["opt-in-not-required"] + } +} + +locals { + default_exclude_zone_ids = ["usw2-az4", "usgw1-az2"] +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} 
+terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "6.3.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/elbv2/testdata/Listener/region_override/main_gen.tf b/internal/service/elbv2/testdata/Listener/region_override/main_gen.tf new file mode 100644 index 000000000000..1d94de4658d5 --- /dev/null +++ b/internal/service/elbv2/testdata/Listener/region_override/main_gen.tf @@ -0,0 +1,117 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resource "aws_lb_listener" "test" { + region = var.region + + load_balancer_arn = aws_lb.test.id + protocol = "HTTP" + port = "80" + + default_action { + target_group_arn = aws_lb_target_group.test.id + type = "forward" + } +} + +resource "aws_lb" "test" { + region = var.region + + name = var.rName + internal = true + security_groups = [aws_security_group.test.id] + subnets = aws_subnet.test[*].id + + idle_timeout = 30 + enable_deletion_protection = false +} + +resource "aws_lb_target_group" "test" { + region = var.region + + name = var.rName + port = 8080 + protocol = "HTTP" + vpc_id = aws_vpc.test.id + + health_check { + path = "/health" + interval = 60 + port = 8081 + protocol = "HTTP" + timeout = 3 + healthy_threshold = 3 + unhealthy_threshold = 3 + matcher = "200-299" + } +} + +resource "aws_security_group" "test" { + region = var.region + + name = var.rName + description = "Used for ALB Testing" + vpc_id = aws_vpc.test.id + + ingress { + from_port = 0 + to_port = 0 + protocol = "-1" + cidr_blocks = ["0.0.0.0/0"] + } + + egress { + from_port = 0 + to_port = 0 + protocol = "-1" + cidr_blocks = ["0.0.0.0/0"] + } +} + +# acctest.ConfigVPCWithSubnets(rName, 2) + +resource "aws_vpc" "test" { + region = var.region + + cidr_block = "10.0.0.0/16" +} + +resource "aws_subnet" "test" { + region = var.region + + count = 2 + + vpc_id = aws_vpc.test.id + availability_zone = data.aws_availability_zones.available.names[count.index] + cidr_block = 
cidrsubnet(aws_vpc.test.cidr_block, 8, count.index) +} + +# acctest.ConfigAvailableAZsNoOptInDefaultExclude + +data "aws_availability_zones" "available" { + region = var.region + + exclude_zone_ids = local.default_exclude_zone_ids + state = "available" + + filter { + name = "opt-in-status" + values = ["opt-in-not-required"] + } +} + +locals { + default_exclude_zone_ids = ["usw2-az4", "usgw1-az2"] +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} + +variable "region" { + description = "Region to deploy resource in" + type = string + nullable = false +} diff --git a/internal/service/elbv2/testdata/ListenerRule/basic/main_gen.tf b/internal/service/elbv2/testdata/ListenerRule/basic/main_gen.tf new file mode 100644 index 000000000000..db094ac7c6c4 --- /dev/null +++ b/internal/service/elbv2/testdata/ListenerRule/basic/main_gen.tf @@ -0,0 +1,112 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resource "aws_lb_listener_rule" "test" { + listener_arn = aws_lb_listener.test.arn + priority = 100 + + action { + type = "forward" + target_group_arn = aws_lb_target_group.test.arn + } + + condition { + path_pattern { + values = ["/static/*"] + } + } +} + +resource "aws_lb_listener" "test" { + load_balancer_arn = aws_lb.test.id + protocol = "HTTP" + port = "80" + + default_action { + target_group_arn = aws_lb_target_group.test.id + type = "forward" + } +} + +resource "aws_security_group" "test" { + name = var.rName + vpc_id = aws_vpc.test.id + + ingress { + from_port = 0 + to_port = 0 + protocol = "-1" + cidr_blocks = ["0.0.0.0/0"] + } + + egress { + from_port = 0 + to_port = 0 + protocol = "-1" + cidr_blocks = ["0.0.0.0/0"] + } +} + +resource "aws_lb" "test" { + name = var.rName + internal = true + security_groups = [aws_security_group.test.id] + subnets = aws_subnet.test[*].id + + idle_timeout = 30 + enable_deletion_protection = false +} + +resource "aws_lb_target_group" "test" { + name = var.rName + port 
= 8080 + protocol = "HTTP" + vpc_id = aws_vpc.test.id + + health_check { + path = "/health" + interval = 60 + port = 8081 + protocol = "HTTP" + timeout = 3 + healthy_threshold = 3 + unhealthy_threshold = 3 + matcher = "200-299" + } +} + +# acctest.ConfigVPCWithSubnets(rName, 2) + +resource "aws_vpc" "test" { + cidr_block = "10.0.0.0/16" +} + +resource "aws_subnet" "test" { + count = 2 + + vpc_id = aws_vpc.test.id + availability_zone = data.aws_availability_zones.available.names[count.index] + cidr_block = cidrsubnet(aws_vpc.test.cidr_block, 8, count.index) +} + +# acctest.ConfigAvailableAZsNoOptInDefaultExclude + +data "aws_availability_zones" "available" { + exclude_zone_ids = local.default_exclude_zone_ids + state = "available" + + filter { + name = "opt-in-status" + values = ["opt-in-not-required"] + } +} + +locals { + default_exclude_zone_ids = ["usw2-az4", "usgw1-az2"] +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} diff --git a/internal/service/elbv2/testdata/ListenerRule/basic_v6.3.0/main_gen.tf b/internal/service/elbv2/testdata/ListenerRule/basic_v6.3.0/main_gen.tf new file mode 100644 index 000000000000..f02c706a17b4 --- /dev/null +++ b/internal/service/elbv2/testdata/ListenerRule/basic_v6.3.0/main_gen.tf @@ -0,0 +1,122 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_lb_listener_rule" "test" { + listener_arn = aws_lb_listener.test.arn + priority = 100 + + action { + type = "forward" + target_group_arn = aws_lb_target_group.test.arn + } + + condition { + path_pattern { + values = ["/static/*"] + } + } +} + +resource "aws_lb_listener" "test" { + load_balancer_arn = aws_lb.test.id + protocol = "HTTP" + port = "80" + + default_action { + target_group_arn = aws_lb_target_group.test.id + type = "forward" + } +} + +resource "aws_security_group" "test" { + name = var.rName + vpc_id = aws_vpc.test.id + + ingress { + from_port = 0 + to_port = 0 + protocol = "-1" + cidr_blocks = ["0.0.0.0/0"] + } + + egress { + from_port = 0 + to_port = 0 + protocol = "-1" + cidr_blocks = ["0.0.0.0/0"] + } +} + +resource "aws_lb" "test" { + name = var.rName + internal = true + security_groups = [aws_security_group.test.id] + subnets = aws_subnet.test[*].id + + idle_timeout = 30 + enable_deletion_protection = false +} + +resource "aws_lb_target_group" "test" { + name = var.rName + port = 8080 + protocol = "HTTP" + vpc_id = aws_vpc.test.id + + health_check { + path = "/health" + interval = 60 + port = 8081 + protocol = "HTTP" + timeout = 3 + healthy_threshold = 3 + unhealthy_threshold = 3 + matcher = "200-299" + } +} + +# acctest.ConfigVPCWithSubnets(rName, 2) + +resource "aws_vpc" "test" { + cidr_block = "10.0.0.0/16" +} + +resource "aws_subnet" "test" { + count = 2 + + vpc_id = aws_vpc.test.id + availability_zone = data.aws_availability_zones.available.names[count.index] + cidr_block = cidrsubnet(aws_vpc.test.cidr_block, 8, count.index) +} + +# acctest.ConfigAvailableAZsNoOptInDefaultExclude + +data "aws_availability_zones" "available" { + exclude_zone_ids = local.default_exclude_zone_ids + state = "available" + + filter { + name = "opt-in-status" + values = ["opt-in-not-required"] + } +} + +locals { + default_exclude_zone_ids = ["usw2-az4", "usgw1-az2"] +} + +variable "rName" { + description = "Name 
for resource" + type = string + nullable = false +} +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "6.3.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/elbv2/testdata/ListenerRule/region_override/main_gen.tf b/internal/service/elbv2/testdata/ListenerRule/region_override/main_gen.tf new file mode 100644 index 000000000000..5418757edab5 --- /dev/null +++ b/internal/service/elbv2/testdata/ListenerRule/region_override/main_gen.tf @@ -0,0 +1,134 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resource "aws_lb_listener_rule" "test" { + region = var.region + + listener_arn = aws_lb_listener.test.arn + priority = 100 + + action { + type = "forward" + target_group_arn = aws_lb_target_group.test.arn + } + + condition { + path_pattern { + values = ["/static/*"] + } + } +} + +resource "aws_lb_listener" "test" { + region = var.region + + load_balancer_arn = aws_lb.test.id + protocol = "HTTP" + port = "80" + + default_action { + target_group_arn = aws_lb_target_group.test.id + type = "forward" + } +} + +resource "aws_security_group" "test" { + region = var.region + + name = var.rName + vpc_id = aws_vpc.test.id + + ingress { + from_port = 0 + to_port = 0 + protocol = "-1" + cidr_blocks = ["0.0.0.0/0"] + } + + egress { + from_port = 0 + to_port = 0 + protocol = "-1" + cidr_blocks = ["0.0.0.0/0"] + } +} + +resource "aws_lb" "test" { + region = var.region + + name = var.rName + internal = true + security_groups = [aws_security_group.test.id] + subnets = aws_subnet.test[*].id + + idle_timeout = 30 + enable_deletion_protection = false +} + +resource "aws_lb_target_group" "test" { + region = var.region + + name = var.rName + port = 8080 + protocol = "HTTP" + vpc_id = aws_vpc.test.id + + health_check { + path = "/health" + interval = 60 + port = 8081 + protocol = "HTTP" + timeout = 3 + healthy_threshold = 3 + unhealthy_threshold = 3 + matcher = "200-299" + } +} + +# acctest.ConfigVPCWithSubnets(rName, 2) + 
+resource "aws_vpc" "test" { + region = var.region + + cidr_block = "10.0.0.0/16" +} + +resource "aws_subnet" "test" { + region = var.region + + count = 2 + + vpc_id = aws_vpc.test.id + availability_zone = data.aws_availability_zones.available.names[count.index] + cidr_block = cidrsubnet(aws_vpc.test.cidr_block, 8, count.index) +} + +# acctest.ConfigAvailableAZsNoOptInDefaultExclude + +data "aws_availability_zones" "available" { + region = var.region + + exclude_zone_ids = local.default_exclude_zone_ids + state = "available" + + filter { + name = "opt-in-status" + values = ["opt-in-not-required"] + } +} + +locals { + default_exclude_zone_ids = ["usw2-az4", "usgw1-az2"] +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} + +variable "region" { + description = "Region to deploy resource in" + type = string + nullable = false +} diff --git a/internal/service/elbv2/testdata/LoadBalancer/basic_v5.100.0/main_gen.tf b/internal/service/elbv2/testdata/LoadBalancer/basic_v5.100.0/main_gen.tf new file mode 100644 index 000000000000..ff7a58dd26fd --- /dev/null +++ b/internal/service/elbv2/testdata/LoadBalancer/basic_v5.100.0/main_gen.tf @@ -0,0 +1,77 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_lb" "test" { + name = var.rName + internal = true + security_groups = [aws_security_group.test.id] + subnets = aws_subnet.test[*].id + + idle_timeout = 30 + enable_deletion_protection = false +} + +resource "aws_security_group" "test" { + name = var.rName + vpc_id = aws_vpc.test.id + + ingress { + from_port = 0 + to_port = 0 + protocol = "-1" + cidr_blocks = ["0.0.0.0/0"] + } + + egress { + from_port = 0 + to_port = 0 + protocol = "-1" + cidr_blocks = ["0.0.0.0/0"] + } +} + +# acctest.ConfigVPCWithSubnets(rName, 2) + +resource "aws_vpc" "test" { + cidr_block = "10.0.0.0/16" +} + +resource "aws_subnet" "test" { + count = 2 + + vpc_id = aws_vpc.test.id + availability_zone = data.aws_availability_zones.available.names[count.index] + cidr_block = cidrsubnet(aws_vpc.test.cidr_block, 8, count.index) +} + +# acctest.ConfigAvailableAZsNoOptInDefaultExclude + +data "aws_availability_zones" "available" { + exclude_zone_ids = local.default_exclude_zone_ids + state = "available" + + filter { + name = "opt-in-status" + values = ["opt-in-not-required"] + } +} + +locals { + default_exclude_zone_ids = ["usw2-az4", "usgw1-az2"] +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "5.100.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/elbv2/testdata/LoadBalancer/basic_v6.0.0/main_gen.tf b/internal/service/elbv2/testdata/LoadBalancer/basic_v6.0.0/main_gen.tf new file mode 100644 index 000000000000..2603c3486e49 --- /dev/null +++ b/internal/service/elbv2/testdata/LoadBalancer/basic_v6.0.0/main_gen.tf @@ -0,0 +1,77 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_lb" "test" { + name = var.rName + internal = true + security_groups = [aws_security_group.test.id] + subnets = aws_subnet.test[*].id + + idle_timeout = 30 + enable_deletion_protection = false +} + +resource "aws_security_group" "test" { + name = var.rName + vpc_id = aws_vpc.test.id + + ingress { + from_port = 0 + to_port = 0 + protocol = "-1" + cidr_blocks = ["0.0.0.0/0"] + } + + egress { + from_port = 0 + to_port = 0 + protocol = "-1" + cidr_blocks = ["0.0.0.0/0"] + } +} + +# acctest.ConfigVPCWithSubnets(rName, 2) + +resource "aws_vpc" "test" { + cidr_block = "10.0.0.0/16" +} + +resource "aws_subnet" "test" { + count = 2 + + vpc_id = aws_vpc.test.id + availability_zone = data.aws_availability_zones.available.names[count.index] + cidr_block = cidrsubnet(aws_vpc.test.cidr_block, 8, count.index) +} + +# acctest.ConfigAvailableAZsNoOptInDefaultExclude + +data "aws_availability_zones" "available" { + exclude_zone_ids = local.default_exclude_zone_ids + state = "available" + + filter { + name = "opt-in-status" + values = ["opt-in-not-required"] + } +} + +locals { + default_exclude_zone_ids = ["usw2-az4", "usgw1-az2"] +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "6.0.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/elbv2/testdata/TargetGroup/basic/main_gen.tf b/internal/service/elbv2/testdata/TargetGroup/basic/main_gen.tf new file mode 100644 index 000000000000..6ba020d6ab8d --- /dev/null +++ b/internal/service/elbv2/testdata/TargetGroup/basic/main_gen.tf @@ -0,0 +1,38 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_lb_target_group" "test" { + name = var.rName + port = 443 + protocol = "HTTPS" + vpc_id = aws_vpc.test.id + + deregistration_delay = 200 + slow_start = 0 + + stickiness { + type = "lb_cookie" + cookie_duration = 10000 + } + + health_check { + path = "/health" + interval = 60 + port = 8081 + protocol = "HTTP" + timeout = 3 + healthy_threshold = 3 + unhealthy_threshold = 3 + matcher = "200-299" + } +} + +resource "aws_vpc" "test" { + cidr_block = "10.0.0.0/16" +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} diff --git a/internal/service/elbv2/testdata/TargetGroup/basic_v6.3.0/main_gen.tf b/internal/service/elbv2/testdata/TargetGroup/basic_v6.3.0/main_gen.tf new file mode 100644 index 000000000000..1047c5e7168c --- /dev/null +++ b/internal/service/elbv2/testdata/TargetGroup/basic_v6.3.0/main_gen.tf @@ -0,0 +1,48 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resource "aws_lb_target_group" "test" { + name = var.rName + port = 443 + protocol = "HTTPS" + vpc_id = aws_vpc.test.id + + deregistration_delay = 200 + slow_start = 0 + + stickiness { + type = "lb_cookie" + cookie_duration = 10000 + } + + health_check { + path = "/health" + interval = 60 + port = 8081 + protocol = "HTTP" + timeout = 3 + healthy_threshold = 3 + unhealthy_threshold = 3 + matcher = "200-299" + } +} + +resource "aws_vpc" "test" { + cidr_block = "10.0.0.0/16" +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "6.3.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/elbv2/testdata/TargetGroup/region_override/main_gen.tf b/internal/service/elbv2/testdata/TargetGroup/region_override/main_gen.tf new file mode 100644 index 000000000000..37255dbd3c3b --- /dev/null +++ 
b/internal/service/elbv2/testdata/TargetGroup/region_override/main_gen.tf @@ -0,0 +1,48 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resource "aws_lb_target_group" "test" { + region = var.region + + name = var.rName + port = 443 + protocol = "HTTPS" + vpc_id = aws_vpc.test.id + + deregistration_delay = 200 + slow_start = 0 + + stickiness { + type = "lb_cookie" + cookie_duration = 10000 + } + + health_check { + path = "/health" + interval = 60 + port = 8081 + protocol = "HTTP" + timeout = 3 + healthy_threshold = 3 + unhealthy_threshold = 3 + matcher = "200-299" + } +} + +resource "aws_vpc" "test" { + region = var.region + + cidr_block = "10.0.0.0/16" +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} + +variable "region" { + description = "Region to deploy resource in" + type = string + nullable = false +} diff --git a/internal/service/elbv2/testdata/TrustStore/basic/main_gen.tf b/internal/service/elbv2/testdata/TrustStore/basic/main_gen.tf new file mode 100644 index 000000000000..aef08ee9d8a6 --- /dev/null +++ b/internal/service/elbv2/testdata/TrustStore/basic/main_gen.tf @@ -0,0 +1,81 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_lb_trust_store" "test" { + name = var.rName + ca_certificates_bundle_s3_bucket = aws_s3_bucket.test.bucket + ca_certificates_bundle_s3_key = aws_s3_object.test.key +} + +resource "aws_s3_bucket" "test" { + bucket = var.rName + force_destroy = true +} + +resource "aws_s3_bucket_ownership_controls" "test" { + bucket = aws_s3_bucket.test.id + + rule { + object_ownership = "BucketOwnerEnforced" + } +} + +resource "aws_s3_bucket_public_access_block" "test" { + bucket = aws_s3_bucket.test.id + + block_public_acls = true + block_public_policy = true + ignore_public_acls = true + restrict_public_buckets = true +} + +resource "aws_s3_object" "test" { + bucket = aws_s3_bucket.test.bucket + key = "${var.rName}.pem" + content = <" + { + ConfigDirectory: config.StaticDirectory("testdata/TrustStore/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ImportStateKind: resource.ImportCommandWithID, + ImportStateIdFunc: acctest.CrossRegionImportStateIdFunc(resourceName), + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "ca_certificates_bundle_s3_bucket", "ca_certificates_bundle_s3_key", + }, + }, + + // Step 3: Import command without appended "@" + { + ConfigDirectory: config.StaticDirectory("testdata/TrustStore/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ImportStateKind: resource.ImportCommandWithID, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "ca_certificates_bundle_s3_bucket", "ca_certificates_bundle_s3_key", + }, + }, + + // Step 4: Import block with Import ID and appended "@" + { + ConfigDirectory: 
config.StaticDirectory("testdata/TrustStore/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportStateIdFunc: acctest.CrossRegionImportStateIdFunc(resourceName), + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + ExpectNonEmptyPlan: true, + }, + + // Step 5: Import block with Import ID and no appended "@" + { + ConfigDirectory: config.StaticDirectory("testdata/TrustStore/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + ExpectNonEmptyPlan: true, + }, + + // Step 6: Import block with Resource Identity + { + ConfigDirectory: config.StaticDirectory("testdata/TrustStore/region_override/"), + ConfigVariables: 
config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithResourceIdentity, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + ExpectNonEmptyPlan: true, + }, + }, + }) +} + +// Resource Identity was added after v6.3.0 +func TestAccELBV2TrustStore_Identity_ExistingResource(t *testing.T) { + ctx := acctest.Context(t) + + var v types.TrustStore + resourceName := "aws_lb_trust_store.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.ELBV2ServiceID), + CheckDestroy: testAccCheckTrustStoreDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/TrustStore/basic_v6.3.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTrustStoreExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: 
config.StaticDirectory("testdata/TrustStore/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + }, + }) +} + +// Resource Identity was added after v6.3.0 +func TestAccELBV2TrustStore_Identity_ExistingResource_NoRefresh_NoChange(t *testing.T) { + ctx := acctest.Context(t) + + var v types.TrustStore + resourceName := "aws_lb_trust_store.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.ELBV2ServiceID), + CheckDestroy: testAccCheckTrustStoreDestroy(ctx), + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/TrustStore/basic_v6.3.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTrustStoreExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: 
acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/TrustStore/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + }, + }) +} diff --git a/internal/service/elbv2/trust_store_revocation.go b/internal/service/elbv2/trust_store_revocation.go index a98faf509813..4348e56db979 100644 --- a/internal/service/elbv2/trust_store_revocation.go +++ b/internal/service/elbv2/trust_store_revocation.go @@ -109,7 +109,7 @@ func resourceTrustStoreRevocationCreate(ctx context.Context, d *schema.ResourceD d.SetId(id) - _, err = tfresource.RetryWhenNotFound(ctx, d.Timeout(schema.TimeoutCreate), func() (any, error) { + _, err = tfresource.RetryWhenNotFound(ctx, d.Timeout(schema.TimeoutCreate), func(ctx context.Context) (any, error) { return findTrustStoreRevocationByTwoPartKey(ctx, conn, trustStoreARN, revocationID) }) diff --git a/internal/service/elbv2/trust_store_tags_gen_test.go b/internal/service/elbv2/trust_store_tags_gen_test.go index 735de85f891d..bfba0c37dd97 100644 --- a/internal/service/elbv2/trust_store_tags_gen_test.go +++ b/internal/service/elbv2/trust_store_tags_gen_test.go @@ -7,7 +7,6 @@ import ( "github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2/types" "github.com/hashicorp/terraform-plugin-testing/config" - sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/knownvalue" "github.com/hashicorp/terraform-plugin-testing/plancheck" @@ -19,11 +18,12 @@ 
import ( func TestAccELBV2TrustStore_tags(t *testing.T) { ctx := acctest.Context(t) + var v types.TrustStore resourceName := "aws_lb_trust_store.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ELBV2ServiceID), CheckDestroy: testAccCheckTrustStoreDestroy(ctx), @@ -213,11 +213,12 @@ func TestAccELBV2TrustStore_tags(t *testing.T) { func TestAccELBV2TrustStore_tags_null(t *testing.T) { ctx := acctest.Context(t) + var v types.TrustStore resourceName := "aws_lb_trust_store.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ELBV2ServiceID), CheckDestroy: testAccCheckTrustStoreDestroy(ctx), @@ -283,11 +284,12 @@ func TestAccELBV2TrustStore_tags_null(t *testing.T) { func TestAccELBV2TrustStore_tags_EmptyMap(t *testing.T) { ctx := acctest.Context(t) + var v types.TrustStore resourceName := "aws_lb_trust_store.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ELBV2ServiceID), CheckDestroy: testAccCheckTrustStoreDestroy(ctx), @@ -349,11 +351,12 @@ func TestAccELBV2TrustStore_tags_EmptyMap(t *testing.T) { func TestAccELBV2TrustStore_tags_AddOnUpdate(t *testing.T) { ctx := acctest.Context(t) + var v types.TrustStore resourceName := "aws_lb_trust_store.test" - rName := 
sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ELBV2ServiceID), CheckDestroy: testAccCheckTrustStoreDestroy(ctx), @@ -433,11 +436,12 @@ func TestAccELBV2TrustStore_tags_AddOnUpdate(t *testing.T) { func TestAccELBV2TrustStore_tags_EmptyTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) + var v types.TrustStore resourceName := "aws_lb_trust_store.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ELBV2ServiceID), CheckDestroy: testAccCheckTrustStoreDestroy(ctx), @@ -528,11 +532,12 @@ func TestAccELBV2TrustStore_tags_EmptyTag_OnCreate(t *testing.T) { func TestAccELBV2TrustStore_tags_EmptyTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + var v types.TrustStore resourceName := "aws_lb_trust_store.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ELBV2ServiceID), CheckDestroy: testAccCheckTrustStoreDestroy(ctx), @@ -671,11 +676,12 @@ func TestAccELBV2TrustStore_tags_EmptyTag_OnUpdate_Add(t *testing.T) { func TestAccELBV2TrustStore_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + var v types.TrustStore resourceName := "aws_lb_trust_store.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) 
- resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ELBV2ServiceID), CheckDestroy: testAccCheckTrustStoreDestroy(ctx), @@ -763,11 +769,12 @@ func TestAccELBV2TrustStore_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { func TestAccELBV2TrustStore_tags_DefaultTags_providerOnly(t *testing.T) { ctx := acctest.Context(t) + var v types.TrustStore resourceName := "aws_lb_trust_store.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ELBV2ServiceID), CheckDestroy: testAccCheckTrustStoreDestroy(ctx), @@ -956,11 +963,12 @@ func TestAccELBV2TrustStore_tags_DefaultTags_providerOnly(t *testing.T) { func TestAccELBV2TrustStore_tags_DefaultTags_nonOverlapping(t *testing.T) { ctx := acctest.Context(t) + var v types.TrustStore resourceName := "aws_lb_trust_store.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ELBV2ServiceID), CheckDestroy: testAccCheckTrustStoreDestroy(ctx), @@ -1125,11 +1133,12 @@ func TestAccELBV2TrustStore_tags_DefaultTags_nonOverlapping(t *testing.T) { func TestAccELBV2TrustStore_tags_DefaultTags_overlapping(t *testing.T) { ctx := acctest.Context(t) + var v types.TrustStore resourceName := "aws_lb_trust_store.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, 
resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ELBV2ServiceID), CheckDestroy: testAccCheckTrustStoreDestroy(ctx), @@ -1310,11 +1319,12 @@ func TestAccELBV2TrustStore_tags_DefaultTags_overlapping(t *testing.T) { func TestAccELBV2TrustStore_tags_DefaultTags_updateToProviderOnly(t *testing.T) { ctx := acctest.Context(t) + var v types.TrustStore resourceName := "aws_lb_trust_store.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ELBV2ServiceID), CheckDestroy: testAccCheckTrustStoreDestroy(ctx), @@ -1403,11 +1413,12 @@ func TestAccELBV2TrustStore_tags_DefaultTags_updateToProviderOnly(t *testing.T) func TestAccELBV2TrustStore_tags_DefaultTags_updateToResourceOnly(t *testing.T) { ctx := acctest.Context(t) + var v types.TrustStore resourceName := "aws_lb_trust_store.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ELBV2ServiceID), CheckDestroy: testAccCheckTrustStoreDestroy(ctx), @@ -1495,11 +1506,12 @@ func TestAccELBV2TrustStore_tags_DefaultTags_updateToResourceOnly(t *testing.T) func TestAccELBV2TrustStore_tags_DefaultTags_emptyResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v types.TrustStore resourceName := "aws_lb_trust_store.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { 
acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ELBV2ServiceID), CheckDestroy: testAccCheckTrustStoreDestroy(ctx), @@ -1563,11 +1575,12 @@ func TestAccELBV2TrustStore_tags_DefaultTags_emptyResourceTag(t *testing.T) { func TestAccELBV2TrustStore_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T) { ctx := acctest.Context(t) + var v types.TrustStore resourceName := "aws_lb_trust_store.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ELBV2ServiceID), CheckDestroy: testAccCheckTrustStoreDestroy(ctx), @@ -1623,11 +1636,12 @@ func TestAccELBV2TrustStore_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T) func TestAccELBV2TrustStore_tags_DefaultTags_nullOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v types.TrustStore resourceName := "aws_lb_trust_store.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ELBV2ServiceID), CheckDestroy: testAccCheckTrustStoreDestroy(ctx), @@ -1688,11 +1702,12 @@ func TestAccELBV2TrustStore_tags_DefaultTags_nullOverlappingResourceTag(t *testi func TestAccELBV2TrustStore_tags_DefaultTags_nullNonOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v types.TrustStore resourceName := "aws_lb_trust_store.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { 
acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ELBV2ServiceID), CheckDestroy: testAccCheckTrustStoreDestroy(ctx), @@ -1753,11 +1768,12 @@ func TestAccELBV2TrustStore_tags_DefaultTags_nullNonOverlappingResourceTag(t *te func TestAccELBV2TrustStore_tags_ComputedTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) + var v types.TrustStore resourceName := "aws_lb_trust_store.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ELBV2ServiceID), CheckDestroy: testAccCheckTrustStoreDestroy(ctx), @@ -1811,11 +1827,12 @@ func TestAccELBV2TrustStore_tags_ComputedTag_OnCreate(t *testing.T) { func TestAccELBV2TrustStore_tags_ComputedTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + var v types.TrustStore resourceName := "aws_lb_trust_store.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ELBV2ServiceID), CheckDestroy: testAccCheckTrustStoreDestroy(ctx), @@ -1911,11 +1928,12 @@ func TestAccELBV2TrustStore_tags_ComputedTag_OnUpdate_Add(t *testing.T) { func TestAccELBV2TrustStore_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + var v types.TrustStore resourceName := "aws_lb_trust_store.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, 
names.ELBV2ServiceID), CheckDestroy: testAccCheckTrustStoreDestroy(ctx), @@ -2001,11 +2019,12 @@ func TestAccELBV2TrustStore_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { func TestAccELBV2TrustStore_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { ctx := acctest.Context(t) + var v types.TrustStore resourceName := "aws_lb_trust_store.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ELBV2ServiceID), CheckDestroy: testAccCheckTrustStoreDestroy(ctx), @@ -2163,11 +2182,12 @@ func TestAccELBV2TrustStore_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { func TestAccELBV2TrustStore_tags_IgnoreTags_Overlap_ResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v types.TrustStore resourceName := "aws_lb_trust_store.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ELBV2ServiceID), CheckDestroy: testAccCheckTrustStoreDestroy(ctx), diff --git a/internal/service/emr/cluster.go b/internal/service/emr/cluster.go index 8b751279df3f..b91bfe119dc2 100644 --- a/internal/service/emr/cluster.go +++ b/internal/service/emr/cluster.go @@ -1016,7 +1016,7 @@ func resourceClusterCreate(ctx context.Context, d *schema.ResourceData, meta any } outputRaw, err := tfresource.RetryWhen(ctx, propagationTimeout, - func() (any, error) { + func(ctx context.Context) (any, error) { return conn.RunJobFlow(ctx, &input) }, func(err error) (bool, error) { @@ -1328,7 +1328,7 @@ func resourceClusterUpdate(ctx context.Context, d *schema.ResourceData, meta any const ( timeout = 1 
* time.Minute ) - _, err = tfresource.RetryUntilNotFound(ctx, timeout, func() (any, error) { + _, err = tfresource.RetryUntilNotFound(ctx, timeout, func(ctx context.Context) (any, error) { return findCoreInstanceGroupAutoScalingPolicy(ctx, conn, d.Id()) }) diff --git a/internal/service/emr/cluster_test.go b/internal/service/emr/cluster_test.go index e3c3d920cee8..2761c164ca60 100644 --- a/internal/service/emr/cluster_test.go +++ b/internal/service/emr/cluster_test.go @@ -1293,56 +1293,6 @@ func TestAccEMRCluster_keepJob(t *testing.T) { }) } -func TestAccEMRCluster_visibleToAllUsers(t *testing.T) { - ctx := acctest.Context(t) - var cluster awstypes.Cluster - - resourceName := "aws_emr_cluster.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, names.EMRServiceID), - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckClusterDestroy(ctx), - Steps: []resource.TestStep{ - { - Config: testAccClusterConfig_basic(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckClusterExists(ctx, resourceName, &cluster), - resource.TestCheckResourceAttr(resourceName, "visible_to_all_users", acctest.CtTrue), - ), - }, - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{ - "cluster_state", // Ignore RUNNING versus WAITING changes - "configurations", - "keep_job_flow_alive_when_no_steps", - }, - }, - { - Config: testAccClusterConfig_visibleToAllUsersUpdated(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckClusterExists(ctx, resourceName, &cluster), - resource.TestCheckResourceAttr(resourceName, "visible_to_all_users", acctest.CtFalse), - ), - }, - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{ - "cluster_state", // Ignore RUNNING versus WAITING changes 
- "configurations", - "keep_job_flow_alive_when_no_steps", - }, - }, - }, - }) -} - func TestAccEMRCluster_s3Logging(t *testing.T) { ctx := acctest.Context(t) var cluster awstypes.Cluster @@ -3664,61 +3614,6 @@ resource "aws_emr_cluster" "test" { `, rName, keepJob)) } -func testAccClusterConfig_visibleToAllUsersUpdated(rName string) string { - return acctest.ConfigCompose( - testAccClusterConfig_baseVPC(rName, false), - testAccClusterConfig_baseIAMServiceRole(rName), - testAccClusterConfig_baseIAMInstanceProfile(rName), - testAccClusterConfig_baseIAMAutoScalingRole(rName), - fmt.Sprintf(` -data "aws_partition" "current" {} - -resource "aws_emr_cluster" "test" { - name = %[1]q - release_label = "emr-4.6.0" - applications = ["Spark"] - - ec2_attributes { - subnet_id = aws_subnet.test.id - emr_managed_master_security_group = aws_security_group.test.id - emr_managed_slave_security_group = aws_security_group.test.id - instance_profile = aws_iam_instance_profile.emr_instance_profile.arn - } - - master_instance_group { - instance_type = "c4.large" - } - - core_instance_group { - instance_count = 1 - instance_type = "c4.large" - } - - tags = { - role = "rolename" - dns_zone = "env_zone" - env = "env" - name = "name-env" - } - - keep_job_flow_alive_when_no_steps = true - visible_to_all_users = false - - configurations = "test-fixtures/emr_configurations.json" - - depends_on = [ - aws_route_table_association.test, - aws_iam_role_policy_attachment.emr_service, - aws_iam_role_policy_attachment.emr_instance_profile, - aws_iam_role_policy_attachment.emr_autoscaling_role, - ] - - service_role = aws_iam_role.emr_service.arn - autoscaling_role = aws_iam_role.emr_autoscaling_role.arn -} -`, rName)) -} - func testAccClusterConfig_s3Logging(rName string) string { return acctest.ConfigCompose( testAccClusterConfig_baseVPC(rName, false), diff --git a/internal/service/emr/service_endpoint_resolver_gen.go b/internal/service/emr/service_endpoint_resolver_gen.go index 
843bfa64b188..4544e986be8f 100644 --- a/internal/service/emr/service_endpoint_resolver_gen.go +++ b/internal/service/emr/service_endpoint_resolver_gen.go @@ -62,7 +62,7 @@ func (r resolverV2) ResolveEndpoint(ctx context.Context, params emr.EndpointPara }) params.UseFIPS = aws.Bool(false) } else { - err = fmt.Errorf("looking up emr endpoint %q: %s", hostname, err) + err = fmt.Errorf("looking up emr endpoint %q: %w", hostname, err) return } } else { diff --git a/internal/service/emr/service_endpoints_gen_test.go b/internal/service/emr/service_endpoints_gen_test.go index fde41e3a8e04..639fef8506ae 100644 --- a/internal/service/emr/service_endpoints_gen_test.go +++ b/internal/service/emr/service_endpoints_gen_test.go @@ -521,7 +521,7 @@ func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { ) } -var errCancelOperation = fmt.Errorf("Test: Canceling request") +var errCancelOperation = errors.New("Test: Canceling request") func addCancelRequestMiddleware() func(*middleware.Stack) error { return func(stack *middleware.Stack) error { diff --git a/internal/service/emr/service_package_gen.go b/internal/service/emr/service_package_gen.go index 1e2482e25ee8..f27220c57c4b 100644 --- a/internal/service/emr/service_package_gen.go +++ b/internal/service/emr/service_package_gen.go @@ -7,7 +7,6 @@ import ( "unique" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/emr" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -126,7 +125,7 @@ func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) ( func(o *emr.Options) { if inContext, ok := conns.FromContext(ctx); ok && inContext.VCREnabled() { tflog.Info(ctx, "overriding retry behavior to immediately return VCR errors") - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(vcr.InteractionNotFoundRetryableFunc)) + 
o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), vcr.InteractionNotFoundRetryableFunc) } }, withExtraOptions(ctx, p, config), diff --git a/internal/service/emr/studio.go b/internal/service/emr/studio.go index 0d1140c9abb3..7205532a7a98 100644 --- a/internal/service/emr/studio.go +++ b/internal/service/emr/studio.go @@ -160,7 +160,7 @@ func resourceStudioCreate(ctx context.Context, d *schema.ResourceData, meta any) } outputRaw, err := tfresource.RetryWhen(ctx, propagationTimeout, - func() (any, error) { + func(ctx context.Context) (any, error) { return conn.CreateStudio(ctx, input) }, func(err error) (bool, error) { diff --git a/internal/service/emr/sweep.go b/internal/service/emr/sweep.go index 3f18ad064f94..9ac85e134bdb 100644 --- a/internal/service/emr/sweep.go +++ b/internal/service/emr/sweep.go @@ -32,7 +32,7 @@ func sweepClusters(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %w", err) + return fmt.Errorf("getting client: %w", err) } conn := client.EMRClient(ctx) input := &emr.ListClustersInput{ @@ -88,7 +88,7 @@ func sweepStudios(region string) error { client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %w", err) + return fmt.Errorf("getting client: %w", err) } conn := client.EMRClient(ctx) diff --git a/internal/service/emr/tags_gen.go b/internal/service/emr/tags_gen.go index cac2a6d067b4..38b510f51c8d 100644 --- a/internal/service/emr/tags_gen.go +++ b/internal/service/emr/tags_gen.go @@ -3,8 +3,8 @@ package emr import ( "context" - "fmt" + "github.com/YakDriver/smarterr" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/emr" awstypes "github.com/aws/aws-sdk-go-v2/service/emr/types" @@ -84,7 +84,7 @@ func updateTags(ctx context.Context, conn *emr.Client, identifier string, oldTag _, err := conn.RemoveTags(ctx, &input, 
optFns...) if err != nil { - return fmt.Errorf("untagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } @@ -99,7 +99,7 @@ func updateTags(ctx context.Context, conn *emr.Client, identifier string, oldTag _, err := conn.AddTags(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("tagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } diff --git a/internal/service/emr/validate.go b/internal/service/emr/validate.go index 86d0920fd423..cc9a2a94e6de 100644 --- a/internal/service/emr/validate.go +++ b/internal/service/emr/validate.go @@ -19,7 +19,7 @@ func validCustomAMIID(v any, k string) (ws []string, errors []error) { if !regexache.MustCompile(`^ami\-[0-9a-z]+$`).MatchString(value) { errors = append(errors, fmt.Errorf( - "%q must begin with 'ami-' and be comprised of only [0-9a-z]: %v", k, value)) + "%q must begin with 'ami-' and only contain [0-9a-z]: %v", k, value)) } return diff --git a/internal/service/emrcontainers/service_endpoint_resolver_gen.go b/internal/service/emrcontainers/service_endpoint_resolver_gen.go index 3e8dbe529d8e..884d6f9a3b51 100644 --- a/internal/service/emrcontainers/service_endpoint_resolver_gen.go +++ b/internal/service/emrcontainers/service_endpoint_resolver_gen.go @@ -62,7 +62,7 @@ func (r resolverV2) ResolveEndpoint(ctx context.Context, params emrcontainers.En }) params.UseFIPS = aws.Bool(false) } else { - err = fmt.Errorf("looking up emrcontainers endpoint %q: %s", hostname, err) + err = fmt.Errorf("looking up emrcontainers endpoint %q: %w", hostname, err) return } } else { diff --git a/internal/service/emrcontainers/service_endpoints_gen_test.go b/internal/service/emrcontainers/service_endpoints_gen_test.go index c70ff2c7c17a..50892d8759df 100644 --- a/internal/service/emrcontainers/service_endpoints_gen_test.go +++ b/internal/service/emrcontainers/service_endpoints_gen_test.go @@ -521,7 +521,7 @@ func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { ) 
} -var errCancelOperation = fmt.Errorf("Test: Canceling request") +var errCancelOperation = errors.New("Test: Canceling request") func addCancelRequestMiddleware() func(*middleware.Stack) error { return func(stack *middleware.Stack) error { diff --git a/internal/service/emrcontainers/service_package_gen.go b/internal/service/emrcontainers/service_package_gen.go index 3ca95f700770..9099419a40b3 100644 --- a/internal/service/emrcontainers/service_package_gen.go +++ b/internal/service/emrcontainers/service_package_gen.go @@ -7,7 +7,6 @@ import ( "unique" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/emrcontainers" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -84,7 +83,7 @@ func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) ( func(o *emrcontainers.Options) { if inContext, ok := conns.FromContext(ctx); ok && inContext.VCREnabled() { tflog.Info(ctx, "overriding retry behavior to immediately return VCR errors") - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(vcr.InteractionNotFoundRetryableFunc)) + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), vcr.InteractionNotFoundRetryableFunc) } }, withExtraOptions(ctx, p, config), diff --git a/internal/service/emrcontainers/sweep.go b/internal/service/emrcontainers/sweep.go index 24fa1296fbd0..8a8054be6c71 100644 --- a/internal/service/emrcontainers/sweep.go +++ b/internal/service/emrcontainers/sweep.go @@ -31,7 +31,7 @@ func sweepVirtualClusters(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.EMRContainersClient(ctx) input := &emrcontainers.ListVirtualClustersInput{} @@ -76,7 +76,7 @@ func 
sweepJobTemplates(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.EMRContainersClient(ctx) input := &emrcontainers.ListJobTemplatesInput{} diff --git a/internal/service/emrcontainers/tags_gen.go b/internal/service/emrcontainers/tags_gen.go index 0500623203e7..5c1963c592a4 100644 --- a/internal/service/emrcontainers/tags_gen.go +++ b/internal/service/emrcontainers/tags_gen.go @@ -3,8 +3,8 @@ package emrcontainers import ( "context" - "fmt" + "github.com/YakDriver/smarterr" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/emrcontainers" "github.com/hashicorp/terraform-plugin-log/tflog" @@ -26,7 +26,7 @@ func listTags(ctx context.Context, conn *emrcontainers.Client, identifier string output, err := conn.ListTagsForResource(ctx, &input, optFns...) if err != nil { - return tftags.New(ctx, nil), err + return tftags.New(ctx, nil), smarterr.NewError(err) } return keyValueTags(ctx, output.Tags), nil @@ -38,7 +38,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri tags, err := listTags(ctx, meta.(*conns.AWSClient).EMRContainersClient(ctx), identifier) if err != nil { - return err + return smarterr.NewError(err) } if inContext, ok := tftags.FromContext(ctx); ok { @@ -99,7 +99,7 @@ func updateTags(ctx context.Context, conn *emrcontainers.Client, identifier stri _, err := conn.UntagResource(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("untagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } @@ -114,7 +114,7 @@ func updateTags(ctx context.Context, conn *emrcontainers.Client, identifier stri _, err := conn.TagResource(ctx, &input, optFns...) 
if err != nil { - return fmt.Errorf("tagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } diff --git a/internal/service/emrserverless/application.go b/internal/service/emrserverless/application.go index e8865cef61b6..02f49a6295f3 100644 --- a/internal/service/emrserverless/application.go +++ b/internal/service/emrserverless/application.go @@ -15,7 +15,6 @@ import ( "github.com/aws/aws-sdk-go-v2/service/emrserverless/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -23,6 +22,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" "github.com/hashicorp/terraform-provider-aws/internal/flex" + "github.com/hashicorp/terraform-provider-aws/internal/retry" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/internal/verify" @@ -229,6 +229,27 @@ func resourceApplication() *schema.Resource { Type: schema.TypeString, Required: true, }, + "scheduler_configuration": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "max_concurrent_runs": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + ValidateFunc: validation.IntBetween(1, 1000), + }, + "queue_timeout_minutes": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + ValidateFunc: validation.IntBetween(15, 720), + }, + }, + }, + }, names.AttrTags: tftags.TagsSchema(), names.AttrTagsAll: tftags.TagsSchemaComputed(), names.AttrType: { @@ -288,6 +309,11 @@ func 
resourceApplicationCreate(ctx context.Context, d *schema.ResourceData, meta input.NetworkConfiguration = expandNetworkConfiguration(v.([]any)[0].(map[string]any)) } + // Empty block (len(v.([]any)) > 0 but v.([]any)[0] == nil) is allowed to enable scheduler_configuration with default values + if v, ok := d.GetOk("scheduler_configuration"); ok && len(v.([]any)) > 0 { + input.SchedulerConfiguration = expandSchedulerConfiguration(v.([]any)) + } + output, err := conn.CreateApplication(ctx, input) if err != nil { @@ -309,7 +335,7 @@ func resourceApplicationRead(ctx context.Context, d *schema.ResourceData, meta a application, err := findApplicationByID(ctx, conn, d.Id()) - if !d.IsNewResource() && tfresource.NotFound(err) { + if !d.IsNewResource() && retry.NotFound(err) { log.Printf("[WARN] EMR Serverless Application (%s) not found, removing from state", d.Id()) d.SetId("") return diags @@ -353,6 +379,10 @@ func resourceApplicationRead(ctx context.Context, d *schema.ResourceData, meta a return sdkdiag.AppendErrorf(diags, "setting network_configuration: %s", err) } + if err := d.Set("scheduler_configuration", flattenSchedulerConfiguration(application.SchedulerConfiguration)); err != nil { + return sdkdiag.AppendErrorf(diags, "setting scheduler_configuration: %s", err) + } + setTagsOut(ctx, application.Tags) return diags @@ -400,6 +430,16 @@ func resourceApplicationUpdate(ctx context.Context, d *schema.ResourceData, meta input.NetworkConfiguration = expandNetworkConfiguration(v.([]any)[0].(map[string]any)) } + if d.HasChange("scheduler_configuration") { + // Empty block (len(v.([]any)) > 0 but v.([]any)[0] == nil) is allowed to enable scheduler_configuration with default values + if v, ok := d.GetOk("scheduler_configuration"); ok && len(v.([]any)) > 0 { + input.SchedulerConfiguration = expandSchedulerConfiguration(v.([]any)) + } else { + // scheduler_configuration block is removed + input.SchedulerConfiguration = &types.SchedulerConfiguration{} + } + } + if v, ok := 
d.GetOk("release_label"); ok { input.ReleaseLabel = aws.String(v.(string)) } @@ -447,8 +487,7 @@ func findApplicationByID(ctx context.Context, conn *emrserverless.Client, id str if errs.IsA[*types.ResourceNotFoundException](err) { return nil, &retry.NotFoundError{ - LastError: err, - LastRequest: input, + LastError: err, } } @@ -467,8 +506,8 @@ func findApplicationByID(ctx context.Context, conn *emrserverless.Client, id str return output.Application, nil } -func statusApplication(ctx context.Context, conn *emrserverless.Client, id string) retry.StateRefreshFunc { - return func() (any, string, error) { +func statusApplication(conn *emrserverless.Client, id string) retry.StateRefreshFunc { + return func(ctx context.Context) (any, string, error) { output, err := findApplicationByID(ctx, conn, id) if tfresource.NotFound(err) { @@ -492,7 +531,7 @@ func waitApplicationCreated(ctx context.Context, conn *emrserverless.Client, id stateConf := &retry.StateChangeConf{ Pending: enum.Slice(types.ApplicationStateCreating), Target: enum.Slice(types.ApplicationStateCreated), - Refresh: statusApplication(ctx, conn, id), + Refresh: statusApplication(conn, id), Timeout: timeout, MinTimeout: minTimeout, Delay: delay, @@ -502,7 +541,7 @@ func waitApplicationCreated(ctx context.Context, conn *emrserverless.Client, id if output, ok := outputRaw.(*types.Application); ok { if stateChangeReason := output.StateDetails; stateChangeReason != nil { - tfresource.SetLastError(err, errors.New(aws.ToString(stateChangeReason))) + retry.SetLastError(err, errors.New(aws.ToString(stateChangeReason))) } return output, err @@ -520,7 +559,7 @@ func waitApplicationTerminated(ctx context.Context, conn *emrserverless.Client, stateConf := &retry.StateChangeConf{ Pending: enum.Values[types.ApplicationState](), Target: []string{}, - Refresh: statusApplication(ctx, conn, id), + Refresh: statusApplication(conn, id), Timeout: timeout, MinTimeout: minTimeout, Delay: delay, @@ -530,7 +569,7 @@ func 
waitApplicationTerminated(ctx context.Context, conn *emrserverless.Client, if output, ok := outputRaw.(*types.Application); ok { if stateChangeReason := output.StateDetails; stateChangeReason != nil { - tfresource.SetLastError(err, errors.New(aws.ToString(stateChangeReason))) + retry.SetLastError(err, errors.New(aws.ToString(stateChangeReason))) } return output, err @@ -863,3 +902,43 @@ func flattenWorkerResourceConfig(apiObject *types.WorkerResourceConfig) map[stri return tfMap } + +func expandSchedulerConfiguration(tfList []any) *types.SchedulerConfiguration { + // SchedulerConfiguration without any attributes disables the scheduler_configuration. + // If an empty block is specified, the scheduler_configuration is enabled with default values. + if tfList[0] == nil { + return &types.SchedulerConfiguration{ + MaxConcurrentRuns: aws.Int32(15), // default + QueueTimeoutMinutes: aws.Int32(360), // default + } + } + + apiObject := &types.SchedulerConfiguration{} + m := tfList[0].(map[string]any) + + if v, ok := m["max_concurrent_runs"].(int); ok && v != 0 { + apiObject.MaxConcurrentRuns = aws.Int32(int32(v)) + } + + if v, ok := m["queue_timeout_minutes"].(int); ok && v != 0 { + apiObject.QueueTimeoutMinutes = aws.Int32(int32(v)) + } + + return apiObject +} + +func flattenSchedulerConfiguration(apiObject *types.SchedulerConfiguration) []any { + if apiObject == nil { + return nil + } + + tfMap := map[string]any{} + if v := apiObject.MaxConcurrentRuns; v != nil { + tfMap["max_concurrent_runs"] = aws.ToInt32(v) + } + + if v := apiObject.QueueTimeoutMinutes; v != nil { + tfMap["queue_timeout_minutes"] = aws.ToInt32(v) + } + return []any{tfMap} +} diff --git a/internal/service/emrserverless/application_test.go b/internal/service/emrserverless/application_test.go index 7327c103dbb8..18245f1f9ef7 100644 --- a/internal/service/emrserverless/application_test.go +++ b/internal/service/emrserverless/application_test.go @@ -10,13 +10,11 @@ import ( "github.com/YakDriver/regexache" 
"github.com/aws/aws-sdk-go-v2/service/emrserverless/types" - sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" "github.com/hashicorp/terraform-provider-aws/internal/acctest" - "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/retry" tfemrserverless "github.com/hashicorp/terraform-provider-aws/internal/service/emrserverless" - "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -24,18 +22,18 @@ func TestAccEMRServerlessApplication_basic(t *testing.T) { ctx := acctest.Context(t) var application types.Application resourceName := "aws_emrserverless_application.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EMRServerlessServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckApplicationDestroy(ctx), + CheckDestroy: testAccCheckApplicationDestroy(ctx, t), Steps: []resource.TestStep{ { - Config: testAccApplicationConfig_basic(rName), + Config: testAccApplicationConfig_basic(rName, "emr-6.6.0"), Check: resource.ComposeTestCheckFunc( - testAccCheckApplicationExists(ctx, resourceName, &application), + testAccCheckApplicationExists(ctx, t, resourceName, &application), acctest.MatchResourceAttrRegionalARN(ctx, resourceName, names.AttrARN, "emr-serverless", regexache.MustCompile(`/applications/.+$`)), resource.TestCheckResourceAttr(resourceName, names.AttrName, rName), resource.TestCheckResourceAttr(resourceName, names.AttrType, "hive"), @@ -64,18 +62,18 @@ func 
TestAccEMRServerlessApplication_arch(t *testing.T) { ctx := acctest.Context(t) var application types.Application resourceName := "aws_emrserverless_application.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EMRServerlessServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckApplicationDestroy(ctx), + CheckDestroy: testAccCheckApplicationDestroy(ctx, t), Steps: []resource.TestStep{ { Config: testAccApplicationConfig_arch(rName, "ARM64"), Check: resource.ComposeTestCheckFunc( - testAccCheckApplicationExists(ctx, resourceName, &application), + testAccCheckApplicationExists(ctx, t, resourceName, &application), resource.TestCheckResourceAttr(resourceName, "architecture", "ARM64"), ), }, @@ -87,7 +85,7 @@ func TestAccEMRServerlessApplication_arch(t *testing.T) { { Config: testAccApplicationConfig_arch(rName, "X86_64"), Check: resource.ComposeTestCheckFunc( - testAccCheckApplicationExists(ctx, resourceName, &application), + testAccCheckApplicationExists(ctx, t, resourceName, &application), resource.TestCheckResourceAttr(resourceName, "architecture", "X86_64"), ), }, @@ -99,18 +97,18 @@ func TestAccEMRServerlessApplication_releaseLabel(t *testing.T) { ctx := acctest.Context(t) var application types.Application resourceName := "aws_emrserverless_application.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EMRServerlessServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: 
testAccCheckApplicationDestroy(ctx), + CheckDestroy: testAccCheckApplicationDestroy(ctx, t), Steps: []resource.TestStep{ { Config: testAccApplicationConfig_releaseLabel(rName, "emr-6.10.0"), Check: resource.ComposeTestCheckFunc( - testAccCheckApplicationExists(ctx, resourceName, &application), + testAccCheckApplicationExists(ctx, t, resourceName, &application), resource.TestCheckResourceAttr(resourceName, "release_label", "emr-6.10.0"), ), }, @@ -122,7 +120,7 @@ func TestAccEMRServerlessApplication_releaseLabel(t *testing.T) { { Config: testAccApplicationConfig_releaseLabel(rName, "emr-6.11.0"), Check: resource.ComposeTestCheckFunc( - testAccCheckApplicationExists(ctx, resourceName, &application), + testAccCheckApplicationExists(ctx, t, resourceName, &application), resource.TestCheckResourceAttr(resourceName, "release_label", "emr-6.11.0"), ), }, @@ -134,18 +132,18 @@ func TestAccEMRServerlessApplication_initialCapacity(t *testing.T) { ctx := acctest.Context(t) var application types.Application resourceName := "aws_emrserverless_application.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EMRServerlessServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckApplicationDestroy(ctx), + CheckDestroy: testAccCheckApplicationDestroy(ctx, t), Steps: []resource.TestStep{ { Config: testAccApplicationConfig_initialCapacity(rName, "2 vCPU"), Check: resource.ComposeTestCheckFunc( - testAccCheckApplicationExists(ctx, resourceName, &application), + testAccCheckApplicationExists(ctx, t, resourceName, &application), resource.TestCheckResourceAttr(resourceName, "initial_capacity.#", "1"), resource.TestCheckResourceAttr(resourceName, "initial_capacity.0.initial_capacity_type", 
"HiveDriver"), resource.TestCheckResourceAttr(resourceName, "initial_capacity.0.initial_capacity_config.#", "1"), @@ -163,7 +161,7 @@ func TestAccEMRServerlessApplication_initialCapacity(t *testing.T) { { Config: testAccApplicationConfig_initialCapacity(rName, "4 vCPU"), Check: resource.ComposeTestCheckFunc( - testAccCheckApplicationExists(ctx, resourceName, &application), + testAccCheckApplicationExists(ctx, t, resourceName, &application), resource.TestCheckResourceAttr(resourceName, "initial_capacity.#", "1"), resource.TestCheckResourceAttr(resourceName, "initial_capacity.0.initial_capacity_type", "HiveDriver"), resource.TestCheckResourceAttr(resourceName, "initial_capacity.0.initial_capacity_config.#", "1"), @@ -184,7 +182,7 @@ func TestAccEMRServerlessApplication_imageConfiguration(t *testing.T) { } var application types.Application resourceName := "aws_emrserverless_application.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) firstVersionRegex := regexache.MustCompile(`1\.0\.0`) secondVersionRegex := regexache.MustCompile(`1\.0\.1`) @@ -199,16 +197,16 @@ func TestAccEMRServerlessApplication_imageConfiguration(t *testing.T) { t.Error(err) } - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EMRServerlessServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckApplicationDestroy(ctx), + CheckDestroy: testAccCheckApplicationDestroy(ctx, t), Steps: []resource.TestStep{ { Config: firstImageConfig, Check: resource.ComposeTestCheckFunc( - testAccCheckApplicationExists(ctx, resourceName, &application), + testAccCheckApplicationExists(ctx, t, resourceName, &application), resource.TestCheckResourceAttr(resourceName, "image_configuration.#", "1"), resource.TestMatchResourceAttr(resourceName, 
"image_configuration.0.image_uri", firstVersionRegex), ), @@ -221,7 +219,7 @@ func TestAccEMRServerlessApplication_imageConfiguration(t *testing.T) { { Config: secondImageConfig, Check: resource.ComposeTestCheckFunc( - testAccCheckApplicationExists(ctx, resourceName, &application), + testAccCheckApplicationExists(ctx, t, resourceName, &application), resource.TestCheckResourceAttr(resourceName, "image_configuration.#", "1"), resource.TestMatchResourceAttr(resourceName, "image_configuration.0.image_uri", secondVersionRegex), ), @@ -234,18 +232,18 @@ func TestAccEMRServerlessApplication_interactiveConfiguration(t *testing.T) { ctx := acctest.Context(t) var application types.Application resourceName := "aws_emrserverless_application.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EMRServerlessServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckApplicationDestroy(ctx), + CheckDestroy: testAccCheckApplicationDestroy(ctx, t), Steps: []resource.TestStep{ { Config: testAccApplicationConfig_interactiveConfiguration(rName, true, true), Check: resource.ComposeTestCheckFunc( - testAccCheckApplicationExists(ctx, resourceName, &application), + testAccCheckApplicationExists(ctx, t, resourceName, &application), resource.TestCheckResourceAttr(resourceName, "interactive_configuration.#", "1"), resource.TestCheckResourceAttr(resourceName, "interactive_configuration.0.livy_endpoint_enabled", acctest.CtTrue), resource.TestCheckResourceAttr(resourceName, "interactive_configuration.0.studio_enabled", acctest.CtTrue), @@ -259,7 +257,7 @@ func TestAccEMRServerlessApplication_interactiveConfiguration(t *testing.T) { { Config: testAccApplicationConfig_interactiveConfiguration(rName, 
true, false), Check: resource.ComposeTestCheckFunc( - testAccCheckApplicationExists(ctx, resourceName, &application), + testAccCheckApplicationExists(ctx, t, resourceName, &application), resource.TestCheckResourceAttr(resourceName, "interactive_configuration.#", "1"), resource.TestCheckResourceAttr(resourceName, "interactive_configuration.0.livy_endpoint_enabled", acctest.CtTrue), resource.TestCheckResourceAttr(resourceName, "interactive_configuration.0.studio_enabled", acctest.CtFalse), @@ -268,7 +266,7 @@ func TestAccEMRServerlessApplication_interactiveConfiguration(t *testing.T) { { Config: testAccApplicationConfig_interactiveConfiguration(rName, false, true), Check: resource.ComposeTestCheckFunc( - testAccCheckApplicationExists(ctx, resourceName, &application), + testAccCheckApplicationExists(ctx, t, resourceName, &application), resource.TestCheckResourceAttr(resourceName, "interactive_configuration.#", "1"), resource.TestCheckResourceAttr(resourceName, "interactive_configuration.0.livy_endpoint_enabled", acctest.CtFalse), resource.TestCheckResourceAttr(resourceName, "interactive_configuration.0.studio_enabled", acctest.CtTrue), @@ -277,7 +275,7 @@ func TestAccEMRServerlessApplication_interactiveConfiguration(t *testing.T) { { Config: testAccApplicationConfig_interactiveConfiguration(rName, false, false), Check: resource.ComposeTestCheckFunc( - testAccCheckApplicationExists(ctx, resourceName, &application), + testAccCheckApplicationExists(ctx, t, resourceName, &application), resource.TestCheckResourceAttr(resourceName, "interactive_configuration.#", "1"), resource.TestCheckNoResourceAttr(resourceName, "interactive_configuration.0.livy_endpoint_enabled"), resource.TestCheckNoResourceAttr(resourceName, "interactive_configuration.0.studio_enabled"), @@ -291,18 +289,18 @@ func TestAccEMRServerlessApplication_maxCapacity(t *testing.T) { ctx := acctest.Context(t) var application types.Application resourceName := "aws_emrserverless_application.test" - rName := 
sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EMRServerlessServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckApplicationDestroy(ctx), + CheckDestroy: testAccCheckApplicationDestroy(ctx, t), Steps: []resource.TestStep{ { Config: testAccApplicationConfig_maxCapacity(rName, "2 vCPU"), Check: resource.ComposeTestCheckFunc( - testAccCheckApplicationExists(ctx, resourceName, &application), + testAccCheckApplicationExists(ctx, t, resourceName, &application), resource.TestCheckResourceAttr(resourceName, "maximum_capacity.#", "1"), resource.TestCheckResourceAttr(resourceName, "maximum_capacity.0.cpu", "2 vCPU"), resource.TestCheckResourceAttr(resourceName, "maximum_capacity.0.memory", "10 GB"), @@ -316,7 +314,7 @@ func TestAccEMRServerlessApplication_maxCapacity(t *testing.T) { { Config: testAccApplicationConfig_maxCapacity(rName, "4 vCPU"), Check: resource.ComposeTestCheckFunc( - testAccCheckApplicationExists(ctx, resourceName, &application), + testAccCheckApplicationExists(ctx, t, resourceName, &application), resource.TestCheckResourceAttr(resourceName, "maximum_capacity.#", "1"), resource.TestCheckResourceAttr(resourceName, "maximum_capacity.0.cpu", "4 vCPU"), resource.TestCheckResourceAttr(resourceName, "maximum_capacity.0.memory", "10 GB")), @@ -329,18 +327,18 @@ func TestAccEMRServerlessApplication_network(t *testing.T) { ctx := acctest.Context(t) var application types.Application resourceName := "aws_emrserverless_application.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { 
acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EMRServerlessServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckApplicationDestroy(ctx), + CheckDestroy: testAccCheckApplicationDestroy(ctx, t), Steps: []resource.TestStep{ { Config: testAccApplicationConfig_network(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckApplicationExists(ctx, resourceName, &application), + testAccCheckApplicationExists(ctx, t, resourceName, &application), resource.TestCheckResourceAttr(resourceName, "network_configuration.#", "1"), resource.TestCheckResourceAttr(resourceName, "network_configuration.0.security_group_ids.#", "1"), resource.TestCheckResourceAttr(resourceName, "network_configuration.0.subnet_ids.#", "2"), @@ -359,18 +357,18 @@ func TestAccEMRServerlessApplication_disappears(t *testing.T) { ctx := acctest.Context(t) var application types.Application resourceName := "aws_emrserverless_application.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EMRServerlessServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckApplicationDestroy(ctx), + CheckDestroy: testAccCheckApplicationDestroy(ctx, t), Steps: []resource.TestStep{ { - Config: testAccApplicationConfig_basic(rName), + Config: testAccApplicationConfig_basic(rName, "emr-6.6.0"), Check: resource.ComposeTestCheckFunc( - testAccCheckApplicationExists(ctx, resourceName, &application), + testAccCheckApplicationExists(ctx, t, resourceName, &application), acctest.CheckResourceDisappears(ctx, acctest.Provider, tfemrserverless.ResourceApplication(), resourceName), acctest.CheckResourceDisappears(ctx, acctest.Provider, 
tfemrserverless.ResourceApplication(), resourceName), ), @@ -384,18 +382,18 @@ func TestAccEMRServerlessApplication_tags(t *testing.T) { ctx := acctest.Context(t) var application types.Application resourceName := "aws_emrserverless_application.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EMRServerlessServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckApplicationDestroy(ctx), + CheckDestroy: testAccCheckApplicationDestroy(ctx, t), Steps: []resource.TestStep{ { Config: testAccApplicationConfig_tags1(rName, acctest.CtKey1, acctest.CtValue1), Check: resource.ComposeTestCheckFunc( - testAccCheckApplicationExists(ctx, resourceName, &application), + testAccCheckApplicationExists(ctx, t, resourceName, &application), resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, "1"), resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey1, acctest.CtValue1), ), @@ -408,7 +406,7 @@ func TestAccEMRServerlessApplication_tags(t *testing.T) { { Config: testAccApplicationConfig_tags2(rName, acctest.CtKey1, acctest.CtValue1Updated, acctest.CtKey2, acctest.CtValue2), Check: resource.ComposeTestCheckFunc( - testAccCheckApplicationExists(ctx, resourceName, &application), + testAccCheckApplicationExists(ctx, t, resourceName, &application), resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, "2"), resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey1, acctest.CtValue1Updated), resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey2, acctest.CtValue2), @@ -417,7 +415,7 @@ func TestAccEMRServerlessApplication_tags(t *testing.T) { { Config: testAccApplicationConfig_tags1(rName, acctest.CtKey2, acctest.CtValue2), Check: 
resource.ComposeTestCheckFunc( - testAccCheckApplicationExists(ctx, resourceName, &application), + testAccCheckApplicationExists(ctx, t, resourceName, &application), resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, "1"), resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey2, acctest.CtValue2), ), @@ -426,14 +424,14 @@ func TestAccEMRServerlessApplication_tags(t *testing.T) { }) } -func testAccCheckApplicationExists(ctx context.Context, resourceName string, application *types.Application) resource.TestCheckFunc { +func testAccCheckApplicationExists(ctx context.Context, t *testing.T, resourceName string, application *types.Application) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[resourceName] if !ok { return fmt.Errorf("Not found: %s", resourceName) } - conn := acctest.Provider.Meta().(*conns.AWSClient).EMRServerlessClient(ctx) + conn := acctest.ProviderMeta(ctx, t).EMRServerlessClient(ctx) output, err := tfemrserverless.FindApplicationByID(ctx, conn, rs.Primary.ID) if err != nil { @@ -450,9 +448,99 @@ func testAccCheckApplicationExists(ctx context.Context, resourceName string, app } } -func testAccCheckApplicationDestroy(ctx context.Context) resource.TestCheckFunc { +func TestAccEMRServerlessApplication_schedulerConfiguration(t *testing.T) { + ctx := acctest.Context(t) + var application types.Application + resourceName := "aws_emrserverless_application.test" + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.EMRServerlessServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckApplicationDestroy(ctx, t), + Steps: []resource.TestStep{ + { + Config: testAccApplicationConfig_schedulerConfiguration(rName, 10, 60), + Check: resource.ComposeTestCheckFunc( + testAccCheckApplicationExists(ctx, 
t, resourceName, &application), + resource.TestCheckResourceAttr(resourceName, "scheduler_configuration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "scheduler_configuration.0.max_concurrent_runs", "10"), + resource.TestCheckResourceAttr(resourceName, "scheduler_configuration.0.queue_timeout_minutes", "60"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccApplicationConfig_schedulerConfiguration(rName, 20, 120), + Check: resource.ComposeTestCheckFunc( + testAccCheckApplicationExists(ctx, t, resourceName, &application), + resource.TestCheckResourceAttr(resourceName, "scheduler_configuration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "scheduler_configuration.0.max_concurrent_runs", "20"), + resource.TestCheckResourceAttr(resourceName, "scheduler_configuration.0.queue_timeout_minutes", "120"), + ), + }, + { // When `scheduler_configuration` is removed, scheduler configuration is disabled + Config: testAccApplicationConfig_basic(rName, "emr-7.1.0"), + Check: resource.ComposeTestCheckFunc( + testAccCheckApplicationExists(ctx, t, resourceName, &application), + resource.TestCheckResourceAttr(resourceName, "scheduler_configuration.#", "0"), + ), + }, + { + // If both arguments are omitted and an empty block is specified for scheduler_config, defaults of 15 and 360 are used + Config: testAccApplicationConfig_schedulerConfigurationEmptyBlock(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckApplicationExists(ctx, t, resourceName, &application), + resource.TestCheckResourceAttr(resourceName, "scheduler_configuration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "scheduler_configuration.0.max_concurrent_runs", "15"), + resource.TestCheckResourceAttr(resourceName, "scheduler_configuration.0.queue_timeout_minutes", "360"), + ), + }, + { + Config: testAccApplicationConfig_basic(rName, "emr-7.1.0"), + Check: resource.ComposeTestCheckFunc( + 
testAccCheckApplicationExists(ctx, t, resourceName, &application), + resource.TestCheckResourceAttr(resourceName, "scheduler_configuration.#", "0"), + ), + }, + { + // If queue_timeout_minutes is omitted, default of 360 is used + Config: testAccApplicationConfig_schedulerConfigurationMaxConcurrentRuns(rName, 30), + Check: resource.ComposeTestCheckFunc( + testAccCheckApplicationExists(ctx, t, resourceName, &application), + resource.TestCheckResourceAttr(resourceName, "scheduler_configuration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "scheduler_configuration.0.max_concurrent_runs", "30"), + resource.TestCheckResourceAttr(resourceName, "scheduler_configuration.0.queue_timeout_minutes", "360"), + ), + }, + { + Config: testAccApplicationConfig_basic(rName, "emr-7.1.0"), + Check: resource.ComposeTestCheckFunc( + testAccCheckApplicationExists(ctx, t, resourceName, &application), + resource.TestCheckResourceAttr(resourceName, "scheduler_configuration.#", "0"), + ), + }, + { + // If max_concurrent_runs is omitted, default of 15 is used + Config: testAccApplicationConfig_schedulerConfigurationQueueTimeoutMinutes(rName, 180), + Check: resource.ComposeTestCheckFunc( + testAccCheckApplicationExists(ctx, t, resourceName, &application), + resource.TestCheckResourceAttr(resourceName, "scheduler_configuration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "scheduler_configuration.0.max_concurrent_runs", "15"), + resource.TestCheckResourceAttr(resourceName, "scheduler_configuration.0.queue_timeout_minutes", "180"), + ), + }, + }, + }) +} + +func testAccCheckApplicationDestroy(ctx context.Context, t *testing.T) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).EMRServerlessClient(ctx) + conn := acctest.ProviderMeta(ctx, t).EMRServerlessClient(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != "aws_emrserverless_application" { @@ -461,7 +549,7 @@ func 
testAccCheckApplicationDestroy(ctx context.Context) resource.TestCheckFunc _, err := tfemrserverless.FindApplicationByID(ctx, conn, rs.Primary.ID) - if tfresource.NotFound(err) { + if retry.NotFound(err) { continue } @@ -475,14 +563,14 @@ func testAccCheckApplicationDestroy(ctx context.Context) resource.TestCheckFunc } } -func testAccApplicationConfig_basic(rName string) string { +func testAccApplicationConfig_basic(rName, releaseLabel string) string { return fmt.Sprintf(` resource "aws_emrserverless_application" "test" { name = %[1]q - release_label = "emr-6.6.0" + release_label = %[2]q type = "hive" } -`, rName) +`, rName, releaseLabel) } func testAccApplicationConfig_releaseLabel(rName string, rl string) string { @@ -817,3 +905,54 @@ resource "aws_emrserverless_application" "test" { } `, rName, selectedVersionResourceName, firstImageVersion, secondImageVersion), nil } + +func testAccApplicationConfig_schedulerConfiguration(rName string, maxConcurrentRuns, queueTimeoutMinutes int) string { + return fmt.Sprintf(` +resource "aws_emrserverless_application" "test" { + name = %[1]q + release_label = "emr-7.1.0" + type = "hive" + scheduler_configuration { + max_concurrent_runs = %[2]d + queue_timeout_minutes = %[3]d + } +} +`, rName, maxConcurrentRuns, queueTimeoutMinutes) +} + +func testAccApplicationConfig_schedulerConfigurationEmptyBlock(rName string) string { + return fmt.Sprintf(` +resource "aws_emrserverless_application" "test" { + name = %[1]q + release_label = "emr-7.1.0" + type = "hive" + scheduler_configuration {} +} +`, rName) +} + +func testAccApplicationConfig_schedulerConfigurationMaxConcurrentRuns(rName string, maxConcurrentRuns int) string { + return fmt.Sprintf(` +resource "aws_emrserverless_application" "test" { + name = %[1]q + release_label = "emr-7.1.0" + type = "hive" + scheduler_configuration { + max_concurrent_runs = %[2]d + } +} +`, rName, maxConcurrentRuns) +} + +func testAccApplicationConfig_schedulerConfigurationQueueTimeoutMinutes(rName 
string, queueTimeoutMinutes int) string { + return fmt.Sprintf(` +resource "aws_emrserverless_application" "test" { + name = %[1]q + release_label = "emr-7.1.0" + type = "hive" + scheduler_configuration { + queue_timeout_minutes = %[2]d + } +} +`, rName, queueTimeoutMinutes) +} diff --git a/internal/service/emrserverless/service_endpoint_resolver_gen.go b/internal/service/emrserverless/service_endpoint_resolver_gen.go index b0171333e239..46b49e0af8b3 100644 --- a/internal/service/emrserverless/service_endpoint_resolver_gen.go +++ b/internal/service/emrserverless/service_endpoint_resolver_gen.go @@ -62,7 +62,7 @@ func (r resolverV2) ResolveEndpoint(ctx context.Context, params emrserverless.En }) params.UseFIPS = aws.Bool(false) } else { - err = fmt.Errorf("looking up emrserverless endpoint %q: %s", hostname, err) + err = fmt.Errorf("looking up emrserverless endpoint %q: %w", hostname, err) return } } else { diff --git a/internal/service/emrserverless/service_endpoints_gen_test.go b/internal/service/emrserverless/service_endpoints_gen_test.go index 76f584825dc6..239e483080b8 100644 --- a/internal/service/emrserverless/service_endpoints_gen_test.go +++ b/internal/service/emrserverless/service_endpoints_gen_test.go @@ -521,7 +521,7 @@ func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { ) } -var errCancelOperation = fmt.Errorf("Test: Canceling request") +var errCancelOperation = errors.New("Test: Canceling request") func addCancelRequestMiddleware() func(*middleware.Stack) error { return func(stack *middleware.Stack) error { diff --git a/internal/service/emrserverless/service_package_gen.go b/internal/service/emrserverless/service_package_gen.go index 277035feff33..d8da7f91094c 100644 --- a/internal/service/emrserverless/service_package_gen.go +++ b/internal/service/emrserverless/service_package_gen.go @@ -7,7 +7,6 @@ import ( "unique" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/retry" 
"github.com/aws/aws-sdk-go-v2/service/emrserverless" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -67,7 +66,7 @@ func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) ( func(o *emrserverless.Options) { if inContext, ok := conns.FromContext(ctx); ok && inContext.VCREnabled() { tflog.Info(ctx, "overriding retry behavior to immediately return VCR errors") - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(vcr.InteractionNotFoundRetryableFunc)) + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), vcr.InteractionNotFoundRetryableFunc) } }, withExtraOptions(ctx, p, config), diff --git a/internal/service/emrserverless/sweep.go b/internal/service/emrserverless/sweep.go index fe41ee2221b7..6e7d1ae0534b 100644 --- a/internal/service/emrserverless/sweep.go +++ b/internal/service/emrserverless/sweep.go @@ -26,7 +26,7 @@ func sweepApplications(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.EMRServerlessClient(ctx) input := &emrserverless.ListApplicationsInput{} diff --git a/internal/service/emrserverless/tags_gen.go b/internal/service/emrserverless/tags_gen.go index 4b5f3e49f011..1f31a7990e00 100644 --- a/internal/service/emrserverless/tags_gen.go +++ b/internal/service/emrserverless/tags_gen.go @@ -3,8 +3,8 @@ package emrserverless import ( "context" - "fmt" + "github.com/YakDriver/smarterr" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/emrserverless" "github.com/hashicorp/terraform-plugin-log/tflog" @@ -26,7 +26,7 @@ func listTags(ctx context.Context, conn *emrserverless.Client, identifier string output, err := conn.ListTagsForResource(ctx, &input, optFns...) 
if err != nil { - return tftags.New(ctx, nil), err + return tftags.New(ctx, nil), smarterr.NewError(err) } return keyValueTags(ctx, output.Tags), nil @@ -38,7 +38,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri tags, err := listTags(ctx, meta.(*conns.AWSClient).EMRServerlessClient(ctx), identifier) if err != nil { - return err + return smarterr.NewError(err) } if inContext, ok := tftags.FromContext(ctx); ok { @@ -99,7 +99,7 @@ func updateTags(ctx context.Context, conn *emrserverless.Client, identifier stri _, err := conn.UntagResource(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("untagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } @@ -114,7 +114,7 @@ func updateTags(ctx context.Context, conn *emrserverless.Client, identifier stri _, err := conn.TagResource(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("tagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } diff --git a/internal/service/events/bus.go b/internal/service/events/bus.go index c409e77b80c9..abf9422676ff 100644 --- a/internal/service/events/bus.go +++ b/internal/service/events/bus.go @@ -15,6 +15,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/enum" "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" @@ -74,6 +75,25 @@ func resourceBus() *schema.Resource { Optional: true, ValidateFunc: validation.StringLenBetween(1, 2048), }, + "log_config": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "include_detail": { + Type: schema.TypeString, + Optional: true, + 
ValidateDiagFunc: enum.Validate[types.IncludeDetail](), + }, + "level": { + Type: schema.TypeString, + Optional: true, + ValidateDiagFunc: enum.Validate[types.Level](), + }, + }, + }, + }, names.AttrName: { Type: schema.TypeString, Required: true, @@ -91,7 +111,7 @@ func resourceBusCreate(ctx context.Context, d *schema.ResourceData, meta any) di conn := meta.(*conns.AWSClient).EventsClient(ctx) eventBusName := d.Get(names.AttrName).(string) - input := &eventbridge.CreateEventBusInput{ + input := eventbridge.CreateEventBusInput{ Name: aws.String(eventBusName), Tags: getTagsIn(ctx), } @@ -112,13 +132,17 @@ func resourceBusCreate(ctx context.Context, d *schema.ResourceData, meta any) di input.KmsKeyIdentifier = aws.String(v.(string)) } - output, err := conn.CreateEventBus(ctx, input) + if v, ok := d.GetOk("log_config"); ok && len(v.([]any)) > 0 && v.([]any)[0] != nil { + input.LogConfig = expandLogConfig(v.([]any)[0].(map[string]any)) + } + + output, err := conn.CreateEventBus(ctx, &input) // Some partitions (e.g. ISO) may not support tag-on-create. 
if input.Tags != nil && errs.IsUnsupportedOperationInPartitionError(meta.(*conns.AWSClient).Partition(ctx), err) { input.Tags = nil - output, err = conn.CreateEventBus(ctx, input) + output, err = conn.CreateEventBus(ctx, &input) } if err != nil { @@ -161,9 +185,14 @@ func resourceBusRead(ctx context.Context, d *schema.ResourceData, meta any) diag } d.Set(names.AttrARN, output.Arn) - d.Set("dead_letter_config", flattenDeadLetterConfig(output.DeadLetterConfig)) + if err := d.Set("dead_letter_config", flattenDeadLetterConfig(output.DeadLetterConfig)); err != nil { + return sdkdiag.AppendErrorf(diags, "setting dead_letter_config: %s", err) + } d.Set(names.AttrDescription, output.Description) d.Set("kms_key_identifier", output.KmsKeyIdentifier) + if err := d.Set("log_config", flattenLogConfig(output.LogConfig)); err != nil { + return sdkdiag.AppendErrorf(diags, "setting log_config: %s", err) + } d.Set(names.AttrName, output.Name) return diags @@ -173,8 +202,8 @@ func resourceBusUpdate(ctx context.Context, d *schema.ResourceData, meta any) di var diags diag.Diagnostics conn := meta.(*conns.AWSClient).EventsClient(ctx) - if d.HasChanges("dead_letter_config", names.AttrDescription, "kms_key_identifier") { - input := &eventbridge.UpdateEventBusInput{ + if d.HasChangesExcept(names.AttrTags, names.AttrTagsAll) { + input := eventbridge.UpdateEventBusInput{ Name: aws.String(d.Get(names.AttrName).(string)), } @@ -193,7 +222,11 @@ func resourceBusUpdate(ctx context.Context, d *schema.ResourceData, meta any) di input.KmsKeyIdentifier = aws.String(v.(string)) } - _, err := conn.UpdateEventBus(ctx, input) + if v, ok := d.GetOk("log_config"); ok && len(v.([]any)) > 0 && v.([]any)[0] != nil { + input.LogConfig = expandLogConfig(v.([]any)[0].(map[string]any)) + } + + _, err := conn.UpdateEventBus(ctx, &input) if err != nil { return sdkdiag.AppendErrorf(diags, "updating EventBridge Event Bus (%s): %s", d.Id(), err) @@ -208,9 +241,10 @@ func resourceBusDelete(ctx context.Context, d 
*schema.ResourceData, meta any) di conn := meta.(*conns.AWSClient).EventsClient(ctx) log.Printf("[INFO] Deleting EventBridge Event Bus: %s", d.Id()) - _, err := conn.DeleteEventBus(ctx, &eventbridge.DeleteEventBusInput{ + input := eventbridge.DeleteEventBusInput{ Name: aws.String(d.Id()), - }) + } + _, err := conn.DeleteEventBus(ctx, &input) if errs.IsA[*types.ResourceNotFoundException](err) { return diags @@ -224,11 +258,11 @@ func resourceBusDelete(ctx context.Context, d *schema.ResourceData, meta any) di } func findEventBusByName(ctx context.Context, conn *eventbridge.Client, name string) (*eventbridge.DescribeEventBusOutput, error) { - input := &eventbridge.DescribeEventBusInput{ + input := eventbridge.DescribeEventBusInput{ Name: aws.String(name), } - output, err := conn.DescribeEventBus(ctx, input) + output, err := conn.DescribeEventBus(ctx, &input) if errs.IsA[*types.ResourceNotFoundException](err) { return nil, &retry.NotFoundError{ @@ -269,3 +303,31 @@ func flattenDeadLetterConfig(apiObject *types.DeadLetterConfig) []map[string]any } return []map[string]any{tfMap} } + +func expandLogConfig(tfMap map[string]any) *types.LogConfig { + if tfMap == nil { + return nil + } + apiObject := &types.LogConfig{} + if v, ok := tfMap["include_detail"].(string); ok && v != "" { + apiObject.IncludeDetail = types.IncludeDetail(v) + } + if v, ok := tfMap["level"].(string); ok && v != "" { + apiObject.Level = types.Level(v) + } + return apiObject +} + +func flattenLogConfig(apiObject *types.LogConfig) []map[string]any { + if apiObject == nil { + return nil + } + tfMap := map[string]any{} + if v := apiObject.IncludeDetail; v != "" { + tfMap["include_detail"] = string(v) + } + if v := apiObject.Level; v != "" { + tfMap["level"] = string(v) + } + return []map[string]any{tfMap} +} diff --git a/internal/service/events/bus_data_source.go b/internal/service/events/bus_data_source.go index 16c2c1e18222..a775671405b7 100644 --- a/internal/service/events/bus_data_source.go +++ 
b/internal/service/events/bus_data_source.go @@ -43,6 +43,22 @@ func dataSourceBus() *schema.Resource { Type: schema.TypeString, Computed: true, }, + "log_config": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "include_detail": { + Type: schema.TypeString, + Computed: true, + }, + "level": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, names.AttrName: { Type: schema.TypeString, Required: true, @@ -64,9 +80,14 @@ func dataSourceBusRead(ctx context.Context, d *schema.ResourceData, meta any) di d.SetId(eventBusName) d.Set(names.AttrARN, output.Arn) - d.Set("dead_letter_config", flattenDeadLetterConfig(output.DeadLetterConfig)) + if err := d.Set("dead_letter_config", flattenDeadLetterConfig(output.DeadLetterConfig)); err != nil { + return sdkdiag.AppendErrorf(diags, "setting dead_letter_config: %s", err) + } d.Set(names.AttrDescription, output.Description) d.Set("kms_key_identifier", output.KmsKeyIdentifier) + if err := d.Set("log_config", flattenLogConfig(output.LogConfig)); err != nil { + return sdkdiag.AppendErrorf(diags, "setting log_config: %s", err) + } d.Set(names.AttrName, output.Name) return diags diff --git a/internal/service/events/bus_data_source_test.go b/internal/service/events/bus_data_source_test.go index ab9119162343..748616b1d988 100644 --- a/internal/service/events/bus_data_source_test.go +++ b/internal/service/events/bus_data_source_test.go @@ -80,6 +80,29 @@ func TestAccEventsBusDataSource_deadLetterConfig(t *testing.T) { }) } +func TestAccEventsBusDataSource_logConfig(t *testing.T) { + ctx := acctest.Context(t) + busName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + dataSourceName := "data.aws_cloudwatch_event_bus.test" + resourceName := "aws_cloudwatch_event_bus.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.EventsServiceID), + ProtoV5ProviderFactories: 
acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + Config: testAccBusDataSourceConfig_logConfig(busName, "FULL", "TRACE"), + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttr(dataSourceName, "log_config.#", "1"), + resource.TestCheckResourceAttrPair(dataSourceName, "log_config.0.include_detail", resourceName, "log_config.0.include_detail"), + resource.TestCheckResourceAttrPair(dataSourceName, "log_config.0.level", resourceName, "log_config.0.level"), + ), + }, + }, + }) +} + func testAccBusDataSourceConfig_basic(busName string) string { return fmt.Sprintf(` resource "aws_cloudwatch_event_bus" "test" { @@ -177,3 +200,19 @@ data "aws_cloudwatch_event_bus" "test" { } `, busName) } + +func testAccBusDataSourceConfig_logConfig(name, includeDetail, level string) string { + return fmt.Sprintf(` +resource "aws_cloudwatch_event_bus" "test" { + name = %[1]q + log_config { + include_detail = %[2]q + level = %[3]q + } +} + +data "aws_cloudwatch_event_bus" "test" { + name = aws_cloudwatch_event_bus.test.name +} +`, name, includeDetail, level) +} diff --git a/internal/service/events/bus_policy.go b/internal/service/events/bus_policy.go index 52a9683d0153..c9f2f81956a2 100644 --- a/internal/service/events/bus_policy.go +++ b/internal/service/events/bus_policy.go @@ -81,7 +81,7 @@ func resourceBusPolicyPut(ctx context.Context, d *schema.ResourceData, meta any) d.SetId(eventBusName) } - _, err = tfresource.RetryWhenNotFound(ctx, propagationTimeout, func() (any, error) { + _, err = tfresource.RetryWhenNotFound(ctx, propagationTimeout, func(ctx context.Context) (any, error) { return findEventBusPolicyByName(ctx, conn, d.Id()) }) diff --git a/internal/service/events/bus_test.go b/internal/service/events/bus_test.go index 81566642d22e..6f292a137483 100644 --- a/internal/service/events/bus_test.go +++ b/internal/service/events/bus_test.go @@ -44,6 +44,7 @@ func TestAccEventsBus_basic(t *testing.T) { 
resource.TestCheckResourceAttr(resourceName, "dead_letter_config.#", "0"), resource.TestCheckResourceAttr(resourceName, names.AttrDescription, ""), resource.TestCheckNoResourceAttr(resourceName, "event_source_name"), + resource.TestCheckResourceAttr(resourceName, "log_config.#", "0"), resource.TestCheckResourceAttr(resourceName, names.AttrName, busName), resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, "0"), ), @@ -287,6 +288,57 @@ func TestAccEventsBus_deadLetterConfig(t *testing.T) { }) } +func TestAccEventsBus_logConfig(t *testing.T) { + ctx := acctest.Context(t) + var v1 eventbridge.DescribeEventBusOutput + busName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_cloudwatch_event_bus.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.EventsServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckBusDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccBusConfig_logConfig(busName, "FULL", "TRACE"), + Check: resource.ComposeTestCheckFunc( + testAccCheckBusExists(ctx, resourceName, &v1), + acctest.CheckResourceAttrRegionalARN(ctx, resourceName, names.AttrARN, "events", fmt.Sprintf("event-bus/%s", busName)), + resource.TestCheckResourceAttr(resourceName, "dead_letter_config.#", "0"), + resource.TestCheckResourceAttr(resourceName, names.AttrDescription, ""), + resource.TestCheckResourceAttr(resourceName, "log_config.#", "1"), + resource.TestCheckResourceAttr(resourceName, "log_config.0.include_detail", "FULL"), + resource.TestCheckResourceAttr(resourceName, "log_config.0.level", "TRACE"), + resource.TestCheckNoResourceAttr(resourceName, "event_source_name"), + resource.TestCheckResourceAttr(resourceName, names.AttrName, busName), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, "0"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + 
ImportStateVerify: true, + }, + { + Config: testAccBusConfig_logConfig(busName, "NONE", "OFF"), + Check: resource.ComposeTestCheckFunc( + testAccCheckBusExists(ctx, resourceName, &v1), + acctest.CheckResourceAttrRegionalARN(ctx, resourceName, names.AttrARN, "events", fmt.Sprintf("event-bus/%s", busName)), + resource.TestCheckResourceAttr(resourceName, "dead_letter_config.#", "0"), + resource.TestCheckResourceAttr(resourceName, names.AttrDescription, ""), + resource.TestCheckResourceAttr(resourceName, "log_config.#", "1"), + resource.TestCheckResourceAttr(resourceName, "log_config.0.include_detail", "NONE"), + resource.TestCheckResourceAttr(resourceName, "log_config.0.level", "OFF"), + resource.TestCheckNoResourceAttr(resourceName, "event_source_name"), + resource.TestCheckResourceAttr(resourceName, names.AttrName, busName), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, "0"), + ), + }, + }, + }) +} + func testAccCheckBusDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { conn := acctest.Provider.Meta().(*conns.AWSClient).EventsClient(ctx) @@ -523,3 +575,15 @@ resource "aws_cloudwatch_event_bus" "test" { } `, name) } + +func testAccBusConfig_logConfig(name, includeDetail, level string) string { + return fmt.Sprintf(` +resource "aws_cloudwatch_event_bus" "test" { + name = %[1]q + log_config { + include_detail = %[2]q + level = %[3]q + } +} +`, name, includeDetail, level) +} diff --git a/internal/service/events/endpoint.go b/internal/service/events/endpoint.go index efeeabab37e2..61e256162675 100644 --- a/internal/service/events/endpoint.go +++ b/internal/service/events/endpoint.go @@ -167,7 +167,7 @@ func resourceEndpointCreate(ctx context.Context, d *schema.ResourceData, meta an input.RoleArn = aws.String(v.(string)) } - _, err := tfresource.RetryWhenAWSErrMessageContains(ctx, propagationTimeout, func() (any, error) { + _, err := tfresource.RetryWhenAWSErrMessageContains(ctx, propagationTimeout, 
func(ctx context.Context) (any, error) { return conn.CreateEndpoint(ctx, input) }, errCodeValidationException, "cannot be assumed by principal") diff --git a/internal/service/events/generate.go b/internal/service/events/generate.go index 859b9818afde..61fecb3cb47b 100644 --- a/internal/service/events/generate.go +++ b/internal/service/events/generate.go @@ -4,6 +4,7 @@ //go:generate go run ../../generate/listpages/main.go -ListOps=ListApiDestinations,ListArchives,ListConnections,ListEventBuses,ListEventSources,ListRules,ListTargetsByRule //go:generate go run ../../generate/tags/main.go -ListTags -ListTagsInIDElem=ResourceARN -ServiceTagsSlice -TagInIDElem=ResourceARN -UpdateTags -CreateTags //go:generate go run ../../generate/servicepackage/main.go +//go:generate go run ../../generate/identitytests/main.go // ONLY generate directives and package declaration! Do not add anything else to this file. package events diff --git a/internal/service/events/permission.go b/internal/service/events/permission.go index 7f95ad7002b2..67a48e12a836 100644 --- a/internal/service/events/permission.go +++ b/internal/service/events/permission.go @@ -127,7 +127,7 @@ func resourcePermissionRead(ctx context.Context, d *schema.ResourceData, meta an return sdkdiag.AppendFromErr(diags, err) } - outputRaw, err := tfresource.RetryWhenNotFound(ctx, propagationTimeout, func() (any, error) { + policyStatement, err := tfresource.RetryWhenNotFound(ctx, propagationTimeout, func(ctx context.Context) (*permissionPolicyStatement, error) { return findPermissionByTwoPartKey(ctx, conn, eventBusName, statementID) }) @@ -141,8 +141,6 @@ func resourcePermissionRead(ctx context.Context, d *schema.ResourceData, meta an return sdkdiag.AppendErrorf(diags, "reading EventBridge Permission (%s): %s", d.Id(), err) } - policyStatement := outputRaw.(*permissionPolicyStatement) - d.Set(names.AttrAction, policyStatement.Action) if err := d.Set(names.AttrCondition, 
flattenPermissionPolicyStatementCondition(policyStatement.Condition)); err != nil { return sdkdiag.AppendErrorf(diags, "setting condition: %s", err) diff --git a/internal/service/events/put_events_action.go b/internal/service/events/put_events_action.go new file mode 100644 index 000000000000..aefc90cfc5dc --- /dev/null +++ b/internal/service/events/put_events_action.go @@ -0,0 +1,140 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package events + +import ( + "context" + "strconv" + + "github.com/aws/aws-sdk-go-v2/service/eventbridge" + "github.com/hashicorp/terraform-plugin-framework-timetypes/timetypes" + "github.com/hashicorp/terraform-plugin-framework/action" + "github.com/hashicorp/terraform-plugin-framework/action/schema" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/framework" + fwflex "github.com/hashicorp/terraform-provider-aws/internal/framework/flex" + fwtypes "github.com/hashicorp/terraform-provider-aws/internal/framework/types" + "github.com/hashicorp/terraform-provider-aws/names" +) + +// @Action(aws_events_put_events, name="Put Events") +// nosemgrep: ci.events-in-func-name -- "PutEvents" matches AWS API operation name (PutEvents). Required for consistent generated/action naming; safe to ignore. 
+func newPutEventsAction(_ context.Context) (action.ActionWithConfigure, error) { + return &putEventsAction{}, nil +} + +var ( + _ action.Action = (*putEventsAction)(nil) +) + +type putEventsAction struct { + framework.ActionWithModel[putEventsActionModel] +} + +type putEventsActionModel struct { + framework.WithRegionModel + Entry fwtypes.ListNestedObjectValueOf[putEventEntryModel] `tfsdk:"entry"` +} + +type putEventEntryModel struct { + Detail types.String `tfsdk:"detail"` + DetailType types.String `tfsdk:"detail_type"` + EventBusName types.String `tfsdk:"event_bus_name"` + Resources fwtypes.ListValueOf[types.String] `tfsdk:"resources"` + Source types.String `tfsdk:"source"` + Time timetypes.RFC3339 `tfsdk:"time"` +} + +func (a *putEventsAction) Schema(ctx context.Context, req action.SchemaRequest, resp *action.SchemaResponse) { + resp.Schema = schema.Schema{ + Description: "Sends custom events to Amazon EventBridge so that they can be matched to rules.", + Blocks: map[string]schema.Block{ + "entry": schema.ListNestedBlock{ + Description: "The entry that defines an event in your system.", + CustomType: fwtypes.NewListNestedObjectTypeOf[putEventEntryModel](ctx), + NestedObject: schema.NestedBlockObject{ + Attributes: map[string]schema.Attribute{ + "detail": schema.StringAttribute{ + Description: "A valid JSON string. 
There is no other schema imposed.", + Optional: true, + }, + "detail_type": schema.StringAttribute{ + Description: "Free-form string used to decide what fields to expect in the event detail.", + Optional: true, + }, + "event_bus_name": schema.StringAttribute{ + Description: "The name or ARN of the event bus to receive the event.", + Optional: true, + }, + names.AttrResources: schema.ListAttribute{ + Description: "AWS resources, identified by Amazon Resource Name (ARN), which the event primarily concerns.", + CustomType: fwtypes.ListOfStringType, + Optional: true, + }, + names.AttrSource: schema.StringAttribute{ + Description: "The source of the event.", + Required: true, + }, + "time": schema.StringAttribute{ + Description: "The time stamp of the event, per RFC3339.", + Optional: true, + CustomType: timetypes.RFC3339Type{}, + }, + }, + }, + }, + }, + } +} + +func (a *putEventsAction) Invoke(ctx context.Context, req action.InvokeRequest, resp *action.InvokeResponse) { + var model putEventsActionModel + + resp.Diagnostics.Append(req.Config.Get(ctx, &model)...) + if resp.Diagnostics.HasError() { + return + } + + conn := a.Meta().EventsClient(ctx) + + tflog.Info(ctx, "Putting events", map[string]any{ + "entry_count": len(model.Entry.Elements()), + }) + + resp.SendProgress(action.InvokeProgressEvent{ + Message: "Putting events to EventBridge...", + }) + + var input eventbridge.PutEventsInput + resp.Diagnostics.Append(fwflex.Expand(ctx, model, &input)...) 
+ if resp.Diagnostics.HasError() { + return + } + + output, err := conn.PutEvents(ctx, &input) + if err != nil { + resp.Diagnostics.AddError( + "Putting Events", + "Could not put events: "+err.Error(), + ) + return + } + + if output.FailedEntryCount > 0 { + resp.Diagnostics.AddError( + "Putting Events", + strconv.Itoa(int(output.FailedEntryCount))+" entries failed to be processed", + ) + return + } + + resp.SendProgress(action.InvokeProgressEvent{ + Message: "Events put successfully", + }) + + tflog.Info(ctx, "Put events completed", map[string]any{ + "successful_entries": len(output.Entries), + }) +} diff --git a/internal/service/events/put_events_action_test.go b/internal/service/events/put_events_action_test.go new file mode 100644 index 000000000000..3c51187378d2 --- /dev/null +++ b/internal/service/events/put_events_action_test.go @@ -0,0 +1,386 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package events_test + +import ( + "context" + "encoding/json" + "fmt" + "strings" + "testing" + "time" + + "github.com/aws/aws-sdk-go-v2/service/eventbridge" + "github.com/aws/aws-sdk-go-v2/service/sqs" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/terraform" + "github.com/hashicorp/terraform-plugin-testing/tfversion" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccEventsPutEventsAction_basic(t *testing.T) { + ctx := acctest.Context(t) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.EventsServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_14_0), + }, + 
CheckDestroy: testAccCheckBusDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccPutEventsActionConfig_basic(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckPutEventsDelivered(ctx, rName, 1), + ), + }, + }, + }) +} + +func TestAccEventsPutEventsAction_multipleEntries(t *testing.T) { + ctx := acctest.Context(t) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.EventsServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_14_0), + }, + CheckDestroy: testAccCheckBusDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccPutEventsActionConfig_multipleEntries(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckPutEventsDelivered(ctx, rName, 2), + ), + }, + }, + }) +} + +func TestAccEventsPutEventsAction_customBus(t *testing.T) { + ctx := acctest.Context(t) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.EventsServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_14_0), + }, + CheckDestroy: testAccCheckBusDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccPutEventsActionConfig_customBus(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckPutEventsDelivered(ctx, rName, 1), + ), + }, + }, + }) +} + +// nosemgrep: ci.events-in-func-name -- Verification helper for PutEvents delivery +func testAccCheckPutEventsDelivered(ctx context.Context, rName string, expected int) resource.TestCheckFunc { + return func(s *terraform.State) error { + meta := 
acctest.Provider.Meta().(*conns.AWSClient) + evConn := meta.EventsClient(ctx) + sqsConn := meta.SQSClient(ctx) + + // Ensure bus exists (sanity) + if _, err := evConn.DescribeEventBus(ctx, &eventbridge.DescribeEventBusInput{Name: &rName}); err != nil { + return fmt.Errorf("event bus %s not found: %w", rName, err) + } + + // Discover queue URL via name convention + queueName := rName + "-events-test" + getOut, err := sqsConn.GetQueueUrl(ctx, &sqs.GetQueueUrlInput{QueueName: &queueName}) + if err != nil { + return fmt.Errorf("getting queue url: %w", err) + } + + deadline := time.Now().Add(2 * time.Minute) + received := 0 + marker := rName + for time.Now().Before(deadline) && received < expected { + // Long poll + msgOut, err := sqsConn.ReceiveMessage(ctx, &sqs.ReceiveMessageInput{ + QueueUrl: getOut.QueueUrl, + MaxNumberOfMessages: 10, + WaitTimeSeconds: 10, + }) + if err != nil { + // transient network errors: retry + continue + } + for _, m := range msgOut.Messages { + if m.Body == nil { + continue + } + // EventBridge SQS target wraps the event as JSON; look for marker inside detail + if strings.Contains(*m.Body, marker) { + // Optionally parse to verify structure + var parsed map[string]any + _ = json.Unmarshal([]byte(*m.Body), &parsed) + received++ + } + } + } + + if received < expected { + return fmt.Errorf("expected %d events delivered to SQS, received %d", expected, received) + } + return nil + } +} + +// nosemgrep: ci.events-in-func-name -- Function reflects PutEvents operation naming for consistency. 
+func testAccPutEventsActionConfig_basic(rName string) string { + return fmt.Sprintf(` +resource "aws_cloudwatch_event_bus" "test" { + name = %[1]q +} + +resource "aws_cloudwatch_event_rule" "test" { + name = %[1]q + event_bus_name = aws_cloudwatch_event_bus.test.name + event_pattern = jsonencode({ + source = ["test.application"] + }) +} + +resource "aws_sqs_queue" "events_target" { + name = "%[1]s-events-test" +} + +resource "aws_sqs_queue_policy" "events_target" { + queue_url = aws_sqs_queue.events_target.id + policy = jsonencode({ + Version = "2012-10-17" + Statement = [ + { + Sid = "AllowEventBridgeSendMessage" + Effect = "Allow" + Principal = { Service = "events.amazonaws.com" } + Action = "sqs:SendMessage" + Resource = aws_sqs_queue.events_target.arn + Condition = { + ArnEquals = { "aws:SourceArn" = aws_cloudwatch_event_rule.test.arn } + } + } + ] + }) +} + +resource "aws_cloudwatch_event_target" "test" { + rule = aws_cloudwatch_event_rule.test.name + event_bus_name = aws_cloudwatch_event_bus.test.name + target_id = "sqs" + arn = aws_sqs_queue.events_target.arn +} + +action "aws_events_put_events" "test" { + config { + entry { + source = "test.application" + detail_type = "Test Event" + event_bus_name = aws_cloudwatch_event_bus.test.name + detail = jsonencode({ + marker = %[1]q + action = "test" + }) + } + } +} + +resource "terraform_data" "trigger" { + input = "trigger" + lifecycle { + action_trigger { + events = [after_create, before_update] + actions = [action.aws_events_put_events.test] + } + } + depends_on = [ + aws_cloudwatch_event_target.test, + aws_sqs_queue_policy.events_target + ] +} +`, rName) +} + +// nosemgrep: ci.events-in-func-name -- Function reflects PutEvents operation naming for consistency. 
+func testAccPutEventsActionConfig_multipleEntries(rName string) string { + return fmt.Sprintf(` +resource "aws_cloudwatch_event_bus" "test" { + name = %[1]q +} + +resource "aws_cloudwatch_event_rule" "test" { + name = %[1]q + event_bus_name = aws_cloudwatch_event_bus.test.name + event_pattern = jsonencode({ + source = ["test.application", "test.orders"] + }) +} + +resource "aws_sqs_queue" "events_target" { + name = "%[1]s-events-test" +} + +resource "aws_sqs_queue_policy" "events_target" { + queue_url = aws_sqs_queue.events_target.id + policy = jsonencode({ + Version = "2012-10-17" + Statement = [ + { + Sid = "AllowEventBridgeSendMessage" + Effect = "Allow" + Principal = { Service = "events.amazonaws.com" } + Action = "sqs:SendMessage" + Resource = aws_sqs_queue.events_target.arn + Condition = { + ArnEquals = { "aws:SourceArn" = aws_cloudwatch_event_rule.test.arn } + } + } + ] + }) +} + +resource "aws_cloudwatch_event_target" "test" { + rule = aws_cloudwatch_event_rule.test.name + event_bus_name = aws_cloudwatch_event_bus.test.name + target_id = "sqs" + arn = aws_sqs_queue.events_target.arn +} + +action "aws_events_put_events" "test" { + config { + entry { + source = "test.application" + detail_type = "User Action" + event_bus_name = aws_cloudwatch_event_bus.test.name + detail = jsonencode({ + marker = %[1]q + action = "login" + }) + } + + entry { + source = "test.orders" + detail_type = "Order Created" + event_bus_name = aws_cloudwatch_event_bus.test.name + detail = jsonencode({ + marker = %[1]q + amount = 99.99 + }) + } + } +} + +resource "terraform_data" "trigger" { + input = "trigger" + lifecycle { + action_trigger { + events = [after_create, before_update] + actions = [action.aws_events_put_events.test] + } + } + depends_on = [ + aws_cloudwatch_event_target.test, + aws_sqs_queue_policy.events_target + ] +} +`, rName) +} + +// nosemgrep: ci.events-in-func-name -- Function reflects PutEvents operation naming for consistency. 
+func testAccPutEventsActionConfig_customBus(rName string) string { + return fmt.Sprintf(` +data "aws_partition" "current" {} + +resource "aws_cloudwatch_event_bus" "test" { + name = %[1]q +} + +resource "aws_cloudwatch_event_rule" "test" { + name = %[1]q + event_bus_name = aws_cloudwatch_event_bus.test.name + event_pattern = jsonencode({ + source = ["custom.source"] + detail-type = ["Custom Event"] + }) +} + +resource "aws_sqs_queue" "events_target" { + name = "%[1]s-events-test" +} + +resource "aws_sqs_queue_policy" "events_target" { + queue_url = aws_sqs_queue.events_target.id + policy = jsonencode({ + Version = "2012-10-17" + Statement = [ + { + Sid = "AllowEventBridgeSendMessage" + Effect = "Allow" + Principal = { Service = "events.amazonaws.com" } + Action = "sqs:SendMessage" + Resource = aws_sqs_queue.events_target.arn + Condition = { + ArnEquals = { "aws:SourceArn" = aws_cloudwatch_event_rule.test.arn } + } + } + ] + }) +} + +resource "aws_cloudwatch_event_target" "test" { + rule = aws_cloudwatch_event_rule.test.name + event_bus_name = aws_cloudwatch_event_bus.test.name + target_id = "sqs" + arn = aws_sqs_queue.events_target.arn +} + +action "aws_events_put_events" "test" { + config { + entry { + source = "custom.source" + detail_type = "Custom Event" + event_bus_name = aws_cloudwatch_event_bus.test.name + time = "2023-01-01T12:00:00Z" + resources = ["arn:${data.aws_partition.current.partition}:s3:::example-bucket"] + detail = jsonencode({ + custom_field = "custom_value" + marker = %[1]q + timestamp = "2023-01-01T12:00:00Z" + }) + } + } +} + +resource "terraform_data" "trigger" { + input = "trigger" + lifecycle { + action_trigger { + events = [after_create, before_update] + actions = [action.aws_events_put_events.test] + } + } + depends_on = [ + aws_cloudwatch_event_target.test, + aws_sqs_queue_policy.events_target + ] +} +`, rName) +} diff --git a/internal/service/events/rule.go b/internal/service/events/rule.go index dd44eca66c46..efd98c596229 100644 --- 
a/internal/service/events/rule.go +++ b/internal/service/events/rule.go @@ -33,6 +33,9 @@ import ( // @SDKResource("aws_cloudwatch_event_rule", name="Rule") // @Tags(identifierAttribute="arn") +// @IdentityAttribute("name") +// @Testing(preIdentityVersion="v6.7.0") +// @Testing(existsType="github.com/aws/aws-sdk-go-v2/service/eventbridge;eventbridge.DescribeRuleOutput") func resourceRule() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceRuleCreate, @@ -40,10 +43,6 @@ func resourceRule() *schema.Resource { UpdateWithoutTimeout: resourceRuleUpdate, DeleteWithoutTimeout: resourceRuleDelete, - Importer: &schema.ResourceImporter{ - StateContext: schema.ImportStatePassthroughContext, - }, - SchemaVersion: 1, StateUpgraders: []schema.StateUpgrader{ { @@ -172,7 +171,7 @@ func resourceRuleCreate(ctx context.Context, d *schema.ResourceData, meta any) d const ( timeout = 2 * time.Minute ) - _, err = tfresource.RetryWhenNotFound(ctx, timeout, func() (any, error) { + _, err = tfresource.RetryWhenNotFound(ctx, timeout, func(ctx context.Context) (any, error) { return findRuleByTwoPartKey(ctx, conn, eventBusName, ruleName) }) @@ -300,7 +299,7 @@ func resourceRuleDelete(ctx context.Context, d *schema.ResourceData, meta any) d timeout = 5 * time.Minute ) log.Printf("[DEBUG] Deleting EventBridge Rule: %s", d.Id()) - _, err = tfresource.RetryWhenAWSErrMessageContains(ctx, timeout, func() (any, error) { + _, err = tfresource.RetryWhenAWSErrMessageContains(ctx, timeout, func(ctx context.Context) (any, error) { return conn.DeleteRule(ctx, input) }, errCodeValidationException, "Rule can't be deleted since it has targets") @@ -316,7 +315,7 @@ func resourceRuleDelete(ctx context.Context, d *schema.ResourceData, meta any) d } func retryPutRule(ctx context.Context, conn *eventbridge.Client, input *eventbridge.PutRuleInput) (string, error) { - outputRaw, err := tfresource.RetryWhenAWSErrMessageContains(ctx, propagationTimeout, func() (any, error) { + outputRaw, err := 
tfresource.RetryWhenAWSErrMessageContains(ctx, propagationTimeout, func(ctx context.Context) (any, error) { return conn.PutRule(ctx, input) }, errCodeValidationException, "cannot be assumed by principal") diff --git a/internal/service/events/rule_identity_gen_test.go b/internal/service/events/rule_identity_gen_test.go new file mode 100644 index 000000000000..d341a5c36175 --- /dev/null +++ b/internal/service/events/rule_identity_gen_test.go @@ -0,0 +1,309 @@ +// Code generated by internal/generate/identitytests/main.go; DO NOT EDIT. + +package events_test + +import ( + "testing" + + "github.com/aws/aws-sdk-go-v2/service/eventbridge" + "github.com/hashicorp/terraform-plugin-testing/config" + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/knownvalue" + "github.com/hashicorp/terraform-plugin-testing/plancheck" + "github.com/hashicorp/terraform-plugin-testing/statecheck" + "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" + "github.com/hashicorp/terraform-plugin-testing/tfversion" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + tfknownvalue "github.com/hashicorp/terraform-provider-aws/internal/acctest/knownvalue" + tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccEventsRule_Identity_Basic(t *testing.T) { + ctx := acctest.Context(t) + + var v eventbridge.DescribeRuleOutput + resourceName := "aws_cloudwatch_event_rule.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.EventsServiceID), + CheckDestroy: 
testAccCheckRuleDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + // Step 1: Setup + { + ConfigDirectory: config.StaticDirectory("testdata/Rule/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckRuleExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + names.AttrName: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrName)), + }, + }, + + // Step 2: Import command + { + ConfigDirectory: config.StaticDirectory("testdata/Rule/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ImportStateKind: resource.ImportCommandWithID, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + + // Step 3: Import block with Import ID + { + ConfigDirectory: config.StaticDirectory("testdata/Rule/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrName), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + }, + }, + }, + + // Step 4: Import block with Resource Identity + { + ConfigDirectory: config.StaticDirectory("testdata/Rule/basic/"), + ConfigVariables: 
config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithResourceIdentity, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrName), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + }, + }, + }, + }, + }) +} + +func TestAccEventsRule_Identity_RegionOverride(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_cloudwatch_event_rule.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.EventsServiceID), + CheckDestroy: acctest.CheckDestroyNoop, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + // Step 1: Setup + { + ConfigDirectory: config.StaticDirectory("testdata/Rule/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.AlternateRegion()), + names.AttrName: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrName)), + }, + }, + + // Step 2: Import command + { + ConfigDirectory: 
config.StaticDirectory("testdata/Rule/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ImportStateKind: resource.ImportCommandWithID, + ImportStateIdFunc: acctest.CrossRegionImportStateIdFunc(resourceName), + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + + // Step 3: Import block with Import ID + { + ConfigDirectory: config.StaticDirectory("testdata/Rule/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportStateIdFunc: acctest.CrossRegionImportStateIdFunc(resourceName), + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrName), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + }, + + // Step 4: Import block with Resource Identity + { + ConfigDirectory: config.StaticDirectory("testdata/Rule/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithResourceIdentity, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrName), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + }, + }, + }) +} + +// Resource Identity was added after v6.7.0 +func 
TestAccEventsRule_Identity_ExistingResource(t *testing.T) { + ctx := acctest.Context(t) + + var v eventbridge.DescribeRuleOutput + resourceName := "aws_cloudwatch_event_rule.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.EventsServiceID), + CheckDestroy: testAccCheckRuleDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/Rule/basic_v6.7.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckRuleExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Rule/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + names.AttrName: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrName)), + }, + }, + }, + }) +} + +// Resource Identity was added after v6.7.0 +func 
TestAccEventsRule_Identity_ExistingResource_NoRefresh_NoChange(t *testing.T) { + ctx := acctest.Context(t) + + var v eventbridge.DescribeRuleOutput + resourceName := "aws_cloudwatch_event_rule.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.EventsServiceID), + CheckDestroy: testAccCheckRuleDestroy(ctx), + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/Rule/basic_v6.7.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckRuleExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Rule/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + }, + }) +} diff --git a/internal/service/events/service_endpoint_resolver_gen.go b/internal/service/events/service_endpoint_resolver_gen.go index cc56d8d1edd2..40bb04a36242 100644 --- 
a/internal/service/events/service_endpoint_resolver_gen.go +++ b/internal/service/events/service_endpoint_resolver_gen.go @@ -62,7 +62,7 @@ func (r resolverV2) ResolveEndpoint(ctx context.Context, params eventbridge.Endp }) params.UseFIPS = aws.Bool(false) } else { - err = fmt.Errorf("looking up eventbridge endpoint %q: %s", hostname, err) + err = fmt.Errorf("looking up eventbridge endpoint %q: %w", hostname, err) return } } else { diff --git a/internal/service/events/service_endpoints_gen_test.go b/internal/service/events/service_endpoints_gen_test.go index e59fed1a4d41..23cadc5a298b 100644 --- a/internal/service/events/service_endpoints_gen_test.go +++ b/internal/service/events/service_endpoints_gen_test.go @@ -678,7 +678,7 @@ func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { ) } -var errCancelOperation = fmt.Errorf("Test: Canceling request") +var errCancelOperation = errors.New("Test: Canceling request") func addCancelRequestMiddleware() func(*middleware.Stack) error { return func(stack *middleware.Stack) error { diff --git a/internal/service/events/service_package.go b/internal/service/events/service_package.go new file mode 100644 index 000000000000..637e9cdc2b0d --- /dev/null +++ b/internal/service/events/service_package.go @@ -0,0 +1,41 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package events + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/aws/retry" + "github.com/aws/aws-sdk-go-v2/service/eventbridge" + awstypes "github.com/aws/aws-sdk-go-v2/service/eventbridge/types" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/errs" + "github.com/hashicorp/terraform-provider-aws/internal/vcr" +) + +func (p *servicePackage) withExtraOptions(ctx context.Context, config map[string]any) []func(*eventbridge.Options) { + cfg := *(config["aws_sdkv2_config"].(*aws.Config)) + + return []func(*eventbridge.Options){ + func(o *eventbridge.Options) { + retryables := []retry.IsErrorRetryable{ + retry.IsErrorRetryableFunc(func(err error) aws.Ternary { + if errs.IsAErrorMessageContains[*awstypes.LimitExceededException](err, "The requested resource exceeds the maximum number allowed") { + return aws.FalseTernary + } + return aws.UnknownTernary // Delegate to configured Retryer. + }), + } + // Include go-vcr retryable to prevent generated client retryer from being overridden + if inContext, ok := conns.FromContext(ctx); ok && inContext.VCREnabled() { + tflog.Info(ctx, "overriding retry behavior to immediately return VCR errors") + retryables = append(retryables, vcr.InteractionNotFoundRetryableFunc) + } + + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retryables...) 
+ }, + } +} diff --git a/internal/service/events/service_package_gen.go b/internal/service/events/service_package_gen.go index 507963e9a56d..082a8622fb80 100644 --- a/internal/service/events/service_package_gen.go +++ b/internal/service/events/service_package_gen.go @@ -7,7 +7,6 @@ import ( "unique" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/eventbridge" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -18,6 +17,17 @@ import ( type servicePackage struct{} +func (p *servicePackage) Actions(ctx context.Context) []*inttypes.ServicePackageAction { + return []*inttypes.ServicePackageAction{ + { + Factory: newPutEventsAction, + TypeName: "aws_events_put_events", + Name: "Put Events", + Region: unique.Make(inttypes.ResourceRegionDefault()), + }, + } +} + func (p *servicePackage) FrameworkDataSources(ctx context.Context) []*inttypes.ServicePackageFrameworkDataSource { return []*inttypes.ServicePackageFrameworkDataSource{ { @@ -110,13 +120,26 @@ func (p *servicePackage) SDKResources(ctx context.Context) []*inttypes.ServicePa Tags: unique.Make(inttypes.ServicePackageResourceTags{ IdentifierAttribute: names.AttrARN, }), - Region: unique.Make(inttypes.ResourceRegionDefault()), + Region: unique.Make(inttypes.ResourceRegionDefault()), + Identity: inttypes.RegionalSingleParameterIdentity(names.AttrName), + Import: inttypes.SDKv2Import{ + WrappedImport: true, + }, }, { Factory: resourceTarget, TypeName: "aws_cloudwatch_event_target", Name: "Target", Region: unique.Make(inttypes.ResourceRegionDefault()), + Identity: inttypes.RegionalParameterizedIdentity([]inttypes.IdentityAttribute{ + inttypes.StringIdentityAttribute("event_bus_name", true), + inttypes.StringIdentityAttribute(names.AttrRule, true), + inttypes.StringIdentityAttribute("target_id", true), + }), + Import: inttypes.SDKv2Import{ + WrappedImport: true, + ImportID: targetImportID{}, + 
}, }, } } @@ -144,7 +167,7 @@ func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) ( func(o *eventbridge.Options) { if inContext, ok := conns.FromContext(ctx); ok && inContext.VCREnabled() { tflog.Info(ctx, "overriding retry behavior to immediately return VCR errors") - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(vcr.InteractionNotFoundRetryableFunc)) + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), vcr.InteractionNotFoundRetryableFunc) } }, withExtraOptions(ctx, p, config), diff --git a/internal/service/events/sweep.go b/internal/service/events/sweep.go index 5681d2739c49..8d104fdc13ea 100644 --- a/internal/service/events/sweep.go +++ b/internal/service/events/sweep.go @@ -252,7 +252,7 @@ func sweepRules(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %w", err) + return fmt.Errorf("getting client: %w", err) } conn := client.EventsClient(ctx) input := &eventbridge.ListEventBusesInput{} @@ -318,7 +318,7 @@ func sweepTargets(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %w", err) + return fmt.Errorf("getting client: %w", err) } conn := client.EventsClient(ctx) input := &eventbridge.ListEventBusesInput{} diff --git a/internal/service/events/tags_gen.go b/internal/service/events/tags_gen.go index 92183449c0f1..096b8db4e583 100644 --- a/internal/service/events/tags_gen.go +++ b/internal/service/events/tags_gen.go @@ -3,8 +3,8 @@ package events import ( "context" - "fmt" + "github.com/YakDriver/smarterr" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/eventbridge" awstypes "github.com/aws/aws-sdk-go-v2/service/eventbridge/types" @@ -27,7 +27,7 @@ func listTags(ctx context.Context, conn 
*eventbridge.Client, identifier string, output, err := conn.ListTagsForResource(ctx, &input, optFns...) if err != nil { - return tftags.New(ctx, nil), err + return tftags.New(ctx, nil), smarterr.NewError(err) } return keyValueTags(ctx, output.Tags), nil @@ -39,7 +39,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri tags, err := listTags(ctx, meta.(*conns.AWSClient).EventsClient(ctx), identifier) if err != nil { - return err + return smarterr.NewError(err) } if inContext, ok := tftags.FromContext(ctx); ok { @@ -126,7 +126,7 @@ func updateTags(ctx context.Context, conn *eventbridge.Client, identifier string _, err := conn.UntagResource(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("untagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } @@ -141,7 +141,7 @@ func updateTags(ctx context.Context, conn *eventbridge.Client, identifier string _, err := conn.TagResource(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("tagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } diff --git a/internal/service/events/target.go b/internal/service/events/target.go index 8472c1185ce8..5fcb1977eaf3 100644 --- a/internal/service/events/target.go +++ b/internal/service/events/target.go @@ -29,11 +29,19 @@ import ( tfslices "github.com/hashicorp/terraform-provider-aws/internal/slices" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + inttypes "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/internal/verify" "github.com/hashicorp/terraform-provider-aws/names" ) // @SDKResource("aws_cloudwatch_event_target", name="Target") +// @IdentityAttribute("event_bus_name") +// @IdentityAttribute("rule") +// @IdentityAttribute("target_id") +// @ImportIDHandler("targetImportID") +// @Testing(preIdentityVersion="v6.9.0") +// 
@Testing(existsType="github.com/aws/aws-sdk-go-v2/service/eventbridge/types;types.Target") +// @Testing(importStateIdFunc="testAccTargetImportStateIdFunc") func resourceTarget() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceTargetCreate, @@ -41,23 +49,6 @@ func resourceTarget() *schema.Resource { UpdateWithoutTimeout: resourceTargetUpdate, DeleteWithoutTimeout: resourceTargetDelete, - Importer: &schema.ResourceImporter{ - StateContext: func(ctx context.Context, d *schema.ResourceData, meta any) ([]*schema.ResourceData, error) { - busName, ruleName, targetID, err := targetParseImportID(d.Id()) - if err != nil { - return []*schema.ResourceData{}, err - } - - id := targetCreateResourceID(busName, ruleName, targetID) - d.SetId(id) - d.Set("target_id", targetID) - d.Set(names.AttrRule, ruleName) - d.Set("event_bus_name", busName) - - return []*schema.ResourceData{d}, nil - }, - }, - SchemaVersion: 1, StateUpgraders: []schema.StateUpgrader{ { @@ -755,52 +746,6 @@ func findTargets(ctx context.Context, conn *eventbridge.Client, input *eventbrid return output, nil } -// Terraform resource IDs for Targets are not parseable as the separator used ("-") is also a valid character in both the rule name and the target ID. 
-const ( - targetResourceIDSeparator = "-" - targetImportIDSeparator = "/" -) - -func targetCreateResourceID(eventBusName, ruleName, targetID string) string { - var parts []string - - if eventBusName == "" || eventBusName == DefaultEventBusName { - parts = []string{ruleName, targetID} - } else { - parts = []string{eventBusName, ruleName, targetID} - } - - id := strings.Join(parts, targetResourceIDSeparator) - - return id -} - -func targetParseImportID(id string) (string, string, string, error) { - parts := strings.Split(id, targetImportIDSeparator) - - if len(parts) == 2 && parts[0] != "" && parts[1] != "" { - return DefaultEventBusName, parts[0], parts[1], nil - } - if len(parts) == 3 && parts[0] != "" && parts[1] != "" && parts[2] != "" { - return parts[0], parts[1], parts[2], nil - } - if len(parts) > 3 { - iTarget := strings.LastIndex(id, targetImportIDSeparator) - targetID := id[iTarget+1:] - iRule := strings.LastIndex(id[:iTarget], targetImportIDSeparator) - eventBusName := id[:iRule] - ruleName := id[iRule+1 : iTarget] - if eventBusARNPattern.MatchString(eventBusName) && ruleName != "" && targetID != "" { - return eventBusName, ruleName, targetID, nil - } - if partnerEventBusPattern.MatchString(eventBusName) && ruleName != "" && targetID != "" { - return eventBusName, ruleName, targetID, nil - } - } - - return "", "", "", fmt.Errorf("unexpected format for ID (%[1]s), expected EVENTBUSNAME%[2]sRULENAME%[2]sTARGETID or RULENAME%[2]sTARGETID", id, targetImportIDSeparator) -} - func putTargetError(apiObject types.PutTargetsResultEntry) error { return errs.APIError(aws.ToString(apiObject.ErrorCode), aws.ToString(apiObject.ErrorMessage)) } @@ -1528,3 +1473,75 @@ func expandAppSyncParameters(tfList []any) *types.AppSyncParameters { return apiObject } + +// Terraform resource IDs for Targets are not parseable as the separator used ("-") is also a valid character in both the rule name and the target ID. 
+const ( + targetResourceIDSeparator = "-" + targetImportIDSeparator = "/" +) + +func targetCreateResourceID(eventBusName, ruleName, targetID string) string { + var parts []string + + if eventBusName == "" || eventBusName == DefaultEventBusName { + parts = []string{ruleName, targetID} + } else { + parts = []string{eventBusName, ruleName, targetID} + } + + id := strings.Join(parts, targetResourceIDSeparator) + + return id +} + +func targetParseImportID(id string) (string, string, string, error) { + parts := strings.Split(id, targetImportIDSeparator) + + if len(parts) == 2 && parts[0] != "" && parts[1] != "" { + return DefaultEventBusName, parts[0], parts[1], nil + } + if len(parts) == 3 && parts[0] != "" && parts[1] != "" && parts[2] != "" { + return parts[0], parts[1], parts[2], nil + } + if len(parts) > 3 { + iTarget := strings.LastIndex(id, targetImportIDSeparator) + targetID := id[iTarget+1:] + iRule := strings.LastIndex(id[:iTarget], targetImportIDSeparator) + eventBusName := id[:iRule] + ruleName := id[iRule+1 : iTarget] + if eventBusARNPattern.MatchString(eventBusName) && ruleName != "" && targetID != "" { + return eventBusName, ruleName, targetID, nil + } + if partnerEventBusPattern.MatchString(eventBusName) && ruleName != "" && targetID != "" { + return eventBusName, ruleName, targetID, nil + } + } + + return "", "", "", fmt.Errorf("unexpected format for ID (%[1]s), expected EVENTBUSNAME%[2]sRULENAME%[2]sTARGETID or RULENAME%[2]sTARGETID", id, targetImportIDSeparator) +} + +var _ inttypes.SDKv2ImportID = targetImportID{} + +type targetImportID struct{} + +func (targetImportID) Create(d *schema.ResourceData) string { + eventBusName := d.Get("event_bus_name").(string) + rule := d.Get(names.AttrRule).(string) + targetID := d.Get("target_id").(string) + return targetCreateResourceID(eventBusName, rule, targetID) +} + +func (targetImportID) Parse(id string) (string, map[string]string, error) { + eventBusName, rule, targetID, err := targetParseImportID(id) + if 
err != nil { + return id, nil, err + } + + results := map[string]string{ + "event_bus_name": eventBusName, + names.AttrRule: rule, + "target_id": targetID, + } + + return targetCreateResourceID(eventBusName, rule, targetID), results, nil +} diff --git a/internal/service/events/target_identity_gen_test.go b/internal/service/events/target_identity_gen_test.go new file mode 100644 index 000000000000..0907673d3211 --- /dev/null +++ b/internal/service/events/target_identity_gen_test.go @@ -0,0 +1,331 @@ +// Code generated by internal/generate/identitytests/main.go; DO NOT EDIT. + +package events_test + +import ( + "testing" + + "github.com/aws/aws-sdk-go-v2/service/eventbridge/types" + "github.com/hashicorp/terraform-plugin-testing/config" + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/knownvalue" + "github.com/hashicorp/terraform-plugin-testing/plancheck" + "github.com/hashicorp/terraform-plugin-testing/statecheck" + "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" + "github.com/hashicorp/terraform-plugin-testing/tfversion" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + tfknownvalue "github.com/hashicorp/terraform-provider-aws/internal/acctest/knownvalue" + tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccEventsTarget_Identity_Basic(t *testing.T) { + ctx := acctest.Context(t) + + var v types.Target + resourceName := "aws_cloudwatch_event_target.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.EventsServiceID), + 
CheckDestroy: testAccCheckTargetDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + // Step 1: Setup + { + ConfigDirectory: config.StaticDirectory("testdata/Target/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTargetExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + "event_bus_name": knownvalue.NotNull(), + names.AttrRule: knownvalue.NotNull(), + "target_id": knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New("event_bus_name")), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrRule)), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New("target_id")), + }, + }, + + // Step 2: Import command + { + ConfigDirectory: config.StaticDirectory("testdata/Target/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ImportStateKind: resource.ImportCommandWithID, + ImportStateIdFunc: testAccTargetImportStateIdFunc(resourceName), + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + + // Step 3: Import block with Import ID + { + ConfigDirectory: config.StaticDirectory("testdata/Target/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportStateIdFunc: testAccTargetImportStateIdFunc(resourceName), + ImportPlanChecks: resource.ImportPlanChecks{ + 
PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New("event_bus_name"), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRule), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New("target_id"), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + }, + }, + }, + + // Step 4: Import block with Resource Identity + { + ConfigDirectory: config.StaticDirectory("testdata/Target/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithResourceIdentity, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New("event_bus_name"), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRule), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New("target_id"), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + }, + }, + }, + }, + }) +} + +func TestAccEventsTarget_Identity_RegionOverride(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_cloudwatch_event_target.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.EventsServiceID), + CheckDestroy: acctest.CheckDestroyNoop, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + // Step 1: Setup + { + ConfigDirectory: 
config.StaticDirectory("testdata/Target/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.AlternateRegion()), + "event_bus_name": knownvalue.NotNull(), + names.AttrRule: knownvalue.NotNull(), + "target_id": knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New("event_bus_name")), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrRule)), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New("target_id")), + }, + }, + + // Step 2: Import command + { + ConfigDirectory: config.StaticDirectory("testdata/Target/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ImportStateKind: resource.ImportCommandWithID, + ImportStateIdFunc: acctest.CrossRegionImportStateIdFuncAdapter(resourceName, testAccTargetImportStateIdFunc), + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + + // Step 3: Import block with Import ID + { + ConfigDirectory: config.StaticDirectory("testdata/Target/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportStateIdFunc: acctest.CrossRegionImportStateIdFuncAdapter(resourceName, testAccTargetImportStateIdFunc), + 
ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New("event_bus_name"), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRule), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New("target_id"), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + }, + + // Step 4: Import block with Resource Identity + { + ConfigDirectory: config.StaticDirectory("testdata/Target/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithResourceIdentity, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New("event_bus_name"), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRule), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New("target_id"), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + }, + }, + }) +} + +// Resource Identity was added after v6.9.0 +func TestAccEventsTarget_Identity_ExistingResource(t *testing.T) { + ctx := acctest.Context(t) + + var v types.Target + resourceName := "aws_cloudwatch_event_target.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, 
names.EventsServiceID), + CheckDestroy: testAccCheckTargetDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/Target/basic_v6.9.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTargetExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Target/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + "event_bus_name": knownvalue.NotNull(), + names.AttrRule: knownvalue.NotNull(), + "target_id": knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New("event_bus_name")), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrRule)), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New("target_id")), + }, + }, + }, + }) +} + +// Resource Identity was added after v6.9.0 +func TestAccEventsTarget_Identity_ExistingResource_NoRefresh_NoChange(t *testing.T) { + ctx := acctest.Context(t) + + var v types.Target + resourceName := "aws_cloudwatch_event_target.test" + rName := 
sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.EventsServiceID), + CheckDestroy: testAccCheckTargetDestroy(ctx), + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/Target/basic_v6.9.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTargetExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Target/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + }, + }) +} diff --git a/internal/service/events/testdata/Rule/basic/main_gen.tf b/internal/service/events/testdata/Rule/basic/main_gen.tf new file mode 100644 index 000000000000..eb4efbf1045e --- /dev/null +++ b/internal/service/events/testdata/Rule/basic/main_gen.tf @@ -0,0 +1,13 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_cloudwatch_event_rule" "test" { + name = var.rName + schedule_expression = "rate(1 hour)" +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} diff --git a/internal/service/events/testdata/Rule/basic_v6.7.0/main_gen.tf b/internal/service/events/testdata/Rule/basic_v6.7.0/main_gen.tf new file mode 100644 index 000000000000..acdd22dc8c42 --- /dev/null +++ b/internal/service/events/testdata/Rule/basic_v6.7.0/main_gen.tf @@ -0,0 +1,23 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resource "aws_cloudwatch_event_rule" "test" { + name = var.rName + schedule_expression = "rate(1 hour)" +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "6.7.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/events/testdata/Rule/region_override/main_gen.tf b/internal/service/events/testdata/Rule/region_override/main_gen.tf new file mode 100644 index 000000000000..bd34d4d86196 --- /dev/null +++ b/internal/service/events/testdata/Rule/region_override/main_gen.tf @@ -0,0 +1,21 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resource "aws_cloudwatch_event_rule" "test" { + region = var.region + + name = var.rName + schedule_expression = "rate(1 hour)" +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} + +variable "region" { + description = "Region to deploy resource in" + type = string + nullable = false +} diff --git a/internal/service/events/testdata/Target/basic/main_gen.tf b/internal/service/events/testdata/Target/basic/main_gen.tf new file mode 100644 index 000000000000..f328680d2328 --- /dev/null +++ b/internal/service/events/testdata/Target/basic/main_gen.tf @@ -0,0 +1,23 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_cloudwatch_event_target" "test" { + rule = aws_cloudwatch_event_rule.test.name + target_id = var.rName + arn = aws_sns_topic.test.arn +} + +resource "aws_cloudwatch_event_rule" "test" { + name = var.rName + schedule_expression = "rate(1 hour)" +} + +resource "aws_sns_topic" "test" { + name = var.rName +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} diff --git a/internal/service/events/testdata/Target/basic_v6.9.0/main_gen.tf b/internal/service/events/testdata/Target/basic_v6.9.0/main_gen.tf new file mode 100644 index 000000000000..2cb4ddab148d --- /dev/null +++ b/internal/service/events/testdata/Target/basic_v6.9.0/main_gen.tf @@ -0,0 +1,33 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resource "aws_cloudwatch_event_target" "test" { + rule = aws_cloudwatch_event_rule.test.name + target_id = var.rName + arn = aws_sns_topic.test.arn +} + +resource "aws_cloudwatch_event_rule" "test" { + name = var.rName + schedule_expression = "rate(1 hour)" +} + +resource "aws_sns_topic" "test" { + name = var.rName +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "6.9.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/events/testdata/Target/region_override/main_gen.tf b/internal/service/events/testdata/Target/region_override/main_gen.tf new file mode 100644 index 000000000000..16d81f9191f2 --- /dev/null +++ b/internal/service/events/testdata/Target/region_override/main_gen.tf @@ -0,0 +1,35 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_cloudwatch_event_target" "test" { + region = var.region + + rule = aws_cloudwatch_event_rule.test.name + target_id = var.rName + arn = aws_sns_topic.test.arn +} + +resource "aws_cloudwatch_event_rule" "test" { + region = var.region + + name = var.rName + schedule_expression = "rate(1 hour)" +} + +resource "aws_sns_topic" "test" { + region = var.region + + name = var.rName +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} + +variable "region" { + description = "Region to deploy resource in" + type = string + nullable = false +} diff --git a/internal/service/events/testdata/tmpl/rule_tags.gtpl b/internal/service/events/testdata/tmpl/rule_tags.gtpl new file mode 100644 index 000000000000..de7064c5419f --- /dev/null +++ b/internal/service/events/testdata/tmpl/rule_tags.gtpl @@ -0,0 +1,6 @@ +resource "aws_cloudwatch_event_rule" "test" { +{{- template "region" }} + name = var.rName + schedule_expression = "rate(1 hour)" +{{- template "tags" }} +} diff --git a/internal/service/events/testdata/tmpl/target_basic.gtpl b/internal/service/events/testdata/tmpl/target_basic.gtpl new file mode 100644 index 000000000000..7db7378d26b3 --- /dev/null +++ b/internal/service/events/testdata/tmpl/target_basic.gtpl @@ -0,0 +1,18 @@ +resource "aws_cloudwatch_event_target" "test" { +{{- template "region" }} + rule = aws_cloudwatch_event_rule.test.name + target_id = var.rName + arn = aws_sns_topic.test.arn +} + +resource "aws_cloudwatch_event_rule" "test" { +{{- template "region" }} + name = var.rName + schedule_expression = "rate(1 hour)" +{{- template "tags" }} +} + +resource "aws_sns_topic" "test" { +{{- template "region" }} + name = var.rName +} diff --git a/internal/service/evidently/service_endpoint_resolver_gen.go b/internal/service/evidently/service_endpoint_resolver_gen.go index efe21cf9bf57..8d6e1ea99011 100644 --- a/internal/service/evidently/service_endpoint_resolver_gen.go +++ 
b/internal/service/evidently/service_endpoint_resolver_gen.go @@ -62,7 +62,7 @@ func (r resolverV2) ResolveEndpoint(ctx context.Context, params evidently.Endpoi }) params.UseFIPS = aws.Bool(false) } else { - err = fmt.Errorf("looking up evidently endpoint %q: %s", hostname, err) + err = fmt.Errorf("looking up evidently endpoint %q: %w", hostname, err) return } } else { diff --git a/internal/service/evidently/service_endpoints_gen_test.go b/internal/service/evidently/service_endpoints_gen_test.go index 6e07735dcaa2..8627774293b1 100644 --- a/internal/service/evidently/service_endpoints_gen_test.go +++ b/internal/service/evidently/service_endpoints_gen_test.go @@ -601,7 +601,7 @@ func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { ) } -var errCancelOperation = fmt.Errorf("Test: Canceling request") +var errCancelOperation = errors.New("Test: Canceling request") func addCancelRequestMiddleware() func(*middleware.Stack) error { return func(stack *middleware.Stack) error { diff --git a/internal/service/evidently/service_package_gen.go b/internal/service/evidently/service_package_gen.go index c0bbc8e2291b..efdbc4bd3a09 100644 --- a/internal/service/evidently/service_package_gen.go +++ b/internal/service/evidently/service_package_gen.go @@ -7,7 +7,6 @@ import ( "unique" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/evidently" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -94,7 +93,7 @@ func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) ( func(o *evidently.Options) { if inContext, ok := conns.FromContext(ctx); ok && inContext.VCREnabled() { tflog.Info(ctx, "overriding retry behavior to immediately return VCR errors") - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(vcr.InteractionNotFoundRetryableFunc)) + o.Retryer = 
conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), vcr.InteractionNotFoundRetryableFunc) } }, withExtraOptions(ctx, p, config), diff --git a/internal/service/evidently/tags_gen.go b/internal/service/evidently/tags_gen.go index 03cc14d59fa6..e357592539e0 100644 --- a/internal/service/evidently/tags_gen.go +++ b/internal/service/evidently/tags_gen.go @@ -3,8 +3,8 @@ package evidently import ( "context" - "fmt" + "github.com/YakDriver/smarterr" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/evidently" "github.com/hashicorp/terraform-plugin-log/tflog" @@ -66,7 +66,7 @@ func updateTags(ctx context.Context, conn *evidently.Client, identifier string, _, err := conn.UntagResource(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("untagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } @@ -81,7 +81,7 @@ func updateTags(ctx context.Context, conn *evidently.Client, identifier string, _, err := conn.TagResource(ctx, &input, optFns...) 
if err != nil { - return fmt.Errorf("tagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } diff --git a/internal/service/evs/service_endpoint_resolver_gen.go b/internal/service/evs/service_endpoint_resolver_gen.go index 76c972cc60a5..e8b90109066b 100644 --- a/internal/service/evs/service_endpoint_resolver_gen.go +++ b/internal/service/evs/service_endpoint_resolver_gen.go @@ -62,7 +62,7 @@ func (r resolverV2) ResolveEndpoint(ctx context.Context, params evs.EndpointPara }) params.UseFIPS = aws.Bool(false) } else { - err = fmt.Errorf("looking up evs endpoint %q: %s", hostname, err) + err = fmt.Errorf("looking up evs endpoint %q: %w", hostname, err) return } } else { diff --git a/internal/service/evs/service_endpoints_gen_test.go b/internal/service/evs/service_endpoints_gen_test.go index a91c5455a20c..742da57609e6 100644 --- a/internal/service/evs/service_endpoints_gen_test.go +++ b/internal/service/evs/service_endpoints_gen_test.go @@ -521,7 +521,7 @@ func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { ) } -var errCancelOperation = fmt.Errorf("Test: Canceling request") +var errCancelOperation = errors.New("Test: Canceling request") func addCancelRequestMiddleware() func(*middleware.Stack) error { return func(stack *middleware.Stack) error { diff --git a/internal/service/evs/service_package_gen.go b/internal/service/evs/service_package_gen.go index e55702097fc3..61c777a16355 100644 --- a/internal/service/evs/service_package_gen.go +++ b/internal/service/evs/service_package_gen.go @@ -6,7 +6,6 @@ import ( "context" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/evs" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -56,7 +55,7 @@ func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) ( func(o *evs.Options) { if inContext, ok := conns.FromContext(ctx); ok && 
inContext.VCREnabled() { tflog.Info(ctx, "overriding retry behavior to immediately return VCR errors") - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(vcr.InteractionNotFoundRetryableFunc)) + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), vcr.InteractionNotFoundRetryableFunc) } }, withExtraOptions(ctx, p, config), diff --git a/internal/service/evs/tags_gen.go b/internal/service/evs/tags_gen.go index 5c65b64f7d34..506d56b7b544 100644 --- a/internal/service/evs/tags_gen.go +++ b/internal/service/evs/tags_gen.go @@ -3,8 +3,8 @@ package evs import ( "context" - "fmt" + "github.com/YakDriver/smarterr" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/evs" "github.com/hashicorp/terraform-plugin-log/tflog" @@ -26,7 +26,7 @@ func listTags(ctx context.Context, conn *evs.Client, identifier string, optFns . output, err := conn.ListTagsForResource(ctx, &input, optFns...) if err != nil { - return tftags.New(ctx, nil), err + return tftags.New(ctx, nil), smarterr.NewError(err) } return keyValueTags(ctx, output.Tags), nil @@ -38,7 +38,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri tags, err := listTags(ctx, meta.(*conns.AWSClient).EVSClient(ctx), identifier) if err != nil { - return err + return smarterr.NewError(err) } if inContext, ok := tftags.FromContext(ctx); ok { @@ -99,7 +99,7 @@ func updateTags(ctx context.Context, conn *evs.Client, identifier string, oldTag _, err := conn.UntagResource(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("untagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } @@ -114,7 +114,7 @@ func updateTags(ctx context.Context, conn *evs.Client, identifier string, oldTag _, err := conn.TagResource(ctx, &input, optFns...) 
if err != nil { - return fmt.Errorf("tagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } diff --git a/internal/service/finspace/kx_volume.go b/internal/service/finspace/kx_volume.go index 64234ca88835..5b24a999a2ec 100644 --- a/internal/service/finspace/kx_volume.go +++ b/internal/service/finspace/kx_volume.go @@ -243,7 +243,7 @@ func resourceKxVolumeRead(ctx context.Context, d *schema.ResourceData, meta any) d.Set(names.AttrDescription, out.Description) d.Set("created_timestamp", out.CreatedTimestamp.String()) d.Set("last_modified_timestamp", out.LastModifiedTimestamp.String()) - d.Set(names.AttrAvailabilityZones, aws.StringSlice(out.AvailabilityZoneIds)) + d.Set(names.AttrAvailabilityZones, out.AvailabilityZoneIds) if err := d.Set("nas1_configuration", flattenNas1Configuration(out.Nas1Configuration)); err != nil { return create.AppendDiagError(diags, names.FinSpace, create.ErrActionSetting, ResNameKxVolume, d.Id(), err) diff --git a/internal/service/finspace/service_endpoint_resolver_gen.go b/internal/service/finspace/service_endpoint_resolver_gen.go index 1080bd32b970..a190cd2a7fdd 100644 --- a/internal/service/finspace/service_endpoint_resolver_gen.go +++ b/internal/service/finspace/service_endpoint_resolver_gen.go @@ -62,7 +62,7 @@ func (r resolverV2) ResolveEndpoint(ctx context.Context, params finspace.Endpoin }) params.UseFIPS = aws.Bool(false) } else { - err = fmt.Errorf("looking up finspace endpoint %q: %s", hostname, err) + err = fmt.Errorf("looking up finspace endpoint %q: %w", hostname, err) return } } else { diff --git a/internal/service/finspace/service_endpoints_gen_test.go b/internal/service/finspace/service_endpoints_gen_test.go index 9ddc47c151e4..dba751ffd761 100644 --- a/internal/service/finspace/service_endpoints_gen_test.go +++ b/internal/service/finspace/service_endpoints_gen_test.go @@ -521,7 +521,7 @@ func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { ) } -var errCancelOperation = 
fmt.Errorf("Test: Canceling request") +var errCancelOperation = errors.New("Test: Canceling request") func addCancelRequestMiddleware() func(*middleware.Stack) error { return func(stack *middleware.Stack) error { diff --git a/internal/service/finspace/service_package_gen.go b/internal/service/finspace/service_package_gen.go index a88d15fbea9f..7c4dc08ba161 100644 --- a/internal/service/finspace/service_package_gen.go +++ b/internal/service/finspace/service_package_gen.go @@ -7,7 +7,6 @@ import ( "unique" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/finspace" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -121,7 +120,7 @@ func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) ( func(o *finspace.Options) { if inContext, ok := conns.FromContext(ctx); ok && inContext.VCREnabled() { tflog.Info(ctx, "overriding retry behavior to immediately return VCR errors") - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(vcr.InteractionNotFoundRetryableFunc)) + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), vcr.InteractionNotFoundRetryableFunc) } }, withExtraOptions(ctx, p, config), diff --git a/internal/service/finspace/sweep.go b/internal/service/finspace/sweep.go index ae4dd1dc21e9..f0befa794609 100644 --- a/internal/service/finspace/sweep.go +++ b/internal/service/finspace/sweep.go @@ -26,7 +26,7 @@ func sweepKxEnvironments(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.FinSpaceClient(ctx) input := &finspace.ListKxEnvironmentsInput{} diff --git a/internal/service/finspace/tags_gen.go b/internal/service/finspace/tags_gen.go index 7309417e0b6f..5c46c8856a34 
100644 --- a/internal/service/finspace/tags_gen.go +++ b/internal/service/finspace/tags_gen.go @@ -3,8 +3,8 @@ package finspace import ( "context" - "fmt" + "github.com/YakDriver/smarterr" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/finspace" "github.com/hashicorp/terraform-plugin-log/tflog" @@ -26,7 +26,7 @@ func listTags(ctx context.Context, conn *finspace.Client, identifier string, opt output, err := conn.ListTagsForResource(ctx, &input, optFns...) if err != nil { - return tftags.New(ctx, nil), err + return tftags.New(ctx, nil), smarterr.NewError(err) } return keyValueTags(ctx, output.Tags), nil @@ -38,7 +38,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri tags, err := listTags(ctx, meta.(*conns.AWSClient).FinSpaceClient(ctx), identifier) if err != nil { - return err + return smarterr.NewError(err) } if inContext, ok := tftags.FromContext(ctx); ok { @@ -108,7 +108,7 @@ func updateTags(ctx context.Context, conn *finspace.Client, identifier string, o _, err := conn.UntagResource(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("untagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } @@ -123,7 +123,7 @@ func updateTags(ctx context.Context, conn *finspace.Client, identifier string, o _, err := conn.TagResource(ctx, &input, optFns...) 
if err != nil { - return fmt.Errorf("tagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } diff --git a/internal/service/firehose/delivery_stream.go b/internal/service/firehose/delivery_stream.go index 99b3bd02440e..bdddc5c784b1 100644 --- a/internal/service/firehose/delivery_stream.go +++ b/internal/service/firehose/delivery_stream.go @@ -858,6 +858,12 @@ func resourceDeliveryStream() *schema.Resource { MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ + "append_only": { + Type: schema.TypeBool, + Optional: true, + Computed: true, + ForceNew: true, + }, "buffering_interval": { Type: schema.TypeInt, Optional: true, @@ -1534,7 +1540,7 @@ func resourceDeliveryStreamCreate(ctx context.Context, d *schema.ResourceData, m } } - _, err := retryDeliveryStreamOp(ctx, func() (any, error) { + _, err := retryDeliveryStreamOp(ctx, func(ctx context.Context) (any, error) { return conn.CreateDeliveryStream(ctx, input) }) @@ -1730,7 +1736,7 @@ func resourceDeliveryStreamUpdate(ctx context.Context, d *schema.ResourceData, m } } - _, err := retryDeliveryStreamOp(ctx, func() (any, error) { + _, err := retryDeliveryStreamOp(ctx, func(ctx context.Context) (any, error) { return conn.UpdateDestination(ctx, input) }) @@ -1802,7 +1808,7 @@ func resourceDeliveryStreamDelete(ctx context.Context, d *schema.ResourceData, m return diags } -func retryDeliveryStreamOp(ctx context.Context, f func() (any, error)) (any, error) { +func retryDeliveryStreamOp(ctx context.Context, f func(context.Context) (any, error)) (any, error) { return tfresource.RetryWhen(ctx, propagationTimeout, f, func(err error) (bool, error) { @@ -2543,6 +2549,10 @@ func expandIcebergDestinationConfiguration(tfMap map[string]any) *types.IcebergD S3Configuration: expandS3DestinationConfiguration(tfMap["s3_configuration"].([]any)), } + if v, ok := tfMap["append_only"].(bool); ok && v { + apiObject.AppendOnly = aws.Bool(v) + } + if _, ok := tfMap["cloudwatch_logging_options"]; ok { 
apiObject.CloudWatchLoggingOptions = expandCloudWatchLoggingOptions(tfMap) } @@ -2576,6 +2586,10 @@ func expandIcebergDestinationUpdate(tfMap map[string]any) *types.IcebergDestinat RoleARN: aws.String(roleARN), } + if v, ok := tfMap["append_only"].(bool); ok && v { + apiObject.AppendOnly = aws.Bool(v) + } + if catalogARN, ok := tfMap["catalog_arn"].(string); ok { apiObject.CatalogConfiguration = &types.CatalogConfiguration{ CatalogARN: aws.String(catalogARN), @@ -4243,6 +4257,7 @@ func flattenIcebergDestinationDescription(apiObject *types.IcebergDestinationDes } tfMap := map[string]any{ + "append_only": aws.ToBool(apiObject.AppendOnly), "catalog_arn": aws.ToString(apiObject.CatalogConfiguration.CatalogARN), "s3_configuration": flattenS3DestinationDescription(apiObject.S3DestinationDescription), names.AttrRoleARN: aws.ToString(apiObject.RoleARN), diff --git a/internal/service/firehose/delivery_stream_test.go b/internal/service/firehose/delivery_stream_test.go index e0528bd75171..f880078bd0cd 100644 --- a/internal/service/firehose/delivery_stream_test.go +++ b/internal/service/firehose/delivery_stream_test.go @@ -16,6 +16,7 @@ import ( "github.com/aws/aws-sdk-go-v2/service/lambda" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/plancheck" "github.com/hashicorp/terraform-plugin-testing/terraform" "github.com/hashicorp/terraform-provider-aws/internal/acctest" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -1054,6 +1055,7 @@ func TestAccFirehoseDeliveryStream_ExtendedS3_readFromTimestamp(t *testing.T) { } func TestAccFirehoseDeliveryStream_icebergUpdates(t *testing.T) { + // In main test account: // "InvalidArgumentException: Role ... is not authorized to perform: glue:GetTable for the given table or the table does not exist." 
acctest.Skip(t, "Unresolvable Glue permission issue") @@ -1076,6 +1078,7 @@ func TestAccFirehoseDeliveryStream_icebergUpdates(t *testing.T) { resource.TestCheckResourceAttrSet(resourceName, "iceberg_configuration.0.role_arn"), resource.TestCheckResourceAttrSet(resourceName, "iceberg_configuration.0.s3_configuration.0.bucket_arn"), resource.TestCheckResourceAttrSet(resourceName, "iceberg_configuration.0.s3_configuration.0.role_arn"), + resource.TestCheckResourceAttr(resourceName, "iceberg_configuration.0.append_only", acctest.CtFalse), resource.TestCheckResourceAttr(resourceName, "iceberg_configuration.0.buffering_interval", "300"), resource.TestCheckResourceAttr(resourceName, "iceberg_configuration.0.buffering_size", "5"), resource.TestCheckResourceAttr(resourceName, "iceberg_configuration.0.cloudwatch_logging_options.#", "1"), @@ -1090,6 +1093,11 @@ func TestAccFirehoseDeliveryStream_icebergUpdates(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "iceberg_configuration.0.retry_options.#", "0"), resource.TestCheckResourceAttr(resourceName, "iceberg_configuration.0.s3_backup_mode", "FailedDataOnly"), ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + }, + }, }, { ResourceName: resourceName, @@ -1104,6 +1112,7 @@ func TestAccFirehoseDeliveryStream_icebergUpdates(t *testing.T) { resource.TestCheckResourceAttrSet(resourceName, "iceberg_configuration.0.role_arn"), resource.TestCheckResourceAttrSet(resourceName, "iceberg_configuration.0.s3_configuration.0.bucket_arn"), resource.TestCheckResourceAttrSet(resourceName, "iceberg_configuration.0.s3_configuration.0.role_arn"), + resource.TestCheckResourceAttr(resourceName, "iceberg_configuration.0.append_only", acctest.CtFalse), resource.TestCheckResourceAttr(resourceName, "iceberg_configuration.0.buffering_interval", "900"), resource.TestCheckResourceAttr(resourceName, 
"iceberg_configuration.0.buffering_size", "100"), resource.TestCheckResourceAttr(resourceName, "iceberg_configuration.0.cloudwatch_logging_options.#", "1"), @@ -1117,6 +1126,11 @@ func TestAccFirehoseDeliveryStream_icebergUpdates(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "iceberg_configuration.0.processing_configuration.0.enabled", acctest.CtFalse), resource.TestCheckResourceAttr(resourceName, "iceberg_configuration.0.s3_backup_mode.#", "0"), ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + }, + }, }, { Config: testAccDeliveryStream_icebergUpdatesMetadataProcessor(rName), @@ -1126,6 +1140,7 @@ func TestAccFirehoseDeliveryStream_icebergUpdates(t *testing.T) { resource.TestCheckResourceAttrSet(resourceName, "iceberg_configuration.0.role_arn"), resource.TestCheckResourceAttrSet(resourceName, "iceberg_configuration.0.s3_configuration.0.bucket_arn"), resource.TestCheckResourceAttrSet(resourceName, "iceberg_configuration.0.s3_configuration.0.role_arn"), + resource.TestCheckResourceAttr(resourceName, "iceberg_configuration.0.append_only", acctest.CtFalse), resource.TestCheckResourceAttr(resourceName, "iceberg_configuration.0.buffering_interval", "300"), resource.TestCheckResourceAttr(resourceName, "iceberg_configuration.0.buffering_size", "5"), resource.TestCheckResourceAttr(resourceName, "iceberg_configuration.0.cloudwatch_logging_options.#", "1"), @@ -1152,6 +1167,11 @@ func TestAccFirehoseDeliveryStream_icebergUpdates(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "iceberg_configuration.0.retry_options.#", "0"), resource.TestCheckResourceAttr(resourceName, "iceberg_configuration.0.s3_backup_mode", "FailedDataOnly"), ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + }, + }, }, { Config: 
testAccDeliveryStream_icebergUpdatesLambdaProcessor(rName), @@ -1160,6 +1180,7 @@ func TestAccFirehoseDeliveryStream_icebergUpdates(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "iceberg_configuration.#", "1"), resource.TestCheckResourceAttrSet(resourceName, "iceberg_configuration.0.role_arn"), resource.TestCheckResourceAttrSet(resourceName, "iceberg_configuration.0.s3_configuration.0.bucket_arn"), + resource.TestCheckResourceAttr(resourceName, "iceberg_configuration.0.append_only", acctest.CtTrue), resource.TestCheckResourceAttr(resourceName, "iceberg_configuration.0.buffering_interval", "300"), resource.TestCheckResourceAttr(resourceName, "iceberg_configuration.0.buffering_size", "5"), resource.TestCheckResourceAttr(resourceName, "iceberg_configuration.0.cloudwatch_logging_options.#", "1"), @@ -1183,6 +1204,64 @@ func TestAccFirehoseDeliveryStream_icebergUpdates(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "iceberg_configuration.0.retry_options.#", "0"), resource.TestCheckResourceAttr(resourceName, "iceberg_configuration.0.s3_backup_mode.#", "0"), ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionReplace), // Changes to destination_table_configuration and append_only + }, + }, + }, + }, + }) +} + +func TestAccFirehoseDeliveryStream_icebergUpgradeV6_7_0(t *testing.T) { + // In main test account: + // "InvalidArgumentException: Role ... is not authorized to perform: glue:GetTable for the given table or the table does not exist." 
+ acctest.Skip(t, "Unresolvable Glue permission issue") + + ctx := acctest.Context(t) + var stream types.DeliveryStreamDescription + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_kinesis_firehose_delivery_stream.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.FirehoseServiceID), + CheckDestroy: testAccCheckDeliveryStreamDestroy(ctx), + Steps: []resource.TestStep{ + { + ExternalProviders: map[string]resource.ExternalProvider{ + "aws": { + Source: "hashicorp/aws", + VersionConstraint: "6.7.0", + }, + }, + Config: testAccDeliveryStream_iceberg(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckDeliveryStreamExists(ctx, resourceName, &stream), + resource.TestCheckNoResourceAttr(resourceName, "iceberg_configuration.0.append_only"), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Config: testAccDeliveryStream_iceberg(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckDeliveryStreamExists(ctx, resourceName, &stream), + resource.TestCheckResourceAttr(resourceName, "iceberg_configuration.0.append_only", acctest.CtFalse), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, }, }, }) @@ -4284,6 +4363,7 @@ resource "aws_kinesis_firehose_delivery_stream" "test" { role_arn = aws_iam_role.firehose.arn s3_backup_mode = "FailedDataOnly" catalog_arn = 
"arn:${data.aws_partition.current.partition}:glue:${data.aws_region.current.region}:${data.aws_caller_identity.current.account_id}:catalog" + append_only = true s3_configuration { bucket_arn = aws_s3_bucket.bucket.arn diff --git a/internal/service/firehose/service_endpoint_resolver_gen.go b/internal/service/firehose/service_endpoint_resolver_gen.go index 87ec0ff2b925..1ec4e83c7dc0 100644 --- a/internal/service/firehose/service_endpoint_resolver_gen.go +++ b/internal/service/firehose/service_endpoint_resolver_gen.go @@ -62,7 +62,7 @@ func (r resolverV2) ResolveEndpoint(ctx context.Context, params firehose.Endpoin }) params.UseFIPS = aws.Bool(false) } else { - err = fmt.Errorf("looking up firehose endpoint %q: %s", hostname, err) + err = fmt.Errorf("looking up firehose endpoint %q: %w", hostname, err) return } } else { diff --git a/internal/service/firehose/service_endpoints_gen_test.go b/internal/service/firehose/service_endpoints_gen_test.go index 59f9b0eca523..a5e5a5925cd0 100644 --- a/internal/service/firehose/service_endpoints_gen_test.go +++ b/internal/service/firehose/service_endpoints_gen_test.go @@ -521,7 +521,7 @@ func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { ) } -var errCancelOperation = fmt.Errorf("Test: Canceling request") +var errCancelOperation = errors.New("Test: Canceling request") func addCancelRequestMiddleware() func(*middleware.Stack) error { return func(stack *middleware.Stack) error { diff --git a/internal/service/firehose/service_package_gen.go b/internal/service/firehose/service_package_gen.go index 964f32673abc..d5b958c56e48 100644 --- a/internal/service/firehose/service_package_gen.go +++ b/internal/service/firehose/service_package_gen.go @@ -7,7 +7,6 @@ import ( "unique" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/firehose" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ 
-74,7 +73,7 @@ func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) ( func(o *firehose.Options) { if inContext, ok := conns.FromContext(ctx); ok && inContext.VCREnabled() { tflog.Info(ctx, "overriding retry behavior to immediately return VCR errors") - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(vcr.InteractionNotFoundRetryableFunc)) + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), vcr.InteractionNotFoundRetryableFunc) } }, withExtraOptions(ctx, p, config), diff --git a/internal/service/firehose/sweep.go b/internal/service/firehose/sweep.go index fdae1adab0f4..0147cf125105 100644 --- a/internal/service/firehose/sweep.go +++ b/internal/service/firehose/sweep.go @@ -25,7 +25,7 @@ func sweepDeliveryStreams(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.FirehoseClient(ctx) input := &firehose.ListDeliveryStreamsInput{} diff --git a/internal/service/firehose/tags_gen.go b/internal/service/firehose/tags_gen.go index 5b2d6fc8be6d..57be4bd103cc 100644 --- a/internal/service/firehose/tags_gen.go +++ b/internal/service/firehose/tags_gen.go @@ -3,8 +3,8 @@ package firehose import ( "context" - "fmt" + "github.com/YakDriver/smarterr" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/firehose" awstypes "github.com/aws/aws-sdk-go-v2/service/firehose/types" @@ -49,7 +49,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri tags, err := listTags(ctx, meta.(*conns.AWSClient).FirehoseClient(ctx), identifier) if err != nil { - return err + return smarterr.NewError(err) } if inContext, ok := tftags.FromContext(ctx); ok { @@ -127,7 +127,7 @@ func updateTags(ctx context.Context, conn *firehose.Client, identifier string, o _, err := 
conn.UntagDeliveryStream(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("untagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } @@ -142,7 +142,7 @@ func updateTags(ctx context.Context, conn *firehose.Client, identifier string, o _, err := conn.TagDeliveryStream(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("tagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } diff --git a/internal/service/fis/service_endpoint_resolver_gen.go b/internal/service/fis/service_endpoint_resolver_gen.go index c989684206d9..4fd579bf8bae 100644 --- a/internal/service/fis/service_endpoint_resolver_gen.go +++ b/internal/service/fis/service_endpoint_resolver_gen.go @@ -62,7 +62,7 @@ func (r resolverV2) ResolveEndpoint(ctx context.Context, params fis.EndpointPara }) params.UseFIPS = aws.Bool(false) } else { - err = fmt.Errorf("looking up fis endpoint %q: %s", hostname, err) + err = fmt.Errorf("looking up fis endpoint %q: %w", hostname, err) return } } else { diff --git a/internal/service/fis/service_endpoints_gen_test.go b/internal/service/fis/service_endpoints_gen_test.go index 0f8d323b12be..988e8557ed24 100644 --- a/internal/service/fis/service_endpoints_gen_test.go +++ b/internal/service/fis/service_endpoints_gen_test.go @@ -521,7 +521,7 @@ func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { ) } -var errCancelOperation = fmt.Errorf("Test: Canceling request") +var errCancelOperation = errors.New("Test: Canceling request") func addCancelRequestMiddleware() func(*middleware.Stack) error { return func(stack *middleware.Stack) error { diff --git a/internal/service/fis/service_package_gen.go b/internal/service/fis/service_package_gen.go index 3d456785620a..248151156c33 100644 --- a/internal/service/fis/service_package_gen.go +++ b/internal/service/fis/service_package_gen.go @@ -7,7 +7,6 @@ import ( "unique" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/retry" 
"github.com/aws/aws-sdk-go-v2/service/fis" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -72,7 +71,7 @@ func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) ( func(o *fis.Options) { if inContext, ok := conns.FromContext(ctx); ok && inContext.VCREnabled() { tflog.Info(ctx, "overriding retry behavior to immediately return VCR errors") - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(vcr.InteractionNotFoundRetryableFunc)) + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), vcr.InteractionNotFoundRetryableFunc) } }, withExtraOptions(ctx, p, config), diff --git a/internal/service/fis/tags_gen.go b/internal/service/fis/tags_gen.go index 528cee32cc7e..78943fdb0351 100644 --- a/internal/service/fis/tags_gen.go +++ b/internal/service/fis/tags_gen.go @@ -3,8 +3,8 @@ package fis import ( "context" - "fmt" + "github.com/YakDriver/smarterr" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/fis" "github.com/hashicorp/terraform-plugin-log/tflog" @@ -26,7 +26,7 @@ func listTags(ctx context.Context, conn *fis.Client, identifier string, optFns . output, err := conn.ListTagsForResource(ctx, &input, optFns...) if err != nil { - return tftags.New(ctx, nil), err + return tftags.New(ctx, nil), smarterr.NewError(err) } return keyValueTags(ctx, output.Tags), nil @@ -38,7 +38,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri tags, err := listTags(ctx, meta.(*conns.AWSClient).FISClient(ctx), identifier) if err != nil { - return err + return smarterr.NewError(err) } if inContext, ok := tftags.FromContext(ctx); ok { @@ -99,7 +99,7 @@ func updateTags(ctx context.Context, conn *fis.Client, identifier string, oldTag _, err := conn.UntagResource(ctx, &input, optFns...) 
if err != nil { - return fmt.Errorf("untagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } @@ -114,7 +114,7 @@ func updateTags(ctx context.Context, conn *fis.Client, identifier string, oldTag _, err := conn.TagResource(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("tagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } diff --git a/internal/service/fms/policy.go b/internal/service/fms/policy.go index 30738fd23bff..bb7d9abeb619 100644 --- a/internal/service/fms/policy.go +++ b/internal/service/fms/policy.go @@ -334,7 +334,7 @@ func resourcePolicyCreate(ctx context.Context, d *schema.ResourceData, meta any) const ( timeout = 1 * time.Minute ) - outputRaw, err := tfresource.RetryWhenIsA[*awstypes.InternalErrorException](ctx, timeout, func() (any, error) { + outputRaw, err := tfresource.RetryWhenIsA[any, *awstypes.InternalErrorException](ctx, timeout, func(ctx context.Context) (any, error) { return conn.PutPolicy(ctx, input) }) @@ -412,7 +412,7 @@ func resourcePolicyUpdate(ctx context.Context, d *schema.ResourceData, meta any) const ( timeout = 1 * time.Minute ) - _, err := tfresource.RetryWhenIsA[*awstypes.InternalErrorException](ctx, timeout, func() (any, error) { + _, err := tfresource.RetryWhenIsA[any, *awstypes.InternalErrorException](ctx, timeout, func(ctx context.Context) (any, error) { return conn.PutPolicy(ctx, input) }) diff --git a/internal/service/fms/policy_tags_gen_test.go b/internal/service/fms/policy_tags_gen_test.go index 255c6107ee40..818779db6075 100644 --- a/internal/service/fms/policy_tags_gen_test.go +++ b/internal/service/fms/policy_tags_gen_test.go @@ -6,7 +6,6 @@ import ( "testing" "github.com/hashicorp/terraform-plugin-testing/config" - sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/knownvalue" 
"github.com/hashicorp/terraform-plugin-testing/plancheck" @@ -47,10 +46,11 @@ func testAccFMSPolicy_tagsSerial(t *testing.T) { func testAccFMSPolicy_tags(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_fms_policy.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.FMSServiceID), CheckDestroy: testAccCheckPolicyDestroy(ctx), @@ -240,10 +240,11 @@ func testAccFMSPolicy_tags(t *testing.T) { func testAccFMSPolicy_tags_null(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_fms_policy.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.FMSServiceID), CheckDestroy: testAccCheckPolicyDestroy(ctx), @@ -309,10 +310,11 @@ func testAccFMSPolicy_tags_null(t *testing.T) { func testAccFMSPolicy_tags_EmptyMap(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_fms_policy.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.FMSServiceID), CheckDestroy: testAccCheckPolicyDestroy(ctx), @@ -374,10 +376,11 @@ func testAccFMSPolicy_tags_EmptyMap(t *testing.T) { func testAccFMSPolicy_tags_AddOnUpdate(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_fms_policy.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - 
resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.FMSServiceID), CheckDestroy: testAccCheckPolicyDestroy(ctx), @@ -457,10 +460,11 @@ func testAccFMSPolicy_tags_AddOnUpdate(t *testing.T) { func testAccFMSPolicy_tags_EmptyTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_fms_policy.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.FMSServiceID), CheckDestroy: testAccCheckPolicyDestroy(ctx), @@ -551,10 +555,11 @@ func testAccFMSPolicy_tags_EmptyTag_OnCreate(t *testing.T) { func testAccFMSPolicy_tags_EmptyTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_fms_policy.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.FMSServiceID), CheckDestroy: testAccCheckPolicyDestroy(ctx), @@ -693,10 +698,11 @@ func testAccFMSPolicy_tags_EmptyTag_OnUpdate_Add(t *testing.T) { func testAccFMSPolicy_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_fms_policy.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.FMSServiceID), CheckDestroy: testAccCheckPolicyDestroy(ctx), @@ -784,10 +790,11 @@ func 
testAccFMSPolicy_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { func testAccFMSPolicy_tags_DefaultTags_providerOnly(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_fms_policy.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.FMSServiceID), CheckDestroy: testAccCheckPolicyDestroy(ctx), @@ -976,10 +983,11 @@ func testAccFMSPolicy_tags_DefaultTags_providerOnly(t *testing.T) { func testAccFMSPolicy_tags_DefaultTags_nonOverlapping(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_fms_policy.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.FMSServiceID), CheckDestroy: testAccCheckPolicyDestroy(ctx), @@ -1144,10 +1152,11 @@ func testAccFMSPolicy_tags_DefaultTags_nonOverlapping(t *testing.T) { func testAccFMSPolicy_tags_DefaultTags_overlapping(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_fms_policy.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.FMSServiceID), CheckDestroy: testAccCheckPolicyDestroy(ctx), @@ -1328,10 +1337,11 @@ func testAccFMSPolicy_tags_DefaultTags_overlapping(t *testing.T) { func testAccFMSPolicy_tags_DefaultTags_updateToProviderOnly(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_fms_policy.test" - rName := 
sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.FMSServiceID), CheckDestroy: testAccCheckPolicyDestroy(ctx), @@ -1420,10 +1430,11 @@ func testAccFMSPolicy_tags_DefaultTags_updateToProviderOnly(t *testing.T) { func testAccFMSPolicy_tags_DefaultTags_updateToResourceOnly(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_fms_policy.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.FMSServiceID), CheckDestroy: testAccCheckPolicyDestroy(ctx), @@ -1511,10 +1522,11 @@ func testAccFMSPolicy_tags_DefaultTags_updateToResourceOnly(t *testing.T) { func testAccFMSPolicy_tags_DefaultTags_emptyResourceTag(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_fms_policy.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.FMSServiceID), CheckDestroy: testAccCheckPolicyDestroy(ctx), @@ -1578,10 +1590,11 @@ func testAccFMSPolicy_tags_DefaultTags_emptyResourceTag(t *testing.T) { func testAccFMSPolicy_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_fms_policy.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { 
acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.FMSServiceID), CheckDestroy: testAccCheckPolicyDestroy(ctx), @@ -1637,10 +1650,11 @@ func testAccFMSPolicy_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T) { func testAccFMSPolicy_tags_DefaultTags_nullOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_fms_policy.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.FMSServiceID), CheckDestroy: testAccCheckPolicyDestroy(ctx), @@ -1701,10 +1715,11 @@ func testAccFMSPolicy_tags_DefaultTags_nullOverlappingResourceTag(t *testing.T) func testAccFMSPolicy_tags_DefaultTags_nullNonOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_fms_policy.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.FMSServiceID), CheckDestroy: testAccCheckPolicyDestroy(ctx), @@ -1765,10 +1780,11 @@ func testAccFMSPolicy_tags_DefaultTags_nullNonOverlappingResourceTag(t *testing. 
func testAccFMSPolicy_tags_ComputedTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_fms_policy.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.FMSServiceID), CheckDestroy: testAccCheckPolicyDestroy(ctx), @@ -1822,10 +1838,11 @@ func testAccFMSPolicy_tags_ComputedTag_OnCreate(t *testing.T) { func testAccFMSPolicy_tags_ComputedTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_fms_policy.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.FMSServiceID), CheckDestroy: testAccCheckPolicyDestroy(ctx), @@ -1921,10 +1938,11 @@ func testAccFMSPolicy_tags_ComputedTag_OnUpdate_Add(t *testing.T) { func testAccFMSPolicy_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_fms_policy.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.FMSServiceID), CheckDestroy: testAccCheckPolicyDestroy(ctx), @@ -2010,10 +2028,11 @@ func testAccFMSPolicy_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { func testAccFMSPolicy_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_fms_policy.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, 
acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.FMSServiceID), CheckDestroy: testAccCheckPolicyDestroy(ctx), @@ -2171,10 +2190,11 @@ func testAccFMSPolicy_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { func testAccFMSPolicy_tags_IgnoreTags_Overlap_ResourceTag(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_fms_policy.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.FMSServiceID), CheckDestroy: testAccCheckPolicyDestroy(ctx), diff --git a/internal/service/fms/resource_set_tags_gen_test.go b/internal/service/fms/resource_set_tags_gen_test.go index 78f90fb07567..58971f3d45e3 100644 --- a/internal/service/fms/resource_set_tags_gen_test.go +++ b/internal/service/fms/resource_set_tags_gen_test.go @@ -7,7 +7,6 @@ import ( "github.com/aws/aws-sdk-go-v2/service/fms" "github.com/hashicorp/terraform-plugin-testing/config" - sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/knownvalue" "github.com/hashicorp/terraform-plugin-testing/plancheck" @@ -48,11 +47,12 @@ func testAccFMSResourceSet_tagsSerial(t *testing.T) { func testAccFMSResourceSet_tags(t *testing.T) { ctx := acctest.Context(t) + var v fms.GetResourceSetOutput resourceName := "aws_fms_resource_set.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: 
acctest.ErrorCheck(t, names.FMSServiceID), CheckDestroy: testAccCheckResourceSetDestroy(ctx), @@ -230,11 +230,12 @@ func testAccFMSResourceSet_tags(t *testing.T) { func testAccFMSResourceSet_tags_null(t *testing.T) { ctx := acctest.Context(t) + var v fms.GetResourceSetOutput resourceName := "aws_fms_resource_set.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.FMSServiceID), CheckDestroy: testAccCheckResourceSetDestroy(ctx), @@ -292,11 +293,12 @@ func testAccFMSResourceSet_tags_null(t *testing.T) { func testAccFMSResourceSet_tags_EmptyMap(t *testing.T) { ctx := acctest.Context(t) + var v fms.GetResourceSetOutput resourceName := "aws_fms_resource_set.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.FMSServiceID), CheckDestroy: testAccCheckResourceSetDestroy(ctx), @@ -342,11 +344,12 @@ func testAccFMSResourceSet_tags_EmptyMap(t *testing.T) { func testAccFMSResourceSet_tags_AddOnUpdate(t *testing.T) { ctx := acctest.Context(t) + var v fms.GetResourceSetOutput resourceName := "aws_fms_resource_set.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.FMSServiceID), CheckDestroy: testAccCheckResourceSetDestroy(ctx), @@ -422,11 +425,12 @@ func testAccFMSResourceSet_tags_AddOnUpdate(t *testing.T) { func 
testAccFMSResourceSet_tags_EmptyTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) + var v fms.GetResourceSetOutput resourceName := "aws_fms_resource_set.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.FMSServiceID), CheckDestroy: testAccCheckResourceSetDestroy(ctx), @@ -512,11 +516,12 @@ func testAccFMSResourceSet_tags_EmptyTag_OnCreate(t *testing.T) { func testAccFMSResourceSet_tags_EmptyTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + var v fms.GetResourceSetOutput resourceName := "aws_fms_resource_set.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.FMSServiceID), CheckDestroy: testAccCheckResourceSetDestroy(ctx), @@ -651,11 +656,12 @@ func testAccFMSResourceSet_tags_EmptyTag_OnUpdate_Add(t *testing.T) { func testAccFMSResourceSet_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + var v fms.GetResourceSetOutput resourceName := "aws_fms_resource_set.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.FMSServiceID), CheckDestroy: testAccCheckResourceSetDestroy(ctx), @@ -741,11 +747,12 @@ func testAccFMSResourceSet_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { func testAccFMSResourceSet_tags_DefaultTags_providerOnly(t *testing.T) { ctx := acctest.Context(t) + var v fms.GetResourceSetOutput 
resourceName := "aws_fms_resource_set.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.FMSServiceID), CheckDestroy: testAccCheckResourceSetDestroy(ctx), @@ -922,11 +929,12 @@ func testAccFMSResourceSet_tags_DefaultTags_providerOnly(t *testing.T) { func testAccFMSResourceSet_tags_DefaultTags_nonOverlapping(t *testing.T) { ctx := acctest.Context(t) + var v fms.GetResourceSetOutput resourceName := "aws_fms_resource_set.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.FMSServiceID), CheckDestroy: testAccCheckResourceSetDestroy(ctx), @@ -1082,11 +1090,12 @@ func testAccFMSResourceSet_tags_DefaultTags_nonOverlapping(t *testing.T) { func testAccFMSResourceSet_tags_DefaultTags_overlapping(t *testing.T) { ctx := acctest.Context(t) + var v fms.GetResourceSetOutput resourceName := "aws_fms_resource_set.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.FMSServiceID), CheckDestroy: testAccCheckResourceSetDestroy(ctx), @@ -1258,11 +1267,12 @@ func testAccFMSResourceSet_tags_DefaultTags_overlapping(t *testing.T) { func testAccFMSResourceSet_tags_DefaultTags_updateToProviderOnly(t *testing.T) { ctx := acctest.Context(t) + var v fms.GetResourceSetOutput resourceName := "aws_fms_resource_set.test" - rName := 
sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.FMSServiceID), CheckDestroy: testAccCheckResourceSetDestroy(ctx), @@ -1348,11 +1358,12 @@ func testAccFMSResourceSet_tags_DefaultTags_updateToProviderOnly(t *testing.T) { func testAccFMSResourceSet_tags_DefaultTags_updateToResourceOnly(t *testing.T) { ctx := acctest.Context(t) + var v fms.GetResourceSetOutput resourceName := "aws_fms_resource_set.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.FMSServiceID), CheckDestroy: testAccCheckResourceSetDestroy(ctx), @@ -1437,11 +1448,12 @@ func testAccFMSResourceSet_tags_DefaultTags_updateToResourceOnly(t *testing.T) { func testAccFMSResourceSet_tags_DefaultTags_emptyResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v fms.GetResourceSetOutput resourceName := "aws_fms_resource_set.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.FMSServiceID), CheckDestroy: testAccCheckResourceSetDestroy(ctx), @@ -1503,11 +1515,12 @@ func testAccFMSResourceSet_tags_DefaultTags_emptyResourceTag(t *testing.T) { func testAccFMSResourceSet_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T) { ctx := acctest.Context(t) + var v fms.GetResourceSetOutput resourceName := "aws_fms_resource_set.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := 
acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.FMSServiceID), CheckDestroy: testAccCheckResourceSetDestroy(ctx), @@ -1561,11 +1574,12 @@ func testAccFMSResourceSet_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T) { func testAccFMSResourceSet_tags_DefaultTags_nullOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v fms.GetResourceSetOutput resourceName := "aws_fms_resource_set.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.FMSServiceID), CheckDestroy: testAccCheckResourceSetDestroy(ctx), @@ -1630,11 +1644,12 @@ func testAccFMSResourceSet_tags_DefaultTags_nullOverlappingResourceTag(t *testin func testAccFMSResourceSet_tags_DefaultTags_nullNonOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v fms.GetResourceSetOutput resourceName := "aws_fms_resource_set.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.FMSServiceID), CheckDestroy: testAccCheckResourceSetDestroy(ctx), @@ -1701,11 +1716,12 @@ func testAccFMSResourceSet_tags_DefaultTags_nullNonOverlappingResourceTag(t *tes func testAccFMSResourceSet_tags_ComputedTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) + var v fms.GetResourceSetOutput resourceName := "aws_fms_resource_set.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) 
- resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.FMSServiceID), CheckDestroy: testAccCheckResourceSetDestroy(ctx), @@ -1756,11 +1772,12 @@ func testAccFMSResourceSet_tags_ComputedTag_OnCreate(t *testing.T) { func testAccFMSResourceSet_tags_ComputedTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + var v fms.GetResourceSetOutput resourceName := "aws_fms_resource_set.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.FMSServiceID), CheckDestroy: testAccCheckResourceSetDestroy(ctx), @@ -1853,11 +1870,12 @@ func testAccFMSResourceSet_tags_ComputedTag_OnUpdate_Add(t *testing.T) { func testAccFMSResourceSet_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + var v fms.GetResourceSetOutput resourceName := "aws_fms_resource_set.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.FMSServiceID), CheckDestroy: testAccCheckResourceSetDestroy(ctx), @@ -1940,11 +1958,12 @@ func testAccFMSResourceSet_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { func testAccFMSResourceSet_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { ctx := acctest.Context(t) + var v fms.GetResourceSetOutput resourceName := "aws_fms_resource_set.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: 
func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.FMSServiceID), CheckDestroy: testAccCheckResourceSetDestroy(ctx), @@ -2102,11 +2121,12 @@ func testAccFMSResourceSet_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { func testAccFMSResourceSet_tags_IgnoreTags_Overlap_ResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v fms.GetResourceSetOutput resourceName := "aws_fms_resource_set.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.FMSServiceID), CheckDestroy: testAccCheckResourceSetDestroy(ctx), diff --git a/internal/service/fms/service_endpoint_resolver_gen.go b/internal/service/fms/service_endpoint_resolver_gen.go index 39ff48b9224b..3d4467814e4c 100644 --- a/internal/service/fms/service_endpoint_resolver_gen.go +++ b/internal/service/fms/service_endpoint_resolver_gen.go @@ -62,7 +62,7 @@ func (r resolverV2) ResolveEndpoint(ctx context.Context, params fms.EndpointPara }) params.UseFIPS = aws.Bool(false) } else { - err = fmt.Errorf("looking up fms endpoint %q: %s", hostname, err) + err = fmt.Errorf("looking up fms endpoint %q: %w", hostname, err) return } } else { diff --git a/internal/service/fms/service_endpoints_gen_test.go b/internal/service/fms/service_endpoints_gen_test.go index 3b461b6f7488..fd1bfff38dad 100644 --- a/internal/service/fms/service_endpoints_gen_test.go +++ b/internal/service/fms/service_endpoints_gen_test.go @@ -523,7 +523,7 @@ func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { ) } -var errCancelOperation = fmt.Errorf("Test: Canceling request") +var errCancelOperation = errors.New("Test: Canceling request") func addCancelRequestMiddleware() func(*middleware.Stack) error { return func(stack *middleware.Stack) error { diff --git 
a/internal/service/fms/service_package.go b/internal/service/fms/service_package.go index d665b7225ce4..d0402b920398 100644 --- a/internal/service/fms/service_package.go +++ b/internal/service/fms/service_package.go @@ -10,26 +10,37 @@ import ( "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/fms" awstypes "github.com/aws/aws-sdk-go-v2/service/fms/types" + "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/errs" + "github.com/hashicorp/terraform-provider-aws/internal/vcr" ) -func (p *servicePackage) withExtraOptions(_ context.Context, config map[string]any) []func(*fms.Options) { +func (p *servicePackage) withExtraOptions(ctx context.Context, config map[string]any) []func(*fms.Options) { cfg := *(config["aws_sdkv2_config"].(*aws.Config)) return []func(*fms.Options){ func(o *fms.Options) { - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(func(err error) aws.Ternary { - // Acceptance testing creates and deletes resources in quick succession. - // The FMS onboarding process into Organizations is opaque to consumers. - // Since we cannot reasonably check this status before receiving the error, - // set the operation as retryable. - if errs.IsAErrorMessageContains[*awstypes.InvalidOperationException](err, "Your AWS Organization is currently onboarding with AWS Firewall Manager and cannot be offboarded") || - errs.IsAErrorMessageContains[*awstypes.InvalidOperationException](err, "Your AWS Organization is currently offboarding with AWS Firewall Manager. Please submit onboard request after offboarded") { - return aws.TrueTernary - } - return aws.UnknownTernary // Delegate to configured Retryer. - })) + retryables := []retry.IsErrorRetryable{ + retry.IsErrorRetryableFunc(func(err error) aws.Ternary { + // Acceptance testing creates and deletes resources in quick succession. 
+ // The FMS onboarding process into Organizations is opaque to consumers. + // Since we cannot reasonably check this status before receiving the error, + // set the operation as retryable. + if errs.IsAErrorMessageContains[*awstypes.InvalidOperationException](err, "Your AWS Organization is currently onboarding with AWS Firewall Manager and cannot be offboarded") || + errs.IsAErrorMessageContains[*awstypes.InvalidOperationException](err, "Your AWS Organization is currently offboarding with AWS Firewall Manager. Please submit onboard request after offboarded") { + return aws.TrueTernary + } + return aws.UnknownTernary // Delegate to configured Retryer. + }), + } + // Include go-vcr retryable to prevent generated client retryer from being overridden + if inContext, ok := conns.FromContext(ctx); ok && inContext.VCREnabled() { + tflog.Info(ctx, "overriding retry behavior to immediately return VCR errors") + retryables = append(retryables, vcr.InteractionNotFoundRetryableFunc) + } + + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retryables...) 
}, } } diff --git a/internal/service/fms/service_package_gen.go b/internal/service/fms/service_package_gen.go index e3b4f630abfe..6b1ce12aa184 100644 --- a/internal/service/fms/service_package_gen.go +++ b/internal/service/fms/service_package_gen.go @@ -7,7 +7,6 @@ import ( "unique" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/fms" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -83,7 +82,7 @@ func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) ( func(o *fms.Options) { if inContext, ok := conns.FromContext(ctx); ok && inContext.VCREnabled() { tflog.Info(ctx, "overriding retry behavior to immediately return VCR errors") - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(vcr.InteractionNotFoundRetryableFunc)) + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), vcr.InteractionNotFoundRetryableFunc) } }, withExtraOptions(ctx, p, config), diff --git a/internal/service/fms/sweep.go b/internal/service/fms/sweep.go index b681f0856952..0413701c8c12 100644 --- a/internal/service/fms/sweep.go +++ b/internal/service/fms/sweep.go @@ -37,7 +37,7 @@ func sweepAdminAccount(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.FMSClient(ctx) diff --git a/internal/service/fms/tags_gen.go b/internal/service/fms/tags_gen.go index f0d045e4b4d6..7fc7d55132c1 100644 --- a/internal/service/fms/tags_gen.go +++ b/internal/service/fms/tags_gen.go @@ -3,8 +3,8 @@ package fms import ( "context" - "fmt" + "github.com/YakDriver/smarterr" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/fms" awstypes "github.com/aws/aws-sdk-go-v2/service/fms/types" @@ -27,7 
+27,7 @@ func listTags(ctx context.Context, conn *fms.Client, identifier string, optFns . output, err := conn.ListTagsForResource(ctx, &input, optFns...) if err != nil { - return tftags.New(ctx, nil), err + return tftags.New(ctx, nil), smarterr.NewError(err) } return keyValueTags(ctx, output.TagList), nil @@ -39,7 +39,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri tags, err := listTags(ctx, meta.(*conns.AWSClient).FMSClient(ctx), identifier) if err != nil { - return err + return smarterr.NewError(err) } if inContext, ok := tftags.FromContext(ctx); ok { @@ -117,7 +117,7 @@ func updateTags(ctx context.Context, conn *fms.Client, identifier string, oldTag _, err := conn.UntagResource(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("untagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } @@ -132,7 +132,7 @@ func updateTags(ctx context.Context, conn *fms.Client, identifier string, oldTag _, err := conn.TagResource(ctx, &input, optFns...) 
if err != nil { - return fmt.Errorf("tagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } diff --git a/internal/service/fsx/exports_test.go b/internal/service/fsx/exports_test.go index 1b3ce08314cd..3fd67effaa0c 100644 --- a/internal/service/fsx/exports_test.go +++ b/internal/service/fsx/exports_test.go @@ -15,6 +15,7 @@ var ( ResourceOpenZFSFileSystem = resourceOpenZFSFileSystem ResourceOpenZFSSnapshot = resourceOpenZFSSnapshot ResourceOpenZFSVolume = resourceOpenZFSVolume + ResourceS3AccessPointAttachment = newS3AccessPointAttachmentResource FindBackupByID = findBackupByID FindDataRepositoryAssociationByID = findDataRepositoryAssociationByID @@ -24,6 +25,7 @@ var ( FindONTAPVolumeByID = findONTAPVolumeByID FindOpenZFSFileSystemByID = findOpenZFSFileSystemByID FindOpenZFSVolumeByID = findOpenZFSVolumeByID + FindS3AccessPointAttachmentByName = findS3AccessPointAttachmentByName FindStorageVirtualMachineByID = findStorageVirtualMachineByID FindSnapshotByID = findSnapshotByID FindWindowsFileSystemByID = findWindowsFileSystemByID diff --git a/internal/service/fsx/lustre_file_system.go b/internal/service/fsx/lustre_file_system.go index a2b617a4bf83..f094ea32642d 100644 --- a/internal/service/fsx/lustre_file_system.go +++ b/internal/service/fsx/lustre_file_system.go @@ -108,9 +108,8 @@ func resourceLustreFileSystem() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ names.AttrSize: { - Type: schema.TypeInt, - Optional: true, - ValidateFunc: validation.IntBetween(32, 131072), + Type: schema.TypeInt, + Optional: true, }, "sizing_mode": { Type: schema.TypeString, @@ -343,6 +342,7 @@ func resourceLustreFileSystem() *schema.Resource { CustomizeDiff: customdiff.Sequence( resourceLustreFileSystemStorageCapacityCustomizeDiff, resourceLustreFileSystemMetadataConfigCustomizeDiff, + resourceLustreFileSystemDataReadCacheConfigurationCustomizeDiff, ), } } @@ -405,6 +405,34 @@ func 
resourceLustreFileSystemMetadataConfigCustomizeDiff(_ context.Context, d *s return nil } +func resourceLustreFileSystemDataReadCacheConfigurationCustomizeDiff(_ context.Context, d *schema.ResourceDiff, meta any) error { + if v, ok := d.Get(names.AttrStorageType).(string); ok && v == string(awstypes.StorageTypeIntelligentTiering) { + var throughputCapacity int + if v, ok := d.Get("throughput_capacity").(int); ok && v != 0 { + throughputCapacity = v + } else { + return fmt.Errorf("Validation Error: ThroughputCapacity is a required parameter for Lustre file systems with StorageType %s", awstypes.StorageTypeIntelligentTiering) + } + + if v, ok := d.Get("data_read_cache_configuration").([]any); ok && len(v) > 0 && v[0] != nil { + config := v[0].(map[string]any) + + if sizingMode, ok := config["sizing_mode"].(string); ok && sizingMode == string(awstypes.LustreReadCacheSizingModeUserProvisioned) { + if size, ok := config[names.AttrSize].(int); ok && size > 0 { + factor := throughputCapacity / 4000 + minSize := 32 * factor + maxSize := 131072 * factor + if size < minSize || size > maxSize { + return fmt.Errorf("File systems with throughput capacity of %d MB/s support a minimum read cache size of %d GiB and maximum read cache size of %d GiB", throughputCapacity, minSize, maxSize) + } + } + } + } + } + + return nil +} + func resourceLustreFileSystemCreate(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { var diags diag.Diagnostics conn := meta.(*conns.AWSClient).FSxClient(ctx) @@ -632,9 +660,35 @@ func resourceLustreFileSystemUpdate(ctx context.Context, d *schema.ResourceData, var diags diag.Diagnostics conn := meta.(*conns.AWSClient).FSxClient(ctx) + updated := false + // First, update the metadata configuration if it has changed. + // Sometimes it is necessary to increase IOPS before increasing storage_capacity. 
+ if d.HasChange("metadata_configuration") { + input := &fsx.UpdateFileSystemInput{ + ClientRequestToken: aws.String(id.UniqueId()), + FileSystemId: aws.String(d.Id()), + LustreConfiguration: &awstypes.UpdateFileSystemLustreConfiguration{ + MetadataConfiguration: expandLustreMetadataUpdateConfiguration(d.Get("metadata_configuration").([]any)), + }, + } + + startTime := time.Now() + _, err := conn.UpdateFileSystem(ctx, input) + + if err != nil { + return sdkdiag.AppendErrorf(diags, "updating FSX for Lustre File System (%s) metadata_configuration: %s", d.Id(), err) + } + + if _, err := waitFileSystemUpdated(ctx, conn, d.Id(), startTime, d.Timeout(schema.TimeoutUpdate)); err != nil { + return sdkdiag.AppendErrorf(diags, "waiting for FSx for Lustre File System (%s) metadata_configuration update: %s", d.Id(), err) + } + updated = true + } + if d.HasChangesExcept( "final_backup_tags", "skip_final_backup", + "metadata_configuration", names.AttrTags, names.AttrTagsAll, ) { @@ -668,10 +722,6 @@ func resourceLustreFileSystemUpdate(ctx context.Context, d *schema.ResourceData, input.LustreConfiguration.LogConfiguration = expandLustreLogCreateConfiguration(d.Get("log_configuration").([]any)) } - if d.HasChange("metadata_configuration") { - input.LustreConfiguration.MetadataConfiguration = expandLustreMetadataUpdateConfiguration(d.Get("metadata_configuration").([]any)) - } - if d.HasChange("per_unit_storage_throughput") { input.LustreConfiguration.PerUnitStorageThroughput = aws.Int32(int32(d.Get("per_unit_storage_throughput").(int))) } @@ -702,7 +752,10 @@ func resourceLustreFileSystemUpdate(ctx context.Context, d *schema.ResourceData, if _, err := waitFileSystemUpdated(ctx, conn, d.Id(), startTime, d.Timeout(schema.TimeoutUpdate)); err != nil { return sdkdiag.AppendErrorf(diags, "waiting for FSx for Lustre File System (%s) update: %s", d.Id(), err) } + updated = true + } + if updated { if _, err := waitFileSystemAdministrativeActionCompleted(ctx, conn, d.Id(), 
awstypes.AdministrativeActionTypeFileSystemUpdate, d.Timeout(schema.TimeoutUpdate)); err != nil { return sdkdiag.AppendErrorf(diags, "waiting for FSx for Lustre File System (%s) administrative action (%s) complete: %s", d.Id(), awstypes.AdministrativeActionTypeFileSystemUpdate, err) } diff --git a/internal/service/fsx/lustre_file_system_test.go b/internal/service/fsx/lustre_file_system_test.go index f1f80f79d18d..b12f869ee025 100644 --- a/internal/service/fsx/lustre_file_system_test.go +++ b/internal/service/fsx/lustre_file_system_test.go @@ -883,7 +883,15 @@ func TestAccFSxLustreFileSystem_intelligentTiering(t *testing.T) { CheckDestroy: testAccCheckLustreFileSystemDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccLustreFileSystemConfig_intelligentTiering(rName), + Config: testAccLustreFileSystemConfig_intelligentTiering(rName, 4000, 31), + ExpectError: regexache.MustCompile("File systems with throughput capacity of 4000 MB/s support a minimum read cache size of 32 GiB and maximum read cache size of 131072 GiB"), + }, + { + Config: testAccLustreFileSystemConfig_intelligentTiering(rName, 8000, 32), + ExpectError: regexache.MustCompile("File systems with throughput capacity of 8000 MB/s support a minimum read cache size of 64 GiB and maximum read cache size of 262144 GiB"), + }, + { + Config: testAccLustreFileSystemConfig_intelligentTiering(rName, 4000, 32), Check: resource.ComposeAggregateTestCheckFunc( testAccCheckLustreFileSystemExists(ctx, resourceName, &filesystem), acctest.MatchResourceAttrRegionalARN(ctx, resourceName, names.AttrARN, "fsx", regexache.MustCompile(`file-system/fs-.+`)), @@ -1002,7 +1010,7 @@ func TestAccFSxLustreFileSystem_metadataConfig(t *testing.T) { ImportStateVerifyIgnore: []string{names.AttrSecurityGroupIDs}, }, { - Config: testAccLustreFileSystemConfig_metadata_iops(rName, "USER_PROVISIONED", 1500), + Config: testAccLustreFileSystemConfig_metadata_iops(rName, "USER_PROVISIONED", 1500, 1200), Check: 
resource.ComposeTestCheckFunc( testAccCheckLustreFileSystemExists(ctx, resourceName, &filesystem2), testAccCheckLustreFileSystemNotRecreated(&filesystem1, &filesystem2), @@ -1028,7 +1036,7 @@ func TestAccFSxLustreFileSystem_metadataConfig_increase(t *testing.T) { CheckDestroy: testAccCheckLustreFileSystemDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccLustreFileSystemConfig_metadata_iops(rName, "USER_PROVISIONED", 1500), + Config: testAccLustreFileSystemConfig_metadata_iops(rName, "USER_PROVISIONED", 1500, 1200), Check: resource.ComposeTestCheckFunc( testAccCheckLustreFileSystemExists(ctx, resourceName, &filesystem1), resource.TestCheckResourceAttr(resourceName, "metadata_configuration.#", "1"), @@ -1043,7 +1051,7 @@ func TestAccFSxLustreFileSystem_metadataConfig_increase(t *testing.T) { ImportStateVerifyIgnore: []string{names.AttrSecurityGroupIDs}, }, { - Config: testAccLustreFileSystemConfig_metadata_iops(rName, "USER_PROVISIONED", 3000), + Config: testAccLustreFileSystemConfig_metadata_iops(rName, "USER_PROVISIONED", 3000, 1200), Check: resource.ComposeTestCheckFunc( testAccCheckLustreFileSystemExists(ctx, resourceName, &filesystem2), testAccCheckLustreFileSystemNotRecreated(&filesystem1, &filesystem2), @@ -1069,7 +1077,7 @@ func TestAccFSxLustreFileSystem_metadataConfig_decrease(t *testing.T) { CheckDestroy: testAccCheckLustreFileSystemDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccLustreFileSystemConfig_metadata_iops(rName, "USER_PROVISIONED", 3000), + Config: testAccLustreFileSystemConfig_metadata_iops(rName, "USER_PROVISIONED", 3000, 1200), Check: resource.ComposeTestCheckFunc( testAccCheckLustreFileSystemExists(ctx, resourceName, &filesystem1), resource.TestCheckResourceAttr(resourceName, "metadata_configuration.#", "1"), @@ -1084,7 +1092,7 @@ func TestAccFSxLustreFileSystem_metadataConfig_decrease(t *testing.T) { ImportStateVerifyIgnore: []string{names.AttrSecurityGroupIDs}, }, { - Config: 
testAccLustreFileSystemConfig_metadata_iops(rName, "USER_PROVISIONED", 1500), + Config: testAccLustreFileSystemConfig_metadata_iops(rName, "USER_PROVISIONED", 1500, 1200), Check: resource.ComposeTestCheckFunc( testAccCheckLustreFileSystemExists(ctx, resourceName, &filesystem2), testAccCheckLustreFileSystemRecreated(&filesystem1, &filesystem2), @@ -1097,6 +1105,44 @@ func TestAccFSxLustreFileSystem_metadataConfig_decrease(t *testing.T) { }) } +func TestAccFSxLustreFileSystem_metadataConfig_increaseWithStorageCapacity(t *testing.T) { + ctx := acctest.Context(t) + var filesystem1, filesystem2 awstypes.FileSystem + resourceName := "aws_fsx_lustre_file_system.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.FSxEndpointID) }, + ErrorCheck: acctest.ErrorCheck(t, names.FSxServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckLustreFileSystemDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccLustreFileSystemConfig_metadata_iops(rName, "USER_PROVISIONED", 1500, 1200), + Check: resource.ComposeTestCheckFunc( + testAccCheckLustreFileSystemExists(ctx, resourceName, &filesystem1), + resource.TestCheckResourceAttr(resourceName, "metadata_configuration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "metadata_configuration.0.mode", "USER_PROVISIONED"), + resource.TestCheckResourceAttr(resourceName, "metadata_configuration.0.iops", "1500"), + resource.TestCheckResourceAttr(resourceName, "storage_capacity", "1200"), + ), + }, + { + // When storage_capacity is increased to 2400, IOPS must be increased to at least 3000. 
+ Config: testAccLustreFileSystemConfig_metadata_iops(rName, "USER_PROVISIONED", 3000, 2400), + Check: resource.ComposeTestCheckFunc( + testAccCheckLustreFileSystemExists(ctx, resourceName, &filesystem2), + testAccCheckLustreFileSystemNotRecreated(&filesystem1, &filesystem2), + resource.TestCheckResourceAttr(resourceName, "metadata_configuration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "metadata_configuration.0.mode", "USER_PROVISIONED"), + resource.TestCheckResourceAttr(resourceName, "metadata_configuration.0.iops", "3000"), + resource.TestCheckResourceAttr(resourceName, "storage_capacity", "2400"), + ), + }, + }, + }) +} + func TestAccFSxLustreFileSystem_rootSquashConfig(t *testing.T) { ctx := acctest.Context(t) var filesystem awstypes.FileSystem @@ -2007,10 +2053,10 @@ resource "aws_fsx_lustre_file_system" "test" { `, rName, mode)) } -func testAccLustreFileSystemConfig_metadata_iops(rName, mode string, iops int) string { +func testAccLustreFileSystemConfig_metadata_iops(rName, mode string, iops, storageCapacity int) string { return acctest.ConfigCompose(testAccLustreFileSystemConfig_base(rName), fmt.Sprintf(` resource "aws_fsx_lustre_file_system" "test" { - storage_capacity = 1200 + storage_capacity = %[4]d subnet_ids = aws_subnet.test[*].id deployment_type = "PERSISTENT_2" per_unit_storage_throughput = 125 @@ -2024,7 +2070,7 @@ resource "aws_fsx_lustre_file_system" "test" { Name = %[1]q } } -`, rName, mode, iops)) +`, rName, mode, iops, storageCapacity)) } func testAccLustreFileSystemConfig_rootSquash(rName, uid string) string { @@ -2089,17 +2135,17 @@ resource "aws_fsx_lustre_file_system" "test" { `, rName, efaEnabled)) } -func testAccLustreFileSystemConfig_intelligentTiering(rName string) string { - return acctest.ConfigCompose(testAccLustreFileSystemConfig_base(rName), ` +func testAccLustreFileSystemConfig_intelligentTiering(rName string, throughputCapacity, cacheSize int) string { + return 
acctest.ConfigCompose(testAccLustreFileSystemConfig_base(rName), fmt.Sprintf(` resource "aws_fsx_lustre_file_system" "test" { subnet_ids = aws_subnet.test[*].id deployment_type = "PERSISTENT_2" storage_type = "INTELLIGENT_TIERING" - throughput_capacity = 4000 + throughput_capacity = %[1]d data_read_cache_configuration { sizing_mode = "USER_PROVISIONED" - size = 32 + size = %[2]d } metadata_configuration { @@ -2108,5 +2154,5 @@ resource "aws_fsx_lustre_file_system" "test" { } } -`) +`, throughputCapacity, cacheSize)) } diff --git a/internal/service/fsx/openzfs_file_system.go b/internal/service/fsx/openzfs_file_system.go index cb3ecced8150..188add6186d5 100644 --- a/internal/service/fsx/openzfs_file_system.go +++ b/internal/service/fsx/openzfs_file_system.go @@ -227,7 +227,6 @@ func resourceOpenZFSFileSystem() *schema.Resource { Type: schema.TypeSet, Optional: true, Computed: true, - MaxItems: 100, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ names.AttrID: { diff --git a/internal/service/fsx/openzfs_volume.go b/internal/service/fsx/openzfs_volume.go index ac6d0f901799..6dc7cbc9c363 100644 --- a/internal/service/fsx/openzfs_volume.go +++ b/internal/service/fsx/openzfs_volume.go @@ -174,7 +174,6 @@ func resourceOpenZFSVolume() *schema.Resource { Type: schema.TypeSet, Optional: true, Computed: true, - MaxItems: 100, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ names.AttrID: { diff --git a/internal/service/fsx/s3_access_point_attachment.go b/internal/service/fsx/s3_access_point_attachment.go new file mode 100644 index 000000000000..e5a27a7befb1 --- /dev/null +++ b/internal/service/fsx/s3_access_point_attachment.go @@ -0,0 +1,521 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package fsx + +import ( + "context" + "errors" + "fmt" + "time" + + "github.com/YakDriver/regexache" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/fsx" + awstypes "github.com/aws/aws-sdk-go-v2/service/fsx/types" + "github.com/hashicorp/terraform-plugin-framework-timeouts/resource/timeouts" + "github.com/hashicorp/terraform-plugin-framework-validators/listvalidator" + "github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/int64planmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/listplanmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/types" + sdkid "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" + "github.com/hashicorp/terraform-provider-aws/internal/enum" + "github.com/hashicorp/terraform-provider-aws/internal/errs" + "github.com/hashicorp/terraform-provider-aws/internal/errs/fwdiag" + "github.com/hashicorp/terraform-provider-aws/internal/framework" + fwflex "github.com/hashicorp/terraform-provider-aws/internal/framework/flex" + fwtypes "github.com/hashicorp/terraform-provider-aws/internal/framework/types" + fwvalidators "github.com/hashicorp/terraform-provider-aws/internal/framework/validators" + "github.com/hashicorp/terraform-provider-aws/internal/retry" + tfslices "github.com/hashicorp/terraform-provider-aws/internal/slices" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + 
"github.com/hashicorp/terraform-provider-aws/names" +) + +// @FrameworkResource("aws_fsx_s3_access_point_attachment", name="S3 Access Point Attachment") +func newS3AccessPointAttachmentResource(_ context.Context) (resource.ResourceWithConfigure, error) { + r := &s3AccessPointAttachmentResource{} + + r.SetDefaultCreateTimeout(15 * time.Minute) + r.SetDefaultDeleteTimeout(15 * time.Minute) + + return r, nil +} + +type s3AccessPointAttachmentResource struct { + framework.ResourceWithModel[s3AccessPointAttachmentResourceModel] + framework.WithTimeouts + framework.WithNoUpdate +} + +func (r *s3AccessPointAttachmentResource) Schema(ctx context.Context, request resource.SchemaRequest, response *resource.SchemaResponse) { + response.Schema = schema.Schema{ + Attributes: map[string]schema.Attribute{ + names.AttrName: schema.StringAttribute{ + Required: true, + Validators: []validator.String{ + stringvalidator.RegexMatches(regexache.MustCompile(`^[a-z0-9][a-z0-9-]{1,48}[a-z0-9]$`), "must between 3 and 50 lowercase letters, numbers, or hyphens"), + fwvalidators.SuffixNoneOf("-ext-s3alias"), + }, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + }, + "s3_access_point_alias": schema.StringAttribute{ + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + }, + "s3_access_point_arn": schema.StringAttribute{ + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + }, + names.AttrType: schema.StringAttribute{ + CustomType: fwtypes.StringEnumType[awstypes.S3AccessPointAttachmentType](), + Required: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + }, + }, + Blocks: map[string]schema.Block{ + "openzfs_configuration": schema.ListNestedBlock{ + CustomType: fwtypes.NewListNestedObjectTypeOf[s3AccessPointOpenZFSConfigurationModel](ctx), + Validators: []validator.List{ + listvalidator.IsRequired(), + 
listvalidator.SizeAtMost(1), + }, + PlanModifiers: []planmodifier.List{ + listplanmodifier.RequiresReplace(), + }, + NestedObject: schema.NestedBlockObject{ + Attributes: map[string]schema.Attribute{ + "volume_id": schema.StringAttribute{ + Required: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + }, + }, + Blocks: map[string]schema.Block{ + "file_system_identity": schema.ListNestedBlock{ + CustomType: fwtypes.NewListNestedObjectTypeOf[openZFSFileSystemIdentityModel](ctx), + Validators: []validator.List{ + listvalidator.IsRequired(), + listvalidator.SizeAtLeast(1), + listvalidator.SizeAtMost(1), + }, + PlanModifiers: []planmodifier.List{ + listplanmodifier.RequiresReplace(), + }, + NestedObject: schema.NestedBlockObject{ + Attributes: map[string]schema.Attribute{ + names.AttrType: schema.StringAttribute{ + CustomType: fwtypes.StringEnumType[awstypes.OpenZFSFileSystemUserType](), + Required: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + }, + }, + Blocks: map[string]schema.Block{ + "posix_user": schema.ListNestedBlock{ + CustomType: fwtypes.NewListNestedObjectTypeOf[openZFSPosixFileSystemUserModel](ctx), + Validators: []validator.List{ + listvalidator.SizeAtMost(1), + }, + PlanModifiers: []planmodifier.List{ + listplanmodifier.RequiresReplace(), + }, + NestedObject: schema.NestedBlockObject{ + Attributes: map[string]schema.Attribute{ + "gid": schema.Int64Attribute{ + Required: true, + PlanModifiers: []planmodifier.Int64{ + int64planmodifier.RequiresReplace(), + }, + }, + "secondary_gids": schema.ListAttribute{ + CustomType: fwtypes.ListOfInt64Type, + ElementType: types.Int64Type, + Optional: true, + Validators: []validator.List{ + listvalidator.SizeAtMost(15), + }, + PlanModifiers: []planmodifier.List{ + listplanmodifier.RequiresReplace(), + }, + }, + "uid": schema.Int64Attribute{ + Required: true, + PlanModifiers: []planmodifier.Int64{ + int64planmodifier.RequiresReplace(), + 
}, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + "s3_access_point": schema.ListNestedBlock{ + CustomType: fwtypes.NewListNestedObjectTypeOf[s3AccessPointModel](ctx), + Validators: []validator.List{ + listvalidator.SizeAtMost(1), + }, + PlanModifiers: []planmodifier.List{ + listplanmodifier.RequiresReplace(), + }, + NestedObject: schema.NestedBlockObject{ + Attributes: map[string]schema.Attribute{ + names.AttrPolicy: schema.StringAttribute{ + CustomType: fwtypes.IAMPolicyType, + Optional: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + }, + }, + Blocks: map[string]schema.Block{ + names.AttrVPCConfiguration: schema.ListNestedBlock{ + CustomType: fwtypes.NewListNestedObjectTypeOf[s3AccessPointVpcConfigurationModel](ctx), + Validators: []validator.List{ + listvalidator.SizeAtMost(1), + }, + PlanModifiers: []planmodifier.List{ + listplanmodifier.RequiresReplace(), + }, + NestedObject: schema.NestedBlockObject{ + Attributes: map[string]schema.Attribute{ + names.AttrVPCID: schema.StringAttribute{ + Optional: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + }, + }, + }, + }, + }, + }, + }, + names.AttrTimeouts: timeouts.Block(ctx, timeouts.Opts{ + Create: true, + Delete: true, + }), + }, + } +} + +func (r *s3AccessPointAttachmentResource) Create(ctx context.Context, request resource.CreateRequest, response *resource.CreateResponse) { + var data s3AccessPointAttachmentResourceModel + response.Diagnostics.Append(request.Plan.Get(ctx, &data)...) + if response.Diagnostics.HasError() { + return + } + + conn := r.Meta().FSxClient(ctx) + + name := fwflex.StringValueFromFramework(ctx, data.Name) + var input fsx.CreateAndAttachS3AccessPointInput + response.Diagnostics.Append(fwflex.Expand(ctx, data, &input)...) + if response.Diagnostics.HasError() { + return + } + + // Additional fields. 
+ input.ClientRequestToken = aws.String(sdkid.UniqueId()) + + _, err := conn.CreateAndAttachS3AccessPoint(ctx, &input) + + if err != nil { + response.Diagnostics.AddError(fmt.Sprintf("creating FSx S3 Access Point Attachment (%s)", name), err.Error()) + + return + } + + output, err := waitS3AccessPointAttachmentCreated(ctx, conn, name, r.CreateTimeout(ctx, data.Timeouts)) + + if err != nil { + response.Diagnostics.AddError(fmt.Sprintf("waiting for FSx S3 Access Point Attachment (%s) create", name), err.Error()) + + return + } + + // Set values for unknowns. + data.S3AccessPointAlias = fwflex.StringToFramework(ctx, output.S3AccessPoint.Alias) + data.S3AccessPointARN = fwflex.StringToFramework(ctx, output.S3AccessPoint.ResourceARN) + + response.Diagnostics.Append(response.State.Set(ctx, data)...) +} + +func (r *s3AccessPointAttachmentResource) Read(ctx context.Context, request resource.ReadRequest, response *resource.ReadResponse) { + var data s3AccessPointAttachmentResourceModel + response.Diagnostics.Append(request.State.Get(ctx, &data)...) + if response.Diagnostics.HasError() { + return + } + + conn := r.Meta().FSxClient(ctx) + + name := fwflex.StringValueFromFramework(ctx, data.Name) + output, err := findS3AccessPointAttachmentByName(ctx, conn, name) + + if tfresource.NotFound(err) { + response.Diagnostics.Append(fwdiag.NewResourceNotFoundWarningDiagnostic(err)) + response.State.RemoveResource(ctx) + + return + } + + if err != nil { + response.Diagnostics.AddError(fmt.Sprintf("reading FSx S3 Access Point Attachment (%s)", name), err.Error()) + + return + } + + // s3_access_point.policy is write-only. + // Copy value from State. + policy := fwtypes.IAMPolicyNull() + s3AccessPoint, diags := data.S3AccessPoint.ToPtr(ctx) + response.Diagnostics.Append(diags...) + if response.Diagnostics.HasError() { + return + } + if s3AccessPoint != nil { + policy = s3AccessPoint.Policy + } + + // S3 access point alias and ARN are handled at the top level. 
+ data.S3AccessPointAlias = fwflex.StringToFramework(ctx, output.S3AccessPoint.Alias) + data.S3AccessPointARN = fwflex.StringToFramework(ctx, output.S3AccessPoint.ResourceARN) + if policy.IsNull() && output.S3AccessPoint.VpcConfiguration == nil { + output.S3AccessPoint = nil + } + + // Set attributes for import. + response.Diagnostics.Append(fwflex.Flatten(ctx, output, &data)...) + if response.Diagnostics.HasError() { + return + } + + // s3_access_point.policy is write-only. + if !policy.IsNull() { + s3AccessPoint, diags := data.S3AccessPoint.ToPtr(ctx) + response.Diagnostics.Append(diags...) + if response.Diagnostics.HasError() { + return + } + s3AccessPoint.Policy = policy + + tfS3AccessPoint, diags := fwtypes.NewListNestedObjectValueOfPtr(ctx, s3AccessPoint) + response.Diagnostics.Append(diags...) + if response.Diagnostics.HasError() { + return + } + data.S3AccessPoint = tfS3AccessPoint + } + + response.Diagnostics.Append(response.State.Set(ctx, &data)...) +} + +func (r *s3AccessPointAttachmentResource) Delete(ctx context.Context, request resource.DeleteRequest, response *resource.DeleteResponse) { + var data s3AccessPointAttachmentResourceModel + response.Diagnostics.Append(request.State.Get(ctx, &data)...) 
+ if response.Diagnostics.HasError() { + return + } + + conn := r.Meta().FSxClient(ctx) + + name := fwflex.StringValueFromFramework(ctx, data.Name) + input := fsx.DetachAndDeleteS3AccessPointInput{ + ClientRequestToken: aws.String(sdkid.UniqueId()), + Name: aws.String(name), + } + + _, err := conn.DetachAndDeleteS3AccessPoint(ctx, &input) + + if errs.IsA[*awstypes.S3AccessPointAttachmentNotFound](err) { + return + } + + if err != nil { + response.Diagnostics.AddError(fmt.Sprintf("deleting FSx S3 Access Point Attachment (%s)", name), err.Error()) + + return + } + + if _, err := waitS3AccessPointAttachmentDeleted(ctx, conn, name, r.DeleteTimeout(ctx, data.Timeouts)); err != nil { + response.Diagnostics.AddError(fmt.Sprintf("waiting for FSx S3 Access Point Attachment (%s) delete", name), err.Error()) + + return + } +} + +func (r *s3AccessPointAttachmentResource) ImportState(ctx context.Context, request resource.ImportStateRequest, response *resource.ImportStateResponse) { + resource.ImportStatePassthroughID(ctx, path.Root(names.AttrName), request, response) +} + +func findS3AccessPointAttachmentByName(ctx context.Context, conn *fsx.Client, name string) (*awstypes.S3AccessPointAttachment, error) { + input := fsx.DescribeS3AccessPointAttachmentsInput{ + Names: []string{name}, + } + output, err := findS3AccessPointAttachment(ctx, conn, &input, tfslices.PredicateTrue[*awstypes.S3AccessPointAttachment]()) + + if err != nil { + return nil, err + } + + if output.S3AccessPoint == nil { + return nil, tfresource.NewEmptyResultError(name) + } + + return output, nil +} + +func findS3AccessPointAttachment(ctx context.Context, conn *fsx.Client, input *fsx.DescribeS3AccessPointAttachmentsInput, filter tfslices.Predicate[*awstypes.S3AccessPointAttachment]) (*awstypes.S3AccessPointAttachment, error) { + output, err := findS3AccessPointAttachments(ctx, conn, input, filter) + + if err != nil { + return nil, err + } + + return tfresource.AssertSingleValueResult(output) +} + +func 
findS3AccessPointAttachments(ctx context.Context, conn *fsx.Client, input *fsx.DescribeS3AccessPointAttachmentsInput, filter tfslices.Predicate[*awstypes.S3AccessPointAttachment]) ([]awstypes.S3AccessPointAttachment, error) { + var output []awstypes.S3AccessPointAttachment + + pages := fsx.NewDescribeS3AccessPointAttachmentsPaginator(conn, input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + + if errs.IsA[*awstypes.S3AccessPointAttachmentNotFound](err) { + return nil, &retry.NotFoundError{ + LastError: err, + } + } + + if err != nil { + return nil, err + } + + for _, v := range page.S3AccessPointAttachments { + if filter(&v) { + output = append(output, v) + } + } + } + + return output, nil +} + +func statusS3AccessPointAttachment(conn *fsx.Client, name string) retry.StateRefreshFunc { + return func(ctx context.Context) (any, string, error) { + output, err := findS3AccessPointAttachmentByName(ctx, conn, name) + + if tfresource.NotFound(err) { + return nil, "", nil + } + + if err != nil { + return nil, "", err + } + + return output, string(output.Lifecycle), nil + } +} + +func waitS3AccessPointAttachmentCreated(ctx context.Context, conn *fsx.Client, name string, timeout time.Duration) (*awstypes.S3AccessPointAttachment, error) { + stateConf := &retry.StateChangeConf{ + Pending: enum.Slice(awstypes.S3AccessPointAttachmentLifecycleCreating), + Target: enum.Slice(awstypes.S3AccessPointAttachmentLifecycleAvailable), + Refresh: statusS3AccessPointAttachment(conn, name), + Timeout: timeout, + Delay: 30 * time.Second, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + + if output, ok := outputRaw.(*awstypes.S3AccessPointAttachment); ok { + if v := output.LifecycleTransitionReason; v != nil { + tfresource.SetLastError(err, errors.New(aws.ToString(v.Message))) + } + + return output, err + } + + return nil, err +} + +func waitS3AccessPointAttachmentDeleted(ctx context.Context, conn *fsx.Client, name string, timeout time.Duration) 
(*awstypes.S3AccessPointAttachment, error) { + stateConf := &retry.StateChangeConf{ + Pending: enum.Slice(awstypes.S3AccessPointAttachmentLifecycleDeleting), + Target: []string{}, + Refresh: statusS3AccessPointAttachment(conn, name), + Timeout: timeout, + Delay: 30 * time.Second, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + + if output, ok := outputRaw.(*awstypes.S3AccessPointAttachment); ok { + if v := output.LifecycleTransitionReason; v != nil { + tfresource.SetLastError(err, errors.New(aws.ToString(v.Message))) + } + + return output, err + } + + return nil, err +} + +type s3AccessPointAttachmentResourceModel struct { + framework.WithRegionModel + Name types.String `tfsdk:"name"` + OpenZFSConfiguration fwtypes.ListNestedObjectValueOf[s3AccessPointOpenZFSConfigurationModel] `tfsdk:"openzfs_configuration"` + S3AccessPoint fwtypes.ListNestedObjectValueOf[s3AccessPointModel] `tfsdk:"s3_access_point"` + S3AccessPointAlias types.String `tfsdk:"s3_access_point_alias"` + S3AccessPointARN types.String `tfsdk:"s3_access_point_arn"` + Timeouts timeouts.Value `tfsdk:"timeouts"` + Type fwtypes.StringEnum[awstypes.S3AccessPointAttachmentType] `tfsdk:"type"` +} + +type s3AccessPointOpenZFSConfigurationModel struct { + FileSystemIdentity fwtypes.ListNestedObjectValueOf[openZFSFileSystemIdentityModel] `tfsdk:"file_system_identity"` + VolumeID types.String `tfsdk:"volume_id"` +} + +type openZFSFileSystemIdentityModel struct { + PosixUser fwtypes.ListNestedObjectValueOf[openZFSPosixFileSystemUserModel] `tfsdk:"posix_user"` + Type fwtypes.StringEnum[awstypes.OpenZFSFileSystemUserType] `tfsdk:"type"` +} + +type openZFSPosixFileSystemUserModel struct { + GID types.Int64 `tfsdk:"gid"` + SecondaryGIDs fwtypes.ListOfInt64 `tfsdk:"secondary_gids"` + UID types.Int64 `tfsdk:"uid"` +} + +type s3AccessPointModel struct { + Policy fwtypes.IAMPolicy `tfsdk:"policy"` + VPCConfiguration fwtypes.ListNestedObjectValueOf[s3AccessPointVpcConfigurationModel] 
`tfsdk:"vpc_configuration"` +} + +type s3AccessPointVpcConfigurationModel struct { + VpcID types.String `tfsdk:"vpc_id"` +} diff --git a/internal/service/fsx/s3_access_point_attachment_test.go b/internal/service/fsx/s3_access_point_attachment_test.go new file mode 100644 index 000000000000..9b60aa248328 --- /dev/null +++ b/internal/service/fsx/s3_access_point_attachment_test.go @@ -0,0 +1,322 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package fsx_test + +import ( + "context" + "fmt" + "testing" + + "github.com/YakDriver/regexache" + awstypes "github.com/aws/aws-sdk-go-v2/service/fsx/types" + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/knownvalue" + "github.com/hashicorp/terraform-plugin-testing/plancheck" + "github.com/hashicorp/terraform-plugin-testing/statecheck" + "github.com/hashicorp/terraform-plugin-testing/terraform" + "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + tfknownvalue "github.com/hashicorp/terraform-provider-aws/internal/acctest/knownvalue" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + tffsx "github.com/hashicorp/terraform-provider-aws/internal/service/fsx" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccFSxS3AccessPointAttachment_basic(t *testing.T) { + ctx := acctest.Context(t) + var v awstypes.S3AccessPointAttachment + resourceName := "aws_fsx_s3_access_point_attachment.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.FSxEndpointID) }, + ErrorCheck: acctest.ErrorCheck(t, names.FSxServiceID), + ProtoV5ProviderFactories: 
acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckS3AccessPointAttachmentDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccS3AccessPointAttachmentConfig_basic(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckS3AccessPointAttachmentExists(ctx, resourceName, &v), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrName), knownvalue.StringExact(rName)), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("openzfs_configuration"), knownvalue.ListSizeExact(1)), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("s3_access_point"), knownvalue.ListSizeExact(0)), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("s3_access_point_alias"), knownvalue.NotNull()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("s3_access_point_arn"), tfknownvalue.RegionalARNRegexp("s3", regexache.MustCompile(`accesspoint/.+`))), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrType), tfknownvalue.StringExact(awstypes.S3AccessPointAttachmentTypeOpenzfs)), + }, + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, names.AttrName), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: names.AttrName, + ImportStateVerifyIgnore: []string{names.AttrPolicy}, + }, + }, + }) +} + +func TestAccFSxS3AccessPointAttachment_policy(t *testing.T) { + ctx := acctest.Context(t) + var v awstypes.S3AccessPointAttachment + resourceName := "aws_fsx_s3_access_point_attachment.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.FSxEndpointID) }, + 
ErrorCheck: acctest.ErrorCheck(t, names.FSxServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckS3AccessPointAttachmentDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccS3AccessPointAttachmentConfig_policy(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckS3AccessPointAttachmentExists(ctx, resourceName, &v), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("s3_access_point"), knownvalue.ListExact([]knownvalue.Check{ + knownvalue.ObjectPartial(map[string]knownvalue.Check{ + names.AttrPolicy: knownvalue.NotNull(), + }), + })), + }, + }, + }, + }) +} + +func TestAccFSxS3AccessPointAttachment_vpcConfiguration(t *testing.T) { + ctx := acctest.Context(t) + var v awstypes.S3AccessPointAttachment + resourceName := "aws_fsx_s3_access_point_attachment.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.FSxEndpointID) }, + ErrorCheck: acctest.ErrorCheck(t, names.FSxServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckS3AccessPointAttachmentDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccS3AccessPointAttachmentConfig_vpcConfiguration(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckS3AccessPointAttachmentExists(ctx, resourceName, &v), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("s3_access_point"), 
knownvalue.ListExact([]knownvalue.Check{ + knownvalue.ObjectExact(map[string]knownvalue.Check{ + names.AttrPolicy: knownvalue.Null(), + names.AttrVPCConfiguration: knownvalue.ListSizeExact(1), + }), + })), + }, + }, + }, + }) +} + +func TestAccFSxS3AccessPointAttachment_disappears(t *testing.T) { + ctx := acctest.Context(t) + var v awstypes.S3AccessPointAttachment + resourceName := "aws_fsx_s3_access_point_attachment.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.FSxEndpointID) }, + ErrorCheck: acctest.ErrorCheck(t, names.FSxServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckS3AccessPointAttachmentDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccS3AccessPointAttachmentConfig_basic(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckS3AccessPointAttachmentExists(ctx, resourceName, &v), + acctest.CheckFrameworkResourceDisappears(ctx, acctest.Provider, tffsx.ResourceS3AccessPointAttachment, resourceName), + ), + ExpectNonEmptyPlan: true, + }, + }, + }) +} + +func testAccCheckS3AccessPointAttachmentExists(ctx context.Context, n string, v *awstypes.S3AccessPointAttachment) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + conn := acctest.Provider.Meta().(*conns.AWSClient).FSxClient(ctx) + + output, err := tffsx.FindS3AccessPointAttachmentByName(ctx, conn, rs.Primary.Attributes[names.AttrName]) + + if err != nil { + return err + } + + *v = *output + + return nil + } +} + +func testAccCheckS3AccessPointAttachmentDestroy(ctx context.Context) resource.TestCheckFunc { + return func(s *terraform.State) error { + conn := acctest.Provider.Meta().(*conns.AWSClient).FSxClient(ctx) + + for _, rs := range s.RootModule().Resources { + 
if rs.Type != "aws_fsx_s3_access_point_attachment" { + continue + } + + _, err := tffsx.FindS3AccessPointAttachmentByName(ctx, conn, rs.Primary.Attributes[names.AttrName]) + + if tfresource.NotFound(err) { + continue + } + + if err != nil { + return err + } + + return fmt.Errorf("FSx S3 Access Point Attachment %s still exists", rs.Primary.Attributes[names.AttrName]) + } + + return nil + } +} + +func testAccS3AccessPointAttachmentConfig_base(rName string) string { + return acctest.ConfigCompose(acctest.ConfigVPCWithSubnets(rName, 1), fmt.Sprintf(` +resource "aws_fsx_openzfs_file_system" "test" { + storage_capacity = 64 + subnet_ids = aws_subnet.test[*].id + deployment_type = "SINGLE_AZ_HA_2" + throughput_capacity = 320 + skip_final_backup = true + + tags = { + Name = %[1]q + } +} + +resource "aws_fsx_openzfs_volume" "test" { + name = %[1]q + parent_volume_id = aws_fsx_openzfs_file_system.test.root_volume_id +} +`, rName)) +} + +func testAccS3AccessPointAttachmentConfig_basic(rName string) string { + return acctest.ConfigCompose(testAccS3AccessPointAttachmentConfig_base(rName), fmt.Sprintf(` +resource "aws_fsx_s3_access_point_attachment" "test" { + name = %[1]q + type = "OPENZFS" + + openzfs_configuration { + volume_id = aws_fsx_openzfs_volume.test.id + + file_system_identity { + type = "POSIX" + + posix_user { + uid = 1001 + gid = 1001 + } + } + } +} +`, rName)) +} + +func testAccS3AccessPointAttachmentConfig_policy(rName string) string { + return acctest.ConfigCompose(testAccS3AccessPointAttachmentConfig_base(rName), fmt.Sprintf(` +resource "aws_fsx_s3_access_point_attachment" "test" { + name = %[1]q + type = "OPENZFS" + + openzfs_configuration { + volume_id = aws_fsx_openzfs_volume.test.id + + file_system_identity { + type = "POSIX" + + posix_user { + uid = 1001 + gid = 1001 + + secondary_gids = [1002, 1003] + } + } + } + + s3_access_point { + policy = jsonencode({ + Version = "2008-10-17" + Statement = [{ + Effect = "Allow" + Action = "s3:GetObjectTagging" + 
Principal = { + AWS = "arn:${data.aws_partition.current.partition}:iam::${data.aws_caller_identity.current.account_id}:root" + } + Resource = "arn:${data.aws_partition.current.partition}:s3:${data.aws_region.current.region}:${data.aws_caller_identity.current.account_id}:accesspoint/%[1]s/object/*" + }] + }) + } +} + +data "aws_caller_identity" "current" {} +data "aws_partition" "current" {} +data "aws_region" "current" {} +`, rName)) +} + +func testAccS3AccessPointAttachmentConfig_vpcConfiguration(rName string) string { + return acctest.ConfigCompose(testAccS3AccessPointAttachmentConfig_base(rName), fmt.Sprintf(` +resource "aws_fsx_s3_access_point_attachment" "test" { + name = %[1]q + type = "OPENZFS" + + openzfs_configuration { + volume_id = aws_fsx_openzfs_volume.test.id + + file_system_identity { + type = "POSIX" + + posix_user { + uid = 1001 + gid = 1001 + } + } + } + + s3_access_point { + vpc_configuration { + vpc_id = aws_vpc.test.id + } + } +} +`, rName)) +} diff --git a/internal/service/fsx/service_endpoint_resolver_gen.go b/internal/service/fsx/service_endpoint_resolver_gen.go index 338435f922e3..3c762a598d6b 100644 --- a/internal/service/fsx/service_endpoint_resolver_gen.go +++ b/internal/service/fsx/service_endpoint_resolver_gen.go @@ -62,7 +62,7 @@ func (r resolverV2) ResolveEndpoint(ctx context.Context, params fsx.EndpointPara }) params.UseFIPS = aws.Bool(false) } else { - err = fmt.Errorf("looking up fsx endpoint %q: %s", hostname, err) + err = fmt.Errorf("looking up fsx endpoint %q: %w", hostname, err) return } } else { diff --git a/internal/service/fsx/service_endpoints_gen_test.go b/internal/service/fsx/service_endpoints_gen_test.go index e0848affd4d4..984bfd75ef0d 100644 --- a/internal/service/fsx/service_endpoints_gen_test.go +++ b/internal/service/fsx/service_endpoints_gen_test.go @@ -521,7 +521,7 @@ func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { ) } -var errCancelOperation = fmt.Errorf("Test: Canceling request") 
+var errCancelOperation = errors.New("Test: Canceling request") func addCancelRequestMiddleware() func(*middleware.Stack) error { return func(stack *middleware.Stack) error { diff --git a/internal/service/fsx/service_package_gen.go b/internal/service/fsx/service_package_gen.go index da8aab36b751..3122b566b7d1 100644 --- a/internal/service/fsx/service_package_gen.go +++ b/internal/service/fsx/service_package_gen.go @@ -7,7 +7,6 @@ import ( "unique" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/fsx" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -23,7 +22,14 @@ func (p *servicePackage) FrameworkDataSources(ctx context.Context) []*inttypes.S } func (p *servicePackage) FrameworkResources(ctx context.Context) []*inttypes.ServicePackageFrameworkResource { - return []*inttypes.ServicePackageFrameworkResource{} + return []*inttypes.ServicePackageFrameworkResource{ + { + Factory: newS3AccessPointAttachmentResource, + TypeName: "aws_fsx_s3_access_point_attachment", + Name: "S3 Access Point Attachment", + Region: unique.Make(inttypes.ResourceRegionDefault()), + }, + } } func (p *servicePackage) SDKDataSources(ctx context.Context) []*inttypes.ServicePackageSDKDataSource { @@ -189,7 +195,7 @@ func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) ( func(o *fsx.Options) { if inContext, ok := conns.FromContext(ctx); ok && inContext.VCREnabled() { tflog.Info(ctx, "overriding retry behavior to immediately return VCR errors") - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(vcr.InteractionNotFoundRetryableFunc)) + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), vcr.InteractionNotFoundRetryableFunc) } }, withExtraOptions(ctx, p, config), diff --git a/internal/service/fsx/sweep.go b/internal/service/fsx/sweep.go index eeef50cacb60..a7a04609c771 100644 --- 
a/internal/service/fsx/sweep.go +++ b/internal/service/fsx/sweep.go @@ -5,100 +5,40 @@ package fsx import ( "context" - "fmt" - "log" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/fsx" awstypes "github.com/aws/aws-sdk-go-v2/service/fsx/types" - "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/sweep" "github.com/hashicorp/terraform-provider-aws/internal/sweep/awsv2" + "github.com/hashicorp/terraform-provider-aws/internal/sweep/framework" + "github.com/hashicorp/terraform-provider-aws/names" ) func RegisterSweepers() { - resource.AddTestSweepers("aws_fsx_backup", &resource.Sweeper{ - Name: "aws_fsx_backup", - F: sweepBackups, - }) - - resource.AddTestSweepers("aws_fsx_lustre_file_system", &resource.Sweeper{ - Name: "aws_fsx_lustre_file_system", - F: sweepLustreFileSystems, - Dependencies: []string{ - "aws_datasync_location", - "aws_m2_environment", - }, - }) - - resource.AddTestSweepers("aws_fsx_ontap_file_system", &resource.Sweeper{ - Name: "aws_fsx_ontap_file_system", - F: sweepONTAPFileSystems, - Dependencies: []string{ - "aws_datasync_location", - "aws_fsx_ontap_storage_virtual_machine", - "aws_m2_environment", - }, - }) - - resource.AddTestSweepers("aws_fsx_ontap_storage_virtual_machine", &resource.Sweeper{ - Name: "aws_fsx_ontap_storage_virtual_machine", - F: sweepONTAPStorageVirtualMachine, - Dependencies: []string{ - "aws_fsx_ontap_volume", - }, - }) - + awsv2.Register("aws_fsx_backup", sweepBackups) + awsv2.Register("aws_fsx_lustre_file_system", sweepLustreFileSystems, "aws_datasync_location", "aws_m2_environment") + awsv2.Register("aws_fsx_ontap_file_system", sweepONTAPFileSystems, "aws_datasync_location", "aws_fsx_ontap_storage_virtual_machine", "aws_m2_environment") + awsv2.Register("aws_fsx_ontap_storage_virtual_machine", sweepONTAPStorageVirtualMachine, "aws_fsx_ontap_volume") 
awsv2.Register("aws_fsx_ontap_volume", sweepONTAPVolumes) - - resource.AddTestSweepers("aws_fsx_openzfs_file_system", &resource.Sweeper{ - Name: "aws_fsx_openzfs_file_system", - F: sweepOpenZFSFileSystems, - Dependencies: []string{ - "aws_datasync_location", - "aws_fsx_openzfs_volume", - "aws_m2_environment", - }, - }) - - resource.AddTestSweepers("aws_fsx_openzfs_volume", &resource.Sweeper{ - Name: "aws_fsx_openzfs_volume", - F: sweepOpenZFSVolume, - }) - - resource.AddTestSweepers("aws_fsx_windows_file_system", &resource.Sweeper{ - Name: "aws_fsx_windows_file_system", - F: sweepWindowsFileSystems, - Dependencies: []string{ - "aws_datasync_location", - "aws_m2_environment", - "aws_storagegateway_file_system_association", - }, - }) + awsv2.Register("aws_fsx_openzfs_file_system", sweepOpenZFSFileSystems, "aws_datasync_location", "aws_fsx_openzfs_volume", "aws_m2_environment") + awsv2.Register("aws_fsx_openzfs_volume", sweepOpenZFSVolume, "aws_fsx_s3_access_point_attachment") + awsv2.Register("aws_fsx_s3_access_point_attachment", sweepS3AccessPointAttachments) + awsv2.Register("aws_fsx_windows_file_system", sweepWindowsFileSystems, "aws_datasync_location", "aws_m2_environment", "aws_storagegateway_file_system_association") } -func sweepBackups(region string) error { - ctx := sweep.Context(region) - client, err := sweep.SharedRegionalSweepClient(ctx, region) - if err != nil { - return fmt.Errorf("error getting client: %w", err) - } +func sweepBackups(ctx context.Context, client *conns.AWSClient) ([]sweep.Sweepable, error) { conn := client.FSxClient(ctx) - input := &fsx.DescribeBackupsInput{} + var input fsx.DescribeBackupsInput sweepResources := make([]sweep.Sweepable, 0) - pages := fsx.NewDescribeBackupsPaginator(conn, input) + pages := fsx.NewDescribeBackupsPaginator(conn, &input) for pages.HasMorePages() { page, err := pages.NextPage(ctx) - if awsv2.SkipSweepError(err) { - log.Printf("[WARN] Skipping FSx Backup sweep for %s: %s", region, err) - return nil - } - if 
err != nil { - return fmt.Errorf("error listing FSx Backups (%s): %w", region, err) + return nil, err } for _, v := range page.Backups { @@ -110,36 +50,20 @@ func sweepBackups(region string) error { } } - err = sweep.SweepOrchestrator(ctx, sweepResources) - - if err != nil { - return fmt.Errorf("error sweeping FSx Backups (%s): %w", region, err) - } - - return nil + return sweepResources, nil } -func sweepLustreFileSystems(region string) error { - ctx := sweep.Context(region) - client, err := sweep.SharedRegionalSweepClient(ctx, region) - if err != nil { - return fmt.Errorf("error getting client: %w", err) - } +func sweepLustreFileSystems(ctx context.Context, client *conns.AWSClient) ([]sweep.Sweepable, error) { conn := client.FSxClient(ctx) - input := &fsx.DescribeFileSystemsInput{} + var input fsx.DescribeFileSystemsInput sweepResources := make([]sweep.Sweepable, 0) - pages := fsx.NewDescribeFileSystemsPaginator(conn, input) + pages := fsx.NewDescribeFileSystemsPaginator(conn, &input) for pages.HasMorePages() { page, err := pages.NextPage(ctx) - if awsv2.SkipSweepError(err) { - log.Printf("[WARN] Skipping FSx Lustre File System sweep for %s: %s", region, err) - return nil - } - if err != nil { - return fmt.Errorf("error listing FSx Lustre File Systems (%s): %w", region, err) + return nil, err } for _, v := range page.FileSystems { @@ -155,36 +79,20 @@ func sweepLustreFileSystems(region string) error { } } - err = sweep.SweepOrchestrator(ctx, sweepResources) - - if err != nil { - return fmt.Errorf("error sweeping FSx Lustre File Systems (%s): %w", region, err) - } - - return nil + return sweepResources, nil } -func sweepONTAPFileSystems(region string) error { - ctx := sweep.Context(region) - client, err := sweep.SharedRegionalSweepClient(ctx, region) - if err != nil { - return fmt.Errorf("error getting client: %w", err) - } +func sweepONTAPFileSystems(ctx context.Context, client *conns.AWSClient) ([]sweep.Sweepable, error) { conn := client.FSxClient(ctx) - input 
:= &fsx.DescribeFileSystemsInput{} + var input fsx.DescribeFileSystemsInput sweepResources := make([]sweep.Sweepable, 0) - pages := fsx.NewDescribeFileSystemsPaginator(conn, input) + pages := fsx.NewDescribeFileSystemsPaginator(conn, &input) for pages.HasMorePages() { page, err := pages.NextPage(ctx) - if awsv2.SkipSweepError(err) { - log.Printf("[WARN] Skipping FSx ONTAP File System sweep for %s: %s", region, err) - return nil - } - if err != nil { - return fmt.Errorf("error listing FSx ONTAP File Systems (%s): %w", region, err) + return nil, err } for _, v := range page.FileSystems { @@ -200,36 +108,20 @@ func sweepONTAPFileSystems(region string) error { } } - err = sweep.SweepOrchestrator(ctx, sweepResources) - - if err != nil { - return fmt.Errorf("error sweeping FSx ONTAP File Systems (%s): %w", region, err) - } - - return nil + return sweepResources, nil } -func sweepONTAPStorageVirtualMachine(region string) error { - ctx := sweep.Context(region) - client, err := sweep.SharedRegionalSweepClient(ctx, region) - if err != nil { - return fmt.Errorf("error getting client: %w", err) - } +func sweepONTAPStorageVirtualMachine(ctx context.Context, client *conns.AWSClient) ([]sweep.Sweepable, error) { conn := client.FSxClient(ctx) - input := &fsx.DescribeStorageVirtualMachinesInput{} + var input fsx.DescribeStorageVirtualMachinesInput sweepResources := make([]sweep.Sweepable, 0) - pages := fsx.NewDescribeStorageVirtualMachinesPaginator(conn, input) + pages := fsx.NewDescribeStorageVirtualMachinesPaginator(conn, &input) for pages.HasMorePages() { page, err := pages.NextPage(ctx) - if awsv2.SkipSweepError(err) { - log.Printf("[WARN] Skipping FSx ONTAP Storage Virtual Machine sweep for %s: %s", region, err) - return nil - } - if err != nil { - return fmt.Errorf("error listing FSx ONTAP Storage Virtual Machines (%s): %w", region, err) + return nil, err } for _, v := range page.StorageVirtualMachines { @@ -241,24 +133,18 @@ func sweepONTAPStorageVirtualMachine(region 
string) error { } } - err = sweep.SweepOrchestrator(ctx, sweepResources) - - if err != nil { - return fmt.Errorf("error sweeping FSx ONTAP Storage Virtual Machines (%s): %w", region, err) - } - - return nil + return sweepResources, nil } func sweepONTAPVolumes(ctx context.Context, client *conns.AWSClient) ([]sweep.Sweepable, error) { conn := client.FSxClient(ctx) + var input fsx.DescribeVolumesInput + sweepResources := make([]sweep.Sweepable, 0) - var sweepResources []sweep.Sweepable - - input := fsx.DescribeVolumesInput{} pages := fsx.NewDescribeVolumesPaginator(conn, &input) for pages.HasMorePages() { page, err := pages.NextPage(ctx) + if err != nil { return nil, err } @@ -267,7 +153,8 @@ func sweepONTAPVolumes(ctx context.Context, client *conns.AWSClient) ([]sweep.Sw if v.VolumeType != awstypes.VolumeTypeOntap { continue } - // Skip root volumes + + // Skip root volumes. if v.OntapConfiguration != nil && aws.ToBool(v.OntapConfiguration.StorageVirtualMachineRoot) { continue } @@ -276,7 +163,6 @@ func sweepONTAPVolumes(ctx context.Context, client *conns.AWSClient) ([]sweep.Sw if v.OntapConfiguration != nil && v.OntapConfiguration.SnaplockConfiguration != nil { bypassSnaplock = true } - r := resourceONTAPVolume() d := r.Data(nil) d.SetId(aws.ToString(v.VolumeId)) @@ -290,27 +176,17 @@ func sweepONTAPVolumes(ctx context.Context, client *conns.AWSClient) ([]sweep.Sw return sweepResources, nil } -func sweepOpenZFSFileSystems(region string) error { - ctx := sweep.Context(region) - client, err := sweep.SharedRegionalSweepClient(ctx, region) - if err != nil { - return fmt.Errorf("error getting client: %w", err) - } +func sweepOpenZFSFileSystems(ctx context.Context, client *conns.AWSClient) ([]sweep.Sweepable, error) { conn := client.FSxClient(ctx) - input := &fsx.DescribeFileSystemsInput{} + var input fsx.DescribeFileSystemsInput sweepResources := make([]sweep.Sweepable, 0) - pages := fsx.NewDescribeFileSystemsPaginator(conn, input) + pages := 
fsx.NewDescribeFileSystemsPaginator(conn, &input) for pages.HasMorePages() { page, err := pages.NextPage(ctx) - if awsv2.SkipSweepError(err) { - log.Printf("[WARN] Skipping FSx OpenZFS File System sweep for %s: %s", region, err) - return nil - } - if err != nil { - return fmt.Errorf("error listing FSx OpenZFS File Systems (%s): %w", region, err) + return nil, err } for _, v := range page.FileSystems { @@ -326,36 +202,20 @@ func sweepOpenZFSFileSystems(region string) error { } } - err = sweep.SweepOrchestrator(ctx, sweepResources) - - if err != nil { - return fmt.Errorf("error sweeping FSx OpenZFS File Systems (%s): %w", region, err) - } - - return nil + return sweepResources, nil } -func sweepOpenZFSVolume(region string) error { - ctx := sweep.Context(region) - client, err := sweep.SharedRegionalSweepClient(ctx, region) - if err != nil { - return fmt.Errorf("error getting client: %w", err) - } +func sweepOpenZFSVolume(ctx context.Context, client *conns.AWSClient) ([]sweep.Sweepable, error) { conn := client.FSxClient(ctx) - input := &fsx.DescribeVolumesInput{} + var input fsx.DescribeVolumesInput sweepResources := make([]sweep.Sweepable, 0) - pages := fsx.NewDescribeVolumesPaginator(conn, input) + pages := fsx.NewDescribeVolumesPaginator(conn, &input) for pages.HasMorePages() { page, err := pages.NextPage(ctx) - if awsv2.SkipSweepError(err) { - log.Printf("[WARN] Skipping FSx OpenZFS Volume sweep for %s: %s", region, err) - return nil - } - if err != nil { - return fmt.Errorf("error listing FSx OpenZFS Volumes (%s): %w", region, err) + return nil, err } for _, v := range page.Volumes { @@ -374,36 +234,42 @@ func sweepOpenZFSVolume(region string) error { } } - err = sweep.SweepOrchestrator(ctx, sweepResources) + return sweepResources, nil +} - if err != nil { - return fmt.Errorf("error sweeping FSx OpenZFS Volumes (%s): %w", region, err) +func sweepS3AccessPointAttachments(ctx context.Context, client *conns.AWSClient) ([]sweep.Sweepable, error) { + conn := 
client.FSxClient(ctx) + var input fsx.DescribeS3AccessPointAttachmentsInput + sweepResources := make([]sweep.Sweepable, 0) + + pages := fsx.NewDescribeS3AccessPointAttachmentsPaginator(conn, &input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + + if err != nil { + return nil, err + } + + for _, v := range page.S3AccessPointAttachments { + sweepResources = append(sweepResources, framework.NewSweepResource(newS3AccessPointAttachmentResource, client, + framework.NewAttribute(names.AttrName, aws.ToString(v.Name)))) + } } - return nil + return sweepResources, nil } -func sweepWindowsFileSystems(region string) error { - ctx := sweep.Context(region) - client, err := sweep.SharedRegionalSweepClient(ctx, region) - if err != nil { - return fmt.Errorf("error getting client: %w", err) - } +func sweepWindowsFileSystems(ctx context.Context, client *conns.AWSClient) ([]sweep.Sweepable, error) { conn := client.FSxClient(ctx) - input := &fsx.DescribeFileSystemsInput{} + var input fsx.DescribeFileSystemsInput sweepResources := make([]sweep.Sweepable, 0) - pages := fsx.NewDescribeFileSystemsPaginator(conn, input) + pages := fsx.NewDescribeFileSystemsPaginator(conn, &input) for pages.HasMorePages() { page, err := pages.NextPage(ctx) - if awsv2.SkipSweepError(err) { - log.Printf("[WARN] Skipping FSx Windows File System sweep for %s: %s", region, err) - return nil - } - if err != nil { - return fmt.Errorf("error listing FSx Windows File Systems (%s): %w", region, err) + return nil, err } for _, v := range page.FileSystems { @@ -420,11 +286,5 @@ func sweepWindowsFileSystems(region string) error { } } - err = sweep.SweepOrchestrator(ctx, sweepResources) - - if err != nil { - return fmt.Errorf("error sweeping FSx Windows File Systems (%s): %w", region, err) - } - - return nil + return sweepResources, nil } diff --git a/internal/service/fsx/tags_gen.go b/internal/service/fsx/tags_gen.go index 0a1c225e0f11..530ca3d55e1c 100644 --- a/internal/service/fsx/tags_gen.go +++ 
b/internal/service/fsx/tags_gen.go @@ -3,8 +3,8 @@ package fsx import ( "context" - "fmt" + "github.com/YakDriver/smarterr" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/fsx" awstypes "github.com/aws/aws-sdk-go-v2/service/fsx/types" @@ -31,7 +31,7 @@ func listTags(ctx context.Context, conn *fsx.Client, identifier string, optFns . page, err := pages.NextPage(ctx, optFns...) if err != nil { - return tftags.New(ctx, nil), err + return tftags.New(ctx, nil), smarterr.NewError(err) } output = append(output, page.Tags...) @@ -46,7 +46,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri tags, err := listTags(ctx, meta.(*conns.AWSClient).FSxClient(ctx), identifier) if err != nil { - return err + return smarterr.NewError(err) } if inContext, ok := tftags.FromContext(ctx); ok { @@ -124,7 +124,7 @@ func updateTags(ctx context.Context, conn *fsx.Client, identifier string, oldTag _, err := conn.UntagResource(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("untagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } @@ -139,7 +139,7 @@ func updateTags(ctx context.Context, conn *fsx.Client, identifier string, oldTag _, err := conn.TagResource(ctx, &input, optFns...) 
if err != nil { - return fmt.Errorf("tagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } diff --git a/internal/service/gamelift/build.go b/internal/service/gamelift/build.go index dacd4da3a329..18d8a2869f25 100644 --- a/internal/service/gamelift/build.go +++ b/internal/service/gamelift/build.go @@ -113,7 +113,7 @@ func resourceBuildCreate(ctx context.Context, d *schema.ResourceData, meta any) } outputRaw, err := tfresource.RetryWhen(ctx, propagationTimeout, - func() (any, error) { + func(ctx context.Context) (any, error) { return conn.CreateBuild(ctx, input) }, func(err error) (bool, error) { diff --git a/internal/service/gamelift/fleet.go b/internal/service/gamelift/fleet.go index b0334bcdf33a..789087138f11 100644 --- a/internal/service/gamelift/fleet.go +++ b/internal/service/gamelift/fleet.go @@ -298,7 +298,7 @@ func resourceFleetCreate(ctx context.Context, d *schema.ResourceData, meta any) input.ScriptId = aws.String(v.(string)) } - outputRaw, err := tfresource.RetryWhenIsAErrorMessageContains[*awstypes.InvalidRequestException](ctx, propagationTimeout, func() (any, error) { + outputRaw, err := tfresource.RetryWhenIsAErrorMessageContains[any, *awstypes.InvalidRequestException](ctx, propagationTimeout, func(ctx context.Context) (any, error) { return conn.CreateFleet(ctx, input) }, "GameLift is not authorized to perform") @@ -435,7 +435,7 @@ func resourceFleetDelete(ctx context.Context, d *schema.ResourceData, meta any) const ( timeout = 60 * time.Minute ) - _, err := tfresource.RetryWhenIsAErrorMessageContains[*awstypes.InvalidRequestException](ctx, timeout, func() (any, error) { + _, err := tfresource.RetryWhenIsAErrorMessageContains[any, *awstypes.InvalidRequestException](ctx, timeout, func(ctx context.Context) (any, error) { return conn.DeleteFleet(ctx, &gamelift.DeleteFleetInput{ FleetId: aws.String(d.Id()), }) diff --git a/internal/service/gamelift/game_server_group.go b/internal/service/gamelift/game_server_group.go index 
5689b64fbdc2..52b081a75991 100644 --- a/internal/service/gamelift/game_server_group.go +++ b/internal/service/gamelift/game_server_group.go @@ -223,7 +223,7 @@ func resourceGameServerGroupCreate(ctx context.Context, d *schema.ResourceData, input.VpcSubnets = flex.ExpandStringValueSet(v.(*schema.Set)) } - outputRaw, err := tfresource.RetryWhenIsAErrorMessageContains[*awstypes.InvalidRequestException](ctx, propagationTimeout, func() (any, error) { + outputRaw, err := tfresource.RetryWhenIsAErrorMessageContains[any, *awstypes.InvalidRequestException](ctx, propagationTimeout, func(ctx context.Context) (any, error) { return conn.CreateGameServerGroup(ctx, input) }, "GameLift is not authorized to perform") @@ -339,7 +339,7 @@ func resourceGameServerGroupDelete(ctx context.Context, d *schema.ResourceData, const ( timeout = 10 * time.Minute ) - _, err := tfresource.RetryWhenIsAErrorMessageContains[*awstypes.InvalidRequestException](ctx, timeout, func() (any, error) { + _, err := tfresource.RetryWhenIsAErrorMessageContains[any, *awstypes.InvalidRequestException](ctx, timeout, func(ctx context.Context) (any, error) { return conn.DeleteGameServerGroup(ctx, &gamelift.DeleteGameServerGroupInput{ GameServerGroupName: aws.String(d.Id()), }) diff --git a/internal/service/gamelift/game_session_queue.go b/internal/service/gamelift/game_session_queue.go index cbe0f30e17e4..b4bc959111f5 100644 --- a/internal/service/gamelift/game_session_queue.go +++ b/internal/service/gamelift/game_session_queue.go @@ -207,7 +207,7 @@ func resourceGameSessionQueueDelete(ctx context.Context, d *schema.ResourceData, const ( timeout = 30 * time.Second ) - _, err = tfresource.RetryUntilNotFound(ctx, timeout, func() (any, error) { + _, err = tfresource.RetryUntilNotFound(ctx, timeout, func(ctx context.Context) (any, error) { return findGameSessionQueueByName(ctx, conn, d.Id()) }) diff --git a/internal/service/gamelift/script.go b/internal/service/gamelift/script.go index c6d3480d7c80..6548d9c3a63c 100644 
--- a/internal/service/gamelift/script.go +++ b/internal/service/gamelift/script.go @@ -125,7 +125,7 @@ func resourceScriptCreate(ctx context.Context, d *schema.ResourceData, meta any) } outputRaw, err := tfresource.RetryWhen(ctx, propagationTimeout, - func() (any, error) { + func(ctx context.Context) (any, error) { return conn.CreateScript(ctx, input) }, func(err error) (bool, error) { diff --git a/internal/service/gamelift/service_endpoint_resolver_gen.go b/internal/service/gamelift/service_endpoint_resolver_gen.go index 010d1fb56e91..f2c61ef65454 100644 --- a/internal/service/gamelift/service_endpoint_resolver_gen.go +++ b/internal/service/gamelift/service_endpoint_resolver_gen.go @@ -62,7 +62,7 @@ func (r resolverV2) ResolveEndpoint(ctx context.Context, params gamelift.Endpoin }) params.UseFIPS = aws.Bool(false) } else { - err = fmt.Errorf("looking up gamelift endpoint %q: %s", hostname, err) + err = fmt.Errorf("looking up gamelift endpoint %q: %w", hostname, err) return } } else { diff --git a/internal/service/gamelift/service_endpoints_gen_test.go b/internal/service/gamelift/service_endpoints_gen_test.go index f1b926358be2..7d152940070a 100644 --- a/internal/service/gamelift/service_endpoints_gen_test.go +++ b/internal/service/gamelift/service_endpoints_gen_test.go @@ -521,7 +521,7 @@ func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { ) } -var errCancelOperation = fmt.Errorf("Test: Canceling request") +var errCancelOperation = errors.New("Test: Canceling request") func addCancelRequestMiddleware() func(*middleware.Stack) error { return func(stack *middleware.Stack) error { diff --git a/internal/service/gamelift/service_package_gen.go b/internal/service/gamelift/service_package_gen.go index 862afc4cb95b..018be933b24d 100644 --- a/internal/service/gamelift/service_package_gen.go +++ b/internal/service/gamelift/service_package_gen.go @@ -7,7 +7,6 @@ import ( "unique" "github.com/aws/aws-sdk-go-v2/aws" - 
"github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/gamelift" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -112,7 +111,7 @@ func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) ( func(o *gamelift.Options) { if inContext, ok := conns.FromContext(ctx); ok && inContext.VCREnabled() { tflog.Info(ctx, "overriding retry behavior to immediately return VCR errors") - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(vcr.InteractionNotFoundRetryableFunc)) + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), vcr.InteractionNotFoundRetryableFunc) } }, withExtraOptions(ctx, p, config), diff --git a/internal/service/gamelift/sweep.go b/internal/service/gamelift/sweep.go index b0bc6460b00b..573b40d6bdd2 100644 --- a/internal/service/gamelift/sweep.go +++ b/internal/service/gamelift/sweep.go @@ -56,7 +56,7 @@ func sweepAliases(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } input := &gamelift.ListAliasesInput{} conn := client.GameLiftClient(ctx) @@ -97,7 +97,7 @@ func sweepBuilds(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } input := &gamelift.ListBuildsInput{} conn := client.GameLiftClient(ctx) @@ -138,7 +138,7 @@ func sweepScripts(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } input := &gamelift.ListScriptsInput{} conn := client.GameLiftClient(ctx) @@ -179,7 +179,7 @@ func 
sweepFleets(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } input := &gamelift.ListFleetsInput{} conn := client.GameLiftClient(ctx) @@ -220,7 +220,7 @@ func sweepGameServerGroups(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.GameLiftClient(ctx) input := &gamelift.ListGameServerGroupsInput{} @@ -261,7 +261,7 @@ func sweepGameSessionQueue(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } input := &gamelift.DescribeGameSessionQueuesInput{} conn := client.GameLiftClient(ctx) diff --git a/internal/service/gamelift/tags_gen.go b/internal/service/gamelift/tags_gen.go index 19ffbdbd5799..5956737c5d06 100644 --- a/internal/service/gamelift/tags_gen.go +++ b/internal/service/gamelift/tags_gen.go @@ -3,8 +3,8 @@ package gamelift import ( "context" - "fmt" + "github.com/YakDriver/smarterr" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/gamelift" awstypes "github.com/aws/aws-sdk-go-v2/service/gamelift/types" @@ -27,7 +27,7 @@ func listTags(ctx context.Context, conn *gamelift.Client, identifier string, opt output, err := conn.ListTagsForResource(ctx, &input, optFns...) 
if err != nil { - return tftags.New(ctx, nil), err + return tftags.New(ctx, nil), smarterr.NewError(err) } return keyValueTags(ctx, output.Tags), nil @@ -39,7 +39,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri tags, err := listTags(ctx, meta.(*conns.AWSClient).GameLiftClient(ctx), identifier) if err != nil { - return err + return smarterr.NewError(err) } if inContext, ok := tftags.FromContext(ctx); ok { @@ -117,7 +117,7 @@ func updateTags(ctx context.Context, conn *gamelift.Client, identifier string, o _, err := conn.UntagResource(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("untagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } @@ -132,7 +132,7 @@ func updateTags(ctx context.Context, conn *gamelift.Client, identifier string, o _, err := conn.TagResource(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("tagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } diff --git a/internal/service/glacier/service_endpoint_resolver_gen.go b/internal/service/glacier/service_endpoint_resolver_gen.go index 6021ec97f2b9..42ac4a5ed347 100644 --- a/internal/service/glacier/service_endpoint_resolver_gen.go +++ b/internal/service/glacier/service_endpoint_resolver_gen.go @@ -62,7 +62,7 @@ func (r resolverV2) ResolveEndpoint(ctx context.Context, params glacier.Endpoint }) params.UseFIPS = aws.Bool(false) } else { - err = fmt.Errorf("looking up glacier endpoint %q: %s", hostname, err) + err = fmt.Errorf("looking up glacier endpoint %q: %w", hostname, err) return } } else { diff --git a/internal/service/glacier/service_endpoints_gen_test.go b/internal/service/glacier/service_endpoints_gen_test.go index 782c005f78f4..616d9e1430fa 100644 --- a/internal/service/glacier/service_endpoints_gen_test.go +++ b/internal/service/glacier/service_endpoints_gen_test.go @@ -521,7 +521,7 @@ func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { ) } -var 
errCancelOperation = fmt.Errorf("Test: Canceling request") +var errCancelOperation = errors.New("Test: Canceling request") func addCancelRequestMiddleware() func(*middleware.Stack) error { return func(stack *middleware.Stack) error { diff --git a/internal/service/glacier/service_package_gen.go b/internal/service/glacier/service_package_gen.go index 968c2e567b30..9914e2e1e3b3 100644 --- a/internal/service/glacier/service_package_gen.go +++ b/internal/service/glacier/service_package_gen.go @@ -7,7 +7,6 @@ import ( "unique" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/glacier" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -73,7 +72,7 @@ func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) ( func(o *glacier.Options) { if inContext, ok := conns.FromContext(ctx); ok && inContext.VCREnabled() { tflog.Info(ctx, "overriding retry behavior to immediately return VCR errors") - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(vcr.InteractionNotFoundRetryableFunc)) + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), vcr.InteractionNotFoundRetryableFunc) } }, withExtraOptions(ctx, p, config), diff --git a/internal/service/glacier/sweep.go b/internal/service/glacier/sweep.go index 35789d9d88bb..5f474b16b390 100644 --- a/internal/service/glacier/sweep.go +++ b/internal/service/glacier/sweep.go @@ -25,7 +25,7 @@ func sweepVaults(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %w", err) + return fmt.Errorf("getting client: %w", err) } input := &glacier.ListVaultsInput{} conn := client.GlacierClient(ctx) diff --git a/internal/service/glacier/tags_gen.go b/internal/service/glacier/tags_gen.go index 37579cf1f9ac..6874a8869223 100644 --- 
a/internal/service/glacier/tags_gen.go +++ b/internal/service/glacier/tags_gen.go @@ -3,8 +3,8 @@ package glacier import ( "context" - "fmt" + "github.com/YakDriver/smarterr" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/glacier" "github.com/hashicorp/terraform-plugin-log/tflog" @@ -26,7 +26,7 @@ func listTags(ctx context.Context, conn *glacier.Client, identifier string, optF output, err := conn.ListTagsForVault(ctx, &input, optFns...) if err != nil { - return tftags.New(ctx, nil), err + return tftags.New(ctx, nil), smarterr.NewError(err) } return keyValueTags(ctx, output.Tags), nil @@ -38,7 +38,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri tags, err := listTags(ctx, meta.(*conns.AWSClient).GlacierClient(ctx), identifier) if err != nil { - return err + return smarterr.NewError(err) } if inContext, ok := tftags.FromContext(ctx); ok { @@ -108,7 +108,7 @@ func updateTags(ctx context.Context, conn *glacier.Client, identifier string, ol _, err := conn.RemoveTagsFromVault(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("untagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } @@ -123,7 +123,7 @@ func updateTags(ctx context.Context, conn *glacier.Client, identifier string, ol _, err := conn.AddTagsToVault(ctx, &input, optFns...) 
if err != nil { - return fmt.Errorf("tagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } diff --git a/internal/service/globalaccelerator/accelerator.go b/internal/service/globalaccelerator/accelerator.go index 6a95e11bf1f6..0e002c6f3718 100644 --- a/internal/service/globalaccelerator/accelerator.go +++ b/internal/service/globalaccelerator/accelerator.go @@ -30,6 +30,8 @@ import ( // @SDKResource("aws_globalaccelerator_accelerator", name="Accelerator") // @Tags(identifierAttribute="id") +// @ArnIdentity +// @Testing(preIdentityVersion="v6.3.0") func resourceAccelerator() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceAcceleratorCreate, @@ -37,10 +39,6 @@ func resourceAccelerator() *schema.Resource { UpdateWithoutTimeout: resourceAcceleratorUpdate, DeleteWithoutTimeout: resourceAcceleratorDelete, - Importer: &schema.ResourceImporter{ - StateContext: schema.ImportStatePassthroughContext, - }, - Timeouts: &schema.ResourceTimeout{ Create: schema.DefaultTimeout(30 * time.Minute), Update: schema.DefaultTimeout(30 * time.Minute), diff --git a/internal/service/globalaccelerator/accelerator_identity_gen_test.go b/internal/service/globalaccelerator/accelerator_identity_gen_test.go new file mode 100644 index 000000000000..09a81a85a4db --- /dev/null +++ b/internal/service/globalaccelerator/accelerator_identity_gen_test.go @@ -0,0 +1,214 @@ +// Code generated by internal/generate/identitytests/main.go; DO NOT EDIT. 
+ +package globalaccelerator_test + +import ( + "testing" + + "github.com/hashicorp/terraform-plugin-testing/compare" + "github.com/hashicorp/terraform-plugin-testing/config" + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/knownvalue" + "github.com/hashicorp/terraform-plugin-testing/plancheck" + "github.com/hashicorp/terraform-plugin-testing/statecheck" + "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" + "github.com/hashicorp/terraform-plugin-testing/tfversion" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccGlobalAcceleratorAccelerator_Identity_Basic(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_globalaccelerator_accelerator.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.GlobalAcceleratorServiceID), + CheckDestroy: testAccCheckAcceleratorDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + // Step 1: Setup + { + ConfigDirectory: config.StaticDirectory("testdata/Accelerator/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckAcceleratorExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New(names.AttrARN), compare.ValuesSame()), + 
statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + + // Step 2: Import command + { + ConfigDirectory: config.StaticDirectory("testdata/Accelerator/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ImportStateKind: resource.ImportCommandWithID, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + + // Step 3: Import block with Import ID + { + ConfigDirectory: config.StaticDirectory("testdata/Accelerator/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + }, + }, + }, + + // Step 4: Import block with Resource Identity + { + ConfigDirectory: config.StaticDirectory("testdata/Accelerator/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithResourceIdentity, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + }, + }, + }, + }, + }) +} + +// Resource Identity was added after v6.3.0 +func TestAccGlobalAcceleratorAccelerator_Identity_ExistingResource(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := 
"aws_globalaccelerator_accelerator.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.GlobalAcceleratorServiceID), + CheckDestroy: testAccCheckAcceleratorDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/Accelerator/basic_v6.3.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckAcceleratorExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Accelerator/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + }, + }) +} + +// Resource Identity was added after v6.3.0 +func TestAccGlobalAcceleratorAccelerator_Identity_ExistingResource_NoRefresh_NoChange(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_globalaccelerator_accelerator.test" + rName := 
sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.GlobalAcceleratorServiceID), + CheckDestroy: testAccCheckAcceleratorDestroy(ctx), + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/Accelerator/basic_v6.3.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckAcceleratorExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Accelerator/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + }, + }) +} diff --git a/internal/service/globalaccelerator/arn.go b/internal/service/globalaccelerator/arn.go index d842e5c22083..238f01ced276 100644 --- a/internal/service/globalaccelerator/arn.go +++ b/internal/service/globalaccelerator/arn.go @@ -11,8 +11,8 @@ import ( ) const ( - arnSeparator = "/" - arnService = "globalaccelerator" + arnResourceSeparator = "/" + 
arnService = "globalaccelerator" ) // endpointGroupARNToListenerARN converts an endpoint group ARN to a listener ARN. @@ -28,7 +28,7 @@ func endpointGroupARNToListenerARN(inputARN string) (string, error) { return "", fmt.Errorf("expected service %s in ARN (%s), got: %s", expected, inputARN, actual) } - resourceParts := strings.Split(parsedARN.Resource, arnSeparator) + resourceParts := strings.Split(parsedARN.Resource, arnResourceSeparator) if actual, expected := len(resourceParts), 6; actual < expected { return "", fmt.Errorf("expected at least %d resource parts in ARN (%s), got: %d", expected, inputARN, actual) @@ -39,7 +39,7 @@ func endpointGroupARNToListenerARN(inputARN string) (string, error) { Service: parsedARN.Service, Region: parsedARN.Region, AccountID: parsedARN.AccountID, - Resource: strings.Join(resourceParts[0:4], arnSeparator), + Resource: strings.Join(resourceParts[0:4], arnResourceSeparator), }.String() return outputARN, nil @@ -58,7 +58,7 @@ func listenerOrEndpointGroupARNToAcceleratorARN(inputARN string) (string, error) return "", fmt.Errorf("expected service %s in ARN (%s), got: %s", expected, inputARN, actual) } - resourceParts := strings.Split(parsedARN.Resource, arnSeparator) + resourceParts := strings.Split(parsedARN.Resource, arnResourceSeparator) if actual, expected := len(resourceParts), 4; actual < expected { return "", fmt.Errorf("expected at least %d resource parts in ARN (%s), got: %d", expected, inputARN, actual) @@ -69,7 +69,7 @@ func listenerOrEndpointGroupARNToAcceleratorARN(inputARN string) (string, error) Service: parsedARN.Service, Region: parsedARN.Region, AccountID: parsedARN.AccountID, - Resource: strings.Join(resourceParts[0:2], arnSeparator), + Resource: strings.Join(resourceParts[0:2], arnResourceSeparator), }.String() return outputARN, nil diff --git a/internal/service/globalaccelerator/cross_account_attachment.go b/internal/service/globalaccelerator/cross_account_attachment.go index 453a88984b3f..43e64296f664 100644 --- 
a/internal/service/globalaccelerator/cross_account_attachment.go +++ b/internal/service/globalaccelerator/cross_account_attachment.go @@ -34,6 +34,7 @@ import ( // @Tags(identifierAttribute="arn") // @ArnIdentity(identityDuplicateAttributes="id") // @Testing(existsType="github.com/aws/aws-sdk-go-v2/service/globalaccelerator/types;awstypes;awstypes.Attachment") +// @Testing(preIdentityVersion="v5.100.0") func newCrossAccountAttachmentResource(_ context.Context) (resource.ResourceWithConfigure, error) { r := &crossAccountAttachmentResource{} diff --git a/internal/service/globalaccelerator/cross_account_attachment_identity_gen_test.go b/internal/service/globalaccelerator/cross_account_attachment_identity_gen_test.go index d5a253ba438f..88c3c0b50a90 100644 --- a/internal/service/globalaccelerator/cross_account_attachment_identity_gen_test.go +++ b/internal/service/globalaccelerator/cross_account_attachment_identity_gen_test.go @@ -16,6 +16,7 @@ import ( "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" "github.com/hashicorp/terraform-plugin-testing/tfversion" "github.com/hashicorp/terraform-provider-aws/internal/acctest" + tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -26,7 +27,7 @@ func TestAccGlobalAcceleratorCrossAccountAttachment_Identity_Basic(t *testing.T) resourceName := "aws_globalaccelerator_cross_account_attachment.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ TerraformVersionChecks: []tfversion.TerraformVersionCheck{ tfversion.SkipBelow(tfversion.Version1_12_0), }, @@ -46,6 +47,9 @@ func TestAccGlobalAcceleratorCrossAccountAttachment_Identity_Basic(t *testing.T) ), ConfigStateChecks: []statecheck.StateCheck{ statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New(names.AttrARN), 
compare.ValuesSame()), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), }, }, @@ -98,3 +102,129 @@ func TestAccGlobalAcceleratorCrossAccountAttachment_Identity_Basic(t *testing.T) }, }) } + +func TestAccGlobalAcceleratorCrossAccountAttachment_Identity_ExistingResource(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.Attachment + resourceName := "aws_globalaccelerator_cross_account_attachment.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.GlobalAcceleratorServiceID), + CheckDestroy: testAccCheckCrossAccountAttachmentDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/CrossAccountAttachment/basic_v5.100.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckCrossAccountAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: v6.0 Identity set on refresh + { + ConfigDirectory: config.StaticDirectory("testdata/CrossAccountAttachment/basic_v6.0.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckCrossAccountAttachmentExists(ctx, resourceName, &v), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + 
PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + + // Step 3: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/CrossAccountAttachment/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + }, + }) +} + +func TestAccGlobalAcceleratorCrossAccountAttachment_Identity_ExistingResource_NoRefresh_NoChange(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.Attachment + resourceName := "aws_globalaccelerator_cross_account_attachment.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.GlobalAcceleratorServiceID), + CheckDestroy: testAccCheckCrossAccountAttachmentDestroy(ctx), + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + 
NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/CrossAccountAttachment/basic_v5.100.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckCrossAccountAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/CrossAccountAttachment/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + }, + }, + }) +} diff --git a/internal/service/globalaccelerator/cross_account_attachment_test.go b/internal/service/globalaccelerator/cross_account_attachment_test.go index effed1b7e63e..d8260910c22d 100644 --- a/internal/service/globalaccelerator/cross_account_attachment_test.go +++ b/internal/service/globalaccelerator/cross_account_attachment_test.go @@ -12,13 +12,8 @@ import ( awstypes "github.com/aws/aws-sdk-go-v2/service/globalaccelerator/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" - "github.com/hashicorp/terraform-plugin-testing/plancheck" - "github.com/hashicorp/terraform-plugin-testing/statecheck" "github.com/hashicorp/terraform-plugin-testing/terraform" - "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" - "github.com/hashicorp/terraform-plugin-testing/tfversion" "github.com/hashicorp/terraform-provider-aws/internal/acctest" - tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" "github.com/hashicorp/terraform-provider-aws/internal/conns" tfglobalaccelerator "github.com/hashicorp/terraform-provider-aws/internal/service/globalaccelerator" 
"github.com/hashicorp/terraform-provider-aws/internal/tfresource" @@ -211,70 +206,6 @@ func TestAccGlobalAcceleratorCrossAccountAttachment_tags(t *testing.T) { }) } -func TestAccGlobalAcceleratorCrossAccountAttachment_Identity_ExistingResource(t *testing.T) { - ctx := acctest.Context(t) - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resourceName := "aws_globalaccelerator_cross_account_attachment.test" - - resource.ParallelTest(t, resource.TestCase{ - TerraformVersionChecks: []tfversion.TerraformVersionCheck{ - tfversion.SkipBelow(tfversion.Version1_12_0), - }, - PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, names.GlobalAcceleratorServiceID), - CheckDestroy: testAccCheckCrossAccountAttachmentDestroy(ctx), - Steps: []resource.TestStep{ - { - ExternalProviders: map[string]resource.ExternalProvider{ - "aws": { - Source: "hashicorp/aws", - VersionConstraint: "5.100.0", - }, - }, - Config: testAccCrossAccountAttachmentConfig_basic(rName), - ConfigStateChecks: []statecheck.StateCheck{ - tfstatecheck.ExpectNoIdentity(resourceName), - }, - }, - { - ExternalProviders: map[string]resource.ExternalProvider{ - "aws": { - Source: "hashicorp/aws", - VersionConstraint: "6.0.0", - }, - }, - Config: testAccCrossAccountAttachmentConfig_basic(rName), - ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - PostApplyPostRefresh: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - }, - ConfigStateChecks: []statecheck.StateCheck{ - statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), - }, - }, - { - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - Config: testAccCrossAccountAttachmentConfig_basic(rName), - ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{ - 
plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - PostApplyPostRefresh: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - }, - ConfigStateChecks: []statecheck.StateCheck{ - statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), - }, - }, - }, - }) -} - func testAccCheckCrossAccountAttachmentDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { conn := acctest.Provider.Meta().(*conns.AWSClient).GlobalAcceleratorClient(ctx) diff --git a/internal/service/globalaccelerator/custom_routing_accelerator.go b/internal/service/globalaccelerator/custom_routing_accelerator.go index becc5a42751c..a8fef050bd3f 100644 --- a/internal/service/globalaccelerator/custom_routing_accelerator.go +++ b/internal/service/globalaccelerator/custom_routing_accelerator.go @@ -30,6 +30,8 @@ import ( // @SDKResource("aws_globalaccelerator_custom_routing_accelerator", name="Custom Routing Accelerator") // @Tags(identifierAttribute="id") +// @ArnIdentity +// @Testing(preIdentityVersion="v6.3.0") func resourceCustomRoutingAccelerator() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceCustomRoutingAcceleratorCreate, @@ -37,10 +39,6 @@ func resourceCustomRoutingAccelerator() *schema.Resource { UpdateWithoutTimeout: resourceCustomRoutingAcceleratorUpdate, DeleteWithoutTimeout: resourceCustomRoutingAcceleratorDelete, - Importer: &schema.ResourceImporter{ - StateContext: schema.ImportStatePassthroughContext, - }, - Timeouts: &schema.ResourceTimeout{ Create: schema.DefaultTimeout(30 * time.Minute), Update: schema.DefaultTimeout(30 * time.Minute), diff --git a/internal/service/globalaccelerator/custom_routing_accelerator_identity_gen_test.go b/internal/service/globalaccelerator/custom_routing_accelerator_identity_gen_test.go new file mode 100644 index 000000000000..eb1d089ba40f --- /dev/null +++ 
b/internal/service/globalaccelerator/custom_routing_accelerator_identity_gen_test.go @@ -0,0 +1,214 @@ +// Code generated by internal/generate/identitytests/main.go; DO NOT EDIT. + +package globalaccelerator_test + +import ( + "testing" + + "github.com/hashicorp/terraform-plugin-testing/compare" + "github.com/hashicorp/terraform-plugin-testing/config" + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/knownvalue" + "github.com/hashicorp/terraform-plugin-testing/plancheck" + "github.com/hashicorp/terraform-plugin-testing/statecheck" + "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" + "github.com/hashicorp/terraform-plugin-testing/tfversion" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccGlobalAcceleratorCustomRoutingAccelerator_Identity_Basic(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_globalaccelerator_custom_routing_accelerator.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.GlobalAcceleratorServiceID), + CheckDestroy: testAccCheckCustomRoutingAcceleratorDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + // Step 1: Setup + { + ConfigDirectory: config.StaticDirectory("testdata/CustomRoutingAccelerator/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + 
testAccCheckCustomRoutingAcceleratorExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New(names.AttrARN), compare.ValuesSame()), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + + // Step 2: Import command + { + ConfigDirectory: config.StaticDirectory("testdata/CustomRoutingAccelerator/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ImportStateKind: resource.ImportCommandWithID, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + + // Step 3: Import block with Import ID + { + ConfigDirectory: config.StaticDirectory("testdata/CustomRoutingAccelerator/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + }, + }, + }, + + // Step 4: Import block with Resource Identity + { + ConfigDirectory: config.StaticDirectory("testdata/CustomRoutingAccelerator/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithResourceIdentity, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, 
tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + }, + }, + }, + }, + }) +} + +// Resource Identity was added after v6.3.0 +func TestAccGlobalAcceleratorCustomRoutingAccelerator_Identity_ExistingResource(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_globalaccelerator_custom_routing_accelerator.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.GlobalAcceleratorServiceID), + CheckDestroy: testAccCheckCustomRoutingAcceleratorDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/CustomRoutingAccelerator/basic_v6.3.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckCustomRoutingAcceleratorExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/CustomRoutingAccelerator/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + 
statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + }, + }) +} + +// Resource Identity was added after v6.3.0 +func TestAccGlobalAcceleratorCustomRoutingAccelerator_Identity_ExistingResource_NoRefresh_NoChange(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_globalaccelerator_custom_routing_accelerator.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.GlobalAcceleratorServiceID), + CheckDestroy: testAccCheckCustomRoutingAcceleratorDestroy(ctx), + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/CustomRoutingAccelerator/basic_v6.3.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckCustomRoutingAcceleratorExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/CustomRoutingAccelerator/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: 
[]statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + }, + }) +} diff --git a/internal/service/globalaccelerator/custom_routing_endpoint_group.go b/internal/service/globalaccelerator/custom_routing_endpoint_group.go index 441fd4550747..458c6aa8af90 100644 --- a/internal/service/globalaccelerator/custom_routing_endpoint_group.go +++ b/internal/service/globalaccelerator/custom_routing_endpoint_group.go @@ -27,16 +27,15 @@ import ( ) // @SDKResource("aws_globalaccelerator_custom_routing_endpoint_group", name="Custom Routing Endpoint Group") +// @ArnIdentity +// @Testing(preIdentityVersion="v6.4.0") +// @Testing(existsType="github.com/aws/aws-sdk-go-v2/service/globalaccelerator/types;awstypes.CustomRoutingEndpointGroup") func resourceCustomRoutingEndpointGroup() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceCustomRoutingEndpointGroupCreate, ReadWithoutTimeout: resourceCustomRoutingEndpointGroupRead, DeleteWithoutTimeout: resourceCustomRoutingEndpointGroupDelete, - Importer: &schema.ResourceImporter{ - StateContext: schema.ImportStatePassthroughContext, - }, - Timeouts: &schema.ResourceTimeout{ Create: schema.DefaultTimeout(30 * time.Minute), Delete: schema.DefaultTimeout(30 * time.Minute), diff --git a/internal/service/globalaccelerator/custom_routing_endpoint_group_identity_gen_test.go b/internal/service/globalaccelerator/custom_routing_endpoint_group_identity_gen_test.go new file mode 100644 index 000000000000..2aae770e003e --- /dev/null +++ b/internal/service/globalaccelerator/custom_routing_endpoint_group_identity_gen_test.go @@ -0,0 +1,218 @@ +// Code generated by internal/generate/identitytests/main.go; DO NOT EDIT. 
+ +package globalaccelerator_test + +import ( + "testing" + + awstypes "github.com/aws/aws-sdk-go-v2/service/globalaccelerator/types" + "github.com/hashicorp/terraform-plugin-testing/compare" + "github.com/hashicorp/terraform-plugin-testing/config" + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/knownvalue" + "github.com/hashicorp/terraform-plugin-testing/plancheck" + "github.com/hashicorp/terraform-plugin-testing/statecheck" + "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" + "github.com/hashicorp/terraform-plugin-testing/tfversion" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccGlobalAcceleratorCustomRoutingEndpointGroup_Identity_Basic(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.CustomRoutingEndpointGroup + resourceName := "aws_globalaccelerator_custom_routing_endpoint_group.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.GlobalAcceleratorServiceID), + CheckDestroy: testAccCheckCustomRoutingEndpointGroupDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + // Step 1: Setup + { + ConfigDirectory: config.StaticDirectory("testdata/CustomRoutingEndpointGroup/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckCustomRoutingEndpointGroupExists(ctx, resourceName, &v), + ), + 
ConfigStateChecks: []statecheck.StateCheck{ + statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New(names.AttrARN), compare.ValuesSame()), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + + // Step 2: Import command + { + ConfigDirectory: config.StaticDirectory("testdata/CustomRoutingEndpointGroup/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ImportStateKind: resource.ImportCommandWithID, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + + // Step 3: Import block with Import ID + { + ConfigDirectory: config.StaticDirectory("testdata/CustomRoutingEndpointGroup/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + }, + }, + }, + + // Step 4: Import block with Resource Identity + { + ConfigDirectory: config.StaticDirectory("testdata/CustomRoutingEndpointGroup/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithResourceIdentity, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + }, + }, + 
}, + }, + }) +} + +// Resource Identity was added after v6.4.0 +func TestAccGlobalAcceleratorCustomRoutingEndpointGroup_Identity_ExistingResource(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.CustomRoutingEndpointGroup + resourceName := "aws_globalaccelerator_custom_routing_endpoint_group.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.GlobalAcceleratorServiceID), + CheckDestroy: testAccCheckCustomRoutingEndpointGroupDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/CustomRoutingEndpointGroup/basic_v6.4.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckCustomRoutingEndpointGroupExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/CustomRoutingEndpointGroup/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + 
statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + }, + }) +} + +// Resource Identity was added after v6.4.0 +func TestAccGlobalAcceleratorCustomRoutingEndpointGroup_Identity_ExistingResource_NoRefresh_NoChange(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.CustomRoutingEndpointGroup + resourceName := "aws_globalaccelerator_custom_routing_endpoint_group.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.GlobalAcceleratorServiceID), + CheckDestroy: testAccCheckCustomRoutingEndpointGroupDestroy(ctx), + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/CustomRoutingEndpointGroup/basic_v6.4.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckCustomRoutingEndpointGroupExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/CustomRoutingEndpointGroup/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, 
plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + }, + }) +} diff --git a/internal/service/globalaccelerator/custom_routing_listener.go b/internal/service/globalaccelerator/custom_routing_listener.go index 2818eb8779de..e5d24c22846e 100644 --- a/internal/service/globalaccelerator/custom_routing_listener.go +++ b/internal/service/globalaccelerator/custom_routing_listener.go @@ -24,6 +24,9 @@ import ( ) // @SDKResource("aws_globalaccelerator_custom_routing_listener", name="Custom Routing Listener") +// @ArnIdentity +// @Testing(preIdentityVersion="v6.4.0") +// @Testing(existsType="github.com/aws/aws-sdk-go-v2/service/globalaccelerator/types;awstypes.CustomRoutingListener") func resourceCustomRoutingListener() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceCustomRoutingListenerCreate, @@ -31,10 +34,6 @@ func resourceCustomRoutingListener() *schema.Resource { UpdateWithoutTimeout: resourceCustomRoutingListenerUpdate, DeleteWithoutTimeout: resourceCustomRoutingListenerDelete, - Importer: &schema.ResourceImporter{ - StateContext: schema.ImportStatePassthroughContext, - }, - Timeouts: &schema.ResourceTimeout{ Create: schema.DefaultTimeout(30 * time.Minute), Update: schema.DefaultTimeout(30 * time.Minute), diff --git a/internal/service/globalaccelerator/custom_routing_listener_identity_gen_test.go b/internal/service/globalaccelerator/custom_routing_listener_identity_gen_test.go new file mode 100644 index 000000000000..b6a85022fb93 --- /dev/null +++ b/internal/service/globalaccelerator/custom_routing_listener_identity_gen_test.go @@ -0,0 +1,218 @@ +// Code generated by internal/generate/identitytests/main.go; DO NOT EDIT. 
+ +package globalaccelerator_test + +import ( + "testing" + + awstypes "github.com/aws/aws-sdk-go-v2/service/globalaccelerator/types" + "github.com/hashicorp/terraform-plugin-testing/compare" + "github.com/hashicorp/terraform-plugin-testing/config" + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/knownvalue" + "github.com/hashicorp/terraform-plugin-testing/plancheck" + "github.com/hashicorp/terraform-plugin-testing/statecheck" + "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" + "github.com/hashicorp/terraform-plugin-testing/tfversion" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccGlobalAcceleratorCustomRoutingListener_Identity_Basic(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.CustomRoutingListener + resourceName := "aws_globalaccelerator_custom_routing_listener.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.GlobalAcceleratorServiceID), + CheckDestroy: testAccCheckCustomRoutingListenerDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + // Step 1: Setup + { + ConfigDirectory: config.StaticDirectory("testdata/CustomRoutingListener/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckCustomRoutingListenerExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + 
statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New(names.AttrARN), compare.ValuesSame()), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + + // Step 2: Import command + { + ConfigDirectory: config.StaticDirectory("testdata/CustomRoutingListener/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ImportStateKind: resource.ImportCommandWithID, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + + // Step 3: Import block with Import ID + { + ConfigDirectory: config.StaticDirectory("testdata/CustomRoutingListener/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + }, + }, + }, + + // Step 4: Import block with Resource Identity + { + ConfigDirectory: config.StaticDirectory("testdata/CustomRoutingListener/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithResourceIdentity, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + }, + }, + }, + }, + }) +} + +// Resource Identity was added after 
v6.4.0 +func TestAccGlobalAcceleratorCustomRoutingListener_Identity_ExistingResource(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.CustomRoutingListener + resourceName := "aws_globalaccelerator_custom_routing_listener.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.GlobalAcceleratorServiceID), + CheckDestroy: testAccCheckCustomRoutingListenerDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/CustomRoutingListener/basic_v6.4.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckCustomRoutingListenerExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/CustomRoutingListener/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + }, + }) +} + +// Resource Identity 
was added after v6.4.0 +func TestAccGlobalAcceleratorCustomRoutingListener_Identity_ExistingResource_NoRefresh_NoChange(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.CustomRoutingListener + resourceName := "aws_globalaccelerator_custom_routing_listener.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.GlobalAcceleratorServiceID), + CheckDestroy: testAccCheckCustomRoutingListenerDestroy(ctx), + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/CustomRoutingListener/basic_v6.4.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckCustomRoutingListenerExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/CustomRoutingListener/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + }, + }) +} diff --git 
a/internal/service/globalaccelerator/endpoint_group.go b/internal/service/globalaccelerator/endpoint_group.go index e14e6d2f350a..6b52c3f29cf3 100644 --- a/internal/service/globalaccelerator/endpoint_group.go +++ b/internal/service/globalaccelerator/endpoint_group.go @@ -27,6 +27,9 @@ import ( ) // @SDKResource("aws_globalaccelerator_endpoint_group", name="Endpoint Group") +// @ArnIdentity +// @Testing(preIdentityVersion="v6.4.0") +// @Testing(existsType="github.com/aws/aws-sdk-go-v2/service/globalaccelerator/types;awstypes.EndpointGroup") func resourceEndpointGroup() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceEndpointGroupCreate, @@ -34,10 +37,6 @@ func resourceEndpointGroup() *schema.Resource { UpdateWithoutTimeout: resourceEndpointGroupUpdate, DeleteWithoutTimeout: resourceEndpointGroupDelete, - Importer: &schema.ResourceImporter{ - StateContext: schema.ImportStatePassthroughContext, - }, - Timeouts: &schema.ResourceTimeout{ Create: schema.DefaultTimeout(30 * time.Minute), Update: schema.DefaultTimeout(30 * time.Minute), diff --git a/internal/service/globalaccelerator/endpoint_group_identity_gen_test.go b/internal/service/globalaccelerator/endpoint_group_identity_gen_test.go new file mode 100644 index 000000000000..3e6247828f9e --- /dev/null +++ b/internal/service/globalaccelerator/endpoint_group_identity_gen_test.go @@ -0,0 +1,218 @@ +// Code generated by internal/generate/identitytests/main.go; DO NOT EDIT. 
+ +package globalaccelerator_test + +import ( + "testing" + + awstypes "github.com/aws/aws-sdk-go-v2/service/globalaccelerator/types" + "github.com/hashicorp/terraform-plugin-testing/compare" + "github.com/hashicorp/terraform-plugin-testing/config" + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/knownvalue" + "github.com/hashicorp/terraform-plugin-testing/plancheck" + "github.com/hashicorp/terraform-plugin-testing/statecheck" + "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" + "github.com/hashicorp/terraform-plugin-testing/tfversion" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccGlobalAcceleratorEndpointGroup_Identity_Basic(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.EndpointGroup + resourceName := "aws_globalaccelerator_endpoint_group.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.GlobalAcceleratorServiceID), + CheckDestroy: testAccCheckEndpointGroupDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + // Step 1: Setup + { + ConfigDirectory: config.StaticDirectory("testdata/EndpointGroup/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckEndpointGroupExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.CompareValuePairs(resourceName, 
tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New(names.AttrARN), compare.ValuesSame()), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + + // Step 2: Import command + { + ConfigDirectory: config.StaticDirectory("testdata/EndpointGroup/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ImportStateKind: resource.ImportCommandWithID, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + + // Step 3: Import block with Import ID + { + ConfigDirectory: config.StaticDirectory("testdata/EndpointGroup/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + }, + }, + }, + + // Step 4: Import block with Resource Identity + { + ConfigDirectory: config.StaticDirectory("testdata/EndpointGroup/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithResourceIdentity, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + }, + }, + }, + }, + }) +} + +// Resource Identity was added after v6.4.0 +func 
TestAccGlobalAcceleratorEndpointGroup_Identity_ExistingResource(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.EndpointGroup + resourceName := "aws_globalaccelerator_endpoint_group.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.GlobalAcceleratorServiceID), + CheckDestroy: testAccCheckEndpointGroupDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/EndpointGroup/basic_v6.4.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckEndpointGroupExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/EndpointGroup/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + }, + }) +} + +// Resource Identity was added after v6.4.0 +func 
TestAccGlobalAcceleratorEndpointGroup_Identity_ExistingResource_NoRefresh_NoChange(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.EndpointGroup + resourceName := "aws_globalaccelerator_endpoint_group.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.GlobalAcceleratorServiceID), + CheckDestroy: testAccCheckEndpointGroupDestroy(ctx), + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/EndpointGroup/basic_v6.4.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckEndpointGroupExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/EndpointGroup/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + }, + }) +} diff --git a/internal/service/globalaccelerator/listener.go b/internal/service/globalaccelerator/listener.go index 
0c470b0f21c9..7047530a5c25 100644 --- a/internal/service/globalaccelerator/listener.go +++ b/internal/service/globalaccelerator/listener.go @@ -25,6 +25,8 @@ import ( ) // @SDKResource("aws_globalaccelerator_listener", name="Listener") +// @ArnIdentity +// @Testing(preIdentityVersion="v6.4.0") func resourceListener() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceListenerCreate, @@ -32,10 +34,6 @@ func resourceListener() *schema.Resource { UpdateWithoutTimeout: resourceListenerUpdate, DeleteWithoutTimeout: resourceListenerDelete, - Importer: &schema.ResourceImporter{ - StateContext: schema.ImportStatePassthroughContext, - }, - Timeouts: &schema.ResourceTimeout{ Create: schema.DefaultTimeout(30 * time.Minute), Update: schema.DefaultTimeout(30 * time.Minute), diff --git a/internal/service/globalaccelerator/listener_identity_gen_test.go b/internal/service/globalaccelerator/listener_identity_gen_test.go new file mode 100644 index 000000000000..b506710456e8 --- /dev/null +++ b/internal/service/globalaccelerator/listener_identity_gen_test.go @@ -0,0 +1,214 @@ +// Code generated by internal/generate/identitytests/main.go; DO NOT EDIT. 
+ +package globalaccelerator_test + +import ( + "testing" + + "github.com/hashicorp/terraform-plugin-testing/compare" + "github.com/hashicorp/terraform-plugin-testing/config" + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/knownvalue" + "github.com/hashicorp/terraform-plugin-testing/plancheck" + "github.com/hashicorp/terraform-plugin-testing/statecheck" + "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" + "github.com/hashicorp/terraform-plugin-testing/tfversion" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccGlobalAcceleratorListener_Identity_Basic(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_globalaccelerator_listener.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.GlobalAcceleratorServiceID), + CheckDestroy: testAccCheckListenerDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + // Step 1: Setup + { + ConfigDirectory: config.StaticDirectory("testdata/Listener/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckListenerExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New(names.AttrARN), compare.ValuesSame()), + statecheck.ExpectIdentity(resourceName, 
map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + + // Step 2: Import command + { + ConfigDirectory: config.StaticDirectory("testdata/Listener/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ImportStateKind: resource.ImportCommandWithID, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + + // Step 3: Import block with Import ID + { + ConfigDirectory: config.StaticDirectory("testdata/Listener/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + }, + }, + }, + + // Step 4: Import block with Resource Identity + { + ConfigDirectory: config.StaticDirectory("testdata/Listener/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithResourceIdentity, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + }, + }, + }, + }, + }) +} + +// Resource Identity was added after v6.4.0 +func TestAccGlobalAcceleratorListener_Identity_ExistingResource(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_globalaccelerator_listener.test" + rName := 
sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.GlobalAcceleratorServiceID), + CheckDestroy: testAccCheckListenerDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/Listener/basic_v6.4.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckListenerExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Listener/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + }, + }) +} + +// Resource Identity was added after v6.4.0 +func TestAccGlobalAcceleratorListener_Identity_ExistingResource_NoRefresh_NoChange(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_globalaccelerator_listener.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, 
resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.GlobalAcceleratorServiceID), + CheckDestroy: testAccCheckListenerDestroy(ctx), + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/Listener/basic_v6.4.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckListenerExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Listener/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + }, + }) +} diff --git a/internal/service/globalaccelerator/service_endpoint_resolver_gen.go b/internal/service/globalaccelerator/service_endpoint_resolver_gen.go index 78b19acb29d9..e5c5d5b3377d 100644 --- a/internal/service/globalaccelerator/service_endpoint_resolver_gen.go +++ b/internal/service/globalaccelerator/service_endpoint_resolver_gen.go @@ -62,7 +62,7 @@ func (r resolverV2) ResolveEndpoint(ctx context.Context, params globalaccelerato }) params.UseFIPS = 
aws.Bool(false) } else { - err = fmt.Errorf("looking up globalaccelerator endpoint %q: %s", hostname, err) + err = fmt.Errorf("looking up globalaccelerator endpoint %q: %w", hostname, err) return } } else { diff --git a/internal/service/globalaccelerator/service_endpoints_gen_test.go b/internal/service/globalaccelerator/service_endpoints_gen_test.go index 932e5933d87a..d487be2c1e43 100644 --- a/internal/service/globalaccelerator/service_endpoints_gen_test.go +++ b/internal/service/globalaccelerator/service_endpoints_gen_test.go @@ -523,7 +523,7 @@ func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { ) } -var errCancelOperation = fmt.Errorf("Test: Canceling request") +var errCancelOperation = errors.New("Test: Canceling request") func addCancelRequestMiddleware() func(*middleware.Stack) error { return func(stack *middleware.Stack) error { diff --git a/internal/service/globalaccelerator/service_package_gen.go b/internal/service/globalaccelerator/service_package_gen.go index 3664cc0ff046..edf3cf4dcee1 100644 --- a/internal/service/globalaccelerator/service_package_gen.go +++ b/internal/service/globalaccelerator/service_package_gen.go @@ -7,7 +7,6 @@ import ( "unique" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/globalaccelerator" "github.com/hashicorp/aws-sdk-go-base/v2/endpoints" "github.com/hashicorp/terraform-plugin-log/tflog" @@ -69,6 +68,12 @@ func (p *servicePackage) SDKResources(ctx context.Context) []*inttypes.ServicePa IdentifierAttribute: names.AttrID, }), Region: unique.Make(inttypes.ResourceRegionDisabled()), + Identity: inttypes.GlobalARNIdentity( + inttypes.WithIdentityDuplicateAttrs(names.AttrID), + ), + Import: inttypes.SDKv2Import{ + WrappedImport: true, + }, }, { Factory: resourceCustomRoutingAccelerator, @@ -78,30 +83,60 @@ func (p *servicePackage) SDKResources(ctx context.Context) []*inttypes.ServicePa IdentifierAttribute: names.AttrID, }), Region: 
unique.Make(inttypes.ResourceRegionDisabled()), + Identity: inttypes.GlobalARNIdentity( + inttypes.WithIdentityDuplicateAttrs(names.AttrID), + ), + Import: inttypes.SDKv2Import{ + WrappedImport: true, + }, }, { Factory: resourceCustomRoutingEndpointGroup, TypeName: "aws_globalaccelerator_custom_routing_endpoint_group", Name: "Custom Routing Endpoint Group", Region: unique.Make(inttypes.ResourceRegionDisabled()), + Identity: inttypes.GlobalARNIdentity( + inttypes.WithIdentityDuplicateAttrs(names.AttrID), + ), + Import: inttypes.SDKv2Import{ + WrappedImport: true, + }, }, { Factory: resourceCustomRoutingListener, TypeName: "aws_globalaccelerator_custom_routing_listener", Name: "Custom Routing Listener", Region: unique.Make(inttypes.ResourceRegionDisabled()), + Identity: inttypes.GlobalARNIdentity( + inttypes.WithIdentityDuplicateAttrs(names.AttrID), + ), + Import: inttypes.SDKv2Import{ + WrappedImport: true, + }, }, { Factory: resourceEndpointGroup, TypeName: "aws_globalaccelerator_endpoint_group", Name: "Endpoint Group", Region: unique.Make(inttypes.ResourceRegionDisabled()), + Identity: inttypes.GlobalARNIdentity( + inttypes.WithIdentityDuplicateAttrs(names.AttrID), + ), + Import: inttypes.SDKv2Import{ + WrappedImport: true, + }, }, { Factory: resourceListener, TypeName: "aws_globalaccelerator_listener", Name: "Listener", Region: unique.Make(inttypes.ResourceRegionDisabled()), + Identity: inttypes.GlobalARNIdentity( + inttypes.WithIdentityDuplicateAttrs(names.AttrID), + ), + Import: inttypes.SDKv2Import{ + WrappedImport: true, + }, }, } } @@ -129,7 +164,7 @@ func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) ( func(o *globalaccelerator.Options) { if inContext, ok := conns.FromContext(ctx); ok && inContext.VCREnabled() { tflog.Info(ctx, "overriding retry behavior to immediately return VCR errors") - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(vcr.InteractionNotFoundRetryableFunc)) + 
o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), vcr.InteractionNotFoundRetryableFunc) } }, func(o *globalaccelerator.Options) { diff --git a/internal/service/globalaccelerator/sweep.go b/internal/service/globalaccelerator/sweep.go index a340596bb430..50c23ca101aa 100644 --- a/internal/service/globalaccelerator/sweep.go +++ b/internal/service/globalaccelerator/sweep.go @@ -62,7 +62,7 @@ func sweepAccelerators(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.GlobalAcceleratorClient(ctx) input := &globalaccelerator.ListAcceleratorsInput{} @@ -103,7 +103,7 @@ func sweepEndpointGroups(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.GlobalAcceleratorClient(ctx) input := &globalaccelerator.ListAcceleratorsInput{} @@ -174,7 +174,7 @@ func sweepListeners(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.GlobalAcceleratorClient(ctx) input := &globalaccelerator.ListAcceleratorsInput{} @@ -230,7 +230,7 @@ func sweepCustomRoutingAccelerators(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.GlobalAcceleratorClient(ctx) input := &globalaccelerator.ListCustomRoutingAcceleratorsInput{} @@ -271,7 +271,7 @@ func sweepCustomRoutingEndpointGroups(region string) error { ctx := 
sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.GlobalAcceleratorClient(ctx) input := &globalaccelerator.ListCustomRoutingAcceleratorsInput{} @@ -342,7 +342,7 @@ func sweepCustomRoutingListeners(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.GlobalAcceleratorClient(ctx) input := &globalaccelerator.ListCustomRoutingAcceleratorsInput{} diff --git a/internal/service/globalaccelerator/tags_gen.go b/internal/service/globalaccelerator/tags_gen.go index 5cb0e1f4dc74..b162966e6454 100644 --- a/internal/service/globalaccelerator/tags_gen.go +++ b/internal/service/globalaccelerator/tags_gen.go @@ -3,8 +3,8 @@ package globalaccelerator import ( "context" - "fmt" + "github.com/YakDriver/smarterr" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/globalaccelerator" awstypes "github.com/aws/aws-sdk-go-v2/service/globalaccelerator/types" @@ -27,7 +27,7 @@ func listTags(ctx context.Context, conn *globalaccelerator.Client, identifier st output, err := conn.ListTagsForResource(ctx, &input, optFns...) if err != nil { - return tftags.New(ctx, nil), err + return tftags.New(ctx, nil), smarterr.NewError(err) } return keyValueTags(ctx, output.Tags), nil @@ -39,7 +39,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri tags, err := listTags(ctx, meta.(*conns.AWSClient).GlobalAcceleratorClient(ctx), identifier) if err != nil { - return err + return smarterr.NewError(err) } if inContext, ok := tftags.FromContext(ctx); ok { @@ -117,7 +117,7 @@ func updateTags(ctx context.Context, conn *globalaccelerator.Client, identifier _, err := conn.UntagResource(ctx, &input, optFns...) 
if err != nil { - return fmt.Errorf("untagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } @@ -132,7 +132,7 @@ func updateTags(ctx context.Context, conn *globalaccelerator.Client, identifier _, err := conn.TagResource(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("tagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } diff --git a/internal/service/globalaccelerator/testdata/Accelerator/basic/main_gen.tf b/internal/service/globalaccelerator/testdata/Accelerator/basic/main_gen.tf new file mode 100644 index 000000000000..a6f3a20896ca --- /dev/null +++ b/internal/service/globalaccelerator/testdata/Accelerator/basic/main_gen.tf @@ -0,0 +1,14 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resource "aws_globalaccelerator_accelerator" "test" { + name = var.rName + ip_address_type = "IPV4" + enabled = false +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} diff --git a/internal/service/globalaccelerator/testdata/Accelerator/basic_v6.3.0/main_gen.tf b/internal/service/globalaccelerator/testdata/Accelerator/basic_v6.3.0/main_gen.tf new file mode 100644 index 000000000000..241818a3da2e --- /dev/null +++ b/internal/service/globalaccelerator/testdata/Accelerator/basic_v6.3.0/main_gen.tf @@ -0,0 +1,24 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_globalaccelerator_accelerator" "test" { + name = var.rName + ip_address_type = "IPV4" + enabled = false +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "6.3.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/globalaccelerator/testdata/CrossAccountAttachment/basic_v5.100.0/main_gen.tf b/internal/service/globalaccelerator/testdata/CrossAccountAttachment/basic_v5.100.0/main_gen.tf new file mode 100644 index 000000000000..343857f882f0 --- /dev/null +++ b/internal/service/globalaccelerator/testdata/CrossAccountAttachment/basic_v5.100.0/main_gen.tf @@ -0,0 +1,22 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resource "aws_globalaccelerator_cross_account_attachment" "test" { + name = var.rName +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "5.100.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/globalaccelerator/testdata/CrossAccountAttachment/basic_v6.0.0/main_gen.tf b/internal/service/globalaccelerator/testdata/CrossAccountAttachment/basic_v6.0.0/main_gen.tf new file mode 100644 index 000000000000..b0cbab864780 --- /dev/null +++ b/internal/service/globalaccelerator/testdata/CrossAccountAttachment/basic_v6.0.0/main_gen.tf @@ -0,0 +1,22 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_globalaccelerator_cross_account_attachment" "test" { + name = var.rName +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "6.0.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/globalaccelerator/testdata/CustomRoutingAccelerator/basic/main_gen.tf b/internal/service/globalaccelerator/testdata/CustomRoutingAccelerator/basic/main_gen.tf new file mode 100644 index 000000000000..38c32fe99479 --- /dev/null +++ b/internal/service/globalaccelerator/testdata/CustomRoutingAccelerator/basic/main_gen.tf @@ -0,0 +1,12 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resource "aws_globalaccelerator_custom_routing_accelerator" "test" { + name = var.rName +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} diff --git a/internal/service/globalaccelerator/testdata/CustomRoutingAccelerator/basic_v6.3.0/main_gen.tf b/internal/service/globalaccelerator/testdata/CustomRoutingAccelerator/basic_v6.3.0/main_gen.tf new file mode 100644 index 000000000000..5fe33c83438c --- /dev/null +++ b/internal/service/globalaccelerator/testdata/CustomRoutingAccelerator/basic_v6.3.0/main_gen.tf @@ -0,0 +1,22 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_globalaccelerator_custom_routing_accelerator" "test" { + name = var.rName +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "6.3.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/globalaccelerator/testdata/CustomRoutingEndpointGroup/basic/main_gen.tf b/internal/service/globalaccelerator/testdata/CustomRoutingEndpointGroup/basic/main_gen.tf new file mode 100644 index 000000000000..a0b26f7597e6 --- /dev/null +++ b/internal/service/globalaccelerator/testdata/CustomRoutingEndpointGroup/basic/main_gen.tf @@ -0,0 +1,31 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resource "aws_globalaccelerator_custom_routing_accelerator" "test" { + name = var.rName +} + +resource "aws_globalaccelerator_custom_routing_listener" "test" { + accelerator_arn = aws_globalaccelerator_custom_routing_accelerator.test.arn + + port_range { + from_port = 443 + to_port = 443 + } +} + +resource "aws_globalaccelerator_custom_routing_endpoint_group" "test" { + listener_arn = aws_globalaccelerator_custom_routing_listener.test.arn + + destination_configuration { + from_port = 443 + to_port = 8443 + protocols = ["TCP"] + } +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} diff --git a/internal/service/globalaccelerator/testdata/CustomRoutingEndpointGroup/basic_v6.4.0/main_gen.tf b/internal/service/globalaccelerator/testdata/CustomRoutingEndpointGroup/basic_v6.4.0/main_gen.tf new file mode 100644 index 000000000000..13e43d5c9285 --- /dev/null +++ b/internal/service/globalaccelerator/testdata/CustomRoutingEndpointGroup/basic_v6.4.0/main_gen.tf @@ -0,0 +1,41 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_globalaccelerator_custom_routing_accelerator" "test" { + name = var.rName +} + +resource "aws_globalaccelerator_custom_routing_listener" "test" { + accelerator_arn = aws_globalaccelerator_custom_routing_accelerator.test.arn + + port_range { + from_port = 443 + to_port = 443 + } +} + +resource "aws_globalaccelerator_custom_routing_endpoint_group" "test" { + listener_arn = aws_globalaccelerator_custom_routing_listener.test.arn + + destination_configuration { + from_port = 443 + to_port = 8443 + protocols = ["TCP"] + } +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "6.4.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/globalaccelerator/testdata/CustomRoutingListener/basic/main_gen.tf b/internal/service/globalaccelerator/testdata/CustomRoutingListener/basic/main_gen.tf new file mode 100644 index 000000000000..ddd25130b62b --- /dev/null +++ b/internal/service/globalaccelerator/testdata/CustomRoutingListener/basic/main_gen.tf @@ -0,0 +1,26 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_globalaccelerator_custom_routing_accelerator" "test" { + name = var.rName +} + +resource "aws_globalaccelerator_custom_routing_listener" "test" { + accelerator_arn = aws_globalaccelerator_custom_routing_accelerator.test.arn + + port_range { + from_port = 443 + to_port = 443 + } + + port_range { + from_port = 10000 + to_port = 30000 + } +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} diff --git a/internal/service/globalaccelerator/testdata/CustomRoutingListener/basic_v6.4.0/main_gen.tf b/internal/service/globalaccelerator/testdata/CustomRoutingListener/basic_v6.4.0/main_gen.tf new file mode 100644 index 000000000000..4d9d2e58d2e7 --- /dev/null +++ b/internal/service/globalaccelerator/testdata/CustomRoutingListener/basic_v6.4.0/main_gen.tf @@ -0,0 +1,36 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resource "aws_globalaccelerator_custom_routing_accelerator" "test" { + name = var.rName +} + +resource "aws_globalaccelerator_custom_routing_listener" "test" { + accelerator_arn = aws_globalaccelerator_custom_routing_accelerator.test.arn + + port_range { + from_port = 443 + to_port = 443 + } + + port_range { + from_port = 10000 + to_port = 30000 + } +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "6.4.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/globalaccelerator/testdata/EndpointGroup/basic/main_gen.tf b/internal/service/globalaccelerator/testdata/EndpointGroup/basic/main_gen.tf new file mode 100644 index 000000000000..2f686889d593 --- /dev/null +++ b/internal/service/globalaccelerator/testdata/EndpointGroup/basic/main_gen.tf @@ -0,0 +1,28 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_globalaccelerator_accelerator" "test" { + name = var.rName + ip_address_type = "IPV4" + enabled = false +} + +resource "aws_globalaccelerator_listener" "test" { + accelerator_arn = aws_globalaccelerator_accelerator.test.arn + protocol = "TCP" + + port_range { + from_port = 80 + to_port = 80 + } +} + +resource "aws_globalaccelerator_endpoint_group" "test" { + listener_arn = aws_globalaccelerator_listener.test.arn +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} diff --git a/internal/service/globalaccelerator/testdata/EndpointGroup/basic_v6.4.0/main_gen.tf b/internal/service/globalaccelerator/testdata/EndpointGroup/basic_v6.4.0/main_gen.tf new file mode 100644 index 000000000000..cad6aa2bffb4 --- /dev/null +++ b/internal/service/globalaccelerator/testdata/EndpointGroup/basic_v6.4.0/main_gen.tf @@ -0,0 +1,38 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resource "aws_globalaccelerator_accelerator" "test" { + name = var.rName + ip_address_type = "IPV4" + enabled = false +} + +resource "aws_globalaccelerator_listener" "test" { + accelerator_arn = aws_globalaccelerator_accelerator.test.arn + protocol = "TCP" + + port_range { + from_port = 80 + to_port = 80 + } +} + +resource "aws_globalaccelerator_endpoint_group" "test" { + listener_arn = aws_globalaccelerator_listener.test.arn +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "6.4.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/globalaccelerator/testdata/Listener/basic/main_gen.tf b/internal/service/globalaccelerator/testdata/Listener/basic/main_gen.tf new file mode 100644 index 000000000000..99974fe88b2c --- /dev/null +++ b/internal/service/globalaccelerator/testdata/Listener/basic/main_gen.tf @@ -0,0 +1,24 @@ +# Copyright (c) HashiCorp, 
Inc. +# SPDX-License-Identifier: MPL-2.0 + +resource "aws_globalaccelerator_accelerator" "example" { + name = var.rName + ip_address_type = "IPV4" + enabled = false +} + +resource "aws_globalaccelerator_listener" "test" { + accelerator_arn = aws_globalaccelerator_accelerator.example.arn + protocol = "TCP" + + port_range { + from_port = 80 + to_port = 81 + } +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} diff --git a/internal/service/globalaccelerator/testdata/Listener/basic_v6.4.0/main_gen.tf b/internal/service/globalaccelerator/testdata/Listener/basic_v6.4.0/main_gen.tf new file mode 100644 index 000000000000..ba75e61d7c4e --- /dev/null +++ b/internal/service/globalaccelerator/testdata/Listener/basic_v6.4.0/main_gen.tf @@ -0,0 +1,34 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resource "aws_globalaccelerator_accelerator" "example" { + name = var.rName + ip_address_type = "IPV4" + enabled = false +} + +resource "aws_globalaccelerator_listener" "test" { + accelerator_arn = aws_globalaccelerator_accelerator.example.arn + protocol = "TCP" + + port_range { + from_port = 80 + to_port = 81 + } +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "6.4.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/globalaccelerator/testdata/tmpl/accelerator_tags.gtpl b/internal/service/globalaccelerator/testdata/tmpl/accelerator_tags.gtpl new file mode 100644 index 000000000000..70d7f1476514 --- /dev/null +++ b/internal/service/globalaccelerator/testdata/tmpl/accelerator_tags.gtpl @@ -0,0 +1,8 @@ +resource "aws_globalaccelerator_accelerator" "test" { + {{- template "region" . }} + name = var.rName + ip_address_type = "IPV4" + enabled = false + + {{- template "tags" . 
}} +} diff --git a/internal/service/globalaccelerator/testdata/tmpl/custom_routing_accelerator_tags.gtpl b/internal/service/globalaccelerator/testdata/tmpl/custom_routing_accelerator_tags.gtpl new file mode 100644 index 000000000000..a599a75bf8d3 --- /dev/null +++ b/internal/service/globalaccelerator/testdata/tmpl/custom_routing_accelerator_tags.gtpl @@ -0,0 +1,5 @@ +resource "aws_globalaccelerator_custom_routing_accelerator" "test" { +{{- template "region" }} + name = var.rName +{{- template "tags" . }} +} diff --git a/internal/service/globalaccelerator/testdata/tmpl/custom_routing_endpoint_group_basic.gtpl b/internal/service/globalaccelerator/testdata/tmpl/custom_routing_endpoint_group_basic.gtpl new file mode 100644 index 000000000000..48d433189b35 --- /dev/null +++ b/internal/service/globalaccelerator/testdata/tmpl/custom_routing_endpoint_group_basic.gtpl @@ -0,0 +1,25 @@ +resource "aws_globalaccelerator_custom_routing_accelerator" "test" { +{{- template "region" }} + name = var.rName +} + +resource "aws_globalaccelerator_custom_routing_listener" "test" { +{{- template "region" }} + accelerator_arn = aws_globalaccelerator_custom_routing_accelerator.test.arn + + port_range { + from_port = 443 + to_port = 443 + } +} + +resource "aws_globalaccelerator_custom_routing_endpoint_group" "test" { +{{- template "region" }} + listener_arn = aws_globalaccelerator_custom_routing_listener.test.arn + + destination_configuration { + from_port = 443 + to_port = 8443 + protocols = ["TCP"] + } +} diff --git a/internal/service/globalaccelerator/testdata/tmpl/custom_routing_listener_basic.gtpl b/internal/service/globalaccelerator/testdata/tmpl/custom_routing_listener_basic.gtpl new file mode 100644 index 000000000000..281fb513ca87 --- /dev/null +++ b/internal/service/globalaccelerator/testdata/tmpl/custom_routing_listener_basic.gtpl @@ -0,0 +1,19 @@ +resource "aws_globalaccelerator_custom_routing_accelerator" "test" { +{{- template "region" }} + name = var.rName +} + +resource 
"aws_globalaccelerator_custom_routing_listener" "test" { +{{- template "region" }} + accelerator_arn = aws_globalaccelerator_custom_routing_accelerator.test.arn + + port_range { + from_port = 443 + to_port = 443 + } + + port_range { + from_port = 10000 + to_port = 30000 + } +} diff --git a/internal/service/globalaccelerator/testdata/tmpl/endpoint_group_basic.gtpl b/internal/service/globalaccelerator/testdata/tmpl/endpoint_group_basic.gtpl new file mode 100644 index 000000000000..ac002c0bb5c0 --- /dev/null +++ b/internal/service/globalaccelerator/testdata/tmpl/endpoint_group_basic.gtpl @@ -0,0 +1,22 @@ +resource "aws_globalaccelerator_accelerator" "test" { +{{- template "region" }} + name = var.rName + ip_address_type = "IPV4" + enabled = false +} + +resource "aws_globalaccelerator_listener" "test" { +{{- template "region" }} + accelerator_arn = aws_globalaccelerator_accelerator.test.arn + protocol = "TCP" + + port_range { + from_port = 80 + to_port = 80 + } +} + +resource "aws_globalaccelerator_endpoint_group" "test" { +{{- template "region" }} + listener_arn = aws_globalaccelerator_listener.test.arn +} diff --git a/internal/service/globalaccelerator/testdata/tmpl/listener_basic.gtpl b/internal/service/globalaccelerator/testdata/tmpl/listener_basic.gtpl new file mode 100644 index 000000000000..107357bd61ad --- /dev/null +++ b/internal/service/globalaccelerator/testdata/tmpl/listener_basic.gtpl @@ -0,0 +1,17 @@ +resource "aws_globalaccelerator_accelerator" "example" { +{{- template "region" }} + name = var.rName + ip_address_type = "IPV4" + enabled = false +} + +resource "aws_globalaccelerator_listener" "test" { +{{- template "region" }} + accelerator_arn = aws_globalaccelerator_accelerator.example.arn + protocol = "TCP" + + port_range { + from_port = 80 + to_port = 81 + } +} diff --git a/internal/service/glue/catalog_table.go b/internal/service/glue/catalog_table.go index 79a607336cd1..67dc1ecab811 100644 --- a/internal/service/glue/catalog_table.go +++ 
b/internal/service/glue/catalog_table.go @@ -93,6 +93,11 @@ func resourceCatalogTable() *schema.Resource { Required: true, ValidateFunc: validation.StringLenBetween(1, 255), }, + names.AttrParameters: { + Type: schema.TypeMap, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, names.AttrType: { Type: schema.TypeString, Optional: true, @@ -778,14 +783,14 @@ func expandColumns(columns []any) []awstypes.Column { column.Comment = aws.String(v.(string)) } - if v, ok := elementMap[names.AttrType]; ok { - column.Type = aws.String(v.(string)) - } - if v, ok := elementMap[names.AttrParameters]; ok { column.Parameters = flex.ExpandStringValueMap(v.(map[string]any)) } + if v, ok := elementMap[names.AttrType]; ok { + column.Type = aws.String(v.(string)) + } + columnSlice = append(columnSlice, column) } @@ -951,22 +956,22 @@ func flattenColumns(cs []awstypes.Column) []map[string]any { func flattenColumn(c awstypes.Column) map[string]any { column := make(map[string]any) - if v := aws.ToString(c.Name); v != "" { - column[names.AttrName] = v - } - - if v := aws.ToString(c.Type); v != "" { - column[names.AttrType] = v - } - if v := aws.ToString(c.Comment); v != "" { column[names.AttrComment] = v } + if v := aws.ToString(c.Name); v != "" { + column[names.AttrName] = v + } + if v := c.Parameters; v != nil { column[names.AttrParameters] = v } + if v := aws.ToString(c.Type); v != "" { + column[names.AttrType] = v + } + return column } diff --git a/internal/service/glue/catalog_table_data_source.go b/internal/service/glue/catalog_table_data_source.go index 5072af45087f..9cf970ee0903 100644 --- a/internal/service/glue/catalog_table_data_source.go +++ b/internal/service/glue/catalog_table_data_source.go @@ -96,6 +96,11 @@ func dataSourceCatalogTable() *schema.Resource { Type: schema.TypeString, Computed: true, }, + names.AttrParameters: { + Type: schema.TypeMap, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, names.AttrType: { Type: 
schema.TypeString, Computed: true, diff --git a/internal/service/glue/catalog_table_optimizer.go b/internal/service/glue/catalog_table_optimizer.go index 1727812a381b..f50a3dfb2f0e 100644 --- a/internal/service/glue/catalog_table_optimizer.go +++ b/internal/service/glue/catalog_table_optimizer.go @@ -13,6 +13,7 @@ import ( "github.com/hashicorp/terraform-plugin-framework/path" "github.com/hashicorp/terraform-plugin-framework/resource" "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/int32planmodifier" "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" "github.com/hashicorp/terraform-plugin-framework/schema/validator" @@ -108,13 +109,20 @@ func (r *catalogTableOptimizerResource) Schema(ctx context.Context, _ resource.S }, NestedObject: schema.NestedBlockObject{ Attributes: map[string]schema.Attribute{ - "snapshot_retention_period_in_days": schema.Int32Attribute{ + "clean_expired_files": schema.BoolAttribute{ Optional: true, }, "number_of_snapshots_to_retain": schema.Int32Attribute{ Optional: true, }, - "clean_expired_files": schema.BoolAttribute{ + "run_rate_in_hours": schema.Int32Attribute{ + Optional: true, + Computed: true, + PlanModifiers: []planmodifier.Int32{ + int32planmodifier.UseStateForUnknown(), + }, + }, + "snapshot_retention_period_in_days": schema.Int32Attribute{ Optional: true, }, }, @@ -137,11 +145,18 @@ func (r *catalogTableOptimizerResource) Schema(ctx context.Context, _ resource.S }, NestedObject: schema.NestedBlockObject{ Attributes: map[string]schema.Attribute{ + names.AttrLocation: schema.StringAttribute{ + Optional: true, + }, "orphan_file_retention_period_in_days": schema.Int32Attribute{ Optional: true, }, - names.AttrLocation: schema.StringAttribute{ + "run_rate_in_hours": schema.Int32Attribute{ Optional: true, + Computed: true, + PlanModifiers: 
[]planmodifier.Int32{ + int32planmodifier.UseStateForUnknown(), + }, }, }, }, @@ -175,29 +190,41 @@ func (r *catalogTableOptimizerResource) Create(ctx context.Context, request reso return } - err := retry.RetryContext(ctx, propagationTimeout, func() *retry.RetryError { + err := tfresource.Retry(ctx, propagationTimeout, func(ctx context.Context) *tfresource.RetryError { _, err := conn.CreateTableOptimizer(ctx, &input) if err != nil { // Retry IAM propagation errors if errs.IsAErrorMessageContains[*awstypes.AccessDeniedException](err, "does not have the correct trust policies and is unable to be assumed by our service") { - return retry.RetryableError(err) + return tfresource.RetryableError(err) } if errs.IsAErrorMessageContains[*awstypes.AccessDeniedException](err, "does not have the proper IAM permissions to call Glue APIs") { - return retry.RetryableError(err) + return tfresource.RetryableError(err) } if errs.IsAErrorMessageContains[*awstypes.AccessDeniedException](err, "is not authorized to perform") { - return retry.RetryableError(err) + return tfresource.RetryableError(err) } - return retry.NonRetryableError(err) + return tfresource.NonRetryableError(err) } return nil }) - if tfresource.TimedOut(err) { - _, err = conn.CreateTableOptimizer(ctx, &input) + if err != nil { + id, _ := flex.FlattenResourceId([]string{ + plan.CatalogID.ValueString(), + plan.DatabaseName.ValueString(), + plan.TableName.ValueString(), + plan.Type.ValueString(), + }, idParts, false) + + response.Diagnostics.AddError( + create.ProblemStandardMessage(names.Glue, create.ErrActionCreating, ResNameCatalogTableOptimizer, id, err), + err.Error(), + ) + return } + output, err := findCatalogTableOptimizer(ctx, conn, plan.CatalogID.ValueString(), plan.DatabaseName.ValueString(), plan.TableName.ValueString(), plan.Type.ValueString()) if err != nil { id, _ := flex.FlattenResourceId([]string{ plan.CatalogID.ValueString(), @@ -207,12 +234,18 @@ func (r *catalogTableOptimizerResource) Create(ctx 
context.Context, request reso }, idParts, false) response.Diagnostics.AddError( - create.ProblemStandardMessage(names.Glue, create.ErrActionCreating, ResNameCatalogTableOptimizer, id, err), + create.ProblemStandardMessage(names.Glue, create.ErrActionReading, ResNameCatalogTableOptimizer, id, err), err.Error(), ) return } + response.Diagnostics.Append(fwflex.Flatten(ctx, output.TableOptimizer, &plan)...) + + if response.Diagnostics.HasError() { + return + } + response.Diagnostics.Append(response.State.Set(ctx, &plan)...) } @@ -293,6 +326,27 @@ func (r *catalogTableOptimizerResource) Update(ctx context.Context, request reso ) return } + output, err := findCatalogTableOptimizer(ctx, conn, plan.CatalogID.ValueString(), plan.DatabaseName.ValueString(), plan.TableName.ValueString(), plan.Type.ValueString()) + if err != nil { + id, _ := flex.FlattenResourceId([]string{ + plan.CatalogID.ValueString(), + plan.DatabaseName.ValueString(), + plan.TableName.ValueString(), + plan.Type.ValueString(), + }, idParts, false) + + response.Diagnostics.AddError( + create.ProblemStandardMessage(names.Glue, create.ErrActionReading, ResNameCatalogTableOptimizer, id, err), + err.Error(), + ) + return + } + + response.Diagnostics.Append(fwflex.Flatten(ctx, output.TableOptimizer, &plan)...) + + if response.Diagnostics.HasError() { + return + } } response.Diagnostics.Append(response.State.Set(ctx, &plan)...) 
@@ -380,9 +434,10 @@ type retentionConfigurationData struct { } type icebergRetentionConfigurationData struct { - SnapshotRetentionPeriodInDays types.Int32 `tfsdk:"snapshot_retention_period_in_days"` - NumberOfSnapshotsToRetain types.Int32 `tfsdk:"number_of_snapshots_to_retain"` CleanExpiredFiles types.Bool `tfsdk:"clean_expired_files"` + NumberOfSnapshotsToRetain types.Int32 `tfsdk:"number_of_snapshots_to_retain"` + RunRateInHours types.Int32 `tfsdk:"run_rate_in_hours"` + SnapshotRetentionPeriodInDays types.Int32 `tfsdk:"snapshot_retention_period_in_days"` } type orphanFileDeletionConfigurationData struct { @@ -390,8 +445,9 @@ type orphanFileDeletionConfigurationData struct { } type icebergOrphanFileDeletionConfigurationData struct { - OrphanFileRetentionPeriodInDays types.Int32 `tfsdk:"orphan_file_retention_period_in_days"` Location types.String `tfsdk:"location"` + OrphanFileRetentionPeriodInDays types.Int32 `tfsdk:"orphan_file_retention_period_in_days"` + RunRateInHours types.Int32 `tfsdk:"run_rate_in_hours"` } func findCatalogTableOptimizer(ctx context.Context, conn *glue.Client, catalogID, dbName, tableName, optimizerType string) (*glue.GetTableOptimizerOutput, error) { diff --git a/internal/service/glue/catalog_table_optimizer_test.go b/internal/service/glue/catalog_table_optimizer_test.go index c17558cb49a4..a455a07c1c88 100644 --- a/internal/service/glue/catalog_table_optimizer_test.go +++ b/internal/service/glue/catalog_table_optimizer_test.go @@ -149,6 +149,7 @@ func testAccCatalogTableOptimizer_RetentionConfiguration(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "configuration.0.retention_configuration.0.iceberg_configuration.0.snapshot_retention_period_in_days", "7"), resource.TestCheckResourceAttr(resourceName, "configuration.0.retention_configuration.0.iceberg_configuration.0.number_of_snapshots_to_retain", "3"), resource.TestCheckResourceAttr(resourceName, 
"configuration.0.retention_configuration.0.iceberg_configuration.0.clean_expired_files", acctest.CtTrue), + resource.TestCheckResourceAttr(resourceName, "configuration.0.retention_configuration.0.iceberg_configuration.0.run_rate_in_hours", "24"), ), }, { @@ -170,6 +171,62 @@ func testAccCatalogTableOptimizer_RetentionConfiguration(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "configuration.0.retention_configuration.0.iceberg_configuration.0.snapshot_retention_period_in_days", "6"), resource.TestCheckResourceAttr(resourceName, "configuration.0.retention_configuration.0.iceberg_configuration.0.number_of_snapshots_to_retain", "3"), resource.TestCheckResourceAttr(resourceName, "configuration.0.retention_configuration.0.iceberg_configuration.0.clean_expired_files", acctest.CtTrue), + resource.TestCheckResourceAttr(resourceName, "configuration.0.retention_configuration.0.iceberg_configuration.0.run_rate_in_hours", "24"), + ), + }, + }, + }) +} + +func testAccCatalogTableOptimizer_RetentionConfigurationWithRunRateInHours(t *testing.T) { + ctx := acctest.Context(t) + var catalogTableOptimizer glue.GetTableOptimizerOutput + + resourceName := "aws_glue_catalog_table_optimizer.test" + + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.GlueServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckCatalogTableOptimizerDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccCatalogTableOptimizerConfig_retentionConfigurationWithRunRateInHours(rName, 7, 6), + Check: resource.ComposeTestCheckFunc( + testAccCheckCatalogTableOptimizerExists(ctx, resourceName, &catalogTableOptimizer), + acctest.CheckResourceAttrAccountID(ctx, resourceName, names.AttrCatalogID), + resource.TestCheckResourceAttr(resourceName, names.AttrDatabaseName, rName), + 
resource.TestCheckResourceAttr(resourceName, names.AttrTableName, rName), + resource.TestCheckResourceAttr(resourceName, names.AttrType, "retention"), + resource.TestCheckResourceAttr(resourceName, "configuration.0.enabled", acctest.CtTrue), + resource.TestCheckResourceAttr(resourceName, "configuration.0.retention_configuration.0.iceberg_configuration.0.snapshot_retention_period_in_days", "7"), + resource.TestCheckResourceAttr(resourceName, "configuration.0.retention_configuration.0.iceberg_configuration.0.number_of_snapshots_to_retain", "3"), + resource.TestCheckResourceAttr(resourceName, "configuration.0.retention_configuration.0.iceberg_configuration.0.clean_expired_files", acctest.CtTrue), + resource.TestCheckResourceAttr(resourceName, "configuration.0.retention_configuration.0.iceberg_configuration.0.run_rate_in_hours", "6"), + ), + }, + { + ResourceName: resourceName, + ImportStateIdFunc: testAccCatalogTableOptimizerStateIDFunc(resourceName), + ImportStateVerifyIdentifierAttribute: names.AttrTableName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccCatalogTableOptimizerConfig_retentionConfigurationWithRunRateInHours(rName, 6, 4), + Check: resource.ComposeTestCheckFunc( + testAccCheckCatalogTableOptimizerExists(ctx, resourceName, &catalogTableOptimizer), + acctest.CheckResourceAttrAccountID(ctx, resourceName, names.AttrCatalogID), + resource.TestCheckResourceAttr(resourceName, names.AttrDatabaseName, rName), + resource.TestCheckResourceAttr(resourceName, names.AttrTableName, rName), + resource.TestCheckResourceAttr(resourceName, names.AttrType, "retention"), + resource.TestCheckResourceAttr(resourceName, "configuration.0.enabled", acctest.CtTrue), + resource.TestCheckResourceAttr(resourceName, "configuration.0.retention_configuration.0.iceberg_configuration.0.snapshot_retention_period_in_days", "6"), + resource.TestCheckResourceAttr(resourceName, 
"configuration.0.retention_configuration.0.iceberg_configuration.0.number_of_snapshots_to_retain", "3"), + resource.TestCheckResourceAttr(resourceName, "configuration.0.retention_configuration.0.iceberg_configuration.0.clean_expired_files", acctest.CtTrue), + resource.TestCheckResourceAttr(resourceName, "configuration.0.retention_configuration.0.iceberg_configuration.0.run_rate_in_hours", "4"), ), }, }, @@ -201,6 +258,7 @@ func testAccCatalogTableOptimizer_DeleteOrphanFileConfiguration(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "configuration.0.enabled", acctest.CtTrue), resource.TestCheckResourceAttr(resourceName, "configuration.0.orphan_file_deletion_configuration.0.iceberg_configuration.0.orphan_file_retention_period_in_days", "7"), resource.TestCheckResourceAttr(resourceName, "configuration.0.orphan_file_deletion_configuration.0.iceberg_configuration.0.location", fmt.Sprintf("s3://%s/files/", rName)), + resource.TestCheckResourceAttr(resourceName, "configuration.0.orphan_file_deletion_configuration.0.iceberg_configuration.0.run_rate_in_hours", "24"), ), }, { @@ -221,6 +279,60 @@ func testAccCatalogTableOptimizer_DeleteOrphanFileConfiguration(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "configuration.0.enabled", acctest.CtTrue), resource.TestCheckResourceAttr(resourceName, "configuration.0.orphan_file_deletion_configuration.0.iceberg_configuration.0.orphan_file_retention_period_in_days", "6"), resource.TestCheckResourceAttr(resourceName, "configuration.0.orphan_file_deletion_configuration.0.iceberg_configuration.0.location", fmt.Sprintf("s3://%s/files/", rName)), + resource.TestCheckResourceAttr(resourceName, "configuration.0.orphan_file_deletion_configuration.0.iceberg_configuration.0.run_rate_in_hours", "24"), + ), + }, + }, + }) +} + +func testAccCatalogTableOptimizer_DeleteOrphanFileConfigurationWithRunRateInHours(t *testing.T) { + ctx := acctest.Context(t) + var catalogTableOptimizer glue.GetTableOptimizerOutput + + 
resourceName := "aws_glue_catalog_table_optimizer.test" + + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.GlueServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckCatalogTableOptimizerDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccCatalogTableOptimizerConfig_orphanFileDeletionConfigurationWithRunRateInHours(rName, 7, 6), + Check: resource.ComposeTestCheckFunc( + testAccCheckCatalogTableOptimizerExists(ctx, resourceName, &catalogTableOptimizer), + acctest.CheckResourceAttrAccountID(ctx, resourceName, names.AttrCatalogID), + resource.TestCheckResourceAttr(resourceName, names.AttrDatabaseName, rName), + resource.TestCheckResourceAttr(resourceName, names.AttrTableName, rName), + resource.TestCheckResourceAttr(resourceName, names.AttrType, "orphan_file_deletion"), + resource.TestCheckResourceAttr(resourceName, "configuration.0.enabled", acctest.CtTrue), + resource.TestCheckResourceAttr(resourceName, "configuration.0.orphan_file_deletion_configuration.0.iceberg_configuration.0.orphan_file_retention_period_in_days", "7"), + resource.TestCheckResourceAttr(resourceName, "configuration.0.orphan_file_deletion_configuration.0.iceberg_configuration.0.location", fmt.Sprintf("s3://%s/files/", rName)), + resource.TestCheckResourceAttr(resourceName, "configuration.0.orphan_file_deletion_configuration.0.iceberg_configuration.0.run_rate_in_hours", "6"), + ), + }, + { + ResourceName: resourceName, + ImportStateIdFunc: testAccCatalogTableOptimizerStateIDFunc(resourceName), + ImportStateVerifyIdentifierAttribute: names.AttrTableName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccCatalogTableOptimizerConfig_orphanFileDeletionConfigurationWithRunRateInHours(rName, 6, 4), + Check: resource.ComposeTestCheckFunc( + 
testAccCheckCatalogTableOptimizerExists(ctx, resourceName, &catalogTableOptimizer), + acctest.CheckResourceAttrAccountID(ctx, resourceName, names.AttrCatalogID), + resource.TestCheckResourceAttr(resourceName, names.AttrDatabaseName, rName), + resource.TestCheckResourceAttr(resourceName, names.AttrTableName, rName), + resource.TestCheckResourceAttr(resourceName, names.AttrType, "orphan_file_deletion"), + resource.TestCheckResourceAttr(resourceName, "configuration.0.enabled", acctest.CtTrue), + resource.TestCheckResourceAttr(resourceName, "configuration.0.orphan_file_deletion_configuration.0.iceberg_configuration.0.orphan_file_retention_period_in_days", "6"), + resource.TestCheckResourceAttr(resourceName, "configuration.0.orphan_file_deletion_configuration.0.iceberg_configuration.0.location", fmt.Sprintf("s3://%s/files/", rName)), + resource.TestCheckResourceAttr(resourceName, "configuration.0.orphan_file_deletion_configuration.0.iceberg_configuration.0.run_rate_in_hours", "4"), ), }, }, @@ -472,10 +584,39 @@ resource "aws_glue_catalog_table_optimizer" "test" { } } } + depends_on = [aws_iam_role_policy.test] } `, retentionPeriod)) } +func testAccCatalogTableOptimizerConfig_retentionConfigurationWithRunRateInHours(rName string, retentionPeriod, runRateInHours int) string { + return acctest.ConfigCompose( + testAccCatalogTableOptimizerConfig_baseConfig(rName), + fmt.Sprintf(` +resource "aws_glue_catalog_table_optimizer" "test" { + catalog_id = data.aws_caller_identity.current.account_id + database_name = aws_glue_catalog_database.test.name + table_name = aws_glue_catalog_table.test.name + type = "retention" + + configuration { + role_arn = aws_iam_role.test.arn + enabled = true + + retention_configuration { + iceberg_configuration { + snapshot_retention_period_in_days = %[1]d + number_of_snapshots_to_retain = 3 + clean_expired_files = true + run_rate_in_hours = %[2]d + } + } + } + depends_on = [aws_iam_role_policy.test] +} +`, retentionPeriod, runRateInHours)) +} + 
func testAccCatalogTableOptimizerConfig_orphanFileDeletionConfiguration(rName string, retentionPeriod int) string { return acctest.ConfigCompose( testAccCatalogTableOptimizerConfig_baseConfig(rName), @@ -497,6 +638,34 @@ resource "aws_glue_catalog_table_optimizer" "test" { } } } + depends_on = [aws_iam_role_policy.test] } `, retentionPeriod)) } + +func testAccCatalogTableOptimizerConfig_orphanFileDeletionConfigurationWithRunRateInHours(rName string, retentionPeriod, runRateInHours int) string { + return acctest.ConfigCompose( + testAccCatalogTableOptimizerConfig_baseConfig(rName), + fmt.Sprintf(` +resource "aws_glue_catalog_table_optimizer" "test" { + catalog_id = data.aws_caller_identity.current.account_id + database_name = aws_glue_catalog_database.test.name + table_name = aws_glue_catalog_table.test.name + type = "orphan_file_deletion" + + configuration { + role_arn = aws_iam_role.test.arn + enabled = true + + orphan_file_deletion_configuration { + iceberg_configuration { + orphan_file_retention_period_in_days = %[1]d + location = "s3://${aws_s3_bucket.bucket.bucket}/files/" + run_rate_in_hours = %[2]d + } + } + } + depends_on = [aws_iam_role_policy.test] +} +`, retentionPeriod, runRateInHours)) +} diff --git a/internal/service/glue/crawler.go b/internal/service/glue/crawler.go index 74677b49acd0..f17502ef3deb 100644 --- a/internal/service/glue/crawler.go +++ b/internal/service/glue/crawler.go @@ -438,40 +438,37 @@ func resourceCrawlerCreate(ctx context.Context, d *schema.ResourceData, meta any } // Retry for IAM eventual consistency - err = retry.RetryContext(ctx, propagationTimeout, func() *retry.RetryError { + err = tfresource.Retry(ctx, propagationTimeout, func(ctx context.Context) *tfresource.RetryError { _, err = glueConn.CreateCrawler(ctx, crawlerInput) if err != nil { // InvalidInputException: Insufficient Lake Formation permission(s) on xxx if errs.IsAErrorMessageContains[*awstypes.InvalidInputException](err, "Insufficient Lake Formation permission") { 
- return retry.RetryableError(err) + return tfresource.RetryableError(err) } if errs.IsAErrorMessageContains[*awstypes.InvalidInputException](err, "Service is unable to assume provided role") { - return retry.RetryableError(err) + return tfresource.RetryableError(err) } // InvalidInputException: com.amazonaws.services.glue.model.AccessDeniedException: You need to enable AWS Security Token Service for this region. . Please verify the role's TrustPolicy. if errs.IsAErrorMessageContains[*awstypes.InvalidInputException](err, "Please verify the role's TrustPolicy") { - return retry.RetryableError(err) + return tfresource.RetryableError(err) } // InvalidInputException: Unable to retrieve connection tf-acc-test-8656357591012534997: User: arn:aws:sts::*******:assumed-role/tf-acc-test-8656357591012534997/AWS-Crawler is not authorized to perform: glue:GetConnection on resource: * (Service: AmazonDataCatalog; Status Code: 400; Error Code: AccessDeniedException; Request ID: 4d72b66f-9c75-11e8-9faf-5b526c7be968) if errs.IsAErrorMessageContains[*awstypes.InvalidInputException](err, "is not authorized") { - return retry.RetryableError(err) + return tfresource.RetryableError(err) } // InvalidInputException: SQS queue arn:aws:sqs:us-west-2:*******:tf-acc-test-4317277351691904203 does not exist or the role provided does not have access to it. 
if errs.IsAErrorMessageContains[*awstypes.InvalidInputException](err, "SQS queue") && errs.IsAErrorMessageContains[*awstypes.InvalidInputException](err, "does not exist or the role provided does not have access to it") { - return retry.RetryableError(err) + return tfresource.RetryableError(err) } - return retry.NonRetryableError(err) + return tfresource.NonRetryableError(err) } return nil }) - if tfresource.TimedOut(err) { - _, err = glueConn.CreateCrawler(ctx, crawlerInput) - } if err != nil { return sdkdiag.AppendErrorf(diags, "creating Glue Crawler (%s): %s", name, err) } @@ -585,42 +582,38 @@ func resourceCrawlerUpdate(ctx context.Context, d *schema.ResourceData, meta any } // Retry for IAM eventual consistency - err = retry.RetryContext(ctx, propagationTimeout, func() *retry.RetryError { + err = tfresource.Retry(ctx, propagationTimeout, func(ctx context.Context) *tfresource.RetryError { _, err := glueConn.UpdateCrawler(ctx, updateCrawlerInput) if err != nil { // InvalidInputException: Insufficient Lake Formation permission(s) on xxx if errs.IsAErrorMessageContains[*awstypes.InvalidInputException](err, "Insufficient Lake Formation permission") { - return retry.RetryableError(err) + return tfresource.RetryableError(err) } if errs.IsAErrorMessageContains[*awstypes.InvalidInputException](err, "Service is unable to assume provided role") { - return retry.RetryableError(err) + return tfresource.RetryableError(err) } // InvalidInputException: com.amazonaws.services.glue.model.AccessDeniedException: You need to enable AWS Security Token Service for this region. . Please verify the role's TrustPolicy. 
if errs.IsAErrorMessageContains[*awstypes.InvalidInputException](err, "Please verify the role's TrustPolicy") { - return retry.RetryableError(err) + return tfresource.RetryableError(err) } // InvalidInputException: Unable to retrieve connection tf-acc-test-8656357591012534997: User: arn:aws:sts::*******:assumed-role/tf-acc-test-8656357591012534997/AWS-Crawler is not authorized to perform: glue:GetConnection on resource: * (Service: AmazonDataCatalog; Status Code: 400; Error Code: AccessDeniedException; Request ID: 4d72b66f-9c75-11e8-9faf-5b526c7be968) if errs.IsAErrorMessageContains[*awstypes.InvalidInputException](err, "is not authorized") { - return retry.RetryableError(err) + return tfresource.RetryableError(err) } // InvalidInputException: SQS queue arn:aws:sqs:us-west-2:*******:tf-acc-test-4317277351691904203 does not exist or the role provided does not have access to it. if errs.IsAErrorMessageContains[*awstypes.InvalidInputException](err, "SQS queue") && errs.IsAErrorMessageContains[*awstypes.InvalidInputException](err, "does not exist or the role provided does not have access to it") { - return retry.RetryableError(err) + return tfresource.RetryableError(err) } - return retry.NonRetryableError(err) + return tfresource.NonRetryableError(err) } return nil }) - if tfresource.TimedOut(err) { - _, err = glueConn.UpdateCrawler(ctx, updateCrawlerInput) - } - if err != nil { return sdkdiag.AppendErrorf(diags, "updating Glue Crawler (%s): %s", d.Id(), err) } @@ -703,7 +696,7 @@ func createCrawlerInput(ctx context.Context, d *schema.ResourceData, crawlerName if v, ok := d.GetOk(names.AttrConfiguration); ok { configuration, err := structure.NormalizeJsonString(v) if err != nil { - return nil, fmt.Errorf("configuration contains an invalid JSON: %v", err) + return nil, fmt.Errorf("configuration contains an invalid JSON: %w", err) } crawlerInput.Configuration = aws.String(configuration) } @@ -755,7 +748,7 @@ func updateCrawlerInput(d *schema.ResourceData, crawlerName 
string) (*glue.Updat if v, ok := d.GetOk(names.AttrConfiguration); ok { configuration, err := structure.NormalizeJsonString(v) if err != nil { - return nil, fmt.Errorf("Configuration contains an invalid JSON: %v", err) + return nil, fmt.Errorf("Configuration contains an invalid JSON: %w", err) } crawlerInput.Configuration = aws.String(configuration) } else { diff --git a/internal/service/glue/crawler_test.go b/internal/service/glue/crawler_test.go index b319da2ddfe1..67b370d889ae 100644 --- a/internal/service/glue/crawler_test.go +++ b/internal/service/glue/crawler_test.go @@ -1779,12 +1779,12 @@ func testAccCheckCrawlerConfiguration(crawler *awstypes.Crawler, acctestJSON str apiJSON := aws.ToString(crawler.Configuration) apiJSONBuffer := bytes.NewBufferString("") if err := json.Compact(apiJSONBuffer, []byte(apiJSON)); err != nil { - return fmt.Errorf("unable to compact API configuration JSON: %s", err) + return fmt.Errorf("unable to compact API configuration JSON: %w", err) } acctestJSONBuffer := bytes.NewBufferString("") if err := json.Compact(acctestJSONBuffer, []byte(acctestJSON)); err != nil { - return fmt.Errorf("unable to compact acceptance test configuration JSON: %s", err) + return fmt.Errorf("unable to compact acceptance test configuration JSON: %w", err) } if !verify.JSONBytesEqual(apiJSONBuffer.Bytes(), acctestJSONBuffer.Bytes()) { diff --git a/internal/service/glue/dev_endpoint.go b/internal/service/glue/dev_endpoint.go index 1a72ceedc64c..465741297b78 100644 --- a/internal/service/glue/dev_endpoint.go +++ b/internal/service/glue/dev_endpoint.go @@ -235,29 +235,25 @@ func resourceDevEndpointCreate(ctx context.Context, d *schema.ResourceData, meta } log.Printf("[DEBUG] Creating Glue Dev Endpoint: %#v", *input) - err := retry.RetryContext(ctx, propagationTimeout, func() *retry.RetryError { + err := tfresource.Retry(ctx, propagationTimeout, func(ctx context.Context) *tfresource.RetryError { _, err := conn.CreateDevEndpoint(ctx, input) if err != nil { // 
Retry for IAM eventual consistency if errs.IsAErrorMessageContains[*awstypes.InvalidInputException](err, "should be given assume role permissions for Glue Service") { - return retry.RetryableError(err) + return tfresource.RetryableError(err) } if errs.IsAErrorMessageContains[*awstypes.InvalidInputException](err, "is not authorized to perform") { - return retry.RetryableError(err) + return tfresource.RetryableError(err) } if errs.IsAErrorMessageContains[*awstypes.InvalidInputException](err, "S3 endpoint and NAT validation has failed for subnetId") { - return retry.RetryableError(err) + return tfresource.RetryableError(err) } - return retry.NonRetryableError(err) + return tfresource.NonRetryableError(err) } return nil }) - if tfresource.TimedOut(err) { - _, err = conn.CreateDevEndpoint(ctx, input) - } - if err != nil { return sdkdiag.AppendErrorf(diags, "creating Glue Dev Endpoint: %s", err) } @@ -466,22 +462,18 @@ func resourceDevEndpointUpdate(ctx context.Context, d *schema.ResourceData, meta if hasChanged { log.Printf("[DEBUG] Updating Glue Dev Endpoint: %+v", input) - err := retry.RetryContext(ctx, 5*time.Minute, func() *retry.RetryError { + err := tfresource.Retry(ctx, 5*time.Minute, func(ctx context.Context) *tfresource.RetryError { _, err := conn.UpdateDevEndpoint(ctx, input) if err != nil { if errs.IsAErrorMessageContains[*awstypes.InvalidInputException](err, "another concurrent update operation") { - return retry.RetryableError(err) + return tfresource.RetryableError(err) } - return retry.NonRetryableError(err) + return tfresource.NonRetryableError(err) } return nil }) - if tfresource.TimedOut(err) { - _, err = conn.UpdateDevEndpoint(ctx, input) - } - if err != nil { return sdkdiag.AppendErrorf(diags, "updating Glue Dev Endpoint: %s", err) } diff --git a/internal/service/glue/glue_test.go b/internal/service/glue/glue_test.go index 2f86cd9057c0..000eea341e59 100644 --- a/internal/service/glue/glue_test.go +++ b/internal/service/glue/glue_test.go @@ -14,11 
+14,13 @@ func TestAccGlue_serial(t *testing.T) { testCases := map[string]map[string]func(t *testing.T){ "CatalogTableOptimizer": { - acctest.CtBasic: testAccCatalogTableOptimizer_basic, - "deleteOrphanFileConfiguration": testAccCatalogTableOptimizer_DeleteOrphanFileConfiguration, - acctest.CtDisappears: testAccCatalogTableOptimizer_disappears, - "retentionConfiguration": testAccCatalogTableOptimizer_RetentionConfiguration, - "update": testAccCatalogTableOptimizer_update, + acctest.CtBasic: testAccCatalogTableOptimizer_basic, + "deleteOrphanFileConfiguration": testAccCatalogTableOptimizer_DeleteOrphanFileConfiguration, + "deleteOrphanFileConfigurationWithRunRateInHours": testAccCatalogTableOptimizer_DeleteOrphanFileConfigurationWithRunRateInHours, + acctest.CtDisappears: testAccCatalogTableOptimizer_disappears, + "retentionConfiguration": testAccCatalogTableOptimizer_RetentionConfiguration, + "retentionConfigurationWithRunRateInHours": testAccCatalogTableOptimizer_RetentionConfigurationWithRunRateInHours, + "update": testAccCatalogTableOptimizer_update, }, "DataCatalogEncryptionSettings": { acctest.CtBasic: testAccDataCatalogEncryptionSettings_basic, diff --git a/internal/service/glue/job.go b/internal/service/glue/job.go index ed752e85373b..0ab40df19fe6 100644 --- a/internal/service/glue/job.go +++ b/internal/service/glue/job.go @@ -21,6 +21,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" "github.com/hashicorp/terraform-provider-aws/internal/flex" + tfslices "github.com/hashicorp/terraform-provider-aws/internal/slices" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/internal/verify" @@ -235,11 +236,11 @@ func resourceJob() *schema.Resource { Optional: true, }, "worker_type": { - Type: schema.TypeString, - Optional: true, - Computed: true, - 
ConflictsWith: []string{names.AttrMaxCapacity}, - ValidateDiagFunc: enum.Validate[awstypes.WorkerType](), + Type: schema.TypeString, + Optional: true, + Computed: true, + ConflictsWith: []string{names.AttrMaxCapacity}, + ValidateFunc: validation.StringInSlice(workerType_Values(), false), }, }, } @@ -687,3 +688,7 @@ func flattenSourceControlDetails(sourceControlDetails *awstypes.SourceControlDet return []map[string]any{m} } + +func workerType_Values() []string { + return tfslices.AppendUnique(enum.Values[awstypes.WorkerType](), "G.12X", "G.16X", "R.1X", "R.2X", "R.4X", "R.8X") +} diff --git a/internal/service/glue/job_test.go b/internal/service/glue/job_test.go index 34f3a158f2ef..f7572158679b 100644 --- a/internal/service/glue/job_test.go +++ b/internal/service/glue/job_test.go @@ -12,7 +12,11 @@ import ( awstypes "github.com/aws/aws-sdk-go-v2/service/glue/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/knownvalue" + "github.com/hashicorp/terraform-plugin-testing/plancheck" + "github.com/hashicorp/terraform-plugin-testing/statecheck" "github.com/hashicorp/terraform-plugin-testing/terraform" + "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" "github.com/hashicorp/terraform-provider-aws/internal/acctest" "github.com/hashicorp/terraform-provider-aws/internal/conns" tfglue "github.com/hashicorp/terraform-provider-aws/internal/service/glue" @@ -702,36 +706,71 @@ func TestAccGlueJob_workerType(t *testing.T) { Config: testAccJobConfig_workerType(rName, "Standard"), Check: resource.ComposeTestCheckFunc( testAccCheckJobExists(ctx, resourceName, &job), - resource.TestCheckResourceAttr(resourceName, "worker_type", "Standard"), ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + }, + }, + ConfigStateChecks: 
[]statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("worker_type"), knownvalue.StringExact("Standard")), + }, }, { Config: testAccJobConfig_workerType(rName, "G.1X"), Check: resource.ComposeTestCheckFunc( testAccCheckJobExists(ctx, resourceName, &job), - resource.TestCheckResourceAttr(resourceName, "worker_type", "G.1X"), ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("worker_type"), knownvalue.StringExact("G.1X")), + }, }, { Config: testAccJobConfig_workerType(rName, "G.2X"), Check: resource.ComposeTestCheckFunc( testAccCheckJobExists(ctx, resourceName, &job), - resource.TestCheckResourceAttr(resourceName, "worker_type", "G.2X"), ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("worker_type"), knownvalue.StringExact("G.2X")), + }, }, { Config: testAccJobConfig_workerType(rName, "G.4X"), Check: resource.ComposeTestCheckFunc( testAccCheckJobExists(ctx, resourceName, &job), - resource.TestCheckResourceAttr(resourceName, "worker_type", "G.4X"), ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("worker_type"), knownvalue.StringExact("G.4X")), + }, }, { - Config: testAccJobConfig_workerType(rName, "G.8X"), + Config: testAccJobConfig_workerType(rName, "R.1X"), Check: resource.ComposeTestCheckFunc( testAccCheckJobExists(ctx, resourceName, &job), - 
resource.TestCheckResourceAttr(resourceName, "worker_type", "G.8X"), ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("worker_type"), knownvalue.StringExact("R.1X")), + }, }, { ResourceName: resourceName, diff --git a/internal/service/glue/ml_transform.go b/internal/service/glue/ml_transform.go index f7bf30a65f50..d78d5cf256d8 100644 --- a/internal/service/glue/ml_transform.go +++ b/internal/service/glue/ml_transform.go @@ -227,7 +227,7 @@ func resourceMLTransformCreate(ctx context.Context, d *schema.ResourceData, meta log.Printf("[DEBUG] Creating Glue ML Transform: %+v", input) - outputRaw, err := tfresource.RetryWhenIsAErrorMessageContains[*awstypes.InvalidInputException](ctx, propagationTimeout, func() (any, error) { + outputRaw, err := tfresource.RetryWhenIsAErrorMessageContains[any, *awstypes.InvalidInputException](ctx, propagationTimeout, func(ctx context.Context) (any, error) { return conn.CreateMLTransform(ctx, input) }, "Unable to assume role") diff --git a/internal/service/glue/registry.go b/internal/service/glue/registry.go index a75d9c160ee5..c5191c3c1d35 100644 --- a/internal/service/glue/registry.go +++ b/internal/service/glue/registry.go @@ -27,6 +27,9 @@ import ( // @SDKResource("aws_glue_registry", name="Registry") // @Tags(identifierAttribute="arn") +// @ArnIdentity +// @Testing(preIdentityVersion="v6.3.0") +// @Testing(existsType="github.com/aws/aws-sdk-go-v2/service/glue;glue.GetRegistryOutput") func resourceRegistry() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceRegistryCreate, @@ -34,10 +37,6 @@ func resourceRegistry() *schema.Resource { UpdateWithoutTimeout: resourceRegistryUpdate, DeleteWithoutTimeout: resourceRegistryDelete, - Importer: &schema.ResourceImporter{ - StateContext: 
schema.ImportStatePassthroughContext, - }, - Schema: map[string]*schema.Schema{ names.AttrARN: { Type: schema.TypeString, diff --git a/internal/service/glue/registry_identity_gen_test.go b/internal/service/glue/registry_identity_gen_test.go new file mode 100644 index 000000000000..3a9a57e94e11 --- /dev/null +++ b/internal/service/glue/registry_identity_gen_test.go @@ -0,0 +1,341 @@ +// Code generated by internal/generate/identitytests/main.go; DO NOT EDIT. + +package glue_test + +import ( + "testing" + + "github.com/aws/aws-sdk-go-v2/service/glue" + "github.com/hashicorp/terraform-plugin-testing/compare" + "github.com/hashicorp/terraform-plugin-testing/config" + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/knownvalue" + "github.com/hashicorp/terraform-plugin-testing/plancheck" + "github.com/hashicorp/terraform-plugin-testing/statecheck" + "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" + "github.com/hashicorp/terraform-plugin-testing/tfversion" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccGlueRegistry_Identity_Basic(t *testing.T) { + ctx := acctest.Context(t) + + var v glue.GetRegistryOutput + resourceName := "aws_glue_registry.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.GlueServiceID), + CheckDestroy: testAccCheckRegistryDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + // Step 1: Setup + { + 
ConfigDirectory: config.StaticDirectory("testdata/Registry/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckRegistryExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New(names.AttrARN), compare.ValuesSame()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + + // Step 2: Import command + { + ConfigDirectory: config.StaticDirectory("testdata/Registry/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ImportStateKind: resource.ImportCommandWithID, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + + // Step 3: Import block with Import ID + { + ConfigDirectory: config.StaticDirectory("testdata/Registry/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + }, + }, + }, + + // Step 4: Import block with Resource Identity + { + ConfigDirectory: config.StaticDirectory("testdata/Registry/basic/"), + ConfigVariables: 
config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithResourceIdentity, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + }, + }, + }, + }, + }) +} + +func TestAccGlueRegistry_Identity_RegionOverride(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_glue_registry.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.GlueServiceID), + CheckDestroy: acctest.CheckDestroyNoop, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + // Step 1: Setup + { + ConfigDirectory: config.StaticDirectory("testdata/Registry/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New(names.AttrARN), compare.ValuesSame()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, 
tfjsonpath.New(names.AttrARN)), + }, + }, + + // Step 2: Import command with appended "@" + { + ConfigDirectory: config.StaticDirectory("testdata/Registry/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ImportStateKind: resource.ImportCommandWithID, + ImportStateIdFunc: acctest.CrossRegionImportStateIdFunc(resourceName), + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + + // Step 3: Import command without appended "@" + { + ConfigDirectory: config.StaticDirectory("testdata/Registry/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ImportStateKind: resource.ImportCommandWithID, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + + // Step 4: Import block with Import ID and appended "@" + { + ConfigDirectory: config.StaticDirectory("testdata/Registry/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportStateIdFunc: acctest.CrossRegionImportStateIdFunc(resourceName), + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + }, + + // Step 5: Import block with Import ID and no appended "@" + { + ConfigDirectory: config.StaticDirectory("testdata/Registry/region_override/"), + 
ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + }, + + // Step 6: Import block with Resource Identity + { + ConfigDirectory: config.StaticDirectory("testdata/Registry/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithResourceIdentity, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + }, + }, + }) +} + +// Resource Identity was added after v6.3.0 +func TestAccGlueRegistry_Identity_ExistingResource(t *testing.T) { + ctx := acctest.Context(t) + + var v glue.GetRegistryOutput + resourceName := "aws_glue_registry.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + 
ErrorCheck: acctest.ErrorCheck(t, names.GlueServiceID), + CheckDestroy: testAccCheckRegistryDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/Registry/basic_v6.3.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckRegistryExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Registry/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + }, + }) +} + +// Resource Identity was added after v6.3.0 +func TestAccGlueRegistry_Identity_ExistingResource_NoRefresh_NoChange(t *testing.T) { + ctx := acctest.Context(t) + + var v glue.GetRegistryOutput + resourceName := "aws_glue_registry.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.GlueServiceID), + CheckDestroy: 
testAccCheckRegistryDestroy(ctx), + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/Registry/basic_v6.3.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckRegistryExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Registry/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + }, + }) +} diff --git a/internal/service/glue/resource_policy_identity_gen_test.go b/internal/service/glue/resource_policy_identity_gen_test.go index 46e47d6ee4de..e40bb2b0b7da 100644 --- a/internal/service/glue/resource_policy_identity_gen_test.go +++ b/internal/service/glue/resource_policy_identity_gen_test.go @@ -15,6 +15,7 @@ import ( "github.com/hashicorp/terraform-plugin-testing/tfversion" "github.com/hashicorp/terraform-provider-aws/internal/acctest" tfknownvalue "github.com/hashicorp/terraform-provider-aws/internal/acctest/knownvalue" + tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -22,9 +23,10 @@ func 
testAccGlueResourcePolicy_IdentitySerial(t *testing.T) { t.Helper() testCases := map[string]func(t *testing.T){ - acctest.CtBasic: testAccGlueResourcePolicy_Identity_Basic, - "ExistingResource": testAccGlueResourcePolicy_Identity_ExistingResource, - "RegionOverride": testAccGlueResourcePolicy_Identity_RegionOverride, + acctest.CtBasic: testAccGlueResourcePolicy_Identity_Basic, + "ExistingResource": testAccGlueResourcePolicy_Identity_ExistingResource, + "ExistingResourceNoRefresh": testAccGlueResourcePolicy_Identity_ExistingResource_NoRefresh_NoChange, + "RegionOverride": testAccGlueResourcePolicy_Identity_RegionOverride, } acctest.RunSerialTests1Level(t, testCases, 0) @@ -32,9 +34,10 @@ func testAccGlueResourcePolicy_IdentitySerial(t *testing.T) { func testAccGlueResourcePolicy_Identity_Basic(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_glue_resource_policy.test" - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ TerraformVersionChecks: []tfversion.TerraformVersionCheck{ tfversion.SkipBelow(tfversion.Version1_12_0), }, @@ -105,7 +108,7 @@ func testAccGlueResourcePolicy_Identity_RegionOverride(t *testing.T) { resourceName := "aws_glue_resource_policy.test" - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ TerraformVersionChecks: []tfversion.TerraformVersionCheck{ tfversion.SkipBelow(tfversion.Version1_12_0), }, @@ -209,3 +212,106 @@ func testAccGlueResourcePolicy_Identity_RegionOverride(t *testing.T) { }, }) } + +func testAccGlueResourcePolicy_Identity_ExistingResource(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_glue_resource_policy.test" + + acctest.Test(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.GlueServiceID), + CheckDestroy: testAccCheckResourcePolicyDestroy(ctx), + Steps: 
[]resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/ResourcePolicy/basic_v5.100.0/"), + ConfigVariables: config.Variables{}, + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: v6.0 Identity error + { + ConfigDirectory: config.StaticDirectory("testdata/ResourcePolicy/basic_v6.0.0/"), + ConfigVariables: config.Variables{}, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: knownvalue.Null(), + names.AttrRegion: knownvalue.Null(), + }), + }, + }, + + // Step 3: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/ResourcePolicy/basic/"), + ConfigVariables: config.Variables{}, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + }), + }, + }, + }, + }) +} + +func testAccGlueResourcePolicy_Identity_ExistingResource_NoRefresh_NoChange(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_glue_resource_policy.test" + + acctest.Test(ctx, t, resource.TestCase{ + TerraformVersionChecks: 
[]tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.GlueServiceID), + CheckDestroy: testAccCheckResourcePolicyDestroy(ctx), + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/ResourcePolicy/basic_v5.100.0/"), + ConfigVariables: config.Variables{}, + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/ResourcePolicy/basic/"), + ConfigVariables: config.Variables{}, + }, + }, + }) +} diff --git a/internal/service/glue/resource_policy_test.go b/internal/service/glue/resource_policy_test.go index d924369820cd..dddbce03041b 100644 --- a/internal/service/glue/resource_policy_test.go +++ b/internal/service/glue/resource_policy_test.go @@ -11,14 +11,9 @@ import ( "github.com/aws/aws-sdk-go-v2/aws" awspolicy "github.com/hashicorp/awspolicyequivalence" "github.com/hashicorp/terraform-plugin-testing/helper/resource" - "github.com/hashicorp/terraform-plugin-testing/knownvalue" "github.com/hashicorp/terraform-plugin-testing/plancheck" - "github.com/hashicorp/terraform-plugin-testing/statecheck" "github.com/hashicorp/terraform-plugin-testing/terraform" - "github.com/hashicorp/terraform-plugin-testing/tfversion" "github.com/hashicorp/terraform-provider-aws/internal/acctest" - tfknownvalue "github.com/hashicorp/terraform-provider-aws/internal/acctest/knownvalue" - tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" "github.com/hashicorp/terraform-provider-aws/internal/conns" tfglue "github.com/hashicorp/terraform-provider-aws/internal/service/glue" 
"github.com/hashicorp/terraform-provider-aws/internal/tfresource" @@ -192,7 +187,7 @@ func testAccResourcePolicy(ctx context.Context, n string, action string) resourc actualPolicyText, expectedPolicy := aws.ToString(output.PolicyInJson), testAccNewResourcePolicy(ctx, action) equivalent, err := awspolicy.PoliciesAreEquivalent(actualPolicyText, expectedPolicy) if err != nil { - return fmt.Errorf("Error testing policy equivalence: %s", err) + return fmt.Errorf("Error testing policy equivalence: %w", err) } if !equivalent { return fmt.Errorf("Non-equivalent policy error:\n\nexpected: %s\n\n got: %s\n", @@ -203,75 +198,6 @@ func testAccResourcePolicy(ctx context.Context, n string, action string) resourc } } -func testAccGlueResourcePolicy_Identity_ExistingResource(t *testing.T) { - ctx := acctest.Context(t) - resourceName := "aws_glue_resource_policy.test" - - resource.Test(t, resource.TestCase{ - TerraformVersionChecks: []tfversion.TerraformVersionCheck{ - tfversion.SkipBelow(tfversion.Version1_12_0), - }, - PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, names.GlueServiceID), - CheckDestroy: testAccCheckResourcePolicyDestroy(ctx), - Steps: []resource.TestStep{ - { - ExternalProviders: map[string]resource.ExternalProvider{ - "aws": { - Source: "hashicorp/aws", - VersionConstraint: "5.100.0", - }, - }, - Config: testAccResourcePolicyConfig_required_v5("glue:CreateTable"), - ConfigStateChecks: []statecheck.StateCheck{ - tfstatecheck.ExpectNoIdentity(resourceName), - }, - }, - { - ExternalProviders: map[string]resource.ExternalProvider{ - "aws": { - Source: "hashicorp/aws", - VersionConstraint: "6.0.0", - }, - }, - Config: testAccResourcePolicyConfig_required("glue:CreateTable"), - ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - PostApplyPostRefresh: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, 
plancheck.ResourceActionNoop), - }, - }, - ConfigStateChecks: []statecheck.StateCheck{ - statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ - names.AttrAccountID: knownvalue.Null(), - names.AttrRegion: knownvalue.Null(), - }), - }, - }, - { - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - Config: testAccResourcePolicyConfig_required("glue:CreateTable"), - ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - PostApplyPostRefresh: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - }, - ConfigStateChecks: []statecheck.StateCheck{ - statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ - names.AttrAccountID: tfknownvalue.AccountID(), - names.AttrRegion: knownvalue.StringExact(acctest.Region()), - }), - }, - }, - }, - }) -} - func testAccCheckResourcePolicyDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { conn := acctest.Provider.Meta().(*conns.AWSClient).GlueClient(ctx) @@ -341,31 +267,6 @@ resource "aws_glue_resource_policy" "test" { `, action) } -func testAccResourcePolicyConfig_required_v5(action string) string { - return fmt.Sprintf(` -data "aws_caller_identity" "current" {} - -data "aws_partition" "current" {} - -data "aws_region" "current" {} - -data "aws_iam_policy_document" "glue-example-policy" { - statement { - actions = [%[1]q] - resources = ["arn:${data.aws_partition.current.partition}:glue:${data.aws_region.current.name}:${data.aws_caller_identity.current.account_id}:*"] - principals { - identifiers = ["*"] - type = "AWS" - } - } -} - -resource "aws_glue_resource_policy" "test" { - policy = data.aws_iam_policy_document.glue-example-policy.json -} -`, action) -} - func testAccResourcePolicyConfig_hybrid(action, hybrid string) string { return fmt.Sprintf(` data "aws_caller_identity" "current" {} diff 
--git a/internal/service/glue/schema.go b/internal/service/glue/schema.go index 9f4eb8c15ae3..657c7b7ac44b 100644 --- a/internal/service/glue/schema.go +++ b/internal/service/glue/schema.go @@ -27,6 +27,9 @@ import ( // @SDKResource("aws_glue_schema", name="Schema") // @Tags(identifierAttribute="arn") +// @ArnIdentity +// @Testing(preIdentityVersion="v6.3.0") +// @Testing(existsType="github.com/aws/aws-sdk-go-v2/service/glue;glue.GetSchemaOutput") func resourceSchema() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceSchemaCreate, @@ -34,10 +37,6 @@ func resourceSchema() *schema.Resource { UpdateWithoutTimeout: resourceSchemaUpdate, DeleteWithoutTimeout: resourceSchemaDelete, - Importer: &schema.ResourceImporter{ - StateContext: schema.ImportStatePassthroughContext, - }, - Schema: map[string]*schema.Schema{ names.AttrARN: { Type: schema.TypeString, diff --git a/internal/service/glue/schema_identity_gen_test.go b/internal/service/glue/schema_identity_gen_test.go new file mode 100644 index 000000000000..7a4547653a9f --- /dev/null +++ b/internal/service/glue/schema_identity_gen_test.go @@ -0,0 +1,341 @@ +// Code generated by internal/generate/identitytests/main.go; DO NOT EDIT. 
+ +package glue_test + +import ( + "testing" + + "github.com/aws/aws-sdk-go-v2/service/glue" + "github.com/hashicorp/terraform-plugin-testing/compare" + "github.com/hashicorp/terraform-plugin-testing/config" + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/knownvalue" + "github.com/hashicorp/terraform-plugin-testing/plancheck" + "github.com/hashicorp/terraform-plugin-testing/statecheck" + "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" + "github.com/hashicorp/terraform-plugin-testing/tfversion" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccGlueSchema_Identity_Basic(t *testing.T) { + ctx := acctest.Context(t) + + var v glue.GetSchemaOutput + resourceName := "aws_glue_schema.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.GlueServiceID), + CheckDestroy: testAccCheckSchemaDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + // Step 1: Setup + { + ConfigDirectory: config.StaticDirectory("testdata/Schema/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSchemaExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New(names.AttrARN), compare.ValuesSame()), + 
statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + + // Step 2: Import command + { + ConfigDirectory: config.StaticDirectory("testdata/Schema/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ImportStateKind: resource.ImportCommandWithID, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + + // Step 3: Import block with Import ID + { + ConfigDirectory: config.StaticDirectory("testdata/Schema/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + }, + }, + }, + + // Step 4: Import block with Resource Identity + { + ConfigDirectory: config.StaticDirectory("testdata/Schema/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithResourceIdentity, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + 
plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + }, + }, + }, + }, + }) +} + +func TestAccGlueSchema_Identity_RegionOverride(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_glue_schema.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.GlueServiceID), + CheckDestroy: acctest.CheckDestroyNoop, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + // Step 1: Setup + { + ConfigDirectory: config.StaticDirectory("testdata/Schema/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New(names.AttrARN), compare.ValuesSame()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + + // Step 2: Import command with appended "@" + { + ConfigDirectory: config.StaticDirectory("testdata/Schema/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ImportStateKind: resource.ImportCommandWithID, + ImportStateIdFunc: acctest.CrossRegionImportStateIdFunc(resourceName), + ResourceName: resourceName, + ImportState: 
true, + ImportStateVerify: true, + }, + + // Step 3: Import command without appended "@" + { + ConfigDirectory: config.StaticDirectory("testdata/Schema/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ImportStateKind: resource.ImportCommandWithID, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + + // Step 4: Import block with Import ID and appended "@" + { + ConfigDirectory: config.StaticDirectory("testdata/Schema/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportStateIdFunc: acctest.CrossRegionImportStateIdFunc(resourceName), + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + }, + + // Step 5: Import block with Import ID and no appended "@" + { + ConfigDirectory: config.StaticDirectory("testdata/Schema/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), 
knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + }, + + // Step 6: Import block with Resource Identity + { + ConfigDirectory: config.StaticDirectory("testdata/Schema/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithResourceIdentity, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + }, + }, + }) +} + +// Resource Identity was added after v6.3.0 +func TestAccGlueSchema_Identity_ExistingResource(t *testing.T) { + ctx := acctest.Context(t) + + var v glue.GetSchemaOutput + resourceName := "aws_glue_schema.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.GlueServiceID), + CheckDestroy: testAccCheckSchemaDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/Schema/basic_v6.3.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSchemaExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + 
tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Schema/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + }, + }) +} + +// Resource Identity was added after v6.3.0 +func TestAccGlueSchema_Identity_ExistingResource_NoRefresh_NoChange(t *testing.T) { + ctx := acctest.Context(t) + + var v glue.GetSchemaOutput + resourceName := "aws_glue_schema.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.GlueServiceID), + CheckDestroy: testAccCheckSchemaDestroy(ctx), + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/Schema/basic_v6.3.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSchemaExists(ctx, resourceName, &v), + ), + ConfigStateChecks: 
[]statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Schema/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + }, + }) +} diff --git a/internal/service/glue/service_endpoint_resolver_gen.go b/internal/service/glue/service_endpoint_resolver_gen.go index 20203f46d1e7..e65a4a1260fd 100644 --- a/internal/service/glue/service_endpoint_resolver_gen.go +++ b/internal/service/glue/service_endpoint_resolver_gen.go @@ -62,7 +62,7 @@ func (r resolverV2) ResolveEndpoint(ctx context.Context, params glue.EndpointPar }) params.UseFIPS = aws.Bool(false) } else { - err = fmt.Errorf("looking up glue endpoint %q: %s", hostname, err) + err = fmt.Errorf("looking up glue endpoint %q: %w", hostname, err) return } } else { diff --git a/internal/service/glue/service_endpoints_gen_test.go b/internal/service/glue/service_endpoints_gen_test.go index c5feed9188fd..3746205cf590 100644 --- a/internal/service/glue/service_endpoints_gen_test.go +++ b/internal/service/glue/service_endpoints_gen_test.go @@ -521,7 +521,7 @@ func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { ) } -var errCancelOperation = fmt.Errorf("Test: Canceling request") +var errCancelOperation = errors.New("Test: Canceling request") func addCancelRequestMiddleware() func(*middleware.Stack) error { return func(stack *middleware.Stack) error { diff --git 
a/internal/service/glue/service_package_gen.go b/internal/service/glue/service_package_gen.go index 52f6037554d9..f6cda2c44b46 100644 --- a/internal/service/glue/service_package_gen.go +++ b/internal/service/glue/service_package_gen.go @@ -7,7 +7,6 @@ import ( "unique" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/glue" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -175,6 +174,12 @@ func (p *servicePackage) SDKResources(ctx context.Context) []*inttypes.ServicePa IdentifierAttribute: names.AttrARN, }), Region: unique.Make(inttypes.ResourceRegionDefault()), + Identity: inttypes.RegionalARNIdentity( + inttypes.WithIdentityDuplicateAttrs(names.AttrID), + ), + Import: inttypes.SDKv2Import{ + WrappedImport: true, + }, }, { Factory: resourceResourcePolicy, @@ -196,6 +201,12 @@ func (p *servicePackage) SDKResources(ctx context.Context) []*inttypes.ServicePa IdentifierAttribute: names.AttrARN, }), Region: unique.Make(inttypes.ResourceRegionDefault()), + Identity: inttypes.RegionalARNIdentity( + inttypes.WithIdentityDuplicateAttrs(names.AttrID), + ), + Import: inttypes.SDKv2Import{ + WrappedImport: true, + }, }, { Factory: resourceSecurityConfiguration, @@ -253,7 +264,7 @@ func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) ( func(o *glue.Options) { if inContext, ok := conns.FromContext(ctx); ok && inContext.VCREnabled() { tflog.Info(ctx, "overriding retry behavior to immediately return VCR errors") - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(vcr.InteractionNotFoundRetryableFunc)) + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), vcr.InteractionNotFoundRetryableFunc) } }, withExtraOptions(ctx, p, config), diff --git a/internal/service/glue/sweep.go b/internal/service/glue/sweep.go index 5ed7574ebd61..7b6f5d1e7d66 100644 --- 
a/internal/service/glue/sweep.go +++ b/internal/service/glue/sweep.go @@ -18,7 +18,10 @@ import ( ) func RegisterSweepers() { - awsv2.Register("aws_glue_catalog_database", sweepCatalogDatabases) + awsv2.Register("aws_glue_catalog_database", sweepCatalogDatabases, + "aws_datazone_environment", + ) + awsv2.Register("aws_glue_classifier", sweepClassifiers) awsv2.Register("aws_glue_connection", sweepConnections) awsv2.Register("aws_glue_crawler", sweepCrawlers) diff --git a/internal/service/glue/tags_gen.go b/internal/service/glue/tags_gen.go index 416530b7ee9d..1fde5077fc70 100644 --- a/internal/service/glue/tags_gen.go +++ b/internal/service/glue/tags_gen.go @@ -3,8 +3,8 @@ package glue import ( "context" - "fmt" + "github.com/YakDriver/smarterr" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/glue" "github.com/hashicorp/terraform-plugin-log/tflog" @@ -26,7 +26,7 @@ func listTags(ctx context.Context, conn *glue.Client, identifier string, optFns output, err := conn.GetTags(ctx, &input, optFns...) if err != nil { - return tftags.New(ctx, nil), err + return tftags.New(ctx, nil), smarterr.NewError(err) } return keyValueTags(ctx, output.Tags), nil @@ -38,7 +38,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri tags, err := listTags(ctx, meta.(*conns.AWSClient).GlueClient(ctx), identifier) if err != nil { - return err + return smarterr.NewError(err) } if inContext, ok := tftags.FromContext(ctx); ok { @@ -99,7 +99,7 @@ func updateTags(ctx context.Context, conn *glue.Client, identifier string, oldTa _, err := conn.UntagResource(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("untagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } @@ -114,7 +114,7 @@ func updateTags(ctx context.Context, conn *glue.Client, identifier string, oldTa _, err := conn.TagResource(ctx, &input, optFns...) 
if err != nil { - return fmt.Errorf("tagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } diff --git a/internal/service/glue/testdata/Registry/basic/main_gen.tf b/internal/service/glue/testdata/Registry/basic/main_gen.tf new file mode 100644 index 000000000000..af2a2ba07501 --- /dev/null +++ b/internal/service/glue/testdata/Registry/basic/main_gen.tf @@ -0,0 +1,12 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resource "aws_glue_registry" "test" { + registry_name = var.rName +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} diff --git a/internal/service/glue/testdata/Registry/basic_v6.3.0/main_gen.tf b/internal/service/glue/testdata/Registry/basic_v6.3.0/main_gen.tf new file mode 100644 index 000000000000..63b13b16e25b --- /dev/null +++ b/internal/service/glue/testdata/Registry/basic_v6.3.0/main_gen.tf @@ -0,0 +1,22 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resource "aws_glue_registry" "test" { + registry_name = var.rName +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "6.3.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/glue/testdata/Registry/region_override/main_gen.tf b/internal/service/glue/testdata/Registry/region_override/main_gen.tf new file mode 100644 index 000000000000..dd0bedb40e16 --- /dev/null +++ b/internal/service/glue/testdata/Registry/region_override/main_gen.tf @@ -0,0 +1,20 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_glue_registry" "test" { + region = var.region + + registry_name = var.rName +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} + +variable "region" { + description = "Region to deploy resource in" + type = string + nullable = false +} diff --git a/internal/service/glue/testdata/ResourcePolicy/basic_v5.100.0/main_gen.tf b/internal/service/glue/testdata/ResourcePolicy/basic_v5.100.0/main_gen.tf new file mode 100644 index 000000000000..6d2fd9eab10c --- /dev/null +++ b/internal/service/glue/testdata/ResourcePolicy/basic_v5.100.0/main_gen.tf @@ -0,0 +1,34 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resource "aws_glue_resource_policy" "test" { + policy = data.aws_iam_policy_document.glue-example-policy.json +} + +data "aws_iam_policy_document" "glue-example-policy" { + statement { + actions = ["glue:CreateTable"] + resources = ["arn:${data.aws_partition.current.partition}:glue:${data.aws_region.current.name}:${data.aws_caller_identity.current.account_id}:*"] + principals { + identifiers = ["*"] + type = "AWS" + } + } +} + +data "aws_region" "current" {} + +data "aws_partition" "current" {} + +data "aws_caller_identity" "current" {} + +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "5.100.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/glue/testdata/ResourcePolicy/basic_v6.0.0/main_gen.tf b/internal/service/glue/testdata/ResourcePolicy/basic_v6.0.0/main_gen.tf new file mode 100644 index 000000000000..287c21c18a12 --- /dev/null +++ b/internal/service/glue/testdata/ResourcePolicy/basic_v6.0.0/main_gen.tf @@ -0,0 +1,34 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_glue_resource_policy" "test" { + policy = data.aws_iam_policy_document.glue-example-policy.json +} + +data "aws_iam_policy_document" "glue-example-policy" { + statement { + actions = ["glue:CreateTable"] + resources = ["arn:${data.aws_partition.current.partition}:glue:${data.aws_region.current.region}:${data.aws_caller_identity.current.account_id}:*"] + principals { + identifiers = ["*"] + type = "AWS" + } + } +} + +data "aws_region" "current" {} + +data "aws_partition" "current" {} + +data "aws_caller_identity" "current" {} + +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "6.0.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/glue/testdata/Schema/basic/main_gen.tf b/internal/service/glue/testdata/Schema/basic/main_gen.tf new file mode 100644 index 000000000000..f2e196616732 --- /dev/null +++ b/internal/service/glue/testdata/Schema/basic/main_gen.tf @@ -0,0 +1,15 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resource "aws_glue_schema" "test" { + schema_name = var.rName + schema_definition = "{\"type\": \"record\", \"name\": \"r1\", \"fields\": [ {\"name\": \"f1\", \"type\": \"int\"}, {\"name\": \"f2\", \"type\": \"string\"} ]}" + data_format = "AVRO" + compatibility = "NONE" +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} diff --git a/internal/service/glue/testdata/Schema/basic_v6.3.0/main_gen.tf b/internal/service/glue/testdata/Schema/basic_v6.3.0/main_gen.tf new file mode 100644 index 000000000000..95c98ceb0105 --- /dev/null +++ b/internal/service/glue/testdata/Schema/basic_v6.3.0/main_gen.tf @@ -0,0 +1,25 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_glue_schema" "test" { + schema_name = var.rName + schema_definition = "{\"type\": \"record\", \"name\": \"r1\", \"fields\": [ {\"name\": \"f1\", \"type\": \"int\"}, {\"name\": \"f2\", \"type\": \"string\"} ]}" + data_format = "AVRO" + compatibility = "NONE" +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "6.3.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/glue/testdata/Schema/region_override/main_gen.tf b/internal/service/glue/testdata/Schema/region_override/main_gen.tf new file mode 100644 index 000000000000..10517708b27f --- /dev/null +++ b/internal/service/glue/testdata/Schema/region_override/main_gen.tf @@ -0,0 +1,23 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resource "aws_glue_schema" "test" { + region = var.region + + schema_name = var.rName + schema_definition = "{\"type\": \"record\", \"name\": \"r1\", \"fields\": [ {\"name\": \"f1\", \"type\": \"int\"}, {\"name\": \"f2\", \"type\": \"string\"} ]}" + data_format = "AVRO" + compatibility = "NONE" +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} + +variable "region" { + description = "Region to deploy resource in" + type = string + nullable = false +} diff --git a/internal/service/glue/testdata/tmpl/registry_tags.gtpl b/internal/service/glue/testdata/tmpl/registry_tags.gtpl new file mode 100644 index 000000000000..153939453530 --- /dev/null +++ b/internal/service/glue/testdata/tmpl/registry_tags.gtpl @@ -0,0 +1,4 @@ +resource "aws_glue_registry" "test" { +{{- template "region" }} + registry_name = var.rName +} diff --git a/internal/service/glue/testdata/tmpl/schema_tags.gtpl b/internal/service/glue/testdata/tmpl/schema_tags.gtpl new file mode 100644 index 000000000000..f9b380247afd --- /dev/null +++ 
b/internal/service/glue/testdata/tmpl/schema_tags.gtpl @@ -0,0 +1,7 @@ +resource "aws_glue_schema" "test" { +{{- template "region" }} + schema_name = var.rName + schema_definition = "{\"type\": \"record\", \"name\": \"r1\", \"fields\": [ {\"name\": \"f1\", \"type\": \"int\"}, {\"name\": \"f2\", \"type\": \"string\"} ]}" + data_format = "AVRO" + compatibility = "NONE" +} diff --git a/internal/service/glue/trigger.go b/internal/service/glue/trigger.go index 2d8de32bb27e..4ae3762564df 100644 --- a/internal/service/glue/trigger.go +++ b/internal/service/glue/trigger.go @@ -262,25 +262,22 @@ func resourceTriggerCreate(ctx context.Context, d *schema.ResourceData, meta any } log.Printf("[DEBUG] Creating Glue Trigger: %+v", input) - err := retry.RetryContext(ctx, propagationTimeout, func() *retry.RetryError { + err := tfresource.Retry(ctx, propagationTimeout, func(ctx context.Context) *tfresource.RetryError { _, err := conn.CreateTrigger(ctx, input) if err != nil { // Retry IAM propagation errors if errs.IsAErrorMessageContains[*awstypes.InvalidInputException](err, "Service is unable to assume provided role") { - return retry.RetryableError(err) + return tfresource.RetryableError(err) } // Retry concurrent workflow modification errors if errs.IsAErrorMessageContains[*awstypes.ConcurrentModificationException](err, "was modified while adding trigger") { - return retry.RetryableError(err) + return tfresource.RetryableError(err) } - return retry.NonRetryableError(err) + return tfresource.NonRetryableError(err) } return nil }) - if tfresource.TimedOut(err) { - _, err = conn.CreateTrigger(ctx, input) - } if err != nil { return sdkdiag.AppendErrorf(diags, "creating Glue Trigger (%s): %s", name, err) } diff --git a/internal/service/grafana/service_endpoint_resolver_gen.go b/internal/service/grafana/service_endpoint_resolver_gen.go index 60eb52badad1..3db2db1ea369 100644 --- a/internal/service/grafana/service_endpoint_resolver_gen.go +++ 
b/internal/service/grafana/service_endpoint_resolver_gen.go @@ -62,7 +62,7 @@ func (r resolverV2) ResolveEndpoint(ctx context.Context, params grafana.Endpoint }) params.UseFIPS = aws.Bool(false) } else { - err = fmt.Errorf("looking up grafana endpoint %q: %s", hostname, err) + err = fmt.Errorf("looking up grafana endpoint %q: %w", hostname, err) return } } else { diff --git a/internal/service/grafana/service_endpoints_gen_test.go b/internal/service/grafana/service_endpoints_gen_test.go index f37d5d933383..6e84f7ad1018 100644 --- a/internal/service/grafana/service_endpoints_gen_test.go +++ b/internal/service/grafana/service_endpoints_gen_test.go @@ -678,7 +678,7 @@ func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { ) } -var errCancelOperation = fmt.Errorf("Test: Canceling request") +var errCancelOperation = errors.New("Test: Canceling request") func addCancelRequestMiddleware() func(*middleware.Stack) error { return func(stack *middleware.Stack) error { diff --git a/internal/service/grafana/service_package_gen.go b/internal/service/grafana/service_package_gen.go index d482566cb182..8e7d1b39b4a7 100644 --- a/internal/service/grafana/service_package_gen.go +++ b/internal/service/grafana/service_package_gen.go @@ -7,7 +7,6 @@ import ( "unique" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/grafana" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -112,7 +111,7 @@ func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) ( func(o *grafana.Options) { if inContext, ok := conns.FromContext(ctx); ok && inContext.VCREnabled() { tflog.Info(ctx, "overriding retry behavior to immediately return VCR errors") - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(vcr.InteractionNotFoundRetryableFunc)) + o.Retryer = 
conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), vcr.InteractionNotFoundRetryableFunc) } }, withExtraOptions(ctx, p, config), diff --git a/internal/service/grafana/sweep.go b/internal/service/grafana/sweep.go index 65f7e31cd839..699d4b8953fb 100644 --- a/internal/service/grafana/sweep.go +++ b/internal/service/grafana/sweep.go @@ -25,7 +25,7 @@ func sweepWorkSpaces(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } input := &grafana.ListWorkspacesInput{} conn := client.GrafanaClient(ctx) diff --git a/internal/service/grafana/tags_gen.go b/internal/service/grafana/tags_gen.go index 8ec9d2023563..d6739bbe9202 100644 --- a/internal/service/grafana/tags_gen.go +++ b/internal/service/grafana/tags_gen.go @@ -3,8 +3,8 @@ package grafana import ( "context" - "fmt" + "github.com/YakDriver/smarterr" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/grafana" "github.com/hashicorp/terraform-plugin-log/tflog" @@ -26,7 +26,7 @@ func listTags(ctx context.Context, conn *grafana.Client, identifier string, optF output, err := conn.ListTagsForResource(ctx, &input, optFns...) if err != nil { - return tftags.New(ctx, nil), err + return tftags.New(ctx, nil), smarterr.NewError(err) } return keyValueTags(ctx, output.Tags), nil @@ -38,7 +38,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri tags, err := listTags(ctx, meta.(*conns.AWSClient).GrafanaClient(ctx), identifier) if err != nil { - return err + return smarterr.NewError(err) } if inContext, ok := tftags.FromContext(ctx); ok { @@ -99,7 +99,7 @@ func updateTags(ctx context.Context, conn *grafana.Client, identifier string, ol _, err := conn.UntagResource(ctx, &input, optFns...) 
if err != nil { - return fmt.Errorf("untagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } @@ -114,7 +114,7 @@ func updateTags(ctx context.Context, conn *grafana.Client, identifier string, ol _, err := conn.TagResource(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("tagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } diff --git a/internal/service/greengrass/service_endpoint_resolver_gen.go b/internal/service/greengrass/service_endpoint_resolver_gen.go index 0f2ecaba4598..e4bae3fdd2c9 100644 --- a/internal/service/greengrass/service_endpoint_resolver_gen.go +++ b/internal/service/greengrass/service_endpoint_resolver_gen.go @@ -62,7 +62,7 @@ func (r resolverV2) ResolveEndpoint(ctx context.Context, params greengrass.Endpo }) params.UseFIPS = aws.Bool(false) } else { - err = fmt.Errorf("looking up greengrass endpoint %q: %s", hostname, err) + err = fmt.Errorf("looking up greengrass endpoint %q: %w", hostname, err) return } } else { diff --git a/internal/service/greengrass/service_endpoints_gen_test.go b/internal/service/greengrass/service_endpoints_gen_test.go index ea1e708c0f24..0b62ac2adb1c 100644 --- a/internal/service/greengrass/service_endpoints_gen_test.go +++ b/internal/service/greengrass/service_endpoints_gen_test.go @@ -521,7 +521,7 @@ func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { ) } -var errCancelOperation = fmt.Errorf("Test: Canceling request") +var errCancelOperation = errors.New("Test: Canceling request") func addCancelRequestMiddleware() func(*middleware.Stack) error { return func(stack *middleware.Stack) error { diff --git a/internal/service/greengrass/service_package_gen.go b/internal/service/greengrass/service_package_gen.go index cd3ba85a7c56..23617bb8de28 100644 --- a/internal/service/greengrass/service_package_gen.go +++ b/internal/service/greengrass/service_package_gen.go @@ -6,7 +6,6 @@ import ( "context" "github.com/aws/aws-sdk-go-v2/aws" - 
"github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/greengrass" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -56,7 +55,7 @@ func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) ( func(o *greengrass.Options) { if inContext, ok := conns.FromContext(ctx); ok && inContext.VCREnabled() { tflog.Info(ctx, "overriding retry behavior to immediately return VCR errors") - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(vcr.InteractionNotFoundRetryableFunc)) + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), vcr.InteractionNotFoundRetryableFunc) } }, withExtraOptions(ctx, p, config), diff --git a/internal/service/greengrass/tags_gen.go b/internal/service/greengrass/tags_gen.go index 0d682ca24cd4..fc3b6f37ed70 100644 --- a/internal/service/greengrass/tags_gen.go +++ b/internal/service/greengrass/tags_gen.go @@ -3,8 +3,8 @@ package greengrass import ( "context" - "fmt" + "github.com/YakDriver/smarterr" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/greengrass" "github.com/hashicorp/terraform-plugin-log/tflog" @@ -26,7 +26,7 @@ func listTags(ctx context.Context, conn *greengrass.Client, identifier string, o output, err := conn.ListTagsForResource(ctx, &input, optFns...) if err != nil { - return tftags.New(ctx, nil), err + return tftags.New(ctx, nil), smarterr.NewError(err) } return keyValueTags(ctx, output.Tags), nil @@ -38,7 +38,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri tags, err := listTags(ctx, meta.(*conns.AWSClient).GreengrassClient(ctx), identifier) if err != nil { - return err + return smarterr.NewError(err) } if inContext, ok := tftags.FromContext(ctx); ok { @@ -99,7 +99,7 @@ func updateTags(ctx context.Context, conn *greengrass.Client, identifier string, _, err := conn.UntagResource(ctx, &input, optFns...) 
if err != nil { - return fmt.Errorf("untagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } @@ -114,7 +114,7 @@ func updateTags(ctx context.Context, conn *greengrass.Client, identifier string, _, err := conn.TagResource(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("tagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } diff --git a/internal/service/groundstation/service_endpoint_resolver_gen.go b/internal/service/groundstation/service_endpoint_resolver_gen.go index c4f9330cf6ad..0d697f5e6977 100644 --- a/internal/service/groundstation/service_endpoint_resolver_gen.go +++ b/internal/service/groundstation/service_endpoint_resolver_gen.go @@ -62,7 +62,7 @@ func (r resolverV2) ResolveEndpoint(ctx context.Context, params groundstation.En }) params.UseFIPS = aws.Bool(false) } else { - err = fmt.Errorf("looking up groundstation endpoint %q: %s", hostname, err) + err = fmt.Errorf("looking up groundstation endpoint %q: %w", hostname, err) return } } else { diff --git a/internal/service/groundstation/service_endpoints_gen_test.go b/internal/service/groundstation/service_endpoints_gen_test.go index 148c561a797c..8d00b9c05164 100644 --- a/internal/service/groundstation/service_endpoints_gen_test.go +++ b/internal/service/groundstation/service_endpoints_gen_test.go @@ -521,7 +521,7 @@ func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { ) } -var errCancelOperation = fmt.Errorf("Test: Canceling request") +var errCancelOperation = errors.New("Test: Canceling request") func addCancelRequestMiddleware() func(*middleware.Stack) error { return func(stack *middleware.Stack) error { diff --git a/internal/service/groundstation/service_package_gen.go b/internal/service/groundstation/service_package_gen.go index 42270d05bced..e713f1b8ee8d 100644 --- a/internal/service/groundstation/service_package_gen.go +++ b/internal/service/groundstation/service_package_gen.go @@ -6,7 +6,6 @@ import ( 
"context" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/groundstation" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -56,7 +55,7 @@ func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) ( func(o *groundstation.Options) { if inContext, ok := conns.FromContext(ctx); ok && inContext.VCREnabled() { tflog.Info(ctx, "overriding retry behavior to immediately return VCR errors") - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(vcr.InteractionNotFoundRetryableFunc)) + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), vcr.InteractionNotFoundRetryableFunc) } }, withExtraOptions(ctx, p, config), diff --git a/internal/service/guardduty/detector.go b/internal/service/guardduty/detector.go index 36ba878de765..1d246f1927d6 100644 --- a/internal/service/guardduty/detector.go +++ b/internal/service/guardduty/detector.go @@ -247,7 +247,7 @@ func resourceDetectorDelete(ctx context.Context, d *schema.ResourceData, meta an conn := meta.(*conns.AWSClient).GuardDutyClient(ctx) log.Printf("[DEBUG] Deleting GuardDuty Detector: %s", d.Id()) - _, err := tfresource.RetryWhenIsAErrorMessageContains[*awstypes.BadRequestException](ctx, membershipPropagationTimeout, func() (any, error) { + _, err := tfresource.RetryWhenIsAErrorMessageContains[any, *awstypes.BadRequestException](ctx, membershipPropagationTimeout, func(ctx context.Context) (any, error) { return conn.DeleteDetector(ctx, &guardduty.DeleteDetectorInput{ DetectorId: aws.String(d.Id()), }) diff --git a/internal/service/guardduty/detector_data_source_tags_gen_test.go b/internal/service/guardduty/detector_data_source_tags_gen_test.go index a5fc9993ea95..a40dcaeec217 100644 --- a/internal/service/guardduty/detector_data_source_tags_gen_test.go +++ 
b/internal/service/guardduty/detector_data_source_tags_gen_test.go @@ -36,9 +36,10 @@ func testAccGuardDutyDetectorDataSource_tagsSerial(t *testing.T) { func testAccGuardDutyDetectorDataSource_tags(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_guardduty_detector.test" - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.GuardDutyServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -62,9 +63,10 @@ func testAccGuardDutyDetectorDataSource_tags(t *testing.T) { func testAccGuardDutyDetectorDataSource_tags_NullMap(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_guardduty_detector.test" - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.GuardDutyServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -84,9 +86,10 @@ func testAccGuardDutyDetectorDataSource_tags_NullMap(t *testing.T) { func testAccGuardDutyDetectorDataSource_tags_EmptyMap(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_guardduty_detector.test" - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.GuardDutyServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -106,9 +109,10 @@ func testAccGuardDutyDetectorDataSource_tags_EmptyMap(t *testing.T) { func testAccGuardDutyDetectorDataSource_tags_DefaultTags_nonOverlapping(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_guardduty_detector.test" - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.GuardDutyServiceID), Steps: []resource.TestStep{ @@ -136,9 
+140,10 @@ func testAccGuardDutyDetectorDataSource_tags_DefaultTags_nonOverlapping(t *testi func testAccGuardDutyDetectorDataSource_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_guardduty_detector.test" - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.GuardDutyServiceID), Steps: []resource.TestStep{ @@ -172,9 +177,10 @@ func testAccGuardDutyDetectorDataSource_tags_IgnoreTags_Overlap_DefaultTag(t *te func testAccGuardDutyDetectorDataSource_tags_IgnoreTags_Overlap_ResourceTag(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_guardduty_detector.test" - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.GuardDutyServiceID), Steps: []resource.TestStep{ diff --git a/internal/service/guardduty/detector_tags_gen_test.go b/internal/service/guardduty/detector_tags_gen_test.go index 61c6b493b528..e860148ab961 100644 --- a/internal/service/guardduty/detector_tags_gen_test.go +++ b/internal/service/guardduty/detector_tags_gen_test.go @@ -46,9 +46,10 @@ func testAccGuardDutyDetector_tagsSerial(t *testing.T) { func testAccGuardDutyDetector_tags(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_guardduty_detector.test" - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) testAccPreCheckDetectorNotExists(ctx, t) @@ -221,9 +222,10 @@ func testAccGuardDutyDetector_tags(t *testing.T) { func testAccGuardDutyDetector_tags_null(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_guardduty_detector.test" - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) testAccPreCheckDetectorNotExists(ctx, t) @@ -286,9 
+288,10 @@ func testAccGuardDutyDetector_tags_null(t *testing.T) { func testAccGuardDutyDetector_tags_EmptyMap(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_guardduty_detector.test" - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) testAccPreCheckDetectorNotExists(ctx, t) @@ -347,9 +350,10 @@ func testAccGuardDutyDetector_tags_EmptyMap(t *testing.T) { func testAccGuardDutyDetector_tags_AddOnUpdate(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_guardduty_detector.test" - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) testAccPreCheckDetectorNotExists(ctx, t) @@ -426,9 +430,10 @@ func testAccGuardDutyDetector_tags_AddOnUpdate(t *testing.T) { func testAccGuardDutyDetector_tags_EmptyTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_guardduty_detector.test" - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) testAccPreCheckDetectorNotExists(ctx, t) @@ -512,9 +517,10 @@ func testAccGuardDutyDetector_tags_EmptyTag_OnCreate(t *testing.T) { func testAccGuardDutyDetector_tags_EmptyTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_guardduty_detector.test" - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) testAccPreCheckDetectorNotExists(ctx, t) @@ -645,9 +651,10 @@ func testAccGuardDutyDetector_tags_EmptyTag_OnUpdate_Add(t *testing.T) { func testAccGuardDutyDetector_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_guardduty_detector.test" - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) testAccPreCheckDetectorNotExists(ctx, t) @@ -732,9 +739,10 @@ func 
testAccGuardDutyDetector_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { func testAccGuardDutyDetector_tags_DefaultTags_providerOnly(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_guardduty_detector.test" - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) testAccPreCheckDetectorNotExists(ctx, t) @@ -906,9 +914,10 @@ func testAccGuardDutyDetector_tags_DefaultTags_providerOnly(t *testing.T) { func testAccGuardDutyDetector_tags_DefaultTags_nonOverlapping(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_guardduty_detector.test" - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) testAccPreCheckDetectorNotExists(ctx, t) @@ -1061,9 +1070,10 @@ func testAccGuardDutyDetector_tags_DefaultTags_nonOverlapping(t *testing.T) { func testAccGuardDutyDetector_tags_DefaultTags_overlapping(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_guardduty_detector.test" - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) testAccPreCheckDetectorNotExists(ctx, t) @@ -1232,9 +1242,10 @@ func testAccGuardDutyDetector_tags_DefaultTags_overlapping(t *testing.T) { func testAccGuardDutyDetector_tags_DefaultTags_updateToProviderOnly(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_guardduty_detector.test" - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) testAccPreCheckDetectorNotExists(ctx, t) @@ -1320,9 +1331,10 @@ func testAccGuardDutyDetector_tags_DefaultTags_updateToProviderOnly(t *testing.T func testAccGuardDutyDetector_tags_DefaultTags_updateToResourceOnly(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_guardduty_detector.test" - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { 
acctest.PreCheck(ctx, t) testAccPreCheckDetectorNotExists(ctx, t) @@ -1407,9 +1419,10 @@ func testAccGuardDutyDetector_tags_DefaultTags_updateToResourceOnly(t *testing.T func testAccGuardDutyDetector_tags_DefaultTags_emptyResourceTag(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_guardduty_detector.test" - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) testAccPreCheckDetectorNotExists(ctx, t) @@ -1471,9 +1484,10 @@ func testAccGuardDutyDetector_tags_DefaultTags_emptyResourceTag(t *testing.T) { func testAccGuardDutyDetector_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_guardduty_detector.test" - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) testAccPreCheckDetectorNotExists(ctx, t) @@ -1527,9 +1541,10 @@ func testAccGuardDutyDetector_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T func testAccGuardDutyDetector_tags_DefaultTags_nullOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_guardduty_detector.test" - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) testAccPreCheckDetectorNotExists(ctx, t) @@ -1588,9 +1603,10 @@ func testAccGuardDutyDetector_tags_DefaultTags_nullOverlappingResourceTag(t *tes func testAccGuardDutyDetector_tags_DefaultTags_nullNonOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_guardduty_detector.test" - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) testAccPreCheckDetectorNotExists(ctx, t) @@ -1649,9 +1665,10 @@ func testAccGuardDutyDetector_tags_DefaultTags_nullNonOverlappingResourceTag(t * func testAccGuardDutyDetector_tags_ComputedTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) + 
resourceName := "aws_guardduty_detector.test" - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) testAccPreCheckDetectorNotExists(ctx, t) @@ -1703,9 +1720,10 @@ func testAccGuardDutyDetector_tags_ComputedTag_OnCreate(t *testing.T) { func testAccGuardDutyDetector_tags_ComputedTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_guardduty_detector.test" - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) testAccPreCheckDetectorNotExists(ctx, t) @@ -1798,9 +1816,10 @@ func testAccGuardDutyDetector_tags_ComputedTag_OnUpdate_Add(t *testing.T) { func testAccGuardDutyDetector_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_guardduty_detector.test" - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) testAccPreCheckDetectorNotExists(ctx, t) @@ -1883,9 +1902,10 @@ func testAccGuardDutyDetector_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { func testAccGuardDutyDetector_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_guardduty_detector.test" - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) testAccPreCheckDetectorNotExists(ctx, t) @@ -2043,9 +2063,10 @@ func testAccGuardDutyDetector_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { func testAccGuardDutyDetector_tags_IgnoreTags_Overlap_ResourceTag(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_guardduty_detector.test" - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) testAccPreCheckDetectorNotExists(ctx, t) diff --git a/internal/service/guardduty/filter_tags_gen_test.go 
b/internal/service/guardduty/filter_tags_gen_test.go index 73809a5bdf29..aa067aa8b019 100644 --- a/internal/service/guardduty/filter_tags_gen_test.go +++ b/internal/service/guardduty/filter_tags_gen_test.go @@ -47,10 +47,11 @@ func testAccGuardDutyFilter_tagsSerial(t *testing.T) { func testAccGuardDutyFilter_tags(t *testing.T) { ctx := acctest.Context(t) + var v guardduty.GetFilterOutput resourceName := "aws_guardduty_filter.test" - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) testAccPreCheckDetectorNotExists(ctx, t) @@ -223,10 +224,11 @@ func testAccGuardDutyFilter_tags(t *testing.T) { func testAccGuardDutyFilter_tags_null(t *testing.T) { ctx := acctest.Context(t) + var v guardduty.GetFilterOutput resourceName := "aws_guardduty_filter.test" - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) testAccPreCheckDetectorNotExists(ctx, t) @@ -289,10 +291,11 @@ func testAccGuardDutyFilter_tags_null(t *testing.T) { func testAccGuardDutyFilter_tags_EmptyMap(t *testing.T) { ctx := acctest.Context(t) + var v guardduty.GetFilterOutput resourceName := "aws_guardduty_filter.test" - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) testAccPreCheckDetectorNotExists(ctx, t) @@ -351,10 +354,11 @@ func testAccGuardDutyFilter_tags_EmptyMap(t *testing.T) { func testAccGuardDutyFilter_tags_AddOnUpdate(t *testing.T) { ctx := acctest.Context(t) + var v guardduty.GetFilterOutput resourceName := "aws_guardduty_filter.test" - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) testAccPreCheckDetectorNotExists(ctx, t) @@ -431,10 +435,11 @@ func testAccGuardDutyFilter_tags_AddOnUpdate(t *testing.T) { func testAccGuardDutyFilter_tags_EmptyTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) + var v 
guardduty.GetFilterOutput resourceName := "aws_guardduty_filter.test" - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) testAccPreCheckDetectorNotExists(ctx, t) @@ -518,10 +523,11 @@ func testAccGuardDutyFilter_tags_EmptyTag_OnCreate(t *testing.T) { func testAccGuardDutyFilter_tags_EmptyTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + var v guardduty.GetFilterOutput resourceName := "aws_guardduty_filter.test" - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) testAccPreCheckDetectorNotExists(ctx, t) @@ -652,10 +658,11 @@ func testAccGuardDutyFilter_tags_EmptyTag_OnUpdate_Add(t *testing.T) { func testAccGuardDutyFilter_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + var v guardduty.GetFilterOutput resourceName := "aws_guardduty_filter.test" - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) testAccPreCheckDetectorNotExists(ctx, t) @@ -740,10 +747,11 @@ func testAccGuardDutyFilter_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { func testAccGuardDutyFilter_tags_DefaultTags_providerOnly(t *testing.T) { ctx := acctest.Context(t) + var v guardduty.GetFilterOutput resourceName := "aws_guardduty_filter.test" - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) testAccPreCheckDetectorNotExists(ctx, t) @@ -915,10 +923,11 @@ func testAccGuardDutyFilter_tags_DefaultTags_providerOnly(t *testing.T) { func testAccGuardDutyFilter_tags_DefaultTags_nonOverlapping(t *testing.T) { ctx := acctest.Context(t) + var v guardduty.GetFilterOutput resourceName := "aws_guardduty_filter.test" - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) testAccPreCheckDetectorNotExists(ctx, t) @@ -1071,10 
+1080,11 @@ func testAccGuardDutyFilter_tags_DefaultTags_nonOverlapping(t *testing.T) { func testAccGuardDutyFilter_tags_DefaultTags_overlapping(t *testing.T) { ctx := acctest.Context(t) + var v guardduty.GetFilterOutput resourceName := "aws_guardduty_filter.test" - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) testAccPreCheckDetectorNotExists(ctx, t) @@ -1243,10 +1253,11 @@ func testAccGuardDutyFilter_tags_DefaultTags_overlapping(t *testing.T) { func testAccGuardDutyFilter_tags_DefaultTags_updateToProviderOnly(t *testing.T) { ctx := acctest.Context(t) + var v guardduty.GetFilterOutput resourceName := "aws_guardduty_filter.test" - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) testAccPreCheckDetectorNotExists(ctx, t) @@ -1332,10 +1343,11 @@ func testAccGuardDutyFilter_tags_DefaultTags_updateToProviderOnly(t *testing.T) func testAccGuardDutyFilter_tags_DefaultTags_updateToResourceOnly(t *testing.T) { ctx := acctest.Context(t) + var v guardduty.GetFilterOutput resourceName := "aws_guardduty_filter.test" - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) testAccPreCheckDetectorNotExists(ctx, t) @@ -1420,10 +1432,11 @@ func testAccGuardDutyFilter_tags_DefaultTags_updateToResourceOnly(t *testing.T) func testAccGuardDutyFilter_tags_DefaultTags_emptyResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v guardduty.GetFilterOutput resourceName := "aws_guardduty_filter.test" - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) testAccPreCheckDetectorNotExists(ctx, t) @@ -1485,10 +1498,11 @@ func testAccGuardDutyFilter_tags_DefaultTags_emptyResourceTag(t *testing.T) { func testAccGuardDutyFilter_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T) { ctx := acctest.Context(t) + var 
v guardduty.GetFilterOutput resourceName := "aws_guardduty_filter.test" - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) testAccPreCheckDetectorNotExists(ctx, t) @@ -1542,10 +1556,11 @@ func testAccGuardDutyFilter_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T) func testAccGuardDutyFilter_tags_DefaultTags_nullOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v guardduty.GetFilterOutput resourceName := "aws_guardduty_filter.test" - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) testAccPreCheckDetectorNotExists(ctx, t) @@ -1604,10 +1619,11 @@ func testAccGuardDutyFilter_tags_DefaultTags_nullOverlappingResourceTag(t *testi func testAccGuardDutyFilter_tags_DefaultTags_nullNonOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v guardduty.GetFilterOutput resourceName := "aws_guardduty_filter.test" - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) testAccPreCheckDetectorNotExists(ctx, t) @@ -1666,10 +1682,11 @@ func testAccGuardDutyFilter_tags_DefaultTags_nullNonOverlappingResourceTag(t *te func testAccGuardDutyFilter_tags_ComputedTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) + var v guardduty.GetFilterOutput resourceName := "aws_guardduty_filter.test" - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) testAccPreCheckDetectorNotExists(ctx, t) @@ -1721,10 +1738,11 @@ func testAccGuardDutyFilter_tags_ComputedTag_OnCreate(t *testing.T) { func testAccGuardDutyFilter_tags_ComputedTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + var v guardduty.GetFilterOutput resourceName := "aws_guardduty_filter.test" - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, 
t) testAccPreCheckDetectorNotExists(ctx, t) @@ -1817,10 +1835,11 @@ func testAccGuardDutyFilter_tags_ComputedTag_OnUpdate_Add(t *testing.T) { func testAccGuardDutyFilter_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + var v guardduty.GetFilterOutput resourceName := "aws_guardduty_filter.test" - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) testAccPreCheckDetectorNotExists(ctx, t) @@ -1903,10 +1922,11 @@ func testAccGuardDutyFilter_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { func testAccGuardDutyFilter_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { ctx := acctest.Context(t) + var v guardduty.GetFilterOutput resourceName := "aws_guardduty_filter.test" - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) testAccPreCheckDetectorNotExists(ctx, t) @@ -2064,10 +2084,11 @@ func testAccGuardDutyFilter_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { func testAccGuardDutyFilter_tags_IgnoreTags_Overlap_ResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v guardduty.GetFilterOutput resourceName := "aws_guardduty_filter.test" - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) testAccPreCheckDetectorNotExists(ctx, t) diff --git a/internal/service/guardduty/invite_accepter.go b/internal/service/guardduty/invite_accepter.go index 185c79cf8beb..738c73dbc907 100644 --- a/internal/service/guardduty/invite_accepter.go +++ b/internal/service/guardduty/invite_accepter.go @@ -60,7 +60,7 @@ func resourceInviteAccepterCreate(ctx context.Context, d *schema.ResourceData, m masterAccountID := d.Get("master_account_id").(string) inputLI := &guardduty.ListInvitationsInput{} - outputRaw, err := tfresource.RetryWhenNotFound(ctx, d.Timeout(schema.TimeoutCreate), func() (any, error) { + output, err := 
tfresource.RetryWhenNotFound(ctx, d.Timeout(schema.TimeoutCreate), func(ctx context.Context) (*awstypes.Invitation, error) { return findInvitation(ctx, conn, inputLI, func(v *awstypes.Invitation) bool { return aws.ToString(v.AccountId) == masterAccountID }) @@ -70,7 +70,7 @@ func resourceInviteAccepterCreate(ctx context.Context, d *schema.ResourceData, m return sdkdiag.AppendErrorf(diags, "reading GuardDuty Invitation (%s): %s", masterAccountID, err) } - invitationID := aws.ToString(outputRaw.(*awstypes.Invitation).InvitationId) + invitationID := aws.ToString(output.InvitationId) inputAI := &guardduty.AcceptInvitationInput{ DetectorId: aws.String(detectorID), InvitationId: aws.String(invitationID), diff --git a/internal/service/guardduty/ipset_tags_gen_test.go b/internal/service/guardduty/ipset_tags_gen_test.go index df51e5fd5cbb..b677689653e2 100644 --- a/internal/service/guardduty/ipset_tags_gen_test.go +++ b/internal/service/guardduty/ipset_tags_gen_test.go @@ -6,7 +6,6 @@ import ( "testing" "github.com/hashicorp/terraform-plugin-testing/config" - sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/knownvalue" "github.com/hashicorp/terraform-plugin-testing/plancheck" @@ -47,10 +46,11 @@ func testAccGuardDutyIPSet_tagsSerial(t *testing.T) { func testAccGuardDutyIPSet_tags(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_guardduty_ipset.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) testAccPreCheckDetectorNotExists(ctx, t) @@ -231,10 +231,11 @@ func testAccGuardDutyIPSet_tags(t *testing.T) { func testAccGuardDutyIPSet_tags_null(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_guardduty_ipset.test" 
- rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) testAccPreCheckDetectorNotExists(ctx, t) @@ -300,10 +301,11 @@ func testAccGuardDutyIPSet_tags_null(t *testing.T) { func testAccGuardDutyIPSet_tags_EmptyMap(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_guardduty_ipset.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) testAccPreCheckDetectorNotExists(ctx, t) @@ -365,10 +367,11 @@ func testAccGuardDutyIPSet_tags_EmptyMap(t *testing.T) { func testAccGuardDutyIPSet_tags_AddOnUpdate(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_guardduty_ipset.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) testAccPreCheckDetectorNotExists(ctx, t) @@ -448,10 +451,11 @@ func testAccGuardDutyIPSet_tags_AddOnUpdate(t *testing.T) { func testAccGuardDutyIPSet_tags_EmptyTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_guardduty_ipset.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) testAccPreCheckDetectorNotExists(ctx, t) @@ -539,10 +543,11 @@ func testAccGuardDutyIPSet_tags_EmptyTag_OnCreate(t *testing.T) { func testAccGuardDutyIPSet_tags_EmptyTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + resourceName := 
"aws_guardduty_ipset.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) testAccPreCheckDetectorNotExists(ctx, t) @@ -678,10 +683,11 @@ func testAccGuardDutyIPSet_tags_EmptyTag_OnUpdate_Add(t *testing.T) { func testAccGuardDutyIPSet_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_guardduty_ipset.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) testAccPreCheckDetectorNotExists(ctx, t) @@ -769,10 +775,11 @@ func testAccGuardDutyIPSet_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { func testAccGuardDutyIPSet_tags_DefaultTags_providerOnly(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_guardduty_ipset.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) testAccPreCheckDetectorNotExists(ctx, t) @@ -952,10 +959,11 @@ func testAccGuardDutyIPSet_tags_DefaultTags_providerOnly(t *testing.T) { func testAccGuardDutyIPSet_tags_DefaultTags_nonOverlapping(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_guardduty_ipset.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) testAccPreCheckDetectorNotExists(ctx, t) @@ -1114,10 +1122,11 @@ func testAccGuardDutyIPSet_tags_DefaultTags_nonOverlapping(t 
*testing.T) { func testAccGuardDutyIPSet_tags_DefaultTags_overlapping(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_guardduty_ipset.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) testAccPreCheckDetectorNotExists(ctx, t) @@ -1292,10 +1301,11 @@ func testAccGuardDutyIPSet_tags_DefaultTags_overlapping(t *testing.T) { func testAccGuardDutyIPSet_tags_DefaultTags_updateToProviderOnly(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_guardduty_ipset.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) testAccPreCheckDetectorNotExists(ctx, t) @@ -1384,10 +1394,11 @@ func testAccGuardDutyIPSet_tags_DefaultTags_updateToProviderOnly(t *testing.T) { func testAccGuardDutyIPSet_tags_DefaultTags_updateToResourceOnly(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_guardduty_ipset.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) testAccPreCheckDetectorNotExists(ctx, t) @@ -1475,10 +1486,11 @@ func testAccGuardDutyIPSet_tags_DefaultTags_updateToResourceOnly(t *testing.T) { func testAccGuardDutyIPSet_tags_DefaultTags_emptyResourceTag(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_guardduty_ipset.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ 
PreCheck: func() { acctest.PreCheck(ctx, t) testAccPreCheckDetectorNotExists(ctx, t) @@ -1542,10 +1554,11 @@ func testAccGuardDutyIPSet_tags_DefaultTags_emptyResourceTag(t *testing.T) { func testAccGuardDutyIPSet_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_guardduty_ipset.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) testAccPreCheckDetectorNotExists(ctx, t) @@ -1601,10 +1614,11 @@ func testAccGuardDutyIPSet_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T) { func testAccGuardDutyIPSet_tags_DefaultTags_nullOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_guardduty_ipset.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) testAccPreCheckDetectorNotExists(ctx, t) @@ -1665,10 +1679,11 @@ func testAccGuardDutyIPSet_tags_DefaultTags_nullOverlappingResourceTag(t *testin func testAccGuardDutyIPSet_tags_DefaultTags_nullNonOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_guardduty_ipset.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) testAccPreCheckDetectorNotExists(ctx, t) @@ -1729,10 +1744,11 @@ func testAccGuardDutyIPSet_tags_DefaultTags_nullNonOverlappingResourceTag(t *tes func testAccGuardDutyIPSet_tags_ComputedTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_guardduty_ipset.test" - rName := 
sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) testAccPreCheckDetectorNotExists(ctx, t) @@ -1786,10 +1802,11 @@ func testAccGuardDutyIPSet_tags_ComputedTag_OnCreate(t *testing.T) { func testAccGuardDutyIPSet_tags_ComputedTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_guardduty_ipset.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) testAccPreCheckDetectorNotExists(ctx, t) @@ -1885,10 +1902,11 @@ func testAccGuardDutyIPSet_tags_ComputedTag_OnUpdate_Add(t *testing.T) { func testAccGuardDutyIPSet_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_guardduty_ipset.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) testAccPreCheckDetectorNotExists(ctx, t) @@ -1974,10 +1992,11 @@ func testAccGuardDutyIPSet_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { func testAccGuardDutyIPSet_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_guardduty_ipset.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) testAccPreCheckDetectorNotExists(ctx, t) @@ -2138,10 +2157,11 @@ func testAccGuardDutyIPSet_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { func 
testAccGuardDutyIPSet_tags_IgnoreTags_Overlap_ResourceTag(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_guardduty_ipset.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) testAccPreCheckDetectorNotExists(ctx, t) diff --git a/internal/service/guardduty/malware_protection_plan.go b/internal/service/guardduty/malware_protection_plan.go index 5ce75a9d566d..c2f60cfc496f 100644 --- a/internal/service/guardduty/malware_protection_plan.go +++ b/internal/service/guardduty/malware_protection_plan.go @@ -149,18 +149,18 @@ func (r *malwareProtectionPlanResource) Create(ctx context.Context, req resource var out *guardduty.CreateMalwareProtectionPlanOutput - err := tfresource.Retry(ctx, iamPropagationTimeout, func() *retry.RetryError { + err := tfresource.Retry(ctx, iamPropagationTimeout, func(ctx context.Context) *tfresource.RetryError { var err error out, err = conn.CreateMalwareProtectionPlan(ctx, input) if err != nil { var nfe *awstypes.ResourceNotFoundException var bre *awstypes.BadRequestException // Error returned due to IAM eventual consistency if errors.As(err, &nfe) { - return retry.RetryableError(err) + return tfresource.RetryableError(err) } else if errors.As(err, &bre) { - return retry.RetryableError(err) + return tfresource.RetryableError(err) } - return retry.NonRetryableError(err) + return tfresource.NonRetryableError(err) } return nil diff --git a/internal/service/guardduty/malware_protection_plan_tags_gen_test.go b/internal/service/guardduty/malware_protection_plan_tags_gen_test.go index 727504accc3f..6f9d2dbdfc52 100644 --- a/internal/service/guardduty/malware_protection_plan_tags_gen_test.go +++ b/internal/service/guardduty/malware_protection_plan_tags_gen_test.go @@ -7,7 +7,6 @@ import ( "github.com/aws/aws-sdk-go-v2/service/guardduty" 
"github.com/hashicorp/terraform-plugin-testing/config" - sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/knownvalue" "github.com/hashicorp/terraform-plugin-testing/plancheck" @@ -19,11 +18,12 @@ import ( func TestAccGuardDutyMalwareProtectionPlan_tags(t *testing.T) { ctx := acctest.Context(t) + var v guardduty.GetMalwareProtectionPlanOutput resourceName := "aws_guardduty_malware_protection_plan.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) testAccPreCheck(ctx, t) @@ -204,11 +204,12 @@ func TestAccGuardDutyMalwareProtectionPlan_tags(t *testing.T) { func TestAccGuardDutyMalwareProtectionPlan_tags_null(t *testing.T) { ctx := acctest.Context(t) + var v guardduty.GetMalwareProtectionPlanOutput resourceName := "aws_guardduty_malware_protection_plan.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) testAccPreCheck(ctx, t) @@ -269,11 +270,12 @@ func TestAccGuardDutyMalwareProtectionPlan_tags_null(t *testing.T) { func TestAccGuardDutyMalwareProtectionPlan_tags_EmptyMap(t *testing.T) { ctx := acctest.Context(t) + var v guardduty.GetMalwareProtectionPlanOutput resourceName := "aws_guardduty_malware_protection_plan.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) testAccPreCheck(ctx, 
t) @@ -322,11 +324,12 @@ func TestAccGuardDutyMalwareProtectionPlan_tags_EmptyMap(t *testing.T) { func TestAccGuardDutyMalwareProtectionPlan_tags_AddOnUpdate(t *testing.T) { ctx := acctest.Context(t) + var v guardduty.GetMalwareProtectionPlanOutput resourceName := "aws_guardduty_malware_protection_plan.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) testAccPreCheck(ctx, t) @@ -405,11 +408,12 @@ func TestAccGuardDutyMalwareProtectionPlan_tags_AddOnUpdate(t *testing.T) { func TestAccGuardDutyMalwareProtectionPlan_tags_EmptyTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) + var v guardduty.GetMalwareProtectionPlanOutput resourceName := "aws_guardduty_malware_protection_plan.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) testAccPreCheck(ctx, t) @@ -498,11 +502,12 @@ func TestAccGuardDutyMalwareProtectionPlan_tags_EmptyTag_OnCreate(t *testing.T) func TestAccGuardDutyMalwareProtectionPlan_tags_EmptyTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + var v guardduty.GetMalwareProtectionPlanOutput resourceName := "aws_guardduty_malware_protection_plan.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) testAccPreCheck(ctx, t) @@ -640,11 +645,12 @@ func TestAccGuardDutyMalwareProtectionPlan_tags_EmptyTag_OnUpdate_Add(t *testing func TestAccGuardDutyMalwareProtectionPlan_tags_EmptyTag_OnUpdate_Replace(t 
*testing.T) { ctx := acctest.Context(t) + var v guardduty.GetMalwareProtectionPlanOutput resourceName := "aws_guardduty_malware_protection_plan.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) testAccPreCheck(ctx, t) @@ -733,11 +739,12 @@ func TestAccGuardDutyMalwareProtectionPlan_tags_EmptyTag_OnUpdate_Replace(t *tes func TestAccGuardDutyMalwareProtectionPlan_tags_DefaultTags_providerOnly(t *testing.T) { ctx := acctest.Context(t) + var v guardduty.GetMalwareProtectionPlanOutput resourceName := "aws_guardduty_malware_protection_plan.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) testAccPreCheck(ctx, t) @@ -917,11 +924,12 @@ func TestAccGuardDutyMalwareProtectionPlan_tags_DefaultTags_providerOnly(t *test func TestAccGuardDutyMalwareProtectionPlan_tags_DefaultTags_nonOverlapping(t *testing.T) { ctx := acctest.Context(t) + var v guardduty.GetMalwareProtectionPlanOutput resourceName := "aws_guardduty_malware_protection_plan.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) testAccPreCheck(ctx, t) @@ -1080,11 +1088,12 @@ func TestAccGuardDutyMalwareProtectionPlan_tags_DefaultTags_nonOverlapping(t *te func TestAccGuardDutyMalwareProtectionPlan_tags_DefaultTags_overlapping(t *testing.T) { ctx := acctest.Context(t) + var v guardduty.GetMalwareProtectionPlanOutput resourceName := 
"aws_guardduty_malware_protection_plan.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) testAccPreCheck(ctx, t) @@ -1259,11 +1268,12 @@ func TestAccGuardDutyMalwareProtectionPlan_tags_DefaultTags_overlapping(t *testi func TestAccGuardDutyMalwareProtectionPlan_tags_DefaultTags_updateToProviderOnly(t *testing.T) { ctx := acctest.Context(t) + var v guardduty.GetMalwareProtectionPlanOutput resourceName := "aws_guardduty_malware_protection_plan.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) testAccPreCheck(ctx, t) @@ -1352,11 +1362,12 @@ func TestAccGuardDutyMalwareProtectionPlan_tags_DefaultTags_updateToProviderOnly func TestAccGuardDutyMalwareProtectionPlan_tags_DefaultTags_updateToResourceOnly(t *testing.T) { ctx := acctest.Context(t) + var v guardduty.GetMalwareProtectionPlanOutput resourceName := "aws_guardduty_malware_protection_plan.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) testAccPreCheck(ctx, t) @@ -1444,11 +1455,12 @@ func TestAccGuardDutyMalwareProtectionPlan_tags_DefaultTags_updateToResourceOnly func TestAccGuardDutyMalwareProtectionPlan_tags_DefaultTags_emptyResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v guardduty.GetMalwareProtectionPlanOutput resourceName := "aws_guardduty_malware_protection_plan.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := 
acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) testAccPreCheck(ctx, t) @@ -1513,11 +1525,12 @@ func TestAccGuardDutyMalwareProtectionPlan_tags_DefaultTags_emptyResourceTag(t * func TestAccGuardDutyMalwareProtectionPlan_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T) { ctx := acctest.Context(t) + var v guardduty.GetMalwareProtectionPlanOutput resourceName := "aws_guardduty_malware_protection_plan.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) testAccPreCheck(ctx, t) @@ -1574,11 +1587,12 @@ func TestAccGuardDutyMalwareProtectionPlan_tags_DefaultTags_emptyProviderOnlyTag func TestAccGuardDutyMalwareProtectionPlan_tags_DefaultTags_nullOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v guardduty.GetMalwareProtectionPlanOutput resourceName := "aws_guardduty_malware_protection_plan.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) testAccPreCheck(ctx, t) @@ -1646,11 +1660,12 @@ func TestAccGuardDutyMalwareProtectionPlan_tags_DefaultTags_nullOverlappingResou func TestAccGuardDutyMalwareProtectionPlan_tags_DefaultTags_nullNonOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v guardduty.GetMalwareProtectionPlanOutput resourceName := "aws_guardduty_malware_protection_plan.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + 
acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) testAccPreCheck(ctx, t) @@ -1720,11 +1735,12 @@ func TestAccGuardDutyMalwareProtectionPlan_tags_DefaultTags_nullNonOverlappingRe func TestAccGuardDutyMalwareProtectionPlan_tags_ComputedTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) + var v guardduty.GetMalwareProtectionPlanOutput resourceName := "aws_guardduty_malware_protection_plan.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) testAccPreCheck(ctx, t) @@ -1778,11 +1794,12 @@ func TestAccGuardDutyMalwareProtectionPlan_tags_ComputedTag_OnCreate(t *testing. func TestAccGuardDutyMalwareProtectionPlan_tags_ComputedTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + var v guardduty.GetMalwareProtectionPlanOutput resourceName := "aws_guardduty_malware_protection_plan.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) testAccPreCheck(ctx, t) @@ -1878,11 +1895,12 @@ func TestAccGuardDutyMalwareProtectionPlan_tags_ComputedTag_OnUpdate_Add(t *test func TestAccGuardDutyMalwareProtectionPlan_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + var v guardduty.GetMalwareProtectionPlanOutput resourceName := "aws_guardduty_malware_protection_plan.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) testAccPreCheck(ctx, t) @@ -1968,11 +1986,12 @@ 
func TestAccGuardDutyMalwareProtectionPlan_tags_ComputedTag_OnUpdate_Replace(t * func TestAccGuardDutyMalwareProtectionPlan_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { ctx := acctest.Context(t) + var v guardduty.GetMalwareProtectionPlanOutput resourceName := "aws_guardduty_malware_protection_plan.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) testAccPreCheck(ctx, t) @@ -2133,11 +2152,12 @@ func TestAccGuardDutyMalwareProtectionPlan_tags_IgnoreTags_Overlap_DefaultTag(t func TestAccGuardDutyMalwareProtectionPlan_tags_IgnoreTags_Overlap_ResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v guardduty.GetMalwareProtectionPlanOutput resourceName := "aws_guardduty_malware_protection_plan.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) testAccPreCheck(ctx, t) diff --git a/internal/service/guardduty/member.go b/internal/service/guardduty/member.go index 3626d55f1582..5e26db3560e1 100644 --- a/internal/service/guardduty/member.go +++ b/internal/service/guardduty/member.go @@ -14,7 +14,6 @@ import ( "github.com/aws/aws-sdk-go-v2/service/guardduty" awstypes "github.com/aws/aws-sdk-go-v2/service/guardduty/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/errs" @@ -253,34 +252,25 @@ func inviteMemberWaiter(ctx context.Context, accountID, detectorID string, timeo // wait until 
e-mail verification finishes var out *guardduty.GetMembersOutput - err := retry.RetryContext(ctx, timeout, func() *retry.RetryError { + err := tfresource.Retry(ctx, timeout, func(ctx context.Context) *tfresource.RetryError { log.Printf("[DEBUG] Reading GuardDuty Member: %+v", input) var err error out, err = conn.GetMembers(ctx, &input) if err != nil { - return retry.NonRetryableError(fmt.Errorf("reading GuardDuty Member %q: %s", accountID, err)) + return tfresource.NonRetryableError(fmt.Errorf("reading GuardDuty Member %q: %w", accountID, err)) } retryable, err := memberInvited(out, accountID) if err != nil { if retryable { - return retry.RetryableError(err) + return tfresource.RetryableError(err) } - return retry.NonRetryableError(err) + return tfresource.NonRetryableError(err) } return nil }) - if tfresource.TimedOut(err) { - out, err = conn.GetMembers(ctx, &input) - - if err != nil { - return fmt.Errorf("reading GuardDuty member: %w", err) - } - _, err = memberInvited(out, accountID) - return err - } if err != nil { return fmt.Errorf("waiting for GuardDuty email verification: %w", err) } diff --git a/internal/service/guardduty/service_endpoint_resolver_gen.go b/internal/service/guardduty/service_endpoint_resolver_gen.go index bdd75b5dcfd3..a0f5ea99c799 100644 --- a/internal/service/guardduty/service_endpoint_resolver_gen.go +++ b/internal/service/guardduty/service_endpoint_resolver_gen.go @@ -62,7 +62,7 @@ func (r resolverV2) ResolveEndpoint(ctx context.Context, params guardduty.Endpoi }) params.UseFIPS = aws.Bool(false) } else { - err = fmt.Errorf("looking up guardduty endpoint %q: %s", hostname, err) + err = fmt.Errorf("looking up guardduty endpoint %q: %w", hostname, err) return } } else { diff --git a/internal/service/guardduty/service_endpoints_gen_test.go b/internal/service/guardduty/service_endpoints_gen_test.go index 81cf36dd2974..abe1bf0b68cd 100644 --- a/internal/service/guardduty/service_endpoints_gen_test.go +++ 
b/internal/service/guardduty/service_endpoints_gen_test.go @@ -521,7 +521,7 @@ func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { ) } -var errCancelOperation = fmt.Errorf("Test: Canceling request") +var errCancelOperation = errors.New("Test: Canceling request") func addCancelRequestMiddleware() func(*middleware.Stack) error { return func(stack *middleware.Stack) error { diff --git a/internal/service/guardduty/service_package_gen.go b/internal/service/guardduty/service_package_gen.go index cd9fe3671a81..ca4d9ff3c7c4 100644 --- a/internal/service/guardduty/service_package_gen.go +++ b/internal/service/guardduty/service_package_gen.go @@ -7,7 +7,6 @@ import ( "unique" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/guardduty" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -167,7 +166,7 @@ func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) ( func(o *guardduty.Options) { if inContext, ok := conns.FromContext(ctx); ok && inContext.VCREnabled() { tflog.Info(ctx, "overriding retry behavior to immediately return VCR errors") - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(vcr.InteractionNotFoundRetryableFunc)) + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), vcr.InteractionNotFoundRetryableFunc) } }, withExtraOptions(ctx, p, config), diff --git a/internal/service/guardduty/sweep.go b/internal/service/guardduty/sweep.go index 7adaaed6a032..96119ac3c62d 100644 --- a/internal/service/guardduty/sweep.go +++ b/internal/service/guardduty/sweep.go @@ -33,7 +33,7 @@ func sweepDetectors(region string) error { client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %w", err) + return fmt.Errorf("getting client: %w", err) } conn := client.GuardDutyClient(ctx) @@ 
-81,7 +81,7 @@ func sweepPublishingDestinations(region string) error { client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.GuardDutyClient(ctx) @@ -116,7 +116,7 @@ func sweepPublishingDestinations(region string) error { } if err != nil { - return fmt.Errorf("error retrieving GuardDuty Publishing Destinations: %s", err) + return err } for _, destination_element := range page.Destinations { diff --git a/internal/service/guardduty/tags_gen.go b/internal/service/guardduty/tags_gen.go index c57fc683d6e9..4e6344e7e547 100644 --- a/internal/service/guardduty/tags_gen.go +++ b/internal/service/guardduty/tags_gen.go @@ -3,8 +3,8 @@ package guardduty import ( "context" - "fmt" + "github.com/YakDriver/smarterr" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/guardduty" "github.com/hashicorp/terraform-plugin-log/tflog" @@ -26,7 +26,7 @@ func listTags(ctx context.Context, conn *guardduty.Client, identifier string, op output, err := conn.ListTagsForResource(ctx, &input, optFns...) if err != nil { - return tftags.New(ctx, nil), err + return tftags.New(ctx, nil), smarterr.NewError(err) } return keyValueTags(ctx, output.Tags), nil @@ -38,7 +38,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri tags, err := listTags(ctx, meta.(*conns.AWSClient).GuardDutyClient(ctx), identifier) if err != nil { - return err + return smarterr.NewError(err) } if inContext, ok := tftags.FromContext(ctx); ok { @@ -99,7 +99,7 @@ func updateTags(ctx context.Context, conn *guardduty.Client, identifier string, _, err := conn.UntagResource(ctx, &input, optFns...) 
if err != nil { - return fmt.Errorf("untagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } @@ -114,7 +114,7 @@ func updateTags(ctx context.Context, conn *guardduty.Client, identifier string, _, err := conn.TagResource(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("tagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } diff --git a/internal/service/guardduty/threatintelset_tags_gen_test.go b/internal/service/guardduty/threatintelset_tags_gen_test.go index 5921f28fbb8b..e549916a7e48 100644 --- a/internal/service/guardduty/threatintelset_tags_gen_test.go +++ b/internal/service/guardduty/threatintelset_tags_gen_test.go @@ -6,7 +6,6 @@ import ( "testing" "github.com/hashicorp/terraform-plugin-testing/config" - sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/knownvalue" "github.com/hashicorp/terraform-plugin-testing/plancheck" @@ -47,10 +46,11 @@ func testAccGuardDutyThreatIntelSet_tagsSerial(t *testing.T) { func testAccGuardDutyThreatIntelSet_tags(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_guardduty_threatintelset.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) testAccPreCheckDetectorNotExists(ctx, t) @@ -231,10 +231,11 @@ func testAccGuardDutyThreatIntelSet_tags(t *testing.T) { func testAccGuardDutyThreatIntelSet_tags_null(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_guardduty_threatintelset.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { 
acctest.PreCheck(ctx, t) testAccPreCheckDetectorNotExists(ctx, t) @@ -300,10 +301,11 @@ func testAccGuardDutyThreatIntelSet_tags_null(t *testing.T) { func testAccGuardDutyThreatIntelSet_tags_EmptyMap(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_guardduty_threatintelset.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) testAccPreCheckDetectorNotExists(ctx, t) @@ -365,10 +367,11 @@ func testAccGuardDutyThreatIntelSet_tags_EmptyMap(t *testing.T) { func testAccGuardDutyThreatIntelSet_tags_AddOnUpdate(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_guardduty_threatintelset.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) testAccPreCheckDetectorNotExists(ctx, t) @@ -448,10 +451,11 @@ func testAccGuardDutyThreatIntelSet_tags_AddOnUpdate(t *testing.T) { func testAccGuardDutyThreatIntelSet_tags_EmptyTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_guardduty_threatintelset.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) testAccPreCheckDetectorNotExists(ctx, t) @@ -539,10 +543,11 @@ func testAccGuardDutyThreatIntelSet_tags_EmptyTag_OnCreate(t *testing.T) { func testAccGuardDutyThreatIntelSet_tags_EmptyTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_guardduty_threatintelset.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := 
acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) testAccPreCheckDetectorNotExists(ctx, t) @@ -678,10 +683,11 @@ func testAccGuardDutyThreatIntelSet_tags_EmptyTag_OnUpdate_Add(t *testing.T) { func testAccGuardDutyThreatIntelSet_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_guardduty_threatintelset.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) testAccPreCheckDetectorNotExists(ctx, t) @@ -769,10 +775,11 @@ func testAccGuardDutyThreatIntelSet_tags_EmptyTag_OnUpdate_Replace(t *testing.T) func testAccGuardDutyThreatIntelSet_tags_DefaultTags_providerOnly(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_guardduty_threatintelset.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) testAccPreCheckDetectorNotExists(ctx, t) @@ -952,10 +959,11 @@ func testAccGuardDutyThreatIntelSet_tags_DefaultTags_providerOnly(t *testing.T) func testAccGuardDutyThreatIntelSet_tags_DefaultTags_nonOverlapping(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_guardduty_threatintelset.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) testAccPreCheckDetectorNotExists(ctx, t) @@ -1114,10 +1122,11 @@ func testAccGuardDutyThreatIntelSet_tags_DefaultTags_nonOverlapping(t *testing.T func 
testAccGuardDutyThreatIntelSet_tags_DefaultTags_overlapping(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_guardduty_threatintelset.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) testAccPreCheckDetectorNotExists(ctx, t) @@ -1292,10 +1301,11 @@ func testAccGuardDutyThreatIntelSet_tags_DefaultTags_overlapping(t *testing.T) { func testAccGuardDutyThreatIntelSet_tags_DefaultTags_updateToProviderOnly(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_guardduty_threatintelset.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) testAccPreCheckDetectorNotExists(ctx, t) @@ -1384,10 +1394,11 @@ func testAccGuardDutyThreatIntelSet_tags_DefaultTags_updateToProviderOnly(t *tes func testAccGuardDutyThreatIntelSet_tags_DefaultTags_updateToResourceOnly(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_guardduty_threatintelset.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) testAccPreCheckDetectorNotExists(ctx, t) @@ -1475,10 +1486,11 @@ func testAccGuardDutyThreatIntelSet_tags_DefaultTags_updateToResourceOnly(t *tes func testAccGuardDutyThreatIntelSet_tags_DefaultTags_emptyResourceTag(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_guardduty_threatintelset.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.Test(t, 
resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) testAccPreCheckDetectorNotExists(ctx, t) @@ -1542,10 +1554,11 @@ func testAccGuardDutyThreatIntelSet_tags_DefaultTags_emptyResourceTag(t *testing func testAccGuardDutyThreatIntelSet_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_guardduty_threatintelset.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) testAccPreCheckDetectorNotExists(ctx, t) @@ -1601,10 +1614,11 @@ func testAccGuardDutyThreatIntelSet_tags_DefaultTags_emptyProviderOnlyTag(t *tes func testAccGuardDutyThreatIntelSet_tags_DefaultTags_nullOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_guardduty_threatintelset.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) testAccPreCheckDetectorNotExists(ctx, t) @@ -1665,10 +1679,11 @@ func testAccGuardDutyThreatIntelSet_tags_DefaultTags_nullOverlappingResourceTag( func testAccGuardDutyThreatIntelSet_tags_DefaultTags_nullNonOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_guardduty_threatintelset.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) testAccPreCheckDetectorNotExists(ctx, t) @@ -1729,10 +1744,11 @@ func testAccGuardDutyThreatIntelSet_tags_DefaultTags_nullNonOverlappingResourceT func 
testAccGuardDutyThreatIntelSet_tags_ComputedTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_guardduty_threatintelset.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) testAccPreCheckDetectorNotExists(ctx, t) @@ -1786,10 +1802,11 @@ func testAccGuardDutyThreatIntelSet_tags_ComputedTag_OnCreate(t *testing.T) { func testAccGuardDutyThreatIntelSet_tags_ComputedTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_guardduty_threatintelset.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) testAccPreCheckDetectorNotExists(ctx, t) @@ -1885,10 +1902,11 @@ func testAccGuardDutyThreatIntelSet_tags_ComputedTag_OnUpdate_Add(t *testing.T) func testAccGuardDutyThreatIntelSet_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_guardduty_threatintelset.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) testAccPreCheckDetectorNotExists(ctx, t) @@ -1974,10 +1992,11 @@ func testAccGuardDutyThreatIntelSet_tags_ComputedTag_OnUpdate_Replace(t *testing func testAccGuardDutyThreatIntelSet_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_guardduty_threatintelset.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + 
acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) testAccPreCheckDetectorNotExists(ctx, t) @@ -2138,10 +2157,11 @@ func testAccGuardDutyThreatIntelSet_tags_IgnoreTags_Overlap_DefaultTag(t *testin func testAccGuardDutyThreatIntelSet_tags_IgnoreTags_Overlap_ResourceTag(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_guardduty_threatintelset.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) testAccPreCheckDetectorNotExists(ctx, t) diff --git a/internal/service/healthlake/service_endpoint_resolver_gen.go b/internal/service/healthlake/service_endpoint_resolver_gen.go index a7335e5de93b..c85b026a24f3 100644 --- a/internal/service/healthlake/service_endpoint_resolver_gen.go +++ b/internal/service/healthlake/service_endpoint_resolver_gen.go @@ -62,7 +62,7 @@ func (r resolverV2) ResolveEndpoint(ctx context.Context, params healthlake.Endpo }) params.UseFIPS = aws.Bool(false) } else { - err = fmt.Errorf("looking up healthlake endpoint %q: %s", hostname, err) + err = fmt.Errorf("looking up healthlake endpoint %q: %w", hostname, err) return } } else { diff --git a/internal/service/healthlake/service_endpoints_gen_test.go b/internal/service/healthlake/service_endpoints_gen_test.go index 267700a43dec..54311bac8d27 100644 --- a/internal/service/healthlake/service_endpoints_gen_test.go +++ b/internal/service/healthlake/service_endpoints_gen_test.go @@ -521,7 +521,7 @@ func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { ) } -var errCancelOperation = fmt.Errorf("Test: Canceling request") +var errCancelOperation = errors.New("Test: Canceling request") func addCancelRequestMiddleware() func(*middleware.Stack) error { return func(stack *middleware.Stack) error { diff --git 
a/internal/service/healthlake/service_package_gen.go b/internal/service/healthlake/service_package_gen.go index 1557b1590c75..4d36bbb7ac30 100644 --- a/internal/service/healthlake/service_package_gen.go +++ b/internal/service/healthlake/service_package_gen.go @@ -6,7 +6,6 @@ import ( "context" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/healthlake" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -56,7 +55,7 @@ func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) ( func(o *healthlake.Options) { if inContext, ok := conns.FromContext(ctx); ok && inContext.VCREnabled() { tflog.Info(ctx, "overriding retry behavior to immediately return VCR errors") - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(vcr.InteractionNotFoundRetryableFunc)) + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), vcr.InteractionNotFoundRetryableFunc) } }, withExtraOptions(ctx, p, config), diff --git a/internal/service/healthlake/tags_gen.go b/internal/service/healthlake/tags_gen.go index ee7fa5503201..f4f4a481a9c8 100644 --- a/internal/service/healthlake/tags_gen.go +++ b/internal/service/healthlake/tags_gen.go @@ -3,8 +3,8 @@ package healthlake import ( "context" - "fmt" + "github.com/YakDriver/smarterr" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/healthlake" awstypes "github.com/aws/aws-sdk-go-v2/service/healthlake/types" @@ -27,7 +27,7 @@ func listTags(ctx context.Context, conn *healthlake.Client, identifier string, o output, err := conn.ListTagsForResource(ctx, &input, optFns...) 
if err != nil { - return tftags.New(ctx, nil), err + return tftags.New(ctx, nil), smarterr.NewError(err) } return keyValueTags(ctx, output.Tags), nil @@ -39,7 +39,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri tags, err := listTags(ctx, meta.(*conns.AWSClient).HealthLakeClient(ctx), identifier) if err != nil { - return err + return smarterr.NewError(err) } if inContext, ok := tftags.FromContext(ctx); ok { @@ -117,7 +117,7 @@ func updateTags(ctx context.Context, conn *healthlake.Client, identifier string, _, err := conn.UntagResource(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("untagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } @@ -132,7 +132,7 @@ func updateTags(ctx context.Context, conn *healthlake.Client, identifier string, _, err := conn.TagResource(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("tagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } diff --git a/internal/service/iam/access_key.go b/internal/service/iam/access_key.go index c194aab5f545..2a5b6e086ef4 100644 --- a/internal/service/iam/access_key.go +++ b/internal/service/iam/access_key.go @@ -9,7 +9,6 @@ import ( "crypto/sha256" "fmt" "log" - "reflect" "time" "github.com/aws/aws-sdk-go-v2/aws" @@ -24,7 +23,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" tfslices "github.com/hashicorp/terraform-provider-aws/internal/slices" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" - itypes "github.com/hashicorp/terraform-provider-aws/internal/types" + inttypes "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -249,10 +248,11 @@ func resourceAccessKeyDelete(ctx context.Context, d *schema.ResourceData, meta a conn := meta.(*conns.AWSClient).IAMClient(ctx) log.Printf("[DEBUG] Deleting IAM Access Key: %s", d.Id()) - _, err := conn.DeleteAccessKey(ctx, 
&iam.DeleteAccessKeyInput{ + input := iam.DeleteAccessKeyInput{ AccessKeyId: aws.String(d.Id()), UserName: aws.String(d.Get("user").(string)), - }) + } + _, err := conn.DeleteAccessKey(ctx, &input) if errs.IsA[*awstypes.NoSuchEntityException](err) { return diags @@ -281,7 +281,7 @@ func findAccessKeyByTwoPartKey(ctx context.Context, conn *iam.Client, username, UserName: aws.String(username), } - return findAccessKey(ctx, conn, input, func(v awstypes.AccessKeyMetadata) bool { + return findAccessKey(ctx, conn, input, func(v *awstypes.AccessKeyMetadata) bool { return aws.ToString(v.AccessKeyId) == id }) } @@ -291,10 +291,10 @@ func findAccessKeysByUser(ctx context.Context, conn *iam.Client, username string UserName: aws.String(username), } - return findAccessKeys(ctx, conn, input, tfslices.PredicateTrue[awstypes.AccessKeyMetadata]()) + return findAccessKeys(ctx, conn, input, tfslices.PredicateTrue[*awstypes.AccessKeyMetadata]()) } -func findAccessKey(ctx context.Context, conn *iam.Client, input *iam.ListAccessKeysInput, filter tfslices.Predicate[awstypes.AccessKeyMetadata]) (*awstypes.AccessKeyMetadata, error) { +func findAccessKey(ctx context.Context, conn *iam.Client, input *iam.ListAccessKeysInput, filter tfslices.Predicate[*awstypes.AccessKeyMetadata]) (*awstypes.AccessKeyMetadata, error) { output, err := findAccessKeys(ctx, conn, input, filter) if err != nil { @@ -304,7 +304,7 @@ func findAccessKey(ctx context.Context, conn *iam.Client, input *iam.ListAccessK return tfresource.AssertSingleValueResult(output) } -func findAccessKeys(ctx context.Context, conn *iam.Client, input *iam.ListAccessKeysInput, filter tfslices.Predicate[awstypes.AccessKeyMetadata]) ([]awstypes.AccessKeyMetadata, error) { +func findAccessKeys(ctx context.Context, conn *iam.Client, input *iam.ListAccessKeysInput, filter tfslices.Predicate[*awstypes.AccessKeyMetadata]) ([]awstypes.AccessKeyMetadata, error) { var output []awstypes.AccessKeyMetadata pages := iam.NewListAccessKeysPaginator(conn, 
input) @@ -323,7 +323,7 @@ func findAccessKeys(ctx context.Context, conn *iam.Client, input *iam.ListAccess } for _, v := range page.AccessKeyMetadata { - if !reflect.ValueOf(v).IsZero() && filter(v) { + if p := &v; !inttypes.IsZero(p) && filter(p) { output = append(output, v) } } @@ -371,5 +371,5 @@ func sesSMTPPasswordFromSecretKeySigV4(key *string, region string) (string, erro versionedSig := make([]byte, 0, len(rawSig)+1) versionedSig = append(versionedSig, version) versionedSig = append(versionedSig, rawSig...) - return itypes.Base64Encode(versionedSig), nil + return inttypes.Base64Encode(versionedSig), nil } diff --git a/internal/service/iam/access_key_test.go b/internal/service/iam/access_key_test.go index c44c5bda98eb..c62fae40d8cf 100644 --- a/internal/service/iam/access_key_test.go +++ b/internal/service/iam/access_key_test.go @@ -242,11 +242,11 @@ func testDecryptSecretKeyAndTest(nAccessKey, key string) resource.TestCheckFunc // have it. We can verify that decrypting it does not error _, err := pgpkeys.DecryptBytes(secret, key) if err != nil { - return fmt.Errorf("Error decrypting secret: %s", err) + return fmt.Errorf("Error decrypting secret: %w", err) } _, err = pgpkeys.DecryptBytes(password, key) if err != nil { - return fmt.Errorf("Error decrypting password: %s", err) + return fmt.Errorf("Error decrypting password: %w", err) } return nil diff --git a/internal/service/iam/account_alias.go b/internal/service/iam/account_alias.go index fb2f42218218..1b0402259a20 100644 --- a/internal/service/iam/account_alias.go +++ b/internal/service/iam/account_alias.go @@ -5,13 +5,17 @@ package iam import ( "context" + "log" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/iam" + awstypes "github.com/aws/aws-sdk-go-v2/service/iam/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" + 
"github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" ) // @SDKResource("aws_iam_account_alias", name="Account Alias") @@ -41,15 +45,14 @@ func resourceAccountAliasCreate(ctx context.Context, d *schema.ResourceData, met conn := meta.(*conns.AWSClient).IAMClient(ctx) accountAlias := d.Get("account_alias").(string) - - params := &iam.CreateAccountAliasInput{ + input := iam.CreateAccountAliasInput{ AccountAlias: aws.String(accountAlias), } - _, err := conn.CreateAccountAlias(ctx, params) + _, err := conn.CreateAccountAlias(ctx, &input) if err != nil { - return sdkdiag.AppendErrorf(diags, "creating account alias with name '%s': %s", accountAlias, err) + return sdkdiag.AppendErrorf(diags, "creating IAM Account Alias (%s): %s", accountAlias, err) } d.SetId(accountAlias) @@ -61,23 +64,20 @@ func resourceAccountAliasRead(ctx context.Context, d *schema.ResourceData, meta var diags diag.Diagnostics conn := meta.(*conns.AWSClient).IAMClient(ctx) - params := &iam.ListAccountAliasesInput{} - - resp, err := conn.ListAccountAliases(ctx, params) + var input iam.ListAccountAliasesInput + output, err := findAccountAlias(ctx, conn, &input) - if err != nil { - return sdkdiag.AppendErrorf(diags, "listing account aliases: %s", err) - } - - if !d.IsNewResource() && (resp == nil || len(resp.AccountAliases) == 0) { + if !d.IsNewResource() && tfresource.NotFound(err) { + log.Printf("[WARN] IAM Account Alias (%s) not found, removing from state", d.Id()) d.SetId("") return diags } - accountAlias := resp.AccountAliases[0] + if err != nil { + return sdkdiag.AppendErrorf(diags, "reading IAM Account Alias (%s): %s", d.Id(), err) + } - d.SetId(accountAlias) - d.Set("account_alias", accountAlias) + d.Set("account_alias", output) return diags } @@ -86,17 +86,47 @@ func resourceAccountAliasDelete(ctx context.Context, d *schema.ResourceData, met var diags 
diag.Diagnostics conn := meta.(*conns.AWSClient).IAMClient(ctx) - accountAlias := d.Get("account_alias").(string) - - params := &iam.DeleteAccountAliasInput{ - AccountAlias: aws.String(accountAlias), + log.Printf("[DEBUG] Deleting IAM Account Alias: %s", d.Id()) + input := iam.DeleteAccountAliasInput{ + AccountAlias: aws.String(d.Id()), } - _, err := conn.DeleteAccountAlias(ctx, params) + _, err := conn.DeleteAccountAlias(ctx, &input) + + if errs.IsA[*awstypes.NoSuchEntityException](err) { + return diags + } if err != nil { - return sdkdiag.AppendErrorf(diags, "deleting account alias with name '%s': %s", accountAlias, err) + return sdkdiag.AppendErrorf(diags, "deleting IAM Account Alias (%s): %s", d.Id(), err) } return diags } + +func findAccountAlias(ctx context.Context, conn *iam.Client, input *iam.ListAccountAliasesInput) (*string, error) { + output, err := findAccountAliases(ctx, conn, input) + + if err != nil { + return nil, err + } + + return tfresource.AssertSingleValueResult(output) +} + +func findAccountAliases(ctx context.Context, conn *iam.Client, input *iam.ListAccountAliasesInput) ([]string, error) { + var output []string + + pages := iam.NewListAccountAliasesPaginator(conn, input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + + if err != nil { + return nil, err + } + + output = append(output, page.AccountAliases...) 
+ } + + return output, nil +} diff --git a/internal/service/iam/account_alias_data_source.go b/internal/service/iam/account_alias_data_source.go index a894f797789a..06f342467641 100644 --- a/internal/service/iam/account_alias_data_source.go +++ b/internal/service/iam/account_alias_data_source.go @@ -5,8 +5,8 @@ package iam import ( "context" - "log" + "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/iam" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" @@ -32,22 +32,15 @@ func dataSourceAccountAliasRead(ctx context.Context, d *schema.ResourceData, met var diags diag.Diagnostics conn := meta.(*conns.AWSClient).IAMClient(ctx) - log.Printf("[DEBUG] Reading IAM Account Aliases.") + var input iam.ListAccountAliasesInput + output, err := findAccountAlias(ctx, conn, &input) - req := &iam.ListAccountAliasesInput{} - resp, err := conn.ListAccountAliases(ctx, req) if err != nil { return sdkdiag.AppendErrorf(diags, "reading IAM Account Alias: %s", err) } - // 'AccountAliases': [] if there is no alias. 
- if resp == nil || len(resp.AccountAliases) == 0 { - return sdkdiag.AppendErrorf(diags, "reading IAM Account Alias: empty result") - } - - alias := resp.AccountAliases[0] - d.SetId(alias) - d.Set("account_alias", alias) + d.SetId(aws.ToString(output)) + d.Set("account_alias", output) return diags } diff --git a/internal/service/iam/account_alias_test.go b/internal/service/iam/account_alias_test.go index d83da881ff0a..62a081bb8720 100644 --- a/internal/service/iam/account_alias_test.go +++ b/internal/service/iam/account_alias_test.go @@ -16,6 +16,8 @@ import ( "github.com/hashicorp/terraform-plugin-testing/terraform" "github.com/hashicorp/terraform-provider-aws/internal/acctest" "github.com/hashicorp/terraform-provider-aws/internal/conns" + tfiam "github.com/hashicorp/terraform-provider-aws/internal/service/iam" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -27,7 +29,8 @@ func TestAccIAMAccountAlias_serial(t *testing.T) { acctest.CtBasic: testAccAccountAliasDataSource_basic, }, "Resource": { - acctest.CtBasic: testAccAccountAlias_basic, + acctest.CtBasic: testAccAccountAlias_basic, + acctest.CtDisappears: testAccAccountAlias_disappears, }, } @@ -37,7 +40,6 @@ func TestAccIAMAccountAlias_serial(t *testing.T) { func testAccAccountAlias_basic(t *testing.T) { ctx := acctest.Context(t) resourceName := "aws_iam_account_alias.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resource.Test(t, resource.TestCase{ @@ -64,6 +66,32 @@ func testAccAccountAlias_basic(t *testing.T) { }) } +func testAccAccountAlias_disappears(t *testing.T) { + ctx := acctest.Context(t) + resourceName := "aws_iam_account_alias.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + testAccPreCheckAccountAlias(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), + 
ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckAccountAliasDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccAccountAliasConfig_basic(rName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckAccountAliasExists(ctx, resourceName), + acctest.CheckResourceDisappears(ctx, acctest.Provider, tfiam.ResourceAccountAlias(), resourceName), + ), + ExpectNonEmptyPlan: true, + }, + }, + }) +} + func testAccCheckAccountAliasDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { conn := acctest.Provider.Meta().(*conns.AWSClient).IAMClient(ctx) @@ -73,21 +101,18 @@ func testAccCheckAccountAliasDestroy(ctx context.Context) resource.TestCheckFunc continue } - params := &iam.ListAccountAliasesInput{} - - resp, err := conn.ListAccountAliases(ctx, params) + var input iam.ListAccountAliasesInput + _, err := tfiam.FindAccountAlias(ctx, conn, &input) - if err != nil { - return fmt.Errorf("error reading IAM Account Alias (%s): %w", rs.Primary.ID, err) + if tfresource.NotFound(err) { + continue } - if resp == nil { - return fmt.Errorf("error reading IAM Account Alias (%s): empty response", rs.Primary.ID) + if err != nil { + return err } - if len(resp.AccountAliases) > 0 { - return fmt.Errorf("Bad: Account alias still exists: %q", rs.Primary.ID) - } + return fmt.Errorf("IAM Server Certificate %s still exists", rs.Primary.ID) } return nil @@ -96,29 +121,17 @@ func testAccCheckAccountAliasDestroy(ctx context.Context) resource.TestCheckFunc func testAccCheckAccountAliasExists(ctx context.Context, n string) resource.TestCheckFunc { return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] + _, ok := s.RootModule().Resources[n] if !ok { return fmt.Errorf("Not found: %s", n) } conn := acctest.Provider.Meta().(*conns.AWSClient).IAMClient(ctx) - params := &iam.ListAccountAliasesInput{} - resp, err := conn.ListAccountAliases(ctx, params) + var input 
iam.ListAccountAliasesInput + _, err := tfiam.FindAccountAlias(ctx, conn, &input) - if err != nil { - return fmt.Errorf("error reading IAM Account Alias (%s): %w", rs.Primary.ID, err) - } - - if resp == nil { - return fmt.Errorf("error reading IAM Account Alias (%s): empty response", rs.Primary.ID) - } - - if len(resp.AccountAliases) == 0 { - return fmt.Errorf("Bad: Account alias %q does not exist", rs.Primary.ID) - } - - return nil + return err } } diff --git a/internal/service/iam/consts.go b/internal/service/iam/consts.go new file mode 100644 index 000000000000..6ff9286f67cc --- /dev/null +++ b/internal/service/iam/consts.go @@ -0,0 +1,17 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package iam + +import ( + "time" +) + +const ( + // Maximum amount of time to wait for IAM changes to propagate + // This timeout should not be increased without strong consideration + // as this will negatively impact user experience when configurations + // have incorrect references or permissions. 
+ // Reference: https://docs.aws.amazon.com/IAM/latest/UserGuide/troubleshoot_general.html#troubleshoot_general_eventual-consistency + propagationTimeout = 2 * time.Minute +) diff --git a/internal/service/iam/exports.go b/internal/service/iam/exports.go index e4939cba4da9..ac083eb3151f 100644 --- a/internal/service/iam/exports.go +++ b/internal/service/iam/exports.go @@ -7,8 +7,12 @@ package iam var ( ResourceRole = resourceRole - DeleteServiceLinkedRole = deleteServiceLinkedRole - FindRoleByName = findRoleByName - ListGroupsForUserPages = listGroupsForUserPages - AttachPolicyToUser = attachPolicyToUser + DeleteServiceLinkedRole = deleteServiceLinkedRole + FindRoleByName = findRoleByName + PolicyHasValidAWSPrincipals = policyHasValidAWSPrincipals // nosemgrep:ci.aws-in-var-name +) + +type ( + IAMPolicyDoc = iamPolicyDoc + IAMPolicyStatement = iamPolicyStatement ) diff --git a/internal/service/iam/exports_test.go b/internal/service/iam/exports_test.go index 9a8ae3366a4a..142d1ebd5bd0 100644 --- a/internal/service/iam/exports_test.go +++ b/internal/service/iam/exports_test.go @@ -5,8 +5,8 @@ package iam // Exports for use in tests only. 
var ( - ResourceAccessKey = resourceAccessKey - // ResourceAccountAlias = resourceAccountAlias + ResourceAccessKey = resourceAccessKey + ResourceAccountAlias = resourceAccountAlias ResourceAccountPasswordPolicy = resourceAccountPasswordPolicy ResourceGroup = resourceGroup // ResourceGroupMembership = resourceGroupMembership @@ -32,33 +32,49 @@ var ( ResourceUserSSHKey = resourceUserSSHKey ResourceVirtualMFADevice = resourceVirtualMFADevice - FindAccessKeyByTwoPartKey = findAccessKeyByTwoPartKey - FindAccountPasswordPolicy = findAccountPasswordPolicy - FindAttachedGroupPolicies = findAttachedGroupPolicies - FindAttachedGroupPolicyByTwoPartKey = findAttachedGroupPolicyByTwoPartKey - FindAttachedRolePolicies = findAttachedRolePolicies - FindAttachedRolePolicyByTwoPartKey = findAttachedRolePolicyByTwoPartKey - FindAttachedUserPolicies = findAttachedUserPolicies - FindAttachedUserPolicyByTwoPartKey = findAttachedUserPolicyByTwoPartKey - FindEntitiesForPolicyByARN = findEntitiesForPolicyByARN - FindGroupByName = findGroupByName - FindGroupPoliciesByName = findGroupPoliciesByName - FindGroupPolicyAttachmentsByName = findGroupPolicyAttachmentsByName - FindInstanceProfileByName = findInstanceProfileByName - FindOpenIDConnectProviderByARN = findOpenIDConnectProviderByARN - FindOrganizationsFeatures = findOrganizationsFeatures - FindPolicyByARN = findPolicyByARN - FindRolePolicyByTwoPartKey = findRolePolicyByTwoPartKey - FindRolePoliciesByName = findRolePoliciesByName - FindRolePolicyAttachmentsByName = findRolePolicyAttachmentsByName - FindSAMLProviderByARN = findSAMLProviderByARN - FindServerCertificateByName = findServerCertificateByName - FindSSHPublicKeyByThreePartKey = findSSHPublicKeyByThreePartKey - FindUserByName = findUserByName - FindUserPoliciesByName = findUserPoliciesByName - FindUserPolicyAttachmentsByName = findUserPolicyAttachmentsByName - FindVirtualMFADeviceBySerialNumber = findVirtualMFADeviceBySerialNumber - SESSMTPPasswordFromSecretKeySigV4 = 
sesSMTPPasswordFromSecretKeySigV4 + FindAccessKeyByTwoPartKey = findAccessKeyByTwoPartKey + FindAccountAlias = findAccountAlias + FindAccountPasswordPolicy = findAccountPasswordPolicy + FindAttachedGroupPolicies = findAttachedGroupPolicies + FindAttachedGroupPolicyByTwoPartKey = findAttachedGroupPolicyByTwoPartKey + FindAttachedRolePolicies = findAttachedRolePolicies + FindAttachedRolePolicyByTwoPartKey = findAttachedRolePolicyByTwoPartKey + FindAttachedUserPolicies = findAttachedUserPolicies + FindAttachedUserPolicyByTwoPartKey = findAttachedUserPolicyByTwoPartKey + FindEntitiesForPolicyByARN = findEntitiesForPolicyByARN + FindGroupByName = findGroupByName + FindGroupPoliciesByName = findGroupPoliciesByName + FindGroupPolicyAttachmentsByName = findGroupPolicyAttachmentsByName + FindGroupPolicyByTwoPartKey = findGroupPolicyByTwoPartKey + FindInstanceProfileByName = findInstanceProfileByName + FindOpenIDConnectProviderByARN = findOpenIDConnectProviderByARN + FindOrganizationsFeatures = findOrganizationsFeatures + FindPolicyByARN = findPolicyByARN + FindRolePolicyByTwoPartKey = findRolePolicyByTwoPartKey + FindRolePoliciesByName = findRolePoliciesByName + FindRolePolicyAttachmentsByName = findRolePolicyAttachmentsByName + FindSAMLProviderByARN = findSAMLProviderByARN + FindServerCertificateByName = findServerCertificateByName + FindServiceSpecificCredentialByThreePartKey = findServiceSpecificCredentialByThreePartKey + FindSigningCertificateByTwoPartKey = findSigningCertificateByTwoPartKey + FindSSHPublicKeyByThreePartKey = findSSHPublicKeyByThreePartKey + FindUserByName = findUserByName + FindUserPoliciesByName = findUserPoliciesByName + FindUserPolicyAttachmentsByName = findUserPolicyAttachmentsByName + FindUserPolicyByTwoPartKey = findUserPolicyByTwoPartKey + FindVirtualMFADeviceBySerialNumber = findVirtualMFADeviceBySerialNumber - RolePolicyParseID = rolePolicyParseID + AttachPolicyToUser = attachPolicyToUser + CheckPwdPolicy = checkPwdPolicy + GeneratePassword = 
generatePassword + IsValidPolicyAWSPrincipal = isValidPolicyAWSPrincipal // nosemgrep:ci.aws-in-var-name + ListGroupsForUserPages = listGroupsForUserPages + RoleNameSessionFromARN = roleNameSessionFromARN + RolePolicyParseID = rolePolicyParseID + ServiceLinkedRoleParseResourceID = serviceLinkedRoleParseResourceID + SESSMTPPasswordFromSecretKeySigV4 = sesSMTPPasswordFromSecretKeySigV4 +) + +type ( + IAMPolicyStatementConditionSet = iamPolicyStatementConditionSet ) diff --git a/internal/service/iam/find.go b/internal/service/iam/find.go deleted file mode 100644 index 88478b22c305..000000000000 --- a/internal/service/iam/find.go +++ /dev/null @@ -1,125 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package iam - -import ( - "context" - "reflect" - - "github.com/YakDriver/regexache" - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/service/iam" - awstypes "github.com/aws/aws-sdk-go-v2/service/iam/types" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" - "github.com/hashicorp/terraform-provider-aws/internal/errs" - "github.com/hashicorp/terraform-provider-aws/internal/tfresource" -) - -func FindUsers(ctx context.Context, conn *iam.Client, nameRegex, pathPrefix string) ([]awstypes.User, error) { - input := &iam.ListUsersInput{} - - if pathPrefix != "" { - input.PathPrefix = aws.String(pathPrefix) - } - - var results []awstypes.User - - pages := iam.NewListUsersPaginator(conn, input) - for pages.HasMorePages() { - page, err := pages.NextPage(ctx) - if err != nil { - return nil, err - } - - for _, user := range page.Users { - if nameRegex != "" && !regexache.MustCompile(nameRegex).MatchString(aws.ToString(user.UserName)) { - continue - } - - results = append(results, user) - } - } - - return results, nil -} - -func FindServiceSpecificCredential(ctx context.Context, conn *iam.Client, serviceName, userName, credID string) (*awstypes.ServiceSpecificCredentialMetadata, error) { - input := 
&iam.ListServiceSpecificCredentialsInput{ - ServiceName: aws.String(serviceName), - UserName: aws.String(userName), - } - - output, err := conn.ListServiceSpecificCredentials(ctx, input) - - if errs.IsA[*awstypes.NoSuchEntityException](err) { - return nil, &retry.NotFoundError{ - LastError: err, - LastRequest: input, - } - } - - if err != nil { - return nil, err - } - - if len(output.ServiceSpecificCredentials) == 0 { - return nil, tfresource.NewEmptyResultError(output) - } - - var cred awstypes.ServiceSpecificCredentialMetadata - - for _, crd := range output.ServiceSpecificCredentials { - if aws.ToString(crd.ServiceName) == serviceName && - aws.ToString(crd.UserName) == userName && - aws.ToString(crd.ServiceSpecificCredentialId) == credID { - cred = crd - break - } - } - - if reflect.ValueOf(cred).IsZero() { - return nil, tfresource.NewEmptyResultError(cred) - } - - return &cred, nil -} - -func FindSigningCertificate(ctx context.Context, conn *iam.Client, userName, certId string) (*awstypes.SigningCertificate, error) { - input := &iam.ListSigningCertificatesInput{ - UserName: aws.String(userName), - } - - output, err := conn.ListSigningCertificates(ctx, input) - - if errs.IsA[*awstypes.NoSuchEntityException](err) { - return nil, &retry.NotFoundError{ - LastError: err, - LastRequest: input, - } - } - - if err != nil { - return nil, err - } - - if len(output.Certificates) == 0 { - return nil, tfresource.NewEmptyResultError(output) - } - - var cert awstypes.SigningCertificate - - for _, crt := range output.Certificates { - if aws.ToString(crt.UserName) == userName && - aws.ToString(crt.CertificateId) == certId { - cert = crt - break - } - } - - if reflect.ValueOf(cert).IsZero() { - return nil, tfresource.NewEmptyResultError(cert) - } - - return &cert, nil -} diff --git a/internal/service/iam/generate.go b/internal/service/iam/generate.go index b6d07a91ffb9..7c4709a6f705 100644 --- a/internal/service/iam/generate.go +++ b/internal/service/iam/generate.go @@ -1,7 +1,7 
@@ // Copyright (c) HashiCorp, Inc. // SPDX-License-Identifier: MPL-2.0 -//go:generate go run ../../generate/listpages/main.go -Paginator=Marker -ListOps=ListGroupsForUser +//go:generate go run ../../generate/listpages/main.go -Paginator=Marker -ListOps=ListGroupsForUser,ListServiceSpecificCredentials //go:generate go run ../../generate/tags/main.go -ServiceTagsSlice //go:generate go run ../../generate/servicepackage/main.go //go:generate go run ../../generate/tagstests/main.go diff --git a/internal/service/iam/group.go b/internal/service/iam/group.go index c74bcf67281d..515db5967369 100644 --- a/internal/service/iam/group.go +++ b/internal/service/iam/group.go @@ -87,7 +87,7 @@ func resourceGroupCreate(ctx context.Context, d *schema.ResourceData, meta any) d.SetId(aws.ToString(output.Group.GroupName)) - _, err = tfresource.RetryWhenNotFound(ctx, propagationTimeout, func() (any, error) { + _, err = tfresource.RetryWhenNotFound(ctx, propagationTimeout, func(ctx context.Context) (any, error) { return findGroupByName(ctx, conn, d.Id()) }) diff --git a/internal/service/iam/group_membership.go b/internal/service/iam/group_membership.go index ccc2e182ad69..d319226c5460 100644 --- a/internal/service/iam/group_membership.go +++ b/internal/service/iam/group_membership.go @@ -12,7 +12,6 @@ import ( awstypes "github.com/aws/aws-sdk-go-v2/service/iam/types" "github.com/hashicorp/aws-sdk-go-base/v2/tfawserr" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/errs" @@ -80,17 +79,17 @@ func resourceGroupMembershipRead(ctx context.Context, d *schema.ResourceData, me var ul []string - err := retry.RetryContext(ctx, propagationTimeout, func() *retry.RetryError { + err := tfresource.Retry(ctx, propagationTimeout, func(ctx context.Context) 
*tfresource.RetryError { pages := iam.NewGetGroupPaginator(conn, input) for pages.HasMorePages() { page, err := pages.NextPage(ctx) if d.IsNewResource() && errs.IsA[*awstypes.NoSuchEntityException](err) { - return retry.RetryableError(err) + return tfresource.RetryableError(err) } if err != nil { - return retry.NonRetryableError(err) + return tfresource.NonRetryableError(err) } for _, user := range page.Users { @@ -101,25 +100,6 @@ func resourceGroupMembershipRead(ctx context.Context, d *schema.ResourceData, me return nil }) - if tfresource.TimedOut(err) { - pages := iam.NewGetGroupPaginator(conn, input) - for pages.HasMorePages() { - page, err := pages.NextPage(ctx) - - if d.IsNewResource() && errs.IsA[*awstypes.NoSuchEntityException](err) { - return sdkdiag.AppendFromErr(diags, err) - } - - if err != nil { - return sdkdiag.AppendFromErr(diags, err) - } - - for _, user := range page.Users { - ul = append(ul, aws.ToString(user.UserName)) - } - } - } - var noSuchEntityException *awstypes.NoSuchEntityException if !d.IsNewResource() && tfawserr.ErrCodeEquals(err, noSuchEntityException.ErrorCode()) { log.Printf("[WARN] IAM Group Membership (%s) not found, removing from state", group) diff --git a/internal/service/iam/group_policy.go b/internal/service/iam/group_policy.go index 07c96ef64c5b..60b8b656ed30 100644 --- a/internal/service/iam/group_policy.go +++ b/internal/service/iam/group_policy.go @@ -81,25 +81,24 @@ func resourceGroupPolicyPut(ctx context.Context, d *schema.ResourceData, meta an return sdkdiag.AppendFromErr(diags, err) } - groupName := d.Get("group").(string) - policyName := create.Name(d.Get(names.AttrName).(string), d.Get(names.AttrNamePrefix).(string)) - request := &iam.PutGroupPolicyInput{ + groupName, policyName := d.Get("group").(string), create.Name(d.Get(names.AttrName).(string), d.Get(names.AttrNamePrefix).(string)) + input := iam.PutGroupPolicyInput{ GroupName: aws.String(groupName), PolicyDocument: aws.String(policyDoc), PolicyName: 
aws.String(policyName), } - _, err = conn.PutGroupPolicy(ctx, request) + _, err = conn.PutGroupPolicy(ctx, &input) if err != nil { return sdkdiag.AppendErrorf(diags, "putting IAM Group (%s) Policy (%s): %s", groupName, policyName, err) } if d.IsNewResource() { - d.SetId(fmt.Sprintf("%s:%s", groupName, policyName)) + d.SetId(groupPolicyCreateResourceID(groupName, policyName)) - _, err := tfresource.RetryWhenNotFound(ctx, propagationTimeout, func() (any, error) { - return FindGroupPolicyByTwoPartKey(ctx, conn, groupName, policyName) + _, err := tfresource.RetryWhenNotFound(ctx, propagationTimeout, func(ctx context.Context) (any, error) { + return findGroupPolicyByTwoPartKey(ctx, conn, groupName, policyName) }) if err != nil { @@ -114,12 +113,12 @@ func resourceGroupPolicyRead(ctx context.Context, d *schema.ResourceData, meta a var diags diag.Diagnostics conn := meta.(*conns.AWSClient).IAMClient(ctx) - groupName, policyName, err := GroupPolicyParseID(d.Id()) + groupName, policyName, err := groupPolicyParseResourceID(d.Id()) if err != nil { return sdkdiag.AppendFromErr(diags, err) } - policyDocument, err := FindGroupPolicyByTwoPartKey(ctx, conn, groupName, policyName) + policyDocument, err := findGroupPolicyByTwoPartKey(ctx, conn, groupName, policyName) if !d.IsNewResource() && tfresource.NotFound(err) { log.Printf("[WARN] IAM Group Policy %s not found, removing from state", d.Id()) @@ -153,16 +152,17 @@ func resourceGroupPolicyDelete(ctx context.Context, d *schema.ResourceData, meta var diags diag.Diagnostics conn := meta.(*conns.AWSClient).IAMClient(ctx) - groupName, policyName, err := GroupPolicyParseID(d.Id()) + groupName, policyName, err := groupPolicyParseResourceID(d.Id()) if err != nil { return sdkdiag.AppendFromErr(diags, err) } log.Printf("[INFO] Deleting IAM Group Policy: %s", d.Id()) - _, err = conn.DeleteGroupPolicy(ctx, &iam.DeleteGroupPolicyInput{ + input := iam.DeleteGroupPolicyInput{ GroupName: aws.String(groupName), PolicyName: aws.String(policyName), 
- }) + } + _, err = conn.DeleteGroupPolicy(ctx, &input) if errs.IsA[*awstypes.NoSuchEntityException](err) { return diags @@ -175,12 +175,16 @@ func resourceGroupPolicyDelete(ctx context.Context, d *schema.ResourceData, meta return diags } -func FindGroupPolicyByTwoPartKey(ctx context.Context, conn *iam.Client, groupName, policyName string) (string, error) { - input := &iam.GetGroupPolicyInput{ +func findGroupPolicyByTwoPartKey(ctx context.Context, conn *iam.Client, groupName, policyName string) (string, error) { + input := iam.GetGroupPolicyInput{ GroupName: aws.String(groupName), PolicyName: aws.String(policyName), } + return findGroupPolicy(ctx, conn, &input) +} + +func findGroupPolicy(ctx context.Context, conn *iam.Client, input *iam.GetGroupPolicyInput) (string, error) { output, err := conn.GetGroupPolicy(ctx, input) if errs.IsA[*awstypes.NoSuchEntityException](err) { @@ -201,14 +205,21 @@ func FindGroupPolicyByTwoPartKey(ctx context.Context, conn *iam.Client, groupNam return aws.ToString(output.PolicyDocument), nil } -func GroupPolicyParseID(id string) (groupName, policyName string, err error) { - parts := strings.SplitN(id, ":", 2) +const groupPolicyResourceIDSeparator = ":" + +func groupPolicyCreateResourceID(groupName, policyName string) string { + parts := []string{groupName, policyName} + id := strings.Join(parts, groupPolicyResourceIDSeparator) + + return id +} + +func groupPolicyParseResourceID(id string) (string, string, error) { + parts := strings.SplitN(id, groupPolicyResourceIDSeparator, 2) + if len(parts) != 2 || parts[0] == "" || parts[1] == "" { - err = fmt.Errorf("group_policy id must be of the form :") - return + return "", "", fmt.Errorf("unexpected format for ID (%[1]s), expected GROUP-NAME%[2]sPOLICY-NAME", id, groupPolicyResourceIDSeparator) } - groupName = parts[0] - policyName = parts[1] - return + return parts[0], parts[1], nil } diff --git a/internal/service/iam/group_policy_attachment.go 
b/internal/service/iam/group_policy_attachment.go index 2f6df897192c..73e94e9bfb8d 100644 --- a/internal/service/iam/group_policy_attachment.go +++ b/internal/service/iam/group_policy_attachment.go @@ -7,7 +7,6 @@ import ( "context" "fmt" "log" - "reflect" "strings" "github.com/aws/aws-sdk-go-v2/aws" @@ -22,6 +21,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" tfslices "github.com/hashicorp/terraform-provider-aws/internal/slices" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + inttypes "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/internal/verify" ) @@ -78,7 +78,7 @@ func resourceGroupPolicyAttachmentRead(ctx context.Context, d *schema.ResourceDa // Human friendly ID for error messages since d.Id() is non-descriptive. id := fmt.Sprintf("%s:%s", group, policyARN) - _, err := tfresource.RetryWhenNewResourceNotFound(ctx, propagationTimeout, func() (any, error) { + _, err := tfresource.RetryWhenNewResourceNotFound(ctx, propagationTimeout, func(ctx context.Context) (any, error) { return findAttachedGroupPolicyByTwoPartKey(ctx, conn, group, policyARN) }, d.IsNewResource()) @@ -124,7 +124,7 @@ func resourceGroupPolicyAttachmentImport(ctx context.Context, d *schema.Resource func attachPolicyToGroup(ctx context.Context, conn *iam.Client, group, policyARN string) error { var errConcurrentModificationException *awstypes.ConcurrentModificationException - _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, propagationTimeout, func() (any, error) { + _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, propagationTimeout, func(ctx context.Context) (any, error) { return conn.AttachGroupPolicy(ctx, &iam.AttachGroupPolicyInput{ GroupName: aws.String(group), PolicyArn: aws.String(policyARN), @@ -140,7 +140,7 @@ func attachPolicyToGroup(ctx context.Context, conn *iam.Client, group, policyARN func detachPolicyFromGroup(ctx context.Context, conn *iam.Client, group, policyARN 
string) error { var errConcurrentModificationException *awstypes.ConcurrentModificationException - _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, propagationTimeout, func() (any, error) { + _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, propagationTimeout, func(ctx context.Context) (any, error) { return conn.DetachGroupPolicy(ctx, &iam.DetachGroupPolicyInput{ GroupName: aws.String(group), PolicyArn: aws.String(policyARN), @@ -159,16 +159,16 @@ func detachPolicyFromGroup(ctx context.Context, conn *iam.Client, group, policyA } func findAttachedGroupPolicyByTwoPartKey(ctx context.Context, conn *iam.Client, groupName, policyARN string) (*awstypes.AttachedPolicy, error) { - input := &iam.ListAttachedGroupPoliciesInput{ + input := iam.ListAttachedGroupPoliciesInput{ GroupName: aws.String(groupName), } - return findAttachedGroupPolicy(ctx, conn, input, func(v awstypes.AttachedPolicy) bool { + return findAttachedGroupPolicy(ctx, conn, &input, func(v *awstypes.AttachedPolicy) bool { return aws.ToString(v.PolicyArn) == policyARN }) } -func findAttachedGroupPolicy(ctx context.Context, conn *iam.Client, input *iam.ListAttachedGroupPoliciesInput, filter tfslices.Predicate[awstypes.AttachedPolicy]) (*awstypes.AttachedPolicy, error) { +func findAttachedGroupPolicy(ctx context.Context, conn *iam.Client, input *iam.ListAttachedGroupPoliciesInput, filter tfslices.Predicate[*awstypes.AttachedPolicy]) (*awstypes.AttachedPolicy, error) { output, err := findAttachedGroupPolicies(ctx, conn, input, filter) if err != nil { @@ -178,7 +178,7 @@ func findAttachedGroupPolicy(ctx context.Context, conn *iam.Client, input *iam.L return tfresource.AssertSingleValueResult(output) } -func findAttachedGroupPolicies(ctx context.Context, conn *iam.Client, input *iam.ListAttachedGroupPoliciesInput, filter tfslices.Predicate[awstypes.AttachedPolicy]) ([]awstypes.AttachedPolicy, error) { +func findAttachedGroupPolicies(ctx context.Context, conn *iam.Client, input 
*iam.ListAttachedGroupPoliciesInput, filter tfslices.Predicate[*awstypes.AttachedPolicy]) ([]awstypes.AttachedPolicy, error) { var output []awstypes.AttachedPolicy pages := iam.NewListAttachedGroupPoliciesPaginator(conn, input) @@ -197,7 +197,7 @@ func findAttachedGroupPolicies(ctx context.Context, conn *iam.Client, input *iam } for _, v := range page.AttachedPolicies { - if !reflect.ValueOf(v).IsZero() && filter(v) { + if p := &v; !inttypes.IsZero(p) && filter(p) { output = append(output, v) } } diff --git a/internal/service/iam/group_policy_attachment_test.go b/internal/service/iam/group_policy_attachment_test.go index c942a5d684bb..85e37175b420 100644 --- a/internal/service/iam/group_policy_attachment_test.go +++ b/internal/service/iam/group_policy_attachment_test.go @@ -145,7 +145,7 @@ func testAccCheckGroupPolicyAttachmentCount(ctx context.Context, groupName strin input := &iam.ListAttachedGroupPoliciesInput{ GroupName: aws.String(groupName), } - output, err := tfiam.FindAttachedGroupPolicies(ctx, conn, input, tfslices.PredicateTrue[awstypes.AttachedPolicy]()) + output, err := tfiam.FindAttachedGroupPolicies(ctx, conn, input, tfslices.PredicateTrue[*awstypes.AttachedPolicy]()) if err != nil { return err diff --git a/internal/service/iam/group_policy_test.go b/internal/service/iam/group_policy_test.go index 1472f593f24a..8f62d53f7a7b 100644 --- a/internal/service/iam/group_policy_test.go +++ b/internal/service/iam/group_policy_test.go @@ -199,12 +199,7 @@ func testAccCheckGroupPolicyDestroy(ctx context.Context) resource.TestCheckFunc continue } - groupName, policyName, err := tfiam.GroupPolicyParseID(rs.Primary.ID) - if err != nil { - return err - } - - _, err = tfiam.FindGroupPolicyByTwoPartKey(ctx, conn, groupName, policyName) + _, err := tfiam.FindGroupPolicyByTwoPartKey(ctx, conn, rs.Primary.Attributes["group"], rs.Primary.Attributes[names.AttrName]) if tfresource.NotFound(err) { continue @@ -228,14 +223,9 @@ func testAccCheckGroupPolicyExists(ctx 
context.Context, n string, v *string) res return fmt.Errorf("Not Found: %s", n) } - groupName, policyName, err := tfiam.GroupPolicyParseID(rs.Primary.ID) - if err != nil { - return err - } - conn := acctest.Provider.Meta().(*conns.AWSClient).IAMClient(ctx) - output, err := tfiam.FindGroupPolicyByTwoPartKey(ctx, conn, groupName, policyName) + output, err := tfiam.FindGroupPolicyByTwoPartKey(ctx, conn, rs.Primary.Attributes["group"], rs.Primary.Attributes[names.AttrName]) if err != nil { return err diff --git a/internal/service/iam/instance_profile.go b/internal/service/iam/instance_profile.go index 99447ffa4ba5..9eca00f359c5 100644 --- a/internal/service/iam/instance_profile.go +++ b/internal/service/iam/instance_profile.go @@ -118,7 +118,7 @@ func resourceInstanceProfileCreate(ctx context.Context, d *schema.ResourceData, d.SetId(aws.ToString(output.InstanceProfile.InstanceProfileName)) - _, err = tfresource.RetryWhenNotFound(ctx, propagationTimeout, func() (any, error) { + _, err = tfresource.RetryWhenNotFound(ctx, propagationTimeout, func(ctx context.Context) (any, error) { return findInstanceProfileByName(ctx, conn, d.Id()) }) @@ -274,7 +274,7 @@ func instanceProfileAddRole(ctx context.Context, conn *iam.Client, profileName, } _, err := tfresource.RetryWhen(ctx, propagationTimeout, - func() (any, error) { + func(ctx context.Context) (any, error) { return conn.AddRoleToInstanceProfile(ctx, input) }, func(err error) (bool, error) { @@ -342,8 +342,8 @@ func findInstanceProfileByName(ctx context.Context, conn *iam.Client, name strin } const ( - InstanceProfileFound = "Found" - InstanceProfileInvalidARN = "InvalidARN" + instanceProfileFoundState = "Found" + instanceProfileInvalidARNState = "InvalidARN" ) func statusInstanceProfile(ctx context.Context, conn *iam.Client, name string) retry.StateRefreshFunc { @@ -359,17 +359,17 @@ func statusInstanceProfile(ctx context.Context, conn *iam.Client, name string) r _, err = arn.Parse(aws.ToString(output.Arn)) if err != nil { 
- return nil, InstanceProfileInvalidARN, nil // lint:ignore nilerr // this is usually a temporary state + return nil, instanceProfileInvalidARNState, nil // lint:ignore nilerr // this is usually a temporary state } - return output, InstanceProfileFound, nil + return output, instanceProfileFoundState, nil } } func waitInstanceProfileReady(ctx context.Context, conn *iam.Client, id string, timeout time.Duration) error { stateConf := &retry.StateChangeConf{ - Pending: []string{"", InstanceProfileInvalidARN}, - Target: enum.Slice(InstanceProfileFound), + Pending: []string{"", instanceProfileInvalidARNState}, + Target: enum.Slice(instanceProfileFoundState), Refresh: statusInstanceProfile(ctx, conn, id), Timeout: timeout, Delay: 5 * time.Second, diff --git a/internal/service/iam/instance_profile_tags_gen_test.go b/internal/service/iam/instance_profile_tags_gen_test.go index bf2ad6dae3bf..ec291071dfb9 100644 --- a/internal/service/iam/instance_profile_tags_gen_test.go +++ b/internal/service/iam/instance_profile_tags_gen_test.go @@ -7,7 +7,6 @@ import ( "github.com/aws/aws-sdk-go-v2/service/iam/types" "github.com/hashicorp/terraform-plugin-testing/config" - sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/knownvalue" "github.com/hashicorp/terraform-plugin-testing/plancheck" @@ -19,11 +18,12 @@ import ( func TestAccIAMInstanceProfile_tags(t *testing.T) { ctx := acctest.Context(t) + var v types.InstanceProfile resourceName := "aws_iam_instance_profile.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: testAccCheckInstanceProfileDestroy(ctx), @@ -201,11 
+201,12 @@ func TestAccIAMInstanceProfile_tags(t *testing.T) { func TestAccIAMInstanceProfile_tags_null(t *testing.T) { ctx := acctest.Context(t) + var v types.InstanceProfile resourceName := "aws_iam_instance_profile.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: testAccCheckInstanceProfileDestroy(ctx), @@ -268,11 +269,12 @@ func TestAccIAMInstanceProfile_tags_null(t *testing.T) { func TestAccIAMInstanceProfile_tags_EmptyMap(t *testing.T) { ctx := acctest.Context(t) + var v types.InstanceProfile resourceName := "aws_iam_instance_profile.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: testAccCheckInstanceProfileDestroy(ctx), @@ -331,11 +333,12 @@ func TestAccIAMInstanceProfile_tags_EmptyMap(t *testing.T) { func TestAccIAMInstanceProfile_tags_AddOnUpdate(t *testing.T) { ctx := acctest.Context(t) + var v types.InstanceProfile resourceName := "aws_iam_instance_profile.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: testAccCheckInstanceProfileDestroy(ctx), @@ -412,11 +415,12 @@ func TestAccIAMInstanceProfile_tags_AddOnUpdate(t *testing.T) { func TestAccIAMInstanceProfile_tags_EmptyTag_OnCreate(t 
*testing.T) { ctx := acctest.Context(t) + var v types.InstanceProfile resourceName := "aws_iam_instance_profile.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: testAccCheckInstanceProfileDestroy(ctx), @@ -501,11 +505,12 @@ func TestAccIAMInstanceProfile_tags_EmptyTag_OnCreate(t *testing.T) { func TestAccIAMInstanceProfile_tags_EmptyTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + var v types.InstanceProfile resourceName := "aws_iam_instance_profile.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: testAccCheckInstanceProfileDestroy(ctx), @@ -638,11 +643,12 @@ func TestAccIAMInstanceProfile_tags_EmptyTag_OnUpdate_Add(t *testing.T) { func TestAccIAMInstanceProfile_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + var v types.InstanceProfile resourceName := "aws_iam_instance_profile.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: testAccCheckInstanceProfileDestroy(ctx), @@ -727,11 +733,12 @@ func TestAccIAMInstanceProfile_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { func TestAccIAMInstanceProfile_tags_DefaultTags_providerOnly(t *testing.T) { ctx := 
acctest.Context(t) + var v types.InstanceProfile resourceName := "aws_iam_instance_profile.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: testAccCheckInstanceProfileDestroy(ctx), @@ -908,11 +915,12 @@ func TestAccIAMInstanceProfile_tags_DefaultTags_providerOnly(t *testing.T) { func TestAccIAMInstanceProfile_tags_DefaultTags_nonOverlapping(t *testing.T) { ctx := acctest.Context(t) + var v types.InstanceProfile resourceName := "aws_iam_instance_profile.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: testAccCheckInstanceProfileDestroy(ctx), @@ -1068,11 +1076,12 @@ func TestAccIAMInstanceProfile_tags_DefaultTags_nonOverlapping(t *testing.T) { func TestAccIAMInstanceProfile_tags_DefaultTags_overlapping(t *testing.T) { ctx := acctest.Context(t) + var v types.InstanceProfile resourceName := "aws_iam_instance_profile.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: testAccCheckInstanceProfileDestroy(ctx), @@ -1244,11 +1253,12 @@ func TestAccIAMInstanceProfile_tags_DefaultTags_overlapping(t *testing.T) { func TestAccIAMInstanceProfile_tags_DefaultTags_updateToProviderOnly(t *testing.T) { ctx := 
acctest.Context(t) + var v types.InstanceProfile resourceName := "aws_iam_instance_profile.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: testAccCheckInstanceProfileDestroy(ctx), @@ -1334,11 +1344,12 @@ func TestAccIAMInstanceProfile_tags_DefaultTags_updateToProviderOnly(t *testing. func TestAccIAMInstanceProfile_tags_DefaultTags_updateToResourceOnly(t *testing.T) { ctx := acctest.Context(t) + var v types.InstanceProfile resourceName := "aws_iam_instance_profile.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: testAccCheckInstanceProfileDestroy(ctx), @@ -1423,11 +1434,12 @@ func TestAccIAMInstanceProfile_tags_DefaultTags_updateToResourceOnly(t *testing. 
func TestAccIAMInstanceProfile_tags_DefaultTags_emptyResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v types.InstanceProfile resourceName := "aws_iam_instance_profile.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: testAccCheckInstanceProfileDestroy(ctx), @@ -1488,11 +1500,12 @@ func TestAccIAMInstanceProfile_tags_DefaultTags_emptyResourceTag(t *testing.T) { func TestAccIAMInstanceProfile_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T) { ctx := acctest.Context(t) + var v types.InstanceProfile resourceName := "aws_iam_instance_profile.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: testAccCheckInstanceProfileDestroy(ctx), @@ -1545,11 +1558,12 @@ func TestAccIAMInstanceProfile_tags_DefaultTags_emptyProviderOnlyTag(t *testing. 
func TestAccIAMInstanceProfile_tags_DefaultTags_nullOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v types.InstanceProfile resourceName := "aws_iam_instance_profile.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: testAccCheckInstanceProfileDestroy(ctx), @@ -1607,11 +1621,12 @@ func TestAccIAMInstanceProfile_tags_DefaultTags_nullOverlappingResourceTag(t *te func TestAccIAMInstanceProfile_tags_DefaultTags_nullNonOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v types.InstanceProfile resourceName := "aws_iam_instance_profile.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: testAccCheckInstanceProfileDestroy(ctx), @@ -1669,11 +1684,12 @@ func TestAccIAMInstanceProfile_tags_DefaultTags_nullNonOverlappingResourceTag(t func TestAccIAMInstanceProfile_tags_ComputedTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) + var v types.InstanceProfile resourceName := "aws_iam_instance_profile.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: testAccCheckInstanceProfileDestroy(ctx), @@ -1724,11 +1740,12 @@ func TestAccIAMInstanceProfile_tags_ComputedTag_OnCreate(t 
*testing.T) { func TestAccIAMInstanceProfile_tags_ComputedTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + var v types.InstanceProfile resourceName := "aws_iam_instance_profile.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: testAccCheckInstanceProfileDestroy(ctx), @@ -1821,11 +1838,12 @@ func TestAccIAMInstanceProfile_tags_ComputedTag_OnUpdate_Add(t *testing.T) { func TestAccIAMInstanceProfile_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + var v types.InstanceProfile resourceName := "aws_iam_instance_profile.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: testAccCheckInstanceProfileDestroy(ctx), @@ -1908,11 +1926,12 @@ func TestAccIAMInstanceProfile_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { func TestAccIAMInstanceProfile_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { ctx := acctest.Context(t) + var v types.InstanceProfile resourceName := "aws_iam_instance_profile.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: testAccCheckInstanceProfileDestroy(ctx), @@ -2070,11 +2089,12 @@ func 
TestAccIAMInstanceProfile_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) func TestAccIAMInstanceProfile_tags_IgnoreTags_Overlap_ResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v types.InstanceProfile resourceName := "aws_iam_instance_profile.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: testAccCheckInstanceProfileDestroy(ctx), diff --git a/internal/service/iam/instance_profiles_data_source.go b/internal/service/iam/instance_profiles_data_source.go index 8813ece81557..a9f50beb2864 100644 --- a/internal/service/iam/instance_profiles_data_source.go +++ b/internal/service/iam/instance_profiles_data_source.go @@ -5,7 +5,6 @@ package iam import ( "context" - "reflect" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/iam" @@ -16,6 +15,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" + inttypes "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -51,7 +51,6 @@ func dataSourceInstanceProfiles() *schema.Resource { func dataSourceInstanceProfilesRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).IAMClient(ctx) roleName := d.Get("role_name").(string) @@ -78,12 +77,12 @@ func dataSourceInstanceProfilesRead(ctx context.Context, d *schema.ResourceData, } func findInstanceProfilesForRole(ctx context.Context, conn *iam.Client, roleName string) ([]awstypes.InstanceProfile, error) { - input := &iam.ListInstanceProfilesForRoleInput{ + input := 
iam.ListInstanceProfilesForRoleInput{ RoleName: aws.String(roleName), } var output []awstypes.InstanceProfile - pages := iam.NewListInstanceProfilesForRolePaginator(conn, input) + pages := iam.NewListInstanceProfilesForRolePaginator(conn, &input) for pages.HasMorePages() { page, err := pages.NextPage(ctx) @@ -99,7 +98,7 @@ func findInstanceProfilesForRole(ctx context.Context, conn *iam.Client, roleName } for _, v := range page.InstanceProfiles { - if !reflect.ValueOf(v).IsZero() { + if p := &v; !inttypes.IsZero(p) { output = append(output, v) } } diff --git a/internal/service/iam/list_pages_gen.go b/internal/service/iam/list_pages_gen.go index f6a1a78e5c71..75ef4505e304 100644 --- a/internal/service/iam/list_pages_gen.go +++ b/internal/service/iam/list_pages_gen.go @@ -1,4 +1,4 @@ -// Code generated by "internal/generate/listpages/main.go -Paginator=Marker -ListOps=ListGroupsForUser"; DO NOT EDIT. +// Code generated by "internal/generate/listpages/main.go -Paginator=Marker -ListOps=ListGroupsForUser,ListServiceSpecificCredentials"; DO NOT EDIT. package iam @@ -25,3 +25,19 @@ func listGroupsForUserPages(ctx context.Context, conn *iam.Client, input *iam.Li } return nil } +func listServiceSpecificCredentialsPages(ctx context.Context, conn *iam.Client, input *iam.ListServiceSpecificCredentialsInput, fn func(*iam.ListServiceSpecificCredentialsOutput, bool) bool, optFns ...func(*iam.Options)) error { + for { + output, err := conn.ListServiceSpecificCredentials(ctx, input, optFns...) 
+ if err != nil { + return err + } + + lastPage := aws.ToString(output.Marker) == "" + if !fn(output, lastPage) || lastPage { + break + } + + input.Marker = output.Marker + } + return nil +} diff --git a/internal/service/iam/openid_connect_provider.go b/internal/service/iam/openid_connect_provider.go index a8078a9d30fc..4da9feb23163 100644 --- a/internal/service/iam/openid_connect_provider.go +++ b/internal/service/iam/openid_connect_provider.go @@ -26,6 +26,8 @@ import ( // @SDKResource("aws_iam_openid_connect_provider", name="OIDC Provider") // @Tags(identifierAttribute="arn", resourceType="OIDCProvider") // @Testing(name="OpenIDConnectProvider") +// @ArnIdentity +// @Testing(preIdentityVersion="v6.4.0") func resourceOpenIDConnectProvider() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceOpenIDConnectProviderCreate, @@ -33,10 +35,6 @@ func resourceOpenIDConnectProvider() *schema.Resource { UpdateWithoutTimeout: resourceOpenIDConnectProviderUpdate, DeleteWithoutTimeout: resourceOpenIDConnectProviderDelete, - Importer: &schema.ResourceImporter{ - StateContext: schema.ImportStatePassthroughContext, - }, - Schema: map[string]*schema.Schema{ names.AttrARN: { Type: schema.TypeString, @@ -226,10 +224,14 @@ func resourceOpenIDConnectProviderDelete(ctx context.Context, d *schema.Resource } func findOpenIDConnectProviderByARN(ctx context.Context, conn *iam.Client, arn string) (*iam.GetOpenIDConnectProviderOutput, error) { - input := &iam.GetOpenIDConnectProviderInput{ + input := iam.GetOpenIDConnectProviderInput{ OpenIDConnectProviderArn: aws.String(arn), } + return findOpenIDConnectProvider(ctx, conn, &input) +} + +func findOpenIDConnectProvider(ctx context.Context, conn *iam.Client, input *iam.GetOpenIDConnectProviderInput) (*iam.GetOpenIDConnectProviderOutput, error) { output, err := conn.GetOpenIDConnectProvider(ctx, input) if errs.IsA[*awstypes.NoSuchEntityException](err) { @@ -238,6 +240,7 @@ func findOpenIDConnectProviderByARN(ctx 
context.Context, conn *iam.Client, arn s LastRequest: input, } } + if err != nil { return nil, err } diff --git a/internal/service/iam/openid_connect_provider_data_source.go b/internal/service/iam/openid_connect_provider_data_source.go index 2646d1adbd74..4ff2e35175ee 100644 --- a/internal/service/iam/openid_connect_provider_data_source.go +++ b/internal/service/iam/openid_connect_provider_data_source.go @@ -6,18 +6,18 @@ package iam import ( "context" "fmt" - "reflect" "strings" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/iam" awstypes "github.com/aws/aws-sdk-go-v2/service/iam/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" - "github.com/hashicorp/terraform-provider-aws/internal/flex" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" + inttypes "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/internal/verify" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -42,12 +42,12 @@ func dataSourceOpenIDConnectProvider() *schema.Resource { Computed: true, Elem: &schema.Schema{Type: schema.TypeString}, }, + names.AttrTags: tftags.TagsSchemaComputed(), "thumbprint_list": { Type: schema.TypeList, Computed: true, Elem: &schema.Schema{Type: schema.TypeString}, }, - names.AttrTags: tftags.TagsSchemaComputed(), names.AttrURL: { Type: schema.TypeString, Optional: true, @@ -62,71 +62,67 @@ func dataSourceOpenIDConnectProvider() *schema.Resource { func dataSourceOpenIDConnectProviderRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).IAMClient(ctx) - input := &iam.GetOpenIDConnectProviderInput{} + var input 
iam.GetOpenIDConnectProviderInput if v, ok := d.GetOk(names.AttrARN); ok { input.OpenIDConnectProviderArn = aws.String(v.(string)) } else if v, ok := d.GetOk(names.AttrURL); ok { url := v.(string) - oidcpEntry, err := dataSourceGetOpenIDConnectProviderByURL(ctx, conn, url) + oidcpEntry, err := findOpenIDConnectProviderByURL(ctx, conn, url) + if err != nil { - return sdkdiag.AppendErrorf(diags, "finding IAM OIDC Provider by url (%s): %s", url, err) + return sdkdiag.AppendErrorf(diags, "reading IAM OIDC Provider (%s): %s", url, err) } - if oidcpEntry == nil { - return sdkdiag.AppendErrorf(diags, "finding IAM OIDC Provider by url (%s): not found", url) - } input.OpenIDConnectProviderArn = oidcpEntry.Arn } - resp, err := conn.GetOpenIDConnectProvider(ctx, input) + output, err := findOpenIDConnectProvider(ctx, conn, &input) if err != nil { return sdkdiag.AppendErrorf(diags, "reading IAM OIDC Provider: %s", err) } - d.SetId(aws.ToString(input.OpenIDConnectProviderArn)) - d.Set(names.AttrARN, input.OpenIDConnectProviderArn) - d.Set(names.AttrURL, resp.Url) - d.Set("client_id_list", flex.FlattenStringValueList(resp.ClientIDList)) - d.Set("thumbprint_list", flex.FlattenStringValueList(resp.ThumbprintList)) + arn := aws.ToString(input.OpenIDConnectProviderArn) + d.SetId(arn) + d.Set(names.AttrARN, arn) + d.Set("client_id_list", output.ClientIDList) + d.Set("thumbprint_list", output.ThumbprintList) + d.Set(names.AttrURL, output.Url) - setTagsOut(ctx, resp.Tags) + setTagsOut(ctx, output.Tags) return diags } -func dataSourceGetOpenIDConnectProviderByURL(ctx context.Context, conn *iam.Client, url string) (*awstypes.OpenIDConnectProviderListEntry, error) { - var result *awstypes.OpenIDConnectProviderListEntry - - input := &iam.ListOpenIDConnectProvidersInput{} +func findOpenIDConnectProviderByURL(ctx context.Context, conn *iam.Client, url string) (*awstypes.OpenIDConnectProviderListEntry, error) { + var input iam.ListOpenIDConnectProvidersInput - output, err := 
conn.ListOpenIDConnectProviders(ctx, input) + output, err := conn.ListOpenIDConnectProviders(ctx, &input) if err != nil { return nil, err } - for _, oidcp := range output.OpenIDConnectProviderList { - if reflect.ValueOf(oidcp).IsZero() { + for _, v := range output.OpenIDConnectProviderList { + if p := &v; inttypes.IsZero(p) { continue } - arnUrl, err := urlFromOpenIDConnectProviderARN(aws.ToString(oidcp.Arn)) + arnUrl, err := urlFromOpenIDConnectProviderARN(aws.ToString(v.Arn)) if err != nil { return nil, err } if arnUrl == strings.TrimPrefix(url, "https://") { - return &oidcp, nil + return &v, nil } } - return result, nil + return nil, &retry.NotFoundError{} } func urlFromOpenIDConnectProviderARN(arn string) (string, error) { diff --git a/internal/service/iam/openid_connect_provider_data_source_tags_gen_test.go b/internal/service/iam/openid_connect_provider_data_source_tags_gen_test.go index 3f7e9398a1c2..74bced62d9cd 100644 --- a/internal/service/iam/openid_connect_provider_data_source_tags_gen_test.go +++ b/internal/service/iam/openid_connect_provider_data_source_tags_gen_test.go @@ -8,7 +8,6 @@ import ( "unique" "github.com/hashicorp/terraform-plugin-testing/config" - sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/knownvalue" "github.com/hashicorp/terraform-plugin-testing/statecheck" @@ -22,10 +21,11 @@ import ( func TestAccIAMOIDCProviderDataSource_tags(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_iam_openid_connect_provider.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), ProtoV5ProviderFactories: 
acctest.ProtoV5ProviderFactories, @@ -50,10 +50,11 @@ func TestAccIAMOIDCProviderDataSource_tags(t *testing.T) { func TestAccIAMOIDCProviderDataSource_tags_NullMap(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_iam_openid_connect_provider.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -74,10 +75,11 @@ func TestAccIAMOIDCProviderDataSource_tags_NullMap(t *testing.T) { func TestAccIAMOIDCProviderDataSource_tags_EmptyMap(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_iam_openid_connect_provider.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -98,10 +100,11 @@ func TestAccIAMOIDCProviderDataSource_tags_EmptyMap(t *testing.T) { func TestAccIAMOIDCProviderDataSource_tags_DefaultTags_nonOverlapping(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_iam_openid_connect_provider.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), Steps: []resource.TestStep{ @@ -130,10 +133,11 @@ func TestAccIAMOIDCProviderDataSource_tags_DefaultTags_nonOverlapping(t 
*testing func TestAccIAMOIDCProviderDataSource_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_iam_openid_connect_provider.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), Steps: []resource.TestStep{ @@ -168,10 +172,11 @@ func TestAccIAMOIDCProviderDataSource_tags_IgnoreTags_Overlap_DefaultTag(t *test func TestAccIAMOIDCProviderDataSource_tags_IgnoreTags_Overlap_ResourceTag(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_iam_openid_connect_provider.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), Steps: []resource.TestStep{ diff --git a/internal/service/iam/openid_connect_provider_identity_gen_test.go b/internal/service/iam/openid_connect_provider_identity_gen_test.go new file mode 100644 index 000000000000..7122c0272572 --- /dev/null +++ b/internal/service/iam/openid_connect_provider_identity_gen_test.go @@ -0,0 +1,214 @@ +// Code generated by internal/generate/identitytests/main.go; DO NOT EDIT. 
+ +package iam_test + +import ( + "testing" + + "github.com/hashicorp/terraform-plugin-testing/compare" + "github.com/hashicorp/terraform-plugin-testing/config" + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/knownvalue" + "github.com/hashicorp/terraform-plugin-testing/plancheck" + "github.com/hashicorp/terraform-plugin-testing/statecheck" + "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" + "github.com/hashicorp/terraform-plugin-testing/tfversion" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccIAMOpenIDConnectProvider_Identity_Basic(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_iam_openid_connect_provider.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), + CheckDestroy: testAccCheckOpenIDConnectProviderDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + // Step 1: Setup + { + ConfigDirectory: config.StaticDirectory("testdata/OpenIDConnectProvider/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckOpenIDConnectProviderExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New(names.AttrARN), compare.ValuesSame()), + 
statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + + // Step 2: Import command + { + ConfigDirectory: config.StaticDirectory("testdata/OpenIDConnectProvider/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ImportStateKind: resource.ImportCommandWithID, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + + // Step 3: Import block with Import ID + { + ConfigDirectory: config.StaticDirectory("testdata/OpenIDConnectProvider/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + }, + }, + }, + + // Step 4: Import block with Resource Identity + { + ConfigDirectory: config.StaticDirectory("testdata/OpenIDConnectProvider/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithResourceIdentity, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + }, + }, + }, + }, + }) +} + +// Resource Identity was added after v6.4.0 +func TestAccIAMOpenIDConnectProvider_Identity_ExistingResource(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := 
"aws_iam_openid_connect_provider.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), + CheckDestroy: testAccCheckOpenIDConnectProviderDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/OpenIDConnectProvider/basic_v6.4.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckOpenIDConnectProviderExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/OpenIDConnectProvider/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + }, + }) +} + +// Resource Identity was added after v6.4.0 +func TestAccIAMOpenIDConnectProvider_Identity_ExistingResource_NoRefresh_NoChange(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_iam_openid_connect_provider.test" + rName := 
sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), + CheckDestroy: testAccCheckOpenIDConnectProviderDestroy(ctx), + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/OpenIDConnectProvider/basic_v6.4.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckOpenIDConnectProviderExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/OpenIDConnectProvider/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + }, + }) +} diff --git a/internal/service/iam/openid_connect_provider_tags_gen_test.go b/internal/service/iam/openid_connect_provider_tags_gen_test.go index ef51411bc4a6..294540031dfe 100644 --- a/internal/service/iam/openid_connect_provider_tags_gen_test.go +++ b/internal/service/iam/openid_connect_provider_tags_gen_test.go @@ -6,7 +6,6 @@ import ( 
"testing" "github.com/hashicorp/terraform-plugin-testing/config" - sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/knownvalue" "github.com/hashicorp/terraform-plugin-testing/plancheck" @@ -18,10 +17,11 @@ import ( func TestAccIAMOpenIDConnectProvider_tags(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_iam_openid_connect_provider.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: testAccCheckOpenIDConnectProviderDestroy(ctx), @@ -199,10 +199,11 @@ func TestAccIAMOpenIDConnectProvider_tags(t *testing.T) { func TestAccIAMOpenIDConnectProvider_tags_null(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_iam_openid_connect_provider.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: testAccCheckOpenIDConnectProviderDestroy(ctx), @@ -265,10 +266,11 @@ func TestAccIAMOpenIDConnectProvider_tags_null(t *testing.T) { func TestAccIAMOpenIDConnectProvider_tags_EmptyMap(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_iam_openid_connect_provider.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, 
ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: testAccCheckOpenIDConnectProviderDestroy(ctx), @@ -327,10 +329,11 @@ func TestAccIAMOpenIDConnectProvider_tags_EmptyMap(t *testing.T) { func TestAccIAMOpenIDConnectProvider_tags_AddOnUpdate(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_iam_openid_connect_provider.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: testAccCheckOpenIDConnectProviderDestroy(ctx), @@ -407,10 +410,11 @@ func TestAccIAMOpenIDConnectProvider_tags_AddOnUpdate(t *testing.T) { func TestAccIAMOpenIDConnectProvider_tags_EmptyTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_iam_openid_connect_provider.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: testAccCheckOpenIDConnectProviderDestroy(ctx), @@ -495,10 +499,11 @@ func TestAccIAMOpenIDConnectProvider_tags_EmptyTag_OnCreate(t *testing.T) { func TestAccIAMOpenIDConnectProvider_tags_EmptyTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_iam_openid_connect_provider.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: 
testAccCheckOpenIDConnectProviderDestroy(ctx), @@ -631,10 +636,11 @@ func TestAccIAMOpenIDConnectProvider_tags_EmptyTag_OnUpdate_Add(t *testing.T) { func TestAccIAMOpenIDConnectProvider_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_iam_openid_connect_provider.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: testAccCheckOpenIDConnectProviderDestroy(ctx), @@ -719,10 +725,11 @@ func TestAccIAMOpenIDConnectProvider_tags_EmptyTag_OnUpdate_Replace(t *testing.T func TestAccIAMOpenIDConnectProvider_tags_DefaultTags_providerOnly(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_iam_openid_connect_provider.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: testAccCheckOpenIDConnectProviderDestroy(ctx), @@ -899,10 +906,11 @@ func TestAccIAMOpenIDConnectProvider_tags_DefaultTags_providerOnly(t *testing.T) func TestAccIAMOpenIDConnectProvider_tags_DefaultTags_nonOverlapping(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_iam_openid_connect_provider.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: 
testAccCheckOpenIDConnectProviderDestroy(ctx), @@ -1058,10 +1066,11 @@ func TestAccIAMOpenIDConnectProvider_tags_DefaultTags_nonOverlapping(t *testing. func TestAccIAMOpenIDConnectProvider_tags_DefaultTags_overlapping(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_iam_openid_connect_provider.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: testAccCheckOpenIDConnectProviderDestroy(ctx), @@ -1233,10 +1242,11 @@ func TestAccIAMOpenIDConnectProvider_tags_DefaultTags_overlapping(t *testing.T) func TestAccIAMOpenIDConnectProvider_tags_DefaultTags_updateToProviderOnly(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_iam_openid_connect_provider.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: testAccCheckOpenIDConnectProviderDestroy(ctx), @@ -1322,10 +1332,11 @@ func TestAccIAMOpenIDConnectProvider_tags_DefaultTags_updateToProviderOnly(t *te func TestAccIAMOpenIDConnectProvider_tags_DefaultTags_updateToResourceOnly(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_iam_openid_connect_provider.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: 
testAccCheckOpenIDConnectProviderDestroy(ctx), @@ -1410,10 +1421,11 @@ func TestAccIAMOpenIDConnectProvider_tags_DefaultTags_updateToResourceOnly(t *te func TestAccIAMOpenIDConnectProvider_tags_DefaultTags_emptyResourceTag(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_iam_openid_connect_provider.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: testAccCheckOpenIDConnectProviderDestroy(ctx), @@ -1474,10 +1486,11 @@ func TestAccIAMOpenIDConnectProvider_tags_DefaultTags_emptyResourceTag(t *testin func TestAccIAMOpenIDConnectProvider_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_iam_openid_connect_provider.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: testAccCheckOpenIDConnectProviderDestroy(ctx), @@ -1530,10 +1543,11 @@ func TestAccIAMOpenIDConnectProvider_tags_DefaultTags_emptyProviderOnlyTag(t *te func TestAccIAMOpenIDConnectProvider_tags_DefaultTags_nullOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_iam_openid_connect_provider.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: 
testAccCheckOpenIDConnectProviderDestroy(ctx), @@ -1591,10 +1605,11 @@ func TestAccIAMOpenIDConnectProvider_tags_DefaultTags_nullOverlappingResourceTag func TestAccIAMOpenIDConnectProvider_tags_DefaultTags_nullNonOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_iam_openid_connect_provider.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: testAccCheckOpenIDConnectProviderDestroy(ctx), @@ -1652,10 +1667,11 @@ func TestAccIAMOpenIDConnectProvider_tags_DefaultTags_nullNonOverlappingResource func TestAccIAMOpenIDConnectProvider_tags_ComputedTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_iam_openid_connect_provider.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: testAccCheckOpenIDConnectProviderDestroy(ctx), @@ -1706,10 +1722,11 @@ func TestAccIAMOpenIDConnectProvider_tags_ComputedTag_OnCreate(t *testing.T) { func TestAccIAMOpenIDConnectProvider_tags_ComputedTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_iam_openid_connect_provider.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: 
testAccCheckOpenIDConnectProviderDestroy(ctx), @@ -1802,10 +1819,11 @@ func TestAccIAMOpenIDConnectProvider_tags_ComputedTag_OnUpdate_Add(t *testing.T) func TestAccIAMOpenIDConnectProvider_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_iam_openid_connect_provider.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: testAccCheckOpenIDConnectProviderDestroy(ctx), @@ -1888,10 +1906,11 @@ func TestAccIAMOpenIDConnectProvider_tags_ComputedTag_OnUpdate_Replace(t *testin func TestAccIAMOpenIDConnectProvider_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_iam_openid_connect_provider.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: testAccCheckOpenIDConnectProviderDestroy(ctx), @@ -2049,10 +2068,11 @@ func TestAccIAMOpenIDConnectProvider_tags_IgnoreTags_Overlap_DefaultTag(t *testi func TestAccIAMOpenIDConnectProvider_tags_IgnoreTags_Overlap_ResourceTag(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_iam_openid_connect_provider.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: 
testAccCheckOpenIDConnectProviderDestroy(ctx), diff --git a/internal/service/iam/policy.go b/internal/service/iam/policy.go index bf25102504fd..03fc3f78af6a 100644 --- a/internal/service/iam/policy.go +++ b/internal/service/iam/policy.go @@ -8,7 +8,6 @@ import ( "fmt" "log" "net/url" - "reflect" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/iam" @@ -25,6 +24,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/slices" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + inttypes "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/internal/verify" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -37,6 +37,8 @@ const ( // @SDKResource("aws_iam_policy", name="Policy") // @Tags(identifierAttribute="arn", resourceType="Policy") // @Testing(existsType="github.com/aws/aws-sdk-go-v2/service/iam/types;types.Policy") +// @ArnIdentity +// @Testing(preIdentityVersion="v6.4.0") func resourcePolicy() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourcePolicyCreate, @@ -44,10 +46,6 @@ func resourcePolicy() *schema.Resource { UpdateWithoutTimeout: resourcePolicyUpdate, DeleteWithoutTimeout: resourcePolicyDelete, - Importer: &schema.ResourceImporter{ - StateContext: schema.ImportStatePassthroughContext, - }, - Schema: map[string]*schema.Schema{ names.AttrARN: { Type: schema.TypeString, @@ -115,7 +113,7 @@ func resourcePolicyCreate(ctx context.Context, d *schema.ResourceData, meta any) } name := create.Name(d.Get(names.AttrName).(string), d.Get(names.AttrNamePrefix).(string)) - input := &iam.CreatePolicyInput{ + input := iam.CreatePolicyInput{ Description: aws.String(d.Get(names.AttrDescription).(string)), Path: aws.String(d.Get(names.AttrPath).(string)), PolicyDocument: aws.String(policy), @@ -123,14 +121,14 @@ func resourcePolicyCreate(ctx context.Context, d 
*schema.ResourceData, meta any) Tags: getTagsIn(ctx), } - output, err := conn.CreatePolicy(ctx, input) + output, err := conn.CreatePolicy(ctx, &input) // Some partitions (e.g. ISO) may not support tag-on-create. partition := meta.(*conns.AWSClient).Partition(ctx) if input.Tags != nil && errs.IsUnsupportedOperationInPartitionError(partition, err) { input.Tags = nil - output, err = conn.CreatePolicy(ctx, input) + output, err = conn.CreatePolicy(ctx, &input) } if err != nil { @@ -164,7 +162,7 @@ func resourcePolicyRead(ctx context.Context, d *schema.ResourceData, meta any) d policy *awstypes.Policy policyVersion *awstypes.PolicyVersion } - outputRaw, err := tfresource.RetryWhenNewResourceNotFound(ctx, propagationTimeout, func() (any, error) { + output, err := tfresource.RetryWhenNewResourceNotFound(ctx, propagationTimeout, func(ctx context.Context) (*policyWithVersion, error) { iamPolicy := &policyWithVersion{} if v, err := findPolicyByARN(ctx, conn, d.Id()); err == nil { @@ -173,7 +171,7 @@ func resourcePolicyRead(ctx context.Context, d *schema.ResourceData, meta any) d return nil, err } - if v, err := findPolicyVersion(ctx, conn, d.Id(), aws.ToString(iamPolicy.policy.DefaultVersionId)); err == nil { + if v, err := findPolicyVersionByTwoPartKey(ctx, conn, d.Id(), aws.ToString(iamPolicy.policy.DefaultVersionId)); err == nil { iamPolicy.policyVersion = v } else { return nil, err @@ -192,9 +190,7 @@ func resourcePolicyRead(ctx context.Context, d *schema.ResourceData, meta any) d return sdkdiag.AppendErrorf(diags, "reading IAM Policy (%s): %s", d.Id(), err) } - output := outputRaw.(*policyWithVersion) policy := output.policy - d.Set(names.AttrARN, policy.Arn) d.Set("attachment_count", policy.AttachmentCount) d.Set(names.AttrDescription, policy.Description) @@ -206,7 +202,6 @@ func resourcePolicyRead(ctx context.Context, d *schema.ResourceData, meta any) d setTagsOut(ctx, policy.Tags) policyDocument, err := url.QueryUnescape(aws.ToString(output.policyVersion.Document)) - 
if err != nil { return sdkdiag.AppendErrorf(diags, "parsing IAM Policy (%s) document: %s", d.Id(), err) } @@ -235,13 +230,13 @@ func resourcePolicyUpdate(ctx context.Context, d *schema.ResourceData, meta any) return sdkdiag.AppendErrorf(diags, "policy (%s) is invalid JSON: %s", policy, err) } - input := &iam.CreatePolicyVersionInput{ + input := iam.CreatePolicyVersionInput{ PolicyArn: aws.String(d.Id()), PolicyDocument: aws.String(policy), SetAsDefault: true, } - _, err = conn.CreatePolicyVersion(ctx, input) + _, err = conn.CreatePolicyVersion(ctx, &input) if err != nil { return sdkdiag.AppendErrorf(diags, "updating IAM Policy (%s): %s", d.Id(), err) @@ -277,9 +272,10 @@ func resourcePolicyDelete(ctx context.Context, d *schema.ResourceData, meta any) } log.Printf("[INFO] Deleting IAM Policy: %s", d.Id()) - _, err = conn.DeletePolicy(ctx, &iam.DeletePolicyInput{ + input := iam.DeletePolicyInput{ PolicyArn: aws.String(d.Id()), - }) + } + _, err = conn.DeletePolicy(ctx, &input) if errs.IsA[*awstypes.NoSuchEntityException](err) { return diags @@ -329,12 +325,12 @@ func policyPruneVersions(ctx context.Context, conn *iam.Client, arn string) erro } func policyDeleteVersion(ctx context.Context, conn *iam.Client, arn, versionID string) error { - input := &iam.DeletePolicyVersionInput{ + input := iam.DeletePolicyVersionInput{ PolicyArn: aws.String(arn), VersionId: aws.String(versionID), } - _, err := conn.DeletePolicyVersion(ctx, input) + _, err := conn.DeletePolicyVersion(ctx, &input) if err != nil { return fmt.Errorf("deleting IAM Policy (%s) version (%s): %w", arn, versionID, err) @@ -344,10 +340,14 @@ func policyDeleteVersion(ctx context.Context, conn *iam.Client, arn, versionID s } func findPolicyByARN(ctx context.Context, conn *iam.Client, arn string) (*awstypes.Policy, error) { - input := &iam.GetPolicyInput{ + input := iam.GetPolicyInput{ PolicyArn: aws.String(arn), } + return findPolicy(ctx, conn, &input) +} + +func findPolicy(ctx context.Context, conn *iam.Client, 
input *iam.GetPolicyInput) (*awstypes.Policy, error) { output, err := conn.GetPolicy(ctx, input) if errs.IsA[*awstypes.NoSuchEntityException](err) { @@ -369,12 +369,12 @@ func findPolicyByARN(ctx context.Context, conn *iam.Client, arn string) (*awstyp } func findPolicyByTwoPartKey(ctx context.Context, conn *iam.Client, name, pathPrefix string) (*awstypes.Policy, error) { - input := &iam.ListPoliciesInput{} + var input iam.ListPoliciesInput if pathPrefix != "" { input.PathPrefix = aws.String(pathPrefix) } - output, err := findPolicies(ctx, conn, input) + output, err := findPolicies(ctx, conn, &input) if err != nil { return nil, err @@ -401,7 +401,7 @@ func findPolicies(ctx context.Context, conn *iam.Client, input *iam.ListPolicies } for _, v := range page.Policies { - if !reflect.ValueOf(v).IsZero() { + if p := &v; !inttypes.IsZero(p) { output = append(output, v) } } @@ -410,12 +410,16 @@ func findPolicies(ctx context.Context, conn *iam.Client, input *iam.ListPolicies return output, nil } -func findPolicyVersion(ctx context.Context, conn *iam.Client, arn, versionID string) (*awstypes.PolicyVersion, error) { - input := &iam.GetPolicyVersionInput{ +func findPolicyVersionByTwoPartKey(ctx context.Context, conn *iam.Client, arn, versionID string) (*awstypes.PolicyVersion, error) { + input := iam.GetPolicyVersionInput{ PolicyArn: aws.String(arn), VersionId: aws.String(versionID), } + return findPolicyVersion(ctx, conn, &input) +} + +func findPolicyVersion(ctx context.Context, conn *iam.Client, input *iam.GetPolicyVersionInput) (*awstypes.PolicyVersion, error) { output, err := conn.GetPolicyVersion(ctx, input) if errs.IsA[*awstypes.NoSuchEntityException](err) { @@ -437,9 +441,14 @@ func findPolicyVersion(ctx context.Context, conn *iam.Client, arn, versionID str } func findPolicyVersionsByARN(ctx context.Context, conn *iam.Client, arn string) ([]awstypes.PolicyVersion, error) { - input := &iam.ListPolicyVersionsInput{ + input := iam.ListPolicyVersionsInput{ PolicyArn: 
aws.String(arn), } + + return findPolicyVersions(ctx, conn, &input) +} + +func findPolicyVersions(ctx context.Context, conn *iam.Client, input *iam.ListPolicyVersionsInput) ([]awstypes.PolicyVersion, error) { var output []awstypes.PolicyVersion pages := iam.NewListPolicyVersionsPaginator(conn, input) @@ -458,7 +467,7 @@ func findPolicyVersionsByARN(ctx context.Context, conn *iam.Client, arn string) } for _, v := range page.Versions { - if !reflect.ValueOf(v).IsZero() { + if p := &v; !inttypes.IsZero(p) { output = append(output, v) } } diff --git a/internal/service/iam/policy_attachment.go b/internal/service/iam/policy_attachment.go index 4f74c8e4e326..10dc4ad2ba19 100644 --- a/internal/service/iam/policy_attachment.go +++ b/internal/service/iam/policy_attachment.go @@ -7,7 +7,6 @@ import ( "context" "errors" "log" - "reflect" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/iam" @@ -22,6 +21,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/flex" tfslices "github.com/hashicorp/terraform-provider-aws/internal/slices" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + inttypes "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/internal/verify" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -275,10 +275,10 @@ func detachPolicyFromUsers(ctx context.Context, conn *iam.Client, users []string } func findEntitiesForPolicyByARN(ctx context.Context, conn *iam.Client, arn string) ([]string, []string, []string, error) { - input := &iam.ListEntitiesForPolicyInput{ + input := iam.ListEntitiesForPolicyInput{ PolicyArn: aws.String(arn), } - groups, roles, users, err := findEntitiesForPolicy(ctx, conn, input) + groups, roles, users, err := findEntitiesForPolicy(ctx, conn, &input) if err != nil { return nil, nil, nil, err @@ -316,17 +316,17 @@ func findEntitiesForPolicy(ctx context.Context, conn *iam.Client, input *iam.Lis } for _, v := range page.PolicyGroups 
{ - if !reflect.ValueOf(v).IsZero() { + if p := &v; !inttypes.IsZero(p) { groups = append(groups, v) } } for _, v := range page.PolicyRoles { - if !reflect.ValueOf(v).IsZero() { + if p := &v; !inttypes.IsZero(p) { roles = append(roles, v) } } for _, v := range page.PolicyUsers { - if !reflect.ValueOf(v).IsZero() { + if p := &v; !inttypes.IsZero(p) { users = append(users, v) } } diff --git a/internal/service/iam/policy_data_source.go b/internal/service/iam/policy_data_source.go index fafc5bc11a5e..19d2dad0ccc5 100644 --- a/internal/service/iam/policy_data_source.go +++ b/internal/service/iam/policy_data_source.go @@ -79,8 +79,8 @@ func dataSourcePolicyRead(ctx context.Context, d *schema.ResourceData, meta any) pathPrefix := d.Get("path_prefix").(string) if arn == "" { - outputRaw, err := tfresource.RetryWhenNotFound(ctx, propagationTimeout, - func() (any, error) { + output, err := tfresource.RetryWhenNotFound(ctx, propagationTimeout, + func(ctx context.Context) (*awstypes.Policy, error) { return findPolicyByTwoPartKey(ctx, conn, name, pathPrefix) }, ) @@ -89,7 +89,7 @@ func dataSourcePolicyRead(ctx context.Context, d *schema.ResourceData, meta any) return sdkdiag.AppendFromErr(diags, tfresource.SingularDataSourceFindError("IAM Policy", err)) } - arn = aws.ToString((outputRaw.(*awstypes.Policy)).Arn) + arn = aws.ToString(output.Arn) } // We need to make a call to `iam.GetPolicy` because `iam.ListPolicies` doesn't return all values @@ -111,9 +111,9 @@ func dataSourcePolicyRead(ctx context.Context, d *schema.ResourceData, meta any) setTagsOut(ctx, policy.Tags) - outputRaw, err := tfresource.RetryWhenNotFound(ctx, propagationTimeout, - func() (any, error) { - return findPolicyVersion(ctx, conn, arn, aws.ToString(policy.DefaultVersionId)) + output, err := tfresource.RetryWhenNotFound(ctx, propagationTimeout, + func(ctx context.Context) (*awstypes.PolicyVersion, error) { + return findPolicyVersionByTwoPartKey(ctx, conn, arn, aws.ToString(policy.DefaultVersionId)) }, ) @@ 
-121,7 +121,7 @@ func dataSourcePolicyRead(ctx context.Context, d *schema.ResourceData, meta any) return sdkdiag.AppendErrorf(diags, "reading IAM Policy (%s) default version: %s", arn, err) } - policyDocument, err := url.QueryUnescape(aws.ToString(outputRaw.(*awstypes.PolicyVersion).Document)) + policyDocument, err := url.QueryUnescape(aws.ToString(output.Document)) if err != nil { return sdkdiag.AppendErrorf(diags, "parsing IAM Policy (%s) document: %s", arn, err) } diff --git a/internal/service/iam/policy_data_source_tags_gen_test.go b/internal/service/iam/policy_data_source_tags_gen_test.go index d8221646cdd2..1561ecbd5a6f 100644 --- a/internal/service/iam/policy_data_source_tags_gen_test.go +++ b/internal/service/iam/policy_data_source_tags_gen_test.go @@ -8,7 +8,6 @@ import ( "unique" "github.com/hashicorp/terraform-plugin-testing/config" - sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/knownvalue" "github.com/hashicorp/terraform-plugin-testing/statecheck" @@ -22,10 +21,11 @@ import ( func TestAccIAMPolicyDataSource_tags(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_iam_policy.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -50,10 +50,11 @@ func TestAccIAMPolicyDataSource_tags(t *testing.T) { func TestAccIAMPolicyDataSource_tags_NullMap(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_iam_policy.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - 
resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -74,10 +75,11 @@ func TestAccIAMPolicyDataSource_tags_NullMap(t *testing.T) { func TestAccIAMPolicyDataSource_tags_EmptyMap(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_iam_policy.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -98,10 +100,11 @@ func TestAccIAMPolicyDataSource_tags_EmptyMap(t *testing.T) { func TestAccIAMPolicyDataSource_tags_DefaultTags_nonOverlapping(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_iam_policy.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), Steps: []resource.TestStep{ @@ -130,10 +133,11 @@ func TestAccIAMPolicyDataSource_tags_DefaultTags_nonOverlapping(t *testing.T) { func TestAccIAMPolicyDataSource_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_iam_policy.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: 
acctest.ErrorCheck(t, names.IAMServiceID), Steps: []resource.TestStep{ @@ -168,10 +172,11 @@ func TestAccIAMPolicyDataSource_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) func TestAccIAMPolicyDataSource_tags_IgnoreTags_Overlap_ResourceTag(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_iam_policy.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), Steps: []resource.TestStep{ diff --git a/internal/service/iam/policy_document_data_source.go b/internal/service/iam/policy_document_data_source.go index f3bd573d16ce..ca66419f0790 100644 --- a/internal/service/iam/policy_document_data_source.go +++ b/internal/service/iam/policy_document_data_source.go @@ -169,7 +169,7 @@ func dataSourcePolicyDocument() *schema.Resource { func dataSourcePolicyDocumentRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { var diags diag.Diagnostics - mergedDoc := &IAMPolicyDoc{} + mergedDoc := &iamPolicyDoc{} if v, ok := d.GetOk("source_policy_documents"); ok && len(v.([]any)) > 0 { // generate sid map to assure there are no duplicates in source jsons @@ -186,7 +186,7 @@ func dataSourcePolicyDocumentRead(ctx context.Context, d *schema.ResourceData, m continue } - sourceDoc := &IAMPolicyDoc{} + sourceDoc := &iamPolicyDoc{} if err := json.Unmarshal([]byte(sourceJSON.(string)), sourceDoc); err != nil { return sdkdiag.AppendErrorf(diags, "writing IAM Policy Document: merging source document %d: %s", sourceJSONIndex, err) } @@ -206,7 +206,7 @@ func dataSourcePolicyDocumentRead(ctx context.Context, d *schema.ResourceData, m } // process the current document - doc := &IAMPolicyDoc{ + doc := &iamPolicyDoc{ Version: d.Get(names.AttrVersion).(string), } @@ -216,12 +216,12 @@ 
func dataSourcePolicyDocumentRead(ctx context.Context, d *schema.ResourceData, m if cfgStmts, hasCfgStmts := d.GetOk("statement"); hasCfgStmts { var cfgStmtIntf = cfgStmts.([]any) - stmts := make([]*IAMPolicyStatement, len(cfgStmtIntf)) + stmts := make([]*iamPolicyStatement, len(cfgStmtIntf)) sidMap := make(map[string]struct{}) for i, stmtI := range cfgStmtIntf { cfgStmt := stmtI.(map[string]any) - stmt := &IAMPolicyStatement{ + stmt := &iamPolicyStatement{ Effect: cfgStmt["effect"].(string), } @@ -300,7 +300,7 @@ func dataSourcePolicyDocumentRead(ctx context.Context, d *schema.ResourceData, m if overrideJSON == nil { continue } - overrideDoc := &IAMPolicyDoc{} + overrideDoc := &iamPolicyDoc{} if err := json.Unmarshal([]byte(overrideJSON.(string)), overrideDoc); err != nil { return sdkdiag.AppendErrorf(diags, "writing IAM Policy Document: merging override document %d: %s", overrideJSONIndex, err) } @@ -353,12 +353,12 @@ func dataSourcePolicyDocumentReplaceVarsInList(in any, version string) (any, err } } -func dataSourcePolicyDocumentMakeConditions(in []any, version string) (IAMPolicyStatementConditionSet, error) { - out := make([]IAMPolicyStatementCondition, len(in)) +func dataSourcePolicyDocumentMakeConditions(in []any, version string) (iamPolicyStatementConditionSet, error) { + out := make([]iamPolicyStatementCondition, len(in)) for i, itemI := range in { var err error item := itemI.(map[string]any) - out[i] = IAMPolicyStatementCondition{ + out[i] = iamPolicyStatementCondition{ Test: item["test"].(string), Variable: item["variable"].(string), } @@ -374,15 +374,15 @@ func dataSourcePolicyDocumentMakeConditions(in []any, version string) (IAMPolicy out[i].Values = itemValues[0] } } - return IAMPolicyStatementConditionSet(out), nil + return iamPolicyStatementConditionSet(out), nil } -func dataSourcePolicyDocumentMakePrincipals(in []any, version string) (IAMPolicyStatementPrincipalSet, error) { - out := make([]IAMPolicyStatementPrincipal, len(in)) +func 
dataSourcePolicyDocumentMakePrincipals(in []any, version string) (iamPolicyStatementPrincipalSet, error) { + out := make([]iamPolicyStatementPrincipal, len(in)) for i, itemI := range in { var err error item := itemI.(map[string]any) - out[i] = IAMPolicyStatementPrincipal{ + out[i] = iamPolicyStatementPrincipal{ Type: item[names.AttrType].(string), } out[i].Identifiers, err = dataSourcePolicyDocumentReplaceVarsInList( @@ -394,5 +394,5 @@ func dataSourcePolicyDocumentMakePrincipals(in []any, version string) (IAMPolicy return nil, fmt.Errorf("reading identifiers: %w", err) } } - return IAMPolicyStatementPrincipalSet(out), nil + return iamPolicyStatementPrincipalSet(out), nil } diff --git a/internal/service/iam/policy_identity_gen_test.go b/internal/service/iam/policy_identity_gen_test.go new file mode 100644 index 000000000000..ac3b62c18a0a --- /dev/null +++ b/internal/service/iam/policy_identity_gen_test.go @@ -0,0 +1,218 @@ +// Code generated by internal/generate/identitytests/main.go; DO NOT EDIT. 
+ +package iam_test + +import ( + "testing" + + "github.com/aws/aws-sdk-go-v2/service/iam/types" + "github.com/hashicorp/terraform-plugin-testing/compare" + "github.com/hashicorp/terraform-plugin-testing/config" + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/knownvalue" + "github.com/hashicorp/terraform-plugin-testing/plancheck" + "github.com/hashicorp/terraform-plugin-testing/statecheck" + "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" + "github.com/hashicorp/terraform-plugin-testing/tfversion" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccIAMPolicy_Identity_Basic(t *testing.T) { + ctx := acctest.Context(t) + + var v types.Policy + resourceName := "aws_iam_policy.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), + CheckDestroy: testAccCheckPolicyDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + // Step 1: Setup + { + ConfigDirectory: config.StaticDirectory("testdata/Policy/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckPolicyExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New(names.AttrARN), compare.ValuesSame()), + 
statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + + // Step 2: Import command + { + ConfigDirectory: config.StaticDirectory("testdata/Policy/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ImportStateKind: resource.ImportCommandWithID, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + + // Step 3: Import block with Import ID + { + ConfigDirectory: config.StaticDirectory("testdata/Policy/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + }, + }, + }, + + // Step 4: Import block with Resource Identity + { + ConfigDirectory: config.StaticDirectory("testdata/Policy/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithResourceIdentity, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + }, + }, + }, + }, + }) +} + +// Resource Identity was added after v6.4.0 +func TestAccIAMPolicy_Identity_ExistingResource(t *testing.T) { + ctx := acctest.Context(t) + + var v types.Policy + resourceName := "aws_iam_policy.test" + rName := 
sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), + CheckDestroy: testAccCheckPolicyDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/Policy/basic_v6.4.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckPolicyExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Policy/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + }, + }) +} + +// Resource Identity was added after v6.4.0 +func TestAccIAMPolicy_Identity_ExistingResource_NoRefresh_NoChange(t *testing.T) { + ctx := acctest.Context(t) + + var v types.Policy + resourceName := "aws_iam_policy.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + 
TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), + CheckDestroy: testAccCheckPolicyDestroy(ctx), + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/Policy/basic_v6.4.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckPolicyExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Policy/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + }, + }) +} diff --git a/internal/service/iam/policy_model.go b/internal/service/iam/policy_model.go index bb65ae7244a2..4a9803cfc7a5 100644 --- a/internal/service/iam/policy_model.go +++ b/internal/service/iam/policy_model.go @@ -18,39 +18,40 @@ const ( policyModelMarshallJSONStartSliceSize = 2 ) -type IAMPolicyDoc struct { +type iamPolicyDoc struct { Version string `json:",omitempty"` Id string `json:",omitempty"` - Statements []*IAMPolicyStatement `json:"Statement,omitempty"` + Statements 
[]*iamPolicyStatement `json:"Statement,omitempty"` } -type IAMPolicyStatement struct { +type iamPolicyStatement struct { Sid string `json:",omitempty"` Effect string `json:",omitempty"` Actions any `json:"Action,omitempty"` NotActions any `json:"NotAction,omitempty"` Resources any `json:"Resource,omitempty"` NotResources any `json:"NotResource,omitempty"` - Principals IAMPolicyStatementPrincipalSet `json:"Principal,omitempty"` - NotPrincipals IAMPolicyStatementPrincipalSet `json:"NotPrincipal,omitempty"` - Conditions IAMPolicyStatementConditionSet `json:"Condition,omitempty"` + Principals iamPolicyStatementPrincipalSet `json:"Principal,omitempty"` + NotPrincipals iamPolicyStatementPrincipalSet `json:"NotPrincipal,omitempty"` + Conditions iamPolicyStatementConditionSet `json:"Condition,omitempty"` } -type IAMPolicyStatementPrincipal struct { +type iamPolicyStatementPrincipal struct { Type string Identifiers any } -type IAMPolicyStatementCondition struct { +type iamPolicyStatementPrincipalSet []iamPolicyStatementPrincipal + +type iamPolicyStatementCondition struct { Test string Variable string Values any } -type IAMPolicyStatementPrincipalSet []IAMPolicyStatementPrincipal -type IAMPolicyStatementConditionSet []IAMPolicyStatementCondition +type iamPolicyStatementConditionSet []iamPolicyStatementCondition -func (s *IAMPolicyDoc) Merge(newDoc *IAMPolicyDoc) { +func (s *iamPolicyDoc) Merge(newDoc *iamPolicyDoc) { // adopt newDoc's Id if len(newDoc.Id) > 0 { s.Id = newDoc.Id @@ -82,7 +83,7 @@ func (s *IAMPolicyDoc) Merge(newDoc *IAMPolicyDoc) { } } -func (ps IAMPolicyStatementPrincipalSet) MarshalJSON() ([]byte, error) { +func (ps iamPolicyStatementPrincipalSet) MarshalJSON() ([]byte, error) { raw := map[string]any{} // Although IAM documentation says that "*" and {"AWS": "*"} are equivalent @@ -137,8 +138,8 @@ func (ps IAMPolicyStatementPrincipalSet) MarshalJSON() ([]byte, error) { return json.Marshal(&raw) } -func (ps *IAMPolicyStatementPrincipalSet) UnmarshalJSON(b 
[]byte) error { - var out IAMPolicyStatementPrincipalSet +func (ps *iamPolicyStatementPrincipalSet) UnmarshalJSON(b []byte) error { + var out iamPolicyStatementPrincipalSet var data any if err := json.Unmarshal(b, &data); err != nil { @@ -147,19 +148,19 @@ func (ps *IAMPolicyStatementPrincipalSet) UnmarshalJSON(b []byte) error { switch t := data.(type) { case string: - out = append(out, IAMPolicyStatementPrincipal{Type: "*", Identifiers: []string{"*"}}) + out = append(out, iamPolicyStatementPrincipal{Type: "*", Identifiers: []string{"*"}}) case map[string]any: for key, value := range data.(map[string]any) { switch vt := value.(type) { case string: - out = append(out, IAMPolicyStatementPrincipal{Type: key, Identifiers: value.(string)}) + out = append(out, iamPolicyStatementPrincipal{Type: key, Identifiers: value.(string)}) case []any: values := []string{} for _, v := range value.([]any) { values = append(values, v.(string)) } slices.Sort(values) - out = append(out, IAMPolicyStatementPrincipal{Type: key, Identifiers: values}) + out = append(out, iamPolicyStatementPrincipal{Type: key, Identifiers: values}) default: return fmt.Errorf("Unsupported data type %T for IAMPolicyStatementPrincipalSet.Identifiers", vt) } @@ -172,7 +173,7 @@ func (ps *IAMPolicyStatementPrincipalSet) UnmarshalJSON(b []byte) error { return nil } -func (cs IAMPolicyStatementConditionSet) MarshalJSON() ([]byte, error) { +func (cs iamPolicyStatementConditionSet) MarshalJSON() ([]byte, error) { raw := map[string]map[string]any{} for _, c := range cs { @@ -206,8 +207,8 @@ func (cs IAMPolicyStatementConditionSet) MarshalJSON() ([]byte, error) { return json.Marshal(&raw) } -func (cs *IAMPolicyStatementConditionSet) UnmarshalJSON(b []byte) error { - var out IAMPolicyStatementConditionSet +func (cs *iamPolicyStatementConditionSet) UnmarshalJSON(b []byte) error { + var out iamPolicyStatementConditionSet var data map[string]map[string]any if err := json.Unmarshal(b, &data); err != nil { @@ -218,15 +219,15 
@@ func (cs *IAMPolicyStatementConditionSet) UnmarshalJSON(b []byte) error { for var_key, var_values := range test_value { switch var_values := var_values.(type) { case string: - out = append(out, IAMPolicyStatementCondition{Test: test_key, Variable: var_key, Values: []string{var_values}}) + out = append(out, iamPolicyStatementCondition{Test: test_key, Variable: var_key, Values: []string{var_values}}) case bool: - out = append(out, IAMPolicyStatementCondition{Test: test_key, Variable: var_key, Values: strconv.FormatBool(var_values)}) + out = append(out, iamPolicyStatementCondition{Test: test_key, Variable: var_key, Values: strconv.FormatBool(var_values)}) case []any: values := []string{} for _, v := range var_values { values = append(values, v.(string)) } - out = append(out, IAMPolicyStatementCondition{Test: test_key, Variable: var_key, Values: values}) + out = append(out, iamPolicyStatementCondition{Test: test_key, Variable: var_key, Values: values}) } } } @@ -248,11 +249,11 @@ func policyDecodeConfigStringList(lI []any) any { return ret } -// PolicyHasValidAWSPrincipals validates that the Principals in an IAM Policy are valid +// policyHasValidAWSPrincipals validates that the Principals in an IAM Policy are valid // Assumes that non-"AWS" Principals are valid // The value can be a single string or a slice of strings // Valid strings are either an ARN or an AWS account ID -func PolicyHasValidAWSPrincipals(policy string) (bool, error) { // nosemgrep:ci.aws-in-func-name +func policyHasValidAWSPrincipals(policy string) (bool, error) { // nosemgrep:ci.aws-in-func-name var policyData any err := json.Unmarshal([]byte(policy), &policyData) if err != nil { @@ -272,12 +273,12 @@ func PolicyHasValidAWSPrincipals(policy string) (bool, error) { // nosemgrep:ci. 
for _, principal := range principals { switch x := principal.(type) { case string: - if !IsValidPolicyAWSPrincipal(x) { + if !isValidPolicyAWSPrincipal(x) { return false, nil } case []string: for _, s := range x { - if !IsValidPolicyAWSPrincipal(s) { + if !isValidPolicyAWSPrincipal(s) { return false, nil } } @@ -287,9 +288,9 @@ func PolicyHasValidAWSPrincipals(policy string) (bool, error) { // nosemgrep:ci. return true, nil } -// IsValidPolicyAWSPrincipal returns true if a string is a valid AWS Princial for an IAM Policy document +// isValidPolicyAWSPrincipal returns true if a string is a valid AWS Princial for an IAM Policy document // That is: either an ARN, an AWS account ID, or `*` -func IsValidPolicyAWSPrincipal(principal string) bool { // nosemgrep:ci.aws-in-func-name +func isValidPolicyAWSPrincipal(principal string) bool { // nosemgrep:ci.aws-in-func-name if principal == "*" { return true } diff --git a/internal/service/iam/policy_tags_gen_test.go b/internal/service/iam/policy_tags_gen_test.go index 821ada261d82..a8f76f9731a9 100644 --- a/internal/service/iam/policy_tags_gen_test.go +++ b/internal/service/iam/policy_tags_gen_test.go @@ -7,7 +7,6 @@ import ( "github.com/aws/aws-sdk-go-v2/service/iam/types" "github.com/hashicorp/terraform-plugin-testing/config" - sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/knownvalue" "github.com/hashicorp/terraform-plugin-testing/plancheck" @@ -19,11 +18,12 @@ import ( func TestAccIAMPolicy_tags(t *testing.T) { ctx := acctest.Context(t) + var v types.Policy resourceName := "aws_iam_policy.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: 
acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: testAccCheckPolicyDestroy(ctx), @@ -201,11 +201,12 @@ func TestAccIAMPolicy_tags(t *testing.T) { func TestAccIAMPolicy_tags_null(t *testing.T) { ctx := acctest.Context(t) + var v types.Policy resourceName := "aws_iam_policy.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: testAccCheckPolicyDestroy(ctx), @@ -268,11 +269,12 @@ func TestAccIAMPolicy_tags_null(t *testing.T) { func TestAccIAMPolicy_tags_EmptyMap(t *testing.T) { ctx := acctest.Context(t) + var v types.Policy resourceName := "aws_iam_policy.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: testAccCheckPolicyDestroy(ctx), @@ -331,11 +333,12 @@ func TestAccIAMPolicy_tags_EmptyMap(t *testing.T) { func TestAccIAMPolicy_tags_AddOnUpdate(t *testing.T) { ctx := acctest.Context(t) + var v types.Policy resourceName := "aws_iam_policy.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: testAccCheckPolicyDestroy(ctx), @@ -412,11 +415,12 @@ func TestAccIAMPolicy_tags_AddOnUpdate(t *testing.T) { func TestAccIAMPolicy_tags_EmptyTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) + var v 
types.Policy resourceName := "aws_iam_policy.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: testAccCheckPolicyDestroy(ctx), @@ -501,11 +505,12 @@ func TestAccIAMPolicy_tags_EmptyTag_OnCreate(t *testing.T) { func TestAccIAMPolicy_tags_EmptyTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + var v types.Policy resourceName := "aws_iam_policy.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: testAccCheckPolicyDestroy(ctx), @@ -638,11 +643,12 @@ func TestAccIAMPolicy_tags_EmptyTag_OnUpdate_Add(t *testing.T) { func TestAccIAMPolicy_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + var v types.Policy resourceName := "aws_iam_policy.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: testAccCheckPolicyDestroy(ctx), @@ -727,11 +733,12 @@ func TestAccIAMPolicy_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { func TestAccIAMPolicy_tags_DefaultTags_providerOnly(t *testing.T) { ctx := acctest.Context(t) + var v types.Policy resourceName := "aws_iam_policy.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, 
acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: testAccCheckPolicyDestroy(ctx), @@ -908,11 +915,12 @@ func TestAccIAMPolicy_tags_DefaultTags_providerOnly(t *testing.T) { func TestAccIAMPolicy_tags_DefaultTags_nonOverlapping(t *testing.T) { ctx := acctest.Context(t) + var v types.Policy resourceName := "aws_iam_policy.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: testAccCheckPolicyDestroy(ctx), @@ -1068,11 +1076,12 @@ func TestAccIAMPolicy_tags_DefaultTags_nonOverlapping(t *testing.T) { func TestAccIAMPolicy_tags_DefaultTags_overlapping(t *testing.T) { ctx := acctest.Context(t) + var v types.Policy resourceName := "aws_iam_policy.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: testAccCheckPolicyDestroy(ctx), @@ -1244,11 +1253,12 @@ func TestAccIAMPolicy_tags_DefaultTags_overlapping(t *testing.T) { func TestAccIAMPolicy_tags_DefaultTags_updateToProviderOnly(t *testing.T) { ctx := acctest.Context(t) + var v types.Policy resourceName := "aws_iam_policy.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { 
acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: testAccCheckPolicyDestroy(ctx), @@ -1334,11 +1344,12 @@ func TestAccIAMPolicy_tags_DefaultTags_updateToProviderOnly(t *testing.T) { func TestAccIAMPolicy_tags_DefaultTags_updateToResourceOnly(t *testing.T) { ctx := acctest.Context(t) + var v types.Policy resourceName := "aws_iam_policy.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: testAccCheckPolicyDestroy(ctx), @@ -1423,11 +1434,12 @@ func TestAccIAMPolicy_tags_DefaultTags_updateToResourceOnly(t *testing.T) { func TestAccIAMPolicy_tags_DefaultTags_emptyResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v types.Policy resourceName := "aws_iam_policy.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: testAccCheckPolicyDestroy(ctx), @@ -1488,11 +1500,12 @@ func TestAccIAMPolicy_tags_DefaultTags_emptyResourceTag(t *testing.T) { func TestAccIAMPolicy_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T) { ctx := acctest.Context(t) + var v types.Policy resourceName := "aws_iam_policy.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: 
testAccCheckPolicyDestroy(ctx), @@ -1545,11 +1558,12 @@ func TestAccIAMPolicy_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T) { func TestAccIAMPolicy_tags_DefaultTags_nullOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v types.Policy resourceName := "aws_iam_policy.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: testAccCheckPolicyDestroy(ctx), @@ -1607,11 +1621,12 @@ func TestAccIAMPolicy_tags_DefaultTags_nullOverlappingResourceTag(t *testing.T) func TestAccIAMPolicy_tags_DefaultTags_nullNonOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v types.Policy resourceName := "aws_iam_policy.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: testAccCheckPolicyDestroy(ctx), @@ -1669,11 +1684,12 @@ func TestAccIAMPolicy_tags_DefaultTags_nullNonOverlappingResourceTag(t *testing. 
func TestAccIAMPolicy_tags_ComputedTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) + var v types.Policy resourceName := "aws_iam_policy.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: testAccCheckPolicyDestroy(ctx), @@ -1724,11 +1740,12 @@ func TestAccIAMPolicy_tags_ComputedTag_OnCreate(t *testing.T) { func TestAccIAMPolicy_tags_ComputedTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + var v types.Policy resourceName := "aws_iam_policy.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: testAccCheckPolicyDestroy(ctx), @@ -1821,11 +1838,12 @@ func TestAccIAMPolicy_tags_ComputedTag_OnUpdate_Add(t *testing.T) { func TestAccIAMPolicy_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + var v types.Policy resourceName := "aws_iam_policy.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: testAccCheckPolicyDestroy(ctx), @@ -1908,11 +1926,12 @@ func TestAccIAMPolicy_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { func TestAccIAMPolicy_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { ctx := acctest.Context(t) + var v types.Policy resourceName := 
"aws_iam_policy.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: testAccCheckPolicyDestroy(ctx), @@ -2070,11 +2089,12 @@ func TestAccIAMPolicy_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { func TestAccIAMPolicy_tags_IgnoreTags_Overlap_ResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v types.Policy resourceName := "aws_iam_policy.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: testAccCheckPolicyDestroy(ctx), diff --git a/internal/service/iam/role.go b/internal/service/iam/role.go index 8b78716f0ff2..4f625fd3f235 100644 --- a/internal/service/iam/role.go +++ b/internal/service/iam/role.go @@ -7,30 +7,42 @@ import ( "context" "errors" "fmt" + "iter" "log" "net/url" - "reflect" + "strings" "time" "github.com/YakDriver/regexache" "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/aws/arn" "github.com/aws/aws-sdk-go-v2/service/iam" awstypes "github.com/aws/aws-sdk-go-v2/service/iam/types" awspolicy "github.com/hashicorp/awspolicyequivalence" + "github.com/hashicorp/go-cty/cty" + frameworkdiag "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/list" + listschema "github.com/hashicorp/terraform-plugin-framework/list/schema" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" 
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/structure" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/create" "github.com/hashicorp/terraform-provider-aws/internal/errs" + "github.com/hashicorp/terraform-provider-aws/internal/errs/fwdiag" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" "github.com/hashicorp/terraform-provider-aws/internal/flex" + "github.com/hashicorp/terraform-provider-aws/internal/framework" + fwflex "github.com/hashicorp/terraform-provider-aws/internal/framework/flex" "github.com/hashicorp/terraform-provider-aws/internal/provider/sdkv2/importer" + "github.com/hashicorp/terraform-provider-aws/internal/retry" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + inttypes "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/internal/verify" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -43,7 +55,7 @@ const ( // @SDKResource("aws_iam_role", name="Role") // @Tags(identifierAttribute="name", resourceType="Role") // @IdentityAttribute("name") -// @WrappedImport(false) +// @CustomImport // @V60SDKv2Fix // @Testing(existsType="github.com/aws/aws-sdk-go-v2/service/iam/types;types.Role") // @Testing(idAttrDuplicates="name") @@ -56,7 +68,9 @@ func resourceRole() *schema.Resource { Importer: &schema.ResourceImporter{ StateContext: func(ctx context.Context, rd *schema.ResourceData, meta any) ([]*schema.ResourceData, error) { - if err := importer.GlobalSingleParameterized(ctx, rd, names.AttrName, meta.(importer.AWSClient)); err != nil { + identitySpec := importer.IdentitySpec(ctx) + 
+ if err := importer.GlobalSingleParameterized(ctx, rd, identitySpec, meta.(importer.AWSClient)); err != nil { return nil, err } @@ -198,6 +212,14 @@ func resourceRole() *schema.Resource { } } +// @SDKListResource("aws_iam_role") +func roleResourceAsListResource() inttypes.ListResourceForSDK { + l := roleListResource{} + l.SetResourceSchema(resourceRole()) + + return &l +} + func resourceRoleCreate(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { var diags diag.Diagnostics conn := meta.(*conns.AWSClient).IAMClient(ctx) @@ -208,7 +230,7 @@ func resourceRoleCreate(ctx context.Context, d *schema.ResourceData, meta any) d } name := create.Name(d.Get(names.AttrName).(string), d.Get(names.AttrNamePrefix).(string)) - input := &iam.CreateRoleInput{ + input := iam.CreateRoleInput{ AssumeRolePolicyDocument: aws.String(assumeRolePolicy), Path: aws.String(d.Get(names.AttrPath).(string)), RoleName: aws.String(name), @@ -227,14 +249,14 @@ func resourceRoleCreate(ctx context.Context, d *schema.ResourceData, meta any) d input.PermissionsBoundary = aws.String(v.(string)) } - output, err := retryCreateRole(ctx, conn, input) + output, err := retryCreateRole(ctx, conn, &input) // Some partitions (e.g. ISO) may not support tag-on-create. 
partition := meta.(*conns.AWSClient).Partition(ctx) if input.Tags != nil && errs.IsUnsupportedOperationInPartitionError(partition, err) { input.Tags = nil - output, err = retryCreateRole(ctx, conn, input) + output, err = retryCreateRole(ctx, conn, &input) } if err != nil { @@ -285,7 +307,7 @@ func resourceRoleRead(ctx context.Context, d *schema.ResourceData, meta any) dia var diags diag.Diagnostics conn := meta.(*conns.AWSClient).IAMClient(ctx) - outputRaw, err := tfresource.RetryWhenNewResourceNotFound(ctx, propagationTimeout, func() (any, error) { + role, err := tfresource.RetryWhenNewResourceNotFound(ctx, propagationTimeout, func(ctx context.Context) (*awstypes.Role, error) { return findRoleByName(ctx, conn, d.Id()) }, d.IsNewResource()) @@ -299,39 +321,17 @@ func resourceRoleRead(ctx context.Context, d *schema.ResourceData, meta any) dia return sdkdiag.AppendErrorf(diags, "reading IAM Role (%s): %s", d.Id(), err) } - role := outputRaw.(*awstypes.Role) - // occasionally, immediately after a role is created, AWS will give an ARN like AROAQ7SSZBKHREXAMPLE (unique ID) if role, err = waitRoleARNIsNotUniqueID(ctx, conn, d.Id(), role); err != nil { return sdkdiag.AppendErrorf(diags, "reading IAM Role (%s): waiting for valid ARN: %s", d.Id(), err) } - d.Set(names.AttrARN, role.Arn) - d.Set("create_date", role.CreateDate.Format(time.RFC3339)) - d.Set(names.AttrDescription, role.Description) - d.Set("max_session_duration", role.MaxSessionDuration) - d.Set(names.AttrName, role.RoleName) - d.Set(names.AttrNamePrefix, create.NamePrefixFromName(aws.ToString(role.RoleName))) - d.Set(names.AttrPath, role.Path) - if role.PermissionsBoundary != nil { - d.Set("permissions_boundary", role.PermissionsBoundary.PermissionsBoundaryArn) - } else { - d.Set("permissions_boundary", nil) - } - d.Set("unique_id", role.RoleId) - - assumeRolePolicy, err := url.QueryUnescape(aws.ToString(role.AssumeRolePolicyDocument)) - if err != nil { - return sdkdiag.AppendFromErr(diags, err) - } - - 
policyToSet, err := verify.PolicyToSet(d.Get("assume_role_policy").(string), assumeRolePolicy) - if err != nil { - return sdkdiag.AppendFromErr(diags, err) + diags = append(diags, resourceRoleFlatten(ctx, role, d)...) + if diags.HasError() { + return diags } - d.Set("assume_role_policy", policyToSet) - + // `inline_policy` is deprecated, so it's not included in resourceRoleFlatten. inlinePolicies, err := readRoleInlinePolicies(ctx, conn, aws.ToString(role.RoleName)) if err != nil { return sdkdiag.AppendErrorf(diags, "reading inline policies for IAM role %s, error: %s", d.Id(), err) @@ -348,14 +348,13 @@ func resourceRoleRead(ctx context.Context, d *schema.ResourceData, meta any) dia } } + // `managed_policy_arns` is deprecated, so it's not included in resourceRoleFlatten. policyARNs, err := findRoleAttachedPolicies(ctx, conn, d.Id()) if err != nil { return sdkdiag.AppendErrorf(diags, "reading IAM Policies attached to Role (%s): %s", d.Id(), err) } d.Set("managed_policy_arns", policyARNs) - setTagsOut(ctx, role.Tags) - return diags } @@ -369,14 +368,14 @@ func resourceRoleUpdate(ctx context.Context, d *schema.ResourceData, meta any) d return sdkdiag.AppendErrorf(diags, "assume_role_policy (%s) is invalid JSON: %s", assumeRolePolicy, err) } - input := &iam.UpdateAssumeRolePolicyInput{ + input := iam.UpdateAssumeRolePolicyInput{ RoleName: aws.String(d.Id()), PolicyDocument: aws.String(assumeRolePolicy), } _, err = tfresource.RetryWhen(ctx, propagationTimeout, - func() (any, error) { - return conn.UpdateAssumeRolePolicy(ctx, input) + func(ctx context.Context) (any, error) { + return conn.UpdateAssumeRolePolicy(ctx, &input) }, func(err error) (bool, error) { if errs.IsAErrorMessageContains[*awstypes.MalformedPolicyDocumentException](err, "Invalid principal in policy") { @@ -393,12 +392,12 @@ func resourceRoleUpdate(ctx context.Context, d *schema.ResourceData, meta any) d } if d.HasChange(names.AttrDescription) { - input := &iam.UpdateRoleDescriptionInput{ - RoleName: 
aws.String(d.Id()), + input := iam.UpdateRoleDescriptionInput{ Description: aws.String(d.Get(names.AttrDescription).(string)), + RoleName: aws.String(d.Id()), } - _, err := conn.UpdateRoleDescription(ctx, input) + _, err := conn.UpdateRoleDescription(ctx, &input) if err != nil { return sdkdiag.AppendErrorf(diags, "updating IAM Role (%s) description: %s", d.Id(), err) @@ -406,12 +405,12 @@ func resourceRoleUpdate(ctx context.Context, d *schema.ResourceData, meta any) d } if d.HasChange("max_session_duration") { - input := &iam.UpdateRoleInput{ - RoleName: aws.String(d.Id()), + input := iam.UpdateRoleInput{ MaxSessionDuration: aws.Int32(int32(d.Get("max_session_duration").(int))), + RoleName: aws.String(d.Id()), } - _, err := conn.UpdateRole(ctx, input) + _, err := conn.UpdateRole(ctx, &input) if err != nil { return sdkdiag.AppendErrorf(diags, "updating IAM Role (%s) MaxSessionDuration: %s", d.Id(), err) @@ -421,22 +420,22 @@ func resourceRoleUpdate(ctx context.Context, d *schema.ResourceData, meta any) d if d.HasChange("permissions_boundary") { permissionsBoundary := d.Get("permissions_boundary").(string) if permissionsBoundary != "" { - input := &iam.PutRolePermissionsBoundaryInput{ + input := iam.PutRolePermissionsBoundaryInput{ PermissionsBoundary: aws.String(permissionsBoundary), RoleName: aws.String(d.Id()), } - _, err := conn.PutRolePermissionsBoundary(ctx, input) + _, err := conn.PutRolePermissionsBoundary(ctx, &input) if err != nil { return sdkdiag.AppendErrorf(diags, "updating IAM Role (%s) permissions boundary: %s", d.Id(), err) } } else { - input := &iam.DeleteRolePermissionsBoundaryInput{ + input := iam.DeleteRolePermissionsBoundaryInput{ RoleName: aws.String(d.Id()), } - _, err := conn.DeleteRolePermissionsBoundary(ctx, input) + _, err := conn.DeleteRolePermissionsBoundary(ctx, &input) if err != nil { return sdkdiag.AppendErrorf(diags, "deleting IAM Role (%s) permissions boundary: %s", d.Id(), err) @@ -558,12 +557,12 @@ func deleteRole(ctx 
context.Context, conn *iam.Client, roleName string, forceDet } } - input := &iam.DeleteRoleInput{ + input := iam.DeleteRoleInput{ RoleName: aws.String(roleName), } - _, err := tfresource.RetryWhenIsA[*awstypes.DeleteConflictException](ctx, propagationTimeout, func() (any, error) { - return conn.DeleteRole(ctx, input) + _, err := tfresource.RetryWhenIsA[any, *awstypes.DeleteConflictException](ctx, propagationTimeout, func(ctx context.Context) (any, error) { + return conn.DeleteRole(ctx, &input) }) if errs.IsA[*awstypes.NoSuchEntityException](err) { @@ -588,12 +587,12 @@ func deleteRoleInstanceProfiles(ctx context.Context, conn *iam.Client, roleName for _, instanceProfile := range instanceProfiles { instanceProfileName := aws.ToString(instanceProfile.InstanceProfileName) - input := &iam.RemoveRoleFromInstanceProfileInput{ + input := iam.RemoveRoleFromInstanceProfileInput{ InstanceProfileName: aws.String(instanceProfileName), RoleName: aws.String(roleName), } - _, err := conn.RemoveRoleFromInstanceProfile(ctx, input) + _, err := conn.RemoveRoleFromInstanceProfile(ctx, &input) if errs.IsA[*awstypes.NoSuchEntityException](err) { continue @@ -609,7 +608,7 @@ func deleteRoleInstanceProfiles(ctx context.Context, conn *iam.Client, roleName func retryCreateRole(ctx context.Context, conn *iam.Client, input *iam.CreateRoleInput) (*iam.CreateRoleOutput, error) { outputRaw, err := tfresource.RetryWhen(ctx, propagationTimeout, - func() (any, error) { + func(ctx context.Context) (any, error) { return conn.CreateRole(ctx, input) }, func(err error) (bool, error) { @@ -637,11 +636,11 @@ func retryCreateRole(ctx context.Context, conn *iam.Client, input *iam.CreateRol } func findRoleByName(ctx context.Context, conn *iam.Client, name string) (*awstypes.Role, error) { - input := &iam.GetRoleInput{ + input := iam.GetRoleInput{ RoleName: aws.String(name), } - return findRole(ctx, conn, input) + return findRole(ctx, conn, &input) } func findRole(ctx context.Context, conn *iam.Client, input 
*iam.GetRoleInput) (*awstypes.Role, error) { @@ -649,8 +648,7 @@ func findRole(ctx context.Context, conn *iam.Client, input *iam.GetRoleInput) (* if errs.IsA[*awstypes.NoSuchEntityException](err) { return nil, &retry.NotFoundError{ - LastError: err, - LastRequest: input, + LastError: err, } } @@ -665,20 +663,121 @@ func findRole(ctx context.Context, conn *iam.Client, input *iam.GetRoleInput) (* return output.Role, nil } +const ( + roleARNIsUniqueIDState = "uniqueid" + roleNotFoundState = "notfound" +) + +func statusRoleCreate(conn *iam.Client, id string) retry.StateRefreshFunc { + return func(ctx context.Context) (any, string, error) { + role, err := findRoleByName(ctx, conn, id) + + if tfresource.NotFound(err) { + return nil, roleNotFoundState, nil + } + + if err != nil { + return nil, "", err + } + + if arn.IsARN(aws.ToString(role.Arn)) { + return role, names.AttrARN, nil + } + + return role, roleARNIsUniqueIDState, nil + } +} + +func waitRoleARNIsNotUniqueID(ctx context.Context, conn *iam.Client, id string, role *awstypes.Role) (*awstypes.Role, error) { + if arn.IsARN(aws.ToString(role.Arn)) { + return role, nil + } + + stateConf := &retry.StateChangeConf{ + Pending: []string{roleARNIsUniqueIDState, roleNotFoundState}, + Target: []string{names.AttrARN}, + Refresh: statusRoleCreate(conn, id), + Timeout: propagationTimeout, + NotFoundChecks: 10, + ContinuousTargetOccurence: 5, + Delay: 10 * time.Second, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + + if output, ok := outputRaw.(*awstypes.Role); ok { + return output, err + } + + return nil, err +} + +func listRoles(ctx context.Context, conn *iam.Client, input *iam.ListRolesInput) iter.Seq2[awstypes.Role, error] { + return func(yield func(awstypes.Role, error) bool) { + pages := iam.NewListRolesPaginator(conn, input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + if err != nil { + yield(awstypes.Role{}, err) + return + } + + for _, role := range page.Roles { + if !yield(role, nil) 
{ + return + } + } + } + } +} + +func resourceRoleFlatten(ctx context.Context, role *awstypes.Role, d *schema.ResourceData) diag.Diagnostics { + var diags diag.Diagnostics + + d.Set(names.AttrARN, role.Arn) + d.Set("create_date", role.CreateDate.Format(time.RFC3339)) + d.Set(names.AttrDescription, role.Description) + d.Set("max_session_duration", role.MaxSessionDuration) + d.Set(names.AttrName, role.RoleName) + d.Set(names.AttrNamePrefix, create.NamePrefixFromName(aws.ToString(role.RoleName))) + d.Set(names.AttrPath, role.Path) + if role.PermissionsBoundary != nil { + d.Set("permissions_boundary", role.PermissionsBoundary.PermissionsBoundaryArn) + } else { + d.Set("permissions_boundary", nil) + } + d.Set("unique_id", role.RoleId) + + assumeRolePolicy, err := url.QueryUnescape(aws.ToString(role.AssumeRolePolicyDocument)) + if err != nil { + return sdkdiag.AppendFromErr(diags, err) + } + + policyToSet, err := verify.PolicyToSet(d.Get("assume_role_policy").(string), assumeRolePolicy) + if err != nil { + return sdkdiag.AppendFromErr(diags, err) + } + + d.Set("assume_role_policy", policyToSet) + + setTagsOut(ctx, role.Tags) + + return diags +} + func findRoleAttachedPolicies(ctx context.Context, conn *iam.Client, roleName string) ([]string, error) { - input := &iam.ListAttachedRolePoliciesInput{ + input := iam.ListAttachedRolePoliciesInput{ RoleName: aws.String(roleName), } var output []string - pages := iam.NewListAttachedRolePoliciesPaginator(conn, input) + pages := iam.NewListAttachedRolePoliciesPaginator(conn, &input) for pages.HasMorePages() { page, err := pages.NextPage(ctx) if errs.IsA[*awstypes.NoSuchEntityException](err) { return nil, &retry.NotFoundError{ - LastError: err, - LastRequest: input, + LastError: err, } } @@ -687,7 +786,7 @@ func findRoleAttachedPolicies(ctx context.Context, conn *iam.Client, roleName st } for _, v := range page.AttachedPolicies { - if !reflect.ValueOf(v).IsZero() { + if p := &v; !inttypes.IsZero(p) { output = append(output, 
aws.ToString(v.PolicyArn)) } } @@ -697,19 +796,18 @@ func findRoleAttachedPolicies(ctx context.Context, conn *iam.Client, roleName st } func findRolePolicyNames(ctx context.Context, conn *iam.Client, roleName string) ([]string, error) { - input := &iam.ListRolePoliciesInput{ + input := iam.ListRolePoliciesInput{ RoleName: aws.String(roleName), } var output []string - pages := iam.NewListRolePoliciesPaginator(conn, input) + pages := iam.NewListRolePoliciesPaginator(conn, &input) for pages.HasMorePages() { page, err := pages.NextPage(ctx) if errs.IsA[*awstypes.NoSuchEntityException](err) { return nil, &retry.NotFoundError{ - LastError: err, - LastRequest: input, + LastError: err, } } @@ -731,12 +829,12 @@ func deleteRolePolicyAttachments(ctx context.Context, conn *iam.Client, roleName var errsList []error for _, policyARN := range policyARNs { - input := &iam.DetachRolePolicyInput{ + input := iam.DetachRolePolicyInput{ PolicyArn: aws.String(policyARN), RoleName: aws.String(roleName), } - _, err := conn.DetachRolePolicy(ctx, input) + _, err := conn.DetachRolePolicy(ctx, &input) if errs.IsA[*awstypes.NoSuchEntityException](err) { continue @@ -758,12 +856,12 @@ func deleteRoleInlinePolicies(ctx context.Context, conn *iam.Client, roleName st continue } - input := &iam.DeleteRolePolicyInput{ + input := iam.DeleteRolePolicyInput{ PolicyName: aws.String(policyName), RoleName: aws.String(roleName), } - _, err := conn.DeleteRolePolicy(ctx, input) + _, err := conn.DeleteRolePolicy(ctx, &input) if errs.IsA[*awstypes.NoSuchEntityException](err) { continue @@ -898,10 +996,11 @@ func readRoleInlinePolicies(ctx context.Context, conn *iam.Client, roleName stri var apiObjects []*iam.PutRolePolicyInput for _, policyName := range policyNames { - output, err := conn.GetRolePolicy(ctx, &iam.GetRolePolicyInput{ - RoleName: aws.String(roleName), + input := iam.GetRolePolicyInput{ PolicyName: aws.String(policyName), - }) + RoleName: aws.String(roleName), + } + output, err := 
conn.GetRolePolicy(ctx, &input) if err != nil { return nil, err @@ -918,9 +1017,9 @@ func readRoleInlinePolicies(ctx context.Context, conn *iam.Client, roleName stri } apiObject := &iam.PutRolePolicyInput{ - RoleName: aws.String(roleName), PolicyDocument: aws.String(p), PolicyName: aws.String(policyName), + RoleName: aws.String(roleName), } apiObjects = append(apiObjects, apiObject) @@ -999,3 +1098,142 @@ func roleTags(ctx context.Context, conn *iam.Client, identifier string, optFns . return output, nil } + +type roleListResource struct { + framework.ResourceWithConfigure + framework.ListResourceWithSDKv2Resource + framework.ListResourceWithSDKv2Tags +} + +type roleListResourceModel struct { +} + +func (l *roleListResource) ListResourceConfigSchema(ctx context.Context, request list.ListResourceSchemaRequest, response *list.ListResourceSchemaResponse) { + response.Schema = listschema.Schema{ + Attributes: map[string]listschema.Attribute{}, + } +} + +func (l *roleListResource) List(ctx context.Context, request list.ListRequest, stream *list.ListResultsStream) { + awsClient := l.Meta() + conn := awsClient.IAMClient(ctx) + + var query roleListResourceModel + if request.Config.Raw.IsKnown() && !request.Config.Raw.IsNull() { + if diags := request.Config.Get(ctx, &query); diags.HasError() { + stream.Results = list.ListResultsStreamDiagnostics(diags) + return + } + } + + var input iam.ListRolesInput + if diags := fwflex.Expand(ctx, query, &input); diags.HasError() { + stream.Results = list.ListResultsStreamDiagnostics(diags) + return + } + + stream.Results = func(yield func(list.ListResult) bool) { + result := request.NewListResult(ctx) + + for output, err := range listRoles(ctx, conn, &input) { + if err != nil { + result = fwdiag.NewListResultErrorDiagnostic(err) + yield(result) + return + } + + // Exclude Service-Linked Roles + if strings.HasPrefix(aws.ToString(output.Path), "/aws-service-role/") { + tflog.Debug(ctx, "Skipping resource", map[string]any{ + "skip_reason": 
"Service-Linked Role", + "role_name": aws.ToString(output.RoleName), + names.AttrPath: aws.ToString(output.Path), + }) + continue + } + + rd := l.ResourceData() + rd.SetId(aws.ToString(output.RoleName)) + result.Diagnostics.Append(translateDiags(resourceRoleFlatten(ctx, &output, rd))...) + if result.Diagnostics.HasError() { + yield(result) + return + } + + // set tags + err = l.SetTags(ctx, awsClient, rd) + if err != nil { + result = fwdiag.NewListResultErrorDiagnostic(err) + yield(result) + return + } + + result.DisplayName = aws.ToString(output.RoleName) + + l.SetResult(ctx, awsClient, request.IncludeResource, &result, rd) + if result.Diagnostics.HasError() { + yield(result) + return + } + + if !yield(result) { + return + } + } + } +} + +func translateDiags(in diag.Diagnostics) frameworkdiag.Diagnostics { + out := make(frameworkdiag.Diagnostics, len(in)) + for i, diagIn := range in { + var diagOut frameworkdiag.Diagnostic + if diagIn.Severity == diag.Error { + if len(diagIn.AttributePath) == 0 { + diagOut = frameworkdiag.NewErrorDiagnostic(diagIn.Summary, diagIn.Detail) + } else { + diagOut = frameworkdiag.NewAttributeErrorDiagnostic(translatePath(diagIn.AttributePath), diagIn.Summary, diagIn.Detail) + } + } else { + if len(diagIn.AttributePath) == 0 { + diagOut = frameworkdiag.NewWarningDiagnostic(diagIn.Summary, diagIn.Detail) + } else { + diagOut = frameworkdiag.NewAttributeWarningDiagnostic(translatePath(diagIn.AttributePath), diagIn.Summary, diagIn.Detail) + } + } + out[i] = diagOut + } + return out +} + +func translatePath(in cty.Path) path.Path { + var out path.Path + + if len(in) == 0 { + return out + } + + step := in[0] + switch v := step.(type) { + case cty.GetAttrStep: + out = path.Root(v.Name) + } + + for i := 1; i < len(in); i++ { + step := in[i] + switch v := step.(type) { + case cty.GetAttrStep: + out = out.AtName(v.Name) + + case cty.IndexStep: + switch v.Key.Type() { + case cty.Number: + v, _ := v.Key.AsBigFloat().Int64() + out = 
out.AtListIndex(int(v)) + case cty.String: + out = out.AtMapKey(v.Key.AsString()) + } + } + } + + return out +} diff --git a/internal/service/iam/role_data_source_tags_gen_test.go b/internal/service/iam/role_data_source_tags_gen_test.go index 03af9cca1120..ccac54781cc7 100644 --- a/internal/service/iam/role_data_source_tags_gen_test.go +++ b/internal/service/iam/role_data_source_tags_gen_test.go @@ -8,7 +8,6 @@ import ( "unique" "github.com/hashicorp/terraform-plugin-testing/config" - sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/knownvalue" "github.com/hashicorp/terraform-plugin-testing/statecheck" @@ -22,10 +21,11 @@ import ( func TestAccIAMRoleDataSource_tags(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_iam_role.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -50,10 +50,11 @@ func TestAccIAMRoleDataSource_tags(t *testing.T) { func TestAccIAMRoleDataSource_tags_NullMap(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_iam_role.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -74,10 +75,11 @@ func TestAccIAMRoleDataSource_tags_NullMap(t *testing.T) { func TestAccIAMRoleDataSource_tags_EmptyMap(t 
*testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_iam_role.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -98,10 +100,11 @@ func TestAccIAMRoleDataSource_tags_EmptyMap(t *testing.T) { func TestAccIAMRoleDataSource_tags_DefaultTags_nonOverlapping(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_iam_role.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), Steps: []resource.TestStep{ @@ -130,10 +133,11 @@ func TestAccIAMRoleDataSource_tags_DefaultTags_nonOverlapping(t *testing.T) { func TestAccIAMRoleDataSource_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_iam_role.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), Steps: []resource.TestStep{ @@ -168,10 +172,11 @@ func TestAccIAMRoleDataSource_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { func TestAccIAMRoleDataSource_tags_IgnoreTags_Overlap_ResourceTag(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_iam_role.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := 
acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), Steps: []resource.TestStep{ diff --git a/internal/service/iam/role_identity_gen_test.go b/internal/service/iam/role_identity_gen_test.go index d94d852fbaf9..b1807f6cd5a1 100644 --- a/internal/service/iam/role_identity_gen_test.go +++ b/internal/service/iam/role_identity_gen_test.go @@ -17,6 +17,7 @@ import ( "github.com/hashicorp/terraform-plugin-testing/tfversion" "github.com/hashicorp/terraform-provider-aws/internal/acctest" tfknownvalue "github.com/hashicorp/terraform-provider-aws/internal/acctest/knownvalue" + tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -27,7 +28,7 @@ func TestAccIAMRole_Identity_Basic(t *testing.T) { resourceName := "aws_iam_role.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ TerraformVersionChecks: []tfversion.TerraformVersionCheck{ tfversion.SkipBelow(tfversion.Version1_12_0), }, @@ -103,3 +104,133 @@ func TestAccIAMRole_Identity_Basic(t *testing.T) { }, }) } + +func TestAccIAMRole_Identity_ExistingResource(t *testing.T) { + ctx := acctest.Context(t) + + var v types.Role + resourceName := "aws_iam_role.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), + CheckDestroy: testAccCheckRoleDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: 
config.StaticDirectory("testdata/Role/basic_v5.100.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckRoleExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: v6.0 Identity error + { + ConfigDirectory: config.StaticDirectory("testdata/Role/basic_v6.0.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckRoleExists(ctx, resourceName, &v), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: knownvalue.Null(), + names.AttrName: knownvalue.Null(), + }), + }, + }, + + // Step 3: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Role/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrName: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, 
tfjsonpath.New(names.AttrName)), + }, + }, + }, + }) +} + +func TestAccIAMRole_Identity_ExistingResource_NoRefresh_NoChange(t *testing.T) { + ctx := acctest.Context(t) + + var v types.Role + resourceName := "aws_iam_role.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), + CheckDestroy: testAccCheckRoleDestroy(ctx), + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/Role/basic_v5.100.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckRoleExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Role/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckRoleExists(ctx, resourceName, &v), + ), + }, + }, + }) +} diff --git a/internal/service/iam/role_list_test.go b/internal/service/iam/role_list_test.go new file mode 100644 index 000000000000..a7e851de2006 --- /dev/null +++ b/internal/service/iam/role_list_test.go @@ -0,0 +1,79 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package iam_test + +import ( + "testing" + + "github.com/hashicorp/terraform-plugin-testing/config" + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/knownvalue" + "github.com/hashicorp/terraform-plugin-testing/querycheck" + "github.com/hashicorp/terraform-plugin-testing/statecheck" + "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" + "github.com/hashicorp/terraform-plugin-testing/tfversion" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + tfknownvalue "github.com/hashicorp/terraform-provider-aws/internal/acctest/knownvalue" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccIAMRole_List_Basic(t *testing.T) { + ctx := acctest.Context(t) + + resourceName1 := "aws_iam_role.test[0]" + resourceName2 := "aws_iam_role.test[1]" + resourceName3 := "aws_iam_role.test[2]" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_14_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), + CheckDestroy: testAccCheckRoleDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Setup + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Role/list_basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName1, tfjsonpath.New(names.AttrARN), tfknownvalue.GlobalARNExact("iam", "role/"+rName+"-0")), + statecheck.ExpectKnownValue(resourceName2, tfjsonpath.New(names.AttrARN), tfknownvalue.GlobalARNExact("iam", "role/"+rName+"-1")), + 
statecheck.ExpectKnownValue(resourceName3, tfjsonpath.New(names.AttrARN), tfknownvalue.GlobalARNExact("iam", "role/"+rName+"-2")), + }, + }, + + // Step 2: Query + { + Query: true, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Role/list_basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + QueryResultChecks: []querycheck.QueryResultCheck{ + querycheck.ExpectIdentity("aws_iam_role.test", map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrName: knownvalue.StringExact(rName + "-0"), + }), + + querycheck.ExpectIdentity("aws_iam_role.test", map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrName: knownvalue.StringExact(rName + "-1"), + }), + + querycheck.ExpectIdentity("aws_iam_role.test", map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrName: knownvalue.StringExact(rName + "-2"), + }), + }, + }, + }, + }) +} diff --git a/internal/service/iam/role_policy.go b/internal/service/iam/role_policy.go index a1657132a82a..0de22e1ecb0a 100644 --- a/internal/service/iam/role_policy.go +++ b/internal/service/iam/role_policy.go @@ -37,6 +37,7 @@ const ( // @IdAttrFormat("{role}:{name}") // @ImportIDHandler("rolePolicyImportID") // @Testing(existsType="string") +// @Testing(preIdentityVersion="6.0.0") func resourceRolePolicy() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceRolePolicyPut, @@ -108,7 +109,7 @@ func resourceRolePolicyPut(ctx context.Context, d *schema.ResourceData, meta any if d.IsNewResource() { d.SetId(createRolePolicyImportID(roleName, policyName)) - _, err := tfresource.RetryWhenNotFound(ctx, propagationTimeout, func() (any, error) { + _, err := tfresource.RetryWhenNotFound(ctx, propagationTimeout, func(ctx context.Context) (any, error) { return findRolePolicyByTwoPartKey(ctx, conn, roleName, policyName) }) 
diff --git a/internal/service/iam/role_policy_attachment.go b/internal/service/iam/role_policy_attachment.go index aff7eaa109c4..550c90889194 100644 --- a/internal/service/iam/role_policy_attachment.go +++ b/internal/service/iam/role_policy_attachment.go @@ -29,6 +29,7 @@ import ( // @IdentityAttribute("policy_arn") // @IdAttrFormat("{role}/{policy_arn}") // @ImportIDHandler("rolePolicyAttachmentImportID") +// @Testing(preIdentityVersion="6.0.0") func resourceRolePolicyAttachment() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceRolePolicyAttachmentCreate, @@ -76,7 +77,7 @@ func resourceRolePolicyAttachmentRead(ctx context.Context, d *schema.ResourceDat // Human friendly ID for error messages since d.Id() is non-descriptive. id := fmt.Sprintf("%s:%s", role, policyARN) - _, err := tfresource.RetryWhenNewResourceNotFound(ctx, propagationTimeout, func() (any, error) { + _, err := tfresource.RetryWhenNewResourceNotFound(ctx, propagationTimeout, func(ctx context.Context) (any, error) { return findAttachedRolePolicyByTwoPartKey(ctx, conn, role, policyARN) }, d.IsNewResource()) @@ -106,7 +107,7 @@ func resourceRolePolicyAttachmentDelete(ctx context.Context, d *schema.ResourceD func attachPolicyToRole(ctx context.Context, conn *iam.Client, role, policyARN string) error { var errConcurrentModificationException *awstypes.ConcurrentModificationException - _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, propagationTimeout, func() (any, error) { + _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, propagationTimeout, func(ctx context.Context) (any, error) { return conn.AttachRolePolicy(ctx, &iam.AttachRolePolicyInput{ PolicyArn: aws.String(policyARN), RoleName: aws.String(role), @@ -122,7 +123,7 @@ func attachPolicyToRole(ctx context.Context, conn *iam.Client, role, policyARN s func detachPolicyFromRole(ctx context.Context, conn *iam.Client, role, policyARN string) error { var errConcurrentModificationException 
*awstypes.ConcurrentModificationException - _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, propagationTimeout, func() (any, error) { + _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, propagationTimeout, func(ctx context.Context) (any, error) { return conn.DetachRolePolicy(ctx, &iam.DetachRolePolicyInput{ PolicyArn: aws.String(policyARN), RoleName: aws.String(role), diff --git a/internal/service/iam/role_policy_attachment_identity_gen_test.go b/internal/service/iam/role_policy_attachment_identity_gen_test.go index b2bd0f1a865c..39a6f6a0ac9f 100644 --- a/internal/service/iam/role_policy_attachment_identity_gen_test.go +++ b/internal/service/iam/role_policy_attachment_identity_gen_test.go @@ -21,10 +21,11 @@ import ( func TestAccIAMRolePolicyAttachment_Identity_Basic(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_iam_role_policy_attachment.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ TerraformVersionChecks: []tfversion.TerraformVersionCheck{ tfversion.SkipBelow(tfversion.Version1_12_0), }, @@ -102,3 +103,118 @@ func TestAccIAMRolePolicyAttachment_Identity_Basic(t *testing.T) { }, }) } + +// Resource Identity was added after v6.0.0 +func TestAccIAMRolePolicyAttachment_Identity_ExistingResource(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_iam_role_policy_attachment.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), + CheckDestroy: testAccCheckRolePolicyAttachmentDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: 
config.StaticDirectory("testdata/RolePolicyAttachment/basic_v6.0.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckRolePolicyAttachmentExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/RolePolicyAttachment/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRole: knownvalue.NotNull(), + "policy_arn": knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrRole)), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New("policy_arn")), + }, + }, + }, + }) +} + +// Resource Identity was added after v6.0.0 +func TestAccIAMRolePolicyAttachment_Identity_ExistingResource_NoRefresh_NoChange(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_iam_role_policy_attachment.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), + CheckDestroy: 
testAccCheckRolePolicyAttachmentDestroy(ctx), + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/RolePolicyAttachment/basic_v6.0.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckRolePolicyAttachmentExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/RolePolicyAttachment/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + }, + }) +} diff --git a/internal/service/iam/role_policy_attachment_test.go b/internal/service/iam/role_policy_attachment_test.go index a9e3aa7055ab..e99ca6a3276b 100644 --- a/internal/service/iam/role_policy_attachment_test.go +++ b/internal/service/iam/role_policy_attachment_test.go @@ -14,15 +14,8 @@ import ( awstypes "github.com/aws/aws-sdk-go-v2/service/iam/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" - "github.com/hashicorp/terraform-plugin-testing/knownvalue" - "github.com/hashicorp/terraform-plugin-testing/plancheck" - "github.com/hashicorp/terraform-plugin-testing/statecheck" 
"github.com/hashicorp/terraform-plugin-testing/terraform" - "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" - "github.com/hashicorp/terraform-plugin-testing/tfversion" "github.com/hashicorp/terraform-provider-aws/internal/acctest" - tfknownvalue "github.com/hashicorp/terraform-provider-aws/internal/acctest/knownvalue" - tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" "github.com/hashicorp/terraform-provider-aws/internal/conns" tfiam "github.com/hashicorp/terraform-provider-aws/internal/service/iam" tfslices "github.com/hashicorp/terraform-provider-aws/internal/slices" @@ -134,64 +127,6 @@ func TestAccIAMRolePolicyAttachment_Disappears_role(t *testing.T) { }) } -// Resource Identity was added in v6.1 -func TestAccIAMRolePolicyAttachment_Identity_ExistingResource(t *testing.T) { - ctx := acctest.Context(t) - roleName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - policyName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resourceName := "aws_iam_role_policy_attachment.test1" - - resource.ParallelTest(t, resource.TestCase{ - TerraformVersionChecks: []tfversion.TerraformVersionCheck{ - tfversion.SkipBelow(tfversion.Version1_12_0), - }, - PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), - CheckDestroy: testAccCheckRolePolicyAttachmentDestroy(ctx), - Steps: []resource.TestStep{ - { - ExternalProviders: map[string]resource.ExternalProvider{ - "aws": { - Source: "hashicorp/aws", - VersionConstraint: "6.0.0", - }, - }, - Config: testAccRolePolicyAttachmentConfig_attach(roleName, policyName), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckRolePolicyAttachmentExists(ctx, resourceName), - ), - ConfigStateChecks: []statecheck.StateCheck{ - tfstatecheck.ExpectNoIdentity(resourceName), - }, - }, - { - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - Config: testAccRolePolicyAttachmentConfig_attach(roleName, policyName), - Check: 
resource.ComposeAggregateTestCheckFunc( - testAccCheckRolePolicyAttachmentExists(ctx, resourceName), - ), - ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - PostApplyPostRefresh: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - }, - ConfigStateChecks: []statecheck.StateCheck{ - statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ - names.AttrAccountID: tfknownvalue.AccountID(), - names.AttrRole: knownvalue.NotNull(), - "policy_arn": knownvalue.NotNull(), - }), - statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrRole)), - statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New("policy_arn")), - }, - }, - }, - }) -} - func testAccCheckRolePolicyAttachmentDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { conn := acctest.Provider.Meta().(*conns.AWSClient).IAMClient(ctx) diff --git a/internal/service/iam/role_policy_identity_gen_test.go b/internal/service/iam/role_policy_identity_gen_test.go index bfaacfcdbd05..d19da6861655 100644 --- a/internal/service/iam/role_policy_identity_gen_test.go +++ b/internal/service/iam/role_policy_identity_gen_test.go @@ -26,7 +26,7 @@ func TestAccIAMRolePolicy_Identity_Basic(t *testing.T) { resourceName := "aws_iam_role_policy.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ TerraformVersionChecks: []tfversion.TerraformVersionCheck{ tfversion.SkipBelow(tfversion.Version1_12_0), }, @@ -104,3 +104,120 @@ func TestAccIAMRolePolicy_Identity_Basic(t *testing.T) { }, }) } + +// Resource Identity was added after v6.0.0 +func TestAccIAMRolePolicy_Identity_ExistingResource(t *testing.T) { + ctx := acctest.Context(t) + + var v string + resourceName := 
"aws_iam_role_policy.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), + CheckDestroy: testAccCheckRolePolicyDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/RolePolicy/basic_v6.0.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckRolePolicyExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/RolePolicy/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRole: knownvalue.NotNull(), + names.AttrName: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrRole)), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrName)), + }, + }, + }, + }) +} + +// Resource Identity was added after v6.0.0 +func 
TestAccIAMRolePolicy_Identity_ExistingResource_NoRefresh_NoChange(t *testing.T) { + ctx := acctest.Context(t) + + var v string + resourceName := "aws_iam_role_policy.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), + CheckDestroy: testAccCheckRolePolicyDestroy(ctx), + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/RolePolicy/basic_v6.0.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckRolePolicyExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/RolePolicy/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + }, + }) +} diff --git a/internal/service/iam/role_policy_test.go b/internal/service/iam/role_policy_test.go index 81192667af3b..45e0f31bd3e6 100644 --- a/internal/service/iam/role_policy_test.go +++ 
b/internal/service/iam/role_policy_test.go @@ -240,7 +240,7 @@ func TestAccIAMRolePolicy_unknownsInPolicy(t *testing.T) { } // Resource Identity was added in v6.1 -func TestAccIAMRolePolicy_Identity_ExistingResource(t *testing.T) { +func TestAccIAMRolePolicy_Identity_old(t *testing.T) { ctx := acctest.Context(t) var rolePolicy string rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) diff --git a/internal/service/iam/role_tags_gen_test.go b/internal/service/iam/role_tags_gen_test.go index 67bb4e4836b7..b638fc651d8c 100644 --- a/internal/service/iam/role_tags_gen_test.go +++ b/internal/service/iam/role_tags_gen_test.go @@ -7,7 +7,6 @@ import ( "github.com/aws/aws-sdk-go-v2/service/iam/types" "github.com/hashicorp/terraform-plugin-testing/config" - sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/knownvalue" "github.com/hashicorp/terraform-plugin-testing/plancheck" @@ -19,11 +18,12 @@ import ( func TestAccIAMRole_tags(t *testing.T) { ctx := acctest.Context(t) + var v types.Role resourceName := "aws_iam_role.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: testAccCheckRoleDestroy(ctx), @@ -201,11 +201,12 @@ func TestAccIAMRole_tags(t *testing.T) { func TestAccIAMRole_tags_null(t *testing.T) { ctx := acctest.Context(t) + var v types.Role resourceName := "aws_iam_role.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, 
ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: testAccCheckRoleDestroy(ctx), @@ -268,11 +269,12 @@ func TestAccIAMRole_tags_null(t *testing.T) { func TestAccIAMRole_tags_EmptyMap(t *testing.T) { ctx := acctest.Context(t) + var v types.Role resourceName := "aws_iam_role.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: testAccCheckRoleDestroy(ctx), @@ -331,11 +333,12 @@ func TestAccIAMRole_tags_EmptyMap(t *testing.T) { func TestAccIAMRole_tags_AddOnUpdate(t *testing.T) { ctx := acctest.Context(t) + var v types.Role resourceName := "aws_iam_role.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: testAccCheckRoleDestroy(ctx), @@ -412,11 +415,12 @@ func TestAccIAMRole_tags_AddOnUpdate(t *testing.T) { func TestAccIAMRole_tags_EmptyTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) + var v types.Role resourceName := "aws_iam_role.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: testAccCheckRoleDestroy(ctx), @@ -501,11 +505,12 @@ func TestAccIAMRole_tags_EmptyTag_OnCreate(t *testing.T) { func TestAccIAMRole_tags_EmptyTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + var 
v types.Role resourceName := "aws_iam_role.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: testAccCheckRoleDestroy(ctx), @@ -638,11 +643,12 @@ func TestAccIAMRole_tags_EmptyTag_OnUpdate_Add(t *testing.T) { func TestAccIAMRole_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + var v types.Role resourceName := "aws_iam_role.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: testAccCheckRoleDestroy(ctx), @@ -727,11 +733,12 @@ func TestAccIAMRole_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { func TestAccIAMRole_tags_DefaultTags_providerOnly(t *testing.T) { ctx := acctest.Context(t) + var v types.Role resourceName := "aws_iam_role.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: testAccCheckRoleDestroy(ctx), @@ -908,11 +915,12 @@ func TestAccIAMRole_tags_DefaultTags_providerOnly(t *testing.T) { func TestAccIAMRole_tags_DefaultTags_nonOverlapping(t *testing.T) { ctx := acctest.Context(t) + var v types.Role resourceName := "aws_iam_role.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - 
resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: testAccCheckRoleDestroy(ctx), @@ -1068,11 +1076,12 @@ func TestAccIAMRole_tags_DefaultTags_nonOverlapping(t *testing.T) { func TestAccIAMRole_tags_DefaultTags_overlapping(t *testing.T) { ctx := acctest.Context(t) + var v types.Role resourceName := "aws_iam_role.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: testAccCheckRoleDestroy(ctx), @@ -1244,11 +1253,12 @@ func TestAccIAMRole_tags_DefaultTags_overlapping(t *testing.T) { func TestAccIAMRole_tags_DefaultTags_updateToProviderOnly(t *testing.T) { ctx := acctest.Context(t) + var v types.Role resourceName := "aws_iam_role.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: testAccCheckRoleDestroy(ctx), @@ -1334,11 +1344,12 @@ func TestAccIAMRole_tags_DefaultTags_updateToProviderOnly(t *testing.T) { func TestAccIAMRole_tags_DefaultTags_updateToResourceOnly(t *testing.T) { ctx := acctest.Context(t) + var v types.Role resourceName := "aws_iam_role.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: 
acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: testAccCheckRoleDestroy(ctx), @@ -1423,11 +1434,12 @@ func TestAccIAMRole_tags_DefaultTags_updateToResourceOnly(t *testing.T) { func TestAccIAMRole_tags_DefaultTags_emptyResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v types.Role resourceName := "aws_iam_role.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: testAccCheckRoleDestroy(ctx), @@ -1488,11 +1500,12 @@ func TestAccIAMRole_tags_DefaultTags_emptyResourceTag(t *testing.T) { func TestAccIAMRole_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T) { ctx := acctest.Context(t) + var v types.Role resourceName := "aws_iam_role.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: testAccCheckRoleDestroy(ctx), @@ -1545,11 +1558,12 @@ func TestAccIAMRole_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T) { func TestAccIAMRole_tags_DefaultTags_nullOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v types.Role resourceName := "aws_iam_role.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: testAccCheckRoleDestroy(ctx), @@ -1607,11 +1621,12 @@ func 
TestAccIAMRole_tags_DefaultTags_nullOverlappingResourceTag(t *testing.T) { func TestAccIAMRole_tags_DefaultTags_nullNonOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v types.Role resourceName := "aws_iam_role.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: testAccCheckRoleDestroy(ctx), @@ -1669,11 +1684,12 @@ func TestAccIAMRole_tags_DefaultTags_nullNonOverlappingResourceTag(t *testing.T) func TestAccIAMRole_tags_ComputedTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) + var v types.Role resourceName := "aws_iam_role.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: testAccCheckRoleDestroy(ctx), @@ -1724,11 +1740,12 @@ func TestAccIAMRole_tags_ComputedTag_OnCreate(t *testing.T) { func TestAccIAMRole_tags_ComputedTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + var v types.Role resourceName := "aws_iam_role.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: testAccCheckRoleDestroy(ctx), @@ -1821,11 +1838,12 @@ func TestAccIAMRole_tags_ComputedTag_OnUpdate_Add(t *testing.T) { func TestAccIAMRole_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { ctx := 
acctest.Context(t) + var v types.Role resourceName := "aws_iam_role.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: testAccCheckRoleDestroy(ctx), @@ -1908,11 +1926,12 @@ func TestAccIAMRole_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { func TestAccIAMRole_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { ctx := acctest.Context(t) + var v types.Role resourceName := "aws_iam_role.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: testAccCheckRoleDestroy(ctx), @@ -2070,11 +2089,12 @@ func TestAccIAMRole_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { func TestAccIAMRole_tags_IgnoreTags_Overlap_ResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v types.Role resourceName := "aws_iam_role.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: testAccCheckRoleDestroy(ctx), diff --git a/internal/service/iam/role_test.go b/internal/service/iam/role_test.go index c7a77ee3f99a..b5c4b830ad7b 100644 --- a/internal/service/iam/role_test.go +++ b/internal/service/iam/role_test.go @@ -15,15 +15,9 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" sdkacctest 
"github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" - "github.com/hashicorp/terraform-plugin-testing/knownvalue" "github.com/hashicorp/terraform-plugin-testing/plancheck" - "github.com/hashicorp/terraform-plugin-testing/statecheck" "github.com/hashicorp/terraform-plugin-testing/terraform" - "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" - "github.com/hashicorp/terraform-plugin-testing/tfversion" "github.com/hashicorp/terraform-provider-aws/internal/acctest" - tfknownvalue "github.com/hashicorp/terraform-provider-aws/internal/acctest/knownvalue" - tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/errs" tfiam "github.com/hashicorp/terraform-provider-aws/internal/service/iam" @@ -45,10 +39,11 @@ func TestAccIAMRole_basic(t *testing.T) { Steps: []resource.TestStep{ { Config: testAccRoleConfig_basic(rName), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( testAccCheckRoleExists(ctx, resourceName, &conf), resource.TestCheckResourceAttr(resourceName, names.AttrPath, "/"), resource.TestCheckResourceAttrSet(resourceName, "create_date"), + acctest.CheckResourceAttrGlobalARNFormat(ctx, resourceName, names.AttrARN, "iam", "role/{name}"), ), }, { @@ -74,7 +69,7 @@ func TestAccIAMRole_description(t *testing.T) { Steps: []resource.TestStep{ { Config: testAccRoleConfig_description(rName), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( testAccCheckRoleExists(ctx, resourceName, &conf), resource.TestCheckResourceAttr(resourceName, names.AttrPath, "/"), resource.TestCheckResourceAttr(resourceName, names.AttrDescription, "This 1s a D3scr!pti0n with weird content: &@90ë\"'{«¡Çø}"), @@ -87,7 +82,7 @@ func TestAccIAMRole_description(t *testing.T) { }, { Config: 
testAccRoleConfig_updatedDescription(rName), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( testAccCheckRoleExists(ctx, resourceName, &conf), resource.TestCheckResourceAttr(resourceName, names.AttrPath, "/"), resource.TestCheckResourceAttr(resourceName, names.AttrDescription, "This 1s an Upd@ted D3scr!pti0n with weird content: &90ë\"'{«¡Çø}"), @@ -95,7 +90,7 @@ func TestAccIAMRole_description(t *testing.T) { }, { Config: testAccRoleConfig_basic(rName), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( testAccCheckRoleExists(ctx, resourceName, &conf), resource.TestCheckResourceAttrSet(resourceName, "create_date"), resource.TestCheckResourceAttr(resourceName, names.AttrDescription, ""), @@ -118,7 +113,7 @@ func TestAccIAMRole_nameGenerated(t *testing.T) { Steps: []resource.TestStep{ { Config: testAccRoleConfig_nameGenerated(), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( testAccCheckRoleExists(ctx, resourceName, &conf), acctest.CheckResourceAttrNameGenerated(resourceName, names.AttrName), resource.TestCheckResourceAttr(resourceName, names.AttrNamePrefix, "terraform-"), @@ -146,7 +141,7 @@ func TestAccIAMRole_namePrefix(t *testing.T) { Steps: []resource.TestStep{ { Config: testAccRoleConfig_namePrefix(acctest.ResourcePrefix), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( testAccCheckRoleExists(ctx, resourceName, &conf), acctest.CheckResourceAttrNameFromPrefix(resourceName, names.AttrName, acctest.ResourcePrefix), resource.TestCheckResourceAttr(resourceName, names.AttrNamePrefix, acctest.ResourcePrefix), @@ -175,7 +170,7 @@ func TestAccIAMRole_testNameChange(t *testing.T) { Steps: []resource.TestStep{ { Config: testAccRoleConfig_pre(rName), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( testAccCheckRoleExists(ctx, resourceName, &conf), ), }, @@ -187,7 +182,7 @@ 
func TestAccIAMRole_testNameChange(t *testing.T) { }, { Config: testAccRoleConfig_post(rName), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( testAccCheckRoleExists(ctx, resourceName, &conf), ), }, @@ -211,7 +206,7 @@ func TestAccIAMRole_diffs(t *testing.T) { Steps: []resource.TestStep{ { Config: testAccRoleConfig_diffs(rName, ""), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( testAccCheckRoleExists(ctx, resourceName, &conf), ), ConfigPlanChecks: resource.ConfigPlanChecks{ @@ -222,7 +217,7 @@ func TestAccIAMRole_diffs(t *testing.T) { }, { Config: testAccRoleConfig_diffs(rName, ""), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( testAccCheckRoleExists(ctx, resourceName, &conf), ), ConfigPlanChecks: resource.ConfigPlanChecks{ @@ -264,7 +259,7 @@ func TestAccIAMRole_diffsCondition(t *testing.T) { Steps: []resource.TestStep{ { Config: testAccRoleConfig_diffsCondition(rName), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( testAccCheckRoleExists(ctx, resourceName, &conf), ), ConfigPlanChecks: resource.ConfigPlanChecks{ @@ -321,7 +316,7 @@ func TestAccIAMRole_disappears(t *testing.T) { Steps: []resource.TestStep{ { Config: testAccRoleConfig_basic(rName), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( testAccCheckRoleExists(ctx, resourceName, &role), acctest.CheckResourceDisappears(ctx, acctest.Provider, tfiam.ResourceRole(), resourceName), ), @@ -345,7 +340,7 @@ func TestAccIAMRole_policiesForceDetach(t *testing.T) { Steps: []resource.TestStep{ { Config: testAccRoleConfig_forceDetachPolicies(rName), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( testAccCheckRoleExists(ctx, resourceName, &conf), testAccAddRolePolicy(ctx, resourceName), ), @@ -382,7 +377,7 @@ func TestAccIAMRole_maxSessionDuration(t *testing.T) { }, { Config: 
testAccRoleConfig_maxSessionDuration(rName, 3700), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( testAccCheckRoleExists(ctx, resourceName, &conf), resource.TestCheckResourceAttr(resourceName, "max_session_duration", "3700"), ), @@ -394,7 +389,7 @@ func TestAccIAMRole_maxSessionDuration(t *testing.T) { }, { Config: testAccRoleConfig_maxSessionDuration(rName, 3701), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( testAccCheckRoleExists(ctx, resourceName, &conf), resource.TestCheckResourceAttr(resourceName, "max_session_duration", "3701"), ), @@ -427,7 +422,7 @@ func TestAccIAMRole_permissionsBoundary(t *testing.T) { // Test creation { Config: testAccRoleConfig_permissionsBoundary(rName, permissionsBoundary1), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( testAccCheckRoleExists(ctx, resourceName, &role), resource.TestCheckResourceAttr(resourceName, "permissions_boundary", permissionsBoundary1), testAccCheckRolePermissionsBoundary(&role, permissionsBoundary1), @@ -436,7 +431,7 @@ func TestAccIAMRole_permissionsBoundary(t *testing.T) { // Test update { Config: testAccRoleConfig_permissionsBoundary(rName, permissionsBoundary2), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( testAccCheckRoleExists(ctx, resourceName, &role), resource.TestCheckResourceAttr(resourceName, "permissions_boundary", permissionsBoundary2), testAccCheckRolePermissionsBoundary(&role, permissionsBoundary2), @@ -451,7 +446,7 @@ func TestAccIAMRole_permissionsBoundary(t *testing.T) { // Test removal { Config: testAccRoleConfig_basic(rName), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( testAccCheckRoleExists(ctx, resourceName, &role), resource.TestCheckResourceAttr(resourceName, "permissions_boundary", ""), testAccCheckRolePermissionsBoundary(&role, ""), @@ -460,7 +455,7 @@ func 
TestAccIAMRole_permissionsBoundary(t *testing.T) { // Test addition { Config: testAccRoleConfig_permissionsBoundary(rName, permissionsBoundary1), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( testAccCheckRoleExists(ctx, resourceName, &role), resource.TestCheckResourceAttr(resourceName, "permissions_boundary", permissionsBoundary1), testAccCheckRolePermissionsBoundary(&role, permissionsBoundary1), @@ -481,7 +476,7 @@ func TestAccIAMRole_permissionsBoundary(t *testing.T) { }, Config: testAccRoleConfig_permissionsBoundary(rName, permissionsBoundary1), // check the boundary was restored - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( testAccCheckRoleExists(ctx, resourceName, &role), resource.TestCheckResourceAttr(resourceName, "permissions_boundary", permissionsBoundary1), testAccCheckRolePermissionsBoundary(&role, permissionsBoundary1), @@ -490,7 +485,7 @@ func TestAccIAMRole_permissionsBoundary(t *testing.T) { // Test empty value { Config: testAccRoleConfig_permissionsBoundary(rName, ""), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( testAccCheckRoleExists(ctx, resourceName, &role), resource.TestCheckResourceAttr(resourceName, "permissions_boundary", ""), testAccCheckRolePermissionsBoundary(&role, ""), @@ -517,7 +512,7 @@ func TestAccIAMRole_InlinePolicy_basic(t *testing.T) { Steps: []resource.TestStep{ { Config: testAccRoleConfig_policyInline(rName, policyName1), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( testAccCheckRoleExists(ctx, resourceName, &role), resource.TestCheckResourceAttr(resourceName, "inline_policy.#", "1"), resource.TestCheckResourceAttr(resourceName, names.AttrName, rName), @@ -526,7 +521,7 @@ func TestAccIAMRole_InlinePolicy_basic(t *testing.T) { }, { Config: testAccRoleConfig_policyInlineUpdate(rName, policyName2, policyName3), - Check: resource.ComposeTestCheckFunc( + Check: 
resource.ComposeAggregateTestCheckFunc( testAccCheckRoleExists(ctx, resourceName, &role), resource.TestCheckResourceAttr(resourceName, "inline_policy.#", "2"), resource.TestCheckResourceAttr(resourceName, "managed_policy_arns.#", "0"), @@ -534,7 +529,7 @@ func TestAccIAMRole_InlinePolicy_basic(t *testing.T) { }, { Config: testAccRoleConfig_policyInlineUpdateDown(rName, policyName3), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( testAccCheckRoleExists(ctx, resourceName, &role), resource.TestCheckResourceAttr(resourceName, "inline_policy.#", "1"), resource.TestCheckResourceAttr(resourceName, "managed_policy_arns.#", "0"), @@ -565,7 +560,7 @@ func TestAccIAMRole_InlinePolicy_ignoreOrder(t *testing.T) { Steps: []resource.TestStep{ { Config: testAccRoleConfig_policyInlineActionOrder(rName), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( testAccCheckRoleExists(ctx, resourceName, &role), resource.TestCheckResourceAttr(resourceName, "inline_policy.#", "1"), resource.TestCheckResourceAttr(resourceName, names.AttrName, rName), @@ -625,7 +620,7 @@ func TestAccIAMRole_InlinePolicy_empty(t *testing.T) { Steps: []resource.TestStep{ { Config: testAccRoleConfig_policyEmptyInline(rName), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( testAccCheckRoleExists(ctx, resourceName, &role), ), }, @@ -668,7 +663,7 @@ func TestAccIAMRole_ManagedPolicy_basic(t *testing.T) { Steps: []resource.TestStep{ { Config: testAccRoleConfig_policyManaged(rName, policyName1), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( testAccCheckRoleExists(ctx, resourceName, &role), resource.TestCheckResourceAttr(resourceName, names.AttrName, rName), resource.TestCheckResourceAttr(resourceName, "managed_policy_arns.#", "1"), @@ -676,14 +671,14 @@ func TestAccIAMRole_ManagedPolicy_basic(t *testing.T) { }, { Config: 
testAccRoleConfig_policyManagedUpdate(rName, policyName1, policyName2, policyName3), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( testAccCheckRoleExists(ctx, resourceName, &role), resource.TestCheckResourceAttr(resourceName, "managed_policy_arns.#", "2"), ), }, { Config: testAccRoleConfig_policyManagedUpdateDown(rName, policyName1, policyName2, policyName3), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( testAccCheckRoleExists(ctx, resourceName, &role), resource.TestCheckResourceAttr(resourceName, "managed_policy_arns.#", "1"), ), @@ -714,7 +709,7 @@ func TestAccIAMRole_ManagedPolicy_outOfBandRemovalAddedBack(t *testing.T) { Steps: []resource.TestStep{ { Config: testAccRoleConfig_policyManaged(rName, policyName), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( testAccCheckRoleExists(ctx, resourceName, &role), testAccCheckRolePolicyDetachManagedPolicy(ctx, &role, policyName), ), @@ -722,7 +717,7 @@ func TestAccIAMRole_ManagedPolicy_outOfBandRemovalAddedBack(t *testing.T) { }, { Config: testAccRoleConfig_policyManaged(rName, policyName), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( testAccCheckRoleExists(ctx, resourceName, &role), resource.TestCheckResourceAttr(resourceName, "managed_policy_arns.#", "1"), ), @@ -748,7 +743,7 @@ func TestAccIAMRole_InlinePolicy_outOfBandRemovalAddedBack(t *testing.T) { Steps: []resource.TestStep{ { Config: testAccRoleConfig_policyInline(rName, policyName), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( testAccCheckRoleExists(ctx, resourceName, &role), testAccCheckRolePolicyRemoveInlinePolicy(ctx, &role, policyName), ), @@ -756,7 +751,7 @@ func TestAccIAMRole_InlinePolicy_outOfBandRemovalAddedBack(t *testing.T) { }, { Config: testAccRoleConfig_policyInline(rName, policyName), - Check: resource.ComposeTestCheckFunc( + Check: 
resource.ComposeAggregateTestCheckFunc( testAccCheckRoleExists(ctx, resourceName, &role), resource.TestCheckResourceAttr(resourceName, "inline_policy.#", "1"), ), @@ -783,7 +778,7 @@ func TestAccIAMRole_ManagedPolicy_outOfBandAdditionRemoved(t *testing.T) { Steps: []resource.TestStep{ { Config: testAccRoleConfig_policyExtraManaged(rName, policyName1, policyName2), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( testAccCheckRoleExists(ctx, resourceName, &role), testAccCheckRolePolicyAttachManagedPolicy(ctx, &role, policyName2), ), @@ -791,7 +786,7 @@ func TestAccIAMRole_ManagedPolicy_outOfBandAdditionRemoved(t *testing.T) { }, { Config: testAccRoleConfig_policyExtraManaged(rName, policyName1, policyName2), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( testAccCheckRoleExists(ctx, resourceName, &role), resource.TestCheckResourceAttr(resourceName, "managed_policy_arns.#", "1"), ), @@ -818,7 +813,7 @@ func TestAccIAMRole_InlinePolicy_outOfBandAdditionRemoved(t *testing.T) { Steps: []resource.TestStep{ { Config: testAccRoleConfig_policyInline(rName, policyName1), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( testAccCheckRoleExists(ctx, resourceName, &role), testAccCheckRolePolicyAddInlinePolicy(ctx, &role, policyName2), ), @@ -826,7 +821,7 @@ func TestAccIAMRole_InlinePolicy_outOfBandAdditionRemoved(t *testing.T) { }, { Config: testAccRoleConfig_policyInline(rName, policyName1), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( testAccCheckRoleExists(ctx, resourceName, &role), resource.TestCheckResourceAttr(resourceName, "inline_policy.#", "1"), resource.TestCheckResourceAttr(resourceName, "managed_policy_arns.#", "0"), @@ -854,21 +849,21 @@ func TestAccIAMRole_InlinePolicy_outOfBandAdditionIgnored(t *testing.T) { Steps: []resource.TestStep{ { Config: testAccRoleConfig_policyNoInline(rName), - Check: 
resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( testAccCheckRoleExists(ctx, resourceName, &role), testAccCheckRolePolicyAddInlinePolicy(ctx, &role, policyName1), ), }, { Config: testAccRoleConfig_policyNoInline(rName), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( testAccCheckRoleExists(ctx, resourceName, &role), testAccCheckRolePolicyAddInlinePolicy(ctx, &role, policyName2), ), }, { Config: testAccRoleConfig_policyNoInline(rName), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( testAccCheckRoleExists(ctx, resourceName, &role), testAccCheckRolePolicyRemoveInlinePolicy(ctx, &role, policyName1), testAccCheckRolePolicyRemoveInlinePolicy(ctx, &role, policyName2), @@ -895,14 +890,14 @@ func TestAccIAMRole_ManagedPolicy_outOfBandAdditionIgnored(t *testing.T) { Steps: []resource.TestStep{ { Config: testAccRoleConfig_policyNoManaged(rName, policyName), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( testAccCheckRoleExists(ctx, resourceName, &role), testAccCheckRolePolicyAttachManagedPolicy(ctx, &role, policyName), ), }, { Config: testAccRoleConfig_policyNoManaged(rName, policyName), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( testAccCheckRoleExists(ctx, resourceName, &role), testAccCheckRolePolicyDetachManagedPolicy(ctx, &role, policyName), ), @@ -928,7 +923,7 @@ func TestAccIAMRole_InlinePolicy_outOfBandAdditionRemovedEmpty(t *testing.T) { Steps: []resource.TestStep{ { Config: testAccRoleConfig_policyEmptyInline(rName), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( testAccCheckRoleExists(ctx, resourceName, &role), testAccCheckRolePolicyAddInlinePolicy(ctx, &role, policyName), ), @@ -936,7 +931,7 @@ func TestAccIAMRole_InlinePolicy_outOfBandAdditionRemovedEmpty(t *testing.T) { }, { Config: testAccRoleConfig_policyEmptyInline(rName), 
- Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( testAccCheckRoleExists(ctx, resourceName, &role), ), }, @@ -961,7 +956,7 @@ func TestAccIAMRole_ManagedPolicy_outOfBandAdditionRemovedEmpty(t *testing.T) { Steps: []resource.TestStep{ { Config: testAccRoleConfig_policyEmptyManaged(rName, policyName), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( testAccCheckRoleExists(ctx, resourceName, &role), testAccCheckRolePolicyAttachManagedPolicy(ctx, &role, policyName), ), @@ -969,7 +964,7 @@ func TestAccIAMRole_ManagedPolicy_outOfBandAdditionRemovedEmpty(t *testing.T) { }, { Config: testAccRoleConfig_policyEmptyManaged(rName, policyName), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( testAccCheckRoleExists(ctx, resourceName, &role), ), }, @@ -977,19 +972,21 @@ func TestAccIAMRole_ManagedPolicy_outOfBandAdditionRemovedEmpty(t *testing.T) { }) } -func TestAccIAMRole_Identity_ExistingResource(t *testing.T) { +func TestAccIAMRole_Identity_ExistingResource_NoRefresh_OnError(t *testing.T) { ctx := acctest.Context(t) var conf awstypes.Role rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_iam_role.test" resource.ParallelTest(t, resource.TestCase{ - TerraformVersionChecks: []tfversion.TerraformVersionCheck{ - tfversion.SkipBelow(tfversion.Version1_12_0), - }, PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: testAccCheckRoleDestroy(ctx), + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, Steps: []resource.TestStep{ { ExternalProviders: map[string]resource.ExternalProvider{ @@ -1002,57 +999,43 @@ func TestAccIAMRole_Identity_ExistingResource(t *testing.T) { Check: resource.ComposeAggregateTestCheckFunc( testAccCheckRoleExists(ctx, resourceName, &conf), ), - ConfigStateChecks: []statecheck.StateCheck{ - 
tfstatecheck.ExpectNoIdentity(resourceName), - }, }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Config: testAccRoleConfig_invalidAssumeRolePolicy(rName), + ExpectError: regexache.MustCompile(`MalformedPolicyDocument: Unknown field invalid`), + }, + }, + }) +} + +func TestAccIAMRole_Identity_ExistingResource_OnError(t *testing.T) { + ctx := acctest.Context(t) + var conf awstypes.Role + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_iam_role.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), + CheckDestroy: testAccCheckRoleDestroy(ctx), + Steps: []resource.TestStep{ { ExternalProviders: map[string]resource.ExternalProvider{ "aws": { Source: "hashicorp/aws", - VersionConstraint: "6.0.0", + VersionConstraint: "5.100.0", }, }, Config: testAccRoleConfig_basic(rName), Check: resource.ComposeAggregateTestCheckFunc( testAccCheckRoleExists(ctx, resourceName, &conf), ), - ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - PostApplyPostRefresh: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - }, - ConfigStateChecks: []statecheck.StateCheck{ - statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ - names.AttrAccountID: knownvalue.Null(), - names.AttrName: knownvalue.Null(), - }), - }, }, { ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - Config: testAccRoleConfig_basic(rName), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckRoleExists(ctx, resourceName, &conf), - ), - ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - PostApplyPostRefresh: []plancheck.PlanCheck{ - 
plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - }, - ConfigStateChecks: []statecheck.StateCheck{ - statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ - names.AttrAccountID: tfknownvalue.AccountID(), - names.AttrName: knownvalue.NotNull(), - }), - statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrName)), - }, + Config: testAccRoleConfig_invalidAssumeRolePolicy(rName), + ExpectError: regexache.MustCompile(`MalformedPolicyDocument: Unknown field invalid`), }, }, }) @@ -1346,6 +1329,21 @@ resource "aws_iam_role" "test" { `, rName) } +func testAccRoleConfig_invalidAssumeRolePolicy(rName string) string { + return fmt.Sprintf(` +data "aws_service_principal" "ec2" { + service_name = "ec2" +} + +resource "aws_iam_role" "test" { + name = %[1]q + path = "/" + + assume_role_policy = "{\"invalid\":true}" +} +`, rName) +} + func testAccRoleConfig_diffs(rName, tags string) string { return fmt.Sprintf(` data "aws_partition" "current" {} diff --git a/internal/service/iam/roles_data_source.go b/internal/service/iam/roles_data_source.go index b2f85ab7fc7c..318dfaf7c337 100644 --- a/internal/service/iam/roles_data_source.go +++ b/internal/service/iam/roles_data_source.go @@ -5,7 +5,6 @@ package iam import ( "context" - "reflect" "github.com/YakDriver/regexache" "github.com/aws/aws-sdk-go-v2/aws" @@ -16,6 +15,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" + inttypes "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -68,7 +68,7 @@ func dataSourceRolesRead(ctx context.Context, d *schema.ResourceData, meta any) } for _, role := range page.Roles { - if reflect.ValueOf(role).IsZero() { + if p := &role; inttypes.IsZero(p) { continue } @@ -89,13 +89,8 @@ func dataSourceRolesRead(ctx 
context.Context, d *schema.ResourceData, meta any) nms = append(nms, aws.ToString(r.RoleName)) } - if err := d.Set(names.AttrARNs, arns); err != nil { - return sdkdiag.AppendErrorf(diags, "setting arns: %s", err) - } - - if err := d.Set(names.AttrNames, nms); err != nil { - return sdkdiag.AppendErrorf(diags, "setting names: %s", err) - } + d.Set(names.AttrARNs, arns) + d.Set(names.AttrNames, nms) return diags } diff --git a/internal/service/iam/roles_data_source_test.go b/internal/service/iam/roles_data_source_test.go index 7ebcea71b1ac..fac9055d0602 100644 --- a/internal/service/iam/roles_data_source_test.go +++ b/internal/service/iam/roles_data_source_test.go @@ -143,7 +143,7 @@ resource "aws_iam_role" "test" { { "Action": "sts:AssumeRole", "Principal": { - "Service": data.aws_service_principal.ec2.name + "Service": "${data.aws_service_principal.ec2.name}" }, "Effect": "Allow", "Sid": "" @@ -179,7 +179,7 @@ resource "aws_iam_role" "test" { { "Action": "sts:AssumeRole", "Principal": { - "Service": data.aws_service_principal.ec2.name + "Service": "${data.aws_service_principal.ec2.name}" }, "Effect": "Allow", "Sid": "" @@ -220,7 +220,7 @@ resource "aws_iam_role" "test" { { "Action": "sts:AssumeRole", "Principal": { - "Service": data.aws_service_principal.ec2.name + "Service": "${data.aws_service_principal.ec2.name}" }, "Effect": "Allow", "Sid": "" diff --git a/internal/service/iam/saml_provider.go b/internal/service/iam/saml_provider.go index 921ce35cf7d7..8179579409f9 100644 --- a/internal/service/iam/saml_provider.go +++ b/internal/service/iam/saml_provider.go @@ -29,6 +29,8 @@ import ( // @SDKResource("aws_iam_saml_provider", name="SAML Provider") // @Tags(identifierAttribute="arn", resourceType="SAMLProvider") // @Testing(tagsTest=false) +// @ArnIdentity +// @Testing(preIdentityVersion="v6.4.0") func resourceSAMLProvider() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceSAMLProviderCreate, @@ -36,10 +38,6 @@ func resourceSAMLProvider() 
*schema.Resource { UpdateWithoutTimeout: resourceSAMLProviderUpdate, DeleteWithoutTimeout: resourceSAMLProviderDelete, - Importer: &schema.ResourceImporter{ - StateContext: schema.ImportStatePassthroughContext, - }, - Schema: map[string]*schema.Schema{ names.AttrARN: { Type: schema.TypeString, diff --git a/internal/service/iam/saml_provider_identity_gen_test.go b/internal/service/iam/saml_provider_identity_gen_test.go new file mode 100644 index 000000000000..248668bc40a3 --- /dev/null +++ b/internal/service/iam/saml_provider_identity_gen_test.go @@ -0,0 +1,214 @@ +// Code generated by internal/generate/identitytests/main.go; DO NOT EDIT. + +package iam_test + +import ( + "testing" + + "github.com/hashicorp/terraform-plugin-testing/compare" + "github.com/hashicorp/terraform-plugin-testing/config" + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/knownvalue" + "github.com/hashicorp/terraform-plugin-testing/plancheck" + "github.com/hashicorp/terraform-plugin-testing/statecheck" + "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" + "github.com/hashicorp/terraform-plugin-testing/tfversion" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccIAMSAMLProvider_Identity_Basic(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_iam_saml_provider.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), + CheckDestroy: testAccCheckSAMLProviderDestroy(ctx), + 
ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + // Step 1: Setup + { + ConfigDirectory: config.StaticDirectory("testdata/SAMLProvider/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSAMLProviderExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New(names.AttrARN), compare.ValuesSame()), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + + // Step 2: Import command + { + ConfigDirectory: config.StaticDirectory("testdata/SAMLProvider/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ImportStateKind: resource.ImportCommandWithID, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + + // Step 3: Import block with Import ID + { + ConfigDirectory: config.StaticDirectory("testdata/SAMLProvider/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + }, + }, + }, + + // Step 4: Import block with Resource Identity + { + ConfigDirectory: config.StaticDirectory("testdata/SAMLProvider/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ResourceName: resourceName, + ImportState: true, + 
ImportStateKind: resource.ImportBlockWithResourceIdentity, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + }, + }, + }, + }, + }) +} + +// Resource Identity was added after v6.4.0 +func TestAccIAMSAMLProvider_Identity_ExistingResource(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_iam_saml_provider.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), + CheckDestroy: testAccCheckSAMLProviderDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/SAMLProvider/basic_v6.4.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSAMLProviderExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/SAMLProvider/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: 
[]statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + }, + }) +} + +// Resource Identity was added after v6.4.0 +func TestAccIAMSAMLProvider_Identity_ExistingResource_NoRefresh_NoChange(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_iam_saml_provider.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), + CheckDestroy: testAccCheckSAMLProviderDestroy(ctx), + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/SAMLProvider/basic_v6.4.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSAMLProviderExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/SAMLProvider/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + 
ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + }, + }) +} diff --git a/internal/service/iam/server_certificate.go b/internal/service/iam/server_certificate.go index a2b6ddef8b2d..614ee3ca19e6 100644 --- a/internal/service/iam/server_certificate.go +++ b/internal/service/iam/server_certificate.go @@ -23,6 +23,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/create" "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" + "github.com/hashicorp/terraform-provider-aws/internal/sdkv2" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/names" @@ -56,14 +57,14 @@ func resourceServerCertificate() *schema.Resource { Required: true, ForceNew: true, DiffSuppressFunc: suppressNormalizeCertRemoval, - StateFunc: StateTrimSpace, + StateFunc: sdkv2.TrimSpaceSchemaStateFunc, }, names.AttrCertificateChain: { Type: schema.TypeString, Optional: true, ForceNew: true, DiffSuppressFunc: suppressNormalizeCertRemoval, - StateFunc: StateTrimSpace, + StateFunc: sdkv2.TrimSpaceSchemaStateFunc, }, "expiration": { Type: schema.TypeString, @@ -94,7 +95,7 @@ func resourceServerCertificate() *schema.Resource { ForceNew: true, Sensitive: true, DiffSuppressFunc: suppressNormalizeCertRemoval, - StateFunc: StateTrimSpace, + StateFunc: sdkv2.TrimSpaceSchemaStateFunc, }, names.AttrTags: tftags.TagsSchema(), names.AttrTagsAll: tftags.TagsSchemaComputed(), @@ -111,7 +112,7 @@ func resourceServerCertificateCreate(ctx context.Context, d *schema.ResourceData conn := meta.(*conns.AWSClient).IAMClient(ctx) sslCertName := create.Name(d.Get(names.AttrName).(string), d.Get(names.AttrNamePrefix).(string)) - input := &iam.UploadServerCertificateInput{ + input := iam.UploadServerCertificateInput{ CertificateBody: 
aws.String(d.Get("certificate_body").(string)), PrivateKey: aws.String(d.Get(names.AttrPrivateKey).(string)), ServerCertificateName: aws.String(sslCertName), @@ -126,14 +127,14 @@ func resourceServerCertificateCreate(ctx context.Context, d *schema.ResourceData input.Path = aws.String(v.(string)) } - output, err := conn.UploadServerCertificate(ctx, input) + output, err := conn.UploadServerCertificate(ctx, &input) // Some partitions (e.g. ISO) may not support tag-on-create. partition := meta.(*conns.AWSClient).Partition(ctx) if input.Tags != nil && errs.IsUnsupportedOperationInPartitionError(partition, err) { input.Tags = nil - output, err = conn.UploadServerCertificate(ctx, input) + output, err = conn.UploadServerCertificate(ctx, &input) } if err != nil { @@ -206,7 +207,7 @@ func resourceServerCertificateUpdate(ctx context.Context, d *schema.ResourceData conn := meta.(*conns.AWSClient).IAMClient(ctx) if d.HasChanges(names.AttrName, names.AttrNamePrefix, names.AttrPath) { - input := &iam.UpdateServerCertificateInput{} + var input iam.UpdateServerCertificateInput if d.HasChange(names.AttrName) { oldName, newName := d.GetChange(names.AttrName) @@ -235,7 +236,7 @@ func resourceServerCertificateUpdate(ctx context.Context, d *schema.ResourceData input.NewPath = aws.String(d.Get(names.AttrPath).(string)) } - _, err := conn.UpdateServerCertificate(ctx, input) + _, err := conn.UpdateServerCertificate(ctx, &input) if err != nil { return sdkdiag.AppendErrorf(diags, "updating IAM Server Certificate (%s): %s", d.Id(), err) @@ -255,10 +256,11 @@ func resourceServerCertificateDelete(ctx context.Context, d *schema.ResourceData conn := meta.(*conns.AWSClient).IAMClient(ctx) log.Printf("[DEBUG] Deleting IAM Server Certificate: %s", d.Id()) - _, err := tfresource.RetryWhenIsAErrorMessageContains[*awstypes.DeleteConflictException](ctx, d.Timeout(schema.TimeoutDelete), func() (any, error) { - return conn.DeleteServerCertificate(ctx, &iam.DeleteServerCertificateInput{ - 
ServerCertificateName: aws.String(d.Get(names.AttrName).(string)), - }) + input := iam.DeleteServerCertificateInput{ + ServerCertificateName: aws.String(d.Get(names.AttrName).(string)), + } + _, err := tfresource.RetryWhenIsAErrorMessageContains[any, *awstypes.DeleteConflictException](ctx, d.Timeout(schema.TimeoutDelete), func(ctx context.Context) (any, error) { + return conn.DeleteServerCertificate(ctx, &input) }, "currently in use by arn") if errs.IsA[*awstypes.NoSuchEntityException](err) { @@ -279,10 +281,14 @@ func resourceServerCertificateImport(ctx context.Context, d *schema.ResourceData } func findServerCertificateByName(ctx context.Context, conn *iam.Client, name string) (*awstypes.ServerCertificate, error) { - input := &iam.GetServerCertificateInput{ + input := iam.GetServerCertificateInput{ ServerCertificateName: aws.String(name), } + return findServerCertificate(ctx, conn, &input) +} + +func findServerCertificate(ctx context.Context, conn *iam.Client, input *iam.GetServerCertificateInput) (*awstypes.ServerCertificate, error) { output, err := conn.GetServerCertificate(ctx, input) if errs.IsA[*awstypes.NoSuchEntityException](err) { diff --git a/internal/service/iam/server_certificate_tags_gen_test.go b/internal/service/iam/server_certificate_tags_gen_test.go index 1088748cf586..08444892a743 100644 --- a/internal/service/iam/server_certificate_tags_gen_test.go +++ b/internal/service/iam/server_certificate_tags_gen_test.go @@ -7,7 +7,6 @@ import ( "github.com/aws/aws-sdk-go-v2/service/iam/types" "github.com/hashicorp/terraform-plugin-testing/config" - sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/knownvalue" "github.com/hashicorp/terraform-plugin-testing/plancheck" @@ -19,13 +18,14 @@ import ( func TestAccIAMServerCertificate_tags(t *testing.T) { ctx := acctest.Context(t) + var v types.ServerCertificate resourceName := 
"aws_iam_server_certificate.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) privateKeyPEM := acctest.TLSRSAPrivateKeyPEM(t, 2048) certificatePEM := acctest.TLSRSAX509SelfSignedCertificatePEM(t, privateKeyPEM, acctest.RandomDomain().String()) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: testAccCheckServerCertificateDestroy(ctx), @@ -235,13 +235,14 @@ func TestAccIAMServerCertificate_tags(t *testing.T) { func TestAccIAMServerCertificate_tags_null(t *testing.T) { ctx := acctest.Context(t) + var v types.ServerCertificate resourceName := "aws_iam_server_certificate.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) privateKeyPEM := acctest.TLSRSAPrivateKeyPEM(t, 2048) certificatePEM := acctest.TLSRSAX509SelfSignedCertificatePEM(t, privateKeyPEM, acctest.RandomDomain().String()) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: testAccCheckServerCertificateDestroy(ctx), @@ -314,13 +315,14 @@ func TestAccIAMServerCertificate_tags_null(t *testing.T) { func TestAccIAMServerCertificate_tags_EmptyMap(t *testing.T) { ctx := acctest.Context(t) + var v types.ServerCertificate resourceName := "aws_iam_server_certificate.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) privateKeyPEM := acctest.TLSRSAPrivateKeyPEM(t, 2048) certificatePEM := acctest.TLSRSAX509SelfSignedCertificatePEM(t, privateKeyPEM, acctest.RandomDomain().String()) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, 
resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: testAccCheckServerCertificateDestroy(ctx), @@ -389,13 +391,14 @@ func TestAccIAMServerCertificate_tags_EmptyMap(t *testing.T) { func TestAccIAMServerCertificate_tags_AddOnUpdate(t *testing.T) { ctx := acctest.Context(t) + var v types.ServerCertificate resourceName := "aws_iam_server_certificate.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) privateKeyPEM := acctest.TLSRSAPrivateKeyPEM(t, 2048) certificatePEM := acctest.TLSRSAX509SelfSignedCertificatePEM(t, privateKeyPEM, acctest.RandomDomain().String()) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: testAccCheckServerCertificateDestroy(ctx), @@ -482,13 +485,14 @@ func TestAccIAMServerCertificate_tags_AddOnUpdate(t *testing.T) { func TestAccIAMServerCertificate_tags_EmptyTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) + var v types.ServerCertificate resourceName := "aws_iam_server_certificate.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) privateKeyPEM := acctest.TLSRSAPrivateKeyPEM(t, 2048) certificatePEM := acctest.TLSRSAX509SelfSignedCertificatePEM(t, privateKeyPEM, acctest.RandomDomain().String()) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: testAccCheckServerCertificateDestroy(ctx), @@ -589,13 +593,14 @@ func TestAccIAMServerCertificate_tags_EmptyTag_OnCreate(t *testing.T) { func TestAccIAMServerCertificate_tags_EmptyTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + 
var v types.ServerCertificate resourceName := "aws_iam_server_certificate.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) privateKeyPEM := acctest.TLSRSAPrivateKeyPEM(t, 2048) certificatePEM := acctest.TLSRSAX509SelfSignedCertificatePEM(t, privateKeyPEM, acctest.RandomDomain().String()) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: testAccCheckServerCertificateDestroy(ctx), @@ -746,13 +751,14 @@ func TestAccIAMServerCertificate_tags_EmptyTag_OnUpdate_Add(t *testing.T) { func TestAccIAMServerCertificate_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + var v types.ServerCertificate resourceName := "aws_iam_server_certificate.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) privateKeyPEM := acctest.TLSRSAPrivateKeyPEM(t, 2048) certificatePEM := acctest.TLSRSAX509SelfSignedCertificatePEM(t, privateKeyPEM, acctest.RandomDomain().String()) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: testAccCheckServerCertificateDestroy(ctx), @@ -847,13 +853,14 @@ func TestAccIAMServerCertificate_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { func TestAccIAMServerCertificate_tags_DefaultTags_providerOnly(t *testing.T) { ctx := acctest.Context(t) + var v types.ServerCertificate resourceName := "aws_iam_server_certificate.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) privateKeyPEM := acctest.TLSRSAPrivateKeyPEM(t, 2048) certificatePEM := acctest.TLSRSAX509SelfSignedCertificatePEM(t, 
privateKeyPEM, acctest.RandomDomain().String()) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: testAccCheckServerCertificateDestroy(ctx), @@ -1062,13 +1069,14 @@ func TestAccIAMServerCertificate_tags_DefaultTags_providerOnly(t *testing.T) { func TestAccIAMServerCertificate_tags_DefaultTags_nonOverlapping(t *testing.T) { ctx := acctest.Context(t) + var v types.ServerCertificate resourceName := "aws_iam_server_certificate.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) privateKeyPEM := acctest.TLSRSAPrivateKeyPEM(t, 2048) certificatePEM := acctest.TLSRSAX509SelfSignedCertificatePEM(t, privateKeyPEM, acctest.RandomDomain().String()) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: testAccCheckServerCertificateDestroy(ctx), @@ -1248,13 +1256,14 @@ func TestAccIAMServerCertificate_tags_DefaultTags_nonOverlapping(t *testing.T) { func TestAccIAMServerCertificate_tags_DefaultTags_overlapping(t *testing.T) { ctx := acctest.Context(t) + var v types.ServerCertificate resourceName := "aws_iam_server_certificate.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) privateKeyPEM := acctest.TLSRSAPrivateKeyPEM(t, 2048) certificatePEM := acctest.TLSRSAX509SelfSignedCertificatePEM(t, privateKeyPEM, acctest.RandomDomain().String()) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: testAccCheckServerCertificateDestroy(ctx), @@ -1450,13 
+1459,14 @@ func TestAccIAMServerCertificate_tags_DefaultTags_overlapping(t *testing.T) { func TestAccIAMServerCertificate_tags_DefaultTags_updateToProviderOnly(t *testing.T) { ctx := acctest.Context(t) + var v types.ServerCertificate resourceName := "aws_iam_server_certificate.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) privateKeyPEM := acctest.TLSRSAPrivateKeyPEM(t, 2048) certificatePEM := acctest.TLSRSAX509SelfSignedCertificatePEM(t, privateKeyPEM, acctest.RandomDomain().String()) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: testAccCheckServerCertificateDestroy(ctx), @@ -1552,13 +1562,14 @@ func TestAccIAMServerCertificate_tags_DefaultTags_updateToProviderOnly(t *testin func TestAccIAMServerCertificate_tags_DefaultTags_updateToResourceOnly(t *testing.T) { ctx := acctest.Context(t) + var v types.ServerCertificate resourceName := "aws_iam_server_certificate.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) privateKeyPEM := acctest.TLSRSAPrivateKeyPEM(t, 2048) certificatePEM := acctest.TLSRSAX509SelfSignedCertificatePEM(t, privateKeyPEM, acctest.RandomDomain().String()) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: testAccCheckServerCertificateDestroy(ctx), @@ -1653,13 +1664,14 @@ func TestAccIAMServerCertificate_tags_DefaultTags_updateToResourceOnly(t *testin func TestAccIAMServerCertificate_tags_DefaultTags_emptyResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v types.ServerCertificate resourceName := "aws_iam_server_certificate.test" - rName := 
sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) privateKeyPEM := acctest.TLSRSAPrivateKeyPEM(t, 2048) certificatePEM := acctest.TLSRSAX509SelfSignedCertificatePEM(t, privateKeyPEM, acctest.RandomDomain().String()) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: testAccCheckServerCertificateDestroy(ctx), @@ -1728,13 +1740,14 @@ func TestAccIAMServerCertificate_tags_DefaultTags_emptyResourceTag(t *testing.T) func TestAccIAMServerCertificate_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T) { ctx := acctest.Context(t) + var v types.ServerCertificate resourceName := "aws_iam_server_certificate.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) privateKeyPEM := acctest.TLSRSAPrivateKeyPEM(t, 2048) certificatePEM := acctest.TLSRSAX509SelfSignedCertificatePEM(t, privateKeyPEM, acctest.RandomDomain().String()) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: testAccCheckServerCertificateDestroy(ctx), @@ -1795,13 +1808,14 @@ func TestAccIAMServerCertificate_tags_DefaultTags_emptyProviderOnlyTag(t *testin func TestAccIAMServerCertificate_tags_DefaultTags_nullOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v types.ServerCertificate resourceName := "aws_iam_server_certificate.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) privateKeyPEM := acctest.TLSRSAPrivateKeyPEM(t, 2048) certificatePEM := acctest.TLSRSAX509SelfSignedCertificatePEM(t, privateKeyPEM, acctest.RandomDomain().String()) - 
resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: testAccCheckServerCertificateDestroy(ctx), @@ -1867,13 +1881,14 @@ func TestAccIAMServerCertificate_tags_DefaultTags_nullOverlappingResourceTag(t * func TestAccIAMServerCertificate_tags_DefaultTags_nullNonOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v types.ServerCertificate resourceName := "aws_iam_server_certificate.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) privateKeyPEM := acctest.TLSRSAPrivateKeyPEM(t, 2048) certificatePEM := acctest.TLSRSAX509SelfSignedCertificatePEM(t, privateKeyPEM, acctest.RandomDomain().String()) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: testAccCheckServerCertificateDestroy(ctx), @@ -1939,13 +1954,14 @@ func TestAccIAMServerCertificate_tags_DefaultTags_nullNonOverlappingResourceTag( func TestAccIAMServerCertificate_tags_ComputedTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) + var v types.ServerCertificate resourceName := "aws_iam_server_certificate.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) privateKeyPEM := acctest.TLSRSAPrivateKeyPEM(t, 2048) certificatePEM := acctest.TLSRSAX509SelfSignedCertificatePEM(t, privateKeyPEM, acctest.RandomDomain().String()) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: testAccCheckServerCertificateDestroy(ctx), @@ -2004,13 +2020,14 @@ func 
TestAccIAMServerCertificate_tags_ComputedTag_OnCreate(t *testing.T) { func TestAccIAMServerCertificate_tags_ComputedTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + var v types.ServerCertificate resourceName := "aws_iam_server_certificate.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) privateKeyPEM := acctest.TLSRSAPrivateKeyPEM(t, 2048) certificatePEM := acctest.TLSRSAX509SelfSignedCertificatePEM(t, privateKeyPEM, acctest.RandomDomain().String()) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: testAccCheckServerCertificateDestroy(ctx), @@ -2113,13 +2130,14 @@ func TestAccIAMServerCertificate_tags_ComputedTag_OnUpdate_Add(t *testing.T) { func TestAccIAMServerCertificate_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + var v types.ServerCertificate resourceName := "aws_iam_server_certificate.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) privateKeyPEM := acctest.TLSRSAPrivateKeyPEM(t, 2048) certificatePEM := acctest.TLSRSAX509SelfSignedCertificatePEM(t, privateKeyPEM, acctest.RandomDomain().String()) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: testAccCheckServerCertificateDestroy(ctx), @@ -2212,13 +2230,14 @@ func TestAccIAMServerCertificate_tags_ComputedTag_OnUpdate_Replace(t *testing.T) func TestAccIAMServerCertificate_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { ctx := acctest.Context(t) + var v types.ServerCertificate resourceName := "aws_iam_server_certificate.test" - rName := 
sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) privateKeyPEM := acctest.TLSRSAPrivateKeyPEM(t, 2048) certificatePEM := acctest.TLSRSAX509SelfSignedCertificatePEM(t, privateKeyPEM, acctest.RandomDomain().String()) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: testAccCheckServerCertificateDestroy(ctx), @@ -2382,13 +2401,14 @@ func TestAccIAMServerCertificate_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T func TestAccIAMServerCertificate_tags_IgnoreTags_Overlap_ResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v types.ServerCertificate resourceName := "aws_iam_server_certificate.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) privateKeyPEM := acctest.TLSRSAPrivateKeyPEM(t, 2048) certificatePEM := acctest.TLSRSAX509SelfSignedCertificatePEM(t, privateKeyPEM, acctest.RandomDomain().String()) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: testAccCheckServerCertificateDestroy(ctx), diff --git a/internal/service/iam/service_endpoint_resolver_gen.go b/internal/service/iam/service_endpoint_resolver_gen.go index 6f76401c5fd4..cbfdc9cc73f2 100644 --- a/internal/service/iam/service_endpoint_resolver_gen.go +++ b/internal/service/iam/service_endpoint_resolver_gen.go @@ -62,7 +62,7 @@ func (r resolverV2) ResolveEndpoint(ctx context.Context, params iam.EndpointPara }) params.UseFIPS = aws.Bool(false) } else { - err = fmt.Errorf("looking up iam endpoint %q: %s", hostname, err) + err = fmt.Errorf("looking up iam endpoint %q: %w", hostname, err) return } } else { diff --git 
a/internal/service/iam/service_endpoints_gen_test.go b/internal/service/iam/service_endpoints_gen_test.go index 43f67b7d01d8..c9bc65a934f8 100644 --- a/internal/service/iam/service_endpoints_gen_test.go +++ b/internal/service/iam/service_endpoints_gen_test.go @@ -659,7 +659,7 @@ func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { ) } -var errCancelOperation = fmt.Errorf("Test: Canceling request") +var errCancelOperation = errors.New("Test: Canceling request") func addCancelRequestMiddleware() func(*middleware.Stack) error { return func(stack *middleware.Stack) error { diff --git a/internal/service/iam/service_linked_role.go b/internal/service/iam/service_linked_role.go index dfb97a925cf4..072196056e96 100644 --- a/internal/service/iam/service_linked_role.go +++ b/internal/service/iam/service_linked_role.go @@ -31,6 +31,8 @@ import ( // @SDKResource("aws_iam_service_linked_role", name="Service Linked Role") // @Tags(identifierAttribute="id", resourceType="ServiceLinkedRole") +// @ArnIdentity(arnAttribute="id") +// @Testing(preIdentityVersion="v6.4.0") func resourceServiceLinkedRole() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceServiceLinkedRoleCreate, @@ -38,10 +40,6 @@ func resourceServiceLinkedRole() *schema.Resource { UpdateWithoutTimeout: resourceServiceLinkedRoleUpdate, DeleteWithoutTimeout: resourceServiceLinkedRoleDelete, - Importer: &schema.ResourceImporter{ - StateContext: schema.ImportStatePassthroughContext, - }, - Schema: map[string]*schema.Schema{ names.AttrARN: { Type: schema.TypeString, @@ -95,7 +93,7 @@ func resourceServiceLinkedRoleCreate(ctx context.Context, d *schema.ResourceData conn := meta.(*conns.AWSClient).IAMClient(ctx) serviceName := d.Get("aws_service_name").(string) - input := &iam.CreateServiceLinkedRoleInput{ + input := iam.CreateServiceLinkedRoleInput{ AWSServiceName: aws.String(serviceName), } @@ -107,8 +105,8 @@ func resourceServiceLinkedRoleCreate(ctx context.Context, d 
*schema.ResourceData input.Description = aws.String(v.(string)) } - output, err := tfresource.RetryGWhenAWSErrCodeEquals(ctx, propagationTimeout, func() (*iam.CreateServiceLinkedRoleOutput, error) { - return conn.CreateServiceLinkedRole(ctx, input) + output, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, propagationTimeout, func(ctx context.Context) (*iam.CreateServiceLinkedRoleOutput, error) { + return conn.CreateServiceLinkedRole(ctx, &input) }, "AccessDenied") if err != nil { return sdkdiag.AppendErrorf(diags, "creating IAM Service Linked Role (%s): %s", serviceName, err) @@ -117,8 +115,7 @@ func resourceServiceLinkedRoleCreate(ctx context.Context, d *schema.ResourceData d.SetId(aws.ToString(output.Role.Arn)) if tags := getTagsIn(ctx); len(tags) > 0 { - _, roleName, _, err := DecodeServiceLinkedRoleID(d.Id()) - + _, roleName, _, err := serviceLinkedRoleParseResourceID(d.Id()) if err != nil { return sdkdiag.AppendFromErr(diags, err) } @@ -143,13 +140,12 @@ func resourceServiceLinkedRoleRead(ctx context.Context, d *schema.ResourceData, var diags diag.Diagnostics conn := meta.(*conns.AWSClient).IAMClient(ctx) - serviceName, roleName, customSuffix, err := DecodeServiceLinkedRoleID(d.Id()) - + serviceName, roleName, customSuffix, err := serviceLinkedRoleParseResourceID(d.Id()) if err != nil { return sdkdiag.AppendFromErr(diags, err) } - outputRaw, err := tfresource.RetryWhenNewResourceNotFound(ctx, propagationTimeout, func() (any, error) { + role, err := tfresource.RetryWhenNewResourceNotFound(ctx, propagationTimeout, func(ctx context.Context) (*awstypes.Role, error) { return findRoleByName(ctx, conn, roleName) }, d.IsNewResource()) @@ -163,8 +159,6 @@ func resourceServiceLinkedRoleRead(ctx context.Context, d *schema.ResourceData, return sdkdiag.AppendErrorf(diags, "reading IAM Service Linked Role (%s): %s", d.Id(), err) } - role := outputRaw.(*awstypes.Role) - d.Set(names.AttrARN, role.Arn) d.Set("aws_service_name", serviceName) d.Set("create_date", 
aws.ToTime(role.CreateDate).Format(time.RFC3339)) @@ -184,17 +178,16 @@ func resourceServiceLinkedRoleUpdate(ctx context.Context, d *schema.ResourceData conn := meta.(*conns.AWSClient).IAMClient(ctx) if d.HasChangesExcept(names.AttrTagsAll, names.AttrTags) { - _, roleName, _, err := DecodeServiceLinkedRoleID(d.Id()) + _, roleName, _, err := serviceLinkedRoleParseResourceID(d.Id()) if err != nil { return sdkdiag.AppendFromErr(diags, err) } - input := &iam.UpdateRoleInput{ + input := iam.UpdateRoleInput{ Description: aws.String(d.Get(names.AttrDescription).(string)), RoleName: aws.String(roleName), } - - _, err = conn.UpdateRole(ctx, input) + _, err = conn.UpdateRole(ctx, &input) if err != nil { return sdkdiag.AppendErrorf(diags, "updating IAM Service Linked Role (%s): %s", d.Id(), err) @@ -208,8 +201,7 @@ func resourceServiceLinkedRoleDelete(ctx context.Context, d *schema.ResourceData var diags diag.Diagnostics conn := meta.(*conns.AWSClient).IAMClient(ctx) - _, roleName, _, err := DecodeServiceLinkedRoleID(d.Id()) - + _, roleName, _, err := serviceLinkedRoleParseResourceID(d.Id()) if err != nil { return sdkdiag.AppendFromErr(diags, err) } @@ -223,11 +215,10 @@ func resourceServiceLinkedRoleDelete(ctx context.Context, d *schema.ResourceData } func deleteServiceLinkedRole(ctx context.Context, conn *iam.Client, roleName string) error { - input := &iam.DeleteServiceLinkedRoleInput{ + input := iam.DeleteServiceLinkedRoleInput{ RoleName: aws.String(roleName), } - - output, err := conn.DeleteServiceLinkedRole(ctx, input) + output, err := conn.DeleteServiceLinkedRole(ctx, &input) if errs.IsA[*awstypes.NoSuchEntityException](err) { return nil @@ -238,7 +229,6 @@ func deleteServiceLinkedRole(ctx context.Context, conn *iam.Client, roleName str } deletionTaskID := aws.ToString(output.DeletionTaskId) - if deletionTaskID == "" { return nil } @@ -319,26 +309,25 @@ func findServiceLinkedRoleDeletionStatusByID(ctx context.Context, conn *iam.Clie return output, nil } -func 
DecodeServiceLinkedRoleID(id string) (serviceName, roleName, customSuffix string, err error) { - idArn, err := arn.Parse(id) - +func serviceLinkedRoleParseResourceID(id string) (string, string, string, error) { + arn, err := arn.Parse(id) if err != nil { return "", "", "", err } - resourceParts := strings.Split(idArn.Resource, "/") - + resourceParts := strings.Split(arn.Resource, "/") if len(resourceParts) != 4 { - return "", "", "", fmt.Errorf("expected IAM Service Role ARN (arn:PARTITION:iam::ACCOUNTID:role/aws-service-role/SERVICENAME/ROLENAME), received: %s", id) + return "", "", "", fmt.Errorf("unexpected format for ID (%[1]s), expected IAM Service Role ARN (arn:PARTITION:iam::ACCOUNTID:role/aws-service-role/SERVICENAME/ROLENAME)", id) } - serviceName = resourceParts[2] - roleName = resourceParts[3] + serviceName := resourceParts[2] + roleName := resourceParts[3] + var customSuffix string roleNameParts := strings.Split(roleName, "_") if len(roleNameParts) == 2 { customSuffix = roleNameParts[1] } - return + return serviceName, roleName, customSuffix, nil } diff --git a/internal/service/iam/service_linked_role_identity_gen_test.go b/internal/service/iam/service_linked_role_identity_gen_test.go new file mode 100644 index 000000000000..763316a0344f --- /dev/null +++ b/internal/service/iam/service_linked_role_identity_gen_test.go @@ -0,0 +1,214 @@ +// Code generated by internal/generate/identitytests/main.go; DO NOT EDIT. 
+ +package iam_test + +import ( + "testing" + + "github.com/hashicorp/terraform-plugin-testing/compare" + "github.com/hashicorp/terraform-plugin-testing/config" + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/knownvalue" + "github.com/hashicorp/terraform-plugin-testing/plancheck" + "github.com/hashicorp/terraform-plugin-testing/statecheck" + "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" + "github.com/hashicorp/terraform-plugin-testing/tfversion" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccIAMServiceLinkedRole_Identity_Basic(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_iam_service_linked_role.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), + CheckDestroy: testAccCheckServiceLinkedRoleDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + // Step 1: Setup + { + ConfigDirectory: config.StaticDirectory("testdata/ServiceLinkedRole/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckServiceLinkedRoleExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New(names.AttrARN), compare.ValuesSame()), + statecheck.ExpectIdentity(resourceName, 
map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + + // Step 2: Import command + { + ConfigDirectory: config.StaticDirectory("testdata/ServiceLinkedRole/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ImportStateKind: resource.ImportCommandWithID, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + + // Step 3: Import block with Import ID + { + ConfigDirectory: config.StaticDirectory("testdata/ServiceLinkedRole/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + }, + }, + }, + + // Step 4: Import block with Resource Identity + { + ConfigDirectory: config.StaticDirectory("testdata/ServiceLinkedRole/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithResourceIdentity, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + }, + }, + }, + }, + }) +} + +// Resource Identity was added after v6.4.0 +func TestAccIAMServiceLinkedRole_Identity_ExistingResource(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_iam_service_linked_role.test" + rName := 
sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), + CheckDestroy: testAccCheckServiceLinkedRoleDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/ServiceLinkedRole/basic_v6.4.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckServiceLinkedRoleExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/ServiceLinkedRole/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + }, + }) +} + +// Resource Identity was added after v6.4.0 +func TestAccIAMServiceLinkedRole_Identity_ExistingResource_NoRefresh_NoChange(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_iam_service_linked_role.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + 
acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), + CheckDestroy: testAccCheckServiceLinkedRoleDestroy(ctx), + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/ServiceLinkedRole/basic_v6.4.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckServiceLinkedRoleExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/ServiceLinkedRole/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + }, + }) +} diff --git a/internal/service/iam/service_linked_role_tags_gen_test.go b/internal/service/iam/service_linked_role_tags_gen_test.go index 92f8067116be..883c05ed3018 100644 --- a/internal/service/iam/service_linked_role_tags_gen_test.go +++ b/internal/service/iam/service_linked_role_tags_gen_test.go @@ -6,7 +6,6 @@ import ( "testing" "github.com/hashicorp/terraform-plugin-testing/config" - sdkacctest 
"github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/knownvalue" "github.com/hashicorp/terraform-plugin-testing/plancheck" @@ -18,10 +17,11 @@ import ( func TestAccIAMServiceLinkedRole_tags(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_iam_service_linked_role.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: testAccCheckServiceLinkedRoleDestroy(ctx), @@ -199,10 +199,11 @@ func TestAccIAMServiceLinkedRole_tags(t *testing.T) { func TestAccIAMServiceLinkedRole_tags_null(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_iam_service_linked_role.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: testAccCheckServiceLinkedRoleDestroy(ctx), @@ -265,10 +266,11 @@ func TestAccIAMServiceLinkedRole_tags_null(t *testing.T) { func TestAccIAMServiceLinkedRole_tags_EmptyMap(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_iam_service_linked_role.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: testAccCheckServiceLinkedRoleDestroy(ctx), @@ -327,10 
+329,11 @@ func TestAccIAMServiceLinkedRole_tags_EmptyMap(t *testing.T) { func TestAccIAMServiceLinkedRole_tags_AddOnUpdate(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_iam_service_linked_role.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: testAccCheckServiceLinkedRoleDestroy(ctx), @@ -407,10 +410,11 @@ func TestAccIAMServiceLinkedRole_tags_AddOnUpdate(t *testing.T) { func TestAccIAMServiceLinkedRole_tags_EmptyTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_iam_service_linked_role.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: testAccCheckServiceLinkedRoleDestroy(ctx), @@ -495,10 +499,11 @@ func TestAccIAMServiceLinkedRole_tags_EmptyTag_OnCreate(t *testing.T) { func TestAccIAMServiceLinkedRole_tags_EmptyTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_iam_service_linked_role.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: testAccCheckServiceLinkedRoleDestroy(ctx), @@ -631,10 +636,11 @@ func TestAccIAMServiceLinkedRole_tags_EmptyTag_OnUpdate_Add(t *testing.T) { func 
TestAccIAMServiceLinkedRole_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_iam_service_linked_role.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: testAccCheckServiceLinkedRoleDestroy(ctx), @@ -719,10 +725,11 @@ func TestAccIAMServiceLinkedRole_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { func TestAccIAMServiceLinkedRole_tags_DefaultTags_providerOnly(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_iam_service_linked_role.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: testAccCheckServiceLinkedRoleDestroy(ctx), @@ -899,10 +906,11 @@ func TestAccIAMServiceLinkedRole_tags_DefaultTags_providerOnly(t *testing.T) { func TestAccIAMServiceLinkedRole_tags_DefaultTags_nonOverlapping(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_iam_service_linked_role.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: testAccCheckServiceLinkedRoleDestroy(ctx), @@ -1058,10 +1066,11 @@ func TestAccIAMServiceLinkedRole_tags_DefaultTags_nonOverlapping(t *testing.T) { func TestAccIAMServiceLinkedRole_tags_DefaultTags_overlapping(t *testing.T) 
{ ctx := acctest.Context(t) + resourceName := "aws_iam_service_linked_role.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: testAccCheckServiceLinkedRoleDestroy(ctx), @@ -1233,10 +1242,11 @@ func TestAccIAMServiceLinkedRole_tags_DefaultTags_overlapping(t *testing.T) { func TestAccIAMServiceLinkedRole_tags_DefaultTags_updateToProviderOnly(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_iam_service_linked_role.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: testAccCheckServiceLinkedRoleDestroy(ctx), @@ -1322,10 +1332,11 @@ func TestAccIAMServiceLinkedRole_tags_DefaultTags_updateToProviderOnly(t *testin func TestAccIAMServiceLinkedRole_tags_DefaultTags_updateToResourceOnly(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_iam_service_linked_role.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: testAccCheckServiceLinkedRoleDestroy(ctx), @@ -1410,10 +1421,11 @@ func TestAccIAMServiceLinkedRole_tags_DefaultTags_updateToResourceOnly(t *testin func TestAccIAMServiceLinkedRole_tags_DefaultTags_emptyResourceTag(t *testing.T) { ctx := acctest.Context(t) + resourceName := 
"aws_iam_service_linked_role.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: testAccCheckServiceLinkedRoleDestroy(ctx), @@ -1474,10 +1486,11 @@ func TestAccIAMServiceLinkedRole_tags_DefaultTags_emptyResourceTag(t *testing.T) func TestAccIAMServiceLinkedRole_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_iam_service_linked_role.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: testAccCheckServiceLinkedRoleDestroy(ctx), @@ -1530,10 +1543,11 @@ func TestAccIAMServiceLinkedRole_tags_DefaultTags_emptyProviderOnlyTag(t *testin func TestAccIAMServiceLinkedRole_tags_DefaultTags_nullOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_iam_service_linked_role.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: testAccCheckServiceLinkedRoleDestroy(ctx), @@ -1591,10 +1605,11 @@ func TestAccIAMServiceLinkedRole_tags_DefaultTags_nullOverlappingResourceTag(t * func TestAccIAMServiceLinkedRole_tags_DefaultTags_nullNonOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) + resourceName := 
"aws_iam_service_linked_role.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: testAccCheckServiceLinkedRoleDestroy(ctx), @@ -1652,10 +1667,11 @@ func TestAccIAMServiceLinkedRole_tags_DefaultTags_nullNonOverlappingResourceTag( func TestAccIAMServiceLinkedRole_tags_ComputedTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_iam_service_linked_role.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: testAccCheckServiceLinkedRoleDestroy(ctx), @@ -1706,10 +1722,11 @@ func TestAccIAMServiceLinkedRole_tags_ComputedTag_OnCreate(t *testing.T) { func TestAccIAMServiceLinkedRole_tags_ComputedTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_iam_service_linked_role.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: testAccCheckServiceLinkedRoleDestroy(ctx), @@ -1802,10 +1819,11 @@ func TestAccIAMServiceLinkedRole_tags_ComputedTag_OnUpdate_Add(t *testing.T) { func TestAccIAMServiceLinkedRole_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_iam_service_linked_role.test" - rName := 
sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: testAccCheckServiceLinkedRoleDestroy(ctx), @@ -1888,10 +1906,11 @@ func TestAccIAMServiceLinkedRole_tags_ComputedTag_OnUpdate_Replace(t *testing.T) func TestAccIAMServiceLinkedRole_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_iam_service_linked_role.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: testAccCheckServiceLinkedRoleDestroy(ctx), @@ -2049,10 +2068,11 @@ func TestAccIAMServiceLinkedRole_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T func TestAccIAMServiceLinkedRole_tags_IgnoreTags_Overlap_ResourceTag(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_iam_service_linked_role.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: testAccCheckServiceLinkedRoleDestroy(ctx), diff --git a/internal/service/iam/service_linked_role_test.go b/internal/service/iam/service_linked_role_test.go index fc491c7436d3..76ba8e86e908 100644 --- a/internal/service/iam/service_linked_role_test.go +++ b/internal/service/iam/service_linked_role_test.go @@ -18,7 +18,7 @@ import ( 
"github.com/hashicorp/terraform-provider-aws/names" ) -func TestDecodeServiceLinkedRoleID(t *testing.T) { +func TestServiceLinkedRoleParseResourceID(t *testing.T) { t.Parallel() var testCases = []struct { @@ -60,7 +60,7 @@ func TestDecodeServiceLinkedRoleID(t *testing.T) { } for _, tc := range testCases { - serviceName, roleName, customSuffix, err := tfiam.DecodeServiceLinkedRoleID(tc.Input) + serviceName, roleName, customSuffix, err := tfiam.ServiceLinkedRoleParseResourceID(tc.Input) if tc.ErrCount == 0 && err != nil { t.Fatalf("expected %q not to trigger an error, received: %s", tc.Input, err) } @@ -259,8 +259,7 @@ func testAccCheckServiceLinkedRoleDestroy(ctx context.Context) resource.TestChec continue } - _, roleName, _, err := tfiam.DecodeServiceLinkedRoleID(rs.Primary.ID) - + _, roleName, _, err := tfiam.ServiceLinkedRoleParseResourceID(rs.Primary.ID) if err != nil { return err } @@ -291,8 +290,7 @@ func testAccCheckServiceLinkedRoleExists(ctx context.Context, n string) resource conn := acctest.Provider.Meta().(*conns.AWSClient).IAMClient(ctx) - _, roleName, _, err := tfiam.DecodeServiceLinkedRoleID(rs.Primary.ID) - + _, roleName, _, err := tfiam.ServiceLinkedRoleParseResourceID(rs.Primary.ID) if err != nil { return err } diff --git a/internal/service/iam/service_package_gen.go b/internal/service/iam/service_package_gen.go index 50160ba1a514..144e10450378 100644 --- a/internal/service/iam/service_package_gen.go +++ b/internal/service/iam/service_package_gen.go @@ -4,10 +4,11 @@ package iam import ( "context" + "iter" + "slices" "unique" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/iam" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -244,6 +245,12 @@ func (p *servicePackage) SDKResources(ctx context.Context) []*inttypes.ServicePa ResourceType: "OIDCProvider", }), Region: unique.Make(inttypes.ResourceRegionDisabled()), + 
Identity: inttypes.GlobalARNIdentity( + inttypes.WithIdentityDuplicateAttrs(names.AttrID), + ), + Import: inttypes.SDKv2Import{ + WrappedImport: true, + }, }, { Factory: resourcePolicy, @@ -254,6 +261,12 @@ func (p *servicePackage) SDKResources(ctx context.Context) []*inttypes.ServicePa ResourceType: "Policy", }), Region: unique.Make(inttypes.ResourceRegionDisabled()), + Identity: inttypes.GlobalARNIdentity( + inttypes.WithIdentityDuplicateAttrs(names.AttrID), + ), + Import: inttypes.SDKv2Import{ + WrappedImport: true, + }, }, { Factory: resourcePolicyAttachment, @@ -273,6 +286,9 @@ func (p *servicePackage) SDKResources(ctx context.Context) []*inttypes.ServicePa Identity: inttypes.GlobalSingleParameterIdentity(names.AttrName, inttypes.WithV6_0SDKv2Fix(), ), + Import: inttypes.SDKv2Import{ + CustomImport: true, + }, }, { Factory: resourceRolePolicy, @@ -311,6 +327,12 @@ func (p *servicePackage) SDKResources(ctx context.Context) []*inttypes.ServicePa ResourceType: "SAMLProvider", }), Region: unique.Make(inttypes.ResourceRegionDisabled()), + Identity: inttypes.GlobalARNIdentity( + inttypes.WithIdentityDuplicateAttrs(names.AttrID), + ), + Import: inttypes.SDKv2Import{ + WrappedImport: true, + }, }, { Factory: resourceSecurityTokenServicePreferences, @@ -337,6 +359,12 @@ func (p *servicePackage) SDKResources(ctx context.Context) []*inttypes.ServicePa ResourceType: "ServiceLinkedRole", }), Region: unique.Make(inttypes.ResourceRegionDisabled()), + Identity: inttypes.GlobalARNIdentity( + inttypes.WithIdentityDuplicateAttrs(names.AttrID), + ), + Import: inttypes.SDKv2Import{ + WrappedImport: true, + }, }, { Factory: resourceServiceSpecificCredential, @@ -403,6 +431,22 @@ func (p *servicePackage) SDKResources(ctx context.Context) []*inttypes.ServicePa } } +func (p *servicePackage) SDKListResources(ctx context.Context) iter.Seq[*inttypes.ServicePackageSDKListResource] { + return slices.Values([]*inttypes.ServicePackageSDKListResource{ + { + Factory: 
roleResourceAsListResource, + TypeName: "aws_iam_role", + Name: "Role", + Region: unique.Make(inttypes.ResourceRegionDisabled()), + Tags: unique.Make(inttypes.ServicePackageResourceTags{ + IdentifierAttribute: names.AttrName, + ResourceType: "Role", + }), + Identity: inttypes.GlobalSingleParameterIdentity(names.AttrName), + }, + }) +} + func (p *servicePackage) ServicePackageName() string { return names.IAM } @@ -426,7 +470,7 @@ func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) ( func(o *iam.Options) { if inContext, ok := conns.FromContext(ctx); ok && inContext.VCREnabled() { tflog.Info(ctx, "overriding retry behavior to immediately return VCR errors") - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(vcr.InteractionNotFoundRetryableFunc)) + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), vcr.InteractionNotFoundRetryableFunc) } }, withExtraOptions(ctx, p, config), diff --git a/internal/service/iam/service_specific_credential.go b/internal/service/iam/service_specific_credential.go index f80e3e43987b..5d84b23c4b3b 100644 --- a/internal/service/iam/service_specific_credential.go +++ b/internal/service/iam/service_specific_credential.go @@ -8,18 +8,22 @@ import ( "fmt" "log" "strings" + "time" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/iam" awstypes "github.com/aws/aws-sdk-go-v2/service/iam/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/enum" "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" + tfslices 
"github.com/hashicorp/terraform-provider-aws/internal/slices" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + inttypes "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -30,41 +34,65 @@ func resourceServiceSpecificCredential() *schema.Resource { ReadWithoutTimeout: resourceServiceSpecificCredentialRead, UpdateWithoutTimeout: resourceServiceSpecificCredentialUpdate, DeleteWithoutTimeout: resourceServiceSpecificCredentialDelete, + Importer: &schema.ResourceImporter{ StateContext: schema.ImportStatePassthroughContext, }, Schema: map[string]*schema.Schema{ - names.AttrServiceName: { + "create_date": { Type: schema.TypeString, - Required: true, - ForceNew: true, + Computed: true, }, - names.AttrUserName: { - Type: schema.TypeString, - Required: true, + "credential_age_days": { + Type: schema.TypeInt, + Optional: true, ForceNew: true, - ValidateFunc: validation.StringLenBetween(1, 64), + ValidateFunc: validation.IntBetween(1, 36600), }, - names.AttrStatus: { - Type: schema.TypeString, - Optional: true, - Default: awstypes.StatusTypeActive, - ValidateDiagFunc: enum.Validate[awstypes.StatusType](), + "expiration_date": { + Type: schema.TypeString, + Computed: true, + }, + "service_credential_alias": { + Type: schema.TypeString, + Computed: true, + }, + "service_credential_secret": { + Type: schema.TypeString, + Sensitive: true, + Computed: true, + }, + names.AttrServiceName: { + Type: schema.TypeString, + Required: true, + ForceNew: true, }, "service_password": { Type: schema.TypeString, Sensitive: true, Computed: true, }, - "service_user_name": { + "service_specific_credential_id": { Type: schema.TypeString, Computed: true, }, - "service_specific_credential_id": { + "service_user_name": { Type: schema.TypeString, Computed: true, }, + names.AttrStatus: { + Type: schema.TypeString, + Optional: true, + Default: awstypes.StatusTypeActive, + ValidateDiagFunc: 
enum.Validate[awstypes.StatusType](), + }, + names.AttrUserName: { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.StringLenBetween(1, 64), + }, }, } } @@ -73,31 +101,39 @@ func resourceServiceSpecificCredentialCreate(ctx context.Context, d *schema.Reso var diags diag.Diagnostics conn := meta.(*conns.AWSClient).IAMClient(ctx) - input := &iam.CreateServiceSpecificCredentialInput{ - ServiceName: aws.String(d.Get(names.AttrServiceName).(string)), - UserName: aws.String(d.Get(names.AttrUserName).(string)), + serviceName, userName := d.Get(names.AttrServiceName).(string), d.Get(names.AttrUserName).(string) + input := iam.CreateServiceSpecificCredentialInput{ + ServiceName: aws.String(serviceName), + UserName: aws.String(userName), } - out, err := conn.CreateServiceSpecificCredential(ctx, input) - if err != nil { - return sdkdiag.AppendErrorf(diags, "creating IAM Service Specific Credential: %s", err) + if v, ok := d.GetOk("credential_age_days"); ok { + input.CredentialAgeDays = aws.Int32(int32(v.(int))) } - cred := out.ServiceSpecificCredential + output, err := conn.CreateServiceSpecificCredential(ctx, &input) + + if err != nil { + return sdkdiag.AppendErrorf(diags, "creating IAM Service-Specific Credential: %s", err) + } - d.SetId(fmt.Sprintf("%s:%s:%s", aws.ToString(cred.ServiceName), aws.ToString(cred.UserName), aws.ToString(cred.ServiceSpecificCredentialId))) + cred := output.ServiceSpecificCredential + credID := aws.ToString(cred.ServiceSpecificCredentialId) + d.SetId(serviceSpecificCredentialCreateResourceID(serviceName, userName, credID)) + d.Set("service_credential_secret", cred.ServiceCredentialSecret) d.Set("service_password", cred.ServicePassword) - if v, ok := d.GetOk(names.AttrStatus); ok && v.(string) != string(awstypes.StatusTypeActive) { - updateInput := &iam.UpdateServiceSpecificCredentialInput{ - ServiceSpecificCredentialId: cred.ServiceSpecificCredentialId, - UserName: cred.UserName, + if v, ok := 
d.GetOk(names.AttrStatus); ok && awstypes.StatusType(v.(string)) != awstypes.StatusTypeActive { + input := iam.UpdateServiceSpecificCredentialInput{ + ServiceSpecificCredentialId: aws.String(credID), Status: awstypes.StatusType(v.(string)), + UserName: aws.String(userName), } - _, err := conn.UpdateServiceSpecificCredential(ctx, updateInput) + _, err := conn.UpdateServiceSpecificCredential(ctx, &input) + if err != nil { - return sdkdiag.AppendErrorf(diags, "settings IAM Service Specific Credential status: %s", err) + return sdkdiag.AppendErrorf(diags, "setting IAM Service-Specific Credential (%s) status: %s", d.Id(), err) } } @@ -108,13 +144,13 @@ func resourceServiceSpecificCredentialRead(ctx context.Context, d *schema.Resour var diags diag.Diagnostics conn := meta.(*conns.AWSClient).IAMClient(ctx) - serviceName, userName, credID, err := DecodeServiceSpecificCredentialId(d.Id()) + serviceName, userName, credID, err := serviceSpecificCredentialParseResourceID(d.Id()) if err != nil { - return sdkdiag.AppendErrorf(diags, "reading IAM Service Specific Credential (%s): %s", d.Id(), err) + return sdkdiag.AppendFromErr(diags, err) } - outputRaw, err := tfresource.RetryWhenNewResourceNotFound(ctx, propagationTimeout, func() (any, error) { - return FindServiceSpecificCredential(ctx, conn, serviceName, userName, credID) + cred, err := tfresource.RetryWhenNewResourceNotFound(ctx, propagationTimeout, func(ctx context.Context) (*awstypes.ServiceSpecificCredentialMetadata, error) { + return findServiceSpecificCredentialByThreePartKey(ctx, conn, serviceName, userName, credID) }, d.IsNewResource()) if !d.IsNewResource() && tfresource.NotFound(err) { @@ -127,13 +163,16 @@ func resourceServiceSpecificCredentialRead(ctx context.Context, d *schema.Resour return sdkdiag.AppendErrorf(diags, "reading IAM Service Specific Credential (%s): %s", d.Id(), err) } - cred := outputRaw.(*awstypes.ServiceSpecificCredentialMetadata) - + d.Set("create_date", cred.CreateDate.Format(time.RFC3339)) + 
if cred.ExpirationDate != nil { + d.Set("expiration_date", cred.ExpirationDate.Format(time.RFC3339)) + } + d.Set("service_credential_alias", cred.ServiceCredentialAlias) + d.Set(names.AttrServiceName, cred.ServiceName) d.Set("service_specific_credential_id", cred.ServiceSpecificCredentialId) d.Set("service_user_name", cred.ServiceUserName) - d.Set(names.AttrServiceName, cred.ServiceName) - d.Set(names.AttrUserName, cred.UserName) d.Set(names.AttrStatus, cred.Status) + d.Set(names.AttrUserName, cred.UserName) return diags } @@ -142,14 +181,20 @@ func resourceServiceSpecificCredentialUpdate(ctx context.Context, d *schema.Reso var diags diag.Diagnostics conn := meta.(*conns.AWSClient).IAMClient(ctx) - request := &iam.UpdateServiceSpecificCredentialInput{ - ServiceSpecificCredentialId: aws.String(d.Get("service_specific_credential_id").(string)), - UserName: aws.String(d.Get(names.AttrUserName).(string)), + _, userName, credID, err := serviceSpecificCredentialParseResourceID(d.Id()) + if err != nil { + return sdkdiag.AppendFromErr(diags, err) + } + + input := iam.UpdateServiceSpecificCredentialInput{ + ServiceSpecificCredentialId: aws.String(credID), Status: awstypes.StatusType(d.Get(names.AttrStatus).(string)), + UserName: aws.String(userName), } - _, err := conn.UpdateServiceSpecificCredential(ctx, request) + _, err = conn.UpdateServiceSpecificCredential(ctx, &input) + if err != nil { - return sdkdiag.AppendErrorf(diags, "updating IAM Service Specific Credential %s: %s", d.Id(), err) + return sdkdiag.AppendErrorf(diags, "updating IAM Service-Specific Credential (%s): %s", d.Id(), err) } return append(diags, resourceServiceSpecificCredentialRead(ctx, d, meta)...) 
@@ -159,28 +204,96 @@ func resourceServiceSpecificCredentialDelete(ctx context.Context, d *schema.Reso var diags diag.Diagnostics conn := meta.(*conns.AWSClient).IAMClient(ctx) - request := &iam.DeleteServiceSpecificCredentialInput{ - ServiceSpecificCredentialId: aws.String(d.Get("service_specific_credential_id").(string)), - UserName: aws.String(d.Get(names.AttrUserName).(string)), + _, userName, credID, err := serviceSpecificCredentialParseResourceID(d.Id()) + if err != nil { + return sdkdiag.AppendFromErr(diags, err) } - if _, err := conn.DeleteServiceSpecificCredential(ctx, request); err != nil { - if errs.IsA[*awstypes.NoSuchEntityException](err) { - return diags - } - return sdkdiag.AppendErrorf(diags, "deleting IAM Service Specific Credential %s: %s", d.Id(), err) + log.Printf("[DEBUG] Deleting IAM Service-Specific Credential: %s", d.Id()) + input := iam.DeleteServiceSpecificCredentialInput{ + ServiceSpecificCredentialId: aws.String(credID), + UserName: aws.String(userName), } + _, err = conn.DeleteServiceSpecificCredential(ctx, &input) + + if errs.IsA[*awstypes.NoSuchEntityException](err) { + return diags + } + + if err != nil { + return sdkdiag.AppendErrorf(diags, "deleting IAM Service-Specific Credential (%s): %s", d.Id(), err) + } + return diags } -func DecodeServiceSpecificCredentialId(id string) (string, string, string, error) { - creds := strings.Split(id, ":") - if len(creds) != 3 { - return "", "", "", fmt.Errorf("unknown IAM Service Specific Credential ID format") +const serviceSpecificCredentialResourceIDSeparator = ":" + +func serviceSpecificCredentialCreateResourceID(serviceName, userName, serviceSpecificCredentialID string) string { + parts := []string{serviceName, userName, serviceSpecificCredentialID} + id := strings.Join(parts, serviceSpecificCredentialResourceIDSeparator) + + return id +} + +func serviceSpecificCredentialParseResourceID(id string) (string, string, string, error) { + parts := strings.SplitN(id, 
serviceSpecificCredentialResourceIDSeparator, 3) + + if len(parts) != 3 || parts[0] == "" || parts[1] == "" || parts[2] == "" { + return "", "", "", fmt.Errorf("unexpected format for ID (%[1]s), expected SERVICE-NAME%[2]sUSER-NAME%[2]sSERVICE-SPECIFIC-CREDENTIAL-ID", id, serviceSpecificCredentialResourceIDSeparator) + } + + return parts[0], parts[1], parts[2], nil +} + +func findServiceSpecificCredentialByThreePartKey(ctx context.Context, conn *iam.Client, serviceName, userName, serviceSpecificCredentialID string) (*awstypes.ServiceSpecificCredentialMetadata, error) { + input := iam.ListServiceSpecificCredentialsInput{ + ServiceName: aws.String(serviceName), + UserName: aws.String(userName), + } + + return findServiceSpecificCredential(ctx, conn, &input, func(v *awstypes.ServiceSpecificCredentialMetadata) bool { + return aws.ToString(v.ServiceSpecificCredentialId) == serviceSpecificCredentialID + }) +} + +func findServiceSpecificCredential(ctx context.Context, conn *iam.Client, input *iam.ListServiceSpecificCredentialsInput, filter tfslices.Predicate[*awstypes.ServiceSpecificCredentialMetadata]) (*awstypes.ServiceSpecificCredentialMetadata, error) { + output, err := findServiceSpecificCredentials(ctx, conn, input, filter) + + if err != nil { + return nil, err + } + + return tfresource.AssertSingleValueResult(output) +} + +func findServiceSpecificCredentials(ctx context.Context, conn *iam.Client, input *iam.ListServiceSpecificCredentialsInput, filter tfslices.Predicate[*awstypes.ServiceSpecificCredentialMetadata]) ([]awstypes.ServiceSpecificCredentialMetadata, error) { + var output []awstypes.ServiceSpecificCredentialMetadata + + err := listServiceSpecificCredentialsPages(ctx, conn, input, func(page *iam.ListServiceSpecificCredentialsOutput, lastPage bool) bool { + if page == nil { + return !lastPage + } + + for _, v := range page.ServiceSpecificCredentials { + if p := &v; !inttypes.IsZero(p) && filter(p) { + output = append(output, v) + } + } + + return !lastPage + 
}) + + if errs.IsA[*awstypes.NoSuchEntityException](err) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, + } + } + + if err != nil { + return nil, err } - serviceName := creds[0] - userName := creds[1] - credId := creds[2] - return serviceName, userName, credId, nil + return output, nil } diff --git a/internal/service/iam/service_specific_credential_test.go b/internal/service/iam/service_specific_credential_test.go index bdc45e3948ce..42c476db1d6f 100644 --- a/internal/service/iam/service_specific_credential_test.go +++ b/internal/service/iam/service_specific_credential_test.go @@ -47,7 +47,7 @@ func TestAccIAMServiceSpecificCredential_basic(t *testing.T) { ResourceName: resourceName, ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"service_password"}, + ImportStateVerifyIgnore: []string{"service_password", "service_credential_secret"}, }, }, }) @@ -87,7 +87,7 @@ func TestAccIAMServiceSpecificCredential_multi(t *testing.T) { ResourceName: resourceName, ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"service_password"}, + ImportStateVerifyIgnore: []string{"service_password", "service_credential_secret"}, }, }, }) @@ -117,7 +117,7 @@ func TestAccIAMServiceSpecificCredential_status(t *testing.T) { ResourceName: resourceName, ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"service_password"}, + ImportStateVerifyIgnore: []string{"service_password", "service_credential_secret"}, }, { Config: testAccServiceSpecificCredentialConfig_status(rName, "Active"), @@ -163,29 +163,22 @@ func TestAccIAMServiceSpecificCredential_disappears(t *testing.T) { }) } -func testAccCheckServiceSpecificCredentialExists(ctx context.Context, n string, cred *awstypes.ServiceSpecificCredentialMetadata) resource.TestCheckFunc { +func testAccCheckServiceSpecificCredentialExists(ctx context.Context, n string, v *awstypes.ServiceSpecificCredentialMetadata) 
resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] if !ok { return fmt.Errorf("Not found: %s", n) } - if rs.Primary.ID == "" { - return fmt.Errorf("No Server Cert ID is set") - } conn := acctest.Provider.Meta().(*conns.AWSClient).IAMClient(ctx) - serviceName, userName, credId, err := tfiam.DecodeServiceSpecificCredentialId(rs.Primary.ID) - if err != nil { - return err - } + output, err := tfiam.FindServiceSpecificCredentialByThreePartKey(ctx, conn, rs.Primary.Attributes[names.AttrServiceName], rs.Primary.Attributes[names.AttrUserName], rs.Primary.Attributes["service_specific_credential_id"]) - output, err := tfiam.FindServiceSpecificCredential(ctx, conn, serviceName, userName, credId) if err != nil { return err } - *cred = *output + *v = *output return nil } @@ -200,19 +193,14 @@ func testAccCheckServiceSpecificCredentialDestroy(ctx context.Context) resource. continue } - serviceName, userName, credId, err := tfiam.DecodeServiceSpecificCredentialId(rs.Primary.ID) - if err != nil { - return err - } - - output, err := tfiam.FindServiceSpecificCredential(ctx, conn, serviceName, userName, credId) + output, err := tfiam.FindServiceSpecificCredentialByThreePartKey(ctx, conn, rs.Primary.Attributes[names.AttrServiceName], rs.Primary.Attributes[names.AttrUserName], rs.Primary.Attributes["service_specific_credential_id"]) if tfresource.NotFound(err) { continue } if output != nil { - return fmt.Errorf("IAM Service Specific Credential (%s) still exists", rs.Primary.ID) + return fmt.Errorf("IAM Service-Specific Credential (%s) still exists", rs.Primary.ID) } } @@ -264,3 +252,54 @@ resource "aws_iam_service_specific_credential" "test" { } `, rName, status) } + +func TestAccIAMServiceSpecificCredential_bedrockWithExpiration(t *testing.T) { + ctx := acctest.Context(t) + var cred awstypes.ServiceSpecificCredentialMetadata + + resourceName := "aws_iam_service_specific_credential.test" + rName := 
sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckServiceSpecificCredentialDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccServiceSpecificCredentialConfig_bedrockWithExpiration(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckServiceSpecificCredentialExists(ctx, resourceName, &cred), + resource.TestCheckResourceAttrPair(resourceName, names.AttrUserName, "aws_iam_user.test", names.AttrName), + resource.TestCheckResourceAttr(resourceName, names.AttrServiceName, "bedrock.amazonaws.com"), + resource.TestCheckResourceAttr(resourceName, names.AttrStatus, "Active"), + resource.TestCheckResourceAttr(resourceName, "credential_age_days", "30"), + resource.TestCheckResourceAttrSet(resourceName, "service_credential_alias"), + resource.TestCheckResourceAttrSet(resourceName, "service_specific_credential_id"), + resource.TestCheckResourceAttrSet(resourceName, "create_date"), + resource.TestCheckResourceAttrSet(resourceName, "expiration_date"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"service_password", "service_credential_secret", "credential_age_days"}, + }, + }, + }) +} + +func testAccServiceSpecificCredentialConfig_bedrockWithExpiration(rName string) string { + return fmt.Sprintf(` +resource "aws_iam_user" "test" { + name = %[1]q +} + +resource "aws_iam_service_specific_credential" "test" { + service_name = "bedrock.amazonaws.com" + user_name = aws_iam_user.test.name + credential_age_days = 30 +} +`, rName) +} diff --git a/internal/service/iam/session_context_data_source.go b/internal/service/iam/session_context_data_source.go index 1fd5a453ed7b..e343c8b06e05 100644 --- 
a/internal/service/iam/session_context_data_source.go +++ b/internal/service/iam/session_context_data_source.go @@ -11,7 +11,6 @@ import ( "github.com/aws/aws-sdk-go-v2/aws/arn" awstypes "github.com/aws/aws-sdk-go-v2/service/iam/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" @@ -62,7 +61,7 @@ func dataSourceSessionContextRead(ctx context.Context, d *schema.ResourceData, m var roleName, sessionName string var err error - if roleName, sessionName = RoleNameSessionFromARN(arn); roleName == "" { + if roleName, sessionName = roleNameSessionFromARN(arn); roleName == "" { d.Set("issuer_arn", arn) d.Set("issuer_id", "") d.Set("issuer_name", "") @@ -73,26 +72,22 @@ func dataSourceSessionContextRead(ctx context.Context, d *schema.ResourceData, m var role *awstypes.Role - err = retry.RetryContext(ctx, propagationTimeout, func() *retry.RetryError { + err = tfresource.Retry(ctx, propagationTimeout, func(ctx context.Context) *tfresource.RetryError { var err error role, err = findRoleByName(ctx, conn, roleName) if !d.IsNewResource() && tfresource.NotFound(err) { - return retry.RetryableError(err) + return tfresource.RetryableError(err) } if err != nil { - return retry.NonRetryableError(err) + return tfresource.NonRetryableError(err) } return nil }) - if tfresource.TimedOut(err) { - role, err = findRoleByName(ctx, conn, roleName) - } - if err != nil { return sdkdiag.AppendErrorf(diags, "unable to get role (%s): %s", roleName, err) } @@ -109,9 +104,9 @@ func dataSourceSessionContextRead(ctx context.Context, d *schema.ResourceData, m return diags } -// RoleNameSessionFromARN returns the role and session names in an ARN if any. +// roleNameSessionFromARN returns the role and session names in an ARN if any. 
// Otherwise, it returns empty strings. -func RoleNameSessionFromARN(rawARN string) (string, string) { +func roleNameSessionFromARN(rawARN string) (string, string) { parsedARN, err := arn.Parse(rawARN) if err != nil { diff --git a/internal/service/iam/signing_certificate.go b/internal/service/iam/signing_certificate.go index 137b4e6f656f..30bb094e32b1 100644 --- a/internal/service/iam/signing_certificate.go +++ b/internal/service/iam/signing_certificate.go @@ -3,8 +3,7 @@ package iam -import ( // nosemgrep:ci.semgrep.aws.multiple-service-imports - +import ( "context" "fmt" "log" @@ -14,12 +13,15 @@ import ( // nosemgrep:ci.semgrep.aws.multiple-service-imports "github.com/aws/aws-sdk-go-v2/service/iam" awstypes "github.com/aws/aws-sdk-go-v2/service/iam/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/enum" "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" + tfslices "github.com/hashicorp/terraform-provider-aws/internal/slices" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + inttypes "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -30,6 +32,7 @@ func resourceSigningCertificate() *schema.Resource { ReadWithoutTimeout: resourceSigningCertificateRead, UpdateWithoutTimeout: resourceSigningCertificateUpdate, DeleteWithoutTimeout: resourceSigningCertificateDelete, + Importer: &schema.ResourceImporter{ StateContext: schema.ImportStatePassthroughContext, }, @@ -64,30 +67,33 @@ func resourceSigningCertificateCreate(ctx context.Context, d *schema.ResourceDat var diags diag.Diagnostics conn := meta.(*conns.AWSClient).IAMClient(ctx) - createOpts := 
&iam.UploadSigningCertificateInput{ + userName := d.Get(names.AttrUserName).(string) + input := iam.UploadSigningCertificateInput{ CertificateBody: aws.String(d.Get("certificate_body").(string)), - UserName: aws.String(d.Get(names.AttrUserName).(string)), + UserName: aws.String(userName), } - resp, err := conn.UploadSigningCertificate(ctx, createOpts) + output, err := conn.UploadSigningCertificate(ctx, &input) + if err != nil { return sdkdiag.AppendErrorf(diags, "uploading IAM Signing Certificate: %s", err) } - cert := resp.Certificate - certId := cert.CertificateId - d.SetId(fmt.Sprintf("%s:%s", aws.ToString(certId), aws.ToString(cert.UserName))) + cert := output.Certificate + certID := aws.ToString(cert.CertificateId) + d.SetId(signingCertificateCreateResourceID(certID, userName)) if v, ok := d.GetOk(names.AttrStatus); ok && v.(string) != string(awstypes.StatusTypeActive) { - updateInput := &iam.UpdateSigningCertificateInput{ - CertificateId: certId, - UserName: aws.String(d.Get(names.AttrUserName).(string)), + input := iam.UpdateSigningCertificateInput{ + CertificateId: aws.String(certID), Status: awstypes.StatusType(v.(string)), + UserName: aws.String(userName), } - _, err := conn.UpdateSigningCertificate(ctx, updateInput) + _, err := conn.UpdateSigningCertificate(ctx, &input) + if err != nil { - return sdkdiag.AppendErrorf(diags, "settings IAM Signing Certificate status: %s", err) + return sdkdiag.AppendErrorf(diags, "setting IAM Signing Certificate (%s) status: %s", d.Id(), err) } } @@ -98,13 +104,13 @@ func resourceSigningCertificateRead(ctx context.Context, d *schema.ResourceData, var diags diag.Diagnostics conn := meta.(*conns.AWSClient).IAMClient(ctx) - certId, userName, err := DecodeSigningCertificateId(d.Id()) + certID, userName, err := signingCertificateParseResourceID(d.Id()) if err != nil { - return sdkdiag.AppendErrorf(diags, "reading IAM Signing Certificate (%s): %s", d.Id(), err) + return sdkdiag.AppendFromErr(diags, err) } - outputRaw, err := 
tfresource.RetryWhenNewResourceNotFound(ctx, propagationTimeout, func() (any, error) { - return FindSigningCertificate(ctx, conn, userName, certId) + output, err := tfresource.RetryWhenNewResourceNotFound(ctx, propagationTimeout, func(ctx context.Context) (*awstypes.SigningCertificate, error) { + return findSigningCertificateByTwoPartKey(ctx, conn, userName, certID) }, d.IsNewResource()) if !d.IsNewResource() && tfresource.NotFound(err) { @@ -117,12 +123,10 @@ func resourceSigningCertificateRead(ctx context.Context, d *schema.ResourceData, return sdkdiag.AppendErrorf(diags, "reading IAM Signing Certificate (%s): %s", d.Id(), err) } - resp := outputRaw.(*awstypes.SigningCertificate) - - d.Set("certificate_body", resp.CertificateBody) - d.Set("certificate_id", resp.CertificateId) - d.Set(names.AttrUserName, resp.UserName) - d.Set(names.AttrStatus, resp.Status) + d.Set("certificate_body", output.CertificateBody) + d.Set("certificate_id", output.CertificateId) + d.Set(names.AttrStatus, output.Status) + d.Set(names.AttrUserName, output.UserName) return diags } @@ -131,18 +135,18 @@ func resourceSigningCertificateUpdate(ctx context.Context, d *schema.ResourceDat var diags diag.Diagnostics conn := meta.(*conns.AWSClient).IAMClient(ctx) - certId, userName, err := DecodeSigningCertificateId(d.Id()) + certID, userName, err := signingCertificateParseResourceID(d.Id()) if err != nil { - return sdkdiag.AppendErrorf(diags, "updating IAM Signing Certificate (%s): %s", d.Id(), err) + return sdkdiag.AppendFromErr(diags, err) } - updateInput := &iam.UpdateSigningCertificateInput{ - CertificateId: aws.String(certId), - UserName: aws.String(userName), + input := iam.UpdateSigningCertificateInput{ + CertificateId: aws.String(certID), Status: awstypes.StatusType(d.Get(names.AttrStatus).(string)), + UserName: aws.String(userName), } + _, err = conn.UpdateSigningCertificate(ctx, &input) - _, err = conn.UpdateSigningCertificate(ctx, updateInput) if err != nil { return 
sdkdiag.AppendErrorf(diags, "updating IAM Signing Certificate (%s): %s", d.Id(), err) } @@ -153,36 +157,93 @@ func resourceSigningCertificateUpdate(ctx context.Context, d *schema.ResourceDat func resourceSigningCertificateDelete(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { var diags diag.Diagnostics conn := meta.(*conns.AWSClient).IAMClient(ctx) - log.Printf("[INFO] Deleting IAM Signing Certificate: %s", d.Id()) - certId, userName, err := DecodeSigningCertificateId(d.Id()) + certID, userName, err := signingCertificateParseResourceID(d.Id()) if err != nil { - return sdkdiag.AppendErrorf(diags, "deleting IAM Signing Certificate (%s): %s", d.Id(), err) + return sdkdiag.AppendFromErr(diags, err) } - input := &iam.DeleteSigningCertificateInput{ - CertificateId: aws.String(certId), + log.Printf("[INFO] Deleting IAM Signing Certificate: %s", d.Id()) + input := iam.DeleteSigningCertificateInput{ + CertificateId: aws.String(certID), UserName: aws.String(userName), } + _, err = conn.DeleteSigningCertificate(ctx, &input) - if _, err := conn.DeleteSigningCertificate(ctx, input); err != nil { - if errs.IsA[*awstypes.NoSuchEntityException](err) { - return diags - } + if errs.IsA[*awstypes.NoSuchEntityException](err) { + return diags + } + + if err != nil { return sdkdiag.AppendErrorf(diags, "deleting IAM Signing Certificate (%s): %s", d.Id(), err) } return diags } -func DecodeSigningCertificateId(id string) (string, string, error) { - creds := strings.Split(id, ":") - if len(creds) != 2 { - return "", "", fmt.Errorf("unknown IAM Signing Certificate ID format") +const signingCertificateResourceIDSeparator = ":" + +func signingCertificateCreateResourceID(certificateID, userName string) string { + parts := []string{certificateID, userName} + id := strings.Join(parts, signingCertificateResourceIDSeparator) + + return id +} + +func signingCertificateParseResourceID(id string) (string, string, error) { + parts := strings.SplitN(id, 
signingCertificateResourceIDSeparator, 2) + + if len(parts) != 2 || parts[0] == "" || parts[1] == "" { + return "", "", fmt.Errorf("unexpected format for ID (%[1]s), expected CERTIFICATE-ID%[2]sUSER-NAME", id, signingCertificateResourceIDSeparator) } - certId := creds[0] - userName := creds[1] + return parts[0], parts[1], nil +} + +func findSigningCertificateByTwoPartKey(ctx context.Context, conn *iam.Client, userName, certID string) (*awstypes.SigningCertificate, error) { + input := &iam.ListSigningCertificatesInput{ + UserName: aws.String(userName), + } + + return findSigningCertificate(ctx, conn, input, func(v *awstypes.SigningCertificate) bool { + return aws.ToString(v.CertificateId) == certID + }) +} + +func findSigningCertificate(ctx context.Context, conn *iam.Client, input *iam.ListSigningCertificatesInput, filter tfslices.Predicate[*awstypes.SigningCertificate]) (*awstypes.SigningCertificate, error) { + output, err := findSigningCertificates(ctx, conn, input, filter) + + if err != nil { + return nil, err + } + + return tfresource.AssertSingleValueResult(output) +} + +func findSigningCertificates(ctx context.Context, conn *iam.Client, input *iam.ListSigningCertificatesInput, filter tfslices.Predicate[*awstypes.SigningCertificate]) ([]awstypes.SigningCertificate, error) { + var output []awstypes.SigningCertificate + + pages := iam.NewListSigningCertificatesPaginator(conn, input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + + if errs.IsA[*awstypes.NoSuchEntityException](err) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, + } + } + + if err != nil { + return nil, err + } + + for _, v := range page.Certificates { + if p := &v; !inttypes.IsZero(p) && filter(p) { + output = append(output, v) + } + } + } - return certId, userName, nil + return output, nil } diff --git a/internal/service/iam/signing_certificate_test.go b/internal/service/iam/signing_certificate_test.go index 07933e4d6069..a1b0d6fd3e6b 100644 --- 
a/internal/service/iam/signing_certificate_test.go +++ b/internal/service/iam/signing_certificate_test.go @@ -126,29 +126,22 @@ func TestAccIAMSigningCertificate_disappears(t *testing.T) { }) } -func testAccCheckSigningCertificateExists(ctx context.Context, n string, cred *awstypes.SigningCertificate) resource.TestCheckFunc { +func testAccCheckSigningCertificateExists(ctx context.Context, n string, v *awstypes.SigningCertificate) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] if !ok { return fmt.Errorf("Not found: %s", n) } - if rs.Primary.ID == "" { - return fmt.Errorf("No Server Cert ID is set") - } conn := acctest.Provider.Meta().(*conns.AWSClient).IAMClient(ctx) - certId, userName, err := tfiam.DecodeSigningCertificateId(rs.Primary.ID) - if err != nil { - return err - } + output, err := tfiam.FindSigningCertificateByTwoPartKey(ctx, conn, rs.Primary.Attributes[names.AttrUserName], rs.Primary.Attributes["certificate_id"]) - output, err := tfiam.FindSigningCertificate(ctx, conn, userName, certId) if err != nil { return err } - *cred = *output + *v = *output return nil } @@ -163,19 +156,14 @@ func testAccCheckSigningCertificateDestroy(ctx context.Context) resource.TestChe continue } - certId, userName, err := tfiam.DecodeSigningCertificateId(rs.Primary.ID) - if err != nil { - return err - } - - output, err := tfiam.FindSigningCertificate(ctx, conn, userName, certId) + output, err := tfiam.FindSigningCertificateByTwoPartKey(ctx, conn, rs.Primary.Attributes[names.AttrUserName], rs.Primary.Attributes["certificate_id"]) if tfresource.NotFound(err) { continue } if output != nil { - return fmt.Errorf("IAM Service Specific Credential (%s) still exists", rs.Primary.ID) + return fmt.Errorf("IAM Signing Certificate (%s) still exists", rs.Primary.ID) } } diff --git a/internal/service/iam/state_funcs.go b/internal/service/iam/state_funcs.go deleted file mode 100644 index 568eaef0b4e2..000000000000 --- 
a/internal/service/iam/state_funcs.go +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package iam - -import ( - "strings" -) // StateTrimSpace is a StateFunc that trims extraneous whitespace from strings. -// This prevents differences caused by an API canonicalizing a string with a -// trailing newline character removed. -func StateTrimSpace(v any) string { - s, ok := v.(string) - - if !ok { - return "" - } - - return strings.TrimSpace(s) -} diff --git a/internal/service/iam/sweep.go b/internal/service/iam/sweep.go index 5c58724368e6..fbbb1edc57d3 100644 --- a/internal/service/iam/sweep.go +++ b/internal/service/iam/sweep.go @@ -59,11 +59,13 @@ func RegisterSweepers() { Dependencies: []string{ "aws_auditmanager_assessment", "aws_batch_compute_environment", + "aws_bedrockagent_agent", "aws_cloudformation_stack_set_instance", "aws_cognito_user_pool", "aws_config_configuration_aggregator", "aws_config_configuration_recorder", "aws_datasync_location", + "aws_datazone_domain", "aws_dax_cluster", "aws_db_instance", "aws_db_option_group", @@ -86,10 +88,8 @@ func RegisterSweepers() { F: sweepRoles, }) - awsv2.Register("aws_iam_saml_provider", sweepSAMLProvider) - + awsv2.Register("aws_iam_saml_provider", sweepSAMLProviders) awsv2.Register("aws_iam_service_specific_credential", sweepServiceSpecificCredentials) - awsv2.Register("aws_iam_signing_certificate", sweepSigningCertificates) resource.AddTestSweepers("aws_iam_server_certificate", &resource.Sweeper{ @@ -117,7 +117,7 @@ func sweepGroups(region string) error { client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %w", err) + return fmt.Errorf("getting client: %w", err) } conn := client.IAMClient(ctx) @@ -291,7 +291,8 @@ func sweepOpenIDConnectProvider(ctx context.Context, client *conns.AWSClient) ([ func sweepServiceSpecificCredentials(ctx context.Context, client *conns.AWSClient) ([]sweep.Sweepable, 
error) { conn := client.IAMClient(ctx) - + var input iam.ListUsersInput + var users []awstypes.User prefixes := []string{ "test-user", "test_user", @@ -299,48 +300,55 @@ func sweepServiceSpecificCredentials(ctx context.Context, client *conns.AWSClien "tf_acc", } - var users []awstypes.User - - pages := iam.NewListUsersPaginator(conn, &iam.ListUsersInput{}) + pages := iam.NewListUsersPaginator(conn, &input) for pages.HasMorePages() { page, err := pages.NextPage(ctx) + if err != nil { return nil, err } - for _, user := range page.Users { + for _, v := range page.Users { for _, prefix := range prefixes { - if strings.HasPrefix(aws.ToString(user.UserName), prefix) { - users = append(users, user) + if strings.HasPrefix(aws.ToString(v.UserName), prefix) { + users = append(users, v) break } } } } - var sweepResources []sweep.Sweepable + sweepResources := make([]sweep.Sweepable, 0) for _, user := range users { - out, err := conn.ListServiceSpecificCredentials(ctx, &iam.ListServiceSpecificCredentialsInput{ - UserName: user.UserName, + userName := aws.ToString(user.UserName) + input := iam.ListServiceSpecificCredentialsInput{ + UserName: aws.String(userName), + } + + err := listServiceSpecificCredentialsPages(ctx, conn, &input, func(page *iam.ListServiceSpecificCredentialsOutput, lastPage bool) bool { + if page == nil { + return !lastPage + } + + for _, v := range page.ServiceSpecificCredentials { + r := resourceServiceSpecificCredential() + d := r.Data(nil) + d.SetId(serviceSpecificCredentialCreateResourceID(aws.ToString(v.ServiceName), aws.ToString(v.UserName), aws.ToString(v.ServiceSpecificCredentialId))) + + sweepResources = append(sweepResources, sdk.NewSweepResource(r, d, client)) + } + + return !lastPage }) + if err != nil { tflog.Warn(ctx, "Skipping resource", map[string]any{ "error": err.Error(), - names.AttrUserName: user.UserName, + names.AttrUserName: userName, }) continue } - - for _, cred := range out.ServiceSpecificCredentials { - id := fmt.Sprintf("%s:%s:%s", 
aws.ToString(cred.ServiceName), aws.ToString(cred.UserName), aws.ToString(cred.ServiceSpecificCredentialId)) - - r := resourceServiceSpecificCredential() - d := r.Data(nil) - d.SetId(id) - - sweepResources = append(sweepResources, sdk.NewSweepResource(r, d, client)) - } } return sweepResources, nil @@ -350,7 +358,7 @@ func sweepPolicies(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %w", err) + return fmt.Errorf("getting client: %w", err) } conn := client.IAMClient(ctx) @@ -424,7 +432,7 @@ func sweepRoles(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %w", err) + return fmt.Errorf("getting client: %w", err) } conn := client.IAMClient(ctx) @@ -478,7 +486,7 @@ func sweepRoles(region string) error { return sweeperErrs.ErrorOrNil() } -func sweepSAMLProvider(ctx context.Context, client *conns.AWSClient) ([]sweep.Sweepable, error) { +func sweepSAMLProviders(ctx context.Context, client *conns.AWSClient) ([]sweep.Sweepable, error) { conn := client.IAMClient(ctx) var sweepResources []sweep.Sweepable @@ -505,7 +513,7 @@ func sweepServerCertificates(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.IAMClient(ctx) @@ -518,7 +526,7 @@ func sweepServerCertificates(region string) error { } if err != nil { - return fmt.Errorf("Error retrieving IAM Server Certificates: %s", err) + return err } for _, sc := range page.ServerCertificateMetadataList { @@ -584,7 +592,7 @@ func sweepUsers(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting 
client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.IAMClient(ctx) prefixes := []string{ @@ -605,7 +613,7 @@ func sweepUsers(region string) error { } if err != nil { - return fmt.Errorf("retrieving IAM Users: %s", err) + return err } for _, user := range page.Users { @@ -661,6 +669,7 @@ func roleNameFilter(name string) bool { // exhaustive list. prefixes := []string{ "another_rds", + "AmazonBedrockExecutionRoleForAgents", // Required role name prefix "AmazonComprehendServiceRole-", "aws_batch_service_role", "aws_elastictranscoder_pipeline_tf_test", @@ -744,7 +753,8 @@ func sweepVirtualMFADevice(ctx context.Context, client *conns.AWSClient) ([]swee func sweepSigningCertificates(ctx context.Context, client *conns.AWSClient) ([]sweep.Sweepable, error) { conn := client.IAMClient(ctx) - + var input iam.ListUsersInput + var users []awstypes.User prefixes := []string{ "test-user", "test_user", @@ -752,47 +762,51 @@ func sweepSigningCertificates(ctx context.Context, client *conns.AWSClient) ([]s "tf_acc", } - var users []awstypes.User - - pages := iam.NewListUsersPaginator(conn, &iam.ListUsersInput{}) + pages := iam.NewListUsersPaginator(conn, &input) for pages.HasMorePages() { page, err := pages.NextPage(ctx) + if err != nil { return nil, err } - for _, user := range page.Users { + for _, v := range page.Users { for _, prefix := range prefixes { - if strings.HasPrefix(aws.ToString(user.UserName), prefix) { - users = append(users, user) + if strings.HasPrefix(aws.ToString(v.UserName), prefix) { + users = append(users, v) break } } } } - var sweepResources []sweep.Sweepable + sweepResources := make([]sweep.Sweepable, 0) for _, user := range users { - out, err := conn.ListSigningCertificates(ctx, &iam.ListSigningCertificatesInput{ - UserName: user.UserName, - }) - if err != nil { - tflog.Warn(ctx, "Skipping resource", map[string]any{ - "error": err.Error(), - names.AttrUserName: user.UserName, - }) - continue + userName := 
aws.ToString(user.UserName) + input := iam.ListSigningCertificatesInput{ + UserName: aws.String(userName), } - for _, cert := range out.Certificates { - id := fmt.Sprintf("%s:%s", aws.ToString(cert.CertificateId), aws.ToString(cert.UserName)) + pages := iam.NewListSigningCertificatesPaginator(conn, &input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) - r := resourceSigningCertificate() - d := r.Data(nil) - d.SetId(id) + if err != nil { + tflog.Warn(ctx, "Skipping resource", map[string]any{ + "error": err.Error(), + names.AttrUserName: userName, + }) + continue + } - sweepResources = append(sweepResources, sdk.NewSweepResource(r, d, client)) + for _, v := range page.Certificates { + r := resourceSigningCertificate() + d := r.Data(nil) + d.SetId(signingCertificateCreateResourceID(aws.ToString(v.CertificateId), aws.ToString(v.UserName))) + + sweepResources = append(sweepResources, sdk.NewSweepResource(r, d, client)) + } } } diff --git a/internal/service/iam/tags.go b/internal/service/iam/tags.go index 3b92c631f278..52f7c45837c2 100644 --- a/internal/service/iam/tags.go +++ b/internal/service/iam/tags.go @@ -451,7 +451,7 @@ func updateTags(ctx context.Context, conn *iam.Client, identifier, resourceType case "Role": return roleUpdateTags(ctx, conn, identifier, oldTagsMap, newTagsMap) case "ServiceLinkedRole": - _, roleName, _, err := DecodeServiceLinkedRoleID(identifier) + _, roleName, _, err := serviceLinkedRoleParseResourceID(identifier) if err != nil { return err } @@ -494,7 +494,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier, res case "ServiceLinkedRole": var roleName string - _, roleName, _, err = DecodeServiceLinkedRoleID(identifier) + _, roleName, _, err = serviceLinkedRoleParseResourceID(identifier) if err != nil { return err } diff --git a/internal/service/iam/testdata/OpenIDConnectProvider/basic/main_gen.tf b/internal/service/iam/testdata/OpenIDConnectProvider/basic/main_gen.tf new file mode 100644 index 
000000000000..93f26868d425 --- /dev/null +++ b/internal/service/iam/testdata/OpenIDConnectProvider/basic/main_gen.tf @@ -0,0 +1,18 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resource "aws_iam_openid_connect_provider" "test" { + url = "https://accounts.testle.com/${var.rName}" + + client_id_list = [ + "266362248691-re108qaeld573ia0l6clj2i5ac7r7291.apps.testleusercontent.com", + ] + + thumbprint_list = ["cf23df2207d99a74fbe169e3eba035e633b65d94"] +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} diff --git a/internal/service/iam/testdata/OpenIDConnectProvider/basic_v6.4.0/main_gen.tf b/internal/service/iam/testdata/OpenIDConnectProvider/basic_v6.4.0/main_gen.tf new file mode 100644 index 000000000000..490e7d3980e3 --- /dev/null +++ b/internal/service/iam/testdata/OpenIDConnectProvider/basic_v6.4.0/main_gen.tf @@ -0,0 +1,28 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resource "aws_iam_openid_connect_provider" "test" { + url = "https://accounts.testle.com/${var.rName}" + + client_id_list = [ + "266362248691-re108qaeld573ia0l6clj2i5ac7r7291.apps.testleusercontent.com", + ] + + thumbprint_list = ["cf23df2207d99a74fbe169e3eba035e633b65d94"] +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "6.4.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/iam/testdata/Policy/basic/main_gen.tf b/internal/service/iam/testdata/Policy/basic/main_gen.tf new file mode 100644 index 000000000000..28b5c575f926 --- /dev/null +++ b/internal/service/iam/testdata/Policy/basic/main_gen.tf @@ -0,0 +1,31 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_iam_policy" "test" { + name = var.rName + + policy = <:") - return +const userPolicyResourceIDSeparator = ":" + +func userPolicyCreateResourceID(userName, policyName string) string { + parts := []string{userName, policyName} + id := strings.Join(parts, userPolicyResourceIDSeparator) + + return id +} + +func userPolicyParseResourceID(id string) (string, string, error) { + parts := strings.SplitN(id, userPolicyResourceIDSeparator, 2) + + if len(parts) != 2 || parts[0] == "" || parts[1] == "" { + return "", "", fmt.Errorf("unexpected format for ID (%[1]s), expected USER-NAME%[2]sPOLICY-NAME", id, userPolicyResourceIDSeparator) } - userName = parts[0] - policyName = parts[1] - return + return parts[0], parts[1], nil } diff --git a/internal/service/iam/user_policy_attachment.go b/internal/service/iam/user_policy_attachment.go index 864436cd00b3..7ac83c6d4a43 100644 --- a/internal/service/iam/user_policy_attachment.go +++ b/internal/service/iam/user_policy_attachment.go @@ -77,7 +77,7 @@ func resourceUserPolicyAttachmentRead(ctx context.Context, d *schema.ResourceDat // Human friendly ID for error messages since d.Id() is non-descriptive. 
id := fmt.Sprintf("%s:%s", user, policyARN) - _, err := tfresource.RetryWhenNewResourceNotFound(ctx, propagationTimeout, func() (any, error) { + _, err := tfresource.RetryWhenNewResourceNotFound(ctx, propagationTimeout, func(ctx context.Context) (any, error) { return findAttachedUserPolicyByTwoPartKey(ctx, conn, user, policyARN) }, d.IsNewResource()) @@ -122,7 +122,7 @@ func resourceUserPolicyAttachmentImport(ctx context.Context, d *schema.ResourceD } func attachPolicyToUser(ctx context.Context, conn *iam.Client, user, policyARN string) error { - _, err := tfresource.RetryWhenIsA[*awstypes.ConcurrentModificationException](ctx, propagationTimeout, func() (any, error) { + _, err := tfresource.RetryWhenIsA[any, *awstypes.ConcurrentModificationException](ctx, propagationTimeout, func(ctx context.Context) (any, error) { return conn.AttachUserPolicy(ctx, &iam.AttachUserPolicyInput{ PolicyArn: aws.String(policyARN), UserName: aws.String(user), @@ -137,7 +137,7 @@ func attachPolicyToUser(ctx context.Context, conn *iam.Client, user, policyARN s } func detachPolicyFromUser(ctx context.Context, conn *iam.Client, user, policyARN string) error { - _, err := tfresource.RetryWhenIsA[*awstypes.ConcurrentModificationException](ctx, propagationTimeout, func() (any, error) { + _, err := tfresource.RetryWhenIsA[any, *awstypes.ConcurrentModificationException](ctx, propagationTimeout, func(ctx context.Context) (any, error) { return conn.DetachUserPolicy(ctx, &iam.DetachUserPolicyInput{ PolicyArn: aws.String(policyARN), UserName: aws.String(user), diff --git a/internal/service/iam/user_policy_test.go b/internal/service/iam/user_policy_test.go index d6eb06bc42c3..26ef64f77126 100644 --- a/internal/service/iam/user_policy_test.go +++ b/internal/service/iam/user_policy_test.go @@ -247,14 +247,9 @@ func testAccCheckUserPolicyExists(ctx context.Context, n string, v *string) reso return fmt.Errorf("Not found: %s", n) } - userName, policyName, err := tfiam.UserPolicyParseID(rs.Primary.ID) - if 
err != nil { - return err - } - conn := acctest.Provider.Meta().(*conns.AWSClient).IAMClient(ctx) - output, err := tfiam.FindUserPolicyByTwoPartKey(ctx, conn, userName, policyName) + output, err := tfiam.FindUserPolicyByTwoPartKey(ctx, conn, rs.Primary.Attributes["user"], rs.Primary.Attributes[names.AttrName]) if err != nil { return err @@ -275,12 +270,7 @@ func testAccCheckUserPolicyDestroy(ctx context.Context) resource.TestCheckFunc { continue } - userName, policyName, err := tfiam.UserPolicyParseID(rs.Primary.ID) - if err != nil { - return err - } - - _, err = tfiam.FindUserPolicyByTwoPartKey(ctx, conn, userName, policyName) + _, err := tfiam.FindUserPolicyByTwoPartKey(ctx, conn, rs.Primary.Attributes["user"], rs.Primary.Attributes[names.AttrName]) if tfresource.NotFound(err) { continue diff --git a/internal/service/iam/user_ssh_key.go b/internal/service/iam/user_ssh_key.go index 3bd3b118d5e8..55bd17805a5b 100644 --- a/internal/service/iam/user_ssh_key.go +++ b/internal/service/iam/user_ssh_key.go @@ -94,7 +94,7 @@ func resourceUserSSHKeyCreate(ctx context.Context, d *schema.ResourceData, meta d.SetId(aws.ToString(output.SSHPublicKey.SSHPublicKeyId)) - _, err = tfresource.RetryWhenNotFound(ctx, propagationTimeout, func() (any, error) { + _, err = tfresource.RetryWhenNotFound(ctx, propagationTimeout, func(ctx context.Context) (any, error) { return findSSHPublicKeyByThreePartKey(ctx, conn, d.Id(), d.Get("encoding").(string), username) }) diff --git a/internal/service/iam/user_tags_gen_test.go b/internal/service/iam/user_tags_gen_test.go index 6257272804c3..13b0cb375e70 100644 --- a/internal/service/iam/user_tags_gen_test.go +++ b/internal/service/iam/user_tags_gen_test.go @@ -7,7 +7,6 @@ import ( "github.com/aws/aws-sdk-go-v2/service/iam/types" "github.com/hashicorp/terraform-plugin-testing/config" - sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" 
"github.com/hashicorp/terraform-plugin-testing/knownvalue" "github.com/hashicorp/terraform-plugin-testing/plancheck" @@ -19,11 +18,12 @@ import ( func TestAccIAMUser_tags(t *testing.T) { ctx := acctest.Context(t) + var v types.User resourceName := "aws_iam_user.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: testAccCheckUserDestroy(ctx), @@ -213,11 +213,12 @@ func TestAccIAMUser_tags(t *testing.T) { func TestAccIAMUser_tags_null(t *testing.T) { ctx := acctest.Context(t) + var v types.User resourceName := "aws_iam_user.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: testAccCheckUserDestroy(ctx), @@ -283,11 +284,12 @@ func TestAccIAMUser_tags_null(t *testing.T) { func TestAccIAMUser_tags_EmptyMap(t *testing.T) { ctx := acctest.Context(t) + var v types.User resourceName := "aws_iam_user.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: testAccCheckUserDestroy(ctx), @@ -349,11 +351,12 @@ func TestAccIAMUser_tags_EmptyMap(t *testing.T) { func TestAccIAMUser_tags_AddOnUpdate(t *testing.T) { ctx := acctest.Context(t) + var v types.User resourceName := "aws_iam_user.test" - rName := 
sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: testAccCheckUserDestroy(ctx), @@ -433,11 +436,12 @@ func TestAccIAMUser_tags_AddOnUpdate(t *testing.T) { func TestAccIAMUser_tags_EmptyTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) + var v types.User resourceName := "aws_iam_user.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: testAccCheckUserDestroy(ctx), @@ -528,11 +532,12 @@ func TestAccIAMUser_tags_EmptyTag_OnCreate(t *testing.T) { func TestAccIAMUser_tags_EmptyTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + var v types.User resourceName := "aws_iam_user.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: testAccCheckUserDestroy(ctx), @@ -671,11 +676,12 @@ func TestAccIAMUser_tags_EmptyTag_OnUpdate_Add(t *testing.T) { func TestAccIAMUser_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + var v types.User resourceName := "aws_iam_user.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: 
func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: testAccCheckUserDestroy(ctx), @@ -763,11 +769,12 @@ func TestAccIAMUser_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { func TestAccIAMUser_tags_DefaultTags_providerOnly(t *testing.T) { ctx := acctest.Context(t) + var v types.User resourceName := "aws_iam_user.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: testAccCheckUserDestroy(ctx), @@ -956,11 +963,12 @@ func TestAccIAMUser_tags_DefaultTags_providerOnly(t *testing.T) { func TestAccIAMUser_tags_DefaultTags_nonOverlapping(t *testing.T) { ctx := acctest.Context(t) + var v types.User resourceName := "aws_iam_user.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: testAccCheckUserDestroy(ctx), @@ -1125,11 +1133,12 @@ func TestAccIAMUser_tags_DefaultTags_nonOverlapping(t *testing.T) { func TestAccIAMUser_tags_DefaultTags_overlapping(t *testing.T) { ctx := acctest.Context(t) + var v types.User resourceName := "aws_iam_user.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: testAccCheckUserDestroy(ctx), @@ -1310,11 +1319,12 @@ func 
TestAccIAMUser_tags_DefaultTags_overlapping(t *testing.T) { func TestAccIAMUser_tags_DefaultTags_updateToProviderOnly(t *testing.T) { ctx := acctest.Context(t) + var v types.User resourceName := "aws_iam_user.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: testAccCheckUserDestroy(ctx), @@ -1403,11 +1413,12 @@ func TestAccIAMUser_tags_DefaultTags_updateToProviderOnly(t *testing.T) { func TestAccIAMUser_tags_DefaultTags_updateToResourceOnly(t *testing.T) { ctx := acctest.Context(t) + var v types.User resourceName := "aws_iam_user.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: testAccCheckUserDestroy(ctx), @@ -1495,11 +1506,12 @@ func TestAccIAMUser_tags_DefaultTags_updateToResourceOnly(t *testing.T) { func TestAccIAMUser_tags_DefaultTags_emptyResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v types.User resourceName := "aws_iam_user.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: testAccCheckUserDestroy(ctx), @@ -1563,11 +1575,12 @@ func TestAccIAMUser_tags_DefaultTags_emptyResourceTag(t *testing.T) { func TestAccIAMUser_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T) { ctx := 
acctest.Context(t) + var v types.User resourceName := "aws_iam_user.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: testAccCheckUserDestroy(ctx), @@ -1623,11 +1636,12 @@ func TestAccIAMUser_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T) { func TestAccIAMUser_tags_DefaultTags_nullOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v types.User resourceName := "aws_iam_user.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: testAccCheckUserDestroy(ctx), @@ -1688,11 +1702,12 @@ func TestAccIAMUser_tags_DefaultTags_nullOverlappingResourceTag(t *testing.T) { func TestAccIAMUser_tags_DefaultTags_nullNonOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v types.User resourceName := "aws_iam_user.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: testAccCheckUserDestroy(ctx), @@ -1753,11 +1768,12 @@ func TestAccIAMUser_tags_DefaultTags_nullNonOverlappingResourceTag(t *testing.T) func TestAccIAMUser_tags_ComputedTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) + var v types.User resourceName := "aws_iam_user.test" - rName := 
sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: testAccCheckUserDestroy(ctx), @@ -1811,11 +1827,12 @@ func TestAccIAMUser_tags_ComputedTag_OnCreate(t *testing.T) { func TestAccIAMUser_tags_ComputedTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + var v types.User resourceName := "aws_iam_user.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: testAccCheckUserDestroy(ctx), @@ -1911,11 +1928,12 @@ func TestAccIAMUser_tags_ComputedTag_OnUpdate_Add(t *testing.T) { func TestAccIAMUser_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + var v types.User resourceName := "aws_iam_user.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: testAccCheckUserDestroy(ctx), @@ -2001,11 +2019,12 @@ func TestAccIAMUser_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { func TestAccIAMUser_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { ctx := acctest.Context(t) + var v types.User resourceName := "aws_iam_user.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + 
acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: testAccCheckUserDestroy(ctx), @@ -2163,11 +2182,12 @@ func TestAccIAMUser_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { func TestAccIAMUser_tags_IgnoreTags_Overlap_ResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v types.User resourceName := "aws_iam_user.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: testAccCheckUserDestroy(ctx), diff --git a/internal/service/iam/user_test.go b/internal/service/iam/user_test.go index 2f3435b38e73..8bcd410949b3 100644 --- a/internal/service/iam/user_test.go +++ b/internal/service/iam/user_test.go @@ -612,7 +612,7 @@ func testAccCheckUserCreatesAccessKey(ctx context.Context, user *awstypes.User) } if _, err := conn.CreateAccessKey(ctx, input); err != nil { - return fmt.Errorf("error creating IAM User (%s) Access Key: %s", aws.ToString(user.UserName), err) + return fmt.Errorf("error creating IAM User (%s) Access Key: %w", aws.ToString(user.UserName), err) } return nil @@ -632,7 +632,7 @@ func testAccCheckUserCreatesLoginProfile(ctx context.Context, user *awstypes.Use } if _, err := conn.CreateLoginProfile(ctx, input); err != nil { - return fmt.Errorf("error creating IAM User (%s) Login Profile: %s", aws.ToString(user.UserName), err) + return fmt.Errorf("error creating IAM User (%s) Login Profile: %w", aws.ToString(user.UserName), err) } return nil @@ -650,17 +650,17 @@ func testAccCheckUserCreatesMFADevice(ctx context.Context, user *awstypes.User) createVirtualMFADeviceOutput, err := conn.CreateVirtualMFADevice(ctx, createVirtualMFADeviceInput) if err != nil { - 
return fmt.Errorf("error creating IAM User (%s) Virtual MFA Device: %s", aws.ToString(user.UserName), err) + return fmt.Errorf("error creating IAM User (%s) Virtual MFA Device: %w", aws.ToString(user.UserName), err) } secret := string(createVirtualMFADeviceOutput.VirtualMFADevice.Base32StringSeed) authenticationCode1, err := totp.GenerateCode(secret, time.Now().Add(-30*time.Second)) if err != nil { - return fmt.Errorf("error generating Virtual MFA Device authentication code 1: %s", err) + return fmt.Errorf("error generating Virtual MFA Device authentication code 1: %w", err) } authenticationCode2, err := totp.GenerateCode(secret, time.Now()) if err != nil { - return fmt.Errorf("error generating Virtual MFA Device authentication code 2: %s", err) + return fmt.Errorf("error generating Virtual MFA Device authentication code 2: %w", err) } enableVirtualMFADeviceInput := &iam.EnableMFADeviceInput{ @@ -671,7 +671,7 @@ func testAccCheckUserCreatesMFADevice(ctx context.Context, user *awstypes.User) } if _, err := conn.EnableMFADevice(ctx, enableVirtualMFADeviceInput); err != nil { - return fmt.Errorf("error enabling IAM User (%s) Virtual MFA Device: %s", aws.ToString(user.UserName), err) + return fmt.Errorf("error enabling IAM User (%s) Virtual MFA Device: %w", aws.ToString(user.UserName), err) } return nil @@ -734,7 +734,7 @@ func testAccCheckUserUploadSigningCertificate(ctx context.Context, t *testing.T, } if _, err := conn.UploadSigningCertificate(ctx, input); err != nil { - return fmt.Errorf("error uploading IAM User (%s) Signing Certificate : %s", aws.ToString(user.UserName), err) + return fmt.Errorf("error uploading IAM User (%s) Signing Certificate : %w", aws.ToString(user.UserName), err) } return nil @@ -754,18 +754,18 @@ func testAccCheckUserAttachPolicy(ctx context.Context, user *awstypes.User) reso output, err := conn.CreatePolicy(ctx, input) if err != nil { - return fmt.Errorf("externally creating IAM Policy (%s): %s", aws.ToString(user.UserName), err) + return 
fmt.Errorf("externally creating IAM Policy (%s): %w", aws.ToString(user.UserName), err) } - _, err = tfresource.RetryWhenNewResourceNotFound(ctx, 2*time.Minute, func() (any, error) { + _, err = tfresource.RetryWhenNewResourceNotFound(ctx, 2*time.Minute, func(ctx context.Context) (any, error) { return tfiam.FindPolicyByARN(ctx, conn, aws.ToString(output.Policy.Arn)) }, true) if err != nil { - return fmt.Errorf("waiting for external creation of IAM Policy (%s): %s", aws.ToString(user.UserName), err) + return fmt.Errorf("waiting for external creation of IAM Policy (%s): %w", aws.ToString(user.UserName), err) } if err := tfiam.AttachPolicyToUser(ctx, conn, aws.ToString(user.UserName), aws.ToString(output.Policy.Arn)); err != nil { - return fmt.Errorf("externally attaching IAM User (%s) to policy (%s): %s", aws.ToString(user.UserName), aws.ToString(output.Policy.Arn), err) + return fmt.Errorf("externally attaching IAM User (%s) to policy (%s): %w", aws.ToString(user.UserName), aws.ToString(output.Policy.Arn), err) } return nil @@ -786,14 +786,14 @@ func testAccCheckUserInlinePolicy(ctx context.Context, user *awstypes.User) reso _, err := conn.PutUserPolicy(ctx, input) if err != nil { - return fmt.Errorf("externally putting IAM User (%s) policy: %s", aws.ToString(user.UserName), err) + return fmt.Errorf("externally putting IAM User (%s) policy: %w", aws.ToString(user.UserName), err) } - _, err = tfresource.RetryWhenNotFound(ctx, 2*time.Minute, func() (any, error) { + _, err = tfresource.RetryWhenNotFound(ctx, 2*time.Minute, func(ctx context.Context) (any, error) { return tfiam.FindUserPolicyByTwoPartKey(ctx, conn, aws.ToString(user.UserName), aws.ToString(user.UserName)) }) if err != nil { - return fmt.Errorf("waiting for external creation of inline IAM User Policy (%s): %s", aws.ToString(user.UserName), err) + return fmt.Errorf("waiting for external creation of inline IAM User Policy (%s): %w", aws.ToString(user.UserName), err) } return nil diff --git 
a/internal/service/iam/users_data_source.go b/internal/service/iam/users_data_source.go index 137dde04d0d7..fcd4075246e1 100644 --- a/internal/service/iam/users_data_source.go +++ b/internal/service/iam/users_data_source.go @@ -6,12 +6,16 @@ package iam import ( "context" + "github.com/YakDriver/regexache" "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/iam" + awstypes "github.com/aws/aws-sdk-go-v2/service/iam/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" + tfslices "github.com/hashicorp/terraform-provider-aws/internal/slices" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -19,6 +23,7 @@ import ( func dataSourceUsers() *schema.Resource { return &schema.Resource{ ReadWithoutTimeout: dataSourceUsersRead, + Schema: map[string]*schema.Schema{ names.AttrARNs: { Type: schema.TypeSet, @@ -48,12 +53,21 @@ func dataSourceUsersRead(ctx context.Context, d *schema.ResourceData, meta any) conn := meta.(*conns.AWSClient).IAMClient(ctx) nameRegex := d.Get("name_regex").(string) - pathPrefix := d.Get("path_prefix").(string) + var input iam.ListUsersInput + if v, ok := d.GetOk("path_prefix"); ok { + input.PathPrefix = aws.String(v.(string)) + } + + results, err := findUsers(ctx, conn, &input, func(v *awstypes.User) bool { + if nameRegex != "" { + return regexache.MustCompile(nameRegex).MatchString(aws.ToString(v.UserName)) + } - results, err := FindUsers(ctx, conn, nameRegex, pathPrefix) + return true + }) if err != nil { - return sdkdiag.AppendErrorf(diags, "reading IAM users: %s", err) + return sdkdiag.AppendErrorf(diags, "reading IAM Users: %s", err) } d.SetId(meta.(*conns.AWSClient).Region(ctx)) @@ -65,13 +79,29 @@ func dataSourceUsersRead(ctx context.Context, d 
*schema.ResourceData, meta any) arns = append(arns, aws.ToString(r.Arn)) } - if err := d.Set(names.AttrNames, nms); err != nil { - return sdkdiag.AppendErrorf(diags, "setting names: %s", err) - } + d.Set(names.AttrARNs, arns) + d.Set(names.AttrNames, nms) - if err := d.Set(names.AttrARNs, arns); err != nil { - return sdkdiag.AppendErrorf(diags, "setting arns: %s", err) + return diags +} + +func findUsers(ctx context.Context, conn *iam.Client, input *iam.ListUsersInput, filter tfslices.Predicate[*awstypes.User]) ([]awstypes.User, error) { + var output []awstypes.User + + pages := iam.NewListUsersPaginator(conn, input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + + if err != nil { + return nil, err + } + + for _, v := range page.Users { + if filter(&v) { + output = append(output, v) + } + } } - return diags + return output, nil } diff --git a/internal/service/iam/virtual_mfa_device.go b/internal/service/iam/virtual_mfa_device.go index ef280b512ea4..9b83c0fec23d 100644 --- a/internal/service/iam/virtual_mfa_device.go +++ b/internal/service/iam/virtual_mfa_device.go @@ -7,7 +7,6 @@ import ( "context" "fmt" "log" - "reflect" "time" "github.com/YakDriver/regexache" @@ -16,14 +15,15 @@ import ( "github.com/aws/aws-sdk-go-v2/service/iam" awstypes "github.com/aws/aws-sdk-go-v2/service/iam/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" + tfslices "github.com/hashicorp/terraform-provider-aws/internal/slices" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + inttypes 
"github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -89,20 +89,20 @@ func resourceVirtualMFADeviceCreate(ctx context.Context, d *schema.ResourceData, conn := meta.(*conns.AWSClient).IAMClient(ctx) name := d.Get("virtual_mfa_device_name").(string) - input := &iam.CreateVirtualMFADeviceInput{ + input := iam.CreateVirtualMFADeviceInput{ Path: aws.String(d.Get(names.AttrPath).(string)), Tags: getTagsIn(ctx), VirtualMFADeviceName: aws.String(name), } - output, err := conn.CreateVirtualMFADevice(ctx, input) + output, err := conn.CreateVirtualMFADevice(ctx, &input) // Some partitions (e.g. ISO) may not support tag-on-create. partition := meta.(*conns.AWSClient).Partition(ctx) if input.Tags != nil && errs.IsUnsupportedOperationInPartitionError(partition, err) { input.Tags = nil - output, err = conn.CreateVirtualMFADevice(ctx, input) + output, err = conn.CreateVirtualMFADevice(ctx, &input) } if err != nil { @@ -191,22 +191,26 @@ func resourceVirtualMFADeviceDelete(ctx context.Context, d *schema.ResourceData, conn := meta.(*conns.AWSClient).IAMClient(ctx) if v := d.Get(names.AttrUserName); v != "" { - _, err := conn.DeactivateMFADevice(ctx, &iam.DeactivateMFADeviceInput{ - UserName: aws.String(v.(string)), + input := iam.DeactivateMFADeviceInput{ SerialNumber: aws.String(d.Id()), - }) + UserName: aws.String(v.(string)), + } + _, err := conn.DeactivateMFADevice(ctx, &input) + if errs.IsA[*awstypes.NoSuchEntityException](err) { return diags } + if err != nil { return sdkdiag.AppendErrorf(diags, "deactivating IAM Virtual MFA Device (%s): %s", d.Id(), err) } } log.Printf("[INFO] Deleting IAM Virtual MFA Device: %s", d.Id()) - _, err := conn.DeleteVirtualMFADevice(ctx, &iam.DeleteVirtualMFADeviceInput{ + input := iam.DeleteVirtualMFADeviceInput{ SerialNumber: aws.String(d.Id()), - }) + } + _, err := conn.DeleteVirtualMFADevice(ctx, &input) if errs.IsA[*awstypes.NoSuchEntityException](err) { return diags @@ -220,29 
+224,42 @@ func resourceVirtualMFADeviceDelete(ctx context.Context, d *schema.ResourceData, } func findVirtualMFADeviceBySerialNumber(ctx context.Context, conn *iam.Client, serialNumber string) (*awstypes.VirtualMFADevice, error) { - input := &iam.ListVirtualMFADevicesInput{} - var output awstypes.VirtualMFADevice + var input iam.ListVirtualMFADevicesInput + + return findVirtualMFADevice(ctx, conn, &input, func(v *awstypes.VirtualMFADevice) bool { + return aws.ToString(v.SerialNumber) == serialNumber + }) +} + +func findVirtualMFADevice(ctx context.Context, conn *iam.Client, input *iam.ListVirtualMFADevicesInput, filter tfslices.Predicate[*awstypes.VirtualMFADevice]) (*awstypes.VirtualMFADevice, error) { + output, err := findVirtualMFADevices(ctx, conn, input, filter) + + if err != nil { + return nil, err + } + + return tfresource.AssertSingleValueResult(output) +} + +func findVirtualMFADevices(ctx context.Context, conn *iam.Client, input *iam.ListVirtualMFADevicesInput, filter tfslices.Predicate[*awstypes.VirtualMFADevice]) ([]awstypes.VirtualMFADevice, error) { + var output []awstypes.VirtualMFADevice pages := iam.NewListVirtualMFADevicesPaginator(conn, input) for pages.HasMorePages() { page, err := pages.NextPage(ctx) + if err != nil { return nil, err } for _, v := range page.VirtualMFADevices { - if !reflect.ValueOf(v).IsZero() && aws.ToString(v.SerialNumber) == serialNumber { - output = v - break + if p := &v; !inttypes.IsZero(p) && filter(p) { + output = append(output, v) } } } - if reflect.ValueOf(output).IsZero() { - return nil, &retry.NotFoundError{} - } - - return &output, nil + return output, nil } func parseVirtualMFADeviceARN(s string) (path, name string, err error) { diff --git a/internal/service/iam/virtual_mfa_device_tags_gen_test.go b/internal/service/iam/virtual_mfa_device_tags_gen_test.go index c4a10b9887db..784925700181 100644 --- a/internal/service/iam/virtual_mfa_device_tags_gen_test.go +++ 
b/internal/service/iam/virtual_mfa_device_tags_gen_test.go @@ -7,7 +7,6 @@ import ( "github.com/aws/aws-sdk-go-v2/service/iam/types" "github.com/hashicorp/terraform-plugin-testing/config" - sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/knownvalue" "github.com/hashicorp/terraform-plugin-testing/plancheck" @@ -19,11 +18,12 @@ import ( func TestAccIAMVirtualMFADevice_tags(t *testing.T) { ctx := acctest.Context(t) + var v types.VirtualMFADevice resourceName := "aws_iam_virtual_mfa_device.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: testAccCheckVirtualMFADeviceDestroy(ctx), @@ -213,11 +213,12 @@ func TestAccIAMVirtualMFADevice_tags(t *testing.T) { func TestAccIAMVirtualMFADevice_tags_null(t *testing.T) { ctx := acctest.Context(t) + var v types.VirtualMFADevice resourceName := "aws_iam_virtual_mfa_device.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: testAccCheckVirtualMFADeviceDestroy(ctx), @@ -283,11 +284,12 @@ func TestAccIAMVirtualMFADevice_tags_null(t *testing.T) { func TestAccIAMVirtualMFADevice_tags_EmptyMap(t *testing.T) { ctx := acctest.Context(t) + var v types.VirtualMFADevice resourceName := "aws_iam_virtual_mfa_device.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, 
acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: testAccCheckVirtualMFADeviceDestroy(ctx), @@ -349,11 +351,12 @@ func TestAccIAMVirtualMFADevice_tags_EmptyMap(t *testing.T) { func TestAccIAMVirtualMFADevice_tags_AddOnUpdate(t *testing.T) { ctx := acctest.Context(t) + var v types.VirtualMFADevice resourceName := "aws_iam_virtual_mfa_device.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: testAccCheckVirtualMFADeviceDestroy(ctx), @@ -433,11 +436,12 @@ func TestAccIAMVirtualMFADevice_tags_AddOnUpdate(t *testing.T) { func TestAccIAMVirtualMFADevice_tags_EmptyTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) + var v types.VirtualMFADevice resourceName := "aws_iam_virtual_mfa_device.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: testAccCheckVirtualMFADeviceDestroy(ctx), @@ -528,11 +532,12 @@ func TestAccIAMVirtualMFADevice_tags_EmptyTag_OnCreate(t *testing.T) { func TestAccIAMVirtualMFADevice_tags_EmptyTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + var v types.VirtualMFADevice resourceName := "aws_iam_virtual_mfa_device.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, 
resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: testAccCheckVirtualMFADeviceDestroy(ctx), @@ -671,11 +676,12 @@ func TestAccIAMVirtualMFADevice_tags_EmptyTag_OnUpdate_Add(t *testing.T) { func TestAccIAMVirtualMFADevice_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + var v types.VirtualMFADevice resourceName := "aws_iam_virtual_mfa_device.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: testAccCheckVirtualMFADeviceDestroy(ctx), @@ -763,11 +769,12 @@ func TestAccIAMVirtualMFADevice_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { func TestAccIAMVirtualMFADevice_tags_DefaultTags_providerOnly(t *testing.T) { ctx := acctest.Context(t) + var v types.VirtualMFADevice resourceName := "aws_iam_virtual_mfa_device.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: testAccCheckVirtualMFADeviceDestroy(ctx), @@ -956,11 +963,12 @@ func TestAccIAMVirtualMFADevice_tags_DefaultTags_providerOnly(t *testing.T) { func TestAccIAMVirtualMFADevice_tags_DefaultTags_nonOverlapping(t *testing.T) { ctx := acctest.Context(t) + var v types.VirtualMFADevice resourceName := "aws_iam_virtual_mfa_device.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, 
resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: testAccCheckVirtualMFADeviceDestroy(ctx), @@ -1125,11 +1133,12 @@ func TestAccIAMVirtualMFADevice_tags_DefaultTags_nonOverlapping(t *testing.T) { func TestAccIAMVirtualMFADevice_tags_DefaultTags_overlapping(t *testing.T) { ctx := acctest.Context(t) + var v types.VirtualMFADevice resourceName := "aws_iam_virtual_mfa_device.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: testAccCheckVirtualMFADeviceDestroy(ctx), @@ -1310,11 +1319,12 @@ func TestAccIAMVirtualMFADevice_tags_DefaultTags_overlapping(t *testing.T) { func TestAccIAMVirtualMFADevice_tags_DefaultTags_updateToProviderOnly(t *testing.T) { ctx := acctest.Context(t) + var v types.VirtualMFADevice resourceName := "aws_iam_virtual_mfa_device.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: testAccCheckVirtualMFADeviceDestroy(ctx), @@ -1403,11 +1413,12 @@ func TestAccIAMVirtualMFADevice_tags_DefaultTags_updateToProviderOnly(t *testing func TestAccIAMVirtualMFADevice_tags_DefaultTags_updateToResourceOnly(t *testing.T) { ctx := acctest.Context(t) + var v types.VirtualMFADevice resourceName := "aws_iam_virtual_mfa_device.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - 
resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: testAccCheckVirtualMFADeviceDestroy(ctx), @@ -1495,11 +1506,12 @@ func TestAccIAMVirtualMFADevice_tags_DefaultTags_updateToResourceOnly(t *testing func TestAccIAMVirtualMFADevice_tags_DefaultTags_emptyResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v types.VirtualMFADevice resourceName := "aws_iam_virtual_mfa_device.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: testAccCheckVirtualMFADeviceDestroy(ctx), @@ -1563,11 +1575,12 @@ func TestAccIAMVirtualMFADevice_tags_DefaultTags_emptyResourceTag(t *testing.T) func TestAccIAMVirtualMFADevice_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T) { ctx := acctest.Context(t) + var v types.VirtualMFADevice resourceName := "aws_iam_virtual_mfa_device.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: testAccCheckVirtualMFADeviceDestroy(ctx), @@ -1623,11 +1636,12 @@ func TestAccIAMVirtualMFADevice_tags_DefaultTags_emptyProviderOnlyTag(t *testing func TestAccIAMVirtualMFADevice_tags_DefaultTags_nullOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v types.VirtualMFADevice resourceName := "aws_iam_virtual_mfa_device.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := 
acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: testAccCheckVirtualMFADeviceDestroy(ctx), @@ -1688,11 +1702,12 @@ func TestAccIAMVirtualMFADevice_tags_DefaultTags_nullOverlappingResourceTag(t *t func TestAccIAMVirtualMFADevice_tags_DefaultTags_nullNonOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v types.VirtualMFADevice resourceName := "aws_iam_virtual_mfa_device.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: testAccCheckVirtualMFADeviceDestroy(ctx), @@ -1753,11 +1768,12 @@ func TestAccIAMVirtualMFADevice_tags_DefaultTags_nullNonOverlappingResourceTag(t func TestAccIAMVirtualMFADevice_tags_ComputedTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) + var v types.VirtualMFADevice resourceName := "aws_iam_virtual_mfa_device.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: testAccCheckVirtualMFADeviceDestroy(ctx), @@ -1811,11 +1827,12 @@ func TestAccIAMVirtualMFADevice_tags_ComputedTag_OnCreate(t *testing.T) { func TestAccIAMVirtualMFADevice_tags_ComputedTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + var v types.VirtualMFADevice resourceName := "aws_iam_virtual_mfa_device.test" - rName := 
sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: testAccCheckVirtualMFADeviceDestroy(ctx), @@ -1911,11 +1928,12 @@ func TestAccIAMVirtualMFADevice_tags_ComputedTag_OnUpdate_Add(t *testing.T) { func TestAccIAMVirtualMFADevice_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + var v types.VirtualMFADevice resourceName := "aws_iam_virtual_mfa_device.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: testAccCheckVirtualMFADeviceDestroy(ctx), @@ -2001,11 +2019,12 @@ func TestAccIAMVirtualMFADevice_tags_ComputedTag_OnUpdate_Replace(t *testing.T) func TestAccIAMVirtualMFADevice_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { ctx := acctest.Context(t) + var v types.VirtualMFADevice resourceName := "aws_iam_virtual_mfa_device.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: testAccCheckVirtualMFADeviceDestroy(ctx), @@ -2163,11 +2182,12 @@ func TestAccIAMVirtualMFADevice_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) func TestAccIAMVirtualMFADevice_tags_IgnoreTags_Overlap_ResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v types.VirtualMFADevice resourceName := 
"aws_iam_virtual_mfa_device.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), CheckDestroy: testAccCheckVirtualMFADeviceDestroy(ctx), diff --git a/internal/service/iam/wait.go b/internal/service/iam/wait.go deleted file mode 100644 index eeb1733b9036..000000000000 --- a/internal/service/iam/wait.go +++ /dev/null @@ -1,74 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package iam - -import ( - "context" - "time" - - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/arn" - "github.com/aws/aws-sdk-go-v2/service/iam" - awstypes "github.com/aws/aws-sdk-go-v2/service/iam/types" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" - "github.com/hashicorp/terraform-provider-aws/internal/tfresource" - "github.com/hashicorp/terraform-provider-aws/names" -) - -const ( - // Maximum amount of time to wait for IAM changes to propagate - // This timeout should not be increased without strong consideration - // as this will negatively impact user experience when configurations - // have incorrect references or permissions. 
- // Reference: https://docs.aws.amazon.com/IAM/latest/UserGuide/troubleshoot_general.html#troubleshoot_general_eventual-consistency - propagationTimeout = 2 * time.Minute - - RoleStatusARNIsUniqueID = "uniqueid" - RoleStatusARNIsARN = names.AttrARN - RoleStatusNotFound = "notfound" -) - -func waitRoleARNIsNotUniqueID(ctx context.Context, conn *iam.Client, id string, role *awstypes.Role) (*awstypes.Role, error) { - if arn.IsARN(aws.ToString(role.Arn)) { - return role, nil - } - - stateConf := &retry.StateChangeConf{ - Pending: []string{RoleStatusARNIsUniqueID, RoleStatusNotFound}, - Target: []string{names.AttrARN}, - Refresh: statusRoleCreate(ctx, conn, id), - Timeout: propagationTimeout, - NotFoundChecks: 10, - ContinuousTargetOccurence: 5, - Delay: 10 * time.Second, - } - - outputRaw, err := stateConf.WaitForStateContext(ctx) - - if output, ok := outputRaw.(*awstypes.Role); ok { - return output, err - } - - return nil, err -} - -func statusRoleCreate(ctx context.Context, conn *iam.Client, id string) retry.StateRefreshFunc { - return func() (any, string, error) { - role, err := findRoleByName(ctx, conn, id) - - if tfresource.NotFound(err) { - return nil, RoleStatusNotFound, nil - } - - if err != nil { - return nil, "", err - } - - if arn.IsARN(aws.ToString(role.Arn)) { - return role, names.AttrARN, nil - } - - return role, RoleStatusARNIsUniqueID, nil - } -} diff --git a/internal/service/identitystore/service_endpoint_resolver_gen.go b/internal/service/identitystore/service_endpoint_resolver_gen.go index 6571c9d8f6e6..9ba29fa5d83c 100644 --- a/internal/service/identitystore/service_endpoint_resolver_gen.go +++ b/internal/service/identitystore/service_endpoint_resolver_gen.go @@ -62,7 +62,7 @@ func (r resolverV2) ResolveEndpoint(ctx context.Context, params identitystore.En }) params.UseFIPS = aws.Bool(false) } else { - err = fmt.Errorf("looking up identitystore endpoint %q: %s", hostname, err) + err = fmt.Errorf("looking up identitystore endpoint %q: %w", hostname, 
err) return } } else { diff --git a/internal/service/identitystore/service_endpoints_gen_test.go b/internal/service/identitystore/service_endpoints_gen_test.go index b06c2894a73e..0dd1b774d2d2 100644 --- a/internal/service/identitystore/service_endpoints_gen_test.go +++ b/internal/service/identitystore/service_endpoints_gen_test.go @@ -523,7 +523,7 @@ func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { ) } -var errCancelOperation = fmt.Errorf("Test: Canceling request") +var errCancelOperation = errors.New("Test: Canceling request") func addCancelRequestMiddleware() func(*middleware.Stack) error { return func(stack *middleware.Stack) error { diff --git a/internal/service/identitystore/service_package_gen.go b/internal/service/identitystore/service_package_gen.go index 42075abfc204..49609eac8d81 100644 --- a/internal/service/identitystore/service_package_gen.go +++ b/internal/service/identitystore/service_package_gen.go @@ -7,7 +7,6 @@ import ( "unique" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/identitystore" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -108,7 +107,7 @@ func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) ( func(o *identitystore.Options) { if inContext, ok := conns.FromContext(ctx); ok && inContext.VCREnabled() { tflog.Info(ctx, "overriding retry behavior to immediately return VCR errors") - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(vcr.InteractionNotFoundRetryableFunc)) + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), vcr.InteractionNotFoundRetryableFunc) } }, withExtraOptions(ctx, p, config), diff --git a/internal/service/imagebuilder/container_recipe.go b/internal/service/imagebuilder/container_recipe.go index cb956a76a2ef..432edfbe9c1c 100644 --- 
a/internal/service/imagebuilder/container_recipe.go +++ b/internal/service/imagebuilder/container_recipe.go @@ -29,6 +29,8 @@ import ( // @SDKResource("aws_imagebuilder_container_recipe", name="Container Recipe") // @Tags(identifierAttribute="id") +// @ArnIdentity +// @Testing(preIdentityVersion="v6.3.0") func resourceContainerRecipe() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceContainerRecipeCreate, @@ -36,10 +38,6 @@ func resourceContainerRecipe() *schema.Resource { UpdateWithoutTimeout: resourceContainerRecipeUpdate, DeleteWithoutTimeout: resourceContainerRecipeDelete, - Importer: &schema.ResourceImporter{ - StateContext: schema.ImportStatePassthroughContext, - }, - Schema: map[string]*schema.Schema{ names.AttrARN: { Type: schema.TypeString, @@ -178,7 +176,7 @@ func resourceContainerRecipe() *schema.Resource { Type: schema.TypeInt, Optional: true, ForceNew: true, - ValidateFunc: validation.IntBetween(125, 1000), + ValidateFunc: validation.IntBetween(125, 2000), }, names.AttrVolumeSize: { Type: schema.TypeInt, diff --git a/internal/service/imagebuilder/container_recipe_identity_gen_test.go b/internal/service/imagebuilder/container_recipe_identity_gen_test.go new file mode 100644 index 000000000000..c1f58c4b55fa --- /dev/null +++ b/internal/service/imagebuilder/container_recipe_identity_gen_test.go @@ -0,0 +1,337 @@ +// Code generated by internal/generate/identitytests/main.go; DO NOT EDIT. 
+ +package imagebuilder_test + +import ( + "testing" + + "github.com/hashicorp/terraform-plugin-testing/compare" + "github.com/hashicorp/terraform-plugin-testing/config" + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/knownvalue" + "github.com/hashicorp/terraform-plugin-testing/plancheck" + "github.com/hashicorp/terraform-plugin-testing/statecheck" + "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" + "github.com/hashicorp/terraform-plugin-testing/tfversion" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccImageBuilderContainerRecipe_Identity_Basic(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_imagebuilder_container_recipe.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.ImageBuilderServiceID), + CheckDestroy: testAccCheckContainerRecipeDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + // Step 1: Setup + { + ConfigDirectory: config.StaticDirectory("testdata/ContainerRecipe/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckContainerRecipeExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New(names.AttrARN), compare.ValuesSame()), + 
statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + + // Step 2: Import command + { + ConfigDirectory: config.StaticDirectory("testdata/ContainerRecipe/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ImportStateKind: resource.ImportCommandWithID, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + + // Step 3: Import block with Import ID + { + ConfigDirectory: config.StaticDirectory("testdata/ContainerRecipe/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + }, + }, + }, + + // Step 4: Import block with Resource Identity + { + ConfigDirectory: config.StaticDirectory("testdata/ContainerRecipe/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithResourceIdentity, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), 
knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + }, + }, + }, + }, + }) +} + +func TestAccImageBuilderContainerRecipe_Identity_RegionOverride(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_imagebuilder_container_recipe.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.ImageBuilderServiceID), + CheckDestroy: acctest.CheckDestroyNoop, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + // Step 1: Setup + { + ConfigDirectory: config.StaticDirectory("testdata/ContainerRecipe/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New(names.AttrARN), compare.ValuesSame()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + + // Step 2: Import command with appended "@" + { + ConfigDirectory: config.StaticDirectory("testdata/ContainerRecipe/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ImportStateKind: resource.ImportCommandWithID, + ImportStateIdFunc: 
acctest.CrossRegionImportStateIdFunc(resourceName), + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + + // Step 3: Import command without appended "@" + { + ConfigDirectory: config.StaticDirectory("testdata/ContainerRecipe/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ImportStateKind: resource.ImportCommandWithID, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + + // Step 4: Import block with Import ID and appended "@" + { + ConfigDirectory: config.StaticDirectory("testdata/ContainerRecipe/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportStateIdFunc: acctest.CrossRegionImportStateIdFunc(resourceName), + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + }, + + // Step 5: Import block with Import ID and no appended "@" + { + ConfigDirectory: config.StaticDirectory("testdata/ContainerRecipe/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, 
tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + }, + + // Step 6: Import block with Resource Identity + { + ConfigDirectory: config.StaticDirectory("testdata/ContainerRecipe/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithResourceIdentity, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + }, + }, + }) +} + +// Resource Identity was added after v6.3.0 +func TestAccImageBuilderContainerRecipe_Identity_ExistingResource(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_imagebuilder_container_recipe.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.ImageBuilderServiceID), + CheckDestroy: testAccCheckContainerRecipeDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/ContainerRecipe/basic_v6.3.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + 
Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckContainerRecipeExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/ContainerRecipe/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + }, + }) +} + +// Resource Identity was added after v6.3.0 +func TestAccImageBuilderContainerRecipe_Identity_ExistingResource_NoRefresh_NoChange(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_imagebuilder_container_recipe.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.ImageBuilderServiceID), + CheckDestroy: testAccCheckContainerRecipeDestroy(ctx), + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/ContainerRecipe/basic_v6.3.0/"), + ConfigVariables: config.Variables{ + 
acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckContainerRecipeExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/ContainerRecipe/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + }, + }) +} diff --git a/internal/service/imagebuilder/container_recipe_test.go b/internal/service/imagebuilder/container_recipe_test.go index 699375350b50..4ffd2b3df4d9 100644 --- a/internal/service/imagebuilder/container_recipe_test.go +++ b/internal/service/imagebuilder/container_recipe_test.go @@ -743,7 +743,6 @@ data "aws_partition" "current" {} resource "aws_ecr_repository" "test" { name = %[1]q } - `, rName) } diff --git a/internal/service/imagebuilder/distribution_configuration.go b/internal/service/imagebuilder/distribution_configuration.go index 7f2824dac983..8008d986113f 100644 --- a/internal/service/imagebuilder/distribution_configuration.go +++ b/internal/service/imagebuilder/distribution_configuration.go @@ -29,6 +29,8 @@ import ( // @SDKResource("aws_imagebuilder_distribution_configuration", name="Distribution Configuration") // @Tags(identifierAttribute="id") +// @ArnIdentity +// @Testing(preIdentityVersion="v6.3.0") func resourceDistributionConfiguration() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: 
resourceDistributionConfigurationCreate, @@ -36,10 +38,6 @@ func resourceDistributionConfiguration() *schema.Resource { UpdateWithoutTimeout: resourceDistributionConfigurationUpdate, DeleteWithoutTimeout: resourceDistributionConfigurationDelete, - Importer: &schema.ResourceImporter{ - StateContext: schema.ImportStatePassthroughContext, - }, - Schema: map[string]*schema.Schema{ names.AttrARN: { Type: schema.TypeString, @@ -898,7 +896,7 @@ func flattenAMIDistributionConfiguration(apiObject *awstypes.AmiDistributionConf } if v := apiObject.TargetAccountIds; v != nil { - tfMap["target_account_ids"] = aws.StringSlice(v) + tfMap["target_account_ids"] = v } return tfMap @@ -912,7 +910,7 @@ func flattenContainerDistributionConfiguration(apiObject *awstypes.ContainerDist tfMap := map[string]any{} if v := apiObject.ContainerTags; v != nil { - tfMap["container_tags"] = aws.StringSlice(v) + tfMap["container_tags"] = v } if v := apiObject.Description; v != nil { @@ -960,7 +958,7 @@ func flattenDistribution(apiObject awstypes.Distribution) map[string]any { } if v := apiObject.LicenseConfigurationArns; v != nil { - tfMap["license_configuration_arns"] = aws.StringSlice(v) + tfMap["license_configuration_arns"] = v } if v := apiObject.Region; v != nil { @@ -1000,19 +998,19 @@ func flattenLaunchPermissionConfiguration(apiObject *awstypes.LaunchPermissionCo tfMap := map[string]any{} if v := apiObject.OrganizationArns; v != nil { - tfMap["organization_arns"] = aws.StringSlice(v) + tfMap["organization_arns"] = v } if v := apiObject.OrganizationalUnitArns; v != nil { - tfMap["organizational_unit_arns"] = aws.StringSlice(v) + tfMap["organizational_unit_arns"] = v } if v := apiObject.UserGroups; v != nil { - tfMap["user_groups"] = aws.StringSlice(v) + tfMap["user_groups"] = v } if v := apiObject.UserIds; v != nil { - tfMap["user_ids"] = aws.StringSlice(v) + tfMap["user_ids"] = v } return tfMap diff --git a/internal/service/imagebuilder/distribution_configuration_identity_gen_test.go 
b/internal/service/imagebuilder/distribution_configuration_identity_gen_test.go new file mode 100644 index 000000000000..a56e989e11a6 --- /dev/null +++ b/internal/service/imagebuilder/distribution_configuration_identity_gen_test.go @@ -0,0 +1,337 @@ +// Code generated by internal/generate/identitytests/main.go; DO NOT EDIT. + +package imagebuilder_test + +import ( + "testing" + + "github.com/hashicorp/terraform-plugin-testing/compare" + "github.com/hashicorp/terraform-plugin-testing/config" + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/knownvalue" + "github.com/hashicorp/terraform-plugin-testing/plancheck" + "github.com/hashicorp/terraform-plugin-testing/statecheck" + "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" + "github.com/hashicorp/terraform-plugin-testing/tfversion" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccImageBuilderDistributionConfiguration_Identity_Basic(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_imagebuilder_distribution_configuration.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.ImageBuilderServiceID), + CheckDestroy: testAccCheckDistributionConfigurationDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + // Step 1: Setup + { + ConfigDirectory: config.StaticDirectory("testdata/DistributionConfiguration/basic/"), + ConfigVariables: config.Variables{ + 
acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDistributionConfigurationExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New(names.AttrARN), compare.ValuesSame()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + + // Step 2: Import command + { + ConfigDirectory: config.StaticDirectory("testdata/DistributionConfiguration/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ImportStateKind: resource.ImportCommandWithID, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + + // Step 3: Import block with Import ID + { + ConfigDirectory: config.StaticDirectory("testdata/DistributionConfiguration/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + }, + }, + }, + + // Step 4: Import block with Resource Identity + { + ConfigDirectory: config.StaticDirectory("testdata/DistributionConfiguration/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: 
config.StringVariable(rName), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithResourceIdentity, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + }, + }, + }, + }, + }) +} + +func TestAccImageBuilderDistributionConfiguration_Identity_RegionOverride(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_imagebuilder_distribution_configuration.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.ImageBuilderServiceID), + CheckDestroy: acctest.CheckDestroyNoop, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + // Step 1: Setup + { + ConfigDirectory: config.StaticDirectory("testdata/DistributionConfiguration/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New(names.AttrARN), compare.ValuesSame()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + 
statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + + // Step 2: Import command with appended "@" + { + ConfigDirectory: config.StaticDirectory("testdata/DistributionConfiguration/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ImportStateKind: resource.ImportCommandWithID, + ImportStateIdFunc: acctest.CrossRegionImportStateIdFunc(resourceName), + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + + // Step 3: Import command without appended "@" + { + ConfigDirectory: config.StaticDirectory("testdata/DistributionConfiguration/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ImportStateKind: resource.ImportCommandWithID, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + + // Step 4: Import block with Import ID and appended "@" + { + ConfigDirectory: config.StaticDirectory("testdata/DistributionConfiguration/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportStateIdFunc: acctest.CrossRegionImportStateIdFunc(resourceName), + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + }, + + // Step 5: Import block with Import ID and no 
appended "@" + { + ConfigDirectory: config.StaticDirectory("testdata/DistributionConfiguration/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + }, + + // Step 6: Import block with Resource Identity + { + ConfigDirectory: config.StaticDirectory("testdata/DistributionConfiguration/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithResourceIdentity, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + }, + }, + }) +} + +// Resource Identity was added after v6.3.0 +func TestAccImageBuilderDistributionConfiguration_Identity_ExistingResource(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_imagebuilder_distribution_configuration.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + 
TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.ImageBuilderServiceID), + CheckDestroy: testAccCheckDistributionConfigurationDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/DistributionConfiguration/basic_v6.3.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDistributionConfigurationExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/DistributionConfiguration/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + }, + }) +} + +// Resource Identity was added after v6.3.0 +func TestAccImageBuilderDistributionConfiguration_Identity_ExistingResource_NoRefresh_NoChange(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_imagebuilder_distribution_configuration.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + 
TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.ImageBuilderServiceID), + CheckDestroy: testAccCheckDistributionConfigurationDestroy(ctx), + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/DistributionConfiguration/basic_v6.3.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDistributionConfigurationExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/DistributionConfiguration/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + }, + }) +} diff --git a/internal/service/imagebuilder/image.go b/internal/service/imagebuilder/image.go index 890243a504cd..2effd9f080dd 100644 --- a/internal/service/imagebuilder/image.go +++ b/internal/service/imagebuilder/image.go @@ -30,6 +30,8 @@ import ( // @SDKResource("aws_imagebuilder_image", name="Image") // @Tags(identifierAttribute="id") +// @ArnIdentity +// @Testing(preIdentityVersion="v6.3.0") func resourceImage() 
*schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceImageCreate, @@ -37,10 +39,6 @@ func resourceImage() *schema.Resource { UpdateWithoutTimeout: resourceImageUpdate, DeleteWithoutTimeout: resourceImageDelete, - Importer: &schema.ResourceImporter{ - StateContext: schema.ImportStatePassthroughContext, - }, - Timeouts: &schema.ResourceTimeout{ Create: schema.DefaultTimeout(60 * time.Minute), }, @@ -559,7 +557,7 @@ func flattenContainer(apiObject awstypes.Container) map[string]any { tfMap := map[string]any{} if v := apiObject.ImageUris; v != nil { - tfMap["image_uris"] = aws.StringSlice(v) + tfMap["image_uris"] = v } if v := apiObject.Region; v != nil { diff --git a/internal/service/imagebuilder/image_identity_gen_test.go b/internal/service/imagebuilder/image_identity_gen_test.go new file mode 100644 index 000000000000..05d81ace020a --- /dev/null +++ b/internal/service/imagebuilder/image_identity_gen_test.go @@ -0,0 +1,337 @@ +// Code generated by internal/generate/identitytests/main.go; DO NOT EDIT. 
+ +package imagebuilder_test + +import ( + "testing" + + "github.com/hashicorp/terraform-plugin-testing/compare" + "github.com/hashicorp/terraform-plugin-testing/config" + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/knownvalue" + "github.com/hashicorp/terraform-plugin-testing/plancheck" + "github.com/hashicorp/terraform-plugin-testing/statecheck" + "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" + "github.com/hashicorp/terraform-plugin-testing/tfversion" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccImageBuilderImage_Identity_Basic(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_imagebuilder_image.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.ImageBuilderServiceID), + CheckDestroy: testAccCheckImageDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + // Step 1: Setup + { + ConfigDirectory: config.StaticDirectory("testdata/Image/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckImageExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New(names.AttrARN), compare.ValuesSame()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), 
knownvalue.StringExact(acctest.Region())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + + // Step 2: Import command + { + ConfigDirectory: config.StaticDirectory("testdata/Image/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ImportStateKind: resource.ImportCommandWithID, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + + // Step 3: Import block with Import ID + { + ConfigDirectory: config.StaticDirectory("testdata/Image/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + }, + }, + }, + + // Step 4: Import block with Resource Identity + { + ConfigDirectory: config.StaticDirectory("testdata/Image/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithResourceIdentity, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), 
knownvalue.StringExact(acctest.Region())), + }, + }, + }, + }, + }) +} + +func TestAccImageBuilderImage_Identity_RegionOverride(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_imagebuilder_image.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.ImageBuilderServiceID), + CheckDestroy: acctest.CheckDestroyNoop, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + // Step 1: Setup + { + ConfigDirectory: config.StaticDirectory("testdata/Image/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New(names.AttrARN), compare.ValuesSame()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + + // Step 2: Import command with appended "@" + { + ConfigDirectory: config.StaticDirectory("testdata/Image/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ImportStateKind: resource.ImportCommandWithID, + ImportStateIdFunc: acctest.CrossRegionImportStateIdFunc(resourceName), + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + + // Step 3: Import 
command without appended "@" + { + ConfigDirectory: config.StaticDirectory("testdata/Image/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ImportStateKind: resource.ImportCommandWithID, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + + // Step 4: Import block with Import ID and appended "@" + { + ConfigDirectory: config.StaticDirectory("testdata/Image/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportStateIdFunc: acctest.CrossRegionImportStateIdFunc(resourceName), + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + }, + + // Step 5: Import block with Import ID and no appended "@" + { + ConfigDirectory: config.StaticDirectory("testdata/Image/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, 
tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + }, + + // Step 6: Import block with Resource Identity + { + ConfigDirectory: config.StaticDirectory("testdata/Image/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithResourceIdentity, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + }, + }, + }) +} + +// Resource Identity was added after v6.3.0 +func TestAccImageBuilderImage_Identity_ExistingResource(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_imagebuilder_image.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.ImageBuilderServiceID), + CheckDestroy: testAccCheckImageDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/Image/basic_v6.3.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckImageExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + 
ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Image/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + }, + }) +} + +// Resource Identity was added after v6.3.0 +func TestAccImageBuilderImage_Identity_ExistingResource_NoRefresh_NoChange(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_imagebuilder_image.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.ImageBuilderServiceID), + CheckDestroy: testAccCheckImageDestroy(ctx), + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/Image/basic_v6.3.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckImageExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + 
ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Image/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + }, + }) +} diff --git a/internal/service/imagebuilder/image_pipeline.go b/internal/service/imagebuilder/image_pipeline.go index b2f16d97fc20..cdf9379af185 100644 --- a/internal/service/imagebuilder/image_pipeline.go +++ b/internal/service/imagebuilder/image_pipeline.go @@ -29,6 +29,8 @@ import ( // @SDKResource("aws_imagebuilder_image_pipeline", name="Image Pipeline") // @Tags(identifierAttribute="id") +// @ArnIdentity +// @Testing(preIdentityVersion="v6.3.0") func resourceImagePipeline() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceImagePipelineCreate, @@ -36,10 +38,6 @@ func resourceImagePipeline() *schema.Resource { UpdateWithoutTimeout: resourceImagePipelineUpdate, DeleteWithoutTimeout: resourceImagePipelineDelete, - Importer: &schema.ResourceImporter{ - StateContext: schema.ImportStatePassthroughContext, - }, - Schema: map[string]*schema.Schema{ names.AttrARN: { Type: schema.TypeString, @@ -592,7 +590,7 @@ func flattenECRConfiguration(apiObject *awstypes.EcrConfiguration) map[string]an } if v := apiObject.ContainerTags; v != nil { - tfMap["container_tags"] = aws.StringSlice(v) + tfMap["container_tags"] = v } return tfMap diff --git a/internal/service/imagebuilder/image_pipeline_identity_gen_test.go b/internal/service/imagebuilder/image_pipeline_identity_gen_test.go new file mode 100644 index 000000000000..65b1646ab780 
--- /dev/null +++ b/internal/service/imagebuilder/image_pipeline_identity_gen_test.go @@ -0,0 +1,337 @@ +// Code generated by internal/generate/identitytests/main.go; DO NOT EDIT. + +package imagebuilder_test + +import ( + "testing" + + "github.com/hashicorp/terraform-plugin-testing/compare" + "github.com/hashicorp/terraform-plugin-testing/config" + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/knownvalue" + "github.com/hashicorp/terraform-plugin-testing/plancheck" + "github.com/hashicorp/terraform-plugin-testing/statecheck" + "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" + "github.com/hashicorp/terraform-plugin-testing/tfversion" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccImageBuilderImagePipeline_Identity_Basic(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_imagebuilder_image_pipeline.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.ImageBuilderServiceID), + CheckDestroy: testAccCheckImagePipelineDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + // Step 1: Setup + { + ConfigDirectory: config.StaticDirectory("testdata/ImagePipeline/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckImagePipelineExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ 
+ statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New(names.AttrARN), compare.ValuesSame()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + + // Step 2: Import command + { + ConfigDirectory: config.StaticDirectory("testdata/ImagePipeline/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ImportStateKind: resource.ImportCommandWithID, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + + // Step 3: Import block with Import ID + { + ConfigDirectory: config.StaticDirectory("testdata/ImagePipeline/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + }, + }, + }, + + // Step 4: Import block with Resource Identity + { + ConfigDirectory: config.StaticDirectory("testdata/ImagePipeline/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithResourceIdentity, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, 
tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + }, + }, + }, + }, + }) +} + +func TestAccImageBuilderImagePipeline_Identity_RegionOverride(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_imagebuilder_image_pipeline.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.ImageBuilderServiceID), + CheckDestroy: acctest.CheckDestroyNoop, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + // Step 1: Setup + { + ConfigDirectory: config.StaticDirectory("testdata/ImagePipeline/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New(names.AttrARN), compare.ValuesSame()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + + // Step 2: Import command with appended "@" + { + ConfigDirectory: config.StaticDirectory("testdata/ImagePipeline/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": 
config.StringVariable(acctest.AlternateRegion()), + }, + ImportStateKind: resource.ImportCommandWithID, + ImportStateIdFunc: acctest.CrossRegionImportStateIdFunc(resourceName), + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + + // Step 3: Import command without appended "@" + { + ConfigDirectory: config.StaticDirectory("testdata/ImagePipeline/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ImportStateKind: resource.ImportCommandWithID, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + + // Step 4: Import block with Import ID and appended "@" + { + ConfigDirectory: config.StaticDirectory("testdata/ImagePipeline/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportStateIdFunc: acctest.CrossRegionImportStateIdFunc(resourceName), + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + }, + + // Step 5: Import block with Import ID and no appended "@" + { + ConfigDirectory: config.StaticDirectory("testdata/ImagePipeline/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + 
ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + }, + + // Step 6: Import block with Resource Identity + { + ConfigDirectory: config.StaticDirectory("testdata/ImagePipeline/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithResourceIdentity, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + }, + }, + }) +} + +// Resource Identity was added after v6.3.0 +func TestAccImageBuilderImagePipeline_Identity_ExistingResource(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_imagebuilder_image_pipeline.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.ImageBuilderServiceID), + CheckDestroy: testAccCheckImagePipelineDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: 
config.StaticDirectory("testdata/ImagePipeline/basic_v6.3.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckImagePipelineExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/ImagePipeline/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + }, + }) +} + +// Resource Identity was added after v6.3.0 +func TestAccImageBuilderImagePipeline_Identity_ExistingResource_NoRefresh_NoChange(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_imagebuilder_image_pipeline.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.ImageBuilderServiceID), + CheckDestroy: testAccCheckImagePipelineDestroy(ctx), + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + // Step 1: 
Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/ImagePipeline/basic_v6.3.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckImagePipelineExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/ImagePipeline/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + }, + }) +} diff --git a/internal/service/imagebuilder/image_recipe.go b/internal/service/imagebuilder/image_recipe.go index d3c1730966b6..a35f0bb4210f 100644 --- a/internal/service/imagebuilder/image_recipe.go +++ b/internal/service/imagebuilder/image_recipe.go @@ -29,6 +29,8 @@ import ( // @SDKResource("aws_imagebuilder_image_recipe", name="Image Recipe") // @Tags(identifierAttribute="id") +// @ArnIdentity +// @Testing(preIdentityVersion="v6.3.0") func resourceImageRecipe() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceImageRecipeCreate, @@ -36,10 +38,6 @@ func resourceImageRecipe() *schema.Resource { UpdateWithoutTimeout: resourceImageRecipeUpdate, DeleteWithoutTimeout: resourceImageRecipeDelete, - Importer: &schema.ResourceImporter{ - StateContext: schema.ImportStatePassthroughContext, - }, - Schema: map[string]*schema.Schema{ names.AttrARN: { Type: schema.TypeString, @@ -82,7 
+80,7 @@ func resourceImageRecipe() *schema.Resource { Type: schema.TypeInt, Optional: true, ForceNew: true, - ValidateFunc: validation.IntBetween(100, 10000), + ValidateFunc: validation.IntBetween(100, 100000), }, names.AttrKMSKeyID: { Type: schema.TypeString, @@ -100,7 +98,7 @@ func resourceImageRecipe() *schema.Resource { Type: schema.TypeInt, Optional: true, ForceNew: true, - ValidateFunc: validation.IntBetween(125, 1000), + ValidateFunc: validation.IntBetween(125, 2000), }, names.AttrVolumeSize: { Type: schema.TypeInt, diff --git a/internal/service/imagebuilder/image_recipe_identity_gen_test.go b/internal/service/imagebuilder/image_recipe_identity_gen_test.go new file mode 100644 index 000000000000..65a17f3776ac --- /dev/null +++ b/internal/service/imagebuilder/image_recipe_identity_gen_test.go @@ -0,0 +1,337 @@ +// Code generated by internal/generate/identitytests/main.go; DO NOT EDIT. + +package imagebuilder_test + +import ( + "testing" + + "github.com/hashicorp/terraform-plugin-testing/compare" + "github.com/hashicorp/terraform-plugin-testing/config" + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/knownvalue" + "github.com/hashicorp/terraform-plugin-testing/plancheck" + "github.com/hashicorp/terraform-plugin-testing/statecheck" + "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" + "github.com/hashicorp/terraform-plugin-testing/tfversion" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccImageBuilderImageRecipe_Identity_Basic(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_imagebuilder_image_recipe.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, 
resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.ImageBuilderServiceID), + CheckDestroy: testAccCheckImageRecipeDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + // Step 1: Setup + { + ConfigDirectory: config.StaticDirectory("testdata/ImageRecipe/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckImageRecipeExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New(names.AttrARN), compare.ValuesSame()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + + // Step 2: Import command + { + ConfigDirectory: config.StaticDirectory("testdata/ImageRecipe/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ImportStateKind: resource.ImportCommandWithID, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + + // Step 3: Import block with Import ID + { + ConfigDirectory: config.StaticDirectory("testdata/ImageRecipe/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, 
tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + }, + }, + }, + + // Step 4: Import block with Resource Identity + { + ConfigDirectory: config.StaticDirectory("testdata/ImageRecipe/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithResourceIdentity, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + }, + }, + }, + }, + }) +} + +func TestAccImageBuilderImageRecipe_Identity_RegionOverride(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_imagebuilder_image_recipe.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.ImageBuilderServiceID), + CheckDestroy: acctest.CheckDestroyNoop, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + // Step 1: Setup + { + ConfigDirectory: config.StaticDirectory("testdata/ImageRecipe/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ConfigStateChecks: []statecheck.StateCheck{ + 
statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New(names.AttrARN), compare.ValuesSame()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + + // Step 2: Import command with appended "@" + { + ConfigDirectory: config.StaticDirectory("testdata/ImageRecipe/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ImportStateKind: resource.ImportCommandWithID, + ImportStateIdFunc: acctest.CrossRegionImportStateIdFunc(resourceName), + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + + // Step 3: Import command without appended "@" + { + ConfigDirectory: config.StaticDirectory("testdata/ImageRecipe/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ImportStateKind: resource.ImportCommandWithID, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + + // Step 4: Import block with Import ID and appended "@" + { + ConfigDirectory: config.StaticDirectory("testdata/ImageRecipe/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportStateIdFunc: acctest.CrossRegionImportStateIdFunc(resourceName), + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + 
plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + }, + + // Step 5: Import block with Import ID and no appended "@" + { + ConfigDirectory: config.StaticDirectory("testdata/ImageRecipe/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + }, + + // Step 6: Import block with Resource Identity + { + ConfigDirectory: config.StaticDirectory("testdata/ImageRecipe/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithResourceIdentity, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + }, + }, + }) +} + +// 
Resource Identity was added after v6.3.0 +func TestAccImageBuilderImageRecipe_Identity_ExistingResource(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_imagebuilder_image_recipe.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.ImageBuilderServiceID), + CheckDestroy: testAccCheckImageRecipeDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/ImageRecipe/basic_v6.3.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckImageRecipeExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/ImageRecipe/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + }, + }) +} + +// Resource Identity was added after v6.3.0 +func 
TestAccImageBuilderImageRecipe_Identity_ExistingResource_NoRefresh_NoChange(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_imagebuilder_image_recipe.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.ImageBuilderServiceID), + CheckDestroy: testAccCheckImageRecipeDestroy(ctx), + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/ImageRecipe/basic_v6.3.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckImageRecipeExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/ImageRecipe/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + }, + }) +} diff --git a/internal/service/imagebuilder/image_test.go b/internal/service/imagebuilder/image_test.go index 7185d2fb493e..6a3430b2f12a 100644 --- 
a/internal/service/imagebuilder/image_test.go +++ b/internal/service/imagebuilder/image_test.go @@ -733,29 +733,102 @@ resource "aws_iam_role" "test_execute" { name = join("-", [%[1]q, "execute"]) } -data "aws_iam_policy" "AWSServiceRoleForImageBuilder" { - arn = "arn:${data.aws_partition.current.partition}:iam::aws:policy/aws-service-role/AWSServiceRoleForImageBuilder" -} - -resource "aws_iam_policy" "test_execute_service_policy" { - name = join("-", [%[1]q, "execute-service"]) - policy = data.aws_iam_policy.AWSServiceRoleForImageBuilder.policy -} - -resource "aws_iam_role_policy_attachment" "test_execute_service" { - policy_arn = aws_iam_policy.test_execute_service_policy.arn - role = aws_iam_role.test_execute.name -} - resource "aws_iam_policy" "test_execute" { name = join("-", [%[1]q, "execute"]) policy = jsonencode({ Version = "2012-10-17" - Statement = [{ - Action = "ssm:SendCommand" - Effect = "Allow" - Resource = "arn:${data.aws_partition.current.partition}:ssm:${data.aws_region.current.id}::document/AWS-UpdateSSMAgent" - }] + Statement = [ + { + Sid = "EC2Lifecycle" + Effect = "Allow" + Action = [ + "ec2:CreateImage", + "ec2:CreateTags", + "ec2:DescribeInstances", + "ec2:DescribeImages", + "ec2:DescribeTags", + "ec2:DescribeInstanceStatus", + "ec2:DescribeInstanceTypeOfferings", + "ec2:RunInstances", + "ec2:StopInstances", + "ec2:TerminateInstances" + ] + Resource = "*" + }, + { + Sid = "SSMExecution" + Effect = "Allow" + Action = [ + "ssm:AddTagsToResource", + "ssm:CreateAssociation", + "ssm:DeleteAssociation", + "ssm:DescribeAssociationExecutions", + "ssm:DescribeDocument", + "ssm:DescribeInstanceAssociationsStatus", + "ssm:DescribeInstanceInformation", + "ssm:GetAutomationExecution", + "ssm:GetCommandInvocation", + "ssm:GetDocument", + "ssm:ListCommands", + "ssm:ListCommandInvocations", + "ssm:ListInventoryEntries", + "ssm:SendAutomationSignal", + "ssm:SendCommand", + "ssm:StopAutomationExecution" + ] + Resource = "*" + }, + { + Sid = "ImageBuilderCore" 
+ Effect = "Allow" + Action = [ + "imagebuilder:GetComponent", + "imagebuilder:GetImage", + "imagebuilder:GetImageRecipe", + "imagebuilder:ListComponents", + "imagebuilder:ListImageBuildVersions", + "imagebuilder:ListImagePackages", + "imagebuilder:ListImagePipelineImages", + "imagebuilder:ListImageRecipes" + ] + Resource = "*" + }, + { + Sid = "ImageScanFindings" + Effect = "Allow" + Action = [ + "inspector2:BatchGet*", + "inspector2:Get*", + "inspector2:List*" + ] + Resource = "*" + }, + { + Sid = "CloudWatchLogging" + Effect = "Allow" + Action = [ + "logs:CreateLogGroup", + "logs:CreateLogStream", + "logs:PutLogEvents" + ] + Resource = "*" + }, + { + Sid = "PassRole" + Effect = "Allow" + Action = "iam:PassRole" + Resource = "*" + Condition = { + StringEquals = { + "iam:PassedToService" = [ + "ec2.amazonaws.com", + "ec2.amazonaws.com.cn", + "vmie.amazonaws.com" + ] + } + } + } + ] }) } @@ -785,8 +858,7 @@ resource "aws_imagebuilder_image" "test" { } depends_on = [ - aws_iam_role_policy_attachment.test_execute, - aws_iam_role_policy_attachment.test_execute_service + aws_iam_role_policy_attachment.test_execute ] } `, rName), diff --git a/internal/service/imagebuilder/infrastructure_configuration.go b/internal/service/imagebuilder/infrastructure_configuration.go index 9d96c8718b42..4e1425ad2700 100644 --- a/internal/service/imagebuilder/infrastructure_configuration.go +++ b/internal/service/imagebuilder/infrastructure_configuration.go @@ -29,6 +29,8 @@ import ( // @SDKResource("aws_imagebuilder_infrastructure_configuration", name="Infrastructure Configuration") // @Tags(identifierAttribute="id") +// @ArnIdentity +// @Testing(preIdentityVersion="v6.3.0") func resourceInfrastructureConfiguration() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceInfrastructureConfigurationCreate, @@ -36,10 +38,6 @@ func resourceInfrastructureConfiguration() *schema.Resource { UpdateWithoutTimeout: resourceInfrastructureConfigurationUpdate, 
DeleteWithoutTimeout: resourceInfrastructureConfigurationDelete, - Importer: &schema.ResourceImporter{ - StateContext: schema.ImportStatePassthroughContext, - }, - Schema: map[string]*schema.Schema{ names.AttrARN: { Type: schema.TypeString, @@ -248,7 +246,7 @@ func resourceInfrastructureConfigurationCreate(ctx context.Context, d *schema.Re } outputRaw, err := tfresource.RetryWhen(ctx, propagationTimeout, - func() (any, error) { + func(ctx context.Context) (any, error) { return conn.CreateInfrastructureConfiguration(ctx, input) }, func(err error) (bool, error) { @@ -393,7 +391,7 @@ func resourceInfrastructureConfigurationUpdate(ctx context.Context, d *schema.Re } _, err := tfresource.RetryWhen(ctx, propagationTimeout, - func() (any, error) { + func(ctx context.Context) (any, error) { return conn.UpdateInfrastructureConfiguration(ctx, input) }, func(err error) (bool, error) { diff --git a/internal/service/imagebuilder/infrastructure_configuration_identity_gen_test.go b/internal/service/imagebuilder/infrastructure_configuration_identity_gen_test.go new file mode 100644 index 000000000000..3b4d8d21e55c --- /dev/null +++ b/internal/service/imagebuilder/infrastructure_configuration_identity_gen_test.go @@ -0,0 +1,337 @@ +// Code generated by internal/generate/identitytests/main.go; DO NOT EDIT. 
+ +package imagebuilder_test + +import ( + "testing" + + "github.com/hashicorp/terraform-plugin-testing/compare" + "github.com/hashicorp/terraform-plugin-testing/config" + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/knownvalue" + "github.com/hashicorp/terraform-plugin-testing/plancheck" + "github.com/hashicorp/terraform-plugin-testing/statecheck" + "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" + "github.com/hashicorp/terraform-plugin-testing/tfversion" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccImageBuilderInfrastructureConfiguration_Identity_Basic(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_imagebuilder_infrastructure_configuration.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.ImageBuilderServiceID), + CheckDestroy: testAccCheckInfrastructureConfigurationDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + // Step 1: Setup + { + ConfigDirectory: config.StaticDirectory("testdata/InfrastructureConfiguration/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckInfrastructureConfigurationExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, 
tfjsonpath.New(names.AttrARN), compare.ValuesSame()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + + // Step 2: Import command + { + ConfigDirectory: config.StaticDirectory("testdata/InfrastructureConfiguration/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ImportStateKind: resource.ImportCommandWithID, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + + // Step 3: Import block with Import ID + { + ConfigDirectory: config.StaticDirectory("testdata/InfrastructureConfiguration/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + }, + }, + }, + + // Step 4: Import block with Resource Identity + { + ConfigDirectory: config.StaticDirectory("testdata/InfrastructureConfiguration/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithResourceIdentity, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), 
knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + }, + }, + }, + }, + }) +} + +func TestAccImageBuilderInfrastructureConfiguration_Identity_RegionOverride(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_imagebuilder_infrastructure_configuration.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.ImageBuilderServiceID), + CheckDestroy: acctest.CheckDestroyNoop, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + // Step 1: Setup + { + ConfigDirectory: config.StaticDirectory("testdata/InfrastructureConfiguration/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New(names.AttrARN), compare.ValuesSame()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + + // Step 2: Import command with appended "@" + { + ConfigDirectory: config.StaticDirectory("testdata/InfrastructureConfiguration/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + 
"region": config.StringVariable(acctest.AlternateRegion()), + }, + ImportStateKind: resource.ImportCommandWithID, + ImportStateIdFunc: acctest.CrossRegionImportStateIdFunc(resourceName), + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + + // Step 3: Import command without appended "@" + { + ConfigDirectory: config.StaticDirectory("testdata/InfrastructureConfiguration/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ImportStateKind: resource.ImportCommandWithID, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + + // Step 4: Import block with Import ID and appended "@" + { + ConfigDirectory: config.StaticDirectory("testdata/InfrastructureConfiguration/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportStateIdFunc: acctest.CrossRegionImportStateIdFunc(resourceName), + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + }, + + // Step 5: Import block with Import ID and no appended "@" + { + ConfigDirectory: config.StaticDirectory("testdata/InfrastructureConfiguration/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + 
ImportStateKind: resource.ImportBlockWithID, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + }, + + // Step 6: Import block with Resource Identity + { + ConfigDirectory: config.StaticDirectory("testdata/InfrastructureConfiguration/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithResourceIdentity, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + }, + }, + }) +} + +// Resource Identity was added after v6.3.0 +func TestAccImageBuilderInfrastructureConfiguration_Identity_ExistingResource(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_imagebuilder_infrastructure_configuration.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.ImageBuilderServiceID), + CheckDestroy: testAccCheckInfrastructureConfigurationDestroy(ctx), + Steps: 
[]resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/InfrastructureConfiguration/basic_v6.3.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckInfrastructureConfigurationExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/InfrastructureConfiguration/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + }, + }) +} + +// Resource Identity was added after v6.3.0 +func TestAccImageBuilderInfrastructureConfiguration_Identity_ExistingResource_NoRefresh_NoChange(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_imagebuilder_infrastructure_configuration.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.ImageBuilderServiceID), + CheckDestroy: testAccCheckInfrastructureConfigurationDestroy(ctx), 
+ AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/InfrastructureConfiguration/basic_v6.3.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckInfrastructureConfigurationExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/InfrastructureConfiguration/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + }, + }) +} diff --git a/internal/service/imagebuilder/lifecycle_policy.go b/internal/service/imagebuilder/lifecycle_policy.go index 037e9eb4fda0..e9ba7f1af59d 100644 --- a/internal/service/imagebuilder/lifecycle_policy.go +++ b/internal/service/imagebuilder/lifecycle_policy.go @@ -38,6 +38,7 @@ import ( // @Tags(identifierAttribute="arn") // @ArnIdentity(identityDuplicateAttributes="id") // @ArnFormat("lifecycle-policy/{name}") +// @Testing(preIdentityVersion="v5.100.0") func newLifecyclePolicyResource(_ context.Context) (resource.ResourceWithConfigure, error) { return &lifecyclePolicyResource{}, nil } @@ -169,6 +170,10 @@ func (r *lifecyclePolicyResource) Schema(ctx context.Context, request resource.S 
Attributes: map[string]schema.Attribute{ "is_public": schema.BoolAttribute{ Optional: true, + Computed: true, + PlanModifiers: []planmodifier.Bool{ + boolplanmodifier.UseStateForUnknown(), + }, }, "regions": schema.ListAttribute{ CustomType: fwtypes.ListOfStringType, @@ -311,7 +316,7 @@ func (r *lifecyclePolicyResource) Create(ctx context.Context, request resource.C input.ClientToken = aws.String(sdkid.UniqueId()) input.Tags = getTagsIn(ctx) - outputRaw, err := tfresource.RetryWhenAWSErrMessageContains(ctx, propagationTimeout, func() (any, error) { + outputRaw, err := tfresource.RetryWhenAWSErrMessageContains(ctx, propagationTimeout, func(ctx context.Context) (any, error) { return conn.CreateLifecyclePolicy(ctx, input) }, errCodeInvalidParameterValueException, "The provided role does not exist or does not have sufficient permissions") @@ -413,7 +418,7 @@ func (r *lifecyclePolicyResource) Update(ctx context.Context, request resource.U // Additional fields. input.ClientToken = aws.String(sdkid.UniqueId()) - _, err := tfresource.RetryWhenAWSErrMessageContains(ctx, propagationTimeout, func() (any, error) { + _, err := tfresource.RetryWhenAWSErrMessageContains(ctx, propagationTimeout, func(ctx context.Context) (any, error) { return conn.UpdateLifecyclePolicy(ctx, input) }, errCodeInvalidParameterValueException, "The provided role does not exist or does not have sufficient permissions") diff --git a/internal/service/imagebuilder/lifecycle_policy_identity_gen_test.go b/internal/service/imagebuilder/lifecycle_policy_identity_gen_test.go index 8abe537151f1..01e9cbc340ad 100644 --- a/internal/service/imagebuilder/lifecycle_policy_identity_gen_test.go +++ b/internal/service/imagebuilder/lifecycle_policy_identity_gen_test.go @@ -21,10 +21,11 @@ import ( func TestAccImageBuilderLifecyclePolicy_Identity_Basic(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_imagebuilder_lifecycle_policy.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - 
resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ TerraformVersionChecks: []tfversion.TerraformVersionCheck{ tfversion.SkipBelow(tfversion.Version1_12_0), }, @@ -46,6 +47,9 @@ func TestAccImageBuilderLifecyclePolicy_Identity_Basic(t *testing.T) { tfstatecheck.ExpectRegionalARNFormat(resourceName, tfjsonpath.New(names.AttrARN), "imagebuilder", "lifecycle-policy/{name}"), statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New(names.AttrARN), compare.ValuesSame()), statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), }, }, @@ -107,7 +111,7 @@ func TestAccImageBuilderLifecyclePolicy_Identity_RegionOverride(t *testing.T) { resourceName := "aws_imagebuilder_lifecycle_policy.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ TerraformVersionChecks: []tfversion.TerraformVersionCheck{ tfversion.SkipBelow(tfversion.Version1_12_0), }, @@ -127,6 +131,9 @@ func TestAccImageBuilderLifecyclePolicy_Identity_RegionOverride(t *testing.T) { tfstatecheck.ExpectRegionalARNAlternateRegionFormat(resourceName, tfjsonpath.New(names.AttrARN), "imagebuilder", "lifecycle-policy/{name}"), statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New(names.AttrARN), compare.ValuesSame()), statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), statecheck.ExpectIdentityValueMatchesState(resourceName, 
tfjsonpath.New(names.AttrARN)), }, }, @@ -218,3 +225,127 @@ func TestAccImageBuilderLifecyclePolicy_Identity_RegionOverride(t *testing.T) { }, }) } + +func TestAccImageBuilderLifecyclePolicy_Identity_ExistingResource(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_imagebuilder_lifecycle_policy.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.ImageBuilderServiceID), + CheckDestroy: testAccCheckLifecyclePolicyDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/LifecyclePolicy/basic_v5.100.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckLifecyclePolicyExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: v6.0 Identity set on refresh + { + ConfigDirectory: config.StaticDirectory("testdata/LifecyclePolicy/basic_v6.0.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckLifecyclePolicyExists(ctx, resourceName), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + 
statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + + // Step 3: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/LifecyclePolicy/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + }, + }) +} + +func TestAccImageBuilderLifecyclePolicy_Identity_ExistingResource_NoRefresh_NoChange(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_imagebuilder_lifecycle_policy.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.ImageBuilderServiceID), + CheckDestroy: testAccCheckLifecyclePolicyDestroy(ctx), + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/LifecyclePolicy/basic_v5.100.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckLifecyclePolicyExists(ctx, resourceName), 
+ ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/LifecyclePolicy/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + }, + }, + }) +} diff --git a/internal/service/imagebuilder/lifecycle_policy_test.go b/internal/service/imagebuilder/lifecycle_policy_test.go index 4eebdeec660f..f31c54b2c79f 100644 --- a/internal/service/imagebuilder/lifecycle_policy_test.go +++ b/internal/service/imagebuilder/lifecycle_policy_test.go @@ -11,13 +11,8 @@ import ( awstypes "github.com/aws/aws-sdk-go-v2/service/imagebuilder/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" - "github.com/hashicorp/terraform-plugin-testing/plancheck" - "github.com/hashicorp/terraform-plugin-testing/statecheck" "github.com/hashicorp/terraform-plugin-testing/terraform" - "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" - "github.com/hashicorp/terraform-plugin-testing/tfversion" "github.com/hashicorp/terraform-provider-aws/internal/acctest" - tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" "github.com/hashicorp/terraform-provider-aws/internal/conns" tfimagebuilder "github.com/hashicorp/terraform-provider-aws/internal/service/imagebuilder" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" @@ -136,6 +131,46 @@ func TestAccImageBuilderLifecyclePolicy_policyDetails(t *testing.T) { }) } +func TestAccImageBuilderLifecyclePolicy_policyDetailsExclusionRulesAMIsIsPublic(t *testing.T) { + ctx := acctest.Context(t) + resourceName := "aws_imagebuilder_lifecycle_policy.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { 
acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.ImageBuilderServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckLifecyclePolicyDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccLifecyclePolicyConfig_policyDetailsExclusionRulesAMIsIsPublic(rName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckLifecyclePolicyExists(ctx, resourceName), + resource.TestCheckResourceAttr(resourceName, "policy_detail.#", "1"), + resource.TestCheckResourceAttr(resourceName, "policy_detail.0.action.#", "1"), + resource.TestCheckResourceAttr(resourceName, "policy_detail.0.action.0.type", string(awstypes.LifecyclePolicyDetailActionTypeDelete)), + resource.TestCheckResourceAttr(resourceName, "policy_detail.0.action.0.include_resources.#", "1"), + resource.TestCheckResourceAttr(resourceName, "policy_detail.0.action.0.include_resources.0.amis", acctest.CtTrue), + resource.TestCheckResourceAttr(resourceName, "policy_detail.0.action.0.include_resources.0.snapshots", acctest.CtTrue), + resource.TestCheckResourceAttr(resourceName, "policy_detail.0.exclusion_rules.#", "1"), + resource.TestCheckResourceAttr(resourceName, "policy_detail.0.exclusion_rules.0.amis.#", "1"), + resource.TestCheckResourceAttr(resourceName, "policy_detail.0.exclusion_rules.0.amis.0.is_public", acctest.CtFalse), + resource.TestCheckResourceAttr(resourceName, "policy_detail.0.exclusion_rules.0.amis.0.regions.#", "1"), + resource.TestCheckResourceAttr(resourceName, "policy_detail.0.exclusion_rules.0.amis.0.last_launched.#", "1"), + resource.TestCheckResourceAttr(resourceName, "policy_detail.0.exclusion_rules.0.amis.0.last_launched.0.unit", string(awstypes.LifecyclePolicyTimeUnitWeeks)), + resource.TestCheckResourceAttr(resourceName, "policy_detail.0.exclusion_rules.0.amis.0.last_launched.0.value", "2"), + resource.TestCheckResourceAttr(resourceName, "policy_detail.0.exclusion_rules.0.amis.0.tag_map.%", "2"), + 
resource.TestCheckResourceAttr(resourceName, "policy_detail.0.exclusion_rules.0.amis.0.tag_map.key1", acctest.CtValue1), + resource.TestCheckResourceAttr(resourceName, "policy_detail.0.exclusion_rules.0.amis.0.tag_map.key2", acctest.CtValue2), + resource.TestCheckResourceAttr(resourceName, "policy_detail.0.filter.#", "1"), + resource.TestCheckResourceAttr(resourceName, "policy_detail.0.filter.0.type", string(awstypes.LifecyclePolicyDetailFilterTypeCount)), + resource.TestCheckResourceAttr(resourceName, "policy_detail.0.filter.0.value", "10"), + ), + }, + }, + }) +} + func TestAccImageBuilderLifecyclePolicy_resourceSelection(t *testing.T) { ctx := acctest.Context(t) resourceName := "aws_imagebuilder_lifecycle_policy.test" @@ -244,70 +279,6 @@ func TestAccImageBuilderLifecyclePolicy_disappears(t *testing.T) { }) } -func TestAccImageBuilderLifecyclePolicy_Identity_ExistingResource(t *testing.T) { - ctx := acctest.Context(t) - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resourceName := "aws_imagebuilder_lifecycle_policy.test" - - resource.ParallelTest(t, resource.TestCase{ - TerraformVersionChecks: []tfversion.TerraformVersionCheck{ - tfversion.SkipBelow(tfversion.Version1_12_0), - }, - PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, names.ImageBuilderServiceID), - CheckDestroy: testAccCheckLifecyclePolicyDestroy(ctx), - Steps: []resource.TestStep{ - { - ExternalProviders: map[string]resource.ExternalProvider{ - "aws": { - Source: "hashicorp/aws", - VersionConstraint: "5.100.0", - }, - }, - Config: testAccLifecyclePolicyConfig_basic(rName), - ConfigStateChecks: []statecheck.StateCheck{ - tfstatecheck.ExpectNoIdentity(resourceName), - }, - }, - { - ExternalProviders: map[string]resource.ExternalProvider{ - "aws": { - Source: "hashicorp/aws", - VersionConstraint: "6.0.0", - }, - }, - Config: testAccLifecyclePolicyConfig_basic(rName), - ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{ - 
plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - PostApplyPostRefresh: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - }, - ConfigStateChecks: []statecheck.StateCheck{ - statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), - }, - }, - { - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - Config: testAccLifecyclePolicyConfig_basic(rName), - ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - PostApplyPostRefresh: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - }, - ConfigStateChecks: []statecheck.StateCheck{ - statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), - }, - }, - }, - }) -} - func testAccCheckLifecyclePolicyExists(ctx context.Context, n string) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] @@ -521,6 +492,51 @@ resource "aws_imagebuilder_lifecycle_policy" "test" { `, rName)) } +func testAccLifecyclePolicyConfig_policyDetailsExclusionRulesAMIsIsPublic(rName string) string { + return acctest.ConfigCompose(testAccLifecyclePolicyConfig_base(rName), fmt.Sprintf(` +resource "aws_imagebuilder_lifecycle_policy" "test" { + name = %[1]q + description = "Used for setting lifecycle policies" + execution_role = aws_iam_role.test.arn + resource_type = "AMI_IMAGE" + policy_detail { + action { + type = "DELETE" + include_resources { + amis = true + snapshots = true + } + } + exclusion_rules { + amis { + regions = [data.aws_region.current.region] + last_launched { + unit = "WEEKS" + value = 2 + } + tag_map = { + "key1" = "value1" + "key2" = "value2" + } + } + } + filter { + type = "COUNT" + value = "10" + } + } + resource_selection { + tag_map = { + "key1" = "value1" + 
"key2" = "value2" + } + } + + depends_on = [aws_iam_role_policy_attachment.test] +} +`, rName)) +} + func testAccLifecyclePolicyConfig_resourceSelection(rName string) string { return acctest.ConfigCompose( testAccLifecyclePolicyConfig_base(rName), diff --git a/internal/service/imagebuilder/service_endpoint_resolver_gen.go b/internal/service/imagebuilder/service_endpoint_resolver_gen.go index cbc53be0e4d6..36ac8e6603b1 100644 --- a/internal/service/imagebuilder/service_endpoint_resolver_gen.go +++ b/internal/service/imagebuilder/service_endpoint_resolver_gen.go @@ -62,7 +62,7 @@ func (r resolverV2) ResolveEndpoint(ctx context.Context, params imagebuilder.End }) params.UseFIPS = aws.Bool(false) } else { - err = fmt.Errorf("looking up imagebuilder endpoint %q: %s", hostname, err) + err = fmt.Errorf("looking up imagebuilder endpoint %q: %w", hostname, err) return } } else { diff --git a/internal/service/imagebuilder/service_endpoints_gen_test.go b/internal/service/imagebuilder/service_endpoints_gen_test.go index 79e8b654872c..e292d7a86f9f 100644 --- a/internal/service/imagebuilder/service_endpoints_gen_test.go +++ b/internal/service/imagebuilder/service_endpoints_gen_test.go @@ -521,7 +521,7 @@ func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { ) } -var errCancelOperation = fmt.Errorf("Test: Canceling request") +var errCancelOperation = errors.New("Test: Canceling request") func addCancelRequestMiddleware() func(*middleware.Stack) error { return func(stack *middleware.Stack) error { diff --git a/internal/service/imagebuilder/service_package_gen.go b/internal/service/imagebuilder/service_package_gen.go index f4c8e653a677..2f30629ac89c 100644 --- a/internal/service/imagebuilder/service_package_gen.go +++ b/internal/service/imagebuilder/service_package_gen.go @@ -7,7 +7,6 @@ import ( "unique" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/imagebuilder" 
"github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -149,6 +148,12 @@ func (p *servicePackage) SDKResources(ctx context.Context) []*inttypes.ServicePa IdentifierAttribute: names.AttrID, }), Region: unique.Make(inttypes.ResourceRegionDefault()), + Identity: inttypes.RegionalARNIdentity( + inttypes.WithIdentityDuplicateAttrs(names.AttrID), + ), + Import: inttypes.SDKv2Import{ + WrappedImport: true, + }, }, { Factory: resourceDistributionConfiguration, @@ -158,6 +163,12 @@ func (p *servicePackage) SDKResources(ctx context.Context) []*inttypes.ServicePa IdentifierAttribute: names.AttrID, }), Region: unique.Make(inttypes.ResourceRegionDefault()), + Identity: inttypes.RegionalARNIdentity( + inttypes.WithIdentityDuplicateAttrs(names.AttrID), + ), + Import: inttypes.SDKv2Import{ + WrappedImport: true, + }, }, { Factory: resourceImage, @@ -167,6 +178,12 @@ func (p *servicePackage) SDKResources(ctx context.Context) []*inttypes.ServicePa IdentifierAttribute: names.AttrID, }), Region: unique.Make(inttypes.ResourceRegionDefault()), + Identity: inttypes.RegionalARNIdentity( + inttypes.WithIdentityDuplicateAttrs(names.AttrID), + ), + Import: inttypes.SDKv2Import{ + WrappedImport: true, + }, }, { Factory: resourceImagePipeline, @@ -176,6 +193,12 @@ func (p *servicePackage) SDKResources(ctx context.Context) []*inttypes.ServicePa IdentifierAttribute: names.AttrID, }), Region: unique.Make(inttypes.ResourceRegionDefault()), + Identity: inttypes.RegionalARNIdentity( + inttypes.WithIdentityDuplicateAttrs(names.AttrID), + ), + Import: inttypes.SDKv2Import{ + WrappedImport: true, + }, }, { Factory: resourceImageRecipe, @@ -185,6 +208,12 @@ func (p *servicePackage) SDKResources(ctx context.Context) []*inttypes.ServicePa IdentifierAttribute: names.AttrID, }), Region: unique.Make(inttypes.ResourceRegionDefault()), + Identity: inttypes.RegionalARNIdentity( + inttypes.WithIdentityDuplicateAttrs(names.AttrID), + ), + Import: 
inttypes.SDKv2Import{ + WrappedImport: true, + }, }, { Factory: resourceInfrastructureConfiguration, @@ -194,6 +223,12 @@ func (p *servicePackage) SDKResources(ctx context.Context) []*inttypes.ServicePa IdentifierAttribute: names.AttrID, }), Region: unique.Make(inttypes.ResourceRegionDefault()), + Identity: inttypes.RegionalARNIdentity( + inttypes.WithIdentityDuplicateAttrs(names.AttrID), + ), + Import: inttypes.SDKv2Import{ + WrappedImport: true, + }, }, { Factory: resourceWorkflow, @@ -203,6 +238,12 @@ func (p *servicePackage) SDKResources(ctx context.Context) []*inttypes.ServicePa IdentifierAttribute: names.AttrID, }), Region: unique.Make(inttypes.ResourceRegionDefault()), + Identity: inttypes.RegionalARNIdentity( + inttypes.WithIdentityDuplicateAttrs(names.AttrID), + ), + Import: inttypes.SDKv2Import{ + WrappedImport: true, + }, }, } } @@ -230,7 +271,7 @@ func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) ( func(o *imagebuilder.Options) { if inContext, ok := conns.FromContext(ctx); ok && inContext.VCREnabled() { tflog.Info(ctx, "overriding retry behavior to immediately return VCR errors") - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(vcr.InteractionNotFoundRetryableFunc)) + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), vcr.InteractionNotFoundRetryableFunc) } }, withExtraOptions(ctx, p, config), diff --git a/internal/service/imagebuilder/sweep.go b/internal/service/imagebuilder/sweep.go index dfc0deae5fd9..dc341b44e64e 100644 --- a/internal/service/imagebuilder/sweep.go +++ b/internal/service/imagebuilder/sweep.go @@ -103,7 +103,7 @@ func sweepDistributionConfigurations(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.ImageBuilderClient(ctx) input := 
&imagebuilder.ListDistributionConfigurationsInput{} @@ -144,7 +144,7 @@ func sweepImagePipelines(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %w", err) + return fmt.Errorf("getting client: %w", err) } conn := client.ImageBuilderClient(ctx) input := &imagebuilder.ListImagePipelinesInput{} @@ -185,7 +185,7 @@ func sweepImageRecipes(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %w", err) + return fmt.Errorf("getting client: %w", err) } conn := client.ImageBuilderClient(ctx) input := &imagebuilder.ListImageRecipesInput{} @@ -226,7 +226,7 @@ func sweepContainerRecipes(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %w", err) + return fmt.Errorf("getting client: %w", err) } conn := client.ImageBuilderClient(ctx) input := &imagebuilder.ListContainerRecipesInput{} @@ -267,7 +267,7 @@ func sweepImages(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %w", err) + return fmt.Errorf("getting client: %w", err) } conn := client.ImageBuilderClient(ctx) input := &imagebuilder.ListImagesInput{} @@ -308,7 +308,7 @@ func sweepInfrastructureConfigurations(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.ImageBuilderClient(ctx) input := &imagebuilder.ListInfrastructureConfigurationsInput{} @@ -349,7 +349,7 @@ func sweepLifecyclePolicies(region string) error { ctx := sweep.Context(region) client, err := 
sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.ImageBuilderClient(ctx) input := &imagebuilder.ListLifecyclePoliciesInput{} diff --git a/internal/service/imagebuilder/tags_gen.go b/internal/service/imagebuilder/tags_gen.go index 87964fef9151..48ae0b558c00 100644 --- a/internal/service/imagebuilder/tags_gen.go +++ b/internal/service/imagebuilder/tags_gen.go @@ -3,8 +3,8 @@ package imagebuilder import ( "context" - "fmt" + "github.com/YakDriver/smarterr" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/imagebuilder" "github.com/hashicorp/terraform-plugin-log/tflog" @@ -26,7 +26,7 @@ func listTags(ctx context.Context, conn *imagebuilder.Client, identifier string, output, err := conn.ListTagsForResource(ctx, &input, optFns...) if err != nil { - return tftags.New(ctx, nil), err + return tftags.New(ctx, nil), smarterr.NewError(err) } return keyValueTags(ctx, output.Tags), nil @@ -38,7 +38,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri tags, err := listTags(ctx, meta.(*conns.AWSClient).ImageBuilderClient(ctx), identifier) if err != nil { - return err + return smarterr.NewError(err) } if inContext, ok := tftags.FromContext(ctx); ok { @@ -99,7 +99,7 @@ func updateTags(ctx context.Context, conn *imagebuilder.Client, identifier strin _, err := conn.UntagResource(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("untagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } @@ -114,7 +114,7 @@ func updateTags(ctx context.Context, conn *imagebuilder.Client, identifier strin _, err := conn.TagResource(ctx, &input, optFns...) 
if err != nil { - return fmt.Errorf("tagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } diff --git a/internal/service/imagebuilder/testdata/ContainerRecipe/basic/main_gen.tf b/internal/service/imagebuilder/testdata/ContainerRecipe/basic/main_gen.tf new file mode 100644 index 000000000000..98debfa705c5 --- /dev/null +++ b/internal/service/imagebuilder/testdata/ContainerRecipe/basic/main_gen.tf @@ -0,0 +1,34 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resource "aws_imagebuilder_container_recipe" "test" { + name = var.rName + container_type = "DOCKER" + parent_image = "arn:${data.aws_partition.current.partition}:imagebuilder:${data.aws_region.current.name}:aws:image/amazon-linux-x86-2/x.x.x" + version = "1.0.0" + + component { + component_arn = "arn:${data.aws_partition.current.partition}:imagebuilder:${data.aws_region.current.name}:aws:component/update-linux/x.x.x" + } + + dockerfile_template_data = "FROM $${imagebuilder:parentImage}\n$${imagebuilder:environments}\n$${imagebuilder:components}" + + target_repository { + repository_name = aws_ecr_repository.test.name + service = "ECR" + } +} + +resource "aws_ecr_repository" "test" { + name = var.rName +} + +data "aws_partition" "current" {} +data "aws_region" "current" { +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} diff --git a/internal/service/imagebuilder/testdata/ContainerRecipe/basic_v6.3.0/main_gen.tf b/internal/service/imagebuilder/testdata/ContainerRecipe/basic_v6.3.0/main_gen.tf new file mode 100644 index 000000000000..054e2cd15bf9 --- /dev/null +++ b/internal/service/imagebuilder/testdata/ContainerRecipe/basic_v6.3.0/main_gen.tf @@ -0,0 +1,44 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_imagebuilder_container_recipe" "test" { + name = var.rName + container_type = "DOCKER" + parent_image = "arn:${data.aws_partition.current.partition}:imagebuilder:${data.aws_region.current.name}:aws:image/amazon-linux-x86-2/x.x.x" + version = "1.0.0" + + component { + component_arn = "arn:${data.aws_partition.current.partition}:imagebuilder:${data.aws_region.current.name}:aws:component/update-linux/x.x.x" + } + + dockerfile_template_data = "FROM $${imagebuilder:parentImage}\n$${imagebuilder:environments}\n$${imagebuilder:components}" + + target_repository { + repository_name = aws_ecr_repository.test.name + service = "ECR" + } +} + +resource "aws_ecr_repository" "test" { + name = var.rName +} + +data "aws_partition" "current" {} +data "aws_region" "current" { +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "6.3.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/imagebuilder/testdata/ContainerRecipe/region_override/main_gen.tf b/internal/service/imagebuilder/testdata/ContainerRecipe/region_override/main_gen.tf new file mode 100644 index 000000000000..8e25d10afc4f --- /dev/null +++ b/internal/service/imagebuilder/testdata/ContainerRecipe/region_override/main_gen.tf @@ -0,0 +1,46 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_imagebuilder_container_recipe" "test" { + region = var.region + + name = var.rName + container_type = "DOCKER" + parent_image = "arn:${data.aws_partition.current.partition}:imagebuilder:${data.aws_region.current.name}:aws:image/amazon-linux-x86-2/x.x.x" + version = "1.0.0" + + component { + component_arn = "arn:${data.aws_partition.current.partition}:imagebuilder:${data.aws_region.current.name}:aws:component/update-linux/x.x.x" + } + + dockerfile_template_data = "FROM $${imagebuilder:parentImage}\n$${imagebuilder:environments}\n$${imagebuilder:components}" + + target_repository { + repository_name = aws_ecr_repository.test.name + service = "ECR" + } +} + +resource "aws_ecr_repository" "test" { + region = var.region + + name = var.rName +} + +data "aws_partition" "current" {} +data "aws_region" "current" { + region = var.region + +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} + +variable "region" { + description = "Region to deploy resource in" + type = string + nullable = false +} diff --git a/internal/service/imagebuilder/testdata/DistributionConfiguration/basic/main_gen.tf b/internal/service/imagebuilder/testdata/DistributionConfiguration/basic/main_gen.tf new file mode 100644 index 000000000000..35b418111357 --- /dev/null +++ b/internal/service/imagebuilder/testdata/DistributionConfiguration/basic/main_gen.tf @@ -0,0 +1,23 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_imagebuilder_distribution_configuration" "test" { + name = var.rName + + distribution { + ami_distribution_configuration { + name = "test-name-{{ imagebuilder:buildDate }}" + } + + region = data.aws_region.current.name + } +} + +data "aws_region" "current" { +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} diff --git a/internal/service/imagebuilder/testdata/DistributionConfiguration/basic_v6.3.0/main_gen.tf b/internal/service/imagebuilder/testdata/DistributionConfiguration/basic_v6.3.0/main_gen.tf new file mode 100644 index 000000000000..ab590064d136 --- /dev/null +++ b/internal/service/imagebuilder/testdata/DistributionConfiguration/basic_v6.3.0/main_gen.tf @@ -0,0 +1,33 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resource "aws_imagebuilder_distribution_configuration" "test" { + name = var.rName + + distribution { + ami_distribution_configuration { + name = "test-name-{{ imagebuilder:buildDate }}" + } + + region = data.aws_region.current.name + } +} + +data "aws_region" "current" { +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "6.3.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/imagebuilder/testdata/DistributionConfiguration/region_override/main_gen.tf b/internal/service/imagebuilder/testdata/DistributionConfiguration/region_override/main_gen.tf new file mode 100644 index 000000000000..a751f3699904 --- /dev/null +++ b/internal/service/imagebuilder/testdata/DistributionConfiguration/region_override/main_gen.tf @@ -0,0 +1,33 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_imagebuilder_distribution_configuration" "test" { + region = var.region + + name = var.rName + + distribution { + ami_distribution_configuration { + name = "test-name-{{ imagebuilder:buildDate }}" + } + + region = data.aws_region.current.name + } +} + +data "aws_region" "current" { + region = var.region + +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} + +variable "region" { + description = "Region to deploy resource in" + type = string + nullable = false +} diff --git a/internal/service/imagebuilder/testdata/Image/basic/main_gen.tf b/internal/service/imagebuilder/testdata/Image/basic/main_gen.tf new file mode 100644 index 000000000000..778563811c7b --- /dev/null +++ b/internal/service/imagebuilder/testdata/Image/basic/main_gen.tf @@ -0,0 +1,132 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resource "aws_imagebuilder_image" "test" { + image_recipe_arn = aws_imagebuilder_image_recipe.test.arn + infrastructure_configuration_arn = aws_imagebuilder_infrastructure_configuration.test.arn +} + +# acctest.ConfigAvailableAZsNoOptInDefaultExclude + +data "aws_availability_zones" "available" { + exclude_zone_ids = local.default_exclude_zone_ids + state = "available" + + filter { + name = "opt-in-status" + values = ["opt-in-not-required"] + } +} + +locals { + default_exclude_zone_ids = ["usw2-az4", "usgw1-az2"] +} + +resource "aws_vpc" "test" { + cidr_block = "10.0.0.0/16" +} + +resource "aws_subnet" "test" { + vpc_id = aws_vpc.test.id + availability_zone = data.aws_availability_zones.available.names[0] + cidr_block = cidrsubnet(aws_vpc.test.cidr_block, 8, 0) + map_public_ip_on_launch = true +} + +resource "aws_default_route_table" "test" { + default_route_table_id = aws_vpc.test.default_route_table_id + + route { + cidr_block = "0.0.0.0/0" + gateway_id = aws_internet_gateway.test.id + } +} + +resource "aws_default_security_group" "test" { + 
vpc_id = aws_vpc.test.id + + egress { + cidr_blocks = ["0.0.0.0/0"] + from_port = 0 + protocol = "-1" + to_port = 0 + } + + ingress { + from_port = 0 + protocol = -1 + self = true + to_port = 0 + } +} + +resource "aws_internet_gateway" "test" { + vpc_id = aws_vpc.test.id +} + +resource "aws_iam_role" "test" { + name = var.rName + assume_role_policy = jsonencode({ + Version = "2012-10-17" + Statement = [{ + Action = "sts:AssumeRole" + Effect = "Allow" + Principal = { + Service = "ec2.${data.aws_partition.current.dns_suffix}" + } + }] + }) +} + +resource "aws_iam_role_policy_attachment" "AmazonSSMManagedInstanceCore" { + policy_arn = "arn:${data.aws_partition.current.partition}:iam::aws:policy/AmazonSSMManagedInstanceCore" + role = aws_iam_role.test.name +} + +resource "aws_iam_role_policy_attachment" "EC2InstanceProfileForImageBuilder" { + policy_arn = "arn:${data.aws_partition.current.partition}:iam::aws:policy/EC2InstanceProfileForImageBuilder" + role = aws_iam_role.test.name +} + +resource "aws_iam_instance_profile" "test" { + name = aws_iam_role.test.name + role = aws_iam_role.test.name + + depends_on = [ + aws_iam_role_policy_attachment.AmazonSSMManagedInstanceCore, + aws_iam_role_policy_attachment.EC2InstanceProfileForImageBuilder, + ] +} + +resource "aws_imagebuilder_image_recipe" "test" { + component { + component_arn = data.aws_imagebuilder_component.update-linux.arn + } + + name = var.rName + parent_image = "arn:${data.aws_partition.current.partition}:imagebuilder:${data.aws_region.current.name}:aws:image/amazon-linux-2-x86/x.x.x" + version = "1.0.0" +} + +resource "aws_imagebuilder_infrastructure_configuration" "test" { + instance_profile_name = aws_iam_instance_profile.test.name + name = var.rName + security_group_ids = [aws_default_security_group.test.id] + subnet_id = aws_subnet.test.id + + depends_on = [aws_default_route_table.test] +} + +data "aws_imagebuilder_component" "update-linux" { + arn = 
"arn:${data.aws_partition.current.partition}:imagebuilder:${data.aws_region.current.region}:aws:component/update-linux/1.0.2" +} +data "aws_partition" "current" {} +data "aws_region" "current" { +} + + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} diff --git a/internal/service/imagebuilder/testdata/Image/basic_v6.3.0/main_gen.tf b/internal/service/imagebuilder/testdata/Image/basic_v6.3.0/main_gen.tf new file mode 100644 index 000000000000..9279ec3e66c4 --- /dev/null +++ b/internal/service/imagebuilder/testdata/Image/basic_v6.3.0/main_gen.tf @@ -0,0 +1,142 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resource "aws_imagebuilder_image" "test" { + image_recipe_arn = aws_imagebuilder_image_recipe.test.arn + infrastructure_configuration_arn = aws_imagebuilder_infrastructure_configuration.test.arn +} + +# acctest.ConfigAvailableAZsNoOptInDefaultExclude + +data "aws_availability_zones" "available" { + exclude_zone_ids = local.default_exclude_zone_ids + state = "available" + + filter { + name = "opt-in-status" + values = ["opt-in-not-required"] + } +} + +locals { + default_exclude_zone_ids = ["usw2-az4", "usgw1-az2"] +} + +resource "aws_vpc" "test" { + cidr_block = "10.0.0.0/16" +} + +resource "aws_subnet" "test" { + vpc_id = aws_vpc.test.id + availability_zone = data.aws_availability_zones.available.names[0] + cidr_block = cidrsubnet(aws_vpc.test.cidr_block, 8, 0) + map_public_ip_on_launch = true +} + +resource "aws_default_route_table" "test" { + default_route_table_id = aws_vpc.test.default_route_table_id + + route { + cidr_block = "0.0.0.0/0" + gateway_id = aws_internet_gateway.test.id + } +} + +resource "aws_default_security_group" "test" { + vpc_id = aws_vpc.test.id + + egress { + cidr_blocks = ["0.0.0.0/0"] + from_port = 0 + protocol = "-1" + to_port = 0 + } + + ingress { + from_port = 0 + protocol = -1 + self = true + to_port = 0 + } +} + +resource "aws_internet_gateway" "test" { + vpc_id 
= aws_vpc.test.id +} + +resource "aws_iam_role" "test" { + name = var.rName + assume_role_policy = jsonencode({ + Version = "2012-10-17" + Statement = [{ + Action = "sts:AssumeRole" + Effect = "Allow" + Principal = { + Service = "ec2.${data.aws_partition.current.dns_suffix}" + } + }] + }) +} + +resource "aws_iam_role_policy_attachment" "AmazonSSMManagedInstanceCore" { + policy_arn = "arn:${data.aws_partition.current.partition}:iam::aws:policy/AmazonSSMManagedInstanceCore" + role = aws_iam_role.test.name +} + +resource "aws_iam_role_policy_attachment" "EC2InstanceProfileForImageBuilder" { + policy_arn = "arn:${data.aws_partition.current.partition}:iam::aws:policy/EC2InstanceProfileForImageBuilder" + role = aws_iam_role.test.name +} + +resource "aws_iam_instance_profile" "test" { + name = aws_iam_role.test.name + role = aws_iam_role.test.name + + depends_on = [ + aws_iam_role_policy_attachment.AmazonSSMManagedInstanceCore, + aws_iam_role_policy_attachment.EC2InstanceProfileForImageBuilder, + ] +} + +resource "aws_imagebuilder_image_recipe" "test" { + component { + component_arn = data.aws_imagebuilder_component.update-linux.arn + } + + name = var.rName + parent_image = "arn:${data.aws_partition.current.partition}:imagebuilder:${data.aws_region.current.name}:aws:image/amazon-linux-2-x86/x.x.x" + version = "1.0.0" +} + +resource "aws_imagebuilder_infrastructure_configuration" "test" { + instance_profile_name = aws_iam_instance_profile.test.name + name = var.rName + security_group_ids = [aws_default_security_group.test.id] + subnet_id = aws_subnet.test.id + + depends_on = [aws_default_route_table.test] +} + +data "aws_imagebuilder_component" "update-linux" { + arn = "arn:${data.aws_partition.current.partition}:imagebuilder:${data.aws_region.current.region}:aws:component/update-linux/1.0.2" +} +data "aws_partition" "current" {} +data "aws_region" "current" { +} + + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} +terraform { 
+ required_providers { + aws = { + source = "hashicorp/aws" + version = "6.3.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/imagebuilder/testdata/Image/region_override/main_gen.tf b/internal/service/imagebuilder/testdata/Image/region_override/main_gen.tf new file mode 100644 index 000000000000..ee01852c586d --- /dev/null +++ b/internal/service/imagebuilder/testdata/Image/region_override/main_gen.tf @@ -0,0 +1,160 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resource "aws_imagebuilder_image" "test" { + region = var.region + + image_recipe_arn = aws_imagebuilder_image_recipe.test.arn + infrastructure_configuration_arn = aws_imagebuilder_infrastructure_configuration.test.arn +} + +# acctest.ConfigAvailableAZsNoOptInDefaultExclude + +data "aws_availability_zones" "available" { + region = var.region + + exclude_zone_ids = local.default_exclude_zone_ids + state = "available" + + filter { + name = "opt-in-status" + values = ["opt-in-not-required"] + } +} + +locals { + default_exclude_zone_ids = ["usw2-az4", "usgw1-az2"] +} + +resource "aws_vpc" "test" { + region = var.region + + cidr_block = "10.0.0.0/16" +} + +resource "aws_subnet" "test" { + region = var.region + + vpc_id = aws_vpc.test.id + availability_zone = data.aws_availability_zones.available.names[0] + cidr_block = cidrsubnet(aws_vpc.test.cidr_block, 8, 0) + map_public_ip_on_launch = true +} + +resource "aws_default_route_table" "test" { + region = var.region + + default_route_table_id = aws_vpc.test.default_route_table_id + + route { + cidr_block = "0.0.0.0/0" + gateway_id = aws_internet_gateway.test.id + } +} + +resource "aws_default_security_group" "test" { + region = var.region + + vpc_id = aws_vpc.test.id + + egress { + cidr_blocks = ["0.0.0.0/0"] + from_port = 0 + protocol = "-1" + to_port = 0 + } + + ingress { + from_port = 0 + protocol = -1 + self = true + to_port = 0 + } +} + +resource "aws_internet_gateway" "test" { + region = var.region + + vpc_id = 
aws_vpc.test.id +} + +resource "aws_iam_role" "test" { + name = var.rName + assume_role_policy = jsonencode({ + Version = "2012-10-17" + Statement = [{ + Action = "sts:AssumeRole" + Effect = "Allow" + Principal = { + Service = "ec2.${data.aws_partition.current.dns_suffix}" + } + }] + }) +} + +resource "aws_iam_role_policy_attachment" "AmazonSSMManagedInstanceCore" { + policy_arn = "arn:${data.aws_partition.current.partition}:iam::aws:policy/AmazonSSMManagedInstanceCore" + role = aws_iam_role.test.name +} + +resource "aws_iam_role_policy_attachment" "EC2InstanceProfileForImageBuilder" { + policy_arn = "arn:${data.aws_partition.current.partition}:iam::aws:policy/EC2InstanceProfileForImageBuilder" + role = aws_iam_role.test.name +} + +resource "aws_iam_instance_profile" "test" { + name = aws_iam_role.test.name + role = aws_iam_role.test.name + + depends_on = [ + aws_iam_role_policy_attachment.AmazonSSMManagedInstanceCore, + aws_iam_role_policy_attachment.EC2InstanceProfileForImageBuilder, + ] +} + +resource "aws_imagebuilder_image_recipe" "test" { + region = var.region + + component { + component_arn = data.aws_imagebuilder_component.update-linux.arn + } + + name = var.rName + parent_image = "arn:${data.aws_partition.current.partition}:imagebuilder:${data.aws_region.current.name}:aws:image/amazon-linux-2-x86/x.x.x" + version = "1.0.0" +} + +resource "aws_imagebuilder_infrastructure_configuration" "test" { + region = var.region + + instance_profile_name = aws_iam_instance_profile.test.name + name = var.rName + security_group_ids = [aws_default_security_group.test.id] + subnet_id = aws_subnet.test.id + + depends_on = [aws_default_route_table.test] +} + +data "aws_imagebuilder_component" "update-linux" { + region = var.region + + arn = "arn:${data.aws_partition.current.partition}:imagebuilder:${data.aws_region.current.region}:aws:component/update-linux/1.0.2" +} +data "aws_partition" "current" {} +data "aws_region" "current" { + region = var.region + +} + + +variable 
"rName" { + description = "Name for resource" + type = string + nullable = false +} + +variable "region" { + description = "Region to deploy resource in" + type = string + nullable = false +} diff --git a/internal/service/imagebuilder/testdata/ImagePipeline/basic/main_gen.tf b/internal/service/imagebuilder/testdata/ImagePipeline/basic/main_gen.tf new file mode 100644 index 000000000000..6b7c8c9d3b72 --- /dev/null +++ b/internal/service/imagebuilder/testdata/ImagePipeline/basic/main_gen.tf @@ -0,0 +1,52 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resource "aws_imagebuilder_image_pipeline" "test" { + image_recipe_arn = aws_imagebuilder_image_recipe.test.arn + infrastructure_configuration_arn = aws_imagebuilder_infrastructure_configuration.test.arn + name = var.rName +} + +resource "aws_imagebuilder_image_recipe" "test" { + component { + component_arn = "arn:${data.aws_partition.current.partition}:imagebuilder:${data.aws_region.current.name}:aws:component/update-linux/x.x.x" + } + + name = var.rName + parent_image = "arn:${data.aws_partition.current.partition}:imagebuilder:${data.aws_region.current.name}:aws:image/amazon-linux-2-x86/x.x.x" + version = "1.0.0" +} + +resource "aws_imagebuilder_infrastructure_configuration" "test" { + instance_profile_name = aws_iam_instance_profile.test.name + name = var.rName +} + +resource "aws_iam_instance_profile" "test" { + name = var.rName + role = aws_iam_role.test.name +} + +resource "aws_iam_role" "test" { + assume_role_policy = jsonencode({ + Version = "2012-10-17" + Statement = [{ + Action = "sts:AssumeRole" + Effect = "Allow" + Principal = { + Service = "ec2.amazonaws.com" + } + }] + }) + name = var.rName +} + +data "aws_partition" "current" {} +data "aws_region" "current" { +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} diff --git a/internal/service/imagebuilder/testdata/ImagePipeline/basic_v6.3.0/main_gen.tf 
b/internal/service/imagebuilder/testdata/ImagePipeline/basic_v6.3.0/main_gen.tf new file mode 100644 index 000000000000..8e3692b492b9 --- /dev/null +++ b/internal/service/imagebuilder/testdata/ImagePipeline/basic_v6.3.0/main_gen.tf @@ -0,0 +1,62 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resource "aws_imagebuilder_image_pipeline" "test" { + image_recipe_arn = aws_imagebuilder_image_recipe.test.arn + infrastructure_configuration_arn = aws_imagebuilder_infrastructure_configuration.test.arn + name = var.rName +} + +resource "aws_imagebuilder_image_recipe" "test" { + component { + component_arn = "arn:${data.aws_partition.current.partition}:imagebuilder:${data.aws_region.current.name}:aws:component/update-linux/x.x.x" + } + + name = var.rName + parent_image = "arn:${data.aws_partition.current.partition}:imagebuilder:${data.aws_region.current.name}:aws:image/amazon-linux-2-x86/x.x.x" + version = "1.0.0" +} + +resource "aws_imagebuilder_infrastructure_configuration" "test" { + instance_profile_name = aws_iam_instance_profile.test.name + name = var.rName +} + +resource "aws_iam_instance_profile" "test" { + name = var.rName + role = aws_iam_role.test.name +} + +resource "aws_iam_role" "test" { + assume_role_policy = jsonencode({ + Version = "2012-10-17" + Statement = [{ + Action = "sts:AssumeRole" + Effect = "Allow" + Principal = { + Service = "ec2.amazonaws.com" + } + }] + }) + name = var.rName +} + +data "aws_partition" "current" {} +data "aws_region" "current" { +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "6.3.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/imagebuilder/testdata/ImagePipeline/region_override/main_gen.tf b/internal/service/imagebuilder/testdata/ImagePipeline/region_override/main_gen.tf new file mode 100644 index 000000000000..48b0bc8aff79 --- /dev/null +++ 
b/internal/service/imagebuilder/testdata/ImagePipeline/region_override/main_gen.tf @@ -0,0 +1,66 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resource "aws_imagebuilder_image_pipeline" "test" { + region = var.region + + image_recipe_arn = aws_imagebuilder_image_recipe.test.arn + infrastructure_configuration_arn = aws_imagebuilder_infrastructure_configuration.test.arn + name = var.rName +} + +resource "aws_imagebuilder_image_recipe" "test" { + region = var.region + + component { + component_arn = "arn:${data.aws_partition.current.partition}:imagebuilder:${data.aws_region.current.name}:aws:component/update-linux/x.x.x" + } + + name = var.rName + parent_image = "arn:${data.aws_partition.current.partition}:imagebuilder:${data.aws_region.current.name}:aws:image/amazon-linux-2-x86/x.x.x" + version = "1.0.0" +} + +resource "aws_imagebuilder_infrastructure_configuration" "test" { + region = var.region + + instance_profile_name = aws_iam_instance_profile.test.name + name = var.rName +} + +resource "aws_iam_instance_profile" "test" { + name = var.rName + role = aws_iam_role.test.name +} + +resource "aws_iam_role" "test" { + assume_role_policy = jsonencode({ + Version = "2012-10-17" + Statement = [{ + Action = "sts:AssumeRole" + Effect = "Allow" + Principal = { + Service = "ec2.amazonaws.com" + } + }] + }) + name = var.rName +} + +data "aws_partition" "current" {} +data "aws_region" "current" { + region = var.region + +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} + +variable "region" { + description = "Region to deploy resource in" + type = string + nullable = false +} diff --git a/internal/service/imagebuilder/testdata/ImageRecipe/basic/main_gen.tf b/internal/service/imagebuilder/testdata/ImageRecipe/basic/main_gen.tf new file mode 100644 index 000000000000..40c55a9bee87 --- /dev/null +++ b/internal/service/imagebuilder/testdata/ImageRecipe/basic/main_gen.tf @@ -0,0 +1,42 @@ +# Copyright (c) 
HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resource "aws_imagebuilder_image_recipe" "test" { + component { + component_arn = aws_imagebuilder_component.test.arn + } + + name = var.rName + parent_image = "arn:${data.aws_partition.current.partition}:imagebuilder:${data.aws_region.current.name}:aws:image/amazon-linux-2-x86/x.x.x" + version = "1.0.0" +} + +resource "aws_imagebuilder_component" "test" { + data = yamlencode({ + phases = [{ + name = "build" + steps = [{ + action = "ExecuteBash" + inputs = { + commands = ["echo 'hello world'"] + } + name = "example" + onFailure = "Continue" + }] + }] + schemaVersion = 1.0 + }) + name = var.rName + platform = "Linux" + version = "1.0.0" +} + +data "aws_partition" "current" {} +data "aws_region" "current" { +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} diff --git a/internal/service/imagebuilder/testdata/ImageRecipe/basic_v6.3.0/main_gen.tf b/internal/service/imagebuilder/testdata/ImageRecipe/basic_v6.3.0/main_gen.tf new file mode 100644 index 000000000000..9a4dff598ddf --- /dev/null +++ b/internal/service/imagebuilder/testdata/ImageRecipe/basic_v6.3.0/main_gen.tf @@ -0,0 +1,52 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_imagebuilder_image_recipe" "test" { + component { + component_arn = aws_imagebuilder_component.test.arn + } + + name = var.rName + parent_image = "arn:${data.aws_partition.current.partition}:imagebuilder:${data.aws_region.current.name}:aws:image/amazon-linux-2-x86/x.x.x" + version = "1.0.0" +} + +resource "aws_imagebuilder_component" "test" { + data = yamlencode({ + phases = [{ + name = "build" + steps = [{ + action = "ExecuteBash" + inputs = { + commands = ["echo 'hello world'"] + } + name = "example" + onFailure = "Continue" + }] + }] + schemaVersion = 1.0 + }) + name = var.rName + platform = "Linux" + version = "1.0.0" +} + +data "aws_partition" "current" {} +data "aws_region" "current" { +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "6.3.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/imagebuilder/testdata/ImageRecipe/region_override/main_gen.tf b/internal/service/imagebuilder/testdata/ImageRecipe/region_override/main_gen.tf new file mode 100644 index 000000000000..db756dd2cf93 --- /dev/null +++ b/internal/service/imagebuilder/testdata/ImageRecipe/region_override/main_gen.tf @@ -0,0 +1,54 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_imagebuilder_image_recipe" "test" { + region = var.region + + component { + component_arn = aws_imagebuilder_component.test.arn + } + + name = var.rName + parent_image = "arn:${data.aws_partition.current.partition}:imagebuilder:${data.aws_region.current.name}:aws:image/amazon-linux-2-x86/x.x.x" + version = "1.0.0" +} + +resource "aws_imagebuilder_component" "test" { + region = var.region + + data = yamlencode({ + phases = [{ + name = "build" + steps = [{ + action = "ExecuteBash" + inputs = { + commands = ["echo 'hello world'"] + } + name = "example" + onFailure = "Continue" + }] + }] + schemaVersion = 1.0 + }) + name = var.rName + platform = "Linux" + version = "1.0.0" +} + +data "aws_partition" "current" {} +data "aws_region" "current" { + region = var.region + +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} + +variable "region" { + description = "Region to deploy resource in" + type = string + nullable = false +} diff --git a/internal/service/imagebuilder/testdata/InfrastructureConfiguration/basic/main_gen.tf b/internal/service/imagebuilder/testdata/InfrastructureConfiguration/basic/main_gen.tf new file mode 100644 index 000000000000..f5100bcbc8b0 --- /dev/null +++ b/internal/service/imagebuilder/testdata/InfrastructureConfiguration/basic/main_gen.tf @@ -0,0 +1,32 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_imagebuilder_infrastructure_configuration" "test" { + instance_profile_name = aws_iam_instance_profile.test.name + name = var.rName +} + +resource "aws_iam_instance_profile" "test" { + name = var.rName + role = aws_iam_role.test.name +} + +resource "aws_iam_role" "test" { + assume_role_policy = jsonencode({ + Version = "2012-10-17" + Statement = [{ + Action = "sts:AssumeRole" + Effect = "Allow" + Principal = { + Service = "ec2.amazonaws.com" + } + }] + }) + name = var.rName +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} diff --git a/internal/service/imagebuilder/testdata/InfrastructureConfiguration/basic_v6.3.0/main_gen.tf b/internal/service/imagebuilder/testdata/InfrastructureConfiguration/basic_v6.3.0/main_gen.tf new file mode 100644 index 000000000000..160fba4374ec --- /dev/null +++ b/internal/service/imagebuilder/testdata/InfrastructureConfiguration/basic_v6.3.0/main_gen.tf @@ -0,0 +1,42 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_imagebuilder_infrastructure_configuration" "test" { + instance_profile_name = aws_iam_instance_profile.test.name + name = var.rName +} + +resource "aws_iam_instance_profile" "test" { + name = var.rName + role = aws_iam_role.test.name +} + +resource "aws_iam_role" "test" { + assume_role_policy = jsonencode({ + Version = "2012-10-17" + Statement = [{ + Action = "sts:AssumeRole" + Effect = "Allow" + Principal = { + Service = "ec2.amazonaws.com" + } + }] + }) + name = var.rName +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "6.3.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/imagebuilder/testdata/InfrastructureConfiguration/region_override/main_gen.tf b/internal/service/imagebuilder/testdata/InfrastructureConfiguration/region_override/main_gen.tf new file mode 100644 index 000000000000..7606066db215 --- /dev/null +++ b/internal/service/imagebuilder/testdata/InfrastructureConfiguration/region_override/main_gen.tf @@ -0,0 +1,40 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_imagebuilder_infrastructure_configuration" "test" { + region = var.region + + instance_profile_name = aws_iam_instance_profile.test.name + name = var.rName +} + +resource "aws_iam_instance_profile" "test" { + name = var.rName + role = aws_iam_role.test.name +} + +resource "aws_iam_role" "test" { + assume_role_policy = jsonencode({ + Version = "2012-10-17" + Statement = [{ + Action = "sts:AssumeRole" + Effect = "Allow" + Principal = { + Service = "ec2.amazonaws.com" + } + }] + }) + name = var.rName +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} + +variable "region" { + description = "Region to deploy resource in" + type = string + nullable = false +} diff --git a/internal/service/imagebuilder/testdata/LifecyclePolicy/basic_v5.100.0/main_gen.tf b/internal/service/imagebuilder/testdata/LifecyclePolicy/basic_v5.100.0/main_gen.tf new file mode 100644 index 000000000000..529fbaaae67e --- /dev/null +++ b/internal/service/imagebuilder/testdata/LifecyclePolicy/basic_v5.100.0/main_gen.tf @@ -0,0 +1,67 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_imagebuilder_lifecycle_policy" "test" { + name = var.rName + description = "Used for setting lifecycle policies" + execution_role = aws_iam_role.test.arn + resource_type = "AMI_IMAGE" + policy_detail { + action { + type = "DELETE" + } + filter { + type = "AGE" + value = 6 + retain_at_least = 10 + unit = "YEARS" + } + } + resource_selection { + tag_map = { + "key1" = "value1" + "key2" = "value2" + } + } + + depends_on = [aws_iam_role_policy_attachment.test] +} + +# testAccLifecyclePolicyConfig_base + +resource "aws_iam_role" "test" { + name = var.rName + assume_role_policy = jsonencode({ + Version = "2012-10-17" + Statement = [{ + Action = "sts:AssumeRole" + Effect = "Allow" + Principal = { + Service = "imagebuilder.${data.aws_partition.current.dns_suffix}" + } + }] + }) +} + +resource "aws_iam_role_policy_attachment" "test" { + policy_arn = "arn:${data.aws_partition.current.partition}:iam::aws:policy/service-role/EC2ImageBuilderLifecycleExecutionPolicy" + role = aws_iam_role.test.name +} + +data "aws_partition" "current" {} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "5.100.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/imagebuilder/testdata/LifecyclePolicy/basic_v6.0.0/main_gen.tf b/internal/service/imagebuilder/testdata/LifecyclePolicy/basic_v6.0.0/main_gen.tf new file mode 100644 index 000000000000..58148054f949 --- /dev/null +++ b/internal/service/imagebuilder/testdata/LifecyclePolicy/basic_v6.0.0/main_gen.tf @@ -0,0 +1,67 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_imagebuilder_lifecycle_policy" "test" { + name = var.rName + description = "Used for setting lifecycle policies" + execution_role = aws_iam_role.test.arn + resource_type = "AMI_IMAGE" + policy_detail { + action { + type = "DELETE" + } + filter { + type = "AGE" + value = 6 + retain_at_least = 10 + unit = "YEARS" + } + } + resource_selection { + tag_map = { + "key1" = "value1" + "key2" = "value2" + } + } + + depends_on = [aws_iam_role_policy_attachment.test] +} + +# testAccLifecyclePolicyConfig_base + +resource "aws_iam_role" "test" { + name = var.rName + assume_role_policy = jsonencode({ + Version = "2012-10-17" + Statement = [{ + Action = "sts:AssumeRole" + Effect = "Allow" + Principal = { + Service = "imagebuilder.${data.aws_partition.current.dns_suffix}" + } + }] + }) +} + +resource "aws_iam_role_policy_attachment" "test" { + policy_arn = "arn:${data.aws_partition.current.partition}:iam::aws:policy/service-role/EC2ImageBuilderLifecycleExecutionPolicy" + role = aws_iam_role.test.name +} + +data "aws_partition" "current" {} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "6.0.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/imagebuilder/testdata/Workflow/basic/main_gen.tf b/internal/service/imagebuilder/testdata/Workflow/basic/main_gen.tf new file mode 100644 index 000000000000..022ff2de4181 --- /dev/null +++ b/internal/service/imagebuilder/testdata/Workflow/basic/main_gen.tf @@ -0,0 +1,43 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_imagebuilder_workflow" "test" { + name = var.rName + version = "1.0.0" + type = "TEST" + + data = <<-EOT + name: test-image + description: Workflow to test an image + schemaVersion: 1.0 + + parameters: + - name: waitForActionAtEnd + type: boolean + + steps: + - name: LaunchTestInstance + action: LaunchInstance + onFailure: Abort + inputs: + waitFor: "ssmAgent" + + - name: TerminateTestInstance + action: TerminateInstance + onFailure: Continue + inputs: + instanceId.$: "$.stepOutputs.LaunchTestInstance.instanceId" + + - name: WaitForActionAtEnd + action: WaitForAction + if: + booleanEquals: true + value: "$.parameters.waitForActionAtEnd" + EOT +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} diff --git a/internal/service/imagebuilder/testdata/Workflow/basic_v6.3.0/main_gen.tf b/internal/service/imagebuilder/testdata/Workflow/basic_v6.3.0/main_gen.tf new file mode 100644 index 000000000000..228e838ef737 --- /dev/null +++ b/internal/service/imagebuilder/testdata/Workflow/basic_v6.3.0/main_gen.tf @@ -0,0 +1,53 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_imagebuilder_workflow" "test" { + name = var.rName + version = "1.0.0" + type = "TEST" + + data = <<-EOT + name: test-image + description: Workflow to test an image + schemaVersion: 1.0 + + parameters: + - name: waitForActionAtEnd + type: boolean + + steps: + - name: LaunchTestInstance + action: LaunchInstance + onFailure: Abort + inputs: + waitFor: "ssmAgent" + + - name: TerminateTestInstance + action: TerminateInstance + onFailure: Continue + inputs: + instanceId.$: "$.stepOutputs.LaunchTestInstance.instanceId" + + - name: WaitForActionAtEnd + action: WaitForAction + if: + booleanEquals: true + value: "$.parameters.waitForActionAtEnd" + EOT +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "6.3.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/imagebuilder/testdata/Workflow/region_override/main_gen.tf b/internal/service/imagebuilder/testdata/Workflow/region_override/main_gen.tf new file mode 100644 index 000000000000..ea6372b4d816 --- /dev/null +++ b/internal/service/imagebuilder/testdata/Workflow/region_override/main_gen.tf @@ -0,0 +1,51 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_imagebuilder_workflow" "test" { + region = var.region + + name = var.rName + version = "1.0.0" + type = "TEST" + + data = <<-EOT + name: test-image + description: Workflow to test an image + schemaVersion: 1.0 + + parameters: + - name: waitForActionAtEnd + type: boolean + + steps: + - name: LaunchTestInstance + action: LaunchInstance + onFailure: Abort + inputs: + waitFor: "ssmAgent" + + - name: TerminateTestInstance + action: TerminateInstance + onFailure: Continue + inputs: + instanceId.$: "$.stepOutputs.LaunchTestInstance.instanceId" + + - name: WaitForActionAtEnd + action: WaitForAction + if: + booleanEquals: true + value: "$.parameters.waitForActionAtEnd" + EOT +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} + +variable "region" { + description = "Region to deploy resource in" + type = string + nullable = false +} diff --git a/internal/service/imagebuilder/testdata/tmpl/container_recipe_tags.gtpl b/internal/service/imagebuilder/testdata/tmpl/container_recipe_tags.gtpl new file mode 100644 index 000000000000..d1aeaee08869 --- /dev/null +++ b/internal/service/imagebuilder/testdata/tmpl/container_recipe_tags.gtpl @@ -0,0 +1,29 @@ +resource "aws_imagebuilder_container_recipe" "test" { +{{- template "region" }} + name = var.rName + container_type = "DOCKER" + parent_image = "arn:${data.aws_partition.current.partition}:imagebuilder:${data.aws_region.current.name}:aws:image/amazon-linux-x86-2/x.x.x" + version = "1.0.0" + + component { + component_arn = "arn:${data.aws_partition.current.partition}:imagebuilder:${data.aws_region.current.name}:aws:component/update-linux/x.x.x" + } + + dockerfile_template_data = "FROM $${imagebuilder:parentImage}\n$${imagebuilder:environments}\n$${imagebuilder:components}" + + target_repository { + repository_name = aws_ecr_repository.test.name + service = "ECR" + } +{{- template "tags" }} +} + +resource "aws_ecr_repository" "test" { 
+{{- template "region" }} + name = var.rName +} + +data "aws_partition" "current" {} +data "aws_region" "current" { +{{- template "region" }} +} diff --git a/internal/service/imagebuilder/testdata/tmpl/distribution_configuration_tags.gtpl b/internal/service/imagebuilder/testdata/tmpl/distribution_configuration_tags.gtpl new file mode 100644 index 000000000000..1cc2f81f0097 --- /dev/null +++ b/internal/service/imagebuilder/testdata/tmpl/distribution_configuration_tags.gtpl @@ -0,0 +1,17 @@ +resource "aws_imagebuilder_distribution_configuration" "test" { +{{- template "region" }} + name = var.rName + + distribution { + ami_distribution_configuration { + name = "test-name-{{`{{ imagebuilder:buildDate }}`}}" + } + + region = data.aws_region.current.name + } +{{- template "tags" }} +} + +data "aws_region" "current" { +{{- template "region" }} +} diff --git a/internal/service/imagebuilder/testdata/tmpl/image_pipeline_tags.gtpl b/internal/service/imagebuilder/testdata/tmpl/image_pipeline_tags.gtpl new file mode 100644 index 000000000000..1d40aabc66d4 --- /dev/null +++ b/internal/service/imagebuilder/testdata/tmpl/image_pipeline_tags.gtpl @@ -0,0 +1,48 @@ +resource "aws_imagebuilder_image_pipeline" "test" { +{{- template "region" }} + image_recipe_arn = aws_imagebuilder_image_recipe.test.arn + infrastructure_configuration_arn = aws_imagebuilder_infrastructure_configuration.test.arn + name = var.rName +{{- template "tags" }} +} + +resource "aws_imagebuilder_image_recipe" "test" { +{{- template "region" }} + component { + component_arn = "arn:${data.aws_partition.current.partition}:imagebuilder:${data.aws_region.current.name}:aws:component/update-linux/x.x.x" + } + + name = var.rName + parent_image = "arn:${data.aws_partition.current.partition}:imagebuilder:${data.aws_region.current.name}:aws:image/amazon-linux-2-x86/x.x.x" + version = "1.0.0" +} + +resource "aws_imagebuilder_infrastructure_configuration" "test" { +{{- template "region" }} + instance_profile_name = 
aws_iam_instance_profile.test.name + name = var.rName +} + +resource "aws_iam_instance_profile" "test" { + name = var.rName + role = aws_iam_role.test.name +} + +resource "aws_iam_role" "test" { + assume_role_policy = jsonencode({ + Version = "2012-10-17" + Statement = [{ + Action = "sts:AssumeRole" + Effect = "Allow" + Principal = { + Service = "ec2.amazonaws.com" + } + }] + }) + name = var.rName +} + +data "aws_partition" "current" {} +data "aws_region" "current" { +{{- template "region" }} +} diff --git a/internal/service/imagebuilder/testdata/tmpl/image_recipe_tags.gtpl b/internal/service/imagebuilder/testdata/tmpl/image_recipe_tags.gtpl new file mode 100644 index 000000000000..bfc06ebea9c2 --- /dev/null +++ b/internal/service/imagebuilder/testdata/tmpl/image_recipe_tags.gtpl @@ -0,0 +1,37 @@ +resource "aws_imagebuilder_image_recipe" "test" { +{{- template "region" }} + component { + component_arn = aws_imagebuilder_component.test.arn + } + + name = var.rName + parent_image = "arn:${data.aws_partition.current.partition}:imagebuilder:${data.aws_region.current.name}:aws:image/amazon-linux-2-x86/x.x.x" + version = "1.0.0" +{{- template "tags" }} +} + +resource "aws_imagebuilder_component" "test" { +{{- template "region" }} + data = yamlencode({ + phases = [{ + name = "build" + steps = [{ + action = "ExecuteBash" + inputs = { + commands = ["echo 'hello world'"] + } + name = "example" + onFailure = "Continue" + }] + }] + schemaVersion = 1.0 + }) + name = var.rName + platform = "Linux" + version = "1.0.0" +} + +data "aws_partition" "current" {} +data "aws_region" "current" { +{{- template "region" }} +} diff --git a/internal/service/imagebuilder/testdata/tmpl/image_tags.gtpl b/internal/service/imagebuilder/testdata/tmpl/image_tags.gtpl new file mode 100644 index 000000000000..04f2c418afdb --- /dev/null +++ b/internal/service/imagebuilder/testdata/tmpl/image_tags.gtpl @@ -0,0 +1,120 @@ +resource "aws_imagebuilder_image" "test" { +{{- template "region" }} + 
image_recipe_arn = aws_imagebuilder_image_recipe.test.arn + infrastructure_configuration_arn = aws_imagebuilder_infrastructure_configuration.test.arn +{{- template "tags" }} +} + +{{ template "acctest.ConfigAvailableAZsNoOptInDefaultExclude" }} + +resource "aws_vpc" "test" { +{{- template "region" }} + cidr_block = "10.0.0.0/16" +} + +resource "aws_subnet" "test" { +{{- template "region" }} + vpc_id = aws_vpc.test.id + availability_zone = data.aws_availability_zones.available.names[0] + cidr_block = cidrsubnet(aws_vpc.test.cidr_block, 8, 0) + map_public_ip_on_launch = true +} + +resource "aws_default_route_table" "test" { +{{- template "region" }} + default_route_table_id = aws_vpc.test.default_route_table_id + + route { + cidr_block = "0.0.0.0/0" + gateway_id = aws_internet_gateway.test.id + } +} + +resource "aws_default_security_group" "test" { +{{- template "region" }} + vpc_id = aws_vpc.test.id + + egress { + cidr_blocks = ["0.0.0.0/0"] + from_port = 0 + protocol = "-1" + to_port = 0 + } + + ingress { + from_port = 0 + protocol = -1 + self = true + to_port = 0 + } +} + +resource "aws_internet_gateway" "test" { +{{- template "region" }} + vpc_id = aws_vpc.test.id +} + +resource "aws_iam_role" "test" { + name = var.rName + assume_role_policy = jsonencode({ + Version = "2012-10-17" + Statement = [{ + Action = "sts:AssumeRole" + Effect = "Allow" + Principal = { + Service = "ec2.${data.aws_partition.current.dns_suffix}" + } + }] + }) +} + +resource "aws_iam_role_policy_attachment" "AmazonSSMManagedInstanceCore" { + policy_arn = "arn:${data.aws_partition.current.partition}:iam::aws:policy/AmazonSSMManagedInstanceCore" + role = aws_iam_role.test.name +} + +resource "aws_iam_role_policy_attachment" "EC2InstanceProfileForImageBuilder" { + policy_arn = "arn:${data.aws_partition.current.partition}:iam::aws:policy/EC2InstanceProfileForImageBuilder" + role = aws_iam_role.test.name +} + +resource "aws_iam_instance_profile" "test" { + name = aws_iam_role.test.name + role = 
aws_iam_role.test.name + + depends_on = [ + aws_iam_role_policy_attachment.AmazonSSMManagedInstanceCore, + aws_iam_role_policy_attachment.EC2InstanceProfileForImageBuilder, + ] +} + +resource "aws_imagebuilder_image_recipe" "test" { +{{- template "region" }} + component { + component_arn = data.aws_imagebuilder_component.update-linux.arn + } + + name = var.rName + parent_image = "arn:${data.aws_partition.current.partition}:imagebuilder:${data.aws_region.current.name}:aws:image/amazon-linux-2-x86/x.x.x" + version = "1.0.0" +} + +resource "aws_imagebuilder_infrastructure_configuration" "test" { +{{- template "region" }} + instance_profile_name = aws_iam_instance_profile.test.name + name = var.rName + security_group_ids = [aws_default_security_group.test.id] + subnet_id = aws_subnet.test.id + + depends_on = [aws_default_route_table.test] +} + +data "aws_imagebuilder_component" "update-linux" { +{{- template "region" }} + arn = "arn:${data.aws_partition.current.partition}:imagebuilder:${data.aws_region.current.region}:aws:component/update-linux/1.0.2" +} +data "aws_partition" "current" {} +data "aws_region" "current" { +{{- template "region" }} +} + diff --git a/internal/service/imagebuilder/testdata/tmpl/infrastructure_configuration_tags.gtpl b/internal/service/imagebuilder/testdata/tmpl/infrastructure_configuration_tags.gtpl new file mode 100644 index 000000000000..fa546c89ce2b --- /dev/null +++ b/internal/service/imagebuilder/testdata/tmpl/infrastructure_configuration_tags.gtpl @@ -0,0 +1,25 @@ +resource "aws_imagebuilder_infrastructure_configuration" "test" { +{{- template "region" }} + instance_profile_name = aws_iam_instance_profile.test.name + name = var.rName +{{- template "tags" }} +} + +resource "aws_iam_instance_profile" "test" { + name = var.rName + role = aws_iam_role.test.name +} + +resource "aws_iam_role" "test" { + assume_role_policy = jsonencode({ + Version = "2012-10-17" + Statement = [{ + Action = "sts:AssumeRole" + Effect = "Allow" + Principal = { + 
Service = "ec2.amazonaws.com" + } + }] + }) + name = var.rName +} diff --git a/internal/service/imagebuilder/testdata/tmpl/workflow_tags.gtpl b/internal/service/imagebuilder/testdata/tmpl/workflow_tags.gtpl new file mode 100644 index 000000000000..2631e37c32ae --- /dev/null +++ b/internal/service/imagebuilder/testdata/tmpl/workflow_tags.gtpl @@ -0,0 +1,36 @@ +resource "aws_imagebuilder_workflow" "test" { +{{- template "region" }} + name = var.rName + version = "1.0.0" + type = "TEST" + + data = <<-EOT + name: test-image + description: Workflow to test an image + schemaVersion: 1.0 + + parameters: + - name: waitForActionAtEnd + type: boolean + + steps: + - name: LaunchTestInstance + action: LaunchInstance + onFailure: Abort + inputs: + waitFor: "ssmAgent" + + - name: TerminateTestInstance + action: TerminateInstance + onFailure: Continue + inputs: + instanceId.$: "$.stepOutputs.LaunchTestInstance.instanceId" + + - name: WaitForActionAtEnd + action: WaitForAction + if: + booleanEquals: true + value: "$.parameters.waitForActionAtEnd" + EOT +{{- template "tags" }} +} diff --git a/internal/service/imagebuilder/workflow.go b/internal/service/imagebuilder/workflow.go index 943d5d4e951b..f493bfa0da3a 100644 --- a/internal/service/imagebuilder/workflow.go +++ b/internal/service/imagebuilder/workflow.go @@ -27,6 +27,8 @@ import ( // @SDKResource("aws_imagebuilder_workflow", name="Workflow") // @Tags(identifierAttribute="id") +// @ArnIdentity +// @Testing(preIdentityVersion="v6.3.0") func resourceWorkflow() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceWorkflowCreate, @@ -34,10 +36,6 @@ func resourceWorkflow() *schema.Resource { UpdateWithoutTimeout: resourceWorkflowUpdate, DeleteWithoutTimeout: resourceWorkflowDelete, - Importer: &schema.ResourceImporter{ - StateContext: schema.ImportStatePassthroughContext, - }, - Schema: map[string]*schema.Schema{ names.AttrARN: { Type: schema.TypeString, diff --git 
a/internal/service/imagebuilder/workflow_identity_gen_test.go b/internal/service/imagebuilder/workflow_identity_gen_test.go new file mode 100644 index 000000000000..8df55bf645f6 --- /dev/null +++ b/internal/service/imagebuilder/workflow_identity_gen_test.go @@ -0,0 +1,337 @@ +// Code generated by internal/generate/identitytests/main.go; DO NOT EDIT. + +package imagebuilder_test + +import ( + "testing" + + "github.com/hashicorp/terraform-plugin-testing/compare" + "github.com/hashicorp/terraform-plugin-testing/config" + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/knownvalue" + "github.com/hashicorp/terraform-plugin-testing/plancheck" + "github.com/hashicorp/terraform-plugin-testing/statecheck" + "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" + "github.com/hashicorp/terraform-plugin-testing/tfversion" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccImageBuilderWorkflow_Identity_Basic(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_imagebuilder_workflow.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.ImageBuilderServiceID), + CheckDestroy: testAccCheckWorkflowDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + // Step 1: Setup + { + ConfigDirectory: config.StaticDirectory("testdata/Workflow/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + 
Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckWorkflowExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New(names.AttrARN), compare.ValuesSame()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + + // Step 2: Import command + { + ConfigDirectory: config.StaticDirectory("testdata/Workflow/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ImportStateKind: resource.ImportCommandWithID, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + + // Step 3: Import block with Import ID + { + ConfigDirectory: config.StaticDirectory("testdata/Workflow/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + }, + }, + }, + + // Step 4: Import block with Resource Identity + { + ConfigDirectory: config.StaticDirectory("testdata/Workflow/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: 
resource.ImportBlockWithResourceIdentity, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + }, + }, + }, + }, + }) +} + +func TestAccImageBuilderWorkflow_Identity_RegionOverride(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_imagebuilder_workflow.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.ImageBuilderServiceID), + CheckDestroy: acctest.CheckDestroyNoop, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + // Step 1: Setup + { + ConfigDirectory: config.StaticDirectory("testdata/Workflow/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New(names.AttrARN), compare.ValuesSame()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + + // Step 2: Import command with appended "@" + { + ConfigDirectory: 
config.StaticDirectory("testdata/Workflow/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ImportStateKind: resource.ImportCommandWithID, + ImportStateIdFunc: acctest.CrossRegionImportStateIdFunc(resourceName), + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + + // Step 3: Import command without appended "@" + { + ConfigDirectory: config.StaticDirectory("testdata/Workflow/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ImportStateKind: resource.ImportCommandWithID, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + + // Step 4: Import block with Import ID and appended "@" + { + ConfigDirectory: config.StaticDirectory("testdata/Workflow/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportStateIdFunc: acctest.CrossRegionImportStateIdFunc(resourceName), + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + }, + + // Step 5: Import block with Import ID and no appended "@" + { + ConfigDirectory: config.StaticDirectory("testdata/Workflow/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": 
config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + }, + + // Step 6: Import block with Resource Identity + { + ConfigDirectory: config.StaticDirectory("testdata/Workflow/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithResourceIdentity, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + }, + }, + }) +} + +// Resource Identity was added after v6.3.0 +func TestAccImageBuilderWorkflow_Identity_ExistingResource(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_imagebuilder_workflow.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.ImageBuilderServiceID), + CheckDestroy: 
testAccCheckWorkflowDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/Workflow/basic_v6.3.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckWorkflowExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Workflow/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + }, + }) +} + +// Resource Identity was added after v6.3.0 +func TestAccImageBuilderWorkflow_Identity_ExistingResource_NoRefresh_NoChange(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_imagebuilder_workflow.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.ImageBuilderServiceID), + CheckDestroy: testAccCheckWorkflowDestroy(ctx), + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: 
resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/Workflow/basic_v6.3.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckWorkflowExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Workflow/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + }, + }) +} diff --git a/internal/service/inspector/assessment_target.go b/internal/service/inspector/assessment_target.go index f0114f1397cd..89f8290c5299 100644 --- a/internal/service/inspector/assessment_target.go +++ b/internal/service/inspector/assessment_target.go @@ -22,15 +22,15 @@ import ( ) // @SDKResource("aws_inspector_assessment_target", name="Assessment Target") +// @ArnIdentity +// @Testing(existsType="github.com/aws/aws-sdk-go-v2/service/inspector/types;types.AssessmentTarget") +// @Testing(preIdentityVersion="v6.4.0") func ResourceAssessmentTarget() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceAssessmentTargetCreate, ReadWithoutTimeout: resourceAssessmentTargetRead, UpdateWithoutTimeout: resourceAssessmentTargetUpdate, DeleteWithoutTimeout: resourceAssessmentTargetDelete, - Importer: 
&schema.ResourceImporter{ - StateContext: schema.ImportStatePassthroughContext, - }, Schema: map[string]*schema.Schema{ names.AttrName: { @@ -125,22 +125,19 @@ func resourceAssessmentTargetDelete(ctx context.Context, d *schema.ResourceData, input := &inspector.DeleteAssessmentTargetInput{ AssessmentTargetArn: aws.String(d.Id()), } - err := retry.RetryContext(ctx, 60*time.Minute, func() *retry.RetryError { + err := tfresource.Retry(ctx, 60*time.Minute, func(ctx context.Context) *tfresource.RetryError { _, err := conn.DeleteAssessmentTarget(ctx, input) if errs.IsA[*awstypes.AssessmentRunInProgressException](err) { - return retry.RetryableError(err) + return tfresource.RetryableError(err) } if err != nil { - return retry.NonRetryableError(err) + return tfresource.NonRetryableError(err) } return nil }) - if tfresource.TimedOut(err) { - _, err = conn.DeleteAssessmentTarget(ctx, input) - } if err != nil { return sdkdiag.AppendErrorf(diags, "deleting Inspector Classic Assessment Target: %s", err) } diff --git a/internal/service/inspector/assessment_target_identity_gen_test.go b/internal/service/inspector/assessment_target_identity_gen_test.go new file mode 100644 index 000000000000..15f91cc01289 --- /dev/null +++ b/internal/service/inspector/assessment_target_identity_gen_test.go @@ -0,0 +1,341 @@ +// Code generated by internal/generate/identitytests/main.go; DO NOT EDIT. 
+ +package inspector_test + +import ( + "testing" + + "github.com/aws/aws-sdk-go-v2/service/inspector/types" + "github.com/hashicorp/terraform-plugin-testing/compare" + "github.com/hashicorp/terraform-plugin-testing/config" + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/knownvalue" + "github.com/hashicorp/terraform-plugin-testing/plancheck" + "github.com/hashicorp/terraform-plugin-testing/statecheck" + "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" + "github.com/hashicorp/terraform-plugin-testing/tfversion" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccInspectorAssessmentTarget_Identity_Basic(t *testing.T) { + ctx := acctest.Context(t) + + var v types.AssessmentTarget + resourceName := "aws_inspector_assessment_target.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.InspectorServiceID), + CheckDestroy: testAccCheckAssessmentTargetDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + // Step 1: Setup + { + ConfigDirectory: config.StaticDirectory("testdata/AssessmentTarget/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckAssessmentTargetExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), 
resourceName, tfjsonpath.New(names.AttrARN), compare.ValuesSame()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + + // Step 2: Import command + { + ConfigDirectory: config.StaticDirectory("testdata/AssessmentTarget/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ImportStateKind: resource.ImportCommandWithID, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + + // Step 3: Import block with Import ID + { + ConfigDirectory: config.StaticDirectory("testdata/AssessmentTarget/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + }, + }, + }, + + // Step 4: Import block with Resource Identity + { + ConfigDirectory: config.StaticDirectory("testdata/AssessmentTarget/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithResourceIdentity, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + 
plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + }, + }, + }, + }, + }) +} + +func TestAccInspectorAssessmentTarget_Identity_RegionOverride(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_inspector_assessment_target.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.InspectorServiceID), + CheckDestroy: acctest.CheckDestroyNoop, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + // Step 1: Setup + { + ConfigDirectory: config.StaticDirectory("testdata/AssessmentTarget/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New(names.AttrARN), compare.ValuesSame()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + + // Step 2: Import command with appended "@" + { + ConfigDirectory: config.StaticDirectory("testdata/AssessmentTarget/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ImportStateKind: 
resource.ImportCommandWithID, + ImportStateIdFunc: acctest.CrossRegionImportStateIdFunc(resourceName), + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + + // Step 3: Import command without appended "@" + { + ConfigDirectory: config.StaticDirectory("testdata/AssessmentTarget/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ImportStateKind: resource.ImportCommandWithID, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + + // Step 4: Import block with Import ID and appended "@" + { + ConfigDirectory: config.StaticDirectory("testdata/AssessmentTarget/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportStateIdFunc: acctest.CrossRegionImportStateIdFunc(resourceName), + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + }, + + // Step 5: Import block with Import ID and no appended "@" + { + ConfigDirectory: config.StaticDirectory("testdata/AssessmentTarget/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ 
+ plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + }, + + // Step 6: Import block with Resource Identity + { + ConfigDirectory: config.StaticDirectory("testdata/AssessmentTarget/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithResourceIdentity, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + }, + }, + }) +} + +// Resource Identity was added after v6.4.0 +func TestAccInspectorAssessmentTarget_Identity_ExistingResource(t *testing.T) { + ctx := acctest.Context(t) + + var v types.AssessmentTarget + resourceName := "aws_inspector_assessment_target.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.InspectorServiceID), + CheckDestroy: testAccCheckAssessmentTargetDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/AssessmentTarget/basic_v6.4.0/"), + ConfigVariables: 
config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckAssessmentTargetExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/AssessmentTarget/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + }, + }) +} + +// Resource Identity was added after v6.4.0 +func TestAccInspectorAssessmentTarget_Identity_ExistingResource_NoRefresh_NoChange(t *testing.T) { + ctx := acctest.Context(t) + + var v types.AssessmentTarget + resourceName := "aws_inspector_assessment_target.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.InspectorServiceID), + CheckDestroy: testAccCheckAssessmentTargetDestroy(ctx), + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: 
config.StaticDirectory("testdata/AssessmentTarget/basic_v6.4.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckAssessmentTargetExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/AssessmentTarget/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + }, + }) +} diff --git a/internal/service/inspector/assessment_target_test.go b/internal/service/inspector/assessment_target_test.go index f052327efb47..f520be358360 100644 --- a/internal/service/inspector/assessment_target_test.go +++ b/internal/service/inspector/assessment_target_test.go @@ -34,12 +34,12 @@ func TestAccInspectorAssessmentTarget_basic(t *testing.T) { PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.InspectorServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckTargetAssessmentDestroy(ctx), + CheckDestroy: testAccCheckAssessmentTargetDestroy(ctx), Steps: []resource.TestStep{ { Config: testAccAssessmentTargetConfig_basic(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckTargetExists(ctx, resourceName, &assessmentTarget1), + testAccCheckAssessmentTargetExists(ctx, resourceName, &assessmentTarget1), 
acctest.MatchResourceAttrRegionalARN(ctx, resourceName, names.AttrARN, "inspector", regexache.MustCompile(`target/.+`)), resource.TestCheckResourceAttr(resourceName, names.AttrName, rName), resource.TestCheckResourceAttr(resourceName, "resource_group_arn", ""), @@ -64,12 +64,12 @@ func TestAccInspectorAssessmentTarget_disappears(t *testing.T) { PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.InspectorServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckTargetAssessmentDestroy(ctx), + CheckDestroy: testAccCheckAssessmentTargetDestroy(ctx), Steps: []resource.TestStep{ { Config: testAccAssessmentTargetConfig_basic(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckTargetExists(ctx, resourceName, &assessmentTarget1), + testAccCheckAssessmentTargetExists(ctx, resourceName, &assessmentTarget1), testAccCheckTargetDisappears(ctx, &assessmentTarget1), ), ExpectNonEmptyPlan: true, @@ -89,12 +89,12 @@ func TestAccInspectorAssessmentTarget_name(t *testing.T) { PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.InspectorServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckTargetAssessmentDestroy(ctx), + CheckDestroy: testAccCheckAssessmentTargetDestroy(ctx), Steps: []resource.TestStep{ { Config: testAccAssessmentTargetConfig_basic(rName1), Check: resource.ComposeTestCheckFunc( - testAccCheckTargetExists(ctx, resourceName, &assessmentTarget1), + testAccCheckAssessmentTargetExists(ctx, resourceName, &assessmentTarget1), resource.TestCheckResourceAttr(resourceName, names.AttrName, rName1), ), }, @@ -106,7 +106,7 @@ func TestAccInspectorAssessmentTarget_name(t *testing.T) { { Config: testAccAssessmentTargetConfig_basic(rName2), Check: resource.ComposeTestCheckFunc( - testAccCheckTargetExists(ctx, resourceName, &assessmentTarget2), + testAccCheckAssessmentTargetExists(ctx, resourceName, &assessmentTarget2), 
resource.TestCheckResourceAttr(resourceName, names.AttrName, rName2), ), }, @@ -126,12 +126,12 @@ func TestAccInspectorAssessmentTarget_resourceGroupARN(t *testing.T) { PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.InspectorServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckTargetAssessmentDestroy(ctx), + CheckDestroy: testAccCheckAssessmentTargetDestroy(ctx), Steps: []resource.TestStep{ { Config: testAccAssessmentTargetConfig_resourceGroupARN(rName, inspectorResourceGroupResourceName1), Check: resource.ComposeTestCheckFunc( - testAccCheckTargetExists(ctx, resourceName, &assessmentTarget1), + testAccCheckAssessmentTargetExists(ctx, resourceName, &assessmentTarget1), resource.TestCheckResourceAttrPair(resourceName, "resource_group_arn", inspectorResourceGroupResourceName1, names.AttrARN), ), }, @@ -143,21 +143,21 @@ func TestAccInspectorAssessmentTarget_resourceGroupARN(t *testing.T) { { Config: testAccAssessmentTargetConfig_resourceGroupARN(rName, inspectorResourceGroupResourceName2), Check: resource.ComposeTestCheckFunc( - testAccCheckTargetExists(ctx, resourceName, &assessmentTarget2), + testAccCheckAssessmentTargetExists(ctx, resourceName, &assessmentTarget2), resource.TestCheckResourceAttrPair(resourceName, "resource_group_arn", inspectorResourceGroupResourceName2, names.AttrARN), ), }, { Config: testAccAssessmentTargetConfig_basic(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckTargetExists(ctx, resourceName, &assessmentTarget3), + testAccCheckAssessmentTargetExists(ctx, resourceName, &assessmentTarget3), resource.TestCheckResourceAttr(resourceName, "resource_group_arn", ""), ), }, { Config: testAccAssessmentTargetConfig_resourceGroupARN(rName, inspectorResourceGroupResourceName1), Check: resource.ComposeTestCheckFunc( - testAccCheckTargetExists(ctx, resourceName, &assessmentTarget4), + testAccCheckAssessmentTargetExists(ctx, resourceName, &assessmentTarget4), 
resource.TestCheckResourceAttrPair(resourceName, "resource_group_arn", inspectorResourceGroupResourceName1, names.AttrARN), ), }, @@ -165,7 +165,7 @@ func TestAccInspectorAssessmentTarget_resourceGroupARN(t *testing.T) { }) } -func testAccCheckTargetAssessmentDestroy(ctx context.Context) resource.TestCheckFunc { +func testAccCheckAssessmentTargetDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { conn := acctest.Provider.Meta().(*conns.AWSClient).InspectorClient(ctx) @@ -189,7 +189,7 @@ func testAccCheckTargetAssessmentDestroy(ctx context.Context) resource.TestCheck } } -func testAccCheckTargetExists(ctx context.Context, name string, target *awstypes.AssessmentTarget) resource.TestCheckFunc { +func testAccCheckAssessmentTargetExists(ctx context.Context, name string, target *awstypes.AssessmentTarget) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[name] if !ok { diff --git a/internal/service/inspector/assessment_template.go b/internal/service/inspector/assessment_template.go index c495f9729486..7ea0d6151927 100644 --- a/internal/service/inspector/assessment_template.go +++ b/internal/service/inspector/assessment_template.go @@ -30,6 +30,9 @@ const ( ) // @SDKResource("aws_inspector_assessment_template", name="Assessment Template") +// @ArnIdentity +// @Testing(existsType="github.com/aws/aws-sdk-go-v2/service/inspector/types;types.AssessmentTemplate") +// @Testing(preIdentityVersion="v6.4.0") // @Tags(identifierAttribute="id") func ResourceAssessmentTemplate() *schema.Resource { return &schema.Resource{ @@ -38,10 +41,6 @@ func ResourceAssessmentTemplate() *schema.Resource { UpdateWithoutTimeout: resourceAssessmentTemplateUpdate, DeleteWithoutTimeout: resourceAssessmentTemplateDelete, - Importer: &schema.ResourceImporter{ - StateContext: schema.ImportStatePassthroughContext, - }, - Schema: map[string]*schema.Schema{ names.AttrARN: { Type: schema.TypeString, diff --git 
a/internal/service/inspector/assessment_template_identity_gen_test.go b/internal/service/inspector/assessment_template_identity_gen_test.go new file mode 100644 index 000000000000..14745231cb3b --- /dev/null +++ b/internal/service/inspector/assessment_template_identity_gen_test.go @@ -0,0 +1,341 @@ +// Code generated by internal/generate/identitytests/main.go; DO NOT EDIT. + +package inspector_test + +import ( + "testing" + + "github.com/aws/aws-sdk-go-v2/service/inspector/types" + "github.com/hashicorp/terraform-plugin-testing/compare" + "github.com/hashicorp/terraform-plugin-testing/config" + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/knownvalue" + "github.com/hashicorp/terraform-plugin-testing/plancheck" + "github.com/hashicorp/terraform-plugin-testing/statecheck" + "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" + "github.com/hashicorp/terraform-plugin-testing/tfversion" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccInspectorAssessmentTemplate_Identity_Basic(t *testing.T) { + ctx := acctest.Context(t) + + var v types.AssessmentTemplate + resourceName := "aws_inspector_assessment_template.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.InspectorServiceID), + CheckDestroy: testAccCheckAssessmentTemplateDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + // Step 1: Setup + { + ConfigDirectory: 
config.StaticDirectory("testdata/AssessmentTemplate/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckAssessmentTemplateExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New(names.AttrARN), compare.ValuesSame()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + + // Step 2: Import command + { + ConfigDirectory: config.StaticDirectory("testdata/AssessmentTemplate/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ImportStateKind: resource.ImportCommandWithID, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + + // Step 3: Import block with Import ID + { + ConfigDirectory: config.StaticDirectory("testdata/AssessmentTemplate/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + }, + }, + }, + + // Step 4: Import block with Resource Identity + { + ConfigDirectory: config.StaticDirectory("testdata/AssessmentTemplate/basic/"), + 
ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithResourceIdentity, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + }, + }, + }, + }, + }) +} + +func TestAccInspectorAssessmentTemplate_Identity_RegionOverride(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_inspector_assessment_template.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.InspectorServiceID), + CheckDestroy: acctest.CheckDestroyNoop, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + // Step 1: Setup + { + ConfigDirectory: config.StaticDirectory("testdata/AssessmentTemplate/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New(names.AttrARN), compare.ValuesSame()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + 
statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + + // Step 2: Import command with appended "@" + { + ConfigDirectory: config.StaticDirectory("testdata/AssessmentTemplate/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ImportStateKind: resource.ImportCommandWithID, + ImportStateIdFunc: acctest.CrossRegionImportStateIdFunc(resourceName), + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + + // Step 3: Import command without appended "@" + { + ConfigDirectory: config.StaticDirectory("testdata/AssessmentTemplate/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ImportStateKind: resource.ImportCommandWithID, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + + // Step 4: Import block with Import ID and appended "@" + { + ConfigDirectory: config.StaticDirectory("testdata/AssessmentTemplate/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportStateIdFunc: acctest.CrossRegionImportStateIdFunc(resourceName), + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + }, + + // Step 5: Import block with Import ID and no appended "@" + { + 
ConfigDirectory: config.StaticDirectory("testdata/AssessmentTemplate/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + }, + + // Step 6: Import block with Resource Identity + { + ConfigDirectory: config.StaticDirectory("testdata/AssessmentTemplate/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithResourceIdentity, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + }, + }, + }) +} + +// Resource Identity was added after v6.4.0 +func TestAccInspectorAssessmentTemplate_Identity_ExistingResource(t *testing.T) { + ctx := acctest.Context(t) + + var v types.AssessmentTemplate + resourceName := "aws_inspector_assessment_template.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: 
[]tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.InspectorServiceID), + CheckDestroy: testAccCheckAssessmentTemplateDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/AssessmentTemplate/basic_v6.4.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckAssessmentTemplateExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/AssessmentTemplate/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + }, + }) +} + +// Resource Identity was added after v6.4.0 +func TestAccInspectorAssessmentTemplate_Identity_ExistingResource_NoRefresh_NoChange(t *testing.T) { + ctx := acctest.Context(t) + + var v types.AssessmentTemplate + resourceName := "aws_inspector_assessment_template.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: 
[]tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.InspectorServiceID), + CheckDestroy: testAccCheckAssessmentTemplateDestroy(ctx), + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/AssessmentTemplate/basic_v6.4.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckAssessmentTemplateExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/AssessmentTemplate/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + }, + }) +} diff --git a/internal/service/inspector/assessment_template_test.go b/internal/service/inspector/assessment_template_test.go index 88fcf5923e88..d4f9bca3ffe5 100644 --- a/internal/service/inspector/assessment_template_test.go +++ b/internal/service/inspector/assessment_template_test.go @@ -34,12 +34,12 @@ func TestAccInspectorAssessmentTemplate_basic(t *testing.T) { PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, 
names.InspectorServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckTemplateDestroy(ctx), + CheckDestroy: testAccCheckAssessmentTemplateDestroy(ctx), Steps: []resource.TestStep{ { Config: testAccAssessmentTemplateConfig_basic(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckTemplateExists(ctx, resourceName, &v), + testAccCheckAssessmentTemplateExists(ctx, resourceName, &v), acctest.MatchResourceAttrRegionalARN(ctx, resourceName, names.AttrARN, "inspector", regexache.MustCompile(`target/.+/template/.+`)), resource.TestCheckResourceAttr(resourceName, names.AttrDuration, "3600"), resource.TestCheckResourceAttr(resourceName, names.AttrName, rName), @@ -67,12 +67,12 @@ func TestAccInspectorAssessmentTemplate_disappears(t *testing.T) { PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.InspectorServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckTemplateDestroy(ctx), + CheckDestroy: testAccCheckAssessmentTemplateDestroy(ctx), Steps: []resource.TestStep{ { Config: testAccAssessmentTemplateConfig_basic(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckTemplateExists(ctx, resourceName, &v), + testAccCheckAssessmentTemplateExists(ctx, resourceName, &v), testAccCheckTemplateDisappears(ctx, &v), ), ExpectNonEmptyPlan: true, @@ -91,12 +91,12 @@ func TestAccInspectorAssessmentTemplate_tags(t *testing.T) { PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.InspectorServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckTemplateDestroy(ctx), + CheckDestroy: testAccCheckAssessmentTemplateDestroy(ctx), Steps: []resource.TestStep{ { Config: testAccAssessmentTemplateConfig_tags1(rName, acctest.CtKey1, acctest.CtValue1), Check: resource.ComposeTestCheckFunc( - testAccCheckTemplateExists(ctx, resourceName, &v), + testAccCheckAssessmentTemplateExists(ctx, 
resourceName, &v), resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, "1"), resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey1, acctest.CtValue1), ), @@ -109,7 +109,7 @@ func TestAccInspectorAssessmentTemplate_tags(t *testing.T) { { Config: testAccAssessmentTemplateConfig_tags2(rName, acctest.CtKey1, acctest.CtValue1Updated, acctest.CtKey2, acctest.CtValue2), Check: resource.ComposeTestCheckFunc( - testAccCheckTemplateExists(ctx, resourceName, &v), + testAccCheckAssessmentTemplateExists(ctx, resourceName, &v), resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, "2"), resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey1, acctest.CtValue1Updated), resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey2, acctest.CtValue2), @@ -118,7 +118,7 @@ func TestAccInspectorAssessmentTemplate_tags(t *testing.T) { { Config: testAccAssessmentTemplateConfig_tags1(rName, acctest.CtKey2, acctest.CtValue2), Check: resource.ComposeTestCheckFunc( - testAccCheckTemplateExists(ctx, resourceName, &v), + testAccCheckAssessmentTemplateExists(ctx, resourceName, &v), resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, "1"), resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey2, acctest.CtValue2), ), @@ -126,7 +126,7 @@ func TestAccInspectorAssessmentTemplate_tags(t *testing.T) { { Config: testAccAssessmentTemplateConfig_basic(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckTemplateExists(ctx, resourceName, &v), + testAccCheckAssessmentTemplateExists(ctx, resourceName, &v), resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, "0"), ), }, @@ -148,12 +148,12 @@ func TestAccInspectorAssessmentTemplate_eventSubscription(t *testing.T) { PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.InspectorServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckTemplateDestroy(ctx), + CheckDestroy: 
testAccCheckAssessmentTemplateDestroy(ctx), Steps: []resource.TestStep{ { Config: testAccAssessmentTemplateConfig_eventSubscription(rName, event1), Check: resource.ComposeTestCheckFunc( - testAccCheckTemplateExists(ctx, resourceName, &v), + testAccCheckAssessmentTemplateExists(ctx, resourceName, &v), resource.TestCheckResourceAttr(resourceName, "event_subscription.#", "1"), resource.TestCheckResourceAttr(resourceName, "event_subscription.0.event", event1), ), @@ -166,7 +166,7 @@ func TestAccInspectorAssessmentTemplate_eventSubscription(t *testing.T) { { Config: testAccAssessmentTemplateConfig_eventSubscription(rName, event1Updated), Check: resource.ComposeTestCheckFunc( - testAccCheckTemplateExists(ctx, resourceName, &v), + testAccCheckAssessmentTemplateExists(ctx, resourceName, &v), resource.TestCheckResourceAttr(resourceName, "event_subscription.#", "1"), resource.TestCheckResourceAttr(resourceName, "event_subscription.0.event", event1Updated), ), @@ -179,7 +179,7 @@ func TestAccInspectorAssessmentTemplate_eventSubscription(t *testing.T) { { Config: testAccAssessmentTemplateConfig_eventSubscriptionMultiple(rName, event1, event2), Check: resource.ComposeTestCheckFunc( - testAccCheckTemplateExists(ctx, resourceName, &v), + testAccCheckAssessmentTemplateExists(ctx, resourceName, &v), resource.TestCheckResourceAttr(resourceName, "event_subscription.#", "2"), ), }, @@ -192,7 +192,7 @@ func TestAccInspectorAssessmentTemplate_eventSubscription(t *testing.T) { }) } -func testAccCheckTemplateDestroy(ctx context.Context) resource.TestCheckFunc { +func testAccCheckAssessmentTemplateDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { conn := acctest.Provider.Meta().(*conns.AWSClient).InspectorClient(ctx) @@ -228,7 +228,7 @@ func testAccCheckTemplateDisappears(ctx context.Context, v *awstypes.AssessmentT } } -func testAccCheckTemplateExists(ctx context.Context, name string, v *awstypes.AssessmentTemplate) resource.TestCheckFunc { 
+func testAccCheckAssessmentTemplateExists(ctx context.Context, name string, v *awstypes.AssessmentTemplate) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[name] if !ok { diff --git a/internal/service/inspector/generate.go b/internal/service/inspector/generate.go index 9d8fab53ce59..ebe960af77e9 100644 --- a/internal/service/inspector/generate.go +++ b/internal/service/inspector/generate.go @@ -3,6 +3,7 @@ //go:generate go run ../../generate/tags/main.go -ListTags -ServiceTagsSlice //go:generate go run ../../generate/servicepackage/main.go +//go:generate go run ../../generate/identitytests/main.go // ONLY generate directives and package declaration! Do not add anything else to this file. package inspector diff --git a/internal/service/inspector/resource_group.go b/internal/service/inspector/resource_group.go index 1d11f253a550..c487f93f2d4b 100644 --- a/internal/service/inspector/resource_group.go +++ b/internal/service/inspector/resource_group.go @@ -18,6 +18,10 @@ import ( ) // @SDKResource("aws_inspector_resource_group", name="Resource Group") +// @ArnIdentity +// @Testing(existsType="github.com/aws/aws-sdk-go-v2/service/inspector/types;types.ResourceGroup") +// @Testing(preIdentityVersion="v6.4.0") +// @Testing(checkDestroyNoop=true) func ResourceResourceGroup() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceResourceGroupCreate, diff --git a/internal/service/inspector/resource_group_identity_gen_test.go b/internal/service/inspector/resource_group_identity_gen_test.go new file mode 100644 index 000000000000..3c8da2179d91 --- /dev/null +++ b/internal/service/inspector/resource_group_identity_gen_test.go @@ -0,0 +1,341 @@ +// Code generated by internal/generate/identitytests/main.go; DO NOT EDIT. 
+ +package inspector_test + +import ( + "testing" + + "github.com/aws/aws-sdk-go-v2/service/inspector/types" + "github.com/hashicorp/terraform-plugin-testing/compare" + "github.com/hashicorp/terraform-plugin-testing/config" + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/knownvalue" + "github.com/hashicorp/terraform-plugin-testing/plancheck" + "github.com/hashicorp/terraform-plugin-testing/statecheck" + "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" + "github.com/hashicorp/terraform-plugin-testing/tfversion" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccInspectorResourceGroup_Identity_Basic(t *testing.T) { + ctx := acctest.Context(t) + + var v types.ResourceGroup + resourceName := "aws_inspector_resource_group.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.InspectorServiceID), + CheckDestroy: acctest.CheckDestroyNoop, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + // Step 1: Setup + { + ConfigDirectory: config.StaticDirectory("testdata/ResourceGroup/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckResourceGroupExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, 
tfjsonpath.New(names.AttrARN), compare.ValuesSame()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + + // Step 2: Import command + { + ConfigDirectory: config.StaticDirectory("testdata/ResourceGroup/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ImportStateKind: resource.ImportCommandWithID, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + + // Step 3: Import block with Import ID + { + ConfigDirectory: config.StaticDirectory("testdata/ResourceGroup/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + }, + }, + }, + + // Step 4: Import block with Resource Identity + { + ConfigDirectory: config.StaticDirectory("testdata/ResourceGroup/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithResourceIdentity, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + 
plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + }, + }, + }, + }, + }) +} + +func TestAccInspectorResourceGroup_Identity_RegionOverride(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_inspector_resource_group.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.InspectorServiceID), + CheckDestroy: acctest.CheckDestroyNoop, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + // Step 1: Setup + { + ConfigDirectory: config.StaticDirectory("testdata/ResourceGroup/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New(names.AttrARN), compare.ValuesSame()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + + // Step 2: Import command with appended "@" + { + ConfigDirectory: config.StaticDirectory("testdata/ResourceGroup/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ImportStateKind: 
resource.ImportCommandWithID, + ImportStateIdFunc: acctest.CrossRegionImportStateIdFunc(resourceName), + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + + // Step 3: Import command without appended "@" + { + ConfigDirectory: config.StaticDirectory("testdata/ResourceGroup/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ImportStateKind: resource.ImportCommandWithID, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + + // Step 4: Import block with Import ID and appended "@" + { + ConfigDirectory: config.StaticDirectory("testdata/ResourceGroup/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportStateIdFunc: acctest.CrossRegionImportStateIdFunc(resourceName), + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + }, + + // Step 5: Import block with Import ID and no appended "@" + { + ConfigDirectory: config.StaticDirectory("testdata/ResourceGroup/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + 
plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + }, + + // Step 6: Import block with Resource Identity + { + ConfigDirectory: config.StaticDirectory("testdata/ResourceGroup/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithResourceIdentity, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + }, + }, + }) +} + +// Resource Identity was added after v6.4.0 +func TestAccInspectorResourceGroup_Identity_ExistingResource(t *testing.T) { + ctx := acctest.Context(t) + + var v types.ResourceGroup + resourceName := "aws_inspector_resource_group.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.InspectorServiceID), + CheckDestroy: acctest.CheckDestroyNoop, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/ResourceGroup/basic_v6.4.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: 
config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckResourceGroupExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/ResourceGroup/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + }, + }) +} + +// Resource Identity was added after v6.4.0 +func TestAccInspectorResourceGroup_Identity_ExistingResource_NoRefresh_NoChange(t *testing.T) { + ctx := acctest.Context(t) + + var v types.ResourceGroup + resourceName := "aws_inspector_resource_group.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.InspectorServiceID), + CheckDestroy: acctest.CheckDestroyNoop, + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/ResourceGroup/basic_v6.4.0/"), + 
ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckResourceGroupExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/ResourceGroup/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + }, + }) +} diff --git a/internal/service/inspector/service_endpoint_resolver_gen.go b/internal/service/inspector/service_endpoint_resolver_gen.go index b896bf2eb743..6147438161a8 100644 --- a/internal/service/inspector/service_endpoint_resolver_gen.go +++ b/internal/service/inspector/service_endpoint_resolver_gen.go @@ -62,7 +62,7 @@ func (r resolverV2) ResolveEndpoint(ctx context.Context, params inspector.Endpoi }) params.UseFIPS = aws.Bool(false) } else { - err = fmt.Errorf("looking up inspector endpoint %q: %s", hostname, err) + err = fmt.Errorf("looking up inspector endpoint %q: %w", hostname, err) return } } else { diff --git a/internal/service/inspector/service_endpoints_gen_test.go b/internal/service/inspector/service_endpoints_gen_test.go index 0cbbdfff0601..2a3bf8bd941f 100644 --- a/internal/service/inspector/service_endpoints_gen_test.go +++ b/internal/service/inspector/service_endpoints_gen_test.go @@ -521,7 +521,7 @@ func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { ) } 
-var errCancelOperation = fmt.Errorf("Test: Canceling request") +var errCancelOperation = errors.New("Test: Canceling request") func addCancelRequestMiddleware() func(*middleware.Stack) error { return func(stack *middleware.Stack) error { diff --git a/internal/service/inspector/service_package_gen.go b/internal/service/inspector/service_package_gen.go index 381074133196..7edee03e7f3b 100644 --- a/internal/service/inspector/service_package_gen.go +++ b/internal/service/inspector/service_package_gen.go @@ -7,7 +7,6 @@ import ( "unique" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/inspector" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -44,6 +43,12 @@ func (p *servicePackage) SDKResources(ctx context.Context) []*inttypes.ServicePa TypeName: "aws_inspector_assessment_target", Name: "Assessment Target", Region: unique.Make(inttypes.ResourceRegionDefault()), + Identity: inttypes.RegionalARNIdentity( + inttypes.WithIdentityDuplicateAttrs(names.AttrID), + ), + Import: inttypes.SDKv2Import{ + WrappedImport: true, + }, }, { Factory: ResourceAssessmentTemplate, @@ -53,12 +58,24 @@ func (p *servicePackage) SDKResources(ctx context.Context) []*inttypes.ServicePa IdentifierAttribute: names.AttrID, }), Region: unique.Make(inttypes.ResourceRegionDefault()), + Identity: inttypes.RegionalARNIdentity( + inttypes.WithIdentityDuplicateAttrs(names.AttrID), + ), + Import: inttypes.SDKv2Import{ + WrappedImport: true, + }, }, { Factory: ResourceResourceGroup, TypeName: "aws_inspector_resource_group", Name: "Resource Group", Region: unique.Make(inttypes.ResourceRegionDefault()), + Identity: inttypes.RegionalARNIdentity( + inttypes.WithIdentityDuplicateAttrs(names.AttrID), + ), + Import: inttypes.SDKv2Import{ + WrappedImport: true, + }, }, } } @@ -86,7 +103,7 @@ func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) ( func(o 
*inspector.Options) { if inContext, ok := conns.FromContext(ctx); ok && inContext.VCREnabled() { tflog.Info(ctx, "overriding retry behavior to immediately return VCR errors") - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(vcr.InteractionNotFoundRetryableFunc)) + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), vcr.InteractionNotFoundRetryableFunc) } }, withExtraOptions(ctx, p, config), diff --git a/internal/service/inspector/tags_gen.go b/internal/service/inspector/tags_gen.go index ac5358a297e0..bbd4fbd37865 100644 --- a/internal/service/inspector/tags_gen.go +++ b/internal/service/inspector/tags_gen.go @@ -4,6 +4,7 @@ package inspector import ( "context" + "github.com/YakDriver/smarterr" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/inspector" awstypes "github.com/aws/aws-sdk-go-v2/service/inspector/types" @@ -23,7 +24,7 @@ func listTags(ctx context.Context, conn *inspector.Client, identifier string, op output, err := conn.ListTagsForResource(ctx, &input, optFns...) if err != nil { - return tftags.New(ctx, nil), err + return tftags.New(ctx, nil), smarterr.NewError(err) } return keyValueTags(ctx, output.Tags), nil @@ -35,7 +36,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri tags, err := listTags(ctx, meta.(*conns.AWSClient).InspectorClient(ctx), identifier) if err != nil { - return err + return smarterr.NewError(err) } if inContext, ok := tftags.FromContext(ctx); ok { diff --git a/internal/service/inspector/testdata/AssessmentTarget/basic/main_gen.tf b/internal/service/inspector/testdata/AssessmentTarget/basic/main_gen.tf new file mode 100644 index 000000000000..a94de0c41c54 --- /dev/null +++ b/internal/service/inspector/testdata/AssessmentTarget/basic/main_gen.tf @@ -0,0 +1,12 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_inspector_assessment_target" "test" { + name = var.rName +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} diff --git a/internal/service/inspector/testdata/AssessmentTarget/basic_v6.4.0/main_gen.tf b/internal/service/inspector/testdata/AssessmentTarget/basic_v6.4.0/main_gen.tf new file mode 100644 index 000000000000..112463e6cb9e --- /dev/null +++ b/internal/service/inspector/testdata/AssessmentTarget/basic_v6.4.0/main_gen.tf @@ -0,0 +1,22 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resource "aws_inspector_assessment_target" "test" { + name = var.rName +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "6.4.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/inspector/testdata/AssessmentTarget/region_override/main_gen.tf b/internal/service/inspector/testdata/AssessmentTarget/region_override/main_gen.tf new file mode 100644 index 000000000000..8a8e7d717149 --- /dev/null +++ b/internal/service/inspector/testdata/AssessmentTarget/region_override/main_gen.tf @@ -0,0 +1,20 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resource "aws_inspector_assessment_target" "test" { + region = var.region + + name = var.rName +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} + +variable "region" { + description = "Region to deploy resource in" + type = string + nullable = false +} diff --git a/internal/service/inspector/testdata/AssessmentTemplate/basic/main_gen.tf b/internal/service/inspector/testdata/AssessmentTemplate/basic/main_gen.tf new file mode 100644 index 000000000000..00b4982c2b7f --- /dev/null +++ b/internal/service/inspector/testdata/AssessmentTemplate/basic/main_gen.tf @@ -0,0 +1,30 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +data "aws_inspector_rules_packages" "available" { +} + +resource "aws_inspector_resource_group" "test" { + tags = { + Name = var.rName + } +} + +resource "aws_inspector_assessment_target" "test" { + name = var.rName + resource_group_arn = aws_inspector_resource_group.test.arn +} + +resource "aws_inspector_assessment_template" "test" { + name = var.rName + target_arn = aws_inspector_assessment_target.test.arn + duration = 3600 + + rules_package_arns = data.aws_inspector_rules_packages.available.arns +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} diff --git a/internal/service/inspector/testdata/AssessmentTemplate/basic_v6.4.0/main_gen.tf b/internal/service/inspector/testdata/AssessmentTemplate/basic_v6.4.0/main_gen.tf new file mode 100644 index 000000000000..f8f2faa64c36 --- /dev/null +++ b/internal/service/inspector/testdata/AssessmentTemplate/basic_v6.4.0/main_gen.tf @@ -0,0 +1,40 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +data "aws_inspector_rules_packages" "available" { +} + +resource "aws_inspector_resource_group" "test" { + tags = { + Name = var.rName + } +} + +resource "aws_inspector_assessment_target" "test" { + name = var.rName + resource_group_arn = aws_inspector_resource_group.test.arn +} + +resource "aws_inspector_assessment_template" "test" { + name = var.rName + target_arn = aws_inspector_assessment_target.test.arn + duration = 3600 + + rules_package_arns = data.aws_inspector_rules_packages.available.arns +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "6.4.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/inspector/testdata/AssessmentTemplate/region_override/main_gen.tf b/internal/service/inspector/testdata/AssessmentTemplate/region_override/main_gen.tf new file mode 100644 index 000000000000..a88a2157e62b --- /dev/null +++ b/internal/service/inspector/testdata/AssessmentTemplate/region_override/main_gen.tf @@ -0,0 +1,44 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +data "aws_inspector_rules_packages" "available" { + region = var.region + +} + +resource "aws_inspector_resource_group" "test" { + region = var.region + + tags = { + Name = var.rName + } +} + +resource "aws_inspector_assessment_target" "test" { + region = var.region + + name = var.rName + resource_group_arn = aws_inspector_resource_group.test.arn +} + +resource "aws_inspector_assessment_template" "test" { + region = var.region + + name = var.rName + target_arn = aws_inspector_assessment_target.test.arn + duration = 3600 + + rules_package_arns = data.aws_inspector_rules_packages.available.arns +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} + +variable "region" { + description = "Region to deploy resource in" + type = string + nullable = false +} diff --git a/internal/service/inspector/testdata/ResourceGroup/basic/main_gen.tf b/internal/service/inspector/testdata/ResourceGroup/basic/main_gen.tf new file mode 100644 index 000000000000..5a92de04efd2 --- /dev/null +++ b/internal/service/inspector/testdata/ResourceGroup/basic/main_gen.tf @@ -0,0 +1,14 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resource "aws_inspector_resource_group" "test" { + tags = { + Name = var.rName + } +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} diff --git a/internal/service/inspector/testdata/ResourceGroup/basic_v6.4.0/main_gen.tf b/internal/service/inspector/testdata/ResourceGroup/basic_v6.4.0/main_gen.tf new file mode 100644 index 000000000000..899309269c50 --- /dev/null +++ b/internal/service/inspector/testdata/ResourceGroup/basic_v6.4.0/main_gen.tf @@ -0,0 +1,24 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_inspector_resource_group" "test" { + tags = { + Name = var.rName + } +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "6.4.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/inspector/testdata/ResourceGroup/region_override/main_gen.tf b/internal/service/inspector/testdata/ResourceGroup/region_override/main_gen.tf new file mode 100644 index 000000000000..537c9aee1720 --- /dev/null +++ b/internal/service/inspector/testdata/ResourceGroup/region_override/main_gen.tf @@ -0,0 +1,22 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resource "aws_inspector_resource_group" "test" { + region = var.region + + tags = { + Name = var.rName + } +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} + +variable "region" { + description = "Region to deploy resource in" + type = string + nullable = false +} diff --git a/internal/service/inspector/testdata/tmpl/assessment_target_tags.gtpl b/internal/service/inspector/testdata/tmpl/assessment_target_tags.gtpl new file mode 100644 index 000000000000..def55e99400f --- /dev/null +++ b/internal/service/inspector/testdata/tmpl/assessment_target_tags.gtpl @@ -0,0 +1,5 @@ +resource "aws_inspector_assessment_target" "test" { +{{- template "region" }} + name = var.rName +{{- template "tags" }} +} diff --git a/internal/service/inspector/testdata/tmpl/assessment_template_tags.gtpl b/internal/service/inspector/testdata/tmpl/assessment_template_tags.gtpl new file mode 100644 index 000000000000..e9c6f5059224 --- /dev/null +++ b/internal/service/inspector/testdata/tmpl/assessment_template_tags.gtpl @@ -0,0 +1,26 @@ +data "aws_inspector_rules_packages" "available" { +{{- template "region" }} +} + +resource "aws_inspector_resource_group" "test" { +{{- template "region" }} + tags = { + 
Name = var.rName + } +} + +resource "aws_inspector_assessment_target" "test" { +{{- template "region" }} + name = var.rName + resource_group_arn = aws_inspector_resource_group.test.arn +} + +resource "aws_inspector_assessment_template" "test" { +{{- template "region" }} + name = var.rName + target_arn = aws_inspector_assessment_target.test.arn + duration = 3600 + + rules_package_arns = data.aws_inspector_rules_packages.available.arns +{{- template "tags" }} +} diff --git a/internal/service/inspector/testdata/tmpl/resource_group_tags.gtpl b/internal/service/inspector/testdata/tmpl/resource_group_tags.gtpl new file mode 100644 index 000000000000..a7e08bc42ce3 --- /dev/null +++ b/internal/service/inspector/testdata/tmpl/resource_group_tags.gtpl @@ -0,0 +1,7 @@ +resource "aws_inspector_resource_group" "test" { +{{- template "region" }} + tags = { + Name = var.rName + } +{{- template "tags" }} +} diff --git a/internal/service/inspector2/enabler.go b/internal/service/inspector2/enabler.go index 3886d2e93598..f0f0d26280e6 100644 --- a/internal/service/inspector2/enabler.go +++ b/internal/service/inspector2/enabler.go @@ -48,6 +48,10 @@ func ResourceEnabler() *schema.Resource { Delete: schema.DefaultTimeout(5 * time.Minute), }, + Importer: &schema.ResourceImporter{ + StateContext: schema.ImportStatePassthroughContext, + }, + Schema: map[string]*schema.Schema{ "account_ids": { Type: schema.TypeSet, @@ -110,14 +114,14 @@ func resourceEnablerCreate(ctx context.Context, d *schema.ResourceData, meta any id := enablerID(accountIDs, typeEnable) var out *inspector2.EnableOutput - err := tfresource.Retry(ctx, d.Timeout(schema.TimeoutCreate), func() *retry.RetryError { + err := tfresource.Retry(ctx, d.Timeout(schema.TimeoutCreate), func(ctx context.Context) *tfresource.RetryError { var err error out, err = conn.Enable(ctx, in) if err != nil { - return retry.NonRetryableError(err) + return tfresource.NonRetryableError(err) } if out == nil { - return 
retry.RetryableError(tfresource.NewEmptyResultError(nil)) + return tfresource.RetryableError(tfresource.NewEmptyResultError(nil)) } if len(out.FailedAccounts) == 0 { @@ -138,14 +142,12 @@ func resourceEnablerCreate(ctx context.Context, d *schema.ResourceData, meta any } return false }) { - return retry.RetryableError(err) + return tfresource.RetryableError(err) } - return retry.NonRetryableError(err) + return tfresource.NonRetryableError(err) }) - if tfresource.TimedOut(err) { - out, err = conn.Enable(ctx, in) - } + if err != nil { return create.AppendDiagError(diags, names.Inspector2, create.ErrActionCreating, ResNameEnabler, id, err) } @@ -556,8 +558,10 @@ func AccountStatuses(ctx context.Context, conn *inspector2.Client, accountIDs [] continue } for k, v := range m { - if k == "LambdaCode" { - k = "LAMBDA_CODE" + if strings.ToUpper(k) == "LAMBDACODE" { + k = string(types.ResourceScanTypeLambdaCode) + } else if strings.ToUpper(k) == "CODEREPOSITORY" { + k = string(types.ResourceScanTypeCodeRepository) } status.ResourceStatuses[types.ResourceScanType(strings.ToUpper(k))] = v.Status } diff --git a/internal/service/inspector2/enabler_test.go b/internal/service/inspector2/enabler_test.go index 10392202620a..3a8146963501 100644 --- a/internal/service/inspector2/enabler_test.go +++ b/internal/service/inspector2/enabler_test.go @@ -53,6 +53,11 @@ func testAccEnabler_basic(t *testing.T) { resource.TestCheckTypeSetElemAttr(resourceName, "resource_types.*", string(types.ResourceScanTypeEcr)), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, }, }) } @@ -86,6 +91,11 @@ func testAccEnabler_accountID(t *testing.T) { resource.TestCheckTypeSetElemAttr(resourceName, "resource_types.*", string(types.ResourceScanTypeEcr)), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, }, }) } @@ -248,6 +258,11 @@ func testAccEnabler_lambda(t *testing.T) { resource.TestCheckTypeSetElemAttr(resourceName, 
"resource_types.*", string(types.ResourceScanTypeLambda)), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, }, }) } @@ -283,6 +298,48 @@ func testAccEnabler_lambdaCode(t *testing.T) { resource.TestCheckTypeSetElemAttr(resourceName, "resource_types.*", string(types.ResourceScanTypeLambdaCode)), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccEnabler_codeRepository(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_inspector2_enabler.test" + resourceTypes := []types.ResourceScanType{types.ResourceScanTypeCodeRepository} + + resource.Test(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, names.Inspector2EndpointID) + acctest.PreCheckInspector2(ctx, t) + acctest.PreCheckOrganizationManagementAccount(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.Inspector2ServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckEnablerDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccEnablerConfig_basic(resourceTypes), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckEnablerExists(ctx, resourceName, resourceTypes), + testAccCheckEnablerID(ctx, resourceName, resourceTypes), + resource.TestCheckResourceAttr(resourceName, "account_ids.#", "1"), + resource.TestCheckTypeSetElemAttrPair(resourceName, "account_ids.*", "data.aws_caller_identity.current", names.AttrAccountID), + resource.TestCheckResourceAttr(resourceName, "resource_types.#", "1"), + resource.TestCheckTypeSetElemAttr(resourceName, "resource_types.*", string(types.ResourceScanTypeCodeRepository)), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, }, }) } diff --git a/internal/service/inspector2/filter.go b/internal/service/inspector2/filter.go index 11fac0de58e9..5b286b7a884c 100644 --- 
a/internal/service/inspector2/filter.go +++ b/internal/service/inspector2/filter.go @@ -78,6 +78,8 @@ func (r *filterResource) Schema(ctx context.Context, request resource.SchemaRequ NestedObject: schema.NestedBlockObject{ Blocks: map[string]schema.Block{ names.AttrAWSAccountID: stringFilterSchemaFramework(ctx, defaultFilterSchemaMaxSize), + "code_repository_project_name": stringFilterSchemaFramework(ctx, defaultFilterSchemaMaxSize), + "code_repository_provider_type": stringFilterSchemaFramework(ctx, defaultFilterSchemaMaxSize), "code_vulnerability_detector_name": stringFilterSchemaFramework(ctx, defaultFilterSchemaMaxSize), "code_vulnerability_detector_tags": stringFilterSchemaFramework(ctx, defaultFilterSchemaMaxSize), "code_vulnerability_file_path": stringFilterSchemaFramework(ctx, defaultFilterSchemaMaxSize), @@ -88,6 +90,8 @@ func (r *filterResource) Schema(ctx context.Context, request resource.SchemaRequ "ec2_instance_vpc_id": stringFilterSchemaFramework(ctx, defaultFilterSchemaMaxSize), "ecr_image_architecture": stringFilterSchemaFramework(ctx, defaultFilterSchemaMaxSize), "ecr_image_hash": stringFilterSchemaFramework(ctx, defaultFilterSchemaMaxSize), + "ecr_image_in_use_count": numberFilterSchemaFramework(ctx, defaultFilterSchemaMaxSize), + "ecr_image_last_in_use_at": dateFilterSchemaFramework(ctx, defaultFilterSchemaMaxSize), "ecr_image_pushed_at": dateFilterSchemaFramework(ctx, defaultFilterSchemaMaxSize), "ecr_image_registry": stringFilterSchemaFramework(ctx, defaultFilterSchemaMaxSize), "ecr_image_repository_name": stringFilterSchemaFramework(ctx, defaultFilterSchemaMaxSize), @@ -559,6 +563,8 @@ type filterResourceModel struct { type filterCriteriaModel struct { AWSAccountID fwtypes.SetNestedObjectValueOf[stringFilterModel] `tfsdk:"aws_account_id"` + CodeRepositoryProjectName fwtypes.SetNestedObjectValueOf[stringFilterModel] `tfsdk:"code_repository_project_name"` + CodeRepositoryProviderType fwtypes.SetNestedObjectValueOf[stringFilterModel] 
`tfsdk:"code_repository_provider_type"` CodeVulnerabilityDetectorName fwtypes.SetNestedObjectValueOf[stringFilterModel] `tfsdk:"code_vulnerability_detector_name"` CodeVulnerabilityDetectorTags fwtypes.SetNestedObjectValueOf[stringFilterModel] `tfsdk:"code_vulnerability_detector_tags"` CodeVulnerabilityFilePath fwtypes.SetNestedObjectValueOf[stringFilterModel] `tfsdk:"code_vulnerability_file_path"` @@ -569,6 +575,8 @@ type filterCriteriaModel struct { EC2InstanceVpcId fwtypes.SetNestedObjectValueOf[stringFilterModel] `tfsdk:"ec2_instance_vpc_id"` ECRImageArchitecture fwtypes.SetNestedObjectValueOf[stringFilterModel] `tfsdk:"ecr_image_architecture"` ECRImageHash fwtypes.SetNestedObjectValueOf[stringFilterModel] `tfsdk:"ecr_image_hash"` + ECRImageInUseCount fwtypes.SetNestedObjectValueOf[numberFilterModel] `tfsdk:"ecr_image_in_use_count"` + ECRImageLastInUseAt fwtypes.SetNestedObjectValueOf[dateFilterModel] `tfsdk:"ecr_image_last_in_use_at"` ECRImagePushedAt fwtypes.SetNestedObjectValueOf[dateFilterModel] `tfsdk:"ecr_image_pushed_at"` ECRImageRegistry fwtypes.SetNestedObjectValueOf[stringFilterModel] `tfsdk:"ecr_image_registry"` ECRImageRepositoryName fwtypes.SetNestedObjectValueOf[stringFilterModel] `tfsdk:"ecr_image_repository_name"` diff --git a/internal/service/inspector2/filter_tags_gen_test.go b/internal/service/inspector2/filter_tags_gen_test.go index 383704ed81d1..455d96a56f69 100644 --- a/internal/service/inspector2/filter_tags_gen_test.go +++ b/internal/service/inspector2/filter_tags_gen_test.go @@ -7,7 +7,6 @@ import ( "github.com/aws/aws-sdk-go-v2/service/inspector2/types" "github.com/hashicorp/terraform-plugin-testing/config" - sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/knownvalue" "github.com/hashicorp/terraform-plugin-testing/plancheck" @@ -19,11 +18,12 @@ import ( func TestAccInspector2Filter_tags(t *testing.T) { 
ctx := acctest.Context(t) + var v types.Filter resourceName := "aws_inspector2_filter.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.Inspector2ServiceID), CheckDestroy: testAccCheckFilterDestroy(ctx), @@ -209,11 +209,12 @@ func TestAccInspector2Filter_tags(t *testing.T) { func TestAccInspector2Filter_tags_null(t *testing.T) { ctx := acctest.Context(t) + var v types.Filter resourceName := "aws_inspector2_filter.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.Inspector2ServiceID), CheckDestroy: testAccCheckFilterDestroy(ctx), @@ -273,11 +274,12 @@ func TestAccInspector2Filter_tags_null(t *testing.T) { func TestAccInspector2Filter_tags_EmptyMap(t *testing.T) { ctx := acctest.Context(t) + var v types.Filter resourceName := "aws_inspector2_filter.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.Inspector2ServiceID), CheckDestroy: testAccCheckFilterDestroy(ctx), @@ -325,11 +327,12 @@ func TestAccInspector2Filter_tags_EmptyMap(t *testing.T) { func TestAccInspector2Filter_tags_AddOnUpdate(t *testing.T) { ctx := acctest.Context(t) + var v types.Filter resourceName := "aws_inspector2_filter.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := 
acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.Inspector2ServiceID), CheckDestroy: testAccCheckFilterDestroy(ctx), @@ -407,11 +410,12 @@ func TestAccInspector2Filter_tags_AddOnUpdate(t *testing.T) { func TestAccInspector2Filter_tags_EmptyTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) + var v types.Filter resourceName := "aws_inspector2_filter.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.Inspector2ServiceID), CheckDestroy: testAccCheckFilterDestroy(ctx), @@ -501,11 +505,12 @@ func TestAccInspector2Filter_tags_EmptyTag_OnCreate(t *testing.T) { func TestAccInspector2Filter_tags_EmptyTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + var v types.Filter resourceName := "aws_inspector2_filter.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.Inspector2ServiceID), CheckDestroy: testAccCheckFilterDestroy(ctx), @@ -644,11 +649,12 @@ func TestAccInspector2Filter_tags_EmptyTag_OnUpdate_Add(t *testing.T) { func TestAccInspector2Filter_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + var v types.Filter resourceName := "aws_inspector2_filter.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + 
acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.Inspector2ServiceID), CheckDestroy: testAccCheckFilterDestroy(ctx), @@ -736,11 +742,12 @@ func TestAccInspector2Filter_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { func TestAccInspector2Filter_tags_DefaultTags_providerOnly(t *testing.T) { ctx := acctest.Context(t) + var v types.Filter resourceName := "aws_inspector2_filter.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.Inspector2ServiceID), CheckDestroy: testAccCheckFilterDestroy(ctx), @@ -925,11 +932,12 @@ func TestAccInspector2Filter_tags_DefaultTags_providerOnly(t *testing.T) { func TestAccInspector2Filter_tags_DefaultTags_nonOverlapping(t *testing.T) { ctx := acctest.Context(t) + var v types.Filter resourceName := "aws_inspector2_filter.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.Inspector2ServiceID), CheckDestroy: testAccCheckFilterDestroy(ctx), @@ -1091,11 +1099,12 @@ func TestAccInspector2Filter_tags_DefaultTags_nonOverlapping(t *testing.T) { func TestAccInspector2Filter_tags_DefaultTags_overlapping(t *testing.T) { ctx := acctest.Context(t) + var v types.Filter resourceName := "aws_inspector2_filter.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { 
acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.Inspector2ServiceID), CheckDestroy: testAccCheckFilterDestroy(ctx), @@ -1273,11 +1282,12 @@ func TestAccInspector2Filter_tags_DefaultTags_overlapping(t *testing.T) { func TestAccInspector2Filter_tags_DefaultTags_updateToProviderOnly(t *testing.T) { ctx := acctest.Context(t) + var v types.Filter resourceName := "aws_inspector2_filter.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.Inspector2ServiceID), CheckDestroy: testAccCheckFilterDestroy(ctx), @@ -1365,11 +1375,12 @@ func TestAccInspector2Filter_tags_DefaultTags_updateToProviderOnly(t *testing.T) func TestAccInspector2Filter_tags_DefaultTags_updateToResourceOnly(t *testing.T) { ctx := acctest.Context(t) + var v types.Filter resourceName := "aws_inspector2_filter.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.Inspector2ServiceID), CheckDestroy: testAccCheckFilterDestroy(ctx), @@ -1456,11 +1467,12 @@ func TestAccInspector2Filter_tags_DefaultTags_updateToResourceOnly(t *testing.T) func TestAccInspector2Filter_tags_DefaultTags_emptyResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v types.Filter resourceName := "aws_inspector2_filter.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: 
acctest.ErrorCheck(t, names.Inspector2ServiceID), CheckDestroy: testAccCheckFilterDestroy(ctx), @@ -1524,11 +1536,12 @@ func TestAccInspector2Filter_tags_DefaultTags_emptyResourceTag(t *testing.T) { func TestAccInspector2Filter_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T) { ctx := acctest.Context(t) + var v types.Filter resourceName := "aws_inspector2_filter.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.Inspector2ServiceID), CheckDestroy: testAccCheckFilterDestroy(ctx), @@ -1584,11 +1597,12 @@ func TestAccInspector2Filter_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T) func TestAccInspector2Filter_tags_DefaultTags_nullOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v types.Filter resourceName := "aws_inspector2_filter.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.Inspector2ServiceID), CheckDestroy: testAccCheckFilterDestroy(ctx), @@ -1655,11 +1669,12 @@ func TestAccInspector2Filter_tags_DefaultTags_nullOverlappingResourceTag(t *test func TestAccInspector2Filter_tags_DefaultTags_nullNonOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v types.Filter resourceName := "aws_inspector2_filter.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: 
acctest.ErrorCheck(t, names.Inspector2ServiceID), CheckDestroy: testAccCheckFilterDestroy(ctx), @@ -1728,11 +1743,12 @@ func TestAccInspector2Filter_tags_DefaultTags_nullNonOverlappingResourceTag(t *t func TestAccInspector2Filter_tags_ComputedTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) + var v types.Filter resourceName := "aws_inspector2_filter.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.Inspector2ServiceID), CheckDestroy: testAccCheckFilterDestroy(ctx), @@ -1785,11 +1801,12 @@ func TestAccInspector2Filter_tags_ComputedTag_OnCreate(t *testing.T) { func TestAccInspector2Filter_tags_ComputedTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + var v types.Filter resourceName := "aws_inspector2_filter.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.Inspector2ServiceID), CheckDestroy: testAccCheckFilterDestroy(ctx), @@ -1884,11 +1901,12 @@ func TestAccInspector2Filter_tags_ComputedTag_OnUpdate_Add(t *testing.T) { func TestAccInspector2Filter_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + var v types.Filter resourceName := "aws_inspector2_filter.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.Inspector2ServiceID), CheckDestroy: 
testAccCheckFilterDestroy(ctx), @@ -1973,11 +1991,12 @@ func TestAccInspector2Filter_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { func TestAccInspector2Filter_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { ctx := acctest.Context(t) + var v types.Filter resourceName := "aws_inspector2_filter.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.Inspector2ServiceID), CheckDestroy: testAccCheckFilterDestroy(ctx), @@ -2135,11 +2154,12 @@ func TestAccInspector2Filter_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { func TestAccInspector2Filter_tags_IgnoreTags_Overlap_ResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v types.Filter resourceName := "aws_inspector2_filter.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.Inspector2ServiceID), CheckDestroy: testAccCheckFilterDestroy(ctx), diff --git a/internal/service/inspector2/filter_test.go b/internal/service/inspector2/filter_test.go index 550ea556b395..d90c1dc07f67 100644 --- a/internal/service/inspector2/filter_test.go +++ b/internal/service/inspector2/filter_test.go @@ -181,6 +181,16 @@ func testAccInspector2Filter_stringFilters(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "reason", reason_1), resource.TestCheckResourceAttr(resourceName, names.AttrAction, action_1), resource.TestCheckResourceAttr(resourceName, "filter_criteria.#", "1"), + resource.TestCheckResourceAttr(resourceName, "filter_criteria.0.code_repository_project_name.#", "1"), + 
resource.TestCheckTypeSetElemNestedAttrs(resourceName, "filter_criteria.0.code_repository_project_name.*", map[string]string{ + "comparison": comparison_1, + names.AttrValue: value_1, + }), + resource.TestCheckResourceAttr(resourceName, "filter_criteria.0.code_repository_provider_type.#", "1"), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "filter_criteria.0.code_repository_provider_type.*", map[string]string{ + "comparison": comparison_1, + names.AttrValue: value_1, + }), resource.TestCheckResourceAttr(resourceName, "filter_criteria.0.code_vulnerability_detector_name.#", "1"), resource.TestCheckTypeSetElemNestedAttrs(resourceName, "filter_criteria.0.code_vulnerability_detector_name.*", map[string]string{ "comparison": comparison_1, @@ -205,6 +215,16 @@ func testAccInspector2Filter_stringFilters(t *testing.T) { resource.TestCheckResourceAttr(resourceName, names.AttrAction, action_2), resource.TestCheckResourceAttr(resourceName, "filter_criteria.#", "1"), resource.TestCheckResourceAttr(resourceName, "filter_criteria.0.code_vulnerability_detector_name.#", "1"), + resource.TestCheckResourceAttr(resourceName, "filter_criteria.0.code_repository_project_name.#", "1"), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "filter_criteria.0.code_repository_project_name.*", map[string]string{ + "comparison": comparison_2, + names.AttrValue: value_2, + }), + resource.TestCheckResourceAttr(resourceName, "filter_criteria.0.code_repository_provider_type.#", "1"), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "filter_criteria.0.code_repository_provider_type.*", map[string]string{ + "comparison": comparison_2, + names.AttrValue: value_2, + }), resource.TestCheckTypeSetElemNestedAttrs(resourceName, "filter_criteria.0.code_vulnerability_detector_name.*", map[string]string{ "comparison": comparison_2, names.AttrValue: value_2, @@ -257,6 +277,11 @@ func testAccInspector2Filter_numberFilters(t *testing.T) { resource.TestCheckResourceAttr(resourceName, 
"reason", reason_1), resource.TestCheckResourceAttr(resourceName, names.AttrAction, action_1), resource.TestCheckResourceAttr(resourceName, "filter_criteria.#", "1"), + resource.TestCheckResourceAttr(resourceName, "filter_criteria.0.ecr_image_in_use_count.#", "1"), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "filter_criteria.0.ecr_image_in_use_count.*", map[string]string{ + "lower_inclusive": lower_inclusive_value_1, + "upper_inclusive": upper_inclusive_value_1, + }), resource.TestCheckResourceAttr(resourceName, "filter_criteria.0.epss_score.#", "1"), resource.TestCheckTypeSetElemNestedAttrs(resourceName, "filter_criteria.0.epss_score.*", map[string]string{ "lower_inclusive": lower_inclusive_value_1, @@ -280,6 +305,11 @@ func testAccInspector2Filter_numberFilters(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "reason", reason_2), resource.TestCheckResourceAttr(resourceName, names.AttrAction, action_2), resource.TestCheckResourceAttr(resourceName, "filter_criteria.#", "1"), + resource.TestCheckResourceAttr(resourceName, "filter_criteria.0.ecr_image_in_use_count.#", "1"), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "filter_criteria.0.ecr_image_in_use_count.*", map[string]string{ + "lower_inclusive": lower_inclusive_value_2, + "upper_inclusive": upper_inclusive_value_2, + }), resource.TestCheckResourceAttr(resourceName, "filter_criteria.0.epss_score.#", "1"), resource.TestCheckTypeSetElemNestedAttrs(resourceName, "filter_criteria.0.epss_score.*", map[string]string{ "lower_inclusive": lower_inclusive_value_2, @@ -333,6 +363,11 @@ func testAccInspector2Filter_dateFilters(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "reason", reason_1), resource.TestCheckResourceAttr(resourceName, names.AttrAction, action_1), resource.TestCheckResourceAttr(resourceName, "filter_criteria.#", "1"), + resource.TestCheckResourceAttr(resourceName, "filter_criteria.0.ecr_image_last_in_use_at.#", "1"), + 
resource.TestCheckTypeSetElemNestedAttrs(resourceName, "filter_criteria.0.ecr_image_last_in_use_at.*", map[string]string{ + "start_inclusive": start_inclusive_value_1, + "end_inclusive": end_inclusive_value_1, + }), resource.TestCheckResourceAttr(resourceName, "filter_criteria.0.ecr_image_pushed_at.#", "1"), resource.TestCheckTypeSetElemNestedAttrs(resourceName, "filter_criteria.0.ecr_image_pushed_at.*", map[string]string{ "start_inclusive": start_inclusive_value_1, @@ -356,6 +391,11 @@ func testAccInspector2Filter_dateFilters(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "reason", reason_2), resource.TestCheckResourceAttr(resourceName, names.AttrAction, action_2), resource.TestCheckResourceAttr(resourceName, "filter_criteria.#", "1"), + resource.TestCheckResourceAttr(resourceName, "filter_criteria.0.ecr_image_last_in_use_at.#", "1"), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "filter_criteria.0.ecr_image_last_in_use_at.*", map[string]string{ + "start_inclusive": start_inclusive_value_2, + "end_inclusive": end_inclusive_value_2, + }), resource.TestCheckResourceAttr(resourceName, "filter_criteria.0.ecr_image_pushed_at.#", "1"), resource.TestCheckTypeSetElemNestedAttrs(resourceName, "filter_criteria.0.ecr_image_pushed_at.*", map[string]string{ "start_inclusive": start_inclusive_value_2, @@ -819,6 +859,14 @@ resource "aws_inspector2_filter" "test" { description = %[3]q reason = %[4]q filter_criteria { + code_repository_project_name { + comparison = %[5]q + value = %[6]q + } + code_repository_provider_type { + comparison = %[5]q + value = %[6]q + } code_vulnerability_detector_name { comparison = %[5]q value = %[6]q @@ -836,6 +884,10 @@ resource "aws_inspector2_filter" "test" { description = %[3]q reason = %[4]q filter_criteria { + ecr_image_in_use_count { + lower_inclusive = %[5]q + upper_inclusive = %[6]q + } epss_score { lower_inclusive = %[5]q upper_inclusive = %[6]q @@ -853,6 +905,10 @@ resource "aws_inspector2_filter" "test" { 
description = %[3]q reason = %[4]q filter_criteria { + ecr_image_last_in_use_at { + start_inclusive = %[5]q + end_inclusive = %[6]q + } ecr_image_pushed_at { start_inclusive = %[5]q end_inclusive = %[6]q diff --git a/internal/service/inspector2/inspector2_test.go b/internal/service/inspector2/inspector2_test.go index 5b1e1603f86f..da024e00b3c0 100644 --- a/internal/service/inspector2/inspector2_test.go +++ b/internal/service/inspector2/inspector2_test.go @@ -19,6 +19,7 @@ func TestAccInspector2_serial(t *testing.T) { acctest.CtDisappears: testAccEnabler_disappears, "lambda": testAccEnabler_lambda, "lambdaCode": testAccEnabler_lambdaCode, + "codeRepository": testAccEnabler_codeRepository, "updateResourceTypes": testAccEnabler_updateResourceTypes, "updateResourceTypes_disjoint": testAccEnabler_updateResourceTypes_disjoint, "memberAccount_basic": testAccEnabler_memberAccount_basic, @@ -56,6 +57,7 @@ func TestAccInspector2_serial(t *testing.T) { "ec2ECR": testAccOrganizationConfiguration_ec2ECR, "lambda": testAccOrganizationConfiguration_lambda, "lambdaCode": testAccOrganizationConfiguration_lambdaCode, + "codeRepository": testAccOrganizationConfiguration_codeRepository, }, } diff --git a/internal/service/inspector2/organization_configuration.go b/internal/service/inspector2/organization_configuration.go index debc1dd33519..0d1c82af6eb5 100644 --- a/internal/service/inspector2/organization_configuration.go +++ b/internal/service/inspector2/organization_configuration.go @@ -42,6 +42,11 @@ func resourceOrganizationConfiguration() *schema.Resource { MinItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ + "code_repository": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, "ec2": { Type: schema.TypeBool, Required: true, @@ -146,10 +151,11 @@ func resourceOrganizationConfigurationDelete(ctx context.Context, d *schema.Reso log.Printf("[DEBUG] Deleting Inspector2 Organization Configuration: %s", d.Id()) autoEnable := &awstypes.AutoEnable{ - 
Ec2: aws.Bool(false), - Ecr: aws.Bool(false), - Lambda: aws.Bool(false), - LambdaCode: aws.Bool(false), + CodeRepository: aws.Bool(false), + Ec2: aws.Bool(false), + Ecr: aws.Bool(false), + Lambda: aws.Bool(false), + LambdaCode: aws.Bool(false), } _, err := conn.UpdateOrganizationConfiguration(ctx, &inspector2.UpdateOrganizationConfigurationInput{ AutoEnable: autoEnable, @@ -203,6 +209,7 @@ func waitOrganizationConfigurationUpdated(ctx context.Context, conn *inspector2. equal = equal && aws.ToBool(output.AutoEnable.Ecr) == aws.ToBool(target.Ecr) equal = equal && aws.ToBool(output.AutoEnable.Lambda) == aws.ToBool(target.Lambda) equal = equal && aws.ToBool(output.AutoEnable.LambdaCode) == aws.ToBool(target.LambdaCode) + equal = equal && aws.ToBool(output.AutoEnable.CodeRepository) == aws.ToBool(target.CodeRepository) return equal, nil }) @@ -221,6 +228,10 @@ func flattenAutoEnable(apiObject *awstypes.AutoEnable) map[string]any { tfMap := map[string]any{} + if v := apiObject.CodeRepository; v != nil { + tfMap["code_repository"] = aws.ToBool(v) + } + if v := apiObject.Ec2; v != nil { tfMap["ec2"] = aws.ToBool(v) } @@ -247,6 +258,10 @@ func expandAutoEnable(tfMap map[string]any) *awstypes.AutoEnable { apiObject := &awstypes.AutoEnable{} + if v, ok := tfMap["code_repository"].(bool); ok { + apiObject.CodeRepository = aws.Bool(v) + } + if v, ok := tfMap["ec2"].(bool); ok { apiObject.Ec2 = aws.Bool(v) } diff --git a/internal/service/inspector2/organization_configuration_test.go b/internal/service/inspector2/organization_configuration_test.go index f3ba0a2a33ee..29fbb58e5382 100644 --- a/internal/service/inspector2/organization_configuration_test.go +++ b/internal/service/inspector2/organization_configuration_test.go @@ -156,6 +156,36 @@ func testAccOrganizationConfiguration_lambdaCode(t *testing.T) { }) } +func testAccOrganizationConfiguration_codeRepository(t *testing.T) { + ctx := acctest.Context(t) + resourceName := "aws_inspector2_organization_configuration.test" + + 
resource.Test(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, names.Inspector2EndpointID) + acctest.PreCheckInspector2(ctx, t) + acctest.PreCheckOrganizationManagementAccount(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.Inspector2ServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckOrganizationConfigurationDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccOrganizationConfigurationConfig_codeRepository(false, false, true), + Check: resource.ComposeTestCheckFunc( + testAccCheckOrganizationConfigurationExists(ctx, resourceName), + resource.TestCheckResourceAttr(resourceName, "auto_enable.0.code_repository", acctest.CtTrue), + resource.TestCheckResourceAttr(resourceName, "auto_enable.0.ec2", acctest.CtFalse), + resource.TestCheckResourceAttr(resourceName, "auto_enable.0.ecr", acctest.CtFalse), + resource.TestCheckResourceAttr(resourceName, "auto_enable.0.lambda", acctest.CtFalse), + resource.TestCheckResourceAttr(resourceName, "auto_enable.0.lambda_code", acctest.CtFalse), + ), + }, + }, + }) +} + func testAccCheckOrganizationConfigurationDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { conn := acctest.Provider.Meta().(*conns.AWSClient).Inspector2Client(ctx) @@ -233,3 +263,20 @@ resource "aws_inspector2_organization_configuration" "test" { } `, ec2, ecr, lambda, lambda_code) } + +func testAccOrganizationConfigurationConfig_codeRepository(ec2, ecr, codeRepository bool) string { + return fmt.Sprintf(` +data "aws_caller_identity" "current" {} +resource "aws_inspector2_delegated_admin_account" "test" { + account_id = data.aws_caller_identity.current.account_id +} +resource "aws_inspector2_organization_configuration" "test" { + auto_enable { + ec2 = %[1]t + ecr = %[2]t + code_repository = %[3]t + } + depends_on = [aws_inspector2_delegated_admin_account.test] +} +`, ec2, ecr, codeRepository) +} 
diff --git a/internal/service/inspector2/service_endpoint_resolver_gen.go b/internal/service/inspector2/service_endpoint_resolver_gen.go index 1a00552e67e7..674a6333d1b5 100644 --- a/internal/service/inspector2/service_endpoint_resolver_gen.go +++ b/internal/service/inspector2/service_endpoint_resolver_gen.go @@ -62,7 +62,7 @@ func (r resolverV2) ResolveEndpoint(ctx context.Context, params inspector2.Endpo }) params.UseFIPS = aws.Bool(false) } else { - err = fmt.Errorf("looking up inspector2 endpoint %q: %s", hostname, err) + err = fmt.Errorf("looking up inspector2 endpoint %q: %w", hostname, err) return } } else { diff --git a/internal/service/inspector2/service_endpoints_gen_test.go b/internal/service/inspector2/service_endpoints_gen_test.go index 4b5b454c6e84..9f3eba0adbf0 100644 --- a/internal/service/inspector2/service_endpoints_gen_test.go +++ b/internal/service/inspector2/service_endpoints_gen_test.go @@ -601,7 +601,7 @@ func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { ) } -var errCancelOperation = fmt.Errorf("Test: Canceling request") +var errCancelOperation = errors.New("Test: Canceling request") func addCancelRequestMiddleware() func(*middleware.Stack) error { return func(stack *middleware.Stack) error { diff --git a/internal/service/inspector2/service_package_gen.go b/internal/service/inspector2/service_package_gen.go index 3251b899686a..cbe28817d923 100644 --- a/internal/service/inspector2/service_package_gen.go +++ b/internal/service/inspector2/service_package_gen.go @@ -7,7 +7,6 @@ import ( "unique" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/inspector2" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -92,7 +91,7 @@ func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) ( func(o *inspector2.Options) { if inContext, ok := conns.FromContext(ctx); ok && 
inContext.VCREnabled() { tflog.Info(ctx, "overriding retry behavior to immediately return VCR errors") - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(vcr.InteractionNotFoundRetryableFunc)) + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), vcr.InteractionNotFoundRetryableFunc) } }, withExtraOptions(ctx, p, config), diff --git a/internal/service/inspector2/tags_gen.go b/internal/service/inspector2/tags_gen.go index ac404b1bbd63..0c2f5c6a5607 100644 --- a/internal/service/inspector2/tags_gen.go +++ b/internal/service/inspector2/tags_gen.go @@ -3,8 +3,8 @@ package inspector2 import ( "context" - "fmt" + "github.com/YakDriver/smarterr" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/inspector2" "github.com/hashicorp/terraform-plugin-log/tflog" @@ -26,7 +26,7 @@ func listTags(ctx context.Context, conn *inspector2.Client, identifier string, o output, err := conn.ListTagsForResource(ctx, &input, optFns...) if err != nil { - return tftags.New(ctx, nil), err + return tftags.New(ctx, nil), smarterr.NewError(err) } return keyValueTags(ctx, output.Tags), nil @@ -38,7 +38,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri tags, err := listTags(ctx, meta.(*conns.AWSClient).Inspector2Client(ctx), identifier) if err != nil { - return err + return smarterr.NewError(err) } if inContext, ok := tftags.FromContext(ctx); ok { @@ -99,7 +99,7 @@ func updateTags(ctx context.Context, conn *inspector2.Client, identifier string, _, err := conn.UntagResource(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("untagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } @@ -114,7 +114,7 @@ func updateTags(ctx context.Context, conn *inspector2.Client, identifier string, _, err := conn.TagResource(ctx, &input, optFns...) 
if err != nil { - return fmt.Errorf("tagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } diff --git a/internal/service/internetmonitor/monitor.go b/internal/service/internetmonitor/monitor.go index d14b2663e4f3..7985de91c0c5 100644 --- a/internal/service/internetmonitor/monitor.go +++ b/internal/service/internetmonitor/monitor.go @@ -15,13 +15,13 @@ import ( "github.com/hashicorp/aws-sdk-go-base/v2/tfawserr" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/enum" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" "github.com/hashicorp/terraform-provider-aws/internal/flex" + "github.com/hashicorp/terraform-provider-aws/internal/retry" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/internal/verify" @@ -215,7 +215,7 @@ func resourceMonitorRead(ctx context.Context, d *schema.ResourceData, meta any) monitor, err := findMonitorByName(ctx, conn, d.Id()) - if !d.IsNewResource() && tfresource.NotFound(err) { + if !d.IsNewResource() && retry.NotFound(err) { log.Printf("[WARN] Internet Monitor Monitor (%s) not found, removing from state", d.Id()) d.SetId("") return diags @@ -351,8 +351,7 @@ func findMonitorByName(ctx context.Context, conn *internetmonitor.Client, name s // if errs.IsA[*types.ResourceNotFoundException](err) { if tfawserr.ErrCodeEquals(err, errCodeResourceNotFoundException) { return nil, &retry.NotFoundError{ - LastError: err, - LastRequest: input, + LastError: err, } } @@ -367,8 +366,8 @@ func findMonitorByName(ctx context.Context, 
conn *internetmonitor.Client, name s return output, nil } -func statusMonitor(ctx context.Context, conn *internetmonitor.Client, name string) retry.StateRefreshFunc { - return func() (any, string, error) { +func statusMonitor(conn *internetmonitor.Client, name string) retry.StateRefreshFunc { + return func(ctx context.Context) (any, string, error) { monitor, err := findMonitorByName(ctx, conn, name) if tfresource.NotFound(err) { @@ -390,7 +389,7 @@ func waitMonitor(ctx context.Context, conn *internetmonitor.Client, name string, stateConf := &retry.StateChangeConf{ Pending: enum.Slice(types.MonitorConfigStatePending), Target: enum.Slice(targetState), - Refresh: statusMonitor(ctx, conn, name), + Refresh: statusMonitor(conn, name), Timeout: timeout, Delay: 10 * time.Second, } @@ -399,7 +398,7 @@ func waitMonitor(ctx context.Context, conn *internetmonitor.Client, name string, if output, ok := outputRaw.(*internetmonitor.GetMonitorOutput); ok { if status := output.Status; status == types.MonitorConfigStateError { - tfresource.SetLastError(err, errors.New(aws.ToString(output.ProcessingStatusInfo))) + retry.SetLastError(err, errors.New(aws.ToString(output.ProcessingStatusInfo))) } return err diff --git a/internal/service/internetmonitor/monitor_test.go b/internal/service/internetmonitor/monitor_test.go index e3c798a4ebeb..8269464352d5 100644 --- a/internal/service/internetmonitor/monitor_test.go +++ b/internal/service/internetmonitor/monitor_test.go @@ -9,31 +9,29 @@ import ( "testing" "github.com/YakDriver/regexache" - sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" "github.com/hashicorp/terraform-provider-aws/internal/acctest" - "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/retry" tfinternetmonitor 
"github.com/hashicorp/terraform-provider-aws/internal/service/internetmonitor" - "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/names" ) func TestAccInternetMonitorMonitor_basic(t *testing.T) { ctx := acctest.Context(t) - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) resourceName := "aws_internetmonitor_monitor.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.InternetMonitorServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckMonitorDestroy(ctx), + CheckDestroy: testAccCheckMonitorDestroy(ctx, t), Steps: []resource.TestStep{ { Config: testAccMonitorConfig_basic(rName), Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckMonitorExists(ctx, resourceName), + testAccCheckMonitorExists(ctx, t, resourceName), acctest.MatchResourceAttrRegionalARN(ctx, resourceName, names.AttrARN, "internetmonitor", regexache.MustCompile(`monitor/.+$`)), resource.TestCheckResourceAttr(resourceName, "health_events_config.#", "0"), resource.TestCheckResourceAttr(resourceName, "internet_measurements_log_delivery.#", "1"), @@ -53,7 +51,7 @@ func TestAccInternetMonitorMonitor_basic(t *testing.T) { { Config: testAccMonitorConfig_status(rName, "INACTIVE"), Check: resource.ComposeTestCheckFunc( - testAccCheckMonitorExists(ctx, resourceName), + testAccCheckMonitorExists(ctx, t, resourceName), resource.TestCheckResourceAttr(resourceName, names.AttrStatus, "INACTIVE"), ), }, @@ -63,19 +61,19 @@ func TestAccInternetMonitorMonitor_basic(t *testing.T) { func TestAccInternetMonitorMonitor_disappears(t *testing.T) { ctx := acctest.Context(t) - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) 
resourceName := "aws_internetmonitor_monitor.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.InternetMonitorServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckMonitorDestroy(ctx), + CheckDestroy: testAccCheckMonitorDestroy(ctx, t), Steps: []resource.TestStep{ { Config: testAccMonitorConfig_basic(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckMonitorExists(ctx, resourceName), + testAccCheckMonitorExists(ctx, t, resourceName), acctest.CheckResourceDisappears(ctx, acctest.Provider, tfinternetmonitor.ResourceMonitor(), resourceName), ), ExpectNonEmptyPlan: true, @@ -86,19 +84,19 @@ func TestAccInternetMonitorMonitor_disappears(t *testing.T) { func TestAccInternetMonitorMonitor_tags(t *testing.T) { ctx := acctest.Context(t) - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) resourceName := "aws_internetmonitor_monitor.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.InternetMonitorServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckMonitorDestroy(ctx), + CheckDestroy: testAccCheckMonitorDestroy(ctx, t), Steps: []resource.TestStep{ { Config: testAccMonitorConfig_tags1(rName, acctest.CtKey1, acctest.CtValue1), Check: resource.ComposeTestCheckFunc( - testAccCheckMonitorExists(ctx, resourceName), + testAccCheckMonitorExists(ctx, t, resourceName), resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, "1"), resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey1, acctest.CtValue1), ), @@ -111,7 +109,7 @@ func TestAccInternetMonitorMonitor_tags(t *testing.T) { { Config: 
testAccMonitorConfig_tags2(rName, acctest.CtKey1, acctest.CtValue1Updated, acctest.CtKey2, acctest.CtValue2), Check: resource.ComposeTestCheckFunc( - testAccCheckMonitorExists(ctx, resourceName), + testAccCheckMonitorExists(ctx, t, resourceName), resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, "2"), resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey1, acctest.CtValue1Updated), resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey2, acctest.CtValue2), @@ -120,7 +118,7 @@ func TestAccInternetMonitorMonitor_tags(t *testing.T) { { Config: testAccMonitorConfig_tags1(rName, acctest.CtKey2, acctest.CtValue2), Check: resource.ComposeTestCheckFunc( - testAccCheckMonitorExists(ctx, resourceName), + testAccCheckMonitorExists(ctx, t, resourceName), resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, "1"), resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey2, acctest.CtValue2), ), @@ -131,19 +129,19 @@ func TestAccInternetMonitorMonitor_tags(t *testing.T) { func TestAccInternetMonitorMonitor_healthEventsConfig(t *testing.T) { ctx := acctest.Context(t) - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) resourceName := "aws_internetmonitor_monitor.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.InternetMonitorServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckMonitorDestroy(ctx), + CheckDestroy: testAccCheckMonitorDestroy(ctx, t), Steps: []resource.TestStep{ { Config: testAccMonitorConfig_healthEventsConfig(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckMonitorExists(ctx, resourceName), + testAccCheckMonitorExists(ctx, t, resourceName), resource.TestCheckResourceAttr(resourceName, "health_events_config.#", "1"), 
resource.TestCheckResourceAttr(resourceName, "health_events_config.0.availability_score_threshold", "50"), resource.TestCheckResourceAttr(resourceName, "health_events_config.0.performance_score_threshold", "95"), @@ -157,7 +155,7 @@ func TestAccInternetMonitorMonitor_healthEventsConfig(t *testing.T) { { Config: testAccMonitorConfig_healthEventsConfigUpdated(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckMonitorExists(ctx, resourceName), + testAccCheckMonitorExists(ctx, t, resourceName), resource.TestCheckResourceAttr(resourceName, "health_events_config.#", "1"), resource.TestCheckResourceAttr(resourceName, "health_events_config.0.availability_score_threshold", "75"), resource.TestCheckResourceAttr(resourceName, "health_events_config.0.performance_score_threshold", "85"), @@ -169,19 +167,19 @@ func TestAccInternetMonitorMonitor_healthEventsConfig(t *testing.T) { func TestAccInternetMonitorMonitor_log(t *testing.T) { ctx := acctest.Context(t) - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) resourceName := "aws_internetmonitor_monitor.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.InternetMonitorServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckMonitorDestroy(ctx), + CheckDestroy: testAccCheckMonitorDestroy(ctx, t), Steps: []resource.TestStep{ { Config: testAccMonitorConfig_log(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckMonitorExists(ctx, resourceName), + testAccCheckMonitorExists(ctx, t, resourceName), resource.TestCheckResourceAttr(resourceName, "internet_measurements_log_delivery.#", "1"), resource.TestCheckResourceAttr(resourceName, "internet_measurements_log_delivery.0.s3_config.#", "1"), resource.TestCheckResourceAttr(resourceName, 
"internet_measurements_log_delivery.0.s3_config.0.bucket_name", rName), @@ -196,9 +194,9 @@ func TestAccInternetMonitorMonitor_log(t *testing.T) { }) } -func testAccCheckMonitorDestroy(ctx context.Context) resource.TestCheckFunc { +func testAccCheckMonitorDestroy(ctx context.Context, t *testing.T) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).InternetMonitorClient(ctx) + conn := acctest.ProviderMeta(ctx, t).InternetMonitorClient(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != "aws_internetmonitor_monitor" { @@ -207,7 +205,7 @@ func testAccCheckMonitorDestroy(ctx context.Context) resource.TestCheckFunc { _, err := tfinternetmonitor.FindMonitorByName(ctx, conn, rs.Primary.ID) - if tfresource.NotFound(err) { + if retry.NotFound(err) { continue } @@ -222,14 +220,14 @@ func testAccCheckMonitorDestroy(ctx context.Context) resource.TestCheckFunc { } } -func testAccCheckMonitorExists(ctx context.Context, n string) resource.TestCheckFunc { +func testAccCheckMonitorExists(ctx context.Context, t *testing.T, n string) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] if !ok { return fmt.Errorf("Not found: %s", n) } - conn := acctest.Provider.Meta().(*conns.AWSClient).InternetMonitorClient(ctx) + conn := acctest.ProviderMeta(ctx, t).InternetMonitorClient(ctx) _, err := tfinternetmonitor.FindMonitorByName(ctx, conn, rs.Primary.ID) diff --git a/internal/service/internetmonitor/service_endpoint_resolver_gen.go b/internal/service/internetmonitor/service_endpoint_resolver_gen.go index e31c6e0a78ef..6686ee041234 100644 --- a/internal/service/internetmonitor/service_endpoint_resolver_gen.go +++ b/internal/service/internetmonitor/service_endpoint_resolver_gen.go @@ -62,7 +62,7 @@ func (r resolverV2) ResolveEndpoint(ctx context.Context, params internetmonitor. 
}) params.UseFIPS = aws.Bool(false) } else { - err = fmt.Errorf("looking up internetmonitor endpoint %q: %s", hostname, err) + err = fmt.Errorf("looking up internetmonitor endpoint %q: %w", hostname, err) return } } else { diff --git a/internal/service/internetmonitor/service_endpoints_gen_test.go b/internal/service/internetmonitor/service_endpoints_gen_test.go index c43b670ed336..9b650258d553 100644 --- a/internal/service/internetmonitor/service_endpoints_gen_test.go +++ b/internal/service/internetmonitor/service_endpoints_gen_test.go @@ -521,7 +521,7 @@ func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { ) } -var errCancelOperation = fmt.Errorf("Test: Canceling request") +var errCancelOperation = errors.New("Test: Canceling request") func addCancelRequestMiddleware() func(*middleware.Stack) error { return func(stack *middleware.Stack) error { diff --git a/internal/service/internetmonitor/service_package_gen.go b/internal/service/internetmonitor/service_package_gen.go index 1261787ec223..2a7091f94f36 100644 --- a/internal/service/internetmonitor/service_package_gen.go +++ b/internal/service/internetmonitor/service_package_gen.go @@ -7,7 +7,6 @@ import ( "unique" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/internetmonitor" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -67,7 +66,7 @@ func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) ( func(o *internetmonitor.Options) { if inContext, ok := conns.FromContext(ctx); ok && inContext.VCREnabled() { tflog.Info(ctx, "overriding retry behavior to immediately return VCR errors") - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(vcr.InteractionNotFoundRetryableFunc)) + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), vcr.InteractionNotFoundRetryableFunc) } }, 
withExtraOptions(ctx, p, config), diff --git a/internal/service/internetmonitor/sweep.go b/internal/service/internetmonitor/sweep.go index 19dcc1fa4b6a..14dced4a04ed 100644 --- a/internal/service/internetmonitor/sweep.go +++ b/internal/service/internetmonitor/sweep.go @@ -25,7 +25,7 @@ func sweepMonitors(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } input := &internetmonitor.ListMonitorsInput{} conn := client.InternetMonitorClient(ctx) diff --git a/internal/service/internetmonitor/tags_gen.go b/internal/service/internetmonitor/tags_gen.go index f15bf9589f6c..f6d0c934570e 100644 --- a/internal/service/internetmonitor/tags_gen.go +++ b/internal/service/internetmonitor/tags_gen.go @@ -3,8 +3,8 @@ package internetmonitor import ( "context" - "fmt" + "github.com/YakDriver/smarterr" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/internetmonitor" "github.com/hashicorp/terraform-plugin-log/tflog" @@ -26,7 +26,7 @@ func listTags(ctx context.Context, conn *internetmonitor.Client, identifier stri output, err := conn.ListTagsForResource(ctx, &input, optFns...) if err != nil { - return tftags.New(ctx, nil), err + return tftags.New(ctx, nil), smarterr.NewError(err) } return keyValueTags(ctx, output.Tags), nil @@ -38,7 +38,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri tags, err := listTags(ctx, meta.(*conns.AWSClient).InternetMonitorClient(ctx), identifier) if err != nil { - return err + return smarterr.NewError(err) } if inContext, ok := tftags.FromContext(ctx); ok { @@ -99,7 +99,7 @@ func updateTags(ctx context.Context, conn *internetmonitor.Client, identifier st _, err := conn.UntagResource(ctx, &input, optFns...) 
if err != nil { - return fmt.Errorf("untagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } @@ -114,7 +114,7 @@ func updateTags(ctx context.Context, conn *internetmonitor.Client, identifier st _, err := conn.TagResource(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("tagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } diff --git a/internal/service/invoicing/service_endpoint_resolver_gen.go b/internal/service/invoicing/service_endpoint_resolver_gen.go index 506bd7d41312..261586c0da88 100644 --- a/internal/service/invoicing/service_endpoint_resolver_gen.go +++ b/internal/service/invoicing/service_endpoint_resolver_gen.go @@ -62,7 +62,7 @@ func (r resolverV2) ResolveEndpoint(ctx context.Context, params invoicing.Endpoi }) params.UseFIPS = aws.Bool(false) } else { - err = fmt.Errorf("looking up invoicing endpoint %q: %s", hostname, err) + err = fmt.Errorf("looking up invoicing endpoint %q: %w", hostname, err) return } } else { diff --git a/internal/service/invoicing/service_endpoints_gen_test.go b/internal/service/invoicing/service_endpoints_gen_test.go index 4afc0e43aa6e..f3ba3bcf2bdf 100644 --- a/internal/service/invoicing/service_endpoints_gen_test.go +++ b/internal/service/invoicing/service_endpoints_gen_test.go @@ -521,7 +521,7 @@ func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { ) } -var errCancelOperation = fmt.Errorf("Test: Canceling request") +var errCancelOperation = errors.New("Test: Canceling request") func addCancelRequestMiddleware() func(*middleware.Stack) error { return func(stack *middleware.Stack) error { diff --git a/internal/service/invoicing/service_package_gen.go b/internal/service/invoicing/service_package_gen.go index e15a533c083d..85976d6d6f67 100644 --- a/internal/service/invoicing/service_package_gen.go +++ b/internal/service/invoicing/service_package_gen.go @@ -6,7 +6,6 @@ import ( "context" "github.com/aws/aws-sdk-go-v2/aws" - 
"github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/invoicing" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -56,7 +55,7 @@ func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) ( func(o *invoicing.Options) { if inContext, ok := conns.FromContext(ctx); ok && inContext.VCREnabled() { tflog.Info(ctx, "overriding retry behavior to immediately return VCR errors") - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(vcr.InteractionNotFoundRetryableFunc)) + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), vcr.InteractionNotFoundRetryableFunc) } }, withExtraOptions(ctx, p, config), diff --git a/internal/service/iot/ca_certificate.go b/internal/service/iot/ca_certificate.go index 15161a428b60..7ce457d35b3e 100644 --- a/internal/service/iot/ca_certificate.go +++ b/internal/service/iot/ca_certificate.go @@ -160,7 +160,7 @@ func resourceCACertificateCreate(ctx context.Context, d *schema.ResourceData, me input.VerificationCertificate = aws.String(v.(string)) } - outputRaw, err := tfresource.RetryWhenIsA[*awstypes.InvalidRequestException](ctx, propagationTimeout, func() (any, error) { + outputRaw, err := tfresource.RetryWhenIsA[any, *awstypes.InvalidRequestException](ctx, propagationTimeout, func(ctx context.Context) (any, error) { return conn.RegisterCACertificate(ctx, input) }) @@ -242,7 +242,7 @@ func resourceCACertificateUpdate(ctx context.Context, d *schema.ResourceData, me } } - _, err := tfresource.RetryWhenIsA[*awstypes.InvalidRequestException](ctx, propagationTimeout, func() (any, error) { + _, err := tfresource.RetryWhenIsA[any, *awstypes.InvalidRequestException](ctx, propagationTimeout, func(ctx context.Context) (any, error) { return conn.UpdateCACertificate(ctx, input) }) diff --git a/internal/service/iot/event_configurations_identity_gen_test.go 
b/internal/service/iot/event_configurations_identity_gen_test.go index 519c3557c20c..a9931e847e6a 100644 --- a/internal/service/iot/event_configurations_identity_gen_test.go +++ b/internal/service/iot/event_configurations_identity_gen_test.go @@ -15,6 +15,7 @@ import ( "github.com/hashicorp/terraform-plugin-testing/tfversion" "github.com/hashicorp/terraform-provider-aws/internal/acctest" tfknownvalue "github.com/hashicorp/terraform-provider-aws/internal/acctest/knownvalue" + tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -22,9 +23,10 @@ func testAccIoTEventConfigurations_IdentitySerial(t *testing.T) { t.Helper() testCases := map[string]func(t *testing.T){ - acctest.CtBasic: testAccIoTEventConfigurations_Identity_Basic, - "ExistingResource": testAccIoTEventConfigurations_Identity_ExistingResource, - "RegionOverride": testAccIoTEventConfigurations_Identity_RegionOverride, + acctest.CtBasic: testAccIoTEventConfigurations_Identity_Basic, + "ExistingResource": testAccIoTEventConfigurations_Identity_ExistingResource, + "ExistingResourceNoRefresh": testAccIoTEventConfigurations_Identity_ExistingResource_NoRefresh_NoChange, + "RegionOverride": testAccIoTEventConfigurations_Identity_RegionOverride, } acctest.RunSerialTests1Level(t, testCases, 0) @@ -32,9 +34,10 @@ func testAccIoTEventConfigurations_IdentitySerial(t *testing.T) { func testAccIoTEventConfigurations_Identity_Basic(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_iot_event_configurations.test" - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ TerraformVersionChecks: []tfversion.TerraformVersionCheck{ tfversion.SkipBelow(tfversion.Version1_12_0), }, @@ -105,7 +108,7 @@ func testAccIoTEventConfigurations_Identity_RegionOverride(t *testing.T) { resourceName := "aws_iot_event_configurations.test" - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ 
TerraformVersionChecks: []tfversion.TerraformVersionCheck{ tfversion.SkipBelow(tfversion.Version1_12_0), }, @@ -209,3 +212,106 @@ func testAccIoTEventConfigurations_Identity_RegionOverride(t *testing.T) { }, }) } + +func testAccIoTEventConfigurations_Identity_ExistingResource(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_iot_event_configurations.test" + + acctest.Test(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.IoTServiceID), + CheckDestroy: acctest.CheckDestroyNoop, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/EventConfigurations/basic_v5.100.0/"), + ConfigVariables: config.Variables{}, + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: v6.0 Identity error + { + ConfigDirectory: config.StaticDirectory("testdata/EventConfigurations/basic_v6.0.0/"), + ConfigVariables: config.Variables{}, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: knownvalue.Null(), + names.AttrRegion: knownvalue.Null(), + }), + }, + }, + + // Step 3: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/EventConfigurations/basic/"), + ConfigVariables: config.Variables{}, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + 
plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + }), + }, + }, + }, + }) +} + +func testAccIoTEventConfigurations_Identity_ExistingResource_NoRefresh_NoChange(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_iot_event_configurations.test" + + acctest.Test(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.IoTServiceID), + CheckDestroy: acctest.CheckDestroyNoop, + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/EventConfigurations/basic_v5.100.0/"), + ConfigVariables: config.Variables{}, + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/EventConfigurations/basic/"), + ConfigVariables: config.Variables{}, + }, + }, + }) +} diff --git a/internal/service/iot/event_configurations_test.go b/internal/service/iot/event_configurations_test.go index 8d4368e0b210..ebc35de9cabd 100644 --- a/internal/service/iot/event_configurations_test.go +++ b/internal/service/iot/event_configurations_test.go @@ -7,13 +7,7 @@ import ( "testing" "github.com/hashicorp/terraform-plugin-testing/helper/resource" - 
"github.com/hashicorp/terraform-plugin-testing/knownvalue" - "github.com/hashicorp/terraform-plugin-testing/plancheck" - "github.com/hashicorp/terraform-plugin-testing/statecheck" - "github.com/hashicorp/terraform-plugin-testing/tfversion" "github.com/hashicorp/terraform-provider-aws/internal/acctest" - tfknownvalue "github.com/hashicorp/terraform-provider-aws/internal/acctest/knownvalue" - tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -81,72 +75,3 @@ resource "aws_iot_event_configurations" "test" { } } ` - -func testAccIoTEventConfigurations_Identity_ExistingResource(t *testing.T) { - ctx := acctest.Context(t) - resourceName := "aws_iot_event_configurations.test" - - resource.Test(t, resource.TestCase{ - TerraformVersionChecks: []tfversion.TerraformVersionCheck{ - tfversion.SkipBelow(tfversion.Version1_12_0), - }, - PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, names.IoTServiceID), - CheckDestroy: acctest.CheckDestroyNoop, - Steps: []resource.TestStep{ - { - ExternalProviders: map[string]resource.ExternalProvider{ - "aws": { - Source: "hashicorp/aws", - VersionConstraint: "5.100.0", - }, - }, - Config: testAccEventConfigurationsConfig_basic, - ConfigStateChecks: []statecheck.StateCheck{ - tfstatecheck.ExpectNoIdentity(resourceName), - }, - }, - { - ExternalProviders: map[string]resource.ExternalProvider{ - "aws": { - Source: "hashicorp/aws", - VersionConstraint: "6.0.0", - }, - }, - Config: testAccEventConfigurationsConfig_basic, - ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - PostApplyPostRefresh: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - }, - ConfigStateChecks: []statecheck.StateCheck{ - statecheck.ExpectIdentity(resourceName, 
map[string]knownvalue.Check{ - names.AttrAccountID: knownvalue.Null(), - names.AttrRegion: knownvalue.Null(), - }), - }, - }, - { - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - Config: testAccEventConfigurationsConfig_basic, - ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - PostApplyPostRefresh: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - }, - ConfigStateChecks: []statecheck.StateCheck{ - statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ - names.AttrAccountID: tfknownvalue.AccountID(), - names.AttrRegion: knownvalue.StringExact(acctest.Region()), - }), - }, - }, - }, - }) -} diff --git a/internal/service/iot/indexing_configuration.go b/internal/service/iot/indexing_configuration.go index 3d3bdf4cbcfa..32ad055d5cf2 100644 --- a/internal/service/iot/indexing_configuration.go +++ b/internal/service/iot/indexing_configuration.go @@ -292,7 +292,7 @@ func flattenIndexingFilter(apiObject *awstypes.IndexingFilter) map[string]any { tfMap := map[string]any{} if v := apiObject.NamedShadowNames; v != nil { - tfMap["named_shadow_names"] = aws.StringSlice(v) + tfMap["named_shadow_names"] = v } return tfMap diff --git a/internal/service/iot/indexing_configuration_identity_gen_test.go b/internal/service/iot/indexing_configuration_identity_gen_test.go index 1123e0ea6d75..2122ac981ed6 100644 --- a/internal/service/iot/indexing_configuration_identity_gen_test.go +++ b/internal/service/iot/indexing_configuration_identity_gen_test.go @@ -15,6 +15,7 @@ import ( "github.com/hashicorp/terraform-plugin-testing/tfversion" "github.com/hashicorp/terraform-provider-aws/internal/acctest" tfknownvalue "github.com/hashicorp/terraform-provider-aws/internal/acctest/knownvalue" + tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" 
"github.com/hashicorp/terraform-provider-aws/names" ) @@ -22,9 +23,10 @@ func testAccIoTIndexingConfiguration_IdentitySerial(t *testing.T) { t.Helper() testCases := map[string]func(t *testing.T){ - acctest.CtBasic: testAccIoTIndexingConfiguration_Identity_Basic, - "ExistingResource": testAccIoTIndexingConfiguration_Identity_ExistingResource, - "RegionOverride": testAccIoTIndexingConfiguration_Identity_RegionOverride, + acctest.CtBasic: testAccIoTIndexingConfiguration_Identity_Basic, + "ExistingResource": testAccIoTIndexingConfiguration_Identity_ExistingResource, + "ExistingResourceNoRefresh": testAccIoTIndexingConfiguration_Identity_ExistingResource_NoRefresh_NoChange, + "RegionOverride": testAccIoTIndexingConfiguration_Identity_RegionOverride, } acctest.RunSerialTests1Level(t, testCases, 0) @@ -32,9 +34,10 @@ func testAccIoTIndexingConfiguration_IdentitySerial(t *testing.T) { func testAccIoTIndexingConfiguration_Identity_Basic(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_iot_indexing_configuration.test" - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ TerraformVersionChecks: []tfversion.TerraformVersionCheck{ tfversion.SkipBelow(tfversion.Version1_12_0), }, @@ -105,7 +108,7 @@ func testAccIoTIndexingConfiguration_Identity_RegionOverride(t *testing.T) { resourceName := "aws_iot_indexing_configuration.test" - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ TerraformVersionChecks: []tfversion.TerraformVersionCheck{ tfversion.SkipBelow(tfversion.Version1_12_0), }, @@ -209,3 +212,106 @@ func testAccIoTIndexingConfiguration_Identity_RegionOverride(t *testing.T) { }, }) } + +func testAccIoTIndexingConfiguration_Identity_ExistingResource(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_iot_indexing_configuration.test" + + acctest.Test(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, 
+ PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.IoTServiceID), + CheckDestroy: acctest.CheckDestroyNoop, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/IndexingConfiguration/basic_v5.100.0/"), + ConfigVariables: config.Variables{}, + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: v6.0 Identity error + { + ConfigDirectory: config.StaticDirectory("testdata/IndexingConfiguration/basic_v6.0.0/"), + ConfigVariables: config.Variables{}, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: knownvalue.Null(), + names.AttrRegion: knownvalue.Null(), + }), + }, + }, + + // Step 3: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/IndexingConfiguration/basic/"), + ConfigVariables: config.Variables{}, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + }), + }, + }, + }, + }) +} + +func testAccIoTIndexingConfiguration_Identity_ExistingResource_NoRefresh_NoChange(t 
*testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_iot_indexing_configuration.test" + + acctest.Test(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.IoTServiceID), + CheckDestroy: acctest.CheckDestroyNoop, + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/IndexingConfiguration/basic_v5.100.0/"), + ConfigVariables: config.Variables{}, + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/IndexingConfiguration/basic/"), + ConfigVariables: config.Variables{}, + }, + }, + }) +} diff --git a/internal/service/iot/indexing_configuration_test.go b/internal/service/iot/indexing_configuration_test.go index c94c61e38218..3bbc91f64fdc 100644 --- a/internal/service/iot/indexing_configuration_test.go +++ b/internal/service/iot/indexing_configuration_test.go @@ -7,13 +7,7 @@ import ( "testing" "github.com/hashicorp/terraform-plugin-testing/helper/resource" - "github.com/hashicorp/terraform-plugin-testing/knownvalue" - "github.com/hashicorp/terraform-plugin-testing/plancheck" - "github.com/hashicorp/terraform-plugin-testing/statecheck" - "github.com/hashicorp/terraform-plugin-testing/tfversion" "github.com/hashicorp/terraform-provider-aws/internal/acctest" - tfknownvalue "github.com/hashicorp/terraform-provider-aws/internal/acctest/knownvalue" - tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -157,72 +151,3 @@ 
resource "aws_iot_indexing_configuration" "test" { } } ` - -func testAccIoTIndexingConfiguration_Identity_ExistingResource(t *testing.T) { - ctx := acctest.Context(t) - resourceName := "aws_iot_indexing_configuration.test" - - resource.Test(t, resource.TestCase{ - TerraformVersionChecks: []tfversion.TerraformVersionCheck{ - tfversion.SkipBelow(tfversion.Version1_12_0), - }, - PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, names.IoTServiceID), - CheckDestroy: acctest.CheckDestroyNoop, - Steps: []resource.TestStep{ - { - ExternalProviders: map[string]resource.ExternalProvider{ - "aws": { - Source: "hashicorp/aws", - VersionConstraint: "5.100.0", - }, - }, - Config: testAccIndexingConfigurationConfig_basic, - ConfigStateChecks: []statecheck.StateCheck{ - tfstatecheck.ExpectNoIdentity(resourceName), - }, - }, - { - ExternalProviders: map[string]resource.ExternalProvider{ - "aws": { - Source: "hashicorp/aws", - VersionConstraint: "6.0.0", - }, - }, - Config: testAccIndexingConfigurationConfig_basic, - ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - PostApplyPostRefresh: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - }, - ConfigStateChecks: []statecheck.StateCheck{ - statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ - names.AttrAccountID: knownvalue.Null(), - names.AttrRegion: knownvalue.Null(), - }), - }, - }, - { - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - Config: testAccIndexingConfigurationConfig_basic, - ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - PostApplyPostRefresh: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - }, - ConfigStateChecks: 
[]statecheck.StateCheck{ - statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ - names.AttrAccountID: tfknownvalue.AccountID(), - names.AttrRegion: knownvalue.StringExact(acctest.Region()), - }), - }, - }, - }, - }) -} diff --git a/internal/service/iot/logging_options.go b/internal/service/iot/logging_options.go index e0e3ca462dde..f77fbd0940d2 100644 --- a/internal/service/iot/logging_options.go +++ b/internal/service/iot/logging_options.go @@ -70,7 +70,7 @@ func resourceLoggingOptionsPut(ctx context.Context, d *schema.ResourceData, meta input.RoleArn = aws.String(v.(string)) } - _, err := tfresource.RetryWhenIsA[*awstypes.InvalidRequestException](ctx, propagationTimeout, func() (any, error) { + _, err := tfresource.RetryWhenIsA[any, *awstypes.InvalidRequestException](ctx, propagationTimeout, func(ctx context.Context) (any, error) { return conn.SetV2LoggingOptions(ctx, input) }) diff --git a/internal/service/iot/logging_options_identity_gen_test.go b/internal/service/iot/logging_options_identity_gen_test.go index ca84afc8662f..dc4385a99c5b 100644 --- a/internal/service/iot/logging_options_identity_gen_test.go +++ b/internal/service/iot/logging_options_identity_gen_test.go @@ -10,11 +10,13 @@ import ( sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/knownvalue" + "github.com/hashicorp/terraform-plugin-testing/plancheck" "github.com/hashicorp/terraform-plugin-testing/statecheck" "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" "github.com/hashicorp/terraform-plugin-testing/tfversion" "github.com/hashicorp/terraform-provider-aws/internal/acctest" tfknownvalue "github.com/hashicorp/terraform-provider-aws/internal/acctest/knownvalue" + tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -22,9 +24,10 @@ func 
testAccIoTLoggingOptions_IdentitySerial(t *testing.T) { t.Helper() testCases := map[string]func(t *testing.T){ - acctest.CtBasic: testAccIoTLoggingOptions_Identity_Basic, - "ExistingResource": testAccIoTLoggingOptions_Identity_ExistingResource, - "RegionOverride": testAccIoTLoggingOptions_Identity_RegionOverride, + acctest.CtBasic: testAccIoTLoggingOptions_Identity_Basic, + "ExistingResource": testAccIoTLoggingOptions_Identity_ExistingResource, + "ExistingResourceNoRefresh": testAccIoTLoggingOptions_Identity_ExistingResource_NoRefresh_NoChange, + "RegionOverride": testAccIoTLoggingOptions_Identity_RegionOverride, } acctest.RunSerialTests1Level(t, testCases, 0) @@ -32,10 +35,11 @@ func testAccIoTLoggingOptions_IdentitySerial(t *testing.T) { func testAccIoTLoggingOptions_Identity_Basic(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_iot_logging_options.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ TerraformVersionChecks: []tfversion.TerraformVersionCheck{ tfversion.SkipBelow(tfversion.Version1_12_0), }, @@ -69,7 +73,7 @@ func testAccIoTLoggingOptions_Identity_RegionOverride(t *testing.T) { resourceName := "aws_iot_logging_options.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ TerraformVersionChecks: []tfversion.TerraformVersionCheck{ tfversion.SkipBelow(tfversion.Version1_12_0), }, @@ -97,3 +101,118 @@ func testAccIoTLoggingOptions_Identity_RegionOverride(t *testing.T) { }, }) } + +func testAccIoTLoggingOptions_Identity_ExistingResource(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_iot_logging_options.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.Test(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + 
PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.IoTServiceID), + CheckDestroy: acctest.CheckDestroyNoop, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/LoggingOptions/basic_v5.100.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: v6.0 Identity error + { + ConfigDirectory: config.StaticDirectory("testdata/LoggingOptions/basic_v6.0.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: knownvalue.Null(), + names.AttrRegion: knownvalue.Null(), + }), + }, + }, + + // Step 3: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/LoggingOptions/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: 
knownvalue.StringExact(acctest.Region()), + }), + }, + }, + }, + }) +} + +func testAccIoTLoggingOptions_Identity_ExistingResource_NoRefresh_NoChange(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_iot_logging_options.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.Test(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.IoTServiceID), + CheckDestroy: acctest.CheckDestroyNoop, + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/LoggingOptions/basic_v5.100.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/LoggingOptions/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + }, + }, + }) +} diff --git a/internal/service/iot/logging_options_test.go b/internal/service/iot/logging_options_test.go index 46a0561943d7..78c9321ca431 100644 --- a/internal/service/iot/logging_options_test.go +++ b/internal/service/iot/logging_options_test.go @@ -9,13 +9,7 @@ import ( sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" - "github.com/hashicorp/terraform-plugin-testing/knownvalue" - "github.com/hashicorp/terraform-plugin-testing/plancheck" - "github.com/hashicorp/terraform-plugin-testing/statecheck" - 
"github.com/hashicorp/terraform-plugin-testing/tfversion" "github.com/hashicorp/terraform-provider-aws/internal/acctest" - tfknownvalue "github.com/hashicorp/terraform-provider-aws/internal/acctest/knownvalue" - tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -148,73 +142,3 @@ resource "aws_iot_logging_options" "test" { } `) } - -func testAccIoTLoggingOptions_Identity_ExistingResource(t *testing.T) { - ctx := acctest.Context(t) - resourceName := "aws_iot_logging_options.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - - resource.Test(t, resource.TestCase{ - TerraformVersionChecks: []tfversion.TerraformVersionCheck{ - tfversion.SkipBelow(tfversion.Version1_12_0), - }, - PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, names.IoTServiceID), - CheckDestroy: acctest.CheckDestroyNoop, - Steps: []resource.TestStep{ - { - ExternalProviders: map[string]resource.ExternalProvider{ - "aws": { - Source: "hashicorp/aws", - VersionConstraint: "5.100.0", - }, - }, - Config: testAccLoggingOptionsConfig_basic(rName), - ConfigStateChecks: []statecheck.StateCheck{ - tfstatecheck.ExpectNoIdentity(resourceName), - }, - }, - { - ExternalProviders: map[string]resource.ExternalProvider{ - "aws": { - Source: "hashicorp/aws", - VersionConstraint: "6.0.0", - }, - }, - Config: testAccLoggingOptionsConfig_basic(rName), - ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - PostApplyPostRefresh: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - }, - ConfigStateChecks: []statecheck.StateCheck{ - statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ - names.AttrAccountID: knownvalue.Null(), - names.AttrRegion: knownvalue.Null(), - }), - }, - }, - { - 
ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - Config: testAccLoggingOptionsConfig_basic(rName), - ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - PostApplyPostRefresh: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - }, - ConfigStateChecks: []statecheck.StateCheck{ - statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ - names.AttrAccountID: tfknownvalue.AccountID(), - names.AttrRegion: knownvalue.StringExact(acctest.Region()), - }), - }, - }, - }, - }) -} diff --git a/internal/service/iot/policy.go b/internal/service/iot/policy.go index b67040acd649..0f1e3cf4c73b 100644 --- a/internal/service/iot/policy.go +++ b/internal/service/iot/policy.go @@ -286,8 +286,8 @@ func deletePolicy(ctx context.Context, conn *iot.Client, name string) error { PolicyName: aws.String(name), } - _, err := tfresource.RetryWhenIsA[*awstypes.DeleteConflictException](ctx, propagationTimeout, - func() (any, error) { + _, err := tfresource.RetryWhenIsA[any, *awstypes.DeleteConflictException](ctx, propagationTimeout, + func(ctx context.Context) (any, error) { return conn.DeletePolicy(ctx, input) }) @@ -308,8 +308,8 @@ func deletePolicyVersion(ctx context.Context, conn *iot.Client, name, versionID PolicyVersionId: aws.String(versionID), } - _, err := tfresource.RetryWhenIsA[*awstypes.DeleteConflictException](ctx, propagationTimeout, - func() (any, error) { + _, err := tfresource.RetryWhenIsA[any, *awstypes.DeleteConflictException](ctx, propagationTimeout, + func(ctx context.Context) (any, error) { return conn.DeletePolicyVersion(ctx, input) }) diff --git a/internal/service/iot/provisioning_template.go b/internal/service/iot/provisioning_template.go index c9edb55e0ddd..df59168a8cbc 100644 --- a/internal/service/iot/provisioning_template.go +++ b/internal/service/iot/provisioning_template.go 
@@ -155,8 +155,8 @@ func resourceProvisioningTemplateCreate(ctx context.Context, d *schema.ResourceD input.Type = awstypes.TemplateType(v) } - outputRaw, err := tfresource.RetryWhenIsA[*awstypes.InvalidRequestException](ctx, propagationTimeout, - func() (any, error) { + outputRaw, err := tfresource.RetryWhenIsA[any, *awstypes.InvalidRequestException](ctx, propagationTimeout, + func(ctx context.Context) (any, error) { return conn.CreateProvisioningTemplate(ctx, input) }) @@ -234,8 +234,8 @@ func resourceProvisioningTemplateUpdate(ctx context.Context, d *schema.ResourceD input.PreProvisioningHook = expandProvisioningHook(v.([]any)[0].(map[string]any)) } - _, err := tfresource.RetryWhenIsA[*awstypes.InvalidRequestException](ctx, propagationTimeout, - func() (any, error) { + _, err := tfresource.RetryWhenIsA[any, *awstypes.InvalidRequestException](ctx, propagationTimeout, + func(ctx context.Context) (any, error) { return conn.UpdateProvisioningTemplate(ctx, input) }) diff --git a/internal/service/iot/service_endpoint_resolver_gen.go b/internal/service/iot/service_endpoint_resolver_gen.go index 040cd2f3d948..a2021edbbcfc 100644 --- a/internal/service/iot/service_endpoint_resolver_gen.go +++ b/internal/service/iot/service_endpoint_resolver_gen.go @@ -62,7 +62,7 @@ func (r resolverV2) ResolveEndpoint(ctx context.Context, params iot.EndpointPara }) params.UseFIPS = aws.Bool(false) } else { - err = fmt.Errorf("looking up iot endpoint %q: %s", hostname, err) + err = fmt.Errorf("looking up iot endpoint %q: %w", hostname, err) return } } else { diff --git a/internal/service/iot/service_endpoints_gen_test.go b/internal/service/iot/service_endpoints_gen_test.go index 33d649b3a705..1a01af4bd0c0 100644 --- a/internal/service/iot/service_endpoints_gen_test.go +++ b/internal/service/iot/service_endpoints_gen_test.go @@ -521,7 +521,7 @@ func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { ) } -var errCancelOperation = fmt.Errorf("Test: Canceling request") 
+var errCancelOperation = errors.New("Test: Canceling request") func addCancelRequestMiddleware() func(*middleware.Stack) error { return func(stack *middleware.Stack) error { diff --git a/internal/service/iot/service_package_gen.go b/internal/service/iot/service_package_gen.go index 7ed75bd408db..f2c8a31b0b7d 100644 --- a/internal/service/iot/service_package_gen.go +++ b/internal/service/iot/service_package_gen.go @@ -7,7 +7,6 @@ import ( "unique" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/iot" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -231,7 +230,7 @@ func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) ( func(o *iot.Options) { if inContext, ok := conns.FromContext(ctx); ok && inContext.VCREnabled() { tflog.Info(ctx, "overriding retry behavior to immediately return VCR errors") - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(vcr.InteractionNotFoundRetryableFunc)) + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), vcr.InteractionNotFoundRetryableFunc) } }, withExtraOptions(ctx, p, config), diff --git a/internal/service/iot/sweep.go b/internal/service/iot/sweep.go index 408d2388e38e..ea90d165df78 100644 --- a/internal/service/iot/sweep.go +++ b/internal/service/iot/sweep.go @@ -112,7 +112,7 @@ func sweepCertificates(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %w", err) + return fmt.Errorf("getting client: %w", err) } conn := client.IoTClient(ctx) input := &iot.ListCertificatesInput{} @@ -154,7 +154,7 @@ func sweepPolicyAttachments(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %w", 
err) + return fmt.Errorf("getting client: %w", err) } conn := client.IoTClient(ctx) input := &iot.ListPoliciesInput{} @@ -218,7 +218,7 @@ func sweepPolicies(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %w", err) + return fmt.Errorf("getting client: %w", err) } conn := client.IoTClient(ctx) input := &iot.ListPoliciesInput{} @@ -259,7 +259,7 @@ func sweepRoleAliases(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %w", err) + return fmt.Errorf("getting client: %w", err) } conn := client.IoTClient(ctx) input := &iot.ListRoleAliasesInput{} @@ -300,7 +300,7 @@ func sweepThingPrincipalAttachments(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %w", err) + return fmt.Errorf("getting client: %w", err) } conn := client.IoTClient(ctx) input := &iot.ListThingsInput{} @@ -364,7 +364,7 @@ func sweepThings(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %w", err) + return fmt.Errorf("getting client: %w", err) } conn := client.IoTClient(ctx) input := &iot.ListThingsInput{} @@ -405,7 +405,7 @@ func sweepThingTypes(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %w", err) + return fmt.Errorf("getting client: %w", err) } conn := client.IoTClient(ctx) input := &iot.ListThingTypesInput{} @@ -446,7 +446,7 @@ func sweepTopicRules(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error 
getting client: %w", err) + return fmt.Errorf("getting client: %w", err) } conn := client.IoTClient(ctx) input := &iot.ListTopicRulesInput{} @@ -487,7 +487,7 @@ func sweepThingGroups(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.IoTClient(ctx) input := &iot.ListThingGroupsInput{} @@ -528,7 +528,7 @@ func sweepTopicRuleDestinations(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %w", err) + return fmt.Errorf("getting client: %w", err) } conn := client.IoTClient(ctx) input := &iot.ListTopicRuleDestinationsInput{} @@ -569,7 +569,7 @@ func sweepAuthorizers(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %w", err) + return fmt.Errorf("getting client: %w", err) } conn := client.IoTClient(ctx) input := &iot.ListAuthorizersInput{} @@ -611,7 +611,7 @@ func sweepDomainConfigurations(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %w", err) + return fmt.Errorf("getting client: %w", err) } conn := client.IoTClient(ctx) input := &iot.ListDomainConfigurationsInput{} @@ -675,7 +675,7 @@ func sweepCACertificates(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %w", err) + return fmt.Errorf("getting client: %w", err) } conn := client.IoTClient(ctx) input := &iot.ListCACertificatesInput{} diff --git a/internal/service/iot/tags_gen.go b/internal/service/iot/tags_gen.go index 74cc4cbdb984..71535ddac15e 
100644 --- a/internal/service/iot/tags_gen.go +++ b/internal/service/iot/tags_gen.go @@ -3,8 +3,8 @@ package iot import ( "context" - "fmt" + "github.com/YakDriver/smarterr" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/iot" awstypes "github.com/aws/aws-sdk-go-v2/service/iot/types" @@ -31,7 +31,7 @@ func listTags(ctx context.Context, conn *iot.Client, identifier string, optFns . page, err := pages.NextPage(ctx, optFns...) if err != nil { - return tftags.New(ctx, nil), err + return tftags.New(ctx, nil), smarterr.NewError(err) } output = append(output, page.Tags...) @@ -46,7 +46,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri tags, err := listTags(ctx, meta.(*conns.AWSClient).IoTClient(ctx), identifier) if err != nil { - return err + return smarterr.NewError(err) } if inContext, ok := tftags.FromContext(ctx); ok { @@ -124,7 +124,7 @@ func updateTags(ctx context.Context, conn *iot.Client, identifier string, oldTag _, err := conn.UntagResource(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("untagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } @@ -139,7 +139,7 @@ func updateTags(ctx context.Context, conn *iot.Client, identifier string, oldTag _, err := conn.TagResource(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("tagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } diff --git a/internal/service/iot/testdata/EventConfigurations/basic_v5.100.0/main_gen.tf b/internal/service/iot/testdata/EventConfigurations/basic_v5.100.0/main_gen.tf new file mode 100644 index 000000000000..179aa62150e9 --- /dev/null +++ b/internal/service/iot/testdata/EventConfigurations/basic_v5.100.0/main_gen.tf @@ -0,0 +1,29 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_iot_event_configurations" "test" { + event_configurations = { + "THING" = true, + "THING_GROUP" = false, + "THING_TYPE" = false, + "THING_GROUP_MEMBERSHIP" = false, + "THING_GROUP_HIERARCHY" = false, + "THING_TYPE_ASSOCIATION" = false, + "JOB" = false, + "JOB_EXECUTION" = false, + "POLICY" = false, + "CERTIFICATE" = true, + "CA_CERTIFICATE" = true, + } +} + +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "5.100.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/iot/testdata/EventConfigurations/basic_v6.0.0/main_gen.tf b/internal/service/iot/testdata/EventConfigurations/basic_v6.0.0/main_gen.tf new file mode 100644 index 000000000000..da4a41a11f8e --- /dev/null +++ b/internal/service/iot/testdata/EventConfigurations/basic_v6.0.0/main_gen.tf @@ -0,0 +1,29 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resource "aws_iot_event_configurations" "test" { + event_configurations = { + "THING" = true, + "THING_GROUP" = false, + "THING_TYPE" = false, + "THING_GROUP_MEMBERSHIP" = false, + "THING_GROUP_HIERARCHY" = false, + "THING_TYPE_ASSOCIATION" = false, + "JOB" = false, + "JOB_EXECUTION" = false, + "POLICY" = false, + "CERTIFICATE" = true, + "CA_CERTIFICATE" = true, + } +} + +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "6.0.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/iot/testdata/IndexingConfiguration/basic_v5.100.0/main_gen.tf b/internal/service/iot/testdata/IndexingConfiguration/basic_v5.100.0/main_gen.tf new file mode 100644 index 000000000000..c5ca95c5e0ff --- /dev/null +++ b/internal/service/iot/testdata/IndexingConfiguration/basic_v5.100.0/main_gen.tf @@ -0,0 +1,23 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_iot_indexing_configuration" "test" { + thing_group_indexing_configuration { + thing_group_indexing_mode = "OFF" + } + + thing_indexing_configuration { + thing_indexing_mode = "OFF" + } +} + +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "5.100.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/iot/testdata/IndexingConfiguration/basic_v6.0.0/main_gen.tf b/internal/service/iot/testdata/IndexingConfiguration/basic_v6.0.0/main_gen.tf new file mode 100644 index 000000000000..3f2b990f6a91 --- /dev/null +++ b/internal/service/iot/testdata/IndexingConfiguration/basic_v6.0.0/main_gen.tf @@ -0,0 +1,23 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resource "aws_iot_indexing_configuration" "test" { + thing_group_indexing_configuration { + thing_group_indexing_mode = "OFF" + } + + thing_indexing_configuration { + thing_indexing_mode = "OFF" + } +} + +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "6.0.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/iot/testdata/LoggingOptions/basic_v5.100.0/main_gen.tf b/internal/service/iot/testdata/LoggingOptions/basic_v5.100.0/main_gen.tf new file mode 100644 index 000000000000..f61e4e22ff3e --- /dev/null +++ b/internal/service/iot/testdata/LoggingOptions/basic_v5.100.0/main_gen.tf @@ -0,0 +1,64 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_iot_logging_options" "test" { + default_log_level = "WARN" + role_arn = aws_iam_role.test.arn + + depends_on = [aws_iam_role_policy.test] +} + +# testAccLoggingOptionsBaseConfig + +resource "aws_iam_role" "test" { + name = var.rName + + assume_role_policy = <" + { + ConfigDirectory: config.StaticDirectory("testdata/Channel/region_override/"), + ConfigVariables: config.Variables{ + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ImportStateKind: resource.ImportCommandWithID, + ImportStateIdFunc: acctest.CrossRegionImportStateIdFunc(resourceName), + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + + // Step 3: Import command without appended "@" + { + ConfigDirectory: config.StaticDirectory("testdata/Channel/region_override/"), + ConfigVariables: config.Variables{ + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ImportStateKind: resource.ImportCommandWithID, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + + // Step 4: Import block with Import ID and appended "@" + { + ConfigDirectory: config.StaticDirectory("testdata/Channel/region_override/"), + ConfigVariables: config.Variables{ + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportStateIdFunc: acctest.CrossRegionImportStateIdFunc(resourceName), + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + }, + + // Step 5: Import block with Import ID and no appended "@" + { + 
ConfigDirectory: config.StaticDirectory("testdata/Channel/region_override/"), + ConfigVariables: config.Variables{ + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + }, + + // Step 6: Import block with Resource Identity + { + ConfigDirectory: config.StaticDirectory("testdata/Channel/region_override/"), + ConfigVariables: config.Variables{ + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithResourceIdentity, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + }, + }, + }) +} + +// Resource Identity was added after v6.7.0 +func TestAccIVSChannel_Identity_ExistingResource(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.Channel + resourceName := "aws_ivs_channel.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.IVSServiceID), + CheckDestroy: 
testAccCheckChannelDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/Channel/basic_v6.7.0/"), + ConfigVariables: config.Variables{}, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckChannelExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Channel/basic/"), + ConfigVariables: config.Variables{}, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + }, + }) +} + +// Resource Identity was added after v6.7.0 +func TestAccIVSChannel_Identity_ExistingResource_NoRefresh_NoChange(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.Channel + resourceName := "aws_ivs_channel.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.IVSServiceID), + CheckDestroy: testAccCheckChannelDestroy(ctx), + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: 
config.StaticDirectory("testdata/Channel/basic_v6.7.0/"), + ConfigVariables: config.Variables{}, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckChannelExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Channel/basic/"), + ConfigVariables: config.Variables{}, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + }, + }) +} diff --git a/internal/service/ivs/generate.go b/internal/service/ivs/generate.go index af15398659c1..bdabf87e7bd5 100644 --- a/internal/service/ivs/generate.go +++ b/internal/service/ivs/generate.go @@ -3,6 +3,7 @@ //go:generate go run ../../generate/tags/main.go -KVTValues -ListTags -ServiceTagsMap -UpdateTags //go:generate go run ../../generate/servicepackage/main.go +//go:generate go run ../../generate/identitytests/main.go // ONLY generate directives and package declaration! Do not add anything else to this file. 
package ivs diff --git a/internal/service/ivs/ivs_test.go b/internal/service/ivs/ivs_test.go index dcb2a7a1e200..24bf731801b1 100644 --- a/internal/service/ivs/ivs_test.go +++ b/internal/service/ivs/ivs_test.go @@ -18,6 +18,7 @@ func TestAccIVS_serial(t *testing.T) { "update": testAccPlaybackKeyPair_update, "tags": testAccPlaybackKeyPair_tags, acctest.CtDisappears: testAccPlaybackKeyPair_disappears, + "identity": testAccIVSPlaybackKeyPair_IdentitySerial, }, } diff --git a/internal/service/ivs/playback_key_pair.go b/internal/service/ivs/playback_key_pair.go index a9b92079691e..2188e6fae3e6 100644 --- a/internal/service/ivs/playback_key_pair.go +++ b/internal/service/ivs/playback_key_pair.go @@ -24,16 +24,20 @@ import ( // @SDKResource("aws_ivs_playback_key_pair", name="Playback Key Pair") // @Tags(identifierAttribute="id") +// @ArnIdentity +// @Testing(existsType="github.com/aws/aws-sdk-go-v2/service/ivs/types;awstypes.PlaybackKeyPair") +// @Testing(preIdentityVersion="v6.7.0") +// @Testing(serialize=true) +// @Testing(generator=false) +// @Testing(tlsEcdsaPublicKeyPem=true) +// @Testing(importIgnore="public_key") +// @Testing(plannableImportAction=Replace) func ResourcePlaybackKeyPair() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourcePlaybackKeyPairCreate, ReadWithoutTimeout: resourcePlaybackKeyPairRead, DeleteWithoutTimeout: resourcePlaybackKeyPairDelete, - Importer: &schema.ResourceImporter{ - StateContext: schema.ImportStatePassthroughContext, - }, - Timeouts: &schema.ResourceTimeout{ Create: schema.DefaultTimeout(5 * time.Minute), Delete: schema.DefaultTimeout(5 * time.Minute), diff --git a/internal/service/ivs/playback_key_pair_identity_gen_test.go b/internal/service/ivs/playback_key_pair_identity_gen_test.go new file mode 100644 index 000000000000..2584ee5f21a1 --- /dev/null +++ b/internal/service/ivs/playback_key_pair_identity_gen_test.go @@ -0,0 +1,376 @@ +// Code generated by internal/generate/identitytests/main.go; DO NOT EDIT. 
+ +package ivs_test + +import ( + "testing" + + awstypes "github.com/aws/aws-sdk-go-v2/service/ivs/types" + "github.com/hashicorp/terraform-plugin-testing/compare" + "github.com/hashicorp/terraform-plugin-testing/config" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/knownvalue" + "github.com/hashicorp/terraform-plugin-testing/plancheck" + "github.com/hashicorp/terraform-plugin-testing/statecheck" + "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" + "github.com/hashicorp/terraform-plugin-testing/tfversion" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func testAccIVSPlaybackKeyPair_IdentitySerial(t *testing.T) { + t.Helper() + + testCases := map[string]func(t *testing.T){ + acctest.CtBasic: testAccIVSPlaybackKeyPair_Identity_Basic, + "ExistingResource": testAccIVSPlaybackKeyPair_Identity_ExistingResource, + "ExistingResourceNoRefresh": testAccIVSPlaybackKeyPair_Identity_ExistingResource_NoRefresh_NoChange, + "RegionOverride": testAccIVSPlaybackKeyPair_Identity_RegionOverride, + } + + acctest.RunSerialTests1Level(t, testCases, 0) +} + +func testAccIVSPlaybackKeyPair_Identity_Basic(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.PlaybackKeyPair + resourceName := "aws_ivs_playback_key_pair.test" + privateKey := acctest.TLSECDSAPrivateKeyPEM(t, "P-384") + rTlsEcdsaPublicKeyPem, _ := acctest.TLSECDSAPublicKeyPEM(t, privateKey) + + acctest.Test(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.IVSServiceID), + CheckDestroy: testAccCheckPlaybackKeyPairDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: 
[]resource.TestStep{ + // Step 1: Setup + { + ConfigDirectory: config.StaticDirectory("testdata/PlaybackKeyPair/basic/"), + ConfigVariables: config.Variables{ + "rTlsEcdsaPublicKeyPem": config.StringVariable(rTlsEcdsaPublicKeyPem), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckPlaybackKeyPairExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New(names.AttrARN), compare.ValuesSame()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + + // Step 2: Import command + { + ConfigDirectory: config.StaticDirectory("testdata/PlaybackKeyPair/basic/"), + ConfigVariables: config.Variables{ + "rTlsEcdsaPublicKeyPem": config.StringVariable(rTlsEcdsaPublicKeyPem), + }, + ImportStateKind: resource.ImportCommandWithID, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + names.AttrPublicKey, + }, + }, + + // Step 3: Import block with Import ID + { + ConfigDirectory: config.StaticDirectory("testdata/PlaybackKeyPair/basic/"), + ConfigVariables: config.Variables{ + "rTlsEcdsaPublicKeyPem": config.StringVariable(rTlsEcdsaPublicKeyPem), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionReplace), + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrARN)), + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrID)), + plancheck.ExpectKnownValue(resourceName, 
tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + }, + }, + ExpectNonEmptyPlan: true, + }, + + // Step 4: Import block with Resource Identity + { + ConfigDirectory: config.StaticDirectory("testdata/PlaybackKeyPair/basic/"), + ConfigVariables: config.Variables{ + "rTlsEcdsaPublicKeyPem": config.StringVariable(rTlsEcdsaPublicKeyPem), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithResourceIdentity, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionReplace), + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrARN)), + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrID)), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + }, + }, + ExpectNonEmptyPlan: true, + }, + }, + }) +} + +func testAccIVSPlaybackKeyPair_Identity_RegionOverride(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_ivs_playback_key_pair.test" + privateKey := acctest.TLSECDSAPrivateKeyPEM(t, "P-384") + rTlsEcdsaPublicKeyPem, _ := acctest.TLSECDSAPublicKeyPEM(t, privateKey) + + acctest.Test(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.IVSServiceID), + CheckDestroy: acctest.CheckDestroyNoop, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + // Step 1: Setup + { + ConfigDirectory: config.StaticDirectory("testdata/PlaybackKeyPair/region_override/"), + ConfigVariables: config.Variables{ + "rTlsEcdsaPublicKeyPem": config.StringVariable(rTlsEcdsaPublicKeyPem), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ConfigStateChecks: []statecheck.StateCheck{ + 
statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New(names.AttrARN), compare.ValuesSame()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + + // Step 2: Import command with appended "@" + { + ConfigDirectory: config.StaticDirectory("testdata/PlaybackKeyPair/region_override/"), + ConfigVariables: config.Variables{ + "rTlsEcdsaPublicKeyPem": config.StringVariable(rTlsEcdsaPublicKeyPem), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ImportStateKind: resource.ImportCommandWithID, + ImportStateIdFunc: acctest.CrossRegionImportStateIdFunc(resourceName), + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + names.AttrPublicKey, + }, + }, + + // Step 3: Import command without appended "@" + { + ConfigDirectory: config.StaticDirectory("testdata/PlaybackKeyPair/region_override/"), + ConfigVariables: config.Variables{ + "rTlsEcdsaPublicKeyPem": config.StringVariable(rTlsEcdsaPublicKeyPem), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ImportStateKind: resource.ImportCommandWithID, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + names.AttrPublicKey, + }, + }, + + // Step 4: Import block with Import ID and appended "@" + { + ConfigDirectory: config.StaticDirectory("testdata/PlaybackKeyPair/region_override/"), + ConfigVariables: config.Variables{ + "rTlsEcdsaPublicKeyPem": config.StringVariable(rTlsEcdsaPublicKeyPem), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: 
resource.ImportBlockWithID, + ImportStateIdFunc: acctest.CrossRegionImportStateIdFunc(resourceName), + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionReplace), + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrARN)), + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrID)), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + ExpectNonEmptyPlan: true, + }, + + // Step 5: Import block with Import ID and no appended "@" + { + ConfigDirectory: config.StaticDirectory("testdata/PlaybackKeyPair/region_override/"), + ConfigVariables: config.Variables{ + "rTlsEcdsaPublicKeyPem": config.StringVariable(rTlsEcdsaPublicKeyPem), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionReplace), + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrARN)), + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrID)), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + ExpectNonEmptyPlan: true, + }, + + // Step 6: Import block with Resource Identity + { + ConfigDirectory: config.StaticDirectory("testdata/PlaybackKeyPair/region_override/"), + ConfigVariables: config.Variables{ + "rTlsEcdsaPublicKeyPem": config.StringVariable(rTlsEcdsaPublicKeyPem), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithResourceIdentity, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: 
[]plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionReplace), + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrARN)), + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrID)), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + ExpectNonEmptyPlan: true, + }, + }, + }) +} + +// Resource Identity was added after v6.7.0 +func testAccIVSPlaybackKeyPair_Identity_ExistingResource(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.PlaybackKeyPair + resourceName := "aws_ivs_playback_key_pair.test" + privateKey := acctest.TLSECDSAPrivateKeyPEM(t, "P-384") + rTlsEcdsaPublicKeyPem, _ := acctest.TLSECDSAPublicKeyPEM(t, privateKey) + + acctest.Test(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.IVSServiceID), + CheckDestroy: testAccCheckPlaybackKeyPairDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/PlaybackKeyPair/basic_v6.7.0/"), + ConfigVariables: config.Variables{ + "rTlsEcdsaPublicKeyPem": config.StringVariable(rTlsEcdsaPublicKeyPem), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckPlaybackKeyPairExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/PlaybackKeyPair/basic/"), + ConfigVariables: config.Variables{ + "rTlsEcdsaPublicKeyPem": config.StringVariable(rTlsEcdsaPublicKeyPem), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + 
plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + }, + }) +} + +// Resource Identity was added after v6.7.0 +func testAccIVSPlaybackKeyPair_Identity_ExistingResource_NoRefresh_NoChange(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.PlaybackKeyPair + resourceName := "aws_ivs_playback_key_pair.test" + privateKey := acctest.TLSECDSAPrivateKeyPEM(t, "P-384") + rTlsEcdsaPublicKeyPem, _ := acctest.TLSECDSAPublicKeyPEM(t, privateKey) + + acctest.Test(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.IVSServiceID), + CheckDestroy: testAccCheckPlaybackKeyPairDestroy(ctx), + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/PlaybackKeyPair/basic_v6.7.0/"), + ConfigVariables: config.Variables{ + "rTlsEcdsaPublicKeyPem": config.StringVariable(rTlsEcdsaPublicKeyPem), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckPlaybackKeyPairExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/PlaybackKeyPair/basic/"), + ConfigVariables: 
config.Variables{ + "rTlsEcdsaPublicKeyPem": config.StringVariable(rTlsEcdsaPublicKeyPem), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + }, + }) +} diff --git a/internal/service/ivs/recording_configuration.go b/internal/service/ivs/recording_configuration.go index 1ea458d481fc..264d8d7386a0 100644 --- a/internal/service/ivs/recording_configuration.go +++ b/internal/service/ivs/recording_configuration.go @@ -28,16 +28,15 @@ import ( // @SDKResource("aws_ivs_recording_configuration", name="Recording Configuration") // @Tags(identifierAttribute="id") +// @ArnIdentity +// @Testing(existsType="github.com/aws/aws-sdk-go-v2/service/ivs/types;awstypes.RecordingConfiguration") +// @Testing(preIdentityVersion="v6.7.0") func ResourceRecordingConfiguration() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceRecordingConfigurationCreate, ReadWithoutTimeout: resourceRecordingConfigurationRead, DeleteWithoutTimeout: resourceRecordingConfigurationDelete, - Importer: &schema.ResourceImporter{ - StateContext: schema.ImportStatePassthroughContext, - }, - Timeouts: &schema.ResourceTimeout{ Create: schema.DefaultTimeout(10 * time.Minute), Delete: schema.DefaultTimeout(10 * time.Minute), diff --git a/internal/service/ivs/recording_configuration_identity_gen_test.go b/internal/service/ivs/recording_configuration_identity_gen_test.go new file mode 100644 index 000000000000..4ad9a2862a2b --- /dev/null +++ b/internal/service/ivs/recording_configuration_identity_gen_test.go @@ -0,0 +1,341 @@ +// Code generated by internal/generate/identitytests/main.go; DO NOT EDIT. 
+ +package ivs_test + +import ( + "testing" + + awstypes "github.com/aws/aws-sdk-go-v2/service/ivs/types" + "github.com/hashicorp/terraform-plugin-testing/compare" + "github.com/hashicorp/terraform-plugin-testing/config" + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/knownvalue" + "github.com/hashicorp/terraform-plugin-testing/plancheck" + "github.com/hashicorp/terraform-plugin-testing/statecheck" + "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" + "github.com/hashicorp/terraform-plugin-testing/tfversion" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccIVSRecordingConfiguration_Identity_Basic(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.RecordingConfiguration + resourceName := "aws_ivs_recording_configuration.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.IVSServiceID), + CheckDestroy: testAccCheckRecordingConfigurationDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + // Step 1: Setup + { + ConfigDirectory: config.StaticDirectory("testdata/RecordingConfiguration/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckRecordingConfigurationExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.CompareValuePairs(resourceName, 
tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New(names.AttrARN), compare.ValuesSame()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + + // Step 2: Import command + { + ConfigDirectory: config.StaticDirectory("testdata/RecordingConfiguration/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ImportStateKind: resource.ImportCommandWithID, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + + // Step 3: Import block with Import ID + { + ConfigDirectory: config.StaticDirectory("testdata/RecordingConfiguration/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + }, + }, + }, + + // Step 4: Import block with Resource Identity + { + ConfigDirectory: config.StaticDirectory("testdata/RecordingConfiguration/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithResourceIdentity, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, 
tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + }, + }, + }, + }, + }) +} + +func TestAccIVSRecordingConfiguration_Identity_RegionOverride(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_ivs_recording_configuration.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.IVSServiceID), + CheckDestroy: acctest.CheckDestroyNoop, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + // Step 1: Setup + { + ConfigDirectory: config.StaticDirectory("testdata/RecordingConfiguration/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New(names.AttrARN), compare.ValuesSame()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + + // Step 2: Import command with appended "@" + { + ConfigDirectory: config.StaticDirectory("testdata/RecordingConfiguration/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": 
config.StringVariable(acctest.AlternateRegion()), + }, + ImportStateKind: resource.ImportCommandWithID, + ImportStateIdFunc: acctest.CrossRegionImportStateIdFunc(resourceName), + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + + // Step 3: Import command without appended "@" + { + ConfigDirectory: config.StaticDirectory("testdata/RecordingConfiguration/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ImportStateKind: resource.ImportCommandWithID, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + + // Step 4: Import block with Import ID and appended "@" + { + ConfigDirectory: config.StaticDirectory("testdata/RecordingConfiguration/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportStateIdFunc: acctest.CrossRegionImportStateIdFunc(resourceName), + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + }, + + // Step 5: Import block with Import ID and no appended "@" + { + ConfigDirectory: config.StaticDirectory("testdata/RecordingConfiguration/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: 
resource.ImportBlockWithID, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + }, + + // Step 6: Import block with Resource Identity + { + ConfigDirectory: config.StaticDirectory("testdata/RecordingConfiguration/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithResourceIdentity, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + }, + }, + }) +} + +// Resource Identity was added after v6.7.0 +func TestAccIVSRecordingConfiguration_Identity_ExistingResource(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.RecordingConfiguration + resourceName := "aws_ivs_recording_configuration.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.IVSServiceID), + CheckDestroy: testAccCheckRecordingConfigurationDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Create 
pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/RecordingConfiguration/basic_v6.7.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckRecordingConfigurationExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/RecordingConfiguration/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + }, + }) +} + +// Resource Identity was added after v6.7.0 +func TestAccIVSRecordingConfiguration_Identity_ExistingResource_NoRefresh_NoChange(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.RecordingConfiguration + resourceName := "aws_ivs_recording_configuration.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.IVSServiceID), + CheckDestroy: testAccCheckRecordingConfigurationDestroy(ctx), + AdditionalCLIOptions: 
&resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/RecordingConfiguration/basic_v6.7.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckRecordingConfigurationExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/RecordingConfiguration/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + }, + }) +} diff --git a/internal/service/ivs/service_endpoint_resolver_gen.go b/internal/service/ivs/service_endpoint_resolver_gen.go index c6ee0852761c..a628c3127703 100644 --- a/internal/service/ivs/service_endpoint_resolver_gen.go +++ b/internal/service/ivs/service_endpoint_resolver_gen.go @@ -62,7 +62,7 @@ func (r resolverV2) ResolveEndpoint(ctx context.Context, params ivs.EndpointPara }) params.UseFIPS = aws.Bool(false) } else { - err = fmt.Errorf("looking up ivs endpoint %q: %s", hostname, err) + err = fmt.Errorf("looking up ivs endpoint %q: %w", hostname, err) return } } else { diff --git a/internal/service/ivs/service_endpoints_gen_test.go b/internal/service/ivs/service_endpoints_gen_test.go index 0b6a36c62087..77a84c606fdf 100644 --- 
a/internal/service/ivs/service_endpoints_gen_test.go +++ b/internal/service/ivs/service_endpoints_gen_test.go @@ -521,7 +521,7 @@ func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { ) } -var errCancelOperation = fmt.Errorf("Test: Canceling request") +var errCancelOperation = errors.New("Test: Canceling request") func addCancelRequestMiddleware() func(*middleware.Stack) error { return func(stack *middleware.Stack) error { diff --git a/internal/service/ivs/service_package_gen.go b/internal/service/ivs/service_package_gen.go index 8e3888894cdc..6fa2d73597ac 100644 --- a/internal/service/ivs/service_package_gen.go +++ b/internal/service/ivs/service_package_gen.go @@ -7,7 +7,6 @@ import ( "unique" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/ivs" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -47,6 +46,12 @@ func (p *servicePackage) SDKResources(ctx context.Context) []*inttypes.ServicePa IdentifierAttribute: names.AttrID, }), Region: unique.Make(inttypes.ResourceRegionDefault()), + Identity: inttypes.RegionalARNIdentity( + inttypes.WithIdentityDuplicateAttrs(names.AttrID), + ), + Import: inttypes.SDKv2Import{ + WrappedImport: true, + }, }, { Factory: ResourcePlaybackKeyPair, @@ -56,6 +61,12 @@ func (p *servicePackage) SDKResources(ctx context.Context) []*inttypes.ServicePa IdentifierAttribute: names.AttrID, }), Region: unique.Make(inttypes.ResourceRegionDefault()), + Identity: inttypes.RegionalARNIdentity( + inttypes.WithIdentityDuplicateAttrs(names.AttrID), + ), + Import: inttypes.SDKv2Import{ + WrappedImport: true, + }, }, { Factory: ResourceRecordingConfiguration, @@ -65,6 +76,12 @@ func (p *servicePackage) SDKResources(ctx context.Context) []*inttypes.ServicePa IdentifierAttribute: names.AttrID, }), Region: unique.Make(inttypes.ResourceRegionDefault()), + Identity: inttypes.RegionalARNIdentity( + 
inttypes.WithIdentityDuplicateAttrs(names.AttrID), + ), + Import: inttypes.SDKv2Import{ + WrappedImport: true, + }, }, } } @@ -92,7 +109,7 @@ func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) ( func(o *ivs.Options) { if inContext, ok := conns.FromContext(ctx); ok && inContext.VCREnabled() { tflog.Info(ctx, "overriding retry behavior to immediately return VCR errors") - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(vcr.InteractionNotFoundRetryableFunc)) + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), vcr.InteractionNotFoundRetryableFunc) } }, withExtraOptions(ctx, p, config), diff --git a/internal/service/ivs/tags_gen.go b/internal/service/ivs/tags_gen.go index f90809bfc033..1b12ea161090 100644 --- a/internal/service/ivs/tags_gen.go +++ b/internal/service/ivs/tags_gen.go @@ -3,8 +3,8 @@ package ivs import ( "context" - "fmt" + "github.com/YakDriver/smarterr" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/ivs" "github.com/hashicorp/terraform-plugin-log/tflog" @@ -26,7 +26,7 @@ func listTags(ctx context.Context, conn *ivs.Client, identifier string, optFns . output, err := conn.ListTagsForResource(ctx, &input, optFns...) if err != nil { - return tftags.New(ctx, nil), err + return tftags.New(ctx, nil), smarterr.NewError(err) } return keyValueTags(ctx, output.Tags), nil @@ -38,7 +38,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri tags, err := listTags(ctx, meta.(*conns.AWSClient).IVSClient(ctx), identifier) if err != nil { - return err + return smarterr.NewError(err) } if inContext, ok := tftags.FromContext(ctx); ok { @@ -99,7 +99,7 @@ func updateTags(ctx context.Context, conn *ivs.Client, identifier string, oldTag _, err := conn.UntagResource(ctx, &input, optFns...) 
if err != nil { - return fmt.Errorf("untagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } @@ -114,7 +114,7 @@ func updateTags(ctx context.Context, conn *ivs.Client, identifier string, oldTag _, err := conn.TagResource(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("tagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } diff --git a/internal/service/ivs/testdata/Channel/basic/main_gen.tf b/internal/service/ivs/testdata/Channel/basic/main_gen.tf new file mode 100644 index 000000000000..e723cb24c3db --- /dev/null +++ b/internal/service/ivs/testdata/Channel/basic/main_gen.tf @@ -0,0 +1,6 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resource "aws_ivs_channel" "test" { +} + diff --git a/internal/service/ivs/testdata/Channel/basic_v6.7.0/main_gen.tf b/internal/service/ivs/testdata/Channel/basic_v6.7.0/main_gen.tf new file mode 100644 index 000000000000..64afb8a40f9c --- /dev/null +++ b/internal/service/ivs/testdata/Channel/basic_v6.7.0/main_gen.tf @@ -0,0 +1,16 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resource "aws_ivs_channel" "test" { +} + +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "6.7.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/ivs/testdata/Channel/region_override/main_gen.tf b/internal/service/ivs/testdata/Channel/region_override/main_gen.tf new file mode 100644 index 000000000000..37d874a6db3e --- /dev/null +++ b/internal/service/ivs/testdata/Channel/region_override/main_gen.tf @@ -0,0 +1,14 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_ivs_channel" "test" { + region = var.region + +} + + +variable "region" { + description = "Region to deploy resource in" + type = string + nullable = false +} diff --git a/internal/service/ivs/testdata/PlaybackKeyPair/basic/main_gen.tf b/internal/service/ivs/testdata/PlaybackKeyPair/basic/main_gen.tf new file mode 100644 index 000000000000..e3dcd0d5b88a --- /dev/null +++ b/internal/service/ivs/testdata/PlaybackKeyPair/basic/main_gen.tf @@ -0,0 +1,12 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resource "aws_ivs_playback_key_pair" "test" { + public_key = var.rTlsEcdsaPublicKeyPem +} + +variable "rTlsEcdsaPublicKeyPem" { + type = string + nullable = false +} + diff --git a/internal/service/ivs/testdata/PlaybackKeyPair/basic_v6.7.0/main_gen.tf b/internal/service/ivs/testdata/PlaybackKeyPair/basic_v6.7.0/main_gen.tf new file mode 100644 index 000000000000..3e98e5f8d35d --- /dev/null +++ b/internal/service/ivs/testdata/PlaybackKeyPair/basic_v6.7.0/main_gen.tf @@ -0,0 +1,22 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resource "aws_ivs_playback_key_pair" "test" { + public_key = var.rTlsEcdsaPublicKeyPem +} + +variable "rTlsEcdsaPublicKeyPem" { + type = string + nullable = false +} + +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "6.7.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/ivs/testdata/PlaybackKeyPair/region_override/main_gen.tf b/internal/service/ivs/testdata/PlaybackKeyPair/region_override/main_gen.tf new file mode 100644 index 000000000000..b3635d6a4618 --- /dev/null +++ b/internal/service/ivs/testdata/PlaybackKeyPair/region_override/main_gen.tf @@ -0,0 +1,20 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_ivs_playback_key_pair" "test" { + region = var.region + + public_key = var.rTlsEcdsaPublicKeyPem +} + +variable "rTlsEcdsaPublicKeyPem" { + type = string + nullable = false +} + + +variable "region" { + description = "Region to deploy resource in" + type = string + nullable = false +} diff --git a/internal/service/ivs/testdata/RecordingConfiguration/basic/main_gen.tf b/internal/service/ivs/testdata/RecordingConfiguration/basic/main_gen.tf new file mode 100644 index 000000000000..0e7cdf30b057 --- /dev/null +++ b/internal/service/ivs/testdata/RecordingConfiguration/basic/main_gen.tf @@ -0,0 +1,21 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resource "aws_ivs_recording_configuration" "test" { + destination_configuration { + s3 { + bucket_name = aws_s3_bucket.test.id + } + } +} + +resource "aws_s3_bucket" "test" { + bucket = var.rName + force_destroy = true +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} diff --git a/internal/service/ivs/testdata/RecordingConfiguration/basic_v6.7.0/main_gen.tf b/internal/service/ivs/testdata/RecordingConfiguration/basic_v6.7.0/main_gen.tf new file mode 100644 index 000000000000..9172bf532e34 --- /dev/null +++ b/internal/service/ivs/testdata/RecordingConfiguration/basic_v6.7.0/main_gen.tf @@ -0,0 +1,31 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_ivs_recording_configuration" "test" { + destination_configuration { + s3 { + bucket_name = aws_s3_bucket.test.id + } + } +} + +resource "aws_s3_bucket" "test" { + bucket = var.rName + force_destroy = true +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "6.7.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/ivs/testdata/RecordingConfiguration/region_override/main_gen.tf b/internal/service/ivs/testdata/RecordingConfiguration/region_override/main_gen.tf new file mode 100644 index 000000000000..37ea87b80c82 --- /dev/null +++ b/internal/service/ivs/testdata/RecordingConfiguration/region_override/main_gen.tf @@ -0,0 +1,31 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resource "aws_ivs_recording_configuration" "test" { + region = var.region + + destination_configuration { + s3 { + bucket_name = aws_s3_bucket.test.id + } + } +} + +resource "aws_s3_bucket" "test" { + region = var.region + + bucket = var.rName + force_destroy = true +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} + +variable "region" { + description = "Region to deploy resource in" + type = string + nullable = false +} diff --git a/internal/service/ivs/testdata/tmpl/channel_tags.gtpl b/internal/service/ivs/testdata/tmpl/channel_tags.gtpl new file mode 100644 index 000000000000..0c2c9f1b1733 --- /dev/null +++ b/internal/service/ivs/testdata/tmpl/channel_tags.gtpl @@ -0,0 +1,4 @@ +resource "aws_ivs_channel" "test" { +{{- template "region" }} +{{- template "tags" }} +} diff --git a/internal/service/ivs/testdata/tmpl/playback_key_pair_tags.gtpl b/internal/service/ivs/testdata/tmpl/playback_key_pair_tags.gtpl new file mode 100644 index 000000000000..2b7e891e64ec --- /dev/null +++ 
b/internal/service/ivs/testdata/tmpl/playback_key_pair_tags.gtpl @@ -0,0 +1,5 @@ +resource "aws_ivs_playback_key_pair" "test" { +{{- template "region" }} + public_key = var.rTlsEcdsaPublicKeyPem +{{- template "tags" }} +} diff --git a/internal/service/ivs/testdata/tmpl/recording_configuration_tags.gtpl b/internal/service/ivs/testdata/tmpl/recording_configuration_tags.gtpl new file mode 100644 index 000000000000..b38a11e9be2c --- /dev/null +++ b/internal/service/ivs/testdata/tmpl/recording_configuration_tags.gtpl @@ -0,0 +1,15 @@ +resource "aws_ivs_recording_configuration" "test" { +{{- template "region" }} + destination_configuration { + s3 { + bucket_name = aws_s3_bucket.test.id + } + } +{{- template "tags" }} +} + +resource "aws_s3_bucket" "test" { +{{- template "region" }} + bucket = var.rName + force_destroy = true +} diff --git a/internal/service/ivschat/generate.go b/internal/service/ivschat/generate.go index 0bcf935ada23..5ac899821a9a 100644 --- a/internal/service/ivschat/generate.go +++ b/internal/service/ivschat/generate.go @@ -3,6 +3,7 @@ //go:generate go run ../../generate/tags/main.go -ListTags -ServiceTagsMap -UpdateTags -KVTValues //go:generate go run ../../generate/servicepackage/main.go +//go:generate go run ../../generate/identitytests/main.go // ONLY generate directives and package declaration! Do not add anything else to this file. 
package ivschat diff --git a/internal/service/ivschat/logging_configuration.go b/internal/service/ivschat/logging_configuration.go index fd261f4636f5..3bc150229764 100644 --- a/internal/service/ivschat/logging_configuration.go +++ b/internal/service/ivschat/logging_configuration.go @@ -25,6 +25,9 @@ import ( // @SDKResource("aws_ivschat_logging_configuration", name="Logging Configuration") // @Tags(identifierAttribute="id") +// @ArnIdentity +// @Testing(existsType="github.com/aws/aws-sdk-go-v2/service/ivschat;ivschat.GetLoggingConfigurationOutput") +// @Testing(preIdentityVersion="v6.5.0") func ResourceLoggingConfiguration() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceLoggingConfigurationCreate, @@ -32,10 +35,6 @@ func ResourceLoggingConfiguration() *schema.Resource { UpdateWithoutTimeout: resourceLoggingConfigurationUpdate, DeleteWithoutTimeout: resourceLoggingConfigurationDelete, - Importer: &schema.ResourceImporter{ - StateContext: schema.ImportStatePassthroughContext, - }, - Timeouts: &schema.ResourceTimeout{ Create: schema.DefaultTimeout(5 * time.Minute), Update: schema.DefaultTimeout(5 * time.Minute), diff --git a/internal/service/ivschat/logging_configuration_identity_gen_test.go b/internal/service/ivschat/logging_configuration_identity_gen_test.go new file mode 100644 index 000000000000..fc9344d64dea --- /dev/null +++ b/internal/service/ivschat/logging_configuration_identity_gen_test.go @@ -0,0 +1,341 @@ +// Code generated by internal/generate/identitytests/main.go; DO NOT EDIT. 
+ +package ivschat_test + +import ( + "testing" + + "github.com/aws/aws-sdk-go-v2/service/ivschat" + "github.com/hashicorp/terraform-plugin-testing/compare" + "github.com/hashicorp/terraform-plugin-testing/config" + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/knownvalue" + "github.com/hashicorp/terraform-plugin-testing/plancheck" + "github.com/hashicorp/terraform-plugin-testing/statecheck" + "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" + "github.com/hashicorp/terraform-plugin-testing/tfversion" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccIVSChatLoggingConfiguration_Identity_Basic(t *testing.T) { + ctx := acctest.Context(t) + + var v ivschat.GetLoggingConfigurationOutput + resourceName := "aws_ivschat_logging_configuration.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.IVSChatServiceID), + CheckDestroy: testAccCheckLoggingConfigurationDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + // Step 1: Setup + { + ConfigDirectory: config.StaticDirectory("testdata/LoggingConfiguration/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckLoggingConfigurationExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.CompareValuePairs(resourceName, 
tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New(names.AttrARN), compare.ValuesSame()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + + // Step 2: Import command + { + ConfigDirectory: config.StaticDirectory("testdata/LoggingConfiguration/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ImportStateKind: resource.ImportCommandWithID, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + + // Step 3: Import block with Import ID + { + ConfigDirectory: config.StaticDirectory("testdata/LoggingConfiguration/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + }, + }, + }, + + // Step 4: Import block with Resource Identity + { + ConfigDirectory: config.StaticDirectory("testdata/LoggingConfiguration/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithResourceIdentity, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, 
tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + }, + }, + }, + }, + }) +} + +func TestAccIVSChatLoggingConfiguration_Identity_RegionOverride(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_ivschat_logging_configuration.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.IVSChatServiceID), + CheckDestroy: acctest.CheckDestroyNoop, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + // Step 1: Setup + { + ConfigDirectory: config.StaticDirectory("testdata/LoggingConfiguration/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New(names.AttrARN), compare.ValuesSame()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + + // Step 2: Import command with appended "@" + { + ConfigDirectory: config.StaticDirectory("testdata/LoggingConfiguration/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": 
config.StringVariable(acctest.AlternateRegion()), + }, + ImportStateKind: resource.ImportCommandWithID, + ImportStateIdFunc: acctest.CrossRegionImportStateIdFunc(resourceName), + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + + // Step 3: Import command without appended "@" + { + ConfigDirectory: config.StaticDirectory("testdata/LoggingConfiguration/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ImportStateKind: resource.ImportCommandWithID, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + + // Step 4: Import block with Import ID and appended "@" + { + ConfigDirectory: config.StaticDirectory("testdata/LoggingConfiguration/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportStateIdFunc: acctest.CrossRegionImportStateIdFunc(resourceName), + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + }, + + // Step 5: Import block with Import ID and no appended "@" + { + ConfigDirectory: config.StaticDirectory("testdata/LoggingConfiguration/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: 
resource.ImportBlockWithID, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + }, + + // Step 6: Import block with Resource Identity + { + ConfigDirectory: config.StaticDirectory("testdata/LoggingConfiguration/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithResourceIdentity, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + }, + }, + }) +} + +// Resource Identity was added after v6.5.0 +func TestAccIVSChatLoggingConfiguration_Identity_ExistingResource(t *testing.T) { + ctx := acctest.Context(t) + + var v ivschat.GetLoggingConfigurationOutput + resourceName := "aws_ivschat_logging_configuration.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.IVSChatServiceID), + CheckDestroy: testAccCheckLoggingConfigurationDestroy(ctx), + Steps: []resource.TestStep{ + // Step 
1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/LoggingConfiguration/basic_v6.5.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckLoggingConfigurationExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/LoggingConfiguration/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + }, + }) +} + +// Resource Identity was added after v6.5.0 +func TestAccIVSChatLoggingConfiguration_Identity_ExistingResource_NoRefresh_NoChange(t *testing.T) { + ctx := acctest.Context(t) + + var v ivschat.GetLoggingConfigurationOutput + resourceName := "aws_ivschat_logging_configuration.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.IVSChatServiceID), + CheckDestroy: testAccCheckLoggingConfigurationDestroy(ctx), + AdditionalCLIOptions: 
&resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/LoggingConfiguration/basic_v6.5.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckLoggingConfigurationExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/LoggingConfiguration/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + }, + }) +} diff --git a/internal/service/ivschat/room.go b/internal/service/ivschat/room.go index dffb4fd7970d..1e868752f14f 100644 --- a/internal/service/ivschat/room.go +++ b/internal/service/ivschat/room.go @@ -27,6 +27,10 @@ import ( // @SDKResource("aws_ivschat_room", name="Room") // @Tags(identifierAttribute="id") +// @ArnIdentity +// @Testing(existsType="github.com/aws/aws-sdk-go-v2/service/ivschat;ivschat.GetRoomOutput") +// @Testing(preIdentityVersion="v6.5.0") +// @Testing(generator=false) func ResourceRoom() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceRoomCreate, @@ -34,10 +38,6 @@ func ResourceRoom() *schema.Resource { UpdateWithoutTimeout: resourceRoomUpdate, DeleteWithoutTimeout: resourceRoomDelete, - 
Importer: &schema.ResourceImporter{ - StateContext: schema.ImportStatePassthroughContext, - }, - Timeouts: &schema.ResourceTimeout{ Create: schema.DefaultTimeout(5 * time.Minute), Update: schema.DefaultTimeout(5 * time.Minute), @@ -67,7 +67,7 @@ func ResourceRoom() *schema.Resource { Type: schema.TypeInt, Optional: true, Computed: true, - ValidateFunc: validation.IntBetween(1, 10), + ValidateFunc: validation.IntBetween(1, 100), }, "message_review_handler": { Type: schema.TypeList, diff --git a/internal/service/ivschat/room_identity_gen_test.go b/internal/service/ivschat/room_identity_gen_test.go new file mode 100644 index 000000000000..e935cd538736 --- /dev/null +++ b/internal/service/ivschat/room_identity_gen_test.go @@ -0,0 +1,314 @@ +// Code generated by internal/generate/identitytests/main.go; DO NOT EDIT. + +package ivschat_test + +import ( + "testing" + + "github.com/aws/aws-sdk-go-v2/service/ivschat" + "github.com/hashicorp/terraform-plugin-testing/compare" + "github.com/hashicorp/terraform-plugin-testing/config" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/knownvalue" + "github.com/hashicorp/terraform-plugin-testing/plancheck" + "github.com/hashicorp/terraform-plugin-testing/statecheck" + "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" + "github.com/hashicorp/terraform-plugin-testing/tfversion" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccIVSChatRoom_Identity_Basic(t *testing.T) { + ctx := acctest.Context(t) + + var v ivschat.GetRoomOutput + resourceName := "aws_ivschat_room.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + 
ErrorCheck: acctest.ErrorCheck(t, names.IVSChatServiceID), + CheckDestroy: testAccCheckRoomDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + // Step 1: Setup + { + ConfigDirectory: config.StaticDirectory("testdata/Room/basic/"), + ConfigVariables: config.Variables{}, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckRoomExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New(names.AttrARN), compare.ValuesSame()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + + // Step 2: Import command + { + ConfigDirectory: config.StaticDirectory("testdata/Room/basic/"), + ConfigVariables: config.Variables{}, + ImportStateKind: resource.ImportCommandWithID, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + + // Step 3: Import block with Import ID + { + ConfigDirectory: config.StaticDirectory("testdata/Room/basic/"), + ConfigVariables: config.Variables{}, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + }, + }, + }, + + // Step 4: Import block with Resource Identity + { + ConfigDirectory: 
config.StaticDirectory("testdata/Room/basic/"), + ConfigVariables: config.Variables{}, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithResourceIdentity, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + }, + }, + }, + }, + }) +} + +func TestAccIVSChatRoom_Identity_RegionOverride(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_ivschat_room.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.IVSChatServiceID), + CheckDestroy: acctest.CheckDestroyNoop, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + // Step 1: Setup + { + ConfigDirectory: config.StaticDirectory("testdata/Room/region_override/"), + ConfigVariables: config.Variables{ + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New(names.AttrARN), compare.ValuesSame()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + + // Step 2: Import command with appended "@" + { + ConfigDirectory: 
config.StaticDirectory("testdata/Room/region_override/"), + ConfigVariables: config.Variables{ + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ImportStateKind: resource.ImportCommandWithID, + ImportStateIdFunc: acctest.CrossRegionImportStateIdFunc(resourceName), + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + + // Step 3: Import command without appended "@" + { + ConfigDirectory: config.StaticDirectory("testdata/Room/region_override/"), + ConfigVariables: config.Variables{ + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ImportStateKind: resource.ImportCommandWithID, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + + // Step 4: Import block with Import ID and appended "@" + { + ConfigDirectory: config.StaticDirectory("testdata/Room/region_override/"), + ConfigVariables: config.Variables{ + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportStateIdFunc: acctest.CrossRegionImportStateIdFunc(resourceName), + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + }, + + // Step 5: Import block with Import ID and no appended "@" + { + ConfigDirectory: config.StaticDirectory("testdata/Room/region_override/"), + ConfigVariables: config.Variables{ + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + 
plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + }, + + // Step 6: Import block with Resource Identity + { + ConfigDirectory: config.StaticDirectory("testdata/Room/region_override/"), + ConfigVariables: config.Variables{ + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithResourceIdentity, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + }, + }, + }) +} + +// Resource Identity was added after v6.5.0 +func TestAccIVSChatRoom_Identity_ExistingResource(t *testing.T) { + ctx := acctest.Context(t) + + var v ivschat.GetRoomOutput + resourceName := "aws_ivschat_room.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.IVSChatServiceID), + CheckDestroy: testAccCheckRoomDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/Room/basic_v6.5.0/"), + ConfigVariables: config.Variables{}, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckRoomExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + 
tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Room/basic/"), + ConfigVariables: config.Variables{}, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + }, + }) +} + +// Resource Identity was added after v6.5.0 +func TestAccIVSChatRoom_Identity_ExistingResource_NoRefresh_NoChange(t *testing.T) { + ctx := acctest.Context(t) + + var v ivschat.GetRoomOutput + resourceName := "aws_ivschat_room.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.IVSChatServiceID), + CheckDestroy: testAccCheckRoomDestroy(ctx), + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/Room/basic_v6.5.0/"), + ConfigVariables: config.Variables{}, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckRoomExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + 
ConfigDirectory: config.StaticDirectory("testdata/Room/basic/"), + ConfigVariables: config.Variables{}, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + }, + }) +} diff --git a/internal/service/ivschat/service_endpoint_resolver_gen.go b/internal/service/ivschat/service_endpoint_resolver_gen.go index 8717de5d415a..b1be53e68398 100644 --- a/internal/service/ivschat/service_endpoint_resolver_gen.go +++ b/internal/service/ivschat/service_endpoint_resolver_gen.go @@ -62,7 +62,7 @@ func (r resolverV2) ResolveEndpoint(ctx context.Context, params ivschat.Endpoint }) params.UseFIPS = aws.Bool(false) } else { - err = fmt.Errorf("looking up ivschat endpoint %q: %s", hostname, err) + err = fmt.Errorf("looking up ivschat endpoint %q: %w", hostname, err) return } } else { diff --git a/internal/service/ivschat/service_endpoints_gen_test.go b/internal/service/ivschat/service_endpoints_gen_test.go index 56b283058817..fdf9d3c2d6ab 100644 --- a/internal/service/ivschat/service_endpoints_gen_test.go +++ b/internal/service/ivschat/service_endpoints_gen_test.go @@ -521,7 +521,7 @@ func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { ) } -var errCancelOperation = fmt.Errorf("Test: Canceling request") +var errCancelOperation = errors.New("Test: Canceling request") func addCancelRequestMiddleware() func(*middleware.Stack) error { return func(stack *middleware.Stack) error { diff --git a/internal/service/ivschat/service_package_gen.go b/internal/service/ivschat/service_package_gen.go index 7d3e4396b603..1581dde10c5d 100644 --- a/internal/service/ivschat/service_package_gen.go +++ 
b/internal/service/ivschat/service_package_gen.go @@ -7,7 +7,6 @@ import ( "unique" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/ivschat" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -40,6 +39,12 @@ func (p *servicePackage) SDKResources(ctx context.Context) []*inttypes.ServicePa IdentifierAttribute: names.AttrID, }), Region: unique.Make(inttypes.ResourceRegionDefault()), + Identity: inttypes.RegionalARNIdentity( + inttypes.WithIdentityDuplicateAttrs(names.AttrID), + ), + Import: inttypes.SDKv2Import{ + WrappedImport: true, + }, }, { Factory: ResourceRoom, @@ -49,6 +54,12 @@ func (p *servicePackage) SDKResources(ctx context.Context) []*inttypes.ServicePa IdentifierAttribute: names.AttrID, }), Region: unique.Make(inttypes.ResourceRegionDefault()), + Identity: inttypes.RegionalARNIdentity( + inttypes.WithIdentityDuplicateAttrs(names.AttrID), + ), + Import: inttypes.SDKv2Import{ + WrappedImport: true, + }, }, } } @@ -76,7 +87,7 @@ func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) ( func(o *ivschat.Options) { if inContext, ok := conns.FromContext(ctx); ok && inContext.VCREnabled() { tflog.Info(ctx, "overriding retry behavior to immediately return VCR errors") - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(vcr.InteractionNotFoundRetryableFunc)) + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), vcr.InteractionNotFoundRetryableFunc) } }, withExtraOptions(ctx, p, config), diff --git a/internal/service/ivschat/tags_gen.go b/internal/service/ivschat/tags_gen.go index dfdae85cd7db..60728911792e 100644 --- a/internal/service/ivschat/tags_gen.go +++ b/internal/service/ivschat/tags_gen.go @@ -3,8 +3,8 @@ package ivschat import ( "context" - "fmt" + "github.com/YakDriver/smarterr" "github.com/aws/aws-sdk-go-v2/aws" 
"github.com/aws/aws-sdk-go-v2/service/ivschat" "github.com/hashicorp/terraform-plugin-log/tflog" @@ -26,7 +26,7 @@ func listTags(ctx context.Context, conn *ivschat.Client, identifier string, optF output, err := conn.ListTagsForResource(ctx, &input, optFns...) if err != nil { - return tftags.New(ctx, nil), err + return tftags.New(ctx, nil), smarterr.NewError(err) } return keyValueTags(ctx, output.Tags), nil @@ -38,7 +38,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri tags, err := listTags(ctx, meta.(*conns.AWSClient).IVSChatClient(ctx), identifier) if err != nil { - return err + return smarterr.NewError(err) } if inContext, ok := tftags.FromContext(ctx); ok { @@ -99,7 +99,7 @@ func updateTags(ctx context.Context, conn *ivschat.Client, identifier string, ol _, err := conn.UntagResource(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("untagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } @@ -114,7 +114,7 @@ func updateTags(ctx context.Context, conn *ivschat.Client, identifier string, ol _, err := conn.TagResource(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("tagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } diff --git a/internal/service/ivschat/testdata/LoggingConfiguration/basic/main_gen.tf b/internal/service/ivschat/testdata/LoggingConfiguration/basic/main_gen.tf new file mode 100644 index 000000000000..a19d91f69070 --- /dev/null +++ b/internal/service/ivschat/testdata/LoggingConfiguration/basic/main_gen.tf @@ -0,0 +1,21 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_s3_bucket" "test" { + bucket = var.rName + force_destroy = true +} + +resource "aws_ivschat_logging_configuration" "test" { + destination_configuration { + s3 { + bucket_name = aws_s3_bucket.test.id + } + } +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} diff --git a/internal/service/ivschat/testdata/LoggingConfiguration/basic_v6.5.0/main_gen.tf b/internal/service/ivschat/testdata/LoggingConfiguration/basic_v6.5.0/main_gen.tf new file mode 100644 index 000000000000..637e2febdc7b --- /dev/null +++ b/internal/service/ivschat/testdata/LoggingConfiguration/basic_v6.5.0/main_gen.tf @@ -0,0 +1,31 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resource "aws_s3_bucket" "test" { + bucket = var.rName + force_destroy = true +} + +resource "aws_ivschat_logging_configuration" "test" { + destination_configuration { + s3 { + bucket_name = aws_s3_bucket.test.id + } + } +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "6.5.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/ivschat/testdata/LoggingConfiguration/region_override/main_gen.tf b/internal/service/ivschat/testdata/LoggingConfiguration/region_override/main_gen.tf new file mode 100644 index 000000000000..592b286629a1 --- /dev/null +++ b/internal/service/ivschat/testdata/LoggingConfiguration/region_override/main_gen.tf @@ -0,0 +1,31 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_s3_bucket" "test" { + region = var.region + + bucket = var.rName + force_destroy = true +} + +resource "aws_ivschat_logging_configuration" "test" { + region = var.region + + destination_configuration { + s3 { + bucket_name = aws_s3_bucket.test.id + } + } +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} + +variable "region" { + description = "Region to deploy resource in" + type = string + nullable = false +} diff --git a/internal/service/ivschat/testdata/Room/basic/main_gen.tf b/internal/service/ivschat/testdata/Room/basic/main_gen.tf new file mode 100644 index 000000000000..8cc01c33bdcc --- /dev/null +++ b/internal/service/ivschat/testdata/Room/basic/main_gen.tf @@ -0,0 +1,6 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resource "aws_ivschat_room" "test" { +} + diff --git a/internal/service/ivschat/testdata/Room/basic_v6.5.0/main_gen.tf b/internal/service/ivschat/testdata/Room/basic_v6.5.0/main_gen.tf new file mode 100644 index 000000000000..07b528f5a951 --- /dev/null +++ b/internal/service/ivschat/testdata/Room/basic_v6.5.0/main_gen.tf @@ -0,0 +1,16 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resource "aws_ivschat_room" "test" { +} + +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "6.5.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/ivschat/testdata/Room/region_override/main_gen.tf b/internal/service/ivschat/testdata/Room/region_override/main_gen.tf new file mode 100644 index 000000000000..bbc646144fa1 --- /dev/null +++ b/internal/service/ivschat/testdata/Room/region_override/main_gen.tf @@ -0,0 +1,14 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_ivschat_room" "test" { + region = var.region + +} + + +variable "region" { + description = "Region to deploy resource in" + type = string + nullable = false +} diff --git a/internal/service/ivschat/testdata/tmpl/logging_configuration_basic.gtpl b/internal/service/ivschat/testdata/tmpl/logging_configuration_basic.gtpl new file mode 100644 index 000000000000..66a5ca753ecb --- /dev/null +++ b/internal/service/ivschat/testdata/tmpl/logging_configuration_basic.gtpl @@ -0,0 +1,15 @@ +resource "aws_s3_bucket" "test" { +{{- template "region" }} + bucket = var.rName + force_destroy = true +} + +resource "aws_ivschat_logging_configuration" "test" { +{{- template "region" }} + destination_configuration { + s3 { + bucket_name = aws_s3_bucket.test.id + } + } +{{- template "tags" }} +} diff --git a/internal/service/ivschat/testdata/tmpl/room_basic.gtpl b/internal/service/ivschat/testdata/tmpl/room_basic.gtpl new file mode 100644 index 000000000000..68fe84f05daa --- /dev/null +++ b/internal/service/ivschat/testdata/tmpl/room_basic.gtpl @@ -0,0 +1,4 @@ +resource "aws_ivschat_room" "test" { +{{- template "region" }} +{{- template "tags" }} +} diff --git a/internal/service/kafka/service_endpoint_resolver_gen.go b/internal/service/kafka/service_endpoint_resolver_gen.go index f630adba87d7..5c6d825a7a90 100644 --- a/internal/service/kafka/service_endpoint_resolver_gen.go +++ b/internal/service/kafka/service_endpoint_resolver_gen.go @@ -62,7 +62,7 @@ func (r resolverV2) ResolveEndpoint(ctx context.Context, params kafka.EndpointPa }) params.UseFIPS = aws.Bool(false) } else { - err = fmt.Errorf("looking up kafka endpoint %q: %s", hostname, err) + err = fmt.Errorf("looking up kafka endpoint %q: %w", hostname, err) return } } else { diff --git a/internal/service/kafka/service_endpoints_gen_test.go b/internal/service/kafka/service_endpoints_gen_test.go index 55ab24355e59..5b4648485749 100644 --- 
a/internal/service/kafka/service_endpoints_gen_test.go +++ b/internal/service/kafka/service_endpoints_gen_test.go @@ -601,7 +601,7 @@ func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { ) } -var errCancelOperation = fmt.Errorf("Test: Canceling request") +var errCancelOperation = errors.New("Test: Canceling request") func addCancelRequestMiddleware() func(*middleware.Stack) error { return func(stack *middleware.Stack) error { diff --git a/internal/service/kafka/service_package.go b/internal/service/kafka/service_package.go index ab0a31be43d7..13d2e9eb7b0b 100644 --- a/internal/service/kafka/service_package.go +++ b/internal/service/kafka/service_package.go @@ -10,21 +10,32 @@ import ( "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/kafka" awstypes "github.com/aws/aws-sdk-go-v2/service/kafka/types" + "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/errs" + "github.com/hashicorp/terraform-provider-aws/internal/vcr" ) -func (p *servicePackage) withExtraOptions(_ context.Context, config map[string]any) []func(*kafka.Options) { +func (p *servicePackage) withExtraOptions(ctx context.Context, config map[string]any) []func(*kafka.Options) { cfg := *(config["aws_sdkv2_config"].(*aws.Config)) return []func(*kafka.Options){ func(o *kafka.Options) { - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(func(err error) aws.Ternary { - if errs.IsAErrorMessageContains[*awstypes.TooManyRequestsException](err, "Too Many Requests") { - return aws.TrueTernary - } - return aws.UnknownTernary // Delegate to configured Retryer. 
- })) + retryables := []retry.IsErrorRetryable{ + retry.IsErrorRetryableFunc(func(err error) aws.Ternary { + if errs.IsAErrorMessageContains[*awstypes.TooManyRequestsException](err, "Too Many Requests") { + return aws.TrueTernary + } + return aws.UnknownTernary // Delegate to configured Retryer. + }), + } + // Include go-vcr retryable to prevent generated client retryer from being overridden + if inContext, ok := conns.FromContext(ctx); ok && inContext.VCREnabled() { + tflog.Info(ctx, "overriding retry behavior to immediately return VCR errors") + retryables = append(retryables, vcr.InteractionNotFoundRetryableFunc) + } + + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retryables...) }, } } diff --git a/internal/service/kafka/service_package_gen.go b/internal/service/kafka/service_package_gen.go index 24c4540a73fe..7ae4151e0b82 100644 --- a/internal/service/kafka/service_package_gen.go +++ b/internal/service/kafka/service_package_gen.go @@ -7,7 +7,6 @@ import ( "unique" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/kafka" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -157,7 +156,7 @@ func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) ( func(o *kafka.Options) { if inContext, ok := conns.FromContext(ctx); ok && inContext.VCREnabled() { tflog.Info(ctx, "overriding retry behavior to immediately return VCR errors") - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(vcr.InteractionNotFoundRetryableFunc)) + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), vcr.InteractionNotFoundRetryableFunc) } }, withExtraOptions(ctx, p, config), diff --git a/internal/service/kafka/sweep.go b/internal/service/kafka/sweep.go index 6d76ab291fd2..0f0a0c1537b7 100644 --- a/internal/service/kafka/sweep.go +++ 
b/internal/service/kafka/sweep.go @@ -37,7 +37,7 @@ func sweepClusters(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } input := &kafka.ListClustersV2Input{} conn := client.KafkaClient(ctx) @@ -85,7 +85,7 @@ func sweepConfigurations(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.KafkaClient(ctx) input := &kafka.ListConfigurationsInput{} diff --git a/internal/service/kafka/tags_gen.go b/internal/service/kafka/tags_gen.go index bad319bc7b63..f057404ae0cc 100644 --- a/internal/service/kafka/tags_gen.go +++ b/internal/service/kafka/tags_gen.go @@ -3,8 +3,8 @@ package kafka import ( "context" - "fmt" + "github.com/YakDriver/smarterr" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/kafka" "github.com/hashicorp/terraform-plugin-log/tflog" @@ -66,7 +66,7 @@ func updateTags(ctx context.Context, conn *kafka.Client, identifier string, oldT _, err := conn.UntagResource(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("untagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } @@ -81,7 +81,7 @@ func updateTags(ctx context.Context, conn *kafka.Client, identifier string, oldT _, err := conn.TagResource(ctx, &input, optFns...) 
if err != nil { - return fmt.Errorf("tagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } diff --git a/internal/service/kafkaconnect/service_endpoint_resolver_gen.go b/internal/service/kafkaconnect/service_endpoint_resolver_gen.go index 14ac41d0f2a6..916f5faafc75 100644 --- a/internal/service/kafkaconnect/service_endpoint_resolver_gen.go +++ b/internal/service/kafkaconnect/service_endpoint_resolver_gen.go @@ -62,7 +62,7 @@ func (r resolverV2) ResolveEndpoint(ctx context.Context, params kafkaconnect.End }) params.UseFIPS = aws.Bool(false) } else { - err = fmt.Errorf("looking up kafkaconnect endpoint %q: %s", hostname, err) + err = fmt.Errorf("looking up kafkaconnect endpoint %q: %w", hostname, err) return } } else { diff --git a/internal/service/kafkaconnect/service_endpoints_gen_test.go b/internal/service/kafkaconnect/service_endpoints_gen_test.go index 92efbec1d4d7..cf6d789df172 100644 --- a/internal/service/kafkaconnect/service_endpoints_gen_test.go +++ b/internal/service/kafkaconnect/service_endpoints_gen_test.go @@ -521,7 +521,7 @@ func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { ) } -var errCancelOperation = fmt.Errorf("Test: Canceling request") +var errCancelOperation = errors.New("Test: Canceling request") func addCancelRequestMiddleware() func(*middleware.Stack) error { return func(stack *middleware.Stack) error { diff --git a/internal/service/kafkaconnect/service_package_gen.go b/internal/service/kafkaconnect/service_package_gen.go index b0c043ca0a3c..7695326aec8e 100644 --- a/internal/service/kafkaconnect/service_package_gen.go +++ b/internal/service/kafkaconnect/service_package_gen.go @@ -7,7 +7,6 @@ import ( "unique" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/kafkaconnect" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -113,7 +112,7 @@ func (p 
*servicePackage) NewClient(ctx context.Context, config map[string]any) ( func(o *kafkaconnect.Options) { if inContext, ok := conns.FromContext(ctx); ok && inContext.VCREnabled() { tflog.Info(ctx, "overriding retry behavior to immediately return VCR errors") - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(vcr.InteractionNotFoundRetryableFunc)) + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), vcr.InteractionNotFoundRetryableFunc) } }, withExtraOptions(ctx, p, config), diff --git a/internal/service/kafkaconnect/sweep.go b/internal/service/kafkaconnect/sweep.go index 8c957e6e321b..6167f1ef3ca0 100644 --- a/internal/service/kafkaconnect/sweep.go +++ b/internal/service/kafkaconnect/sweep.go @@ -41,7 +41,7 @@ func sweepConnectors(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.KafkaConnectClient(ctx) input := &kafkaconnect.ListConnectorsInput{} @@ -82,7 +82,7 @@ func sweepCustomPlugins(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.KafkaConnectClient(ctx) input := &kafkaconnect.ListCustomPluginsInput{} @@ -123,7 +123,7 @@ func sweepWorkerConfigurations(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.KafkaConnectClient(ctx) input := &kafkaconnect.ListWorkerConfigurationsInput{} diff --git a/internal/service/kafkaconnect/tags_gen.go b/internal/service/kafkaconnect/tags_gen.go index 01f9ad041a17..a52020efd87b 100644 --- 
a/internal/service/kafkaconnect/tags_gen.go +++ b/internal/service/kafkaconnect/tags_gen.go @@ -3,8 +3,8 @@ package kafkaconnect import ( "context" - "fmt" + "github.com/YakDriver/smarterr" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/kafkaconnect" "github.com/hashicorp/terraform-plugin-log/tflog" @@ -26,7 +26,7 @@ func listTags(ctx context.Context, conn *kafkaconnect.Client, identifier string, output, err := conn.ListTagsForResource(ctx, &input, optFns...) if err != nil { - return tftags.New(ctx, nil), err + return tftags.New(ctx, nil), smarterr.NewError(err) } return keyValueTags(ctx, output.Tags), nil @@ -38,7 +38,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri tags, err := listTags(ctx, meta.(*conns.AWSClient).KafkaConnectClient(ctx), identifier) if err != nil { - return err + return smarterr.NewError(err) } if inContext, ok := tftags.FromContext(ctx); ok { @@ -99,7 +99,7 @@ func updateTags(ctx context.Context, conn *kafkaconnect.Client, identifier strin _, err := conn.UntagResource(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("untagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } @@ -114,7 +114,7 @@ func updateTags(ctx context.Context, conn *kafkaconnect.Client, identifier strin _, err := conn.TagResource(ctx, &input, optFns...) 
if err != nil { - return fmt.Errorf("tagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } diff --git a/internal/service/kendra/data_source.go b/internal/service/kendra/data_source.go index fa0b8aa4f5db..c72b37f79f17 100644 --- a/internal/service/kendra/data_source.go +++ b/internal/service/kendra/data_source.go @@ -29,6 +29,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" "github.com/hashicorp/terraform-provider-aws/internal/flex" "github.com/hashicorp/terraform-provider-aws/internal/sdkv2" + tfsmithy "github.com/hashicorp/terraform-provider-aws/internal/smithy" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/internal/verify" @@ -648,7 +649,7 @@ func resourceDataSourceCreate(ctx context.Context, d *schema.ResourceData, meta } outputRaw, err := tfresource.RetryWhen(ctx, propagationTimeout, - func() (any, error) { + func(ctx context.Context) (any, error) { return conn.CreateDataSource(ctx, input) }, func(err error) (bool, error) { @@ -792,7 +793,7 @@ func resourceDataSourceUpdate(ctx context.Context, d *schema.ResourceData, meta log.Printf("[DEBUG] Updating Kendra Data Source (%s): %#v", d.Id(), input) _, err = tfresource.RetryWhen(ctx, propagationTimeout, - func() (any, error) { + func(ctx context.Context) (any, error) { return conn.UpdateDataSource(ctx, input) }, func(err error) (bool, error) { @@ -975,7 +976,7 @@ func expandTemplateConfiguration(tfList []any) (*types.TemplateConfiguration, er var body any err := json.Unmarshal([]byte(tfMap["template"].(string)), &body) if err != nil { - return nil, fmt.Errorf("decoding JSON: %s", err) + return nil, fmt.Errorf("decoding JSON: %w", err) } return &types.TemplateConfiguration{ @@ -1531,12 +1532,12 @@ func flattenTemplateConfiguration(apiObject *types.TemplateConfiguration) ([]any tfMap := map[string]any{} if v := 
apiObject.Template; v != nil { - bytes, err := apiObject.Template.MarshalSmithyDocument() + v, err := tfsmithy.DocumentToJSONString(v) if err != nil { return nil, err } - tfMap["template"] = string(bytes[:]) + tfMap["template"] = v } return []any{tfMap}, nil diff --git a/internal/service/kendra/faq.go b/internal/service/kendra/faq.go index 508b0d059d1a..d24e200ca6ff 100644 --- a/internal/service/kendra/faq.go +++ b/internal/service/kendra/faq.go @@ -177,7 +177,7 @@ func resourceFaqCreate(ctx context.Context, d *schema.ResourceData, meta any) di } outputRaw, err := tfresource.RetryWhen(ctx, propagationTimeout, - func() (any, error) { + func(ctx context.Context) (any, error) { return conn.CreateFaq(ctx, input) }, func(err error) (bool, error) { diff --git a/internal/service/kendra/index.go b/internal/service/kendra/index.go index 89b1b34b9e72..2019438356b7 100644 --- a/internal/service/kendra/index.go +++ b/internal/service/kendra/index.go @@ -417,7 +417,7 @@ func resourceIndexCreate(ctx context.Context, d *schema.ResourceData, meta any) } outputRaw, err := tfresource.RetryWhen(ctx, propagationTimeout, - func() (any, error) { + func(ctx context.Context) (any, error) { return conn.CreateIndex(ctx, input) }, func(err error) (bool, error) { @@ -567,7 +567,7 @@ func resourceIndexUpdate(ctx context.Context, d *schema.ResourceData, meta any) } _, err := tfresource.RetryWhen(ctx, propagationTimeout, - func() (any, error) { + func(ctx context.Context) (any, error) { return conn.UpdateIndex(ctx, input) }, func(err error) (bool, error) { diff --git a/internal/service/kendra/query_suggestions_block_list.go b/internal/service/kendra/query_suggestions_block_list.go index 8d9dab71ec89..2b2604b5d640 100644 --- a/internal/service/kendra/query_suggestions_block_list.go +++ b/internal/service/kendra/query_suggestions_block_list.go @@ -120,7 +120,7 @@ func resourceQuerySuggestionsBlockListCreate(ctx context.Context, d *schema.Reso } outputRaw, err := tfresource.RetryWhen(ctx, 
propagationTimeout, - func() (any, error) { + func(ctx context.Context) (any, error) { return conn.CreateQuerySuggestionsBlockList(ctx, in) }, func(err error) (bool, error) { @@ -235,7 +235,7 @@ func resourceQuerySuggestionsBlockListUpdate(ctx context.Context, d *schema.Reso log.Printf("[DEBUG] Updating Kendra QuerySuggestionsBlockList (%s): %#v", d.Id(), input) _, err = tfresource.RetryWhen(ctx, propagationTimeout, - func() (any, error) { + func(ctx context.Context) (any, error) { return conn.UpdateQuerySuggestionsBlockList(ctx, input) }, func(err error) (bool, error) { diff --git a/internal/service/kendra/service_endpoint_resolver_gen.go b/internal/service/kendra/service_endpoint_resolver_gen.go index 2afb9523ce70..8455b39b66be 100644 --- a/internal/service/kendra/service_endpoint_resolver_gen.go +++ b/internal/service/kendra/service_endpoint_resolver_gen.go @@ -62,7 +62,7 @@ func (r resolverV2) ResolveEndpoint(ctx context.Context, params kendra.EndpointP }) params.UseFIPS = aws.Bool(false) } else { - err = fmt.Errorf("looking up kendra endpoint %q: %s", hostname, err) + err = fmt.Errorf("looking up kendra endpoint %q: %w", hostname, err) return } } else { diff --git a/internal/service/kendra/service_endpoints_gen_test.go b/internal/service/kendra/service_endpoints_gen_test.go index b2a707e5c93b..a3fe0267dd76 100644 --- a/internal/service/kendra/service_endpoints_gen_test.go +++ b/internal/service/kendra/service_endpoints_gen_test.go @@ -521,7 +521,7 @@ func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { ) } -var errCancelOperation = fmt.Errorf("Test: Canceling request") +var errCancelOperation = errors.New("Test: Canceling request") func addCancelRequestMiddleware() func(*middleware.Stack) error { return func(stack *middleware.Stack) error { diff --git a/internal/service/kendra/service_package_gen.go b/internal/service/kendra/service_package_gen.go index 0dcae1db5973..dea8e255d886 100644 --- 
a/internal/service/kendra/service_package_gen.go +++ b/internal/service/kendra/service_package_gen.go @@ -7,7 +7,6 @@ import ( "unique" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/kendra" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -140,7 +139,7 @@ func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) ( func(o *kendra.Options) { if inContext, ok := conns.FromContext(ctx); ok && inContext.VCREnabled() { tflog.Info(ctx, "overriding retry behavior to immediately return VCR errors") - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(vcr.InteractionNotFoundRetryableFunc)) + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), vcr.InteractionNotFoundRetryableFunc) } }, withExtraOptions(ctx, p, config), diff --git a/internal/service/kendra/tags_gen.go b/internal/service/kendra/tags_gen.go index 94a30c6e09ed..78d4a0be275d 100644 --- a/internal/service/kendra/tags_gen.go +++ b/internal/service/kendra/tags_gen.go @@ -3,8 +3,8 @@ package kendra import ( "context" - "fmt" + "github.com/YakDriver/smarterr" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/kendra" awstypes "github.com/aws/aws-sdk-go-v2/service/kendra/types" @@ -27,7 +27,7 @@ func listTags(ctx context.Context, conn *kendra.Client, identifier string, optFn output, err := conn.ListTagsForResource(ctx, &input, optFns...) 
if err != nil { - return tftags.New(ctx, nil), err + return tftags.New(ctx, nil), smarterr.NewError(err) } return keyValueTags(ctx, output.Tags), nil @@ -39,7 +39,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri tags, err := listTags(ctx, meta.(*conns.AWSClient).KendraClient(ctx), identifier) if err != nil { - return err + return smarterr.NewError(err) } if inContext, ok := tftags.FromContext(ctx); ok { @@ -117,7 +117,7 @@ func updateTags(ctx context.Context, conn *kendra.Client, identifier string, old _, err := conn.UntagResource(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("untagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } @@ -132,7 +132,7 @@ func updateTags(ctx context.Context, conn *kendra.Client, identifier string, old _, err := conn.TagResource(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("tagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } diff --git a/internal/service/kendra/thesaurus.go b/internal/service/kendra/thesaurus.go index de18fdcfbaa6..8399a6f60a9c 100644 --- a/internal/service/kendra/thesaurus.go +++ b/internal/service/kendra/thesaurus.go @@ -120,7 +120,7 @@ func resourceThesaurusCreate(ctx context.Context, d *schema.ResourceData, meta a } outputRaw, err := tfresource.RetryWhen(ctx, propagationTimeout, - func() (any, error) { + func(ctx context.Context) (any, error) { return conn.CreateThesaurus(ctx, input) }, func(err error) (bool, error) { @@ -236,7 +236,7 @@ func resourceThesaurusUpdate(ctx context.Context, d *schema.ResourceData, meta a log.Printf("[DEBUG] Updating Kendra Thesaurus (%s): %#v", d.Id(), input) _, err = tfresource.RetryWhen(ctx, propagationTimeout, - func() (any, error) { + func(ctx context.Context) (any, error) { return conn.UpdateThesaurus(ctx, input) }, func(err error) (bool, error) { diff --git a/internal/service/keyspaces/keyspace.go b/internal/service/keyspaces/keyspace.go index 
b766cd8f8301..a2256263ebaa 100644 --- a/internal/service/keyspaces/keyspace.go +++ b/internal/service/keyspaces/keyspace.go @@ -122,7 +122,7 @@ func resourceKeyspaceCreate(ctx context.Context, d *schema.ResourceData, meta an d.SetId(name) - _, err = tfresource.RetryWhenNotFound(ctx, d.Timeout(schema.TimeoutCreate), func() (any, error) { + _, err = tfresource.RetryWhenNotFound(ctx, d.Timeout(schema.TimeoutCreate), func(ctx context.Context) (any, error) { return findKeyspaceByName(ctx, conn, d.Id()) }) @@ -169,8 +169,8 @@ func resourceKeyspaceDelete(ctx context.Context, d *schema.ResourceData, meta an conn := meta.(*conns.AWSClient).KeyspacesClient(ctx) log.Printf("[DEBUG] Deleting Keyspaces Keyspace: (%s)", d.Id()) - _, err := tfresource.RetryWhenIsAErrorMessageContains[*types.ConflictException](ctx, d.Timeout(schema.TimeoutDelete), - func() (any, error) { + _, err := tfresource.RetryWhenIsAErrorMessageContains[any, *types.ConflictException](ctx, d.Timeout(schema.TimeoutDelete), + func(ctx context.Context) (any, error) { return conn.DeleteKeyspace(ctx, &keyspaces.DeleteKeyspaceInput{ KeyspaceName: aws.String(d.Id()), }) @@ -185,7 +185,7 @@ func resourceKeyspaceDelete(ctx context.Context, d *schema.ResourceData, meta an return sdkdiag.AppendErrorf(diags, "deleting Keyspaces Keyspace (%s): %s", d.Id(), err) } - _, err = tfresource.RetryUntilNotFound(ctx, d.Timeout(schema.TimeoutDelete), func() (any, error) { + _, err = tfresource.RetryUntilNotFound(ctx, d.Timeout(schema.TimeoutDelete), func(ctx context.Context) (any, error) { return findKeyspaceByName(ctx, conn, d.Id()) }) diff --git a/internal/service/keyspaces/service_endpoint_resolver_gen.go b/internal/service/keyspaces/service_endpoint_resolver_gen.go index f577c6391862..bdf6ee7a3666 100644 --- a/internal/service/keyspaces/service_endpoint_resolver_gen.go +++ b/internal/service/keyspaces/service_endpoint_resolver_gen.go @@ -62,7 +62,7 @@ func (r resolverV2) ResolveEndpoint(ctx context.Context, params 
keyspaces.Endpoi }) params.UseFIPS = aws.Bool(false) } else { - err = fmt.Errorf("looking up keyspaces endpoint %q: %s", hostname, err) + err = fmt.Errorf("looking up keyspaces endpoint %q: %w", hostname, err) return } } else { diff --git a/internal/service/keyspaces/service_endpoints_gen_test.go b/internal/service/keyspaces/service_endpoints_gen_test.go index c39ffd975b9e..10c9b12d2d23 100644 --- a/internal/service/keyspaces/service_endpoints_gen_test.go +++ b/internal/service/keyspaces/service_endpoints_gen_test.go @@ -521,7 +521,7 @@ func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { ) } -var errCancelOperation = fmt.Errorf("Test: Canceling request") +var errCancelOperation = errors.New("Test: Canceling request") func addCancelRequestMiddleware() func(*middleware.Stack) error { return func(stack *middleware.Stack) error { diff --git a/internal/service/keyspaces/service_package_gen.go b/internal/service/keyspaces/service_package_gen.go index 343313198992..0106f1f16797 100644 --- a/internal/service/keyspaces/service_package_gen.go +++ b/internal/service/keyspaces/service_package_gen.go @@ -7,7 +7,6 @@ import ( "unique" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/keyspaces" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -76,7 +75,7 @@ func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) ( func(o *keyspaces.Options) { if inContext, ok := conns.FromContext(ctx); ok && inContext.VCREnabled() { tflog.Info(ctx, "overriding retry behavior to immediately return VCR errors") - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(vcr.InteractionNotFoundRetryableFunc)) + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), vcr.InteractionNotFoundRetryableFunc) } }, withExtraOptions(ctx, p, config), diff --git 
a/internal/service/keyspaces/sweep.go b/internal/service/keyspaces/sweep.go index ea122fc22c9d..77b2b7f3157c 100644 --- a/internal/service/keyspaces/sweep.go +++ b/internal/service/keyspaces/sweep.go @@ -26,7 +26,7 @@ func sweepKeyspaces(region string) error { // nosemgrep:ci.keyspaces-in-func-nam ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %w", err) + return fmt.Errorf("getting client: %w", err) } conn := client.KeyspacesClient(ctx) input := &keyspaces.ListKeyspacesInput{} diff --git a/internal/service/keyspaces/tags_gen.go b/internal/service/keyspaces/tags_gen.go index 1783a3259cba..8752dd8adb9d 100644 --- a/internal/service/keyspaces/tags_gen.go +++ b/internal/service/keyspaces/tags_gen.go @@ -3,8 +3,8 @@ package keyspaces import ( "context" - "fmt" + "github.com/YakDriver/smarterr" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/keyspaces" awstypes "github.com/aws/aws-sdk-go-v2/service/keyspaces/types" @@ -31,7 +31,7 @@ func listTags(ctx context.Context, conn *keyspaces.Client, identifier string, op page, err := pages.NextPage(ctx, optFns...) if err != nil { - return tftags.New(ctx, nil), err + return tftags.New(ctx, nil), smarterr.NewError(err) } output = append(output, page.Tags...) @@ -46,7 +46,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri tags, err := listTags(ctx, meta.(*conns.AWSClient).KeyspacesClient(ctx), identifier) if err != nil { - return err + return smarterr.NewError(err) } if inContext, ok := tftags.FromContext(ctx); ok { @@ -124,7 +124,7 @@ func updateTags(ctx context.Context, conn *keyspaces.Client, identifier string, _, err := conn.UntagResource(ctx, &input, optFns...) 
if err != nil { - return fmt.Errorf("untagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } @@ -139,7 +139,7 @@ func updateTags(ctx context.Context, conn *keyspaces.Client, identifier string, _, err := conn.TagResource(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("tagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } diff --git a/internal/service/kinesis/resource_policy.go b/internal/service/kinesis/resource_policy.go index 7055199d360b..c58ce58b05e0 100644 --- a/internal/service/kinesis/resource_policy.go +++ b/internal/service/kinesis/resource_policy.go @@ -30,6 +30,7 @@ import ( // @Testing(useAlternateAccount=true) // We need to ignore `policy` because the JSON body is not normalized // @Testing(importIgnore="policy") +// @Testing(preIdentityVersion="v5.100.0") func newResourcePolicyResource(context.Context) (resource.ResourceWithConfigure, error) { r := &resourcePolicyResource{} diff --git a/internal/service/kinesis/resource_policy_identity_gen_test.go b/internal/service/kinesis/resource_policy_identity_gen_test.go index df469d4552be..8cf32bb29687 100644 --- a/internal/service/kinesis/resource_policy_identity_gen_test.go +++ b/internal/service/kinesis/resource_policy_identity_gen_test.go @@ -16,16 +16,18 @@ import ( "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" "github.com/hashicorp/terraform-plugin-testing/tfversion" "github.com/hashicorp/terraform-provider-aws/internal/acctest" + tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" "github.com/hashicorp/terraform-provider-aws/names" ) func TestAccKinesisResourcePolicy_Identity_Basic(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_kinesis_resource_policy.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) providers := make(map[string]*schema.Provider) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ 
TerraformVersionChecks: []tfversion.TerraformVersionCheck{ tfversion.SkipBelow(tfversion.Version1_12_0), }, @@ -49,6 +51,9 @@ func TestAccKinesisResourcePolicy_Identity_Basic(t *testing.T) { ConfigStateChecks: []statecheck.StateCheck{ statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New(names.AttrResourceARN), compare.ValuesSame()), statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrResourceARN: knownvalue.NotNull(), + }), statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrResourceARN)), }, }, @@ -121,7 +126,7 @@ func TestAccKinesisResourcePolicy_Identity_RegionOverride(t *testing.T) { rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) providers := make(map[string]*schema.Provider) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ TerraformVersionChecks: []tfversion.TerraformVersionCheck{ tfversion.SkipBelow(tfversion.Version1_12_0), }, @@ -143,6 +148,9 @@ func TestAccKinesisResourcePolicy_Identity_RegionOverride(t *testing.T) { ConfigStateChecks: []statecheck.StateCheck{ statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New(names.AttrResourceARN), compare.ValuesSame()), statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrResourceARN: knownvalue.NotNull(), + }), statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrResourceARN)), }, }, @@ -251,3 +259,138 @@ func TestAccKinesisResourcePolicy_Identity_RegionOverride(t *testing.T) { }, }) } + +func TestAccKinesisResourcePolicy_Identity_ExistingResource(t *testing.T) { + ctx := acctest.Context(t) + + 
resourceName := "aws_kinesis_resource_policy.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + providers := make(map[string]*schema.Provider) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckAlternateAccount(t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.KinesisServiceID), + CheckDestroy: testAccCheckResourcePolicyDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesNamed(ctx, t, providers, acctest.ProviderNameAlternate), + ConfigDirectory: config.StaticDirectory("testdata/ResourcePolicy/basic_v5.100.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckResourcePolicyExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: v6.0 Identity set on refresh + { + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesNamed(ctx, t, providers, acctest.ProviderNameAlternate), + ConfigDirectory: config.StaticDirectory("testdata/ResourcePolicy/basic_v6.0.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckResourcePolicyExists(ctx, resourceName), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrResourceARN: 
knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrResourceARN)), + }, + }, + + // Step 3: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesNamedAlternate(ctx, t, providers), + ConfigDirectory: config.StaticDirectory("testdata/ResourcePolicy/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrResourceARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrResourceARN)), + }, + }, + }, + }) +} + +func TestAccKinesisResourcePolicy_Identity_ExistingResource_NoRefresh_NoChange(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_kinesis_resource_policy.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + providers := make(map[string]*schema.Provider) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckAlternateAccount(t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.KinesisServiceID), + CheckDestroy: testAccCheckResourcePolicyDestroy(ctx), + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesNamed(ctx, t, providers, acctest.ProviderNameAlternate), + ConfigDirectory: 
config.StaticDirectory("testdata/ResourcePolicy/basic_v5.100.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckResourcePolicyExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesNamedAlternate(ctx, t, providers), + ConfigDirectory: config.StaticDirectory("testdata/ResourcePolicy/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + }, + }, + }) +} diff --git a/internal/service/kinesis/resource_policy_test.go b/internal/service/kinesis/resource_policy_test.go index 1354fca5353d..290363e4699a 100644 --- a/internal/service/kinesis/resource_policy_test.go +++ b/internal/service/kinesis/resource_policy_test.go @@ -8,16 +8,10 @@ import ( "fmt" "testing" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" - "github.com/hashicorp/terraform-plugin-testing/plancheck" - "github.com/hashicorp/terraform-plugin-testing/statecheck" "github.com/hashicorp/terraform-plugin-testing/terraform" - "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" - "github.com/hashicorp/terraform-plugin-testing/tfversion" "github.com/hashicorp/terraform-provider-aws/internal/acctest" - tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" "github.com/hashicorp/terraform-provider-aws/internal/conns" tfkinesis "github.com/hashicorp/terraform-provider-aws/internal/service/kinesis" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" @@ -75,76 +69,6 @@ func TestAccKinesisResourcePolicy_disappears(t *testing.T) { }) } -func TestAccKinesisResourcePolicy_Identity_ExistingResource(t 
*testing.T) { - ctx := acctest.Context(t) - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resourceName := "aws_kinesis_resource_policy.test" - providers := make(map[string]*schema.Provider) - - resource.ParallelTest(t, resource.TestCase{ - TerraformVersionChecks: []tfversion.TerraformVersionCheck{ - tfversion.SkipBelow(tfversion.Version1_12_0), - }, - PreCheck: func() { - acctest.PreCheck(ctx, t) - acctest.PreCheckAlternateAccount(t) - }, - ErrorCheck: acctest.ErrorCheck(t, names.KinesisServiceID), - CheckDestroy: testAccCheckResourcePolicyDestroy(ctx), - Steps: []resource.TestStep{ - { - ExternalProviders: map[string]resource.ExternalProvider{ - "aws": { - Source: "hashicorp/aws", - VersionConstraint: "5.100.0", - }, - }, - ProtoV5ProviderFactories: acctest.ProtoV5FactoriesNamed(ctx, t, providers, acctest.ProviderNameAlternate), - Config: testAccResourcePolicyConfig_basic(rName), - ConfigStateChecks: []statecheck.StateCheck{ - tfstatecheck.ExpectNoIdentity(resourceName), - }, - }, - { - ExternalProviders: map[string]resource.ExternalProvider{ - "aws": { - Source: "hashicorp/aws", - VersionConstraint: "6.0.0", - }, - }, - ProtoV5ProviderFactories: acctest.ProtoV5FactoriesNamed(ctx, t, providers, acctest.ProviderNameAlternate), - Config: testAccResourcePolicyConfig_basic(rName), - ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - PostApplyPostRefresh: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - }, - ConfigStateChecks: []statecheck.StateCheck{ - statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrResourceARN)), - }, - }, - { - ProtoV5ProviderFactories: acctest.ProtoV5FactoriesNamedAlternate(ctx, t, providers), - Config: testAccResourcePolicyConfig_basic(rName), - ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{ - 
plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - PostApplyPostRefresh: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - }, - ConfigStateChecks: []statecheck.StateCheck{ - statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrResourceARN)), - }, - }, - }, - }) -} - func testAccCheckResourcePolicyExists(ctx context.Context, n string) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] diff --git a/internal/service/kinesis/service_endpoint_resolver_gen.go b/internal/service/kinesis/service_endpoint_resolver_gen.go index 3edcc3caae73..167fa695398d 100644 --- a/internal/service/kinesis/service_endpoint_resolver_gen.go +++ b/internal/service/kinesis/service_endpoint_resolver_gen.go @@ -62,7 +62,7 @@ func (r resolverV2) ResolveEndpoint(ctx context.Context, params kinesis.Endpoint }) params.UseFIPS = aws.Bool(false) } else { - err = fmt.Errorf("looking up kinesis endpoint %q: %s", hostname, err) + err = fmt.Errorf("looking up kinesis endpoint %q: %w", hostname, err) return } } else { diff --git a/internal/service/kinesis/service_endpoints_gen_test.go b/internal/service/kinesis/service_endpoints_gen_test.go index 734406afa2ec..a37dde067565 100644 --- a/internal/service/kinesis/service_endpoints_gen_test.go +++ b/internal/service/kinesis/service_endpoints_gen_test.go @@ -521,7 +521,7 @@ func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { ) } -var errCancelOperation = fmt.Errorf("Test: Canceling request") +var errCancelOperation = errors.New("Test: Canceling request") func addCancelRequestMiddleware() func(*middleware.Stack) error { return func(stack *middleware.Stack) error { diff --git a/internal/service/kinesis/service_package.go b/internal/service/kinesis/service_package.go index 9c76f4c33f64..d2654399ff68 100644 --- a/internal/service/kinesis/service_package.go +++ 
b/internal/service/kinesis/service_package.go @@ -10,22 +10,33 @@ import ( "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/kinesis" "github.com/aws/aws-sdk-go-v2/service/kinesis/types" + "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/errs" + "github.com/hashicorp/terraform-provider-aws/internal/vcr" ) -func (p *servicePackage) withExtraOptions(_ context.Context, config map[string]any) []func(*kinesis.Options) { +func (p *servicePackage) withExtraOptions(ctx context.Context, config map[string]any) []func(*kinesis.Options) { cfg := *(config["aws_sdkv2_config"].(*aws.Config)) return []func(*kinesis.Options){ func(o *kinesis.Options) { - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(func(err error) aws.Ternary { - if errs.IsAErrorMessageContains[*types.LimitExceededException](err, "simultaneously be in CREATING or DELETING") || - errs.IsAErrorMessageContains[*types.LimitExceededException](err, "Rate exceeded for stream") { - return aws.TrueTernary - } - return aws.UnknownTernary // Delegate to configured Retryer. - })) + retryables := []retry.IsErrorRetryable{ + retry.IsErrorRetryableFunc(func(err error) aws.Ternary { + if errs.IsAErrorMessageContains[*types.LimitExceededException](err, "simultaneously be in CREATING or DELETING") || + errs.IsAErrorMessageContains[*types.LimitExceededException](err, "Rate exceeded for stream") { + return aws.TrueTernary + } + return aws.UnknownTernary // Delegate to configured Retryer. 
+ }), + } + // Include go-vcr retryable to prevent generated client retryer from being overridden + if inContext, ok := conns.FromContext(ctx); ok && inContext.VCREnabled() { + tflog.Info(ctx, "overriding retry behavior to immediately return VCR errors") + retryables = append(retryables, vcr.InteractionNotFoundRetryableFunc) + } + + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retryables...) }, } } diff --git a/internal/service/kinesis/service_package_gen.go b/internal/service/kinesis/service_package_gen.go index db8ad0439144..c24b924949ff 100644 --- a/internal/service/kinesis/service_package_gen.go +++ b/internal/service/kinesis/service_package_gen.go @@ -7,7 +7,6 @@ import ( "unique" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/kinesis" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -110,7 +109,7 @@ func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) ( func(o *kinesis.Options) { if inContext, ok := conns.FromContext(ctx); ok && inContext.VCREnabled() { tflog.Info(ctx, "overriding retry behavior to immediately return VCR errors") - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(vcr.InteractionNotFoundRetryableFunc)) + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), vcr.InteractionNotFoundRetryableFunc) } }, withExtraOptions(ctx, p, config), diff --git a/internal/service/kinesis/sweep.go b/internal/service/kinesis/sweep.go index b061ced1eccf..ecba7d3a6fb0 100644 --- a/internal/service/kinesis/sweep.go +++ b/internal/service/kinesis/sweep.go @@ -26,7 +26,7 @@ func sweepStreams(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn 
:= client.KinesisClient(ctx) input := &kinesis.ListStreamsInput{} diff --git a/internal/service/kinesis/testdata/ResourcePolicy/basic_v5.100.0/main_gen.tf b/internal/service/kinesis/testdata/ResourcePolicy/basic_v5.100.0/main_gen.tf new file mode 100644 index 000000000000..1044cf1de872 --- /dev/null +++ b/internal/service/kinesis/testdata/ResourcePolicy/basic_v5.100.0/main_gen.tf @@ -0,0 +1,80 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resource "aws_kinesis_resource_policy" "test" { + resource_arn = aws_kinesis_stream.test.arn + + policy = < 0 || len(updatedTags) > 0 { if err := waitTagsPropagated(ctx, conn, identifier, newTags, optFns...); err != nil { - return fmt.Errorf("waiting for resource (%s) tag propagation: %w", identifier, err) + return smarterr.NewError(err) } } @@ -185,7 +185,7 @@ func waitTagsPropagated(ctx context.Context, conn *kms.Client, id string, tags t } if err != nil { - return false, err + return false, smarterr.NewError(err) } if inContext, ok := tftags.FromContext(ctx); ok { diff --git a/internal/service/kms/testdata/Alias/basic/main_gen.tf b/internal/service/kms/testdata/Alias/basic/main_gen.tf new file mode 100644 index 000000000000..6ed27ee53667 --- /dev/null +++ b/internal/service/kms/testdata/Alias/basic/main_gen.tf @@ -0,0 +1,19 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_kms_alias" "test" { + name = "alias/${var.rName}" + target_key_id = aws_kms_key.test.id +} + +resource "aws_kms_key" "test" { + description = var.rName + deletion_window_in_days = 7 + enable_key_rotation = true +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} diff --git a/internal/service/kms/testdata/Alias/basic_v6.10.0/main_gen.tf b/internal/service/kms/testdata/Alias/basic_v6.10.0/main_gen.tf new file mode 100644 index 000000000000..99a0ca77e26c --- /dev/null +++ b/internal/service/kms/testdata/Alias/basic_v6.10.0/main_gen.tf @@ -0,0 +1,29 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resource "aws_kms_alias" "test" { + name = "alias/${var.rName}" + target_key_id = aws_kms_key.test.id +} + +resource "aws_kms_key" "test" { + description = var.rName + deletion_window_in_days = 7 + enable_key_rotation = true +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "6.10.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/kms/testdata/Alias/region_override/main_gen.tf b/internal/service/kms/testdata/Alias/region_override/main_gen.tf new file mode 100644 index 000000000000..7137985ef276 --- /dev/null +++ b/internal/service/kms/testdata/Alias/region_override/main_gen.tf @@ -0,0 +1,29 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_kms_alias" "test" { + region = var.region + + name = "alias/${var.rName}" + target_key_id = aws_kms_key.test.id +} + +resource "aws_kms_key" "test" { + region = var.region + + description = var.rName + deletion_window_in_days = 7 + enable_key_rotation = true +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} + +variable "region" { + description = "Region to deploy resource in" + type = string + nullable = false +} diff --git a/internal/service/kms/testdata/Key/basic/main_gen.tf b/internal/service/kms/testdata/Key/basic/main_gen.tf new file mode 100644 index 000000000000..571bc4c67fbd --- /dev/null +++ b/internal/service/kms/testdata/Key/basic/main_gen.tf @@ -0,0 +1,14 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resource "aws_kms_key" "test" { + description = var.rName + deletion_window_in_days = 7 + enable_key_rotation = true +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} diff --git a/internal/service/kms/testdata/Key/basic_v6.10.0/main_gen.tf b/internal/service/kms/testdata/Key/basic_v6.10.0/main_gen.tf new file mode 100644 index 000000000000..e25e4c3647de --- /dev/null +++ b/internal/service/kms/testdata/Key/basic_v6.10.0/main_gen.tf @@ -0,0 +1,24 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_kms_key" "test" { + description = var.rName + deletion_window_in_days = 7 + enable_key_rotation = true +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "6.10.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/kms/testdata/Key/region_override/main_gen.tf b/internal/service/kms/testdata/Key/region_override/main_gen.tf new file mode 100644 index 000000000000..b4602252d79e --- /dev/null +++ b/internal/service/kms/testdata/Key/region_override/main_gen.tf @@ -0,0 +1,22 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resource "aws_kms_key" "test" { + region = var.region + + description = var.rName + deletion_window_in_days = 7 + enable_key_rotation = true +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} + +variable "region" { + description = "Region to deploy resource in" + type = string + nullable = false +} diff --git a/internal/service/kms/testdata/tmpl/alias_basic.gtpl b/internal/service/kms/testdata/tmpl/alias_basic.gtpl new file mode 100644 index 000000000000..268698df355b --- /dev/null +++ b/internal/service/kms/testdata/tmpl/alias_basic.gtpl @@ -0,0 +1,12 @@ +resource "aws_kms_alias" "test" { +{{- template "region" }} + name = "alias/${var.rName}" + target_key_id = aws_kms_key.test.id +} + +resource "aws_kms_key" "test" { +{{- template "region" }} + description = var.rName + deletion_window_in_days = 7 + enable_key_rotation = true +} diff --git a/internal/service/kms/testdata/tmpl/key_tags.gtpl b/internal/service/kms/testdata/tmpl/key_tags.gtpl index 8c5db0c2f9ca..414bc75a15cc 100644 --- a/internal/service/kms/testdata/tmpl/key_tags.gtpl +++ b/internal/service/kms/testdata/tmpl/key_tags.gtpl @@ -1,4 +1,5 @@ resource "aws_kms_key" "test" { +{{- template "region" }} description = var.rName 
deletion_window_in_days = 7 enable_key_rotation = true diff --git a/internal/service/kms/validate.go b/internal/service/kms/validate.go index 059a584b3711..a02ed4935af5 100644 --- a/internal/service/kms/validate.go +++ b/internal/service/kms/validate.go @@ -42,7 +42,7 @@ func validNameForDataSource(v any, k string) (ws []string, es []error) { if !aliasNameRegex.MatchString(value) { es = append(es, fmt.Errorf( - "%q must begin with 'alias/' and be comprised of only [0-9A-Za-z_/-]", k)) + "%q must begin with 'alias/' and only contain [0-9A-Za-z_/-]", k)) } return } @@ -56,7 +56,7 @@ func validNameForResource(v any, k string) (ws []string, es []error) { if !aliasNameRegex.MatchString(value) { es = append(es, fmt.Errorf( - "%q must begin with 'alias/' and be comprised of only [0-9A-Za-z_/-]", k)) + "%q must begin with 'alias/' and only contain [0-9A-Za-z_/-]", k)) } return } @@ -87,7 +87,7 @@ func validateKeyARN(v any, k string) (ws []string, errors []error) { } if _, err := arn.Parse(value); err != nil { - errors = append(errors, fmt.Errorf("%q (%s) is an invalid ARN: %s", k, value, err)) + errors = append(errors, fmt.Errorf("%q (%s) is an invalid ARN: %w", k, value, err)) return } @@ -113,7 +113,7 @@ func validateKeyAliasARN(v any, k string) (ws []string, errors []error) { } if _, err := arn.Parse(value); err != nil { - errors = append(errors, fmt.Errorf("%q (%s) is an invalid ARN: %s", k, value, err)) + errors = append(errors, fmt.Errorf("%q (%s) is an invalid ARN: %w", k, value, err)) return } diff --git a/internal/service/kms/wait.go b/internal/service/kms/wait.go index 2df46fae0bdd..e78439ada58f 100644 --- a/internal/service/kms/wait.go +++ b/internal/service/kms/wait.go @@ -26,7 +26,7 @@ const ( // waitIAMPropagation retries the specified function if the returned error indicates an IAM eventual consistency issue. 
func waitIAMPropagation[T any](ctx context.Context, timeout time.Duration, f func() (T, error)) (T, error) { - outputRaw, err := tfresource.RetryWhenIsA[*awstypes.MalformedPolicyDocumentException](ctx, timeout, func() (any, error) { + outputRaw, err := tfresource.RetryWhenIsA[any, *awstypes.MalformedPolicyDocumentException](ctx, timeout, func(ctx context.Context) (any, error) { return f() }) diff --git a/internal/service/lakeformation/data_cells_filter.go b/internal/service/lakeformation/data_cells_filter.go index 36a71709047f..378f444cada8 100644 --- a/internal/service/lakeformation/data_cells_filter.go +++ b/internal/service/lakeformation/data_cells_filter.go @@ -210,7 +210,7 @@ func (r *dataCellsFilterResource) Create(ctx context.Context, req resource.Creat state.ID = fwflex.StringValueToFramework(ctx, id) createTimeout := r.CreateTimeout(ctx, plan.Timeouts) - outputRaws, err := tfresource.RetryWhenNotFound(ctx, createTimeout, func() (any, error) { + output, err := tfresource.RetryWhenNotFound(ctx, createTimeout, func(ctx context.Context) (*awstypes.DataCellsFilter, error) { return findDataCellsFilterByID(ctx, conn, state.ID.ValueString()) }) @@ -222,7 +222,6 @@ func (r *dataCellsFilterResource) Create(ctx context.Context, req resource.Creat return } - output := outputRaws.(*awstypes.DataCellsFilter) td := tableData{} resp.Diagnostics.Append(fwflex.Flatten(ctx, output, &td)...) 
diff --git a/internal/service/lakeformation/data_lake_settings.go b/internal/service/lakeformation/data_lake_settings.go index fe0fd2df884f..022687bb3ff8 100644 --- a/internal/service/lakeformation/data_lake_settings.go +++ b/internal/service/lakeformation/data_lake_settings.go @@ -14,7 +14,6 @@ import ( "github.com/aws/aws-sdk-go-v2/service/lakeformation" awstypes "github.com/aws/aws-sdk-go-v2/service/lakeformation/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/create" @@ -232,27 +231,23 @@ func resourceDataLakeSettingsCreate(ctx context.Context, d *schema.ResourceData, input.DataLakeSettings = settings var output *lakeformation.PutDataLakeSettingsOutput - err := retry.RetryContext(ctx, IAMPropagationTimeout, func() *retry.RetryError { + err := tfresource.Retry(ctx, IAMPropagationTimeout, func(ctx context.Context) *tfresource.RetryError { var err error output, err = conn.PutDataLakeSettings(ctx, input) if err != nil { if errs.IsAErrorMessageContains[*awstypes.InvalidInputException](err, "Invalid principal") { - return retry.RetryableError(err) + return tfresource.RetryableError(err) } if errs.IsA[*awstypes.ConcurrentModificationException](err) { - return retry.RetryableError(err) + return tfresource.RetryableError(err) } - return retry.NonRetryableError(fmt.Errorf("creating Lake Formation data lake settings: %w", err)) + return tfresource.NonRetryableError(fmt.Errorf("creating Lake Formation data lake settings: %w", err)) } return nil }) - if tfresource.TimedOut(err) { - output, err = conn.PutDataLakeSettings(ctx, input) - } - if err != nil { return sdkdiag.AppendErrorf(diags, "creating Lake Formation data lake settings: %s", err) } diff --git a/internal/service/lakeformation/exports_test.go 
b/internal/service/lakeformation/exports_test.go index dad67c3c4080..e01b29e25bf4 100644 --- a/internal/service/lakeformation/exports_test.go +++ b/internal/service/lakeformation/exports_test.go @@ -6,11 +6,12 @@ package lakeformation // exports used for testing only. var ( ResourceDataCellsFilter = newDataCellsFilterResource + ResourceLFTagExpression = newLFTagExpressionResource ResourceResourceLFTag = newResourceLFTagResource ResourceOptIn = newOptInResource FindDataCellsFilterByID = findDataCellsFilterByID - FindResourceLFTagByID = findResourceLFTagByID + FindLFTagExpression = findLFTagExpression LFTagParseResourceID = lfTagParseResourceID FindOptInByID = findOptInByID diff --git a/internal/service/lakeformation/lakeformation_test.go b/internal/service/lakeformation/lakeformation_test.go index 819c9efbe954..0dcc322a5d4b 100644 --- a/internal/service/lakeformation/lakeformation_test.go +++ b/internal/service/lakeformation/lakeformation_test.go @@ -84,6 +84,11 @@ func TestAccLakeFormation_serial(t *testing.T) { "values": testAccLFTag_Values, "valuesOverFifty": testAccLFTag_Values_overFifty, }, + "LFTagExpression": { + acctest.CtBasic: testAccLFTagExpression_basic, + acctest.CtDisappears: testAccLFTagExpression_disappears, + "update": testAccLFTagExpression_update, + }, "ResourceLFTag": { acctest.CtBasic: testAccResourceLFTag_basic, acctest.CtDisappears: testAccResourceLFTag_disappears, diff --git a/internal/service/lakeformation/lf_tag_expression.go b/internal/service/lakeformation/lf_tag_expression.go new file mode 100644 index 000000000000..1542edd4145d --- /dev/null +++ b/internal/service/lakeformation/lf_tag_expression.go @@ -0,0 +1,281 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package lakeformation + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/lakeformation" + awstypes "github.com/aws/aws-sdk-go-v2/service/lakeformation/types" + "github.com/hashicorp/terraform-plugin-framework-validators/setvalidator" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-provider-aws/internal/create" + "github.com/hashicorp/terraform-provider-aws/internal/errs" + "github.com/hashicorp/terraform-provider-aws/internal/errs/fwdiag" + intflex "github.com/hashicorp/terraform-provider-aws/internal/flex" + "github.com/hashicorp/terraform-provider-aws/internal/framework" + fwflex "github.com/hashicorp/terraform-provider-aws/internal/framework/flex" + fwtypes "github.com/hashicorp/terraform-provider-aws/internal/framework/types" + "github.com/hashicorp/terraform-provider-aws/internal/retry" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + "github.com/hashicorp/terraform-provider-aws/names" +) + +// @FrameworkResource("aws_lakeformation_lf_tag_expression", name="LF Tag Expression") +func newLFTagExpressionResource(_ context.Context) (resource.ResourceWithConfigure, error) { + return &lfTagExpressionResource{}, nil +} + +const ( + ResNameLFTagExpression = "LF Tag Expression" +) + +type lfTagExpressionResource struct { + framework.ResourceWithModel[lfTagExpressionResourceModel] +} + +func (r *lfTagExpressionResource) Schema(ctx context.Context, _ resource.SchemaRequest, response 
*resource.SchemaResponse) { + response.Schema = schema.Schema{ + Description: "Manages an AWS Lake Formation Tag Expression.", + Attributes: map[string]schema.Attribute{ + names.AttrCatalogID: schema.StringAttribute{ + Optional: true, + Computed: true, + Description: "The ID of the Data Catalog.", + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + stringplanmodifier.RequiresReplaceIfConfigured(), + }, + }, + names.AttrName: schema.StringAttribute{ + Required: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + Description: "The name of the LF-Tag Expression.", + }, + names.AttrDescription: schema.StringAttribute{ + Optional: true, + Description: "A description of the LF-Tag Expression.", + }, + }, + Blocks: map[string]schema.Block{ + names.AttrExpression: schema.SetNestedBlock{ + CustomType: fwtypes.NewSetNestedObjectTypeOf[expressionLfTag](ctx), + Validators: []validator.Set{ + setvalidator.IsRequired(), + }, + NestedObject: schema.NestedBlockObject{ + Attributes: map[string]schema.Attribute{ + "tag_key": schema.StringAttribute{ + Required: true, + }, + "tag_values": schema.SetAttribute{ + ElementType: types.StringType, + Required: true, + }, + }, + }, + }, + }, + } +} + +func (r *lfTagExpressionResource) Create(ctx context.Context, request resource.CreateRequest, response *resource.CreateResponse) { + conn := r.Meta().LakeFormationClient(ctx) + + var data lfTagExpressionResourceModel + response.Diagnostics.Append(request.Plan.Get(ctx, &data)...) + if response.Diagnostics.HasError() { + return + } + + if data.CatalogId.IsNull() || data.CatalogId.IsUnknown() { + data.CatalogId = fwflex.StringValueToFramework(ctx, r.Meta().AccountID(ctx)) + } + + input := lakeformation.CreateLFTagExpressionInput{} + response.Diagnostics.Append(fwflex.Expand(ctx, data, &input)...) 
+ if response.Diagnostics.HasError() { + return + } + + _, err := conn.CreateLFTagExpression(ctx, &input) + if err != nil { + response.Diagnostics.AddError( + create.ProblemStandardMessage(names.LakeFormation, create.ErrActionCreating, ResNameLFTagExpression, data.Name.String(), err), + err.Error(), + ) + return + } + + response.Diagnostics.Append(response.State.Set(ctx, data)...) +} + +func (r *lfTagExpressionResource) Read(ctx context.Context, request resource.ReadRequest, response *resource.ReadResponse) { + conn := r.Meta().LakeFormationClient(ctx) + + var data lfTagExpressionResourceModel + response.Diagnostics.Append(request.State.Get(ctx, &data)...) + if response.Diagnostics.HasError() { + return + } + + output, err := findLFTagExpression(ctx, conn, data.Name.ValueString(), data.CatalogId.ValueString()) + + if retry.NotFound(err) { + response.Diagnostics.Append(fwdiag.NewResourceNotFoundWarningDiagnostic(err)) + response.State.RemoveResource(ctx) + return + } + + if err != nil { + response.Diagnostics.AddError( + create.ProblemStandardMessage(names.LakeFormation, create.ErrActionReading, ResNameLFTagExpression, data.Name.String(), err), + err.Error(), + ) + return + } + + response.Diagnostics.Append(fwflex.Flatten(ctx, output, &data)...) + if response.Diagnostics.HasError() { + return + } + + response.Diagnostics.Append(response.State.Set(ctx, &data)...) +} + +func (r *lfTagExpressionResource) Update(ctx context.Context, request resource.UpdateRequest, response *resource.UpdateResponse) { + conn := r.Meta().LakeFormationClient(ctx) + + var plan, state lfTagExpressionResourceModel + response.Diagnostics.Append(request.Plan.Get(ctx, &plan)...) + response.Diagnostics.Append(request.State.Get(ctx, &state)...) + if response.Diagnostics.HasError() { + return + } + + diff, d := fwflex.Diff(ctx, plan, state) + response.Diagnostics.Append(d...) 
+ if response.Diagnostics.HasError() { + return + } + + if diff.HasChanges() { + var input lakeformation.UpdateLFTagExpressionInput + response.Diagnostics.Append(fwflex.Expand(ctx, plan, &input)...) + if response.Diagnostics.HasError() { + return + } + + _, err := conn.UpdateLFTagExpression(ctx, &input) + if err != nil { + response.Diagnostics.AddError( + create.ProblemStandardMessage(names.LakeFormation, create.ErrActionUpdating, ResNameLFTagExpression, plan.Name.String(), err), + err.Error(), + ) + return + } + } + + response.Diagnostics.Append(response.State.Set(ctx, &plan)...) +} + +func (r *lfTagExpressionResource) Delete(ctx context.Context, request resource.DeleteRequest, response *resource.DeleteResponse) { + conn := r.Meta().LakeFormationClient(ctx) + + var state lfTagExpressionResourceModel + response.Diagnostics.Append(request.State.Get(ctx, &state)...) + if response.Diagnostics.HasError() { + return + } + + input := lakeformation.DeleteLFTagExpressionInput{ + CatalogId: state.CatalogId.ValueStringPointer(), + Name: state.Name.ValueStringPointer(), + } + + _, err := conn.DeleteLFTagExpression(ctx, &input) + + if errs.IsA[*awstypes.EntityNotFoundException](err) { + return + } + + if err != nil { + response.Diagnostics.AddError( + create.ProblemStandardMessage(names.LakeFormation, create.ErrActionDeleting, ResNameLFTagExpression, state.Name.String(), err), + err.Error(), + ) + return + } +} + +const ( + lfTagExpressionIDPartCount = 2 +) + +func (r *lfTagExpressionResource) ImportState(ctx context.Context, request resource.ImportStateRequest, response *resource.ImportStateResponse) { + parts, err := intflex.ExpandResourceId(request.ID, lfTagExpressionIDPartCount, false) + if err != nil { + response.Diagnostics.AddError( + create.ProblemStandardMessage(names.LakeFormation, create.ErrActionImporting, ResNameLFTagExpression, request.ID, err), + err.Error(), + ) + return + } + + name := parts[0] + catalogId := parts[1] + // Set the parsed values in state + 
response.Diagnostics.Append(response.State.SetAttribute(ctx, path.Root(names.AttrName), name)...) + response.Diagnostics.Append(response.State.SetAttribute(ctx, path.Root(names.AttrCatalogID), catalogId)...) + if response.Diagnostics.HasError() { + return + } +} + +type lfTagExpressionResourceModel struct { + framework.WithRegionModel + CatalogId types.String `tfsdk:"catalog_id"` + Description types.String `tfsdk:"description"` + Name types.String `tfsdk:"name"` + Expression fwtypes.SetNestedObjectValueOf[expressionLfTag] `tfsdk:"expression"` +} + +type expressionLfTag struct { + TagKey types.String `tfsdk:"tag_key"` + TagValues fwtypes.SetOfString `tfsdk:"tag_values"` +} + +func findLFTagExpression(ctx context.Context, conn *lakeformation.Client, name, catalogId string) (*lakeformation.GetLFTagExpressionOutput, error) { + input := lakeformation.GetLFTagExpressionInput{ + CatalogId: aws.String(catalogId), + Name: aws.String(name), + } + + output, err := conn.GetLFTagExpression(ctx, &input) + + if errs.IsA[*awstypes.EntityNotFoundException](err) { + return nil, &retry.NotFoundError{ + LastError: err, + } + } + + if err != nil { + return nil, err + } + + if output == nil || output.Expression == nil { + return nil, tfresource.NewEmptyResultError(input) + } + + return output, nil +} diff --git a/internal/service/lakeformation/lf_tag_expression_test.go b/internal/service/lakeformation/lf_tag_expression_test.go new file mode 100644 index 000000000000..518ff23a2694 --- /dev/null +++ b/internal/service/lakeformation/lf_tag_expression_test.go @@ -0,0 +1,275 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package lakeformation_test + +import ( + "context" + "errors" + "fmt" + "testing" + + "github.com/aws/aws-sdk-go-v2/service/lakeformation" + awstypes "github.com/aws/aws-sdk-go-v2/service/lakeformation/types" + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/terraform" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/create" + "github.com/hashicorp/terraform-provider-aws/internal/errs" + "github.com/hashicorp/terraform-provider-aws/internal/retry" + tflakeformation "github.com/hashicorp/terraform-provider-aws/internal/service/lakeformation" + "github.com/hashicorp/terraform-provider-aws/names" +) + +const ( + ResNameLFTagExpression = "LF Tag Expression" +) + +func testAccLFTagExpression_basic(t *testing.T) { + ctx := acctest.Context(t) + + var lftagexpression lakeformation.GetLFTagExpressionOutput + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_lakeformation_lf_tag_expression.test" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, names.LakeFormation) + testAccLFTagExpressionPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.LakeFormationServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckLFTagExpressionDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccLFTagExpressionConfig_basic(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckLFTagExpressionExists(ctx, resourceName, &lftagexpression), + resource.TestCheckResourceAttr(resourceName, names.AttrName, rName), + resource.TestCheckResourceAttrSet(resourceName, names.AttrCatalogID), + 
resource.TestCheckResourceAttr(resourceName, names.AttrDescription, "test description"), + resource.TestCheckResourceAttr(resourceName, "expression.#", "1"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrsImportStateIdFunc(resourceName, ",", names.AttrName, names.AttrCatalogID), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: names.AttrName, + }, + }, + }) +} + +func testAccLFTagExpression_update(t *testing.T) { + ctx := acctest.Context(t) + + var lftagexpression lakeformation.GetLFTagExpressionOutput + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_lakeformation_lf_tag_expression.test" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, names.LakeFormation) + testAccLFTagExpressionPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.LakeFormationServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckLFTagExpressionDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccLFTagExpressionConfig_basic(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckLFTagExpressionExists(ctx, resourceName, &lftagexpression), + resource.TestCheckResourceAttr(resourceName, names.AttrName, rName), + resource.TestCheckResourceAttrSet(resourceName, names.AttrCatalogID), + resource.TestCheckResourceAttr(resourceName, names.AttrDescription, "test description"), + resource.TestCheckResourceAttr(resourceName, "expression.#", "1"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrsImportStateIdFunc(resourceName, ",", names.AttrName, names.AttrCatalogID), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: names.AttrName, + }, + { + Config: testAccLFTagExpressionConfig_update(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckLFTagExpressionExists(ctx, 
resourceName, &lftagexpression), + resource.TestCheckResourceAttr(resourceName, names.AttrName, rName), + resource.TestCheckResourceAttrSet(resourceName, names.AttrCatalogID), + resource.TestCheckResourceAttr(resourceName, names.AttrDescription, "test description two"), + resource.TestCheckResourceAttr(resourceName, "expression.#", "2"), + ), + }, + }, + }) +} + +func testAccLFTagExpression_disappears(t *testing.T) { + ctx := acctest.Context(t) + + var lftagexpression lakeformation.GetLFTagExpressionOutput + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_lakeformation_lf_tag_expression.test" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, names.LakeFormation) + testAccLFTagExpressionPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.LakeFormationServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckLFTagExpressionDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccLFTagExpressionConfig_basic(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckLFTagExpressionExists(ctx, resourceName, &lftagexpression), + acctest.CheckFrameworkResourceDisappears(ctx, acctest.Provider, tflakeformation.ResourceLFTagExpression, resourceName), + ), + ExpectNonEmptyPlan: true, + }, + }, + }) +} +func testAccCheckLFTagExpressionDestroy(ctx context.Context) resource.TestCheckFunc { + return func(s *terraform.State) error { + conn := acctest.Provider.Meta().(*conns.AWSClient).LakeFormationClient(ctx) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "aws_lakeformation_lf_tag_expression" { + continue + } + + _, err := tflakeformation.FindLFTagExpression(ctx, conn, rs.Primary.Attributes[names.AttrName], rs.Primary.Attributes[names.AttrCatalogID]) + + if retry.NotFound(err) { + continue + } + + if errs.IsAErrorMessageContains[*awstypes.AccessDeniedException](err, "Insufficient 
Lake Formation permission(s)") { + continue + } + + if err != nil { + return create.Error(names.LakeFormation, create.ErrActionCheckingDestroyed, ResNameLFTagExpression, rs.Primary.ID, err) + } + + return create.Error(names.LakeFormation, create.ErrActionCheckingDestroyed, ResNameLFTagExpression, rs.Primary.ID, errors.New("not destroyed")) + } + + return nil + } +} + +func testAccCheckLFTagExpressionExists(ctx context.Context, name string, lftagexpression *lakeformation.GetLFTagExpressionOutput) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[name] + if !ok { + return create.Error(names.LakeFormation, create.ErrActionCheckingExistence, ResNameLFTagExpression, name, errors.New("not found")) + } + + if rs.Primary.ID == "" { + return create.Error(names.LakeFormation, create.ErrActionCheckingExistence, ResNameLFTagExpression, name, errors.New("not set")) + } + + conn := acctest.Provider.Meta().(*conns.AWSClient).LakeFormationClient(ctx) + resp, err := tflakeformation.FindLFTagExpression(ctx, conn, rs.Primary.Attributes[names.AttrName], rs.Primary.Attributes[names.AttrCatalogID]) + + if err != nil { + return create.Error(names.LakeFormation, create.ErrActionCheckingExistence, ResNameLFTagExpression, rs.Primary.ID, err) + } + + *lftagexpression = *resp + + return nil + } +} + +func testAccLFTagExpressionPreCheck(ctx context.Context, t *testing.T) { + conn := acctest.Provider.Meta().(*conns.AWSClient).LakeFormationClient(ctx) + + input := lakeformation.ListLFTagExpressionsInput{} + _, err := conn.ListLFTagExpressions(ctx, &input) + + if acctest.PreCheckSkipError(err) { + t.Skipf("skipping acceptance testing: %s", err) + } + if err != nil { + t.Fatalf("unexpected PreCheck error: %s", err) + } +} + +const testAccLFTagExpression_baseConfig = ` +data "aws_caller_identity" "current" {} + +data "aws_iam_session_context" "current" { + arn = data.aws_caller_identity.current.arn +} + +resource 
"aws_lakeformation_data_lake_settings" "test" { + admins = [data.aws_iam_session_context.current.issuer_arn] +} + +resource "aws_lakeformation_lf_tag" "test" { + key = "key" + values = ["value"] + + depends_on = [aws_lakeformation_data_lake_settings.test] +} +` + +func testAccLFTagExpressionConfig_basic(rName string) string { + return acctest.ConfigCompose(testAccLFTagExpression_baseConfig, + fmt.Sprintf(` +resource "aws_lakeformation_lf_tag_expression" "test" { + name = %[1]q + description = "test description" + + expression { + tag_key = aws_lakeformation_lf_tag.test.key + tag_values = aws_lakeformation_lf_tag.test.values + } + + depends_on = [aws_lakeformation_data_lake_settings.test] +} +`, rName)) +} + +func testAccLFTagExpressionConfig_update(rName string) string { + return acctest.ConfigCompose(testAccLFTagExpression_baseConfig, + fmt.Sprintf(` +resource "aws_lakeformation_lf_tag" "test2" { + key = "key2" + values = ["value2"] + + depends_on = [aws_lakeformation_data_lake_settings.test] +} + +resource "aws_lakeformation_lf_tag_expression" "test" { + name = %[1]q + description = "test description two" + + expression { + tag_key = aws_lakeformation_lf_tag.test.key + tag_values = aws_lakeformation_lf_tag.test.values + } + + expression { + tag_key = aws_lakeformation_lf_tag.test2.key + tag_values = aws_lakeformation_lf_tag.test2.values + } + + depends_on = [aws_lakeformation_data_lake_settings.test] +} +`, rName)) +} diff --git a/internal/service/lakeformation/opt_in.go b/internal/service/lakeformation/opt_in.go index c3d9633dd9fa..24171c749e9d 100644 --- a/internal/service/lakeformation/opt_in.go +++ b/internal/service/lakeformation/opt_in.go @@ -383,22 +383,18 @@ func (r *optInResource) Create(ctx context.Context, req resource.CreateRequest, } var output *lakeformation.CreateLakeFormationOptInOutput - err := retry.RetryContext(ctx, 2*IAMPropagationTimeout, func() *retry.RetryError { + err := tfresource.Retry(ctx, 2*IAMPropagationTimeout, func(ctx 
context.Context) *tfresource.RetryError { var err error output, err = conn.CreateLakeFormationOptIn(ctx, &in) if err != nil { if errs.IsAErrorMessageContains[*awstypes.AccessDeniedException](err, "Insufficient Lake Formation permission(s) on Catalog") { - return retry.RetryableError(err) + return tfresource.RetryableError(err) } - return retry.NonRetryableError(err) + return tfresource.NonRetryableError(err) } return nil }) - if tfresource.TimedOut(err) { - output, err = conn.CreateLakeFormationOptIn(ctx, &in) - } - if err != nil { resp.Diagnostics.AddError( create.ProblemStandardMessage(names.LakeFormation, create.ErrActionCreating, ResNameOptIn, principal.DataLakePrincipalIdentifier.ValueString(), err), diff --git a/internal/service/lakeformation/permissions.go b/internal/service/lakeformation/permissions.go index b25ad9208db2..bfcedb699209 100644 --- a/internal/service/lakeformation/permissions.go +++ b/internal/service/lakeformation/permissions.go @@ -15,7 +15,6 @@ import ( "github.com/aws/aws-sdk-go-v2/service/lakeformation" awstypes "github.com/aws/aws-sdk-go-v2/service/lakeformation/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -470,35 +469,31 @@ func resourcePermissionsCreate(ctx context.Context, d *schema.ResourceData, meta } var output *lakeformation.GrantPermissionsOutput - err := retry.RetryContext(ctx, IAMPropagationTimeout, func() *retry.RetryError { + err := tfresource.Retry(ctx, IAMPropagationTimeout, func(ctx context.Context) *tfresource.RetryError { var err error output, err = conn.GrantPermissions(ctx, input) if err != nil { if errs.IsAErrorMessageContains[*awstypes.InvalidInputException](err, "Invalid principal") { - return retry.RetryableError(err) + return 
tfresource.RetryableError(err) } if errs.IsAErrorMessageContains[*awstypes.InvalidInputException](err, "Grantee has no permissions") { - return retry.RetryableError(err) + return tfresource.RetryableError(err) } if errs.IsAErrorMessageContains[*awstypes.InvalidInputException](err, "register the S3 path") { - return retry.RetryableError(err) + return tfresource.RetryableError(err) } if errs.IsA[*awstypes.ConcurrentModificationException](err) { - return retry.RetryableError(err) + return tfresource.RetryableError(err) } if errs.IsAErrorMessageContains[*awstypes.AccessDeniedException](err, "is not authorized to access requested permissions") { - return retry.RetryableError(err) + return tfresource.RetryableError(err) } - return retry.NonRetryableError(fmt.Errorf("creating Lake Formation Permissions: %w", err)) + return tfresource.NonRetryableError(fmt.Errorf("creating Lake Formation Permissions: %w", err)) } return nil }) - if tfresource.TimedOut(err) { - output, err = conn.GrantPermissions(ctx, input) - } - if err != nil { return sdkdiag.AppendErrorf(diags, "creating Lake Formation Permissions (input: %v): %s", input, err) } @@ -791,29 +786,25 @@ func resourcePermissionsDelete(ctx context.Context, d *schema.ResourceData, meta return diags } - err := retry.RetryContext(ctx, permissionsDeleteRetryTimeout, func() *retry.RetryError { + err := tfresource.Retry(ctx, permissionsDeleteRetryTimeout, func(ctx context.Context) *tfresource.RetryError { var err error _, err = conn.RevokePermissions(ctx, input) if err != nil { if errs.IsAErrorMessageContains[*awstypes.InvalidInputException](err, "register the S3 path") { - return retry.RetryableError(err) + return tfresource.RetryableError(err) } if errs.IsA[*awstypes.ConcurrentModificationException](err) { - return retry.RetryableError(err) + return tfresource.RetryableError(err) } if errs.IsAErrorMessageContains[*awstypes.AccessDeniedException](err, "is not authorized to access requested permissions") { - return 
retry.RetryableError(err) + return tfresource.RetryableError(err) } - return retry.NonRetryableError(fmt.Errorf("unable to revoke Lake Formation Permissions: %w", err)) + return tfresource.NonRetryableError(fmt.Errorf("unable to revoke Lake Formation Permissions: %w", err)) } return nil }) - if tfresource.TimedOut(err) { - _, err = conn.RevokePermissions(ctx, input) - } - if errs.IsAErrorMessageContains[*awstypes.InvalidInputException](err, "No permissions revoked. Grantee") { return diags } @@ -838,21 +829,17 @@ func resourcePermissionsDelete(ctx context.Context, d *schema.ResourceData, meta // You can't just wait until permissions = 0 because there could be many other unrelated permissions // on the resource and filtering is non-trivial for table with columns. - err = retry.RetryContext(ctx, permissionsDeleteRetryTimeout, func() *retry.RetryError { + err = tfresource.Retry(ctx, permissionsDeleteRetryTimeout, func(ctx context.Context) *tfresource.RetryError { var err error _, err = conn.RevokePermissions(ctx, input) if !errs.IsAErrorMessageContains[*awstypes.InvalidInputException](err, "No permissions revoked. Grantee has no") { - return retry.RetryableError(err) + return tfresource.RetryableError(err) } return nil }) - if tfresource.TimedOut(err) { - _, err = conn.RevokePermissions(ctx, input) - } - if errs.IsAErrorMessageContains[*awstypes.InvalidInputException](err, "No permissions revoked. 
Grantee") { return diags } diff --git a/internal/service/lakeformation/permissions_test.go b/internal/service/lakeformation/permissions_test.go index f128e286f24f..b270ef8d0f62 100644 --- a/internal/service/lakeformation/permissions_test.go +++ b/internal/service/lakeformation/permissions_test.go @@ -15,7 +15,6 @@ import ( "github.com/aws/aws-sdk-go-v2/service/lakeformation" awstypes "github.com/aws/aws-sdk-go-v2/service/lakeformation/types" "github.com/hashicorp/aws-sdk-go-base/v2/tfawserr" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" @@ -1137,7 +1136,7 @@ func permissionCountForResource(ctx context.Context, conn *lakeformation.Client, log.Printf("[DEBUG] Reading Lake Formation permissions: %v", input) var allPermissions []awstypes.PrincipalResourcePermissions - err := retry.RetryContext(ctx, tflakeformation.IAMPropagationTimeout, func() *retry.RetryError { + err := tfresource.Retry(ctx, tflakeformation.IAMPropagationTimeout, func(ctx context.Context) *tfresource.RetryError { pages := lakeformation.NewListPermissionsPaginator(conn, input) for pages.HasMorePages() { @@ -1152,11 +1151,11 @@ func permissionCountForResource(ctx context.Context, conn *lakeformation.Client, } if errs.IsAErrorMessageContains[*awstypes.InvalidInputException](err, "Invalid principal") { - return retry.RetryableError(err) + return tfresource.RetryableError(err) } if err != nil { - return retry.NonRetryableError(fmt.Errorf("acceptance test: error listing Lake Formation Permissions getting permission count: %w", err)) + return tfresource.NonRetryableError(fmt.Errorf("acceptance test: error listing Lake Formation Permissions getting permission count: %w", err)) } for _, permission := range page.PrincipalResourcePermissions { @@ -1171,10 +1170,6 @@ func 
permissionCountForResource(ctx context.Context, conn *lakeformation.Client, return nil }) - if tfresource.TimedOut(err) { - _, err = conn.ListPermissions(ctx, input) - } - if errs.IsA[*awstypes.EntityNotFoundException](err) { return 0, nil } diff --git a/internal/service/lakeformation/resource.go b/internal/service/lakeformation/resource.go index 57a8b4a9ddb1..a387dd153d26 100644 --- a/internal/service/lakeformation/resource.go +++ b/internal/service/lakeformation/resource.go @@ -64,6 +64,12 @@ func ResourceResource() *schema.Resource { Computed: true, ForceNew: true, }, + "with_privileged_access": { + Type: schema.TypeBool, + Optional: true, + Computed: true, + ForceNew: true, + }, }, } } @@ -95,6 +101,10 @@ func resourceResourceCreate(ctx context.Context, d *schema.ResourceData, meta an input.WithFederation = aws.Bool(v.(bool)) } + if v, ok := d.GetOk("with_privileged_access"); ok { + input.WithPrivilegedAccess = v.(bool) + } + _, err := conn.RegisterResource(ctx, input) if errs.IsA[*awstypes.AlreadyExistsException](err) { @@ -131,6 +141,7 @@ func resourceResourceRead(ctx context.Context, d *schema.ResourceData, meta any) } d.Set(names.AttrRoleARN, resource.RoleArn) d.Set("with_federation", resource.WithFederation) + d.Set("with_privileged_access", resource.WithPrivilegedAccess) return diags } diff --git a/internal/service/lakeformation/resource_data_source.go b/internal/service/lakeformation/resource_data_source.go index 8cb2c0df4f87..ffcf9bc604d0 100644 --- a/internal/service/lakeformation/resource_data_source.go +++ b/internal/service/lakeformation/resource_data_source.go @@ -31,6 +31,10 @@ func DataSourceResource() *schema.Resource { Required: true, ValidateFunc: verify.ValidARN, }, + "hybrid_access_enabled": { + Type: schema.TypeBool, + Computed: true, + }, "last_modified": { Type: schema.TypeString, Computed: true, @@ -39,6 +43,14 @@ func DataSourceResource() *schema.Resource { Type: schema.TypeString, Computed: true, }, + "with_federation": { + Type: 
schema.TypeBool, + Computed: true, + }, + "with_privileged_access": { + Type: schema.TypeBool, + Computed: true, + }, }, } } @@ -71,10 +83,13 @@ func dataSourceResourceRead(ctx context.Context, d *schema.ResourceData, meta an d.SetId(aws.ToString(input.ResourceArn)) // d.Set("arn", output.ResourceInfo.ResourceArn) // output not including resource arn currently - d.Set(names.AttrRoleARN, output.ResourceInfo.RoleArn) + d.Set("hybrid_access_enabled", output.ResourceInfo.HybridAccessEnabled) if output.ResourceInfo.LastModified != nil { // output not including last modified currently d.Set("last_modified", output.ResourceInfo.LastModified.Format(time.RFC3339)) } + d.Set(names.AttrRoleARN, output.ResourceInfo.RoleArn) + d.Set("with_federation", output.ResourceInfo.WithFederation) + d.Set("with_privileged_access", output.ResourceInfo.WithPrivilegedAccess) return diags } diff --git a/internal/service/lakeformation/resource_data_source_test.go b/internal/service/lakeformation/resource_data_source_test.go index bf531399352f..83487cccf3cf 100644 --- a/internal/service/lakeformation/resource_data_source_test.go +++ b/internal/service/lakeformation/resource_data_source_test.go @@ -30,6 +30,9 @@ func TestAccLakeFormationResourceDataSource_basic(t *testing.T) { Check: resource.ComposeTestCheckFunc( resource.TestCheckResourceAttrPair(dataSourceName, names.AttrARN, resourceName, names.AttrARN), resource.TestCheckResourceAttrPair(dataSourceName, names.AttrRoleARN, resourceName, names.AttrRoleARN), + resource.TestCheckResourceAttr(dataSourceName, "hybrid_access_enabled", acctest.CtFalse), + resource.TestCheckResourceAttr(dataSourceName, "with_federation", acctest.CtFalse), + resource.TestCheckResourceAttr(dataSourceName, "with_privileged_access", acctest.CtFalse), ), }, }, diff --git a/internal/service/lakeformation/resource_lf_tag.go b/internal/service/lakeformation/resource_lf_tag.go index 201d233fef87..55fc4a404ce4 100644 --- a/internal/service/lakeformation/resource_lf_tag.go +++ 
b/internal/service/lakeformation/resource_lf_tag.go @@ -303,23 +303,19 @@ func (r *resourceLFTagResource) Create(ctx context.Context, req resource.CreateR } var output *lakeformation.AddLFTagsToResourceOutput - err := retry.RetryContext(ctx, IAMPropagationTimeout, func() *retry.RetryError { + err := tfresource.Retry(ctx, IAMPropagationTimeout, func(ctx context.Context) *tfresource.RetryError { var err error output, err = conn.AddLFTagsToResource(ctx, in) if err != nil { if errs.IsA[*awstypes.ConcurrentModificationException](err) || errs.IsA[*awstypes.AccessDeniedException](err) { - return retry.RetryableError(err) + return tfresource.RetryableError(err) } - return retry.NonRetryableError(err) + return tfresource.NonRetryableError(err) } return nil }) - if tfresource.TimedOut(err) { - output, err = conn.AddLFTagsToResource(ctx, in) - } - if err != nil { resp.Diagnostics.AddError( create.ProblemStandardMessage(names.LakeFormation, create.ErrActionCreating, ResNameResourceLFTag, prettify(in), err), @@ -350,12 +346,10 @@ func (r *resourceLFTagResource) Create(ctx context.Context, req resource.CreateR state.ID = fwflex.StringValueToFramework(ctx, id) createTimeout := r.CreateTimeout(ctx, plan.Timeouts) - outputRaw, err := tfresource.RetryWhenNotFound(ctx, createTimeout, func() (any, error) { + out, err := tfresource.RetryWhenNotFound(ctx, createTimeout, func(ctx context.Context) (*lakeformation.GetResourceLFTagsOutput, error) { return findResourceLFTagByID(ctx, conn, state.CatalogID.ValueString(), res) }) - out := outputRaw.(*lakeformation.GetResourceLFTagsOutput) - if err != nil { resp.Diagnostics.AddError( create.ProblemStandardMessage(names.LakeFormation, create.ErrActionSetting, ResNameResourceLFTag, state.ID.String(), err), @@ -467,27 +461,23 @@ func (r *resourceLFTagResource) Delete(ctx context.Context, req resource.DeleteR } deleteTimeout := r.DeleteTimeout(ctx, state.Timeouts) - err := retry.RetryContext(ctx, deleteTimeout, func() *retry.RetryError { + err := 
tfresource.Retry(ctx, deleteTimeout, func(ctx context.Context) *tfresource.RetryError { var err error _, err = conn.RemoveLFTagsFromResource(ctx, in) if err != nil { if errs.IsA[*awstypes.ConcurrentModificationException](err) { - return retry.RetryableError(err) + return tfresource.RetryableError(err) } if errs.IsAErrorMessageContains[*awstypes.AccessDeniedException](err, "is not authorized") { - return retry.RetryableError(err) + return tfresource.RetryableError(err) } - return retry.NonRetryableError(fmt.Errorf("removing Lake Formation LF-Tags: %w", err)) + return tfresource.NonRetryableError(fmt.Errorf("removing Lake Formation LF-Tags: %w", err)) } return nil }) - if tfresource.TimedOut(err) { - _, err = conn.RemoveLFTagsFromResource(ctx, in) - } - if err != nil { resp.Diagnostics.AddError( create.ProblemStandardMessage(names.LakeFormation, create.ErrActionWaitingForDeletion, ResNameResourceLFTag, state.ID.String(), err), diff --git a/internal/service/lakeformation/resource_lf_tags.go b/internal/service/lakeformation/resource_lf_tags.go index 317ee9fe27c0..a98d37d175c1 100644 --- a/internal/service/lakeformation/resource_lf_tags.go +++ b/internal/service/lakeformation/resource_lf_tags.go @@ -15,7 +15,6 @@ import ( "github.com/aws/aws-sdk-go-v2/service/lakeformation" awstypes "github.com/aws/aws-sdk-go-v2/service/lakeformation/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -249,23 +248,19 @@ func resourceResourceLFTagsCreate(ctx context.Context, d *schema.ResourceData, m input.Resource = tagger.ExpandResource(d) var output *lakeformation.AddLFTagsToResourceOutput - err := retry.RetryContext(ctx, IAMPropagationTimeout, func() *retry.RetryError { + err := tfresource.Retry(ctx, IAMPropagationTimeout, 
func(ctx context.Context) *tfresource.RetryError { var err error output, err = conn.AddLFTagsToResource(ctx, input) if err != nil { if errs.IsA[*awstypes.ConcurrentModificationException](err) || errs.IsA[*awstypes.AccessDeniedException](err) { - return retry.RetryableError(err) + return tfresource.RetryableError(err) } - return retry.NonRetryableError(err) + return tfresource.NonRetryableError(err) } return nil }) - if tfresource.TimedOut(err) { - output, err = conn.AddLFTagsToResource(ctx, input) - } - if err != nil { return create.AppendDiagError(diags, names.LakeFormation, create.ErrActionCreating, ResNameLFTags, prettify(input), err) } @@ -356,26 +351,22 @@ func resourceResourceLFTagsDelete(ctx context.Context, d *schema.ResourceData, m return create.AppendDiagWarningMessage(diags, names.LakeFormation, create.ErrActionSetting, ResNameLFTags, d.Id(), "no LF-Tags to remove") } - err := retry.RetryContext(ctx, d.Timeout(schema.TimeoutDelete), func() *retry.RetryError { + err := tfresource.Retry(ctx, d.Timeout(schema.TimeoutDelete), func(ctx context.Context) *tfresource.RetryError { var err error _, err = conn.RemoveLFTagsFromResource(ctx, input) if err != nil { if errs.IsA[*awstypes.ConcurrentModificationException](err) { - return retry.RetryableError(err) + return tfresource.RetryableError(err) } if errs.IsAErrorMessageContains[*awstypes.AccessDeniedException](err, "is not authorized") { - return retry.RetryableError(err) + return tfresource.RetryableError(err) } - return retry.NonRetryableError(fmt.Errorf("removing Lake Formation LF-Tags: %w", err)) + return tfresource.NonRetryableError(fmt.Errorf("removing Lake Formation LF-Tags: %w", err)) } return nil }) - if tfresource.TimedOut(err) { - _, err = conn.RemoveLFTagsFromResource(ctx, input) - } - if err != nil { return create.AppendDiagError(diags, names.LakeFormation, create.ErrActionDeleting, ResNameLFTags, d.Id(), err) } diff --git a/internal/service/lakeformation/resource_test.go 
b/internal/service/lakeformation/resource_test.go index 0a902551eea9..58e0744b177f 100644 --- a/internal/service/lakeformation/resource_test.go +++ b/internal/service/lakeformation/resource_test.go @@ -40,6 +40,7 @@ func TestAccLakeFormationResource_basic(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "hybrid_access_enabled", acctest.CtFalse), resource.TestCheckResourceAttrPair(resourceName, names.AttrRoleARN, roleResourceName, names.AttrARN), resource.TestCheckResourceAttr(resourceName, "with_federation", acctest.CtFalse), + resource.TestCheckResourceAttr(resourceName, "with_privileged_access", acctest.CtFalse), ), }, }, @@ -197,6 +198,33 @@ func TestAccLakeFormationResource_hybridAccessEnabled(t *testing.T) { }) } +func TestAccLakeFormationResource_withPrivilegedAccessEnabled(t *testing.T) { + ctx := acctest.Context(t) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_lakeformation_resource.test" + bucketResourceName := "aws_s3_bucket.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, names.LakeFormation) + }, + ErrorCheck: acctest.ErrorCheck(t, names.LakeFormationServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckResourceDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccResourceConfig_withPrivilegedAccessEnabled(rName, rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckResourceExists(ctx, resourceName), + resource.TestCheckResourceAttrPair(resourceName, names.AttrARN, bucketResourceName, names.AttrARN), + resource.TestCheckResourceAttr(resourceName, "with_privileged_access", acctest.CtTrue), + ), + }, + }, + }) +} + // AWS does not support changing from an IAM role to an SLR. No error is thrown // but the registration is not changed (the IAM role continues in the registration). 
// @@ -334,3 +362,70 @@ resource "aws_lakeformation_resource" "test" { } `, rName) } + +func testAccResourceConfig_withPrivilegedAccessEnabled(bucket, role string) string { + return fmt.Sprintf(` +resource "aws_s3_bucket" "test" { + bucket = %[1]q +} + +resource "aws_iam_role" "test" { + name = %[2]q + path = "/test/" + + assume_role_policy = < 0 && v[0] != nil { + apiObject.SchemaRegistryConfig = expandKafkaSchemaRegistryConfig(v[0].(map[string]any)) + } return apiObject } @@ -1149,6 +1219,10 @@ func flattenAmazonManagedKafkaEventSourceConfig(apiObject *awstypes.AmazonManage tfMap["consumer_group_id"] = aws.ToString(v) } + if v := apiObject.SchemaRegistryConfig; v != nil { + tfMap["schema_registry_config"] = []any{flattenKafkaSchemaRegistryConfig(v)} + } + return tfMap } @@ -1163,6 +1237,10 @@ func expandSelfManagedKafkaEventSourceConfig(tfMap map[string]any) *awstypes.Sel apiObject.ConsumerGroupId = aws.String(v) } + if v, ok := tfMap["schema_registry_config"].([]any); ok && len(v) > 0 && v[0] != nil { + apiObject.SchemaRegistryConfig = expandKafkaSchemaRegistryConfig(v[0].(map[string]any)) + } + return apiObject } @@ -1177,6 +1255,10 @@ func flattenSelfManagedKafkaEventSourceConfig(apiObject *awstypes.SelfManagedKaf tfMap["consumer_group_id"] = aws.ToString(v) } + if v := apiObject.SchemaRegistryConfig; v != nil { + tfMap["schema_registry_config"] = []any{flattenKafkaSchemaRegistryConfig(v)} + } + return tfMap } @@ -1442,3 +1524,137 @@ func flattenEventSourceMappingMetricsConfig(apiObject *awstypes.EventSourceMappi return tfMap } + +func expandKafkaSchemaRegistryConfig(tfMap map[string]any) *awstypes.KafkaSchemaRegistryConfig { + if tfMap == nil { + return nil + } + + apiObject := &awstypes.KafkaSchemaRegistryConfig{} + + if v, ok := tfMap["access_config"].(*schema.Set); ok && v != nil && v.Len() > 0 { + apiObject.AccessConfigs = expandKafkaSchemaRegistryAccessConfig(v.List()) + } + + if v, ok := tfMap["event_record_format"].(string); ok && v != "" { + 
apiObject.EventRecordFormat = awstypes.SchemaRegistryEventRecordFormat(v) + } + + if v, ok := tfMap["schema_registry_uri"].(string); ok && v != "" { + apiObject.SchemaRegistryURI = aws.String(v) + } + + if v, ok := tfMap["schema_validation_config"].(*schema.Set); ok && v != nil && v.Len() > 0 { + apiObject.SchemaValidationConfigs = expandKafkaSchemaValidationConfig(v.List()) + } + + return apiObject +} + +func expandKafkaSchemaRegistryAccessConfig(tfList []any) []awstypes.KafkaSchemaRegistryAccessConfig { + if len(tfList) == 0 || tfList[0] == nil { + return nil + } + + var apiObjects []awstypes.KafkaSchemaRegistryAccessConfig + + for _, tfMapRaw := range tfList { + tfMap, ok := tfMapRaw.(map[string]any) + + if !ok { + continue + } + + apiObject := awstypes.KafkaSchemaRegistryAccessConfig{} + if v, ok := tfMap[names.AttrType].(string); ok && v != "" { + apiObject.Type = awstypes.KafkaSchemaRegistryAuthType(v) + } + if v, ok := tfMap[names.AttrURI].(string); ok && v != "" { + apiObject.URI = aws.String(v) + } + + apiObjects = append(apiObjects, apiObject) + } + return apiObjects +} + +func expandKafkaSchemaValidationConfig(tfList []any) []awstypes.KafkaSchemaValidationConfig { + if len(tfList) == 0 || tfList[0] == nil { + return nil + } + + var apiObjects []awstypes.KafkaSchemaValidationConfig + + for _, tfMapRaw := range tfList { + tfMap, ok := tfMapRaw.(map[string]any) + + if !ok { + continue + } + + apiObject := awstypes.KafkaSchemaValidationConfig{} + if v, ok := tfMap["attribute"].(string); ok && v != "" { + apiObject.Attribute = awstypes.KafkaSchemaValidationAttribute(v) + } + + apiObjects = append(apiObjects, apiObject) + } + return apiObjects +} + +func flattenKafkaSchemaRegistryConfig(apiObject *awstypes.KafkaSchemaRegistryConfig) map[string]any { + if apiObject == nil { + return nil + } + + tfMap := map[string]any{} + if v := apiObject.AccessConfigs; len(v) > 0 { + tfMap["access_config"] = flattenKafkaSchemaRegistryAccessConfig(v) + } + if v := 
apiObject.EventRecordFormat; v != "" { + tfMap["event_record_format"] = v + } + if v := apiObject.SchemaRegistryURI; v != nil { + tfMap["schema_registry_uri"] = aws.ToString(v) + } + if v := apiObject.SchemaValidationConfigs; len(v) > 0 { + tfMap["schema_validation_config"] = flattenSchemaValidationConfig(v) + } + + return tfMap +} + +func flattenKafkaSchemaRegistryAccessConfig(apiObjects []awstypes.KafkaSchemaRegistryAccessConfig) []any { + if len(apiObjects) == 0 { + return nil + } + + var tfList []any + for _, apiObject := range apiObjects { + tfMap := map[string]any{} + if v := apiObject.Type; v != "" { + tfMap[names.AttrType] = v + } + if v := apiObject.URI; v != nil { + tfMap[names.AttrURI] = aws.ToString(v) + } + tfList = append(tfList, tfMap) + } + return tfList +} + +func flattenSchemaValidationConfig(apiObjects []awstypes.KafkaSchemaValidationConfig) []any { + if len(apiObjects) == 0 { + return nil + } + + var tfList []any + for _, apiObject := range apiObjects { + tfMap := map[string]any{} + if v := apiObject.Attribute; v != "" { + tfMap["attribute"] = v + } + tfList = append(tfList, tfMap) + } + return tfList +} diff --git a/internal/service/lambda/event_source_mapping_test.go b/internal/service/lambda/event_source_mapping_test.go index df5561f821f8..508f64373c1f 100644 --- a/internal/service/lambda/event_source_mapping_test.go +++ b/internal/service/lambda/event_source_mapping_test.go @@ -17,7 +17,6 @@ import ( "github.com/aws/aws-sdk-go-v2/service/mq" "github.com/aws/aws-sdk-go-v2/service/secretsmanager" "github.com/hashicorp/aws-sdk-go-base/v2/endpoints" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/plancheck" @@ -1029,6 +1028,78 @@ func TestAccLambdaEventSourceMapping_mskWithEventSourceConfig(t *testing.T) { }) } +func 
TestAccLambdaEventSourceMapping_mskWithEventSourceConfigSchemaRegistry(t *testing.T) { + ctx := acctest.Context(t) + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + + var v lambda.GetEventSourceMappingOutput + resourceName := "aws_lambda_event_source_mapping.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheckMSK(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.LambdaEndpointID, "kafka"), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckEventSourceMappingDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccEventSourceMappingConfig_mskWithEventSourceConfigSchemaRegistryByConfluent(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckEventSourceMappingExists(ctx, resourceName, &v), + resource.TestCheckResourceAttr(resourceName, "amazon_managed_kafka_event_source_config.#", "1"), + resource.TestCheckResourceAttr(resourceName, "amazon_managed_kafka_event_source_config.0.consumer_group_id", "amazon-managed-test-group-id"), + resource.TestCheckResourceAttr(resourceName, "amazon_managed_kafka_event_source_config.0.schema_registry_config.#", "1"), + resource.TestCheckResourceAttr(resourceName, "amazon_managed_kafka_event_source_config.0.schema_registry_config.0.access_config.#", "1"), + resource.TestCheckResourceAttr(resourceName, "amazon_managed_kafka_event_source_config.0.schema_registry_config.0.access_config.0.type", string(awstypes.KafkaSchemaRegistryAuthTypeBasicAuth)), + resource.TestCheckResourceAttrPair(resourceName, "amazon_managed_kafka_event_source_config.0.schema_registry_config.0.access_config.0.uri", "aws_secretsmanager_secret.test", names.AttrARN), + resource.TestCheckResourceAttr(resourceName, "amazon_managed_kafka_event_source_config.0.schema_registry_config.0.event_record_format", 
string(awstypes.SchemaRegistryEventRecordFormatJson)), + resource.TestCheckResourceAttr(resourceName, "amazon_managed_kafka_event_source_config.0.schema_registry_config.0.schema_registry_uri", "https://test-schema-registry.com"), + resource.TestCheckResourceAttr(resourceName, "amazon_managed_kafka_event_source_config.0.schema_registry_config.0.schema_validation_config.#", "2"), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "amazon_managed_kafka_event_source_config.0.schema_registry_config.0.schema_validation_config.*", map[string]string{ + "attribute": string(awstypes.KafkaSchemaValidationAttributeKey), + }), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "amazon_managed_kafka_event_source_config.0.schema_registry_config.0.schema_validation_config.*", map[string]string{ + "attribute": string(awstypes.KafkaSchemaValidationAttributeValue), + }), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"last_modified"}, + }, + { + Config: testAccEventSourceMappingConfig_mskWithEventSourceConfigSchemaRegistryByGlue(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckEventSourceMappingExists(ctx, resourceName, &v), + resource.TestCheckResourceAttr(resourceName, "amazon_managed_kafka_event_source_config.#", "1"), + resource.TestCheckResourceAttr(resourceName, "amazon_managed_kafka_event_source_config.0.consumer_group_id", "amazon-managed-test-group-id"), + resource.TestCheckResourceAttr(resourceName, "amazon_managed_kafka_event_source_config.0.schema_registry_config.#", "1"), + resource.TestCheckResourceAttr(resourceName, "amazon_managed_kafka_event_source_config.0.schema_registry_config.0.access_config.#", "0"), + resource.TestCheckResourceAttr(resourceName, "amazon_managed_kafka_event_source_config.0.schema_registry_config.0.event_record_format", string(awstypes.SchemaRegistryEventRecordFormatJson)), + resource.TestCheckResourceAttrPair(resourceName, 
"amazon_managed_kafka_event_source_config.0.schema_registry_config.0.schema_registry_uri", "aws_glue_registry.test", names.AttrARN), + resource.TestCheckResourceAttr(resourceName, "amazon_managed_kafka_event_source_config.0.schema_registry_config.0.schema_validation_config.#", "2"), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "amazon_managed_kafka_event_source_config.0.schema_registry_config.0.schema_validation_config.*", map[string]string{ + "attribute": string(awstypes.KafkaSchemaValidationAttributeKey), + }), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "amazon_managed_kafka_event_source_config.0.schema_registry_config.0.schema_validation_config.*", map[string]string{ + "attribute": string(awstypes.KafkaSchemaValidationAttributeValue), + }), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"last_modified"}, + }, + }, + }) +} + func TestAccLambdaEventSourceMapping_selfManagedKafka(t *testing.T) { ctx := acctest.Context(t) var v lambda.GetEventSourceMappingOutput @@ -1126,6 +1197,79 @@ func TestAccLambdaEventSourceMapping_selfManagedKafkaWithEventSourceConfig(t *te }) } +func TestAccLambdaEventSourceMapping_selfManagedKafkaWithEventSourceConfigSchemaRegistry(t *testing.T) { + ctx := acctest.Context(t) + var v lambda.GetEventSourceMappingOutput + resourceName := "aws_lambda_event_source_mapping.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.LambdaServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckEventSourceMappingDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccEventSourceMappingConfig_selfManagedKafkaWithEventSourceConfigSchemaRegistryByConfluent(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckEventSourceMappingExists(ctx, 
resourceName, &v), + resource.TestCheckResourceAttr(resourceName, "self_managed_kafka_event_source_config.#", "1"), + resource.TestCheckResourceAttr(resourceName, "self_managed_kafka_event_source_config.0.consumer_group_id", "self-managed-test-group-id"), + resource.TestCheckResourceAttr(resourceName, "self_managed_kafka_event_source_config.0.schema_registry_config.#", "1"), + resource.TestCheckResourceAttr(resourceName, "self_managed_kafka_event_source_config.0.schema_registry_config.0.access_config.#", "1"), + resource.TestCheckResourceAttr(resourceName, "self_managed_kafka_event_source_config.0.schema_registry_config.0.access_config.0.type", string(awstypes.KafkaSchemaRegistryAuthTypeBasicAuth)), + resource.TestCheckResourceAttrPair(resourceName, "self_managed_kafka_event_source_config.0.schema_registry_config.0.access_config.0.uri", "aws_secretsmanager_secret.test", names.AttrARN), + resource.TestCheckResourceAttr(resourceName, "self_managed_kafka_event_source_config.0.schema_registry_config.0.event_record_format", string(awstypes.SchemaRegistryEventRecordFormatJson)), + resource.TestCheckResourceAttr(resourceName, "self_managed_kafka_event_source_config.0.schema_registry_config.0.schema_registry_uri", "https://test-schema-registry.com"), + resource.TestCheckResourceAttr(resourceName, "self_managed_kafka_event_source_config.0.schema_registry_config.0.schema_validation_config.#", "2"), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "self_managed_kafka_event_source_config.0.schema_registry_config.0.schema_validation_config.*", map[string]string{ + "attribute": string(awstypes.KafkaSchemaValidationAttributeKey), + }), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "self_managed_kafka_event_source_config.0.schema_registry_config.0.schema_validation_config.*", map[string]string{ + "attribute": string(awstypes.KafkaSchemaValidationAttributeValue), + }), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + 
ImportStateVerifyIgnore: []string{"last_modified"}, + }, + { + Config: testAccEventSourceMappingConfig_selfManagedKafkaWithEventSourceConfigSchemaRegistryByGlue(rName), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionDestroyBeforeCreate), + }, + }, + Check: resource.ComposeTestCheckFunc( + testAccCheckEventSourceMappingExists(ctx, resourceName, &v), + resource.TestCheckResourceAttr(resourceName, "self_managed_kafka_event_source_config.#", "1"), + resource.TestCheckResourceAttr(resourceName, "self_managed_kafka_event_source_config.0.consumer_group_id", "self-managed-test-group-id"), + resource.TestCheckResourceAttr(resourceName, "self_managed_kafka_event_source_config.0.schema_registry_config.#", "1"), + resource.TestCheckResourceAttr(resourceName, "self_managed_kafka_event_source_config.0.schema_registry_config.0.access_config.#", "0"), + resource.TestCheckResourceAttr(resourceName, "self_managed_kafka_event_source_config.0.schema_registry_config.0.event_record_format", string(awstypes.SchemaRegistryEventRecordFormatJson)), + resource.TestCheckResourceAttrPair(resourceName, "self_managed_kafka_event_source_config.0.schema_registry_config.0.schema_registry_uri", "aws_glue_registry.test", names.AttrARN), + resource.TestCheckResourceAttr(resourceName, "self_managed_kafka_event_source_config.0.schema_registry_config.0.schema_validation_config.#", "2"), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "self_managed_kafka_event_source_config.0.schema_registry_config.0.schema_validation_config.*", map[string]string{ + "attribute": string(awstypes.KafkaSchemaValidationAttributeKey), + }), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "self_managed_kafka_event_source_config.0.schema_registry_config.0.schema_validation_config.*", map[string]string{ + "attribute": string(awstypes.KafkaSchemaValidationAttributeValue), + }), + ), + }, + { + ResourceName: 
resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"last_modified"}, + }, + }, + }) +} + func TestAccLambdaEventSourceMapping_selfManagedKafkaWithProvisionedPollerConfig(t *testing.T) { ctx := acctest.Context(t) var v lambda.GetEventSourceMappingOutput @@ -1546,7 +1690,7 @@ func testAccCheckEventSourceMappingIsBeingDisabled(ctx context.Context, v *lambd return func(s *terraform.State) error { conn := acctest.Provider.Meta().(*conns.AWSClient).LambdaClient(ctx) // Disable enabled state - err := retry.RetryContext(ctx, 10*time.Minute, func() *retry.RetryError { + err := tfresource.Retry(ctx, 10*time.Minute, func(ctx context.Context) *tfresource.RetryError { input := &lambda.UpdateEventSourceMappingInput{ Enabled: aws.Bool(false), UUID: v.UUID, @@ -1555,12 +1699,12 @@ func testAccCheckEventSourceMappingIsBeingDisabled(ctx context.Context, v *lambd _, err := conn.UpdateEventSourceMapping(ctx, input) if errs.IsA[*awstypes.ResourceInUseException](err) { - return retry.RetryableError(fmt.Errorf( + return tfresource.RetryableError(fmt.Errorf( "Waiting for Lambda Event Source Mapping to be ready to be updated: %v", v.UUID)) } if err != nil { - return retry.NonRetryableError( + return tfresource.NonRetryableError( fmt.Errorf("Error updating Lambda Event Source Mapping: %w", err)) } @@ -1572,16 +1716,16 @@ func testAccCheckEventSourceMappingIsBeingDisabled(ctx context.Context, v *lambd } // wait for state to be propagated - return retry.RetryContext(ctx, 10*time.Minute, func() *retry.RetryError { + return tfresource.Retry(ctx, 10*time.Minute, func(ctx context.Context) *tfresource.RetryError { output, err := tflambda.FindEventSourceMappingByID(ctx, conn, aws.ToString(v.UUID)) if err != nil { - return retry.NonRetryableError( - fmt.Errorf("Error getting Lambda Event Source Mapping: %s", err)) + return tfresource.NonRetryableError( + fmt.Errorf("Error getting Lambda Event Source Mapping: %w", err)) } if state := 
aws.ToString(output.State); state != "Disabled" { - return retry.RetryableError(fmt.Errorf( + return tfresource.RetryableError(fmt.Errorf( "Waiting to get Lambda Event Source Mapping to be fully enabled, it's currently %s: %v", state, v.UUID)) } @@ -1924,7 +2068,9 @@ resource "aws_iam_policy" "test" { "ec2:DescribeVpcs", "logs:CreateLogGroup", "logs:CreateLogStream", - "logs:PutLogEvents" + "logs:PutLogEvents", + "glue:GetRegistry", + "glue:GetSchemaVersion" ], "Resource": "*" } @@ -2242,6 +2388,43 @@ resource "aws_docdb_cluster" "test" { `, rName)) } +func testAccEventSourceMappingConfig_kafkaSchemaRegistryByGlueBase(rName string) string { + return fmt.Sprintf(` +resource "aws_glue_registry" "test" { + registry_name = %[1]q +} + +resource "aws_glue_schema" "test" { + schema_name = %[1]q + registry_arn = aws_glue_registry.test.arn + data_format = "JSON" + compatibility = "NONE" + schema_definition = jsonencode( + { + "$id" : "https://example.com/person.schema.json", + "$schema" : "http://json-schema.org/draft-07/schema#", + "title" : "Person", + "type" : "object", + "properties" : { + "firstName" : { + "type" : "string", + "description" : "The person's first name." + }, + "lastName" : { + "type" : "string", + "description" : "The person's last name." 
+ }, + "age" : { + "description" : "Age in years which must be equal to or greater than zero.", + "type" : "integer", + "minimum" : 0 + } + } + }) +} +`, rName) +} + func testAccEventSourceMappingConfig_sqsKMSKeyARN(rName, pattern string) string { return acctest.ConfigCompose(testAccEventSourceMappingConfig_sqsBase(rName), fmt.Sprintf(` resource "aws_kms_key" "test" { @@ -2551,7 +2734,7 @@ func testAccEventSourceMappingConfig_msk(rName, batchSize string) string { return acctest.ConfigCompose(testAccEventSourceMappingConfig_kafkaBase(rName), fmt.Sprintf(` resource "aws_msk_cluster" "test" { cluster_name = %[1]q - kafka_version = "2.7.1" + kafka_version = "3.8.x" number_of_broker_nodes = 2 broker_node_group_info { @@ -2588,7 +2771,7 @@ func testAccEventSourceMappingConfig_mskWithEventSourceConfig(rName, batchSize s return acctest.ConfigCompose(testAccEventSourceMappingConfig_kafkaBase(rName), fmt.Sprintf(` resource "aws_msk_cluster" "test" { cluster_name = %[1]q - kafka_version = "2.7.1" + kafka_version = "3.8.x" number_of_broker_nodes = 2 broker_node_group_info { @@ -2621,6 +2804,122 @@ resource "aws_lambda_event_source_mapping" "test" { `, rName, batchSize)) } +func testAccEventSourceMappingConfig_mskWithEventSourceConfigSchemaRegistryByConfluent(rName string) string { + return acctest.ConfigCompose(testAccEventSourceMappingConfig_kafkaBase(rName), fmt.Sprintf(` +resource "aws_msk_cluster" "test" { + cluster_name = %[1]q + kafka_version = "3.8.x" + number_of_broker_nodes = 2 + + broker_node_group_info { + client_subnets = aws_subnet.test[*].id + instance_type = "kafka.m5.large" + security_groups = [aws_security_group.test.id] + + storage_info { + ebs_storage_info { + volume_size = 10 + } + } + } +} + +resource "aws_secretsmanager_secret" "test" { + name = %[1]q + recovery_window_in_days = 7 +} + +resource "aws_lambda_event_source_mapping" "test" { + batch_size = 100 + event_source_arn = aws_msk_cluster.test.arn + enabled = true + function_name = 
aws_lambda_function.test.arn + topics = ["test"] + starting_position = "TRIM_HORIZON" + + provisioned_poller_config { + maximum_pollers = 100 + minimum_pollers = 1 + } + + amazon_managed_kafka_event_source_config { + consumer_group_id = "amazon-managed-test-group-id" + schema_registry_config { + access_config { + type = "BASIC_AUTH" + uri = aws_secretsmanager_secret.test.arn + } + event_record_format = "JSON" + schema_registry_uri = "https://test-schema-registry.com" + schema_validation_config { + attribute = "KEY" + } + schema_validation_config { + attribute = "VALUE" + } + } + } + + depends_on = [aws_iam_policy_attachment.test] +} +`, rName)) +} + +func testAccEventSourceMappingConfig_mskWithEventSourceConfigSchemaRegistryByGlue(rName string) string { + return acctest.ConfigCompose( + testAccEventSourceMappingConfig_kafkaBase(rName), + testAccEventSourceMappingConfig_kafkaSchemaRegistryByGlueBase(rName), + fmt.Sprintf(` +resource "aws_msk_cluster" "test" { + cluster_name = %[1]q + kafka_version = "3.8.x" + number_of_broker_nodes = 2 + + broker_node_group_info { + client_subnets = aws_subnet.test[*].id + instance_type = "kafka.m5.large" + security_groups = [aws_security_group.test.id] + + storage_info { + ebs_storage_info { + volume_size = 10 + } + } + } +} + +resource "aws_lambda_event_source_mapping" "test" { + batch_size = 100 + event_source_arn = aws_msk_cluster.test.arn + enabled = true + function_name = aws_lambda_function.test.arn + topics = ["test"] + starting_position = "TRIM_HORIZON" + + provisioned_poller_config { + maximum_pollers = 100 + minimum_pollers = 1 + } + + amazon_managed_kafka_event_source_config { + consumer_group_id = "amazon-managed-test-group-id" + schema_registry_config { + event_record_format = "JSON" + schema_registry_uri = aws_glue_registry.test.arn + schema_validation_config { + attribute = "KEY" + } + schema_validation_config { + attribute = "VALUE" + } + } + } + + depends_on = [aws_iam_policy_attachment.test] +} +`, rName)) +} + 
func testAccEventSourceMappingConfig_selfManagedKafka(rName, batchSize, kafkaBootstrapServers string) string { if batchSize == "" { batchSize = "null" @@ -2695,6 +2994,117 @@ resource "aws_lambda_event_source_mapping" "test" { `, rName, batchSize, kafkaBootstrapServers)) } +func testAccEventSourceMappingConfig_selfManagedKafkaWithEventSourceConfigSchemaRegistryByConfluent(rName string) string { + return acctest.ConfigCompose(testAccEventSourceMappingConfig_kafkaBase(rName), fmt.Sprintf(` +resource "aws_secretsmanager_secret" "test" { + name = %[1]q + recovery_window_in_days = 7 +} + +resource "aws_lambda_event_source_mapping" "test" { + batch_size = 100 + enabled = false + function_name = aws_lambda_function.test.arn + topics = ["test"] + starting_position = "TRIM_HORIZON" + + provisioned_poller_config { + maximum_pollers = 100 + minimum_pollers = 1 + } + + self_managed_kafka_event_source_config { + consumer_group_id = "self-managed-test-group-id" + schema_registry_config { + access_config { + type = "BASIC_AUTH" + uri = aws_secretsmanager_secret.test.arn + } + event_record_format = "JSON" + schema_registry_uri = "https://test-schema-registry.com" + schema_validation_config { + attribute = "KEY" + } + schema_validation_config { + attribute = "VALUE" + } + } + } + + self_managed_event_source { + endpoints = { + KAFKA_BOOTSTRAP_SERVERS = "test1:9092,test2:9092" + } + } + + dynamic "source_access_configuration" { + for_each = aws_subnet.test[*].id + content { + type = "VPC_SUBNET" + uri = "subnet:${source_access_configuration.value}" + } + } + + source_access_configuration { + type = "VPC_SECURITY_GROUP" + uri = aws_security_group.test.id + } +} +`, rName)) +} + +func testAccEventSourceMappingConfig_selfManagedKafkaWithEventSourceConfigSchemaRegistryByGlue(rName string) string { + return acctest.ConfigCompose( + testAccEventSourceMappingConfig_kafkaBase(rName), + testAccEventSourceMappingConfig_kafkaSchemaRegistryByGlueBase(rName), ` +resource 
"aws_lambda_event_source_mapping" "test" { + batch_size = 100 + enabled = false + function_name = aws_lambda_function.test.arn + topics = ["test"] + starting_position = "TRIM_HORIZON" + + provisioned_poller_config { + maximum_pollers = 100 + minimum_pollers = 1 + } + + self_managed_kafka_event_source_config { + consumer_group_id = "self-managed-test-group-id" + schema_registry_config { + event_record_format = "JSON" + schema_registry_uri = aws_glue_registry.test.arn + schema_validation_config { + attribute = "KEY" + } + schema_validation_config { + attribute = "VALUE" + } + } + } + + self_managed_event_source { + endpoints = { + KAFKA_BOOTSTRAP_SERVERS = "test1:9092,test2:9092" + } + } + + dynamic "source_access_configuration" { + for_each = aws_subnet.test[*].id + content { + type = "VPC_SUBNET" + uri = "subnet:${source_access_configuration.value}" + } + } + + source_access_configuration { + type = "VPC_SECURITY_GROUP" + uri = aws_security_group.test.id + } +} +`) +} + func testAccEventSourceMappingConfig_selfManagedKafkaWithProvisionedPollerConfig(rName, batchSize, kafkaBootstrapServers, maxPollers, minPollers string) string { if batchSize == "" { batchSize = "null" diff --git a/internal/service/lambda/exports_test.go b/internal/service/lambda/exports_test.go index 032ffff61fca..4b6488686d57 100644 --- a/internal/service/lambda/exports_test.go +++ b/internal/service/lambda/exports_test.go @@ -42,3 +42,8 @@ var ( ValidQualifier = validQualifier ValidPolicyStatementID = validPolicyStatementID ) + +type ( + Policy = policy + PolicyStatement = policyStatement +) diff --git a/internal/service/lambda/function.go b/internal/service/lambda/function.go index 2cd29574e97c..2d590d6f048b 100644 --- a/internal/service/lambda/function.go +++ b/internal/service/lambda/function.go @@ -26,6 +26,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" "github.com/hashicorp/terraform-provider-aws/internal/flex" tfio 
"github.com/hashicorp/terraform-provider-aws/internal/io" + "github.com/hashicorp/terraform-provider-aws/internal/provider/sdkv2/importer" "github.com/hashicorp/terraform-provider-aws/internal/sdkv2" tfec2 "github.com/hashicorp/terraform-provider-aws/internal/service/ec2" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" @@ -44,6 +45,10 @@ const ( // @Tags(identifierAttribute="arn") // @Testing(existsType="github.com/aws/aws-sdk-go-v2/service/lambda;lambda.GetFunctionOutput") // @Testing(importIgnore="filename;last_modified;publish") +// @IdentityAttribute("function_name") +// @Testing(idAttrDuplicates="function_name") +// @Testing(preIdentityVersion="v6.7.0") +// @CustomImport func resourceFunction() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceFunctionCreate, @@ -59,6 +64,10 @@ func resourceFunction() *schema.Resource { Importer: &schema.ResourceImporter{ StateContext: func(ctx context.Context, d *schema.ResourceData, meta any) ([]*schema.ResourceData, error) { + identitySpec := importer.IdentitySpec(ctx) + if err := importer.RegionalSingleParameterized(ctx, d, identitySpec, meta.(importer.AWSClient)); err != nil { + return nil, err + } d.Set("function_name", d.Id()) return []*schema.ResourceData{d}, nil }, @@ -381,6 +390,12 @@ func resourceFunction() *schema.Resource { Type: schema.TypeInt, Computed: true, }, + "source_kms_key_arn": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidARN, + ConflictsWith: []string{"image_uri"}, + }, names.AttrTags: tftags.TagsSchema(), names.AttrTagsAll: tftags.TagsSchemaComputed(), names.AttrTimeout: { @@ -566,6 +581,10 @@ func resourceFunctionCreate(ctx context.Context, d *schema.ResourceData, meta an input.SnapStart = expandSnapStart(v.([]any)) } + if v, ok := d.GetOk("source_kms_key_arn"); ok { + input.Code.SourceKMSKeyArn = aws.String(v.(string)) + } + if v, ok := d.GetOk("tracing_config"); ok && len(v.([]any)) > 0 && v.([]any)[0] != nil { 
input.TracingConfig = &awstypes.TracingConfig{ Mode: awstypes.TracingMode(v.([]any)[0].(map[string]any)[names.AttrMode].(string)), @@ -591,7 +610,7 @@ func resourceFunctionCreate(ctx context.Context, d *schema.ResourceData, meta an d.SetId(functionName) - _, err = tfresource.RetryWhenNotFound(ctx, lambdaPropagationTimeout, func() (any, error) { + _, err = tfresource.RetryWhenNotFound(ctx, lambdaPropagationTimeout, func(ctx context.Context) (any, error) { return findFunctionByName(ctx, conn, d.Id()) }) @@ -645,7 +664,22 @@ func resourceFunctionRead(ctx context.Context, d *schema.ResourceData, meta any) return sdkdiag.AppendErrorf(diags, "reading Lambda Function (%s): %s", d.Id(), err) } + // If Qualifier is specified, GetFunction will return nil for Concurrency. + // Need to fetch it separately using GetFunctionConcurrency. + if output.Concurrency == nil && input.Qualifier != nil { + outputGFC, err := findFunctionConcurrencyByName(ctx, conn, d.Id()) + + if err != nil { + return sdkdiag.AppendErrorf(diags, "reading Lambda Function (%s) concurrency: %s", d.Id(), err) + } + + output.Concurrency = &awstypes.Concurrency{ + ReservedConcurrentExecutions: outputGFC.ReservedConcurrentExecutions, + } + } + function := output.Configuration + functionCode := output.Code d.Set("architectures", function.Architectures) functionARN := aws.ToString(function.FunctionArn) d.Set(names.AttrARN, functionARN) @@ -705,6 +739,7 @@ func resourceFunctionRead(ctx context.Context, d *schema.ResourceData, meta any) } d.Set("source_code_hash", d.Get("source_code_hash")) d.Set("source_code_size", function.CodeSize) + d.Set("source_kms_key_arn", functionCode.SourceKMSKeyArn) d.Set(names.AttrTimeout, function.Timeout) tracingConfigMode := awstypes.TracingModePassThrough if function.TracingConfig != nil { @@ -775,6 +810,15 @@ func resourceFunctionUpdate(ctx context.Context, d *schema.ResourceData, meta an var diags diag.Diagnostics conn := meta.(*conns.AWSClient).LambdaClient(ctx) + 
codeUpdateCompleted := false + defer func() { + if !codeUpdateCompleted { + // If an error occurs before completing the code update, + // reset non-refreshable attributes to pre-apply state. + resetNonRefreshableAttributes(d) + } + }() + if d.HasChange("code_signing_config_arn") { if v, ok := d.GetOk("code_signing_config_arn"); ok { input := lambda.PutFunctionCodeSigningConfigInput{ @@ -977,17 +1021,14 @@ func resourceFunctionUpdate(ctx context.Context, d *schema.ResourceData, meta an } } + // If source_kms_key_arn is set, it should be always included in the update + if v, ok := d.GetOk("source_kms_key_arn"); ok { + input.SourceKMSKeyArn = aws.String(v.(string)) + } + _, err := conn.UpdateFunctionCode(ctx, &input) if err != nil { - if errs.IsAErrorMessageContains[*awstypes.InvalidParameterValueException](err, "Error occurred while GetObject.") { - // As s3_bucket, s3_key and s3_object_version aren't set in resourceFunctionRead(), don't ovewrite the last known good values. - for _, key := range []string{names.AttrS3Bucket, "s3_key", "s3_object_version"} { - old, _ := d.GetChange(key) - d.Set(key, old) - } - } - return sdkdiag.AppendErrorf(diags, "updating Lambda Function (%s) code: %s", d.Id(), err) } @@ -995,6 +1036,7 @@ func resourceFunctionUpdate(ctx context.Context, d *schema.ResourceData, meta an return sdkdiag.AppendErrorf(diags, "waiting for Lambda Function (%s) code update: %s", d.Id(), err) } } + codeUpdateCompleted = true if d.HasChange("reserved_concurrent_executions") { if v, ok := d.Get("reserved_concurrent_executions").(int); ok && v >= 0 { @@ -1026,7 +1068,7 @@ func resourceFunctionUpdate(ctx context.Context, d *schema.ResourceData, meta an FunctionName: aws.String(d.Id()), } - outputRaw, err := tfresource.RetryWhenIsAErrorMessageContains[*awstypes.ResourceConflictException](ctx, lambdaPropagationTimeout, func() (any, error) { + outputRaw, err := tfresource.RetryWhenIsAErrorMessageContains[any, *awstypes.ResourceConflictException](ctx, 
lambdaPropagationTimeout, func(ctx context.Context) (any, error) { return conn.PublishVersion(ctx, &input) }, "in progress") @@ -1063,7 +1105,7 @@ func resourceFunctionDelete(ctx context.Context, d *schema.ResourceData, meta an input := lambda.DeleteFunctionInput{ FunctionName: aws.String(d.Id()), } - _, err := tfresource.RetryWhenIsAErrorMessageContains[*awstypes.InvalidParameterValueException](ctx, d.Timeout(schema.TimeoutDelete), func() (any, error) { + _, err := tfresource.RetryWhenIsAErrorMessageContains[any, *awstypes.InvalidParameterValueException](ctx, d.Timeout(schema.TimeoutDelete), func(ctx context.Context) (any, error) { return conn.DeleteFunction(ctx, &input) }, "because it is a replicated function") @@ -1079,11 +1121,11 @@ func resourceFunctionDelete(ctx context.Context, d *schema.ResourceData, meta an } func findFunctionByName(ctx context.Context, conn *lambda.Client, name string) (*lambda.GetFunctionOutput, error) { - input := &lambda.GetFunctionInput{ + input := lambda.GetFunctionInput{ FunctionName: aws.String(name), } - return findFunction(ctx, conn, input) + return findFunction(ctx, conn, &input) } func findFunction(ctx context.Context, conn *lambda.Client, input *lambda.GetFunctionInput) (*lambda.GetFunctionOutput, error) { @@ -1140,13 +1182,13 @@ func findFunctionConfiguration(ctx context.Context, conn *lambda.Client, input * } func findLatestFunctionVersionByName(ctx context.Context, conn *lambda.Client, name string) (*awstypes.FunctionConfiguration, error) { - input := &lambda.ListVersionsByFunctionInput{ + input := lambda.ListVersionsByFunctionInput{ FunctionName: aws.String(name), MaxItems: aws.Int32(listVersionsMaxItems), } var output *awstypes.FunctionConfiguration - pages := lambda.NewListVersionsByFunctionPaginator(conn, input) + pages := lambda.NewListVersionsByFunctionPaginator(conn, &input) for pages.HasMorePages() { page, err := pages.NextPage(ctx) @@ -1167,6 +1209,35 @@ func findLatestFunctionVersionByName(ctx context.Context, 
conn *lambda.Client, n return output, nil } +func findFunctionConcurrencyByName(ctx context.Context, conn *lambda.Client, name string) (*lambda.GetFunctionConcurrencyOutput, error) { + input := lambda.GetFunctionConcurrencyInput{ + FunctionName: aws.String(name), + } + + return findFunctionConcurrency(ctx, conn, &input) +} + +func findFunctionConcurrency(ctx context.Context, conn *lambda.Client, input *lambda.GetFunctionConcurrencyInput) (*lambda.GetFunctionConcurrencyOutput, error) { + output, err := conn.GetFunctionConcurrency(ctx, input) + + if errs.IsA[*awstypes.ResourceNotFoundException](err) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, + } + } + + if err != nil { + return nil, err + } + + if output == nil { + return nil, tfresource.NewEmptyResultError(input) + } + + return output, nil +} + // replaceSecurityGroupsOnDestroy sets the VPC configuration security groups // prior to resource destruction // @@ -1203,7 +1274,7 @@ func replaceSecurityGroupsOnDestroy(ctx context.Context, d *schema.ResourceData, } else { defaultSG, err := tfec2.FindSecurityGroupByNameAndVPCID(ctx, ec2Conn, "default", vpcID) if err != nil || defaultSG == nil { - return fmt.Errorf("finding VPC (%s) default security group: %s", vpcID, err) + return fmt.Errorf("finding VPC (%s) default security group: %w", vpcID, err) } replacementSGIDs = []string{aws.ToString(defaultSG.GroupId)} } @@ -1218,11 +1289,11 @@ func replaceSecurityGroupsOnDestroy(ctx context.Context, d *schema.ResourceData, if _, err := retryFunctionOp(ctx, func() (*lambda.UpdateFunctionConfigurationOutput, error) { return conn.UpdateFunctionConfiguration(ctx, input) }); err != nil { - return fmt.Errorf("updating Lambda Function (%s) configuration: %s", d.Id(), err) + return fmt.Errorf("updating Lambda Function (%s) configuration: %w", d.Id(), err) } if _, err := waitFunctionUpdated(ctx, conn, d.Id(), d.Timeout(schema.TimeoutDelete)); err != nil { - return fmt.Errorf("waiting for Lambda Function 
(%s) configuration update: %s", d.Id(), err) + return fmt.Errorf("waiting for Lambda Function (%s) configuration update: %w", d.Id(), err) } return nil @@ -1272,7 +1343,14 @@ func statusFunctionConfigurationLastUpdateStatus(ctx context.Context, conn *lamb return nil, "", err } - return output, string(output.LastUpdateStatus), nil + // "LastUpdateStatus":null can be returned (when SnapStart is enabled?). + // lambda.NewFunctionUpdatedWaiter handles this as a retryable status. + status := output.LastUpdateStatus + if status == "" { + status = awstypes.LastUpdateStatusInProgress + } + + return output, string(status), nil } } @@ -1344,7 +1422,7 @@ type functionCU interface { func retryFunctionOp[T functionCU](ctx context.Context, f func() (*T, error)) (*T, error) { output, err := tfresource.RetryWhen(ctx, lambdaPropagationTimeout, - func() (any, error) { + func(ctx context.Context) (any, error) { return f() }, func(err error) (bool, error) { @@ -1376,7 +1454,7 @@ func retryFunctionOp[T functionCU](ctx context.Context, f func() (*T, error)) (* functionExtraThrottlingTimeout = 9 * time.Minute ) output, err = tfresource.RetryWhen(ctx, functionExtraThrottlingTimeout, - func() (any, error) { + func(ctx context.Context) (any, error) { return f() }, func(err error) (bool, error) { @@ -1430,6 +1508,7 @@ func needsFunctionCodeUpdate(d sdkv2.ResourceDiffer) bool { d.HasChange(names.AttrS3Bucket) || d.HasChange("s3_key") || d.HasChange("s3_object_version") || + d.HasChange("source_kms_key_arn") || d.HasChange("image_uri") || d.HasChange("architectures") } @@ -1634,7 +1713,7 @@ func suppressLoggingConfigUnspecifiedLogLevels(k, old, new string, d *schema.Res return suppressLoggingConfigUnspecifiedLogLevelsPrimitive(k, old, new, d.HasChanges("logging_config.0.log_format")) } -func suppressLoggingConfigUnspecifiedLogLevelsPrimitive(k, old, new string, logFormatHasChanges bool) bool { //nolint:unparam +func suppressLoggingConfigUnspecifiedLogLevelsPrimitive(_, old, new string, 
logFormatHasChanges bool) bool { //nolint:unparam if logFormatHasChanges { return false } @@ -1684,3 +1763,15 @@ func flattenSnapStart(apiObject *awstypes.SnapStartResponse) []any { return []any{tfMap} } + +// Non-API attributes (which cannot be refreshed via AWS API calls) in the state are updated even if the update fails. +// Therefore, reset them to the previous value when the update fails. +// https://developer.hashicorp.com/terraform/plugin/framework/diagnostics#how-errors-affect-state +func resetNonRefreshableAttributes(d *schema.ResourceData) { + for _, key := range []string{names.AttrS3Bucket, "s3_key", "s3_object_version", "source_code_hash", "filename"} { + if d.HasChange(key) { + old, _ := d.GetChange(key) + d.Set(key, old) + } + } +} diff --git a/internal/service/lambda/function_data_source.go b/internal/service/lambda/function_data_source.go index faf8e3152859..82d3f2e71e41 100644 --- a/internal/service/lambda/function_data_source.go +++ b/internal/service/lambda/function_data_source.go @@ -200,6 +200,10 @@ func dataSourceFunction() *schema.Resource { Type: schema.TypeInt, Computed: true, }, + "source_kms_key_arn": { + Type: schema.TypeString, + Computed: true, + }, names.AttrTags: tftags.TagsSchemaComputed(), names.AttrTimeout: { Type: schema.TypeInt, @@ -282,7 +286,22 @@ func dataSourceFunctionRead(ctx context.Context, d *schema.ResourceData, meta an return sdkdiag.AppendErrorf(diags, "reading Lambda Function (%s): %s", functionName, err) } + // If Qualifier is specified, GetFunction will return nil for Concurrency. + // Need to fetch it separately using GetFunctionConcurrency. 
+ if output.Concurrency == nil && input.Qualifier != nil { + outputGFC, err := findFunctionConcurrencyByName(ctx, conn, functionName) + + if err != nil { + return sdkdiag.AppendErrorf(diags, "reading Lambda Function (%s) concurrency: %s", functionName, err) + } + + output.Concurrency = &awstypes.Concurrency{ + ReservedConcurrentExecutions: outputGFC.ReservedConcurrentExecutions, + } + } + function := output.Configuration + functionCode := output.Code functionARN := aws.ToString(function.FunctionArn) qualifierSuffix := fmt.Sprintf(":%s", aws.ToString(input.Qualifier)) versionSuffix := fmt.Sprintf(":%s", aws.ToString(function.Version)) @@ -344,6 +363,7 @@ func dataSourceFunctionRead(ctx context.Context, d *schema.ResourceData, meta an d.Set("signing_profile_version_arn", function.SigningProfileVersionArn) d.Set("source_code_hash", function.CodeSha256) d.Set("source_code_size", function.CodeSize) + d.Set("source_kms_key_arn", functionCode.SourceKMSKeyArn) d.Set(names.AttrTimeout, function.Timeout) tracingConfigMode := awstypes.TracingModePassThrough if function.TracingConfig != nil { diff --git a/internal/service/lambda/function_data_source_tags_gen_test.go b/internal/service/lambda/function_data_source_tags_gen_test.go index c7e15084b9f5..a5ae81c88a78 100644 --- a/internal/service/lambda/function_data_source_tags_gen_test.go +++ b/internal/service/lambda/function_data_source_tags_gen_test.go @@ -8,7 +8,6 @@ import ( "unique" "github.com/hashicorp/terraform-plugin-testing/config" - sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/knownvalue" "github.com/hashicorp/terraform-plugin-testing/statecheck" @@ -22,10 +21,11 @@ import ( func TestAccLambdaFunctionDataSource_tags(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_lambda_function.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := 
acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.LambdaServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -50,10 +50,11 @@ func TestAccLambdaFunctionDataSource_tags(t *testing.T) { func TestAccLambdaFunctionDataSource_tags_NullMap(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_lambda_function.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.LambdaServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -74,10 +75,11 @@ func TestAccLambdaFunctionDataSource_tags_NullMap(t *testing.T) { func TestAccLambdaFunctionDataSource_tags_EmptyMap(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_lambda_function.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.LambdaServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -98,10 +100,11 @@ func TestAccLambdaFunctionDataSource_tags_EmptyMap(t *testing.T) { func TestAccLambdaFunctionDataSource_tags_DefaultTags_nonOverlapping(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_lambda_function.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, 
resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.LambdaServiceID), Steps: []resource.TestStep{ @@ -130,10 +133,11 @@ func TestAccLambdaFunctionDataSource_tags_DefaultTags_nonOverlapping(t *testing. func TestAccLambdaFunctionDataSource_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_lambda_function.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.LambdaServiceID), Steps: []resource.TestStep{ @@ -168,10 +172,11 @@ func TestAccLambdaFunctionDataSource_tags_IgnoreTags_Overlap_DefaultTag(t *testi func TestAccLambdaFunctionDataSource_tags_IgnoreTags_Overlap_ResourceTag(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_lambda_function.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.LambdaServiceID), Steps: []resource.TestStep{ diff --git a/internal/service/lambda/function_data_source_test.go b/internal/service/lambda/function_data_source_test.go index d324c03fe559..b98bdcc8a708 100644 --- a/internal/service/lambda/function_data_source_test.go +++ b/internal/service/lambda/function_data_source_test.go @@ -54,6 +54,7 @@ func TestAccLambdaFunctionDataSource_basic(t *testing.T) { resource.TestCheckResourceAttrPair(dataSourceName, "signing_profile_version_arn", resourceName, "signing_profile_version_arn"), resource.TestCheckResourceAttrPair(dataSourceName, "source_code_hash", resourceName, "code_sha256"), 
resource.TestCheckResourceAttrPair(dataSourceName, "source_code_size", resourceName, "source_code_size"), + resource.TestCheckResourceAttrPair(dataSourceName, "source_kms_key_arn", resourceName, "source_kms_key_arn"), resource.TestCheckResourceAttrPair(dataSourceName, acctest.CtTagsPercent, resourceName, acctest.CtTagsPercent), resource.TestCheckResourceAttrPair(dataSourceName, names.AttrTimeout, resourceName, names.AttrTimeout), resource.TestCheckResourceAttrPair(dataSourceName, "tracing_config.#", resourceName, "tracing_config.#"), @@ -91,6 +92,33 @@ func TestAccLambdaFunctionDataSource_version(t *testing.T) { }) } +func TestAccLambdaFunctionDataSource_versionWithReservedConcurrency(t *testing.T) { + ctx := acctest.Context(t) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + dataSourceName := "data.aws_lambda_function.test" + resourceName := "aws_lambda_function.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.LambdaServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + Config: testAccFunctionDataSourceConfig_versionWithReservedConcurrency(rName), + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttrPair(dataSourceName, names.AttrARN, resourceName, names.AttrARN), + resource.TestCheckResourceAttrPair(dataSourceName, "invoke_arn", resourceName, "invoke_arn"), + resource.TestCheckResourceAttrPair(dataSourceName, "qualified_arn", resourceName, "qualified_arn"), + resource.TestCheckResourceAttrPair(dataSourceName, "qualified_invoke_arn", resourceName, "qualified_invoke_arn"), + resource.TestCheckResourceAttr(dataSourceName, "qualifier", "1"), + resource.TestCheckResourceAttrPair(dataSourceName, "reserved_concurrent_executions", resourceName, "reserved_concurrent_executions"), + resource.TestCheckResourceAttr(dataSourceName, names.AttrVersion, "1"), + ), + }, + }, + }) +} + 
func TestAccLambdaFunctionDataSource_latestVersion(t *testing.T) { ctx := acctest.Context(t) rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -469,6 +497,25 @@ data "aws_lambda_function" "test" { `, rName)) } +func testAccFunctionDataSourceConfig_versionWithReservedConcurrency(rName string) string { + return acctest.ConfigCompose(testAccFunctionDataSourceConfig_base(rName), fmt.Sprintf(` +resource "aws_lambda_function" "test" { + filename = "test-fixtures/lambdatest.zip" + function_name = %[1]q + handler = "exports.example" + publish = true + role = aws_iam_role.lambda.arn + runtime = "nodejs20.x" + reserved_concurrent_executions = 10 +} + +data "aws_lambda_function" "test" { + function_name = aws_lambda_function.test.function_name + qualifier = 1 +} +`, rName)) +} + func testAccFunctionDataSourceConfig_latestVersion(rName string) string { return acctest.ConfigCompose(testAccFunctionDataSourceConfig_base(rName), fmt.Sprintf(` resource "aws_lambda_function" "test" { diff --git a/internal/service/lambda/function_event_invoke_config.go b/internal/service/lambda/function_event_invoke_config.go index a0b7e44fcac5..cb94bc6acc22 100644 --- a/internal/service/lambda/function_event_invoke_config.go +++ b/internal/service/lambda/function_event_invoke_config.go @@ -128,7 +128,7 @@ func resourceFunctionEventInvokeConfigCreate(ctx context.Context, d *schema.Reso // Retry for destination validation eventual consistency errors. _, err := tfresource.RetryWhen(ctx, iamPropagationTimeout, - func() (any, error) { + func(ctx context.Context) (any, error) { return conn.PutFunctionEventInvokeConfig(ctx, input) }, func(err error) (bool, error) { @@ -212,7 +212,7 @@ func resourceFunctionEventInvokeConfigUpdate(ctx context.Context, d *schema.Reso // Retry for destination validation eventual consistency errors. 
_, err = tfresource.RetryWhen(ctx, iamPropagationTimeout, - func() (any, error) { + func(ctx context.Context) (any, error) { return conn.PutFunctionEventInvokeConfig(ctx, input) }, func(err error) (bool, error) { @@ -273,7 +273,7 @@ func functionEventInvokeConfigParseResourceID(id string) (string, string, error) parsedARN, err := arn.Parse(id) if err != nil { - return "", "", fmt.Errorf("parsing ARN (%s): %s", id, err) + return "", "", fmt.Errorf("parsing ARN (%s): %w", id, err) } function := strings.TrimPrefix(parsedARN.Resource, "function:") diff --git a/internal/service/lambda/function_identity_gen_test.go b/internal/service/lambda/function_identity_gen_test.go new file mode 100644 index 000000000000..e32d9ef6fc02 --- /dev/null +++ b/internal/service/lambda/function_identity_gen_test.go @@ -0,0 +1,328 @@ +// Code generated by internal/generate/identitytests/main.go; DO NOT EDIT. + +package lambda_test + +import ( + "testing" + + "github.com/aws/aws-sdk-go-v2/service/lambda" + "github.com/hashicorp/terraform-plugin-testing/compare" + "github.com/hashicorp/terraform-plugin-testing/config" + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/knownvalue" + "github.com/hashicorp/terraform-plugin-testing/plancheck" + "github.com/hashicorp/terraform-plugin-testing/statecheck" + "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" + "github.com/hashicorp/terraform-plugin-testing/tfversion" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + tfknownvalue "github.com/hashicorp/terraform-provider-aws/internal/acctest/knownvalue" + tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccLambdaFunction_Identity_Basic(t *testing.T) { + ctx := acctest.Context(t) + + var v lambda.GetFunctionOutput + resourceName := 
"aws_lambda_function.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.LambdaServiceID), + CheckDestroy: testAccCheckFunctionDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + // Step 1: Setup + { + ConfigDirectory: config.StaticDirectory("testdata/Function/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckFunctionExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New("function_name"), compare.ValuesSame()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + "function_name": knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New("function_name")), + }, + }, + + // Step 2: Import command + { + ConfigDirectory: config.StaticDirectory("testdata/Function/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ImportStateKind: resource.ImportCommandWithID, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "filename", "last_modified", "publish", + }, + }, + + // Step 3: Import block with Import ID + { + ConfigDirectory: config.StaticDirectory("testdata/Function/basic/"), + ConfigVariables: 
config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New("function_name"), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + }, + }, + ExpectNonEmptyPlan: true, + }, + + // Step 4: Import block with Resource Identity + { + ConfigDirectory: config.StaticDirectory("testdata/Function/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithResourceIdentity, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New("function_name"), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + }, + }, + ExpectNonEmptyPlan: true, + }, + }, + }) +} + +func TestAccLambdaFunction_Identity_RegionOverride(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_lambda_function.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: 
acctest.ErrorCheck(t, names.LambdaServiceID), + CheckDestroy: acctest.CheckDestroyNoop, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + // Step 1: Setup + { + ConfigDirectory: config.StaticDirectory("testdata/Function/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New("function_name"), compare.ValuesSame()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.AlternateRegion()), + "function_name": knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New("function_name")), + }, + }, + + // Step 2: Import command + { + ConfigDirectory: config.StaticDirectory("testdata/Function/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ImportStateKind: resource.ImportCommandWithID, + ImportStateIdFunc: acctest.CrossRegionImportStateIdFunc(resourceName), + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "filename", "last_modified", "publish", + }, + }, + + // Step 3: Import block with Import ID + { + ConfigDirectory: config.StaticDirectory("testdata/Function/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + 
ImportStateKind: resource.ImportBlockWithID, + ImportStateIdFunc: acctest.CrossRegionImportStateIdFunc(resourceName), + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New("function_name"), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + ExpectNonEmptyPlan: true, + }, + + // Step 4: Import block with Resource Identity + { + ConfigDirectory: config.StaticDirectory("testdata/Function/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithResourceIdentity, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New("function_name"), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + ExpectNonEmptyPlan: true, + }, + }, + }) +} + +// Resource Identity was added after v6.7.0 +func TestAccLambdaFunction_Identity_ExistingResource(t *testing.T) { + ctx := acctest.Context(t) + + var v lambda.GetFunctionOutput + resourceName := "aws_lambda_function.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.LambdaServiceID), + CheckDestroy: 
testAccCheckFunctionDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/Function/basic_v6.7.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckFunctionExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Function/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + "function_name": knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New("function_name")), + }, + }, + }, + }) +} + +// Resource Identity was added after v6.7.0 +func TestAccLambdaFunction_Identity_ExistingResource_NoRefresh_NoChange(t *testing.T) { + ctx := acctest.Context(t) + + var v lambda.GetFunctionOutput + resourceName := "aws_lambda_function.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, 
names.LambdaServiceID), + CheckDestroy: testAccCheckFunctionDestroy(ctx), + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/Function/basic_v6.7.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckFunctionExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Function/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + }, + }) +} diff --git a/internal/service/lambda/function_tags_gen_test.go b/internal/service/lambda/function_tags_gen_test.go index f46f352f489f..8d73de511a1d 100644 --- a/internal/service/lambda/function_tags_gen_test.go +++ b/internal/service/lambda/function_tags_gen_test.go @@ -7,7 +7,6 @@ import ( "github.com/aws/aws-sdk-go-v2/service/lambda" "github.com/hashicorp/terraform-plugin-testing/config" - sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/knownvalue" "github.com/hashicorp/terraform-plugin-testing/plancheck" @@ -19,11 +18,12 @@ import ( func 
TestAccLambdaFunction_tags(t *testing.T) { ctx := acctest.Context(t) + var v lambda.GetFunctionOutput resourceName := "aws_lambda_function.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.LambdaServiceID), CheckDestroy: testAccCheckFunctionDestroy(ctx), @@ -213,11 +213,12 @@ func TestAccLambdaFunction_tags(t *testing.T) { func TestAccLambdaFunction_tags_null(t *testing.T) { ctx := acctest.Context(t) + var v lambda.GetFunctionOutput resourceName := "aws_lambda_function.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.LambdaServiceID), CheckDestroy: testAccCheckFunctionDestroy(ctx), @@ -283,11 +284,12 @@ func TestAccLambdaFunction_tags_null(t *testing.T) { func TestAccLambdaFunction_tags_EmptyMap(t *testing.T) { ctx := acctest.Context(t) + var v lambda.GetFunctionOutput resourceName := "aws_lambda_function.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.LambdaServiceID), CheckDestroy: testAccCheckFunctionDestroy(ctx), @@ -349,11 +351,12 @@ func TestAccLambdaFunction_tags_EmptyMap(t *testing.T) { func TestAccLambdaFunction_tags_AddOnUpdate(t *testing.T) { ctx := acctest.Context(t) + var v lambda.GetFunctionOutput resourceName := "aws_lambda_function.test" - rName := 
sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.LambdaServiceID), CheckDestroy: testAccCheckFunctionDestroy(ctx), @@ -433,11 +436,12 @@ func TestAccLambdaFunction_tags_AddOnUpdate(t *testing.T) { func TestAccLambdaFunction_tags_EmptyTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) + var v lambda.GetFunctionOutput resourceName := "aws_lambda_function.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.LambdaServiceID), CheckDestroy: testAccCheckFunctionDestroy(ctx), @@ -528,11 +532,12 @@ func TestAccLambdaFunction_tags_EmptyTag_OnCreate(t *testing.T) { func TestAccLambdaFunction_tags_EmptyTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + var v lambda.GetFunctionOutput resourceName := "aws_lambda_function.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.LambdaServiceID), CheckDestroy: testAccCheckFunctionDestroy(ctx), @@ -671,11 +676,12 @@ func TestAccLambdaFunction_tags_EmptyTag_OnUpdate_Add(t *testing.T) { func TestAccLambdaFunction_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + var v lambda.GetFunctionOutput resourceName := "aws_lambda_function.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, 
acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.LambdaServiceID), CheckDestroy: testAccCheckFunctionDestroy(ctx), @@ -763,11 +769,12 @@ func TestAccLambdaFunction_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { func TestAccLambdaFunction_tags_DefaultTags_providerOnly(t *testing.T) { ctx := acctest.Context(t) + var v lambda.GetFunctionOutput resourceName := "aws_lambda_function.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.LambdaServiceID), CheckDestroy: testAccCheckFunctionDestroy(ctx), @@ -956,11 +963,12 @@ func TestAccLambdaFunction_tags_DefaultTags_providerOnly(t *testing.T) { func TestAccLambdaFunction_tags_DefaultTags_nonOverlapping(t *testing.T) { ctx := acctest.Context(t) + var v lambda.GetFunctionOutput resourceName := "aws_lambda_function.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.LambdaServiceID), CheckDestroy: testAccCheckFunctionDestroy(ctx), @@ -1125,11 +1133,12 @@ func TestAccLambdaFunction_tags_DefaultTags_nonOverlapping(t *testing.T) { func TestAccLambdaFunction_tags_DefaultTags_overlapping(t *testing.T) { ctx := acctest.Context(t) + var v lambda.GetFunctionOutput resourceName := "aws_lambda_function.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, 
resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.LambdaServiceID), CheckDestroy: testAccCheckFunctionDestroy(ctx), @@ -1310,11 +1319,12 @@ func TestAccLambdaFunction_tags_DefaultTags_overlapping(t *testing.T) { func TestAccLambdaFunction_tags_DefaultTags_updateToProviderOnly(t *testing.T) { ctx := acctest.Context(t) + var v lambda.GetFunctionOutput resourceName := "aws_lambda_function.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.LambdaServiceID), CheckDestroy: testAccCheckFunctionDestroy(ctx), @@ -1403,11 +1413,12 @@ func TestAccLambdaFunction_tags_DefaultTags_updateToProviderOnly(t *testing.T) { func TestAccLambdaFunction_tags_DefaultTags_updateToResourceOnly(t *testing.T) { ctx := acctest.Context(t) + var v lambda.GetFunctionOutput resourceName := "aws_lambda_function.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.LambdaServiceID), CheckDestroy: testAccCheckFunctionDestroy(ctx), @@ -1495,11 +1506,12 @@ func TestAccLambdaFunction_tags_DefaultTags_updateToResourceOnly(t *testing.T) { func TestAccLambdaFunction_tags_DefaultTags_emptyResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v lambda.GetFunctionOutput resourceName := "aws_lambda_function.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + 
acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.LambdaServiceID), CheckDestroy: testAccCheckFunctionDestroy(ctx), @@ -1563,11 +1575,12 @@ func TestAccLambdaFunction_tags_DefaultTags_emptyResourceTag(t *testing.T) { func TestAccLambdaFunction_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T) { ctx := acctest.Context(t) + var v lambda.GetFunctionOutput resourceName := "aws_lambda_function.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.LambdaServiceID), CheckDestroy: testAccCheckFunctionDestroy(ctx), @@ -1623,11 +1636,12 @@ func TestAccLambdaFunction_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T) { func TestAccLambdaFunction_tags_DefaultTags_nullOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v lambda.GetFunctionOutput resourceName := "aws_lambda_function.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.LambdaServiceID), CheckDestroy: testAccCheckFunctionDestroy(ctx), @@ -1688,11 +1702,12 @@ func TestAccLambdaFunction_tags_DefaultTags_nullOverlappingResourceTag(t *testin func TestAccLambdaFunction_tags_DefaultTags_nullNonOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v lambda.GetFunctionOutput resourceName := "aws_lambda_function.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + 
acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.LambdaServiceID), CheckDestroy: testAccCheckFunctionDestroy(ctx), @@ -1753,11 +1768,12 @@ func TestAccLambdaFunction_tags_DefaultTags_nullNonOverlappingResourceTag(t *tes func TestAccLambdaFunction_tags_ComputedTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) + var v lambda.GetFunctionOutput resourceName := "aws_lambda_function.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.LambdaServiceID), CheckDestroy: testAccCheckFunctionDestroy(ctx), @@ -1811,11 +1827,12 @@ func TestAccLambdaFunction_tags_ComputedTag_OnCreate(t *testing.T) { func TestAccLambdaFunction_tags_ComputedTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + var v lambda.GetFunctionOutput resourceName := "aws_lambda_function.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.LambdaServiceID), CheckDestroy: testAccCheckFunctionDestroy(ctx), @@ -1911,11 +1928,12 @@ func TestAccLambdaFunction_tags_ComputedTag_OnUpdate_Add(t *testing.T) { func TestAccLambdaFunction_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + var v lambda.GetFunctionOutput resourceName := "aws_lambda_function.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ 
PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.LambdaServiceID), CheckDestroy: testAccCheckFunctionDestroy(ctx), @@ -2001,11 +2019,12 @@ func TestAccLambdaFunction_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { func TestAccLambdaFunction_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { ctx := acctest.Context(t) + var v lambda.GetFunctionOutput resourceName := "aws_lambda_function.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.LambdaServiceID), CheckDestroy: testAccCheckFunctionDestroy(ctx), @@ -2163,11 +2182,12 @@ func TestAccLambdaFunction_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { func TestAccLambdaFunction_tags_IgnoreTags_Overlap_ResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v lambda.GetFunctionOutput resourceName := "aws_lambda_function.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.LambdaServiceID), CheckDestroy: testAccCheckFunctionDestroy(ctx), diff --git a/internal/service/lambda/function_test.go b/internal/service/lambda/function_test.go index 6a9a9137383d..b4b6624f6664 100644 --- a/internal/service/lambda/function_test.go +++ b/internal/service/lambda/function_test.go @@ -21,8 +21,10 @@ import ( "github.com/hashicorp/aws-sdk-go-base/v2/endpoints" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/knownvalue" 
"github.com/hashicorp/terraform-plugin-testing/plancheck" "github.com/hashicorp/terraform-plugin-testing/terraform" + "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" "github.com/hashicorp/terraform-provider-aws/internal/acctest" "github.com/hashicorp/terraform-provider-aws/internal/conns" tflambda "github.com/hashicorp/terraform-provider-aws/internal/service/lambda" @@ -273,6 +275,13 @@ func TestAccLambdaFunction_concurrency(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "reserved_concurrent_executions", "222"), ), }, + { + Config: testAccFunctionConfig_concurrencyPublished(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckFunctionExists(ctx, resourceName, &conf), + resource.TestCheckResourceAttr(resourceName, "reserved_concurrent_executions", "222"), + ), + }, }, }) } @@ -2002,6 +2011,68 @@ func TestAccLambdaFunction_LocalUpdate_nameOnly(t *testing.T) { }) } +func TestAccLambdaFunction_LocalUpdate_publish(t *testing.T) { + ctx := acctest.Context(t) + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + + path, zipFile, err := createTempFile("lambda_localUpdate") + if err != nil { + t.Fatal(err) + } + defer os.Remove(path) + + var conf lambda.GetFunctionOutput + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_lambda_function.test" + + var timeBeforeUpdate time.Time + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.LambdaServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckFunctionDestroy(ctx), + Steps: []resource.TestStep{ + { + PreConfig: func() { + if err := testAccCreateZipFromFiles(map[string]string{"test-fixtures/lambda_func.py": "lambda_handler.py"}, zipFile); err != nil { + t.Fatalf("error creating zip from files: %s", err) + } + }, + Config: testAccFunctionConfig_localPublish(path, rName), + Check: 
resource.ComposeTestCheckFunc( + testAccCheckFunctionExists(ctx, resourceName, &conf), + testAccCheckSourceCodeHash(&conf, "dLPb9UCUTa8WVNATdCYpZIcIxLWEoR4TLDWvr9rajBw="), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"filename", "publish", "source_code_hash"}, + }, + { + PreConfig: func() { + if err := testAccCreateZipFromFiles(map[string]string{"test-fixtures/lambda_func_modified.py": "lambda_handler.py"}, zipFile); err != nil { + t.Fatalf("error creating zip from files: %s", err) + } + timeBeforeUpdate = time.Now() + }, + Config: testAccFunctionConfig_localPublish(path, rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckFunctionExists(ctx, resourceName, &conf), + testAccCheckSourceCodeHash(&conf, "7x43uxhWHTejc6xUvJlAcRvdVmRpqwGIYHpok5qDiYs="), + func(s *terraform.State) error { + return testAccCheckAttributeIsDateAfter(s, resourceName, "last_modified", timeBeforeUpdate) + }, + ), + }, + }, + }) +} + func TestAccLambdaFunction_S3Update_basic(t *testing.T) { ctx := acctest.Context(t) path, zipFile, err := createTempFile("lambda_s3Update") @@ -2287,6 +2358,119 @@ func TestAccLambdaFunction_ipv6AllowedForDualStack(t *testing.T) { }) } +func TestAccLambdaFunction_sourceKMSKeyARN(t *testing.T) { + ctx := acctest.Context(t) + var conf lambda.GetFunctionOutput + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_lambda_function.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.LambdaServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckFunctionDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccFunctionConfig_sourceKMSKeyARN(rName, "test"), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, 
plancheck.ResourceActionCreate), + }, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckFunctionExists(ctx, resourceName, &conf), + testAccCheckFunctionInvokeARN(resourceName, &conf), + testAccCheckFunctionQualifiedInvokeARN(resourceName, &conf), + testAccCheckFunctionName(&conf, rName), + resource.TestCheckResourceAttrPair(resourceName, "source_kms_key_arn", "aws_kms_key.test", names.AttrARN), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"filename", "publish"}, + }, + { + Config: testAccFunctionConfig_sourceKMSKeyARN(rName, "test2"), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + }, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckFunctionExists(ctx, resourceName, &conf), + testAccCheckFunctionInvokeARN(resourceName, &conf), + testAccCheckFunctionQualifiedInvokeARN(resourceName, &conf), + testAccCheckFunctionName(&conf, rName), + resource.TestCheckResourceAttrPair(resourceName, "source_kms_key_arn", "aws_kms_key.test2", names.AttrARN), + ), + }, + }, + }) +} + +func TestAccLambdaFunction_resetNonRefreshableAttributesAfterUpdateFailure(t *testing.T) { + ctx := acctest.Context(t) + var conf lambda.GetFunctionOutput + resourceName := "aws_lambda_function.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.LambdaServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckFunctionDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccFunctionConfig_resetNonRefreshableAttributesAfterUpdateFailure(rName, "lambdatest.zip", "lambdatest.zip"), + Check: resource.ComposeTestCheckFunc( + testAccCheckFunctionExists(ctx, resourceName, &conf), 
+ resource.TestCheckResourceAttr(resourceName, "s3_key", "lambdatest.zip"), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + }, + }, + }, + { + // Update with a non-existent S3 key to force an error + Config: testAccFunctionConfig_resetNonRefreshableAttributesAfterUpdateFailure(rName, "lambdatest.zip", "lambdatest_not_exist.zip"), + ExpectError: regexache.MustCompile(`The specified key does not exist`), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + }, + }, + }, + { + // Revert to previous configuration to ensure non-refreshable attributes were reset + // This step would fail if s3_key was not reset to "lambdatest.zip" + Config: testAccFunctionConfig_resetNonRefreshableAttributesAfterUpdateFailure(rName, "lambdatest.zip", "lambdatest.zip"), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectEmptyPlan(), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New("s3_key"), knownvalue.StringExact("lambdatest.zip")), + }, + }, + }, + { + Config: testAccFunctionConfig_resetNonRefreshableAttributesAfterUpdateFailure(rName, "lambdatest_modified.zip", "lambdatest_modified.zip"), + Check: resource.ComposeTestCheckFunc( + testAccCheckFunctionExists(ctx, resourceName, &conf), + resource.TestCheckResourceAttr(resourceName, "s3_key", "lambdatest_modified.zip"), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + }, + }, + }, + }, + }) +} + func TestAccLambdaFunction_skipDestroy(t *testing.T) { ctx := acctest.Context(t) var conf lambda.GetFunctionOutput @@ -2776,6 +2960,22 @@ resource "aws_lambda_function" "test" { `, rName)) } +func testAccFunctionConfig_concurrencyPublished(rName 
string) string { + return acctest.ConfigCompose( + acctest.ConfigLambdaBase(rName, rName, rName), + fmt.Sprintf(` +resource "aws_lambda_function" "test" { + filename = "test-fixtures/lambdatest.zip" + function_name = %[1]q + role = aws_iam_role.iam_for_lambda.arn + handler = "exports.example" + publish = true + runtime = "nodejs20.x" + reserved_concurrent_executions = 222 +} +`, rName)) +} + func testAccFunctionConfig_noFilenameAndS3Attributes(rName string) string { return acctest.ConfigCompose( acctest.ConfigLambdaBase(rName, rName, rName), @@ -3782,7 +3982,7 @@ resource "aws_s3_bucket" "lambda_bucket" { } resource "aws_s3_object" "lambda_code" { - bucket = aws_s3_bucket.lambda_bucket.id + bucket = aws_s3_bucket.lambda_bucket.bucket key = "lambdatest.zip" source = "test-fixtures/lambdatest.zip" } @@ -3808,8 +4008,8 @@ EOF } resource "aws_lambda_function" "test" { - s3_bucket = aws_s3_bucket.lambda_bucket.id - s3_key = aws_s3_object.lambda_code.id + s3_bucket = aws_s3_object.lambda_code.bucket + s3_key = aws_s3_object.lambda_code.key function_name = %[1]q role = aws_iam_role.iam_for_lambda.arn handler = "exports.example" @@ -3883,6 +4083,44 @@ resource "aws_lambda_function" "test" { `, filePath, rName) } +func testAccFunctionConfig_localPublish(filePath, rName string) string { + return fmt.Sprintf(` +resource "aws_iam_role" "iam_for_lambda" { + name = %[2]q + + assume_role_policy = < { + if (process.env.TEST_DATA) { + event.key3 = process.env.TEST_DATA; + } + return {output: event}; +} diff --git a/internal/service/lambda/test-fixtures/lambda_invocation_ephemeral.zip b/internal/service/lambda/test-fixtures/lambda_invocation_ephemeral.zip new file mode 100644 index 000000000000..78c1c53c2e2b Binary files /dev/null and b/internal/service/lambda/test-fixtures/lambda_invocation_ephemeral.zip differ diff --git a/internal/service/lambda/testdata/Function/basic/main_gen.tf b/internal/service/lambda/testdata/Function/basic/main_gen.tf new file mode 100644 index 
000000000000..4433e6b2e4f5 --- /dev/null +++ b/internal/service/lambda/testdata/Function/basic/main_gen.tf @@ -0,0 +1,91 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resource "aws_lambda_function" "test" { + filename = "test-fixtures/lambdatest.zip" + function_name = var.rName + role = aws_iam_role.test.arn + handler = "exports.example" + runtime = "nodejs20.x" +} + +data "aws_partition" "current" {} + +resource "aws_iam_role" "test" { + name = var.rName + + assume_role_policy = < 0 { + input.AvailabilityZoneMappings = expandAvailabilityZoneMapping(v.List()) } if v, ok := d.GetOk("delete_protection"); ok { @@ -201,6 +248,18 @@ func resourceFirewallCreate(ctx context.Context, d *schema.ResourceData, meta an input.SubnetChangeProtection = v.(bool) } + if v := d.Get("subnet_mapping").(*schema.Set); v.Len() > 0 { + input.SubnetMappings = expandSubnetMappings(v.List()) + } + + if v, ok := d.GetOk(names.AttrTransitGatewayID); ok { + input.TransitGatewayId = aws.String(v.(string)) + } + + if v, ok := d.GetOk(names.AttrVPCID); ok { + input.VpcId = aws.String(v.(string)) + } + output, err := conn.CreateFirewall(ctx, &input) if err != nil { @@ -209,10 +268,15 @@ func resourceFirewallCreate(ctx context.Context, d *schema.ResourceData, meta an d.SetId(aws.ToString(output.Firewall.FirewallArn)) - if _, err := waitFirewallCreated(ctx, conn, d.Timeout(schema.TimeoutCreate), d.Id()); err != nil { - return sdkdiag.AppendErrorf(diags, "waiting for NetworkFirewall Firewall (%s) create: %s", d.Id(), err) + if output.Firewall.TransitGatewayId != nil { + if _, err := waitFirewallTransitGatewayAttachmentCreated(ctx, conn, d.Timeout(schema.TimeoutCreate), d.Id()); err != nil { + return sdkdiag.AppendErrorf(diags, "waiting for NetworkFirewall Firewall Transit Gateway Attachment (%s) create: %s", d.Id(), err) + } + } else { + if _, err := waitFirewallCreated(ctx, conn, d.Timeout(schema.TimeoutCreate), d.Id()); err != nil { + return sdkdiag.AppendErrorf(diags, 
"waiting for NetworkFirewall Firewall (%s) create: %s", d.Id(), err) + } } - return append(diags, resourceFirewallRead(ctx, d, meta)...) } @@ -234,6 +298,8 @@ func resourceFirewallRead(ctx context.Context, d *schema.ResourceData, meta any) firewall := output.Firewall d.Set(names.AttrARN, firewall.FirewallArn) + d.Set("availability_zone_change_protection", firewall.AvailabilityZoneChangeProtection) + d.Set("availability_zone_mapping", flattenAvailabilityZoneMapping(firewall.AvailabilityZoneMappings)) d.Set("delete_protection", firewall.DeleteProtection) d.Set(names.AttrDescription, firewall.Description) d.Set("enabled_analysis_types", firewall.EnabledAnalysisTypes) @@ -250,6 +316,8 @@ func resourceFirewallRead(ctx context.Context, d *schema.ResourceData, meta any) if err := d.Set("subnet_mapping", flattenSubnetMappings(firewall.SubnetMappings)); err != nil { return sdkdiag.AppendErrorf(diags, "setting subnet_mapping: %s", err) } + d.Set(names.AttrTransitGatewayID, firewall.TransitGatewayId) + d.Set("transit_gateway_owner_account_id", firewall.TransitGatewayOwnerAccountId) d.Set("update_token", output.UpdateToken) d.Set(names.AttrVPCID, firewall.VpcId) @@ -331,6 +399,68 @@ func resourceFirewallUpdate(ctx context.Context, d *schema.ResourceData, meta an // Note: The *_change_protection fields below are handled before their respective fields // to account for disabling and subsequent changes. 
+ if d.HasChange("availability_zone_change_protection") { + input := networkfirewall.UpdateAvailabilityZoneChangeProtectionInput{ + AvailabilityZoneChangeProtection: d.Get("availability_zone_change_protection").(bool), + FirewallArn: aws.String(d.Id()), + UpdateToken: aws.String(updateToken), + } + output, err := conn.UpdateAvailabilityZoneChangeProtection(ctx, &input) + if err != nil { + return sdkdiag.AppendErrorf(diags, "updating NetworkFirewall Firewall (%s) availability zone change protection: %s", d.Id(), err) + } + updateToken = aws.ToString(output.UpdateToken) + } + + if d.HasChange("availability_zone_mapping") { + o, n := d.GetChange("availability_zone_mapping") + availabilityZoneToRemove, availabilityZoneToAdd := availabilityZoneMappingsDiff(o.(*schema.Set), n.(*schema.Set)) + + if len(availabilityZoneToAdd) > 0 { + input := networkfirewall.AssociateAvailabilityZonesInput{ + FirewallArn: aws.String(d.Id()), + AvailabilityZoneMappings: availabilityZoneToAdd, + UpdateToken: aws.String(updateToken), + } + + _, err := conn.AssociateAvailabilityZones(ctx, &input) + + if err != nil { + return sdkdiag.AppendErrorf(diags, "associating NetworkFirewall Firewall (%s) availability zones: %s", d.Id(), err) + } + + output, err := waitFirewallUpdated(ctx, conn, d.Timeout(schema.TimeoutUpdate), d.Id()) + + if err != nil { + return sdkdiag.AppendErrorf(diags, "waiting for NetworkFirewall Firewall (%s) update: %s", d.Id(), err) + } + + updateToken = aws.ToString(output.UpdateToken) + } + + if len(availabilityZoneToRemove) > 0 { + input := networkfirewall.DisassociateAvailabilityZonesInput{ + FirewallArn: aws.String(d.Id()), + AvailabilityZoneMappings: availabilityZoneToRemove, + UpdateToken: aws.String(updateToken), + } + + _, err := conn.DisassociateAvailabilityZones(ctx, &input) + + if err == nil { + output, err := waitFirewallUpdated(ctx, conn, d.Timeout(schema.TimeoutUpdate), d.Id()) + + if err != nil { + return sdkdiag.AppendErrorf(diags, "waiting for NetworkFirewall 
Firewall (%s) update: %s", d.Id(), err) + } + + updateToken = aws.ToString(output.UpdateToken) + } else if !errs.IsAErrorMessageContains[*awstypes.InvalidRequestException](err, "inaccessible") { + return sdkdiag.AppendErrorf(diags, "disassociating NetworkFirewall Firewall (%s) availability zones: %s", d.Id(), err) + } + } + } + if d.HasChange("firewall_policy_change_protection") { input := networkfirewall.UpdateFirewallPolicyChangeProtectionInput{ FirewallArn: aws.String(d.Id()), @@ -439,7 +569,12 @@ func resourceFirewallDelete(ctx context.Context, d *schema.ResourceData, meta an input := networkfirewall.DeleteFirewallInput{ FirewallArn: aws.String(d.Id()), } - _, err := conn.DeleteFirewall(ctx, &input) + const ( + timeout = 1 * time.Minute + ) + _, err := tfresource.RetryWhenIsAErrorMessageContains[any, *awstypes.InvalidOperationException](ctx, timeout, func(ctx context.Context) (any, error) { + return conn.DeleteFirewall(ctx, &input) + }, "still in use") if errs.IsA[*awstypes.ResourceNotFoundException](err) { return diags @@ -501,6 +636,26 @@ func statusFirewall(ctx context.Context, conn *networkfirewall.Client, arn strin } } +func statusFirewallTransitGatewayAttachment(ctx context.Context, conn *networkfirewall.Client, arn string) retry.StateRefreshFunc { + return func() (any, string, error) { + output, err := findFirewallByARN(ctx, conn, arn) + + if tfresource.NotFound(err) { + return nil, "", nil + } + + if err != nil { + return nil, "", err + } + + if output.FirewallStatus.TransitGatewayAttachmentSyncState == nil { + return nil, "", nil + } + + return output, string(output.FirewallStatus.TransitGatewayAttachmentSyncState.TransitGatewayAttachmentStatus), nil + } +} + func waitFirewallCreated(ctx context.Context, conn *networkfirewall.Client, timeout time.Duration, arn string) (*networkfirewall.DescribeFirewallOutput, error) { stateConf := &retry.StateChangeConf{ Pending: enum.Slice(awstypes.FirewallStatusValueProvisioning), @@ -518,6 +673,23 @@ func 
waitFirewallCreated(ctx context.Context, conn *networkfirewall.Client, time return nil, err } +func waitFirewallTransitGatewayAttachmentCreated(ctx context.Context, conn *networkfirewall.Client, timeout time.Duration, arn string) (*networkfirewall.DescribeFirewallOutput, error) { + stateConf := &retry.StateChangeConf{ + Pending: enum.Slice(awstypes.TransitGatewayAttachmentStatusCreating), + Target: enum.Slice(awstypes.TransitGatewayAttachmentStatusPendingAcceptance, awstypes.TransitGatewayAttachmentStatusReady), + Refresh: statusFirewallTransitGatewayAttachment(ctx, conn, arn), + Timeout: timeout, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + + if output, ok := outputRaw.(*networkfirewall.DescribeFirewallOutput); ok { + return output, err + } + + return nil, err +} + func waitFirewallUpdated(ctx context.Context, conn *networkfirewall.Client, timeout time.Duration, arn string) (*networkfirewall.DescribeFirewallOutput, error) { stateConf := &retry.StateChangeConf{ Pending: enum.Slice(awstypes.FirewallStatusValueProvisioning), @@ -541,7 +713,7 @@ func waitFirewallUpdated(ctx context.Context, conn *networkfirewall.Client, time func waitFirewallDeleted(ctx context.Context, conn *networkfirewall.Client, timeout time.Duration, arn string) (*networkfirewall.DescribeFirewallOutput, error) { stateConf := &retry.StateChangeConf{ - Pending: enum.Slice(awstypes.FirewallStatusValueDeleting), + Pending: enum.Slice(awstypes.FirewallStatusValueDeleting, awstypes.FirewallStatusValueProvisioning), Target: []string{}, Refresh: statusFirewall(ctx, conn, arn), Timeout: timeout, @@ -602,7 +774,8 @@ func flattenFirewallStatus(apiObject *awstypes.FirewallStatus) []any { } tfMap := map[string]any{ - "sync_states": flattenSyncStates(apiObject.SyncStates), + "sync_states": flattenSyncStates(apiObject.SyncStates), + "transit_gateway_attachment_sync_states": flattenTransitGatewayAttachmentSyncState(apiObject.TransitGatewayAttachmentSyncState), } return []any{tfMap} @@ -675,3 
+848,69 @@ func subnetMappingsDiff(old, new *schema.Set) ([]string, []awstypes.SubnetMappin return subnetsToRemove, subnetsToAdd } + +func expandAvailabilityZoneMapping(tfList []any) []awstypes.AvailabilityZoneMapping { + apiObjects := make([]awstypes.AvailabilityZoneMapping, 0, len(tfList)) + + for _, tfMapRaw := range tfList { + tfMap, ok := tfMapRaw.(map[string]any) + if !ok { + continue + } + + apiObject := awstypes.AvailabilityZoneMapping{ + AvailabilityZone: aws.String(tfMap["availability_zone_id"].(string)), + } + + if v, ok := tfMap["availability_zone_id"].(string); ok && v != "" { + apiObject.AvailabilityZone = aws.String(v) + } + + apiObjects = append(apiObjects, apiObject) + } + + return apiObjects +} + +func flattenAvailabilityZoneMapping(apiObjects []awstypes.AvailabilityZoneMapping) []any { + tfList := make([]any, 0, len(apiObjects)) + + for _, apiObject := range apiObjects { + tfMap := map[string]any{ + "availability_zone_id": aws.ToString(apiObject.AvailabilityZone), + } + + tfList = append(tfList, tfMap) + } + + return tfList +} + +func flattenTransitGatewayAttachmentSyncState(apiObject *awstypes.TransitGatewayAttachmentSyncState) []any { + if apiObject == nil { + return nil + } + + tfMap := map[string]any{ + "attachment_id": apiObject.AttachmentId, + } + + return []any{tfMap} +} + +func availabilityZoneMappingsDiff(old, new *schema.Set) ([]awstypes.AvailabilityZoneMapping, []awstypes.AvailabilityZoneMapping) { + if old.Len() == 0 { + return nil, expandAvailabilityZoneMapping(new.List()) + } + if new.Len() == 0 { + return expandAvailabilityZoneMapping(old.List()), nil + } + + toRemove := old.Difference(new) + toAdd := new.Difference(old) + + availabilityZonesToRemove := expandAvailabilityZoneMapping(toRemove.List()) + availabilityZonesToAdd := expandAvailabilityZoneMapping(toAdd.List()) + + return availabilityZonesToRemove, availabilityZonesToAdd +} diff --git a/internal/service/networkfirewall/firewall_data_source.go 
b/internal/service/networkfirewall/firewall_data_source.go index d56d1114517d..0b4b9eda6fb8 100644 --- a/internal/service/networkfirewall/firewall_data_source.go +++ b/internal/service/networkfirewall/firewall_data_source.go @@ -27,6 +27,22 @@ func dataSourceFirewall() *schema.Resource { ReadWithoutTimeout: dataSourceFirewallResourceRead, Schema: map[string]*schema.Schema{ + "availability_zone_change_protection": { + Type: schema.TypeBool, + Computed: true, + }, + "availability_zone_mapping": { + Type: schema.TypeSet, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "availability_zone_id": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, names.AttrARN: { Type: schema.TypeString, Optional: true, @@ -138,6 +154,10 @@ func dataSourceFirewall() *schema.Resource { Type: schema.TypeString, Computed: true, }, + names.AttrStatusMessage: { + Type: schema.TypeString, + Computed: true, + }, names.AttrSubnetID: { Type: schema.TypeString, Computed: true, @@ -152,6 +172,26 @@ func dataSourceFirewall() *schema.Resource { }, }, }, + "transit_gateway_attachment_sync_states": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "attachment_id": { + Type: schema.TypeString, + Computed: true, + }, + names.AttrStatusMessage: { + Type: schema.TypeString, + Computed: true, + }, + "transit_gateway_attachment_status": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, }, }, }, @@ -179,6 +219,14 @@ func dataSourceFirewall() *schema.Resource { }, }, names.AttrTags: tftags.TagsSchemaComputed(), + names.AttrTransitGatewayID: { + Type: schema.TypeString, + Computed: true, + }, + "transit_gateway_owner_account_id": { + Type: schema.TypeString, + Computed: true, + }, "update_token": { Type: schema.TypeString, Computed: true, @@ -212,6 +260,8 @@ func dataSourceFirewallResourceRead(ctx context.Context, d *schema.ResourceData, firewall := output.Firewall 
d.SetId(aws.ToString(firewall.FirewallArn)) d.Set(names.AttrARN, firewall.FirewallArn) + d.Set("availability_zone_change_protection", firewall.AvailabilityZoneChangeProtection) + d.Set("availability_zone_mapping", flattenDataSourceAvailabilityZoneMapping(firewall.AvailabilityZoneMappings)) d.Set("delete_protection", firewall.DeleteProtection) d.Set(names.AttrDescription, firewall.Description) d.Set("enabled_analysis_types", firewall.EnabledAnalysisTypes) @@ -228,6 +278,8 @@ func dataSourceFirewallResourceRead(ctx context.Context, d *schema.ResourceData, if err := d.Set("subnet_mapping", flattenDataSourceSubnetMappings(firewall.SubnetMappings)); err != nil { return sdkdiag.AppendErrorf(diags, "setting subnet_mappings: %s", err) } + d.Set(names.AttrTransitGatewayID, firewall.TransitGatewayId) + d.Set("transit_gateway_owner_account_id", firewall.TransitGatewayOwnerAccountId) d.Set("update_token", output.UpdateToken) d.Set(names.AttrVPCID, firewall.VpcId) @@ -252,6 +304,9 @@ func flattenDataSourceFirewallStatus(apiObject *awstypes.FirewallStatus) []any { if apiObject.SyncStates != nil { tfMap["sync_states"] = flattenDataSourceSyncStates(apiObject.SyncStates) } + if apiObject.TransitGatewayAttachmentSyncState != nil { + tfMap["transit_gateway_attachment_sync_states"] = flattenDataSourceTransitGatewayAttachmentSyncState(apiObject.TransitGatewayAttachmentSyncState) + } return []any{tfMap} } @@ -323,9 +378,10 @@ func flattenDataSourceAttachment(apiObject *awstypes.Attachment) []any { } tfMap := map[string]any{ - "endpoint_id": aws.ToString(apiObject.EndpointId), - names.AttrStatus: apiObject.Status, - names.AttrSubnetID: aws.ToString(apiObject.SubnetId), + "endpoint_id": aws.ToString(apiObject.EndpointId), + names.AttrStatus: apiObject.Status, + names.AttrStatusMessage: aws.ToString(apiObject.StatusMessage), + names.AttrSubnetID: aws.ToString(apiObject.SubnetId), } return []any{tfMap} @@ -357,3 +413,31 @@ func flattenDataSourceEncryptionConfiguration(apiObject 
*awstypes.EncryptionConf return []any{tfMap} } + +func flattenDataSourceTransitGatewayAttachmentSyncState(apiObject *awstypes.TransitGatewayAttachmentSyncState) []any { + if apiObject == nil { + return nil + } + + tfMap := map[string]any{ + "attachment_id": aws.ToString(apiObject.AttachmentId), + names.AttrStatusMessage: aws.ToString(apiObject.StatusMessage), + "transit_gateway_attachment_status": apiObject.TransitGatewayAttachmentStatus, + } + + return []any{tfMap} +} + +func flattenDataSourceAvailabilityZoneMapping(apiObjects []awstypes.AvailabilityZoneMapping) []any { + tfList := make([]any, 0, len(apiObjects)) + + for _, apiObject := range apiObjects { + tfMap := map[string]any{ + "availability_zone_id": aws.ToString(apiObject.AvailabilityZone), + } + + tfList = append(tfList, tfMap) + } + + return tfList +} diff --git a/internal/service/networkfirewall/firewall_data_source_test.go b/internal/service/networkfirewall/firewall_data_source_test.go index 973898e31466..ff95350340a3 100644 --- a/internal/service/networkfirewall/firewall_data_source_test.go +++ b/internal/service/networkfirewall/firewall_data_source_test.go @@ -128,7 +128,7 @@ func TestAccNetworkFirewallFirewallDataSource_arnandname(t *testing.T) { ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, Steps: []resource.TestStep{ { - Config: testAccFirewallDataSourceConfig_arnandname(rName), + Config: testAccFirewallDataSourceConfig_arnAndName(rName), Check: resource.ComposeTestCheckFunc( testAccCheckFirewallExists(ctx, resourceName), acctest.CheckResourceAttrRegionalARN(ctx, resourceName, names.AttrARN, "network-firewall", fmt.Sprintf("firewall/%s", rName)), @@ -161,107 +161,88 @@ func TestAccNetworkFirewallFirewallDataSource_arnandname(t *testing.T) { }, }) } +func TestAccNetworkFirewallFirewallDataSource_transitGatewayAttachment(t *testing.T) { + ctx := acctest.Context(t) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_networkfirewall_firewall.test" + 
dataSourceName := "data.aws_networkfirewall_firewall.test" + dataSourceCallerIdentity := "data.aws_caller_identity.current" + dataSourceAvailabilityZones := "data.aws_availability_zones.available" -func testAccFirewallDataSourceDependenciesConfig(rName string) string { - return fmt.Sprintf(` -data "aws_availability_zones" "available" { - state = "available" - - filter { - name = "opt-in-status" - values = ["opt-in-not-required"] - } -} - -resource "aws_vpc" "test" { - cidr_block = "192.168.0.0/16" - - tags = { - Name = %[1]q - } -} - -resource "aws_subnet" "test" { - availability_zone = data.aws_availability_zones.available.names[0] - cidr_block = cidrsubnet(aws_vpc.test.cidr_block, 8, 0) - vpc_id = aws_vpc.test.id - - tags = { - Name = %[1]q - } -} - -resource "aws_networkfirewall_firewall_policy" "test" { - name = %[1]q - firewall_policy { - stateless_fragment_default_actions = ["aws:drop"] - stateless_default_actions = ["aws:pass"] - } -} -`, rName) + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkFirewallServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckFirewallDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccFirewallDataSourceConfig_transitGatewayAttachment(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckFirewallExists(ctx, resourceName), + resource.TestCheckResourceAttr(dataSourceName, "firewall_status.0.status", "READY"), + resource.TestCheckResourceAttr(dataSourceName, "firewall_status.0.transit_gateway_attachment_sync_states.0.transit_gateway_attachment_status", "READY"), + resource.TestCheckResourceAttr(dataSourceName, "availability_zone_change_protection", acctest.CtFalse), + resource.TestCheckTypeSetElemAttrPair(dataSourceName, "availability_zone_mapping.0.availability_zone_id", dataSourceAvailabilityZones, "zone_ids.0"), + 
resource.TestCheckResourceAttrSet(dataSourceName, "firewall_status.0.transit_gateway_attachment_sync_states.0.attachment_id"), + resource.TestCheckResourceAttrPair(dataSourceName, "transit_gateway_owner_account_id", dataSourceCallerIdentity, names.AttrAccountID), + ), + }, + }, + }) } -func testAccFirewallDataSourceConfig_arn(rName string) string { - return acctest.ConfigCompose( - testAccFirewallDataSourceDependenciesConfig(rName), - fmt.Sprintf(` +func testAccFirewallDataSourceConfig_baseVPC(rName string) string { + return acctest.ConfigCompose(testAccFirewallConfig_baseVPC(rName), fmt.Sprintf(` resource "aws_networkfirewall_firewall" "test" { name = %[1]q firewall_policy_arn = aws_networkfirewall_firewall_policy.test.arn vpc_id = aws_vpc.test.id subnet_mapping { - subnet_id = aws_subnet.test.id + subnet_id = aws_subnet.test[0].id } +}`, rName)) } +func testAccFirewallDataSourceConfig_arn(rName string) string { + return acctest.ConfigCompose(testAccFirewallDataSourceConfig_baseVPC(rName), ` data "aws_networkfirewall_firewall" "test" { arn = aws_networkfirewall_firewall.test.arn } -`, rName)) +`) } func testAccFirewallDataSourceConfig_name(rName string) string { - return acctest.ConfigCompose( - testAccFirewallDataSourceDependenciesConfig(rName), - fmt.Sprintf(` -resource "aws_networkfirewall_firewall" "test" { - name = %[1]q - firewall_policy_arn = aws_networkfirewall_firewall_policy.test.arn - vpc_id = aws_vpc.test.id - - subnet_mapping { - subnet_id = aws_subnet.test.id - } + return acctest.ConfigCompose(testAccFirewallDataSourceConfig_baseVPC(rName), ` +data "aws_networkfirewall_firewall" "test" { + name = aws_networkfirewall_firewall.test.name +} +`) } +func testAccFirewallDataSourceConfig_arnAndName(rName string) string { + return acctest.ConfigCompose(testAccFirewallDataSourceConfig_baseVPC(rName), ` data "aws_networkfirewall_firewall" "test" { - name = %[1]q - - depends_on = [aws_networkfirewall_firewall.test] + arn = aws_networkfirewall_firewall.test.arn 
+ name = aws_networkfirewall_firewall.test.name } -`, rName)) +`) } -func testAccFirewallDataSourceConfig_arnandname(rName string) string { - return acctest.ConfigCompose( - testAccFirewallDataSourceDependenciesConfig(rName), - fmt.Sprintf(` +func testAccFirewallDataSourceConfig_transitGatewayAttachment(rName string) string { + return acctest.ConfigCompose(testAccFirewallConfig_baseTGW(rName), fmt.Sprintf(` resource "aws_networkfirewall_firewall" "test" { name = %[1]q firewall_policy_arn = aws_networkfirewall_firewall_policy.test.arn - vpc_id = aws_vpc.test.id + transit_gateway_id = aws_ec2_transit_gateway.test.id - subnet_mapping { - subnet_id = aws_subnet.test.id + availability_zone_mapping { + availability_zone_id = data.aws_availability_zones.available.zone_ids[0] } } data "aws_networkfirewall_firewall" "test" { - arn = aws_networkfirewall_firewall.test.arn - name = %[1]q - - depends_on = [aws_networkfirewall_firewall.test] + name = aws_networkfirewall_firewall.test.name } `, rName)) } diff --git a/internal/service/networkfirewall/firewall_policy.go b/internal/service/networkfirewall/firewall_policy.go index 66adb958c837..7fc210658a6e 100644 --- a/internal/service/networkfirewall/firewall_policy.go +++ b/internal/service/networkfirewall/firewall_policy.go @@ -336,7 +336,7 @@ func resourceFirewallPolicyDelete(ctx context.Context, d *schema.ResourceData, m const ( timeout = 10 * time.Minute ) - _, err := tfresource.RetryWhenIsAErrorMessageContains[*awstypes.InvalidOperationException](ctx, timeout, func() (any, error) { + _, err := tfresource.RetryWhenIsAErrorMessageContains[any, *awstypes.InvalidOperationException](ctx, timeout, func(ctx context.Context) (any, error) { return conn.DeleteFirewallPolicy(ctx, &networkfirewall.DeleteFirewallPolicyInput{ FirewallPolicyArn: aws.String(d.Id()), }) diff --git a/internal/service/networkfirewall/firewall_policy_data_source.go b/internal/service/networkfirewall/firewall_policy_data_source.go index 1c063029c678..7b06f1131076 
100644 --- a/internal/service/networkfirewall/firewall_policy_data_source.go +++ b/internal/service/networkfirewall/firewall_policy_data_source.go @@ -87,6 +87,18 @@ func dataSourceFirewallPolicy() *schema.Resource { Computed: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ + "flow_timeouts": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "tcp_idle_timeout_seconds": { + Type: schema.TypeInt, + Computed: true, + }, + }, + }, + }, "rule_order": { Type: schema.TypeString, Computed: true, @@ -109,12 +121,12 @@ func dataSourceFirewallPolicy() *schema.Resource { }, "override": { Type: schema.TypeList, - Optional: true, + Computed: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ names.AttrAction: { Type: schema.TypeString, - Optional: true, + Computed: true, }, }, }, diff --git a/internal/service/networkfirewall/firewall_policy_data_source_test.go b/internal/service/networkfirewall/firewall_policy_data_source_test.go index 5968a3211869..6a1c859c674c 100644 --- a/internal/service/networkfirewall/firewall_policy_data_source_test.go +++ b/internal/service/networkfirewall/firewall_policy_data_source_test.go @@ -34,7 +34,7 @@ func TestAccNetworkFirewallFirewallPolicyDataSource_arn(t *testing.T) { resource.TestCheckResourceAttrPair(datasourceName, "firewall_policy.0.stateless_fragment_default_actions.0", resourceName, "firewall_policy.0.stateless_fragment_default_actions.0"), resource.TestCheckResourceAttrPair(datasourceName, "firewall_policy.0.stateless_default_actions.#", resourceName, "firewall_policy.0.stateless_default_actions.#"), resource.TestCheckResourceAttrPair(datasourceName, "firewall_policy.0.stateless_default_actions.0", resourceName, "firewall_policy.0.stateless_default_actions.0"), - resource.TestCheckResourceAttrPair(datasourceName, "firewall_policy.0.tls_inspection_coniguration_arn", resourceName, "firewall_policy.0.tls_inspection_coniguration_arn"), + 
resource.TestCheckResourceAttrPair(datasourceName, "firewall_policy.0.tls_inspection_configuration_arn", resourceName, "firewall_policy.0.tls_inspection_configuration_arn"), resource.TestCheckResourceAttrPair(datasourceName, names.AttrName, resourceName, names.AttrName), resource.TestCheckResourceAttrPair(datasourceName, acctest.CtTagsPercent, resourceName, acctest.CtTagsPercent), ), @@ -64,7 +64,7 @@ func TestAccNetworkFirewallFirewallPolicyDataSource_name(t *testing.T) { resource.TestCheckResourceAttrPair(datasourceName, "firewall_policy.0.stateless_fragment_default_actions.0", resourceName, "firewall_policy.0.stateless_fragment_default_actions.0"), resource.TestCheckResourceAttrPair(datasourceName, "firewall_policy.0.stateless_default_actions.#", resourceName, "firewall_policy.0.stateless_default_actions.#"), resource.TestCheckResourceAttrPair(datasourceName, "firewall_policy.0.stateless_default_actions.0", resourceName, "firewall_policy.0.stateless_default_actions.0"), - resource.TestCheckResourceAttrPair(datasourceName, "firewall_policy.0.tls_inspection_coniguration_arn", resourceName, "firewall_policy.0.tls_inspection_coniguration_arn"), + resource.TestCheckResourceAttrPair(datasourceName, "firewall_policy.0.tls_inspection_configuration_arn", resourceName, "firewall_policy.0.tls_inspection_configuration_arn"), resource.TestCheckResourceAttrPair(datasourceName, names.AttrName, resourceName, names.AttrName), resource.TestCheckResourceAttrPair(datasourceName, acctest.CtTagsPercent, resourceName, acctest.CtTagsPercent), ), @@ -94,7 +94,7 @@ func TestAccNetworkFirewallFirewallPolicyDataSource_nameAndARN(t *testing.T) { resource.TestCheckResourceAttrPair(datasourceName, "firewall_policy.0.stateless_fragment_default_actions.0", resourceName, "firewall_policy.0.stateless_fragment_default_actions.0"), resource.TestCheckResourceAttrPair(datasourceName, "firewall_policy.0.stateless_default_actions.#", resourceName, "firewall_policy.0.stateless_default_actions.#"), 
resource.TestCheckResourceAttrPair(datasourceName, "firewall_policy.0.stateless_default_actions.0", resourceName, "firewall_policy.0.stateless_default_actions.0"), - resource.TestCheckResourceAttrPair(datasourceName, "firewall_policy.0.tls_inspection_coniguration_arn", resourceName, "firewall_policy.0.tls_inspection_coniguration_arn"), + resource.TestCheckResourceAttrPair(datasourceName, "firewall_policy.0.tls_inspection_configuration_arn", resourceName, "firewall_policy.0.tls_inspection_configuration_arn"), resource.TestCheckResourceAttrPair(datasourceName, names.AttrName, resourceName, names.AttrName), resource.TestCheckResourceAttrPair(datasourceName, acctest.CtTagsPercent, resourceName, acctest.CtTagsPercent), ), @@ -192,6 +192,66 @@ func TestAccNetworkFirewallFirewallPolicyDataSource_activeThreatDefense(t *testi }) } +func TestAccNetworkFirewallFirewallPolicyDataSource_statefulEngineOptions(t *testing.T) { + ctx := acctest.Context(t) + rName := sdkacctest.RandomWithPrefix("resource-test-terraform") + resourceName := "aws_networkfirewall_firewall_policy.test" + datasourceName := "data.aws_networkfirewall_firewall_policy.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkFirewallServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + Config: testAccFirewallPolicyDataSourceConfig_statefulEngineOptions(rName), + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttrPair(datasourceName, names.AttrARN, resourceName, names.AttrARN), resource.TestCheckResourceAttrPair(datasourceName, names.AttrDescription, resourceName, names.AttrDescription), + resource.TestCheckResourceAttrPair(datasourceName, "firewall_policy.#", resourceName, "firewall_policy.#"), + resource.TestCheckResourceAttrPair(datasourceName, "firewall_policy.0.stateless_fragment_default_actions.#", 
resourceName, "firewall_policy.0.stateless_fragment_default_actions.#"), + resource.TestCheckResourceAttrPair(datasourceName, "firewall_policy.0.stateless_fragment_default_actions.0", resourceName, "firewall_policy.0.stateless_fragment_default_actions.0"), + resource.TestCheckResourceAttrPair(datasourceName, "firewall_policy.0.stateful_engine_options.#", resourceName, "firewall_policy.0.stateful_engine_options.#"), + resource.TestCheckResourceAttrPair(datasourceName, "firewall_policy.0.stateful_engine_options.0.flow_timeouts.#", resourceName, "firewall_policy.0.stateful_engine_options.0.flow_timeouts.#"), + resource.TestCheckResourceAttrPair(datasourceName, "firewall_policy.0.stateful_engine_options.0.flow_timeouts.0.tcp_idle_timeout_seconds", resourceName, "firewall_policy.0.stateful_engine_options.0.flow_timeouts.0.tcp_idle_timeout_seconds"), + resource.TestCheckResourceAttrPair(datasourceName, "firewall_policy.0.stateful_engine_options.0.rule_order", resourceName, "firewall_policy.0.stateful_engine_options.0.rule_order"), + resource.TestCheckResourceAttrPair(datasourceName, "firewall_policy.0.stateful_engine_options.0.stream_exception_policy", resourceName, "firewall_policy.0.stateful_engine_options.0.stream_exception_policy"), + resource.TestCheckResourceAttrPair(datasourceName, names.AttrName, resourceName, names.AttrName), + resource.TestCheckResourceAttrPair(datasourceName, acctest.CtTagsPercent, resourceName, acctest.CtTagsPercent), + ), + }, + }, + }) +} + +func TestAccNetworkFirewallFirewallPolicyDataSource_multipleStatefulRuleGroupReferences(t *testing.T) { + ctx := acctest.Context(t) + rName := sdkacctest.RandomWithPrefix("resource-test-terraform") + resourceName := "aws_networkfirewall_firewall_policy.test" + datasourceName := "data.aws_networkfirewall_firewall_policy.test" + ruleGroupResourceName1 := "aws_networkfirewall_rule_group.test.0" + ruleGroupResourceName2 := "aws_networkfirewall_rule_group.test.1" + + resource.ParallelTest(t, 
resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkFirewallServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + Config: testAccFirewallPolicyDataSourceConfig_multipleStatefulRuleGroupReferences(rName), + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttrPair(datasourceName, names.AttrARN, resourceName, names.AttrARN), resource.TestCheckResourceAttrPair(datasourceName, names.AttrDescription, resourceName, names.AttrDescription), + resource.TestCheckResourceAttrPair(datasourceName, "firewall_policy.#", resourceName, "firewall_policy.#"), + resource.TestCheckResourceAttrPair(datasourceName, "firewall_policy.0.stateful_rule_group_reference.#", resourceName, "firewall_policy.0.stateful_rule_group_reference.#"), + resource.TestCheckTypeSetElemAttrPair(datasourceName, "firewall_policy.0.stateful_rule_group_reference.*.resource_arn", ruleGroupResourceName1, names.AttrARN), + resource.TestCheckTypeSetElemAttrPair(datasourceName, "firewall_policy.0.stateful_rule_group_reference.*.resource_arn", ruleGroupResourceName2, names.AttrARN), + resource.TestCheckResourceAttrPair(datasourceName, names.AttrName, resourceName, names.AttrName), + resource.TestCheckResourceAttrPair(datasourceName, acctest.CtTagsPercent, resourceName, acctest.CtTagsPercent), + ), + }, + }, + }) +} + func testAccFirewallPolicyDataSourceConfig_basic(rName string) string { return fmt.Sprintf(` resource "aws_networkfirewall_firewall_policy" "test" { @@ -306,3 +366,35 @@ data "aws_networkfirewall_firewall_policy" "test" { arn = aws_networkfirewall_firewall_policy.test.arn }`, rName) } + +func testAccFirewallPolicyDataSourceConfig_statefulEngineOptions(rName string) string { + return fmt.Sprintf(` +resource "aws_networkfirewall_firewall_policy" "test" { + name = %[1]q + + firewall_policy { + stateless_fragment_default_actions = 
["aws:drop"] + stateless_default_actions = ["aws:pass"] + + stateful_engine_options { + flow_timeouts { + tcp_idle_timeout_seconds = 60 + } + rule_order = "STRICT_ORDER" + stream_exception_policy = "DROP" + } + } +} +data "aws_networkfirewall_firewall_policy" "test" { + arn = aws_networkfirewall_firewall_policy.test.arn +} +`, rName) +} + +func testAccFirewallPolicyDataSourceConfig_multipleStatefulRuleGroupReferences(rName string) string { + return acctest.ConfigCompose(testAccFirewallPolicyConfig_multipleStatefulRuleGroupReferences(rName), ` +data "aws_networkfirewall_firewall_policy" "test" { + arn = aws_networkfirewall_firewall_policy.test.arn +} +`) +} diff --git a/internal/service/networkfirewall/firewall_test.go b/internal/service/networkfirewall/firewall_test.go index ffb75b1e0eda..b0d79dba9ee3 100644 --- a/internal/service/networkfirewall/firewall_test.go +++ b/internal/service/networkfirewall/firewall_test.go @@ -448,6 +448,118 @@ func TestAccNetworkFirewallFirewall_tags(t *testing.T) { }) } +func TestAccNetworkFirewallFirewall_transitGatewayAttachment_basic(t *testing.T) { + ctx := acctest.Context(t) + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_networkfirewall_firewall.test" + transitGatewayResourceName := "aws_ec2_transit_gateway.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkFirewallServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckFirewallDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccFirewallConfig_transitGatewayAttachment(rName, false, 0, 1), + Check: resource.ComposeTestCheckFunc( + testAccCheckFirewallExists(ctx, resourceName), + resource.TestCheckTypeSetElemAttrPair(resourceName, names.AttrTransitGatewayID, 
transitGatewayResourceName, names.AttrID), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccNetworkFirewallFirewall_transitGatewayAttachment_updateProtection(t *testing.T) { + ctx := acctest.Context(t) + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_networkfirewall_firewall.test" + transitGatewayResourceName := "aws_ec2_transit_gateway.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkFirewallServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckFirewallDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccFirewallConfig_transitGatewayAttachment(rName, true, 0, 1), + Check: resource.ComposeTestCheckFunc( + testAccCheckFirewallExists(ctx, resourceName), + resource.TestCheckTypeSetElemAttrPair(resourceName, names.AttrTransitGatewayID, transitGatewayResourceName, names.AttrID), + resource.TestCheckResourceAttr(resourceName, "availability_zone_change_protection", acctest.CtTrue), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccFirewallConfig_transitGatewayAttachment(rName, false, 0, 1), + Check: resource.ComposeTestCheckFunc( + testAccCheckFirewallExists(ctx, resourceName), + resource.TestCheckTypeSetElemAttrPair(resourceName, names.AttrTransitGatewayID, transitGatewayResourceName, names.AttrID), + resource.TestCheckResourceAttr(resourceName, "availability_zone_change_protection", acctest.CtFalse), + ), + }, + }, + }) +} + +func TestAccNetworkFirewallFirewall_transitGatewayAttachment_updateAvailabilityZone(t *testing.T) { + ctx := acctest.Context(t) + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } 
+ + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_networkfirewall_firewall.test" + transitGatewayResourceName := "aws_ec2_transit_gateway.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkFirewallServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckFirewallDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccFirewallConfig_transitGatewayAttachment(rName, false, 0, 1), + Check: resource.ComposeTestCheckFunc( + testAccCheckFirewallExists(ctx, resourceName), + resource.TestCheckTypeSetElemAttrPair(resourceName, names.AttrTransitGatewayID, transitGatewayResourceName, names.AttrID), + resource.TestCheckResourceAttr(resourceName, "availability_zone_change_protection", acctest.CtFalse), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccFirewallConfig_transitGatewayAttachment(rName, false, 1, 2), + Check: resource.ComposeTestCheckFunc( + testAccCheckFirewallExists(ctx, resourceName), + resource.TestCheckTypeSetElemAttrPair(resourceName, names.AttrTransitGatewayID, transitGatewayResourceName, names.AttrID), + resource.TestCheckResourceAttr(resourceName, "availability_zone_change_protection", acctest.CtFalse), + ), + }, + }, + }) +} + func TestAccNetworkFirewallFirewall_disappears(t *testing.T) { ctx := acctest.Context(t) rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -528,7 +640,7 @@ func testAccPreCheck(ctx context.Context, t *testing.T) { } } -func testAccFirewallConfig_base(rName string) string { +func testAccFirewallConfig_baseVPC(rName string) string { return acctest.ConfigCompose(acctest.ConfigVPCWithSubnets(rName, 1), fmt.Sprintf(` resource "aws_networkfirewall_firewall_policy" "test" { name = %[1]q @@ -542,7 +654,7 @@ resource 
"aws_networkfirewall_firewall_policy" "test" { } func testAccFirewallConfig_basic(rName string) string { - return acctest.ConfigCompose(testAccFirewallConfig_base(rName), fmt.Sprintf(` + return acctest.ConfigCompose(testAccFirewallConfig_baseVPC(rName), fmt.Sprintf(` resource "aws_networkfirewall_firewall" "test" { name = %[1]q firewall_policy_arn = aws_networkfirewall_firewall_policy.test.arn @@ -556,7 +668,7 @@ resource "aws_networkfirewall_firewall" "test" { } func testAccFirewallConfig_deleteProtection(rName string, deleteProtection bool) string { - return acctest.ConfigCompose(testAccFirewallConfig_base(rName), fmt.Sprintf(` + return acctest.ConfigCompose(testAccFirewallConfig_baseVPC(rName), fmt.Sprintf(` resource "aws_networkfirewall_firewall" "test" { delete_protection = %[1]t name = %[2]q @@ -571,7 +683,7 @@ resource "aws_networkfirewall_firewall" "test" { } func testAccFirewallConfig_tags1(rName, tagKey1, tagValue1 string) string { - return acctest.ConfigCompose(testAccFirewallConfig_base(rName), fmt.Sprintf(` + return acctest.ConfigCompose(testAccFirewallConfig_baseVPC(rName), fmt.Sprintf(` resource "aws_networkfirewall_firewall" "test" { name = %[1]q firewall_policy_arn = aws_networkfirewall_firewall_policy.test.arn @@ -589,7 +701,7 @@ resource "aws_networkfirewall_firewall" "test" { } func testAccFirewallConfig_tags2(rName, tagKey1, tagValue1, tagKey2, tagValue2 string) string { - return acctest.ConfigCompose(testAccFirewallConfig_base(rName), fmt.Sprintf(` + return acctest.ConfigCompose(testAccFirewallConfig_baseVPC(rName), fmt.Sprintf(` resource "aws_networkfirewall_firewall" "test" { name = %[1]q firewall_policy_arn = aws_networkfirewall_firewall_policy.test.arn @@ -608,7 +720,7 @@ resource "aws_networkfirewall_firewall" "test" { } func testAccFirewallConfig_description(rName, description string) string { - return acctest.ConfigCompose(testAccFirewallConfig_base(rName), fmt.Sprintf(` + return 
acctest.ConfigCompose(testAccFirewallConfig_baseVPC(rName), fmt.Sprintf(` resource "aws_networkfirewall_firewall" "test" { name = %[1]q description = %[2]q @@ -623,7 +735,7 @@ resource "aws_networkfirewall_firewall" "test" { } func testAccFirewallConfig_enabledAnalysisTypes(rName string, enabledAnalysisTypes []string) string { - return acctest.ConfigCompose(testAccFirewallConfig_base(rName), fmt.Sprintf(` + return acctest.ConfigCompose(testAccFirewallConfig_baseVPC(rName), fmt.Sprintf(` resource "aws_networkfirewall_firewall" "test" { name = %[1]q enabled_analysis_types = ["%[2]s"] @@ -638,7 +750,7 @@ resource "aws_networkfirewall_firewall" "test" { } func testAccFirewallConfig_updateSubnet(rName string) string { - return acctest.ConfigCompose(testAccFirewallConfig_base(rName), fmt.Sprintf(` + return acctest.ConfigCompose(testAccFirewallConfig_baseVPC(rName), fmt.Sprintf(` resource "aws_subnet" "example" { availability_zone = data.aws_availability_zones.available.names[1] cidr_block = cidrsubnet(aws_vpc.test.cidr_block, 8, 1) @@ -666,7 +778,7 @@ resource "aws_networkfirewall_firewall" "test" { } func testAccFirewallConfig_updateMultipleSubnets(rName string) string { - return acctest.ConfigCompose(testAccFirewallConfig_base(rName), fmt.Sprintf(` + return acctest.ConfigCompose(testAccFirewallConfig_baseVPC(rName), fmt.Sprintf(` resource "aws_subnet" "example" { availability_zone = data.aws_availability_zones.available.names[1] cidr_block = cidrsubnet(aws_vpc.test.cidr_block, 8, 1) @@ -702,7 +814,7 @@ resource "aws_networkfirewall_firewall" "test" { } func testAccFirewallConfig_encryptionConfiguration(rName, description string) string { - return acctest.ConfigCompose(testAccFirewallConfig_base(rName), fmt.Sprintf(` + return acctest.ConfigCompose(testAccFirewallConfig_baseVPC(rName), fmt.Sprintf(` resource "aws_kms_key" "test" { deletion_window_in_days = 7 enable_key_rotation = true @@ -749,3 +861,40 @@ resource "aws_networkfirewall_firewall" "test" { } `, rName)) } + 
+func testAccFirewallConfig_baseTGW(rName string) string { + return acctest.ConfigCompose(acctest.ConfigAvailableAZsNoOptIn(), fmt.Sprintf(` +resource "aws_ec2_transit_gateway" "test" { + tags = { + Name = %[1]q + } +} + +resource "aws_networkfirewall_firewall_policy" "test" { + name = %[1]q + + firewall_policy { + stateless_fragment_default_actions = ["aws:drop"] + stateless_default_actions = ["aws:pass"] + } +} +`, rName)) +} + +func testAccFirewallConfig_transitGatewayAttachment(rName string, changeProtection bool, availabilityZoneStartIndex, availabilityZoneEndIndex int) string { + return acctest.ConfigCompose(testAccFirewallConfig_baseTGW(rName), fmt.Sprintf(` +resource "aws_networkfirewall_firewall" "test" { + name = %[1]q + firewall_policy_arn = aws_networkfirewall_firewall_policy.test.arn + transit_gateway_id = aws_ec2_transit_gateway.test.id + availability_zone_change_protection = %[2]t + + dynamic "availability_zone_mapping" { + for_each = slice(data.aws_availability_zones.available.zone_ids, %[3]d, %[4]d) + content { + availability_zone_id = availability_zone_mapping.value + } + } +} +`, rName, changeProtection, availabilityZoneStartIndex, availabilityZoneEndIndex)) +} diff --git a/internal/service/networkfirewall/firewall_transit_gateway_attachment_accepter.go b/internal/service/networkfirewall/firewall_transit_gateway_attachment_accepter.go new file mode 100644 index 000000000000..917d05d8b926 --- /dev/null +++ b/internal/service/networkfirewall/firewall_transit_gateway_attachment_accepter.go @@ -0,0 +1,168 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package networkfirewall + +import ( + "context" + "fmt" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" + ec2types "github.com/aws/aws-sdk-go-v2/service/ec2/types" + "github.com/aws/aws-sdk-go-v2/service/networkfirewall" + awstypes "github.com/aws/aws-sdk-go-v2/service/networkfirewall/types" + "github.com/hashicorp/terraform-plugin-framework-timeouts/resource/timeouts" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-provider-aws/internal/errs" + "github.com/hashicorp/terraform-provider-aws/internal/errs/fwdiag" + "github.com/hashicorp/terraform-provider-aws/internal/framework" + fwflex "github.com/hashicorp/terraform-provider-aws/internal/framework/flex" + tfec2 "github.com/hashicorp/terraform-provider-aws/internal/service/ec2" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + "github.com/hashicorp/terraform-provider-aws/names" +) + +// @FrameworkResource("aws_networkfirewall_firewall_transit_gateway_attachment_accepter", name="Firewall Transit Gateway Attachment Accepter") +func newFirewallTransitGatewayAttachmentAccepterResource(_ context.Context) (resource.ResourceWithConfigure, error) { + r := &firewallTransitGatewayAttachmentAccepterResource{} + + r.SetDefaultCreateTimeout(60 * time.Minute) + r.SetDefaultDeleteTimeout(60 * time.Minute) + + return r, nil +} + +type firewallTransitGatewayAttachmentAccepterResource struct { + framework.ResourceWithModel[firewallTransitGatewayAttachmentAccepterResourceModel] + framework.WithTimeouts + framework.WithNoUpdate +} + +func (r 
*firewallTransitGatewayAttachmentAccepterResource) Schema(ctx context.Context, request resource.SchemaRequest, response *resource.SchemaResponse) { + response.Schema = schema.Schema{ + Attributes: map[string]schema.Attribute{ + names.AttrTransitGatewayAttachmentID: schema.StringAttribute{ + Required: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + }, + }, + Blocks: map[string]schema.Block{ + names.AttrTimeouts: timeouts.Block(ctx, timeouts.Opts{ + Create: true, + Delete: true, + }), + }, + } +} + +func (r *firewallTransitGatewayAttachmentAccepterResource) Create(ctx context.Context, request resource.CreateRequest, response *resource.CreateResponse) { + var data firewallTransitGatewayAttachmentAccepterResourceModel + response.Diagnostics.Append(request.Plan.Get(ctx, &data)...) + if response.Diagnostics.HasError() { + return + } + + conn := r.Meta().NetworkFirewallClient(ctx) + + tgwAttachmentID := fwflex.StringValueFromFramework(ctx, data.TransitGatewayAttachmentID) + input := networkfirewall.AcceptNetworkFirewallTransitGatewayAttachmentInput{ + TransitGatewayAttachmentId: aws.String(tgwAttachmentID), + } + + _, err := conn.AcceptNetworkFirewallTransitGatewayAttachment(ctx, &input) + + if err != nil { + response.Diagnostics.AddError(fmt.Sprintf("accepting NetworkFirewall Firewall Transit Gateway Attachment (%s)", tgwAttachmentID), err.Error()) + + return + } + + if _, err := tfec2.WaitTransitGatewayAttachmentAccepted(ctx, r.Meta().EC2Client(ctx), tgwAttachmentID, r.CreateTimeout(ctx, data.Timeouts)); err != nil { + response.Diagnostics.AddError(fmt.Sprintf("waiting for NetworkFirewall Firewall Transit Gateway Attachment (%s) accept", tgwAttachmentID), err.Error()) + + return + } + + response.Diagnostics.Append(response.State.Set(ctx, data)...) 
+} + +func (r *firewallTransitGatewayAttachmentAccepterResource) Read(ctx context.Context, request resource.ReadRequest, response *resource.ReadResponse) { + var data firewallTransitGatewayAttachmentAccepterResourceModel + response.Diagnostics.Append(request.State.Get(ctx, &data)...) + if response.Diagnostics.HasError() { + return + } + + tgwAttachmentID := fwflex.StringValueFromFramework(ctx, data.TransitGatewayAttachmentID) + output, err := tfec2.FindTransitGatewayAttachmentByID(ctx, r.Meta().EC2Client(ctx), tgwAttachmentID) + + if err == nil && output.State == ec2types.TransitGatewayAttachmentStateDeleted { + err = tfresource.NewEmptyResultError(tgwAttachmentID) + } + + if tfresource.NotFound(err) { + response.Diagnostics.Append(fwdiag.NewResourceNotFoundWarningDiagnostic(err)) + response.State.RemoveResource(ctx) + + return + } + + if err != nil { + response.Diagnostics.AddError(fmt.Sprintf("reading NetworkFirewall Firewall Transit Gateway Attachment (%s)", tgwAttachmentID), err.Error()) + + return + } + + response.Diagnostics.Append(response.State.Set(ctx, &data)...) +} + +func (r *firewallTransitGatewayAttachmentAccepterResource) Delete(ctx context.Context, request resource.DeleteRequest, response *resource.DeleteResponse) { + var data firewallTransitGatewayAttachmentAccepterResourceModel + response.Diagnostics.Append(request.State.Get(ctx, &data)...) 
+ if response.Diagnostics.HasError() { + return + } + + conn := r.Meta().NetworkFirewallClient(ctx) + + tgwAttachmentID := fwflex.StringValueFromFramework(ctx, data.TransitGatewayAttachmentID) + input := networkfirewall.DeleteNetworkFirewallTransitGatewayAttachmentInput{ + TransitGatewayAttachmentId: aws.String(tgwAttachmentID), + } + + _, err := conn.DeleteNetworkFirewallTransitGatewayAttachment(ctx, &input) + + if errs.IsA[*awstypes.ResourceNotFoundException](err) { + return + } + + if err != nil { + response.Diagnostics.AddError(fmt.Sprintf("deleting NetworkFirewall Firewall Transit Gateway Attachment (%s)", tgwAttachmentID), err.Error()) + + return + } + + if _, err := tfec2.WaitTransitGatewayAttachmentDeleted(ctx, r.Meta().EC2Client(ctx), tgwAttachmentID, r.DeleteTimeout(ctx, data.Timeouts)); err != nil { + response.Diagnostics.AddError(fmt.Sprintf("waiting for NetworkFirewall Firewall Transit Gateway Attachment (%s) delete", tgwAttachmentID), err.Error()) + + return + } +} + +func (r *firewallTransitGatewayAttachmentAccepterResource) ImportState(ctx context.Context, request resource.ImportStateRequest, response *resource.ImportStateResponse) { + resource.ImportStatePassthroughID(ctx, path.Root(names.AttrTransitGatewayAttachmentID), request, response) +} + +type firewallTransitGatewayAttachmentAccepterResourceModel struct { + framework.WithRegionModel + TransitGatewayAttachmentID types.String `tfsdk:"transit_gateway_attachment_id"` + Timeouts timeouts.Value `tfsdk:"timeouts"` +} diff --git a/internal/service/networkfirewall/firewall_transit_gateway_attachment_accepter_test.go b/internal/service/networkfirewall/firewall_transit_gateway_attachment_accepter_test.go new file mode 100644 index 000000000000..107823c4804c --- /dev/null +++ b/internal/service/networkfirewall/firewall_transit_gateway_attachment_accepter_test.go @@ -0,0 +1,212 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package networkfirewall_test + +import ( + "context" + "fmt" + "testing" + + ec2types "github.com/aws/aws-sdk-go-v2/service/ec2/types" + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/plancheck" + "github.com/hashicorp/terraform-plugin-testing/terraform" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + tfec2 "github.com/hashicorp/terraform-provider-aws/internal/service/ec2" + tfnetworkfirewall "github.com/hashicorp/terraform-provider-aws/internal/service/networkfirewall" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccNetworkFirewallFirewallTransitGatewayAttachmentAccepter_basic(t *testing.T) { + ctx := acctest.Context(t) + + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_networkfirewall_firewall_transit_gateway_attachment_accepter.test" + var v ec2types.TransitGatewayAttachment + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckAlternateAccount(t) + testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkFirewallServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + CheckDestroy: testAccCheckFirewallTransitGatewayAttachmentAccepterDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccFirewallTransitGatewayAttachmentAccepterConfig_basic(rName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckFirewallTransitGatewayAttachmentAccepterExists(ctx, resourceName, &v), + resource.TestCheckResourceAttrSet(resourceName, names.AttrTransitGatewayAttachmentID), + ), + }, + { + 
ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, names.AttrTransitGatewayAttachmentID), + ImportStateVerifyIdentifierAttribute: names.AttrTransitGatewayAttachmentID, + }, + }, + }) +} + +func TestAccNetworkFirewallFirewallTransitGatewayAttachmentAccepter_disappears(t *testing.T) { + ctx := acctest.Context(t) + + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + var v ec2types.TransitGatewayAttachment + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_networkfirewall_firewall_transit_gateway_attachment_accepter.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkFirewallServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + CheckDestroy: testAccCheckFirewallTransitGatewayAttachmentAccepterDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccFirewallTransitGatewayAttachmentAccepterConfig_basic(rName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckFirewallTransitGatewayAttachmentAccepterExists(ctx, resourceName, &v), + acctest.CheckFrameworkResourceDisappears(ctx, acctest.Provider, tfnetworkfirewall.ResourceFirewallTransitGatewayAttachmentAccepter, resourceName), + ), + ExpectNonEmptyPlan: true, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionReplace), + }, + }, + }, + }, + }) +} + +func testAccCheckFirewallTransitGatewayAttachmentAccepterDestroy(ctx context.Context) resource.TestCheckFunc { + return func(s *terraform.State) error { + conn := acctest.Provider.Meta().(*conns.AWSClient).EC2Client(ctx) + + for _, rs := range s.RootModule().Resources { + if rs.Type != 
"aws_networkfirewall_firewall_transit_gateway_attachment_accepter" { + continue + } + + output, err := tfec2.FindTransitGatewayAttachmentByID(ctx, conn, rs.Primary.Attributes[names.AttrTransitGatewayAttachmentID]) + + if tfresource.NotFound(err) { + continue + } + + if err != nil { + return err + } + + if output.State == ec2types.TransitGatewayAttachmentStateDeleted { + continue + } + + return fmt.Errorf("NetworkFirewall Firewall Transit Gateway Attachment %s still exists", rs.Primary.Attributes[names.AttrTransitGatewayAttachmentID]) + } + + return nil + } +} + +func testAccCheckFirewallTransitGatewayAttachmentAccepterExists(ctx context.Context, n string, v *ec2types.TransitGatewayAttachment) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + conn := acctest.Provider.Meta().(*conns.AWSClient).EC2Client(ctx) + + output, err := tfec2.FindTransitGatewayAttachmentByID(ctx, conn, rs.Primary.Attributes[names.AttrTransitGatewayAttachmentID]) + + if err != nil { + return err + } + + v = output + + return nil + } +} + +func testAccFirewallTransitGatewayAttachmentAccepterConfig_basic(rName string) string { + return acctest.ConfigCompose(acctest.ConfigAlternateAccountProvider(), acctest.ConfigAvailableAZsNoOptIn(), fmt.Sprintf(` +resource "aws_ec2_transit_gateway" "test" { + tags = { + Name = %[1]q + } +} + +resource "aws_ram_resource_share" "test" { + name = %[1]q + + tags = { + Name = %[1]q + } +} + +resource "aws_ram_resource_association" "test" { + resource_arn = aws_ec2_transit_gateway.test.arn + resource_share_arn = aws_ram_resource_share.test.id +} + +# attachment creator. 
+data "aws_caller_identity" "creator" { + provider = "awsalternate" +} + +resource "aws_ram_principal_association" "test" { + principal = data.aws_caller_identity.creator.account_id + resource_share_arn = aws_ram_resource_share.test.id +} + +resource "aws_networkfirewall_firewall_policy" "test" { + provider = "awsalternate" + + name = %[1]q + + firewall_policy { + stateless_fragment_default_actions = ["aws:drop"] + stateless_default_actions = ["aws:pass"] + } +} + +resource "aws_networkfirewall_firewall" "test" { + provider = "awsalternate" + + name = %[1]q + firewall_policy_arn = aws_networkfirewall_firewall_policy.test.arn + transit_gateway_id = aws_ec2_transit_gateway.test.id + + availability_zone_mapping { + availability_zone_id = data.aws_availability_zones.available.zone_ids[0] + } + + depends_on = [ + aws_ram_resource_association.test, + aws_ram_principal_association.test, + ] +} + +resource "aws_networkfirewall_firewall_transit_gateway_attachment_accepter" "test" { + transit_gateway_attachment_id = aws_networkfirewall_firewall.test.firewall_status[0].transit_gateway_attachment_sync_states[0].attachment_id +} +`, rName)) +} diff --git a/internal/service/networkfirewall/logging_configuration.go b/internal/service/networkfirewall/logging_configuration.go index f8c07b10d7db..b163bb437e43 100644 --- a/internal/service/networkfirewall/logging_configuration.go +++ b/internal/service/networkfirewall/logging_configuration.go @@ -38,6 +38,11 @@ func resourceLoggingConfiguration() *schema.Resource { }, Schema: map[string]*schema.Schema{ + "enable_monitoring_dashboard": { + Type: schema.TypeBool, + Optional: true, + Computed: true, + }, "firewall_arn": { Type: schema.TypeString, Required: true, @@ -114,6 +119,19 @@ func resourceLoggingConfigurationCreate(ctx context.Context, d *schema.ResourceD firewallARN := d.Get("firewall_arn").(string) + if v := d.Get("enable_monitoring_dashboard"); v != nil { + input := &networkfirewall.UpdateLoggingConfigurationInput{ + 
FirewallArn: aws.String(firewallARN), + EnableMonitoringDashboard: aws.Bool(v.(bool)), + } + + _, err := conn.UpdateLoggingConfiguration(ctx, input) + + if err != nil { + return sdkdiag.AppendFromErr(diags, err) + } + } + if v, ok := d.GetOk(names.AttrLoggingConfiguration); ok && len(v.([]any)) > 0 && v.([]any)[0] != nil { tfMap := v.([]any)[0].(map[string]any) @@ -145,6 +163,7 @@ func resourceLoggingConfigurationRead(ctx context.Context, d *schema.ResourceDat return sdkdiag.AppendErrorf(diags, "reading NetworkFirewall Logging Configuration (%s): %s", d.Id(), err) } + d.Set("enable_monitoring_dashboard", output.EnableMonitoringDashboard) d.Set("firewall_arn", output.FirewallArn) if err := d.Set(names.AttrLoggingConfiguration, flattenLoggingConfiguration(output.LoggingConfiguration)); err != nil { return sdkdiag.AppendErrorf(diags, "setting logging_configuration: %s", err) @@ -165,12 +184,32 @@ func resourceLoggingConfigurationUpdate(ctx context.Context, d *schema.ResourceD o, n := d.GetChange("logging_configuration.0.log_destination_config") os, ns := o.(*schema.Set), n.(*schema.Set) - add, del := ns.Difference(os), os.Difference(ns) + + var add, del *schema.Set + // To change enable_monitoring_dashboard, all log_destination_config must first be removed. + // Then enable_monitoring_dashboard can be changed, followed by adding log_destination_config back. 
+ if d.HasChanges("enable_monitoring_dashboard") { + add, del = ns, os + } else { + add, del = ns.Difference(os), os.Difference(ns) + } if err := deleteLogDestinationConfigs(ctx, conn, d.Id(), output.LoggingConfiguration, expandLogDestinationConfigs(del.List())); err != nil { return sdkdiag.AppendFromErr(diags, err) } + if d.HasChanges("enable_monitoring_dashboard") { + input := &networkfirewall.UpdateLoggingConfigurationInput{ + FirewallArn: output.FirewallArn, + EnableMonitoringDashboard: aws.Bool(d.Get("enable_monitoring_dashboard").(bool)), + } + _, err := conn.UpdateLoggingConfiguration(ctx, input) + + if err != nil { + return sdkdiag.AppendFromErr(diags, err) + } + } + if err := addLogDestinationConfigs(ctx, conn, d.Id(), output.LoggingConfiguration, expandLogDestinationConfigs(add.List())); err != nil { return sdkdiag.AppendFromErr(diags, err) } diff --git a/internal/service/networkfirewall/logging_configuration_test.go b/internal/service/networkfirewall/logging_configuration_test.go index 26ef1ea646a5..a98bb142fc8a 100644 --- a/internal/service/networkfirewall/logging_configuration_test.go +++ b/internal/service/networkfirewall/logging_configuration_test.go @@ -36,6 +36,7 @@ func TestAccNetworkFirewallLoggingConfiguration_CloudWatchLogDestination_logGrou Config: testAccLoggingConfigurationConfig_cloudWatch(logGroupName, rName, string(awstypes.LogDestinationTypeCloudwatchLogs), string(awstypes.LogTypeFlow)), Check: resource.ComposeTestCheckFunc( testAccCheckLoggingConfigurationExists(ctx, resourceName), + resource.TestCheckResourceAttr(resourceName, "enable_monitoring_dashboard", acctest.CtFalse), resource.TestCheckResourceAttr(resourceName, "logging_configuration.#", "1"), resource.TestCheckResourceAttr(resourceName, "logging_configuration.0.log_destination_config.#", "1"), resource.TestCheckTypeSetElemNestedAttrs(resourceName, "logging_configuration.0.log_destination_config.*", map[string]string{ @@ -49,6 +50,7 @@ func 
TestAccNetworkFirewallLoggingConfiguration_CloudWatchLogDestination_logGrou Config: testAccLoggingConfigurationConfig_cloudWatch(updatedLogGroupName, rName, string(awstypes.LogDestinationTypeCloudwatchLogs), string(awstypes.LogTypeFlow)), Check: resource.ComposeTestCheckFunc( testAccCheckLoggingConfigurationExists(ctx, resourceName), + resource.TestCheckResourceAttr(resourceName, "enable_monitoring_dashboard", acctest.CtFalse), resource.TestCheckResourceAttr(resourceName, "logging_configuration.#", "1"), resource.TestCheckResourceAttr(resourceName, "logging_configuration.0.log_destination_config.#", "1"), resource.TestCheckTypeSetElemNestedAttrs(resourceName, "logging_configuration.0.log_destination_config.*", map[string]string{ @@ -62,6 +64,7 @@ func TestAccNetworkFirewallLoggingConfiguration_CloudWatchLogDestination_logGrou Config: testAccLoggingConfigurationConfig_cloudWatch(updatedLogGroupName, rName, string(awstypes.LogDestinationTypeCloudwatchLogs), string(awstypes.LogTypeTls)), Check: resource.ComposeTestCheckFunc( testAccCheckLoggingConfigurationExists(ctx, resourceName), + resource.TestCheckResourceAttr(resourceName, "enable_monitoring_dashboard", acctest.CtFalse), resource.TestCheckResourceAttr(resourceName, "logging_configuration.#", "1"), resource.TestCheckResourceAttr(resourceName, "logging_configuration.0.log_destination_config.#", "1"), resource.TestCheckTypeSetElemNestedAttrs(resourceName, "logging_configuration.0.log_destination_config.*", map[string]string{ @@ -742,6 +745,115 @@ func TestAccNetworkFirewallLoggingConfiguration_updateToSingleTLSTypeLogDestinat }) } +func TestAccNetworkFirewallLoggingConfiguration_enableMonitoringDashboard(t *testing.T) { + ctx := acctest.Context(t) + bucketName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + logGroupName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_networkfirewall_logging_configuration.test" + + 
resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkFirewallServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckLoggingConfigurationDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccLoggingConfigurationConfig_s3AndCloudWatchEnableMonitoringDashboard(bucketName, logGroupName, rName, string(awstypes.LogTypeAlert), string(awstypes.LogTypeFlow), true), + Check: resource.ComposeTestCheckFunc( + testAccCheckLoggingConfigurationExists(ctx, resourceName), + resource.TestCheckResourceAttr(resourceName, "enable_monitoring_dashboard", acctest.CtTrue), + resource.TestCheckResourceAttr(resourceName, "logging_configuration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "logging_configuration.0.log_destination_config.#", "2"), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "logging_configuration.0.log_destination_config.*", map[string]string{ + "log_destination.%": "1", + "log_destination.logGroup": logGroupName, + "log_destination_type": string(awstypes.LogDestinationTypeCloudwatchLogs), + "log_type": string(awstypes.LogTypeFlow), + }), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "logging_configuration.0.log_destination_config.*", map[string]string{ + "log_destination.%": "1", + "log_destination.bucketName": bucketName, + "log_destination_type": string(awstypes.LogDestinationTypeS3), + "log_type": string(awstypes.LogTypeAlert), + }), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + // Disable Monitoring Dashboard + Config: testAccLoggingConfigurationConfig_s3AndCloudWatchEnableMonitoringDashboard(bucketName, logGroupName, rName, string(awstypes.LogTypeAlert), string(awstypes.LogTypeFlow), false), + Check: resource.ComposeTestCheckFunc( + testAccCheckLoggingConfigurationExists(ctx, resourceName), + 
resource.TestCheckResourceAttr(resourceName, "enable_monitoring_dashboard", acctest.CtFalse), + resource.TestCheckResourceAttr(resourceName, "logging_configuration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "logging_configuration.0.log_destination_config.#", "2"), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "logging_configuration.0.log_destination_config.*", map[string]string{ + "log_destination.%": "1", + "log_destination.logGroup": logGroupName, + "log_destination_type": string(awstypes.LogDestinationTypeCloudwatchLogs), + "log_type": string(awstypes.LogTypeFlow), + }), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "logging_configuration.0.log_destination_config.*", map[string]string{ + "log_destination.%": "1", + "log_destination.bucketName": bucketName, + "log_destination_type": string(awstypes.LogDestinationTypeS3), + "log_type": string(awstypes.LogTypeAlert), + }), + ), + }, + { + // Re-Enable Monitoring Dashboard and change log types at the same time + Config: testAccLoggingConfigurationConfig_s3AndCloudWatchEnableMonitoringDashboard(bucketName, logGroupName, rName, string(awstypes.LogTypeTls), string(awstypes.LogTypeFlow), true), + Check: resource.ComposeTestCheckFunc( + testAccCheckLoggingConfigurationExists(ctx, resourceName), + resource.TestCheckResourceAttr(resourceName, "enable_monitoring_dashboard", acctest.CtTrue), + resource.TestCheckResourceAttr(resourceName, "logging_configuration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "logging_configuration.0.log_destination_config.#", "2"), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "logging_configuration.0.log_destination_config.*", map[string]string{ + "log_destination.%": "1", + "log_destination.logGroup": logGroupName, + "log_destination_type": string(awstypes.LogDestinationTypeCloudwatchLogs), + "log_type": string(awstypes.LogTypeFlow), + }), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, 
"logging_configuration.0.log_destination_config.*", map[string]string{ + "log_destination.%": "1", + "log_destination.bucketName": bucketName, + "log_destination_type": string(awstypes.LogDestinationTypeS3), + "log_type": string(awstypes.LogTypeTls), + }), + ), + }, + { + // Omit enable_monitoring_dashboard (inherit previous value) + Config: testAccLoggingConfigurationConfig_s3AndCloudWatch(bucketName, logGroupName, rName, string(awstypes.LogTypeTls), string(awstypes.LogTypeFlow)), + Check: resource.ComposeTestCheckFunc( + testAccCheckLoggingConfigurationExists(ctx, resourceName), + resource.TestCheckResourceAttr(resourceName, "enable_monitoring_dashboard", acctest.CtTrue), + resource.TestCheckResourceAttr(resourceName, "logging_configuration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "logging_configuration.0.log_destination_config.#", "2"), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "logging_configuration.0.log_destination_config.*", map[string]string{ + "log_destination.%": "1", + "log_destination.logGroup": logGroupName, + "log_destination_type": string(awstypes.LogDestinationTypeCloudwatchLogs), + "log_type": string(awstypes.LogTypeFlow), + }), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "logging_configuration.0.log_destination_config.*", map[string]string{ + "log_destination.%": "1", + "log_destination.bucketName": bucketName, + "log_destination_type": string(awstypes.LogDestinationTypeS3), + "log_type": string(awstypes.LogTypeTls), + }), + ), + }, + }, + }) +} + func TestAccNetworkFirewallLoggingConfiguration_disappears(t *testing.T) { ctx := acctest.Context(t) bucketName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -1151,3 +1263,35 @@ resource "aws_networkfirewall_logging_configuration" "test" { } `, logTypeS3, logTypeCloudWatch)) } + +func testAccLoggingConfigurationConfig_s3AndCloudWatchEnableMonitoringDashboard(bucketName, logGroupName, rName, logTypeS3, logTypeCloudWatch string, 
enableMonitoringDashboard bool) string { + return acctest.ConfigCompose( + testAccLoggingConfigurationConfig_base(rName), + testAccLoggingConfigurationConfig_baseS3Bucket(bucketName), + testAccLoggingConfigurationConfig_baseCloudWatch(logGroupName), + fmt.Sprintf(` +resource "aws_networkfirewall_logging_configuration" "test" { + firewall_arn = aws_networkfirewall_firewall.test.arn + + enable_monitoring_dashboard = %[3]t + + logging_configuration { + log_destination_config { + log_destination = { + bucketName = aws_s3_bucket.test.bucket + } + log_destination_type = "S3" + log_type = %[1]q + } + + log_destination_config { + log_destination = { + logGroup = aws_cloudwatch_log_group.test.name + } + log_destination_type = "CloudWatchLogs" + log_type = %[2]q + } + } +} +`, logTypeS3, logTypeCloudWatch, enableMonitoringDashboard)) +} diff --git a/internal/service/networkfirewall/resource_policy.go b/internal/service/networkfirewall/resource_policy.go index ca57216362f5..8ededeb96637 100644 --- a/internal/service/networkfirewall/resource_policy.go +++ b/internal/service/networkfirewall/resource_policy.go @@ -15,10 +15,10 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/structure" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" + "github.com/hashicorp/terraform-provider-aws/internal/sdkv2" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/internal/verify" "github.com/hashicorp/terraform-provider-aws/names" @@ -37,17 +37,7 @@ func resourceResourcePolicy() *schema.Resource { }, Schema: map[string]*schema.Schema{ - names.AttrPolicy: { - Type: schema.TypeString, - Required: true, 
- ValidateFunc: validation.StringIsJSON, - DiffSuppressFunc: verify.SuppressEquivalentPolicyDiffs, - DiffSuppressOnRefresh: true, - StateFunc: func(v any) string { - json, _ := structure.NormalizeJsonString(v) - return json - }, - }, + names.AttrPolicy: sdkv2.IAMPolicyDocumentSchemaRequired(), names.AttrResourceARN: { Type: schema.TypeString, Required: true, @@ -121,7 +111,7 @@ func resourceResourcePolicyDelete(ctx context.Context, d *schema.ResourceData, m const ( timeout = 2 * time.Minute ) - _, err := tfresource.RetryWhenIsAErrorMessageContains[*awstypes.InvalidResourcePolicyException](ctx, timeout, func() (any, error) { + _, err := tfresource.RetryWhenIsAErrorMessageContains[any, *awstypes.InvalidResourcePolicyException](ctx, timeout, func(ctx context.Context) (any, error) { return conn.DeleteResourcePolicy(ctx, &networkfirewall.DeleteResourcePolicyInput{ ResourceArn: aws.String(d.Id()), }) diff --git a/internal/service/networkfirewall/rule_group.go b/internal/service/networkfirewall/rule_group.go index 4a94b7611943..b15748975d4d 100644 --- a/internal/service/networkfirewall/rule_group.go +++ b/internal/service/networkfirewall/rule_group.go @@ -250,9 +250,12 @@ func resourceRuleGroup() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "address_definition": { - Type: schema.TypeString, - Required: true, - ValidateFunc: verify.ValidIPv4CIDRNetworkAddress, + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.Any( + verify.ValidIPv4CIDRNetworkAddress, + verify.ValidIPv6CIDRNetworkAddress, + ), }, }, }, @@ -284,9 +287,12 @@ func resourceRuleGroup() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "address_definition": { - Type: schema.TypeString, - Required: true, - ValidateFunc: verify.ValidIPv4CIDRNetworkAddress, + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.Any( + verify.ValidIPv4CIDRNetworkAddress, + verify.ValidIPv6CIDRNetworkAddress, + ), }, }, }, @@ -597,7 
+603,7 @@ func resourceRuleGroupDelete(ctx context.Context, d *schema.ResourceData, meta a const ( timeout = 10 * time.Minute ) - _, err := tfresource.RetryWhenIsAErrorMessageContains[*awstypes.InvalidOperationException](ctx, timeout, func() (any, error) { + _, err := tfresource.RetryWhenIsAErrorMessageContains[any, *awstypes.InvalidOperationException](ctx, timeout, func(ctx context.Context) (any, error) { return conn.DeleteRuleGroup(ctx, &networkfirewall.DeleteRuleGroupInput{ RuleGroupArn: aws.String(d.Id()), }) diff --git a/internal/service/networkfirewall/rule_group_test.go b/internal/service/networkfirewall/rule_group_test.go index ff3f0cb4a8d8..438f0a11e834 100644 --- a/internal/service/networkfirewall/rule_group_test.go +++ b/internal/service/networkfirewall/rule_group_test.go @@ -228,12 +228,60 @@ func TestAccNetworkFirewallRuleGroup_Basic_statelessRule(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "rule_group.0.rules_source.#", "1"), resource.TestCheckResourceAttr(resourceName, "rule_group.0.rules_source.0.stateless_rules_and_custom_actions.#", "1"), resource.TestCheckTypeSetElemNestedAttrs(resourceName, "rule_group.0.rules_source.0.stateless_rules_and_custom_actions.0.stateless_rule.*", map[string]string{ - names.AttrPriority: "1", - "rule_definition.#": "1", - "rule_definition.0.actions.#": "1", - "rule_definition.0.match_attributes.#": "1", - "rule_definition.0.match_attributes.0.destination.#": "1", - "rule_definition.0.match_attributes.0.source.#": "1", + names.AttrPriority: "1", + "rule_definition.#": "1", + "rule_definition.0.actions.#": "1", + "rule_definition.0.match_attributes.#": "1", + "rule_definition.0.match_attributes.0.destination.#": "1", + "rule_definition.0.match_attributes.0.destination.0.address_definition": "1.2.3.4/32", + "rule_definition.0.match_attributes.0.source.#": "1", + "rule_definition.0.match_attributes.0.source.0.address_definition": "124.1.1.5/32", + }), + resource.TestCheckTypeSetElemAttr(resourceName, 
"rule_group.0.rules_source.0.stateless_rules_and_custom_actions.0.stateless_rule.*.rule_definition.0.actions.*", "aws:drop"), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, "0"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccNetworkFirewallRuleGroup_Basic_statelessRuleIPv6(t *testing.T) { + ctx := acctest.Context(t) + var ruleGroup networkfirewall.DescribeRuleGroupOutput + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_networkfirewall_rule_group.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkFirewallServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckRuleGroupDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccRuleGroupConfig_basicStatelessIPv6(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckRuleGroupExists(ctx, resourceName, &ruleGroup), + acctest.CheckResourceAttrRegionalARN(ctx, resourceName, names.AttrARN, "network-firewall", fmt.Sprintf("stateless-rulegroup/%s", rName)), + resource.TestCheckResourceAttr(resourceName, "capacity", "100"), + resource.TestCheckResourceAttr(resourceName, names.AttrName, rName), + resource.TestCheckResourceAttr(resourceName, names.AttrType, string(awstypes.RuleGroupTypeStateless)), + resource.TestCheckResourceAttr(resourceName, "rule_group.#", "1"), + resource.TestCheckResourceAttr(resourceName, "rule_group.0.rules_source.#", "1"), + resource.TestCheckResourceAttr(resourceName, "rule_group.0.rules_source.0.stateless_rules_and_custom_actions.#", "1"), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "rule_group.0.rules_source.0.stateless_rules_and_custom_actions.0.stateless_rule.*", map[string]string{ + names.AttrPriority: "1", + "rule_definition.#": "1", + 
"rule_definition.0.actions.#": "1", + "rule_definition.0.match_attributes.#": "1", + "rule_definition.0.match_attributes.0.destination.#": "1", + "rule_definition.0.match_attributes.0.destination.0.address_definition": "1111:0000:0000:0000:0000:0000:0000:0111/128", + "rule_definition.0.match_attributes.0.source.#": "1", + "rule_definition.0.match_attributes.0.source.0.address_definition": "1111:0000:0000:0000:0000:0000:0000:0000/64", }), resource.TestCheckTypeSetElemAttr(resourceName, "rule_group.0.rules_source.0.stateless_rules_and_custom_actions.0.stateless_rule.*.rule_definition.0.actions.*", "aws:drop"), resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, "0"), @@ -1588,6 +1636,40 @@ resource "aws_networkfirewall_rule_group" "test" { `, rName) } +func testAccRuleGroupConfig_basicStatelessIPv6(rName string) string { + return fmt.Sprintf(` +resource "aws_networkfirewall_rule_group" "test" { + capacity = 100 + name = %[1]q + type = "STATELESS" + + rule_group { + rules_source { + stateless_rules_and_custom_actions { + stateless_rule { + priority = 1 + + rule_definition { + actions = ["aws:drop"] + + match_attributes { + destination { + address_definition = "1111:0000:0000:0000:0000:0000:0000:0111/128" + } + + source { + address_definition = "1111:0000:0000:0000:0000:0000:0000:0000/64" + } + } + } + } + } + } + } +} +`, rName) +} + func testAccRuleGroupConfig_basic(rName, rules string) string { return fmt.Sprintf(` resource "aws_networkfirewall_rule_group" "test" { diff --git a/internal/service/networkfirewall/service_endpoint_resolver_gen.go b/internal/service/networkfirewall/service_endpoint_resolver_gen.go index 5d762fe871fc..83ab7bcfafa8 100644 --- a/internal/service/networkfirewall/service_endpoint_resolver_gen.go +++ b/internal/service/networkfirewall/service_endpoint_resolver_gen.go @@ -62,7 +62,7 @@ func (r resolverV2) ResolveEndpoint(ctx context.Context, params networkfirewall. 
}) params.UseFIPS = aws.Bool(false) } else { - err = fmt.Errorf("looking up networkfirewall endpoint %q: %s", hostname, err) + err = fmt.Errorf("looking up networkfirewall endpoint %q: %w", hostname, err) return } } else { diff --git a/internal/service/networkfirewall/service_endpoints_gen_test.go b/internal/service/networkfirewall/service_endpoints_gen_test.go index 584e4f2df683..96590264c5fd 100644 --- a/internal/service/networkfirewall/service_endpoints_gen_test.go +++ b/internal/service/networkfirewall/service_endpoints_gen_test.go @@ -521,7 +521,7 @@ func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { ) } -var errCancelOperation = fmt.Errorf("Test: Canceling request") +var errCancelOperation = errors.New("Test: Canceling request") func addCancelRequestMiddleware() func(*middleware.Stack) error { return func(stack *middleware.Stack) error { diff --git a/internal/service/networkfirewall/service_package_gen.go b/internal/service/networkfirewall/service_package_gen.go index 9b38c894cc03..eb149e560598 100644 --- a/internal/service/networkfirewall/service_package_gen.go +++ b/internal/service/networkfirewall/service_package_gen.go @@ -7,7 +7,6 @@ import ( "unique" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/networkfirewall" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -24,6 +23,12 @@ func (p *servicePackage) FrameworkDataSources(ctx context.Context) []*inttypes.S func (p *servicePackage) FrameworkResources(ctx context.Context) []*inttypes.ServicePackageFrameworkResource { return []*inttypes.ServicePackageFrameworkResource{ + { + Factory: newFirewallTransitGatewayAttachmentAccepterResource, + TypeName: "aws_networkfirewall_firewall_transit_gateway_attachment_accepter", + Name: "Firewall Transit Gateway Attachment Accepter", + Region: unique.Make(inttypes.ResourceRegionDefault()), + }, { Factory: 
newTLSInspectionConfigurationResource, TypeName: "aws_networkfirewall_tls_inspection_configuration", @@ -37,6 +42,15 @@ func (p *servicePackage) FrameworkResources(ctx context.Context) []*inttypes.Ser WrappedImport: true, }, }, + { + Factory: newVPCEndpointAssociationResource, + TypeName: "aws_networkfirewall_vpc_endpoint_association", + Name: "VPC Endpoint Association", + Tags: unique.Make(inttypes.ServicePackageResourceTags{ + IdentifierAttribute: "vpc_endpoint_association_arn", + }), + Region: unique.Make(inttypes.ResourceRegionDefault()), + }, } } @@ -132,7 +146,7 @@ func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) ( func(o *networkfirewall.Options) { if inContext, ok := conns.FromContext(ctx); ok && inContext.VCREnabled() { tflog.Info(ctx, "overriding retry behavior to immediately return VCR errors") - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(vcr.InteractionNotFoundRetryableFunc)) + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), vcr.InteractionNotFoundRetryableFunc) } }, withExtraOptions(ctx, p, config), diff --git a/internal/service/networkfirewall/sweep.go b/internal/service/networkfirewall/sweep.go index 1d4f7859bbdc..c71db02aa249 100644 --- a/internal/service/networkfirewall/sweep.go +++ b/internal/service/networkfirewall/sweep.go @@ -49,7 +49,7 @@ func sweepFirewallPolicies(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %w", err) + return fmt.Errorf("getting client: %w", err) } conn := client.NetworkFirewallClient(ctx) input := &networkfirewall.ListFirewallPoliciesInput{} @@ -90,7 +90,7 @@ func sweepFirewalls(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: 
%w", err) } conn := client.NetworkFirewallClient(ctx) input := &networkfirewall.ListFirewallsInput{} @@ -131,7 +131,7 @@ func sweepLoggingConfigurations(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.NetworkFirewallClient(ctx) input := &networkfirewall.ListFirewallsInput{} @@ -172,7 +172,7 @@ func sweepRuleGroups(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %w", err) + return fmt.Errorf("getting client: %w", err) } conn := client.NetworkFirewallClient(ctx) input := &networkfirewall.ListRuleGroupsInput{} diff --git a/internal/service/networkfirewall/tags_gen.go b/internal/service/networkfirewall/tags_gen.go index 98244df2861e..6f25895fe24f 100644 --- a/internal/service/networkfirewall/tags_gen.go +++ b/internal/service/networkfirewall/tags_gen.go @@ -3,8 +3,8 @@ package networkfirewall import ( "context" - "fmt" + "github.com/YakDriver/smarterr" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/networkfirewall" awstypes "github.com/aws/aws-sdk-go-v2/service/networkfirewall/types" @@ -31,7 +31,7 @@ func listTags(ctx context.Context, conn *networkfirewall.Client, identifier stri page, err := pages.NextPage(ctx, optFns...) if err != nil { - return tftags.New(ctx, nil), err + return tftags.New(ctx, nil), smarterr.NewError(err) } output = append(output, page.Tags...) 
@@ -46,7 +46,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri tags, err := listTags(ctx, meta.(*conns.AWSClient).NetworkFirewallClient(ctx), identifier) if err != nil { - return err + return smarterr.NewError(err) } if inContext, ok := tftags.FromContext(ctx); ok { @@ -124,7 +124,7 @@ func updateTags(ctx context.Context, conn *networkfirewall.Client, identifier st _, err := conn.UntagResource(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("untagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } @@ -139,7 +139,7 @@ func updateTags(ctx context.Context, conn *networkfirewall.Client, identifier st _, err := conn.TagResource(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("tagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } diff --git a/internal/service/networkfirewall/testdata/TLSInspectionConfiguration/basic_v5.100.0/main_gen.tf b/internal/service/networkfirewall/testdata/TLSInspectionConfiguration/basic_v5.100.0/main_gen.tf new file mode 100644 index 000000000000..b8bc9bafb685 --- /dev/null +++ b/internal/service/networkfirewall/testdata/TLSInspectionConfiguration/basic_v5.100.0/main_gen.tf @@ -0,0 +1,93 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_networkfirewall_tls_inspection_configuration" "test" { + name = var.rName + + tls_inspection_configuration { + server_certificate_configuration { + server_certificate { + resource_arn = aws_acm_certificate.test.arn + } + scope { + protocols = [6] + destination { + address_definition = "0.0.0.0/0" + } + } + } + } +} + +# testAccTLSInspectionConfigurationConfig_certificateBase + +resource "aws_acmpca_certificate_authority" "test" { + permanent_deletion_time_in_days = 7 + type = "ROOT" + + certificate_authority_configuration { + key_algorithm = "RSA_4096" + signing_algorithm = "SHA512WITHRSA" + + subject { + common_name = var.common_name + } + } +} + +resource "aws_acmpca_certificate" "test" { + certificate_authority_arn = aws_acmpca_certificate_authority.test.arn + certificate_signing_request = aws_acmpca_certificate_authority.test.certificate_signing_request + signing_algorithm = "SHA512WITHRSA" + + template_arn = "arn:${data.aws_partition.current.partition}:acm-pca:::template/RootCACertificate/V1" + + validity { + type = "YEARS" + value = 2 + } +} + +resource "aws_acmpca_certificate_authority_certificate" "test" { + certificate_authority_arn = aws_acmpca_certificate_authority.test.arn + + certificate = aws_acmpca_certificate.test.certificate + certificate_chain = aws_acmpca_certificate.test.certificate_chain +} + +data "aws_partition" "current" {} + +resource "aws_acm_certificate" "test" { + domain_name = var.certificate_domain + certificate_authority_arn = aws_acmpca_certificate_authority.test.arn + + depends_on = [ + aws_acmpca_certificate_authority_certificate.test, + ] +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} +variable "certificate_domain" { + type = string + nullable = false +} + +variable "common_name" { + type = string + nullable = false +} + +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "5.100.0" + } + } +} + 
+provider "aws" {} diff --git a/internal/service/networkfirewall/testdata/TLSInspectionConfiguration/basic_v6.0.0/main_gen.tf b/internal/service/networkfirewall/testdata/TLSInspectionConfiguration/basic_v6.0.0/main_gen.tf new file mode 100644 index 000000000000..bac13ca96abe --- /dev/null +++ b/internal/service/networkfirewall/testdata/TLSInspectionConfiguration/basic_v6.0.0/main_gen.tf @@ -0,0 +1,93 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resource "aws_networkfirewall_tls_inspection_configuration" "test" { + name = var.rName + + tls_inspection_configuration { + server_certificate_configuration { + server_certificate { + resource_arn = aws_acm_certificate.test.arn + } + scope { + protocols = [6] + destination { + address_definition = "0.0.0.0/0" + } + } + } + } +} + +# testAccTLSInspectionConfigurationConfig_certificateBase + +resource "aws_acmpca_certificate_authority" "test" { + permanent_deletion_time_in_days = 7 + type = "ROOT" + + certificate_authority_configuration { + key_algorithm = "RSA_4096" + signing_algorithm = "SHA512WITHRSA" + + subject { + common_name = var.common_name + } + } +} + +resource "aws_acmpca_certificate" "test" { + certificate_authority_arn = aws_acmpca_certificate_authority.test.arn + certificate_signing_request = aws_acmpca_certificate_authority.test.certificate_signing_request + signing_algorithm = "SHA512WITHRSA" + + template_arn = "arn:${data.aws_partition.current.partition}:acm-pca:::template/RootCACertificate/V1" + + validity { + type = "YEARS" + value = 2 + } +} + +resource "aws_acmpca_certificate_authority_certificate" "test" { + certificate_authority_arn = aws_acmpca_certificate_authority.test.arn + + certificate = aws_acmpca_certificate.test.certificate + certificate_chain = aws_acmpca_certificate.test.certificate_chain +} + +data "aws_partition" "current" {} + +resource "aws_acm_certificate" "test" { + domain_name = var.certificate_domain + certificate_authority_arn = 
aws_acmpca_certificate_authority.test.arn + + depends_on = [ + aws_acmpca_certificate_authority_certificate.test, + ] +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} +variable "certificate_domain" { + type = string + nullable = false +} + +variable "common_name" { + type = string + nullable = false +} + +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "6.0.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/networkfirewall/tls_inspection_configuration.go b/internal/service/networkfirewall/tls_inspection_configuration.go index 235f598f9175..a9b5f514d7ac 100644 --- a/internal/service/networkfirewall/tls_inspection_configuration.go +++ b/internal/service/networkfirewall/tls_inspection_configuration.go @@ -47,6 +47,7 @@ import ( // @Testing(existsType="github.com/aws/aws-sdk-go-v2/service/networkfirewall;networkfirewall.DescribeTLSInspectionConfigurationOutput") // @Testing(subdomainTfVar="common_name;certificate_domain") // @Testing(importIgnore="update_token", plannableImportAction="NoOp") +// @Testing(preIdentityVersion="v5.100.0") func newTLSInspectionConfigurationResource(_ context.Context) (resource.ResourceWithConfigure, error) { r := &tlsInspectionConfigurationResource{} diff --git a/internal/service/networkfirewall/tls_inspection_configuration_identity_gen_test.go b/internal/service/networkfirewall/tls_inspection_configuration_identity_gen_test.go index 9344e65d9379..b80eebf41d21 100644 --- a/internal/service/networkfirewall/tls_inspection_configuration_identity_gen_test.go +++ b/internal/service/networkfirewall/tls_inspection_configuration_identity_gen_test.go @@ -29,7 +29,7 @@ func TestAccNetworkFirewallTLSInspectionConfiguration_Identity_Basic(t *testing. 
common_name := acctest.RandomDomain() certificate_domain := common_name.RandomSubdomain() - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ TerraformVersionChecks: []tfversion.TerraformVersionCheck{ tfversion.SkipBelow(tfversion.Version1_12_0), }, @@ -53,6 +53,9 @@ func TestAccNetworkFirewallTLSInspectionConfiguration_Identity_Basic(t *testing. tfstatecheck.ExpectRegionalARNFormat(resourceName, tfjsonpath.New(names.AttrARN), "network-firewall", "tls-configuration/{name}"), statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New(names.AttrARN), compare.ValuesSame()), statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), }, }, @@ -125,7 +128,7 @@ func TestAccNetworkFirewallTLSInspectionConfiguration_Identity_RegionOverride(t common_name := acctest.RandomDomain() certificate_domain := common_name.RandomSubdomain() - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ TerraformVersionChecks: []tfversion.TerraformVersionCheck{ tfversion.SkipBelow(tfversion.Version1_12_0), }, @@ -147,6 +150,9 @@ func TestAccNetworkFirewallTLSInspectionConfiguration_Identity_RegionOverride(t tfstatecheck.ExpectRegionalARNAlternateRegionFormat(resourceName, tfjsonpath.New(names.AttrARN), "network-firewall", "tls-configuration/{name}"), statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New(names.AttrARN), compare.ValuesSame()), statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: 
knownvalue.NotNull(), + }), statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), }, }, @@ -254,3 +260,143 @@ func TestAccNetworkFirewallTLSInspectionConfiguration_Identity_RegionOverride(t }, }) } + +func TestAccNetworkFirewallTLSInspectionConfiguration_Identity_ExistingResource(t *testing.T) { + ctx := acctest.Context(t) + + var v networkfirewall.DescribeTLSInspectionConfigurationOutput + resourceName := "aws_networkfirewall_tls_inspection_configuration.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + common_name := acctest.RandomDomain() + certificate_domain := common_name.RandomSubdomain() + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkFirewallServiceID), + CheckDestroy: testAccCheckTLSInspectionConfigurationDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/TLSInspectionConfiguration/basic_v5.100.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "certificate_domain": config.StringVariable(certificate_domain.String()), + "common_name": config.StringVariable(common_name.String()), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTLSInspectionConfigurationExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: v6.0 Identity set on refresh + { + ConfigDirectory: config.StaticDirectory("testdata/TLSInspectionConfiguration/basic_v6.0.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "certificate_domain": config.StringVariable(certificate_domain.String()), + "common_name": config.StringVariable(common_name.String()), 
+ }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTLSInspectionConfigurationExists(ctx, resourceName, &v), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + + // Step 3: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/TLSInspectionConfiguration/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "certificate_domain": config.StringVariable(certificate_domain.String()), + "common_name": config.StringVariable(common_name.String()), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + }, + }) +} + +func TestAccNetworkFirewallTLSInspectionConfiguration_Identity_ExistingResource_NoRefresh_NoChange(t *testing.T) { + ctx := acctest.Context(t) + + var v networkfirewall.DescribeTLSInspectionConfigurationOutput + resourceName := "aws_networkfirewall_tls_inspection_configuration.test" + rName := 
sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + common_name := acctest.RandomDomain() + certificate_domain := common_name.RandomSubdomain() + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkFirewallServiceID), + CheckDestroy: testAccCheckTLSInspectionConfigurationDestroy(ctx), + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/TLSInspectionConfiguration/basic_v5.100.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "certificate_domain": config.StringVariable(certificate_domain.String()), + "common_name": config.StringVariable(common_name.String()), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTLSInspectionConfigurationExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/TLSInspectionConfiguration/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "certificate_domain": config.StringVariable(certificate_domain.String()), + "common_name": config.StringVariable(common_name.String()), + }, + }, + }, + }) +} diff --git a/internal/service/networkfirewall/tls_inspection_configuration_test.go b/internal/service/networkfirewall/tls_inspection_configuration_test.go index a41a258b8c88..945c447e85be 100644 --- a/internal/service/networkfirewall/tls_inspection_configuration_test.go +++ 
b/internal/service/networkfirewall/tls_inspection_configuration_test.go @@ -12,13 +12,8 @@ import ( "github.com/aws/aws-sdk-go-v2/service/networkfirewall" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" - "github.com/hashicorp/terraform-plugin-testing/plancheck" - "github.com/hashicorp/terraform-plugin-testing/statecheck" "github.com/hashicorp/terraform-plugin-testing/terraform" - "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" - "github.com/hashicorp/terraform-plugin-testing/tfversion" "github.com/hashicorp/terraform-provider-aws/internal/acctest" - tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" "github.com/hashicorp/terraform-provider-aws/internal/conns" tfnetworkfirewall "github.com/hashicorp/terraform-provider-aws/internal/service/networkfirewall" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" @@ -325,72 +320,6 @@ func TestAccNetworkFirewallTLSInspectionConfiguration_checkCertificateRevocation }) } -func TestAccNetworkFirewallTLSInspectionConfiguration_Identity_ExistingResource(t *testing.T) { - ctx := acctest.Context(t) - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resourceName := "aws_networkfirewall_tls_inspection_configuration.test" - commonName := acctest.RandomDomain() - certificateDomain := commonName.RandomSubdomain() - - resource.ParallelTest(t, resource.TestCase{ - TerraformVersionChecks: []tfversion.TerraformVersionCheck{ - tfversion.SkipBelow(tfversion.Version1_12_0), - }, - PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, names.NetworkFirewallServiceID), - CheckDestroy: testAccCheckTLSInspectionConfigurationDestroy(ctx), - Steps: []resource.TestStep{ - { - ExternalProviders: map[string]resource.ExternalProvider{ - "aws": { - Source: "hashicorp/aws", - VersionConstraint: "5.100.0", - }, - }, - Config: 
testAccTLSInspectionConfigurationConfig_basic(rName, commonName.String(), certificateDomain.String()), - ConfigStateChecks: []statecheck.StateCheck{ - tfstatecheck.ExpectNoIdentity(resourceName), - }, - }, - { - ExternalProviders: map[string]resource.ExternalProvider{ - "aws": { - Source: "hashicorp/aws", - VersionConstraint: "6.0.0", - }, - }, - Config: testAccTLSInspectionConfigurationConfig_basic(rName, commonName.String(), certificateDomain.String()), - ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - PostApplyPostRefresh: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - }, - ConfigStateChecks: []statecheck.StateCheck{ - statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), - }, - }, - { - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - Config: testAccTLSInspectionConfigurationConfig_basic(rName, commonName.String(), certificateDomain.String()), - ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - PostApplyPostRefresh: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - }, - ConfigStateChecks: []statecheck.StateCheck{ - statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), - }, - }, - }, - }) -} - func testAccCheckTLSInspectionConfigurationDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { conn := acctest.Provider.Meta().(*conns.AWSClient).NetworkFirewallClient(ctx) diff --git a/internal/service/networkfirewall/vpc_endpoint_association.go b/internal/service/networkfirewall/vpc_endpoint_association.go new file mode 100644 index 000000000000..be306bad13cc --- /dev/null +++ 
b/internal/service/networkfirewall/vpc_endpoint_association.go @@ -0,0 +1,420 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package networkfirewall + +import ( + "context" + "fmt" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/networkfirewall" + awstypes "github.com/aws/aws-sdk-go-v2/service/networkfirewall/types" + "github.com/hashicorp/terraform-plugin-framework-timeouts/resource/timeouts" + "github.com/hashicorp/terraform-plugin-framework-validators/listvalidator" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/listplanmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" + "github.com/hashicorp/terraform-provider-aws/internal/enum" + "github.com/hashicorp/terraform-provider-aws/internal/errs" + "github.com/hashicorp/terraform-provider-aws/internal/errs/fwdiag" + "github.com/hashicorp/terraform-provider-aws/internal/framework" + fwflex "github.com/hashicorp/terraform-provider-aws/internal/framework/flex" + fwtypes "github.com/hashicorp/terraform-provider-aws/internal/framework/types" + tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + "github.com/hashicorp/terraform-provider-aws/names" +) + +// @FrameworkResource("aws_networkfirewall_vpc_endpoint_association", name="VPC Endpoint Association") +// 
@Tags(identifierAttribute="vpc_endpoint_association_arn") +func newVPCEndpointAssociationResource(_ context.Context) (resource.ResourceWithConfigure, error) { + r := &vpcEndpointAssociationResource{} + + r.SetDefaultCreateTimeout(30 * time.Minute) + r.SetDefaultDeleteTimeout(30 * time.Minute) + + return r, nil +} + +type vpcEndpointAssociationResource struct { + framework.ResourceWithModel[vpcEndpointAssociationResourceModel] + framework.WithTimeouts +} + +func (r *vpcEndpointAssociationResource) Schema(ctx context.Context, request resource.SchemaRequest, response *resource.SchemaResponse) { + response.Schema = schema.Schema{ + Attributes: map[string]schema.Attribute{ + names.AttrDescription: schema.StringAttribute{ + Optional: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + }, + "firewall_arn": schema.StringAttribute{ + CustomType: fwtypes.ARNType, + Required: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + }, + names.AttrTags: tftags.TagsAttribute(), + names.AttrTagsAll: tftags.TagsAttributeComputedOnly(), + "vpc_endpoint_association_arn": schema.StringAttribute{ + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + }, + "vpc_endpoint_association_id": schema.StringAttribute{ + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + }, + "vpc_endpoint_association_status": framework.ResourceComputedListOfObjectsAttribute[vpcEndpointAssociationStatusModel](ctx, listplanmodifier.UseStateForUnknown()), + names.AttrVPCID: schema.StringAttribute{ + Required: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + }, + }, + Blocks: map[string]schema.Block{ + "subnet_mapping": schema.ListNestedBlock{ + CustomType: fwtypes.NewListNestedObjectTypeOf[subnetMappingModel](ctx), + Validators: []validator.List{ + listvalidator.IsRequired(), + 
listvalidator.SizeAtLeast(1), + listvalidator.SizeAtMost(1), + }, + PlanModifiers: []planmodifier.List{ + listplanmodifier.RequiresReplace(), + }, + NestedObject: schema.NestedBlockObject{ + Attributes: map[string]schema.Attribute{ + names.AttrIPAddressType: schema.StringAttribute{ + CustomType: fwtypes.StringEnumType[awstypes.IPAddressType](), + Optional: true, + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + }, + names.AttrSubnetID: schema.StringAttribute{ + Required: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + }, + }, + }, + }, + names.AttrTimeouts: timeouts.Block(ctx, timeouts.Opts{ + Create: true, + Delete: true, + }), + }, + } +} + +func (r *vpcEndpointAssociationResource) Create(ctx context.Context, request resource.CreateRequest, response *resource.CreateResponse) { + var data vpcEndpointAssociationResourceModel + response.Diagnostics.Append(request.Plan.Get(ctx, &data)...) + if response.Diagnostics.HasError() { + return + } + + conn := r.Meta().NetworkFirewallClient(ctx) + + var input networkfirewall.CreateVpcEndpointAssociationInput + response.Diagnostics.Append(fwflex.Expand(ctx, data, &input)...) + if response.Diagnostics.HasError() { + return + } + + // Additional fields. + input.Tags = getTagsIn(ctx) + + outputCVEA, err := conn.CreateVpcEndpointAssociation(ctx, &input) + + if err != nil { + response.Diagnostics.AddError("creating NetworkFirewall VPC Endpoint Association", err.Error()) + + return + } + + arn := aws.ToString(outputCVEA.VpcEndpointAssociation.VpcEndpointAssociationArn) + + outputDVEA, err := waitVPCEndpointAssociationCreated(ctx, conn, arn, r.CreateTimeout(ctx, data.Timeouts)) + + if err != nil { + response.Diagnostics.AddError(fmt.Sprintf("waiting for NetworkFirewall VPC Endpoint Association (%s) create", arn), err.Error()) + + return + } + + // Set values for unknowns. 
+ response.Diagnostics.Append(fwflex.Flatten(ctx, outputDVEA.VpcEndpointAssociation.SubnetMapping, &data.SubnetMapping)...) + if response.Diagnostics.HasError() { + return + } + data.VPCEndpointAssociationARN = fwflex.StringValueToFramework(ctx, arn) + data.VPCEndpointAssociationID = fwflex.StringToFramework(ctx, outputDVEA.VpcEndpointAssociation.VpcEndpointAssociationId) + vpcEndpointAssociationStatus, diags := flattenVPCEndpointAssociationStatus(ctx, outputDVEA.VpcEndpointAssociationStatus) + response.Diagnostics.Append(diags...) + if response.Diagnostics.HasError() { + return + } + data.VpcEndpointAssociationStatus = vpcEndpointAssociationStatus + + response.Diagnostics.Append(response.State.Set(ctx, data)...) +} + +func (r *vpcEndpointAssociationResource) Read(ctx context.Context, request resource.ReadRequest, response *resource.ReadResponse) { + var data vpcEndpointAssociationResourceModel + response.Diagnostics.Append(request.State.Get(ctx, &data)...) + if response.Diagnostics.HasError() { + return + } + + conn := r.Meta().NetworkFirewallClient(ctx) + + arn := fwflex.StringValueFromFramework(ctx, data.VPCEndpointAssociationARN) + output, err := findVPCEndpointAssociationByARN(ctx, conn, arn) + + if tfresource.NotFound(err) { + response.Diagnostics.Append(fwdiag.NewResourceNotFoundWarningDiagnostic(err)) + response.State.RemoveResource(ctx) + + return + } + + if err != nil { + response.Diagnostics.AddError(fmt.Sprintf("reading NetworkFirewall VPC Endpoint Association (%s)", arn), err.Error()) + + return + } + + // Set attributes for import. + response.Diagnostics.Append(fwflex.Flatten(ctx, output.VpcEndpointAssociation, &data)...) + if response.Diagnostics.HasError() { + return + } + vpcEndpointAssociationStatus, diags := flattenVPCEndpointAssociationStatus(ctx, output.VpcEndpointAssociationStatus) + response.Diagnostics.Append(diags...) 
+ if response.Diagnostics.HasError() { + return + } + data.VpcEndpointAssociationStatus = vpcEndpointAssociationStatus + + setTagsOut(ctx, output.VpcEndpointAssociation.Tags) + + response.Diagnostics.Append(response.State.Set(ctx, &data)...) +} + +func (r *vpcEndpointAssociationResource) Delete(ctx context.Context, request resource.DeleteRequest, response *resource.DeleteResponse) { + var data vpcEndpointAssociationResourceModel + response.Diagnostics.Append(request.State.Get(ctx, &data)...) + if response.Diagnostics.HasError() { + return + } + + conn := r.Meta().NetworkFirewallClient(ctx) + + arn := fwflex.StringValueFromFramework(ctx, data.VPCEndpointAssociationARN) + input := networkfirewall.DeleteVpcEndpointAssociationInput{ + VpcEndpointAssociationArn: aws.String(arn), + } + _, err := conn.DeleteVpcEndpointAssociation(ctx, &input) + + if errs.IsA[*awstypes.ResourceNotFoundException](err) { + return + } + + if err != nil { + response.Diagnostics.AddError(fmt.Sprintf("deleting NetworkFirewall VPC Endpoint Association (%s)", arn), err.Error()) + + return + } + + if _, err := waitVPCEndpointAssociationDeleted(ctx, conn, arn, r.DeleteTimeout(ctx, data.Timeouts)); err != nil { + response.Diagnostics.AddError(fmt.Sprintf("waiting for NetworkFirewall VPC Endpoint Association (%s) delete", arn), err.Error()) + + return + } +} + +func (r *vpcEndpointAssociationResource) ImportState(ctx context.Context, request resource.ImportStateRequest, response *resource.ImportStateResponse) { + resource.ImportStatePassthroughID(ctx, path.Root("vpc_endpoint_association_arn"), request, response) +} + +func findVPCEndpointAssociation(ctx context.Context, conn *networkfirewall.Client, input *networkfirewall.DescribeVpcEndpointAssociationInput) (*networkfirewall.DescribeVpcEndpointAssociationOutput, error) { + output, err := conn.DescribeVpcEndpointAssociation(ctx, input) + + if errs.IsA[*awstypes.ResourceNotFoundException](err) { + return nil, &retry.NotFoundError{ + LastError: err, + } 
+ } + + if err != nil { + return nil, err + } + + if output == nil || output.VpcEndpointAssociation == nil || output.VpcEndpointAssociationStatus == nil { + return nil, tfresource.NewEmptyResultError(input) + } + + return output, nil +} + +func findVPCEndpointAssociationByARN(ctx context.Context, conn *networkfirewall.Client, arn string) (*networkfirewall.DescribeVpcEndpointAssociationOutput, error) { + input := networkfirewall.DescribeVpcEndpointAssociationInput{ + VpcEndpointAssociationArn: aws.String(arn), + } + + return findVPCEndpointAssociation(ctx, conn, &input) +} + +func statusVPCEndpointAssociation(ctx context.Context, conn *networkfirewall.Client, arn string) retry.StateRefreshFunc { + return func() (any, string, error) { + output, err := findVPCEndpointAssociationByARN(ctx, conn, arn) + + if tfresource.NotFound(err) { + return nil, "", nil + } + + if err != nil { + return nil, "", err + } + + return output, string(output.VpcEndpointAssociationStatus.Status), nil + } +} + +func waitVPCEndpointAssociationCreated(ctx context.Context, conn *networkfirewall.Client, arn string, timeout time.Duration) (*networkfirewall.DescribeVpcEndpointAssociationOutput, error) { + stateConf := &retry.StateChangeConf{ + Pending: enum.Slice(awstypes.FirewallStatusValueProvisioning), + Target: enum.Slice(awstypes.FirewallStatusValueReady), + Refresh: statusVPCEndpointAssociation(ctx, conn, arn), + Timeout: timeout, + ContinuousTargetOccurence: 2, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + + if output, ok := outputRaw.(*networkfirewall.DescribeVpcEndpointAssociationOutput); ok { + return output, err + } + + return nil, err +} + +func waitVPCEndpointAssociationDeleted(ctx context.Context, conn *networkfirewall.Client, arn string, timeout time.Duration) (*networkfirewall.DescribeVpcEndpointAssociationOutput, error) { + stateConf := &retry.StateChangeConf{ + Pending: enum.Slice(awstypes.FirewallStatusValueReady, awstypes.FirewallStatusValueDeleting), + Target: 
[]string{}, + Refresh: statusVPCEndpointAssociation(ctx, conn, arn), + Timeout: timeout, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + + if output, ok := outputRaw.(*networkfirewall.DescribeVpcEndpointAssociationOutput); ok { + return output, err + } + + return nil, err +} + +func flattenVPCEndpointAssociationStatus(ctx context.Context, veas *awstypes.VpcEndpointAssociationStatus) (fwtypes.ListNestedObjectValueOf[vpcEndpointAssociationStatusModel], diag.Diagnostics) { // nosemgrep:ci.semgrep.framework.manual-flattener-functions + var diags diag.Diagnostics + + if veas == nil { + return fwtypes.NewListNestedObjectValueOfNull[vpcEndpointAssociationStatusModel](ctx), diags + } + + var models []*associationSyncStateModel + for az, syncState := range veas.AssociationSyncState { + a := syncState.Attachment + if a == nil { + continue + } + + attachment, d := fwtypes.NewListNestedObjectValueOfPtr(ctx, &attachmentModel{ + EndpointID: fwflex.StringToFramework(ctx, a.EndpointId), + SubnetID: fwflex.StringToFramework(ctx, a.SubnetId), + Status: fwtypes.StringEnumValue(a.Status), + StatusMessage: fwflex.StringToFramework(ctx, a.StatusMessage), + }) + diags.Append(d...) + if diags.HasError() { + return fwtypes.NewListNestedObjectValueOfNull[vpcEndpointAssociationStatusModel](ctx), diags + } + + models = append(models, &associationSyncStateModel{ + Attachment: attachment, + AvailabilityZone: fwflex.StringValueToFramework(ctx, az), + }) + } + + associationSyncState, d := fwtypes.NewSetNestedObjectValueOfSlice(ctx, models, nil) + diags.Append(d...) + if diags.HasError() { + return fwtypes.NewListNestedObjectValueOfNull[vpcEndpointAssociationStatusModel](ctx), diags + } + + vpcEndpointAssociationStatus, d := fwtypes.NewListNestedObjectValueOfPtr(ctx, &vpcEndpointAssociationStatusModel{ + AssociationSyncState: associationSyncState, + }) + diags.Append(d...) 
+ if diags.HasError() { + return fwtypes.NewListNestedObjectValueOfNull[vpcEndpointAssociationStatusModel](ctx), diags + } + + return vpcEndpointAssociationStatus, diags +} + +type vpcEndpointAssociationResourceModel struct { + framework.WithRegionModel + Description types.String `tfsdk:"description"` + FirewallARN fwtypes.ARN `tfsdk:"firewall_arn"` + SubnetMapping fwtypes.ListNestedObjectValueOf[subnetMappingModel] `tfsdk:"subnet_mapping"` + Tags tftags.Map `tfsdk:"tags"` + TagsAll tftags.Map `tfsdk:"tags_all"` + Timeouts timeouts.Value `tfsdk:"timeouts"` + VPCEndpointAssociationARN types.String `tfsdk:"vpc_endpoint_association_arn"` + VPCEndpointAssociationID types.String `tfsdk:"vpc_endpoint_association_id"` + VpcEndpointAssociationStatus fwtypes.ListNestedObjectValueOf[vpcEndpointAssociationStatusModel] `tfsdk:"vpc_endpoint_association_status"` + VPCID types.String `tfsdk:"vpc_id"` +} + +type subnetMappingModel struct { + SubnetId types.String `tfsdk:"subnet_id"` + IPAddressType fwtypes.StringEnum[awstypes.IPAddressType] `tfsdk:"ip_address_type"` +} + +type vpcEndpointAssociationStatusModel struct { + AssociationSyncState fwtypes.SetNestedObjectValueOf[associationSyncStateModel] `tfsdk:"association_sync_state"` +} + +type associationSyncStateModel struct { + Attachment fwtypes.ListNestedObjectValueOf[attachmentModel] `tfsdk:"attachment"` + AvailabilityZone types.String `tfsdk:"availability_zone"` +} + +type attachmentModel struct { + EndpointID types.String `tfsdk:"endpoint_id"` + SubnetID types.String `tfsdk:"subnet_id"` + Status fwtypes.StringEnum[awstypes.AttachmentStatus] `tfsdk:"status"` + StatusMessage types.String `tfsdk:"status_message"` +} diff --git a/internal/service/networkfirewall/vpc_endpoint_association_test.go b/internal/service/networkfirewall/vpc_endpoint_association_test.go new file mode 100644 index 000000000000..13fb263b1805 --- /dev/null +++ b/internal/service/networkfirewall/vpc_endpoint_association_test.go @@ -0,0 +1,386 @@ +// Copyright 
(c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package networkfirewall_test + +import ( + "context" + "fmt" + "testing" + + "github.com/YakDriver/regexache" + "github.com/aws/aws-sdk-go-v2/service/networkfirewall" + awstypes "github.com/aws/aws-sdk-go-v2/service/networkfirewall/types" + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/knownvalue" + "github.com/hashicorp/terraform-plugin-testing/plancheck" + "github.com/hashicorp/terraform-plugin-testing/statecheck" + "github.com/hashicorp/terraform-plugin-testing/terraform" + "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + tfknownvalue "github.com/hashicorp/terraform-provider-aws/internal/acctest/knownvalue" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + tfnetworkfirewall "github.com/hashicorp/terraform-provider-aws/internal/service/networkfirewall" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccNetworkFirewallVPCEndpointAssociation_basic(t *testing.T) { + ctx := acctest.Context(t) + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + + var v networkfirewall.DescribeVpcEndpointAssociationOutput + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_networkfirewall_vpc_endpoint_association.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + testAccVPCEndpointAssociationPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkFirewallServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckVPCEndpointAssociationDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: 
testAccVPCEndpointAssociationConfig_basic(rName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckVPCEndpointAssociationExists(ctx, resourceName, &v), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrDescription), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("subnet_mapping"), knownvalue.ListExact([]knownvalue.Check{ + knownvalue.ObjectPartial(map[string]knownvalue.Check{ + names.AttrIPAddressType: tfknownvalue.StringExact(awstypes.IPAddressTypeIpv4), + }), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("vpc_endpoint_association_arn"), tfknownvalue.RegionalARNRegexp("network-firewall", regexache.MustCompile(`vpc-endpoint-association/.+`))), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("vpc_endpoint_association_id"), knownvalue.NotNull()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("vpc_endpoint_association_status"), knownvalue.ListSizeExact(1)), + }, + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "vpc_endpoint_association_arn", + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, "vpc_endpoint_association_arn"), + }, + }, + }) +} + +func TestAccNetworkFirewallVPCEndpointAssociation_disappears(t *testing.T) { + ctx := acctest.Context(t) + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + + var v networkfirewall.DescribeVpcEndpointAssociationOutput + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_networkfirewall_vpc_endpoint_association.test" + + resource.ParallelTest(t, 
resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + testAccVPCEndpointAssociationPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkFirewallServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckVPCEndpointAssociationDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccVPCEndpointAssociationConfig_basic(rName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckVPCEndpointAssociationExists(ctx, resourceName, &v), + acctest.CheckFrameworkResourceDisappears(ctx, acctest.Provider, tfnetworkfirewall.ResourceVPCEndpointAssociation, resourceName), + ), + ExpectNonEmptyPlan: true, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + }, + }, + }, + }, + }) +} + +func TestAccNetworkFirewallVPCEndpointAssociation_full(t *testing.T) { + ctx := acctest.Context(t) + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + + var v networkfirewall.DescribeVpcEndpointAssociationOutput + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_networkfirewall_vpc_endpoint_association.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + testAccVPCEndpointAssociationPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkFirewallServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckVPCEndpointAssociationDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccVPCEndpointAssociationConfig_full(rName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckVPCEndpointAssociationExists(ctx, resourceName, &v), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + }, + }, 
+ ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrDescription), knownvalue.StringExact("testing")), + }, + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "vpc_endpoint_association_arn", + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, "vpc_endpoint_association_arn"), + }, + }, + }) +} + +func TestAccNetworkFirewallVPCEndpointAssociation_tags(t *testing.T) { + ctx := acctest.Context(t) + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + + var v networkfirewall.DescribeVpcEndpointAssociationOutput + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_networkfirewall_vpc_endpoint_association.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + testAccVPCEndpointAssociationPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkFirewallServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckVPCEndpointAssociationDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccVPCEndpointAssociationConfig_tags1(rName, acctest.CtKey1, acctest.CtValue1), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckVPCEndpointAssociationExists(ctx, resourceName, &v), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "vpc_endpoint_association_arn", + 
ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, "vpc_endpoint_association_arn"), + }, + { + Config: testAccVPCEndpointAssociationConfig_tags2(rName, acctest.CtKey1, acctest.CtValue1Updated, acctest.CtKey2, acctest.CtValue2), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckVPCEndpointAssociationExists(ctx, resourceName, &v), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1Updated), + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + }, + { + Config: testAccVPCEndpointAssociationConfig_tags1(rName, acctest.CtKey2, acctest.CtValue2), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckVPCEndpointAssociationExists(ctx, resourceName, &v), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + }, + }, + }) +} + +func testAccCheckVPCEndpointAssociationDestroy(ctx context.Context) resource.TestCheckFunc { + return func(s *terraform.State) error { + conn := acctest.Provider.Meta().(*conns.AWSClient).NetworkFirewallClient(ctx) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "aws_networkfirewall_vpc_endpoint_association" { + continue + } + + _, err := tfnetworkfirewall.FindVPCEndpointAssociationByARN(ctx, conn, rs.Primary.Attributes["vpc_endpoint_association_arn"]) + + if 
tfresource.NotFound(err) { + continue + } + + if err != nil { + return err + } + + return fmt.Errorf("NetworkFirewall VPC Endpoint Association %s still exists", rs.Primary.Attributes["vpc_endpoint_association_arn"]) + } + + return nil + } +} + +func testAccCheckVPCEndpointAssociationExists(ctx context.Context, n string, v *networkfirewall.DescribeVpcEndpointAssociationOutput) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + conn := acctest.Provider.Meta().(*conns.AWSClient).NetworkFirewallClient(ctx) + + output, err := tfnetworkfirewall.FindVPCEndpointAssociationByARN(ctx, conn, rs.Primary.Attributes["vpc_endpoint_association_arn"]) + + if err != nil { + return err + } + + *v = *output + + return nil + } +} + +func testAccVPCEndpointAssociationPreCheck(ctx context.Context, t *testing.T) { + conn := acctest.Provider.Meta().(*conns.AWSClient).NetworkFirewallClient(ctx) + + input := &networkfirewall.ListVpcEndpointAssociationsInput{} + + _, err := conn.ListVpcEndpointAssociations(ctx, input) + + if acctest.PreCheckSkipError(err) { + t.Skipf("skipping acceptance testing: %s", err) + } + if err != nil { + t.Fatalf("unexpected PreCheck error: %s", err) + } +} + +func testAccVPCEndpointAssociationConfig_base(rName string) string { + return acctest.ConfigCompose(testAccFirewallConfig_basic(rName), fmt.Sprintf(` +resource "aws_vpc" "target" { + cidr_block = "10.0.0.0/16" + + tags = { + Name = %[1]q + } +} + +resource "aws_subnet" "target" { + vpc_id = aws_vpc.target.id + availability_zone = data.aws_availability_zones.available.names[0] + cidr_block = cidrsubnet(aws_vpc.target.cidr_block, 8, 1) + + tags = { + Name = %[1]q + } +} +`, rName)) +} + +func testAccVPCEndpointAssociationConfig_basic(rName string) string { + return acctest.ConfigCompose(testAccVPCEndpointAssociationConfig_base(rName), ` +resource "aws_networkfirewall_vpc_endpoint_association" "test" 
{ + firewall_arn = aws_networkfirewall_firewall.test.arn + vpc_id = aws_vpc.target.id + + subnet_mapping { + subnet_id = aws_subnet.target.id + } +} +`) +} + +func testAccVPCEndpointAssociationConfig_full(rName string) string { + return acctest.ConfigCompose(testAccVPCEndpointAssociationConfig_base(rName), ` +resource "aws_networkfirewall_vpc_endpoint_association" "test" { + description = "testing" + firewall_arn = aws_networkfirewall_firewall.test.arn + vpc_id = aws_vpc.target.id + + subnet_mapping { + ip_address_type = "IPV4" + subnet_id = aws_subnet.target.id + } +} +`) +} + +func testAccVPCEndpointAssociationConfig_tags1(rName, tag1Key, tag1Value string) string { + return acctest.ConfigCompose(testAccVPCEndpointAssociationConfig_base(rName), fmt.Sprintf(` +resource "aws_networkfirewall_vpc_endpoint_association" "test" { + firewall_arn = aws_networkfirewall_firewall.test.arn + vpc_id = aws_vpc.target.id + + subnet_mapping { + subnet_id = aws_subnet.target.id + } + + tags = { + %[1]q = %[2]q + } +} +`, tag1Key, tag1Value)) +} + +func testAccVPCEndpointAssociationConfig_tags2(rName, tag1Key, tag1Value, tag2Key, tag2Value string) string { + return acctest.ConfigCompose(testAccVPCEndpointAssociationConfig_base(rName), fmt.Sprintf(` +resource "aws_networkfirewall_vpc_endpoint_association" "test" { + firewall_arn = aws_networkfirewall_firewall.test.arn + vpc_id = aws_vpc.target.id + + subnet_mapping { + subnet_id = aws_subnet.target.id + } + + tags = { + %[1]q = %[2]q + %[3]q = %[4]q + } +} +`, tag1Key, tag1Value, tag2Key, tag2Value)) +} diff --git a/internal/service/networkmanager/connect_attachment.go b/internal/service/networkmanager/connect_attachment.go index 0ab473c8198f..73183a8796cb 100644 --- a/internal/service/networkmanager/connect_attachment.go +++ b/internal/service/networkmanager/connect_attachment.go @@ -27,6 +27,10 @@ import ( // @SDKResource("aws_networkmanager_connect_attachment", name="Connect Attachment") // @Tags(identifierAttribute="arn") +// 
@Testing(existsType="github.com/aws/aws-sdk-go-v2/service/networkmanager/types;awstypes;awstypes.ConnectAttachment") +// @Testing(skipEmptyTags=true) +// @Testing(importIgnore="state") +// @Testing(generator=false) func resourceConnectAttachment() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceConnectAttachmentCreate, @@ -148,7 +152,7 @@ func resourceConnectAttachmentCreate(ctx context.Context, d *schema.ResourceData } outputRaw, err := tfresource.RetryWhen(ctx, d.Timeout(schema.TimeoutCreate), - func() (any, error) { + func(ctx context.Context) (any, error) { return conn.CreateConnectAttachment(ctx, input) }, func(err error) (bool, error) { diff --git a/internal/service/networkmanager/connect_attachment_tags_gen_test.go b/internal/service/networkmanager/connect_attachment_tags_gen_test.go new file mode 100644 index 000000000000..3388d170b95f --- /dev/null +++ b/internal/service/networkmanager/connect_attachment_tags_gen_test.go @@ -0,0 +1,2298 @@ +// Code generated by internal/generate/tagstests/main.go; DO NOT EDIT. 
+ +package networkmanager_test + +import ( + "testing" + + awstypes "github.com/aws/aws-sdk-go-v2/service/networkmanager/types" + "github.com/hashicorp/terraform-plugin-testing/config" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/knownvalue" + "github.com/hashicorp/terraform-plugin-testing/plancheck" + "github.com/hashicorp/terraform-plugin-testing/statecheck" + "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccNetworkManagerConnectAttachment_tags(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.ConnectAttachment + resourceName := "aws_networkmanager_connect_attachment.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckConnectAttachmentDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + ConfigDirectory: config.StaticDirectory("testdata/ConnectAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckConnectAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + 
PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/ConnectAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + names.AttrState, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/ConnectAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1Updated), + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckConnectAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1Updated), + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1Updated), + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: 
[]plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1Updated), + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1Updated), + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/ConnectAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1Updated), + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + names.AttrState, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/ConnectAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckConnectAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: 
[]plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/ConnectAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + names.AttrState, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/ConnectAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckConnectAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + }, + }, + { + ConfigDirectory: 
config.StaticDirectory("testdata/ConnectAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + names.AttrState, + }, + }, + }, + }) +} + +func TestAccNetworkManagerConnectAttachment_tags_null(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.ConnectAttachment + resourceName := "aws_networkmanager_connect_attachment.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckConnectAttachmentDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + ConfigDirectory: config.StaticDirectory("testdata/ConnectAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: nil, + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckConnectAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + // TODO: Should be known + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/ConnectAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: 
config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: nil, + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + names.AttrState, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/ConnectAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: nil, + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + }, + }, + }) +} + +func TestAccNetworkManagerConnectAttachment_tags_EmptyMap(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.ConnectAttachment + resourceName := "aws_networkmanager_connect_attachment.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckConnectAttachmentDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + ConfigDirectory: config.StaticDirectory("testdata/ConnectAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{}), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckConnectAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + 
plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + // TODO: Should be known + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/ConnectAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{}), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + names.AttrState, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/ConnectAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: nil, + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + }, + }, + }) +} + +func TestAccNetworkManagerConnectAttachment_tags_AddOnUpdate(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.ConnectAttachment + resourceName := "aws_networkmanager_connect_attachment.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckConnectAttachmentDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + ConfigDirectory: config.StaticDirectory("testdata/ConnectAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckConnectAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + 
statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + // TODO: Should be known + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/ConnectAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckConnectAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/ConnectAttachment/tags/"), + ConfigVariables: 
config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + names.AttrState, + }, + }, + }, + }) +} + +func TestAccNetworkManagerConnectAttachment_tags_EmptyTag_OnCreate(t *testing.T) { + t.Skip("Resource ConnectAttachment does not support empty tags") + + ctx := acctest.Context(t) + + var v awstypes.ConnectAttachment + resourceName := "aws_networkmanager_connect_attachment.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckConnectAttachmentDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + ConfigDirectory: config.StaticDirectory("testdata/ConnectAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckConnectAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: 
knownvalue.StringExact(""), + })), + // TODO: Should be known + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/ConnectAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + names.AttrState, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/ConnectAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckConnectAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/ConnectAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + names.AttrState, + }, + }, + }, + }) +} + +func TestAccNetworkManagerConnectAttachment_tags_EmptyTag_OnUpdate_Add(t *testing.T) { + t.Skip("Resource 
ConnectAttachment does not support empty tags") + + ctx := acctest.Context(t) + + var v awstypes.ConnectAttachment + resourceName := "aws_networkmanager_connect_attachment.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckConnectAttachmentDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + ConfigDirectory: config.StaticDirectory("testdata/ConnectAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckConnectAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/ConnectAttachment/tags/"), + ConfigVariables: config.Variables{ + 
acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + acctest.CtKey2: config.StringVariable(""), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckConnectAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + acctest.CtKey2: knownvalue.StringExact(""), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + acctest.CtKey2: knownvalue.StringExact(""), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + acctest.CtKey2: knownvalue.StringExact(""), + })), + // TODO: Should be known + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/ConnectAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + acctest.CtKey2: config.StringVariable(""), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + names.AttrState, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/ConnectAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ 
+ acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckConnectAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/ConnectAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + names.AttrState, + }, + }, + }, + }) +} + +func TestAccNetworkManagerConnectAttachment_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { + t.Skip("Resource ConnectAttachment does not support empty tags") + + ctx := acctest.Context(t) + + var v awstypes.ConnectAttachment + resourceName := "aws_networkmanager_connect_attachment.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + 
ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckConnectAttachmentDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + ConfigDirectory: config.StaticDirectory("testdata/ConnectAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckConnectAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/ConnectAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckConnectAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: 
[]statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + // TODO: Should be known + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/ConnectAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + names.AttrState, + }, + }, + }, + }) +} + +func TestAccNetworkManagerConnectAttachment_tags_DefaultTags_providerOnly(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.ConnectAttachment + resourceName := "aws_networkmanager_connect_attachment.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckConnectAttachmentDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/ConnectAttachment/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ 
+ acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckConnectAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/ConnectAttachment/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + names.AttrState, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/ConnectAttachment/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1Updated), + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + acctest.CtResourceTags: nil, + }, + Check: 
resource.ComposeAggregateTestCheckFunc( + testAccCheckConnectAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1Updated), + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1Updated), + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/ConnectAttachment/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1Updated), + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + names.AttrState, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/ConnectAttachment/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey2: 
config.StringVariable(acctest.CtValue2), + }), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckConnectAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/ConnectAttachment/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + names.AttrState, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/ConnectAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckConnectAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + 
statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/ConnectAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + names.AttrState, + }, + }, + }, + }) +} + +func TestAccNetworkManagerConnectAttachment_tags_DefaultTags_nonOverlapping(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.ConnectAttachment + resourceName := "aws_networkmanager_connect_attachment.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckConnectAttachmentDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/ConnectAttachment/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: 
config.StringVariable(acctest.CtResourceValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckConnectAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1), + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1), + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/ConnectAttachment/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + 
ImportStateVerifyIgnore: []string{ + names.AttrState, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/ConnectAttachment/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1Updated), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: config.StringVariable(acctest.CtResourceValue2), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckConnectAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1Updated), + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, 
tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1Updated), + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/ConnectAttachment/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1Updated), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: config.StringVariable(acctest.CtResourceValue2), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + names.AttrState, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/ConnectAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckConnectAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), 
knownvalue.MapExact(map[string]knownvalue.Check{})), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/ConnectAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + names.AttrState, + }, + }, + }, + }) +} + +func TestAccNetworkManagerConnectAttachment_tags_DefaultTags_overlapping(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.ConnectAttachment + resourceName := "aws_networkmanager_connect_attachment.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckConnectAttachmentDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/ConnectAttachment/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtResourceValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckConnectAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + statecheck.ExpectKnownValue(resourceName, 
tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/ConnectAttachment/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtResourceValue1), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + names.AttrState, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/ConnectAttachment/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtProviderValue1), + acctest.CtOverlapKey2: config.StringVariable("providervalue2"), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtResourceValue1), + acctest.CtOverlapKey2: 
config.StringVariable(acctest.CtResourceValue2), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckConnectAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + acctest.CtOverlapKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + acctest.CtOverlapKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + acctest.CtOverlapKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + acctest.CtOverlapKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/ConnectAttachment/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtProviderValue1), + acctest.CtOverlapKey2: config.StringVariable("providervalue2"), + }), + acctest.CtResourceTags: 
config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtResourceValue1), + acctest.CtOverlapKey2: config.StringVariable(acctest.CtResourceValue2), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + names.AttrState, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/ConnectAttachment/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtResourceValue2), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckConnectAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue2), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: 
knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/ConnectAttachment/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtResourceValue2), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + names.AttrState, + }, + }, + }, + }) +} + +func TestAccNetworkManagerConnectAttachment_tags_DefaultTags_updateToProviderOnly(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.ConnectAttachment + resourceName := "aws_networkmanager_connect_attachment.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckConnectAttachmentDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/ConnectAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckConnectAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), 
knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/ConnectAttachment/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckConnectAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: 
knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/ConnectAttachment/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + names.AttrState, + }, + }, + }, + }) +} + +func TestAccNetworkManagerConnectAttachment_tags_DefaultTags_updateToResourceOnly(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.ConnectAttachment + resourceName := "aws_networkmanager_connect_attachment.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckConnectAttachmentDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/ConnectAttachment/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckConnectAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + 
plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/ConnectAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckConnectAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/ConnectAttachment/tags/"), + ConfigVariables: 
config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + names.AttrState, + }, + }, + }, + }) +} + +func TestAccNetworkManagerConnectAttachment_tags_DefaultTags_emptyResourceTag(t *testing.T) { + t.Skip("Resource ConnectAttachment does not support empty tags") + + ctx := acctest.Context(t) + + var v awstypes.ConnectAttachment + resourceName := "aws_networkmanager_connect_attachment.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckConnectAttachmentDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/ConnectAttachment/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckConnectAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, 
plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + // TODO: Should be known + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/ConnectAttachment/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + names.AttrState, + }, + }, + }, + }) +} + +func TestAccNetworkManagerConnectAttachment_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T) { + t.Skip("Resource ConnectAttachment does not support empty tags") + + ctx := acctest.Context(t) + + var v awstypes.ConnectAttachment + resourceName := "aws_networkmanager_connect_attachment.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckConnectAttachmentDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/ConnectAttachment/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckConnectAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: 
[]statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + // TODO: Should be known + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/ConnectAttachment/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + names.AttrState, + }, + }, + }, + }) +} + +func TestAccNetworkManagerConnectAttachment_tags_DefaultTags_nullOverlappingResourceTag(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.ConnectAttachment + resourceName := "aws_networkmanager_connect_attachment.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckConnectAttachmentDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/ConnectAttachment/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: 
config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: nil, + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckConnectAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtProviderValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtProviderValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/ConnectAttachment/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: nil, + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + names.AttrState, + }, + }, + }, + }) +} + +func TestAccNetworkManagerConnectAttachment_tags_DefaultTags_nullNonOverlappingResourceTag(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.ConnectAttachment + resourceName := "aws_networkmanager_connect_attachment.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + 
PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckConnectAttachmentDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/ConnectAttachment/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: nil, + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckConnectAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/ConnectAttachment/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: 
config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: nil, + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + names.AttrState, + }, + }, + }, + }) +} + +func TestAccNetworkManagerConnectAttachment_tags_ComputedTag_OnCreate(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.ConnectAttachment + resourceName := "aws_networkmanager_connect_attachment.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckConnectAttachmentDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/ConnectAttachment/tagsComputed1/"), + ConfigVariables: config.Variables{ + "unknownTagKey": config.StringVariable("computedkey1"), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckConnectAttachmentExists(ctx, resourceName, &v), + resource.TestCheckResourceAttrPair(resourceName, "tags.computedkey1", "null_resource.test", names.AttrID), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapSizeExact(1)), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapSizeExact(1)), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTags)), + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + 
plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/ConnectAttachment/tagsComputed1/"), + ConfigVariables: config.Variables{ + "unknownTagKey": config.StringVariable("computedkey1"), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + names.AttrState, + }, + }, + }, + }) +} + +func TestAccNetworkManagerConnectAttachment_tags_ComputedTag_OnUpdate_Add(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.ConnectAttachment + resourceName := "aws_networkmanager_connect_attachment.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckConnectAttachmentDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/ConnectAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckConnectAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, 
plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/ConnectAttachment/tagsComputed2/"), + ConfigVariables: config.Variables{ + "unknownTagKey": config.StringVariable("computedkey1"), + "knownTagKey": config.StringVariable(acctest.CtKey1), + "knownTagValue": config.StringVariable(acctest.CtValue1), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckConnectAttachmentExists(ctx, resourceName, &v), + resource.TestCheckResourceAttrPair(resourceName, "tags.computedkey1", "null_resource.test", names.AttrID), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapSizeExact(2)), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapPartial(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapSizeExact(2)), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapPartial(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTags)), + plancheck.ExpectUnknownValue(resourceName, 
tfjsonpath.New(names.AttrTagsAll)), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/ConnectAttachment/tagsComputed2/"), + ConfigVariables: config.Variables{ + "unknownTagKey": config.StringVariable("computedkey1"), + "knownTagKey": config.StringVariable(acctest.CtKey1), + "knownTagValue": config.StringVariable(acctest.CtValue1), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + names.AttrState, + }, + }, + }, + }) +} + +func TestAccNetworkManagerConnectAttachment_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.ConnectAttachment + resourceName := "aws_networkmanager_connect_attachment.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckConnectAttachmentDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/ConnectAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckConnectAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), 
+ statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/ConnectAttachment/tagsComputed1/"), + ConfigVariables: config.Variables{ + "unknownTagKey": config.StringVariable(acctest.CtKey1), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckConnectAttachmentExists(ctx, resourceName, &v), + resource.TestCheckResourceAttrPair(resourceName, acctest.CtTagsKey1, "null_resource.test", names.AttrID), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapSizeExact(1)), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapSizeExact(1)), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTags)), + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: 
[]plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/ConnectAttachment/tagsComputed1/"), + ConfigVariables: config.Variables{ + "unknownTagKey": config.StringVariable(acctest.CtKey1), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + names.AttrState, + }, + }, + }, + }) +} + +func TestAccNetworkManagerConnectAttachment_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.ConnectAttachment + resourceName := "aws_networkmanager_connect_attachment.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckConnectAttachmentDestroy(ctx), + Steps: []resource.TestStep{ + // 1: Create + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/ConnectAttachment/tags_ignore/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1), + }), + "ignore_tag_keys": config.SetVariable( + config.StringVariable(acctest.CtProviderKey1), + ), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckConnectAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + 
statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + expectFullResourceTags(ctx, resourceName, knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1), // TODO: Should not be set + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectEmptyPlan(), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectEmptyPlan(), + }, + }, + }, + // 2: Update ignored tag only + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/ConnectAttachment/tags_ignore/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1Updated), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1), + }), + "ignore_tag_keys": config.SetVariable( + config.StringVariable(acctest.CtProviderKey1), + ), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckConnectAttachmentExists(ctx, resourceName, &v), 
+ ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + expectFullResourceTags(ctx, resourceName, knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1), // TODO: Should not be set + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectEmptyPlan(), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectEmptyPlan(), + }, + }, + }, + // 3: Update both tags + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/ConnectAttachment/tags_ignore/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1Again), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: 
config.StringVariable(acctest.CtResourceValue1Updated), + }), + "ignore_tag_keys": config.SetVariable( + config.StringVariable(acctest.CtProviderKey1), + ), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckConnectAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + })), + expectFullResourceTags(ctx, resourceName, knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1), // TODO: Should not be set + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + })), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectEmptyPlan(), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectEmptyPlan(), + }, + }, + }, + }, + }) +} + +func TestAccNetworkManagerConnectAttachment_tags_IgnoreTags_Overlap_ResourceTag(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.ConnectAttachment + 
resourceName := "aws_networkmanager_connect_attachment.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckConnectAttachmentDestroy(ctx), + Steps: []resource.TestStep{ + // 1: Create + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/ConnectAttachment/tags_ignore/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1), + acctest.CtResourceKey2: config.StringVariable(acctest.CtResourceValue2), + }), + "ignore_tag_keys": config.SetVariable( + config.StringVariable(acctest.CtResourceKey1), + ), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckConnectAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + expectFullResourceTags(ctx, resourceName, knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), // TODO: Should not be set + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), 
knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + }, + ExpectNonEmptyPlan: true, + }, + // 2: Update ignored tag + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/ConnectAttachment/tags_ignore/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ 
+ acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: config.StringVariable(acctest.CtResourceValue2), + }), + "ignore_tag_keys": config.SetVariable( + config.StringVariable(acctest.CtResourceKey1), + ), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckConnectAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + expectFullResourceTags(ctx, resourceName, knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), // TODO: Should not be set + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), 
knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + }, + ExpectNonEmptyPlan: true, + }, + // 3: Update both tags + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/ConnectAttachment/tags_ignore/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1Again), + acctest.CtResourceKey2: config.StringVariable(acctest.CtResourceValue2Updated), + }), + "ignore_tag_keys": config.SetVariable( + config.StringVariable(acctest.CtResourceKey1), + ), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckConnectAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: 
knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + expectFullResourceTags(ctx, resourceName, knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), // TODO: Should not be set + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Again), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Again), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, 
plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Again), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + }, + }, + ExpectNonEmptyPlan: true, + }, + }, + }) +} diff --git a/internal/service/networkmanager/connect_attachment_test.go b/internal/service/networkmanager/connect_attachment_test.go index 100cca581a60..2b07dce204c5 100644 --- a/internal/service/networkmanager/connect_attachment_test.go +++ b/internal/service/networkmanager/connect_attachment_test.go @@ -155,52 +155,6 @@ func TestAccNetworkManagerConnectAttachment_protocolNoEncap(t *testing.T) { }) } -func TestAccNetworkManagerConnectAttachment_tags(t *testing.T) { - ctx := acctest.Context(t) - var v awstypes.ConnectAttachment - resourceName := "aws_networkmanager_connect_attachment.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckConnectAttachmentDestroy(ctx), - Steps: []resource.TestStep{ - { - Config: testAccConnectAttachmentConfig_tags1(rName, "segment", "shared"), - Check: resource.ComposeTestCheckFunc( - testAccCheckConnectAttachmentExists(ctx, resourceName, &v), - resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, "1"), - resource.TestCheckResourceAttr(resourceName, "tags.segment", "shared"), - ), - }, - { - Config: testAccConnectAttachmentConfig_tags2(rName, "segment", "shared", 
"Name", "test"), - Check: resource.ComposeTestCheckFunc( - testAccCheckConnectAttachmentExists(ctx, resourceName, &v), - resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, "2"), - resource.TestCheckResourceAttr(resourceName, "tags.segment", "shared"), - resource.TestCheckResourceAttr(resourceName, "tags.Name", "test"), - ), - }, - { - Config: testAccConnectAttachmentConfig_tags1(rName, "segment", "shared"), - Check: resource.ComposeTestCheckFunc( - testAccCheckConnectAttachmentExists(ctx, resourceName, &v), - resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, "1"), - resource.TestCheckResourceAttr(resourceName, "tags.segment", "shared"), - ), - }, - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} - func testAccCheckConnectAttachmentExists(ctx context.Context, n string, v *awstypes.ConnectAttachment) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] @@ -249,34 +203,9 @@ func testAccCheckConnectAttachmentDestroy(ctx context.Context) resource.TestChec } func testAccConnectAttachmentConfig_base(rName string) string { - return acctest.ConfigCompose(acctest.ConfigAvailableAZsNoOptIn(), fmt.Sprintf(` -data "aws_region" "current" {} - -resource "aws_vpc" "test" { - cidr_block = "10.0.0.0/16" - - assign_generated_ipv6_cidr_block = true - - tags = { - Name = %[1]q - } -} - -resource "aws_subnet" "test" { - count = 2 - - vpc_id = aws_vpc.test.id - availability_zone = data.aws_availability_zones.available.names[count.index] - cidr_block = cidrsubnet(aws_vpc.test.cidr_block, 8, count.index) - - ipv6_cidr_block = cidrsubnet(aws_vpc.test.ipv6_cidr_block, 8, count.index) - assign_ipv6_address_on_creation = true - - tags = { - Name = %[1]q - } -} - + return acctest.ConfigCompose( + acctest.ConfigVPCWithSubnetsIPv6(rName, 2), + fmt.Sprintf(` resource "aws_networkmanager_global_network" "test" { tags = { Name = %[1]q @@ -296,6 +225,8 @@ resource 
"aws_networkmanager_core_network_policy_attachment" "test" { policy_document = data.aws_networkmanager_core_network_policy_document.test.json } +data "aws_region" "current" {} + data "aws_networkmanager_core_network_policy_document" "test" { core_network_configuration { vpn_ecmp_support = false @@ -331,7 +262,6 @@ data "aws_networkmanager_core_network_policy_document" "test" { } } } - `, rName)) } @@ -445,80 +375,3 @@ resource "aws_networkmanager_attachment_accepter" "test2" { } `) } - -func testAccConnectAttachmentConfig_tags1(rName, tagKey1, tagValue1 string) string { - return acctest.ConfigCompose(testAccConnectAttachmentConfig_base(rName), fmt.Sprintf(` -resource "aws_networkmanager_vpc_attachment" "test" { - subnet_arns = [aws_subnet.test[0].arn] - core_network_id = aws_networkmanager_core_network_policy_attachment.test.core_network_id - vpc_arn = aws_vpc.test.arn - tags = { - segment = "shared" - } -} - -resource "aws_networkmanager_attachment_accepter" "test" { - attachment_id = aws_networkmanager_vpc_attachment.test.id - attachment_type = aws_networkmanager_vpc_attachment.test.attachment_type -} - -resource "aws_networkmanager_connect_attachment" "test" { - core_network_id = aws_networkmanager_core_network.test.id - transport_attachment_id = aws_networkmanager_vpc_attachment.test.id - edge_location = aws_networkmanager_vpc_attachment.test.edge_location - options { - protocol = "GRE" - } - depends_on = [ - "aws_networkmanager_attachment_accepter.test" - ] - tags = { - %[1]q = %[2]q - } -} - -resource "aws_networkmanager_attachment_accepter" "test2" { - attachment_id = aws_networkmanager_connect_attachment.test.id - attachment_type = aws_networkmanager_connect_attachment.test.attachment_type -} -`, tagKey1, tagValue1)) -} - -func testAccConnectAttachmentConfig_tags2(rName, tagKey1, tagValue1, tagKey2, tagValue2 string) string { - return acctest.ConfigCompose(testAccConnectAttachmentConfig_base(rName), fmt.Sprintf(` -resource 
"aws_networkmanager_vpc_attachment" "test" { - subnet_arns = [aws_subnet.test[0].arn] - core_network_id = aws_networkmanager_core_network_policy_attachment.test.core_network_id - vpc_arn = aws_vpc.test.arn - tags = { - segment = "shared" - } -} - -resource "aws_networkmanager_attachment_accepter" "test" { - attachment_id = aws_networkmanager_vpc_attachment.test.id - attachment_type = aws_networkmanager_vpc_attachment.test.attachment_type -} - -resource "aws_networkmanager_connect_attachment" "test" { - core_network_id = aws_networkmanager_core_network.test.id - transport_attachment_id = aws_networkmanager_vpc_attachment.test.id - edge_location = aws_networkmanager_vpc_attachment.test.edge_location - options { - protocol = "GRE" - } - depends_on = [ - "aws_networkmanager_attachment_accepter.test" - ] - tags = { - %[1]q = %[2]q - %[3]q = %[4]q - } -} - -resource "aws_networkmanager_attachment_accepter" "test2" { - attachment_id = aws_networkmanager_connect_attachment.test.id - attachment_type = aws_networkmanager_connect_attachment.test.attachment_type -} -`, tagKey1, tagValue1, tagKey2, tagValue2)) -} diff --git a/internal/service/networkmanager/connect_peer.go b/internal/service/networkmanager/connect_peer.go index 9bcf2fdf422e..35c9be5dd2cb 100644 --- a/internal/service/networkmanager/connect_peer.go +++ b/internal/service/networkmanager/connect_peer.go @@ -28,6 +28,10 @@ import ( // @SDKResource("aws_networkmanager_connect_peer", name="Connect Peer") // @Tags(identifierAttribute="arn") +// @Testing(existsType="github.com/aws/aws-sdk-go-v2/service/networkmanager/types;awstypes;awstypes.ConnectPeer") +// @Testing(skipEmptyTags=true) +// @Testing(importIgnore="state") +// @Testing(generator=false) func resourceConnectPeer() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceConnectPeerCreate, @@ -213,7 +217,7 @@ func resourceConnectPeerCreate(ctx context.Context, d *schema.ResourceData, meta } outputRaw, err := tfresource.RetryWhen(ctx, 
d.Timeout(schema.TimeoutCreate), - func() (any, error) { + func(ctx context.Context) (any, error) { return conn.CreateConnectPeer(ctx, input) }, func(err error) (bool, error) { diff --git a/internal/service/networkmanager/connect_peer_tags_gen_test.go b/internal/service/networkmanager/connect_peer_tags_gen_test.go new file mode 100644 index 000000000000..f5c778457787 --- /dev/null +++ b/internal/service/networkmanager/connect_peer_tags_gen_test.go @@ -0,0 +1,2298 @@ +// Code generated by internal/generate/tagstests/main.go; DO NOT EDIT. + +package networkmanager_test + +import ( + "testing" + + awstypes "github.com/aws/aws-sdk-go-v2/service/networkmanager/types" + "github.com/hashicorp/terraform-plugin-testing/config" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/knownvalue" + "github.com/hashicorp/terraform-plugin-testing/plancheck" + "github.com/hashicorp/terraform-plugin-testing/statecheck" + "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccNetworkManagerConnectPeer_tags(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.ConnectPeer + resourceName := "aws_networkmanager_connect_peer.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckConnectPeerDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + ConfigDirectory: config.StaticDirectory("testdata/ConnectPeer/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckConnectPeerExists(ctx, 
resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/ConnectPeer/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + names.AttrState, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/ConnectPeer/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1Updated), + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckConnectPeerExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), 
knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1Updated), + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1Updated), + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1Updated), + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1Updated), + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/ConnectPeer/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1Updated), + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + names.AttrState, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/ConnectPeer/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckConnectPeerExists(ctx, resourceName, &v), + ), + 
ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/ConnectPeer/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + names.AttrState, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/ConnectPeer/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckConnectPeerExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + ConfigPlanChecks: 
resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/ConnectPeer/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + names.AttrState, + }, + }, + }, + }) +} + +func TestAccNetworkManagerConnectPeer_tags_null(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.ConnectPeer + resourceName := "aws_networkmanager_connect_peer.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckConnectPeerDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + ConfigDirectory: config.StaticDirectory("testdata/ConnectPeer/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: nil, + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckConnectPeerExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + 
plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + // TODO: Should be known + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/ConnectPeer/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: nil, + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + names.AttrState, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/ConnectPeer/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: nil, + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + }, + }, + }) +} + +func TestAccNetworkManagerConnectPeer_tags_EmptyMap(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.ConnectPeer + resourceName := "aws_networkmanager_connect_peer.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckConnectPeerDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + ConfigDirectory: config.StaticDirectory("testdata/ConnectPeer/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{}), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckConnectPeerExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + 
statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + // TODO: Should be known + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/ConnectPeer/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{}), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + names.AttrState, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/ConnectPeer/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: nil, + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + }, + }, + }) +} + +func TestAccNetworkManagerConnectPeer_tags_AddOnUpdate(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.ConnectPeer + resourceName := "aws_networkmanager_connect_peer.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckConnectPeerDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + ConfigDirectory: config.StaticDirectory("testdata/ConnectPeer/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: nil, + }, + Check: 
resource.ComposeAggregateTestCheckFunc( + testAccCheckConnectPeerExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + // TODO: Should be known + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/ConnectPeer/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckConnectPeerExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), 
knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/ConnectPeer/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + names.AttrState, + }, + }, + }, + }) +} + +func TestAccNetworkManagerConnectPeer_tags_EmptyTag_OnCreate(t *testing.T) { + t.Skip("Resource ConnectPeer does not support empty tags") + + ctx := acctest.Context(t) + + var v awstypes.ConnectPeer + resourceName := "aws_networkmanager_connect_peer.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckConnectPeerDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + ConfigDirectory: config.StaticDirectory("testdata/ConnectPeer/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckConnectPeerExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + 
plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + // TODO: Should be known + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/ConnectPeer/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + names.AttrState, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/ConnectPeer/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckConnectPeerExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/ConnectPeer/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: 
true, + ImportStateVerifyIgnore: []string{ + names.AttrState, + }, + }, + }, + }) +} + +func TestAccNetworkManagerConnectPeer_tags_EmptyTag_OnUpdate_Add(t *testing.T) { + t.Skip("Resource ConnectPeer does not support empty tags") + + ctx := acctest.Context(t) + + var v awstypes.ConnectPeer + resourceName := "aws_networkmanager_connect_peer.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckConnectPeerDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + ConfigDirectory: config.StaticDirectory("testdata/ConnectPeer/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckConnectPeerExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: 
knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/ConnectPeer/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + acctest.CtKey2: config.StringVariable(""), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckConnectPeerExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + acctest.CtKey2: knownvalue.StringExact(""), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + acctest.CtKey2: knownvalue.StringExact(""), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + acctest.CtKey2: knownvalue.StringExact(""), + })), + // TODO: Should be known + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/ConnectPeer/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + acctest.CtKey2: config.StringVariable(""), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + names.AttrState, + }, + }, + { + ConfigDirectory: 
config.StaticDirectory("testdata/ConnectPeer/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckConnectPeerExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/ConnectPeer/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + names.AttrState, + }, + }, + }, + }) +} + +func TestAccNetworkManagerConnectPeer_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { + t.Skip("Resource ConnectPeer does not support empty tags") + + ctx := acctest.Context(t) + + var v awstypes.ConnectPeer + resourceName := 
"aws_networkmanager_connect_peer.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckConnectPeerDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + ConfigDirectory: config.StaticDirectory("testdata/ConnectPeer/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckConnectPeerExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/ConnectPeer/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + }, + Check: 
resource.ComposeAggregateTestCheckFunc( + testAccCheckConnectPeerExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + // TODO: Should be known + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/ConnectPeer/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + names.AttrState, + }, + }, + }, + }) +} + +func TestAccNetworkManagerConnectPeer_tags_DefaultTags_providerOnly(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.ConnectPeer + resourceName := "aws_networkmanager_connect_peer.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckConnectPeerDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/ConnectPeer/tags_defaults/"), + ConfigVariables: 
config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckConnectPeerExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/ConnectPeer/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + names.AttrState, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/ConnectPeer/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1Updated), + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + 
}), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckConnectPeerExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1Updated), + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1Updated), + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/ConnectPeer/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1Updated), + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + names.AttrState, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/ConnectPeer/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + 
acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckConnectPeerExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/ConnectPeer/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + names.AttrState, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/ConnectPeer/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckConnectPeerExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, 
tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/ConnectPeer/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + names.AttrState, + }, + }, + }, + }) +} + +func TestAccNetworkManagerConnectPeer_tags_DefaultTags_nonOverlapping(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.ConnectPeer + resourceName := "aws_networkmanager_connect_peer.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckConnectPeerDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/ConnectPeer/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1), + }), + }, + Check: 
resource.ComposeAggregateTestCheckFunc( + testAccCheckConnectPeerExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1), + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1), + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/ConnectPeer/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + names.AttrState, + }, + }, + { + ProtoV5ProviderFactories: 
acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/ConnectPeer/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1Updated), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: config.StringVariable(acctest.CtResourceValue2), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckConnectPeerExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1Updated), + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: 
knownvalue.StringExact(acctest.CtProviderValue1Updated), + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/ConnectPeer/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1Updated), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: config.StringVariable(acctest.CtResourceValue2), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + names.AttrState, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/ConnectPeer/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckConnectPeerExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), 
knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/ConnectPeer/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + names.AttrState, + }, + }, + }, + }) +} + +func TestAccNetworkManagerConnectPeer_tags_DefaultTags_overlapping(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.ConnectPeer + resourceName := "aws_networkmanager_connect_peer.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckConnectPeerDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/ConnectPeer/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtResourceValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckConnectPeerExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + ConfigPlanChecks: 
resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/ConnectPeer/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtResourceValue1), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + names.AttrState, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/ConnectPeer/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtProviderValue1), + acctest.CtOverlapKey2: config.StringVariable("providervalue2"), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtResourceValue1), + acctest.CtOverlapKey2: config.StringVariable(acctest.CtResourceValue2), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckConnectPeerExists(ctx, resourceName, &v), + ), + ConfigStateChecks: 
[]statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + acctest.CtOverlapKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + acctest.CtOverlapKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + acctest.CtOverlapKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + acctest.CtOverlapKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/ConnectPeer/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtProviderValue1), + acctest.CtOverlapKey2: config.StringVariable("providervalue2"), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtResourceValue1), + acctest.CtOverlapKey2: config.StringVariable(acctest.CtResourceValue2), + }), + }, + ResourceName: 
resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + names.AttrState, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/ConnectPeer/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtResourceValue2), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckConnectPeerExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue2), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/ConnectPeer/tags_defaults/"), + ConfigVariables: config.Variables{ + 
acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtResourceValue2), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + names.AttrState, + }, + }, + }, + }) +} + +func TestAccNetworkManagerConnectPeer_tags_DefaultTags_updateToProviderOnly(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.ConnectPeer + resourceName := "aws_networkmanager_connect_peer.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckConnectPeerDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/ConnectPeer/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckConnectPeerExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + 
plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/ConnectPeer/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckConnectPeerExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/ConnectPeer/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ 
+ acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + names.AttrState, + }, + }, + }, + }) +} + +func TestAccNetworkManagerConnectPeer_tags_DefaultTags_updateToResourceOnly(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.ConnectPeer + resourceName := "aws_networkmanager_connect_peer.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckConnectPeerDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/ConnectPeer/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckConnectPeerExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + 
{ + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/ConnectPeer/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckConnectPeerExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/ConnectPeer/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + names.AttrState, + }, + }, + }, + }) +} + +func TestAccNetworkManagerConnectPeer_tags_DefaultTags_emptyResourceTag(t *testing.T) { + 
t.Skip("Resource ConnectPeer does not support empty tags") + + ctx := acctest.Context(t) + + var v awstypes.ConnectPeer + resourceName := "aws_networkmanager_connect_peer.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckConnectPeerDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/ConnectPeer/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckConnectPeerExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + // TODO: Should be known + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: 
config.StaticDirectory("testdata/ConnectPeer/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + names.AttrState, + }, + }, + }, + }) +} + +func TestAccNetworkManagerConnectPeer_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T) { + t.Skip("Resource ConnectPeer does not support empty tags") + + ctx := acctest.Context(t) + + var v awstypes.ConnectPeer + resourceName := "aws_networkmanager_connect_peer.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckConnectPeerDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/ConnectPeer/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckConnectPeerExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + 
plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + // TODO: Should be known + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/ConnectPeer/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + names.AttrState, + }, + }, + }, + }) +} + +func TestAccNetworkManagerConnectPeer_tags_DefaultTags_nullOverlappingResourceTag(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.ConnectPeer + resourceName := "aws_networkmanager_connect_peer.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckConnectPeerDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/ConnectPeer/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: nil, + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckConnectPeerExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), 
knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtProviderValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtProviderValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/ConnectPeer/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: nil, + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + names.AttrState, + }, + }, + }, + }) +} + +func TestAccNetworkManagerConnectPeer_tags_DefaultTags_nullNonOverlappingResourceTag(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.ConnectPeer + resourceName := "aws_networkmanager_connect_peer.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckConnectPeerDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/ConnectPeer/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: 
config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: nil, + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckConnectPeerExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/ConnectPeer/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: nil, + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + names.AttrState, + }, + }, + }, + }) +} + +func TestAccNetworkManagerConnectPeer_tags_ComputedTag_OnCreate(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.ConnectPeer + resourceName := "aws_networkmanager_connect_peer.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: 
func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckConnectPeerDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/ConnectPeer/tagsComputed1/"), + ConfigVariables: config.Variables{ + "unknownTagKey": config.StringVariable("computedkey1"), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckConnectPeerExists(ctx, resourceName, &v), + resource.TestCheckResourceAttrPair(resourceName, "tags.computedkey1", "null_resource.test", names.AttrID), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapSizeExact(1)), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapSizeExact(1)), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTags)), + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/ConnectPeer/tagsComputed1/"), + ConfigVariables: config.Variables{ + "unknownTagKey": config.StringVariable("computedkey1"), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + names.AttrState, + }, + }, + }, + }) +} + +func TestAccNetworkManagerConnectPeer_tags_ComputedTag_OnUpdate_Add(t 
*testing.T) { + ctx := acctest.Context(t) + + var v awstypes.ConnectPeer + resourceName := "aws_networkmanager_connect_peer.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckConnectPeerDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/ConnectPeer/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckConnectPeerExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/ConnectPeer/tagsComputed2/"), + ConfigVariables: config.Variables{ + 
"unknownTagKey": config.StringVariable("computedkey1"), + "knownTagKey": config.StringVariable(acctest.CtKey1), + "knownTagValue": config.StringVariable(acctest.CtValue1), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckConnectPeerExists(ctx, resourceName, &v), + resource.TestCheckResourceAttrPair(resourceName, "tags.computedkey1", "null_resource.test", names.AttrID), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapSizeExact(2)), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapPartial(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapSizeExact(2)), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapPartial(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTags)), + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/ConnectPeer/tagsComputed2/"), + ConfigVariables: config.Variables{ + "unknownTagKey": config.StringVariable("computedkey1"), + "knownTagKey": config.StringVariable(acctest.CtKey1), + "knownTagValue": 
config.StringVariable(acctest.CtValue1), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + names.AttrState, + }, + }, + }, + }) +} + +func TestAccNetworkManagerConnectPeer_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.ConnectPeer + resourceName := "aws_networkmanager_connect_peer.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckConnectPeerDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/ConnectPeer/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckConnectPeerExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), 
knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/ConnectPeer/tagsComputed1/"), + ConfigVariables: config.Variables{ + "unknownTagKey": config.StringVariable(acctest.CtKey1), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckConnectPeerExists(ctx, resourceName, &v), + resource.TestCheckResourceAttrPair(resourceName, acctest.CtTagsKey1, "null_resource.test", names.AttrID), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapSizeExact(1)), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapSizeExact(1)), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTags)), + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/ConnectPeer/tagsComputed1/"), + ConfigVariables: config.Variables{ + "unknownTagKey": config.StringVariable(acctest.CtKey1), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + names.AttrState, + }, + }, + }, + }) +} + +func TestAccNetworkManagerConnectPeer_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { + ctx := acctest.Context(t) + + var v 
awstypes.ConnectPeer + resourceName := "aws_networkmanager_connect_peer.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckConnectPeerDestroy(ctx), + Steps: []resource.TestStep{ + // 1: Create + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/ConnectPeer/tags_ignore/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1), + }), + "ignore_tag_keys": config.SetVariable( + config.StringVariable(acctest.CtProviderKey1), + ), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckConnectPeerExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + expectFullResourceTags(ctx, resourceName, knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1), // TODO: Should not be set + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, 
tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectEmptyPlan(), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectEmptyPlan(), + }, + }, + }, + // 2: Update ignored tag only + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/ConnectPeer/tags_ignore/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1Updated), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1), + }), + "ignore_tag_keys": config.SetVariable( + config.StringVariable(acctest.CtProviderKey1), + ), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckConnectPeerExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + expectFullResourceTags(ctx, resourceName, knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1), // TODO: Should not be set + acctest.CtResourceKey1: 
knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectEmptyPlan(), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectEmptyPlan(), + }, + }, + }, + // 3: Update both tags + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/ConnectPeer/tags_ignore/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1Again), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1Updated), + }), + "ignore_tag_keys": config.SetVariable( + config.StringVariable(acctest.CtProviderKey1), + ), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckConnectPeerExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: 
knownvalue.StringExact(acctest.CtResourceValue1Updated), + })), + expectFullResourceTags(ctx, resourceName, knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1), // TODO: Should not be set + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + })), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectEmptyPlan(), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectEmptyPlan(), + }, + }, + }, + }, + }) +} + +func TestAccNetworkManagerConnectPeer_tags_IgnoreTags_Overlap_ResourceTag(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.ConnectPeer + resourceName := "aws_networkmanager_connect_peer.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckConnectPeerDestroy(ctx), + Steps: []resource.TestStep{ + // 1: Create + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/ConnectPeer/tags_ignore/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1), + acctest.CtResourceKey2: 
config.StringVariable(acctest.CtResourceValue2), + }), + "ignore_tag_keys": config.SetVariable( + config.StringVariable(acctest.CtResourceKey1), + ), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckConnectPeerExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + expectFullResourceTags(ctx, resourceName, knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), // TODO: Should not be set + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), 
+ acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + }, + ExpectNonEmptyPlan: true, + }, + // 2: Update ignored tag + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/ConnectPeer/tags_ignore/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: config.StringVariable(acctest.CtResourceValue2), + }), + "ignore_tag_keys": config.SetVariable( + config.StringVariable(acctest.CtResourceKey1), + ), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckConnectPeerExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ 
+ acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + expectFullResourceTags(ctx, resourceName, knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), // TODO: Should not be set + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), 
+ acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + }, + ExpectNonEmptyPlan: true, + }, + // 3: Update both tags + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/ConnectPeer/tags_ignore/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1Again), + acctest.CtResourceKey2: config.StringVariable(acctest.CtResourceValue2Updated), + }), + "ignore_tag_keys": config.SetVariable( + config.StringVariable(acctest.CtResourceKey1), + ), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckConnectPeerExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + expectFullResourceTags(ctx, resourceName, knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), // TODO: Should not be set + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, 
tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Again), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Again), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Again), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + }, + }, + ExpectNonEmptyPlan: true, + }, + }, + }) +} diff --git a/internal/service/networkmanager/connect_peer_test.go b/internal/service/networkmanager/connect_peer_test.go index 7e99b21559fa..8855d7d1165d 100644 --- 
a/internal/service/networkmanager/connect_peer_test.go +++ b/internal/service/networkmanager/connect_peer_test.go @@ -153,56 +153,6 @@ func TestAccNetworkManagerConnectPeer_subnetARN(t *testing.T) { }) } -func TestAccNetworkManagerConnectPeer_tags(t *testing.T) { - ctx := acctest.Context(t) - var v awstypes.ConnectPeer - resourceName := "aws_networkmanager_connect_peer.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - insideCidrBlocksv4 := "169.254.10.0/29" - peerAddress := "1.1.1.1" - protocol := "GRE" - asn := "65501" - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckConnectPeerDestroy(ctx), - Steps: []resource.TestStep{ - { - Config: testAccConnectPeerConfig_tags1(rName, "Name", "test", insideCidrBlocksv4, peerAddress, asn, protocol), - Check: resource.ComposeTestCheckFunc( - testAccCheckConnectPeerExists(ctx, resourceName, &v), - resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, "1"), - resource.TestCheckResourceAttr(resourceName, "tags.Name", "test"), - ), - }, - { - Config: testAccConnectPeerConfig_tags2(rName, "Name", "test", "env", "test", insideCidrBlocksv4, peerAddress, asn, protocol), - Check: resource.ComposeTestCheckFunc( - testAccCheckConnectPeerExists(ctx, resourceName, &v), - resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, "2"), - resource.TestCheckResourceAttr(resourceName, "tags.env", "test"), - resource.TestCheckResourceAttr(resourceName, "tags.Name", "test"), - ), - }, - { - Config: testAccConnectPeerConfig_tags1(rName, "Name", "test", insideCidrBlocksv4, peerAddress, asn, protocol), - Check: resource.ComposeTestCheckFunc( - testAccCheckConnectPeerExists(ctx, resourceName, &v), - resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, "1"), - 
resource.TestCheckResourceAttr(resourceName, "tags.Name", "test"), - ), - }, - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} - func testAccCheckConnectPeerExists(ctx context.Context, n string, v *awstypes.ConnectPeer) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] @@ -254,29 +204,9 @@ func testAccCheckConnectPeerDestroy(ctx context.Context) resource.TestCheckFunc } func testAccConnectPeerConfig_base(rName string, protocol string) string { - return acctest.ConfigCompose(acctest.ConfigAvailableAZsNoOptIn(), fmt.Sprintf(` -data "aws_region" "current" {} - -resource "aws_vpc" "test" { - cidr_block = "10.0.0.0/16" - assign_generated_ipv6_cidr_block = true - tags = { - Name = %[1]q - } -} - -resource "aws_subnet" "test" { - count = 2 - vpc_id = aws_vpc.test.id - availability_zone = data.aws_availability_zones.available.names[count.index] - cidr_block = cidrsubnet(aws_vpc.test.cidr_block, 8, count.index) - ipv6_cidr_block = cidrsubnet(aws_vpc.test.ipv6_cidr_block, 8, count.index) - assign_ipv6_address_on_creation = true - tags = { - Name = %[1]q - } -} - + return acctest.ConfigCompose( + acctest.ConfigVPCWithSubnetsIPv6(rName, 2), + fmt.Sprintf(` resource "aws_networkmanager_global_network" "test" { tags = { Name = %[1]q @@ -296,6 +226,8 @@ resource "aws_networkmanager_core_network_policy_attachment" "test" { policy_document = data.aws_networkmanager_core_network_policy_document.test.json } +data "aws_region" "current" {} + data "aws_networkmanager_core_network_policy_document" "test" { core_network_configuration { vpn_ecmp_support = false @@ -432,40 +364,3 @@ resource "aws_subnet" "test2" { } `, rName, peerAddress, asn)) } - -func testAccConnectPeerConfig_tags1(rName, tagKey1, tagValue1 string, insideCidrBlocks string, peerAddress string, asn string, protocol string) string { - return acctest.ConfigCompose(testAccConnectPeerConfig_base(rName, protocol), 
fmt.Sprintf(` -resource "aws_networkmanager_connect_peer" "test" { - connect_attachment_id = aws_networkmanager_connect_attachment.test.id - peer_address = %[4]q - bgp_options { - peer_asn = %[5]q - } - inside_cidr_blocks = [ - %[3]q - ] - tags = { - %[1]q = %[2]q - } -} -`, tagKey1, tagValue1, insideCidrBlocks, peerAddress, asn)) -} - -func testAccConnectPeerConfig_tags2(rName, tagKey1, tagValue1, tagKey2, tagValue2 string, insideCidrBlocks string, peerAddress string, asn string, protocol string) string { - return acctest.ConfigCompose(testAccConnectPeerConfig_base(rName, protocol), fmt.Sprintf(` -resource "aws_networkmanager_connect_peer" "test" { - connect_attachment_id = aws_networkmanager_connect_attachment.test.id - peer_address = %[6]q - bgp_options { - peer_asn = %[7]q - } - inside_cidr_blocks = [ - %[5]q - ] - tags = { - %[1]q = %[2]q - %[3]q = %[4]q - } -} -`, tagKey1, tagValue1, tagKey2, tagValue2, insideCidrBlocks, peerAddress, asn)) -} diff --git a/internal/service/networkmanager/connection.go b/internal/service/networkmanager/connection.go index 8655228ba6a8..e6e18981712e 100644 --- a/internal/service/networkmanager/connection.go +++ b/internal/service/networkmanager/connection.go @@ -29,6 +29,10 @@ import ( // @SDKResource("aws_networkmanager_connection", name="Connection") // @Tags(identifierAttribute="arn") +// @Testing(generator=false) +// @Testing(serialize=true) +// @Testing(importStateIdAttribute="arn") +// @Testing(skipEmptyTags=true) func resourceConnection() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceConnectionCreate, diff --git a/internal/service/networkmanager/connection_tags_gen_test.go b/internal/service/networkmanager/connection_tags_gen_test.go new file mode 100644 index 000000000000..3b0619a4a746 --- /dev/null +++ b/internal/service/networkmanager/connection_tags_gen_test.go @@ -0,0 +1,2274 @@ +// Code generated by internal/generate/tagstests/main.go; DO NOT EDIT. 
+ +package networkmanager_test + +import ( + "testing" + + "github.com/hashicorp/terraform-plugin-testing/config" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/knownvalue" + "github.com/hashicorp/terraform-plugin-testing/plancheck" + "github.com/hashicorp/terraform-plugin-testing/statecheck" + "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func testAccNetworkManagerConnection_tagsSerial(t *testing.T) { + t.Helper() + + testCases := map[string]func(t *testing.T){ + acctest.CtBasic: testAccNetworkManagerConnection_tags, + "null": testAccNetworkManagerConnection_tags_null, + "EmptyMap": testAccNetworkManagerConnection_tags_EmptyMap, + "AddOnUpdate": testAccNetworkManagerConnection_tags_AddOnUpdate, + "EmptyTag_OnCreate": testAccNetworkManagerConnection_tags_EmptyTag_OnCreate, + "EmptyTag_OnUpdate_Add": testAccNetworkManagerConnection_tags_EmptyTag_OnUpdate_Add, + "EmptyTag_OnUpdate_Replace": testAccNetworkManagerConnection_tags_EmptyTag_OnUpdate_Replace, + "DefaultTags_providerOnly": testAccNetworkManagerConnection_tags_DefaultTags_providerOnly, + "DefaultTags_nonOverlapping": testAccNetworkManagerConnection_tags_DefaultTags_nonOverlapping, + "DefaultTags_overlapping": testAccNetworkManagerConnection_tags_DefaultTags_overlapping, + "DefaultTags_updateToProviderOnly": testAccNetworkManagerConnection_tags_DefaultTags_updateToProviderOnly, + "DefaultTags_updateToResourceOnly": testAccNetworkManagerConnection_tags_DefaultTags_updateToResourceOnly, + "DefaultTags_emptyResourceTag": testAccNetworkManagerConnection_tags_DefaultTags_emptyResourceTag, + "DefaultTags_nullOverlappingResourceTag": testAccNetworkManagerConnection_tags_DefaultTags_nullOverlappingResourceTag, + "DefaultTags_nullNonOverlappingResourceTag": 
testAccNetworkManagerConnection_tags_DefaultTags_nullNonOverlappingResourceTag, + "ComputedTag_OnCreate": testAccNetworkManagerConnection_tags_ComputedTag_OnCreate, + "ComputedTag_OnUpdate_Add": testAccNetworkManagerConnection_tags_ComputedTag_OnUpdate_Add, + "ComputedTag_OnUpdate_Replace": testAccNetworkManagerConnection_tags_ComputedTag_OnUpdate_Replace, + "IgnoreTags_Overlap_DefaultTag": testAccNetworkManagerConnection_tags_IgnoreTags_Overlap_DefaultTag, + "IgnoreTags_Overlap_ResourceTag": testAccNetworkManagerConnection_tags_IgnoreTags_Overlap_ResourceTag, + } + + acctest.RunSerialTests1Level(t, testCases, 0) +} + +func testAccNetworkManagerConnection_tags(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_networkmanager_connection.test" + + acctest.Test(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckConnectionDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + ConfigDirectory: config.StaticDirectory("testdata/Connection/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckConnectionExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + 
plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Connection/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, names.AttrARN), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: names.AttrARN, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Connection/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1Updated), + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckConnectionExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1Updated), + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1Updated), + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ 
+ PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1Updated), + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1Updated), + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Connection/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1Updated), + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, names.AttrARN), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: names.AttrARN, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Connection/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckConnectionExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + 
ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Connection/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, names.AttrARN), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: names.AttrARN, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Connection/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckConnectionExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), 
knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Connection/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, names.AttrARN), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: names.AttrARN, + }, + }, + }) +} + +func testAccNetworkManagerConnection_tags_null(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_networkmanager_connection.test" + + acctest.Test(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckConnectionDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + ConfigDirectory: config.StaticDirectory("testdata/Connection/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: nil, + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckConnectionExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + // TODO: Should be known + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Connection/tags/"), + ConfigVariables: 
config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: nil, + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, names.AttrARN), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: names.AttrARN, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Connection/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: nil, + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + }, + }, + }) +} + +func testAccNetworkManagerConnection_tags_EmptyMap(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_networkmanager_connection.test" + + acctest.Test(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckConnectionDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + ConfigDirectory: config.StaticDirectory("testdata/Connection/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{}), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckConnectionExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, 
plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + // TODO: Should be known + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Connection/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{}), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, names.AttrARN), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: names.AttrARN, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Connection/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: nil, + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + }, + }, + }) +} + +func testAccNetworkManagerConnection_tags_AddOnUpdate(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_networkmanager_connection.test" + + acctest.Test(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckConnectionDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + ConfigDirectory: config.StaticDirectory("testdata/Connection/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckConnectionExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + 
statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + // TODO: Should be known + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Connection/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckConnectionExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Connection/tags/"), + ConfigVariables: config.Variables{ + 
acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, names.AttrARN), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: names.AttrARN, + }, + }, + }) +} + +func testAccNetworkManagerConnection_tags_EmptyTag_OnCreate(t *testing.T) { + t.Skip("Resource Connection does not support empty tags") + + ctx := acctest.Context(t) + + resourceName := "aws_networkmanager_connection.test" + + acctest.Test(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckConnectionDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + ConfigDirectory: config.StaticDirectory("testdata/Connection/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckConnectionExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), 
+ // TODO: Should be known + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Connection/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, names.AttrARN), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: names.AttrARN, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Connection/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckConnectionExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Connection/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, names.AttrARN), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: names.AttrARN, + }, + }, + }) +} + +func 
testAccNetworkManagerConnection_tags_EmptyTag_OnUpdate_Add(t *testing.T) { + t.Skip("Resource Connection does not support empty tags") + + ctx := acctest.Context(t) + + resourceName := "aws_networkmanager_connection.test" + + acctest.Test(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckConnectionDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + ConfigDirectory: config.StaticDirectory("testdata/Connection/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckConnectionExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Connection/tags/"), + ConfigVariables: 
config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + acctest.CtKey2: config.StringVariable(""), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckConnectionExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + acctest.CtKey2: knownvalue.StringExact(""), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + acctest.CtKey2: knownvalue.StringExact(""), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + acctest.CtKey2: knownvalue.StringExact(""), + })), + // TODO: Should be known + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Connection/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + acctest.CtKey2: config.StringVariable(""), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, names.AttrARN), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: names.AttrARN, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Connection/tags/"), + ConfigVariables: config.Variables{ + 
acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckConnectionExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Connection/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, names.AttrARN), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: names.AttrARN, + }, + }, + }) +} + +func testAccNetworkManagerConnection_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { + t.Skip("Resource Connection does not support empty tags") + + ctx := acctest.Context(t) + + resourceName := "aws_networkmanager_connection.test" + + acctest.Test(ctx, t, 
resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckConnectionDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + ConfigDirectory: config.StaticDirectory("testdata/Connection/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckConnectionExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Connection/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckConnectionExists(ctx, resourceName), + ), + 
ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + // TODO: Should be known + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Connection/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, names.AttrARN), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: names.AttrARN, + }, + }, + }) +} + +func testAccNetworkManagerConnection_tags_DefaultTags_providerOnly(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_networkmanager_connection.test" + + acctest.Test(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckConnectionDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Connection/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: 
config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckConnectionExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Connection/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, names.AttrARN), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: names.AttrARN, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Connection/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1Updated), + acctest.CtKey2: 
config.StringVariable(acctest.CtValue2), + }), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckConnectionExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1Updated), + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1Updated), + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Connection/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1Updated), + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, names.AttrARN), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: names.AttrARN, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Connection/tags_defaults/"), + 
ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckConnectionExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Connection/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, names.AttrARN), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: names.AttrARN, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Connection/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: nil, + }, + Check: 
resource.ComposeAggregateTestCheckFunc( + testAccCheckConnectionExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Connection/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, names.AttrARN), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: names.AttrARN, + }, + }, + }) +} + +func testAccNetworkManagerConnection_tags_DefaultTags_nonOverlapping(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_networkmanager_connection.test" + + acctest.Test(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckConnectionDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Connection/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: 
config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckConnectionExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1), + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1), + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Connection/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: 
config.StringVariable(acctest.CtResourceValue1), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, names.AttrARN), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: names.AttrARN, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Connection/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1Updated), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: config.StringVariable(acctest.CtResourceValue2), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckConnectionExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1Updated), + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + 
acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1Updated), + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Connection/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1Updated), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: config.StringVariable(acctest.CtResourceValue2), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, names.AttrARN), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: names.AttrARN, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Connection/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckConnectionExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + 
ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Connection/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, names.AttrARN), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: names.AttrARN, + }, + }, + }) +} + +func testAccNetworkManagerConnection_tags_DefaultTags_overlapping(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_networkmanager_connection.test" + + acctest.Test(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckConnectionDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Connection/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtResourceValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckConnectionExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, 
tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Connection/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtResourceValue1), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, names.AttrARN), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: names.AttrARN, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Connection/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: 
config.StringVariable(acctest.CtProviderValue1), + acctest.CtOverlapKey2: config.StringVariable("providervalue2"), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtResourceValue1), + acctest.CtOverlapKey2: config.StringVariable(acctest.CtResourceValue2), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckConnectionExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + acctest.CtOverlapKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + acctest.CtOverlapKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + acctest.CtOverlapKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + acctest.CtOverlapKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Connection/tags_defaults/"), + ConfigVariables: 
config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtProviderValue1), + acctest.CtOverlapKey2: config.StringVariable("providervalue2"), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtResourceValue1), + acctest.CtOverlapKey2: config.StringVariable(acctest.CtResourceValue2), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, names.AttrARN), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: names.AttrARN, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Connection/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtResourceValue2), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckConnectionExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue2), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), 
knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Connection/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtResourceValue2), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, names.AttrARN), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: names.AttrARN, + }, + }, + }) +} + +func testAccNetworkManagerConnection_tags_DefaultTags_updateToProviderOnly(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_networkmanager_connection.test" + + acctest.Test(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckConnectionDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Connection/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckConnectionExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + 
statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Connection/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckConnectionExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, 
tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Connection/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, names.AttrARN), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: names.AttrARN, + }, + }, + }) +} + +func testAccNetworkManagerConnection_tags_DefaultTags_updateToResourceOnly(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_networkmanager_connection.test" + + acctest.Test(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckConnectionDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Connection/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckConnectionExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), 
knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Connection/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckConnectionExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + 
}, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Connection/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, names.AttrARN), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: names.AttrARN, + }, + }, + }) +} + +func testAccNetworkManagerConnection_tags_DefaultTags_emptyResourceTag(t *testing.T) { + t.Skip("Resource Connection does not support empty tags") + + ctx := acctest.Context(t) + + resourceName := "aws_networkmanager_connection.test" + + acctest.Test(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckConnectionDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Connection/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckConnectionExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), 
+ }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + // TODO: Should be known + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Connection/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, names.AttrARN), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: names.AttrARN, + }, + }, + }) +} + +func testAccNetworkManagerConnection_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T) { + t.Skip("Resource Connection does not support empty tags") + + ctx := acctest.Context(t) + + resourceName := "aws_networkmanager_connection.test" + + acctest.Test(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckConnectionDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Connection/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + acctest.CtResourceTags: nil, + }, + Check: 
resource.ComposeAggregateTestCheckFunc( + testAccCheckConnectionExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + // TODO: Should be known + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Connection/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, names.AttrARN), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: names.AttrARN, + }, + }, + }) +} + +func testAccNetworkManagerConnection_tags_DefaultTags_nullOverlappingResourceTag(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_networkmanager_connection.test" + + acctest.Test(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckConnectionDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Connection/tags_defaults/"), + ConfigVariables: config.Variables{ + 
acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: nil, + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckConnectionExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtProviderValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtProviderValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Connection/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: nil, + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, names.AttrARN), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: names.AttrARN, + }, + }, + }) +} + +func testAccNetworkManagerConnection_tags_DefaultTags_nullNonOverlappingResourceTag(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := 
"aws_networkmanager_connection.test" + + acctest.Test(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckConnectionDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Connection/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: nil, + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckConnectionExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Connection/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: 
config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: nil, + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, names.AttrARN), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: names.AttrARN, + }, + }, + }) +} + +func testAccNetworkManagerConnection_tags_ComputedTag_OnCreate(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_networkmanager_connection.test" + + acctest.Test(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckConnectionDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Connection/tagsComputed1/"), + ConfigVariables: config.Variables{ + "unknownTagKey": config.StringVariable("computedkey1"), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckConnectionExists(ctx, resourceName), + resource.TestCheckResourceAttrPair(resourceName, "tags.computedkey1", "null_resource.test", names.AttrID), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapSizeExact(1)), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapSizeExact(1)), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTags)), + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + 
PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Connection/tagsComputed1/"), + ConfigVariables: config.Variables{ + "unknownTagKey": config.StringVariable("computedkey1"), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, names.AttrARN), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: names.AttrARN, + }, + }, + }) +} + +func testAccNetworkManagerConnection_tags_ComputedTag_OnUpdate_Add(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_networkmanager_connection.test" + + acctest.Test(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckConnectionDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Connection/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckConnectionExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + 
plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Connection/tagsComputed2/"), + ConfigVariables: config.Variables{ + "unknownTagKey": config.StringVariable("computedkey1"), + "knownTagKey": config.StringVariable(acctest.CtKey1), + "knownTagValue": config.StringVariable(acctest.CtValue1), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckConnectionExists(ctx, resourceName), + resource.TestCheckResourceAttrPair(resourceName, "tags.computedkey1", "null_resource.test", names.AttrID), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapSizeExact(2)), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapPartial(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapSizeExact(2)), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapPartial(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTags)), + plancheck.ExpectUnknownValue(resourceName, 
tfjsonpath.New(names.AttrTagsAll)), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Connection/tagsComputed2/"), + ConfigVariables: config.Variables{ + "unknownTagKey": config.StringVariable("computedkey1"), + "knownTagKey": config.StringVariable(acctest.CtKey1), + "knownTagValue": config.StringVariable(acctest.CtValue1), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, names.AttrARN), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: names.AttrARN, + }, + }, + }) +} + +func testAccNetworkManagerConnection_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_networkmanager_connection.test" + + acctest.Test(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckConnectionDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Connection/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckConnectionExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + 
statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Connection/tagsComputed1/"), + ConfigVariables: config.Variables{ + "unknownTagKey": config.StringVariable(acctest.CtKey1), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckConnectionExists(ctx, resourceName), + resource.TestCheckResourceAttrPair(resourceName, acctest.CtTagsKey1, "null_resource.test", names.AttrID), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapSizeExact(1)), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapSizeExact(1)), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTags)), + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + 
plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Connection/tagsComputed1/"), + ConfigVariables: config.Variables{ + "unknownTagKey": config.StringVariable(acctest.CtKey1), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, names.AttrARN), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: names.AttrARN, + }, + }, + }) +} + +func testAccNetworkManagerConnection_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_networkmanager_connection.test" + + acctest.Test(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckConnectionDestroy(ctx), + Steps: []resource.TestStep{ + // 1: Create + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Connection/tags_ignore/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1), + }), + "ignore_tag_keys": config.SetVariable( + config.StringVariable(acctest.CtProviderKey1), + ), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckConnectionExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + statecheck.ExpectKnownValue(resourceName, 
tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + expectFullResourceTags(ctx, resourceName, knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1), // TODO: Should not be set + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectEmptyPlan(), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectEmptyPlan(), + }, + }, + }, + // 2: Update ignored tag only + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Connection/tags_ignore/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1Updated), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1), + }), + "ignore_tag_keys": config.SetVariable( + config.StringVariable(acctest.CtProviderKey1), + ), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckConnectionExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + 
statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + expectFullResourceTags(ctx, resourceName, knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1), // TODO: Should not be set + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectEmptyPlan(), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectEmptyPlan(), + }, + }, + }, + // 3: Update both tags + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Connection/tags_ignore/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1Again), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1Updated), + }), + 
"ignore_tag_keys": config.SetVariable( + config.StringVariable(acctest.CtProviderKey1), + ), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckConnectionExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + })), + expectFullResourceTags(ctx, resourceName, knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1), // TODO: Should not be set + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + })), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectEmptyPlan(), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectEmptyPlan(), + }, + }, + }, + }, + }) +} + +func testAccNetworkManagerConnection_tags_IgnoreTags_Overlap_ResourceTag(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_networkmanager_connection.test" + + acctest.Test(ctx, t, resource.TestCase{ + PreCheck: func() 
{ acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckConnectionDestroy(ctx), + Steps: []resource.TestStep{ + // 1: Create + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Connection/tags_ignore/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1), + acctest.CtResourceKey2: config.StringVariable(acctest.CtResourceValue2), + }), + "ignore_tag_keys": config.SetVariable( + config.StringVariable(acctest.CtResourceKey1), + ), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckConnectionExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + expectFullResourceTags(ctx, resourceName, knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), // TODO: Should not be set + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + 
})), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + }, + ExpectNonEmptyPlan: true, + }, + // 2: Update ignored tag + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Connection/tags_ignore/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: config.StringVariable(acctest.CtResourceValue2), + }), + "ignore_tag_keys": config.SetVariable( + 
config.StringVariable(acctest.CtResourceKey1), + ), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckConnectionExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + expectFullResourceTags(ctx, resourceName, knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), // TODO: Should not be set + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + 
plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + }, + ExpectNonEmptyPlan: true, + }, + // 3: Update both tags + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Connection/tags_ignore/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1Again), + acctest.CtResourceKey2: config.StringVariable(acctest.CtResourceValue2Updated), + }), + "ignore_tag_keys": config.SetVariable( + config.StringVariable(acctest.CtResourceKey1), + ), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckConnectionExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: 
knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + expectFullResourceTags(ctx, resourceName, knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), // TODO: Should not be set + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Again), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Again), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: 
knownvalue.StringExact(acctest.CtResourceValue1Again), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + }, + }, + ExpectNonEmptyPlan: true, + }, + }, + }) +} diff --git a/internal/service/networkmanager/connection_test.go b/internal/service/networkmanager/connection_test.go index f8e76f2e78ca..4e0ae0efca78 100644 --- a/internal/service/networkmanager/connection_test.go +++ b/internal/service/networkmanager/connection_test.go @@ -24,7 +24,7 @@ func TestAccNetworkManagerConnection_serial(t *testing.T) { testCases := map[string]func(t *testing.T){ acctest.CtBasic: testAccConnection_basic, acctest.CtDisappears: testAccConnection_disappears, - "tags": testAccConnection_tags, + "tags": testAccNetworkManagerConnection_tagsSerial, "descriptionAndLinks": testAccConnection_descriptionAndLinks, } @@ -86,52 +86,6 @@ func testAccConnection_disappears(t *testing.T) { }) } -func testAccConnection_tags(t *testing.T) { - ctx := acctest.Context(t) - resourceName := "aws_networkmanager_connection.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckConnectionDestroy(ctx), - Steps: []resource.TestStep{ - { - Config: testAccConnectionConfig_tags1(rName, acctest.CtKey1, acctest.CtValue1), - Check: resource.ComposeTestCheckFunc( - testAccCheckConnectionExists(ctx, resourceName), - resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, "1"), - resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey1, acctest.CtValue1), - ), - }, - { - ResourceName: 
resourceName, - ImportState: true, - ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, names.AttrARN), - ImportStateVerify: true, - }, - { - Config: testAccConnectionConfig_tags2(rName, acctest.CtKey1, acctest.CtValue1Updated, acctest.CtKey2, acctest.CtValue2), - Check: resource.ComposeTestCheckFunc( - testAccCheckConnectionExists(ctx, resourceName), - resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, "2"), - resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey1, acctest.CtValue1Updated), - resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey2, acctest.CtValue2), - ), - }, - { - Config: testAccConnectionConfig_tags1(rName, acctest.CtKey2, acctest.CtValue2), - Check: resource.ComposeTestCheckFunc( - testAccCheckConnectionExists(ctx, resourceName), - resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, "1"), - resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey2, acctest.CtValue2), - ), - }, - }, - }) -} - func testAccConnection_descriptionAndLinks(t *testing.T) { ctx := acctest.Context(t) resourceName := "aws_networkmanager_connection.test" @@ -277,35 +231,6 @@ resource "aws_networkmanager_connection" "test" { `) } -func testAccConnectionConfig_tags1(rName, tagKey1, tagValue1 string) string { - return acctest.ConfigCompose(testAccConnectionBaseConfig(rName), fmt.Sprintf(` -resource "aws_networkmanager_connection" "test" { - global_network_id = aws_networkmanager_global_network.test.id - device_id = aws_networkmanager_device.test1.id - connected_device_id = aws_networkmanager_device.test2.id - - tags = { - %[2]q = %[3]q - } -} -`, rName, tagKey1, tagValue1)) -} - -func testAccConnectionConfig_tags2(rName, tagKey1, tagValue1, tagKey2, tagValue2 string) string { - return acctest.ConfigCompose(testAccConnectionBaseConfig(rName), fmt.Sprintf(` -resource "aws_networkmanager_connection" "test" { - global_network_id = aws_networkmanager_global_network.test.id - device_id = 
aws_networkmanager_device.test1.id - connected_device_id = aws_networkmanager_device.test2.id - - tags = { - %[2]q = %[3]q - %[4]q = %[5]q - } -} -`, rName, tagKey1, tagValue1, tagKey2, tagValue2)) -} - func testAccConnectionDescriptionAndLinksBaseConfig(rName string) string { return acctest.ConfigCompose(testAccConnectionBaseConfig(rName), fmt.Sprintf(` resource "aws_networkmanager_link" "test1" { diff --git a/internal/service/networkmanager/core_network.go b/internal/service/networkmanager/core_network.go index 76466e1f6a4a..8c9cf6cb8c05 100644 --- a/internal/service/networkmanager/core_network.go +++ b/internal/service/networkmanager/core_network.go @@ -43,6 +43,9 @@ const ( // @SDKResource("aws_networkmanager_core_network", name="Core Network") // @Tags(identifierAttribute="arn") +// @Testing(skipEmptyTags=true) +// @Testing(generator=false) +// @Testing(importIgnore="create_base_policy") func resourceCoreNetwork() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceCoreNetworkCreate, @@ -435,7 +438,7 @@ func waitCoreNetworkDeleted(ctx context.Context, conn *networkmanager.Client, id Pending: enum.Slice(awstypes.CoreNetworkStateDeleting), Target: []string{}, Timeout: timeout, - Delay: 5 * time.Minute, + Delay: 4 * time.Minute, MinTimeout: 10 * time.Second, Refresh: statusCoreNetworkState(ctx, conn, id), } @@ -517,7 +520,7 @@ func putAndExecuteCoreNetworkPolicy(ctx context.Context, conn *networkmanager.Cl document, err := structure.NormalizeJsonString(policyDocument) if err != nil { - return fmt.Errorf("decoding Network Manager Core Network (%s) policy document: %s", coreNetworkId, err) + return fmt.Errorf("decoding Network Manager Core Network (%s) policy document: %w", coreNetworkId, err) } output, err := conn.PutCoreNetworkPolicy(ctx, &networkmanager.PutCoreNetworkPolicyInput{ @@ -527,13 +530,13 @@ func putAndExecuteCoreNetworkPolicy(ctx context.Context, conn *networkmanager.Cl }) if err != nil { - return fmt.Errorf("putting Network 
Manager Core Network (%s) policy: %s", coreNetworkId, err) + return fmt.Errorf("putting Network Manager Core Network (%s) policy: %w", coreNetworkId, err) } policyVersionID := output.CoreNetworkPolicy.PolicyVersionId if _, err := waitCoreNetworkPolicyCreated(ctx, conn, coreNetworkId, policyVersionID, waitCoreNetworkPolicyCreatedTimeInMinutes*time.Minute); err != nil { - return fmt.Errorf("waiting for Network Manager Core Network Policy from Core Network (%s) create: %s", coreNetworkId, err) + return fmt.Errorf("waiting for Network Manager Core Network Policy from Core Network (%s) create: %w", coreNetworkId, err) } _, err = conn.ExecuteCoreNetworkChangeSet(ctx, &networkmanager.ExecuteCoreNetworkChangeSetInput{ @@ -541,7 +544,7 @@ func putAndExecuteCoreNetworkPolicy(ctx context.Context, conn *networkmanager.Cl PolicyVersionId: policyVersionID, }) if err != nil { - return fmt.Errorf("executing Network Manager Core Network (%s) change set (%d): %s", coreNetworkId, policyVersionID, err) + return fmt.Errorf("executing Network Manager Core Network (%s) change set (%d): %w", coreNetworkId, policyVersionID, err) } return nil @@ -618,7 +621,7 @@ func buildCoreNetworkBasePolicyDocument(regions []any) (string, error) { b, err := json.MarshalIndent(basePolicy, "", " ") if err != nil { // should never happen if the above code is correct - return "", fmt.Errorf("building base policy document: %s", err) + return "", fmt.Errorf("building base policy document: %w", err) } return string(b), nil diff --git a/internal/service/networkmanager/core_network_tags_gen_test.go b/internal/service/networkmanager/core_network_tags_gen_test.go new file mode 100644 index 000000000000..33ebcc9a22cf --- /dev/null +++ b/internal/service/networkmanager/core_network_tags_gen_test.go @@ -0,0 +1,2276 @@ +// Code generated by internal/generate/tagstests/main.go; DO NOT EDIT. 
+ +package networkmanager_test + +import ( + "testing" + + "github.com/hashicorp/terraform-plugin-testing/config" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/knownvalue" + "github.com/hashicorp/terraform-plugin-testing/plancheck" + "github.com/hashicorp/terraform-plugin-testing/statecheck" + "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccNetworkManagerCoreNetwork_tags(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_networkmanager_core_network.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckCoreNetworkDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + ConfigDirectory: config.StaticDirectory("testdata/CoreNetwork/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckCoreNetworkExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + 
plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/CoreNetwork/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "create_base_policy", + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/CoreNetwork/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1Updated), + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckCoreNetworkExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1Updated), + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1Updated), + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + 
plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1Updated), + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1Updated), + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/CoreNetwork/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1Updated), + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "create_base_policy", + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/CoreNetwork/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckCoreNetworkExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + 
plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/CoreNetwork/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "create_base_policy", + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/CoreNetwork/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckCoreNetworkExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/CoreNetwork/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + 
ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "create_base_policy", + }, + }, + }, + }) +} + +func TestAccNetworkManagerCoreNetwork_tags_null(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_networkmanager_core_network.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckCoreNetworkDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + ConfigDirectory: config.StaticDirectory("testdata/CoreNetwork/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: nil, + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckCoreNetworkExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + // TODO: Should be known + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/CoreNetwork/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: nil, + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "create_base_policy", + }, + }, + { + ConfigDirectory: 
config.StaticDirectory("testdata/CoreNetwork/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: nil, + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + }, + }, + }) +} + +func TestAccNetworkManagerCoreNetwork_tags_EmptyMap(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_networkmanager_core_network.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckCoreNetworkDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + ConfigDirectory: config.StaticDirectory("testdata/CoreNetwork/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{}), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckCoreNetworkExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + // TODO: Should be known + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/CoreNetwork/tags/"), + ConfigVariables: config.Variables{ + 
acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{}), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "create_base_policy", + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/CoreNetwork/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: nil, + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + }, + }, + }) +} + +func TestAccNetworkManagerCoreNetwork_tags_AddOnUpdate(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_networkmanager_core_network.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckCoreNetworkDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + ConfigDirectory: config.StaticDirectory("testdata/CoreNetwork/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckCoreNetworkExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + // TODO: Should be known + 
plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/CoreNetwork/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckCoreNetworkExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/CoreNetwork/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "create_base_policy", + }, + }, + }, + }) +} + +func TestAccNetworkManagerCoreNetwork_tags_EmptyTag_OnCreate(t *testing.T) { + t.Skip("Resource CoreNetwork does not 
support empty tags") + + ctx := acctest.Context(t) + + resourceName := "aws_networkmanager_core_network.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckCoreNetworkDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + ConfigDirectory: config.StaticDirectory("testdata/CoreNetwork/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckCoreNetworkExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + // TODO: Should be known + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/CoreNetwork/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: 
[]string{ + "create_base_policy", + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/CoreNetwork/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckCoreNetworkExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/CoreNetwork/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "create_base_policy", + }, + }, + }, + }) +} + +func TestAccNetworkManagerCoreNetwork_tags_EmptyTag_OnUpdate_Add(t *testing.T) { + t.Skip("Resource CoreNetwork does not support empty tags") + + ctx := acctest.Context(t) + + resourceName := "aws_networkmanager_core_network.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckCoreNetworkDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + ConfigDirectory: config.StaticDirectory("testdata/CoreNetwork/tags/"), + 
ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckCoreNetworkExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/CoreNetwork/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + acctest.CtKey2: config.StringVariable(""), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckCoreNetworkExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + acctest.CtKey2: knownvalue.StringExact(""), + })), + 
statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + acctest.CtKey2: knownvalue.StringExact(""), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + acctest.CtKey2: knownvalue.StringExact(""), + })), + // TODO: Should be known + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/CoreNetwork/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + acctest.CtKey2: config.StringVariable(""), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "create_base_policy", + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/CoreNetwork/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckCoreNetworkExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: 
knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/CoreNetwork/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "create_base_policy", + }, + }, + }, + }) +} + +func TestAccNetworkManagerCoreNetwork_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { + t.Skip("Resource CoreNetwork does not support empty tags") + + ctx := acctest.Context(t) + + resourceName := "aws_networkmanager_core_network.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckCoreNetworkDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + ConfigDirectory: config.StaticDirectory("testdata/CoreNetwork/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckCoreNetworkExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + 
statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/CoreNetwork/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckCoreNetworkExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + 
acctest.CtKey1: knownvalue.StringExact(""), + })), + // TODO: Should be known + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/CoreNetwork/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "create_base_policy", + }, + }, + }, + }) +} + +func TestAccNetworkManagerCoreNetwork_tags_DefaultTags_providerOnly(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_networkmanager_core_network.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckCoreNetworkDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/CoreNetwork/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckCoreNetworkExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + 
plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/CoreNetwork/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "create_base_policy", + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/CoreNetwork/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1Updated), + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckCoreNetworkExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1Updated), + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, 
tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1Updated), + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/CoreNetwork/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1Updated), + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "create_base_policy", + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/CoreNetwork/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckCoreNetworkExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, 
tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/CoreNetwork/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "create_base_policy", + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/CoreNetwork/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckCoreNetworkExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: 
config.StaticDirectory("testdata/CoreNetwork/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "create_base_policy", + }, + }, + }, + }) +} + +func TestAccNetworkManagerCoreNetwork_tags_DefaultTags_nonOverlapping(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_networkmanager_core_network.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckCoreNetworkDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/CoreNetwork/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckCoreNetworkExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1), + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, 
plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1), + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/CoreNetwork/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "create_base_policy", + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/CoreNetwork/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1Updated), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: config.StringVariable(acctest.CtResourceValue2), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckCoreNetworkExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, 
tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1Updated), + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1Updated), + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/CoreNetwork/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1Updated), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: 
config.StringVariable(acctest.CtResourceValue2), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "create_base_policy", + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/CoreNetwork/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckCoreNetworkExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/CoreNetwork/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "create_base_policy", + }, + }, + }, + }) +} + +func TestAccNetworkManagerCoreNetwork_tags_DefaultTags_overlapping(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_networkmanager_core_network.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + 
CheckDestroy: testAccCheckCoreNetworkDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/CoreNetwork/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtResourceValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckCoreNetworkExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/CoreNetwork/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: 
config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtResourceValue1), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "create_base_policy", + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/CoreNetwork/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtProviderValue1), + acctest.CtOverlapKey2: config.StringVariable("providervalue2"), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtResourceValue1), + acctest.CtOverlapKey2: config.StringVariable(acctest.CtResourceValue2), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckCoreNetworkExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + acctest.CtOverlapKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + acctest.CtOverlapKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, 
tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + acctest.CtOverlapKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + acctest.CtOverlapKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/CoreNetwork/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtProviderValue1), + acctest.CtOverlapKey2: config.StringVariable("providervalue2"), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtResourceValue1), + acctest.CtOverlapKey2: config.StringVariable(acctest.CtResourceValue2), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "create_base_policy", + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/CoreNetwork/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtResourceValue2), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckCoreNetworkExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + 
statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue2), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/CoreNetwork/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtResourceValue2), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "create_base_policy", + }, + }, + }, + }) +} + +func TestAccNetworkManagerCoreNetwork_tags_DefaultTags_updateToProviderOnly(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_networkmanager_core_network.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, 
names.NetworkManagerServiceID), + CheckDestroy: testAccCheckCoreNetworkDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/CoreNetwork/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckCoreNetworkExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/CoreNetwork/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckCoreNetworkExists(ctx, resourceName), 
+ ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/CoreNetwork/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "create_base_policy", + }, + }, + }, + }) +} + +func TestAccNetworkManagerCoreNetwork_tags_DefaultTags_updateToResourceOnly(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_networkmanager_core_network.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckCoreNetworkDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/CoreNetwork/tags_defaults/"), + 
ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckCoreNetworkExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/CoreNetwork/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckCoreNetworkExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + 
ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/CoreNetwork/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "create_base_policy", + }, + }, + }, + }) +} + +func TestAccNetworkManagerCoreNetwork_tags_DefaultTags_emptyResourceTag(t *testing.T) { + t.Skip("Resource CoreNetwork does not support empty tags") + + ctx := acctest.Context(t) + + resourceName := "aws_networkmanager_core_network.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckCoreNetworkDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/CoreNetwork/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + }, + Check: 
resource.ComposeAggregateTestCheckFunc( + testAccCheckCoreNetworkExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + // TODO: Should be known + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/CoreNetwork/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "create_base_policy", + }, + }, + }, + }) +} + +func TestAccNetworkManagerCoreNetwork_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T) { + t.Skip("Resource CoreNetwork does not support empty tags") + + ctx := acctest.Context(t) + + resourceName := "aws_networkmanager_core_network.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + 
CheckDestroy: testAccCheckCoreNetworkDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/CoreNetwork/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckCoreNetworkExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + // TODO: Should be known + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/CoreNetwork/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "create_base_policy", + }, + }, + }, + }) +} + +func TestAccNetworkManagerCoreNetwork_tags_DefaultTags_nullOverlappingResourceTag(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_networkmanager_core_network.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: 
func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckCoreNetworkDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/CoreNetwork/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: nil, + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckCoreNetworkExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtProviderValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtProviderValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/CoreNetwork/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: nil, + }), + }, + 
ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "create_base_policy", + }, + }, + }, + }) +} + +func TestAccNetworkManagerCoreNetwork_tags_DefaultTags_nullNonOverlappingResourceTag(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_networkmanager_core_network.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckCoreNetworkDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/CoreNetwork/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: nil, + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckCoreNetworkExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1), + })), + }, + }, + 
}, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/CoreNetwork/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: nil, + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "create_base_policy", + }, + }, + }, + }) +} + +func TestAccNetworkManagerCoreNetwork_tags_ComputedTag_OnCreate(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_networkmanager_core_network.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckCoreNetworkDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/CoreNetwork/tagsComputed1/"), + ConfigVariables: config.Variables{ + "unknownTagKey": config.StringVariable("computedkey1"), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckCoreNetworkExists(ctx, resourceName), + resource.TestCheckResourceAttrPair(resourceName, "tags.computedkey1", "null_resource.test", names.AttrID), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapSizeExact(1)), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapSizeExact(1)), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectUnknownValue(resourceName, 
tfjsonpath.New(names.AttrTags)), + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/CoreNetwork/tagsComputed1/"), + ConfigVariables: config.Variables{ + "unknownTagKey": config.StringVariable("computedkey1"), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "create_base_policy", + }, + }, + }, + }) +} + +func TestAccNetworkManagerCoreNetwork_tags_ComputedTag_OnUpdate_Add(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_networkmanager_core_network.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckCoreNetworkDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/CoreNetwork/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckCoreNetworkExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), 
knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/CoreNetwork/tagsComputed2/"), + ConfigVariables: config.Variables{ + "unknownTagKey": config.StringVariable("computedkey1"), + "knownTagKey": config.StringVariable(acctest.CtKey1), + "knownTagValue": config.StringVariable(acctest.CtValue1), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckCoreNetworkExists(ctx, resourceName), + resource.TestCheckResourceAttrPair(resourceName, "tags.computedkey1", "null_resource.test", names.AttrID), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapSizeExact(2)), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapPartial(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapSizeExact(2)), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapPartial(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ 
+ plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTags)), + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/CoreNetwork/tagsComputed2/"), + ConfigVariables: config.Variables{ + "unknownTagKey": config.StringVariable("computedkey1"), + "knownTagKey": config.StringVariable(acctest.CtKey1), + "knownTagValue": config.StringVariable(acctest.CtValue1), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "create_base_policy", + }, + }, + }, + }) +} + +func TestAccNetworkManagerCoreNetwork_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_networkmanager_core_network.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckCoreNetworkDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/CoreNetwork/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckCoreNetworkExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, 
tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/CoreNetwork/tagsComputed1/"), + ConfigVariables: config.Variables{ + "unknownTagKey": config.StringVariable(acctest.CtKey1), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckCoreNetworkExists(ctx, resourceName), + resource.TestCheckResourceAttrPair(resourceName, acctest.CtTagsKey1, "null_resource.test", names.AttrID), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapSizeExact(1)), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapSizeExact(1)), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTags)), + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + PostApplyPreRefresh: 
[]plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/CoreNetwork/tagsComputed1/"), + ConfigVariables: config.Variables{ + "unknownTagKey": config.StringVariable(acctest.CtKey1), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "create_base_policy", + }, + }, + }, + }) +} + +func TestAccNetworkManagerCoreNetwork_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_networkmanager_core_network.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckCoreNetworkDestroy(ctx), + Steps: []resource.TestStep{ + // 1: Create + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/CoreNetwork/tags_ignore/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1), + }), + "ignore_tag_keys": config.SetVariable( + config.StringVariable(acctest.CtProviderKey1), + ), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckCoreNetworkExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: 
knownvalue.StringExact(acctest.CtResourceValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + expectFullResourceTags(ctx, resourceName, knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1), // TODO: Should not be set + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectEmptyPlan(), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectEmptyPlan(), + }, + }, + }, + // 2: Update ignored tag only + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/CoreNetwork/tags_ignore/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1Updated), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1), + }), + "ignore_tag_keys": config.SetVariable( + config.StringVariable(acctest.CtProviderKey1), + ), + }, + Check: resource.ComposeAggregateTestCheckFunc( + 
testAccCheckCoreNetworkExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + expectFullResourceTags(ctx, resourceName, knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1), // TODO: Should not be set + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectEmptyPlan(), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectEmptyPlan(), + }, + }, + }, + // 3: Update both tags + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/CoreNetwork/tags_ignore/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1Again), + }), + acctest.CtResourceTags: 
config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1Updated), + }), + "ignore_tag_keys": config.SetVariable( + config.StringVariable(acctest.CtProviderKey1), + ), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckCoreNetworkExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + })), + expectFullResourceTags(ctx, resourceName, knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1), // TODO: Should not be set + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + })), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectEmptyPlan(), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectEmptyPlan(), + }, + }, + }, + }, + }) +} + +func TestAccNetworkManagerCoreNetwork_tags_IgnoreTags_Overlap_ResourceTag(t *testing.T) { + ctx := 
acctest.Context(t) + + resourceName := "aws_networkmanager_core_network.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckCoreNetworkDestroy(ctx), + Steps: []resource.TestStep{ + // 1: Create + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/CoreNetwork/tags_ignore/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1), + acctest.CtResourceKey2: config.StringVariable(acctest.CtResourceValue2), + }), + "ignore_tag_keys": config.SetVariable( + config.StringVariable(acctest.CtResourceKey1), + ), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckCoreNetworkExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + expectFullResourceTags(ctx, resourceName, knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), // TODO: Should not be set + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + 
acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + }, + ExpectNonEmptyPlan: true, + }, + // 2: Update ignored tag + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/CoreNetwork/tags_ignore/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: 
config.StringVariable(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: config.StringVariable(acctest.CtResourceValue2), + }), + "ignore_tag_keys": config.SetVariable( + config.StringVariable(acctest.CtResourceKey1), + ), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckCoreNetworkExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + expectFullResourceTags(ctx, resourceName, knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), // TODO: Should not be set + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), 
knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + }, + ExpectNonEmptyPlan: true, + }, + // 3: Update both tags + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/CoreNetwork/tags_ignore/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1Again), + acctest.CtResourceKey2: config.StringVariable(acctest.CtResourceValue2Updated), + }), + "ignore_tag_keys": config.SetVariable( + config.StringVariable(acctest.CtResourceKey1), + ), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckCoreNetworkExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: 
knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + expectFullResourceTags(ctx, resourceName, knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), // TODO: Should not be set + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Again), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Again), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, 
plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Again), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + }, + }, + ExpectNonEmptyPlan: true, + }, + }, + }) +} diff --git a/internal/service/networkmanager/core_network_test.go b/internal/service/networkmanager/core_network_test.go index b3d7072a9494..90bf3704283a 100644 --- a/internal/service/networkmanager/core_network_test.go +++ b/internal/service/networkmanager/core_network_test.go @@ -73,51 +73,6 @@ func TestAccNetworkManagerCoreNetwork_disappears(t *testing.T) { }) } -func TestAccNetworkManagerCoreNetwork_tags(t *testing.T) { - ctx := acctest.Context(t) - resourceName := "aws_networkmanager_core_network.test" - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckCoreNetworkDestroy(ctx), - Steps: []resource.TestStep{ - { - Config: testAccCoreNetworkConfig_tags1(acctest.CtKey1, acctest.CtValue1), - Check: resource.ComposeTestCheckFunc( - testAccCheckCoreNetworkExists(ctx, resourceName), - resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, "1"), - resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey1, acctest.CtValue1), - ), - }, - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"create_base_policy"}, - }, - { - Config: testAccCoreNetworkConfig_tags2(acctest.CtKey1, acctest.CtValue1Updated, 
acctest.CtKey2, acctest.CtValue2), - Check: resource.ComposeTestCheckFunc( - testAccCheckCoreNetworkExists(ctx, resourceName), - resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, "2"), - resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey1, acctest.CtValue1Updated), - resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey2, acctest.CtValue2), - ), - }, - { - Config: testAccCoreNetworkConfig_tags1(acctest.CtKey2, acctest.CtValue2), - Check: resource.ComposeTestCheckFunc( - testAccCheckCoreNetworkExists(ctx, resourceName), - resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, "1"), - resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey2, acctest.CtValue2), - ), - }, - }, - }) -} - func TestAccNetworkManagerCoreNetwork_description(t *testing.T) { ctx := acctest.Context(t) resourceName := "aws_networkmanager_core_network.test" @@ -380,35 +335,6 @@ resource "aws_networkmanager_core_network" "test" { }` } -func testAccCoreNetworkConfig_tags1(tagKey1, tagValue1 string) string { - return fmt.Sprintf(` -resource "aws_networkmanager_global_network" "test" {} - -resource "aws_networkmanager_core_network" "test" { - global_network_id = aws_networkmanager_global_network.test.id - - tags = { - %[1]q = %[2]q - } -} -`, tagKey1, tagValue1) -} - -func testAccCoreNetworkConfig_tags2(tagKey1, tagValue1, tagKey2, tagValue2 string) string { - return fmt.Sprintf(` -resource "aws_networkmanager_global_network" "test" {} - -resource "aws_networkmanager_core_network" "test" { - global_network_id = aws_networkmanager_global_network.test.id - - tags = { - %[1]q = %[2]q - %[3]q = %[4]q - } -} -`, tagKey1, tagValue1, tagKey2, tagValue2) -} - func testAccCoreNetworkConfig_description(description string) string { return fmt.Sprintf(` resource "aws_networkmanager_global_network" "test" {} diff --git a/internal/service/networkmanager/customer_gateway_association.go b/internal/service/networkmanager/customer_gateway_association.go index 
b1885b013f9c..2647260fc194 100644 --- a/internal/service/networkmanager/customer_gateway_association.go +++ b/internal/service/networkmanager/customer_gateway_association.go @@ -86,7 +86,7 @@ func resourceCustomerGatewayAssociationCreate(ctx context.Context, d *schema.Res log.Printf("[DEBUG] Creating Network Manager Customer Gateway Association: %#v", input) _, err := tfresource.RetryWhen(ctx, customerGatewayAssociationResourceNotFoundExceptionTimeout, - func() (any, error) { + func(ctx context.Context) (any, error) { return conn.AssociateCustomerGateway(ctx, input) }, func(err error) (bool, error) { diff --git a/internal/service/networkmanager/device.go b/internal/service/networkmanager/device.go index bf7b7ad98847..3523017ca4f9 100644 --- a/internal/service/networkmanager/device.go +++ b/internal/service/networkmanager/device.go @@ -30,6 +30,9 @@ import ( // @SDKResource("aws_networkmanager_device", name="Device") // @Tags(identifierAttribute="arn") +// @Testing(skipEmptyTags=true) +// @Testing(generator=false) +// @Testing(importStateIdAttribute="arn") func resourceDevice() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceDeviceCreate, diff --git a/internal/service/networkmanager/device_tags_gen_test.go b/internal/service/networkmanager/device_tags_gen_test.go new file mode 100644 index 000000000000..9e7de5720242 --- /dev/null +++ b/internal/service/networkmanager/device_tags_gen_test.go @@ -0,0 +1,2245 @@ +// Code generated by internal/generate/tagstests/main.go; DO NOT EDIT. 
+ +package networkmanager_test + +import ( + "testing" + + "github.com/hashicorp/terraform-plugin-testing/config" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/knownvalue" + "github.com/hashicorp/terraform-plugin-testing/plancheck" + "github.com/hashicorp/terraform-plugin-testing/statecheck" + "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccNetworkManagerDevice_tags(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_networkmanager_device.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckDeviceDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + ConfigDirectory: config.StaticDirectory("testdata/Device/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDeviceExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, 
tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Device/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, names.AttrARN), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: names.AttrARN, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Device/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1Updated), + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDeviceExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1Updated), + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1Updated), + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + 
plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1Updated), + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1Updated), + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Device/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1Updated), + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, names.AttrARN), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: names.AttrARN, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Device/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDeviceExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, 
plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Device/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, names.AttrARN), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: names.AttrARN, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Device/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDeviceExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Device/tags/"), + ConfigVariables: 
config.Variables{ + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, names.AttrARN), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: names.AttrARN, + }, + }, + }) +} + +func TestAccNetworkManagerDevice_tags_null(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_networkmanager_device.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckDeviceDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + ConfigDirectory: config.StaticDirectory("testdata/Device/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: nil, + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDeviceExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + // TODO: Should be known + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Device/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: nil, + }), + }, + ResourceName: resourceName, + ImportState: true, + 
ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, names.AttrARN), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: names.AttrARN, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Device/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: nil, + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + }, + }, + }) +} + +func TestAccNetworkManagerDevice_tags_EmptyMap(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_networkmanager_device.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckDeviceDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + ConfigDirectory: config.StaticDirectory("testdata/Device/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{}), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDeviceExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + // TODO: Should be known + plancheck.ExpectUnknownValue(resourceName, 
tfjsonpath.New(names.AttrTagsAll)), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Device/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{}), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, names.AttrARN), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: names.AttrARN, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Device/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: nil, + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + }, + }, + }) +} + +func TestAccNetworkManagerDevice_tags_AddOnUpdate(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_networkmanager_device.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckDeviceDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + ConfigDirectory: config.StaticDirectory("testdata/Device/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDeviceExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + 
plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + // TODO: Should be known + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Device/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDeviceExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Device/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: 
acctest.AttrImportStateIdFunc(resourceName, names.AttrARN), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: names.AttrARN, + }, + }, + }) +} + +func TestAccNetworkManagerDevice_tags_EmptyTag_OnCreate(t *testing.T) { + t.Skip("Resource Device does not support empty tags") + + ctx := acctest.Context(t) + + resourceName := "aws_networkmanager_device.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckDeviceDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + ConfigDirectory: config.StaticDirectory("testdata/Device/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDeviceExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + // TODO: Should be known + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Device/tags/"), + ConfigVariables: 
config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, names.AttrARN), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: names.AttrARN, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Device/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDeviceExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Device/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, names.AttrARN), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: names.AttrARN, + }, + }, + }) +} + +func TestAccNetworkManagerDevice_tags_EmptyTag_OnUpdate_Add(t *testing.T) { + t.Skip("Resource Device does not support empty tags") + + ctx := acctest.Context(t) + + resourceName := "aws_networkmanager_device.test" + + acctest.ParallelTest(ctx, t, 
resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckDeviceDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + ConfigDirectory: config.StaticDirectory("testdata/Device/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDeviceExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Device/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + acctest.CtKey2: config.StringVariable(""), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + 
testAccCheckDeviceExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + acctest.CtKey2: knownvalue.StringExact(""), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + acctest.CtKey2: knownvalue.StringExact(""), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + acctest.CtKey2: knownvalue.StringExact(""), + })), + // TODO: Should be known + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Device/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + acctest.CtKey2: config.StringVariable(""), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, names.AttrARN), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: names.AttrARN, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Device/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDeviceExists(ctx, resourceName), + ), + ConfigStateChecks: 
[]statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Device/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, names.AttrARN), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: names.AttrARN, + }, + }, + }) +} + +func TestAccNetworkManagerDevice_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { + t.Skip("Resource Device does not support empty tags") + + ctx := acctest.Context(t) + + resourceName := "aws_networkmanager_device.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckDeviceDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + 
ConfigDirectory: config.StaticDirectory("testdata/Device/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDeviceExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Device/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDeviceExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), 
knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + // TODO: Should be known + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Device/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, names.AttrARN), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: names.AttrARN, + }, + }, + }) +} + +func TestAccNetworkManagerDevice_tags_DefaultTags_providerOnly(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_networkmanager_device.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckDeviceDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Device/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDeviceExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, 
tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Device/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, names.AttrARN), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: names.AttrARN, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Device/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1Updated), + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDeviceExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + 
statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1Updated), + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1Updated), + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Device/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1Updated), + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, names.AttrARN), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: names.AttrARN, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Device/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDeviceExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + 
statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Device/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, names.AttrARN), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: names.AttrARN, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Device/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDeviceExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), 
knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Device/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, names.AttrARN), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: names.AttrARN, + }, + }, + }) +} + +func TestAccNetworkManagerDevice_tags_DefaultTags_nonOverlapping(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_networkmanager_device.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckDeviceDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Device/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDeviceExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + 
statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1), + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1), + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Device/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, names.AttrARN), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: names.AttrARN, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: 
config.StaticDirectory("testdata/Device/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1Updated), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: config.StringVariable(acctest.CtResourceValue2), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDeviceExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1Updated), + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1Updated), + 
acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Device/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1Updated), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: config.StringVariable(acctest.CtResourceValue2), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, names.AttrARN), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: names.AttrARN, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Device/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDeviceExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), 
knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Device/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, names.AttrARN), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: names.AttrARN, + }, + }, + }) +} + +func TestAccNetworkManagerDevice_tags_DefaultTags_overlapping(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_networkmanager_device.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckDeviceDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Device/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtResourceValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDeviceExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + ConfigPlanChecks: 
resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Device/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtResourceValue1), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, names.AttrARN), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: names.AttrARN, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Device/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtProviderValue1), + acctest.CtOverlapKey2: config.StringVariable("providervalue2"), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtResourceValue1), + acctest.CtOverlapKey2: config.StringVariable(acctest.CtResourceValue2), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDeviceExists(ctx, 
resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + acctest.CtOverlapKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + acctest.CtOverlapKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + acctest.CtOverlapKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + acctest.CtOverlapKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Device/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtProviderValue1), + acctest.CtOverlapKey2: config.StringVariable("providervalue2"), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtResourceValue1), + acctest.CtOverlapKey2: config.StringVariable(acctest.CtResourceValue2), 
+ }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, names.AttrARN), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: names.AttrARN, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Device/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtResourceValue2), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDeviceExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue2), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: 
config.StaticDirectory("testdata/Device/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtResourceValue2), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, names.AttrARN), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: names.AttrARN, + }, + }, + }) +} + +func TestAccNetworkManagerDevice_tags_DefaultTags_updateToProviderOnly(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_networkmanager_device.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckDeviceDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Device/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDeviceExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + 
plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Device/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDeviceExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Device/tags_defaults/"), + ConfigVariables: config.Variables{ + 
acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, names.AttrARN), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: names.AttrARN, + }, + }, + }) +} + +func TestAccNetworkManagerDevice_tags_DefaultTags_updateToResourceOnly(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_networkmanager_device.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckDeviceDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Device/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDeviceExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), 
knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Device/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDeviceExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Device/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, names.AttrARN), + 
ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: names.AttrARN, + }, + }, + }) +} + +func TestAccNetworkManagerDevice_tags_DefaultTags_emptyResourceTag(t *testing.T) { + t.Skip("Resource Device does not support empty tags") + + ctx := acctest.Context(t) + + resourceName := "aws_networkmanager_device.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckDeviceDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Device/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDeviceExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + // TODO: Should be known + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + }, + }, + { + 
ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Device/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, names.AttrARN), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: names.AttrARN, + }, + }, + }) +} + +func TestAccNetworkManagerDevice_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T) { + t.Skip("Resource Device does not support empty tags") + + ctx := acctest.Context(t) + + resourceName := "aws_networkmanager_device.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckDeviceDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Device/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDeviceExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ 
+ plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + // TODO: Should be known + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Device/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, names.AttrARN), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: names.AttrARN, + }, + }, + }) +} + +func TestAccNetworkManagerDevice_tags_DefaultTags_nullOverlappingResourceTag(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_networkmanager_device.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckDeviceDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Device/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: nil, + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDeviceExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + 
statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtProviderValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtProviderValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Device/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: nil, + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, names.AttrARN), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: names.AttrARN, + }, + }, + }) +} + +func TestAccNetworkManagerDevice_tags_DefaultTags_nullNonOverlappingResourceTag(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_networkmanager_device.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckDeviceDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Device/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: 
config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: nil, + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDeviceExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Device/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: nil, + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, names.AttrARN), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: names.AttrARN, + }, + }, + }) +} + +func TestAccNetworkManagerDevice_tags_ComputedTag_OnCreate(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := 
"aws_networkmanager_device.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckDeviceDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Device/tagsComputed1/"), + ConfigVariables: config.Variables{ + "unknownTagKey": config.StringVariable("computedkey1"), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDeviceExists(ctx, resourceName), + resource.TestCheckResourceAttrPair(resourceName, "tags.computedkey1", "null_resource.test", names.AttrID), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapSizeExact(1)), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapSizeExact(1)), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTags)), + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Device/tagsComputed1/"), + ConfigVariables: config.Variables{ + "unknownTagKey": config.StringVariable("computedkey1"), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, names.AttrARN), + ImportStateVerify: true, + 
ImportStateVerifyIdentifierAttribute: names.AttrARN, + }, + }, + }) +} + +func TestAccNetworkManagerDevice_tags_ComputedTag_OnUpdate_Add(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_networkmanager_device.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckDeviceDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Device/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDeviceExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: 
config.StaticDirectory("testdata/Device/tagsComputed2/"), + ConfigVariables: config.Variables{ + "unknownTagKey": config.StringVariable("computedkey1"), + "knownTagKey": config.StringVariable(acctest.CtKey1), + "knownTagValue": config.StringVariable(acctest.CtValue1), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDeviceExists(ctx, resourceName), + resource.TestCheckResourceAttrPair(resourceName, "tags.computedkey1", "null_resource.test", names.AttrID), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapSizeExact(2)), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapPartial(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapSizeExact(2)), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapPartial(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTags)), + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Device/tagsComputed2/"), + ConfigVariables: config.Variables{ + "unknownTagKey": config.StringVariable("computedkey1"), + "knownTagKey": 
config.StringVariable(acctest.CtKey1), + "knownTagValue": config.StringVariable(acctest.CtValue1), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, names.AttrARN), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: names.AttrARN, + }, + }, + }) +} + +func TestAccNetworkManagerDevice_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_networkmanager_device.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckDeviceDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Device/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDeviceExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, 
tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Device/tagsComputed1/"), + ConfigVariables: config.Variables{ + "unknownTagKey": config.StringVariable(acctest.CtKey1), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDeviceExists(ctx, resourceName), + resource.TestCheckResourceAttrPair(resourceName, acctest.CtTagsKey1, "null_resource.test", names.AttrID), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapSizeExact(1)), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapSizeExact(1)), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTags)), + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Device/tagsComputed1/"), + ConfigVariables: config.Variables{ + "unknownTagKey": config.StringVariable(acctest.CtKey1), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, names.AttrARN), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: names.AttrARN, + }, + }, + }) +} + +func 
TestAccNetworkManagerDevice_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_networkmanager_device.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckDeviceDestroy(ctx), + Steps: []resource.TestStep{ + // 1: Create + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Device/tags_ignore/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1), + }), + "ignore_tag_keys": config.SetVariable( + config.StringVariable(acctest.CtProviderKey1), + ), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDeviceExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + expectFullResourceTags(ctx, resourceName, knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1), // TODO: Should not be set + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, 
plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectEmptyPlan(), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectEmptyPlan(), + }, + }, + }, + // 2: Update ignored tag only + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Device/tags_ignore/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1Updated), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1), + }), + "ignore_tag_keys": config.SetVariable( + config.StringVariable(acctest.CtProviderKey1), + ), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDeviceExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + expectFullResourceTags(ctx, resourceName, knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1), // TODO: 
Should not be set + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectEmptyPlan(), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectEmptyPlan(), + }, + }, + }, + // 3: Update both tags + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Device/tags_ignore/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1Again), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1Updated), + }), + "ignore_tag_keys": config.SetVariable( + config.StringVariable(acctest.CtProviderKey1), + ), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDeviceExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + 
acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + })), + expectFullResourceTags(ctx, resourceName, knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1), // TODO: Should not be set + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + })), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectEmptyPlan(), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectEmptyPlan(), + }, + }, + }, + }, + }) +} + +func TestAccNetworkManagerDevice_tags_IgnoreTags_Overlap_ResourceTag(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_networkmanager_device.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckDeviceDestroy(ctx), + Steps: []resource.TestStep{ + // 1: Create + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Device/tags_ignore/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1), + acctest.CtResourceKey2: config.StringVariable(acctest.CtResourceValue2), + 
}), + "ignore_tag_keys": config.SetVariable( + config.StringVariable(acctest.CtResourceKey1), + ), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDeviceExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + expectFullResourceTags(ctx, resourceName, knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), // TODO: Should not be set + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + acctest.CtResourceKey2: 
knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + }, + ExpectNonEmptyPlan: true, + }, + // 2: Update ignored tag + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Device/tags_ignore/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: config.StringVariable(acctest.CtResourceValue2), + }), + "ignore_tag_keys": config.SetVariable( + config.StringVariable(acctest.CtResourceKey1), + ), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDeviceExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: 
knownvalue.StringExact(acctest.CtResourceValue2), + })), + expectFullResourceTags(ctx, resourceName, knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), // TODO: Should not be set + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: 
knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + }, + ExpectNonEmptyPlan: true, + }, + // 3: Update both tags + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Device/tags_ignore/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1Again), + acctest.CtResourceKey2: config.StringVariable(acctest.CtResourceValue2Updated), + }), + "ignore_tag_keys": config.SetVariable( + config.StringVariable(acctest.CtResourceKey1), + ), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDeviceExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + expectFullResourceTags(ctx, resourceName, knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), // TODO: Should not be set + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), 
knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Again), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Again), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Again), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + }, + }, + ExpectNonEmptyPlan: true, + }, + }, + }) +} diff --git a/internal/service/networkmanager/device_test.go b/internal/service/networkmanager/device_test.go index 02018bc89113..19512192ccfb 100644 --- a/internal/service/networkmanager/device_test.go +++ 
b/internal/service/networkmanager/device_test.go @@ -78,52 +78,6 @@ func TestAccNetworkManagerDevice_disappears(t *testing.T) { }) } -func TestAccNetworkManagerDevice_tags(t *testing.T) { - ctx := acctest.Context(t) - resourceName := "aws_networkmanager_device.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckDeviceDestroy(ctx), - Steps: []resource.TestStep{ - { - Config: testAccDeviceConfig_tags1(rName, acctest.CtKey1, acctest.CtValue1), - Check: resource.ComposeTestCheckFunc( - testAccCheckDeviceExists(ctx, resourceName), - resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, "1"), - resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey1, acctest.CtValue1), - ), - }, - { - ResourceName: resourceName, - ImportState: true, - ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, names.AttrARN), - ImportStateVerify: true, - }, - { - Config: testAccDeviceConfig_tags2(rName, acctest.CtKey1, acctest.CtValue1Updated, acctest.CtKey2, acctest.CtValue2), - Check: resource.ComposeTestCheckFunc( - testAccCheckDeviceExists(ctx, resourceName), - resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, "2"), - resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey1, acctest.CtValue1Updated), - resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey2, acctest.CtValue2), - ), - }, - { - Config: testAccDeviceConfig_tags1(rName, acctest.CtKey2, acctest.CtValue2), - Check: resource.ComposeTestCheckFunc( - testAccCheckDeviceExists(ctx, resourceName), - resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, "1"), - resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey2, acctest.CtValue2), - ), - }, - }, - }) -} - func 
TestAccNetworkManagerDevice_allAttributes(t *testing.T) { ctx := acctest.Context(t) resourceName := "aws_networkmanager_device.test" @@ -278,43 +232,6 @@ resource "aws_networkmanager_device" "test" { `, rName) } -func testAccDeviceConfig_tags1(rName, tagKey1, tagValue1 string) string { - return fmt.Sprintf(` -resource "aws_networkmanager_global_network" "test" { - tags = { - Name = %[1]q - } -} - -resource "aws_networkmanager_device" "test" { - global_network_id = aws_networkmanager_global_network.test.id - - tags = { - %[2]q = %[3]q - } -} -`, rName, tagKey1, tagValue1) -} - -func testAccDeviceConfig_tags2(rName, tagKey1, tagValue1, tagKey2, tagValue2 string) string { - return fmt.Sprintf(` -resource "aws_networkmanager_global_network" "test" { - tags = { - Name = %[1]q - } -} - -resource "aws_networkmanager_device" "test" { - global_network_id = aws_networkmanager_global_network.test.id - - tags = { - %[2]q = %[3]q - %[4]q = %[5]q - } -} -`, rName, tagKey1, tagValue1, tagKey2, tagValue2) -} - func testAccDeviceConfig_allAttributes(rName string) string { return fmt.Sprintf(` resource "aws_networkmanager_global_network" "test" { diff --git a/internal/service/networkmanager/dx_gateway_attachment.go b/internal/service/networkmanager/dx_gateway_attachment.go index dc6f5f30fa06..4a5e51fa24d8 100644 --- a/internal/service/networkmanager/dx_gateway_attachment.go +++ b/internal/service/networkmanager/dx_gateway_attachment.go @@ -34,6 +34,9 @@ import ( // @FrameworkResource("aws_networkmanager_dx_gateway_attachment", name="Direct Connect Gateway Attachment") // @Tags(identifierAttribute="arn") +// @Testing(existsType="github.com/aws/aws-sdk-go-v2/service/networkmanager/types;awstypes;awstypes.DirectConnectGatewayAttachment") +// @Testing(skipEmptyTags=true, skipNullTags=true) +// @Testing(importIgnore="state") func newDirectConnectGatewayAttachmentResource(context.Context) (resource.ResourceWithConfigure, error) { r := &directConnectGatewayAttachmentResource{} @@ -275,7 
+278,7 @@ func (r *directConnectGatewayAttachmentResource) Delete(ctx context.Context, req } // If attachment state is pending acceptance, reject the attachment before deleting. - if state := dxgwAttachment.Attachment.State; state == awstypes.AttachmentStatePendingAttachmentAcceptance || state == awstypes.AttachmentStatePendingTagAcceptance { + if state := dxgwAttachment.Attachment.State; state == awstypes.AttachmentStatePendingAttachmentAcceptance { input := &networkmanager.RejectAttachmentInput{ AttachmentId: fwflex.StringFromFramework(ctx, data.ID), } @@ -398,6 +401,8 @@ func waitDirectConnectGatewayAttachmentDeleted(ctx context.Context, conn *networ Target: []string{}, Refresh: statusDirectConnectGatewayAttachment(ctx, conn, id), Timeout: timeout, + Delay: 2 * time.Minute, + PollInterval: 10 * time.Second, NotFoundChecks: 1, } diff --git a/internal/service/networkmanager/dx_gateway_attachment_tags_gen_test.go b/internal/service/networkmanager/dx_gateway_attachment_tags_gen_test.go new file mode 100644 index 000000000000..5fb877d88459 --- /dev/null +++ b/internal/service/networkmanager/dx_gateway_attachment_tags_gen_test.go @@ -0,0 +1,2380 @@ +// Code generated by internal/generate/tagstests/main.go; DO NOT EDIT. 
+ +package networkmanager_test + +import ( + "testing" + + awstypes "github.com/aws/aws-sdk-go-v2/service/networkmanager/types" + "github.com/hashicorp/terraform-plugin-testing/config" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/knownvalue" + "github.com/hashicorp/terraform-plugin-testing/plancheck" + "github.com/hashicorp/terraform-plugin-testing/statecheck" + "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccNetworkManagerDirectConnectGatewayAttachment_tags(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.DirectConnectGatewayAttachment + resourceName := "aws_networkmanager_dx_gateway_attachment.test" + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckDirectConnectGatewayAttachmentDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + ConfigDirectory: config.StaticDirectory("testdata/DirectConnectGatewayAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDirectConnectGatewayAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, 
tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/DirectConnectGatewayAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + names.AttrState, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/DirectConnectGatewayAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1Updated), + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDirectConnectGatewayAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1Updated), + acctest.CtKey2: 
knownvalue.StringExact(acctest.CtValue2), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1Updated), + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1Updated), + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1Updated), + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/DirectConnectGatewayAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1Updated), + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + names.AttrState, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/DirectConnectGatewayAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDirectConnectGatewayAttachmentExists(ctx, resourceName, &v), + 
), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/DirectConnectGatewayAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + names.AttrState, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/DirectConnectGatewayAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDirectConnectGatewayAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + 
statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/DirectConnectGatewayAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + names.AttrState, + }, + }, + }, + }) +} + +func TestAccNetworkManagerDirectConnectGatewayAttachment_tags_null(t *testing.T) { + t.Skip("Resource DirectConnectGatewayAttachment does not support null tags") + + ctx := acctest.Context(t) + + var v awstypes.DirectConnectGatewayAttachment + resourceName := "aws_networkmanager_dx_gateway_attachment.test" + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckDirectConnectGatewayAttachmentDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + ConfigDirectory: config.StaticDirectory("testdata/DirectConnectGatewayAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: nil, + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + 
testAccCheckDirectConnectGatewayAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.Null(), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.Null(), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/DirectConnectGatewayAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: nil, + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + acctest.CtTagsKey1, // The canonical value returned by the AWS API is "" + names.AttrState, + }, + }, + }, + }) +} + +func TestAccNetworkManagerDirectConnectGatewayAttachment_tags_EmptyMap(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.DirectConnectGatewayAttachment + resourceName := "aws_networkmanager_dx_gateway_attachment.test" + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + 
CheckDestroy: testAccCheckDirectConnectGatewayAttachmentDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + ConfigDirectory: config.StaticDirectory("testdata/DirectConnectGatewayAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{}), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDirectConnectGatewayAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/DirectConnectGatewayAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{}), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + acctest.CtTagsKey1, // The canonical value returned by the AWS API is "" + names.AttrState, + }, + }, + }, + }) +} + +func TestAccNetworkManagerDirectConnectGatewayAttachment_tags_AddOnUpdate(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.DirectConnectGatewayAttachment + resourceName := 
"aws_networkmanager_dx_gateway_attachment.test" + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckDirectConnectGatewayAttachmentDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + ConfigDirectory: config.StaticDirectory("testdata/DirectConnectGatewayAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDirectConnectGatewayAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/DirectConnectGatewayAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDirectConnectGatewayAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + 
statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/DirectConnectGatewayAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + names.AttrState, + }, + }, + }, + }) +} + +func TestAccNetworkManagerDirectConnectGatewayAttachment_tags_EmptyTag_OnCreate(t *testing.T) { + t.Skip("Resource DirectConnectGatewayAttachment does not support empty tags") + + ctx := acctest.Context(t) + + var v awstypes.DirectConnectGatewayAttachment + resourceName := "aws_networkmanager_dx_gateway_attachment.test" + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + 
CheckDestroy: testAccCheckDirectConnectGatewayAttachmentDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + ConfigDirectory: config.StaticDirectory("testdata/DirectConnectGatewayAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDirectConnectGatewayAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/DirectConnectGatewayAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + names.AttrState, + }, + 
}, + { + ConfigDirectory: config.StaticDirectory("testdata/DirectConnectGatewayAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDirectConnectGatewayAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/DirectConnectGatewayAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + names.AttrState, + }, + }, + }, + }) +} + +func TestAccNetworkManagerDirectConnectGatewayAttachment_tags_EmptyTag_OnUpdate_Add(t *testing.T) { + t.Skip("Resource DirectConnectGatewayAttachment does not support empty tags") + + ctx := acctest.Context(t) + + var v awstypes.DirectConnectGatewayAttachment + resourceName := "aws_networkmanager_dx_gateway_attachment.test" + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: 
testAccCheckDirectConnectGatewayAttachmentDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + ConfigDirectory: config.StaticDirectory("testdata/DirectConnectGatewayAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDirectConnectGatewayAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/DirectConnectGatewayAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + acctest.CtKey2: config.StringVariable(""), + }), + }, + Check: 
resource.ComposeAggregateTestCheckFunc( + testAccCheckDirectConnectGatewayAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + acctest.CtKey2: knownvalue.StringExact(""), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + acctest.CtKey2: knownvalue.StringExact(""), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + acctest.CtKey2: knownvalue.StringExact(""), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + acctest.CtKey2: knownvalue.StringExact(""), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/DirectConnectGatewayAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + acctest.CtKey2: config.StringVariable(""), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + names.AttrState, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/DirectConnectGatewayAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), 
+ acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDirectConnectGatewayAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/DirectConnectGatewayAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + names.AttrState, + }, + }, + }, + }) +} + +func TestAccNetworkManagerDirectConnectGatewayAttachment_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { + t.Skip("Resource DirectConnectGatewayAttachment does not support empty tags") + + ctx := acctest.Context(t) + + var v 
awstypes.DirectConnectGatewayAttachment + resourceName := "aws_networkmanager_dx_gateway_attachment.test" + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckDirectConnectGatewayAttachmentDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + ConfigDirectory: config.StaticDirectory("testdata/DirectConnectGatewayAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDirectConnectGatewayAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ConfigDirectory: 
config.StaticDirectory("testdata/DirectConnectGatewayAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDirectConnectGatewayAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/DirectConnectGatewayAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + names.AttrState, + }, + }, + }, + }) +} + +func TestAccNetworkManagerDirectConnectGatewayAttachment_tags_DefaultTags_providerOnly(t *testing.T) { + ctx := acctest.Context(t) + + var v 
awstypes.DirectConnectGatewayAttachment + resourceName := "aws_networkmanager_dx_gateway_attachment.test" + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckDirectConnectGatewayAttachmentDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/DirectConnectGatewayAttachment/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDirectConnectGatewayAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/DirectConnectGatewayAttachment/tags_defaults/"), + ConfigVariables: 
config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + names.AttrState, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/DirectConnectGatewayAttachment/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1Updated), + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDirectConnectGatewayAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1Updated), + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1Updated), + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: 
acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/DirectConnectGatewayAttachment/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1Updated), + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + names.AttrState, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/DirectConnectGatewayAttachment/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDirectConnectGatewayAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + }, + }, + { + 
ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/DirectConnectGatewayAttachment/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + names.AttrState, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/DirectConnectGatewayAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDirectConnectGatewayAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/DirectConnectGatewayAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + 
ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + names.AttrState, + }, + }, + }, + }) +} + +func TestAccNetworkManagerDirectConnectGatewayAttachment_tags_DefaultTags_nonOverlapping(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.DirectConnectGatewayAttachment + resourceName := "aws_networkmanager_dx_gateway_attachment.test" + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckDirectConnectGatewayAttachmentDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/DirectConnectGatewayAttachment/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDirectConnectGatewayAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1), + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: 
[]plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1), + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/DirectConnectGatewayAttachment/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + names.AttrState, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/DirectConnectGatewayAttachment/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1Updated), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: config.StringVariable(acctest.CtResourceValue2), + }), + }, + Check: 
resource.ComposeAggregateTestCheckFunc( + testAccCheckDirectConnectGatewayAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1Updated), + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1Updated), + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/DirectConnectGatewayAttachment/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: 
config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1Updated), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: config.StringVariable(acctest.CtResourceValue2), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + names.AttrState, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/DirectConnectGatewayAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDirectConnectGatewayAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/DirectConnectGatewayAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: 
[]string{ + names.AttrState, + }, + }, + }, + }) +} + +func TestAccNetworkManagerDirectConnectGatewayAttachment_tags_DefaultTags_overlapping(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.DirectConnectGatewayAttachment + resourceName := "aws_networkmanager_dx_gateway_attachment.test" + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckDirectConnectGatewayAttachmentDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/DirectConnectGatewayAttachment/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtResourceValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDirectConnectGatewayAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + 
plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/DirectConnectGatewayAttachment/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtResourceValue1), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + names.AttrState, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/DirectConnectGatewayAttachment/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtProviderValue1), + acctest.CtOverlapKey2: config.StringVariable("providervalue2"), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtResourceValue1), + acctest.CtOverlapKey2: config.StringVariable(acctest.CtResourceValue2), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDirectConnectGatewayAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: 
[]statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + acctest.CtOverlapKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + acctest.CtOverlapKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + acctest.CtOverlapKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + acctest.CtOverlapKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/DirectConnectGatewayAttachment/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtProviderValue1), + acctest.CtOverlapKey2: config.StringVariable("providervalue2"), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtResourceValue1), + acctest.CtOverlapKey2: 
config.StringVariable(acctest.CtResourceValue2), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + names.AttrState, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/DirectConnectGatewayAttachment/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtResourceValue2), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDirectConnectGatewayAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue2), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: 
acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/DirectConnectGatewayAttachment/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtResourceValue2), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + names.AttrState, + }, + }, + }, + }) +} + +func TestAccNetworkManagerDirectConnectGatewayAttachment_tags_DefaultTags_updateToProviderOnly(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.DirectConnectGatewayAttachment + resourceName := "aws_networkmanager_dx_gateway_attachment.test" + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckDirectConnectGatewayAttachmentDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/DirectConnectGatewayAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDirectConnectGatewayAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: 
knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/DirectConnectGatewayAttachment/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDirectConnectGatewayAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + 
plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/DirectConnectGatewayAttachment/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + names.AttrState, + }, + }, + }, + }) +} + +func TestAccNetworkManagerDirectConnectGatewayAttachment_tags_DefaultTags_updateToResourceOnly(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.DirectConnectGatewayAttachment + resourceName := "aws_networkmanager_dx_gateway_attachment.test" + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckDirectConnectGatewayAttachmentDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/DirectConnectGatewayAttachment/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDirectConnectGatewayAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + 
statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/DirectConnectGatewayAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDirectConnectGatewayAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + 
acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/DirectConnectGatewayAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + names.AttrState, + }, + }, + }, + }) +} + +func TestAccNetworkManagerDirectConnectGatewayAttachment_tags_DefaultTags_emptyResourceTag(t *testing.T) { + t.Skip("Resource DirectConnectGatewayAttachment does not support empty tags") + + ctx := acctest.Context(t) + + var v awstypes.DirectConnectGatewayAttachment + resourceName := "aws_networkmanager_dx_gateway_attachment.test" + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckDirectConnectGatewayAttachmentDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/DirectConnectGatewayAttachment/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + 
}), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDirectConnectGatewayAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/DirectConnectGatewayAttachment/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + names.AttrState, + }, + }, + }, + }) +} + +func TestAccNetworkManagerDirectConnectGatewayAttachment_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T) { + t.Skip("Resource DirectConnectGatewayAttachment does not support empty tags") + + ctx := acctest.Context(t) + + var v 
awstypes.DirectConnectGatewayAttachment + resourceName := "aws_networkmanager_dx_gateway_attachment.test" + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckDirectConnectGatewayAttachmentDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/DirectConnectGatewayAttachment/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDirectConnectGatewayAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/DirectConnectGatewayAttachment/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: 
config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + names.AttrState, + }, + }, + }, + }) +} + +func TestAccNetworkManagerDirectConnectGatewayAttachment_tags_DefaultTags_nullOverlappingResourceTag(t *testing.T) { + t.Skip("Resource DirectConnectGatewayAttachment does not support null tags") + + ctx := acctest.Context(t) + + var v awstypes.DirectConnectGatewayAttachment + resourceName := "aws_networkmanager_dx_gateway_attachment.test" + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckDirectConnectGatewayAttachmentDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/DirectConnectGatewayAttachment/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: nil, + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDirectConnectGatewayAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.Null(), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), 
knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.Null(), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/DirectConnectGatewayAttachment/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: nil, + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + acctest.CtTagsKey1, // The canonical value returned by the AWS API is "" + names.AttrState, + }, + }, + }, + }) +} + +func TestAccNetworkManagerDirectConnectGatewayAttachment_tags_DefaultTags_nullNonOverlappingResourceTag(t *testing.T) { + t.Skip("Resource DirectConnectGatewayAttachment does not support null tags") + + ctx := acctest.Context(t) + + var v awstypes.DirectConnectGatewayAttachment + resourceName := "aws_networkmanager_dx_gateway_attachment.test" + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: 
testAccCheckDirectConnectGatewayAttachmentDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/DirectConnectGatewayAttachment/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: nil, + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDirectConnectGatewayAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.Null(), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(""), + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.Null(), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(""), + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: 
config.StaticDirectory("testdata/DirectConnectGatewayAttachment/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: nil, + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "tags.resourcekey1", // The canonical value returned by the AWS API is "" + names.AttrState, + }, + }, + }, + }) +} + +func TestAccNetworkManagerDirectConnectGatewayAttachment_tags_ComputedTag_OnCreate(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.DirectConnectGatewayAttachment + resourceName := "aws_networkmanager_dx_gateway_attachment.test" + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckDirectConnectGatewayAttachmentDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/DirectConnectGatewayAttachment/tagsComputed1/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "unknownTagKey": config.StringVariable("computedkey1"), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDirectConnectGatewayAttachmentExists(ctx, resourceName, &v), + resource.TestCheckResourceAttrPair(resourceName, "tags.computedkey1", "null_resource.test", names.AttrID), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapSizeExact(1)), + statecheck.ExpectKnownValue(resourceName, 
tfjsonpath.New(names.AttrTagsAll), knownvalue.MapSizeExact(1)), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTags).AtMapKey("computedkey1")), + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/DirectConnectGatewayAttachment/tagsComputed1/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "unknownTagKey": config.StringVariable("computedkey1"), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + names.AttrState, + }, + }, + }, + }) +} + +func TestAccNetworkManagerDirectConnectGatewayAttachment_tags_ComputedTag_OnUpdate_Add(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.DirectConnectGatewayAttachment + resourceName := "aws_networkmanager_dx_gateway_attachment.test" + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckDirectConnectGatewayAttachmentDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/DirectConnectGatewayAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + 
acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDirectConnectGatewayAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/DirectConnectGatewayAttachment/tagsComputed2/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "unknownTagKey": config.StringVariable("computedkey1"), + "knownTagKey": config.StringVariable(acctest.CtKey1), + "knownTagValue": config.StringVariable(acctest.CtValue1), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDirectConnectGatewayAttachmentExists(ctx, resourceName, &v), + resource.TestCheckResourceAttrPair(resourceName, "tags.computedkey1", "null_resource.test", names.AttrID), + ), + ConfigStateChecks: []statecheck.StateCheck{ + 
statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapSizeExact(2)), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapPartial(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapSizeExact(2)), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapPartial(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTags).AtMapKey("computedkey1")), + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/DirectConnectGatewayAttachment/tagsComputed2/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "unknownTagKey": config.StringVariable("computedkey1"), + "knownTagKey": config.StringVariable(acctest.CtKey1), + "knownTagValue": config.StringVariable(acctest.CtValue1), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + names.AttrState, + }, + }, + }, + }) +} + +func TestAccNetworkManagerDirectConnectGatewayAttachment_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { + ctx := acctest.Context(t) + + var v 
awstypes.DirectConnectGatewayAttachment + resourceName := "aws_networkmanager_dx_gateway_attachment.test" + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckDirectConnectGatewayAttachmentDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/DirectConnectGatewayAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDirectConnectGatewayAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: 
acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/DirectConnectGatewayAttachment/tagsComputed1/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "unknownTagKey": config.StringVariable(acctest.CtKey1), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDirectConnectGatewayAttachmentExists(ctx, resourceName, &v), + resource.TestCheckResourceAttrPair(resourceName, acctest.CtTagsKey1, "null_resource.test", names.AttrID), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapSizeExact(1)), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapSizeExact(1)), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTags).AtMapKey(acctest.CtKey1)), + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/DirectConnectGatewayAttachment/tagsComputed1/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "unknownTagKey": config.StringVariable(acctest.CtKey1), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + names.AttrState, + }, + }, + }, + }) +} + +func TestAccNetworkManagerDirectConnectGatewayAttachment_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { + ctx := 
acctest.Context(t) + + var v awstypes.DirectConnectGatewayAttachment + resourceName := "aws_networkmanager_dx_gateway_attachment.test" + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckDirectConnectGatewayAttachmentDestroy(ctx), + Steps: []resource.TestStep{ + // 1: Create + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/DirectConnectGatewayAttachment/tags_ignore/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1), + }), + "ignore_tag_keys": config.SetVariable( + config.StringVariable(acctest.CtProviderKey1), + ), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDirectConnectGatewayAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + expectFullResourceTags(ctx, resourceName, knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1), // TODO: Should not be set + acctest.CtResourceKey1: 
knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectEmptyPlan(), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectEmptyPlan(), + }, + }, + }, + // 2: Update ignored tag only + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/DirectConnectGatewayAttachment/tags_ignore/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1Updated), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1), + }), + "ignore_tag_keys": config.SetVariable( + config.StringVariable(acctest.CtProviderKey1), + ), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDirectConnectGatewayAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), 
knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + expectFullResourceTags(ctx, resourceName, knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1), // TODO: Should not be set + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectEmptyPlan(), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectEmptyPlan(), + }, + }, + }, + // 3: Update both tags + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/DirectConnectGatewayAttachment/tags_ignore/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1Again), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1Updated), + }), + "ignore_tag_keys": config.SetVariable( + config.StringVariable(acctest.CtProviderKey1), + ), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDirectConnectGatewayAttachmentExists(ctx, resourceName, &v), + ), + 
ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + })), + expectFullResourceTags(ctx, resourceName, knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1), // TODO: Should not be set + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + })), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectEmptyPlan(), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectEmptyPlan(), + }, + }, + }, + }, + }) +} + +func TestAccNetworkManagerDirectConnectGatewayAttachment_tags_IgnoreTags_Overlap_ResourceTag(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.DirectConnectGatewayAttachment + resourceName := "aws_networkmanager_dx_gateway_attachment.test" + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: 
acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckDirectConnectGatewayAttachmentDestroy(ctx), + Steps: []resource.TestStep{ + // 1: Create + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/DirectConnectGatewayAttachment/tags_ignore/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1), + acctest.CtResourceKey2: config.StringVariable(acctest.CtResourceValue2), + }), + "ignore_tag_keys": config.SetVariable( + config.StringVariable(acctest.CtResourceKey1), + ), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDirectConnectGatewayAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + expectFullResourceTags(ctx, resourceName, knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), // TODO: Should not be set + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + 
acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectEmptyPlan(), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), // TODO: Should be NoOp + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + }, + ExpectNonEmptyPlan: true, + }, + // 2: Update ignored tag + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/DirectConnectGatewayAttachment/tags_ignore/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: config.StringVariable(acctest.CtResourceValue2), + }), + "ignore_tag_keys": config.SetVariable( + config.StringVariable(acctest.CtResourceKey1), + ), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDirectConnectGatewayAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: 
[]statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + expectFullResourceTags(ctx, resourceName, knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), // TODO: Should not be set + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectEmptyPlan(), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), // TODO: Should be NoOp + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + 
})), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + }, + ExpectNonEmptyPlan: true, + }, + // 3: Update both tags + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/DirectConnectGatewayAttachment/tags_ignore/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1Again), + acctest.CtResourceKey2: config.StringVariable(acctest.CtResourceValue2Updated), + }), + "ignore_tag_keys": config.SetVariable( + config.StringVariable(acctest.CtResourceKey1), + ), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDirectConnectGatewayAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Again), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + expectFullResourceTags(ctx, resourceName, knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), // TODO: Should not be set + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, 
plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Again), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectEmptyPlan(), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), // TODO: Should be NoOp + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Again), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + }, + }, + ExpectNonEmptyPlan: true, + }, + }, + }) +} diff --git a/internal/service/networkmanager/dx_gateway_attachment_test.go b/internal/service/networkmanager/dx_gateway_attachment_test.go index 8cc3a2d9067e..39d1518b46fe 100644 --- a/internal/service/networkmanager/dx_gateway_attachment_test.go +++ b/internal/service/networkmanager/dx_gateway_attachment_test.go @@ -192,52 +192,6 @@ func TestAccNetworkManagerDirectConnectGatewayAttachment_update(t *testing.T) { }) } -func TestAccNetworkManagerDirectConnectGatewayAttachment_tags(t *testing.T) { - ctx := acctest.Context(t) - var dxgatewayattachment awstypes.DirectConnectGatewayAttachment - rName := 
sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resourceName := "aws_networkmanager_dx_gateway_attachment.test" - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckDirectConnectGatewayAttachmentDestroy(ctx), - Steps: []resource.TestStep{ - { - Config: testAccDirectConnectGatewayAttachmentConfig_tags1(rName, acctest.CtKey1, acctest.CtValue1), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckDirectConnectGatewayAttachmentExists(ctx, resourceName, &dxgatewayattachment), - resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, "1"), - resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey1, acctest.CtValue1), - ), - }, - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - { - Config: testAccDirectConnectGatewayAttachmentConfig_tags2(rName, acctest.CtKey1, acctest.CtValue1Updated, acctest.CtKey2, acctest.CtValue2), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckDirectConnectGatewayAttachmentExists(ctx, resourceName, &dxgatewayattachment), - resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, "2"), - resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey1, acctest.CtValue1Updated), - resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey2, acctest.CtValue2), - ), - }, - { - Config: testAccDirectConnectGatewayAttachmentConfig_tags1(rName, acctest.CtKey2, acctest.CtValue2), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckDirectConnectGatewayAttachmentExists(ctx, resourceName, &dxgatewayattachment), - resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, "1"), - resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey2, acctest.CtValue2), - ), - }, - }, - }) -} - func 
TestAccNetworkManagerDirectConnectGatewayAttachment_accepted(t *testing.T) { ctx := acctest.Context(t) var dxgatewayattachment awstypes.DirectConnectGatewayAttachment @@ -309,8 +263,6 @@ func testAccCheckDirectConnectGatewayAttachmentExists(ctx context.Context, n str func testAccDirectConnectGatewayAttachmentConfig_base(rName string, requireAcceptance bool) string { return acctest.ConfigCompose(acctest.ConfigAvailableAZsNoOptIn(), fmt.Sprintf(` -data "aws_region" "current" {} - resource "aws_dx_gateway" "test" { name = %[1]q amazon_side_asn = 65000 @@ -335,6 +287,8 @@ resource "aws_networkmanager_core_network_policy_attachment" "test" { policy_document = data.aws_networkmanager_core_network_policy_document.test.json } +data "aws_region" "current" {} + data "aws_networkmanager_core_network_policy_document" "test" { core_network_configuration { vpn_ecmp_support = false @@ -489,32 +443,3 @@ resource "aws_networkmanager_dx_gateway_attachment" "test" { } `, edgeLocation1, edgeLocation2)) } - -func testAccDirectConnectGatewayAttachmentConfig_tags1(rName, tagKey1, tagValue1 string) string { - return acctest.ConfigCompose(testAccDirectConnectGatewayAttachmentConfig_base(rName, false), fmt.Sprintf(` -resource "aws_networkmanager_dx_gateway_attachment" "test" { - core_network_id = aws_networkmanager_core_network_policy_attachment.test.core_network_id - direct_connect_gateway_arn = aws_dx_gateway.test.arn - edge_locations = [data.aws_region.current.region] - - tags = { - %[1]q = %[2]q - } -} -`, tagKey1, tagValue1)) -} - -func testAccDirectConnectGatewayAttachmentConfig_tags2(rName, tagKey1, tagValue1, tagKey2, tagValue2 string) string { - return acctest.ConfigCompose(testAccDirectConnectGatewayAttachmentConfig_base(rName, false), fmt.Sprintf(` -resource "aws_networkmanager_dx_gateway_attachment" "test" { - core_network_id = aws_networkmanager_core_network_policy_attachment.test.core_network_id - direct_connect_gateway_arn = aws_dx_gateway.test.arn - edge_locations = 
[data.aws_region.current.region] - - tags = { - %[1]q = %[2]q - %[3]q = %[4]q - } -} -`, tagKey1, tagValue1, tagKey2, tagValue2)) -} diff --git a/internal/service/networkmanager/generate.go b/internal/service/networkmanager/generate.go index f6a18a9e8a10..4368928626e7 100644 --- a/internal/service/networkmanager/generate.go +++ b/internal/service/networkmanager/generate.go @@ -1,8 +1,9 @@ // Copyright (c) HashiCorp, Inc. // SPDX-License-Identifier: MPL-2.0 -//go:generate go run ../../generate/tags/main.go -ServiceTagsSlice -UpdateTags +//go:generate go run ../../generate/tags/main.go -ServiceTagsSlice -UpdateTags -ListTags -ListTagsOutTagsElem=TagList //go:generate go run ../../generate/servicepackage/main.go +//go:generate go run ../../generate/tagstests/main.go // ONLY generate directives and package declaration! Do not add anything else to this file. package networkmanager diff --git a/internal/service/networkmanager/global_network.go b/internal/service/networkmanager/global_network.go index a3588515e576..0da65835881f 100644 --- a/internal/service/networkmanager/global_network.go +++ b/internal/service/networkmanager/global_network.go @@ -26,6 +26,8 @@ import ( // @SDKResource("aws_networkmanager_global_network", name="Global Network") // @Tags(identifierAttribute="arn") +// @Testing(skipEmptyTags=true) +// @Testing(generator=false) func resourceGlobalNetwork() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceGlobalNetworkCreate, @@ -158,7 +160,7 @@ func resourceGlobalNetworkDelete(ctx context.Context, d *schema.ResourceData, me log.Printf("[DEBUG] Deleting Network Manager Global Network: %s", d.Id()) _, err := tfresource.RetryWhen(ctx, globalNetworkValidationExceptionTimeout, - func() (any, error) { + func(ctx context.Context) (any, error) { return conn.DeleteGlobalNetwork(ctx, &networkmanager.DeleteGlobalNetworkInput{ GlobalNetworkId: aws.String(d.Id()), }) diff --git a/internal/service/networkmanager/global_network_tags_gen_test.go 
b/internal/service/networkmanager/global_network_tags_gen_test.go new file mode 100644 index 000000000000..3d3a8b017e86 --- /dev/null +++ b/internal/service/networkmanager/global_network_tags_gen_test.go @@ -0,0 +1,2183 @@ +// Code generated by internal/generate/tagstests/main.go; DO NOT EDIT. + +package networkmanager_test + +import ( + "testing" + + "github.com/hashicorp/terraform-plugin-testing/config" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/knownvalue" + "github.com/hashicorp/terraform-plugin-testing/plancheck" + "github.com/hashicorp/terraform-plugin-testing/statecheck" + "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccNetworkManagerGlobalNetwork_tags(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_networkmanager_global_network.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckGlobalNetworkDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + ConfigDirectory: config.StaticDirectory("testdata/GlobalNetwork/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckGlobalNetworkExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), 
knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/GlobalNetwork/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/GlobalNetwork/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1Updated), + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckGlobalNetworkExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1Updated), + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1Updated), + acctest.CtKey2: 
knownvalue.StringExact(acctest.CtValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1Updated), + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1Updated), + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/GlobalNetwork/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1Updated), + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/GlobalNetwork/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckGlobalNetworkExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + ConfigPlanChecks: 
resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/GlobalNetwork/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/GlobalNetwork/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckGlobalNetworkExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/GlobalNetwork/tags/"), + ConfigVariables: 
config.Variables{ + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccNetworkManagerGlobalNetwork_tags_null(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_networkmanager_global_network.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckGlobalNetworkDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + ConfigDirectory: config.StaticDirectory("testdata/GlobalNetwork/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: nil, + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckGlobalNetworkExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + // TODO: Should be known + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/GlobalNetwork/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: nil, + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/GlobalNetwork/tags/"), 
+ ConfigVariables: config.Variables{ + acctest.CtResourceTags: nil, + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + }, + }, + }) +} + +func TestAccNetworkManagerGlobalNetwork_tags_EmptyMap(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_networkmanager_global_network.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckGlobalNetworkDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + ConfigDirectory: config.StaticDirectory("testdata/GlobalNetwork/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{}), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckGlobalNetworkExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + // TODO: Should be known + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/GlobalNetwork/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: 
config.MapVariable(map[string]config.Variable{}), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/GlobalNetwork/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: nil, + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + }, + }, + }) +} + +func TestAccNetworkManagerGlobalNetwork_tags_AddOnUpdate(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_networkmanager_global_network.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckGlobalNetworkDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + ConfigDirectory: config.StaticDirectory("testdata/GlobalNetwork/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckGlobalNetworkExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + // TODO: Should be known + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + 
}, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/GlobalNetwork/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckGlobalNetworkExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/GlobalNetwork/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccNetworkManagerGlobalNetwork_tags_EmptyTag_OnCreate(t *testing.T) { + t.Skip("Resource GlobalNetwork does not support empty tags") + + ctx := acctest.Context(t) + + resourceName := "aws_networkmanager_global_network.test" + + acctest.ParallelTest(ctx, t, 
resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckGlobalNetworkDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + ConfigDirectory: config.StaticDirectory("testdata/GlobalNetwork/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckGlobalNetworkExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + // TODO: Should be known + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/GlobalNetwork/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/GlobalNetwork/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: nil, + }, + Check: 
resource.ComposeAggregateTestCheckFunc( + testAccCheckGlobalNetworkExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/GlobalNetwork/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccNetworkManagerGlobalNetwork_tags_EmptyTag_OnUpdate_Add(t *testing.T) { + t.Skip("Resource GlobalNetwork does not support empty tags") + + ctx := acctest.Context(t) + + resourceName := "aws_networkmanager_global_network.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckGlobalNetworkDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + ConfigDirectory: config.StaticDirectory("testdata/GlobalNetwork/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckGlobalNetworkExists(ctx, 
resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/GlobalNetwork/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + acctest.CtKey2: config.StringVariable(""), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckGlobalNetworkExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + acctest.CtKey2: knownvalue.StringExact(""), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + acctest.CtKey2: knownvalue.StringExact(""), + })), + }, + ConfigPlanChecks: 
resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + acctest.CtKey2: knownvalue.StringExact(""), + })), + // TODO: Should be known + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/GlobalNetwork/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + acctest.CtKey2: config.StringVariable(""), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/GlobalNetwork/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckGlobalNetworkExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: 
knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/GlobalNetwork/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccNetworkManagerGlobalNetwork_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { + t.Skip("Resource GlobalNetwork does not support empty tags") + + ctx := acctest.Context(t) + + resourceName := "aws_networkmanager_global_network.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckGlobalNetworkDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + ConfigDirectory: config.StaticDirectory("testdata/GlobalNetwork/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckGlobalNetworkExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: 
resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/GlobalNetwork/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckGlobalNetworkExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + // TODO: Should be known + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/GlobalNetwork/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: 
config.StringVariable(""), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccNetworkManagerGlobalNetwork_tags_DefaultTags_providerOnly(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_networkmanager_global_network.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckGlobalNetworkDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/GlobalNetwork/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckGlobalNetworkExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/GlobalNetwork/tags_defaults/"), + 
ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/GlobalNetwork/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1Updated), + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckGlobalNetworkExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1Updated), + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1Updated), + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/GlobalNetwork/tags_defaults/"), + 
ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1Updated), + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/GlobalNetwork/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckGlobalNetworkExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/GlobalNetwork/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey2: 
config.StringVariable(acctest.CtValue2), + }), + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/GlobalNetwork/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckGlobalNetworkExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/GlobalNetwork/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccNetworkManagerGlobalNetwork_tags_DefaultTags_nonOverlapping(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_networkmanager_global_network.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckGlobalNetworkDestroy(ctx), + Steps: []resource.TestStep{ + { + 
ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/GlobalNetwork/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckGlobalNetworkExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1), + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1), + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/GlobalNetwork/tags_defaults/"), + ConfigVariables: 
config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/GlobalNetwork/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1Updated), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: config.StringVariable(acctest.CtResourceValue2), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckGlobalNetworkExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1Updated), + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + 
plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1Updated), + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/GlobalNetwork/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1Updated), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: config.StringVariable(acctest.CtResourceValue2), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/GlobalNetwork/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckGlobalNetworkExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + 
ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/GlobalNetwork/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccNetworkManagerGlobalNetwork_tags_DefaultTags_overlapping(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_networkmanager_global_network.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckGlobalNetworkDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/GlobalNetwork/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtResourceValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckGlobalNetworkExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: 
knownvalue.StringExact(acctest.CtResourceValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/GlobalNetwork/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtResourceValue1), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/GlobalNetwork/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtProviderValue1), + acctest.CtOverlapKey2: config.StringVariable("providervalue2"), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtResourceValue1), + 
acctest.CtOverlapKey2: config.StringVariable(acctest.CtResourceValue2), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckGlobalNetworkExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + acctest.CtOverlapKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + acctest.CtOverlapKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + acctest.CtOverlapKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + acctest.CtOverlapKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/GlobalNetwork/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtProviderValue1), + acctest.CtOverlapKey2: config.StringVariable("providervalue2"), + }), + acctest.CtResourceTags: 
config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtResourceValue1), + acctest.CtOverlapKey2: config.StringVariable(acctest.CtResourceValue2), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/GlobalNetwork/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtResourceValue2), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckGlobalNetworkExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue2), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: 
acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/GlobalNetwork/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtResourceValue2), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccNetworkManagerGlobalNetwork_tags_DefaultTags_updateToProviderOnly(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_networkmanager_global_network.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckGlobalNetworkDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/GlobalNetwork/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckGlobalNetworkExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, 
plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/GlobalNetwork/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckGlobalNetworkExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/GlobalNetwork/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: 
config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccNetworkManagerGlobalNetwork_tags_DefaultTags_updateToResourceOnly(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_networkmanager_global_network.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckGlobalNetworkDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/GlobalNetwork/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckGlobalNetworkExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: 
acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/GlobalNetwork/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckGlobalNetworkExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/GlobalNetwork/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccNetworkManagerGlobalNetwork_tags_DefaultTags_emptyResourceTag(t *testing.T) { + t.Skip("Resource GlobalNetwork does not support empty tags") + + ctx := acctest.Context(t) + 
+ resourceName := "aws_networkmanager_global_network.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckGlobalNetworkDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/GlobalNetwork/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckGlobalNetworkExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + // TODO: Should be known + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/GlobalNetwork/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: 
config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccNetworkManagerGlobalNetwork_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T) { + t.Skip("Resource GlobalNetwork does not support empty tags") + + ctx := acctest.Context(t) + + resourceName := "aws_networkmanager_global_network.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckGlobalNetworkDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/GlobalNetwork/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckGlobalNetworkExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + // TODO: Should be known + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + }, 
+ }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/GlobalNetwork/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccNetworkManagerGlobalNetwork_tags_DefaultTags_nullOverlappingResourceTag(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_networkmanager_global_network.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckGlobalNetworkDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/GlobalNetwork/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: nil, + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckGlobalNetworkExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtProviderValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, 
tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtProviderValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/GlobalNetwork/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: nil, + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccNetworkManagerGlobalNetwork_tags_DefaultTags_nullNonOverlappingResourceTag(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_networkmanager_global_network.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckGlobalNetworkDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/GlobalNetwork/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: nil, + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckGlobalNetworkExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + 
statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/GlobalNetwork/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: nil, + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccNetworkManagerGlobalNetwork_tags_ComputedTag_OnCreate(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_networkmanager_global_network.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckGlobalNetworkDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/GlobalNetwork/tagsComputed1/"), + ConfigVariables: config.Variables{ + "unknownTagKey": config.StringVariable("computedkey1"), + }, + Check: resource.ComposeAggregateTestCheckFunc( + 
testAccCheckGlobalNetworkExists(ctx, resourceName), + resource.TestCheckResourceAttrPair(resourceName, "tags.computedkey1", "null_resource.test", names.AttrID), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapSizeExact(1)), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapSizeExact(1)), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTags)), + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/GlobalNetwork/tagsComputed1/"), + ConfigVariables: config.Variables{ + "unknownTagKey": config.StringVariable("computedkey1"), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccNetworkManagerGlobalNetwork_tags_ComputedTag_OnUpdate_Add(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_networkmanager_global_network.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckGlobalNetworkDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/GlobalNetwork/tags/"), + ConfigVariables: config.Variables{ + 
acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckGlobalNetworkExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/GlobalNetwork/tagsComputed2/"), + ConfigVariables: config.Variables{ + "unknownTagKey": config.StringVariable("computedkey1"), + "knownTagKey": config.StringVariable(acctest.CtKey1), + "knownTagValue": config.StringVariable(acctest.CtValue1), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckGlobalNetworkExists(ctx, resourceName), + resource.TestCheckResourceAttrPair(resourceName, "tags.computedkey1", "null_resource.test", names.AttrID), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapSizeExact(2)), + 
statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapPartial(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapSizeExact(2)), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapPartial(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTags)), + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/GlobalNetwork/tagsComputed2/"), + ConfigVariables: config.Variables{ + "unknownTagKey": config.StringVariable("computedkey1"), + "knownTagKey": config.StringVariable(acctest.CtKey1), + "knownTagValue": config.StringVariable(acctest.CtValue1), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccNetworkManagerGlobalNetwork_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_networkmanager_global_network.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckGlobalNetworkDestroy(ctx), + Steps: []resource.TestStep{ + { 
+ ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/GlobalNetwork/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckGlobalNetworkExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/GlobalNetwork/tagsComputed1/"), + ConfigVariables: config.Variables{ + "unknownTagKey": config.StringVariable(acctest.CtKey1), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckGlobalNetworkExists(ctx, resourceName), + resource.TestCheckResourceAttrPair(resourceName, acctest.CtTagsKey1, "null_resource.test", names.AttrID), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, 
tfjsonpath.New(names.AttrTags), knownvalue.MapSizeExact(1)), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapSizeExact(1)), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTags)), + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/GlobalNetwork/tagsComputed1/"), + ConfigVariables: config.Variables{ + "unknownTagKey": config.StringVariable(acctest.CtKey1), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccNetworkManagerGlobalNetwork_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_networkmanager_global_network.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckGlobalNetworkDestroy(ctx), + Steps: []resource.TestStep{ + // 1: Create + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/GlobalNetwork/tags_ignore/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + 
acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1), + }), + "ignore_tag_keys": config.SetVariable( + config.StringVariable(acctest.CtProviderKey1), + ), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckGlobalNetworkExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + expectFullResourceTags(ctx, resourceName, knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1), // TODO: Should not be set + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectEmptyPlan(), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectEmptyPlan(), + }, + }, + }, + // 2: Update ignored tag only + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/GlobalNetwork/tags_ignore/"), + ConfigVariables: 
config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1Updated), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1), + }), + "ignore_tag_keys": config.SetVariable( + config.StringVariable(acctest.CtProviderKey1), + ), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckGlobalNetworkExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + expectFullResourceTags(ctx, resourceName, knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1), // TODO: Should not be set + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectEmptyPlan(), + }, + PostApplyPostRefresh: 
[]plancheck.PlanCheck{ + plancheck.ExpectEmptyPlan(), + }, + }, + }, + // 3: Update both tags + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/GlobalNetwork/tags_ignore/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1Again), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1Updated), + }), + "ignore_tag_keys": config.SetVariable( + config.StringVariable(acctest.CtProviderKey1), + ), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckGlobalNetworkExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + })), + expectFullResourceTags(ctx, resourceName, knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1), // TODO: Should not be set + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + })), + plancheck.ExpectKnownValue(resourceName, 
tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + })), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectEmptyPlan(), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectEmptyPlan(), + }, + }, + }, + }, + }) +} + +func TestAccNetworkManagerGlobalNetwork_tags_IgnoreTags_Overlap_ResourceTag(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_networkmanager_global_network.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckGlobalNetworkDestroy(ctx), + Steps: []resource.TestStep{ + // 1: Create + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/GlobalNetwork/tags_ignore/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1), + acctest.CtResourceKey2: config.StringVariable(acctest.CtResourceValue2), + }), + "ignore_tag_keys": config.SetVariable( + config.StringVariable(acctest.CtResourceKey1), + ), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckGlobalNetworkExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + expectFullResourceTags(ctx, resourceName, knownvalue.MapExact(map[string]knownvalue.Check{ 
+ acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), // TODO: Should not be set + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), 
knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + }, + ExpectNonEmptyPlan: true, + }, + // 2: Update ignored tag + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/GlobalNetwork/tags_ignore/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: config.StringVariable(acctest.CtResourceValue2), + }), + "ignore_tag_keys": config.SetVariable( + config.StringVariable(acctest.CtResourceKey1), + ), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckGlobalNetworkExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + expectFullResourceTags(ctx, resourceName, knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), // TODO: Should not be set + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: 
knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + }, + ExpectNonEmptyPlan: true, + }, + // 3: Update both tags + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/GlobalNetwork/tags_ignore/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1Again), + acctest.CtResourceKey2: 
config.StringVariable(acctest.CtResourceValue2Updated), + }), + "ignore_tag_keys": config.SetVariable( + config.StringVariable(acctest.CtResourceKey1), + ), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckGlobalNetworkExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + expectFullResourceTags(ctx, resourceName, knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), // TODO: Should not be set + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Again), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: 
knownvalue.StringExact(acctest.CtResourceValue1Again), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Again), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + }, + }, + ExpectNonEmptyPlan: true, + }, + }, + }) +} diff --git a/internal/service/networkmanager/global_network_test.go b/internal/service/networkmanager/global_network_test.go index 6959f0c41d34..f57542435e8c 100644 --- a/internal/service/networkmanager/global_network_test.go +++ b/internal/service/networkmanager/global_network_test.go @@ -68,50 +68,6 @@ func TestAccNetworkManagerGlobalNetwork_disappears(t *testing.T) { }) } -func TestAccNetworkManagerGlobalNetwork_tags(t *testing.T) { - ctx := acctest.Context(t) - resourceName := "aws_networkmanager_global_network.test" - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckGlobalNetworkDestroy(ctx), - Steps: []resource.TestStep{ - { - Config: testAccGlobalNetworkConfig_tags1(acctest.CtKey1, acctest.CtValue1), - Check: 
resource.ComposeTestCheckFunc( - testAccCheckGlobalNetworkExists(ctx, resourceName), - resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, "1"), - resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey1, acctest.CtValue1), - ), - }, - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - { - Config: testAccGlobalNetworkConfig_tags2(acctest.CtKey1, acctest.CtValue1Updated, acctest.CtKey2, acctest.CtValue2), - Check: resource.ComposeTestCheckFunc( - testAccCheckGlobalNetworkExists(ctx, resourceName), - resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, "2"), - resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey1, acctest.CtValue1Updated), - resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey2, acctest.CtValue2), - ), - }, - { - Config: testAccGlobalNetworkConfig_tags1(acctest.CtKey2, acctest.CtValue2), - Check: resource.ComposeTestCheckFunc( - testAccCheckGlobalNetworkExists(ctx, resourceName), - resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, "1"), - resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey2, acctest.CtValue2), - ), - }, - }, - }) -} - func TestAccNetworkManagerGlobalNetwork_description(t *testing.T) { ctx := acctest.Context(t) resourceName := "aws_networkmanager_global_network.test" @@ -196,27 +152,6 @@ resource "aws_networkmanager_global_network" "test" {} ` } -func testAccGlobalNetworkConfig_tags1(tagKey1, tagValue1 string) string { - return fmt.Sprintf(` -resource "aws_networkmanager_global_network" "test" { - tags = { - %[1]q = %[2]q - } -} -`, tagKey1, tagValue1) -} - -func testAccGlobalNetworkConfig_tags2(tagKey1, tagValue1, tagKey2, tagValue2 string) string { - return fmt.Sprintf(` -resource "aws_networkmanager_global_network" "test" { - tags = { - %[1]q = %[2]q - %[3]q = %[4]q - } -} -`, tagKey1, tagValue1, tagKey2, tagValue2) -} - func testAccGlobalNetworkConfig_description(description string) string { return 
fmt.Sprintf(` resource "aws_networkmanager_global_network" "test" { diff --git a/internal/service/networkmanager/link.go b/internal/service/networkmanager/link.go index 15879822493a..7a13fec51685 100644 --- a/internal/service/networkmanager/link.go +++ b/internal/service/networkmanager/link.go @@ -29,6 +29,9 @@ import ( // @SDKResource("aws_networkmanager_link", name="Link") // @Tags(identifierAttribute="arn") +// @Testing(skipEmptyTags=true) +// @Testing(generator=false) +// @Testing(importStateIdAttribute="arn") func resourceLink() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceLinkCreate, diff --git a/internal/service/networkmanager/link_tags_gen_test.go b/internal/service/networkmanager/link_tags_gen_test.go new file mode 100644 index 000000000000..e2cd7af7a05c --- /dev/null +++ b/internal/service/networkmanager/link_tags_gen_test.go @@ -0,0 +1,2245 @@ +// Code generated by internal/generate/tagstests/main.go; DO NOT EDIT. + +package networkmanager_test + +import ( + "testing" + + "github.com/hashicorp/terraform-plugin-testing/config" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/knownvalue" + "github.com/hashicorp/terraform-plugin-testing/plancheck" + "github.com/hashicorp/terraform-plugin-testing/statecheck" + "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccNetworkManagerLink_tags(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_networkmanager_link.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckLinkDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + ConfigDirectory: 
config.StaticDirectory("testdata/Link/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckLinkExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Link/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, names.AttrARN), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: names.AttrARN, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Link/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: 
config.StringVariable(acctest.CtValue1Updated), + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckLinkExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1Updated), + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1Updated), + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1Updated), + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1Updated), + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Link/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1Updated), + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, names.AttrARN), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: 
names.AttrARN, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Link/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckLinkExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Link/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, names.AttrARN), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: names.AttrARN, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Link/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: nil, + }, + Check: 
resource.ComposeAggregateTestCheckFunc( + testAccCheckLinkExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Link/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, names.AttrARN), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: names.AttrARN, + }, + }, + }) +} + +func TestAccNetworkManagerLink_tags_null(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_networkmanager_link.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckLinkDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + ConfigDirectory: config.StaticDirectory("testdata/Link/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: nil, + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckLinkExists(ctx, resourceName), + ), + ConfigStateChecks: 
[]statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + // TODO: Should be known + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Link/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: nil, + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, names.AttrARN), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: names.AttrARN, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Link/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: nil, + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + }, + }, + }) +} + +func TestAccNetworkManagerLink_tags_EmptyMap(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_networkmanager_link.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckLinkDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + 
ConfigDirectory: config.StaticDirectory("testdata/Link/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{}), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckLinkExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + // TODO: Should be known + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Link/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{}), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, names.AttrARN), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: names.AttrARN, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Link/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: nil, + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + }, + }, + }) +} + +func TestAccNetworkManagerLink_tags_AddOnUpdate(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_networkmanager_link.test" + + acctest.ParallelTest(ctx, t, 
resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckLinkDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + ConfigDirectory: config.StaticDirectory("testdata/Link/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckLinkExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + // TODO: Should be known + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Link/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckLinkExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: 
resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Link/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, names.AttrARN), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: names.AttrARN, + }, + }, + }) +} + +func TestAccNetworkManagerLink_tags_EmptyTag_OnCreate(t *testing.T) { + t.Skip("Resource Link does not support empty tags") + + ctx := acctest.Context(t) + + resourceName := "aws_networkmanager_link.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckLinkDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + ConfigDirectory: config.StaticDirectory("testdata/Link/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckLinkExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), 
knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + // TODO: Should be known + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Link/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, names.AttrARN), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: names.AttrARN, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Link/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckLinkExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), 
knownvalue.MapExact(map[string]knownvalue.Check{})), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Link/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, names.AttrARN), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: names.AttrARN, + }, + }, + }) +} + +func TestAccNetworkManagerLink_tags_EmptyTag_OnUpdate_Add(t *testing.T) { + t.Skip("Resource Link does not support empty tags") + + ctx := acctest.Context(t) + + resourceName := "aws_networkmanager_link.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckLinkDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + ConfigDirectory: config.StaticDirectory("testdata/Link/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckLinkExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + 
plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Link/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + acctest.CtKey2: config.StringVariable(""), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckLinkExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + acctest.CtKey2: knownvalue.StringExact(""), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + acctest.CtKey2: knownvalue.StringExact(""), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + acctest.CtKey2: knownvalue.StringExact(""), + })), + // TODO: Should be known + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Link/tags/"), + 
ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + acctest.CtKey2: config.StringVariable(""), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, names.AttrARN), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: names.AttrARN, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Link/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckLinkExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Link/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: 
config.StringVariable(acctest.CtValue1), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, names.AttrARN), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: names.AttrARN, + }, + }, + }) +} + +func TestAccNetworkManagerLink_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { + t.Skip("Resource Link does not support empty tags") + + ctx := acctest.Context(t) + + resourceName := "aws_networkmanager_link.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckLinkDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + ConfigDirectory: config.StaticDirectory("testdata/Link/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckLinkExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, 
tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Link/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckLinkExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + // TODO: Should be known + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Link/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, names.AttrARN), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: names.AttrARN, + }, + }, + }) +} + +func TestAccNetworkManagerLink_tags_DefaultTags_providerOnly(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_networkmanager_link.test" 
+ + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckLinkDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Link/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckLinkExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Link/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, names.AttrARN), + 
ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: names.AttrARN, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Link/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1Updated), + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckLinkExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1Updated), + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1Updated), + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Link/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1Updated), + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + 
acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, names.AttrARN), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: names.AttrARN, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Link/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckLinkExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Link/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + 
ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, names.AttrARN), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: names.AttrARN, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Link/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckLinkExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Link/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, names.AttrARN), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: names.AttrARN, + }, + }, + }) +} + +func TestAccNetworkManagerLink_tags_DefaultTags_nonOverlapping(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_networkmanager_link.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + 
CheckDestroy: testAccCheckLinkDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Link/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckLinkExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1), + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1), + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: 
config.StaticDirectory("testdata/Link/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, names.AttrARN), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: names.AttrARN, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Link/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1Updated), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: config.StringVariable(acctest.CtResourceValue2), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckLinkExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1Updated), + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: 
knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1Updated), + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Link/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1Updated), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: config.StringVariable(acctest.CtResourceValue2), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, names.AttrARN), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: names.AttrARN, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Link/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckLinkExists(ctx, 
resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Link/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, names.AttrARN), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: names.AttrARN, + }, + }, + }) +} + +func TestAccNetworkManagerLink_tags_DefaultTags_overlapping(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_networkmanager_link.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckLinkDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Link/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: 
config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtResourceValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckLinkExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Link/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtResourceValue1), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, names.AttrARN), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: names.AttrARN, + }, 
+ { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Link/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtProviderValue1), + acctest.CtOverlapKey2: config.StringVariable("providervalue2"), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtResourceValue1), + acctest.CtOverlapKey2: config.StringVariable(acctest.CtResourceValue2), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckLinkExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + acctest.CtOverlapKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + acctest.CtOverlapKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + acctest.CtOverlapKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + 
acctest.CtOverlapKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Link/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtProviderValue1), + acctest.CtOverlapKey2: config.StringVariable("providervalue2"), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtResourceValue1), + acctest.CtOverlapKey2: config.StringVariable(acctest.CtResourceValue2), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, names.AttrARN), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: names.AttrARN, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Link/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtResourceValue2), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckLinkExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue2), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, 
+ ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Link/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtResourceValue2), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, names.AttrARN), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: names.AttrARN, + }, + }, + }) +} + +func TestAccNetworkManagerLink_tags_DefaultTags_updateToProviderOnly(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_networkmanager_link.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckLinkDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Link/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: 
config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckLinkExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Link/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckLinkExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: 
resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Link/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, names.AttrARN), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: names.AttrARN, + }, + }, + }) +} + +func TestAccNetworkManagerLink_tags_DefaultTags_updateToResourceOnly(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_networkmanager_link.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckLinkDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Link/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckLinkExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + 
statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Link/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckLinkExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + 
plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Link/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, names.AttrARN), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: names.AttrARN, + }, + }, + }) +} + +func TestAccNetworkManagerLink_tags_DefaultTags_emptyResourceTag(t *testing.T) { + t.Skip("Resource Link does not support empty tags") + + ctx := acctest.Context(t) + + resourceName := "aws_networkmanager_link.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckLinkDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Link/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckLinkExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + 
statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + // TODO: Should be known + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Link/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, names.AttrARN), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: names.AttrARN, + }, + }, + }) +} + +func TestAccNetworkManagerLink_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T) { + t.Skip("Resource Link does not support empty tags") + + ctx := acctest.Context(t) + + resourceName := "aws_networkmanager_link.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckLinkDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Link/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: 
config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckLinkExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + // TODO: Should be known + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Link/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, names.AttrARN), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: names.AttrARN, + }, + }, + }) +} + +func TestAccNetworkManagerLink_tags_DefaultTags_nullOverlappingResourceTag(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_networkmanager_link.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckLinkDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + 
ConfigDirectory: config.StaticDirectory("testdata/Link/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: nil, + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckLinkExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtProviderValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtProviderValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Link/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: nil, + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, names.AttrARN), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: names.AttrARN, + }, + }, + }) +} + +func 
TestAccNetworkManagerLink_tags_DefaultTags_nullNonOverlappingResourceTag(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_networkmanager_link.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckLinkDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Link/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: nil, + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckLinkExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Link/tags_defaults/"), + ConfigVariables: config.Variables{ + 
acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: nil, + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, names.AttrARN), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: names.AttrARN, + }, + }, + }) +} + +func TestAccNetworkManagerLink_tags_ComputedTag_OnCreate(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_networkmanager_link.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckLinkDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Link/tagsComputed1/"), + ConfigVariables: config.Variables{ + "unknownTagKey": config.StringVariable("computedkey1"), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckLinkExists(ctx, resourceName), + resource.TestCheckResourceAttrPair(resourceName, "tags.computedkey1", "null_resource.test", names.AttrID), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapSizeExact(1)), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapSizeExact(1)), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTags)), + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + 
plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Link/tagsComputed1/"), + ConfigVariables: config.Variables{ + "unknownTagKey": config.StringVariable("computedkey1"), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, names.AttrARN), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: names.AttrARN, + }, + }, + }) +} + +func TestAccNetworkManagerLink_tags_ComputedTag_OnUpdate_Add(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_networkmanager_link.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckLinkDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Link/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckLinkExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + 
PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Link/tagsComputed2/"), + ConfigVariables: config.Variables{ + "unknownTagKey": config.StringVariable("computedkey1"), + "knownTagKey": config.StringVariable(acctest.CtKey1), + "knownTagValue": config.StringVariable(acctest.CtValue1), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckLinkExists(ctx, resourceName), + resource.TestCheckResourceAttrPair(resourceName, "tags.computedkey1", "null_resource.test", names.AttrID), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapSizeExact(2)), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapPartial(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapSizeExact(2)), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapPartial(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTags)), + 
plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Link/tagsComputed2/"), + ConfigVariables: config.Variables{ + "unknownTagKey": config.StringVariable("computedkey1"), + "knownTagKey": config.StringVariable(acctest.CtKey1), + "knownTagValue": config.StringVariable(acctest.CtValue1), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, names.AttrARN), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: names.AttrARN, + }, + }, + }) +} + +func TestAccNetworkManagerLink_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_networkmanager_link.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckLinkDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Link/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckLinkExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), 
+ statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Link/tagsComputed1/"), + ConfigVariables: config.Variables{ + "unknownTagKey": config.StringVariable(acctest.CtKey1), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckLinkExists(ctx, resourceName), + resource.TestCheckResourceAttrPair(resourceName, acctest.CtTagsKey1, "null_resource.test", names.AttrID), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapSizeExact(1)), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapSizeExact(1)), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTags)), + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + 
plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Link/tagsComputed1/"), + ConfigVariables: config.Variables{ + "unknownTagKey": config.StringVariable(acctest.CtKey1), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, names.AttrARN), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: names.AttrARN, + }, + }, + }) +} + +func TestAccNetworkManagerLink_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_networkmanager_link.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckLinkDestroy(ctx), + Steps: []resource.TestStep{ + // 1: Create + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Link/tags_ignore/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1), + }), + "ignore_tag_keys": config.SetVariable( + config.StringVariable(acctest.CtProviderKey1), + ), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckLinkExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + statecheck.ExpectKnownValue(resourceName, 
tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + expectFullResourceTags(ctx, resourceName, knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1), // TODO: Should not be set + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectEmptyPlan(), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectEmptyPlan(), + }, + }, + }, + // 2: Update ignored tag only + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Link/tags_ignore/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1Updated), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1), + }), + "ignore_tag_keys": config.SetVariable( + config.StringVariable(acctest.CtProviderKey1), + ), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckLinkExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + 
statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + expectFullResourceTags(ctx, resourceName, knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1), // TODO: Should not be set + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectEmptyPlan(), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectEmptyPlan(), + }, + }, + }, + // 3: Update both tags + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Link/tags_ignore/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1Again), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1Updated), + }), + "ignore_tag_keys": 
config.SetVariable( + config.StringVariable(acctest.CtProviderKey1), + ), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckLinkExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + })), + expectFullResourceTags(ctx, resourceName, knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1), // TODO: Should not be set + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + })), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectEmptyPlan(), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectEmptyPlan(), + }, + }, + }, + }, + }) +} + +func TestAccNetworkManagerLink_tags_IgnoreTags_Overlap_ResourceTag(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_networkmanager_link.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, 
+ ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckLinkDestroy(ctx), + Steps: []resource.TestStep{ + // 1: Create + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Link/tags_ignore/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1), + acctest.CtResourceKey2: config.StringVariable(acctest.CtResourceValue2), + }), + "ignore_tag_keys": config.SetVariable( + config.StringVariable(acctest.CtResourceKey1), + ), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckLinkExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + expectFullResourceTags(ctx, resourceName, knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), // TODO: Should not be set + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, 
tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + }, + ExpectNonEmptyPlan: true, + }, + // 2: Update ignored tag + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Link/tags_ignore/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: config.StringVariable(acctest.CtResourceValue2), + }), + "ignore_tag_keys": config.SetVariable( + config.StringVariable(acctest.CtResourceKey1), + ), + }, + 
Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckLinkExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + expectFullResourceTags(ctx, resourceName, knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), // TODO: Should not be set + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, 
tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + }, + ExpectNonEmptyPlan: true, + }, + // 3: Update both tags + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Link/tags_ignore/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1Again), + acctest.CtResourceKey2: config.StringVariable(acctest.CtResourceValue2Updated), + }), + "ignore_tag_keys": config.SetVariable( + config.StringVariable(acctest.CtResourceKey1), + ), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckLinkExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + expectFullResourceTags(ctx, 
resourceName, knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), // TODO: Should not be set + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Again), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Again), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Again), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + 
})), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + }, + }, + ExpectNonEmptyPlan: true, + }, + }, + }) +} diff --git a/internal/service/networkmanager/link_test.go b/internal/service/networkmanager/link_test.go index bf0348aca9e9..be1454e98829 100644 --- a/internal/service/networkmanager/link_test.go +++ b/internal/service/networkmanager/link_test.go @@ -76,52 +76,6 @@ func TestAccNetworkManagerLink_disappears(t *testing.T) { }) } -func TestAccNetworkManagerLink_tags(t *testing.T) { - ctx := acctest.Context(t) - resourceName := "aws_networkmanager_link.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckLinkDestroy(ctx), - Steps: []resource.TestStep{ - { - Config: testAccLinkConfig_tags1(rName, acctest.CtKey1, acctest.CtValue1), - Check: resource.ComposeTestCheckFunc( - testAccCheckLinkExists(ctx, resourceName), - resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, "1"), - resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey1, acctest.CtValue1), - ), - }, - { - ResourceName: resourceName, - ImportState: true, - ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, names.AttrARN), - ImportStateVerify: true, - }, - { - Config: testAccLinkConfig_tags2(rName, acctest.CtKey1, acctest.CtValue1Updated, acctest.CtKey2, acctest.CtValue2), - Check: resource.ComposeTestCheckFunc( - testAccCheckLinkExists(ctx, resourceName), - resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, "2"), - resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey1, acctest.CtValue1Updated), - 
resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey2, acctest.CtValue2), - ), - }, - { - Config: testAccLinkConfig_tags1(rName, acctest.CtKey2, acctest.CtValue2), - Check: resource.ComposeTestCheckFunc( - testAccCheckLinkExists(ctx, resourceName), - resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, "1"), - resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey2, acctest.CtValue2), - ), - }, - }, - }) -} - func TestAccNetworkManagerLink_allAttributes(t *testing.T) { ctx := acctest.Context(t) resourceName := "aws_networkmanager_link.test" @@ -240,71 +194,6 @@ resource "aws_networkmanager_link" "test" { `, rName) } -func testAccLinkConfig_tags1(rName, tagKey1, tagValue1 string) string { - return fmt.Sprintf(` -resource "aws_networkmanager_global_network" "test" { - tags = { - Name = %[1]q - } -} - -resource "aws_networkmanager_site" "test" { - global_network_id = aws_networkmanager_global_network.test.id - - tags = { - Name = %[1]q - } -} - -resource "aws_networkmanager_link" "test" { - global_network_id = aws_networkmanager_global_network.test.id - site_id = aws_networkmanager_site.test.id - - bandwidth { - download_speed = 50 - upload_speed = 10 - } - - tags = { - %[2]q = %[3]q - } -} -`, rName, tagKey1, tagValue1) -} - -func testAccLinkConfig_tags2(rName, tagKey1, tagValue1, tagKey2, tagValue2 string) string { - return fmt.Sprintf(` -resource "aws_networkmanager_global_network" "test" { - tags = { - Name = %[1]q - } -} - -resource "aws_networkmanager_site" "test" { - global_network_id = aws_networkmanager_global_network.test.id - - tags = { - Name = %[1]q - } -} - -resource "aws_networkmanager_link" "test" { - global_network_id = aws_networkmanager_global_network.test.id - site_id = aws_networkmanager_site.test.id - - bandwidth { - download_speed = 50 - upload_speed = 10 - } - - tags = { - %[2]q = %[3]q - %[4]q = %[5]q - } -} -`, rName, tagKey1, tagValue1, tagKey2, tagValue2) -} - func testAccLinkConfig_allAttributes(rName 
string) string { return fmt.Sprintf(` resource "aws_networkmanager_global_network" "test" { diff --git a/internal/service/networkmanager/service_endpoint_resolver_gen.go b/internal/service/networkmanager/service_endpoint_resolver_gen.go index 38e56133f666..effbe1a13a4e 100644 --- a/internal/service/networkmanager/service_endpoint_resolver_gen.go +++ b/internal/service/networkmanager/service_endpoint_resolver_gen.go @@ -62,7 +62,7 @@ func (r resolverV2) ResolveEndpoint(ctx context.Context, params networkmanager.E }) params.UseFIPS = aws.Bool(false) } else { - err = fmt.Errorf("looking up networkmanager endpoint %q: %s", hostname, err) + err = fmt.Errorf("looking up networkmanager endpoint %q: %w", hostname, err) return } } else { diff --git a/internal/service/networkmanager/service_endpoints_gen_test.go b/internal/service/networkmanager/service_endpoints_gen_test.go index 3e4f6ad3e3a2..5eefd7a35dba 100644 --- a/internal/service/networkmanager/service_endpoints_gen_test.go +++ b/internal/service/networkmanager/service_endpoints_gen_test.go @@ -521,7 +521,7 @@ func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { ) } -var errCancelOperation = fmt.Errorf("Test: Canceling request") +var errCancelOperation = errors.New("Test: Canceling request") func addCancelRequestMiddleware() func(*middleware.Stack) error { return func(stack *middleware.Stack) error { diff --git a/internal/service/networkmanager/service_package_gen.go b/internal/service/networkmanager/service_package_gen.go index ced9aa8cd8fa..fe4b124a8faf 100644 --- a/internal/service/networkmanager/service_package_gen.go +++ b/internal/service/networkmanager/service_package_gen.go @@ -7,7 +7,6 @@ import ( "unique" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/networkmanager" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -279,7 +278,7 @@ func (p 
*servicePackage) NewClient(ctx context.Context, config map[string]any) ( func(o *networkmanager.Options) { if inContext, ok := conns.FromContext(ctx); ok && inContext.VCREnabled() { tflog.Info(ctx, "overriding retry behavior to immediately return VCR errors") - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(vcr.InteractionNotFoundRetryableFunc)) + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), vcr.InteractionNotFoundRetryableFunc) } }, withExtraOptions(ctx, p, config), diff --git a/internal/service/networkmanager/site.go b/internal/service/networkmanager/site.go index 7083589e61c4..48a36374df70 100644 --- a/internal/service/networkmanager/site.go +++ b/internal/service/networkmanager/site.go @@ -29,6 +29,9 @@ import ( // @SDKResource("aws_networkmanager_site", name="Site") // @Tags(identifierAttribute="arn") +// @Testing(skipEmptyTags=true) +// @Testing(generator=false) +// @Testing(importStateIdAttribute="arn") func resourceSite() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceSiteCreate, @@ -219,7 +222,7 @@ func resourceSiteDelete(ctx context.Context, d *schema.ResourceData, meta any) d log.Printf("[DEBUG] Deleting Network Manager Site: %s", d.Id()) _, err := tfresource.RetryWhen(ctx, siteValidationExceptionTimeout, - func() (any, error) { + func(ctx context.Context) (any, error) { return conn.DeleteSite(ctx, &networkmanager.DeleteSiteInput{ GlobalNetworkId: aws.String(globalNetworkID), SiteId: aws.String(d.Id()), diff --git a/internal/service/networkmanager/site_tags_gen_test.go b/internal/service/networkmanager/site_tags_gen_test.go new file mode 100644 index 000000000000..5118973ab473 --- /dev/null +++ b/internal/service/networkmanager/site_tags_gen_test.go @@ -0,0 +1,2245 @@ +// Code generated by internal/generate/tagstests/main.go; DO NOT EDIT. 
+ +package networkmanager_test + +import ( + "testing" + + "github.com/hashicorp/terraform-plugin-testing/config" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/knownvalue" + "github.com/hashicorp/terraform-plugin-testing/plancheck" + "github.com/hashicorp/terraform-plugin-testing/statecheck" + "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccNetworkManagerSite_tags(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_networkmanager_site.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckSiteDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + ConfigDirectory: config.StaticDirectory("testdata/Site/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSiteExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, 
tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Site/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, names.AttrARN), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: names.AttrARN, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Site/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1Updated), + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSiteExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1Updated), + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1Updated), + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + 
plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1Updated), + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1Updated), + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Site/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1Updated), + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, names.AttrARN), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: names.AttrARN, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Site/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSiteExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, 
plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Site/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, names.AttrARN), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: names.AttrARN, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Site/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSiteExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Site/tags/"), + ConfigVariables: config.Variables{ 
+ acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, names.AttrARN), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: names.AttrARN, + }, + }, + }) +} + +func TestAccNetworkManagerSite_tags_null(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_networkmanager_site.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckSiteDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + ConfigDirectory: config.StaticDirectory("testdata/Site/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: nil, + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSiteExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + // TODO: Should be known + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Site/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: nil, + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: 
acctest.AttrImportStateIdFunc(resourceName, names.AttrARN), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: names.AttrARN, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Site/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: nil, + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + }, + }, + }) +} + +func TestAccNetworkManagerSite_tags_EmptyMap(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_networkmanager_site.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckSiteDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + ConfigDirectory: config.StaticDirectory("testdata/Site/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{}), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSiteExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + // TODO: Should be known + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + }, 
+ }, + { + ConfigDirectory: config.StaticDirectory("testdata/Site/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{}), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, names.AttrARN), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: names.AttrARN, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Site/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: nil, + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + }, + }, + }) +} + +func TestAccNetworkManagerSite_tags_AddOnUpdate(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_networkmanager_site.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckSiteDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + ConfigDirectory: config.StaticDirectory("testdata/Site/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSiteExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, 
plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + // TODO: Should be known + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Site/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSiteExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Site/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, names.AttrARN), + 
ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: names.AttrARN, + }, + }, + }) +} + +func TestAccNetworkManagerSite_tags_EmptyTag_OnCreate(t *testing.T) { + t.Skip("Resource Site does not support empty tags") + + ctx := acctest.Context(t) + + resourceName := "aws_networkmanager_site.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckSiteDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + ConfigDirectory: config.StaticDirectory("testdata/Site/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSiteExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + // TODO: Should be known + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Site/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ 
+ acctest.CtKey1: config.StringVariable(""), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, names.AttrARN), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: names.AttrARN, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Site/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSiteExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Site/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, names.AttrARN), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: names.AttrARN, + }, + }, + }) +} + +func TestAccNetworkManagerSite_tags_EmptyTag_OnUpdate_Add(t *testing.T) { + t.Skip("Resource Site does not support empty tags") + + ctx := acctest.Context(t) + + resourceName := "aws_networkmanager_site.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, 
names.NetworkManagerServiceID), + CheckDestroy: testAccCheckSiteDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + ConfigDirectory: config.StaticDirectory("testdata/Site/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSiteExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Site/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + acctest.CtKey2: config.StringVariable(""), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSiteExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + 
statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + acctest.CtKey2: knownvalue.StringExact(""), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + acctest.CtKey2: knownvalue.StringExact(""), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + acctest.CtKey2: knownvalue.StringExact(""), + })), + // TODO: Should be known + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Site/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + acctest.CtKey2: config.StringVariable(""), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, names.AttrARN), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: names.AttrARN, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Site/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSiteExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), 
knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Site/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, names.AttrARN), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: names.AttrARN, + }, + }, + }) +} + +func TestAccNetworkManagerSite_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { + t.Skip("Resource Site does not support empty tags") + + ctx := acctest.Context(t) + + resourceName := "aws_networkmanager_site.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckSiteDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + ConfigDirectory: config.StaticDirectory("testdata/Site/tags/"), + ConfigVariables: config.Variables{ + 
acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSiteExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Site/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSiteExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + }, + 
ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + // TODO: Should be known + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Site/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, names.AttrARN), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: names.AttrARN, + }, + }, + }) +} + +func TestAccNetworkManagerSite_tags_DefaultTags_providerOnly(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_networkmanager_site.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckSiteDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Site/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSiteExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, 
tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Site/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, names.AttrARN), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: names.AttrARN, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Site/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1Updated), + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSiteExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), 
knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1Updated), + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1Updated), + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Site/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1Updated), + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, names.AttrARN), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: names.AttrARN, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Site/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSiteExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), 
knownvalue.MapExact(map[string]knownvalue.Check{})), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Site/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, names.AttrARN), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: names.AttrARN, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Site/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSiteExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: 
[]plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Site/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, names.AttrARN), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: names.AttrARN, + }, + }, + }) +} + +func TestAccNetworkManagerSite_tags_DefaultTags_nonOverlapping(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_networkmanager_site.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckSiteDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Site/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSiteExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + 
acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1), + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1), + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Site/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, names.AttrARN), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: names.AttrARN, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Site/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: 
config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1Updated), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: config.StringVariable(acctest.CtResourceValue2), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSiteExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1Updated), + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1Updated), + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: 
knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Site/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1Updated), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: config.StringVariable(acctest.CtResourceValue2), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, names.AttrARN), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: names.AttrARN, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Site/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSiteExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + 
ConfigDirectory: config.StaticDirectory("testdata/Site/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, names.AttrARN), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: names.AttrARN, + }, + }, + }) +} + +func TestAccNetworkManagerSite_tags_DefaultTags_overlapping(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_networkmanager_site.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckSiteDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Site/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtResourceValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSiteExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + 
plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Site/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtResourceValue1), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, names.AttrARN), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: names.AttrARN, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Site/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtProviderValue1), + acctest.CtOverlapKey2: config.StringVariable("providervalue2"), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtResourceValue1), + acctest.CtOverlapKey2: config.StringVariable(acctest.CtResourceValue2), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSiteExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), 
knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + acctest.CtOverlapKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + acctest.CtOverlapKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + acctest.CtOverlapKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + acctest.CtOverlapKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Site/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtProviderValue1), + acctest.CtOverlapKey2: config.StringVariable("providervalue2"), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtResourceValue1), + acctest.CtOverlapKey2: config.StringVariable(acctest.CtResourceValue2), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, names.AttrARN), 
+ ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: names.AttrARN, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Site/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtResourceValue2), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSiteExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue2), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Site/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + 
acctest.CtOverlapKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtResourceValue2), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, names.AttrARN), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: names.AttrARN, + }, + }, + }) +} + +func TestAccNetworkManagerSite_tags_DefaultTags_updateToProviderOnly(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_networkmanager_site.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckSiteDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Site/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSiteExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), 
knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Site/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSiteExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Site/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: nil, + 
}, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, names.AttrARN), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: names.AttrARN, + }, + }, + }) +} + +func TestAccNetworkManagerSite_tags_DefaultTags_updateToResourceOnly(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_networkmanager_site.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckSiteDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Site/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSiteExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: 
config.StaticDirectory("testdata/Site/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSiteExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Site/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, names.AttrARN), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: names.AttrARN, + }, + }, + }) +} + +func TestAccNetworkManagerSite_tags_DefaultTags_emptyResourceTag(t *testing.T) { + t.Skip("Resource Site does not support empty 
tags") + + ctx := acctest.Context(t) + + resourceName := "aws_networkmanager_site.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckSiteDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Site/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSiteExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + // TODO: Should be known + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Site/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ 
+ acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, names.AttrARN), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: names.AttrARN, + }, + }, + }) +} + +func TestAccNetworkManagerSite_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T) { + t.Skip("Resource Site does not support empty tags") + + ctx := acctest.Context(t) + + resourceName := "aws_networkmanager_site.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckSiteDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Site/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSiteExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + // TODO: Should be known + plancheck.ExpectUnknownValue(resourceName, 
tfjsonpath.New(names.AttrTagsAll)), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Site/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, names.AttrARN), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: names.AttrARN, + }, + }, + }) +} + +func TestAccNetworkManagerSite_tags_DefaultTags_nullOverlappingResourceTag(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_networkmanager_site.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckSiteDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Site/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: nil, + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSiteExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtProviderValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: 
[]plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtProviderValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Site/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: nil, + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, names.AttrARN), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: names.AttrARN, + }, + }, + }) +} + +func TestAccNetworkManagerSite_tags_DefaultTags_nullNonOverlappingResourceTag(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_networkmanager_site.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckSiteDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Site/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: nil, + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + 
testAccCheckSiteExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Site/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: nil, + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, names.AttrARN), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: names.AttrARN, + }, + }, + }) +} + +func TestAccNetworkManagerSite_tags_ComputedTag_OnCreate(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_networkmanager_site.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckSiteDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: 
acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Site/tagsComputed1/"), + ConfigVariables: config.Variables{ + "unknownTagKey": config.StringVariable("computedkey1"), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSiteExists(ctx, resourceName), + resource.TestCheckResourceAttrPair(resourceName, "tags.computedkey1", "null_resource.test", names.AttrID), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapSizeExact(1)), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapSizeExact(1)), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTags)), + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Site/tagsComputed1/"), + ConfigVariables: config.Variables{ + "unknownTagKey": config.StringVariable("computedkey1"), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, names.AttrARN), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: names.AttrARN, + }, + }, + }) +} + +func TestAccNetworkManagerSite_tags_ComputedTag_OnUpdate_Add(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_networkmanager_site.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { 
acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckSiteDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Site/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSiteExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Site/tagsComputed2/"), + ConfigVariables: config.Variables{ + "unknownTagKey": config.StringVariable("computedkey1"), + "knownTagKey": config.StringVariable(acctest.CtKey1), + "knownTagValue": config.StringVariable(acctest.CtValue1), + }, + Check: resource.ComposeAggregateTestCheckFunc( + 
testAccCheckSiteExists(ctx, resourceName), + resource.TestCheckResourceAttrPair(resourceName, "tags.computedkey1", "null_resource.test", names.AttrID), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapSizeExact(2)), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapPartial(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapSizeExact(2)), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapPartial(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTags)), + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Site/tagsComputed2/"), + ConfigVariables: config.Variables{ + "unknownTagKey": config.StringVariable("computedkey1"), + "knownTagKey": config.StringVariable(acctest.CtKey1), + "knownTagValue": config.StringVariable(acctest.CtValue1), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, names.AttrARN), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: names.AttrARN, + }, + }, + }) +} + 
+func TestAccNetworkManagerSite_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_networkmanager_site.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckSiteDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Site/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSiteExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Site/tagsComputed1/"), + ConfigVariables: config.Variables{ + 
"unknownTagKey": config.StringVariable(acctest.CtKey1), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSiteExists(ctx, resourceName), + resource.TestCheckResourceAttrPair(resourceName, acctest.CtTagsKey1, "null_resource.test", names.AttrID), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapSizeExact(1)), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapSizeExact(1)), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTags)), + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Site/tagsComputed1/"), + ConfigVariables: config.Variables{ + "unknownTagKey": config.StringVariable(acctest.CtKey1), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, names.AttrARN), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: names.AttrARN, + }, + }, + }) +} + +func TestAccNetworkManagerSite_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_networkmanager_site.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckSiteDestroy(ctx), + Steps: 
[]resource.TestStep{ + // 1: Create + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Site/tags_ignore/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1), + }), + "ignore_tag_keys": config.SetVariable( + config.StringVariable(acctest.CtProviderKey1), + ), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSiteExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + expectFullResourceTags(ctx, resourceName, knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1), // TODO: Should not be set + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: 
knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectEmptyPlan(), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectEmptyPlan(), + }, + }, + }, + // 2: Update ignored tag only + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Site/tags_ignore/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1Updated), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1), + }), + "ignore_tag_keys": config.SetVariable( + config.StringVariable(acctest.CtProviderKey1), + ), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSiteExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + expectFullResourceTags(ctx, resourceName, knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1), // TODO: Should not be set + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + 
acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectEmptyPlan(), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectEmptyPlan(), + }, + }, + }, + // 3: Update both tags + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Site/tags_ignore/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1Again), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1Updated), + }), + "ignore_tag_keys": config.SetVariable( + config.StringVariable(acctest.CtProviderKey1), + ), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSiteExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + })), + expectFullResourceTags(ctx, resourceName, knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1), // TODO: Should not be set + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + })), + }, + ConfigPlanChecks: 
resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + })), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectEmptyPlan(), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectEmptyPlan(), + }, + }, + }, + }, + }) +} + +func TestAccNetworkManagerSite_tags_IgnoreTags_Overlap_ResourceTag(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_networkmanager_site.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckSiteDestroy(ctx), + Steps: []resource.TestStep{ + // 1: Create + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Site/tags_ignore/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1), + acctest.CtResourceKey2: config.StringVariable(acctest.CtResourceValue2), + }), + "ignore_tag_keys": config.SetVariable( + config.StringVariable(acctest.CtResourceKey1), + ), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSiteExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: 
knownvalue.StringExact(acctest.CtResourceValue2), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + expectFullResourceTags(ctx, resourceName, knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), // TODO: Should not be set + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + 
plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + }, + ExpectNonEmptyPlan: true, + }, + // 2: Update ignored tag + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Site/tags_ignore/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: config.StringVariable(acctest.CtResourceValue2), + }), + "ignore_tag_keys": config.SetVariable( + config.StringVariable(acctest.CtResourceKey1), + ), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSiteExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + expectFullResourceTags(ctx, resourceName, knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), // TODO: Should not be set + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: 
[]plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + }, + ExpectNonEmptyPlan: true, + }, + // 3: Update both tags + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, 
+ ConfigDirectory: config.StaticDirectory("testdata/Site/tags_ignore/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1Again), + acctest.CtResourceKey2: config.StringVariable(acctest.CtResourceValue2Updated), + }), + "ignore_tag_keys": config.SetVariable( + config.StringVariable(acctest.CtResourceKey1), + ), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSiteExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + expectFullResourceTags(ctx, resourceName, knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), // TODO: Should not be set + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Again), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + }, 
+ PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Again), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Again), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + }, + }, + ExpectNonEmptyPlan: true, + }, + }, + }) +} diff --git a/internal/service/networkmanager/site_test.go b/internal/service/networkmanager/site_test.go index c0021817dab2..ebc073f6640f 100644 --- a/internal/service/networkmanager/site_test.go +++ b/internal/service/networkmanager/site_test.go @@ -72,52 +72,6 @@ func TestAccNetworkManagerSite_disappears(t *testing.T) { }) } -func TestAccNetworkManagerSite_tags(t *testing.T) { - ctx := acctest.Context(t) - resourceName := "aws_networkmanager_site.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, 
names.NetworkManagerServiceID), - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckSiteDestroy(ctx), - Steps: []resource.TestStep{ - { - Config: testAccSiteConfig_tags1(rName, acctest.CtKey1, acctest.CtValue1), - Check: resource.ComposeTestCheckFunc( - testAccCheckSiteExists(ctx, resourceName), - resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, "1"), - resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey1, acctest.CtValue1), - ), - }, - { - ResourceName: resourceName, - ImportState: true, - ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, names.AttrARN), - ImportStateVerify: true, - }, - { - Config: testAccSiteConfig_tags2(rName, acctest.CtKey1, acctest.CtValue1Updated, acctest.CtKey2, acctest.CtValue2), - Check: resource.ComposeTestCheckFunc( - testAccCheckSiteExists(ctx, resourceName), - resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, "2"), - resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey1, acctest.CtValue1Updated), - resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey2, acctest.CtValue2), - ), - }, - { - Config: testAccSiteConfig_tags1(rName, acctest.CtKey2, acctest.CtValue2), - Check: resource.ComposeTestCheckFunc( - testAccCheckSiteExists(ctx, resourceName), - resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, "1"), - resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey2, acctest.CtValue2), - ), - }, - }, - }) -} - func TestAccNetworkManagerSite_description(t *testing.T) { ctx := acctest.Context(t) resourceName := "aws_networkmanager_site.test" @@ -253,43 +207,6 @@ resource "aws_networkmanager_site" "test" { `, rName) } -func testAccSiteConfig_tags1(rName, tagKey1, tagValue1 string) string { - return fmt.Sprintf(` -resource "aws_networkmanager_global_network" "test" { - tags = { - Name = %[1]q - } -} - -resource "aws_networkmanager_site" "test" { - global_network_id = 
aws_networkmanager_global_network.test.id - - tags = { - %[2]q = %[3]q - } -} -`, rName, tagKey1, tagValue1) -} - -func testAccSiteConfig_tags2(rName, tagKey1, tagValue1, tagKey2, tagValue2 string) string { - return fmt.Sprintf(` -resource "aws_networkmanager_global_network" "test" { - tags = { - Name = %[1]q - } -} - -resource "aws_networkmanager_site" "test" { - global_network_id = aws_networkmanager_global_network.test.id - - tags = { - %[2]q = %[3]q - %[4]q = %[5]q - } -} -`, rName, tagKey1, tagValue1, tagKey2, tagValue2) -} - func testAccSiteConfig_description(rName, description string) string { return fmt.Sprintf(` resource "aws_networkmanager_global_network" "test" { diff --git a/internal/service/networkmanager/site_to_site_vpn_attachment.go b/internal/service/networkmanager/site_to_site_vpn_attachment.go index b1de20b6caf9..f1dee4206219 100644 --- a/internal/service/networkmanager/site_to_site_vpn_attachment.go +++ b/internal/service/networkmanager/site_to_site_vpn_attachment.go @@ -27,6 +27,10 @@ import ( // @SDKResource("aws_networkmanager_site_to_site_vpn_attachment", name="Site To Site VPN Attachment") // @Tags(identifierAttribute="arn") +// @Testing(existsType="github.com/aws/aws-sdk-go-v2/service/networkmanager/types;awstypes;awstypes.SiteToSiteVpnAttachment") +// @Testing(skipEmptyTags=true) +// @Testing(randomBgpAsn="64512;65534") +// @Testing(randomIPv4Address="172.0.0.0/24") func resourceSiteToSiteVPNAttachment() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceSiteToSiteVPNAttachmentCreate, @@ -271,6 +275,8 @@ func waitSiteToSiteVPNAttachmentDeleted(ctx context.Context, conn *networkmanage Target: []string{}, Timeout: timeout, Refresh: statusSiteToSiteVPNAttachment(ctx, conn, id), + Delay: 4 * time.Minute, + PollInterval: 10 * time.Second, NotFoundChecks: 1, } diff --git a/internal/service/networkmanager/site_to_site_vpn_attachment_tags_gen_test.go 
b/internal/service/networkmanager/site_to_site_vpn_attachment_tags_gen_test.go new file mode 100644 index 000000000000..ea4a02c24f31 --- /dev/null +++ b/internal/service/networkmanager/site_to_site_vpn_attachment_tags_gen_test.go @@ -0,0 +1,2563 @@ +// Code generated by internal/generate/tagstests/main.go; DO NOT EDIT. + +package networkmanager_test + +import ( + "testing" + + awstypes "github.com/aws/aws-sdk-go-v2/service/networkmanager/types" + "github.com/hashicorp/terraform-plugin-testing/config" + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/knownvalue" + "github.com/hashicorp/terraform-plugin-testing/plancheck" + "github.com/hashicorp/terraform-plugin-testing/statecheck" + "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccNetworkManagerSiteToSiteVPNAttachment_tags(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.SiteToSiteVpnAttachment + resourceName := "aws_networkmanager_site_to_site_vpn_attachment.test" + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + rBgpAsn := sdkacctest.RandIntRange(64512, 65534) + rIPv4Address, err := sdkacctest.RandIpAddress("172.0.0.0/24") + if err != nil { + t.Fatal(err) + } + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckSiteToSiteVPNAttachmentDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + ConfigDirectory: config.StaticDirectory("testdata/SiteToSiteVPNAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: 
config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + "rBgpAsn": config.IntegerVariable(rBgpAsn), + "rIPv4Address": config.StringVariable(rIPv4Address), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSiteToSiteVPNAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/SiteToSiteVPNAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + "rBgpAsn": config.IntegerVariable(rBgpAsn), + "rIPv4Address": config.StringVariable(rIPv4Address), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/SiteToSiteVPNAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: 
config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1Updated), + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + "rBgpAsn": config.IntegerVariable(rBgpAsn), + "rIPv4Address": config.StringVariable(rIPv4Address), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSiteToSiteVPNAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1Updated), + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1Updated), + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1Updated), + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1Updated), + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/SiteToSiteVPNAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: 
config.StringVariable(acctest.CtValue1Updated), + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + "rBgpAsn": config.IntegerVariable(rBgpAsn), + "rIPv4Address": config.StringVariable(rIPv4Address), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/SiteToSiteVPNAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + "rBgpAsn": config.IntegerVariable(rBgpAsn), + "rIPv4Address": config.StringVariable(rIPv4Address), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSiteToSiteVPNAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/SiteToSiteVPNAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: 
config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + "rBgpAsn": config.IntegerVariable(rBgpAsn), + "rIPv4Address": config.StringVariable(rIPv4Address), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/SiteToSiteVPNAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: nil, + "rBgpAsn": config.IntegerVariable(rBgpAsn), + "rIPv4Address": config.StringVariable(rIPv4Address), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSiteToSiteVPNAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/SiteToSiteVPNAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: nil, + "rBgpAsn": config.IntegerVariable(rBgpAsn), + "rIPv4Address": config.StringVariable(rIPv4Address), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func 
TestAccNetworkManagerSiteToSiteVPNAttachment_tags_null(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.SiteToSiteVpnAttachment + resourceName := "aws_networkmanager_site_to_site_vpn_attachment.test" + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + rBgpAsn := sdkacctest.RandIntRange(64512, 65534) + rIPv4Address, err := sdkacctest.RandIpAddress("172.0.0.0/24") + if err != nil { + t.Fatal(err) + } + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckSiteToSiteVPNAttachmentDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + ConfigDirectory: config.StaticDirectory("testdata/SiteToSiteVPNAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: nil, + }), + "rBgpAsn": config.IntegerVariable(rBgpAsn), + "rIPv4Address": config.StringVariable(rIPv4Address), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSiteToSiteVPNAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + // TODO: Should be known + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + }, + }, + { + ConfigDirectory: 
config.StaticDirectory("testdata/SiteToSiteVPNAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: nil, + }), + "rBgpAsn": config.IntegerVariable(rBgpAsn), + "rIPv4Address": config.StringVariable(rIPv4Address), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/SiteToSiteVPNAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: nil, + "rBgpAsn": config.IntegerVariable(rBgpAsn), + "rIPv4Address": config.StringVariable(rIPv4Address), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + }, + }, + }) +} + +func TestAccNetworkManagerSiteToSiteVPNAttachment_tags_EmptyMap(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.SiteToSiteVpnAttachment + resourceName := "aws_networkmanager_site_to_site_vpn_attachment.test" + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + rBgpAsn := sdkacctest.RandIntRange(64512, 65534) + rIPv4Address, err := sdkacctest.RandIpAddress("172.0.0.0/24") + if err != nil { + t.Fatal(err) + } + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckSiteToSiteVPNAttachmentDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + ConfigDirectory: config.StaticDirectory("testdata/SiteToSiteVPNAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: 
config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{}), + "rBgpAsn": config.IntegerVariable(rBgpAsn), + "rIPv4Address": config.StringVariable(rIPv4Address), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSiteToSiteVPNAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + // TODO: Should be known + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/SiteToSiteVPNAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{}), + "rBgpAsn": config.IntegerVariable(rBgpAsn), + "rIPv4Address": config.StringVariable(rIPv4Address), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/SiteToSiteVPNAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: nil, + "rBgpAsn": config.IntegerVariable(rBgpAsn), + "rIPv4Address": config.StringVariable(rIPv4Address), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + 
plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + }, + }, + }) +} + +func TestAccNetworkManagerSiteToSiteVPNAttachment_tags_AddOnUpdate(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.SiteToSiteVpnAttachment + resourceName := "aws_networkmanager_site_to_site_vpn_attachment.test" + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + rBgpAsn := sdkacctest.RandIntRange(64512, 65534) + rIPv4Address, err := sdkacctest.RandIpAddress("172.0.0.0/24") + if err != nil { + t.Fatal(err) + } + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckSiteToSiteVPNAttachmentDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + ConfigDirectory: config.StaticDirectory("testdata/SiteToSiteVPNAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: nil, + "rBgpAsn": config.IntegerVariable(rBgpAsn), + "rIPv4Address": config.StringVariable(rIPv4Address), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSiteToSiteVPNAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + // TODO: Should be known + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + }, + }, + { + ConfigDirectory: 
config.StaticDirectory("testdata/SiteToSiteVPNAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + "rBgpAsn": config.IntegerVariable(rBgpAsn), + "rIPv4Address": config.StringVariable(rIPv4Address), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSiteToSiteVPNAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/SiteToSiteVPNAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + "rBgpAsn": config.IntegerVariable(rBgpAsn), + "rIPv4Address": config.StringVariable(rIPv4Address), + }, + ResourceName: resourceName, + ImportState: true, + 
ImportStateVerify: true, + }, + }, + }) +} + +func TestAccNetworkManagerSiteToSiteVPNAttachment_tags_EmptyTag_OnCreate(t *testing.T) { + t.Skip("Resource SiteToSiteVPNAttachment does not support empty tags") + + ctx := acctest.Context(t) + + var v awstypes.SiteToSiteVpnAttachment + resourceName := "aws_networkmanager_site_to_site_vpn_attachment.test" + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + rBgpAsn := sdkacctest.RandIntRange(64512, 65534) + rIPv4Address, err := sdkacctest.RandIpAddress("172.0.0.0/24") + if err != nil { + t.Fatal(err) + } + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckSiteToSiteVPNAttachmentDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + ConfigDirectory: config.StaticDirectory("testdata/SiteToSiteVPNAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + "rBgpAsn": config.IntegerVariable(rBgpAsn), + "rIPv4Address": config.StringVariable(rIPv4Address), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSiteToSiteVPNAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + 
plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + // TODO: Should be known + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/SiteToSiteVPNAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + "rBgpAsn": config.IntegerVariable(rBgpAsn), + "rIPv4Address": config.StringVariable(rIPv4Address), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/SiteToSiteVPNAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: nil, + "rBgpAsn": config.IntegerVariable(rBgpAsn), + "rIPv4Address": config.StringVariable(rIPv4Address), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSiteToSiteVPNAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + }, + }, + { + ConfigDirectory: 
config.StaticDirectory("testdata/SiteToSiteVPNAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: nil, + "rBgpAsn": config.IntegerVariable(rBgpAsn), + "rIPv4Address": config.StringVariable(rIPv4Address), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccNetworkManagerSiteToSiteVPNAttachment_tags_EmptyTag_OnUpdate_Add(t *testing.T) { + t.Skip("Resource SiteToSiteVPNAttachment does not support empty tags") + + ctx := acctest.Context(t) + + var v awstypes.SiteToSiteVpnAttachment + resourceName := "aws_networkmanager_site_to_site_vpn_attachment.test" + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + rBgpAsn := sdkacctest.RandIntRange(64512, 65534) + rIPv4Address, err := sdkacctest.RandIpAddress("172.0.0.0/24") + if err != nil { + t.Fatal(err) + } + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckSiteToSiteVPNAttachmentDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + ConfigDirectory: config.StaticDirectory("testdata/SiteToSiteVPNAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + "rBgpAsn": config.IntegerVariable(rBgpAsn), + "rIPv4Address": config.StringVariable(rIPv4Address), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSiteToSiteVPNAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: 
knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/SiteToSiteVPNAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + acctest.CtKey2: config.StringVariable(""), + }), + "rBgpAsn": config.IntegerVariable(rBgpAsn), + "rIPv4Address": config.StringVariable(rIPv4Address), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSiteToSiteVPNAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + acctest.CtKey2: knownvalue.StringExact(""), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + acctest.CtKey2: knownvalue.StringExact(""), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: 
[]plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + acctest.CtKey2: knownvalue.StringExact(""), + })), + // TODO: Should be known + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/SiteToSiteVPNAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + acctest.CtKey2: config.StringVariable(""), + }), + "rBgpAsn": config.IntegerVariable(rBgpAsn), + "rIPv4Address": config.StringVariable(rIPv4Address), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/SiteToSiteVPNAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + "rBgpAsn": config.IntegerVariable(rBgpAsn), + "rIPv4Address": config.StringVariable(rIPv4Address), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSiteToSiteVPNAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + 
ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/SiteToSiteVPNAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + "rBgpAsn": config.IntegerVariable(rBgpAsn), + "rIPv4Address": config.StringVariable(rIPv4Address), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccNetworkManagerSiteToSiteVPNAttachment_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { + t.Skip("Resource SiteToSiteVPNAttachment does not support empty tags") + + ctx := acctest.Context(t) + + var v awstypes.SiteToSiteVpnAttachment + resourceName := "aws_networkmanager_site_to_site_vpn_attachment.test" + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + rBgpAsn := sdkacctest.RandIntRange(64512, 65534) + rIPv4Address, err := sdkacctest.RandIpAddress("172.0.0.0/24") + if err != nil { + t.Fatal(err) + } + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckSiteToSiteVPNAttachmentDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + ConfigDirectory: 
config.StaticDirectory("testdata/SiteToSiteVPNAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + "rBgpAsn": config.IntegerVariable(rBgpAsn), + "rIPv4Address": config.StringVariable(rIPv4Address), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSiteToSiteVPNAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/SiteToSiteVPNAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + "rBgpAsn": config.IntegerVariable(rBgpAsn), + "rIPv4Address": config.StringVariable(rIPv4Address), + }, + Check: resource.ComposeAggregateTestCheckFunc( + 
testAccCheckSiteToSiteVPNAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + // TODO: Should be known + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/SiteToSiteVPNAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + "rBgpAsn": config.IntegerVariable(rBgpAsn), + "rIPv4Address": config.StringVariable(rIPv4Address), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccNetworkManagerSiteToSiteVPNAttachment_tags_DefaultTags_providerOnly(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.SiteToSiteVpnAttachment + resourceName := "aws_networkmanager_site_to_site_vpn_attachment.test" + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + rBgpAsn := sdkacctest.RandIntRange(64512, 65534) + rIPv4Address, err := sdkacctest.RandIpAddress("172.0.0.0/24") + if err != nil { + t.Fatal(err) + } + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) 
}, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckSiteToSiteVPNAttachmentDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/SiteToSiteVPNAttachment/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: nil, + "rBgpAsn": config.IntegerVariable(rBgpAsn), + "rIPv4Address": config.StringVariable(rIPv4Address), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSiteToSiteVPNAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/SiteToSiteVPNAttachment/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + 
}), + acctest.CtResourceTags: nil, + "rBgpAsn": config.IntegerVariable(rBgpAsn), + "rIPv4Address": config.StringVariable(rIPv4Address), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/SiteToSiteVPNAttachment/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1Updated), + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + acctest.CtResourceTags: nil, + "rBgpAsn": config.IntegerVariable(rBgpAsn), + "rIPv4Address": config.StringVariable(rIPv4Address), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSiteToSiteVPNAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1Updated), + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1Updated), + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: 
acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/SiteToSiteVPNAttachment/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1Updated), + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + acctest.CtResourceTags: nil, + "rBgpAsn": config.IntegerVariable(rBgpAsn), + "rIPv4Address": config.StringVariable(rIPv4Address), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/SiteToSiteVPNAttachment/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + acctest.CtResourceTags: nil, + "rBgpAsn": config.IntegerVariable(rBgpAsn), + "rIPv4Address": config.StringVariable(rIPv4Address), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSiteToSiteVPNAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + plancheck.ExpectKnownValue(resourceName, 
tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/SiteToSiteVPNAttachment/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + acctest.CtResourceTags: nil, + "rBgpAsn": config.IntegerVariable(rBgpAsn), + "rIPv4Address": config.StringVariable(rIPv4Address), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/SiteToSiteVPNAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: nil, + "rBgpAsn": config.IntegerVariable(rBgpAsn), + "rIPv4Address": config.StringVariable(rIPv4Address), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSiteToSiteVPNAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), 
+ }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/SiteToSiteVPNAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: nil, + "rBgpAsn": config.IntegerVariable(rBgpAsn), + "rIPv4Address": config.StringVariable(rIPv4Address), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccNetworkManagerSiteToSiteVPNAttachment_tags_DefaultTags_nonOverlapping(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.SiteToSiteVpnAttachment + resourceName := "aws_networkmanager_site_to_site_vpn_attachment.test" + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + rBgpAsn := sdkacctest.RandIntRange(64512, 65534) + rIPv4Address, err := sdkacctest.RandIpAddress("172.0.0.0/24") + if err != nil { + t.Fatal(err) + } + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckSiteToSiteVPNAttachmentDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/SiteToSiteVPNAttachment/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1), + }), + "rBgpAsn": config.IntegerVariable(rBgpAsn), + "rIPv4Address": config.StringVariable(rIPv4Address), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSiteToSiteVPNAttachmentExists(ctx, resourceName, &v), + ), + 
ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1), + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1), + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/SiteToSiteVPNAttachment/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1), + }), + "rBgpAsn": config.IntegerVariable(rBgpAsn), + "rIPv4Address": config.StringVariable(rIPv4Address), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + ProtoV5ProviderFactories: 
acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/SiteToSiteVPNAttachment/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1Updated), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: config.StringVariable(acctest.CtResourceValue2), + }), + "rBgpAsn": config.IntegerVariable(rBgpAsn), + "rIPv4Address": config.StringVariable(rIPv4Address), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSiteToSiteVPNAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1Updated), + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: 
knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1Updated), + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/SiteToSiteVPNAttachment/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1Updated), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: config.StringVariable(acctest.CtResourceValue2), + }), + "rBgpAsn": config.IntegerVariable(rBgpAsn), + "rIPv4Address": config.StringVariable(rIPv4Address), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/SiteToSiteVPNAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: nil, + "rBgpAsn": config.IntegerVariable(rBgpAsn), + "rIPv4Address": config.StringVariable(rIPv4Address), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSiteToSiteVPNAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + statecheck.ExpectKnownValue(resourceName, 
tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/SiteToSiteVPNAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: nil, + "rBgpAsn": config.IntegerVariable(rBgpAsn), + "rIPv4Address": config.StringVariable(rIPv4Address), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccNetworkManagerSiteToSiteVPNAttachment_tags_DefaultTags_overlapping(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.SiteToSiteVpnAttachment + resourceName := "aws_networkmanager_site_to_site_vpn_attachment.test" + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + rBgpAsn := sdkacctest.RandIntRange(64512, 65534) + rIPv4Address, err := sdkacctest.RandIpAddress("172.0.0.0/24") + if err != nil { + t.Fatal(err) + } + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckSiteToSiteVPNAttachmentDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/SiteToSiteVPNAttachment/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: 
config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtResourceValue1), + }), + "rBgpAsn": config.IntegerVariable(rBgpAsn), + "rIPv4Address": config.StringVariable(rIPv4Address), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSiteToSiteVPNAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/SiteToSiteVPNAttachment/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: 
config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtResourceValue1), + }), + "rBgpAsn": config.IntegerVariable(rBgpAsn), + "rIPv4Address": config.StringVariable(rIPv4Address), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/SiteToSiteVPNAttachment/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtProviderValue1), + acctest.CtOverlapKey2: config.StringVariable("providervalue2"), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtResourceValue1), + acctest.CtOverlapKey2: config.StringVariable(acctest.CtResourceValue2), + }), + "rBgpAsn": config.IntegerVariable(rBgpAsn), + "rIPv4Address": config.StringVariable(rIPv4Address), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSiteToSiteVPNAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + acctest.CtOverlapKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + acctest.CtOverlapKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, 
plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + acctest.CtOverlapKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + acctest.CtOverlapKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/SiteToSiteVPNAttachment/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtProviderValue1), + acctest.CtOverlapKey2: config.StringVariable("providervalue2"), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtResourceValue1), + acctest.CtOverlapKey2: config.StringVariable(acctest.CtResourceValue2), + }), + "rBgpAsn": config.IntegerVariable(rBgpAsn), + "rIPv4Address": config.StringVariable(rIPv4Address), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/SiteToSiteVPNAttachment/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: 
config.StringVariable(acctest.CtResourceValue2), + }), + "rBgpAsn": config.IntegerVariable(rBgpAsn), + "rIPv4Address": config.StringVariable(rIPv4Address), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSiteToSiteVPNAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue2), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/SiteToSiteVPNAttachment/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtResourceValue2), + }), + "rBgpAsn": config.IntegerVariable(rBgpAsn), + "rIPv4Address": config.StringVariable(rIPv4Address), + 
}, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccNetworkManagerSiteToSiteVPNAttachment_tags_DefaultTags_updateToProviderOnly(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.SiteToSiteVpnAttachment + resourceName := "aws_networkmanager_site_to_site_vpn_attachment.test" + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + rBgpAsn := sdkacctest.RandIntRange(64512, 65534) + rIPv4Address, err := sdkacctest.RandIpAddress("172.0.0.0/24") + if err != nil { + t.Fatal(err) + } + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckSiteToSiteVPNAttachmentDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/SiteToSiteVPNAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + "rBgpAsn": config.IntegerVariable(rBgpAsn), + "rIPv4Address": config.StringVariable(rIPv4Address), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSiteToSiteVPNAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, 
plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/SiteToSiteVPNAttachment/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: nil, + "rBgpAsn": config.IntegerVariable(rBgpAsn), + "rIPv4Address": config.StringVariable(rIPv4Address), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSiteToSiteVPNAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: 
acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/SiteToSiteVPNAttachment/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: nil, + "rBgpAsn": config.IntegerVariable(rBgpAsn), + "rIPv4Address": config.StringVariable(rIPv4Address), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccNetworkManagerSiteToSiteVPNAttachment_tags_DefaultTags_updateToResourceOnly(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.SiteToSiteVpnAttachment + resourceName := "aws_networkmanager_site_to_site_vpn_attachment.test" + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + rBgpAsn := sdkacctest.RandIntRange(64512, 65534) + rIPv4Address, err := sdkacctest.RandIpAddress("172.0.0.0/24") + if err != nil { + t.Fatal(err) + } + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckSiteToSiteVPNAttachmentDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/SiteToSiteVPNAttachment/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: nil, + "rBgpAsn": config.IntegerVariable(rBgpAsn), + "rIPv4Address": config.StringVariable(rIPv4Address), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSiteToSiteVPNAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: 
[]statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/SiteToSiteVPNAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + "rBgpAsn": config.IntegerVariable(rBgpAsn), + "rIPv4Address": config.StringVariable(rIPv4Address), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSiteToSiteVPNAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + 
plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/SiteToSiteVPNAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + "rBgpAsn": config.IntegerVariable(rBgpAsn), + "rIPv4Address": config.StringVariable(rIPv4Address), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccNetworkManagerSiteToSiteVPNAttachment_tags_DefaultTags_emptyResourceTag(t *testing.T) { + t.Skip("Resource SiteToSiteVPNAttachment does not support empty tags") + + ctx := acctest.Context(t) + + var v awstypes.SiteToSiteVpnAttachment + resourceName := "aws_networkmanager_site_to_site_vpn_attachment.test" + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + rBgpAsn := sdkacctest.RandIntRange(64512, 65534) + rIPv4Address, err := sdkacctest.RandIpAddress("172.0.0.0/24") + if err != nil { + t.Fatal(err) + } + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckSiteToSiteVPNAttachmentDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/SiteToSiteVPNAttachment/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: 
config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + "rBgpAsn": config.IntegerVariable(rBgpAsn), + "rIPv4Address": config.StringVariable(rIPv4Address), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSiteToSiteVPNAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + // TODO: Should be known + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/SiteToSiteVPNAttachment/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + "rBgpAsn": config.IntegerVariable(rBgpAsn), + "rIPv4Address": 
config.StringVariable(rIPv4Address), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccNetworkManagerSiteToSiteVPNAttachment_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T) { + t.Skip("Resource SiteToSiteVPNAttachment does not support empty tags") + + ctx := acctest.Context(t) + + var v awstypes.SiteToSiteVpnAttachment + resourceName := "aws_networkmanager_site_to_site_vpn_attachment.test" + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + rBgpAsn := sdkacctest.RandIntRange(64512, 65534) + rIPv4Address, err := sdkacctest.RandIpAddress("172.0.0.0/24") + if err != nil { + t.Fatal(err) + } + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckSiteToSiteVPNAttachmentDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/SiteToSiteVPNAttachment/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + acctest.CtResourceTags: nil, + "rBgpAsn": config.IntegerVariable(rBgpAsn), + "rIPv4Address": config.StringVariable(rIPv4Address), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSiteToSiteVPNAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + 
plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + // TODO: Should be known + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/SiteToSiteVPNAttachment/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + acctest.CtResourceTags: nil, + "rBgpAsn": config.IntegerVariable(rBgpAsn), + "rIPv4Address": config.StringVariable(rIPv4Address), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccNetworkManagerSiteToSiteVPNAttachment_tags_DefaultTags_nullOverlappingResourceTag(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.SiteToSiteVpnAttachment + resourceName := "aws_networkmanager_site_to_site_vpn_attachment.test" + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + rBgpAsn := sdkacctest.RandIntRange(64512, 65534) + rIPv4Address, err := sdkacctest.RandIpAddress("172.0.0.0/24") + if err != nil { + t.Fatal(err) + } + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckSiteToSiteVPNAttachmentDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/SiteToSiteVPNAttachment/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: 
config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: nil, + }), + "rBgpAsn": config.IntegerVariable(rBgpAsn), + "rIPv4Address": config.StringVariable(rIPv4Address), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSiteToSiteVPNAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtProviderValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtProviderValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/SiteToSiteVPNAttachment/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: nil, + }), + "rBgpAsn": config.IntegerVariable(rBgpAsn), + "rIPv4Address": config.StringVariable(rIPv4Address), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func 
TestAccNetworkManagerSiteToSiteVPNAttachment_tags_DefaultTags_nullNonOverlappingResourceTag(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.SiteToSiteVpnAttachment + resourceName := "aws_networkmanager_site_to_site_vpn_attachment.test" + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + rBgpAsn := sdkacctest.RandIntRange(64512, 65534) + rIPv4Address, err := sdkacctest.RandIpAddress("172.0.0.0/24") + if err != nil { + t.Fatal(err) + } + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckSiteToSiteVPNAttachmentDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/SiteToSiteVPNAttachment/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: nil, + }), + "rBgpAsn": config.IntegerVariable(rBgpAsn), + "rIPv4Address": config.StringVariable(rIPv4Address), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSiteToSiteVPNAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + 
plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/SiteToSiteVPNAttachment/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: nil, + }), + "rBgpAsn": config.IntegerVariable(rBgpAsn), + "rIPv4Address": config.StringVariable(rIPv4Address), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccNetworkManagerSiteToSiteVPNAttachment_tags_ComputedTag_OnCreate(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.SiteToSiteVpnAttachment + resourceName := "aws_networkmanager_site_to_site_vpn_attachment.test" + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + rBgpAsn := sdkacctest.RandIntRange(64512, 65534) + rIPv4Address, err := sdkacctest.RandIpAddress("172.0.0.0/24") + if err != nil { + t.Fatal(err) + } + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckSiteToSiteVPNAttachmentDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/SiteToSiteVPNAttachment/tagsComputed1/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + 
"unknownTagKey": config.StringVariable("computedkey1"), + "rBgpAsn": config.IntegerVariable(rBgpAsn), + "rIPv4Address": config.StringVariable(rIPv4Address), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSiteToSiteVPNAttachmentExists(ctx, resourceName, &v), + resource.TestCheckResourceAttrPair(resourceName, "tags.computedkey1", "null_resource.test", names.AttrID), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapSizeExact(1)), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapSizeExact(1)), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTags)), + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/SiteToSiteVPNAttachment/tagsComputed1/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "unknownTagKey": config.StringVariable("computedkey1"), + "rBgpAsn": config.IntegerVariable(rBgpAsn), + "rIPv4Address": config.StringVariable(rIPv4Address), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccNetworkManagerSiteToSiteVPNAttachment_tags_ComputedTag_OnUpdate_Add(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.SiteToSiteVpnAttachment + resourceName := "aws_networkmanager_site_to_site_vpn_attachment.test" 
+ rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + rBgpAsn := sdkacctest.RandIntRange(64512, 65534) + rIPv4Address, err := sdkacctest.RandIpAddress("172.0.0.0/24") + if err != nil { + t.Fatal(err) + } + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckSiteToSiteVPNAttachmentDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/SiteToSiteVPNAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + "rBgpAsn": config.IntegerVariable(rBgpAsn), + "rIPv4Address": config.StringVariable(rIPv4Address), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSiteToSiteVPNAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ 
+ acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/SiteToSiteVPNAttachment/tagsComputed2/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "unknownTagKey": config.StringVariable("computedkey1"), + "knownTagKey": config.StringVariable(acctest.CtKey1), + "knownTagValue": config.StringVariable(acctest.CtValue1), + "rBgpAsn": config.IntegerVariable(rBgpAsn), + "rIPv4Address": config.StringVariable(rIPv4Address), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSiteToSiteVPNAttachmentExists(ctx, resourceName, &v), + resource.TestCheckResourceAttrPair(resourceName, "tags.computedkey1", "null_resource.test", names.AttrID), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapSizeExact(2)), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapPartial(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapSizeExact(2)), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapPartial(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTags)), + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + 
plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/SiteToSiteVPNAttachment/tagsComputed2/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "unknownTagKey": config.StringVariable("computedkey1"), + "knownTagKey": config.StringVariable(acctest.CtKey1), + "knownTagValue": config.StringVariable(acctest.CtValue1), + "rBgpAsn": config.IntegerVariable(rBgpAsn), + "rIPv4Address": config.StringVariable(rIPv4Address), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccNetworkManagerSiteToSiteVPNAttachment_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.SiteToSiteVpnAttachment + resourceName := "aws_networkmanager_site_to_site_vpn_attachment.test" + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + rBgpAsn := sdkacctest.RandIntRange(64512, 65534) + rIPv4Address, err := sdkacctest.RandIpAddress("172.0.0.0/24") + if err != nil { + t.Fatal(err) + } + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckSiteToSiteVPNAttachmentDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/SiteToSiteVPNAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + "rBgpAsn": config.IntegerVariable(rBgpAsn), + "rIPv4Address": config.StringVariable(rIPv4Address), + }, + Check: resource.ComposeAggregateTestCheckFunc( + 
testAccCheckSiteToSiteVPNAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/SiteToSiteVPNAttachment/tagsComputed1/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "unknownTagKey": config.StringVariable(acctest.CtKey1), + "rBgpAsn": config.IntegerVariable(rBgpAsn), + "rIPv4Address": config.StringVariable(rIPv4Address), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSiteToSiteVPNAttachmentExists(ctx, resourceName, &v), + resource.TestCheckResourceAttrPair(resourceName, acctest.CtTagsKey1, "null_resource.test", names.AttrID), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapSizeExact(1)), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapSizeExact(1)), + }, + ConfigPlanChecks: 
resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTags)), + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/SiteToSiteVPNAttachment/tagsComputed1/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "unknownTagKey": config.StringVariable(acctest.CtKey1), + "rBgpAsn": config.IntegerVariable(rBgpAsn), + "rIPv4Address": config.StringVariable(rIPv4Address), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccNetworkManagerSiteToSiteVPNAttachment_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.SiteToSiteVpnAttachment + resourceName := "aws_networkmanager_site_to_site_vpn_attachment.test" + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + rBgpAsn := sdkacctest.RandIntRange(64512, 65534) + rIPv4Address, err := sdkacctest.RandIpAddress("172.0.0.0/24") + if err != nil { + t.Fatal(err) + } + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckSiteToSiteVPNAttachmentDestroy(ctx), + Steps: []resource.TestStep{ + // 1: Create + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/SiteToSiteVPNAttachment/tags_ignore/"), + ConfigVariables: 
config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1), + }), + "ignore_tag_keys": config.SetVariable( + config.StringVariable(acctest.CtProviderKey1), + ), + "rBgpAsn": config.IntegerVariable(rBgpAsn), + "rIPv4Address": config.StringVariable(rIPv4Address), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSiteToSiteVPNAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + expectFullResourceTags(ctx, resourceName, knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1), // TODO: Should not be set + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: 
knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectEmptyPlan(), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectEmptyPlan(), + }, + }, + }, + // 2: Update ignored tag only + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/SiteToSiteVPNAttachment/tags_ignore/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1Updated), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1), + }), + "ignore_tag_keys": config.SetVariable( + config.StringVariable(acctest.CtProviderKey1), + ), + "rBgpAsn": config.IntegerVariable(rBgpAsn), + "rIPv4Address": config.StringVariable(rIPv4Address), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSiteToSiteVPNAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + expectFullResourceTags(ctx, resourceName, knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1), // TODO: Should not be set + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + 
plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectEmptyPlan(), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectEmptyPlan(), + }, + }, + }, + // 3: Update both tags + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/SiteToSiteVPNAttachment/tags_ignore/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1Again), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1Updated), + }), + "ignore_tag_keys": config.SetVariable( + config.StringVariable(acctest.CtProviderKey1), + ), + "rBgpAsn": config.IntegerVariable(rBgpAsn), + "rIPv4Address": config.StringVariable(rIPv4Address), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSiteToSiteVPNAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: 
knownvalue.StringExact(acctest.CtResourceValue1Updated), + })), + expectFullResourceTags(ctx, resourceName, knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1), // TODO: Should not be set + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + })), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectEmptyPlan(), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectEmptyPlan(), + }, + }, + }, + }, + }) +} + +func TestAccNetworkManagerSiteToSiteVPNAttachment_tags_IgnoreTags_Overlap_ResourceTag(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.SiteToSiteVpnAttachment + resourceName := "aws_networkmanager_site_to_site_vpn_attachment.test" + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + rBgpAsn := sdkacctest.RandIntRange(64512, 65534) + rIPv4Address, err := sdkacctest.RandIpAddress("172.0.0.0/24") + if err != nil { + t.Fatal(err) + } + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckSiteToSiteVPNAttachmentDestroy(ctx), + Steps: []resource.TestStep{ + // 1: Create + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: 
config.StaticDirectory("testdata/SiteToSiteVPNAttachment/tags_ignore/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1), + acctest.CtResourceKey2: config.StringVariable(acctest.CtResourceValue2), + }), + "ignore_tag_keys": config.SetVariable( + config.StringVariable(acctest.CtResourceKey1), + ), + "rBgpAsn": config.IntegerVariable(rBgpAsn), + "rIPv4Address": config.StringVariable(rIPv4Address), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSiteToSiteVPNAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + expectFullResourceTags(ctx, resourceName, knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), // TODO: Should not be set + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), 
knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + }, + ExpectNonEmptyPlan: true, + }, + // 2: Update ignored tag + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/SiteToSiteVPNAttachment/tags_ignore/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: config.StringVariable(acctest.CtResourceValue2), + }), + "ignore_tag_keys": config.SetVariable( + 
config.StringVariable(acctest.CtResourceKey1), + ), + "rBgpAsn": config.IntegerVariable(rBgpAsn), + "rIPv4Address": config.StringVariable(rIPv4Address), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSiteToSiteVPNAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + expectFullResourceTags(ctx, resourceName, knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), // TODO: Should not be set + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: 
knownvalue.StringExact(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + }, + ExpectNonEmptyPlan: true, + }, + // 3: Update both tags + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/SiteToSiteVPNAttachment/tags_ignore/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1Again), + acctest.CtResourceKey2: config.StringVariable(acctest.CtResourceValue2Updated), + }), + "ignore_tag_keys": config.SetVariable( + config.StringVariable(acctest.CtResourceKey1), + ), + "rBgpAsn": config.IntegerVariable(rBgpAsn), + "rIPv4Address": config.StringVariable(rIPv4Address), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSiteToSiteVPNAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), 
knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + expectFullResourceTags(ctx, resourceName, knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), // TODO: Should not be set + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Again), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Again), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + }, + PostApplyPostRefresh: 
[]plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Again), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + }, + }, + ExpectNonEmptyPlan: true, + }, + }, + }) +} diff --git a/internal/service/networkmanager/site_to_site_vpn_attachment_test.go b/internal/service/networkmanager/site_to_site_vpn_attachment_test.go index 48f5f1606883..3d693201ee06 100644 --- a/internal/service/networkmanager/site_to_site_vpn_attachment_test.go +++ b/internal/service/networkmanager/site_to_site_vpn_attachment_test.go @@ -96,57 +96,6 @@ func TestAccNetworkManagerSiteToSiteVPNAttachment_disappears(t *testing.T) { }) } -func TestAccNetworkManagerSiteToSiteVPNAttachment_tags(t *testing.T) { - ctx := acctest.Context(t) - var v awstypes.SiteToSiteVpnAttachment - resourceName := "aws_networkmanager_site_to_site_vpn_attachment.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - bgpASN := sdkacctest.RandIntRange(64512, 65534) - vpnIP, err := sdkacctest.RandIpAddress("172.0.0.0/24") - if err != nil { - t.Fatal(err) - } - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckSiteToSiteVPNAttachmentDestroy(ctx), - Steps: []resource.TestStep{ - { - Config: testAccSiteToSiteVPNAttachmentConfig_tags1(rName, vpnIP, "segment", "shared", bgpASN), - Check: resource.ComposeTestCheckFunc( - 
testAccCheckSiteToSiteVPNAttachmentExists(ctx, resourceName, &v), - resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, "1"), - resource.TestCheckResourceAttr(resourceName, "tags.segment", "shared"), - ), - }, - { - Config: testAccSiteToSiteVPNAttachmentConfig_tags2(rName, vpnIP, "segment", "shared", "Name", "test", bgpASN), - Check: resource.ComposeTestCheckFunc( - testAccCheckSiteToSiteVPNAttachmentExists(ctx, resourceName, &v), - resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, "2"), - resource.TestCheckResourceAttr(resourceName, "tags.segment", "shared"), - resource.TestCheckResourceAttr(resourceName, "tags.Name", "test"), - ), - }, - { - Config: testAccSiteToSiteVPNAttachmentConfig_tags1(rName, vpnIP, "segment", "shared", bgpASN), - Check: resource.ComposeTestCheckFunc( - testAccCheckSiteToSiteVPNAttachmentExists(ctx, resourceName, &v), - resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, "1"), - resource.TestCheckResourceAttr(resourceName, "tags.segment", "shared"), - ), - }, - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} - func testAccCheckSiteToSiteVPNAttachmentExists(ctx context.Context, n string, v *awstypes.SiteToSiteVpnAttachment) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] @@ -195,9 +144,7 @@ func testAccCheckSiteToSiteVPNAttachmentDestroy(ctx context.Context) resource.Te } func testAccSiteToSiteVPNAttachmentConfig_base(rName string, bgpASN int, vpnIP string) string { - return acctest.ConfigCompose(acctest.ConfigAvailableAZsNoOptIn(), fmt.Sprintf(` -data "aws_region" "current" {} - + return fmt.Sprintf(` resource "aws_customer_gateway" "test" { bgp_asn = %[2]d ip_address = %[3]q @@ -237,6 +184,8 @@ resource "aws_networkmanager_core_network_policy_attachment" "test" { policy_document = data.aws_networkmanager_core_network_policy_document.test.json } +data "aws_region" "current" {} + data 
"aws_networkmanager_core_network_policy_document" "test" { core_network_configuration { vpn_ecmp_support = false @@ -277,7 +226,7 @@ data "aws_networkmanager_core_network_policy_document" "test" { } } } -`, rName, bgpASN, vpnIP)) +`, rName, bgpASN, vpnIP) } func testAccSiteToSiteVPNAttachmentConfig_basic(rName string, bgpASN int, vpnIP string) string { @@ -297,40 +246,3 @@ resource "aws_networkmanager_attachment_accepter" "test" { } `) } - -func testAccSiteToSiteVPNAttachmentConfig_tags1(rName, vpnIP, tagKey1, tagValue1 string, bgpASN int) string { - return acctest.ConfigCompose(testAccSiteToSiteVPNAttachmentConfig_base(rName, bgpASN, vpnIP), fmt.Sprintf(` -resource "aws_networkmanager_site_to_site_vpn_attachment" "test" { - core_network_id = aws_networkmanager_core_network_policy_attachment.test.core_network_id - vpn_connection_arn = aws_vpn_connection.test.arn - - tags = { - %[1]q = %[2]q - } -} - -resource "aws_networkmanager_attachment_accepter" "test" { - attachment_id = aws_networkmanager_site_to_site_vpn_attachment.test.id - attachment_type = aws_networkmanager_site_to_site_vpn_attachment.test.attachment_type -} -`, tagKey1, tagValue1)) -} - -func testAccSiteToSiteVPNAttachmentConfig_tags2(rName, vpnIP, tagKey1, tagValue1, tagKey2, tagValue2 string, bgpASN int) string { - return acctest.ConfigCompose(testAccSiteToSiteVPNAttachmentConfig_base(rName, bgpASN, vpnIP), fmt.Sprintf(` -resource "aws_networkmanager_site_to_site_vpn_attachment" "test" { - core_network_id = aws_networkmanager_core_network_policy_attachment.test.core_network_id - vpn_connection_arn = aws_vpn_connection.test.arn - - tags = { - %[1]q = %[2]q - %[3]q = %[4]q - } -} - -resource "aws_networkmanager_attachment_accepter" "test" { - attachment_id = aws_networkmanager_site_to_site_vpn_attachment.test.id - attachment_type = aws_networkmanager_site_to_site_vpn_attachment.test.attachment_type -} -`, tagKey1, tagValue1, tagKey2, tagValue2)) -} diff --git a/internal/service/networkmanager/sweep.go 
b/internal/service/networkmanager/sweep.go index d2ee2e387833..b44bb22262a0 100644 --- a/internal/service/networkmanager/sweep.go +++ b/internal/service/networkmanager/sweep.go @@ -123,7 +123,7 @@ func sweepGlobalNetworks(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.NetworkManagerClient(ctx) input := &networkmanager.DescribeGlobalNetworksInput{} @@ -164,7 +164,7 @@ func sweepCoreNetworks(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.NetworkManagerClient(ctx) input := &networkmanager.ListCoreNetworksInput{} @@ -205,7 +205,7 @@ func sweepConnectAttachments(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.NetworkManagerClient(ctx) input := &networkmanager.ListAttachmentsInput{ @@ -248,7 +248,7 @@ func sweepDirectConnectGatewayAttachments(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.NetworkManagerClient(ctx) input := &networkmanager.ListAttachmentsInput{ @@ -294,7 +294,7 @@ func sweepSiteToSiteVPNAttachments(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.NetworkManagerClient(ctx) input := 
&networkmanager.ListAttachmentsInput{ @@ -337,7 +337,7 @@ func sweepTransitGatewayPeerings(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.NetworkManagerClient(ctx) input := &networkmanager.ListPeeringsInput{ @@ -380,7 +380,7 @@ func sweepTransitGatewayRouteTableAttachments(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.NetworkManagerClient(ctx) input := &networkmanager.ListAttachmentsInput{ @@ -423,7 +423,7 @@ func sweepVPCAttachments(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.NetworkManagerClient(ctx) input := &networkmanager.ListAttachmentsInput{ @@ -466,7 +466,7 @@ func sweepSites(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.NetworkManagerClient(ctx) input := &networkmanager.DescribeGlobalNetworksInput{} @@ -528,7 +528,7 @@ func sweepDevices(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.NetworkManagerClient(ctx) input := &networkmanager.DescribeGlobalNetworksInput{} @@ -590,7 +590,7 @@ func sweepLinks(region string) error { ctx := sweep.Context(region) client, err := 
sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.NetworkManagerClient(ctx) input := &networkmanager.DescribeGlobalNetworksInput{} @@ -652,7 +652,7 @@ func sweepLinkAssociations(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.NetworkManagerClient(ctx) input := &networkmanager.DescribeGlobalNetworksInput{} @@ -713,7 +713,7 @@ func sweepConnections(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.NetworkManagerClient(ctx) input := &networkmanager.DescribeGlobalNetworksInput{} diff --git a/internal/service/networkmanager/tags_gen.go b/internal/service/networkmanager/tags_gen.go index 9724ac92aca1..6b0635d2e1e5 100644 --- a/internal/service/networkmanager/tags_gen.go +++ b/internal/service/networkmanager/tags_gen.go @@ -3,8 +3,8 @@ package networkmanager import ( "context" - "fmt" + "github.com/YakDriver/smarterr" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/networkmanager" awstypes "github.com/aws/aws-sdk-go-v2/service/networkmanager/types" @@ -16,6 +16,39 @@ import ( "github.com/hashicorp/terraform-provider-aws/names" ) +// listTags lists networkmanager service tags. +// The identifier is typically the Amazon Resource Name (ARN), although +// it may also be a different identifier depending on the service. 
+func listTags(ctx context.Context, conn *networkmanager.Client, identifier string, optFns ...func(*networkmanager.Options)) (tftags.KeyValueTags, error) { + input := networkmanager.ListTagsForResourceInput{ + ResourceArn: aws.String(identifier), + } + + output, err := conn.ListTagsForResource(ctx, &input, optFns...) + + if err != nil { + return tftags.New(ctx, nil), smarterr.NewError(err) + } + + return keyValueTags(ctx, output.TagList), nil +} + +// ListTags lists networkmanager service tags and set them in Context. +// It is called from outside this package. +func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier string) error { + tags, err := listTags(ctx, meta.(*conns.AWSClient).NetworkManagerClient(ctx), identifier) + + if err != nil { + return smarterr.NewError(err) + } + + if inContext, ok := tftags.FromContext(ctx); ok { + inContext.TagsOut = option.Some(tags) + } + + return nil +} + // []*SERVICE.Tag handling // svcTags returns networkmanager service tags. @@ -84,7 +117,7 @@ func updateTags(ctx context.Context, conn *networkmanager.Client, identifier str _, err := conn.UntagResource(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("untagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } @@ -99,7 +132,7 @@ func updateTags(ctx context.Context, conn *networkmanager.Client, identifier str _, err := conn.TagResource(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("tagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } diff --git a/internal/service/networkmanager/tags_gen_test.go b/internal/service/networkmanager/tags_gen_test.go new file mode 100644 index 000000000000..f5bf544328f9 --- /dev/null +++ b/internal/service/networkmanager/tags_gen_test.go @@ -0,0 +1,16 @@ +// Code generated by internal/generate/tagstests/main.go; DO NOT EDIT. 
+ +package networkmanager_test + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-testing/knownvalue" + "github.com/hashicorp/terraform-plugin-testing/statecheck" + tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" + tfnetworkmanager "github.com/hashicorp/terraform-provider-aws/internal/service/networkmanager" +) + +func expectFullResourceTags(ctx context.Context, resourceAddress string, knownValue knownvalue.Check) statecheck.StateCheck { + return tfstatecheck.ExpectFullResourceTags(tfnetworkmanager.ServicePackage(ctx), resourceAddress, knownValue) +} diff --git a/internal/service/networkmanager/testdata/ConnectAttachment/tags/main_gen.tf b/internal/service/networkmanager/testdata/ConnectAttachment/tags/main_gen.tf new file mode 100644 index 000000000000..ff40a7c3c634 --- /dev/null +++ b/internal/service/networkmanager/testdata/ConnectAttachment/tags/main_gen.tf @@ -0,0 +1,127 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resource "aws_networkmanager_connect_attachment" "test" { + core_network_id = aws_networkmanager_core_network.test.id + transport_attachment_id = aws_networkmanager_vpc_attachment.test.id + edge_location = aws_networkmanager_vpc_attachment.test.edge_location + options { + protocol = "GRE" + } + depends_on = [ + "aws_networkmanager_attachment_accepter.test" + ] + + tags = var.resource_tags +} + +resource "aws_networkmanager_attachment_accepter" "test2" { + attachment_id = aws_networkmanager_connect_attachment.test.id + attachment_type = aws_networkmanager_connect_attachment.test.attachment_type +} + +resource "aws_networkmanager_vpc_attachment" "test" { + subnet_arns = [aws_subnet.test[0].arn] + core_network_id = aws_networkmanager_core_network_policy_attachment.test.core_network_id + vpc_arn = aws_vpc.test.arn + tags = { + segment = "shared" + } +} + +resource "aws_networkmanager_attachment_accepter" "test" { + attachment_id = 
aws_networkmanager_vpc_attachment.test.id + attachment_type = aws_networkmanager_vpc_attachment.test.attachment_type +} + +# testAccConnectAttachmentConfig_base + +resource "aws_networkmanager_global_network" "test" {} + +resource "aws_networkmanager_core_network" "test" { + global_network_id = aws_networkmanager_global_network.test.id +} + +resource "aws_networkmanager_core_network_policy_attachment" "test" { + core_network_id = aws_networkmanager_core_network.test.id + policy_document = data.aws_networkmanager_core_network_policy_document.test.json +} + +data "aws_region" "current" {} + +data "aws_networkmanager_core_network_policy_document" "test" { + core_network_configuration { + vpn_ecmp_support = false + asn_ranges = ["64512-64555"] + edge_locations { + location = data.aws_region.current.region + asn = 64512 + } + } + segments { + name = "shared" + description = "SegmentForSharedServices" + require_attachment_acceptance = true + } + segment_actions { + action = "share" + mode = "attachment-route" + segment = "shared" + share_with = ["*"] + } + attachment_policies { + rule_number = 1 + condition_logic = "or" + conditions { + type = "tag-value" + operator = "equals" + key = "segment" + value = "shared" + } + action { + association_method = "constant" + segment = "shared" + } + } +} + +# acctest.ConfigVPCWithSubnetsIPv6(rName, 2) + +resource "aws_vpc" "test" { + cidr_block = "10.0.0.0/16" + + assign_generated_ipv6_cidr_block = true +} + +resource "aws_subnet" "test" { + count = 2 + + vpc_id = aws_vpc.test.id + availability_zone = data.aws_availability_zones.available.names[count.index] + + cidr_block = cidrsubnet(aws_vpc.test.cidr_block, 8, count.index) + ipv6_cidr_block = cidrsubnet(aws_vpc.test.ipv6_cidr_block, 8, count.index) + assign_ipv6_address_on_creation = true +} + +# acctest.ConfigAvailableAZsNoOptInDefaultExclude + +data "aws_availability_zones" "available" { + exclude_zone_ids = local.default_exclude_zone_ids + state = "available" + + filter { + 
name = "opt-in-status" + values = ["opt-in-not-required"] + } +} + +locals { + default_exclude_zone_ids = ["usw2-az4", "usgw1-az2"] +} +variable "resource_tags" { + description = "Tags to set on resource. To specify no tags, set to `null`" + # Not setting a default, so that this must explicitly be set to `null` to specify no tags + type = map(string) + nullable = true +} diff --git a/internal/service/networkmanager/testdata/ConnectAttachment/tagsComputed1/main_gen.tf b/internal/service/networkmanager/testdata/ConnectAttachment/tagsComputed1/main_gen.tf new file mode 100644 index 000000000000..1182bc942216 --- /dev/null +++ b/internal/service/networkmanager/testdata/ConnectAttachment/tagsComputed1/main_gen.tf @@ -0,0 +1,131 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +provider "null" {} + +resource "aws_networkmanager_connect_attachment" "test" { + core_network_id = aws_networkmanager_core_network.test.id + transport_attachment_id = aws_networkmanager_vpc_attachment.test.id + edge_location = aws_networkmanager_vpc_attachment.test.edge_location + options { + protocol = "GRE" + } + depends_on = [ + "aws_networkmanager_attachment_accepter.test" + ] + + tags = { + (var.unknownTagKey) = null_resource.test.id + } +} + +resource "aws_networkmanager_attachment_accepter" "test2" { + attachment_id = aws_networkmanager_connect_attachment.test.id + attachment_type = aws_networkmanager_connect_attachment.test.attachment_type +} + +resource "aws_networkmanager_vpc_attachment" "test" { + subnet_arns = [aws_subnet.test[0].arn] + core_network_id = aws_networkmanager_core_network_policy_attachment.test.core_network_id + vpc_arn = aws_vpc.test.arn + tags = { + segment = "shared" + } +} + +resource "aws_networkmanager_attachment_accepter" "test" { + attachment_id = aws_networkmanager_vpc_attachment.test.id + attachment_type = aws_networkmanager_vpc_attachment.test.attachment_type +} + +# testAccConnectAttachmentConfig_base + +resource 
"aws_networkmanager_global_network" "test" {} + +resource "aws_networkmanager_core_network" "test" { + global_network_id = aws_networkmanager_global_network.test.id +} + +resource "aws_networkmanager_core_network_policy_attachment" "test" { + core_network_id = aws_networkmanager_core_network.test.id + policy_document = data.aws_networkmanager_core_network_policy_document.test.json +} + +data "aws_region" "current" {} + +data "aws_networkmanager_core_network_policy_document" "test" { + core_network_configuration { + vpn_ecmp_support = false + asn_ranges = ["64512-64555"] + edge_locations { + location = data.aws_region.current.region + asn = 64512 + } + } + segments { + name = "shared" + description = "SegmentForSharedServices" + require_attachment_acceptance = true + } + segment_actions { + action = "share" + mode = "attachment-route" + segment = "shared" + share_with = ["*"] + } + attachment_policies { + rule_number = 1 + condition_logic = "or" + conditions { + type = "tag-value" + operator = "equals" + key = "segment" + value = "shared" + } + action { + association_method = "constant" + segment = "shared" + } + } +} + +# acctest.ConfigVPCWithSubnetsIPv6(rName, 2) + +resource "aws_vpc" "test" { + cidr_block = "10.0.0.0/16" + + assign_generated_ipv6_cidr_block = true +} + +resource "aws_subnet" "test" { + count = 2 + + vpc_id = aws_vpc.test.id + availability_zone = data.aws_availability_zones.available.names[count.index] + + cidr_block = cidrsubnet(aws_vpc.test.cidr_block, 8, count.index) + ipv6_cidr_block = cidrsubnet(aws_vpc.test.ipv6_cidr_block, 8, count.index) + assign_ipv6_address_on_creation = true +} + +# acctest.ConfigAvailableAZsNoOptInDefaultExclude + +data "aws_availability_zones" "available" { + exclude_zone_ids = local.default_exclude_zone_ids + state = "available" + + filter { + name = "opt-in-status" + values = ["opt-in-not-required"] + } +} + +locals { + default_exclude_zone_ids = ["usw2-az4", "usgw1-az2"] +} +resource "null_resource" "test" {} + 
+variable "unknownTagKey" { + type = string + nullable = false +} diff --git a/internal/service/networkmanager/testdata/ConnectAttachment/tagsComputed2/main_gen.tf b/internal/service/networkmanager/testdata/ConnectAttachment/tagsComputed2/main_gen.tf new file mode 100644 index 000000000000..0776a12aa5ef --- /dev/null +++ b/internal/service/networkmanager/testdata/ConnectAttachment/tagsComputed2/main_gen.tf @@ -0,0 +1,142 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +provider "null" {} + +resource "aws_networkmanager_connect_attachment" "test" { + core_network_id = aws_networkmanager_core_network.test.id + transport_attachment_id = aws_networkmanager_vpc_attachment.test.id + edge_location = aws_networkmanager_vpc_attachment.test.edge_location + options { + protocol = "GRE" + } + depends_on = [ + "aws_networkmanager_attachment_accepter.test" + ] + + tags = { + (var.unknownTagKey) = null_resource.test.id + (var.knownTagKey) = var.knownTagValue + } +} + +resource "aws_networkmanager_attachment_accepter" "test2" { + attachment_id = aws_networkmanager_connect_attachment.test.id + attachment_type = aws_networkmanager_connect_attachment.test.attachment_type +} + +resource "aws_networkmanager_vpc_attachment" "test" { + subnet_arns = [aws_subnet.test[0].arn] + core_network_id = aws_networkmanager_core_network_policy_attachment.test.core_network_id + vpc_arn = aws_vpc.test.arn + tags = { + segment = "shared" + } +} + +resource "aws_networkmanager_attachment_accepter" "test" { + attachment_id = aws_networkmanager_vpc_attachment.test.id + attachment_type = aws_networkmanager_vpc_attachment.test.attachment_type +} + +# testAccConnectAttachmentConfig_base + +resource "aws_networkmanager_global_network" "test" {} + +resource "aws_networkmanager_core_network" "test" { + global_network_id = aws_networkmanager_global_network.test.id +} + +resource "aws_networkmanager_core_network_policy_attachment" "test" { + core_network_id = 
aws_networkmanager_core_network.test.id + policy_document = data.aws_networkmanager_core_network_policy_document.test.json +} + +data "aws_region" "current" {} + +data "aws_networkmanager_core_network_policy_document" "test" { + core_network_configuration { + vpn_ecmp_support = false + asn_ranges = ["64512-64555"] + edge_locations { + location = data.aws_region.current.region + asn = 64512 + } + } + segments { + name = "shared" + description = "SegmentForSharedServices" + require_attachment_acceptance = true + } + segment_actions { + action = "share" + mode = "attachment-route" + segment = "shared" + share_with = ["*"] + } + attachment_policies { + rule_number = 1 + condition_logic = "or" + conditions { + type = "tag-value" + operator = "equals" + key = "segment" + value = "shared" + } + action { + association_method = "constant" + segment = "shared" + } + } +} + +# acctest.ConfigVPCWithSubnetsIPv6(rName, 2) + +resource "aws_vpc" "test" { + cidr_block = "10.0.0.0/16" + + assign_generated_ipv6_cidr_block = true +} + +resource "aws_subnet" "test" { + count = 2 + + vpc_id = aws_vpc.test.id + availability_zone = data.aws_availability_zones.available.names[count.index] + + cidr_block = cidrsubnet(aws_vpc.test.cidr_block, 8, count.index) + ipv6_cidr_block = cidrsubnet(aws_vpc.test.ipv6_cidr_block, 8, count.index) + assign_ipv6_address_on_creation = true +} + +# acctest.ConfigAvailableAZsNoOptInDefaultExclude + +data "aws_availability_zones" "available" { + exclude_zone_ids = local.default_exclude_zone_ids + state = "available" + + filter { + name = "opt-in-status" + values = ["opt-in-not-required"] + } +} + +locals { + default_exclude_zone_ids = ["usw2-az4", "usgw1-az2"] +} +resource "null_resource" "test" {} + +variable "unknownTagKey" { + type = string + nullable = false +} + +variable "knownTagKey" { + type = string + nullable = false +} + +variable "knownTagValue" { + type = string + nullable = false +} diff --git 
a/internal/service/networkmanager/testdata/ConnectAttachment/tags_defaults/main_gen.tf b/internal/service/networkmanager/testdata/ConnectAttachment/tags_defaults/main_gen.tf new file mode 100644 index 000000000000..b6af291c15d3 --- /dev/null +++ b/internal/service/networkmanager/testdata/ConnectAttachment/tags_defaults/main_gen.tf @@ -0,0 +1,138 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +provider "aws" { + default_tags { + tags = var.provider_tags + } +} + +resource "aws_networkmanager_connect_attachment" "test" { + core_network_id = aws_networkmanager_core_network.test.id + transport_attachment_id = aws_networkmanager_vpc_attachment.test.id + edge_location = aws_networkmanager_vpc_attachment.test.edge_location + options { + protocol = "GRE" + } + depends_on = [ + "aws_networkmanager_attachment_accepter.test" + ] + + tags = var.resource_tags +} + +resource "aws_networkmanager_attachment_accepter" "test2" { + attachment_id = aws_networkmanager_connect_attachment.test.id + attachment_type = aws_networkmanager_connect_attachment.test.attachment_type +} + +resource "aws_networkmanager_vpc_attachment" "test" { + subnet_arns = [aws_subnet.test[0].arn] + core_network_id = aws_networkmanager_core_network_policy_attachment.test.core_network_id + vpc_arn = aws_vpc.test.arn + tags = { + segment = "shared" + } +} + +resource "aws_networkmanager_attachment_accepter" "test" { + attachment_id = aws_networkmanager_vpc_attachment.test.id + attachment_type = aws_networkmanager_vpc_attachment.test.attachment_type +} + +# testAccConnectAttachmentConfig_base + +resource "aws_networkmanager_global_network" "test" {} + +resource "aws_networkmanager_core_network" "test" { + global_network_id = aws_networkmanager_global_network.test.id +} + +resource "aws_networkmanager_core_network_policy_attachment" "test" { + core_network_id = aws_networkmanager_core_network.test.id + policy_document = data.aws_networkmanager_core_network_policy_document.test.json +} + 
+data "aws_region" "current" {} + +data "aws_networkmanager_core_network_policy_document" "test" { + core_network_configuration { + vpn_ecmp_support = false + asn_ranges = ["64512-64555"] + edge_locations { + location = data.aws_region.current.region + asn = 64512 + } + } + segments { + name = "shared" + description = "SegmentForSharedServices" + require_attachment_acceptance = true + } + segment_actions { + action = "share" + mode = "attachment-route" + segment = "shared" + share_with = ["*"] + } + attachment_policies { + rule_number = 1 + condition_logic = "or" + conditions { + type = "tag-value" + operator = "equals" + key = "segment" + value = "shared" + } + action { + association_method = "constant" + segment = "shared" + } + } +} + +# acctest.ConfigVPCWithSubnetsIPv6(rName, 2) + +resource "aws_vpc" "test" { + cidr_block = "10.0.0.0/16" + + assign_generated_ipv6_cidr_block = true +} + +resource "aws_subnet" "test" { + count = 2 + + vpc_id = aws_vpc.test.id + availability_zone = data.aws_availability_zones.available.names[count.index] + + cidr_block = cidrsubnet(aws_vpc.test.cidr_block, 8, count.index) + ipv6_cidr_block = cidrsubnet(aws_vpc.test.ipv6_cidr_block, 8, count.index) + assign_ipv6_address_on_creation = true +} + +# acctest.ConfigAvailableAZsNoOptInDefaultExclude + +data "aws_availability_zones" "available" { + exclude_zone_ids = local.default_exclude_zone_ids + state = "available" + + filter { + name = "opt-in-status" + values = ["opt-in-not-required"] + } +} + +locals { + default_exclude_zone_ids = ["usw2-az4", "usgw1-az2"] +} +variable "resource_tags" { + description = "Tags to set on resource. 
To specify no tags, set to `null`" + # Not setting a default, so that this must explicitly be set to `null` to specify no tags + type = map(string) + nullable = true +} + +variable "provider_tags" { + type = map(string) + nullable = false +} diff --git a/internal/service/networkmanager/testdata/ConnectAttachment/tags_ignore/main_gen.tf b/internal/service/networkmanager/testdata/ConnectAttachment/tags_ignore/main_gen.tf new file mode 100644 index 000000000000..7e7dc53fba03 --- /dev/null +++ b/internal/service/networkmanager/testdata/ConnectAttachment/tags_ignore/main_gen.tf @@ -0,0 +1,147 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +provider "aws" { + default_tags { + tags = var.provider_tags + } + ignore_tags { + keys = var.ignore_tag_keys + } +} + +resource "aws_networkmanager_connect_attachment" "test" { + core_network_id = aws_networkmanager_core_network.test.id + transport_attachment_id = aws_networkmanager_vpc_attachment.test.id + edge_location = aws_networkmanager_vpc_attachment.test.edge_location + options { + protocol = "GRE" + } + depends_on = [ + "aws_networkmanager_attachment_accepter.test" + ] + + tags = var.resource_tags +} + +resource "aws_networkmanager_attachment_accepter" "test2" { + attachment_id = aws_networkmanager_connect_attachment.test.id + attachment_type = aws_networkmanager_connect_attachment.test.attachment_type +} + +resource "aws_networkmanager_vpc_attachment" "test" { + subnet_arns = [aws_subnet.test[0].arn] + core_network_id = aws_networkmanager_core_network_policy_attachment.test.core_network_id + vpc_arn = aws_vpc.test.arn + tags = { + segment = "shared" + } +} + +resource "aws_networkmanager_attachment_accepter" "test" { + attachment_id = aws_networkmanager_vpc_attachment.test.id + attachment_type = aws_networkmanager_vpc_attachment.test.attachment_type +} + +# testAccConnectAttachmentConfig_base + +resource "aws_networkmanager_global_network" "test" {} + +resource "aws_networkmanager_core_network" 
"test" { + global_network_id = aws_networkmanager_global_network.test.id +} + +resource "aws_networkmanager_core_network_policy_attachment" "test" { + core_network_id = aws_networkmanager_core_network.test.id + policy_document = data.aws_networkmanager_core_network_policy_document.test.json +} + +data "aws_region" "current" {} + +data "aws_networkmanager_core_network_policy_document" "test" { + core_network_configuration { + vpn_ecmp_support = false + asn_ranges = ["64512-64555"] + edge_locations { + location = data.aws_region.current.region + asn = 64512 + } + } + segments { + name = "shared" + description = "SegmentForSharedServices" + require_attachment_acceptance = true + } + segment_actions { + action = "share" + mode = "attachment-route" + segment = "shared" + share_with = ["*"] + } + attachment_policies { + rule_number = 1 + condition_logic = "or" + conditions { + type = "tag-value" + operator = "equals" + key = "segment" + value = "shared" + } + action { + association_method = "constant" + segment = "shared" + } + } +} + +# acctest.ConfigVPCWithSubnetsIPv6(rName, 2) + +resource "aws_vpc" "test" { + cidr_block = "10.0.0.0/16" + + assign_generated_ipv6_cidr_block = true +} + +resource "aws_subnet" "test" { + count = 2 + + vpc_id = aws_vpc.test.id + availability_zone = data.aws_availability_zones.available.names[count.index] + + cidr_block = cidrsubnet(aws_vpc.test.cidr_block, 8, count.index) + ipv6_cidr_block = cidrsubnet(aws_vpc.test.ipv6_cidr_block, 8, count.index) + assign_ipv6_address_on_creation = true +} + +# acctest.ConfigAvailableAZsNoOptInDefaultExclude + +data "aws_availability_zones" "available" { + exclude_zone_ids = local.default_exclude_zone_ids + state = "available" + + filter { + name = "opt-in-status" + values = ["opt-in-not-required"] + } +} + +locals { + default_exclude_zone_ids = ["usw2-az4", "usgw1-az2"] +} +variable "resource_tags" { + description = "Tags to set on resource. 
To specify no tags, set to `null`" + # Not setting a default, so that this must explicitly be set to `null` to specify no tags + type = map(string) + nullable = true +} + +variable "provider_tags" { + type = map(string) + nullable = true + default = null +} + +variable "ignore_tag_keys" { + type = set(string) + nullable = false +} diff --git a/internal/service/networkmanager/testdata/ConnectPeer/tags/main_gen.tf b/internal/service/networkmanager/testdata/ConnectPeer/tags/main_gen.tf new file mode 100644 index 000000000000..c97c59fef4f4 --- /dev/null +++ b/internal/service/networkmanager/testdata/ConnectPeer/tags/main_gen.tf @@ -0,0 +1,149 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resource "aws_networkmanager_connect_peer" "test" { + connect_attachment_id = aws_networkmanager_connect_attachment.test.id + peer_address = local.peer_address + bgp_options { + peer_asn = local.peer_asn + } + inside_cidr_blocks = local.inside_cidr_blocks + + tags = var.resource_tags +} + +locals { + inside_cidr_blocks = ["169.254.10.0/29"] + peer_address = "1.1.1.1" + peer_asn = "65501" + protocol = "GRE" +} + +# testAccConnectPeerConfig_base + +resource "aws_networkmanager_global_network" "test" {} + +resource "aws_networkmanager_core_network" "test" { + global_network_id = aws_networkmanager_global_network.test.id +} + +resource "aws_networkmanager_core_network_policy_attachment" "test" { + core_network_id = aws_networkmanager_core_network.test.id + policy_document = data.aws_networkmanager_core_network_policy_document.test.json +} + +data "aws_region" "current" {} + +data "aws_networkmanager_core_network_policy_document" "test" { + core_network_configuration { + vpn_ecmp_support = false + asn_ranges = ["64512-64555"] + inside_cidr_blocks = ["172.16.0.0/16"] + edge_locations { + location = data.aws_region.current.region + asn = 64512 + inside_cidr_blocks = ["172.16.0.0/18"] + } + } + segments { + name = "shared" + description = "SegmentForSharedServices" 
+ require_attachment_acceptance = true + } + segment_actions { + action = "share" + mode = "attachment-route" + segment = "shared" + share_with = ["*"] + } + attachment_policies { + rule_number = 1 + condition_logic = "or" + conditions { + type = "tag-value" + operator = "equals" + key = "segment" + value = "shared" + } + action { + association_method = "constant" + segment = "shared" + } + } +} + +resource "aws_networkmanager_vpc_attachment" "test" { + subnet_arns = aws_subnet.test[*].arn + core_network_id = aws_networkmanager_core_network_policy_attachment.test.core_network_id + vpc_arn = aws_vpc.test.arn + tags = { + segment = "shared" + } +} + +resource "aws_networkmanager_attachment_accepter" "test" { + attachment_id = aws_networkmanager_vpc_attachment.test.id + attachment_type = aws_networkmanager_vpc_attachment.test.attachment_type +} + +resource "aws_networkmanager_connect_attachment" "test" { + core_network_id = aws_networkmanager_core_network.test.id + transport_attachment_id = aws_networkmanager_vpc_attachment.test.id + edge_location = aws_networkmanager_vpc_attachment.test.edge_location + options { + protocol = local.protocol + } + tags = { + segment = "shared" + } + depends_on = [ + "aws_networkmanager_attachment_accepter.test" + ] +} + +resource "aws_networkmanager_attachment_accepter" "test2" { + attachment_id = aws_networkmanager_connect_attachment.test.id + attachment_type = aws_networkmanager_connect_attachment.test.attachment_type +} + +# acctest.ConfigVPCWithSubnetsIPv6(rName, 2) + +resource "aws_vpc" "test" { + cidr_block = "10.0.0.0/16" + + assign_generated_ipv6_cidr_block = true +} + +resource "aws_subnet" "test" { + count = 2 + + vpc_id = aws_vpc.test.id + availability_zone = data.aws_availability_zones.available.names[count.index] + + cidr_block = cidrsubnet(aws_vpc.test.cidr_block, 8, count.index) + ipv6_cidr_block = cidrsubnet(aws_vpc.test.ipv6_cidr_block, 8, count.index) + assign_ipv6_address_on_creation = true +} + +# 
acctest.ConfigAvailableAZsNoOptInDefaultExclude + +data "aws_availability_zones" "available" { + exclude_zone_ids = local.default_exclude_zone_ids + state = "available" + + filter { + name = "opt-in-status" + values = ["opt-in-not-required"] + } +} + +locals { + default_exclude_zone_ids = ["usw2-az4", "usgw1-az2"] +} + +variable "resource_tags" { + description = "Tags to set on resource. To specify no tags, set to `null`" + # Not setting a default, so that this must explicitly be set to `null` to specify no tags + type = map(string) + nullable = true +} diff --git a/internal/service/networkmanager/testdata/ConnectPeer/tagsComputed1/main_gen.tf b/internal/service/networkmanager/testdata/ConnectPeer/tagsComputed1/main_gen.tf new file mode 100644 index 000000000000..df8190a9879d --- /dev/null +++ b/internal/service/networkmanager/testdata/ConnectPeer/tagsComputed1/main_gen.tf @@ -0,0 +1,153 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +provider "null" {} + +resource "aws_networkmanager_connect_peer" "test" { + connect_attachment_id = aws_networkmanager_connect_attachment.test.id + peer_address = local.peer_address + bgp_options { + peer_asn = local.peer_asn + } + inside_cidr_blocks = local.inside_cidr_blocks + + tags = { + (var.unknownTagKey) = null_resource.test.id + } +} + +locals { + inside_cidr_blocks = ["169.254.10.0/29"] + peer_address = "1.1.1.1" + peer_asn = "65501" + protocol = "GRE" +} + +# testAccConnectPeerConfig_base + +resource "aws_networkmanager_global_network" "test" {} + +resource "aws_networkmanager_core_network" "test" { + global_network_id = aws_networkmanager_global_network.test.id +} + +resource "aws_networkmanager_core_network_policy_attachment" "test" { + core_network_id = aws_networkmanager_core_network.test.id + policy_document = data.aws_networkmanager_core_network_policy_document.test.json +} + +data "aws_region" "current" {} + +data "aws_networkmanager_core_network_policy_document" "test" { + 
core_network_configuration { + vpn_ecmp_support = false + asn_ranges = ["64512-64555"] + inside_cidr_blocks = ["172.16.0.0/16"] + edge_locations { + location = data.aws_region.current.region + asn = 64512 + inside_cidr_blocks = ["172.16.0.0/18"] + } + } + segments { + name = "shared" + description = "SegmentForSharedServices" + require_attachment_acceptance = true + } + segment_actions { + action = "share" + mode = "attachment-route" + segment = "shared" + share_with = ["*"] + } + attachment_policies { + rule_number = 1 + condition_logic = "or" + conditions { + type = "tag-value" + operator = "equals" + key = "segment" + value = "shared" + } + action { + association_method = "constant" + segment = "shared" + } + } +} + +resource "aws_networkmanager_vpc_attachment" "test" { + subnet_arns = aws_subnet.test[*].arn + core_network_id = aws_networkmanager_core_network_policy_attachment.test.core_network_id + vpc_arn = aws_vpc.test.arn + tags = { + segment = "shared" + } +} + +resource "aws_networkmanager_attachment_accepter" "test" { + attachment_id = aws_networkmanager_vpc_attachment.test.id + attachment_type = aws_networkmanager_vpc_attachment.test.attachment_type +} + +resource "aws_networkmanager_connect_attachment" "test" { + core_network_id = aws_networkmanager_core_network.test.id + transport_attachment_id = aws_networkmanager_vpc_attachment.test.id + edge_location = aws_networkmanager_vpc_attachment.test.edge_location + options { + protocol = local.protocol + } + tags = { + segment = "shared" + } + depends_on = [ + "aws_networkmanager_attachment_accepter.test" + ] +} + +resource "aws_networkmanager_attachment_accepter" "test2" { + attachment_id = aws_networkmanager_connect_attachment.test.id + attachment_type = aws_networkmanager_connect_attachment.test.attachment_type +} + +# acctest.ConfigVPCWithSubnetsIPv6(rName, 2) + +resource "aws_vpc" "test" { + cidr_block = "10.0.0.0/16" + + assign_generated_ipv6_cidr_block = true +} + +resource "aws_subnet" "test" { + 
count = 2 + + vpc_id = aws_vpc.test.id + availability_zone = data.aws_availability_zones.available.names[count.index] + + cidr_block = cidrsubnet(aws_vpc.test.cidr_block, 8, count.index) + ipv6_cidr_block = cidrsubnet(aws_vpc.test.ipv6_cidr_block, 8, count.index) + assign_ipv6_address_on_creation = true +} + +# acctest.ConfigAvailableAZsNoOptInDefaultExclude + +data "aws_availability_zones" "available" { + exclude_zone_ids = local.default_exclude_zone_ids + state = "available" + + filter { + name = "opt-in-status" + values = ["opt-in-not-required"] + } +} + +locals { + default_exclude_zone_ids = ["usw2-az4", "usgw1-az2"] +} + +resource "null_resource" "test" {} + +variable "unknownTagKey" { + type = string + nullable = false +} diff --git a/internal/service/networkmanager/testdata/ConnectPeer/tagsComputed2/main_gen.tf b/internal/service/networkmanager/testdata/ConnectPeer/tagsComputed2/main_gen.tf new file mode 100644 index 000000000000..7b934e932742 --- /dev/null +++ b/internal/service/networkmanager/testdata/ConnectPeer/tagsComputed2/main_gen.tf @@ -0,0 +1,164 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +provider "null" {} + +resource "aws_networkmanager_connect_peer" "test" { + connect_attachment_id = aws_networkmanager_connect_attachment.test.id + peer_address = local.peer_address + bgp_options { + peer_asn = local.peer_asn + } + inside_cidr_blocks = local.inside_cidr_blocks + + tags = { + (var.unknownTagKey) = null_resource.test.id + (var.knownTagKey) = var.knownTagValue + } +} + +locals { + inside_cidr_blocks = ["169.254.10.0/29"] + peer_address = "1.1.1.1" + peer_asn = "65501" + protocol = "GRE" +} + +# testAccConnectPeerConfig_base + +resource "aws_networkmanager_global_network" "test" {} + +resource "aws_networkmanager_core_network" "test" { + global_network_id = aws_networkmanager_global_network.test.id +} + +resource "aws_networkmanager_core_network_policy_attachment" "test" { + core_network_id = aws_networkmanager_core_network.test.id + policy_document = data.aws_networkmanager_core_network_policy_document.test.json +} + +data "aws_region" "current" {} + +data "aws_networkmanager_core_network_policy_document" "test" { + core_network_configuration { + vpn_ecmp_support = false + asn_ranges = ["64512-64555"] + inside_cidr_blocks = ["172.16.0.0/16"] + edge_locations { + location = data.aws_region.current.region + asn = 64512 + inside_cidr_blocks = ["172.16.0.0/18"] + } + } + segments { + name = "shared" + description = "SegmentForSharedServices" + require_attachment_acceptance = true + } + segment_actions { + action = "share" + mode = "attachment-route" + segment = "shared" + share_with = ["*"] + } + attachment_policies { + rule_number = 1 + condition_logic = "or" + conditions { + type = "tag-value" + operator = "equals" + key = "segment" + value = "shared" + } + action { + association_method = "constant" + segment = "shared" + } + } +} + +resource "aws_networkmanager_vpc_attachment" "test" { + subnet_arns = aws_subnet.test[*].arn + core_network_id = aws_networkmanager_core_network_policy_attachment.test.core_network_id 
+ vpc_arn = aws_vpc.test.arn + tags = { + segment = "shared" + } +} + +resource "aws_networkmanager_attachment_accepter" "test" { + attachment_id = aws_networkmanager_vpc_attachment.test.id + attachment_type = aws_networkmanager_vpc_attachment.test.attachment_type +} + +resource "aws_networkmanager_connect_attachment" "test" { + core_network_id = aws_networkmanager_core_network.test.id + transport_attachment_id = aws_networkmanager_vpc_attachment.test.id + edge_location = aws_networkmanager_vpc_attachment.test.edge_location + options { + protocol = local.protocol + } + tags = { + segment = "shared" + } + depends_on = [ + "aws_networkmanager_attachment_accepter.test" + ] +} + +resource "aws_networkmanager_attachment_accepter" "test2" { + attachment_id = aws_networkmanager_connect_attachment.test.id + attachment_type = aws_networkmanager_connect_attachment.test.attachment_type +} + +# acctest.ConfigVPCWithSubnetsIPv6(rName, 2) + +resource "aws_vpc" "test" { + cidr_block = "10.0.0.0/16" + + assign_generated_ipv6_cidr_block = true +} + +resource "aws_subnet" "test" { + count = 2 + + vpc_id = aws_vpc.test.id + availability_zone = data.aws_availability_zones.available.names[count.index] + + cidr_block = cidrsubnet(aws_vpc.test.cidr_block, 8, count.index) + ipv6_cidr_block = cidrsubnet(aws_vpc.test.ipv6_cidr_block, 8, count.index) + assign_ipv6_address_on_creation = true +} + +# acctest.ConfigAvailableAZsNoOptInDefaultExclude + +data "aws_availability_zones" "available" { + exclude_zone_ids = local.default_exclude_zone_ids + state = "available" + + filter { + name = "opt-in-status" + values = ["opt-in-not-required"] + } +} + +locals { + default_exclude_zone_ids = ["usw2-az4", "usgw1-az2"] +} + +resource "null_resource" "test" {} + +variable "unknownTagKey" { + type = string + nullable = false +} + +variable "knownTagKey" { + type = string + nullable = false +} + +variable "knownTagValue" { + type = string + nullable = false +} diff --git 
a/internal/service/networkmanager/testdata/ConnectPeer/tags_defaults/main_gen.tf b/internal/service/networkmanager/testdata/ConnectPeer/tags_defaults/main_gen.tf new file mode 100644 index 000000000000..979f3e0d3975 --- /dev/null +++ b/internal/service/networkmanager/testdata/ConnectPeer/tags_defaults/main_gen.tf @@ -0,0 +1,160 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +provider "aws" { + default_tags { + tags = var.provider_tags + } +} + +resource "aws_networkmanager_connect_peer" "test" { + connect_attachment_id = aws_networkmanager_connect_attachment.test.id + peer_address = local.peer_address + bgp_options { + peer_asn = local.peer_asn + } + inside_cidr_blocks = local.inside_cidr_blocks + + tags = var.resource_tags +} + +locals { + inside_cidr_blocks = ["169.254.10.0/29"] + peer_address = "1.1.1.1" + peer_asn = "65501" + protocol = "GRE" +} + +# testAccConnectPeerConfig_base + +resource "aws_networkmanager_global_network" "test" {} + +resource "aws_networkmanager_core_network" "test" { + global_network_id = aws_networkmanager_global_network.test.id +} + +resource "aws_networkmanager_core_network_policy_attachment" "test" { + core_network_id = aws_networkmanager_core_network.test.id + policy_document = data.aws_networkmanager_core_network_policy_document.test.json +} + +data "aws_region" "current" {} + +data "aws_networkmanager_core_network_policy_document" "test" { + core_network_configuration { + vpn_ecmp_support = false + asn_ranges = ["64512-64555"] + inside_cidr_blocks = ["172.16.0.0/16"] + edge_locations { + location = data.aws_region.current.region + asn = 64512 + inside_cidr_blocks = ["172.16.0.0/18"] + } + } + segments { + name = "shared" + description = "SegmentForSharedServices" + require_attachment_acceptance = true + } + segment_actions { + action = "share" + mode = "attachment-route" + segment = "shared" + share_with = ["*"] + } + attachment_policies { + rule_number = 1 + condition_logic = "or" + conditions { + type 
= "tag-value" + operator = "equals" + key = "segment" + value = "shared" + } + action { + association_method = "constant" + segment = "shared" + } + } +} + +resource "aws_networkmanager_vpc_attachment" "test" { + subnet_arns = aws_subnet.test[*].arn + core_network_id = aws_networkmanager_core_network_policy_attachment.test.core_network_id + vpc_arn = aws_vpc.test.arn + tags = { + segment = "shared" + } +} + +resource "aws_networkmanager_attachment_accepter" "test" { + attachment_id = aws_networkmanager_vpc_attachment.test.id + attachment_type = aws_networkmanager_vpc_attachment.test.attachment_type +} + +resource "aws_networkmanager_connect_attachment" "test" { + core_network_id = aws_networkmanager_core_network.test.id + transport_attachment_id = aws_networkmanager_vpc_attachment.test.id + edge_location = aws_networkmanager_vpc_attachment.test.edge_location + options { + protocol = local.protocol + } + tags = { + segment = "shared" + } + depends_on = [ + "aws_networkmanager_attachment_accepter.test" + ] +} + +resource "aws_networkmanager_attachment_accepter" "test2" { + attachment_id = aws_networkmanager_connect_attachment.test.id + attachment_type = aws_networkmanager_connect_attachment.test.attachment_type +} + +# acctest.ConfigVPCWithSubnetsIPv6(rName, 2) + +resource "aws_vpc" "test" { + cidr_block = "10.0.0.0/16" + + assign_generated_ipv6_cidr_block = true +} + +resource "aws_subnet" "test" { + count = 2 + + vpc_id = aws_vpc.test.id + availability_zone = data.aws_availability_zones.available.names[count.index] + + cidr_block = cidrsubnet(aws_vpc.test.cidr_block, 8, count.index) + ipv6_cidr_block = cidrsubnet(aws_vpc.test.ipv6_cidr_block, 8, count.index) + assign_ipv6_address_on_creation = true +} + +# acctest.ConfigAvailableAZsNoOptInDefaultExclude + +data "aws_availability_zones" "available" { + exclude_zone_ids = local.default_exclude_zone_ids + state = "available" + + filter { + name = "opt-in-status" + values = ["opt-in-not-required"] + } +} + +locals { + 
default_exclude_zone_ids = ["usw2-az4", "usgw1-az2"] +} + +variable "resource_tags" { + description = "Tags to set on resource. To specify no tags, set to `null`" + # Not setting a default, so that this must explicitly be set to `null` to specify no tags + type = map(string) + nullable = true +} + +variable "provider_tags" { + type = map(string) + nullable = false +} diff --git a/internal/service/networkmanager/testdata/ConnectPeer/tags_ignore/main_gen.tf b/internal/service/networkmanager/testdata/ConnectPeer/tags_ignore/main_gen.tf new file mode 100644 index 000000000000..4a57a9568b07 --- /dev/null +++ b/internal/service/networkmanager/testdata/ConnectPeer/tags_ignore/main_gen.tf @@ -0,0 +1,169 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +provider "aws" { + default_tags { + tags = var.provider_tags + } + ignore_tags { + keys = var.ignore_tag_keys + } +} + +resource "aws_networkmanager_connect_peer" "test" { + connect_attachment_id = aws_networkmanager_connect_attachment.test.id + peer_address = local.peer_address + bgp_options { + peer_asn = local.peer_asn + } + inside_cidr_blocks = local.inside_cidr_blocks + + tags = var.resource_tags +} + +locals { + inside_cidr_blocks = ["169.254.10.0/29"] + peer_address = "1.1.1.1" + peer_asn = "65501" + protocol = "GRE" +} + +# testAccConnectPeerConfig_base + +resource "aws_networkmanager_global_network" "test" {} + +resource "aws_networkmanager_core_network" "test" { + global_network_id = aws_networkmanager_global_network.test.id +} + +resource "aws_networkmanager_core_network_policy_attachment" "test" { + core_network_id = aws_networkmanager_core_network.test.id + policy_document = data.aws_networkmanager_core_network_policy_document.test.json +} + +data "aws_region" "current" {} + +data "aws_networkmanager_core_network_policy_document" "test" { + core_network_configuration { + vpn_ecmp_support = false + asn_ranges = ["64512-64555"] + inside_cidr_blocks = ["172.16.0.0/16"] + edge_locations { + 
location = data.aws_region.current.region + asn = 64512 + inside_cidr_blocks = ["172.16.0.0/18"] + } + } + segments { + name = "shared" + description = "SegmentForSharedServices" + require_attachment_acceptance = true + } + segment_actions { + action = "share" + mode = "attachment-route" + segment = "shared" + share_with = ["*"] + } + attachment_policies { + rule_number = 1 + condition_logic = "or" + conditions { + type = "tag-value" + operator = "equals" + key = "segment" + value = "shared" + } + action { + association_method = "constant" + segment = "shared" + } + } +} + +resource "aws_networkmanager_vpc_attachment" "test" { + subnet_arns = aws_subnet.test[*].arn + core_network_id = aws_networkmanager_core_network_policy_attachment.test.core_network_id + vpc_arn = aws_vpc.test.arn + tags = { + segment = "shared" + } +} + +resource "aws_networkmanager_attachment_accepter" "test" { + attachment_id = aws_networkmanager_vpc_attachment.test.id + attachment_type = aws_networkmanager_vpc_attachment.test.attachment_type +} + +resource "aws_networkmanager_connect_attachment" "test" { + core_network_id = aws_networkmanager_core_network.test.id + transport_attachment_id = aws_networkmanager_vpc_attachment.test.id + edge_location = aws_networkmanager_vpc_attachment.test.edge_location + options { + protocol = local.protocol + } + tags = { + segment = "shared" + } + depends_on = [ + "aws_networkmanager_attachment_accepter.test" + ] +} + +resource "aws_networkmanager_attachment_accepter" "test2" { + attachment_id = aws_networkmanager_connect_attachment.test.id + attachment_type = aws_networkmanager_connect_attachment.test.attachment_type +} + +# acctest.ConfigVPCWithSubnetsIPv6(rName, 2) + +resource "aws_vpc" "test" { + cidr_block = "10.0.0.0/16" + + assign_generated_ipv6_cidr_block = true +} + +resource "aws_subnet" "test" { + count = 2 + + vpc_id = aws_vpc.test.id + availability_zone = data.aws_availability_zones.available.names[count.index] + + cidr_block = 
cidrsubnet(aws_vpc.test.cidr_block, 8, count.index) + ipv6_cidr_block = cidrsubnet(aws_vpc.test.ipv6_cidr_block, 8, count.index) + assign_ipv6_address_on_creation = true +} + +# acctest.ConfigAvailableAZsNoOptInDefaultExclude + +data "aws_availability_zones" "available" { + exclude_zone_ids = local.default_exclude_zone_ids + state = "available" + + filter { + name = "opt-in-status" + values = ["opt-in-not-required"] + } +} + +locals { + default_exclude_zone_ids = ["usw2-az4", "usgw1-az2"] +} + +variable "resource_tags" { + description = "Tags to set on resource. To specify no tags, set to `null`" + # Not setting a default, so that this must explicitly be set to `null` to specify no tags + type = map(string) + nullable = true +} + +variable "provider_tags" { + type = map(string) + nullable = true + default = null +} + +variable "ignore_tag_keys" { + type = set(string) + nullable = false +} diff --git a/internal/service/networkmanager/testdata/Connection/tags/main_gen.tf b/internal/service/networkmanager/testdata/Connection/tags/main_gen.tf new file mode 100644 index 000000000000..151e61c5bd79 --- /dev/null +++ b/internal/service/networkmanager/testdata/Connection/tags/main_gen.tf @@ -0,0 +1,38 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_networkmanager_connection" "test" { + global_network_id = aws_networkmanager_global_network.test.id + device_id = aws_networkmanager_device.test1.id + connected_device_id = aws_networkmanager_device.test2.id + + tags = var.resource_tags +} + +# testAccConnectionBaseConfig + +resource "aws_networkmanager_global_network" "test" {} + +resource "aws_networkmanager_site" "test" { + global_network_id = aws_networkmanager_global_network.test.id +} + +resource "aws_networkmanager_device" "test1" { + global_network_id = aws_networkmanager_global_network.test.id + site_id = aws_networkmanager_site.test.id +} + +resource "aws_networkmanager_device" "test2" { + global_network_id = aws_networkmanager_global_network.test.id + site_id = aws_networkmanager_site.test.id + + # Create one device at a time. + depends_on = [aws_networkmanager_device.test1] +} + +variable "resource_tags" { + description = "Tags to set on resource. To specify no tags, set to `null`" + # Not setting a default, so that this must explicitly be set to `null` to specify no tags + type = map(string) + nullable = true +} diff --git a/internal/service/networkmanager/testdata/Connection/tagsComputed1/main_gen.tf b/internal/service/networkmanager/testdata/Connection/tagsComputed1/main_gen.tf new file mode 100644 index 000000000000..359ac72f5292 --- /dev/null +++ b/internal/service/networkmanager/testdata/Connection/tagsComputed1/main_gen.tf @@ -0,0 +1,42 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +provider "null" {} + +resource "aws_networkmanager_connection" "test" { + global_network_id = aws_networkmanager_global_network.test.id + device_id = aws_networkmanager_device.test1.id + connected_device_id = aws_networkmanager_device.test2.id + + tags = { + (var.unknownTagKey) = null_resource.test.id + } +} + +# testAccConnectionBaseConfig + +resource "aws_networkmanager_global_network" "test" {} + +resource "aws_networkmanager_site" "test" { + global_network_id = aws_networkmanager_global_network.test.id +} + +resource "aws_networkmanager_device" "test1" { + global_network_id = aws_networkmanager_global_network.test.id + site_id = aws_networkmanager_site.test.id +} + +resource "aws_networkmanager_device" "test2" { + global_network_id = aws_networkmanager_global_network.test.id + site_id = aws_networkmanager_site.test.id + + # Create one device at a time. + depends_on = [aws_networkmanager_device.test1] +} + +resource "null_resource" "test" {} + +variable "unknownTagKey" { + type = string + nullable = false +} diff --git a/internal/service/networkmanager/testdata/Connection/tagsComputed2/main_gen.tf b/internal/service/networkmanager/testdata/Connection/tagsComputed2/main_gen.tf new file mode 100644 index 000000000000..5155225f88d5 --- /dev/null +++ b/internal/service/networkmanager/testdata/Connection/tagsComputed2/main_gen.tf @@ -0,0 +1,53 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +provider "null" {} + +resource "aws_networkmanager_connection" "test" { + global_network_id = aws_networkmanager_global_network.test.id + device_id = aws_networkmanager_device.test1.id + connected_device_id = aws_networkmanager_device.test2.id + + tags = { + (var.unknownTagKey) = null_resource.test.id + (var.knownTagKey) = var.knownTagValue + } +} + +# testAccConnectionBaseConfig + +resource "aws_networkmanager_global_network" "test" {} + +resource "aws_networkmanager_site" "test" { + global_network_id = aws_networkmanager_global_network.test.id +} + +resource "aws_networkmanager_device" "test1" { + global_network_id = aws_networkmanager_global_network.test.id + site_id = aws_networkmanager_site.test.id +} + +resource "aws_networkmanager_device" "test2" { + global_network_id = aws_networkmanager_global_network.test.id + site_id = aws_networkmanager_site.test.id + + # Create one device at a time. + depends_on = [aws_networkmanager_device.test1] +} + +resource "null_resource" "test" {} + +variable "unknownTagKey" { + type = string + nullable = false +} + +variable "knownTagKey" { + type = string + nullable = false +} + +variable "knownTagValue" { + type = string + nullable = false +} diff --git a/internal/service/networkmanager/testdata/Connection/tags_defaults/main_gen.tf b/internal/service/networkmanager/testdata/Connection/tags_defaults/main_gen.tf new file mode 100644 index 000000000000..df984cac49e5 --- /dev/null +++ b/internal/service/networkmanager/testdata/Connection/tags_defaults/main_gen.tf @@ -0,0 +1,49 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +provider "aws" { + default_tags { + tags = var.provider_tags + } +} + +resource "aws_networkmanager_connection" "test" { + global_network_id = aws_networkmanager_global_network.test.id + device_id = aws_networkmanager_device.test1.id + connected_device_id = aws_networkmanager_device.test2.id + + tags = var.resource_tags +} + +# testAccConnectionBaseConfig + +resource "aws_networkmanager_global_network" "test" {} + +resource "aws_networkmanager_site" "test" { + global_network_id = aws_networkmanager_global_network.test.id +} + +resource "aws_networkmanager_device" "test1" { + global_network_id = aws_networkmanager_global_network.test.id + site_id = aws_networkmanager_site.test.id +} + +resource "aws_networkmanager_device" "test2" { + global_network_id = aws_networkmanager_global_network.test.id + site_id = aws_networkmanager_site.test.id + + # Create one device at a time. + depends_on = [aws_networkmanager_device.test1] +} + +variable "resource_tags" { + description = "Tags to set on resource. To specify no tags, set to `null`" + # Not setting a default, so that this must explicitly be set to `null` to specify no tags + type = map(string) + nullable = true +} + +variable "provider_tags" { + type = map(string) + nullable = false +} diff --git a/internal/service/networkmanager/testdata/Connection/tags_ignore/main_gen.tf b/internal/service/networkmanager/testdata/Connection/tags_ignore/main_gen.tf new file mode 100644 index 000000000000..cc8ebc044605 --- /dev/null +++ b/internal/service/networkmanager/testdata/Connection/tags_ignore/main_gen.tf @@ -0,0 +1,58 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +provider "aws" { + default_tags { + tags = var.provider_tags + } + ignore_tags { + keys = var.ignore_tag_keys + } +} + +resource "aws_networkmanager_connection" "test" { + global_network_id = aws_networkmanager_global_network.test.id + device_id = aws_networkmanager_device.test1.id + connected_device_id = aws_networkmanager_device.test2.id + + tags = var.resource_tags +} + +# testAccConnectionBaseConfig + +resource "aws_networkmanager_global_network" "test" {} + +resource "aws_networkmanager_site" "test" { + global_network_id = aws_networkmanager_global_network.test.id +} + +resource "aws_networkmanager_device" "test1" { + global_network_id = aws_networkmanager_global_network.test.id + site_id = aws_networkmanager_site.test.id +} + +resource "aws_networkmanager_device" "test2" { + global_network_id = aws_networkmanager_global_network.test.id + site_id = aws_networkmanager_site.test.id + + # Create one device at a time. + depends_on = [aws_networkmanager_device.test1] +} + +variable "resource_tags" { + description = "Tags to set on resource. To specify no tags, set to `null`" + # Not setting a default, so that this must explicitly be set to `null` to specify no tags + type = map(string) + nullable = true +} + +variable "provider_tags" { + type = map(string) + nullable = true + default = null +} + +variable "ignore_tag_keys" { + type = set(string) + nullable = false +} diff --git a/internal/service/networkmanager/testdata/CoreNetwork/tags/main_gen.tf b/internal/service/networkmanager/testdata/CoreNetwork/tags/main_gen.tf new file mode 100644 index 000000000000..9bdc8455678e --- /dev/null +++ b/internal/service/networkmanager/testdata/CoreNetwork/tags/main_gen.tf @@ -0,0 +1,17 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_networkmanager_core_network" "test" { + global_network_id = aws_networkmanager_global_network.test.id + + tags = var.resource_tags +} + +resource "aws_networkmanager_global_network" "test" {} + +variable "resource_tags" { + description = "Tags to set on resource. To specify no tags, set to `null`" + # Not setting a default, so that this must explicitly be set to `null` to specify no tags + type = map(string) + nullable = true +} diff --git a/internal/service/networkmanager/testdata/CoreNetwork/tagsComputed1/main_gen.tf b/internal/service/networkmanager/testdata/CoreNetwork/tagsComputed1/main_gen.tf new file mode 100644 index 000000000000..058b0e5b102f --- /dev/null +++ b/internal/service/networkmanager/testdata/CoreNetwork/tagsComputed1/main_gen.tf @@ -0,0 +1,21 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +provider "null" {} + +resource "aws_networkmanager_core_network" "test" { + global_network_id = aws_networkmanager_global_network.test.id + + tags = { + (var.unknownTagKey) = null_resource.test.id + } +} + +resource "aws_networkmanager_global_network" "test" {} + +resource "null_resource" "test" {} + +variable "unknownTagKey" { + type = string + nullable = false +} diff --git a/internal/service/networkmanager/testdata/CoreNetwork/tagsComputed2/main_gen.tf b/internal/service/networkmanager/testdata/CoreNetwork/tagsComputed2/main_gen.tf new file mode 100644 index 000000000000..7e62e77c6d3a --- /dev/null +++ b/internal/service/networkmanager/testdata/CoreNetwork/tagsComputed2/main_gen.tf @@ -0,0 +1,32 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +provider "null" {} + +resource "aws_networkmanager_core_network" "test" { + global_network_id = aws_networkmanager_global_network.test.id + + tags = { + (var.unknownTagKey) = null_resource.test.id + (var.knownTagKey) = var.knownTagValue + } +} + +resource "aws_networkmanager_global_network" "test" {} + +resource "null_resource" "test" {} + +variable "unknownTagKey" { + type = string + nullable = false +} + +variable "knownTagKey" { + type = string + nullable = false +} + +variable "knownTagValue" { + type = string + nullable = false +} diff --git a/internal/service/networkmanager/testdata/CoreNetwork/tags_defaults/main_gen.tf b/internal/service/networkmanager/testdata/CoreNetwork/tags_defaults/main_gen.tf new file mode 100644 index 000000000000..88235f8d1703 --- /dev/null +++ b/internal/service/networkmanager/testdata/CoreNetwork/tags_defaults/main_gen.tf @@ -0,0 +1,28 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +provider "aws" { + default_tags { + tags = var.provider_tags + } +} + +resource "aws_networkmanager_core_network" "test" { + global_network_id = aws_networkmanager_global_network.test.id + + tags = var.resource_tags +} + +resource "aws_networkmanager_global_network" "test" {} + +variable "resource_tags" { + description = "Tags to set on resource. To specify no tags, set to `null`" + # Not setting a default, so that this must explicitly be set to `null` to specify no tags + type = map(string) + nullable = true +} + +variable "provider_tags" { + type = map(string) + nullable = false +} diff --git a/internal/service/networkmanager/testdata/CoreNetwork/tags_ignore/main_gen.tf b/internal/service/networkmanager/testdata/CoreNetwork/tags_ignore/main_gen.tf new file mode 100644 index 000000000000..beba3bd512ae --- /dev/null +++ b/internal/service/networkmanager/testdata/CoreNetwork/tags_ignore/main_gen.tf @@ -0,0 +1,37 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +provider "aws" { + default_tags { + tags = var.provider_tags + } + ignore_tags { + keys = var.ignore_tag_keys + } +} + +resource "aws_networkmanager_core_network" "test" { + global_network_id = aws_networkmanager_global_network.test.id + + tags = var.resource_tags +} + +resource "aws_networkmanager_global_network" "test" {} + +variable "resource_tags" { + description = "Tags to set on resource. To specify no tags, set to `null`" + # Not setting a default, so that this must explicitly be set to `null` to specify no tags + type = map(string) + nullable = true +} + +variable "provider_tags" { + type = map(string) + nullable = true + default = null +} + +variable "ignore_tag_keys" { + type = set(string) + nullable = false +} diff --git a/internal/service/networkmanager/testdata/Device/tags/main_gen.tf b/internal/service/networkmanager/testdata/Device/tags/main_gen.tf new file mode 100644 index 000000000000..2846f7b6a79a --- /dev/null +++ b/internal/service/networkmanager/testdata/Device/tags/main_gen.tf @@ -0,0 +1,17 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resource "aws_networkmanager_device" "test" { + global_network_id = aws_networkmanager_global_network.test.id + + tags = var.resource_tags +} + +resource "aws_networkmanager_global_network" "test" {} + +variable "resource_tags" { + description = "Tags to set on resource. To specify no tags, set to `null`" + # Not setting a default, so that this must explicitly be set to `null` to specify no tags + type = map(string) + nullable = true +} diff --git a/internal/service/networkmanager/testdata/Device/tagsComputed1/main_gen.tf b/internal/service/networkmanager/testdata/Device/tagsComputed1/main_gen.tf new file mode 100644 index 000000000000..b3553e01e618 --- /dev/null +++ b/internal/service/networkmanager/testdata/Device/tagsComputed1/main_gen.tf @@ -0,0 +1,21 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +provider "null" {} + +resource "aws_networkmanager_device" "test" { + global_network_id = aws_networkmanager_global_network.test.id + + tags = { + (var.unknownTagKey) = null_resource.test.id + } +} + +resource "aws_networkmanager_global_network" "test" {} + +resource "null_resource" "test" {} + +variable "unknownTagKey" { + type = string + nullable = false +} diff --git a/internal/service/networkmanager/testdata/Device/tagsComputed2/main_gen.tf b/internal/service/networkmanager/testdata/Device/tagsComputed2/main_gen.tf new file mode 100644 index 000000000000..1b0c2a797790 --- /dev/null +++ b/internal/service/networkmanager/testdata/Device/tagsComputed2/main_gen.tf @@ -0,0 +1,32 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +provider "null" {} + +resource "aws_networkmanager_device" "test" { + global_network_id = aws_networkmanager_global_network.test.id + + tags = { + (var.unknownTagKey) = null_resource.test.id + (var.knownTagKey) = var.knownTagValue + } +} + +resource "aws_networkmanager_global_network" "test" {} + +resource "null_resource" "test" {} + +variable "unknownTagKey" { + type = string + nullable = false +} + +variable "knownTagKey" { + type = string + nullable = false +} + +variable "knownTagValue" { + type = string + nullable = false +} diff --git a/internal/service/networkmanager/testdata/Device/tags_defaults/main_gen.tf b/internal/service/networkmanager/testdata/Device/tags_defaults/main_gen.tf new file mode 100644 index 000000000000..7252cc6e4399 --- /dev/null +++ b/internal/service/networkmanager/testdata/Device/tags_defaults/main_gen.tf @@ -0,0 +1,28 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +provider "aws" { + default_tags { + tags = var.provider_tags + } +} + +resource "aws_networkmanager_device" "test" { + global_network_id = aws_networkmanager_global_network.test.id + + tags = var.resource_tags +} + +resource "aws_networkmanager_global_network" "test" {} + +variable "resource_tags" { + description = "Tags to set on resource. To specify no tags, set to `null`" + # Not setting a default, so that this must explicitly be set to `null` to specify no tags + type = map(string) + nullable = true +} + +variable "provider_tags" { + type = map(string) + nullable = false +} diff --git a/internal/service/networkmanager/testdata/Device/tags_ignore/main_gen.tf b/internal/service/networkmanager/testdata/Device/tags_ignore/main_gen.tf new file mode 100644 index 000000000000..e78593d8644a --- /dev/null +++ b/internal/service/networkmanager/testdata/Device/tags_ignore/main_gen.tf @@ -0,0 +1,37 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +provider "aws" { + default_tags { + tags = var.provider_tags + } + ignore_tags { + keys = var.ignore_tag_keys + } +} + +resource "aws_networkmanager_device" "test" { + global_network_id = aws_networkmanager_global_network.test.id + + tags = var.resource_tags +} + +resource "aws_networkmanager_global_network" "test" {} + +variable "resource_tags" { + description = "Tags to set on resource. 
To specify no tags, set to `null`" + # Not setting a default, so that this must explicitly be set to `null` to specify no tags + type = map(string) + nullable = true +} + +variable "provider_tags" { + type = map(string) + nullable = true + default = null +} + +variable "ignore_tag_keys" { + type = set(string) + nullable = false +} diff --git a/internal/service/networkmanager/testdata/DirectConnectGatewayAttachment/tags/main_gen.tf b/internal/service/networkmanager/testdata/DirectConnectGatewayAttachment/tags/main_gen.tf new file mode 100644 index 000000000000..e56702da89af --- /dev/null +++ b/internal/service/networkmanager/testdata/DirectConnectGatewayAttachment/tags/main_gen.tf @@ -0,0 +1,80 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resource "aws_networkmanager_dx_gateway_attachment" "test" { + core_network_id = aws_networkmanager_core_network_policy_attachment.test.core_network_id + direct_connect_gateway_arn = aws_dx_gateway.test.arn + edge_locations = [data.aws_region.current.region] + + tags = var.resource_tags +} + +# testAccDirectConnectGatewayAttachmentConfig_base + +resource "aws_dx_gateway" "test" { + name = var.rName + amazon_side_asn = 65000 +} + +resource "aws_networkmanager_global_network" "test" {} + +resource "aws_networkmanager_core_network" "test" { + global_network_id = aws_networkmanager_global_network.test.id +} + +resource "aws_networkmanager_core_network_policy_attachment" "test" { + core_network_id = aws_networkmanager_core_network.test.id + policy_document = data.aws_networkmanager_core_network_policy_document.test.json +} + +data "aws_region" "current" {} + +data "aws_networkmanager_core_network_policy_document" "test" { + core_network_configuration { + vpn_ecmp_support = false + asn_ranges = ["64512-64555"] + edge_locations { + location = data.aws_region.current.region + asn = 64512 + } + } + + segments { + name = "shared" + description = "SegmentForSharedServices" + require_attachment_acceptance = false 
+ } + + segment_actions { + action = "share" + mode = "attachment-route" + segment = "shared" + share_with = ["*"] + } + + attachment_policies { + rule_number = 1 + + conditions { + type = "any" + } + + action { + association_method = "constant" + segment = "shared" + } + } +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} + +variable "resource_tags" { + description = "Tags to set on resource. To specify no tags, set to `null`" + # Not setting a default, so that this must explicitly be set to `null` to specify no tags + type = map(string) + nullable = true +} diff --git a/internal/service/networkmanager/testdata/DirectConnectGatewayAttachment/tagsComputed1/main_gen.tf b/internal/service/networkmanager/testdata/DirectConnectGatewayAttachment/tagsComputed1/main_gen.tf new file mode 100644 index 000000000000..09c8351112bc --- /dev/null +++ b/internal/service/networkmanager/testdata/DirectConnectGatewayAttachment/tagsComputed1/main_gen.tf @@ -0,0 +1,84 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +provider "null" {} + +resource "aws_networkmanager_dx_gateway_attachment" "test" { + core_network_id = aws_networkmanager_core_network_policy_attachment.test.core_network_id + direct_connect_gateway_arn = aws_dx_gateway.test.arn + edge_locations = [data.aws_region.current.region] + + tags = { + (var.unknownTagKey) = null_resource.test.id + } +} + +# testAccDirectConnectGatewayAttachmentConfig_base + +resource "aws_dx_gateway" "test" { + name = var.rName + amazon_side_asn = 65000 +} + +resource "aws_networkmanager_global_network" "test" {} + +resource "aws_networkmanager_core_network" "test" { + global_network_id = aws_networkmanager_global_network.test.id +} + +resource "aws_networkmanager_core_network_policy_attachment" "test" { + core_network_id = aws_networkmanager_core_network.test.id + policy_document = data.aws_networkmanager_core_network_policy_document.test.json +} + +data "aws_region" "current" {} + +data "aws_networkmanager_core_network_policy_document" "test" { + core_network_configuration { + vpn_ecmp_support = false + asn_ranges = ["64512-64555"] + edge_locations { + location = data.aws_region.current.region + asn = 64512 + } + } + + segments { + name = "shared" + description = "SegmentForSharedServices" + require_attachment_acceptance = false + } + + segment_actions { + action = "share" + mode = "attachment-route" + segment = "shared" + share_with = ["*"] + } + + attachment_policies { + rule_number = 1 + + conditions { + type = "any" + } + + action { + association_method = "constant" + segment = "shared" + } + } +} + +resource "null_resource" "test" {} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} + +variable "unknownTagKey" { + type = string + nullable = false +} diff --git a/internal/service/networkmanager/testdata/DirectConnectGatewayAttachment/tagsComputed2/main_gen.tf 
b/internal/service/networkmanager/testdata/DirectConnectGatewayAttachment/tagsComputed2/main_gen.tf new file mode 100644 index 000000000000..b4a23371d044 --- /dev/null +++ b/internal/service/networkmanager/testdata/DirectConnectGatewayAttachment/tagsComputed2/main_gen.tf @@ -0,0 +1,95 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +provider "null" {} + +resource "aws_networkmanager_dx_gateway_attachment" "test" { + core_network_id = aws_networkmanager_core_network_policy_attachment.test.core_network_id + direct_connect_gateway_arn = aws_dx_gateway.test.arn + edge_locations = [data.aws_region.current.region] + + tags = { + (var.unknownTagKey) = null_resource.test.id + (var.knownTagKey) = var.knownTagValue + } +} + +# testAccDirectConnectGatewayAttachmentConfig_base + +resource "aws_dx_gateway" "test" { + name = var.rName + amazon_side_asn = 65000 +} + +resource "aws_networkmanager_global_network" "test" {} + +resource "aws_networkmanager_core_network" "test" { + global_network_id = aws_networkmanager_global_network.test.id +} + +resource "aws_networkmanager_core_network_policy_attachment" "test" { + core_network_id = aws_networkmanager_core_network.test.id + policy_document = data.aws_networkmanager_core_network_policy_document.test.json +} + +data "aws_region" "current" {} + +data "aws_networkmanager_core_network_policy_document" "test" { + core_network_configuration { + vpn_ecmp_support = false + asn_ranges = ["64512-64555"] + edge_locations { + location = data.aws_region.current.region + asn = 64512 + } + } + + segments { + name = "shared" + description = "SegmentForSharedServices" + require_attachment_acceptance = false + } + + segment_actions { + action = "share" + mode = "attachment-route" + segment = "shared" + share_with = ["*"] + } + + attachment_policies { + rule_number = 1 + + conditions { + type = "any" + } + + action { + association_method = "constant" + segment = "shared" + } + } +} + +resource "null_resource" "test" {} + 
+variable "rName" { + description = "Name for resource" + type = string + nullable = false +} + +variable "unknownTagKey" { + type = string + nullable = false +} + +variable "knownTagKey" { + type = string + nullable = false +} + +variable "knownTagValue" { + type = string + nullable = false +} diff --git a/internal/service/networkmanager/testdata/DirectConnectGatewayAttachment/tags_defaults/main_gen.tf b/internal/service/networkmanager/testdata/DirectConnectGatewayAttachment/tags_defaults/main_gen.tf new file mode 100644 index 000000000000..0415aa1fcb88 --- /dev/null +++ b/internal/service/networkmanager/testdata/DirectConnectGatewayAttachment/tags_defaults/main_gen.tf @@ -0,0 +1,91 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +provider "aws" { + default_tags { + tags = var.provider_tags + } +} + +resource "aws_networkmanager_dx_gateway_attachment" "test" { + core_network_id = aws_networkmanager_core_network_policy_attachment.test.core_network_id + direct_connect_gateway_arn = aws_dx_gateway.test.arn + edge_locations = [data.aws_region.current.region] + + tags = var.resource_tags +} + +# testAccDirectConnectGatewayAttachmentConfig_base + +resource "aws_dx_gateway" "test" { + name = var.rName + amazon_side_asn = 65000 +} + +resource "aws_networkmanager_global_network" "test" {} + +resource "aws_networkmanager_core_network" "test" { + global_network_id = aws_networkmanager_global_network.test.id +} + +resource "aws_networkmanager_core_network_policy_attachment" "test" { + core_network_id = aws_networkmanager_core_network.test.id + policy_document = data.aws_networkmanager_core_network_policy_document.test.json +} + +data "aws_region" "current" {} + +data "aws_networkmanager_core_network_policy_document" "test" { + core_network_configuration { + vpn_ecmp_support = false + asn_ranges = ["64512-64555"] + edge_locations { + location = data.aws_region.current.region + asn = 64512 + } + } + + segments { + name = "shared" + description = 
"SegmentForSharedServices" + require_attachment_acceptance = false + } + + segment_actions { + action = "share" + mode = "attachment-route" + segment = "shared" + share_with = ["*"] + } + + attachment_policies { + rule_number = 1 + + conditions { + type = "any" + } + + action { + association_method = "constant" + segment = "shared" + } + } +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} + +variable "resource_tags" { + description = "Tags to set on resource. To specify no tags, set to `null`" + # Not setting a default, so that this must explicitly be set to `null` to specify no tags + type = map(string) + nullable = true +} + +variable "provider_tags" { + type = map(string) + nullable = false +} diff --git a/internal/service/networkmanager/testdata/DirectConnectGatewayAttachment/tags_ignore/main_gen.tf b/internal/service/networkmanager/testdata/DirectConnectGatewayAttachment/tags_ignore/main_gen.tf new file mode 100644 index 000000000000..ae219ead2304 --- /dev/null +++ b/internal/service/networkmanager/testdata/DirectConnectGatewayAttachment/tags_ignore/main_gen.tf @@ -0,0 +1,100 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +provider "aws" { + default_tags { + tags = var.provider_tags + } + ignore_tags { + keys = var.ignore_tag_keys + } +} + +resource "aws_networkmanager_dx_gateway_attachment" "test" { + core_network_id = aws_networkmanager_core_network_policy_attachment.test.core_network_id + direct_connect_gateway_arn = aws_dx_gateway.test.arn + edge_locations = [data.aws_region.current.region] + + tags = var.resource_tags +} + +# testAccDirectConnectGatewayAttachmentConfig_base + +resource "aws_dx_gateway" "test" { + name = var.rName + amazon_side_asn = 65000 +} + +resource "aws_networkmanager_global_network" "test" {} + +resource "aws_networkmanager_core_network" "test" { + global_network_id = aws_networkmanager_global_network.test.id +} + +resource "aws_networkmanager_core_network_policy_attachment" "test" { + core_network_id = aws_networkmanager_core_network.test.id + policy_document = data.aws_networkmanager_core_network_policy_document.test.json +} + +data "aws_region" "current" {} + +data "aws_networkmanager_core_network_policy_document" "test" { + core_network_configuration { + vpn_ecmp_support = false + asn_ranges = ["64512-64555"] + edge_locations { + location = data.aws_region.current.region + asn = 64512 + } + } + + segments { + name = "shared" + description = "SegmentForSharedServices" + require_attachment_acceptance = false + } + + segment_actions { + action = "share" + mode = "attachment-route" + segment = "shared" + share_with = ["*"] + } + + attachment_policies { + rule_number = 1 + + conditions { + type = "any" + } + + action { + association_method = "constant" + segment = "shared" + } + } +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} + +variable "resource_tags" { + description = "Tags to set on resource. 
To specify no tags, set to `null`" + # Not setting a default, so that this must explicitly be set to `null` to specify no tags + type = map(string) + nullable = true +} + +variable "provider_tags" { + type = map(string) + nullable = true + default = null +} + +variable "ignore_tag_keys" { + type = set(string) + nullable = false +} diff --git a/internal/service/networkmanager/testdata/GlobalNetwork/tags/main_gen.tf b/internal/service/networkmanager/testdata/GlobalNetwork/tags/main_gen.tf new file mode 100644 index 000000000000..2ef36c17448f --- /dev/null +++ b/internal/service/networkmanager/testdata/GlobalNetwork/tags/main_gen.tf @@ -0,0 +1,14 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resource "aws_networkmanager_global_network" "test" { + + tags = var.resource_tags +} + +variable "resource_tags" { + description = "Tags to set on resource. To specify no tags, set to `null`" + # Not setting a default, so that this must explicitly be set to `null` to specify no tags + type = map(string) + nullable = true +} diff --git a/internal/service/networkmanager/testdata/GlobalNetwork/tagsComputed1/main_gen.tf b/internal/service/networkmanager/testdata/GlobalNetwork/tagsComputed1/main_gen.tf new file mode 100644 index 000000000000..063e38bd4465 --- /dev/null +++ b/internal/service/networkmanager/testdata/GlobalNetwork/tagsComputed1/main_gen.tf @@ -0,0 +1,18 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +provider "null" {} + +resource "aws_networkmanager_global_network" "test" { + + tags = { + (var.unknownTagKey) = null_resource.test.id + } +} + +resource "null_resource" "test" {} + +variable "unknownTagKey" { + type = string + nullable = false +} diff --git a/internal/service/networkmanager/testdata/GlobalNetwork/tagsComputed2/main_gen.tf b/internal/service/networkmanager/testdata/GlobalNetwork/tagsComputed2/main_gen.tf new file mode 100644 index 000000000000..ef0f3129f2aa --- /dev/null +++ b/internal/service/networkmanager/testdata/GlobalNetwork/tagsComputed2/main_gen.tf @@ -0,0 +1,29 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +provider "null" {} + +resource "aws_networkmanager_global_network" "test" { + + tags = { + (var.unknownTagKey) = null_resource.test.id + (var.knownTagKey) = var.knownTagValue + } +} + +resource "null_resource" "test" {} + +variable "unknownTagKey" { + type = string + nullable = false +} + +variable "knownTagKey" { + type = string + nullable = false +} + +variable "knownTagValue" { + type = string + nullable = false +} diff --git a/internal/service/networkmanager/testdata/GlobalNetwork/tags_defaults/main_gen.tf b/internal/service/networkmanager/testdata/GlobalNetwork/tags_defaults/main_gen.tf new file mode 100644 index 000000000000..0ee6d531d148 --- /dev/null +++ b/internal/service/networkmanager/testdata/GlobalNetwork/tags_defaults/main_gen.tf @@ -0,0 +1,25 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +provider "aws" { + default_tags { + tags = var.provider_tags + } +} + +resource "aws_networkmanager_global_network" "test" { + + tags = var.resource_tags +} + +variable "resource_tags" { + description = "Tags to set on resource. 
To specify no tags, set to `null`" + # Not setting a default, so that this must explicitly be set to `null` to specify no tags + type = map(string) + nullable = true +} + +variable "provider_tags" { + type = map(string) + nullable = false +} diff --git a/internal/service/networkmanager/testdata/GlobalNetwork/tags_ignore/main_gen.tf b/internal/service/networkmanager/testdata/GlobalNetwork/tags_ignore/main_gen.tf new file mode 100644 index 000000000000..1dcc92a42654 --- /dev/null +++ b/internal/service/networkmanager/testdata/GlobalNetwork/tags_ignore/main_gen.tf @@ -0,0 +1,34 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +provider "aws" { + default_tags { + tags = var.provider_tags + } + ignore_tags { + keys = var.ignore_tag_keys + } +} + +resource "aws_networkmanager_global_network" "test" { + + tags = var.resource_tags +} + +variable "resource_tags" { + description = "Tags to set on resource. To specify no tags, set to `null`" + # Not setting a default, so that this must explicitly be set to `null` to specify no tags + type = map(string) + nullable = true +} + +variable "provider_tags" { + type = map(string) + nullable = true + default = null +} + +variable "ignore_tag_keys" { + type = set(string) + nullable = false +} diff --git a/internal/service/networkmanager/testdata/Link/tags/main_gen.tf b/internal/service/networkmanager/testdata/Link/tags/main_gen.tf new file mode 100644 index 000000000000..e2db0ac4bd89 --- /dev/null +++ b/internal/service/networkmanager/testdata/Link/tags/main_gen.tf @@ -0,0 +1,27 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_networkmanager_link" "test" { + global_network_id = aws_networkmanager_global_network.test.id + site_id = aws_networkmanager_site.test.id + + bandwidth { + download_speed = 50 + upload_speed = 10 + } + + tags = var.resource_tags +} + +resource "aws_networkmanager_site" "test" { + global_network_id = aws_networkmanager_global_network.test.id +} + +resource "aws_networkmanager_global_network" "test" {} + +variable "resource_tags" { + description = "Tags to set on resource. To specify no tags, set to `null`" + # Not setting a default, so that this must explicitly be set to `null` to specify no tags + type = map(string) + nullable = true +} diff --git a/internal/service/networkmanager/testdata/Link/tagsComputed1/main_gen.tf b/internal/service/networkmanager/testdata/Link/tagsComputed1/main_gen.tf new file mode 100644 index 000000000000..84cdac21e538 --- /dev/null +++ b/internal/service/networkmanager/testdata/Link/tagsComputed1/main_gen.tf @@ -0,0 +1,31 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +provider "null" {} + +resource "aws_networkmanager_link" "test" { + global_network_id = aws_networkmanager_global_network.test.id + site_id = aws_networkmanager_site.test.id + + bandwidth { + download_speed = 50 + upload_speed = 10 + } + + tags = { + (var.unknownTagKey) = null_resource.test.id + } +} + +resource "aws_networkmanager_site" "test" { + global_network_id = aws_networkmanager_global_network.test.id +} + +resource "aws_networkmanager_global_network" "test" {} + +resource "null_resource" "test" {} + +variable "unknownTagKey" { + type = string + nullable = false +} diff --git a/internal/service/networkmanager/testdata/Link/tagsComputed2/main_gen.tf b/internal/service/networkmanager/testdata/Link/tagsComputed2/main_gen.tf new file mode 100644 index 000000000000..b18eb95f1539 --- /dev/null +++ b/internal/service/networkmanager/testdata/Link/tagsComputed2/main_gen.tf @@ -0,0 +1,42 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +provider "null" {} + +resource "aws_networkmanager_link" "test" { + global_network_id = aws_networkmanager_global_network.test.id + site_id = aws_networkmanager_site.test.id + + bandwidth { + download_speed = 50 + upload_speed = 10 + } + + tags = { + (var.unknownTagKey) = null_resource.test.id + (var.knownTagKey) = var.knownTagValue + } +} + +resource "aws_networkmanager_site" "test" { + global_network_id = aws_networkmanager_global_network.test.id +} + +resource "aws_networkmanager_global_network" "test" {} + +resource "null_resource" "test" {} + +variable "unknownTagKey" { + type = string + nullable = false +} + +variable "knownTagKey" { + type = string + nullable = false +} + +variable "knownTagValue" { + type = string + nullable = false +} diff --git a/internal/service/networkmanager/testdata/Link/tags_defaults/main_gen.tf b/internal/service/networkmanager/testdata/Link/tags_defaults/main_gen.tf new file mode 100644 index 000000000000..42ee98dca702 --- /dev/null +++ 
b/internal/service/networkmanager/testdata/Link/tags_defaults/main_gen.tf @@ -0,0 +1,38 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +provider "aws" { + default_tags { + tags = var.provider_tags + } +} + +resource "aws_networkmanager_link" "test" { + global_network_id = aws_networkmanager_global_network.test.id + site_id = aws_networkmanager_site.test.id + + bandwidth { + download_speed = 50 + upload_speed = 10 + } + + tags = var.resource_tags +} + +resource "aws_networkmanager_site" "test" { + global_network_id = aws_networkmanager_global_network.test.id +} + +resource "aws_networkmanager_global_network" "test" {} + +variable "resource_tags" { + description = "Tags to set on resource. To specify no tags, set to `null`" + # Not setting a default, so that this must explicitly be set to `null` to specify no tags + type = map(string) + nullable = true +} + +variable "provider_tags" { + type = map(string) + nullable = false +} diff --git a/internal/service/networkmanager/testdata/Link/tags_ignore/main_gen.tf b/internal/service/networkmanager/testdata/Link/tags_ignore/main_gen.tf new file mode 100644 index 000000000000..5ebe12b97173 --- /dev/null +++ b/internal/service/networkmanager/testdata/Link/tags_ignore/main_gen.tf @@ -0,0 +1,47 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +provider "aws" { + default_tags { + tags = var.provider_tags + } + ignore_tags { + keys = var.ignore_tag_keys + } +} + +resource "aws_networkmanager_link" "test" { + global_network_id = aws_networkmanager_global_network.test.id + site_id = aws_networkmanager_site.test.id + + bandwidth { + download_speed = 50 + upload_speed = 10 + } + + tags = var.resource_tags +} + +resource "aws_networkmanager_site" "test" { + global_network_id = aws_networkmanager_global_network.test.id +} + +resource "aws_networkmanager_global_network" "test" {} + +variable "resource_tags" { + description = "Tags to set on resource. 
To specify no tags, set to `null`" + # Not setting a default, so that this must explicitly be set to `null` to specify no tags + type = map(string) + nullable = true +} + +variable "provider_tags" { + type = map(string) + nullable = true + default = null +} + +variable "ignore_tag_keys" { + type = set(string) + nullable = false +} diff --git a/internal/service/networkmanager/testdata/Site/tags/main_gen.tf b/internal/service/networkmanager/testdata/Site/tags/main_gen.tf new file mode 100644 index 000000000000..cf8e391c5452 --- /dev/null +++ b/internal/service/networkmanager/testdata/Site/tags/main_gen.tf @@ -0,0 +1,17 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resource "aws_networkmanager_site" "test" { + global_network_id = aws_networkmanager_global_network.test.id + + tags = var.resource_tags +} + +resource "aws_networkmanager_global_network" "test" {} + +variable "resource_tags" { + description = "Tags to set on resource. To specify no tags, set to `null`" + # Not setting a default, so that this must explicitly be set to `null` to specify no tags + type = map(string) + nullable = true +} diff --git a/internal/service/networkmanager/testdata/Site/tagsComputed1/main_gen.tf b/internal/service/networkmanager/testdata/Site/tagsComputed1/main_gen.tf new file mode 100644 index 000000000000..c06f10ce8419 --- /dev/null +++ b/internal/service/networkmanager/testdata/Site/tagsComputed1/main_gen.tf @@ -0,0 +1,21 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +provider "null" {} + +resource "aws_networkmanager_site" "test" { + global_network_id = aws_networkmanager_global_network.test.id + + tags = { + (var.unknownTagKey) = null_resource.test.id + } +} + +resource "aws_networkmanager_global_network" "test" {} + +resource "null_resource" "test" {} + +variable "unknownTagKey" { + type = string + nullable = false +} diff --git a/internal/service/networkmanager/testdata/Site/tagsComputed2/main_gen.tf b/internal/service/networkmanager/testdata/Site/tagsComputed2/main_gen.tf new file mode 100644 index 000000000000..b681f6b2a8e4 --- /dev/null +++ b/internal/service/networkmanager/testdata/Site/tagsComputed2/main_gen.tf @@ -0,0 +1,32 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +provider "null" {} + +resource "aws_networkmanager_site" "test" { + global_network_id = aws_networkmanager_global_network.test.id + + tags = { + (var.unknownTagKey) = null_resource.test.id + (var.knownTagKey) = var.knownTagValue + } +} + +resource "aws_networkmanager_global_network" "test" {} + +resource "null_resource" "test" {} + +variable "unknownTagKey" { + type = string + nullable = false +} + +variable "knownTagKey" { + type = string + nullable = false +} + +variable "knownTagValue" { + type = string + nullable = false +} diff --git a/internal/service/networkmanager/testdata/Site/tags_defaults/main_gen.tf b/internal/service/networkmanager/testdata/Site/tags_defaults/main_gen.tf new file mode 100644 index 000000000000..b33151ef1a40 --- /dev/null +++ b/internal/service/networkmanager/testdata/Site/tags_defaults/main_gen.tf @@ -0,0 +1,28 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +provider "aws" { + default_tags { + tags = var.provider_tags + } +} + +resource "aws_networkmanager_site" "test" { + global_network_id = aws_networkmanager_global_network.test.id + + tags = var.resource_tags +} + +resource "aws_networkmanager_global_network" "test" {} + +variable "resource_tags" { + description = "Tags to set on resource. To specify no tags, set to `null`" + # Not setting a default, so that this must explicitly be set to `null` to specify no tags + type = map(string) + nullable = true +} + +variable "provider_tags" { + type = map(string) + nullable = false +} diff --git a/internal/service/networkmanager/testdata/Site/tags_ignore/main_gen.tf b/internal/service/networkmanager/testdata/Site/tags_ignore/main_gen.tf new file mode 100644 index 000000000000..63c861e2a7cc --- /dev/null +++ b/internal/service/networkmanager/testdata/Site/tags_ignore/main_gen.tf @@ -0,0 +1,37 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +provider "aws" { + default_tags { + tags = var.provider_tags + } + ignore_tags { + keys = var.ignore_tag_keys + } +} + +resource "aws_networkmanager_site" "test" { + global_network_id = aws_networkmanager_global_network.test.id + + tags = var.resource_tags +} + +resource "aws_networkmanager_global_network" "test" {} + +variable "resource_tags" { + description = "Tags to set on resource. 
To specify no tags, set to `null`" + # Not setting a default, so that this must explicitly be set to `null` to specify no tags + type = map(string) + nullable = true +} + +variable "provider_tags" { + type = map(string) + nullable = true + default = null +} + +variable "ignore_tag_keys" { + type = set(string) + nullable = false +} diff --git a/internal/service/networkmanager/testdata/SiteToSiteVPNAttachment/tags/main_gen.tf b/internal/service/networkmanager/testdata/SiteToSiteVPNAttachment/tags/main_gen.tf new file mode 100644 index 000000000000..6b4b3581dee8 --- /dev/null +++ b/internal/service/networkmanager/testdata/SiteToSiteVPNAttachment/tags/main_gen.tf @@ -0,0 +1,105 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resource "aws_networkmanager_site_to_site_vpn_attachment" "test" { + core_network_id = aws_networkmanager_core_network_policy_attachment.test.core_network_id + vpn_connection_arn = aws_vpn_connection.test.arn + + tags = var.resource_tags +} + +resource "aws_networkmanager_attachment_accepter" "test" { + attachment_id = aws_networkmanager_site_to_site_vpn_attachment.test.id + attachment_type = aws_networkmanager_site_to_site_vpn_attachment.test.attachment_type +} + +# testAccSiteToSiteVPNAttachmentConfig_base + +resource "aws_customer_gateway" "test" { + bgp_asn = var.rBgpAsn + ip_address = var.rIPv4Address + type = "ipsec.1" + device_name = var.rName +} + +resource "aws_vpn_connection" "test" { + customer_gateway_id = aws_customer_gateway.test.id + type = "ipsec.1" +} + +resource "aws_networkmanager_global_network" "test" {} + +resource "aws_networkmanager_core_network" "test" { + global_network_id = aws_networkmanager_global_network.test.id +} + +resource "aws_networkmanager_core_network_policy_attachment" "test" { + core_network_id = aws_networkmanager_core_network.test.id + policy_document = data.aws_networkmanager_core_network_policy_document.test.json +} + +data "aws_region" "current" {} + +data 
"aws_networkmanager_core_network_policy_document" "test" { + core_network_configuration { + vpn_ecmp_support = false + asn_ranges = ["64512-64555"] + edge_locations { + location = data.aws_region.current.region + asn = 64512 + } + } + + segments { + name = "shared" + description = "SegmentForSharedServices" + require_attachment_acceptance = true + } + + segment_actions { + action = "share" + mode = "attachment-route" + segment = "shared" + share_with = ["*"] + } + + attachment_policies { + rule_number = 1 + condition_logic = "or" + + conditions { + type = "tag-value" + operator = "equals" + key = "segment" + value = "shared" + } + + action { + association_method = "constant" + segment = "shared" + } + } +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} + +variable "rBgpAsn" { + type = string + nullable = false +} + +variable "rIPv4Address" { + type = string + nullable = false +} + +variable "resource_tags" { + description = "Tags to set on resource. To specify no tags, set to `null`" + # Not setting a default, so that this must explicitly be set to `null` to specify no tags + type = map(string) + nullable = true +} diff --git a/internal/service/networkmanager/testdata/SiteToSiteVPNAttachment/tagsComputed1/main_gen.tf b/internal/service/networkmanager/testdata/SiteToSiteVPNAttachment/tagsComputed1/main_gen.tf new file mode 100644 index 000000000000..f4965cf58343 --- /dev/null +++ b/internal/service/networkmanager/testdata/SiteToSiteVPNAttachment/tagsComputed1/main_gen.tf @@ -0,0 +1,109 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +provider "null" {} + +resource "aws_networkmanager_site_to_site_vpn_attachment" "test" { + core_network_id = aws_networkmanager_core_network_policy_attachment.test.core_network_id + vpn_connection_arn = aws_vpn_connection.test.arn + + tags = { + (var.unknownTagKey) = null_resource.test.id + } +} + +resource "aws_networkmanager_attachment_accepter" "test" { + attachment_id = aws_networkmanager_site_to_site_vpn_attachment.test.id + attachment_type = aws_networkmanager_site_to_site_vpn_attachment.test.attachment_type +} + +# testAccSiteToSiteVPNAttachmentConfig_base + +resource "aws_customer_gateway" "test" { + bgp_asn = var.rBgpAsn + ip_address = var.rIPv4Address + type = "ipsec.1" + device_name = var.rName +} + +resource "aws_vpn_connection" "test" { + customer_gateway_id = aws_customer_gateway.test.id + type = "ipsec.1" +} + +resource "aws_networkmanager_global_network" "test" {} + +resource "aws_networkmanager_core_network" "test" { + global_network_id = aws_networkmanager_global_network.test.id +} + +resource "aws_networkmanager_core_network_policy_attachment" "test" { + core_network_id = aws_networkmanager_core_network.test.id + policy_document = data.aws_networkmanager_core_network_policy_document.test.json +} + +data "aws_region" "current" {} + +data "aws_networkmanager_core_network_policy_document" "test" { + core_network_configuration { + vpn_ecmp_support = false + asn_ranges = ["64512-64555"] + edge_locations { + location = data.aws_region.current.region + asn = 64512 + } + } + + segments { + name = "shared" + description = "SegmentForSharedServices" + require_attachment_acceptance = true + } + + segment_actions { + action = "share" + mode = "attachment-route" + segment = "shared" + share_with = ["*"] + } + + attachment_policies { + rule_number = 1 + condition_logic = "or" + + conditions { + type = "tag-value" + operator = "equals" + key = "segment" + value = "shared" + } + + action { + association_method = "constant" 
+ segment = "shared" + } + } +} + +resource "null_resource" "test" {} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} + +variable "rBgpAsn" { + type = string + nullable = false +} + +variable "rIPv4Address" { + type = string + nullable = false +} + +variable "unknownTagKey" { + type = string + nullable = false +} diff --git a/internal/service/networkmanager/testdata/SiteToSiteVPNAttachment/tagsComputed2/main_gen.tf b/internal/service/networkmanager/testdata/SiteToSiteVPNAttachment/tagsComputed2/main_gen.tf new file mode 100644 index 000000000000..237e46eb8662 --- /dev/null +++ b/internal/service/networkmanager/testdata/SiteToSiteVPNAttachment/tagsComputed2/main_gen.tf @@ -0,0 +1,120 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +provider "null" {} + +resource "aws_networkmanager_site_to_site_vpn_attachment" "test" { + core_network_id = aws_networkmanager_core_network_policy_attachment.test.core_network_id + vpn_connection_arn = aws_vpn_connection.test.arn + + tags = { + (var.unknownTagKey) = null_resource.test.id + (var.knownTagKey) = var.knownTagValue + } +} + +resource "aws_networkmanager_attachment_accepter" "test" { + attachment_id = aws_networkmanager_site_to_site_vpn_attachment.test.id + attachment_type = aws_networkmanager_site_to_site_vpn_attachment.test.attachment_type +} + +# testAccSiteToSiteVPNAttachmentConfig_base + +resource "aws_customer_gateway" "test" { + bgp_asn = var.rBgpAsn + ip_address = var.rIPv4Address + type = "ipsec.1" + device_name = var.rName +} + +resource "aws_vpn_connection" "test" { + customer_gateway_id = aws_customer_gateway.test.id + type = "ipsec.1" +} + +resource "aws_networkmanager_global_network" "test" {} + +resource "aws_networkmanager_core_network" "test" { + global_network_id = aws_networkmanager_global_network.test.id +} + +resource "aws_networkmanager_core_network_policy_attachment" "test" { + core_network_id = 
aws_networkmanager_core_network.test.id + policy_document = data.aws_networkmanager_core_network_policy_document.test.json +} + +data "aws_region" "current" {} + +data "aws_networkmanager_core_network_policy_document" "test" { + core_network_configuration { + vpn_ecmp_support = false + asn_ranges = ["64512-64555"] + edge_locations { + location = data.aws_region.current.region + asn = 64512 + } + } + + segments { + name = "shared" + description = "SegmentForSharedServices" + require_attachment_acceptance = true + } + + segment_actions { + action = "share" + mode = "attachment-route" + segment = "shared" + share_with = ["*"] + } + + attachment_policies { + rule_number = 1 + condition_logic = "or" + + conditions { + type = "tag-value" + operator = "equals" + key = "segment" + value = "shared" + } + + action { + association_method = "constant" + segment = "shared" + } + } +} + +resource "null_resource" "test" {} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} + +variable "rBgpAsn" { + type = string + nullable = false +} + +variable "rIPv4Address" { + type = string + nullable = false +} + +variable "unknownTagKey" { + type = string + nullable = false +} + +variable "knownTagKey" { + type = string + nullable = false +} + +variable "knownTagValue" { + type = string + nullable = false +} diff --git a/internal/service/networkmanager/testdata/SiteToSiteVPNAttachment/tags_defaults/main_gen.tf b/internal/service/networkmanager/testdata/SiteToSiteVPNAttachment/tags_defaults/main_gen.tf new file mode 100644 index 000000000000..af21889ac675 --- /dev/null +++ b/internal/service/networkmanager/testdata/SiteToSiteVPNAttachment/tags_defaults/main_gen.tf @@ -0,0 +1,116 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +provider "aws" { + default_tags { + tags = var.provider_tags + } +} + +resource "aws_networkmanager_site_to_site_vpn_attachment" "test" { + core_network_id = aws_networkmanager_core_network_policy_attachment.test.core_network_id + vpn_connection_arn = aws_vpn_connection.test.arn + + tags = var.resource_tags +} + +resource "aws_networkmanager_attachment_accepter" "test" { + attachment_id = aws_networkmanager_site_to_site_vpn_attachment.test.id + attachment_type = aws_networkmanager_site_to_site_vpn_attachment.test.attachment_type +} + +# testAccSiteToSiteVPNAttachmentConfig_base + +resource "aws_customer_gateway" "test" { + bgp_asn = var.rBgpAsn + ip_address = var.rIPv4Address + type = "ipsec.1" + device_name = var.rName +} + +resource "aws_vpn_connection" "test" { + customer_gateway_id = aws_customer_gateway.test.id + type = "ipsec.1" +} + +resource "aws_networkmanager_global_network" "test" {} + +resource "aws_networkmanager_core_network" "test" { + global_network_id = aws_networkmanager_global_network.test.id +} + +resource "aws_networkmanager_core_network_policy_attachment" "test" { + core_network_id = aws_networkmanager_core_network.test.id + policy_document = data.aws_networkmanager_core_network_policy_document.test.json +} + +data "aws_region" "current" {} + +data "aws_networkmanager_core_network_policy_document" "test" { + core_network_configuration { + vpn_ecmp_support = false + asn_ranges = ["64512-64555"] + edge_locations { + location = data.aws_region.current.region + asn = 64512 + } + } + + segments { + name = "shared" + description = "SegmentForSharedServices" + require_attachment_acceptance = true + } + + segment_actions { + action = "share" + mode = "attachment-route" + segment = "shared" + share_with = ["*"] + } + + attachment_policies { + rule_number = 1 + condition_logic = "or" + + conditions { + type = "tag-value" + operator = "equals" + key = "segment" + value = "shared" + } + + action { + 
association_method = "constant" + segment = "shared" + } + } +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} + +variable "rBgpAsn" { + type = string + nullable = false +} + +variable "rIPv4Address" { + type = string + nullable = false +} + +variable "resource_tags" { + description = "Tags to set on resource. To specify no tags, set to `null`" + # Not setting a default, so that this must explicitly be set to `null` to specify no tags + type = map(string) + nullable = true +} + +variable "provider_tags" { + type = map(string) + nullable = false +} diff --git a/internal/service/networkmanager/testdata/SiteToSiteVPNAttachment/tags_ignore/main_gen.tf b/internal/service/networkmanager/testdata/SiteToSiteVPNAttachment/tags_ignore/main_gen.tf new file mode 100644 index 000000000000..8a03d21df8d0 --- /dev/null +++ b/internal/service/networkmanager/testdata/SiteToSiteVPNAttachment/tags_ignore/main_gen.tf @@ -0,0 +1,125 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +provider "aws" { + default_tags { + tags = var.provider_tags + } + ignore_tags { + keys = var.ignore_tag_keys + } +} + +resource "aws_networkmanager_site_to_site_vpn_attachment" "test" { + core_network_id = aws_networkmanager_core_network_policy_attachment.test.core_network_id + vpn_connection_arn = aws_vpn_connection.test.arn + + tags = var.resource_tags +} + +resource "aws_networkmanager_attachment_accepter" "test" { + attachment_id = aws_networkmanager_site_to_site_vpn_attachment.test.id + attachment_type = aws_networkmanager_site_to_site_vpn_attachment.test.attachment_type +} + +# testAccSiteToSiteVPNAttachmentConfig_base + +resource "aws_customer_gateway" "test" { + bgp_asn = var.rBgpAsn + ip_address = var.rIPv4Address + type = "ipsec.1" + device_name = var.rName +} + +resource "aws_vpn_connection" "test" { + customer_gateway_id = aws_customer_gateway.test.id + type = "ipsec.1" +} + +resource "aws_networkmanager_global_network" "test" {} + +resource "aws_networkmanager_core_network" "test" { + global_network_id = aws_networkmanager_global_network.test.id +} + +resource "aws_networkmanager_core_network_policy_attachment" "test" { + core_network_id = aws_networkmanager_core_network.test.id + policy_document = data.aws_networkmanager_core_network_policy_document.test.json +} + +data "aws_region" "current" {} + +data "aws_networkmanager_core_network_policy_document" "test" { + core_network_configuration { + vpn_ecmp_support = false + asn_ranges = ["64512-64555"] + edge_locations { + location = data.aws_region.current.region + asn = 64512 + } + } + + segments { + name = "shared" + description = "SegmentForSharedServices" + require_attachment_acceptance = true + } + + segment_actions { + action = "share" + mode = "attachment-route" + segment = "shared" + share_with = ["*"] + } + + attachment_policies { + rule_number = 1 + condition_logic = "or" + + conditions { + type = "tag-value" + operator = "equals" + key = "segment" + 
value = "shared" + } + + action { + association_method = "constant" + segment = "shared" + } + } +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} + +variable "rBgpAsn" { + type = string + nullable = false +} + +variable "rIPv4Address" { + type = string + nullable = false +} + +variable "resource_tags" { + description = "Tags to set on resource. To specify no tags, set to `null`" + # Not setting a default, so that this must explicitly be set to `null` to specify no tags + type = map(string) + nullable = true +} + +variable "provider_tags" { + type = map(string) + nullable = true + default = null +} + +variable "ignore_tag_keys" { + type = set(string) + nullable = false +} diff --git a/internal/service/networkmanager/testdata/TransitGatewayPeering/tags/main_gen.tf b/internal/service/networkmanager/testdata/TransitGatewayPeering/tags/main_gen.tf new file mode 100644 index 000000000000..5facfa10e78c --- /dev/null +++ b/internal/service/networkmanager/testdata/TransitGatewayPeering/tags/main_gen.tf @@ -0,0 +1,57 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_networkmanager_transit_gateway_peering" "test" { + core_network_id = aws_networkmanager_core_network.test.id + transit_gateway_arn = aws_ec2_transit_gateway.test.arn + + depends_on = [ + aws_ec2_transit_gateway_policy_table.test, + aws_networkmanager_core_network_policy_attachment.test, + ] + + tags = var.resource_tags +} + +# testAccTransitGatewayPeeringConfig_base + +data "aws_region" "current" {} + +resource "aws_ec2_transit_gateway" "test" {} + +resource "aws_ec2_transit_gateway_policy_table" "test" { + transit_gateway_id = aws_ec2_transit_gateway.test.id +} + +resource "aws_networkmanager_global_network" "test" {} + +resource "aws_networkmanager_core_network" "test" { + global_network_id = aws_networkmanager_global_network.test.id +} + +resource "aws_networkmanager_core_network_policy_attachment" "test" { + core_network_id = aws_networkmanager_core_network.test.id + policy_document = data.aws_networkmanager_core_network_policy_document.test.json +} + +data "aws_networkmanager_core_network_policy_document" "test" { + core_network_configuration { + # Don't overlap with default TGW ASN: 64512. + asn_ranges = ["65022-65534"] + + edge_locations { + location = data.aws_region.current.region + } + } + + segments { + name = "test" + } +} + +variable "resource_tags" { + description = "Tags to set on resource. To specify no tags, set to `null`" + # Not setting a default, so that this must explicitly be set to `null` to specify no tags + type = map(string) + nullable = true +} diff --git a/internal/service/networkmanager/testdata/TransitGatewayPeering/tagsComputed1/main_gen.tf b/internal/service/networkmanager/testdata/TransitGatewayPeering/tagsComputed1/main_gen.tf new file mode 100644 index 000000000000..94959a447c63 --- /dev/null +++ b/internal/service/networkmanager/testdata/TransitGatewayPeering/tagsComputed1/main_gen.tf @@ -0,0 +1,61 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +provider "null" {} + +resource "aws_networkmanager_transit_gateway_peering" "test" { + core_network_id = aws_networkmanager_core_network.test.id + transit_gateway_arn = aws_ec2_transit_gateway.test.arn + + depends_on = [ + aws_ec2_transit_gateway_policy_table.test, + aws_networkmanager_core_network_policy_attachment.test, + ] + + tags = { + (var.unknownTagKey) = null_resource.test.id + } +} + +# testAccTransitGatewayPeeringConfig_base + +data "aws_region" "current" {} + +resource "aws_ec2_transit_gateway" "test" {} + +resource "aws_ec2_transit_gateway_policy_table" "test" { + transit_gateway_id = aws_ec2_transit_gateway.test.id +} + +resource "aws_networkmanager_global_network" "test" {} + +resource "aws_networkmanager_core_network" "test" { + global_network_id = aws_networkmanager_global_network.test.id +} + +resource "aws_networkmanager_core_network_policy_attachment" "test" { + core_network_id = aws_networkmanager_core_network.test.id + policy_document = data.aws_networkmanager_core_network_policy_document.test.json +} + +data "aws_networkmanager_core_network_policy_document" "test" { + core_network_configuration { + # Don't overlap with default TGW ASN: 64512. + asn_ranges = ["65022-65534"] + + edge_locations { + location = data.aws_region.current.region + } + } + + segments { + name = "test" + } +} + +resource "null_resource" "test" {} + +variable "unknownTagKey" { + type = string + nullable = false +} diff --git a/internal/service/networkmanager/testdata/TransitGatewayPeering/tagsComputed2/main_gen.tf b/internal/service/networkmanager/testdata/TransitGatewayPeering/tagsComputed2/main_gen.tf new file mode 100644 index 000000000000..86631c3c8c85 --- /dev/null +++ b/internal/service/networkmanager/testdata/TransitGatewayPeering/tagsComputed2/main_gen.tf @@ -0,0 +1,72 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +provider "null" {} + +resource "aws_networkmanager_transit_gateway_peering" "test" { + core_network_id = aws_networkmanager_core_network.test.id + transit_gateway_arn = aws_ec2_transit_gateway.test.arn + + depends_on = [ + aws_ec2_transit_gateway_policy_table.test, + aws_networkmanager_core_network_policy_attachment.test, + ] + + tags = { + (var.unknownTagKey) = null_resource.test.id + (var.knownTagKey) = var.knownTagValue + } +} + +# testAccTransitGatewayPeeringConfig_base + +data "aws_region" "current" {} + +resource "aws_ec2_transit_gateway" "test" {} + +resource "aws_ec2_transit_gateway_policy_table" "test" { + transit_gateway_id = aws_ec2_transit_gateway.test.id +} + +resource "aws_networkmanager_global_network" "test" {} + +resource "aws_networkmanager_core_network" "test" { + global_network_id = aws_networkmanager_global_network.test.id +} + +resource "aws_networkmanager_core_network_policy_attachment" "test" { + core_network_id = aws_networkmanager_core_network.test.id + policy_document = data.aws_networkmanager_core_network_policy_document.test.json +} + +data "aws_networkmanager_core_network_policy_document" "test" { + core_network_configuration { + # Don't overlap with default TGW ASN: 64512. 
+ asn_ranges = ["65022-65534"] + + edge_locations { + location = data.aws_region.current.region + } + } + + segments { + name = "test" + } +} + +resource "null_resource" "test" {} + +variable "unknownTagKey" { + type = string + nullable = false +} + +variable "knownTagKey" { + type = string + nullable = false +} + +variable "knownTagValue" { + type = string + nullable = false +} diff --git a/internal/service/networkmanager/testdata/TransitGatewayPeering/tags_defaults/main_gen.tf b/internal/service/networkmanager/testdata/TransitGatewayPeering/tags_defaults/main_gen.tf new file mode 100644 index 000000000000..e96d042f41a3 --- /dev/null +++ b/internal/service/networkmanager/testdata/TransitGatewayPeering/tags_defaults/main_gen.tf @@ -0,0 +1,68 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +provider "aws" { + default_tags { + tags = var.provider_tags + } +} + +resource "aws_networkmanager_transit_gateway_peering" "test" { + core_network_id = aws_networkmanager_core_network.test.id + transit_gateway_arn = aws_ec2_transit_gateway.test.arn + + depends_on = [ + aws_ec2_transit_gateway_policy_table.test, + aws_networkmanager_core_network_policy_attachment.test, + ] + + tags = var.resource_tags +} + +# testAccTransitGatewayPeeringConfig_base + +data "aws_region" "current" {} + +resource "aws_ec2_transit_gateway" "test" {} + +resource "aws_ec2_transit_gateway_policy_table" "test" { + transit_gateway_id = aws_ec2_transit_gateway.test.id +} + +resource "aws_networkmanager_global_network" "test" {} + +resource "aws_networkmanager_core_network" "test" { + global_network_id = aws_networkmanager_global_network.test.id +} + +resource "aws_networkmanager_core_network_policy_attachment" "test" { + core_network_id = aws_networkmanager_core_network.test.id + policy_document = data.aws_networkmanager_core_network_policy_document.test.json +} + +data "aws_networkmanager_core_network_policy_document" "test" { + core_network_configuration { + # Don't overlap 
with default TGW ASN: 64512. + asn_ranges = ["65022-65534"] + + edge_locations { + location = data.aws_region.current.region + } + } + + segments { + name = "test" + } +} + +variable "resource_tags" { + description = "Tags to set on resource. To specify no tags, set to `null`" + # Not setting a default, so that this must explicitly be set to `null` to specify no tags + type = map(string) + nullable = true +} + +variable "provider_tags" { + type = map(string) + nullable = false +} diff --git a/internal/service/networkmanager/testdata/TransitGatewayPeering/tags_ignore/main_gen.tf b/internal/service/networkmanager/testdata/TransitGatewayPeering/tags_ignore/main_gen.tf new file mode 100644 index 000000000000..15e1d7520d25 --- /dev/null +++ b/internal/service/networkmanager/testdata/TransitGatewayPeering/tags_ignore/main_gen.tf @@ -0,0 +1,77 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +provider "aws" { + default_tags { + tags = var.provider_tags + } + ignore_tags { + keys = var.ignore_tag_keys + } +} + +resource "aws_networkmanager_transit_gateway_peering" "test" { + core_network_id = aws_networkmanager_core_network.test.id + transit_gateway_arn = aws_ec2_transit_gateway.test.arn + + depends_on = [ + aws_ec2_transit_gateway_policy_table.test, + aws_networkmanager_core_network_policy_attachment.test, + ] + + tags = var.resource_tags +} + +# testAccTransitGatewayPeeringConfig_base + +data "aws_region" "current" {} + +resource "aws_ec2_transit_gateway" "test" {} + +resource "aws_ec2_transit_gateway_policy_table" "test" { + transit_gateway_id = aws_ec2_transit_gateway.test.id +} + +resource "aws_networkmanager_global_network" "test" {} + +resource "aws_networkmanager_core_network" "test" { + global_network_id = aws_networkmanager_global_network.test.id +} + +resource "aws_networkmanager_core_network_policy_attachment" "test" { + core_network_id = aws_networkmanager_core_network.test.id + policy_document = 
data.aws_networkmanager_core_network_policy_document.test.json +} + +data "aws_networkmanager_core_network_policy_document" "test" { + core_network_configuration { + # Don't overlap with default TGW ASN: 64512. + asn_ranges = ["65022-65534"] + + edge_locations { + location = data.aws_region.current.region + } + } + + segments { + name = "test" + } +} + +variable "resource_tags" { + description = "Tags to set on resource. To specify no tags, set to `null`" + # Not setting a default, so that this must explicitly be set to `null` to specify no tags + type = map(string) + nullable = true +} + +variable "provider_tags" { + type = map(string) + nullable = true + default = null +} + +variable "ignore_tag_keys" { + type = set(string) + nullable = false +} diff --git a/internal/service/networkmanager/testdata/TransitGatewayRouteTableAttachment/tags/main_gen.tf b/internal/service/networkmanager/testdata/TransitGatewayRouteTableAttachment/tags/main_gen.tf new file mode 100644 index 000000000000..4e561637f2b9 --- /dev/null +++ b/internal/service/networkmanager/testdata/TransitGatewayRouteTableAttachment/tags/main_gen.tf @@ -0,0 +1,80 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_networkmanager_transit_gateway_route_table_attachment" "test" { + peering_id = aws_networkmanager_transit_gateway_peering.test.id + transit_gateway_route_table_arn = aws_ec2_transit_gateway_route_table.test.arn + + depends_on = [aws_ec2_transit_gateway_policy_table_association.test] + + tags = var.resource_tags +} + +resource "aws_networkmanager_attachment_accepter" "test" { + attachment_id = aws_networkmanager_transit_gateway_route_table_attachment.test.id + attachment_type = aws_networkmanager_transit_gateway_route_table_attachment.test.attachment_type +} + +# testAccTransitGatewayRouteTableAttachmentConfig_base + +resource "aws_networkmanager_transit_gateway_peering" "test" { + core_network_id = aws_networkmanager_core_network.test.id + transit_gateway_arn = aws_ec2_transit_gateway.test.arn + + depends_on = [ + aws_ec2_transit_gateway_policy_table.test, + aws_networkmanager_core_network_policy_attachment.test, + ] +} + +resource "aws_ec2_transit_gateway_route_table" "test" { + transit_gateway_id = aws_ec2_transit_gateway.test.id +} + +resource "aws_ec2_transit_gateway_policy_table_association" "test" { + transit_gateway_attachment_id = aws_networkmanager_transit_gateway_peering.test.transit_gateway_peering_attachment_id + transit_gateway_policy_table_id = aws_ec2_transit_gateway_policy_table.test.id +} + +# testAccTransitGatewayPeeringConfig_base + +resource "aws_ec2_transit_gateway" "test" {} + +resource "aws_ec2_transit_gateway_policy_table" "test" { + transit_gateway_id = aws_ec2_transit_gateway.test.id +} + +resource "aws_networkmanager_global_network" "test" {} + +resource "aws_networkmanager_core_network" "test" { + global_network_id = aws_networkmanager_global_network.test.id +} + +data "aws_region" "current" {} + +resource "aws_networkmanager_core_network_policy_attachment" "test" { + core_network_id = aws_networkmanager_core_network.test.id + policy_document = 
data.aws_networkmanager_core_network_policy_document.test.json +} + +data "aws_networkmanager_core_network_policy_document" "test" { + core_network_configuration { + # Don't overlap with default TGW ASN: 64512. + asn_ranges = ["65022-65534"] + + edge_locations { + location = data.aws_region.current.region + } + } + + segments { + name = "test" + } +} + +variable "resource_tags" { + description = "Tags to set on resource. To specify no tags, set to `null`" + # Not setting a default, so that this must explicitly be set to `null` to specify no tags + type = map(string) + nullable = true +} diff --git a/internal/service/networkmanager/testdata/TransitGatewayRouteTableAttachment/tagsComputed1/main_gen.tf b/internal/service/networkmanager/testdata/TransitGatewayRouteTableAttachment/tagsComputed1/main_gen.tf new file mode 100644 index 000000000000..703e66b129ae --- /dev/null +++ b/internal/service/networkmanager/testdata/TransitGatewayRouteTableAttachment/tagsComputed1/main_gen.tf @@ -0,0 +1,84 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +provider "null" {} + +resource "aws_networkmanager_transit_gateway_route_table_attachment" "test" { + peering_id = aws_networkmanager_transit_gateway_peering.test.id + transit_gateway_route_table_arn = aws_ec2_transit_gateway_route_table.test.arn + + depends_on = [aws_ec2_transit_gateway_policy_table_association.test] + + tags = { + (var.unknownTagKey) = null_resource.test.id + } +} + +resource "aws_networkmanager_attachment_accepter" "test" { + attachment_id = aws_networkmanager_transit_gateway_route_table_attachment.test.id + attachment_type = aws_networkmanager_transit_gateway_route_table_attachment.test.attachment_type +} + +# testAccTransitGatewayRouteTableAttachmentConfig_base + +resource "aws_networkmanager_transit_gateway_peering" "test" { + core_network_id = aws_networkmanager_core_network.test.id + transit_gateway_arn = aws_ec2_transit_gateway.test.arn + + depends_on = [ + aws_ec2_transit_gateway_policy_table.test, + aws_networkmanager_core_network_policy_attachment.test, + ] +} + +resource "aws_ec2_transit_gateway_route_table" "test" { + transit_gateway_id = aws_ec2_transit_gateway.test.id +} + +resource "aws_ec2_transit_gateway_policy_table_association" "test" { + transit_gateway_attachment_id = aws_networkmanager_transit_gateway_peering.test.transit_gateway_peering_attachment_id + transit_gateway_policy_table_id = aws_ec2_transit_gateway_policy_table.test.id +} + +# testAccTransitGatewayPeeringConfig_base + +resource "aws_ec2_transit_gateway" "test" {} + +resource "aws_ec2_transit_gateway_policy_table" "test" { + transit_gateway_id = aws_ec2_transit_gateway.test.id +} + +resource "aws_networkmanager_global_network" "test" {} + +resource "aws_networkmanager_core_network" "test" { + global_network_id = aws_networkmanager_global_network.test.id +} + +data "aws_region" "current" {} + +resource "aws_networkmanager_core_network_policy_attachment" "test" { + core_network_id = aws_networkmanager_core_network.test.id + 
policy_document = data.aws_networkmanager_core_network_policy_document.test.json +} + +data "aws_networkmanager_core_network_policy_document" "test" { + core_network_configuration { + # Don't overlap with default TGW ASN: 64512. + asn_ranges = ["65022-65534"] + + edge_locations { + location = data.aws_region.current.region + } + } + + segments { + name = "test" + } +} + +resource "null_resource" "test" {} + +variable "unknownTagKey" { + type = string + nullable = false +} diff --git a/internal/service/networkmanager/testdata/TransitGatewayRouteTableAttachment/tagsComputed2/main_gen.tf b/internal/service/networkmanager/testdata/TransitGatewayRouteTableAttachment/tagsComputed2/main_gen.tf new file mode 100644 index 000000000000..a31e8499b418 --- /dev/null +++ b/internal/service/networkmanager/testdata/TransitGatewayRouteTableAttachment/tagsComputed2/main_gen.tf @@ -0,0 +1,95 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +provider "null" {} + +resource "aws_networkmanager_transit_gateway_route_table_attachment" "test" { + peering_id = aws_networkmanager_transit_gateway_peering.test.id + transit_gateway_route_table_arn = aws_ec2_transit_gateway_route_table.test.arn + + depends_on = [aws_ec2_transit_gateway_policy_table_association.test] + + tags = { + (var.unknownTagKey) = null_resource.test.id + (var.knownTagKey) = var.knownTagValue + } +} + +resource "aws_networkmanager_attachment_accepter" "test" { + attachment_id = aws_networkmanager_transit_gateway_route_table_attachment.test.id + attachment_type = aws_networkmanager_transit_gateway_route_table_attachment.test.attachment_type +} + +# testAccTransitGatewayRouteTableAttachmentConfig_base + +resource "aws_networkmanager_transit_gateway_peering" "test" { + core_network_id = aws_networkmanager_core_network.test.id + transit_gateway_arn = aws_ec2_transit_gateway.test.arn + + depends_on = [ + aws_ec2_transit_gateway_policy_table.test, + aws_networkmanager_core_network_policy_attachment.test, + 
] +} + +resource "aws_ec2_transit_gateway_route_table" "test" { + transit_gateway_id = aws_ec2_transit_gateway.test.id +} + +resource "aws_ec2_transit_gateway_policy_table_association" "test" { + transit_gateway_attachment_id = aws_networkmanager_transit_gateway_peering.test.transit_gateway_peering_attachment_id + transit_gateway_policy_table_id = aws_ec2_transit_gateway_policy_table.test.id +} + +# testAccTransitGatewayPeeringConfig_base + +resource "aws_ec2_transit_gateway" "test" {} + +resource "aws_ec2_transit_gateway_policy_table" "test" { + transit_gateway_id = aws_ec2_transit_gateway.test.id +} + +resource "aws_networkmanager_global_network" "test" {} + +resource "aws_networkmanager_core_network" "test" { + global_network_id = aws_networkmanager_global_network.test.id +} + +data "aws_region" "current" {} + +resource "aws_networkmanager_core_network_policy_attachment" "test" { + core_network_id = aws_networkmanager_core_network.test.id + policy_document = data.aws_networkmanager_core_network_policy_document.test.json +} + +data "aws_networkmanager_core_network_policy_document" "test" { + core_network_configuration { + # Don't overlap with default TGW ASN: 64512. 
+ asn_ranges = ["65022-65534"] + + edge_locations { + location = data.aws_region.current.region + } + } + + segments { + name = "test" + } +} + +resource "null_resource" "test" {} + +variable "unknownTagKey" { + type = string + nullable = false +} + +variable "knownTagKey" { + type = string + nullable = false +} + +variable "knownTagValue" { + type = string + nullable = false +} diff --git a/internal/service/networkmanager/testdata/TransitGatewayRouteTableAttachment/tags_defaults/main_gen.tf b/internal/service/networkmanager/testdata/TransitGatewayRouteTableAttachment/tags_defaults/main_gen.tf new file mode 100644 index 000000000000..b8fdf49cba6a --- /dev/null +++ b/internal/service/networkmanager/testdata/TransitGatewayRouteTableAttachment/tags_defaults/main_gen.tf @@ -0,0 +1,91 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +provider "aws" { + default_tags { + tags = var.provider_tags + } +} + +resource "aws_networkmanager_transit_gateway_route_table_attachment" "test" { + peering_id = aws_networkmanager_transit_gateway_peering.test.id + transit_gateway_route_table_arn = aws_ec2_transit_gateway_route_table.test.arn + + depends_on = [aws_ec2_transit_gateway_policy_table_association.test] + + tags = var.resource_tags +} + +resource "aws_networkmanager_attachment_accepter" "test" { + attachment_id = aws_networkmanager_transit_gateway_route_table_attachment.test.id + attachment_type = aws_networkmanager_transit_gateway_route_table_attachment.test.attachment_type +} + +# testAccTransitGatewayRouteTableAttachmentConfig_base + +resource "aws_networkmanager_transit_gateway_peering" "test" { + core_network_id = aws_networkmanager_core_network.test.id + transit_gateway_arn = aws_ec2_transit_gateway.test.arn + + depends_on = [ + aws_ec2_transit_gateway_policy_table.test, + aws_networkmanager_core_network_policy_attachment.test, + ] +} + +resource "aws_ec2_transit_gateway_route_table" "test" { + transit_gateway_id = aws_ec2_transit_gateway.test.id 
+} + +resource "aws_ec2_transit_gateway_policy_table_association" "test" { + transit_gateway_attachment_id = aws_networkmanager_transit_gateway_peering.test.transit_gateway_peering_attachment_id + transit_gateway_policy_table_id = aws_ec2_transit_gateway_policy_table.test.id +} + +# testAccTransitGatewayPeeringConfig_base + +resource "aws_ec2_transit_gateway" "test" {} + +resource "aws_ec2_transit_gateway_policy_table" "test" { + transit_gateway_id = aws_ec2_transit_gateway.test.id +} + +resource "aws_networkmanager_global_network" "test" {} + +resource "aws_networkmanager_core_network" "test" { + global_network_id = aws_networkmanager_global_network.test.id +} + +data "aws_region" "current" {} + +resource "aws_networkmanager_core_network_policy_attachment" "test" { + core_network_id = aws_networkmanager_core_network.test.id + policy_document = data.aws_networkmanager_core_network_policy_document.test.json +} + +data "aws_networkmanager_core_network_policy_document" "test" { + core_network_configuration { + # Don't overlap with default TGW ASN: 64512. + asn_ranges = ["65022-65534"] + + edge_locations { + location = data.aws_region.current.region + } + } + + segments { + name = "test" + } +} + +variable "resource_tags" { + description = "Tags to set on resource. To specify no tags, set to `null`" + # Not setting a default, so that this must explicitly be set to `null` to specify no tags + type = map(string) + nullable = true +} + +variable "provider_tags" { + type = map(string) + nullable = false +} diff --git a/internal/service/networkmanager/testdata/TransitGatewayRouteTableAttachment/tags_ignore/main_gen.tf b/internal/service/networkmanager/testdata/TransitGatewayRouteTableAttachment/tags_ignore/main_gen.tf new file mode 100644 index 000000000000..836c2ca6fa2e --- /dev/null +++ b/internal/service/networkmanager/testdata/TransitGatewayRouteTableAttachment/tags_ignore/main_gen.tf @@ -0,0 +1,100 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +provider "aws" { + default_tags { + tags = var.provider_tags + } + ignore_tags { + keys = var.ignore_tag_keys + } +} + +resource "aws_networkmanager_transit_gateway_route_table_attachment" "test" { + peering_id = aws_networkmanager_transit_gateway_peering.test.id + transit_gateway_route_table_arn = aws_ec2_transit_gateway_route_table.test.arn + + depends_on = [aws_ec2_transit_gateway_policy_table_association.test] + + tags = var.resource_tags +} + +resource "aws_networkmanager_attachment_accepter" "test" { + attachment_id = aws_networkmanager_transit_gateway_route_table_attachment.test.id + attachment_type = aws_networkmanager_transit_gateway_route_table_attachment.test.attachment_type +} + +# testAccTransitGatewayRouteTableAttachmentConfig_base + +resource "aws_networkmanager_transit_gateway_peering" "test" { + core_network_id = aws_networkmanager_core_network.test.id + transit_gateway_arn = aws_ec2_transit_gateway.test.arn + + depends_on = [ + aws_ec2_transit_gateway_policy_table.test, + aws_networkmanager_core_network_policy_attachment.test, + ] +} + +resource "aws_ec2_transit_gateway_route_table" "test" { + transit_gateway_id = aws_ec2_transit_gateway.test.id +} + +resource "aws_ec2_transit_gateway_policy_table_association" "test" { + transit_gateway_attachment_id = aws_networkmanager_transit_gateway_peering.test.transit_gateway_peering_attachment_id + transit_gateway_policy_table_id = aws_ec2_transit_gateway_policy_table.test.id +} + +# testAccTransitGatewayPeeringConfig_base + +resource "aws_ec2_transit_gateway" "test" {} + +resource "aws_ec2_transit_gateway_policy_table" "test" { + transit_gateway_id = aws_ec2_transit_gateway.test.id +} + +resource "aws_networkmanager_global_network" "test" {} + +resource "aws_networkmanager_core_network" "test" { + global_network_id = aws_networkmanager_global_network.test.id +} + +data "aws_region" "current" {} + +resource "aws_networkmanager_core_network_policy_attachment" "test" { + 
core_network_id = aws_networkmanager_core_network.test.id + policy_document = data.aws_networkmanager_core_network_policy_document.test.json +} + +data "aws_networkmanager_core_network_policy_document" "test" { + core_network_configuration { + # Don't overlap with default TGW ASN: 64512. + asn_ranges = ["65022-65534"] + + edge_locations { + location = data.aws_region.current.region + } + } + + segments { + name = "test" + } +} + +variable "resource_tags" { + description = "Tags to set on resource. To specify no tags, set to `null`" + # Not setting a default, so that this must explicitly be set to `null` to specify no tags + type = map(string) + nullable = true +} + +variable "provider_tags" { + type = map(string) + nullable = true + default = null +} + +variable "ignore_tag_keys" { + type = set(string) + nullable = false +} diff --git a/internal/service/networkmanager/testdata/VPCAttachment/tags/main_gen.tf b/internal/service/networkmanager/testdata/VPCAttachment/tags/main_gen.tf new file mode 100644 index 000000000000..576e43929f64 --- /dev/null +++ b/internal/service/networkmanager/testdata/VPCAttachment/tags/main_gen.tf @@ -0,0 +1,104 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_networkmanager_vpc_attachment" "test" { + subnet_arns = aws_subnet.test[*].arn + core_network_id = aws_networkmanager_core_network_policy_attachment.test.core_network_id + vpc_arn = aws_vpc.test.arn + + tags = var.resource_tags +} + +# testAccVPCAttachmentConfig_base + +resource "aws_networkmanager_global_network" "test" {} + +resource "aws_networkmanager_core_network" "test" { + global_network_id = aws_networkmanager_global_network.test.id +} + +resource "aws_networkmanager_core_network_policy_attachment" "test" { + core_network_id = aws_networkmanager_core_network.test.id + policy_document = data.aws_networkmanager_core_network_policy_document.test.json +} + +data "aws_region" "current" {} + +data "aws_networkmanager_core_network_policy_document" "test" { + core_network_configuration { + vpn_ecmp_support = false + asn_ranges = ["64512-64555"] + edge_locations { + location = data.aws_region.current.region + asn = 64512 + } + } + + segments { + name = "shared" + description = "SegmentForSharedServices" + require_attachment_acceptance = false + } + + segment_actions { + action = "share" + mode = "attachment-route" + segment = "shared" + share_with = ["*"] + } + + attachment_policies { + rule_number = 1 + + conditions { + type = "any" + } + + action { + association_method = "constant" + segment = "shared" + } + } +} + +# acctest.ConfigVPCWithSubnetsIPv6(rName, 2) + +resource "aws_vpc" "test" { + cidr_block = "10.0.0.0/16" + + assign_generated_ipv6_cidr_block = true +} + +resource "aws_subnet" "test" { + count = 2 + + vpc_id = aws_vpc.test.id + availability_zone = data.aws_availability_zones.available.names[count.index] + + cidr_block = cidrsubnet(aws_vpc.test.cidr_block, 8, count.index) + ipv6_cidr_block = cidrsubnet(aws_vpc.test.ipv6_cidr_block, 8, count.index) + assign_ipv6_address_on_creation = true +} + +# acctest.ConfigAvailableAZsNoOptInDefaultExclude + +data "aws_availability_zones" "available" { + 
exclude_zone_ids = local.default_exclude_zone_ids + state = "available" + + filter { + name = "opt-in-status" + values = ["opt-in-not-required"] + } +} + +locals { + default_exclude_zone_ids = ["usw2-az4", "usgw1-az2"] +} + +variable "resource_tags" { + description = "Tags to set on resource. To specify no tags, set to `null`" + # Not setting a default, so that this must explicitly be set to `null` to specify no tags + type = map(string) + nullable = true +} diff --git a/internal/service/networkmanager/testdata/VPCAttachment/tagsComputed1/main_gen.tf b/internal/service/networkmanager/testdata/VPCAttachment/tagsComputed1/main_gen.tf new file mode 100644 index 000000000000..9503b9511cc5 --- /dev/null +++ b/internal/service/networkmanager/testdata/VPCAttachment/tagsComputed1/main_gen.tf @@ -0,0 +1,108 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +provider "null" {} + +resource "aws_networkmanager_vpc_attachment" "test" { + subnet_arns = aws_subnet.test[*].arn + core_network_id = aws_networkmanager_core_network_policy_attachment.test.core_network_id + vpc_arn = aws_vpc.test.arn + + tags = { + (var.unknownTagKey) = null_resource.test.id + } +} + +# testAccVPCAttachmentConfig_base + +resource "aws_networkmanager_global_network" "test" {} + +resource "aws_networkmanager_core_network" "test" { + global_network_id = aws_networkmanager_global_network.test.id +} + +resource "aws_networkmanager_core_network_policy_attachment" "test" { + core_network_id = aws_networkmanager_core_network.test.id + policy_document = data.aws_networkmanager_core_network_policy_document.test.json +} + +data "aws_region" "current" {} + +data "aws_networkmanager_core_network_policy_document" "test" { + core_network_configuration { + vpn_ecmp_support = false + asn_ranges = ["64512-64555"] + edge_locations { + location = data.aws_region.current.region + asn = 64512 + } + } + + segments { + name = "shared" + description = "SegmentForSharedServices" + 
require_attachment_acceptance = false + } + + segment_actions { + action = "share" + mode = "attachment-route" + segment = "shared" + share_with = ["*"] + } + + attachment_policies { + rule_number = 1 + + conditions { + type = "any" + } + + action { + association_method = "constant" + segment = "shared" + } + } +} + +# acctest.ConfigVPCWithSubnetsIPv6(rName, 2) + +resource "aws_vpc" "test" { + cidr_block = "10.0.0.0/16" + + assign_generated_ipv6_cidr_block = true +} + +resource "aws_subnet" "test" { + count = 2 + + vpc_id = aws_vpc.test.id + availability_zone = data.aws_availability_zones.available.names[count.index] + + cidr_block = cidrsubnet(aws_vpc.test.cidr_block, 8, count.index) + ipv6_cidr_block = cidrsubnet(aws_vpc.test.ipv6_cidr_block, 8, count.index) + assign_ipv6_address_on_creation = true +} + +# acctest.ConfigAvailableAZsNoOptInDefaultExclude + +data "aws_availability_zones" "available" { + exclude_zone_ids = local.default_exclude_zone_ids + state = "available" + + filter { + name = "opt-in-status" + values = ["opt-in-not-required"] + } +} + +locals { + default_exclude_zone_ids = ["usw2-az4", "usgw1-az2"] +} + +resource "null_resource" "test" {} + +variable "unknownTagKey" { + type = string + nullable = false +} diff --git a/internal/service/networkmanager/testdata/VPCAttachment/tagsComputed2/main_gen.tf b/internal/service/networkmanager/testdata/VPCAttachment/tagsComputed2/main_gen.tf new file mode 100644 index 000000000000..34271cff82b7 --- /dev/null +++ b/internal/service/networkmanager/testdata/VPCAttachment/tagsComputed2/main_gen.tf @@ -0,0 +1,119 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +provider "null" {} + +resource "aws_networkmanager_vpc_attachment" "test" { + subnet_arns = aws_subnet.test[*].arn + core_network_id = aws_networkmanager_core_network_policy_attachment.test.core_network_id + vpc_arn = aws_vpc.test.arn + + tags = { + (var.unknownTagKey) = null_resource.test.id + (var.knownTagKey) = var.knownTagValue + } +} + +# testAccVPCAttachmentConfig_base + +resource "aws_networkmanager_global_network" "test" {} + +resource "aws_networkmanager_core_network" "test" { + global_network_id = aws_networkmanager_global_network.test.id +} + +resource "aws_networkmanager_core_network_policy_attachment" "test" { + core_network_id = aws_networkmanager_core_network.test.id + policy_document = data.aws_networkmanager_core_network_policy_document.test.json +} + +data "aws_region" "current" {} + +data "aws_networkmanager_core_network_policy_document" "test" { + core_network_configuration { + vpn_ecmp_support = false + asn_ranges = ["64512-64555"] + edge_locations { + location = data.aws_region.current.region + asn = 64512 + } + } + + segments { + name = "shared" + description = "SegmentForSharedServices" + require_attachment_acceptance = false + } + + segment_actions { + action = "share" + mode = "attachment-route" + segment = "shared" + share_with = ["*"] + } + + attachment_policies { + rule_number = 1 + + conditions { + type = "any" + } + + action { + association_method = "constant" + segment = "shared" + } + } +} + +# acctest.ConfigVPCWithSubnetsIPv6(rName, 2) + +resource "aws_vpc" "test" { + cidr_block = "10.0.0.0/16" + + assign_generated_ipv6_cidr_block = true +} + +resource "aws_subnet" "test" { + count = 2 + + vpc_id = aws_vpc.test.id + availability_zone = data.aws_availability_zones.available.names[count.index] + + cidr_block = cidrsubnet(aws_vpc.test.cidr_block, 8, count.index) + ipv6_cidr_block = cidrsubnet(aws_vpc.test.ipv6_cidr_block, 8, count.index) + assign_ipv6_address_on_creation = true +} + +# 
acctest.ConfigAvailableAZsNoOptInDefaultExclude + +data "aws_availability_zones" "available" { + exclude_zone_ids = local.default_exclude_zone_ids + state = "available" + + filter { + name = "opt-in-status" + values = ["opt-in-not-required"] + } +} + +locals { + default_exclude_zone_ids = ["usw2-az4", "usgw1-az2"] +} + +resource "null_resource" "test" {} + +variable "unknownTagKey" { + type = string + nullable = false +} + +variable "knownTagKey" { + type = string + nullable = false +} + +variable "knownTagValue" { + type = string + nullable = false +} diff --git a/internal/service/networkmanager/testdata/VPCAttachment/tags_defaults/main_gen.tf b/internal/service/networkmanager/testdata/VPCAttachment/tags_defaults/main_gen.tf new file mode 100644 index 000000000000..b16001213ac7 --- /dev/null +++ b/internal/service/networkmanager/testdata/VPCAttachment/tags_defaults/main_gen.tf @@ -0,0 +1,115 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +provider "aws" { + default_tags { + tags = var.provider_tags + } +} + +resource "aws_networkmanager_vpc_attachment" "test" { + subnet_arns = aws_subnet.test[*].arn + core_network_id = aws_networkmanager_core_network_policy_attachment.test.core_network_id + vpc_arn = aws_vpc.test.arn + + tags = var.resource_tags +} + +# testAccVPCAttachmentConfig_base + +resource "aws_networkmanager_global_network" "test" {} + +resource "aws_networkmanager_core_network" "test" { + global_network_id = aws_networkmanager_global_network.test.id +} + +resource "aws_networkmanager_core_network_policy_attachment" "test" { + core_network_id = aws_networkmanager_core_network.test.id + policy_document = data.aws_networkmanager_core_network_policy_document.test.json +} + +data "aws_region" "current" {} + +data "aws_networkmanager_core_network_policy_document" "test" { + core_network_configuration { + vpn_ecmp_support = false + asn_ranges = ["64512-64555"] + edge_locations { + location = data.aws_region.current.region + asn = 64512 
+ } + } + + segments { + name = "shared" + description = "SegmentForSharedServices" + require_attachment_acceptance = false + } + + segment_actions { + action = "share" + mode = "attachment-route" + segment = "shared" + share_with = ["*"] + } + + attachment_policies { + rule_number = 1 + + conditions { + type = "any" + } + + action { + association_method = "constant" + segment = "shared" + } + } +} + +# acctest.ConfigVPCWithSubnetsIPv6(rName, 2) + +resource "aws_vpc" "test" { + cidr_block = "10.0.0.0/16" + + assign_generated_ipv6_cidr_block = true +} + +resource "aws_subnet" "test" { + count = 2 + + vpc_id = aws_vpc.test.id + availability_zone = data.aws_availability_zones.available.names[count.index] + + cidr_block = cidrsubnet(aws_vpc.test.cidr_block, 8, count.index) + ipv6_cidr_block = cidrsubnet(aws_vpc.test.ipv6_cidr_block, 8, count.index) + assign_ipv6_address_on_creation = true +} + +# acctest.ConfigAvailableAZsNoOptInDefaultExclude + +data "aws_availability_zones" "available" { + exclude_zone_ids = local.default_exclude_zone_ids + state = "available" + + filter { + name = "opt-in-status" + values = ["opt-in-not-required"] + } +} + +locals { + default_exclude_zone_ids = ["usw2-az4", "usgw1-az2"] +} + +variable "resource_tags" { + description = "Tags to set on resource. To specify no tags, set to `null`" + # Not setting a default, so that this must explicitly be set to `null` to specify no tags + type = map(string) + nullable = true +} + +variable "provider_tags" { + type = map(string) + nullable = false +} diff --git a/internal/service/networkmanager/testdata/VPCAttachment/tags_ignore/main_gen.tf b/internal/service/networkmanager/testdata/VPCAttachment/tags_ignore/main_gen.tf new file mode 100644 index 000000000000..0f326d45c681 --- /dev/null +++ b/internal/service/networkmanager/testdata/VPCAttachment/tags_ignore/main_gen.tf @@ -0,0 +1,124 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +provider "aws" { + default_tags { + tags = var.provider_tags + } + ignore_tags { + keys = var.ignore_tag_keys + } +} + +resource "aws_networkmanager_vpc_attachment" "test" { + subnet_arns = aws_subnet.test[*].arn + core_network_id = aws_networkmanager_core_network_policy_attachment.test.core_network_id + vpc_arn = aws_vpc.test.arn + + tags = var.resource_tags +} + +# testAccVPCAttachmentConfig_base + +resource "aws_networkmanager_global_network" "test" {} + +resource "aws_networkmanager_core_network" "test" { + global_network_id = aws_networkmanager_global_network.test.id +} + +resource "aws_networkmanager_core_network_policy_attachment" "test" { + core_network_id = aws_networkmanager_core_network.test.id + policy_document = data.aws_networkmanager_core_network_policy_document.test.json +} + +data "aws_region" "current" {} + +data "aws_networkmanager_core_network_policy_document" "test" { + core_network_configuration { + vpn_ecmp_support = false + asn_ranges = ["64512-64555"] + edge_locations { + location = data.aws_region.current.region + asn = 64512 + } + } + + segments { + name = "shared" + description = "SegmentForSharedServices" + require_attachment_acceptance = false + } + + segment_actions { + action = "share" + mode = "attachment-route" + segment = "shared" + share_with = ["*"] + } + + attachment_policies { + rule_number = 1 + + conditions { + type = "any" + } + + action { + association_method = "constant" + segment = "shared" + } + } +} + +# acctest.ConfigVPCWithSubnetsIPv6(rName, 2) + +resource "aws_vpc" "test" { + cidr_block = "10.0.0.0/16" + + assign_generated_ipv6_cidr_block = true +} + +resource "aws_subnet" "test" { + count = 2 + + vpc_id = aws_vpc.test.id + availability_zone = data.aws_availability_zones.available.names[count.index] + + cidr_block = cidrsubnet(aws_vpc.test.cidr_block, 8, count.index) + ipv6_cidr_block = cidrsubnet(aws_vpc.test.ipv6_cidr_block, 8, count.index) + assign_ipv6_address_on_creation = 
true +} + +# acctest.ConfigAvailableAZsNoOptInDefaultExclude + +data "aws_availability_zones" "available" { + exclude_zone_ids = local.default_exclude_zone_ids + state = "available" + + filter { + name = "opt-in-status" + values = ["opt-in-not-required"] + } +} + +locals { + default_exclude_zone_ids = ["usw2-az4", "usgw1-az2"] +} + +variable "resource_tags" { + description = "Tags to set on resource. To specify no tags, set to `null`" + # Not setting a default, so that this must explicitly be set to `null` to specify no tags + type = map(string) + nullable = true +} + +variable "provider_tags" { + type = map(string) + nullable = true + default = null +} + +variable "ignore_tag_keys" { + type = set(string) + nullable = false +} diff --git a/internal/service/networkmanager/testdata/tmpl/connect_attachment_tags.gtpl b/internal/service/networkmanager/testdata/tmpl/connect_attachment_tags.gtpl new file mode 100644 index 000000000000..9ab13160324f --- /dev/null +++ b/internal/service/networkmanager/testdata/tmpl/connect_attachment_tags.gtpl @@ -0,0 +1,84 @@ +resource "aws_networkmanager_connect_attachment" "test" { + core_network_id = aws_networkmanager_core_network.test.id + transport_attachment_id = aws_networkmanager_vpc_attachment.test.id + edge_location = aws_networkmanager_vpc_attachment.test.edge_location + options { + protocol = "GRE" + } + depends_on = [ + "aws_networkmanager_attachment_accepter.test" + ] +{{- template "tags" . 
}} +} + +resource "aws_networkmanager_attachment_accepter" "test2" { + attachment_id = aws_networkmanager_connect_attachment.test.id + attachment_type = aws_networkmanager_connect_attachment.test.attachment_type +} + +resource "aws_networkmanager_vpc_attachment" "test" { + subnet_arns = [aws_subnet.test[0].arn] + core_network_id = aws_networkmanager_core_network_policy_attachment.test.core_network_id + vpc_arn = aws_vpc.test.arn + tags = { + segment = "shared" + } +} + +resource "aws_networkmanager_attachment_accepter" "test" { + attachment_id = aws_networkmanager_vpc_attachment.test.id + attachment_type = aws_networkmanager_vpc_attachment.test.attachment_type +} + +# testAccConnectAttachmentConfig_base + +resource "aws_networkmanager_global_network" "test" {} + +resource "aws_networkmanager_core_network" "test" { + global_network_id = aws_networkmanager_global_network.test.id +} + +resource "aws_networkmanager_core_network_policy_attachment" "test" { + core_network_id = aws_networkmanager_core_network.test.id + policy_document = data.aws_networkmanager_core_network_policy_document.test.json +} + +data "aws_region" "current" {} + +data "aws_networkmanager_core_network_policy_document" "test" { + core_network_configuration { + vpn_ecmp_support = false + asn_ranges = ["64512-64555"] + edge_locations { + location = data.aws_region.current.region + asn = 64512 + } + } + segments { + name = "shared" + description = "SegmentForSharedServices" + require_attachment_acceptance = true + } + segment_actions { + action = "share" + mode = "attachment-route" + segment = "shared" + share_with = ["*"] + } + attachment_policies { + rule_number = 1 + condition_logic = "or" + conditions { + type = "tag-value" + operator = "equals" + key = "segment" + value = "shared" + } + action { + association_method = "constant" + segment = "shared" + } + } +} + +{{ template "acctest.ConfigVPCWithSubnetsIPv6" 2 }} \ No newline at end of file diff --git 
a/internal/service/networkmanager/testdata/tmpl/connect_peer_tags.gtpl b/internal/service/networkmanager/testdata/tmpl/connect_peer_tags.gtpl new file mode 100644 index 000000000000..aec5fe7876a3 --- /dev/null +++ b/internal/service/networkmanager/testdata/tmpl/connect_peer_tags.gtpl @@ -0,0 +1,105 @@ +resource "aws_networkmanager_connect_peer" "test" { + connect_attachment_id = aws_networkmanager_connect_attachment.test.id + peer_address = local.peer_address + bgp_options { + peer_asn = local.peer_asn + } + inside_cidr_blocks = local.inside_cidr_blocks +{{- template "tags" . }} +} + +locals { + inside_cidr_blocks = ["169.254.10.0/29"] + peer_address = "1.1.1.1" + peer_asn = "65501" + protocol = "GRE" +} + +# testAccConnectPeerConfig_base + +resource "aws_networkmanager_global_network" "test" {} + +resource "aws_networkmanager_core_network" "test" { + global_network_id = aws_networkmanager_global_network.test.id +} + +resource "aws_networkmanager_core_network_policy_attachment" "test" { + core_network_id = aws_networkmanager_core_network.test.id + policy_document = data.aws_networkmanager_core_network_policy_document.test.json +} + +data "aws_region" "current" {} + +data "aws_networkmanager_core_network_policy_document" "test" { + core_network_configuration { + vpn_ecmp_support = false + asn_ranges = ["64512-64555"] + inside_cidr_blocks = ["172.16.0.0/16"] + edge_locations { + location = data.aws_region.current.region + asn = 64512 + inside_cidr_blocks = ["172.16.0.0/18"] + } + } + segments { + name = "shared" + description = "SegmentForSharedServices" + require_attachment_acceptance = true + } + segment_actions { + action = "share" + mode = "attachment-route" + segment = "shared" + share_with = ["*"] + } + attachment_policies { + rule_number = 1 + condition_logic = "or" + conditions { + type = "tag-value" + operator = "equals" + key = "segment" + value = "shared" + } + action { + association_method = "constant" + segment = "shared" + } + } +} + +resource 
"aws_networkmanager_vpc_attachment" "test" { + subnet_arns = aws_subnet.test[*].arn + core_network_id = aws_networkmanager_core_network_policy_attachment.test.core_network_id + vpc_arn = aws_vpc.test.arn + tags = { + segment = "shared" + } +} + +resource "aws_networkmanager_attachment_accepter" "test" { + attachment_id = aws_networkmanager_vpc_attachment.test.id + attachment_type = aws_networkmanager_vpc_attachment.test.attachment_type +} + +resource "aws_networkmanager_connect_attachment" "test" { + core_network_id = aws_networkmanager_core_network.test.id + transport_attachment_id = aws_networkmanager_vpc_attachment.test.id + edge_location = aws_networkmanager_vpc_attachment.test.edge_location + options { + protocol = local.protocol + } + tags = { + segment = "shared" + } + depends_on = [ + "aws_networkmanager_attachment_accepter.test" + ] +} + +resource "aws_networkmanager_attachment_accepter" "test2" { + attachment_id = aws_networkmanager_connect_attachment.test.id + attachment_type = aws_networkmanager_connect_attachment.test.attachment_type +} + +{{ template "acctest.ConfigVPCWithSubnetsIPv6" 2 }} diff --git a/internal/service/networkmanager/testdata/tmpl/connection_tags.gtpl b/internal/service/networkmanager/testdata/tmpl/connection_tags.gtpl new file mode 100644 index 000000000000..1b7ec789b400 --- /dev/null +++ b/internal/service/networkmanager/testdata/tmpl/connection_tags.gtpl @@ -0,0 +1,27 @@ +resource "aws_networkmanager_connection" "test" { + global_network_id = aws_networkmanager_global_network.test.id + device_id = aws_networkmanager_device.test1.id + connected_device_id = aws_networkmanager_device.test2.id +{{- template "tags" . 
}} +} + +# testAccConnectionBaseConfig + +resource "aws_networkmanager_global_network" "test" {} + +resource "aws_networkmanager_site" "test" { + global_network_id = aws_networkmanager_global_network.test.id +} + +resource "aws_networkmanager_device" "test1" { + global_network_id = aws_networkmanager_global_network.test.id + site_id = aws_networkmanager_site.test.id +} + +resource "aws_networkmanager_device" "test2" { + global_network_id = aws_networkmanager_global_network.test.id + site_id = aws_networkmanager_site.test.id + + # Create one device at a time. + depends_on = [aws_networkmanager_device.test1] +} diff --git a/internal/service/networkmanager/testdata/tmpl/core_network_tags.gtpl b/internal/service/networkmanager/testdata/tmpl/core_network_tags.gtpl new file mode 100644 index 000000000000..faf99abe7fa1 --- /dev/null +++ b/internal/service/networkmanager/testdata/tmpl/core_network_tags.gtpl @@ -0,0 +1,6 @@ +resource "aws_networkmanager_core_network" "test" { + global_network_id = aws_networkmanager_global_network.test.id +{{- template "tags" . }} +} + +resource "aws_networkmanager_global_network" "test" {} diff --git a/internal/service/networkmanager/testdata/tmpl/device_tags.gtpl b/internal/service/networkmanager/testdata/tmpl/device_tags.gtpl new file mode 100644 index 000000000000..ca414b490676 --- /dev/null +++ b/internal/service/networkmanager/testdata/tmpl/device_tags.gtpl @@ -0,0 +1,6 @@ +resource "aws_networkmanager_device" "test" { + global_network_id = aws_networkmanager_global_network.test.id +{{- template "tags" . 
}} +} + +resource "aws_networkmanager_global_network" "test" {} diff --git a/internal/service/networkmanager/testdata/tmpl/dx_gateway_attachment_tags.gtpl b/internal/service/networkmanager/testdata/tmpl/dx_gateway_attachment_tags.gtpl new file mode 100644 index 000000000000..3187b66d397c --- /dev/null +++ b/internal/service/networkmanager/testdata/tmpl/dx_gateway_attachment_tags.gtpl @@ -0,0 +1,63 @@ +resource "aws_networkmanager_dx_gateway_attachment" "test" { + core_network_id = aws_networkmanager_core_network_policy_attachment.test.core_network_id + direct_connect_gateway_arn = aws_dx_gateway.test.arn + edge_locations = [data.aws_region.current.region] +{{- template "tags" . }} +} + +# testAccDirectConnectGatewayAttachmentConfig_base + +resource "aws_dx_gateway" "test" { + name = var.rName + amazon_side_asn = 65000 +} + +resource "aws_networkmanager_global_network" "test" {} + +resource "aws_networkmanager_core_network" "test" { + global_network_id = aws_networkmanager_global_network.test.id +} + +resource "aws_networkmanager_core_network_policy_attachment" "test" { + core_network_id = aws_networkmanager_core_network.test.id + policy_document = data.aws_networkmanager_core_network_policy_document.test.json +} + +data "aws_region" "current" {} + +data "aws_networkmanager_core_network_policy_document" "test" { + core_network_configuration { + vpn_ecmp_support = false + asn_ranges = ["64512-64555"] + edge_locations { + location = data.aws_region.current.region + asn = 64512 + } + } + + segments { + name = "shared" + description = "SegmentForSharedServices" + require_attachment_acceptance = false + } + + segment_actions { + action = "share" + mode = "attachment-route" + segment = "shared" + share_with = ["*"] + } + + attachment_policies { + rule_number = 1 + + conditions { + type = "any" + } + + action { + association_method = "constant" + segment = "shared" + } + } +} diff --git a/internal/service/networkmanager/testdata/tmpl/global_network_tags.gtpl 
b/internal/service/networkmanager/testdata/tmpl/global_network_tags.gtpl new file mode 100644 index 000000000000..53b724e41543 --- /dev/null +++ b/internal/service/networkmanager/testdata/tmpl/global_network_tags.gtpl @@ -0,0 +1,3 @@ +resource "aws_networkmanager_global_network" "test" { +{{- template "tags" . }} +} diff --git a/internal/service/networkmanager/testdata/tmpl/link_tags.gtpl b/internal/service/networkmanager/testdata/tmpl/link_tags.gtpl new file mode 100644 index 000000000000..894e8f3383d9 --- /dev/null +++ b/internal/service/networkmanager/testdata/tmpl/link_tags.gtpl @@ -0,0 +1,16 @@ +resource "aws_networkmanager_link" "test" { + global_network_id = aws_networkmanager_global_network.test.id + site_id = aws_networkmanager_site.test.id + + bandwidth { + download_speed = 50 + upload_speed = 10 + } +{{- template "tags" . }} +} + +resource "aws_networkmanager_site" "test" { + global_network_id = aws_networkmanager_global_network.test.id +} + +resource "aws_networkmanager_global_network" "test" {} diff --git a/internal/service/networkmanager/testdata/tmpl/site_tags.gtpl b/internal/service/networkmanager/testdata/tmpl/site_tags.gtpl new file mode 100644 index 000000000000..2c352a45bb33 --- /dev/null +++ b/internal/service/networkmanager/testdata/tmpl/site_tags.gtpl @@ -0,0 +1,6 @@ +resource "aws_networkmanager_site" "test" { + global_network_id = aws_networkmanager_global_network.test.id +{{- template "tags" . 
}} +} + +resource "aws_networkmanager_global_network" "test" {} diff --git a/internal/service/networkmanager/testdata/tmpl/site_to_site_vpn_attachment_tags.gtpl b/internal/service/networkmanager/testdata/tmpl/site_to_site_vpn_attachment_tags.gtpl new file mode 100644 index 000000000000..0bb64c116877 --- /dev/null +++ b/internal/service/networkmanager/testdata/tmpl/site_to_site_vpn_attachment_tags.gtpl @@ -0,0 +1,78 @@ +resource "aws_networkmanager_site_to_site_vpn_attachment" "test" { + core_network_id = aws_networkmanager_core_network_policy_attachment.test.core_network_id + vpn_connection_arn = aws_vpn_connection.test.arn +{{- template "tags" . }} +} + +resource "aws_networkmanager_attachment_accepter" "test" { + attachment_id = aws_networkmanager_site_to_site_vpn_attachment.test.id + attachment_type = aws_networkmanager_site_to_site_vpn_attachment.test.attachment_type +} + +# testAccSiteToSiteVPNAttachmentConfig_base + +resource "aws_customer_gateway" "test" { + bgp_asn = var.rBgpAsn + ip_address = var.rIPv4Address + type = "ipsec.1" + device_name = var.rName +} + +resource "aws_vpn_connection" "test" { + customer_gateway_id = aws_customer_gateway.test.id + type = "ipsec.1" +} + +resource "aws_networkmanager_global_network" "test" {} + +resource "aws_networkmanager_core_network" "test" { + global_network_id = aws_networkmanager_global_network.test.id +} + +resource "aws_networkmanager_core_network_policy_attachment" "test" { + core_network_id = aws_networkmanager_core_network.test.id + policy_document = data.aws_networkmanager_core_network_policy_document.test.json +} + +data "aws_region" "current" {} + +data "aws_networkmanager_core_network_policy_document" "test" { + core_network_configuration { + vpn_ecmp_support = false + asn_ranges = ["64512-64555"] + edge_locations { + location = data.aws_region.current.region + asn = 64512 + } + } + + segments { + name = "shared" + description = "SegmentForSharedServices" + require_attachment_acceptance = true + } + + 
segment_actions { + action = "share" + mode = "attachment-route" + segment = "shared" + share_with = ["*"] + } + + attachment_policies { + rule_number = 1 + condition_logic = "or" + + conditions { + type = "tag-value" + operator = "equals" + key = "segment" + value = "shared" + } + + action { + association_method = "constant" + segment = "shared" + } + } +} diff --git a/internal/service/networkmanager/testdata/tmpl/transit_gateway_peering_tags.gtpl b/internal/service/networkmanager/testdata/tmpl/transit_gateway_peering_tags.gtpl new file mode 100644 index 000000000000..47832776016b --- /dev/null +++ b/internal/service/networkmanager/testdata/tmpl/transit_gateway_peering_tags.gtpl @@ -0,0 +1,46 @@ +resource "aws_networkmanager_transit_gateway_peering" "test" { + core_network_id = aws_networkmanager_core_network.test.id + transit_gateway_arn = aws_ec2_transit_gateway.test.arn + + depends_on = [ + aws_ec2_transit_gateway_policy_table.test, + aws_networkmanager_core_network_policy_attachment.test, + ] +{{- template "tags" . }} +} + +# testAccTransitGatewayPeeringConfig_base + +data "aws_region" "current" {} + +resource "aws_ec2_transit_gateway" "test" {} + +resource "aws_ec2_transit_gateway_policy_table" "test" { + transit_gateway_id = aws_ec2_transit_gateway.test.id +} + +resource "aws_networkmanager_global_network" "test" {} + +resource "aws_networkmanager_core_network" "test" { + global_network_id = aws_networkmanager_global_network.test.id +} + +resource "aws_networkmanager_core_network_policy_attachment" "test" { + core_network_id = aws_networkmanager_core_network.test.id + policy_document = data.aws_networkmanager_core_network_policy_document.test.json +} + +data "aws_networkmanager_core_network_policy_document" "test" { + core_network_configuration { + # Don't overlap with default TGW ASN: 64512. 
+ asn_ranges = ["65022-65534"] + + edge_locations { + location = data.aws_region.current.region + } + } + + segments { + name = "test" + } +} diff --git a/internal/service/networkmanager/testdata/tmpl/transit_gateway_route_table_attachment_tags.gtpl b/internal/service/networkmanager/testdata/tmpl/transit_gateway_route_table_attachment_tags.gtpl new file mode 100644 index 000000000000..c6710d56ce71 --- /dev/null +++ b/internal/service/networkmanager/testdata/tmpl/transit_gateway_route_table_attachment_tags.gtpl @@ -0,0 +1,69 @@ +resource "aws_networkmanager_transit_gateway_route_table_attachment" "test" { + peering_id = aws_networkmanager_transit_gateway_peering.test.id + transit_gateway_route_table_arn = aws_ec2_transit_gateway_route_table.test.arn + + depends_on = [aws_ec2_transit_gateway_policy_table_association.test] +{{- template "tags" . }} +} + +resource "aws_networkmanager_attachment_accepter" "test" { + attachment_id = aws_networkmanager_transit_gateway_route_table_attachment.test.id + attachment_type = aws_networkmanager_transit_gateway_route_table_attachment.test.attachment_type +} + +# testAccTransitGatewayRouteTableAttachmentConfig_base + +resource "aws_networkmanager_transit_gateway_peering" "test" { + core_network_id = aws_networkmanager_core_network.test.id + transit_gateway_arn = aws_ec2_transit_gateway.test.arn + + depends_on = [ + aws_ec2_transit_gateway_policy_table.test, + aws_networkmanager_core_network_policy_attachment.test, + ] +} + +resource "aws_ec2_transit_gateway_route_table" "test" { + transit_gateway_id = aws_ec2_transit_gateway.test.id +} + +resource "aws_ec2_transit_gateway_policy_table_association" "test" { + transit_gateway_attachment_id = aws_networkmanager_transit_gateway_peering.test.transit_gateway_peering_attachment_id + transit_gateway_policy_table_id = aws_ec2_transit_gateway_policy_table.test.id +} + +# testAccTransitGatewayPeeringConfig_base + +resource "aws_ec2_transit_gateway" "test" {} + +resource 
"aws_ec2_transit_gateway_policy_table" "test" { + transit_gateway_id = aws_ec2_transit_gateway.test.id +} + +resource "aws_networkmanager_global_network" "test" {} + +resource "aws_networkmanager_core_network" "test" { + global_network_id = aws_networkmanager_global_network.test.id +} + +data "aws_region" "current" {} + +resource "aws_networkmanager_core_network_policy_attachment" "test" { + core_network_id = aws_networkmanager_core_network.test.id + policy_document = data.aws_networkmanager_core_network_policy_document.test.json +} + +data "aws_networkmanager_core_network_policy_document" "test" { + core_network_configuration { + # Don't overlap with default TGW ASN: 64512. + asn_ranges = ["65022-65534"] + + edge_locations { + location = data.aws_region.current.region + } + } + + segments { + name = "test" + } +} diff --git a/internal/service/networkmanager/testdata/tmpl/vpc_attachment_tags.gtpl b/internal/service/networkmanager/testdata/tmpl/vpc_attachment_tags.gtpl new file mode 100644 index 000000000000..5da89196fb9f --- /dev/null +++ b/internal/service/networkmanager/testdata/tmpl/vpc_attachment_tags.gtpl @@ -0,0 +1,60 @@ +resource "aws_networkmanager_vpc_attachment" "test" { + subnet_arns = aws_subnet.test[*].arn + core_network_id = aws_networkmanager_core_network_policy_attachment.test.core_network_id + vpc_arn = aws_vpc.test.arn +{{- template "tags" . 
}} +} + +# testAccVPCAttachmentConfig_base + +resource "aws_networkmanager_global_network" "test" {} + +resource "aws_networkmanager_core_network" "test" { + global_network_id = aws_networkmanager_global_network.test.id +} + +resource "aws_networkmanager_core_network_policy_attachment" "test" { + core_network_id = aws_networkmanager_core_network.test.id + policy_document = data.aws_networkmanager_core_network_policy_document.test.json +} + +data "aws_region" "current" {} + +data "aws_networkmanager_core_network_policy_document" "test" { + core_network_configuration { + vpn_ecmp_support = false + asn_ranges = ["64512-64555"] + edge_locations { + location = data.aws_region.current.region + asn = 64512 + } + } + + segments { + name = "shared" + description = "SegmentForSharedServices" + require_attachment_acceptance = false + } + + segment_actions { + action = "share" + mode = "attachment-route" + segment = "shared" + share_with = ["*"] + } + + attachment_policies { + rule_number = 1 + + conditions { + type = "any" + } + + action { + association_method = "constant" + segment = "shared" + } + } +} + +{{ template "acctest.ConfigVPCWithSubnetsIPv6" 2 }} diff --git a/internal/service/networkmanager/transit_gateway_peering.go b/internal/service/networkmanager/transit_gateway_peering.go index 5d362b06cc54..688fc3866a16 100644 --- a/internal/service/networkmanager/transit_gateway_peering.go +++ b/internal/service/networkmanager/transit_gateway_peering.go @@ -26,6 +26,9 @@ import ( // @SDKResource("aws_networkmanager_transit_gateway_peering", name="Transit Gateway Peering") // @Tags(identifierAttribute="arn") +// @Testing(existsType="github.com/aws/aws-sdk-go-v2/service/networkmanager/types;awstypes;awstypes.TransitGatewayPeering") +// @Testing(skipEmptyTags=true) +// @Testing(generator=false) func resourceTransitGatewayPeering() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceTransitGatewayPeeringCreate, @@ -100,7 +103,6 @@ func 
resourceTransitGatewayPeeringCreate(ctx context.Context, d *schema.Resource TransitGatewayArn: aws.String(transitGatewayARN), } - log.Printf("[DEBUG] Creating Network Manager Transit Gateway Peering: %#v", input) output, err := conn.CreateTransitGatewayPeering(ctx, input) if err != nil { @@ -157,7 +159,6 @@ func resourceTransitGatewayPeeringDelete(ctx context.Context, d *schema.Resource var diags diag.Diagnostics conn := meta.(*conns.AWSClient).NetworkManagerClient(ctx) - log.Printf("[DEBUG] Deleting Network Manager Transit Gateway Peering: %s", d.Id()) _, err := conn.DeletePeering(ctx, &networkmanager.DeletePeeringInput{ PeeringId: aws.String(d.Id()), }) @@ -220,10 +221,12 @@ func statusTransitGatewayPeeringState(ctx context.Context, conn *networkmanager. func waitTransitGatewayPeeringCreated(ctx context.Context, conn *networkmanager.Client, id string, timeout time.Duration) (*awstypes.TransitGatewayPeering, error) { stateConf := &retry.StateChangeConf{ - Pending: enum.Slice(awstypes.PeeringStateCreating), - Target: enum.Slice(awstypes.PeeringStateAvailable), - Timeout: timeout, - Refresh: statusTransitGatewayPeeringState(ctx, conn, id), + Pending: enum.Slice(awstypes.PeeringStateCreating), + Target: enum.Slice(awstypes.PeeringStateAvailable), + Timeout: timeout, + Delay: 5 * time.Minute, + MinTimeout: 10 * time.Second, + Refresh: statusTransitGatewayPeeringState(ctx, conn, id), } outputRaw, err := stateConf.WaitForStateContext(ctx) @@ -239,10 +242,12 @@ func waitTransitGatewayPeeringCreated(ctx context.Context, conn *networkmanager. 
func waitTransitGatewayPeeringDeleted(ctx context.Context, conn *networkmanager.Client, id string, timeout time.Duration) (*awstypes.TransitGatewayPeering, error) { stateConf := &retry.StateChangeConf{ - Pending: enum.Slice(awstypes.PeeringStateDeleting), - Target: []string{}, - Timeout: timeout, - Refresh: statusTransitGatewayPeeringState(ctx, conn, id), + Pending: enum.Slice(awstypes.PeeringStateDeleting), + Target: []string{}, + Timeout: timeout, + Delay: 3 * time.Minute, + MinTimeout: 10 * time.Second, + Refresh: statusTransitGatewayPeeringState(ctx, conn, id), } outputRaw, err := stateConf.WaitForStateContext(ctx) diff --git a/internal/service/networkmanager/transit_gateway_peering_tags_gen_test.go b/internal/service/networkmanager/transit_gateway_peering_tags_gen_test.go new file mode 100644 index 000000000000..96ab45eacdce --- /dev/null +++ b/internal/service/networkmanager/transit_gateway_peering_tags_gen_test.go @@ -0,0 +1,2205 @@ +// Code generated by internal/generate/tagstests/main.go; DO NOT EDIT. 
+ +package networkmanager_test + +import ( + "testing" + + awstypes "github.com/aws/aws-sdk-go-v2/service/networkmanager/types" + "github.com/hashicorp/terraform-plugin-testing/config" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/knownvalue" + "github.com/hashicorp/terraform-plugin-testing/plancheck" + "github.com/hashicorp/terraform-plugin-testing/statecheck" + "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccNetworkManagerTransitGatewayPeering_tags(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.TransitGatewayPeering + resourceName := "aws_networkmanager_transit_gateway_peering.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckTransitGatewayPeeringDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + ConfigDirectory: config.StaticDirectory("testdata/TransitGatewayPeering/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTransitGatewayPeeringExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: 
resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/TransitGatewayPeering/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/TransitGatewayPeering/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1Updated), + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTransitGatewayPeeringExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1Updated), + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1Updated), + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ 
+ plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1Updated), + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1Updated), + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/TransitGatewayPeering/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1Updated), + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/TransitGatewayPeering/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTransitGatewayPeeringExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, 
plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/TransitGatewayPeering/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/TransitGatewayPeering/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTransitGatewayPeeringExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/TransitGatewayPeering/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: nil, + }, + ResourceName: 
resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccNetworkManagerTransitGatewayPeering_tags_null(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.TransitGatewayPeering + resourceName := "aws_networkmanager_transit_gateway_peering.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckTransitGatewayPeeringDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + ConfigDirectory: config.StaticDirectory("testdata/TransitGatewayPeering/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: nil, + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTransitGatewayPeeringExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + // TODO: Should be known + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/TransitGatewayPeering/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: nil, + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + ConfigDirectory: 
config.StaticDirectory("testdata/TransitGatewayPeering/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: nil, + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + }, + }, + }) +} + +func TestAccNetworkManagerTransitGatewayPeering_tags_EmptyMap(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.TransitGatewayPeering + resourceName := "aws_networkmanager_transit_gateway_peering.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckTransitGatewayPeeringDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + ConfigDirectory: config.StaticDirectory("testdata/TransitGatewayPeering/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{}), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTransitGatewayPeeringExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + // TODO: Should be known + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + }, + }, + { + 
ConfigDirectory: config.StaticDirectory("testdata/TransitGatewayPeering/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{}), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/TransitGatewayPeering/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: nil, + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + }, + }, + }) +} + +func TestAccNetworkManagerTransitGatewayPeering_tags_AddOnUpdate(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.TransitGatewayPeering + resourceName := "aws_networkmanager_transit_gateway_peering.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckTransitGatewayPeeringDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + ConfigDirectory: config.StaticDirectory("testdata/TransitGatewayPeering/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTransitGatewayPeeringExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, 
plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + // TODO: Should be known + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/TransitGatewayPeering/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTransitGatewayPeeringExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/TransitGatewayPeering/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) 
+} + +func TestAccNetworkManagerTransitGatewayPeering_tags_EmptyTag_OnCreate(t *testing.T) { + t.Skip("Resource TransitGatewayPeering does not support empty tags") + + ctx := acctest.Context(t) + + var v awstypes.TransitGatewayPeering + resourceName := "aws_networkmanager_transit_gateway_peering.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckTransitGatewayPeeringDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + ConfigDirectory: config.StaticDirectory("testdata/TransitGatewayPeering/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTransitGatewayPeeringExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + // TODO: Should be known + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/TransitGatewayPeering/tags/"), + ConfigVariables: config.Variables{ + 
acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/TransitGatewayPeering/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTransitGatewayPeeringExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/TransitGatewayPeering/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccNetworkManagerTransitGatewayPeering_tags_EmptyTag_OnUpdate_Add(t *testing.T) { + t.Skip("Resource TransitGatewayPeering does not support empty tags") + + ctx := acctest.Context(t) + + var v awstypes.TransitGatewayPeering + resourceName := "aws_networkmanager_transit_gateway_peering.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: 
testAccCheckTransitGatewayPeeringDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + ConfigDirectory: config.StaticDirectory("testdata/TransitGatewayPeering/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTransitGatewayPeeringExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/TransitGatewayPeering/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + acctest.CtKey2: config.StringVariable(""), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTransitGatewayPeeringExists(ctx, resourceName, &v), + ), + ConfigStateChecks: 
[]statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + acctest.CtKey2: knownvalue.StringExact(""), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + acctest.CtKey2: knownvalue.StringExact(""), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + acctest.CtKey2: knownvalue.StringExact(""), + })), + // TODO: Should be known + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/TransitGatewayPeering/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + acctest.CtKey2: config.StringVariable(""), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/TransitGatewayPeering/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTransitGatewayPeeringExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + 
acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/TransitGatewayPeering/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccNetworkManagerTransitGatewayPeering_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { + t.Skip("Resource TransitGatewayPeering does not support empty tags") + + ctx := acctest.Context(t) + + var v awstypes.TransitGatewayPeering + resourceName := "aws_networkmanager_transit_gateway_peering.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckTransitGatewayPeeringDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + ConfigDirectory: config.StaticDirectory("testdata/TransitGatewayPeering/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: 
config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTransitGatewayPeeringExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/TransitGatewayPeering/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTransitGatewayPeeringExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: 
knownvalue.StringExact(""), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + // TODO: Should be known + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/TransitGatewayPeering/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccNetworkManagerTransitGatewayPeering_tags_DefaultTags_providerOnly(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.TransitGatewayPeering + resourceName := "aws_networkmanager_transit_gateway_peering.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckTransitGatewayPeeringDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/TransitGatewayPeering/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTransitGatewayPeeringExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + 
statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/TransitGatewayPeering/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/TransitGatewayPeering/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1Updated), + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTransitGatewayPeeringExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: 
knownvalue.StringExact(acctest.CtValue1Updated), + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1Updated), + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/TransitGatewayPeering/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1Updated), + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/TransitGatewayPeering/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTransitGatewayPeeringExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + 
acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/TransitGatewayPeering/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/TransitGatewayPeering/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTransitGatewayPeeringExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + 
plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/TransitGatewayPeering/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccNetworkManagerTransitGatewayPeering_tags_DefaultTags_nonOverlapping(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.TransitGatewayPeering + resourceName := "aws_networkmanager_transit_gateway_peering.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckTransitGatewayPeeringDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/TransitGatewayPeering/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTransitGatewayPeeringExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: 
knownvalue.StringExact(acctest.CtProviderValue1), + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1), + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/TransitGatewayPeering/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/TransitGatewayPeering/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1Updated), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: config.StringVariable(acctest.CtResourceValue2), + }), + }, + 
Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTransitGatewayPeeringExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1Updated), + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1Updated), + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/TransitGatewayPeering/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: 
config.StringVariable(acctest.CtProviderValue1Updated), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: config.StringVariable(acctest.CtResourceValue2), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/TransitGatewayPeering/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTransitGatewayPeeringExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/TransitGatewayPeering/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccNetworkManagerTransitGatewayPeering_tags_DefaultTags_overlapping(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.TransitGatewayPeering + resourceName := 
"aws_networkmanager_transit_gateway_peering.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckTransitGatewayPeeringDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/TransitGatewayPeering/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtResourceValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTransitGatewayPeeringExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + }, + }, + 
{ + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/TransitGatewayPeering/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtResourceValue1), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/TransitGatewayPeering/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtProviderValue1), + acctest.CtOverlapKey2: config.StringVariable("providervalue2"), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtResourceValue1), + acctest.CtOverlapKey2: config.StringVariable(acctest.CtResourceValue2), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTransitGatewayPeeringExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + acctest.CtOverlapKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + acctest.CtOverlapKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + ConfigPlanChecks: 
resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + acctest.CtOverlapKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + acctest.CtOverlapKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/TransitGatewayPeering/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtProviderValue1), + acctest.CtOverlapKey2: config.StringVariable("providervalue2"), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtResourceValue1), + acctest.CtOverlapKey2: config.StringVariable(acctest.CtResourceValue2), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/TransitGatewayPeering/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtResourceValue2), + }), + }, + Check: 
resource.ComposeAggregateTestCheckFunc( + testAccCheckTransitGatewayPeeringExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue2), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/TransitGatewayPeering/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtResourceValue2), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccNetworkManagerTransitGatewayPeering_tags_DefaultTags_updateToProviderOnly(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.TransitGatewayPeering + resourceName := 
"aws_networkmanager_transit_gateway_peering.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckTransitGatewayPeeringDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/TransitGatewayPeering/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTransitGatewayPeeringExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/TransitGatewayPeering/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: 
config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTransitGatewayPeeringExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/TransitGatewayPeering/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccNetworkManagerTransitGatewayPeering_tags_DefaultTags_updateToResourceOnly(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.TransitGatewayPeering + resourceName := "aws_networkmanager_transit_gateway_peering.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, 
names.NetworkManagerServiceID), + CheckDestroy: testAccCheckTransitGatewayPeeringDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/TransitGatewayPeering/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTransitGatewayPeeringExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/TransitGatewayPeering/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTransitGatewayPeeringExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), 
knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/TransitGatewayPeering/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccNetworkManagerTransitGatewayPeering_tags_DefaultTags_emptyResourceTag(t *testing.T) { + t.Skip("Resource TransitGatewayPeering does not support empty tags") + + ctx := acctest.Context(t) + + var v awstypes.TransitGatewayPeering + resourceName := "aws_networkmanager_transit_gateway_peering.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckTransitGatewayPeeringDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: 
config.StaticDirectory("testdata/TransitGatewayPeering/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTransitGatewayPeeringExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + // TODO: Should be known + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/TransitGatewayPeering/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func 
TestAccNetworkManagerTransitGatewayPeering_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T) { + t.Skip("Resource TransitGatewayPeering does not support empty tags") + + ctx := acctest.Context(t) + + var v awstypes.TransitGatewayPeering + resourceName := "aws_networkmanager_transit_gateway_peering.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckTransitGatewayPeeringDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/TransitGatewayPeering/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTransitGatewayPeeringExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + // TODO: Should be known + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/TransitGatewayPeering/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: 
config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccNetworkManagerTransitGatewayPeering_tags_DefaultTags_nullOverlappingResourceTag(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.TransitGatewayPeering + resourceName := "aws_networkmanager_transit_gateway_peering.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckTransitGatewayPeeringDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/TransitGatewayPeering/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: nil, + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTransitGatewayPeeringExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtProviderValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), 
knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtProviderValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/TransitGatewayPeering/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: nil, + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccNetworkManagerTransitGatewayPeering_tags_DefaultTags_nullNonOverlappingResourceTag(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.TransitGatewayPeering + resourceName := "aws_networkmanager_transit_gateway_peering.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckTransitGatewayPeeringDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/TransitGatewayPeering/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: nil, + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTransitGatewayPeeringExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, 
tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/TransitGatewayPeering/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: nil, + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccNetworkManagerTransitGatewayPeering_tags_ComputedTag_OnCreate(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.TransitGatewayPeering + resourceName := "aws_networkmanager_transit_gateway_peering.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckTransitGatewayPeeringDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/TransitGatewayPeering/tagsComputed1/"), + ConfigVariables: config.Variables{ + "unknownTagKey": config.StringVariable("computedkey1"), + }, + Check: 
resource.ComposeAggregateTestCheckFunc( + testAccCheckTransitGatewayPeeringExists(ctx, resourceName, &v), + resource.TestCheckResourceAttrPair(resourceName, "tags.computedkey1", "null_resource.test", names.AttrID), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapSizeExact(1)), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapSizeExact(1)), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTags)), + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/TransitGatewayPeering/tagsComputed1/"), + ConfigVariables: config.Variables{ + "unknownTagKey": config.StringVariable("computedkey1"), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccNetworkManagerTransitGatewayPeering_tags_ComputedTag_OnUpdate_Add(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.TransitGatewayPeering + resourceName := "aws_networkmanager_transit_gateway_peering.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckTransitGatewayPeeringDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + 
ConfigDirectory: config.StaticDirectory("testdata/TransitGatewayPeering/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTransitGatewayPeeringExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/TransitGatewayPeering/tagsComputed2/"), + ConfigVariables: config.Variables{ + "unknownTagKey": config.StringVariable("computedkey1"), + "knownTagKey": config.StringVariable(acctest.CtKey1), + "knownTagValue": config.StringVariable(acctest.CtValue1), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTransitGatewayPeeringExists(ctx, resourceName, &v), + resource.TestCheckResourceAttrPair(resourceName, "tags.computedkey1", "null_resource.test", names.AttrID), + ), + 
ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapSizeExact(2)), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapPartial(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapSizeExact(2)), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapPartial(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTags)), + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/TransitGatewayPeering/tagsComputed2/"), + ConfigVariables: config.Variables{ + "unknownTagKey": config.StringVariable("computedkey1"), + "knownTagKey": config.StringVariable(acctest.CtKey1), + "knownTagValue": config.StringVariable(acctest.CtValue1), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccNetworkManagerTransitGatewayPeering_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.TransitGatewayPeering + resourceName := "aws_networkmanager_transit_gateway_peering.test" + + acctest.ParallelTest(ctx, t, 
resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckTransitGatewayPeeringDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/TransitGatewayPeering/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTransitGatewayPeeringExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/TransitGatewayPeering/tagsComputed1/"), + ConfigVariables: config.Variables{ + "unknownTagKey": config.StringVariable(acctest.CtKey1), + }, + Check: resource.ComposeAggregateTestCheckFunc( + 
testAccCheckTransitGatewayPeeringExists(ctx, resourceName, &v), + resource.TestCheckResourceAttrPair(resourceName, acctest.CtTagsKey1, "null_resource.test", names.AttrID), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapSizeExact(1)), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapSizeExact(1)), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTags)), + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/TransitGatewayPeering/tagsComputed1/"), + ConfigVariables: config.Variables{ + "unknownTagKey": config.StringVariable(acctest.CtKey1), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccNetworkManagerTransitGatewayPeering_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.TransitGatewayPeering + resourceName := "aws_networkmanager_transit_gateway_peering.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckTransitGatewayPeeringDestroy(ctx), + Steps: []resource.TestStep{ + // 1: Create + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: 
config.StaticDirectory("testdata/TransitGatewayPeering/tags_ignore/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1), + }), + "ignore_tag_keys": config.SetVariable( + config.StringVariable(acctest.CtProviderKey1), + ), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTransitGatewayPeeringExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + expectFullResourceTags(ctx, resourceName, knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1), // TODO: Should not be set + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + PostApplyPreRefresh: 
[]plancheck.PlanCheck{ + plancheck.ExpectEmptyPlan(), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectEmptyPlan(), + }, + }, + }, + // 2: Update ignored tag only + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/TransitGatewayPeering/tags_ignore/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1Updated), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1), + }), + "ignore_tag_keys": config.SetVariable( + config.StringVariable(acctest.CtProviderKey1), + ), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTransitGatewayPeeringExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + expectFullResourceTags(ctx, resourceName, knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1), // TODO: Should not be set + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: 
knownvalue.StringExact(acctest.CtResourceValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectEmptyPlan(), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectEmptyPlan(), + }, + }, + }, + // 3: Update both tags + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/TransitGatewayPeering/tags_ignore/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1Again), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1Updated), + }), + "ignore_tag_keys": config.SetVariable( + config.StringVariable(acctest.CtProviderKey1), + ), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTransitGatewayPeeringExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + })), + expectFullResourceTags(ctx, resourceName, knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1), // TODO: Should not be set + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + })), + }, + ConfigPlanChecks: 
resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + })), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectEmptyPlan(), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectEmptyPlan(), + }, + }, + }, + }, + }) +} + +func TestAccNetworkManagerTransitGatewayPeering_tags_IgnoreTags_Overlap_ResourceTag(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.TransitGatewayPeering + resourceName := "aws_networkmanager_transit_gateway_peering.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckTransitGatewayPeeringDestroy(ctx), + Steps: []resource.TestStep{ + // 1: Create + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/TransitGatewayPeering/tags_ignore/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1), + acctest.CtResourceKey2: config.StringVariable(acctest.CtResourceValue2), + }), + "ignore_tag_keys": config.SetVariable( + config.StringVariable(acctest.CtResourceKey1), + ), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTransitGatewayPeeringExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + 
statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + expectFullResourceTags(ctx, resourceName, knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), // TODO: Should not be set + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + 
PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + }, + ExpectNonEmptyPlan: true, + }, + // 2: Update ignored tag + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/TransitGatewayPeering/tags_ignore/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: config.StringVariable(acctest.CtResourceValue2), + }), + "ignore_tag_keys": config.SetVariable( + config.StringVariable(acctest.CtResourceKey1), + ), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTransitGatewayPeeringExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + expectFullResourceTags(ctx, resourceName, knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), // TODO: Should not be set + 
acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: 
knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + }, + ExpectNonEmptyPlan: true, + }, + // 3: Update both tags + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/TransitGatewayPeering/tags_ignore/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1Again), + acctest.CtResourceKey2: config.StringVariable(acctest.CtResourceValue2Updated), + }), + "ignore_tag_keys": config.SetVariable( + config.StringVariable(acctest.CtResourceKey1), + ), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTransitGatewayPeeringExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + expectFullResourceTags(ctx, resourceName, knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), // TODO: Should not be set + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Again), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), 
+ })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Again), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Again), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + }, + }, + ExpectNonEmptyPlan: true, + }, + }, + }) +} diff --git a/internal/service/networkmanager/transit_gateway_peering_test.go b/internal/service/networkmanager/transit_gateway_peering_test.go index a961f3ff30ca..a80b956431bb 100644 --- a/internal/service/networkmanager/transit_gateway_peering_test.go +++ b/internal/service/networkmanager/transit_gateway_peering_test.go @@ -20,16 +20,6 @@ import ( "github.com/hashicorp/terraform-provider-aws/names" ) -func init() { - 
acctest.RegisterServiceErrorCheckFunc(names.NetworkManagerServiceID, testAccErrorCheckSkip) -} - -func testAccErrorCheckSkip(t *testing.T) resource.ErrorCheckFunc { - return acctest.ErrorCheckSkipMessagesMatches(t, - regexache.MustCompile(`Core Network edge location \([0-9a-z-]+\) not available`), - ) -} - func TestAccNetworkManagerTransitGatewayPeering_basic(t *testing.T) { ctx := acctest.Context(t) var v awstypes.TransitGatewayPeering @@ -91,52 +81,6 @@ func TestAccNetworkManagerTransitGatewayPeering_disappears(t *testing.T) { }) } -func TestAccNetworkManagerTransitGatewayPeering_tags(t *testing.T) { - ctx := acctest.Context(t) - var v awstypes.TransitGatewayPeering - resourceName := "aws_networkmanager_transit_gateway_peering.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckTransitGatewayPeeringDestroy(ctx), - Steps: []resource.TestStep{ - { - Config: testAccTransitGatewayPeeringConfig_tags1(rName, acctest.CtKey1, acctest.CtValue1), - Check: resource.ComposeTestCheckFunc( - testAccCheckTransitGatewayPeeringExists(ctx, resourceName, &v), - resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, "1"), - resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey1, acctest.CtValue1), - ), - }, - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - { - Config: testAccTransitGatewayPeeringConfig_tags2(rName, acctest.CtKey1, acctest.CtValue1Updated, acctest.CtKey2, acctest.CtValue2), - Check: resource.ComposeTestCheckFunc( - testAccCheckTransitGatewayPeeringExists(ctx, resourceName, &v), - resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, "2"), - resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey1, 
acctest.CtValue1Updated), - resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey2, acctest.CtValue2), - ), - }, - { - Config: testAccTransitGatewayPeeringConfig_tags1(rName, acctest.CtKey2, acctest.CtValue2), - Check: resource.ComposeTestCheckFunc( - testAccCheckTransitGatewayPeeringExists(ctx, resourceName, &v), - resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, "1"), - resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey2, acctest.CtValue2), - ), - }, - }, - }) -} - func testAccCheckTransitGatewayPeeringExists(ctx context.Context, n string, v *awstypes.TransitGatewayPeering) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] @@ -190,8 +134,6 @@ func testAccCheckTransitGatewayPeeringDestroy(ctx context.Context) resource.Test func testAccTransitGatewayPeeringConfig_base(rName string) string { return fmt.Sprintf(` -data "aws_region" "current" {} - resource "aws_ec2_transit_gateway" "test" { tags = { Name = %[1]q @@ -220,6 +162,8 @@ resource "aws_networkmanager_core_network" "test" { } } +data "aws_region" "current" {} + resource "aws_networkmanager_core_network_policy_attachment" "test" { core_network_id = aws_networkmanager_core_network.test.id policy_document = data.aws_networkmanager_core_network_policy_document.test.json @@ -248,38 +192,10 @@ resource "aws_networkmanager_transit_gateway_peering" "test" { core_network_id = aws_networkmanager_core_network.test.id transit_gateway_arn = aws_ec2_transit_gateway.test.arn - depends_on = [aws_ec2_transit_gateway_policy_table.test] + depends_on = [ + aws_ec2_transit_gateway_policy_table.test, + aws_networkmanager_core_network_policy_attachment.test, + ] } `) } - -func testAccTransitGatewayPeeringConfig_tags1(rName, tagKey1, tagValue1 string) string { - return acctest.ConfigCompose(testAccTransitGatewayPeeringConfig_base(rName), fmt.Sprintf(` -resource "aws_networkmanager_transit_gateway_peering" "test" { - core_network_id = 
aws_networkmanager_core_network.test.id - transit_gateway_arn = aws_ec2_transit_gateway.test.arn - - tags = { - %[1]q = %[2]q - } - - depends_on = [aws_ec2_transit_gateway_policy_table.test] -} -`, tagKey1, tagValue1)) -} - -func testAccTransitGatewayPeeringConfig_tags2(rName, tagKey1, tagValue1, tagKey2, tagValue2 string) string { - return acctest.ConfigCompose(testAccTransitGatewayPeeringConfig_base(rName), fmt.Sprintf(` -resource "aws_networkmanager_transit_gateway_peering" "test" { - core_network_id = aws_networkmanager_core_network.test.id - transit_gateway_arn = aws_ec2_transit_gateway.test.arn - - tags = { - %[1]q = %[2]q - %[3]q = %[4]q - } - - depends_on = [aws_ec2_transit_gateway_policy_table.test] -} -`, tagKey1, tagValue1, tagKey2, tagValue2)) -} diff --git a/internal/service/networkmanager/transit_gateway_route_table_attachment.go b/internal/service/networkmanager/transit_gateway_route_table_attachment.go index 5c01914a9144..450aba74ddf3 100644 --- a/internal/service/networkmanager/transit_gateway_route_table_attachment.go +++ b/internal/service/networkmanager/transit_gateway_route_table_attachment.go @@ -26,6 +26,9 @@ import ( // @SDKResource("aws_networkmanager_transit_gateway_route_table_attachment", name="Transit Gateway Route Table Attachment") // @Tags(identifierAttribute="arn") +// @Testing(existsType="github.com/aws/aws-sdk-go-v2/service/networkmanager/types;awstypes;awstypes.TransitGatewayRouteTableAttachment") +// @Testing(skipEmptyTags=true) +// @Testing(generator=false) func resourceTransitGatewayRouteTableAttachment() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceTransitGatewayRouteTableAttachmentCreate, diff --git a/internal/service/networkmanager/transit_gateway_route_table_attachment_tags_gen_test.go b/internal/service/networkmanager/transit_gateway_route_table_attachment_tags_gen_test.go new file mode 100644 index 000000000000..960bd1da54e4 --- /dev/null +++ 
b/internal/service/networkmanager/transit_gateway_route_table_attachment_tags_gen_test.go @@ -0,0 +1,2205 @@ +// Code generated by internal/generate/tagstests/main.go; DO NOT EDIT. + +package networkmanager_test + +import ( + "testing" + + awstypes "github.com/aws/aws-sdk-go-v2/service/networkmanager/types" + "github.com/hashicorp/terraform-plugin-testing/config" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/knownvalue" + "github.com/hashicorp/terraform-plugin-testing/plancheck" + "github.com/hashicorp/terraform-plugin-testing/statecheck" + "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccNetworkManagerTransitGatewayRouteTableAttachment_tags(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.TransitGatewayRouteTableAttachment + resourceName := "aws_networkmanager_transit_gateway_route_table_attachment.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckTransitGatewayRouteTableAttachmentDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + ConfigDirectory: config.StaticDirectory("testdata/TransitGatewayRouteTableAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTransitGatewayRouteTableAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: 
knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/TransitGatewayRouteTableAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/TransitGatewayRouteTableAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1Updated), + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTransitGatewayRouteTableAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1Updated), + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + 
statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1Updated), + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1Updated), + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1Updated), + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/TransitGatewayRouteTableAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1Updated), + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/TransitGatewayRouteTableAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTransitGatewayRouteTableAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey2: 
knownvalue.StringExact(acctest.CtValue2), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/TransitGatewayRouteTableAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/TransitGatewayRouteTableAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTransitGatewayRouteTableAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, 
tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/TransitGatewayRouteTableAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccNetworkManagerTransitGatewayRouteTableAttachment_tags_null(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.TransitGatewayRouteTableAttachment + resourceName := "aws_networkmanager_transit_gateway_route_table_attachment.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckTransitGatewayRouteTableAttachmentDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + ConfigDirectory: config.StaticDirectory("testdata/TransitGatewayRouteTableAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: nil, + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTransitGatewayRouteTableAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), 
knownvalue.Null()), + // TODO: Should be known + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/TransitGatewayRouteTableAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: nil, + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/TransitGatewayRouteTableAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: nil, + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + }, + }, + }) +} + +func TestAccNetworkManagerTransitGatewayRouteTableAttachment_tags_EmptyMap(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.TransitGatewayRouteTableAttachment + resourceName := "aws_networkmanager_transit_gateway_route_table_attachment.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckTransitGatewayRouteTableAttachmentDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + ConfigDirectory: config.StaticDirectory("testdata/TransitGatewayRouteTableAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{}), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTransitGatewayRouteTableAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, 
tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + // TODO: Should be known + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/TransitGatewayRouteTableAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{}), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/TransitGatewayRouteTableAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: nil, + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + }, + }, + }) +} + +func TestAccNetworkManagerTransitGatewayRouteTableAttachment_tags_AddOnUpdate(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.TransitGatewayRouteTableAttachment + resourceName := "aws_networkmanager_transit_gateway_route_table_attachment.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckTransitGatewayRouteTableAttachmentDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + ConfigDirectory: 
config.StaticDirectory("testdata/TransitGatewayRouteTableAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTransitGatewayRouteTableAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + // TODO: Should be known + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/TransitGatewayRouteTableAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTransitGatewayRouteTableAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, 
tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/TransitGatewayRouteTableAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccNetworkManagerTransitGatewayRouteTableAttachment_tags_EmptyTag_OnCreate(t *testing.T) { + t.Skip("Resource TransitGatewayRouteTableAttachment does not support empty tags") + + ctx := acctest.Context(t) + + var v awstypes.TransitGatewayRouteTableAttachment + resourceName := "aws_networkmanager_transit_gateway_route_table_attachment.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckTransitGatewayRouteTableAttachmentDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + ConfigDirectory: config.StaticDirectory("testdata/TransitGatewayRouteTableAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTransitGatewayRouteTableAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + 
acctest.CtKey1: knownvalue.StringExact(""), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + // TODO: Should be known + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/TransitGatewayRouteTableAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/TransitGatewayRouteTableAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTransitGatewayRouteTableAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + plancheck.ExpectKnownValue(resourceName, 
tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/TransitGatewayRouteTableAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccNetworkManagerTransitGatewayRouteTableAttachment_tags_EmptyTag_OnUpdate_Add(t *testing.T) { + t.Skip("Resource TransitGatewayRouteTableAttachment does not support empty tags") + + ctx := acctest.Context(t) + + var v awstypes.TransitGatewayRouteTableAttachment + resourceName := "aws_networkmanager_transit_gateway_route_table_attachment.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckTransitGatewayRouteTableAttachmentDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + ConfigDirectory: config.StaticDirectory("testdata/TransitGatewayRouteTableAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTransitGatewayRouteTableAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ 
+ plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/TransitGatewayRouteTableAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + acctest.CtKey2: config.StringVariable(""), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTransitGatewayRouteTableAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + acctest.CtKey2: knownvalue.StringExact(""), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + acctest.CtKey2: knownvalue.StringExact(""), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + acctest.CtKey2: knownvalue.StringExact(""), + })), + // TODO: Should be known + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + }, + }, + { + 
ConfigDirectory: config.StaticDirectory("testdata/TransitGatewayRouteTableAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + acctest.CtKey2: config.StringVariable(""), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/TransitGatewayRouteTableAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTransitGatewayRouteTableAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/TransitGatewayRouteTableAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: 
config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccNetworkManagerTransitGatewayRouteTableAttachment_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { + t.Skip("Resource TransitGatewayRouteTableAttachment does not support empty tags") + + ctx := acctest.Context(t) + + var v awstypes.TransitGatewayRouteTableAttachment + resourceName := "aws_networkmanager_transit_gateway_route_table_attachment.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckTransitGatewayRouteTableAttachmentDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + ConfigDirectory: config.StaticDirectory("testdata/TransitGatewayRouteTableAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTransitGatewayRouteTableAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), 
knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/TransitGatewayRouteTableAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTransitGatewayRouteTableAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + // TODO: Should be known + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/TransitGatewayRouteTableAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func 
TestAccNetworkManagerTransitGatewayRouteTableAttachment_tags_DefaultTags_providerOnly(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.TransitGatewayRouteTableAttachment + resourceName := "aws_networkmanager_transit_gateway_route_table_attachment.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckTransitGatewayRouteTableAttachmentDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/TransitGatewayRouteTableAttachment/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTransitGatewayRouteTableAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: 
config.StaticDirectory("testdata/TransitGatewayRouteTableAttachment/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/TransitGatewayRouteTableAttachment/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1Updated), + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTransitGatewayRouteTableAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1Updated), + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1Updated), + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + }, + }, + { + 
ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/TransitGatewayRouteTableAttachment/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1Updated), + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/TransitGatewayRouteTableAttachment/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTransitGatewayRouteTableAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: 
config.StaticDirectory("testdata/TransitGatewayRouteTableAttachment/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/TransitGatewayRouteTableAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTransitGatewayRouteTableAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/TransitGatewayRouteTableAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccNetworkManagerTransitGatewayRouteTableAttachment_tags_DefaultTags_nonOverlapping(t *testing.T) { + ctx := acctest.Context(t) + + var v 
awstypes.TransitGatewayRouteTableAttachment + resourceName := "aws_networkmanager_transit_gateway_route_table_attachment.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckTransitGatewayRouteTableAttachmentDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/TransitGatewayRouteTableAttachment/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTransitGatewayRouteTableAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1), + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + 
plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1), + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/TransitGatewayRouteTableAttachment/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/TransitGatewayRouteTableAttachment/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1Updated), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: config.StringVariable(acctest.CtResourceValue2), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTransitGatewayRouteTableAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + 
statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1Updated), + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1Updated), + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/TransitGatewayRouteTableAttachment/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1Updated), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: config.StringVariable(acctest.CtResourceValue2), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + 
ConfigDirectory: config.StaticDirectory("testdata/TransitGatewayRouteTableAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTransitGatewayRouteTableAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/TransitGatewayRouteTableAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccNetworkManagerTransitGatewayRouteTableAttachment_tags_DefaultTags_overlapping(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.TransitGatewayRouteTableAttachment + resourceName := "aws_networkmanager_transit_gateway_route_table_attachment.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckTransitGatewayRouteTableAttachmentDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + 
ConfigDirectory: config.StaticDirectory("testdata/TransitGatewayRouteTableAttachment/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtResourceValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTransitGatewayRouteTableAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/TransitGatewayRouteTableAttachment/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtProviderValue1), 
+ }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtResourceValue1), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/TransitGatewayRouteTableAttachment/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtProviderValue1), + acctest.CtOverlapKey2: config.StringVariable("providervalue2"), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtResourceValue1), + acctest.CtOverlapKey2: config.StringVariable(acctest.CtResourceValue2), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTransitGatewayRouteTableAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + acctest.CtOverlapKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + acctest.CtOverlapKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: 
knownvalue.StringExact(acctest.CtResourceValue1), + acctest.CtOverlapKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + acctest.CtOverlapKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/TransitGatewayRouteTableAttachment/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtProviderValue1), + acctest.CtOverlapKey2: config.StringVariable("providervalue2"), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtResourceValue1), + acctest.CtOverlapKey2: config.StringVariable(acctest.CtResourceValue2), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/TransitGatewayRouteTableAttachment/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtResourceValue2), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTransitGatewayRouteTableAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + 
acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue2), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/TransitGatewayRouteTableAttachment/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtResourceValue2), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccNetworkManagerTransitGatewayRouteTableAttachment_tags_DefaultTags_updateToProviderOnly(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.TransitGatewayRouteTableAttachment + resourceName := "aws_networkmanager_transit_gateway_route_table_attachment.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: 
testAccCheckTransitGatewayRouteTableAttachmentDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/TransitGatewayRouteTableAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTransitGatewayRouteTableAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/TransitGatewayRouteTableAttachment/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + 
testAccCheckTransitGatewayRouteTableAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/TransitGatewayRouteTableAttachment/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccNetworkManagerTransitGatewayRouteTableAttachment_tags_DefaultTags_updateToResourceOnly(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.TransitGatewayRouteTableAttachment + resourceName := "aws_networkmanager_transit_gateway_route_table_attachment.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckTransitGatewayRouteTableAttachmentDestroy(ctx), + Steps: []resource.TestStep{ + { + 
ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/TransitGatewayRouteTableAttachment/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTransitGatewayRouteTableAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/TransitGatewayRouteTableAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTransitGatewayRouteTableAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: 
knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/TransitGatewayRouteTableAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccNetworkManagerTransitGatewayRouteTableAttachment_tags_DefaultTags_emptyResourceTag(t *testing.T) { + t.Skip("Resource TransitGatewayRouteTableAttachment does not support empty tags") + + ctx := acctest.Context(t) + + var v awstypes.TransitGatewayRouteTableAttachment + resourceName := "aws_networkmanager_transit_gateway_route_table_attachment.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckTransitGatewayRouteTableAttachmentDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: 
config.StaticDirectory("testdata/TransitGatewayRouteTableAttachment/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTransitGatewayRouteTableAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + // TODO: Should be known + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/TransitGatewayRouteTableAttachment/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func 
TestAccNetworkManagerTransitGatewayRouteTableAttachment_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T) { + t.Skip("Resource TransitGatewayRouteTableAttachment does not support empty tags") + + ctx := acctest.Context(t) + + var v awstypes.TransitGatewayRouteTableAttachment + resourceName := "aws_networkmanager_transit_gateway_route_table_attachment.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckTransitGatewayRouteTableAttachmentDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/TransitGatewayRouteTableAttachment/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTransitGatewayRouteTableAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + // TODO: Should be known + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: 
config.StaticDirectory("testdata/TransitGatewayRouteTableAttachment/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccNetworkManagerTransitGatewayRouteTableAttachment_tags_DefaultTags_nullOverlappingResourceTag(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.TransitGatewayRouteTableAttachment + resourceName := "aws_networkmanager_transit_gateway_route_table_attachment.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckTransitGatewayRouteTableAttachmentDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/TransitGatewayRouteTableAttachment/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: nil, + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTransitGatewayRouteTableAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtProviderValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + 
plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtProviderValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/TransitGatewayRouteTableAttachment/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: nil, + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccNetworkManagerTransitGatewayRouteTableAttachment_tags_DefaultTags_nullNonOverlappingResourceTag(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.TransitGatewayRouteTableAttachment + resourceName := "aws_networkmanager_transit_gateway_route_table_attachment.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckTransitGatewayRouteTableAttachmentDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/TransitGatewayRouteTableAttachment/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: nil, + }), + }, + Check: 
resource.ComposeAggregateTestCheckFunc( + testAccCheckTransitGatewayRouteTableAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/TransitGatewayRouteTableAttachment/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: nil, + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccNetworkManagerTransitGatewayRouteTableAttachment_tags_ComputedTag_OnCreate(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.TransitGatewayRouteTableAttachment + resourceName := "aws_networkmanager_transit_gateway_route_table_attachment.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: 
testAccCheckTransitGatewayRouteTableAttachmentDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/TransitGatewayRouteTableAttachment/tagsComputed1/"), + ConfigVariables: config.Variables{ + "unknownTagKey": config.StringVariable("computedkey1"), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTransitGatewayRouteTableAttachmentExists(ctx, resourceName, &v), + resource.TestCheckResourceAttrPair(resourceName, "tags.computedkey1", "null_resource.test", names.AttrID), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapSizeExact(1)), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapSizeExact(1)), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTags)), + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/TransitGatewayRouteTableAttachment/tagsComputed1/"), + ConfigVariables: config.Variables{ + "unknownTagKey": config.StringVariable("computedkey1"), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccNetworkManagerTransitGatewayRouteTableAttachment_tags_ComputedTag_OnUpdate_Add(t *testing.T) { + ctx := acctest.Context(t) + + var v 
awstypes.TransitGatewayRouteTableAttachment + resourceName := "aws_networkmanager_transit_gateway_route_table_attachment.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckTransitGatewayRouteTableAttachmentDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/TransitGatewayRouteTableAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTransitGatewayRouteTableAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: 
config.StaticDirectory("testdata/TransitGatewayRouteTableAttachment/tagsComputed2/"), + ConfigVariables: config.Variables{ + "unknownTagKey": config.StringVariable("computedkey1"), + "knownTagKey": config.StringVariable(acctest.CtKey1), + "knownTagValue": config.StringVariable(acctest.CtValue1), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTransitGatewayRouteTableAttachmentExists(ctx, resourceName, &v), + resource.TestCheckResourceAttrPair(resourceName, "tags.computedkey1", "null_resource.test", names.AttrID), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapSizeExact(2)), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapPartial(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapSizeExact(2)), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapPartial(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTags)), + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/TransitGatewayRouteTableAttachment/tagsComputed2/"), + ConfigVariables: 
config.Variables{ + "unknownTagKey": config.StringVariable("computedkey1"), + "knownTagKey": config.StringVariable(acctest.CtKey1), + "knownTagValue": config.StringVariable(acctest.CtValue1), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccNetworkManagerTransitGatewayRouteTableAttachment_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.TransitGatewayRouteTableAttachment + resourceName := "aws_networkmanager_transit_gateway_route_table_attachment.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckTransitGatewayRouteTableAttachmentDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/TransitGatewayRouteTableAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTransitGatewayRouteTableAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), 
knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/TransitGatewayRouteTableAttachment/tagsComputed1/"), + ConfigVariables: config.Variables{ + "unknownTagKey": config.StringVariable(acctest.CtKey1), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTransitGatewayRouteTableAttachmentExists(ctx, resourceName, &v), + resource.TestCheckResourceAttrPair(resourceName, acctest.CtTagsKey1, "null_resource.test", names.AttrID), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapSizeExact(1)), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapSizeExact(1)), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTags)), + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/TransitGatewayRouteTableAttachment/tagsComputed1/"), + ConfigVariables: config.Variables{ + "unknownTagKey": config.StringVariable(acctest.CtKey1), + }, + ResourceName: resourceName, + 
ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccNetworkManagerTransitGatewayRouteTableAttachment_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.TransitGatewayRouteTableAttachment + resourceName := "aws_networkmanager_transit_gateway_route_table_attachment.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckTransitGatewayRouteTableAttachmentDestroy(ctx), + Steps: []resource.TestStep{ + // 1: Create + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/TransitGatewayRouteTableAttachment/tags_ignore/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1), + }), + "ignore_tag_keys": config.SetVariable( + config.StringVariable(acctest.CtProviderKey1), + ), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTransitGatewayRouteTableAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + expectFullResourceTags(ctx, resourceName, knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: 
knownvalue.StringExact(acctest.CtProviderValue1), // TODO: Should not be set + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectEmptyPlan(), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectEmptyPlan(), + }, + }, + }, + // 2: Update ignored tag only + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/TransitGatewayRouteTableAttachment/tags_ignore/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1Updated), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1), + }), + "ignore_tag_keys": config.SetVariable( + config.StringVariable(acctest.CtProviderKey1), + ), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTransitGatewayRouteTableAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + 
statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + expectFullResourceTags(ctx, resourceName, knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1), // TODO: Should not be set + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectEmptyPlan(), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectEmptyPlan(), + }, + }, + }, + // 3: Update both tags + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/TransitGatewayRouteTableAttachment/tags_ignore/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1Again), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1Updated), + }), + "ignore_tag_keys": config.SetVariable( + config.StringVariable(acctest.CtProviderKey1), + ), + }, + Check: resource.ComposeAggregateTestCheckFunc( + 
testAccCheckTransitGatewayRouteTableAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + })), + expectFullResourceTags(ctx, resourceName, knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1), // TODO: Should not be set + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + })), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectEmptyPlan(), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectEmptyPlan(), + }, + }, + }, + }, + }) +} + +func TestAccNetworkManagerTransitGatewayRouteTableAttachment_tags_IgnoreTags_Overlap_ResourceTag(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.TransitGatewayRouteTableAttachment + resourceName := "aws_networkmanager_transit_gateway_route_table_attachment.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { 
acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckTransitGatewayRouteTableAttachmentDestroy(ctx), + Steps: []resource.TestStep{ + // 1: Create + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/TransitGatewayRouteTableAttachment/tags_ignore/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1), + acctest.CtResourceKey2: config.StringVariable(acctest.CtResourceValue2), + }), + "ignore_tag_keys": config.SetVariable( + config.StringVariable(acctest.CtResourceKey1), + ), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTransitGatewayRouteTableAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + expectFullResourceTags(ctx, resourceName, knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), // TODO: Should not be set + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + 
acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + }, + ExpectNonEmptyPlan: true, + }, + // 2: Update ignored tag + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/TransitGatewayRouteTableAttachment/tags_ignore/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: 
config.StringVariable(acctest.CtResourceValue2), + }), + "ignore_tag_keys": config.SetVariable( + config.StringVariable(acctest.CtResourceKey1), + ), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTransitGatewayRouteTableAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + expectFullResourceTags(ctx, resourceName, knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), // TODO: Should not be set + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: 
knownvalue.StringExact(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + }, + ExpectNonEmptyPlan: true, + }, + // 3: Update both tags + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/TransitGatewayRouteTableAttachment/tags_ignore/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1Again), + acctest.CtResourceKey2: config.StringVariable(acctest.CtResourceValue2Updated), + }), + "ignore_tag_keys": config.SetVariable( + config.StringVariable(acctest.CtResourceKey1), + ), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTransitGatewayRouteTableAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + 
statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + expectFullResourceTags(ctx, resourceName, knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), // TODO: Should not be set + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Again), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Again), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, 
tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Again), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + }, + }, + ExpectNonEmptyPlan: true, + }, + }, + }) +} diff --git a/internal/service/networkmanager/transit_gateway_route_table_attachment_test.go b/internal/service/networkmanager/transit_gateway_route_table_attachment_test.go index dd61d6ee5b36..91a2de93cc0b 100644 --- a/internal/service/networkmanager/transit_gateway_route_table_attachment_test.go +++ b/internal/service/networkmanager/transit_gateway_route_table_attachment_test.go @@ -80,52 +80,6 @@ func TestAccNetworkManagerTransitGatewayRouteTableAttachment_disappears(t *testi }) } -func TestAccNetworkManagerTransitGatewayRouteTableAttachment_tags(t *testing.T) { - ctx := acctest.Context(t) - var v awstypes.TransitGatewayRouteTableAttachment - resourceName := "aws_networkmanager_transit_gateway_route_table_attachment.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckTransitGatewayRouteTableAttachmentDestroy(ctx), - Steps: []resource.TestStep{ - { - Config: testAccTransitGatewayRouteTableAttachmentConfig_tags1(rName, acctest.CtKey1, acctest.CtValue1), - Check: resource.ComposeTestCheckFunc( - testAccCheckTransitGatewayRouteTableAttachmentExists(ctx, resourceName, &v), - resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, "1"), - 
resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey1, acctest.CtValue1), - ), - }, - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - { - Config: testAccTransitGatewayRouteTableAttachmentConfig_tags2(rName, acctest.CtKey1, acctest.CtValue1Updated, acctest.CtKey2, acctest.CtValue2), - Check: resource.ComposeTestCheckFunc( - testAccCheckTransitGatewayRouteTableAttachmentExists(ctx, resourceName, &v), - resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, "2"), - resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey1, acctest.CtValue1Updated), - resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey2, acctest.CtValue2), - ), - }, - { - Config: testAccTransitGatewayRouteTableAttachmentConfig_tags1(rName, acctest.CtKey2, acctest.CtValue2), - Check: resource.ComposeTestCheckFunc( - testAccCheckTransitGatewayRouteTableAttachmentExists(ctx, resourceName, &v), - resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, "1"), - resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey2, acctest.CtValue2), - ), - }, - }, - }) -} - func testAccCheckTransitGatewayRouteTableAttachmentExists(ctx context.Context, n string, v *awstypes.TransitGatewayRouteTableAttachment) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] @@ -216,44 +170,3 @@ resource "aws_networkmanager_attachment_accepter" "test" { } `) } - -func testAccTransitGatewayRouteTableAttachmentConfig_tags1(rName, tagKey1, tagValue1 string) string { - return acctest.ConfigCompose(testAccTransitGatewayRouteTableAttachmentConfig_base(rName), fmt.Sprintf(` -resource "aws_networkmanager_transit_gateway_route_table_attachment" "test" { - peering_id = aws_networkmanager_transit_gateway_peering.test.id - transit_gateway_route_table_arn = aws_ec2_transit_gateway_route_table.test.arn - - tags = { - %[1]q = %[2]q - } - - depends_on = 
[aws_ec2_transit_gateway_policy_table_association.test] -} - -resource "aws_networkmanager_attachment_accepter" "test" { - attachment_id = aws_networkmanager_transit_gateway_route_table_attachment.test.id - attachment_type = aws_networkmanager_transit_gateway_route_table_attachment.test.attachment_type -} -`, tagKey1, tagValue1)) -} - -func testAccTransitGatewayRouteTableAttachmentConfig_tags2(rName, tagKey1, tagValue1, tagKey2, tagValue2 string) string { - return acctest.ConfigCompose(testAccTransitGatewayRouteTableAttachmentConfig_base(rName), fmt.Sprintf(` -resource "aws_networkmanager_transit_gateway_route_table_attachment" "test" { - peering_id = aws_networkmanager_transit_gateway_peering.test.id - transit_gateway_route_table_arn = aws_ec2_transit_gateway_route_table.test.arn - - tags = { - %[1]q = %[2]q - %[3]q = %[4]q - } - - depends_on = [aws_ec2_transit_gateway_policy_table_association.test] -} - -resource "aws_networkmanager_attachment_accepter" "test" { - attachment_id = aws_networkmanager_transit_gateway_route_table_attachment.test.id - attachment_type = aws_networkmanager_transit_gateway_route_table_attachment.test.attachment_type -} -`, tagKey1, tagValue1, tagKey2, tagValue2)) -} diff --git a/internal/service/networkmanager/vpc_attachment.go b/internal/service/networkmanager/vpc_attachment.go index b727b461cb56..3c61c67d5381 100644 --- a/internal/service/networkmanager/vpc_attachment.go +++ b/internal/service/networkmanager/vpc_attachment.go @@ -28,6 +28,9 @@ import ( // @SDKResource("aws_networkmanager_vpc_attachment", name="VPC Attachment") // @Tags(identifierAttribute="arn") +// @Testing(existsType="github.com/aws/aws-sdk-go-v2/service/networkmanager/types;awstypes;awstypes.VpcAttachment") +// @Testing(skipEmptyTags=true) +// @Testing(generator=false) func resourceVPCAttachment() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceVPCAttachmentCreate, @@ -68,6 +71,34 @@ func resourceVPCAttachment() *schema.Resource { } return 
nil }, + func(ctx context.Context, d *schema.ResourceDiff, meta any) error { + if d.Id() == "" { + return nil + } + + if !d.HasChange("options.0.dns_support") { + return nil + } + + if state := awstypes.AttachmentState(d.Get(names.AttrState).(string)); state == awstypes.AttachmentStatePendingAttachmentAcceptance { + return d.ForceNew("options.0.dns_support") + } + return nil + }, + func(ctx context.Context, d *schema.ResourceDiff, meta any) error { + if d.Id() == "" { + return nil + } + + if !d.HasChange("options.0.security_group_referencing_support") { + return nil + } + + if state := awstypes.AttachmentState(d.Get(names.AttrState).(string)); state == awstypes.AttachmentStatePendingAttachmentAcceptance { + return d.ForceNew("options.0.security_group_referencing_support") + } + return nil + }, ), Timeouts: &schema.ResourceTimeout{ @@ -105,16 +136,29 @@ func resourceVPCAttachment() *schema.Resource { "options": { Type: schema.TypeList, Optional: true, + Computed: true, MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "appliance_mode_support": { Type: schema.TypeBool, Optional: true, + Computed: true, + }, + "dns_support": { + Type: schema.TypeBool, + Optional: true, + Computed: true, }, "ipv6_support": { Type: schema.TypeBool, Optional: true, + Computed: true, + }, + "security_group_referencing_support": { + Type: schema.TypeBool, + Optional: true, + Computed: true, }, }, }, @@ -263,10 +307,11 @@ func resourceVPCAttachmentUpdate(ctx context.Context, d *schema.ResourceData, me if err != nil { return sdkdiag.AppendErrorf(diags, "updating Network Manager VPC Attachment (%s): %s", d.Id(), err) } + } - if _, err := waitVPCAttachmentUpdated(ctx, conn, d.Id(), d.Timeout(schema.TimeoutUpdate)); err != nil { - return sdkdiag.AppendErrorf(diags, "waiting for Network Manager VPC Attachment (%s) update: %s", d.Id(), err) - } + // An update (via transparent tagging) to tags can put the attachment into PENDING_NETWORK_UPDATE state. 
+ if _, err := waitVPCAttachmentUpdated(ctx, conn, d.Id(), d.Timeout(schema.TimeoutUpdate)); err != nil { + return sdkdiag.AppendErrorf(diags, "waiting for Network Manager VPC Attachment (%s) update: %s", d.Id(), err) } return append(diags, resourceVPCAttachmentRead(ctx, d, meta)...) @@ -310,7 +355,7 @@ func resourceVPCAttachmentDelete(ctx context.Context, d *schema.ResourceData, me const ( timeout = 5 * time.Minute ) - _, err = tfresource.RetryWhenIsAErrorMessageContains[*awstypes.ValidationException](ctx, timeout, func() (any, error) { + _, err = tfresource.RetryWhenIsAErrorMessageContains[any, *awstypes.ValidationException](ctx, timeout, func(ctx context.Context) (any, error) { return conn.DeleteAttachment(ctx, &networkmanager.DeleteAttachmentInput{ AttachmentId: aws.String(d.Id()), }) @@ -454,7 +499,7 @@ func waitVPCAttachmentDeleted(ctx context.Context, conn *networkmanager.Client, func waitVPCAttachmentUpdated(ctx context.Context, conn *networkmanager.Client, id string, timeout time.Duration) (*awstypes.VpcAttachment, error) { stateConf := &retry.StateChangeConf{ - Pending: enum.Slice(awstypes.AttachmentStateUpdating), + Pending: enum.Slice(awstypes.AttachmentStatePendingNetworkUpdate, awstypes.AttachmentStateUpdating), Target: enum.Slice(awstypes.AttachmentStateAvailable, awstypes.AttachmentStatePendingTagAcceptance), Timeout: timeout, Refresh: statusVPCAttachment(ctx, conn, id), @@ -479,11 +524,19 @@ func expandVpcOptions(tfMap map[string]any) *awstypes.VpcOptions { // nosemgrep: apiObject := &awstypes.VpcOptions{} if v, ok := tfMap["appliance_mode_support"].(bool); ok { - apiObject.ApplianceModeSupport = v + apiObject.ApplianceModeSupport = aws.Bool(v) + } + + if v, ok := tfMap["dns_support"].(bool); ok { + apiObject.DnsSupport = aws.Bool(v) } if v, ok := tfMap["ipv6_support"].(bool); ok { - apiObject.Ipv6Support = v + apiObject.Ipv6Support = aws.Bool(v) + } + + if v, ok := tfMap["security_group_referencing_support"].(bool); ok { + 
apiObject.SecurityGroupReferencingSupport = aws.Bool(v) } return apiObject @@ -495,8 +548,10 @@ func flattenVpcOptions(apiObject *awstypes.VpcOptions) map[string]any { // nosem } tfMap := map[string]any{ - "appliance_mode_support": apiObject.ApplianceModeSupport, - "ipv6_support": apiObject.Ipv6Support, + "appliance_mode_support": aws.ToBool(apiObject.ApplianceModeSupport), + "dns_support": aws.ToBool(apiObject.DnsSupport), + "ipv6_support": aws.ToBool(apiObject.Ipv6Support), + "security_group_referencing_support": aws.ToBool(apiObject.SecurityGroupReferencingSupport), } return tfMap diff --git a/internal/service/networkmanager/vpc_attachment_tags_gen_test.go b/internal/service/networkmanager/vpc_attachment_tags_gen_test.go new file mode 100644 index 000000000000..5ed45e265918 --- /dev/null +++ b/internal/service/networkmanager/vpc_attachment_tags_gen_test.go @@ -0,0 +1,2205 @@ +// Code generated by internal/generate/tagstests/main.go; DO NOT EDIT. + +package networkmanager_test + +import ( + "testing" + + awstypes "github.com/aws/aws-sdk-go-v2/service/networkmanager/types" + "github.com/hashicorp/terraform-plugin-testing/config" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/knownvalue" + "github.com/hashicorp/terraform-plugin-testing/plancheck" + "github.com/hashicorp/terraform-plugin-testing/statecheck" + "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccNetworkManagerVPCAttachment_tags(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.VpcAttachment + resourceName := "aws_networkmanager_vpc_attachment.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckVPCAttachmentDestroy(ctx), + 
ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + ConfigDirectory: config.StaticDirectory("testdata/VPCAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckVPCAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/VPCAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/VPCAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + 
acctest.CtKey1: config.StringVariable(acctest.CtValue1Updated), + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckVPCAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1Updated), + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1Updated), + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1Updated), + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1Updated), + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/VPCAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1Updated), + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + ConfigDirectory: 
config.StaticDirectory("testdata/VPCAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckVPCAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/VPCAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/VPCAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckVPCAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ 
+ statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/VPCAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccNetworkManagerVPCAttachment_tags_null(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.VpcAttachment + resourceName := "aws_networkmanager_vpc_attachment.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckVPCAttachmentDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + ConfigDirectory: config.StaticDirectory("testdata/VPCAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: nil, + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckVPCAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, 
tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + // TODO: Should be known + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/VPCAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: nil, + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/VPCAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: nil, + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + }, + }, + }) +} + +func TestAccNetworkManagerVPCAttachment_tags_EmptyMap(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.VpcAttachment + resourceName := "aws_networkmanager_vpc_attachment.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckVPCAttachmentDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + ConfigDirectory: config.StaticDirectory("testdata/VPCAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{}), + }, + Check: 
resource.ComposeAggregateTestCheckFunc( + testAccCheckVPCAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + // TODO: Should be known + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/VPCAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{}), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/VPCAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: nil, + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + }, + }, + }) +} + +func TestAccNetworkManagerVPCAttachment_tags_AddOnUpdate(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.VpcAttachment + resourceName := "aws_networkmanager_vpc_attachment.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckVPCAttachmentDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + 
Steps: []resource.TestStep{ + { + ConfigDirectory: config.StaticDirectory("testdata/VPCAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckVPCAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + // TODO: Should be known + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/VPCAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckVPCAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), 
knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/VPCAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccNetworkManagerVPCAttachment_tags_EmptyTag_OnCreate(t *testing.T) { + t.Skip("Resource VPCAttachment does not support empty tags") + + ctx := acctest.Context(t) + + var v awstypes.VpcAttachment + resourceName := "aws_networkmanager_vpc_attachment.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckVPCAttachmentDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + ConfigDirectory: config.StaticDirectory("testdata/VPCAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckVPCAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: 
knownvalue.StringExact(""), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + // TODO: Should be known + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/VPCAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/VPCAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckVPCAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/VPCAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: nil, + }, + 
ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccNetworkManagerVPCAttachment_tags_EmptyTag_OnUpdate_Add(t *testing.T) { + t.Skip("Resource VPCAttachment does not support empty tags") + + ctx := acctest.Context(t) + + var v awstypes.VpcAttachment + resourceName := "aws_networkmanager_vpc_attachment.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckVPCAttachmentDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + ConfigDirectory: config.StaticDirectory("testdata/VPCAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckVPCAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: 
knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/VPCAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + acctest.CtKey2: config.StringVariable(""), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckVPCAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + acctest.CtKey2: knownvalue.StringExact(""), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + acctest.CtKey2: knownvalue.StringExact(""), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + acctest.CtKey2: knownvalue.StringExact(""), + })), + // TODO: Should be known + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/VPCAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + acctest.CtKey2: config.StringVariable(""), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/VPCAttachment/tags/"), + 
ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckVPCAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/VPCAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccNetworkManagerVPCAttachment_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { + t.Skip("Resource VPCAttachment does not support empty tags") + + ctx := acctest.Context(t) + + var v awstypes.VpcAttachment + resourceName := "aws_networkmanager_vpc_attachment.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { 
acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckVPCAttachmentDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + ConfigDirectory: config.StaticDirectory("testdata/VPCAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckVPCAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/VPCAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckVPCAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: 
[]statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + // TODO: Should be known + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/VPCAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccNetworkManagerVPCAttachment_tags_DefaultTags_providerOnly(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.VpcAttachment + resourceName := "aws_networkmanager_vpc_attachment.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckVPCAttachmentDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/VPCAttachment/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + 
acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckVPCAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/VPCAttachment/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/VPCAttachment/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1Updated), + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckVPCAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + 
statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1Updated), + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1Updated), + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/VPCAttachment/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1Updated), + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/VPCAttachment/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckVPCAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + 
statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/VPCAttachment/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/VPCAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckVPCAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + 
plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/VPCAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccNetworkManagerVPCAttachment_tags_DefaultTags_nonOverlapping(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.VpcAttachment + resourceName := "aws_networkmanager_vpc_attachment.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckVPCAttachmentDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/VPCAttachment/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckVPCAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + 
statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1), + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1), + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/VPCAttachment/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/VPCAttachment/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1Updated), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: 
config.StringVariable(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: config.StringVariable(acctest.CtResourceValue2), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckVPCAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1Updated), + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1Updated), + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/VPCAttachment/tags_defaults/"), + ConfigVariables: 
config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1Updated), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: config.StringVariable(acctest.CtResourceValue2), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/VPCAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckVPCAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/VPCAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccNetworkManagerVPCAttachment_tags_DefaultTags_overlapping(t *testing.T) { + ctx := 
acctest.Context(t) + + var v awstypes.VpcAttachment + resourceName := "aws_networkmanager_vpc_attachment.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckVPCAttachmentDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/VPCAttachment/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtResourceValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckVPCAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: 
knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/VPCAttachment/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtResourceValue1), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/VPCAttachment/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtProviderValue1), + acctest.CtOverlapKey2: config.StringVariable("providervalue2"), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtResourceValue1), + acctest.CtOverlapKey2: config.StringVariable(acctest.CtResourceValue2), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckVPCAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + acctest.CtOverlapKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + acctest.CtOverlapKey2: 
knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + acctest.CtOverlapKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + acctest.CtOverlapKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/VPCAttachment/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtProviderValue1), + acctest.CtOverlapKey2: config.StringVariable("providervalue2"), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtResourceValue1), + acctest.CtOverlapKey2: config.StringVariable(acctest.CtResourceValue2), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/VPCAttachment/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: 
config.StringVariable(acctest.CtResourceValue2), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckVPCAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue2), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/VPCAttachment/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtResourceValue2), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccNetworkManagerVPCAttachment_tags_DefaultTags_updateToProviderOnly(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.VpcAttachment + resourceName 
:= "aws_networkmanager_vpc_attachment.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckVPCAttachmentDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/VPCAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckVPCAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/VPCAttachment/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + 
acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckVPCAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/VPCAttachment/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccNetworkManagerVPCAttachment_tags_DefaultTags_updateToResourceOnly(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.VpcAttachment + resourceName := "aws_networkmanager_vpc_attachment.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckVPCAttachmentDestroy(ctx), + Steps: 
[]resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/VPCAttachment/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckVPCAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/VPCAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckVPCAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + 
statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/VPCAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccNetworkManagerVPCAttachment_tags_DefaultTags_emptyResourceTag(t *testing.T) { + t.Skip("Resource VPCAttachment does not support empty tags") + + ctx := acctest.Context(t) + + var v awstypes.VpcAttachment + resourceName := "aws_networkmanager_vpc_attachment.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckVPCAttachmentDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/VPCAttachment/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: 
config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckVPCAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + // TODO: Should be known + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/VPCAttachment/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccNetworkManagerVPCAttachment_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T) { + t.Skip("Resource VPCAttachment does not support empty tags") + + ctx := acctest.Context(t) + + var v awstypes.VpcAttachment + resourceName := "aws_networkmanager_vpc_attachment.test" + + 
acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckVPCAttachmentDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/VPCAttachment/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckVPCAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + // TODO: Should be known + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/VPCAttachment/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccNetworkManagerVPCAttachment_tags_DefaultTags_nullOverlappingResourceTag(t *testing.T) { + ctx := acctest.Context(t) + + var 
v awstypes.VpcAttachment + resourceName := "aws_networkmanager_vpc_attachment.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckVPCAttachmentDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/VPCAttachment/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: nil, + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckVPCAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtProviderValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtProviderValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/VPCAttachment/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: 
config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: nil, + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccNetworkManagerVPCAttachment_tags_DefaultTags_nullNonOverlappingResourceTag(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.VpcAttachment + resourceName := "aws_networkmanager_vpc_attachment.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckVPCAttachmentDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/VPCAttachment/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: nil, + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckVPCAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), 
knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/VPCAttachment/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: nil, + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccNetworkManagerVPCAttachment_tags_ComputedTag_OnCreate(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.VpcAttachment + resourceName := "aws_networkmanager_vpc_attachment.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckVPCAttachmentDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/VPCAttachment/tagsComputed1/"), + ConfigVariables: config.Variables{ + "unknownTagKey": config.StringVariable("computedkey1"), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckVPCAttachmentExists(ctx, resourceName, &v), + resource.TestCheckResourceAttrPair(resourceName, "tags.computedkey1", "null_resource.test", names.AttrID), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapSizeExact(1)), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapSizeExact(1)), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + 
plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTags)), + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/VPCAttachment/tagsComputed1/"), + ConfigVariables: config.Variables{ + "unknownTagKey": config.StringVariable("computedkey1"), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccNetworkManagerVPCAttachment_tags_ComputedTag_OnUpdate_Add(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.VpcAttachment + resourceName := "aws_networkmanager_vpc_attachment.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckVPCAttachmentDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/VPCAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckVPCAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + 
statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/VPCAttachment/tagsComputed2/"), + ConfigVariables: config.Variables{ + "unknownTagKey": config.StringVariable("computedkey1"), + "knownTagKey": config.StringVariable(acctest.CtKey1), + "knownTagValue": config.StringVariable(acctest.CtValue1), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckVPCAttachmentExists(ctx, resourceName, &v), + resource.TestCheckResourceAttrPair(resourceName, "tags.computedkey1", "null_resource.test", names.AttrID), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapSizeExact(2)), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapPartial(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapSizeExact(2)), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapPartial(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + 
}, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTags)), + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/VPCAttachment/tagsComputed2/"), + ConfigVariables: config.Variables{ + "unknownTagKey": config.StringVariable("computedkey1"), + "knownTagKey": config.StringVariable(acctest.CtKey1), + "knownTagValue": config.StringVariable(acctest.CtValue1), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccNetworkManagerVPCAttachment_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.VpcAttachment + resourceName := "aws_networkmanager_vpc_attachment.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckVPCAttachmentDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/VPCAttachment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckVPCAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: 
[]statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/VPCAttachment/tagsComputed1/"), + ConfigVariables: config.Variables{ + "unknownTagKey": config.StringVariable(acctest.CtKey1), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckVPCAttachmentExists(ctx, resourceName, &v), + resource.TestCheckResourceAttrPair(resourceName, acctest.CtTagsKey1, "null_resource.test", names.AttrID), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapSizeExact(1)), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapSizeExact(1)), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTags)), + plancheck.ExpectUnknownValue(resourceName, 
tfjsonpath.New(names.AttrTagsAll)), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/VPCAttachment/tagsComputed1/"), + ConfigVariables: config.Variables{ + "unknownTagKey": config.StringVariable(acctest.CtKey1), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccNetworkManagerVPCAttachment_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.VpcAttachment + resourceName := "aws_networkmanager_vpc_attachment.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckVPCAttachmentDestroy(ctx), + Steps: []resource.TestStep{ + // 1: Create + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/VPCAttachment/tags_ignore/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1), + }), + "ignore_tag_keys": config.SetVariable( + config.StringVariable(acctest.CtProviderKey1), + ), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckVPCAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), 
knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + expectFullResourceTags(ctx, resourceName, knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1), // TODO: Should not be set + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectEmptyPlan(), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectEmptyPlan(), + }, + }, + }, + // 2: Update ignored tag only + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/VPCAttachment/tags_ignore/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1Updated), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1), + }), + "ignore_tag_keys": config.SetVariable( + 
config.StringVariable(acctest.CtProviderKey1), + ), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckVPCAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + expectFullResourceTags(ctx, resourceName, knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1), // TODO: Should not be set + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectEmptyPlan(), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectEmptyPlan(), + }, + }, + }, + // 3: Update both tags + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/VPCAttachment/tags_ignore/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: 
config.StringVariable(acctest.CtProviderValue1Again), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1Updated), + }), + "ignore_tag_keys": config.SetVariable( + config.StringVariable(acctest.CtProviderKey1), + ), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckVPCAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + })), + expectFullResourceTags(ctx, resourceName, knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1), // TODO: Should not be set + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + })), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectEmptyPlan(), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectEmptyPlan(), + }, + }, + }, + }, + }) +} + +func 
TestAccNetworkManagerVPCAttachment_tags_IgnoreTags_Overlap_ResourceTag(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.VpcAttachment + resourceName := "aws_networkmanager_vpc_attachment.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + CheckDestroy: testAccCheckVPCAttachmentDestroy(ctx), + Steps: []resource.TestStep{ + // 1: Create + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/VPCAttachment/tags_ignore/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1), + acctest.CtResourceKey2: config.StringVariable(acctest.CtResourceValue2), + }), + "ignore_tag_keys": config.SetVariable( + config.StringVariable(acctest.CtResourceKey1), + ), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckVPCAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + expectFullResourceTags(ctx, resourceName, knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), // TODO: Should not be set + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, 
plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + }, + ExpectNonEmptyPlan: true, + }, + // 2: Update ignored tag + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/VPCAttachment/tags_ignore/"), + 
ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: config.StringVariable(acctest.CtResourceValue2), + }), + "ignore_tag_keys": config.SetVariable( + config.StringVariable(acctest.CtResourceKey1), + ), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckVPCAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + expectFullResourceTags(ctx, resourceName, knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), // TODO: Should not be set + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, 
plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + }, + ExpectNonEmptyPlan: true, + }, + // 3: Update both tags + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/VPCAttachment/tags_ignore/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1Again), + acctest.CtResourceKey2: config.StringVariable(acctest.CtResourceValue2Updated), + }), + "ignore_tag_keys": config.SetVariable( + config.StringVariable(acctest.CtResourceKey1), + ), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckVPCAttachmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), 
knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + expectFullResourceTags(ctx, resourceName, knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), // TODO: Should not be set + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Again), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Again), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + }, + PostApplyPostRefresh: 
[]plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Again), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + }, + }, + ExpectNonEmptyPlan: true, + }, + }, + }) +} diff --git a/internal/service/networkmanager/vpc_attachment_test.go b/internal/service/networkmanager/vpc_attachment_test.go index fa33b78ebba4..f8fa2437ab51 100644 --- a/internal/service/networkmanager/vpc_attachment_test.go +++ b/internal/service/networkmanager/vpc_attachment_test.go @@ -70,7 +70,9 @@ func TestAccNetworkManagerVPCAttachment_basic(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "edge_location", acctest.Region()), resource.TestCheckResourceAttr(resourceName, "options.#", "1"), resource.TestCheckResourceAttr(resourceName, "options.0.appliance_mode_support", acctest.CtFalse), + resource.TestCheckResourceAttr(resourceName, "options.0.dns_support", acctest.CtTrue), resource.TestCheckResourceAttr(resourceName, "options.0.ipv6_support", acctest.CtFalse), + resource.TestCheckResourceAttr(resourceName, "options.0.security_group_referencing_support", acctest.CtTrue), acctest.CheckResourceAttrAccountID(ctx, resourceName, names.AttrOwnerAccountID), resource.TestCheckResourceAttrPair(resourceName, names.AttrResourceARN, vpcResourceName, names.AttrARN), resource.TestCheckResourceAttr(resourceName, "segment_name", "shared"), @@ -136,7 +138,9 @@ func TestAccNetworkManagerVPCAttachment_Attached_basic(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "edge_location", 
acctest.Region()), resource.TestCheckResourceAttr(resourceName, "options.#", "1"), resource.TestCheckResourceAttr(resourceName, "options.0.appliance_mode_support", acctest.CtFalse), + resource.TestCheckResourceAttr(resourceName, "options.0.dns_support", acctest.CtTrue), resource.TestCheckResourceAttr(resourceName, "options.0.ipv6_support", acctest.CtFalse), + resource.TestCheckResourceAttr(resourceName, "options.0.security_group_referencing_support", acctest.CtTrue), acctest.CheckResourceAttrAccountID(ctx, resourceName, names.AttrOwnerAccountID), resource.TestCheckResourceAttrPair(resourceName, names.AttrResourceARN, vpcResourceName, names.AttrARN), resource.TestCheckResourceAttr(resourceName, "segment_name", "shared"), @@ -294,55 +298,6 @@ func TestAccNetworkManagerVPCAttachment_Attached_disappearsAccepter(t *testing.T }) } -func TestAccNetworkManagerVPCAttachment_tags(t *testing.T) { - const ( - resourceName = "aws_networkmanager_vpc_attachment.test" - ) - - ctx := acctest.Context(t) - var v awstypes.VpcAttachment - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckVPCAttachmentDestroy(ctx), - Steps: []resource.TestStep{ - { - Config: testAccVPCAttachmentConfig_tags1(rName, "segment", "shared"), - Check: resource.ComposeTestCheckFunc( - testAccCheckVPCAttachmentExists(ctx, resourceName, &v), - resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, "1"), - resource.TestCheckResourceAttr(resourceName, "tags.segment", "shared"), - ), - }, - { - Config: testAccVPCAttachmentConfig_tags2(rName, "segment", "shared", "Name", "test"), - Check: resource.ComposeTestCheckFunc( - testAccCheckVPCAttachmentExists(ctx, resourceName, &v), - resource.TestCheckResourceAttr(resourceName, 
acctest.CtTagsPercent, "2"), - resource.TestCheckResourceAttr(resourceName, "tags.segment", "shared"), - resource.TestCheckResourceAttr(resourceName, "tags.Name", "test"), - ), - }, - { - Config: testAccVPCAttachmentConfig_tags1(rName, "segment", "shared"), - Check: resource.ComposeTestCheckFunc( - testAccCheckVPCAttachmentExists(ctx, resourceName, &v), - resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, "1"), - resource.TestCheckResourceAttr(resourceName, "tags.segment", "shared"), - ), - }, - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} - func TestAccNetworkManagerVPCAttachment_update(t *testing.T) { const ( resourceName = "aws_networkmanager_vpc_attachment.test" @@ -514,6 +469,49 @@ func TestAccNetworkManagerVPCAttachment_Attached_update(t *testing.T) { } } +func TestAccNetworkManagerVPCAttachment_attachmentOptions(t *testing.T) { + ctx := acctest.Context(t) + var v awstypes.VpcAttachment + resourceName := "aws_networkmanager_vpc_attachment.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckVPCAttachmentDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccVPCAttachmentConfig_attachmentOptions(rName, false, true, false, true, false), + Check: resource.ComposeTestCheckFunc( + testAccCheckVPCAttachmentExists(ctx, resourceName, &v), + resource.TestCheckResourceAttr(resourceName, "options.#", "1"), + resource.TestCheckResourceAttr(resourceName, "options.0.appliance_mode_support", acctest.CtFalse), + resource.TestCheckResourceAttr(resourceName, "options.0.dns_support", acctest.CtTrue), + resource.TestCheckResourceAttr(resourceName, "options.0.ipv6_support", acctest.CtFalse), + 
resource.TestCheckResourceAttr(resourceName, "options.0.security_group_referencing_support", acctest.CtTrue), + ), + }, + { + Config: testAccVPCAttachmentConfig_attachmentOptions(rName, true, false, true, false, false), + Check: resource.ComposeTestCheckFunc( + testAccCheckVPCAttachmentExists(ctx, resourceName, &v), + resource.TestCheckResourceAttr(resourceName, "options.#", "1"), + resource.TestCheckResourceAttr(resourceName, "options.0.appliance_mode_support", acctest.CtTrue), + resource.TestCheckResourceAttr(resourceName, "options.0.dns_support", acctest.CtFalse), + resource.TestCheckResourceAttr(resourceName, "options.0.ipv6_support", acctest.CtTrue), + resource.TestCheckResourceAttr(resourceName, "options.0.security_group_referencing_support", acctest.CtFalse), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + func testAccCheckVPCAttachmentExists(ctx context.Context, n string, v *awstypes.VpcAttachment) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] @@ -606,39 +604,6 @@ resource "aws_networkmanager_attachment_accepter" "test" { `) } -func testAccVPCAttachmentConfig_tags1(rName, tagKey1, tagValue1 string) string { - return acctest.ConfigCompose( - testAccVPCAttachmentConfig_base(rName, false), - fmt.Sprintf(` -resource "aws_networkmanager_vpc_attachment" "test" { - subnet_arns = [aws_subnet.test[0].arn] - core_network_id = aws_networkmanager_core_network_policy_attachment.test.core_network_id - vpc_arn = aws_vpc.test.arn - - tags = { - %[1]q = %[2]q - } -} -`, tagKey1, tagValue1)) -} - -func testAccVPCAttachmentConfig_tags2(rName, tagKey1, tagValue1, tagKey2, tagValue2 string) string { - return acctest.ConfigCompose( - testAccVPCAttachmentConfig_base(rName, false), - fmt.Sprintf(` -resource "aws_networkmanager_vpc_attachment" "test" { - subnet_arns = [aws_subnet.test[0].arn] - core_network_id = 
aws_networkmanager_core_network_policy_attachment.test.core_network_id - vpc_arn = aws_vpc.test.arn - - tags = { - %[1]q = %[2]q - %[3]q = %[4]q - } -} -`, tagKey1, tagValue1, tagKey2, tagValue2)) -} - func testAccVPCAttachmentConfig_updates(rName string, requireAcceptance bool, nSubnets int, applianceModeSupport, ipv6Support bool) string { return acctest.ConfigCompose( testAccVPCAttachmentConfig_base(rName, requireAcceptance), @@ -649,8 +614,10 @@ resource "aws_networkmanager_vpc_attachment" "test" { vpc_arn = aws_vpc.test.arn options { - appliance_mode_support = %[3]t - ipv6_support = %[4]t + appliance_mode_support = %[3]t + dns_support = false + ipv6_support = %[4]t + security_group_referencing_support = false } } `, rName, nSubnets, applianceModeSupport, ipv6Support)) @@ -666,8 +633,10 @@ resource "aws_networkmanager_vpc_attachment" "test" { vpc_arn = aws_vpc.test.arn options { - appliance_mode_support = %[3]t - ipv6_support = %[4]t + appliance_mode_support = %[3]t + dns_support = false + ipv6_support = %[4]t + security_group_referencing_support = false } } @@ -680,35 +649,8 @@ resource "aws_networkmanager_attachment_accepter" "test" { func testAccVPCAttachmentConfig_base(rName string, requireAcceptance bool) string { return acctest.ConfigCompose( - acctest.ConfigAvailableAZsNoOptIn(), + acctest.ConfigVPCWithSubnetsIPv6(rName, 2), fmt.Sprintf(` -data "aws_region" "current" {} - -resource "aws_vpc" "test" { - cidr_block = "10.0.0.0/16" - - assign_generated_ipv6_cidr_block = true - - tags = { - Name = %[1]q - } -} - -resource "aws_subnet" "test" { - count = 2 - - vpc_id = aws_vpc.test.id - availability_zone = data.aws_availability_zones.available.names[count.index] - cidr_block = cidrsubnet(aws_vpc.test.cidr_block, 8, count.index) - - ipv6_cidr_block = cidrsubnet(aws_vpc.test.ipv6_cidr_block, 8, count.index) - assign_ipv6_address_on_creation = true - - tags = { - Name = %[1]q - } -} - resource "aws_networkmanager_global_network" "test" { tags = { Name = %[1]q @@ 
-728,6 +670,8 @@ resource "aws_networkmanager_core_network_policy_attachment" "test" { policy_document = data.aws_networkmanager_core_network_policy_document.test.json } +data "aws_region" "current" {} + data "aws_networkmanager_core_network_policy_document" "test" { core_network_configuration { vpn_ecmp_support = false @@ -766,3 +710,22 @@ data "aws_networkmanager_core_network_policy_document" "test" { } `, rName, requireAcceptance)) } + +func testAccVPCAttachmentConfig_attachmentOptions(rName string, applianceModeSupport, dnsSupport, ipv6Support, securityGroupReferencingSupport bool, requireAcceptance bool) string { + return acctest.ConfigCompose( + testAccVPCAttachmentConfig_base(rName, requireAcceptance), + fmt.Sprintf(` +resource "aws_networkmanager_vpc_attachment" "test" { + subnet_arns = aws_subnet.test[*].arn + core_network_id = aws_networkmanager_core_network_policy_attachment.test.core_network_id + vpc_arn = aws_vpc.test.arn + + options { + appliance_mode_support = %[2]t + dns_support = %[3]t + ipv6_support = %[4]t + security_group_referencing_support = %[5]t + } +} +`, rName, applianceModeSupport, dnsSupport, ipv6Support, securityGroupReferencingSupport)) +} diff --git a/internal/service/networkmonitor/monitor_tags_gen_test.go b/internal/service/networkmonitor/monitor_tags_gen_test.go index bc67ec23cc0c..728c009904e7 100644 --- a/internal/service/networkmonitor/monitor_tags_gen_test.go +++ b/internal/service/networkmonitor/monitor_tags_gen_test.go @@ -6,7 +6,6 @@ import ( "testing" "github.com/hashicorp/terraform-plugin-testing/config" - sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/knownvalue" "github.com/hashicorp/terraform-plugin-testing/plancheck" @@ -18,10 +17,11 @@ import ( func TestAccNetworkMonitorMonitor_tags(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_networkmonitor_monitor.test" - rName 
:= sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.NetworkMonitorServiceID), CheckDestroy: testAccCheckMonitorDestroy(ctx), @@ -199,10 +199,11 @@ func TestAccNetworkMonitorMonitor_tags(t *testing.T) { func TestAccNetworkMonitorMonitor_tags_null(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_networkmonitor_monitor.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.NetworkMonitorServiceID), CheckDestroy: testAccCheckMonitorDestroy(ctx), @@ -260,10 +261,11 @@ func TestAccNetworkMonitorMonitor_tags_null(t *testing.T) { func TestAccNetworkMonitorMonitor_tags_EmptyMap(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_networkmonitor_monitor.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.NetworkMonitorServiceID), CheckDestroy: testAccCheckMonitorDestroy(ctx), @@ -309,10 +311,11 @@ func TestAccNetworkMonitorMonitor_tags_EmptyMap(t *testing.T) { func TestAccNetworkMonitorMonitor_tags_AddOnUpdate(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_networkmonitor_monitor.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + 
acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.NetworkMonitorServiceID), CheckDestroy: testAccCheckMonitorDestroy(ctx), @@ -388,10 +391,11 @@ func TestAccNetworkMonitorMonitor_tags_AddOnUpdate(t *testing.T) { func TestAccNetworkMonitorMonitor_tags_EmptyTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_networkmonitor_monitor.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.NetworkMonitorServiceID), CheckDestroy: testAccCheckMonitorDestroy(ctx), @@ -477,10 +481,11 @@ func TestAccNetworkMonitorMonitor_tags_EmptyTag_OnCreate(t *testing.T) { func TestAccNetworkMonitorMonitor_tags_EmptyTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_networkmonitor_monitor.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.NetworkMonitorServiceID), CheckDestroy: testAccCheckMonitorDestroy(ctx), @@ -615,10 +620,11 @@ func TestAccNetworkMonitorMonitor_tags_EmptyTag_OnUpdate_Add(t *testing.T) { func TestAccNetworkMonitorMonitor_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_networkmonitor_monitor.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, 
ErrorCheck: acctest.ErrorCheck(t, names.NetworkMonitorServiceID), CheckDestroy: testAccCheckMonitorDestroy(ctx), @@ -704,10 +710,11 @@ func TestAccNetworkMonitorMonitor_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { func TestAccNetworkMonitorMonitor_tags_DefaultTags_providerOnly(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_networkmonitor_monitor.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.NetworkMonitorServiceID), CheckDestroy: testAccCheckMonitorDestroy(ctx), @@ -884,10 +891,11 @@ func TestAccNetworkMonitorMonitor_tags_DefaultTags_providerOnly(t *testing.T) { func TestAccNetworkMonitorMonitor_tags_DefaultTags_nonOverlapping(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_networkmonitor_monitor.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.NetworkMonitorServiceID), CheckDestroy: testAccCheckMonitorDestroy(ctx), @@ -1043,10 +1051,11 @@ func TestAccNetworkMonitorMonitor_tags_DefaultTags_nonOverlapping(t *testing.T) func TestAccNetworkMonitorMonitor_tags_DefaultTags_overlapping(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_networkmonitor_monitor.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.NetworkMonitorServiceID), 
CheckDestroy: testAccCheckMonitorDestroy(ctx), @@ -1218,10 +1227,11 @@ func TestAccNetworkMonitorMonitor_tags_DefaultTags_overlapping(t *testing.T) { func TestAccNetworkMonitorMonitor_tags_DefaultTags_updateToProviderOnly(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_networkmonitor_monitor.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.NetworkMonitorServiceID), CheckDestroy: testAccCheckMonitorDestroy(ctx), @@ -1307,10 +1317,11 @@ func TestAccNetworkMonitorMonitor_tags_DefaultTags_updateToProviderOnly(t *testi func TestAccNetworkMonitorMonitor_tags_DefaultTags_updateToResourceOnly(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_networkmonitor_monitor.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.NetworkMonitorServiceID), CheckDestroy: testAccCheckMonitorDestroy(ctx), @@ -1395,10 +1406,11 @@ func TestAccNetworkMonitorMonitor_tags_DefaultTags_updateToResourceOnly(t *testi func TestAccNetworkMonitorMonitor_tags_DefaultTags_emptyResourceTag(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_networkmonitor_monitor.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.NetworkMonitorServiceID), CheckDestroy: testAccCheckMonitorDestroy(ctx), 
@@ -1460,10 +1472,11 @@ func TestAccNetworkMonitorMonitor_tags_DefaultTags_emptyResourceTag(t *testing.T func TestAccNetworkMonitorMonitor_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_networkmonitor_monitor.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.NetworkMonitorServiceID), CheckDestroy: testAccCheckMonitorDestroy(ctx), @@ -1517,10 +1530,11 @@ func TestAccNetworkMonitorMonitor_tags_DefaultTags_emptyProviderOnlyTag(t *testi func TestAccNetworkMonitorMonitor_tags_DefaultTags_nullOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_networkmonitor_monitor.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.NetworkMonitorServiceID), CheckDestroy: testAccCheckMonitorDestroy(ctx), @@ -1585,10 +1599,11 @@ func TestAccNetworkMonitorMonitor_tags_DefaultTags_nullOverlappingResourceTag(t func TestAccNetworkMonitorMonitor_tags_DefaultTags_nullNonOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_networkmonitor_monitor.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.NetworkMonitorServiceID), CheckDestroy: testAccCheckMonitorDestroy(ctx), @@ -1655,10 +1670,11 @@ 
func TestAccNetworkMonitorMonitor_tags_DefaultTags_nullNonOverlappingResourceTag func TestAccNetworkMonitorMonitor_tags_ComputedTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_networkmonitor_monitor.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.NetworkMonitorServiceID), CheckDestroy: testAccCheckMonitorDestroy(ctx), @@ -1709,10 +1725,11 @@ func TestAccNetworkMonitorMonitor_tags_ComputedTag_OnCreate(t *testing.T) { func TestAccNetworkMonitorMonitor_tags_ComputedTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_networkmonitor_monitor.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.NetworkMonitorServiceID), CheckDestroy: testAccCheckMonitorDestroy(ctx), @@ -1805,10 +1822,11 @@ func TestAccNetworkMonitorMonitor_tags_ComputedTag_OnUpdate_Add(t *testing.T) { func TestAccNetworkMonitorMonitor_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_networkmonitor_monitor.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.NetworkMonitorServiceID), CheckDestroy: testAccCheckMonitorDestroy(ctx), @@ -1891,10 +1909,11 @@ func TestAccNetworkMonitorMonitor_tags_ComputedTag_OnUpdate_Replace(t 
*testing.T func TestAccNetworkMonitorMonitor_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_networkmonitor_monitor.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.NetworkMonitorServiceID), CheckDestroy: testAccCheckMonitorDestroy(ctx), @@ -2052,10 +2071,11 @@ func TestAccNetworkMonitorMonitor_tags_IgnoreTags_Overlap_DefaultTag(t *testing. func TestAccNetworkMonitorMonitor_tags_IgnoreTags_Overlap_ResourceTag(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_networkmonitor_monitor.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.NetworkMonitorServiceID), CheckDestroy: testAccCheckMonitorDestroy(ctx), diff --git a/internal/service/networkmonitor/probe_tags_gen_test.go b/internal/service/networkmonitor/probe_tags_gen_test.go index 52422c47c607..87d5387b3b66 100644 --- a/internal/service/networkmonitor/probe_tags_gen_test.go +++ b/internal/service/networkmonitor/probe_tags_gen_test.go @@ -6,7 +6,6 @@ import ( "testing" "github.com/hashicorp/terraform-plugin-testing/config" - sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/knownvalue" "github.com/hashicorp/terraform-plugin-testing/plancheck" @@ -18,10 +17,11 @@ import ( func TestAccNetworkMonitorProbe_tags(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_networkmonitor_probe.test" 
- rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.NetworkMonitorServiceID), CheckDestroy: testAccCheckProbeDestroy(ctx), @@ -199,10 +199,11 @@ func TestAccNetworkMonitorProbe_tags(t *testing.T) { func TestAccNetworkMonitorProbe_tags_null(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_networkmonitor_probe.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.NetworkMonitorServiceID), CheckDestroy: testAccCheckProbeDestroy(ctx), @@ -260,10 +261,11 @@ func TestAccNetworkMonitorProbe_tags_null(t *testing.T) { func TestAccNetworkMonitorProbe_tags_EmptyMap(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_networkmonitor_probe.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.NetworkMonitorServiceID), CheckDestroy: testAccCheckProbeDestroy(ctx), @@ -309,10 +311,11 @@ func TestAccNetworkMonitorProbe_tags_EmptyMap(t *testing.T) { func TestAccNetworkMonitorProbe_tags_AddOnUpdate(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_networkmonitor_probe.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, 
resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.NetworkMonitorServiceID), CheckDestroy: testAccCheckProbeDestroy(ctx), @@ -388,10 +391,11 @@ func TestAccNetworkMonitorProbe_tags_AddOnUpdate(t *testing.T) { func TestAccNetworkMonitorProbe_tags_EmptyTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_networkmonitor_probe.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.NetworkMonitorServiceID), CheckDestroy: testAccCheckProbeDestroy(ctx), @@ -477,10 +481,11 @@ func TestAccNetworkMonitorProbe_tags_EmptyTag_OnCreate(t *testing.T) { func TestAccNetworkMonitorProbe_tags_EmptyTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_networkmonitor_probe.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.NetworkMonitorServiceID), CheckDestroy: testAccCheckProbeDestroy(ctx), @@ -615,10 +620,11 @@ func TestAccNetworkMonitorProbe_tags_EmptyTag_OnUpdate_Add(t *testing.T) { func TestAccNetworkMonitorProbe_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_networkmonitor_probe.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, 
names.NetworkMonitorServiceID), CheckDestroy: testAccCheckProbeDestroy(ctx), @@ -704,10 +710,11 @@ func TestAccNetworkMonitorProbe_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { func TestAccNetworkMonitorProbe_tags_DefaultTags_providerOnly(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_networkmonitor_probe.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.NetworkMonitorServiceID), CheckDestroy: testAccCheckProbeDestroy(ctx), @@ -884,10 +891,11 @@ func TestAccNetworkMonitorProbe_tags_DefaultTags_providerOnly(t *testing.T) { func TestAccNetworkMonitorProbe_tags_DefaultTags_nonOverlapping(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_networkmonitor_probe.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.NetworkMonitorServiceID), CheckDestroy: testAccCheckProbeDestroy(ctx), @@ -1043,10 +1051,11 @@ func TestAccNetworkMonitorProbe_tags_DefaultTags_nonOverlapping(t *testing.T) { func TestAccNetworkMonitorProbe_tags_DefaultTags_overlapping(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_networkmonitor_probe.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.NetworkMonitorServiceID), CheckDestroy: testAccCheckProbeDestroy(ctx), @@ -1218,10 
+1227,11 @@ func TestAccNetworkMonitorProbe_tags_DefaultTags_overlapping(t *testing.T) { func TestAccNetworkMonitorProbe_tags_DefaultTags_updateToProviderOnly(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_networkmonitor_probe.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.NetworkMonitorServiceID), CheckDestroy: testAccCheckProbeDestroy(ctx), @@ -1307,10 +1317,11 @@ func TestAccNetworkMonitorProbe_tags_DefaultTags_updateToProviderOnly(t *testing func TestAccNetworkMonitorProbe_tags_DefaultTags_updateToResourceOnly(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_networkmonitor_probe.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.NetworkMonitorServiceID), CheckDestroy: testAccCheckProbeDestroy(ctx), @@ -1395,10 +1406,11 @@ func TestAccNetworkMonitorProbe_tags_DefaultTags_updateToResourceOnly(t *testing func TestAccNetworkMonitorProbe_tags_DefaultTags_emptyResourceTag(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_networkmonitor_probe.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.NetworkMonitorServiceID), CheckDestroy: testAccCheckProbeDestroy(ctx), @@ -1460,10 +1472,11 @@ func 
TestAccNetworkMonitorProbe_tags_DefaultTags_emptyResourceTag(t *testing.T) func TestAccNetworkMonitorProbe_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_networkmonitor_probe.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.NetworkMonitorServiceID), CheckDestroy: testAccCheckProbeDestroy(ctx), @@ -1517,10 +1530,11 @@ func TestAccNetworkMonitorProbe_tags_DefaultTags_emptyProviderOnlyTag(t *testing func TestAccNetworkMonitorProbe_tags_DefaultTags_nullOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_networkmonitor_probe.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.NetworkMonitorServiceID), CheckDestroy: testAccCheckProbeDestroy(ctx), @@ -1585,10 +1599,11 @@ func TestAccNetworkMonitorProbe_tags_DefaultTags_nullOverlappingResourceTag(t *t func TestAccNetworkMonitorProbe_tags_DefaultTags_nullNonOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_networkmonitor_probe.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.NetworkMonitorServiceID), CheckDestroy: testAccCheckProbeDestroy(ctx), @@ -1655,10 +1670,11 @@ func 
TestAccNetworkMonitorProbe_tags_DefaultTags_nullNonOverlappingResourceTag(t func TestAccNetworkMonitorProbe_tags_ComputedTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_networkmonitor_probe.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.NetworkMonitorServiceID), CheckDestroy: testAccCheckProbeDestroy(ctx), @@ -1709,10 +1725,11 @@ func TestAccNetworkMonitorProbe_tags_ComputedTag_OnCreate(t *testing.T) { func TestAccNetworkMonitorProbe_tags_ComputedTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_networkmonitor_probe.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.NetworkMonitorServiceID), CheckDestroy: testAccCheckProbeDestroy(ctx), @@ -1805,10 +1822,11 @@ func TestAccNetworkMonitorProbe_tags_ComputedTag_OnUpdate_Add(t *testing.T) { func TestAccNetworkMonitorProbe_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_networkmonitor_probe.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.NetworkMonitorServiceID), CheckDestroy: testAccCheckProbeDestroy(ctx), @@ -1891,10 +1909,11 @@ func TestAccNetworkMonitorProbe_tags_ComputedTag_OnUpdate_Replace(t *testing.T) func 
TestAccNetworkMonitorProbe_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_networkmonitor_probe.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.NetworkMonitorServiceID), CheckDestroy: testAccCheckProbeDestroy(ctx), @@ -2052,10 +2071,11 @@ func TestAccNetworkMonitorProbe_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) func TestAccNetworkMonitorProbe_tags_IgnoreTags_Overlap_ResourceTag(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_networkmonitor_probe.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.NetworkMonitorServiceID), CheckDestroy: testAccCheckProbeDestroy(ctx), diff --git a/internal/service/networkmonitor/service_endpoint_resolver_gen.go b/internal/service/networkmonitor/service_endpoint_resolver_gen.go index a77f41113b62..783426de8c34 100644 --- a/internal/service/networkmonitor/service_endpoint_resolver_gen.go +++ b/internal/service/networkmonitor/service_endpoint_resolver_gen.go @@ -62,7 +62,7 @@ func (r resolverV2) ResolveEndpoint(ctx context.Context, params networkmonitor.E }) params.UseFIPS = aws.Bool(false) } else { - err = fmt.Errorf("looking up networkmonitor endpoint %q: %s", hostname, err) + err = fmt.Errorf("looking up networkmonitor endpoint %q: %w", hostname, err) return } } else { diff --git a/internal/service/networkmonitor/service_endpoints_gen_test.go b/internal/service/networkmonitor/service_endpoints_gen_test.go index 8892c21a3b8e..c40216ac2731 
100644 --- a/internal/service/networkmonitor/service_endpoints_gen_test.go +++ b/internal/service/networkmonitor/service_endpoints_gen_test.go @@ -521,7 +521,7 @@ func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { ) } -var errCancelOperation = fmt.Errorf("Test: Canceling request") +var errCancelOperation = errors.New("Test: Canceling request") func addCancelRequestMiddleware() func(*middleware.Stack) error { return func(stack *middleware.Stack) error { diff --git a/internal/service/networkmonitor/service_package_gen.go b/internal/service/networkmonitor/service_package_gen.go index 9ae5254e9af6..504a3fe50ccb 100644 --- a/internal/service/networkmonitor/service_package_gen.go +++ b/internal/service/networkmonitor/service_package_gen.go @@ -7,7 +7,6 @@ import ( "unique" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/networkmonitor" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -76,7 +75,7 @@ func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) ( func(o *networkmonitor.Options) { if inContext, ok := conns.FromContext(ctx); ok && inContext.VCREnabled() { tflog.Info(ctx, "overriding retry behavior to immediately return VCR errors") - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(vcr.InteractionNotFoundRetryableFunc)) + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), vcr.InteractionNotFoundRetryableFunc) } }, withExtraOptions(ctx, p, config), diff --git a/internal/service/networkmonitor/tags_gen.go b/internal/service/networkmonitor/tags_gen.go index 1402a68c97ef..901d5688a4bf 100644 --- a/internal/service/networkmonitor/tags_gen.go +++ b/internal/service/networkmonitor/tags_gen.go @@ -3,8 +3,8 @@ package networkmonitor import ( "context" - "fmt" + "github.com/YakDriver/smarterr" "github.com/aws/aws-sdk-go-v2/aws" 
"github.com/aws/aws-sdk-go-v2/service/networkmonitor" "github.com/hashicorp/terraform-plugin-log/tflog" @@ -26,7 +26,7 @@ func listTags(ctx context.Context, conn *networkmonitor.Client, identifier strin output, err := conn.ListTagsForResource(ctx, &input, optFns...) if err != nil { - return tftags.New(ctx, nil), err + return tftags.New(ctx, nil), smarterr.NewError(err) } return keyValueTags(ctx, output.Tags), nil @@ -38,7 +38,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri tags, err := listTags(ctx, meta.(*conns.AWSClient).NetworkMonitorClient(ctx), identifier) if err != nil { - return err + return smarterr.NewError(err) } if inContext, ok := tftags.FromContext(ctx); ok { @@ -99,7 +99,7 @@ func updateTags(ctx context.Context, conn *networkmonitor.Client, identifier str _, err := conn.UntagResource(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("untagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } @@ -114,7 +114,7 @@ func updateTags(ctx context.Context, conn *networkmonitor.Client, identifier str _, err := conn.TagResource(ctx, &input, optFns...) 
if err != nil { - return fmt.Errorf("tagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } diff --git a/internal/service/notifications/service_endpoint_resolver_gen.go b/internal/service/notifications/service_endpoint_resolver_gen.go index b1ff2c981e41..69ab4c0349fc 100644 --- a/internal/service/notifications/service_endpoint_resolver_gen.go +++ b/internal/service/notifications/service_endpoint_resolver_gen.go @@ -62,7 +62,7 @@ func (r resolverV2) ResolveEndpoint(ctx context.Context, params notifications.En }) params.UseFIPS = aws.Bool(false) } else { - err = fmt.Errorf("looking up notifications endpoint %q: %s", hostname, err) + err = fmt.Errorf("looking up notifications endpoint %q: %w", hostname, err) return } } else { diff --git a/internal/service/notifications/service_endpoints_gen_test.go b/internal/service/notifications/service_endpoints_gen_test.go index 2d0cad8190f4..35e260b1ef4f 100644 --- a/internal/service/notifications/service_endpoints_gen_test.go +++ b/internal/service/notifications/service_endpoints_gen_test.go @@ -523,7 +523,7 @@ func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { ) } -var errCancelOperation = fmt.Errorf("Test: Canceling request") +var errCancelOperation = errors.New("Test: Canceling request") func addCancelRequestMiddleware() func(*middleware.Stack) error { return func(stack *middleware.Stack) error { diff --git a/internal/service/notifications/service_package_gen.go b/internal/service/notifications/service_package_gen.go index e2feabef781a..019f39a8c774 100644 --- a/internal/service/notifications/service_package_gen.go +++ b/internal/service/notifications/service_package_gen.go @@ -7,7 +7,6 @@ import ( "unique" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/notifications" "github.com/hashicorp/aws-sdk-go-base/v2/endpoints" "github.com/hashicorp/terraform-plugin-log/tflog" @@ -86,7 +85,7 @@ func (p 
*servicePackage) NewClient(ctx context.Context, config map[string]any) ( func(o *notifications.Options) { if inContext, ok := conns.FromContext(ctx); ok && inContext.VCREnabled() { tflog.Info(ctx, "overriding retry behavior to immediately return VCR errors") - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(vcr.InteractionNotFoundRetryableFunc)) + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), vcr.InteractionNotFoundRetryableFunc) } }, func(o *notifications.Options) { diff --git a/internal/service/notifications/tags_gen.go b/internal/service/notifications/tags_gen.go index 46ea1e4d0678..8b1281addbda 100644 --- a/internal/service/notifications/tags_gen.go +++ b/internal/service/notifications/tags_gen.go @@ -3,8 +3,8 @@ package notifications import ( "context" - "fmt" + "github.com/YakDriver/smarterr" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/notifications" "github.com/hashicorp/terraform-plugin-log/tflog" @@ -26,7 +26,7 @@ func listTags(ctx context.Context, conn *notifications.Client, identifier string output, err := conn.ListTagsForResource(ctx, &input, optFns...) if err != nil { - return tftags.New(ctx, nil), err + return tftags.New(ctx, nil), smarterr.NewError(err) } return keyValueTags(ctx, output.Tags), nil @@ -38,7 +38,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri tags, err := listTags(ctx, meta.(*conns.AWSClient).NotificationsClient(ctx), identifier) if err != nil { - return err + return smarterr.NewError(err) } if inContext, ok := tftags.FromContext(ctx); ok { @@ -99,7 +99,7 @@ func updateTags(ctx context.Context, conn *notifications.Client, identifier stri _, err := conn.UntagResource(ctx, &input, optFns...) 
if err != nil { - return fmt.Errorf("untagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } @@ -114,7 +114,7 @@ func updateTags(ctx context.Context, conn *notifications.Client, identifier stri _, err := conn.TagResource(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("tagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } diff --git a/internal/service/notificationscontacts/email_contact.go b/internal/service/notificationscontacts/email_contact.go index 760140c4220b..d196161c5a96 100644 --- a/internal/service/notificationscontacts/email_contact.go +++ b/internal/service/notificationscontacts/email_contact.go @@ -19,11 +19,11 @@ import ( "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" "github.com/hashicorp/terraform-plugin-framework/schema/validator" "github.com/hashicorp/terraform-plugin-framework/types" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/fwdiag" "github.com/hashicorp/terraform-provider-aws/internal/framework" fwflex "github.com/hashicorp/terraform-provider-aws/internal/framework/flex" + "github.com/hashicorp/terraform-provider-aws/internal/retry" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/names" @@ -125,7 +125,7 @@ func (r *emailContactResource) Read(ctx context.Context, request resource.ReadRe arn := fwflex.StringValueFromFramework(ctx, data.ARN) output, err := findEmailContactByARN(ctx, conn, arn) - if tfresource.NotFound(err) { + if retry.NotFound(err) { response.Diagnostics.Append(fwdiag.NewResourceNotFoundWarningDiagnostic(err)) response.State.RemoveResource(ctx) @@ -185,8 +185,7 @@ func findEmailContactByARN(ctx context.Context, conn *notificationscontacts.Clie if 
errs.IsA[*awstypes.ResourceNotFoundException](err) { return nil, &retry.NotFoundError{ - LastError: err, - LastRequest: &input, + LastError: err, } } diff --git a/internal/service/notificationscontacts/email_contact_test.go b/internal/service/notificationscontacts/email_contact_test.go index 8d1159afb5b6..3408beb63473 100644 --- a/internal/service/notificationscontacts/email_contact_test.go +++ b/internal/service/notificationscontacts/email_contact_test.go @@ -11,7 +11,6 @@ import ( "github.com/YakDriver/regexache" "github.com/aws/aws-sdk-go-v2/service/notificationscontacts" awstypes "github.com/aws/aws-sdk-go-v2/service/notificationscontacts/types" - sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/knownvalue" "github.com/hashicorp/terraform-plugin-testing/plancheck" @@ -20,20 +19,19 @@ import ( "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" "github.com/hashicorp/terraform-provider-aws/internal/acctest" tfknownvalue "github.com/hashicorp/terraform-provider-aws/internal/acctest/knownvalue" - "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/retry" tfnotificationscontacts "github.com/hashicorp/terraform-provider-aws/internal/service/notificationscontacts" - "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/names" ) func TestAccNotificationsContactsEmailContact_basic(t *testing.T) { ctx := acctest.Context(t) var emailcontact awstypes.EmailContact - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) rEmailAddress := acctest.RandomEmailAddress(acctest.RandomDomainName()) resourceName := "aws_notificationscontacts_email_contact.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ 
PreCheck: func() { acctest.PreCheck(ctx, t) acctest.PreCheckPartitionHasService(t, names.NotificationsContactsEndpointID) @@ -41,12 +39,12 @@ func TestAccNotificationsContactsEmailContact_basic(t *testing.T) { }, ErrorCheck: acctest.ErrorCheck(t, names.NotificationsContactsServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckEmailContactDestroy(ctx), + CheckDestroy: testAccCheckEmailContactDestroy(ctx, t), Steps: []resource.TestStep{ { Config: testAccEmailContactConfig_basic(rName, rEmailAddress), Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckEmailContactExists(ctx, resourceName, &emailcontact), + testAccCheckEmailContactExists(ctx, t, resourceName, &emailcontact), ), ConfigPlanChecks: resource.ConfigPlanChecks{ PreApply: []plancheck.PlanCheck{ @@ -74,11 +72,11 @@ func TestAccNotificationsContactsEmailContact_basic(t *testing.T) { func TestAccNotificationsContactsEmailContact_disappears(t *testing.T) { ctx := acctest.Context(t) var emailcontact awstypes.EmailContact - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) rEmailAddress := acctest.RandomEmailAddress(acctest.RandomDomainName()) resourceName := "aws_notificationscontacts_email_contact.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) acctest.PreCheckPartitionHasService(t, names.NotificationsContactsEndpointID) @@ -86,12 +84,12 @@ func TestAccNotificationsContactsEmailContact_disappears(t *testing.T) { }, ErrorCheck: acctest.ErrorCheck(t, names.NotificationsContactsServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckEmailContactDestroy(ctx), + CheckDestroy: testAccCheckEmailContactDestroy(ctx, t), Steps: []resource.TestStep{ { Config: testAccEmailContactConfig_basic(rName, rEmailAddress), Check: 
resource.ComposeAggregateTestCheckFunc( - testAccCheckEmailContactExists(ctx, resourceName, &emailcontact), + testAccCheckEmailContactExists(ctx, t, resourceName, &emailcontact), acctest.CheckFrameworkResourceDisappears(ctx, acctest.Provider, tfnotificationscontacts.ResourceEmailContact, resourceName), ), ExpectNonEmptyPlan: true, @@ -108,11 +106,11 @@ func TestAccNotificationsContactsEmailContact_disappears(t *testing.T) { func TestAccNotificationsContactsEmailContact_tags(t *testing.T) { ctx := acctest.Context(t) var v awstypes.EmailContact - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) rEmailAddress := acctest.RandomEmailAddress(acctest.RandomDomainName()) resourceName := "aws_notificationscontacts_email_contact.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) acctest.PreCheckPartitionHasService(t, names.NotificationsContactsEndpointID) @@ -120,12 +118,12 @@ func TestAccNotificationsContactsEmailContact_tags(t *testing.T) { }, ErrorCheck: acctest.ErrorCheck(t, names.NotificationsContactsServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckEmailContactDestroy(ctx), + CheckDestroy: testAccCheckEmailContactDestroy(ctx, t), Steps: []resource.TestStep{ { Config: testAccEmailContactConfig_tags1(rName, rEmailAddress, acctest.CtKey1, acctest.CtValue1), Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckEmailContactExists(ctx, resourceName, &v), + testAccCheckEmailContactExists(ctx, t, resourceName, &v), ), ConfigPlanChecks: resource.ConfigPlanChecks{ PreApply: []plancheck.PlanCheck{ @@ -148,7 +146,7 @@ func TestAccNotificationsContactsEmailContact_tags(t *testing.T) { { Config: testAccEmailContactConfig_tags2(rName, rEmailAddress, acctest.CtKey1, acctest.CtValue1Updated, acctest.CtKey2, acctest.CtValue2), Check: 
resource.ComposeAggregateTestCheckFunc( - testAccCheckEmailContactExists(ctx, resourceName, &v), + testAccCheckEmailContactExists(ctx, t, resourceName, &v), ), ConfigPlanChecks: resource.ConfigPlanChecks{ PreApply: []plancheck.PlanCheck{ @@ -165,7 +163,7 @@ func TestAccNotificationsContactsEmailContact_tags(t *testing.T) { { Config: testAccEmailContactConfig_tags1(rName, rEmailAddress, acctest.CtKey2, acctest.CtValue2), Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckEmailContactExists(ctx, resourceName, &v), + testAccCheckEmailContactExists(ctx, t, resourceName, &v), ), ConfigPlanChecks: resource.ConfigPlanChecks{ PreApply: []plancheck.PlanCheck{ @@ -182,9 +180,9 @@ func TestAccNotificationsContactsEmailContact_tags(t *testing.T) { }) } -func testAccCheckEmailContactDestroy(ctx context.Context) resource.TestCheckFunc { +func testAccCheckEmailContactDestroy(ctx context.Context, t *testing.T) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).NotificationsContactsClient(ctx) + conn := acctest.ProviderMeta(ctx, t).NotificationsContactsClient(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != "aws_notificationscontacts_email_contact" { @@ -193,7 +191,7 @@ func testAccCheckEmailContactDestroy(ctx context.Context) resource.TestCheckFunc _, err := tfnotificationscontacts.FindEmailContactByARN(ctx, conn, rs.Primary.Attributes[names.AttrARN]) - if tfresource.NotFound(err) { + if retry.NotFound(err) { continue } @@ -208,14 +206,14 @@ func testAccCheckEmailContactDestroy(ctx context.Context) resource.TestCheckFunc } } -func testAccCheckEmailContactExists(ctx context.Context, n string, v *awstypes.EmailContact) resource.TestCheckFunc { +func testAccCheckEmailContactExists(ctx context.Context, t *testing.T, n string, v *awstypes.EmailContact) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] if !ok { return fmt.Errorf("Not found: 
%s", n) } - conn := acctest.Provider.Meta().(*conns.AWSClient).NotificationsContactsClient(ctx) + conn := acctest.ProviderMeta(ctx, t).NotificationsContactsClient(ctx) output, err := tfnotificationscontacts.FindEmailContactByARN(ctx, conn, rs.Primary.Attributes[names.AttrARN]) @@ -230,7 +228,7 @@ func testAccCheckEmailContactExists(ctx context.Context, n string, v *awstypes.E } func testAccPreCheck(ctx context.Context, t *testing.T) { - conn := acctest.Provider.Meta().(*conns.AWSClient).NotificationsContactsClient(ctx) + conn := acctest.ProviderMeta(ctx, t).NotificationsContactsClient(ctx) var input notificationscontacts.ListEmailContactsInput diff --git a/internal/service/notificationscontacts/service_endpoint_resolver_gen.go b/internal/service/notificationscontacts/service_endpoint_resolver_gen.go index dbe4a5021df3..dc4fc2815f14 100644 --- a/internal/service/notificationscontacts/service_endpoint_resolver_gen.go +++ b/internal/service/notificationscontacts/service_endpoint_resolver_gen.go @@ -62,7 +62,7 @@ func (r resolverV2) ResolveEndpoint(ctx context.Context, params notificationscon }) params.UseFIPS = aws.Bool(false) } else { - err = fmt.Errorf("looking up notificationscontacts endpoint %q: %s", hostname, err) + err = fmt.Errorf("looking up notificationscontacts endpoint %q: %w", hostname, err) return } } else { diff --git a/internal/service/notificationscontacts/service_endpoints_gen_test.go b/internal/service/notificationscontacts/service_endpoints_gen_test.go index 3d0582d412fa..5c96e439d013 100644 --- a/internal/service/notificationscontacts/service_endpoints_gen_test.go +++ b/internal/service/notificationscontacts/service_endpoints_gen_test.go @@ -523,7 +523,7 @@ func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { ) } -var errCancelOperation = fmt.Errorf("Test: Canceling request") +var errCancelOperation = errors.New("Test: Canceling request") func addCancelRequestMiddleware() func(*middleware.Stack) error { return func(stack 
*middleware.Stack) error { diff --git a/internal/service/notificationscontacts/service_package_gen.go b/internal/service/notificationscontacts/service_package_gen.go index 2609ad11b7d9..7fd9a7cd7c35 100644 --- a/internal/service/notificationscontacts/service_package_gen.go +++ b/internal/service/notificationscontacts/service_package_gen.go @@ -7,7 +7,6 @@ import ( "unique" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/notificationscontacts" "github.com/hashicorp/aws-sdk-go-base/v2/endpoints" "github.com/hashicorp/terraform-plugin-log/tflog" @@ -68,7 +67,7 @@ func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) ( func(o *notificationscontacts.Options) { if inContext, ok := conns.FromContext(ctx); ok && inContext.VCREnabled() { tflog.Info(ctx, "overriding retry behavior to immediately return VCR errors") - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(vcr.InteractionNotFoundRetryableFunc)) + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), vcr.InteractionNotFoundRetryableFunc) } }, func(o *notificationscontacts.Options) { diff --git a/internal/service/notificationscontacts/tags_gen.go b/internal/service/notificationscontacts/tags_gen.go index 6ecaa9c319c7..43b16e8c42c9 100644 --- a/internal/service/notificationscontacts/tags_gen.go +++ b/internal/service/notificationscontacts/tags_gen.go @@ -3,8 +3,8 @@ package notificationscontacts import ( "context" - "fmt" + "github.com/YakDriver/smarterr" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/notificationscontacts" "github.com/hashicorp/terraform-plugin-log/tflog" @@ -26,7 +26,7 @@ func listTags(ctx context.Context, conn *notificationscontacts.Client, identifie output, err := conn.ListTagsForResource(ctx, &input, optFns...) 
if err != nil { - return tftags.New(ctx, nil), err + return tftags.New(ctx, nil), smarterr.NewError(err) } return keyValueTags(ctx, output.Tags), nil @@ -38,7 +38,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri tags, err := listTags(ctx, meta.(*conns.AWSClient).NotificationsContactsClient(ctx), identifier) if err != nil { - return err + return smarterr.NewError(err) } if inContext, ok := tftags.FromContext(ctx); ok { @@ -99,7 +99,7 @@ func updateTags(ctx context.Context, conn *notificationscontacts.Client, identif _, err := conn.UntagResource(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("untagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } @@ -114,7 +114,7 @@ func updateTags(ctx context.Context, conn *notificationscontacts.Client, identif _, err := conn.TagResource(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("tagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } diff --git a/internal/service/oam/service_endpoint_resolver_gen.go b/internal/service/oam/service_endpoint_resolver_gen.go index 52e81cc9d0e6..ff4f1b1ae605 100644 --- a/internal/service/oam/service_endpoint_resolver_gen.go +++ b/internal/service/oam/service_endpoint_resolver_gen.go @@ -62,7 +62,7 @@ func (r resolverV2) ResolveEndpoint(ctx context.Context, params oam.EndpointPara }) params.UseFIPS = aws.Bool(false) } else { - err = fmt.Errorf("looking up oam endpoint %q: %s", hostname, err) + err = fmt.Errorf("looking up oam endpoint %q: %w", hostname, err) return } } else { diff --git a/internal/service/oam/service_endpoints_gen_test.go b/internal/service/oam/service_endpoints_gen_test.go index 1fe84a6e0e7e..afc8af0d1514 100644 --- a/internal/service/oam/service_endpoints_gen_test.go +++ b/internal/service/oam/service_endpoints_gen_test.go @@ -601,7 +601,7 @@ func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { ) } -var errCancelOperation = fmt.Errorf("Test: 
Canceling request") +var errCancelOperation = errors.New("Test: Canceling request") func addCancelRequestMiddleware() func(*middleware.Stack) error { return func(stack *middleware.Stack) error { diff --git a/internal/service/oam/service_package_gen.go b/internal/service/oam/service_package_gen.go index cd2150f5371f..b6b8838348b0 100644 --- a/internal/service/oam/service_package_gen.go +++ b/internal/service/oam/service_package_gen.go @@ -7,7 +7,6 @@ import ( "unique" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/oam" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -107,7 +106,7 @@ func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) ( func(o *oam.Options) { if inContext, ok := conns.FromContext(ctx); ok && inContext.VCREnabled() { tflog.Info(ctx, "overriding retry behavior to immediately return VCR errors") - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(vcr.InteractionNotFoundRetryableFunc)) + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), vcr.InteractionNotFoundRetryableFunc) } }, withExtraOptions(ctx, p, config), diff --git a/internal/service/oam/tags_gen.go b/internal/service/oam/tags_gen.go index 218b1ccdbcc8..5e031dd70c58 100644 --- a/internal/service/oam/tags_gen.go +++ b/internal/service/oam/tags_gen.go @@ -3,8 +3,8 @@ package oam import ( "context" - "fmt" + "github.com/YakDriver/smarterr" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/oam" "github.com/hashicorp/terraform-plugin-log/tflog" @@ -26,7 +26,7 @@ func listTags(ctx context.Context, conn *oam.Client, identifier string, optFns . output, err := conn.ListTagsForResource(ctx, &input, optFns...) 
if err != nil { - return tftags.New(ctx, nil), err + return tftags.New(ctx, nil), smarterr.NewError(err) } return keyValueTags(ctx, output.Tags), nil @@ -38,7 +38,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri tags, err := listTags(ctx, meta.(*conns.AWSClient).ObservabilityAccessManagerClient(ctx), identifier) if err != nil { - return err + return smarterr.NewError(err) } if inContext, ok := tftags.FromContext(ctx); ok { @@ -99,7 +99,7 @@ func updateTags(ctx context.Context, conn *oam.Client, identifier string, oldTag _, err := conn.UntagResource(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("untagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } @@ -114,7 +114,7 @@ func updateTags(ctx context.Context, conn *oam.Client, identifier string, oldTag _, err := conn.TagResource(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("tagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } diff --git a/internal/service/odb/cloud_autonomous_vm_cluster.go b/internal/service/odb/cloud_autonomous_vm_cluster.go new file mode 100644 index 000000000000..02d0b7484ca8 --- /dev/null +++ b/internal/service/odb/cloud_autonomous_vm_cluster.go @@ -0,0 +1,742 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package odb + +import ( + "context" + "errors" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/odb" + odbtypes "github.com/aws/aws-sdk-go-v2/service/odb/types" + "github.com/hashicorp/terraform-plugin-framework-timeouts/resource/timeouts" + "github.com/hashicorp/terraform-plugin-framework-timetypes/timetypes" + "github.com/hashicorp/terraform-plugin-framework-validators/listvalidator" + "github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/boolplanmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/float32planmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/float64planmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/int32planmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/listplanmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/setplanmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" + "github.com/hashicorp/terraform-provider-aws/internal/create" + "github.com/hashicorp/terraform-provider-aws/internal/enum" + "github.com/hashicorp/terraform-provider-aws/internal/errs" + "github.com/hashicorp/terraform-provider-aws/internal/errs/fwdiag" + "github.com/hashicorp/terraform-provider-aws/internal/framework" + 
"github.com/hashicorp/terraform-provider-aws/internal/framework/flex" + fwtypes "github.com/hashicorp/terraform-provider-aws/internal/framework/types" + tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + "github.com/hashicorp/terraform-provider-aws/names" +) + +// Function annotations are used for resource registration to the Provider. DO NOT EDIT. +// @FrameworkResource("aws_odb_cloud_autonomous_vm_cluster", name="Cloud Autonomous Vm Cluster") +// @Tags(identifierAttribute="arn") +func newResourceCloudAutonomousVmCluster(_ context.Context) (resource.ResourceWithConfigure, error) { + r := &resourceCloudAutonomousVmCluster{} + r.SetDefaultCreateTimeout(24 * time.Hour) + r.SetDefaultUpdateTimeout(24 * time.Hour) + r.SetDefaultDeleteTimeout(24 * time.Hour) + + return r, nil +} + +const ( + ResNameCloudAutonomousVmCluster = "Cloud Autonomous Vm Cluster" +) + +type resourceCloudAutonomousVmCluster struct { + framework.ResourceWithModel[cloudAutonomousVmClusterResourceModel] + framework.WithTimeouts + framework.WithImportByID +} + +func (r *resourceCloudAutonomousVmCluster) Schema(ctx context.Context, req resource.SchemaRequest, resp *resource.SchemaResponse) { + status := fwtypes.StringEnumType[odbtypes.ResourceStatus]() + licenseModel := fwtypes.StringEnumType[odbtypes.LicenseModel]() + computeModel := fwtypes.StringEnumType[odbtypes.ComputeModel]() + stringLengthBetween1And255Validator := []validator.String{ + stringvalidator.LengthBetween(1, 255), + } + resp.Schema = schema.Schema{ + Attributes: map[string]schema.Attribute{ + names.AttrARN: framework.ARNAttributeComputedOnly(), + names.AttrID: framework.IDAttribute(), + "cloud_exadata_infrastructure_id": schema.StringAttribute{ + Required: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + Description: "Exadata infrastructure id. 
Changing this will force terraform to create new resource.", + }, + "autonomous_data_storage_percentage": schema.Float32Attribute{ + Computed: true, + PlanModifiers: []planmodifier.Float32{ + float32planmodifier.UseStateForUnknown(), + }, + Description: "The progress of the current operation on the Autonomous VM cluster, as a percentage.", + }, + "autonomous_data_storage_size_in_tbs": schema.Float64Attribute{ + Required: true, + PlanModifiers: []planmodifier.Float64{ + float64planmodifier.RequiresReplace(), + }, + Description: "The data storage size allocated for Autonomous Databases in the Autonomous VM cluster, in TB. Changing this will force terraform to create new resource.", + }, + "available_autonomous_data_storage_size_in_tbs": schema.Float64Attribute{ + Computed: true, + PlanModifiers: []planmodifier.Float64{ + float64planmodifier.UseStateForUnknown(), + }, + Description: "The available data storage space for Autonomous Databases in the Autonomous VM cluster, in TB.", + }, + "available_container_databases": schema.Int32Attribute{ + Computed: true, + PlanModifiers: []planmodifier.Int32{ + int32planmodifier.UseStateForUnknown(), + }, + Description: "The number of Autonomous CDBs that you can create with the currently available storage.", + }, + "available_cpus": schema.Float32Attribute{ + Computed: true, + PlanModifiers: []planmodifier.Float32{ + float32planmodifier.UseStateForUnknown(), + }, + Description: "The number of CPU cores available for allocation to Autonomous Databases", + }, + "compute_model": schema.StringAttribute{ + CustomType: computeModel, + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + Description: "The compute model of the Autonomous VM cluster: ECPU or OCPU.", + }, + "cpu_core_count": schema.Int32Attribute{ + Computed: true, + PlanModifiers: []planmodifier.Int32{ + int32planmodifier.UseStateForUnknown(), + }, + Description: "The total number of CPU cores in the Autonomous VM 
cluster.", + }, + "cpu_core_count_per_node": schema.Int32Attribute{ + Required: true, + PlanModifiers: []planmodifier.Int32{ + int32planmodifier.RequiresReplace(), + }, + Description: "The number of CPU cores enabled per node in the Autonomous VM cluster.", + }, + "cpu_percentage": schema.Float32Attribute{ + Computed: true, + PlanModifiers: []planmodifier.Float32{ + float32planmodifier.UseStateForUnknown(), + }, + Description: "The percentage of total CPU cores currently in use in the Autonomous VM cluster.", + }, + names.AttrCreatedAt: schema.StringAttribute{ + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + CustomType: timetypes.RFC3339Type{}, + Description: "The date and time when the Autonomous VM cluster was created.", + }, + "data_storage_size_in_gbs": schema.Float64Attribute{ + Computed: true, + PlanModifiers: []planmodifier.Float64{ + float64planmodifier.UseStateForUnknown(), + }, + Description: "The total data storage allocated to the Autonomous VM cluster, in GB.", + }, + "data_storage_size_in_tbs": schema.Float64Attribute{ + Computed: true, + PlanModifiers: []planmodifier.Float64{ + float64planmodifier.UseStateForUnknown(), + }, + Description: "The total data storage allocated to the Autonomous VM cluster, in TB.", + }, + "odb_node_storage_size_in_gbs": schema.Int32Attribute{ + Computed: true, + PlanModifiers: []planmodifier.Int32{ + int32planmodifier.UseStateForUnknown(), + }, + Description: " The local node storage allocated to the Autonomous VM cluster, in gigabytes (GB)", + }, + "db_servers": schema.SetAttribute{ + Required: true, + CustomType: fwtypes.SetOfStringType, + ElementType: types.StringType, + PlanModifiers: []planmodifier.Set{ + setplanmodifier.RequiresReplace(), + }, + Description: "The database servers in the Autonomous VM cluster. 
Changing this will force terraform to create new resource.", + }, + names.AttrDescription: schema.StringAttribute{ + Optional: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + Description: "The description of the Autonomous VM cluster.", + }, + names.AttrDisplayName: schema.StringAttribute{ + Required: true, + Validators: stringLengthBetween1And255Validator, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + Description: "The display name of the Autonomous VM cluster. Changing this will force terraform to create new resource.", + }, + names.AttrDomain: schema.StringAttribute{ + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + Description: "The domain name of the Autonomous VM cluster.", + }, + "exadata_storage_in_tbs_lowest_scaled_value": schema.Float64Attribute{ + Computed: true, + PlanModifiers: []planmodifier.Float64{ + float64planmodifier.UseStateForUnknown(), + }, + Description: "The minimum value to which you can scale down the Exadata storage, in TB.", + }, + "hostname": schema.StringAttribute{ + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + Description: "The hostname of the Autonomous VM cluster.", + }, + "is_mtls_enabled_vm_cluster": schema.BoolAttribute{ + Optional: true, + Computed: true, + PlanModifiers: []planmodifier.Bool{ + boolplanmodifier.RequiresReplace(), + boolplanmodifier.UseStateForUnknown(), + }, + Description: "Indicates whether mutual TLS (mTLS) authentication is enabled for the Autonomous VM cluster. Changing this will force terraform to create new resource. 
", + }, + "license_model": schema.StringAttribute{ + CustomType: licenseModel, + Optional: true, + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + stringplanmodifier.UseStateForUnknown(), + }, + Description: "The license model for the Autonomous VM cluster. Valid values are LICENSE_INCLUDED or BRING_YOUR_OWN_LICENSE . Changing this will force terraform to create new resource.", + }, + "max_acds_lowest_scaled_value": schema.Int32Attribute{ + Computed: true, + PlanModifiers: []planmodifier.Int32{ + int32planmodifier.UseStateForUnknown(), + }, + Description: "The minimum value to which you can scale down the maximum number of Autonomous CDBs.", + }, + "memory_per_oracle_compute_unit_in_gbs": schema.Int32Attribute{ + Required: true, + PlanModifiers: []planmodifier.Int32{ + int32planmodifier.RequiresReplace(), + }, + Description: "The amount of memory allocated per Oracle Compute Unit, in GB. Changing this will force terraform to create new resource.", + }, + "memory_size_in_gbs": schema.Int32Attribute{ + Computed: true, + PlanModifiers: []planmodifier.Int32{ + int32planmodifier.UseStateForUnknown(), + }, + Description: "The total amount of memory allocated to the Autonomous VM cluster, in gigabytes(GB).", + }, + "node_count": schema.Int32Attribute{ + Computed: true, + PlanModifiers: []planmodifier.Int32{ + int32planmodifier.UseStateForUnknown(), + }, + Description: "The number of database server nodes in the Autonomous VM cluster.", + }, + "non_provisionable_autonomous_container_databases": schema.Int32Attribute{ + Computed: true, + PlanModifiers: []planmodifier.Int32{ + int32planmodifier.UseStateForUnknown(), + }, + Description: "The number of Autonomous CDBs that can't be provisioned because of resource constraints.", + }, + "oci_resource_anchor_name": schema.StringAttribute{ + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + Description: "The name of the OCI 
resource anchor associated with this Autonomous VM cluster.", + }, + "oci_url": schema.StringAttribute{ + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + Description: "The URL for accessing the OCI console page for this Autonomous VM cluster.", + }, + "ocid": schema.StringAttribute{ + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + Description: "The Oracle Cloud Identifier (OCID) of the Autonomous VM cluster.", + }, + "odb_network_id": schema.StringAttribute{ + Required: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + Description: "The unique identifier of the ODB network associated with this Autonomous VM Cluster. Changing this will force terraform to create new resource.", + }, + "percent_progress": schema.Float32Attribute{ + Computed: true, + PlanModifiers: []planmodifier.Float32{ + float32planmodifier.UseStateForUnknown(), + }, + Description: `The progress of the current operation on the Autonomous VM cluster, as a percentage.`, + }, + "provisionable_autonomous_container_databases": schema.Int32Attribute{ + Computed: true, + PlanModifiers: []planmodifier.Int32{ + int32planmodifier.UseStateForUnknown(), + }, + Description: "The number of Autonomous CDBs that can be provisioned in the Autonomous VM cluster.", + }, + "provisioned_autonomous_container_databases": schema.Int32Attribute{ + Computed: true, + PlanModifiers: []planmodifier.Int32{ + int32planmodifier.UseStateForUnknown(), + }, + Description: "The number of Autonomous CDBs currently provisioned in the Autonomous VM cluster.", + }, + "provisioned_cpus": schema.Float32Attribute{ + Computed: true, + PlanModifiers: []planmodifier.Float32{ + float32planmodifier.UseStateForUnknown(), + }, + Description: "The number of CPUs provisioned in the Autonomous VM cluster.", + }, + "reclaimable_cpus": schema.Float32Attribute{ + Computed: true, + PlanModifiers: 
[]planmodifier.Float32{ + float32planmodifier.UseStateForUnknown(), + }, + Description: "The number of CPU cores that can be reclaimed from terminated or scaled-down Autonomous Databases.", + }, + "reserved_cpus": schema.Float32Attribute{ + Computed: true, + PlanModifiers: []planmodifier.Float32{ + float32planmodifier.UseStateForUnknown(), + }, + Description: "The number of CPU cores reserved for system operations and redundancy.", + }, + "scan_listener_port_non_tls": schema.Int32Attribute{ + Required: true, + PlanModifiers: []planmodifier.Int32{ + int32planmodifier.RequiresReplace(), + }, + Description: "The SCAN listener port for non-TLS (TCP) protocol. The default is 1521. Changing this will force terraform to create new resource.", + }, + "scan_listener_port_tls": schema.Int32Attribute{ + Required: true, + PlanModifiers: []planmodifier.Int32{ + int32planmodifier.RequiresReplace(), + }, + Description: "The SCAN listener port for TLS (TCP) protocol. The default is 2484. Changing this will force terraform to create new resource.", + }, + "shape": schema.StringAttribute{ + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + Description: "The shape of the Exadata infrastructure for the Autonomous VM cluster.", + }, + names.AttrStatus: schema.StringAttribute{ + CustomType: status, + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + Description: "The status of the Autonomous VM cluster. 
Possible values include CREATING, AVAILABLE , UPDATING , DELETING , DELETED , FAILED ", + }, + names.AttrStatusReason: schema.StringAttribute{ + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + Description: "Additional information about the current status of the Autonomous VM cluster.", + }, + "time_zone": schema.StringAttribute{ + Optional: true, + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + stringplanmodifier.UseStateForUnknown(), + }, + Description: "The time zone of the Autonomous VM cluster. Changing this will force terraform to create new resource.", + }, + "total_container_databases": schema.Int32Attribute{ + Required: true, + PlanModifiers: []planmodifier.Int32{ + int32planmodifier.RequiresReplace(), + }, + Description: "The total number of Autonomous Container Databases that can be created with the allocated local storage. Changing this will force terraform to create new resource.", + }, + "time_ords_certificate_expires": schema.StringAttribute{ + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + CustomType: timetypes.RFC3339Type{}, + }, + "time_database_ssl_certificate_expires": schema.StringAttribute{ + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + CustomType: timetypes.RFC3339Type{}, + Description: "The expiration date and time of the database SSL certificate.", + }, + names.AttrTags: tftags.TagsAttribute(), + names.AttrTagsAll: tftags.TagsAttributeComputedOnly(), + }, + Blocks: map[string]schema.Block{ + names.AttrTimeouts: timeouts.Block(ctx, timeouts.Opts{ + Create: true, + Update: true, + Delete: true, + }), + "maintenance_window": schema.ListNestedBlock{ + CustomType: fwtypes.NewListNestedObjectTypeOf[cloudAutonomousVmClusterMaintenanceWindowResourceModel](ctx), + Validators: []validator.List{ + listvalidator.SizeAtMost(1), + 
listvalidator.IsRequired(), + }, + PlanModifiers: []planmodifier.List{ + listplanmodifier.RequiresReplace(), + }, + Description: "The maintenance window of the Autonomous VM cluster.", + + NestedObject: schema.NestedBlockObject{ + Attributes: map[string]schema.Attribute{ + "days_of_week": schema.SetAttribute{ + ElementType: fwtypes.NewObjectTypeOf[dayWeekNameAutonomousVmClusterMaintenanceWindowResourceModel](ctx), + Optional: true, + Description: "The days of the week when maintenance can be performed.", + PlanModifiers: []planmodifier.Set{ + setplanmodifier.RequiresReplace(), + }, + }, + "hours_of_day": schema.SetAttribute{ + ElementType: types.Int64Type, + Optional: true, + Description: "The hours of the day when maintenance can be performed.", + PlanModifiers: []planmodifier.Set{ + setplanmodifier.RequiresReplace(), + }, + }, + "lead_time_in_weeks": schema.Int32Attribute{ + Optional: true, + Description: "The lead time in weeks before the maintenance window.", + PlanModifiers: []planmodifier.Int32{ + int32planmodifier.RequiresReplace(), + }, + }, + "months": schema.SetAttribute{ + ElementType: fwtypes.NewObjectTypeOf[monthNameAutonomousVmClusterMaintenanceWindowResourceModel](ctx), + Optional: true, + Description: "The months when maintenance can be performed.", + PlanModifiers: []planmodifier.Set{ + setplanmodifier.RequiresReplace(), + }, + }, + "preference": schema.StringAttribute{ + Required: true, + CustomType: fwtypes.StringEnumType[odbtypes.PreferenceType](), + Description: "The preference for the maintenance window scheduling.", + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + }, + "weeks_of_month": schema.SetAttribute{ + ElementType: types.Int64Type, + Optional: true, + Description: "Indicates whether to skip release updates during maintenance.", + PlanModifiers: []planmodifier.Set{ + setplanmodifier.RequiresReplace(), + }, + }, + }, + }, + }, + }, + } +} + +func (r *resourceCloudAutonomousVmCluster) Create(ctx 
context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { + conn := r.Meta().ODBClient(ctx) + var plan cloudAutonomousVmClusterResourceModel + resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...) + if resp.Diagnostics.HasError() { + return + } + + input := odb.CreateCloudAutonomousVmClusterInput{ + Tags: getTagsIn(ctx), + } + resp.Diagnostics.Append(flex.Expand(ctx, plan, &input)...) + if resp.Diagnostics.HasError() { + return + } + + out, err := conn.CreateCloudAutonomousVmCluster(ctx, &input) + if err != nil { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.ODB, create.ErrActionCreating, ResNameCloudAutonomousVmCluster, plan.DisplayName.ValueString(), err), + err.Error(), + ) + return + } + if out == nil || out.CloudAutonomousVmClusterId == nil { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.ODB, create.ErrActionCreating, ResNameCloudAutonomousVmCluster, plan.DisplayName.ValueString(), nil), + errors.New("empty output").Error(), + ) + return + } + + createTimeout := r.CreateTimeout(ctx, plan.Timeouts) + createdAVMC, err := waitCloudAutonomousVmClusterCreated(ctx, conn, aws.ToString(out.CloudAutonomousVmClusterId), createTimeout) + resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root(names.AttrID), aws.ToString(out.CloudAutonomousVmClusterId))...) + if err != nil { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.ODB, create.ErrActionWaitingForCreation, ResNameCloudAutonomousVmCluster, plan.DisplayName.ValueString(), err), + err.Error(), + ) + return + } + resp.Diagnostics.Append(flex.Flatten(ctx, createdAVMC, &plan)...) + resp.Diagnostics.Append(resp.State.Set(ctx, plan)...) +} + +func (r *resourceCloudAutonomousVmCluster) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { + conn := r.Meta().ODBClient(ctx) + + var state cloudAutonomousVmClusterResourceModel + resp.Diagnostics.Append(req.State.Get(ctx, &state)...) 
+ if resp.Diagnostics.HasError() { + return + } + + out, err := findCloudAutonomousVmClusterByID(ctx, conn, state.CloudAutonomousVmClusterId.ValueString()) + + if tfresource.NotFound(err) { + resp.Diagnostics.Append(fwdiag.NewResourceNotFoundWarningDiagnostic(err)) + resp.State.RemoveResource(ctx) + return + } + if err != nil { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.ODB, create.ErrActionReading, ResNameCloudAutonomousVmCluster, state.DisplayName.ValueString(), err), + err.Error(), + ) + return + } + resp.Diagnostics.Append(flex.Flatten(ctx, out, &state)...) + if resp.Diagnostics.HasError() { + return + } + resp.Diagnostics.Append(resp.State.Set(ctx, &state)...) +} + +func (r *resourceCloudAutonomousVmCluster) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { + conn := r.Meta().ODBClient(ctx) + + var state cloudAutonomousVmClusterResourceModel + resp.Diagnostics.Append(req.State.Get(ctx, &state)...) + if resp.Diagnostics.HasError() { + return + } + + input := odb.DeleteCloudAutonomousVmClusterInput{ + CloudAutonomousVmClusterId: state.CloudAutonomousVmClusterId.ValueStringPointer(), + } + + _, err := conn.DeleteCloudAutonomousVmCluster(ctx, &input) + + if err != nil { + if errs.IsA[*odbtypes.ResourceNotFoundException](err) { + return + } + + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.ODB, create.ErrActionDeleting, ResNameCloudAutonomousVmCluster, state.CloudAutonomousVmClusterId.ValueString(), err), + err.Error(), + ) + return + } + deleteTimeout := r.DeleteTimeout(ctx, state.Timeouts) + _, err = waitCloudAutonomousVmClusterDeleted(ctx, conn, state.CloudAutonomousVmClusterId.ValueString(), deleteTimeout) + if err != nil { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.ODB, create.ErrActionWaitingForDeletion, ResNameCloudAutonomousVmCluster, state.CloudAutonomousVmClusterId.ValueString(), err), + err.Error(), + ) + return + } +} + +func 
waitCloudAutonomousVmClusterCreated(ctx context.Context, conn *odb.Client, id string, timeout time.Duration) (*odbtypes.CloudAutonomousVmCluster, error) { + stateConf := &retry.StateChangeConf{ + Pending: enum.Slice(odbtypes.ResourceStatusProvisioning), + Target: enum.Slice(odbtypes.ResourceStatusAvailable, odbtypes.ResourceStatusFailed), + Refresh: statusCloudAutonomousVmCluster(ctx, conn, id), + Timeout: timeout, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + if out, ok := outputRaw.(*odbtypes.CloudAutonomousVmCluster); ok { + return out, err + } + + return nil, err +} + +func waitCloudAutonomousVmClusterDeleted(ctx context.Context, conn *odb.Client, id string, timeout time.Duration) (*odbtypes.CloudAutonomousVmCluster, error) { + stateConf := &retry.StateChangeConf{ + Pending: enum.Slice(odbtypes.ResourceStatusTerminating), + Target: []string{}, + Refresh: statusCloudAutonomousVmCluster(ctx, conn, id), + Timeout: timeout, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + if out, ok := outputRaw.(*odbtypes.CloudAutonomousVmCluster); ok { + return out, err + } + + return nil, err +} + +func statusCloudAutonomousVmCluster(ctx context.Context, conn *odb.Client, id string) retry.StateRefreshFunc { + return func() (any, string, error) { + out, err := findCloudAutonomousVmClusterByID(ctx, conn, id) + if tfresource.NotFound(err) { + return nil, "", nil + } + + if err != nil { + return nil, "", err + } + + return out, string(out.Status), nil + } +} + +func findCloudAutonomousVmClusterByID(ctx context.Context, conn *odb.Client, id string) (*odbtypes.CloudAutonomousVmCluster, error) { + input := odb.GetCloudAutonomousVmClusterInput{ + CloudAutonomousVmClusterId: aws.String(id), + } + out, err := conn.GetCloudAutonomousVmCluster(ctx, &input) + if err != nil { + if errs.IsA[*odbtypes.ResourceNotFoundException](err) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: &input, + } + } + return nil, err + } + + if out == nil || 
out.CloudAutonomousVmCluster == nil { + return nil, tfresource.NewEmptyResultError(&input) + } + + return out.CloudAutonomousVmCluster, nil +} + +type cloudAutonomousVmClusterResourceModel struct { + framework.WithRegionModel + CloudAutonomousVmClusterArn types.String `tfsdk:"arn"` + CloudAutonomousVmClusterId types.String `tfsdk:"id"` + CloudExadataInfrastructureId types.String `tfsdk:"cloud_exadata_infrastructure_id"` + AutonomousDataStoragePercentage types.Float32 `tfsdk:"autonomous_data_storage_percentage"` + AutonomousDataStorageSizeInTBs types.Float64 `tfsdk:"autonomous_data_storage_size_in_tbs"` + AvailableAutonomousDataStorageSizeInTBs types.Float64 `tfsdk:"available_autonomous_data_storage_size_in_tbs"` + AvailableContainerDatabases types.Int32 `tfsdk:"available_container_databases"` + AvailableCpus types.Float32 `tfsdk:"available_cpus"` + ComputeModel fwtypes.StringEnum[odbtypes.ComputeModel] `tfsdk:"compute_model"` + CpuCoreCount types.Int32 `tfsdk:"cpu_core_count"` + CpuCoreCountPerNode types.Int32 `tfsdk:"cpu_core_count_per_node"` + CpuPercentage types.Float32 `tfsdk:"cpu_percentage"` + CreatedAt timetypes.RFC3339 `tfsdk:"created_at" ` + DataStorageSizeInGBs types.Float64 `tfsdk:"data_storage_size_in_gbs"` + DataStorageSizeInTBs types.Float64 `tfsdk:"data_storage_size_in_tbs"` + DbNodeStorageSizeInGBs types.Int32 `tfsdk:"odb_node_storage_size_in_gbs"` + DbServers fwtypes.SetValueOf[types.String] `tfsdk:"db_servers"` + Description types.String `tfsdk:"description"` + DisplayName types.String `tfsdk:"display_name"` + Domain types.String `tfsdk:"domain"` + ExadataStorageInTBsLowestScaledValue types.Float64 `tfsdk:"exadata_storage_in_tbs_lowest_scaled_value"` + Hostname types.String `tfsdk:"hostname"` + IsMtlsEnabledVmCluster types.Bool `tfsdk:"is_mtls_enabled_vm_cluster"` + LicenseModel fwtypes.StringEnum[odbtypes.LicenseModel] `tfsdk:"license_model"` + MaxAcdsLowestScaledValue types.Int32 `tfsdk:"max_acds_lowest_scaled_value"` + 
MemoryPerOracleComputeUnitInGBs types.Int32 `tfsdk:"memory_per_oracle_compute_unit_in_gbs"` + MemorySizeInGBs types.Int32 `tfsdk:"memory_size_in_gbs"` + NodeCount types.Int32 `tfsdk:"node_count"` + NonProvisionableAutonomousContainerDatabases types.Int32 `tfsdk:"non_provisionable_autonomous_container_databases"` + OciResourceAnchorName types.String `tfsdk:"oci_resource_anchor_name"` + OciUrl types.String `tfsdk:"oci_url"` + Ocid types.String `tfsdk:"ocid"` + OdbNetworkId types.String `tfsdk:"odb_network_id"` + PercentProgress types.Float32 `tfsdk:"percent_progress"` + ProvisionableAutonomousContainerDatabases types.Int32 `tfsdk:"provisionable_autonomous_container_databases"` + ProvisionedAutonomousContainerDatabases types.Int32 `tfsdk:"provisioned_autonomous_container_databases"` + ProvisionedCpus types.Float32 `tfsdk:"provisioned_cpus"` + ReclaimableCpus types.Float32 `tfsdk:"reclaimable_cpus"` + ReservedCpus types.Float32 `tfsdk:"reserved_cpus"` + ScanListenerPortNonTls types.Int32 `tfsdk:"scan_listener_port_non_tls"` + ScanListenerPortTls types.Int32 `tfsdk:"scan_listener_port_tls"` + Shape types.String `tfsdk:"shape"` + Status fwtypes.StringEnum[odbtypes.ResourceStatus] `tfsdk:"status"` + StatusReason types.String `tfsdk:"status_reason"` + TimeZone types.String `tfsdk:"time_zone"` + TotalContainerDatabases types.Int32 `tfsdk:"total_container_databases"` + Timeouts timeouts.Value `tfsdk:"timeouts"` + Tags tftags.Map `tfsdk:"tags"` + TagsAll tftags.Map `tfsdk:"tags_all"` + TimeOrdsCertificateExpires timetypes.RFC3339 `tfsdk:"time_ords_certificate_expires"` + TimeDatabaseSslCertificateExpires timetypes.RFC3339 `tfsdk:"time_database_ssl_certificate_expires"` + MaintenanceWindow fwtypes.ListNestedObjectValueOf[cloudAutonomousVmClusterMaintenanceWindowResourceModel] `tfsdk:"maintenance_window" ` +} + +type cloudAutonomousVmClusterMaintenanceWindowResourceModel struct { + DaysOfWeek 
fwtypes.SetNestedObjectValueOf[dayWeekNameAutonomousVmClusterMaintenanceWindowResourceModel] `tfsdk:"days_of_week"` + HoursOfDay fwtypes.SetValueOf[types.Int64] `tfsdk:"hours_of_day"` + LeadTimeInWeeks types.Int32 `tfsdk:"lead_time_in_weeks"` + Months fwtypes.SetNestedObjectValueOf[monthNameAutonomousVmClusterMaintenanceWindowResourceModel] `tfsdk:"months"` + Preference fwtypes.StringEnum[odbtypes.PreferenceType] `tfsdk:"preference"` + WeeksOfMonth fwtypes.SetValueOf[types.Int64] `tfsdk:"weeks_of_month"` +} + +type dayWeekNameAutonomousVmClusterMaintenanceWindowResourceModel struct { + Name fwtypes.StringEnum[odbtypes.DayOfWeekName] `tfsdk:"name"` +} + +type monthNameAutonomousVmClusterMaintenanceWindowResourceModel struct { + Name fwtypes.StringEnum[odbtypes.MonthName] `tfsdk:"name"` +} diff --git a/internal/service/odb/cloud_autonomous_vm_cluster_data_source.go b/internal/service/odb/cloud_autonomous_vm_cluster_data_source.go new file mode 100644 index 000000000000..d21456c05544 --- /dev/null +++ b/internal/service/odb/cloud_autonomous_vm_cluster_data_source.go @@ -0,0 +1,346 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package odb + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/service/odb" + odbtypes "github.com/aws/aws-sdk-go-v2/service/odb/types" + "github.com/hashicorp/terraform-plugin-framework-timetypes/timetypes" + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-provider-aws/internal/create" + "github.com/hashicorp/terraform-provider-aws/internal/framework" + "github.com/hashicorp/terraform-provider-aws/internal/framework/flex" + fwtypes "github.com/hashicorp/terraform-provider-aws/internal/framework/types" + tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" + "github.com/hashicorp/terraform-provider-aws/names" +) + +// Function annotations are used for datasource registration to the Provider. DO NOT EDIT. +// @FrameworkDataSource("aws_odb_cloud_autonomous_vm_cluster", name="Cloud Autonomous Vm Cluster") +// @Tags(identifierAttribute="arn") +func newDataSourceCloudAutonomousVmCluster(context.Context) (datasource.DataSourceWithConfigure, error) { + return &dataSourceCloudAutonomousVmCluster{}, nil +} + +const ( + DSNameCloudAutonomousVmCluster = "Cloud Autonomous Vm Cluster Data Source" +) + +type dataSourceCloudAutonomousVmCluster struct { + framework.DataSourceWithModel[cloudAutonomousVmClusterDataSourceModel] +} + +func (d *dataSourceCloudAutonomousVmCluster) Schema(ctx context.Context, req datasource.SchemaRequest, resp *datasource.SchemaResponse) { + status := fwtypes.StringEnumType[odbtypes.ResourceStatus]() + licenseModel := fwtypes.StringEnumType[odbtypes.LicenseModel]() + computeModel := fwtypes.StringEnumType[odbtypes.ComputeModel]() + resp.Schema = schema.Schema{ + Attributes: map[string]schema.Attribute{ + names.AttrARN: framework.ARNAttributeComputedOnly(), + + names.AttrID: schema.StringAttribute{ + Required: true, + 
Description: "Unique ID of the Autonomous VM cluster.", + }, + "cloud_exadata_infrastructure_id": schema.StringAttribute{ + Computed: true, + Description: "Cloud exadata infrastructure id associated with this cloud autonomous VM cluster.", + }, + "autonomous_data_storage_percentage": schema.Float32Attribute{ + Computed: true, + Description: "The percentage of data storage currently in use for Autonomous Databases in the Autonomous VM cluster.", + }, + "autonomous_data_storage_size_in_tbs": schema.Float64Attribute{ + Computed: true, + Description: "The data storage size allocated for Autonomous Databases in the Autonomous VM cluster, in TB.", + }, + "available_autonomous_data_storage_size_in_tbs": schema.Float64Attribute{ + Computed: true, + Description: "The available data storage space for Autonomous Databases in the Autonomous VM cluster, in TB.", + }, + "available_container_databases": schema.Int32Attribute{ + Computed: true, + Description: "The number of Autonomous CDBs that you can create with the currently available storage.", + }, + "available_cpus": schema.Float32Attribute{ + Computed: true, + Description: "The number of CPU cores available for allocation to Autonomous Databases.", + }, + "compute_model": schema.StringAttribute{ + CustomType: computeModel, + Computed: true, + Description: " The compute model of the Autonomous VM cluster: ECPU or OCPU.", + }, + "cpu_core_count": schema.Int32Attribute{ + Computed: true, + Description: "The total number of CPU cores in the Autonomous VM cluster.", + }, + "cpu_core_count_per_node": schema.Int32Attribute{ + Computed: true, + Description: "The number of CPU cores enabled per node in the Autonomous VM cluster.", + }, + "cpu_percentage": schema.Float32Attribute{ + Computed: true, + Description: "he percentage of total CPU cores currently in use in the Autonomous VM cluster.", + }, + names.AttrCreatedAt: schema.StringAttribute{ + Computed: true, + CustomType: timetypes.RFC3339Type{}, + Description: "The date and 
time when the Autonomous VM cluster was created.", + }, + "data_storage_size_in_gbs": schema.Float64Attribute{ + Computed: true, + Description: "The total data storage allocated to the Autonomous VM cluster, in GB.", + }, + "data_storage_size_in_tbs": schema.Float64Attribute{ + Computed: true, + Description: "The total data storage allocated to the Autonomous VM cluster, in TB.", + }, + "odb_node_storage_size_in_gbs": schema.Int32Attribute{ + Computed: true, + Description: "The local node storage allocated to the Autonomous VM cluster, in gigabytes (GB).", + }, + "db_servers": schema.SetAttribute{ + Computed: true, + CustomType: fwtypes.SetOfStringType, + ElementType: types.StringType, + Description: "The list of database servers associated with the Autonomous VM cluster.", + }, + names.AttrDescription: schema.StringAttribute{ + Computed: true, + Description: "The user-provided description of the Autonomous VM cluster.", + }, + names.AttrDisplayName: schema.StringAttribute{ + Computed: true, + Description: "The display name of the Autonomous VM cluster.", + }, + names.AttrDomain: schema.StringAttribute{ + Computed: true, + Description: "The domain name of the Autonomous VM cluster.", + }, + "exadata_storage_in_tbs_lowest_scaled_value": schema.Float64Attribute{ + Computed: true, + Description: "The minimum value to which you can scale down the Exadata storage, in TB.", + }, + "hostname": schema.StringAttribute{ + Computed: true, + Description: "The hostname of the Autonomous VM cluster.", + }, + "is_mtls_enabled_vm_cluster": schema.BoolAttribute{ + Computed: true, + Description: " Indicates whether mutual TLS (mTLS) authentication is enabled for the Autonomous VM cluster.", + }, + "license_model": schema.StringAttribute{ + CustomType: licenseModel, + Computed: true, + Description: "The Oracle license model that applies to the Autonomous VM cluster. 
Valid values are LICENSE_INCLUDED or BRING_YOUR_OWN_LICENSE .", + }, + "max_acds_lowest_scaled_value": schema.Int32Attribute{ + Computed: true, + Description: "The minimum value to which you can scale down the maximum number of Autonomous CDBs.", + }, + "memory_per_oracle_compute_unit_in_gbs": schema.Int32Attribute{ + Computed: true, + Description: "The amount of memory allocated per Oracle Compute Unit, in GB.", + }, + "memory_size_in_gbs": schema.Int32Attribute{ + Computed: true, + Description: "The total amount of memory allocated to the Autonomous VM cluster, in gigabytes (GB).", + }, + "node_count": schema.Int32Attribute{ + Computed: true, + Description: "The number of database server nodes in the Autonomous VM cluster.", + }, + "non_provisionable_autonomous_container_databases": schema.Int32Attribute{ + Computed: true, + Description: "The number of Autonomous CDBs that can't be provisioned because of resource constraints.", + }, + "oci_resource_anchor_name": schema.StringAttribute{ + Computed: true, + Description: "The name of the OCI resource anchor associated with this Autonomous VM cluster.", + }, + "oci_url": schema.StringAttribute{ + Computed: true, + Description: "The URL for accessing the OCI console page for this Autonomous VM cluster.", + }, + "ocid": schema.StringAttribute{ + Computed: true, + Description: "The Oracle Cloud Identifier (OCID) of the Autonomous VM cluster.", + }, + "odb_network_id": schema.StringAttribute{ + Computed: true, + Description: "The unique identifier of the ODB network associated with this Autonomous VM cluster.", + }, + "percent_progress": schema.Float32Attribute{ + Computed: true, + Description: "The progress of the current operation on the Autonomous VM cluster, as a percentage.", + }, + "provisionable_autonomous_container_databases": schema.Int32Attribute{ + Computed: true, + Description: "The number of Autonomous CDBs that can be provisioned in the Autonomous VM cluster.", + }, + 
"provisioned_autonomous_container_databases": schema.Int32Attribute{ + Computed: true, + Description: "The number of Autonomous CDBs currently provisioned in the Autonomous VM cluster.", + }, + "provisioned_cpus": schema.Float32Attribute{ + Computed: true, + Description: "The number of CPU cores currently provisioned in the Autonomous VM cluster.", + }, + "reclaimable_cpus": schema.Float32Attribute{ + Computed: true, + Description: "The number of CPU cores that can be reclaimed from terminated or scaled-down Autonomous Databases.", + }, + "reserved_cpus": schema.Float32Attribute{ + Computed: true, + Description: "The number of CPU cores reserved for system operations and redundancy.", + }, + "scan_listener_port_non_tls": schema.Int32Attribute{ + Computed: true, + Description: "The SCAN listener port for non-TLS (TCP) protocol. The default is 1521.", + }, + "scan_listener_port_tls": schema.Int32Attribute{ + Computed: true, + Description: "The SCAN listener port for TLS (TCP) protocol. The default is 2484.", + }, + "shape": schema.StringAttribute{ + Computed: true, + Description: "The shape of the Exadata infrastructure for the Autonomous VM cluster.", + }, + names.AttrStatus: schema.StringAttribute{ + CustomType: status, + Computed: true, + Description: "The status of the Autonomous VM cluster.", + }, + names.AttrStatusReason: schema.StringAttribute{ + Computed: true, + Description: "Additional information about the current status of the Autonomous VM cluster.", + }, + "time_database_ssl_certificate_expires": schema.StringAttribute{ + Computed: true, + CustomType: timetypes.RFC3339Type{}, + Description: "The expiration date and time of the database SSL certificate.", + }, + "time_ords_certificate_expires": schema.StringAttribute{ + Computed: true, + CustomType: timetypes.RFC3339Type{}, + Description: "The expiration date and time of the Oracle REST Data Services (ORDS)certificate .", + }, + "time_zone": schema.StringAttribute{ + Computed: true, + Description: "The 
time zone of the Autonomous VM cluster.", + }, + "total_container_databases": schema.Int32Attribute{ + Computed: true, + Description: "The total number of Autonomous Container Databases that can be created with the allocated local storage.", + }, + names.AttrTags: tftags.TagsAttributeComputedOnly(), + "maintenance_window": schema.ListAttribute{ + Computed: true, + CustomType: fwtypes.NewListNestedObjectTypeOf[cloudAutonomousVmClusterMaintenanceWindowDataSourceModel](ctx), + Description: "The maintenance window for the Autonomous VM cluster.", + }, + }, + } +} + +func (d *dataSourceCloudAutonomousVmCluster) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + conn := d.Meta().ODBClient(ctx) + var data cloudAutonomousVmClusterDataSourceModel + resp.Diagnostics.Append(req.Config.Get(ctx, &data)...) + if resp.Diagnostics.HasError() { + return + } + input := odb.GetCloudAutonomousVmClusterInput{ + CloudAutonomousVmClusterId: data.CloudAutonomousVmClusterId.ValueStringPointer(), + } + + out, err := conn.GetCloudAutonomousVmCluster(ctx, &input) + if err != nil { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.ODB, create.ErrActionReading, DSNameCloudAutonomousVmCluster, data.CloudAutonomousVmClusterId.ValueString(), err), + err.Error(), + ) + return + } + resp.Diagnostics.Append(flex.Flatten(ctx, out.CloudAutonomousVmCluster, &data)...) + if resp.Diagnostics.HasError() { + return + } + + resp.Diagnostics.Append(resp.State.Set(ctx, &data)...) 
+} + +type cloudAutonomousVmClusterDataSourceModel struct { + framework.WithRegionModel + CloudAutonomousVmClusterArn types.String `tfsdk:"arn"` + CloudAutonomousVmClusterId types.String `tfsdk:"id"` + CloudExadataInfrastructureId types.String `tfsdk:"cloud_exadata_infrastructure_id"` + AutonomousDataStoragePercentage types.Float32 `tfsdk:"autonomous_data_storage_percentage"` + AutonomousDataStorageSizeInTBs types.Float64 `tfsdk:"autonomous_data_storage_size_in_tbs"` + AvailableAutonomousDataStorageSizeInTBs types.Float64 `tfsdk:"available_autonomous_data_storage_size_in_tbs"` + AvailableContainerDatabases types.Int32 `tfsdk:"available_container_databases"` + AvailableCpus types.Float32 `tfsdk:"available_cpus"` + ComputeModel fwtypes.StringEnum[odbtypes.ComputeModel] `tfsdk:"compute_model"` + CpuCoreCount types.Int32 `tfsdk:"cpu_core_count"` + CpuCoreCountPerNode types.Int32 `tfsdk:"cpu_core_count_per_node"` + CpuPercentage types.Float32 `tfsdk:"cpu_percentage"` + CreatedAt timetypes.RFC3339 `tfsdk:"created_at" ` + DataStorageSizeInGBs types.Float64 `tfsdk:"data_storage_size_in_gbs"` + DataStorageSizeInTBs types.Float64 `tfsdk:"data_storage_size_in_tbs"` + DbNodeStorageSizeInGBs types.Int32 `tfsdk:"odb_node_storage_size_in_gbs"` + DbServers fwtypes.SetValueOf[types.String] `tfsdk:"db_servers"` + Description types.String `tfsdk:"description"` + DisplayName types.String `tfsdk:"display_name"` + Domain types.String `tfsdk:"domain"` + ExadataStorageInTBsLowestScaledValue types.Float64 `tfsdk:"exadata_storage_in_tbs_lowest_scaled_value"` + Hostname types.String `tfsdk:"hostname"` + IsMtlsEnabledVmCluster types.Bool `tfsdk:"is_mtls_enabled_vm_cluster"` + LicenseModel fwtypes.StringEnum[odbtypes.LicenseModel] `tfsdk:"license_model"` + MaxAcdsLowestScaledValue types.Int32 `tfsdk:"max_acds_lowest_scaled_value"` + MemoryPerOracleComputeUnitInGBs types.Int32 `tfsdk:"memory_per_oracle_compute_unit_in_gbs"` + MemorySizeInGBs types.Int32 `tfsdk:"memory_size_in_gbs"` + NodeCount 
types.Int32 `tfsdk:"node_count"` + NonProvisionableAutonomousContainerDatabases types.Int32 `tfsdk:"non_provisionable_autonomous_container_databases"` + OciResourceAnchorName types.String `tfsdk:"oci_resource_anchor_name"` + OciUrl types.String `tfsdk:"oci_url"` + Ocid types.String `tfsdk:"ocid"` + OdbNetworkId types.String `tfsdk:"odb_network_id"` + PercentProgress types.Float32 `tfsdk:"percent_progress"` + ProvisionableAutonomousContainerDatabases types.Int32 `tfsdk:"provisionable_autonomous_container_databases"` + ProvisionedAutonomousContainerDatabases types.Int32 `tfsdk:"provisioned_autonomous_container_databases"` + ProvisionedCpus types.Float32 `tfsdk:"provisioned_cpus"` + ReclaimableCpus types.Float32 `tfsdk:"reclaimable_cpus"` + ReservedCpus types.Float32 `tfsdk:"reserved_cpus"` + ScanListenerPortNonTls types.Int32 `tfsdk:"scan_listener_port_non_tls"` + ScanListenerPortTls types.Int32 `tfsdk:"scan_listener_port_tls"` + Shape types.String `tfsdk:"shape"` + Status fwtypes.StringEnum[odbtypes.ResourceStatus] `tfsdk:"status"` + StatusReason types.String `tfsdk:"status_reason"` + TimeDatabaseSslCertificateExpires timetypes.RFC3339 `tfsdk:"time_database_ssl_certificate_expires"` + TimeOrdsCertificateExpires timetypes.RFC3339 `tfsdk:"time_ords_certificate_expires" ` + TimeZone types.String `tfsdk:"time_zone"` + TotalContainerDatabases types.Int32 `tfsdk:"total_container_databases"` + MaintenanceWindow fwtypes.ListNestedObjectValueOf[cloudAutonomousVmClusterMaintenanceWindowDataSourceModel] `tfsdk:"maintenance_window" ` + Tags tftags.Map `tfsdk:"tags"` +} +type cloudAutonomousVmClusterMaintenanceWindowDataSourceModel struct { + DaysOfWeek fwtypes.SetNestedObjectValueOf[dayWeekNameAutonomousVmClusterMaintenanceWindowDataSourceModel] `tfsdk:"days_of_week"` + HoursOfDay fwtypes.SetValueOf[types.Int64] `tfsdk:"hours_of_day"` + LeadTimeInWeeks types.Int32 `tfsdk:"lead_time_in_weeks"` + Months 
fwtypes.SetNestedObjectValueOf[monthNameAutonomousVmClusterMaintenanceWindowDataSourceModel] `tfsdk:"months"` + Preference fwtypes.StringEnum[odbtypes.PreferenceType] `tfsdk:"preference"` + WeeksOfMonth fwtypes.SetValueOf[types.Int64] `tfsdk:"weeks_of_month"` +} +type dayWeekNameAutonomousVmClusterMaintenanceWindowDataSourceModel struct { + Name fwtypes.StringEnum[odbtypes.DayOfWeekName] `tfsdk:"name"` +} + +type monthNameAutonomousVmClusterMaintenanceWindowDataSourceModel struct { + Name fwtypes.StringEnum[odbtypes.MonthName] `tfsdk:"name"` +} diff --git a/internal/service/odb/cloud_autonomous_vm_cluster_data_source_test.go b/internal/service/odb/cloud_autonomous_vm_cluster_data_source_test.go new file mode 100644 index 000000000000..7e33f69285d0 --- /dev/null +++ b/internal/service/odb/cloud_autonomous_vm_cluster_data_source_test.go @@ -0,0 +1,191 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package odb_test + +import ( + "context" + "errors" + "fmt" + "testing" + + "github.com/aws/aws-sdk-go-v2/service/odb" + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/terraform" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/create" + tfodb "github.com/hashicorp/terraform-provider-aws/internal/service/odb" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + "github.com/hashicorp/terraform-provider-aws/names" +) + +type autonomousVMClusterDSTest struct { + exaInfraDisplayNamePrefix string + odbNetDisplayNamePrefix string + autonomousVmClusterDisplayNamePrefix string +} + +var autonomousVMClusterDSTestEntity = autonomousVMClusterDSTest{ + exaInfraDisplayNamePrefix: "Ofake-exa", + odbNetDisplayNamePrefix: "odb-net", + autonomousVmClusterDisplayNamePrefix: 
"Ofake-avmc", +} + +func TestAccODBCloudAutonomousVmClusterDataSource_basic(t *testing.T) { + ctx := acctest.Context(t) + + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + avmcResource := "aws_odb_cloud_autonomous_vm_cluster.test" + avmcDataSource := "data.aws_odb_cloud_autonomous_vm_cluster.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + autonomousVMClusterDSTestEntity.testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.ODBServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: autonomousVMClusterDSTestEntity.testAccCheckCloudAutonomousVmClusterDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: autonomousVMClusterDSTestEntity.avmcBasic(), + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttrPair(avmcResource, names.AttrID, avmcDataSource, names.AttrID), + ), + }, + }, + }) +} + +func (autonomousVMClusterDSTest) testAccCheckCloudAutonomousVmClusterDestroy(ctx context.Context) resource.TestCheckFunc { + return func(s *terraform.State) error { + conn := acctest.Provider.Meta().(*conns.AWSClient).ODBClient(ctx) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "aws_odb_cloud_autonomous_vm_cluster" { + continue + } + + _, err := tfodb.FindCloudAutonomousVmClusterByID(ctx, conn, rs.Primary.ID) + if tfresource.NotFound(err) { + return nil + } + if err != nil { + return create.Error(names.ODB, create.ErrActionCheckingDestroyed, tfodb.ResNameCloudAutonomousVmCluster, rs.Primary.ID, err) + } + + return create.Error(names.ODB, create.ErrActionCheckingDestroyed, tfodb.ResNameCloudAutonomousVmCluster, rs.Primary.ID, errors.New("not destroyed")) + } + + return nil + } +} +func (autonomousVMClusterDSTest) testAccPreCheck(ctx context.Context, t *testing.T) { + conn := acctest.Provider.Meta().(*conns.AWSClient).ODBClient(ctx) + input := odb.ListCloudAutonomousVmClustersInput{} + _, err := 
conn.ListCloudAutonomousVmClusters(ctx, &input) + if acctest.PreCheckSkipError(err) { + t.Skipf("skipping acceptance testing: %s", err) + } + if err != nil { + t.Fatalf("unexpected PreCheck error: %s", err) + } +} + +func (autonomousVMClusterDSTest) avmcBasic() string { + exaInfraDisplayName := sdkacctest.RandomWithPrefix(autonomousVMClusterDSTestEntity.exaInfraDisplayNamePrefix) + odbNetworkDisplayName := sdkacctest.RandomWithPrefix(autonomousVMClusterDSTestEntity.odbNetDisplayNamePrefix) + avmcDisplayName := sdkacctest.RandomWithPrefix(autonomousVMClusterDSTestEntity.autonomousVmClusterDisplayNamePrefix) + domain := acctest.RandomDomainName() + emailAddress := acctest.RandomEmailAddress(domain) + exaInfraRes := autonomousVMClusterDSTestEntity.exaInfra(exaInfraDisplayName, emailAddress) + odbNetRes := autonomousVMClusterDSTestEntity.oracleDBNetwork(odbNetworkDisplayName) + res := fmt.Sprintf(` +%s + +%s + +data "aws_odb_db_servers" "test" { + cloud_exadata_infrastructure_id = aws_odb_cloud_exadata_infrastructure.test.id +} + +resource "aws_odb_cloud_autonomous_vm_cluster" "test" { + cloud_exadata_infrastructure_id = aws_odb_cloud_exadata_infrastructure.test.id + odb_network_id = aws_odb_network.test.id + display_name = %[3]q + autonomous_data_storage_size_in_tbs = 5 + memory_per_oracle_compute_unit_in_gbs = 2 + total_container_databases = 1 + cpu_core_count_per_node = 40 + license_model = "LICENSE_INCLUDED" + db_servers = [for db_server in data.aws_odb_db_servers.test.db_servers : db_server.id] + scan_listener_port_tls = 8561 + scan_listener_port_non_tls = 1024 + maintenance_window { + preference = "NO_PREFERENCE" + } + tags = { + "env" = "dev" + } + +} + + +data "aws_odb_cloud_autonomous_vm_cluster" "test" { + id = aws_odb_cloud_autonomous_vm_cluster.test.id + +} +`, exaInfraRes, odbNetRes, avmcDisplayName) + + return res +} + +func (autonomousVMClusterDSTest) oracleDBNetwork(odbNetName string) string { + networkRes := fmt.Sprintf(` + + + + +resource 
"aws_odb_network" "test" { + display_name = %[1]q + availability_zone_id = "use1-az6" + client_subnet_cidr = "10.2.0.0/24" + backup_subnet_cidr = "10.2.1.0/24" + s3_access = "DISABLED" + zero_etl_access = "DISABLED" +} + + +`, odbNetName) + return networkRes +} + +func (autonomousVMClusterDSTest) exaInfra(exaInfraName, emailAddress string) string { + exaInfraRes := fmt.Sprintf(` + + + + +resource "aws_odb_cloud_exadata_infrastructure" "test" { + display_name = %[1]q + shape = "Exadata.X9M" + storage_count = 3 + compute_count = 2 + availability_zone_id = "use1-az6" + customer_contacts_to_send_to_oci = ["%[2]s"] + maintenance_window { + custom_action_timeout_in_mins = 16 + is_custom_action_timeout_enabled = true + patching_mode = "ROLLING" + preference = "NO_PREFERENCE" + } +} + + +`, exaInfraName, emailAddress) + return exaInfraRes +} diff --git a/internal/service/odb/cloud_autonomous_vm_cluster_test.go b/internal/service/odb/cloud_autonomous_vm_cluster_test.go new file mode 100644 index 000000000000..31e3202f4272 --- /dev/null +++ b/internal/service/odb/cloud_autonomous_vm_cluster_test.go @@ -0,0 +1,481 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package odb_test + +import ( + "context" + "errors" + "fmt" + "strings" + "testing" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/odb" + odbtypes "github.com/aws/aws-sdk-go-v2/service/odb/types" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/terraform" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/create" + "github.com/hashicorp/terraform-provider-aws/internal/errs" + tfodb "github.com/hashicorp/terraform-provider-aws/internal/service/odb" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + "github.com/hashicorp/terraform-provider-aws/names" +) + +type autonomousVMClusterResourceTest struct { + exaInfraDisplayNamePrefix string + odbNetDisplayNamePrefix string + autonomousVmClusterDisplayNamePrefix string +} + +var autonomousVMClusterResourceTestEntity = autonomousVMClusterResourceTest{ + exaInfraDisplayNamePrefix: "Ofake-exa", + odbNetDisplayNamePrefix: "oracleDB-net", + autonomousVmClusterDisplayNamePrefix: "Ofake-avmc", +} + +func TestAccODBCloudAutonomousVmCluster_basic(t *testing.T) { + ctx := acctest.Context(t) + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + + var cloudAVMC odbtypes.CloudAutonomousVmCluster + + resourceName := "aws_odb_cloud_autonomous_vm_cluster.test" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + autonomousVMClusterResourceTestEntity.testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.ODBServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: 
autonomousVMClusterResourceTestEntity.testAccCheckCloudAutonomousVmClusterDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: autonomousVMClusterResourceTestEntity.avmcBasic(), + Check: resource.ComposeAggregateTestCheckFunc( + autonomousVMClusterResourceTestEntity.checkCloudAutonomousVmClusterExists(ctx, resourceName, &cloudAVMC), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccODBCloudAutonomousVmCluster_withAllParams(t *testing.T) { + ctx := acctest.Context(t) + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + + var cloudAVMC odbtypes.CloudAutonomousVmCluster + + resourceName := "aws_odb_cloud_autonomous_vm_cluster.test" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + //acctest.PreCheckPartitionHasService(t, names.ODBServiceID) + autonomousVMClusterResourceTestEntity.testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.ODBServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: autonomousVMClusterResourceTestEntity.testAccCheckCloudAutonomousVmClusterDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: autonomousVMClusterResourceTestEntity.avmcAllParamsConfig(), + Check: resource.ComposeAggregateTestCheckFunc( + autonomousVMClusterResourceTestEntity.checkCloudAutonomousVmClusterExists(ctx, resourceName, &cloudAVMC), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccODBCloudAutonomousVmCluster_tagging(t *testing.T) { + ctx := acctest.Context(t) + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + + var avmc1, avmc2 odbtypes.CloudAutonomousVmCluster + resourceName := "aws_odb_cloud_autonomous_vm_cluster.test" + withoutTag, withTag := autonomousVMClusterResourceTestEntity.avmcNoTagWithTag() + resource.Test(t, resource.TestCase{ + PreCheck: func() { + 
acctest.PreCheck(ctx, t) + autonomousVMClusterResourceTestEntity.testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.ODBServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: autonomousVMClusterResourceTestEntity.testAccCheckCloudAutonomousVmClusterDestroy(ctx), + + Steps: []resource.TestStep{ + { + Config: withoutTag, + + Check: resource.ComposeAggregateTestCheckFunc( + autonomousVMClusterResourceTestEntity.checkCloudAutonomousVmClusterExists(ctx, resourceName, &avmc1), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, "0"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: withTag, + Check: resource.ComposeAggregateTestCheckFunc( + autonomousVMClusterResourceTestEntity.checkCloudAutonomousVmClusterExists(ctx, resourceName, &avmc2), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, "1"), + resource.TestCheckResourceAttr(resourceName, "tags.env", "dev"), + resource.ComposeTestCheckFunc(func(state *terraform.State) error { + if strings.Compare(*(avmc1.CloudAutonomousVmClusterId), *(avmc2.CloudAutonomousVmClusterId)) != 0 { + return errors.New("shouldn't create a new autonomous vm cluster") + } + return nil + }), + ), + }, + }, + }) +} + +func TestAccODBCloudAutonomousVmCluster_disappears(t *testing.T) { + ctx := acctest.Context(t) + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + var cloudautonomousvmcluster odbtypes.CloudAutonomousVmCluster + resourceName := "aws_odb_cloud_autonomous_vm_cluster.test" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + autonomousVMClusterResourceTestEntity.testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.ODBServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: autonomousVMClusterResourceTestEntity.testAccCheckCloudAutonomousVmClusterDestroy(ctx), + Steps: 
[]resource.TestStep{ + { + Config: autonomousVMClusterResourceTestEntity.avmcBasic(), + Check: resource.ComposeAggregateTestCheckFunc( + autonomousVMClusterResourceTestEntity.checkCloudAutonomousVmClusterExists(ctx, resourceName, &cloudautonomousvmcluster), + acctest.CheckFrameworkResourceDisappears(ctx, acctest.Provider, tfodb.ResourceCloudAutonomousVMCluster, resourceName), + ), + ExpectNonEmptyPlan: true, + }, + }, + }) +} + +func (autonomousVMClusterResourceTest) testAccPreCheck(ctx context.Context, t *testing.T) { + conn := acctest.Provider.Meta().(*conns.AWSClient).ODBClient(ctx) + input := odb.ListCloudAutonomousVmClustersInput{} + _, err := conn.ListCloudAutonomousVmClusters(ctx, &input) + if acctest.PreCheckSkipError(err) { + t.Skipf("skipping acceptance testing: %s", err) + } + if err != nil { + t.Fatalf("unexpected PreCheck error: %s", err) + } +} +func (autonomousVMClusterResourceTest) testAccCheckCloudAutonomousVmClusterDestroy(ctx context.Context) resource.TestCheckFunc { + return func(s *terraform.State) error { + conn := acctest.Provider.Meta().(*conns.AWSClient).ODBClient(ctx) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "aws_odb_cloud_autonomous_vm_cluster" { + continue + } + + _, err := autonomousVMClusterResourceTestEntity.findAVMC(ctx, conn, rs.Primary.ID) + if tfresource.NotFound(err) { + return nil + } + if err != nil { + return create.Error(names.ODB, create.ErrActionCheckingDestroyed, tfodb.ResNameCloudAutonomousVmCluster, rs.Primary.ID, err) + } + + return create.Error(names.ODB, create.ErrActionCheckingDestroyed, tfodb.ResNameCloudAutonomousVmCluster, rs.Primary.ID, errors.New("not destroyed")) + } + + return nil + } +} + +func (autonomousVMClusterResourceTest) checkCloudAutonomousVmClusterExists(ctx context.Context, name string, cloudAutonomousVMCluster *odbtypes.CloudAutonomousVmCluster) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[name] + if !ok { + return 
create.Error(names.ODB, create.ErrActionCheckingExistence, tfodb.ResNameCloudAutonomousVmCluster, name, errors.New("not found")) + } + + if rs.Primary.ID == "" { + return create.Error(names.ODB, create.ErrActionCheckingExistence, tfodb.ResNameCloudAutonomousVmCluster, name, errors.New("not set")) + } + + conn := acctest.Provider.Meta().(*conns.AWSClient).ODBClient(ctx) + resp, err := autonomousVMClusterResourceTestEntity.findAVMC(ctx, conn, rs.Primary.ID) + if err != nil { + return create.Error(names.ODB, create.ErrActionCheckingExistence, tfodb.ResNameCloudAutonomousVmCluster, rs.Primary.ID, err) + } + + *cloudAutonomousVMCluster = *resp + + return nil + } +} + +func (autonomousVMClusterResourceTest) findAVMC(ctx context.Context, conn *odb.Client, id string) (*odbtypes.CloudAutonomousVmCluster, error) { + input := odb.GetCloudAutonomousVmClusterInput{ + CloudAutonomousVmClusterId: aws.String(id), + } + out, err := conn.GetCloudAutonomousVmCluster(ctx, &input) + if err != nil { + if errs.IsA[*odbtypes.ResourceNotFoundException](err) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: &input, + } + } + return nil, err + } + + if out == nil || out.CloudAutonomousVmCluster == nil { + return nil, tfresource.NewEmptyResultError(&input) + } + + return out.CloudAutonomousVmCluster, nil +} + +func (autonomousVMClusterResourceTest) avmcBasic() string { + exaInfraDisplayName := sdkacctest.RandomWithPrefix(autonomousVMClusterDSTestEntity.exaInfraDisplayNamePrefix) + odbNetworkDisplayName := sdkacctest.RandomWithPrefix(autonomousVMClusterDSTestEntity.odbNetDisplayNamePrefix) + avmcDisplayName := sdkacctest.RandomWithPrefix(autonomousVMClusterDSTestEntity.autonomousVmClusterDisplayNamePrefix) + domain := acctest.RandomDomainName() + emailAddress := acctest.RandomEmailAddress(domain) + exaInfraRes := autonomousVMClusterResourceTestEntity.exaInfra(exaInfraDisplayName, emailAddress) + odbNetRes := 
autonomousVMClusterResourceTestEntity.oracleDBNetwork(odbNetworkDisplayName) + res := fmt.Sprintf(` +%s + +%s + +data "aws_odb_db_servers" "test" { + cloud_exadata_infrastructure_id = aws_odb_cloud_exadata_infrastructure.test.id +} + +resource "aws_odb_cloud_autonomous_vm_cluster" "test" { + cloud_exadata_infrastructure_id = aws_odb_cloud_exadata_infrastructure.test.id + odb_network_id = aws_odb_network.test.id + display_name = %[3]q + autonomous_data_storage_size_in_tbs = 5 + memory_per_oracle_compute_unit_in_gbs = 2 + total_container_databases = 1 + cpu_core_count_per_node = 40 + license_model = "LICENSE_INCLUDED" + db_servers = [for db_server in data.aws_odb_db_servers.test.db_servers : db_server.id] + scan_listener_port_tls = 8561 + scan_listener_port_non_tls = 1024 + maintenance_window { + preference = "NO_PREFERENCE" + } + +} + + + + +`, exaInfraRes, odbNetRes, avmcDisplayName) + + return res +} + +func (autonomousVMClusterResourceTest) avmcNoTagWithTag() (string, string) { + exaInfraDisplayName := sdkacctest.RandomWithPrefix(autonomousVMClusterDSTestEntity.exaInfraDisplayNamePrefix) + odbNetworkDisplayName := sdkacctest.RandomWithPrefix(autonomousVMClusterDSTestEntity.odbNetDisplayNamePrefix) + avmcDisplayName := sdkacctest.RandomWithPrefix(autonomousVMClusterDSTestEntity.autonomousVmClusterDisplayNamePrefix) + domain := acctest.RandomDomainName() + emailAddress := acctest.RandomEmailAddress(domain) + exaInfraRes := autonomousVMClusterResourceTestEntity.exaInfra(exaInfraDisplayName, emailAddress) + odbNetRes := autonomousVMClusterResourceTestEntity.oracleDBNetwork(odbNetworkDisplayName) + noTag := fmt.Sprintf(` +%s + +%s + +data "aws_odb_db_servers" "test" { + cloud_exadata_infrastructure_id = aws_odb_cloud_exadata_infrastructure.test.id +} + +resource "aws_odb_cloud_autonomous_vm_cluster" "test" { + cloud_exadata_infrastructure_id = aws_odb_cloud_exadata_infrastructure.test.id + odb_network_id = aws_odb_network.test.id + display_name = %[3]q + 
autonomous_data_storage_size_in_tbs = 5 + memory_per_oracle_compute_unit_in_gbs = 2 + total_container_databases = 1 + cpu_core_count_per_node = 40 + license_model = "LICENSE_INCLUDED" + db_servers = [for db_server in data.aws_odb_db_servers.test.db_servers : db_server.id] + scan_listener_port_tls = 8561 + scan_listener_port_non_tls = 1024 + maintenance_window { + preference = "NO_PREFERENCE" + } + +} + + + + +`, exaInfraRes, odbNetRes, avmcDisplayName) + withTag := fmt.Sprintf(` +%s + +%s + +data "aws_odb_db_servers" "test" { + cloud_exadata_infrastructure_id = aws_odb_cloud_exadata_infrastructure.test.id +} + +resource "aws_odb_cloud_autonomous_vm_cluster" "test" { + cloud_exadata_infrastructure_id = aws_odb_cloud_exadata_infrastructure.test.id + odb_network_id = aws_odb_network.test.id + display_name = %[3]q + autonomous_data_storage_size_in_tbs = 5 + memory_per_oracle_compute_unit_in_gbs = 2 + total_container_databases = 1 + cpu_core_count_per_node = 40 + license_model = "LICENSE_INCLUDED" + db_servers = [for db_server in data.aws_odb_db_servers.test.db_servers : db_server.id] + scan_listener_port_tls = 8561 + scan_listener_port_non_tls = 1024 + maintenance_window { + preference = "NO_PREFERENCE" + } + tags = { + "env" = "dev" + } + +} + + + + +`, exaInfraRes, odbNetRes, avmcDisplayName) + + return noTag, withTag +} + +func (autonomousVMClusterResourceTest) avmcAllParamsConfig() string { + exaInfraDisplayName := sdkacctest.RandomWithPrefix(autonomousVMClusterDSTestEntity.exaInfraDisplayNamePrefix) + odbNetworkDisplayName := sdkacctest.RandomWithPrefix(autonomousVMClusterDSTestEntity.odbNetDisplayNamePrefix) + avmcDisplayName := sdkacctest.RandomWithPrefix(autonomousVMClusterDSTestEntity.autonomousVmClusterDisplayNamePrefix) + domain := acctest.RandomDomainName() + emailAddress := acctest.RandomEmailAddress(domain) + exaInfraRes := autonomousVMClusterResourceTestEntity.exaInfra(exaInfraDisplayName, emailAddress) + odbNetRes := 
autonomousVMClusterResourceTestEntity.oracleDBNetwork(odbNetworkDisplayName) + res := fmt.Sprintf(` +%s + +%s + +data "aws_odb_db_servers" "test" { + cloud_exadata_infrastructure_id = aws_odb_cloud_exadata_infrastructure.test.id +} + +resource "aws_odb_cloud_autonomous_vm_cluster" "test" { + description = "my first avmc" + time_zone = "UTC" + cloud_exadata_infrastructure_id = aws_odb_cloud_exadata_infrastructure.test.id + odb_network_id = aws_odb_network.test.id + display_name = %[3]q + autonomous_data_storage_size_in_tbs = 5 + memory_per_oracle_compute_unit_in_gbs = 2 + total_container_databases = 1 + cpu_core_count_per_node = 40 + license_model = "LICENSE_INCLUDED" + db_servers = [for db_server in data.aws_odb_db_servers.test.db_servers : db_server.id] + scan_listener_port_tls = 8561 + scan_listener_port_non_tls = 1024 + maintenance_window { + days_of_week = [{ name = "MONDAY" }, { name = "TUESDAY" }] + hours_of_day = [4, 16] + lead_time_in_weeks = 3 + months = [{ name = "FEBRUARY" }, { name = "MAY" }, { name = "AUGUST" }, { name = "NOVEMBER" }] + preference = "CUSTOM_PREFERENCE" + weeks_of_month = [2, 4] + } + tags = { + "env" = "dev" + } + +} + + + + +`, exaInfraRes, odbNetRes, avmcDisplayName) + + return res +} + +func (autonomousVMClusterResourceTest) oracleDBNetwork(odbNetName string) string { + networkRes := fmt.Sprintf(` + + + + +resource "aws_odb_network" "test" { + display_name = %[1]q + availability_zone_id = "use1-az6" + client_subnet_cidr = "10.2.0.0/24" + backup_subnet_cidr = "10.2.1.0/24" + s3_access = "DISABLED" + zero_etl_access = "DISABLED" +} + + +`, odbNetName) + return networkRes +} + +func (autonomousVMClusterResourceTest) exaInfra(exaDisplayName, emailAddress string) string { + resource := fmt.Sprintf(` +resource "aws_odb_cloud_exadata_infrastructure" "test" { + display_name = %[1]q + shape = "Exadata.X9M" + storage_count = 3 + compute_count = 2 + availability_zone_id = "use1-az6" + customer_contacts_to_send_to_oci = [{ email = "%[2]s" }] + 
maintenance_window { + custom_action_timeout_in_mins = 16 + is_custom_action_timeout_enabled = true + patching_mode = "ROLLING" + preference = "NO_PREFERENCE" + } +} +`, exaDisplayName, emailAddress) + + return resource +} diff --git a/internal/service/odb/cloud_autonomous_vm_clusters_data_source.go b/internal/service/odb/cloud_autonomous_vm_clusters_data_source.go new file mode 100644 index 000000000000..d5e8840acf48 --- /dev/null +++ b/internal/service/odb/cloud_autonomous_vm_clusters_data_source.go @@ -0,0 +1,96 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package odb + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/service/odb" + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-provider-aws/internal/create" + "github.com/hashicorp/terraform-provider-aws/internal/framework" + "github.com/hashicorp/terraform-provider-aws/internal/framework/flex" + fwtypes "github.com/hashicorp/terraform-provider-aws/internal/framework/types" + "github.com/hashicorp/terraform-provider-aws/names" +) + +// Function annotations are used for datasource registration to the Provider. DO NOT EDIT. 
+// @FrameworkDataSource("aws_odb_cloud_autonomous_vm_clusters", name="Cloud Autonomous Vm Clusters") +func newDataSourceCloudAutonomousVmClustersList(context.Context) (datasource.DataSourceWithConfigure, error) { + return &dataSourceCloudAutonomousVmClustersList{}, nil +} + +const ( + DSNameCloudAutonomousVmClustersList = "Cloud Autonomous Vm Clusters List Data Source" +) + +type dataSourceCloudAutonomousVmClustersList struct { + framework.DataSourceWithModel[cloudAutonomousVmClusterListModel] +} + +func (d *dataSourceCloudAutonomousVmClustersList) Schema(ctx context.Context, req datasource.SchemaRequest, resp *datasource.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: map[string]schema.Attribute{ + "cloud_autonomous_vm_clusters": schema.ListAttribute{ + Computed: true, + Description: "List of Cloud Autonomous VM Clusters. The list going to contain basic information about the cloud autonomous VM clusters.", + CustomType: fwtypes.NewListNestedObjectTypeOf[cloudAutonomousVmClusterSummary](ctx), + }, + }, + } +} + +// Data sources only have a read method. +func (d *dataSourceCloudAutonomousVmClustersList) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + conn := d.Meta().ODBClient(ctx) + var data cloudAutonomousVmClusterListModel + resp.Diagnostics.Append(req.Config.Get(ctx, &data)...) + if resp.Diagnostics.HasError() { + return + } + out, err := ListCloudAutonomousVmClusters(ctx, conn) + if err != nil { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.ODB, create.ErrActionReading, DSNameCloudAutonomousVmClustersList, "", err), + err.Error(), + ) + return + } + resp.Diagnostics.Append(flex.Flatten(ctx, out, &data)...) + if resp.Diagnostics.HasError() { + return + } + resp.Diagnostics.Append(resp.State.Set(ctx, &data)...) 
+} + +func ListCloudAutonomousVmClusters(ctx context.Context, conn *odb.Client) (*odb.ListCloudAutonomousVmClustersOutput, error) { + out := odb.ListCloudAutonomousVmClustersOutput{} + paginator := odb.NewListCloudAutonomousVmClustersPaginator(conn, &odb.ListCloudAutonomousVmClustersInput{}) + for paginator.HasMorePages() { + page, err := paginator.NextPage(ctx) + if err != nil { + return nil, err + } + out.CloudAutonomousVmClusters = append(out.CloudAutonomousVmClusters, page.CloudAutonomousVmClusters...) + } + return &out, nil +} + +type cloudAutonomousVmClusterListModel struct { + framework.WithRegionModel + CloudAutonomousVmClusters fwtypes.ListNestedObjectValueOf[cloudAutonomousVmClusterSummary] `tfsdk:"cloud_autonomous_vm_clusters"` +} + +type cloudAutonomousVmClusterSummary struct { + CloudAutonomousVmClusterArn types.String `tfsdk:"arn"` + CloudAutonomousVmClusterId types.String `tfsdk:"id"` + CloudExadataInfrastructureId types.String `tfsdk:"cloud_exadata_infrastructure_id"` + OdbNetworkId types.String `tfsdk:"odb_network_id"` + OciResourceAnchorName types.String `tfsdk:"oci_resource_anchor_name"` + OciUrl types.String `tfsdk:"oci_url"` + Ocid types.String `tfsdk:"ocid"` + DisplayName types.String `tfsdk:"display_name"` +} diff --git a/internal/service/odb/cloud_autonomous_vm_clusters_data_source_test.go b/internal/service/odb/cloud_autonomous_vm_clusters_data_source_test.go new file mode 100644 index 000000000000..05c3cb241ac1 --- /dev/null +++ b/internal/service/odb/cloud_autonomous_vm_clusters_data_source_test.go @@ -0,0 +1,83 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package odb_test + +import ( + "context" + "errors" + "strconv" + "testing" + + "github.com/aws/aws-sdk-go-v2/service/odb" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/terraform" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/create" + tfodb "github.com/hashicorp/terraform-provider-aws/internal/service/odb" + "github.com/hashicorp/terraform-provider-aws/names" +) + +type listAVMCListDSTest struct { +} + +func TestAccODBListAutonomousVmClustersDataSource_basic(t *testing.T) { + ctx := acctest.Context(t) + var avmcListTest = listAVMCListDSTest{} + var output odb.ListCloudAutonomousVmClustersOutput + + dataSourceName := "data.aws_odb_cloud_autonomous_vm_clusters.test" + resource.Test(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + avmcListTest.testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.ODBServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + Config: avmcListTest.basic(), + Check: resource.ComposeAggregateTestCheckFunc( + resource.ComposeTestCheckFunc(func(s *terraform.State) error { + avmcListTest.count(ctx, dataSourceName, &output) + resource.TestCheckResourceAttr(dataSourceName, "cloud_autonomous_vm_clusters.#", strconv.Itoa(len(output.CloudAutonomousVmClusters))) + return nil + }, + ), + ), + }, + }, + }) +} + +func (listAVMCListDSTest) basic() string { + return `data "aws_odb_cloud_autonomous_vm_clusters" "test" {}` +} + +func (listAVMCListDSTest) count(ctx context.Context, name string, list *odb.ListCloudAutonomousVmClustersOutput) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[name] + if !ok { + return create.Error(names.ODB, 
create.ErrActionCheckingExistence, tfodb.DSNameCloudAutonomousVmClustersList, name, errors.New("not found")) + } + conn := acctest.Provider.Meta().(*conns.AWSClient).ODBClient(ctx) + resp, err := tfodb.ListCloudAutonomousVmClusters(ctx, conn) + if err != nil { + return create.Error(names.ODB, create.ErrActionCheckingExistence, tfodb.DSNameCloudAutonomousVmClustersList, rs.Primary.ID, err) + } + list.CloudAutonomousVmClusters = resp.CloudAutonomousVmClusters + return nil + } +} +func (listAVMCListDSTest) testAccPreCheck(ctx context.Context, t *testing.T) { + conn := acctest.Provider.Meta().(*conns.AWSClient).ODBClient(ctx) + input := odb.ListCloudAutonomousVmClustersInput{} + _, err := conn.ListCloudAutonomousVmClusters(ctx, &input) + if acctest.PreCheckSkipError(err) { + t.Skipf("skipping acceptance testing: %s", err) + } + if err != nil { + t.Fatalf("unexpected PreCheck error: %s", err) + } +} diff --git a/internal/service/odb/cloud_exadata_infrastructure.go b/internal/service/odb/cloud_exadata_infrastructure.go new file mode 100644 index 000000000000..0b4b185e3bb0 --- /dev/null +++ b/internal/service/odb/cloud_exadata_infrastructure.go @@ -0,0 +1,641 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package odb + +import ( + "context" + "errors" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/odb" + odbtypes "github.com/aws/aws-sdk-go-v2/service/odb/types" + "github.com/hashicorp/terraform-plugin-framework-timeouts/resource/timeouts" + "github.com/hashicorp/terraform-plugin-framework-timetypes/timetypes" + "github.com/hashicorp/terraform-plugin-framework-validators/listvalidator" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/int32planmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/setplanmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" + "github.com/hashicorp/terraform-provider-aws/internal/create" + "github.com/hashicorp/terraform-provider-aws/internal/enum" + "github.com/hashicorp/terraform-provider-aws/internal/errs" + "github.com/hashicorp/terraform-provider-aws/internal/errs/fwdiag" + "github.com/hashicorp/terraform-provider-aws/internal/framework" + "github.com/hashicorp/terraform-provider-aws/internal/framework/flex" + fwtypes "github.com/hashicorp/terraform-provider-aws/internal/framework/types" + tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + "github.com/hashicorp/terraform-provider-aws/names" +) + +// Function annotations are used for resource registration to the Provider. DO NOT EDIT. 
+// @FrameworkResource("aws_odb_cloud_exadata_infrastructure", name="Cloud Exadata Infrastructure") +// @Tags(identifierAttribute="arn") +func newResourceCloudExadataInfrastructure(_ context.Context) (resource.ResourceWithConfigure, error) { + r := &resourceCloudExadataInfrastructure{} + + r.SetDefaultCreateTimeout(24 * time.Hour) + r.SetDefaultUpdateTimeout(24 * time.Hour) + r.SetDefaultDeleteTimeout(24 * time.Hour) + + return r, nil +} + +const ( + ResNameCloudExadataInfrastructure = "Cloud Exadata Infrastructure" +) + +type resourceCloudExadataInfrastructure struct { + framework.ResourceWithModel[cloudExadataInfrastructureResourceModel] + framework.WithTimeouts + framework.WithImportByID +} + +func (r *resourceCloudExadataInfrastructure) Schema(ctx context.Context, req resource.SchemaRequest, resp *resource.SchemaResponse) { + statusType := fwtypes.StringEnumType[odbtypes.ResourceStatus]() + computeModelType := fwtypes.StringEnumType[odbtypes.ComputeModel]() + + resp.Schema = schema.Schema{ + Attributes: map[string]schema.Attribute{ + "activated_storage_count": schema.Int32Attribute{ + Computed: true, + Description: "The number of storage servers requested for the Exadata infrastructure", + }, + "additional_storage_count": schema.Int32Attribute{ + Computed: true, + Description: " The number of storage servers requested for the Exadata infrastructure", + }, + "database_server_type": schema.StringAttribute{ + Optional: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + Description: "The database server model type of the Exadata infrastructure. For the list of valid model names, use the ListDbSystemShapes operation", + }, + "storage_server_type": schema.StringAttribute{ + Optional: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + Description: "The storage server model type of the Exadata infrastructure. 
For the list of valid model names, use the ListDbSystemShapes operation", + }, + names.AttrARN: framework.ARNAttributeComputedOnly(), + names.AttrID: framework.IDAttribute(), + "available_storage_size_in_gbs": schema.Int32Attribute{ + Computed: true, + Description: "The amount of available storage, in gigabytes (GB), for the Exadata infrastructure", + }, + names.AttrAvailabilityZone: schema.StringAttribute{ + Optional: true, + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + stringplanmodifier.UseStateForUnknown(), + }, + Description: "The name of the Availability Zone (AZ) where the Exadata infrastructure is located. Changing this will force terraform to create new resource", + }, + "availability_zone_id": schema.StringAttribute{ + Required: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + stringplanmodifier.UseStateForUnknown(), + }, + Description: " The AZ ID of the AZ where the Exadata infrastructure is located. 
Changing this will force terraform to create new resource", + }, + "compute_count": schema.Int32Attribute{ + Optional: true, + Computed: true, + PlanModifiers: []planmodifier.Int32{ + int32planmodifier.RequiresReplace(), + int32planmodifier.UseStateForUnknown(), + }, + Description: " The number of compute instances that the Exadata infrastructure is located", + }, + "cpu_count": schema.Int32Attribute{ + Computed: true, + Description: "The total number of CPU cores that are allocated to the Exadata infrastructure", + }, + "data_storage_size_in_tbs": schema.Float64Attribute{ + Computed: true, + Description: "The size of the Exadata infrastructure's data disk group, in terabytes (TB)", + }, + "db_node_storage_size_in_gbs": schema.Int32Attribute{ + Computed: true, + Description: "The size of the Exadata infrastructure's local node storage, in gigabytes (GB)", + }, + "db_server_version": schema.StringAttribute{ + Computed: true, + Description: "The software version of the database servers (dom0) in the Exadata infrastructure", + }, + names.AttrDisplayName: schema.StringAttribute{ + Required: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + Description: "The user-friendly name for the Exadata infrastructure. 
Changing this will force terraform to create a new resource", + }, + "last_maintenance_run_id": schema.StringAttribute{ + Computed: true, + Description: "The Oracle Cloud Identifier (OCID) of the last maintenance run for the Exadata infrastructure", + }, + "max_cpu_count": schema.Int32Attribute{ + Computed: true, + Description: "The total number of CPU cores available on the Exadata infrastructure", + }, + "max_data_storage_in_tbs": schema.Float64Attribute{ + Computed: true, + Description: "The total amount of data disk group storage, in terabytes (TB), that's available on the Exadata infrastructure", + }, + "max_db_node_storage_size_in_gbs": schema.Int32Attribute{ + Computed: true, + Description: "The total amount of local node storage, in gigabytes (GB), that's available on the Exadata infrastructure", + }, + "max_memory_in_gbs": schema.Int32Attribute{ + Computed: true, + Description: "The total amount of memory in gigabytes (GB) available on the Exadata infrastructure", + }, + "memory_size_in_gbs": schema.Int32Attribute{ + Computed: true, + Description: "The amount of memory, in gigabytes (GB), that's allocated on the Exadata infrastructure", + }, + "monthly_db_server_version": schema.StringAttribute{ + Computed: true, + Description: "The monthly software version of the database servers in the Exadata infrastructure", + }, + "monthly_storage_server_version": schema.StringAttribute{ + Computed: true, + Description: "The monthly software version of the storage servers installed on the Exadata infrastructure", + }, + "next_maintenance_run_id": schema.StringAttribute{ + Computed: true, + Description: "The OCID of the next maintenance run for the Exadata infrastructure", + }, + "ocid": schema.StringAttribute{ + Computed: true, + Description: "The OCID of the Exadata infrastructure", + }, + "oci_resource_anchor_name": schema.StringAttribute{ + Computed: true, + Description: "The name of the OCI resource anchor for the Exadata infrastructure", + }, + "oci_url": 
schema.StringAttribute{ + Computed: true, + Description: "The HTTPS link to the Exadata infrastructure in OCI", + }, + "percent_progress": schema.Float64Attribute{ + Computed: true, + Description: "The amount of progress made on the current operation on the Exadata infrastructure, expressed as a percentage", + }, + "shape": schema.StringAttribute{ + Required: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + Description: "The model name of the Exadata infrastructure. Changing this will force terraform to create new resource", + }, + names.AttrStatus: schema.StringAttribute{ + CustomType: statusType, + Computed: true, + Description: "The current status of the Exadata infrastructure", + }, + names.AttrStatusReason: schema.StringAttribute{ + Computed: true, + Description: "Additional information about the status of the Exadata infrastructure", + }, + "storage_count": schema.Int32Attribute{ + Optional: true, + Computed: true, + PlanModifiers: []planmodifier.Int32{ + int32planmodifier.RequiresReplace(), + int32planmodifier.UseStateForUnknown(), + }, + Description: "TThe number of storage servers that are activated for the Exadata infrastructure", + }, + "storage_server_version": schema.StringAttribute{ + Computed: true, + Description: "The software version of the storage servers on the Exadata infrastructure.", + }, + names.AttrTags: tftags.TagsAttribute(), + names.AttrTagsAll: tftags.TagsAttributeComputedOnly(), + "total_storage_size_in_gbs": schema.Int32Attribute{ + Computed: true, + Description: "The total amount of storage, in gigabytes (GB), on the Exadata infrastructure.", + }, + names.AttrCreatedAt: schema.StringAttribute{ + Computed: true, + CustomType: timetypes.RFC3339Type{}, + Description: "The time when the Exadata infrastructure was created.", + }, + "compute_model": schema.StringAttribute{ + CustomType: computeModelType, + Computed: true, + Description: "The OCI model compute model used when you create or clone 
an\n " + + " instance: ECPU or OCPU. An ECPU is an abstracted measure of\n " + + "compute resources. ECPUs are based on the number of cores\n " + + "elastically allocated from a pool of compute and storage servers.\n " + + " An OCPU is a legacy physical measure of compute resources. OCPUs\n " + + "are based on the physical core of a processor with\n " + + " hyper-threading enabled.", + }, + "customer_contacts_to_send_to_oci": schema.SetAttribute{ + CustomType: fwtypes.NewSetNestedObjectTypeOf[customerContactExaInfraResourceModel](ctx), + Optional: true, + PlanModifiers: []planmodifier.Set{ + setplanmodifier.RequiresReplace(), + setplanmodifier.UseStateForUnknown(), + }, + Description: "The email addresses of contacts to receive notification from Oracle about maintenance updates for the Exadata infrastructure. Changing this will force terraform to create new resource", + }, + }, + Blocks: map[string]schema.Block{ + names.AttrTimeouts: timeouts.Block(ctx, timeouts.Opts{ + Create: true, + Update: true, + Delete: true, + }), + "maintenance_window": schema.ListNestedBlock{ + CustomType: fwtypes.NewListNestedObjectTypeOf[cloudExadataInfraMaintenanceWindowResourceModel](ctx), + Validators: []validator.List{ + listvalidator.SizeAtMost(1), + listvalidator.IsRequired(), + }, + Description: " The scheduling details for the maintenance window. 
Patching and system updates take place during the maintenance window ", + NestedObject: schema.NestedBlockObject{ + Attributes: map[string]schema.Attribute{ + "custom_action_timeout_in_mins": schema.Int32Attribute{ + Required: true, + }, + "days_of_week": schema.SetAttribute{ + ElementType: fwtypes.NewObjectTypeOf[dayOfWeekExaInfraMaintenanceWindowResourceModel](ctx), + Optional: true, + Computed: true, + }, + "hours_of_day": schema.SetAttribute{ + ElementType: types.Int32Type, + Optional: true, + Computed: true, + }, + "is_custom_action_timeout_enabled": schema.BoolAttribute{ + Required: true, + }, + "lead_time_in_weeks": schema.Int32Attribute{ + Optional: true, + Computed: true, + }, + "months": schema.SetAttribute{ + ElementType: fwtypes.NewObjectTypeOf[monthExaInfraMaintenanceWindowResourceModel](ctx), + Optional: true, + Computed: true, + }, + "patching_mode": schema.StringAttribute{ + Required: true, + CustomType: fwtypes.StringEnumType[odbtypes.PatchingModeType](), + }, + "preference": schema.StringAttribute{ + Required: true, + CustomType: fwtypes.StringEnumType[odbtypes.PreferenceType](), + }, + "weeks_of_month": schema.SetAttribute{ + ElementType: types.Int32Type, + Optional: true, + Computed: true, + }, + }, + }, + }, + }, + } +} + +func (r *resourceCloudExadataInfrastructure) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { + conn := r.Meta().ODBClient(ctx) + + var plan cloudExadataInfrastructureResourceModel + + resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...) + if resp.Diagnostics.HasError() { + return + } + + input := odb.CreateCloudExadataInfrastructureInput{ + Tags: getTagsIn(ctx), + } + + resp.Diagnostics.Append(flex.Expand(ctx, plan, &input)...) 
+ if resp.Diagnostics.HasError() { + return + } + + out, err := conn.CreateCloudExadataInfrastructure(ctx, &input) + if err != nil { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.ODB, create.ErrActionCreating, ResNameCloudExadataInfrastructure, plan.DisplayName.ValueString(), err), + err.Error(), + ) + return + } + if out == nil || out.CloudExadataInfrastructureId == nil { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.ODB, create.ErrActionCreating, ResNameCloudExadataInfrastructure, plan.DisplayName.ValueString(), nil), + errors.New("empty output").Error(), + ) + return + } + + createTimeout := r.CreateTimeout(ctx, plan.Timeouts) + createdExaInfra, err := waitCloudExadataInfrastructureCreated(ctx, conn, aws.ToString(out.CloudExadataInfrastructureId), createTimeout) + resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root(names.AttrID), aws.ToString(out.CloudExadataInfrastructureId))...) + if err != nil { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.ODB, create.ErrActionWaitingForCreation, ResNameCloudExadataInfrastructure, plan.DisplayName.ValueString(), err), + err.Error(), + ) + return + } + + resp.Diagnostics.Append(flex.Flatten(ctx, createdExaInfra, &plan)...) + + if resp.Diagnostics.HasError() { + return + } + resp.Diagnostics.Append(resp.State.Set(ctx, plan)...) +} + +func (r *resourceCloudExadataInfrastructure) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { + conn := r.Meta().ODBClient(ctx) + var state cloudExadataInfrastructureResourceModel + resp.Diagnostics.Append(req.State.Get(ctx, &state)...) 
+ if resp.Diagnostics.HasError() { + return + } + + out, err := findExadataInfraResourceByID(ctx, conn, state.CloudExadataInfrastructureId.ValueString()) + if tfresource.NotFound(err) { + resp.Diagnostics.Append(fwdiag.NewResourceNotFoundWarningDiagnostic(err)) + resp.State.RemoveResource(ctx) + return + } + if err != nil { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.ODB, create.ErrActionReading, ResNameCloudExadataInfrastructure, state.CloudExadataInfrastructureId.String(), err), + err.Error(), + ) + return + } + + resp.Diagnostics.Append(flex.Flatten(ctx, out, &state)...) + + if resp.Diagnostics.HasError() { + return + } + resp.Diagnostics.Append(resp.State.Set(ctx, &state)...) +} + +func (r *resourceCloudExadataInfrastructure) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) { + var plan, state cloudExadataInfrastructureResourceModel + resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...) + resp.Diagnostics.Append(req.State.Get(ctx, &state)...) + if resp.Diagnostics.HasError() { + return + } + conn := r.Meta().ODBClient(ctx) + + diff, d := flex.Diff(ctx, plan, state) + resp.Diagnostics.Append(d...) + if resp.Diagnostics.HasError() { + return + } + if diff.HasChanges() { + updatedMW := odb.UpdateCloudExadataInfrastructureInput{} + resp.Diagnostics.Append(flex.Expand(ctx, plan, &updatedMW)...) 
+ + out, err := conn.UpdateCloudExadataInfrastructure(ctx, &updatedMW) + if err != nil { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.ODB, create.ErrActionUpdating, ResNameCloudExadataInfrastructure, state.CloudExadataInfrastructureId.ValueString(), err), + err.Error(), + ) + return + } + if out == nil { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.ODB, create.ErrActionUpdating, ResNameCloudExadataInfrastructure, state.CloudExadataInfrastructureId.ValueString(), err), + err.Error(), + ) + return + } + } + updateTimeout := r.UpdateTimeout(ctx, plan.Timeouts) + updatedExaInfra, err := waitCloudExadataInfrastructureUpdated(ctx, conn, state.CloudExadataInfrastructureId.ValueString(), updateTimeout) + if err != nil { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.ODB, create.ErrActionWaitingForUpdate, ResNameCloudExadataInfrastructure, state.CloudExadataInfrastructureId.ValueString(), err), + err.Error(), + ) + return + } + resp.Diagnostics.Append(flex.Flatten(ctx, updatedExaInfra, &plan)...) + resp.Diagnostics.Append(resp.State.Set(ctx, &plan)...) +} + +func (r *resourceCloudExadataInfrastructure) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { + conn := r.Meta().ODBClient(ctx) + + var state cloudExadataInfrastructureResourceModel + resp.Diagnostics.Append(req.State.Get(ctx, &state)...) 
+ if resp.Diagnostics.HasError() { + return + } + + input := odb.DeleteCloudExadataInfrastructureInput{ + CloudExadataInfrastructureId: state.CloudExadataInfrastructureId.ValueStringPointer(), + } + + _, err := conn.DeleteCloudExadataInfrastructure(ctx, &input) + if err != nil { + if errs.IsA[*odbtypes.ResourceNotFoundException](err) { + return + } + + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.ODB, create.ErrActionDeleting, ResNameCloudExadataInfrastructure, state.CloudExadataInfrastructureId.String(), err), + err.Error(), + ) + return + } + + deleteTimeout := r.DeleteTimeout(ctx, state.Timeouts) + _, err = waitCloudExadataInfrastructureDeleted(ctx, conn, state.CloudExadataInfrastructureId.ValueString(), deleteTimeout) + if err != nil { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.ODB, create.ErrActionWaitingForDeletion, ResNameCloudExadataInfrastructure, state.CloudExadataInfrastructureId.String(), err), + err.Error(), + ) + return + } +} + +func waitCloudExadataInfrastructureCreated(ctx context.Context, conn *odb.Client, id string, timeout time.Duration) (*odbtypes.CloudExadataInfrastructure, error) { + stateConf := &retry.StateChangeConf{ + Pending: enum.Slice(odbtypes.ResourceStatusProvisioning), + Target: enum.Slice(odbtypes.ResourceStatusAvailable, odbtypes.ResourceStatusFailed), + Refresh: statusCloudExadataInfrastructure(ctx, conn, id), + PollInterval: 1 * time.Minute, + Timeout: timeout, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + if out, ok := outputRaw.(*odbtypes.CloudExadataInfrastructure); ok { + return out, err + } + return nil, err +} + +func waitCloudExadataInfrastructureUpdated(ctx context.Context, conn *odb.Client, id string, timeout time.Duration) (*odbtypes.CloudExadataInfrastructure, error) { + stateConf := &retry.StateChangeConf{ + Pending: enum.Slice(odbtypes.ResourceStatusUpdating), + Target: enum.Slice(odbtypes.ResourceStatusAvailable, odbtypes.ResourceStatusFailed), + Refresh: 
statusCloudExadataInfrastructure(ctx, conn, id), + PollInterval: 1 * time.Minute, + Timeout: timeout, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + if out, ok := outputRaw.(*odbtypes.CloudExadataInfrastructure); ok { + return out, err + } + + return nil, err +} + +func waitCloudExadataInfrastructureDeleted(ctx context.Context, conn *odb.Client, id string, timeout time.Duration) (*odbtypes.CloudExadataInfrastructure, error) { + stateConf := &retry.StateChangeConf{ + Pending: enum.Slice(odbtypes.ResourceStatusTerminating), + Target: []string{}, + Refresh: statusCloudExadataInfrastructure(ctx, conn, id), + Timeout: timeout, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + if out, ok := outputRaw.(*odbtypes.CloudExadataInfrastructure); ok { + return out, err + } + + return nil, err +} + +func statusCloudExadataInfrastructure(ctx context.Context, conn *odb.Client, id string) retry.StateRefreshFunc { + return func() (any, string, error) { + out, err := findExadataInfraResourceByID(ctx, conn, id) + if tfresource.NotFound(err) { + return nil, "", nil + } + + if err != nil { + return nil, "", err + } + + return out, string(out.Status), nil + } +} + +func findExadataInfraResourceByID(ctx context.Context, conn *odb.Client, id string) (*odbtypes.CloudExadataInfrastructure, error) { + input := odb.GetCloudExadataInfrastructureInput{ + CloudExadataInfrastructureId: aws.String(id), + } + + out, err := conn.GetCloudExadataInfrastructure(ctx, &input) + if err != nil { + if errs.IsA[*odbtypes.ResourceNotFoundException](err) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: &input, + } + } + + return nil, err + } + + if out == nil || out.CloudExadataInfrastructure == nil { + return nil, tfresource.NewEmptyResultError(&input) + } + + return out.CloudExadataInfrastructure, nil +} + +type cloudExadataInfrastructureResourceModel struct { + framework.WithRegionModel + ActivatedStorageCount types.Int32 `tfsdk:"activated_storage_count"` + 
AdditionalStorageCount types.Int32 `tfsdk:"additional_storage_count"` + DatabaseServerType types.String `tfsdk:"database_server_type" ` + StorageServerType types.String `tfsdk:"storage_server_type" ` + AvailabilityZone types.String `tfsdk:"availability_zone"` + AvailabilityZoneId types.String `tfsdk:"availability_zone_id"` + AvailableStorageSizeInGBs types.Int32 `tfsdk:"available_storage_size_in_gbs"` + CloudExadataInfrastructureArn types.String `tfsdk:"arn"` + CloudExadataInfrastructureId types.String `tfsdk:"id"` + ComputeCount types.Int32 `tfsdk:"compute_count"` + CpuCount types.Int32 `tfsdk:"cpu_count"` + CustomerContactsToSendToOCI fwtypes.SetNestedObjectValueOf[customerContactExaInfraResourceModel] `tfsdk:"customer_contacts_to_send_to_oci"` + DataStorageSizeInTBs types.Float64 `tfsdk:"data_storage_size_in_tbs"` + DbNodeStorageSizeInGBs types.Int32 `tfsdk:"db_node_storage_size_in_gbs"` + DbServerVersion types.String `tfsdk:"db_server_version"` + DisplayName types.String `tfsdk:"display_name"` + LastMaintenanceRunId types.String `tfsdk:"last_maintenance_run_id"` + MaxCpuCount types.Int32 `tfsdk:"max_cpu_count"` + MaxDataStorageInTBs types.Float64 `tfsdk:"max_data_storage_in_tbs"` + MaxDbNodeStorageSizeInGBs types.Int32 `tfsdk:"max_db_node_storage_size_in_gbs"` + MaxMemoryInGBs types.Int32 `tfsdk:"max_memory_in_gbs"` + MemorySizeInGBs types.Int32 `tfsdk:"memory_size_in_gbs"` + MonthlyDbServerVersion types.String `tfsdk:"monthly_db_server_version"` + MonthlyStorageServerVersion types.String `tfsdk:"monthly_storage_server_version"` + NextMaintenanceRunId types.String `tfsdk:"next_maintenance_run_id"` + Ocid types.String `tfsdk:"ocid"` + OciResourceAnchorName types.String `tfsdk:"oci_resource_anchor_name"` + OciUrl types.String `tfsdk:"oci_url"` + PercentProgress types.Float64 `tfsdk:"percent_progress"` + Shape types.String `tfsdk:"shape"` + Status fwtypes.StringEnum[odbtypes.ResourceStatus] `tfsdk:"status"` + StatusReason types.String `tfsdk:"status_reason"` + 
StorageCount types.Int32 `tfsdk:"storage_count"` + StorageServerVersion types.String `tfsdk:"storage_server_version"` + TotalStorageSizeInGBs types.Int32 `tfsdk:"total_storage_size_in_gbs"` + Timeouts timeouts.Value `tfsdk:"timeouts"` + CreatedAt timetypes.RFC3339 `tfsdk:"created_at" ` + ComputeModel fwtypes.StringEnum[odbtypes.ComputeModel] `tfsdk:"compute_model"` + Tags tftags.Map `tfsdk:"tags"` + TagsAll tftags.Map `tfsdk:"tags_all"` + MaintenanceWindow fwtypes.ListNestedObjectValueOf[cloudExadataInfraMaintenanceWindowResourceModel] `tfsdk:"maintenance_window"` +} + +type cloudExadataInfraMaintenanceWindowResourceModel struct { + CustomActionTimeoutInMins types.Int32 `tfsdk:"custom_action_timeout_in_mins"` + DaysOfWeek fwtypes.SetNestedObjectValueOf[dayOfWeekExaInfraMaintenanceWindowResourceModel] `tfsdk:"days_of_week" ` + HoursOfDay fwtypes.SetValueOf[types.Int64] `tfsdk:"hours_of_day"` + IsCustomActionTimeoutEnabled types.Bool `tfsdk:"is_custom_action_timeout_enabled"` + LeadTimeInWeeks types.Int32 `tfsdk:"lead_time_in_weeks"` + Months fwtypes.SetNestedObjectValueOf[monthExaInfraMaintenanceWindowResourceModel] `tfsdk:"months" ` + PatchingMode fwtypes.StringEnum[odbtypes.PatchingModeType] `tfsdk:"patching_mode"` + Preference fwtypes.StringEnum[odbtypes.PreferenceType] `tfsdk:"preference"` + WeeksOfMonth fwtypes.SetValueOf[types.Int64] `tfsdk:"weeks_of_month"` +} + +type dayOfWeekExaInfraMaintenanceWindowResourceModel struct { + Name fwtypes.StringEnum[odbtypes.DayOfWeekName] `tfsdk:"name"` +} + +type monthExaInfraMaintenanceWindowResourceModel struct { + Name fwtypes.StringEnum[odbtypes.MonthName] `tfsdk:"name"` +} + +type customerContactExaInfraResourceModel struct { + Email types.String `tfsdk:"email"` +} diff --git a/internal/service/odb/cloud_exadata_infrastructure_data_source.go b/internal/service/odb/cloud_exadata_infrastructure_data_source.go new file mode 100644 index 000000000000..5543fca1e4d0 --- /dev/null +++ 
b/internal/service/odb/cloud_exadata_infrastructure_data_source.go @@ -0,0 +1,328 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package odb + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/odb" + odbtypes "github.com/aws/aws-sdk-go-v2/service/odb/types" + "github.com/hashicorp/terraform-plugin-framework-timetypes/timetypes" + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" + "github.com/hashicorp/terraform-provider-aws/internal/create" + "github.com/hashicorp/terraform-provider-aws/internal/errs" + "github.com/hashicorp/terraform-provider-aws/internal/framework" + "github.com/hashicorp/terraform-provider-aws/internal/framework/flex" + fwtypes "github.com/hashicorp/terraform-provider-aws/internal/framework/types" + tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + "github.com/hashicorp/terraform-provider-aws/names" +) + +// Function annotations are used for datasource registration to the Provider. DO NOT EDIT. 
+// @FrameworkDataSource("aws_odb_cloud_exadata_infrastructure", name="Cloud Exadata Infrastructure") +// @Tags(identifierAttribute="arn") +func newDataSourceCloudExadataInfrastructure(context.Context) (datasource.DataSourceWithConfigure, error) { + return &dataSourceCloudExadataInfrastructure{}, nil +} + +const ( + DSNameCloudExadataInfrastructure = "Cloud Exadata Infrastructure Data Source" +) + +type dataSourceCloudExadataInfrastructure struct { + framework.DataSourceWithModel[cloudExadataInfrastructureDataSourceModel] +} + +func (d *dataSourceCloudExadataInfrastructure) Schema(ctx context.Context, req datasource.SchemaRequest, resp *datasource.SchemaResponse) { + statusType := fwtypes.StringEnumType[odbtypes.ResourceStatus]() + computeModelType := fwtypes.StringEnumType[odbtypes.ComputeModel]() + resp.Schema = schema.Schema{ + Attributes: map[string]schema.Attribute{ + "activated_storage_count": schema.Int32Attribute{ + Computed: true, + Description: "The number of storage servers requested for the Exadata infrastructure.", + }, + "additional_storage_count": schema.Int32Attribute{ + Computed: true, + Description: "The number of storage servers requested for the Exadata infrastructure.", + }, + "available_storage_size_in_gbs": schema.Int32Attribute{ + Computed: true, + Description: "The amount of available storage, in gigabytes (GB), for the Exadata infrastructure.", + }, + names.AttrAvailabilityZone: schema.StringAttribute{ + Computed: true, + Description: "he name of the Availability Zone (AZ) where the Exadata infrastructure is located.", + }, + "availability_zone_id": schema.StringAttribute{ + Computed: true, + Description: "The AZ ID of the AZ where the Exadata infrastructure is located.", + }, + names.AttrARN: schema.StringAttribute{ + Computed: true, + Description: "The Amazon Resource Name (ARN) for the Exadata infrastructure.", + }, + names.AttrID: schema.StringAttribute{ + Required: true, + Description: "The unique identifier of the Exadata 
infrastructure.", + }, + "compute_count": schema.Int32Attribute{ + Computed: true, + Description: "The number of database servers for the Exadata infrastructure.", + }, + "cpu_count": schema.Int32Attribute{ + Computed: true, + Description: "The total number of CPU cores that are allocated to the Exadata infrastructure.", + }, + "data_storage_size_in_tbs": schema.Float64Attribute{ + Computed: true, + Description: "The size of the Exadata infrastructure's data disk group, in terabytes (TB).", + }, + "db_node_storage_size_in_gbs": schema.Int32Attribute{ + Computed: true, + Description: "The database server model type of the Exadata infrastructure. For the list of\n" + + "valid model names, use the ListDbSystemShapes operation.", + }, + "db_server_version": schema.StringAttribute{ + Computed: true, + Description: "The version of the Exadata infrastructure.", + }, + names.AttrDisplayName: schema.StringAttribute{ + Computed: true, + Description: "The display name of the Exadata infrastructure.", + }, + "last_maintenance_run_id": schema.StringAttribute{ + Computed: true, + Description: "The Oracle Cloud Identifier (OCID) of the last maintenance run for the Exadata infrastructure.", + }, + "max_cpu_count": schema.Int32Attribute{ + Computed: true, + Description: "The total number of CPU cores available on the Exadata infrastructure.", + }, + "max_data_storage_in_tbs": schema.Float64Attribute{ + Computed: true, + Description: "The total amount of data disk group storage, in terabytes (TB), that's available on the Exadata infrastructure.", + }, + "max_db_node_storage_size_in_gbs": schema.Int32Attribute{ + Computed: true, + Description: "The total amount of local node storage, in gigabytes (GB), that's available on the Exadata infrastructure.", + }, + "max_memory_in_gbs": schema.Int32Attribute{ + Computed: true, + Description: "The total amount of memory, in gigabytes (GB), that's available on the Exadata infrastructure.", + }, + "memory_size_in_gbs": schema.Int32Attribute{ + 
Computed: true, + Description: "The amount of memory, in gigabytes (GB), that's allocated on the Exadata infrastructure.", + }, + "monthly_db_server_version": schema.StringAttribute{ + Computed: true, + Description: "The monthly software version of the database servers installed on the Exadata infrastructure.", + }, + "monthly_storage_server_version": schema.StringAttribute{ + Computed: true, + Description: "The monthly software version of the storage servers installed on the Exadata infrastructure.", + }, + "next_maintenance_run_id": schema.StringAttribute{ + Computed: true, + Description: "The OCID of the next maintenance run for the Exadata infrastructure.", + }, + "oci_resource_anchor_name": schema.StringAttribute{ + Computed: true, + Description: "The name of the OCI resource anchor for the Exadata infrastructure.", + }, + "oci_url": schema.StringAttribute{ + Computed: true, + Description: "The HTTPS link to the Exadata infrastructure in OCI.", + }, + "ocid": schema.StringAttribute{ + Computed: true, + Description: "The OCID of the Exadata infrastructure in OCI.", + }, + "percent_progress": schema.Float64Attribute{ + Computed: true, + Description: "The amount of progress made on the current operation on the Exadata infrastructure expressed as a percentage.", + }, + "shape": schema.StringAttribute{ + Computed: true, + Description: "The model name of the Exadata infrastructure.", + }, + names.AttrStatus: schema.StringAttribute{ + CustomType: statusType, + Computed: true, + Description: "The status of the Exadata infrastructure.", + }, + names.AttrStatusReason: schema.StringAttribute{ + Computed: true, + Description: "Additional information about the status of the Exadata infrastructure.", + }, + "storage_count": schema.Int32Attribute{ + Computed: true, + Description: "he number of storage servers that are activated for the Exadata infrastructure.", + }, + "storage_server_version": schema.StringAttribute{ + Computed: true, + Description: "The software version of 
the storage servers on the Exadata infrastructure.", + }, + "total_storage_size_in_gbs": schema.Int32Attribute{ + Computed: true, + Description: "The total amount of storage, in gigabytes (GB), on the Exadata infrastructure.", + }, + "compute_model": schema.StringAttribute{ + CustomType: computeModelType, + Computed: true, + Description: "The OCI model compute model used when you create or clone an instance: ECPU or\n" + + "OCPU. An ECPU is an abstracted measure of compute resources. ECPUs are based on\n" + + "the number of cores elastically allocated from a pool of compute and storage\n" + + "servers. An OCPU is a legacy physical measure of compute resources. OCPUs are\n" + + "based on the physical core of a processor with hyper-threading enabled.", + }, + names.AttrCreatedAt: schema.StringAttribute{ + Computed: true, + CustomType: timetypes.RFC3339Type{}, + Description: "The time when the Exadata infrastructure was created.", + }, + "database_server_type": schema.StringAttribute{ + Computed: true, + Description: "The database server model type of the Exadata infrastructure. For the list of valid model names, use the ListDbSystemShapes operation.", + }, + "storage_server_type": schema.StringAttribute{ + Computed: true, + Description: "The storage server model type of the Exadata infrastructure. For the list of valid model names, use the ListDbSystemShapes operation.", + }, + "customer_contacts_to_send_to_oci": schema.SetAttribute{ + CustomType: fwtypes.NewSetNestedObjectTypeOf[customerContactExaInfraDataSourceModel](ctx), + Computed: true, + Description: "The email addresses of contacts to receive notification from Oracle about maintenance updates for the Exadata infrastructure.", + }, + names.AttrTags: tftags.TagsAttributeComputedOnly(), + "maintenance_window": schema.ListAttribute{ + Computed: true, + Description: "The scheduling details for the maintenance window. 
Patching and system updates take place during the maintenance window.", + CustomType: fwtypes.NewListNestedObjectTypeOf[cloudExadataInfraMaintenanceWindowDataSourceModel](ctx), + }, + }, + } +} + +func (d *dataSourceCloudExadataInfrastructure) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + conn := d.Meta().ODBClient(ctx) + + var data cloudExadataInfrastructureDataSourceModel + resp.Diagnostics.Append(req.Config.Get(ctx, &data)...) + if resp.Diagnostics.HasError() { + return + } + + out, err := FindExaDataInfraForDataSourceByID(ctx, conn, data.CloudExadataInfrastructureId.ValueString()) + if err != nil { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.ODB, create.ErrActionReading, DSNameCloudExadataInfrastructure, data.CloudExadataInfrastructureId.String(), err), + err.Error(), + ) + return + } + resp.Diagnostics.Append(flex.Flatten(ctx, out, &data)...) + if resp.Diagnostics.HasError() { + return + } + + resp.Diagnostics.Append(resp.State.Set(ctx, &data)...) 
+} + +func FindExaDataInfraForDataSourceByID(ctx context.Context, conn *odb.Client, id string) (*odbtypes.CloudExadataInfrastructure, error) { + input := odb.GetCloudExadataInfrastructureInput{ + CloudExadataInfrastructureId: aws.String(id), + } + + out, err := conn.GetCloudExadataInfrastructure(ctx, &input) + if err != nil { + if errs.IsA[*odbtypes.ResourceNotFoundException](err) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: &input, + } + } + + return nil, err + } + + if out == nil || out.CloudExadataInfrastructure == nil { + return nil, tfresource.NewEmptyResultError(&input) + } + + return out.CloudExadataInfrastructure, nil +} + +type cloudExadataInfrastructureDataSourceModel struct { + framework.WithRegionModel + ActivatedStorageCount types.Int32 `tfsdk:"activated_storage_count"` + AdditionalStorageCount types.Int32 `tfsdk:"additional_storage_count"` + AvailabilityZone types.String `tfsdk:"availability_zone"` + AvailabilityZoneId types.String `tfsdk:"availability_zone_id"` + AvailableStorageSizeInGBs types.Int32 `tfsdk:"available_storage_size_in_gbs"` + CloudExadataInfrastructureArn types.String `tfsdk:"arn"` + CloudExadataInfrastructureId types.String `tfsdk:"id"` + ComputeCount types.Int32 `tfsdk:"compute_count"` + CpuCount types.Int32 `tfsdk:"cpu_count"` + DataStorageSizeInTBs types.Float64 `tfsdk:"data_storage_size_in_tbs"` + DbNodeStorageSizeInGBs types.Int32 `tfsdk:"db_node_storage_size_in_gbs"` + DbServerVersion types.String `tfsdk:"db_server_version"` + DisplayName types.String `tfsdk:"display_name"` + LastMaintenanceRunId types.String `tfsdk:"last_maintenance_run_id"` + MaxCpuCount types.Int32 `tfsdk:"max_cpu_count"` + MaxDataStorageInTBs types.Float64 `tfsdk:"max_data_storage_in_tbs"` + MaxDbNodeStorageSizeInGBs types.Int32 `tfsdk:"max_db_node_storage_size_in_gbs"` + MaxMemoryInGBs types.Int32 `tfsdk:"max_memory_in_gbs"` + MemorySizeInGBs types.Int32 `tfsdk:"memory_size_in_gbs"` + MonthlyDbServerVersion types.String 
`tfsdk:"monthly_db_server_version"` + MonthlyStorageServerVersion types.String `tfsdk:"monthly_storage_server_version"` + NextMaintenanceRunId types.String `tfsdk:"next_maintenance_run_id"` + OciResourceAnchorName types.String `tfsdk:"oci_resource_anchor_name"` + OciUrl types.String `tfsdk:"oci_url"` + Ocid types.String `tfsdk:"ocid"` + PercentProgress types.Float64 `tfsdk:"percent_progress"` + Shape types.String `tfsdk:"shape"` + Status fwtypes.StringEnum[odbtypes.ResourceStatus] `tfsdk:"status"` + StatusReason types.String `tfsdk:"status_reason"` + StorageCount types.Int32 `tfsdk:"storage_count"` + StorageServerVersion types.String `tfsdk:"storage_server_version"` + TotalStorageSizeInGBs types.Int32 `tfsdk:"total_storage_size_in_gbs"` + CustomerContactsToSendToOCI fwtypes.SetNestedObjectValueOf[customerContactExaInfraDataSourceModel] `tfsdk:"customer_contacts_to_send_to_oci"` + ComputeModel fwtypes.StringEnum[odbtypes.ComputeModel] `tfsdk:"compute_model"` + CreatedAt timetypes.RFC3339 `tfsdk:"created_at" ` + DatabaseServerType types.String `tfsdk:"database_server_type"` + StorageServerType types.String `tfsdk:"storage_server_type"` + MaintenanceWindow fwtypes.ListNestedObjectValueOf[cloudExadataInfraMaintenanceWindowDataSourceModel] `tfsdk:"maintenance_window" ` + Tags tftags.Map `tfsdk:"tags"` +} + +type cloudExadataInfraMaintenanceWindowDataSourceModel struct { + CustomActionTimeoutInMins types.Int32 `tfsdk:"custom_action_timeout_in_mins"` + DaysOfWeek fwtypes.SetNestedObjectValueOf[dayOfWeekExaInfraMaintenanceWindowDataSourceModel] `tfsdk:"days_of_week" ` + HoursOfDay fwtypes.SetValueOf[types.Int64] `tfsdk:"hours_of_day"` + IsCustomActionTimeoutEnabled types.Bool `tfsdk:"is_custom_action_timeout_enabled"` + LeadTimeInWeeks types.Int32 `tfsdk:"lead_time_in_weeks"` + Months fwtypes.SetNestedObjectValueOf[monthExaInfraMaintenanceWindowDataSourceModel] `tfsdk:"months" ` + PatchingMode fwtypes.StringEnum[odbtypes.PatchingModeType] `tfsdk:"patching_mode"` + 
Preference fwtypes.StringEnum[odbtypes.PreferenceType] `tfsdk:"preference"` + WeeksOfMonth fwtypes.SetValueOf[types.Int64] `tfsdk:"weeks_of_month"` +} + +type dayOfWeekExaInfraMaintenanceWindowDataSourceModel struct { + Name fwtypes.StringEnum[odbtypes.DayOfWeekName] `tfsdk:"name"` +} + +type monthExaInfraMaintenanceWindowDataSourceModel struct { + Name fwtypes.StringEnum[odbtypes.MonthName] `tfsdk:"name"` +} + +type customerContactExaInfraDataSourceModel struct { + Email types.String `tfsdk:"email"` +} diff --git a/internal/service/odb/cloud_exadata_infrastructure_data_source_test.go b/internal/service/odb/cloud_exadata_infrastructure_data_source_test.go new file mode 100644 index 000000000000..83df2759c16e --- /dev/null +++ b/internal/service/odb/cloud_exadata_infrastructure_data_source_test.go @@ -0,0 +1,115 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package odb_test + +import ( + "context" + "errors" + "fmt" + "testing" + + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/terraform" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/create" + tfodb "github.com/hashicorp/terraform-provider-aws/internal/service/odb" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + "github.com/hashicorp/terraform-provider-aws/names" +) + +// Acceptance test access AWS and cost money to run. 
+type cloudExaDataInfraDataSourceTest struct { + displayNamePrefix string +} + +var exaInfraDataSourceTestEntity = cloudExaDataInfraDataSourceTest{ + displayNamePrefix: "Ofake-exa", +} + +func TestAccODBCloudExadataInfrastructureDataSource_basic(t *testing.T) { + ctx := acctest.Context(t) + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + exaInfraResource := "aws_odb_cloud_exadata_infrastructure.test" + exaInfraDataSource := "data.aws_odb_cloud_exadata_infrastructure.test" + displayNameSuffix := sdkacctest.RandomWithPrefix(exaInfraDataSourceTestEntity.displayNamePrefix) + domain := acctest.RandomDomainName() + emailAddress1 := acctest.RandomEmailAddress(domain) + emailAddress2 := acctest.RandomEmailAddress(domain) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.ODBServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: exaInfraDataSourceTestEntity.testAccCheckCloudExadataInfrastructureDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: exaInfraDataSourceTestEntity.basicExaInfraDataSource(displayNameSuffix, emailAddress1, emailAddress2), + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttrPair(exaInfraResource, names.AttrID, exaInfraDataSource, names.AttrID), + resource.TestCheckResourceAttr(exaInfraDataSource, "shape", "Exadata.X9M"), + resource.TestCheckResourceAttr(exaInfraDataSource, names.AttrStatus, "AVAILABLE"), + resource.TestCheckResourceAttr(exaInfraDataSource, "storage_count", "3"), + resource.TestCheckResourceAttr(exaInfraDataSource, "compute_count", "2"), + ), + }, + }, + }) +} + +func (cloudExaDataInfraDataSourceTest) testAccCheckCloudExadataInfrastructureDestroy(ctx context.Context) resource.TestCheckFunc { + return func(s *terraform.State) error { + conn := acctest.Provider.Meta().(*conns.AWSClient).ODBClient(ctx) + + for _, rs := range s.RootModule().Resources { + 
if rs.Type != "aws_odb_cloud_exadata_infrastructure" { + continue + } + _, err := tfodb.FindExaDataInfraForDataSourceByID(ctx, conn, rs.Primary.ID) + if tfresource.NotFound(err) { + return nil + } + if err != nil { + return create.Error(names.ODB, create.ErrActionCheckingDestroyed, tfodb.ResNameCloudExadataInfrastructure, rs.Primary.ID, err) + } + + return create.Error(names.ODB, create.ErrActionCheckingDestroyed, tfodb.ResNameCloudExadataInfrastructure, rs.Primary.ID, errors.New("not destroyed")) + } + + return nil + } +} + +func (cloudExaDataInfraDataSourceTest) basicExaInfraDataSource(displayNameSuffix, emailAddress1, emailAddress2 string) string { + testData := fmt.Sprintf(` + + + + +resource "aws_odb_cloud_exadata_infrastructure" "test" { + display_name = %[1]q + shape = "Exadata.X9M" + storage_count = 3 + compute_count = 2 + availability_zone_id = "use1-az6" + customer_contacts_to_send_to_oci = [{ email = "%[2]s" }, { email = "%[3]s" }] + maintenance_window { + custom_action_timeout_in_mins = 16 + is_custom_action_timeout_enabled = true + patching_mode = "ROLLING" + preference = "NO_PREFERENCE" + } +} + +data "aws_odb_cloud_exadata_infrastructure" "test" { + id = aws_odb_cloud_exadata_infrastructure.test.id +} +`, displayNameSuffix, emailAddress1, emailAddress2) + return testData +} diff --git a/internal/service/odb/cloud_exadata_infrastructure_test.go b/internal/service/odb/cloud_exadata_infrastructure_test.go new file mode 100644 index 000000000000..44678001f9b6 --- /dev/null +++ b/internal/service/odb/cloud_exadata_infrastructure_test.go @@ -0,0 +1,450 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package odb_test + +import ( + "context" + "errors" + "fmt" + "strings" + "testing" + + "github.com/aws/aws-sdk-go-v2/service/odb" + odbtypes "github.com/aws/aws-sdk-go-v2/service/odb/types" + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/terraform" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/create" + tfodb "github.com/hashicorp/terraform-provider-aws/internal/service/odb" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + "github.com/hashicorp/terraform-provider-aws/names" +) + +// Acceptance test access AWS and cost money to run. + +type cloudExaDataInfraResourceTest struct { + displayNamePrefix string +} + +var exaInfraTestResource = cloudExaDataInfraResourceTest{ + displayNamePrefix: "Ofake-exa", +} + +func TestAccODBCloudExadataInfrastructureResource_basic(t *testing.T) { + ctx := acctest.Context(t) + + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + + var cloudExaDataInfrastructure odbtypes.CloudExadataInfrastructure + resourceName := "aws_odb_cloud_exadata_infrastructure.test" + rName := sdkacctest.RandomWithPrefix(exaInfraTestResource.displayNamePrefix) + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + exaInfraTestResource.testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.ODBServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: exaInfraTestResource.testAccCheckCloudExaDataInfraDestroyed(ctx), + Steps: []resource.TestStep{ + { + Config: exaInfraTestResource.exaDataInfraResourceBasicConfig(rName), + Check: resource.ComposeAggregateTestCheckFunc( + 
exaInfraTestResource.testAccCheckCloudExadataInfrastructureExists(ctx, resourceName, &cloudExaDataInfrastructure), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} +func TestAccODBCloudExadataInfrastructureResource_withAllParameters(t *testing.T) { + ctx := acctest.Context(t) + + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + + var cloudExaDataInfrastructure odbtypes.CloudExadataInfrastructure + resourceName := "aws_odb_cloud_exadata_infrastructure.test" + rName := sdkacctest.RandomWithPrefix(exaInfraTestResource.displayNamePrefix) + domain := acctest.RandomDomainName() + emailAddress1 := acctest.RandomEmailAddress(domain) + emailAddress2 := acctest.RandomEmailAddress(domain) + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + exaInfraTestResource.testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.ODBServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: exaInfraTestResource.testAccCheckCloudExaDataInfraDestroyed(ctx), + Steps: []resource.TestStep{ + { + Config: exaInfraTestResource.exaDataInfraResourceWithAllConfig(rName, emailAddress1, emailAddress2), + Check: resource.ComposeAggregateTestCheckFunc( + exaInfraTestResource.testAccCheckCloudExadataInfrastructureExists(ctx, resourceName, &cloudExaDataInfrastructure), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccODBCloudExadataInfrastructureResource_tagging(t *testing.T) { + ctx := acctest.Context(t) + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + + var cloudExaDataInfrastructure1 odbtypes.CloudExadataInfrastructure + var cloudExaDataInfrastructure2 odbtypes.CloudExadataInfrastructure + resourceName := "aws_odb_cloud_exadata_infrastructure.test" + rName := 
sdkacctest.RandomWithPrefix(exaInfraTestResource.displayNamePrefix) + resource.Test(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + exaInfraTestResource.testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.ODBServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: exaInfraTestResource.testAccCheckCloudExaDataInfraDestroyed(ctx), + Steps: []resource.TestStep{ + { + Config: exaInfraTestResource.exaDataInfraResourceBasicConfigWithTags(rName), + Check: resource.ComposeAggregateTestCheckFunc( + exaInfraTestResource.testAccCheckCloudExadataInfrastructureExists(ctx, resourceName, &cloudExaDataInfrastructure1), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, "1"), + resource.TestCheckResourceAttr(resourceName, "tags.env", "dev"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: exaInfraTestResource.exaDataInfraResourceBasicConfig(rName), + Check: resource.ComposeAggregateTestCheckFunc( + exaInfraTestResource.testAccCheckCloudExadataInfrastructureExists(ctx, resourceName, &cloudExaDataInfrastructure2), + resource.ComposeTestCheckFunc(func(state *terraform.State) error { + if strings.Compare(*(cloudExaDataInfrastructure1.CloudExadataInfrastructureId), *(cloudExaDataInfrastructure2.CloudExadataInfrastructureId)) != 0 { + return errors.New("Should not create a new cloud exa basicExaInfraDataSource after update") + } + return nil + }), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, "0"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccODBCloudExadataInfrastructureResource_updateDisplayName(t *testing.T) { + ctx := acctest.Context(t) + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + + var cloudExaDataInfrastructure1 odbtypes.CloudExadataInfrastructure + var cloudExaDataInfrastructure2 
odbtypes.CloudExadataInfrastructure + resourceName := "aws_odb_cloud_exadata_infrastructure.test" + rName := sdkacctest.RandomWithPrefix(exaInfraTestResource.displayNamePrefix) + resource.Test(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + exaInfraTestResource.testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.ODBServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: exaInfraTestResource.testAccCheckCloudExaDataInfraDestroyed(ctx), + Steps: []resource.TestStep{ + { + Config: exaInfraTestResource.exaDataInfraResourceBasicConfig(rName), + Check: resource.ComposeAggregateTestCheckFunc( + exaInfraTestResource.testAccCheckCloudExadataInfrastructureExists(ctx, resourceName, &cloudExaDataInfrastructure1), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: exaInfraTestResource.exaDataInfraResourceBasicConfig(rName + "-u"), + Check: resource.ComposeAggregateTestCheckFunc( + exaInfraTestResource.testAccCheckCloudExadataInfrastructureExists(ctx, resourceName, &cloudExaDataInfrastructure2), + resource.ComposeTestCheckFunc(func(state *terraform.State) error { + if strings.Compare(*(cloudExaDataInfrastructure1.CloudExadataInfrastructureId), *(cloudExaDataInfrastructure2.CloudExadataInfrastructureId)) == 0 { + return errors.New("Should create a new cloud exa basicExaInfraDataSource after update") + } + return nil + }), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccODBCloudExadataInfrastructureResource_updateMaintenanceWindow(t *testing.T) { + ctx := acctest.Context(t) + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + + var cloudExaDataInfrastructure1 odbtypes.CloudExadataInfrastructure + var cloudExaDataInfrastructure2 odbtypes.CloudExadataInfrastructure + resourceName := "aws_odb_cloud_exadata_infrastructure.test" + rName := 
sdkacctest.RandomWithPrefix(exaInfraTestResource.displayNamePrefix) + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + exaInfraTestResource.testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.ODBServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: exaInfraTestResource.testAccCheckCloudExaDataInfraDestroyed(ctx), + Steps: []resource.TestStep{ + { + Config: exaInfraTestResource.exaDataInfraResourceBasicConfig(rName), + Check: resource.ComposeAggregateTestCheckFunc( + exaInfraTestResource.testAccCheckCloudExadataInfrastructureExists(ctx, resourceName, &cloudExaDataInfrastructure1), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: exaInfraTestResource.basicWithCustomMaintenanceWindow(rName), + Check: resource.ComposeAggregateTestCheckFunc( + exaInfraTestResource.testAccCheckCloudExadataInfrastructureExists(ctx, resourceName, &cloudExaDataInfrastructure2), + resource.ComposeTestCheckFunc(func(state *terraform.State) error { + if strings.Compare(*(cloudExaDataInfrastructure1.CloudExadataInfrastructureId), *(cloudExaDataInfrastructure2.CloudExadataInfrastructureId)) != 0 { + return errors.New("Should not create a new cloud exa basicExaInfraDataSource after update") + } + return nil + }), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccODBCloudExadataInfrastructureResource_disappears(t *testing.T) { + ctx := acctest.Context(t) + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + + var cloudExaDataInfrastructure odbtypes.CloudExadataInfrastructure + + rName := sdkacctest.RandomWithPrefix(exaInfraTestResource.displayNamePrefix) + resourceName := "aws_odb_cloud_exadata_infrastructure.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + 
exaInfraTestResource.testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.ODBServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: exaInfraTestResource.testAccCheckCloudExaDataInfraDestroyed(ctx), + Steps: []resource.TestStep{ + { + Config: exaInfraTestResource.exaDataInfraResourceBasicConfig(rName), + Check: resource.ComposeTestCheckFunc( + exaInfraTestResource.testAccCheckCloudExadataInfrastructureExists(ctx, resourceName, &cloudExaDataInfrastructure), + acctest.CheckFrameworkResourceDisappears(ctx, acctest.Provider, tfodb.ResourceCloudExadataInfrastructure, resourceName), + ), + ExpectNonEmptyPlan: true, + }, + }, + }) +} + +func (cloudExaDataInfraResourceTest) testAccCheckCloudExaDataInfraDestroyed(ctx context.Context) resource.TestCheckFunc { + return func(s *terraform.State) error { + conn := acctest.Provider.Meta().(*conns.AWSClient).ODBClient(ctx) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "aws_odb_cloud_exadata_infrastructure" { + continue + } + _, err := tfodb.FindExadataInfraResourceByID(ctx, conn, rs.Primary.ID) + if tfresource.NotFound(err) { + return nil + } + if err != nil { + return create.Error(names.ODB, create.ErrActionCheckingDestroyed, tfodb.ResNameCloudExadataInfrastructure, rs.Primary.ID, err) + } + + return create.Error(names.ODB, create.ErrActionCheckingDestroyed, tfodb.ResNameCloudExadataInfrastructure, rs.Primary.ID, errors.New("not destroyed")) + } + + return nil + } +} + +func (cloudExaDataInfraResourceTest) testAccCheckCloudExadataInfrastructureExists(ctx context.Context, name string, cloudExadataInfrastructure *odbtypes.CloudExadataInfrastructure) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[name] + if !ok { + return create.Error(names.ODB, create.ErrActionCheckingExistence, tfodb.ResNameCloudExadataInfrastructure, name, errors.New("not found")) + } + + if rs.Primary.ID == "" { + return 
create.Error(names.ODB, create.ErrActionCheckingExistence, tfodb.ResNameCloudExadataInfrastructure, name, errors.New("not set")) + } + + conn := acctest.Provider.Meta().(*conns.AWSClient).ODBClient(ctx) + + resp, err := tfodb.FindExadataInfraResourceByID(ctx, conn, rs.Primary.ID) + if err != nil { + return create.Error(names.ODB, create.ErrActionCheckingExistence, tfodb.ResNameCloudExadataInfrastructure, rs.Primary.ID, err) + } + + *cloudExadataInfrastructure = *resp + + return nil + } +} + +func (cloudExaDataInfraResourceTest) testAccPreCheck(ctx context.Context, t *testing.T) { + conn := acctest.Provider.Meta().(*conns.AWSClient).ODBClient(ctx) + + input := odb.ListCloudExadataInfrastructuresInput{} + + _, err := conn.ListCloudExadataInfrastructures(ctx, &input) + + if acctest.PreCheckSkipError(err) { + t.Skipf("skipping acceptance testing: %s", err) + } + if err != nil { + t.Fatalf("unexpected PreCheck error: %s", err) + } +} + +func (cloudExaDataInfraResourceTest) exaDataInfraResourceWithAllConfig(randomId, emailAddress1, emailAddress2 string) string { + exaDataInfra := fmt.Sprintf(` + + +resource "aws_odb_cloud_exadata_infrastructure" "test" { + display_name = %[1]q + shape = "Exadata.X11M" + storage_count = 3 + compute_count = 2 + availability_zone_id = "use1-az6" + customer_contacts_to_send_to_oci = [{ email = "%[2]s" }, { email = "%[3]s" }] + database_server_type = "X11M" + storage_server_type = "X11M-HC" + maintenance_window { + custom_action_timeout_in_mins = 16 + days_of_week = [{ name = "MONDAY" }, { name = "TUESDAY" }] + hours_of_day = [11, 16] + is_custom_action_timeout_enabled = true + lead_time_in_weeks = 3 + months = [{ name = "FEBRUARY" }, { name = "MAY" }, { name = "AUGUST" }, { name = "NOVEMBER" }] + patching_mode = "ROLLING" + preference = "CUSTOM_PREFERENCE" + weeks_of_month = [2, 4] + } + tags = { + "env" = "dev" + } + +} +`, randomId, emailAddress1, emailAddress2) + return exaDataInfra +} +func (cloudExaDataInfraResourceTest) 
exaDataInfraResourceBasicConfig(displayName string) string { + exaInfra := fmt.Sprintf(` +resource "aws_odb_cloud_exadata_infrastructure" "test" { + display_name = %[1]q + shape = "Exadata.X9M" + storage_count = 3 + compute_count = 2 + availability_zone_id = "use1-az6" + maintenance_window { + custom_action_timeout_in_mins = 16 + is_custom_action_timeout_enabled = true + patching_mode = "ROLLING" + preference = "NO_PREFERENCE" + } +} +`, displayName) + return exaInfra +} +func (cloudExaDataInfraResourceTest) exaDataInfraResourceBasicConfigWithTags(displayName string) string { + exaInfra := fmt.Sprintf(` +resource "aws_odb_cloud_exadata_infrastructure" "test" { + display_name = %[1]q + shape = "Exadata.X9M" + storage_count = 3 + compute_count = 2 + availability_zone_id = "use1-az6" + maintenance_window { + custom_action_timeout_in_mins = 16 + is_custom_action_timeout_enabled = true + patching_mode = "ROLLING" + preference = "NO_PREFERENCE" + } + tags = { + "env" = "dev" + } +} +`, displayName) + return exaInfra +} + +func (cloudExaDataInfraResourceTest) basicWithCustomMaintenanceWindow(displayName string) string { + exaInfra := fmt.Sprintf(` +resource "aws_odb_cloud_exadata_infrastructure" "test" { + display_name = %[1]q + shape = "Exadata.X9M" + storage_count = 3 + compute_count = 2 + availability_zone_id = "use1-az6" + maintenance_window { + custom_action_timeout_in_mins = 16 + days_of_week = [{ name = "MONDAY" }, { name = "TUESDAY" }] + hours_of_day = [11, 16] + is_custom_action_timeout_enabled = true + lead_time_in_weeks = 3 + months = [{ name = "FEBRUARY" }, { name = "MAY" }, { name = "AUGUST" }, { name = "NOVEMBER" }] + patching_mode = "ROLLING" + preference = "CUSTOM_PREFERENCE" + weeks_of_month = [2, 4] + } +} +`, displayName) + return exaInfra +} diff --git a/internal/service/odb/cloud_exadata_infrastructures_data_source.go b/internal/service/odb/cloud_exadata_infrastructures_data_source.go new file mode 100644 index 000000000000..1fabd21e5a84 --- /dev/null 
+++ b/internal/service/odb/cloud_exadata_infrastructures_data_source.go @@ -0,0 +1,92 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package odb + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/service/odb" + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-provider-aws/internal/create" + "github.com/hashicorp/terraform-provider-aws/internal/framework" + "github.com/hashicorp/terraform-provider-aws/internal/framework/flex" + fwtypes "github.com/hashicorp/terraform-provider-aws/internal/framework/types" + "github.com/hashicorp/terraform-provider-aws/names" +) + +// @FrameworkDataSource("aws_odb_cloud_exadata_infrastructures", name="Cloud Exadata Infrastructures") +func newDataSourceCloudExadataInfrastructuresList(context.Context) (datasource.DataSourceWithConfigure, error) { + return &dataSourceCloudExadataInfrastructuresList{}, nil +} + +const ( + DSNameCloudExadataInfrastructuresList = "Cloud Exadata Infrastructures List Data Source" +) + +type dataSourceCloudExadataInfrastructuresList struct { + framework.DataSourceWithModel[cloudExadataInfrastructuresListDataSourceModel] +} + +func (d *dataSourceCloudExadataInfrastructuresList) Schema(ctx context.Context, req datasource.SchemaRequest, resp *datasource.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: map[string]schema.Attribute{ + "cloud_exadata_infrastructures": schema.ListAttribute{ + Computed: true, + Description: "List of Cloud Exadata Infrastructures. 
Returns basic information about the Cloud Exadata Infrastructures.", + CustomType: fwtypes.NewListNestedObjectTypeOf[cloudExadataInfrastructureDataSourceListSummary](ctx), + }, + }, + } +} + +func (d *dataSourceCloudExadataInfrastructuresList) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + conn := d.Meta().ODBClient(ctx) + var data cloudExadataInfrastructuresListDataSourceModel + resp.Diagnostics.Append(req.Config.Get(ctx, &data)...) + if resp.Diagnostics.HasError() { + return + } + out, err := ListCloudExadataInfrastructures(ctx, conn) + if err != nil { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.ODB, create.ErrActionReading, DSNameCloudExadataInfrastructuresList, "", err), + err.Error(), + ) + return + } + resp.Diagnostics.Append(flex.Flatten(ctx, out, &data)...) + if resp.Diagnostics.HasError() { + return + } + resp.Diagnostics.Append(resp.State.Set(ctx, &data)...) +} + +func ListCloudExadataInfrastructures(ctx context.Context, conn *odb.Client) (*odb.ListCloudExadataInfrastructuresOutput, error) { + var out odb.ListCloudExadataInfrastructuresOutput + paginator := odb.NewListCloudExadataInfrastructuresPaginator(conn, &odb.ListCloudExadataInfrastructuresInput{}) + for paginator.HasMorePages() { + page, err := paginator.NextPage(ctx) + if err != nil { + return nil, err + } + out.CloudExadataInfrastructures = append(out.CloudExadataInfrastructures, page.CloudExadataInfrastructures...) 
+ } + return &out, nil +} + +type cloudExadataInfrastructuresListDataSourceModel struct { + framework.WithRegionModel + CloudExadataInfrastructures fwtypes.ListNestedObjectValueOf[cloudExadataInfrastructureDataSourceListSummary] `tfsdk:"cloud_exadata_infrastructures"` +} + +type cloudExadataInfrastructureDataSourceListSummary struct { + CloudExadataInfrastructureArn types.String `tfsdk:"arn"` + CloudExadataInfrastructureId types.String `tfsdk:"id"` + OciResourceAnchorName types.String `tfsdk:"oci_resource_anchor_name"` + OciUrl types.String `tfsdk:"oci_url"` + Ocid types.String `tfsdk:"ocid"` + DisplayName types.String `tfsdk:"display_name"` +} diff --git a/internal/service/odb/cloud_exadata_infrastructures_data_source_test.go b/internal/service/odb/cloud_exadata_infrastructures_data_source_test.go new file mode 100644 index 000000000000..a138b941f1a7 --- /dev/null +++ b/internal/service/odb/cloud_exadata_infrastructures_data_source_test.go @@ -0,0 +1,82 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package odb_test + +import ( + "context" + "errors" + "strconv" + "testing" + + "github.com/aws/aws-sdk-go-v2/service/odb" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/terraform" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/create" + tfodb "github.com/hashicorp/terraform-provider-aws/internal/service/odb" + "github.com/hashicorp/terraform-provider-aws/names" +) + +type listExaInfraTest struct { +} + +func TestAccODBListCloudExadataInfrastructuresDataSource_basic(t *testing.T) { + ctx := acctest.Context(t) + var listExaInfraDSTest = listExaInfraTest{} + var infraList odb.ListCloudExadataInfrastructuresOutput + dataSourceName := "data.aws_odb_cloud_exadata_infrastructures.test" + resource.Test(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + listExaInfraDSTest.testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.ODBServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + Config: listExaInfraDSTest.basic(), + Check: resource.ComposeAggregateTestCheckFunc( + resource.ComposeTestCheckFunc(func(s *terraform.State) error { + listExaInfraDSTest.countExaInfrastructures(ctx, dataSourceName, &infraList) + resource.TestCheckResourceAttr(dataSourceName, "cloud_exadata_infrastructures.#", strconv.Itoa(len(infraList.CloudExadataInfrastructures))) + return nil + }, + ), + ), + }, + }, + }) +} + +func (listExaInfraTest) basic() string { + return `data "aws_odb_cloud_exadata_infrastructures" "test" {}` +} + +func (listExaInfraTest) countExaInfrastructures(ctx context.Context, name string, listOfInfra *odb.ListCloudExadataInfrastructuresOutput) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := 
s.RootModule().Resources[name] + if !ok { + return create.Error(names.ODB, create.ErrActionCheckingExistence, tfodb.DSNameCloudExadataInfrastructuresList, name, errors.New("not found")) + } + conn := acctest.Provider.Meta().(*conns.AWSClient).ODBClient(ctx) + resp, err := tfodb.ListCloudExadataInfrastructures(ctx, conn) + if err != nil { + return create.Error(names.ODB, create.ErrActionCheckingExistence, tfodb.DSNameCloudExadataInfrastructuresList, rs.Primary.ID, err) + } + listOfInfra.CloudExadataInfrastructures = resp.CloudExadataInfrastructures + return nil + } +} +func (listExaInfraTest) testAccPreCheck(ctx context.Context, t *testing.T) { + conn := acctest.Provider.Meta().(*conns.AWSClient).ODBClient(ctx) + input := odb.ListCloudExadataInfrastructuresInput{} + _, err := conn.ListCloudExadataInfrastructures(ctx, &input) + if acctest.PreCheckSkipError(err) { + t.Skipf("skipping acceptance testing: %s", err) + } + if err != nil { + t.Fatalf("unexpected PreCheck error: %s", err) + } +} diff --git a/internal/service/odb/cloud_vm_cluster.go b/internal/service/odb/cloud_vm_cluster.go new file mode 100644 index 000000000000..3e144ef00cdf --- /dev/null +++ b/internal/service/odb/cloud_vm_cluster.go @@ -0,0 +1,734 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package odb + +import ( + "context" + "errors" + "strings" + "time" + + "github.com/YakDriver/regexache" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/odb" + odbtypes "github.com/aws/aws-sdk-go-v2/service/odb/types" + "github.com/hashicorp/terraform-plugin-framework-timeouts/resource/timeouts" + "github.com/hashicorp/terraform-plugin-framework-timetypes/timetypes" + "github.com/hashicorp/terraform-plugin-framework-validators/listvalidator" + "github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/boolplanmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/float32planmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/float64planmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/int32planmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/listplanmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/setplanmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" + "github.com/hashicorp/terraform-provider-aws/internal/create" + "github.com/hashicorp/terraform-provider-aws/internal/enum" + "github.com/hashicorp/terraform-provider-aws/internal/errs" + "github.com/hashicorp/terraform-provider-aws/internal/errs/fwdiag" + "github.com/hashicorp/terraform-provider-aws/internal/framework" + 
"github.com/hashicorp/terraform-provider-aws/internal/framework/flex" + fwtypes "github.com/hashicorp/terraform-provider-aws/internal/framework/types" + tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + "github.com/hashicorp/terraform-provider-aws/names" +) + +// @FrameworkResource("aws_odb_cloud_vm_cluster", name="Cloud Vm Cluster") +// @Tags(identifierAttribute="arn") +func newResourceCloudVmCluster(_ context.Context) (resource.ResourceWithConfigure, error) { + r := &resourceCloudVmCluster{} + + r.SetDefaultCreateTimeout(24 * time.Hour) + r.SetDefaultUpdateTimeout(24 * time.Hour) + r.SetDefaultDeleteTimeout(24 * time.Hour) + + return r, nil +} + +const ( + ResNameCloudVmCluster = "Cloud Vm Cluster" + MajorGiVersionPattern = `^\d+\.0\.0\.0$` +) + +var ResourceCloudVmCluster = newResourceCloudVmCluster + +type resourceCloudVmCluster struct { + framework.ResourceWithModel[cloudVmClusterResourceModel] + framework.WithTimeouts + framework.WithImportByID +} + +func (r *resourceCloudVmCluster) Schema(ctx context.Context, req resource.SchemaRequest, resp *resource.SchemaResponse) { + statusType := fwtypes.StringEnumType[odbtypes.ResourceStatus]() + licenseModelType := fwtypes.StringEnumType[odbtypes.LicenseModel]() + diskRedundancyType := fwtypes.StringEnumType[odbtypes.DiskRedundancy]() + computeModelType := fwtypes.StringEnumType[odbtypes.ComputeModel]() + giVersionValidator := []validator.String{ + stringvalidator.RegexMatches(regexache.MustCompile(MajorGiVersionPattern), "Gi version must be of the format 19.0.0.0"), + } + resp.Schema = schema.Schema{ + Attributes: map[string]schema.Attribute{ + names.AttrARN: framework.ARNAttributeComputedOnly(), + names.AttrID: framework.IDAttribute(), + "cloud_exadata_infrastructure_id": schema.StringAttribute{ + Required: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + Description: "The unique 
identifier of the Exadata infrastructure for this VM cluster. Changing this will create a new resource.", + }, + names.AttrClusterName: schema.StringAttribute{ + Optional: true, + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + stringplanmodifier.UseStateForUnknown(), + }, + Description: "The name of the Grid Infrastructure (GI) cluster. Changing this will create a new resource.", + }, + "cpu_core_count": schema.Int32Attribute{ + Required: true, + PlanModifiers: []planmodifier.Int32{ + int32planmodifier.RequiresReplace(), + }, + Description: "The number of CPU cores to enable on the VM cluster. Changing this will create a new resource.", + }, + "data_storage_size_in_tbs": schema.Float64Attribute{ + Required: true, + PlanModifiers: []planmodifier.Float64{ + float64planmodifier.RequiresReplace(), + }, + Description: "The size of the data disk group, in terabytes (TBs), to allocate for the VM cluster. Changing this will create a new resource.", + }, + "db_node_storage_size_in_gbs": schema.Int32Attribute{ + Optional: true, + Computed: true, + PlanModifiers: []planmodifier.Int32{ + int32planmodifier.RequiresReplace(), + int32planmodifier.UseStateForUnknown(), + }, + Description: "The amount of local node storage, in gigabytes (GBs), to allocate for the VM cluster. Changing this will create a new resource.", + }, + "db_servers": schema.SetAttribute{ + CustomType: fwtypes.SetOfStringType, + ElementType: types.StringType, + Required: true, + PlanModifiers: []planmodifier.Set{ + setplanmodifier.RequiresReplace(), + }, + Description: "The list of database servers for the VM cluster. 
Changing this will create a new resource.", + }, + "disk_redundancy": schema.StringAttribute{ + CustomType: diskRedundancyType, + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + Description: "The type of redundancy for the VM cluster: NORMAL (2-way) or HIGH (3-way).", + }, + names.AttrDisplayName: schema.StringAttribute{ + Required: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + Description: "A user-friendly name for the VM cluster. This member is required. Changing this will create a new resource.", + }, + names.AttrDomain: schema.StringAttribute{ + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + Description: "The domain name associated with the VM cluster.", + }, + "gi_version": schema.StringAttribute{ + Required: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + //Note: underlying API only accepts major gi_version. + Validators: giVersionValidator, + Description: "A valid software version of Oracle Grid Infrastructure (GI). To get the list of valid values, use the ListGiVersions operation and specify the shape of the Exadata infrastructure. Example: 19.0.0.0 This member is required. Changing this will create a new resource.", + }, + //Underlying API returns complete gi version. For example if gi_version 23.0.0.0 then underlying api returns a version starting with 23 + "gi_version_computed": schema.StringAttribute{ + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + Description: "A complete software version of Oracle Grid Infrastructure (GI).", + }, + //Underlying API treats Hostname as hostname prefix. Therefore, explicitly setting it. API also returns new hostname prefix by appending the input hostname + //prefix. 
Therefore, we have hostname_prefix and hostname_prefix_computed + "hostname_prefix_computed": schema.StringAttribute{ + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + Description: "The host name for the VM cluster. Constraints: - Can't be \"localhost\" or \"hostname\". - Can't contain \"-version\". - The maximum length of the combined hostname and domain is 63 characters. - The hostname must be unique within the subnet. " + + "This member is required. Changing this will create a new resource.", + }, + "hostname_prefix": schema.StringAttribute{ + Required: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + Description: "The host name prefix for the VM cluster. Constraints: - Can't be \"localhost\" or \"hostname\". - Can't contain \"-version\". - The maximum length of the combined hostname and domain is 63 characters. - The hostname must be unique within the subnet. " + + "This member is required. Changing this will create a new resource.", + }, + "iorm_config_cache": schema.ListAttribute{ + Computed: true, + PlanModifiers: []planmodifier.List{ + listplanmodifier.UseStateForUnknown(), + }, + CustomType: fwtypes.NewListNestedObjectTypeOf[cloudVMCExadataIormConfigResourceModel](ctx), + Description: "The Exadata IORM (I/O Resource Manager) configuration cache details for the VM cluster.", + }, + "is_local_backup_enabled": schema.BoolAttribute{ + Optional: true, + Computed: true, + PlanModifiers: []planmodifier.Bool{ + boolplanmodifier.RequiresReplace(), + }, + Description: "Specifies whether to enable database backups to local Exadata storage for the VM cluster. Changing this will create a new resource.", + }, + "is_sparse_diskgroup_enabled": schema.BoolAttribute{ + Optional: true, + Computed: true, + PlanModifiers: []planmodifier.Bool{ + boolplanmodifier.RequiresReplace(), + }, + Description: "Specifies whether to create a sparse disk group for the VM cluster. 
Changing this will create a new resource.", + }, + "last_update_history_entry_id": schema.StringAttribute{ + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + Description: "The OCID of the most recent maintenance update history entry.", + }, + "license_model": schema.StringAttribute{ + CustomType: licenseModelType, + Optional: true, + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + Description: "The Oracle license model to apply to the VM cluster. Default: LICENSE_INCLUDED. Changing this will create a new resource.", + }, + "listener_port": schema.Int32Attribute{ + Computed: true, + PlanModifiers: []planmodifier.Int32{ + int32planmodifier.UseStateForUnknown(), + }, + Description: "The listener port number configured on the VM cluster.", + }, + "memory_size_in_gbs": schema.Int32Attribute{ + Optional: true, + Computed: true, + PlanModifiers: []planmodifier.Int32{ + int32planmodifier.RequiresReplace(), + int32planmodifier.UseStateForUnknown(), + }, + Description: "The amount of memory, in gigabytes (GBs), to allocate for the VM cluster. 
Changing this will create a new resource.", + }, + "node_count": schema.Int32Attribute{ + Computed: true, + PlanModifiers: []planmodifier.Int32{ + int32planmodifier.UseStateForUnknown(), + }, + Description: "The total number of nodes in the VM cluster.", + }, + "ocid": schema.StringAttribute{ + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + Description: "The OCID (Oracle Cloud Identifier) of the VM cluster.", + }, + "oci_resource_anchor_name": schema.StringAttribute{ + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + Description: "The name of the OCI resource anchor associated with the VM cluster.", + }, + "oci_url": schema.StringAttribute{ + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + Description: "The HTTPS link to the VM cluster resource in OCI.", + }, + "odb_network_id": schema.StringAttribute{ + Required: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + Description: "The unique identifier of the ODB network for the VM cluster. This member is required. 
Changing this will create a new resource.", + }, + "percent_progress": schema.Float32Attribute{ + Computed: true, + PlanModifiers: []planmodifier.Float32{ + float32planmodifier.UseStateForUnknown(), + }, + Description: "The percentage of progress made on the current operation for the VM cluster.", + }, + "scan_dns_name": schema.StringAttribute{ + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + Description: "The fully qualified domain name (FQDN) for the SCAN IP addresses associated with the VM cluster.", + }, + "scan_dns_record_id": schema.StringAttribute{ + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + Description: "The OCID of the DNS record for the SCAN IPs linked to the VM cluster.", + }, + "scan_ip_ids": schema.ListAttribute{ + CustomType: fwtypes.ListOfStringType, + ElementType: types.StringType, + Computed: true, + PlanModifiers: []planmodifier.List{ + listplanmodifier.UseStateForUnknown(), + }, + Description: "The list of OCIDs for SCAN IP addresses associated with the VM cluster.", + }, + "shape": schema.StringAttribute{ + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + Description: "The hardware model name of the Exadata infrastructure running the VM cluster.", + }, + "ssh_public_keys": schema.SetAttribute{ + Required: true, + CustomType: fwtypes.SetOfStringType, + ElementType: types.StringType, + PlanModifiers: []planmodifier.Set{ + setplanmodifier.RequiresReplace(), + }, + Description: "The public key portion of one or more key pairs used for SSH access to the VM cluster. This member is required. 
Changing this will create a new resource.", + }, + names.AttrStatus: schema.StringAttribute{ + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + CustomType: statusType, + Description: "The current lifecycle status of the VM cluster.", + }, + names.AttrStatusReason: schema.StringAttribute{ + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + Description: "Additional information regarding the current status of the VM cluster.", + }, + "storage_size_in_gbs": schema.Int32Attribute{ + Computed: true, + PlanModifiers: []planmodifier.Int32{ + int32planmodifier.UseStateForUnknown(), + }, + Description: "The local node storage allocated to the VM cluster, in gigabytes (GB).", + }, + "system_version": schema.StringAttribute{ + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + Description: "The operating system version of the image chosen for the VM cluster.", + }, + "scan_listener_port_tcp": schema.Int32Attribute{ + Computed: true, + Optional: true, + PlanModifiers: []planmodifier.Int32{ + int32planmodifier.RequiresReplace(), + int32planmodifier.UseStateForUnknown(), + }, + Description: "The port number for TCP connections to the single client access name (SCAN) listener. " + + "Valid values: 1024–8999 with the following exceptions: 2484 , 6100 , 6200 , 7060, 7070 , 7085 , and 7879Default: 1521. " + + "Changing this will create a new resource.", + }, + "timezone": schema.StringAttribute{ + Optional: true, + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + stringplanmodifier.UseStateForUnknown(), + }, + Description: "The configured time zone of the VM cluster. 
Changing this will create a new resource.", + }, + "vip_ids": schema.ListAttribute{ + Computed: true, + CustomType: fwtypes.ListOfStringType, + PlanModifiers: []planmodifier.List{ + listplanmodifier.UseStateForUnknown(), + }, + ElementType: types.StringType, + Description: "The virtual IP (VIP) addresses assigned to the VM cluster. CRS assigns one VIP per node for failover support.", + }, + names.AttrCreatedAt: schema.StringAttribute{ + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + CustomType: timetypes.RFC3339Type{}, + Description: "The timestamp when the VM cluster was created.", + }, + "compute_model": schema.StringAttribute{ + CustomType: computeModelType, + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + Description: "The compute model used when the instance is created or cloned — either ECPU or OCPU. ECPU is a virtualized compute unit; OCPU is a physical processor core with hyper-threading.", + }, + names.AttrTags: tftags.TagsAttribute(), + names.AttrTagsAll: tftags.TagsAttributeComputedOnly(), + }, + Blocks: map[string]schema.Block{ + names.AttrTimeouts: timeouts.Block(ctx, timeouts.Opts{ + Create: true, + Update: true, + Delete: true, + }), + "data_collection_options": schema.ListNestedBlock{ + CustomType: fwtypes.NewListNestedObjectTypeOf[cloudVMCDataCollectionOptionsResourceModel](ctx), + Validators: []validator.List{ + listvalidator.SizeAtMost(1), + listvalidator.IsRequired(), + }, + PlanModifiers: []planmodifier.List{ + listplanmodifier.RequiresReplace(), + }, + Description: "The set of preferences for the various diagnostic collection options for the VM cluster. 
Changing this will create a new resource.", + NestedObject: schema.NestedBlockObject{ + Attributes: map[string]schema.Attribute{ + "is_diagnostics_events_enabled": schema.BoolAttribute{ + Required: true, + PlanModifiers: []planmodifier.Bool{ + boolplanmodifier.RequiresReplace(), + }, + }, + "is_health_monitoring_enabled": schema.BoolAttribute{ + Required: true, + PlanModifiers: []planmodifier.Bool{ + boolplanmodifier.RequiresReplace(), + }, + }, + "is_incident_logs_enabled": schema.BoolAttribute{ + Required: true, + PlanModifiers: []planmodifier.Bool{ + boolplanmodifier.RequiresReplace(), + }, + }, + }, + }, + }, + }, + } +} + +func (r *resourceCloudVmCluster) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { + conn := r.Meta().ODBClient(ctx) + var plan cloudVmClusterResourceModel + resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...) + if resp.Diagnostics.HasError() { + return + } + + input := odb.CreateCloudVmClusterInput{ + Tags: getTagsIn(ctx), + //Underlying API treats Hostname as hostname prefix. + Hostname: plan.HostnamePrefix.ValueStringPointer(), + } + resp.Diagnostics.Append(flex.Expand(ctx, plan, &input)...) 
+ if resp.Diagnostics.HasError() { + return + } + out, err := conn.CreateCloudVmCluster(ctx, &input) + if err != nil { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.ODB, create.ErrActionCreating, ResNameCloudVmCluster, plan.DisplayName.ValueString(), err), + err.Error(), + ) + return + } + if out == nil || out.CloudVmClusterId == nil { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.ODB, create.ErrActionCreating, ResNameCloudVmCluster, plan.DisplayName.ValueString(), nil), + errors.New("empty output").Error(), + ) + return + } + + createTimeout := r.CreateTimeout(ctx, plan.Timeouts) + createdVmCluster, err := waitCloudVmClusterCreated(ctx, conn, aws.ToString(out.CloudVmClusterId), createTimeout) + resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root(names.AttrID), aws.ToString(out.CloudVmClusterId))...) + if err != nil { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.ODB, create.ErrActionWaitingForCreation, ResNameCloudVmCluster, plan.DisplayName.ValueString(), err), + err.Error(), + ) + return + } + hostnamePrefix := strings.Split(*input.Hostname, "-")[0] + plan.HostnamePrefix = flex.StringValueToFramework(ctx, hostnamePrefix) + plan.HostnamePrefixComputed = flex.StringToFramework(ctx, createdVmCluster.Hostname) + //scan listener port not returned by API directly + plan.ScanListenerPortTcp = flex.Int32ToFramework(ctx, createdVmCluster.ListenerPort) + plan.GiVersionComputed = flex.StringToFramework(ctx, createdVmCluster.GiVersion) + giVersionMajor, err := getMajorGiVersion(createdVmCluster.GiVersion) + if err != nil { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.ODB, create.ErrActionWaitingForCreation, ResNameCloudVmCluster, plan.DisplayName.ValueString(), err), + err.Error(), + ) + return + } + plan.GiVersion = flex.StringToFramework(ctx, giVersionMajor) + resp.Diagnostics.Append(flex.Flatten(ctx, createdVmCluster, &plan)...) 
+ if resp.Diagnostics.HasError() { + return + } + resp.Diagnostics.Append(resp.State.Set(ctx, plan)...) +} + +func (r *resourceCloudVmCluster) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { + conn := r.Meta().ODBClient(ctx) + var state cloudVmClusterResourceModel + resp.Diagnostics.Append(req.State.Get(ctx, &state)...) + if resp.Diagnostics.HasError() { + return + } + + out, err := findCloudVmClusterForResourceByID(ctx, conn, state.CloudVmClusterId.ValueString()) + if tfresource.NotFound(err) { + resp.Diagnostics.Append(fwdiag.NewResourceNotFoundWarningDiagnostic(err)) + resp.State.RemoveResource(ctx) + return + } + if err != nil { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.ODB, create.ErrActionReading, ResNameCloudVmCluster, state.CloudVmClusterId.ValueString(), err), + err.Error(), + ) + return + } + hostnamePrefix := strings.Split(*out.Hostname, "-")[0] + state.HostnamePrefix = types.StringValue(hostnamePrefix) + state.HostnamePrefixComputed = types.StringValue(*out.Hostname) + //scan listener port not returned by API directly + state.ScanListenerPortTcp = flex.Int32ToFramework(ctx, out.ListenerPort) + state.GiVersionComputed = flex.StringToFramework(ctx, out.GiVersion) + giVersionMajor, err := getMajorGiVersion(out.GiVersion) + if err != nil { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.ODB, create.ErrActionWaitingForCreation, ResNameCloudVmCluster, state.CloudVmClusterId.ValueString(), err), + err.Error(), + ) + return + } + state.GiVersion = flex.StringToFramework(ctx, giVersionMajor) + resp.Diagnostics.Append(flex.Flatten(ctx, out, &state)...) + if resp.Diagnostics.HasError() { + return + } + resp.Diagnostics.Append(resp.State.Set(ctx, &state)...) 
+} + +func (r *resourceCloudVmCluster) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { + conn := r.Meta().ODBClient(ctx) + var state cloudVmClusterResourceModel + resp.Diagnostics.Append(req.State.Get(ctx, &state)...) + if resp.Diagnostics.HasError() { + return + } + + input := odb.DeleteCloudVmClusterInput{ + CloudVmClusterId: state.CloudVmClusterId.ValueStringPointer(), + } + _, err := conn.DeleteCloudVmCluster(ctx, &input) + if err != nil { + if errs.IsA[*odbtypes.ResourceNotFoundException](err) { + return + } + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.ODB, create.ErrActionDeleting, ResNameCloudVmCluster, state.CloudVmClusterId.ValueString(), err), + err.Error(), + ) + return + } + + deleteTimeout := r.DeleteTimeout(ctx, state.Timeouts) + _, err = waitCloudVmClusterDeleted(ctx, conn, state.CloudVmClusterId.ValueString(), deleteTimeout) + if err != nil { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.ODB, create.ErrActionWaitingForDeletion, ResNameCloudVmCluster, state.CloudVmClusterId.ValueString(), err), + err.Error(), + ) + return + } +} + +func waitCloudVmClusterCreated(ctx context.Context, conn *odb.Client, id string, timeout time.Duration) (*odbtypes.CloudVmCluster, error) { + stateConf := &retry.StateChangeConf{ + Pending: enum.Slice(odbtypes.ResourceStatusProvisioning), + Target: enum.Slice(odbtypes.ResourceStatusAvailable, odbtypes.ResourceStatusFailed), + Refresh: statusCloudVmCluster(ctx, conn, id), + Timeout: timeout, + NotFoundChecks: 20, + ContinuousTargetOccurence: 2, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + if out, ok := outputRaw.(*odbtypes.CloudVmCluster); ok { + return out, err + } + + return nil, err +} + +func waitCloudVmClusterDeleted(ctx context.Context, conn *odb.Client, id string, timeout time.Duration) (*odbtypes.CloudVmCluster, error) { + stateConf := &retry.StateChangeConf{ + Pending: 
// getMajorGiVersion reduces a full Grid Infrastructure version (e.g.
// "19.27.0.0") to the major-only form accepted by the CreateCloudVmCluster
// API (e.g. "19.0.0.0").
//
// The derived value must match MajorGiVersionPattern (`^\d+\.0\.0\.0$`), i.e.
// the leading component must be purely numeric; otherwise an error is
// returned. Bug fix: a nil input now returns an error instead of panicking on
// the pointer dereference.
func getMajorGiVersion(giVersionComputed *string) (*string, error) {
	if giVersionComputed == nil {
		return nil, errors.New("gi_version_computed is nil; cannot derive major gi_version")
	}

	major := strings.Split(*giVersionComputed, ".")[0]
	// Equivalent to matching "major.0.0.0" against ^\d+\.0\.0\.0$: the leading
	// component must be one or more ASCII digits.
	if major == "" {
		return nil, errors.New("gi_version major retrieved from gi_version_computed does not match the pattern 19.0.0.0")
	}
	for _, r := range major {
		if r < '0' || r > '9' {
			return nil, errors.New("gi_version major retrieved from gi_version_computed does not match the pattern 19.0.0.0")
		}
	}

	giVersionMajor := major + ".0.0.0"
	return &giVersionMajor, nil
}
`tfsdk:"cluster_name"` + CpuCoreCount types.Int32 `tfsdk:"cpu_core_count"` + DataCollectionOptions fwtypes.ListNestedObjectValueOf[cloudVMCDataCollectionOptionsResourceModel] `tfsdk:"data_collection_options"` + DataStorageSizeInTBs types.Float64 `tfsdk:"data_storage_size_in_tbs"` + DbNodeStorageSizeInGBs types.Int32 `tfsdk:"db_node_storage_size_in_gbs"` + DbServers fwtypes.SetValueOf[types.String] `tfsdk:"db_servers"` + DiskRedundancy fwtypes.StringEnum[odbtypes.DiskRedundancy] `tfsdk:"disk_redundancy"` + DisplayName types.String `tfsdk:"display_name"` + Domain types.String `tfsdk:"domain"` + GiVersion types.String `tfsdk:"gi_version" autoflex:",noflatten"` + GiVersionComputed types.String `tfsdk:"gi_version_computed" autoflex:",noflatten"` + HostnamePrefixComputed types.String `tfsdk:"hostname_prefix_computed" autoflex:",noflatten"` + HostnamePrefix types.String `tfsdk:"hostname_prefix" autoflex:"-"` + IormConfigCache fwtypes.ListNestedObjectValueOf[cloudVMCExadataIormConfigResourceModel] `tfsdk:"iorm_config_cache"` + IsLocalBackupEnabled types.Bool `tfsdk:"is_local_backup_enabled"` + IsSparseDiskGroupEnabled types.Bool `tfsdk:"is_sparse_diskgroup_enabled"` + LastUpdateHistoryEntryId types.String `tfsdk:"last_update_history_entry_id"` + LicenseModel fwtypes.StringEnum[odbtypes.LicenseModel] `tfsdk:"license_model"` + ListenerPort types.Int32 `tfsdk:"listener_port"` + MemorySizeInGbs types.Int32 `tfsdk:"memory_size_in_gbs"` + NodeCount types.Int32 `tfsdk:"node_count"` + Ocid types.String `tfsdk:"ocid"` + OciResourceAnchorName types.String `tfsdk:"oci_resource_anchor_name"` + OciUrl types.String `tfsdk:"oci_url"` + OdbNetworkId types.String `tfsdk:"odb_network_id"` + PercentProgress types.Float32 `tfsdk:"percent_progress"` + ScanDnsName types.String `tfsdk:"scan_dns_name"` + ScanDnsRecordId types.String `tfsdk:"scan_dns_record_id"` + ScanIpIds fwtypes.ListValueOf[types.String] `tfsdk:"scan_ip_ids"` + Shape types.String `tfsdk:"shape"` + SshPublicKeys 
fwtypes.SetValueOf[types.String] `tfsdk:"ssh_public_keys"` + Status fwtypes.StringEnum[odbtypes.ResourceStatus] `tfsdk:"status"` + StatusReason types.String `tfsdk:"status_reason"` + StorageSizeInGBs types.Int32 `tfsdk:"storage_size_in_gbs"` + SystemVersion types.String `tfsdk:"system_version"` + Timeouts timeouts.Value `tfsdk:"timeouts"` + Timezone types.String `tfsdk:"timezone"` + VipIds fwtypes.ListValueOf[types.String] `tfsdk:"vip_ids"` + CreatedAt timetypes.RFC3339 `tfsdk:"created_at"` + ComputeModel fwtypes.StringEnum[odbtypes.ComputeModel] `tfsdk:"compute_model"` + ScanListenerPortTcp types.Int32 `tfsdk:"scan_listener_port_tcp" autoflex:",noflatten"` + Tags tftags.Map `tfsdk:"tags"` + TagsAll tftags.Map `tfsdk:"tags_all"` +} + +type cloudVMCDataCollectionOptionsResourceModel struct { + IsDiagnosticsEventsEnabled types.Bool `tfsdk:"is_diagnostics_events_enabled"` + IsHealthMonitoringEnabled types.Bool `tfsdk:"is_health_monitoring_enabled"` + IsIncidentLogsEnabled types.Bool `tfsdk:"is_incident_logs_enabled"` +} + +type cloudVMCExadataIormConfigResourceModel struct { + DbPlans fwtypes.ListNestedObjectValueOf[cloudVMCDbIormConfigResourceModel] `tfsdk:"db_plans"` + LifecycleDetails types.String `tfsdk:"lifecycle_details"` + LifecycleState fwtypes.StringEnum[odbtypes.IormLifecycleState] `tfsdk:"lifecycle_state"` + Objective fwtypes.StringEnum[odbtypes.Objective] `tfsdk:"objective"` +} + +type cloudVMCDbIormConfigResourceModel struct { + DbName types.String `tfsdk:"db_name"` + FlashCacheLimit types.String `tfsdk:"flash_cache_limit"` + Share types.Int32 `tfsdk:"share"` +} diff --git a/internal/service/odb/cloud_vm_cluster_data_source.go b/internal/service/odb/cloud_vm_cluster_data_source.go new file mode 100644 index 000000000000..41dea592991d --- /dev/null +++ b/internal/service/odb/cloud_vm_cluster_data_source.go @@ -0,0 +1,317 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package odb + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/service/odb" + odbtypes "github.com/aws/aws-sdk-go-v2/service/odb/types" + "github.com/hashicorp/terraform-plugin-framework-timetypes/timetypes" + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-provider-aws/internal/create" + "github.com/hashicorp/terraform-provider-aws/internal/framework" + "github.com/hashicorp/terraform-provider-aws/internal/framework/flex" + fwtypes "github.com/hashicorp/terraform-provider-aws/internal/framework/types" + tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" + "github.com/hashicorp/terraform-provider-aws/names" +) + +// @FrameworkDataSource("aws_odb_cloud_vm_cluster", name="Cloud Vm Cluster") +// @Tags(identifierAttribute="arn") +func newDataSourceCloudVmCluster(context.Context) (datasource.DataSourceWithConfigure, error) { + return &dataSourceCloudVmCluster{}, nil +} + +const ( + DSNameCloudVmCluster = "Cloud Vm Cluster Data Source" +) + +type dataSourceCloudVmCluster struct { + framework.DataSourceWithModel[dataSourceCloudVmClusterModel] +} + +func (d *dataSourceCloudVmCluster) Schema(ctx context.Context, req datasource.SchemaRequest, resp *datasource.SchemaResponse) { + statusType := fwtypes.StringEnumType[odbtypes.ResourceStatus]() + diskRedundancyType := fwtypes.StringEnumType[odbtypes.DiskRedundancy]() + licenseModelType := fwtypes.StringEnumType[odbtypes.LicenseModel]() + computeModelType := fwtypes.StringEnumType[odbtypes.ComputeModel]() + resp.Schema = schema.Schema{ + Attributes: map[string]schema.Attribute{ + names.AttrARN: framework.ARNAttributeComputedOnly(), + names.AttrID: schema.StringAttribute{ + Required: true, + Description: "The unique identifier of the VM cluster.", + }, + "cloud_exadata_infrastructure_id": 
schema.StringAttribute{ + Computed: true, + Description: "The ID of the Cloud Exadata Infrastructure.", + }, + names.AttrClusterName: schema.StringAttribute{ + Computed: true, + Description: "The name of the Grid Infrastructure (GI) cluster.", + }, + "cpu_core_count": schema.Int32Attribute{ + Computed: true, + Description: "The number of CPU cores enabled on the VM cluster.", + }, + "data_storage_size_in_tbs": schema.Float64Attribute{ + Computed: true, + Description: "The size of the data disk group, in terabytes (TB), that's allocated for the VM cluster.", + }, + "db_node_storage_size_in_gbs": schema.Int32Attribute{ + Computed: true, + Description: "The amount of local node storage, in gigabytes (GB), that's allocated for the VM cluster.", + }, + "db_servers": schema.ListAttribute{ + Computed: true, + CustomType: fwtypes.ListOfStringType, + ElementType: types.StringType, + Description: "The list of database servers for the VM cluster.", + }, + "disk_redundancy": schema.StringAttribute{ + CustomType: diskRedundancyType, + Computed: true, + Description: "The type of redundancy configured for the VM cluster. NORMAL is 2-way redundancy. 
HIGH is 3-way redundancy.", + }, + names.AttrDisplayName: schema.StringAttribute{ + Computed: true, + Description: "The display name of the VM cluster.", + }, + names.AttrDomain: schema.StringAttribute{ + Computed: true, + Description: "The domain name of the VM cluster.", + }, + "gi_version": schema.StringAttribute{ + Computed: true, + Description: "The software version of the Oracle Grid Infrastructure (GI) for the VM cluster.", + }, + "hostname_prefix_computed": schema.StringAttribute{ + Computed: true, + Description: "The computed hostname prefix for the VM cluster.", + }, + "is_local_backup_enabled": schema.BoolAttribute{ + Computed: true, + Description: "Indicates whether database backups to local Exadata storage is enabled for the VM cluster.", + }, + "is_sparse_disk_group_enabled": schema.BoolAttribute{ + Computed: true, + Description: "Indicates whether the VM cluster is configured with a sparse disk group.", + }, + "last_update_history_entry_id": schema.StringAttribute{ + Computed: true, + Description: "The Oracle Cloud ID (OCID) of the last maintenance update history entry.", + }, + "license_model": schema.StringAttribute{ + CustomType: licenseModelType, + Computed: true, + Description: "The Oracle license model applied to the VM cluster.", + }, + "listener_port": schema.Int32Attribute{ + Computed: true, + Description: "The port number configured for the listener on the VM cluster.", + }, + "memory_size_in_gbs": schema.Int32Attribute{ + Computed: true, + Description: "The amount of memory, in gigabytes (GB), that's allocated for the VM cluster.", + }, + "node_count": schema.Int32Attribute{ + Computed: true, + Description: "The number of nodes in the VM cluster.", + }, + "ocid": schema.StringAttribute{ + Computed: true, + Description: "The OCID of the VM cluster.", + }, + "oci_resource_anchor_name": schema.StringAttribute{ + Computed: true, + Description: "The name of the OCI Resource Anchor.", + }, + "oci_url": schema.StringAttribute{ + Computed: true, + 
Description: "The HTTPS link to the VM cluster in OCI.", + }, + "odb_network_id": schema.StringAttribute{ + Computed: true, + Description: "The ID of the ODB network.", + }, + "percent_progress": schema.Float64Attribute{ + Computed: true, + Description: "The amount of progress made on the current operation on the VM cluster, expressed as a percentage.", + }, + "scan_dns_name": schema.StringAttribute{ + Computed: true, + Description: "The FQDN of the DNS record for the Single Client Access Name (SCAN) IP\n" + + " addresses that are associated with the VM cluster.", + }, + "scan_dns_record_id": schema.StringAttribute{ + Computed: true, + Description: "The OCID of the DNS record for the SCAN IP addresses that are associated with the VM cluster.", + }, + "scan_ip_ids": schema.ListAttribute{ + Computed: true, + CustomType: fwtypes.ListOfStringType, + ElementType: types.StringType, + Description: "The OCID of the SCAN IP addresses that are associated with the VM cluster.", + }, + "shape": schema.StringAttribute{ + Computed: true, + Description: "The hardware model name of the Exadata infrastructure that's running the VM cluster.", + }, + "ssh_public_keys": schema.ListAttribute{ + Computed: true, + CustomType: fwtypes.ListOfStringType, + ElementType: types.StringType, + Description: "The public key portion of one or more key pairs used for SSH access to the VM cluster.", + }, + names.AttrStatus: schema.StringAttribute{ + CustomType: statusType, + Computed: true, + Description: "The status of the VM cluster.", + }, + names.AttrStatusReason: schema.StringAttribute{ + Computed: true, + Description: "Additional information about the status of the VM cluster.", + }, + "storage_size_in_gbs": schema.Int32Attribute{ + Computed: true, + Description: "The amount of local node storage, in gigabytes (GB), that's allocated to the VM cluster.", + }, + "system_version": schema.StringAttribute{ + Computed: true, + Description: "The operating system version of the image chosen for the VM 
cluster.", + }, + "timezone": schema.StringAttribute{ + Computed: true, + Description: "The time zone of the VM cluster.", + }, + "vip_ids": schema.ListAttribute{ + Computed: true, + CustomType: fwtypes.ListOfStringType, + ElementType: types.StringType, + Description: "The virtual IP (VIP) addresses that are associated with the VM cluster.\n" + + "Oracle's Cluster Ready Services (CRS) creates and maintains one VIP address for\n" + + "each node in the VM cluster to enable failover. If one node fails, the VIP is\n" + + "reassigned to another active node in the cluster.", + }, + names.AttrCreatedAt: schema.StringAttribute{ + Computed: true, + CustomType: timetypes.RFC3339Type{}, + Description: "The time when the VM cluster was created.", + }, + "compute_model": schema.StringAttribute{ + CustomType: computeModelType, + Computed: true, + Description: "The OCI compute model used when you create or clone an instance: ECPU or\n" + + "OCPU. An ECPU is an abstracted measure of compute resources. ECPUs are based on\n" + + "the number of cores elastically allocated from a pool of compute and storage\n" + + "servers. An OCPU is a legacy physical measure of compute resources. 
OCPUs are\n" + + "based on the physical core of a processor with hyper-threading enabled.", + }, + "data_collection_options": schema.ListAttribute{ + Computed: true, + CustomType: fwtypes.NewListNestedObjectTypeOf[dataCollectionOptionsVMCDataSourceModel](ctx), + Description: "The set of diagnostic collection options enabled for the VM cluster.", + }, + "iorm_config_cache": schema.ListAttribute{ + Computed: true, + CustomType: fwtypes.NewListNestedObjectTypeOf[exadataIormConfigVMCDataSourceModel](ctx), + Description: "The ExadataIormConfig cache details for the VM cluster.", + }, + names.AttrTags: tftags.TagsAttributeComputedOnly(), + }, + } +} + +func (d *dataSourceCloudVmCluster) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + conn := d.Meta().ODBClient(ctx) + var data dataSourceCloudVmClusterModel + resp.Diagnostics.Append(req.Config.Get(ctx, &data)...) + if resp.Diagnostics.HasError() { + return + } + input := odb.GetCloudVmClusterInput{ + CloudVmClusterId: data.CloudVmClusterId.ValueStringPointer(), + } + out, err := conn.GetCloudVmCluster(ctx, &input) + if err != nil { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.ODB, create.ErrActionReading, DSNameCloudVmCluster, data.CloudVmClusterId.ValueString(), err), + err.Error(), + ) + return + } + data.HostnamePrefixComputed = types.StringValue(*out.CloudVmCluster.Hostname) + resp.Diagnostics.Append(flex.Flatten(ctx, out.CloudVmCluster, &data)...) + if resp.Diagnostics.HasError() { + return + } + resp.Diagnostics.Append(resp.State.Set(ctx, &data)...) 
+} + +type dataSourceCloudVmClusterModel struct { + framework.WithRegionModel + CloudExadataInfrastructureId types.String `tfsdk:"cloud_exadata_infrastructure_id"` + CloudVmClusterArn types.String `tfsdk:"arn"` + CloudVmClusterId types.String `tfsdk:"id"` + ClusterName types.String `tfsdk:"cluster_name"` + CpuCoreCount types.Int32 `tfsdk:"cpu_core_count"` + DataCollectionOptions fwtypes.ListNestedObjectValueOf[dataCollectionOptionsVMCDataSourceModel] `tfsdk:"data_collection_options"` + DataStorageSizeInTBs types.Float64 `tfsdk:"data_storage_size_in_tbs"` + DbNodeStorageSizeInGBs types.Int32 `tfsdk:"db_node_storage_size_in_gbs"` + DbServers fwtypes.ListValueOf[types.String] `tfsdk:"db_servers"` + DiskRedundancy fwtypes.StringEnum[odbtypes.DiskRedundancy] `tfsdk:"disk_redundancy"` + DisplayName types.String `tfsdk:"display_name"` + Domain types.String `tfsdk:"domain"` + GiVersion types.String `tfsdk:"gi_version"` + HostnamePrefixComputed types.String `tfsdk:"hostname_prefix_computed" autoflex:",noflatten"` + IormConfigCache fwtypes.ListNestedObjectValueOf[exadataIormConfigVMCDataSourceModel] `tfsdk:"iorm_config_cache"` + IsLocalBackupEnabled types.Bool `tfsdk:"is_local_backup_enabled"` + IsSparseDiskGroupEnabled types.Bool `tfsdk:"is_sparse_disk_group_enabled"` + LastUpdateHistoryEntryId types.String `tfsdk:"last_update_history_entry_id"` + LicenseModel fwtypes.StringEnum[odbtypes.LicenseModel] `tfsdk:"license_model"` + ListenerPort types.Int32 `tfsdk:"listener_port"` + MemorySizeInGbs types.Int32 `tfsdk:"memory_size_in_gbs"` + NodeCount types.Int32 `tfsdk:"node_count"` + Ocid types.String `tfsdk:"ocid"` + OciResourceAnchorName types.String `tfsdk:"oci_resource_anchor_name"` + OciUrl types.String `tfsdk:"oci_url"` + OdbNetworkId types.String `tfsdk:"odb_network_id"` + PercentProgress types.Float64 `tfsdk:"percent_progress"` + ScanDnsName types.String `tfsdk:"scan_dns_name"` + ScanDnsRecordId types.String `tfsdk:"scan_dns_record_id"` + ScanIpIds 
fwtypes.ListValueOf[types.String] `tfsdk:"scan_ip_ids"` + Shape types.String `tfsdk:"shape"` + SshPublicKeys fwtypes.ListValueOf[types.String] `tfsdk:"ssh_public_keys"` + Status fwtypes.StringEnum[odbtypes.ResourceStatus] `tfsdk:"status"` + StatusReason types.String `tfsdk:"status_reason"` + StorageSizeInGBs types.Int32 `tfsdk:"storage_size_in_gbs"` + SystemVersion types.String `tfsdk:"system_version"` + Timezone types.String `tfsdk:"timezone"` + VipIds fwtypes.ListValueOf[types.String] `tfsdk:"vip_ids"` + CreatedAt timetypes.RFC3339 `tfsdk:"created_at"` + ComputeModel fwtypes.StringEnum[odbtypes.ComputeModel] `tfsdk:"compute_model"` + Tags tftags.Map `tfsdk:"tags"` +} + +type dataCollectionOptionsVMCDataSourceModel struct { + IsDiagnosticsEventsEnabled types.Bool `tfsdk:"is_diagnostics_events_enabled"` + IsHealthMonitoringEnabled types.Bool `tfsdk:"is_health_monitoring_enabled"` + IsIncidentLogsEnabled types.Bool `tfsdk:"is_incident_logs_enabled"` +} + +type exadataIormConfigVMCDataSourceModel struct { + DbPlans fwtypes.ListNestedObjectValueOf[dbIormConfigVMCDatasourceModel] `tfsdk:"db_plans"` + LifecycleDetails types.String `tfsdk:"lifecycle_details"` + LifecycleState fwtypes.StringEnum[odbtypes.IormLifecycleState] `tfsdk:"lifecycle_state"` + Objective fwtypes.StringEnum[odbtypes.Objective] `tfsdk:"objective"` +} + +type dbIormConfigVMCDatasourceModel struct { + DbName types.String `tfsdk:"db_name"` + FlashCacheLimit types.String `tfsdk:"flash_cache_limit"` + Share types.Int32 `tfsdk:"share"` +} diff --git a/internal/service/odb/cloud_vm_cluster_data_source_test.go b/internal/service/odb/cloud_vm_cluster_data_source_test.go new file mode 100644 index 000000000000..10f9988ca0cb --- /dev/null +++ b/internal/service/odb/cloud_vm_cluster_data_source_test.go @@ -0,0 +1,188 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package odb_test + +import ( + "context" + "errors" + "fmt" + "testing" + + "github.com/aws/aws-sdk-go-v2/service/odb" + odbtypes "github.com/aws/aws-sdk-go-v2/service/odb/types" + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/terraform" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/create" + tfodb "github.com/hashicorp/terraform-provider-aws/internal/service/odb" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + "github.com/hashicorp/terraform-provider-aws/names" +) + +type cloudVmClusterDSTest struct { + vmClusterDisplayNamePrefix string + exaInfraDisplayNamePrefix string + odbNetDisplayNamePrefix string +} + +var vmClusterTestDS = cloudVmClusterDSTest{ + vmClusterDisplayNamePrefix: "Ofake-vmc", + exaInfraDisplayNamePrefix: "Ofake-exa-infra", + odbNetDisplayNamePrefix: "odb-net", +} + +func TestAccODBCloudVmClusterDataSource_basic(t *testing.T) { + ctx := acctest.Context(t) + + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + + var cloudvmcluster odbtypes.CloudVmCluster + odbNetRName := sdkacctest.RandomWithPrefix(vmClusterTestDS.odbNetDisplayNamePrefix) + exaInfraRName := sdkacctest.RandomWithPrefix(vmClusterTestDS.exaInfraDisplayNamePrefix) + vmcDisplayName := sdkacctest.RandomWithPrefix(vmClusterTestDS.vmClusterDisplayNamePrefix) + dataSourceName := "data.aws_odb_cloud_vm_cluster.test" + publicKey, _, err := sdkacctest.RandSSHKeyPair(acctest.DefaultEmailAddress) + if err != nil { + t.Fatal(err) + return + } + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + vmClusterTestDS.testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, 
names.ODBServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: vmClusterTestDS.testAccCheckCloudVmClusterDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: vmClusterTestDS.cloudVMClusterConfig(odbNetRName, exaInfraRName, vmcDisplayName, publicKey), + Check: resource.ComposeAggregateTestCheckFunc( + vmClusterTestDS.testAccCheckCloudVmClusterExists(ctx, dataSourceName, &cloudvmcluster), + resource.TestCheckResourceAttr(dataSourceName, names.AttrDisplayName, vmcDisplayName), + ), + }, + }, + }) +} + +func (cloudVmClusterDSTest) testAccCheckCloudVmClusterDestroy(ctx context.Context) resource.TestCheckFunc { + return func(s *terraform.State) error { + conn := acctest.Provider.Meta().(*conns.AWSClient).ODBClient(ctx) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "aws_odb_cloud_vm_cluster" { + continue + } + _, err := tfodb.FindCloudVmClusterForResourceByID(ctx, conn, rs.Primary.ID) + if tfresource.NotFound(err) { + return nil + } + if err != nil { + return create.Error(names.ODB, create.ErrActionCheckingDestroyed, tfodb.ResNameCloudVmCluster, rs.Primary.ID, err) + } + return create.Error(names.ODB, create.ErrActionCheckingDestroyed, tfodb.ResNameCloudVmCluster, rs.Primary.ID, errors.New("not destroyed")) + } + return nil + } +} + +func (cloudVmClusterDSTest) testAccCheckCloudVmClusterExists(ctx context.Context, name string, cloudvmcluster *odbtypes.CloudVmCluster) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[name] + if !ok { + return create.Error(names.ODB, create.ErrActionCheckingExistence, tfodb.ResNameCloudVmCluster, name, errors.New("not found")) + } + if rs.Primary.ID == "" { + return create.Error(names.ODB, create.ErrActionCheckingExistence, tfodb.ResNameCloudVmCluster, name, errors.New("not set")) + } + conn := acctest.Provider.Meta().(*conns.AWSClient).ODBClient(ctx) + resp, err := tfodb.FindCloudVmClusterForResourceByID(ctx, conn, 
rs.Primary.ID) + if err != nil { + return create.Error(names.ODB, create.ErrActionCheckingExistence, tfodb.ResNameCloudVmCluster, rs.Primary.ID, err) + } + *cloudvmcluster = *resp + return nil + } +} + +func (cloudVmClusterDSTest) testAccPreCheck(ctx context.Context, t *testing.T) { + conn := acctest.Provider.Meta().(*conns.AWSClient).ODBClient(ctx) + input := odb.ListCloudVmClustersInput{} + _, err := conn.ListCloudVmClusters(ctx, &input) + if acctest.PreCheckSkipError(err) { + t.Skipf("skipping acceptance testing: %s", err) + } + if err != nil { + t.Fatalf("unexpected PreCheck error: %s", err) + } +} + +func (cloudVmClusterDSTest) cloudVMClusterConfig(odbNet, exaInfra, displayName, sshKey string) string { + dsTfCodeVmCluster := fmt.Sprintf(` + + +resource "aws_odb_network" "test" { + display_name = %[1]q + availability_zone_id = "use1-az6" + client_subnet_cidr = "10.2.0.0/24" + backup_subnet_cidr = "10.2.1.0/24" + s3_access = "DISABLED" + zero_etl_access = "DISABLED" +} + +resource "aws_odb_cloud_exadata_infrastructure" "test" { + display_name = %[1]q + shape = "Exadata.X9M" + storage_count = 3 + compute_count = 2 + availability_zone_id = "use1-az6" + maintenance_window { + custom_action_timeout_in_mins = 16 + is_custom_action_timeout_enabled = true + patching_mode = "ROLLING" + preference = "NO_PREFERENCE" + } +} + +data "aws_odb_db_servers" "test" { + cloud_exadata_infrastructure_id = aws_odb_cloud_exadata_infrastructure.test.id +} + +resource "aws_odb_cloud_vm_cluster" "test" { + display_name = %[3]q + cloud_exadata_infrastructure_id = aws_odb_cloud_exadata_infrastructure.test.id + cpu_core_count = 6 + gi_version = "23.0.0.0" + hostname_prefix = "apollo12" + ssh_public_keys = ["%[4]s"] + odb_network_id = aws_odb_network.test.id + is_local_backup_enabled = true + is_sparse_diskgroup_enabled = true + license_model = "LICENSE_INCLUDED" + data_storage_size_in_tbs = 20.0 + db_servers = [for db_server in data.aws_odb_db_servers.test.db_servers : db_server.id] + 
db_node_storage_size_in_gbs = 120.0 + memory_size_in_gbs = 60 + data_collection_options { + is_diagnostics_events_enabled = false + is_health_monitoring_enabled = false + is_incident_logs_enabled = false + } + tags = { + "env" = "dev" + } + +} + +data "aws_odb_cloud_vm_cluster" "test" { + id = aws_odb_cloud_vm_cluster.test.id +} +`, odbNet, exaInfra, displayName, sshKey) + return dsTfCodeVmCluster +} diff --git a/internal/service/odb/cloud_vm_cluster_test.go b/internal/service/odb/cloud_vm_cluster_test.go new file mode 100644 index 000000000000..d45d899ffc26 --- /dev/null +++ b/internal/service/odb/cloud_vm_cluster_test.go @@ -0,0 +1,561 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package odb_test + +import ( + "context" + "errors" + "fmt" + "strings" + "testing" + + "github.com/aws/aws-sdk-go-v2/service/odb" + odbtypes "github.com/aws/aws-sdk-go-v2/service/odb/types" + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/terraform" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/create" + tfodb "github.com/hashicorp/terraform-provider-aws/internal/service/odb" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + "github.com/hashicorp/terraform-provider-aws/names" +) + +type cloudVmClusterResourceTest struct { + vmClusterDisplayNamePrefix string + exaInfraDisplayNamePrefix string + odbNetDisplayNamePrefix string +} + +var vmClusterTestEntity = cloudVmClusterResourceTest{ + vmClusterDisplayNamePrefix: "Ofake-vmc", + exaInfraDisplayNamePrefix: "Ofake-exa-infra", + odbNetDisplayNamePrefix: "odb-net", +} + +func TestAccODBCloudVmCluster_basic(t *testing.T) { + ctx := acctest.Context(t) + if testing.Short() { + t.Skip("skipping long-running test in short 
mode") + } + var cloudvmcluster odbtypes.CloudVmCluster + vmcDisplayName := sdkacctest.RandomWithPrefix(vmClusterTestEntity.vmClusterDisplayNamePrefix) + publicKey, _, err := sdkacctest.RandSSHKeyPair(acctest.DefaultEmailAddress) + if err != nil { + t.Fatal(err) + return + } + resourceName := "aws_odb_cloud_vm_cluster.test" + basicConfig, _ := vmClusterTestEntity.testAccCloudVmClusterConfigBasic(vmcDisplayName, publicKey) + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + vmClusterTestEntity.testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.ODBServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: vmClusterTestEntity.testAccCheckCloudVmClusterDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: basicConfig, + Check: resource.ComposeAggregateTestCheckFunc( + vmClusterTestEntity.testAccCheckCloudVmClusterExists(ctx, resourceName, &cloudvmcluster), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccODBCloudVmCluster_allParams(t *testing.T) { + ctx := acctest.Context(t) + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + var cloudvmcluster odbtypes.CloudVmCluster + vmcClusterDisplayName := sdkacctest.RandomWithPrefix(vmClusterTestEntity.vmClusterDisplayNamePrefix) + publicKey, _, err := sdkacctest.RandSSHKeyPair(acctest.DefaultEmailAddress) + if err != nil { + t.Fatal(err) + return + } + resourceName := "aws_odb_cloud_vm_cluster.test" + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + vmClusterTestEntity.testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.ODBServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: vmClusterTestEntity.testAccCheckCloudVmClusterDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: 
vmClusterTestEntity.cloudVmClusterWithAllParameters(vmcClusterDisplayName, publicKey), + Check: resource.ComposeAggregateTestCheckFunc( + vmClusterTestEntity.testAccCheckCloudVmClusterExists(ctx, resourceName, &cloudvmcluster), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccODBCloudVmCluster_taggingTest(t *testing.T) { + ctx := acctest.Context(t) + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + var cloudvmcluster1 odbtypes.CloudVmCluster + var cloudvmcluster2 odbtypes.CloudVmCluster + vmcDisplayName := sdkacctest.RandomWithPrefix(vmClusterTestEntity.vmClusterDisplayNamePrefix) + resourceName := "aws_odb_cloud_vm_cluster.test" + + publicKey, _, err := sdkacctest.RandSSHKeyPair(acctest.DefaultEmailAddress) + if err != nil { + t.Fatal(err) + return + } + vmcNoTag, vmcWithTag := vmClusterTestEntity.testAccCloudVmClusterConfigBasic(vmcDisplayName, publicKey) + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + vmClusterTestEntity.testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.ODBServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: vmClusterTestEntity.testAccCheckCloudVmClusterDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: vmcNoTag, + Check: resource.ComposeAggregateTestCheckFunc( + resource.ComposeTestCheckFunc(func(state *terraform.State) error { + return nil + }), + vmClusterTestEntity.testAccCheckCloudVmClusterExists(ctx, resourceName, &cloudvmcluster1), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: vmcWithTag, + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, "2"), + resource.TestCheckResourceAttr(resourceName, "tags.env", "dev"), + resource.TestCheckResourceAttr(resourceName, "tags.foo", "bar"), + 
vmClusterTestEntity.testAccCheckCloudVmClusterExists(ctx, resourceName, &cloudvmcluster2), + resource.ComposeTestCheckFunc(func(state *terraform.State) error { + if strings.Compare(*(cloudvmcluster1.CloudVmClusterId), *(cloudvmcluster2.CloudVmClusterId)) != 0 { + return errors.New("Should not create a new cloud vm cluster for tag update") + } + return nil + }), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccODBCloudVmCluster_real(t *testing.T) { + ctx := acctest.Context(t) + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + var cloudvmcluster1 odbtypes.CloudVmCluster + var cloudvmcluster2 odbtypes.CloudVmCluster + vmcDisplayName := sdkacctest.RandomWithPrefix("tf-real") + resourceName := "aws_odb_cloud_vm_cluster.test" + + publicKey, _, err := sdkacctest.RandSSHKeyPair(acctest.DefaultEmailAddress) + if err != nil { + t.Fatal(err) + return + } + vmcWithoutTag, vmcWithTag := vmClusterTestEntity.cloudVmClusterReal(vmcDisplayName, publicKey) + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + vmClusterTestEntity.testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.ODBServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: vmClusterTestEntity.testAccCheckCloudVmClusterDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: vmcWithoutTag, + Check: resource.ComposeAggregateTestCheckFunc( + resource.ComposeTestCheckFunc(func(state *terraform.State) error { + return nil + }), + vmClusterTestEntity.testAccCheckCloudVmClusterExists(ctx, resourceName, &cloudvmcluster1), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: vmcWithTag, + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, "1"), + resource.TestCheckResourceAttr(resourceName, "tags.env", 
"dev"), + vmClusterTestEntity.testAccCheckCloudVmClusterExists(ctx, resourceName, &cloudvmcluster2), + resource.ComposeTestCheckFunc(func(state *terraform.State) error { + if strings.Compare(*(cloudvmcluster1.CloudVmClusterId), *(cloudvmcluster2.CloudVmClusterId)) != 0 { + return errors.New("Should not create a new cloud vm cluster for tag update") + } + return nil + }), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccODBCloudVmCluster_disappears(t *testing.T) { + ctx := acctest.Context(t) + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + var cloudvmcluster odbtypes.CloudVmCluster + vmClusterDisplayName := sdkacctest.RandomWithPrefix(vmClusterTestEntity.vmClusterDisplayNamePrefix) + resourceName := "aws_odb_cloud_vm_cluster.test" + publicKey, _, err := sdkacctest.RandSSHKeyPair(acctest.DefaultEmailAddress) + if err != nil { + t.Fatal(err) + return + } + vmcBasicConfig, _ := vmClusterTestEntity.testAccCloudVmClusterConfigBasic(vmClusterDisplayName, publicKey) + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + vmClusterTestEntity.testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.ODBServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: vmClusterTestEntity.testAccCheckCloudVmClusterDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: vmcBasicConfig, + Check: resource.ComposeAggregateTestCheckFunc( + vmClusterTestEntity.testAccCheckCloudVmClusterExists(ctx, resourceName, &cloudvmcluster), + acctest.CheckFrameworkResourceDisappears(ctx, acctest.Provider, tfodb.ResourceCloudVmCluster, resourceName), + ), + ExpectNonEmptyPlan: true, + }, + }, + }) +} + +func (cloudVmClusterResourceTest) testAccCheckCloudVmClusterDestroy(ctx context.Context) resource.TestCheckFunc { + return func(s *terraform.State) error { + conn := 
acctest.Provider.Meta().(*conns.AWSClient).ODBClient(ctx) + for _, rs := range s.RootModule().Resources { + if rs.Type != "aws_odb_cloud_vm_cluster" { + continue + } + _, err := tfodb.FindCloudVmClusterForResourceByID(ctx, conn, rs.Primary.ID) + if tfresource.NotFound(err) { + return nil + } + if err != nil { + return create.Error(names.ODB, create.ErrActionCheckingDestroyed, tfodb.ResNameCloudVmCluster, rs.Primary.ID, err) + } + return create.Error(names.ODB, create.ErrActionCheckingDestroyed, tfodb.ResNameCloudVmCluster, rs.Primary.ID, errors.New("not destroyed")) + } + return nil + } +} + +func (cloudVmClusterResourceTest) testAccCheckCloudVmClusterExists(ctx context.Context, name string, cloudvmcluster *odbtypes.CloudVmCluster) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[name] + if !ok { + return create.Error(names.ODB, create.ErrActionCheckingExistence, tfodb.ResNameCloudVmCluster, name, errors.New("not found")) + } + if rs.Primary.ID == "" { + return create.Error(names.ODB, create.ErrActionCheckingExistence, tfodb.ResNameCloudVmCluster, name, errors.New("not set")) + } + conn := acctest.Provider.Meta().(*conns.AWSClient).ODBClient(ctx) + resp, err := tfodb.FindCloudVmClusterForResourceByID(ctx, conn, rs.Primary.ID) + if err != nil { + return create.Error(names.ODB, create.ErrActionCheckingExistence, tfodb.ResNameCloudVmCluster, rs.Primary.ID, err) + } + *cloudvmcluster = *resp + return nil + } +} + +func (cloudVmClusterResourceTest) testAccPreCheck(ctx context.Context, t *testing.T) { + conn := acctest.Provider.Meta().(*conns.AWSClient).ODBClient(ctx) + input := odb.ListCloudVmClustersInput{} + _, err := conn.ListCloudVmClusters(ctx, &input) + if acctest.PreCheckSkipError(err) { + t.Skipf("skipping acceptance testing: %s", err) + } + if err != nil { + t.Fatalf("unexpected PreCheck error: %s", err) + } +} + +func (cloudVmClusterResourceTest) testAccCloudVmClusterConfigBasic(vmClusterDisplayName, 
sshKey string) (string, string) { + exaInfraDisplayName := sdkacctest.RandomWithPrefix(vmClusterTestEntity.exaInfraDisplayNamePrefix) + odbNetDisplayName := sdkacctest.RandomWithPrefix(vmClusterTestEntity.odbNetDisplayNamePrefix) + exaInfra := vmClusterTestEntity.exaInfra(exaInfraDisplayName) + odbNet := vmClusterTestEntity.oracleDBNetwork(odbNetDisplayName) + vmcNoTag := fmt.Sprintf(` + +%s + +%s + +data "aws_odb_db_servers" "test" { + cloud_exadata_infrastructure_id = aws_odb_cloud_exadata_infrastructure.test.id +} + +resource "aws_odb_cloud_vm_cluster" "test" { + display_name = %[3]q + cloud_exadata_infrastructure_id = aws_odb_cloud_exadata_infrastructure.test.id + cpu_core_count = 6 + gi_version = "23.0.0.0" + hostname_prefix = "apollo12" + ssh_public_keys = ["%[4]s"] + odb_network_id = aws_odb_network.test.id + is_local_backup_enabled = true + is_sparse_diskgroup_enabled = true + license_model = "LICENSE_INCLUDED" + data_storage_size_in_tbs = 20.0 + db_servers = [for db_server in data.aws_odb_db_servers.test.db_servers : db_server.id] + db_node_storage_size_in_gbs = 120.0 + memory_size_in_gbs = 60 + data_collection_options { + is_diagnostics_events_enabled = false + is_health_monitoring_enabled = false + is_incident_logs_enabled = false + } + +} +`, exaInfra, odbNet, vmClusterDisplayName, sshKey) + + vmcWithTag := fmt.Sprintf(` + +%s + +%s + +data "aws_odb_db_servers" "test" { + cloud_exadata_infrastructure_id = aws_odb_cloud_exadata_infrastructure.test.id +} + +resource "aws_odb_cloud_vm_cluster" "test" { + display_name = %[3]q + cloud_exadata_infrastructure_id = aws_odb_cloud_exadata_infrastructure.test.id + cpu_core_count = 6 + gi_version = "23.0.0.0" + hostname_prefix = "apollo12" + ssh_public_keys = ["%[4]s"] + odb_network_id = aws_odb_network.test.id + is_local_backup_enabled = true + is_sparse_diskgroup_enabled = true + license_model = "LICENSE_INCLUDED" + data_storage_size_in_tbs = 20.0 + db_servers = [for db_server in 
data.aws_odb_db_servers.test.db_servers : db_server.id] + db_node_storage_size_in_gbs = 120.0 + memory_size_in_gbs = 60 + data_collection_options { + is_diagnostics_events_enabled = false + is_health_monitoring_enabled = false + is_incident_logs_enabled = false + } + tags = { + "env" = "dev" + "foo" = "bar" + } + +} +`, exaInfra, odbNet, vmClusterDisplayName, sshKey) + return vmcNoTag, vmcWithTag +} + +func (cloudVmClusterResourceTest) cloudVmClusterWithAllParameters(vmClusterDisplayName, sshKey string) string { + exaInfraDisplayName := sdkacctest.RandomWithPrefix(vmClusterTestEntity.exaInfraDisplayNamePrefix) + odbNetDisplayName := sdkacctest.RandomWithPrefix(vmClusterTestEntity.odbNetDisplayNamePrefix) + exaInfra := vmClusterTestEntity.exaInfra(exaInfraDisplayName) + odbNet := vmClusterTestEntity.oracleDBNetwork(odbNetDisplayName) + + res := fmt.Sprintf(` + +%s + +%s + + +data "aws_odb_db_servers" "test" { + cloud_exadata_infrastructure_id = aws_odb_cloud_exadata_infrastructure.test.id +} + +resource "aws_odb_cloud_vm_cluster" "test" { + display_name = %[3]q + cloud_exadata_infrastructure_id = aws_odb_cloud_exadata_infrastructure.test.id + cpu_core_count = 6 + gi_version = "23.0.0.0" + hostname_prefix = "apollo12" + ssh_public_keys = ["%[4]s"] + odb_network_id = aws_odb_network.test.id + is_local_backup_enabled = true + is_sparse_diskgroup_enabled = true + license_model = "LICENSE_INCLUDED" + data_storage_size_in_tbs = 20.0 + db_servers = [for db_server in data.aws_odb_db_servers.test.db_servers : db_server.id] + db_node_storage_size_in_gbs = 120.0 + memory_size_in_gbs = 60 + cluster_name = "julia-13" + timezone = "UTC" + scan_listener_port_tcp = 1521 + tags = { + "env" = "dev" + } + data_collection_options { + is_diagnostics_events_enabled = true + is_health_monitoring_enabled = true + is_incident_logs_enabled = true + } +} +`, exaInfra, odbNet, vmClusterDisplayName, sshKey) + return res +} + +func (cloudVmClusterResourceTest) exaInfra(rName string) string { + 
resource := fmt.Sprintf(` +resource "aws_odb_cloud_exadata_infrastructure" "test" { + display_name = %[1]q + shape = "Exadata.X9M" + storage_count = 3 + compute_count = 2 + availability_zone_id = "use1-az6" + maintenance_window { + custom_action_timeout_in_mins = 16 + is_custom_action_timeout_enabled = true + patching_mode = "ROLLING" + preference = "NO_PREFERENCE" + } +} +`, rName) + return resource +} + +func (cloudVmClusterResourceTest) oracleDBNetwork(rName string) string { + resource := fmt.Sprintf(` +resource "aws_odb_network" "test" { + display_name = %[1]q + availability_zone_id = "use1-az6" + client_subnet_cidr = "10.2.0.0/24" + backup_subnet_cidr = "10.2.1.0/24" + s3_access = "DISABLED" + zero_etl_access = "DISABLED" +} +`, rName) + return resource +} + +func (cloudVmClusterResourceTest) cloudVmClusterReal(vmClusterDisplayName, sshKey string) (string, string) { + exaInfraDisplayName := sdkacctest.RandomWithPrefix("tf-real") + odbNetDisplayName := sdkacctest.RandomWithPrefix(vmClusterTestEntity.odbNetDisplayNamePrefix) + exaInfra := vmClusterTestEntity.exaInfra(exaInfraDisplayName) + odbNet := vmClusterTestEntity.oracleDBNetwork(odbNetDisplayName) + vmClusterResourceNoTag := fmt.Sprintf(` + +%s + +%s + +data "aws_odb_db_servers" "test" { + cloud_exadata_infrastructure_id = aws_odb_cloud_exadata_infrastructure.test.id +} + +resource "aws_odb_cloud_vm_cluster" "test" { + display_name = %[3]q + cloud_exadata_infrastructure_id = aws_odb_cloud_exadata_infrastructure.test.id + cpu_core_count = 16 + gi_version = "23.0.0.0" + hostname_prefix = "apollo12" + ssh_public_keys = ["%[4]s"] + odb_network_id = aws_odb_network.test.id + is_local_backup_enabled = true + is_sparse_diskgroup_enabled = true + license_model = "LICENSE_INCLUDED" + data_storage_size_in_tbs = 20.0 + db_servers = [for db_server in data.aws_odb_db_servers.test.db_servers : db_server.id] + db_node_storage_size_in_gbs = 120.0 + memory_size_in_gbs = 60 + data_collection_options { + 
is_diagnostics_events_enabled = false + is_health_monitoring_enabled = false + is_incident_logs_enabled = false + } + +} +`, exaInfra, odbNet, vmClusterDisplayName, sshKey) + + vmClusterResourceWithTag := fmt.Sprintf(` + +%s + +%s + +data "aws_odb_db_servers" "test" { + cloud_exadata_infrastructure_id = aws_odb_cloud_exadata_infrastructure.test.id +} + +resource "aws_odb_cloud_vm_cluster" "test" { + display_name = %[3]q + cloud_exadata_infrastructure_id = aws_odb_cloud_exadata_infrastructure.test.id + cpu_core_count = 16 + gi_version = "23.0.0.0" + hostname_prefix = "apollo12" + ssh_public_keys = ["%[4]s"] + odb_network_id = aws_odb_network.test.id + is_local_backup_enabled = true + is_sparse_diskgroup_enabled = true + license_model = "LICENSE_INCLUDED" + data_storage_size_in_tbs = 20.0 + db_servers = [for db_server in data.aws_odb_db_servers.test.db_servers : db_server.id] + db_node_storage_size_in_gbs = 120.0 + memory_size_in_gbs = 60 + data_collection_options { + is_diagnostics_events_enabled = false + is_health_monitoring_enabled = false + is_incident_logs_enabled = false + } + tags = { + "env" = "dev" + } + +} +`, exaInfra, odbNet, vmClusterDisplayName, sshKey) + + return vmClusterResourceNoTag, vmClusterResourceWithTag +} diff --git a/internal/service/odb/cloud_vm_clusters_data_source.go b/internal/service/odb/cloud_vm_clusters_data_source.go new file mode 100644 index 000000000000..0345a9c4b6c6 --- /dev/null +++ b/internal/service/odb/cloud_vm_clusters_data_source.go @@ -0,0 +1,96 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package odb + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/service/odb" + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-provider-aws/internal/create" + "github.com/hashicorp/terraform-provider-aws/internal/framework" + "github.com/hashicorp/terraform-provider-aws/internal/framework/flex" + fwtypes "github.com/hashicorp/terraform-provider-aws/internal/framework/types" + "github.com/hashicorp/terraform-provider-aws/names" +) + +// Function annotations are used for datasource registration to the Provider. DO NOT EDIT. +// @FrameworkDataSource("aws_odb_cloud_vm_clusters", name="Cloud Vm Clusters") +func newDataSourceCloudVmClustersList(context.Context) (datasource.DataSourceWithConfigure, error) { + return &dataSourceCloudVmClustersList{}, nil +} + +const ( + DSNameCloudVmClustersList = "Cloud Vm Clusters List Data Source" +) + +type dataSourceCloudVmClustersList struct { + framework.DataSourceWithModel[dataSourceCloudVmClustersListModel] +} + +func (d *dataSourceCloudVmClustersList) Schema(ctx context.Context, req datasource.SchemaRequest, resp *datasource.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: map[string]schema.Attribute{ + "cloud_vm_clusters": schema.ListAttribute{ + Computed: true, + Description: "List of Cloud VM Clusters. It returns only basic information about the cloud VM clusters.", + CustomType: fwtypes.NewListNestedObjectTypeOf[cloudVmClusterSummary](ctx), + }, + }, + } +} + +// Data sources only have a read method. +func (d *dataSourceCloudVmClustersList) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + conn := d.Meta().ODBClient(ctx) + var data dataSourceCloudVmClustersListModel + resp.Diagnostics.Append(req.Config.Get(ctx, &data)...) 
+ if resp.Diagnostics.HasError() { + return + } + out, err := ListCloudVmClusters(ctx, conn) + if err != nil { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.ODB, create.ErrActionReading, DSNameCloudVmClustersList, "", err), + err.Error(), + ) + return + } + resp.Diagnostics.Append(flex.Flatten(ctx, out, &data)...) + if resp.Diagnostics.HasError() { + return + } + resp.Diagnostics.Append(resp.State.Set(ctx, &data)...) +} + +func ListCloudVmClusters(ctx context.Context, conn *odb.Client) (*odb.ListCloudVmClustersOutput, error) { + var out odb.ListCloudVmClustersOutput + paginator := odb.NewListCloudVmClustersPaginator(conn, &odb.ListCloudVmClustersInput{}) + for paginator.HasMorePages() { + output, err := paginator.NextPage(ctx) + if err != nil { + return nil, err + } + out.CloudVmClusters = append(out.CloudVmClusters, output.CloudVmClusters...) + } + return &out, nil +} + +type dataSourceCloudVmClustersListModel struct { + framework.WithRegionModel + CloudVmClusters fwtypes.ListNestedObjectValueOf[cloudVmClusterSummary] `tfsdk:"cloud_vm_clusters"` +} + +type cloudVmClusterSummary struct { + CloudAutonomousVmClusterId types.String `tfsdk:"id"` + CloudVmClusterArn types.String `tfsdk:"arn"` + CloudExadataInfrastructureId types.String `tfsdk:"cloud_exadata_infrastructure_id"` + OciResourceAnchorName types.String `tfsdk:"oci_resource_anchor_name"` + OdbNetworkId types.String `tfsdk:"odb_network_id"` + OciUrl types.String `tfsdk:"oci_url"` + Ocid types.String `tfsdk:"ocid"` + DisplayName types.String `tfsdk:"display_name"` +} diff --git a/internal/service/odb/cloud_vm_clusters_data_source_test.go b/internal/service/odb/cloud_vm_clusters_data_source_test.go new file mode 100644 index 000000000000..7e54d2d654d3 --- /dev/null +++ b/internal/service/odb/cloud_vm_clusters_data_source_test.go @@ -0,0 +1,81 @@ +//Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved. 
+ +package odb_test + +import ( + "context" + "errors" + "strconv" + "testing" + + "github.com/aws/aws-sdk-go-v2/service/odb" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/terraform" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/create" + tfodb "github.com/hashicorp/terraform-provider-aws/internal/service/odb" + "github.com/hashicorp/terraform-provider-aws/names" +) + +type listVMCListDSTest struct { +} + +func TestAccODBListVmClustersDataSource_basic(t *testing.T) { + ctx := acctest.Context(t) + var vmcListTest = listVMCListDSTest{} + var output odb.ListCloudVmClustersOutput + dataSourceName := "data.aws_odb_cloud_vm_clusters.test" + resource.Test(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + vmcListTest.testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.ODBServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + Config: vmcListTest.basic(), + Check: resource.ComposeAggregateTestCheckFunc( + + resource.ComposeTestCheckFunc(func(s *terraform.State) error { + vmcListTest.count(ctx, dataSourceName, &output) + resource.TestCheckResourceAttr(dataSourceName, "cloud_autonomous_vm_clusters.#", strconv.Itoa(len(output.CloudVmClusters))) + return nil + }, + ), + ), + }, + }, + }) +} + +func (listVMCListDSTest) basic() string { + return `data "aws_odb_cloud_vm_clusters" "test" {}` +} + +func (listVMCListDSTest) count(ctx context.Context, name string, list *odb.ListCloudVmClustersOutput) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[name] + if !ok { + return create.Error(names.ODB, create.ErrActionCheckingExistence, tfodb.DSNameCloudVmClustersList, name, errors.New("not found")) + } + conn := 
acctest.Provider.Meta().(*conns.AWSClient).ODBClient(ctx) + resp, err := tfodb.ListCloudVmClusters(ctx, conn) + if err != nil { + return create.Error(names.ODB, create.ErrActionCheckingExistence, tfodb.DSNameCloudVmClustersList, rs.Primary.ID, err) + } + list.CloudVmClusters = resp.CloudVmClusters + return nil + } +} +func (listVMCListDSTest) testAccPreCheck(ctx context.Context, t *testing.T) { + conn := acctest.Provider.Meta().(*conns.AWSClient).ODBClient(ctx) + _, err := tfodb.ListCloudVmClusters(ctx, conn) + if acctest.PreCheckSkipError(err) { + t.Skipf("skipping acceptance testing: %s", err) + } + if err != nil { + t.Fatalf("unexpected PreCheck error: %s", err) + } +} diff --git a/internal/service/odb/db_node_data_source.go b/internal/service/odb/db_node_data_source.go new file mode 100644 index 000000000000..a5c81ebca947 --- /dev/null +++ b/internal/service/odb/db_node_data_source.go @@ -0,0 +1,213 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package odb + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/service/odb" + odbtypes "github.com/aws/aws-sdk-go-v2/service/odb/types" + "github.com/hashicorp/terraform-plugin-framework-timetypes/timetypes" + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-provider-aws/internal/create" + "github.com/hashicorp/terraform-provider-aws/internal/framework" + "github.com/hashicorp/terraform-provider-aws/internal/framework/flex" + fwtypes "github.com/hashicorp/terraform-provider-aws/internal/framework/types" + "github.com/hashicorp/terraform-provider-aws/names" +) + +// @FrameworkDataSource("aws_odb_db_node", name="Db Node") +func newDataSourceDBNode(context.Context) (datasource.DataSourceWithConfigure, error) { + return &dataSourceDbNode{}, nil +} + +const ( + DSNameDBNode = "DB Node Data Source" +) + +type 
dataSourceDbNode struct { + framework.DataSourceWithModel[dbNodeDataSourceModel] +} + +func (d *dataSourceDbNode) Schema(ctx context.Context, req datasource.SchemaRequest, resp *datasource.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: map[string]schema.Attribute{ + names.AttrARN: framework.ARNAttributeComputedOnly(), + names.AttrID: schema.StringAttribute{ + Required: true, + }, + "cloud_vm_cluster_id": schema.StringAttribute{ + Required: true, + }, + names.AttrStatus: schema.StringAttribute{ + Computed: true, + CustomType: fwtypes.StringEnumType[odbtypes.ResourceStatus](), + Description: "The current status of the DB node.", + }, + names.AttrStatusReason: schema.StringAttribute{ + Computed: true, + Description: "Additional information about the status of the DB node.", + }, + "additional_details": schema.StringAttribute{ + Computed: true, + Description: "Additional information about the planned maintenance.", + }, + "backup_ip_id": schema.StringAttribute{ + Computed: true, + Description: "The Oracle Cloud ID (OCID) of the backup IP address that's associated with the DB node.", + }, + "backup_vnic2_id": schema.StringAttribute{ + Computed: true, + Description: "The OCID of the second backup VNIC.", + }, + "backup_vnic_id": schema.StringAttribute{ + Computed: true, + Description: "The OCID of the backup VNIC.", + }, + "cpu_core_count": schema.Int32Attribute{ + Computed: true, + Description: "Number of CPU cores enabled on the DB node.", + }, + "db_storage_size_in_gbs": schema.Int32Attribute{ + Computed: true, + Description: "The amount of local node storage, in gigabytes (GBs), allocated on the DB node.", + }, + "db_server_id": schema.StringAttribute{ + Computed: true, + Description: "The unique identifier of the DB server that is associated with the DB node.", + }, + "db_system_id": schema.StringAttribute{ + Computed: true, + Description: "The OCID of the DB system.", + }, + "fault_domain": schema.StringAttribute{ + Computed: true, + Description: 
"The name of the fault domain the instance is contained in.", + }, + "host_ip_id": schema.StringAttribute{ + Computed: true, + Description: "The OCID of the host IP address that's associated with the DB node.", + }, + "hostname": schema.StringAttribute{ + Computed: true, + Description: "The host name for the DB node.", + }, + "ocid": schema.StringAttribute{ + Computed: true, + Description: "The OCID of the DB node.", + }, + "oci_resource_anchor_name": schema.StringAttribute{ + Computed: true, + Description: "The name of the OCI resource anchor for the DB node.", + }, + "maintenance_type": schema.StringAttribute{ + Computed: true, + CustomType: fwtypes.StringEnumType[odbtypes.DbNodeMaintenanceType](), + Description: "The type of database node maintenance. Either VMDB_REBOOT_MIGRATION or EXADBXS_REBOOT_MIGRATION.", + }, + "memory_size_in_gbs": schema.Int32Attribute{ + Computed: true, + Description: "The allocated memory in GBs on the DB node.", + }, + "software_storage_size_in_gbs": schema.Int32Attribute{ + Computed: true, + Description: "The size (in GB) of the block storage volume allocation for the DB system.", + }, + names.AttrCreatedAt: schema.StringAttribute{ + CustomType: timetypes.RFC3339Type{}, + Computed: true, + Description: "The date and time when the DB node was created.", + }, + "time_maintenance_window_end": schema.StringAttribute{ + Computed: true, + Description: "End date and time of maintenance window.", + }, + "time_maintenance_window_start": schema.StringAttribute{ + Computed: true, + Description: "Start date and time of maintenance window.", + }, + "total_cpu_core_count": schema.Int32Attribute{ + Computed: true, + Description: "The total number of CPU cores reserved on the DB node.", + }, + "vnic2_id": schema.StringAttribute{ + Computed: true, + Description: "The OCID of the second VNIC.", + }, + "vnic_id": schema.StringAttribute{ + Computed: true, + Description: "The OCID of the VNIC.", + }, + "private_ip_address": schema.StringAttribute{ + 
Computed: true, + Description: "The private IP address assigned to the DB node.", + }, + "floating_ip_address": schema.StringAttribute{ + Computed: true, + Description: "The floating IP address assigned to the DB node.", + }, + }, + } +} + +func (d *dataSourceDbNode) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + conn := d.Meta().ODBClient(ctx) + var data dbNodeDataSourceModel + resp.Diagnostics.Append(req.Config.Get(ctx, &data)...) + if resp.Diagnostics.HasError() { + return + } + input := odb.GetDbNodeInput{ + DbNodeId: data.DbNodeId.ValueStringPointer(), + CloudVmClusterId: data.CloudVmClusterId.ValueStringPointer(), + } + out, err := conn.GetDbNode(ctx, &input) + if err != nil { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.ODB, create.ErrActionReading, DSNameDBNode, data.DbNodeId.ValueString(), err), + err.Error(), + ) + return + } + resp.Diagnostics.Append(flex.Flatten(ctx, out.DbNode, &data)...) + if resp.Diagnostics.HasError() { + return + } + resp.Diagnostics.Append(resp.State.Set(ctx, &data)...) 
+} + +type dbNodeDataSourceModel struct { + framework.WithRegionModel + CloudVmClusterId types.String `tfsdk:"cloud_vm_cluster_id"` + DbNodeId types.String `tfsdk:"id"` + DbNodeArn types.String `tfsdk:"arn"` + Status fwtypes.StringEnum[odbtypes.ResourceStatus] `tfsdk:"status"` + StatusReason types.String `tfsdk:"status_reason"` + AdditionalDetails types.String `tfsdk:"additional_details"` + BackupIpId types.String `tfsdk:"backup_ip_id"` + BackupVnic2Id types.String `tfsdk:"backup_vnic2_id"` + BackupVnicId types.String `tfsdk:"backup_vnic_id"` + CpuCoreCount types.Int32 `tfsdk:"cpu_core_count"` + DbNodeStorageSizeInGBs types.Int32 `tfsdk:"db_storage_size_in_gbs"` + DbServerId types.String `tfsdk:"db_server_id"` + DbSystemId types.String `tfsdk:"db_system_id"` + FaultDomain types.String `tfsdk:"fault_domain"` + HostIpId types.String `tfsdk:"host_ip_id"` + Hostname types.String `tfsdk:"hostname"` + Ocid types.String `tfsdk:"ocid"` + OciResourceAnchorName types.String `tfsdk:"oci_resource_anchor_name"` + MaintenanceType fwtypes.StringEnum[odbtypes.DbNodeMaintenanceType] `tfsdk:"maintenance_type"` + MemorySizeInGBs types.Int32 `tfsdk:"memory_size_in_gbs"` + SoftwareStorageSizeInGB types.Int32 `tfsdk:"software_storage_size_in_gbs"` + CreatedAt timetypes.RFC3339 `tfsdk:"created_at"` + TimeMaintenanceWindowEnd types.String `tfsdk:"time_maintenance_window_end"` + TimeMaintenanceWindowStart types.String `tfsdk:"time_maintenance_window_start"` + TotalCpuCoreCount types.Int32 `tfsdk:"total_cpu_core_count"` + Vnic2Id types.String `tfsdk:"vnic2_id"` + VnicId types.String `tfsdk:"vnic_id"` + PrivateIpAddress types.String `tfsdk:"private_ip_address"` + FloatingIpAddress types.String `tfsdk:"floating_ip_address"` +} diff --git a/internal/service/odb/db_node_data_source_test.go b/internal/service/odb/db_node_data_source_test.go new file mode 100644 index 000000000000..ca90b6016b3b --- /dev/null +++ b/internal/service/odb/db_node_data_source_test.go @@ -0,0 +1,214 @@ +// Copyright 
(c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package odb_test + +import ( + "context" + "errors" + "fmt" + "testing" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/odb" + odbtypes "github.com/aws/aws-sdk-go-v2/service/odb/types" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/terraform" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/create" + "github.com/hashicorp/terraform-provider-aws/internal/errs" + tfodb "github.com/hashicorp/terraform-provider-aws/internal/service/odb" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + "github.com/hashicorp/terraform-provider-aws/names" +) + +type testDbNodeDataSourceTest struct { + exaDisplayNamePrefix string + oracleDBNetworkDisplayNamePrefix string + vmClusterDisplayNamePrefix string +} + +var dbNodeDataSourceTestEntity = testDbNodeDataSourceTest{ + exaDisplayNamePrefix: "Ofake-exa", + oracleDBNetworkDisplayNamePrefix: "odb-net", + vmClusterDisplayNamePrefix: "Ofake-vmc", +} + +// Acceptance test access AWS and cost money to run. 
+func TestAccODBDBNodeDataSource_basic(t *testing.T) { + ctx := acctest.Context(t) + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + publicKey, _, err := sdkacctest.RandSSHKeyPair(acctest.DefaultEmailAddress) + if err != nil { + t.Fatal(err) + return + } + var dbNode odb.GetDbNodeOutput + dataSourceName := "data.aws_odb_db_node.test" + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.ODBServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: dbNodeDataSourceTestEntity.testAccCheckDBNodeDestroyed(ctx), + Steps: []resource.TestStep{ + { + Config: dbNodeDataSourceTestEntity.dbNodeDataSourceBasicConfig(publicKey), + Check: resource.ComposeAggregateTestCheckFunc( + dbNodeDataSourceTestEntity.testAccCheckDBNodeExists(ctx, dataSourceName, &dbNode), + ), + }, + }, + }) +} + +func (testDbNodeDataSourceTest) testAccCheckDBNodeExists(ctx context.Context, name string, output *odb.GetDbNodeOutput) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[name] + if !ok { + return create.Error(names.ODB, create.ErrActionCheckingExistence, tfodb.DSNameDBServer, name, errors.New("not found")) + } + conn := acctest.Provider.Meta().(*conns.AWSClient).ODBClient(ctx) + var dbNodeId = rs.Primary.ID + var attributes = rs.Primary.Attributes + cloudVmClusterId := attributes["cloud_vm_cluster_id"] + input := odb.GetDbNodeInput{ + CloudVmClusterId: &cloudVmClusterId, + DbNodeId: &dbNodeId, + } + resp, err := conn.GetDbNode(ctx, &input) + if err != nil { + return create.Error(names.ODB, create.ErrActionCheckingExistence, tfodb.DSNameDBNode, rs.Primary.ID, err) + } + *output = *resp + return nil + } +} + +func (testDbNodeDataSourceTest) testAccCheckDBNodeDestroyed(ctx context.Context) resource.TestCheckFunc { + return func(s *terraform.State) error { + conn := 
acctest.Provider.Meta().(*conns.AWSClient).ODBClient(ctx) + for _, rs := range s.RootModule().Resources { + if rs.Type != "aws_odb_cloud_vm_cluster" { + continue + } + err := dbNodeDataSourceTestEntity.findVmCluster(ctx, conn, rs.Primary.ID) + if tfresource.NotFound(err) { + return nil + } + if err != nil { + return create.Error(names.ODB, create.ErrActionCheckingDestroyed, tfodb.DSNameDBServer, rs.Primary.ID, err) + } + return create.Error(names.ODB, create.ErrActionCheckingDestroyed, tfodb.DSNameDBServer, rs.Primary.ID, errors.New("not destroyed")) + } + return nil + } +} + +func (testDbNodeDataSourceTest) findVmCluster(ctx context.Context, conn *odb.Client, id string) error { + input := odb.GetCloudVmClusterInput{ + CloudVmClusterId: aws.String(id), + } + output, err := conn.GetCloudVmCluster(ctx, &input) + if err != nil { + if errs.IsA[*odbtypes.ResourceNotFoundException](err) { + return &retry.NotFoundError{ + LastError: err, + LastRequest: &input, + } + } + return err + } + if output == nil || output.CloudVmCluster == nil { + return tfresource.NewEmptyResultError(&input) + } + return nil +} + +func (testDbNodeDataSourceTest) dbNodeDataSourceBasicConfig(publicKey string) string { + vmClusterConfig := dbNodeDataSourceTestEntity.vmClusterBasicConfig(publicKey) + + return fmt.Sprintf(` +%s + +data "aws_odb_db_nodes" "test" { + cloud_vm_cluster_id = aws_odb_cloud_vm_cluster.test.id +} + +data "aws_odb_db_node" "test" { + id = data.aws_odb_db_nodes.test.db_nodes[0].id + cloud_vm_cluster_id = aws_odb_cloud_vm_cluster.test.id +} + +`, vmClusterConfig) +} + +func (testDbNodeDataSourceTest) vmClusterBasicConfig(publicKey string) string { + exaInfraDisplayName := sdkacctest.RandomWithPrefix(dbNodeDataSourceTestEntity.exaDisplayNamePrefix) + oracleDBNetDisplayName := sdkacctest.RandomWithPrefix(dbNodeDataSourceTestEntity.oracleDBNetworkDisplayNamePrefix) + vmcDisplayName := sdkacctest.RandomWithPrefix(dbNodeDataSourceTestEntity.vmClusterDisplayNamePrefix) + 
dsTfCodeVmCluster := fmt.Sprintf(` + + +resource "aws_odb_network" "test" { + display_name = %[1]q + availability_zone_id = "use1-az6" + client_subnet_cidr = "10.2.0.0/24" + backup_subnet_cidr = "10.2.1.0/24" + s3_access = "DISABLED" + zero_etl_access = "DISABLED" +} + +resource "aws_odb_cloud_exadata_infrastructure" "test" { + display_name = %[2]q + shape = "Exadata.X9M" + storage_count = 3 + compute_count = 2 + availability_zone_id = "use1-az6" + maintenance_window { + custom_action_timeout_in_mins = 16 + is_custom_action_timeout_enabled = true + patching_mode = "ROLLING" + preference = "NO_PREFERENCE" + } +} + +data "aws_odb_db_servers" "test" { + cloud_exadata_infrastructure_id = aws_odb_cloud_exadata_infrastructure.test.id +} + +resource "aws_odb_cloud_vm_cluster" "test" { + display_name = %[3]q + cloud_exadata_infrastructure_id = aws_odb_cloud_exadata_infrastructure.test.id + cpu_core_count = 6 + gi_version = "23.0.0.0" + hostname_prefix = "apollo12" + ssh_public_keys = ["%[4]s"] + odb_network_id = aws_odb_network.test.id + is_local_backup_enabled = true + is_sparse_diskgroup_enabled = true + license_model = "LICENSE_INCLUDED" + data_storage_size_in_tbs = 20.0 + db_servers = [for db_server in data.aws_odb_db_servers.test.db_servers : db_server.id] + db_node_storage_size_in_gbs = 120.0 + memory_size_in_gbs = 60 + data_collection_options { + is_diagnostics_events_enabled = false + is_health_monitoring_enabled = false + is_incident_logs_enabled = false + } + tags = { + "env" = "dev" + } + +} + +`, oracleDBNetDisplayName, exaInfraDisplayName, vmcDisplayName, publicKey) + return dsTfCodeVmCluster +} diff --git a/internal/service/odb/db_nodes_data_source.go b/internal/service/odb/db_nodes_data_source.go new file mode 100644 index 000000000000..50ce75ac6c29 --- /dev/null +++ b/internal/service/odb/db_nodes_data_source.go @@ -0,0 +1,114 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package odb + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/service/odb" + odbtypes "github.com/aws/aws-sdk-go-v2/service/odb/types" + "github.com/hashicorp/terraform-plugin-framework-timetypes/timetypes" + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-provider-aws/internal/create" + "github.com/hashicorp/terraform-provider-aws/internal/framework" + "github.com/hashicorp/terraform-provider-aws/internal/framework/flex" + fwtypes "github.com/hashicorp/terraform-provider-aws/internal/framework/types" + "github.com/hashicorp/terraform-provider-aws/names" +) + +// @FrameworkDataSource("aws_odb_db_nodes", name="Db Nodes") +func newDataSourceDBNodes(context.Context) (datasource.DataSourceWithConfigure, error) { + return &dataSourceDbNodesList{}, nil +} + +const ( + DSNameDBNodesList = "DB Nodes List Data Source" +) + +type dataSourceDbNodesList struct { + framework.DataSourceWithModel[dbNodesListDataSourceModel] +} + +func (d *dataSourceDbNodesList) Schema(ctx context.Context, req datasource.SchemaRequest, resp *datasource.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: map[string]schema.Attribute{ + "cloud_vm_cluster_id": schema.StringAttribute{ + Required: true, + Description: "Id of the cloud VM cluster. The unique identifier of the VM cluster.", + }, + "db_nodes": schema.ListAttribute{ + CustomType: fwtypes.NewListNestedObjectTypeOf[dbNodeForDbNodesListDataSourceModel](ctx), + Computed: true, + Description: "The list of DB nodes along with their properties.", + }, + }, + } +} + +func (d *dataSourceDbNodesList) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + conn := d.Meta().ODBClient(ctx) + var data dbNodesListDataSourceModel + resp.Diagnostics.Append(req.Config.Get(ctx, &data)...) 
+ if resp.Diagnostics.HasError() { + return + } + input := odb.ListDbNodesInput{ + CloudVmClusterId: data.CloudVmClusterId.ValueStringPointer(), + } + var out odb.ListDbNodesOutput + paginator := odb.NewListDbNodesPaginator(conn, &input) + for paginator.HasMorePages() { + page, err := paginator.NextPage(ctx) + if err != nil { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.ODB, create.ErrActionReading, DSNameDBNodesList, data.CloudVmClusterId.ValueString(), err), + err.Error(), + ) + return + } + out.DbNodes = append(out.DbNodes, page.DbNodes...) + } + resp.Diagnostics.Append(flex.Flatten(ctx, out, &data)...) + if resp.Diagnostics.HasError() { + return + } + resp.Diagnostics.Append(resp.State.Set(ctx, &data)...) +} + +type dbNodesListDataSourceModel struct { + framework.WithRegionModel + CloudVmClusterId types.String `tfsdk:"cloud_vm_cluster_id"` + DbNodes fwtypes.ListNestedObjectValueOf[dbNodeForDbNodesListDataSourceModel] `tfsdk:"db_nodes"` +} + +type dbNodeForDbNodesListDataSourceModel struct { + AdditionalDetails types.String `tfsdk:"additional_details"` + BackupIpId types.String `tfsdk:"backup_ip_id"` + BackupVnic2Id types.String `tfsdk:"backup_vnic2_id"` + BackupVnicId types.String `tfsdk:"backup_vnic_id"` + CpuCoreCount types.Int32 `tfsdk:"cpu_core_count"` + CreatedAt timetypes.RFC3339 `tfsdk:"created_at"` + DbNodeArn types.String `tfsdk:"arn"` + DbNodeId types.String `tfsdk:"id"` + DbNodeStorageSizeInGBs types.Int32 `tfsdk:"db_node_storage_size"` + DbServerId types.String `tfsdk:"db_server_id"` + DbSystemId types.String `tfsdk:"db_system_id"` + FaultDomain types.String `tfsdk:"fault_domain"` + HostIpId types.String `tfsdk:"host_ip_id"` + Hostname types.String `tfsdk:"hostname"` + MaintenanceType fwtypes.StringEnum[odbtypes.DbNodeMaintenanceType] `tfsdk:"maintenance_type"` + MemorySizeInGBs types.Int32 `tfsdk:"memory_size"` + OciResourceAnchorName types.String `tfsdk:"oci_resource_anchor_name"` + Ocid types.String `tfsdk:"ocid"` + 
SoftwareStorageSizeInGB types.Int32 `tfsdk:"software_storage_size"` + Status fwtypes.StringEnum[odbtypes.DbNodeResourceStatus] `tfsdk:"status"` + StatusReason types.String `tfsdk:"status_reason"` + TimeMaintenanceWindowEnd types.String `tfsdk:"time_maintenance_window_end"` + TimeMaintenanceWindowStart types.String `tfsdk:"time_maintenance_window_start"` + TotalCpuCoreCount types.Int32 `tfsdk:"total_cpu_core_count"` + Vnic2Id types.String `tfsdk:"vnic2_id"` + VnicId types.String `tfsdk:"vnic_id"` +} diff --git a/internal/service/odb/db_nodes_data_source_test.go b/internal/service/odb/db_nodes_data_source_test.go new file mode 100644 index 000000000000..eda223f69cb9 --- /dev/null +++ b/internal/service/odb/db_nodes_data_source_test.go @@ -0,0 +1,211 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package odb_test + +import ( + "context" + "errors" + "fmt" + "strconv" + "testing" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/odb" + odbtypes "github.com/aws/aws-sdk-go-v2/service/odb/types" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/terraform" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/create" + "github.com/hashicorp/terraform-provider-aws/internal/errs" + tfodb "github.com/hashicorp/terraform-provider-aws/internal/service/odb" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + "github.com/hashicorp/terraform-provider-aws/names" +) + +type dbNodesListDataSourceTest struct { + exadataInfraDisplayNamePrefix string + oracleDBNetworkDisplayNamePrefix string + vmClusterDisplayNamePrefix string +} + +var dbNodesListDataSourceTestEntity = 
dbNodesListDataSourceTest{ + exadataInfraDisplayNamePrefix: "Ofake-exa", + oracleDBNetworkDisplayNamePrefix: "odbn", + vmClusterDisplayNamePrefix: "Ofake-vmc", +} + +// Acceptance test access AWS and cost money to run. +func TestAccODBDBNodesListDataSource_basic(t *testing.T) { + ctx := acctest.Context(t) + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + publicKey, _, err := sdkacctest.RandSSHKeyPair(acctest.DefaultEmailAddress) + if err != nil { + t.Fatal(err) + return + } + var dbNodesList odb.ListDbNodesOutput + dbNodesListsDataSourceName := "data.aws_odb_db_nodes.test" + vmClusterListsResourceName := "aws_odb_cloud_vm_cluster.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.ODBServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: dbNodesListDataSourceTestEntity.testAccCheckDBNodesDestroyed(ctx), + Steps: []resource.TestStep{ + { + Config: dbNodesListDataSourceTestEntity.basicDBNodesListDataSource(publicKey), + Check: resource.ComposeAggregateTestCheckFunc( + dbNodesListDataSourceTestEntity.testAccCheckDBNodesListExists(ctx, vmClusterListsResourceName, &dbNodesList), + resource.TestCheckResourceAttr(dbNodesListsDataSourceName, "aws_odb_db_nodes.db_nodes.#", strconv.Itoa(len(dbNodesList.DbNodes))), + ), + }, + }, + }) +} + +func (dbNodesListDataSourceTest) testAccCheckDBNodesListExists(ctx context.Context, name string, output *odb.ListDbNodesOutput) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[name] + if !ok { + return create.Error(names.ODB, create.ErrActionCheckingExistence, tfodb.DSNameDBNodesList, name, errors.New("not found")) + } + conn := acctest.Provider.Meta().(*conns.AWSClient).ODBClient(ctx) + var vmClusterId = &rs.Primary.ID + input := odb.ListDbNodesInput{ + CloudVmClusterId: vmClusterId, + } + lisOfDbNodes := 
odb.ListDbNodesOutput{} + paginator := odb.NewListDbNodesPaginator(conn, &input) + for paginator.HasMorePages() { + page, err := paginator.NextPage(ctx) + if err != nil { + return err + } + lisOfDbNodes.DbNodes = append(lisOfDbNodes.DbNodes, page.DbNodes...) + } + *output = lisOfDbNodes + return nil + } +} + +func (dbNodesListDataSourceTest) testAccCheckDBNodesDestroyed(ctx context.Context) resource.TestCheckFunc { + return func(s *terraform.State) error { + conn := acctest.Provider.Meta().(*conns.AWSClient).ODBClient(ctx) + for _, rs := range s.RootModule().Resources { + if rs.Type != "aws_odb_cloud_vm_cluster" { + continue + } + _, err := dbNodesListDataSourceTestEntity.findVmCluster(ctx, conn, rs.Primary.ID) + if tfresource.NotFound(err) { + return nil + } + if err != nil { + return create.Error(names.ODB, create.ErrActionCheckingDestroyed, tfodb.DSNameDBServersList, rs.Primary.ID, err) + } + return create.Error(names.ODB, create.ErrActionCheckingDestroyed, tfodb.DSNameDBServersList, rs.Primary.ID, errors.New("not destroyed")) + } + return nil + } +} + +func (dbNodesListDataSourceTest) findVmCluster(ctx context.Context, conn *odb.Client, id string) (*odbtypes.CloudVmCluster, error) { + input := odb.GetCloudVmClusterInput{ + CloudVmClusterId: aws.String(id), + } + output, err := conn.GetCloudVmCluster(ctx, &input) + if err != nil { + if errs.IsA[*odbtypes.ResourceNotFoundException](err) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: &input, + } + } + return nil, err + } + if output == nil || output.CloudVmCluster == nil { + return nil, tfresource.NewEmptyResultError(&input) + } + return output.CloudVmCluster, nil +} + +func (dbNodesListDataSourceTest) basicDBNodesListDataSource(publicKey string) string { + vmCluster := dbNodesListDataSourceTestEntity.vmClusterBasic(publicKey) + return fmt.Sprintf(` + + %s + +data "aws_odb_db_nodes" "test" { + cloud_vm_cluster_id = aws_odb_cloud_vm_cluster.test.id +} +`, vmCluster) +} + +func 
(dbNodesListDataSourceTest) vmClusterBasic(publicKey string) string { + odbNetRName := sdkacctest.RandomWithPrefix(dbNodesListDataSourceTestEntity.oracleDBNetworkDisplayNamePrefix) + exaInfraRName := sdkacctest.RandomWithPrefix(dbNodesListDataSourceTestEntity.exadataInfraDisplayNamePrefix) + vmcDisplayName := sdkacctest.RandomWithPrefix(dbNodesListDataSourceTestEntity.vmClusterDisplayNamePrefix) + return fmt.Sprintf(` + +resource "aws_odb_network" "test" { + display_name = %[1]q + availability_zone_id = "use1-az6" + client_subnet_cidr = "10.2.0.0/24" + backup_subnet_cidr = "10.2.1.0/24" + s3_access = "DISABLED" + zero_etl_access = "DISABLED" +} + +resource "aws_odb_cloud_exadata_infrastructure" "test" { + display_name = %[2]q + shape = "Exadata.X9M" + storage_count = 3 + compute_count = 2 + availability_zone_id = "use1-az6" + maintenance_window { + custom_action_timeout_in_mins = 16 + is_custom_action_timeout_enabled = true + patching_mode = "ROLLING" + preference = "NO_PREFERENCE" + } +} + +data "aws_odb_db_servers" "test" { + cloud_exadata_infrastructure_id = aws_odb_cloud_exadata_infrastructure.test.id +} + +resource "aws_odb_cloud_vm_cluster" "test" { + display_name = %[3]q + cloud_exadata_infrastructure_id = aws_odb_cloud_exadata_infrastructure.test.id + cpu_core_count = 6 + gi_version = "23.0.0.0" + hostname_prefix = "apollo12" + ssh_public_keys = ["%[4]s"] + odb_network_id = aws_odb_network.test.id + is_local_backup_enabled = true + is_sparse_diskgroup_enabled = true + license_model = "LICENSE_INCLUDED" + data_storage_size_in_tbs = 20.0 + db_servers = [for db_server in data.aws_odb_db_servers.test.db_servers : db_server.id] + db_node_storage_size_in_gbs = 120.0 + memory_size_in_gbs = 60 + data_collection_options { + is_diagnostics_events_enabled = false + is_health_monitoring_enabled = false + is_incident_logs_enabled = false + } + tags = { + "env" = "dev" + } + +} +`, odbNetRName, exaInfraRName, vmcDisplayName, publicKey) +} diff --git 
a/internal/service/odb/db_server_data_source.go b/internal/service/odb/db_server_data_source.go new file mode 100644 index 000000000000..3d8d2f19db78 --- /dev/null +++ b/internal/service/odb/db_server_data_source.go @@ -0,0 +1,194 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package odb + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/service/odb" + odbtypes "github.com/aws/aws-sdk-go-v2/service/odb/types" + "github.com/hashicorp/terraform-plugin-framework-timetypes/timetypes" + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-provider-aws/internal/create" + "github.com/hashicorp/terraform-provider-aws/internal/framework" + "github.com/hashicorp/terraform-provider-aws/internal/framework/flex" + fwtypes "github.com/hashicorp/terraform-provider-aws/internal/framework/types" + "github.com/hashicorp/terraform-provider-aws/names" +) + +// @FrameworkDataSource("aws_odb_db_server", name="Db Server") +func newDataSourceDBServer(context.Context) (datasource.DataSourceWithConfigure, error) { + return &dataSourceDbServer{}, nil +} + +const ( + DSNameDBServer = "DB Server Data Source" +) + +type dataSourceDbServer struct { + framework.DataSourceWithModel[dbServerDataSourceModel] +} + +func (d *dataSourceDbServer) Schema(ctx context.Context, req datasource.SchemaRequest, resp *datasource.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: map[string]schema.Attribute{ + names.AttrID: schema.StringAttribute{ + Description: "The identifier of the the database server.", + Required: true, + }, + "cloud_exadata_infrastructure_id": schema.StringAttribute{ + Description: "The identifier of the database server to retrieve information about.", + Required: true, + }, + "autonomous_virtual_machine_ids": schema.ListAttribute{ + Computed: true, + CustomType: 
fwtypes.ListOfStringType, + ElementType: types.StringType, + Description: "The list of unique identifiers for the Autonomous VMs associated with this database server.", + }, + "autonomous_vm_cluster_ids": schema.ListAttribute{ + Computed: true, + CustomType: fwtypes.ListOfStringType, + ElementType: types.StringType, + Description: "The OCID of the autonomous VM clusters that are associated with the database server.", + }, + "compute_model": schema.StringAttribute{ + Computed: true, + CustomType: fwtypes.StringEnumType[odbtypes.ComputeModel](), + Description: " The compute model of the database server.", + }, + names.AttrStatus: schema.StringAttribute{ + CustomType: fwtypes.StringEnumType[odbtypes.ResourceStatus](), + Computed: true, + Description: "The status of the database server.", + }, + names.AttrStatusReason: schema.StringAttribute{ + Computed: true, + Description: "Additional information about the current status of the database server.", + }, + "cpu_core_count": schema.Int32Attribute{ + Computed: true, + Description: "The number of CPU cores enabled on the database server.", + }, + "db_node_storage_size_in_gbs": schema.Int32Attribute{ + Computed: true, + Description: "The allocated local node storage in GBs on the database server.", + }, + "db_server_patching_details": schema.ListAttribute{ + CustomType: fwtypes.NewListNestedObjectTypeOf[dbNodePatchingDetailsDbServerDataSourceModel](ctx), + Computed: true, + Description: "The scheduling details for the quarterly maintenance window. 
Patching and\n" + + "system updates take place during the maintenance window.", + }, + names.AttrDisplayName: schema.StringAttribute{ + Computed: true, + Description: "The display name of the database server.", + }, + "exadata_infrastructure_id": schema.StringAttribute{ + Computed: true, + Description: "The exadata infrastructure ID of the database server.", + }, + "ocid": schema.StringAttribute{ + Computed: true, + Description: "The OCID of the database server to retrieve information about.", + }, + "oci_resource_anchor_name": schema.StringAttribute{ + Computed: true, + Description: "The name of the OCI resource anchor.", + }, + "max_cpu_count": schema.Int32Attribute{ + Computed: true, + Description: "The total number of CPU cores available.", + }, + "max_db_node_storage_in_gbs": schema.Int32Attribute{ + Computed: true, + Description: "The total local node storage available in GBs.", + }, + "max_memory_in_gbs": schema.Int32Attribute{ + Computed: true, + Description: "The total memory available in GBs.", + }, + "memory_size_in_gbs": schema.Int32Attribute{ + Computed: true, + Description: "The allocated memory in GBs on the database server.", + }, + "shape": schema.StringAttribute{ + Computed: true, + Description: "The shape of the database server. 
The shape determines the amount of CPU, " + + "storage, and memory resources available.", + }, + names.AttrCreatedAt: schema.StringAttribute{ + Computed: true, + CustomType: timetypes.RFC3339Type{}, + Description: "The date and time when the database server was created.", + }, + "vm_cluster_ids": schema.ListAttribute{ + Computed: true, + CustomType: fwtypes.ListOfStringType, + ElementType: types.StringType, + Description: "The OCID of the VM clusters that are associated with the database server.", + }, + }, + } +} + +func (d *dataSourceDbServer) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + conn := d.Meta().ODBClient(ctx) + var data dbServerDataSourceModel + resp.Diagnostics.Append(req.Config.Get(ctx, &data)...) + if resp.Diagnostics.HasError() { + return + } + input := odb.GetDbServerInput{ + DbServerId: data.DbServerID.ValueStringPointer(), + CloudExadataInfrastructureId: data.CloudExadataInfrastructureID.ValueStringPointer(), + } + out, err := conn.GetDbServer(ctx, &input) + if err != nil { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.ODB, create.ErrActionReading, DSNameDBServer, data.DbServerID.ValueString(), err), + err.Error(), + ) + return + } + resp.Diagnostics.Append(flex.Flatten(ctx, out.DbServer, &data)...) + if resp.Diagnostics.HasError() { + return + } + resp.Diagnostics.Append(resp.State.Set(ctx, &data)...) 
+} + +type dbServerDataSourceModel struct { + framework.WithRegionModel + DbServerID types.String `tfsdk:"id"` + CloudExadataInfrastructureID types.String `tfsdk:"cloud_exadata_infrastructure_id"` + Status fwtypes.StringEnum[odbtypes.ResourceStatus] `tfsdk:"status"` + StatusReason types.String `tfsdk:"status_reason"` + CpuCoreCount types.Int32 `tfsdk:"cpu_core_count"` + DbNodeStorageSizeInGBs types.Int32 `tfsdk:"db_node_storage_size_in_gbs"` + DbServerPatchingDetails fwtypes.ListNestedObjectValueOf[dbNodePatchingDetailsDbServerDataSourceModel] `tfsdk:"db_server_patching_details"` + DisplayName types.String `tfsdk:"display_name"` + ExadataInfrastructureId types.String `tfsdk:"exadata_infrastructure_id"` + OCID types.String `tfsdk:"ocid"` + OciResourceAnchorName types.String `tfsdk:"oci_resource_anchor_name"` + MaxCpuCount types.Int32 `tfsdk:"max_cpu_count"` + MaxDbNodeStorageInGBs types.Int32 `tfsdk:"max_db_node_storage_in_gbs"` + MaxMemoryInGBs types.Int32 `tfsdk:"max_memory_in_gbs"` + MemorySizeInGBs types.Int32 `tfsdk:"memory_size_in_gbs"` + Shape types.String `tfsdk:"shape"` + CreatedAt timetypes.RFC3339 `tfsdk:"created_at" ` + VmClusterIds fwtypes.ListOfString `tfsdk:"vm_cluster_ids"` + ComputeModel fwtypes.StringEnum[odbtypes.ComputeModel] `tfsdk:"compute_model"` + AutonomousVmClusterIds fwtypes.ListOfString `tfsdk:"autonomous_vm_cluster_ids"` + AutonomousVirtualMachineIds fwtypes.ListOfString `tfsdk:"autonomous_virtual_machine_ids"` +} + +type dbNodePatchingDetailsDbServerDataSourceModel struct { + EstimatedPatchDuration types.Int32 `tfsdk:"estimated_patch_duration"` + PatchingStatus types.String `tfsdk:"patching_status"` + TimePatchingEnded types.String `tfsdk:"time_patching_ended"` + TimePatchingStarted types.String `tfsdk:"time_patching_started"` +} diff --git a/internal/service/odb/db_server_data_source_test.go b/internal/service/odb/db_server_data_source_test.go new file mode 100644 index 000000000000..329eed1c692b --- /dev/null +++ 
b/internal/service/odb/db_server_data_source_test.go @@ -0,0 +1,169 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package odb_test + +import ( + "context" + "errors" + "fmt" + "testing" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/odb" + odbtypes "github.com/aws/aws-sdk-go-v2/service/odb/types" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/terraform" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/create" + "github.com/hashicorp/terraform-provider-aws/internal/errs" + tfodb "github.com/hashicorp/terraform-provider-aws/internal/service/odb" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + "github.com/hashicorp/terraform-provider-aws/names" +) + +type testDbServerDataSourceTest struct { + exaDisplayNamePrefix string +} + +var dbServerDataSourceTestEntity = testDbServerDataSourceTest{ + exaDisplayNamePrefix: "Ofake-exa", +} + +// Acceptance test access AWS and cost money to run. 
+func TestAccODBDBServerDataSource_basic(t *testing.T) { + ctx := acctest.Context(t) + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + var dbServer odb.GetDbServerOutput + + dataSourceName := "data.aws_odb_db_server.test" + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.ODBServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: dbServerDataSourceTestEntity.testAccCheckDBServersDestroyed(ctx), + Steps: []resource.TestStep{ + { + Config: dbServerDataSourceTestEntity.basicDBServerDataSourceConfig(), + Check: resource.ComposeAggregateTestCheckFunc( + dbServerDataSourceTestEntity.testAccCheckDBServerExists(ctx, dataSourceName, &dbServer), + ), + }, + }, + }) +} + +func (testDbServerDataSourceTest) testAccCheckDBServerExists(ctx context.Context, name string, output *odb.GetDbServerOutput) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[name] + if !ok { + return create.Error(names.ODB, create.ErrActionCheckingExistence, tfodb.DSNameDBServer, name, errors.New("not found")) + } + conn := acctest.Provider.Meta().(*conns.AWSClient).ODBClient(ctx) + var dbServerId = rs.Primary.ID + var attributes = rs.Primary.Attributes + exaId := attributes["exadata_infrastructure_id"] + resp, err := dbServerDataSourceTestEntity.findDBServer(ctx, conn, &dbServerId, &exaId) + if err != nil { + return create.Error(names.ODB, create.ErrActionCheckingExistence, tfodb.DSNameDBServer, rs.Primary.ID, err) + } + *output = *resp + return nil + } +} + +func (testDbServerDataSourceTest) testAccCheckDBServersDestroyed(ctx context.Context) resource.TestCheckFunc { + return func(s *terraform.State) error { + conn := acctest.Provider.Meta().(*conns.AWSClient).ODBClient(ctx) + for _, rs := range s.RootModule().Resources { + if rs.Type != "aws_odb_cloud_exadata_infrastructure" { + continue + } + err 
:= dbServerDataSourceTestEntity.findExaInfra(ctx, conn, rs.Primary.ID) + if tfresource.NotFound(err) { + return nil + } + if err != nil { + return create.Error(names.ODB, create.ErrActionCheckingDestroyed, tfodb.DSNameDBServer, rs.Primary.ID, err) + } + return create.Error(names.ODB, create.ErrActionCheckingDestroyed, tfodb.DSNameDBServer, rs.Primary.ID, errors.New("not destroyed")) + } + return nil + } +} + +func (testDbServerDataSourceTest) findExaInfra(ctx context.Context, conn *odb.Client, id string) error { + input := odb.GetCloudExadataInfrastructureInput{ + CloudExadataInfrastructureId: aws.String(id), + } + out, err := conn.GetCloudExadataInfrastructure(ctx, &input) + if err != nil { + if errs.IsA[*odbtypes.ResourceNotFoundException](err) { + return &retry.NotFoundError{ + LastError: err, + LastRequest: &input, + } + } + return err + } + if out == nil || out.CloudExadataInfrastructure == nil { + return tfresource.NewEmptyResultError(&input) + } + return nil +} + +func (testDbServerDataSourceTest) findDBServer(ctx context.Context, conn *odb.Client, dbServerId *string, exaInfraId *string) (*odb.GetDbServerOutput, error) { + inputWithExaId := odb.GetDbServerInput{ + DbServerId: dbServerId, + CloudExadataInfrastructureId: exaInfraId, + } + output, err := conn.GetDbServer(ctx, &inputWithExaId) + if err != nil { + return nil, err + } + return output, nil +} + +func (testDbServerDataSourceTest) basicDBServerDataSourceConfig() string { + exaInfraDisplayName := sdkacctest.RandomWithPrefix(dbServersListDataSourceTestEntity.displayNamePrefix) + exaInfra := dbServerDataSourceTestEntity.exaInfra(exaInfraDisplayName) + + return fmt.Sprintf(` +%s + +data "aws_odb_db_servers" "test" { + cloud_exadata_infrastructure_id = aws_odb_cloud_exadata_infrastructure.test.id +} + +data "aws_odb_db_server" "test" { + id = data.aws_odb_db_servers.test.db_servers[0].id + cloud_exadata_infrastructure_id = aws_odb_cloud_exadata_infrastructure.test.id +} +`, exaInfra) +} + +func 
(testDbServerDataSourceTest) exaInfra(rName string) string { + exaRes := fmt.Sprintf(` +resource "aws_odb_cloud_exadata_infrastructure" "test" { + display_name = "%[1]s" + shape = "Exadata.X9M" + storage_count = 3 + compute_count = 2 + availability_zone_id = "use1-az6" + maintenance_window { + custom_action_timeout_in_mins = 16 + is_custom_action_timeout_enabled = true + patching_mode = "ROLLING" + preference = "NO_PREFERENCE" + } +} +`, rName) + return exaRes +} diff --git a/internal/service/odb/db_servers_data_source.go b/internal/service/odb/db_servers_data_source.go new file mode 100644 index 000000000000..ab71f19f74cc --- /dev/null +++ b/internal/service/odb/db_servers_data_source.go @@ -0,0 +1,116 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package odb + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/service/odb" + odbtypes "github.com/aws/aws-sdk-go-v2/service/odb/types" + "github.com/hashicorp/terraform-plugin-framework-timetypes/timetypes" + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-provider-aws/internal/create" + "github.com/hashicorp/terraform-provider-aws/internal/framework" + "github.com/hashicorp/terraform-provider-aws/internal/framework/flex" + fwtypes "github.com/hashicorp/terraform-provider-aws/internal/framework/types" + "github.com/hashicorp/terraform-provider-aws/names" +) + +// @FrameworkDataSource("aws_odb_db_servers", name="Db Servers") +func newDataSourceDBServers(context.Context) (datasource.DataSourceWithConfigure, error) { + return &dataSourceDbServersList{}, nil +} + +const ( + DSNameDBServersList = "DB Servers List Data Source" +) + +type dataSourceDbServersList struct { + framework.DataSourceWithModel[dbServersListDataSourceModel] +} + +func (d *dataSourceDbServersList) Schema(ctx context.Context, req 
datasource.SchemaRequest, resp *datasource.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: map[string]schema.Attribute{ + "cloud_exadata_infrastructure_id": schema.StringAttribute{ + Required: true, + Description: "The cloud exadata infrastructure ID. Mandatory field.", + }, + "db_servers": schema.ListAttribute{ + CustomType: fwtypes.NewListNestedObjectTypeOf[dbServerForDbServersListDataSourceModel](ctx), + Computed: true, + Description: "List of database servers associated with cloud_exadata_infrastructure_id.", + }, + }, + } +} + +// Data sources only have a read method. +func (d *dataSourceDbServersList) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + conn := d.Meta().ODBClient(ctx) + var data dbServersListDataSourceModel + resp.Diagnostics.Append(req.Config.Get(ctx, &data)...) + if resp.Diagnostics.HasError() { + return + } + input := odb.ListDbServersInput{} + if !data.CloudExadataInfrastructureId.IsNull() && !data.CloudExadataInfrastructureId.IsUnknown() { + input.CloudExadataInfrastructureId = data.CloudExadataInfrastructureId.ValueStringPointer() + } + paginator := odb.NewListDbServersPaginator(conn, &input) + var out odb.ListDbServersOutput + for paginator.HasMorePages() { + page, err := paginator.NextPage(ctx) + if err != nil { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.ODB, create.ErrActionReading, DSNameDBServersList, "", err), + err.Error(), + ) + } + out.DbServers = append(out.DbServers, page.DbServers...) + } + resp.Diagnostics.Append(flex.Flatten(ctx, out, &data)...) + if resp.Diagnostics.HasError() { + return + } + resp.Diagnostics.Append(resp.State.Set(ctx, &data)...) 
+} + +type dbServersListDataSourceModel struct { + framework.WithRegionModel + CloudExadataInfrastructureId types.String `tfsdk:"cloud_exadata_infrastructure_id"` + DbServers fwtypes.ListNestedObjectValueOf[dbServerForDbServersListDataSourceModel] `tfsdk:"db_servers"` +} + +type dbServerForDbServersListDataSourceModel struct { + AutonomousVirtualMachineIds fwtypes.ListOfString `tfsdk:"autonomous_virtual_machine_ids"` + AutonomousVmClusterIds fwtypes.ListOfString `tfsdk:"autonomous_vm_cluster_ids"` + ComputeModel fwtypes.StringEnum[odbtypes.ComputeModel] `tfsdk:"compute_model"` + CreatedAt timetypes.RFC3339 `tfsdk:"created_at"` + CpuCoreCount types.Int32 `tfsdk:"cpu_core_count"` + DbNodeStorageSizeInGBs types.Int32 `tfsdk:"db_node_storage_size_in_gbs"` + DbServerId types.String `tfsdk:"id"` + DbServerPatchingDetails fwtypes.ListNestedObjectValueOf[dbNodePatchingDetailsForDbServersListDataSourceModel] `tfsdk:"db_server_patching_details"` + DisplayName types.String `tfsdk:"display_name"` + ExadataInfrastructureId types.String `tfsdk:"exadata_infrastructure_id"` + MaxCpuCount types.Int32 `tfsdk:"max_cpu_count"` + MaxDbNodeStorageInGBs types.Int32 `tfsdk:"max_db_node_storage_in_gbs"` + MaxMemoryInGBs types.Int32 `tfsdk:"max_memory_in_gbs"` + MemorySizeInGBs types.Int32 `tfsdk:"memory_size_in_gbs"` + OCID types.String `tfsdk:"ocid"` + OciResourceAnchorName types.String `tfsdk:"oci_resource_anchor_name"` + Shape types.String `tfsdk:"shape"` + Status fwtypes.StringEnum[odbtypes.ResourceStatus] `tfsdk:"status"` + StatusReason types.String `tfsdk:"status_reason"` + VmClusterIds fwtypes.ListOfString `tfsdk:"vm_cluster_ids"` +} + +type dbNodePatchingDetailsForDbServersListDataSourceModel struct { + EstimatedPatchDuration types.Int32 `tfsdk:"estimated_patch_duration"` + PatchingStatus fwtypes.StringEnum[odbtypes.DbServerPatchingStatus] `tfsdk:"patching_status"` + TimePatchingEnded types.String `tfsdk:"time_patching_ended"` + TimePatchingStarted types.String 
`tfsdk:"time_patching_started"` +} diff --git a/internal/service/odb/db_servers_data_source_test.go b/internal/service/odb/db_servers_data_source_test.go new file mode 100644 index 000000000000..721dbd046642 --- /dev/null +++ b/internal/service/odb/db_servers_data_source_test.go @@ -0,0 +1,163 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package odb_test + +import ( + "context" + "errors" + "fmt" + "strconv" + "testing" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/odb" + odbtypes "github.com/aws/aws-sdk-go-v2/service/odb/types" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/terraform" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/create" + "github.com/hashicorp/terraform-provider-aws/internal/errs" + tfodb "github.com/hashicorp/terraform-provider-aws/internal/service/odb" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + "github.com/hashicorp/terraform-provider-aws/names" +) + +type testDbServersListDataSource struct { + displayNamePrefix string +} + +var dbServersListDataSourceTestEntity = testDbServersListDataSource{ + displayNamePrefix: "Ofake-exa", +} + +// Acceptance test access AWS and cost money to run. 
+func TestAccODBDBServersListDataSource_basic(t *testing.T) { + ctx := acctest.Context(t) + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + var dbServersList odb.ListDbServersOutput + dataSourceName := "data.aws_odb_db_servers.test" + exaInfraResourceName := "aws_odb_cloud_exadata_infrastructure.test" + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.ODBServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: dbServersListDataSourceTestEntity.testAccCheckDBServersDestroyed(ctx), + Steps: []resource.TestStep{ + { + Config: dbServersListDataSourceTestEntity.testAccDBServersListDataSourceConfigBasic(), + Check: resource.ComposeAggregateTestCheckFunc( + dbServersListDataSourceTestEntity.testAccCheckDBServersListExists(ctx, exaInfraResourceName, &dbServersList), + resource.TestCheckResourceAttr(dataSourceName, "aws_odb_db_servers.db_servers.#", strconv.Itoa(len(dbServersList.DbServers))), + ), + }, + }, + }) +} + +func (testDbServersListDataSource) testAccCheckDBServersListExists(ctx context.Context, name string, output *odb.ListDbServersOutput) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[name] + if !ok { + return create.Error(names.ODB, create.ErrActionCheckingExistence, tfodb.DSNameDBServersList, name, errors.New("not found")) + } + conn := acctest.Provider.Meta().(*conns.AWSClient).ODBClient(ctx) + var exaInfraId = &rs.Primary.ID + + resp, err := dbServersListDataSourceTestEntity.findDBServersList(ctx, conn, exaInfraId) + if err != nil { + return create.Error(names.ODB, create.ErrActionCheckingExistence, tfodb.DSNameDBServersList, rs.Primary.ID, err) + } + *output = *resp + return nil + } +} + +func (testDbServersListDataSource) testAccCheckDBServersDestroyed(ctx context.Context) resource.TestCheckFunc { + return func(s *terraform.State) error { + conn := 
acctest.Provider.Meta().(*conns.AWSClient).ODBClient(ctx) + for _, rs := range s.RootModule().Resources { + if rs.Type != "aws_odb_cloud_exadata_infrastructure" { + continue + } + _, err := dbServersListDataSourceTestEntity.findExaInfra(ctx, conn, rs.Primary.ID) + if tfresource.NotFound(err) { + return nil + } + if err != nil { + return create.Error(names.ODB, create.ErrActionCheckingDestroyed, tfodb.DSNameDBServersList, rs.Primary.ID, err) + } + return create.Error(names.ODB, create.ErrActionCheckingDestroyed, tfodb.DSNameDBServersList, rs.Primary.ID, errors.New("not destroyed")) + } + return nil + } +} + +func (testDbServersListDataSource) findExaInfra(ctx context.Context, conn *odb.Client, id string) (*odbtypes.CloudExadataInfrastructure, error) { + input := odb.GetCloudExadataInfrastructureInput{ + CloudExadataInfrastructureId: aws.String(id), + } + out, err := conn.GetCloudExadataInfrastructure(ctx, &input) + if err != nil { + if errs.IsA[*odbtypes.ResourceNotFoundException](err) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: &input, + } + } + return nil, err + } + if out == nil || out.CloudExadataInfrastructure == nil { + return nil, tfresource.NewEmptyResultError(&input) + } + return out.CloudExadataInfrastructure, nil +} + +func (testDbServersListDataSource) findDBServersList(ctx context.Context, conn *odb.Client, exaInfraId *string) (*odb.ListDbServersOutput, error) { + inputWithExaId := odb.ListDbServersInput{ + CloudExadataInfrastructureId: exaInfraId, + } + output, err := conn.ListDbServers(ctx, &inputWithExaId) + if err != nil { + return nil, err + } + return output, nil +} + +func (testDbServersListDataSource) testAccDBServersListDataSourceConfigBasic() string { + exaInfraDisplayName := sdkacctest.RandomWithPrefix(dbServersListDataSourceTestEntity.displayNamePrefix) + exaInfra := dbServersListDataSourceTestEntity.exaInfra(exaInfraDisplayName) + return fmt.Sprintf(` +%s + +data "aws_odb_db_servers" "test" { + 
cloud_exadata_infrastructure_id = aws_odb_cloud_exadata_infrastructure.test.id +} +`, exaInfra) +} + +func (testDbServersListDataSource) exaInfra(rName string) string { + exaRes := fmt.Sprintf(` +resource "aws_odb_cloud_exadata_infrastructure" "test" { + display_name = %[1]q + shape = "Exadata.X9M" + storage_count = 3 + compute_count = 2 + availability_zone_id = "use1-az6" + maintenance_window { + custom_action_timeout_in_mins = 16 + is_custom_action_timeout_enabled = true + patching_mode = "ROLLING" + preference = "NO_PREFERENCE" + } +} +`, rName) + return exaRes +} diff --git a/internal/service/odb/db_system_shapes_data_source.go b/internal/service/odb/db_system_shapes_data_source.go new file mode 100644 index 000000000000..d559fdc9a435 --- /dev/null +++ b/internal/service/odb/db_system_shapes_data_source.go @@ -0,0 +1,116 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package odb + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/service/odb" + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-provider-aws/internal/create" + "github.com/hashicorp/terraform-provider-aws/internal/framework" + "github.com/hashicorp/terraform-provider-aws/internal/framework/flex" + fwtypes "github.com/hashicorp/terraform-provider-aws/internal/framework/types" + "github.com/hashicorp/terraform-provider-aws/names" +) + +// Function annotations are used for datasource registration to the Provider. DO NOT EDIT. 
+// @FrameworkDataSource("aws_odb_db_system_shapes", name="Db System Shapes") +func newDataSourceDBSystemShapes(context.Context) (datasource.DataSourceWithConfigure, error) { + return &dataSourceDBSystemShapesList{}, nil +} + +const ( + DSNameDBSystemShapesList = "Db System Shapes List Data Source" +) + +type dataSourceDBSystemShapesList struct { + framework.DataSourceWithModel[dbSystemShapesListDataSourceModel] +} + +func (d *dataSourceDBSystemShapesList) Schema(ctx context.Context, req datasource.SchemaRequest, resp *datasource.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: map[string]schema.Attribute{ + "availability_zone_id": schema.StringAttribute{ + Optional: true, + Description: "The physical ID of the AZ, for example, use1-az4. This ID persists across accounts.", + }, + "db_system_shapes": schema.ListAttribute{ + Computed: true, + CustomType: fwtypes.NewListNestedObjectTypeOf[dbSystemShapeDataSourceModel](ctx), + Description: "The list of shapes and their properties. Information about a hardware system model (shape) that's available for an Exadata infrastructure." + + "The shape determines resources, such as CPU cores, memory, and storage, to allocate to the Exadata infrastructure.", + }, + }, + } +} + +func (d *dataSourceDBSystemShapesList) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + conn := d.Meta().ODBClient(ctx) + + var data dbSystemShapesListDataSourceModel + resp.Diagnostics.Append(req.Config.Get(ctx, &data)...) 
+ if resp.Diagnostics.HasError() { + return + } + input := odb.ListDbSystemShapesInput{} + if !data.AvailabilityZoneId.IsNull() && !data.AvailabilityZoneId.IsUnknown() { + input.AvailabilityZoneId = data.AvailabilityZoneId.ValueStringPointer() + } + paginator := odb.NewListDbSystemShapesPaginator(conn, &input) + var out odb.ListDbSystemShapesOutput + for paginator.HasMorePages() { + page, err := paginator.NextPage(ctx) + if err != nil { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.ODB, create.ErrActionReading, DSNameDBSystemShapesList, "", err), + err.Error(), + ) + return + } + + if page != nil && len(page.DbSystemShapes) > 0 { + out.DbSystemShapes = append(out.DbSystemShapes, page.DbSystemShapes...) + } + } + resp.Diagnostics.Append(flex.Flatten(ctx, out, &data)...) + if resp.Diagnostics.HasError() { + return + } + + resp.Diagnostics.Append(resp.State.Set(ctx, &data)...) +} + +type dbSystemShapesListDataSourceModel struct { + framework.WithRegionModel + AvailabilityZoneId types.String `tfsdk:"availability_zone_id"` + DbSystemShapes fwtypes.ListNestedObjectValueOf[dbSystemShapeDataSourceModel] `tfsdk:"db_system_shapes"` +} + +type dbSystemShapeDataSourceModel struct { + AvailableCoreCount types.Int32 `tfsdk:"available_core_count"` + AvailableCoreCountPerNode types.Int32 `tfsdk:"available_core_count_per_node"` + AvailableDataStorageInTBs types.Int32 `tfsdk:"available_data_storage_in_tbs"` + AvailableDataStoragePerServerInTBs types.Int32 `tfsdk:"available_data_storage_per_server_in_tbs"` + AvailableDbNodePerNodeInGBs types.Int32 `tfsdk:"available_db_node_per_node_in_gbs"` + AvailableDbNodeStorageInGBs types.Int32 `tfsdk:"available_db_node_storage_in_gbs"` + AvailableMemoryInGBs types.Int32 `tfsdk:"available_memory_in_gbs"` + AvailableMemoryPerNodeInGBs types.Int32 `tfsdk:"available_memory_per_node_in_gbs"` + CoreCountIncrement types.Int32 `tfsdk:"core_count_increment"` + MaxStorageCount types.Int32 `tfsdk:"max_storage_count"` + MaximumNodeCount 
types.Int32 `tfsdk:"maximum_node_count"` + MinCoreCountPerNode types.Int32 `tfsdk:"min_core_count_per_node"` + MinDataStorageInTBs types.Int32 `tfsdk:"min_data_storage_in_tbs"` + MinDbNodeStoragePerNodeInGBs types.Int32 `tfsdk:"min_db_node_storage_per_node_in_gbs"` + MinMemoryPerNodeInGBs types.Int32 `tfsdk:"min_memory_per_node_in_gbs"` + MinStorageCount types.Int32 `tfsdk:"min_storage_count"` + MinimumCoreCount types.Int32 `tfsdk:"minimum_core_count"` + MinimumNodeCount types.Int32 `tfsdk:"minimum_node_count"` + Name types.String `tfsdk:"name"` + RuntimeMinimumCoreCount types.Int32 `tfsdk:"runtime_minimum_core_count"` + ShapeFamily types.String `tfsdk:"shape_family"` + ShapeType types.String `tfsdk:"shape_type"` +} diff --git a/internal/service/odb/db_system_shapes_data_source_test.go b/internal/service/odb/db_system_shapes_data_source_test.go new file mode 100644 index 000000000000..deca70e80397 --- /dev/null +++ b/internal/service/odb/db_system_shapes_data_source_test.go @@ -0,0 +1,44 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package odb_test + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccODBDBSystemShapesListDataSource_basic(t *testing.T) { + ctx := acctest.Context(t) + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + dataSourceName := "data.aws_odb_db_system_shapes.test" + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.ODBServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + Config: basicConfigDBSystemShapeDataSource("use1-az6"), + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttr(dataSourceName, "db_system_shapes.#", "2"), + ), + }, + }, + }) +} + +func basicConfigDBSystemShapeDataSource(availabilityZoneId string) string { + return fmt.Sprintf(` +data "aws_odb_db_system_shapes" "test"{ + availability_zone_id = %[1]q +} +`, availabilityZoneId) +} diff --git a/internal/service/odb/exports_test.go b/internal/service/odb/exports_test.go new file mode 100644 index 000000000000..182c9d8606fa --- /dev/null +++ b/internal/service/odb/exports_test.go @@ -0,0 +1,14 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package odb + +// Exports for use in tests only. 
+var ( + ResourceCloudAutonomousVMCluster = newResourceCloudAutonomousVmCluster + ResourceCloudExadataInfrastructure = newResourceCloudExadataInfrastructure + + FindCloudAutonomousVmClusterByID = findCloudAutonomousVmClusterByID + FindExadataInfraResourceByID = findExadataInfraResourceByID + FindCloudVmClusterForResourceByID = findCloudVmClusterForResourceByID +) diff --git a/internal/service/odb/generate.go b/internal/service/odb/generate.go new file mode 100644 index 000000000000..0137a84b8b9b --- /dev/null +++ b/internal/service/odb/generate.go @@ -0,0 +1,8 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +//go:generate go run ../../generate/servicepackage/main.go +//go:generate go run ../../generate/tags/main.go -ServiceTagsMap -KVTValues -ListTags -UpdateTags +// ONLY generate directives and package declaration! Do not add anything else to this file. + +package odb diff --git a/internal/service/odb/gi_versions_data_source.go b/internal/service/odb/gi_versions_data_source.go new file mode 100644 index 000000000000..bc314b60919d --- /dev/null +++ b/internal/service/odb/gi_versions_data_source.go @@ -0,0 +1,91 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package odb + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/service/odb" + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-provider-aws/internal/create" + "github.com/hashicorp/terraform-provider-aws/internal/framework" + "github.com/hashicorp/terraform-provider-aws/internal/framework/flex" + fwtypes "github.com/hashicorp/terraform-provider-aws/internal/framework/types" + "github.com/hashicorp/terraform-provider-aws/names" +) + +// Function annotations are used for datasource registration to the Provider. DO NOT EDIT. 
+// @FrameworkDataSource("aws_odb_gi_versions", name="Gi Versions") +func newDataSourceGiVersions(context.Context) (datasource.DataSourceWithConfigure, error) { + return &dataSourceGiVersionsList{}, nil +} + +const ( + DSNameGiVersionsList = "Gi Versions List Data Source" +) + +type dataSourceGiVersionsList struct { + framework.DataSourceWithModel[giVersionDataSourceModel] +} + +func (d *dataSourceGiVersionsList) Schema(ctx context.Context, req datasource.SchemaRequest, resp *datasource.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: map[string]schema.Attribute{ + "shape": schema.StringAttribute{ + Optional: true, + Description: "The system shape.", + }, + "gi_versions": schema.ListAttribute{ + Computed: true, + CustomType: fwtypes.NewListNestedObjectTypeOf[giVersionSummaryModel](ctx), + Description: "Information about a specific version of Oracle Grid Infrastructure (GI) software that can be installed on a VM cluster.", + }, + }, + } +} + +func (d *dataSourceGiVersionsList) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + conn := d.Meta().ODBClient(ctx) + var data giVersionDataSourceModel + resp.Diagnostics.Append(req.Config.Get(ctx, &data)...) + if resp.Diagnostics.HasError() { + return + } + var input odb.ListGiVersionsInput + if !data.Shape.IsNull() { + input.Shape = data.Shape.ValueStringPointer() + } + paginator := odb.NewListGiVersionsPaginator(conn, &input) + var out odb.ListGiVersionsOutput + for paginator.HasMorePages() { + page, err := paginator.NextPage(ctx) + if err != nil { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.ODB, create.ErrActionReading, DSNameGiVersionsList, "", err), + err.Error(), + ) + return + } + if page != nil && len(page.GiVersions) > 0 { + out.GiVersions = append(out.GiVersions, page.GiVersions...) + } + } + resp.Diagnostics.Append(flex.Flatten(ctx, out, &data)...) 
+ if resp.Diagnostics.HasError() { + return + } + resp.Diagnostics.Append(resp.State.Set(ctx, &data)...) +} + +type giVersionDataSourceModel struct { + framework.WithRegionModel + GiVersions fwtypes.ListNestedObjectValueOf[giVersionSummaryModel] `tfsdk:"gi_versions"` + Shape types.String `tfsdk:"shape"` +} + +type giVersionSummaryModel struct { + Version types.String `tfsdk:"version"` +} diff --git a/internal/service/odb/gi_versions_data_source_test.go b/internal/service/odb/gi_versions_data_source_test.go new file mode 100644 index 000000000000..ed6398f5f769 --- /dev/null +++ b/internal/service/odb/gi_versions_data_source_test.go @@ -0,0 +1,71 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package odb_test + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccODBGiVersionsListDataSource_basicX9M(t *testing.T) { + ctx := acctest.Context(t) + + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + dataSourceName := "data.aws_odb_gi_versions.test" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.ODBServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + Config: testAccGiVersionsListConfigBasic("Exadata.X9M"), + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttr(dataSourceName, "gi_versions.#", "2"), + ), + }, + }, + }) +} + +func TestAccODBGiVersionsListDataSource_basicX11M(t *testing.T) { + ctx := acctest.Context(t) + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + dataSourceName := "data.aws_odb_gi_versions.test" + resource.Test(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + }, + ErrorCheck: 
acctest.ErrorCheck(t, names.ODBServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + Config: testAccGiVersionsListConfigBasic("Exadata.X11M"), + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttr(dataSourceName, "gi_versions.#", "2"), + ), + }, + }, + }) +} + +func testAccGiVersionsListConfigBasic(shape string) string { + return fmt.Sprintf(` + + +data "aws_odb_gi_versions" "test" { + shape = %[1]q +} +`, shape) +} diff --git a/internal/service/odb/network.go b/internal/service/odb/network.go new file mode 100644 index 000000000000..4fdda38c7dd7 --- /dev/null +++ b/internal/service/odb/network.go @@ -0,0 +1,717 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package odb + +import ( + "context" + "errors" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/odb" + odbtypes "github.com/aws/aws-sdk-go-v2/service/odb/types" + "github.com/hashicorp/terraform-plugin-framework-timeouts/resource/timeouts" + "github.com/hashicorp/terraform-plugin-framework-timetypes/timetypes" + "github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" + "github.com/hashicorp/terraform-provider-aws/internal/create" + "github.com/hashicorp/terraform-provider-aws/internal/enum" + "github.com/hashicorp/terraform-provider-aws/internal/errs" + 
"github.com/hashicorp/terraform-provider-aws/internal/errs/fwdiag" + "github.com/hashicorp/terraform-provider-aws/internal/framework" + "github.com/hashicorp/terraform-provider-aws/internal/framework/flex" + fwtypes "github.com/hashicorp/terraform-provider-aws/internal/framework/types" + tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + "github.com/hashicorp/terraform-provider-aws/names" +) + +// @FrameworkResource("aws_odb_network", name="Network") +// @Tags(identifierAttribute="arn") +func newResourceNetwork(_ context.Context) (resource.ResourceWithConfigure, error) { + r := &resourceNetwork{} + r.SetDefaultCreateTimeout(24 * time.Hour) + r.SetDefaultUpdateTimeout(24 * time.Hour) + r.SetDefaultDeleteTimeout(24 * time.Hour) + + return r, nil +} + +const ( + ResNameNetwork = "Odb Network" +) + +type resourceNetwork struct { + framework.ResourceWithModel[odbNetworkResourceModel] + framework.WithTimeouts + framework.WithImportByID +} + +var OracleDBNetwork = newResourceNetwork +var managedServiceTimeout = 15 * time.Minute + +func (r *resourceNetwork) Schema(ctx context.Context, req resource.SchemaRequest, resp *resource.SchemaResponse) { + statusType := fwtypes.StringEnumType[odbtypes.ResourceStatus]() + stringLengthBetween1And255Validator := []validator.String{ + stringvalidator.LengthBetween(1, 255), + } + resp.Schema = schema.Schema{ + Attributes: map[string]schema.Attribute{ + names.AttrARN: framework.ARNAttributeComputedOnly(), + names.AttrID: framework.IDAttribute(), + names.AttrDisplayName: schema.StringAttribute{ + Required: true, + Description: "The user-friendly name for the odb network. 
Changing this will force terraform to create a new resource.", + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + }, + names.AttrAvailabilityZone: schema.StringAttribute{ + Optional: true, + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + stringplanmodifier.UseStateForUnknown(), + }, + Description: "The name of the Availability Zone (AZ) where the odb network is located. Changing this will force terraform to create new resource", + }, + "availability_zone_id": schema.StringAttribute{ + Required: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + Description: "The AZ ID of the AZ where the ODB network is located. Changing this will force terraform to create new resource.", + }, + "client_subnet_cidr": schema.StringAttribute{ + Required: true, + Validators: stringLengthBetween1And255Validator, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + Description: "The CIDR notation for the network resource. Changing this will force terraform to create new resource.\n" + + " Constraints:\n " + + "\t - Must not overlap with the CIDR range of the backup subnet.\n " + + "\t- Must not overlap with the CIDR ranges of the VPCs that are connected to the\n " + + " ODB network.\n " + + "\t- Must not use the following CIDR ranges that are reserved by OCI:\n " + + "\t - 100.106.0.0/16 and 100.107.0.0/16\n " + + "\t - 169.254.0.0/16\n " + + "\t- 224.0.0.0 - 239.255.255.255\n " + + "\t- 240.0.0.0 - 255.255.255.255", + }, + "backup_subnet_cidr": schema.StringAttribute{ + Required: true, + Validators: stringLengthBetween1And255Validator, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + Description: "The CIDR range of the backup subnet for the ODB network. 
Changing this will force terraform to create new resource.\n" + + "\tConstraints:\n" + + "\t - Must not overlap with the CIDR range of the client subnet.\n" + + "\t - Must not overlap with the CIDR ranges of the VPCs that are connected to the\n" + + "\t ODB network.\n" + + "\t - Must not use the following CIDR ranges that are reserved by OCI:\n" + + "\t - 100.106.0.0/16 and 100.107.0.0/16\n" + + "\t - 169.254.0.0/16\n" + + "\t - 224.0.0.0 - 239.255.255.255\n" + + "\t - 240.0.0.0 - 255.255.255.255", + }, + + "custom_domain_name": schema.StringAttribute{ + Optional: true, + Validators: stringLengthBetween1And255Validator, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + Description: "The name of the custom domain that the network is located. custom_domain_name and default_dns_prefix both can't be given.", + }, + "default_dns_prefix": schema.StringAttribute{ + Optional: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + Description: "The default DNS prefix for the network resource. 
Changing this will force terraform to create new resource.", + }, + "s3_access": schema.StringAttribute{ + Required: true, + CustomType: fwtypes.StringEnumType[odbtypes.Access](), + Description: "Specifies the configuration for Amazon S3 access from the ODB network.", + }, + "zero_etl_access": schema.StringAttribute{ + Required: true, + CustomType: fwtypes.StringEnumType[odbtypes.Access](), + Description: "Specifies the configuration for Zero-ETL access from the ODB network.", + }, + "s3_policy_document": schema.StringAttribute{ + Optional: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + Description: "Specifies the endpoint policy for Amazon S3 access from the ODB network.", + }, + "oci_dns_forwarding_configs": schema.ListAttribute{ + CustomType: fwtypes.NewListNestedObjectTypeOf[odbNwkOciDnsForwardingConfigResourceModel](ctx), + Computed: true, + Description: "The DNS resolver endpoint in OCI for forwarding DNS queries for the ociPrivateZone domain.", + }, + "peered_cidrs": schema.SetAttribute{ + CustomType: fwtypes.SetOfStringType, + ElementType: types.StringType, + Computed: true, + Description: "The list of CIDR ranges from the peered VPC that are allowed access to the ODB network. 
Please refer odb network peering documentation.", + }, + "oci_network_anchor_id": schema.StringAttribute{ + Computed: true, + Description: "The unique identifier of the OCI network anchor for the ODB network.", + }, + "oci_network_anchor_url": schema.StringAttribute{ + Computed: true, + Description: "The URL of the OCI network anchor for the ODB network.", + }, + "oci_resource_anchor_name": schema.StringAttribute{ + Computed: true, + Description: "The name of the OCI resource anchor for the ODB network.", + }, + "oci_vcn_id": schema.StringAttribute{ + Computed: true, + Description: "The unique identifier Oracle Cloud ID (OCID) of the OCI VCN for the ODB network.", + }, + "oci_vcn_url": schema.StringAttribute{ + Computed: true, + Description: "The URL of the OCI VCN for the ODB network.", + }, + "percent_progress": schema.Float32Attribute{ + Computed: true, + Description: "The amount of progress made on the current operation on the ODB network, expressed as a percentage.", + }, + names.AttrStatus: schema.StringAttribute{ + CustomType: statusType, + Computed: true, + Description: "The status of the network resource.", + }, + names.AttrStatusReason: schema.StringAttribute{ + Computed: true, + Description: "Additional information about the current status of the ODB network.", + }, + names.AttrCreatedAt: schema.StringAttribute{ + Computed: true, + CustomType: timetypes.RFC3339Type{}, + Description: "The date and time when the ODB network was created.", + }, + "managed_services": schema.ListAttribute{ + Computed: true, + CustomType: fwtypes.NewListNestedObjectTypeOf[odbNetworkManagedServicesResourceModel](ctx), + Description: "The managed services configuration for the ODB network.", + }, + names.AttrTags: tftags.TagsAttribute(), + names.AttrTagsAll: tftags.TagsAttributeComputedOnly(), + }, + Blocks: map[string]schema.Block{ + names.AttrTimeouts: timeouts.Block(ctx, timeouts.Opts{ + Create: true, + Update: true, + Delete: true, + }), + }, + } +} + +func (r 
*resourceNetwork) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) {
	conn := r.Meta().ODBClient(ctx)

	var plan odbNetworkResourceModel
	resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...)
	if resp.Diagnostics.HasError() {
		return
	}

	input := odb.CreateOdbNetworkInput{
		Tags: getTagsIn(ctx),
	}
	resp.Diagnostics.Append(flex.Expand(ctx, plan, &input)...)
	if resp.Diagnostics.HasError() {
		return
	}

	out, err := conn.CreateOdbNetwork(ctx, &input)
	if err != nil {
		resp.Diagnostics.AddError(
			create.ProblemStandardMessage(names.ODB, create.ErrActionCreating, ResNameNetwork, plan.DisplayName.String(), err),
			err.Error(),
		)
		return
	}
	if out == nil || out.OdbNetworkId == nil {
		resp.Diagnostics.AddError(
			create.ProblemStandardMessage(names.ODB, create.ErrActionCreating, ResNameNetwork, plan.DisplayName.String(), nil),
			errors.New("empty output").Error(),
		)
		return
	}

	createTimeout := r.CreateTimeout(ctx, plan.Timeouts)
	_, err = waitNetworkCreated(ctx, conn, *out.OdbNetworkId, createTimeout)
	// Persist the ID before inspecting the wait result so a timed-out create
	// does not orphan the network.
	resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root(names.AttrID), aws.ToString(out.OdbNetworkId))...)
	if err != nil {
		resp.Diagnostics.AddError(
			create.ProblemStandardMessage(names.ODB, create.ErrActionWaitingForCreation, ResNameNetwork, plan.DisplayName.String(), err),
			err.Error(),
		)
		return
	}

	// Wait for Zero-ETL access to reach the requested state.
	// Fix: these are create-path waits, so report ErrActionWaitingForCreation with
	// the display name (plan.OdbNetworkId is not yet known during Create; the
	// original used ErrActionWaitingForUpdate and the unknown ID).
	_, err = waitForManagedService(ctx, plan.ZeroEtlAccess.ValueEnum(), conn, *out.OdbNetworkId, managedServiceTimeout, func(managedService *odbtypes.ManagedServices) odbtypes.ManagedResourceStatus {
		return managedService.ZeroEtlAccess.Status
	})
	if err != nil {
		resp.Diagnostics.AddError(
			create.ProblemStandardMessage(names.ODB, create.ErrActionWaitingForCreation, ResNameNetwork, plan.DisplayName.String(), err),
			err.Error(),
		)
		return
	}

	// Wait for S3 access to reach the requested state.
	createdOdbNetwork, err := waitForManagedService(ctx, plan.S3Access.ValueEnum(), conn, *out.OdbNetworkId, managedServiceTimeout, func(managedService *odbtypes.ManagedServices) odbtypes.ManagedResourceStatus {
		return managedService.S3Access.Status
	})
	if err != nil {
		resp.Diagnostics.AddError(
			create.ProblemStandardMessage(names.ODB, create.ErrActionWaitingForCreation, ResNameNetwork, plan.DisplayName.String(), err),
			err.Error(),
		)
		return
	}

	// zero_etl_access, s3_access and s3_policy_document are not returned directly
	// by the underlying API, so derive them from the managed-services status.
	readZeroEtlAccessStatus, err := mapManagedServiceStatusToAccessStatus(createdOdbNetwork.ManagedServices.ZeroEtlAccess.Status)
	if err != nil {
		resp.Diagnostics.AddError(
			create.ProblemStandardMessage(names.ODB, create.ErrActionReading, ResNameNetwork, plan.DisplayName.String(), err),
			err.Error(),
		)
		return
	}
	plan.ZeroEtlAccess = fwtypes.StringEnumValue(readZeroEtlAccessStatus)

	readS3AccessStatus, err := mapManagedServiceStatusToAccessStatus(createdOdbNetwork.ManagedServices.S3Access.Status)
	if err != nil {
		resp.Diagnostics.AddError(
			create.ProblemStandardMessage(names.ODB, create.ErrActionReading, ResNameNetwork, plan.DisplayName.String(), err),
			err.Error(),
		)
		return
	}
	plan.S3Access = fwtypes.StringEnumValue(readS3AccessStatus)
	plan.S3PolicyDocument = types.StringPointerValue(createdOdbNetwork.ManagedServices.S3Access.S3PolicyDocument)

	resp.Diagnostics.Append(flex.Flatten(ctx, createdOdbNetwork, &plan)...)
	if resp.Diagnostics.HasError() {
		return
	}
	resp.Diagnostics.Append(resp.State.Set(ctx, plan)...)
}

// Read refreshes state from GetOdbNetwork, deriving the access attributes from
// the managed-services block since the API does not return them directly.
func (r *resourceNetwork) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) {
	conn := r.Meta().ODBClient(ctx)
	var state odbNetworkResourceModel
	resp.Diagnostics.Append(req.State.Get(ctx, &state)...)
	if resp.Diagnostics.HasError() {
		return
	}

	out, err := FindOracleDBNetworkResourceByID(ctx, conn, state.OdbNetworkId.ValueString())
	if tfresource.NotFound(err) {
		resp.Diagnostics.Append(fwdiag.NewResourceNotFoundWarningDiagnostic(err))
		resp.State.RemoveResource(ctx)
		return
	}
	if err != nil {
		resp.Diagnostics.AddError(
			create.ProblemStandardMessage(names.ODB, create.ErrActionReading, ResNameNetwork, state.OdbNetworkId.String(), err),
			err.Error(),
		)
		return
	}
	if out.ManagedServices == nil {
		resp.Diagnostics.AddError(
			create.ProblemStandardMessage(names.ODB, create.ErrActionReading, ResNameNetwork, state.OdbNetworkId.String(), errors.New("odbNetwork managed service not found")),
			"Odb Network managed service cannot be nil",
		)
		return
	}

	// NOTE(review): assumes ManagedServices.S3Access / .ZeroEtlAccess are non-nil
	// whenever ManagedServices is — confirm against the API contract.
	readS3AccessStatus, err := mapManagedServiceStatusToAccessStatus(out.ManagedServices.S3Access.Status)
	if err != nil {
		resp.Diagnostics.AddError(
			create.ProblemStandardMessage(names.ODB, create.ErrActionReading, ResNameNetwork, state.OdbNetworkId.String(), err),
			err.Error(),
		)
		return
	}
	state.S3Access = fwtypes.StringEnumValue(readS3AccessStatus)
	state.S3PolicyDocument = types.StringPointerValue(out.ManagedServices.S3Access.S3PolicyDocument)

	readZeroEtlAccessStatus, err := mapManagedServiceStatusToAccessStatus(out.ManagedServices.ZeroEtlAccess.Status)
	if err != nil {
		resp.Diagnostics.AddError(
			create.ProblemStandardMessage(names.ODB, create.ErrActionReading, ResNameNetwork, state.OdbNetworkId.String(), err),
			err.Error(),
		)
		return
	}
	state.ZeroEtlAccess = fwtypes.StringEnumValue(readZeroEtlAccessStatus)

	resp.Diagnostics.Append(flex.Flatten(ctx, out, &state)...)
	if resp.Diagnostics.HasError() {
		return
	}
	resp.Diagnostics.Append(resp.State.Set(ctx, &state)...)
+} + +func (r *resourceNetwork) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) { + conn := r.Meta().ODBClient(ctx) + var plan, state odbNetworkResourceModel + resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...) + resp.Diagnostics.Append(req.State.Get(ctx, &state)...) + if resp.Diagnostics.HasError() { + return + } + + diff, d := flex.Diff(ctx, plan, state) + resp.Diagnostics.Append(d...) + if resp.Diagnostics.HasError() { + return + } + if diff.HasChanges() { + var input odb.UpdateOdbNetworkInput + resp.Diagnostics.Append(flex.Expand(ctx, plan, &input)...) + out, err := conn.UpdateOdbNetwork(ctx, &input) + if err != nil { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.ODB, create.ErrActionUpdating, ResNameNetwork, plan.OdbNetworkId.String(), err), + err.Error(), + ) + return + } + if out == nil || out.OdbNetworkId == nil { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.ODB, create.ErrActionUpdating, ResNameNetwork, plan.OdbNetworkId.String(), nil), + errors.New("empty output").Error(), + ) + return + } + } + + updateTimeout := r.UpdateTimeout(ctx, plan.Timeouts) + _, err := waitNetworkUpdated(ctx, conn, plan.OdbNetworkId.ValueString(), updateTimeout) + if err != nil { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.ODB, create.ErrActionWaitingForUpdate, ResNameNetwork, plan.OdbNetworkId.String(), err), + err.Error(), + ) + return + } + + //zero ETL access + _, err = waitForManagedService(ctx, plan.ZeroEtlAccess.ValueEnum(), conn, plan.OdbNetworkId.ValueString(), managedServiceTimeout, func(managedService *odbtypes.ManagedServices) odbtypes.ManagedResourceStatus { + return managedService.ZeroEtlAccess.Status + }) + if err != nil { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.ODB, create.ErrActionWaitingForUpdate, ResNameNetwork, plan.OdbNetworkId.String(), err), + err.Error(), + ) + return + } + + //s3 access + updatedOdbNwk, err := 
waitForManagedService(ctx, plan.S3Access.ValueEnum(), conn, plan.OdbNetworkId.ValueString(), managedServiceTimeout, func(managedService *odbtypes.ManagedServices) odbtypes.ManagedResourceStatus { + return managedService.S3Access.Status + }) + if err != nil { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.ODB, create.ErrActionWaitingForUpdate, ResNameNetwork, plan.OdbNetworkId.String(), err), + err.Error(), + ) + return + } + + readS3AccessStatus, err := mapManagedServiceStatusToAccessStatus(updatedOdbNwk.ManagedServices.S3Access.Status) + if err != nil { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.ODB, create.ErrActionReading, ResNameNetwork, state.OdbNetworkId.String(), err), + err.Error(), + ) + return + } + plan.S3Access = fwtypes.StringEnumValue(readS3AccessStatus) + plan.S3PolicyDocument = types.StringPointerValue(updatedOdbNwk.ManagedServices.S3Access.S3PolicyDocument) + + readZeroEtlAccessStatus, err := mapManagedServiceStatusToAccessStatus(updatedOdbNwk.ManagedServices.ZeroEtlAccess.Status) + if err != nil { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.ODB, create.ErrActionReading, ResNameNetwork, state.OdbNetworkId.String(), err), + err.Error(), + ) + return + } + plan.ZeroEtlAccess = fwtypes.StringEnumValue(readZeroEtlAccessStatus) + + resp.Diagnostics.Append(flex.Flatten(ctx, updatedOdbNwk, &plan)...) + resp.Diagnostics.Append(resp.State.Set(ctx, &plan)...) +} + +func (r *resourceNetwork) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { + conn := r.Meta().ODBClient(ctx) + var state odbNetworkResourceModel + resp.Diagnostics.Append(req.State.Get(ctx, &state)...) 
+ if resp.Diagnostics.HasError() { + return + } + + deleteAssociatedResources := false + input := odb.DeleteOdbNetworkInput{ + OdbNetworkId: state.OdbNetworkId.ValueStringPointer(), + DeleteAssociatedResources: &deleteAssociatedResources, + } + + _, err := conn.DeleteOdbNetwork(ctx, &input) + + if err != nil { + if errs.IsA[*odbtypes.ResourceNotFoundException](err) { + return + } + + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.ODB, create.ErrActionDeleting, ResNameNetwork, state.OdbNetworkId.String(), err), + err.Error(), + ) + return + } + + deleteTimeout := r.DeleteTimeout(ctx, state.Timeouts) + _, err = waitNetworkDeleted(ctx, conn, state.OdbNetworkId.ValueString(), deleteTimeout) + if err != nil { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.ODB, create.ErrActionWaitingForDeletion, ResNameNetwork, state.OdbNetworkArn.String(), err), + err.Error(), + ) + return + } +} + +func waitNetworkCreated(ctx context.Context, conn *odb.Client, id string, timeout time.Duration) (*odbtypes.OdbNetwork, error) { + stateConf := &retry.StateChangeConf{ + Pending: enum.Slice(odbtypes.ResourceStatusProvisioning), + Target: enum.Slice(odbtypes.ResourceStatusAvailable, odbtypes.ResourceStatusFailed), + Refresh: statusNetwork(ctx, conn, id), + Timeout: timeout, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + if out, ok := outputRaw.(*odbtypes.OdbNetwork); ok { + return out, err + } + + return nil, err +} + +func waitForManagedService(ctx context.Context, targetStatus odbtypes.Access, conn *odb.Client, id string, timeout time.Duration, managedResourceStatus func(managedService *odbtypes.ManagedServices) odbtypes.ManagedResourceStatus) (*odbtypes.OdbNetwork, error) { + switch targetStatus { + case odbtypes.AccessEnabled: + stateConf := &retry.StateChangeConf{ + Pending: enum.Slice(odbtypes.ManagedResourceStatusEnabling), + Target: enum.Slice(odbtypes.ManagedResourceStatusEnabled), + Refresh: statusManagedService(ctx, conn, id, 
managedResourceStatus),
			Timeout: timeout,
		}
		outputRaw, err := stateConf.WaitForStateContext(ctx)
		if out, ok := outputRaw.(*odbtypes.OdbNetwork); ok {
			return out, err
		}
		return nil, err
	case odbtypes.AccessDisabled:
		// Mirror of the enable case: wait for DISABLING -> DISABLED.
		stateConf := &retry.StateChangeConf{
			Pending: enum.Slice(odbtypes.ManagedResourceStatusDisabling),
			Target:  enum.Slice(odbtypes.ManagedResourceStatusDisabled),
			Refresh: statusManagedService(ctx, conn, id, managedResourceStatus),
			Timeout: timeout,
		}
		outputRaw, err := stateConf.WaitForStateContext(ctx)
		if out, ok := outputRaw.(*odbtypes.OdbNetwork); ok {
			return out, err
		}
		return nil, err
	default:
		// Only ENABLED/DISABLED are valid wait targets; anything else is a
		// programming error. (Typo fix: "manged" -> "managed".)
		return nil, errors.New("odb network invalid managed service status")
	}
}

// statusManagedService returns a retry.StateRefreshFunc reporting the status
// of one managed service of the ODB network, as selected by the
// managedResourceStatus accessor.
func statusManagedService(ctx context.Context, conn *odb.Client, id string, managedResourceStatus func(managedService *odbtypes.ManagedServices) odbtypes.ManagedResourceStatus) retry.StateRefreshFunc {
	return func() (any, string, error) {
		out, err := FindOracleDBNetworkResourceByID(ctx, conn, id)

		if err != nil {
			return nil, "", err
		}

		// No managed-services block on the network yet: report "no state"
		// so the waiter keeps polling.
		if out.ManagedServices == nil {
			return nil, "", nil
		}

		return out, string(managedResourceStatus(out.ManagedServices)), nil
	}
}

// waitNetworkUpdated waits until the ODB network with the given ID leaves the
// UPDATING state; AVAILABLE and FAILED are both terminal.
func waitNetworkUpdated(ctx context.Context, conn *odb.Client, id string, timeout time.Duration) (*odbtypes.OdbNetwork, error) {
	stateConf := &retry.StateChangeConf{
		Pending: enum.Slice(odbtypes.ResourceStatusUpdating),
		Target:  enum.Slice(odbtypes.ResourceStatusAvailable, odbtypes.ResourceStatusFailed),
		Refresh: statusNetwork(ctx, conn, id),
		Timeout: timeout,
	}

	outputRaw, err := stateConf.WaitForStateContext(ctx)
	if out, ok := outputRaw.(*odbtypes.OdbNetwork); ok {
		return out, err
	}

	return nil, err
}

// waitNetworkDeleted waits until the ODB network with the given ID disappears
// (the empty Target slice means "gone").
func waitNetworkDeleted(ctx context.Context, conn *odb.Client, id string, timeout time.Duration) (*odbtypes.OdbNetwork, error) {
	stateConf := &retry.StateChangeConf{
		Pending: enum.Slice(odbtypes.ResourceStatusTerminating),
		Target:
[]string{}, + Refresh: statusNetwork(ctx, conn, id), + Timeout: timeout, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + if out, ok := outputRaw.(*odbtypes.OdbNetwork); ok { + return out, err + } + + return nil, err +} + +func statusNetwork(ctx context.Context, conn *odb.Client, id string) retry.StateRefreshFunc { + return func() (any, string, error) { + out, err := FindOracleDBNetworkResourceByID(ctx, conn, id) + if tfresource.NotFound(err) { + return nil, "", nil + } + + if err != nil { + return nil, "", err + } + + return out, string(out.Status), nil + } +} + +func mapManagedServiceStatusToAccessStatus(mangedStatus odbtypes.ManagedResourceStatus) (odbtypes.Access, error) { + if mangedStatus == odbtypes.ManagedResourceStatusDisabled { + return odbtypes.AccessDisabled, nil + } + if mangedStatus == odbtypes.ManagedResourceStatusEnabled { + return odbtypes.AccessEnabled, nil + } + return "", errors.New("can not convert managed status to access status") +} + +func FindOracleDBNetworkResourceByID(ctx context.Context, conn *odb.Client, id string) (*odbtypes.OdbNetwork, error) { + input := odb.GetOdbNetworkInput{ + OdbNetworkId: aws.String(id), + } + + out, err := conn.GetOdbNetwork(ctx, &input) + if err != nil { + if errs.IsA[*odbtypes.ResourceNotFoundException](err) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: &input, + } + } + + return nil, err + } + + if out == nil || out.OdbNetwork == nil { + return nil, tfresource.NewEmptyResultError(&input) + } + + return out.OdbNetwork, nil +} + +type odbNetworkResourceModel struct { + framework.WithRegionModel + DisplayName types.String `tfsdk:"display_name"` + AvailabilityZone types.String `tfsdk:"availability_zone"` + AvailabilityZoneId types.String `tfsdk:"availability_zone_id"` + ClientSubnetCidr types.String `tfsdk:"client_subnet_cidr"` + BackupSubnetCidr types.String `tfsdk:"backup_subnet_cidr"` + CustomDomainName types.String `tfsdk:"custom_domain_name"` + DefaultDnsPrefix 
types.String `tfsdk:"default_dns_prefix"` + S3Access fwtypes.StringEnum[odbtypes.Access] `tfsdk:"s3_access" autoflex:",noflatten"` + ZeroEtlAccess fwtypes.StringEnum[odbtypes.Access] `tfsdk:"zero_etl_access" autoflex:",noflatten"` + S3PolicyDocument types.String `tfsdk:"s3_policy_document" autoflex:",noflatten"` + OdbNetworkId types.String `tfsdk:"id"` + PeeredCidrs fwtypes.SetValueOf[types.String] `tfsdk:"peered_cidrs"` + OciDnsForwardingConfigs fwtypes.ListNestedObjectValueOf[odbNwkOciDnsForwardingConfigResourceModel] `tfsdk:"oci_dns_forwarding_configs"` + OciNetworkAnchorId types.String `tfsdk:"oci_network_anchor_id"` + OciNetworkAnchorUrl types.String `tfsdk:"oci_network_anchor_url"` + OciResourceAnchorName types.String `tfsdk:"oci_resource_anchor_name"` + OciVcnId types.String `tfsdk:"oci_vcn_id"` + OciVcnUrl types.String `tfsdk:"oci_vcn_url"` + OdbNetworkArn types.String `tfsdk:"arn"` + PercentProgress types.Float32 `tfsdk:"percent_progress"` + Status fwtypes.StringEnum[odbtypes.ResourceStatus] `tfsdk:"status"` + StatusReason types.String `tfsdk:"status_reason"` + Timeouts timeouts.Value `tfsdk:"timeouts"` + ManagedServices fwtypes.ListNestedObjectValueOf[odbNetworkManagedServicesResourceModel] `tfsdk:"managed_services"` + CreatedAt timetypes.RFC3339 `tfsdk:"created_at"` + Tags tftags.Map `tfsdk:"tags"` + TagsAll tftags.Map `tfsdk:"tags_all"` +} + +type odbNwkOciDnsForwardingConfigResourceModel struct { + DomainName types.String `tfsdk:"domain_name"` + OciDnsListenerIp types.String `tfsdk:"oci_dns_listener_ip"` +} +type odbNetworkManagedServicesResourceModel struct { + ServiceNetworkArn types.String `tfsdk:"service_network_arn"` + ResourceGatewayArn types.String `tfsdk:"resource_gateway_arn"` + ManagedServicesIpv4Cidrs fwtypes.SetOfString `tfsdk:"managed_service_ipv4_cidrs"` + ServiceNetworkEndpoint fwtypes.ListNestedObjectValueOf[serviceNetworkEndpointOdbNetworkResourceModel] `tfsdk:"service_network_endpoint"` + ManagedS3BackupAccess 
fwtypes.ListNestedObjectValueOf[managedS3BackupAccessOdbNetworkResourceModel] `tfsdk:"managed_s3_backup_access"` + ZeroEtlAccess fwtypes.ListNestedObjectValueOf[zeroEtlAccessOdbNetworkResourceModel] `tfsdk:"zero_etl_access"` + S3Access fwtypes.ListNestedObjectValueOf[s3AccessOdbNetworkResourceModel] `tfsdk:"s3_access"` +} + +type serviceNetworkEndpointOdbNetworkResourceModel struct { + VpcEndpointId types.String `tfsdk:"vpc_endpoint_id"` + VpcEndpointType fwtypes.StringEnum[odbtypes.VpcEndpointType] `tfsdk:"vpc_endpoint_type"` +} + +type managedS3BackupAccessOdbNetworkResourceModel struct { + Status fwtypes.StringEnum[odbtypes.ManagedResourceStatus] `tfsdk:"status"` + Ipv4Addresses fwtypes.SetOfString `tfsdk:"ipv4_addresses"` +} + +type zeroEtlAccessOdbNetworkResourceModel struct { + Status fwtypes.StringEnum[odbtypes.ManagedResourceStatus] `tfsdk:"status"` + Cidr types.String `tfsdk:"cidr"` +} + +type s3AccessOdbNetworkResourceModel struct { + Status fwtypes.StringEnum[odbtypes.ManagedResourceStatus] `tfsdk:"status"` + Ipv4Addresses fwtypes.SetOfString `tfsdk:"ipv4_addresses"` + DomainName types.String `tfsdk:"domain_name"` + S3PolicyDocument types.String `tfsdk:"s3_policy_document"` +} diff --git a/internal/service/odb/network_data_source.go b/internal/service/odb/network_data_source.go new file mode 100644 index 000000000000..ace95d25f9ca --- /dev/null +++ b/internal/service/odb/network_data_source.go @@ -0,0 +1,220 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package odb + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/service/odb" + odbtypes "github.com/aws/aws-sdk-go-v2/service/odb/types" + "github.com/hashicorp/terraform-plugin-framework-timetypes/timetypes" + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-provider-aws/internal/create" + "github.com/hashicorp/terraform-provider-aws/internal/framework" + "github.com/hashicorp/terraform-provider-aws/internal/framework/flex" + fwtypes "github.com/hashicorp/terraform-provider-aws/internal/framework/types" + tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" + "github.com/hashicorp/terraform-provider-aws/names" +) + +// @FrameworkDataSource("aws_odb_network", name="Network") +// @Tags(identifierAttribute="arn") +func newDataSourceNetwork(context.Context) (datasource.DataSourceWithConfigure, error) { + return &dataSourceNetwork{}, nil +} + +const ( + DSNameNetwork = "Odb Network Data Source" +) + +type dataSourceNetwork struct { + framework.DataSourceWithModel[odbNetworkDataSourceModel] +} + +func (d *dataSourceNetwork) Schema(ctx context.Context, req datasource.SchemaRequest, resp *datasource.SchemaResponse) { + statusType := fwtypes.StringEnumType[odbtypes.ResourceStatus]() + resp.Schema = schema.Schema{ + Attributes: map[string]schema.Attribute{ + names.AttrARN: framework.ARNAttributeComputedOnly(), + names.AttrID: schema.StringAttribute{ + Required: true, + }, + names.AttrDisplayName: schema.StringAttribute{ + Computed: true, + Description: "Display name for the network resource.", + }, + "availability_zone_id": schema.StringAttribute{ + Computed: true, + Description: "The AZ ID of the AZ where the ODB network is located.", + }, + names.AttrAvailabilityZone: schema.StringAttribute{ + Computed: true, + Description: "The availability 
zone where the ODB network is located.", + }, + "backup_subnet_cidr": schema.StringAttribute{ + Computed: true, + Description: " The CIDR range of the backup subnet for the ODB network.", + }, + "client_subnet_cidr": schema.StringAttribute{ + Computed: true, + Description: "The CIDR notation for the network resource.", + }, + "custom_domain_name": schema.StringAttribute{ + Computed: true, + Description: "The name of the custom domain that the network is located.", + }, + "default_dns_prefix": schema.StringAttribute{ + Computed: true, + Description: "The default DNS prefix for the network resource.", + }, + "oci_network_anchor_id": schema.StringAttribute{ + Computed: true, + Description: "The unique identifier of the OCI network anchor for the ODB network.", + }, + "oci_network_anchor_url": schema.StringAttribute{ + Computed: true, + Description: "The URL of the OCI network anchor for the ODB network.", + }, + "oci_resource_anchor_name": schema.StringAttribute{ + Computed: true, + Description: "The name of the OCI resource anchor for the ODB network.", + }, + "oci_vcn_id": schema.StringAttribute{ + Computed: true, + Description: "The unique identifier Oracle Cloud ID (OCID) of the OCI VCN for the ODB network.", + }, + "oci_vcn_url": schema.StringAttribute{ + Computed: true, + Description: "The URL of the OCI VCN for the ODB network.", + }, + "percent_progress": schema.Float64Attribute{ + Computed: true, + Description: "The amount of progress made on the current operation on the ODB network, expressed as a percentage.", + }, + "peered_cidrs": schema.SetAttribute{ + CustomType: fwtypes.SetOfStringType, + ElementType: types.StringType, + Computed: true, + Description: "The list of CIDR ranges from the peered VPC that are allowed access to the ODB network. 
Please refer odb network peering documentation.", + }, + names.AttrStatus: schema.StringAttribute{ + CustomType: statusType, + Computed: true, + Description: "The status of the network resource.", + }, + names.AttrStatusReason: schema.StringAttribute{ + Computed: true, + Description: "Additional information about the current status of the ODB network.", + }, + names.AttrCreatedAt: schema.StringAttribute{ + Computed: true, + CustomType: timetypes.RFC3339Type{}, + Description: "The date and time when the ODB network was created.", + }, + "managed_services": schema.ListAttribute{ + Computed: true, + CustomType: fwtypes.NewListNestedObjectTypeOf[odbNetworkManagedServicesDataSourceModel](ctx), + Description: "The managed services configuration for the ODB network.", + }, + names.AttrTags: tftags.TagsAttributeComputedOnly(), + "oci_dns_forwarding_configs": schema.ListAttribute{ + Computed: true, + CustomType: fwtypes.NewListNestedObjectTypeOf[odbNwkOciDnsForwardingConfigDataSourceModel](ctx), + Description: "The DNS resolver endpoint in OCI for forwarding DNS queries for the ociPrivateZone domain.", + }, + }, + } +} + +func (d *dataSourceNetwork) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + conn := d.Meta().ODBClient(ctx) + var data odbNetworkDataSourceModel + resp.Diagnostics.Append(req.Config.Get(ctx, &data)...) + if resp.Diagnostics.HasError() { + return + } + + input := odb.GetOdbNetworkInput{ + OdbNetworkId: data.OdbNetworkId.ValueStringPointer(), + } + + out, err := conn.GetOdbNetwork(ctx, &input) + if err != nil { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.ODB, create.ErrActionReading, DSNameNetwork, data.OdbNetworkId.String(), err), + err.Error(), + ) + return + } + resp.Diagnostics.Append(flex.Flatten(ctx, out.OdbNetwork, &data)...) + if resp.Diagnostics.HasError() { + return + } + resp.Diagnostics.Append(resp.State.Set(ctx, &data)...) 
+} + +type odbNetworkDataSourceModel struct { + framework.WithRegionModel + AvailabilityZone types.String `tfsdk:"availability_zone"` + AvailabilityZoneId types.String `tfsdk:"availability_zone_id"` + BackupSubnetCidr types.String `tfsdk:"backup_subnet_cidr"` + ClientSubnetCidr types.String `tfsdk:"client_subnet_cidr"` + CustomDomainName types.String `tfsdk:"custom_domain_name"` + DefaultDnsPrefix types.String `tfsdk:"default_dns_prefix"` + DisplayName types.String `tfsdk:"display_name"` + OciDnsForwardingConfigs fwtypes.ListNestedObjectValueOf[odbNwkOciDnsForwardingConfigDataSourceModel] `tfsdk:"oci_dns_forwarding_configs"` + OciNetworkAnchorId types.String `tfsdk:"oci_network_anchor_id"` + OciNetworkAnchorUrl types.String `tfsdk:"oci_network_anchor_url"` + OciResourceAnchorName types.String `tfsdk:"oci_resource_anchor_name"` + OciVcnId types.String `tfsdk:"oci_vcn_id"` + OciVcnUrl types.String `tfsdk:"oci_vcn_url"` + OdbNetworkArn types.String `tfsdk:"arn"` + OdbNetworkId types.String `tfsdk:"id"` + PeeredCidrs fwtypes.SetValueOf[types.String] `tfsdk:"peered_cidrs"` + PercentProgress types.Float64 `tfsdk:"percent_progress"` + Status fwtypes.StringEnum[odbtypes.ResourceStatus] `tfsdk:"status"` + StatusReason types.String `tfsdk:"status_reason"` + CreatedAt timetypes.RFC3339 `tfsdk:"created_at"` + ManagedServices fwtypes.ListNestedObjectValueOf[odbNetworkManagedServicesDataSourceModel] `tfsdk:"managed_services"` + Tags tftags.Map `tfsdk:"tags"` +} + +type odbNwkOciDnsForwardingConfigDataSourceModel struct { + DomainName types.String `tfsdk:"domain_name"` + OciDnsListenerIp types.String `tfsdk:"oci_dns_listener_ip"` +} + +type odbNetworkManagedServicesDataSourceModel struct { + ServiceNetworkArn types.String `tfsdk:"service_network_arn"` + ResourceGatewayArn types.String `tfsdk:"resource_gateway_arn"` + ManagedServicesIpv4Cidrs fwtypes.ListOfString `tfsdk:"managed_service_ipv4_cidrs"` + ServiceNetworkEndpoint 
fwtypes.ListNestedObjectValueOf[serviceNetworkEndpointOdbNetworkDataSourceModel] `tfsdk:"service_network_endpoint"`
	ManagedS3BackupAccess fwtypes.ListNestedObjectValueOf[managedS3BackupAccessOdbNetworkDataSourceModel] `tfsdk:"managed_s3_backup_access"`
	// Bug fix: attribute name was misspelled "zero_tl_access"; use
	// "zero_etl_access" for consistency with the aws_odb_network resource
	// model, which exposes the same value under "zero_etl_access".
	ZeroEtlAccess fwtypes.ListNestedObjectValueOf[zeroEtlAccessOdbNetworkDataSourceModel] `tfsdk:"zero_etl_access"`
	S3Access      fwtypes.ListNestedObjectValueOf[s3AccessOdbNetworkDataSourceModel]      `tfsdk:"s3_access"`
}

// serviceNetworkEndpointOdbNetworkDataSourceModel mirrors the service-network
// endpoint of the managed services block.
type serviceNetworkEndpointOdbNetworkDataSourceModel struct {
	VpcEndpointId   types.String                                 `tfsdk:"vpc_endpoint_id"`
	VpcEndpointType fwtypes.StringEnum[odbtypes.VpcEndpointType] `tfsdk:"vpc_endpoint_type"`
}

// managedS3BackupAccessOdbNetworkDataSourceModel mirrors the managed S3
// backup access configuration.
type managedS3BackupAccessOdbNetworkDataSourceModel struct {
	Status        fwtypes.StringEnum[odbtypes.ManagedResourceStatus] `tfsdk:"status"`
	Ipv4Addresses fwtypes.ListOfString                               `tfsdk:"ipv4_addresses"`
}

// zeroEtlAccessOdbNetworkDataSourceModel mirrors the zero-ETL access
// configuration.
type zeroEtlAccessOdbNetworkDataSourceModel struct {
	Status fwtypes.StringEnum[odbtypes.ManagedResourceStatus] `tfsdk:"status"`
	Cidr   types.String                                       `tfsdk:"cidr"`
}

// s3AccessOdbNetworkDataSourceModel mirrors the S3 access configuration.
type s3AccessOdbNetworkDataSourceModel struct {
	Status           fwtypes.StringEnum[odbtypes.ManagedResourceStatus] `tfsdk:"status"`
	Ipv4Addresses    fwtypes.ListOfString                               `tfsdk:"ipv4_addresses"`
	DomainName       types.String                                       `tfsdk:"domain_name"`
	S3PolicyDocument types.String                                       `tfsdk:"s3_policy_document"`
}
diff --git a/internal/service/odb/network_data_source_test.go b/internal/service/odb/network_data_source_test.go
new file mode 100644
index 000000000000..999f026ed594
--- /dev/null
+++ b/internal/service/odb/network_data_source_test.go
@@ -0,0 +1,145 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0 + +package odb_test + +import ( + "context" + "errors" + "fmt" + "testing" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/odb" + odbtypes "github.com/aws/aws-sdk-go-v2/service/odb/types" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/terraform" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/create" + "github.com/hashicorp/terraform-provider-aws/internal/errs" + tfodb "github.com/hashicorp/terraform-provider-aws/internal/service/odb" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + "github.com/hashicorp/terraform-provider-aws/names" +) + +type oracleDBNetworkDataSourceTest struct { +} + +var oracleDBNetworkDataSourceTestEntity = oracleDBNetworkDataSourceTest{} + +func TestAccODBNetworkDataSource_basic(t *testing.T) { + ctx := acctest.Context(t) + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + networkResource := "aws_odb_network.test_resource" + networkDataSource := "data.aws_odb_network.test" + rName := sdkacctest.RandomWithPrefix("tf-ora-net") + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + oracleDBNetworkDataSourceTestEntity.testAccNetworkDataSourcePreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.ODBServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: oracleDBNetworkDataSourceTestEntity.testAccCheckNetworkDataSourceDestroyed(ctx), + Steps: []resource.TestStep{ + { + Config: oracleDBNetworkDataSourceTestEntity.basicNetworkDataSource(rName), + Check: resource.ComposeAggregateTestCheckFunc( + 
resource.TestCheckResourceAttrPair(networkResource, names.AttrID, networkDataSource, names.AttrID), + ), + }, + }, + }) +} + +func (oracleDBNetworkDataSourceTest) testAccCheckNetworkDataSourceDestroyed(ctx context.Context) resource.TestCheckFunc { + return func(s *terraform.State) error { + conn := acctest.Provider.Meta().(*conns.AWSClient).ODBClient(ctx) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "aws_odb_network" { + continue + } + _, err := oracleDBNetworkDataSourceTestEntity.findNetwork(ctx, conn, rs.Primary.ID) + if tfresource.NotFound(err) { + return nil + } + if err != nil { + return create.Error(names.ODB, create.ErrActionCheckingDestroyed, tfodb.ResNameNetwork, rs.Primary.ID, err) + } + + return create.Error(names.ODB, create.ErrActionCheckingDestroyed, tfodb.ResNameNetwork, rs.Primary.ID, errors.New("not destroyed")) + } + + return nil + } +} + +func (oracleDBNetworkDataSourceTest) findNetwork(ctx context.Context, conn *odb.Client, id string) (*odbtypes.OdbNetwork, error) { + input := odb.GetOdbNetworkInput{ + OdbNetworkId: aws.String(id), + } + + out, err := conn.GetOdbNetwork(ctx, &input) + if err != nil { + if errs.IsA[*odbtypes.ResourceNotFoundException](err) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: &input, + } + } + + return nil, err + } + + if out == nil || out.OdbNetwork == nil { + return nil, tfresource.NewEmptyResultError(&input) + } + + return out.OdbNetwork, nil +} + +func (oracleDBNetworkDataSourceTest) basicNetworkDataSource(rName string) string { + networkRes := fmt.Sprintf(` + + + + +resource "aws_odb_network" "test_resource" { + display_name = %[1]q + availability_zone_id = "use1-az6" + client_subnet_cidr = "10.2.0.0/24" + backup_subnet_cidr = "10.2.1.0/24" + s3_access = "DISABLED" + zero_etl_access = "DISABLED" + tags = { + "env" = "dev" + } +} + + +data "aws_odb_network" "test" { + id = aws_odb_network.test_resource.id +} + + +`, rName) + return networkRes +} +func 
(oracleDBNetworkDataSourceTest) testAccNetworkDataSourcePreCheck(ctx context.Context, t *testing.T) { + conn := acctest.Provider.Meta().(*conns.AWSClient).ODBClient(ctx) + input := odb.ListOdbNetworksInput{} + _, err := conn.ListOdbNetworks(ctx, &input) + if acctest.PreCheckSkipError(err) { + t.Skipf("skipping acceptance testing: %s", err) + } + if err != nil { + t.Fatalf("unexpected PreCheck error: %s", err) + } +} diff --git a/internal/service/odb/network_peering_connection.go b/internal/service/odb/network_peering_connection.go new file mode 100644 index 000000000000..b1836d9d5d32 --- /dev/null +++ b/internal/service/odb/network_peering_connection.go @@ -0,0 +1,397 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package odb + +import ( + "context" + "errors" + "strings" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/aws/arn" + "github.com/aws/aws-sdk-go-v2/service/odb" + odbtypes "github.com/aws/aws-sdk-go-v2/service/odb/types" + "github.com/hashicorp/terraform-plugin-framework-timeouts/resource/timeouts" + "github.com/hashicorp/terraform-plugin-framework-timetypes/timetypes" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/float32planmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" + "github.com/hashicorp/terraform-provider-aws/internal/create" + "github.com/hashicorp/terraform-provider-aws/internal/enum" + "github.com/hashicorp/terraform-provider-aws/internal/errs" + "github.com/hashicorp/terraform-provider-aws/internal/errs/fwdiag" + 
"github.com/hashicorp/terraform-provider-aws/internal/framework" + "github.com/hashicorp/terraform-provider-aws/internal/framework/flex" + fwtypes "github.com/hashicorp/terraform-provider-aws/internal/framework/types" + tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + "github.com/hashicorp/terraform-provider-aws/names" +) + +// Function annotations are used for resource registration to the Provider. DO NOT EDIT. +// @FrameworkResource("aws_odb_network_peering_connection", name="Network Peering Connection") +// @Tags(identifierAttribute="arn") +func newResourceNetworkPeeringConnection(_ context.Context) (resource.ResourceWithConfigure, error) { + r := &resourceNetworkPeeringConnection{} + + r.SetDefaultCreateTimeout(24 * time.Hour) + r.SetDefaultUpdateTimeout(24 * time.Hour) + r.SetDefaultDeleteTimeout(24 * time.Hour) + + return r, nil +} + +const ( + ResNameNetworkPeeringConnection = "Network Peering Connection" +) + +var OracleDBNetworkPeeringConnection = newResourceNetworkPeeringConnection + +type resourceNetworkPeeringConnection struct { + framework.ResourceWithModel[odbNetworkPeeringConnectionResourceModel] + framework.WithTimeouts + framework.WithImportByID +} + +func (r *resourceNetworkPeeringConnection) Schema(ctx context.Context, req resource.SchemaRequest, resp *resource.SchemaResponse) { + resp.Schema = schema.Schema{ + Description: "A peering connection between an ODB network and either another ODB network or a customer-owned VPC.", + Attributes: map[string]schema.Attribute{ + names.AttrARN: framework.ARNAttributeComputedOnly(), + names.AttrID: framework.IDAttribute(), + "odb_network_id": schema.StringAttribute{ + Required: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + Description: "Required field. The unique identifier of the ODB network that initiates the peering connection. 
" + + "A sample ID is odbpcx-abcdefgh12345678. Changing this will force terraform to create new resource.", + }, + "peer_network_id": schema.StringAttribute{ + Required: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + Description: "Required field. The unique identifier of the ODB peering connection. Changing this will force terraform to create new resource", + }, + + names.AttrDisplayName: schema.StringAttribute{ + Description: "Display name of the odb network peering connection. Changing this will force terraform to create new resource", + Required: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + }, + + names.AttrStatus: schema.StringAttribute{ + Description: "Status of the odb network peering connection.", + CustomType: fwtypes.StringEnumType[odbtypes.ResourceStatus](), + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + }, + names.AttrStatusReason: schema.StringAttribute{ + Description: "The reason for the current status of the ODB peering connection..", + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + }, + + "odb_network_arn": schema.StringAttribute{ + Description: "ARN of the odb network peering connection.", + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + }, + + "peer_network_arn": schema.StringAttribute{ + Description: "ARN of the peer network peering connection.", + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + }, + "odb_peering_connection_type": schema.StringAttribute{ + Description: "Type of the odb peering connection.", + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + }, + names.AttrCreatedAt: schema.StringAttribute{ + Description: "Created time of the odb network peering 
connection.", + Computed: true, + CustomType: timetypes.RFC3339Type{}, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + }, + "percent_progress": schema.Float32Attribute{ + Description: "Progress of the odb network peering connection.", + Computed: true, + PlanModifiers: []planmodifier.Float32{ + float32planmodifier.UseStateForUnknown(), + }, + }, + names.AttrTags: tftags.TagsAttribute(), + names.AttrTagsAll: tftags.TagsAttributeComputedOnly(), + }, + Blocks: map[string]schema.Block{ + names.AttrTimeouts: timeouts.Block(ctx, timeouts.Opts{ + Create: true, + Update: true, + Delete: true, + }), + }, + } +} + +func (r *resourceNetworkPeeringConnection) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { + conn := r.Meta().ODBClient(ctx) + var plan odbNetworkPeeringConnectionResourceModel + resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...) + if resp.Diagnostics.HasError() { + return + } + input := odb.CreateOdbPeeringConnectionInput{ + OdbNetworkId: plan.OdbNetworkId.ValueStringPointer(), + PeerNetworkId: plan.PeerNetworkId.ValueStringPointer(), + DisplayName: plan.DisplayName.ValueStringPointer(), + Tags: getTagsIn(ctx), + } + out, err := conn.CreateOdbPeeringConnection(ctx, &input) + if err != nil { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.ODB, create.ErrActionCreating, ResNameNetworkPeeringConnection, plan.DisplayName.ValueString(), err), + err.Error(), + ) + return + } + if out == nil || out.OdbPeeringConnectionId == nil { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.ODB, create.ErrActionCreating, ResNameNetworkPeeringConnection, plan.DisplayName.ValueString(), nil), + errors.New("empty output").Error(), + ) + return + } + resp.Diagnostics.Append(flex.Flatten(ctx, out, &plan)...) 
+ if resp.Diagnostics.HasError() { + return + } + createTimeout := r.CreateTimeout(ctx, plan.Timeouts) + createdPeeredConnection, err := waitNetworkPeeringConnectionCreated(ctx, conn, plan.OdbPeeringConnectionId.ValueString(), createTimeout) + resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root(names.AttrID), aws.ToString(out.OdbPeeringConnectionId))...) + if err != nil { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.ODB, create.ErrActionWaitingForCreation, ResNameNetworkPeeringConnection, plan.DisplayName.ValueString(), err), + err.Error(), + ) + return + } + + odbNetworkARNParsed, err := arn.Parse(*createdPeeredConnection.OdbNetworkArn) + if err != nil { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.ODB, create.ErrActionReading, ResNameNetworkPeeringConnection, plan.DisplayName.ValueString(), err), + err.Error(), + ) + return + } + peerVpcARN, err := arn.Parse(*createdPeeredConnection.PeerNetworkArn) + if err != nil { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.ODB, create.ErrActionReading, ResNameNetworkPeeringConnection, plan.DisplayName.ValueString(), err), + err.Error(), + ) + return + } + plan.PeerNetworkId = types.StringValue(strings.Split(peerVpcARN.Resource, "/")[1]) + plan.OdbNetworkId = types.StringValue(strings.Split(odbNetworkARNParsed.Resource, "/")[1]) + resp.Diagnostics.Append(flex.Flatten(ctx, createdPeeredConnection, &plan)...) + if resp.Diagnostics.HasError() { + return + } + resp.Diagnostics.Append(resp.State.Set(ctx, plan)...) +} + +func (r *resourceNetworkPeeringConnection) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { + conn := r.Meta().ODBClient(ctx) + var state odbNetworkPeeringConnectionResourceModel + resp.Diagnostics.Append(req.State.Get(ctx, &state)...) 
+ if resp.Diagnostics.HasError() { + return + } + + out, err := findNetworkPeeringConnectionByID(ctx, conn, state.OdbPeeringConnectionId.ValueString()) + if tfresource.NotFound(err) { + resp.Diagnostics.Append(fwdiag.NewResourceNotFoundWarningDiagnostic(err)) + resp.State.RemoveResource(ctx) + return + } + if err != nil { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.ODB, create.ErrActionReading, ResNameNetworkPeeringConnection, state.OdbPeeringConnectionId.ValueString(), err), + err.Error(), + ) + return + } + + odbNetworkARNParsed, err := arn.Parse(*out.OdbNetworkArn) + if err != nil { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.ODB, create.ErrActionReading, ResNameNetworkPeeringConnection, state.OdbPeeringConnectionId.ValueString(), err), + err.Error(), + ) + return + } + + peerVpcARN, err := arn.Parse(*out.PeerNetworkArn) + if err != nil { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.ODB, create.ErrActionReading, ResNameNetworkPeeringConnection, state.OdbPeeringConnectionId.ValueString(), err), + err.Error(), + ) + return + } + state.PeerNetworkId = types.StringValue(strings.Split(peerVpcARN.Resource, "/")[1]) + state.OdbNetworkId = types.StringValue(strings.Split(odbNetworkARNParsed.Resource, "/")[1]) + + resp.Diagnostics.Append(flex.Flatten(ctx, out, &state)...) + if resp.Diagnostics.HasError() { + return + } + resp.Diagnostics.Append(resp.State.Set(ctx, &state)...) +} + +func (r *resourceNetworkPeeringConnection) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { + conn := r.Meta().ODBClient(ctx) + var state odbNetworkPeeringConnectionResourceModel + resp.Diagnostics.Append(req.State.Get(ctx, &state)...) 
+ if resp.Diagnostics.HasError() { + return + } + + input := odb.DeleteOdbPeeringConnectionInput{ + OdbPeeringConnectionId: state.OdbPeeringConnectionId.ValueStringPointer(), + } + _, err := conn.DeleteOdbPeeringConnection(ctx, &input) + if err != nil { + if errs.IsA[*odbtypes.ResourceNotFoundException](err) { + return + } + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.ODB, create.ErrActionDeleting, ResNameNetworkPeeringConnection, state.OdbPeeringConnectionId.ValueString(), err), + err.Error(), + ) + return + } + + deleteTimeout := r.DeleteTimeout(ctx, state.Timeouts) + _, err = waitNetworkPeeringConnectionDeleted(ctx, conn, state.OdbPeeringConnectionId.ValueString(), deleteTimeout) + if err != nil { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.ODB, create.ErrActionWaitingForDeletion, ResNameNetworkPeeringConnection, state.OdbPeeringConnectionId.String(), err), + err.Error(), + ) + return + } +} + +func waitNetworkPeeringConnectionCreated(ctx context.Context, conn *odb.Client, id string, timeout time.Duration) (*odbtypes.OdbPeeringConnection, error) { + stateConf := &retry.StateChangeConf{ + Pending: enum.Slice(odbtypes.ResourceStatusProvisioning), + Target: enum.Slice(odbtypes.ResourceStatusAvailable, odbtypes.ResourceStatusFailed), + Refresh: statusNetworkPeeringConnection(ctx, conn, id), + Timeout: timeout, + NotFoundChecks: 20, + ContinuousTargetOccurence: 2, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + if out, ok := outputRaw.(*odbtypes.OdbPeeringConnection); ok { + return out, err + } + + return nil, err +} + +func waitNetworkPeeringConnectionDeleted(ctx context.Context, conn *odb.Client, id string, timeout time.Duration) (*odbtypes.OdbPeeringConnection, error) { + stateConf := &retry.StateChangeConf{ + Pending: enum.Slice(odbtypes.ResourceStatusTerminating), + Target: []string{}, + Refresh: statusNetworkPeeringConnection(ctx, conn, id), + Timeout: timeout, + } + + outputRaw, err := 
stateConf.WaitForStateContext(ctx) + if out, ok := outputRaw.(*odbtypes.OdbPeeringConnection); ok { + return out, err + } + return nil, err +} + +func statusNetworkPeeringConnection(ctx context.Context, conn *odb.Client, id string) retry.StateRefreshFunc { + return func() (any, string, error) { + out, err := findNetworkPeeringConnectionByID(ctx, conn, id) + if tfresource.NotFound(err) { + return nil, "", nil + } + if err != nil { + return nil, "", err + } + return out, string(out.Status), nil + } +} + +func findNetworkPeeringConnectionByID(ctx context.Context, conn *odb.Client, id string) (*odbtypes.OdbPeeringConnection, error) { + input := odb.GetOdbPeeringConnectionInput{ + OdbPeeringConnectionId: &id, + } + + out, err := conn.GetOdbPeeringConnection(ctx, &input) + if err != nil { + if errs.IsA[*odbtypes.ResourceNotFoundException](err) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: &input, + } + } + + return nil, err + } + + if out == nil || out.OdbPeeringConnection == nil { + return nil, tfresource.NewEmptyResultError(&input) + } + + return out.OdbPeeringConnection, nil +} + +type odbNetworkPeeringConnectionResourceModel struct { + framework.WithRegionModel + OdbNetworkId types.String `tfsdk:"odb_network_id" autoflex:",noflatten"` + PeerNetworkId types.String `tfsdk:"peer_network_id" autoflex:",noflatten"` + OdbPeeringConnectionId types.String `tfsdk:"id"` + DisplayName types.String `tfsdk:"display_name"` + Status fwtypes.StringEnum[odbtypes.ResourceStatus] `tfsdk:"status"` + StatusReason types.String `tfsdk:"status_reason"` + OdbPeeringConnectionArn types.String `tfsdk:"arn"` + OdbNetworkArn types.String `tfsdk:"odb_network_arn"` + PeerNetworkArn types.String `tfsdk:"peer_network_arn"` + OdbPeeringConnectionType types.String `tfsdk:"odb_peering_connection_type"` + CreatedAt timetypes.RFC3339 `tfsdk:"created_at"` + PercentProgress types.Float32 `tfsdk:"percent_progress"` + Timeouts timeouts.Value `tfsdk:"timeouts"` + Tags tftags.Map 
`tfsdk:"tags"` + TagsAll tftags.Map `tfsdk:"tags_all"` +} diff --git a/internal/service/odb/network_peering_connection_data_source.go b/internal/service/odb/network_peering_connection_data_source.go new file mode 100644 index 000000000000..d47c257b6386 --- /dev/null +++ b/internal/service/odb/network_peering_connection_data_source.go @@ -0,0 +1,137 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package odb + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/service/odb" + odbtypes "github.com/aws/aws-sdk-go-v2/service/odb/types" + "github.com/hashicorp/terraform-plugin-framework-timetypes/timetypes" + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-provider-aws/internal/create" + "github.com/hashicorp/terraform-provider-aws/internal/framework" + "github.com/hashicorp/terraform-provider-aws/internal/framework/flex" + fwtypes "github.com/hashicorp/terraform-provider-aws/internal/framework/types" + tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" + "github.com/hashicorp/terraform-provider-aws/names" +) + +// Function annotations are used for datasource registration to the Provider. DO NOT EDIT. 
+// @FrameworkDataSource("aws_odb_network_peering_connection", name="Network Peering Connection") +func newDataSourceNetworkPeeringConnection(context.Context) (datasource.DataSourceWithConfigure, error) { + return &dataSourceNetworkPeeringConnection{}, nil +} + +const ( + DSNameNetworkPeeringConnection = "Network Peering Connection Data Source" +) + +type dataSourceNetworkPeeringConnection struct { + framework.DataSourceWithModel[odbNetworkPeeringConnectionDataSourceModel] +} + +func (d *dataSourceNetworkPeeringConnection) Schema(ctx context.Context, req datasource.SchemaRequest, resp *datasource.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: map[string]schema.Attribute{ + names.AttrID: schema.StringAttribute{ + Description: "Network Peering Connection identifier.", + Required: true, + }, + names.AttrDisplayName: schema.StringAttribute{ + Description: "Display name of the odb network peering connection.", + Computed: true, + }, + names.AttrStatus: schema.StringAttribute{ + Description: "Status of the odb network peering connection.", + CustomType: fwtypes.StringEnumType[odbtypes.ResourceStatus](), + Computed: true, + }, + names.AttrStatusReason: schema.StringAttribute{ + Description: "Status of the odb network peering connection.", + Computed: true, + }, + + "odb_network_arn": schema.StringAttribute{ + Description: "ARN of the odb network peering connection.", + Computed: true, + }, + + names.AttrARN: framework.ARNAttributeComputedOnly(), + + "peer_network_arn": schema.StringAttribute{ + Description: "ARN of the peer network peering connection.", + Computed: true, + }, + "odb_peering_connection_type": schema.StringAttribute{ + Description: "Type of the odb peering connection.", + Computed: true, + }, + names.AttrCreatedAt: schema.StringAttribute{ + Description: "Created time of the odb network peering connection.", + Computed: true, + CustomType: timetypes.RFC3339Type{}, + }, + "percent_progress": schema.Float32Attribute{ + Description: "Progress of 
the odb network peering connection.", + Computed: true, + }, + names.AttrTags: tftags.TagsAttributeComputedOnly(), + }, + } +} + +func (d *dataSourceNetworkPeeringConnection) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + conn := d.Meta().ODBClient(ctx) + var data odbNetworkPeeringConnectionDataSourceModel + resp.Diagnostics.Append(req.Config.Get(ctx, &data)...) + if resp.Diagnostics.HasError() { + return + } + + input := odb.GetOdbPeeringConnectionInput{ + OdbPeeringConnectionId: data.OdbPeeringConnectionId.ValueStringPointer(), + } + out, err := conn.GetOdbPeeringConnection(ctx, &input) + if err != nil { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.ODB, create.ErrActionReading, DSNameNetworkPeeringConnection, data.OdbPeeringConnectionId.ValueString(), err), + err.Error(), + ) + return + } + tagsRead, err := listTags(ctx, conn, *out.OdbPeeringConnection.OdbPeeringConnectionArn) + if err != nil { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.ODB, create.ErrActionReading, DSNameNetworkPeeringConnection, data.OdbPeeringConnectionId.ValueString(), err), + err.Error(), + ) + return + } + if tagsRead != nil { + data.Tags = tftags.FlattenStringValueMap(ctx, tagsRead.Map()) + } + resp.Diagnostics.Append(flex.Flatten(ctx, out.OdbPeeringConnection, &data)...) + if resp.Diagnostics.HasError() { + return + } + resp.Diagnostics.Append(resp.State.Set(ctx, &data)...) 
}

// odbNetworkPeeringConnectionDataSourceModel mirrors the data source schema;
// fields are populated by AutoFlex from the GetOdbPeeringConnection output.
type odbNetworkPeeringConnectionDataSourceModel struct {
	framework.WithRegionModel
	OdbPeeringConnectionId   types.String                                `tfsdk:"id"`
	DisplayName              types.String                                `tfsdk:"display_name"`
	Status                   fwtypes.StringEnum[odbtypes.ResourceStatus] `tfsdk:"status"`
	StatusReason             types.String                                `tfsdk:"status_reason"`
	OdbPeeringConnectionArn  types.String                                `tfsdk:"arn"`
	OdbNetworkArn            types.String                                `tfsdk:"odb_network_arn"`
	PeerNetworkArn           types.String                                `tfsdk:"peer_network_arn"`
	OdbPeeringConnectionType types.String                                `tfsdk:"odb_peering_connection_type"`
	CreatedAt                timetypes.RFC3339                           `tfsdk:"created_at"`
	PercentProgress          types.Float32                               `tfsdk:"percent_progress"`
	Tags                     tftags.Map                                  `tfsdk:"tags"`
}
diff --git a/internal/service/odb/network_peering_connection_data_source_test.go b/internal/service/odb/network_peering_connection_data_source_test.go
new file mode 100644
index 000000000000..0a53e7a9f4b9
--- /dev/null
+++ b/internal/service/odb/network_peering_connection_data_source_test.go
@@ -0,0 +1,143 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0

package odb_test

import (
	"context"
	"errors"
	"fmt"
	"testing"

	"github.com/aws/aws-sdk-go-v2/service/odb"
	odbtypes "github.com/aws/aws-sdk-go-v2/service/odb/types"
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry"
	sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest"
	"github.com/hashicorp/terraform-plugin-testing/helper/resource"
	"github.com/hashicorp/terraform-plugin-testing/terraform"
	"github.com/hashicorp/terraform-provider-aws/internal/acctest"
	"github.com/hashicorp/terraform-provider-aws/internal/conns"
	"github.com/hashicorp/terraform-provider-aws/internal/create"
	"github.com/hashicorp/terraform-provider-aws/internal/errs"
	tfodb "github.com/hashicorp/terraform-provider-aws/internal/service/odb"
	"github.com/hashicorp/terraform-provider-aws/internal/tfresource"
	"github.com/hashicorp/terraform-provider-aws/names"
)

// oracleDBNetPeeringDataSourceTest groups the naming prefixes used by the
// acceptance tests below.
type oracleDBNetPeeringDataSourceTest struct {
	// prefix for generated ODB network display names
	odbNetDisplayNamePrefix string
	// prefix for generated peering-connection display names
	odbNetworkPeeringDisplayNamePrefix string
	// prefix for generated VPC Name tags
	vpcNamePrefix string
}

var oracleDBNetPeeringDSTestEntity = oracleDBNetPeeringDataSourceTest{
	odbNetDisplayNamePrefix:            "tf",
	odbNetworkPeeringDisplayNamePrefix: "tf",
	vpcNamePrefix:                      "tf",
}

// Acceptance test access AWS and cost money to run.
+func TestAccODBNetworkPeeringConnectionDataSource_basic(t *testing.T) { + ctx := acctest.Context(t) + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + networkPeeringResource := "aws_odb_network_peering_connection.test" + networkPerringDataSource := "data.aws_odb_network_peering_connection.test" + odbNetPeeringDisplayName := sdkacctest.RandomWithPrefix(oracleDBNetPeeringDSTestEntity.odbNetworkPeeringDisplayNamePrefix) + odbNetDispName := sdkacctest.RandomWithPrefix(oracleDBNetPeeringDSTestEntity.odbNetDisplayNamePrefix) + vpcName := sdkacctest.RandomWithPrefix(oracleDBNetPeeringDSTestEntity.vpcNamePrefix) + resource.Test(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.ODBServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: oracleDBNetPeeringDSTestEntity.testAccCheckCloudOracleDBNetworkPeeringDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: oracleDBNetPeeringDSTestEntity.basicPeeringConfig(vpcName, odbNetDispName, odbNetPeeringDisplayName), + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttrPair(networkPeeringResource, names.AttrID, networkPerringDataSource, names.AttrID), + ), + }, + }, + }) +} + +func (oracleDBNetPeeringDataSourceTest) testAccCheckCloudOracleDBNetworkPeeringDestroy(ctx context.Context) resource.TestCheckFunc { + return func(s *terraform.State) error { + conn := acctest.Provider.Meta().(*conns.AWSClient).ODBClient(ctx) + for _, rs := range s.RootModule().Resources { + if rs.Type != "aws_odb_network_peering_connection" { + continue + } + _, err := oracleDBNetPeeringDSTestEntity.findOracleDBNetworkPeering(ctx, conn, rs.Primary.ID) + + if err != nil { + if tfresource.NotFound(err) { + return nil + } + return create.Error(names.ODB, create.ErrActionCheckingDestroyed, tfodb.DSNameNetworkPeeringConnection, rs.Primary.ID, err) + } + return create.Error(names.ODB, 
create.ErrActionCheckingDestroyed, tfodb.DSNameNetworkPeeringConnection, rs.Primary.ID, errors.New("not destroyed")) + } + return nil + } +} + +func (oracleDBNetPeeringDataSourceTest) findOracleDBNetworkPeering(ctx context.Context, conn *odb.Client, id string) (output *odb.GetOdbPeeringConnectionOutput, err error) { + input := odb.GetOdbPeeringConnectionInput{ + OdbPeeringConnectionId: &id, + } + out, err := conn.GetOdbPeeringConnection(ctx, &input) + if err != nil { + if errs.IsA[*odbtypes.ResourceNotFoundException](err) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: &input, + } + } + + return nil, err + } + if out == nil { + return nil, errors.New("odb Network Peering Connection resource can not be nil") + } + return out, nil +} +func (oracleDBNetPeeringDataSourceTest) basicPeeringConfig(vpcName, odbNetDisplayName, odbPeeringDisplayName string) string { + testData := fmt.Sprintf(` + + +resource "aws_vpc" "test" { + cidr_block = "10.0.0.0/16" + tags = { + Name = %[1]q + } +} + +resource "aws_odb_network" "test" { + display_name = %[2]q + availability_zone_id = "use1-az6" + client_subnet_cidr = "10.2.0.0/24" + backup_subnet_cidr = "10.2.1.0/24" + s3_access = "DISABLED" + zero_etl_access = "DISABLED" +} + +resource "aws_odb_network_peering_connection" "test" { + display_name = %[3]q + odb_network_id = aws_odb_network.test.id + peer_network_id = aws_vpc.test.id + +} + +data "aws_odb_network_peering_connection" "test" { + id = aws_odb_network_peering_connection.test.id +} + + +`, vpcName, odbNetDisplayName, odbPeeringDisplayName) + return testData +} diff --git a/internal/service/odb/network_peering_connection_test.go b/internal/service/odb/network_peering_connection_test.go new file mode 100644 index 000000000000..004becb29b3a --- /dev/null +++ b/internal/service/odb/network_peering_connection_test.go @@ -0,0 +1,289 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package odb_test + +import ( + "context" + "errors" + "fmt" + "testing" + + "github.com/aws/aws-sdk-go-v2/service/odb" + odbtypes "github.com/aws/aws-sdk-go-v2/service/odb/types" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/terraform" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/create" + "github.com/hashicorp/terraform-provider-aws/internal/errs" + tfodb "github.com/hashicorp/terraform-provider-aws/internal/service/odb" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + "github.com/hashicorp/terraform-provider-aws/names" +) + +type oracleDBNwkPeeringResourceTest struct { + vpcNamePrefix string + odbPeeringDisplayNamePrefix string + odbNwkDisplayNamePrefix string +} + +var oracleDBNwkPeeringTestResource = oracleDBNwkPeeringResourceTest{ + vpcNamePrefix: "odb-vpc", + odbPeeringDisplayNamePrefix: "odb-peering", + odbNwkDisplayNamePrefix: "odb-net", +} + +func TestAccODBNetworkPeeringConnection_basic(t *testing.T) { + ctx := acctest.Context(t) + + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + + var odbPeeringResource odb.GetOdbPeeringConnectionOutput + odbPeeringDisplayName := sdkacctest.RandomWithPrefix(oracleDBNwkPeeringTestResource.odbPeeringDisplayNamePrefix) + vpcName := sdkacctest.RandomWithPrefix(oracleDBNwkPeeringTestResource.vpcNamePrefix) + odbNetName := sdkacctest.RandomWithPrefix(oracleDBNwkPeeringTestResource.odbNwkDisplayNamePrefix) + resourceName := "aws_odb_network_peering_connection.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + 
oracleDBNwkPeeringTestResource.testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.ODBServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: oracleDBNwkPeeringTestResource.testAccCheckNetworkPeeringConnectionDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: oracleDBNwkPeeringTestResource.basicConfig(vpcName, odbNetName, odbPeeringDisplayName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckNetworkPeeringConnectionExists(ctx, resourceName, &odbPeeringResource), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, "1"), + resource.TestCheckResourceAttr(resourceName, "tags.env", "dev"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccODBNetworkPeeringConnection_tagging(t *testing.T) { + ctx := acctest.Context(t) + + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + + var odbPeeringResource odb.GetOdbPeeringConnectionOutput + odbPeeringDisplayName := sdkacctest.RandomWithPrefix(oracleDBNwkPeeringTestResource.odbPeeringDisplayNamePrefix) + vpcName := sdkacctest.RandomWithPrefix(oracleDBNwkPeeringTestResource.vpcNamePrefix) + odbNetName := sdkacctest.RandomWithPrefix(oracleDBNwkPeeringTestResource.odbNwkDisplayNamePrefix) + resourceName := "aws_odb_network_peering_connection.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + oracleDBNwkPeeringTestResource.testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.ODBServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: oracleDBNwkPeeringTestResource.testAccCheckNetworkPeeringConnectionDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: oracleDBNwkPeeringTestResource.basicConfig(vpcName, odbNetName, odbPeeringDisplayName), + Check: resource.ComposeAggregateTestCheckFunc( + 
testAccCheckNetworkPeeringConnectionExists(ctx, resourceName, &odbPeeringResource), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, "1"), + resource.TestCheckResourceAttr(resourceName, "tags.env", "dev"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: oracleDBNwkPeeringTestResource.basicConfigNoTag(vpcName, odbNetName, odbPeeringDisplayName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckNetworkPeeringConnectionExists(ctx, resourceName, &odbPeeringResource), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, "0"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccODBNetworkPeeringConnection_disappears(t *testing.T) { + ctx := acctest.Context(t) + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + + var odbPeering odb.GetOdbPeeringConnectionOutput + odbPeeringDisplayName := sdkacctest.RandomWithPrefix(oracleDBNwkPeeringTestResource.odbPeeringDisplayNamePrefix) + vpcName := sdkacctest.RandomWithPrefix(oracleDBNwkPeeringTestResource.vpcNamePrefix) + odbNetDisplayName := sdkacctest.RandomWithPrefix(oracleDBNwkPeeringTestResource.odbPeeringDisplayNamePrefix) + resourceName := "aws_odb_network_peering_connection.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + oracleDBNwkPeeringTestResource.testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.ODBServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: oracleDBNwkPeeringTestResource.testAccCheckNetworkPeeringConnectionDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: oracleDBNwkPeeringTestResource.basicConfig(vpcName, odbNetDisplayName, odbPeeringDisplayName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckNetworkPeeringConnectionExists(ctx, resourceName, &odbPeering), + 
acctest.CheckFrameworkResourceDisappears(ctx, acctest.Provider, tfodb.OracleDBNetworkPeeringConnection, resourceName), + ), + ExpectNonEmptyPlan: true, + }, + }, + }) +} + +func (oracleDBNwkPeeringResourceTest) testAccCheckNetworkPeeringConnectionDestroy(ctx context.Context) resource.TestCheckFunc { + return func(s *terraform.State) error { + conn := acctest.Provider.Meta().(*conns.AWSClient).ODBClient(ctx) + for _, rs := range s.RootModule().Resources { + if rs.Type != "aws_odb_network_peering_connection" { + continue + } + _, err := oracleDBNwkPeeringTestResource.findOracleDBNetworkPeering(ctx, conn, rs.Primary.ID) + if tfresource.NotFound(err) { + return nil + } + if err != nil { + return create.Error(names.ODB, create.ErrActionCheckingDestroyed, tfodb.ResNameNetworkPeeringConnection, rs.Primary.ID, err) + } + return create.Error(names.ODB, create.ErrActionCheckingDestroyed, tfodb.ResNameNetworkPeeringConnection, rs.Primary.ID, errors.New("not destroyed")) + } + return nil + } +} + +func testAccCheckNetworkPeeringConnectionExists(ctx context.Context, name string, odbPeeringConnection *odb.GetOdbPeeringConnectionOutput) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[name] + if !ok { + return create.Error(names.ODB, create.ErrActionCheckingExistence, tfodb.ResNameNetworkPeeringConnection, name, errors.New("not found")) + } + if rs.Primary.ID == "" { + return create.Error(names.ODB, create.ErrActionCheckingExistence, tfodb.ResNameNetworkPeeringConnection, name, errors.New("not set")) + } + conn := acctest.Provider.Meta().(*conns.AWSClient).ODBClient(ctx) + + resp, err := oracleDBNwkPeeringTestResource.findOracleDBNetworkPeering(ctx, conn, rs.Primary.ID) + if err != nil { + return create.Error(names.ODB, create.ErrActionCheckingExistence, tfodb.ResNameNetworkPeeringConnection, rs.Primary.ID, err) + } + *odbPeeringConnection = *resp + return nil + } +} + +func (oracleDBNwkPeeringResourceTest) 
testAccPreCheck(ctx context.Context, t *testing.T) { + conn := acctest.Provider.Meta().(*conns.AWSClient).ODBClient(ctx) + input := odb.ListOdbPeeringConnectionsInput{} + _, err := conn.ListOdbPeeringConnections(ctx, &input) + if acctest.PreCheckSkipError(err) { + t.Skipf("skipping acceptance testing: %s", err) + } + if err != nil { + t.Fatalf("unexpected PreCheck error: %s", err) + } +} + +func (oracleDBNwkPeeringResourceTest) findOracleDBNetworkPeering(ctx context.Context, conn *odb.Client, id string) (output *odb.GetOdbPeeringConnectionOutput, err error) { + input := odb.GetOdbPeeringConnectionInput{ + OdbPeeringConnectionId: &id, + } + out, err := conn.GetOdbPeeringConnection(ctx, &input) + if err != nil { + if errs.IsA[*odbtypes.ResourceNotFoundException](err) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: &input, + } + } + return nil, err + } + if out == nil { + return nil, errors.New("odb Network Peering Connection resource can not be nil") + } + return out, nil +} + +func (oracleDBNwkPeeringResourceTest) basicConfig(vpcName, odbNetName, odbPeeringName string) string { + return fmt.Sprintf(` +resource "aws_vpc" "test" { + cidr_block = "10.0.0.0/16" + tags = { + Name = %[1]q + } +} + +resource "aws_odb_network" "test" { + display_name = %[2]q + availability_zone_id = "use1-az6" + client_subnet_cidr = "10.2.0.0/24" + backup_subnet_cidr = "10.2.1.0/24" + s3_access = "DISABLED" + zero_etl_access = "DISABLED" +} + +resource "aws_odb_network_peering_connection" "test" { + display_name = %[3]q + odb_network_id = aws_odb_network.test.id + peer_network_id = aws_vpc.test.id + tags = { + "env" = "dev" + } +} +`, vpcName, odbNetName, odbPeeringName) +} + +func (oracleDBNwkPeeringResourceTest) basicConfigNoTag(vpcName, odbNetName, odbPeeringName string) string { + return fmt.Sprintf(` +resource "aws_vpc" "test" { + cidr_block = "10.0.0.0/16" + + tags = { + Name = %[1]q + } +} + +resource "aws_odb_network" "test" { + display_name = %[2]q + 
availability_zone_id = "use1-az6" + client_subnet_cidr = "10.2.0.0/24" + backup_subnet_cidr = "10.2.1.0/24" + s3_access = "DISABLED" + zero_etl_access = "DISABLED" +} + +resource "aws_odb_network_peering_connection" "test" { + display_name = %[3]q + odb_network_id = aws_odb_network.test.id + peer_network_id = aws_vpc.test.id + +} +`, vpcName, odbNetName, odbPeeringName) +} diff --git a/internal/service/odb/network_peering_connections_data_source.go b/internal/service/odb/network_peering_connections_data_source.go new file mode 100644 index 000000000000..065cf1c46c93 --- /dev/null +++ b/internal/service/odb/network_peering_connections_data_source.go @@ -0,0 +1,88 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package odb + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/service/odb" + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-provider-aws/internal/create" + "github.com/hashicorp/terraform-provider-aws/internal/framework" + "github.com/hashicorp/terraform-provider-aws/internal/framework/flex" + fwtypes "github.com/hashicorp/terraform-provider-aws/internal/framework/types" + "github.com/hashicorp/terraform-provider-aws/names" +) + +// Function annotations are used for datasource registration to the Provider. DO NOT EDIT. 
+// @FrameworkDataSource("aws_odb_network_peering_connections", name="Network Peering Connections") +func newDataSourceNetworkPeeringConnectionsList(context.Context) (datasource.DataSourceWithConfigure, error) { + return &dataSourceNetworkPeeringConnectionsList{}, nil +} + +const ( + DSNameNetworkPeeringConnectionsList = "Network Peering Connections List Data Source" +) + +type dataSourceNetworkPeeringConnectionsList struct { + framework.DataSourceWithModel[odbNetworkPeeringConnectionsListDataSourceModel] +} + +func (d *dataSourceNetworkPeeringConnectionsList) Schema(ctx context.Context, req datasource.SchemaRequest, resp *datasource.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: map[string]schema.Attribute{}, + Blocks: map[string]schema.Block{ + "odb_peering_connections": schema.ListNestedBlock{ + Description: "The list of ODB peering connections. A summary of an ODB peering connection.", + CustomType: fwtypes.NewListNestedObjectTypeOf[odbNetworkPeeringConnectionSummaryDataSourceModel](ctx), + }, + }, + } +} + +func (d *dataSourceNetworkPeeringConnectionsList) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + conn := d.Meta().ODBClient(ctx) + var data odbNetworkPeeringConnectionsListDataSourceModel + resp.Diagnostics.Append(req.Config.Get(ctx, &data)...) + if resp.Diagnostics.HasError() { + return + } + out, err := ListOracleDBPeeringConnections(ctx, conn) + if err != nil { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.ODB, create.ErrActionReading, DSNameNetworkPeeringConnectionsList, "", err), + err.Error(), + ) + return + } + resp.Diagnostics.Append(flex.Flatten(ctx, out, &data)...) + resp.Diagnostics.Append(resp.State.Set(ctx, &data)...) 
+} + +func ListOracleDBPeeringConnections(ctx context.Context, conn *odb.Client) (*odb.ListOdbPeeringConnectionsOutput, error) { + var out odb.ListOdbPeeringConnectionsOutput + paginator := odb.NewListOdbPeeringConnectionsPaginator(conn, &odb.ListOdbPeeringConnectionsInput{}) + for paginator.HasMorePages() { + output, err := paginator.NextPage(ctx) + if err != nil { + return nil, err + } + out.OdbPeeringConnections = append(out.OdbPeeringConnections, output.OdbPeeringConnections...) + } + return &out, nil +} + +type odbNetworkPeeringConnectionsListDataSourceModel struct { + framework.WithRegionModel + OdbPeeringConnections fwtypes.ListNestedObjectValueOf[odbNetworkPeeringConnectionSummaryDataSourceModel] `tfsdk:"odb_peering_connections"` +} +type odbNetworkPeeringConnectionSummaryDataSourceModel struct { + OdbPeeringConnectionId types.String `tfsdk:"id"` + OdbPeeringConnectionArn types.String `tfsdk:"arn"` + DisplayName types.String `tfsdk:"display_name"` + OdbNetworkArn types.String `tfsdk:"odb_network_arn"` + PeerNetworkArn types.String `tfsdk:"peer_network_arn"` +} diff --git a/internal/service/odb/network_peering_connections_data_source_test.go b/internal/service/odb/network_peering_connections_data_source_test.go new file mode 100644 index 000000000000..a2f6b4eb0d89 --- /dev/null +++ b/internal/service/odb/network_peering_connections_data_source_test.go @@ -0,0 +1,84 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package odb_test + +import ( + "context" + "errors" + "strconv" + "testing" + + "github.com/aws/aws-sdk-go-v2/service/odb" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/terraform" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/create" + tfodb "github.com/hashicorp/terraform-provider-aws/internal/service/odb" + "github.com/hashicorp/terraform-provider-aws/names" +) + +type listOdbNetworkPeering struct { +} + +func TestAccODBListNetworkPeeringConnections_basic(t *testing.T) { + ctx := acctest.Context(t) + var listOfPeeredNwks = listOdbNetworkPeering{} + var output odb.ListOdbPeeringConnectionsOutput + + dataSourceName := "data.aws_odb_network_peering_connections.test" + resource.Test(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + listOfPeeredNwks.testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.ODBServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + Config: listOfPeeredNwks.basic(), + Check: resource.ComposeAggregateTestCheckFunc( + + resource.ComposeTestCheckFunc(func(s *terraform.State) error { + listOfPeeredNwks.count(ctx, dataSourceName, &output) + resource.TestCheckResourceAttr(dataSourceName, "odb_peering_connections.#", strconv.Itoa(len(output.OdbPeeringConnections))) + return nil + }, + ), + ), + }, + }, + }) +} + +func (listOdbNetworkPeering) basic() string { + return `data "aws_odb_network_peering_connections" "test" {}` +} + +func (listOdbNetworkPeering) count(ctx context.Context, name string, list *odb.ListOdbPeeringConnectionsOutput) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[name] + if !ok { + return create.Error(names.ODB, 
create.ErrActionCheckingExistence, tfodb.DSNameNetworkPeeringConnectionsList, name, errors.New("not found")) + } + conn := acctest.Provider.Meta().(*conns.AWSClient).ODBClient(ctx) + resp, err := tfodb.ListOracleDBPeeringConnections(ctx, conn) + if err != nil { + return create.Error(names.ODB, create.ErrActionCheckingExistence, tfodb.DSNameNetworkPeeringConnectionsList, rs.Primary.ID, err) + } + list.OdbPeeringConnections = resp.OdbPeeringConnections + return nil + } +} +func (listOdbNetworkPeering) testAccPreCheck(ctx context.Context, t *testing.T) { + conn := acctest.Provider.Meta().(*conns.AWSClient).ODBClient(ctx) + input := odb.ListOdbPeeringConnectionsInput{} + _, err := conn.ListOdbPeeringConnections(ctx, &input) + if acctest.PreCheckSkipError(err) { + t.Skipf("skipping acceptance testing: %s", err) + } + if err != nil { + t.Fatalf("unexpected PreCheck error: %s", err) + } +} diff --git a/internal/service/odb/network_test.go b/internal/service/odb/network_test.go new file mode 100644 index 000000000000..3b3bad8dda7b --- /dev/null +++ b/internal/service/odb/network_test.go @@ -0,0 +1,423 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package odb_test + +import ( + "context" + "errors" + "fmt" + "strings" + "testing" + + "github.com/aws/aws-sdk-go-v2/service/odb" + odbtypes "github.com/aws/aws-sdk-go-v2/service/odb/types" + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/terraform" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/create" + tfodb "github.com/hashicorp/terraform-provider-aws/internal/service/odb" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + "github.com/hashicorp/terraform-provider-aws/names" +) + +type oracleDBNetworkResourceTest struct { + displayNamePrefix string +} + +var oracleDBNetworkResourceTestEntity = oracleDBNetworkResourceTest{ + displayNamePrefix: "tf-ora-net", +} + +// Basic test with bare minimum input +func TestAccODBNetworkResource_basic(t *testing.T) { + ctx := acctest.Context(t) + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + + var network odbtypes.OdbNetwork + rName := sdkacctest.RandomWithPrefix(oracleDBNetworkResourceTestEntity.displayNamePrefix) + resourceName := "aws_odb_network.test" + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + oracleDBNetworkResourceTestEntity.testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.ODBServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: oracleDBNetworkResourceTestEntity.testAccCheckNetworkDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: oracleDBNetworkResourceTestEntity.basicNetwork(rName), + Check: resource.ComposeAggregateTestCheckFunc( + oracleDBNetworkResourceTestEntity.testAccCheckNetworkExists(ctx, resourceName, &network), + ), + }, + { + 
ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccODBNetworkResource_withAllParams(t *testing.T) { + ctx := acctest.Context(t) + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + + var network1 odbtypes.OdbNetwork + rName := sdkacctest.RandomWithPrefix(oracleDBNetworkResourceTestEntity.displayNamePrefix) + resourceName := "aws_odb_network.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + oracleDBNetworkResourceTestEntity.testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.ODBServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: oracleDBNetworkResourceTestEntity.testAccCheckNetworkDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: oracleDBNetworkResourceTestEntity.networkWithAllParams(rName, "julia.com"), + Check: resource.ComposeAggregateTestCheckFunc( + oracleDBNetworkResourceTestEntity.testAccCheckNetworkExists(ctx, resourceName, &network1), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccODBNetworkResource_updateManagedService(t *testing.T) { + ctx := acctest.Context(t) + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + + var network1, network2 odbtypes.OdbNetwork + rName := sdkacctest.RandomWithPrefix(oracleDBNetworkResourceTestEntity.displayNamePrefix) + resourceName := "aws_odb_network.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + oracleDBNetworkResourceTestEntity.testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.ODBServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: oracleDBNetworkResourceTestEntity.testAccCheckNetworkDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: oracleDBNetworkResourceTestEntity.basicNetwork(rName), + 
Check: resource.ComposeAggregateTestCheckFunc( + oracleDBNetworkResourceTestEntity.testAccCheckNetworkExists(ctx, resourceName, &network1), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: oracleDBNetworkResourceTestEntity.basicNetworkWithActiveManagedService(rName), + Check: resource.ComposeAggregateTestCheckFunc( + oracleDBNetworkResourceTestEntity.testAccCheckNetworkExists(ctx, resourceName, &network2), + resource.ComposeTestCheckFunc(func(state *terraform.State) error { + if strings.Compare(*(network1.OdbNetworkId), *(network2.OdbNetworkId)) != 0 { + return errors.New("should not create a new cloud odb network") + } + return nil + }), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccODBNetworkResource_disableManagedService(t *testing.T) { + ctx := acctest.Context(t) + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + + var network1, network2 odbtypes.OdbNetwork + rName := sdkacctest.RandomWithPrefix(oracleDBNetworkResourceTestEntity.displayNamePrefix) + resourceName := "aws_odb_network.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + oracleDBNetworkResourceTestEntity.testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.ODBServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: oracleDBNetworkResourceTestEntity.testAccCheckNetworkDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: oracleDBNetworkResourceTestEntity.basicNetworkWithActiveManagedService(rName), + Check: resource.ComposeAggregateTestCheckFunc( + oracleDBNetworkResourceTestEntity.testAccCheckNetworkExists(ctx, resourceName, &network1), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: oracleDBNetworkResourceTestEntity.basicNetwork(rName), + Check: 
resource.ComposeAggregateTestCheckFunc( + oracleDBNetworkResourceTestEntity.testAccCheckNetworkExists(ctx, resourceName, &network2), + resource.ComposeTestCheckFunc(func(state *terraform.State) error { + if strings.Compare(*(network1.OdbNetworkId), *(network2.OdbNetworkId)) != 0 { + return errors.New("should not create a new cloud odb network") + } + return nil + }), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccODBNetworkResource_updateTags(t *testing.T) { + ctx := acctest.Context(t) + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + + var network1, network2 odbtypes.OdbNetwork + rName := sdkacctest.RandomWithPrefix(oracleDBNetworkResourceTestEntity.displayNamePrefix) + resourceName := "aws_odb_network.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + oracleDBNetworkResourceTestEntity.testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.ODBServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: oracleDBNetworkResourceTestEntity.testAccCheckNetworkDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: oracleDBNetworkResourceTestEntity.basicNetwork(rName), + + Check: resource.ComposeAggregateTestCheckFunc( + oracleDBNetworkResourceTestEntity.testAccCheckNetworkExists(ctx, resourceName, &network1), + ), + }, + { + Config: oracleDBNetworkResourceTestEntity.updateNetworkTags(rName), + Check: resource.ComposeAggregateTestCheckFunc( + oracleDBNetworkResourceTestEntity.testAccCheckNetworkExists(ctx, resourceName, &network2), + resource.ComposeTestCheckFunc(func(state *terraform.State) error { + if strings.Compare(*(network1.OdbNetworkId), *(network2.OdbNetworkId)) != 0 { + return errors.New("should not create a new cloud odb network") + } + return nil + }), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, "1"), + 
resource.TestCheckResourceAttr(resourceName, "tags.env", "dev"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccODBNetworkResource_disappears(t *testing.T) { + ctx := acctest.Context(t) + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + + var network odbtypes.OdbNetwork + rName := sdkacctest.RandomWithPrefix(oracleDBNetworkResourceTestEntity.displayNamePrefix) + resourceName := "aws_odb_network.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + oracleDBNetworkResourceTestEntity.testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.ODBServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: oracleDBNetworkResourceTestEntity.testAccCheckNetworkDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: oracleDBNetworkResourceTestEntity.basicNetwork(rName), + Check: resource.ComposeAggregateTestCheckFunc( + oracleDBNetworkResourceTestEntity.testAccCheckNetworkExists(ctx, resourceName, &network), + acctest.CheckFrameworkResourceDisappears(ctx, acctest.Provider, tfodb.OracleDBNetwork, resourceName), + ), + ExpectNonEmptyPlan: true, + }, + }, + }) +} + +func (oracleDBNetworkResourceTest) testAccCheckNetworkDestroy(ctx context.Context) resource.TestCheckFunc { + return func(s *terraform.State) error { + conn := acctest.Provider.Meta().(*conns.AWSClient).ODBClient(ctx) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "aws_odb_network" { + continue + } + _, err := tfodb.FindOracleDBNetworkResourceByID(ctx, conn, rs.Primary.ID) + if tfresource.NotFound(err) { + return nil + } + if err != nil { + return create.Error(names.ODB, create.ErrActionCheckingDestroyed, tfodb.ResNameNetwork, rs.Primary.ID, err) + } + + return create.Error(names.ODB, create.ErrActionCheckingDestroyed, tfodb.ResNameNetwork, rs.Primary.ID, errors.New("not destroyed")) + } + + 
return nil + } +} + +func (oracleDBNetworkResourceTest) testAccCheckNetworkExists(ctx context.Context, name string, network *odbtypes.OdbNetwork) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[name] + if !ok { + return create.Error(names.ODB, create.ErrActionCheckingExistence, tfodb.ResNameNetwork, name, errors.New("not found")) + } + + if rs.Primary.ID == "" { + return create.Error(names.ODB, create.ErrActionCheckingExistence, tfodb.ResNameNetwork, name, errors.New("not set")) + } + + conn := acctest.Provider.Meta().(*conns.AWSClient).ODBClient(ctx) + + resp, err := tfodb.FindOracleDBNetworkResourceByID(ctx, conn, rs.Primary.ID) + if err != nil { + return create.Error(names.ODB, create.ErrActionCheckingExistence, tfodb.ResNameNetwork, rs.Primary.ID, err) + } + + *network = *resp + + return nil + } +} + +func (oracleDBNetworkResourceTest) testAccPreCheck(ctx context.Context, t *testing.T) { + conn := acctest.Provider.Meta().(*conns.AWSClient).ODBClient(ctx) + input := odb.ListOdbNetworksInput{} + _, err := conn.ListOdbNetworks(ctx, &input) + if acctest.PreCheckSkipError(err) { + t.Skipf("skipping acceptance testing: %s", err) + } + if err != nil { + t.Fatalf("unexpected PreCheck error: %s", err) + } +} + +func (oracleDBNetworkResourceTest) basicNetwork(rName string) string { + networkRes := fmt.Sprintf(` + + + + +resource "aws_odb_network" "test" { + display_name = %[1]q + availability_zone_id = "use1-az6" + client_subnet_cidr = "10.2.0.0/24" + backup_subnet_cidr = "10.2.1.0/24" + s3_access = "DISABLED" + zero_etl_access = "DISABLED" +} + + +`, rName) + return networkRes +} + +func (oracleDBNetworkResourceTest) basicNetworkWithActiveManagedService(rName string) string { + networkRes := fmt.Sprintf(` + + + + +resource "aws_odb_network" "test" { + display_name = %[1]q + availability_zone_id = "use1-az6" + client_subnet_cidr = "10.2.0.0/24" + backup_subnet_cidr = "10.2.1.0/24" + s3_access = "ENABLED" + 
zero_etl_access = "ENABLED" +} + + +`, rName) + return networkRes +} + +func (oracleDBNetworkResourceTest) networkWithAllParams(rName, customDomainName string) string { + networkRes := fmt.Sprintf(` + + + + +resource "aws_odb_network" "test" { + display_name = %[1]q + availability_zone_id = "use1-az6" + client_subnet_cidr = "10.2.0.0/24" + backup_subnet_cidr = "10.2.1.0/24" + s3_access = "DISABLED" + zero_etl_access = "DISABLED" + custom_domain_name = %[2]q +} + + +`, rName, customDomainName) + return networkRes +} + +func (oracleDBNetworkResourceTest) updateNetworkTags(rName string) string { + networkRes := fmt.Sprintf(` + + + + +resource "aws_odb_network" "test" { + display_name = %[1]q + availability_zone_id = "use1-az6" + client_subnet_cidr = "10.2.0.0/24" + backup_subnet_cidr = "10.2.1.0/24" + s3_access = "DISABLED" + zero_etl_access = "DISABLED" + tags = { + "env" = "dev" + } +} +`, rName) + return networkRes +} diff --git a/internal/service/odb/networks_data_source.go b/internal/service/odb/networks_data_source.go new file mode 100644 index 000000000000..3eb6b02714ba --- /dev/null +++ b/internal/service/odb/networks_data_source.go @@ -0,0 +1,94 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package odb + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/service/odb" + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-provider-aws/internal/create" + "github.com/hashicorp/terraform-provider-aws/internal/framework" + "github.com/hashicorp/terraform-provider-aws/internal/framework/flex" + fwtypes "github.com/hashicorp/terraform-provider-aws/internal/framework/types" + "github.com/hashicorp/terraform-provider-aws/names" +) + +// Function annotations are used for datasource registration to the Provider. DO NOT EDIT. 
+// @FrameworkDataSource("aws_odb_networks", name="Networks") +func newDataSourceNetworksList(context.Context) (datasource.DataSourceWithConfigure, error) { + return &dataSourceNetworksList{}, nil +} + +const ( + DSNameNetworksList = "Networks List Data Source" +) + +type dataSourceNetworksList struct { + framework.DataSourceWithModel[odbNetworksListModel] +} + +func (d *dataSourceNetworksList) Schema(ctx context.Context, req datasource.SchemaRequest, resp *datasource.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: map[string]schema.Attribute{ + "odb_networks": schema.ListAttribute{ + Computed: true, + Description: "List of odb networks returns basic information about odb networks.", + CustomType: fwtypes.NewListNestedObjectTypeOf[odbNetworkSummary](ctx), + }, + }, + } +} + +// Data sources only have a read method. +func (d *dataSourceNetworksList) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + conn := d.Meta().ODBClient(ctx) + var data odbNetworksListModel + resp.Diagnostics.Append(req.Config.Get(ctx, &data)...) + if resp.Diagnostics.HasError() { + return + } + out, err := ListOracleDBNetworks(ctx, conn) + if err != nil { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.ODB, create.ErrActionReading, DSNameNetworksList, "", err), + err.Error(), + ) + return + } + resp.Diagnostics.Append(flex.Flatten(ctx, out, &data)...) + if resp.Diagnostics.HasError() { + return + } + resp.Diagnostics.Append(resp.State.Set(ctx, &data)...) +} + +func ListOracleDBNetworks(ctx context.Context, conn *odb.Client) (*odb.ListOdbNetworksOutput, error) { + var out odb.ListOdbNetworksOutput + paginator := odb.NewListOdbNetworksPaginator(conn, &odb.ListOdbNetworksInput{}) + for paginator.HasMorePages() { + output, err := paginator.NextPage(ctx) + if err != nil { + return nil, err + } + out.OdbNetworks = append(out.OdbNetworks, output.OdbNetworks...) 
+ } + return &out, nil +} + +type odbNetworksListModel struct { + framework.WithRegionModel + OdbNetworks fwtypes.ListNestedObjectValueOf[odbNetworkSummary] `tfsdk:"odb_networks"` +} + +type odbNetworkSummary struct { + OdbNetworkId types.String `tfsdk:"id"` + OdbNetworkArn types.String `tfsdk:"arn"` + OciNetworkAnchorId types.String `tfsdk:"oci_network_anchor_id"` + OciVcnUrl types.String `tfsdk:"oci_vcn_url"` + OciVcnId types.String `tfsdk:"oci_vcn_id"` + DisplayName types.String `tfsdk:"display_name"` +} diff --git a/internal/service/odb/networks_data_source_test.go b/internal/service/odb/networks_data_source_test.go new file mode 100644 index 000000000000..b42a91c65e4a --- /dev/null +++ b/internal/service/odb/networks_data_source_test.go @@ -0,0 +1,93 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package odb_test + +import ( + "context" + "errors" + "fmt" + "strconv" + "testing" + + "github.com/YakDriver/regexache" + "github.com/aws/aws-sdk-go-v2/service/odb" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/terraform" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/create" + tfodb "github.com/hashicorp/terraform-provider-aws/internal/service/odb" + "github.com/hashicorp/terraform-provider-aws/names" +) + +type odbNetworksListTestDS struct { +} + +func TestAccODBListNetworksDataSource_basic(t *testing.T) { + ctx := acctest.Context(t) + var networkListTest = odbNetworksListTestDS{} + var output odb.ListOdbNetworksOutput + + dataSourceName := "data.aws_odb_networks.test" + resource.Test(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + networkListTest.testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.ODBServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: 
[]resource.TestStep{
+			{
+				Config: networkListTest.basic(),
+				Check: resource.ComposeAggregateTestCheckFunc(
+					func(s *terraform.State) error {
+						// count and the TestCheck* helpers each RETURN a
+						// TestCheckFunc; invoke them with the state or the
+						// checks never run. Populate output first so the
+						// expected count below is meaningful.
+						if err := networkListTest.count(ctx, dataSourceName, &output)(s); err != nil {
+							return err
+						}
+						// The schema attribute is "odb_networks", not "aws_odb_networks".
+						if err := resource.TestCheckResourceAttr(dataSourceName, "odb_networks.#", strconv.Itoa(len(output.OdbNetworks)))(s); err != nil {
+							return err
+						}
+						idPattern := regexache.MustCompile(`^odbnet_`)
+						for i := range output.OdbNetworks {
+							// %d renders the element index; %q on an int would
+							// render a quoted rune. A range loop also avoids the
+							// original non-terminating `for i < len(...)`.
+							key := fmt.Sprintf("odb_networks.%d.id", i)
+							if err := resource.TestMatchResourceAttr(dataSourceName, key, idPattern)(s); err != nil {
+								return err
+							}
+						}
+						return nil
+					},
+				),
+			},
+		},
+	})
+}
+
+// basic returns the minimal configuration exercising the data source.
+func (odbNetworksListTestDS) basic() string {
+	return `data "aws_odb_networks" "test" {}`
+}
+
+// count lists ODB networks directly via the API and copies the result into
+// list so the caller can compare it against state.
+func (odbNetworksListTestDS) count(ctx context.Context, name string, list *odb.ListOdbNetworksOutput) resource.TestCheckFunc {
+	return func(s *terraform.State) error {
+		rs, ok := s.RootModule().Resources[name]
+		if !ok {
+			return create.Error(names.ODB, create.ErrActionCheckingExistence, tfodb.DSNameNetworksList, name, errors.New("not found"))
+		}
+		conn := acctest.Provider.Meta().(*conns.AWSClient).ODBClient(ctx)
+		resp, err := tfodb.ListOracleDBNetworks(ctx, conn)
+		if err != nil {
+			return create.Error(names.ODB, create.ErrActionCheckingExistence, tfodb.DSNameNetworksList, rs.Primary.ID, err)
+		}
+		list.OdbNetworks = resp.OdbNetworks
+
+		return nil
+	}
+}
+
+// testAccPreCheck skips the test when the ODB service is unavailable in the
+// target account/region, and fails fast on any other error.
+func (odbNetworksListTestDS) testAccPreCheck(ctx context.Context, t *testing.T) {
+	conn := acctest.Provider.Meta().(*conns.AWSClient).ODBClient(ctx)
+	input := odb.ListOdbNetworksInput{}
+	_, err := conn.ListOdbNetworks(ctx, &input)
+	if acctest.PreCheckSkipError(err) {
+		t.Skipf("skipping acceptance testing: %s", err)
+	}
+	if err != nil {
+		t.Fatalf("unexpected PreCheck error: %s", err)
+	}
+}
diff --git a/internal/service/odb/service_endpoint_resolver_gen.go b/internal/service/odb/service_endpoint_resolver_gen.go
new file mode 100644
index 000000000000..1199c5d13c49
--- /dev/null
+++
b/internal/service/odb/service_endpoint_resolver_gen.go @@ -0,0 +1,82 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. + +package odb + +import ( + "context" + "fmt" + "net" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/odb" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ odb.EndpointResolverV2 = resolverV2{} + +type resolverV2 struct { + defaultResolver odb.EndpointResolverV2 +} + +func newEndpointResolverV2() resolverV2 { + return resolverV2{ + defaultResolver: odb.NewDefaultEndpointResolverV2(), + } +} + +func (r resolverV2) ResolveEndpoint(ctx context.Context, params odb.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws.ToBool(params.UseFIPS) + + if eps := params.Endpoint; aws.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": endpoint, + }) + + if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + params.UseFIPS = aws.Bool(false) + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URI.String(), + }) + + hostname := endpoint.URI.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + params.UseFIPS = aws.Bool(false) + } else { + err = fmt.Errorf("looking up odb endpoint %q: %w", hostname, err) + return + } + } else { + return endpoint, err 
+ } + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) +} + +func withBaseEndpoint(endpoint string) func(*odb.Options) { + return func(o *odb.Options) { + if endpoint != "" { + o.BaseEndpoint = aws.String(endpoint) + } + } +} diff --git a/internal/service/odb/service_endpoints_gen_test.go b/internal/service/odb/service_endpoints_gen_test.go new file mode 100644 index 000000000000..2a4e674af595 --- /dev/null +++ b/internal/service/odb/service_endpoints_gen_test.go @@ -0,0 +1,602 @@ +// Code generated by internal/generate/serviceendpointtests/main.go; DO NOT EDIT. + +package odb_test + +import ( + "context" + "errors" + "fmt" + "maps" + "net" + "net/url" + "os" + "path/filepath" + "reflect" + "strings" + "testing" + + "github.com/aws/aws-sdk-go-v2/aws" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/odb" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "github.com/google/go-cmp/cmp" + "github.com/hashicorp/aws-sdk-go-base/v2/servicemocks" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + terraformsdk "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/errs" + "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" + "github.com/hashicorp/terraform-provider-aws/internal/provider/sdkv2" + "github.com/hashicorp/terraform-provider-aws/names" +) + +type endpointTestCase struct { + with []setupFunc + expected caseExpectations +} + +type caseSetup struct { + config map[string]any + configFile configFile + environmentVariables map[string]string +} + +type configFile struct { + baseUrl string + serviceUrl string +} + +type caseExpectations struct { + diags diag.Diagnostics + endpoint string + region string +} + +type apiCallParams struct { + endpoint string + region string +} + +type setupFunc func(setup *caseSetup) + +type 
callFunc func(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams + +const ( + packageNameConfigEndpoint = "https://packagename-config.endpoint.test/" + awsServiceEnvvarEndpoint = "https://service-envvar.endpoint.test/" + baseEnvvarEndpoint = "https://base-envvar.endpoint.test/" + serviceConfigFileEndpoint = "https://service-configfile.endpoint.test/" + baseConfigFileEndpoint = "https://base-configfile.endpoint.test/" +) + +const ( + packageName = "odb" + awsEnvVar = "AWS_ENDPOINT_URL_ODB" + baseEnvVar = "AWS_ENDPOINT_URL" + configParam = "odb" +) + +const ( + expectedCallRegion = "us-west-2" //lintignore:AWSAT003 +) + +func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.Setenv + ctx := t.Context() + const providerRegion = "us-west-2" //lintignore:AWSAT003 + const expectedEndpointRegion = providerRegion + + testcases := map[string]endpointTestCase{ + "no config": { + with: []setupFunc{withNoConfig}, + expected: expectDefaultEndpoint(ctx, t, expectedEndpointRegion), + }, + + // Package name endpoint on Config + + "package name endpoint config": { + with: []setupFunc{ + withPackageNameEndpointInConfig, + }, + expected: expectPackageNameConfigEndpoint(), + }, + + "package name endpoint config overrides aws service envvar": { + with: []setupFunc{ + withPackageNameEndpointInConfig, + withAwsEnvVar, + }, + expected: expectPackageNameConfigEndpoint(), + }, + + "package name endpoint config overrides base envvar": { + with: []setupFunc{ + withPackageNameEndpointInConfig, + withBaseEnvVar, + }, + expected: expectPackageNameConfigEndpoint(), + }, + + "package name endpoint config overrides service config file": { + with: []setupFunc{ + withPackageNameEndpointInConfig, + withServiceEndpointInConfigFile, + }, + expected: expectPackageNameConfigEndpoint(), + }, + + "package name endpoint config overrides base config file": { + with: []setupFunc{ + withPackageNameEndpointInConfig, + withBaseEndpointInConfigFile, + }, + expected: 
expectPackageNameConfigEndpoint(), + }, + + // Service endpoint in AWS envvar + + "service aws envvar": { + with: []setupFunc{ + withAwsEnvVar, + }, + expected: expectAwsEnvVarEndpoint(), + }, + + "service aws envvar overrides base envvar": { + with: []setupFunc{ + withAwsEnvVar, + withBaseEnvVar, + }, + expected: expectAwsEnvVarEndpoint(), + }, + + "service aws envvar overrides service config file": { + with: []setupFunc{ + withAwsEnvVar, + withServiceEndpointInConfigFile, + }, + expected: expectAwsEnvVarEndpoint(), + }, + + "service aws envvar overrides base config file": { + with: []setupFunc{ + withAwsEnvVar, + withBaseEndpointInConfigFile, + }, + expected: expectAwsEnvVarEndpoint(), + }, + + // Base endpoint in envvar + + "base endpoint envvar": { + with: []setupFunc{ + withBaseEnvVar, + }, + expected: expectBaseEnvVarEndpoint(), + }, + + "base endpoint envvar overrides service config file": { + with: []setupFunc{ + withBaseEnvVar, + withServiceEndpointInConfigFile, + }, + expected: expectBaseEnvVarEndpoint(), + }, + + "base endpoint envvar overrides base config file": { + with: []setupFunc{ + withBaseEnvVar, + withBaseEndpointInConfigFile, + }, + expected: expectBaseEnvVarEndpoint(), + }, + + // Service endpoint in config file + + "service config file": { + with: []setupFunc{ + withServiceEndpointInConfigFile, + }, + expected: expectServiceConfigFileEndpoint(), + }, + + "service config file overrides base config file": { + with: []setupFunc{ + withServiceEndpointInConfigFile, + withBaseEndpointInConfigFile, + }, + expected: expectServiceConfigFileEndpoint(), + }, + + // Base endpoint in config file + + "base endpoint config file": { + with: []setupFunc{ + withBaseEndpointInConfigFile, + }, + expected: expectBaseConfigFileEndpoint(), + }, + + // Use FIPS endpoint on Config + + "use fips config": { + with: []setupFunc{ + withUseFIPSInConfig, + }, + expected: expectDefaultFIPSEndpoint(ctx, t, expectedEndpointRegion), + }, + + "use fips config with package name 
endpoint config": { + with: []setupFunc{ + withUseFIPSInConfig, + withPackageNameEndpointInConfig, + }, + expected: expectPackageNameConfigEndpoint(), + }, + } + + for name, testcase := range testcases { //nolint:paralleltest // uses t.Setenv + t.Run(name, func(t *testing.T) { + testEndpointCase(ctx, t, providerRegion, testcase, callService) + }) + } +} + +func defaultEndpoint(ctx context.Context, region string) (url.URL, error) { + r := odb.NewDefaultEndpointResolverV2() + + ep, err := r.ResolveEndpoint(ctx, odb.EndpointParameters{ + Region: aws.String(region), + }) + if err != nil { + return url.URL{}, err + } + + if ep.URI.Path == "" { + ep.URI.Path = "/" + } + + return ep.URI, nil +} + +func defaultFIPSEndpoint(ctx context.Context, region string) (url.URL, error) { + r := odb.NewDefaultEndpointResolverV2() + + ep, err := r.ResolveEndpoint(ctx, odb.EndpointParameters{ + Region: aws.String(region), + UseFIPS: aws.Bool(true), + }) + if err != nil { + return url.URL{}, err + } + + if ep.URI.Path == "" { + ep.URI.Path = "/" + } + + return ep.URI, nil +} + +func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { + t.Helper() + + client := meta.ODBClient(ctx) + + var result apiCallParams + + input := odb.ListCloudExadataInfrastructuresInput{} + _, err := client.ListCloudExadataInfrastructures(ctx, &input, + func(opts *odb.Options) { + opts.APIOptions = append(opts.APIOptions, + addRetrieveEndpointURLMiddleware(t, &result.endpoint), + addRetrieveRegionMiddleware(&result.region), + addCancelRequestMiddleware(), + ) + }, + ) + if err == nil { + t.Fatal("Expected an error, got none") + } else if !errors.Is(err, errCancelOperation) { + t.Fatalf("Unexpected error: %s", err) + } + + return result +} + +func withNoConfig(_ *caseSetup) { + // no-op +} + +func withPackageNameEndpointInConfig(setup *caseSetup) { + if _, ok := setup.config[names.AttrEndpoints]; !ok { + setup.config[names.AttrEndpoints] = []any{ + map[string]any{}, + } + } + 
endpoints := setup.config[names.AttrEndpoints].([]any)[0].(map[string]any) + endpoints[packageName] = packageNameConfigEndpoint +} + +func withAwsEnvVar(setup *caseSetup) { + setup.environmentVariables[awsEnvVar] = awsServiceEnvvarEndpoint +} + +func withBaseEnvVar(setup *caseSetup) { + setup.environmentVariables[baseEnvVar] = baseEnvvarEndpoint +} + +func withServiceEndpointInConfigFile(setup *caseSetup) { + setup.configFile.serviceUrl = serviceConfigFileEndpoint +} + +func withBaseEndpointInConfigFile(setup *caseSetup) { + setup.configFile.baseUrl = baseConfigFileEndpoint +} + +func withUseFIPSInConfig(setup *caseSetup) { + setup.config["use_fips_endpoint"] = true +} + +func expectDefaultEndpoint(ctx context.Context, t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(ctx, region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + + return caseExpectations{ + endpoint: endpoint.String(), + region: expectedCallRegion, + } +} + +func expectDefaultFIPSEndpoint(ctx context.Context, t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(ctx, region) + if err != nil { + t.Fatalf("resolving accessanalyzer FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(ctx, t, region) + } else if err != nil { + t.Fatalf("looking up accessanalyzer endpoint %q: %s", hostname, err) + } + + return caseExpectations{ + endpoint: endpoint.String(), + region: expectedCallRegion, + } +} + +func expectPackageNameConfigEndpoint() caseExpectations { + return caseExpectations{ + endpoint: packageNameConfigEndpoint, + region: expectedCallRegion, + } +} + +func expectAwsEnvVarEndpoint() caseExpectations { + return caseExpectations{ + endpoint: awsServiceEnvvarEndpoint, + region: expectedCallRegion, + } +} + +func 
expectBaseEnvVarEndpoint() caseExpectations { + return caseExpectations{ + endpoint: baseEnvvarEndpoint, + region: expectedCallRegion, + } +} + +func expectServiceConfigFileEndpoint() caseExpectations { + return caseExpectations{ + endpoint: serviceConfigFileEndpoint, + region: expectedCallRegion, + } +} + +func expectBaseConfigFileEndpoint() caseExpectations { + return caseExpectations{ + endpoint: baseConfigFileEndpoint, + region: expectedCallRegion, + } +} + +func testEndpointCase(ctx context.Context, t *testing.T, region string, testcase endpointTestCase, callF callFunc) { + t.Helper() + + setup := caseSetup{ + config: map[string]any{}, + environmentVariables: map[string]string{}, + } + + for _, f := range testcase.with { + f(&setup) + } + + config := map[string]any{ + names.AttrAccessKey: servicemocks.MockStaticAccessKey, + names.AttrSecretKey: servicemocks.MockStaticSecretKey, + names.AttrRegion: region, + names.AttrSkipCredentialsValidation: true, + names.AttrSkipRequestingAccountID: true, + } + + maps.Copy(config, setup.config) + + if setup.configFile.baseUrl != "" || setup.configFile.serviceUrl != "" { + config[names.AttrProfile] = "default" + tempDir := t.TempDir() + writeSharedConfigFile(t, &config, tempDir, generateSharedConfigFile(setup.configFile)) + } + + for k, v := range setup.environmentVariables { + t.Setenv(k, v) + } + + p, err := sdkv2.NewProvider(ctx) + if err != nil { + t.Fatal(err) + } + + p.TerraformVersion = "1.0.0" + + expectedDiags := testcase.expected.diags + diags := p.Configure(ctx, terraformsdk.NewResourceConfigRaw(config)) + + if diff := cmp.Diff(diags, expectedDiags, cmp.Comparer(sdkdiag.Comparer)); diff != "" { + t.Errorf("unexpected diagnostics difference: %s", diff) + } + + if diags.HasError() { + return + } + + meta := p.Meta().(*conns.AWSClient) + + callParams := callF(ctx, t, meta) + + if e, a := testcase.expected.endpoint, callParams.endpoint; e != a { + t.Errorf("expected endpoint %q, got %q", e, a) + } + + if e, a := 
testcase.expected.region, callParams.region; e != a { + t.Errorf("expected region %q, got %q", e, a) + } +} + +func addRetrieveEndpointURLMiddleware(t *testing.T, endpoint *string) func(*middleware.Stack) error { + return func(stack *middleware.Stack) error { + return stack.Finalize.Add( + retrieveEndpointURLMiddleware(t, endpoint), + middleware.After, + ) + } +} + +func retrieveEndpointURLMiddleware(t *testing.T, endpoint *string) middleware.FinalizeMiddleware { + return middleware.FinalizeMiddlewareFunc( + "Test: Retrieve Endpoint", + func(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (middleware.FinalizeOutput, middleware.Metadata, error) { + t.Helper() + + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + t.Fatalf("Expected *github.com/aws/smithy-go/transport/http.Request, got %s", fullTypeName(in.Request)) + } + + url := request.URL + url.RawQuery = "" + url.Path = "/" + + *endpoint = url.String() + + return next.HandleFinalize(ctx, in) + }) +} + +func addRetrieveRegionMiddleware(region *string) func(*middleware.Stack) error { + return func(stack *middleware.Stack) error { + return stack.Serialize.Add( + retrieveRegionMiddleware(region), + middleware.After, + ) + } +} + +func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { + return middleware.SerializeMiddlewareFunc( + "Test: Retrieve Region", + func(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (middleware.SerializeOutput, middleware.Metadata, error) { + *region = awsmiddleware.GetRegion(ctx) + + return next.HandleSerialize(ctx, in) + }, + ) +} + +var errCancelOperation = errors.New("Test: Canceling request") + +func addCancelRequestMiddleware() func(*middleware.Stack) error { + return func(stack *middleware.Stack) error { + return stack.Finalize.Add( + cancelRequestMiddleware(), + middleware.After, + ) + } +} + +// cancelRequestMiddleware creates a Smithy middleware that intercepts the request 
before sending and cancels it +func cancelRequestMiddleware() middleware.FinalizeMiddleware { + return middleware.FinalizeMiddlewareFunc( + "Test: Cancel Requests", + func(_ context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (middleware.FinalizeOutput, middleware.Metadata, error) { + return middleware.FinalizeOutput{}, middleware.Metadata{}, errCancelOperation + }) +} + +func fullTypeName(i any) string { + return fullValueTypeName(reflect.ValueOf(i)) +} + +func fullValueTypeName(v reflect.Value) string { + if v.Kind() == reflect.Ptr { + return "*" + fullValueTypeName(reflect.Indirect(v)) + } + + requestType := v.Type() + return fmt.Sprintf("%s.%s", requestType.PkgPath(), requestType.Name()) +} + +func generateSharedConfigFile(config configFile) string { + var buf strings.Builder + + buf.WriteString(` +[default] +aws_access_key_id = DefaultSharedCredentialsAccessKey +aws_secret_access_key = DefaultSharedCredentialsSecretKey +`) + if config.baseUrl != "" { + fmt.Fprintf(&buf, "endpoint_url = %s\n", config.baseUrl) + } + + if config.serviceUrl != "" { + fmt.Fprintf(&buf, ` +services = endpoint-test + +[services endpoint-test] +%[1]s = + endpoint_url = %[2]s +`, configParam, serviceConfigFileEndpoint) + } + + return buf.String() +} + +func writeSharedConfigFile(t *testing.T, config *map[string]any, tempDir, content string) string { + t.Helper() + + file, err := os.Create(filepath.Join(tempDir, "aws-sdk-go-base-shared-configuration-file")) + if err != nil { + t.Fatalf("creating shared configuration file: %s", err) + } + + _, err = file.WriteString(content) + if err != nil { + t.Fatalf(" writing shared configuration file: %s", err) + } + + if v, ok := (*config)[names.AttrSharedConfigFiles]; !ok { + (*config)[names.AttrSharedConfigFiles] = []any{file.Name()} + } else { + (*config)[names.AttrSharedConfigFiles] = append(v.([]any), file.Name()) + } + + return file.Name() +} diff --git a/internal/service/odb/service_package_gen.go 
b/internal/service/odb/service_package_gen.go new file mode 100644 index 000000000000..3f44a2c30309 --- /dev/null +++ b/internal/service/odb/service_package_gen.go @@ -0,0 +1,243 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. + +package odb + +import ( + "context" + "unique" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/odb" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + inttypes "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/vcr" + "github.com/hashicorp/terraform-provider-aws/names" +) + +type servicePackage struct{} + +func (p *servicePackage) FrameworkDataSources(ctx context.Context) []*inttypes.ServicePackageFrameworkDataSource { + return []*inttypes.ServicePackageFrameworkDataSource{ + { + Factory: newDataSourceCloudAutonomousVmCluster, + TypeName: "aws_odb_cloud_autonomous_vm_cluster", + Name: "Cloud Autonomous Vm Cluster", + Tags: unique.Make(inttypes.ServicePackageResourceTags{ + IdentifierAttribute: names.AttrARN, + }), + Region: unique.Make(inttypes.ResourceRegionDefault()), + }, + { + Factory: newDataSourceCloudAutonomousVmClustersList, + TypeName: "aws_odb_cloud_autonomous_vm_clusters", + Name: "Cloud Autonomous Vm Clusters", + Region: unique.Make(inttypes.ResourceRegionDefault()), + }, + { + Factory: newDataSourceCloudExadataInfrastructure, + TypeName: "aws_odb_cloud_exadata_infrastructure", + Name: "Cloud Exadata Infrastructure", + Tags: unique.Make(inttypes.ServicePackageResourceTags{ + IdentifierAttribute: names.AttrARN, + }), + Region: unique.Make(inttypes.ResourceRegionDefault()), + }, + { + Factory: newDataSourceCloudExadataInfrastructuresList, + TypeName: "aws_odb_cloud_exadata_infrastructures", + Name: "Cloud Exadata Infrastructures", + Region: unique.Make(inttypes.ResourceRegionDefault()), + }, + { + Factory: newDataSourceCloudVmCluster, + 
TypeName: "aws_odb_cloud_vm_cluster", + Name: "Cloud Vm Cluster", + Tags: unique.Make(inttypes.ServicePackageResourceTags{ + IdentifierAttribute: names.AttrARN, + }), + Region: unique.Make(inttypes.ResourceRegionDefault()), + }, + { + Factory: newDataSourceCloudVmClustersList, + TypeName: "aws_odb_cloud_vm_clusters", + Name: "Cloud Vm Clusters", + Region: unique.Make(inttypes.ResourceRegionDefault()), + }, + { + Factory: newDataSourceDBNode, + TypeName: "aws_odb_db_node", + Name: "Db Node", + Region: unique.Make(inttypes.ResourceRegionDefault()), + }, + { + Factory: newDataSourceDBNodes, + TypeName: "aws_odb_db_nodes", + Name: "Db Nodes", + Region: unique.Make(inttypes.ResourceRegionDefault()), + }, + { + Factory: newDataSourceDBServer, + TypeName: "aws_odb_db_server", + Name: "Db Server", + Region: unique.Make(inttypes.ResourceRegionDefault()), + }, + { + Factory: newDataSourceDBServers, + TypeName: "aws_odb_db_servers", + Name: "Db Servers", + Region: unique.Make(inttypes.ResourceRegionDefault()), + }, + { + Factory: newDataSourceDBSystemShapes, + TypeName: "aws_odb_db_system_shapes", + Name: "Db System Shapes", + Region: unique.Make(inttypes.ResourceRegionDefault()), + }, + { + Factory: newDataSourceGiVersions, + TypeName: "aws_odb_gi_versions", + Name: "Gi Versions", + Region: unique.Make(inttypes.ResourceRegionDefault()), + }, + { + Factory: newDataSourceNetwork, + TypeName: "aws_odb_network", + Name: "Network", + Tags: unique.Make(inttypes.ServicePackageResourceTags{ + IdentifierAttribute: names.AttrARN, + }), + Region: unique.Make(inttypes.ResourceRegionDefault()), + }, + { + Factory: newDataSourceNetworkPeeringConnection, + TypeName: "aws_odb_network_peering_connection", + Name: "Network Peering Connection", + Region: unique.Make(inttypes.ResourceRegionDefault()), + }, + { + Factory: newDataSourceNetworkPeeringConnectionsList, + TypeName: "aws_odb_network_peering_connections", + Name: "Network Peering Connections", + Region: 
unique.Make(inttypes.ResourceRegionDefault()), + }, + { + Factory: newDataSourceNetworksList, + TypeName: "aws_odb_networks", + Name: "Networks", + Region: unique.Make(inttypes.ResourceRegionDefault()), + }, + } +} + +func (p *servicePackage) FrameworkResources(ctx context.Context) []*inttypes.ServicePackageFrameworkResource { + return []*inttypes.ServicePackageFrameworkResource{ + { + Factory: newResourceCloudAutonomousVmCluster, + TypeName: "aws_odb_cloud_autonomous_vm_cluster", + Name: "Cloud Autonomous Vm Cluster", + Tags: unique.Make(inttypes.ServicePackageResourceTags{ + IdentifierAttribute: names.AttrARN, + }), + Region: unique.Make(inttypes.ResourceRegionDefault()), + }, + { + Factory: newResourceCloudExadataInfrastructure, + TypeName: "aws_odb_cloud_exadata_infrastructure", + Name: "Cloud Exadata Infrastructure", + Tags: unique.Make(inttypes.ServicePackageResourceTags{ + IdentifierAttribute: names.AttrARN, + }), + Region: unique.Make(inttypes.ResourceRegionDefault()), + }, + { + Factory: newResourceCloudVmCluster, + TypeName: "aws_odb_cloud_vm_cluster", + Name: "Cloud Vm Cluster", + Tags: unique.Make(inttypes.ServicePackageResourceTags{ + IdentifierAttribute: names.AttrARN, + }), + Region: unique.Make(inttypes.ResourceRegionDefault()), + }, + { + Factory: newResourceNetwork, + TypeName: "aws_odb_network", + Name: "Network", + Tags: unique.Make(inttypes.ServicePackageResourceTags{ + IdentifierAttribute: names.AttrARN, + }), + Region: unique.Make(inttypes.ResourceRegionDefault()), + }, + { + Factory: newResourceNetworkPeeringConnection, + TypeName: "aws_odb_network_peering_connection", + Name: "Network Peering Connection", + Tags: unique.Make(inttypes.ServicePackageResourceTags{ + IdentifierAttribute: names.AttrARN, + }), + Region: unique.Make(inttypes.ResourceRegionDefault()), + }, + } +} + +func (p *servicePackage) SDKDataSources(ctx context.Context) []*inttypes.ServicePackageSDKDataSource { + return []*inttypes.ServicePackageSDKDataSource{} +} + +func (p 
*servicePackage) SDKResources(ctx context.Context) []*inttypes.ServicePackageSDKResource { + return []*inttypes.ServicePackageSDKResource{} +} + +func (p *servicePackage) ServicePackageName() string { + return names.ODB +} + +// NewClient returns a new AWS SDK for Go v2 client for this service package's AWS API. +func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*odb.Client, error) { + cfg := *(config["aws_sdkv2_config"].(*aws.Config)) + optFns := []func(*odb.Options){ + odb.WithEndpointResolverV2(newEndpointResolverV2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + func(o *odb.Options) { + if region := config[names.AttrRegion].(string); o.Region != region { + tflog.Info(ctx, "overriding provider-configured AWS API region", map[string]any{ + "service": p.ServicePackageName(), + "original_region": o.Region, + "override_region": region, + }) + o.Region = region + } + }, + func(o *odb.Options) { + if inContext, ok := conns.FromContext(ctx); ok && inContext.VCREnabled() { + tflog.Info(ctx, "overriding retry behavior to immediately return VCR errors") + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), vcr.InteractionNotFoundRetryableFunc) + } + }, + withExtraOptions(ctx, p, config), + } + + return odb.NewFromConfig(cfg, optFns...), nil +} + +// withExtraOptions returns a functional option that allows this service package to specify extra API client options. +// This option is always called after any generated options. 
+func withExtraOptions(ctx context.Context, sp conns.ServicePackage, config map[string]any) func(*odb.Options) { + if v, ok := sp.(interface { + withExtraOptions(context.Context, map[string]any) []func(*odb.Options) + }); ok { + optFns := v.withExtraOptions(ctx, config) + + return func(o *odb.Options) { + for _, optFn := range optFns { + optFn(o) + } + } + } + + return func(*odb.Options) {} +} + +func ServicePackage(ctx context.Context) conns.ServicePackage { + return &servicePackage{} +} diff --git a/internal/service/odb/tags_gen.go b/internal/service/odb/tags_gen.go new file mode 100644 index 000000000000..3ae4f0f73d8e --- /dev/null +++ b/internal/service/odb/tags_gen.go @@ -0,0 +1,128 @@ +// Code generated by internal/generate/tags/main.go; DO NOT EDIT. +package odb + +import ( + "context" + + "github.com/YakDriver/smarterr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/odb" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/logging" + tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" + "github.com/hashicorp/terraform-provider-aws/names" +) + +// listTags lists odb service tags. +// The identifier is typically the Amazon Resource Name (ARN), although +// it may also be a different identifier depending on the service. +func listTags(ctx context.Context, conn *odb.Client, identifier string, optFns ...func(*odb.Options)) (tftags.KeyValueTags, error) { + input := odb.ListTagsForResourceInput{ + ResourceArn: aws.String(identifier), + } + + output, err := conn.ListTagsForResource(ctx, &input, optFns...) + + if err != nil { + return tftags.New(ctx, nil), smarterr.NewError(err) + } + + return keyValueTags(ctx, output.Tags), nil +} + +// ListTags lists odb service tags and set them in Context. +// It is called from outside this package. 
+func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier string) error { + tags, err := listTags(ctx, meta.(*conns.AWSClient).ODBClient(ctx), identifier) + + if err != nil { + return smarterr.NewError(err) + } + + if inContext, ok := tftags.FromContext(ctx); ok { + inContext.TagsOut = option.Some(tags) + } + + return nil +} + +// map[string]string handling + +// svcTags returns odb service tags. +func svcTags(tags tftags.KeyValueTags) map[string]string { + return tags.Map() +} + +// keyValueTags creates tftags.KeyValueTags from odb service tags. +func keyValueTags(ctx context.Context, tags map[string]string) tftags.KeyValueTags { + return tftags.New(ctx, tags) +} + +// getTagsIn returns odb service tags from Context. +// nil is returned if there are no input tags. +func getTagsIn(ctx context.Context) map[string]string { + if inContext, ok := tftags.FromContext(ctx); ok { + if tags := svcTags(inContext.TagsIn.UnwrapOrDefault()); len(tags) > 0 { + return tags + } + } + + return nil +} + +// setTagsOut sets odb service tags in Context. +func setTagsOut(ctx context.Context, tags map[string]string) { + if inContext, ok := tftags.FromContext(ctx); ok { + inContext.TagsOut = option.Some(keyValueTags(ctx, tags)) + } +} + +// updateTags updates odb service tags. +// The identifier is typically the Amazon Resource Name (ARN), although +// it may also be a different identifier depending on the service. 
+func updateTags(ctx context.Context, conn *odb.Client, identifier string, oldTagsMap, newTagsMap any, optFns ...func(*odb.Options)) error { + oldTags := tftags.New(ctx, oldTagsMap) + newTags := tftags.New(ctx, newTagsMap) + + ctx = tflog.SetField(ctx, logging.KeyResourceId, identifier) + + removedTags := oldTags.Removed(newTags) + removedTags = removedTags.IgnoreSystem(names.ODB) + if len(removedTags) > 0 { + input := odb.UntagResourceInput{ + ResourceArn: aws.String(identifier), + TagKeys: removedTags.Keys(), + } + + _, err := conn.UntagResource(ctx, &input, optFns...) + + if err != nil { + return smarterr.NewError(err) + } + } + + updatedTags := oldTags.Updated(newTags) + updatedTags = updatedTags.IgnoreSystem(names.ODB) + if len(updatedTags) > 0 { + input := odb.TagResourceInput{ + ResourceArn: aws.String(identifier), + Tags: svcTags(updatedTags), + } + + _, err := conn.TagResource(ctx, &input, optFns...) + + if err != nil { + return smarterr.NewError(err) + } + } + + return nil +} + +// UpdateTags updates odb service tags. +// It is called from outside this package. 
+func (p *servicePackage) UpdateTags(ctx context.Context, meta any, identifier string, oldTags, newTags any) error { + return updateTags(ctx, meta.(*conns.AWSClient).ODBClient(ctx), identifier, oldTags, newTags) +} diff --git a/internal/service/opensearch/domain.go b/internal/service/opensearch/domain.go index a980e4df5a9b..8a77505f0cb3 100644 --- a/internal/service/opensearch/domain.go +++ b/internal/service/opensearch/domain.go @@ -52,7 +52,6 @@ func resourceDomain() *schema.Resource { name := d.Id() ds, err := findDomainByName(ctx, conn, name) - if err != nil { return nil, fmt.Errorf("reading OpenSearch Domain (%s): %w", name, err) } @@ -172,6 +171,47 @@ func resourceDomain() *schema.Resource { }, }, }, + "aiml_options": { + Type: schema.TypeList, + Optional: true, + Computed: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "natural_language_query_generation_options": { + Type: schema.TypeList, + Optional: true, + Computed: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "desired_state": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateDiagFunc: enum.Validate[awstypes.NaturalLanguageQueryGenerationDesiredState](), + }, + }, + }, + }, + "s3_vectors_engine": { + Type: schema.TypeList, + Optional: true, + Computed: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + names.AttrEnabled: { + Type: schema.TypeBool, + Computed: true, + Optional: true, + }, + }, + }, + }, + }, + }, + }, names.AttrARN: { Type: schema.TypeString, Computed: true, @@ -716,6 +756,10 @@ func resourceDomainCreate(ctx context.Context, d *schema.ResourceData, meta any) input.AdvancedSecurityOptions = expandAdvancedSecurityOptions(v.([]any)) } + if v, ok := d.GetOk("aiml_options"); ok && len(v.([]any)) > 0 && v.([]any)[0] != nil { + input.AIMLOptions = expandAIMLOptionsInput(v.([]any)[0].(map[string]any)) + } + if v, ok := d.GetOk("auto_tune_options"); ok && 
len(v.([]any)) > 0 { input.AutoTuneOptions = expandAutoTuneOptionsInput(v.([]any)[0].(map[string]any)) } @@ -826,12 +870,11 @@ func resourceDomainCreate(ctx context.Context, d *schema.ResourceData, meta any) // IAM Roles can take some time to propagate if set in AccessPolicies and created in the same terraform outputRaw, err := tfresource.RetryWhen(ctx, propagationTimeout, - func() (any, error) { + func(ctx context.Context) (any, error) { return conn.CreateDomain(ctx, &input) }, domainErrorRetryable, ) - if err != nil { return sdkdiag.AppendErrorf(diags, "creating OpenSearch Domain (%s): %s", name, err) } @@ -849,12 +892,11 @@ func resourceDomainCreate(ctx context.Context, d *schema.ResourceData, meta any) } _, err := tfresource.RetryWhen(ctx, propagationTimeout, - func() (any, error) { + func(ctx context.Context) (any, error) { return conn.UpdateDomainConfig(ctx, &input) }, domainErrorRetryable, ) - if err != nil { return sdkdiag.AppendErrorf(diags, "updating OpenSearch Domain (%s) Config: %s", d.Id(), err) } @@ -887,7 +929,6 @@ func resourceDomainRead(ctx context.Context, d *schema.ResourceData, meta any) d output, err := conn.DescribeDomainConfig(ctx, &opensearch.DescribeDomainConfigInput{ DomainName: aws.String(name), }) - if err != nil { return sdkdiag.AppendErrorf(diags, "reading OpenSearch Domain (%s) Config: %s", d.Id(), err) } @@ -916,6 +957,13 @@ func resourceDomainRead(ctx context.Context, d *schema.ResourceData, meta any) d return sdkdiag.AppendErrorf(diags, "setting advanced_security_options: %s", err) } } + if ds.AIMLOptions != nil { + if err := d.Set("aiml_options", []any{flattenAIMLOptionsOutput(ds.AIMLOptions)}); err != nil { + return sdkdiag.AppendErrorf(diags, "setting aiml_options: %s", err) + } + } else { + d.Set("aiml_options", nil) + } d.Set(names.AttrARN, ds.ARN) if v := dc.AutoTuneOptions; v != nil { if err := d.Set("auto_tune_options", []any{flattenAutoTuneOptions(v.Options)}); err != nil { @@ -1044,6 +1092,12 @@ func 
resourceDomainUpdate(ctx context.Context, d *schema.ResourceData, meta any) input.AdvancedSecurityOptions = expandAdvancedSecurityOptions(d.Get("advanced_security_options").([]any)) } + if d.HasChange("aiml_options") { + if v, ok := d.GetOk("aiml_options"); ok && len(v.([]any)) > 0 && v.([]any)[0] != nil { + input.AIMLOptions = expandAIMLOptionsInput(v.([]any)[0].(map[string]any)) + } + } + if d.HasChange("auto_tune_options") { input.AutoTuneOptions = expandAutoTuneOptions(d.Get("auto_tune_options").([]any)[0].(map[string]any)) } @@ -1167,12 +1221,11 @@ func resourceDomainUpdate(ctx context.Context, d *schema.ResourceData, meta any) } _, err := tfresource.RetryWhen(ctx, propagationTimeout, - func() (any, error) { + func(ctx context.Context) (any, error) { return conn.UpdateDomainConfig(ctx, &input) }, domainErrorRetryable, ) - if err != nil { return sdkdiag.AppendErrorf(diags, "updating OpenSearch Domain (%s) Config: %s", d.Id(), err) } @@ -1188,7 +1241,6 @@ func resourceDomainUpdate(ctx context.Context, d *schema.ResourceData, meta any) } _, err := conn.UpgradeDomain(ctx, &input) - if err != nil { return sdkdiag.AppendErrorf(diags, "upgrading OpenSearch Domain (%s): %s", d.Id(), err) } diff --git a/internal/service/opensearch/domain_policy.go b/internal/service/opensearch/domain_policy.go index 8aaa3c2eaf2c..0d5f363b596f 100644 --- a/internal/service/opensearch/domain_policy.go +++ b/internal/service/opensearch/domain_policy.go @@ -6,6 +6,7 @@ package opensearch import ( "context" "log" + "strings" "time" "github.com/aws/aws-sdk-go-v2/aws" @@ -35,6 +36,10 @@ func resourceDomainPolicy() *schema.Resource { Delete: schema.DefaultTimeout(90 * time.Minute), }, + Importer: &schema.ResourceImporter{ + StateContext: schema.ImportStatePassthroughContext, + }, + Schema: map[string]*schema.Schema{ names.AttrDomainName: { Type: schema.TypeString, @@ -58,7 +63,8 @@ func resourceDomainPolicyRead(ctx context.Context, d *schema.ResourceData, meta var diags diag.Diagnostics conn 
:= meta.(*conns.AWSClient).OpenSearchClient(ctx) - ds, err := findDomainByName(ctx, conn, d.Get(names.AttrDomainName).(string)) + domainName := strings.Replace(d.Id(), "esd-policy-", "", 1) + ds, err := findDomainByName(ctx, conn, domainName) if !d.IsNewResource() && tfresource.NotFound(err) { log.Printf("[WARN] OpenSearch Domain Policy (%s) not found, removing from state", d.Id()) @@ -77,6 +83,7 @@ func resourceDomainPolicyRead(ctx context.Context, d *schema.ResourceData, meta } d.Set("access_policies", policies) + d.Set(names.AttrDomainName, ds.DomainName) return diags } @@ -92,8 +99,8 @@ func resourceDomainPolicyUpsert(ctx context.Context, d *schema.ResourceData, met return sdkdiag.AppendErrorf(diags, "policy (%s) is invalid JSON: %s", policy, err) } - _, err = tfresource.RetryWhenIsAErrorMessageContains[*awstypes.ValidationException](ctx, propagationTimeout, - func() (any, error) { + _, err = tfresource.RetryWhenIsAErrorMessageContains[any, *awstypes.ValidationException](ctx, propagationTimeout, + func(ctx context.Context) (any, error) { return conn.UpdateDomainConfig(ctx, &opensearch.UpdateDomainConfigInput{ DomainName: aws.String(domainName), AccessPolicies: aws.String(policy), diff --git a/internal/service/opensearch/domain_policy_test.go b/internal/service/opensearch/domain_policy_test.go index beb72c1b193f..d2694cfa5d89 100644 --- a/internal/service/opensearch/domain_policy_test.go +++ b/internal/service/opensearch/domain_policy_test.go @@ -21,6 +21,7 @@ func TestAccOpenSearchDomainPolicy_basic(t *testing.T) { ctx := acctest.Context(t) var domain awstypes.DomainStatus ri := sdkacctest.RandInt() + resourceName := "aws_opensearch_domain_policy.test" policy := `{ "Version": "2012-10-17", "Statement": [ @@ -69,10 +70,15 @@ func TestAccOpenSearchDomainPolicy_basic(t *testing.T) { } expectedPolicy := fmt.Sprintf(expectedPolicyTpl, expectedArn) - return testAccCheckPolicyMatch("aws_opensearch_domain_policy.test", "access_policies", expectedPolicy)(s) + return 
testAccCheckPolicyMatch(resourceName, "access_policies", expectedPolicy)(s) }, ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, }, }) } @@ -95,7 +101,7 @@ func testAccCheckPolicyMatch(resource, attr, expectedPolicy string) resource.Tes areEquivalent, err := awspolicy.PoliciesAreEquivalent(given, expectedPolicy) if err != nil { - return fmt.Errorf("Comparing AWS Policies failed: %s", err) + return fmt.Errorf("Comparing AWS Policies failed: %w", err) } if !areEquivalent { diff --git a/internal/service/opensearch/domain_saml_options.go b/internal/service/opensearch/domain_saml_options.go index fb3402d98bd0..07764723dabc 100644 --- a/internal/service/opensearch/domain_saml_options.go +++ b/internal/service/opensearch/domain_saml_options.go @@ -155,7 +155,7 @@ func resourceDomainSAMLOptionsPut(ctx context.Context, d *schema.ResourceData, m log.Printf("[DEBUG] Updating OpenSearch domain SAML Options %#v", config) - _, err := tfresource.RetryWhenIsAErrorMessageContains[*awstypes.ValidationException](ctx, propagationTimeout, func() (any, error) { + _, err := tfresource.RetryWhenIsAErrorMessageContains[any, *awstypes.ValidationException](ctx, propagationTimeout, func(ctx context.Context) (any, error) { return conn.UpdateDomainConfig(ctx, &opensearch.UpdateDomainConfigInput{ DomainName: aws.String(domainName), AdvancedSecurityOptions: &config, @@ -182,7 +182,7 @@ func resourceDomainSAMLOptionsDelete(ctx context.Context, d *schema.ResourceData domainName := d.Get(names.AttrDomainName).(string) config := awstypes.AdvancedSecurityOptionsInput{} - _, err := tfresource.RetryWhenIsAErrorMessageContains[*awstypes.ValidationException](ctx, propagationTimeout, func() (any, error) { + _, err := tfresource.RetryWhenIsAErrorMessageContains[any, *awstypes.ValidationException](ctx, propagationTimeout, func(ctx context.Context) (any, error) { return conn.UpdateDomainConfig(ctx, &opensearch.UpdateDomainConfigInput{ DomainName: 
aws.String(domainName), AdvancedSecurityOptions: &config, diff --git a/internal/service/opensearch/domain_structure.go b/internal/service/opensearch/domain_structure.go index 7d676e30e4c9..3f5bab082406 100644 --- a/internal/service/opensearch/domain_structure.go +++ b/internal/service/opensearch/domain_structure.go @@ -54,6 +54,52 @@ func expandAdvancedSecurityOptions(m []any) *awstypes.AdvancedSecurityOptionsInp return &config } +func expandAIMLOptionsInput(tfMap map[string]any) *awstypes.AIMLOptionsInput { + if tfMap == nil { + return nil + } + + apiObject := &awstypes.AIMLOptionsInput{} + + if v, ok := tfMap["natural_language_query_generation_options"].([]any); ok && len(v) > 0 && v[0] != nil { + apiObject.NaturalLanguageQueryGenerationOptions = expandNaturalLanguageQueryGenerationOptionsInput(v[0].(map[string]any)) + } + + if v, ok := tfMap["s3_vectors_engine"].([]any); ok && len(v) > 0 && v[0] != nil { + apiObject.S3VectorsEngine = expandS3VectorsEngine(v[0].(map[string]any)) + } + + return apiObject +} + +func expandNaturalLanguageQueryGenerationOptionsInput(tfMap map[string]any) *awstypes.NaturalLanguageQueryGenerationOptionsInput { + if tfMap == nil { + return nil + } + + apiObject := &awstypes.NaturalLanguageQueryGenerationOptionsInput{} + + if v, ok := tfMap["desired_state"].(string); ok && v != "" { + apiObject.DesiredState = awstypes.NaturalLanguageQueryGenerationDesiredState(v) + } + + return apiObject +} + +func expandS3VectorsEngine(tfMap map[string]any) *awstypes.S3VectorsEngine { + if tfMap == nil { + return nil + } + + apiObject := &awstypes.S3VectorsEngine{} + + if v, ok := tfMap[names.AttrEnabled].(bool); ok { + apiObject.Enabled = aws.Bool(v) + } + + return apiObject +} + func expandAutoTuneOptions(tfMap map[string]any) *awstypes.AutoTuneOptions { if tfMap == nil { return nil @@ -248,6 +294,48 @@ func flattenAdvancedSecurityOptions(advancedSecurityOptions *awstypes.AdvancedSe return []map[string]any{m} } +func flattenAIMLOptionsOutput(apiObject 
*awstypes.AIMLOptionsOutput) map[string]any { + if apiObject == nil { + return nil + } + + tfMap := map[string]any{} + + if v := apiObject.NaturalLanguageQueryGenerationOptions; v != nil { + tfMap["natural_language_query_generation_options"] = []any{flattenNaturalLanguageQueryGenerationOptionsOutput(v)} + } + + if v := apiObject.S3VectorsEngine; v != nil { + tfMap["s3_vectors_engine"] = []any{flattenS3VectorsEngine(v)} + } + + return tfMap +} + +func flattenNaturalLanguageQueryGenerationOptionsOutput(apiObject *awstypes.NaturalLanguageQueryGenerationOptionsOutput) map[string]any { + if apiObject == nil { + return nil + } + + tfMap := map[string]any{ + "desired_state": apiObject.DesiredState, + } + + return tfMap +} + +func flattenS3VectorsEngine(apiObject *awstypes.S3VectorsEngine) map[string]any { + if apiObject == nil { + return nil + } + + tfMap := map[string]any{ + names.AttrEnabled: aws.ToBool(apiObject.Enabled), + } + + return tfMap +} + func flattenAutoTuneOptions(autoTuneOptions *awstypes.AutoTuneOptions) map[string]any { if autoTuneOptions == nil { return nil diff --git a/internal/service/opensearch/domain_test.go b/internal/service/opensearch/domain_test.go index 870895e18b41..435cc479ba08 100644 --- a/internal/service/opensearch/domain_test.go +++ b/internal/service/opensearch/domain_test.go @@ -155,6 +155,7 @@ func TestAccOpenSearchDomain_basic(t *testing.T) { Config: testAccDomainConfig_basic(rName), Check: resource.ComposeTestCheckFunc( testAccCheckDomainExists(ctx, resourceName, &domain), + resource.TestCheckResourceAttr(resourceName, "aiml_options.#", "1"), resource.TestMatchResourceAttr(resourceName, "dashboard_endpoint", regexache.MustCompile(`.*(opensearch|es)\..*/_dashboards`)), resource.TestCheckResourceAttrSet(resourceName, names.AttrEngineVersion), resource.TestCheckResourceAttr(resourceName, "off_peak_window_options.#", "1"), @@ -541,7 +542,8 @@ func TestAccOpenSearchDomain_Cluster_update(t *testing.T) { testAccCheckSnapshotHour(23, &input), 
), }, - }}) + }, + }) } func TestAccOpenSearchDomain_Cluster_multiAzWithStandbyEnabled(t *testing.T) { @@ -2088,7 +2090,8 @@ func TestAccOpenSearchDomain_VolumeType_update(t *testing.T) { testAccCheckEBSVolumeIops(3000, &input), ), }, - }}) + }, + }) } // Verifies that EBS volume_type can be changed from gp3 to a type which does not @@ -2137,7 +2140,8 @@ func TestAccOpenSearchDomain_VolumeType_gp3ToGP2(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "ebs_options.0.volume_type", "gp2"), ), }, - }}) + }, + }) } // Reference: https://github.com/hashicorp/terraform-provider-aws/issues/13867 @@ -2221,7 +2225,8 @@ func TestAccOpenSearchDomain_versionUpdate(t *testing.T) { resource.TestCheckResourceAttr(resourceName, names.AttrEngineVersion, "Elasticsearch_6.3"), ), }, - }}) + }, + }) } func TestAccOpenSearchDomain_softwareUpdateOptions(t *testing.T) { @@ -2257,6 +2262,132 @@ func TestAccOpenSearchDomain_softwareUpdateOptions(t *testing.T) { }, }) } + +func TestAccOpenSearchDomain_AIMLOptions_createEnabled(t *testing.T) { + ctx := acctest.Context(t) + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + + var domain awstypes.DomainStatus + rName := testAccRandomDomainName() + resourceName := "aws_opensearch_domain.test" + enabledState := "ENABLED" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheckIAMServiceLinkedRole(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.OpenSearchServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckDomainDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccDomainConfig_AIMLOptions(rName, enabledState, false), + Check: resource.ComposeTestCheckFunc( + testAccCheckDomainExists(ctx, resourceName, &domain), + resource.TestCheckResourceAttr(resourceName, "aiml_options.#", "1"), + resource.TestCheckResourceAttr(resourceName, "aiml_options.0.natural_language_query_generation_options.#", 
"1"), + resource.TestCheckResourceAttr(resourceName, "aiml_options.0.natural_language_query_generation_options.0.desired_state", enabledState), + resource.TestCheckResourceAttr(resourceName, "aiml_options.0.s3_vectors_engine.#", "1"), + resource.TestCheckResourceAttr(resourceName, "aiml_options.0.s3_vectors_engine.0.enabled", acctest.CtFalse), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + }, + }, + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateId: rName, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "advanced_security_options", + }, + }, + }, + }) +} + +func TestAccOpenSearchDomain_AIMLOptions_createDisabled(t *testing.T) { + ctx := acctest.Context(t) + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + + var domain awstypes.DomainStatus + rName := testAccRandomDomainName() + resourceName := "aws_opensearch_domain.test" + enabledState := "ENABLED" + disabledState := "DISABLED" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheckIAMServiceLinkedRole(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.OpenSearchServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckDomainDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccDomainConfig_AIMLOptions(rName, disabledState, true), + Check: resource.ComposeTestCheckFunc( + testAccCheckDomainExists(ctx, resourceName, &domain), + resource.TestCheckResourceAttr(resourceName, "aiml_options.#", "1"), + resource.TestCheckResourceAttr(resourceName, "aiml_options.0.natural_language_query_generation_options.#", "1"), + resource.TestCheckResourceAttr(resourceName, "aiml_options.0.natural_language_query_generation_options.0.desired_state", disabledState), + resource.TestCheckResourceAttr(resourceName, 
"aiml_options.0.s3_vectors_engine.#", "1"), + resource.TestCheckResourceAttr(resourceName, "aiml_options.0.s3_vectors_engine.0.enabled", acctest.CtTrue), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + }, + }, + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateId: rName, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "advanced_security_options", + }, + }, + { + Config: testAccDomainConfig_AIMLOptions(rName, enabledState, false), + Check: resource.ComposeTestCheckFunc( + testAccCheckDomainExists(ctx, resourceName, &domain), + resource.TestCheckResourceAttr(resourceName, "aiml_options.#", "1"), + resource.TestCheckResourceAttr(resourceName, "aiml_options.0.natural_language_query_generation_options.#", "1"), + resource.TestCheckResourceAttr(resourceName, "aiml_options.0.natural_language_query_generation_options.0.desired_state", enabledState), + resource.TestCheckResourceAttr(resourceName, "aiml_options.0.s3_vectors_engine.#", "1"), + resource.TestCheckResourceAttr(resourceName, "aiml_options.0.s3_vectors_engine.0.enabled", acctest.CtFalse), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + }, + }, + }, + { + Config: testAccDomainConfig_AIMLOptions(rName, disabledState, true), + Check: resource.ComposeTestCheckFunc( + testAccCheckDomainExists(ctx, resourceName, &domain), + resource.TestCheckResourceAttr(resourceName, "aiml_options.#", "1"), + resource.TestCheckResourceAttr(resourceName, "aiml_options.0.natural_language_query_generation_options.#", "1"), + resource.TestCheckResourceAttr(resourceName, "aiml_options.0.natural_language_query_generation_options.0.desired_state", disabledState), + resource.TestCheckResourceAttr(resourceName, "aiml_options.0.s3_vectors_engine.#", "1"), + 
resource.TestCheckResourceAttr(resourceName, "aiml_options.0.s3_vectors_engine.0.enabled", acctest.CtTrue), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + }, + }, + }, + }, + }) +} + func TestAccOpenSearchDomain_disappears(t *testing.T) { ctx := acctest.Context(t) if testing.Short() { @@ -2469,7 +2600,6 @@ func testAccCheckDomainExists(ctx context.Context, n string, v *awstypes.DomainS conn := acctest.Provider.Meta().(*conns.AWSClient).OpenSearchClient(ctx) output, err := tfopensearch.FindDomainByName(ctx, conn, rs.Primary.Attributes[names.AttrDomainName]) - if err != nil { return err } @@ -4200,3 +4330,53 @@ resource "aws_opensearch_domain" "test" { } `, rName, option) } + +func testAccDomainConfig_AIMLOptions(rName, desiredState string, S3VecotrsEnabled bool) string { + return fmt.Sprintf(` +resource "aws_opensearch_domain" "test" { + domain_name = %[1]q + + cluster_config { + instance_type = "or1.medium.search" + instance_count = 1 + } + + advanced_security_options { + enabled = true + internal_user_database_enabled = true + master_user_options { + master_user_name = "testmasteruser" + master_user_password = "Barbarbarbar1!" 
+ } + } + + domain_endpoint_options { + enforce_https = true + tls_security_policy = "Policy-Min-TLS-1-2-2019-07" + } + + node_to_node_encryption { + enabled = true + } + + ebs_options { + ebs_enabled = true + volume_size = 20 + } + + encrypt_at_rest { + enabled = true + } + + aiml_options { + natural_language_query_generation_options { + desired_state = %[2]q + } + + s3_vectors_engine { + enabled = %[3]t + } + } +} +`, rName, desiredState, S3VecotrsEnabled) +} diff --git a/internal/service/opensearch/package.go b/internal/service/opensearch/package.go index 05f19c2625d4..45e63a30ce1b 100644 --- a/internal/service/opensearch/package.go +++ b/internal/service/opensearch/package.go @@ -5,8 +5,11 @@ package opensearch import ( "context" + "fmt" "log" + "time" + "github.com/YakDriver/regexache" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/opensearch" awstypes "github.com/aws/aws-sdk-go-v2/service/opensearch/types" @@ -39,6 +42,12 @@ func resourcePackage() *schema.Resource { Type: schema.TypeString, Computed: true, }, + names.AttrEngineVersion: { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: validation.StringMatch(regexache.MustCompile(`^Elasticsearch_[0-9]{1}\.[0-9]{1,2}$|^OpenSearch_[0-9]{1,2}\.[0-9]{1,2}$`), "must be in the format 'Elasticsearch_X.Y' or 'OpenSearch_X.Y'"), + }, "package_description": { Type: schema.TypeString, Optional: true, @@ -94,6 +103,10 @@ func resourcePackageCreate(ctx context.Context, d *schema.ResourceData, meta any PackageType: awstypes.PackageType(d.Get("package_type").(string)), } + if v, ok := d.GetOk(names.AttrEngineVersion); ok { + input.EngineVersion = aws.String(v.(string)) + } + if v, ok := d.GetOk("package_source"); ok { input.PackageSource = expandPackageSource(v.([]any)[0].(map[string]any)) } @@ -106,6 +119,9 @@ func resourcePackageCreate(ctx context.Context, d *schema.ResourceData, meta any d.SetId(aws.ToString(output.PackageDetails.PackageID)) + if _, err := 
waitPackageValidationCompleted(ctx, conn, d.Id()); err != nil { + return sdkdiag.AppendErrorf(diags, "waiting for package validation (%s) completed: %s", d.Id(), err) + } return append(diags, resourcePackageRead(ctx, d, meta)...) } @@ -126,6 +142,7 @@ func resourcePackageRead(ctx context.Context, d *schema.ResourceData, meta any) } d.Set("available_package_version", pkg.AvailablePackageVersion) + d.Set(names.AttrEngineVersion, pkg.EngineVersion) d.Set("package_description", pkg.PackageDescription) d.Set("package_id", pkg.PackageID) d.Set("package_name", pkg.PackageName) @@ -230,3 +247,46 @@ func expandPackageSource(v any) *awstypes.PackageSource { S3Key: aws.String(v.(map[string]any)["s3_key"].(string)), } } + +func waitPackageValidationCompleted(ctx context.Context, conn *opensearch.Client, id string) (*opensearch.DescribePackagesOutput, error) { + stateConf := &retry.StateChangeConf{ + Pending: []string{"COPYING", "VALIDATING"}, + Target: []string{"AVAILABLE"}, + Refresh: statusPackageValidation(ctx, conn, id), + Timeout: 20 * time.Minute, + MinTimeout: 15 * time.Second, + Delay: 30 * time.Second, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + + if output, ok := outputRaw.(*opensearch.DescribePackagesOutput); ok { + return output, err + } + + return nil, err +} + +func statusPackageValidation(ctx context.Context, conn *opensearch.Client, id string) retry.StateRefreshFunc { + return func() (any, string, error) { + output, err := findPackageByID(ctx, conn, id) + + if tfresource.NotFound(err) { + return nil, "", nil + } + + if err != nil { + return nil, "", err + } + + if output == nil { + return nil, "", nil + } + + if output.ErrorDetails != nil { + return nil, string(output.PackageStatus), fmt.Errorf("package validation failed: %s, %s, %s", string(output.PackageStatus), aws.ToString(output.ErrorDetails.ErrorType), aws.ToString(output.ErrorDetails.ErrorMessage)) + } + + return output, string(output.PackageStatus), nil + } +} diff --git 
a/internal/service/opensearch/package_test.go b/internal/service/opensearch/package_test.go index a41dbe6161f9..f1ad8019f51f 100644 --- a/internal/service/opensearch/package_test.go +++ b/internal/service/opensearch/package_test.go @@ -8,6 +8,8 @@ import ( "fmt" "testing" + "github.com/YakDriver/regexache" + awstypes "github.com/aws/aws-sdk-go-v2/service/opensearch/types" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" "github.com/hashicorp/terraform-provider-aws/internal/acctest" @@ -32,12 +34,12 @@ func TestAccOpenSearchPackage_basic(t *testing.T) { Config: testAccPackageConfig_basic(pkgName), Check: resource.ComposeAggregateTestCheckFunc( testAccCheckPackageExists(ctx, resourceName), - resource.TestCheckResourceAttr(resourceName, "available_package_version", ""), + resource.TestCheckResourceAttr(resourceName, "available_package_version", "v1"), resource.TestCheckResourceAttr(resourceName, "package_description", ""), resource.TestCheckResourceAttrSet(resourceName, "package_id"), resource.TestCheckResourceAttr(resourceName, "package_name", pkgName), resource.TestCheckResourceAttr(resourceName, "package_source.#", "1"), - resource.TestCheckResourceAttr(resourceName, "package_type", "TXT-DICTIONARY"), + resource.TestCheckResourceAttr(resourceName, "package_type", string(awstypes.PackageTypeTxtDictionary)), ), }, { @@ -53,6 +55,48 @@ func TestAccOpenSearchPackage_basic(t *testing.T) { }) } +func TestAccOpenSearchPackage_packageTypeZipPlugin(t *testing.T) { + ctx := acctest.Context(t) + pkgName := testAccRandomDomainName() + resourceName := "aws_opensearch_package.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.OpenSearchServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckPackageDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: 
testAccPackageConfig_packageTypeZipPlugin(pkgName, "OpenSearch_2.17"), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckPackageExists(ctx, resourceName), + resource.TestCheckResourceAttr(resourceName, "available_package_version", "v1"), + resource.TestCheckResourceAttr(resourceName, names.AttrEngineVersion, "OpenSearch_2.17"), + resource.TestCheckResourceAttr(resourceName, "package_description", ""), + resource.TestCheckResourceAttrSet(resourceName, "package_id"), + resource.TestCheckResourceAttr(resourceName, "package_name", pkgName), + resource.TestCheckResourceAttr(resourceName, "package_source.#", "1"), + resource.TestCheckResourceAttr(resourceName, "package_type", string(awstypes.PackageTypeZipPlugin)), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "available_package_version", + "package_source", // This isn't returned by the API + }, + }, + { + // If engine_version is different from specified in the plugin zip file, it should return an error + Config: testAccPackageConfig_packageTypeZipPlugin(pkgName, "OpenSearch_2.11"), + ExpectError: regexache.MustCompile(`doesn't matches with the provided EngineVersion`), + }, + }, + }) +} + func TestAccOpenSearchPackage_disappears(t *testing.T) { ctx := acctest.Context(t) pkgName := testAccRandomDomainName() @@ -140,3 +184,35 @@ resource "aws_opensearch_package" "test" { } `, rName) } + +func testAccPackageConfig_packageTypeZipPlugin(rName, engineVersion string) string { + return fmt.Sprintf(` +resource "aws_s3_bucket" "test" { + bucket = %[1]q +} + + +# example-opensearch-plugin.zip was created from the sample repository provided by AWS using the following commands: +# > git clone https://github.com/aws-samples/kr-tech-blog-sample-code.git +# > cd kr-tech-blog-sample-code/opensearch_custom_plugin +# > gradle build +# > cp build/distributions/opensearch-custom-plugin-1.0.0.zip 
terraform-provider-aws/internal/service/opensearch/test-fixtures/example-opensearch-plugin.zip + +resource "aws_s3_object" "test" { + bucket = aws_s3_bucket.test.bucket + key = %[1]q + source = "./test-fixtures/example-opensearch-plugin.zip" + etag = filemd5("./test-fixtures/example-opensearch-plugin.zip") +} + +resource "aws_opensearch_package" "test" { + engine_version = %[2]q + package_name = %[1]q + package_source { + s3_bucket_name = aws_s3_bucket.test.bucket + s3_key = aws_s3_object.test.key + } + package_type = "ZIP-PLUGIN" +} +`, rName, engineVersion) +} diff --git a/internal/service/opensearch/service_endpoint_resolver_gen.go b/internal/service/opensearch/service_endpoint_resolver_gen.go index 7c7b779105f0..c3659fc7a0cd 100644 --- a/internal/service/opensearch/service_endpoint_resolver_gen.go +++ b/internal/service/opensearch/service_endpoint_resolver_gen.go @@ -62,7 +62,7 @@ func (r resolverV2) ResolveEndpoint(ctx context.Context, params opensearch.Endpo }) params.UseFIPS = aws.Bool(false) } else { - err = fmt.Errorf("looking up opensearch endpoint %q: %s", hostname, err) + err = fmt.Errorf("looking up opensearch endpoint %q: %w", hostname, err) return } } else { diff --git a/internal/service/opensearch/service_endpoints_gen_test.go b/internal/service/opensearch/service_endpoints_gen_test.go index b9e35d5ce5f7..a51ffcd77acd 100644 --- a/internal/service/opensearch/service_endpoints_gen_test.go +++ b/internal/service/opensearch/service_endpoints_gen_test.go @@ -601,7 +601,7 @@ func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { ) } -var errCancelOperation = fmt.Errorf("Test: Canceling request") +var errCancelOperation = errors.New("Test: Canceling request") func addCancelRequestMiddleware() func(*middleware.Stack) error { return func(stack *middleware.Stack) error { diff --git a/internal/service/opensearch/service_package_gen.go b/internal/service/opensearch/service_package_gen.go index 41067ed206d2..12bdfa72df91 100644 --- 
a/internal/service/opensearch/service_package_gen.go +++ b/internal/service/opensearch/service_package_gen.go @@ -7,7 +7,6 @@ import ( "unique" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/opensearch" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -123,7 +122,7 @@ func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) ( func(o *opensearch.Options) { if inContext, ok := conns.FromContext(ctx); ok && inContext.VCREnabled() { tflog.Info(ctx, "overriding retry behavior to immediately return VCR errors") - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(vcr.InteractionNotFoundRetryableFunc)) + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), vcr.InteractionNotFoundRetryableFunc) } }, withExtraOptions(ctx, p, config), diff --git a/internal/service/opensearch/sweep.go b/internal/service/opensearch/sweep.go index 334c848fe12c..95685c338959 100644 --- a/internal/service/opensearch/sweep.go +++ b/internal/service/opensearch/sweep.go @@ -42,7 +42,7 @@ func sweepDomains(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %w", err) + return fmt.Errorf("getting client: %w", err) } conn := client.OpenSearchClient(ctx) sweepResources := make([]sweep.Sweepable, 0) @@ -119,7 +119,7 @@ func sweepInboundConnections(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %w", err) + return fmt.Errorf("getting client: %w", err) } conn := client.OpenSearchClient(ctx) input := &opensearch.DescribeInboundConnectionsInput{} @@ -169,7 +169,7 @@ func sweepOutboundConnections(region string) error { ctx := sweep.Context(region) client, 
err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %w", err) + return fmt.Errorf("getting client: %w", err) } conn := client.OpenSearchClient(ctx) input := &opensearch.DescribeOutboundConnectionsInput{} diff --git a/internal/service/opensearch/tags_gen.go b/internal/service/opensearch/tags_gen.go index 470a56873b26..fa9bd0b9f076 100644 --- a/internal/service/opensearch/tags_gen.go +++ b/internal/service/opensearch/tags_gen.go @@ -3,8 +3,8 @@ package opensearch import ( "context" - "fmt" + "github.com/YakDriver/smarterr" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/opensearch" awstypes "github.com/aws/aws-sdk-go-v2/service/opensearch/types" @@ -27,7 +27,7 @@ func listTags(ctx context.Context, conn *opensearch.Client, identifier string, o output, err := conn.ListTags(ctx, &input, optFns...) if err != nil { - return tftags.New(ctx, nil), err + return tftags.New(ctx, nil), smarterr.NewError(err) } return keyValueTags(ctx, output.TagList), nil @@ -39,7 +39,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri tags, err := listTags(ctx, meta.(*conns.AWSClient).OpenSearchClient(ctx), identifier) if err != nil { - return err + return smarterr.NewError(err) } if inContext, ok := tftags.FromContext(ctx); ok { @@ -117,7 +117,7 @@ func updateTags(ctx context.Context, conn *opensearch.Client, identifier string, _, err := conn.RemoveTags(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("untagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } @@ -132,7 +132,7 @@ func updateTags(ctx context.Context, conn *opensearch.Client, identifier string, _, err := conn.AddTags(ctx, &input, optFns...) 
if err != nil { - return fmt.Errorf("tagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } diff --git a/internal/service/opensearch/test-fixtures/example-opensearch-plugin.zip b/internal/service/opensearch/test-fixtures/example-opensearch-plugin.zip new file mode 100644 index 000000000000..bca46ca3343c Binary files /dev/null and b/internal/service/opensearch/test-fixtures/example-opensearch-plugin.zip differ diff --git a/internal/service/opensearch/wait.go b/internal/service/opensearch/wait.go index 63110158ec83..1c1a7a560851 100644 --- a/internal/service/opensearch/wait.go +++ b/internal/service/opensearch/wait.go @@ -42,74 +42,58 @@ func waitUpgradeSucceeded(ctx context.Context, conn *opensearch.Client, name str func waitForDomainCreation(ctx context.Context, conn *opensearch.Client, domainName string, timeout time.Duration) error { var out *awstypes.DomainStatus - err := tfresource.Retry(ctx, timeout, func() *retry.RetryError { + err := tfresource.Retry(ctx, timeout, func(ctx context.Context) *tfresource.RetryError { var err error out, err = findDomainByName(ctx, conn, domainName) if tfresource.NotFound(err) { - return retry.RetryableError(err) + return tfresource.RetryableError(err) } if err != nil { - return retry.NonRetryableError(err) + return tfresource.NonRetryableError(err) } if !aws.ToBool(out.Processing) && (out.Endpoint != nil || out.Endpoints != nil) { return nil } - return retry.RetryableError( + return tfresource.RetryableError( fmt.Errorf("%q: Timeout while waiting for OpenSearch Domain to be created", domainName)) }, tfresource.WithDelay(10*time.Minute), tfresource.WithPollInterval(10*time.Second)) - if tfresource.TimedOut(err) { - out, err = findDomainByName(ctx, conn, domainName) - if err != nil { - return fmt.Errorf("describing OpenSearch Domain: %w", err) - } - if !aws.ToBool(out.Processing) && (out.Endpoint != nil || out.Endpoints != nil) { - return nil - } - } if err != nil { return fmt.Errorf("waiting for 
OpenSearch Domain to be created: %w", err) } + return nil } func waitForDomainUpdate(ctx context.Context, conn *opensearch.Client, domainName string, timeout time.Duration) error { var out *awstypes.DomainStatus - err := tfresource.Retry(ctx, timeout, func() *retry.RetryError { + err := tfresource.Retry(ctx, timeout, func(ctx context.Context) *tfresource.RetryError { var err error out, err = findDomainByName(ctx, conn, domainName) if err != nil { - return retry.NonRetryableError(err) + return tfresource.NonRetryableError(err) } if !aws.ToBool(out.Processing) { return nil } - return retry.RetryableError( + return tfresource.RetryableError( fmt.Errorf("%q: Timeout while waiting for changes to be processed", domainName)) }, tfresource.WithDelay(1*time.Minute), tfresource.WithPollInterval(10*time.Second)) - if tfresource.TimedOut(err) { - out, err = findDomainByName(ctx, conn, domainName) - if err != nil { - return fmt.Errorf("describing OpenSearch Domain: %w", err) - } - if !aws.ToBool(out.Processing) { - return nil - } - } if err != nil { return fmt.Errorf("waiting for OpenSearch Domain changes to be processed: %w", err) } + return nil } func waitForDomainDelete(ctx context.Context, conn *opensearch.Client, domainName string, timeout time.Duration) error { var out *awstypes.DomainStatus - err := tfresource.Retry(ctx, timeout, func() *retry.RetryError { + err := tfresource.Retry(ctx, timeout, func(ctx context.Context) *tfresource.RetryError { var err error out, err = findDomainByName(ctx, conn, domainName) @@ -117,31 +101,18 @@ func waitForDomainDelete(ctx context.Context, conn *opensearch.Client, domainNam if tfresource.NotFound(err) { return nil } - return retry.NonRetryableError(err) + return tfresource.NonRetryableError(err) } if out != nil && !aws.ToBool(out.Processing) { return nil } - return retry.RetryableError(fmt.Errorf("timeout while waiting for the OpenSearch Domain %q to be deleted", domainName)) + return tfresource.RetryableError(fmt.Errorf("timeout 
while waiting for the OpenSearch Domain %q to be deleted", domainName)) }, tfresource.WithDelay(10*time.Minute), tfresource.WithPollInterval(10*time.Second)) - if tfresource.TimedOut(err) { - out, err = findDomainByName(ctx, conn, domainName) - if err != nil { - if tfresource.NotFound(err) { - return nil - } - return fmt.Errorf("describing OpenSearch Domain: %s", err) - } - if out != nil && !aws.ToBool(out.Processing) { - return nil - } - } - if err != nil { - return fmt.Errorf("waiting for OpenSearch Domain to be deleted: %s", err) + return fmt.Errorf("waiting for OpenSearch Domain to be deleted: %w", err) } // opensearch maintains information about the domain in multiple (at least 2) places that need diff --git a/internal/service/opensearchserverless/access_policy.go b/internal/service/opensearchserverless/access_policy.go index 8a6d1b7c6ca5..2e59b69e9c7d 100644 --- a/internal/service/opensearchserverless/access_policy.go +++ b/internal/service/opensearchserverless/access_policy.go @@ -10,21 +10,21 @@ import ( "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/opensearchserverless" - "github.com/aws/aws-sdk-go-v2/service/opensearchserverless/document" awstypes "github.com/aws/aws-sdk-go-v2/service/opensearchserverless/types" + "github.com/hashicorp/terraform-plugin-framework-jsontypes/jsontypes" "github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator" + "github.com/hashicorp/terraform-plugin-framework/path" "github.com/hashicorp/terraform-plugin-framework/resource" "github.com/hashicorp/terraform-plugin-framework/resource/schema" "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" "github.com/hashicorp/terraform-plugin-framework/schema/validator" "github.com/hashicorp/terraform-plugin-framework/types" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" - 
"github.com/hashicorp/terraform-provider-aws/internal/create" + sdkid "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/fwdiag" "github.com/hashicorp/terraform-provider-aws/internal/framework" - "github.com/hashicorp/terraform-provider-aws/internal/framework/flex" + fwflex "github.com/hashicorp/terraform-provider-aws/internal/framework/flex" fwtypes "github.com/hashicorp/terraform-provider-aws/internal/framework/types" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/names" @@ -35,26 +35,12 @@ func newAccessPolicyResource(_ context.Context) (resource.ResourceWithConfigure, return &accessPolicyResource{}, nil } -type accessPolicyResourceModel struct { - framework.WithRegionModel - Description types.String `tfsdk:"description"` - ID types.String `tfsdk:"id"` - Name types.String `tfsdk:"name"` - Policy fwtypes.SmithyJSON[document.Interface] `tfsdk:"policy"` - PolicyVersion types.String `tfsdk:"policy_version"` - Type fwtypes.StringEnum[awstypes.AccessPolicyType] `tfsdk:"type"` -} - -const ( - ResNameAccessPolicy = "Access Policy" -) - type accessPolicyResource struct { framework.ResourceWithModel[accessPolicyResourceModel] } -func (r *accessPolicyResource) Schema(ctx context.Context, req resource.SchemaRequest, resp *resource.SchemaResponse) { - resp.Schema = schema.Schema{ +func (r *accessPolicyResource) Schema(ctx context.Context, request resource.SchemaRequest, response *resource.SchemaResponse) { + response.Schema = schema.Schema{ Attributes: map[string]schema.Attribute{ names.AttrDescription: schema.StringAttribute{ Description: "Description of the policy. 
Typically used to store information about the permissions defined in the policy.", @@ -76,7 +62,7 @@ func (r *accessPolicyResource) Schema(ctx context.Context, req resource.SchemaRe }, names.AttrPolicy: schema.StringAttribute{ Description: "JSON policy document to use as the content for the new policy.", - CustomType: fwtypes.NewSmithyJSONType(ctx, document.NewLazyDocument), + CustomType: jsontypes.NormalizedType{}, Required: true, Validators: []validator.String{ stringvalidator.LengthBetween(1, 20480), @@ -98,162 +84,161 @@ func (r *accessPolicyResource) Schema(ctx context.Context, req resource.SchemaRe } } -func (r *accessPolicyResource) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { - var plan accessPolicyResourceModel - - resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...) - - if resp.Diagnostics.HasError() { +func (r *accessPolicyResource) Create(ctx context.Context, request resource.CreateRequest, response *resource.CreateResponse) { + var data accessPolicyResourceModel + response.Diagnostics.Append(request.Plan.Get(ctx, &data)...) + if response.Diagnostics.HasError() { return } conn := r.Meta().OpenSearchServerlessClient(ctx) - in := &opensearchserverless.CreateAccessPolicyInput{} - - resp.Diagnostics.Append(flex.Expand(ctx, plan, in)...) - - if resp.Diagnostics.HasError() { + name := fwflex.StringValueFromFramework(ctx, data.Name) + var input opensearchserverless.CreateAccessPolicyInput + response.Diagnostics.Append(fwflex.Expand(ctx, data, &input)...) + if response.Diagnostics.HasError() { return } - in.ClientToken = aws.String(id.UniqueId()) + // Additional fields. 
+ input.ClientToken = aws.String(sdkid.UniqueId()) - out, err := conn.CreateAccessPolicy(ctx, in) - if err != nil { - resp.Diagnostics.AddError( - create.ProblemStandardMessage(names.OpenSearchServerless, create.ErrActionCreating, ResNameAccessPolicy, plan.Name.String(), nil), - err.Error(), - ) - return - } + output, err := conn.CreateAccessPolicy(ctx, &input) - state := plan - resp.Diagnostics.Append(flex.Flatten(ctx, out.AccessPolicyDetail, &state)...) + if err != nil { + response.Diagnostics.AddError(fmt.Sprintf("creating OpenSearch Serverless Access Policy (%s)", name), err.Error()) - if resp.Diagnostics.HasError() { return } - state.ID = flex.StringToFramework(ctx, out.AccessPolicyDetail.Name) + // Set values for unknowns. + data.ID = fwflex.StringValueToFramework(ctx, name) + data.PolicyVersion = fwflex.StringToFramework(ctx, output.AccessPolicyDetail.PolicyVersion) - resp.Diagnostics.Append(resp.State.Set(ctx, &state)...) + response.Diagnostics.Append(response.State.Set(ctx, data)...) } -func (r *accessPolicyResource) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { - conn := r.Meta().OpenSearchServerlessClient(ctx) - - var state accessPolicyResourceModel - resp.Diagnostics.Append(req.State.Get(ctx, &state)...) - if resp.Diagnostics.HasError() { +func (r *accessPolicyResource) Read(ctx context.Context, request resource.ReadRequest, response *resource.ReadResponse) { + var data accessPolicyResourceModel + response.Diagnostics.Append(request.State.Get(ctx, &data)...) 
+ if response.Diagnostics.HasError() { return } - out, err := findAccessPolicyByNameAndType(ctx, conn, state.ID.ValueString(), state.Type.ValueString()) + conn := r.Meta().OpenSearchServerlessClient(ctx) + + name := fwflex.StringValueFromFramework(ctx, data.ID) + output, err := findAccessPolicyByNameAndType(ctx, conn, name, data.Type.ValueString()) + if tfresource.NotFound(err) { - resp.Diagnostics.Append(fwdiag.NewResourceNotFoundWarningDiagnostic(err)) - resp.State.RemoveResource(ctx) + response.Diagnostics.Append(fwdiag.NewResourceNotFoundWarningDiagnostic(err)) + response.State.RemoveResource(ctx) + return } if err != nil { - resp.Diagnostics.AddError( - create.ProblemStandardMessage(names.OpenSearchServerless, create.ErrActionReading, ResNameAccessPolicy, state.ID.ValueString(), err), - err.Error(), - ) + response.Diagnostics.AddError(fmt.Sprintf("reading OpenSearch Serverless Access Policy (%s)", name), err.Error()) + return } - resp.Diagnostics.Append(flex.Flatten(ctx, out, &state)...) - if resp.Diagnostics.HasError() { + // Set attributes for import. + response.Diagnostics.Append(fwflex.Flatten(ctx, output, &data)...) + if response.Diagnostics.HasError() { return } - resp.Diagnostics.Append(resp.State.Set(ctx, &state)...) + response.Diagnostics.Append(response.State.Set(ctx, &data)...) } -func (r *accessPolicyResource) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) { - conn := r.Meta().OpenSearchServerlessClient(ctx) - - var plan, state accessPolicyResourceModel - resp.Diagnostics.Append(req.State.Get(ctx, &state)...) - resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...) - if resp.Diagnostics.HasError() { +func (r *accessPolicyResource) Update(ctx context.Context, request resource.UpdateRequest, response *resource.UpdateResponse) { + var new, old accessPolicyResourceModel + response.Diagnostics.Append(request.State.Get(ctx, &old)...) 
+ if response.Diagnostics.HasError() { + return + } + response.Diagnostics.Append(request.Plan.Get(ctx, &new)...) + if response.Diagnostics.HasError() { return } - if !plan.Description.Equal(state.Description) || - !plan.Policy.Equal(state.Policy) { - input := &opensearchserverless.UpdateAccessPolicyInput{} + conn := r.Meta().OpenSearchServerlessClient(ctx) - resp.Diagnostics.Append(flex.Expand(ctx, plan, input)...) - if resp.Diagnostics.HasError() { + if !new.Description.Equal(old.Description) || !new.Policy.Equal(old.Policy) { + name := fwflex.StringValueFromFramework(ctx, new.ID) + var input opensearchserverless.UpdateAccessPolicyInput + response.Diagnostics.Append(fwflex.Expand(ctx, new, &input)...) + if response.Diagnostics.HasError() { return } - input.ClientToken = aws.String(id.UniqueId()) - input.PolicyVersion = state.PolicyVersion.ValueStringPointer() // use policy version from state since it can be recalculated on update + // Additional fields. + input.ClientToken = aws.String(sdkid.UniqueId()) + input.PolicyVersion = old.PolicyVersion.ValueStringPointer() // use policy version from state since it can be recalculated on update + + output, err := conn.UpdateAccessPolicy(ctx, &input) - out, err := conn.UpdateAccessPolicy(ctx, input) if err != nil { - resp.Diagnostics.AddError(fmt.Sprintf("updating Security Policy (%s)", plan.Name.ValueString()), err.Error()) - return - } + response.Diagnostics.AddError(fmt.Sprintf("updating OpenSearch Serverless Access Policy (%s)", name), err.Error()) - resp.Diagnostics.Append(flex.Flatten(ctx, out.AccessPolicyDetail, &plan)...) - if resp.Diagnostics.HasError() { return } + + // Set values for unknowns. + new.PolicyVersion = fwflex.StringToFramework(ctx, output.AccessPolicyDetail.PolicyVersion) } - resp.Diagnostics.Append(resp.State.Set(ctx, &plan)...) + response.Diagnostics.Append(response.State.Set(ctx, &new)...) 
} -func (r *accessPolicyResource) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { - conn := r.Meta().OpenSearchServerlessClient(ctx) - - var state accessPolicyResourceModel - resp.Diagnostics.Append(req.State.Get(ctx, &state)...) - if resp.Diagnostics.HasError() { +func (r *accessPolicyResource) Delete(ctx context.Context, request resource.DeleteRequest, response *resource.DeleteResponse) { + var data accessPolicyResourceModel + response.Diagnostics.Append(request.State.Get(ctx, &data)...) + if response.Diagnostics.HasError() { return } - _, err := conn.DeleteAccessPolicy(ctx, &opensearchserverless.DeleteAccessPolicyInput{ - ClientToken: aws.String(id.UniqueId()), - Name: state.Name.ValueStringPointer(), - Type: awstypes.AccessPolicyType(state.Type.ValueString()), - }) + conn := r.Meta().OpenSearchServerlessClient(ctx) + + name := fwflex.StringValueFromFramework(ctx, data.ID) + input := opensearchserverless.DeleteAccessPolicyInput{ + ClientToken: aws.String(sdkid.UniqueId()), + Name: aws.String(name), + Type: data.Type.ValueEnum(), + } + _, err := conn.DeleteAccessPolicy(ctx, &input) if errs.IsA[*awstypes.ResourceNotFoundException](err) { return } if err != nil { - resp.Diagnostics.AddError( - create.ProblemStandardMessage(names.OpenSearchServerless, create.ErrActionDeleting, ResNameAccessPolicy, state.Name.String(), nil), - err.Error(), - ) + response.Diagnostics.AddError(fmt.Sprintf("deleting OpenSearch Serverless Access Policy (%s)", name), err.Error()) + return } } -func (r *accessPolicyResource) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) { - parts := strings.Split(req.ID, idSeparator) +func (r *accessPolicyResource) ImportState(ctx context.Context, request resource.ImportStateRequest, response *resource.ImportStateResponse) { + parts := strings.Split(request.ID, resourceIDSeparator) if len(parts) != 2 || parts[0] == "" || parts[1] == "" { - err := 
fmt.Errorf("unexpected format for ID (%[1]s), expected security-policy-name%[2]ssecurity-policy-type", req.ID, idSeparator) - resp.Diagnostics.AddError(fmt.Sprintf("importing Security Policy (%s)", req.ID), err.Error()) + err := fmt.Errorf("unexpected format for ID (%[1]s), expected security-policy-name%[2]ssecurity-policy-type", request.ID, resourceIDSeparator) + response.Diagnostics.Append(fwdiag.NewParsingResourceIDErrorDiagnostic(err)) + return } - state := accessPolicyResourceModel{ - ID: types.StringValue(parts[0]), - Name: types.StringValue(parts[0]), - Type: fwtypes.StringEnumValue(awstypes.AccessPolicyType(parts[1])), - } + response.Diagnostics.Append(response.State.SetAttribute(ctx, path.Root(names.AttrID), parts[0])...) + response.Diagnostics.Append(response.State.SetAttribute(ctx, path.Root(names.AttrType), parts[1])...) +} - diags := resp.State.Set(ctx, &state) - resp.Diagnostics.Append(diags...) - if resp.Diagnostics.HasError() { - return - } +type accessPolicyResourceModel struct { + framework.WithRegionModel + Description types.String `tfsdk:"description"` + ID types.String `tfsdk:"id"` + Name types.String `tfsdk:"name"` + Policy jsontypes.Normalized `tfsdk:"policy"` + PolicyVersion types.String `tfsdk:"policy_version"` + Type fwtypes.StringEnum[awstypes.AccessPolicyType] `tfsdk:"type"` } diff --git a/internal/service/opensearchserverless/access_policy_test.go b/internal/service/opensearchserverless/access_policy_test.go index bc3cdb550f07..d2172a006083 100644 --- a/internal/service/opensearchserverless/access_policy_test.go +++ b/internal/service/opensearchserverless/access_policy_test.go @@ -5,7 +5,6 @@ package opensearchserverless_test import ( "context" - "errors" "fmt" "testing" @@ -16,7 +15,6 @@ import ( "github.com/hashicorp/terraform-plugin-testing/terraform" "github.com/hashicorp/terraform-provider-aws/internal/acctest" "github.com/hashicorp/terraform-provider-aws/internal/conns" - 
"github.com/hashicorp/terraform-provider-aws/internal/create" tfopensearchserverless "github.com/hashicorp/terraform-provider-aws/internal/service/opensearchserverless" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/names" @@ -47,7 +45,7 @@ func TestAccOpenSearchServerlessAccessPolicy_basic(t *testing.T) { }, { ResourceName: resourceName, - ImportStateIdFunc: testAccAccessPolicyImportStateIdFunc(resourceName), + ImportStateIdFunc: acctest.AttrsImportStateIdFunc(resourceName, "/", names.AttrName, names.AttrType), ImportState: true, ImportStateVerify: true, }, @@ -98,7 +96,7 @@ func TestAccOpenSearchServerlessAccessPolicy_update(t *testing.T) { }, { ResourceName: resourceName, - ImportStateIdFunc: testAccAccessPolicyImportStateIdFunc(resourceName), + ImportStateIdFunc: acctest.AttrsImportStateIdFunc(resourceName, "/", names.AttrName, names.AttrType), ImportState: true, ImportStateVerify: true, }, @@ -154,48 +152,34 @@ func testAccCheckAccessPolicyDestroy(ctx context.Context) resource.TestCheckFunc return err } - return create.Error(names.OpenSearchServerless, create.ErrActionCheckingDestroyed, tfopensearchserverless.ResNameAccessPolicy, rs.Primary.ID, errors.New("not destroyed")) + return fmt.Errorf("OpenSearch Serverless Access Policy %s still exists", rs.Primary.ID) } return nil } } -func testAccCheckAccessPolicyExists(ctx context.Context, name string, accesspolicy *types.AccessPolicyDetail) resource.TestCheckFunc { +func testAccCheckAccessPolicyExists(ctx context.Context, n string, v *types.AccessPolicyDetail) resource.TestCheckFunc { return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[name] + rs, ok := s.RootModule().Resources[n] if !ok { - return create.Error(names.OpenSearchServerless, create.ErrActionCheckingExistence, tfopensearchserverless.ResNameAccessPolicy, name, errors.New("not found")) - } - - if rs.Primary.ID == "" { - return 
create.Error(names.OpenSearchServerless, create.ErrActionCheckingExistence, tfopensearchserverless.ResNameAccessPolicy, name, errors.New("not set")) + return fmt.Errorf("Not found: %s", n) } conn := acctest.Provider.Meta().(*conns.AWSClient).OpenSearchServerlessClient(ctx) - resp, err := tfopensearchserverless.FindAccessPolicyByNameAndType(ctx, conn, rs.Primary.ID, rs.Primary.Attributes[names.AttrType]) + + output, err := tfopensearchserverless.FindAccessPolicyByNameAndType(ctx, conn, rs.Primary.ID, rs.Primary.Attributes[names.AttrType]) if err != nil { - return create.Error(names.OpenSearchServerless, create.ErrActionCheckingExistence, tfopensearchserverless.ResNameAccessPolicy, rs.Primary.ID, err) + return err } - *accesspolicy = *resp + *v = *output return nil } } -func testAccAccessPolicyImportStateIdFunc(resourceName string) resource.ImportStateIdFunc { - return func(s *terraform.State) (string, error) { - rs, ok := s.RootModule().Resources[resourceName] - if !ok { - return "", fmt.Errorf("not found: %s", resourceName) - } - - return fmt.Sprintf("%s/%s", rs.Primary.Attributes[names.AttrName], rs.Primary.Attributes[names.AttrType]), nil - } -} - func testAccPreCheckAccessPolicy(ctx context.Context, t *testing.T) { conn := acctest.Provider.Meta().(*conns.AWSClient).OpenSearchServerlessClient(ctx) diff --git a/internal/service/opensearchserverless/const.go b/internal/service/opensearchserverless/const.go index 37e4c45a83b8..47416c4e98b5 100644 --- a/internal/service/opensearchserverless/const.go +++ b/internal/service/opensearchserverless/const.go @@ -3,4 +3,4 @@ package opensearchserverless -const idSeparator = "/" +const resourceIDSeparator = "/" diff --git a/internal/service/opensearchserverless/lifecycle_policy.go b/internal/service/opensearchserverless/lifecycle_policy.go index e12ce7b62dfa..c445fb6bf85d 100644 --- a/internal/service/opensearchserverless/lifecycle_policy.go +++ b/internal/service/opensearchserverless/lifecycle_policy.go @@ -5,27 +5,26 @@ 
package opensearchserverless import ( "context" - "errors" "fmt" "strings" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/opensearchserverless" - "github.com/aws/aws-sdk-go-v2/service/opensearchserverless/document" awstypes "github.com/aws/aws-sdk-go-v2/service/opensearchserverless/types" + "github.com/hashicorp/terraform-plugin-framework-jsontypes/jsontypes" "github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator" + "github.com/hashicorp/terraform-plugin-framework/path" "github.com/hashicorp/terraform-plugin-framework/resource" "github.com/hashicorp/terraform-plugin-framework/resource/schema" "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" "github.com/hashicorp/terraform-plugin-framework/schema/validator" "github.com/hashicorp/terraform-plugin-framework/types" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" - "github.com/hashicorp/terraform-provider-aws/internal/create" + sdkid "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/fwdiag" "github.com/hashicorp/terraform-provider-aws/internal/framework" - "github.com/hashicorp/terraform-provider-aws/internal/framework/flex" + fwflex "github.com/hashicorp/terraform-provider-aws/internal/framework/flex" fwtypes "github.com/hashicorp/terraform-provider-aws/internal/framework/types" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/names" @@ -36,16 +35,12 @@ func newLifecyclePolicyResource(_ context.Context) (resource.ResourceWithConfigu return &lifecyclePolicyResource{}, nil } -const ( - ResNameLifecyclePolicy = "Lifecycle Policy" -) - type lifecyclePolicyResource struct { framework.ResourceWithModel[lifecyclePolicyResourceModel] } -func (r *lifecyclePolicyResource) 
Schema(ctx context.Context, _ resource.SchemaRequest, resp *resource.SchemaResponse) { - resp.Schema = schema.Schema{ +func (r *lifecyclePolicyResource) Schema(ctx context.Context, request resource.SchemaRequest, response *resource.SchemaResponse) { + response.Schema = schema.Schema{ Attributes: map[string]schema.Attribute{ names.AttrDescription: schema.StringAttribute{ Description: "Description of the policy.", @@ -67,7 +62,7 @@ func (r *lifecyclePolicyResource) Schema(ctx context.Context, _ resource.SchemaR }, names.AttrPolicy: schema.StringAttribute{ Description: "JSON policy document to use as the content for the new policy.", - CustomType: fwtypes.NewSmithyJSONType(ctx, document.NewLazyDocument), + CustomType: jsontypes.NormalizedType{}, Required: true, Validators: []validator.String{ stringvalidator.LengthBetween(1, 20480), @@ -89,178 +84,153 @@ func (r *lifecyclePolicyResource) Schema(ctx context.Context, _ resource.SchemaR } } -func (r *lifecyclePolicyResource) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { - conn := r.Meta().OpenSearchServerlessClient(ctx) - - var plan lifecyclePolicyResourceModel - resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...) - if resp.Diagnostics.HasError() { +func (r *lifecyclePolicyResource) Create(ctx context.Context, request resource.CreateRequest, response *resource.CreateResponse) { + var data lifecyclePolicyResourceModel + response.Diagnostics.Append(request.Plan.Get(ctx, &data)...) + if response.Diagnostics.HasError() { return } - in := &opensearchserverless.CreateLifecyclePolicyInput{} - - resp.Diagnostics.Append(flex.Expand(ctx, plan, in)...) + conn := r.Meta().OpenSearchServerlessClient(ctx) - if resp.Diagnostics.HasError() { + name := fwflex.StringValueFromFramework(ctx, data.Name) + var input opensearchserverless.CreateLifecyclePolicyInput + response.Diagnostics.Append(fwflex.Expand(ctx, data, &input)...) 
+ if response.Diagnostics.HasError() { return } - in.ClientToken = aws.String(id.UniqueId()) + // Additional fields. + input.ClientToken = aws.String(sdkid.UniqueId()) + + output, err := conn.CreateLifecyclePolicy(ctx, &input) - out, err := conn.CreateLifecyclePolicy(ctx, in) if err != nil { - resp.Diagnostics.AddError( - create.ProblemStandardMessage(names.OpenSearchServerless, create.ErrActionCreating, ResNameLifecyclePolicy, plan.Name.ValueString(), err), - err.Error(), - ) - return - } - if out == nil || out.LifecyclePolicyDetail == nil { - resp.Diagnostics.AddError( - create.ProblemStandardMessage(names.OpenSearchServerless, create.ErrActionCreating, ResNameLifecyclePolicy, plan.Name.ValueString(), nil), - errors.New("empty output").Error(), - ) + response.Diagnostics.AddError(fmt.Sprintf("creating OpenSearch Serverless Lifecycle Policy (%s)", name), err.Error()) + return } - state := plan - - resp.Diagnostics.Append(flex.Flatten(ctx, out.LifecyclePolicyDetail, &state)...) + // Set values for unknowns. + data.ID = fwflex.StringValueToFramework(ctx, name) + data.PolicyVersion = fwflex.StringToFramework(ctx, output.LifecyclePolicyDetail.PolicyVersion) - state.ID = flex.StringToFramework(ctx, out.LifecyclePolicyDetail.Name) - - resp.Diagnostics.Append(resp.State.Set(ctx, &state)...) + response.Diagnostics.Append(response.State.Set(ctx, data)...) } -func (r *lifecyclePolicyResource) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { - conn := r.Meta().OpenSearchServerlessClient(ctx) - - var state lifecyclePolicyResourceModel - resp.Diagnostics.Append(req.State.Get(ctx, &state)...) - if resp.Diagnostics.HasError() { +func (r *lifecyclePolicyResource) Read(ctx context.Context, request resource.ReadRequest, response *resource.ReadResponse) { + var data lifecyclePolicyResourceModel + response.Diagnostics.Append(request.State.Get(ctx, &data)...) 
+ if response.Diagnostics.HasError() { return } - out, err := findLifecyclePolicyByNameAndType(ctx, conn, state.ID.ValueString(), state.Type.ValueString()) + conn := r.Meta().OpenSearchServerlessClient(ctx) + + name := fwflex.StringValueFromFramework(ctx, data.ID) + output, err := findLifecyclePolicyByNameAndType(ctx, conn, name, data.Type.ValueString()) if tfresource.NotFound(err) { - resp.Diagnostics.Append(fwdiag.NewResourceNotFoundWarningDiagnostic(err)) - resp.State.RemoveResource(ctx) + response.Diagnostics.Append(fwdiag.NewResourceNotFoundWarningDiagnostic(err)) + response.State.RemoveResource(ctx) + return } + if err != nil { - resp.Diagnostics.AddError( - create.ProblemStandardMessage(names.OpenSearchServerless, create.ErrActionReading, ResNameLifecyclePolicy, state.ID.String(), err), - err.Error(), - ) + response.Diagnostics.AddError(fmt.Sprintf("reading OpenSearch Serverless Lifecycle Policy (%s)", name), err.Error()) + return } - resp.Diagnostics.Append(flex.Flatten(ctx, out, &state)...) - if resp.Diagnostics.HasError() { + // Set attributes for import. + response.Diagnostics.Append(fwflex.Flatten(ctx, output, &data)...) + if response.Diagnostics.HasError() { return } - resp.Diagnostics.Append(resp.State.Set(ctx, &state)...) + response.Diagnostics.Append(response.State.Set(ctx, &data)...) } -func (r *lifecyclePolicyResource) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) { - conn := r.Meta().OpenSearchServerlessClient(ctx) - - var plan, state lifecyclePolicyResourceModel - resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...) - resp.Diagnostics.Append(req.State.Get(ctx, &state)...) - if resp.Diagnostics.HasError() { +func (r *lifecyclePolicyResource) Update(ctx context.Context, request resource.UpdateRequest, response *resource.UpdateResponse) { + var new, old lifecyclePolicyResourceModel + response.Diagnostics.Append(request.Plan.Get(ctx, &new)...) 
+ if response.Diagnostics.HasError() { + return + } + response.Diagnostics.Append(request.State.Get(ctx, &old)...) + if response.Diagnostics.HasError() { return } - if !plan.Description.Equal(state.Description) || !plan.Policy.Equal(state.Policy) { - in := &opensearchserverless.UpdateLifecyclePolicyInput{} - - resp.Diagnostics.Append(flex.Expand(ctx, plan, in)...) + conn := r.Meta().OpenSearchServerlessClient(ctx) - if resp.Diagnostics.HasError() { + if !new.Description.Equal(old.Description) || !new.Policy.Equal(old.Policy) { + name := fwflex.StringValueFromFramework(ctx, new.ID) + var input opensearchserverless.UpdateLifecyclePolicyInput + response.Diagnostics.Append(fwflex.Expand(ctx, new, &input)...) + if response.Diagnostics.HasError() { return } - in.ClientToken = aws.String(id.UniqueId()) - in.PolicyVersion = state.PolicyVersion.ValueStringPointer() // use policy version from state since it can be recalculated on update + // Additional fields. + input.ClientToken = aws.String(sdkid.UniqueId()) + input.PolicyVersion = old.PolicyVersion.ValueStringPointer() // use policy version from state since it can be recalculated on update + + output, err := conn.UpdateLifecyclePolicy(ctx, &input) - out, err := conn.UpdateLifecyclePolicy(ctx, in) if err != nil { - resp.Diagnostics.AddError( - create.ProblemStandardMessage(names.OpenSearchServerless, create.ErrActionUpdating, ResNameLifecyclePolicy, plan.ID.ValueString(), err), - err.Error(), - ) - return - } - if out == nil || out.LifecyclePolicyDetail == nil { - resp.Diagnostics.AddError( - create.ProblemStandardMessage(names.OpenSearchServerless, create.ErrActionUpdating, ResNameLifecyclePolicy, plan.ID.ValueString(), nil), - errors.New("empty output").Error(), - ) - return - } + response.Diagnostics.AddError(fmt.Sprintf("updating OpenSearch Serverless Lifecycle Policy (%s)", name), err.Error()) - resp.Diagnostics.Append(flex.Flatten(ctx, out.LifecyclePolicyDetail, &state)...) 
- if resp.Diagnostics.HasError() { return } + + // Set values for unknowns. + new.PolicyVersion = fwflex.StringToFramework(ctx, output.LifecyclePolicyDetail.PolicyVersion) } - resp.Diagnostics.Append(resp.State.Set(ctx, &state)...) + response.Diagnostics.Append(response.State.Set(ctx, &new)...) } -func (r *lifecyclePolicyResource) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { - conn := r.Meta().OpenSearchServerlessClient(ctx) - - var state lifecyclePolicyResourceModel - resp.Diagnostics.Append(req.State.Get(ctx, &state)...) - if resp.Diagnostics.HasError() { +func (r *lifecyclePolicyResource) Delete(ctx context.Context, request resource.DeleteRequest, response *resource.DeleteResponse) { + var data lifecyclePolicyResourceModel + response.Diagnostics.Append(request.State.Get(ctx, &data)...) + if response.Diagnostics.HasError() { return } - in := &opensearchserverless.DeleteLifecyclePolicyInput{ - ClientToken: aws.String(id.UniqueId()), - Name: flex.StringFromFramework(ctx, state.Name), - Type: awstypes.LifecyclePolicyType(state.Type.ValueString()), - } + conn := r.Meta().OpenSearchServerlessClient(ctx) - _, err := conn.DeleteLifecyclePolicy(ctx, in) + name := fwflex.StringValueFromFramework(ctx, data.ID) + input := opensearchserverless.DeleteLifecyclePolicyInput{ + ClientToken: aws.String(sdkid.UniqueId()), + Name: aws.String(name), + Type: data.Type.ValueEnum(), + } + _, err := conn.DeleteLifecyclePolicy(ctx, &input) if errs.IsA[*awstypes.ResourceNotFoundException](err) { return } if err != nil { - resp.Diagnostics.AddError( - create.ProblemStandardMessage(names.OpenSearchServerless, create.ErrActionDeleting, ResNameLifecyclePolicy, state.ID.String(), err), - err.Error(), - ) + response.Diagnostics.AddError(fmt.Sprintf("deleting OpenSearch Serverless Lifecycle Policy (%s)", name), err.Error()) + return } } -func (r *lifecyclePolicyResource) ImportState(ctx context.Context, req resource.ImportStateRequest, resp 
*resource.ImportStateResponse) { - parts := strings.Split(req.ID, idSeparator) +func (r *lifecyclePolicyResource) ImportState(ctx context.Context, request resource.ImportStateRequest, response *resource.ImportStateResponse) { + parts := strings.Split(request.ID, resourceIDSeparator) if len(parts) != 2 || parts[0] == "" || parts[1] == "" { - err := fmt.Errorf("unexpected format for ID (%[1]s), expected lifecycle-policy-name%[2]slifecycle-policy-type", req.ID, idSeparator) - resp.Diagnostics.AddError(fmt.Sprintf("importing %s (%s)", ResNameLifecyclePolicy, req.ID), err.Error()) - return - } + err := fmt.Errorf("unexpected format for ID (%[1]s), expected lifecycle-policy-name%[2]slifecycle-policy-type", request.ID, resourceIDSeparator) + response.Diagnostics.Append(fwdiag.NewParsingResourceIDErrorDiagnostic(err)) - state := lifecyclePolicyResourceModel{ - ID: types.StringValue(parts[0]), - Name: types.StringValue(parts[0]), - Type: fwtypes.StringEnumValue(awstypes.LifecyclePolicyType(parts[1])), - } - - diags := resp.State.Set(ctx, &state) - resp.Diagnostics.Append(diags...) - if resp.Diagnostics.HasError() { return } + + response.Diagnostics.Append(response.State.SetAttribute(ctx, path.Root(names.AttrID), parts[0])...) + response.Diagnostics.Append(response.State.SetAttribute(ctx, path.Root(names.AttrType), parts[1])...) 
} type lifecyclePolicyResourceModel struct { @@ -268,7 +238,7 @@ type lifecyclePolicyResourceModel struct { Description types.String `tfsdk:"description"` ID types.String `tfsdk:"id"` Name types.String `tfsdk:"name"` - Policy fwtypes.SmithyJSON[document.Interface] `tfsdk:"policy"` + Policy jsontypes.Normalized `tfsdk:"policy"` PolicyVersion types.String `tfsdk:"policy_version"` Type fwtypes.StringEnum[awstypes.LifecyclePolicyType] `tfsdk:"type"` } diff --git a/internal/service/opensearchserverless/lifecycle_policy_test.go b/internal/service/opensearchserverless/lifecycle_policy_test.go index 3654dee55297..8d707eb67cb2 100644 --- a/internal/service/opensearchserverless/lifecycle_policy_test.go +++ b/internal/service/opensearchserverless/lifecycle_policy_test.go @@ -5,7 +5,6 @@ package opensearchserverless_test import ( "context" - "errors" "fmt" "testing" @@ -16,7 +15,6 @@ import ( "github.com/hashicorp/terraform-plugin-testing/terraform" "github.com/hashicorp/terraform-provider-aws/internal/acctest" "github.com/hashicorp/terraform-provider-aws/internal/conns" - "github.com/hashicorp/terraform-provider-aws/internal/create" tfopensearchserverless "github.com/hashicorp/terraform-provider-aws/internal/service/opensearchserverless" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/names" @@ -50,7 +48,7 @@ func TestAccOpenSearchServerlessLifecyclePolicy_basic(t *testing.T) { }, { ResourceName: resourceName, - ImportStateIdFunc: testAccLifecyclePolicyImportStateIdFunc(resourceName), + ImportStateIdFunc: acctest.AttrsImportStateIdFunc(resourceName, "/", names.AttrName, names.AttrType), ImportState: true, ImportStateVerify: true, }, @@ -137,52 +135,39 @@ func testAccCheckLifecyclePolicyDestroy(ctx context.Context) resource.TestCheckF if tfresource.NotFound(err) { continue } + if err != nil { - return create.Error(names.OpenSearchServerless, create.ErrActionCheckingDestroyed, 
tfopensearchserverless.ResNameLifecyclePolicy, rs.Primary.ID, err) + return err } - return create.Error(names.OpenSearchServerless, create.ErrActionCheckingDestroyed, tfopensearchserverless.ResNameLifecyclePolicy, rs.Primary.ID, errors.New("not destroyed")) + return fmt.Errorf("OpenSearch Serverless Lifecycle Policy %s still exists", rs.Primary.ID) } return nil } } -func testAccCheckLifecyclePolicyExists(ctx context.Context, name string, lifecyclepolicy *types.LifecyclePolicyDetail) resource.TestCheckFunc { +func testAccCheckLifecyclePolicyExists(ctx context.Context, n string, v *types.LifecyclePolicyDetail) resource.TestCheckFunc { return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[name] + rs, ok := s.RootModule().Resources[n] if !ok { - return create.Error(names.OpenSearchServerless, create.ErrActionCheckingExistence, tfopensearchserverless.ResNameLifecyclePolicy, name, errors.New("not found")) - } - - if rs.Primary.ID == "" { - return create.Error(names.OpenSearchServerless, create.ErrActionCheckingExistence, tfopensearchserverless.ResNameLifecyclePolicy, name, errors.New("not set")) + return fmt.Errorf("Not found: %s", n) } conn := acctest.Provider.Meta().(*conns.AWSClient).OpenSearchServerlessClient(ctx) - resp, err := tfopensearchserverless.FindLifecyclePolicyByNameAndType(ctx, conn, rs.Primary.ID, rs.Primary.Attributes[names.AttrType]) + + output, err := tfopensearchserverless.FindLifecyclePolicyByNameAndType(ctx, conn, rs.Primary.ID, rs.Primary.Attributes[names.AttrType]) if err != nil { - return create.Error(names.OpenSearchServerless, create.ErrActionCheckingExistence, tfopensearchserverless.ResNameLifecyclePolicy, rs.Primary.ID, err) + return err } - *lifecyclepolicy = *resp + *v = *output return nil } } -func testAccLifecyclePolicyImportStateIdFunc(resourceName string) resource.ImportStateIdFunc { - return func(s *terraform.State) (string, error) { - rs, ok := s.RootModule().Resources[resourceName] - if !ok { - return "", 
fmt.Errorf("not found: %s", resourceName) - } - - return fmt.Sprintf("%s/%s", rs.Primary.Attributes[names.AttrName], rs.Primary.Attributes[names.AttrType]), nil - } -} - func testAccPreCheckLifecyclePolicy(ctx context.Context, t *testing.T) { conn := acctest.Provider.Meta().(*conns.AWSClient).OpenSearchServerlessClient(ctx) diff --git a/internal/service/opensearchserverless/security_config.go b/internal/service/opensearchserverless/security_config.go index d0a976a6f815..71bf6b37d09c 100644 --- a/internal/service/opensearchserverless/security_config.go +++ b/internal/service/opensearchserverless/security_config.go @@ -274,7 +274,7 @@ func (r *securityConfigResource) Delete(ctx context.Context, req resource.Delete } func (r *securityConfigResource) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) { - parts := strings.Split(req.ID, idSeparator) + parts := strings.Split(req.ID, resourceIDSeparator) if len(parts) != 3 || parts[0] == "" || parts[1] == "" || parts[2] == "" { err := fmt.Errorf("unexpected format for ID (%[1]s), expected saml/account-id/name", req.ID) resp.Diagnostics.AddError(fmt.Sprintf("importing Security Policy (%s)", req.ID), err.Error()) diff --git a/internal/service/opensearchserverless/security_policy.go b/internal/service/opensearchserverless/security_policy.go index a10c5dcab0b4..9ca2bb13c6e5 100644 --- a/internal/service/opensearchserverless/security_policy.go +++ b/internal/service/opensearchserverless/security_policy.go @@ -5,26 +5,26 @@ package opensearchserverless import ( "context" - "errors" "fmt" "strings" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/opensearchserverless" - "github.com/aws/aws-sdk-go-v2/service/opensearchserverless/document" awstypes "github.com/aws/aws-sdk-go-v2/service/opensearchserverless/types" + "github.com/hashicorp/terraform-plugin-framework-jsontypes/jsontypes" 
"github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator" + "github.com/hashicorp/terraform-plugin-framework/path" "github.com/hashicorp/terraform-plugin-framework/resource" "github.com/hashicorp/terraform-plugin-framework/resource/schema" "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" "github.com/hashicorp/terraform-plugin-framework/schema/validator" "github.com/hashicorp/terraform-plugin-framework/types" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" - "github.com/hashicorp/terraform-provider-aws/internal/create" + sdkid "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" + "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/fwdiag" "github.com/hashicorp/terraform-provider-aws/internal/framework" - "github.com/hashicorp/terraform-provider-aws/internal/framework/flex" + fwflex "github.com/hashicorp/terraform-provider-aws/internal/framework/flex" fwtypes "github.com/hashicorp/terraform-provider-aws/internal/framework/types" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/names" @@ -35,26 +35,12 @@ func newSecurityPolicyResource(_ context.Context) (resource.ResourceWithConfigur return &securityPolicyResource{}, nil } -type securityPolicyResourceModel struct { - framework.WithRegionModel - Description types.String `tfsdk:"description"` - ID types.String `tfsdk:"id"` - Name types.String `tfsdk:"name"` - Policy fwtypes.SmithyJSON[document.Interface] `tfsdk:"policy"` - PolicyVersion types.String `tfsdk:"policy_version"` - Type fwtypes.StringEnum[awstypes.SecurityPolicyType] `tfsdk:"type"` -} - -const ( - ResNameSecurityPolicy = "Security Policy" -) - type securityPolicyResource struct { framework.ResourceWithModel[securityPolicyResourceModel] } -func (r *securityPolicyResource) Schema(ctx 
context.Context, req resource.SchemaRequest, resp *resource.SchemaResponse) { - resp.Schema = schema.Schema{ +func (r *securityPolicyResource) Schema(ctx context.Context, request resource.SchemaRequest, response *resource.SchemaResponse) { + response.Schema = schema.Schema{ Attributes: map[string]schema.Attribute{ names.AttrDescription: schema.StringAttribute{ Description: "Description of the policy. Typically used to store information about the permissions defined in the policy.", @@ -76,7 +62,7 @@ func (r *securityPolicyResource) Schema(ctx context.Context, req resource.Schema }, names.AttrPolicy: schema.StringAttribute{ Description: "JSON policy document to use as the content for the new policy.", - CustomType: fwtypes.NewSmithyJSONType(ctx, document.NewLazyDocument), + CustomType: jsontypes.NormalizedType{}, Required: true, Validators: []validator.String{ stringvalidator.LengthBetween(1, 20480), @@ -98,162 +84,161 @@ func (r *securityPolicyResource) Schema(ctx context.Context, req resource.Schema } } -func (r *securityPolicyResource) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { - var plan securityPolicyResourceModel - - resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...) - - if resp.Diagnostics.HasError() { +func (r *securityPolicyResource) Create(ctx context.Context, request resource.CreateRequest, response *resource.CreateResponse) { + var data securityPolicyResourceModel + response.Diagnostics.Append(request.Plan.Get(ctx, &data)...) + if response.Diagnostics.HasError() { return } conn := r.Meta().OpenSearchServerlessClient(ctx) - in := &opensearchserverless.CreateSecurityPolicyInput{} - - resp.Diagnostics.Append(flex.Expand(ctx, plan, in)...) - if resp.Diagnostics.HasError() { + name := fwflex.StringValueFromFramework(ctx, data.Name) + var input opensearchserverless.CreateSecurityPolicyInput + response.Diagnostics.Append(fwflex.Expand(ctx, data, &input)...) 
+ if response.Diagnostics.HasError() { return } - in.ClientToken = aws.String(id.UniqueId()) + // Additional fields. + input.ClientToken = aws.String(sdkid.UniqueId()) + + output, err := conn.CreateSecurityPolicy(ctx, &input) - out, err := conn.CreateSecurityPolicy(ctx, in) if err != nil { - resp.Diagnostics.AddError( - create.ProblemStandardMessage(names.OpenSearchServerless, create.ErrActionCreating, ResNameSecurityPolicy, plan.Name.String(), nil), - err.Error(), - ) - return - } + response.Diagnostics.AddError(fmt.Sprintf("creating OpenSearch Serverless Security Policy (%s)", name), err.Error()) - state := plan - resp.Diagnostics.Append(flex.Flatten(ctx, out.SecurityPolicyDetail, &state)...) - if resp.Diagnostics.HasError() { return } - state.ID = flex.StringToFramework(ctx, out.SecurityPolicyDetail.Name) - resp.Diagnostics.Append(resp.State.Set(ctx, &state)...) -} + // Set values for unknowns. + data.ID = fwflex.StringValueToFramework(ctx, name) + data.PolicyVersion = fwflex.StringToFramework(ctx, output.SecurityPolicyDetail.PolicyVersion) -func (r *securityPolicyResource) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { - conn := r.Meta().OpenSearchServerlessClient(ctx) + response.Diagnostics.Append(response.State.Set(ctx, data)...) +} - var state securityPolicyResourceModel - resp.Diagnostics.Append(req.State.Get(ctx, &state)...) - if resp.Diagnostics.HasError() { +func (r *securityPolicyResource) Read(ctx context.Context, request resource.ReadRequest, response *resource.ReadResponse) { + var data securityPolicyResourceModel + response.Diagnostics.Append(request.State.Get(ctx, &data)...) 
+ if response.Diagnostics.HasError() { return } - out, err := findSecurityPolicyByNameAndType(ctx, conn, state.ID.ValueString(), state.Type.ValueString()) + conn := r.Meta().OpenSearchServerlessClient(ctx) + + name := fwflex.StringValueFromFramework(ctx, data.ID) + output, err := findSecurityPolicyByNameAndType(ctx, conn, name, data.Type.ValueString()) + if tfresource.NotFound(err) { - resp.Diagnostics.Append(fwdiag.NewResourceNotFoundWarningDiagnostic(err)) - resp.State.RemoveResource(ctx) + response.Diagnostics.Append(fwdiag.NewResourceNotFoundWarningDiagnostic(err)) + response.State.RemoveResource(ctx) + return } if err != nil { - resp.Diagnostics.AddError( - create.ProblemStandardMessage(names.OpenSearchServerless, create.ErrActionReading, ResNameSecurityPolicy, state.ID.ValueString(), err), - err.Error(), - ) + response.Diagnostics.AddError(fmt.Sprintf("reading OpenSearch Serverless Security Policy (%s)", name), err.Error()) + return } - resp.Diagnostics.Append(flex.Flatten(ctx, out, &state)...) - if resp.Diagnostics.HasError() { + // Set attributes for import. + response.Diagnostics.Append(fwflex.Flatten(ctx, output, &data)...) + if response.Diagnostics.HasError() { return } - resp.Diagnostics.Append(resp.State.Set(ctx, &state)...) + response.Diagnostics.Append(response.State.Set(ctx, &data)...) } -func (r *securityPolicyResource) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) { - conn := r.Meta().OpenSearchServerlessClient(ctx) - - var plan, state securityPolicyResourceModel - resp.Diagnostics.Append(req.State.Get(ctx, &state)...) - resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...) - if resp.Diagnostics.HasError() { +func (r *securityPolicyResource) Update(ctx context.Context, request resource.UpdateRequest, response *resource.UpdateResponse) { + var new, old securityPolicyResourceModel + response.Diagnostics.Append(request.State.Get(ctx, &old)...) 
+ if response.Diagnostics.HasError() { return } - - diff, diags := flex.Diff(ctx, plan, state) - resp.Diagnostics.Append(diags...) - if resp.Diagnostics.HasError() { + response.Diagnostics.Append(request.Plan.Get(ctx, &new)...) + if response.Diagnostics.HasError() { return } - if diff.HasChanges() { - input := &opensearchserverless.UpdateSecurityPolicyInput{} + conn := r.Meta().OpenSearchServerlessClient(ctx) - resp.Diagnostics.Append(flex.Expand(ctx, plan, input)...) - if resp.Diagnostics.HasError() { + if !new.Description.Equal(old.Description) || !new.Policy.Equal(old.Policy) { + name := fwflex.StringValueFromFramework(ctx, new.ID) + var input opensearchserverless.UpdateSecurityPolicyInput + response.Diagnostics.Append(fwflex.Expand(ctx, new, &input)...) + if response.Diagnostics.HasError() { return } - input.ClientToken = aws.String(id.UniqueId()) - input.PolicyVersion = state.PolicyVersion.ValueStringPointer() // use policy version from state since it can be recalculated on update + // Additional fields. + input.ClientToken = aws.String(sdkid.UniqueId()) + input.PolicyVersion = old.PolicyVersion.ValueStringPointer() // use policy version from state since it can be recalculated on update - out, err := conn.UpdateSecurityPolicy(ctx, input) + output, err := conn.UpdateSecurityPolicy(ctx, &input) if err != nil { - resp.Diagnostics.AddError(fmt.Sprintf("updating Security Policy (%s)", plan.Name.ValueString()), err.Error()) - return - } - resp.Diagnostics.Append(flex.Flatten(ctx, out.SecurityPolicyDetail, &state)...) - if resp.Diagnostics.HasError() { + response.Diagnostics.AddError(fmt.Sprintf("updating OpenSearch Serverless Security Policy (%s)", name), err.Error()) + return } + + // Set values for unknowns. + new.PolicyVersion = fwflex.StringToFramework(ctx, output.SecurityPolicyDetail.PolicyVersion) } - resp.Diagnostics.Append(resp.State.Set(ctx, &state)...) + response.Diagnostics.Append(response.State.Set(ctx, &new)...) 
} -func (r *securityPolicyResource) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { +func (r *securityPolicyResource) Delete(ctx context.Context, request resource.DeleteRequest, response *resource.DeleteResponse) { + var data securityPolicyResourceModel + response.Diagnostics.Append(request.State.Get(ctx, &data)...) + if response.Diagnostics.HasError() { + return + } + conn := r.Meta().OpenSearchServerlessClient(ctx) - var state securityPolicyResourceModel - resp.Diagnostics.Append(req.State.Get(ctx, &state)...) - if resp.Diagnostics.HasError() { + name := fwflex.StringValueFromFramework(ctx, data.ID) + input := opensearchserverless.DeleteSecurityPolicyInput{ + ClientToken: aws.String(sdkid.UniqueId()), + Name: aws.String(name), + Type: data.Type.ValueEnum(), + } + _, err := conn.DeleteSecurityPolicy(ctx, &input) + + if errs.IsA[*awstypes.ResourceNotFoundException](err) { return } - _, err := conn.DeleteSecurityPolicy(ctx, &opensearchserverless.DeleteSecurityPolicyInput{ - ClientToken: aws.String(id.UniqueId()), - Name: state.Name.ValueStringPointer(), - Type: state.Type.ValueEnum(), - }) if err != nil { - var nfe *awstypes.ResourceNotFoundException - if errors.As(err, &nfe) { - return - } - resp.Diagnostics.AddError( - create.ProblemStandardMessage(names.OpenSearchServerless, create.ErrActionDeleting, ResNameSecurityPolicy, state.Name.String(), nil), - err.Error(), - ) + response.Diagnostics.AddError(fmt.Sprintf("deleting OpenSearch Serverless Security Policy (%s)", name), err.Error()) + + return } } -func (r *securityPolicyResource) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) { - parts := strings.Split(req.ID, idSeparator) +func (r *securityPolicyResource) ImportState(ctx context.Context, request resource.ImportStateRequest, response *resource.ImportStateResponse) { + parts := strings.Split(request.ID, resourceIDSeparator) if len(parts) != 2 || parts[0] == "" || 
parts[1] == "" { - err := fmt.Errorf("unexpected format for ID (%[1]s), expected security-policy-name%[2]ssecurity-policy-type", req.ID, idSeparator) - resp.Diagnostics.AddError(fmt.Sprintf("importing Security Policy (%s)", req.ID), err.Error()) + err := fmt.Errorf("unexpected format for ID (%[1]s), expected security-policy-name%[2]ssecurity-policy-type", request.ID, resourceIDSeparator) + response.Diagnostics.Append(fwdiag.NewParsingResourceIDErrorDiagnostic(err)) + return } - state := securityPolicyResourceModel{ - ID: types.StringValue(parts[0]), - Name: types.StringValue(parts[0]), - Type: fwtypes.StringEnumValue(awstypes.SecurityPolicyType(parts[1])), - } + response.Diagnostics.Append(response.State.SetAttribute(ctx, path.Root(names.AttrID), parts[0])...) + response.Diagnostics.Append(response.State.SetAttribute(ctx, path.Root(names.AttrType), parts[1])...) +} - diags := resp.State.Set(ctx, &state) - resp.Diagnostics.Append(diags...) - if resp.Diagnostics.HasError() { - return - } +type securityPolicyResourceModel struct { + framework.WithRegionModel + Description types.String `tfsdk:"description"` + ID types.String `tfsdk:"id"` + Name types.String `tfsdk:"name"` + Policy jsontypes.Normalized `tfsdk:"policy"` + PolicyVersion types.String `tfsdk:"policy_version"` + Type fwtypes.StringEnum[awstypes.SecurityPolicyType] `tfsdk:"type"` } diff --git a/internal/service/opensearchserverless/security_policy_data_source.go b/internal/service/opensearchserverless/security_policy_data_source.go index ea7494c167e0..3dc2a45d11af 100644 --- a/internal/service/opensearchserverless/security_policy_data_source.go +++ b/internal/service/opensearchserverless/security_policy_data_source.go @@ -5,7 +5,6 @@ package opensearchserverless import ( "context" - "time" "github.com/YakDriver/regexache" "github.com/aws/aws-sdk-go-v2/aws" @@ -16,11 +15,13 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/enum" 
"github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" + "github.com/hashicorp/terraform-provider-aws/internal/flex" + tfsmithy "github.com/hashicorp/terraform-provider-aws/internal/smithy" "github.com/hashicorp/terraform-provider-aws/names" ) // @SDKDataSource("aws_opensearchserverless_security_policy", name="Security Policy") -func DataSourceSecurityPolicy() *schema.Resource { +func dataSourceSecurityPolicy() *schema.Resource { return &schema.Resource{ ReadWithoutTimeout: dataSourceSecurityPolicyRead, @@ -73,31 +74,31 @@ func dataSourceSecurityPolicyRead(ctx context.Context, d *schema.ResourceData, m var diags diag.Diagnostics conn := meta.(*conns.AWSClient).OpenSearchServerlessClient(ctx) - securityPolicyName := d.Get(names.AttrName).(string) - securityPolicyType := d.Get(names.AttrType).(string) - securityPolicy, err := findSecurityPolicyByNameAndType(ctx, conn, securityPolicyName, securityPolicyType) + name := d.Get(names.AttrName).(string) + securityPolicy, err := findSecurityPolicyByNameAndType(ctx, conn, name, d.Get(names.AttrType).(string)) if err != nil { - return sdkdiag.AppendErrorf(diags, "reading OpenSearch Security Policy with name (%s) and type (%s): %s", securityPolicyName, securityPolicyType, err) - } - - policyBytes, err := securityPolicy.Policy.MarshalSmithyDocument() - if err != nil { - return sdkdiag.AppendErrorf(diags, "reading JSON policy document for OpenSearch Security Policy with name %s and type %s: %s", securityPolicyName, securityPolicyType, err) + return sdkdiag.AppendErrorf(diags, "reading OpenSearch Serverless Security Policy (%s): %s", name, err) } d.SetId(aws.ToString(securityPolicy.Name)) + d.Set(names.AttrCreatedDate, flex.Int64ToRFC3339StringValue(securityPolicy.CreatedDate)) d.Set(names.AttrDescription, securityPolicy.Description) + d.Set("last_modified_date", flex.Int64ToRFC3339StringValue(securityPolicy.LastModifiedDate)) d.Set(names.AttrName, securityPolicy.Name) - d.Set(names.AttrPolicy, string(policyBytes)) - 
d.Set("policy_version", securityPolicy.PolicyVersion) - d.Set(names.AttrType, securityPolicy.Type) + if securityPolicy.Policy != nil { + v, err := tfsmithy.DocumentToJSONString(securityPolicy.Policy) - createdDate := time.UnixMilli(aws.ToInt64(securityPolicy.CreatedDate)) - d.Set(names.AttrCreatedDate, createdDate.Format(time.RFC3339)) + if err != nil { + return sdkdiag.AppendFromErr(diags, err) + } - lastModifiedDate := time.UnixMilli(aws.ToInt64(securityPolicy.LastModifiedDate)) - d.Set("last_modified_date", lastModifiedDate.Format(time.RFC3339)) + d.Set(names.AttrPolicy, v) + } else { + d.Set(names.AttrPolicy, nil) + } + d.Set("policy_version", securityPolicy.PolicyVersion) + d.Set(names.AttrType, securityPolicy.Type) return diags } diff --git a/internal/service/opensearchserverless/security_policy_test.go b/internal/service/opensearchserverless/security_policy_test.go index 130aed3b2b0f..a6ce5173ca52 100644 --- a/internal/service/opensearchserverless/security_policy_test.go +++ b/internal/service/opensearchserverless/security_policy_test.go @@ -5,7 +5,6 @@ package opensearchserverless_test import ( "context" - "errors" "fmt" "testing" @@ -17,7 +16,6 @@ import ( "github.com/hashicorp/terraform-plugin-testing/terraform" "github.com/hashicorp/terraform-provider-aws/internal/acctest" "github.com/hashicorp/terraform-provider-aws/internal/conns" - "github.com/hashicorp/terraform-provider-aws/internal/create" tfopensearchserverless "github.com/hashicorp/terraform-provider-aws/internal/service/opensearchserverless" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/names" @@ -49,7 +47,7 @@ func TestAccOpenSearchServerlessSecurityPolicy_basic(t *testing.T) { }, { ResourceName: resourceName, - ImportStateIdFunc: testAccSecurityPolicyImportStateIdFunc(resourceName), + ImportStateIdFunc: acctest.AttrsImportStateIdFunc(resourceName, "/", names.AttrName, names.AttrType), ImportState: true, ImportStateVerify: true, 
}, @@ -164,7 +162,7 @@ func TestAccOpenSearchServerlessSecurityPolicy_string(t *testing.T) { }, { ResourceName: resourceName, - ImportStateIdFunc: testAccSecurityPolicyImportStateIdFunc(resourceName), + ImportStateIdFunc: acctest.AttrsImportStateIdFunc(resourceName, "/", names.AttrName, names.AttrType), ImportState: true, ImportStateVerify: true, ImportStateVerifyIgnore: []string{names.AttrPolicy}, // JSON is semantically correct but can be set in state in a different order @@ -226,48 +224,34 @@ func testAccCheckSecurityPolicyDestroy(ctx context.Context) resource.TestCheckFu return err } - return create.Error(names.OpenSearchServerless, create.ErrActionCheckingDestroyed, tfopensearchserverless.ResNameSecurityPolicy, rs.Primary.ID, errors.New("not destroyed")) + return fmt.Errorf("OpenSearch Serverless Security Policy %s still exists", rs.Primary.ID) } return nil } } -func testAccCheckSecurityPolicyExists(ctx context.Context, name string, securitypolicy *types.SecurityPolicyDetail) resource.TestCheckFunc { +func testAccCheckSecurityPolicyExists(ctx context.Context, n string, v *types.SecurityPolicyDetail) resource.TestCheckFunc { return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[name] + rs, ok := s.RootModule().Resources[n] if !ok { - return create.Error(names.OpenSearchServerless, create.ErrActionCheckingExistence, tfopensearchserverless.ResNameSecurityPolicy, name, errors.New("not found")) - } - - if rs.Primary.ID == "" { - return create.Error(names.OpenSearchServerless, create.ErrActionCheckingExistence, tfopensearchserverless.ResNameSecurityPolicy, name, errors.New("not set")) + return fmt.Errorf("Not found: %s", n) } conn := acctest.Provider.Meta().(*conns.AWSClient).OpenSearchServerlessClient(ctx) - resp, err := tfopensearchserverless.FindSecurityPolicyByNameAndType(ctx, conn, rs.Primary.ID, rs.Primary.Attributes[names.AttrType]) + + output, err := tfopensearchserverless.FindSecurityPolicyByNameAndType(ctx, conn, rs.Primary.ID, 
rs.Primary.Attributes[names.AttrType]) if err != nil { - return create.Error(names.OpenSearchServerless, create.ErrActionCheckingExistence, tfopensearchserverless.ResNameSecurityPolicy, rs.Primary.ID, err) + return err } - *securitypolicy = *resp + *v = *output return nil } } -func testAccSecurityPolicyImportStateIdFunc(resourceName string) resource.ImportStateIdFunc { - return func(s *terraform.State) (string, error) { - rs, ok := s.RootModule().Resources[resourceName] - if !ok { - return "", fmt.Errorf("not found: %s", resourceName) - } - - return fmt.Sprintf("%s/%s", rs.Primary.Attributes[names.AttrName], rs.Primary.Attributes[names.AttrType]), nil - } -} - func testAccPreCheck(ctx context.Context, t *testing.T) { conn := acctest.Provider.Meta().(*conns.AWSClient).OpenSearchServerlessClient(ctx) diff --git a/internal/service/opensearchserverless/service_endpoint_resolver_gen.go b/internal/service/opensearchserverless/service_endpoint_resolver_gen.go index 51a4ec07e87e..0779dc1fb5c7 100644 --- a/internal/service/opensearchserverless/service_endpoint_resolver_gen.go +++ b/internal/service/opensearchserverless/service_endpoint_resolver_gen.go @@ -62,7 +62,7 @@ func (r resolverV2) ResolveEndpoint(ctx context.Context, params opensearchserver }) params.UseFIPS = aws.Bool(false) } else { - err = fmt.Errorf("looking up opensearchserverless endpoint %q: %s", hostname, err) + err = fmt.Errorf("looking up opensearchserverless endpoint %q: %w", hostname, err) return } } else { diff --git a/internal/service/opensearchserverless/service_endpoints_gen_test.go b/internal/service/opensearchserverless/service_endpoints_gen_test.go index 14d81ea391e2..97b0f49dd2da 100644 --- a/internal/service/opensearchserverless/service_endpoints_gen_test.go +++ b/internal/service/opensearchserverless/service_endpoints_gen_test.go @@ -521,7 +521,7 @@ func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { ) } -var errCancelOperation = fmt.Errorf("Test: Canceling request") 
+var errCancelOperation = errors.New("Test: Canceling request") func addCancelRequestMiddleware() func(*middleware.Stack) error { return func(stack *middleware.Stack) error { diff --git a/internal/service/opensearchserverless/service_package_gen.go b/internal/service/opensearchserverless/service_package_gen.go index 3384cb1182b3..b74181c987fd 100644 --- a/internal/service/opensearchserverless/service_package_gen.go +++ b/internal/service/opensearchserverless/service_package_gen.go @@ -7,7 +7,6 @@ import ( "unique" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/opensearchserverless" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -97,7 +96,7 @@ func (p *servicePackage) FrameworkResources(ctx context.Context) []*inttypes.Ser func (p *servicePackage) SDKDataSources(ctx context.Context) []*inttypes.ServicePackageSDKDataSource { return []*inttypes.ServicePackageSDKDataSource{ { - Factory: DataSourceSecurityPolicy, + Factory: dataSourceSecurityPolicy, TypeName: "aws_opensearchserverless_security_policy", Name: "Security Policy", Region: unique.Make(inttypes.ResourceRegionDefault()), @@ -138,7 +137,7 @@ func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) ( func(o *opensearchserverless.Options) { if inContext, ok := conns.FromContext(ctx); ok && inContext.VCREnabled() { tflog.Info(ctx, "overriding retry behavior to immediately return VCR errors") - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(vcr.InteractionNotFoundRetryableFunc)) + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), vcr.InteractionNotFoundRetryableFunc) } }, withExtraOptions(ctx, p, config), diff --git a/internal/service/opensearchserverless/sweep.go b/internal/service/opensearchserverless/sweep.go index 960b0a2616ba..069fb6dbefa0 100644 --- 
a/internal/service/opensearchserverless/sweep.go +++ b/internal/service/opensearchserverless/sweep.go @@ -50,7 +50,7 @@ func sweepAccessPolicies(region string) error { } client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %w", err) + return fmt.Errorf("getting client: %w", err) } conn := client.OpenSearchServerlessClient(ctx) input := &opensearchserverless.ListAccessPoliciesInput{ @@ -97,7 +97,7 @@ func sweepCollections(region string) error { } client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %w", err) + return fmt.Errorf("getting client: %w", err) } conn := client.OpenSearchServerlessClient(ctx) input := &opensearchserverless.ListCollectionsInput{} @@ -140,7 +140,7 @@ func sweepSecurityConfigs(region string) error { } client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %w", err) + return fmt.Errorf("getting client: %w", err) } conn := client.OpenSearchServerlessClient(ctx) input := &opensearchserverless.ListSecurityConfigsInput{ @@ -185,7 +185,7 @@ func sweepSecurityPolicies(region string) error { } client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %w", err) + return fmt.Errorf("getting client: %w", err) } conn := client.OpenSearchServerlessClient(ctx) inputEncryption := &opensearchserverless.ListSecurityPoliciesInput{ @@ -259,7 +259,7 @@ func sweepVPCEndpoints(region string) error { } client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %w", err) + return fmt.Errorf("getting client: %w", err) } conn := client.OpenSearchServerlessClient(ctx) input := &opensearchserverless.ListVpcEndpointsInput{} diff --git a/internal/service/opensearchserverless/tags_gen.go b/internal/service/opensearchserverless/tags_gen.go index 
27d73891f60d..550b3278a6ef 100644 --- a/internal/service/opensearchserverless/tags_gen.go +++ b/internal/service/opensearchserverless/tags_gen.go @@ -3,8 +3,8 @@ package opensearchserverless import ( "context" - "fmt" + "github.com/YakDriver/smarterr" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/opensearchserverless" awstypes "github.com/aws/aws-sdk-go-v2/service/opensearchserverless/types" @@ -27,7 +27,7 @@ func listTags(ctx context.Context, conn *opensearchserverless.Client, identifier output, err := conn.ListTagsForResource(ctx, &input, optFns...) if err != nil { - return tftags.New(ctx, nil), err + return tftags.New(ctx, nil), smarterr.NewError(err) } return keyValueTags(ctx, output.Tags), nil @@ -39,7 +39,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri tags, err := listTags(ctx, meta.(*conns.AWSClient).OpenSearchServerlessClient(ctx), identifier) if err != nil { - return err + return smarterr.NewError(err) } if inContext, ok := tftags.FromContext(ctx); ok { @@ -117,7 +117,7 @@ func updateTags(ctx context.Context, conn *opensearchserverless.Client, identifi _, err := conn.UntagResource(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("untagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } @@ -132,7 +132,7 @@ func updateTags(ctx context.Context, conn *opensearchserverless.Client, identifi _, err := conn.TagResource(ctx, &input, optFns...) 
if err != nil { - return fmt.Errorf("tagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } diff --git a/internal/service/organizations/account.go b/internal/service/organizations/account.go index d6b357b11fa2..f699f447fa6c 100644 --- a/internal/service/organizations/account.go +++ b/internal/service/organizations/account.go @@ -24,6 +24,7 @@ import ( // nosemgrep:ci.semgrep.aws.multiple-service-imports "github.com/hashicorp/terraform-provider-aws/internal/enum" "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" + "github.com/hashicorp/terraform-provider-aws/internal/provider/sdkv2/importer" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/names" @@ -31,6 +32,12 @@ import ( // nosemgrep:ci.semgrep.aws.multiple-service-imports // @SDKResource("aws_organizations_account", name="Account") // @Tags(identifierAttribute="id") +// @IdentityAttribute("id") +// @CustomImport +// @Testing(tagsTest=false, identityTest=false) +// @Testing(existsType="github.com/aws/aws-sdk-go-v2/service/organizations/types;awstypes;awstypes.Account") +// @Testing(serialize=true) +// @Testing(preCheck="github.com/hashicorp/terraform-provider-aws/internal/acctest;acctest.PreCheckOrganizationsEnabled") func resourceAccount() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceAccountCreate, @@ -139,8 +146,8 @@ func resourceAccountCreate(ctx context.Context, d *schema.ResourceData, meta any input.RoleName = aws.String(v.(string)) } - outputRaw, err := tfresource.RetryWhenIsA[*awstypes.FinalizingOrganizationException](ctx, organizationFinalizationTimeout, - func() (any, error) { + outputRaw, err := tfresource.RetryWhenIsA[any, *awstypes.FinalizingOrganizationException](ctx, organizationFinalizationTimeout, + func(ctx context.Context) (any, error) { 
return conn.CreateGovCloudAccount(ctx, input) }) @@ -164,8 +171,8 @@ func resourceAccountCreate(ctx context.Context, d *schema.ResourceData, meta any input.RoleName = aws.String(v.(string)) } - outputRaw, err := tfresource.RetryWhenIsA[*awstypes.FinalizingOrganizationException](ctx, organizationFinalizationTimeout, - func() (any, error) { + outputRaw, err := tfresource.RetryWhenIsA[any, *awstypes.FinalizingOrganizationException](ctx, organizationFinalizationTimeout, + func(ctx context.Context) (any, error) { return conn.CreateAccount(ctx, input) }) @@ -331,16 +338,24 @@ func resourceAccountDelete(ctx context.Context, d *schema.ResourceData, meta any } func resourceAccountImportState(ctx context.Context, d *schema.ResourceData, meta any) ([]*schema.ResourceData, error) { - if strings.Contains(d.Id(), "_") { - parts := strings.Split(d.Id(), "_") - if len(parts) != 2 || parts[0] == "" || parts[1] == "" { - return nil, fmt.Errorf("unexpected format of ID (%s), expected _ or ", d.Id()) - } + if d.Id() != "" { + if strings.Contains(d.Id(), "_") { + parts := strings.Split(d.Id(), "_") + if len(parts) != 2 || parts[0] == "" || parts[1] == "" { + return nil, fmt.Errorf("unexpected format of ID (%s), expected _ or ", d.Id()) + } - d.SetId(parts[0]) - d.Set("iam_user_access_to_billing", parts[1]) + d.SetId(parts[0]) + d.Set("iam_user_access_to_billing", parts[1]) + } else { + d.SetId(d.Id()) + } } else { - d.SetId(d.Id()) + identitySpec := importer.IdentitySpec(ctx) + + if err := importer.GlobalSingleParameterized(ctx, d, identitySpec, meta.(importer.AWSClient)); err != nil { + return nil, err + } } return []*schema.ResourceData{d}, nil diff --git a/internal/service/organizations/account_test.go b/internal/service/organizations/account_test.go index d51b120a78cb..727b29b51d7e 100644 --- a/internal/service/organizations/account_test.go +++ b/internal/service/organizations/account_test.go @@ -12,8 +12,15 @@ import ( awstypes 
"github.com/aws/aws-sdk-go-v2/service/organizations/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/knownvalue" + "github.com/hashicorp/terraform-plugin-testing/plancheck" + "github.com/hashicorp/terraform-plugin-testing/statecheck" "github.com/hashicorp/terraform-plugin-testing/terraform" + "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" + "github.com/hashicorp/terraform-plugin-testing/tfversion" "github.com/hashicorp/terraform-provider-aws/internal/acctest" + tfknownvalue "github.com/hashicorp/terraform-provider-aws/internal/acctest/knownvalue" + tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" "github.com/hashicorp/terraform-provider-aws/internal/conns" tforganizations "github.com/hashicorp/terraform-provider-aws/internal/service/organizations" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" @@ -52,7 +59,7 @@ func testAccAccount_basic(t *testing.T) { Config: testAccAccountConfig_basic(name, email), Check: resource.ComposeAggregateTestCheckFunc( testAccCheckAccountExists(ctx, resourceName, &v), - acctest.MatchResourceAttrGlobalARN(ctx, resourceName, names.AttrARN, "organizations", regexache.MustCompile("account/o-.+")), + acctest.MatchResourceAttrGlobalARN(ctx, resourceName, names.AttrARN, "organizations", regexache.MustCompile(`account/`+organizationIDRegexPattern+`/\d{12}$`)), resource.TestCheckResourceAttr(resourceName, names.AttrEmail, email), resource.TestCheckResourceAttrSet(resourceName, "joined_method"), acctest.CheckResourceAttrRFC3339(resourceName, "joined_timestamp"), @@ -86,7 +93,7 @@ func testAccAccount_CloseOnDeletion(t *testing.T) { Config: testAccAccountConfig_closeOnDeletion(name, email), Check: resource.ComposeAggregateTestCheckFunc( testAccCheckAccountExists(ctx, resourceName, &v), - acctest.MatchResourceAttrGlobalARN(ctx, 
resourceName, names.AttrARN, "organizations", regexache.MustCompile("account/o-.+")), + acctest.MatchResourceAttrGlobalARN(ctx, resourceName, names.AttrARN, "organizations", regexache.MustCompile(`account/`+organizationIDRegexPattern+`/\d{12}$`)), resource.TestCheckResourceAttr(resourceName, names.AttrEmail, email), resource.TestCheckResourceAttr(resourceName, "govcloud_id", ""), resource.TestCheckResourceAttrSet(resourceName, "joined_method"), @@ -159,7 +166,7 @@ func testAccAccount_AccountUpdate(t *testing.T) { Config: testAccAccountConfig_closeOnDeletion(name, email), Check: resource.ComposeAggregateTestCheckFunc( testAccCheckAccountExists(ctx, resourceName, &v), - acctest.MatchResourceAttrGlobalARN(ctx, resourceName, names.AttrARN, "organizations", regexache.MustCompile("account/o-.+")), + acctest.MatchResourceAttrGlobalARN(ctx, resourceName, names.AttrARN, "organizations", regexache.MustCompile(`account/`+organizationIDRegexPattern+`/\d{12}$`)), resource.TestCheckResourceAttr(resourceName, names.AttrEmail, email), resource.TestCheckResourceAttrSet(resourceName, "joined_method"), acctest.CheckResourceAttrRFC3339(resourceName, "joined_timestamp"), @@ -253,6 +260,192 @@ func testAccAccount_govCloud(t *testing.T) { }) } +func testAccOrganizationsAccount_IdentitySerial(t *testing.T) { + t.Helper() + + testCases := map[string]func(t *testing.T){ + acctest.CtBasic: testAccOrganizationsAccount_Identity_Basic, + "ExistingResource": testAccOrganizationsAccount_Identity_ExistingResource, + } + + acctest.RunSerialTests1Level(t, testCases, 0) +} + +func testAccOrganizationsAccount_Identity_Basic(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.Account + resourceName := "aws_organizations_account.test" + orgsEmailDomain := acctest.SkipIfEnvVarNotSet(t, "TEST_AWS_ORGANIZATION_ACCOUNT_EMAIL_DOMAIN") + rInt := sdkacctest.RandInt() + name := fmt.Sprintf("tf_acctest_%d", rInt) + email := fmt.Sprintf("tf-acctest+%d@%s", rInt, orgsEmailDomain) + + resource.Test(t, 
resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckOrganizationsEnabled(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.OrganizationsServiceID), + CheckDestroy: testAccCheckAccountDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + // Step 1: Setup + { + Config: testAccAccountConfig_closeOnDeletion(name, email), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckAccountExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrID: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrID)), + }, + }, + + // Step 2: Import command + { + Config: testAccAccountConfig_closeOnDeletion(name, email), + ImportStateKind: resource.ImportCommandWithID, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "close_on_deletion", + "create_govcloud", + "govcloud_id", + }, + }, + + // Step 3: Import block with Import ID + { + Config: testAccAccountConfig_closeOnDeletion(name, email), + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + }, + }, + ExpectNonEmptyPlan: true, + }, + + // Step 4: Import block with Resource Identity + { + Config: testAccAccountConfig_closeOnDeletion(name, email), + ResourceName: resourceName, + ImportState: true, + ImportStateKind: 
resource.ImportBlockWithResourceIdentity, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + }, + }, + ExpectNonEmptyPlan: true, + }, + }, + }) +} + +func testAccOrganizationsAccount_Identity_ExistingResource(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.Account + resourceName := "aws_organizations_account.test" + orgsEmailDomain := acctest.SkipIfEnvVarNotSet(t, "TEST_AWS_ORGANIZATION_ACCOUNT_EMAIL_DOMAIN") + rInt := sdkacctest.RandInt() + name := fmt.Sprintf("tf_acctest_%d", rInt) + email := fmt.Sprintf("tf-acctest+%d@%s", rInt, orgsEmailDomain) + + resource.Test(t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckOrganizationsEnabled(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.OrganizationsServiceID), + CheckDestroy: testAccCheckAccountDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ExternalProviders: map[string]resource.ExternalProvider{ + "aws": { + Source: "hashicorp/aws", + VersionConstraint: "5.100.0", + }, + }, + Config: testAccAccountConfig_closeOnDeletion(name, email), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckAccountExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: v6.0 Identity set on refresh + { + ExternalProviders: map[string]resource.ExternalProvider{ + "aws": { + Source: "hashicorp/aws", + VersionConstraint: "6.0.0", + }, + }, + Config: testAccAccountConfig_closeOnDeletion(name, email), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckAccountExists(ctx, resourceName, &v), + ), + ConfigPlanChecks: 
resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrID: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrID)), + }, + }, + + // Step 3: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Config: testAccAccountConfig_closeOnDeletion(name, email), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrID: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrID)), + }, + }, + }, + }) +} + func testAccCheckAccountDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { conn := acctest.Provider.Meta().(*conns.AWSClient).OrganizationsClient(ctx) diff --git a/internal/service/organizations/delegated_administrator.go b/internal/service/organizations/delegated_administrator.go index db93d8f36775..de6eef098f66 100644 --- a/internal/service/organizations/delegated_administrator.go +++ b/internal/service/organizations/delegated_administrator.go @@ -26,16 +26,22 @@ import ( ) // @SDKResource("aws_organizations_delegated_administrator", name="Delegated Administrator") +// 
@IdentityAttribute("service_principal") +// @IdentityAttribute("delegated_account_id", resourceAttributeName="account_id") +// @IdAttrFormat("{account_id}/{service_principal}") +// @ImportIDHandler("delegatedAdministratorImportID") +// @Testing(identityTest=false) +// @Testing(existsType="github.com/aws/aws-sdk-go-v2/service/organizations/types;awstypes;awstypes.DelegatedAdministrator") +// @Testing(serialize=true) +// @Testing(useAlternateAccount=true) +// @Testing(preCheck="github.com/hashicorp/terraform-provider-aws/internal/acctest;acctest.PreCheckOrganizationManagementAccount") +// @Testing(generator=false) func resourceDelegatedAdministrator() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceDelegatedAdministratorCreate, ReadWithoutTimeout: resourceDelegatedAdministratorRead, DeleteWithoutTimeout: resourceDelegatedAdministratorDelete, - Importer: &schema.ResourceImporter{ - StateContext: schema.ImportStatePassthroughContext, - }, - Schema: map[string]*schema.Schema{ names.AttrAccountID: { Type: schema.TypeString, @@ -108,10 +114,8 @@ func resourceDelegatedAdministratorRead(ctx context.Context, d *schema.ResourceD var diags diag.Diagnostics conn := meta.(*conns.AWSClient).OrganizationsClient(ctx) - accountID, servicePrincipal, err := delegatedAdministratorParseResourceID(d.Id()) - if err != nil { - return sdkdiag.AppendFromErr(diags, err) - } + accountID := d.Get(names.AttrAccountID).(string) + servicePrincipal := d.Get("service_principal").(string) delegatedAccount, err := findDelegatedAdministratorByTwoPartKey(ctx, conn, accountID, servicePrincipal) @@ -142,13 +146,10 @@ func resourceDelegatedAdministratorDelete(ctx context.Context, d *schema.Resourc var diags diag.Diagnostics conn := meta.(*conns.AWSClient).OrganizationsClient(ctx) - accountID, servicePrincipal, err := delegatedAdministratorParseResourceID(d.Id()) - if err != nil { - return sdkdiag.AppendFromErr(diags, err) - } + accountID := d.Get(names.AttrAccountID).(string) + 
servicePrincipal := d.Get("service_principal").(string) - log.Printf("[DEBUG] Deleting Organizations Delegated Administrator: %s", d.Id()) - _, err = conn.DeregisterDelegatedAdministrator(ctx, &organizations.DeregisterDelegatedAdministratorInput{ + _, err := conn.DeregisterDelegatedAdministrator(ctx, &organizations.DeregisterDelegatedAdministratorInput{ AccountId: aws.String(accountID), ServicePrincipal: aws.String(servicePrincipal), }) @@ -165,11 +166,11 @@ func resourceDelegatedAdministratorDelete(ctx context.Context, d *schema.Resourc } func findDelegatedAdministratorByTwoPartKey(ctx context.Context, conn *organizations.Client, accountID, servicePrincipal string) (*awstypes.DelegatedAdministrator, error) { - input := &organizations.ListDelegatedAdministratorsInput{ + input := organizations.ListDelegatedAdministratorsInput{ ServicePrincipal: aws.String(servicePrincipal), } - return findDelegatedAdministrator(ctx, conn, input, func(v *awstypes.DelegatedAdministrator) bool { + return findDelegatedAdministrator(ctx, conn, &input, func(v *awstypes.DelegatedAdministrator) bool { return aws.ToString(v.Id) == accountID }) } @@ -208,18 +209,25 @@ func findDelegatedAdministrators(ctx context.Context, conn *organizations.Client const delegatedAdministratorResourceIDSeparator = "/" func delegatedAdministratorCreateResourceID(accountID, servicePrincipal string) string { - parts := []string{accountID, servicePrincipal} - id := strings.Join(parts, delegatedAdministratorResourceIDSeparator) + return accountID + delegatedAdministratorResourceIDSeparator + servicePrincipal +} + +type delegatedAdministratorImportID struct{} - return id +func (delegatedAdministratorImportID) Create(d *schema.ResourceData) string { + return delegatedAdministratorCreateResourceID(d.Get(names.AttrAccountID).(string), d.Get("service_principal").(string)) } -func delegatedAdministratorParseResourceID(id string) (string, string, error) { +func (delegatedAdministratorImportID) Parse(id string) (string, 
map[string]string, error) { parts := strings.Split(id, delegatedAdministratorResourceIDSeparator) if len(parts) == 2 && parts[0] != "" && parts[1] != "" { - return parts[0], parts[1], nil + result := map[string]string{ + names.AttrAccountID: parts[0], + "service_principal": parts[1], + } + return id, result, nil } - return "", "", fmt.Errorf("unexpected format for ID (%[1]s), expected ACCOUNTID%[2]sSERVICEPRINCIPAL", id, delegatedAdministratorResourceIDSeparator) + return "", nil, fmt.Errorf("unexpected format for ID (%[1]s), expected ACCOUNTID%[2]sSERVICEPRINCIPAL", id, delegatedAdministratorResourceIDSeparator) } diff --git a/internal/service/organizations/delegated_administrator_test.go b/internal/service/organizations/delegated_administrator_test.go index bef16c92c352..b497aa9249af 100644 --- a/internal/service/organizations/delegated_administrator_test.go +++ b/internal/service/organizations/delegated_administrator_test.go @@ -8,10 +8,19 @@ import ( "fmt" "testing" + "github.com/YakDriver/regexache" awstypes "github.com/aws/aws-sdk-go-v2/service/organizations/types" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/knownvalue" + "github.com/hashicorp/terraform-plugin-testing/plancheck" + "github.com/hashicorp/terraform-plugin-testing/statecheck" "github.com/hashicorp/terraform-plugin-testing/terraform" + "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" + "github.com/hashicorp/terraform-plugin-testing/tfversion" "github.com/hashicorp/terraform-provider-aws/internal/acctest" + tfknownvalue "github.com/hashicorp/terraform-provider-aws/internal/acctest/knownvalue" + tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" "github.com/hashicorp/terraform-provider-aws/internal/conns" tforganizations "github.com/hashicorp/terraform-provider-aws/internal/service/organizations" 
"github.com/hashicorp/terraform-provider-aws/internal/tfresource" @@ -22,7 +31,7 @@ func testAccDelegatedAdministrator_basic(t *testing.T) { ctx := acctest.Context(t) var organization awstypes.DelegatedAdministrator resourceName := "aws_organizations_delegated_administrator.test" - servicePrincipal := "config-multiaccountsetup.amazonaws.com" + servicePrincipal := "securitylake.amazonaws.com" dataSourceIdentity := "data.aws_caller_identity.delegated" resource.Test(t, resource.TestCase{ @@ -39,6 +48,7 @@ func testAccDelegatedAdministrator_basic(t *testing.T) { Config: testAccDelegatedAdministratorConfig_basic(servicePrincipal), Check: resource.ComposeTestCheckFunc( testAccCheckDelegatedAdministratorExists(ctx, resourceName, &organization), + acctest.MatchResourceAttrGlobalARN(ctx, resourceName, names.AttrARN, "organizations", regexache.MustCompile(`account/o-[0-9a-z]{10}/\d{12}$`)), resource.TestCheckResourceAttrPair(resourceName, names.AttrAccountID, dataSourceIdentity, names.AttrAccountID), acctest.CheckResourceAttrRFC3339(resourceName, "delegation_enabled_date"), acctest.CheckResourceAttrRFC3339(resourceName, "joined_timestamp"), @@ -53,7 +63,7 @@ func testAccDelegatedAdministrator_disappears(t *testing.T) { ctx := acctest.Context(t) var organization awstypes.DelegatedAdministrator resourceName := "aws_organizations_delegated_administrator.test" - servicePrincipal := "config-multiaccountsetup.amazonaws.com" + servicePrincipal := "securitylake.amazonaws.com" resource.Test(t, resource.TestCase{ PreCheck: func() { @@ -77,6 +87,157 @@ func testAccDelegatedAdministrator_disappears(t *testing.T) { }) } +func testAccOrganizationsDelegatedAdministrator_IdentitySerial(t *testing.T) { + t.Helper() + + testCases := map[string]func(t *testing.T){ + acctest.CtBasic: testAccOrganizationsDelegatedAdministrator_Identity_Basic, + "ExistingResource": testAccOrganizationsDelegatedAdministrator_Identity_ExistingResource, + } + + acctest.RunSerialTests1Level(t, testCases, 0) +} + 
+func testAccOrganizationsDelegatedAdministrator_Identity_Basic(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.DelegatedAdministrator + resourceName := "aws_organizations_delegated_administrator.test" + servicePrincipal := "securitylake.amazonaws.com" + + resource.Test(t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckAlternateAccount(t) + acctest.PreCheckOrganizationManagementAccount(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.OrganizationsServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + CheckDestroy: testAccCheckDelegatedAdministratorDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Setup + { + Config: testAccDelegatedAdministratorConfig_basic(servicePrincipal), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDelegatedAdministratorExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectAttributeFormat(resourceName, tfjsonpath.New(names.AttrID), "{account_id}/{service_principal}"), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + "service_principal": knownvalue.NotNull(), + "delegated_account_id": knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New("service_principal")), + statecheck.ExpectIdentityValueMatchesStateAtPath(resourceName, tfjsonpath.New("delegated_account_id"), tfjsonpath.New(names.AttrAccountID)), + }, + }, + + // Step 2: Import command + { + Config: testAccDelegatedAdministratorConfig_basic(servicePrincipal), + ImportStateKind: resource.ImportCommandWithID, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + + // Step 3: Import block with Import ID + { + Config: 
testAccDelegatedAdministratorConfig_basic(servicePrincipal), + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New("service_principal"), knownvalue.NotNull()), + }, + }, + }, + + // Step 4: Import block with Resource Identity + { + Config: testAccDelegatedAdministratorConfig_basic(servicePrincipal), + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithResourceIdentity, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New("service_principal"), knownvalue.NotNull()), + }, + }, + }, + }, + }) +} + +func testAccOrganizationsDelegatedAdministrator_Identity_ExistingResource(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.DelegatedAdministrator + resourceName := "aws_organizations_delegated_administrator.test" + servicePrincipal := "securitylake.amazonaws.com" + providers := make(map[string]*schema.Provider) + + resource.Test(t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckAlternateAccount(t) + acctest.PreCheckOrganizationManagementAccount(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.OrganizationsServiceID), + CheckDestroy: testAccCheckDelegatedAdministratorDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ExternalProviders: map[string]resource.ExternalProvider{ + "aws": { + Source: "hashicorp/aws", + VersionConstraint: "6.4.0", + }, + }, + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesNamed(ctx, t, providers, acctest.ProviderNameAlternate), + Config: testAccDelegatedAdministratorConfig_basic(servicePrincipal), + Check: 
resource.ComposeAggregateTestCheckFunc( + testAccCheckDelegatedAdministratorExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesNamedAlternate(ctx, t, providers), + Config: testAccDelegatedAdministratorConfig_basic(servicePrincipal), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + "service_principal": knownvalue.NotNull(), + "delegated_account_id": knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New("service_principal")), + statecheck.ExpectIdentityValueMatchesStateAtPath(resourceName, tfjsonpath.New("delegated_account_id"), tfjsonpath.New(names.AttrAccountID)), + }, + }, + }, + }) +} + func testAccCheckDelegatedAdministratorDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { conn := acctest.Provider.Meta().(*conns.AWSClient).OrganizationsClient(ctx) diff --git a/internal/service/organizations/delegated_administrators_data_source_test.go b/internal/service/organizations/delegated_administrators_data_source_test.go index f27cc9ef8055..6a0923f52d15 100644 --- a/internal/service/organizations/delegated_administrators_data_source_test.go +++ b/internal/service/organizations/delegated_administrators_data_source_test.go @@ -15,7 +15,7 @@ import ( func testAccDelegatedAdministratorsDataSource_basic(t *testing.T) { ctx := acctest.Context(t) dataSourceName := 
"data.aws_organizations_delegated_administrators.test" - servicePrincipal := "config-multiaccountsetup.amazonaws.com" + servicePrincipal := "securitylake.amazonaws.com" resource.Test(t, resource.TestCase{ PreCheck: func() { diff --git a/internal/service/organizations/generate.go b/internal/service/organizations/generate.go index 6042a9536b24..4c9e3d33be9c 100644 --- a/internal/service/organizations/generate.go +++ b/internal/service/organizations/generate.go @@ -3,6 +3,8 @@ //go:generate go run ../../generate/tags/main.go -ListTags -ListTagsOpPaginated -ListTagsInIDElem=ResourceId -ServiceTagsSlice -TagInIDElem=ResourceId -UpdateTags //go:generate go run ../../generate/servicepackage/main.go +//go:generate go run ../../generate/tagstests/main.go +//go:generate go run ../../generate/identitytests/main.go // ONLY generate directives and package declaration! Do not add anything else to this file. package organizations diff --git a/internal/service/organizations/organization.go b/internal/service/organizations/organization.go index 036993b7697c..6e4082ec86e9 100644 --- a/internal/service/organizations/organization.go +++ b/internal/service/organizations/organization.go @@ -21,12 +21,20 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" "github.com/hashicorp/terraform-provider-aws/internal/flex" + "github.com/hashicorp/terraform-provider-aws/internal/provider/sdkv2/importer" tfslices "github.com/hashicorp/terraform-provider-aws/internal/slices" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/names" ) // @SDKResource("aws_organizations_organization", name="Organization") +// @IdentityAttribute("id") +// @CustomImport +// @Testing(existsType="github.com/aws/aws-sdk-go-v2/service/organizations/types;awstypes;awstypes.Organization") +// @Testing(serialize=true) +// @Testing(preIdentityVersion="6.4.0") +// 
@Testing(generator=false) +// @Testing(preCheck="github.com/hashicorp/terraform-provider-aws/internal/acctest;acctest.PreCheckOrganizationManagementAccount") func resourceOrganization() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceOrganizationCreate, @@ -378,10 +386,15 @@ func resourceOrganizationDelete(ctx context.Context, d *schema.ResourceData, met } func resourceOrganizationImport(ctx context.Context, d *schema.ResourceData, meta any) ([]*schema.ResourceData, error) { + identitySpec := importer.IdentitySpec(ctx) + + if err := importer.GlobalSingleParameterized(ctx, d, identitySpec, meta.(importer.AWSClient)); err != nil { + return nil, err + } + conn := meta.(*conns.AWSClient).OrganizationsClient(ctx) org, err := findOrganization(ctx, conn) - if err != nil { return nil, err } diff --git a/internal/service/organizations/organization_identity_gen_test.go b/internal/service/organizations/organization_identity_gen_test.go new file mode 100644 index 000000000000..ae34174c2f13 --- /dev/null +++ b/internal/service/organizations/organization_identity_gen_test.go @@ -0,0 +1,218 @@ +// Code generated by internal/generate/identitytests/main.go; DO NOT EDIT. 
+ +package organizations_test + +import ( + "testing" + + awstypes "github.com/aws/aws-sdk-go-v2/service/organizations/types" + "github.com/hashicorp/terraform-plugin-testing/config" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/knownvalue" + "github.com/hashicorp/terraform-plugin-testing/plancheck" + "github.com/hashicorp/terraform-plugin-testing/statecheck" + "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" + "github.com/hashicorp/terraform-plugin-testing/tfversion" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + tfknownvalue "github.com/hashicorp/terraform-provider-aws/internal/acctest/knownvalue" + tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func testAccOrganizationsOrganization_IdentitySerial(t *testing.T) { + t.Helper() + + testCases := map[string]func(t *testing.T){ + acctest.CtBasic: testAccOrganizationsOrganization_Identity_Basic, + "ExistingResource": testAccOrganizationsOrganization_Identity_ExistingResource, + "ExistingResourceNoRefresh": testAccOrganizationsOrganization_Identity_ExistingResource_NoRefresh_NoChange, + } + + acctest.RunSerialTests1Level(t, testCases, 0) +} + +func testAccOrganizationsOrganization_Identity_Basic(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.Organization + resourceName := "aws_organizations_organization.test" + + acctest.Test(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckOrganizationManagementAccount(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.OrganizationsServiceID), + CheckDestroy: testAccCheckOrganizationDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + // Step 1: Setup + { + 
ConfigDirectory: config.StaticDirectory("testdata/Organization/basic/"), + ConfigVariables: config.Variables{}, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckOrganizationExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrID: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrID)), + }, + }, + + // Step 2: Import command + { + ConfigDirectory: config.StaticDirectory("testdata/Organization/basic/"), + ConfigVariables: config.Variables{}, + ImportStateKind: resource.ImportCommandWithID, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + + // Step 3: Import block with Import ID + { + ConfigDirectory: config.StaticDirectory("testdata/Organization/basic/"), + ConfigVariables: config.Variables{}, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + }, + }, + }, + + // Step 4: Import block with Resource Identity + { + ConfigDirectory: config.StaticDirectory("testdata/Organization/basic/"), + ConfigVariables: config.Variables{}, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithResourceIdentity, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + }, + }, + }, + }, + }) +} + +// Resource Identity was added after v6.4.0 +func testAccOrganizationsOrganization_Identity_ExistingResource(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.Organization + resourceName := "aws_organizations_organization.test" + + 
acctest.Test(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckOrganizationManagementAccount(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.OrganizationsServiceID), + CheckDestroy: testAccCheckOrganizationDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/Organization/basic_v6.4.0/"), + ConfigVariables: config.Variables{}, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckOrganizationExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Organization/basic/"), + ConfigVariables: config.Variables{}, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrID: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrID)), + }, + }, + }, + }) +} + +// Resource Identity was added after v6.4.0 +func testAccOrganizationsOrganization_Identity_ExistingResource_NoRefresh_NoChange(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.Organization + resourceName := "aws_organizations_organization.test" + + acctest.Test(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + 
tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckOrganizationManagementAccount(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.OrganizationsServiceID), + CheckDestroy: testAccCheckOrganizationDestroy(ctx), + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/Organization/basic_v6.4.0/"), + ConfigVariables: config.Variables{}, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckOrganizationExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Organization/basic/"), + ConfigVariables: config.Variables{}, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + }, + }) +} diff --git a/internal/service/organizations/organization_test.go b/internal/service/organizations/organization_test.go index c2e825bee976..05a47c04f703 100644 --- a/internal/service/organizations/organization_test.go +++ b/internal/service/organizations/organization_test.go @@ -40,10 +40,10 @@ func testAccOrganization_basic(t *testing.T) { resource.TestCheckResourceAttrPair(resourceName, "accounts.0.arn", resourceName, "master_account_arn"), resource.TestCheckResourceAttrPair(resourceName, "accounts.0.email", resourceName, "master_account_email"), 
resource.TestCheckResourceAttrPair(resourceName, "accounts.0.id", resourceName, "master_account_id"), - acctest.MatchResourceAttrGlobalARN(ctx, resourceName, names.AttrARN, "organizations", regexache.MustCompile(`organization/o-.+`)), + acctest.CheckResourceAttrGlobalARNFormat(ctx, resourceName, names.AttrARN, "organizations", "organization/o-{id}"), resource.TestCheckResourceAttr(resourceName, "aws_service_access_principals.#", "0"), resource.TestCheckResourceAttr(resourceName, "feature_set", string(awstypes.OrganizationFeatureSetAll)), - acctest.MatchResourceAttrGlobalARN(ctx, resourceName, "master_account_arn", "organizations", regexache.MustCompile(`account/o-.+/.+`)), + acctest.MatchResourceAttrGlobalARN(ctx, resourceName, "master_account_arn", "organizations", regexache.MustCompile(`account/`+organizationIDRegexPattern+`/\d{12}$`)), resource.TestMatchResourceAttr(resourceName, "master_account_email", regexache.MustCompile(`.+@.+`)), acctest.CheckResourceAttrAccountID(ctx, resourceName, "master_account_id"), resource.TestCheckResourceAttr(resourceName, "non_master_accounts.#", "0"), diff --git a/internal/service/organizations/organizational_unit.go b/internal/service/organizations/organizational_unit.go index 62ad9603151c..08476f44b8ad 100644 --- a/internal/service/organizations/organizational_unit.go +++ b/internal/service/organizations/organizational_unit.go @@ -25,6 +25,11 @@ import ( // @SDKResource("aws_organizations_organizational_unit", name="Organizational Unit") // @Tags(identifierAttribute="id") +// @IdentityAttribute("id") +// @Testing(existsType="github.com/aws/aws-sdk-go-v2/service/organizations/types;awstypes;awstypes.OrganizationalUnit") +// @Testing(serialize=true) +// @Testing(preIdentityVersion="6.4.0") +// @Testing(preCheck="github.com/hashicorp/terraform-provider-aws/internal/acctest;acctest.PreCheckOrganizationManagementAccount") func resourceOrganizationalUnit() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: 
resourceOrganizationalUnitCreate, @@ -32,10 +37,6 @@ func resourceOrganizationalUnit() *schema.Resource { UpdateWithoutTimeout: resourceOrganizationalUnitUpdate, DeleteWithoutTimeout: resourceOrganizationalUnitDelete, - Importer: &schema.ResourceImporter{ - StateContext: schema.ImportStatePassthroughContext, - }, - Schema: map[string]*schema.Schema{ "accounts": { Type: schema.TypeList, @@ -93,7 +94,7 @@ func resourceOrganizationalUnitCreate(ctx context.Context, d *schema.ResourceDat Tags: getTagsIn(ctx), } - outputRaw, err := tfresource.RetryWhenIsA[*awstypes.FinalizingOrganizationException](ctx, organizationFinalizationTimeout, func() (any, error) { + outputRaw, err := tfresource.RetryWhenIsA[any, *awstypes.FinalizingOrganizationException](ctx, organizationFinalizationTimeout, func(ctx context.Context) (any, error) { return conn.CreateOrganizationalUnit(ctx, input) }) diff --git a/internal/service/organizations/organizational_unit_identity_gen_test.go b/internal/service/organizations/organizational_unit_identity_gen_test.go new file mode 100644 index 000000000000..1a096f4473f0 --- /dev/null +++ b/internal/service/organizations/organizational_unit_identity_gen_test.go @@ -0,0 +1,238 @@ +// Code generated by internal/generate/identitytests/main.go; DO NOT EDIT. 
+ +package organizations_test + +import ( + "testing" + + awstypes "github.com/aws/aws-sdk-go-v2/service/organizations/types" + "github.com/hashicorp/terraform-plugin-testing/config" + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/knownvalue" + "github.com/hashicorp/terraform-plugin-testing/plancheck" + "github.com/hashicorp/terraform-plugin-testing/statecheck" + "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" + "github.com/hashicorp/terraform-plugin-testing/tfversion" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + tfknownvalue "github.com/hashicorp/terraform-provider-aws/internal/acctest/knownvalue" + tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func testAccOrganizationsOrganizationalUnit_IdentitySerial(t *testing.T) { + t.Helper() + + testCases := map[string]func(t *testing.T){ + acctest.CtBasic: testAccOrganizationsOrganizationalUnit_Identity_Basic, + "ExistingResource": testAccOrganizationsOrganizationalUnit_Identity_ExistingResource, + "ExistingResourceNoRefresh": testAccOrganizationsOrganizationalUnit_Identity_ExistingResource_NoRefresh_NoChange, + } + + acctest.RunSerialTests1Level(t, testCases, 0) +} + +func testAccOrganizationsOrganizationalUnit_Identity_Basic(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.OrganizationalUnit + resourceName := "aws_organizations_organizational_unit.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.Test(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckOrganizationManagementAccount(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, 
names.OrganizationsServiceID), + CheckDestroy: testAccCheckOrganizationalUnitDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + // Step 1: Setup + { + ConfigDirectory: config.StaticDirectory("testdata/OrganizationalUnit/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckOrganizationalUnitExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrID: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrID)), + }, + }, + + // Step 2: Import command + { + ConfigDirectory: config.StaticDirectory("testdata/OrganizationalUnit/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ImportStateKind: resource.ImportCommandWithID, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + + // Step 3: Import block with Import ID + { + ConfigDirectory: config.StaticDirectory("testdata/OrganizationalUnit/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + }, + }, + }, + + // Step 4: Import block with Resource Identity + { + ConfigDirectory: config.StaticDirectory("testdata/OrganizationalUnit/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithResourceIdentity, + 
ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + }, + }, + }, + }, + }) +} + +// Resource Identity was added after v6.4.0 +func testAccOrganizationsOrganizationalUnit_Identity_ExistingResource(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.OrganizationalUnit + resourceName := "aws_organizations_organizational_unit.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.Test(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckOrganizationManagementAccount(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.OrganizationsServiceID), + CheckDestroy: testAccCheckOrganizationalUnitDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/OrganizationalUnit/basic_v6.4.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckOrganizationalUnitExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/OrganizationalUnit/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: 
[]statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrID: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrID)), + }, + }, + }, + }) +} + +// Resource Identity was added after v6.4.0 +func testAccOrganizationsOrganizationalUnit_Identity_ExistingResource_NoRefresh_NoChange(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.OrganizationalUnit + resourceName := "aws_organizations_organizational_unit.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.Test(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckOrganizationManagementAccount(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.OrganizationsServiceID), + CheckDestroy: testAccCheckOrganizationalUnitDestroy(ctx), + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/OrganizationalUnit/basic_v6.4.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckOrganizationalUnitExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/OrganizationalUnit/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + 
plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + }, + }) +} diff --git a/internal/service/organizations/organizational_unit_tags_gen_test.go b/internal/service/organizations/organizational_unit_tags_gen_test.go new file mode 100644 index 000000000000..9dcef93bebbe --- /dev/null +++ b/internal/service/organizations/organizational_unit_tags_gen_test.go @@ -0,0 +1,2385 @@ +// Code generated by internal/generate/tagstests/main.go; DO NOT EDIT. + +package organizations_test + +import ( + "testing" + + awstypes "github.com/aws/aws-sdk-go-v2/service/organizations/types" + "github.com/hashicorp/terraform-plugin-testing/config" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/knownvalue" + "github.com/hashicorp/terraform-plugin-testing/plancheck" + "github.com/hashicorp/terraform-plugin-testing/statecheck" + "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func testAccOrganizationsOrganizationalUnit_tagsSerial(t *testing.T) { + t.Helper() + + testCases := map[string]func(t *testing.T){ + acctest.CtBasic: testAccOrganizationsOrganizationalUnit_tags, + "null": testAccOrganizationsOrganizationalUnit_tags_null, + "EmptyMap": testAccOrganizationsOrganizationalUnit_tags_EmptyMap, + "AddOnUpdate": testAccOrganizationsOrganizationalUnit_tags_AddOnUpdate, + "EmptyTag_OnCreate": testAccOrganizationsOrganizationalUnit_tags_EmptyTag_OnCreate, + "EmptyTag_OnUpdate_Add": testAccOrganizationsOrganizationalUnit_tags_EmptyTag_OnUpdate_Add, + "EmptyTag_OnUpdate_Replace": 
testAccOrganizationsOrganizationalUnit_tags_EmptyTag_OnUpdate_Replace, + "DefaultTags_providerOnly": testAccOrganizationsOrganizationalUnit_tags_DefaultTags_providerOnly, + "DefaultTags_nonOverlapping": testAccOrganizationsOrganizationalUnit_tags_DefaultTags_nonOverlapping, + "DefaultTags_overlapping": testAccOrganizationsOrganizationalUnit_tags_DefaultTags_overlapping, + "DefaultTags_updateToProviderOnly": testAccOrganizationsOrganizationalUnit_tags_DefaultTags_updateToProviderOnly, + "DefaultTags_updateToResourceOnly": testAccOrganizationsOrganizationalUnit_tags_DefaultTags_updateToResourceOnly, + "DefaultTags_emptyResourceTag": testAccOrganizationsOrganizationalUnit_tags_DefaultTags_emptyResourceTag, + "DefaultTags_nullOverlappingResourceTag": testAccOrganizationsOrganizationalUnit_tags_DefaultTags_nullOverlappingResourceTag, + "DefaultTags_nullNonOverlappingResourceTag": testAccOrganizationsOrganizationalUnit_tags_DefaultTags_nullNonOverlappingResourceTag, + "ComputedTag_OnCreate": testAccOrganizationsOrganizationalUnit_tags_ComputedTag_OnCreate, + "ComputedTag_OnUpdate_Add": testAccOrganizationsOrganizationalUnit_tags_ComputedTag_OnUpdate_Add, + "ComputedTag_OnUpdate_Replace": testAccOrganizationsOrganizationalUnit_tags_ComputedTag_OnUpdate_Replace, + "IgnoreTags_Overlap_DefaultTag": testAccOrganizationsOrganizationalUnit_tags_IgnoreTags_Overlap_DefaultTag, + "IgnoreTags_Overlap_ResourceTag": testAccOrganizationsOrganizationalUnit_tags_IgnoreTags_Overlap_ResourceTag, + } + + acctest.RunSerialTests1Level(t, testCases, 0) +} + +func testAccOrganizationsOrganizationalUnit_tags(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.OrganizationalUnit + resourceName := "aws_organizations_organizational_unit.test" + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + + acctest.Test(ctx, t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckOrganizationManagementAccount(ctx, t) + }, + ErrorCheck: 
acctest.ErrorCheck(t, names.OrganizationsServiceID), + CheckDestroy: testAccCheckOrganizationalUnitDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + ConfigDirectory: config.StaticDirectory("testdata/OrganizationalUnit/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckOrganizationalUnitExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/OrganizationalUnit/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + ResourceName: resourceName, + ImportState: true, 
+ ImportStateVerify: true, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/OrganizationalUnit/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1Updated), + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckOrganizationalUnitExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1Updated), + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1Updated), + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1Updated), + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1Updated), + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/OrganizationalUnit/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: 
config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1Updated), + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/OrganizationalUnit/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckOrganizationalUnitExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/OrganizationalUnit/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + 
}), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/OrganizationalUnit/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckOrganizationalUnitExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/OrganizationalUnit/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccOrganizationsOrganizationalUnit_tags_null(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.OrganizationalUnit + resourceName := "aws_organizations_organizational_unit.test" + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + + acctest.Test(ctx, t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckOrganizationManagementAccount(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.OrganizationsServiceID), + CheckDestroy: 
testAccCheckOrganizationalUnitDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + ConfigDirectory: config.StaticDirectory("testdata/OrganizationalUnit/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: nil, + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckOrganizationalUnitExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + // TODO: Should be known + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/OrganizationalUnit/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: nil, + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/OrganizationalUnit/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: nil, + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + 
plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + }, + }, + }) +} + +func testAccOrganizationsOrganizationalUnit_tags_EmptyMap(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.OrganizationalUnit + resourceName := "aws_organizations_organizational_unit.test" + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + + acctest.Test(ctx, t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckOrganizationManagementAccount(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.OrganizationsServiceID), + CheckDestroy: testAccCheckOrganizationalUnitDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + ConfigDirectory: config.StaticDirectory("testdata/OrganizationalUnit/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{}), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckOrganizationalUnitExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + // TODO: Should be known + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/OrganizationalUnit/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: 
config.MapVariable(map[string]config.Variable{}), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/OrganizationalUnit/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: nil, + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + }, + }, + }) +} + +func testAccOrganizationsOrganizationalUnit_tags_AddOnUpdate(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.OrganizationalUnit + resourceName := "aws_organizations_organizational_unit.test" + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + + acctest.Test(ctx, t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckOrganizationManagementAccount(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.OrganizationsServiceID), + CheckDestroy: testAccCheckOrganizationalUnitDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + ConfigDirectory: config.StaticDirectory("testdata/OrganizationalUnit/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckOrganizationalUnitExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + 
plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + // TODO: Should be known + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/OrganizationalUnit/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckOrganizationalUnitExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/OrganizationalUnit/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: 
config.StringVariable(acctest.CtValue1), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccOrganizationsOrganizationalUnit_tags_EmptyTag_OnCreate(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.OrganizationalUnit + resourceName := "aws_organizations_organizational_unit.test" + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + + acctest.Test(ctx, t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckOrganizationManagementAccount(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.OrganizationsServiceID), + CheckDestroy: testAccCheckOrganizationalUnitDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + ConfigDirectory: config.StaticDirectory("testdata/OrganizationalUnit/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckOrganizationalUnitExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + // TODO: Should be known + 
plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/OrganizationalUnit/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/OrganizationalUnit/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckOrganizationalUnitExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/OrganizationalUnit/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccOrganizationsOrganizationalUnit_tags_EmptyTag_OnUpdate_Add(t *testing.T) { + ctx := acctest.Context(t) + + var v 
awstypes.OrganizationalUnit + resourceName := "aws_organizations_organizational_unit.test" + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + + acctest.Test(ctx, t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckOrganizationManagementAccount(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.OrganizationsServiceID), + CheckDestroy: testAccCheckOrganizationalUnitDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + ConfigDirectory: config.StaticDirectory("testdata/OrganizationalUnit/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckOrganizationalUnitExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ConfigDirectory: 
config.StaticDirectory("testdata/OrganizationalUnit/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + acctest.CtKey2: config.StringVariable(""), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckOrganizationalUnitExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + acctest.CtKey2: knownvalue.StringExact(""), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + acctest.CtKey2: knownvalue.StringExact(""), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + acctest.CtKey2: knownvalue.StringExact(""), + })), + // TODO: Should be known + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/OrganizationalUnit/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + acctest.CtKey2: config.StringVariable(""), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + ConfigDirectory: 
config.StaticDirectory("testdata/OrganizationalUnit/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckOrganizationalUnitExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/OrganizationalUnit/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccOrganizationsOrganizationalUnit_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.OrganizationalUnit + resourceName := 
"aws_organizations_organizational_unit.test" + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + + acctest.Test(ctx, t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckOrganizationManagementAccount(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.OrganizationsServiceID), + CheckDestroy: testAccCheckOrganizationalUnitDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + ConfigDirectory: config.StaticDirectory("testdata/OrganizationalUnit/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckOrganizationalUnitExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/OrganizationalUnit/tags/"), + 
ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckOrganizationalUnitExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + // TODO: Should be known + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/OrganizationalUnit/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccOrganizationsOrganizationalUnit_tags_DefaultTags_providerOnly(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.OrganizationalUnit + resourceName := "aws_organizations_organizational_unit.test" + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + + acctest.Test(ctx, t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + 
acctest.PreCheckOrganizationManagementAccount(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.OrganizationsServiceID), + CheckDestroy: testAccCheckOrganizationalUnitDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/OrganizationalUnit/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckOrganizationalUnitExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/OrganizationalUnit/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: nil, + }, + ResourceName: 
resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/OrganizationalUnit/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1Updated), + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckOrganizationalUnitExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1Updated), + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1Updated), + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/OrganizationalUnit/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + 
acctest.CtKey1: config.StringVariable(acctest.CtValue1Updated), + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/OrganizationalUnit/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckOrganizationalUnitExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/OrganizationalUnit/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey2: 
config.StringVariable(acctest.CtValue2), + }), + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/OrganizationalUnit/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckOrganizationalUnitExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/OrganizationalUnit/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccOrganizationsOrganizationalUnit_tags_DefaultTags_nonOverlapping(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.OrganizationalUnit + resourceName := "aws_organizations_organizational_unit.test" + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + + acctest.Test(ctx, t, resource.TestCase{ + 
PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckOrganizationManagementAccount(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.OrganizationsServiceID), + CheckDestroy: testAccCheckOrganizationalUnitDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/OrganizationalUnit/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckOrganizationalUnitExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1), + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + 
acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1), + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/OrganizationalUnit/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/OrganizationalUnit/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1Updated), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: config.StringVariable(acctest.CtResourceValue2), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckOrganizationalUnitExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), 
knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1Updated), + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1Updated), + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/OrganizationalUnit/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1Updated), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: config.StringVariable(acctest.CtResourceValue2), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: 
config.StaticDirectory("testdata/OrganizationalUnit/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckOrganizationalUnitExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/OrganizationalUnit/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccOrganizationsOrganizationalUnit_tags_DefaultTags_overlapping(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.OrganizationalUnit + resourceName := "aws_organizations_organizational_unit.test" + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + + acctest.Test(ctx, t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckOrganizationManagementAccount(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.OrganizationsServiceID), + CheckDestroy: testAccCheckOrganizationalUnitDestroy(ctx), + Steps: 
[]resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/OrganizationalUnit/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtResourceValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckOrganizationalUnitExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/OrganizationalUnit/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + 
acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtResourceValue1), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/OrganizationalUnit/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtProviderValue1), + acctest.CtOverlapKey2: config.StringVariable("providervalue2"), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtResourceValue1), + acctest.CtOverlapKey2: config.StringVariable(acctest.CtResourceValue2), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckOrganizationalUnitExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + acctest.CtOverlapKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + acctest.CtOverlapKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + 
plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + acctest.CtOverlapKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + acctest.CtOverlapKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/OrganizationalUnit/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtProviderValue1), + acctest.CtOverlapKey2: config.StringVariable("providervalue2"), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtResourceValue1), + acctest.CtOverlapKey2: config.StringVariable(acctest.CtResourceValue2), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/OrganizationalUnit/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtResourceValue2), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckOrganizationalUnitExists(ctx, 
resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue2), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/OrganizationalUnit/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtResourceValue2), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccOrganizationsOrganizationalUnit_tags_DefaultTags_updateToProviderOnly(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.OrganizationalUnit + resourceName := "aws_organizations_organizational_unit.test" + rName := 
acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + + acctest.Test(ctx, t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckOrganizationManagementAccount(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.OrganizationsServiceID), + CheckDestroy: testAccCheckOrganizationalUnitDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/OrganizationalUnit/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckOrganizationalUnitExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: 
config.StaticDirectory("testdata/OrganizationalUnit/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckOrganizationalUnitExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/OrganizationalUnit/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccOrganizationsOrganizationalUnit_tags_DefaultTags_updateToResourceOnly(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.OrganizationalUnit 
+ resourceName := "aws_organizations_organizational_unit.test" + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + + acctest.Test(ctx, t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckOrganizationManagementAccount(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.OrganizationsServiceID), + CheckDestroy: testAccCheckOrganizationalUnitDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/OrganizationalUnit/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckOrganizationalUnitExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/OrganizationalUnit/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: 
config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckOrganizationalUnitExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/OrganizationalUnit/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccOrganizationsOrganizationalUnit_tags_DefaultTags_emptyResourceTag(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.OrganizationalUnit + resourceName := "aws_organizations_organizational_unit.test" + rName := 
acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + + acctest.Test(ctx, t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckOrganizationManagementAccount(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.OrganizationsServiceID), + CheckDestroy: testAccCheckOrganizationalUnitDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/OrganizationalUnit/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckOrganizationalUnitExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + // TODO: Should be known + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: 
config.StaticDirectory("testdata/OrganizationalUnit/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccOrganizationsOrganizationalUnit_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.OrganizationalUnit + resourceName := "aws_organizations_organizational_unit.test" + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + + acctest.Test(ctx, t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckOrganizationManagementAccount(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.OrganizationsServiceID), + CheckDestroy: testAccCheckOrganizationalUnitDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/OrganizationalUnit/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckOrganizationalUnitExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + }, + ConfigPlanChecks: 
resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + // TODO: Should be known + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/OrganizationalUnit/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccOrganizationsOrganizationalUnit_tags_DefaultTags_nullOverlappingResourceTag(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.OrganizationalUnit + resourceName := "aws_organizations_organizational_unit.test" + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + + acctest.Test(ctx, t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckOrganizationManagementAccount(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.OrganizationsServiceID), + CheckDestroy: testAccCheckOrganizationalUnitDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/OrganizationalUnit/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: nil, + }), + }, + Check: 
resource.ComposeAggregateTestCheckFunc( + testAccCheckOrganizationalUnitExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtProviderValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtProviderValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/OrganizationalUnit/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: nil, + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccOrganizationsOrganizationalUnit_tags_DefaultTags_nullNonOverlappingResourceTag(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.OrganizationalUnit + resourceName := "aws_organizations_organizational_unit.test" + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + + acctest.Test(ctx, t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckOrganizationManagementAccount(ctx, t) + }, + ErrorCheck: 
acctest.ErrorCheck(t, names.OrganizationsServiceID), + CheckDestroy: testAccCheckOrganizationalUnitDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/OrganizationalUnit/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: nil, + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckOrganizationalUnitExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/OrganizationalUnit/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1), 
+ }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: nil, + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccOrganizationsOrganizationalUnit_tags_ComputedTag_OnCreate(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.OrganizationalUnit + resourceName := "aws_organizations_organizational_unit.test" + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + + acctest.Test(ctx, t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckOrganizationManagementAccount(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.OrganizationsServiceID), + CheckDestroy: testAccCheckOrganizationalUnitDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/OrganizationalUnit/tagsComputed1/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "unknownTagKey": config.StringVariable("computedkey1"), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckOrganizationalUnitExists(ctx, resourceName, &v), + resource.TestCheckResourceAttrPair(resourceName, "tags.computedkey1", "null_resource.test", names.AttrID), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapSizeExact(1)), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapSizeExact(1)), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTags)), + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + 
plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/OrganizationalUnit/tagsComputed1/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "unknownTagKey": config.StringVariable("computedkey1"), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccOrganizationsOrganizationalUnit_tags_ComputedTag_OnUpdate_Add(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.OrganizationalUnit + resourceName := "aws_organizations_organizational_unit.test" + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + + acctest.Test(ctx, t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckOrganizationManagementAccount(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.OrganizationsServiceID), + CheckDestroy: testAccCheckOrganizationalUnitDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/OrganizationalUnit/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckOrganizationalUnitExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, 
tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/OrganizationalUnit/tagsComputed2/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "unknownTagKey": config.StringVariable("computedkey1"), + "knownTagKey": config.StringVariable(acctest.CtKey1), + "knownTagValue": config.StringVariable(acctest.CtValue1), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckOrganizationalUnitExists(ctx, resourceName, &v), + resource.TestCheckResourceAttrPair(resourceName, "tags.computedkey1", "null_resource.test", names.AttrID), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapSizeExact(2)), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapPartial(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapSizeExact(2)), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapPartial(map[string]knownvalue.Check{ + acctest.CtKey1: 
knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTags)), + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/OrganizationalUnit/tagsComputed2/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "unknownTagKey": config.StringVariable("computedkey1"), + "knownTagKey": config.StringVariable(acctest.CtKey1), + "knownTagValue": config.StringVariable(acctest.CtValue1), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccOrganizationsOrganizationalUnit_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.OrganizationalUnit + resourceName := "aws_organizations_organizational_unit.test" + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + + acctest.Test(ctx, t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckOrganizationManagementAccount(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.OrganizationsServiceID), + CheckDestroy: testAccCheckOrganizationalUnitDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/OrganizationalUnit/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + 
acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckOrganizationalUnitExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/OrganizationalUnit/tagsComputed1/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "unknownTagKey": config.StringVariable(acctest.CtKey1), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckOrganizationalUnitExists(ctx, resourceName, &v), + resource.TestCheckResourceAttrPair(resourceName, acctest.CtTagsKey1, "null_resource.test", names.AttrID), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapSizeExact(1)), + statecheck.ExpectKnownValue(resourceName, 
tfjsonpath.New(names.AttrTagsAll), knownvalue.MapSizeExact(1)), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTags)), + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/OrganizationalUnit/tagsComputed1/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "unknownTagKey": config.StringVariable(acctest.CtKey1), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccOrganizationsOrganizationalUnit_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.OrganizationalUnit + resourceName := "aws_organizations_organizational_unit.test" + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + + acctest.Test(ctx, t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckOrganizationManagementAccount(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.OrganizationsServiceID), + CheckDestroy: testAccCheckOrganizationalUnitDestroy(ctx), + Steps: []resource.TestStep{ + // 1: Create + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/OrganizationalUnit/tags_ignore/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + 
acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1), + }), + "ignore_tag_keys": config.SetVariable( + config.StringVariable(acctest.CtProviderKey1), + ), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckOrganizationalUnitExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + expectFullResourceTags(ctx, resourceName, knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1), // TODO: Should not be set + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectEmptyPlan(), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectEmptyPlan(), + }, + }, + }, + // 2: Update ignored tag only + { + 
ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/OrganizationalUnit/tags_ignore/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1Updated), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1), + }), + "ignore_tag_keys": config.SetVariable( + config.StringVariable(acctest.CtProviderKey1), + ), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckOrganizationalUnitExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + expectFullResourceTags(ctx, resourceName, knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1), // TODO: Should not be set + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), 
knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectEmptyPlan(), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectEmptyPlan(), + }, + }, + }, + // 3: Update both tags + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/OrganizationalUnit/tags_ignore/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1Again), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1Updated), + }), + "ignore_tag_keys": config.SetVariable( + config.StringVariable(acctest.CtProviderKey1), + ), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckOrganizationalUnitExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + })), + expectFullResourceTags(ctx, resourceName, knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1), // TODO: Should not be set + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + 
plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + })), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectEmptyPlan(), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectEmptyPlan(), + }, + }, + }, + }, + }) +} + +func testAccOrganizationsOrganizationalUnit_tags_IgnoreTags_Overlap_ResourceTag(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.OrganizationalUnit + resourceName := "aws_organizations_organizational_unit.test" + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + + acctest.Test(ctx, t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckOrganizationManagementAccount(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.OrganizationsServiceID), + CheckDestroy: testAccCheckOrganizationalUnitDestroy(ctx), + Steps: []resource.TestStep{ + // 1: Create + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/OrganizationalUnit/tags_ignore/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1), + acctest.CtResourceKey2: config.StringVariable(acctest.CtResourceValue2), + }), + "ignore_tag_keys": config.SetVariable( + config.StringVariable(acctest.CtResourceKey1), + ), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckOrganizationalUnitExists(ctx, resourceName, &v), + 
), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + expectFullResourceTags(ctx, resourceName, knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), // TODO: Should not be set + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: 
knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + }, + ExpectNonEmptyPlan: true, + }, + // 2: Update ignored tag + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/OrganizationalUnit/tags_ignore/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: config.StringVariable(acctest.CtResourceValue2), + }), + "ignore_tag_keys": config.SetVariable( + config.StringVariable(acctest.CtResourceKey1), + ), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckOrganizationalUnitExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + expectFullResourceTags(ctx, resourceName, 
knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), // TODO: Should not be set + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, 
tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + }, + ExpectNonEmptyPlan: true, + }, + // 3: Update both tags + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/OrganizationalUnit/tags_ignore/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1Again), + acctest.CtResourceKey2: config.StringVariable(acctest.CtResourceValue2Updated), + }), + "ignore_tag_keys": config.SetVariable( + config.StringVariable(acctest.CtResourceKey1), + ), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckOrganizationalUnitExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + expectFullResourceTags(ctx, resourceName, knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), // TODO: Should not be set + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + 
acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Again), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Again), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Again), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + }, + }, + ExpectNonEmptyPlan: true, + }, + }, + }) +} diff --git a/internal/service/organizations/organizational_unit_test.go b/internal/service/organizations/organizational_unit_test.go index 6fb2d1b1cef9..b33ec10e509c 100644 --- a/internal/service/organizations/organizational_unit_test.go +++ 
b/internal/service/organizations/organizational_unit_test.go @@ -40,7 +40,7 @@ func testAccOrganizationalUnit_basic(t *testing.T) { Check: resource.ComposeTestCheckFunc( testAccCheckOrganizationalUnitExists(ctx, resourceName, &unit), resource.TestCheckResourceAttr(resourceName, "accounts.#", "0"), - acctest.MatchResourceAttrGlobalARN(ctx, resourceName, names.AttrARN, "organizations", regexache.MustCompile("ou/o-.+")), + acctest.MatchResourceAttrGlobalARN(ctx, resourceName, names.AttrARN, "organizations", regexache.MustCompile("ou/"+organizationIDRegexPattern+"/ou-[0-9a-z]{4}-[0-9a-z]{8}$")), resource.TestCheckResourceAttr(resourceName, names.AttrName, rName), resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, "0"), ), @@ -120,55 +120,6 @@ func testAccOrganizationalUnit_update(t *testing.T) { }) } -func testAccOrganizationalUnit_tags(t *testing.T) { - ctx := acctest.Context(t) - var unit awstypes.OrganizationalUnit - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resourceName := "aws_organizations_organizational_unit.test" - - resource.Test(t, resource.TestCase{ - PreCheck: func() { - acctest.PreCheck(ctx, t) - acctest.PreCheckOrganizationManagementAccount(ctx, t) - }, - ErrorCheck: acctest.ErrorCheck(t, names.OrganizationsServiceID), - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckOrganizationalUnitDestroy(ctx), - Steps: []resource.TestStep{ - { - Config: testAccOrganizationalUnitConfig_tags1(rName, acctest.CtKey1, acctest.CtValue1), - Check: resource.ComposeTestCheckFunc( - testAccCheckOrganizationalUnitExists(ctx, resourceName, &unit), - resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, "1"), - resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey1, acctest.CtValue1), - ), - }, - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - { - Config: testAccOrganizationalUnitConfig_tags2(rName, acctest.CtKey1, acctest.CtValue1Updated, 
acctest.CtKey2, acctest.CtValue2), - Check: resource.ComposeTestCheckFunc( - testAccCheckOrganizationalUnitExists(ctx, resourceName, &unit), - resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, "2"), - resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey1, acctest.CtValue1Updated), - resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey2, acctest.CtValue2), - ), - }, - { - Config: testAccOrganizationalUnitConfig_tags1(rName, acctest.CtKey2, acctest.CtValue2), - Check: resource.ComposeTestCheckFunc( - testAccCheckOrganizationalUnitExists(ctx, resourceName, &unit), - resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, "1"), - resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey2, acctest.CtValue2), - ), - }, - }, - }) -} - func testAccCheckOrganizationalUnitDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { conn := acctest.Provider.Meta().(*conns.AWSClient).OrganizationsClient(ctx) @@ -226,34 +177,3 @@ resource "aws_organizations_organizational_unit" "test" { } `, rName) } - -func testAccOrganizationalUnitConfig_tags1(rName, tagKey1, tagValue1 string) string { - return fmt.Sprintf(` -data "aws_organizations_organization" "current" {} - -resource "aws_organizations_organizational_unit" "test" { - name = %[1]q - parent_id = data.aws_organizations_organization.current.roots[0].id - - tags = { - %[2]q = %[3]q - } -} -`, rName, tagKey1, tagValue1) -} - -func testAccOrganizationalUnitConfig_tags2(rName, tagKey1, tagValue1, tagKey2, tagValue2 string) string { - return fmt.Sprintf(` -data "aws_organizations_organization" "current" {} - -resource "aws_organizations_organizational_unit" "test" { - name = %[1]q - parent_id = data.aws_organizations_organization.current.roots[0].id - - tags = { - %[2]q = %[3]q - %[4]q = %[5]q - } -} -`, rName, tagKey1, tagValue1, tagKey2, tagValue2) -} diff --git 
a/internal/service/organizations/organizational_units_descendant_organizational_units_data_source_test.go b/internal/service/organizations/organizational_units_descendant_organizational_units_data_source_test.go index d91aae809716..872ba8d6031d 100644 --- a/internal/service/organizations/organizational_units_descendant_organizational_units_data_source_test.go +++ b/internal/service/organizations/organizational_units_descendant_organizational_units_data_source_test.go @@ -13,7 +13,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/names" ) -func testOrganizationalUnitDescendantOUsDataSource_basic(t *testing.T) { +func testAccOrganizationalUnitDescendantOUsDataSource_basic(t *testing.T) { ctx := acctest.Context(t) rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) topOUDataSourceName := "data.aws_organizations_organizational_unit_descendant_organizational_units.current" diff --git a/internal/service/organizations/organizations_test.go b/internal/service/organizations/organizations_test.go index 8574e982267d..bdbb3f7a2c46 100644 --- a/internal/service/organizations/organizations_test.go +++ b/internal/service/organizations/organizations_test.go @@ -21,6 +21,10 @@ func testAccErrorCheckSkip(t *testing.T) resource.ErrorCheckFunc { ) } +const ( + organizationIDRegexPattern = `o-[0-9a-z]{10}` +) + func TestAccOrganizations_serial(t *testing.T) { t.Parallel() @@ -36,31 +40,34 @@ func TestAccOrganizations_serial(t *testing.T) { "DataSource_basic": testAccOrganizationDataSource_basic, "DataSource_memberAccount": testAccOrganizationDataSource_memberAccount, "DataSource_delegatedAdministrator": testAccOrganizationDataSource_delegatedAdministrator, + "Identity": testAccOrganizationsOrganization_IdentitySerial, }, "Account": { acctest.CtBasic: testAccAccount_basic, "CloseOnDeletion": testAccAccount_CloseOnDeletion, "ParentId": testAccAccount_ParentID, - "Tags": testAccAccount_Tags, + "tags": testAccAccount_Tags, "GovCloud": testAccAccount_govCloud, 
"AccountUpdate": testAccAccount_AccountUpdate, + "Identity": testAccOrganizationsAccount_IdentitySerial, }, "OrganizationalUnit": { acctest.CtBasic: testAccOrganizationalUnit_basic, acctest.CtDisappears: testAccOrganizationalUnit_disappears, "update": testAccOrganizationalUnit_update, - "tags": testAccOrganizationalUnit_tags, + "tags": testAccOrganizationsOrganizationalUnit_tagsSerial, "DataSource_basic": testAccOrganizationalUnitDataSource_basic, - "DescendantOUsDataSource_basic": testOrganizationalUnitDescendantOUsDataSource_basic, + "DescendantOUsDataSource_basic": testAccOrganizationalUnitDescendantOUsDataSource_basic, "ChildAccountsDataSource_basic": testAccOrganizationalUnitChildAccountsDataSource_basic, "DescendantAccountsDataSource_basic": testAccOrganizationalUnitDescendantAccountsDataSource_basic, "PluralDataSource_basic": testAccOrganizationalUnitsDataSource_basic, + "Identity": testAccOrganizationsOrganizationalUnit_IdentitySerial, }, "Policy": { acctest.CtBasic: testAccPolicy_basic, "concurrent": testAccPolicy_concurrent, "Description": testAccPolicy_description, - "Tags": testAccPolicy_tags, + "tags": testAccOrganizationsPolicy_tagsSerial, "SkipDestroy": testAccPolicy_skipDestroy, acctest.CtDisappears: testAccPolicy_disappears, "Type_AI_OPT_OUT": testAccPolicy_type_AI_OPT_OUT, @@ -68,6 +75,7 @@ func TestAccOrganizations_serial(t *testing.T) { "Type_SCP": testAccPolicy_type_SCP, "Type_Tag": testAccPolicy_type_Tag, "ImportAwsManagedPolicy": testAccPolicy_importManagedPolicy, + "Identity": testAccOrganizationsPolicy_IdentitySerial, }, "PolicyAttachment": { "Account": testAccPolicyAttachment_Account, @@ -75,6 +83,7 @@ func TestAccOrganizations_serial(t *testing.T) { "Root": testAccPolicyAttachment_Root, "SkipDestroy": testAccPolicyAttachment_skipDestroy, acctest.CtDisappears: testAccPolicyAttachment_disappears, + "Identity": testAccOrganizationsPolicyAttachment_IdentitySerial, }, "PolicyDataSource": { "UnattachedPolicy": 
testAccPolicyDataSource_UnattachedPolicy, @@ -82,11 +91,13 @@ func TestAccOrganizations_serial(t *testing.T) { "ResourcePolicy": { acctest.CtBasic: testAccResourcePolicy_basic, acctest.CtDisappears: testAccResourcePolicy_disappears, - "tags": testAccResourcePolicy_tags, + "tags": testAccOrganizationsResourcePolicy_tagsSerial, + "Identity": testAccOrganizationsResourcePolicy_IdentitySerial, }, "DelegatedAdministrator": { acctest.CtBasic: testAccDelegatedAdministrator_basic, acctest.CtDisappears: testAccDelegatedAdministrator_disappears, + "Identity": testAccOrganizationsDelegatedAdministrator_IdentitySerial, }, "DelegatedAdministrators": { acctest.CtBasic: testAccDelegatedAdministratorsDataSource_basic, diff --git a/internal/service/organizations/policy.go b/internal/service/organizations/policy.go index 2c6df056e747..715cdcdc8133 100644 --- a/internal/service/organizations/policy.go +++ b/internal/service/organizations/policy.go @@ -19,6 +19,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/enum" "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" + "github.com/hashicorp/terraform-provider-aws/internal/provider/sdkv2/importer" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/internal/verify" @@ -27,6 +28,12 @@ import ( // @SDKResource("aws_organizations_policy", name="Policy") // @Tags(identifierAttribute="id") +// @IdentityAttribute("id") +// @CustomImport +// @Testing(existsType="github.com/aws/aws-sdk-go-v2/service/organizations/types;awstypes;awstypes.Policy") +// @Testing(serialize=true) +// @Testing(preIdentityVersion="6.4.0") +// @Testing(preCheck="github.com/hashicorp/terraform-provider-aws/internal/acctest;acctest.PreCheckOrganizationManagementAccount") func resourcePolicy() *schema.Resource { return &schema.Resource{ 
CreateWithoutTimeout: resourcePolicyCreate, @@ -88,7 +95,7 @@ func resourcePolicyCreate(ctx context.Context, d *schema.ResourceData, meta any) Tags: getTagsIn(ctx), } - outputRaw, err := tfresource.RetryWhenIsA[*awstypes.FinalizingOrganizationException](ctx, organizationFinalizationTimeout, func() (any, error) { + outputRaw, err := tfresource.RetryWhenIsA[any, *awstypes.FinalizingOrganizationException](ctx, organizationFinalizationTimeout, func(ctx context.Context) (any, error) { return conn.CreatePolicy(ctx, input) }) @@ -194,10 +201,15 @@ func resourcePolicyDelete(ctx context.Context, d *schema.ResourceData, meta any) } func resourcePolicyImport(ctx context.Context, d *schema.ResourceData, meta any) ([]*schema.ResourceData, error) { + identitySpec := importer.IdentitySpec(ctx) + + if err := importer.GlobalSingleParameterized(ctx, d, identitySpec, meta.(importer.AWSClient)); err != nil { + return nil, err + } + conn := meta.(*conns.AWSClient).OrganizationsClient(ctx) policy, err := findPolicyByID(ctx, conn, d.Id()) - if err != nil { return nil, err } diff --git a/internal/service/organizations/policy_attachment.go b/internal/service/organizations/policy_attachment.go index 11d395fd1128..2fa97bc7a06d 100644 --- a/internal/service/organizations/policy_attachment.go +++ b/internal/service/organizations/policy_attachment.go @@ -24,6 +24,12 @@ import ( ) // @SDKResource("aws_organizations_policy_attachment", name="Policy Attachment") +// @IdentityAttribute("policy_id") +// @IdentityAttribute("target_id") +// @ImportIDHandler("policyAttachmentImportID") +// @Testing(serialize=true) +// @Testing(preIdentityVersion="6.4.0") +// @Testing(preCheck="github.com/hashicorp/terraform-provider-aws/internal/acctest;acctest.PreCheckOrganizationManagementAccount") func resourcePolicyAttachment() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourcePolicyAttachmentCreate, @@ -31,10 +37,6 @@ func resourcePolicyAttachment() *schema.Resource { UpdateWithoutTimeout: 
resourcePolicyAttachmentUpdate, DeleteWithoutTimeout: resourcePolicyAttachmentDelete, - Importer: &schema.ResourceImporter{ - StateContext: schema.ImportStatePassthroughContext, - }, - Schema: map[string]*schema.Schema{ "policy_id": { Type: schema.TypeString, @@ -61,13 +63,13 @@ func resourcePolicyAttachmentCreate(ctx context.Context, d *schema.ResourceData, policyID := d.Get("policy_id").(string) targetID := d.Get("target_id").(string) id := policyAttachmentCreateResourceID(targetID, policyID) - input := &organizations.AttachPolicyInput{ + input := organizations.AttachPolicyInput{ PolicyId: aws.String(policyID), TargetId: aws.String(targetID), } - _, err := tfresource.RetryWhenIsA[*awstypes.FinalizingOrganizationException](ctx, organizationFinalizationTimeout, func() (any, error) { - return conn.AttachPolicy(ctx, input) + _, err := tfresource.RetryWhenIsA[any, *awstypes.FinalizingOrganizationException](ctx, organizationFinalizationTimeout, func(ctx context.Context) (any, error) { + return conn.AttachPolicy(ctx, &input) }) if err != nil { @@ -83,12 +85,10 @@ func resourcePolicyAttachmentRead(ctx context.Context, d *schema.ResourceData, m var diags diag.Diagnostics conn := meta.(*conns.AWSClient).OrganizationsClient(ctx) - targetID, policyID, err := policyAttachmentParseResourceID(d.Id()) - if err != nil { - return sdkdiag.AppendFromErr(diags, err) - } + targetID := d.Get("target_id").(string) + policyID := d.Get("policy_id").(string) - _, err = findPolicyAttachmentByTwoPartKey(ctx, conn, targetID, policyID) + _, err := findPolicyAttachmentByTwoPartKey(ctx, conn, targetID, policyID) if !d.IsNewResource() && tfresource.NotFound(err) { log.Printf("[WARN] Organizations Policy Attachment %s not found, removing from state", d.Id()) @@ -123,13 +123,10 @@ func resourcePolicyAttachmentDelete(ctx context.Context, d *schema.ResourceData, return nil } - targetID, policyID, err := policyAttachmentParseResourceID(d.Id()) - if err != nil { - return sdkdiag.AppendFromErr(diags, 
err) - } + targetID := d.Get("target_id").(string) + policyID := d.Get("policy_id").(string) - log.Printf("[DEBUG] Deleting Organizations Policy Attachment: %s", d.Id()) - _, err = conn.DetachPolicy(ctx, &organizations.DetachPolicyInput{ + _, err := conn.DetachPolicy(ctx, &organizations.DetachPolicyInput{ PolicyId: aws.String(policyID), TargetId: aws.String(targetID), }) @@ -154,16 +151,6 @@ func policyAttachmentCreateResourceID(targetID, policyID string) string { return id } -func policyAttachmentParseResourceID(id string) (string, string, error) { - parts := strings.Split(id, policyAttachmentResourceIDSeparator) - - if len(parts) == 2 && parts[0] != "" && parts[1] != "" { - return parts[0], parts[1], nil - } - - return "", "", fmt.Errorf("unexpected format for ID (%[1]s), expected TARGETID%[2]sPOLICYID", id, policyAttachmentResourceIDSeparator) -} - func findPolicyAttachmentByTwoPartKey(ctx context.Context, conn *organizations.Client, targetID, policyID string) (*awstypes.PolicyTargetSummary, error) { input := &organizations.ListTargetsForPolicyInput{ PolicyId: aws.String(policyID), @@ -211,3 +198,23 @@ func findPolicyTargets(ctx context.Context, conn *organizations.Client, input *o return output, nil } + +type policyAttachmentImportID struct{} + +func (policyAttachmentImportID) Create(d *schema.ResourceData) string { + return policyAttachmentCreateResourceID(d.Get("target_id").(string), d.Get("policy_id").(string)) +} + +func (policyAttachmentImportID) Parse(id string) (string, map[string]string, error) { + parts := strings.Split(id, policyAttachmentResourceIDSeparator) + + if len(parts) == 2 && parts[0] != "" && parts[1] != "" { + result := map[string]string{ + "target_id": parts[0], + "policy_id": parts[1], + } + return id, result, nil + } + + return "", nil, fmt.Errorf("unexpected format for ID (%[1]s), expected TARGETID%[2]sPOLICYID", id, policyAttachmentResourceIDSeparator) +} diff --git 
a/internal/service/organizations/policy_attachment_identity_gen_test.go b/internal/service/organizations/policy_attachment_identity_gen_test.go new file mode 100644 index 000000000000..5286146ae3d0 --- /dev/null +++ b/internal/service/organizations/policy_attachment_identity_gen_test.go @@ -0,0 +1,240 @@ +// Code generated by internal/generate/identitytests/main.go; DO NOT EDIT. + +package organizations_test + +import ( + "testing" + + "github.com/hashicorp/terraform-plugin-testing/config" + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/knownvalue" + "github.com/hashicorp/terraform-plugin-testing/plancheck" + "github.com/hashicorp/terraform-plugin-testing/statecheck" + "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" + "github.com/hashicorp/terraform-plugin-testing/tfversion" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + tfknownvalue "github.com/hashicorp/terraform-provider-aws/internal/acctest/knownvalue" + tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func testAccOrganizationsPolicyAttachment_IdentitySerial(t *testing.T) { + t.Helper() + + testCases := map[string]func(t *testing.T){ + acctest.CtBasic: testAccOrganizationsPolicyAttachment_Identity_Basic, + "ExistingResource": testAccOrganizationsPolicyAttachment_Identity_ExistingResource, + "ExistingResourceNoRefresh": testAccOrganizationsPolicyAttachment_Identity_ExistingResource_NoRefresh_NoChange, + } + + acctest.RunSerialTests1Level(t, testCases, 0) +} + +func testAccOrganizationsPolicyAttachment_Identity_Basic(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_organizations_policy_attachment.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.Test(ctx, t, resource.TestCase{ + 
TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckOrganizationManagementAccount(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.OrganizationsServiceID), + CheckDestroy: testAccCheckPolicyAttachmentDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + // Step 1: Setup + { + ConfigDirectory: config.StaticDirectory("testdata/PolicyAttachment/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckPolicyAttachmentExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + "policy_id": knownvalue.NotNull(), + "target_id": knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New("policy_id")), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New("target_id")), + }, + }, + + // Step 2: Import command + { + ConfigDirectory: config.StaticDirectory("testdata/PolicyAttachment/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ImportStateKind: resource.ImportCommandWithID, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + + // Step 3: Import block with Import ID + { + ConfigDirectory: config.StaticDirectory("testdata/PolicyAttachment/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New("policy_id"), 
knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New("target_id"), knownvalue.NotNull()), + }, + }, + }, + + // Step 4: Import block with Resource Identity + { + ConfigDirectory: config.StaticDirectory("testdata/PolicyAttachment/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithResourceIdentity, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New("policy_id"), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New("target_id"), knownvalue.NotNull()), + }, + }, + }, + }, + }) +} + +// Resource Identity was added after v6.4.0 +func testAccOrganizationsPolicyAttachment_Identity_ExistingResource(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_organizations_policy_attachment.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.Test(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckOrganizationManagementAccount(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.OrganizationsServiceID), + CheckDestroy: testAccCheckPolicyAttachmentDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/PolicyAttachment/basic_v6.4.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckPolicyAttachmentExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: 
acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/PolicyAttachment/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + "policy_id": knownvalue.NotNull(), + "target_id": knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New("policy_id")), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New("target_id")), + }, + }, + }, + }) +} + +// Resource Identity was added after v6.4.0 +func testAccOrganizationsPolicyAttachment_Identity_ExistingResource_NoRefresh_NoChange(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_organizations_policy_attachment.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.Test(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckOrganizationManagementAccount(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.OrganizationsServiceID), + CheckDestroy: testAccCheckPolicyAttachmentDestroy(ctx), + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/PolicyAttachment/basic_v6.4.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: 
config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckPolicyAttachmentExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/PolicyAttachment/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + }, + }) +} diff --git a/internal/service/organizations/policy_identity_gen_test.go b/internal/service/organizations/policy_identity_gen_test.go new file mode 100644 index 000000000000..fb28cbf86ef9 --- /dev/null +++ b/internal/service/organizations/policy_identity_gen_test.go @@ -0,0 +1,238 @@ +// Code generated by internal/generate/identitytests/main.go; DO NOT EDIT. 
+ +package organizations_test + +import ( + "testing" + + awstypes "github.com/aws/aws-sdk-go-v2/service/organizations/types" + "github.com/hashicorp/terraform-plugin-testing/config" + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/knownvalue" + "github.com/hashicorp/terraform-plugin-testing/plancheck" + "github.com/hashicorp/terraform-plugin-testing/statecheck" + "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" + "github.com/hashicorp/terraform-plugin-testing/tfversion" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + tfknownvalue "github.com/hashicorp/terraform-provider-aws/internal/acctest/knownvalue" + tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func testAccOrganizationsPolicy_IdentitySerial(t *testing.T) { + t.Helper() + + testCases := map[string]func(t *testing.T){ + acctest.CtBasic: testAccOrganizationsPolicy_Identity_Basic, + "ExistingResource": testAccOrganizationsPolicy_Identity_ExistingResource, + "ExistingResourceNoRefresh": testAccOrganizationsPolicy_Identity_ExistingResource_NoRefresh_NoChange, + } + + acctest.RunSerialTests1Level(t, testCases, 0) +} + +func testAccOrganizationsPolicy_Identity_Basic(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.Policy + resourceName := "aws_organizations_policy.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.Test(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckOrganizationManagementAccount(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.OrganizationsServiceID), + CheckDestroy: testAccCheckPolicyDestroy(ctx), + ProtoV5ProviderFactories: 
acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + // Step 1: Setup + { + ConfigDirectory: config.StaticDirectory("testdata/Policy/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckPolicyExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrID: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrID)), + }, + }, + + // Step 2: Import command + { + ConfigDirectory: config.StaticDirectory("testdata/Policy/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ImportStateKind: resource.ImportCommandWithID, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + + // Step 3: Import block with Import ID + { + ConfigDirectory: config.StaticDirectory("testdata/Policy/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + }, + }, + }, + + // Step 4: Import block with Resource Identity + { + ConfigDirectory: config.StaticDirectory("testdata/Policy/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithResourceIdentity, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + }, + 
}, + }, + }, + }) +} + +// Resource Identity was added after v6.4.0 +func testAccOrganizationsPolicy_Identity_ExistingResource(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.Policy + resourceName := "aws_organizations_policy.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.Test(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckOrganizationManagementAccount(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.OrganizationsServiceID), + CheckDestroy: testAccCheckPolicyDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/Policy/basic_v6.4.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckPolicyExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Policy/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrID: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrID)), + }, + 
}, + }, + }) +} + +// Resource Identity was added after v6.4.0 +func testAccOrganizationsPolicy_Identity_ExistingResource_NoRefresh_NoChange(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.Policy + resourceName := "aws_organizations_policy.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.Test(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckOrganizationManagementAccount(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.OrganizationsServiceID), + CheckDestroy: testAccCheckPolicyDestroy(ctx), + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/Policy/basic_v6.4.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckPolicyExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Policy/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + }, + }) +} diff --git 
a/internal/service/organizations/policy_tags_gen_test.go b/internal/service/organizations/policy_tags_gen_test.go new file mode 100644 index 000000000000..833227fde64d --- /dev/null +++ b/internal/service/organizations/policy_tags_gen_test.go @@ -0,0 +1,2385 @@ +// Code generated by internal/generate/tagstests/main.go; DO NOT EDIT. + +package organizations_test + +import ( + "testing" + + awstypes "github.com/aws/aws-sdk-go-v2/service/organizations/types" + "github.com/hashicorp/terraform-plugin-testing/config" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/knownvalue" + "github.com/hashicorp/terraform-plugin-testing/plancheck" + "github.com/hashicorp/terraform-plugin-testing/statecheck" + "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func testAccOrganizationsPolicy_tagsSerial(t *testing.T) { + t.Helper() + + testCases := map[string]func(t *testing.T){ + acctest.CtBasic: testAccOrganizationsPolicy_tags, + "null": testAccOrganizationsPolicy_tags_null, + "EmptyMap": testAccOrganizationsPolicy_tags_EmptyMap, + "AddOnUpdate": testAccOrganizationsPolicy_tags_AddOnUpdate, + "EmptyTag_OnCreate": testAccOrganizationsPolicy_tags_EmptyTag_OnCreate, + "EmptyTag_OnUpdate_Add": testAccOrganizationsPolicy_tags_EmptyTag_OnUpdate_Add, + "EmptyTag_OnUpdate_Replace": testAccOrganizationsPolicy_tags_EmptyTag_OnUpdate_Replace, + "DefaultTags_providerOnly": testAccOrganizationsPolicy_tags_DefaultTags_providerOnly, + "DefaultTags_nonOverlapping": testAccOrganizationsPolicy_tags_DefaultTags_nonOverlapping, + "DefaultTags_overlapping": testAccOrganizationsPolicy_tags_DefaultTags_overlapping, + "DefaultTags_updateToProviderOnly": testAccOrganizationsPolicy_tags_DefaultTags_updateToProviderOnly, + "DefaultTags_updateToResourceOnly": 
testAccOrganizationsPolicy_tags_DefaultTags_updateToResourceOnly, + "DefaultTags_emptyResourceTag": testAccOrganizationsPolicy_tags_DefaultTags_emptyResourceTag, + "DefaultTags_nullOverlappingResourceTag": testAccOrganizationsPolicy_tags_DefaultTags_nullOverlappingResourceTag, + "DefaultTags_nullNonOverlappingResourceTag": testAccOrganizationsPolicy_tags_DefaultTags_nullNonOverlappingResourceTag, + "ComputedTag_OnCreate": testAccOrganizationsPolicy_tags_ComputedTag_OnCreate, + "ComputedTag_OnUpdate_Add": testAccOrganizationsPolicy_tags_ComputedTag_OnUpdate_Add, + "ComputedTag_OnUpdate_Replace": testAccOrganizationsPolicy_tags_ComputedTag_OnUpdate_Replace, + "IgnoreTags_Overlap_DefaultTag": testAccOrganizationsPolicy_tags_IgnoreTags_Overlap_DefaultTag, + "IgnoreTags_Overlap_ResourceTag": testAccOrganizationsPolicy_tags_IgnoreTags_Overlap_ResourceTag, + } + + acctest.RunSerialTests1Level(t, testCases, 0) +} + +func testAccOrganizationsPolicy_tags(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.Policy + resourceName := "aws_organizations_policy.test" + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + + acctest.Test(ctx, t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckOrganizationManagementAccount(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.OrganizationsServiceID), + CheckDestroy: testAccCheckPolicyDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + ConfigDirectory: config.StaticDirectory("testdata/Policy/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckPolicyExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, 
tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Policy/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Policy/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1Updated), + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckPolicyExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: 
knownvalue.StringExact(acctest.CtValue1Updated), + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1Updated), + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1Updated), + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1Updated), + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Policy/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1Updated), + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Policy/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckPolicyExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + 
statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Policy/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Policy/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckPolicyExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + 
PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Policy/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccOrganizationsPolicy_tags_null(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.Policy + resourceName := "aws_organizations_policy.test" + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + + acctest.Test(ctx, t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckOrganizationManagementAccount(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.OrganizationsServiceID), + CheckDestroy: testAccCheckPolicyDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + ConfigDirectory: config.StaticDirectory("testdata/Policy/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: nil, + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckPolicyExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + 
plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + // TODO: Should be known + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Policy/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: nil, + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Policy/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: nil, + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + }, + }, + }) +} + +func testAccOrganizationsPolicy_tags_EmptyMap(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.Policy + resourceName := "aws_organizations_policy.test" + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + + acctest.Test(ctx, t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckOrganizationManagementAccount(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.OrganizationsServiceID), + CheckDestroy: testAccCheckPolicyDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + ConfigDirectory: config.StaticDirectory("testdata/Policy/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{}), + }, + Check: 
resource.ComposeAggregateTestCheckFunc( + testAccCheckPolicyExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + // TODO: Should be known + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Policy/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{}), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Policy/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: nil, + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + }, + }, + }) +} + +func testAccOrganizationsPolicy_tags_AddOnUpdate(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.Policy + resourceName := "aws_organizations_policy.test" + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + + acctest.Test(ctx, t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckOrganizationManagementAccount(ctx, t) + }, + ErrorCheck: 
acctest.ErrorCheck(t, names.OrganizationsServiceID), + CheckDestroy: testAccCheckPolicyDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + ConfigDirectory: config.StaticDirectory("testdata/Policy/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckPolicyExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + // TODO: Should be known + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Policy/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckPolicyExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + 
ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Policy/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccOrganizationsPolicy_tags_EmptyTag_OnCreate(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.Policy + resourceName := "aws_organizations_policy.test" + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + + acctest.Test(ctx, t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckOrganizationManagementAccount(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.OrganizationsServiceID), + CheckDestroy: testAccCheckPolicyDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + ConfigDirectory: config.StaticDirectory("testdata/Policy/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckPolicyExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + 
statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + // TODO: Should be known + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Policy/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Policy/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckPolicyExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + 
plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Policy/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccOrganizationsPolicy_tags_EmptyTag_OnUpdate_Add(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.Policy + resourceName := "aws_organizations_policy.test" + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + + acctest.Test(ctx, t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckOrganizationManagementAccount(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.OrganizationsServiceID), + CheckDestroy: testAccCheckPolicyDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + ConfigDirectory: config.StaticDirectory("testdata/Policy/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckPolicyExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), 
+ })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Policy/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + acctest.CtKey2: config.StringVariable(""), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckPolicyExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + acctest.CtKey2: knownvalue.StringExact(""), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + acctest.CtKey2: knownvalue.StringExact(""), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + acctest.CtKey2: knownvalue.StringExact(""), + })), + // TODO: Should be known + 
plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Policy/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + acctest.CtKey2: config.StringVariable(""), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Policy/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckPolicyExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ConfigDirectory: 
config.StaticDirectory("testdata/Policy/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccOrganizationsPolicy_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.Policy + resourceName := "aws_organizations_policy.test" + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + + acctest.Test(ctx, t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckOrganizationManagementAccount(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.OrganizationsServiceID), + CheckDestroy: testAccCheckPolicyDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + ConfigDirectory: config.StaticDirectory("testdata/Policy/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckPolicyExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + 
plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Policy/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckPolicyExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + // TODO: Should be known + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Policy/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + }, + ResourceName: resourceName, + ImportState: true, 
+ ImportStateVerify: true, + }, + }, + }) +} + +func testAccOrganizationsPolicy_tags_DefaultTags_providerOnly(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.Policy + resourceName := "aws_organizations_policy.test" + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + + acctest.Test(ctx, t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckOrganizationManagementAccount(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.OrganizationsServiceID), + CheckDestroy: testAccCheckPolicyDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Policy/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckPolicyExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: 
config.StaticDirectory("testdata/Policy/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Policy/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1Updated), + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckPolicyExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1Updated), + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1Updated), + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + }, + }, + { + 
ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Policy/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1Updated), + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Policy/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckPolicyExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + 
ConfigDirectory: config.StaticDirectory("testdata/Policy/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Policy/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckPolicyExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Policy/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccOrganizationsPolicy_tags_DefaultTags_nonOverlapping(t *testing.T) { + ctx := acctest.Context(t) + + var v 
awstypes.Policy + resourceName := "aws_organizations_policy.test" + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + + acctest.Test(ctx, t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckOrganizationManagementAccount(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.OrganizationsServiceID), + CheckDestroy: testAccCheckPolicyDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Policy/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckPolicyExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1), + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + 
plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1), + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Policy/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Policy/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1Updated), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: config.StringVariable(acctest.CtResourceValue2), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckPolicyExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + 
statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1Updated), + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1Updated), + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Policy/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1Updated), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: config.StringVariable(acctest.CtResourceValue2), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + ProtoV5ProviderFactories: 
acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Policy/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckPolicyExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Policy/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccOrganizationsPolicy_tags_DefaultTags_overlapping(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.Policy + resourceName := "aws_organizations_policy.test" + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + + acctest.Test(ctx, t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckOrganizationManagementAccount(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.OrganizationsServiceID), + CheckDestroy: testAccCheckPolicyDestroy(ctx), + Steps: []resource.TestStep{ + { + 
ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Policy/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtResourceValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckPolicyExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Policy/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: 
config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtResourceValue1), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Policy/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtProviderValue1), + acctest.CtOverlapKey2: config.StringVariable("providervalue2"), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtResourceValue1), + acctest.CtOverlapKey2: config.StringVariable(acctest.CtResourceValue2), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckPolicyExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + acctest.CtOverlapKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + acctest.CtOverlapKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, 
tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + acctest.CtOverlapKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + acctest.CtOverlapKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Policy/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtProviderValue1), + acctest.CtOverlapKey2: config.StringVariable("providervalue2"), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtResourceValue1), + acctest.CtOverlapKey2: config.StringVariable(acctest.CtResourceValue2), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Policy/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtResourceValue2), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckPolicyExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + 
statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue2), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Policy/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtResourceValue2), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccOrganizationsPolicy_tags_DefaultTags_updateToProviderOnly(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.Policy + resourceName := "aws_organizations_policy.test" + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + + acctest.Test(ctx, t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + 
acctest.PreCheckOrganizationManagementAccount(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.OrganizationsServiceID), + CheckDestroy: testAccCheckPolicyDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Policy/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckPolicyExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Policy/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: 
config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckPolicyExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Policy/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccOrganizationsPolicy_tags_DefaultTags_updateToResourceOnly(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.Policy + resourceName := "aws_organizations_policy.test" + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + + acctest.Test(ctx, t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckOrganizationManagementAccount(ctx, t) + }, + ErrorCheck: 
acctest.ErrorCheck(t, names.OrganizationsServiceID), + CheckDestroy: testAccCheckPolicyDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Policy/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckPolicyExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Policy/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckPolicyExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + 
statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Policy/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccOrganizationsPolicy_tags_DefaultTags_emptyResourceTag(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.Policy + resourceName := "aws_organizations_policy.test" + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + + acctest.Test(ctx, t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckOrganizationManagementAccount(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.OrganizationsServiceID), + CheckDestroy: testAccCheckPolicyDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: 
acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Policy/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckPolicyExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + // TODO: Should be known + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Policy/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + }, + ResourceName: resourceName, + ImportState: true, + 
ImportStateVerify: true, + }, + }, + }) +} + +func testAccOrganizationsPolicy_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.Policy + resourceName := "aws_organizations_policy.test" + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + + acctest.Test(ctx, t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckOrganizationManagementAccount(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.OrganizationsServiceID), + CheckDestroy: testAccCheckPolicyDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Policy/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckPolicyExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + // TODO: Should be known + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Policy/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: 
config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccOrganizationsPolicy_tags_DefaultTags_nullOverlappingResourceTag(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.Policy + resourceName := "aws_organizations_policy.test" + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + + acctest.Test(ctx, t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckOrganizationManagementAccount(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.OrganizationsServiceID), + CheckDestroy: testAccCheckPolicyDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Policy/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: nil, + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckPolicyExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtProviderValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, 
tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtProviderValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Policy/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: nil, + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccOrganizationsPolicy_tags_DefaultTags_nullNonOverlappingResourceTag(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.Policy + resourceName := "aws_organizations_policy.test" + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + + acctest.Test(ctx, t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckOrganizationManagementAccount(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.OrganizationsServiceID), + CheckDestroy: testAccCheckPolicyDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Policy/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: nil, + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckPolicyExists(ctx, 
resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Policy/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: nil, + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccOrganizationsPolicy_tags_ComputedTag_OnCreate(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.Policy + resourceName := "aws_organizations_policy.test" + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + + acctest.Test(ctx, t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckOrganizationManagementAccount(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.OrganizationsServiceID), + CheckDestroy: testAccCheckPolicyDestroy(ctx), + Steps: []resource.TestStep{ 
+ { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Policy/tagsComputed1/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "unknownTagKey": config.StringVariable("computedkey1"), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckPolicyExists(ctx, resourceName, &v), + resource.TestCheckResourceAttrPair(resourceName, "tags.computedkey1", "null_resource.test", names.AttrID), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapSizeExact(1)), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapSizeExact(1)), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTags)), + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Policy/tagsComputed1/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "unknownTagKey": config.StringVariable("computedkey1"), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccOrganizationsPolicy_tags_ComputedTag_OnUpdate_Add(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.Policy + resourceName := "aws_organizations_policy.test" + rName := acctest.RandomWithPrefix(t, 
acctest.ResourcePrefix) + + acctest.Test(ctx, t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckOrganizationManagementAccount(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.OrganizationsServiceID), + CheckDestroy: testAccCheckPolicyDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Policy/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckPolicyExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Policy/tagsComputed2/"), + ConfigVariables: config.Variables{ + acctest.CtRName: 
config.StringVariable(rName), + "unknownTagKey": config.StringVariable("computedkey1"), + "knownTagKey": config.StringVariable(acctest.CtKey1), + "knownTagValue": config.StringVariable(acctest.CtValue1), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckPolicyExists(ctx, resourceName, &v), + resource.TestCheckResourceAttrPair(resourceName, "tags.computedkey1", "null_resource.test", names.AttrID), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapSizeExact(2)), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapPartial(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapSizeExact(2)), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapPartial(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTags)), + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Policy/tagsComputed2/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "unknownTagKey": config.StringVariable("computedkey1"), + "knownTagKey": 
config.StringVariable(acctest.CtKey1), + "knownTagValue": config.StringVariable(acctest.CtValue1), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccOrganizationsPolicy_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.Policy + resourceName := "aws_organizations_policy.test" + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + + acctest.Test(ctx, t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckOrganizationManagementAccount(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.OrganizationsServiceID), + CheckDestroy: testAccCheckPolicyDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Policy/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckPolicyExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: 
knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Policy/tagsComputed1/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "unknownTagKey": config.StringVariable(acctest.CtKey1), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckPolicyExists(ctx, resourceName, &v), + resource.TestCheckResourceAttrPair(resourceName, acctest.CtTagsKey1, "null_resource.test", names.AttrID), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapSizeExact(1)), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapSizeExact(1)), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTags)), + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Policy/tagsComputed1/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "unknownTagKey": config.StringVariable(acctest.CtKey1), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + 
}, + }) +} + +func testAccOrganizationsPolicy_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.Policy + resourceName := "aws_organizations_policy.test" + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + + acctest.Test(ctx, t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckOrganizationManagementAccount(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.OrganizationsServiceID), + CheckDestroy: testAccCheckPolicyDestroy(ctx), + Steps: []resource.TestStep{ + // 1: Create + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Policy/tags_ignore/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1), + }), + "ignore_tag_keys": config.SetVariable( + config.StringVariable(acctest.CtProviderKey1), + ), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckPolicyExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + expectFullResourceTags(ctx, resourceName, knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1), // TODO: Should not be set + 
acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectEmptyPlan(), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectEmptyPlan(), + }, + }, + }, + // 2: Update ignored tag only + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Policy/tags_ignore/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1Updated), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1), + }), + "ignore_tag_keys": config.SetVariable( + config.StringVariable(acctest.CtProviderKey1), + ), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckPolicyExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), 
knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + expectFullResourceTags(ctx, resourceName, knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1), // TODO: Should not be set + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectEmptyPlan(), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectEmptyPlan(), + }, + }, + }, + // 3: Update both tags + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Policy/tags_ignore/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1Again), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1Updated), + }), + "ignore_tag_keys": config.SetVariable( + config.StringVariable(acctest.CtProviderKey1), + ), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckPolicyExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + 
statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + })), + expectFullResourceTags(ctx, resourceName, knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1), // TODO: Should not be set + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + })), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectEmptyPlan(), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectEmptyPlan(), + }, + }, + }, + }, + }) +} + +func testAccOrganizationsPolicy_tags_IgnoreTags_Overlap_ResourceTag(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.Policy + resourceName := "aws_organizations_policy.test" + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + + acctest.Test(ctx, t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckOrganizationManagementAccount(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.OrganizationsServiceID), + CheckDestroy: 
testAccCheckPolicyDestroy(ctx), + Steps: []resource.TestStep{ + // 1: Create + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Policy/tags_ignore/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1), + acctest.CtResourceKey2: config.StringVariable(acctest.CtResourceValue2), + }), + "ignore_tag_keys": config.SetVariable( + config.StringVariable(acctest.CtResourceKey1), + ), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckPolicyExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + expectFullResourceTags(ctx, resourceName, knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), // TODO: Should not be set + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, 
tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + }, + ExpectNonEmptyPlan: true, + }, + // 2: Update ignored tag + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Policy/tags_ignore/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: config.StringVariable(acctest.CtResourceValue2), + }), + "ignore_tag_keys": config.SetVariable( + 
config.StringVariable(acctest.CtResourceKey1), + ), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckPolicyExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + expectFullResourceTags(ctx, resourceName, knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), // TODO: Should not be set + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + 
plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + }, + ExpectNonEmptyPlan: true, + }, + // 3: Update both tags + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Policy/tags_ignore/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1Again), + acctest.CtResourceKey2: config.StringVariable(acctest.CtResourceValue2Updated), + }), + "ignore_tag_keys": config.SetVariable( + config.StringVariable(acctest.CtResourceKey1), + ), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckPolicyExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: 
knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + expectFullResourceTags(ctx, resourceName, knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), // TODO: Should not be set + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Again), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Again), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: 
knownvalue.StringExact(acctest.CtResourceValue1Again), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + }, + }, + ExpectNonEmptyPlan: true, + }, + }, + }) +} diff --git a/internal/service/organizations/policy_test.go b/internal/service/organizations/policy_test.go index ef89532efbfc..130fa5141352 100644 --- a/internal/service/organizations/policy_test.go +++ b/internal/service/organizations/policy_test.go @@ -32,7 +32,10 @@ func testAccPolicy_basic(t *testing.T) { resourceName := "aws_organizations_policy.test" resource.Test(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckOrganizationsAccount(ctx, t) }, + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckOrganizationManagementAccount(ctx, t) + }, ErrorCheck: acctest.ErrorCheck(t, names.OrganizationsServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckPolicyDestroy(ctx), @@ -41,7 +44,7 @@ func testAccPolicy_basic(t *testing.T) { Config: testAccPolicyConfig_required(rName, content1), Check: resource.ComposeTestCheckFunc( testAccCheckPolicyExists(ctx, resourceName, &policy), - acctest.MatchResourceAttrGlobalARN(ctx, resourceName, names.AttrARN, "organizations", regexache.MustCompile("policy/o-.+/service_control_policy/p-.+$")), + acctest.MatchResourceAttrGlobalARN(ctx, resourceName, names.AttrARN, "organizations", regexache.MustCompile("policy/"+organizationIDRegexPattern+"/service_control_policy/p-[0-9a-z]{8}")), resource.TestCheckResourceAttr(resourceName, names.AttrContent, content1), resource.TestCheckResourceAttr(resourceName, names.AttrDescription, ""), resource.TestCheckResourceAttr(resourceName, names.AttrName, rName), @@ -77,7 +80,10 @@ func 
testAccPolicy_concurrent(t *testing.T) { resourceName5 := "aws_organizations_policy.test5" resource.Test(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckOrganizationsAccount(ctx, t) }, + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckOrganizationManagementAccount(ctx, t) + }, ErrorCheck: acctest.ErrorCheck(t, names.OrganizationsServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckPolicyDestroy(ctx), @@ -103,7 +109,10 @@ func testAccPolicy_description(t *testing.T) { resourceName := "aws_organizations_policy.test" resource.Test(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckOrganizationsAccount(ctx, t) }, + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckOrganizationManagementAccount(ctx, t) + }, ErrorCheck: acctest.ErrorCheck(t, names.OrganizationsServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckPolicyDestroy(ctx), @@ -132,53 +141,6 @@ func testAccPolicy_description(t *testing.T) { }) } -func testAccPolicy_tags(t *testing.T) { - ctx := acctest.Context(t) - var policy awstypes.Policy - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resourceName := "aws_organizations_policy.test" - - resource.Test(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckOrganizationsAccount(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, names.OrganizationsServiceID), - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckPolicyDestroy(ctx), - Steps: []resource.TestStep{ - { - Config: testAccPolicyConfig_tags1(rName, acctest.CtKey1, acctest.CtValue1), - Check: resource.ComposeTestCheckFunc( - testAccCheckPolicyExists(ctx, resourceName, &policy), - resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, "1"), - resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey1, acctest.CtValue1), - ), 
- }, - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{names.AttrSkipDestroy}, - }, - { - Config: testAccPolicyConfig_tags2(rName, acctest.CtKey1, acctest.CtValue1Updated, acctest.CtKey2, acctest.CtValue2), - Check: resource.ComposeTestCheckFunc( - testAccCheckPolicyExists(ctx, resourceName, &policy), - resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, "2"), - resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey1, acctest.CtValue1Updated), - resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey2, acctest.CtValue2), - ), - }, - { - Config: testAccPolicyConfig_tags1(rName, acctest.CtKey2, acctest.CtValue2), - Check: resource.ComposeTestCheckFunc( - testAccCheckPolicyExists(ctx, resourceName, &policy), - resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, "1"), - resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey2, acctest.CtValue2), - ), - }, - }, - }) -} - func testAccPolicy_skipDestroy(t *testing.T) { ctx := acctest.Context(t) var policy awstypes.Policy @@ -187,7 +149,10 @@ func testAccPolicy_skipDestroy(t *testing.T) { resourceName := "aws_organizations_policy.test" resource.Test(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckOrganizationsAccount(ctx, t) }, + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckOrganizationManagementAccount(ctx, t) + }, ErrorCheck: acctest.ErrorCheck(t, names.OrganizationsServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckPolicyNoDestroy(ctx), @@ -196,7 +161,7 @@ func testAccPolicy_skipDestroy(t *testing.T) { Config: testAccPolicyConfig_skipDestroy(rName, content), Check: resource.ComposeTestCheckFunc( testAccCheckPolicyExists(ctx, resourceName, &policy), - acctest.MatchResourceAttrGlobalARN(ctx, resourceName, names.AttrARN, "organizations", 
regexache.MustCompile("policy/o-.+/service_control_policy/p-.+$")), + acctest.MatchResourceAttrGlobalARN(ctx, resourceName, names.AttrARN, "organizations", regexache.MustCompile("policy/"+organizationIDRegexPattern+"/service_control_policy/p-.+$")), resource.TestCheckResourceAttr(resourceName, names.AttrContent, content), resource.TestCheckResourceAttr(resourceName, names.AttrDescription, ""), resource.TestCheckResourceAttr(resourceName, names.AttrName, rName), @@ -215,7 +180,10 @@ func testAccPolicy_disappears(t *testing.T) { resourceName := "aws_organizations_policy.test" resource.Test(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckOrganizationsAccount(ctx, t) }, + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckOrganizationManagementAccount(ctx, t) + }, ErrorCheck: acctest.ErrorCheck(t, names.OrganizationsServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckPolicyDestroy(ctx), @@ -241,7 +209,10 @@ func testAccPolicy_type_AI_OPT_OUT(t *testing.T) { AiOptOutPolicyContent := `{ "services": { "rekognition": { "opt_out_policy": { "@@assign": "optOut" } }, "lex": { "opt_out_policy": { "@@assign": "optIn" } } } }` resource.Test(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckOrganizationsAccount(ctx, t) }, + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckOrganizationManagementAccount(ctx, t) + }, ErrorCheck: acctest.ErrorCheck(t, names.OrganizationsServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckPolicyDestroy(ctx), @@ -340,7 +311,10 @@ func testAccPolicy_type_Backup(t *testing.T) { }`, acctest.AlternateRegion(), acctest.Region(), acctest.Partition()) resource.Test(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckOrganizationsAccount(ctx, t) }, + PreCheck: func() { + acctest.PreCheck(ctx, t) + 
acctest.PreCheckOrganizationManagementAccount(ctx, t) + }, ErrorCheck: acctest.ErrorCheck(t, names.OrganizationsServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckPolicyDestroy(ctx), @@ -370,7 +344,10 @@ func testAccPolicy_type_SCP(t *testing.T) { serviceControlPolicyContent := `{"Version": "2012-10-17", "Statement": { "Effect": "Allow", "Action": "*", "Resource": "*"}}` resource.Test(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckOrganizationsAccount(ctx, t) }, + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckOrganizationManagementAccount(ctx, t) + }, ErrorCheck: acctest.ErrorCheck(t, names.OrganizationsServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckPolicyDestroy(ctx), @@ -407,7 +384,10 @@ func testAccPolicy_type_Tag(t *testing.T) { tagPolicyContent := `{ "tags": { "Product": { "tag_key": { "@@assign": "Product" }, "enforced_for": { "@@assign": [ "ec2:instance" ] } } } }` resource.Test(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckOrganizationsAccount(ctx, t) }, + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckOrganizationManagementAccount(ctx, t) + }, ErrorCheck: acctest.ErrorCheck(t, names.OrganizationsServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckPolicyDestroy(ctx), @@ -436,14 +416,14 @@ func testAccPolicy_importManagedPolicy(t *testing.T) { resourceID := "p-FullAWSAccess" resource.Test(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckOrganizationsAccount(ctx, t) }, + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckOrganizationManagementAccount(ctx, t) + }, ErrorCheck: acctest.ErrorCheck(t, names.OrganizationsServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckPolicyDestroy(ctx), Steps: []resource.TestStep{ - 
{ - Config: testAccPolicyConfig_managedSetup, - }, { Config: testAccPolicyConfig_managed, ResourceName: resourceName, @@ -532,8 +512,6 @@ func testAccCheckPolicyExists(ctx context.Context, n string, v *awstypes.Policy) func testAccPolicyConfig_description(rName, description string) string { return fmt.Sprintf(` -resource "aws_organizations_organization" "test" {} - resource "aws_organizations_policy" "test" { content = < 0 { + input.AdminProGroup = flex.ExpandStringValueList(v.([]any)) + } + if v, ok := d.GetOk("author_group"); ok && len(v.([]any)) > 0 { input.AuthorGroup = flex.ExpandStringValueList(v.([]any)) } + if v, ok := d.GetOk("author_pro_group"); ok && len(v.([]any)) > 0 { + input.AuthorProGroup = flex.ExpandStringValueList(v.([]any)) + } + if v, ok := d.GetOk("reader_group"); ok && len(v.([]any)) > 0 { input.ReaderGroup = flex.ExpandStringValueList(v.([]any)) } + if v, ok := d.GetOk("reader_pro_group"); ok && len(v.([]any)) > 0 { + input.ReaderProGroup = flex.ExpandStringValueList(v.([]any)) + } + if v, ok := d.GetOk("contact_number"); ok { input.ContactNumber = aws.String(v.(string)) } diff --git a/internal/service/quicksight/account_subscription_test.go b/internal/service/quicksight/account_subscription_test.go index 23c7aa5e70e3..f58bfe38bf8d 100644 --- a/internal/service/quicksight/account_subscription_test.go +++ b/internal/service/quicksight/account_subscription_test.go @@ -13,6 +13,7 @@ import ( awstypes "github.com/aws/aws-sdk-go-v2/service/quicksight/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/plancheck" "github.com/hashicorp/terraform-plugin-testing/terraform" "github.com/hashicorp/terraform-provider-aws/internal/acctest" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -45,9 +46,10 @@ func testAccAccountSubscription_basic(t *testing.T) { ), }, { - ResourceName: resourceName, - 
ImportState: false, - RefreshState: true, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"authentication_method"}, // Not returned from the DescribeAccountSubscription API }, }, }) @@ -76,6 +78,11 @@ func testAccAccountSubscription_disappears(t *testing.T) { acctest.CheckResourceDisappears(ctx, acctest.Provider, tfquicksight.ResourceAccountSubscription(), resourceName), ), ExpectNonEmptyPlan: true, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + }, + }, }, }, }) @@ -118,10 +125,9 @@ func testAccCheckAccountSubscriptionDisableTerminationProtection(ctx context.Con conn := acctest.Provider.Meta().(*conns.AWSClient).QuickSightClient(ctx) - defaultNs := "default" input := &quicksight.UpdateAccountSettingsInput{ AwsAccountId: aws.String(rs.Primary.ID), - DefaultNamespace: aws.String(defaultNs), + DefaultNamespace: aws.String(tfquicksight.DefaultNamespace), TerminationProtectionEnabled: false, } diff --git a/internal/service/quicksight/analysis.go b/internal/service/quicksight/analysis.go index fbc640f2b387..baa2b57707fc 100644 --- a/internal/service/quicksight/analysis.go +++ b/internal/service/quicksight/analysis.go @@ -26,7 +26,6 @@ import ( tfslices "github.com/hashicorp/terraform-provider-aws/internal/slices" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" - "github.com/hashicorp/terraform-provider-aws/internal/verify" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -65,13 +64,7 @@ func resourceAnalysis() *schema.Resource { Type: schema.TypeString, Computed: true, }, - names.AttrAWSAccountID: { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - ValidateFunc: verify.ValidAccountID, - }, + names.AttrAWSAccountID: quicksightschema.AWSAccountIDSchema(), 
names.AttrCreatedTime: { Type: schema.TypeString, Computed: true, diff --git a/internal/service/quicksight/analysis_data_source.go b/internal/service/quicksight/analysis_data_source.go index fefdac11b9d9..e4c30f746ae6 100644 --- a/internal/service/quicksight/analysis_data_source.go +++ b/internal/service/quicksight/analysis_data_source.go @@ -13,7 +13,6 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" quicksightschema "github.com/hashicorp/terraform-provider-aws/internal/service/quicksight/schema" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/verify" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -33,12 +32,7 @@ func dataSourceAnalysis() *schema.Resource { Type: schema.TypeString, Computed: true, }, - names.AttrAWSAccountID: { - Type: schema.TypeString, - Optional: true, - Computed: true, - ValidateFunc: verify.ValidAccountID, - }, + names.AttrAWSAccountID: quicksightschema.AWSAccountIDDataSourceSchema(), names.AttrCreatedTime: { Type: schema.TypeString, Computed: true, diff --git a/internal/service/quicksight/analysis_data_source_tags_gen_test.go b/internal/service/quicksight/analysis_data_source_tags_gen_test.go index 1503a04838d8..58fe94ca86ef 100644 --- a/internal/service/quicksight/analysis_data_source_tags_gen_test.go +++ b/internal/service/quicksight/analysis_data_source_tags_gen_test.go @@ -6,7 +6,6 @@ import ( "testing" "github.com/hashicorp/terraform-plugin-testing/config" - sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/knownvalue" "github.com/hashicorp/terraform-plugin-testing/statecheck" @@ -17,10 +16,11 @@ import ( func TestAccQuickSightAnalysisDataSource_tags(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_quicksight_analysis.test" - rName := 
sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -45,10 +45,11 @@ func TestAccQuickSightAnalysisDataSource_tags(t *testing.T) { func TestAccQuickSightAnalysisDataSource_tags_NullMap(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_quicksight_analysis.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -69,10 +70,11 @@ func TestAccQuickSightAnalysisDataSource_tags_NullMap(t *testing.T) { func TestAccQuickSightAnalysisDataSource_tags_EmptyMap(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_quicksight_analysis.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -93,10 +95,11 @@ func TestAccQuickSightAnalysisDataSource_tags_EmptyMap(t *testing.T) { func TestAccQuickSightAnalysisDataSource_tags_DefaultTags_nonOverlapping(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_quicksight_analysis.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := 
acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), Steps: []resource.TestStep{ @@ -125,10 +128,11 @@ func TestAccQuickSightAnalysisDataSource_tags_DefaultTags_nonOverlapping(t *test func TestAccQuickSightAnalysisDataSource_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_quicksight_analysis.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), Steps: []resource.TestStep{ @@ -163,10 +167,11 @@ func TestAccQuickSightAnalysisDataSource_tags_IgnoreTags_Overlap_DefaultTag(t *t func TestAccQuickSightAnalysisDataSource_tags_IgnoreTags_Overlap_ResourceTag(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_quicksight_analysis.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), Steps: []resource.TestStep{ diff --git a/internal/service/quicksight/analysis_tags_gen_test.go b/internal/service/quicksight/analysis_tags_gen_test.go index 8e35bfdc5fbc..38430afc2831 100644 --- a/internal/service/quicksight/analysis_tags_gen_test.go +++ b/internal/service/quicksight/analysis_tags_gen_test.go @@ -7,7 +7,6 @@ import ( awstypes "github.com/aws/aws-sdk-go-v2/service/quicksight/types" "github.com/hashicorp/terraform-plugin-testing/config" - sdkacctest 
"github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/knownvalue" "github.com/hashicorp/terraform-plugin-testing/plancheck" @@ -19,11 +18,12 @@ import ( func TestAccQuickSightAnalysis_tags(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Analysis resourceName := "aws_quicksight_analysis.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckAnalysisDestroy(ctx), @@ -203,11 +203,12 @@ func TestAccQuickSightAnalysis_tags_null(t *testing.T) { t.Skip("Resource Analysis does not support null tags") ctx := acctest.Context(t) + var v awstypes.Analysis resourceName := "aws_quicksight_analysis.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckAnalysisDestroy(ctx), @@ -270,11 +271,12 @@ func TestAccQuickSightAnalysis_tags_null(t *testing.T) { func TestAccQuickSightAnalysis_tags_EmptyMap(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Analysis resourceName := "aws_quicksight_analysis.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: 
testAccCheckAnalysisDestroy(ctx), @@ -333,11 +335,12 @@ func TestAccQuickSightAnalysis_tags_EmptyMap(t *testing.T) { func TestAccQuickSightAnalysis_tags_AddOnUpdate(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Analysis resourceName := "aws_quicksight_analysis.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckAnalysisDestroy(ctx), @@ -416,11 +419,12 @@ func TestAccQuickSightAnalysis_tags_EmptyTag_OnCreate(t *testing.T) { t.Skip("Resource Analysis does not support empty tags") ctx := acctest.Context(t) + var v awstypes.Analysis resourceName := "aws_quicksight_analysis.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckAnalysisDestroy(ctx), @@ -507,11 +511,12 @@ func TestAccQuickSightAnalysis_tags_EmptyTag_OnUpdate_Add(t *testing.T) { t.Skip("Resource Analysis does not support empty tags") ctx := acctest.Context(t) + var v awstypes.Analysis resourceName := "aws_quicksight_analysis.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckAnalysisDestroy(ctx), @@ -646,11 +651,12 @@ func 
TestAccQuickSightAnalysis_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { t.Skip("Resource Analysis does not support empty tags") ctx := acctest.Context(t) + var v awstypes.Analysis resourceName := "aws_quicksight_analysis.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckAnalysisDestroy(ctx), @@ -735,11 +741,12 @@ func TestAccQuickSightAnalysis_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { func TestAccQuickSightAnalysis_tags_DefaultTags_providerOnly(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Analysis resourceName := "aws_quicksight_analysis.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckAnalysisDestroy(ctx), @@ -916,11 +923,12 @@ func TestAccQuickSightAnalysis_tags_DefaultTags_providerOnly(t *testing.T) { func TestAccQuickSightAnalysis_tags_DefaultTags_nonOverlapping(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Analysis resourceName := "aws_quicksight_analysis.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckAnalysisDestroy(ctx), @@ -1076,11 +1084,12 @@ func 
TestAccQuickSightAnalysis_tags_DefaultTags_nonOverlapping(t *testing.T) { func TestAccQuickSightAnalysis_tags_DefaultTags_overlapping(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Analysis resourceName := "aws_quicksight_analysis.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckAnalysisDestroy(ctx), @@ -1252,11 +1261,12 @@ func TestAccQuickSightAnalysis_tags_DefaultTags_overlapping(t *testing.T) { func TestAccQuickSightAnalysis_tags_DefaultTags_updateToProviderOnly(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Analysis resourceName := "aws_quicksight_analysis.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckAnalysisDestroy(ctx), @@ -1342,11 +1352,12 @@ func TestAccQuickSightAnalysis_tags_DefaultTags_updateToProviderOnly(t *testing. 
func TestAccQuickSightAnalysis_tags_DefaultTags_updateToResourceOnly(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Analysis resourceName := "aws_quicksight_analysis.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckAnalysisDestroy(ctx), @@ -1433,11 +1444,12 @@ func TestAccQuickSightAnalysis_tags_DefaultTags_emptyResourceTag(t *testing.T) { t.Skip("Resource Analysis does not support empty tags") ctx := acctest.Context(t) + var v awstypes.Analysis resourceName := "aws_quicksight_analysis.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckAnalysisDestroy(ctx), @@ -1500,11 +1512,12 @@ func TestAccQuickSightAnalysis_tags_DefaultTags_emptyProviderOnlyTag(t *testing. 
t.Skip("Resource Analysis does not support empty tags") ctx := acctest.Context(t) + var v awstypes.Analysis resourceName := "aws_quicksight_analysis.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckAnalysisDestroy(ctx), @@ -1559,11 +1572,12 @@ func TestAccQuickSightAnalysis_tags_DefaultTags_nullOverlappingResourceTag(t *te t.Skip("Resource Analysis does not support null tags") ctx := acctest.Context(t) + var v awstypes.Analysis resourceName := "aws_quicksight_analysis.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckAnalysisDestroy(ctx), @@ -1623,11 +1637,12 @@ func TestAccQuickSightAnalysis_tags_DefaultTags_nullNonOverlappingResourceTag(t t.Skip("Resource Analysis does not support null tags") ctx := acctest.Context(t) + var v awstypes.Analysis resourceName := "aws_quicksight_analysis.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckAnalysisDestroy(ctx), @@ -1685,11 +1700,12 @@ func TestAccQuickSightAnalysis_tags_DefaultTags_nullNonOverlappingResourceTag(t func TestAccQuickSightAnalysis_tags_ComputedTag_OnCreate(t *testing.T) { ctx := 
acctest.Context(t) + var v awstypes.Analysis resourceName := "aws_quicksight_analysis.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckAnalysisDestroy(ctx), @@ -1740,11 +1756,12 @@ func TestAccQuickSightAnalysis_tags_ComputedTag_OnCreate(t *testing.T) { func TestAccQuickSightAnalysis_tags_ComputedTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Analysis resourceName := "aws_quicksight_analysis.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckAnalysisDestroy(ctx), @@ -1837,11 +1854,12 @@ func TestAccQuickSightAnalysis_tags_ComputedTag_OnUpdate_Add(t *testing.T) { func TestAccQuickSightAnalysis_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Analysis resourceName := "aws_quicksight_analysis.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckAnalysisDestroy(ctx), @@ -1924,11 +1942,12 @@ func TestAccQuickSightAnalysis_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { func TestAccQuickSightAnalysis_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { ctx := acctest.Context(t) + 
var v awstypes.Analysis resourceName := "aws_quicksight_analysis.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckAnalysisDestroy(ctx), @@ -2086,11 +2105,12 @@ func TestAccQuickSightAnalysis_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) func TestAccQuickSightAnalysis_tags_IgnoreTags_Overlap_ResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Analysis resourceName := "aws_quicksight_analysis.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckAnalysisDestroy(ctx), diff --git a/internal/service/quicksight/custom_permissions.go b/internal/service/quicksight/custom_permissions.go new file mode 100644 index 000000000000..25200a2b23fc --- /dev/null +++ b/internal/service/quicksight/custom_permissions.go @@ -0,0 +1,308 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package quicksight + +import ( + "context" + "fmt" + + "github.com/YakDriver/regexache" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/quicksight" + awstypes "github.com/aws/aws-sdk-go-v2/service/quicksight/types" + "github.com/hashicorp/terraform-plugin-framework-validators/listvalidator" + "github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator" + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" + "github.com/hashicorp/terraform-provider-aws/internal/errs" + "github.com/hashicorp/terraform-provider-aws/internal/errs/fwdiag" + intflex "github.com/hashicorp/terraform-provider-aws/internal/flex" + "github.com/hashicorp/terraform-provider-aws/internal/framework" + fwflex "github.com/hashicorp/terraform-provider-aws/internal/framework/flex" + fwtypes "github.com/hashicorp/terraform-provider-aws/internal/framework/types" + tfmaps "github.com/hashicorp/terraform-provider-aws/internal/maps" + quicksightschema "github.com/hashicorp/terraform-provider-aws/internal/service/quicksight/schema" + tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + "github.com/hashicorp/terraform-provider-aws/names" +) + +// @FrameworkResource("aws_quicksight_custom_permissions", name="Custom Permissions") +// @Tags(identifierAttribute="arn") +// 
@Testing(existsType="github.com/aws/aws-sdk-go-v2/service/quicksight/types;awstypes;awstypes.CustomPermissions") +// @Testing(skipEmptyTags=true, skipNullTags=true) +// @Testing(importStateIdFunc="testAccCustomPermissionsImportStateID", importStateIdAttribute="custom_permissions_name") +func newCustomPermissionsResource(_ context.Context) (resource.ResourceWithConfigure, error) { + r := &customPermissionsResource{} + + return r, nil +} + +type customPermissionsResource struct { + framework.ResourceWithModel[customPermissionsResourceModel] +} + +func (r *customPermissionsResource) Schema(ctx context.Context, request resource.SchemaRequest, response *resource.SchemaResponse) { + response.Schema = schema.Schema{ + Attributes: map[string]schema.Attribute{ + names.AttrARN: framework.ARNAttributeComputedOnly(), + names.AttrAWSAccountID: quicksightschema.AWSAccountIDAttribute(), + "custom_permissions_name": schema.StringAttribute{ + Required: true, + Validators: []validator.String{ + stringvalidator.LengthBetween(1, 64), + stringvalidator.RegexMatches(regexache.MustCompile(`^[a-zA-Z0-9+=,.@_-]+$`), ""), + }, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + }, + names.AttrTags: tftags.TagsAttribute(), + names.AttrTagsAll: tftags.TagsAttributeComputedOnly(), + }, + Blocks: map[string]schema.Block{ + "capabilities": schema.ListNestedBlock{ + CustomType: fwtypes.NewListNestedObjectTypeOf[capabilitiesModel](ctx), + Validators: []validator.List{ + listvalidator.IsRequired(), + listvalidator.SizeAtLeast(1), + listvalidator.SizeAtMost(1), + }, + NestedObject: schema.NestedBlockObject{ + Attributes: tfmaps.ApplyToAllValues(fwtypes.AttributeTypesMust[capabilitiesModel](ctx), func(attr.Type) schema.Attribute { + return schema.StringAttribute{ + CustomType: fwtypes.StringEnumType[awstypes.CapabilityState](), + Optional: true, + } + }), + }, + }, + }, + } +} + +func (r *customPermissionsResource) Create(ctx context.Context, request 
resource.CreateRequest, response *resource.CreateResponse) { + var data customPermissionsResourceModel + response.Diagnostics.Append(request.Plan.Get(ctx, &data)...) + if response.Diagnostics.HasError() { + return + } + if data.AWSAccountID.IsUnknown() { + data.AWSAccountID = fwflex.StringValueToFramework(ctx, r.Meta().AccountID(ctx)) + } + + conn := r.Meta().QuickSightClient(ctx) + + name := fwflex.StringValueFromFramework(ctx, data.CustomPermissionsName) + var input quicksight.CreateCustomPermissionsInput + response.Diagnostics.Append(fwflex.Expand(ctx, data, &input)...) + if response.Diagnostics.HasError() { + return + } + + // Additional fields. + input.Tags = getTagsIn(ctx) + + output, err := conn.CreateCustomPermissions(ctx, &input) + + if err != nil { + response.Diagnostics.AddError(fmt.Sprintf("creating Quicksight Custom Permissions (%s)", name), err.Error()) + + return + } + + // Set values for unknowns. + data.ARN = fwflex.StringToFramework(ctx, output.Arn) + + response.Diagnostics.Append(response.State.Set(ctx, data)...) +} + +func (r *customPermissionsResource) Read(ctx context.Context, request resource.ReadRequest, response *resource.ReadResponse) { + var data customPermissionsResourceModel + response.Diagnostics.Append(request.State.Get(ctx, &data)...) + if response.Diagnostics.HasError() { + return + } + + conn := r.Meta().QuickSightClient(ctx) + + accountID, name := fwflex.StringValueFromFramework(ctx, data.AWSAccountID), fwflex.StringValueFromFramework(ctx, data.CustomPermissionsName) + output, err := findCustomPermissionsByTwoPartKey(ctx, conn, accountID, name) + + if tfresource.NotFound(err) { + response.Diagnostics.Append(fwdiag.NewResourceNotFoundWarningDiagnostic(err)) + response.State.RemoveResource(ctx) + + return + } + + if err != nil { + response.Diagnostics.AddError(fmt.Sprintf("reading Quicksight Custom Permissions (%s)", name), err.Error()) + + return + } + + // Set attributes for import. 
+ response.Diagnostics.Append(fwflex.Flatten(ctx, output, &data)...) + if response.Diagnostics.HasError() { + return + } + + response.Diagnostics.Append(response.State.Set(ctx, &data)...) +} + +func (r *customPermissionsResource) Update(ctx context.Context, request resource.UpdateRequest, response *resource.UpdateResponse) { + var new, old customPermissionsResourceModel + response.Diagnostics.Append(request.Plan.Get(ctx, &new)...) + if response.Diagnostics.HasError() { + return + } + response.Diagnostics.Append(request.State.Get(ctx, &old)...) + if response.Diagnostics.HasError() { + return + } + + conn := r.Meta().QuickSightClient(ctx) + + diff, diags := fwflex.Diff(ctx, new, old) + response.Diagnostics.Append(diags...) + if response.Diagnostics.HasError() { + return + } + + if diff.HasChanges() { + name := fwflex.StringValueFromFramework(ctx, new.CustomPermissionsName) + var input quicksight.UpdateCustomPermissionsInput + response.Diagnostics.Append(fwflex.Expand(ctx, new, &input)...) + if response.Diagnostics.HasError() { + return + } + + _, err := conn.UpdateCustomPermissions(ctx, &input) + + if err != nil { + response.Diagnostics.AddError(fmt.Sprintf("updating Quicksight Custom Permissions (%s)", name), err.Error()) + + return + } + } + + response.Diagnostics.Append(response.State.Set(ctx, &new)...) +} + +func (r *customPermissionsResource) Delete(ctx context.Context, request resource.DeleteRequest, response *resource.DeleteResponse) { + var data customPermissionsResourceModel + response.Diagnostics.Append(request.State.Get(ctx, &data)...) 
+ if response.Diagnostics.HasError() { + return + } + + conn := r.Meta().QuickSightClient(ctx) + + accountID, name := fwflex.StringValueFromFramework(ctx, data.AWSAccountID), fwflex.StringValueFromFramework(ctx, data.CustomPermissionsName) + input := quicksight.DeleteCustomPermissionsInput{ + AwsAccountId: aws.String(accountID), + CustomPermissionsName: aws.String(name), + } + _, err := conn.DeleteCustomPermissions(ctx, &input) + + if errs.IsA[*awstypes.ResourceNotFoundException](err) { + return + } + + if err != nil { + response.Diagnostics.AddError(fmt.Sprintf("deleting Quicksight Custom Permissions (%s)", name), err.Error()) + + return + } +} + +func (r *customPermissionsResource) ImportState(ctx context.Context, request resource.ImportStateRequest, response *resource.ImportStateResponse) { + const ( + customPermissionsIDParts = 2 + ) + parts, err := intflex.ExpandResourceId(request.ID, customPermissionsIDParts, true) + + if err != nil { + response.Diagnostics.Append(fwdiag.NewParsingResourceIDErrorDiagnostic(err)) + + return + } + + response.Diagnostics.Append(response.State.SetAttribute(ctx, path.Root(names.AttrAWSAccountID), parts[0])...) + response.Diagnostics.Append(response.State.SetAttribute(ctx, path.Root("custom_permissions_name"), parts[1])...) 
+} + +func findCustomPermissionsByTwoPartKey(ctx context.Context, conn *quicksight.Client, awsAccountID, customPermissionsName string) (*awstypes.CustomPermissions, error) { + input := &quicksight.DescribeCustomPermissionsInput{ + AwsAccountId: aws.String(awsAccountID), + CustomPermissionsName: aws.String(customPermissionsName), + } + + return findCustomPermissions(ctx, conn, input) +} + +func findCustomPermissions(ctx context.Context, conn *quicksight.Client, input *quicksight.DescribeCustomPermissionsInput) (*awstypes.CustomPermissions, error) { + output, err := conn.DescribeCustomPermissions(ctx, input) + + if errs.IsA[*awstypes.ResourceNotFoundException](err) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, + } + } + + if err != nil { + return nil, err + } + + if output == nil || output.CustomPermissions == nil { + return nil, tfresource.NewEmptyResultError(input) + } + + return output.CustomPermissions, nil +} + +type customPermissionsResourceModel struct { + framework.WithRegionModel + ARN types.String `tfsdk:"arn"` + AWSAccountID types.String `tfsdk:"aws_account_id"` + Capabilities fwtypes.ListNestedObjectValueOf[capabilitiesModel] `tfsdk:"capabilities"` + CustomPermissionsName types.String `tfsdk:"custom_permissions_name"` + Tags tftags.Map `tfsdk:"tags"` + TagsAll tftags.Map `tfsdk:"tags_all"` +} + +type capabilitiesModel struct { + AddOrRunAnomalyDetectionForAnalyses fwtypes.StringEnum[awstypes.CapabilityState] `tfsdk:"add_or_run_anomaly_detection_for_analyses"` + CreateAndUpdateDashboardEmailReports fwtypes.StringEnum[awstypes.CapabilityState] `tfsdk:"create_and_update_dashboard_email_reports"` + CreateAndUpdateDatasets fwtypes.StringEnum[awstypes.CapabilityState] `tfsdk:"create_and_update_datasets"` + CreateAndUpdateDataSources fwtypes.StringEnum[awstypes.CapabilityState] `tfsdk:"create_and_update_data_sources"` + CreateAndUpdateThemes fwtypes.StringEnum[awstypes.CapabilityState] `tfsdk:"create_and_update_themes"` + 
CreateAndUpdateThresholdAlerts fwtypes.StringEnum[awstypes.CapabilityState] `tfsdk:"create_and_update_threshold_alerts"` + CreateSharedFolders fwtypes.StringEnum[awstypes.CapabilityState] `tfsdk:"create_shared_folders"` + CreateSPICEDataset fwtypes.StringEnum[awstypes.CapabilityState] `tfsdk:"create_spice_dataset"` + ExportToCSV fwtypes.StringEnum[awstypes.CapabilityState] `tfsdk:"export_to_csv"` + ExportToCSVInScheduledReports fwtypes.StringEnum[awstypes.CapabilityState] `tfsdk:"export_to_csv_in_scheduled_reports"` + ExportToExcel fwtypes.StringEnum[awstypes.CapabilityState] `tfsdk:"export_to_excel"` + ExportToExcelInScheduledReports fwtypes.StringEnum[awstypes.CapabilityState] `tfsdk:"export_to_excel_in_scheduled_reports"` + ExportToPDF fwtypes.StringEnum[awstypes.CapabilityState] `tfsdk:"export_to_pdf"` + ExportToPDFInScheduledReports fwtypes.StringEnum[awstypes.CapabilityState] `tfsdk:"export_to_pdf_in_scheduled_reports"` + IncludeContentInScheduledReportsEmail fwtypes.StringEnum[awstypes.CapabilityState] `tfsdk:"include_content_in_scheduled_reports_email"` + PrintReports fwtypes.StringEnum[awstypes.CapabilityState] `tfsdk:"print_reports"` + RenameSharedFolders fwtypes.StringEnum[awstypes.CapabilityState] `tfsdk:"rename_shared_folders"` + ShareAnalyses fwtypes.StringEnum[awstypes.CapabilityState] `tfsdk:"share_analyses"` + ShareDashboards fwtypes.StringEnum[awstypes.CapabilityState] `tfsdk:"share_dashboards"` + ShareDatasets fwtypes.StringEnum[awstypes.CapabilityState] `tfsdk:"share_datasets"` + ShareDataSources fwtypes.StringEnum[awstypes.CapabilityState] `tfsdk:"share_data_sources"` + SubscribeDashboardEmailReports fwtypes.StringEnum[awstypes.CapabilityState] `tfsdk:"subscribe_dashboard_email_reports"` + ViewAccountSPICECapacity fwtypes.StringEnum[awstypes.CapabilityState] `tfsdk:"view_account_spice_capacity"` +} diff --git a/internal/service/quicksight/custom_permissions_tags_gen_test.go b/internal/service/quicksight/custom_permissions_tags_gen_test.go new 
file mode 100644 index 000000000000..4b217ec1c62d --- /dev/null +++ b/internal/service/quicksight/custom_permissions_tags_gen_test.go @@ -0,0 +1,2357 @@ +// Code generated by internal/generate/tagstests/main.go; DO NOT EDIT. + +package quicksight_test + +import ( + "testing" + + awstypes "github.com/aws/aws-sdk-go-v2/service/quicksight/types" + "github.com/hashicorp/terraform-plugin-testing/config" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/knownvalue" + "github.com/hashicorp/terraform-plugin-testing/plancheck" + "github.com/hashicorp/terraform-plugin-testing/statecheck" + "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccQuickSightCustomPermissions_tags(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.CustomPermissions + resourceName := "aws_quicksight_custom_permissions.test" + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), + CheckDestroy: testAccCheckCustomPermissionsDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + ConfigDirectory: config.StaticDirectory("testdata/CustomPermissions/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckCustomPermissionsExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ 
+ acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/CustomPermissions/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: testAccCustomPermissionsImportStateID(resourceName), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "custom_permissions_name", + }, + { + ConfigDirectory: config.StaticDirectory("testdata/CustomPermissions/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1Updated), + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckCustomPermissionsExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), 
knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1Updated), + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1Updated), + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1Updated), + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1Updated), + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/CustomPermissions/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1Updated), + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: testAccCustomPermissionsImportStateID(resourceName), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "custom_permissions_name", + }, + { + ConfigDirectory: config.StaticDirectory("testdata/CustomPermissions/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + 
acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckCustomPermissionsExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/CustomPermissions/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: testAccCustomPermissionsImportStateID(resourceName), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "custom_permissions_name", + }, + { + ConfigDirectory: config.StaticDirectory("testdata/CustomPermissions/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + 
testAccCheckCustomPermissionsExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/CustomPermissions/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: testAccCustomPermissionsImportStateID(resourceName), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "custom_permissions_name", + }, + }, + }) +} + +func TestAccQuickSightCustomPermissions_tags_null(t *testing.T) { + t.Skip("Resource CustomPermissions does not support null tags") + + ctx := acctest.Context(t) + + var v awstypes.CustomPermissions + resourceName := "aws_quicksight_custom_permissions.test" + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), + CheckDestroy: testAccCheckCustomPermissionsDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + ConfigDirectory: config.StaticDirectory("testdata/CustomPermissions/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + 
acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: nil, + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckCustomPermissionsExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.Null(), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.Null(), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/CustomPermissions/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: nil, + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: testAccCustomPermissionsImportStateID(resourceName), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "custom_permissions_name", + ImportStateVerifyIgnore: []string{ + acctest.CtTagsKey1, // The canonical value returned by the AWS API is "" + }, + }, + }, + }) +} + +func TestAccQuickSightCustomPermissions_tags_EmptyMap(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.CustomPermissions + resourceName := "aws_quicksight_custom_permissions.test" + rName := 
acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), + CheckDestroy: testAccCheckCustomPermissionsDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + ConfigDirectory: config.StaticDirectory("testdata/CustomPermissions/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{}), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckCustomPermissionsExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/CustomPermissions/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{}), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: testAccCustomPermissionsImportStateID(resourceName), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "custom_permissions_name", + ImportStateVerifyIgnore: []string{ + 
acctest.CtTagsKey1, // The canonical value returned by the AWS API is "" + }, + }, + }, + }) +} + +func TestAccQuickSightCustomPermissions_tags_AddOnUpdate(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.CustomPermissions + resourceName := "aws_quicksight_custom_permissions.test" + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), + CheckDestroy: testAccCheckCustomPermissionsDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + ConfigDirectory: config.StaticDirectory("testdata/CustomPermissions/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckCustomPermissionsExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/CustomPermissions/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: 
resource.ComposeAggregateTestCheckFunc( + testAccCheckCustomPermissionsExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/CustomPermissions/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: testAccCustomPermissionsImportStateID(resourceName), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "custom_permissions_name", + }, + }, + }) +} + +func TestAccQuickSightCustomPermissions_tags_EmptyTag_OnCreate(t *testing.T) { + t.Skip("Resource CustomPermissions does not support empty tags") + + ctx := acctest.Context(t) + + var v awstypes.CustomPermissions + resourceName := "aws_quicksight_custom_permissions.test" + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + + 
acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), + CheckDestroy: testAccCheckCustomPermissionsDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + ConfigDirectory: config.StaticDirectory("testdata/CustomPermissions/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckCustomPermissionsExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/CustomPermissions/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + }, + ResourceName: resourceName, + 
ImportState: true, + ImportStateIdFunc: testAccCustomPermissionsImportStateID(resourceName), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "custom_permissions_name", + }, + { + ConfigDirectory: config.StaticDirectory("testdata/CustomPermissions/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckCustomPermissionsExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/CustomPermissions/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: testAccCustomPermissionsImportStateID(resourceName), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "custom_permissions_name", + }, + }, + }) +} + +func TestAccQuickSightCustomPermissions_tags_EmptyTag_OnUpdate_Add(t *testing.T) { + t.Skip("Resource CustomPermissions does not support empty tags") + + ctx := acctest.Context(t) + + var v awstypes.CustomPermissions + resourceName := "aws_quicksight_custom_permissions.test" + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + + 
acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), + CheckDestroy: testAccCheckCustomPermissionsDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + ConfigDirectory: config.StaticDirectory("testdata/CustomPermissions/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckCustomPermissionsExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/CustomPermissions/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: 
config.StringVariable(acctest.CtValue1), + acctest.CtKey2: config.StringVariable(""), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckCustomPermissionsExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + acctest.CtKey2: knownvalue.StringExact(""), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + acctest.CtKey2: knownvalue.StringExact(""), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + acctest.CtKey2: knownvalue.StringExact(""), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + acctest.CtKey2: knownvalue.StringExact(""), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/CustomPermissions/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + acctest.CtKey2: config.StringVariable(""), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: testAccCustomPermissionsImportStateID(resourceName), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "custom_permissions_name", + }, + { + 
ConfigDirectory: config.StaticDirectory("testdata/CustomPermissions/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckCustomPermissionsExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/CustomPermissions/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: testAccCustomPermissionsImportStateID(resourceName), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "custom_permissions_name", + }, + }, + }) +} + +func 
TestAccQuickSightCustomPermissions_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { + t.Skip("Resource CustomPermissions does not support empty tags") + + ctx := acctest.Context(t) + + var v awstypes.CustomPermissions + resourceName := "aws_quicksight_custom_permissions.test" + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), + CheckDestroy: testAccCheckCustomPermissionsDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + ConfigDirectory: config.StaticDirectory("testdata/CustomPermissions/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckCustomPermissionsExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ 
+ acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/CustomPermissions/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckCustomPermissionsExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/CustomPermissions/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: testAccCustomPermissionsImportStateID(resourceName), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "custom_permissions_name", + }, + }, + }) +} + +func 
TestAccQuickSightCustomPermissions_tags_DefaultTags_providerOnly(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.CustomPermissions + resourceName := "aws_quicksight_custom_permissions.test" + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), + CheckDestroy: testAccCheckCustomPermissionsDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/CustomPermissions/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckCustomPermissionsExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: 
config.StaticDirectory("testdata/CustomPermissions/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: testAccCustomPermissionsImportStateID(resourceName), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "custom_permissions_name", + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/CustomPermissions/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1Updated), + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckCustomPermissionsExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1Updated), + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: 
knownvalue.StringExact(acctest.CtValue1Updated), + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/CustomPermissions/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1Updated), + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: testAccCustomPermissionsImportStateID(resourceName), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "custom_permissions_name", + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/CustomPermissions/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckCustomPermissionsExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + 
plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/CustomPermissions/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: testAccCustomPermissionsImportStateID(resourceName), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "custom_permissions_name", + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/CustomPermissions/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckCustomPermissionsExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: 
config.StaticDirectory("testdata/CustomPermissions/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: testAccCustomPermissionsImportStateID(resourceName), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "custom_permissions_name", + }, + }, + }) +} + +func TestAccQuickSightCustomPermissions_tags_DefaultTags_nonOverlapping(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.CustomPermissions + resourceName := "aws_quicksight_custom_permissions.test" + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), + CheckDestroy: testAccCheckCustomPermissionsDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/CustomPermissions/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckCustomPermissionsExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), 
knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1), + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1), + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/CustomPermissions/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: testAccCustomPermissionsImportStateID(resourceName), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "custom_permissions_name", + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/CustomPermissions/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: 
config.StringVariable(acctest.CtProviderValue1Updated), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: config.StringVariable(acctest.CtResourceValue2), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckCustomPermissionsExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1Updated), + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1Updated), + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + }, + }, + { + 
ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/CustomPermissions/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1Updated), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: config.StringVariable(acctest.CtResourceValue2), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: testAccCustomPermissionsImportStateID(resourceName), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "custom_permissions_name", + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/CustomPermissions/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckCustomPermissionsExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + 
ConfigDirectory: config.StaticDirectory("testdata/CustomPermissions/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: testAccCustomPermissionsImportStateID(resourceName), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "custom_permissions_name", + }, + }, + }) +} + +func TestAccQuickSightCustomPermissions_tags_DefaultTags_overlapping(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.CustomPermissions + resourceName := "aws_quicksight_custom_permissions.test" + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), + CheckDestroy: testAccCheckCustomPermissionsDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/CustomPermissions/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtResourceValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckCustomPermissionsExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), 
knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/CustomPermissions/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtResourceValue1), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: testAccCustomPermissionsImportStateID(resourceName), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "custom_permissions_name", + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/CustomPermissions/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtProviderValue1), + acctest.CtOverlapKey2: config.StringVariable("providervalue2"), + }), + acctest.CtResourceTags: 
config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtResourceValue1), + acctest.CtOverlapKey2: config.StringVariable(acctest.CtResourceValue2), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckCustomPermissionsExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + acctest.CtOverlapKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + acctest.CtOverlapKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + acctest.CtOverlapKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + acctest.CtOverlapKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/CustomPermissions/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + 
acctest.CtOverlapKey1: config.StringVariable(acctest.CtProviderValue1), + acctest.CtOverlapKey2: config.StringVariable("providervalue2"), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtResourceValue1), + acctest.CtOverlapKey2: config.StringVariable(acctest.CtResourceValue2), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: testAccCustomPermissionsImportStateID(resourceName), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "custom_permissions_name", + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/CustomPermissions/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtResourceValue2), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckCustomPermissionsExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue2), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), 
knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/CustomPermissions/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtResourceValue2), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: testAccCustomPermissionsImportStateID(resourceName), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "custom_permissions_name", + }, + }, + }) +} + +func TestAccQuickSightCustomPermissions_tags_DefaultTags_updateToProviderOnly(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.CustomPermissions + resourceName := "aws_quicksight_custom_permissions.test" + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), + CheckDestroy: testAccCheckCustomPermissionsDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/CustomPermissions/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + 
acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckCustomPermissionsExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/CustomPermissions/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckCustomPermissionsExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + 
acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/CustomPermissions/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: testAccCustomPermissionsImportStateID(resourceName), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "custom_permissions_name", + }, + }, + }) +} + +func TestAccQuickSightCustomPermissions_tags_DefaultTags_updateToResourceOnly(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.CustomPermissions + resourceName := "aws_quicksight_custom_permissions.test" + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), + CheckDestroy: testAccCheckCustomPermissionsDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/CustomPermissions/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: 
config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckCustomPermissionsExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/CustomPermissions/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckCustomPermissionsExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, 
+ ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/CustomPermissions/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: testAccCustomPermissionsImportStateID(resourceName), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "custom_permissions_name", + }, + }, + }) +} + +func TestAccQuickSightCustomPermissions_tags_DefaultTags_emptyResourceTag(t *testing.T) { + t.Skip("Resource CustomPermissions does not support empty tags") + + ctx := acctest.Context(t) + + var v awstypes.CustomPermissions + resourceName := "aws_quicksight_custom_permissions.test" + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), + CheckDestroy: testAccCheckCustomPermissionsDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/CustomPermissions/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: 
config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckCustomPermissionsExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/CustomPermissions/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: testAccCustomPermissionsImportStateID(resourceName), + 
ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "custom_permissions_name", + }, + }, + }) +} + +func TestAccQuickSightCustomPermissions_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T) { + t.Skip("Resource CustomPermissions does not support empty tags") + + ctx := acctest.Context(t) + + var v awstypes.CustomPermissions + resourceName := "aws_quicksight_custom_permissions.test" + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), + CheckDestroy: testAccCheckCustomPermissionsDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/CustomPermissions/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckCustomPermissionsExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + 
}, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/CustomPermissions/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: testAccCustomPermissionsImportStateID(resourceName), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "custom_permissions_name", + }, + }, + }) +} + +func TestAccQuickSightCustomPermissions_tags_DefaultTags_nullOverlappingResourceTag(t *testing.T) { + t.Skip("Resource CustomPermissions does not support null tags") + + ctx := acctest.Context(t) + + var v awstypes.CustomPermissions + resourceName := "aws_quicksight_custom_permissions.test" + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), + CheckDestroy: testAccCheckCustomPermissionsDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/CustomPermissions/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: nil, + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckCustomPermissionsExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), 
knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.Null(), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.Null(), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/CustomPermissions/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: nil, + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: testAccCustomPermissionsImportStateID(resourceName), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "custom_permissions_name", + ImportStateVerifyIgnore: []string{ + acctest.CtTagsKey1, // The canonical value returned by the AWS API is "" + }, + }, + }, + }) +} + +func TestAccQuickSightCustomPermissions_tags_DefaultTags_nullNonOverlappingResourceTag(t *testing.T) { + t.Skip("Resource CustomPermissions does not support null tags") + + ctx := acctest.Context(t) + + var v awstypes.CustomPermissions + resourceName := "aws_quicksight_custom_permissions.test" + rName := acctest.RandomWithPrefix(t, 
acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), + CheckDestroy: testAccCheckCustomPermissionsDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/CustomPermissions/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: nil, + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckCustomPermissionsExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.Null(), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(""), + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.Null(), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(""), + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1), + })), + 
}, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/CustomPermissions/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: nil, + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: testAccCustomPermissionsImportStateID(resourceName), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "custom_permissions_name", + ImportStateVerifyIgnore: []string{ + "tags.resourcekey1", // The canonical value returned by the AWS API is "" + }, + }, + }, + }) +} + +func TestAccQuickSightCustomPermissions_tags_ComputedTag_OnCreate(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.CustomPermissions + resourceName := "aws_quicksight_custom_permissions.test" + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), + CheckDestroy: testAccCheckCustomPermissionsDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/CustomPermissions/tagsComputed1/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "unknownTagKey": config.StringVariable("computedkey1"), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckCustomPermissionsExists(ctx, resourceName, &v), + resource.TestCheckResourceAttrPair(resourceName, "tags.computedkey1", "null_resource.test", names.AttrID), + ), + ConfigStateChecks: []statecheck.StateCheck{ + 
statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapSizeExact(1)), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapSizeExact(1)), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTags).AtMapKey("computedkey1")), + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/CustomPermissions/tagsComputed1/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "unknownTagKey": config.StringVariable("computedkey1"), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: testAccCustomPermissionsImportStateID(resourceName), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "custom_permissions_name", + }, + }, + }) +} + +func TestAccQuickSightCustomPermissions_tags_ComputedTag_OnUpdate_Add(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.CustomPermissions + resourceName := "aws_quicksight_custom_permissions.test" + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), + CheckDestroy: testAccCheckCustomPermissionsDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: 
config.StaticDirectory("testdata/CustomPermissions/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckCustomPermissionsExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/CustomPermissions/tagsComputed2/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "unknownTagKey": config.StringVariable("computedkey1"), + "knownTagKey": config.StringVariable(acctest.CtKey1), + "knownTagValue": config.StringVariable(acctest.CtValue1), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckCustomPermissionsExists(ctx, resourceName, &v), + resource.TestCheckResourceAttrPair(resourceName, 
"tags.computedkey1", "null_resource.test", names.AttrID), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapSizeExact(2)), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapPartial(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapSizeExact(2)), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapPartial(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTags).AtMapKey("computedkey1")), + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/CustomPermissions/tagsComputed2/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "unknownTagKey": config.StringVariable("computedkey1"), + "knownTagKey": config.StringVariable(acctest.CtKey1), + "knownTagValue": config.StringVariable(acctest.CtValue1), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: testAccCustomPermissionsImportStateID(resourceName), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "custom_permissions_name", + }, + }, + }) +} + 
+func TestAccQuickSightCustomPermissions_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.CustomPermissions + resourceName := "aws_quicksight_custom_permissions.test" + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), + CheckDestroy: testAccCheckCustomPermissionsDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/CustomPermissions/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckCustomPermissionsExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), 
+ })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/CustomPermissions/tagsComputed1/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "unknownTagKey": config.StringVariable(acctest.CtKey1), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckCustomPermissionsExists(ctx, resourceName, &v), + resource.TestCheckResourceAttrPair(resourceName, acctest.CtTagsKey1, "null_resource.test", names.AttrID), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapSizeExact(1)), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapSizeExact(1)), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTags).AtMapKey(acctest.CtKey1)), + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/CustomPermissions/tagsComputed1/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "unknownTagKey": config.StringVariable(acctest.CtKey1), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: testAccCustomPermissionsImportStateID(resourceName), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "custom_permissions_name", + }, + }, + }) +} + +func 
TestAccQuickSightCustomPermissions_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.CustomPermissions + resourceName := "aws_quicksight_custom_permissions.test" + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), + CheckDestroy: testAccCheckCustomPermissionsDestroy(ctx), + Steps: []resource.TestStep{ + // 1: Create + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/CustomPermissions/tags_ignore/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1), + }), + "ignore_tag_keys": config.SetVariable( + config.StringVariable(acctest.CtProviderKey1), + ), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckCustomPermissionsExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + expectFullResourceTags(ctx, resourceName, knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1), // TODO: Should not be set + acctest.CtResourceKey1: 
knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectEmptyPlan(), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectEmptyPlan(), + }, + }, + }, + // 2: Update ignored tag only + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/CustomPermissions/tags_ignore/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1Updated), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1), + }), + "ignore_tag_keys": config.SetVariable( + config.StringVariable(acctest.CtProviderKey1), + ), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckCustomPermissionsExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), 
knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + expectFullResourceTags(ctx, resourceName, knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1), // TODO: Should not be set + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectEmptyPlan(), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectEmptyPlan(), + }, + }, + }, + // 3: Update both tags + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/CustomPermissions/tags_ignore/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1Again), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1Updated), + }), + "ignore_tag_keys": config.SetVariable( + config.StringVariable(acctest.CtProviderKey1), + ), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckCustomPermissionsExists(ctx, resourceName, &v), + ), + ConfigStateChecks: 
[]statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + })), + expectFullResourceTags(ctx, resourceName, knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1), // TODO: Should not be set + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + })), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectEmptyPlan(), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectEmptyPlan(), + }, + }, + }, + }, + }) +} + +func TestAccQuickSightCustomPermissions_tags_IgnoreTags_Overlap_ResourceTag(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.CustomPermissions + resourceName := "aws_quicksight_custom_permissions.test" + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), + CheckDestroy: 
testAccCheckCustomPermissionsDestroy(ctx), + Steps: []resource.TestStep{ + // 1: Create + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/CustomPermissions/tags_ignore/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1), + acctest.CtResourceKey2: config.StringVariable(acctest.CtResourceValue2), + }), + "ignore_tag_keys": config.SetVariable( + config.StringVariable(acctest.CtResourceKey1), + ), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckCustomPermissionsExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + expectFullResourceTags(ctx, resourceName, knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), // TODO: Should not be set + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + acctest.CtResourceKey2: 
knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectEmptyPlan(), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), // TODO: Should be NoOp + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + }, + ExpectNonEmptyPlan: true, + }, + // 2: Update ignored tag + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/CustomPermissions/tags_ignore/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: config.StringVariable(acctest.CtResourceValue2), + }), + "ignore_tag_keys": config.SetVariable( + config.StringVariable(acctest.CtResourceKey1), + ), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckCustomPermissionsExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), 
knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + expectFullResourceTags(ctx, resourceName, knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), // TODO: Should not be set + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectEmptyPlan(), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), // TODO: Should be NoOp + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), 
knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + }, + ExpectNonEmptyPlan: true, + }, + // 3: Update both tags + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/CustomPermissions/tags_ignore/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1Again), + acctest.CtResourceKey2: config.StringVariable(acctest.CtResourceValue2Updated), + }), + "ignore_tag_keys": config.SetVariable( + config.StringVariable(acctest.CtResourceKey1), + ), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckCustomPermissionsExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Again), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + expectFullResourceTags(ctx, resourceName, knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), // TODO: Should not be set + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), 
knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Again), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectEmptyPlan(), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), // TODO: Should be NoOp + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Again), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + }, + }, + ExpectNonEmptyPlan: true, + }, + }, + }) +} diff --git a/internal/service/quicksight/custom_permissions_test.go b/internal/service/quicksight/custom_permissions_test.go new file mode 100644 index 000000000000..34d437f1f227 --- /dev/null +++ b/internal/service/quicksight/custom_permissions_test.go @@ -0,0 +1,308 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package quicksight_test + +import ( + "context" + "fmt" + "testing" + + "github.com/YakDriver/regexache" + awstypes "github.com/aws/aws-sdk-go-v2/service/quicksight/types" + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/knownvalue" + "github.com/hashicorp/terraform-plugin-testing/plancheck" + "github.com/hashicorp/terraform-plugin-testing/statecheck" + "github.com/hashicorp/terraform-plugin-testing/terraform" + "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + tfknownvalue "github.com/hashicorp/terraform-provider-aws/internal/acctest/knownvalue" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + tfquicksight "github.com/hashicorp/terraform-provider-aws/internal/service/quicksight" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccQuickSightCustomPermissions_basic(t *testing.T) { + ctx := acctest.Context(t) + var v awstypes.CustomPermissions + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_quicksight_custom_permissions.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckCustomPermissionsDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccCustomPermissionsConfig_basic(rName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckCustomPermissionsExists(ctx, resourceName, &v), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + }, + 
PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), tfknownvalue.RegionalARNRegexp("quicksight", regexache.MustCompile(`custompermissions/.+`))), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrAWSAccountID), tfknownvalue.AccountID()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("capabilities"), knownvalue.ListExact([]knownvalue.Check{ + knownvalue.ObjectExact(map[string]knownvalue.Check{ + "add_or_run_anomaly_detection_for_analyses": knownvalue.Null(), + "create_and_update_dashboard_email_reports": knownvalue.Null(), + "create_and_update_datasets": knownvalue.Null(), + "create_and_update_data_sources": knownvalue.Null(), + "create_and_update_themes": knownvalue.Null(), + "create_and_update_threshold_alerts": knownvalue.Null(), + "create_shared_folders": knownvalue.Null(), + "create_spice_dataset": knownvalue.Null(), + "export_to_csv": knownvalue.Null(), + "export_to_csv_in_scheduled_reports": knownvalue.Null(), + "export_to_excel": knownvalue.Null(), + "export_to_excel_in_scheduled_reports": knownvalue.Null(), + "export_to_pdf": knownvalue.Null(), + "export_to_pdf_in_scheduled_reports": knownvalue.Null(), + "include_content_in_scheduled_reports_email": knownvalue.Null(), + "print_reports": tfknownvalue.StringExact(awstypes.CapabilityStateDeny), + "rename_shared_folders": knownvalue.Null(), + "share_analyses": knownvalue.Null(), + "share_dashboards": tfknownvalue.StringExact(awstypes.CapabilityStateDeny), + "share_datasets": knownvalue.Null(), + "share_data_sources": knownvalue.Null(), + "subscribe_dashboard_email_reports": knownvalue.Null(), + "view_account_spice_capacity": knownvalue.Null(), + }), + 
})), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("custom_permissions_name"), knownvalue.StringExact(rName)), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + }, + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateIdFunc: testAccCustomPermissionsImportStateID(resourceName), + ImportStateVerifyIdentifierAttribute: "custom_permissions_name", + }, + }, + }) +} + +func TestAccQuickSightCustomPermissions_disappears(t *testing.T) { + ctx := acctest.Context(t) + var v awstypes.CustomPermissions + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_quicksight_custom_permissions.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckCustomPermissionsDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccCustomPermissionsConfig_basic(rName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckCustomPermissionsExists(ctx, resourceName, &v), + acctest.CheckFrameworkResourceDisappears(ctx, acctest.Provider, tfquicksight.ResourceCustomPermissions, resourceName), + ), + ExpectNonEmptyPlan: true, + }, + }, + }) +} + +func TestAccQuickSightCustomPermissions_update(t *testing.T) { + ctx := acctest.Context(t) + var v awstypes.CustomPermissions + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_quicksight_custom_permissions.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckCustomPermissionsDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: 
testAccCustomPermissionsConfig_basic(rName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckCustomPermissionsExists(ctx, resourceName, &v), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), tfknownvalue.RegionalARNRegexp("quicksight", regexache.MustCompile(`custompermissions/.+`))), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrAWSAccountID), tfknownvalue.AccountID()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("capabilities"), knownvalue.ListExact([]knownvalue.Check{ + knownvalue.ObjectExact(map[string]knownvalue.Check{ + "add_or_run_anomaly_detection_for_analyses": knownvalue.Null(), + "create_and_update_dashboard_email_reports": knownvalue.Null(), + "create_and_update_datasets": knownvalue.Null(), + "create_and_update_data_sources": knownvalue.Null(), + "create_and_update_themes": knownvalue.Null(), + "create_and_update_threshold_alerts": knownvalue.Null(), + "create_shared_folders": knownvalue.Null(), + "create_spice_dataset": knownvalue.Null(), + "export_to_csv": knownvalue.Null(), + "export_to_csv_in_scheduled_reports": knownvalue.Null(), + "export_to_excel": knownvalue.Null(), + "export_to_excel_in_scheduled_reports": knownvalue.Null(), + "export_to_pdf": knownvalue.Null(), + "export_to_pdf_in_scheduled_reports": knownvalue.Null(), + "include_content_in_scheduled_reports_email": knownvalue.Null(), + "print_reports": tfknownvalue.StringExact(awstypes.CapabilityStateDeny), + "rename_shared_folders": 
knownvalue.Null(), + "share_analyses": knownvalue.Null(), + "share_dashboards": tfknownvalue.StringExact(awstypes.CapabilityStateDeny), + "share_datasets": knownvalue.Null(), + "share_data_sources": knownvalue.Null(), + "subscribe_dashboard_email_reports": knownvalue.Null(), + "view_account_spice_capacity": knownvalue.Null(), + }), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("custom_permissions_name"), knownvalue.StringExact(rName)), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + }, + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateIdFunc: testAccCustomPermissionsImportStateID(resourceName), + ImportStateVerifyIdentifierAttribute: "custom_permissions_name", + }, + { + Config: testAccCustomPermissionsConfig_updated(rName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckCustomPermissionsExists(ctx, resourceName, &v), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), tfknownvalue.RegionalARNRegexp("quicksight", regexache.MustCompile(`custompermissions/.+`))), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrAWSAccountID), tfknownvalue.AccountID()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("capabilities"), knownvalue.ListExact([]knownvalue.Check{ + knownvalue.ObjectPartial(map[string]knownvalue.Check{ + "create_and_update_datasets": tfknownvalue.StringExact(awstypes.CapabilityStateDeny), + 
"create_and_update_data_sources": tfknownvalue.StringExact(awstypes.CapabilityStateDeny), + "export_to_pdf": tfknownvalue.StringExact(awstypes.CapabilityStateDeny), + "print_reports": knownvalue.Null(), + "share_dashboards": knownvalue.Null(), + }), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("custom_permissions_name"), knownvalue.StringExact(rName)), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + }, + }, + }, + }) +} + +func testAccCheckCustomPermissionsDestroy(ctx context.Context) resource.TestCheckFunc { + return func(s *terraform.State) error { + conn := acctest.Provider.Meta().(*conns.AWSClient).QuickSightClient(ctx) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "aws_quicksight_ip_restriction" { + continue + } + + _, err := tfquicksight.FindCustomPermissionsByTwoPartKey(ctx, conn, rs.Primary.Attributes[names.AttrAWSAccountID], rs.Primary.Attributes["custom_permissions_name"]) + + if tfresource.NotFound(err) { + continue + } + + if err != nil { + return err + } + + return fmt.Errorf("QuickSight Custom Permissions (%s) still exists", rs.Primary.Attributes["custom_permissions_name"]) + } + + return nil + } +} + +func testAccCheckCustomPermissionsExists(ctx context.Context, n string, v *awstypes.CustomPermissions) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + conn := acctest.Provider.Meta().(*conns.AWSClient).QuickSightClient(ctx) + + output, err := tfquicksight.FindCustomPermissionsByTwoPartKey(ctx, conn, rs.Primary.Attributes[names.AttrAWSAccountID], rs.Primary.Attributes["custom_permissions_name"]) + + if err != nil { + return err + } + + *v = *output + + return nil + } +} + +func testAccCustomPermissionsImportStateID(n string) resource.ImportStateIdFunc { + return func(s *terraform.State) (string, error) { + return acctest.AttrsImportStateIdFunc(n, 
",", names.AttrAWSAccountID, "custom_permissions_name")(s) + } +} + +func testAccCustomPermissionsConfig_basic(rName string) string { + return fmt.Sprintf(` +resource "aws_quicksight_custom_permissions" "test" { + custom_permissions_name = %[1]q + + capabilities { + print_reports = "DENY" + share_dashboards = "DENY" + } +} +`, rName) +} + +func testAccCustomPermissionsConfig_updated(rName string) string { + return fmt.Sprintf(` +resource "aws_quicksight_custom_permissions" "test" { + custom_permissions_name = %[1]q + + capabilities { + create_and_update_datasets = "DENY" + create_and_update_data_sources = "DENY" + export_to_pdf = "DENY" + } +} +`, rName) +} diff --git a/internal/service/quicksight/dashboard.go b/internal/service/quicksight/dashboard.go index 93798e83dc48..7bc4a0e84c5c 100644 --- a/internal/service/quicksight/dashboard.go +++ b/internal/service/quicksight/dashboard.go @@ -27,7 +27,6 @@ import ( tfslices "github.com/hashicorp/terraform-provider-aws/internal/slices" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" - "github.com/hashicorp/terraform-provider-aws/internal/verify" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -58,13 +57,7 @@ func resourceDashboard() *schema.Resource { Type: schema.TypeString, Computed: true, }, - names.AttrAWSAccountID: { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - ValidateFunc: verify.ValidAccountID, - }, + names.AttrAWSAccountID: quicksightschema.AWSAccountIDSchema(), names.AttrCreatedTime: { Type: schema.TypeString, Computed: true, diff --git a/internal/service/quicksight/dashboard_tags_gen_test.go b/internal/service/quicksight/dashboard_tags_gen_test.go index 1b0103e741f2..13bac52d972b 100644 --- a/internal/service/quicksight/dashboard_tags_gen_test.go +++ b/internal/service/quicksight/dashboard_tags_gen_test.go @@ -7,7 +7,6 @@ import ( awstypes 
"github.com/aws/aws-sdk-go-v2/service/quicksight/types" "github.com/hashicorp/terraform-plugin-testing/config" - sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/knownvalue" "github.com/hashicorp/terraform-plugin-testing/plancheck" @@ -19,11 +18,12 @@ import ( func TestAccQuickSightDashboard_tags(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Dashboard resourceName := "aws_quicksight_dashboard.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckDashboardDestroy(ctx), @@ -203,11 +203,12 @@ func TestAccQuickSightDashboard_tags_null(t *testing.T) { t.Skip("Resource Dashboard does not support null tags") ctx := acctest.Context(t) + var v awstypes.Dashboard resourceName := "aws_quicksight_dashboard.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckDashboardDestroy(ctx), @@ -270,11 +271,12 @@ func TestAccQuickSightDashboard_tags_null(t *testing.T) { func TestAccQuickSightDashboard_tags_EmptyMap(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Dashboard resourceName := "aws_quicksight_dashboard.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, 
resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckDashboardDestroy(ctx), @@ -333,11 +335,12 @@ func TestAccQuickSightDashboard_tags_EmptyMap(t *testing.T) { func TestAccQuickSightDashboard_tags_AddOnUpdate(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Dashboard resourceName := "aws_quicksight_dashboard.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckDashboardDestroy(ctx), @@ -416,11 +419,12 @@ func TestAccQuickSightDashboard_tags_EmptyTag_OnCreate(t *testing.T) { t.Skip("Resource Dashboard does not support empty tags") ctx := acctest.Context(t) + var v awstypes.Dashboard resourceName := "aws_quicksight_dashboard.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckDashboardDestroy(ctx), @@ -507,11 +511,12 @@ func TestAccQuickSightDashboard_tags_EmptyTag_OnUpdate_Add(t *testing.T) { t.Skip("Resource Dashboard does not support empty tags") ctx := acctest.Context(t) + var v awstypes.Dashboard resourceName := "aws_quicksight_dashboard.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, 
names.QuickSightServiceID), CheckDestroy: testAccCheckDashboardDestroy(ctx), @@ -646,11 +651,12 @@ func TestAccQuickSightDashboard_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { t.Skip("Resource Dashboard does not support empty tags") ctx := acctest.Context(t) + var v awstypes.Dashboard resourceName := "aws_quicksight_dashboard.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckDashboardDestroy(ctx), @@ -735,11 +741,12 @@ func TestAccQuickSightDashboard_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { func TestAccQuickSightDashboard_tags_DefaultTags_providerOnly(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Dashboard resourceName := "aws_quicksight_dashboard.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckDashboardDestroy(ctx), @@ -916,11 +923,12 @@ func TestAccQuickSightDashboard_tags_DefaultTags_providerOnly(t *testing.T) { func TestAccQuickSightDashboard_tags_DefaultTags_nonOverlapping(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Dashboard resourceName := "aws_quicksight_dashboard.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), 
CheckDestroy: testAccCheckDashboardDestroy(ctx), @@ -1076,11 +1084,12 @@ func TestAccQuickSightDashboard_tags_DefaultTags_nonOverlapping(t *testing.T) { func TestAccQuickSightDashboard_tags_DefaultTags_overlapping(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Dashboard resourceName := "aws_quicksight_dashboard.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckDashboardDestroy(ctx), @@ -1252,11 +1261,12 @@ func TestAccQuickSightDashboard_tags_DefaultTags_overlapping(t *testing.T) { func TestAccQuickSightDashboard_tags_DefaultTags_updateToProviderOnly(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Dashboard resourceName := "aws_quicksight_dashboard.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckDashboardDestroy(ctx), @@ -1342,11 +1352,12 @@ func TestAccQuickSightDashboard_tags_DefaultTags_updateToProviderOnly(t *testing func TestAccQuickSightDashboard_tags_DefaultTags_updateToResourceOnly(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Dashboard resourceName := "aws_quicksight_dashboard.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, 
names.QuickSightServiceID), CheckDestroy: testAccCheckDashboardDestroy(ctx), @@ -1433,11 +1444,12 @@ func TestAccQuickSightDashboard_tags_DefaultTags_emptyResourceTag(t *testing.T) t.Skip("Resource Dashboard does not support empty tags") ctx := acctest.Context(t) + var v awstypes.Dashboard resourceName := "aws_quicksight_dashboard.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckDashboardDestroy(ctx), @@ -1500,11 +1512,12 @@ func TestAccQuickSightDashboard_tags_DefaultTags_emptyProviderOnlyTag(t *testing t.Skip("Resource Dashboard does not support empty tags") ctx := acctest.Context(t) + var v awstypes.Dashboard resourceName := "aws_quicksight_dashboard.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckDashboardDestroy(ctx), @@ -1559,11 +1572,12 @@ func TestAccQuickSightDashboard_tags_DefaultTags_nullOverlappingResourceTag(t *t t.Skip("Resource Dashboard does not support null tags") ctx := acctest.Context(t) + var v awstypes.Dashboard resourceName := "aws_quicksight_dashboard.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: 
testAccCheckDashboardDestroy(ctx), @@ -1623,11 +1637,12 @@ func TestAccQuickSightDashboard_tags_DefaultTags_nullNonOverlappingResourceTag(t t.Skip("Resource Dashboard does not support null tags") ctx := acctest.Context(t) + var v awstypes.Dashboard resourceName := "aws_quicksight_dashboard.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckDashboardDestroy(ctx), @@ -1685,11 +1700,12 @@ func TestAccQuickSightDashboard_tags_DefaultTags_nullNonOverlappingResourceTag(t func TestAccQuickSightDashboard_tags_ComputedTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Dashboard resourceName := "aws_quicksight_dashboard.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckDashboardDestroy(ctx), @@ -1740,11 +1756,12 @@ func TestAccQuickSightDashboard_tags_ComputedTag_OnCreate(t *testing.T) { func TestAccQuickSightDashboard_tags_ComputedTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Dashboard resourceName := "aws_quicksight_dashboard.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckDashboardDestroy(ctx), @@ 
-1837,11 +1854,12 @@ func TestAccQuickSightDashboard_tags_ComputedTag_OnUpdate_Add(t *testing.T) { func TestAccQuickSightDashboard_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Dashboard resourceName := "aws_quicksight_dashboard.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckDashboardDestroy(ctx), @@ -1924,11 +1942,12 @@ func TestAccQuickSightDashboard_tags_ComputedTag_OnUpdate_Replace(t *testing.T) func TestAccQuickSightDashboard_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Dashboard resourceName := "aws_quicksight_dashboard.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckDashboardDestroy(ctx), @@ -2086,11 +2105,12 @@ func TestAccQuickSightDashboard_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) func TestAccQuickSightDashboard_tags_IgnoreTags_Overlap_ResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Dashboard resourceName := "aws_quicksight_dashboard.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckDashboardDestroy(ctx), 
diff --git a/internal/service/quicksight/data_set.go b/internal/service/quicksight/data_set.go index 9723762e577d..4cc9c71c61c4 100644 --- a/internal/service/quicksight/data_set.go +++ b/internal/service/quicksight/data_set.go @@ -24,7 +24,6 @@ import ( quicksightschema "github.com/hashicorp/terraform-provider-aws/internal/service/quicksight/schema" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" - "github.com/hashicorp/terraform-provider-aws/internal/verify" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -49,13 +48,7 @@ func resourceDataSet() *schema.Resource { Type: schema.TypeString, Computed: true, }, - names.AttrAWSAccountID: { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - ValidateFunc: verify.ValidAccountID, - }, + names.AttrAWSAccountID: quicksightschema.AWSAccountIDSchema(), "column_groups": quicksightschema.DataSetColumnGroupsSchema(), "column_level_permission_rules": quicksightschema.DataSetColumnLevelPermissionRulesSchema(), "data_set_id": { diff --git a/internal/service/quicksight/data_set_data_source.go b/internal/service/quicksight/data_set_data_source.go index 7564f48e010f..8a96a24041fc 100644 --- a/internal/service/quicksight/data_set_data_source.go +++ b/internal/service/quicksight/data_set_data_source.go @@ -12,7 +12,6 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" quicksightschema "github.com/hashicorp/terraform-provider-aws/internal/service/quicksight/schema" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/verify" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -29,12 +28,7 @@ func dataSourceDataSet() *schema.Resource { Type: schema.TypeString, Computed: true, }, - names.AttrAWSAccountID: { - Type: schema.TypeString, - Optional: true, - Computed: true, - ValidateFunc: verify.ValidAccountID, - }, + 
names.AttrAWSAccountID: quicksightschema.AWSAccountIDDataSourceSchema(), "column_groups": quicksightschema.DataSetColumnGroupsSchemaDataSourceSchema(), "column_level_permission_rules": quicksightschema.DataSetColumnLevelPermissionRulesSchemaDataSourceSchema(), "data_set_id": { diff --git a/internal/service/quicksight/data_set_data_source_tags_gen_test.go b/internal/service/quicksight/data_set_data_source_tags_gen_test.go index 31b06c0a76de..b86d41fd066e 100644 --- a/internal/service/quicksight/data_set_data_source_tags_gen_test.go +++ b/internal/service/quicksight/data_set_data_source_tags_gen_test.go @@ -8,7 +8,6 @@ import ( "unique" "github.com/hashicorp/terraform-plugin-testing/config" - sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/knownvalue" "github.com/hashicorp/terraform-plugin-testing/statecheck" @@ -22,10 +21,11 @@ import ( func TestAccQuickSightDataSetDataSource_tags(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_quicksight_data_set.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -50,10 +50,11 @@ func TestAccQuickSightDataSetDataSource_tags(t *testing.T) { func TestAccQuickSightDataSetDataSource_tags_NullMap(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_quicksight_data_set.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { 
acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -74,10 +75,11 @@ func TestAccQuickSightDataSetDataSource_tags_NullMap(t *testing.T) { func TestAccQuickSightDataSetDataSource_tags_EmptyMap(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_quicksight_data_set.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -98,10 +100,11 @@ func TestAccQuickSightDataSetDataSource_tags_EmptyMap(t *testing.T) { func TestAccQuickSightDataSetDataSource_tags_DefaultTags_nonOverlapping(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_quicksight_data_set.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), Steps: []resource.TestStep{ @@ -130,10 +133,11 @@ func TestAccQuickSightDataSetDataSource_tags_DefaultTags_nonOverlapping(t *testi func TestAccQuickSightDataSetDataSource_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_quicksight_data_set.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, 
names.QuickSightServiceID), Steps: []resource.TestStep{ @@ -168,10 +172,11 @@ func TestAccQuickSightDataSetDataSource_tags_IgnoreTags_Overlap_DefaultTag(t *te func TestAccQuickSightDataSetDataSource_tags_IgnoreTags_Overlap_ResourceTag(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_quicksight_data_set.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), Steps: []resource.TestStep{ diff --git a/internal/service/quicksight/data_set_tags_gen_test.go b/internal/service/quicksight/data_set_tags_gen_test.go index 96227902da3d..76e198164e47 100644 --- a/internal/service/quicksight/data_set_tags_gen_test.go +++ b/internal/service/quicksight/data_set_tags_gen_test.go @@ -7,7 +7,6 @@ import ( awstypes "github.com/aws/aws-sdk-go-v2/service/quicksight/types" "github.com/hashicorp/terraform-plugin-testing/config" - sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/knownvalue" "github.com/hashicorp/terraform-plugin-testing/plancheck" @@ -19,11 +18,12 @@ import ( func TestAccQuickSightDataSet_tags(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.DataSet resourceName := "aws_quicksight_data_set.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckDataSetDestroy(ctx), @@ -203,11 +203,12 @@ func TestAccQuickSightDataSet_tags_null(t 
*testing.T) { t.Skip("Resource DataSet does not support null tags") ctx := acctest.Context(t) + var v awstypes.DataSet resourceName := "aws_quicksight_data_set.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckDataSetDestroy(ctx), @@ -270,11 +271,12 @@ func TestAccQuickSightDataSet_tags_null(t *testing.T) { func TestAccQuickSightDataSet_tags_EmptyMap(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.DataSet resourceName := "aws_quicksight_data_set.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckDataSetDestroy(ctx), @@ -333,11 +335,12 @@ func TestAccQuickSightDataSet_tags_EmptyMap(t *testing.T) { func TestAccQuickSightDataSet_tags_AddOnUpdate(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.DataSet resourceName := "aws_quicksight_data_set.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckDataSetDestroy(ctx), @@ -416,11 +419,12 @@ func TestAccQuickSightDataSet_tags_EmptyTag_OnCreate(t *testing.T) { t.Skip("Resource DataSet does not support empty tags") ctx := acctest.Context(t) + var v awstypes.DataSet resourceName := 
"aws_quicksight_data_set.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckDataSetDestroy(ctx), @@ -507,11 +511,12 @@ func TestAccQuickSightDataSet_tags_EmptyTag_OnUpdate_Add(t *testing.T) { t.Skip("Resource DataSet does not support empty tags") ctx := acctest.Context(t) + var v awstypes.DataSet resourceName := "aws_quicksight_data_set.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckDataSetDestroy(ctx), @@ -646,11 +651,12 @@ func TestAccQuickSightDataSet_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { t.Skip("Resource DataSet does not support empty tags") ctx := acctest.Context(t) + var v awstypes.DataSet resourceName := "aws_quicksight_data_set.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckDataSetDestroy(ctx), @@ -735,11 +741,12 @@ func TestAccQuickSightDataSet_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { func TestAccQuickSightDataSet_tags_DefaultTags_providerOnly(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.DataSet resourceName := "aws_quicksight_data_set.test" - rName := 
sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckDataSetDestroy(ctx), @@ -916,11 +923,12 @@ func TestAccQuickSightDataSet_tags_DefaultTags_providerOnly(t *testing.T) { func TestAccQuickSightDataSet_tags_DefaultTags_nonOverlapping(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.DataSet resourceName := "aws_quicksight_data_set.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckDataSetDestroy(ctx), @@ -1076,11 +1084,12 @@ func TestAccQuickSightDataSet_tags_DefaultTags_nonOverlapping(t *testing.T) { func TestAccQuickSightDataSet_tags_DefaultTags_overlapping(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.DataSet resourceName := "aws_quicksight_data_set.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckDataSetDestroy(ctx), @@ -1252,11 +1261,12 @@ func TestAccQuickSightDataSet_tags_DefaultTags_overlapping(t *testing.T) { func TestAccQuickSightDataSet_tags_DefaultTags_updateToProviderOnly(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.DataSet resourceName := "aws_quicksight_data_set.test" - rName := 
sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckDataSetDestroy(ctx), @@ -1342,11 +1352,12 @@ func TestAccQuickSightDataSet_tags_DefaultTags_updateToProviderOnly(t *testing.T func TestAccQuickSightDataSet_tags_DefaultTags_updateToResourceOnly(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.DataSet resourceName := "aws_quicksight_data_set.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckDataSetDestroy(ctx), @@ -1433,11 +1444,12 @@ func TestAccQuickSightDataSet_tags_DefaultTags_emptyResourceTag(t *testing.T) { t.Skip("Resource DataSet does not support empty tags") ctx := acctest.Context(t) + var v awstypes.DataSet resourceName := "aws_quicksight_data_set.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckDataSetDestroy(ctx), @@ -1500,11 +1512,12 @@ func TestAccQuickSightDataSet_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T t.Skip("Resource DataSet does not support empty tags") ctx := acctest.Context(t) + var v awstypes.DataSet resourceName := "aws_quicksight_data_set.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := 
acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckDataSetDestroy(ctx), @@ -1559,11 +1572,12 @@ func TestAccQuickSightDataSet_tags_DefaultTags_nullOverlappingResourceTag(t *tes t.Skip("Resource DataSet does not support null tags") ctx := acctest.Context(t) + var v awstypes.DataSet resourceName := "aws_quicksight_data_set.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckDataSetDestroy(ctx), @@ -1623,11 +1637,12 @@ func TestAccQuickSightDataSet_tags_DefaultTags_nullNonOverlappingResourceTag(t * t.Skip("Resource DataSet does not support null tags") ctx := acctest.Context(t) + var v awstypes.DataSet resourceName := "aws_quicksight_data_set.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckDataSetDestroy(ctx), @@ -1685,11 +1700,12 @@ func TestAccQuickSightDataSet_tags_DefaultTags_nullNonOverlappingResourceTag(t * func TestAccQuickSightDataSet_tags_ComputedTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.DataSet resourceName := "aws_quicksight_data_set.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - 
resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckDataSetDestroy(ctx), @@ -1740,11 +1756,12 @@ func TestAccQuickSightDataSet_tags_ComputedTag_OnCreate(t *testing.T) { func TestAccQuickSightDataSet_tags_ComputedTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.DataSet resourceName := "aws_quicksight_data_set.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckDataSetDestroy(ctx), @@ -1837,11 +1854,12 @@ func TestAccQuickSightDataSet_tags_ComputedTag_OnUpdate_Add(t *testing.T) { func TestAccQuickSightDataSet_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.DataSet resourceName := "aws_quicksight_data_set.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckDataSetDestroy(ctx), @@ -1924,11 +1942,12 @@ func TestAccQuickSightDataSet_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { func TestAccQuickSightDataSet_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.DataSet resourceName := "aws_quicksight_data_set.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, 
resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckDataSetDestroy(ctx), @@ -2086,11 +2105,12 @@ func TestAccQuickSightDataSet_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { func TestAccQuickSightDataSet_tags_IgnoreTags_Overlap_ResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.DataSet resourceName := "aws_quicksight_data_set.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckDataSetDestroy(ctx), diff --git a/internal/service/quicksight/data_source.go b/internal/service/quicksight/data_source.go index b32655fdc171..7df8bfdaaae3 100644 --- a/internal/service/quicksight/data_source.go +++ b/internal/service/quicksight/data_source.go @@ -25,7 +25,6 @@ import ( quicksightschema "github.com/hashicorp/terraform-provider-aws/internal/service/quicksight/schema" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" - "github.com/hashicorp/terraform-provider-aws/internal/verify" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -59,14 +58,8 @@ func resourceDataSource() *schema.Resource { Type: schema.TypeString, Computed: true, }, - names.AttrAWSAccountID: { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - ValidateFunc: verify.ValidAccountID, - }, - "credentials": quicksightschema.DataSourceCredentialsSchema(), + names.AttrAWSAccountID: quicksightschema.AWSAccountIDSchema(), + "credentials": quicksightschema.DataSourceCredentialsSchema(), "data_source_id": { Type: 
schema.TypeString, Required: true, @@ -133,7 +126,7 @@ func resourceDataSourceCreate(ctx context.Context, d *schema.ResourceData, meta } outputRaw, err := tfresource.RetryWhen(ctx, propagationTimeout, - func() (any, error) { + func(ctx context.Context) (any, error) { return conn.CreateDataSource(ctx, input) }, func(err error) (bool, error) { @@ -246,7 +239,7 @@ func resourceDataSourceUpdate(ctx context.Context, d *schema.ResourceData, meta } outputRaw, err := tfresource.RetryWhen(ctx, propagationTimeout, - func() (any, error) { + func(ctx context.Context) (any, error) { return conn.UpdateDataSource(ctx, input) }, func(err error) (bool, error) { diff --git a/internal/service/quicksight/data_source_tags_gen_test.go b/internal/service/quicksight/data_source_tags_gen_test.go index ed94908f93dd..a0e1bab34433 100644 --- a/internal/service/quicksight/data_source_tags_gen_test.go +++ b/internal/service/quicksight/data_source_tags_gen_test.go @@ -7,7 +7,6 @@ import ( awstypes "github.com/aws/aws-sdk-go-v2/service/quicksight/types" "github.com/hashicorp/terraform-plugin-testing/config" - sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/knownvalue" "github.com/hashicorp/terraform-plugin-testing/plancheck" @@ -19,11 +18,12 @@ import ( func TestAccQuickSightDataSource_tags(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.DataSource resourceName := "aws_quicksight_data_source.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckDataSourceDestroy(ctx), @@ -203,11 +203,12 @@ func TestAccQuickSightDataSource_tags_null(t *testing.T) { 
t.Skip("Resource DataSource does not support null tags") ctx := acctest.Context(t) + var v awstypes.DataSource resourceName := "aws_quicksight_data_source.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckDataSourceDestroy(ctx), @@ -270,11 +271,12 @@ func TestAccQuickSightDataSource_tags_null(t *testing.T) { func TestAccQuickSightDataSource_tags_EmptyMap(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.DataSource resourceName := "aws_quicksight_data_source.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckDataSourceDestroy(ctx), @@ -333,11 +335,12 @@ func TestAccQuickSightDataSource_tags_EmptyMap(t *testing.T) { func TestAccQuickSightDataSource_tags_AddOnUpdate(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.DataSource resourceName := "aws_quicksight_data_source.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckDataSourceDestroy(ctx), @@ -416,11 +419,12 @@ func TestAccQuickSightDataSource_tags_EmptyTag_OnCreate(t *testing.T) { t.Skip("Resource DataSource does not support empty tags") ctx := acctest.Context(t) + var v 
awstypes.DataSource resourceName := "aws_quicksight_data_source.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckDataSourceDestroy(ctx), @@ -507,11 +511,12 @@ func TestAccQuickSightDataSource_tags_EmptyTag_OnUpdate_Add(t *testing.T) { t.Skip("Resource DataSource does not support empty tags") ctx := acctest.Context(t) + var v awstypes.DataSource resourceName := "aws_quicksight_data_source.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckDataSourceDestroy(ctx), @@ -646,11 +651,12 @@ func TestAccQuickSightDataSource_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { t.Skip("Resource DataSource does not support empty tags") ctx := acctest.Context(t) + var v awstypes.DataSource resourceName := "aws_quicksight_data_source.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckDataSourceDestroy(ctx), @@ -735,11 +741,12 @@ func TestAccQuickSightDataSource_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { func TestAccQuickSightDataSource_tags_DefaultTags_providerOnly(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.DataSource resourceName := 
"aws_quicksight_data_source.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckDataSourceDestroy(ctx), @@ -916,11 +923,12 @@ func TestAccQuickSightDataSource_tags_DefaultTags_providerOnly(t *testing.T) { func TestAccQuickSightDataSource_tags_DefaultTags_nonOverlapping(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.DataSource resourceName := "aws_quicksight_data_source.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckDataSourceDestroy(ctx), @@ -1076,11 +1084,12 @@ func TestAccQuickSightDataSource_tags_DefaultTags_nonOverlapping(t *testing.T) { func TestAccQuickSightDataSource_tags_DefaultTags_overlapping(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.DataSource resourceName := "aws_quicksight_data_source.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckDataSourceDestroy(ctx), @@ -1252,11 +1261,12 @@ func TestAccQuickSightDataSource_tags_DefaultTags_overlapping(t *testing.T) { func TestAccQuickSightDataSource_tags_DefaultTags_updateToProviderOnly(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.DataSource 
resourceName := "aws_quicksight_data_source.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckDataSourceDestroy(ctx), @@ -1342,11 +1352,12 @@ func TestAccQuickSightDataSource_tags_DefaultTags_updateToProviderOnly(t *testin func TestAccQuickSightDataSource_tags_DefaultTags_updateToResourceOnly(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.DataSource resourceName := "aws_quicksight_data_source.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckDataSourceDestroy(ctx), @@ -1433,11 +1444,12 @@ func TestAccQuickSightDataSource_tags_DefaultTags_emptyResourceTag(t *testing.T) t.Skip("Resource DataSource does not support empty tags") ctx := acctest.Context(t) + var v awstypes.DataSource resourceName := "aws_quicksight_data_source.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckDataSourceDestroy(ctx), @@ -1500,11 +1512,12 @@ func TestAccQuickSightDataSource_tags_DefaultTags_emptyProviderOnlyTag(t *testin t.Skip("Resource DataSource does not support empty tags") ctx := acctest.Context(t) + var v awstypes.DataSource resourceName := 
"aws_quicksight_data_source.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckDataSourceDestroy(ctx), @@ -1559,11 +1572,12 @@ func TestAccQuickSightDataSource_tags_DefaultTags_nullOverlappingResourceTag(t * t.Skip("Resource DataSource does not support null tags") ctx := acctest.Context(t) + var v awstypes.DataSource resourceName := "aws_quicksight_data_source.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckDataSourceDestroy(ctx), @@ -1623,11 +1637,12 @@ func TestAccQuickSightDataSource_tags_DefaultTags_nullNonOverlappingResourceTag( t.Skip("Resource DataSource does not support null tags") ctx := acctest.Context(t) + var v awstypes.DataSource resourceName := "aws_quicksight_data_source.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckDataSourceDestroy(ctx), @@ -1685,11 +1700,12 @@ func TestAccQuickSightDataSource_tags_DefaultTags_nullNonOverlappingResourceTag( func TestAccQuickSightDataSource_tags_ComputedTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.DataSource resourceName := "aws_quicksight_data_source.test" - 
rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckDataSourceDestroy(ctx), @@ -1740,11 +1756,12 @@ func TestAccQuickSightDataSource_tags_ComputedTag_OnCreate(t *testing.T) { func TestAccQuickSightDataSource_tags_ComputedTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.DataSource resourceName := "aws_quicksight_data_source.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckDataSourceDestroy(ctx), @@ -1837,11 +1854,12 @@ func TestAccQuickSightDataSource_tags_ComputedTag_OnUpdate_Add(t *testing.T) { func TestAccQuickSightDataSource_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.DataSource resourceName := "aws_quicksight_data_source.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckDataSourceDestroy(ctx), @@ -1924,11 +1942,12 @@ func TestAccQuickSightDataSource_tags_ComputedTag_OnUpdate_Replace(t *testing.T) func TestAccQuickSightDataSource_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.DataSource resourceName := 
"aws_quicksight_data_source.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckDataSourceDestroy(ctx), @@ -2086,11 +2105,12 @@ func TestAccQuickSightDataSource_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T func TestAccQuickSightDataSource_tags_IgnoreTags_Overlap_ResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.DataSource resourceName := "aws_quicksight_data_source.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckDataSourceDestroy(ctx), diff --git a/internal/service/quicksight/exports_test.go b/internal/service/quicksight/exports_test.go index 55d85408197d..5646615fb493 100644 --- a/internal/service/quicksight/exports_test.go +++ b/internal/service/quicksight/exports_test.go @@ -3,52 +3,65 @@ package quicksight +import ( + quicksightschema "github.com/hashicorp/terraform-provider-aws/internal/service/quicksight/schema" +) + // Exports for use in tests only. 
var ( - ResourceAccountSettings = newAccountSettingsResource - ResourceAccountSubscription = resourceAccountSubscription - ResourceAnalysis = resourceAnalysis - ResourceDashboard = resourceDashboard - ResourceDataSet = resourceDataSet - ResourceDataSource = resourceDataSource - ResourceFolder = resourceFolder - ResourceFolderMembership = newFolderMembershipResource - ResourceGroup = resourceGroup - ResourceGroupMembership = resourceGroupMembership - ResourceIAMPolicyAssignment = newIAMPolicyAssignmentResource - ResourceIngestion = newIngestionResource - ResourceNamespace = newNamespaceResource - ResourceRefreshSchedule = newRefreshScheduleResource - ResourceRoleMembership = newRoleMembershipResource - ResourceTemplate = resourceTemplate - ResourceTemplateAlias = newTemplateAliasResource - ResourceTheme = resourceTheme - ResourceUser = resourceUser - ResourceVPCConnection = newVPCConnectionResource + ResourceAccountSettings = newAccountSettingsResource + ResourceAccountSubscription = resourceAccountSubscription + ResourceAnalysis = resourceAnalysis + ResourceCustomPermissions = newCustomPermissionsResource + ResourceDashboard = resourceDashboard + ResourceDataSet = resourceDataSet + ResourceDataSource = resourceDataSource + ResourceFolder = resourceFolder + ResourceFolderMembership = newFolderMembershipResource + ResourceGroup = resourceGroup + ResourceGroupMembership = resourceGroupMembership + ResourceIAMPolicyAssignment = newIAMPolicyAssignmentResource + ResourceIngestion = newIngestionResource + ResourceIPRestriction = newIPRestrictionResource + ResourceKeyRegistration = newKeyRegistrationResource + ResourceNamespace = newNamespaceResource + ResourceRefreshSchedule = newRefreshScheduleResource + ResourceRoleCustomPermission = newRoleCustomPermissionResource + ResourceRoleMembership = newRoleMembershipResource + ResourceTemplate = resourceTemplate + ResourceTemplateAlias = newTemplateAliasResource + ResourceTheme = resourceTheme + ResourceUser = resourceUser + 
ResourceUserCustomPermission = newUserCustomPermissionResource + ResourceVPCConnection = newVPCConnectionResource - DashboardLatestVersion = dashboardLatestVersion - DefaultGroupNamespace = defaultGroupNamespace - DefaultUserNamespace = defaultUserNamespace - FindAccountSettingsByID = findAccountSettingsByID - FindAccountSubscriptionByID = findAccountSubscriptionByID - FindAnalysisByTwoPartKey = findAnalysisByTwoPartKey - FindDashboardByThreePartKey = findDashboardByThreePartKey - FindDataSetByTwoPartKey = findDataSetByTwoPartKey - FindDataSourceByTwoPartKey = findDataSourceByTwoPartKey - FindFolderByTwoPartKey = findFolderByTwoPartKey - FindFolderMembershipByFourPartKey = findFolderMembershipByFourPartKey - FindGroupByThreePartKey = findGroupByThreePartKey - FindGroupMembershipByFourPartKey = findGroupMembershipByFourPartKey - FindIAMPolicyAssignmentByThreePartKey = findIAMPolicyAssignmentByThreePartKey - FindIngestionByThreePartKey = findIngestionByThreePartKey - FindNamespaceByTwoPartKey = findNamespaceByTwoPartKey - FindRefreshScheduleByThreePartKey = findRefreshScheduleByThreePartKey - FindRoleMembershipByMultiPartKey = findRoleMembershipByMultiPartKey - FindTemplateAliasByThreePartKey = findTemplateAliasByThreePartKey - FindTemplateByTwoPartKey = findTemplateByTwoPartKey - FindThemeByTwoPartKey = findThemeByTwoPartKey - FindUserByThreePartKey = findUserByThreePartKey - FindVPCConnectionByTwoPartKey = findVPCConnectionByTwoPartKey + DashboardLatestVersion = dashboardLatestVersion + DefaultNamespace = quicksightschema.DefaultNamespace + FindAccountSettingsByID = findAccountSettingsByID + FindAccountSubscriptionByID = findAccountSubscriptionByID + FindAnalysisByTwoPartKey = findAnalysisByTwoPartKey + FindCustomPermissionsByTwoPartKey = findCustomPermissionsByTwoPartKey + FindDashboardByThreePartKey = findDashboardByThreePartKey + FindDataSetByTwoPartKey = findDataSetByTwoPartKey + FindDataSourceByTwoPartKey = findDataSourceByTwoPartKey + FindFolderByTwoPartKey = 
findFolderByTwoPartKey + FindFolderMembershipByFourPartKey = findFolderMembershipByFourPartKey + FindGroupByThreePartKey = findGroupByThreePartKey + FindGroupMembershipByFourPartKey = findGroupMembershipByFourPartKey + FindIAMPolicyAssignmentByThreePartKey = findIAMPolicyAssignmentByThreePartKey + FindIngestionByThreePartKey = findIngestionByThreePartKey + FindIPRestrictionByID = findIPRestrictionByID + FindKeyRegistrationByID = findKeyRegistrationByID + FindNamespaceByTwoPartKey = findNamespaceByTwoPartKey + FindRefreshScheduleByThreePartKey = findRefreshScheduleByThreePartKey + FindRoleCustomPermissionByThreePartKey = findRoleCustomPermissionByThreePartKey + FindRoleMembershipByFourPartKey = findRoleMembershipByFourPartKey + FindTemplateAliasByThreePartKey = findTemplateAliasByThreePartKey + FindTemplateByTwoPartKey = findTemplateByTwoPartKey + FindThemeByTwoPartKey = findThemeByTwoPartKey + FindUserByThreePartKey = findUserByThreePartKey + FindUserCustomPermissionByThreePartKey = findUserCustomPermissionByThreePartKey + FindVPCConnectionByTwoPartKey = findVPCConnectionByTwoPartKey StartAfterDateTimeLayout = startAfterDateTimeLayout ) diff --git a/internal/service/quicksight/folder.go b/internal/service/quicksight/folder.go index 7f6e7443dafc..96f5460fa0f9 100644 --- a/internal/service/quicksight/folder.go +++ b/internal/service/quicksight/folder.go @@ -55,13 +55,7 @@ func resourceFolder() *schema.Resource { Type: schema.TypeString, Computed: true, }, - names.AttrAWSAccountID: { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - ValidateFunc: verify.ValidAccountID, - }, + names.AttrAWSAccountID: quicksightschema.AWSAccountIDSchema(), names.AttrCreatedTime: { Type: schema.TypeString, Computed: true, diff --git a/internal/service/quicksight/folder_membership.go b/internal/service/quicksight/folder_membership.go index 636164943751..f1d8d18a0a63 100644 --- a/internal/service/quicksight/folder_membership.go +++ 
b/internal/service/quicksight/folder_membership.go @@ -23,7 +23,8 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/enum" "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/framework" - "github.com/hashicorp/terraform-provider-aws/internal/framework/flex" + fwflex "github.com/hashicorp/terraform-provider-aws/internal/framework/flex" + quicksightschema "github.com/hashicorp/terraform-provider-aws/internal/service/quicksight/schema" tfslices "github.com/hashicorp/terraform-provider-aws/internal/slices" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/names" @@ -47,15 +48,8 @@ type folderMembershipResource struct { func (r *folderMembershipResource) Schema(ctx context.Context, req resource.SchemaRequest, resp *resource.SchemaResponse) { resp.Schema = schema.Schema{ Attributes: map[string]schema.Attribute{ - names.AttrAWSAccountID: schema.StringAttribute{ - Optional: true, - Computed: true, - PlanModifiers: []planmodifier.String{ - stringplanmodifier.UseStateForUnknown(), - stringplanmodifier.RequiresReplace(), - }, - }, - names.AttrID: framework.IDAttribute(), + names.AttrAWSAccountID: quicksightschema.AWSAccountIDAttribute(), + names.AttrID: framework.IDAttribute(), "folder_id": schema.StringAttribute{ Required: true, PlanModifiers: []planmodifier.String{ @@ -82,18 +76,18 @@ func (r *folderMembershipResource) Schema(ctx context.Context, req resource.Sche } func (r *folderMembershipResource) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { - conn := r.Meta().QuickSightClient(ctx) - - var plan folderMembershipResourceModel - resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...) + var data folderMembershipResourceModel + resp.Diagnostics.Append(req.Plan.Get(ctx, &data)...) 
if resp.Diagnostics.HasError() { return } - - if plan.AWSAccountID.IsUnknown() || plan.AWSAccountID.IsNull() { - plan.AWSAccountID = types.StringValue(r.Meta().AccountID(ctx)) + if data.AWSAccountID.IsUnknown() { + data.AWSAccountID = fwflex.StringValueToFramework(ctx, r.Meta().AccountID(ctx)) } - awsAccountID, folderID, memberType, memberID := flex.StringValueFromFramework(ctx, plan.AWSAccountID), flex.StringValueFromFramework(ctx, plan.FolderID), flex.StringValueFromFramework(ctx, plan.MemberType), flex.StringValueFromFramework(ctx, plan.MemberID) + + conn := r.Meta().QuickSightClient(ctx) + + awsAccountID, folderID, memberType, memberID := fwflex.StringValueFromFramework(ctx, data.AWSAccountID), fwflex.StringValueFromFramework(ctx, data.FolderID), fwflex.StringValueFromFramework(ctx, data.MemberType), fwflex.StringValueFromFramework(ctx, data.MemberID) in := &quicksight.CreateFolderMembershipInput{ AwsAccountId: aws.String(awsAccountID), FolderId: aws.String(folderID), @@ -104,22 +98,22 @@ func (r *folderMembershipResource) Create(ctx context.Context, req resource.Crea out, err := conn.CreateFolderMembership(ctx, in) if err != nil { resp.Diagnostics.AddError( - create.ProblemStandardMessage(names.QuickSight, create.ErrActionCreating, resNameFolderMembership, plan.MemberID.String(), err), + create.ProblemStandardMessage(names.QuickSight, create.ErrActionCreating, resNameFolderMembership, data.MemberID.String(), err), err.Error(), ) return } if out == nil || out.FolderMember == nil { resp.Diagnostics.AddError( - create.ProblemStandardMessage(names.QuickSight, create.ErrActionCreating, resNameFolderMembership, plan.MemberID.String(), nil), + create.ProblemStandardMessage(names.QuickSight, create.ErrActionCreating, resNameFolderMembership, data.MemberID.String(), nil), errors.New("empty output").Error(), ) return } - plan.ID = flex.StringValueToFramework(ctx, folderMembershipCreateResourceID(awsAccountID, folderID, memberType, memberID)) + data.ID = 
fwflex.StringValueToFramework(ctx, folderMembershipCreateResourceID(awsAccountID, folderID, memberType, memberID)) - resp.Diagnostics.Append(resp.State.Set(ctx, plan)...) + resp.Diagnostics.Append(resp.State.Set(ctx, data)...) } func (r *folderMembershipResource) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { @@ -153,10 +147,10 @@ func (r *folderMembershipResource) Read(ctx context.Context, req resource.ReadRe return } - state.MemberID = flex.StringToFramework(ctx, out.MemberId) - state.AWSAccountID = flex.StringValueToFramework(ctx, awsAccountID) - state.FolderID = flex.StringValueToFramework(ctx, folderID) - state.MemberType = flex.StringValueToFramework(ctx, memberType) + state.MemberID = fwflex.StringToFramework(ctx, out.MemberId) + state.AWSAccountID = fwflex.StringValueToFramework(ctx, awsAccountID) + state.FolderID = fwflex.StringValueToFramework(ctx, folderID) + state.MemberType = fwflex.StringValueToFramework(ctx, memberType) resp.Diagnostics.Append(resp.State.Set(ctx, &state)...) 
} diff --git a/internal/service/quicksight/folder_tags_gen_test.go b/internal/service/quicksight/folder_tags_gen_test.go index 8baa206a852a..a045de2cb7e0 100644 --- a/internal/service/quicksight/folder_tags_gen_test.go +++ b/internal/service/quicksight/folder_tags_gen_test.go @@ -7,7 +7,6 @@ import ( awstypes "github.com/aws/aws-sdk-go-v2/service/quicksight/types" "github.com/hashicorp/terraform-plugin-testing/config" - sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/knownvalue" "github.com/hashicorp/terraform-plugin-testing/plancheck" @@ -19,11 +18,12 @@ import ( func TestAccQuickSightFolder_tags(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Folder resourceName := "aws_quicksight_folder.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckFolderDestroy(ctx), @@ -203,11 +203,12 @@ func TestAccQuickSightFolder_tags_null(t *testing.T) { t.Skip("Resource Folder does not support null tags") ctx := acctest.Context(t) + var v awstypes.Folder resourceName := "aws_quicksight_folder.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckFolderDestroy(ctx), @@ -270,11 +271,12 @@ func TestAccQuickSightFolder_tags_null(t *testing.T) { func TestAccQuickSightFolder_tags_EmptyMap(t *testing.T) { ctx := acctest.Context(t) + var 
v awstypes.Folder resourceName := "aws_quicksight_folder.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckFolderDestroy(ctx), @@ -333,11 +335,12 @@ func TestAccQuickSightFolder_tags_EmptyMap(t *testing.T) { func TestAccQuickSightFolder_tags_AddOnUpdate(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Folder resourceName := "aws_quicksight_folder.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckFolderDestroy(ctx), @@ -416,11 +419,12 @@ func TestAccQuickSightFolder_tags_EmptyTag_OnCreate(t *testing.T) { t.Skip("Resource Folder does not support empty tags") ctx := acctest.Context(t) + var v awstypes.Folder resourceName := "aws_quicksight_folder.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckFolderDestroy(ctx), @@ -507,11 +511,12 @@ func TestAccQuickSightFolder_tags_EmptyTag_OnUpdate_Add(t *testing.T) { t.Skip("Resource Folder does not support empty tags") ctx := acctest.Context(t) + var v awstypes.Folder resourceName := "aws_quicksight_folder.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := 
acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckFolderDestroy(ctx), @@ -646,11 +651,12 @@ func TestAccQuickSightFolder_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { t.Skip("Resource Folder does not support empty tags") ctx := acctest.Context(t) + var v awstypes.Folder resourceName := "aws_quicksight_folder.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckFolderDestroy(ctx), @@ -735,11 +741,12 @@ func TestAccQuickSightFolder_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { func TestAccQuickSightFolder_tags_DefaultTags_providerOnly(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Folder resourceName := "aws_quicksight_folder.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckFolderDestroy(ctx), @@ -916,11 +923,12 @@ func TestAccQuickSightFolder_tags_DefaultTags_providerOnly(t *testing.T) { func TestAccQuickSightFolder_tags_DefaultTags_nonOverlapping(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Folder resourceName := "aws_quicksight_folder.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, 
resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckFolderDestroy(ctx), @@ -1076,11 +1084,12 @@ func TestAccQuickSightFolder_tags_DefaultTags_nonOverlapping(t *testing.T) { func TestAccQuickSightFolder_tags_DefaultTags_overlapping(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Folder resourceName := "aws_quicksight_folder.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckFolderDestroy(ctx), @@ -1252,11 +1261,12 @@ func TestAccQuickSightFolder_tags_DefaultTags_overlapping(t *testing.T) { func TestAccQuickSightFolder_tags_DefaultTags_updateToProviderOnly(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Folder resourceName := "aws_quicksight_folder.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckFolderDestroy(ctx), @@ -1342,11 +1352,12 @@ func TestAccQuickSightFolder_tags_DefaultTags_updateToProviderOnly(t *testing.T) func TestAccQuickSightFolder_tags_DefaultTags_updateToResourceOnly(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Folder resourceName := "aws_quicksight_folder.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + 
acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckFolderDestroy(ctx), @@ -1433,11 +1444,12 @@ func TestAccQuickSightFolder_tags_DefaultTags_emptyResourceTag(t *testing.T) { t.Skip("Resource Folder does not support empty tags") ctx := acctest.Context(t) + var v awstypes.Folder resourceName := "aws_quicksight_folder.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckFolderDestroy(ctx), @@ -1500,11 +1512,12 @@ func TestAccQuickSightFolder_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T) t.Skip("Resource Folder does not support empty tags") ctx := acctest.Context(t) + var v awstypes.Folder resourceName := "aws_quicksight_folder.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckFolderDestroy(ctx), @@ -1559,11 +1572,12 @@ func TestAccQuickSightFolder_tags_DefaultTags_nullOverlappingResourceTag(t *test t.Skip("Resource Folder does not support null tags") ctx := acctest.Context(t) + var v awstypes.Folder resourceName := "aws_quicksight_folder.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: 
acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckFolderDestroy(ctx), @@ -1623,11 +1637,12 @@ func TestAccQuickSightFolder_tags_DefaultTags_nullNonOverlappingResourceTag(t *t t.Skip("Resource Folder does not support null tags") ctx := acctest.Context(t) + var v awstypes.Folder resourceName := "aws_quicksight_folder.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckFolderDestroy(ctx), @@ -1685,11 +1700,12 @@ func TestAccQuickSightFolder_tags_DefaultTags_nullNonOverlappingResourceTag(t *t func TestAccQuickSightFolder_tags_ComputedTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Folder resourceName := "aws_quicksight_folder.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckFolderDestroy(ctx), @@ -1740,11 +1756,12 @@ func TestAccQuickSightFolder_tags_ComputedTag_OnCreate(t *testing.T) { func TestAccQuickSightFolder_tags_ComputedTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Folder resourceName := "aws_quicksight_folder.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: 
testAccCheckFolderDestroy(ctx), @@ -1837,11 +1854,12 @@ func TestAccQuickSightFolder_tags_ComputedTag_OnUpdate_Add(t *testing.T) { func TestAccQuickSightFolder_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Folder resourceName := "aws_quicksight_folder.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckFolderDestroy(ctx), @@ -1924,11 +1942,12 @@ func TestAccQuickSightFolder_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { func TestAccQuickSightFolder_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Folder resourceName := "aws_quicksight_folder.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckFolderDestroy(ctx), @@ -2086,11 +2105,12 @@ func TestAccQuickSightFolder_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { func TestAccQuickSightFolder_tags_IgnoreTags_Overlap_ResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Folder resourceName := "aws_quicksight_folder.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckFolderDestroy(ctx), diff 
--git a/internal/service/quicksight/group.go b/internal/service/quicksight/group.go index a74f6544a3a0..4cf6317d27ec 100644 --- a/internal/service/quicksight/group.go +++ b/internal/service/quicksight/group.go @@ -9,25 +9,20 @@ import ( "log" "strings" - "github.com/YakDriver/regexache" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/quicksight" awstypes "github.com/aws/aws-sdk-go-v2/service/quicksight/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" + quicksightschema "github.com/hashicorp/terraform-provider-aws/internal/service/quicksight/schema" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/names" ) -const ( - defaultGroupNamespace = "default" -) - // @SDKResource("aws_quicksight_group", name="Group") func resourceGroup() *schema.Resource { return &schema.Resource{ @@ -46,12 +41,7 @@ func resourceGroup() *schema.Resource { Type: schema.TypeString, Computed: true, }, - names.AttrAWSAccountID: { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, + names.AttrAWSAccountID: quicksightschema.AWSAccountIDSchema(), names.AttrDescription: { Type: schema.TypeString, Optional: true, @@ -61,16 +51,7 @@ func resourceGroup() *schema.Resource { Required: true, ForceNew: true, }, - names.AttrNamespace: { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Default: defaultGroupNamespace, - ValidateFunc: validation.All( - validation.StringLenBetween(1, 63), - validation.StringMatch(regexache.MustCompile(`^[0-9A-Za-z_.-]*$`), "must contain only alphanumeric 
characters, hyphens, underscores, and periods"), - ), - }, + names.AttrNamespace: quicksightschema.NamespaceSchema(), } }, } diff --git a/internal/service/quicksight/group_data_source.go b/internal/service/quicksight/group_data_source.go index 28b296dea55b..88222c402f96 100644 --- a/internal/service/quicksight/group_data_source.go +++ b/internal/service/quicksight/group_data_source.go @@ -6,12 +6,11 @@ package quicksight import ( "context" - "github.com/YakDriver/regexache" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" + quicksightschema "github.com/hashicorp/terraform-provider-aws/internal/service/quicksight/schema" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -26,11 +25,7 @@ func dataSourceGroup() *schema.Resource { Type: schema.TypeString, Computed: true, }, - names.AttrAWSAccountID: { - Type: schema.TypeString, - Optional: true, - Computed: true, - }, + names.AttrAWSAccountID: quicksightschema.AWSAccountIDDataSourceSchema(), names.AttrDescription: { Type: schema.TypeString, Computed: true, @@ -39,15 +34,7 @@ func dataSourceGroup() *schema.Resource { Type: schema.TypeString, Required: true, }, - names.AttrNamespace: { - Type: schema.TypeString, - Optional: true, - Default: defaultGroupNamespace, - ValidateFunc: validation.All( - validation.StringLenBetween(1, 63), - validation.StringMatch(regexache.MustCompile(`^[0-9A-Za-z_.-]*$`), "must contain only alphanumeric characters, hyphens, underscores, and periods"), - ), - }, + names.AttrNamespace: quicksightschema.NamespaceDataSourceSchema(), "principal_id": { Type: schema.TypeString, Computed: true, diff --git a/internal/service/quicksight/group_data_source_test.go b/internal/service/quicksight/group_data_source_test.go index 
312dc447f9df..0ff51c45a124 100644 --- a/internal/service/quicksight/group_data_source_test.go +++ b/internal/service/quicksight/group_data_source_test.go @@ -33,7 +33,7 @@ func TestAccQuickSightGroupDataSource_basic(t *testing.T) { resource.TestCheckResourceAttrPair(dataSourceName, names.AttrGroupName, resourceName, names.AttrGroupName), resource.TestCheckResourceAttrPair(dataSourceName, names.AttrARN, resourceName, names.AttrARN), resource.TestCheckResourceAttr(dataSourceName, names.AttrDescription, "text1"), - resource.TestCheckResourceAttr(dataSourceName, names.AttrNamespace, tfquicksight.DefaultGroupNamespace), + resource.TestCheckResourceAttr(dataSourceName, names.AttrNamespace, tfquicksight.DefaultNamespace), resource.TestCheckResourceAttrSet(dataSourceName, "principal_id"), ), }, diff --git a/internal/service/quicksight/group_membership.go b/internal/service/quicksight/group_membership.go index 49a5801de719..18ababdbae3f 100644 --- a/internal/service/quicksight/group_membership.go +++ b/internal/service/quicksight/group_membership.go @@ -9,17 +9,16 @@ import ( "log" "strings" - "github.com/YakDriver/regexache" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/quicksight" awstypes "github.com/aws/aws-sdk-go-v2/service/quicksight/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" + quicksightschema "github.com/hashicorp/terraform-provider-aws/internal/service/quicksight/schema" tfslices "github.com/hashicorp/terraform-provider-aws/internal/slices" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" 
"github.com/hashicorp/terraform-provider-aws/names" @@ -42,12 +41,7 @@ func resourceGroupMembership() *schema.Resource { Type: schema.TypeString, Computed: true, }, - names.AttrAWSAccountID: { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, + names.AttrAWSAccountID: quicksightschema.AWSAccountIDSchema(), names.AttrGroupName: { Type: schema.TypeString, Required: true, @@ -58,16 +52,7 @@ func resourceGroupMembership() *schema.Resource { Required: true, ForceNew: true, }, - names.AttrNamespace: { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Default: "default", - ValidateFunc: validation.All( - validation.StringLenBetween(1, 63), - validation.StringMatch(regexache.MustCompile(`^[0-9A-Za-z_.-]*$`), "must contain only alphanumeric characters, hyphens, underscores, and periods"), - ), - }, + names.AttrNamespace: quicksightschema.NamespaceSchema(), } }, } diff --git a/internal/service/quicksight/iam_policy_assignment.go b/internal/service/quicksight/iam_policy_assignment.go index 114ef0dcfbd7..171a54d9529c 100644 --- a/internal/service/quicksight/iam_policy_assignment.go +++ b/internal/service/quicksight/iam_policy_assignment.go @@ -16,7 +16,6 @@ import ( "github.com/hashicorp/terraform-plugin-framework/resource" "github.com/hashicorp/terraform-plugin-framework/resource/schema" "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" - "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringdefault" "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" "github.com/hashicorp/terraform-plugin-framework/schema/validator" "github.com/hashicorp/terraform-plugin-framework/types" @@ -26,6 +25,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/framework" fwflex "github.com/hashicorp/terraform-provider-aws/internal/framework/flex" fwtypes "github.com/hashicorp/terraform-provider-aws/internal/framework/types" + quicksightschema 
"github.com/hashicorp/terraform-provider-aws/internal/service/quicksight/schema" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -56,23 +56,9 @@ func (r *iamPolicyAssignmentResource) Schema(ctx context.Context, request resour CustomType: fwtypes.StringEnumType[awstypes.AssignmentStatus](), Required: true, }, - names.AttrAWSAccountID: schema.StringAttribute{ - Optional: true, - Computed: true, - PlanModifiers: []planmodifier.String{ - stringplanmodifier.UseStateForUnknown(), - stringplanmodifier.RequiresReplace(), - }, - }, - names.AttrID: framework.IDAttribute(), - names.AttrNamespace: schema.StringAttribute{ - Optional: true, - Computed: true, - Default: stringdefault.StaticString("default"), - PlanModifiers: []planmodifier.String{ - stringplanmodifier.RequiresReplace(), - }, - }, + names.AttrAWSAccountID: quicksightschema.AWSAccountIDAttribute(), + names.AttrID: framework.IDAttribute(), + names.AttrNamespace: quicksightschema.NamespaceAttribute(), "policy_arn": schema.StringAttribute{ CustomType: fwtypes.ARNType, Optional: true, @@ -109,7 +95,7 @@ func (r *iamPolicyAssignmentResource) Create(ctx context.Context, request resour if response.Diagnostics.HasError() { return } - if data.AWSAccountID.IsUnknown() || data.AWSAccountID.IsNull() { + if data.AWSAccountID.IsUnknown() { data.AWSAccountID = fwflex.StringValueToFramework(ctx, r.Meta().AccountID(ctx)) } @@ -144,7 +130,7 @@ func (r *iamPolicyAssignmentResource) Create(ctx context.Context, request resour data.ID = fwflex.StringValueToFramework(ctx, id) // wait for IAM to propagate before returning - _, err = tfresource.RetryWhenNotFound(ctx, iamPropagationTimeout, func() (any, error) { + _, err = tfresource.RetryWhenNotFound(ctx, iamPropagationTimeout, func(ctx context.Context) (any, error) { return findIAMPolicyAssignmentByThreePartKey(ctx, conn, awsAccountID, namespace, assignmentName) }) @@ -289,7 +275,7 @@ func (r 
*iamPolicyAssignmentResource) Delete(ctx context.Context, request resour } // wait for IAM to propagate before returning - _, err = tfresource.RetryUntilNotFound(ctx, iamPropagationTimeout, func() (any, error) { + _, err = tfresource.RetryUntilNotFound(ctx, iamPropagationTimeout, func(ctx context.Context) (any, error) { return findIAMPolicyAssignmentByThreePartKey(ctx, conn, awsAccountID, namespace, assignmentName) }) diff --git a/internal/service/quicksight/iam_policy_assignment_test.go b/internal/service/quicksight/iam_policy_assignment_test.go index e111b9b92a4f..bbd1d117c45f 100644 --- a/internal/service/quicksight/iam_policy_assignment_test.go +++ b/internal/service/quicksight/iam_policy_assignment_test.go @@ -37,7 +37,7 @@ func TestAccQuickSightIAMPolicyAssignment_basic(t *testing.T) { testAccCheckIAMPolicyAssignmentExists(ctx, resourceName, &assignment), resource.TestCheckResourceAttr(resourceName, "assignment_name", rName), resource.TestCheckResourceAttr(resourceName, "assignment_status", string(awstypes.AssignmentStatusEnabled)), - resource.TestCheckResourceAttr(resourceName, names.AttrNamespace, "default"), + resource.TestCheckResourceAttr(resourceName, names.AttrNamespace, tfquicksight.DefaultNamespace), ), }, { @@ -91,7 +91,7 @@ func TestAccQuickSightIAMPolicyAssignment_assignmentStatus(t *testing.T) { testAccCheckIAMPolicyAssignmentExists(ctx, resourceName, &assignment), resource.TestCheckResourceAttr(resourceName, "assignment_name", rName), resource.TestCheckResourceAttr(resourceName, "assignment_status", string(awstypes.AssignmentStatusDraft)), - resource.TestCheckResourceAttr(resourceName, names.AttrNamespace, "default"), + resource.TestCheckResourceAttr(resourceName, names.AttrNamespace, tfquicksight.DefaultNamespace), ), }, { @@ -105,7 +105,7 @@ func TestAccQuickSightIAMPolicyAssignment_assignmentStatus(t *testing.T) { testAccCheckIAMPolicyAssignmentExists(ctx, resourceName, &assignment), resource.TestCheckResourceAttr(resourceName, 
"assignment_name", rName), resource.TestCheckResourceAttr(resourceName, "assignment_status", string(awstypes.AssignmentStatusEnabled)), - resource.TestCheckResourceAttr(resourceName, names.AttrNamespace, "default"), + resource.TestCheckResourceAttr(resourceName, names.AttrNamespace, tfquicksight.DefaultNamespace), ), }, { @@ -114,7 +114,7 @@ func TestAccQuickSightIAMPolicyAssignment_assignmentStatus(t *testing.T) { testAccCheckIAMPolicyAssignmentExists(ctx, resourceName, &assignment), resource.TestCheckResourceAttr(resourceName, "assignment_name", rName), resource.TestCheckResourceAttr(resourceName, "assignment_status", string(awstypes.AssignmentStatusDisabled)), - resource.TestCheckResourceAttr(resourceName, names.AttrNamespace, "default"), + resource.TestCheckResourceAttr(resourceName, names.AttrNamespace, tfquicksight.DefaultNamespace), ), }, }, @@ -141,7 +141,7 @@ func TestAccQuickSightIAMPolicyAssignment_identities(t *testing.T) { testAccCheckIAMPolicyAssignmentExists(ctx, resourceName, &assignment), resource.TestCheckResourceAttr(resourceName, "assignment_name", rName), resource.TestCheckResourceAttr(resourceName, "assignment_status", string(awstypes.AssignmentStatusEnabled)), - resource.TestCheckResourceAttr(resourceName, names.AttrNamespace, "default"), + resource.TestCheckResourceAttr(resourceName, names.AttrNamespace, tfquicksight.DefaultNamespace), resource.TestCheckResourceAttr(resourceName, "identities.#", "1"), resource.TestCheckResourceAttr(resourceName, "identities.0.user.#", "1"), resource.TestCheckResourceAttrPair(resourceName, "identities.0.user.0", userResourceName, names.AttrUserName), diff --git a/internal/service/quicksight/ingestion.go b/internal/service/quicksight/ingestion.go index 90f98419e584..6535658e2ce2 100644 --- a/internal/service/quicksight/ingestion.go +++ b/internal/service/quicksight/ingestion.go @@ -24,6 +24,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/errs" 
"github.com/hashicorp/terraform-provider-aws/internal/framework" "github.com/hashicorp/terraform-provider-aws/internal/framework/flex" + quicksightschema "github.com/hashicorp/terraform-provider-aws/internal/service/quicksight/schema" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -49,14 +50,7 @@ func (r *ingestionResource) Schema(ctx context.Context, req resource.SchemaReque names.AttrARN: schema.StringAttribute{ Computed: true, }, - names.AttrAWSAccountID: schema.StringAttribute{ - Optional: true, - Computed: true, - PlanModifiers: []planmodifier.String{ - stringplanmodifier.UseStateForUnknown(), - stringplanmodifier.RequiresReplace(), - }, - }, + names.AttrAWSAccountID: quicksightschema.AWSAccountIDAttribute(), "data_set_id": schema.StringAttribute{ Required: true, PlanModifiers: []planmodifier.String{ @@ -87,46 +81,46 @@ func (r *ingestionResource) Schema(ctx context.Context, req resource.SchemaReque } func (r *ingestionResource) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { - conn := r.Meta().QuickSightClient(ctx) - - var plan ingestionResourceModel - resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...) + var data ingestionResourceModel + resp.Diagnostics.Append(req.Plan.Get(ctx, &data)...) 
if resp.Diagnostics.HasError() { return } - - if plan.AWSAccountID.IsUnknown() || plan.AWSAccountID.IsNull() { - plan.AWSAccountID = types.StringValue(r.Meta().AccountID(ctx)) + if data.AWSAccountID.IsUnknown() { + data.AWSAccountID = types.StringValue(r.Meta().AccountID(ctx)) } - awsAccountID, dataSetID, ingestionID := flex.StringValueFromFramework(ctx, plan.AWSAccountID), flex.StringValueFromFramework(ctx, plan.DataSetID), flex.StringValueFromFramework(ctx, plan.IngestionID) + + conn := r.Meta().QuickSightClient(ctx) + + awsAccountID, dataSetID, ingestionID := flex.StringValueFromFramework(ctx, data.AWSAccountID), flex.StringValueFromFramework(ctx, data.DataSetID), flex.StringValueFromFramework(ctx, data.IngestionID) in := quicksight.CreateIngestionInput{ AwsAccountId: aws.String(awsAccountID), DataSetId: aws.String(dataSetID), IngestionId: aws.String(ingestionID), - IngestionType: awstypes.IngestionType(plan.IngestionType.ValueString()), + IngestionType: awstypes.IngestionType(data.IngestionType.ValueString()), } out, err := conn.CreateIngestion(ctx, &in) if err != nil { resp.Diagnostics.AddError( - create.ProblemStandardMessage(names.QuickSight, create.ErrActionCreating, resNameIngestion, plan.IngestionID.String(), nil), + create.ProblemStandardMessage(names.QuickSight, create.ErrActionCreating, resNameIngestion, data.IngestionID.String(), nil), err.Error(), ) return } if out == nil { resp.Diagnostics.AddError( - create.ProblemStandardMessage(names.QuickSight, create.ErrActionCreating, resNameIngestion, plan.IngestionID.String(), nil), + create.ProblemStandardMessage(names.QuickSight, create.ErrActionCreating, resNameIngestion, data.IngestionID.String(), nil), errors.New("empty output").Error(), ) return } - plan.ID = flex.StringValueToFramework(ctx, ingestionCreateResourceID(awsAccountID, dataSetID, ingestionID)) - plan.ARN = flex.StringToFramework(ctx, out.Arn) - plan.IngestionStatus = flex.StringValueToFramework(ctx, out.IngestionStatus) + data.ID = 
flex.StringValueToFramework(ctx, ingestionCreateResourceID(awsAccountID, dataSetID, ingestionID)) + data.ARN = flex.StringToFramework(ctx, out.Arn) + data.IngestionStatus = flex.StringValueToFramework(ctx, out.IngestionStatus) - resp.Diagnostics.Append(resp.State.Set(ctx, plan)...) + resp.Diagnostics.Append(resp.State.Set(ctx, data)...) } func (r *ingestionResource) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { diff --git a/internal/service/quicksight/ip_restriction.go b/internal/service/quicksight/ip_restriction.go new file mode 100644 index 000000000000..f4ea103e5e13 --- /dev/null +++ b/internal/service/quicksight/ip_restriction.go @@ -0,0 +1,270 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package quicksight + +import ( + "context" + "fmt" + + "github.com/YakDriver/regexache" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/quicksight" + awstypes "github.com/aws/aws-sdk-go-v2/service/quicksight/types" + "github.com/hashicorp/terraform-plugin-framework-validators/mapvalidator" + "github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" + "github.com/hashicorp/terraform-provider-aws/internal/errs" + "github.com/hashicorp/terraform-provider-aws/internal/errs/fwdiag" + "github.com/hashicorp/terraform-provider-aws/internal/framework" + fwflex "github.com/hashicorp/terraform-provider-aws/internal/framework/flex" + fwtypes "github.com/hashicorp/terraform-provider-aws/internal/framework/types" + fwvalidators "github.com/hashicorp/terraform-provider-aws/internal/framework/validators" + 
quicksightschema "github.com/hashicorp/terraform-provider-aws/internal/service/quicksight/schema" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + "github.com/hashicorp/terraform-provider-aws/names" +) + +// @FrameworkResource("aws_quicksight_ip_restriction", name="IP Restriction") +func newIPRestrictionResource(_ context.Context) (resource.ResourceWithConfigure, error) { + r := &ipRestrictionResource{} + + return r, nil +} + +type ipRestrictionResource struct { + framework.ResourceWithModel[ipRestrictionResourceModel] +} + +func (r *ipRestrictionResource) Schema(ctx context.Context, request resource.SchemaRequest, response *resource.SchemaResponse) { + response.Schema = schema.Schema{ + Attributes: map[string]schema.Attribute{ + names.AttrAWSAccountID: quicksightschema.AWSAccountIDAttribute(), + names.AttrEnabled: schema.BoolAttribute{ + Required: true, + }, + "ip_restriction_rule_map": schema.MapAttribute{ + CustomType: fwtypes.MapOfStringType, + ElementType: types.StringType, + Optional: true, + Validators: []validator.Map{ + mapvalidator.KeysAre(fwvalidators.IPv4CIDRNetworkAddress()), + mapvalidator.ValueStringsAre(stringvalidator.LengthBetween(0, 150)), + }, + }, + "vpc_endpoint_id_restriction_rule_map": schema.MapAttribute{ + CustomType: fwtypes.MapOfStringType, + ElementType: types.StringType, + Optional: true, + Validators: []validator.Map{ + mapvalidator.KeysAre(stringvalidator.RegexMatches(regexache.MustCompile(`^vpce-[0-9a-z]*$`), "value must be a VPC endpoint ID")), + mapvalidator.ValueStringsAre(stringvalidator.LengthBetween(0, 150)), + }, + }, + "vpc_id_restriction_rule_map": schema.MapAttribute{ + CustomType: fwtypes.MapOfStringType, + ElementType: types.StringType, + Optional: true, + Validators: []validator.Map{ + mapvalidator.KeysAre(stringvalidator.RegexMatches(regexache.MustCompile(`^vpc-[0-9a-z]*$`), "value must be a VPC ID")), + mapvalidator.ValueStringsAre(stringvalidator.LengthBetween(0, 150)), + }, + }, + }, + } +} + 
+func (r *ipRestrictionResource) Create(ctx context.Context, request resource.CreateRequest, response *resource.CreateResponse) { + var data ipRestrictionResourceModel + response.Diagnostics.Append(request.Plan.Get(ctx, &data)...) + if response.Diagnostics.HasError() { + return + } + if data.AWSAccountID.IsUnknown() { + data.AWSAccountID = fwflex.StringValueToFramework(ctx, r.Meta().AccountID(ctx)) + } + + conn := r.Meta().QuickSightClient(ctx) + + accountID := fwflex.StringValueFromFramework(ctx, data.AWSAccountID) + var input quicksight.UpdateIpRestrictionInput + response.Diagnostics.Append(fwflex.Expand(ctx, data, &input)...) + if response.Diagnostics.HasError() { + return + } + + // Send empty maps, not nil. + if data.IPRestrictionRuleMap.IsNull() { + input.IpRestrictionRuleMap = map[string]string{} + } + if data.VPCEndpointIDRestrictionRuleMap.IsNull() { + input.VpcEndpointIdRestrictionRuleMap = map[string]string{} + } + if data.VPCIDRestrictionRuleMap.IsNull() { + input.VpcIdRestrictionRuleMap = map[string]string{} + } + + _, err := conn.UpdateIpRestriction(ctx, &input) + + if err != nil { + response.Diagnostics.AddError(fmt.Sprintf("creating Quicksight IP Restriction (%s)", accountID), err.Error()) + + return + } + + response.Diagnostics.Append(response.State.Set(ctx, data)...) +} + +func (r *ipRestrictionResource) Read(ctx context.Context, request resource.ReadRequest, response *resource.ReadResponse) { + var data ipRestrictionResourceModel + response.Diagnostics.Append(request.State.Get(ctx, &data)...) 
+ if response.Diagnostics.HasError() { + return + } + + conn := r.Meta().QuickSightClient(ctx) + + accountID := fwflex.StringValueFromFramework(ctx, data.AWSAccountID) + output, err := findIPRestrictionByID(ctx, conn, accountID) + + if tfresource.NotFound(err) { + response.Diagnostics.Append(fwdiag.NewResourceNotFoundWarningDiagnostic(err)) + response.State.RemoveResource(ctx) + + return + } + + if err != nil { + response.Diagnostics.AddError(fmt.Sprintf("reading Quicksight IP Restriction (%s)", accountID), err.Error()) + + return + } + + // Set attributes for import. + // API returns empty maps, not nil. + if data.IPRestrictionRuleMap.IsNull() && len(output.IpRestrictionRuleMap) == 0 { + output.IpRestrictionRuleMap = nil + } + if data.VPCEndpointIDRestrictionRuleMap.IsNull() && len(output.VpcEndpointIdRestrictionRuleMap) == 0 { + output.VpcEndpointIdRestrictionRuleMap = nil + } + if data.VPCIDRestrictionRuleMap.IsNull() && len(output.VpcIdRestrictionRuleMap) == 0 { + output.VpcIdRestrictionRuleMap = nil + } + response.Diagnostics.Append(fwflex.Flatten(ctx, output, &data)...) + if response.Diagnostics.HasError() { + return + } + + response.Diagnostics.Append(response.State.Set(ctx, &data)...) +} + +func (r *ipRestrictionResource) Update(ctx context.Context, request resource.UpdateRequest, response *resource.UpdateResponse) { + var new, old ipRestrictionResourceModel + response.Diagnostics.Append(request.Plan.Get(ctx, &new)...) + if response.Diagnostics.HasError() { + return + } + response.Diagnostics.Append(request.State.Get(ctx, &old)...) + if response.Diagnostics.HasError() { + return + } + + conn := r.Meta().QuickSightClient(ctx) + + accountID := fwflex.StringValueFromFramework(ctx, new.AWSAccountID) + var input quicksight.UpdateIpRestrictionInput + response.Diagnostics.Append(fwflex.Expand(ctx, new, &input)...) + if response.Diagnostics.HasError() { + return + } + + // Send empty maps, not nil. 
+ if new.IPRestrictionRuleMap.IsNull() { + input.IpRestrictionRuleMap = map[string]string{} + } + if new.VPCEndpointIDRestrictionRuleMap.IsNull() { + input.VpcEndpointIdRestrictionRuleMap = map[string]string{} + } + if new.VPCIDRestrictionRuleMap.IsNull() { + input.VpcIdRestrictionRuleMap = map[string]string{} + } + + _, err := conn.UpdateIpRestriction(ctx, &input) + + if err != nil { + response.Diagnostics.AddError(fmt.Sprintf("updating Quicksight IP Restriction (%s)", accountID), err.Error()) + + return + } + + response.Diagnostics.Append(response.State.Set(ctx, &new)...) +} + +func (r *ipRestrictionResource) Delete(ctx context.Context, request resource.DeleteRequest, response *resource.DeleteResponse) { + var data ipRestrictionResourceModel + response.Diagnostics.Append(request.State.Get(ctx, &data)...) + if response.Diagnostics.HasError() { + return + } + + conn := r.Meta().QuickSightClient(ctx) + + accountID := fwflex.StringValueFromFramework(ctx, data.AWSAccountID) + input := quicksight.UpdateIpRestrictionInput{ + AwsAccountId: aws.String(accountID), + Enabled: aws.Bool(false), + IpRestrictionRuleMap: map[string]string{}, + VpcEndpointIdRestrictionRuleMap: map[string]string{}, + VpcIdRestrictionRuleMap: map[string]string{}, + } + _, err := conn.UpdateIpRestriction(ctx, &input) + + if err != nil { + response.Diagnostics.AddError(fmt.Sprintf("deleting Quicksight IP Restriction (%s)", accountID), err.Error()) + + return + } +} + +func (r *ipRestrictionResource) ImportState(ctx context.Context, request resource.ImportStateRequest, response *resource.ImportStateResponse) { + resource.ImportStatePassthroughID(ctx, path.Root(names.AttrAWSAccountID), request, response) +} + +func findIPRestrictionByID(ctx context.Context, conn *quicksight.Client, id string) (*quicksight.DescribeIpRestrictionOutput, error) { + input := quicksight.DescribeIpRestrictionInput{ + AwsAccountId: aws.String(id), + } + output, err := conn.DescribeIpRestriction(ctx, &input) + + if 
errs.IsA[*awstypes.ResourceNotFoundException](err) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, + } + } + + if err != nil { + return nil, err + } + + if output == nil || (!aws.ToBool(output.Enabled) && len(output.IpRestrictionRuleMap) == 0 && len(output.VpcEndpointIdRestrictionRuleMap) == 0 && len(output.VpcIdRestrictionRuleMap) == 0) { + return nil, tfresource.NewEmptyResultError(&input) + } + + return output, nil +} + +type ipRestrictionResourceModel struct { + framework.WithRegionModel + AWSAccountID types.String `tfsdk:"aws_account_id"` + Enabled types.Bool `tfsdk:"enabled"` + IPRestrictionRuleMap fwtypes.MapOfString `tfsdk:"ip_restriction_rule_map"` + VPCEndpointIDRestrictionRuleMap fwtypes.MapOfString `tfsdk:"vpc_endpoint_id_restriction_rule_map"` + VPCIDRestrictionRuleMap fwtypes.MapOfString `tfsdk:"vpc_id_restriction_rule_map"` +} diff --git a/internal/service/quicksight/ip_restriction_test.go b/internal/service/quicksight/ip_restriction_test.go new file mode 100644 index 000000000000..19c45f633e33 --- /dev/null +++ b/internal/service/quicksight/ip_restriction_test.go @@ -0,0 +1,282 @@ +// Copyright (c) HashiCorp, Inc. 
// SPDX-License-Identifier: MPL-2.0

package quicksight_test

import (
    "context"
    "fmt"
    "testing"

    sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest"
    "github.com/hashicorp/terraform-plugin-testing/helper/resource"
    "github.com/hashicorp/terraform-plugin-testing/knownvalue"
    "github.com/hashicorp/terraform-plugin-testing/plancheck"
    "github.com/hashicorp/terraform-plugin-testing/statecheck"
    "github.com/hashicorp/terraform-plugin-testing/terraform"
    "github.com/hashicorp/terraform-plugin-testing/tfjsonpath"
    "github.com/hashicorp/terraform-provider-aws/internal/acctest"
    tfknownvalue "github.com/hashicorp/terraform-provider-aws/internal/acctest/knownvalue"
    "github.com/hashicorp/terraform-provider-aws/internal/conns"
    tfquicksight "github.com/hashicorp/terraform-provider-aws/internal/service/quicksight"
    "github.com/hashicorp/terraform-provider-aws/internal/tfresource"
    "github.com/hashicorp/terraform-provider-aws/names"
)

// testAccIPRestriction_basic creates the restriction with only `enabled`
// set and verifies that all three rule maps stay null in state.
func testAccIPRestriction_basic(t *testing.T) {
    ctx := acctest.Context(t)
    resourceName := "aws_quicksight_ip_restriction.test"

    resource.Test(t, resource.TestCase{
        PreCheck:                 func() { acctest.PreCheck(ctx, t) },
        ErrorCheck:               acctest.ErrorCheck(t, names.QuickSightServiceID),
        ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories,
        CheckDestroy:             testAccCheckIPRestrictionDestroy(ctx),
        Steps: []resource.TestStep{
            {
                Config: testAccIPRestrictionConfig_basic,
                Check: resource.ComposeAggregateTestCheckFunc(
                    testAccCheckIPRestrictionExists(ctx, resourceName),
                ),
                ConfigPlanChecks: resource.ConfigPlanChecks{
                    PreApply: []plancheck.PlanCheck{
                        plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate),
                    },
                    PostApplyPreRefresh: []plancheck.PlanCheck{
                        plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop),
                    },
                    PostApplyPostRefresh: []plancheck.PlanCheck{
                        plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop),
                    },
                },
                ConfigStateChecks: []statecheck.StateCheck{
                    statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrAWSAccountID), tfknownvalue.AccountID()),
                    statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrEnabled), knownvalue.Bool(true)),
                    statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("ip_restriction_rule_map"), knownvalue.Null()),
                    statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("vpc_endpoint_id_restriction_rule_map"), knownvalue.Null()),
                    statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("vpc_id_restriction_rule_map"), knownvalue.Null()),
                },
            },
            {
                // Import uses the AWS account ID as the identifier.
                ResourceName:                         resourceName,
                ImportState:                          true,
                ImportStateVerify:                    true,
                ImportStateIdFunc:                    acctest.AttrImportStateIdFunc(resourceName, names.AttrAWSAccountID),
                ImportStateVerifyIdentifierAttribute: names.AttrAWSAccountID,
            },
        },
    })
}

// testAccIPRestriction_disappears deletes the restriction out-of-band and
// expects a non-empty re-create plan.
func testAccIPRestriction_disappears(t *testing.T) {
    ctx := acctest.Context(t)
    resourceName := "aws_quicksight_ip_restriction.test"

    resource.Test(t, resource.TestCase{
        PreCheck: func() {
            acctest.PreCheck(ctx, t)
            acctest.PreCheckPartitionHasService(t, names.QuickSightEndpointID)
        },
        ErrorCheck:               acctest.ErrorCheck(t, names.QuickSightServiceID),
        ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories,
        CheckDestroy:             testAccCheckIPRestrictionDestroy(ctx),
        Steps: []resource.TestStep{
            {
                Config: testAccIPRestrictionConfig_basic,
                Check: resource.ComposeTestCheckFunc(
                    testAccCheckIPRestrictionExists(ctx, resourceName),
                    acctest.CheckFrameworkResourceDisappears(ctx, acctest.Provider, tfquicksight.ResourceIPRestriction, resourceName),
                ),
                ExpectNonEmptyPlan: true,
            },
        },
    })
}

// testAccIPRestriction_update exercises an in-place update that changes
// `enabled` and swaps rule maps between the three map attributes.
func testAccIPRestriction_update(t *testing.T) {
    ctx := acctest.Context(t)
    rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix)
    resourceName := "aws_quicksight_ip_restriction.test"

    resource.Test(t, resource.TestCase{
        PreCheck:                 func() { acctest.PreCheck(ctx, t) },
        ErrorCheck:               acctest.ErrorCheck(t, names.QuickSightServiceID),
        ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories,
        CheckDestroy:             testAccCheckIPRestrictionDestroy(ctx),
        Steps: []resource.TestStep{
            {
                Config: testAccIPRestrictionConfig_permissions1(rName, false),
                Check: resource.ComposeAggregateTestCheckFunc(
                    testAccCheckIPRestrictionExists(ctx, resourceName),
                ),
                ConfigPlanChecks: resource.ConfigPlanChecks{
                    PreApply: []plancheck.PlanCheck{
                        plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate),
                    },
                    PostApplyPreRefresh: []plancheck.PlanCheck{
                        plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop),
                    },
                    PostApplyPostRefresh: []plancheck.PlanCheck{
                        plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop),
                    },
                },
                ConfigStateChecks: []statecheck.StateCheck{
                    statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrAWSAccountID), tfknownvalue.AccountID()),
                    statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrEnabled), knownvalue.Bool(false)),
                    statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("ip_restriction_rule_map"), knownvalue.MapExact(map[string]knownvalue.Check{
                        "108.56.166.202/32": knownvalue.StringExact("Allow self"),
                    })),
                    statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("vpc_endpoint_id_restriction_rule_map"), knownvalue.Null()),
                    statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("vpc_id_restriction_rule_map"), knownvalue.MapSizeExact(1)),
                },
            },
            {
                ResourceName:                         resourceName,
                ImportState:                          true,
                ImportStateVerify:                    true,
                ImportStateIdFunc:                    acctest.AttrImportStateIdFunc(resourceName, names.AttrAWSAccountID),
                ImportStateVerifyIdentifierAttribute: names.AttrAWSAccountID,
            },
            {
                Config: testAccIPRestrictionConfig_permissions2(rName, true),
                Check: resource.ComposeAggregateTestCheckFunc(
                    testAccCheckIPRestrictionExists(ctx, resourceName),
                ),
                ConfigPlanChecks: resource.ConfigPlanChecks{
                    PreApply: []plancheck.PlanCheck{
                        plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate),
                    },
                    PostApplyPreRefresh: []plancheck.PlanCheck{
                        plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop),
                    },
                    PostApplyPostRefresh: []plancheck.PlanCheck{
                        plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop),
                    },
                },
                ConfigStateChecks: []statecheck.StateCheck{
                    statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrAWSAccountID), tfknownvalue.AccountID()),
                    statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrEnabled), knownvalue.Bool(true)),
                    statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("ip_restriction_rule_map"), knownvalue.Null()),
                    statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("vpc_endpoint_id_restriction_rule_map"), knownvalue.MapSizeExact(1)),
                    statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("vpc_id_restriction_rule_map"), knownvalue.MapSizeExact(2)),
                },
            },
        },
    })
}

// testAccCheckIPRestrictionDestroy verifies the restriction no longer exists
// (NotFound from the finder) after destroy.
func testAccCheckIPRestrictionDestroy(ctx context.Context) resource.TestCheckFunc {
    return func(s *terraform.State) error {
        conn := acctest.Provider.Meta().(*conns.AWSClient).QuickSightClient(ctx)

        for _, rs := range s.RootModule().Resources {
            if rs.Type != "aws_quicksight_ip_restriction" {
                continue
            }

            _, err := tfquicksight.FindIPRestrictionByID(ctx, conn, rs.Primary.Attributes[names.AttrAWSAccountID])

            if tfresource.NotFound(err) {
                continue
            }

            if err != nil {
                return err
            }

            return fmt.Errorf("QuickSight IP Restriction (%s) still exists", rs.Primary.Attributes[names.AttrAWSAccountID])
        }

        return nil
    }
}

// testAccCheckIPRestrictionExists asserts the restriction can be found by
// the account ID recorded in state.
func testAccCheckIPRestrictionExists(ctx context.Context, n string) resource.TestCheckFunc {
    return func(s *terraform.State) error {
        rs, ok := s.RootModule().Resources[n]
        if !ok {
            return fmt.Errorf("Not found: %s", n)
        }

        conn := acctest.Provider.Meta().(*conns.AWSClient).QuickSightClient(ctx)

        _, err := tfquicksight.FindIPRestrictionByID(ctx, conn, rs.Primary.Attributes[names.AttrAWSAccountID])

        return err
    }
}

const testAccIPRestrictionConfig_basic = `
resource "aws_quicksight_ip_restriction" "test" {
  enabled = true
}
`

// testAccIPRestrictionConfig_permissions1: one CIDR rule and one VPC rule.
// The VPC endpoint is created here too so the second step can reference it
// without re-creating infrastructure.
func testAccIPRestrictionConfig_permissions1(rName string, enabled bool) string {
    return fmt.Sprintf(`
resource "aws_vpc" "test" {
  count = 3

  cidr_block = "10.${count.index}.0.0/16"

  tags = {
    Name = %[1]q
  }
}

data "aws_region" "current" {}

resource "aws_vpc_endpoint" "test" {
  vpc_id            = aws_vpc.test[1].id
  service_name      = "com.amazonaws.${data.aws_region.current.region}.quicksight-website"
  vpc_endpoint_type = "Interface"
}

resource "aws_quicksight_ip_restriction" "test" {
  enabled = %[2]t

  ip_restriction_rule_map = {
    "108.56.166.202/32" = "Allow self"
  }

  vpc_id_restriction_rule_map = {
    (aws_vpc.test[0].id) = "Main VPC"
  }
}
`, rName, enabled)
}

// testAccIPRestrictionConfig_permissions2: drops the CIDR rule, adds a VPC
// endpoint rule and a second VPC rule (one with an empty description).
func testAccIPRestrictionConfig_permissions2(rName string, enabled bool) string {
    return fmt.Sprintf(`
resource "aws_vpc" "test" {
  count = 3

  cidr_block = "10.${count.index}.0.0/16"

  tags = {
    Name = %[1]q
  }
}

data "aws_region" "current" {}

resource "aws_vpc_endpoint" "test" {
  vpc_id            = aws_vpc.test[1].id
  service_name      = "com.amazonaws.${data.aws_region.current.region}.quicksight-website"
  vpc_endpoint_type = "Interface"
}

resource "aws_quicksight_ip_restriction" "test" {
  enabled = %[2]t

  vpc_id_restriction_rule_map = {
    (aws_vpc.test[0].id) = "Main VPC"
    (aws_vpc.test[2].id) = ""
  }

  vpc_endpoint_id_restriction_rule_map = {
    (aws_vpc_endpoint.test.id) = "EP"
  }
}
`, rName, enabled)
}
diff --git a/internal/service/quicksight/key_registration.go b/internal/service/quicksight/key_registration.go
new file mode 100644
index 000000000000..969485164783
--- /dev/null
+++ b/internal/service/quicksight/key_registration.go
@@ -0,0 +1,218 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0 + +package quicksight + +import ( + "context" + "fmt" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/quicksight" + awstypes "github.com/aws/aws-sdk-go-v2/service/quicksight/types" + "github.com/hashicorp/terraform-plugin-framework-validators/setvalidator" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/booldefault" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-provider-aws/internal/errs/fwdiag" + "github.com/hashicorp/terraform-provider-aws/internal/framework" + fwflex "github.com/hashicorp/terraform-provider-aws/internal/framework/flex" + fwtypes "github.com/hashicorp/terraform-provider-aws/internal/framework/types" + quicksightschema "github.com/hashicorp/terraform-provider-aws/internal/service/quicksight/schema" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + "github.com/hashicorp/terraform-provider-aws/names" +) + +// @FrameworkResource("aws_quicksight_key_registration", name="Key Registration") +func newKeyRegistrationResource(_ context.Context) (resource.ResourceWithConfigure, error) { + r := &keyRegistrationResource{} + + return r, nil +} + +type keyRegistrationResource struct { + framework.ResourceWithModel[keyRegistrationResourceModel] +} + +func (r *keyRegistrationResource) Schema(ctx context.Context, request resource.SchemaRequest, response *resource.SchemaResponse) { + response.Schema = schema.Schema{ + Attributes: map[string]schema.Attribute{ + names.AttrAWSAccountID: quicksightschema.AWSAccountIDAttribute(), + }, + Blocks: map[string]schema.Block{ + "key_registration": schema.SetNestedBlock{ + CustomType: 
fwtypes.NewSetNestedObjectTypeOf[registeredCustomerManagedKeyModel](ctx), + Validators: []validator.Set{ + setvalidator.IsRequired(), + setvalidator.SizeAtLeast(1), + }, + NestedObject: schema.NestedBlockObject{ + Attributes: map[string]schema.Attribute{ + "default_key": schema.BoolAttribute{ + Optional: true, + Computed: true, + Default: booldefault.StaticBool(false), + }, + "key_arn": schema.StringAttribute{ + CustomType: fwtypes.ARNType, + Required: true, + }, + }, + }, + }, + }, + } +} + +func (r *keyRegistrationResource) Create(ctx context.Context, request resource.CreateRequest, response *resource.CreateResponse) { + var data keyRegistrationResourceModel + response.Diagnostics.Append(request.Plan.Get(ctx, &data)...) + if response.Diagnostics.HasError() { + return + } + if data.AWSAccountID.IsUnknown() { + data.AWSAccountID = fwflex.StringValueToFramework(ctx, r.Meta().AccountID(ctx)) + } + + conn := r.Meta().QuickSightClient(ctx) + + accountID := fwflex.StringValueFromFramework(ctx, data.AWSAccountID) + var input quicksight.UpdateKeyRegistrationInput + response.Diagnostics.Append(fwflex.Expand(ctx, data, &input)...) + if response.Diagnostics.HasError() { + return + } + + _, err := conn.UpdateKeyRegistration(ctx, &input) + + if err != nil { + response.Diagnostics.AddError(fmt.Sprintf("creating Quicksight Key Registration (%s)", accountID), err.Error()) + + return + } + + response.Diagnostics.Append(response.State.Set(ctx, data)...) +} + +func (r *keyRegistrationResource) Read(ctx context.Context, request resource.ReadRequest, response *resource.ReadResponse) { + var data keyRegistrationResourceModel + response.Diagnostics.Append(request.State.Get(ctx, &data)...) 
+ if response.Diagnostics.HasError() { + return + } + + conn := r.Meta().QuickSightClient(ctx) + + accountID := fwflex.StringValueFromFramework(ctx, data.AWSAccountID) + output, err := findKeyRegistrationByID(ctx, conn, accountID) + + if tfresource.NotFound(err) { + response.Diagnostics.Append(fwdiag.NewResourceNotFoundWarningDiagnostic(err)) + response.State.RemoveResource(ctx) + + return + } + + if err != nil { + response.Diagnostics.AddError(fmt.Sprintf("reading Quicksight Key Registration (%s)", accountID), err.Error()) + + return + } + + // Set attributes for import. + response.Diagnostics.Append(fwflex.Flatten(ctx, output, &data.KeyRegistration)...) + if response.Diagnostics.HasError() { + return + } + + response.Diagnostics.Append(response.State.Set(ctx, &data)...) +} + +func (r *keyRegistrationResource) Update(ctx context.Context, request resource.UpdateRequest, response *resource.UpdateResponse) { + var new, old keyRegistrationResourceModel + response.Diagnostics.Append(request.Plan.Get(ctx, &new)...) + if response.Diagnostics.HasError() { + return + } + response.Diagnostics.Append(request.State.Get(ctx, &old)...) + if response.Diagnostics.HasError() { + return + } + + conn := r.Meta().QuickSightClient(ctx) + + accountID := fwflex.StringValueFromFramework(ctx, new.AWSAccountID) + var input quicksight.UpdateKeyRegistrationInput + response.Diagnostics.Append(fwflex.Expand(ctx, new, &input)...) + if response.Diagnostics.HasError() { + return + } + + _, err := conn.UpdateKeyRegistration(ctx, &input) + + if err != nil { + response.Diagnostics.AddError(fmt.Sprintf("updating Quicksight Key Registration (%s)", accountID), err.Error()) + + return + } + + response.Diagnostics.Append(response.State.Set(ctx, &new)...) +} + +func (r *keyRegistrationResource) Delete(ctx context.Context, request resource.DeleteRequest, response *resource.DeleteResponse) { + var data keyRegistrationResourceModel + response.Diagnostics.Append(request.State.Get(ctx, &data)...) 
+ if response.Diagnostics.HasError() { + return + } + + conn := r.Meta().QuickSightClient(ctx) + + accountID := fwflex.StringValueFromFramework(ctx, data.AWSAccountID) + input := quicksight.UpdateKeyRegistrationInput{ + AwsAccountId: aws.String(accountID), + KeyRegistration: []awstypes.RegisteredCustomerManagedKey{}, + } + _, err := conn.UpdateKeyRegistration(ctx, &input) + + if err != nil { + response.Diagnostics.AddError(fmt.Sprintf("deleting Quicksight Key Registration (%s)", accountID), err.Error()) + + return + } +} + +func (r *keyRegistrationResource) ImportState(ctx context.Context, request resource.ImportStateRequest, response *resource.ImportStateResponse) { + resource.ImportStatePassthroughID(ctx, path.Root(names.AttrAWSAccountID), request, response) +} + +func findKeyRegistrationByID(ctx context.Context, conn *quicksight.Client, id string) ([]awstypes.RegisteredCustomerManagedKey, error) { + input := quicksight.DescribeKeyRegistrationInput{ + AwsAccountId: aws.String(id), + } + output, err := conn.DescribeKeyRegistration(ctx, &input) + + if err != nil { + return nil, err + } + + if output == nil || len(output.KeyRegistration) == 0 { + return nil, tfresource.NewEmptyResultError(&input) + } + + return output.KeyRegistration, nil +} + +type keyRegistrationResourceModel struct { + framework.WithRegionModel + AWSAccountID types.String `tfsdk:"aws_account_id"` + KeyRegistration fwtypes.SetNestedObjectValueOf[registeredCustomerManagedKeyModel] `tfsdk:"key_registration"` +} + +type registeredCustomerManagedKeyModel struct { + DefaultKey types.Bool `tfsdk:"default_key"` + KeyARN fwtypes.ARN `tfsdk:"key_arn"` +} diff --git a/internal/service/quicksight/key_registration_test.go b/internal/service/quicksight/key_registration_test.go new file mode 100644 index 000000000000..d3d313bf15e5 --- /dev/null +++ b/internal/service/quicksight/key_registration_test.go @@ -0,0 +1,206 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package quicksight_test + +import ( + "context" + "fmt" + "testing" + + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/knownvalue" + "github.com/hashicorp/terraform-plugin-testing/plancheck" + "github.com/hashicorp/terraform-plugin-testing/statecheck" + "github.com/hashicorp/terraform-plugin-testing/terraform" + "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + tfknownvalue "github.com/hashicorp/terraform-provider-aws/internal/acctest/knownvalue" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + tfquicksight "github.com/hashicorp/terraform-provider-aws/internal/service/quicksight" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func testAccKeyRegistration_basic(t *testing.T) { + ctx := acctest.Context(t) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_quicksight_key_registration.test" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckKeyRegistrationDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccKeyRegistrationConfig_basic(rName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckKeyRegistrationExists(ctx, resourceName), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ 
+ plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrAWSAccountID), tfknownvalue.AccountID()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("key_registration"), knownvalue.SetExact([]knownvalue.Check{ + knownvalue.ObjectExact(map[string]knownvalue.Check{ + "default_key": knownvalue.Bool(false), + "key_arn": knownvalue.NotNull(), + }), + })), + }, + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, names.AttrAWSAccountID), + ImportStateVerifyIdentifierAttribute: names.AttrAWSAccountID, + }, + { + Config: testAccKeyRegistrationConfig_updated(rName), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrAWSAccountID), tfknownvalue.AccountID()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("key_registration"), knownvalue.SetExact([]knownvalue.Check{ + knownvalue.ObjectExact(map[string]knownvalue.Check{ + "default_key": knownvalue.Bool(false), + "key_arn": knownvalue.NotNull(), + }), + knownvalue.ObjectExact(map[string]knownvalue.Check{ + "default_key": knownvalue.Bool(true), + "key_arn": knownvalue.NotNull(), + }), + })), + }, + }, + }, + }) +} + +func testAccKeyRegistration_disappears(t *testing.T) { + ctx := acctest.Context(t) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := 
"aws_quicksight_key_registration.test" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, names.QuickSightEndpointID) + }, + ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckKeyRegistrationDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccKeyRegistrationConfig_basic(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckKeyRegistrationExists(ctx, resourceName), + acctest.CheckFrameworkResourceDisappears(ctx, acctest.Provider, tfquicksight.ResourceKeyRegistration, resourceName), + ), + ExpectNonEmptyPlan: true, + }, + }, + }) +} + +func testAccCheckKeyRegistrationDestroy(ctx context.Context) resource.TestCheckFunc { + return func(s *terraform.State) error { + conn := acctest.Provider.Meta().(*conns.AWSClient).QuickSightClient(ctx) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "aws_quicksight_key_registration" { + continue + } + + _, err := tfquicksight.FindKeyRegistrationByID(ctx, conn, rs.Primary.Attributes[names.AttrAWSAccountID]) + + if tfresource.NotFound(err) { + continue + } + + if err != nil { + return err + } + + return fmt.Errorf("QuickSight Account Subscription (%s) still exists", rs.Primary.Attributes[names.AttrAWSAccountID]) + } + + return nil + } +} + +func testAccCheckKeyRegistrationExists(ctx context.Context, n string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + conn := acctest.Provider.Meta().(*conns.AWSClient).QuickSightClient(ctx) + + _, err := tfquicksight.FindKeyRegistrationByID(ctx, conn, rs.Primary.Attributes[names.AttrAWSAccountID]) + + return err + } +} + +func testAccKeyRegistrationConfig_basic(rName string) string { + return fmt.Sprintf(` +resource "aws_kms_key" "test" { + count = 1 + + 
deletion_window_in_days = 7 + description = "%[1]s-${count.index}" + enable_key_rotation = true +} + +resource "aws_quicksight_key_registration" "test" { + key_registration { + key_arn = aws_kms_key.test[0].arn + } +} +`, rName) +} + +func testAccKeyRegistrationConfig_updated(rName string) string { + return fmt.Sprintf(` +resource "aws_kms_key" "test" { + count = 2 + + deletion_window_in_days = 7 + description = "%[1]s-${count.index}" + enable_key_rotation = true +} + +resource "aws_quicksight_key_registration" "test" { + key_registration { + key_arn = aws_kms_key.test[0].arn + } + key_registration { + key_arn = aws_kms_key.test[1].arn + default_key = true + } +} +`, rName) +} diff --git a/internal/service/quicksight/namespace.go b/internal/service/quicksight/namespace.go index 9056b7760d39..3433899190f8 100644 --- a/internal/service/quicksight/namespace.go +++ b/internal/service/quicksight/namespace.go @@ -25,7 +25,8 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/enum" "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/framework" - "github.com/hashicorp/terraform-provider-aws/internal/framework/flex" + fwflex "github.com/hashicorp/terraform-provider-aws/internal/framework/flex" + quicksightschema "github.com/hashicorp/terraform-provider-aws/internal/service/quicksight/schema" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/names" @@ -57,15 +58,8 @@ type namespaceResource struct { func (r *namespaceResource) Schema(ctx context.Context, req resource.SchemaRequest, resp *resource.SchemaResponse) { resp.Schema = schema.Schema{ Attributes: map[string]schema.Attribute{ - names.AttrARN: framework.ARNAttributeComputedOnly(), - names.AttrAWSAccountID: schema.StringAttribute{ - Optional: true, - Computed: true, - PlanModifiers: []planmodifier.String{ - 
stringplanmodifier.UseStateForUnknown(), - stringplanmodifier.RequiresReplace(), - }, - }, + names.AttrARN: framework.ARNAttributeComputedOnly(), + names.AttrAWSAccountID: quicksightschema.AWSAccountIDAttribute(), "capacity_region": schema.StringAttribute{ Computed: true, PlanModifiers: []planmodifier.String{ @@ -106,21 +100,21 @@ func (r *namespaceResource) Schema(ctx context.Context, req resource.SchemaReque } func (r *namespaceResource) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { - conn := r.Meta().QuickSightClient(ctx) - - var plan namespaceResourceModel - resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...) + var data namespaceResourceModel + resp.Diagnostics.Append(req.Plan.Get(ctx, &data)...) if resp.Diagnostics.HasError() { return } - - if plan.AWSAccountID.IsUnknown() || plan.AWSAccountID.IsNull() { - plan.AWSAccountID = types.StringValue(r.Meta().AccountID(ctx)) + if data.AWSAccountID.IsUnknown() { + data.AWSAccountID = fwflex.StringValueToFramework(ctx, r.Meta().AccountID(ctx)) } - awsAccountID, namespace := flex.StringValueFromFramework(ctx, plan.AWSAccountID), flex.StringValueFromFramework(ctx, plan.Namespace) + + conn := r.Meta().QuickSightClient(ctx) + + awsAccountID, namespace := fwflex.StringValueFromFramework(ctx, data.AWSAccountID), fwflex.StringValueFromFramework(ctx, data.Namespace) in := quicksight.CreateNamespaceInput{ AwsAccountId: aws.String(awsAccountID), - IdentityStore: awstypes.IdentityStore(plan.IdentityStore.ValueString()), + IdentityStore: awstypes.IdentityStore(data.IdentityStore.ValueString()), Namespace: aws.String(namespace), Tags: getTagsIn(ctx), } @@ -128,35 +122,35 @@ func (r *namespaceResource) Create(ctx context.Context, req resource.CreateReque out, err := conn.CreateNamespace(ctx, &in) if err != nil { resp.Diagnostics.AddError( - create.ProblemStandardMessage(names.QuickSight, create.ErrActionCreating, resNameNamespace, plan.Namespace.String(), err), + 
create.ProblemStandardMessage(names.QuickSight, create.ErrActionCreating, resNameNamespace, data.Namespace.String(), err), err.Error(), ) return } if out == nil { resp.Diagnostics.AddError( - create.ProblemStandardMessage(names.QuickSight, create.ErrActionCreating, resNameNamespace, plan.Namespace.String(), nil), + create.ProblemStandardMessage(names.QuickSight, create.ErrActionCreating, resNameNamespace, data.Namespace.String(), nil), errors.New("empty output").Error(), ) return } - plan.ID = flex.StringValueToFramework(ctx, namespaceCreateResourceID(awsAccountID, namespace)) + data.ID = fwflex.StringValueToFramework(ctx, namespaceCreateResourceID(awsAccountID, namespace)) - waitOut, err := waitNamespaceCreated(ctx, conn, awsAccountID, namespace, r.CreateTimeout(ctx, plan.Timeouts)) + waitOut, err := waitNamespaceCreated(ctx, conn, awsAccountID, namespace, r.CreateTimeout(ctx, data.Timeouts)) if err != nil { resp.Diagnostics.AddError( - create.ProblemStandardMessage(names.QuickSight, create.ErrActionWaitingForCreation, resNameNamespace, plan.Namespace.String(), err), + create.ProblemStandardMessage(names.QuickSight, create.ErrActionWaitingForCreation, resNameNamespace, data.Namespace.String(), err), err.Error(), ) return } - plan.ARN = flex.StringToFramework(ctx, waitOut.Arn) - plan.CapacityRegion = flex.StringToFramework(ctx, waitOut.CapacityRegion) - plan.CreationStatus = flex.StringValueToFramework(ctx, waitOut.CreationStatus) - plan.IdentityStore = flex.StringValueToFramework(ctx, waitOut.IdentityStore) + data.ARN = fwflex.StringToFramework(ctx, waitOut.Arn) + data.CapacityRegion = fwflex.StringToFramework(ctx, waitOut.CapacityRegion) + data.CreationStatus = fwflex.StringValueToFramework(ctx, waitOut.CreationStatus) + data.IdentityStore = fwflex.StringValueToFramework(ctx, waitOut.IdentityStore) - resp.Diagnostics.Append(resp.State.Set(ctx, plan)...) + resp.Diagnostics.Append(resp.State.Set(ctx, data)...) 
} func (r *namespaceResource) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { @@ -190,12 +184,12 @@ func (r *namespaceResource) Read(ctx context.Context, req resource.ReadRequest, return } - state.ARN = flex.StringToFramework(ctx, out.Arn) - state.CapacityRegion = flex.StringToFramework(ctx, out.CapacityRegion) - state.CreationStatus = flex.StringValueToFramework(ctx, out.CreationStatus) - state.IdentityStore = flex.StringValueToFramework(ctx, out.IdentityStore) - state.AWSAccountID = flex.StringValueToFramework(ctx, awsAccountID) - state.Namespace = flex.StringValueToFramework(ctx, namespace) + state.ARN = fwflex.StringToFramework(ctx, out.Arn) + state.CapacityRegion = fwflex.StringToFramework(ctx, out.CapacityRegion) + state.CreationStatus = fwflex.StringValueToFramework(ctx, out.CreationStatus) + state.IdentityStore = fwflex.StringValueToFramework(ctx, out.IdentityStore) + state.AWSAccountID = fwflex.StringValueToFramework(ctx, awsAccountID) + state.Namespace = fwflex.StringValueToFramework(ctx, namespace) resp.Diagnostics.Append(resp.State.Set(ctx, &state)...) 
} diff --git a/internal/service/quicksight/namespace_tags_gen_test.go b/internal/service/quicksight/namespace_tags_gen_test.go index baf8c6f1e3af..88df27f4e909 100644 --- a/internal/service/quicksight/namespace_tags_gen_test.go +++ b/internal/service/quicksight/namespace_tags_gen_test.go @@ -7,7 +7,6 @@ import ( awstypes "github.com/aws/aws-sdk-go-v2/service/quicksight/types" "github.com/hashicorp/terraform-plugin-testing/config" - sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/knownvalue" "github.com/hashicorp/terraform-plugin-testing/plancheck" @@ -19,11 +18,12 @@ import ( func TestAccQuickSightNamespace_tags(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.NamespaceInfoV2 resourceName := "aws_quicksight_namespace.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckNamespaceDestroy(ctx), @@ -203,11 +203,12 @@ func TestAccQuickSightNamespace_tags_null(t *testing.T) { t.Skip("Resource Namespace does not support null tags") ctx := acctest.Context(t) + var v awstypes.NamespaceInfoV2 resourceName := "aws_quicksight_namespace.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckNamespaceDestroy(ctx), @@ -265,11 +266,12 @@ func TestAccQuickSightNamespace_tags_null(t *testing.T) { func 
TestAccQuickSightNamespace_tags_EmptyMap(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.NamespaceInfoV2 resourceName := "aws_quicksight_namespace.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckNamespaceDestroy(ctx), @@ -315,11 +317,12 @@ func TestAccQuickSightNamespace_tags_EmptyMap(t *testing.T) { func TestAccQuickSightNamespace_tags_AddOnUpdate(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.NamespaceInfoV2 resourceName := "aws_quicksight_namespace.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckNamespaceDestroy(ctx), @@ -397,11 +400,12 @@ func TestAccQuickSightNamespace_tags_EmptyTag_OnCreate(t *testing.T) { t.Skip("Resource Namespace does not support empty tags") ctx := acctest.Context(t) + var v awstypes.NamespaceInfoV2 resourceName := "aws_quicksight_namespace.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckNamespaceDestroy(ctx), @@ -489,11 +493,12 @@ func TestAccQuickSightNamespace_tags_EmptyTag_OnUpdate_Add(t *testing.T) { t.Skip("Resource Namespace does not support empty tags") ctx := acctest.Context(t) + 
var v awstypes.NamespaceInfoV2 resourceName := "aws_quicksight_namespace.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckNamespaceDestroy(ctx), @@ -630,11 +635,12 @@ func TestAccQuickSightNamespace_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { t.Skip("Resource Namespace does not support empty tags") ctx := acctest.Context(t) + var v awstypes.NamespaceInfoV2 resourceName := "aws_quicksight_namespace.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckNamespaceDestroy(ctx), @@ -720,11 +726,12 @@ func TestAccQuickSightNamespace_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { func TestAccQuickSightNamespace_tags_DefaultTags_providerOnly(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.NamespaceInfoV2 resourceName := "aws_quicksight_namespace.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckNamespaceDestroy(ctx), @@ -901,11 +908,12 @@ func TestAccQuickSightNamespace_tags_DefaultTags_providerOnly(t *testing.T) { func TestAccQuickSightNamespace_tags_DefaultTags_nonOverlapping(t *testing.T) { ctx := acctest.Context(t) + var v 
awstypes.NamespaceInfoV2 resourceName := "aws_quicksight_namespace.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckNamespaceDestroy(ctx), @@ -1061,11 +1069,12 @@ func TestAccQuickSightNamespace_tags_DefaultTags_nonOverlapping(t *testing.T) { func TestAccQuickSightNamespace_tags_DefaultTags_overlapping(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.NamespaceInfoV2 resourceName := "aws_quicksight_namespace.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckNamespaceDestroy(ctx), @@ -1237,11 +1246,12 @@ func TestAccQuickSightNamespace_tags_DefaultTags_overlapping(t *testing.T) { func TestAccQuickSightNamespace_tags_DefaultTags_updateToProviderOnly(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.NamespaceInfoV2 resourceName := "aws_quicksight_namespace.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckNamespaceDestroy(ctx), @@ -1327,11 +1337,12 @@ func TestAccQuickSightNamespace_tags_DefaultTags_updateToProviderOnly(t *testing func TestAccQuickSightNamespace_tags_DefaultTags_updateToResourceOnly(t *testing.T) { ctx := 
acctest.Context(t) + var v awstypes.NamespaceInfoV2 resourceName := "aws_quicksight_namespace.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckNamespaceDestroy(ctx), @@ -1418,11 +1429,12 @@ func TestAccQuickSightNamespace_tags_DefaultTags_emptyResourceTag(t *testing.T) t.Skip("Resource Namespace does not support empty tags") ctx := acctest.Context(t) + var v awstypes.NamespaceInfoV2 resourceName := "aws_quicksight_namespace.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckNamespaceDestroy(ctx), @@ -1486,11 +1498,12 @@ func TestAccQuickSightNamespace_tags_DefaultTags_emptyProviderOnlyTag(t *testing t.Skip("Resource Namespace does not support empty tags") ctx := acctest.Context(t) + var v awstypes.NamespaceInfoV2 resourceName := "aws_quicksight_namespace.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckNamespaceDestroy(ctx), @@ -1546,11 +1559,12 @@ func TestAccQuickSightNamespace_tags_DefaultTags_nullOverlappingResourceTag(t *t t.Skip("Resource Namespace does not support null tags") ctx := acctest.Context(t) + var v awstypes.NamespaceInfoV2 
resourceName := "aws_quicksight_namespace.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckNamespaceDestroy(ctx), @@ -1617,11 +1631,12 @@ func TestAccQuickSightNamespace_tags_DefaultTags_nullNonOverlappingResourceTag(t t.Skip("Resource Namespace does not support null tags") ctx := acctest.Context(t) + var v awstypes.NamespaceInfoV2 resourceName := "aws_quicksight_namespace.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckNamespaceDestroy(ctx), @@ -1688,11 +1703,12 @@ func TestAccQuickSightNamespace_tags_DefaultTags_nullNonOverlappingResourceTag(t func TestAccQuickSightNamespace_tags_ComputedTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.NamespaceInfoV2 resourceName := "aws_quicksight_namespace.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckNamespaceDestroy(ctx), @@ -1743,11 +1759,12 @@ func TestAccQuickSightNamespace_tags_ComputedTag_OnCreate(t *testing.T) { func TestAccQuickSightNamespace_tags_ComputedTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.NamespaceInfoV2 resourceName := 
"aws_quicksight_namespace.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckNamespaceDestroy(ctx), @@ -1840,11 +1857,12 @@ func TestAccQuickSightNamespace_tags_ComputedTag_OnUpdate_Add(t *testing.T) { func TestAccQuickSightNamespace_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.NamespaceInfoV2 resourceName := "aws_quicksight_namespace.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckNamespaceDestroy(ctx), @@ -1927,11 +1945,12 @@ func TestAccQuickSightNamespace_tags_ComputedTag_OnUpdate_Replace(t *testing.T) func TestAccQuickSightNamespace_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.NamespaceInfoV2 resourceName := "aws_quicksight_namespace.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckNamespaceDestroy(ctx), @@ -2089,11 +2108,12 @@ func TestAccQuickSightNamespace_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) func TestAccQuickSightNamespace_tags_IgnoreTags_Overlap_ResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v 
awstypes.NamespaceInfoV2 resourceName := "aws_quicksight_namespace.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckNamespaceDestroy(ctx), diff --git a/internal/service/quicksight/quicksight_test.go b/internal/service/quicksight/quicksight_test.go index 573e07505b21..d60dbf4c0d56 100644 --- a/internal/service/quicksight/quicksight_test.go +++ b/internal/service/quicksight/quicksight_test.go @@ -32,6 +32,20 @@ func TestAccQuickSight_serial(t *testing.T) { acctest.CtBasic: testAccAccountSubscription_basic, acctest.CtDisappears: testAccAccountSubscription_disappears, }, + "IPRestriction": { + acctest.CtBasic: testAccIPRestriction_basic, + acctest.CtDisappears: testAccIPRestriction_disappears, + "update": testAccIPRestriction_update, + }, + "KeyRegistration": { + acctest.CtBasic: testAccKeyRegistration_basic, + acctest.CtDisappears: testAccKeyRegistration_disappears, + }, + "RoleCustomPermission": { + acctest.CtBasic: testAccRoleCustomPermission_basic, + acctest.CtDisappears: testAccRoleCustomPermission_disappears, + "update": testAccRoleCustomPermission_update, + }, "RoleMembership": { acctest.CtBasic: testAccRoleMembership_basic, acctest.CtDisappears: testAccRoleMembership_disappears, diff --git a/internal/service/quicksight/refresh_schedule.go b/internal/service/quicksight/refresh_schedule.go index af1e3d223812..bdc8d5636b47 100644 --- a/internal/service/quicksight/refresh_schedule.go +++ b/internal/service/quicksight/refresh_schedule.go @@ -31,8 +31,9 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/fwdiag" "github.com/hashicorp/terraform-provider-aws/internal/framework" - 
"github.com/hashicorp/terraform-provider-aws/internal/framework/flex" + fwflex "github.com/hashicorp/terraform-provider-aws/internal/framework/flex" fwtypes "github.com/hashicorp/terraform-provider-aws/internal/framework/types" + quicksightschema "github.com/hashicorp/terraform-provider-aws/internal/service/quicksight/schema" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -60,15 +61,8 @@ type refreshScheduleResource struct { func (r *refreshScheduleResource) Schema(ctx context.Context, req resource.SchemaRequest, resp *resource.SchemaResponse) { resp.Schema = schema.Schema{ Attributes: map[string]schema.Attribute{ - names.AttrARN: framework.ARNAttributeComputedOnly(), - names.AttrAWSAccountID: schema.StringAttribute{ - Optional: true, - Computed: true, - PlanModifiers: []planmodifier.String{ - stringplanmodifier.UseStateForUnknown(), - stringplanmodifier.RequiresReplace(), - }, - }, + names.AttrARN: framework.ARNAttributeComputedOnly(), + names.AttrAWSAccountID: quicksightschema.AWSAccountIDAttribute(), "data_set_id": schema.StringAttribute{ Required: true, PlanModifiers: []planmodifier.String{ @@ -204,28 +198,27 @@ type refreshOnDayModel struct { } func (r *refreshScheduleResource) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { - conn := r.Meta().QuickSightClient(ctx) - - var plan refreshScheduleResourceModel - resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...) + var data refreshScheduleResourceModel + resp.Diagnostics.Append(req.Plan.Get(ctx, &data)...) 
if resp.Diagnostics.HasError() { return } - - if plan.AWSAccountID.IsUnknown() || plan.AWSAccountID.IsNull() { - plan.AWSAccountID = types.StringValue(r.Meta().AccountID(ctx)) + if data.AWSAccountID.IsUnknown() { + data.AWSAccountID = fwflex.StringValueToFramework(ctx, r.Meta().AccountID(ctx)) } - awsAccountID, dataSetID, scheduleID := flex.StringValueFromFramework(ctx, plan.AWSAccountID), flex.StringValueFromFramework(ctx, plan.DataSetID), flex.StringValueFromFramework(ctx, plan.ScheduleID) + conn := r.Meta().QuickSightClient(ctx) + + awsAccountID, dataSetID, scheduleID := fwflex.StringValueFromFramework(ctx, data.AWSAccountID), fwflex.StringValueFromFramework(ctx, data.DataSetID), fwflex.StringValueFromFramework(ctx, data.ScheduleID) var in quicksight.CreateRefreshScheduleInput - resp.Diagnostics.Append(flex.Expand(ctx, plan, &in)...) + resp.Diagnostics.Append(fwflex.Expand(ctx, data, &in)...) if resp.Diagnostics.HasError() { return } - in.Schedule.ScheduleId = plan.ScheduleID.ValueStringPointer() + in.Schedule.ScheduleId = data.ScheduleID.ValueStringPointer() // Because StartAfterDateTime is a string and not a time type, we have to handle it outside of AutoFlex - schedule, diags := plan.Schedule.ToPtr(ctx) + schedule, diags := data.Schedule.ToPtr(ctx) resp.Diagnostics.Append(diags...) 
if resp.Diagnostics.HasError() { return @@ -238,34 +231,34 @@ func (r *refreshScheduleResource) Create(ctx context.Context, req resource.Creat out, err := conn.CreateRefreshSchedule(ctx, &in) if err != nil { resp.Diagnostics.AddError( - create.ProblemStandardMessage(names.QuickSight, create.ErrActionCreating, resNameRefreshSchedule, plan.ScheduleID.String(), nil), + create.ProblemStandardMessage(names.QuickSight, create.ErrActionCreating, resNameRefreshSchedule, data.ScheduleID.String(), nil), err.Error(), ) return } if out == nil { resp.Diagnostics.AddError( - create.ProblemStandardMessage(names.QuickSight, create.ErrActionCreating, resNameRefreshSchedule, plan.ScheduleID.String(), nil), + create.ProblemStandardMessage(names.QuickSight, create.ErrActionCreating, resNameRefreshSchedule, data.ScheduleID.String(), nil), errors.New("empty output").Error(), ) return } - plan.ID = flex.StringValueToFramework(ctx, refreshScheduleCreateResourceID(awsAccountID, dataSetID, scheduleID)) + data.ID = fwflex.StringValueToFramework(ctx, refreshScheduleCreateResourceID(awsAccountID, dataSetID, scheduleID)) _, outFind, err := findRefreshScheduleByThreePartKey(ctx, conn, awsAccountID, dataSetID, scheduleID) if err != nil { resp.Diagnostics.AddError( - create.ProblemStandardMessage(names.QuickSight, create.ErrActionReading, resNameRefreshSchedule, plan.ID.String(), nil), + create.ProblemStandardMessage(names.QuickSight, create.ErrActionReading, resNameRefreshSchedule, data.ID.String(), nil), err.Error(), ) return } - resp.Diagnostics.Append(plan.refreshFromRead(ctx, out.Arn, outFind)...) + resp.Diagnostics.Append(data.refreshFromRead(ctx, out.Arn, outFind)...) // resp.Diagnostics.Append(flex.Flatten(ctx, outFind, &plan)...) - resp.Diagnostics.Append(resp.State.Set(ctx, plan)...) + resp.Diagnostics.Append(resp.State.Set(ctx, data)...) 
} func (r *refreshScheduleResource) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { @@ -299,9 +292,9 @@ func (r *refreshScheduleResource) Read(ctx context.Context, req resource.ReadReq return } - state.AWSAccountID = flex.StringValueToFramework(ctx, awsAccountID) - state.DataSetID = flex.StringValueToFramework(ctx, dataSetID) - state.ScheduleID = flex.StringValueToFramework(ctx, scheduleID) + state.AWSAccountID = fwflex.StringValueToFramework(ctx, awsAccountID) + state.DataSetID = fwflex.StringValueToFramework(ctx, dataSetID) + state.ScheduleID = fwflex.StringValueToFramework(ctx, scheduleID) resp.Diagnostics.Append(state.refreshFromRead(ctx, arn, outFind)...) // resp.Diagnostics.Append(flex.Flatten(ctx, outFind, &state)...) @@ -330,7 +323,7 @@ func (r *refreshScheduleResource) Update(ctx context.Context, req resource.Updat if !plan.Schedule.Equal(state.Schedule) { var in quicksight.UpdateRefreshScheduleInput - resp.Diagnostics.Append(flex.Expand(ctx, plan, &in)...) + resp.Diagnostics.Append(fwflex.Expand(ctx, plan, &in)...) if resp.Diagnostics.HasError() { return } @@ -506,7 +499,7 @@ func (rd *refreshScheduleResourceModel) refreshFromRead(ctx context.Context, arn return diags } - rd.ARN = flex.StringToFramework(ctx, arn) + rd.ARN = fwflex.StringToFramework(ctx, arn) schedule, d := flattenSchedule(ctx, out) diags.Append(d...) @@ -524,7 +517,7 @@ func flattenSchedule(ctx context.Context, apiObject *awstypes.RefreshSchedule) ( var model scheduleModel - diags.Append(flex.Flatten(ctx, apiObject, &model)...) + diags.Append(fwflex.Flatten(ctx, apiObject, &model)...) 
if apiObject.StartAfterDateTime != nil { model.StartAfterDateTime = types.StringValue(apiObject.StartAfterDateTime.Format(startAfterDateTimeLayout)) diff --git a/internal/service/quicksight/role_custom_permission.go b/internal/service/quicksight/role_custom_permission.go new file mode 100644 index 000000000000..a8a0cb8be4a3 --- /dev/null +++ b/internal/service/quicksight/role_custom_permission.go @@ -0,0 +1,232 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package quicksight + +import ( + "context" + "fmt" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/quicksight" + awstypes "github.com/aws/aws-sdk-go-v2/service/quicksight/types" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" + "github.com/hashicorp/terraform-provider-aws/internal/errs" + "github.com/hashicorp/terraform-provider-aws/internal/errs/fwdiag" + intflex "github.com/hashicorp/terraform-provider-aws/internal/flex" + "github.com/hashicorp/terraform-provider-aws/internal/framework" + fwflex "github.com/hashicorp/terraform-provider-aws/internal/framework/flex" + fwtypes "github.com/hashicorp/terraform-provider-aws/internal/framework/types" + quicksightschema "github.com/hashicorp/terraform-provider-aws/internal/service/quicksight/schema" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + "github.com/hashicorp/terraform-provider-aws/names" +) + +// @FrameworkResource("aws_quicksight_role_custom_permission", name="Role Custom Permission") +func newRoleCustomPermissionResource(_ context.Context) 
(resource.ResourceWithConfigure, error) { + r := &roleCustomPermissionResource{} + + return r, nil +} + +type roleCustomPermissionResource struct { + framework.ResourceWithModel[roleCustomPermissionResourceModel] +} + +func (r *roleCustomPermissionResource) Schema(ctx context.Context, request resource.SchemaRequest, response *resource.SchemaResponse) { + response.Schema = schema.Schema{ + Attributes: map[string]schema.Attribute{ + names.AttrAWSAccountID: quicksightschema.AWSAccountIDAttribute(), + "custom_permissions_name": schema.StringAttribute{ + Required: true, + }, + names.AttrNamespace: quicksightschema.NamespaceAttribute(), + names.AttrRole: schema.StringAttribute{ + CustomType: fwtypes.StringEnumType[awstypes.Role](), + Required: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + }, + }, + } +} + +func (r *roleCustomPermissionResource) Create(ctx context.Context, request resource.CreateRequest, response *resource.CreateResponse) { + var data roleCustomPermissionResourceModel + response.Diagnostics.Append(request.Plan.Get(ctx, &data)...) + if response.Diagnostics.HasError() { + return + } + if data.AWSAccountID.IsUnknown() { + data.AWSAccountID = fwflex.StringValueToFramework(ctx, r.Meta().AccountID(ctx)) + } + + conn := r.Meta().QuickSightClient(ctx) + + var input quicksight.UpdateRoleCustomPermissionInput + response.Diagnostics.Append(fwflex.Expand(ctx, data, &input)...) + if response.Diagnostics.HasError() { + return + } + + _, err := conn.UpdateRoleCustomPermission(ctx, &input) + + if err != nil { + response.Diagnostics.AddError(fmt.Sprintf("creating Quicksight Role (%s) Custom Permission (%s)", data.Role.ValueString(), data.CustomPermissionsName.ValueString()), err.Error()) + + return + } + + response.Diagnostics.Append(response.State.Set(ctx, data)...) 
+} + +func (r *roleCustomPermissionResource) Read(ctx context.Context, request resource.ReadRequest, response *resource.ReadResponse) { + var data roleCustomPermissionResourceModel + response.Diagnostics.Append(request.State.Get(ctx, &data)...) + if response.Diagnostics.HasError() { + return + } + + conn := r.Meta().QuickSightClient(ctx) + + output, err := findRoleCustomPermissionByThreePartKey(ctx, conn, data.AWSAccountID.ValueString(), data.Namespace.ValueString(), data.Role.ValueEnum()) + + if tfresource.NotFound(err) { + response.Diagnostics.Append(fwdiag.NewResourceNotFoundWarningDiagnostic(err)) + response.State.RemoveResource(ctx) + + return + } + + if err != nil { + response.Diagnostics.AddError(fmt.Sprintf("creating Quicksight Role (%s) Custom Permission", data.Role.ValueString()), err.Error()) + + return + } + + // Set attributes for import. + data.CustomPermissionsName = fwflex.StringToFramework(ctx, output) + + response.Diagnostics.Append(response.State.Set(ctx, &data)...) +} + +func (r *roleCustomPermissionResource) Update(ctx context.Context, request resource.UpdateRequest, response *resource.UpdateResponse) { + var new, old roleCustomPermissionResourceModel + response.Diagnostics.Append(request.Plan.Get(ctx, &new)...) + if response.Diagnostics.HasError() { + return + } + response.Diagnostics.Append(request.State.Get(ctx, &old)...) + if response.Diagnostics.HasError() { + return + } + + conn := r.Meta().QuickSightClient(ctx) + + var input quicksight.UpdateRoleCustomPermissionInput + response.Diagnostics.Append(fwflex.Expand(ctx, new, &input)...) + if response.Diagnostics.HasError() { + return + } + + _, err := conn.UpdateRoleCustomPermission(ctx, &input) + + if err != nil { + response.Diagnostics.AddError(fmt.Sprintf("updating Quicksight Role (%s) Custom Permission (%s)", new.Role.ValueString(), new.CustomPermissionsName.ValueString()), err.Error()) + + return + } + + response.Diagnostics.Append(response.State.Set(ctx, &new)...) 
+} + +func (r *roleCustomPermissionResource) Delete(ctx context.Context, request resource.DeleteRequest, response *resource.DeleteResponse) { + var data roleCustomPermissionResourceModel + response.Diagnostics.Append(request.State.Get(ctx, &data)...) + if response.Diagnostics.HasError() { + return + } + + conn := r.Meta().QuickSightClient(ctx) + + var input quicksight.DeleteRoleCustomPermissionInput + response.Diagnostics.Append(fwflex.Expand(ctx, data, &input)...) + if response.Diagnostics.HasError() { + return + } + + _, err := conn.DeleteRoleCustomPermission(ctx, &input) + + if errs.IsA[*awstypes.ResourceNotFoundException](err) { + return + } + + if err != nil { + response.Diagnostics.AddError(fmt.Sprintf("deleting Quicksight Role (%s) Custom Permission (%s)", data.Role.ValueString(), data.CustomPermissionsName.ValueString()), err.Error()) + + return + } +} + +func (r *roleCustomPermissionResource) ImportState(ctx context.Context, request resource.ImportStateRequest, response *resource.ImportStateResponse) { + const ( + roleCustomPermissionIDParts = 3 + ) + parts, err := intflex.ExpandResourceId(request.ID, roleCustomPermissionIDParts, true) + + if err != nil { + response.Diagnostics.Append(fwdiag.NewParsingResourceIDErrorDiagnostic(err)) + + return + } + + response.Diagnostics.Append(response.State.SetAttribute(ctx, path.Root(names.AttrAWSAccountID), parts[0])...) + response.Diagnostics.Append(response.State.SetAttribute(ctx, path.Root(names.AttrNamespace), parts[1])...) + response.Diagnostics.Append(response.State.SetAttribute(ctx, path.Root(names.AttrRole), parts[2])...) 
+} + +func findRoleCustomPermissionByThreePartKey(ctx context.Context, conn *quicksight.Client, awsAccountID, namespace string, role awstypes.Role) (*string, error) { + input := quicksight.DescribeRoleCustomPermissionInput{ + AwsAccountId: aws.String(awsAccountID), + Namespace: aws.String(namespace), + Role: role, + } + + return findRoleCustomPermission(ctx, conn, &input) +} + +func findRoleCustomPermission(ctx context.Context, conn *quicksight.Client, input *quicksight.DescribeRoleCustomPermissionInput) (*string, error) { + output, err := conn.DescribeRoleCustomPermission(ctx, input) + + if errs.IsA[*awstypes.ResourceNotFoundException](err) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, + } + } + + if err != nil { + return nil, err + } + + if output == nil || aws.ToString(output.CustomPermissionsName) == "" { + return nil, tfresource.NewEmptyResultError(&input) + } + + return output.CustomPermissionsName, nil +} + +type roleCustomPermissionResourceModel struct { + framework.WithRegionModel + AWSAccountID types.String `tfsdk:"aws_account_id"` + CustomPermissionsName types.String `tfsdk:"custom_permissions_name"` + Namespace types.String `tfsdk:"namespace"` + Role fwtypes.StringEnum[awstypes.Role] `tfsdk:"role"` +} diff --git a/internal/service/quicksight/role_custom_permission_test.go b/internal/service/quicksight/role_custom_permission_test.go new file mode 100644 index 000000000000..675a51e410f5 --- /dev/null +++ b/internal/service/quicksight/role_custom_permission_test.go @@ -0,0 +1,236 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package quicksight_test + +import ( + "context" + "fmt" + "testing" + + awstypes "github.com/aws/aws-sdk-go-v2/service/quicksight/types" + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/knownvalue" + "github.com/hashicorp/terraform-plugin-testing/plancheck" + "github.com/hashicorp/terraform-plugin-testing/statecheck" + "github.com/hashicorp/terraform-plugin-testing/terraform" + "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + tfknownvalue "github.com/hashicorp/terraform-provider-aws/internal/acctest/knownvalue" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + tfquicksight "github.com/hashicorp/terraform-provider-aws/internal/service/quicksight" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func testAccRoleCustomPermission_basic(t *testing.T) { + ctx := acctest.Context(t) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_quicksight_role_custom_permission.test" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckRoleCustomPermissionDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccRoleCustomPermissionConfig_basic(rName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckRoleCustomPermissionExists(ctx, resourceName), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + 
plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrAWSAccountID), tfknownvalue.AccountID()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("custom_permissions_name"), knownvalue.StringExact(rName)), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrNamespace), knownvalue.StringExact(tfquicksight.DefaultNamespace)), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRole), tfknownvalue.StringExact(awstypes.RoleReader)), + }, + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateIdFunc: testAccRoleCustomPermissionImportStateID(resourceName), + ImportStateVerifyIdentifierAttribute: "custom_permissions_name", + }, + }, + }) +} + +func testAccRoleCustomPermission_disappears(t *testing.T) { + ctx := acctest.Context(t) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_quicksight_role_custom_permission.test" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckRoleCustomPermissionDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccRoleCustomPermissionConfig_basic(rName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckRoleCustomPermissionExists(ctx, resourceName), + acctest.CheckFrameworkResourceDisappears(ctx, acctest.Provider, tfquicksight.ResourceRoleCustomPermission, resourceName), + ), + ExpectNonEmptyPlan: true, + }, + }, + }) +} + +func testAccRoleCustomPermission_update(t *testing.T) { + ctx := acctest.Context(t) + rName := 
sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_quicksight_role_custom_permission.test" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckRoleCustomPermissionDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccRoleCustomPermissionConfig_basic(rName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckRoleCustomPermissionExists(ctx, resourceName), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrAWSAccountID), tfknownvalue.AccountID()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("custom_permissions_name"), knownvalue.StringExact(rName)), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrNamespace), knownvalue.StringExact(tfquicksight.DefaultNamespace)), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRole), tfknownvalue.StringExact(awstypes.RoleReader)), + }, + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateIdFunc: testAccRoleCustomPermissionImportStateID(resourceName), + ImportStateVerifyIdentifierAttribute: "custom_permissions_name", + }, + { + Config: testAccRoleCustomPermissionConfig_updated(rName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckRoleCustomPermissionExists(ctx, resourceName), + ), + 
ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrAWSAccountID), tfknownvalue.AccountID()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("custom_permissions_name"), knownvalue.StringExact(rName+"-2")), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrNamespace), knownvalue.StringExact(tfquicksight.DefaultNamespace)), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRole), tfknownvalue.StringExact(awstypes.RoleReader)), + }, + }, + }, + }) +} + +func testAccCheckRoleCustomPermissionDestroy(ctx context.Context) resource.TestCheckFunc { + return func(s *terraform.State) error { + conn := acctest.Provider.Meta().(*conns.AWSClient).QuickSightClient(ctx) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "aws_quicksight_role_custom_permission" { + continue + } + + _, err := tfquicksight.FindRoleCustomPermissionByThreePartKey(ctx, conn, rs.Primary.Attributes[names.AttrAWSAccountID], rs.Primary.Attributes[names.AttrNamespace], awstypes.Role(rs.Primary.Attributes[names.AttrRole])) + + if tfresource.NotFound(err) { + continue + } + + if err != nil { + return err + } + + return fmt.Errorf("QuickSight Role Custom Permission (%s) still exists", rs.Primary.Attributes[names.AttrRole]) + } + + return nil + } +} + +func testAccCheckRoleCustomPermissionExists(ctx context.Context, n string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not 
found: %s", n) + } + + conn := acctest.Provider.Meta().(*conns.AWSClient).QuickSightClient(ctx) + + _, err := tfquicksight.FindRoleCustomPermissionByThreePartKey(ctx, conn, rs.Primary.Attributes[names.AttrAWSAccountID], rs.Primary.Attributes[names.AttrNamespace], awstypes.Role(rs.Primary.Attributes[names.AttrRole])) + + return err + } +} + +func testAccRoleCustomPermissionImportStateID(n string) resource.ImportStateIdFunc { + return func(s *terraform.State) (string, error) { + return acctest.AttrsImportStateIdFunc(n, ",", names.AttrAWSAccountID, names.AttrNamespace, names.AttrRole)(s) + } +} + +func testAccRoleCustomPermissionConfig_basic(rName string) string { + return acctest.ConfigCompose(testAccCustomPermissionsConfig_basic(rName), ` +resource "aws_quicksight_role_custom_permission" "test" { + role = "READER" + custom_permissions_name = aws_quicksight_custom_permissions.test.custom_permissions_name +} +`) +} + +func testAccRoleCustomPermissionConfig_updated(rName string) string { + return acctest.ConfigCompose(testAccCustomPermissionsConfig_basic(rName), fmt.Sprintf(` +resource "aws_quicksight_custom_permissions" "test2" { + custom_permissions_name = "%[1]s-2" + + capabilities { + create_and_update_datasets = "DENY" + create_and_update_data_sources = "DENY" + export_to_pdf = "DENY" + } +} + +resource "aws_quicksight_role_custom_permission" "test" { + role = "READER" + custom_permissions_name = aws_quicksight_custom_permissions.test2.custom_permissions_name +} +`, rName)) +} diff --git a/internal/service/quicksight/role_membership.go b/internal/service/quicksight/role_membership.go index bc3010627e72..6434077d7525 100644 --- a/internal/service/quicksight/role_membership.go +++ b/internal/service/quicksight/role_membership.go @@ -15,15 +15,16 @@ import ( "github.com/hashicorp/terraform-plugin-framework/resource" "github.com/hashicorp/terraform-plugin-framework/resource/schema" "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" - 
"github.com/hashicorp/terraform-plugin-framework/resource/schema/stringdefault" "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" "github.com/hashicorp/terraform-plugin-framework/types" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" - "github.com/hashicorp/terraform-provider-aws/internal/create" "github.com/hashicorp/terraform-provider-aws/internal/errs" + "github.com/hashicorp/terraform-provider-aws/internal/errs/fwdiag" intflex "github.com/hashicorp/terraform-provider-aws/internal/flex" "github.com/hashicorp/terraform-provider-aws/internal/framework" + fwflex "github.com/hashicorp/terraform-provider-aws/internal/framework/flex" fwtypes "github.com/hashicorp/terraform-provider-aws/internal/framework/types" + quicksightschema "github.com/hashicorp/terraform-provider-aws/internal/service/quicksight/schema" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -33,40 +34,22 @@ func newRoleMembershipResource(_ context.Context) (resource.ResourceWithConfigur return &roleMembershipResource{}, nil } -const ( - ResNameRoleMembership = "Role Membership" -) - type roleMembershipResource struct { framework.ResourceWithModel[roleMembershipResourceModel] framework.WithNoUpdate } -func (r *roleMembershipResource) Schema(ctx context.Context, req resource.SchemaRequest, resp *resource.SchemaResponse) { - resp.Schema = schema.Schema{ +func (r *roleMembershipResource) Schema(ctx context.Context, request resource.SchemaRequest, response *resource.SchemaResponse) { + response.Schema = schema.Schema{ Attributes: map[string]schema.Attribute{ - names.AttrAWSAccountID: schema.StringAttribute{ - Optional: true, - Computed: true, - PlanModifiers: []planmodifier.String{ - stringplanmodifier.UseStateForUnknown(), - stringplanmodifier.RequiresReplace(), - }, - }, + names.AttrAWSAccountID: quicksightschema.AWSAccountIDAttribute(), "member_name": schema.StringAttribute{ Required: 
true, PlanModifiers: []planmodifier.String{ stringplanmodifier.RequiresReplace(), }, }, - names.AttrNamespace: schema.StringAttribute{ - Optional: true, - Computed: true, - Default: stringdefault.StaticString("default"), - PlanModifiers: []planmodifier.String{ - stringplanmodifier.RequiresReplace(), - }, - }, + names.AttrNamespace: quicksightschema.NamespaceAttribute(), names.AttrRole: schema.StringAttribute{ CustomType: fwtypes.StringEnumType[awstypes.Role](), Required: true, @@ -78,128 +61,125 @@ func (r *roleMembershipResource) Schema(ctx context.Context, req resource.Schema } } -func (r *roleMembershipResource) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { - conn := r.Meta().QuickSightClient(ctx) - - var plan roleMembershipResourceModel - resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...) - if resp.Diagnostics.HasError() { +func (r *roleMembershipResource) Create(ctx context.Context, request resource.CreateRequest, response *resource.CreateResponse) { + var data roleMembershipResourceModel + response.Diagnostics.Append(request.Plan.Get(ctx, &data)...) + if response.Diagnostics.HasError() { return } - - if plan.AWSAccountID.IsUnknown() || plan.AWSAccountID.IsNull() { - plan.AWSAccountID = types.StringValue(r.Meta().AccountID(ctx)) + if data.AWSAccountID.IsUnknown() { + data.AWSAccountID = fwflex.StringValueToFramework(ctx, r.Meta().AccountID(ctx)) } - input := quicksight.CreateRoleMembershipInput{ - AwsAccountId: plan.AWSAccountID.ValueStringPointer(), - MemberName: plan.MemberName.ValueStringPointer(), - Namespace: plan.Namespace.ValueStringPointer(), - Role: plan.Role.ValueEnum(), + conn := r.Meta().QuickSightClient(ctx) + + var input quicksight.CreateRoleMembershipInput + response.Diagnostics.Append(fwflex.Expand(ctx, data, &input)...) 
+ if response.Diagnostics.HasError() { + return } _, err := conn.CreateRoleMembership(ctx, &input) + if err != nil { - resp.Diagnostics.AddError( - create.ProblemStandardMessage(names.QuickSight, create.ErrActionCreating, ResNameRoleMembership, plan.MemberName.String(), err), - err.Error(), - ) + response.Diagnostics.AddError(fmt.Sprintf("creating Quicksight Role (%s) Membership (%s)", data.Role.ValueString(), data.MemberName.ValueString()), err.Error()) + return } - resp.Diagnostics.Append(resp.State.Set(ctx, plan)...) + response.Diagnostics.Append(response.State.Set(ctx, data)...) } -func (r *roleMembershipResource) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { - conn := r.Meta().QuickSightClient(ctx) - - var state roleMembershipResourceModel - resp.Diagnostics.Append(req.State.Get(ctx, &state)...) - if resp.Diagnostics.HasError() { +func (r *roleMembershipResource) Read(ctx context.Context, request resource.ReadRequest, response *resource.ReadResponse) { + var data roleMembershipResourceModel + response.Diagnostics.Append(request.State.Get(ctx, &data)...) 
+ if response.Diagnostics.HasError() { return } - err := findRoleMembershipByMultiPartKey(ctx, conn, state.AWSAccountID.ValueString(), state.Namespace.ValueString(), state.Role.ValueEnum(), state.MemberName.ValueString()) + conn := r.Meta().QuickSightClient(ctx) + + err := findRoleMembershipByFourPartKey(ctx, conn, data.AWSAccountID.ValueString(), data.Namespace.ValueString(), data.Role.ValueEnum(), data.MemberName.ValueString()) if tfresource.NotFound(err) { - resp.State.RemoveResource(ctx) + response.Diagnostics.Append(fwdiag.NewResourceNotFoundWarningDiagnostic(err)) + response.State.RemoveResource(ctx) + return } + if err != nil { - resp.Diagnostics.AddError( - create.ProblemStandardMessage(names.QuickSight, create.ErrActionSetting, ResNameRoleMembership, state.MemberName.String(), err), - err.Error(), - ) + response.Diagnostics.AddError(fmt.Sprintf("reading Quicksight Role (%s) Membership (%s)", data.Role.ValueString(), data.MemberName.ValueString()), err.Error()) + return } - resp.Diagnostics.Append(resp.State.Set(ctx, &state)...) + response.Diagnostics.Append(response.State.Set(ctx, &data)...) } -func (r *roleMembershipResource) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { +func (r *roleMembershipResource) Delete(ctx context.Context, request resource.DeleteRequest, response *resource.DeleteResponse) { + var data roleMembershipResourceModel + response.Diagnostics.Append(request.State.Get(ctx, &data)...) + if response.Diagnostics.HasError() { + return + } + conn := r.Meta().QuickSightClient(ctx) - var state roleMembershipResourceModel - resp.Diagnostics.Append(req.State.Get(ctx, &state)...) - if resp.Diagnostics.HasError() { + var input quicksight.DeleteRoleMembershipInput + response.Diagnostics.Append(fwflex.Expand(ctx, data, &input)...) 
+ if response.Diagnostics.HasError() { return } - input := quicksight.DeleteRoleMembershipInput{ - AwsAccountId: state.AWSAccountID.ValueStringPointer(), - MemberName: state.MemberName.ValueStringPointer(), - Namespace: state.Namespace.ValueStringPointer(), - Role: state.Role.ValueEnum(), + _, err := conn.DeleteRoleMembership(ctx, &input) + + if errs.IsA[*awstypes.ResourceNotFoundException](err) { + return } - _, err := conn.DeleteRoleMembership(ctx, &input) if err != nil { - if errs.IsA[*awstypes.ResourceNotFoundException](err) { - return - } + response.Diagnostics.AddError(fmt.Sprintf("deleting Quicksight Role (%s) Membership (%s)", data.Role.ValueString(), data.MemberName.ValueString()), err.Error()) - resp.Diagnostics.AddError( - create.ProblemStandardMessage(names.QuickSight, create.ErrActionDeleting, ResNameRoleMembership, state.MemberName.String(), err), - err.Error(), - ) return } } -const roleMembershipIDParts = 4 +func (r *roleMembershipResource) ImportState(ctx context.Context, request resource.ImportStateRequest, response *resource.ImportStateResponse) { + const ( + roleMembershipIDParts = 4 + ) + parts, err := intflex.ExpandResourceId(request.ID, roleMembershipIDParts, false) -func (r *roleMembershipResource) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) { - parts, err := intflex.ExpandResourceId(req.ID, roleMembershipIDParts, false) if err != nil { - resp.Diagnostics.AddError( - "Unexpected Import Identifier", - fmt.Sprintf("Expected import identifier with format: aws_account_id,namespace,role,member_name. Got: %q", req.ID), - ) + response.Diagnostics.Append(fwdiag.NewParsingResourceIDErrorDiagnostic(err)) + return } - resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root(names.AttrAWSAccountID), parts[0])...) - resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root(names.AttrNamespace), parts[1])...) 
- resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root(names.AttrRole), parts[2])...) - resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("member_name"), parts[3])...) + response.Diagnostics.Append(response.State.SetAttribute(ctx, path.Root(names.AttrAWSAccountID), parts[0])...) + response.Diagnostics.Append(response.State.SetAttribute(ctx, path.Root(names.AttrNamespace), parts[1])...) + response.Diagnostics.Append(response.State.SetAttribute(ctx, path.Root(names.AttrRole), parts[2])...) + response.Diagnostics.Append(response.State.SetAttribute(ctx, path.Root("member_name"), parts[3])...) } -// findRoleMembershipByMultiPartKey verifies the existence of a role membership +// findRoleMembershipByFourPartKey verifies the existence of a role membership // // No value is returned, but the error will be non-nil if no matching member name // is found in the list of group members for the provided role. -func findRoleMembershipByMultiPartKey(ctx context.Context, conn *quicksight.Client, accountID string, namespace string, role awstypes.Role, member string) error { +func findRoleMembershipByFourPartKey(ctx context.Context, conn *quicksight.Client, awsAccountID, namespace string, role awstypes.Role, member string) error { input := quicksight.ListRoleMembershipsInput{ - AwsAccountId: aws.String(accountID), + AwsAccountId: aws.String(awsAccountID), Namespace: aws.String(namespace), Role: role, } - out, err := findRoleMemberships(ctx, conn, &input) + members, err := findRoleMembers(ctx, conn, &input) + if err != nil { return err } - if slices.Contains(out, member) { + if slices.Contains(members, member) { return nil } @@ -208,20 +188,28 @@ func findRoleMembershipByMultiPartKey(ctx context.Context, conn *quicksight.Clie } } -func findRoleMemberships(ctx context.Context, conn *quicksight.Client, input *quicksight.ListRoleMembershipsInput) ([]string, error) { - paginator := quicksight.NewListRoleMembershipsPaginator(conn, input) +func findRoleMembers(ctx 
context.Context, conn *quicksight.Client, input *quicksight.ListRoleMembershipsInput) ([]string, error) { + var output []string + + pages := quicksight.NewListRoleMembershipsPaginator(conn, input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + + if errs.IsA[*awstypes.ResourceNotFoundException](err) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, + } + } - var memberNames []string - for paginator.HasMorePages() { - page, err := paginator.NextPage(ctx) if err != nil { return nil, err } - memberNames = append(memberNames, page.MembersList...) + output = append(output, page.MembersList...) } - return memberNames, nil + return output, nil } type roleMembershipResourceModel struct { diff --git a/internal/service/quicksight/role_membership_test.go b/internal/service/quicksight/role_membership_test.go index 00637411f7ff..7ddde49cd405 100644 --- a/internal/service/quicksight/role_membership_test.go +++ b/internal/service/quicksight/role_membership_test.go @@ -5,17 +5,15 @@ package quicksight_test import ( "context" - "errors" "fmt" "testing" - "github.com/aws/aws-sdk-go-v2/service/quicksight/types" + awstypes "github.com/aws/aws-sdk-go-v2/service/quicksight/types" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/plancheck" "github.com/hashicorp/terraform-plugin-testing/terraform" "github.com/hashicorp/terraform-provider-aws/internal/acctest" "github.com/hashicorp/terraform-provider-aws/internal/conns" - "github.com/hashicorp/terraform-provider-aws/internal/create" tfquicksight "github.com/hashicorp/terraform-provider-aws/internal/service/quicksight" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/names" @@ -23,10 +21,9 @@ import ( func testAccRoleMembership_basic(t *testing.T) { ctx := acctest.Context(t) - role := string(types.RoleReader) - resourceName := "aws_quicksight_role_membership.test" - 
memberName := acctest.SkipIfEnvVarNotSet(t, "TF_AWS_QUICKSIGHT_IDC_GROUP") + role := string(awstypes.RoleReader) + resourceName := "aws_quicksight_role_membership.test" resource.Test(t, resource.TestCase{ PreCheck: func() { @@ -50,7 +47,7 @@ func testAccRoleMembership_basic(t *testing.T) { { ResourceName: resourceName, ImportState: true, - ImportStateIdFunc: testAccRoleMembershipImportStateIdFunc(resourceName), + ImportStateIdFunc: testAccRoleMembershipImportStateIDFunc(resourceName), ImportStateVerify: true, ImportStateVerifyIdentifierAttribute: "member_name", }, @@ -60,10 +57,9 @@ func testAccRoleMembership_basic(t *testing.T) { func testAccRoleMembership_disappears(t *testing.T) { ctx := acctest.Context(t) - role := string(types.RoleReader) - resourceName := "aws_quicksight_role_membership.test" - memberName := acctest.SkipIfEnvVarNotSet(t, "TF_AWS_QUICKSIGHT_IDC_GROUP") + role := string(awstypes.RoleReader) + resourceName := "aws_quicksight_role_membership.test" resource.Test(t, resource.TestCase{ PreCheck: func() { @@ -90,11 +86,10 @@ func testAccRoleMembership_disappears(t *testing.T) { func testAccRoleMembership_role(t *testing.T) { ctx := acctest.Context(t) - role := string(types.RoleReader) - roleUpdated := string(types.RoleAuthor) - resourceName := "aws_quicksight_role_membership.test" - memberName := acctest.SkipIfEnvVarNotSet(t, "TF_AWS_QUICKSIGHT_IDC_GROUP") + role := string(awstypes.RoleReader) + roleUpdated := string(awstypes.RoleAuthor) + resourceName := "aws_quicksight_role_membership.test" resource.Test(t, resource.TestCase{ PreCheck: func() { @@ -141,65 +136,41 @@ func testAccCheckRoleMembershipDestroy(ctx context.Context) resource.TestCheckFu continue } - accountID := rs.Primary.Attributes[names.AttrAWSAccountID] - namespace := rs.Primary.Attributes[names.AttrNamespace] - role := rs.Primary.Attributes[names.AttrRole] - memberName := rs.Primary.Attributes["member_name"] + err := tfquicksight.FindRoleMembershipByFourPartKey(ctx, conn, 
rs.Primary.Attributes[names.AttrAWSAccountID], rs.Primary.Attributes[names.AttrNamespace], awstypes.Role(rs.Primary.Attributes[names.AttrRole]), rs.Primary.Attributes["member_name"]) - err := tfquicksight.FindRoleMembershipByMultiPartKey(ctx, conn, accountID, namespace, types.Role(role), memberName) if tfresource.NotFound(err) { - return nil + continue } + if err != nil { - return create.Error(names.QuickSight, create.ErrActionCheckingDestroyed, tfquicksight.ResNameRoleMembership, rs.Primary.ID, err) + return err } - return create.Error(names.QuickSight, create.ErrActionCheckingDestroyed, tfquicksight.ResNameRoleMembership, rs.Primary.ID, errors.New("not destroyed")) + return fmt.Errorf("QuickSight Role Membership (%s) still exists", rs.Primary.Attributes[names.AttrRole]) } return nil } } -func testAccCheckRoleMembershipExists(ctx context.Context, name string) resource.TestCheckFunc { +func testAccCheckRoleMembershipExists(ctx context.Context, n string) resource.TestCheckFunc { return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[name] + rs, ok := s.RootModule().Resources[n] if !ok { - return create.Error(names.QuickSight, create.ErrActionCheckingExistence, tfquicksight.ResNameRoleMembership, name, errors.New("not found")) - } - - accountID := rs.Primary.Attributes[names.AttrAWSAccountID] - namespace := rs.Primary.Attributes[names.AttrNamespace] - role := rs.Primary.Attributes[names.AttrRole] - memberName := rs.Primary.Attributes["member_name"] - if accountID == "" || namespace == "" || role == "" || memberName == "" { - return create.Error(names.QuickSight, create.ErrActionCheckingExistence, tfquicksight.ResNameRoleMembership, name, errors.New("not set")) + return fmt.Errorf("Not found: %s", n) } conn := acctest.Provider.Meta().(*conns.AWSClient).QuickSightClient(ctx) - err := tfquicksight.FindRoleMembershipByMultiPartKey(ctx, conn, accountID, namespace, types.Role(role), memberName) - if err != nil { - return create.Error(names.QuickSight, 
create.ErrActionCheckingExistence, tfquicksight.ResNameRoleMembership, rs.Primary.ID, err) - } + err := tfquicksight.FindRoleMembershipByFourPartKey(ctx, conn, rs.Primary.Attributes[names.AttrAWSAccountID], rs.Primary.Attributes[names.AttrNamespace], awstypes.Role(rs.Primary.Attributes[names.AttrRole]), rs.Primary.Attributes["member_name"]) - return nil + return err } } -func testAccRoleMembershipImportStateIdFunc(resourceName string) resource.ImportStateIdFunc { +func testAccRoleMembershipImportStateIDFunc(n string) resource.ImportStateIdFunc { return func(s *terraform.State) (string, error) { - rs, ok := s.RootModule().Resources[resourceName] - if !ok { - return "", fmt.Errorf("Not found: %s", resourceName) - } - - return fmt.Sprintf("%s,%s,%s,%s", - rs.Primary.Attributes[names.AttrAWSAccountID], - rs.Primary.Attributes[names.AttrNamespace], - rs.Primary.Attributes[names.AttrRole], - rs.Primary.Attributes["member_name"], - ), nil + return acctest.AttrsImportStateIdFunc(n, ",", names.AttrAWSAccountID, names.AttrNamespace, names.AttrRole, "member_name")(s) } } diff --git a/internal/service/quicksight/schema/stdattributes.go b/internal/service/quicksight/schema/stdattributes.go new file mode 100644 index 000000000000..a0638686aa56 --- /dev/null +++ b/internal/service/quicksight/schema/stdattributes.go @@ -0,0 +1,101 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package schema + +import ( + "regexp" + + "github.com/YakDriver/regexache" + "github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator" + fwschema "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringdefault" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + sdkschema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + fwvalidators "github.com/hashicorp/terraform-provider-aws/internal/framework/validators" + "github.com/hashicorp/terraform-provider-aws/internal/verify" +) + +func AWSAccountIDAttribute() fwschema.StringAttribute { // nosemgrep:ci.aws-in-func-name + return fwschema.StringAttribute{ + Optional: true, + Computed: true, + Validators: []validator.String{ + fwvalidators.AWSAccountID(), + }, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + stringplanmodifier.RequiresReplace(), + }, + } +} + +func AWSAccountIDSchema() *sdkschema.Schema { // nosemgrep:ci.aws-in-func-name + return &sdkschema.Schema{ + Type: sdkschema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ValidateFunc: verify.ValidAccountID, + } +} + +func AWSAccountIDDataSourceSchema() *sdkschema.Schema { // nosemgrep:ci.aws-in-func-name + return &sdkschema.Schema{ + Type: sdkschema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: verify.ValidAccountID, + } +} + +const ( + DefaultNamespace = "default" +) + +func NamespaceAttribute() fwschema.StringAttribute { + return fwschema.StringAttribute{ + Optional: true, + Computed: true, + Default: stringdefault.StaticString(DefaultNamespace), + Validators: []validator.String{ + 
stringvalidator.LengthBetween(1, 64), + stringvalidator.RegexMatches(namespaceRegex()), + }, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + } +} + +func NamespaceSchema() *sdkschema.Schema { + return &sdkschema.Schema{ + Type: sdkschema.TypeString, + Optional: true, + ForceNew: true, + Default: DefaultNamespace, + ValidateFunc: validation.All( + validation.StringLenBetween(1, 64), + validation.StringMatch(namespaceRegex()), + ), + } +} + +func NamespaceDataSourceSchema() *sdkschema.Schema { + return &sdkschema.Schema{ + Type: sdkschema.TypeString, + Optional: true, + Default: DefaultNamespace, + ValidateFunc: validation.All( + validation.StringLenBetween(1, 64), + validation.StringMatch(namespaceRegex()), + ), + } +} + +func namespaceRegex() (*regexp.Regexp, string) { + return regexache.MustCompile(`^[0-9A-Za-z_.-]*$`), "must contain only alphanumeric characters, hyphens, underscores, and periods" +} diff --git a/internal/service/quicksight/service_endpoint_resolver_gen.go b/internal/service/quicksight/service_endpoint_resolver_gen.go index 3c6a46e09805..402978520340 100644 --- a/internal/service/quicksight/service_endpoint_resolver_gen.go +++ b/internal/service/quicksight/service_endpoint_resolver_gen.go @@ -62,7 +62,7 @@ func (r resolverV2) ResolveEndpoint(ctx context.Context, params quicksight.Endpo }) params.UseFIPS = aws.Bool(false) } else { - err = fmt.Errorf("looking up quicksight endpoint %q: %s", hostname, err) + err = fmt.Errorf("looking up quicksight endpoint %q: %w", hostname, err) return } } else { diff --git a/internal/service/quicksight/service_endpoints_gen_test.go b/internal/service/quicksight/service_endpoints_gen_test.go index 891d31327945..ea7698f34e90 100644 --- a/internal/service/quicksight/service_endpoints_gen_test.go +++ b/internal/service/quicksight/service_endpoints_gen_test.go @@ -524,7 +524,7 @@ func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { ) } -var 
errCancelOperation = fmt.Errorf("Test: Canceling request") +var errCancelOperation = errors.New("Test: Canceling request") func addCancelRequestMiddleware() func(*middleware.Stack) error { return func(stack *middleware.Stack) error { diff --git a/internal/service/quicksight/service_package_gen.go b/internal/service/quicksight/service_package_gen.go index 890d58de1016..fd49181ece5e 100644 --- a/internal/service/quicksight/service_package_gen.go +++ b/internal/service/quicksight/service_package_gen.go @@ -7,7 +7,6 @@ import ( "unique" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/quicksight" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -30,6 +29,15 @@ func (p *servicePackage) FrameworkResources(ctx context.Context) []*inttypes.Ser Name: "Account Settings", Region: unique.Make(inttypes.ResourceRegionDisabled()), }, + { + Factory: newCustomPermissionsResource, + TypeName: "aws_quicksight_custom_permissions", + Name: "Custom Permissions", + Tags: unique.Make(inttypes.ServicePackageResourceTags{ + IdentifierAttribute: names.AttrARN, + }), + Region: unique.Make(inttypes.ResourceRegionDefault()), + }, { Factory: newFolderMembershipResource, TypeName: "aws_quicksight_folder_membership", @@ -48,6 +56,18 @@ func (p *servicePackage) FrameworkResources(ctx context.Context) []*inttypes.Ser Name: "Ingestion", Region: unique.Make(inttypes.ResourceRegionDefault()), }, + { + Factory: newIPRestrictionResource, + TypeName: "aws_quicksight_ip_restriction", + Name: "IP Restriction", + Region: unique.Make(inttypes.ResourceRegionDefault()), + }, + { + Factory: newKeyRegistrationResource, + TypeName: "aws_quicksight_key_registration", + Name: "Key Registration", + Region: unique.Make(inttypes.ResourceRegionDefault()), + }, { Factory: newNamespaceResource, TypeName: "aws_quicksight_namespace", @@ -63,6 +83,12 @@ func (p *servicePackage) 
FrameworkResources(ctx context.Context) []*inttypes.Ser Name: "Refresh Schedule", Region: unique.Make(inttypes.ResourceRegionDefault()), }, + { + Factory: newRoleCustomPermissionResource, + TypeName: "aws_quicksight_role_custom_permission", + Name: "Role Custom Permission", + Region: unique.Make(inttypes.ResourceRegionDefault()), + }, { Factory: newRoleMembershipResource, TypeName: "aws_quicksight_role_membership", @@ -75,6 +101,12 @@ func (p *servicePackage) FrameworkResources(ctx context.Context) []*inttypes.Ser Name: "Template Alias", Region: unique.Make(inttypes.ResourceRegionDefault()), }, + { + Factory: newUserCustomPermissionResource, + TypeName: "aws_quicksight_user_custom_permission", + Name: "User Custom Permission", + Region: unique.Make(inttypes.ResourceRegionDefault()), + }, { Factory: newVPCConnectionResource, TypeName: "aws_quicksight_vpc_connection", @@ -240,7 +272,7 @@ func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) ( func(o *quicksight.Options) { if inContext, ok := conns.FromContext(ctx); ok && inContext.VCREnabled() { tflog.Info(ctx, "overriding retry behavior to immediately return VCR errors") - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(vcr.InteractionNotFoundRetryableFunc)) + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), vcr.InteractionNotFoundRetryableFunc) } }, withExtraOptions(ctx, p, config), diff --git a/internal/service/quicksight/sweep.go b/internal/service/quicksight/sweep.go index 975f7a1c226a..1a6f8b389d1f 100644 --- a/internal/service/quicksight/sweep.go +++ b/internal/service/quicksight/sweep.go @@ -13,6 +13,7 @@ import ( awstypes "github.com/aws/aws-sdk-go-v2/service/quicksight/types" "github.com/hashicorp/aws-sdk-go-base/v2/tfawserr" "github.com/hashicorp/terraform-plugin-testing/helper/resource" + quicksightschema "github.com/hashicorp/terraform-provider-aws/internal/service/quicksight/schema" 
"github.com/hashicorp/terraform-provider-aws/internal/sweep" "github.com/hashicorp/terraform-provider-aws/internal/sweep/awsv2" "github.com/hashicorp/terraform-provider-aws/internal/sweep/framework" @@ -66,7 +67,7 @@ func sweepDashboards(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %w", err) + return fmt.Errorf("getting client: %w", err) } conn := client.QuickSightClient(ctx) sweepResources := make([]sweep.Sweepable, 0) @@ -110,7 +111,7 @@ func sweepDataSets(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %w", err) + return fmt.Errorf("getting client: %w", err) } conn := client.QuickSightClient(ctx) sweepResources := make([]sweep.Sweepable, 0) @@ -154,7 +155,7 @@ func sweepDataSources(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %w", err) + return fmt.Errorf("getting client: %w", err) } conn := client.QuickSightClient(ctx) sweepResources := make([]sweep.Sweepable, 0) @@ -249,7 +250,7 @@ func sweepGroups(region string) error { awsAccountID := client.AccountID(ctx) input := &quicksight.ListGroupsInput{ AwsAccountId: aws.String(awsAccountID), - Namespace: aws.String(defaultUserNamespace), + Namespace: aws.String(quicksightschema.DefaultNamespace), } pages := quicksight.NewListGroupsPaginator(conn, input) @@ -275,7 +276,7 @@ func sweepGroups(region string) error { r := resourceGroup() d := r.Data(nil) - d.SetId(groupCreateResourceID(awsAccountID, defaultUserNamespace, groupName)) + d.SetId(groupCreateResourceID(awsAccountID, quicksightschema.DefaultNamespace, groupName)) sweepResources = append(sweepResources, sweep.NewSweepResource(r, d, client)) } @@ -294,7 +295,7 @@ func sweepTemplates(region 
string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %w", err) + return fmt.Errorf("getting client: %w", err) } conn := client.QuickSightClient(ctx) sweepResources := make([]sweep.Sweepable, 0) @@ -345,7 +346,7 @@ func sweepUsers(region string) error { awsAccountID := client.AccountID(ctx) input := &quicksight.ListUsersInput{ AwsAccountId: aws.String(awsAccountID), - Namespace: aws.String(defaultUserNamespace), + Namespace: aws.String(quicksightschema.DefaultNamespace), } pages := quicksight.NewListUsersPaginator(conn, input) @@ -371,7 +372,7 @@ func sweepUsers(region string) error { r := resourceUser() d := r.Data(nil) - d.SetId(userCreateResourceID(awsAccountID, defaultUserNamespace, userName)) + d.SetId(userCreateResourceID(awsAccountID, quicksightschema.DefaultNamespace, userName)) sweepResources = append(sweepResources, sweep.NewSweepResource(r, d, client)) } diff --git a/internal/service/quicksight/tags_gen.go b/internal/service/quicksight/tags_gen.go index 6b9bb49b194b..479651366f6e 100644 --- a/internal/service/quicksight/tags_gen.go +++ b/internal/service/quicksight/tags_gen.go @@ -3,8 +3,8 @@ package quicksight import ( "context" - "fmt" + "github.com/YakDriver/smarterr" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/quicksight" awstypes "github.com/aws/aws-sdk-go-v2/service/quicksight/types" @@ -27,7 +27,7 @@ func listTags(ctx context.Context, conn *quicksight.Client, identifier string, o output, err := conn.ListTagsForResource(ctx, &input, optFns...) 
if err != nil { - return tftags.New(ctx, nil), err + return tftags.New(ctx, nil), smarterr.NewError(err) } return keyValueTags(ctx, output.Tags), nil @@ -39,7 +39,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri tags, err := listTags(ctx, meta.(*conns.AWSClient).QuickSightClient(ctx), identifier) if err != nil { - return err + return smarterr.NewError(err) } if inContext, ok := tftags.FromContext(ctx); ok { @@ -117,7 +117,7 @@ func updateTags(ctx context.Context, conn *quicksight.Client, identifier string, _, err := conn.UntagResource(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("untagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } @@ -132,7 +132,7 @@ func updateTags(ctx context.Context, conn *quicksight.Client, identifier string, _, err := conn.TagResource(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("tagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } diff --git a/internal/service/quicksight/template.go b/internal/service/quicksight/template.go index e67904fcd23a..4bea3dfdc7d5 100644 --- a/internal/service/quicksight/template.go +++ b/internal/service/quicksight/template.go @@ -26,7 +26,6 @@ import ( tfslices "github.com/hashicorp/terraform-provider-aws/internal/slices" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" - "github.com/hashicorp/terraform-provider-aws/internal/verify" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -57,13 +56,7 @@ func resourceTemplate() *schema.Resource { Type: schema.TypeString, Computed: true, }, - names.AttrAWSAccountID: { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - ValidateFunc: verify.ValidAccountID, - }, + names.AttrAWSAccountID: quicksightschema.AWSAccountIDSchema(), names.AttrCreatedTime: { Type: schema.TypeString, Computed: true, diff --git 
a/internal/service/quicksight/template_alias.go b/internal/service/quicksight/template_alias.go index 05a00bcd801a..7e2b365e6b62 100644 --- a/internal/service/quicksight/template_alias.go +++ b/internal/service/quicksight/template_alias.go @@ -21,7 +21,8 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/create" "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/framework" - "github.com/hashicorp/terraform-provider-aws/internal/framework/flex" + fwflex "github.com/hashicorp/terraform-provider-aws/internal/framework/flex" + quicksightschema "github.com/hashicorp/terraform-provider-aws/internal/service/quicksight/schema" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -49,16 +50,9 @@ func (r *templateAliasResource) Schema(ctx context.Context, req resource.SchemaR stringplanmodifier.RequiresReplace(), }, }, - names.AttrARN: framework.ARNAttributeComputedOnly(), - names.AttrAWSAccountID: schema.StringAttribute{ - Optional: true, - Computed: true, - PlanModifiers: []planmodifier.String{ - stringplanmodifier.UseStateForUnknown(), - stringplanmodifier.RequiresReplace(), - }, - }, - names.AttrID: framework.IDAttribute(), + names.AttrARN: framework.ARNAttributeComputedOnly(), + names.AttrAWSAccountID: quicksightschema.AWSAccountIDAttribute(), + names.AttrID: framework.IDAttribute(), "template_id": schema.StringAttribute{ Required: true, PlanModifiers: []planmodifier.String{ @@ -73,45 +67,45 @@ func (r *templateAliasResource) Schema(ctx context.Context, req resource.SchemaR } func (r *templateAliasResource) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { - conn := r.Meta().QuickSightClient(ctx) - - var plan templateAliasResourceModel - resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...) + var data templateAliasResourceModel + resp.Diagnostics.Append(req.Plan.Get(ctx, &data)...) 
if resp.Diagnostics.HasError() { return } - - if plan.AWSAccountID.IsUnknown() || plan.AWSAccountID.IsNull() { - plan.AWSAccountID = types.StringValue(r.Meta().AccountID(ctx)) + if data.AWSAccountID.IsUnknown() { + data.AWSAccountID = fwflex.StringValueToFramework(ctx, r.Meta().AccountID(ctx)) } - awsAccountID, templateID, aliasName := flex.StringValueFromFramework(ctx, plan.AWSAccountID), flex.StringValueFromFramework(ctx, plan.TemplateID), flex.StringValueFromFramework(ctx, plan.AliasName) + + conn := r.Meta().QuickSightClient(ctx) + + awsAccountID, templateID, aliasName := fwflex.StringValueFromFramework(ctx, data.AWSAccountID), fwflex.StringValueFromFramework(ctx, data.TemplateID), fwflex.StringValueFromFramework(ctx, data.AliasName) in := &quicksight.CreateTemplateAliasInput{ AliasName: aws.String(aliasName), AwsAccountId: aws.String(awsAccountID), TemplateId: aws.String(templateID), - TemplateVersionNumber: plan.TemplateVersionNumber.ValueInt64Pointer(), + TemplateVersionNumber: data.TemplateVersionNumber.ValueInt64Pointer(), } out, err := conn.CreateTemplateAlias(ctx, in) if err != nil { resp.Diagnostics.AddError( - create.ProblemStandardMessage(names.QuickSight, create.ErrActionCreating, resNameTemplateAlias, plan.AliasName.String(), err), + create.ProblemStandardMessage(names.QuickSight, create.ErrActionCreating, resNameTemplateAlias, data.AliasName.String(), err), err.Error(), ) return } if out == nil || out.TemplateAlias == nil { resp.Diagnostics.AddError( - create.ProblemStandardMessage(names.QuickSight, create.ErrActionCreating, resNameTemplateAlias, plan.AliasName.String(), nil), + create.ProblemStandardMessage(names.QuickSight, create.ErrActionCreating, resNameTemplateAlias, data.AliasName.String(), nil), errors.New("empty output").Error(), ) return } - plan.ID = types.StringValue(templateAliasCreateResourceID(awsAccountID, templateID, aliasName)) - plan.ARN = flex.StringToFramework(ctx, out.TemplateAlias.Arn) + data.ID = 
types.StringValue(templateAliasCreateResourceID(awsAccountID, templateID, aliasName)) + data.ARN = fwflex.StringToFramework(ctx, out.TemplateAlias.Arn) - resp.Diagnostics.Append(resp.State.Set(ctx, plan)...) + resp.Diagnostics.Append(resp.State.Set(ctx, data)...) } func (r *templateAliasResource) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { @@ -145,11 +139,11 @@ func (r *templateAliasResource) Read(ctx context.Context, req resource.ReadReque return } - state.ARN = flex.StringToFramework(ctx, out.Arn) - state.AliasName = flex.StringToFramework(ctx, out.AliasName) - state.TemplateVersionNumber = flex.Int64ToFramework(ctx, out.TemplateVersionNumber) - state.AWSAccountID = flex.StringValueToFramework(ctx, awsAccountID) - state.TemplateID = flex.StringValueToFramework(ctx, templateID) + state.ARN = fwflex.StringToFramework(ctx, out.Arn) + state.AliasName = fwflex.StringToFramework(ctx, out.AliasName) + state.TemplateVersionNumber = fwflex.Int64ToFramework(ctx, out.TemplateVersionNumber) + state.AWSAccountID = fwflex.StringValueToFramework(ctx, awsAccountID) + state.TemplateID = fwflex.StringValueToFramework(ctx, templateID) resp.Diagnostics.Append(resp.State.Set(ctx, &state)...) } @@ -197,7 +191,7 @@ func (r *templateAliasResource) Update(ctx context.Context, req resource.UpdateR return } - plan.ARN = flex.StringToFramework(ctx, out.TemplateAlias.Arn) + plan.ARN = fwflex.StringToFramework(ctx, out.TemplateAlias.Arn) } resp.Diagnostics.Append(resp.State.Set(ctx, &plan)...) 
diff --git a/internal/service/quicksight/template_tags_gen_test.go b/internal/service/quicksight/template_tags_gen_test.go index 7bc5b4181ef7..64cf566e6068 100644 --- a/internal/service/quicksight/template_tags_gen_test.go +++ b/internal/service/quicksight/template_tags_gen_test.go @@ -7,7 +7,6 @@ import ( awstypes "github.com/aws/aws-sdk-go-v2/service/quicksight/types" "github.com/hashicorp/terraform-plugin-testing/config" - sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/knownvalue" "github.com/hashicorp/terraform-plugin-testing/plancheck" @@ -19,11 +18,12 @@ import ( func TestAccQuickSightTemplate_tags(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Template resourceName := "aws_quicksight_template.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckTemplateDestroy(ctx), @@ -203,11 +203,12 @@ func TestAccQuickSightTemplate_tags_null(t *testing.T) { t.Skip("Resource Template does not support null tags") ctx := acctest.Context(t) + var v awstypes.Template resourceName := "aws_quicksight_template.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckTemplateDestroy(ctx), @@ -270,11 +271,12 @@ func TestAccQuickSightTemplate_tags_null(t *testing.T) { func TestAccQuickSightTemplate_tags_EmptyMap(t *testing.T) { ctx 
:= acctest.Context(t) + var v awstypes.Template resourceName := "aws_quicksight_template.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckTemplateDestroy(ctx), @@ -333,11 +335,12 @@ func TestAccQuickSightTemplate_tags_EmptyMap(t *testing.T) { func TestAccQuickSightTemplate_tags_AddOnUpdate(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Template resourceName := "aws_quicksight_template.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckTemplateDestroy(ctx), @@ -416,11 +419,12 @@ func TestAccQuickSightTemplate_tags_EmptyTag_OnCreate(t *testing.T) { t.Skip("Resource Template does not support empty tags") ctx := acctest.Context(t) + var v awstypes.Template resourceName := "aws_quicksight_template.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckTemplateDestroy(ctx), @@ -507,11 +511,12 @@ func TestAccQuickSightTemplate_tags_EmptyTag_OnUpdate_Add(t *testing.T) { t.Skip("Resource Template does not support empty tags") ctx := acctest.Context(t) + var v awstypes.Template resourceName := "aws_quicksight_template.test" - rName := 
sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckTemplateDestroy(ctx), @@ -646,11 +651,12 @@ func TestAccQuickSightTemplate_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { t.Skip("Resource Template does not support empty tags") ctx := acctest.Context(t) + var v awstypes.Template resourceName := "aws_quicksight_template.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckTemplateDestroy(ctx), @@ -735,11 +741,12 @@ func TestAccQuickSightTemplate_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { func TestAccQuickSightTemplate_tags_DefaultTags_providerOnly(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Template resourceName := "aws_quicksight_template.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckTemplateDestroy(ctx), @@ -916,11 +923,12 @@ func TestAccQuickSightTemplate_tags_DefaultTags_providerOnly(t *testing.T) { func TestAccQuickSightTemplate_tags_DefaultTags_nonOverlapping(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Template resourceName := "aws_quicksight_template.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + 
rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckTemplateDestroy(ctx), @@ -1076,11 +1084,12 @@ func TestAccQuickSightTemplate_tags_DefaultTags_nonOverlapping(t *testing.T) { func TestAccQuickSightTemplate_tags_DefaultTags_overlapping(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Template resourceName := "aws_quicksight_template.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckTemplateDestroy(ctx), @@ -1252,11 +1261,12 @@ func TestAccQuickSightTemplate_tags_DefaultTags_overlapping(t *testing.T) { func TestAccQuickSightTemplate_tags_DefaultTags_updateToProviderOnly(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Template resourceName := "aws_quicksight_template.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckTemplateDestroy(ctx), @@ -1342,11 +1352,12 @@ func TestAccQuickSightTemplate_tags_DefaultTags_updateToProviderOnly(t *testing. 
func TestAccQuickSightTemplate_tags_DefaultTags_updateToResourceOnly(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Template resourceName := "aws_quicksight_template.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckTemplateDestroy(ctx), @@ -1433,11 +1444,12 @@ func TestAccQuickSightTemplate_tags_DefaultTags_emptyResourceTag(t *testing.T) { t.Skip("Resource Template does not support empty tags") ctx := acctest.Context(t) + var v awstypes.Template resourceName := "aws_quicksight_template.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckTemplateDestroy(ctx), @@ -1500,11 +1512,12 @@ func TestAccQuickSightTemplate_tags_DefaultTags_emptyProviderOnlyTag(t *testing. 
t.Skip("Resource Template does not support empty tags") ctx := acctest.Context(t) + var v awstypes.Template resourceName := "aws_quicksight_template.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckTemplateDestroy(ctx), @@ -1559,11 +1572,12 @@ func TestAccQuickSightTemplate_tags_DefaultTags_nullOverlappingResourceTag(t *te t.Skip("Resource Template does not support null tags") ctx := acctest.Context(t) + var v awstypes.Template resourceName := "aws_quicksight_template.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckTemplateDestroy(ctx), @@ -1623,11 +1637,12 @@ func TestAccQuickSightTemplate_tags_DefaultTags_nullNonOverlappingResourceTag(t t.Skip("Resource Template does not support null tags") ctx := acctest.Context(t) + var v awstypes.Template resourceName := "aws_quicksight_template.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckTemplateDestroy(ctx), @@ -1685,11 +1700,12 @@ func TestAccQuickSightTemplate_tags_DefaultTags_nullNonOverlappingResourceTag(t func TestAccQuickSightTemplate_tags_ComputedTag_OnCreate(t *testing.T) { ctx := 
acctest.Context(t) + var v awstypes.Template resourceName := "aws_quicksight_template.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckTemplateDestroy(ctx), @@ -1740,11 +1756,12 @@ func TestAccQuickSightTemplate_tags_ComputedTag_OnCreate(t *testing.T) { func TestAccQuickSightTemplate_tags_ComputedTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Template resourceName := "aws_quicksight_template.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckTemplateDestroy(ctx), @@ -1837,11 +1854,12 @@ func TestAccQuickSightTemplate_tags_ComputedTag_OnUpdate_Add(t *testing.T) { func TestAccQuickSightTemplate_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Template resourceName := "aws_quicksight_template.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckTemplateDestroy(ctx), @@ -1924,11 +1942,12 @@ func TestAccQuickSightTemplate_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { func TestAccQuickSightTemplate_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { ctx := acctest.Context(t) + 
var v awstypes.Template resourceName := "aws_quicksight_template.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckTemplateDestroy(ctx), @@ -2086,11 +2105,12 @@ func TestAccQuickSightTemplate_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) func TestAccQuickSightTemplate_tags_IgnoreTags_Overlap_ResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Template resourceName := "aws_quicksight_template.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckTemplateDestroy(ctx), diff --git a/internal/service/quicksight/testdata/CustomPermissions/tags/main_gen.tf b/internal/service/quicksight/testdata/CustomPermissions/tags/main_gen.tf new file mode 100644 index 000000000000..594dc355d23f --- /dev/null +++ b/internal/service/quicksight/testdata/CustomPermissions/tags/main_gen.tf @@ -0,0 +1,25 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resource "aws_quicksight_custom_permissions" "test" { + custom_permissions_name = var.rName + + capabilities { + print_reports = "DENY" + share_dashboards = "DENY" + } + + tags = var.resource_tags +} +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} + +variable "resource_tags" { + description = "Tags to set on resource. 
To specify no tags, set to `null`" + # Not setting a default, so that this must explicitly be set to `null` to specify no tags + type = map(string) + nullable = true +} diff --git a/internal/service/quicksight/testdata/CustomPermissions/tagsComputed1/main_gen.tf b/internal/service/quicksight/testdata/CustomPermissions/tagsComputed1/main_gen.tf new file mode 100644 index 000000000000..f53246a3e309 --- /dev/null +++ b/internal/service/quicksight/testdata/CustomPermissions/tagsComputed1/main_gen.tf @@ -0,0 +1,29 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +provider "null" {} + +resource "aws_quicksight_custom_permissions" "test" { + custom_permissions_name = var.rName + + capabilities { + print_reports = "DENY" + share_dashboards = "DENY" + } + + tags = { + (var.unknownTagKey) = null_resource.test.id + } +} +resource "null_resource" "test" {} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} + +variable "unknownTagKey" { + type = string + nullable = false +} diff --git a/internal/service/quicksight/testdata/CustomPermissions/tagsComputed2/main_gen.tf b/internal/service/quicksight/testdata/CustomPermissions/tagsComputed2/main_gen.tf new file mode 100644 index 000000000000..5e0d566f8895 --- /dev/null +++ b/internal/service/quicksight/testdata/CustomPermissions/tagsComputed2/main_gen.tf @@ -0,0 +1,40 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +provider "null" {} + +resource "aws_quicksight_custom_permissions" "test" { + custom_permissions_name = var.rName + + capabilities { + print_reports = "DENY" + share_dashboards = "DENY" + } + + tags = { + (var.unknownTagKey) = null_resource.test.id + (var.knownTagKey) = var.knownTagValue + } +} +resource "null_resource" "test" {} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} + +variable "unknownTagKey" { + type = string + nullable = false +} + +variable "knownTagKey" { + type = string + nullable = false +} + +variable "knownTagValue" { + type = string + nullable = false +} diff --git a/internal/service/quicksight/testdata/CustomPermissions/tags_defaults/main_gen.tf b/internal/service/quicksight/testdata/CustomPermissions/tags_defaults/main_gen.tf new file mode 100644 index 000000000000..dcff11bb1132 --- /dev/null +++ b/internal/service/quicksight/testdata/CustomPermissions/tags_defaults/main_gen.tf @@ -0,0 +1,36 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +provider "aws" { + default_tags { + tags = var.provider_tags + } +} + +resource "aws_quicksight_custom_permissions" "test" { + custom_permissions_name = var.rName + + capabilities { + print_reports = "DENY" + share_dashboards = "DENY" + } + + tags = var.resource_tags +} +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} + +variable "resource_tags" { + description = "Tags to set on resource. 
To specify no tags, set to `null`" + # Not setting a default, so that this must explicitly be set to `null` to specify no tags + type = map(string) + nullable = true +} + +variable "provider_tags" { + type = map(string) + nullable = false +} diff --git a/internal/service/quicksight/testdata/CustomPermissions/tags_ignore/main_gen.tf b/internal/service/quicksight/testdata/CustomPermissions/tags_ignore/main_gen.tf new file mode 100644 index 000000000000..b67889b92a79 --- /dev/null +++ b/internal/service/quicksight/testdata/CustomPermissions/tags_ignore/main_gen.tf @@ -0,0 +1,45 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +provider "aws" { + default_tags { + tags = var.provider_tags + } + ignore_tags { + keys = var.ignore_tag_keys + } +} + +resource "aws_quicksight_custom_permissions" "test" { + custom_permissions_name = var.rName + + capabilities { + print_reports = "DENY" + share_dashboards = "DENY" + } + + tags = var.resource_tags +} +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} + +variable "resource_tags" { + description = "Tags to set on resource. To specify no tags, set to `null`" + # Not setting a default, so that this must explicitly be set to `null` to specify no tags + type = map(string) + nullable = true +} + +variable "provider_tags" { + type = map(string) + nullable = true + default = null +} + +variable "ignore_tag_keys" { + type = set(string) + nullable = false +} diff --git a/internal/service/quicksight/testdata/tmpl/custom_permissions_tags.gtpl b/internal/service/quicksight/testdata/tmpl/custom_permissions_tags.gtpl new file mode 100644 index 000000000000..40d53ea6bcde --- /dev/null +++ b/internal/service/quicksight/testdata/tmpl/custom_permissions_tags.gtpl @@ -0,0 +1,9 @@ +resource "aws_quicksight_custom_permissions" "test" { + custom_permissions_name = var.rName + + capabilities { + print_reports = "DENY" + share_dashboards = "DENY" + } +{{- template "tags" . 
}} +} \ No newline at end of file diff --git a/internal/service/quicksight/theme.go b/internal/service/quicksight/theme.go index 715666cc215d..01f7161a3f4d 100644 --- a/internal/service/quicksight/theme.go +++ b/internal/service/quicksight/theme.go @@ -26,7 +26,6 @@ import ( tfslices "github.com/hashicorp/terraform-provider-aws/internal/slices" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" - "github.com/hashicorp/terraform-provider-aws/internal/verify" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -57,13 +56,7 @@ func resourceTheme() *schema.Resource { Type: schema.TypeString, Computed: true, }, - names.AttrAWSAccountID: { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - ValidateFunc: verify.ValidAccountID, - }, + names.AttrAWSAccountID: quicksightschema.AWSAccountIDSchema(), "base_theme_id": { Type: schema.TypeString, Required: true, diff --git a/internal/service/quicksight/theme_data_source.go b/internal/service/quicksight/theme_data_source.go index 57d04160333a..a61c6b478941 100644 --- a/internal/service/quicksight/theme_data_source.go +++ b/internal/service/quicksight/theme_data_source.go @@ -13,7 +13,6 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" quicksightschema "github.com/hashicorp/terraform-provider-aws/internal/service/quicksight/schema" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/verify" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -28,12 +27,7 @@ func dataSourceTheme() *schema.Resource { Type: schema.TypeString, Computed: true, }, - names.AttrAWSAccountID: { - Type: schema.TypeString, - Optional: true, - Computed: true, - ValidateFunc: verify.ValidAccountID, - }, + names.AttrAWSAccountID: quicksightschema.AWSAccountIDDataSourceSchema(), "base_theme_id": { Type: schema.TypeString, Computed: true, diff 
--git a/internal/service/quicksight/theme_tags_gen_test.go b/internal/service/quicksight/theme_tags_gen_test.go index 9ba494283e15..11aa3808d96d 100644 --- a/internal/service/quicksight/theme_tags_gen_test.go +++ b/internal/service/quicksight/theme_tags_gen_test.go @@ -7,7 +7,6 @@ import ( awstypes "github.com/aws/aws-sdk-go-v2/service/quicksight/types" "github.com/hashicorp/terraform-plugin-testing/config" - sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/knownvalue" "github.com/hashicorp/terraform-plugin-testing/plancheck" @@ -19,11 +18,12 @@ import ( func TestAccQuickSightTheme_tags(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Theme resourceName := "aws_quicksight_theme.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckThemeDestroy(ctx), @@ -203,11 +203,12 @@ func TestAccQuickSightTheme_tags_null(t *testing.T) { t.Skip("Resource Theme does not support null tags") ctx := acctest.Context(t) + var v awstypes.Theme resourceName := "aws_quicksight_theme.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckThemeDestroy(ctx), @@ -270,11 +271,12 @@ func TestAccQuickSightTheme_tags_null(t *testing.T) { func TestAccQuickSightTheme_tags_EmptyMap(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Theme 
resourceName := "aws_quicksight_theme.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckThemeDestroy(ctx), @@ -333,11 +335,12 @@ func TestAccQuickSightTheme_tags_EmptyMap(t *testing.T) { func TestAccQuickSightTheme_tags_AddOnUpdate(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Theme resourceName := "aws_quicksight_theme.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckThemeDestroy(ctx), @@ -416,11 +419,12 @@ func TestAccQuickSightTheme_tags_EmptyTag_OnCreate(t *testing.T) { t.Skip("Resource Theme does not support empty tags") ctx := acctest.Context(t) + var v awstypes.Theme resourceName := "aws_quicksight_theme.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckThemeDestroy(ctx), @@ -507,11 +511,12 @@ func TestAccQuickSightTheme_tags_EmptyTag_OnUpdate_Add(t *testing.T) { t.Skip("Resource Theme does not support empty tags") ctx := acctest.Context(t) + var v awstypes.Theme resourceName := "aws_quicksight_theme.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, 
acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckThemeDestroy(ctx), @@ -646,11 +651,12 @@ func TestAccQuickSightTheme_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { t.Skip("Resource Theme does not support empty tags") ctx := acctest.Context(t) + var v awstypes.Theme resourceName := "aws_quicksight_theme.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckThemeDestroy(ctx), @@ -735,11 +741,12 @@ func TestAccQuickSightTheme_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { func TestAccQuickSightTheme_tags_DefaultTags_providerOnly(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Theme resourceName := "aws_quicksight_theme.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckThemeDestroy(ctx), @@ -916,11 +923,12 @@ func TestAccQuickSightTheme_tags_DefaultTags_providerOnly(t *testing.T) { func TestAccQuickSightTheme_tags_DefaultTags_nonOverlapping(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Theme resourceName := "aws_quicksight_theme.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, 
t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckThemeDestroy(ctx), @@ -1076,11 +1084,12 @@ func TestAccQuickSightTheme_tags_DefaultTags_nonOverlapping(t *testing.T) { func TestAccQuickSightTheme_tags_DefaultTags_overlapping(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Theme resourceName := "aws_quicksight_theme.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckThemeDestroy(ctx), @@ -1252,11 +1261,12 @@ func TestAccQuickSightTheme_tags_DefaultTags_overlapping(t *testing.T) { func TestAccQuickSightTheme_tags_DefaultTags_updateToProviderOnly(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Theme resourceName := "aws_quicksight_theme.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckThemeDestroy(ctx), @@ -1342,11 +1352,12 @@ func TestAccQuickSightTheme_tags_DefaultTags_updateToProviderOnly(t *testing.T) func TestAccQuickSightTheme_tags_DefaultTags_updateToResourceOnly(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Theme resourceName := "aws_quicksight_theme.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { 
acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckThemeDestroy(ctx), @@ -1433,11 +1444,12 @@ func TestAccQuickSightTheme_tags_DefaultTags_emptyResourceTag(t *testing.T) { t.Skip("Resource Theme does not support empty tags") ctx := acctest.Context(t) + var v awstypes.Theme resourceName := "aws_quicksight_theme.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckThemeDestroy(ctx), @@ -1500,11 +1512,12 @@ func TestAccQuickSightTheme_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T) t.Skip("Resource Theme does not support empty tags") ctx := acctest.Context(t) + var v awstypes.Theme resourceName := "aws_quicksight_theme.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckThemeDestroy(ctx), @@ -1559,11 +1572,12 @@ func TestAccQuickSightTheme_tags_DefaultTags_nullOverlappingResourceTag(t *testi t.Skip("Resource Theme does not support null tags") ctx := acctest.Context(t) + var v awstypes.Theme resourceName := "aws_quicksight_theme.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: 
testAccCheckThemeDestroy(ctx), @@ -1623,11 +1637,12 @@ func TestAccQuickSightTheme_tags_DefaultTags_nullNonOverlappingResourceTag(t *te t.Skip("Resource Theme does not support null tags") ctx := acctest.Context(t) + var v awstypes.Theme resourceName := "aws_quicksight_theme.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckThemeDestroy(ctx), @@ -1685,11 +1700,12 @@ func TestAccQuickSightTheme_tags_DefaultTags_nullNonOverlappingResourceTag(t *te func TestAccQuickSightTheme_tags_ComputedTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Theme resourceName := "aws_quicksight_theme.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckThemeDestroy(ctx), @@ -1740,11 +1756,12 @@ func TestAccQuickSightTheme_tags_ComputedTag_OnCreate(t *testing.T) { func TestAccQuickSightTheme_tags_ComputedTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Theme resourceName := "aws_quicksight_theme.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckThemeDestroy(ctx), @@ -1837,11 +1854,12 @@ func 
TestAccQuickSightTheme_tags_ComputedTag_OnUpdate_Add(t *testing.T) { func TestAccQuickSightTheme_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Theme resourceName := "aws_quicksight_theme.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckThemeDestroy(ctx), @@ -1924,11 +1942,12 @@ func TestAccQuickSightTheme_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { func TestAccQuickSightTheme_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Theme resourceName := "aws_quicksight_theme.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckThemeDestroy(ctx), @@ -2086,11 +2105,12 @@ func TestAccQuickSightTheme_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { func TestAccQuickSightTheme_tags_IgnoreTags_Overlap_ResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Theme resourceName := "aws_quicksight_theme.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckThemeDestroy(ctx), diff --git a/internal/service/quicksight/user.go 
b/internal/service/quicksight/user.go index aa1bcdfe6d9f..468a63b97740 100644 --- a/internal/service/quicksight/user.go +++ b/internal/service/quicksight/user.go @@ -9,7 +9,6 @@ import ( "log" "strings" - "github.com/YakDriver/regexache" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/quicksight" awstypes "github.com/aws/aws-sdk-go-v2/service/quicksight/types" @@ -21,14 +20,12 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/enum" "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" + quicksightschema "github.com/hashicorp/terraform-provider-aws/internal/service/quicksight/schema" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + "github.com/hashicorp/terraform-provider-aws/internal/verify" "github.com/hashicorp/terraform-provider-aws/names" ) -const ( - defaultUserNamespace = "default" -) - // @SDKResource("aws_quicksight_user", name="User") func resourceUser() *schema.Resource { return &schema.Resource{ @@ -43,41 +40,24 @@ func resourceUser() *schema.Resource { Type: schema.TypeString, Computed: true, }, - names.AttrAWSAccountID: { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, + names.AttrAWSAccountID: quicksightschema.AWSAccountIDSchema(), names.AttrEmail: { Type: schema.TypeString, Required: true, }, "iam_arn": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: verify.ValidARN, }, "identity_type": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - // TODO ValidateDiagFunc: enum.Validate[awstypes.IdentityType](), - ValidateFunc: validation.StringInSlice(enum.Slice( - awstypes.IdentityTypeIam, - awstypes.IdentityTypeQuicksight, - ), false), - }, - names.AttrNamespace: { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Default: defaultUserNamespace, - ValidateFunc: 
validation.All( - validation.StringLenBetween(1, 63), - validation.StringMatch(regexache.MustCompile(`^[0-9A-Za-z_.-]*$`), "must contain only alphanumeric characters, hyphens, underscores, and periods"), - ), + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateDiagFunc: enum.Validate[awstypes.IdentityType](), }, + names.AttrNamespace: quicksightschema.NamespaceSchema(), "session_name": { Type: schema.TypeString, Optional: true, @@ -90,21 +70,14 @@ func resourceUser() *schema.Resource { names.AttrUserName: { Type: schema.TypeString, Optional: true, + Computed: true, ValidateFunc: validation.NoZeroValues, }, "user_role": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - // TODO ValidateDiagFunc: enum.Validate[awstypes.UserRole](), - ValidateFunc: validation.StringInSlice(enum.Slice( - awstypes.UserRoleReader, - awstypes.UserRoleAuthor, - awstypes.UserRoleAdmin, - awstypes.UserRoleReaderPro, - awstypes.UserRoleAuthorPro, - awstypes.UserRoleAdminPro, - ), false), + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateDiagFunc: enum.Validate[awstypes.UserRole](), }, } }, @@ -121,7 +94,7 @@ func resourceUserCreate(ctx context.Context, d *schema.ResourceData, meta any) d } email := d.Get(names.AttrEmail).(string) namespace := d.Get(names.AttrNamespace).(string) - input := &quicksight.RegisterUserInput{ + input := quicksight.RegisterUserInput{ AwsAccountId: aws.String(awsAccountID), Email: aws.String(email), IdentityType: awstypes.IdentityType(d.Get("identity_type").(string)), @@ -141,7 +114,7 @@ func resourceUserCreate(ctx context.Context, d *schema.ResourceData, meta any) d input.UserName = aws.String(v.(string)) } - output, err := conn.RegisterUser(ctx, input) + output, err := conn.RegisterUser(ctx, &input) if err != nil { return sdkdiag.AppendErrorf(diags, "registering QuickSight User (%s): %s", email, err) @@ -197,7 +170,7 @@ func resourceUserUpdate(ctx context.Context, d *schema.ResourceData, meta any) d return 
sdkdiag.AppendFromErr(diags, err) } - input := &quicksight.UpdateUserInput{ + input := quicksight.UpdateUserInput{ AwsAccountId: aws.String(awsAccountID), Email: aws.String(d.Get(names.AttrEmail).(string)), Namespace: aws.String(namespace), @@ -205,7 +178,7 @@ func resourceUserUpdate(ctx context.Context, d *schema.ResourceData, meta any) d UserName: aws.String(userName), } - _, err = conn.UpdateUser(ctx, input) + _, err = conn.UpdateUser(ctx, &input) if err != nil { return sdkdiag.AppendErrorf(diags, "updating QuickSight User (%s): %s", d.Id(), err) @@ -223,11 +196,13 @@ func resourceUserDelete(ctx context.Context, d *schema.ResourceData, meta any) d return sdkdiag.AppendFromErr(diags, err) } - _, err = conn.DeleteUser(ctx, &quicksight.DeleteUserInput{ + log.Printf("[INFO] Deleting QuickSight User: %s", d.Id()) + input := quicksight.DeleteUserInput{ AwsAccountId: aws.String(awsAccountID), Namespace: aws.String(namespace), UserName: aws.String(userName), - }) + } + _, err = conn.DeleteUser(ctx, &input) if errs.IsA[*awstypes.ResourceNotFoundException](err) { return diags @@ -260,13 +235,13 @@ func userParseResourceID(id string) (string, string, string, error) { } func findUserByThreePartKey(ctx context.Context, conn *quicksight.Client, awsAccountID, namespace, userName string) (*awstypes.User, error) { - input := &quicksight.DescribeUserInput{ + input := quicksight.DescribeUserInput{ AwsAccountId: aws.String(awsAccountID), Namespace: aws.String(namespace), UserName: aws.String(userName), } - return findUser(ctx, conn, input) + return findUser(ctx, conn, &input) } func findUser(ctx context.Context, conn *quicksight.Client, input *quicksight.DescribeUserInput) (*awstypes.User, error) { diff --git a/internal/service/quicksight/user_custom_permission.go b/internal/service/quicksight/user_custom_permission.go new file mode 100644 index 000000000000..da5330485a82 --- /dev/null +++ b/internal/service/quicksight/user_custom_permission.go @@ -0,0 +1,212 @@ +// Copyright (c) 
HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package quicksight + +import ( + "context" + "fmt" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/quicksight" + awstypes "github.com/aws/aws-sdk-go-v2/service/quicksight/types" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-provider-aws/internal/errs" + "github.com/hashicorp/terraform-provider-aws/internal/errs/fwdiag" + intflex "github.com/hashicorp/terraform-provider-aws/internal/flex" + "github.com/hashicorp/terraform-provider-aws/internal/framework" + fwflex "github.com/hashicorp/terraform-provider-aws/internal/framework/flex" + quicksightschema "github.com/hashicorp/terraform-provider-aws/internal/service/quicksight/schema" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + "github.com/hashicorp/terraform-provider-aws/names" +) + +// @FrameworkResource("aws_quicksight_user_custom_permission", name="User Custom Permission") +func newUserCustomPermissionResource(_ context.Context) (resource.ResourceWithConfigure, error) { + r := &userCustomPermissionResource{} + + return r, nil +} + +type userCustomPermissionResource struct { + framework.ResourceWithModel[userCustomPermissionResourceModel] +} + +func (r *userCustomPermissionResource) Schema(ctx context.Context, request resource.SchemaRequest, response *resource.SchemaResponse) { + response.Schema = schema.Schema{ + Attributes: map[string]schema.Attribute{ + names.AttrAWSAccountID: quicksightschema.AWSAccountIDAttribute(), + "custom_permissions_name": schema.StringAttribute{ + Required: true, + }, + 
names.AttrNamespace: quicksightschema.NamespaceAttribute(), + names.AttrUserName: schema.StringAttribute{ + Required: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + }, + }, + } +} + +func (r *userCustomPermissionResource) Create(ctx context.Context, request resource.CreateRequest, response *resource.CreateResponse) { + var data userCustomPermissionResourceModel + response.Diagnostics.Append(request.Plan.Get(ctx, &data)...) + if response.Diagnostics.HasError() { + return + } + if data.AWSAccountID.IsUnknown() { + data.AWSAccountID = fwflex.StringValueToFramework(ctx, r.Meta().AccountID(ctx)) + } + + conn := r.Meta().QuickSightClient(ctx) + + var input quicksight.UpdateUserCustomPermissionInput + response.Diagnostics.Append(fwflex.Expand(ctx, data, &input)...) + if response.Diagnostics.HasError() { + return + } + + _, err := conn.UpdateUserCustomPermission(ctx, &input) + + if err != nil { + response.Diagnostics.AddError(fmt.Sprintf("creating Quicksight User (%s) Custom Permission (%s)", data.UserName.ValueString(), data.CustomPermissionsName.ValueString()), err.Error()) + + return + } + + response.Diagnostics.Append(response.State.Set(ctx, data)...) +} + +func (r *userCustomPermissionResource) Read(ctx context.Context, request resource.ReadRequest, response *resource.ReadResponse) { + var data userCustomPermissionResourceModel + response.Diagnostics.Append(request.State.Get(ctx, &data)...) 
+ if response.Diagnostics.HasError() { + return + } + + conn := r.Meta().QuickSightClient(ctx) + + output, err := findUserCustomPermissionByThreePartKey(ctx, conn, data.AWSAccountID.ValueString(), data.Namespace.ValueString(), data.UserName.ValueString()) + + if tfresource.NotFound(err) { + response.Diagnostics.Append(fwdiag.NewResourceNotFoundWarningDiagnostic(err)) + response.State.RemoveResource(ctx) + + return + } + + if err != nil { + response.Diagnostics.AddError(fmt.Sprintf("reading Quicksight User (%s) Custom Permission", data.UserName.ValueString()), err.Error()) + + return + } + + // Set attributes for import. + data.CustomPermissionsName = fwflex.StringToFramework(ctx, output) + + response.Diagnostics.Append(response.State.Set(ctx, &data)...) + } + + func (r *userCustomPermissionResource) Update(ctx context.Context, request resource.UpdateRequest, response *resource.UpdateResponse) { + var new, old userCustomPermissionResourceModel + response.Diagnostics.Append(request.Plan.Get(ctx, &new)...) + if response.Diagnostics.HasError() { + return + } + response.Diagnostics.Append(request.State.Get(ctx, &old)...) + if response.Diagnostics.HasError() { + return + } + + conn := r.Meta().QuickSightClient(ctx) + + var input quicksight.UpdateUserCustomPermissionInput + response.Diagnostics.Append(fwflex.Expand(ctx, new, &input)...) + if response.Diagnostics.HasError() { + return + } + + _, err := conn.UpdateUserCustomPermission(ctx, &input) + + if err != nil { + response.Diagnostics.AddError(fmt.Sprintf("updating Quicksight User (%s) Custom Permission (%s)", new.UserName.ValueString(), new.CustomPermissionsName.ValueString()), err.Error()) + + return + } + + response.Diagnostics.Append(response.State.Set(ctx, &new)...) + } + + func (r *userCustomPermissionResource) Delete(ctx context.Context, request resource.DeleteRequest, response *resource.DeleteResponse) { + var data userCustomPermissionResourceModel + response.Diagnostics.Append(request.State.Get(ctx, &data)...) 
+ if response.Diagnostics.HasError() { + return + } + + conn := r.Meta().QuickSightClient(ctx) + + var input quicksight.DeleteUserCustomPermissionInput + response.Diagnostics.Append(fwflex.Expand(ctx, data, &input)...) + if response.Diagnostics.HasError() { + return + } + + _, err := conn.DeleteUserCustomPermission(ctx, &input) + + if errs.IsA[*awstypes.ResourceNotFoundException](err) { + return + } + + if err != nil { + response.Diagnostics.AddError(fmt.Sprintf("deleting Quicksight User (%s) Custom Permission (%s)", data.UserName.ValueString(), data.CustomPermissionsName.ValueString()), err.Error()) + + return + } +} + +func (r *userCustomPermissionResource) ImportState(ctx context.Context, request resource.ImportStateRequest, response *resource.ImportStateResponse) { + const ( + userCustomPermissionIDParts = 3 + ) + parts, err := intflex.ExpandResourceId(request.ID, userCustomPermissionIDParts, true) + + if err != nil { + response.Diagnostics.Append(fwdiag.NewParsingResourceIDErrorDiagnostic(err)) + + return + } + + response.Diagnostics.Append(response.State.SetAttribute(ctx, path.Root(names.AttrAWSAccountID), parts[0])...) + response.Diagnostics.Append(response.State.SetAttribute(ctx, path.Root(names.AttrNamespace), parts[1])...) + response.Diagnostics.Append(response.State.SetAttribute(ctx, path.Root(names.AttrUserName), parts[2])...) 
+} + +func findUserCustomPermissionByThreePartKey(ctx context.Context, conn *quicksight.Client, awsAccountID, namespace, userName string) (*string, error) { + output, err := findUserByThreePartKey(ctx, conn, awsAccountID, namespace, userName) + + if err != nil { + return nil, err + } + + if aws.ToString(output.CustomPermissionsName) == "" { + return nil, tfresource.NewEmptyResultError(nil) + } + + return output.CustomPermissionsName, nil +} + +type userCustomPermissionResourceModel struct { + framework.WithRegionModel + AWSAccountID types.String `tfsdk:"aws_account_id"` + CustomPermissionsName types.String `tfsdk:"custom_permissions_name"` + Namespace types.String `tfsdk:"namespace"` + UserName types.String `tfsdk:"user_name"` +} diff --git a/internal/service/quicksight/user_custom_permission_test.go b/internal/service/quicksight/user_custom_permission_test.go new file mode 100644 index 000000000000..34eecf69ab68 --- /dev/null +++ b/internal/service/quicksight/user_custom_permission_test.go @@ -0,0 +1,249 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package quicksight_test + +import ( + "context" + "fmt" + "testing" + + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/knownvalue" + "github.com/hashicorp/terraform-plugin-testing/plancheck" + "github.com/hashicorp/terraform-plugin-testing/statecheck" + "github.com/hashicorp/terraform-plugin-testing/terraform" + "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + tfknownvalue "github.com/hashicorp/terraform-provider-aws/internal/acctest/knownvalue" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + tfquicksight "github.com/hashicorp/terraform-provider-aws/internal/service/quicksight" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccQuickSightUserCustomPermission_basic(t *testing.T) { + ctx := acctest.Context(t) + rName := "tfacctest" + sdkacctest.RandString(10) + resourceName := "aws_quicksight_user_custom_permission.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckUserCustomPermissionDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccUserCustomPermissionConfig_basic(rName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckUserCustomPermissionExists(ctx, resourceName), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + 
PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrAWSAccountID), tfknownvalue.AccountID()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("custom_permissions_name"), knownvalue.StringExact(rName+"-perm")), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrNamespace), knownvalue.StringExact(tfquicksight.DefaultNamespace)), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrUserName), knownvalue.StringExact(rName)), + }, + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateIdFunc: testAccUserCustomPermissionImportStateID(resourceName), + ImportStateVerifyIdentifierAttribute: "custom_permissions_name", + }, + }, + }) +} + +func TestAccQuickSightUserCustomPermission_disappears(t *testing.T) { + ctx := acctest.Context(t) + rName := "tfacctest" + sdkacctest.RandString(10) + resourceName := "aws_quicksight_user_custom_permission.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckUserCustomPermissionDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccUserCustomPermissionConfig_basic(rName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckUserCustomPermissionExists(ctx, resourceName), + acctest.CheckFrameworkResourceDisappears(ctx, acctest.Provider, tfquicksight.ResourceUserCustomPermission, resourceName), + ), + ExpectNonEmptyPlan: true, + }, + }, + }) +} + +func TestAccQuickSightUserCustomPermission_update(t *testing.T) { + ctx := acctest.Context(t) + rName := "tfacctest" + sdkacctest.RandString(10) + resourceName := 
"aws_quicksight_user_custom_permission.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckUserCustomPermissionDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccUserCustomPermissionConfig_basic(rName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckUserCustomPermissionExists(ctx, resourceName), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrAWSAccountID), tfknownvalue.AccountID()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("custom_permissions_name"), knownvalue.StringExact(rName+"-perm")), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrNamespace), knownvalue.StringExact(tfquicksight.DefaultNamespace)), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrUserName), knownvalue.StringExact(rName)), + }, + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateIdFunc: testAccUserCustomPermissionImportStateID(resourceName), + ImportStateVerifyIdentifierAttribute: "custom_permissions_name", + }, + { + Config: testAccUserCustomPermissionConfig_updated(rName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckUserCustomPermissionExists(ctx, resourceName), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: 
[]plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrAWSAccountID), tfknownvalue.AccountID()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("custom_permissions_name"), knownvalue.StringExact(rName+"-perm2")), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrNamespace), knownvalue.StringExact(tfquicksight.DefaultNamespace)), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrUserName), knownvalue.StringExact(rName)), + }, + }, + }, + }) +} + +func testAccCheckUserCustomPermissionDestroy(ctx context.Context) resource.TestCheckFunc { + return func(s *terraform.State) error { + conn := acctest.Provider.Meta().(*conns.AWSClient).QuickSightClient(ctx) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "aws_quicksight_user_custom_permission" { + continue + } + + _, err := tfquicksight.FindUserCustomPermissionByThreePartKey(ctx, conn, rs.Primary.Attributes[names.AttrAWSAccountID], rs.Primary.Attributes[names.AttrNamespace], rs.Primary.Attributes[names.AttrUserName]) + + if tfresource.NotFound(err) { + continue + } + + if err != nil { + return err + } + + return fmt.Errorf("QuickSight User Custom Permission (%s) still exists", rs.Primary.Attributes[names.AttrUserName]) + } + + return nil + } +} + +func testAccCheckUserCustomPermissionExists(ctx context.Context, n string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + conn := 
acctest.Provider.Meta().(*conns.AWSClient).QuickSightClient(ctx) + + _, err := tfquicksight.FindUserCustomPermissionByThreePartKey(ctx, conn, rs.Primary.Attributes[names.AttrAWSAccountID], rs.Primary.Attributes[names.AttrNamespace], rs.Primary.Attributes[names.AttrUserName]) + + return err + } +} + +func testAccUserCustomPermissionImportStateID(n string) resource.ImportStateIdFunc { + return func(s *terraform.State) (string, error) { + return acctest.AttrsImportStateIdFunc(n, ",", names.AttrAWSAccountID, names.AttrNamespace, names.AttrUserName)(s) + } +} + +func testAccUserCustomPermissionConfig_basic(rName string) string { + return acctest.ConfigCompose(testAccCustomPermissionsConfig_basic(rName+"-perm"), fmt.Sprintf(` +resource "aws_quicksight_user" "test" { + user_name = %[1]q + email = %[2]q + identity_type = "QUICKSIGHT" + user_role = "READER" +} + +resource "aws_quicksight_user_custom_permission" "test" { + user_name = aws_quicksight_user.test.user_name + custom_permissions_name = aws_quicksight_custom_permissions.test.custom_permissions_name +} +`, rName, acctest.DefaultEmailAddress)) +} + +func testAccUserCustomPermissionConfig_updated(rName string) string { + return acctest.ConfigCompose(testAccCustomPermissionsConfig_basic(rName+"-perm"), fmt.Sprintf(` +resource "aws_quicksight_custom_permissions" "test2" { + custom_permissions_name = "%[1]s-perm2" + + capabilities { + create_and_update_datasets = "DENY" + create_and_update_data_sources = "DENY" + export_to_pdf = "DENY" + } +} + +resource "aws_quicksight_user" "test" { + user_name = %[1]q + email = %[2]q + identity_type = "QUICKSIGHT" + user_role = "READER" +} + +resource "aws_quicksight_user_custom_permission" "test" { + user_name = aws_quicksight_user.test.user_name + custom_permissions_name = aws_quicksight_custom_permissions.test2.custom_permissions_name +} +`, rName, acctest.DefaultEmailAddress)) +} diff --git a/internal/service/quicksight/user_data_source.go 
b/internal/service/quicksight/user_data_source.go index 773f4a5562f4..3064a696c004 100644 --- a/internal/service/quicksight/user_data_source.go +++ b/internal/service/quicksight/user_data_source.go @@ -6,12 +6,11 @@ package quicksight import ( "context" - "github.com/YakDriver/regexache" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" + quicksightschema "github.com/hashicorp/terraform-provider-aws/internal/service/quicksight/schema" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -30,9 +29,9 @@ func dataSourceUser() *schema.Resource { Type: schema.TypeString, Computed: true, }, - names.AttrAWSAccountID: { + names.AttrAWSAccountID: quicksightschema.AWSAccountIDDataSourceSchema(), + "custom_permissions_name": { Type: schema.TypeString, - Optional: true, Computed: true, }, names.AttrEmail: { @@ -43,15 +42,7 @@ func dataSourceUser() *schema.Resource { Type: schema.TypeString, Computed: true, }, - names.AttrNamespace: { - Type: schema.TypeString, - Optional: true, - Default: defaultUserNamespace, - ValidateFunc: validation.All( - validation.StringLenBetween(1, 63), - validation.StringMatch(regexache.MustCompile(`^[0-9A-Za-z_.-]*$`), "must contain only alphanumeric characters, hyphens, underscores, and periods"), - ), - }, + names.AttrNamespace: quicksightschema.NamespaceDataSourceSchema(), "principal_id": { Type: schema.TypeString, Computed: true, @@ -91,6 +82,7 @@ func dataSourceUserRead(ctx context.Context, d *schema.ResourceData, meta any) d d.Set("active", user.Active) d.Set(names.AttrARN, user.Arn) d.Set(names.AttrAWSAccountID, awsAccountID) + d.Set("custom_permissions_name", user.CustomPermissionsName) d.Set(names.AttrEmail, user.Email) d.Set("identity_type", user.IdentityType) d.Set("principal_id", 
user.PrincipalId) diff --git a/internal/service/quicksight/user_data_source_test.go b/internal/service/quicksight/user_data_source_test.go index 9a2ecb1f32ea..b9b5ab00c040 100644 --- a/internal/service/quicksight/user_data_source_test.go +++ b/internal/service/quicksight/user_data_source_test.go @@ -29,12 +29,13 @@ func TestAccQuickSightUserDataSource_basic(t *testing.T) { ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, Steps: []resource.TestStep{ { - Config: testAccUserDataSourceConfig(rName), - Check: resource.ComposeTestCheckFunc( + Config: testAccUserDataSourceConfig_basic(rName), + Check: resource.ComposeAggregateTestCheckFunc( resource.TestCheckResourceAttrPair(dataSourceName, names.AttrUserName, resourceName, names.AttrUserName), resource.TestCheckResourceAttrPair(dataSourceName, names.AttrARN, resourceName, names.AttrARN), + resource.TestCheckResourceAttr(dataSourceName, "custom_permissions_name", ""), resource.TestCheckResourceAttr(dataSourceName, names.AttrEmail, acctest.DefaultEmailAddress), - resource.TestCheckResourceAttr(dataSourceName, names.AttrNamespace, tfquicksight.DefaultUserNamespace), + resource.TestCheckResourceAttr(dataSourceName, names.AttrNamespace, tfquicksight.DefaultNamespace), resource.TestCheckResourceAttr(dataSourceName, "identity_type", string(awstypes.IdentityTypeQuicksight)), resource.TestCheckResourceAttrSet(dataSourceName, "principal_id"), resource.TestCheckResourceAttr(dataSourceName, "user_role", string(awstypes.UserRoleReader)), @@ -44,7 +45,7 @@ func TestAccQuickSightUserDataSource_basic(t *testing.T) { }) } -func testAccUserDataSourceConfig(rName string) string { +func testAccUserDataSourceConfig_basic(rName string) string { return fmt.Sprintf(` resource "aws_quicksight_user" "test" { user_name = %[1]q diff --git a/internal/service/quicksight/user_test.go b/internal/service/quicksight/user_test.go index 3005af42b657..7438713979ff 100644 --- a/internal/service/quicksight/user_test.go +++ 
b/internal/service/quicksight/user_test.go @@ -6,7 +6,6 @@ package quicksight_test import ( "context" "fmt" - "os" "testing" awstypes "github.com/aws/aws-sdk-go-v2/service/quicksight/types" @@ -94,12 +93,7 @@ func TestAccQuickSightUser_withInvalidFormattedEmailStillWorks(t *testing.T) { func TestAccQuickSightUser_withNamespace(t *testing.T) { ctx := acctest.Context(t) - key := "QUICKSIGHT_NAMESPACE" - namespace := os.Getenv(key) - if namespace == "" { - t.Skipf("Environment variable %s is not set", key) - } - + namespace := acctest.SkipIfEnvVarNotSet(t, "QUICKSIGHT_NAMESPACE") var user awstypes.User rName := "tfacctest" + sdkacctest.RandString(10) resourceName := "aws_quicksight_user." + rName diff --git a/internal/service/quicksight/vpc_connection.go b/internal/service/quicksight/vpc_connection.go index d540f3ef21f0..11e7610ea0eb 100644 --- a/internal/service/quicksight/vpc_connection.go +++ b/internal/service/quicksight/vpc_connection.go @@ -28,8 +28,9 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/enum" "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/framework" - "github.com/hashicorp/terraform-provider-aws/internal/framework/flex" + fwflex "github.com/hashicorp/terraform-provider-aws/internal/framework/flex" fwtypes "github.com/hashicorp/terraform-provider-aws/internal/framework/types" + quicksightschema "github.com/hashicorp/terraform-provider-aws/internal/service/quicksight/schema" tfslices "github.com/hashicorp/terraform-provider-aws/internal/slices" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" @@ -63,16 +64,9 @@ const ( func (r *vpcConnectionResource) Schema(ctx context.Context, req resource.SchemaRequest, resp *resource.SchemaResponse) { resp.Schema = schema.Schema{ Attributes: map[string]schema.Attribute{ - names.AttrARN: framework.ARNAttributeComputedOnly(), - names.AttrAWSAccountID: 
schema.StringAttribute{ - Optional: true, - Computed: true, - PlanModifiers: []planmodifier.String{ - stringplanmodifier.UseStateForUnknown(), - stringplanmodifier.RequiresReplace(), - }, - }, - names.AttrID: framework.IDAttribute(), + names.AttrARN: framework.ARNAttributeComputedOnly(), + names.AttrAWSAccountID: quicksightschema.AWSAccountIDAttribute(), + names.AttrID: framework.IDAttribute(), "vpc_connection_id": schema.StringAttribute{ Required: true, PlanModifiers: []planmodifier.String{ @@ -155,37 +149,37 @@ func (r *vpcConnectionResource) Schema(ctx context.Context, req resource.SchemaR } func (r *vpcConnectionResource) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { - conn := r.Meta().QuickSightClient(ctx) - - var plan vpcConnectionResourceModel - resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...) + var data vpcConnectionResourceModel + resp.Diagnostics.Append(req.Plan.Get(ctx, &data)...) if resp.Diagnostics.HasError() { return } - - if plan.AWSAccountID.IsUnknown() || plan.AWSAccountID.IsNull() { - plan.AWSAccountID = types.StringValue(r.Meta().AccountID(ctx)) + if data.AWSAccountID.IsUnknown() { + data.AWSAccountID = fwflex.StringValueToFramework(ctx, r.Meta().AccountID(ctx)) } - awsAccountID, vpcConnectionID := flex.StringValueFromFramework(ctx, plan.AWSAccountID), flex.StringValueFromFramework(ctx, plan.VPCConnectionID) + + conn := r.Meta().QuickSightClient(ctx) + + awsAccountID, vpcConnectionID := fwflex.StringValueFromFramework(ctx, data.AWSAccountID), fwflex.StringValueFromFramework(ctx, data.VPCConnectionID) in := &quicksight.CreateVPCConnectionInput{ AwsAccountId: aws.String(awsAccountID), - Name: plan.Name.ValueStringPointer(), - RoleArn: plan.RoleArn.ValueStringPointer(), - SecurityGroupIds: flex.ExpandFrameworkStringValueSet(ctx, plan.SecurityGroupIDs), - SubnetIds: flex.ExpandFrameworkStringValueSet(ctx, plan.SubnetIDs), + Name: data.Name.ValueStringPointer(), + RoleArn: 
data.RoleArn.ValueStringPointer(), + SecurityGroupIds: fwflex.ExpandFrameworkStringValueSet(ctx, data.SecurityGroupIDs), + SubnetIds: fwflex.ExpandFrameworkStringValueSet(ctx, data.SubnetIDs), Tags: getTagsIn(ctx), VPCConnectionId: aws.String(vpcConnectionID), } - if !plan.DNSResolvers.IsNull() { - in.DnsResolvers = flex.ExpandFrameworkStringValueSet(ctx, plan.DNSResolvers) + if !data.DNSResolvers.IsNull() { + in.DnsResolvers = fwflex.ExpandFrameworkStringValueSet(ctx, data.DNSResolvers) } // account for IAM propagation when attempting to assume role out, err := retryVPCConnectionCreate(ctx, conn, in) if err != nil { resp.Diagnostics.AddError( - create.ProblemStandardMessage(names.QuickSight, create.ErrActionCreating, resNameVPCConnection, plan.Name.String(), err), + create.ProblemStandardMessage(names.QuickSight, create.ErrActionCreating, resNameVPCConnection, data.Name.String(), err), err.Error(), ) return @@ -193,27 +187,27 @@ func (r *vpcConnectionResource) Create(ctx context.Context, req resource.CreateR if out == nil { resp.Diagnostics.AddError( - create.ProblemStandardMessage(names.QuickSight, create.ErrActionCreating, resNameVPCConnection, plan.Name.String(), nil), + create.ProblemStandardMessage(names.QuickSight, create.ErrActionCreating, resNameVPCConnection, data.Name.String(), nil), errors.New("empty output").Error(), ) return } - plan.ID = flex.StringValueToFramework(ctx, vpcConnectionCreateResourceID(awsAccountID, vpcConnectionID)) + data.ID = fwflex.StringValueToFramework(ctx, vpcConnectionCreateResourceID(awsAccountID, vpcConnectionID)) - waitOut, err := waitVPCConnectionCreated(ctx, conn, awsAccountID, vpcConnectionID, r.CreateTimeout(ctx, plan.Timeouts)) + waitOut, err := waitVPCConnectionCreated(ctx, conn, awsAccountID, vpcConnectionID, r.CreateTimeout(ctx, data.Timeouts)) if err != nil { resp.Diagnostics.AddError( - create.ProblemStandardMessage(names.QuickSight, create.ErrActionWaitingForCreation, resNameVPCConnection, plan.Name.String(), err), 
+ create.ProblemStandardMessage(names.QuickSight, create.ErrActionWaitingForCreation, resNameVPCConnection, data.Name.String(), err), err.Error(), ) return } - plan.ARN = flex.StringToFramework(ctx, waitOut.Arn) - plan.AvailabilityStatus = flex.StringValueToFramework(ctx, waitOut.AvailabilityStatus) + data.ARN = fwflex.StringToFramework(ctx, waitOut.Arn) + data.AvailabilityStatus = fwflex.StringValueToFramework(ctx, waitOut.AvailabilityStatus) - resp.Diagnostics.Append(resp.State.Set(ctx, plan)...) + resp.Diagnostics.Append(resp.State.Set(ctx, data)...) } func (r *vpcConnectionResource) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { @@ -247,15 +241,15 @@ func (r *vpcConnectionResource) Read(ctx context.Context, req resource.ReadReque return } - state.AWSAccountID = flex.StringValueToFramework(ctx, awsAccountID) - state.VPCConnectionID = flex.StringValueToFramework(ctx, vpcConnectionID) - state.ARN = flex.StringToFramework(ctx, out.Arn) - state.Name = flex.StringToFramework(ctx, out.Name) - state.RoleArn = flex.StringToFramework(ctx, out.RoleArn) - state.SecurityGroupIDs = flex.FlattenFrameworkStringValueSetOfString(ctx, out.SecurityGroupIds) - state.DNSResolvers = flex.FlattenFrameworkStringValueSetOfString(ctx, out.DnsResolvers) - state.AvailabilityStatus = flex.StringValueToFramework(ctx, out.AvailabilityStatus) - state.SubnetIDs = flex.FlattenFrameworkStringValueSetOfString(ctx, tfslices.ApplyToAll(out.NetworkInterfaces, func(v awstypes.NetworkInterface) string { + state.AWSAccountID = fwflex.StringValueToFramework(ctx, awsAccountID) + state.VPCConnectionID = fwflex.StringValueToFramework(ctx, vpcConnectionID) + state.ARN = fwflex.StringToFramework(ctx, out.Arn) + state.Name = fwflex.StringToFramework(ctx, out.Name) + state.RoleArn = fwflex.StringToFramework(ctx, out.RoleArn) + state.SecurityGroupIDs = fwflex.FlattenFrameworkStringValueSetOfString(ctx, out.SecurityGroupIds) + state.DNSResolvers = 
fwflex.FlattenFrameworkStringValueSetOfString(ctx, out.DnsResolvers) + state.AvailabilityStatus = fwflex.StringValueToFramework(ctx, out.AvailabilityStatus) + state.SubnetIDs = fwflex.FlattenFrameworkStringValueSetOfString(ctx, tfslices.ApplyToAll(out.NetworkInterfaces, func(v awstypes.NetworkInterface) string { return aws.ToString(v.SubnetId) })) @@ -290,13 +284,13 @@ func (r *vpcConnectionResource) Update(ctx context.Context, req resource.UpdateR AwsAccountId: aws.String(awsAccountID), Name: plan.Name.ValueStringPointer(), RoleArn: plan.RoleArn.ValueStringPointer(), - SecurityGroupIds: flex.ExpandFrameworkStringValueSet(ctx, plan.SecurityGroupIDs), - SubnetIds: flex.ExpandFrameworkStringValueSet(ctx, plan.SubnetIDs), + SecurityGroupIds: fwflex.ExpandFrameworkStringValueSet(ctx, plan.SecurityGroupIDs), + SubnetIds: fwflex.ExpandFrameworkStringValueSet(ctx, plan.SubnetIDs), VPCConnectionId: aws.String(vpcConnectionID), } if !plan.DNSResolvers.IsNull() { - in.DnsResolvers = flex.ExpandFrameworkStringValueSet(ctx, plan.DNSResolvers) + in.DnsResolvers = fwflex.ExpandFrameworkStringValueSet(ctx, plan.DNSResolvers) } out, err := conn.UpdateVPCConnection(ctx, &in) @@ -427,7 +421,7 @@ func findVPCConnection(ctx context.Context, conn *quicksight.Client, input *quic func retryVPCConnectionCreate(ctx context.Context, conn *quicksight.Client, in *quicksight.CreateVPCConnectionInput) (*quicksight.CreateVPCConnectionOutput, error) { outputRaw, err := tfresource.RetryWhen(ctx, iamPropagationTimeout, - func() (any, error) { + func(ctx context.Context) (any, error) { return conn.CreateVPCConnection(ctx, in) }, func(err error) (bool, error) { diff --git a/internal/service/quicksight/vpc_connection_tags_gen_test.go b/internal/service/quicksight/vpc_connection_tags_gen_test.go index dddef8615e3d..8db5e4f0b754 100644 --- a/internal/service/quicksight/vpc_connection_tags_gen_test.go +++ b/internal/service/quicksight/vpc_connection_tags_gen_test.go @@ -7,7 +7,6 @@ import ( awstypes 
"github.com/aws/aws-sdk-go-v2/service/quicksight/types" "github.com/hashicorp/terraform-plugin-testing/config" - sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/knownvalue" "github.com/hashicorp/terraform-plugin-testing/plancheck" @@ -19,11 +18,12 @@ import ( func TestAccQuickSightVPCConnection_tags(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.VPCConnection resourceName := "aws_quicksight_vpc_connection.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckVPCConnectionDestroy(ctx), @@ -203,11 +203,12 @@ func TestAccQuickSightVPCConnection_tags_null(t *testing.T) { t.Skip("Resource VPCConnection does not support null tags") ctx := acctest.Context(t) + var v awstypes.VPCConnection resourceName := "aws_quicksight_vpc_connection.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckVPCConnectionDestroy(ctx), @@ -265,11 +266,12 @@ func TestAccQuickSightVPCConnection_tags_null(t *testing.T) { func TestAccQuickSightVPCConnection_tags_EmptyMap(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.VPCConnection resourceName := "aws_quicksight_vpc_connection.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, 
resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckVPCConnectionDestroy(ctx), @@ -315,11 +317,12 @@ func TestAccQuickSightVPCConnection_tags_EmptyMap(t *testing.T) { func TestAccQuickSightVPCConnection_tags_AddOnUpdate(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.VPCConnection resourceName := "aws_quicksight_vpc_connection.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckVPCConnectionDestroy(ctx), @@ -397,11 +400,12 @@ func TestAccQuickSightVPCConnection_tags_EmptyTag_OnCreate(t *testing.T) { t.Skip("Resource VPCConnection does not support empty tags") ctx := acctest.Context(t) + var v awstypes.VPCConnection resourceName := "aws_quicksight_vpc_connection.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckVPCConnectionDestroy(ctx), @@ -489,11 +493,12 @@ func TestAccQuickSightVPCConnection_tags_EmptyTag_OnUpdate_Add(t *testing.T) { t.Skip("Resource VPCConnection does not support empty tags") ctx := acctest.Context(t) + var v awstypes.VPCConnection resourceName := "aws_quicksight_vpc_connection.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + 
acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckVPCConnectionDestroy(ctx), @@ -630,11 +635,12 @@ func TestAccQuickSightVPCConnection_tags_EmptyTag_OnUpdate_Replace(t *testing.T) t.Skip("Resource VPCConnection does not support empty tags") ctx := acctest.Context(t) + var v awstypes.VPCConnection resourceName := "aws_quicksight_vpc_connection.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckVPCConnectionDestroy(ctx), @@ -720,11 +726,12 @@ func TestAccQuickSightVPCConnection_tags_EmptyTag_OnUpdate_Replace(t *testing.T) func TestAccQuickSightVPCConnection_tags_DefaultTags_providerOnly(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.VPCConnection resourceName := "aws_quicksight_vpc_connection.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckVPCConnectionDestroy(ctx), @@ -901,11 +908,12 @@ func TestAccQuickSightVPCConnection_tags_DefaultTags_providerOnly(t *testing.T) func TestAccQuickSightVPCConnection_tags_DefaultTags_nonOverlapping(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.VPCConnection resourceName := "aws_quicksight_vpc_connection.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, 
resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckVPCConnectionDestroy(ctx), @@ -1061,11 +1069,12 @@ func TestAccQuickSightVPCConnection_tags_DefaultTags_nonOverlapping(t *testing.T func TestAccQuickSightVPCConnection_tags_DefaultTags_overlapping(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.VPCConnection resourceName := "aws_quicksight_vpc_connection.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckVPCConnectionDestroy(ctx), @@ -1237,11 +1246,12 @@ func TestAccQuickSightVPCConnection_tags_DefaultTags_overlapping(t *testing.T) { func TestAccQuickSightVPCConnection_tags_DefaultTags_updateToProviderOnly(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.VPCConnection resourceName := "aws_quicksight_vpc_connection.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckVPCConnectionDestroy(ctx), @@ -1327,11 +1337,12 @@ func TestAccQuickSightVPCConnection_tags_DefaultTags_updateToProviderOnly(t *tes func TestAccQuickSightVPCConnection_tags_DefaultTags_updateToResourceOnly(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.VPCConnection resourceName := "aws_quicksight_vpc_connection.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := 
acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckVPCConnectionDestroy(ctx), @@ -1418,11 +1429,12 @@ func TestAccQuickSightVPCConnection_tags_DefaultTags_emptyResourceTag(t *testing t.Skip("Resource VPCConnection does not support empty tags") ctx := acctest.Context(t) + var v awstypes.VPCConnection resourceName := "aws_quicksight_vpc_connection.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckVPCConnectionDestroy(ctx), @@ -1486,11 +1498,12 @@ func TestAccQuickSightVPCConnection_tags_DefaultTags_emptyProviderOnlyTag(t *tes t.Skip("Resource VPCConnection does not support empty tags") ctx := acctest.Context(t) + var v awstypes.VPCConnection resourceName := "aws_quicksight_vpc_connection.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckVPCConnectionDestroy(ctx), @@ -1546,11 +1559,12 @@ func TestAccQuickSightVPCConnection_tags_DefaultTags_nullOverlappingResourceTag( t.Skip("Resource VPCConnection does not support null tags") ctx := acctest.Context(t) + var v awstypes.VPCConnection resourceName := "aws_quicksight_vpc_connection.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := 
acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckVPCConnectionDestroy(ctx), @@ -1617,11 +1631,12 @@ func TestAccQuickSightVPCConnection_tags_DefaultTags_nullNonOverlappingResourceT t.Skip("Resource VPCConnection does not support null tags") ctx := acctest.Context(t) + var v awstypes.VPCConnection resourceName := "aws_quicksight_vpc_connection.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckVPCConnectionDestroy(ctx), @@ -1688,11 +1703,12 @@ func TestAccQuickSightVPCConnection_tags_DefaultTags_nullNonOverlappingResourceT func TestAccQuickSightVPCConnection_tags_ComputedTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.VPCConnection resourceName := "aws_quicksight_vpc_connection.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckVPCConnectionDestroy(ctx), @@ -1743,11 +1759,12 @@ func TestAccQuickSightVPCConnection_tags_ComputedTag_OnCreate(t *testing.T) { func TestAccQuickSightVPCConnection_tags_ComputedTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.VPCConnection resourceName := "aws_quicksight_vpc_connection.test" - rName := 
sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckVPCConnectionDestroy(ctx), @@ -1840,11 +1857,12 @@ func TestAccQuickSightVPCConnection_tags_ComputedTag_OnUpdate_Add(t *testing.T) func TestAccQuickSightVPCConnection_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.VPCConnection resourceName := "aws_quicksight_vpc_connection.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckVPCConnectionDestroy(ctx), @@ -1927,11 +1945,12 @@ func TestAccQuickSightVPCConnection_tags_ComputedTag_OnUpdate_Replace(t *testing func TestAccQuickSightVPCConnection_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.VPCConnection resourceName := "aws_quicksight_vpc_connection.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckVPCConnectionDestroy(ctx), @@ -2089,11 +2108,12 @@ func TestAccQuickSightVPCConnection_tags_IgnoreTags_Overlap_DefaultTag(t *testin func TestAccQuickSightVPCConnection_tags_IgnoreTags_Overlap_ResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.VPCConnection 
resourceName := "aws_quicksight_vpc_connection.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.QuickSightServiceID), CheckDestroy: testAccCheckVPCConnectionDestroy(ctx), diff --git a/internal/service/ram/resource_share.go b/internal/service/ram/resource_share.go index b7912c1d6934..0c2f87890ae4 100644 --- a/internal/service/ram/resource_share.go +++ b/internal/service/ram/resource_share.go @@ -102,7 +102,7 @@ func resourceResourceShareCreate(ctx context.Context, d *schema.ResourceData, me d.SetId(aws.ToString(output.ResourceShare.ResourceShareArn)) - _, err = tfresource.RetryWhenNotFound(ctx, resourceSharePropagationTimeout, func() (any, error) { + _, err = tfresource.RetryWhenNotFound(ctx, resourceSharePropagationTimeout, func(ctx context.Context) (any, error) { return findResourceShareOwnerSelfByARN(ctx, conn, d.Id()) }) diff --git a/internal/service/ram/resource_share_accepter.go b/internal/service/ram/resource_share_accepter.go index 84a0e49a6a4a..7c83d51936b1 100644 --- a/internal/service/ram/resource_share_accepter.go +++ b/internal/service/ram/resource_share_accepter.go @@ -132,7 +132,7 @@ func resourceResourceShareAccepterCreate(ctx context.Context, d *schema.Resource return sdkdiag.AppendErrorf(diags, "waiting for RAM Resource Share (%s) invitation (%s) accept: %s", shareARN, invitationARN, err) } - _, err = tfresource.RetryWhenNotFound(ctx, resourceSharePropagationTimeout, func() (any, error) { + _, err = tfresource.RetryWhenNotFound(ctx, resourceSharePropagationTimeout, func(ctx context.Context) (any, error) { return findResourceShareOwnerOtherAccountsByARN(ctx, conn, d.Id()) }) @@ -294,25 +294,21 @@ func findMaybeResourceShareInvitationRetry(ctx context.Context, conn *ram.Client // Retry for 
RAM resource share invitation eventual consistency. errNotFound := errors.New("not found") var output option.Option[awstypes.ResourceShareInvitation] - err := tfresource.Retry(ctx, resourceShareInvitationPropagationTimeout, func() *retry.RetryError { + err := tfresource.Retry(ctx, resourceShareInvitationPropagationTimeout, func(ctx context.Context) *tfresource.RetryError { var err error output, err = findMaybeResourceShareInvitation(ctx, conn, input, filter) if err != nil { - return retry.NonRetryableError(err) + return tfresource.NonRetryableError(err) } if output.IsNone() { - return retry.RetryableError(errNotFound) + return tfresource.RetryableError(errNotFound) } return nil }) - if tfresource.TimedOut(err) { - output, err = findMaybeResourceShareInvitation(ctx, conn, input, filter) - } - if errors.Is(err, errNotFound) { output, err = option.None[awstypes.ResourceShareInvitation](), nil } diff --git a/internal/service/ram/service_endpoint_resolver_gen.go b/internal/service/ram/service_endpoint_resolver_gen.go index 959f8113a33e..56fe3c78e527 100644 --- a/internal/service/ram/service_endpoint_resolver_gen.go +++ b/internal/service/ram/service_endpoint_resolver_gen.go @@ -62,7 +62,7 @@ func (r resolverV2) ResolveEndpoint(ctx context.Context, params ram.EndpointPara }) params.UseFIPS = aws.Bool(false) } else { - err = fmt.Errorf("looking up ram endpoint %q: %s", hostname, err) + err = fmt.Errorf("looking up ram endpoint %q: %w", hostname, err) return } } else { diff --git a/internal/service/ram/service_endpoints_gen_test.go b/internal/service/ram/service_endpoints_gen_test.go index e8b3eb7bd383..bd45bfd197b3 100644 --- a/internal/service/ram/service_endpoints_gen_test.go +++ b/internal/service/ram/service_endpoints_gen_test.go @@ -521,7 +521,7 @@ func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { ) } -var errCancelOperation = fmt.Errorf("Test: Canceling request") +var errCancelOperation = errors.New("Test: Canceling request") func 
addCancelRequestMiddleware() func(*middleware.Stack) error { return func(stack *middleware.Stack) error { diff --git a/internal/service/ram/service_package_gen.go b/internal/service/ram/service_package_gen.go index 47ce79fa1702..5e341532fdc4 100644 --- a/internal/service/ram/service_package_gen.go +++ b/internal/service/ram/service_package_gen.go @@ -7,7 +7,6 @@ import ( "unique" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/ram" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -99,7 +98,7 @@ func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) ( func(o *ram.Options) { if inContext, ok := conns.FromContext(ctx); ok && inContext.VCREnabled() { tflog.Info(ctx, "overriding retry behavior to immediately return VCR errors") - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(vcr.InteractionNotFoundRetryableFunc)) + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), vcr.InteractionNotFoundRetryableFunc) } }, withExtraOptions(ctx, p, config), diff --git a/internal/service/ram/sweep.go b/internal/service/ram/sweep.go index 2aaf62d8ac32..a87872806aae 100644 --- a/internal/service/ram/sweep.go +++ b/internal/service/ram/sweep.go @@ -26,7 +26,7 @@ func sweepResourceShares(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.RAMClient(ctx) input := &ram.GetResourceSharesInput{ diff --git a/internal/service/ram/tags_gen.go b/internal/service/ram/tags_gen.go index e304633387b6..bab7a1628c1e 100644 --- a/internal/service/ram/tags_gen.go +++ b/internal/service/ram/tags_gen.go @@ -3,8 +3,8 @@ package ram import ( "context" - "fmt" + "github.com/YakDriver/smarterr" 
"github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/ram" awstypes "github.com/aws/aws-sdk-go-v2/service/ram/types" @@ -84,7 +84,7 @@ func updateTags(ctx context.Context, conn *ram.Client, identifier string, oldTag _, err := conn.UntagResource(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("untagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } @@ -99,7 +99,7 @@ func updateTags(ctx context.Context, conn *ram.Client, identifier string, oldTag _, err := conn.TagResource(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("tagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } diff --git a/internal/service/rbin/rule.go b/internal/service/rbin/rule.go index 1ee7a642f1f5..3c586c1d5aeb 100644 --- a/internal/service/rbin/rule.go +++ b/internal/service/rbin/rule.go @@ -12,13 +12,13 @@ import ( "github.com/aws/aws-sdk-go-v2/service/rbin" "github.com/aws/aws-sdk-go-v2/service/rbin/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/enum" "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" + "github.com/hashicorp/terraform-provider-aws/internal/retry" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/names" @@ -213,7 +213,7 @@ func resourceRuleRead(ctx context.Context, d *schema.ResourceData, meta any) dia output, err := findRuleByID(ctx, conn, d.Id()) - if !d.IsNewResource() && tfresource.NotFound(err) { + if !d.IsNewResource() && retry.NotFound(err) { log.Printf("[WARN] RBin 
Rule (%s) not found, removing from state", d.Id()) d.SetId("") return diags @@ -322,8 +322,7 @@ func findRuleByID(ctx context.Context, conn *rbin.Client, id string) (*rbin.GetR if errs.IsA[*types.ResourceNotFoundException](err) { return nil, &retry.NotFoundError{ - LastError: err, - LastRequest: input, + LastError: err, } } @@ -338,8 +337,8 @@ func findRuleByID(ctx context.Context, conn *rbin.Client, id string) (*rbin.GetR return output, nil } -func statusRule(ctx context.Context, conn *rbin.Client, id string) retry.StateRefreshFunc { - return func() (any, string, error) { +func statusRule(conn *rbin.Client, id string) retry.StateRefreshFunc { + return func(ctx context.Context) (any, string, error) { output, err := findRuleByID(ctx, conn, id) if tfresource.NotFound(err) { @@ -358,7 +357,7 @@ func waitRuleCreated(ctx context.Context, conn *rbin.Client, id string, timeout stateConf := &retry.StateChangeConf{ Pending: enum.Slice(types.RuleStatusPending), Target: enum.Slice(types.RuleStatusAvailable), - Refresh: statusRule(ctx, conn, id), + Refresh: statusRule(conn, id), Timeout: timeout, ContinuousTargetOccurence: 2, } @@ -376,7 +375,7 @@ func waitRuleUpdated(ctx context.Context, conn *rbin.Client, id string, timeout stateConf := &retry.StateChangeConf{ Pending: enum.Slice(types.RuleStatusPending), Target: enum.Slice(types.RuleStatusAvailable), - Refresh: statusRule(ctx, conn, id), + Refresh: statusRule(conn, id), Timeout: timeout, ContinuousTargetOccurence: 2, } @@ -394,7 +393,7 @@ func waitRuleDeleted(ctx context.Context, conn *rbin.Client, id string, timeout stateConf := &retry.StateChangeConf{ Pending: enum.Slice(types.RuleStatusPending, types.RuleStatusAvailable), Target: []string{}, - Refresh: statusRule(ctx, conn, id), + Refresh: statusRule(conn, id), Timeout: timeout, } diff --git a/internal/service/rbin/rule_test.go b/internal/service/rbin/rule_test.go index 1ecb10c54782..23aedf7d7eec 100644 --- a/internal/service/rbin/rule_test.go +++ 
b/internal/service/rbin/rule_test.go @@ -12,9 +12,8 @@ import ( "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" "github.com/hashicorp/terraform-provider-aws/internal/acctest" - "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/retry" tfrbin "github.com/hashicorp/terraform-provider-aws/internal/service/rbin" - "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -25,19 +24,19 @@ func TestAccRBinRule_basic(t *testing.T) { resourceType := "EBS_SNAPSHOT" resourceName := "aws_rbin_rule.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) acctest.PreCheckPartitionHasService(t, rbin.ServiceID) }, ErrorCheck: acctest.ErrorCheck(t, rbin.ServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckRuleDestroy(ctx), + CheckDestroy: testAccCheckRuleDestroy(ctx, t), Steps: []resource.TestStep{ { Config: testAccRuleConfig_basic1(description, resourceType), Check: resource.ComposeTestCheckFunc( - testAccCheckRuleExists(ctx, resourceName, &rule), + testAccCheckRuleExists(ctx, t, resourceName, &rule), resource.TestCheckResourceAttr(resourceName, names.AttrDescription, description), resource.TestCheckResourceAttr(resourceName, "exclude_resource_tags.#", "0"), resource.TestCheckResourceAttr(resourceName, names.AttrResourceType, resourceType), @@ -59,7 +58,7 @@ func TestAccRBinRule_basic(t *testing.T) { { Config: testAccRuleConfig_basic2(description, resourceType), Check: resource.ComposeTestCheckFunc( - testAccCheckRuleExists(ctx, resourceName, &rule), + testAccCheckRuleExists(ctx, t, resourceName, &rule), resource.TestCheckResourceAttr(resourceName, names.AttrDescription, description), resource.TestCheckResourceAttr(resourceName, 
"exclude_resource_tags.#", "0"), resource.TestCheckResourceAttr(resourceName, names.AttrResourceType, resourceType), @@ -93,19 +92,19 @@ func TestAccRBinRule_disappears(t *testing.T) { resourceType := "EBS_SNAPSHOT" resourceName := "aws_rbin_rule.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) acctest.PreCheckPartitionHasService(t, rbin.ServiceID) }, ErrorCheck: acctest.ErrorCheck(t, rbin.ServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckRuleDestroy(ctx), + CheckDestroy: testAccCheckRuleDestroy(ctx, t), Steps: []resource.TestStep{ { Config: testAccRuleConfig_basic1(description, resourceType), Check: resource.ComposeTestCheckFunc( - testAccCheckRuleExists(ctx, resourceName, &rbinrule), + testAccCheckRuleExists(ctx, t, resourceName, &rbinrule), acctest.CheckResourceDisappears(ctx, acctest.Provider, tfrbin.ResourceRule(), resourceName), ), ExpectNonEmptyPlan: true, @@ -121,19 +120,19 @@ func TestAccRBinRule_excludeResourceTags(t *testing.T) { resourceType := "EBS_SNAPSHOT" resourceName := "aws_rbin_rule.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) acctest.PreCheckPartitionHasService(t, rbin.ServiceID) }, ErrorCheck: acctest.ErrorCheck(t, rbin.ServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckRuleDestroy(ctx), + CheckDestroy: testAccCheckRuleDestroy(ctx, t), Steps: []resource.TestStep{ { Config: testAccRuleConfig_excludeResourceTags1(description, resourceType), Check: resource.ComposeTestCheckFunc( - testAccCheckRuleExists(ctx, resourceName, &rule), + testAccCheckRuleExists(ctx, t, resourceName, &rule), resource.TestCheckResourceAttr(resourceName, names.AttrDescription, description), resource.TestCheckResourceAttr(resourceName, names.AttrResourceType, resourceType), @@ -157,7 
+156,7 @@ func TestAccRBinRule_excludeResourceTags(t *testing.T) { { Config: testAccRuleConfig_excludeResourceTags2(description, resourceType), Check: resource.ComposeTestCheckFunc( - testAccCheckRuleExists(ctx, resourceName, &rule), + testAccCheckRuleExists(ctx, t, resourceName, &rule), resource.TestCheckResourceAttr(resourceName, names.AttrDescription, description), resource.TestCheckResourceAttr(resourceName, names.AttrResourceType, resourceType), resource.TestCheckResourceAttr(resourceName, "exclude_resource_tags.#", "2"), @@ -184,7 +183,7 @@ func TestAccRBinRule_excludeResourceTags(t *testing.T) { { Config: testAccRuleConfig_basic1(description, resourceType), Check: resource.ComposeTestCheckFunc( - testAccCheckRuleExists(ctx, resourceName, &rule), + testAccCheckRuleExists(ctx, t, resourceName, &rule), resource.TestCheckResourceAttr(resourceName, names.AttrDescription, description), resource.TestCheckResourceAttr(resourceName, "exclude_resource_tags.#", "0"), resource.TestCheckResourceAttr(resourceName, names.AttrResourceType, resourceType), @@ -208,19 +207,19 @@ func TestAccRBinRule_lockConfig(t *testing.T) { resourceType := "EBS_SNAPSHOT" resourceName := "aws_rbin_rule.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) acctest.PreCheckPartitionHasService(t, rbin.ServiceID) }, ErrorCheck: acctest.ErrorCheck(t, rbin.ServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckRuleDestroy(ctx), + CheckDestroy: testAccCheckRuleDestroy(ctx, t), Steps: []resource.TestStep{ { Config: testAccRuleConfig_lockConfig(resourceType, "DAYS", "7"), Check: resource.ComposeTestCheckFunc( - testAccCheckRuleExists(ctx, resourceName, &rule), + testAccCheckRuleExists(ctx, t, resourceName, &rule), resource.TestCheckResourceAttr(resourceName, "lock_configuration.#", "1"), resource.TestCheckResourceAttr(resourceName, 
"lock_configuration.0.unlock_delay.#", "1"), resource.TestCheckResourceAttr(resourceName, "lock_configuration.0.unlock_delay.0.unlock_delay_unit", "DAYS"), @@ -237,19 +236,19 @@ func TestAccRBinRule_tags(t *testing.T) { resourceType := "EBS_SNAPSHOT" resourceName := "aws_rbin_rule.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) acctest.PreCheckPartitionHasService(t, names.RBin) }, ErrorCheck: acctest.ErrorCheck(t, names.RBin), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckRuleDestroy(ctx), + CheckDestroy: testAccCheckRuleDestroy(ctx, t), Steps: []resource.TestStep{ { Config: testAccRuleConfig_tags1(resourceType, acctest.CtKey1, acctest.CtValue1), Check: resource.ComposeTestCheckFunc( - testAccCheckRuleExists(ctx, resourceName, &rule), + testAccCheckRuleExists(ctx, t, resourceName, &rule), resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, "1"), resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey1, acctest.CtValue1), ), @@ -262,7 +261,7 @@ func TestAccRBinRule_tags(t *testing.T) { { Config: testAccRuleConfig_tags2(resourceType, acctest.CtKey1, acctest.CtValue1Updated, acctest.CtKey2, acctest.CtValue2), Check: resource.ComposeTestCheckFunc( - testAccCheckRuleExists(ctx, resourceName, &rule), + testAccCheckRuleExists(ctx, t, resourceName, &rule), resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, "2"), resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey1, acctest.CtValue1Updated), resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey2, acctest.CtValue2), @@ -271,7 +270,7 @@ func TestAccRBinRule_tags(t *testing.T) { { Config: testAccRuleConfig_tags1(resourceType, acctest.CtKey1, acctest.CtValue1), Check: resource.ComposeTestCheckFunc( - testAccCheckRuleExists(ctx, resourceName, &rule), + testAccCheckRuleExists(ctx, t, resourceName, &rule), 
resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, "1"), resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey1, acctest.CtValue1), ), @@ -280,9 +279,9 @@ func TestAccRBinRule_tags(t *testing.T) { }) } -func testAccCheckRuleDestroy(ctx context.Context) resource.TestCheckFunc { +func testAccCheckRuleDestroy(ctx context.Context, t *testing.T) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).RBinClient(ctx) + conn := acctest.ProviderMeta(ctx, t).RBinClient(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != "aws_rbin_rule" { @@ -291,7 +290,7 @@ func testAccCheckRuleDestroy(ctx context.Context) resource.TestCheckFunc { _, err := tfrbin.FindRuleByID(ctx, conn, rs.Primary.ID) - if tfresource.NotFound(err) { + if retry.NotFound(err) { continue } @@ -306,14 +305,14 @@ func testAccCheckRuleDestroy(ctx context.Context) resource.TestCheckFunc { } } -func testAccCheckRuleExists(ctx context.Context, n string, v *rbin.GetRuleOutput) resource.TestCheckFunc { +func testAccCheckRuleExists(ctx context.Context, t *testing.T, n string, v *rbin.GetRuleOutput) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] if !ok { return fmt.Errorf("Not found: %s", n) } - conn := acctest.Provider.Meta().(*conns.AWSClient).RBinClient(ctx) + conn := acctest.ProviderMeta(ctx, t).RBinClient(ctx) output, err := tfrbin.FindRuleByID(ctx, conn, rs.Primary.ID) diff --git a/internal/service/rbin/service_endpoint_resolver_gen.go b/internal/service/rbin/service_endpoint_resolver_gen.go index 813297889ba5..cf2aee5e7f23 100644 --- a/internal/service/rbin/service_endpoint_resolver_gen.go +++ b/internal/service/rbin/service_endpoint_resolver_gen.go @@ -62,7 +62,7 @@ func (r resolverV2) ResolveEndpoint(ctx context.Context, params rbin.EndpointPar }) params.UseFIPS = aws.Bool(false) } else { - err = fmt.Errorf("looking up rbin endpoint %q: %s", hostname, err) 
+ err = fmt.Errorf("looking up rbin endpoint %q: %w", hostname, err) return } } else { diff --git a/internal/service/rbin/service_endpoints_gen_test.go b/internal/service/rbin/service_endpoints_gen_test.go index 032e7db77dd7..6060e2b61db9 100644 --- a/internal/service/rbin/service_endpoints_gen_test.go +++ b/internal/service/rbin/service_endpoints_gen_test.go @@ -604,7 +604,7 @@ func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { ) } -var errCancelOperation = fmt.Errorf("Test: Canceling request") +var errCancelOperation = errors.New("Test: Canceling request") func addCancelRequestMiddleware() func(*middleware.Stack) error { return func(stack *middleware.Stack) error { diff --git a/internal/service/rbin/service_package_gen.go b/internal/service/rbin/service_package_gen.go index 2aeb517d7211..925c1842bacf 100644 --- a/internal/service/rbin/service_package_gen.go +++ b/internal/service/rbin/service_package_gen.go @@ -7,7 +7,6 @@ import ( "unique" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/rbin" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -67,7 +66,7 @@ func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) ( func(o *rbin.Options) { if inContext, ok := conns.FromContext(ctx); ok && inContext.VCREnabled() { tflog.Info(ctx, "overriding retry behavior to immediately return VCR errors") - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(vcr.InteractionNotFoundRetryableFunc)) + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), vcr.InteractionNotFoundRetryableFunc) } }, withExtraOptions(ctx, p, config), diff --git a/internal/service/rbin/tags_gen.go b/internal/service/rbin/tags_gen.go index aec1cfc10537..2dd0addb93a3 100644 --- a/internal/service/rbin/tags_gen.go +++ b/internal/service/rbin/tags_gen.go @@ -3,8 +3,8 @@ 
package rbin import ( "context" - "fmt" + "github.com/YakDriver/smarterr" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/rbin" awstypes "github.com/aws/aws-sdk-go-v2/service/rbin/types" @@ -27,7 +27,7 @@ func listTags(ctx context.Context, conn *rbin.Client, identifier string, optFns output, err := conn.ListTagsForResource(ctx, &input, optFns...) if err != nil { - return tftags.New(ctx, nil), err + return tftags.New(ctx, nil), smarterr.NewError(err) } return keyValueTags(ctx, output.Tags), nil @@ -39,7 +39,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri tags, err := listTags(ctx, meta.(*conns.AWSClient).RBinClient(ctx), identifier) if err != nil { - return err + return smarterr.NewError(err) } if inContext, ok := tftags.FromContext(ctx); ok { @@ -117,7 +117,7 @@ func updateTags(ctx context.Context, conn *rbin.Client, identifier string, oldTa _, err := conn.UntagResource(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("untagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } @@ -132,7 +132,7 @@ func updateTags(ctx context.Context, conn *rbin.Client, identifier string, oldTa _, err := conn.TagResource(ctx, &input, optFns...) 
if err != nil { - return fmt.Errorf("tagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } diff --git a/internal/service/rds/blue_green.go b/internal/service/rds/blue_green.go index 275f6cf19e66..9d53d5660f10 100644 --- a/internal/service/rds/blue_green.go +++ b/internal/service/rds/blue_green.go @@ -49,7 +49,7 @@ func (o *blueGreenOrchestrator) CleanUp(ctx context.Context) { func (o *blueGreenOrchestrator) CreateDeployment(ctx context.Context, input *rds.CreateBlueGreenDeploymentInput) (*types.BlueGreenDeployment, error) { createOut, err := o.conn.CreateBlueGreenDeployment(ctx, input) if err != nil { - return nil, fmt.Errorf("creating Blue/Green Deployment: %s", err) + return nil, fmt.Errorf("creating Blue/Green Deployment: %w", err) } dep := createOut.BlueGreenDeployment return dep, nil @@ -58,7 +58,7 @@ func (o *blueGreenOrchestrator) CreateDeployment(ctx context.Context, input *rds func (o *blueGreenOrchestrator) waitForDeploymentAvailable(ctx context.Context, identifier string, timeout time.Duration) (*types.BlueGreenDeployment, error) { dep, err := waitBlueGreenDeploymentAvailable(ctx, o.conn, identifier, timeout) if err != nil { - return nil, fmt.Errorf("creating Blue/Green Deployment: %s", err) + return nil, fmt.Errorf("creating Blue/Green Deployment: %w", err) } return dep, nil } @@ -68,7 +68,7 @@ func (o *blueGreenOrchestrator) Switchover(ctx context.Context, identifier strin BlueGreenDeploymentIdentifier: aws.String(identifier), } _, err := tfresource.RetryWhen(ctx, 10*time.Minute, - func() (any, error) { + func(ctx context.Context) (any, error) { return o.conn.SwitchoverBlueGreenDeployment(ctx, input) }, func(err error) (bool, error) { @@ -76,12 +76,12 @@ func (o *blueGreenOrchestrator) Switchover(ctx context.Context, identifier strin }, ) if err != nil { - return nil, fmt.Errorf("switching over Blue/Green Deployment: %s", err) + return nil, fmt.Errorf("switching over Blue/Green Deployment: %w", err) } dep, err := 
waitBlueGreenDeploymentSwitchoverCompleted(ctx, o.conn, identifier, timeout) if err != nil { - return nil, fmt.Errorf("switching over Blue/Green Deployment: waiting for completion: %s", err) + return nil, fmt.Errorf("switching over Blue/Green Deployment: waiting for completion: %w", err) } return dep, nil } @@ -122,7 +122,7 @@ func (h *instanceHandler) precondition(ctx context.Context, d *schema.ResourceDa if needsPreConditions { err := dbInstanceModify(ctx, h.conn, d.Id(), input, d.Timeout(schema.TimeoutUpdate)) if err != nil { - return fmt.Errorf("setting pre-conditions: %s", err) + return fmt.Errorf("setting pre-conditions: %w", err) } } return nil @@ -160,7 +160,7 @@ func (h *instanceHandler) modifyTarget(ctx context.Context, identifier string, d err := dbInstanceModify(ctx, h.conn, d.Id(), modifyInput, timeout) if err != nil { - return fmt.Errorf("updating Green environment: %s", err) + return fmt.Errorf("updating Green environment: %w", err) } } diff --git a/internal/service/rds/certificate_identity_gen_test.go b/internal/service/rds/certificate_identity_gen_test.go index 467ca0279c38..0b619fd409db 100644 --- a/internal/service/rds/certificate_identity_gen_test.go +++ b/internal/service/rds/certificate_identity_gen_test.go @@ -16,6 +16,7 @@ import ( "github.com/hashicorp/terraform-plugin-testing/tfversion" "github.com/hashicorp/terraform-provider-aws/internal/acctest" tfknownvalue "github.com/hashicorp/terraform-provider-aws/internal/acctest/knownvalue" + tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -23,9 +24,10 @@ func testAccRDSCertificate_IdentitySerial(t *testing.T) { t.Helper() testCases := map[string]func(t *testing.T){ - acctest.CtBasic: testAccRDSCertificate_Identity_Basic, - "ExistingResource": testAccRDSCertificate_Identity_ExistingResource, - "RegionOverride": testAccRDSCertificate_Identity_RegionOverride, + acctest.CtBasic: 
testAccRDSCertificate_Identity_Basic, + "ExistingResource": testAccRDSCertificate_Identity_ExistingResource, + "ExistingResourceNoRefresh": testAccRDSCertificate_Identity_ExistingResource_NoRefresh_NoChange, + "RegionOverride": testAccRDSCertificate_Identity_RegionOverride, } acctest.RunSerialTests1Level(t, testCases, 0) @@ -37,7 +39,7 @@ func testAccRDSCertificate_Identity_Basic(t *testing.T) { var v awstypes.Certificate resourceName := "aws_rds_certificate.test" - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ TerraformVersionChecks: []tfversion.TerraformVersionCheck{ tfversion.SkipBelow(tfversion.Version1_12_0), }, @@ -111,7 +113,7 @@ func testAccRDSCertificate_Identity_RegionOverride(t *testing.T) { resourceName := "aws_rds_certificate.test" - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ TerraformVersionChecks: []tfversion.TerraformVersionCheck{ tfversion.SkipBelow(tfversion.Version1_12_0), }, @@ -215,3 +217,120 @@ func testAccRDSCertificate_Identity_RegionOverride(t *testing.T) { }, }) } + +func testAccRDSCertificate_Identity_ExistingResource(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.Certificate + resourceName := "aws_rds_certificate.test" + + acctest.Test(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.RDSServiceID), + CheckDestroy: testAccCheckCertificateDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/Certificate/basic_v5.100.0/"), + ConfigVariables: config.Variables{}, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckCertificateExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: v6.0 Identity 
error + { + ConfigDirectory: config.StaticDirectory("testdata/Certificate/basic_v6.0.0/"), + ConfigVariables: config.Variables{}, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckCertificateExists(ctx, resourceName, &v), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: knownvalue.Null(), + names.AttrRegion: knownvalue.Null(), + }), + }, + }, + + // Step 3: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Certificate/basic/"), + ConfigVariables: config.Variables{}, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + }), + }, + }, + }, + }) +} + +func testAccRDSCertificate_Identity_ExistingResource_NoRefresh_NoChange(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.Certificate + resourceName := "aws_rds_certificate.test" + + acctest.Test(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.RDSServiceID), + CheckDestroy: 
testAccCheckCertificateDestroy(ctx), + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/Certificate/basic_v5.100.0/"), + ConfigVariables: config.Variables{}, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckCertificateExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Certificate/basic/"), + ConfigVariables: config.Variables{}, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckCertificateExists(ctx, resourceName, &v), + ), + }, + }, + }) +} diff --git a/internal/service/rds/certificate_test.go b/internal/service/rds/certificate_test.go index 65be008d08bf..7c9f7f9e6191 100644 --- a/internal/service/rds/certificate_test.go +++ b/internal/service/rds/certificate_test.go @@ -10,14 +10,8 @@ import ( "github.com/aws/aws-sdk-go-v2/service/rds/types" "github.com/hashicorp/terraform-plugin-testing/helper/resource" - "github.com/hashicorp/terraform-plugin-testing/knownvalue" - "github.com/hashicorp/terraform-plugin-testing/plancheck" - "github.com/hashicorp/terraform-plugin-testing/statecheck" "github.com/hashicorp/terraform-plugin-testing/terraform" - "github.com/hashicorp/terraform-plugin-testing/tfversion" "github.com/hashicorp/terraform-provider-aws/internal/acctest" - tfknownvalue "github.com/hashicorp/terraform-provider-aws/internal/acctest/knownvalue" - tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" "github.com/hashicorp/terraform-provider-aws/internal/conns" tfrds "github.com/hashicorp/terraform-provider-aws/internal/service/rds" 
"github.com/hashicorp/terraform-provider-aws/internal/tfresource" @@ -256,75 +250,6 @@ func testAccCheckCertificateExists(ctx context.Context, n string, v *types.Certi } } -func testAccRDSCertificate_Identity_ExistingResource(t *testing.T) { - ctx := acctest.Context(t) - resourceName := "aws_rds_certificate.test" - - resource.Test(t, resource.TestCase{ - TerraformVersionChecks: []tfversion.TerraformVersionCheck{ - tfversion.SkipBelow(tfversion.Version1_12_0), - }, - PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, names.RDSServiceID), - CheckDestroy: testAccCheckCertificateDestroy(ctx), - Steps: []resource.TestStep{ - { - ExternalProviders: map[string]resource.ExternalProvider{ - "aws": { - Source: "hashicorp/aws", - VersionConstraint: "5.100.0", - }, - }, - Config: testAccCertificateConfig_basic("rds-ca-rsa4096-g1"), - ConfigStateChecks: []statecheck.StateCheck{ - tfstatecheck.ExpectNoIdentity(resourceName), - }, - }, - { - ExternalProviders: map[string]resource.ExternalProvider{ - "aws": { - Source: "hashicorp/aws", - VersionConstraint: "6.0.0", - }, - }, - Config: testAccCertificateConfig_basic("rds-ca-rsa4096-g1"), - ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - PostApplyPostRefresh: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - }, - ConfigStateChecks: []statecheck.StateCheck{ - statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ - names.AttrAccountID: knownvalue.Null(), - names.AttrRegion: knownvalue.Null(), - }), - }, - }, - { - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - Config: testAccCertificateConfig_basic("rds-ca-rsa4096-g1"), - ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - 
PostApplyPostRefresh: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - }, - ConfigStateChecks: []statecheck.StateCheck{ - statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ - names.AttrAccountID: tfknownvalue.AccountID(), - names.AttrRegion: knownvalue.StringExact(acctest.Region()), - }), - }, - }, - }, - }) -} - func testAccCertificateConfig_basic(certificateID string) string { return fmt.Sprintf(` resource "aws_rds_certificate" "test" { diff --git a/internal/service/rds/cluster.go b/internal/service/rds/cluster.go index d69681362bd6..74d2effbef6b 100644 --- a/internal/service/rds/cluster.go +++ b/internal/service/rds/cluster.go @@ -852,7 +852,7 @@ func resourceClusterCreate(ctx context.Context, d *schema.ResourceData, meta any } _, err := tfresource.RetryWhenAWSErrMessageContains(ctx, propagationTimeout, - func() (any, error) { + func(ctx context.Context) (any, error) { return conn.RestoreDBClusterFromSnapshot(ctx, input) }, errCodeInvalidParameterValue, "IAM role ARN value is invalid or does not include the required permissions") @@ -980,7 +980,7 @@ func resourceClusterCreate(ctx context.Context, d *schema.ResourceData, meta any } _, err := tfresource.RetryWhen(ctx, propagationTimeout, - func() (any, error) { + func(ctx context.Context) (any, error) { return conn.RestoreDBClusterFromS3(ctx, input) }, func(err error) (bool, error) { @@ -1179,7 +1179,14 @@ func resourceClusterCreate(ctx context.Context, d *schema.ResourceData, meta any } if v := d.Get("database_insights_mode"); v.(string) != "" { - input.DatabaseInsightsMode = types.DatabaseInsightsMode(v.(string)) + // If the cluster is part of a global cluster, defer Database Insights settings + // to the modifyDbClusterInput to prevent them from being reset. 
+ if _, ok := d.GetOk("global_cluster_identifier"); ok { + modifyDbClusterInput.DatabaseInsightsMode = types.DatabaseInsightsMode(v.(string)) + requiresModifyDbCluster = true + } else { + input.DatabaseInsightsMode = types.DatabaseInsightsMode(v.(string)) + } } if v := d.Get(names.AttrDatabaseName); v.(string) != "" { @@ -1361,7 +1368,7 @@ func resourceClusterCreate(ctx context.Context, d *schema.ResourceData, meta any } _, err := tfresource.RetryWhenAWSErrMessageContains(ctx, propagationTimeout, - func() (any, error) { + func(ctx context.Context) (any, error) { return conn.CreateDBCluster(ctx, input) }, errCodeInvalidParameterValue, "IAM role ARN value is invalid or does not include the required permissions") @@ -1615,6 +1622,10 @@ func resourceClusterUpdate(ctx context.Context, d *schema.ResourceData, meta any if d.HasChange("database_insights_mode") { input.DatabaseInsightsMode = types.DatabaseInsightsMode(d.Get("database_insights_mode").(string)) input.EnablePerformanceInsights = aws.Bool(d.Get("performance_insights_enabled").(bool)) + if v, ok := d.Get("performance_insights_kms_key_id").(string); ok && v != "" { + input.PerformanceInsightsKMSKeyId = aws.String(v) + } + input.PerformanceInsightsRetentionPeriod = aws.Int32(int32(d.Get("performance_insights_retention_period").(int))) } if d.HasChange("db_cluster_instance_class") { @@ -1777,7 +1788,7 @@ func resourceClusterUpdate(ctx context.Context, d *schema.ResourceData, meta any timeout = 5 * time.Minute ) _, err := tfresource.RetryWhen(ctx, timeout, - func() (any, error) { + func(ctx context.Context) (any, error) { return conn.ModifyDBCluster(ctx, input) }, func(err error) (bool, error) { @@ -1901,14 +1912,14 @@ func resourceClusterDelete(ctx context.Context, d *schema.ResourceData, meta any timeout = 2 * time.Minute ) _, err := tfresource.RetryWhen(ctx, timeout, - func() (any, error) { + func(ctx context.Context) (any, error) { return conn.DeleteDBCluster(ctx, input) }, func(err error) (bool, error) { if 
tfawserr.ErrMessageContains(err, errCodeInvalidParameterCombination, "disable deletion pro") { if v, ok := d.GetOk(names.AttrDeletionProtection); (!ok || !v.(bool)) && d.Get(names.AttrApplyImmediately).(bool) { _, err := tfresource.RetryWhen(ctx, d.Timeout(schema.TimeoutDelete), - func() (any, error) { + func(ctx context.Context) (any, error) { return conn.ModifyDBCluster(ctx, &rds.ModifyDBClusterInput{ ApplyImmediately: aws.Bool(true), DBClusterIdentifier: aws.String(d.Id()), @@ -1929,11 +1940,11 @@ func resourceClusterDelete(ctx context.Context, d *schema.ResourceData, meta any ) if err != nil { - return false, fmt.Errorf("modifying RDS Cluster (%s) DeletionProtection=false: %s", d.Id(), err) + return false, fmt.Errorf("modifying RDS Cluster (%s) DeletionProtection=false: %w", d.Id(), err) } if _, err := waitDBClusterUpdated(ctx, conn, d.Id(), false, d.Timeout(schema.TimeoutDelete)); err != nil { - return false, fmt.Errorf("waiting for RDS Cluster (%s) update: %s", d.Id(), err) + return false, fmt.Errorf("waiting for RDS Cluster (%s) update: %w", d.Id(), err) } } diff --git a/internal/service/rds/cluster_instance.go b/internal/service/rds/cluster_instance.go index 055b4fd12fab..ae518f1d01d7 100644 --- a/internal/service/rds/cluster_instance.go +++ b/internal/service/rds/cluster_instance.go @@ -15,13 +15,13 @@ import ( "github.com/aws/aws-sdk-go-v2/service/rds" "github.com/aws/aws-sdk-go-v2/service/rds/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/create" "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" + "github.com/hashicorp/terraform-provider-aws/internal/retry" tftags 
"github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/internal/verify" @@ -314,7 +314,7 @@ func resourceClusterInstanceCreate(ctx context.Context, d *schema.ResourceData, } outputRaw, err := tfresource.RetryWhenAWSErrMessageContains(ctx, propagationTimeout, - func() (any, error) { + func(ctx context.Context) (any, error) { return conn.CreateDBInstance(ctx, input) }, errCodeInvalidParameterValue, "IAM role ARN value is invalid or does not include the required permissions") @@ -508,7 +508,7 @@ func resourceClusterInstanceUpdate(ctx context.Context, d *schema.ResourceData, } _, err := tfresource.RetryWhenAWSErrMessageContains(ctx, propagationTimeout, - func() (any, error) { + func(ctx context.Context) (any, error) { return conn.ModifyDBInstance(ctx, input) }, errCodeInvalidParameterValue, "IAM role ARN value is invalid or does not include the required permissions") @@ -540,8 +540,8 @@ func resourceClusterInstanceDelete(ctx context.Context, d *schema.ResourceData, } log.Printf("[DEBUG] Deleting RDS Cluster Instance: %s", d.Id()) - _, err := tfresource.RetryWhenIsAErrorMessageContains[*types.InvalidDBClusterStateFault](ctx, d.Timeout(schema.TimeoutDelete), - func() (any, error) { + _, err := tfresource.RetryWhenIsAErrorMessageContains[any, *types.InvalidDBClusterStateFault](ctx, d.Timeout(schema.TimeoutDelete), + func(ctx context.Context) (any, error) { return conn.DeleteDBInstance(ctx, input) }, "Delete the replica cluster before deleting") @@ -593,7 +593,7 @@ func waitDBClusterInstanceAvailable(ctx context.Context, conn *rds.Client, id st instanceStatusUpgrading, }, Target: []string{instanceStatusAvailable, instanceStatusStorageOptimization}, - Refresh: statusDBInstance(ctx, conn, id), + Refresh: statusDBInstance(conn, id), Timeout: timeout, MinTimeout: 10 * time.Second, Delay: 30 * time.Second, @@ -617,7 +617,7 @@ func waitDBClusterInstanceDeleted(ctx 
context.Context, conn *rds.Client, id stri instanceStatusModifying, }, Target: []string{}, - Refresh: statusDBInstance(ctx, conn, id), + Refresh: statusDBInstance(conn, id), Timeout: timeout, MinTimeout: 10 * time.Second, Delay: 30 * time.Second, diff --git a/internal/service/rds/cluster_parameter_group.go b/internal/service/rds/cluster_parameter_group.go index 2b67e7a4c275..4c55067d0872 100644 --- a/internal/service/rds/cluster_parameter_group.go +++ b/internal/service/rds/cluster_parameter_group.go @@ -243,7 +243,7 @@ func resourceClusterParameterGroupUpdate(ctx context.Context, d *schema.Resource const ( timeout = 3 * time.Minute ) - _, err := tfresource.RetryWhenIsAErrorMessageContains[*types.InvalidDBParameterGroupStateFault](ctx, timeout, func() (any, error) { + _, err := tfresource.RetryWhenIsAErrorMessageContains[any, *types.InvalidDBParameterGroupStateFault](ctx, timeout, func(ctx context.Context) (any, error) { return conn.ResetDBClusterParameterGroup(ctx, &input) }, "has pending changes") @@ -267,7 +267,7 @@ func resourceClusterParameterGroupDelete(ctx context.Context, d *schema.Resource input := rds.DeleteDBClusterParameterGroupInput{ DBClusterParameterGroupName: aws.String(d.Id()), } - _, err := tfresource.RetryWhenIsA[*types.InvalidDBParameterGroupStateFault](ctx, timeout, func() (any, error) { + _, err := tfresource.RetryWhenIsA[any, *types.InvalidDBParameterGroupStateFault](ctx, timeout, func(ctx context.Context) (any, error) { return conn.DeleteDBClusterParameterGroup(ctx, &input) }) diff --git a/internal/service/rds/cluster_role_association.go b/internal/service/rds/cluster_role_association.go index 0189b239c24f..883416c3e982 100644 --- a/internal/service/rds/cluster_role_association.go +++ b/internal/service/rds/cluster_role_association.go @@ -50,7 +50,7 @@ func resourceClusterRoleAssociation() *schema.Resource { }, "feature_name": { Type: schema.TypeString, - Required: true, + Optional: true, ForceNew: true, }, names.AttrRoleARN: { @@ -72,16 
+72,19 @@ func resourceClusterRoleAssociationCreate(ctx context.Context, d *schema.Resourc id := clusterRoleAssociationCreateResourceID(dbClusterID, roleARN) input := rds.AddRoleToDBClusterInput{ DBClusterIdentifier: aws.String(dbClusterID), - FeatureName: aws.String(d.Get("feature_name").(string)), RoleArn: aws.String(roleARN), } - _, err := tfresource.RetryWhenIsA[*types.InvalidDBClusterStateFault](ctx, d.Timeout(schema.TimeoutCreate), func() (any, error) { + if v, ok := d.GetOk("feature_name"); ok { + input.FeatureName = aws.String(v.(string)) + } + + _, err := tfresource.RetryWhenIsA[any, *types.InvalidDBClusterStateFault](ctx, d.Timeout(schema.TimeoutCreate), func(ctx context.Context) (any, error) { return conn.AddRoleToDBCluster(ctx, &input) }) if tfawserr.ErrMessageContains(err, errCodeInvalidParameterValue, errIAMRolePropagationMessage) { - _, err = tfresource.RetryWhenAWSErrMessageContains(ctx, propagationTimeout, func() (any, error) { + _, err = tfresource.RetryWhenAWSErrMessageContains(ctx, propagationTimeout, func(ctx context.Context) (any, error) { return conn.AddRoleToDBCluster(ctx, &input) }, errCodeInvalidParameterValue, errIAMRolePropagationMessage) } @@ -142,7 +145,7 @@ func resourceClusterRoleAssociationDelete(ctx context.Context, d *schema.Resourc FeatureName: aws.String(d.Get("feature_name").(string)), RoleArn: aws.String(roleARN), } - _, err = tfresource.RetryWhenIsA[*types.InvalidDBClusterStateFault](ctx, d.Timeout(schema.TimeoutDelete), func() (any, error) { + _, err = tfresource.RetryWhenIsA[any, *types.InvalidDBClusterStateFault](ctx, d.Timeout(schema.TimeoutDelete), func(ctx context.Context) (any, error) { return conn.RemoveRoleFromDBCluster(ctx, &input) }) diff --git a/internal/service/rds/cluster_role_association_test.go b/internal/service/rds/cluster_role_association_test.go index 17b812790963..cc7958e5099e 100644 --- a/internal/service/rds/cluster_role_association_test.go +++ b/internal/service/rds/cluster_role_association_test.go @@ 
-52,6 +52,38 @@ func TestAccRDSClusterRoleAssociation_basic(t *testing.T) { }) } +func TestAccRDSClusterRoleAssociation_mysqlWithoutFeatureName(t *testing.T) { + ctx := acctest.Context(t) + var dbClusterRole types.DBClusterRole + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + dbClusterResourceName := "aws_rds_cluster.test" + iamRoleResourceName := "aws_iam_role.test" + resourceName := "aws_rds_cluster_role_association.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.RDSServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckClusterRoleAssociationDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccClusterRoleAssociationConfig_mysqlWithoutFeatureName(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckClusterRoleAssociationExists(ctx, resourceName, &dbClusterRole), + resource.TestCheckResourceAttrPair(resourceName, "db_cluster_identifier", dbClusterResourceName, names.AttrID), + resource.TestCheckResourceAttr(resourceName, "feature_name", ""), + resource.TestCheckResourceAttrPair(resourceName, names.AttrRoleARN, iamRoleResourceName, names.AttrARN), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + func TestAccRDSClusterRoleAssociation_disappears(t *testing.T) { ctx := acctest.Context(t) var dbClusterRole types.DBClusterRole @@ -214,3 +246,44 @@ data "aws_iam_policy_document" "rds_assume_role_policy" { } `, rName)) } + +func testAccClusterRoleAssociationConfig_mysqlWithoutFeatureName(rName string) string { + return acctest.ConfigCompose( + acctest.ConfigAvailableAZsNoOptIn(), + fmt.Sprintf(` +resource "aws_rds_cluster_role_association" "test" { + db_cluster_identifier = aws_rds_cluster.test.id + role_arn = aws_iam_role.test.arn +} + +resource "aws_rds_cluster" "test" { + cluster_identifier = %[1]q + engine = "aurora-mysql" + 
availability_zones = [data.aws_availability_zones.available.names[0], data.aws_availability_zones.available.names[1], data.aws_availability_zones.available.names[2]] + database_name = "mydb" + master_username = "foo" + master_password = "foobarfoobarfoobar" + skip_final_snapshot = true +} + +resource "aws_iam_role" "test" { + assume_role_policy = data.aws_iam_policy_document.rds_assume_role_policy.json + name = %[1]q + + # ensure IAM role is created just before association to exercise IAM eventual consistency + depends_on = [aws_rds_cluster.test] +} + +data "aws_iam_policy_document" "rds_assume_role_policy" { + statement { + actions = ["sts:AssumeRole"] + effect = "Allow" + + principals { + identifiers = ["rds.amazonaws.com"] + type = "Service" + } + } +} +`, rName)) +} diff --git a/internal/service/rds/cluster_snapshot.go b/internal/service/rds/cluster_snapshot.go index 4c9f4eb228e9..c24cd73a6075 100644 --- a/internal/service/rds/cluster_snapshot.go +++ b/internal/service/rds/cluster_snapshot.go @@ -140,7 +140,7 @@ func resourceClusterSnapshotCreate(ctx context.Context, d *schema.ResourceData, const ( timeout = 2 * time.Minute ) - _, err := tfresource.RetryWhenIsA[*types.InvalidDBClusterStateFault](ctx, timeout, func() (any, error) { + _, err := tfresource.RetryWhenIsA[any, *types.InvalidDBClusterStateFault](ctx, timeout, func(ctx context.Context) (any, error) { return conn.CreateDBClusterSnapshot(ctx, input) }) diff --git a/internal/service/rds/cluster_test.go b/internal/service/rds/cluster_test.go index 586acfa628fe..bb0c212d2fdf 100644 --- a/internal/service/rds/cluster_test.go +++ b/internal/service/rds/cluster_test.go @@ -19,6 +19,7 @@ import ( "github.com/hashicorp/aws-sdk-go-base/v2/endpoints" "github.com/hashicorp/go-version" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-testing/compare" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" 
"github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/knownvalue" @@ -109,6 +110,9 @@ func TestAccRDSCluster_basic(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "monitoring_interval", "0"), resource.TestCheckResourceAttr(resourceName, "monitoring_role_arn", ""), resource.TestCheckResourceAttr(resourceName, "network_type", "IPV4"), + resource.TestCheckResourceAttr(resourceName, "performance_insights_enabled", acctest.CtFalse), + resource.TestCheckResourceAttr(resourceName, "performance_insights_kms_key_id", ""), + resource.TestCheckResourceAttr(resourceName, "performance_insights_retention_period", "0"), resource.TestCheckResourceAttrSet(resourceName, "reader_endpoint"), resource.TestCheckResourceAttr(resourceName, "scaling_configuration.#", "0"), resource.TestCheckResourceAttr(resourceName, names.AttrStorageEncrypted, acctest.CtFalse), @@ -3120,7 +3124,7 @@ func TestAccRDSCluster_engineLifecycleSupport_disabled(t *testing.T) { }) } -func TestAccRDSCluster_performanceInsightsEnabled(t *testing.T) { +func TestAccRDSCluster_performanceInsights_Enabled(t *testing.T) { ctx := acctest.Context(t) if testing.Short() { t.Skip("skipping long-running test in short mode") @@ -3137,24 +3141,28 @@ func TestAccRDSCluster_performanceInsightsEnabled(t *testing.T) { CheckDestroy: testAccCheckClusterDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccClusterConfig_performanceInsightsEnabled(rName, true), - Check: resource.ComposeTestCheckFunc( + Config: testAccClusterConfig_performanceInsights_Enabled(rName, true), + Check: resource.ComposeAggregateTestCheckFunc( testAccCheckClusterExists(ctx, resourceName, &dbCluster), resource.TestCheckResourceAttr(resourceName, "performance_insights_enabled", acctest.CtTrue), + resource.TestCheckResourceAttrPair(resourceName, "performance_insights_kms_key_id", "data.aws_kms_key.rds", names.AttrARN), + resource.TestCheckResourceAttr(resourceName, 
"performance_insights_retention_period", "0"), ), }, { - Config: testAccClusterConfig_performanceInsightsEnabled(rName, false), - Check: resource.ComposeTestCheckFunc( + Config: testAccClusterConfig_performanceInsights_Enabled(rName, false), + Check: resource.ComposeAggregateTestCheckFunc( testAccCheckClusterExists(ctx, resourceName, &dbCluster), resource.TestCheckResourceAttr(resourceName, "performance_insights_enabled", acctest.CtFalse), + resource.TestCheckResourceAttrPair(resourceName, "performance_insights_kms_key_id", "data.aws_kms_key.rds", names.AttrARN), + resource.TestCheckResourceAttr(resourceName, "performance_insights_retention_period", "0"), ), }, }, }) } -func TestAccRDSCluster_performanceInsightsKMSKeyID(t *testing.T) { +func TestAccRDSCluster_performanceInsights_KMSKeyID(t *testing.T) { ctx := acctest.Context(t) if testing.Short() { t.Skip("skipping long-running test in short mode") @@ -3172,17 +3180,455 @@ func TestAccRDSCluster_performanceInsightsKMSKeyID(t *testing.T) { CheckDestroy: testAccCheckClusterDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccClusterConfig_performanceInsightsKMSKeyID(rName), - Check: resource.ComposeTestCheckFunc( + Config: testAccClusterConfig_performanceInsights_KMSKeyID(rName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckClusterExists(ctx, resourceName, &dbCluster), + resource.TestCheckResourceAttr(resourceName, "performance_insights_enabled", acctest.CtTrue), + resource.TestCheckResourceAttrPair(resourceName, "performance_insights_kms_key_id", kmsKeyResourceName, names.AttrARN), + ), + }, + }, + }) +} + +func TestAccRDSCluster_performanceInsights_RetentionPeriod(t *testing.T) { + ctx := acctest.Context(t) + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + + var dbCluster types.DBCluster + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_rds_cluster.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { 
acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.RDSServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckClusterDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccClusterConfig_performanceInsights_RetentionPeriod(rName, 62), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckClusterExists(ctx, resourceName, &dbCluster), + resource.TestCheckResourceAttr(resourceName, "performance_insights_enabled", acctest.CtTrue), + resource.TestCheckResourceAttr(resourceName, "performance_insights_retention_period", "62"), + ), + }, + { + Config: testAccClusterConfig_performanceInsights_RetentionPeriod(rName, 124), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckClusterExists(ctx, resourceName, &dbCluster), + resource.TestCheckResourceAttr(resourceName, "performance_insights_enabled", acctest.CtTrue), + resource.TestCheckResourceAttr(resourceName, "performance_insights_retention_period", "124"), + ), + }, + }, + }) +} + +func TestAccRDSCluster_performanceInsights_KMSKey_RetentionPeriod(t *testing.T) { + ctx := acctest.Context(t) + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + + var dbCluster types.DBCluster + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_rds_cluster.test" + kmsKeyResourceName := "aws_kms_key.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.RDSServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckClusterDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccClusterConfig_performanceInsights_KMSKey_RetentionPeriod(rName, 62), + Check: resource.ComposeAggregateTestCheckFunc( testAccCheckClusterExists(ctx, resourceName, &dbCluster), + resource.TestCheckResourceAttr(resourceName, "performance_insights_enabled", acctest.CtTrue), + 
resource.TestCheckResourceAttr(resourceName, "performance_insights_retention_period", "62"), resource.TestCheckResourceAttrPair(resourceName, "performance_insights_kms_key_id", kmsKeyResourceName, names.AttrARN), ), }, + { + Config: testAccClusterConfig_performanceInsights_KMSKey_RetentionPeriod(rName, 124), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckClusterExists(ctx, resourceName, &dbCluster), + resource.TestCheckResourceAttr(resourceName, "performance_insights_enabled", acctest.CtTrue), + resource.TestCheckResourceAttr(resourceName, "performance_insights_retention_period", "124"), + resource.TestCheckResourceAttrPair(resourceName, "performance_insights_kms_key_id", kmsKeyResourceName, names.AttrARN), + ), + }, + }, + }) +} + +func TestAccRDSCluster_GlobalClusterIdentifier_performanceInsightsEnabled(t *testing.T) { + ctx := acctest.Context(t) + var dbCluster types.DBCluster + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_rds_cluster.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheckGlobalCluster(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.RDSServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckClusterDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccClusterConfig_GlobalClusterID_performanceInsightsEnabled(rName, true), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckClusterExists(ctx, resourceName, &dbCluster), + resource.TestCheckResourceAttr(resourceName, "performance_insights_enabled", acctest.CtTrue), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "master_password", + "enable_global_write_forwarding", + "enable_local_write_forwarding", + }, + }, + { + Config: testAccClusterConfig_GlobalClusterID_performanceInsightsEnabled(rName, false), + Check: 
resource.ComposeAggregateTestCheckFunc( + testAccCheckClusterExists(ctx, resourceName, &dbCluster), + resource.TestCheckResourceAttr(resourceName, "performance_insights_enabled", acctest.CtFalse), + ), + }, + }, + }) +} + +func TestAccRDSCluster_databaseInsightsMode_defaultKMSKey_create(t *testing.T) { + ctx := acctest.Context(t) + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + + var dbCluster types.DBCluster + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_rds_cluster.test" + + kmsKeyIDExpectNoChange := statecheck.CompareValue(compare.ValuesSame()) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.RDSServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckClusterDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccClusterConfig_databaseInsightsMode_defaultKMSKey(rName, "advanced", true, "465"), + Check: resource.ComposeTestCheckFunc( + testAccCheckClusterExists(ctx, resourceName, &dbCluster), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("database_insights_mode"), knownvalue.StringExact("advanced")), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("performance_insights_enabled"), knownvalue.Bool(true)), + kmsKeyIDExpectNoChange.AddStateValue("data.aws_kms_key.rds", tfjsonpath.New(names.AttrARN)), + kmsKeyIDExpectNoChange.AddStateValue(resourceName, tfjsonpath.New("performance_insights_kms_key_id")), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("performance_insights_retention_period"), knownvalue.Int64Exact(465)), + }, + }, + { + Config: 
testAccClusterConfig_databaseInsightsMode_defaultKMSKey(rName, "standard", false, "null"), + Check: resource.ComposeTestCheckFunc( + testAccCheckClusterExists(ctx, resourceName, &dbCluster), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("database_insights_mode"), knownvalue.StringExact("standard")), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("performance_insights_enabled"), knownvalue.Bool(false)), + kmsKeyIDExpectNoChange.AddStateValue(resourceName, tfjsonpath.New("performance_insights_kms_key_id")), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("performance_insights_retention_period"), knownvalue.Int64Exact(0)), + }, + }, + }, + }) +} + +func TestAccRDSCluster_GlobalClusterIdentifier_databaseInsightsMode_defaultKMSKey_create(t *testing.T) { + ctx := acctest.Context(t) + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + + var dbCluster types.DBCluster + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_rds_cluster.test" + + kmsKeyIDExpectNoChange := statecheck.CompareValue(compare.ValuesSame()) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.RDSServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckClusterDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccClusterConfig_GlobalClusterIdentifier_databaseInsightsMode_defaultKMSKey(rName, "advanced", true, "465"), + Check: resource.ComposeTestCheckFunc( + testAccCheckClusterExists(ctx, resourceName, &dbCluster), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, 
plancheck.ResourceActionCreate), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("database_insights_mode"), knownvalue.StringExact("advanced")), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("performance_insights_enabled"), knownvalue.Bool(true)), + kmsKeyIDExpectNoChange.AddStateValue("data.aws_kms_key.rds", tfjsonpath.New(names.AttrARN)), + kmsKeyIDExpectNoChange.AddStateValue(resourceName, tfjsonpath.New("performance_insights_kms_key_id")), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("performance_insights_retention_period"), knownvalue.Int64Exact(465)), + }, + }, + }, + }) +} + +func TestAccRDSCluster_databaseInsightsMode_defaultKMSKey_Disable_PerformanceInsightsEnabled(t *testing.T) { + ctx := acctest.Context(t) + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + + var dbCluster types.DBCluster + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_rds_cluster.test" + + kmsKeyIDExpectNoChange := statecheck.CompareValue(compare.ValuesSame()) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.RDSServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckClusterDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccClusterConfig_databaseInsightsMode_defaultKMSKey(rName, "advanced", true, "465"), + Check: resource.ComposeTestCheckFunc( + testAccCheckClusterExists(ctx, resourceName, &dbCluster), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("database_insights_mode"), knownvalue.StringExact("advanced")), + 
statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("performance_insights_enabled"), knownvalue.Bool(true)), + kmsKeyIDExpectNoChange.AddStateValue("data.aws_kms_key.rds", tfjsonpath.New(names.AttrARN)), + kmsKeyIDExpectNoChange.AddStateValue(resourceName, tfjsonpath.New("performance_insights_kms_key_id")), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("performance_insights_retention_period"), knownvalue.Int64Exact(465)), + }, + }, + { + Config: testAccClusterConfig_databaseInsightsMode_defaultKMSKey(rName, "standard", true, "null"), + Check: resource.ComposeTestCheckFunc( + testAccCheckClusterExists(ctx, resourceName, &dbCluster), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("database_insights_mode"), knownvalue.StringExact("standard")), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("performance_insights_enabled"), knownvalue.Bool(true)), + kmsKeyIDExpectNoChange.AddStateValue(resourceName, tfjsonpath.New("performance_insights_kms_key_id")), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("performance_insights_retention_period"), knownvalue.Int64Exact(465)), + }, + }, + }, + }) +} + +func TestAccRDSCluster_databaseInsightsMode_defaultKMSKey_Enable_OnUpdate_FromPerformanceInsightsEnabled(t *testing.T) { + ctx := acctest.Context(t) + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + + var dbCluster types.DBCluster + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_rds_cluster.test" + + kmsKeyIDExpectNoChange := statecheck.CompareValue(compare.ValuesSame()) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.RDSServiceID), + 
ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckClusterDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccClusterConfig_databaseInsightsMode_defaultKMSKey(rName, "null", true, "null"), + Check: resource.ComposeTestCheckFunc( + testAccCheckClusterExists(ctx, resourceName, &dbCluster), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("database_insights_mode"), knownvalue.StringExact("standard")), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("performance_insights_enabled"), knownvalue.Bool(true)), + kmsKeyIDExpectNoChange.AddStateValue("data.aws_kms_key.rds", tfjsonpath.New(names.AttrARN)), + kmsKeyIDExpectNoChange.AddStateValue(resourceName, tfjsonpath.New("performance_insights_kms_key_id")), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("performance_insights_retention_period"), knownvalue.Int64Exact(0)), + }, + }, + { + Config: testAccClusterConfig_databaseInsightsMode_defaultKMSKey(rName, "advanced", true, "465"), + Check: resource.ComposeTestCheckFunc( + testAccCheckClusterExists(ctx, resourceName, &dbCluster), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("database_insights_mode"), knownvalue.StringExact("advanced")), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("performance_insights_enabled"), knownvalue.Bool(true)), + kmsKeyIDExpectNoChange.AddStateValue(resourceName, tfjsonpath.New("performance_insights_kms_key_id")), + statecheck.ExpectKnownValue(resourceName, 
tfjsonpath.New("performance_insights_retention_period"), knownvalue.Int64Exact(465)), + }, + }, + }, + }) +} + +func TestAccRDSCluster_databaseInsightsMode_defaultKMSKey_Enable_OnUpdate_FromPerformanceInsightsDisabled(t *testing.T) { + ctx := acctest.Context(t) + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + + var dbCluster types.DBCluster + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_rds_cluster.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.RDSServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckClusterDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccClusterConfig_databaseInsightsMode_defaultKMSKey(rName, "null", false, "null"), + Check: resource.ComposeTestCheckFunc( + testAccCheckClusterExists(ctx, resourceName, &dbCluster), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("database_insights_mode"), knownvalue.StringExact("standard")), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("performance_insights_enabled"), knownvalue.Bool(false)), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("performance_insights_kms_key_id"), knownvalue.StringExact("")), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("performance_insights_retention_period"), knownvalue.Int64Exact(0)), + }, + }, + { + Config: testAccClusterConfig_databaseInsightsMode_defaultKMSKey(rName, "advanced", true, "465"), + Check: resource.ComposeTestCheckFunc( + testAccCheckClusterExists(ctx, resourceName, &dbCluster), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + 
plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("database_insights_mode"), knownvalue.StringExact("advanced")), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("performance_insights_enabled"), knownvalue.Bool(true)), + statecheck.CompareValuePairs(resourceName, tfjsonpath.New("performance_insights_kms_key_id"), "data.aws_kms_key.rds", tfjsonpath.New(names.AttrARN), compare.ValuesSame()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("performance_insights_retention_period"), knownvalue.Int64Exact(465)), + }, + }, + }, + }) +} + +func TestAccRDSCluster_databaseInsightsMode_customKMSKey_create(t *testing.T) { + ctx := acctest.Context(t) + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + + var dbCluster types.DBCluster + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_rds_cluster.test" + + kmsKeyIDExpectNoChange := statecheck.CompareValue(compare.ValuesSame()) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.RDSServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckClusterDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccClusterConfig_databaseInsightsMode_customKMSKey(rName, "advanced", true, "465"), + Check: resource.ComposeTestCheckFunc( + testAccCheckClusterExists(ctx, resourceName, &dbCluster), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("database_insights_mode"), knownvalue.StringExact("advanced")), + statecheck.ExpectKnownValue(resourceName, 
tfjsonpath.New("performance_insights_enabled"), knownvalue.Bool(true)), + kmsKeyIDExpectNoChange.AddStateValue("aws_kms_key.test", tfjsonpath.New(names.AttrARN)), + kmsKeyIDExpectNoChange.AddStateValue(resourceName, tfjsonpath.New("performance_insights_kms_key_id")), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("performance_insights_retention_period"), knownvalue.Int64Exact(465)), + }, + }, + { + Config: testAccClusterConfig_databaseInsightsMode_customKMSKey(rName, "standard", false, "null"), + Check: resource.ComposeTestCheckFunc( + testAccCheckClusterExists(ctx, resourceName, &dbCluster), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("database_insights_mode"), knownvalue.StringExact("standard")), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("performance_insights_enabled"), knownvalue.Bool(false)), + kmsKeyIDExpectNoChange.AddStateValue(resourceName, tfjsonpath.New("performance_insights_kms_key_id")), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("performance_insights_retention_period"), knownvalue.Int64Exact(0)), + }, + }, }, }) } -func TestAccRDSCluster_performanceInsightsRetentionPeriod(t *testing.T) { +func TestAccRDSCluster_databaseInsightsMode_customKMSKey_Disable_PerformanceInsightsEnabled(t *testing.T) { ctx := acctest.Context(t) if testing.Short() { t.Skip("skipping long-running test in short mode") @@ -3192,64 +3638,54 @@ func TestAccRDSCluster_performanceInsightsRetentionPeriod(t *testing.T) { rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_rds_cluster.test" - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, names.RDSServiceID), - ProtoV5ProviderFactories: 
acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckClusterDestroy(ctx), - Steps: []resource.TestStep{ - { - Config: testAccClusterConfig_performanceInsightsRetentionPeriod(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckClusterExists(ctx, resourceName, &dbCluster), - resource.TestCheckResourceAttr(resourceName, "performance_insights_retention_period", "62"), - ), - }, - }, - }) -} - -func TestAccRDSCluster_GlobalClusterIdentifier_performanceInsightsEnabled(t *testing.T) { - ctx := acctest.Context(t) - var dbCluster types.DBCluster - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resourceName := "aws_rds_cluster.test" + kmsKeyIDExpectNoChange := statecheck.CompareValue(compare.ValuesSame()) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheckGlobalCluster(ctx, t) }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.RDSServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckClusterDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccClusterConfig_GlobalClusterID_performanceInsightsEnabled(rName, true), + Config: testAccClusterConfig_databaseInsightsMode_customKMSKey(rName, "advanced", true, "465"), Check: resource.ComposeTestCheckFunc( testAccCheckClusterExists(ctx, resourceName, &dbCluster), - resource.TestCheckResourceAttr(resourceName, "performance_insights_enabled", acctest.CtTrue), ), - }, - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{ - "master_password", - "enable_global_write_forwarding", - "enable_local_write_forwarding", + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, 
tfjsonpath.New("database_insights_mode"), knownvalue.StringExact("advanced")), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("performance_insights_enabled"), knownvalue.Bool(true)), + kmsKeyIDExpectNoChange.AddStateValue("aws_kms_key.test", tfjsonpath.New(names.AttrARN)), + kmsKeyIDExpectNoChange.AddStateValue(resourceName, tfjsonpath.New("performance_insights_kms_key_id")), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("performance_insights_retention_period"), knownvalue.Int64Exact(465)), }, }, { - Config: testAccClusterConfig_GlobalClusterID_performanceInsightsEnabled(rName, false), + Config: testAccClusterConfig_databaseInsightsMode_customKMSKey(rName, "standard", true, "null"), Check: resource.ComposeTestCheckFunc( testAccCheckClusterExists(ctx, resourceName, &dbCluster), - resource.TestCheckResourceAttr(resourceName, "performance_insights_enabled", acctest.CtFalse), ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("database_insights_mode"), knownvalue.StringExact("standard")), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("performance_insights_enabled"), knownvalue.Bool(true)), + kmsKeyIDExpectNoChange.AddStateValue(resourceName, tfjsonpath.New("performance_insights_kms_key_id")), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("performance_insights_retention_period"), knownvalue.Int64Exact(465)), + }, }, }, }) } -func TestAccRDSCluster_databaseInsightsMode_create(t *testing.T) { +func TestAccRDSCluster_databaseInsightsMode_customKMSKey_Enable_OnUpdate_FromPerformanceInsightsEnabled(t *testing.T) { ctx := acctest.Context(t) if testing.Short() { t.Skip("skipping long-running test in short mode") @@ -3259,6 +3695,8 @@ func TestAccRDSCluster_databaseInsightsMode_create(t *testing.T) { 
rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_rds_cluster.test" + kmsKeyIDExpectNoChange := statecheck.CompareValue(compare.ValuesSame()) + resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.RDSServiceID), @@ -3266,7 +3704,7 @@ func TestAccRDSCluster_databaseInsightsMode_create(t *testing.T) { CheckDestroy: testAccCheckClusterDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccClusterConfig_databaseInsightsMode(rName, "advanced", true, "465"), + Config: testAccClusterConfig_databaseInsightsMode_customKMSKey(rName, "null", true, "null"), Check: resource.ComposeTestCheckFunc( testAccCheckClusterExists(ctx, resourceName, &dbCluster), ), @@ -3276,13 +3714,15 @@ func TestAccRDSCluster_databaseInsightsMode_create(t *testing.T) { }, }, ConfigStateChecks: []statecheck.StateCheck{ - statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("database_insights_mode"), knownvalue.StringExact("advanced")), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("database_insights_mode"), knownvalue.StringExact("standard")), statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("performance_insights_enabled"), knownvalue.Bool(true)), - statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("performance_insights_retention_period"), knownvalue.Int64Exact(465)), + kmsKeyIDExpectNoChange.AddStateValue("aws_kms_key.test", tfjsonpath.New(names.AttrARN)), + kmsKeyIDExpectNoChange.AddStateValue(resourceName, tfjsonpath.New("performance_insights_kms_key_id")), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("performance_insights_retention_period"), knownvalue.Int64Exact(0)), }, }, { - Config: testAccClusterConfig_databaseInsightsMode(rName, "standard", false, "null"), + Config: testAccClusterConfig_databaseInsightsMode_customKMSKey(rName, "advanced", true, "465"), Check: resource.ComposeTestCheckFunc( testAccCheckClusterExists(ctx, resourceName, 
&dbCluster), ), @@ -3292,16 +3732,17 @@ func TestAccRDSCluster_databaseInsightsMode_create(t *testing.T) { }, }, ConfigStateChecks: []statecheck.StateCheck{ - statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("database_insights_mode"), knownvalue.StringExact("standard")), - statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("performance_insights_enabled"), knownvalue.Bool(false)), - statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("performance_insights_retention_period"), knownvalue.Int64Exact(0)), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("database_insights_mode"), knownvalue.StringExact("advanced")), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("performance_insights_enabled"), knownvalue.Bool(true)), + kmsKeyIDExpectNoChange.AddStateValue(resourceName, tfjsonpath.New("performance_insights_kms_key_id")), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("performance_insights_retention_period"), knownvalue.Int64Exact(465)), }, }, }, }) } -func TestAccRDSCluster_databaseInsightsMode_update(t *testing.T) { +func TestAccRDSCluster_databaseInsightsMode_customKMSKey_Enable_OnUpdate_FromPerformanceInsightsDisabled(t *testing.T) { ctx := acctest.Context(t) if testing.Short() { t.Skip("skipping long-running test in short mode") @@ -3318,7 +3759,8 @@ func TestAccRDSCluster_databaseInsightsMode_update(t *testing.T) { CheckDestroy: testAccCheckClusterDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccClusterConfig_databaseInsightsMode(rName, "null", true, "null"), + // KMS Key cannot be set if Performance Insights is not enabled + Config: testAccClusterConfig_databaseInsightsMode_defaultKMSKey(rName, "null", false, "null"), Check: resource.ComposeTestCheckFunc( testAccCheckClusterExists(ctx, resourceName, &dbCluster), ), @@ -3329,12 +3771,13 @@ func TestAccRDSCluster_databaseInsightsMode_update(t *testing.T) { }, ConfigStateChecks: []statecheck.StateCheck{ statecheck.ExpectKnownValue(resourceName, 
tfjsonpath.New("database_insights_mode"), knownvalue.StringExact("standard")), - statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("performance_insights_enabled"), knownvalue.Bool(true)), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("performance_insights_enabled"), knownvalue.Bool(false)), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("performance_insights_kms_key_id"), knownvalue.StringExact("")), statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("performance_insights_retention_period"), knownvalue.Int64Exact(0)), }, }, { - Config: testAccClusterConfig_databaseInsightsMode(rName, "advanced", true, "465"), + Config: testAccClusterConfig_databaseInsightsMode_customKMSKey(rName, "advanced", true, "465"), Check: resource.ComposeTestCheckFunc( testAccCheckClusterExists(ctx, resourceName, &dbCluster), ), @@ -3346,6 +3789,7 @@ func TestAccRDSCluster_databaseInsightsMode_update(t *testing.T) { ConfigStateChecks: []statecheck.StateCheck{ statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("database_insights_mode"), knownvalue.StringExact("advanced")), statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("performance_insights_enabled"), knownvalue.Bool(true)), + statecheck.CompareValuePairs(resourceName, tfjsonpath.New("performance_insights_kms_key_id"), "aws_kms_key.test", tfjsonpath.New(names.AttrARN), compare.ValuesSame()), statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("performance_insights_retention_period"), knownvalue.Int64Exact(465)), }, }, @@ -6517,63 +6961,108 @@ resource "aws_rds_cluster" "test" { `, rName, tfrds.ClusterEngineAuroraPostgreSQL) } -func testAccClusterConfig_performanceInsightsEnabled(rName string, performanceInsightsEnabled bool) string { - return fmt.Sprintf(` +func testAccClusterConfig_performanceInsights_Enabled(rName string, performanceInsightsEnabled bool) string { + return acctest.ConfigCompose( + testAccClusterConfig_clusterSubnetGroup(rName), + fmt.Sprintf(` resource 
"aws_rds_cluster" "test" { - cluster_identifier = %[1]q - engine = %[3]q - db_cluster_instance_class = "db.m6gd.large" - storage_type = "io1" - allocated_storage = 100 - iops = 1000 - master_username = "tfacctest" - master_password = "avoid-plaintext-passwords" - skip_final_snapshot = true + cluster_identifier = %[1]q + engine = %[3]q + db_cluster_instance_class = "db.m6gd.large" + storage_type = "io1" + allocated_storage = 100 + iops = 1000 + master_username = "tfacctest" + master_password = "avoid-plaintext-passwords" + skip_final_snapshot = true + db_subnet_group_name = aws_db_subnet_group.test.name + performance_insights_enabled = %[2]t } -`, rName, performanceInsightsEnabled, tfrds.ClusterEngineMySQL) + +data "aws_kms_key" "rds" { + key_id = "alias/aws/rds" +} +`, rName, performanceInsightsEnabled, tfrds.ClusterEngineMySQL)) +} + +func testAccClusterConfig_performanceInsights_KMSKeyID(rName string) string { + return acctest.ConfigCompose( + testAccClusterConfig_clusterSubnetGroup(rName), + fmt.Sprintf(` +resource "aws_rds_cluster" "test" { + cluster_identifier = %[1]q + engine = %[2]q + db_cluster_instance_class = "db.m6gd.large" + storage_type = "io1" + allocated_storage = 100 + iops = 1000 + master_username = "tfacctest" + master_password = "avoid-plaintext-passwords" + skip_final_snapshot = true + db_subnet_group_name = aws_db_subnet_group.test.name + + performance_insights_enabled = true + performance_insights_kms_key_id = aws_kms_key.test.arn } -func testAccClusterConfig_performanceInsightsKMSKeyID(rName string) string { - return fmt.Sprintf(` resource "aws_kms_key" "test" { description = %[1]q deletion_window_in_days = 7 enable_key_rotation = true } +`, rName, tfrds.ClusterEngineMySQL)) +} +func testAccClusterConfig_performanceInsights_RetentionPeriod(rName string, period int) string { + return acctest.ConfigCompose( + testAccClusterConfig_clusterSubnetGroup(rName), + fmt.Sprintf(` resource "aws_rds_cluster" "test" { - cluster_identifier = %[1]q - engine 
= %[2]q - db_cluster_instance_class = "db.m6gd.large" - storage_type = "io1" - allocated_storage = 100 - iops = 1000 - master_username = "tfacctest" - master_password = "avoid-plaintext-passwords" - skip_final_snapshot = true - performance_insights_enabled = true - performance_insights_kms_key_id = aws_kms_key.test.arn + cluster_identifier = %[1]q + engine = %[2]q + db_cluster_instance_class = "db.m6gd.large" + storage_type = "io1" + allocated_storage = 100 + iops = 1000 + master_username = "tfacctest" + master_password = "avoid-plaintext-passwords" + skip_final_snapshot = true + db_subnet_group_name = aws_db_subnet_group.test.name + + performance_insights_enabled = true + performance_insights_retention_period = %d } -`, rName, tfrds.ClusterEngineMySQL) +`, rName, tfrds.ClusterEngineMySQL, period)) } -func testAccClusterConfig_performanceInsightsRetentionPeriod(rName string) string { - return fmt.Sprintf(` +func testAccClusterConfig_performanceInsights_KMSKey_RetentionPeriod(rName string, period int) string { + return acctest.ConfigCompose( + testAccClusterConfig_clusterSubnetGroup(rName), + fmt.Sprintf(` resource "aws_rds_cluster" "test" { - cluster_identifier = %[1]q - engine = %[2]q - db_cluster_instance_class = "db.m6gd.large" - storage_type = "io1" - allocated_storage = 100 - iops = 1000 - master_username = "tfacctest" - master_password = "avoid-plaintext-passwords" - skip_final_snapshot = true + cluster_identifier = %[1]q + engine = %[2]q + db_cluster_instance_class = "db.m6gd.large" + storage_type = "io1" + allocated_storage = 100 + iops = 1000 + master_username = "tfacctest" + master_password = "avoid-plaintext-passwords" + skip_final_snapshot = true + db_subnet_group_name = aws_db_subnet_group.test.name + performance_insights_enabled = true - performance_insights_retention_period = 62 + performance_insights_kms_key_id = aws_kms_key.test.arn + performance_insights_retention_period = %d } -`, rName, tfrds.ClusterEngineMySQL) + +resource "aws_kms_key" "test" 
{ + description = %[1]q + deletion_window_in_days = 7 + enable_key_rotation = true +} +`, rName, tfrds.ClusterEngineMySQL, period)) } func testAccClusterConfig_GlobalClusterID_performanceInsightsEnabled(rName string, performanceInsightsEnabled bool) string { @@ -6740,26 +7229,111 @@ resource "aws_rds_cluster" "test" { `, rName) } -func testAccClusterConfig_databaseInsightsMode(rName, databaseInsightsMode string, performanceInsightsEnabled bool, performanceInsightsRetentionPeriod string) string { +func testAccClusterConfig_databaseInsightsMode_defaultKMSKey(rName, databaseInsightsMode string, performanceInsightsEnabled bool, performanceInsightsRetentionPeriod string) string { if databaseInsightsMode != "null" { databaseInsightsMode = strconv.Quote(databaseInsightsMode) } - return fmt.Sprintf(` + return acctest.ConfigCompose( + testAccClusterConfig_clusterSubnetGroup(rName), + fmt.Sprintf(` resource "aws_rds_cluster" "test" { - cluster_identifier = %[1]q - engine = %[2]q - db_cluster_instance_class = "db.m6gd.large" - storage_type = "io1" - allocated_storage = 100 - iops = 1000 - master_username = "tfacctest" - master_password = "avoid-plaintext-passwords" - skip_final_snapshot = true + cluster_identifier = %[1]q + engine = %[2]q + db_cluster_instance_class = "db.m6gd.large" + storage_type = "io1" + allocated_storage = 100 + iops = 1000 + master_username = "tfacctest" + master_password = "avoid-plaintext-passwords" + skip_final_snapshot = true + apply_immediately = true + db_subnet_group_name = aws_db_subnet_group.test.name + + database_insights_mode = %[3]s + performance_insights_enabled = %[4]t + performance_insights_retention_period = %[5]s +} + +data "aws_kms_key" "rds" { + key_id = "alias/aws/rds" +} +`, rName, tfrds.ClusterEngineMySQL, databaseInsightsMode, performanceInsightsEnabled, performanceInsightsRetentionPeriod)) +} + +func testAccClusterConfig_GlobalClusterIdentifier_databaseInsightsMode_defaultKMSKey(rName, databaseInsightsMode string, 
performanceInsightsEnabled bool, performanceInsightsRetentionPeriod string) string { + if databaseInsightsMode != "null" { + databaseInsightsMode = strconv.Quote(databaseInsightsMode) + } + + return acctest.ConfigCompose( + testAccClusterConfig_clusterSubnetGroup(rName), + fmt.Sprintf(` +resource "aws_rds_cluster" "test" { + cluster_identifier = %[1]q + global_cluster_identifier = aws_rds_global_cluster.test.global_cluster_identifier + + engine = aws_rds_global_cluster.test.engine + engine_version = aws_rds_global_cluster.test.engine_version + + master_username = "tfacctest" + master_password = "avoid-plaintext-passwords" + skip_final_snapshot = true + apply_immediately = true + db_subnet_group_name = aws_db_subnet_group.test.name + + database_insights_mode = %[3]s + performance_insights_enabled = %[4]t + performance_insights_retention_period = %[5]s +} + +data "aws_rds_engine_version" "test" { + engine = "aurora-postgresql" +} + +resource "aws_rds_global_cluster" "test" { + global_cluster_identifier = %[1]q + engine = data.aws_rds_engine_version.test.engine + engine_version = data.aws_rds_engine_version.test.version +} + +data "aws_kms_key" "rds" { + key_id = "alias/aws/rds" +} +`, rName, tfrds.ClusterEngineMySQL, databaseInsightsMode, performanceInsightsEnabled, performanceInsightsRetentionPeriod)) +} + +func testAccClusterConfig_databaseInsightsMode_customKMSKey(rName, databaseInsightsMode string, performanceInsightsEnabled bool, performanceInsightsRetentionPeriod string) string { + if databaseInsightsMode != "null" { + databaseInsightsMode = strconv.Quote(databaseInsightsMode) + } + + return acctest.ConfigCompose( + testAccClusterConfig_clusterSubnetGroup(rName), + fmt.Sprintf(` +resource "aws_rds_cluster" "test" { + cluster_identifier = %[1]q + engine = %[2]q + db_cluster_instance_class = "db.m6gd.large" + storage_type = "io1" + allocated_storage = 100 + iops = 1000 + master_username = "tfacctest" + master_password = "avoid-plaintext-passwords" + 
skip_final_snapshot = true + apply_immediately = true + db_subnet_group_name = aws_db_subnet_group.test.name + database_insights_mode = %[3]s performance_insights_enabled = %[4]t + performance_insights_kms_key_id = aws_kms_key.test.arn performance_insights_retention_period = %[5]s - apply_immediately = true } -`, rName, tfrds.ClusterEngineMySQL, databaseInsightsMode, performanceInsightsEnabled, performanceInsightsRetentionPeriod) + +resource "aws_kms_key" "test" { + description = %[1]q + deletion_window_in_days = 7 + enable_key_rotation = true +} +`, rName, tfrds.ClusterEngineMySQL, databaseInsightsMode, performanceInsightsEnabled, performanceInsightsRetentionPeriod)) } diff --git a/internal/service/rds/engine_version_data_source_test.go b/internal/service/rds/engine_version_data_source_test.go index f68c3df5ee53..aab4d7df585b 100644 --- a/internal/service/rds/engine_version_data_source_test.go +++ b/internal/service/rds/engine_version_data_source_test.go @@ -330,7 +330,7 @@ func TestAccRDSEngineVersionDataSource_hasMinorMajor(t *testing.T) { resource.TestCheckResourceAttrWith(dataSourceName, "valid_major_targets.#", func(value string) error { intValue, err := strconv.Atoi(value) if err != nil { - return fmt.Errorf("could not convert string to int: %v", err) + return fmt.Errorf("could not convert string to int: %w", err) } if intValue <= 0 { @@ -347,7 +347,7 @@ func TestAccRDSEngineVersionDataSource_hasMinorMajor(t *testing.T) { resource.TestCheckResourceAttrWith(dataSourceName, "valid_minor_targets.#", func(value string) error { intValue, err := strconv.Atoi(value) if err != nil { - return fmt.Errorf("could not convert string to int: %v", err) + return fmt.Errorf("could not convert string to int: %w", err) } if intValue <= 0 { @@ -364,7 +364,7 @@ func TestAccRDSEngineVersionDataSource_hasMinorMajor(t *testing.T) { resource.TestCheckResourceAttrWith(dataSourceName, "valid_major_targets.#", func(value string) error { intValue, err := strconv.Atoi(value) if err != 
nil { - return fmt.Errorf("could not convert string to int: %v", err) + return fmt.Errorf("could not convert string to int: %w", err) } if intValue <= 0 { @@ -376,7 +376,7 @@ func TestAccRDSEngineVersionDataSource_hasMinorMajor(t *testing.T) { resource.TestCheckResourceAttrWith(dataSourceName, "valid_minor_targets.#", func(value string) error { intValue, err := strconv.Atoi(value) if err != nil { - return fmt.Errorf("could not convert string to int: %v", err) + return fmt.Errorf("could not convert string to int: %w", err) } if intValue <= 0 { diff --git a/internal/service/rds/global_cluster.go b/internal/service/rds/global_cluster.go index e008c1d69b32..2683ba76747c 100644 --- a/internal/service/rds/global_cluster.go +++ b/internal/service/rds/global_cluster.go @@ -71,12 +71,11 @@ func resourceGlobalCluster() *schema.Resource { Computed: true, }, names.AttrEngine: { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - ConflictsWith: []string{"source_db_cluster_identifier"}, - ValidateFunc: validation.StringInSlice(globalClusterEngine_Values(), false), + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ValidateFunc: validation.StringInSlice(globalClusterEngine_Values(), false), }, "engine_lifecycle_support": { Type: schema.TypeString, @@ -124,12 +123,11 @@ func resourceGlobalCluster() *schema.Resource { Computed: true, }, "source_db_cluster_identifier": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - ConflictsWith: []string{names.AttrEngine}, - RequiredWith: []string{names.AttrForceDestroy}, + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + RequiredWith: []string{names.AttrForceDestroy}, }, names.AttrStorageEncrypted: { Type: schema.TypeBool, @@ -175,6 +173,10 @@ func resourceGlobalClusterCreate(ctx context.Context, d *schema.ResourceData, me if v, ok := d.GetOk("source_db_cluster_identifier"); ok { input.SourceDBClusterIdentifier = 
aws.String(v.(string)) + // Engine and engine version cannot be sent during create requests if a source + // DB cluster is specified. + input.Engine = nil + input.EngineVersion = nil } if v, ok := d.GetOk(names.AttrStorageEncrypted); ok { @@ -363,7 +365,7 @@ func resourceGlobalClusterDelete(ctx context.Context, d *schema.ResourceData, me globalClusterClusterDeleteTimeout = 5 * time.Minute ) timeout := max(deadline.Remaining(), globalClusterClusterDeleteTimeout) - _, err := tfresource.RetryWhenIsAErrorMessageContains[*types.InvalidGlobalClusterStateFault](ctx, timeout, func() (any, error) { + _, err := tfresource.RetryWhenIsAErrorMessageContains[any, *types.InvalidGlobalClusterStateFault](ctx, timeout, func(ctx context.Context) (any, error) { return conn.DeleteGlobalCluster(ctx, &rds.DeleteGlobalClusterInput{ GlobalClusterIdentifier: aws.String(d.Id()), }) @@ -529,7 +531,7 @@ func waitGlobalClusterDeleted(ctx context.Context, conn *rds.Client, id string, } func waitGlobalClusterMemberRemoved(ctx context.Context, conn *rds.Client, dbClusterARN string, timeout time.Duration) (*types.GlobalCluster, error) { //nolint:unparam - outputRaw, err := tfresource.RetryUntilNotFound(ctx, timeout, func() (any, error) { + outputRaw, err := tfresource.RetryUntilNotFound(ctx, timeout, func(ctx context.Context) (any, error) { return findGlobalClusterByDBClusterARN(ctx, conn, dbClusterARN) }) @@ -582,7 +584,7 @@ func globalClusterUpgradeMajorEngineVersion(ctx context.Context, conn *rds.Clien } _, err := tfresource.RetryWhen(ctx, timeout, - func() (any, error) { + func(ctx context.Context) (any, error) { return conn.ModifyGlobalCluster(ctx, input) }, func(err error) (bool, error) { @@ -679,7 +681,7 @@ func globalClusterUpgradeMinorEngineVersion(ctx context.Context, conn *rds.Clien log.Printf("[INFO] Performing RDS Global Cluster (%s) Cluster (%s) minor version (%s) upgrade", globalClusterID, clusterID, engineVersion) _, err = tfresource.RetryWhen(ctx, timeout, - func() (any, error) { + 
func(ctx context.Context) (any, error) { return conn.ModifyDBCluster(ctx, input, optFn) }, func(err error) (bool, error) { diff --git a/internal/service/rds/global_cluster_data_source.go b/internal/service/rds/global_cluster_data_source.go new file mode 100644 index 000000000000..0a2217697fdc --- /dev/null +++ b/internal/service/rds/global_cluster_data_source.go @@ -0,0 +1,115 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package rds + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-provider-aws/internal/framework" + "github.com/hashicorp/terraform-provider-aws/internal/framework/flex" + fwtypes "github.com/hashicorp/terraform-provider-aws/internal/framework/types" + "github.com/hashicorp/terraform-provider-aws/internal/smerr" + tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" + "github.com/hashicorp/terraform-provider-aws/names" +) + +// @FrameworkDataSource("aws_rds_global_cluster", name="Global Cluster") +// @Tags(identifierAttribute="arn") +// @Testing(tagsTest=false) +func newDataSourceGlobalCluster(_ context.Context) (datasource.DataSourceWithConfigure, error) { + return &dataSourceGlobalCluster{}, nil +} + +type dataSourceGlobalCluster struct { + framework.DataSourceWithModel[dataSourceGlobalClusterData] +} + +func (d *dataSourceGlobalCluster) Schema(ctx context.Context, _ datasource.SchemaRequest, response *datasource.SchemaResponse) { + response.Schema = schema.Schema{ + Attributes: map[string]schema.Attribute{ + names.AttrARN: framework.ARNAttributeComputedOnly(), + names.AttrDatabaseName: schema.StringAttribute{ + Computed: true, + }, + names.AttrDeletionProtection: schema.BoolAttribute{ + Computed: true, + }, + names.AttrEndpoint: schema.StringAttribute{ + Computed: true, + }, + names.AttrEngine: 
schema.StringAttribute{ + Computed: true, + }, + "engine_lifecycle_support": schema.StringAttribute{ + Computed: true, + }, + names.AttrEngineVersion: schema.StringAttribute{ + Computed: true, + }, + names.AttrIdentifier: schema.StringAttribute{ + Required: true, + }, + "members": schema.ListAttribute{ + CustomType: fwtypes.NewListNestedObjectTypeOf[globalClusterMembersModel](ctx), + Computed: true, + }, + names.AttrResourceID: schema.StringAttribute{ + Computed: true, + }, + names.AttrStorageEncrypted: schema.BoolAttribute{ + Computed: true, + }, + names.AttrTags: tftags.TagsAttributeComputedOnly(), + }, + } +} + +func (d *dataSourceGlobalCluster) Read(ctx context.Context, request datasource.ReadRequest, response *datasource.ReadResponse) { + var data dataSourceGlobalClusterData + conn := d.Meta().RDSClient(ctx) + + smerr.EnrichAppend(ctx, &response.Diagnostics, request.Config.Get(ctx, &data)) + if response.Diagnostics.HasError() { + return + } + + output, err := findGlobalClusterByID(ctx, conn, data.Identifier.ValueString()) + if err != nil { + smerr.AddError(ctx, &response.Diagnostics, err, smerr.ID, data.Identifier.String()) + return + } + + smerr.EnrichAppend(ctx, &response.Diagnostics, flex.Flatten(ctx, output, &data, flex.WithFieldNamePrefix("GlobalCluster"))) + if response.Diagnostics.HasError() { + return + } + + setTagsOut(ctx, output.TagList) + + response.Diagnostics.Append(response.State.Set(ctx, &data)...) 
+} + +type dataSourceGlobalClusterData struct { + framework.WithRegionModel + ARN types.String `tfsdk:"arn"` + DatabaseName types.String `tfsdk:"database_name"` + DeletionProtection types.Bool `tfsdk:"deletion_protection"` + Endpoint types.String `tfsdk:"endpoint"` + Engine types.String `tfsdk:"engine"` + EngineVersion types.String `tfsdk:"engine_version"` + EngineLifecycleSupport types.String `tfsdk:"engine_lifecycle_support"` + Identifier types.String `tfsdk:"identifier"` + Members fwtypes.ListNestedObjectValueOf[globalClusterMembersModel] `tfsdk:"members"` + ResourceID types.String `tfsdk:"resource_id"` + StorageEncrypted types.Bool `tfsdk:"storage_encrypted"` + Tags tftags.Map `tfsdk:"tags"` +} + +type globalClusterMembersModel struct { + DBClusterARN types.String `tfsdk:"db_cluster_arn"` + IsWriter types.Bool `tfsdk:"is_writer"` +} diff --git a/internal/service/rds/global_cluster_data_source_test.go b/internal/service/rds/global_cluster_data_source_test.go new file mode 100644 index 000000000000..c4a6214d5450 --- /dev/null +++ b/internal/service/rds/global_cluster_data_source_test.go @@ -0,0 +1,60 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package rds_test + +import ( + "fmt" + "testing" + + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccRDSGlobalClusterDataSource_basic(t *testing.T) { + ctx := acctest.Context(t) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + dataSourceName := "data.aws_rds_global_cluster.test" + resourceName := "aws_rds_global_cluster.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.RDSServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + Config: testAccGlobalClusterDataSourceConfig_basic(rName), + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttrPair(dataSourceName, names.AttrARN, resourceName, names.AttrARN), + resource.TestCheckResourceAttrPair(dataSourceName, names.AttrDatabaseName, resourceName, names.AttrDatabaseName), + resource.TestCheckResourceAttrPair(dataSourceName, names.AttrDeletionProtection, resourceName, names.AttrDeletionProtection), + resource.TestCheckResourceAttrPair(dataSourceName, names.AttrEngine, resourceName, names.AttrEngine), + resource.TestCheckResourceAttrPair(dataSourceName, names.AttrEngineVersion, resourceName, names.AttrEngineVersion), + resource.TestCheckResourceAttrPair(dataSourceName, names.AttrIdentifier, resourceName, "global_cluster_identifier"), + resource.TestCheckResourceAttrPair(dataSourceName, "members", resourceName, "global_cluster_members"), + resource.TestCheckResourceAttrPair(dataSourceName, names.AttrResourceID, resourceName, "global_cluster_resource_id"), + resource.TestCheckResourceAttrPair(dataSourceName, names.AttrStorageEncrypted, resourceName, 
names.AttrStorageEncrypted), + ), + }, + }, + }) +} + +func testAccGlobalClusterDataSourceConfig_basic(rName string) string { + return fmt.Sprintf(` +resource "aws_rds_global_cluster" "test" { + global_cluster_identifier = %[1]q + engine = "aurora-postgresql" + engine_version = "15.5" + database_name = "example_db" +} + +data "aws_rds_global_cluster" "test" { + identifier = aws_rds_global_cluster.test.global_cluster_identifier +} +`, rName) +} diff --git a/internal/service/rds/global_cluster_tags_gen_test.go b/internal/service/rds/global_cluster_tags_gen_test.go index 5c0268e1426a..ee74c0519df1 100644 --- a/internal/service/rds/global_cluster_tags_gen_test.go +++ b/internal/service/rds/global_cluster_tags_gen_test.go @@ -7,7 +7,6 @@ import ( "github.com/aws/aws-sdk-go-v2/service/rds/types" "github.com/hashicorp/terraform-plugin-testing/config" - sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/knownvalue" "github.com/hashicorp/terraform-plugin-testing/plancheck" @@ -19,11 +18,12 @@ import ( func TestAccRDSGlobalCluster_tags(t *testing.T) { ctx := acctest.Context(t) + var v types.GlobalCluster resourceName := "aws_rds_global_cluster.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.RDSServiceID), CheckDestroy: testAccCheckGlobalClusterDestroy(ctx), @@ -201,11 +201,12 @@ func TestAccRDSGlobalCluster_tags(t *testing.T) { func TestAccRDSGlobalCluster_tags_null(t *testing.T) { ctx := acctest.Context(t) + var v types.GlobalCluster resourceName := "aws_rds_global_cluster.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, 
acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.RDSServiceID), CheckDestroy: testAccCheckGlobalClusterDestroy(ctx), @@ -268,11 +269,12 @@ func TestAccRDSGlobalCluster_tags_null(t *testing.T) { func TestAccRDSGlobalCluster_tags_EmptyMap(t *testing.T) { ctx := acctest.Context(t) + var v types.GlobalCluster resourceName := "aws_rds_global_cluster.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.RDSServiceID), CheckDestroy: testAccCheckGlobalClusterDestroy(ctx), @@ -331,11 +333,12 @@ func TestAccRDSGlobalCluster_tags_EmptyMap(t *testing.T) { func TestAccRDSGlobalCluster_tags_AddOnUpdate(t *testing.T) { ctx := acctest.Context(t) + var v types.GlobalCluster resourceName := "aws_rds_global_cluster.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.RDSServiceID), CheckDestroy: testAccCheckGlobalClusterDestroy(ctx), @@ -412,11 +415,12 @@ func TestAccRDSGlobalCluster_tags_AddOnUpdate(t *testing.T) { func TestAccRDSGlobalCluster_tags_EmptyTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) + var v types.GlobalCluster resourceName := "aws_rds_global_cluster.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() 
{ acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.RDSServiceID), CheckDestroy: testAccCheckGlobalClusterDestroy(ctx), @@ -501,11 +505,12 @@ func TestAccRDSGlobalCluster_tags_EmptyTag_OnCreate(t *testing.T) { func TestAccRDSGlobalCluster_tags_EmptyTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + var v types.GlobalCluster resourceName := "aws_rds_global_cluster.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.RDSServiceID), CheckDestroy: testAccCheckGlobalClusterDestroy(ctx), @@ -638,11 +643,12 @@ func TestAccRDSGlobalCluster_tags_EmptyTag_OnUpdate_Add(t *testing.T) { func TestAccRDSGlobalCluster_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + var v types.GlobalCluster resourceName := "aws_rds_global_cluster.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.RDSServiceID), CheckDestroy: testAccCheckGlobalClusterDestroy(ctx), @@ -727,11 +733,12 @@ func TestAccRDSGlobalCluster_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { func TestAccRDSGlobalCluster_tags_DefaultTags_providerOnly(t *testing.T) { ctx := acctest.Context(t) + var v types.GlobalCluster resourceName := "aws_rds_global_cluster.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, 
names.RDSServiceID), CheckDestroy: testAccCheckGlobalClusterDestroy(ctx), @@ -908,11 +915,12 @@ func TestAccRDSGlobalCluster_tags_DefaultTags_providerOnly(t *testing.T) { func TestAccRDSGlobalCluster_tags_DefaultTags_nonOverlapping(t *testing.T) { ctx := acctest.Context(t) + var v types.GlobalCluster resourceName := "aws_rds_global_cluster.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.RDSServiceID), CheckDestroy: testAccCheckGlobalClusterDestroy(ctx), @@ -1068,11 +1076,12 @@ func TestAccRDSGlobalCluster_tags_DefaultTags_nonOverlapping(t *testing.T) { func TestAccRDSGlobalCluster_tags_DefaultTags_overlapping(t *testing.T) { ctx := acctest.Context(t) + var v types.GlobalCluster resourceName := "aws_rds_global_cluster.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.RDSServiceID), CheckDestroy: testAccCheckGlobalClusterDestroy(ctx), @@ -1244,11 +1253,12 @@ func TestAccRDSGlobalCluster_tags_DefaultTags_overlapping(t *testing.T) { func TestAccRDSGlobalCluster_tags_DefaultTags_updateToProviderOnly(t *testing.T) { ctx := acctest.Context(t) + var v types.GlobalCluster resourceName := "aws_rds_global_cluster.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.RDSServiceID), CheckDestroy: 
testAccCheckGlobalClusterDestroy(ctx), @@ -1334,11 +1344,12 @@ func TestAccRDSGlobalCluster_tags_DefaultTags_updateToProviderOnly(t *testing.T) func TestAccRDSGlobalCluster_tags_DefaultTags_updateToResourceOnly(t *testing.T) { ctx := acctest.Context(t) + var v types.GlobalCluster resourceName := "aws_rds_global_cluster.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.RDSServiceID), CheckDestroy: testAccCheckGlobalClusterDestroy(ctx), @@ -1423,11 +1434,12 @@ func TestAccRDSGlobalCluster_tags_DefaultTags_updateToResourceOnly(t *testing.T) func TestAccRDSGlobalCluster_tags_DefaultTags_emptyResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v types.GlobalCluster resourceName := "aws_rds_global_cluster.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.RDSServiceID), CheckDestroy: testAccCheckGlobalClusterDestroy(ctx), @@ -1488,11 +1500,12 @@ func TestAccRDSGlobalCluster_tags_DefaultTags_emptyResourceTag(t *testing.T) { func TestAccRDSGlobalCluster_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T) { ctx := acctest.Context(t) + var v types.GlobalCluster resourceName := "aws_rds_global_cluster.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.RDSServiceID), CheckDestroy: 
testAccCheckGlobalClusterDestroy(ctx), @@ -1545,11 +1558,12 @@ func TestAccRDSGlobalCluster_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T) func TestAccRDSGlobalCluster_tags_DefaultTags_nullOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v types.GlobalCluster resourceName := "aws_rds_global_cluster.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.RDSServiceID), CheckDestroy: testAccCheckGlobalClusterDestroy(ctx), @@ -1607,11 +1621,12 @@ func TestAccRDSGlobalCluster_tags_DefaultTags_nullOverlappingResourceTag(t *test func TestAccRDSGlobalCluster_tags_DefaultTags_nullNonOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v types.GlobalCluster resourceName := "aws_rds_global_cluster.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.RDSServiceID), CheckDestroy: testAccCheckGlobalClusterDestroy(ctx), @@ -1669,11 +1684,12 @@ func TestAccRDSGlobalCluster_tags_DefaultTags_nullNonOverlappingResourceTag(t *t func TestAccRDSGlobalCluster_tags_ComputedTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) + var v types.GlobalCluster resourceName := "aws_rds_global_cluster.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.RDSServiceID), CheckDestroy: 
testAccCheckGlobalClusterDestroy(ctx), @@ -1724,11 +1740,12 @@ func TestAccRDSGlobalCluster_tags_ComputedTag_OnCreate(t *testing.T) { func TestAccRDSGlobalCluster_tags_ComputedTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + var v types.GlobalCluster resourceName := "aws_rds_global_cluster.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.RDSServiceID), CheckDestroy: testAccCheckGlobalClusterDestroy(ctx), @@ -1821,11 +1838,12 @@ func TestAccRDSGlobalCluster_tags_ComputedTag_OnUpdate_Add(t *testing.T) { func TestAccRDSGlobalCluster_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + var v types.GlobalCluster resourceName := "aws_rds_global_cluster.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.RDSServiceID), CheckDestroy: testAccCheckGlobalClusterDestroy(ctx), @@ -1908,11 +1926,12 @@ func TestAccRDSGlobalCluster_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { func TestAccRDSGlobalCluster_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { ctx := acctest.Context(t) + var v types.GlobalCluster resourceName := "aws_rds_global_cluster.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.RDSServiceID), CheckDestroy: 
testAccCheckGlobalClusterDestroy(ctx), @@ -2070,11 +2089,12 @@ func TestAccRDSGlobalCluster_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { func TestAccRDSGlobalCluster_tags_IgnoreTags_Overlap_ResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v types.GlobalCluster resourceName := "aws_rds_global_cluster.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.RDSServiceID), CheckDestroy: testAccCheckGlobalClusterDestroy(ctx), diff --git a/internal/service/rds/global_cluster_test.go b/internal/service/rds/global_cluster_test.go index 2c20b7d183be..0490b260b922 100644 --- a/internal/service/rds/global_cluster_test.go +++ b/internal/service/rds/global_cluster_test.go @@ -16,6 +16,7 @@ import ( "github.com/hashicorp/aws-sdk-go-base/v2/tfawserr" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/plancheck" "github.com/hashicorp/terraform-plugin-testing/terraform" "github.com/hashicorp/terraform-provider-aws/internal/acctest" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -628,6 +629,49 @@ func TestAccRDSGlobalCluster_storageEncrypted(t *testing.T) { }) } +// Creates a global cluster from an existing regional source, then completes a major version upgrade +func TestAccRDSGlobalCluster_SourceDBClusterIdentifier_EngineVersion_updateMajor(t *testing.T) { + ctx := acctest.Context(t) + var v types.GlobalCluster + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_rds_global_cluster.test" + engineVersion := "15.10" + engineVersionUpdated := "16.6" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { 
acctest.PreCheck(ctx, t); testAccPreCheckGlobalCluster(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.RDSServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckGlobalClusterDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccGlobalClusterConfig_sourceClusterIDEngineVersion(rName, engineVersion), + Check: resource.ComposeTestCheckFunc( + testAccCheckGlobalClusterExists(ctx, resourceName, &v), + resource.TestCheckResourceAttr(resourceName, names.AttrEngineVersion, engineVersion), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + }, + }, + }, + { + Config: testAccGlobalClusterConfig_sourceClusterIDEngineVersion(rName, engineVersionUpdated), + Check: resource.ComposeTestCheckFunc( + testAccCheckGlobalClusterExists(ctx, resourceName, &v), + resource.TestCheckResourceAttr(resourceName, names.AttrEngineVersion, engineVersionUpdated), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + }, + }, + }, + }, + }) +} + func testAccCheckGlobalClusterExists(ctx context.Context, n string, v *types.GlobalCluster) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] @@ -1280,6 +1324,40 @@ resource "aws_rds_global_cluster" "test" { `, rName) } +func testAccGlobalClusterConfig_sourceClusterIDEngineVersion(rName, engineVersion string) string { + return fmt.Sprintf(` +resource "aws_rds_cluster" "test" { + cluster_identifier = %[1]q + engine = "aurora-postgresql" + engine_version = %[2]q + master_password = "mustbeeightcharacters" + master_username = "test" + skip_final_snapshot = true + database_name = "database04" + + lifecycle { + ignore_changes = [global_cluster_identifier, engine_version] + } +} + +resource "aws_rds_cluster_instance" 
"test" { + identifier = %[1]q + cluster_identifier = aws_rds_cluster.test.id + engine = aws_rds_cluster.test.engine + engine_version = aws_rds_cluster.test.engine_version + instance_class = "db.r5.large" +} + +resource "aws_rds_global_cluster" "test" { + force_destroy = true + global_cluster_identifier = %[1]q + engine = aws_rds_cluster.test.engine + engine_version = %[2]q + source_db_cluster_identifier = aws_rds_cluster.test.arn +} +`, rName, engineVersion) +} + func testAccGlobalClusterConfig_storageEncrypted(rName string, storageEncrypted bool) string { return fmt.Sprintf(` resource "aws_rds_global_cluster" "test" { diff --git a/internal/service/rds/instance.go b/internal/service/rds/instance.go index e33b92a744f6..bba122a47706 100644 --- a/internal/service/rds/instance.go +++ b/internal/service/rds/instance.go @@ -23,7 +23,7 @@ import ( "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" + sdkretry "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -32,6 +32,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" "github.com/hashicorp/terraform-provider-aws/internal/flex" + "github.com/hashicorp/terraform-provider-aws/internal/retry" tfslices "github.com/hashicorp/terraform-provider-aws/internal/slices" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" @@ -1159,7 +1160,7 @@ func resourceInstanceCreate(ctx context.Context, d *schema.ResourceData, meta an } outputRaw, err := tfresource.RetryWhen(ctx, propagationTimeout, - func() 
(any, error) { + func(ctx context.Context) (any, error) { return conn.RestoreDBInstanceFromS3(ctx, input) }, func(err error) (bool, error) { @@ -1414,7 +1415,7 @@ func resourceInstanceCreate(ctx context.Context, d *schema.ResourceData, meta an } outputRaw, err := tfresource.RetryWhen(ctx, propagationTimeout, - func() (any, error) { + func(ctx context.Context) (any, error) { return conn.RestoreDBInstanceFromDBSnapshot(ctx, input) }, func(err error) (bool, error) { @@ -1630,7 +1631,7 @@ func resourceInstanceCreate(ctx context.Context, d *schema.ResourceData, meta an } outputRaw, err := tfresource.RetryWhen(ctx, propagationTimeout, - func() (any, error) { + func(ctx context.Context) (any, error) { return conn.RestoreDBInstanceToPointInTime(ctx, input) }, func(err error) (bool, error) { @@ -1850,7 +1851,7 @@ func resourceInstanceCreate(ctx context.Context, d *schema.ResourceData, meta an } outputRaw, err := tfresource.RetryWhen(ctx, propagationTimeout, - func() (any, error) { + func(ctx context.Context) (any, error) { return conn.CreateDBInstance(ctx, input) }, func(err error) (bool, error) { @@ -2265,7 +2266,7 @@ func resourceInstanceUpdate(ctx context.Context, d *schema.ResourceData, meta an timeout = 5 * time.Minute ) _, err = tfresource.RetryWhen(ctx, timeout, - func() (any, error) { + func(ctx context.Context) (any, error) { return conn.DeleteDBInstance(ctx, input) }, func(err error) (bool, error) { @@ -2370,7 +2371,7 @@ func resourceInstanceDelete(ctx context.Context, d *schema.ResourceData, meta an if tfawserr.ErrMessageContains(err, errCodeInvalidParameterCombination, "disable deletion pro") { if v, ok := d.GetOk(names.AttrDeletionProtection); (!ok || !v.(bool)) && d.Get(names.AttrApplyImmediately).(bool) { _, ierr := tfresource.RetryWhen(ctx, d.Timeout(schema.TimeoutUpdate), - func() (any, error) { + func(ctx context.Context) (any, error) { return conn.ModifyDBInstance(ctx, &rds.ModifyDBInstanceInput{ ApplyImmediately: aws.Bool(true), DBInstanceIdentifier: 
aws.String(d.Get(names.AttrIdentifier).(string)), @@ -2430,7 +2431,7 @@ func resourceInstanceImport(_ context.Context, d *schema.ResourceData, meta any) func dbInstanceCreateReadReplica(ctx context.Context, conn *rds.Client, input *rds.CreateDBInstanceReadReplicaInput) (*rds.CreateDBInstanceReadReplicaOutput, error) { outputRaw, err := tfresource.RetryWhenAWSErrMessageContains(ctx, propagationTimeout, - func() (any, error) { + func(ctx context.Context) (any, error) { return conn.CreateDBInstanceReadReplica(ctx, input) }, errCodeInvalidParameterValue, "ENHANCED_MONITORING") @@ -2489,6 +2490,9 @@ func dbInstancePopulateModify(input *rds.ModifyDBInstanceInput, d *schema.Resour if d.HasChange("database_insights_mode") { input.DatabaseInsightsMode = types.DatabaseInsightsMode(d.Get("database_insights_mode").(string)) input.EnablePerformanceInsights = aws.Bool(d.Get("performance_insights_enabled").(bool)) + if v, ok := d.Get("performance_insights_kms_key_id").(string); ok && v != "" { + input.PerformanceInsightsKMSKeyId = aws.String(v) + } input.PerformanceInsightsRetentionPeriod = aws.Int32(int32(d.Get("performance_insights_retention_period").(int))) } @@ -2705,7 +2709,7 @@ func dbInstancePopulateModify(input *rds.ModifyDBInstanceInput, d *schema.Resour func dbInstanceModify(ctx context.Context, conn *rds.Client, resourceID string, input *rds.ModifyDBInstanceInput, timeout time.Duration) error { _, err := tfresource.RetryWhen(ctx, timeout, - func() (any, error) { + func(ctx context.Context) (any, error) { return conn.ModifyDBInstance(ctx, input) }, func(err error) (bool, error) { @@ -2848,7 +2852,7 @@ func findDBInstances(ctx context.Context, conn *rds.Client, input *rds.DescribeD page, err := pages.NextPage(ctx, optFns...) 
if errs.IsA[*types.DBInstanceNotFoundFault](err) { - return nil, &retry.NotFoundError{ + return nil, &sdkretry.NotFoundError{ LastError: err, LastRequest: input, } @@ -2868,8 +2872,8 @@ func findDBInstances(ctx context.Context, conn *rds.Client, input *rds.DescribeD return output, nil } -func statusDBInstance(ctx context.Context, conn *rds.Client, id string, optFns ...func(*rds.Options)) retry.StateRefreshFunc { - return func() (any, string, error) { +func statusDBInstance(conn *rds.Client, id string, optFns ...func(*rds.Options)) retry.StateRefreshFunc { + return func(ctx context.Context) (any, string, error) { output, err := findDBInstanceByID(ctx, conn, id, optFns...) if tfresource.NotFound(err) { @@ -2913,7 +2917,7 @@ func waitDBInstanceAvailable(ctx context.Context, conn *rds.Client, id string, t instanceStatusUpgrading, }, Target: []string{instanceStatusAvailable, instanceStatusStorageOptimization}, - Refresh: statusDBInstance(ctx, conn, id), + Refresh: statusDBInstance(conn, id), Timeout: timeout, } options.Apply(stateConf) @@ -2947,7 +2951,7 @@ func waitDBInstanceStopped(ctx context.Context, conn *rds.Client, id string, tim instanceStatusUpgrading, }, Target: []string{instanceStatusStopped}, - Refresh: statusDBInstance(ctx, conn, id), + Refresh: statusDBInstance(conn, id), Timeout: timeout, ContinuousTargetOccurence: 2, Delay: 10 * time.Second, @@ -2991,7 +2995,7 @@ func waitDBInstanceDeleted(ctx context.Context, conn *rds.Client, id string, tim instanceStatusStorageOptimization, }, Target: []string{}, - Refresh: statusDBInstance(ctx, conn, id), + Refresh: statusDBInstance(conn, id), Timeout: timeout, } options.Apply(stateConf) @@ -3018,7 +3022,7 @@ func findBlueGreenDeploymentByID(ctx context.Context, conn *rds.Client, id strin // Eventual consistency check. 
if aws.ToString(output.BlueGreenDeploymentIdentifier) != id { - return nil, &retry.NotFoundError{ + return nil, &sdkretry.NotFoundError{ LastRequest: input, } } @@ -3044,7 +3048,7 @@ func findBlueGreenDeployments(ctx context.Context, conn *rds.Client, input *rds. page, err := pages.NextPage(ctx) if errs.IsA[*types.BlueGreenDeploymentNotFoundFault](err) { - return nil, &retry.NotFoundError{ + return nil, &sdkretry.NotFoundError{ LastError: err, LastRequest: input, } @@ -3064,8 +3068,8 @@ func findBlueGreenDeployments(ctx context.Context, conn *rds.Client, input *rds. return output, nil } -func statusBlueGreenDeployment(ctx context.Context, conn *rds.Client, id string) retry.StateRefreshFunc { - return func() (any, string, error) { +func statusBlueGreenDeployment(conn *rds.Client, id string) retry.StateRefreshFunc { + return func(ctx context.Context) (any, string, error) { output, err := findBlueGreenDeploymentByID(ctx, conn, id) if tfresource.NotFound(err) { @@ -3091,7 +3095,7 @@ func waitBlueGreenDeploymentAvailable(ctx context.Context, conn *rds.Client, id stateConf := &retry.StateChangeConf{ Pending: []string{"PROVISIONING"}, Target: []string{"AVAILABLE"}, - Refresh: statusBlueGreenDeployment(ctx, conn, id), + Refresh: statusBlueGreenDeployment(conn, id), Timeout: timeout, } options.Apply(stateConf) @@ -3117,7 +3121,7 @@ func waitBlueGreenDeploymentSwitchoverCompleted(ctx context.Context, conn *rds.C stateConf := &retry.StateChangeConf{ Pending: []string{"AVAILABLE", "SWITCHOVER_IN_PROGRESS"}, Target: []string{"SWITCHOVER_COMPLETED"}, - Refresh: statusBlueGreenDeployment(ctx, conn, id), + Refresh: statusBlueGreenDeployment(conn, id), Timeout: timeout, } options.Apply(stateConf) @@ -3147,7 +3151,7 @@ func waitBlueGreenDeploymentDeleted(ctx context.Context, conn *rds.Client, id st stateConf := &retry.StateChangeConf{ Pending: []string{"PROVISIONING", "AVAILABLE", "SWITCHOVER_IN_PROGRESS", "SWITCHOVER_COMPLETED", "INVALID_CONFIGURATION", "SWITCHOVER_FAILED", 
"DELETING"}, Target: []string{}, - Refresh: statusBlueGreenDeployment(ctx, conn, id), + Refresh: statusBlueGreenDeployment(conn, id), Timeout: timeout, } options.Apply(stateConf) diff --git a/internal/service/rds/instance_data_source_tags_gen_test.go b/internal/service/rds/instance_data_source_tags_gen_test.go index 57e0c983fd67..cc154b456b85 100644 --- a/internal/service/rds/instance_data_source_tags_gen_test.go +++ b/internal/service/rds/instance_data_source_tags_gen_test.go @@ -8,7 +8,6 @@ import ( "unique" "github.com/hashicorp/terraform-plugin-testing/config" - sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/knownvalue" "github.com/hashicorp/terraform-plugin-testing/statecheck" @@ -22,10 +21,11 @@ import ( func TestAccRDSDBInstanceDataSource_tags(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_db_instance.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.RDSServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -50,10 +50,11 @@ func TestAccRDSDBInstanceDataSource_tags(t *testing.T) { func TestAccRDSDBInstanceDataSource_tags_NullMap(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_db_instance.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.RDSServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -74,10 +75,11 @@ func 
TestAccRDSDBInstanceDataSource_tags_NullMap(t *testing.T) { func TestAccRDSDBInstanceDataSource_tags_EmptyMap(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_db_instance.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.RDSServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -98,10 +100,11 @@ func TestAccRDSDBInstanceDataSource_tags_EmptyMap(t *testing.T) { func TestAccRDSDBInstanceDataSource_tags_DefaultTags_nonOverlapping(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_db_instance.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.RDSServiceID), Steps: []resource.TestStep{ @@ -130,10 +133,11 @@ func TestAccRDSDBInstanceDataSource_tags_DefaultTags_nonOverlapping(t *testing.T func TestAccRDSDBInstanceDataSource_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_db_instance.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.RDSServiceID), Steps: []resource.TestStep{ @@ -168,10 +172,11 @@ func TestAccRDSDBInstanceDataSource_tags_IgnoreTags_Overlap_DefaultTag(t *testin func TestAccRDSDBInstanceDataSource_tags_IgnoreTags_Overlap_ResourceTag(t *testing.T) { ctx := 
acctest.Context(t) + dataSourceName := "data.aws_db_instance.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.RDSServiceID), Steps: []resource.TestStep{ diff --git a/internal/service/rds/instance_role_association.go b/internal/service/rds/instance_role_association.go index 769db0bc1f94..c020d8d993da 100644 --- a/internal/service/rds/instance_role_association.go +++ b/internal/service/rds/instance_role_association.go @@ -82,12 +82,12 @@ func resourceInstanceRoleAssociationCreate(ctx context.Context, d *schema.Resour RoleArn: aws.String(roleARN), } - _, err := tfresource.RetryWhenIsA[*types.InvalidDBInstanceStateFault](ctx, d.Timeout(schema.TimeoutCreate), func() (any, error) { + _, err := tfresource.RetryWhenIsA[any, *types.InvalidDBInstanceStateFault](ctx, d.Timeout(schema.TimeoutCreate), func(ctx context.Context) (any, error) { return conn.AddRoleToDBInstance(ctx, &input) }) if tfawserr.ErrMessageContains(err, errCodeInvalidParameterValue, errIAMRolePropagationMessage) { - _, err = tfresource.RetryWhenAWSErrMessageContains(ctx, propagationTimeout, func() (any, error) { + _, err = tfresource.RetryWhenAWSErrMessageContains(ctx, propagationTimeout, func(ctx context.Context) (any, error) { return conn.AddRoleToDBInstance(ctx, &input) }, errCodeInvalidParameterValue, errIAMRolePropagationMessage) } @@ -148,7 +148,7 @@ func resourceInstanceRoleAssociationDelete(ctx context.Context, d *schema.Resour FeatureName: aws.String(d.Get("feature_name").(string)), RoleArn: aws.String(roleARN), } - _, err = tfresource.RetryWhenIsA[*types.InvalidDBInstanceStateFault](ctx, d.Timeout(schema.TimeoutDelete), func() (any, error) { + _, err = tfresource.RetryWhenIsA[any, *types.InvalidDBInstanceStateFault](ctx, 
d.Timeout(schema.TimeoutDelete), func(ctx context.Context) (any, error) { return conn.RemoveRoleFromDBInstance(ctx, &input) }) diff --git a/internal/service/rds/instance_tags_gen_test.go b/internal/service/rds/instance_tags_gen_test.go index 044f5d543036..c4f5e91e9b7e 100644 --- a/internal/service/rds/instance_tags_gen_test.go +++ b/internal/service/rds/instance_tags_gen_test.go @@ -7,7 +7,6 @@ import ( "github.com/aws/aws-sdk-go-v2/service/rds/types" "github.com/hashicorp/terraform-plugin-testing/config" - sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/knownvalue" "github.com/hashicorp/terraform-plugin-testing/plancheck" @@ -19,11 +18,12 @@ import ( func TestAccRDSDBInstance_tags(t *testing.T) { ctx := acctest.Context(t) + var v types.DBInstance resourceName := "aws_db_instance.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.RDSServiceID), CheckDestroy: testAccCheckDBInstanceDestroy(ctx), @@ -213,11 +213,12 @@ func TestAccRDSDBInstance_tags(t *testing.T) { func TestAccRDSDBInstance_tags_null(t *testing.T) { ctx := acctest.Context(t) + var v types.DBInstance resourceName := "aws_db_instance.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.RDSServiceID), CheckDestroy: testAccCheckDBInstanceDestroy(ctx), @@ -283,11 +284,12 @@ func TestAccRDSDBInstance_tags_null(t *testing.T) { func 
TestAccRDSDBInstance_tags_EmptyMap(t *testing.T) { ctx := acctest.Context(t) + var v types.DBInstance resourceName := "aws_db_instance.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.RDSServiceID), CheckDestroy: testAccCheckDBInstanceDestroy(ctx), @@ -349,11 +351,12 @@ func TestAccRDSDBInstance_tags_EmptyMap(t *testing.T) { func TestAccRDSDBInstance_tags_AddOnUpdate(t *testing.T) { ctx := acctest.Context(t) + var v types.DBInstance resourceName := "aws_db_instance.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.RDSServiceID), CheckDestroy: testAccCheckDBInstanceDestroy(ctx), @@ -433,11 +436,12 @@ func TestAccRDSDBInstance_tags_AddOnUpdate(t *testing.T) { func TestAccRDSDBInstance_tags_EmptyTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) + var v types.DBInstance resourceName := "aws_db_instance.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.RDSServiceID), CheckDestroy: testAccCheckDBInstanceDestroy(ctx), @@ -528,11 +532,12 @@ func TestAccRDSDBInstance_tags_EmptyTag_OnCreate(t *testing.T) { func TestAccRDSDBInstance_tags_EmptyTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + var v types.DBInstance resourceName := "aws_db_instance.test" - rName := 
sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.RDSServiceID), CheckDestroy: testAccCheckDBInstanceDestroy(ctx), @@ -671,11 +676,12 @@ func TestAccRDSDBInstance_tags_EmptyTag_OnUpdate_Add(t *testing.T) { func TestAccRDSDBInstance_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + var v types.DBInstance resourceName := "aws_db_instance.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.RDSServiceID), CheckDestroy: testAccCheckDBInstanceDestroy(ctx), @@ -763,11 +769,12 @@ func TestAccRDSDBInstance_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { func TestAccRDSDBInstance_tags_DefaultTags_providerOnly(t *testing.T) { ctx := acctest.Context(t) + var v types.DBInstance resourceName := "aws_db_instance.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.RDSServiceID), CheckDestroy: testAccCheckDBInstanceDestroy(ctx), @@ -956,11 +963,12 @@ func TestAccRDSDBInstance_tags_DefaultTags_providerOnly(t *testing.T) { func TestAccRDSDBInstance_tags_DefaultTags_nonOverlapping(t *testing.T) { ctx := acctest.Context(t) + var v types.DBInstance resourceName := "aws_db_instance.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, 
acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.RDSServiceID), CheckDestroy: testAccCheckDBInstanceDestroy(ctx), @@ -1125,11 +1133,12 @@ func TestAccRDSDBInstance_tags_DefaultTags_nonOverlapping(t *testing.T) { func TestAccRDSDBInstance_tags_DefaultTags_overlapping(t *testing.T) { ctx := acctest.Context(t) + var v types.DBInstance resourceName := "aws_db_instance.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.RDSServiceID), CheckDestroy: testAccCheckDBInstanceDestroy(ctx), @@ -1310,11 +1319,12 @@ func TestAccRDSDBInstance_tags_DefaultTags_overlapping(t *testing.T) { func TestAccRDSDBInstance_tags_DefaultTags_updateToProviderOnly(t *testing.T) { ctx := acctest.Context(t) + var v types.DBInstance resourceName := "aws_db_instance.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.RDSServiceID), CheckDestroy: testAccCheckDBInstanceDestroy(ctx), @@ -1403,11 +1413,12 @@ func TestAccRDSDBInstance_tags_DefaultTags_updateToProviderOnly(t *testing.T) { func TestAccRDSDBInstance_tags_DefaultTags_updateToResourceOnly(t *testing.T) { ctx := acctest.Context(t) + var v types.DBInstance resourceName := "aws_db_instance.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + 
acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.RDSServiceID), CheckDestroy: testAccCheckDBInstanceDestroy(ctx), @@ -1495,11 +1506,12 @@ func TestAccRDSDBInstance_tags_DefaultTags_updateToResourceOnly(t *testing.T) { func TestAccRDSDBInstance_tags_DefaultTags_emptyResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v types.DBInstance resourceName := "aws_db_instance.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.RDSServiceID), CheckDestroy: testAccCheckDBInstanceDestroy(ctx), @@ -1563,11 +1575,12 @@ func TestAccRDSDBInstance_tags_DefaultTags_emptyResourceTag(t *testing.T) { func TestAccRDSDBInstance_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T) { ctx := acctest.Context(t) + var v types.DBInstance resourceName := "aws_db_instance.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.RDSServiceID), CheckDestroy: testAccCheckDBInstanceDestroy(ctx), @@ -1623,11 +1636,12 @@ func TestAccRDSDBInstance_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T) { func TestAccRDSDBInstance_tags_DefaultTags_nullOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v types.DBInstance resourceName := "aws_db_instance.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: 
func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.RDSServiceID), CheckDestroy: testAccCheckDBInstanceDestroy(ctx), @@ -1688,11 +1702,12 @@ func TestAccRDSDBInstance_tags_DefaultTags_nullOverlappingResourceTag(t *testing func TestAccRDSDBInstance_tags_DefaultTags_nullNonOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v types.DBInstance resourceName := "aws_db_instance.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.RDSServiceID), CheckDestroy: testAccCheckDBInstanceDestroy(ctx), @@ -1753,11 +1768,12 @@ func TestAccRDSDBInstance_tags_DefaultTags_nullNonOverlappingResourceTag(t *test func TestAccRDSDBInstance_tags_ComputedTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) + var v types.DBInstance resourceName := "aws_db_instance.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.RDSServiceID), CheckDestroy: testAccCheckDBInstanceDestroy(ctx), @@ -1811,11 +1827,12 @@ func TestAccRDSDBInstance_tags_ComputedTag_OnCreate(t *testing.T) { func TestAccRDSDBInstance_tags_ComputedTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + var v types.DBInstance resourceName := "aws_db_instance.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, 
names.RDSServiceID), CheckDestroy: testAccCheckDBInstanceDestroy(ctx), @@ -1911,11 +1928,12 @@ func TestAccRDSDBInstance_tags_ComputedTag_OnUpdate_Add(t *testing.T) { func TestAccRDSDBInstance_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + var v types.DBInstance resourceName := "aws_db_instance.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.RDSServiceID), CheckDestroy: testAccCheckDBInstanceDestroy(ctx), @@ -2001,11 +2019,12 @@ func TestAccRDSDBInstance_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { func TestAccRDSDBInstance_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { ctx := acctest.Context(t) + var v types.DBInstance resourceName := "aws_db_instance.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.RDSServiceID), CheckDestroy: testAccCheckDBInstanceDestroy(ctx), @@ -2163,11 +2182,12 @@ func TestAccRDSDBInstance_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { func TestAccRDSDBInstance_tags_IgnoreTags_Overlap_ResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v types.DBInstance resourceName := "aws_db_instance.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.RDSServiceID), CheckDestroy: testAccCheckDBInstanceDestroy(ctx), diff 
--git a/internal/service/rds/instance_test.go b/internal/service/rds/instance_test.go index a4966d38e087..c71372e93732 100644 --- a/internal/service/rds/instance_test.go +++ b/internal/service/rds/instance_test.go @@ -3065,7 +3065,6 @@ func TestAccRDSInstance_ReplicateSourceDB_mssqlDomain(t *testing.T) { rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) sourceResourceName := "aws_db_instance.source" resourceName := "aws_db_instance.test" - domain := acctest.RandomDomain().String() resource.ParallelTest(t, resource.TestCase{ @@ -5571,6 +5570,88 @@ func TestAccRDSInstance_PerformanceInsights_kmsKeyID(t *testing.T) { }) } +func TestAccRDSInstance_PerformanceInsights_kmsKeyID_UpdateMode(t *testing.T) { + ctx := acctest.Context(t) + + // All RDS Instance tests should skip for testing.Short() except the 20 shortest running tests. + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + + var dbInstance types.DBInstance + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + kmsKeyResourceName := "aws_kms_key.test" + resourceName := "aws_db_instance.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + testAccPerformanceInsightsDefaultVersionPreCheck(ctx, t, tfrds.InstanceEngineMySQL) + }, + ErrorCheck: acctest.ErrorCheck(t, names.RDSServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_11_0), + }, + CheckDestroy: testAccCheckClusterDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccInstanceConfig_performanceInsightsKMSKeyIDUpdateMode(rName, string(types.DatabaseInsightsModeStandard)), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + }, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDBInstanceExists(ctx, 
resourceName, &dbInstance), + resource.TestCheckResourceAttr(resourceName, "database_insights_mode", string(types.DatabaseInsightsModeStandard)), + resource.TestCheckResourceAttr(resourceName, "performance_insights_enabled", acctest.CtTrue), + resource.TestCheckResourceAttrPair(resourceName, "performance_insights_kms_key_id", kmsKeyResourceName, names.AttrARN), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + names.AttrApplyImmediately, + names.AttrPassword, + "skip_final_snapshot", + names.AttrFinalSnapshotIdentifier, + }, + }, + { + Config: testAccInstanceConfig_performanceInsightsKMSKeyIDUpdateMode(rName, string(types.DatabaseInsightsModeAdvanced)), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + }, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDBInstanceExists(ctx, resourceName, &dbInstance), + resource.TestCheckResourceAttr(resourceName, "database_insights_mode", string(types.DatabaseInsightsModeAdvanced)), + resource.TestCheckResourceAttr(resourceName, "performance_insights_enabled", acctest.CtTrue), + resource.TestCheckResourceAttrPair(resourceName, "performance_insights_kms_key_id", kmsKeyResourceName, names.AttrARN), + ), + }, + { + Config: testAccInstanceConfig_performanceInsightsKMSKeyIDUpdateMode(rName, string(types.DatabaseInsightsModeStandard)), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + }, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDBInstanceExists(ctx, resourceName, &dbInstance), + resource.TestCheckResourceAttr(resourceName, "database_insights_mode", string(types.DatabaseInsightsModeStandard)), + resource.TestCheckResourceAttr(resourceName, "performance_insights_enabled", acctest.CtTrue), 
+ resource.TestCheckResourceAttrPair(resourceName, "performance_insights_kms_key_id", kmsKeyResourceName, names.AttrARN), + ), + }, + }, + }) +} + func TestAccRDSInstance_PerformanceInsights_retentionPeriod(t *testing.T) { ctx := acctest.Context(t) @@ -7093,7 +7174,7 @@ func TestAccRDSInstance_BlueGreenDeployment_outOfBand(t *testing.T) { SkipFinalSnapshot: aws.Bool(true), } _, err = tfresource.RetryWhen(ctx, 5*time.Minute, - func() (any, error) { + func(ctx context.Context) (any, error) { return conn.DeleteDBInstance(ctx, deleteInput) }, func(err error) (bool, error) { @@ -11030,11 +11111,10 @@ resource "aws_security_group_rule" "test" { } resource "aws_directory_service_directory" "directory" { - name = %[2]q - password_wo = ephemeral.aws_secretsmanager_random_password.test.random_password - password_wo_version = 1 - type = "MicrosoftAD" - edition = "Standard" + name = %[2]q + password = "SuperSecretPassw0rd" + type = "MicrosoftAD" + edition = "Standard" vpc_settings { vpc_id = aws_vpc.test.id @@ -13492,6 +13572,55 @@ resource "aws_db_instance" "test" { `, tfrds.InstanceEngineMySQL, mainInstanceClasses, rName)) } +func testAccInstanceConfig_performanceInsightsKMSKeyIDUpdateMode(rName, mode string) string { + var retentionPeriod int + switch mode { + case string(types.DatabaseInsightsModeStandard): + retentionPeriod = 7 + case string(types.DatabaseInsightsModeAdvanced): + retentionPeriod = 465 + } + return acctest.ConfigCompose( + acctest.ConfigRandomPassword(), + fmt.Sprintf(` +resource "aws_kms_key" "test" { + deletion_window_in_days = 7 + enable_key_rotation = true +} + +data "aws_rds_engine_version" "default" { + engine = %[1]q +} + +data "aws_rds_orderable_db_instance" "test" { + engine = data.aws_rds_engine_version.default.engine + engine_version = data.aws_rds_engine_version.default.version + license_model = "general-public-license" + storage_type = "standard" + supports_performance_insights = true + preferred_instance_classes = [%[2]s] +} + +resource 
"aws_db_instance" "test" { + allocated_storage = 5 + backup_retention_period = 0 + database_insights_mode = %[4]q + db_name = "mydb" + engine = data.aws_rds_engine_version.default.engine + engine_version = data.aws_rds_engine_version.default.version + identifier = %[3]q + instance_class = data.aws_rds_orderable_db_instance.test.instance_class + password_wo = ephemeral.aws_secretsmanager_random_password.test.random_password + password_wo_version = 1 + performance_insights_enabled = true + performance_insights_kms_key_id = aws_kms_key.test.arn + performance_insights_retention_period = %[5]d + skip_final_snapshot = true + username = "foo" +} +`, tfrds.InstanceEngineMySQL, mainInstanceClasses, rName, mode, retentionPeriod)) +} + func testAccInstanceConfig_performanceInsightsRetentionPeriod(rName string, performanceInsightsRetentionPeriod int) string { return acctest.ConfigCompose( acctest.ConfigRandomPassword(), diff --git a/internal/service/rds/integration.go b/internal/service/rds/integration.go index 76c13ce114a9..f8e779810e6f 100644 --- a/internal/service/rds/integration.go +++ b/internal/service/rds/integration.go @@ -37,6 +37,7 @@ import ( // @ArnIdentity(identityDuplicateAttributes="id") // @Testing(existsType="github.com/aws/aws-sdk-go-v2/service/rds/types;awstypes;awstypes.Integration") // @Testing(tagsTest=false) +// @Testing(preIdentityVersion="v5.100.0") func newIntegrationResource(_ context.Context) (resource.ResourceWithConfigure, error) { r := &integrationResource{} diff --git a/internal/service/rds/integration_identity_gen_test.go b/internal/service/rds/integration_identity_gen_test.go index febe3aa42d2a..c174be613567 100644 --- a/internal/service/rds/integration_identity_gen_test.go +++ b/internal/service/rds/integration_identity_gen_test.go @@ -16,6 +16,7 @@ import ( "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" "github.com/hashicorp/terraform-plugin-testing/tfversion" "github.com/hashicorp/terraform-provider-aws/internal/acctest" + 
tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -26,7 +27,7 @@ func TestAccRDSIntegration_Identity_Basic(t *testing.T) { resourceName := "aws_rds_integration.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ TerraformVersionChecks: []tfversion.TerraformVersionCheck{ tfversion.SkipBelow(tfversion.Version1_12_0), }, @@ -47,6 +48,9 @@ func TestAccRDSIntegration_Identity_Basic(t *testing.T) { ConfigStateChecks: []statecheck.StateCheck{ statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New(names.AttrARN), compare.ValuesSame()), statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), }, }, @@ -108,7 +112,7 @@ func TestAccRDSIntegration_Identity_RegionOverride(t *testing.T) { resourceName := "aws_rds_integration.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ TerraformVersionChecks: []tfversion.TerraformVersionCheck{ tfversion.SkipBelow(tfversion.Version1_12_0), }, @@ -127,6 +131,9 @@ func TestAccRDSIntegration_Identity_RegionOverride(t *testing.T) { ConfigStateChecks: []statecheck.StateCheck{ statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New(names.AttrARN), compare.ValuesSame()), statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: 
knownvalue.NotNull(), + }), statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), }, }, @@ -218,3 +225,129 @@ func TestAccRDSIntegration_Identity_RegionOverride(t *testing.T) { }, }) } + +func TestAccRDSIntegration_Identity_ExistingResource(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.Integration + resourceName := "aws_rds_integration.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.RDSServiceID), + CheckDestroy: testAccCheckIntegrationDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/Integration/basic_v5.100.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckIntegrationExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: v6.0 Identity set on refresh + { + ConfigDirectory: config.StaticDirectory("testdata/Integration/basic_v6.0.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckIntegrationExists(ctx, resourceName, &v), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + 
names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + + // Step 3: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Integration/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + }, + }) +} + +func TestAccRDSIntegration_Identity_ExistingResource_NoRefresh_NoChange(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.Integration + resourceName := "aws_rds_integration.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.RDSServiceID), + CheckDestroy: testAccCheckIntegrationDestroy(ctx), + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/Integration/basic_v5.100.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + 
testAccCheckIntegrationExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Integration/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + }, + }, + }) +} diff --git a/internal/service/rds/integration_test.go b/internal/service/rds/integration_test.go index 1c27c95e882d..b28788e02991 100644 --- a/internal/service/rds/integration_test.go +++ b/internal/service/rds/integration_test.go @@ -11,13 +11,8 @@ import ( awstypes "github.com/aws/aws-sdk-go-v2/service/rds/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" - "github.com/hashicorp/terraform-plugin-testing/plancheck" - "github.com/hashicorp/terraform-plugin-testing/statecheck" "github.com/hashicorp/terraform-plugin-testing/terraform" - "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" - "github.com/hashicorp/terraform-plugin-testing/tfversion" "github.com/hashicorp/terraform-provider-aws/internal/acctest" - tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" "github.com/hashicorp/terraform-provider-aws/internal/conns" tfrds "github.com/hashicorp/terraform-provider-aws/internal/service/rds" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" @@ -128,70 +123,6 @@ func TestAccRDSIntegration_optional(t *testing.T) { }) } -func TestAccRDSIntegration_Identity_ExistingResource(t *testing.T) { - ctx := acctest.Context(t) - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resourceName := "aws_rds_integration.test" - - resource.ParallelTest(t, resource.TestCase{ - TerraformVersionChecks: []tfversion.TerraformVersionCheck{ - tfversion.SkipBelow(tfversion.Version1_12_0), - }, - 
PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, names.RDSServiceID), - CheckDestroy: testAccCheckIntegrationDestroy(ctx), - Steps: []resource.TestStep{ - { - ExternalProviders: map[string]resource.ExternalProvider{ - "aws": { - Source: "hashicorp/aws", - VersionConstraint: "5.100.0", - }, - }, - Config: testAccIntegrationConfig_basic(rName), - ConfigStateChecks: []statecheck.StateCheck{ - tfstatecheck.ExpectNoIdentity(resourceName), - }, - }, - { - ExternalProviders: map[string]resource.ExternalProvider{ - "aws": { - Source: "hashicorp/aws", - VersionConstraint: "6.0.0", - }, - }, - Config: testAccIntegrationConfig_basic(rName), - ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - PostApplyPostRefresh: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - }, - ConfigStateChecks: []statecheck.StateCheck{ - statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), - }, - }, - { - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - Config: testAccIntegrationConfig_basic(rName), - ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - PostApplyPostRefresh: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - }, - ConfigStateChecks: []statecheck.StateCheck{ - statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), - }, - }, - }, - }) -} - func testAccCheckIntegrationDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { conn := acctest.Provider.Meta().(*conns.AWSClient).RDSClient(ctx) @@ -305,9 +236,15 @@ resource "aws_db_subnet_group" "test" { } } +data "aws_rds_engine_version" "test" { + engine = 
"aurora-mysql" + version = "8.0" + latest = true +} + resource "aws_rds_cluster_parameter_group" "test" { name = %[1]q - family = "aurora-mysql8.0" + family = data.aws_rds_engine_version.test.parameter_group_family dynamic "parameter" { for_each = local.cluster_parameters @@ -321,8 +258,8 @@ resource "aws_rds_cluster_parameter_group" "test" { resource "aws_rds_cluster" "test" { cluster_identifier = %[1]q - engine = "aurora-mysql" - engine_version = "8.0.mysql_aurora.3.05.2" + engine = data.aws_rds_engine_version.test.engine + engine_version = data.aws_rds_engine_version.test.version_actual database_name = "test" master_username = "tfacctest" master_password = "avoid-plaintext-passwords" @@ -335,12 +272,20 @@ resource "aws_rds_cluster" "test" { apply_immediately = true } +data "aws_rds_orderable_db_instance" "test" { + engine = data.aws_rds_engine_version.test.engine + engine_version = data.aws_rds_engine_version.test.version_actual + preferred_instance_classes = [%[2]s] + supports_clusters = true + supports_global_databases = true +} + resource "aws_rds_cluster_instance" "test" { identifier = %[1]q - cluster_identifier = aws_rds_cluster.test.id - instance_class = "db.r6g.large" + cluster_identifier = aws_rds_cluster.test.cluster_identifier engine = aws_rds_cluster.test.engine engine_version = aws_rds_cluster.test.engine_version + instance_class = data.aws_rds_orderable_db_instance.test.instance_class } resource "aws_redshift_cluster" "test" { @@ -358,7 +303,7 @@ resource "aws_redshift_cluster" "test" { publicly_accessible = false encrypted = true } -`, rName)) +`, rName, mainInstanceClasses)) } func testAccIntegrationConfig_base(rName string) string { diff --git a/internal/service/rds/option_group.go b/internal/service/rds/option_group.go index 754ee6f35e0c..821c3163eb9f 100644 --- a/internal/service/rds/option_group.go +++ b/internal/service/rds/option_group.go @@ -229,7 +229,7 @@ func resourceOptionGroupUpdate(ctx context.Context, d *schema.ResourceData, meta 
input.OptionsToRemove = optionsToRemove } - _, err := tfresource.RetryWhenAWSErrMessageContains(ctx, propagationTimeout, func() (any, error) { + _, err := tfresource.RetryWhenAWSErrMessageContains(ctx, propagationTimeout, func(ctx context.Context) (any, error) { return conn.ModifyOptionGroup(ctx, input) }, errCodeInvalidParameterValue, "IAM role ARN value is invalid or does not include the required permissions") @@ -252,7 +252,7 @@ func resourceOptionGroupDelete(ctx context.Context, d *schema.ResourceData, meta } log.Printf("[DEBUG] Deleting RDS DB Option Group: %s", d.Id()) - _, err := tfresource.RetryWhenIsA[*types.InvalidOptionGroupStateFault](ctx, d.Timeout(schema.TimeoutDelete), func() (any, error) { + _, err := tfresource.RetryWhenIsA[any, *types.InvalidOptionGroupStateFault](ctx, d.Timeout(schema.TimeoutDelete), func(ctx context.Context) (any, error) { return conn.DeleteOptionGroup(ctx, &rds.DeleteOptionGroupInput{ OptionGroupName: aws.String(d.Id()), }) diff --git a/internal/service/rds/parameter_group.go b/internal/service/rds/parameter_group.go index adc7878d255c..100bd3bae4af 100644 --- a/internal/service/rds/parameter_group.go +++ b/internal/service/rds/parameter_group.go @@ -22,7 +22,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/enum" "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" - tfiters "github.com/hashicorp/terraform-provider-aws/internal/iters" + tfiter "github.com/hashicorp/terraform-provider-aws/internal/iter" tfmaps "github.com/hashicorp/terraform-provider-aws/internal/maps" tfslices "github.com/hashicorp/terraform-provider-aws/internal/slices" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" @@ -308,7 +308,7 @@ func resourceParameterGroupDelete(ctx context.Context, d *schema.ResourceData, m input := rds.DeleteDBParameterGroupInput{ DBParameterGroupName: aws.String(d.Id()), } - _, err := 
tfresource.RetryWhenIsA[*types.InvalidDBParameterGroupStateFault](ctx, timeout, func() (any, error) { + _, err := tfresource.RetryWhenIsA[any, *types.InvalidDBParameterGroupStateFault](ctx, timeout, func(ctx context.Context) (any, error) { return conn.DeleteDBParameterGroup(ctx, &input) }) @@ -466,5 +466,5 @@ parameterLoop: chunks = append(chunks, slices.Chunk(immediate, maxChunkSize)) chunks = append(chunks, slices.Chunk(pendingReboot, maxChunkSize)) - return tfiters.Concat(chunks...) + return tfiter.Concat(chunks...) } diff --git a/internal/service/rds/proxy.go b/internal/service/rds/proxy.go index 5845e8cfd496..07db1160be0b 100644 --- a/internal/service/rds/proxy.go +++ b/internal/service/rds/proxy.go @@ -54,7 +54,7 @@ func resourceProxy() *schema.Resource { }, "auth": { Type: schema.TypeSet, - Required: true, + Optional: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "auth_scheme": { @@ -94,6 +94,12 @@ func resourceProxy() *schema.Resource { Type: schema.TypeBool, Optional: true, }, + "default_auth_scheme": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateDiagFunc: enum.Validate[types.DefaultAuthScheme](), + }, names.AttrEndpoint: { Type: schema.TypeString, Computed: true, @@ -147,7 +153,6 @@ func resourceProxyCreate(ctx context.Context, d *schema.ResourceData, meta any) name := d.Get(names.AttrName).(string) input := &rds.CreateDBProxyInput{ - Auth: expandUserAuthConfigs(d.Get("auth").(*schema.Set).List()), DBProxyName: aws.String(name), EngineFamily: types.EngineFamily(d.Get("engine_family").(string)), RoleArn: aws.String(d.Get(names.AttrRoleARN).(string)), @@ -155,10 +160,20 @@ func resourceProxyCreate(ctx context.Context, d *schema.ResourceData, meta any) VpcSubnetIds: flex.ExpandStringValueSet(d.Get("vpc_subnet_ids").(*schema.Set)), } + if v, ok := d.GetOk("auth"); ok && v.(*schema.Set).Len() > 0 { + input.Auth = expandUserAuthConfigs(v.(*schema.Set).List()) + } else { + input.Auth = []types.UserAuthConfig{} + } 
+ if v, ok := d.GetOk("debug_logging"); ok { input.DebugLogging = aws.Bool(v.(bool)) } + if v, ok := d.GetOk("default_auth_scheme"); ok { + input.DefaultAuthScheme = types.DefaultAuthScheme(v.(string)) + } + if v, ok := d.GetOk("idle_client_timeout"); ok { input.IdleClientTimeout = aws.Int32(int32(v.(int))) } @@ -206,6 +221,7 @@ func resourceProxyRead(ctx context.Context, d *schema.ResourceData, meta any) di d.Set("auth", flattenUserAuthConfigInfos(dbProxy.Auth)) d.Set(names.AttrName, dbProxy.DBProxyName) d.Set("debug_logging", dbProxy.DebugLogging) + d.Set("default_auth_scheme", dbProxy.DefaultAuthScheme) d.Set("engine_family", dbProxy.EngineFamily) d.Set("idle_client_timeout", dbProxy.IdleClientTimeout) d.Set("require_tls", dbProxy.RequireTLS) @@ -224,7 +240,6 @@ func resourceProxyUpdate(ctx context.Context, d *schema.ResourceData, meta any) if d.HasChangesExcept(names.AttrTags, names.AttrTagsAll) { oName, nName := d.GetChange(names.AttrName) input := &rds.ModifyDBProxyInput{ - Auth: expandUserAuthConfigs(d.Get("auth").(*schema.Set).List()), DBProxyName: aws.String(oName.(string)), DebugLogging: aws.Bool(d.Get("debug_logging").(bool)), NewDBProxyName: aws.String(nName.(string)), @@ -232,6 +247,16 @@ func resourceProxyUpdate(ctx context.Context, d *schema.ResourceData, meta any) RoleArn: aws.String(d.Get(names.AttrRoleARN).(string)), } + if v, ok := d.GetOk("auth"); ok && v.(*schema.Set).Len() > 0 { + input.Auth = expandUserAuthConfigs(v.(*schema.Set).List()) + } else { + input.Auth = []types.UserAuthConfig{} + } + + if v, ok := d.GetOk("default_auth_scheme"); ok { + input.DefaultAuthScheme = types.DefaultAuthScheme(v.(string)) + } + if v, ok := d.GetOk("idle_client_timeout"); ok { input.IdleClientTimeout = aws.Int32(int32(v.(int))) } diff --git a/internal/service/rds/proxy_data_source.go b/internal/service/rds/proxy_data_source.go index 585640e03e9c..c4d8a2a254cf 100644 --- a/internal/service/rds/proxy_data_source.go +++ 
b/internal/service/rds/proxy_data_source.go @@ -59,6 +59,10 @@ func dataSourceProxy() *schema.Resource { Type: schema.TypeBool, Computed: true, }, + "default_auth_scheme": { + Type: schema.TypeString, + Computed: true, + }, names.AttrEndpoint: { Type: schema.TypeString, Computed: true, @@ -116,6 +120,7 @@ func dataSourceProxyRead(ctx context.Context, d *schema.ResourceData, meta any) d.Set(names.AttrARN, dbProxy.DBProxyArn) d.Set("auth", flattenUserAuthConfigInfos(dbProxy.Auth)) d.Set("debug_logging", dbProxy.DebugLogging) + d.Set("default_auth_scheme", dbProxy.DefaultAuthScheme) d.Set(names.AttrEndpoint, dbProxy.Endpoint) d.Set("engine_family", dbProxy.EngineFamily) d.Set("idle_client_timeout", dbProxy.IdleClientTimeout) diff --git a/internal/service/rds/proxy_data_source_test.go b/internal/service/rds/proxy_data_source_test.go index e614974350f3..3987b3a4f4c7 100644 --- a/internal/service/rds/proxy_data_source_test.go +++ b/internal/service/rds/proxy_data_source_test.go @@ -33,6 +33,7 @@ func TestAccRDSProxyDataSource_basic(t *testing.T) { resource.TestCheckResourceAttrPair(dataSourceName, names.AttrARN, resourceName, names.AttrARN), resource.TestCheckResourceAttrPair(dataSourceName, "auth.#", resourceName, "auth.#"), resource.TestCheckResourceAttrPair(dataSourceName, "debug_logging", resourceName, "debug_logging"), + resource.TestCheckResourceAttrPair(dataSourceName, "default_auth_scheme", resourceName, "default_auth_scheme"), resource.TestCheckResourceAttrPair(dataSourceName, names.AttrEndpoint, resourceName, names.AttrEndpoint), resource.TestCheckResourceAttrPair(dataSourceName, "engine_family", resourceName, "engine_family"), resource.TestCheckResourceAttrPair(dataSourceName, "idle_client_timeout", resourceName, "idle_client_timeout"), diff --git a/internal/service/rds/proxy_target.go b/internal/service/rds/proxy_target.go index 0a2e494e7817..c2abd2075952 100644 --- a/internal/service/rds/proxy_target.go +++ b/internal/service/rds/proxy_target.go @@ -118,8 
+118,8 @@ func resourceProxyTargetCreate(ctx context.Context, d *schema.ResourceData, meta const ( timeout = 5 * time.Minute ) - outputRaw, err := tfresource.RetryWhenIsAErrorMessageContains[*types.InvalidDBInstanceStateFault](ctx, timeout, - func() (any, error) { + outputRaw, err := tfresource.RetryWhenIsAErrorMessageContains[any, *types.InvalidDBInstanceStateFault](ctx, timeout, + func(ctx context.Context) (any, error) { return conn.RegisterDBProxyTargets(ctx, input) }, "CREATING") diff --git a/internal/service/rds/proxy_test.go b/internal/service/rds/proxy_test.go index 884f83084b82..81262141b61a 100644 --- a/internal/service/rds/proxy_test.go +++ b/internal/service/rds/proxy_test.go @@ -48,11 +48,12 @@ func TestAccRDSProxy_basic(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "auth.#", "1"), resource.TestCheckTypeSetElemNestedAttrs(resourceName, "auth.*", map[string]string{ "auth_scheme": "SECRETS", - "client_password_auth_type": "MYSQL_NATIVE_PASSWORD", + "client_password_auth_type": "MYSQL_CACHING_SHA2_PASSWORD", names.AttrDescription: "test", "iam_auth": "DISABLED", }), resource.TestCheckResourceAttr(resourceName, "debug_logging", acctest.CtFalse), + resource.TestCheckResourceAttr(resourceName, "default_auth_scheme", string(types.DefaultAuthSchemeNone)), resource.TestMatchResourceAttr(resourceName, names.AttrEndpoint, regexache.MustCompile(`^[\w\-\.]+\.rds\.amazonaws\.com$`)), resource.TestCheckResourceAttr(resourceName, "idle_client_timeout", "1800"), resource.TestCheckResourceAttr(resourceName, "require_tls", acctest.CtTrue), @@ -446,6 +447,97 @@ func TestAccRDSProxy_authSecretARN(t *testing.T) { }) } +func TestAccRDSProxy_defaultAuthScheme(t *testing.T) { + ctx := acctest.Context(t) + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + + var v types.DBProxy + resourceName := "aws_db_proxy.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: 
func() { acctest.PreCheck(ctx, t); testAccDBProxyPreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.RDSServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckProxyDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccProxyConfig_defaultAuthSchemeIAMAUTH(rName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckProxyExists(ctx, resourceName, &v), + resource.TestCheckResourceAttr(resourceName, names.AttrName, rName), + resource.TestCheckResourceAttr(resourceName, "engine_family", "MYSQL"), + acctest.MatchResourceAttrRegionalARN(ctx, resourceName, names.AttrARN, "rds", regexache.MustCompile(`db-proxy:.+`)), + resource.TestCheckResourceAttr(resourceName, "auth.#", "0"), + resource.TestCheckResourceAttr(resourceName, "debug_logging", acctest.CtFalse), + resource.TestCheckResourceAttr(resourceName, "default_auth_scheme", string(types.DefaultAuthSchemeIamAuth)), + resource.TestMatchResourceAttr(resourceName, names.AttrEndpoint, regexache.MustCompile(`^[\w\-\.]+\.rds\.amazonaws\.com$`)), + resource.TestCheckResourceAttr(resourceName, "idle_client_timeout", "1800"), + resource.TestCheckResourceAttr(resourceName, "require_tls", acctest.CtTrue), + resource.TestCheckResourceAttrPair(resourceName, names.AttrRoleARN, "aws_iam_role.test", names.AttrARN), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, "0"), + resource.TestCheckResourceAttr(resourceName, "vpc_subnet_ids.#", "2"), + resource.TestCheckTypeSetElemAttrPair(resourceName, "vpc_subnet_ids.*", "aws_subnet.test.0", names.AttrID), + resource.TestCheckTypeSetElemAttrPair(resourceName, "vpc_subnet_ids.*", "aws_subnet.test.1", names.AttrID), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccProxyConfig_defaultAuthSchemeNONE(rName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckProxyExists(ctx, resourceName, &v), + 
resource.TestCheckResourceAttr(resourceName, names.AttrName, rName), + resource.TestCheckResourceAttr(resourceName, "engine_family", "MYSQL"), + acctest.MatchResourceAttrRegionalARN(ctx, resourceName, names.AttrARN, "rds", regexache.MustCompile(`db-proxy:.+`)), + resource.TestCheckResourceAttr(resourceName, "auth.#", "1"), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "auth.*", map[string]string{ + "auth_scheme": "SECRETS", + "client_password_auth_type": "MYSQL_CACHING_SHA2_PASSWORD", + names.AttrDescription: "test", + "iam_auth": "DISABLED", + }), + resource.TestCheckResourceAttr(resourceName, "debug_logging", acctest.CtFalse), + resource.TestCheckResourceAttr(resourceName, "default_auth_scheme", string(types.DefaultAuthSchemeNone)), + resource.TestMatchResourceAttr(resourceName, names.AttrEndpoint, regexache.MustCompile(`^[\w\-\.]+\.rds\.amazonaws\.com$`)), + resource.TestCheckResourceAttr(resourceName, "idle_client_timeout", "1800"), + resource.TestCheckResourceAttr(resourceName, "require_tls", acctest.CtTrue), + resource.TestCheckResourceAttrPair(resourceName, names.AttrRoleARN, "aws_iam_role.test", names.AttrARN), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, "0"), + resource.TestCheckResourceAttr(resourceName, "vpc_subnet_ids.#", "2"), + resource.TestCheckTypeSetElemAttrPair(resourceName, "vpc_subnet_ids.*", "aws_subnet.test.0", names.AttrID), + resource.TestCheckTypeSetElemAttrPair(resourceName, "vpc_subnet_ids.*", "aws_subnet.test.1", names.AttrID), + ), + }, + { + Config: testAccProxyConfig_defaultAuthSchemeIAMAUTH(rName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckProxyExists(ctx, resourceName, &v), + resource.TestCheckResourceAttr(resourceName, names.AttrName, rName), + resource.TestCheckResourceAttr(resourceName, "engine_family", "MYSQL"), + acctest.MatchResourceAttrRegionalARN(ctx, resourceName, names.AttrARN, "rds", regexache.MustCompile(`db-proxy:.+`)), + 
resource.TestCheckResourceAttr(resourceName, "auth.#", "0"), + resource.TestCheckResourceAttr(resourceName, "debug_logging", acctest.CtFalse), + resource.TestCheckResourceAttr(resourceName, "default_auth_scheme", string(types.DefaultAuthSchemeIamAuth)), + resource.TestMatchResourceAttr(resourceName, names.AttrEndpoint, regexache.MustCompile(`^[\w\-\.]+\.rds\.amazonaws\.com$`)), + resource.TestCheckResourceAttr(resourceName, "idle_client_timeout", "1800"), + resource.TestCheckResourceAttr(resourceName, "require_tls", acctest.CtTrue), + resource.TestCheckResourceAttrPair(resourceName, names.AttrRoleARN, "aws_iam_role.test", names.AttrARN), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, "0"), + resource.TestCheckResourceAttr(resourceName, "vpc_subnet_ids.#", "2"), + resource.TestCheckTypeSetElemAttrPair(resourceName, "vpc_subnet_ids.*", "aws_subnet.test.0", names.AttrID), + resource.TestCheckTypeSetElemAttrPair(resourceName, "vpc_subnet_ids.*", "aws_subnet.test.1", names.AttrID), + ), + }, + }, + }) +} + func TestAccRDSProxy_tags(t *testing.T) { ctx := acctest.Context(t) if testing.Short() { @@ -955,6 +1047,57 @@ resource "aws_secretsmanager_secret_version" "test2" { `, rName, nName)) } +func testAccProxyConfig_defaultAuthSchemeIAMAUTH(rName string) string { + return acctest.ConfigCompose(testAccProxyConfig_base(rName), fmt.Sprintf(` +resource "aws_db_proxy" "test" { + depends_on = [ + aws_secretsmanager_secret_version.test, + aws_iam_role_policy.test + ] + + name = %[1]q + debug_logging = false + engine_family = "MYSQL" + idle_client_timeout = 1800 + require_tls = true + role_arn = aws_iam_role.test.arn + vpc_security_group_ids = [aws_security_group.test.id] + vpc_subnet_ids = aws_subnet.test[*].id + + default_auth_scheme = "IAM_AUTH" +} +`, rName)) +} + +func testAccProxyConfig_defaultAuthSchemeNONE(rName string) string { + return acctest.ConfigCompose(testAccProxyConfig_base(rName), fmt.Sprintf(` +resource "aws_db_proxy" "test" { + depends_on 
= [ + aws_secretsmanager_secret_version.test, + aws_iam_role_policy.test + ] + + name = %[1]q + debug_logging = false + engine_family = "MYSQL" + idle_client_timeout = 1800 + require_tls = true + role_arn = aws_iam_role.test.arn + vpc_security_group_ids = [aws_security_group.test.id] + vpc_subnet_ids = aws_subnet.test[*].id + + auth { + auth_scheme = "SECRETS" + description = "test" + iam_auth = "DISABLED" + secret_arn = aws_secretsmanager_secret.test.arn + } + + default_auth_scheme = "NONE" +} +`, rName)) +} + func testAccProxyConfig_tags1(rName, tagKey1, tagValue1 string) string { return acctest.ConfigCompose(testAccProxyConfig_base(rName), fmt.Sprintf(` resource "aws_db_proxy" "test" { diff --git a/internal/service/rds/service_endpoint_resolver_gen.go b/internal/service/rds/service_endpoint_resolver_gen.go index 486582c554c8..99451257bcad 100644 --- a/internal/service/rds/service_endpoint_resolver_gen.go +++ b/internal/service/rds/service_endpoint_resolver_gen.go @@ -62,7 +62,7 @@ func (r resolverV2) ResolveEndpoint(ctx context.Context, params rds.EndpointPara }) params.UseFIPS = aws.Bool(false) } else { - err = fmt.Errorf("looking up rds endpoint %q: %s", hostname, err) + err = fmt.Errorf("looking up rds endpoint %q: %w", hostname, err) return } } else { diff --git a/internal/service/rds/service_endpoints_gen_test.go b/internal/service/rds/service_endpoints_gen_test.go index 9cd63a012820..e6eaf5d924dd 100644 --- a/internal/service/rds/service_endpoints_gen_test.go +++ b/internal/service/rds/service_endpoints_gen_test.go @@ -521,7 +521,7 @@ func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { ) } -var errCancelOperation = fmt.Errorf("Test: Canceling request") +var errCancelOperation = errors.New("Test: Canceling request") func addCancelRequestMiddleware() func(*middleware.Stack) error { return func(stack *middleware.Stack) error { diff --git a/internal/service/rds/service_package_gen.go b/internal/service/rds/service_package_gen.go 
index 2994ece9e4c8..d8440111bba7 100644 --- a/internal/service/rds/service_package_gen.go +++ b/internal/service/rds/service_package_gen.go @@ -7,7 +7,6 @@ import ( "unique" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/rds" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -26,6 +25,15 @@ func (p *servicePackage) FrameworkDataSources(ctx context.Context) []*inttypes.S Name: "Cluster Parameter Group", Region: unique.Make(inttypes.ResourceRegionDefault()), }, + { + Factory: newDataSourceGlobalCluster, + TypeName: "aws_rds_global_cluster", + Name: "Global Cluster", + Tags: unique.Make(inttypes.ServicePackageResourceTags{ + IdentifierAttribute: names.AttrARN, + }), + Region: unique.Make(inttypes.ResourceRegionDefault()), + }, } } @@ -399,7 +407,7 @@ func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) ( func(o *rds.Options) { if inContext, ok := conns.FromContext(ctx); ok && inContext.VCREnabled() { tflog.Info(ctx, "overriding retry behavior to immediately return VCR errors") - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(vcr.InteractionNotFoundRetryableFunc)) + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), vcr.InteractionNotFoundRetryableFunc) } }, withExtraOptions(ctx, p, config), diff --git a/internal/service/rds/subnet_group.go b/internal/service/rds/subnet_group.go index 59aac340a1df..6d6585687fbc 100644 --- a/internal/service/rds/subnet_group.go +++ b/internal/service/rds/subnet_group.go @@ -175,7 +175,7 @@ func resourceSubnetGroupDelete(ctx context.Context, d *schema.ResourceData, meta return sdkdiag.AppendErrorf(diags, "deleting RDS Subnet Group (%s): %s", d.Id(), err) } - _, err = tfresource.RetryUntilNotFound(ctx, 3*time.Minute, func() (any, error) { + _, err = tfresource.RetryUntilNotFound(ctx, 3*time.Minute, func(ctx 
context.Context) (any, error) { return findDBSubnetGroupByName(ctx, conn, d.Id()) }) diff --git a/internal/service/rds/tags_gen.go b/internal/service/rds/tags_gen.go index fea0b40d2787..86c9d4ec1978 100644 --- a/internal/service/rds/tags_gen.go +++ b/internal/service/rds/tags_gen.go @@ -3,8 +3,8 @@ package rds import ( "context" - "fmt" + "github.com/YakDriver/smarterr" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/rds" awstypes "github.com/aws/aws-sdk-go-v2/service/rds/types" @@ -27,7 +27,7 @@ func listTags(ctx context.Context, conn *rds.Client, identifier string, optFns . output, err := conn.ListTagsForResource(ctx, &input, optFns...) if err != nil { - return tftags.New(ctx, nil), err + return tftags.New(ctx, nil), smarterr.NewError(err) } return keyValueTags(ctx, output.TagList), nil @@ -39,7 +39,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri tags, err := listTags(ctx, meta.(*conns.AWSClient).RDSClient(ctx), identifier) if err != nil { - return err + return smarterr.NewError(err) } if inContext, ok := tftags.FromContext(ctx); ok { @@ -117,7 +117,7 @@ func updateTags(ctx context.Context, conn *rds.Client, identifier string, oldTag _, err := conn.RemoveTagsFromResource(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("untagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } @@ -132,7 +132,7 @@ func updateTags(ctx context.Context, conn *rds.Client, identifier string, oldTag _, err := conn.AddTagsToResource(ctx, &input, optFns...) 
if err != nil { - return fmt.Errorf("tagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } diff --git a/internal/service/rds/testdata/Certificate/basic_v5.100.0/main_gen.tf b/internal/service/rds/testdata/Certificate/basic_v5.100.0/main_gen.tf new file mode 100644 index 000000000000..22a553b4ee61 --- /dev/null +++ b/internal/service/rds/testdata/Certificate/basic_v5.100.0/main_gen.tf @@ -0,0 +1,17 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resource "aws_rds_certificate" "test" { + certificate_identifier = "rds-ca-rsa4096-g1" +} + +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "5.100.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/rds/testdata/Certificate/basic_v6.0.0/main_gen.tf b/internal/service/rds/testdata/Certificate/basic_v6.0.0/main_gen.tf new file mode 100644 index 000000000000..dddc719710ab --- /dev/null +++ b/internal/service/rds/testdata/Certificate/basic_v6.0.0/main_gen.tf @@ -0,0 +1,17 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_rds_certificate" "test" { + certificate_identifier = "rds-ca-rsa4096-g1" +} + +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "6.0.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/rds/testdata/Integration/basic/main_gen.tf b/internal/service/rds/testdata/Integration/basic/main_gen.tf index fa170ceace05..d4d55036cda7 100644 --- a/internal/service/rds/testdata/Integration/basic/main_gen.tf +++ b/internal/service/rds/testdata/Integration/basic/main_gen.tf @@ -154,9 +154,15 @@ resource "aws_db_subnet_group" "test" { subnet_ids = aws_subnet.test[*].id } +data "aws_rds_engine_version" "test" { + engine = "aurora-mysql" + version = "8.0" + latest = true +} + resource "aws_rds_cluster_parameter_group" "test" { name = var.rName - family = "aurora-mysql8.0" + family = data.aws_rds_engine_version.test.parameter_group_family dynamic "parameter" { for_each = local.cluster_parameters @@ -170,8 +176,8 @@ resource "aws_rds_cluster_parameter_group" "test" { resource "aws_rds_cluster" "test" { cluster_identifier = var.rName - engine = "aurora-mysql" - engine_version = "8.0.mysql_aurora.3.05.2" + engine = data.aws_rds_engine_version.test.engine + engine_version = data.aws_rds_engine_version.test.version_actual database_name = "test" master_username = "tfacctest" master_password = "avoid-plaintext-passwords" @@ -184,12 +190,20 @@ resource "aws_rds_cluster" "test" { apply_immediately = true } +data "aws_rds_orderable_db_instance" "test" { + engine = data.aws_rds_engine_version.test.engine + engine_version = data.aws_rds_engine_version.test.version_actual + preferred_instance_classes = local.mainInstanceClasses + supports_clusters = true + supports_global_databases = true +} + resource "aws_rds_cluster_instance" "test" { identifier = var.rName - cluster_identifier = aws_rds_cluster.test.id - instance_class = "db.r6g.large" + cluster_identifier = 
aws_rds_cluster.test.cluster_identifier engine = aws_rds_cluster.test.engine engine_version = aws_rds_cluster.test.engine_version + instance_class = data.aws_rds_orderable_db_instance.test.instance_class } resource "aws_redshift_cluster" "test" { @@ -202,7 +216,9 @@ resource "aws_redshift_cluster" "test" { cluster_type = "single-node" skip_final_snapshot = true - availability_zone_relocation_enabled = false + availability_zone_relocation_enabled = true + publicly_accessible = false + encrypted = true } # acctest.ConfigVPCWithSubnets(rName, 3) @@ -235,6 +251,60 @@ locals { default_exclude_zone_ids = ["usw2-az4", "usgw1-az2"] } +locals { + mainInstanceClasses = [ + "db.t4g.micro", + "db.t3.micro", + "db.t4g.small", + "db.t3.small", + "db.t4g.medium", + "db.t3.medium", + "db.t4g.large", + "db.t3.large", + "db.m6g.large", + "db.m7g.large", + "db.m5.large", + "db.m6i.large", + "db.m6gd.large", + "db.m5d.large", + "db.r6g.large", + "db.m6id.large", + "db.r7g.large", + "db.r5.large", + "db.r6i.large", + "db.r6gd.large", + "db.m6in.large", + "db.t4g.xlarge", + "db.t3.xlarge", + "db.r5d.large", + "db.m6idn.large", + "db.r5b.large", + "db.r6id.large", + "db.m6g.xlarge", + "db.x2g.large", + "db.m7g.xlarge", + "db.m5.xlarge", + "db.m6i.xlarge", + "db.r6in.large", + "db.m6gd.xlarge", + "db.r6idn.large", + "db.m5d.xlarge", + "db.r6g.xlarge", + "db.m6id.xlarge", + "db.r7g.xlarge", + "db.r5.xlarge", + "db.r6i.xlarge", + "db.r6gd.xlarge", + "db.m6in.xlarge", + "db.t4g.2xlarge", + "db.t3.2xlarge", + "db.r5d.xlarge", + "db.m6idn.xlarge", + "db.r5b.xlarge", + "db.r6id.xlarge", + ] +} + variable "rName" { description = "Name for resource" type = string diff --git a/internal/service/rds/testdata/Integration/basic_v5.100.0/main_gen.tf b/internal/service/rds/testdata/Integration/basic_v5.100.0/main_gen.tf new file mode 100644 index 000000000000..fb46b3f427fa --- /dev/null +++ b/internal/service/rds/testdata/Integration/basic_v5.100.0/main_gen.tf @@ -0,0 +1,322 @@ +# Copyright (c) 
HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resource "aws_rds_integration" "test" { + integration_name = var.rName + source_arn = aws_rds_cluster.test.arn + target_arn = aws_redshiftserverless_namespace.test.arn + + depends_on = [ + aws_rds_cluster.test, + aws_rds_cluster_instance.test, + aws_redshiftserverless_namespace.test, + aws_redshiftserverless_workgroup.test, + aws_redshift_resource_policy.test, + ] +} + +# testAccIntegrationConfig_base + +resource "aws_redshiftserverless_namespace" "test" { + namespace_name = var.rName +} + +resource "aws_redshiftserverless_workgroup" "test" { + namespace_name = aws_redshiftserverless_namespace.test.namespace_name + workgroup_name = var.rName + base_capacity = 8 + + publicly_accessible = false + subnet_ids = aws_subnet.test[*].id + + config_parameter { + parameter_key = "enable_case_sensitive_identifier" + parameter_value = "true" + } + config_parameter { + parameter_key = "auto_mv" + parameter_value = "true" + } + config_parameter { + parameter_key = "datestyle" + parameter_value = "ISO, MDY" + } + config_parameter { + parameter_key = "enable_user_activity_logging" + parameter_value = "true" + } + config_parameter { + parameter_key = "max_query_execution_time" + parameter_value = "14400" + } + config_parameter { + parameter_key = "query_group" + parameter_value = "default" + } + config_parameter { + parameter_key = "require_ssl" + parameter_value = "true" + } + config_parameter { + parameter_key = "search_path" + parameter_value = "$user, public" + } + config_parameter { + parameter_key = "use_fips_ssl" + parameter_value = "false" + } +} + +# The "aws_redshiftserverless_resource_policy" resource doesn't support the following action types. +# Therefore we need to use the "aws_redshift_resource_policy" resource for RedShift-serverless instead. 
+resource "aws_redshift_resource_policy" "test" { + resource_arn = aws_redshiftserverless_namespace.test.arn + policy = jsonencode({ + Version = "2008-10-17" + Statement = [{ + Effect = "Allow" + Principal = { + AWS = "arn:${data.aws_partition.current.partition}:iam::${data.aws_caller_identity.current.account_id}:root" + } + Action = "redshift:CreateInboundIntegration" + Resource = aws_redshiftserverless_namespace.test.arn + }, { + Effect = "Allow" + Principal = { + Service = "redshift.amazonaws.com" + } + Action = "redshift:AuthorizeInboundIntegration" + Resource = aws_redshiftserverless_namespace.test.arn + Condition = { + StringEquals = { + "aws:SourceArn" = aws_rds_cluster.test.arn + } + } + }] + }) +} + +# testAccIntegrationConfig_baseClusterWithInstance + +locals { + cluster_parameters = { + "binlog_replication_globaldb" = { + value = "0" + apply_method = "pending-reboot" + }, + "binlog_format" = { + value = "ROW" + apply_method = "pending-reboot" + }, + "binlog_row_metadata" = { + value = "full" + apply_method = "immediate" + }, + "binlog_row_image" = { + value = "full" + apply_method = "immediate" + }, + "aurora_enhanced_binlog" = { + value = "1" + apply_method = "pending-reboot" + }, + "binlog_backup" = { + value = "0" + apply_method = "pending-reboot" + }, + } +} + +data "aws_caller_identity" "current" {} +data "aws_partition" "current" {} + +resource "aws_security_group" "test" { + name = var.rName + vpc_id = aws_vpc.test.id + + ingress { + protocol = -1 + self = true + from_port = 0 + to_port = 0 + } + + egress { + from_port = 0 + to_port = 0 + protocol = "-1" + cidr_blocks = ["0.0.0.0/0"] + } +} + +resource "aws_db_subnet_group" "test" { + name = var.rName + subnet_ids = aws_subnet.test[*].id +} + +data "aws_rds_engine_version" "test" { + engine = "aurora-mysql" + version = "8.0" + latest = true +} + +resource "aws_rds_cluster_parameter_group" "test" { + name = var.rName + family = data.aws_rds_engine_version.test.parameter_group_family + + dynamic 
"parameter" { + for_each = local.cluster_parameters + content { + name = parameter.key + value = parameter.value["value"] + apply_method = parameter.value["apply_method"] + } + } +} + +resource "aws_rds_cluster" "test" { + cluster_identifier = var.rName + engine = data.aws_rds_engine_version.test.engine + engine_version = data.aws_rds_engine_version.test.version_actual + database_name = "test" + master_username = "tfacctest" + master_password = "avoid-plaintext-passwords" + skip_final_snapshot = true + + vpc_security_group_ids = [aws_security_group.test.id] + db_subnet_group_name = aws_db_subnet_group.test.name + db_cluster_parameter_group_name = aws_rds_cluster_parameter_group.test.name + + apply_immediately = true +} + +data "aws_rds_orderable_db_instance" "test" { + engine = data.aws_rds_engine_version.test.engine + engine_version = data.aws_rds_engine_version.test.version_actual + preferred_instance_classes = local.mainInstanceClasses + supports_clusters = true + supports_global_databases = true +} + +resource "aws_rds_cluster_instance" "test" { + identifier = var.rName + cluster_identifier = aws_rds_cluster.test.cluster_identifier + engine = aws_rds_cluster.test.engine + engine_version = aws_rds_cluster.test.engine_version + instance_class = data.aws_rds_orderable_db_instance.test.instance_class +} + +resource "aws_redshift_cluster" "test" { + cluster_identifier = var.rName + availability_zone = data.aws_availability_zones.available.names[0] + database_name = "mydb" + master_username = "foo" + master_password = "Mustbe8characters" + node_type = "ra3.large" + cluster_type = "single-node" + skip_final_snapshot = true + + availability_zone_relocation_enabled = true + publicly_accessible = false + encrypted = true +} + +# acctest.ConfigVPCWithSubnets(rName, 3) + +resource "aws_vpc" "test" { + cidr_block = "10.0.0.0/16" +} + +resource "aws_subnet" "test" { + count = 3 + + vpc_id = aws_vpc.test.id + availability_zone = 
data.aws_availability_zones.available.names[count.index] + cidr_block = cidrsubnet(aws_vpc.test.cidr_block, 8, count.index) +} + +# acctest.ConfigAvailableAZsNoOptInDefaultExclude + +data "aws_availability_zones" "available" { + exclude_zone_ids = local.default_exclude_zone_ids + state = "available" + + filter { + name = "opt-in-status" + values = ["opt-in-not-required"] + } +} + +locals { + default_exclude_zone_ids = ["usw2-az4", "usgw1-az2"] +} + +locals { + mainInstanceClasses = [ + "db.t4g.micro", + "db.t3.micro", + "db.t4g.small", + "db.t3.small", + "db.t4g.medium", + "db.t3.medium", + "db.t4g.large", + "db.t3.large", + "db.m6g.large", + "db.m7g.large", + "db.m5.large", + "db.m6i.large", + "db.m6gd.large", + "db.m5d.large", + "db.r6g.large", + "db.m6id.large", + "db.r7g.large", + "db.r5.large", + "db.r6i.large", + "db.r6gd.large", + "db.m6in.large", + "db.t4g.xlarge", + "db.t3.xlarge", + "db.r5d.large", + "db.m6idn.large", + "db.r5b.large", + "db.r6id.large", + "db.m6g.xlarge", + "db.x2g.large", + "db.m7g.xlarge", + "db.m5.xlarge", + "db.m6i.xlarge", + "db.r6in.large", + "db.m6gd.xlarge", + "db.r6idn.large", + "db.m5d.xlarge", + "db.r6g.xlarge", + "db.m6id.xlarge", + "db.r7g.xlarge", + "db.r5.xlarge", + "db.r6i.xlarge", + "db.r6gd.xlarge", + "db.m6in.xlarge", + "db.t4g.2xlarge", + "db.t3.2xlarge", + "db.r5d.xlarge", + "db.m6idn.xlarge", + "db.r5b.xlarge", + "db.r6id.xlarge", + ] +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "5.100.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/rds/testdata/Integration/basic_v6.0.0/main_gen.tf b/internal/service/rds/testdata/Integration/basic_v6.0.0/main_gen.tf new file mode 100644 index 000000000000..e6e65a9413b9 --- /dev/null +++ b/internal/service/rds/testdata/Integration/basic_v6.0.0/main_gen.tf @@ -0,0 +1,322 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_rds_integration" "test" { + integration_name = var.rName + source_arn = aws_rds_cluster.test.arn + target_arn = aws_redshiftserverless_namespace.test.arn + + depends_on = [ + aws_rds_cluster.test, + aws_rds_cluster_instance.test, + aws_redshiftserverless_namespace.test, + aws_redshiftserverless_workgroup.test, + aws_redshift_resource_policy.test, + ] +} + +# testAccIntegrationConfig_base + +resource "aws_redshiftserverless_namespace" "test" { + namespace_name = var.rName +} + +resource "aws_redshiftserverless_workgroup" "test" { + namespace_name = aws_redshiftserverless_namespace.test.namespace_name + workgroup_name = var.rName + base_capacity = 8 + + publicly_accessible = false + subnet_ids = aws_subnet.test[*].id + + config_parameter { + parameter_key = "enable_case_sensitive_identifier" + parameter_value = "true" + } + config_parameter { + parameter_key = "auto_mv" + parameter_value = "true" + } + config_parameter { + parameter_key = "datestyle" + parameter_value = "ISO, MDY" + } + config_parameter { + parameter_key = "enable_user_activity_logging" + parameter_value = "true" + } + config_parameter { + parameter_key = "max_query_execution_time" + parameter_value = "14400" + } + config_parameter { + parameter_key = "query_group" + parameter_value = "default" + } + config_parameter { + parameter_key = "require_ssl" + parameter_value = "true" + } + config_parameter { + parameter_key = "search_path" + parameter_value = "$user, public" + } + config_parameter { + parameter_key = "use_fips_ssl" + parameter_value = "false" + } +} + +# The "aws_redshiftserverless_resource_policy" resource doesn't support the following action types. +# Therefore we need to use the "aws_redshift_resource_policy" resource for RedShift-serverless instead. 
+resource "aws_redshift_resource_policy" "test" { + resource_arn = aws_redshiftserverless_namespace.test.arn + policy = jsonencode({ + Version = "2008-10-17" + Statement = [{ + Effect = "Allow" + Principal = { + AWS = "arn:${data.aws_partition.current.partition}:iam::${data.aws_caller_identity.current.account_id}:root" + } + Action = "redshift:CreateInboundIntegration" + Resource = aws_redshiftserverless_namespace.test.arn + }, { + Effect = "Allow" + Principal = { + Service = "redshift.amazonaws.com" + } + Action = "redshift:AuthorizeInboundIntegration" + Resource = aws_redshiftserverless_namespace.test.arn + Condition = { + StringEquals = { + "aws:SourceArn" = aws_rds_cluster.test.arn + } + } + }] + }) +} + +# testAccIntegrationConfig_baseClusterWithInstance + +locals { + cluster_parameters = { + "binlog_replication_globaldb" = { + value = "0" + apply_method = "pending-reboot" + }, + "binlog_format" = { + value = "ROW" + apply_method = "pending-reboot" + }, + "binlog_row_metadata" = { + value = "full" + apply_method = "immediate" + }, + "binlog_row_image" = { + value = "full" + apply_method = "immediate" + }, + "aurora_enhanced_binlog" = { + value = "1" + apply_method = "pending-reboot" + }, + "binlog_backup" = { + value = "0" + apply_method = "pending-reboot" + }, + } +} + +data "aws_caller_identity" "current" {} +data "aws_partition" "current" {} + +resource "aws_security_group" "test" { + name = var.rName + vpc_id = aws_vpc.test.id + + ingress { + protocol = -1 + self = true + from_port = 0 + to_port = 0 + } + + egress { + from_port = 0 + to_port = 0 + protocol = "-1" + cidr_blocks = ["0.0.0.0/0"] + } +} + +resource "aws_db_subnet_group" "test" { + name = var.rName + subnet_ids = aws_subnet.test[*].id +} + +data "aws_rds_engine_version" "test" { + engine = "aurora-mysql" + version = "8.0" + latest = true +} + +resource "aws_rds_cluster_parameter_group" "test" { + name = var.rName + family = data.aws_rds_engine_version.test.parameter_group_family + + dynamic 
"parameter" { + for_each = local.cluster_parameters + content { + name = parameter.key + value = parameter.value["value"] + apply_method = parameter.value["apply_method"] + } + } +} + +resource "aws_rds_cluster" "test" { + cluster_identifier = var.rName + engine = data.aws_rds_engine_version.test.engine + engine_version = data.aws_rds_engine_version.test.version_actual + database_name = "test" + master_username = "tfacctest" + master_password = "avoid-plaintext-passwords" + skip_final_snapshot = true + + vpc_security_group_ids = [aws_security_group.test.id] + db_subnet_group_name = aws_db_subnet_group.test.name + db_cluster_parameter_group_name = aws_rds_cluster_parameter_group.test.name + + apply_immediately = true +} + +data "aws_rds_orderable_db_instance" "test" { + engine = data.aws_rds_engine_version.test.engine + engine_version = data.aws_rds_engine_version.test.version_actual + preferred_instance_classes = local.mainInstanceClasses + supports_clusters = true + supports_global_databases = true +} + +resource "aws_rds_cluster_instance" "test" { + identifier = var.rName + cluster_identifier = aws_rds_cluster.test.cluster_identifier + engine = aws_rds_cluster.test.engine + engine_version = aws_rds_cluster.test.engine_version + instance_class = data.aws_rds_orderable_db_instance.test.instance_class +} + +resource "aws_redshift_cluster" "test" { + cluster_identifier = var.rName + availability_zone = data.aws_availability_zones.available.names[0] + database_name = "mydb" + master_username = "foo" + master_password = "Mustbe8characters" + node_type = "ra3.large" + cluster_type = "single-node" + skip_final_snapshot = true + + availability_zone_relocation_enabled = true + publicly_accessible = false + encrypted = true +} + +# acctest.ConfigVPCWithSubnets(rName, 3) + +resource "aws_vpc" "test" { + cidr_block = "10.0.0.0/16" +} + +resource "aws_subnet" "test" { + count = 3 + + vpc_id = aws_vpc.test.id + availability_zone = 
data.aws_availability_zones.available.names[count.index] + cidr_block = cidrsubnet(aws_vpc.test.cidr_block, 8, count.index) +} + +# acctest.ConfigAvailableAZsNoOptInDefaultExclude + +data "aws_availability_zones" "available" { + exclude_zone_ids = local.default_exclude_zone_ids + state = "available" + + filter { + name = "opt-in-status" + values = ["opt-in-not-required"] + } +} + +locals { + default_exclude_zone_ids = ["usw2-az4", "usgw1-az2"] +} + +locals { + mainInstanceClasses = [ + "db.t4g.micro", + "db.t3.micro", + "db.t4g.small", + "db.t3.small", + "db.t4g.medium", + "db.t3.medium", + "db.t4g.large", + "db.t3.large", + "db.m6g.large", + "db.m7g.large", + "db.m5.large", + "db.m6i.large", + "db.m6gd.large", + "db.m5d.large", + "db.r6g.large", + "db.m6id.large", + "db.r7g.large", + "db.r5.large", + "db.r6i.large", + "db.r6gd.large", + "db.m6in.large", + "db.t4g.xlarge", + "db.t3.xlarge", + "db.r5d.large", + "db.m6idn.large", + "db.r5b.large", + "db.r6id.large", + "db.m6g.xlarge", + "db.x2g.large", + "db.m7g.xlarge", + "db.m5.xlarge", + "db.m6i.xlarge", + "db.r6in.large", + "db.m6gd.xlarge", + "db.r6idn.large", + "db.m5d.xlarge", + "db.r6g.xlarge", + "db.m6id.xlarge", + "db.r7g.xlarge", + "db.r5.xlarge", + "db.r6i.xlarge", + "db.r6gd.xlarge", + "db.m6in.xlarge", + "db.t4g.2xlarge", + "db.t3.2xlarge", + "db.r5d.xlarge", + "db.m6idn.xlarge", + "db.r5b.xlarge", + "db.r6id.xlarge", + ] +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "6.0.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/rds/testdata/Integration/region_override/main_gen.tf b/internal/service/rds/testdata/Integration/region_override/main_gen.tf index 78b03058dbba..c384d181f27c 100644 --- a/internal/service/rds/testdata/Integration/region_override/main_gen.tf +++ b/internal/service/rds/testdata/Integration/region_override/main_gen.tf @@ -166,11 +166,19 @@ 
resource "aws_db_subnet_group" "test" { subnet_ids = aws_subnet.test[*].id } +data "aws_rds_engine_version" "test" { + region = var.region + + engine = "aurora-mysql" + version = "8.0" + latest = true +} + resource "aws_rds_cluster_parameter_group" "test" { region = var.region name = var.rName - family = "aurora-mysql8.0" + family = data.aws_rds_engine_version.test.parameter_group_family dynamic "parameter" { for_each = local.cluster_parameters @@ -186,8 +194,8 @@ resource "aws_rds_cluster" "test" { region = var.region cluster_identifier = var.rName - engine = "aurora-mysql" - engine_version = "8.0.mysql_aurora.3.05.2" + engine = data.aws_rds_engine_version.test.engine + engine_version = data.aws_rds_engine_version.test.version_actual database_name = "test" master_username = "tfacctest" master_password = "avoid-plaintext-passwords" @@ -200,14 +208,24 @@ resource "aws_rds_cluster" "test" { apply_immediately = true } +data "aws_rds_orderable_db_instance" "test" { + region = var.region + + engine = data.aws_rds_engine_version.test.engine + engine_version = data.aws_rds_engine_version.test.version_actual + preferred_instance_classes = local.mainInstanceClasses + supports_clusters = true + supports_global_databases = true +} + resource "aws_rds_cluster_instance" "test" { region = var.region identifier = var.rName - cluster_identifier = aws_rds_cluster.test.id - instance_class = "db.r6g.large" + cluster_identifier = aws_rds_cluster.test.cluster_identifier engine = aws_rds_cluster.test.engine engine_version = aws_rds_cluster.test.engine_version + instance_class = data.aws_rds_orderable_db_instance.test.instance_class } resource "aws_redshift_cluster" "test" { @@ -222,7 +240,9 @@ resource "aws_redshift_cluster" "test" { cluster_type = "single-node" skip_final_snapshot = true - availability_zone_relocation_enabled = false + availability_zone_relocation_enabled = true + publicly_accessible = false + encrypted = true } # acctest.ConfigVPCWithSubnets(rName, 3) @@ -261,6 
+281,60 @@ locals { default_exclude_zone_ids = ["usw2-az4", "usgw1-az2"] } +locals { + mainInstanceClasses = [ + "db.t4g.micro", + "db.t3.micro", + "db.t4g.small", + "db.t3.small", + "db.t4g.medium", + "db.t3.medium", + "db.t4g.large", + "db.t3.large", + "db.m6g.large", + "db.m7g.large", + "db.m5.large", + "db.m6i.large", + "db.m6gd.large", + "db.m5d.large", + "db.r6g.large", + "db.m6id.large", + "db.r7g.large", + "db.r5.large", + "db.r6i.large", + "db.r6gd.large", + "db.m6in.large", + "db.t4g.xlarge", + "db.t3.xlarge", + "db.r5d.large", + "db.m6idn.large", + "db.r5b.large", + "db.r6id.large", + "db.m6g.xlarge", + "db.x2g.large", + "db.m7g.xlarge", + "db.m5.xlarge", + "db.m6i.xlarge", + "db.r6in.large", + "db.m6gd.xlarge", + "db.r6idn.large", + "db.m5d.xlarge", + "db.r6g.xlarge", + "db.m6id.xlarge", + "db.r7g.xlarge", + "db.r5.xlarge", + "db.r6i.xlarge", + "db.r6gd.xlarge", + "db.m6in.xlarge", + "db.t4g.2xlarge", + "db.t3.2xlarge", + "db.r5d.xlarge", + "db.m6idn.xlarge", + "db.r5b.xlarge", + "db.r6id.xlarge", + ] +} + variable "rName" { description = "Name for resource" type = string diff --git a/internal/service/rds/testdata/tmpl/integration_tags.gtpl b/internal/service/rds/testdata/tmpl/integration_tags.gtpl index 0c4b13235877..af774d9657ed 100644 --- a/internal/service/rds/testdata/tmpl/integration_tags.gtpl +++ b/internal/service/rds/testdata/tmpl/integration_tags.gtpl @@ -158,10 +158,17 @@ resource "aws_db_subnet_group" "test" { subnet_ids = aws_subnet.test[*].id } +data "aws_rds_engine_version" "test" { +{{- template "region" }} + engine = "aurora-mysql" + version = "8.0" + latest = true +} + resource "aws_rds_cluster_parameter_group" "test" { {{- template "region" }} name = var.rName - family = "aurora-mysql8.0" + family = data.aws_rds_engine_version.test.parameter_group_family dynamic "parameter" { for_each = local.cluster_parameters @@ -176,8 +183,8 @@ resource "aws_rds_cluster_parameter_group" "test" { resource "aws_rds_cluster" "test" { {{- template 
"region" }} cluster_identifier = var.rName - engine = "aurora-mysql" - engine_version = "8.0.mysql_aurora.3.05.2" + engine = data.aws_rds_engine_version.test.engine + engine_version = data.aws_rds_engine_version.test.version_actual database_name = "test" master_username = "tfacctest" master_password = "avoid-plaintext-passwords" @@ -190,13 +197,22 @@ resource "aws_rds_cluster" "test" { apply_immediately = true } +data "aws_rds_orderable_db_instance" "test" { +{{- template "region" }} + engine = data.aws_rds_engine_version.test.engine + engine_version = data.aws_rds_engine_version.test.version_actual + preferred_instance_classes = local.mainInstanceClasses + supports_clusters = true + supports_global_databases = true +} + resource "aws_rds_cluster_instance" "test" { {{- template "region" }} identifier = var.rName - cluster_identifier = aws_rds_cluster.test.id - instance_class = "db.r6g.large" + cluster_identifier = aws_rds_cluster.test.cluster_identifier engine = aws_rds_cluster.test.engine engine_version = aws_rds_cluster.test.engine_version + instance_class = data.aws_rds_orderable_db_instance.test.instance_class } resource "aws_redshift_cluster" "test" { @@ -210,7 +226,63 @@ resource "aws_redshift_cluster" "test" { cluster_type = "single-node" skip_final_snapshot = true - availability_zone_relocation_enabled = false + availability_zone_relocation_enabled = true + publicly_accessible = false + encrypted = true } {{ template "acctest.ConfigVPCWithSubnets" 3 }} + +locals { + mainInstanceClasses = [ + "db.t4g.micro", + "db.t3.micro", + "db.t4g.small", + "db.t3.small", + "db.t4g.medium", + "db.t3.medium", + "db.t4g.large", + "db.t3.large", + "db.m6g.large", + "db.m7g.large", + "db.m5.large", + "db.m6i.large", + "db.m6gd.large", + "db.m5d.large", + "db.r6g.large", + "db.m6id.large", + "db.r7g.large", + "db.r5.large", + "db.r6i.large", + "db.r6gd.large", + "db.m6in.large", + "db.t4g.xlarge", + "db.t3.xlarge", + "db.r5d.large", + "db.m6idn.large", + "db.r5b.large", + 
"db.r6id.large", + "db.m6g.xlarge", + "db.x2g.large", + "db.m7g.xlarge", + "db.m5.xlarge", + "db.m6i.xlarge", + "db.r6in.large", + "db.m6gd.xlarge", + "db.r6idn.large", + "db.m5d.xlarge", + "db.r6g.xlarge", + "db.m6id.xlarge", + "db.r7g.xlarge", + "db.r5.xlarge", + "db.r6i.xlarge", + "db.r6gd.xlarge", + "db.m6in.xlarge", + "db.t4g.2xlarge", + "db.t3.2xlarge", + "db.r5d.xlarge", + "db.m6idn.xlarge", + "db.r5b.xlarge", + "db.r6id.xlarge", + ] +} diff --git a/internal/service/redshift/cluster.go b/internal/service/redshift/cluster.go index 9231f874f120..091b59400ab7 100644 --- a/internal/service/redshift/cluster.go +++ b/internal/service/redshift/cluster.go @@ -858,8 +858,8 @@ func resourceClusterUpdate(ctx context.Context, d *schema.ResourceData, meta any ClusterIdentifier: aws.String(d.Id()), } - _, err := tfresource.RetryWhenIsA[*awstypes.InvalidClusterStateFault](ctx, clusterInvalidClusterStateFaultTimeout, - func() (any, error) { + _, err := tfresource.RetryWhenIsA[any, *awstypes.InvalidClusterStateFault](ctx, clusterInvalidClusterStateFaultTimeout, + func(ctx context.Context) (any, error) { return conn.RebootCluster(ctx, input) }) @@ -954,8 +954,8 @@ func resourceClusterDelete(ctx context.Context, d *schema.ResourceData, meta any } log.Printf("[DEBUG] Deleting Redshift Cluster: %s", d.Id()) - _, err := tfresource.RetryWhenIsA[*awstypes.InvalidClusterStateFault](ctx, clusterInvalidClusterStateFaultTimeout, - func() (any, error) { + _, err := tfresource.RetryWhenIsA[any, *awstypes.InvalidClusterStateFault](ctx, clusterInvalidClusterStateFaultTimeout, + func(ctx context.Context) (any, error) { return conn.DeleteCluster(ctx, input) }) diff --git a/internal/service/redshift/logging.go b/internal/service/redshift/logging.go index 49ee9db71e8c..ea931a2ee565 100644 --- a/internal/service/redshift/logging.go +++ b/internal/service/redshift/logging.go @@ -94,8 +94,8 @@ func (r *loggingResource) Create(ctx context.Context, req resource.CreateRequest // Retry 
InvalidClusterState faults, which can occur when logging is enabled // immediately after being disabled (ie. resource replacement). - out, err := tfresource.RetryWhenIsAErrorMessageContains[*awstypes.InvalidClusterStateFault](ctx, propagationTimeout, - func() (any, error) { + out, err := tfresource.RetryWhenIsAErrorMessageContains[any, *awstypes.InvalidClusterStateFault](ctx, propagationTimeout, + func(ctx context.Context) (any, error) { return conn.EnableLogging(ctx, in) }, "There is an operation running on the Cluster", @@ -171,8 +171,8 @@ func (r *loggingResource) Update(ctx context.Context, req resource.UpdateRequest // Retry InvalidClusterState faults, which can occur when logging is enabled // immediately after being disabled (ie. resource replacement). - out, err := tfresource.RetryWhenIsAErrorMessageContains[*awstypes.InvalidClusterStateFault](ctx, propagationTimeout, - func() (any, error) { + out, err := tfresource.RetryWhenIsAErrorMessageContains[any, *awstypes.InvalidClusterStateFault](ctx, propagationTimeout, + func(ctx context.Context) (any, error) { return conn.EnableLogging(ctx, in) }, "There is an operation running on the Cluster", @@ -210,8 +210,8 @@ func (r *loggingResource) Delete(ctx context.Context, req resource.DeleteRequest } // Retry InvalidClusterState faults, which can occur when logging is being enabled. 
- _, err := tfresource.RetryWhenIsAErrorMessageContains[*awstypes.InvalidClusterStateFault](ctx, propagationTimeout, - func() (any, error) { + _, err := tfresource.RetryWhenIsAErrorMessageContains[any, *awstypes.InvalidClusterStateFault](ctx, propagationTimeout, + func(ctx context.Context) (any, error) { return conn.DisableLogging(ctx, in) }, "There is an operation running on the Cluster", diff --git a/internal/service/redshift/scheduled_action.go b/internal/service/redshift/scheduled_action.go index 87f2bc9a8c94..1577a2f2c20c 100644 --- a/internal/service/redshift/scheduled_action.go +++ b/internal/service/redshift/scheduled_action.go @@ -184,7 +184,7 @@ func resourceScheduledActionCreate(ctx context.Context, d *schema.ResourceData, log.Printf("[DEBUG] Creating Redshift Scheduled Action: %#v", input) outputRaw, err := tfresource.RetryWhen(ctx, propagationTimeout, - func() (any, error) { + func(ctx context.Context) (any, error) { return conn.CreateScheduledAction(ctx, input) }, func(err error) (bool, error) { diff --git a/internal/service/redshift/service_endpoint_resolver_gen.go b/internal/service/redshift/service_endpoint_resolver_gen.go index e58256a51949..897e9ee5cc44 100644 --- a/internal/service/redshift/service_endpoint_resolver_gen.go +++ b/internal/service/redshift/service_endpoint_resolver_gen.go @@ -62,7 +62,7 @@ func (r resolverV2) ResolveEndpoint(ctx context.Context, params redshift.Endpoin }) params.UseFIPS = aws.Bool(false) } else { - err = fmt.Errorf("looking up redshift endpoint %q: %s", hostname, err) + err = fmt.Errorf("looking up redshift endpoint %q: %w", hostname, err) return } } else { diff --git a/internal/service/redshift/service_endpoints_gen_test.go b/internal/service/redshift/service_endpoints_gen_test.go index aebf72d90c98..13aeb1d8c9ad 100644 --- a/internal/service/redshift/service_endpoints_gen_test.go +++ b/internal/service/redshift/service_endpoints_gen_test.go @@ -521,7 +521,7 @@ func retrieveRegionMiddleware(region *string) 
middleware.SerializeMiddleware { ) } -var errCancelOperation = fmt.Errorf("Test: Canceling request") +var errCancelOperation = errors.New("Test: Canceling request") func addCancelRequestMiddleware() func(*middleware.Stack) error { return func(stack *middleware.Stack) error { diff --git a/internal/service/redshift/service_package_gen.go b/internal/service/redshift/service_package_gen.go index 7ed7bf02de08..f5bfe9ca75e6 100644 --- a/internal/service/redshift/service_package_gen.go +++ b/internal/service/redshift/service_package_gen.go @@ -7,7 +7,6 @@ import ( "unique" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/redshift" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -270,7 +269,7 @@ func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) ( func(o *redshift.Options) { if inContext, ok := conns.FromContext(ctx); ok && inContext.VCREnabled() { tflog.Info(ctx, "overriding retry behavior to immediately return VCR errors") - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(vcr.InteractionNotFoundRetryableFunc)) + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), vcr.InteractionNotFoundRetryableFunc) } }, withExtraOptions(ctx, p, config), diff --git a/internal/service/redshift/snapshot_copy_grant.go b/internal/service/redshift/snapshot_copy_grant.go index 0e690c1161d7..7d4527eec6ee 100644 --- a/internal/service/redshift/snapshot_copy_grant.go +++ b/internal/service/redshift/snapshot_copy_grant.go @@ -81,7 +81,7 @@ func resourceSnapshotCopyGrantCreate(ctx context.Context, d *schema.ResourceData d.SetId(name) - _, err = tfresource.RetryWhenNotFound(ctx, 3*time.Minute, func() (any, error) { + _, err = tfresource.RetryWhenNotFound(ctx, 3*time.Minute, func(ctx context.Context) (any, error) { return findSnapshotCopyGrantByName(ctx, conn, d.Id()) }) @@ 
-149,7 +149,7 @@ func resourceSnapshotCopyGrantDelete(ctx context.Context, d *schema.ResourceData return sdkdiag.AppendErrorf(diags, "deleting Redshift Snapshot Copy Grant (%s): %s", d.Id(), err) } - _, err = tfresource.RetryUntilNotFound(ctx, 3*time.Minute, func() (any, error) { + _, err = tfresource.RetryUntilNotFound(ctx, 3*time.Minute, func(ctx context.Context) (any, error) { return findSnapshotCopyGrantByName(ctx, conn, d.Id()) }) diff --git a/internal/service/redshift/sweep.go b/internal/service/redshift/sweep.go index ca727a2c5ac1..ae593fde87db 100644 --- a/internal/service/redshift/sweep.go +++ b/internal/service/redshift/sweep.go @@ -119,7 +119,7 @@ func sweepClusters(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.RedshiftClient(ctx) input := &redshift.DescribeClustersInput{} @@ -225,7 +225,7 @@ func sweepScheduledActions(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.RedshiftClient(ctx) input := &redshift.DescribeScheduledActionsInput{} @@ -266,7 +266,7 @@ func sweepSnapshotSchedules(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.RedshiftClient(ctx) input := &redshift.DescribeSnapshotSchedulesInput{} @@ -365,7 +365,7 @@ func sweepHSMClientCertificates(region string) error { client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.RedshiftClient(ctx) @@ -407,7 
+407,7 @@ func sweepHSMConfigurations(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.RedshiftClient(ctx) input := &redshift.DescribeHsmConfigurationsInput{} @@ -448,7 +448,7 @@ func sweepAuthenticationProfiles(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.RedshiftClient(ctx) input := &redshift.DescribeAuthenticationProfilesInput{} diff --git a/internal/service/redshift/tags_gen.go b/internal/service/redshift/tags_gen.go index 7e7aba2f783d..9773fcf92c67 100644 --- a/internal/service/redshift/tags_gen.go +++ b/internal/service/redshift/tags_gen.go @@ -3,8 +3,8 @@ package redshift import ( "context" - "fmt" + "github.com/YakDriver/smarterr" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/redshift" awstypes "github.com/aws/aws-sdk-go-v2/service/redshift/types" @@ -84,7 +84,7 @@ func updateTags(ctx context.Context, conn *redshift.Client, identifier string, o _, err := conn.DeleteTags(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("untagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } @@ -99,7 +99,7 @@ func updateTags(ctx context.Context, conn *redshift.Client, identifier string, o _, err := conn.CreateTags(ctx, &input, optFns...) 
if err != nil { - return fmt.Errorf("tagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } diff --git a/internal/service/redshiftdata/service_endpoint_resolver_gen.go b/internal/service/redshiftdata/service_endpoint_resolver_gen.go index aa9ae936fc35..39c2fda38639 100644 --- a/internal/service/redshiftdata/service_endpoint_resolver_gen.go +++ b/internal/service/redshiftdata/service_endpoint_resolver_gen.go @@ -62,7 +62,7 @@ func (r resolverV2) ResolveEndpoint(ctx context.Context, params redshiftdata.End }) params.UseFIPS = aws.Bool(false) } else { - err = fmt.Errorf("looking up redshiftdata endpoint %q: %s", hostname, err) + err = fmt.Errorf("looking up redshiftdata endpoint %q: %w", hostname, err) return } } else { diff --git a/internal/service/redshiftdata/service_endpoints_gen_test.go b/internal/service/redshiftdata/service_endpoints_gen_test.go index a9d436b731b5..c27671c121eb 100644 --- a/internal/service/redshiftdata/service_endpoints_gen_test.go +++ b/internal/service/redshiftdata/service_endpoints_gen_test.go @@ -603,7 +603,7 @@ func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { ) } -var errCancelOperation = fmt.Errorf("Test: Canceling request") +var errCancelOperation = errors.New("Test: Canceling request") func addCancelRequestMiddleware() func(*middleware.Stack) error { return func(stack *middleware.Stack) error { diff --git a/internal/service/redshiftdata/service_package_gen.go b/internal/service/redshiftdata/service_package_gen.go index 2cbae6f2b946..0e4b5d011441 100644 --- a/internal/service/redshiftdata/service_package_gen.go +++ b/internal/service/redshiftdata/service_package_gen.go @@ -7,7 +7,6 @@ import ( "unique" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/redshiftdata" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -64,7 +63,7 @@ func (p *servicePackage) 
NewClient(ctx context.Context, config map[string]any) ( func(o *redshiftdata.Options) { if inContext, ok := conns.FromContext(ctx); ok && inContext.VCREnabled() { tflog.Info(ctx, "overriding retry behavior to immediately return VCR errors") - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(vcr.InteractionNotFoundRetryableFunc)) + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), vcr.InteractionNotFoundRetryableFunc) } }, withExtraOptions(ctx, p, config), diff --git a/internal/service/redshiftdata/statement.go b/internal/service/redshiftdata/statement.go index 5201f90fcba3..a59de67eef5f 100644 --- a/internal/service/redshiftdata/statement.go +++ b/internal/service/redshiftdata/statement.go @@ -13,12 +13,12 @@ import ( "github.com/aws/aws-sdk-go-v2/service/redshiftdata" "github.com/aws/aws-sdk-go-v2/service/redshiftdata/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/enum" "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" + "github.com/hashicorp/terraform-provider-aws/internal/retry" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/internal/verify" "github.com/hashicorp/terraform-provider-aws/names" @@ -160,7 +160,7 @@ func resourceStatementRead(ctx context.Context, d *schema.ResourceData, meta any sub, err := FindStatementByID(ctx, conn, d.Id()) - if !d.IsNewResource() && tfresource.NotFound(err) { + if !d.IsNewResource() && retry.NotFound(err) { log.Printf("[WARN] Redshift Data Statement (%s) not found, removing from state", d.Id()) d.SetId("") return diags @@ -194,8 +194,7 @@ func FindStatementByID(ctx 
context.Context, conn *redshiftdata.Client, id string if errs.IsA[*types.ResourceNotFoundException](err) { return nil, &retry.NotFoundError{ - LastError: err, - LastRequest: input, + LastError: err, } } @@ -217,8 +216,8 @@ func FindStatementByID(ctx context.Context, conn *redshiftdata.Client, id string return output, nil } -func statusStatement(ctx context.Context, conn *redshiftdata.Client, id string) retry.StateRefreshFunc { - return func() (any, string, error) { +func statusStatement(conn *redshiftdata.Client, id string) retry.StateRefreshFunc { + return func(ctx context.Context) (any, string, error) { output, err := FindStatementByID(ctx, conn, id) if tfresource.NotFound(err) { @@ -241,7 +240,7 @@ func waitStatementFinished(ctx context.Context, conn *redshiftdata.Client, id st types.StatusStringSubmitted, ), Target: enum.Slice(types.StatusStringFinished), - Refresh: statusStatement(ctx, conn, id), + Refresh: statusStatement(conn, id), Timeout: timeout, MinTimeout: 10 * time.Second, Delay: 30 * time.Second, @@ -251,7 +250,7 @@ func waitStatementFinished(ctx context.Context, conn *redshiftdata.Client, id st if output, ok := outputRaw.(*redshiftdata.DescribeStatementOutput); ok { if status := output.Status; status == types.StatusStringFailed { - tfresource.SetLastError(err, errors.New(aws.ToString(output.Error))) + retry.SetLastError(err, errors.New(aws.ToString(output.Error))) } return output, err diff --git a/internal/service/redshiftdata/statement_test.go b/internal/service/redshiftdata/statement_test.go index f9c4e9f5060c..b2355b228652 100644 --- a/internal/service/redshiftdata/statement_test.go +++ b/internal/service/redshiftdata/statement_test.go @@ -9,11 +9,9 @@ import ( "testing" "github.com/aws/aws-sdk-go-v2/service/redshiftdata" - sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" 
"github.com/hashicorp/terraform-provider-aws/internal/acctest" - "github.com/hashicorp/terraform-provider-aws/internal/conns" tfredshiftdata "github.com/hashicorp/terraform-provider-aws/internal/service/redshiftdata" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -22,9 +20,9 @@ func TestAccRedshiftDataStatement_basic(t *testing.T) { ctx := acctest.Context(t) var v redshiftdata.DescribeStatementOutput resourceName := "aws_redshiftdata_statement.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.RedshiftDataServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -33,7 +31,7 @@ func TestAccRedshiftDataStatement_basic(t *testing.T) { { Config: testAccStatementConfig_basic(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckStatementExists(ctx, resourceName, &v), + testAccCheckStatementExists(ctx, t, resourceName, &v), resource.TestCheckResourceAttrPair(resourceName, names.AttrClusterIdentifier, "aws_redshift_cluster.test", names.AttrClusterIdentifier), resource.TestCheckResourceAttr(resourceName, "parameters.#", "0"), resource.TestCheckResourceAttr(resourceName, "sql", "CREATE GROUP group_name;"), @@ -54,9 +52,9 @@ func TestAccRedshiftDataStatement_workgroup(t *testing.T) { ctx := acctest.Context(t) var v redshiftdata.DescribeStatementOutput resourceName := "aws_redshiftdata_statement.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.RedshiftDataServiceID), ProtoV5ProviderFactories: 
acctest.ProtoV5ProviderFactories, @@ -65,7 +63,7 @@ func TestAccRedshiftDataStatement_workgroup(t *testing.T) { { Config: testAccStatementConfig_workgroup(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckStatementExists(ctx, resourceName, &v), + testAccCheckStatementExists(ctx, t, resourceName, &v), resource.TestCheckResourceAttr(resourceName, names.AttrClusterIdentifier, ""), resource.TestCheckResourceAttr(resourceName, "parameters.#", "0"), resource.TestCheckResourceAttr(resourceName, "sql", "CREATE GROUP group_name;"), @@ -82,14 +80,14 @@ func TestAccRedshiftDataStatement_workgroup(t *testing.T) { }) } -func testAccCheckStatementExists(ctx context.Context, n string, v *redshiftdata.DescribeStatementOutput) resource.TestCheckFunc { +func testAccCheckStatementExists(ctx context.Context, t *testing.T, n string, v *redshiftdata.DescribeStatementOutput) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] if !ok { return fmt.Errorf("Not found: %s", n) } - conn := acctest.Provider.Meta().(*conns.AWSClient).RedshiftDataClient(ctx) + conn := acctest.ProviderMeta(ctx, t).RedshiftDataClient(ctx) output, err := tfredshiftdata.FindStatementByID(ctx, conn, rs.Primary.ID) diff --git a/internal/service/redshiftserverless/namespace.go b/internal/service/redshiftserverless/namespace.go index 4d42fcbcf99e..316ba2a9ccb4 100644 --- a/internal/service/redshiftserverless/namespace.go +++ b/internal/service/redshiftserverless/namespace.go @@ -226,7 +226,7 @@ func resourceNamespaceRead(ctx context.Context, d *schema.ResourceData, meta any d.Set("default_iam_role_arn", output.DefaultIamRoleArn) d.Set("iam_roles", flattenNamespaceIAMRoles(output.IamRoles)) d.Set(names.AttrKMSKeyID, output.KmsKeyId) - d.Set("log_exports", flex.FlattenStringyValueSet[awstypes.LogExport](output.LogExports)) + d.Set("log_exports", output.LogExports) d.Set("namespace_id", output.NamespaceId) d.Set("namespace_name", output.NamespaceName) @@ -303,8 
+303,8 @@ func resourceNamespaceDelete(ctx context.Context, d *schema.ResourceData, meta a conn := meta.(*conns.AWSClient).RedshiftServerlessClient(ctx) log.Printf("[DEBUG] Deleting Redshift Serverless Namespace: %s", d.Id()) - _, err := tfresource.RetryWhenIsAErrorMessageContains[*awstypes.ConflictException](ctx, namespaceDeletedTimeout, - func() (any, error) { + _, err := tfresource.RetryWhenIsAErrorMessageContains[any, *awstypes.ConflictException](ctx, namespaceDeletedTimeout, + func(ctx context.Context) (any, error) { return conn.DeleteNamespace(ctx, &redshiftserverless.DeleteNamespaceInput{ NamespaceName: aws.String(d.Id()), }) diff --git a/internal/service/redshiftserverless/service_endpoint_resolver_gen.go b/internal/service/redshiftserverless/service_endpoint_resolver_gen.go index cf7bfb53201d..208813d32a42 100644 --- a/internal/service/redshiftserverless/service_endpoint_resolver_gen.go +++ b/internal/service/redshiftserverless/service_endpoint_resolver_gen.go @@ -62,7 +62,7 @@ func (r resolverV2) ResolveEndpoint(ctx context.Context, params redshiftserverle }) params.UseFIPS = aws.Bool(false) } else { - err = fmt.Errorf("looking up redshiftserverless endpoint %q: %s", hostname, err) + err = fmt.Errorf("looking up redshiftserverless endpoint %q: %w", hostname, err) return } } else { diff --git a/internal/service/redshiftserverless/service_endpoints_gen_test.go b/internal/service/redshiftserverless/service_endpoints_gen_test.go index 5f40d9b7bb80..8e1186bd137e 100644 --- a/internal/service/redshiftserverless/service_endpoints_gen_test.go +++ b/internal/service/redshiftserverless/service_endpoints_gen_test.go @@ -521,7 +521,7 @@ func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { ) } -var errCancelOperation = fmt.Errorf("Test: Canceling request") +var errCancelOperation = errors.New("Test: Canceling request") func addCancelRequestMiddleware() func(*middleware.Stack) error { return func(stack *middleware.Stack) error { diff --git 
a/internal/service/redshiftserverless/service_package_gen.go b/internal/service/redshiftserverless/service_package_gen.go index ae46189f2eee..95763f9b6742 100644 --- a/internal/service/redshiftserverless/service_package_gen.go +++ b/internal/service/redshiftserverless/service_package_gen.go @@ -7,7 +7,6 @@ import ( "unique" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/redshiftserverless" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -126,7 +125,7 @@ func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) ( func(o *redshiftserverless.Options) { if inContext, ok := conns.FromContext(ctx); ok && inContext.VCREnabled() { tflog.Info(ctx, "overriding retry behavior to immediately return VCR errors") - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(vcr.InteractionNotFoundRetryableFunc)) + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), vcr.InteractionNotFoundRetryableFunc) } }, withExtraOptions(ctx, p, config), diff --git a/internal/service/redshiftserverless/sweep.go b/internal/service/redshiftserverless/sweep.go index fe9cfcd2b782..4de99b74c42c 100644 --- a/internal/service/redshiftserverless/sweep.go +++ b/internal/service/redshiftserverless/sweep.go @@ -38,7 +38,7 @@ func sweepNamespaces(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %w", err) + return fmt.Errorf("getting client: %w", err) } conn := client.RedshiftServerlessClient(ctx) input := &redshiftserverless.ListNamespacesInput{} @@ -79,7 +79,7 @@ func sweepWorkgroups(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %w", err) + return 
fmt.Errorf("getting client: %w", err) } conn := client.RedshiftServerlessClient(ctx) input := &redshiftserverless.ListWorkgroupsInput{} @@ -120,7 +120,7 @@ func sweepSnapshots(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %w", err) + return fmt.Errorf("getting client: %w", err) } conn := client.RedshiftServerlessClient(ctx) input := &redshiftserverless.ListSnapshotsInput{} diff --git a/internal/service/redshiftserverless/tags_gen.go b/internal/service/redshiftserverless/tags_gen.go index 69d6d305eac4..a58961bfc8fc 100644 --- a/internal/service/redshiftserverless/tags_gen.go +++ b/internal/service/redshiftserverless/tags_gen.go @@ -3,8 +3,8 @@ package redshiftserverless import ( "context" - "fmt" + "github.com/YakDriver/smarterr" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/redshiftserverless" awstypes "github.com/aws/aws-sdk-go-v2/service/redshiftserverless/types" @@ -27,7 +27,7 @@ func listTags(ctx context.Context, conn *redshiftserverless.Client, identifier s output, err := conn.ListTagsForResource(ctx, &input, optFns...) if err != nil { - return tftags.New(ctx, nil), err + return tftags.New(ctx, nil), smarterr.NewError(err) } return keyValueTags(ctx, output.Tags), nil @@ -39,7 +39,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri tags, err := listTags(ctx, meta.(*conns.AWSClient).RedshiftServerlessClient(ctx), identifier) if err != nil { - return err + return smarterr.NewError(err) } if inContext, ok := tftags.FromContext(ctx); ok { @@ -117,7 +117,7 @@ func updateTags(ctx context.Context, conn *redshiftserverless.Client, identifier _, err := conn.UntagResource(ctx, &input, optFns...) 
if err != nil { - return fmt.Errorf("untagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } @@ -132,7 +132,7 @@ func updateTags(ctx context.Context, conn *redshiftserverless.Client, identifier _, err := conn.TagResource(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("tagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } diff --git a/internal/service/redshiftserverless/workgroup.go b/internal/service/redshiftserverless/workgroup.go index a29f045f6a28..8e19c57ea6e7 100644 --- a/internal/service/redshiftserverless/workgroup.go +++ b/internal/service/redshiftserverless/workgroup.go @@ -516,8 +516,8 @@ func resourceWorkgroupDelete(ctx context.Context, d *schema.ResourceData, meta a const ( retryTimeout = 10 * time.Minute ) - _, err := tfresource.RetryWhenIsAErrorMessageContains[*awstypes.ConflictException](ctx, retryTimeout, - func() (any, error) { + _, err := tfresource.RetryWhenIsAErrorMessageContains[any, *awstypes.ConflictException](ctx, retryTimeout, + func(ctx context.Context) (any, error) { return conn.DeleteWorkgroup(ctx, &redshiftserverless.DeleteWorkgroupInput{ WorkgroupName: aws.String(d.Id()), }) @@ -545,7 +545,7 @@ func updateWorkgroup(ctx context.Context, conn *redshiftserverless.Client, input retryTimeout = 20 * time.Minute ) _, err := tfresource.RetryWhen(ctx, retryTimeout, - func() (any, error) { + func(ctx context.Context) (any, error) { return conn.UpdateWorkgroup(ctx, input) }, func(err error) (bool, error) { if errs.IsAErrorMessageContains[*awstypes.ConflictException](err, "operation running") { diff --git a/internal/service/rekognition/collection.go b/internal/service/rekognition/collection.go index e901f7723777..0dd84039effe 100644 --- a/internal/service/rekognition/collection.go +++ b/internal/service/rekognition/collection.go @@ -114,7 +114,7 @@ func (r *collectionResource) Create(ctx context.Context, req resource.CreateRequ createTimeout := r.CreateTimeout(ctx, plan.Timeouts) - 
out, err := tfresource.RetryWhenNotFound(ctx, createTimeout, func() (any, error) { + output, err := tfresource.RetryWhenNotFound(ctx, createTimeout, func(ctx context.Context) (*rekognition.DescribeCollectionOutput, error) { return findCollectionByID(ctx, conn, plan.CollectionID.ValueString()) }) @@ -126,8 +126,6 @@ func (r *collectionResource) Create(ctx context.Context, req resource.CreateRequ return } - output := out.(*rekognition.DescribeCollectionOutput) - state := plan state.ID = plan.CollectionID state.ARN = flex.StringToFramework(ctx, output.CollectionARN) diff --git a/internal/service/rekognition/service_endpoint_resolver_gen.go b/internal/service/rekognition/service_endpoint_resolver_gen.go index aaaef06461e4..f63a3d0b9563 100644 --- a/internal/service/rekognition/service_endpoint_resolver_gen.go +++ b/internal/service/rekognition/service_endpoint_resolver_gen.go @@ -62,7 +62,7 @@ func (r resolverV2) ResolveEndpoint(ctx context.Context, params rekognition.Endp }) params.UseFIPS = aws.Bool(false) } else { - err = fmt.Errorf("looking up rekognition endpoint %q: %s", hostname, err) + err = fmt.Errorf("looking up rekognition endpoint %q: %w", hostname, err) return } } else { diff --git a/internal/service/rekognition/service_endpoints_gen_test.go b/internal/service/rekognition/service_endpoints_gen_test.go index e771e2319732..db55fd4f2e02 100644 --- a/internal/service/rekognition/service_endpoints_gen_test.go +++ b/internal/service/rekognition/service_endpoints_gen_test.go @@ -521,7 +521,7 @@ func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { ) } -var errCancelOperation = fmt.Errorf("Test: Canceling request") +var errCancelOperation = errors.New("Test: Canceling request") func addCancelRequestMiddleware() func(*middleware.Stack) error { return func(stack *middleware.Stack) error { diff --git a/internal/service/rekognition/service_package_gen.go b/internal/service/rekognition/service_package_gen.go index bc4cb0b8f3f4..4faa1b8e7069 
100644 --- a/internal/service/rekognition/service_package_gen.go +++ b/internal/service/rekognition/service_package_gen.go @@ -7,7 +7,6 @@ import ( "unique" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/rekognition" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -85,7 +84,7 @@ func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) ( func(o *rekognition.Options) { if inContext, ok := conns.FromContext(ctx); ok && inContext.VCREnabled() { tflog.Info(ctx, "overriding retry behavior to immediately return VCR errors") - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(vcr.InteractionNotFoundRetryableFunc)) + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), vcr.InteractionNotFoundRetryableFunc) } }, withExtraOptions(ctx, p, config), diff --git a/internal/service/rekognition/tags_gen.go b/internal/service/rekognition/tags_gen.go index 8f4d9e06f662..95d777a3715c 100644 --- a/internal/service/rekognition/tags_gen.go +++ b/internal/service/rekognition/tags_gen.go @@ -3,8 +3,8 @@ package rekognition import ( "context" - "fmt" + "github.com/YakDriver/smarterr" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/rekognition" "github.com/hashicorp/terraform-plugin-log/tflog" @@ -26,7 +26,7 @@ func listTags(ctx context.Context, conn *rekognition.Client, identifier string, output, err := conn.ListTagsForResource(ctx, &input, optFns...) 
if err != nil { - return tftags.New(ctx, nil), err + return tftags.New(ctx, nil), smarterr.NewError(err) } return keyValueTags(ctx, output.Tags), nil @@ -38,7 +38,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri tags, err := listTags(ctx, meta.(*conns.AWSClient).RekognitionClient(ctx), identifier) if err != nil { - return err + return smarterr.NewError(err) } if inContext, ok := tftags.FromContext(ctx); ok { @@ -99,7 +99,7 @@ func updateTags(ctx context.Context, conn *rekognition.Client, identifier string _, err := conn.UntagResource(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("untagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } @@ -114,7 +114,7 @@ func updateTags(ctx context.Context, conn *rekognition.Client, identifier string _, err := conn.TagResource(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("tagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } diff --git a/internal/service/resiliencehub/resiliency_policy_tags_gen_test.go b/internal/service/resiliencehub/resiliency_policy_tags_gen_test.go index b50a0eebc5be..9e27cbc3559c 100644 --- a/internal/service/resiliencehub/resiliency_policy_tags_gen_test.go +++ b/internal/service/resiliencehub/resiliency_policy_tags_gen_test.go @@ -7,7 +7,6 @@ import ( "github.com/aws/aws-sdk-go-v2/service/resiliencehub" "github.com/hashicorp/terraform-plugin-testing/config" - sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/knownvalue" "github.com/hashicorp/terraform-plugin-testing/plancheck" @@ -19,11 +18,12 @@ import ( func TestAccResilienceHubResiliencyPolicy_tags(t *testing.T) { ctx := acctest.Context(t) + var v resiliencehub.DescribeResiliencyPolicyOutput resourceName := "aws_resiliencehub_resiliency_policy.test" - rName := 
sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ResilienceHubServiceID), CheckDestroy: testAccCheckResiliencyPolicyDestroy(ctx), @@ -209,11 +209,12 @@ func TestAccResilienceHubResiliencyPolicy_tags(t *testing.T) { func TestAccResilienceHubResiliencyPolicy_tags_null(t *testing.T) { ctx := acctest.Context(t) + var v resiliencehub.DescribeResiliencyPolicyOutput resourceName := "aws_resiliencehub_resiliency_policy.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ResilienceHubServiceID), CheckDestroy: testAccCheckResiliencyPolicyDestroy(ctx), @@ -273,11 +274,12 @@ func TestAccResilienceHubResiliencyPolicy_tags_null(t *testing.T) { func TestAccResilienceHubResiliencyPolicy_tags_EmptyMap(t *testing.T) { ctx := acctest.Context(t) + var v resiliencehub.DescribeResiliencyPolicyOutput resourceName := "aws_resiliencehub_resiliency_policy.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ResilienceHubServiceID), CheckDestroy: testAccCheckResiliencyPolicyDestroy(ctx), @@ -325,11 +327,12 @@ func TestAccResilienceHubResiliencyPolicy_tags_EmptyMap(t *testing.T) { func TestAccResilienceHubResiliencyPolicy_tags_AddOnUpdate(t *testing.T) { ctx := acctest.Context(t) + var v resiliencehub.DescribeResiliencyPolicyOutput 
resourceName := "aws_resiliencehub_resiliency_policy.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ResilienceHubServiceID), CheckDestroy: testAccCheckResiliencyPolicyDestroy(ctx), @@ -407,11 +410,12 @@ func TestAccResilienceHubResiliencyPolicy_tags_AddOnUpdate(t *testing.T) { func TestAccResilienceHubResiliencyPolicy_tags_EmptyTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) + var v resiliencehub.DescribeResiliencyPolicyOutput resourceName := "aws_resiliencehub_resiliency_policy.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ResilienceHubServiceID), CheckDestroy: testAccCheckResiliencyPolicyDestroy(ctx), @@ -501,11 +505,12 @@ func TestAccResilienceHubResiliencyPolicy_tags_EmptyTag_OnCreate(t *testing.T) { func TestAccResilienceHubResiliencyPolicy_tags_EmptyTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + var v resiliencehub.DescribeResiliencyPolicyOutput resourceName := "aws_resiliencehub_resiliency_policy.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ResilienceHubServiceID), CheckDestroy: testAccCheckResiliencyPolicyDestroy(ctx), @@ -644,11 +649,12 @@ func TestAccResilienceHubResiliencyPolicy_tags_EmptyTag_OnUpdate_Add(t *testing. 
func TestAccResilienceHubResiliencyPolicy_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + var v resiliencehub.DescribeResiliencyPolicyOutput resourceName := "aws_resiliencehub_resiliency_policy.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ResilienceHubServiceID), CheckDestroy: testAccCheckResiliencyPolicyDestroy(ctx), @@ -736,11 +742,12 @@ func TestAccResilienceHubResiliencyPolicy_tags_EmptyTag_OnUpdate_Replace(t *test func TestAccResilienceHubResiliencyPolicy_tags_DefaultTags_providerOnly(t *testing.T) { ctx := acctest.Context(t) + var v resiliencehub.DescribeResiliencyPolicyOutput resourceName := "aws_resiliencehub_resiliency_policy.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ResilienceHubServiceID), CheckDestroy: testAccCheckResiliencyPolicyDestroy(ctx), @@ -925,11 +932,12 @@ func TestAccResilienceHubResiliencyPolicy_tags_DefaultTags_providerOnly(t *testi func TestAccResilienceHubResiliencyPolicy_tags_DefaultTags_nonOverlapping(t *testing.T) { ctx := acctest.Context(t) + var v resiliencehub.DescribeResiliencyPolicyOutput resourceName := "aws_resiliencehub_resiliency_policy.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, 
names.ResilienceHubServiceID), CheckDestroy: testAccCheckResiliencyPolicyDestroy(ctx), @@ -1091,11 +1099,12 @@ func TestAccResilienceHubResiliencyPolicy_tags_DefaultTags_nonOverlapping(t *tes func TestAccResilienceHubResiliencyPolicy_tags_DefaultTags_overlapping(t *testing.T) { ctx := acctest.Context(t) + var v resiliencehub.DescribeResiliencyPolicyOutput resourceName := "aws_resiliencehub_resiliency_policy.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ResilienceHubServiceID), CheckDestroy: testAccCheckResiliencyPolicyDestroy(ctx), @@ -1273,11 +1282,12 @@ func TestAccResilienceHubResiliencyPolicy_tags_DefaultTags_overlapping(t *testin func TestAccResilienceHubResiliencyPolicy_tags_DefaultTags_updateToProviderOnly(t *testing.T) { ctx := acctest.Context(t) + var v resiliencehub.DescribeResiliencyPolicyOutput resourceName := "aws_resiliencehub_resiliency_policy.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ResilienceHubServiceID), CheckDestroy: testAccCheckResiliencyPolicyDestroy(ctx), @@ -1365,11 +1375,12 @@ func TestAccResilienceHubResiliencyPolicy_tags_DefaultTags_updateToProviderOnly( func TestAccResilienceHubResiliencyPolicy_tags_DefaultTags_updateToResourceOnly(t *testing.T) { ctx := acctest.Context(t) + var v resiliencehub.DescribeResiliencyPolicyOutput resourceName := "aws_resiliencehub_resiliency_policy.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, 
acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ResilienceHubServiceID), CheckDestroy: testAccCheckResiliencyPolicyDestroy(ctx), @@ -1456,11 +1467,12 @@ func TestAccResilienceHubResiliencyPolicy_tags_DefaultTags_updateToResourceOnly( func TestAccResilienceHubResiliencyPolicy_tags_DefaultTags_emptyResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v resiliencehub.DescribeResiliencyPolicyOutput resourceName := "aws_resiliencehub_resiliency_policy.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ResilienceHubServiceID), CheckDestroy: testAccCheckResiliencyPolicyDestroy(ctx), @@ -1524,11 +1536,12 @@ func TestAccResilienceHubResiliencyPolicy_tags_DefaultTags_emptyResourceTag(t *t func TestAccResilienceHubResiliencyPolicy_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T) { ctx := acctest.Context(t) + var v resiliencehub.DescribeResiliencyPolicyOutput resourceName := "aws_resiliencehub_resiliency_policy.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ResilienceHubServiceID), CheckDestroy: testAccCheckResiliencyPolicyDestroy(ctx), @@ -1584,11 +1597,12 @@ func TestAccResilienceHubResiliencyPolicy_tags_DefaultTags_emptyProviderOnlyTag( func TestAccResilienceHubResiliencyPolicy_tags_DefaultTags_nullOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v 
resiliencehub.DescribeResiliencyPolicyOutput resourceName := "aws_resiliencehub_resiliency_policy.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ResilienceHubServiceID), CheckDestroy: testAccCheckResiliencyPolicyDestroy(ctx), @@ -1655,11 +1669,12 @@ func TestAccResilienceHubResiliencyPolicy_tags_DefaultTags_nullOverlappingResour func TestAccResilienceHubResiliencyPolicy_tags_DefaultTags_nullNonOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v resiliencehub.DescribeResiliencyPolicyOutput resourceName := "aws_resiliencehub_resiliency_policy.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ResilienceHubServiceID), CheckDestroy: testAccCheckResiliencyPolicyDestroy(ctx), @@ -1728,11 +1743,12 @@ func TestAccResilienceHubResiliencyPolicy_tags_DefaultTags_nullNonOverlappingRes func TestAccResilienceHubResiliencyPolicy_tags_ComputedTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) + var v resiliencehub.DescribeResiliencyPolicyOutput resourceName := "aws_resiliencehub_resiliency_policy.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ResilienceHubServiceID), CheckDestroy: testAccCheckResiliencyPolicyDestroy(ctx), @@ -1785,11 +1801,12 @@ func 
TestAccResilienceHubResiliencyPolicy_tags_ComputedTag_OnCreate(t *testing.T func TestAccResilienceHubResiliencyPolicy_tags_ComputedTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + var v resiliencehub.DescribeResiliencyPolicyOutput resourceName := "aws_resiliencehub_resiliency_policy.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ResilienceHubServiceID), CheckDestroy: testAccCheckResiliencyPolicyDestroy(ctx), @@ -1884,11 +1901,12 @@ func TestAccResilienceHubResiliencyPolicy_tags_ComputedTag_OnUpdate_Add(t *testi func TestAccResilienceHubResiliencyPolicy_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + var v resiliencehub.DescribeResiliencyPolicyOutput resourceName := "aws_resiliencehub_resiliency_policy.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ResilienceHubServiceID), CheckDestroy: testAccCheckResiliencyPolicyDestroy(ctx), @@ -1973,11 +1991,12 @@ func TestAccResilienceHubResiliencyPolicy_tags_ComputedTag_OnUpdate_Replace(t *t func TestAccResilienceHubResiliencyPolicy_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { ctx := acctest.Context(t) + var v resiliencehub.DescribeResiliencyPolicyOutput resourceName := "aws_resiliencehub_resiliency_policy.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { 
acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ResilienceHubServiceID), CheckDestroy: testAccCheckResiliencyPolicyDestroy(ctx), @@ -2135,11 +2154,12 @@ func TestAccResilienceHubResiliencyPolicy_tags_IgnoreTags_Overlap_DefaultTag(t * func TestAccResilienceHubResiliencyPolicy_tags_IgnoreTags_Overlap_ResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v resiliencehub.DescribeResiliencyPolicyOutput resourceName := "aws_resiliencehub_resiliency_policy.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ResilienceHubServiceID), CheckDestroy: testAccCheckResiliencyPolicyDestroy(ctx), diff --git a/internal/service/resiliencehub/service_endpoint_resolver_gen.go b/internal/service/resiliencehub/service_endpoint_resolver_gen.go index 65ff86dc7655..eb6a31e9e5e4 100644 --- a/internal/service/resiliencehub/service_endpoint_resolver_gen.go +++ b/internal/service/resiliencehub/service_endpoint_resolver_gen.go @@ -62,7 +62,7 @@ func (r resolverV2) ResolveEndpoint(ctx context.Context, params resiliencehub.En }) params.UseFIPS = aws.Bool(false) } else { - err = fmt.Errorf("looking up resiliencehub endpoint %q: %s", hostname, err) + err = fmt.Errorf("looking up resiliencehub endpoint %q: %w", hostname, err) return } } else { diff --git a/internal/service/resiliencehub/service_endpoints_gen_test.go b/internal/service/resiliencehub/service_endpoints_gen_test.go index 02287ed7a959..727894287629 100644 --- a/internal/service/resiliencehub/service_endpoints_gen_test.go +++ b/internal/service/resiliencehub/service_endpoints_gen_test.go @@ -521,7 +521,7 @@ func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { ) } -var errCancelOperation = fmt.Errorf("Test: Canceling request") +var 
errCancelOperation = errors.New("Test: Canceling request") func addCancelRequestMiddleware() func(*middleware.Stack) error { return func(stack *middleware.Stack) error { diff --git a/internal/service/resiliencehub/service_package_gen.go b/internal/service/resiliencehub/service_package_gen.go index e6d72e06b9b3..65b67434c644 100644 --- a/internal/service/resiliencehub/service_package_gen.go +++ b/internal/service/resiliencehub/service_package_gen.go @@ -7,7 +7,6 @@ import ( "unique" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/resiliencehub" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -67,7 +66,7 @@ func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) ( func(o *resiliencehub.Options) { if inContext, ok := conns.FromContext(ctx); ok && inContext.VCREnabled() { tflog.Info(ctx, "overriding retry behavior to immediately return VCR errors") - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(vcr.InteractionNotFoundRetryableFunc)) + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), vcr.InteractionNotFoundRetryableFunc) } }, withExtraOptions(ctx, p, config), diff --git a/internal/service/resiliencehub/tags_gen.go b/internal/service/resiliencehub/tags_gen.go index e941203b1eac..49c3b059777b 100644 --- a/internal/service/resiliencehub/tags_gen.go +++ b/internal/service/resiliencehub/tags_gen.go @@ -3,8 +3,8 @@ package resiliencehub import ( "context" - "fmt" + "github.com/YakDriver/smarterr" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/resiliencehub" "github.com/hashicorp/terraform-plugin-log/tflog" @@ -26,7 +26,7 @@ func listTags(ctx context.Context, conn *resiliencehub.Client, identifier string output, err := conn.ListTagsForResource(ctx, &input, optFns...) 
if err != nil { - return tftags.New(ctx, nil), err + return tftags.New(ctx, nil), smarterr.NewError(err) } return keyValueTags(ctx, output.Tags), nil @@ -38,7 +38,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri tags, err := listTags(ctx, meta.(*conns.AWSClient).ResilienceHubClient(ctx), identifier) if err != nil { - return err + return smarterr.NewError(err) } if inContext, ok := tftags.FromContext(ctx); ok { @@ -99,7 +99,7 @@ func updateTags(ctx context.Context, conn *resiliencehub.Client, identifier stri _, err := conn.UntagResource(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("untagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } @@ -114,7 +114,7 @@ func updateTags(ctx context.Context, conn *resiliencehub.Client, identifier stri _, err := conn.TagResource(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("tagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } diff --git a/internal/service/resourceexplorer2/index.go b/internal/service/resourceexplorer2/index.go index ce1ee29de647..820cbe54d6a0 100644 --- a/internal/service/resourceexplorer2/index.go +++ b/internal/service/resourceexplorer2/index.go @@ -35,6 +35,7 @@ import ( // @ArnIdentity(identityDuplicateAttributes="id") // @Testing(serialize=true) // @Testing(generator=false) +// @Testing(preIdentityVersion="v5.100.0") func newIndexResource(context.Context) (resource.ResourceWithConfigure, error) { r := &indexResource{} diff --git a/internal/service/resourceexplorer2/index_identity_gen_test.go b/internal/service/resourceexplorer2/index_identity_gen_test.go index 719ff4e4e6c2..5b1db46af303 100644 --- a/internal/service/resourceexplorer2/index_identity_gen_test.go +++ b/internal/service/resourceexplorer2/index_identity_gen_test.go @@ -14,6 +14,7 @@ import ( "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" "github.com/hashicorp/terraform-plugin-testing/tfversion" 
"github.com/hashicorp/terraform-provider-aws/internal/acctest" + tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -21,9 +22,10 @@ func testAccResourceExplorer2Index_IdentitySerial(t *testing.T) { t.Helper() testCases := map[string]func(t *testing.T){ - acctest.CtBasic: testAccResourceExplorer2Index_Identity_Basic, - "ExistingResource": testAccResourceExplorer2Index_Identity_ExistingResource, - "RegionOverride": testAccResourceExplorer2Index_Identity_RegionOverride, + acctest.CtBasic: testAccResourceExplorer2Index_Identity_Basic, + "ExistingResource": testAccResourceExplorer2Index_Identity_ExistingResource, + "ExistingResourceNoRefresh": testAccResourceExplorer2Index_Identity_ExistingResource_NoRefresh_NoChange, + "RegionOverride": testAccResourceExplorer2Index_Identity_RegionOverride, } acctest.RunSerialTests1Level(t, testCases, 0) @@ -31,9 +33,10 @@ func testAccResourceExplorer2Index_IdentitySerial(t *testing.T) { func testAccResourceExplorer2Index_Identity_Basic(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_resourceexplorer2_index.test" - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ TerraformVersionChecks: []tfversion.TerraformVersionCheck{ tfversion.SkipBelow(tfversion.Version1_12_0), }, @@ -52,6 +55,9 @@ func testAccResourceExplorer2Index_Identity_Basic(t *testing.T) { ConfigStateChecks: []statecheck.StateCheck{ statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New(names.AttrARN), compare.ValuesSame()), statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), }, }, @@ -106,7 +112,7 @@ func 
testAccResourceExplorer2Index_Identity_RegionOverride(t *testing.T) { resourceName := "aws_resourceexplorer2_index.test" - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ TerraformVersionChecks: []tfversion.TerraformVersionCheck{ tfversion.SkipBelow(tfversion.Version1_12_0), }, @@ -124,6 +130,9 @@ func testAccResourceExplorer2Index_Identity_RegionOverride(t *testing.T) { ConfigStateChecks: []statecheck.StateCheck{ statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New(names.AttrARN), compare.ValuesSame()), statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), }, }, @@ -210,3 +219,115 @@ func testAccResourceExplorer2Index_Identity_RegionOverride(t *testing.T) { }, }) } + +func testAccResourceExplorer2Index_Identity_ExistingResource(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_resourceexplorer2_index.test" + + acctest.Test(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.ResourceExplorer2ServiceID), + CheckDestroy: testAccCheckIndexDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/Index/basic_v5.100.0/"), + ConfigVariables: config.Variables{}, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckIndexExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: v6.0 Identity set on refresh + { + ConfigDirectory: 
config.StaticDirectory("testdata/Index/basic_v6.0.0/"), + ConfigVariables: config.Variables{}, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckIndexExists(ctx, resourceName), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + + // Step 3: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Index/basic/"), + ConfigVariables: config.Variables{}, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + }, + }) +} + +func testAccResourceExplorer2Index_Identity_ExistingResource_NoRefresh_NoChange(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_resourceexplorer2_index.test" + + acctest.Test(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.ResourceExplorer2ServiceID), + 
CheckDestroy: testAccCheckIndexDestroy(ctx), + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/Index/basic_v5.100.0/"), + ConfigVariables: config.Variables{}, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckIndexExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Index/basic/"), + ConfigVariables: config.Variables{}, + }, + }, + }) +} diff --git a/internal/service/resourceexplorer2/index_test.go b/internal/service/resourceexplorer2/index_test.go index 1182a1f53655..30b49d7e9e6f 100644 --- a/internal/service/resourceexplorer2/index_test.go +++ b/internal/service/resourceexplorer2/index_test.go @@ -10,13 +10,8 @@ import ( "github.com/YakDriver/regexache" "github.com/hashicorp/terraform-plugin-testing/helper/resource" - "github.com/hashicorp/terraform-plugin-testing/plancheck" - "github.com/hashicorp/terraform-plugin-testing/statecheck" "github.com/hashicorp/terraform-plugin-testing/terraform" - "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" - "github.com/hashicorp/terraform-plugin-testing/tfversion" "github.com/hashicorp/terraform-provider-aws/internal/acctest" - tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" "github.com/hashicorp/terraform-provider-aws/internal/conns" tfresourceexplorer2 "github.com/hashicorp/terraform-provider-aws/internal/service/resourceexplorer2" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" @@ -166,72 +161,6 @@ func testAccIndex_type(t *testing.T) { }) } -func testAccResourceExplorer2Index_Identity_ExistingResource(t *testing.T) { - ctx := 
acctest.Context(t) - resourceName := "aws_resourceexplorer2_index.test" - - resource.Test(t, resource.TestCase{ - TerraformVersionChecks: []tfversion.TerraformVersionCheck{ - tfversion.SkipBelow(tfversion.Version1_12_0), - }, - PreCheck: func() { - acctest.PreCheck(ctx, t) - acctest.PreCheckPartitionHasService(t, names.ResourceExplorer2EndpointID) - }, - ErrorCheck: acctest.ErrorCheck(t, names.ResourceExplorer2ServiceID), - CheckDestroy: testAccCheckIndexDestroy(ctx), - Steps: []resource.TestStep{ - { - ExternalProviders: map[string]resource.ExternalProvider{ - "aws": { - Source: "hashicorp/aws", - VersionConstraint: "5.100.0", - }, - }, - Config: testAccIndexConfig_basic, - ConfigStateChecks: []statecheck.StateCheck{ - tfstatecheck.ExpectNoIdentity(resourceName), - }, - }, - { - ExternalProviders: map[string]resource.ExternalProvider{ - "aws": { - Source: "hashicorp/aws", - VersionConstraint: "6.0.0", - }, - }, - Config: testAccIndexConfig_basic, - ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - PostApplyPostRefresh: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - }, - ConfigStateChecks: []statecheck.StateCheck{ - statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), - }, - }, - { - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - Config: testAccIndexConfig_basic, - ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - PostApplyPostRefresh: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - }, - ConfigStateChecks: []statecheck.StateCheck{ - statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), - }, - }, - }, - }) -} - func 
testAccCheckIndexDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { conn := acctest.Provider.Meta().(*conns.AWSClient).ResourceExplorer2Client(ctx) diff --git a/internal/service/resourceexplorer2/service_endpoint_resolver_gen.go b/internal/service/resourceexplorer2/service_endpoint_resolver_gen.go index ef6e34136798..1a217daa9c7e 100644 --- a/internal/service/resourceexplorer2/service_endpoint_resolver_gen.go +++ b/internal/service/resourceexplorer2/service_endpoint_resolver_gen.go @@ -62,7 +62,7 @@ func (r resolverV2) ResolveEndpoint(ctx context.Context, params resourceexplorer }) params.UseFIPS = aws.Bool(false) } else { - err = fmt.Errorf("looking up resourceexplorer2 endpoint %q: %s", hostname, err) + err = fmt.Errorf("looking up resourceexplorer2 endpoint %q: %w", hostname, err) return } } else { diff --git a/internal/service/resourceexplorer2/service_endpoints_gen_test.go b/internal/service/resourceexplorer2/service_endpoints_gen_test.go index 91ddb46d1b1a..9f6ea618e48e 100644 --- a/internal/service/resourceexplorer2/service_endpoints_gen_test.go +++ b/internal/service/resourceexplorer2/service_endpoints_gen_test.go @@ -521,7 +521,7 @@ func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { ) } -var errCancelOperation = fmt.Errorf("Test: Canceling request") +var errCancelOperation = errors.New("Test: Canceling request") func addCancelRequestMiddleware() func(*middleware.Stack) error { return func(stack *middleware.Stack) error { diff --git a/internal/service/resourceexplorer2/service_package_gen.go b/internal/service/resourceexplorer2/service_package_gen.go index 4e76b88ee0d7..b25e1dfb51cc 100644 --- a/internal/service/resourceexplorer2/service_package_gen.go +++ b/internal/service/resourceexplorer2/service_package_gen.go @@ -7,7 +7,6 @@ import ( "unique" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/resourceexplorer2" 
"github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -91,7 +90,7 @@ func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) ( func(o *resourceexplorer2.Options) { if inContext, ok := conns.FromContext(ctx); ok && inContext.VCREnabled() { tflog.Info(ctx, "overriding retry behavior to immediately return VCR errors") - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(vcr.InteractionNotFoundRetryableFunc)) + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), vcr.InteractionNotFoundRetryableFunc) } }, withExtraOptions(ctx, p, config), diff --git a/internal/service/resourceexplorer2/sweep.go b/internal/service/resourceexplorer2/sweep.go index 95dbb72504de..8cc4a82924f4 100644 --- a/internal/service/resourceexplorer2/sweep.go +++ b/internal/service/resourceexplorer2/sweep.go @@ -27,7 +27,7 @@ func sweepIndexes(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.ResourceExplorer2Client(ctx) input := &resourceexplorer2.ListIndexesInput{} diff --git a/internal/service/resourceexplorer2/tags_gen.go b/internal/service/resourceexplorer2/tags_gen.go index 1f08a6cc25d2..30ab2f9d1dd3 100644 --- a/internal/service/resourceexplorer2/tags_gen.go +++ b/internal/service/resourceexplorer2/tags_gen.go @@ -3,8 +3,8 @@ package resourceexplorer2 import ( "context" - "fmt" + "github.com/YakDriver/smarterr" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/resourceexplorer2" "github.com/hashicorp/terraform-plugin-log/tflog" @@ -26,7 +26,7 @@ func listTags(ctx context.Context, conn *resourceexplorer2.Client, identifier st output, err := conn.ListTagsForResource(ctx, &input, optFns...) 
if err != nil { - return tftags.New(ctx, nil), err + return tftags.New(ctx, nil), smarterr.NewError(err) } return keyValueTags(ctx, output.Tags), nil @@ -38,7 +38,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri tags, err := listTags(ctx, meta.(*conns.AWSClient).ResourceExplorer2Client(ctx), identifier) if err != nil { - return err + return smarterr.NewError(err) } if inContext, ok := tftags.FromContext(ctx); ok { @@ -99,7 +99,7 @@ func updateTags(ctx context.Context, conn *resourceexplorer2.Client, identifier _, err := conn.UntagResource(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("untagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } @@ -114,7 +114,7 @@ func updateTags(ctx context.Context, conn *resourceexplorer2.Client, identifier _, err := conn.TagResource(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("tagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } diff --git a/internal/service/resourceexplorer2/testdata/Index/basic_v5.100.0/main_gen.tf b/internal/service/resourceexplorer2/testdata/Index/basic_v5.100.0/main_gen.tf new file mode 100644 index 000000000000..27f490d67580 --- /dev/null +++ b/internal/service/resourceexplorer2/testdata/Index/basic_v5.100.0/main_gen.tf @@ -0,0 +1,17 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resource "aws_resourceexplorer2_index" "test" { + type = "LOCAL" +} + +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "5.100.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/resourceexplorer2/testdata/Index/basic_v6.0.0/main_gen.tf b/internal/service/resourceexplorer2/testdata/Index/basic_v6.0.0/main_gen.tf new file mode 100644 index 000000000000..839d2c51f9e8 --- /dev/null +++ b/internal/service/resourceexplorer2/testdata/Index/basic_v6.0.0/main_gen.tf @@ -0,0 +1,17 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_resourceexplorer2_index" "test" { + type = "LOCAL" +} + +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "6.0.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/resourceexplorer2/testdata/View/basic_v5.100.0/main_gen.tf b/internal/service/resourceexplorer2/testdata/View/basic_v5.100.0/main_gen.tf new file mode 100644 index 000000000000..cf9c43faaf09 --- /dev/null +++ b/internal/service/resourceexplorer2/testdata/View/basic_v5.100.0/main_gen.tf @@ -0,0 +1,28 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resource "aws_resourceexplorer2_view" "test" { + name = var.rName + + depends_on = [aws_resourceexplorer2_index.test] +} + +resource "aws_resourceexplorer2_index" "test" { + type = "LOCAL" +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "5.100.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/resourceexplorer2/testdata/View/basic_v6.0.0/main_gen.tf b/internal/service/resourceexplorer2/testdata/View/basic_v6.0.0/main_gen.tf new file mode 100644 index 000000000000..5444b1d8a204 --- /dev/null +++ b/internal/service/resourceexplorer2/testdata/View/basic_v6.0.0/main_gen.tf @@ -0,0 +1,28 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_resourceexplorer2_view" "test" { + name = var.rName + + depends_on = [aws_resourceexplorer2_index.test] +} + +resource "aws_resourceexplorer2_index" "test" { + type = "LOCAL" +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "6.0.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/resourceexplorer2/view.go b/internal/service/resourceexplorer2/view.go index bd2a2bd6671f..8c03ea10d217 100644 --- a/internal/service/resourceexplorer2/view.go +++ b/internal/service/resourceexplorer2/view.go @@ -41,6 +41,7 @@ import ( // @ArnIdentity(identityDuplicateAttributes="id") // @Testing(existsType="github.com/aws/aws-sdk-go-v2/service/resourceexplorer2;resourceexplorer2.GetViewOutput") // @Testing(serialize=true) +// @Testing(preIdentityVersion="v5.100.0") func newViewResource(context.Context) (resource.ResourceWithConfigure, error) { return &viewResource{}, nil } diff --git a/internal/service/resourceexplorer2/view_identity_gen_test.go b/internal/service/resourceexplorer2/view_identity_gen_test.go index 9fb64fc07939..6c5e115dc002 100644 --- a/internal/service/resourceexplorer2/view_identity_gen_test.go +++ b/internal/service/resourceexplorer2/view_identity_gen_test.go @@ -16,6 +16,7 @@ import ( "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" "github.com/hashicorp/terraform-plugin-testing/tfversion" "github.com/hashicorp/terraform-provider-aws/internal/acctest" + tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -23,9 +24,10 @@ func testAccResourceExplorer2View_IdentitySerial(t *testing.T) { t.Helper() testCases := map[string]func(t *testing.T){ - acctest.CtBasic: testAccResourceExplorer2View_Identity_Basic, - "ExistingResource": 
testAccResourceExplorer2View_Identity_ExistingResource, - "RegionOverride": testAccResourceExplorer2View_Identity_RegionOverride, + acctest.CtBasic: testAccResourceExplorer2View_Identity_Basic, + "ExistingResource": testAccResourceExplorer2View_Identity_ExistingResource, + "ExistingResourceNoRefresh": testAccResourceExplorer2View_Identity_ExistingResource_NoRefresh_NoChange, + "RegionOverride": testAccResourceExplorer2View_Identity_RegionOverride, } acctest.RunSerialTests1Level(t, testCases, 0) @@ -38,7 +40,7 @@ func testAccResourceExplorer2View_Identity_Basic(t *testing.T) { resourceName := "aws_resourceexplorer2_view.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ TerraformVersionChecks: []tfversion.TerraformVersionCheck{ tfversion.SkipBelow(tfversion.Version1_12_0), }, @@ -59,6 +61,9 @@ func testAccResourceExplorer2View_Identity_Basic(t *testing.T) { ConfigStateChecks: []statecheck.StateCheck{ statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New(names.AttrARN), compare.ValuesSame()), statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), }, }, @@ -120,7 +125,7 @@ func testAccResourceExplorer2View_Identity_RegionOverride(t *testing.T) { resourceName := "aws_resourceexplorer2_view.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ TerraformVersionChecks: []tfversion.TerraformVersionCheck{ tfversion.SkipBelow(tfversion.Version1_12_0), }, @@ -139,6 +144,9 @@ func testAccResourceExplorer2View_Identity_RegionOverride(t *testing.T) { ConfigStateChecks: 
[]statecheck.StateCheck{ statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New(names.AttrARN), compare.ValuesSame()), statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), }, }, @@ -230,3 +238,129 @@ func testAccResourceExplorer2View_Identity_RegionOverride(t *testing.T) { }, }) } + +func testAccResourceExplorer2View_Identity_ExistingResource(t *testing.T) { + ctx := acctest.Context(t) + + var v resourceexplorer2.GetViewOutput + resourceName := "aws_resourceexplorer2_view.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.Test(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.ResourceExplorer2ServiceID), + CheckDestroy: testAccCheckViewDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/View/basic_v5.100.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckViewExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: v6.0 Identity set on refresh + { + ConfigDirectory: config.StaticDirectory("testdata/View/basic_v6.0.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckViewExists(ctx, resourceName, &v), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ 
+ PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + + // Step 3: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/View/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + }, + }) +} + +func testAccResourceExplorer2View_Identity_ExistingResource_NoRefresh_NoChange(t *testing.T) { + ctx := acctest.Context(t) + + var v resourceexplorer2.GetViewOutput + resourceName := "aws_resourceexplorer2_view.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.Test(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.ResourceExplorer2ServiceID), + CheckDestroy: testAccCheckViewDestroy(ctx), + AdditionalCLIOptions: 
&resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/View/basic_v5.100.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckViewExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/View/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + }, + }, + }) +} diff --git a/internal/service/resourceexplorer2/view_test.go b/internal/service/resourceexplorer2/view_test.go index e95d8a817b5d..1bafbcaeea91 100644 --- a/internal/service/resourceexplorer2/view_test.go +++ b/internal/service/resourceexplorer2/view_test.go @@ -12,13 +12,8 @@ import ( "github.com/aws/aws-sdk-go-v2/service/resourceexplorer2" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" - "github.com/hashicorp/terraform-plugin-testing/plancheck" - "github.com/hashicorp/terraform-plugin-testing/statecheck" "github.com/hashicorp/terraform-plugin-testing/terraform" - "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" - "github.com/hashicorp/terraform-plugin-testing/tfversion" "github.com/hashicorp/terraform-provider-aws/internal/acctest" - tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" "github.com/hashicorp/terraform-provider-aws/internal/conns" tfresourceexplorer2 "github.com/hashicorp/terraform-provider-aws/internal/service/resourceexplorer2" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" @@ -266,73 +261,6 @@ func 
testAccView_scope(t *testing.T) { }) } -func testAccResourceExplorer2View_Identity_ExistingResource(t *testing.T) { - ctx := acctest.Context(t) - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resourceName := "aws_resourceexplorer2_view.test" - - resource.Test(t, resource.TestCase{ - TerraformVersionChecks: []tfversion.TerraformVersionCheck{ - tfversion.SkipBelow(tfversion.Version1_12_0), - }, - PreCheck: func() { - acctest.PreCheck(ctx, t) - acctest.PreCheckPartitionHasService(t, names.ResourceExplorer2EndpointID) - }, - ErrorCheck: acctest.ErrorCheck(t, names.ResourceExplorer2ServiceID), - CheckDestroy: testAccCheckViewDestroy(ctx), - Steps: []resource.TestStep{ - { - ExternalProviders: map[string]resource.ExternalProvider{ - "aws": { - Source: "hashicorp/aws", - VersionConstraint: "5.100.0", - }, - }, - Config: testAccViewConfig_basic(rName), - ConfigStateChecks: []statecheck.StateCheck{ - tfstatecheck.ExpectNoIdentity(resourceName), - }, - }, - { - ExternalProviders: map[string]resource.ExternalProvider{ - "aws": { - Source: "hashicorp/aws", - VersionConstraint: "6.0.0", - }, - }, - Config: testAccViewConfig_basic(rName), - ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - PostApplyPostRefresh: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - }, - ConfigStateChecks: []statecheck.StateCheck{ - statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), - }, - }, - { - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - Config: testAccViewConfig_basic(rName), - ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - PostApplyPostRefresh: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, 
plancheck.ResourceActionNoop), - }, - }, - ConfigStateChecks: []statecheck.StateCheck{ - statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), - }, - }, - }, - }) -} - func testAccCheckViewDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { conn := acctest.Provider.Meta().(*conns.AWSClient).ResourceExplorer2Client(ctx) diff --git a/internal/service/resourcegroups/service_endpoint_resolver_gen.go b/internal/service/resourcegroups/service_endpoint_resolver_gen.go index f9caa3d54001..072467d822df 100644 --- a/internal/service/resourcegroups/service_endpoint_resolver_gen.go +++ b/internal/service/resourcegroups/service_endpoint_resolver_gen.go @@ -62,7 +62,7 @@ func (r resolverV2) ResolveEndpoint(ctx context.Context, params resourcegroups.E }) params.UseFIPS = aws.Bool(false) } else { - err = fmt.Errorf("looking up resourcegroups endpoint %q: %s", hostname, err) + err = fmt.Errorf("looking up resourcegroups endpoint %q: %w", hostname, err) return } } else { diff --git a/internal/service/resourcegroups/service_endpoints_gen_test.go b/internal/service/resourcegroups/service_endpoints_gen_test.go index 7c778f417e8a..cb703d531f36 100644 --- a/internal/service/resourcegroups/service_endpoints_gen_test.go +++ b/internal/service/resourcegroups/service_endpoints_gen_test.go @@ -521,7 +521,7 @@ func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { ) } -var errCancelOperation = fmt.Errorf("Test: Canceling request") +var errCancelOperation = errors.New("Test: Canceling request") func addCancelRequestMiddleware() func(*middleware.Stack) error { return func(stack *middleware.Stack) error { diff --git a/internal/service/resourcegroups/service_package_gen.go b/internal/service/resourcegroups/service_package_gen.go index a2ec50208cd3..fd42a4089d02 100644 --- a/internal/service/resourcegroups/service_package_gen.go +++ b/internal/service/resourcegroups/service_package_gen.go @@ -7,7 
+7,6 @@ import ( "unique" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/resourcegroups" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -73,7 +72,7 @@ func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) ( func(o *resourcegroups.Options) { if inContext, ok := conns.FromContext(ctx); ok && inContext.VCREnabled() { tflog.Info(ctx, "overriding retry behavior to immediately return VCR errors") - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(vcr.InteractionNotFoundRetryableFunc)) + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), vcr.InteractionNotFoundRetryableFunc) } }, withExtraOptions(ctx, p, config), diff --git a/internal/service/resourcegroups/tags_gen.go b/internal/service/resourcegroups/tags_gen.go index 1aede738466c..48531f4fc014 100644 --- a/internal/service/resourcegroups/tags_gen.go +++ b/internal/service/resourcegroups/tags_gen.go @@ -3,8 +3,8 @@ package resourcegroups import ( "context" - "fmt" + "github.com/YakDriver/smarterr" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/resourcegroups" "github.com/hashicorp/terraform-plugin-log/tflog" @@ -26,7 +26,7 @@ func listTags(ctx context.Context, conn *resourcegroups.Client, identifier strin output, err := conn.GetTags(ctx, &input, optFns...) 
if err != nil { - return tftags.New(ctx, nil), err + return tftags.New(ctx, nil), smarterr.NewError(err) } return keyValueTags(ctx, output.Tags), nil @@ -38,7 +38,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri tags, err := listTags(ctx, meta.(*conns.AWSClient).ResourceGroupsClient(ctx), identifier) if err != nil { - return err + return smarterr.NewError(err) } if inContext, ok := tftags.FromContext(ctx); ok { @@ -99,7 +99,7 @@ func updateTags(ctx context.Context, conn *resourcegroups.Client, identifier str _, err := conn.Untag(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("untagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } @@ -114,7 +114,7 @@ func updateTags(ctx context.Context, conn *resourcegroups.Client, identifier str _, err := conn.Tag(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("tagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } diff --git a/internal/service/resourcegroupstaggingapi/resources_data_source_test.go b/internal/service/resourcegroupstaggingapi/resources_data_source_test.go index 9255843a3ad1..039046c1ad56 100644 --- a/internal/service/resourcegroupstaggingapi/resources_data_source_test.go +++ b/internal/service/resourcegroupstaggingapi/resources_data_source_test.go @@ -7,7 +7,6 @@ import ( "fmt" "testing" - sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-provider-aws/internal/acctest" "github.com/hashicorp/terraform-provider-aws/names" @@ -17,9 +16,9 @@ func TestAccResourceGroupsTaggingAPIResourcesDataSource_tagFilter(t *testing.T) ctx := acctest.Context(t) dataSourceName := "data.aws_resourcegroupstaggingapi_resources.test" resourceName := "aws_vpc.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - 
resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ResourceGroupsTaggingAPIServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -40,9 +39,9 @@ func TestAccResourceGroupsTaggingAPIResourcesDataSource_tagFilter(t *testing.T) func TestAccResourceGroupsTaggingAPIResourcesDataSource_includeComplianceDetails(t *testing.T) { ctx := acctest.Context(t) dataSourceName := "data.aws_resourcegroupstaggingapi_resources.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ResourceGroupsTaggingAPIServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -62,9 +61,9 @@ func TestAccResourceGroupsTaggingAPIResourcesDataSource_resourceTypeFilters(t *t ctx := acctest.Context(t) dataSourceName := "data.aws_resourcegroupstaggingapi_resources.test" resourceName := "aws_vpc.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ResourceGroupsTaggingAPIServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -86,9 +85,9 @@ func TestAccResourceGroupsTaggingAPIResourcesDataSource_resourceARNList(t *testi ctx := acctest.Context(t) dataSourceName := "data.aws_resourcegroupstaggingapi_resources.test" resourceName := "aws_vpc.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ 
+ acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ResourceGroupsTaggingAPIServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, diff --git a/internal/service/resourcegroupstaggingapi/service_endpoint_resolver_gen.go b/internal/service/resourcegroupstaggingapi/service_endpoint_resolver_gen.go index 9cdfcb5d725f..3a9dcfc902a2 100644 --- a/internal/service/resourcegroupstaggingapi/service_endpoint_resolver_gen.go +++ b/internal/service/resourcegroupstaggingapi/service_endpoint_resolver_gen.go @@ -62,7 +62,7 @@ func (r resolverV2) ResolveEndpoint(ctx context.Context, params resourcegroupsta }) params.UseFIPS = aws.Bool(false) } else { - err = fmt.Errorf("looking up resourcegroupstaggingapi endpoint %q: %s", hostname, err) + err = fmt.Errorf("looking up resourcegroupstaggingapi endpoint %q: %w", hostname, err) return } } else { diff --git a/internal/service/resourcegroupstaggingapi/service_endpoints_gen_test.go b/internal/service/resourcegroupstaggingapi/service_endpoints_gen_test.go index 55642655f8c9..1abb942868b9 100644 --- a/internal/service/resourcegroupstaggingapi/service_endpoints_gen_test.go +++ b/internal/service/resourcegroupstaggingapi/service_endpoints_gen_test.go @@ -601,7 +601,7 @@ func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { ) } -var errCancelOperation = fmt.Errorf("Test: Canceling request") +var errCancelOperation = errors.New("Test: Canceling request") func addCancelRequestMiddleware() func(*middleware.Stack) error { return func(stack *middleware.Stack) error { diff --git a/internal/service/resourcegroupstaggingapi/service_package_gen.go b/internal/service/resourcegroupstaggingapi/service_package_gen.go index 8bfd4649ccbf..0d7438dee517 100644 --- a/internal/service/resourcegroupstaggingapi/service_package_gen.go +++ b/internal/service/resourcegroupstaggingapi/service_package_gen.go @@ -7,7 +7,6 @@ import ( "unique" 
"github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/resourcegroupstaggingapi" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -64,7 +63,7 @@ func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) ( func(o *resourcegroupstaggingapi.Options) { if inContext, ok := conns.FromContext(ctx); ok && inContext.VCREnabled() { tflog.Info(ctx, "overriding retry behavior to immediately return VCR errors") - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(vcr.InteractionNotFoundRetryableFunc)) + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), vcr.InteractionNotFoundRetryableFunc) } }, withExtraOptions(ctx, p, config), diff --git a/internal/service/rolesanywhere/service_endpoint_resolver_gen.go b/internal/service/rolesanywhere/service_endpoint_resolver_gen.go index d9a4c06680a6..a1af15e5a5f6 100644 --- a/internal/service/rolesanywhere/service_endpoint_resolver_gen.go +++ b/internal/service/rolesanywhere/service_endpoint_resolver_gen.go @@ -62,7 +62,7 @@ func (r resolverV2) ResolveEndpoint(ctx context.Context, params rolesanywhere.En }) params.UseFIPS = aws.Bool(false) } else { - err = fmt.Errorf("looking up rolesanywhere endpoint %q: %s", hostname, err) + err = fmt.Errorf("looking up rolesanywhere endpoint %q: %w", hostname, err) return } } else { diff --git a/internal/service/rolesanywhere/service_endpoints_gen_test.go b/internal/service/rolesanywhere/service_endpoints_gen_test.go index 8b56319933c9..a3ef2bfe0852 100644 --- a/internal/service/rolesanywhere/service_endpoints_gen_test.go +++ b/internal/service/rolesanywhere/service_endpoints_gen_test.go @@ -521,7 +521,7 @@ func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { ) } -var errCancelOperation = fmt.Errorf("Test: Canceling request") +var errCancelOperation = errors.New("Test: 
Canceling request") func addCancelRequestMiddleware() func(*middleware.Stack) error { return func(stack *middleware.Stack) error { diff --git a/internal/service/rolesanywhere/service_package_gen.go b/internal/service/rolesanywhere/service_package_gen.go index 7b8245207261..f3cfec4d78d9 100644 --- a/internal/service/rolesanywhere/service_package_gen.go +++ b/internal/service/rolesanywhere/service_package_gen.go @@ -7,7 +7,6 @@ import ( "unique" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/rolesanywhere" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -76,7 +75,7 @@ func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) ( func(o *rolesanywhere.Options) { if inContext, ok := conns.FromContext(ctx); ok && inContext.VCREnabled() { tflog.Info(ctx, "overriding retry behavior to immediately return VCR errors") - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(vcr.InteractionNotFoundRetryableFunc)) + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), vcr.InteractionNotFoundRetryableFunc) } }, withExtraOptions(ctx, p, config), diff --git a/internal/service/rolesanywhere/tags_gen.go b/internal/service/rolesanywhere/tags_gen.go index 95282fe0af72..f4e91ab14059 100644 --- a/internal/service/rolesanywhere/tags_gen.go +++ b/internal/service/rolesanywhere/tags_gen.go @@ -3,8 +3,8 @@ package rolesanywhere import ( "context" - "fmt" + "github.com/YakDriver/smarterr" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/rolesanywhere" awstypes "github.com/aws/aws-sdk-go-v2/service/rolesanywhere/types" @@ -27,7 +27,7 @@ func listTags(ctx context.Context, conn *rolesanywhere.Client, identifier string output, err := conn.ListTagsForResource(ctx, &input, optFns...) 
if err != nil { - return tftags.New(ctx, nil), err + return tftags.New(ctx, nil), smarterr.NewError(err) } return keyValueTags(ctx, output.Tags), nil @@ -39,7 +39,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri tags, err := listTags(ctx, meta.(*conns.AWSClient).RolesAnywhereClient(ctx), identifier) if err != nil { - return err + return smarterr.NewError(err) } if inContext, ok := tftags.FromContext(ctx); ok { @@ -117,7 +117,7 @@ func updateTags(ctx context.Context, conn *rolesanywhere.Client, identifier stri _, err := conn.UntagResource(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("untagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } @@ -132,7 +132,7 @@ func updateTags(ctx context.Context, conn *rolesanywhere.Client, identifier stri _, err := conn.TagResource(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("tagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } diff --git a/internal/service/route53/cidr_collection.go b/internal/service/route53/cidr_collection.go index 346018b730ef..97dc90b5d00d 100644 --- a/internal/service/route53/cidr_collection.go +++ b/internal/service/route53/cidr_collection.go @@ -83,7 +83,7 @@ func (r *cidrCollectionResource) Create(ctx context.Context, request resource.Cr const ( timeout = 2 * time.Minute ) - outputRaw, err := tfresource.RetryWhenIsA[*awstypes.ConcurrentModification](ctx, timeout, func() (any, error) { + outputRaw, err := tfresource.RetryWhenIsA[any, *awstypes.ConcurrentModification](ctx, timeout, func(ctx context.Context) (any, error) { return conn.CreateCidrCollection(ctx, input) }) diff --git a/internal/service/route53/exports_test.go b/internal/service/route53/exports_test.go index 91e30bf4b4d2..e6da2992adc4 100644 --- a/internal/service/route53/exports_test.go +++ b/internal/service/route53/exports_test.go @@ -37,7 +37,6 @@ var ( FindZoneAssociationByThreePartKey = findZoneAssociationByThreePartKey 
KeySigningKeyStatusActive = keySigningKeyStatusActive KeySigningKeyStatusInactive = keySigningKeyStatusInactive - RecordParseResourceID = recordParseResourceID ServeSignatureNotSigning = serveSignatureNotSigning ServeSignatureSigning = serveSignatureSigning WaitChangeInsync = waitChangeInsync diff --git a/internal/service/route53/health_check.go b/internal/service/route53/health_check.go index 308fcd16d639..552ff1d04bf8 100644 --- a/internal/service/route53/health_check.go +++ b/internal/service/route53/health_check.go @@ -217,6 +217,7 @@ func resourceHealthCheckCreate(ctx context.Context, d *schema.ResourceData, meta if v, ok := d.GetOk(names.AttrIPAddress); ok { healthCheckConfig.IPAddress = aws.String(v.(string)) } + if v, ok := d.GetOk(names.AttrPort); ok { healthCheckConfig.Port = aws.Int32(int32(v.(int))) } @@ -235,8 +236,9 @@ func resourceHealthCheckCreate(ctx context.Context, d *schema.ResourceData, meta switch healthCheckType { case awstypes.HealthCheckTypeCalculated: - if v, ok := d.GetOk("child_health_threshold"); ok { - healthCheckConfig.HealthThreshold = aws.Int32(int32(v.(int))) + if v := d.GetRawPlan().GetAttr("child_health_threshold"); !v.IsNull() { + v, _ := v.AsBigFloat().Int64() + healthCheckConfig.HealthThreshold = aws.Int32(int32(v)) } if v, ok := d.GetOk("child_healthchecks"); ok { diff --git a/internal/service/route53/health_check_test.go b/internal/service/route53/health_check_test.go index 648373d8029b..cf5635b16c69 100644 --- a/internal/service/route53/health_check_test.go +++ b/internal/service/route53/health_check_test.go @@ -159,9 +159,47 @@ func TestAccRoute53HealthCheck_withChildHealthChecks(t *testing.T) { CheckDestroy: testAccCheckHealthCheckDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccHealthCheckConfig_childs, + Config: testAccHealthCheckConfig_childs(1), Check: resource.ComposeTestCheckFunc( testAccCheckHealthCheckExists(ctx, resourceName, &check), + resource.TestCheckResourceAttr(resourceName, names.AttrType, 
string(awstypes.HealthCheckTypeCalculated)), + resource.TestCheckResourceAttr(resourceName, "child_health_threshold", "1"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccHealthCheckConfig_childs(0), + Check: resource.ComposeTestCheckFunc( + testAccCheckHealthCheckExists(ctx, resourceName, &check), + resource.TestCheckResourceAttr(resourceName, names.AttrType, string(awstypes.HealthCheckTypeCalculated)), + resource.TestCheckResourceAttr(resourceName, "child_health_threshold", "0"), + ), + }, + }, + }) +} + +func TestAccRoute53HealthCheck_withChildHealthChecksThresholdZero(t *testing.T) { + ctx := acctest.Context(t) + var check awstypes.HealthCheck + resourceName := "aws_route53_health_check.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.Route53ServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckHealthCheckDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccHealthCheckConfig_childs(0), + Check: resource.ComposeTestCheckFunc( + testAccCheckHealthCheckExists(ctx, resourceName, &check), + resource.TestCheckResourceAttr(resourceName, names.AttrType, string(awstypes.HealthCheckTypeCalculated)), + resource.TestCheckResourceAttr(resourceName, "child_health_threshold", "0"), ), }, { @@ -600,7 +638,8 @@ resource "aws_route53_health_check" "test" { `, ip) } -const testAccHealthCheckConfig_childs = ` +func testAccHealthCheckConfig_childs(threshold int) string { + return fmt.Sprintf(` resource "aws_route53_health_check" "child1" { fqdn = "child1.example.com" port = 80 @@ -612,14 +651,15 @@ resource "aws_route53_health_check" "child1" { resource "aws_route53_health_check" "test" { type = "CALCULATED" - child_health_threshold = 1 + child_health_threshold = %[1]d child_healthchecks = [aws_route53_health_check.child1.id] tags = { Name = 
"tf-test-calculated-health-check" } } -` +`, threshold) +} func testAccHealthCheckConfig_regions(regions ...string) string { return fmt.Sprintf(` diff --git a/internal/service/route53/hosted_zone_dnssec.go b/internal/service/route53/hosted_zone_dnssec.go index 04260a722d6a..7cdd2ab3c1bf 100644 --- a/internal/service/route53/hosted_zone_dnssec.go +++ b/internal/service/route53/hosted_zone_dnssec.go @@ -178,7 +178,7 @@ func hostedZoneDNSSECDisable(ctx context.Context, conn *route53.Client, hostedZo const ( timeout = 5 * time.Minute ) - outputRaw, err := tfresource.RetryWhenIsA[*awstypes.KeySigningKeyInParentDSRecord](ctx, timeout, func() (any, error) { + outputRaw, err := tfresource.RetryWhenIsA[any, *awstypes.KeySigningKeyInParentDSRecord](ctx, timeout, func(ctx context.Context) (any, error) { return conn.DisableHostedZoneDNSSEC(ctx, input) }) diff --git a/internal/service/route53/record.go b/internal/service/route53/record.go index b88a844ac183..10b95ec0c7fd 100644 --- a/internal/service/route53/record.go +++ b/internal/service/route53/record.go @@ -30,17 +30,22 @@ import ( "github.com/hashicorp/terraform-provider-aws/names" ) +/* + NOTE: aws_route53_record has a mutable identity ONLY because we shortcut the replace resource flow + when `set_identifier` changes. Other changes to Identity-related attributes do not do this. 
+*/ + // @SDKResource("aws_route53_record", name="Record") // @IdentityAttribute("zone_id") // @IdentityAttribute("name") // @IdentityAttribute("type") // @IdentityAttribute("set_identifier", optional="true") // @MutableIdentity -// @WrappedImport(false) -// @Testing(identityTest=false) +// @ImportIDHandler("recordImportID") // @Testing(existsType="github.com/aws/aws-sdk-go-v2/service/route53/types;awstypes;awstypes.ResourceRecordSet") // @Testing(subdomainTfVar="zoneName;recordName") // @Testing(generator=false) +// @Testing(preIdentityVersion="6.4.0") func resourceRecord() *schema.Resource { //lintignore:R011 return &schema.Resource{ @@ -49,89 +54,6 @@ func resourceRecord() *schema.Resource { UpdateWithoutTimeout: resourceRecordUpdate, DeleteWithoutTimeout: resourceRecordDelete, - Importer: &schema.ResourceImporter{ - StateContext: func(ctx context.Context, d *schema.ResourceData, meta any) ([]*schema.ResourceData, error) { - // Import-by-id case - if d.Id() != "" { - parts := recordParseResourceID(d.Id()) - // We check that we have parsed the id into the correct number of segments. - // We need at least 3 segments! - // However, parts[1] can be the empty string if it is the root domain of the zone, - // and isn't using a FQDN. See https://github.com/hashicorp/terraform-provider-aws/issues/4792 - if parts[0] == "" || parts[2] == "" { - return nil, fmt.Errorf("unexpected format of ID (%q), expected ZONEID_RECORDNAME_TYPE_SET-IDENTIFIER (e.g. 
Z4KAPRWWNC7JR_dev.example.com_NS_dev), where SET-IDENTIFIER is optional", d.Id()) - } - - d.Set("zone_id", parts[0]) - d.Set(names.AttrName, parts[1]) - d.Set(names.AttrType, parts[2]) - if parts[3] != "" { - d.Set("set_identifier", parts[3]) - } - - return []*schema.ResourceData{d}, nil - } - - identity, err := d.Identity() - if err != nil { - return nil, err - } - - zoneIDRaw, ok := identity.GetOk("zone_id") - if !ok { - return nil, fmt.Errorf("identity attribute %q is required", "zone_id") - } - zoneID, ok := zoneIDRaw.(string) - if !ok { - return nil, fmt.Errorf("identity attribute %q: expected string, got %T", "zone_id", zoneIDRaw) - } - d.Set("zone_id", zoneID) - - nameRaw, ok := identity.GetOk(names.AttrName) - if !ok { - return nil, fmt.Errorf("identity attribute %q is required", names.AttrName) - } - name, ok := nameRaw.(string) - if !ok { - return nil, fmt.Errorf("identity attribute %q: expected string, got %T", names.AttrName, nameRaw) - } - d.Set(names.AttrName, name) - - typeRaw, ok := identity.GetOk(names.AttrType) - if !ok { - return nil, fmt.Errorf("identity attribute %q is required", names.AttrType) - } - typ, ok := typeRaw.(string) - if !ok { - return nil, fmt.Errorf("identity attribute %q: expected string, got %T", names.AttrType, typeRaw) - } - d.Set(names.AttrType, typ) - - vars := []string{ - zoneID, - name, - typ, - } - - setIdentifierRaw, ok := identity.GetOk("set_identifier") - if ok { - setIdentifier, ok := setIdentifierRaw.(string) - if !ok { - return nil, fmt.Errorf("identity attribute %q: expected string, got %T", "set_identifier", setIdentifierRaw) - } - d.Set("set_identifier", setIdentifier) - - vars = append(vars, setIdentifier) - } else { - d.Set("set_identifier", "") - } - - d.SetId(strings.Join(vars, "_")) - - return []*schema.ResourceData{d}, nil - }, - }, - SchemaVersion: 2, MigrateState: recordMigrateState, @@ -443,7 +365,7 @@ func resourceRecordCreate(ctx context.Context, d *schema.ResourceData, meta any) HostedZoneId: 
aws.String(cleanZoneID(aws.ToString(zoneRecord.HostedZone.Id))), } - outputRaw, err := tfresource.RetryWhenIsA[*awstypes.NoSuchHostedZone](ctx, 1*time.Minute, func() (any, error) { + outputRaw, err := tfresource.RetryWhenIsA[any, *awstypes.NoSuchHostedZone](ctx, 1*time.Minute, func(ctx context.Context) (any, error) { return conn.ChangeResourceRecordSets(ctx, input) }) @@ -455,15 +377,7 @@ func resourceRecordCreate(ctx context.Context, d *schema.ResourceData, meta any) return sdkdiag.AppendErrorf(diags, "creating Route53 Record: %s", err) } - vars := []string{ - zoneID, - strings.ToLower(d.Get(names.AttrName).(string)), - d.Get(names.AttrType).(string), - } - if v, ok := d.GetOk("set_identifier"); ok { - vars = append(vars, v.(string)) - } - d.SetId(strings.Join(vars, "_")) + d.SetId(createRecordImportID(d)) if output := outputRaw.(*route53.ChangeResourceRecordSetsOutput); output.ChangeInfo != nil { if _, err := waitChangeInsync(ctx, conn, aws.ToString(output.ChangeInfo.Id), d.Timeout(schema.TimeoutCreate)); err != nil { @@ -769,16 +683,7 @@ func resourceRecordUpdate(ctx context.Context, d *schema.ResourceData, meta any) } } - // Generate a new ID. - vars := []string{ - zoneID, - strings.ToLower(d.Get(names.AttrName).(string)), - d.Get(names.AttrType).(string), - } - if v, ok := d.GetOk("set_identifier"); ok { - vars = append(vars, v.(string)) - } - d.SetId(strings.Join(vars, "_")) + d.SetId(createRecordImportID(d)) return append(diags, resourceRecordRead(ctx, d, meta)...) } @@ -1220,3 +1125,42 @@ func expandTxtEntry(s string) string { func flattenTxtEntry(s string) string { return fmt.Sprintf(`"%s"`, s) } + +type recordImportID struct{} + +func (recordImportID) Create(d *schema.ResourceData) string { + return createRecordImportID(d) +} + +func (recordImportID) Parse(id string) (string, map[string]string, error) { + parts := recordParseResourceID(id) + // We check that we have parsed the id into the correct number of segments. + // We need at least 3 segments! 
+ // However, parts[1] can be the empty string if it is the root domain of the zone, + // and isn't using a FQDN. See https://github.com/hashicorp/terraform-provider-aws/issues/4792 + if parts[0] == "" || parts[2] == "" { + return "", nil, fmt.Errorf("unexpected format of ID (%q), expected ZONEID_RECORDNAME_TYPE_SET-IDENTIFIER (e.g. Z4KAPRWWNC7JR_dev.example.com_NS_dev), where SET-IDENTIFIER is optional", id) + } + + result := map[string]string{ + "zone_id": parts[0], + names.AttrName: parts[1], + names.AttrType: parts[2], + } + if parts[3] != "" { + result["set_identifier"] = parts[3] + } + return id, result, nil +} + +func createRecordImportID(d *schema.ResourceData) string { + parts := []string{ + d.Get("zone_id").(string), + strings.ToLower(d.Get(names.AttrName).(string)), + d.Get(names.AttrType).(string), + } + if v, ok := d.GetOk("set_identifier"); ok { + parts = append(parts, v.(string)) + } + return strings.Join(parts, "_") +} diff --git a/internal/service/route53/record_identity_gen_test.go b/internal/service/route53/record_identity_gen_test.go new file mode 100644 index 000000000000..0a5091d4f9e1 --- /dev/null +++ b/internal/service/route53/record_identity_gen_test.go @@ -0,0 +1,243 @@ +// Code generated by internal/generate/identitytests/main.go; DO NOT EDIT. 
+ +package route53_test + +import ( + "testing" + + awstypes "github.com/aws/aws-sdk-go-v2/service/route53/types" + "github.com/hashicorp/terraform-plugin-testing/config" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/knownvalue" + "github.com/hashicorp/terraform-plugin-testing/plancheck" + "github.com/hashicorp/terraform-plugin-testing/statecheck" + "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" + "github.com/hashicorp/terraform-plugin-testing/tfversion" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + tfknownvalue "github.com/hashicorp/terraform-provider-aws/internal/acctest/knownvalue" + tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccRoute53Record_Identity_Basic(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.ResourceRecordSet + resourceName := "aws_route53_record.test" + zoneName := acctest.RandomDomain() + recordName := zoneName.RandomSubdomain() + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.Route53ServiceID), + CheckDestroy: testAccCheckRecordDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + // Step 1: Setup + { + ConfigDirectory: config.StaticDirectory("testdata/Record/basic/"), + ConfigVariables: config.Variables{ + "recordName": config.StringVariable(recordName.String()), + "zoneName": config.StringVariable(zoneName.String()), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckRecordExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: 
tfknownvalue.AccountID(), + "zone_id": knownvalue.NotNull(), + names.AttrName: knownvalue.NotNull(), + names.AttrType: knownvalue.NotNull(), + "set_identifier": knownvalue.Null(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New("zone_id")), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrName)), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrType)), + }, + }, + + // Step 2: Import command + { + ConfigDirectory: config.StaticDirectory("testdata/Record/basic/"), + ConfigVariables: config.Variables{ + "recordName": config.StringVariable(recordName.String()), + "zoneName": config.StringVariable(zoneName.String()), + }, + ImportStateKind: resource.ImportCommandWithID, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + + // Step 3: Import block with Import ID + { + ConfigDirectory: config.StaticDirectory("testdata/Record/basic/"), + ConfigVariables: config.Variables{ + "recordName": config.StringVariable(recordName.String()), + "zoneName": config.StringVariable(zoneName.String()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New("zone_id"), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrName), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrType), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New("set_identifier"), knownvalue.NotNull()), + }, + }, + }, + + // Step 4: Import block with Resource Identity + { + ConfigDirectory: config.StaticDirectory("testdata/Record/basic/"), + ConfigVariables: config.Variables{ + "recordName": config.StringVariable(recordName.String()), + "zoneName": config.StringVariable(zoneName.String()), + }, 
+ ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithResourceIdentity, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New("zone_id"), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrName), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrType), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New("set_identifier"), knownvalue.NotNull()), + }, + }, + }, + }, + }) +} + +// Resource Identity was added after v6.4.0 +func TestAccRoute53Record_Identity_ExistingResource(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.ResourceRecordSet + resourceName := "aws_route53_record.test" + zoneName := acctest.RandomDomain() + recordName := zoneName.RandomSubdomain() + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.Route53ServiceID), + CheckDestroy: testAccCheckRecordDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/Record/basic_v6.4.0/"), + ConfigVariables: config.Variables{ + "recordName": config.StringVariable(recordName.String()), + "zoneName": config.StringVariable(zoneName.String()), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckRecordExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Record/basic/"), + ConfigVariables: config.Variables{ + "recordName": 
config.StringVariable(recordName.String()), + "zoneName": config.StringVariable(zoneName.String()), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + "zone_id": knownvalue.NotNull(), + names.AttrName: knownvalue.NotNull(), + names.AttrType: knownvalue.NotNull(), + "set_identifier": knownvalue.Null(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New("zone_id")), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrName)), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrType)), + }, + }, + }, + }) +} + +// Resource Identity was added after v6.4.0 +func TestAccRoute53Record_Identity_ExistingResource_NoRefresh_NoChange(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.ResourceRecordSet + resourceName := "aws_route53_record.test" + zoneName := acctest.RandomDomain() + recordName := zoneName.RandomSubdomain() + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.Route53ServiceID), + CheckDestroy: testAccCheckRecordDestroy(ctx), + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/Record/basic_v6.4.0/"), + ConfigVariables: config.Variables{ + "recordName": 
config.StringVariable(recordName.String()), + "zoneName": config.StringVariable(zoneName.String()), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckRecordExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Record/basic/"), + ConfigVariables: config.Variables{ + "recordName": config.StringVariable(recordName.String()), + "zoneName": config.StringVariable(zoneName.String()), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + }, + }) +} diff --git a/internal/service/route53/record_test.go b/internal/service/route53/record_test.go index d1b54bb0b9f9..dd2918ce7bc6 100644 --- a/internal/service/route53/record_test.go +++ b/internal/service/route53/record_test.go @@ -21,6 +21,8 @@ import ( "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" "github.com/hashicorp/terraform-plugin-testing/tfversion" "github.com/hashicorp/terraform-provider-aws/internal/acctest" + tfknownvalue "github.com/hashicorp/terraform-provider-aws/internal/acctest/knownvalue" + tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" "github.com/hashicorp/terraform-provider-aws/internal/conns" tfroute53 "github.com/hashicorp/terraform-provider-aws/internal/service/route53" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" @@ -68,6 +70,9 @@ func TestAccRoute53Record_basic(t *testing.T) { resource.TestCheckResourceAttr(resourceName, 
"weighted_routing_policy.#", "0"), resource.TestCheckResourceAttrSet(resourceName, "zone_id"), ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectAttributeFormat(resourceName, tfjsonpath.New(names.AttrID), "{zone_id}_{name}_{type}"), + }, }, { ResourceName: resourceName, @@ -79,12 +84,10 @@ func TestAccRoute53Record_basic(t *testing.T) { }) } -func TestAccRoute53Record_Identity_Basic(t *testing.T) { +func TestAccRoute53Record_Identity_SetIdentifier(t *testing.T) { ctx := acctest.Context(t) var v awstypes.ResourceRecordSet resourceName := "aws_route53_record.test" - zoneName := acctest.RandomDomain() - recordName := zoneName.RandomSubdomain() resource.ParallelTest(t, resource.TestCase{ TerraformVersionChecks: []tfversion.TerraformVersionCheck{ @@ -97,22 +100,23 @@ func TestAccRoute53Record_Identity_Basic(t *testing.T) { Steps: []resource.TestStep{ // Step 1: Setup { - Config: testAccRecordConfig_basic(zoneName.String(), recordName.String()), + Config: testAccRecordConfig_healthCheckIdTypeCNAME(), Check: resource.ComposeAggregateTestCheckFunc( testAccCheckRecordExists(ctx, resourceName, &v), ), ConfigStateChecks: []statecheck.StateCheck{ - statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.StringRegexp(regexache.MustCompile(fmt.Sprintf(`^[[:alnum:]]+_%s_A$`, recordName.String())))), - // statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ - // names.AttrAccountID: tfknownvalue.AccountID(), - // "zone_id": knownvalue.NotNull(), - // names.AttrName: knownvalue.NotNull(), - // names.AttrType: knownvalue.NotNull(), - // "set_identifier": knownvalue.Null(), - // }), - // statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New("zone_id")), - // statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrName)), - // statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrType)), + tfstatecheck.ExpectAttributeFormat(resourceName, 
tfjsonpath.New(names.AttrID), "{zone_id}_{name}_{type}_{set_identifier}"), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + "zone_id": knownvalue.NotNull(), + names.AttrName: knownvalue.NotNull(), + names.AttrType: knownvalue.NotNull(), + "set_identifier": knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New("zone_id")), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrName)), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrType)), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New("set_identifier")), }, }, @@ -132,34 +136,34 @@ func TestAccRoute53Record_Identity_Basic(t *testing.T) { ImportPlanChecks: resource.ImportPlanChecks{ PreApply: []plancheck.PlanCheck{ plancheck.ExpectKnownValue(resourceName, tfjsonpath.New("zone_id"), knownvalue.NotNull()), - plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrName), knownvalue.StringExact(recordName.String())), - plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrType), knownvalue.StringExact("A")), - plancheck.ExpectKnownValue(resourceName, tfjsonpath.New("set_identifier"), knownvalue.StringExact("")), - plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.StringRegexp(regexache.MustCompile(fmt.Sprintf(`^[[:alnum:]]+_%s_A$`, recordName.String())))), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrName), knownvalue.StringExact("test")), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrType), knownvalue.StringExact("CNAME")), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New("set_identifier"), knownvalue.StringExact("set-id")), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.StringRegexp(regexache.MustCompile(`^[[:alnum:]]+_test_CNAME_set-id$`))), }, }, }, - // // Step 4: 
Import block with Resource Identity - // { - // ImportStateKind: resource.ImportBlockWithResourceIdentity, - // ResourceName: resourceName, - // ImportState: true, - // ImportPlanChecks: resource.ImportPlanChecks{ - // PreApply: []plancheck.PlanCheck{ - // plancheck.ExpectKnownValue(resourceName, tfjsonpath.New("zone_id"), knownvalue.NotNull()), - // plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrName), knownvalue.StringExact(recordName.String())), - // plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrType), knownvalue.StringExact("A")), - // plancheck.ExpectKnownValue(resourceName, tfjsonpath.New("set_identifier"), knownvalue.StringExact("")), - // plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.StringRegexp(regexache.MustCompile(fmt.Sprintf(`^[[:alnum:]]+_%s_A$`, recordName.String())))), - // }, - // }, - // }, + // Step 4: Import block with Resource Identity + { + ImportStateKind: resource.ImportBlockWithResourceIdentity, + ResourceName: resourceName, + ImportState: true, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New("zone_id"), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrName), knownvalue.StringExact("test")), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrType), knownvalue.StringExact("CNAME")), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New("set_identifier"), knownvalue.StringExact("set-id")), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.StringRegexp(regexache.MustCompile(`^[[:alnum:]]+_test_CNAME_set-id$`))), + }, + }, + }, }, }) } -func TestAccRoute53Record_Identity_SetIdentifier(t *testing.T) { +func TestAccRoute53Record_Identity_ChangeOnUpdate(t *testing.T) { ctx := acctest.Context(t) var v awstypes.ResourceRecordSet resourceName := "aws_route53_record.test" @@ -173,67 +177,54 
@@ func TestAccRoute53Record_Identity_SetIdentifier(t *testing.T) { ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckRecordDestroy(ctx), Steps: []resource.TestStep{ - // Step 1: Setup + // Step 1: Create { - Config: testAccRecordConfig_healthCheckIdTypeCNAME(), + Config: testAccRecordConfig_setIdentifierRenameWeighted("before"), Check: resource.ComposeAggregateTestCheckFunc( testAccCheckRecordExists(ctx, resourceName, &v), ), ConfigStateChecks: []statecheck.StateCheck{ - statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.StringRegexp(regexache.MustCompile(`^[[:alnum:]]+_test_CNAME_set-id$`))), - // statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ - // names.AttrAccountID: tfknownvalue.AccountID(), - // "zone_id": knownvalue.NotNull(), - // names.AttrName: knownvalue.NotNull(), - // names.AttrType: knownvalue.NotNull(), - // "set_identifier": knownvalue.NotNull(), - // }), - // statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New("zone_id")), - // statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrName)), - // statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrType)), - // statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New("set_identifier")), + tfstatecheck.ExpectAttributeFormat(resourceName, tfjsonpath.New(names.AttrID), "{zone_id}_{name}_{type}_{set_identifier}"), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + "zone_id": knownvalue.NotNull(), + names.AttrName: knownvalue.NotNull(), + names.AttrType: knownvalue.NotNull(), + "set_identifier": knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New("zone_id")), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrName)), + statecheck.ExpectIdentityValueMatchesState(resourceName, 
tfjsonpath.New(names.AttrType)), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New("set_identifier")), }, }, - // Step 2: Import command + // Step 2: Update { - ImportStateKind: resource.ImportCommandWithID, - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - - // Step 3: Import block with Import ID - { - ImportStateKind: resource.ImportBlockWithID, - ResourceName: resourceName, - ImportState: true, - ImportPlanChecks: resource.ImportPlanChecks{ + Config: testAccRecordConfig_setIdentifierRenameWeighted("after"), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckRecordExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectAttributeFormat(resourceName, tfjsonpath.New(names.AttrID), "{zone_id}_{name}_{type}_{set_identifier}"), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + "zone_id": knownvalue.NotNull(), + names.AttrName: knownvalue.NotNull(), + names.AttrType: knownvalue.NotNull(), + "set_identifier": knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New("zone_id")), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrName)), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrType)), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New("set_identifier")), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ PreApply: []plancheck.PlanCheck{ - plancheck.ExpectKnownValue(resourceName, tfjsonpath.New("zone_id"), knownvalue.NotNull()), - plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrName), knownvalue.StringExact("test")), - plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrType), knownvalue.StringExact("CNAME")), - plancheck.ExpectKnownValue(resourceName, tfjsonpath.New("set_identifier"), 
knownvalue.StringExact("set-id")), - plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.StringRegexp(regexache.MustCompile(`^[[:alnum:]]+_test_CNAME_set-id$`))), + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), }, }, }, - - // // Step 4: Import block with Resource Identity - // { - // ImportStateKind: resource.ImportBlockWithResourceIdentity, - // ResourceName: resourceName, - // ImportState: true, - // ImportPlanChecks: resource.ImportPlanChecks{ - // PreApply: []plancheck.PlanCheck{ - // plancheck.ExpectKnownValue(resourceName, tfjsonpath.New("zone_id"), knownvalue.NotNull()), - // plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrName), knownvalue.StringExact("test")), - // plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrType), knownvalue.StringExact("CNAME")), - // plancheck.ExpectKnownValue(resourceName, tfjsonpath.New("set_identifier"), knownvalue.StringExact("set-id")), - // plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.StringRegexp(regexache.MustCompile(`^[[:alnum:]]+_test_CNAME_set-id$`))), - // }, - // }, - // }, }, }) } @@ -671,8 +662,7 @@ func TestAccRoute53Record_Alias_elb(t *testing.T) { var record1 awstypes.ResourceRecordSet resourceName := "aws_route53_record.alias" - rs := sdkacctest.RandString(10) - testAccRecordConfig_config := fmt.Sprintf(testAccRecordConfig_aliasELB, rs) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.Route53ServiceID), @@ -680,7 +670,7 @@ func TestAccRoute53Record_Alias_elb(t *testing.T) { CheckDestroy: testAccCheckRecordDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccRecordConfig_config, + Config: testAccRecordConfig_aliasELB(rName), Check: resource.ComposeTestCheckFunc( testAccCheckRecordExists(ctx, resourceName, &record1), ), @@ -760,8 
+750,7 @@ func TestAccRoute53Record_Alias_uppercase(t *testing.T) { var record1 awstypes.ResourceRecordSet resourceName := "aws_route53_record.alias" - rs := sdkacctest.RandString(10) - testAccRecordConfig_config := fmt.Sprintf(testAccRecordConfig_aliasELBUppercase, rs) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.Route53ServiceID), @@ -769,7 +758,7 @@ func TestAccRoute53Record_Alias_uppercase(t *testing.T) { CheckDestroy: testAccCheckRecordDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccRecordConfig_config, + Config: testAccRecordConfig_aliasELBUppercase(rName), Check: resource.ComposeTestCheckFunc( testAccCheckRecordExists(ctx, resourceName, &record1), ), @@ -788,6 +777,7 @@ func TestAccRoute53Record_Weighted_alias(t *testing.T) { ctx := acctest.Context(t) var record1, record2, record3, record4, record5, record6 awstypes.ResourceRecordSet resourceName := "aws_route53_record.elb_weighted_alias_live" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, @@ -796,7 +786,7 @@ func TestAccRoute53Record_Weighted_alias(t *testing.T) { CheckDestroy: testAccCheckRecordDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccRecordConfig_weightedELBAlias, + Config: testAccRecordConfig_weightedELBAlias(rName), Check: resource.ComposeTestCheckFunc( testAccCheckRecordExists(ctx, resourceName, &record1), testAccCheckRecordExists(ctx, "aws_route53_record.elb_weighted_alias_dev", &record2), @@ -1001,6 +991,9 @@ func TestAccRoute53Record_HealthCheckID_typeChange(t *testing.T) { Check: resource.ComposeTestCheckFunc( testAccCheckRecordExists(ctx, resourceName, &record1), ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectAttributeFormat(resourceName, tfjsonpath.New(names.AttrID), 
"{zone_id}_{name}_{type}_{set_identifier}"), + }, }, { ResourceName: resourceName, @@ -1013,6 +1006,9 @@ func TestAccRoute53Record_HealthCheckID_typeChange(t *testing.T) { Check: resource.ComposeTestCheckFunc( testAccCheckRecordExists(ctx, resourceName, &record2), ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectAttributeFormat(resourceName, tfjsonpath.New(names.AttrID), "{zone_id}_{name}_{type}_{set_identifier}"), + }, }, }, }) @@ -1511,7 +1507,7 @@ func TestAccRoute53Record_SetIdentifierRename_multiValueAnswer(t *testing.T) { func TestAccRoute53Record_SetIdentifierRename_weighted(t *testing.T) { ctx := acctest.Context(t) var record1, record2 awstypes.ResourceRecordSet - resourceName := "aws_route53_record.set_identifier_rename_weighted" + resourceName := "aws_route53_record.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, @@ -1932,12 +1928,12 @@ func testAccCheckRecordDestroy(ctx context.Context) resource.TestCheckFunc { continue } - parts := tfroute53.RecordParseResourceID(rs.Primary.ID) - zone := parts[0] - recordName := parts[1] - recordType := parts[2] - - _, _, err := tfroute53.FindResourceRecordSetByFourPartKey(ctx, conn, tfroute53.CleanZoneID(zone), recordName, recordType, rs.Primary.Attributes["set_identifier"]) + _, _, err := tfroute53.FindResourceRecordSetByFourPartKey(ctx, conn, + tfroute53.CleanZoneID(rs.Primary.Attributes["zone_id"]), + rs.Primary.Attributes[names.AttrName], + rs.Primary.Attributes[names.AttrType], + rs.Primary.Attributes["set_identifier"], + ) if tfresource.NotFound(err) { continue @@ -1963,12 +1959,12 @@ func testAccCheckRecordExists(ctx context.Context, n string, v *awstypes.Resourc conn := acctest.Provider.Meta().(*conns.AWSClient).Route53Client(ctx) - parts := tfroute53.RecordParseResourceID(rs.Primary.ID) - zone := parts[0] - recordName := parts[1] - recordType := parts[2] - - output, _, err := tfroute53.FindResourceRecordSetByFourPartKey(ctx, conn, 
tfroute53.CleanZoneID(zone), recordName, recordType, rs.Primary.Attributes["set_identifier"]) + output, _, err := tfroute53.FindResourceRecordSetByFourPartKey(ctx, conn, + tfroute53.CleanZoneID(rs.Primary.Attributes["zone_id"]), + rs.Primary.Attributes[names.AttrName], + rs.Primary.Attributes[names.AttrType], + rs.Primary.Attributes["set_identifier"], + ) if err != nil { return err @@ -2564,16 +2560,10 @@ resource "aws_route53_record" "third_region" { `, firstRegion, secondRegion, thirdRegion) } -const testAccRecordConfig_aliasELB = ` -data "aws_availability_zones" "available" { - state = "available" - - filter { - name = "opt-in-status" - values = ["opt-in-not-required"] - } -} - +func testAccRecordConfig_aliasELB(rName string) string { + return acctest.ConfigCompose( + acctest.ConfigVPCWithSubnets(rName, 2), + fmt.Sprintf(` resource "aws_route53_zone" "main" { name = "domain.test" } @@ -2591,8 +2581,10 @@ resource "aws_route53_record" "alias" { } resource "aws_elb" "main" { - name = "foobar-terraform-elb-%s" - availability_zones = slice(data.aws_availability_zones.available.names, 0, 1) + name = %[1]q + + internal = true + subnets = aws_subnet.test[*].id listener { instance_port = 80 @@ -2601,18 +2593,13 @@ resource "aws_elb" "main" { lb_protocol = "http" } } -` - -const testAccRecordConfig_aliasELBUppercase = ` -data "aws_availability_zones" "available" { - state = "available" - - filter { - name = "opt-in-status" - values = ["opt-in-not-required"] - } +`, rName)) } +func testAccRecordConfig_aliasELBUppercase(rName string) string { + return acctest.ConfigCompose( + acctest.ConfigVPCWithSubnets(rName, 2), + fmt.Sprintf(` resource "aws_route53_zone" "main" { name = "domain.test" } @@ -2630,8 +2617,10 @@ resource "aws_route53_record" "alias" { } resource "aws_elb" "main" { - name = "FOOBAR-TERRAFORM-ELB-%s" - availability_zones = slice(data.aws_availability_zones.available.names, 0, 1) + name = %[1]q + + internal = true + subnets = aws_subnet.test[*].id listener { 
instance_port = 80 @@ -2640,7 +2629,8 @@ resource "aws_elb" "main" { lb_protocol = "http" } } -` +`, rName)) +} func testAccRecordConfig_aliasS3(rName string) string { return fmt.Sprintf(` @@ -2851,23 +2841,18 @@ resource "aws_route53_record" "test" { `) } -const testAccRecordConfig_weightedELBAlias = ` -data "aws_availability_zones" "available" { - state = "available" - - filter { - name = "opt-in-status" - values = ["opt-in-not-required"] - } -} - +func testAccRecordConfig_weightedELBAlias(rName string) string { + return acctest.ConfigCompose( + acctest.ConfigVPCWithSubnets(rName, 2), ` resource "aws_route53_zone" "main" { name = "domain.test" } resource "aws_elb" "live" { - name = "foobar-terraform-elb-live" - availability_zones = slice(data.aws_availability_zones.available.names, 0, 1) + name = "foobar-terraform-elb-live" + + internal = true + subnets = aws_subnet.test[*].id listener { instance_port = 80 @@ -2896,8 +2881,10 @@ resource "aws_route53_record" "elb_weighted_alias_live" { } resource "aws_elb" "dev" { - name = "foobar-terraform-elb-dev" - availability_zones = slice(data.aws_availability_zones.available.names, 0, 1) + name = "foobar-terraform-elb-dev" + + internal = true + subnets = aws_subnet.test[*].id listener { instance_port = 80 @@ -2924,7 +2911,8 @@ resource "aws_route53_record" "elb_weighted_alias_dev" { evaluate_target_health = true } } -` +`) +} const testAccRecordConfig_weightedAlias = ` resource "aws_route53_zone" "main" { @@ -3292,7 +3280,7 @@ resource "aws_route53_zone" "main" { name = "domain.test" } -resource "aws_route53_record" "set_identifier_rename_weighted" { +resource "aws_route53_record" "test" { zone_id = aws_route53_zone.main.zone_id name = "sample" type = "A" @@ -3308,23 +3296,18 @@ resource "aws_route53_record" "set_identifier_rename_weighted" { } func testAccRecordConfig_aliasChangePre(rName string) string { - return fmt.Sprintf(` -data "aws_availability_zones" "available" { - state = "available" - - filter { - name = 
"opt-in-status" - values = ["opt-in-not-required"] - } -} - + return acctest.ConfigCompose( + acctest.ConfigVPCWithSubnets(rName, 2), + fmt.Sprintf(` resource "aws_route53_zone" "main" { name = "domain.test" } resource "aws_elb" "test" { - name = %[1]q - availability_zones = slice(data.aws_availability_zones.available.names, 0, 1) + name = %[1]q + + internal = true + subnets = aws_subnet.test[*].id listener { instance_port = 80 @@ -3345,7 +3328,7 @@ resource "aws_route53_record" "test" { evaluate_target_health = true } } -`, rName) +`, rName)) } func testAccRecordConfig_aliasChangePost() string { @@ -3379,23 +3362,18 @@ resource "aws_route53_record" "empty" { ` func testAccRecordConfig_aliasChangeDualstackPre(rName string) string { - return fmt.Sprintf(` -data "aws_availability_zones" "available" { - state = "available" - - filter { - name = "opt-in-status" - values = ["opt-in-not-required"] - } -} - + return acctest.ConfigCompose( + acctest.ConfigVPCWithSubnets(rName, 2), + fmt.Sprintf(` resource "aws_route53_zone" "test" { name = "domain.test" } resource "aws_elb" "test" { - name = %[1]q - availability_zones = slice(data.aws_availability_zones.available.names, 0, 1) + name = %[1]q + + internal = true + subnets = aws_subnet.test[*].id listener { instance_port = 80 @@ -3416,27 +3394,22 @@ resource "aws_route53_record" "test" { evaluate_target_health = true } } - `, rName) + `, rName)) } func testAccRecordConfig_aliasChangeDualstackPost(rName string) string { - return fmt.Sprintf(` -data "aws_availability_zones" "available" { - state = "available" - - filter { - name = "opt-in-status" - values = ["opt-in-not-required"] - } -} - + return acctest.ConfigCompose( + acctest.ConfigVPCWithSubnets(rName, 2), + fmt.Sprintf(` resource "aws_route53_zone" "test" { name = "domain.test" } resource "aws_elb" "test" { - name = %[1]q - availability_zones = slice(data.aws_availability_zones.available.names, 0, 1) + name = %[1]q + + internal = true + subnets = aws_subnet.test[*].id 
listener { instance_port = 80 @@ -3457,7 +3430,7 @@ resource "aws_route53_record" "test" { evaluate_target_health = true } } - `, rName) + `, rName)) } const testAccRecordConfig_longTxt = ` diff --git a/internal/service/route53/service_endpoint_resolver_gen.go b/internal/service/route53/service_endpoint_resolver_gen.go index b0f533b789df..769b9f127c51 100644 --- a/internal/service/route53/service_endpoint_resolver_gen.go +++ b/internal/service/route53/service_endpoint_resolver_gen.go @@ -62,7 +62,7 @@ func (r resolverV2) ResolveEndpoint(ctx context.Context, params route53.Endpoint }) params.UseFIPS = aws.Bool(false) } else { - err = fmt.Errorf("looking up route53 endpoint %q: %s", hostname, err) + err = fmt.Errorf("looking up route53 endpoint %q: %w", hostname, err) return } } else { diff --git a/internal/service/route53/service_endpoints_gen_test.go b/internal/service/route53/service_endpoints_gen_test.go index fc5cbaf4589e..183aba217aea 100644 --- a/internal/service/route53/service_endpoints_gen_test.go +++ b/internal/service/route53/service_endpoints_gen_test.go @@ -521,7 +521,7 @@ func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { ) } -var errCancelOperation = fmt.Errorf("Test: Canceling request") +var errCancelOperation = errors.New("Test: Canceling request") func addCancelRequestMiddleware() func(*middleware.Stack) error { return func(stack *middleware.Stack) error { diff --git a/internal/service/route53/service_package_gen.go b/internal/service/route53/service_package_gen.go index ae90242e0515..cf8217106ee7 100644 --- a/internal/service/route53/service_package_gen.go +++ b/internal/service/route53/service_package_gen.go @@ -7,7 +7,6 @@ import ( "unique" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/route53" "github.com/hashicorp/aws-sdk-go-base/v2/endpoints" "github.com/hashicorp/terraform-plugin-log/tflog" @@ -123,6 +122,18 @@ func (p *servicePackage) 
SDKResources(ctx context.Context) []*inttypes.ServicePa TypeName: "aws_route53_record", Name: "Record", Region: unique.Make(inttypes.ResourceRegionDisabled()), + Identity: inttypes.GlobalParameterizedIdentity([]inttypes.IdentityAttribute{ + inttypes.StringIdentityAttribute("zone_id", true), + inttypes.StringIdentityAttribute(names.AttrName, true), + inttypes.StringIdentityAttribute(names.AttrType, true), + inttypes.StringIdentityAttribute("set_identifier", false), + }, + inttypes.WithMutableIdentity(), + ), + Import: inttypes.SDKv2Import{ + WrappedImport: true, + ImportID: recordImportID{}, + }, }, { Factory: resourceTrafficPolicy, @@ -184,7 +195,7 @@ func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) ( func(o *route53.Options) { if inContext, ok := conns.FromContext(ctx); ok && inContext.VCREnabled() { tflog.Info(ctx, "overriding retry behavior to immediately return VCR errors") - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(vcr.InteractionNotFoundRetryableFunc)) + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), vcr.InteractionNotFoundRetryableFunc) } }, func(o *route53.Options) { diff --git a/internal/service/route53/sweep.go b/internal/service/route53/sweep.go index 96ac1e38f20c..11f16edf9d77 100644 --- a/internal/service/route53/sweep.go +++ b/internal/service/route53/sweep.go @@ -65,7 +65,7 @@ func sweepHealthChecks(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.Route53Client(ctx) input := &route53.ListHealthChecksInput{} @@ -113,7 +113,7 @@ func sweepKeySigningKeys(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("getting client: %s", err) + return fmt.Errorf("getting 
client: %w", err) } conn := client.Route53Client(ctx) input := &route53.ListHostedZonesInput{} @@ -316,7 +316,7 @@ func sweepZones(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.Route53Client(ctx) input := &route53.ListHostedZonesInput{} diff --git a/internal/service/route53/tags_gen.go b/internal/service/route53/tags_gen.go index 0d14604a5369..f08c8b396860 100644 --- a/internal/service/route53/tags_gen.go +++ b/internal/service/route53/tags_gen.go @@ -3,8 +3,8 @@ package route53 import ( "context" - "fmt" + "github.com/YakDriver/smarterr" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/route53" awstypes "github.com/aws/aws-sdk-go-v2/service/route53/types" @@ -28,7 +28,7 @@ func listTags(ctx context.Context, conn *route53.Client, identifier, resourceTyp output, err := conn.ListTagsForResource(ctx, &input, optFns...) if err != nil { - return tftags.New(ctx, nil), err + return tftags.New(ctx, nil), smarterr.NewError(err) } return keyValueTags(ctx, output.ResourceTagSet.Tags), nil @@ -40,7 +40,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier, res tags, err := listTags(ctx, meta.(*conns.AWSClient).Route53Client(ctx), identifier, resourceType) if err != nil { - return err + return smarterr.NewError(err) } if inContext, ok := tftags.FromContext(ctx); ok { @@ -142,7 +142,7 @@ func updateTags(ctx context.Context, conn *route53.Client, identifier, resourceT _, err := conn.ChangeTagsForResource(ctx, &input, optFns...) 
if err != nil { - return fmt.Errorf("tagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } return nil diff --git a/internal/service/route53/testdata/Record/basic/main_gen.tf b/internal/service/route53/testdata/Record/basic/main_gen.tf new file mode 100644 index 000000000000..d4986479e1a1 --- /dev/null +++ b/internal/service/route53/testdata/Record/basic/main_gen.tf @@ -0,0 +1,25 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resource "aws_route53_record" "test" { + zone_id = aws_route53_zone.test.zone_id + name = var.zoneName + type = "A" + ttl = "30" + records = ["127.0.0.1", "127.0.0.27"] +} + +resource "aws_route53_zone" "test" { + name = var.recordName +} + +variable "recordName" { + type = string + nullable = false +} + +variable "zoneName" { + type = string + nullable = false +} + diff --git a/internal/service/route53/testdata/Record/basic_v6.4.0/main_gen.tf b/internal/service/route53/testdata/Record/basic_v6.4.0/main_gen.tf new file mode 100644 index 000000000000..89bf0b2c96dd --- /dev/null +++ b/internal/service/route53/testdata/Record/basic_v6.4.0/main_gen.tf @@ -0,0 +1,35 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_route53_record" "test" { + zone_id = aws_route53_zone.test.zone_id + name = var.zoneName + type = "A" + ttl = "30" + records = ["127.0.0.1", "127.0.0.27"] +} + +resource "aws_route53_zone" "test" { + name = var.recordName +} + +variable "recordName" { + type = string + nullable = false +} + +variable "zoneName" { + type = string + nullable = false +} + +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "6.4.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/route53/testdata/tmpl/record_basic.gtpl b/internal/service/route53/testdata/tmpl/record_basic.gtpl new file mode 100644 index 000000000000..57f8ec97fe81 --- /dev/null +++ b/internal/service/route53/testdata/tmpl/record_basic.gtpl @@ -0,0 +1,11 @@ +resource "aws_route53_record" "test" { + zone_id = aws_route53_zone.test.zone_id + name = var.zoneName + type = "A" + ttl = "30" + records = ["127.0.0.1", "127.0.0.27"] +} + +resource "aws_route53_zone" "test" { + name = var.recordName +} diff --git a/internal/service/route53/traffic_policy.go b/internal/service/route53/traffic_policy.go index 02510340155d..73e79ef3f250 100644 --- a/internal/service/route53/traffic_policy.go +++ b/internal/service/route53/traffic_policy.go @@ -102,7 +102,7 @@ func resourceTrafficPolicyCreate(ctx context.Context, d *schema.ResourceData, me input.Comment = aws.String(v.(string)) } - outputRaw, err := tfresource.RetryWhenIsA[*awstypes.NoSuchTrafficPolicy](ctx, d.Timeout(schema.TimeoutCreate), func() (any, error) { + outputRaw, err := tfresource.RetryWhenIsA[any, *awstypes.NoSuchTrafficPolicy](ctx, d.Timeout(schema.TimeoutCreate), func(ctx context.Context) (any, error) { return conn.CreateTrafficPolicy(ctx, input) }) diff --git a/internal/service/route53/traffic_policy_instance.go b/internal/service/route53/traffic_policy_instance.go index beae6f62bbfa..6763bd241b11 100644 --- a/internal/service/route53/traffic_policy_instance.go +++ 
b/internal/service/route53/traffic_policy_instance.go @@ -89,7 +89,7 @@ func resourceTrafficPolicyInstanceCreate(ctx context.Context, d *schema.Resource TTL: aws.Int64(int64(d.Get("ttl").(int))), } - outputRaw, err := tfresource.RetryWhenIsA[*awstypes.NoSuchTrafficPolicy](ctx, d.Timeout(schema.TimeoutCreate), func() (any, error) { + outputRaw, err := tfresource.RetryWhenIsA[any, *awstypes.NoSuchTrafficPolicy](ctx, d.Timeout(schema.TimeoutCreate), func(ctx context.Context) (any, error) { return conn.CreateTrafficPolicyInstance(ctx, input) }) diff --git a/internal/service/route53/vpc_association_authorization.go b/internal/service/route53/vpc_association_authorization.go index 592b17a2492c..bdede6a17020 100644 --- a/internal/service/route53/vpc_association_authorization.go +++ b/internal/service/route53/vpc_association_authorization.go @@ -80,7 +80,7 @@ func resourceVPCAssociationAuthorizationCreate(ctx context.Context, d *schema.Re input.VPC.VPCRegion = awstypes.VPCRegion(v.(string)) } - outputRaw, err := tfresource.RetryWhenIsA[*awstypes.ConcurrentModification](ctx, d.Timeout(schema.TimeoutCreate), func() (any, error) { + outputRaw, err := tfresource.RetryWhenIsA[any, *awstypes.ConcurrentModification](ctx, d.Timeout(schema.TimeoutCreate), func(ctx context.Context) (any, error) { return conn.CreateVPCAssociationAuthorization(ctx, input) }) @@ -106,7 +106,7 @@ func resourceVPCAssociationAuthorizationRead(ctx context.Context, d *schema.Reso // InvalidPaginationToken errors can manifest when many authorization resources are // managed concurrently. Retry these errors for a short duration. 
- outputRaw, err := tfresource.RetryWhenIsA[*awstypes.InvalidPaginationToken](ctx, d.Timeout(schema.TimeoutRead), func() (any, error) { + outputRaw, err := tfresource.RetryWhenIsA[any, *awstypes.InvalidPaginationToken](ctx, d.Timeout(schema.TimeoutRead), func(ctx context.Context) (any, error) { return findVPCAssociationAuthorizationByTwoPartKey(ctx, conn, zoneID, vpcID) }) @@ -138,7 +138,7 @@ func resourceVPCAssociationAuthorizationDelete(ctx context.Context, d *schema.Re } log.Printf("[INFO] Deleting Route53 VPC Association Authorization: %s", d.Id()) - _, err = tfresource.RetryWhenIsA[*awstypes.ConcurrentModification](ctx, d.Timeout(schema.TimeoutCreate), func() (any, error) { + _, err = tfresource.RetryWhenIsA[any, *awstypes.ConcurrentModification](ctx, d.Timeout(schema.TimeoutCreate), func(ctx context.Context) (any, error) { return conn.DeleteVPCAssociationAuthorization(ctx, &route53.DeleteVPCAssociationAuthorizationInput{ HostedZoneId: aws.String(zoneID), VPC: &awstypes.VPC{ diff --git a/internal/service/route53domains/service_endpoint_resolver_gen.go b/internal/service/route53domains/service_endpoint_resolver_gen.go index ab8175410911..ae435ddaf9e4 100644 --- a/internal/service/route53domains/service_endpoint_resolver_gen.go +++ b/internal/service/route53domains/service_endpoint_resolver_gen.go @@ -62,7 +62,7 @@ func (r resolverV2) ResolveEndpoint(ctx context.Context, params route53domains.E }) params.UseFIPS = aws.Bool(false) } else { - err = fmt.Errorf("looking up route53domains endpoint %q: %s", hostname, err) + err = fmt.Errorf("looking up route53domains endpoint %q: %w", hostname, err) return } } else { diff --git a/internal/service/route53domains/service_endpoints_gen_test.go b/internal/service/route53domains/service_endpoints_gen_test.go index 252cb7d94189..bd00d191ff12 100644 --- a/internal/service/route53domains/service_endpoints_gen_test.go +++ b/internal/service/route53domains/service_endpoints_gen_test.go @@ -523,7 +523,7 @@ func 
retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { ) } -var errCancelOperation = fmt.Errorf("Test: Canceling request") +var errCancelOperation = errors.New("Test: Canceling request") func addCancelRequestMiddleware() func(*middleware.Stack) error { return func(stack *middleware.Stack) error { diff --git a/internal/service/route53domains/service_package_gen.go b/internal/service/route53domains/service_package_gen.go index d88d5950b153..47a2638e0d2d 100644 --- a/internal/service/route53domains/service_package_gen.go +++ b/internal/service/route53domains/service_package_gen.go @@ -7,7 +7,6 @@ import ( "unique" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/route53domains" "github.com/hashicorp/aws-sdk-go-base/v2/endpoints" "github.com/hashicorp/terraform-plugin-log/tflog" @@ -84,7 +83,7 @@ func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) ( func(o *route53domains.Options) { if inContext, ok := conns.FromContext(ctx); ok && inContext.VCREnabled() { tflog.Info(ctx, "overriding retry behavior to immediately return VCR errors") - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(vcr.InteractionNotFoundRetryableFunc)) + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), vcr.InteractionNotFoundRetryableFunc) } }, func(o *route53domains.Options) { diff --git a/internal/service/route53domains/tags_gen.go b/internal/service/route53domains/tags_gen.go index 92229a446748..795e1090f626 100644 --- a/internal/service/route53domains/tags_gen.go +++ b/internal/service/route53domains/tags_gen.go @@ -3,8 +3,8 @@ package route53domains import ( "context" - "fmt" + "github.com/YakDriver/smarterr" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/route53domains" awstypes "github.com/aws/aws-sdk-go-v2/service/route53domains/types" @@ -27,7 +27,7 @@ func listTags(ctx context.Context, 
conn *route53domains.Client, identifier strin output, err := conn.ListTagsForDomain(ctx, &input, optFns...) if err != nil { - return tftags.New(ctx, nil), err + return tftags.New(ctx, nil), smarterr.NewError(err) } return keyValueTags(ctx, output.TagList), nil @@ -39,7 +39,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri tags, err := listTags(ctx, meta.(*conns.AWSClient).Route53DomainsClient(ctx), identifier) if err != nil { - return err + return smarterr.NewError(err) } if inContext, ok := tftags.FromContext(ctx); ok { @@ -126,7 +126,7 @@ func updateTags(ctx context.Context, conn *route53domains.Client, identifier str _, err := conn.DeleteTagsForDomain(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("untagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } @@ -141,7 +141,7 @@ func updateTags(ctx context.Context, conn *route53domains.Client, identifier str _, err := conn.UpdateTagsForDomain(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("tagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } diff --git a/internal/service/route53profiles/service_endpoint_resolver_gen.go b/internal/service/route53profiles/service_endpoint_resolver_gen.go index 47b741a48264..760bbe696fed 100644 --- a/internal/service/route53profiles/service_endpoint_resolver_gen.go +++ b/internal/service/route53profiles/service_endpoint_resolver_gen.go @@ -62,7 +62,7 @@ func (r resolverV2) ResolveEndpoint(ctx context.Context, params route53profiles. 
}) params.UseFIPS = aws.Bool(false) } else { - err = fmt.Errorf("looking up route53profiles endpoint %q: %s", hostname, err) + err = fmt.Errorf("looking up route53profiles endpoint %q: %w", hostname, err) return } } else { diff --git a/internal/service/route53profiles/service_package_gen.go b/internal/service/route53profiles/service_package_gen.go index 009d6080431b..bd3fb8515895 100644 --- a/internal/service/route53profiles/service_package_gen.go +++ b/internal/service/route53profiles/service_package_gen.go @@ -7,7 +7,6 @@ import ( "unique" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/route53profiles" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -89,7 +88,7 @@ func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) ( func(o *route53profiles.Options) { if inContext, ok := conns.FromContext(ctx); ok && inContext.VCREnabled() { tflog.Info(ctx, "overriding retry behavior to immediately return VCR errors") - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(vcr.InteractionNotFoundRetryableFunc)) + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), vcr.InteractionNotFoundRetryableFunc) } }, withExtraOptions(ctx, p, config), diff --git a/internal/service/route53profiles/sweep.go b/internal/service/route53profiles/sweep.go index 6717cc0dcb19..e9292a332dc5 100644 --- a/internal/service/route53profiles/sweep.go +++ b/internal/service/route53profiles/sweep.go @@ -35,7 +35,7 @@ func sweepProfiles(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.Route53ProfilesClient(ctx) input := &route53profiles.ListProfilesInput{} @@ -71,7 +71,7 @@ func 
sweepProfileAssociations(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.Route53ProfilesClient(ctx) input := &route53profiles.ListProfileAssociationsInput{} diff --git a/internal/service/route53profiles/tags_gen.go b/internal/service/route53profiles/tags_gen.go index b632ae88358c..e5f555a0a580 100644 --- a/internal/service/route53profiles/tags_gen.go +++ b/internal/service/route53profiles/tags_gen.go @@ -3,8 +3,8 @@ package route53profiles import ( "context" - "fmt" + "github.com/YakDriver/smarterr" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/route53profiles" "github.com/hashicorp/terraform-plugin-log/tflog" @@ -26,7 +26,7 @@ func listTags(ctx context.Context, conn *route53profiles.Client, identifier stri output, err := conn.ListTagsForResource(ctx, &input, optFns...) if err != nil { - return tftags.New(ctx, nil), err + return tftags.New(ctx, nil), smarterr.NewError(err) } return keyValueTags(ctx, output.Tags), nil @@ -38,7 +38,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri tags, err := listTags(ctx, meta.(*conns.AWSClient).Route53ProfilesClient(ctx), identifier) if err != nil { - return err + return smarterr.NewError(err) } if inContext, ok := tftags.FromContext(ctx); ok { @@ -99,7 +99,7 @@ func updateTags(ctx context.Context, conn *route53profiles.Client, identifier st _, err := conn.UntagResource(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("untagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } @@ -114,7 +114,7 @@ func updateTags(ctx context.Context, conn *route53profiles.Client, identifier st _, err := conn.TagResource(ctx, &input, optFns...) 
if err != nil { - return fmt.Errorf("tagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } diff --git a/internal/service/route53recoverycontrolconfig/cluster.go b/internal/service/route53recoverycontrolconfig/cluster.go index b7350dad3da0..e7d1e2801dd6 100644 --- a/internal/service/route53recoverycontrolconfig/cluster.go +++ b/internal/service/route53recoverycontrolconfig/cluster.go @@ -15,17 +15,21 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/enum" "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" + tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/names" ) // @SDKResource("aws_route53recoverycontrolconfig_cluster", name="Cluster") +// @Tags(identifierAttribute="arn") func resourceCluster() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceClusterCreate, ReadWithoutTimeout: resourceClusterRead, + UpdateWithoutTimeout: resourceClusterUpdate, DeleteWithoutTimeout: resourceClusterDelete, Importer: &schema.ResourceImporter{ StateContext: schema.ImportStatePassthroughContext, @@ -57,10 +61,18 @@ func resourceCluster() *schema.Resource { Required: true, ForceNew: true, }, + "network_type": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateDiagFunc: enum.Validate[awstypes.NetworkType](), + }, names.AttrStatus: { Type: schema.TypeString, Computed: true, }, + names.AttrTags: tftags.TagsSchema(), + names.AttrTagsAll: tftags.TagsSchemaComputed(), }, } } @@ -74,6 +86,10 @@ func resourceClusterCreate(ctx context.Context, d *schema.ResourceData, meta any ClusterName: 
aws.String(d.Get(names.AttrName).(string)), } + if v, ok := d.GetOk("network_type"); ok { + input.NetworkType = awstypes.NetworkType(v.(string)) + } + output, err := conn.CreateCluster(ctx, input) if err != nil { @@ -91,6 +107,10 @@ func resourceClusterCreate(ctx context.Context, d *schema.ResourceData, meta any return sdkdiag.AppendErrorf(diags, "waiting for Route53 Recovery Control Config Cluster (%s) to be Deployed: %s", d.Id(), err) } + if err := createTags(ctx, conn, d.Id(), getTagsIn(ctx)); err != nil { + return sdkdiag.AppendErrorf(diags, "setting Route53 Recovery Control Config Cluster (%s) tags: %s", d.Id(), err) + } + return append(diags, resourceClusterRead(ctx, d, meta)...) } @@ -112,6 +132,7 @@ func resourceClusterRead(ctx context.Context, d *schema.ResourceData, meta any) d.Set(names.AttrARN, output.ClusterArn) d.Set(names.AttrName, output.Name) + d.Set("network_type", output.NetworkType) d.Set(names.AttrStatus, output.Status) if err := d.Set("cluster_endpoints", flattenClusterEndpoints(output.ClusterEndpoints)); err != nil { @@ -121,6 +142,37 @@ func resourceClusterRead(ctx context.Context, d *schema.ResourceData, meta any) return diags } +func resourceClusterUpdate(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { + var diags diag.Diagnostics + conn := meta.(*conns.AWSClient).Route53RecoveryControlConfigClient(ctx) + + if d.HasChangesExcept(names.AttrTags, names.AttrTagsAll) { + input := &r53rcc.UpdateClusterInput{ + ClusterArn: aws.String(d.Id()), + } + + if d.HasChanges("network_type") { + input.NetworkType = awstypes.NetworkType(d.Get("network_type").(string)) + } + + output, err := conn.UpdateCluster(ctx, input) + + if err != nil { + return sdkdiag.AppendErrorf(diags, "updating Route53 Recovery Control Config Cluster: %s", err) + } + + if output == nil || output.Cluster == nil { + return sdkdiag.AppendErrorf(diags, "updating Route53 Recovery Control Config Cluster: empty response") + } + + if _, err := 
waitClusterUpdated(ctx, conn, d.Id()); err != nil { + return sdkdiag.AppendErrorf(diags, "waiting for Route53 Recovery Control Config Cluster (%s) to be Updated: %s", d.Id(), err) + } + } + + return append(diags, resourceClusterRead(ctx, d, meta)...) +} + func resourceClusterDelete(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { var diags diag.Diagnostics conn := meta.(*conns.AWSClient).Route53RecoveryControlConfigClient(ctx) diff --git a/internal/service/route53recoverycontrolconfig/cluster_test.go b/internal/service/route53recoverycontrolconfig/cluster_test.go index 81a2444a42aa..ff5d597bb4d0 100644 --- a/internal/service/route53recoverycontrolconfig/cluster_test.go +++ b/internal/service/route53recoverycontrolconfig/cluster_test.go @@ -8,8 +8,10 @@ import ( "fmt" "testing" + awstypes "github.com/aws/aws-sdk-go-v2/service/route53recoverycontrolconfig/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/plancheck" "github.com/hashicorp/terraform-plugin-testing/terraform" "github.com/hashicorp/terraform-provider-aws/internal/acctest" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -39,6 +41,9 @@ func testAccCluster_basic(t *testing.T) { resource.TestCheckResourceAttr(resourceName, names.AttrName, rName), resource.TestCheckResourceAttr(resourceName, names.AttrStatus, "DEPLOYED"), resource.TestCheckResourceAttr(resourceName, "cluster_endpoints.#", "5"), + resource.TestCheckResourceAttr(resourceName, "network_type", string(awstypes.NetworkTypeIpv4)), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, "0"), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsAllPercent, "0"), ), }, { @@ -51,6 +56,125 @@ func testAccCluster_basic(t *testing.T) { }) } +func testAccCluster_networkType(t *testing.T) { + ctx := acctest.Context(t) + rName := 
sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_route53recoverycontrolconfig_cluster.test" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, names.Route53RecoveryControlConfigEndpointID) + }, + ErrorCheck: acctest.ErrorCheck(t, names.Route53RecoveryControlConfigServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckClusterDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccClusterConfig_networkType(rName, string(awstypes.NetworkTypeDualstack)), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + }, + }, + Check: resource.ComposeTestCheckFunc( + testAccCheckClusterExists(ctx, resourceName), + resource.TestCheckResourceAttr(resourceName, names.AttrName, rName), + resource.TestCheckResourceAttr(resourceName, names.AttrStatus, "DEPLOYED"), + resource.TestCheckResourceAttr(resourceName, "cluster_endpoints.#", "5"), + resource.TestCheckResourceAttr(resourceName, "network_type", string(awstypes.NetworkTypeDualstack)), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"cluster_endpoints"}, + }, + { + Config: testAccClusterConfig_networkType(rName, string(awstypes.NetworkTypeIpv4)), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + }, + }, + Check: resource.ComposeTestCheckFunc( + testAccCheckClusterExists(ctx, resourceName), + resource.TestCheckResourceAttr(resourceName, names.AttrName, rName), + resource.TestCheckResourceAttr(resourceName, names.AttrStatus, "DEPLOYED"), + resource.TestCheckResourceAttr(resourceName, "cluster_endpoints.#", "5"), + resource.TestCheckResourceAttr(resourceName, "network_type", 
string(awstypes.NetworkTypeIpv4)), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"cluster_endpoints"}, + }, + }, + }) +} + +func testAccCluster_tags(t *testing.T) { + ctx := acctest.Context(t) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_route53recoverycontrolconfig_cluster.test" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, names.Route53RecoveryControlConfigEndpointID) + }, + ErrorCheck: acctest.ErrorCheck(t, names.Route53RecoveryControlConfigServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckClusterDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccClusterConfig_tags1(rName, acctest.CtKey1, acctest.CtValue1), + Check: resource.ComposeTestCheckFunc( + testAccCheckClusterExists(ctx, resourceName), + resource.TestCheckResourceAttr(resourceName, names.AttrName, rName), + resource.TestCheckResourceAttr(resourceName, names.AttrStatus, "DEPLOYED"), + resource.TestCheckResourceAttr(resourceName, "cluster_endpoints.#", "5"), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, "1"), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey1, acctest.CtValue1), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"cluster_endpoints"}, + }, + + { + Config: testAccClusterConfig_tags2(rName, acctest.CtKey1, acctest.CtValue1Updated, acctest.CtKey2, acctest.CtValue2), + Check: resource.ComposeTestCheckFunc( + testAccCheckClusterExists(ctx, resourceName), + resource.TestCheckResourceAttr(resourceName, names.AttrName, rName), + resource.TestCheckResourceAttr(resourceName, names.AttrStatus, "DEPLOYED"), + resource.TestCheckResourceAttr(resourceName, "cluster_endpoints.#", "5"), + 
resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, "2"), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey1, acctest.CtValue1Updated), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey2, acctest.CtValue2), + ), + }, + { + Config: testAccClusterConfig_tags1(rName, acctest.CtKey2, acctest.CtValue2), + Check: resource.ComposeTestCheckFunc( + testAccCheckClusterExists(ctx, resourceName), + resource.TestCheckResourceAttr(resourceName, names.AttrName, rName), + resource.TestCheckResourceAttr(resourceName, names.AttrStatus, "DEPLOYED"), + resource.TestCheckResourceAttr(resourceName, "cluster_endpoints.#", "5"), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, "1"), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey2, acctest.CtValue2), + ), + }, + }, + }) +} + func testAccCluster_disappears(t *testing.T) { ctx := acctest.Context(t) rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -125,3 +249,35 @@ resource "aws_route53recoverycontrolconfig_cluster" "test" { } `, rName) } + +func testAccClusterConfig_networkType(rName, networkType string) string { + return fmt.Sprintf(` +resource "aws_route53recoverycontrolconfig_cluster" "test" { + name = %[1]q + network_type = %[2]q +} +`, rName, networkType) +} + +func testAccClusterConfig_tags1(rName, tagKey1, tagValue1 string) string { + return fmt.Sprintf(` +resource "aws_route53recoverycontrolconfig_cluster" "test" { + name = %[1]q + tags = { + %[2]q = %[3]q + } +} +`, rName, tagKey1, tagValue1) +} + +func testAccClusterConfig_tags2(rName, tagKey1, tagValue1, tagKey2, tagValue2 string) string { + return fmt.Sprintf(` +resource "aws_route53recoverycontrolconfig_cluster" "test" { + name = %[1]q + tags = { + %[2]q = %[3]q + %[4]q = %[5]q + } +} +`, rName, tagKey1, tagValue1, tagKey2, tagValue2) +} diff --git a/internal/service/route53recoverycontrolconfig/control_panel.go b/internal/service/route53recoverycontrolconfig/control_panel.go 
index 5cf0d3180604..877e34d6f03d 100644 --- a/internal/service/route53recoverycontrolconfig/control_panel.go +++ b/internal/service/route53recoverycontrolconfig/control_panel.go @@ -17,11 +17,13 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" + tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/names" ) // @SDKResource("aws_route53recoverycontrolconfig_control_panel", name="Control Panel") +// @Tags(identifierAttribute="arn") func resourceControlPanel() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceControlPanelCreate, @@ -57,6 +59,8 @@ func resourceControlPanel() *schema.Resource { Type: schema.TypeString, Computed: true, }, + names.AttrTags: tftags.TagsSchema(), + names.AttrTagsAll: tftags.TagsSchemaComputed(), }, } } @@ -88,6 +92,10 @@ func resourceControlPanelCreate(ctx context.Context, d *schema.ResourceData, met return sdkdiag.AppendErrorf(diags, "waiting for Route53 Recovery Control Config Control Panel (%s) to be Deployed: %s", d.Id(), err) } + if err := createTags(ctx, conn, d.Id(), getTagsIn(ctx)); err != nil { + return sdkdiag.AppendErrorf(diags, "setting Route53 Recovery Control Config Control Panel (%s) tags: %s", d.Id(), err) + } + return append(diags, resourceControlPanelRead(ctx, d, meta)...) 
} @@ -121,15 +129,17 @@ func resourceControlPanelUpdate(ctx context.Context, d *schema.ResourceData, met var diags diag.Diagnostics conn := meta.(*conns.AWSClient).Route53RecoveryControlConfigClient(ctx) - input := &r53rcc.UpdateControlPanelInput{ - ControlPanelName: aws.String(d.Get(names.AttrName).(string)), - ControlPanelArn: aws.String(d.Get(names.AttrARN).(string)), - } + if d.HasChangesExcept(names.AttrTags, names.AttrTagsAll) { + input := &r53rcc.UpdateControlPanelInput{ + ControlPanelName: aws.String(d.Get(names.AttrName).(string)), + ControlPanelArn: aws.String(d.Get(names.AttrARN).(string)), + } - _, err := conn.UpdateControlPanel(ctx, input) + _, err := conn.UpdateControlPanel(ctx, input) - if err != nil { - return sdkdiag.AppendErrorf(diags, "updating Route53 Recovery Control Config Control Panel: %s", err) + if err != nil { + return sdkdiag.AppendErrorf(diags, "updating Route53 Recovery Control Config Control Panel: %s", err) + } } return append(diags, resourceControlPanelRead(ctx, d, meta)...) 
diff --git a/internal/service/route53recoverycontrolconfig/control_panel_test.go b/internal/service/route53recoverycontrolconfig/control_panel_test.go index 69e272a7a22e..b0b383be6b65 100644 --- a/internal/service/route53recoverycontrolconfig/control_panel_test.go +++ b/internal/service/route53recoverycontrolconfig/control_panel_test.go @@ -40,6 +40,8 @@ func testAccControlPanel_basic(t *testing.T) { resource.TestCheckResourceAttr(resourceName, names.AttrStatus, "DEPLOYED"), resource.TestCheckResourceAttr(resourceName, "default_control_panel", acctest.CtFalse), resource.TestCheckResourceAttr(resourceName, "routing_control_count", "0"), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, "0"), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsAllPercent, "0"), ), }, { @@ -51,6 +53,65 @@ func testAccControlPanel_basic(t *testing.T) { }) } +func testAccControlPanel_tags(t *testing.T) { + ctx := acctest.Context(t) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_route53recoverycontrolconfig_control_panel.test" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, names.Route53RecoveryControlConfigEndpointID) + }, + ErrorCheck: acctest.ErrorCheck(t, names.Route53RecoveryControlConfigServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckControlPanelDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccControlPanelConfig_tags1(rName, acctest.CtKey1, acctest.CtValue1), + Check: resource.ComposeTestCheckFunc( + testAccCheckControlPanelExists(ctx, resourceName), + resource.TestCheckResourceAttr(resourceName, names.AttrName, rName), + resource.TestCheckResourceAttr(resourceName, names.AttrStatus, "DEPLOYED"), + resource.TestCheckResourceAttr(resourceName, "default_control_panel", acctest.CtFalse), + resource.TestCheckResourceAttr(resourceName, "routing_control_count", "0"), + 
resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, "1"), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey1, acctest.CtValue1), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccControlPanelConfig_tags2(rName, acctest.CtKey1, acctest.CtValue1Updated, acctest.CtKey2, acctest.CtValue2), + Check: resource.ComposeTestCheckFunc( + testAccCheckControlPanelExists(ctx, resourceName), + resource.TestCheckResourceAttr(resourceName, names.AttrName, rName), + resource.TestCheckResourceAttr(resourceName, names.AttrStatus, "DEPLOYED"), + resource.TestCheckResourceAttr(resourceName, "default_control_panel", acctest.CtFalse), + resource.TestCheckResourceAttr(resourceName, "routing_control_count", "0"), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, "2"), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey1, acctest.CtValue1Updated), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey2, acctest.CtValue2), + ), + }, + { + Config: testAccControlPanelConfig_tags1(rName, acctest.CtKey2, acctest.CtValue2), + Check: resource.ComposeTestCheckFunc( + testAccCheckControlPanelExists(ctx, resourceName), + resource.TestCheckResourceAttr(resourceName, names.AttrName, rName), + resource.TestCheckResourceAttr(resourceName, names.AttrStatus, "DEPLOYED"), + resource.TestCheckResourceAttr(resourceName, "default_control_panel", acctest.CtFalse), + resource.TestCheckResourceAttr(resourceName, "routing_control_count", "0"), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, "1"), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey2, acctest.CtValue2), + ), + }, + }, + }) +} func testAccControlPanel_disappears(t *testing.T) { ctx := acctest.Context(t) rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -134,3 +195,28 @@ resource "aws_route53recoverycontrolconfig_control_panel" "test" { } `, rName)) } + +func 
testAccControlPanelConfig_tags1(rName, tagKey1, tagValue1 string) string { + return acctest.ConfigCompose(testAccClusterSetUp(rName), fmt.Sprintf(` +resource "aws_route53recoverycontrolconfig_control_panel" "test" { + name = %[1]q + cluster_arn = aws_route53recoverycontrolconfig_cluster.test.arn + tags = { + %[2]q = %[3]q + } +} +`, rName, tagKey1, tagValue1)) +} + +func testAccControlPanelConfig_tags2(rName, tagKey1, tagValue1, tagKey2, tagValue2 string) string { + return acctest.ConfigCompose(testAccClusterSetUp(rName), fmt.Sprintf(` +resource "aws_route53recoverycontrolconfig_control_panel" "test" { + name = %[1]q + cluster_arn = aws_route53recoverycontrolconfig_cluster.test.arn + tags = { + %[2]q = %[3]q + %[4]q = %[5]q + } +} +`, rName, tagKey1, tagValue1, tagKey2, tagValue2)) +} diff --git a/internal/service/route53recoverycontrolconfig/generate.go b/internal/service/route53recoverycontrolconfig/generate.go index 18b8964a30ff..6dadefa662d5 100644 --- a/internal/service/route53recoverycontrolconfig/generate.go +++ b/internal/service/route53recoverycontrolconfig/generate.go @@ -1,6 +1,7 @@ // Copyright (c) HashiCorp, Inc. // SPDX-License-Identifier: MPL-2.0 +//go:generate go run ../../generate/tags/main.go -ServiceTagsMap -CreateTags -ListTags -UpdateTags -KVTValues //go:generate go run ../../generate/servicepackage/main.go // ONLY generate directives and package declaration! Do not add anything else to this file. 
diff --git a/internal/service/route53recoverycontrolconfig/route53recoverycontrolconfig_test.go b/internal/service/route53recoverycontrolconfig/route53recoverycontrolconfig_test.go index fd4d762a5158..4f26adf58d75 100644 --- a/internal/service/route53recoverycontrolconfig/route53recoverycontrolconfig_test.go +++ b/internal/service/route53recoverycontrolconfig/route53recoverycontrolconfig_test.go @@ -18,10 +18,13 @@ func TestAccRoute53RecoveryControlConfig_serial(t *testing.T) { "Cluster": { acctest.CtBasic: testAccCluster_basic, acctest.CtDisappears: testAccCluster_disappears, + "networkType": testAccCluster_networkType, + "tags": testAccCluster_tags, }, "ControlPanel": { acctest.CtBasic: testAccControlPanel_basic, acctest.CtDisappears: testAccControlPanel_disappears, + "tags": testAccControlPanel_tags, }, "RoutingControl": { acctest.CtBasic: testAccRoutingControl_basic, @@ -32,6 +35,7 @@ func TestAccRoute53RecoveryControlConfig_serial(t *testing.T) { "assertionRule": testAccSafetyRule_assertionRule, "gatingRule": testAccSafetyRule_gatingRule, acctest.CtDisappears: testAccSafetyRule_disappears, + "tags": testAccSafetyRule_tags, }, } diff --git a/internal/service/route53recoverycontrolconfig/safety_rule.go b/internal/service/route53recoverycontrolconfig/safety_rule.go index ce1f9e18f207..7c2be625d463 100644 --- a/internal/service/route53recoverycontrolconfig/safety_rule.go +++ b/internal/service/route53recoverycontrolconfig/safety_rule.go @@ -19,11 +19,13 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" "github.com/hashicorp/terraform-provider-aws/internal/flex" + tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/names" ) // @SDKResource("aws_route53recoverycontrolconfig_safety_rule", name="Safety Rule") +// @Tags(identifierAttribute="arn") func 
resourceSafetyRule() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceSafetyRuleCreate, @@ -99,6 +101,8 @@ func resourceSafetyRule() *schema.Resource { Type: schema.TypeString, Computed: true, }, + names.AttrTags: tftags.TagsSchema(), + names.AttrTagsAll: tftags.TagsSchemaComputed(), "target_controls": { Type: schema.TypeList, Optional: true, @@ -271,6 +275,10 @@ func createAssertionRule(ctx context.Context, d *schema.ResourceData, meta any) return sdkdiag.AppendErrorf(diags, "waiting for Route53 Recovery Control Config Assertion Rule (%s) to be Deployed: %s", d.Id(), err) } + if err := createTags(ctx, conn, d.Id(), getTagsIn(ctx)); err != nil { + return sdkdiag.AppendErrorf(diags, "setting Route53 Recovery Control Config Assertion Rule (%s) tags: %s", d.Id(), err) + } + return append(diags, resourceSafetyRuleRead(ctx, d, meta)...) } @@ -306,7 +314,11 @@ func createGatingRule(ctx context.Context, d *schema.ResourceData, meta any) dia d.SetId(aws.ToString(result.SafetyRuleArn)) if _, err := waitSafetyRuleCreated(ctx, conn, d.Id()); err != nil { - return sdkdiag.AppendErrorf(diags, "waiting for Route53 Recovery Control Config Assertion Rule (%s) to be Deployed: %s", d.Id(), err) + return sdkdiag.AppendErrorf(diags, "waiting for Route53 Recovery Control Config Gating Rule (%s) to be Deployed: %s", d.Id(), err) + } + + if err := createTags(ctx, conn, d.Id(), getTagsIn(ctx)); err != nil { + return sdkdiag.AppendErrorf(diags, "setting Route53 Recovery Control Config Gating Rule (%s) tags: %s", d.Id(), err) } return append(diags, resourceSafetyRuleRead(ctx, d, meta)...) 
@@ -316,60 +328,63 @@ func updateAssertionRule(ctx context.Context, d *schema.ResourceData, meta any) var diags diag.Diagnostics conn := meta.(*conns.AWSClient).Route53RecoveryControlConfigClient(ctx) - assertionRuleUpdate := &awstypes.AssertionRuleUpdate{ - SafetyRuleArn: aws.String(d.Get(names.AttrARN).(string)), - } + if d.HasChangesExcept(names.AttrTags, names.AttrTagsAll) { + assertionRuleUpdate := &awstypes.AssertionRuleUpdate{ + SafetyRuleArn: aws.String(d.Get(names.AttrARN).(string)), + } - if d.HasChange(names.AttrName) { - assertionRuleUpdate.Name = aws.String(d.Get(names.AttrName).(string)) - } + if d.HasChange(names.AttrName) { + assertionRuleUpdate.Name = aws.String(d.Get(names.AttrName).(string)) + } - if d.HasChange("wait_period_ms") { - assertionRuleUpdate.WaitPeriodMs = aws.Int32(int32(d.Get("wait_period_ms").(int))) - } + if d.HasChange("wait_period_ms") { + assertionRuleUpdate.WaitPeriodMs = aws.Int32(int32(d.Get("wait_period_ms").(int))) + } - input := &r53rcc.UpdateSafetyRuleInput{ - AssertionRuleUpdate: assertionRuleUpdate, - } + input := &r53rcc.UpdateSafetyRuleInput{ + AssertionRuleUpdate: assertionRuleUpdate, + } - _, err := conn.UpdateSafetyRule(ctx, input) + _, err := conn.UpdateSafetyRule(ctx, input) - if err != nil { - return sdkdiag.AppendErrorf(diags, "updating Route53 Recovery Control Config Assertion Rule: %s", err) + if err != nil { + return sdkdiag.AppendErrorf(diags, "updating Route53 Recovery Control Config Assertion Rule: %s", err) + } } - return append(diags, sdkdiag.WrapDiagsf(resourceControlPanelRead(ctx, d, meta), "updating Route53 Recovery Control Config Assertion Rule")...) + return append(diags, sdkdiag.WrapDiagsf(resourceSafetyRuleRead(ctx, d, meta), "updating Route53 Recovery Control Config Assertion Rule")...) 
} func updateGatingRule(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { var diags diag.Diagnostics conn := meta.(*conns.AWSClient).Route53RecoveryControlConfigClient(ctx) - gatingRuleUpdate := &awstypes.GatingRuleUpdate{ - SafetyRuleArn: aws.String(d.Get(names.AttrARN).(string)), - } + if d.HasChangesExcept(names.AttrTags, names.AttrTagsAll) { + gatingRuleUpdate := &awstypes.GatingRuleUpdate{ + SafetyRuleArn: aws.String(d.Get(names.AttrARN).(string)), + } - if d.HasChange(names.AttrName) { - gatingRuleUpdate.Name = aws.String(d.Get(names.AttrName).(string)) - } + if d.HasChange(names.AttrName) { + gatingRuleUpdate.Name = aws.String(d.Get(names.AttrName).(string)) + } - if d.HasChange("wait_period_ms") { - gatingRuleUpdate.WaitPeriodMs = aws.Int32(int32(d.Get("wait_period_ms").(int))) - } + if d.HasChange("wait_period_ms") { + gatingRuleUpdate.WaitPeriodMs = aws.Int32(int32(d.Get("wait_period_ms").(int))) + } - input := &r53rcc.UpdateSafetyRuleInput{ - GatingRuleUpdate: gatingRuleUpdate, - } + input := &r53rcc.UpdateSafetyRuleInput{ + GatingRuleUpdate: gatingRuleUpdate, + } - _, err := conn.UpdateSafetyRule(ctx, input) + _, err := conn.UpdateSafetyRule(ctx, input) - if err != nil { - return sdkdiag.AppendErrorf(diags, "updating Route53 Recovery Control Config Gating Rule: %s", err) + if err != nil { + return sdkdiag.AppendErrorf(diags, "updating Route53 Recovery Control Config Gating Rule: %s", err) + } } - return append(diags, sdkdiag.WrapDiagsf(resourceControlPanelRead(ctx, d, meta), "updating Route53 Recovery Control Config Gating Rule")...) + return append(diags, sdkdiag.WrapDiagsf(resourceSafetyRuleRead(ctx, d, meta), "updating Route53 Recovery Control Config Gating Rule")...) 
} - func findSafetyRuleByARN(ctx context.Context, conn *r53rcc.Client, arn string) (*r53rcc.DescribeSafetyRuleOutput, error) { input := &r53rcc.DescribeSafetyRuleInput{ SafetyRuleArn: aws.String(arn), diff --git a/internal/service/route53recoverycontrolconfig/safety_rule_test.go b/internal/service/route53recoverycontrolconfig/safety_rule_test.go index 0e74d801b115..2c7ad180fa97 100644 --- a/internal/service/route53recoverycontrolconfig/safety_rule_test.go +++ b/internal/service/route53recoverycontrolconfig/safety_rule_test.go @@ -41,6 +41,8 @@ func testAccSafetyRule_assertionRule(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "wait_period_ms", "5000"), resource.TestCheckResourceAttr(resourceName, "asserted_controls.#", "1"), resource.TestCheckResourceAttrPair(resourceName, "control_panel_arn", "aws_route53recoverycontrolconfig_control_panel.test", names.AttrARN), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, "0"), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsAllPercent, "0"), ), }, { @@ -113,6 +115,72 @@ func testAccSafetyRule_gatingRule(t *testing.T) { }) } +func testAccSafetyRule_tags(t *testing.T) { + ctx := acctest.Context(t) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_route53recoverycontrolconfig_safety_rule.test" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, names.Route53RecoveryControlConfigEndpointID) + }, + ErrorCheck: acctest.ErrorCheck(t, names.Route53RecoveryControlConfigServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckSafetyRuleDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccSafetyRuleConfig_routingControl_tags1(rName, acctest.CtKey1, acctest.CtValue1), + Check: resource.ComposeTestCheckFunc( + testAccCheckSafetyRuleExists(ctx, resourceName), + resource.TestCheckResourceAttr(resourceName, names.AttrName, 
rName), + resource.TestCheckResourceAttr(resourceName, names.AttrStatus, "DEPLOYED"), + resource.TestCheckResourceAttr(resourceName, "wait_period_ms", "5000"), + resource.TestCheckResourceAttr(resourceName, "target_controls.#", "1"), + resource.TestCheckResourceAttr(resourceName, "gating_controls.#", "1"), + resource.TestCheckResourceAttrPair(resourceName, "control_panel_arn", "aws_route53recoverycontrolconfig_control_panel.test", names.AttrARN), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, "1"), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey1, acctest.CtValue1), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccSafetyRuleConfig_routingControl_tags2(rName, acctest.CtKey1, acctest.CtValue1Updated, acctest.CtKey2, acctest.CtValue2), + Check: resource.ComposeTestCheckFunc( + testAccCheckSafetyRuleExists(ctx, resourceName), + resource.TestCheckResourceAttr(resourceName, names.AttrName, rName), + resource.TestCheckResourceAttr(resourceName, names.AttrStatus, "DEPLOYED"), + resource.TestCheckResourceAttr(resourceName, "wait_period_ms", "5000"), + resource.TestCheckResourceAttr(resourceName, "target_controls.#", "1"), + resource.TestCheckResourceAttr(resourceName, "gating_controls.#", "1"), + resource.TestCheckResourceAttrPair(resourceName, "control_panel_arn", "aws_route53recoverycontrolconfig_control_panel.test", names.AttrARN), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, "2"), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey1, acctest.CtValue1Updated), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey2, acctest.CtValue2), + ), + }, + { + Config: testAccSafetyRuleConfig_routingControl_tags1(rName, acctest.CtKey2, acctest.CtValue2), + Check: resource.ComposeTestCheckFunc( + testAccCheckSafetyRuleExists(ctx, resourceName), + resource.TestCheckResourceAttr(resourceName, names.AttrName, rName), + 
resource.TestCheckResourceAttr(resourceName, names.AttrStatus, "DEPLOYED"), + resource.TestCheckResourceAttr(resourceName, "wait_period_ms", "5000"), + resource.TestCheckResourceAttr(resourceName, "target_controls.#", "1"), + resource.TestCheckResourceAttr(resourceName, "gating_controls.#", "1"), + resource.TestCheckResourceAttrPair(resourceName, "control_panel_arn", "aws_route53recoverycontrolconfig_control_panel.test", names.AttrARN), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, "1"), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey2, acctest.CtValue2), + ), + }, + }, + }) +} + func testAccCheckSafetyRuleDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { conn := acctest.Provider.Meta().(*conns.AWSClient).Route53RecoveryControlConfigClient(ctx) @@ -218,3 +286,76 @@ resource "aws_route53recoverycontrolconfig_safety_rule" "test" { } `, rName) } + +func testAccSafetyRuleConfig_routingControl_tags1(rName, tagKey1, tagValue1 string) string { + return fmt.Sprintf(` +resource "aws_route53recoverycontrolconfig_cluster" "test" { + name = %[1]q +} + +resource "aws_route53recoverycontrolconfig_control_panel" "test" { + name = %[1]q + cluster_arn = aws_route53recoverycontrolconfig_cluster.test.arn +} + +resource "aws_route53recoverycontrolconfig_routing_control" "test" { + name = %[1]q + cluster_arn = aws_route53recoverycontrolconfig_cluster.test.arn + control_panel_arn = aws_route53recoverycontrolconfig_control_panel.test.arn +} + +resource "aws_route53recoverycontrolconfig_safety_rule" "test" { + name = %[1]q + control_panel_arn = aws_route53recoverycontrolconfig_control_panel.test.arn + wait_period_ms = 5000 + gating_controls = [aws_route53recoverycontrolconfig_routing_control.test.arn] + target_controls = [aws_route53recoverycontrolconfig_routing_control.test.arn] + + rule_config { + inverted = false + threshold = 0 + type = "AND" + } + tags = { + %[2]q = %[3]q + } +} +`, rName, tagKey1, 
tagValue1) +} + +func testAccSafetyRuleConfig_routingControl_tags2(rName, tagKey1, tagValue1, tagKey2, tagValue2 string) string { + return fmt.Sprintf(` +resource "aws_route53recoverycontrolconfig_cluster" "test" { + name = %[1]q +} + +resource "aws_route53recoverycontrolconfig_control_panel" "test" { + name = %[1]q + cluster_arn = aws_route53recoverycontrolconfig_cluster.test.arn +} + +resource "aws_route53recoverycontrolconfig_routing_control" "test" { + name = %[1]q + cluster_arn = aws_route53recoverycontrolconfig_cluster.test.arn + control_panel_arn = aws_route53recoverycontrolconfig_control_panel.test.arn +} + +resource "aws_route53recoverycontrolconfig_safety_rule" "test" { + name = %[1]q + control_panel_arn = aws_route53recoverycontrolconfig_control_panel.test.arn + wait_period_ms = 5000 + gating_controls = [aws_route53recoverycontrolconfig_routing_control.test.arn] + target_controls = [aws_route53recoverycontrolconfig_routing_control.test.arn] + + rule_config { + inverted = false + threshold = 0 + type = "AND" + } + tags = { + %[2]q = %[3]q + %[4]q = %[5]q + } +} +`, rName, tagKey1, tagValue1, tagKey2, tagValue2) +} diff --git a/internal/service/route53recoverycontrolconfig/service_endpoint_resolver_gen.go b/internal/service/route53recoverycontrolconfig/service_endpoint_resolver_gen.go index e2ba03523c01..0be4875ac319 100644 --- a/internal/service/route53recoverycontrolconfig/service_endpoint_resolver_gen.go +++ b/internal/service/route53recoverycontrolconfig/service_endpoint_resolver_gen.go @@ -62,7 +62,7 @@ func (r resolverV2) ResolveEndpoint(ctx context.Context, params route53recoveryc }) params.UseFIPS = aws.Bool(false) } else { - err = fmt.Errorf("looking up route53recoverycontrolconfig endpoint %q: %s", hostname, err) + err = fmt.Errorf("looking up route53recoverycontrolconfig endpoint %q: %w", hostname, err) return } } else { diff --git a/internal/service/route53recoverycontrolconfig/service_endpoints_gen_test.go 
b/internal/service/route53recoverycontrolconfig/service_endpoints_gen_test.go index e612ca2b8535..b6c25ffc7307 100644 --- a/internal/service/route53recoverycontrolconfig/service_endpoints_gen_test.go +++ b/internal/service/route53recoverycontrolconfig/service_endpoints_gen_test.go @@ -523,7 +523,7 @@ func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { ) } -var errCancelOperation = fmt.Errorf("Test: Canceling request") +var errCancelOperation = errors.New("Test: Canceling request") func addCancelRequestMiddleware() func(*middleware.Stack) error { return func(stack *middleware.Stack) error { diff --git a/internal/service/route53recoverycontrolconfig/service_package_gen.go b/internal/service/route53recoverycontrolconfig/service_package_gen.go index f3e05f391aa0..a79003fd959e 100644 --- a/internal/service/route53recoverycontrolconfig/service_package_gen.go +++ b/internal/service/route53recoverycontrolconfig/service_package_gen.go @@ -7,7 +7,6 @@ import ( "unique" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/route53recoverycontrolconfig" "github.com/hashicorp/aws-sdk-go-base/v2/endpoints" "github.com/hashicorp/terraform-plugin-log/tflog" @@ -37,13 +36,19 @@ func (p *servicePackage) SDKResources(ctx context.Context) []*inttypes.ServicePa Factory: resourceCluster, TypeName: "aws_route53recoverycontrolconfig_cluster", Name: "Cluster", - Region: unique.Make(inttypes.ResourceRegionDisabled()), + Tags: unique.Make(inttypes.ServicePackageResourceTags{ + IdentifierAttribute: names.AttrARN, + }), + Region: unique.Make(inttypes.ResourceRegionDisabled()), }, { Factory: resourceControlPanel, TypeName: "aws_route53recoverycontrolconfig_control_panel", Name: "Control Panel", - Region: unique.Make(inttypes.ResourceRegionDisabled()), + Tags: unique.Make(inttypes.ServicePackageResourceTags{ + IdentifierAttribute: names.AttrARN, + }), + Region: unique.Make(inttypes.ResourceRegionDisabled()), 
}, { Factory: resourceRoutingControl, @@ -55,7 +60,10 @@ func (p *servicePackage) SDKResources(ctx context.Context) []*inttypes.ServicePa Factory: resourceSafetyRule, TypeName: "aws_route53recoverycontrolconfig_safety_rule", Name: "Safety Rule", - Region: unique.Make(inttypes.ResourceRegionDisabled()), + Tags: unique.Make(inttypes.ServicePackageResourceTags{ + IdentifierAttribute: names.AttrARN, + }), + Region: unique.Make(inttypes.ResourceRegionDisabled()), }, } } @@ -83,7 +91,7 @@ func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) ( func(o *route53recoverycontrolconfig.Options) { if inContext, ok := conns.FromContext(ctx); ok && inContext.VCREnabled() { tflog.Info(ctx, "overriding retry behavior to immediately return VCR errors") - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(vcr.InteractionNotFoundRetryableFunc)) + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), vcr.InteractionNotFoundRetryableFunc) } }, func(o *route53recoverycontrolconfig.Options) { diff --git a/internal/service/route53recoverycontrolconfig/sweep.go b/internal/service/route53recoverycontrolconfig/sweep.go index 3acfc46dde7d..b794ab330ac0 100644 --- a/internal/service/route53recoverycontrolconfig/sweep.go +++ b/internal/service/route53recoverycontrolconfig/sweep.go @@ -48,7 +48,7 @@ func sweepClusters(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %w", err) + return fmt.Errorf("getting client: %w", err) } conn := client.Route53RecoveryControlConfigClient(ctx) input := &r53rcc.ListClustersInput{} @@ -89,7 +89,7 @@ func sweepControlPanels(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %w", err) + return fmt.Errorf("getting client: %w", err) } conn := 
client.Route53RecoveryControlConfigClient(ctx) input := &r53rcc.ListClustersInput{} @@ -154,7 +154,7 @@ func sweepRoutingControls(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %w", err) + return fmt.Errorf("getting client: %w", err) } conn := client.Route53RecoveryControlConfigClient(ctx) input := &r53rcc.ListClustersInput{} @@ -234,7 +234,7 @@ func sweepSafetyRules(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %w", err) + return fmt.Errorf("getting client: %w", err) } conn := client.Route53RecoveryControlConfigClient(ctx) input := &r53rcc.ListClustersInput{} diff --git a/internal/service/route53recoverycontrolconfig/tags_gen.go b/internal/service/route53recoverycontrolconfig/tags_gen.go new file mode 100644 index 000000000000..98e5fd46b7bd --- /dev/null +++ b/internal/service/route53recoverycontrolconfig/tags_gen.go @@ -0,0 +1,137 @@ +// Code generated by internal/generate/tags/main.go; DO NOT EDIT. +package route53recoverycontrolconfig + +import ( + "context" + + "github.com/YakDriver/smarterr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/route53recoverycontrolconfig" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/logging" + tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" + "github.com/hashicorp/terraform-provider-aws/names" +) + +// listTags lists route53recoverycontrolconfig service tags. +// The identifier is typically the Amazon Resource Name (ARN), although +// it may also be a different identifier depending on the service. 
+func listTags(ctx context.Context, conn *route53recoverycontrolconfig.Client, identifier string, optFns ...func(*route53recoverycontrolconfig.Options)) (tftags.KeyValueTags, error) { + input := route53recoverycontrolconfig.ListTagsForResourceInput{ + ResourceArn: aws.String(identifier), + } + + output, err := conn.ListTagsForResource(ctx, &input, optFns...) + + if err != nil { + return tftags.New(ctx, nil), smarterr.NewError(err) + } + + return keyValueTags(ctx, output.Tags), nil +} + +// ListTags lists route53recoverycontrolconfig service tags and set them in Context. +// It is called from outside this package. +func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier string) error { + tags, err := listTags(ctx, meta.(*conns.AWSClient).Route53RecoveryControlConfigClient(ctx), identifier) + + if err != nil { + return smarterr.NewError(err) + } + + if inContext, ok := tftags.FromContext(ctx); ok { + inContext.TagsOut = option.Some(tags) + } + + return nil +} + +// map[string]string handling + +// svcTags returns route53recoverycontrolconfig service tags. +func svcTags(tags tftags.KeyValueTags) map[string]string { + return tags.Map() +} + +// keyValueTags creates tftags.KeyValueTags from route53recoverycontrolconfig service tags. +func keyValueTags(ctx context.Context, tags map[string]string) tftags.KeyValueTags { + return tftags.New(ctx, tags) +} + +// getTagsIn returns route53recoverycontrolconfig service tags from Context. +// nil is returned if there are no input tags. +func getTagsIn(ctx context.Context) map[string]string { + if inContext, ok := tftags.FromContext(ctx); ok { + if tags := svcTags(inContext.TagsIn.UnwrapOrDefault()); len(tags) > 0 { + return tags + } + } + + return nil +} + +// setTagsOut sets route53recoverycontrolconfig service tags in Context. 
+func setTagsOut(ctx context.Context, tags map[string]string) { + if inContext, ok := tftags.FromContext(ctx); ok { + inContext.TagsOut = option.Some(keyValueTags(ctx, tags)) + } +} + +// createTags creates route53recoverycontrolconfig service tags for new resources. +func createTags(ctx context.Context, conn *route53recoverycontrolconfig.Client, identifier string, tags map[string]string, optFns ...func(*route53recoverycontrolconfig.Options)) error { + if len(tags) == 0 { + return nil + } + + return updateTags(ctx, conn, identifier, nil, tags, optFns...) +} + +// updateTags updates route53recoverycontrolconfig service tags. +// The identifier is typically the Amazon Resource Name (ARN), although +// it may also be a different identifier depending on the service. +func updateTags(ctx context.Context, conn *route53recoverycontrolconfig.Client, identifier string, oldTagsMap, newTagsMap any, optFns ...func(*route53recoverycontrolconfig.Options)) error { + oldTags := tftags.New(ctx, oldTagsMap) + newTags := tftags.New(ctx, newTagsMap) + + ctx = tflog.SetField(ctx, logging.KeyResourceId, identifier) + + removedTags := oldTags.Removed(newTags) + removedTags = removedTags.IgnoreSystem(names.Route53RecoveryControlConfig) + if len(removedTags) > 0 { + input := route53recoverycontrolconfig.UntagResourceInput{ + ResourceArn: aws.String(identifier), + TagKeys: removedTags.Keys(), + } + + _, err := conn.UntagResource(ctx, &input, optFns...) + + if err != nil { + return smarterr.NewError(err) + } + } + + updatedTags := oldTags.Updated(newTags) + updatedTags = updatedTags.IgnoreSystem(names.Route53RecoveryControlConfig) + if len(updatedTags) > 0 { + input := route53recoverycontrolconfig.TagResourceInput{ + ResourceArn: aws.String(identifier), + Tags: svcTags(updatedTags), + } + + _, err := conn.TagResource(ctx, &input, optFns...) + + if err != nil { + return smarterr.NewError(err) + } + } + + return nil +} + +// UpdateTags updates route53recoverycontrolconfig service tags. 
+// It is called from outside this package. +func (p *servicePackage) UpdateTags(ctx context.Context, meta any, identifier string, oldTags, newTags any) error { + return updateTags(ctx, meta.(*conns.AWSClient).Route53RecoveryControlConfigClient(ctx), identifier, oldTags, newTags) +} diff --git a/internal/service/route53recoverycontrolconfig/wait.go b/internal/service/route53recoverycontrolconfig/wait.go index 10ecdcf3a136..7b1c02bef8bc 100644 --- a/internal/service/route53recoverycontrolconfig/wait.go +++ b/internal/service/route53recoverycontrolconfig/wait.go @@ -14,7 +14,7 @@ import ( ) const ( - timeout = 60 * time.Second + timeout = 1800 * time.Second minTimeout = 5 * time.Second ) @@ -36,6 +36,24 @@ func waitClusterCreated(ctx context.Context, conn *r53rcc.Client, clusterArn str return nil, err } +func waitClusterUpdated(ctx context.Context, conn *r53rcc.Client, clusterArn string) (*awstypes.Cluster, error) { + stateConf := &retry.StateChangeConf{ + Pending: enum.Slice(awstypes.StatusPending), + Target: enum.Slice(awstypes.StatusDeployed), + Refresh: statusCluster(ctx, conn, clusterArn), + Timeout: timeout, + MinTimeout: minTimeout, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + + if output, ok := outputRaw.(*awstypes.Cluster); ok { + return output, err + } + + return nil, err +} + func waitClusterDeleted(ctx context.Context, conn *r53rcc.Client, clusterArn string) (*awstypes.Cluster, error) { stateConf := &retry.StateChangeConf{ Pending: enum.Slice(awstypes.StatusPendingDeletion), diff --git a/internal/service/route53recoveryreadiness/cell.go b/internal/service/route53recoveryreadiness/cell.go index 1306bdce5575..974837fd1ac5 100644 --- a/internal/service/route53recoveryreadiness/cell.go +++ b/internal/service/route53recoveryreadiness/cell.go @@ -157,19 +157,17 @@ func resourceCellDelete(ctx context.Context, d *schema.ResourceData, meta any) d return sdkdiag.AppendErrorf(diags, "deleting Route53 Recovery Readiness Cell (%s): %s", d.Id(), err) 
} - err = retry.RetryContext(ctx, d.Timeout(schema.TimeoutDelete), func() *retry.RetryError { + err = tfresource.Retry(ctx, d.Timeout(schema.TimeoutDelete), func(ctx context.Context) *tfresource.RetryError { _, err := findCellByName(ctx, conn, d.Id()) if err != nil { if tfresource.NotFound(err) { return nil } - return retry.NonRetryableError(err) + return tfresource.NonRetryableError(err) } - return retry.RetryableError(fmt.Errorf("Route 53 Recovery Readiness Cell (%s) still exists", d.Id())) + return tfresource.RetryableError(fmt.Errorf("Route 53 Recovery Readiness Cell (%s) still exists", d.Id())) }) - if tfresource.TimedOut(err) { - _, err = findCellByName(ctx, conn, d.Id()) - } + if err != nil { return sdkdiag.AppendErrorf(diags, "waiting for Route 53 Recovery Readiness Cell (%s) deletion: %s", d.Id(), err) } diff --git a/internal/service/route53recoveryreadiness/readiness_check.go b/internal/service/route53recoveryreadiness/readiness_check.go index b61e40049493..58ef38c258b5 100644 --- a/internal/service/route53recoveryreadiness/readiness_check.go +++ b/internal/service/route53recoveryreadiness/readiness_check.go @@ -144,21 +144,17 @@ func resourceReadinessCheckDelete(ctx context.Context, d *schema.ResourceData, m return sdkdiag.AppendErrorf(diags, "deleting Route53 Recovery Readiness Readiness Check (%s): %s", d.Id(), err) } - err = retry.RetryContext(ctx, d.Timeout(schema.TimeoutDelete), func() *retry.RetryError { + err = tfresource.Retry(ctx, d.Timeout(schema.TimeoutDelete), func(ctx context.Context) *tfresource.RetryError { _, err = findReadinessCheckByName(ctx, conn, d.Id()) if err != nil { if tfresource.NotFound(err) { return nil } - return retry.NonRetryableError(err) + return tfresource.NonRetryableError(err) } - return retry.RetryableError(fmt.Errorf("Route 53 Recovery Readiness Readiness Check (%s) still exists", d.Id())) + return tfresource.RetryableError(fmt.Errorf("Route 53 Recovery Readiness Readiness Check (%s) still exists", d.Id())) }) - if 
tfresource.TimedOut(err) { - _, err = findReadinessCheckByName(ctx, conn, d.Id()) - } - if err != nil { return sdkdiag.AppendErrorf(diags, "waiting for Route 53 Recovery Readiness Readiness Check (%s) deletion: %s", d.Id(), err) } diff --git a/internal/service/route53recoveryreadiness/recovery_group.go b/internal/service/route53recoveryreadiness/recovery_group.go index 44813642f753..e6ceca3624d1 100644 --- a/internal/service/route53recoveryreadiness/recovery_group.go +++ b/internal/service/route53recoveryreadiness/recovery_group.go @@ -148,21 +148,17 @@ func resourceRecoveryGroupDelete(ctx context.Context, d *schema.ResourceData, me return sdkdiag.AppendErrorf(diags, "deleting Route53 Recovery Readiness Recovery Group (%s): %s", d.Id(), err) } - err = retry.RetryContext(ctx, d.Timeout(schema.TimeoutDelete), func() *retry.RetryError { + err = tfresource.Retry(ctx, d.Timeout(schema.TimeoutDelete), func(ctx context.Context) *tfresource.RetryError { _, err := findRecoveryGroupByName(ctx, conn, d.Id()) if err != nil { if tfresource.NotFound(err) { return nil } - return retry.NonRetryableError(err) + return tfresource.NonRetryableError(err) } - return retry.RetryableError(fmt.Errorf("Route53 Recovery Readiness Recovery Group (%s) still exists", d.Id())) + return tfresource.RetryableError(fmt.Errorf("Route53 Recovery Readiness Recovery Group (%s) still exists", d.Id())) }) - if tfresource.TimedOut(err) { - _, err = findRecoveryGroupByName(ctx, conn, d.Id()) - } - if err != nil { return sdkdiag.AppendErrorf(diags, "waiting for Route53 Recovery Readiness Recovery Group (%s) deletion: %s", d.Id(), err) } diff --git a/internal/service/route53recoveryreadiness/resource_set.go b/internal/service/route53recoveryreadiness/resource_set.go index 3b63f7d60708..c27407397b4f 100644 --- a/internal/service/route53recoveryreadiness/resource_set.go +++ b/internal/service/route53recoveryreadiness/resource_set.go @@ -238,19 +238,17 @@ func resourceResourceSetDelete(ctx context.Context, d 
*schema.ResourceData, meta return sdkdiag.AppendErrorf(diags, "deleting Route53 Recovery Readiness Resource Set (%s): %s", d.Id(), err) } - err = retry.RetryContext(ctx, d.Timeout(schema.TimeoutDelete), func() *retry.RetryError { + err = tfresource.Retry(ctx, d.Timeout(schema.TimeoutDelete), func(ctx context.Context) *tfresource.RetryError { _, err := findResourceSetByName(ctx, conn, d.Id()) if err != nil { if tfresource.NotFound(err) { return nil } - return retry.NonRetryableError(err) + return tfresource.NonRetryableError(err) } - return retry.RetryableError(fmt.Errorf("Route 53 Recovery Readiness Resource Set (%s) still exists", d.Id())) + return tfresource.RetryableError(fmt.Errorf("Route 53 Recovery Readiness Resource Set (%s) still exists", d.Id())) }) - if tfresource.TimedOut(err) { - _, err = findResourceSetByName(ctx, conn, d.Id()) - } + if err != nil { return sdkdiag.AppendErrorf(diags, "waiting for Route 53 Recovery Readiness Resource Set (%s) deletion: %s", d.Id(), err) } diff --git a/internal/service/route53recoveryreadiness/service_endpoint_resolver_gen.go b/internal/service/route53recoveryreadiness/service_endpoint_resolver_gen.go index 81057239e997..2e7bfb205352 100644 --- a/internal/service/route53recoveryreadiness/service_endpoint_resolver_gen.go +++ b/internal/service/route53recoveryreadiness/service_endpoint_resolver_gen.go @@ -62,7 +62,7 @@ func (r resolverV2) ResolveEndpoint(ctx context.Context, params route53recoveryr }) params.UseFIPS = aws.Bool(false) } else { - err = fmt.Errorf("looking up route53recoveryreadiness endpoint %q: %s", hostname, err) + err = fmt.Errorf("looking up route53recoveryreadiness endpoint %q: %w", hostname, err) return } } else { diff --git a/internal/service/route53recoveryreadiness/service_endpoints_gen_test.go b/internal/service/route53recoveryreadiness/service_endpoints_gen_test.go index a6b2bb817ecf..7736ce3cf01f 100644 --- a/internal/service/route53recoveryreadiness/service_endpoints_gen_test.go +++ 
b/internal/service/route53recoveryreadiness/service_endpoints_gen_test.go @@ -523,7 +523,7 @@ func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { ) } -var errCancelOperation = fmt.Errorf("Test: Canceling request") +var errCancelOperation = errors.New("Test: Canceling request") func addCancelRequestMiddleware() func(*middleware.Stack) error { return func(stack *middleware.Stack) error { diff --git a/internal/service/route53recoveryreadiness/service_package_gen.go b/internal/service/route53recoveryreadiness/service_package_gen.go index b3801a1768d6..bd8339b1eaea 100644 --- a/internal/service/route53recoveryreadiness/service_package_gen.go +++ b/internal/service/route53recoveryreadiness/service_package_gen.go @@ -7,7 +7,6 @@ import ( "unique" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/route53recoveryreadiness" "github.com/hashicorp/aws-sdk-go-base/v2/endpoints" "github.com/hashicorp/terraform-plugin-log/tflog" @@ -95,7 +94,7 @@ func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) ( func(o *route53recoveryreadiness.Options) { if inContext, ok := conns.FromContext(ctx); ok && inContext.VCREnabled() { tflog.Info(ctx, "overriding retry behavior to immediately return VCR errors") - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(vcr.InteractionNotFoundRetryableFunc)) + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), vcr.InteractionNotFoundRetryableFunc) } }, func(o *route53recoveryreadiness.Options) { diff --git a/internal/service/route53recoveryreadiness/tags_gen.go b/internal/service/route53recoveryreadiness/tags_gen.go index 69d46d7c770b..ec8bc3e7c384 100644 --- a/internal/service/route53recoveryreadiness/tags_gen.go +++ b/internal/service/route53recoveryreadiness/tags_gen.go @@ -3,8 +3,8 @@ package route53recoveryreadiness import ( "context" - "fmt" + 
"github.com/YakDriver/smarterr" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/route53recoveryreadiness" "github.com/hashicorp/terraform-plugin-log/tflog" @@ -26,7 +26,7 @@ func listTags(ctx context.Context, conn *route53recoveryreadiness.Client, identi output, err := conn.ListTagsForResources(ctx, &input, optFns...) if err != nil { - return tftags.New(ctx, nil), err + return tftags.New(ctx, nil), smarterr.NewError(err) } return keyValueTags(ctx, output.Tags), nil @@ -38,7 +38,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri tags, err := listTags(ctx, meta.(*conns.AWSClient).Route53RecoveryReadinessClient(ctx), identifier) if err != nil { - return err + return smarterr.NewError(err) } if inContext, ok := tftags.FromContext(ctx); ok { @@ -108,7 +108,7 @@ func updateTags(ctx context.Context, conn *route53recoveryreadiness.Client, iden _, err := conn.UntagResource(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("untagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } @@ -123,7 +123,7 @@ func updateTags(ctx context.Context, conn *route53recoveryreadiness.Client, iden _, err := conn.TagResource(ctx, &input, optFns...) 
if err != nil { - return fmt.Errorf("tagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } diff --git a/internal/service/route53resolver/endpoint_test.go b/internal/service/route53resolver/endpoint_test.go index fdbdd9a051b5..3222b19a4049 100644 --- a/internal/service/route53resolver/endpoint_test.go +++ b/internal/service/route53resolver/endpoint_test.go @@ -203,6 +203,37 @@ func TestAccRoute53ResolverEndpoint_updateOutbound(t *testing.T) { }) } +func TestAccRoute53ResolverEndpoint_directionInboundDelegation(t *testing.T) { + ctx := acctest.Context(t) + var ep awstypes.ResolverEndpoint + resourceName := "aws_route53_resolver_endpoint.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.Route53ResolverServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckEndpointDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccEndpointConfig_directionInboundDelegation(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckEndpointExists(ctx, resourceName, &ep), + resource.TestCheckResourceAttr(resourceName, "direction", "INBOUND_DELEGATION"), + resource.TestCheckResourceAttr(resourceName, "ip_address.#", "3"), + resource.TestCheckResourceAttr(resourceName, "resolver_endpoint_type", "IPV4"), + resource.TestCheckResourceAttr(resourceName, names.AttrName, rName), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + func TestAccRoute53ResolverEndpoint_resolverEndpointType(t *testing.T) { ctx := acctest.Context(t) var ep awstypes.ResolverEndpoint @@ -533,6 +564,31 @@ resource "aws_route53_resolver_endpoint" "test" { `, name)) } +func testAccEndpointConfig_directionInboundDelegation(rName string) string { + return 
acctest.ConfigCompose(testAccEndpointConfig_base(rName), fmt.Sprintf(` +resource "aws_route53_resolver_endpoint" "test" { + direction = "INBOUND_DELEGATION" + name = %[1]q + + resolver_endpoint_type = "IPV4" + + security_group_ids = aws_security_group.test[*].id + + ip_address { + subnet_id = aws_subnet.test[0].id + } + + ip_address { + subnet_id = aws_subnet.test[1].id + } + + ip_address { + subnet_id = aws_subnet.test[2].id + } +} +`, rName)) +} + func testAccEndpointConfig_resolverEndpointType(rName, resolverEndpointType string) string { return acctest.ConfigCompose(acctest.ConfigAvailableAZsNoOptIn(), fmt.Sprintf(` resource "aws_vpc" "test" { diff --git a/internal/service/route53resolver/generate.go b/internal/service/route53resolver/generate.go index 04fa4f003ec1..d41ae512e734 100644 --- a/internal/service/route53resolver/generate.go +++ b/internal/service/route53resolver/generate.go @@ -3,6 +3,7 @@ //go:generate go run ../../generate/tags/main.go -ListTags -ListTagsOpPaginated -ServiceTagsSlice -UpdateTags //go:generate go run ../../generate/servicepackage/main.go +//go:generate go run ../../generate/identitytests/main.go // ONLY generate directives and package declaration! Do not add anything else to this file. 
package route53resolver diff --git a/internal/service/route53resolver/rule.go b/internal/service/route53resolver/rule.go index 831e697d6bab..70f9aad0c317 100644 --- a/internal/service/route53resolver/rule.go +++ b/internal/service/route53resolver/rule.go @@ -29,6 +29,10 @@ import ( // @SDKResource("aws_route53_resolver_rule", name="Rule") // @Tags(identifierAttribute="arn") +// @IdentityAttribute("id") +// @Testing(existsType="github.com/aws/aws-sdk-go-v2/service/route53resolver/types;awstypes.ResolverRule") +// @Testing(preIdentityVersion="v6.10.0") +// @Testing(generator="github.com/hashicorp/terraform-provider-aws/internal/acctest;acctest.RandomDomainName()") func resourceRule() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceRuleCreate, @@ -36,10 +40,6 @@ func resourceRule() *schema.Resource { UpdateWithoutTimeout: resourceRuleUpdate, DeleteWithoutTimeout: resourceRuleDelete, - Importer: &schema.ResourceImporter{ - StateContext: schema.ImportStatePassthroughContext, - }, - Timeouts: &schema.ResourceTimeout{ Create: schema.DefaultTimeout(10 * time.Minute), Update: schema.DefaultTimeout(10 * time.Minute), diff --git a/internal/service/route53resolver/rule_association.go b/internal/service/route53resolver/rule_association.go index c0b46da6a611..553f2439b493 100644 --- a/internal/service/route53resolver/rule_association.go +++ b/internal/service/route53resolver/rule_association.go @@ -25,16 +25,16 @@ import ( ) // @SDKResource("aws_route53_resolver_rule_association", name="Rule Association") +// @IdentityAttribute("id") +// @Testing(existsType="github.com/aws/aws-sdk-go-v2/service/route53resolver/types;awstypes.ResolverRuleAssociation") +// @Testing(preIdentityVersion="v6.10.0") +// @Testing(domainTfVar="domain") func resourceRuleAssociation() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceRuleAssociationCreate, ReadWithoutTimeout: resourceRuleAssociationRead, DeleteWithoutTimeout: resourceRuleAssociationDelete, 
- Importer: &schema.ResourceImporter{ - StateContext: schema.ImportStatePassthroughContext, - }, - Timeouts: &schema.ResourceTimeout{ Create: schema.DefaultTimeout(10 * time.Minute), Delete: schema.DefaultTimeout(10 * time.Minute), diff --git a/internal/service/route53resolver/rule_association_identity_gen_test.go b/internal/service/route53resolver/rule_association_identity_gen_test.go new file mode 100644 index 000000000000..b1f153323719 --- /dev/null +++ b/internal/service/route53resolver/rule_association_identity_gen_test.go @@ -0,0 +1,325 @@ +// Code generated by internal/generate/identitytests/main.go; DO NOT EDIT. + +package route53resolver_test + +import ( + "testing" + + awstypes "github.com/aws/aws-sdk-go-v2/service/route53resolver/types" + "github.com/hashicorp/terraform-plugin-testing/config" + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/knownvalue" + "github.com/hashicorp/terraform-plugin-testing/plancheck" + "github.com/hashicorp/terraform-plugin-testing/statecheck" + "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" + "github.com/hashicorp/terraform-plugin-testing/tfversion" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + tfknownvalue "github.com/hashicorp/terraform-provider-aws/internal/acctest/knownvalue" + tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccRoute53ResolverRuleAssociation_Identity_Basic(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.ResolverRuleAssociation + resourceName := "aws_route53_resolver_rule_association.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + domain := acctest.RandomDomainName() + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + 
tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.Route53ResolverServiceID), + CheckDestroy: testAccCheckRuleAssociationDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + // Step 1: Setup + { + ConfigDirectory: config.StaticDirectory("testdata/RuleAssociation/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "domain": config.StringVariable(domain), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckRuleAssociationExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + names.AttrID: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrID)), + }, + }, + + // Step 2: Import command + { + ConfigDirectory: config.StaticDirectory("testdata/RuleAssociation/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "domain": config.StringVariable(domain), + }, + ImportStateKind: resource.ImportCommandWithID, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + + // Step 3: Import block with Import ID + { + ConfigDirectory: config.StaticDirectory("testdata/RuleAssociation/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "domain": config.StringVariable(domain), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + 
plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + }, + }, + }, + + // Step 4: Import block with Resource Identity + { + ConfigDirectory: config.StaticDirectory("testdata/RuleAssociation/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "domain": config.StringVariable(domain), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithResourceIdentity, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + }, + }, + }, + }, + }) +} + +func TestAccRoute53ResolverRuleAssociation_Identity_RegionOverride(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_route53_resolver_rule_association.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + domain := acctest.RandomDomainName() + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.Route53ResolverServiceID), + CheckDestroy: acctest.CheckDestroyNoop, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + // Step 1: Setup + { + ConfigDirectory: config.StaticDirectory("testdata/RuleAssociation/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "domain": config.StringVariable(domain), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ConfigStateChecks: []statecheck.StateCheck{ + 
statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.AlternateRegion()), + names.AttrID: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrID)), + }, + }, + + // Step 2: Import command + { + ConfigDirectory: config.StaticDirectory("testdata/RuleAssociation/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "domain": config.StringVariable(domain), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ImportStateKind: resource.ImportCommandWithID, + ImportStateIdFunc: acctest.CrossRegionImportStateIdFunc(resourceName), + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + + // Step 3: Import block with Import ID + { + ConfigDirectory: config.StaticDirectory("testdata/RuleAssociation/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "domain": config.StringVariable(domain), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportStateIdFunc: acctest.CrossRegionImportStateIdFunc(resourceName), + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + }, + + // Step 4: Import block with Resource Identity + { + ConfigDirectory: config.StaticDirectory("testdata/RuleAssociation/region_override/"), + ConfigVariables: config.Variables{ + 
acctest.CtRName: config.StringVariable(rName), + "domain": config.StringVariable(domain), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithResourceIdentity, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + }, + }, + }) +} + +// Resource Identity was added after v6.10.0 +func TestAccRoute53ResolverRuleAssociation_Identity_ExistingResource(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.ResolverRuleAssociation + resourceName := "aws_route53_resolver_rule_association.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + domain := acctest.RandomDomainName() + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.Route53ResolverServiceID), + CheckDestroy: testAccCheckRuleAssociationDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/RuleAssociation/basic_v6.10.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "domain": config.StringVariable(domain), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckRuleAssociationExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/RuleAssociation/basic/"), + 
ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "domain": config.StringVariable(domain), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + names.AttrID: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrID)), + }, + }, + }, + }) +} + +// Resource Identity was added after v6.10.0 +func TestAccRoute53ResolverRuleAssociation_Identity_ExistingResource_NoRefresh_NoChange(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.ResolverRuleAssociation + resourceName := "aws_route53_resolver_rule_association.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + domain := acctest.RandomDomainName() + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.Route53ResolverServiceID), + CheckDestroy: testAccCheckRuleAssociationDestroy(ctx), + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/RuleAssociation/basic_v6.10.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "domain": config.StringVariable(domain), + }, + Check: resource.ComposeAggregateTestCheckFunc( + 
testAccCheckRuleAssociationExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/RuleAssociation/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "domain": config.StringVariable(domain), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + }, + }) +} diff --git a/internal/service/route53resolver/rule_identity_gen_test.go b/internal/service/route53resolver/rule_identity_gen_test.go new file mode 100644 index 000000000000..1a084d4e475e --- /dev/null +++ b/internal/service/route53resolver/rule_identity_gen_test.go @@ -0,0 +1,308 @@ +// Code generated by internal/generate/identitytests/main.go; DO NOT EDIT. 
+ +package route53resolver_test + +import ( + "testing" + + awstypes "github.com/aws/aws-sdk-go-v2/service/route53resolver/types" + "github.com/hashicorp/terraform-plugin-testing/config" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/knownvalue" + "github.com/hashicorp/terraform-plugin-testing/plancheck" + "github.com/hashicorp/terraform-plugin-testing/statecheck" + "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" + "github.com/hashicorp/terraform-plugin-testing/tfversion" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + tfknownvalue "github.com/hashicorp/terraform-provider-aws/internal/acctest/knownvalue" + tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccRoute53ResolverRule_Identity_Basic(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.ResolverRule + resourceName := "aws_route53_resolver_rule.test" + rName := acctest.RandomDomainName() + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.Route53ResolverServiceID), + CheckDestroy: testAccCheckRuleDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + // Step 1: Setup + { + ConfigDirectory: config.StaticDirectory("testdata/Rule/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckRuleExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + statecheck.ExpectIdentity(resourceName, 
map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + names.AttrID: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrID)), + }, + }, + + // Step 2: Import command + { + ConfigDirectory: config.StaticDirectory("testdata/Rule/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ImportStateKind: resource.ImportCommandWithID, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + + // Step 3: Import block with Import ID + { + ConfigDirectory: config.StaticDirectory("testdata/Rule/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + }, + }, + }, + + // Step 4: Import block with Resource Identity + { + ConfigDirectory: config.StaticDirectory("testdata/Rule/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithResourceIdentity, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + }, + }, + }, + }, + }) +} + +func TestAccRoute53ResolverRule_Identity_RegionOverride(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := 
"aws_route53_resolver_rule.test" + rName := acctest.RandomDomainName() + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.Route53ResolverServiceID), + CheckDestroy: acctest.CheckDestroyNoop, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + // Step 1: Setup + { + ConfigDirectory: config.StaticDirectory("testdata/Rule/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.AlternateRegion()), + names.AttrID: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrID)), + }, + }, + + // Step 2: Import command + { + ConfigDirectory: config.StaticDirectory("testdata/Rule/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ImportStateKind: resource.ImportCommandWithID, + ImportStateIdFunc: acctest.CrossRegionImportStateIdFunc(resourceName), + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + + // Step 3: Import block with Import ID + { + ConfigDirectory: config.StaticDirectory("testdata/Rule/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, 
+ ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportStateIdFunc: acctest.CrossRegionImportStateIdFunc(resourceName), + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + }, + + // Step 4: Import block with Resource Identity + { + ConfigDirectory: config.StaticDirectory("testdata/Rule/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithResourceIdentity, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + }, + }, + }) +} + +// Resource Identity was added after v6.10.0 +func TestAccRoute53ResolverRule_Identity_ExistingResource(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.ResolverRule + resourceName := "aws_route53_resolver_rule.test" + rName := acctest.RandomDomainName() + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.Route53ResolverServiceID), + CheckDestroy: testAccCheckRuleDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/Rule/basic_v6.10.0/"), + ConfigVariables: 
config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckRuleExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Rule/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + names.AttrID: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrID)), + }, + }, + }, + }) +} + +// Resource Identity was added after v6.10.0 +func TestAccRoute53ResolverRule_Identity_ExistingResource_NoRefresh_NoChange(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.ResolverRule + resourceName := "aws_route53_resolver_rule.test" + rName := acctest.RandomDomainName() + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.Route53ResolverServiceID), + CheckDestroy: testAccCheckRuleDestroy(ctx), + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + // Step 1: 
Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/Rule/basic_v6.10.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckRuleExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Rule/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + }, + }) +} diff --git a/internal/service/route53resolver/service_endpoint_resolver_gen.go b/internal/service/route53resolver/service_endpoint_resolver_gen.go index 7164590704ac..9f6ffdc09394 100644 --- a/internal/service/route53resolver/service_endpoint_resolver_gen.go +++ b/internal/service/route53resolver/service_endpoint_resolver_gen.go @@ -62,7 +62,7 @@ func (r resolverV2) ResolveEndpoint(ctx context.Context, params route53resolver. 
}) params.UseFIPS = aws.Bool(false) } else { - err = fmt.Errorf("looking up route53resolver endpoint %q: %s", hostname, err) + err = fmt.Errorf("looking up route53resolver endpoint %q: %w", hostname, err) return } } else { diff --git a/internal/service/route53resolver/service_endpoints_gen_test.go b/internal/service/route53resolver/service_endpoints_gen_test.go index 26d4771475cd..6db715396314 100644 --- a/internal/service/route53resolver/service_endpoints_gen_test.go +++ b/internal/service/route53resolver/service_endpoints_gen_test.go @@ -521,7 +521,7 @@ func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { ) } -var errCancelOperation = fmt.Errorf("Test: Canceling request") +var errCancelOperation = errors.New("Test: Canceling request") func addCancelRequestMiddleware() func(*middleware.Stack) error { return func(stack *middleware.Stack) error { diff --git a/internal/service/route53resolver/service_package_gen.go b/internal/service/route53resolver/service_package_gen.go index 6ae05262c92d..c0f3ae2c88f9 100644 --- a/internal/service/route53resolver/service_package_gen.go +++ b/internal/service/route53resolver/service_package_gen.go @@ -7,7 +7,6 @@ import ( "unique" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/route53resolver" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -169,13 +168,21 @@ func (p *servicePackage) SDKResources(ctx context.Context) []*inttypes.ServicePa Tags: unique.Make(inttypes.ServicePackageResourceTags{ IdentifierAttribute: names.AttrARN, }), - Region: unique.Make(inttypes.ResourceRegionDefault()), + Region: unique.Make(inttypes.ResourceRegionDefault()), + Identity: inttypes.RegionalSingleParameterIdentity(names.AttrID), + Import: inttypes.SDKv2Import{ + WrappedImport: true, + }, }, { Factory: resourceRuleAssociation, TypeName: "aws_route53_resolver_rule_association", Name: "Rule 
Association", Region: unique.Make(inttypes.ResourceRegionDefault()), + Identity: inttypes.RegionalSingleParameterIdentity(names.AttrID), + Import: inttypes.SDKv2Import{ + WrappedImport: true, + }, }, } } @@ -203,7 +210,7 @@ func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) ( func(o *route53resolver.Options) { if inContext, ok := conns.FromContext(ctx); ok && inContext.VCREnabled() { tflog.Info(ctx, "overriding retry behavior to immediately return VCR errors") - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(vcr.InteractionNotFoundRetryableFunc)) + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), vcr.InteractionNotFoundRetryableFunc) } }, withExtraOptions(ctx, p, config), diff --git a/internal/service/route53resolver/sweep.go b/internal/service/route53resolver/sweep.go index fd390b29ae5a..db5f363b91c2 100644 --- a/internal/service/route53resolver/sweep.go +++ b/internal/service/route53resolver/sweep.go @@ -97,7 +97,7 @@ func sweepDNSSECConfig(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.Route53ResolverClient(ctx) input := &route53resolver.ListResolverDnssecConfigsInput{} @@ -139,7 +139,7 @@ func sweepEndpoints(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.Route53ResolverClient(ctx) input := &route53resolver.ListResolverEndpointsInput{} @@ -180,7 +180,7 @@ func sweepFirewallConfigs(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return 
fmt.Errorf("getting client: %w", err) } conn := client.Route53ResolverClient(ctx) input := &route53resolver.ListFirewallConfigsInput{} @@ -222,7 +222,7 @@ func sweepFirewallDomainLists(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.Route53ResolverClient(ctx) input := &route53resolver.ListFirewallDomainListsInput{} @@ -263,7 +263,7 @@ func sweepFirewallRuleGroupAssociations(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.Route53ResolverClient(ctx) input := &route53resolver.ListFirewallRuleGroupAssociationsInput{} @@ -304,7 +304,7 @@ func sweepFirewallRuleGroups(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.Route53ResolverClient(ctx) input := &route53resolver.ListFirewallRuleGroupsInput{} @@ -352,7 +352,7 @@ func sweepFirewallRules(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.Route53ResolverClient(ctx) input := &route53resolver.ListFirewallRuleGroupsInput{} @@ -420,7 +420,7 @@ func sweepQueryLogConfigAssociations(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.Route53ResolverClient(ctx) input := 
&route53resolver.ListResolverQueryLogConfigAssociationsInput{} @@ -463,7 +463,7 @@ func sweepQueryLogsConfig(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.Route53ResolverClient(ctx) input := &route53resolver.ListResolverQueryLogConfigsInput{} @@ -504,7 +504,7 @@ func sweepRuleAssociations(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.Route53ResolverClient(ctx) input := &route53resolver.ListResolverRuleAssociationsInput{} @@ -547,7 +547,7 @@ func sweepRules(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.Route53ResolverClient(ctx) input := &route53resolver.ListResolverRulesInput{} diff --git a/internal/service/route53resolver/tags_gen.go b/internal/service/route53resolver/tags_gen.go index a167361a060b..e0b5299545f9 100644 --- a/internal/service/route53resolver/tags_gen.go +++ b/internal/service/route53resolver/tags_gen.go @@ -3,8 +3,8 @@ package route53resolver import ( "context" - "fmt" + "github.com/YakDriver/smarterr" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/route53resolver" awstypes "github.com/aws/aws-sdk-go-v2/service/route53resolver/types" @@ -31,7 +31,7 @@ func listTags(ctx context.Context, conn *route53resolver.Client, identifier stri page, err := pages.NextPage(ctx, optFns...) if err != nil { - return tftags.New(ctx, nil), err + return tftags.New(ctx, nil), smarterr.NewError(err) } output = append(output, page.Tags...) 
@@ -46,7 +46,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri tags, err := listTags(ctx, meta.(*conns.AWSClient).Route53ResolverClient(ctx), identifier) if err != nil { - return err + return smarterr.NewError(err) } if inContext, ok := tftags.FromContext(ctx); ok { @@ -124,7 +124,7 @@ func updateTags(ctx context.Context, conn *route53resolver.Client, identifier st _, err := conn.UntagResource(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("untagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } @@ -139,7 +139,7 @@ func updateTags(ctx context.Context, conn *route53resolver.Client, identifier st _, err := conn.TagResource(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("tagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } diff --git a/internal/service/route53resolver/testdata/Rule/basic/main_gen.tf b/internal/service/route53resolver/testdata/Rule/basic/main_gen.tf new file mode 100644 index 000000000000..cce87910b73b --- /dev/null +++ b/internal/service/route53resolver/testdata/Rule/basic/main_gen.tf @@ -0,0 +1,13 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resource "aws_route53_resolver_rule" "test" { + domain_name = var.rName + rule_type = "SYSTEM" +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} diff --git a/internal/service/route53resolver/testdata/Rule/basic_v6.10.0/main_gen.tf b/internal/service/route53resolver/testdata/Rule/basic_v6.10.0/main_gen.tf new file mode 100644 index 000000000000..1edbe975583e --- /dev/null +++ b/internal/service/route53resolver/testdata/Rule/basic_v6.10.0/main_gen.tf @@ -0,0 +1,23 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_route53_resolver_rule" "test" { + domain_name = var.rName + rule_type = "SYSTEM" +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "6.10.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/route53resolver/testdata/Rule/region_override/main_gen.tf b/internal/service/route53resolver/testdata/Rule/region_override/main_gen.tf new file mode 100644 index 000000000000..d08d5d2696e9 --- /dev/null +++ b/internal/service/route53resolver/testdata/Rule/region_override/main_gen.tf @@ -0,0 +1,21 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resource "aws_route53_resolver_rule" "test" { + region = var.region + + domain_name = var.rName + rule_type = "SYSTEM" +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} + +variable "region" { + description = "Region to deploy resource in" + type = string + nullable = false +} diff --git a/internal/service/route53resolver/testdata/RuleAssociation/basic/main_gen.tf b/internal/service/route53resolver/testdata/RuleAssociation/basic/main_gen.tf new file mode 100644 index 000000000000..1a3c58e94c57 --- /dev/null +++ b/internal/service/route53resolver/testdata/RuleAssociation/basic/main_gen.tf @@ -0,0 +1,31 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_route53_resolver_rule_association" "test" { + name = var.rName + resolver_rule_id = aws_route53_resolver_rule.test.id + vpc_id = aws_vpc.test.id +} + +resource "aws_vpc" "test" { + cidr_block = "10.6.0.0/16" + enable_dns_hostnames = true + enable_dns_support = true +} + +resource "aws_route53_resolver_rule" "test" { + domain_name = var.domain + name = var.rName + rule_type = "SYSTEM" +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} +variable "domain" { + type = string + nullable = false +} + diff --git a/internal/service/route53resolver/testdata/RuleAssociation/basic_v6.10.0/main_gen.tf b/internal/service/route53resolver/testdata/RuleAssociation/basic_v6.10.0/main_gen.tf new file mode 100644 index 000000000000..879c27c75c16 --- /dev/null +++ b/internal/service/route53resolver/testdata/RuleAssociation/basic_v6.10.0/main_gen.tf @@ -0,0 +1,41 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resource "aws_route53_resolver_rule_association" "test" { + name = var.rName + resolver_rule_id = aws_route53_resolver_rule.test.id + vpc_id = aws_vpc.test.id +} + +resource "aws_vpc" "test" { + cidr_block = "10.6.0.0/16" + enable_dns_hostnames = true + enable_dns_support = true +} + +resource "aws_route53_resolver_rule" "test" { + domain_name = var.domain + name = var.rName + rule_type = "SYSTEM" +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} +variable "domain" { + type = string + nullable = false +} + +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "6.10.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/route53resolver/testdata/RuleAssociation/region_override/main_gen.tf b/internal/service/route53resolver/testdata/RuleAssociation/region_override/main_gen.tf new file mode 100644 index 000000000000..0428ef236c4b --- /dev/null +++ 
b/internal/service/route53resolver/testdata/RuleAssociation/region_override/main_gen.tf @@ -0,0 +1,43 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resource "aws_route53_resolver_rule_association" "test" { + region = var.region + + name = var.rName + resolver_rule_id = aws_route53_resolver_rule.test.id + vpc_id = aws_vpc.test.id +} + +resource "aws_vpc" "test" { + region = var.region + + cidr_block = "10.6.0.0/16" + enable_dns_hostnames = true + enable_dns_support = true +} + +resource "aws_route53_resolver_rule" "test" { + region = var.region + + domain_name = var.domain + name = var.rName + rule_type = "SYSTEM" +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} +variable "domain" { + type = string + nullable = false +} + + +variable "region" { + description = "Region to deploy resource in" + type = string + nullable = false +} diff --git a/internal/service/route53resolver/testdata/tmpl/rule_association_basic.gtpl b/internal/service/route53resolver/testdata/tmpl/rule_association_basic.gtpl new file mode 100644 index 000000000000..31600c9aca0a --- /dev/null +++ b/internal/service/route53resolver/testdata/tmpl/rule_association_basic.gtpl @@ -0,0 +1,20 @@ +resource "aws_route53_resolver_rule_association" "test" { +{{- template "region" }} + name = var.rName + resolver_rule_id = aws_route53_resolver_rule.test.id + vpc_id = aws_vpc.test.id +} + +resource "aws_vpc" "test" { +{{- template "region" }} + cidr_block = "10.6.0.0/16" + enable_dns_hostnames = true + enable_dns_support = true +} + +resource "aws_route53_resolver_rule" "test" { +{{- template "region" }} + domain_name = var.domain + name = var.rName + rule_type = "SYSTEM" +} diff --git a/internal/service/route53resolver/testdata/tmpl/rule_tags.gtpl b/internal/service/route53resolver/testdata/tmpl/rule_tags.gtpl new file mode 100644 index 000000000000..cdde86441707 --- /dev/null +++ 
b/internal/service/route53resolver/testdata/tmpl/rule_tags.gtpl @@ -0,0 +1,6 @@ +resource "aws_route53_resolver_rule" "test" { +{{- template "region" }} + domain_name = var.rName + rule_type = "SYSTEM" +{{- template "tags" }} +} diff --git a/internal/service/rum/service_endpoint_resolver_gen.go b/internal/service/rum/service_endpoint_resolver_gen.go index 6c385f7c4a3a..791f1501d1c7 100644 --- a/internal/service/rum/service_endpoint_resolver_gen.go +++ b/internal/service/rum/service_endpoint_resolver_gen.go @@ -62,7 +62,7 @@ func (r resolverV2) ResolveEndpoint(ctx context.Context, params rum.EndpointPara }) params.UseFIPS = aws.Bool(false) } else { - err = fmt.Errorf("looking up rum endpoint %q: %s", hostname, err) + err = fmt.Errorf("looking up rum endpoint %q: %w", hostname, err) return } } else { diff --git a/internal/service/rum/service_endpoints_gen_test.go b/internal/service/rum/service_endpoints_gen_test.go index 33885afc50e0..11b99f63f9bc 100644 --- a/internal/service/rum/service_endpoints_gen_test.go +++ b/internal/service/rum/service_endpoints_gen_test.go @@ -601,7 +601,7 @@ func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { ) } -var errCancelOperation = fmt.Errorf("Test: Canceling request") +var errCancelOperation = errors.New("Test: Canceling request") func addCancelRequestMiddleware() func(*middleware.Stack) error { return func(stack *middleware.Stack) error { diff --git a/internal/service/rum/service_package_gen.go b/internal/service/rum/service_package_gen.go index e989a5f5e80c..8ab78c90d865 100644 --- a/internal/service/rum/service_package_gen.go +++ b/internal/service/rum/service_package_gen.go @@ -7,7 +7,6 @@ import ( "unique" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/rum" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -73,7 +72,7 @@ func (p *servicePackage) NewClient(ctx 
context.Context, config map[string]any) ( func(o *rum.Options) { if inContext, ok := conns.FromContext(ctx); ok && inContext.VCREnabled() { tflog.Info(ctx, "overriding retry behavior to immediately return VCR errors") - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(vcr.InteractionNotFoundRetryableFunc)) + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), vcr.InteractionNotFoundRetryableFunc) } }, withExtraOptions(ctx, p, config), diff --git a/internal/service/rum/sweep.go b/internal/service/rum/sweep.go index 8854334755f6..05a378d34f5d 100644 --- a/internal/service/rum/sweep.go +++ b/internal/service/rum/sweep.go @@ -25,7 +25,7 @@ func sweepAppMonitors(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.RUMClient(ctx) input := &rum.ListAppMonitorsInput{} diff --git a/internal/service/rum/tags_gen.go b/internal/service/rum/tags_gen.go index a6098fad9377..ecedacabad6a 100644 --- a/internal/service/rum/tags_gen.go +++ b/internal/service/rum/tags_gen.go @@ -3,8 +3,8 @@ package rum import ( "context" - "fmt" + "github.com/YakDriver/smarterr" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/rum" "github.com/hashicorp/terraform-plugin-log/tflog" @@ -66,7 +66,7 @@ func updateTags(ctx context.Context, conn *rum.Client, identifier string, oldTag _, err := conn.UntagResource(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("untagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } @@ -81,7 +81,7 @@ func updateTags(ctx context.Context, conn *rum.Client, identifier string, oldTag _, err := conn.TagResource(ctx, &input, optFns...) 
if err != nil { - return fmt.Errorf("tagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } diff --git a/internal/service/s3/bucket.go b/internal/service/s3/bucket.go index 8a6f678aaded..f9f031052d0d 100644 --- a/internal/service/s3/bucket.go +++ b/internal/service/s3/bucket.go @@ -51,7 +51,7 @@ const ( // @SDKResource("aws_s3_bucket", name="Bucket") // @Tags(identifierAttribute="bucket", resourceType="Bucket") // @IdentityAttribute("bucket") -// @WrappedImport(false) +// @CustomImport // @V60SDKv2Fix // @Testing(idAttrDuplicates="bucket") func resourceBucket() *schema.Resource { @@ -63,7 +63,9 @@ func resourceBucket() *schema.Resource { Importer: &schema.ResourceImporter{ StateContext: func(ctx context.Context, rd *schema.ResourceData, meta any) ([]*schema.ResourceData, error) { - if err := importer.RegionalSingleParameterized(ctx, rd, names.AttrBucket, meta.(importer.AWSClient)); err != nil { + identitySpec := importer.IdentitySpec(ctx) + + if err := importer.RegionalSingleParameterized(ctx, rd, identitySpec, meta.(importer.AWSClient)); err != nil { return nil, err } @@ -766,7 +768,7 @@ func resourceBucketCreate(ctx context.Context, d *schema.ResourceData, meta any) input.ObjectLockEnabledForBucket = aws.Bool(true) } - _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, d.Timeout(schema.TimeoutCreate), func() (any, error) { + _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, d.Timeout(schema.TimeoutCreate), func(ctx context.Context) (any, error) { return conn.CreateBucket(ctx, input) }, errCodeOperationAborted) @@ -776,7 +778,7 @@ func resourceBucketCreate(ctx context.Context, d *schema.ResourceData, meta any) d.SetId(bucket) - _, err = tfresource.RetryWhenNotFound(ctx, d.Timeout(schema.TimeoutCreate), func() (any, error) { + _, err = tfresource.RetryWhenNotFound(ctx, d.Timeout(schema.TimeoutCreate), func(ctx context.Context) (any, error) { return findBucket(ctx, conn, d.Id()) }) @@ -1171,7 +1173,7 @@ func resourceBucketUpdate(ctx 
context.Context, d *schema.ResourceData, meta any) } if policy == "" { - _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, d.Timeout(schema.TimeoutUpdate), func() (any, error) { + _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, d.Timeout(schema.TimeoutUpdate), func(ctx context.Context) (any, error) { return conn.DeleteBucketPolicy(ctx, &s3.DeleteBucketPolicyInput{ Bucket: aws.String(d.Id()), }) @@ -1186,7 +1188,7 @@ func resourceBucketUpdate(ctx context.Context, d *schema.ResourceData, meta any) Policy: aws.String(policy), } - _, err = tfresource.RetryWhenAWSErrCodeEquals(ctx, d.Timeout(schema.TimeoutUpdate), func() (any, error) { + _, err = tfresource.RetryWhenAWSErrCodeEquals(ctx, d.Timeout(schema.TimeoutUpdate), func(ctx context.Context) (any, error) { return conn.PutBucketPolicy(ctx, input) }, errCodeMalformedPolicy, errCodeNoSuchBucket) @@ -1201,7 +1203,7 @@ func resourceBucketUpdate(ctx context.Context, d *schema.ResourceData, meta any) // if d.HasChange("cors_rule") { if v, ok := d.GetOk("cors_rule"); !ok || len(v.([]any)) == 0 { - _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, d.Timeout(schema.TimeoutUpdate), func() (any, error) { + _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, d.Timeout(schema.TimeoutUpdate), func(ctx context.Context) (any, error) { return conn.DeleteBucketCors(ctx, &s3.DeleteBucketCorsInput{ Bucket: aws.String(d.Id()), }) @@ -1218,7 +1220,7 @@ func resourceBucketUpdate(ctx context.Context, d *schema.ResourceData, meta any) }, } - _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, d.Timeout(schema.TimeoutUpdate), func() (any, error) { + _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, d.Timeout(schema.TimeoutUpdate), func(ctx context.Context) (any, error) { return conn.PutBucketCors(ctx, input) }, errCodeNoSuchBucket) @@ -1233,7 +1235,7 @@ func resourceBucketUpdate(ctx context.Context, d *schema.ResourceData, meta any) // if d.HasChange("website") { if v, ok := d.GetOk("website"); !ok || len(v.([]any)) == 0 || 
v.([]any)[0] == nil { - _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, d.Timeout(schema.TimeoutUpdate), func() (any, error) { + _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, d.Timeout(schema.TimeoutUpdate), func(ctx context.Context) (any, error) { return conn.DeleteBucketWebsite(ctx, &s3.DeleteBucketWebsiteInput{ Bucket: aws.String(d.Id()), }) @@ -1253,7 +1255,7 @@ func resourceBucketUpdate(ctx context.Context, d *schema.ResourceData, meta any) WebsiteConfiguration: websiteConfig, } - _, err = tfresource.RetryWhenAWSErrCodeEquals(ctx, d.Timeout(schema.TimeoutUpdate), func() (any, error) { + _, err = tfresource.RetryWhenAWSErrCodeEquals(ctx, d.Timeout(schema.TimeoutUpdate), func(ctx context.Context) (any, error) { return conn.PutBucketWebsite(ctx, input) }, errCodeNoSuchBucket) @@ -1282,7 +1284,7 @@ func resourceBucketUpdate(ctx context.Context, d *schema.ResourceData, meta any) VersioningConfiguration: versioningConfig, } - _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, d.Timeout(schema.TimeoutUpdate), func() (any, error) { + _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, d.Timeout(schema.TimeoutUpdate), func(ctx context.Context) (any, error) { return conn.PutBucketVersioning(ctx, input) }, errCodeNoSuchBucket) @@ -1306,7 +1308,7 @@ func resourceBucketUpdate(ctx context.Context, d *schema.ResourceData, meta any) Bucket: aws.String(d.Id()), } - _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, d.Timeout(schema.TimeoutUpdate), func() (any, error) { + _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, d.Timeout(schema.TimeoutUpdate), func(ctx context.Context) (any, error) { return conn.PutBucketAcl(ctx, input) }, errCodeNoSuchBucket) @@ -1332,7 +1334,7 @@ func resourceBucketUpdate(ctx context.Context, d *schema.ResourceData, meta any) Bucket: aws.String(d.Id()), } - _, err = tfresource.RetryWhenAWSErrCodeEquals(ctx, d.Timeout(schema.TimeoutUpdate), func() (any, error) { + _, err = tfresource.RetryWhenAWSErrCodeEquals(ctx, 
d.Timeout(schema.TimeoutUpdate), func(ctx context.Context) (any, error) { return conn.PutBucketAcl(ctx, input) }, errCodeNoSuchBucket) @@ -1364,7 +1366,7 @@ func resourceBucketUpdate(ctx context.Context, d *schema.ResourceData, meta any) } } - _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, d.Timeout(schema.TimeoutUpdate), func() (any, error) { + _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, d.Timeout(schema.TimeoutUpdate), func(ctx context.Context) (any, error) { return conn.PutBucketLogging(ctx, input) }, errCodeNoSuchBucket) @@ -1378,7 +1380,7 @@ func resourceBucketUpdate(ctx context.Context, d *schema.ResourceData, meta any) // if d.HasChange("lifecycle_rule") { if v, ok := d.GetOk("lifecycle_rule"); !ok || len(v.([]any)) == 0 { - _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, d.Timeout(schema.TimeoutUpdate), func() (any, error) { + _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, d.Timeout(schema.TimeoutUpdate), func(ctx context.Context) (any, error) { return conn.DeleteBucketLifecycle(ctx, &s3.DeleteBucketLifecycleInput{ Bucket: aws.String(d.Id()), }) @@ -1395,7 +1397,7 @@ func resourceBucketUpdate(ctx context.Context, d *schema.ResourceData, meta any) }, } - _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, d.Timeout(schema.TimeoutUpdate), func() (any, error) { + _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, d.Timeout(schema.TimeoutUpdate), func(ctx context.Context) (any, error) { return conn.PutBucketLifecycleConfiguration(ctx, input) }, errCodeNoSuchBucket) @@ -1416,7 +1418,7 @@ func resourceBucketUpdate(ctx context.Context, d *schema.ResourceData, meta any) Bucket: aws.String(d.Id()), } - _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, d.Timeout(schema.TimeoutUpdate), func() (any, error) { + _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, d.Timeout(schema.TimeoutUpdate), func(ctx context.Context) (any, error) { return conn.PutBucketAccelerateConfiguration(ctx, input) }, errCodeNoSuchBucket) @@ -1436,7 +1438,7 @@ func 
resourceBucketUpdate(ctx context.Context, d *schema.ResourceData, meta any) }, } - _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, d.Timeout(schema.TimeoutUpdate), func() (any, error) { + _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, d.Timeout(schema.TimeoutUpdate), func(ctx context.Context) (any, error) { return conn.PutBucketRequestPayment(ctx, input) }, errCodeNoSuchBucket) @@ -1450,7 +1452,7 @@ func resourceBucketUpdate(ctx context.Context, d *schema.ResourceData, meta any) // if d.HasChange("replication_configuration") { if v, ok := d.GetOk("replication_configuration"); !ok || len(v.([]any)) == 0 || v.([]any)[0] == nil { - _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, d.Timeout(schema.TimeoutUpdate), func() (any, error) { + _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, d.Timeout(schema.TimeoutUpdate), func(ctx context.Context) (any, error) { return conn.DeleteBucketReplication(ctx, &s3.DeleteBucketReplicationInput{ Bucket: aws.String(d.Id()), }) @@ -1481,7 +1483,7 @@ func resourceBucketUpdate(ctx context.Context, d *schema.ResourceData, meta any) } _, err := tfresource.RetryWhen(ctx, d.Timeout(schema.TimeoutUpdate), - func() (any, error) { + func(ctx context.Context) (any, error) { return conn.PutBucketReplication(ctx, input) }, func(err error) (bool, error) { @@ -1504,7 +1506,7 @@ func resourceBucketUpdate(ctx context.Context, d *schema.ResourceData, meta any) // if d.HasChange("server_side_encryption_configuration") { if v, ok := d.GetOk("server_side_encryption_configuration"); !ok || len(v.([]any)) == 0 { - _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, d.Timeout(schema.TimeoutUpdate), func() (any, error) { + _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, d.Timeout(schema.TimeoutUpdate), func(ctx context.Context) (any, error) { return conn.DeleteBucketEncryption(ctx, &s3.DeleteBucketEncryptionInput{ Bucket: aws.String(d.Id()), }) @@ -1521,7 +1523,7 @@ func resourceBucketUpdate(ctx context.Context, d *schema.ResourceData, meta 
any) }, } - _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, d.Timeout(schema.TimeoutUpdate), func() (any, error) { + _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, d.Timeout(schema.TimeoutUpdate), func(ctx context.Context) (any, error) { return conn.PutBucketEncryption(ctx, input) }, errCodeNoSuchBucket, errCodeOperationAborted) @@ -1541,7 +1543,7 @@ func resourceBucketUpdate(ctx context.Context, d *schema.ResourceData, meta any) ObjectLockConfiguration: expandBucketObjectLockConfiguration(d.Get("object_lock_configuration").([]any)), } - _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, d.Timeout(schema.TimeoutUpdate), func() (any, error) { + _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, d.Timeout(schema.TimeoutUpdate), func(ctx context.Context) (any, error) { return conn.PutObjectLockConfiguration(ctx, input) }, errCodeNoSuchBucket) @@ -1590,7 +1592,7 @@ func resourceBucketDelete(ctx context.Context, d *schema.ResourceData, meta any) return sdkdiag.AppendErrorf(diags, "deleting S3 Bucket (%s): %s", d.Id(), err) } - _, err = tfresource.RetryUntilNotFound(ctx, d.Timeout(schema.TimeoutDelete), func() (any, error) { + _, err = tfresource.RetryUntilNotFound(ctx, d.Timeout(schema.TimeoutDelete), func(ctx context.Context) (any, error) { return findBucket(ctx, conn, d.Id()) }) @@ -1658,7 +1660,7 @@ func findBucketRegion(ctx context.Context, c *conns.AWSClient, bucket string, op } func retryWhenNoSuchBucketError[T any](ctx context.Context, timeout time.Duration, f func() (T, error)) (T, error) { - outputRaw, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, timeout, func() (any, error) { + outputRaw, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, timeout, func(ctx context.Context) (any, error) { return f() }, errCodeNoSuchBucket) diff --git a/internal/service/s3/bucket_accelerate_configuration.go b/internal/service/s3/bucket_accelerate_configuration.go index e23810a1a6be..ee3af7e50af2 100644 --- a/internal/service/s3/bucket_accelerate_configuration.go +++ 
b/internal/service/s3/bucket_accelerate_configuration.go @@ -76,7 +76,7 @@ func resourceBucketAccelerateConfigurationCreate(ctx context.Context, d *schema. input.ExpectedBucketOwner = aws.String(expectedBucketOwner) } - _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, bucketPropagationTimeout, func() (any, error) { + _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, bucketPropagationTimeout, func(ctx context.Context) (any, error) { return conn.PutBucketAccelerateConfiguration(ctx, input) }, errCodeNoSuchBucket) @@ -90,7 +90,7 @@ func resourceBucketAccelerateConfigurationCreate(ctx context.Context, d *schema. d.SetId(createResourceID(bucket, expectedBucketOwner)) - _, err = tfresource.RetryWhenNotFound(ctx, bucketPropagationTimeout, func() (any, error) { + _, err = tfresource.RetryWhenNotFound(ctx, bucketPropagationTimeout, func(ctx context.Context) (any, error) { return findBucketAccelerateConfiguration(ctx, conn, bucket, expectedBucketOwner) }) diff --git a/internal/service/s3/bucket_acl.go b/internal/service/s3/bucket_acl.go index 3c18c383db0c..c667d7f9e6f0 100644 --- a/internal/service/s3/bucket_acl.go +++ b/internal/service/s3/bucket_acl.go @@ -24,13 +24,21 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" tfslices "github.com/hashicorp/terraform-provider-aws/internal/slices" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + inttypes "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/internal/verify" "github.com/hashicorp/terraform-provider-aws/names" ) -const BucketACLSeparator = "," - // @SDKResource("aws_s3_bucket_acl", name="Bucket ACL") +// @IdentityAttribute("bucket") +// @IdentityAttribute("expected_bucket_owner", optional="true") +// @IdentityAttribute("acl", optional="true", testNotNull="true") +// @MutableIdentity +// @ImportIDHandler("bucketACLImportID") +// @Testing(preIdentityVersion="v6.10.0") +// @Testing(checkDestroyNoop=true) +// 
@Testing(importIgnore="access_control_policy.0.grant.0.grantee.0.display_name;access_control_policy.0.owner.0.display_name") +// @Testing(plannableImportAction="NoOp") func resourceBucketACL() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceBucketACLCreate, @@ -38,10 +46,6 @@ func resourceBucketACL() *schema.Resource { UpdateWithoutTimeout: resourceBucketACLUpdate, DeleteWithoutTimeout: schema.NoopContext, - Importer: &schema.ResourceImporter{ - StateContext: schema.ImportStatePassthroughContext, - }, - Schema: map[string]*schema.Schema{ "access_control_policy": { Type: schema.TypeList, @@ -69,6 +73,8 @@ func resourceBucketACL() *schema.Resource { names.AttrDisplayName: { Type: schema.TypeString, Computed: true, + Deprecated: "display_name is deprecated. This attribute is no longer returned by " + + "AWS and will be removed in a future major version.", }, names.AttrID: { Type: schema.TypeString, @@ -104,6 +110,8 @@ func resourceBucketACL() *schema.Resource { Type: schema.TypeString, Optional: true, Computed: true, + Deprecated: "display_name is deprecated. 
This attribute is no longer returned by " + + "AWS and will be removed in a future major version.", }, names.AttrID: { Type: schema.TypeString, @@ -172,7 +180,7 @@ func resourceBucketACLCreate(ctx context.Context, d *schema.ResourceData, meta a input.AccessControlPolicy = expandAccessControlPolicy(v.([]any)) } - _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, bucketPropagationTimeout, func() (any, error) { + _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, bucketPropagationTimeout, func(ctx context.Context) (any, error) { return conn.PutBucketAcl(ctx, input) }, errCodeNoSuchBucket) @@ -184,9 +192,9 @@ func resourceBucketACLCreate(ctx context.Context, d *schema.ResourceData, meta a return sdkdiag.AppendErrorf(diags, "creating S3 Bucket (%s) ACL: %s", bucket, err) } - d.SetId(BucketACLCreateResourceID(bucket, expectedBucketOwner, acl)) + d.SetId(createBucketACLResourceID(bucket, expectedBucketOwner, acl)) - _, err = tfresource.RetryWhenNotFound(ctx, bucketPropagationTimeout, func() (any, error) { + _, err = tfresource.RetryWhenNotFound(ctx, bucketPropagationTimeout, func(ctx context.Context) (any, error) { return findBucketACL(ctx, conn, bucket, expectedBucketOwner) }) @@ -201,7 +209,7 @@ func resourceBucketACLRead(ctx context.Context, d *schema.ResourceData, meta any var diags diag.Diagnostics conn := meta.(*conns.AWSClient).S3Client(ctx) - bucket, expectedBucketOwner, acl, err := BucketACLParseResourceID(d.Id()) + bucket, expectedBucketOwner, acl, err := parseBucketACLResourceID(d.Id()) if err != nil { return sdkdiag.AppendFromErr(diags, err) } @@ -236,7 +244,7 @@ func resourceBucketACLUpdate(ctx context.Context, d *schema.ResourceData, meta a var diags diag.Diagnostics conn := meta.(*conns.AWSClient).S3Client(ctx) - bucket, expectedBucketOwner, acl, err := BucketACLParseResourceID(d.Id()) + bucket, expectedBucketOwner, acl, err := parseBucketACLResourceID(d.Id()) if err != nil { return sdkdiag.AppendFromErr(diags, err) } @@ -269,7 +277,7 @@ func 
resourceBucketACLUpdate(ctx context.Context, d *schema.ResourceData, meta a if d.HasChange("acl") { // Set new ACL value back in resource ID - d.SetId(BucketACLCreateResourceID(bucket, expectedBucketOwner, acl)) + d.SetId(createBucketACLResourceID(bucket, expectedBucketOwner, acl)) } return append(diags, resourceBucketACLRead(ctx, d, meta)...) @@ -487,26 +495,28 @@ func flattenOwner(owner *types.Owner) []any { return []any{m} } -// BucketACLCreateResourceID is a method for creating an ID string +const bucketACLSeparator = "," + +// createBucketACLResourceID is a method for creating an ID string // with the bucket name and optional accountID and/or ACL. -func BucketACLCreateResourceID(bucket, expectedBucketOwner, acl string) string { +func createBucketACLResourceID(bucket, expectedBucketOwner, acl string) string { if expectedBucketOwner == "" { if acl == "" { return bucket } - return strings.Join([]string{bucket, acl}, BucketACLSeparator) + return strings.Join([]string{bucket, acl}, bucketACLSeparator) } if acl == "" { - return strings.Join([]string{bucket, expectedBucketOwner}, BucketACLSeparator) + return strings.Join([]string{bucket, expectedBucketOwner}, bucketACLSeparator) } - return strings.Join([]string{bucket, expectedBucketOwner, acl}, BucketACLSeparator) + return strings.Join([]string{bucket, expectedBucketOwner, acl}, bucketACLSeparator) } -// BucketACLParseResourceID is a method for parsing the ID string +// parseBucketACLResourceID is a method for parsing the ID string // for the bucket name, accountID, and ACL if provided. -func BucketACLParseResourceID(id string) (string, string, string, error) { +func parseBucketACLResourceID(id string) (string, string, string, error) { // For only bucket name in the ID e.g. 
my-bucket or My_Bucket // ~> On or after 3/1/2018: Bucket names can consist of only lowercase letters, numbers, dots, and hyphens; Max 63 characters // ~> Before 3/1/2018: Bucket names could consist of uppercase letters and underscores if in us-east-1; Max 255 characters @@ -528,33 +538,63 @@ func BucketACLParseResourceID(id string) (string, string, string, error) { // Bucket and Account ID ONLY if bucketAndOwnerRegex.MatchString(id) { - parts := strings.Split(id, BucketACLSeparator) + parts := strings.Split(id, bucketACLSeparator) if len(parts) != 2 || parts[0] == "" || parts[1] == "" { - return "", "", "", fmt.Errorf("unexpected format for ID (%s), expected BUCKET%sEXPECTED_BUCKET_OWNER", id, BucketACLSeparator) + return "", "", "", fmt.Errorf("unexpected format for ID (%s), expected BUCKET%sEXPECTED_BUCKET_OWNER", id, bucketACLSeparator) } return parts[0], parts[1], "", nil } // Bucket and ACL ONLY if bucketAndAclRegex.MatchString(id) { - parts := strings.Split(id, BucketACLSeparator) + parts := strings.Split(id, bucketACLSeparator) if len(parts) != 2 || parts[0] == "" || parts[1] == "" { - return "", "", "", fmt.Errorf("unexpected format for ID (%s), expected BUCKET%sACL", id, BucketACLSeparator) + return "", "", "", fmt.Errorf("unexpected format for ID (%s), expected BUCKET%sACL", id, bucketACLSeparator) } return parts[0], "", parts[1], nil } // Bucket, Account ID, and ACL if bucketOwnerAclRegex.MatchString(id) { - parts := strings.Split(id, BucketACLSeparator) + parts := strings.Split(id, bucketACLSeparator) if len(parts) != 3 || parts[0] == "" || parts[1] == "" || parts[2] == "" { - return "", "", "", fmt.Errorf("unexpected format for ID (%s), expected BUCKET%[2]sEXPECTED_BUCKET_OWNER%[2]sACL", id, BucketACLSeparator) + return "", "", "", fmt.Errorf("unexpected format for ID (%s), expected BUCKET%[2]sEXPECTED_BUCKET_OWNER%[2]sACL", id, bucketACLSeparator) } return parts[0], parts[1], parts[2], nil } return "", "", "", fmt.Errorf("unexpected format for ID 
(%s), expected BUCKET or BUCKET%[2]sEXPECTED_BUCKET_OWNER or BUCKET%[2]sACL "+ - "or BUCKET%[2]sEXPECTED_BUCKET_OWNER%[2]sACL", id, BucketACLSeparator) + "or BUCKET%[2]sEXPECTED_BUCKET_OWNER%[2]sACL", id, bucketACLSeparator) +} + +var _ inttypes.SDKv2ImportID = bucketACLImportID{} + +type bucketACLImportID struct{} + +func (bucketACLImportID) Create(d *schema.ResourceData) string { + bucket := d.Get(names.AttrBucket).(string) + expectedBucketOwner := d.Get(names.AttrExpectedBucketOwner).(string) + acl := d.Get("acl").(string) + return createBucketACLResourceID(bucket, expectedBucketOwner, acl) +} + +func (bucketACLImportID) Parse(id string) (string, map[string]string, error) { + bucket, expectedBucketOwner, acl, err := parseBucketACLResourceID(id) + if err != nil { + return id, nil, err + } + + results := map[string]string{ + names.AttrBucket: bucket, + } + if expectedBucketOwner != "" { + results[names.AttrExpectedBucketOwner] = expectedBucketOwner + } + if acl != "" { + results["acl"] = acl + } + + return id, results, nil } // These should be defined in the AWS SDK for Go. There is an issue, https://github.com/aws/aws-sdk-go/issues/2683. diff --git a/internal/service/s3/bucket_acl_identity_gen_test.go b/internal/service/s3/bucket_acl_identity_gen_test.go new file mode 100644 index 000000000000..8639040c2c7f --- /dev/null +++ b/internal/service/s3/bucket_acl_identity_gen_test.go @@ -0,0 +1,325 @@ +// Code generated by internal/generate/identitytests/main.go; DO NOT EDIT. 
+ +package s3_test + +import ( + "testing" + + "github.com/hashicorp/terraform-plugin-testing/config" + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/knownvalue" + "github.com/hashicorp/terraform-plugin-testing/plancheck" + "github.com/hashicorp/terraform-plugin-testing/statecheck" + "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" + "github.com/hashicorp/terraform-plugin-testing/tfversion" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + tfknownvalue "github.com/hashicorp/terraform-provider-aws/internal/acctest/knownvalue" + tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccS3BucketACL_Identity_Basic(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_s3_bucket_acl.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), + CheckDestroy: acctest.CheckDestroyNoop, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + // Step 1: Setup + { + ConfigDirectory: config.StaticDirectory("testdata/BucketACL/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckBucketACLExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: 
tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + names.AttrBucket: knownvalue.NotNull(), + names.AttrExpectedBucketOwner: knownvalue.Null(), + "acl": knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrBucket)), + }, + }, + + // Step 2: Import command + { + ConfigDirectory: config.StaticDirectory("testdata/BucketACL/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ImportStateKind: resource.ImportCommandWithID, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "access_control_policy.0.grant.0.grantee.0.display_name", "access_control_policy.0.owner.0.display_name", + }, + }, + + // Step 3: Import block with Import ID + { + ConfigDirectory: config.StaticDirectory("testdata/BucketACL/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrBucket), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrExpectedBucketOwner), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New("acl"), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + }, + }, + }, + + // Step 4: Import block with Resource Identity + { + ConfigDirectory: config.StaticDirectory("testdata/BucketACL/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithResourceIdentity, + ImportPlanChecks: 
resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrBucket), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrExpectedBucketOwner), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New("acl"), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + }, + }, + }, + }, + }) +} + +func TestAccS3BucketACL_Identity_RegionOverride(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_s3_bucket_acl.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), + CheckDestroy: acctest.CheckDestroyNoop, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + // Step 1: Setup + { + ConfigDirectory: config.StaticDirectory("testdata/BucketACL/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.AlternateRegion()), + names.AttrBucket: knownvalue.NotNull(), + names.AttrExpectedBucketOwner: knownvalue.Null(), + "acl": knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrBucket)), + }, + }, + + // Step 2: 
Import command + { + ConfigDirectory: config.StaticDirectory("testdata/BucketACL/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ImportStateKind: resource.ImportCommandWithID, + ImportStateIdFunc: acctest.CrossRegionImportStateIdFunc(resourceName), + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "access_control_policy.0.grant.0.grantee.0.display_name", "access_control_policy.0.owner.0.display_name", + }, + }, + + // Step 3: Import block with Import ID + { + ConfigDirectory: config.StaticDirectory("testdata/BucketACL/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportStateIdFunc: acctest.CrossRegionImportStateIdFunc(resourceName), + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrBucket), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrExpectedBucketOwner), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New("acl"), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + }, + + // Step 4: Import block with Resource Identity + { + ConfigDirectory: config.StaticDirectory("testdata/BucketACL/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithResourceIdentity, + 
ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrBucket), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrExpectedBucketOwner), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New("acl"), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + }, + }, + }) +} + +// Resource Identity was added after v6.10.0 +func TestAccS3BucketACL_Identity_ExistingResource(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_s3_bucket_acl.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), + CheckDestroy: acctest.CheckDestroyNoop, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/BucketACL/basic_v6.10.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckBucketACLExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/BucketACL/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: 
[]plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + names.AttrBucket: knownvalue.NotNull(), + names.AttrExpectedBucketOwner: knownvalue.Null(), + "acl": knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrBucket)), + }, + }, + }, + }) +} + +// Resource Identity was added after v6.10.0 +func TestAccS3BucketACL_Identity_ExistingResource_NoRefresh_NoChange(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_s3_bucket_acl.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), + CheckDestroy: acctest.CheckDestroyNoop, + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/BucketACL/basic_v6.10.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckBucketACLExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/BucketACL/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + 
ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + }, + }) +} diff --git a/internal/service/s3/bucket_acl_test.go b/internal/service/s3/bucket_acl_test.go index a021c76a9f79..482d74251319 100644 --- a/internal/service/s3/bucket_acl_test.go +++ b/internal/service/s3/bucket_acl_test.go @@ -20,7 +20,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/names" ) -func TestBucketACLParseResourceID(t *testing.T) { +func TestParseBucketACLResourceID(t *testing.T) { t.Parallel() testCases := []struct { @@ -58,168 +58,168 @@ func TestBucketACLParseResourceID(t *testing.T) { }, { TestName: "valid ID with bucket", - InputID: tfs3.BucketACLCreateResourceID("example", "", ""), + InputID: tfs3.CreateBucketACLResourceID("example", "", ""), ExpectedACL: "", ExpectedBucket: "example", ExpectedBucketOwner: "", }, { TestName: "valid ID with bucket that has hyphens", - InputID: tfs3.BucketACLCreateResourceID("my-example-bucket", "", ""), + InputID: tfs3.CreateBucketACLResourceID("my-example-bucket", "", ""), ExpectedACL: "", ExpectedBucket: "my-example-bucket", ExpectedBucketOwner: "", }, { TestName: "valid ID with bucket that has dot and hyphens", - InputID: tfs3.BucketACLCreateResourceID("my-example.bucket", "", ""), + InputID: tfs3.CreateBucketACLResourceID("my-example.bucket", "", ""), ExpectedACL: "", ExpectedBucket: "my-example.bucket", ExpectedBucketOwner: "", }, { TestName: "valid ID with bucket that has dots, hyphen, and numbers", - InputID: tfs3.BucketACLCreateResourceID("my-example.bucket.4000", "", ""), + InputID: tfs3.CreateBucketACLResourceID("my-example.bucket.4000", "", ""), ExpectedACL: "", ExpectedBucket: 
"my-example.bucket.4000", ExpectedBucketOwner: "", }, { TestName: "valid ID with bucket and acl", - InputID: tfs3.BucketACLCreateResourceID("example", "", string(types.BucketCannedACLPrivate)), + InputID: tfs3.CreateBucketACLResourceID("example", "", string(types.BucketCannedACLPrivate)), ExpectedACL: string(types.BucketCannedACLPrivate), ExpectedBucket: "example", ExpectedBucketOwner: "", }, { TestName: "valid ID with bucket and acl that has hyphens", - InputID: tfs3.BucketACLCreateResourceID("example", "", string(types.BucketCannedACLPublicReadWrite)), + InputID: tfs3.CreateBucketACLResourceID("example", "", string(types.BucketCannedACLPublicReadWrite)), ExpectedACL: string(types.BucketCannedACLPublicReadWrite), ExpectedBucket: "example", ExpectedBucketOwner: "", }, { TestName: "valid ID with bucket that has dot, hyphen, and number and acl that has hyphens", - InputID: tfs3.BucketACLCreateResourceID("my-example.bucket.4000", "", string(types.BucketCannedACLPublicReadWrite)), + InputID: tfs3.CreateBucketACLResourceID("my-example.bucket.4000", "", string(types.BucketCannedACLPublicReadWrite)), ExpectedACL: string(types.BucketCannedACLPublicReadWrite), ExpectedBucket: "my-example.bucket.4000", ExpectedBucketOwner: "", }, { TestName: "valid ID with bucket and bucket owner", - InputID: tfs3.BucketACLCreateResourceID("example", acctest.Ct12Digit, ""), + InputID: tfs3.CreateBucketACLResourceID("example", acctest.Ct12Digit, ""), ExpectedACL: "", ExpectedBucket: "example", ExpectedBucketOwner: acctest.Ct12Digit, }, { TestName: "valid ID with bucket that has dot, hyphen, and number and bucket owner", - InputID: tfs3.BucketACLCreateResourceID("my-example.bucket.4000", acctest.Ct12Digit, ""), + InputID: tfs3.CreateBucketACLResourceID("my-example.bucket.4000", acctest.Ct12Digit, ""), ExpectedACL: "", ExpectedBucket: "my-example.bucket.4000", ExpectedBucketOwner: acctest.Ct12Digit, }, { TestName: "valid ID with bucket, bucket owner, and acl", - InputID: 
tfs3.BucketACLCreateResourceID("example", acctest.Ct12Digit, string(types.BucketCannedACLPrivate)), + InputID: tfs3.CreateBucketACLResourceID("example", acctest.Ct12Digit, string(types.BucketCannedACLPrivate)), ExpectedACL: string(types.BucketCannedACLPrivate), ExpectedBucket: "example", ExpectedBucketOwner: acctest.Ct12Digit, }, { TestName: "valid ID with bucket, bucket owner, and acl that has hyphens", - InputID: tfs3.BucketACLCreateResourceID("example", acctest.Ct12Digit, string(types.BucketCannedACLPublicReadWrite)), + InputID: tfs3.CreateBucketACLResourceID("example", acctest.Ct12Digit, string(types.BucketCannedACLPublicReadWrite)), ExpectedACL: string(types.BucketCannedACLPublicReadWrite), ExpectedBucket: "example", ExpectedBucketOwner: acctest.Ct12Digit, }, { TestName: "valid ID with bucket that has dot, hyphen, and numbers, bucket owner, and acl that has hyphens", - InputID: tfs3.BucketACLCreateResourceID("my-example.bucket.4000", acctest.Ct12Digit, string(types.BucketCannedACLPublicReadWrite)), + InputID: tfs3.CreateBucketACLResourceID("my-example.bucket.4000", acctest.Ct12Digit, string(types.BucketCannedACLPublicReadWrite)), ExpectedACL: string(types.BucketCannedACLPublicReadWrite), ExpectedBucket: "my-example.bucket.4000", ExpectedBucketOwner: acctest.Ct12Digit, }, { TestName: "valid ID with bucket (pre-2018, us-east-1)", //lintignore:AWSAT003 - InputID: tfs3.BucketACLCreateResourceID("Example", "", ""), + InputID: tfs3.CreateBucketACLResourceID("Example", "", ""), ExpectedACL: "", ExpectedBucket: "Example", ExpectedBucketOwner: "", }, { TestName: "valid ID with bucket (pre-2018, us-east-1) that has underscores", //lintignore:AWSAT003 - InputID: tfs3.BucketACLCreateResourceID("My_Example_Bucket", "", ""), + InputID: tfs3.CreateBucketACLResourceID("My_Example_Bucket", "", ""), ExpectedACL: "", ExpectedBucket: "My_Example_Bucket", ExpectedBucketOwner: "", }, { TestName: "valid ID with bucket (pre-2018, us-east-1) that has underscore, dot, and hyphens", 
//lintignore:AWSAT003 - InputID: tfs3.BucketACLCreateResourceID("My_Example-Bucket.local", "", ""), + InputID: tfs3.CreateBucketACLResourceID("My_Example-Bucket.local", "", ""), ExpectedACL: "", ExpectedBucket: "My_Example-Bucket.local", ExpectedBucketOwner: "", }, { TestName: "valid ID with bucket (pre-2018, us-east-1) that has underscore, dots, hyphen, and numbers", //lintignore:AWSAT003 - InputID: tfs3.BucketACLCreateResourceID("My_Example-Bucket.4000", "", ""), + InputID: tfs3.CreateBucketACLResourceID("My_Example-Bucket.4000", "", ""), ExpectedACL: "", ExpectedBucket: "My_Example-Bucket.4000", ExpectedBucketOwner: "", }, { TestName: "valid ID with bucket (pre-2018, us-east-1) and acl", //lintignore:AWSAT003 - InputID: tfs3.BucketACLCreateResourceID("Example", "", string(types.BucketCannedACLPrivate)), + InputID: tfs3.CreateBucketACLResourceID("Example", "", string(types.BucketCannedACLPrivate)), ExpectedACL: string(types.BucketCannedACLPrivate), ExpectedBucket: "Example", ExpectedBucketOwner: "", }, { TestName: "valid ID with bucket (pre-2018, us-east-1) and acl that has underscores", //lintignore:AWSAT003 - InputID: tfs3.BucketACLCreateResourceID("My_Example_Bucket", "", string(types.BucketCannedACLPublicReadWrite)), + InputID: tfs3.CreateBucketACLResourceID("My_Example_Bucket", "", string(types.BucketCannedACLPublicReadWrite)), ExpectedACL: string(types.BucketCannedACLPublicReadWrite), ExpectedBucket: "My_Example_Bucket", ExpectedBucketOwner: "", }, { TestName: "valid ID with bucket (pre-2018, us-east-1) that has underscore, dot, hyphen, and number and acl that has hyphens", //lintignore:AWSAT003 - InputID: tfs3.BucketACLCreateResourceID("My_Example-Bucket.4000", "", string(types.BucketCannedACLPublicReadWrite)), + InputID: tfs3.CreateBucketACLResourceID("My_Example-Bucket.4000", "", string(types.BucketCannedACLPublicReadWrite)), ExpectedACL: string(types.BucketCannedACLPublicReadWrite), ExpectedBucket: "My_Example-Bucket.4000", ExpectedBucketOwner: "", }, { 
TestName: "valid ID with bucket (pre-2018, us-east-1) and bucket owner", //lintignore:AWSAT003 - InputID: tfs3.BucketACLCreateResourceID("Example", acctest.Ct12Digit, ""), + InputID: tfs3.CreateBucketACLResourceID("Example", acctest.Ct12Digit, ""), ExpectedACL: "", ExpectedBucket: "Example", ExpectedBucketOwner: acctest.Ct12Digit, }, { TestName: "valid ID with bucket (pre-2018, us-east-1) that has underscore, dot, hyphen, and number and bucket owner", //lintignore:AWSAT003 - InputID: tfs3.BucketACLCreateResourceID("My_Example-Bucket.4000", acctest.Ct12Digit, ""), + InputID: tfs3.CreateBucketACLResourceID("My_Example-Bucket.4000", acctest.Ct12Digit, ""), ExpectedACL: "", ExpectedBucket: "My_Example-Bucket.4000", ExpectedBucketOwner: acctest.Ct12Digit, }, { TestName: "valid ID with bucket (pre-2018, us-east-1), bucket owner, and acl", //lintignore:AWSAT003 - InputID: tfs3.BucketACLCreateResourceID("Example", acctest.Ct12Digit, string(types.BucketCannedACLPrivate)), + InputID: tfs3.CreateBucketACLResourceID("Example", acctest.Ct12Digit, string(types.BucketCannedACLPrivate)), ExpectedACL: string(types.BucketCannedACLPrivate), ExpectedBucket: "Example", ExpectedBucketOwner: acctest.Ct12Digit, }, { TestName: "valid ID with bucket (pre-2018, us-east-1), bucket owner, and acl that has hyphens", //lintignore:AWSAT003 - InputID: tfs3.BucketACLCreateResourceID("Example", acctest.Ct12Digit, string(types.BucketCannedACLPublicReadWrite)), + InputID: tfs3.CreateBucketACLResourceID("Example", acctest.Ct12Digit, string(types.BucketCannedACLPublicReadWrite)), ExpectedACL: string(types.BucketCannedACLPublicReadWrite), ExpectedBucket: "Example", ExpectedBucketOwner: acctest.Ct12Digit, }, { TestName: "valid ID with bucket (pre-2018, us-east-1) that has underscore, dot, hyphen, and numbers, bucket owner, and acl that has hyphens", //lintignore:AWSAT003 - InputID: tfs3.BucketACLCreateResourceID("My_Example-bucket.4000", acctest.Ct12Digit, string(types.BucketCannedACLPublicReadWrite)), + 
InputID: tfs3.CreateBucketACLResourceID("My_Example-bucket.4000", acctest.Ct12Digit, string(types.BucketCannedACLPublicReadWrite)), ExpectedACL: string(types.BucketCannedACLPublicReadWrite), ExpectedBucket: "My_Example-bucket.4000", ExpectedBucketOwner: acctest.Ct12Digit, @@ -230,7 +230,7 @@ func TestBucketACLParseResourceID(t *testing.T) { t.Run(testCase.TestName, func(t *testing.T) { t.Parallel() - gotBucket, gotExpectedBucketOwner, gotAcl, err := tfs3.BucketACLParseResourceID(testCase.InputID) + gotBucket, gotExpectedBucketOwner, gotAcl, err := tfs3.ParseBucketACLResourceID(testCase.InputID) if err == nil && testCase.ExpectError { t.Fatalf("expected error") @@ -286,6 +286,11 @@ func TestAccS3BucketACL_basic(t *testing.T) { ResourceName: resourceName, ImportState: true, ImportStateVerify: true, + // DisplayName is deprecated and will be inconsistently returned between July and November 2025. + ImportStateVerifyIgnore: []string{ + "access_control_policy.0.grant.0.grantee.0.display_name", + "access_control_policy.0.owner.0.display_name", + }, }, }, }) @@ -452,6 +457,12 @@ func TestAccS3BucketACL_updateACL(t *testing.T) { ResourceName: resourceName, ImportState: true, ImportStateVerify: true, + // DisplayName is deprecated and will be inconsistently returned between July and November 2025. + ImportStateVerifyIgnore: []string{ + "access_control_policy.0.grant.0.grantee.0.display_name", + "access_control_policy.0.grant.1.grantee.0.display_name", + "access_control_policy.0.owner.0.display_name", + }, }, }, }) @@ -494,6 +505,15 @@ func TestAccS3BucketACL_updateGrant(t *testing.T) { ResourceName: resourceName, ImportState: true, ImportStateVerify: true, + // DisplayName is deprecated and will be inconsistently returned between July and November 2025. 
+ ImportStateVerifyIgnore: []string{ + "access_control_policy.0.grant.0.grantee.0.display_name", + "access_control_policy.0.grant.1.grantee.0.display_name", + "access_control_policy.0.owner.0.display_name", + // Set order is not guaranteed on import. Permissions may be swapped. + "access_control_policy.0.grant.0.permission", + "access_control_policy.0.grant.1.permission", + }, }, { Config: testAccBucketACLConfig_grantsUpdate(bucketName), @@ -524,6 +544,15 @@ func TestAccS3BucketACL_updateGrant(t *testing.T) { ResourceName: resourceName, ImportState: true, ImportStateVerify: true, + // DisplayName is deprecated and will be inconsistently returned between July and November 2025. + ImportStateVerifyIgnore: []string{ + "access_control_policy.0.grant.0.grantee.0.display_name", + "access_control_policy.0.grant.1.grantee.0.display_name", + "access_control_policy.0.owner.0.display_name", + // Set order is not guaranteed on import. Permissions may be swapped. + "access_control_policy.0.grant.0.permission", + "access_control_policy.0.grant.1.permission", + }, }, }, }) @@ -574,6 +603,15 @@ func TestAccS3BucketACL_ACLToGrant(t *testing.T) { ResourceName: resourceName, ImportState: true, ImportStateVerify: true, + // DisplayName is deprecated and will be inconsistently returned between July and November 2025. + ImportStateVerifyIgnore: []string{ + "access_control_policy.0.grant.0.grantee.0.display_name", + "access_control_policy.0.grant.1.grantee.0.display_name", + "access_control_policy.0.owner.0.display_name", + // Set order is not guaranteed on import. Permissions may be swapped. + "access_control_policy.0.grant.0.permission", + "access_control_policy.0.grant.1.permission", + }, }, }, }) @@ -617,6 +655,11 @@ func TestAccS3BucketACL_grantToACL(t *testing.T) { ResourceName: resourceName, ImportState: true, ImportStateVerify: true, + // DisplayName is deprecated and will be inconsistently returned between July and November 2025. 
+ ImportStateVerifyIgnore: []string{ + "access_control_policy.0.grant.0.grantee.0.display_name", + "access_control_policy.0.owner.0.display_name", + }, }, }, }) @@ -647,7 +690,7 @@ func testAccCheckBucketACLExists(ctx context.Context, n string) resource.TestChe return fmt.Errorf("Not found: %s", n) } - bucket, expectedBucketOwner, _, err := tfs3.BucketACLParseResourceID(rs.Primary.ID) + bucket, expectedBucketOwner, _, err := tfs3.ParseBucketACLResourceID(rs.Primary.ID) if err != nil { return err } diff --git a/internal/service/s3/bucket_analytics_configuration.go b/internal/service/s3/bucket_analytics_configuration.go index 43d1293ccee1..953e8001d8e4 100644 --- a/internal/service/s3/bucket_analytics_configuration.go +++ b/internal/service/s3/bucket_analytics_configuration.go @@ -158,7 +158,7 @@ func resourceBucketAnalyticsConfigurationPut(ctx context.Context, d *schema.Reso AnalyticsConfiguration: analyticsConfiguration, } - _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, bucketPropagationTimeout, func() (any, error) { + _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, bucketPropagationTimeout, func(ctx context.Context) (any, error) { return conn.PutBucketAnalyticsConfiguration(ctx, input) }, errCodeNoSuchBucket) @@ -173,7 +173,7 @@ func resourceBucketAnalyticsConfigurationPut(ctx context.Context, d *schema.Reso if d.IsNewResource() { d.SetId(fmt.Sprintf("%s:%s", bucket, name)) - _, err = tfresource.RetryWhenNotFound(ctx, bucketPropagationTimeout, func() (any, error) { + _, err = tfresource.RetryWhenNotFound(ctx, bucketPropagationTimeout, func(ctx context.Context) (any, error) { return findAnalyticsConfiguration(ctx, conn, bucket, name) }) @@ -249,7 +249,7 @@ func resourceBucketAnalyticsConfigurationDelete(ctx context.Context, d *schema.R return sdkdiag.AppendErrorf(diags, "deleting S3 Bucket Analytics Configuration (%s): %s", d.Id(), err) } - _, err = tfresource.RetryUntilNotFound(ctx, bucketPropagationTimeout, func() (any, error) { + _, err = 
tfresource.RetryUntilNotFound(ctx, bucketPropagationTimeout, func(ctx context.Context) (any, error) { return findAnalyticsConfiguration(ctx, conn, bucket, name) }) diff --git a/internal/service/s3/bucket_cors_configuration.go b/internal/service/s3/bucket_cors_configuration.go index 384725bbbe3d..f44ebdbf968c 100644 --- a/internal/service/s3/bucket_cors_configuration.go +++ b/internal/service/s3/bucket_cors_configuration.go @@ -24,6 +24,12 @@ import ( ) // @SDKResource("aws_s3_bucket_cors_configuration", name="Bucket CORS Configuration") +// @IdentityAttribute("bucket") +// @IdentityAttribute("expected_bucket_owner", optional="true") +// @ImportIDHandler("resourceImportID") +// @Testing(preIdentityVersion="v6.9.0") +// @Testing(importIgnore="cors_rule.0.max_age_seconds") +// @Testing(plannableImportAction="NoOp") func resourceBucketCorsConfiguration() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceBucketCorsConfigurationCreate, @@ -31,10 +37,6 @@ func resourceBucketCorsConfiguration() *schema.Resource { UpdateWithoutTimeout: resourceBucketCorsConfigurationUpdate, DeleteWithoutTimeout: resourceBucketCorsConfigurationDelete, - Importer: &schema.ResourceImporter{ - StateContext: schema.ImportStatePassthroughContext, - }, - Schema: map[string]*schema.Schema{ names.AttrBucket: { Type: schema.TypeString, @@ -109,7 +111,7 @@ func resourceBucketCorsConfigurationCreate(ctx context.Context, d *schema.Resour input.ExpectedBucketOwner = aws.String(expectedBucketOwner) } - _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, bucketPropagationTimeout, func() (any, error) { + _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, bucketPropagationTimeout, func(ctx context.Context) (any, error) { return conn.PutBucketCors(ctx, input) }, errCodeNoSuchBucket) @@ -123,7 +125,7 @@ func resourceBucketCorsConfigurationCreate(ctx context.Context, d *schema.Resour d.SetId(createResourceID(bucket, expectedBucketOwner)) - _, err = tfresource.RetryWhenNotFound(ctx, 
bucketPropagationTimeout, func() (any, error) { + _, err = tfresource.RetryWhenNotFound(ctx, bucketPropagationTimeout, func(ctx context.Context) (any, error) { return findCORSRules(ctx, conn, bucket, expectedBucketOwner) }) @@ -230,7 +232,7 @@ func resourceBucketCorsConfigurationDelete(ctx context.Context, d *schema.Resour return sdkdiag.AppendErrorf(diags, "deleting S3 Bucket CORS Configuration (%s): %s", d.Id(), err) } - _, err = tfresource.RetryUntilNotFound(ctx, bucketPropagationTimeout, func() (any, error) { + _, err = tfresource.RetryUntilNotFound(ctx, bucketPropagationTimeout, func(ctx context.Context) (any, error) { return findCORSRules(ctx, conn, bucket, expectedBucketOwner) }) diff --git a/internal/service/s3/bucket_cors_configuration_identity_gen_test.go b/internal/service/s3/bucket_cors_configuration_identity_gen_test.go new file mode 100644 index 000000000000..f14816f961ad --- /dev/null +++ b/internal/service/s3/bucket_cors_configuration_identity_gen_test.go @@ -0,0 +1,318 @@ +// Code generated by internal/generate/identitytests/main.go; DO NOT EDIT. 
+ +package s3_test + +import ( + "testing" + + "github.com/hashicorp/terraform-plugin-testing/config" + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/knownvalue" + "github.com/hashicorp/terraform-plugin-testing/plancheck" + "github.com/hashicorp/terraform-plugin-testing/statecheck" + "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" + "github.com/hashicorp/terraform-plugin-testing/tfversion" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + tfknownvalue "github.com/hashicorp/terraform-provider-aws/internal/acctest/knownvalue" + tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccS3BucketCORSConfiguration_Identity_Basic(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_s3_bucket_cors_configuration.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), + CheckDestroy: testAccCheckBucketCORSConfigurationDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + // Step 1: Setup + { + ConfigDirectory: config.StaticDirectory("testdata/BucketCORSConfiguration/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckBucketCORSConfigurationExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + 
statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + names.AttrBucket: knownvalue.NotNull(), + names.AttrExpectedBucketOwner: knownvalue.Null(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrBucket)), + }, + }, + + // Step 2: Import command + { + ConfigDirectory: config.StaticDirectory("testdata/BucketCORSConfiguration/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ImportStateKind: resource.ImportCommandWithID, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "cors_rule.0.max_age_seconds", + }, + }, + + // Step 3: Import block with Import ID + { + ConfigDirectory: config.StaticDirectory("testdata/BucketCORSConfiguration/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrBucket), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrExpectedBucketOwner), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + }, + }, + }, + + // Step 4: Import block with Resource Identity + { + ConfigDirectory: config.StaticDirectory("testdata/BucketCORSConfiguration/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithResourceIdentity, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + 
plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrBucket), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrExpectedBucketOwner), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + }, + }, + }, + }, + }) +} + +func TestAccS3BucketCORSConfiguration_Identity_RegionOverride(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_s3_bucket_cors_configuration.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), + CheckDestroy: acctest.CheckDestroyNoop, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + // Step 1: Setup + { + ConfigDirectory: config.StaticDirectory("testdata/BucketCORSConfiguration/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.AlternateRegion()), + names.AttrBucket: knownvalue.NotNull(), + names.AttrExpectedBucketOwner: knownvalue.Null(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrBucket)), + }, + }, + + // Step 2: Import command + { + ConfigDirectory: config.StaticDirectory("testdata/BucketCORSConfiguration/region_override/"), + ConfigVariables: 
config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ImportStateKind: resource.ImportCommandWithID, + ImportStateIdFunc: acctest.CrossRegionImportStateIdFunc(resourceName), + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "cors_rule.0.max_age_seconds", + }, + }, + + // Step 3: Import block with Import ID + { + ConfigDirectory: config.StaticDirectory("testdata/BucketCORSConfiguration/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportStateIdFunc: acctest.CrossRegionImportStateIdFunc(resourceName), + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrBucket), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrExpectedBucketOwner), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + }, + + // Step 4: Import block with Resource Identity + { + ConfigDirectory: config.StaticDirectory("testdata/BucketCORSConfiguration/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithResourceIdentity, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrBucket), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, 
tfjsonpath.New(names.AttrExpectedBucketOwner), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + }, + }, + }) +} + +// Resource Identity was added after v6.9.0 +func TestAccS3BucketCORSConfiguration_Identity_ExistingResource(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_s3_bucket_cors_configuration.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), + CheckDestroy: testAccCheckBucketCORSConfigurationDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/BucketCORSConfiguration/basic_v6.9.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckBucketCORSConfigurationExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/BucketCORSConfiguration/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, 
map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + names.AttrBucket: knownvalue.NotNull(), + names.AttrExpectedBucketOwner: knownvalue.Null(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrBucket)), + }, + }, + }, + }) +} + +// Resource Identity was added after v6.9.0 +func TestAccS3BucketCORSConfiguration_Identity_ExistingResource_NoRefresh_NoChange(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_s3_bucket_cors_configuration.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), + CheckDestroy: testAccCheckBucketCORSConfigurationDestroy(ctx), + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/BucketCORSConfiguration/basic_v6.9.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckBucketCORSConfigurationExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/BucketCORSConfiguration/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, 
plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + }, + }) +} diff --git a/internal/service/s3/bucket_identity_gen_test.go b/internal/service/s3/bucket_identity_gen_test.go index 7fe7862a1b2a..a61e8a6adfd8 100644 --- a/internal/service/s3/bucket_identity_gen_test.go +++ b/internal/service/s3/bucket_identity_gen_test.go @@ -16,15 +16,17 @@ import ( "github.com/hashicorp/terraform-plugin-testing/tfversion" "github.com/hashicorp/terraform-provider-aws/internal/acctest" tfknownvalue "github.com/hashicorp/terraform-provider-aws/internal/acctest/knownvalue" + tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" "github.com/hashicorp/terraform-provider-aws/names" ) func TestAccS3Bucket_Identity_Basic(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_s3_bucket.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ TerraformVersionChecks: []tfversion.TerraformVersionCheck{ tfversion.SkipBelow(tfversion.Version1_12_0), }, @@ -111,7 +113,7 @@ func TestAccS3Bucket_Identity_RegionOverride(t *testing.T) { resourceName := "aws_s3_bucket.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ TerraformVersionChecks: []tfversion.TerraformVersionCheck{ tfversion.SkipBelow(tfversion.Version1_12_0), }, @@ -192,3 +194,133 @@ func TestAccS3Bucket_Identity_RegionOverride(t *testing.T) { }, }) } + +func TestAccS3Bucket_Identity_ExistingResource(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_s3_bucket.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + 
acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), + CheckDestroy: testAccCheckBucketDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/Bucket/basic_v5.100.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckBucketExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: v6.0 Identity error + { + ConfigDirectory: config.StaticDirectory("testdata/Bucket/basic_v6.0.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckBucketExists(ctx, resourceName), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: knownvalue.Null(), + names.AttrRegion: knownvalue.Null(), + names.AttrBucket: knownvalue.Null(), + }), + }, + }, + + // Step 3: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Bucket/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + 
plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + names.AttrBucket: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrBucket)), + }, + }, + }, + }) +} + +func TestAccS3Bucket_Identity_ExistingResource_NoRefresh_NoChange(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_s3_bucket.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), + CheckDestroy: testAccCheckBucketDestroy(ctx), + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/Bucket/basic_v5.100.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckBucketExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Bucket/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: 
resource.ComposeAggregateTestCheckFunc( + testAccCheckBucketExists(ctx, resourceName), + ), + }, + }, + }) +} diff --git a/internal/service/s3/bucket_intelligent_tiering_configuration.go b/internal/service/s3/bucket_intelligent_tiering_configuration.go index 65f5ab56eb09..9690b4f7e614 100644 --- a/internal/service/s3/bucket_intelligent_tiering_configuration.go +++ b/internal/service/s3/bucket_intelligent_tiering_configuration.go @@ -123,7 +123,7 @@ func resourceBucketIntelligentTieringConfigurationPut(ctx context.Context, d *sc IntelligentTieringConfiguration: intelligentTieringConfiguration, } - _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, bucketPropagationTimeout, func() (any, error) { + _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, bucketPropagationTimeout, func(ctx context.Context) (any, error) { return conn.PutBucketIntelligentTieringConfiguration(ctx, input) }, errCodeNoSuchBucket) @@ -138,7 +138,7 @@ func resourceBucketIntelligentTieringConfigurationPut(ctx context.Context, d *sc if d.IsNewResource() { d.SetId(BucketIntelligentTieringConfigurationCreateResourceID(bucket, name)) - _, err = tfresource.RetryWhenNotFound(ctx, bucketPropagationTimeout, func() (any, error) { + _, err = tfresource.RetryWhenNotFound(ctx, bucketPropagationTimeout, func(ctx context.Context) (any, error) { return findIntelligentTieringConfiguration(ctx, conn, bucket, name) }) @@ -219,7 +219,7 @@ func resourceBucketIntelligentTieringConfigurationDelete(ctx context.Context, d return sdkdiag.AppendErrorf(diags, "deleting S3 Bucket Intelligent-Tiering Configuration (%s): %s", d.Id(), err) } - _, err = tfresource.RetryUntilNotFound(ctx, bucketPropagationTimeout, func() (any, error) { + _, err = tfresource.RetryUntilNotFound(ctx, bucketPropagationTimeout, func(ctx context.Context) (any, error) { return findIntelligentTieringConfiguration(ctx, conn, bucket, name) }) diff --git a/internal/service/s3/bucket_inventory.go b/internal/service/s3/bucket_inventory.go index 
3fd05d6f2db0..0af863452d97 100644 --- a/internal/service/s3/bucket_inventory.go +++ b/internal/service/s3/bucket_inventory.go @@ -219,7 +219,7 @@ func resourceBucketInventoryPut(ctx context.Context, d *schema.ResourceData, met InventoryConfiguration: inventoryConfiguration, } - _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, bucketPropagationTimeout, func() (any, error) { + _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, bucketPropagationTimeout, func(ctx context.Context) (any, error) { return conn.PutBucketInventoryConfiguration(ctx, input) }, errCodeNoSuchBucket) @@ -234,7 +234,7 @@ func resourceBucketInventoryPut(ctx context.Context, d *schema.ResourceData, met if d.IsNewResource() { d.SetId(fmt.Sprintf("%s:%s", bucket, name)) - _, err = tfresource.RetryWhenNotFound(ctx, bucketPropagationTimeout, func() (any, error) { + _, err = tfresource.RetryWhenNotFound(ctx, bucketPropagationTimeout, func(ctx context.Context) (any, error) { return findInventoryConfiguration(ctx, conn, bucket, name) }) @@ -323,7 +323,7 @@ func resourceBucketInventoryDelete(ctx context.Context, d *schema.ResourceData, return sdkdiag.AppendErrorf(diags, "deleting S3 Bucket Inventory (%s): %s", d.Id(), err) } - _, err = tfresource.RetryUntilNotFound(ctx, bucketPropagationTimeout, func() (any, error) { + _, err = tfresource.RetryUntilNotFound(ctx, bucketPropagationTimeout, func(ctx context.Context) (any, error) { return findInventoryConfiguration(ctx, conn, bucket, name) }) diff --git a/internal/service/s3/bucket_lifecycle_configuration.go b/internal/service/s3/bucket_lifecycle_configuration.go index 06abc45606dc..fe2014ccaa35 100644 --- a/internal/service/s3/bucket_lifecycle_configuration.go +++ b/internal/service/s3/bucket_lifecycle_configuration.go @@ -110,12 +110,6 @@ func (r *bucketLifecycleConfigurationResource) Schema(ctx context.Context, reque listvalidator.SizeAtLeast(1), }, NestedObject: schema.NestedBlockObject{ - Validators: []validator.Object{ - 
tfobjectvalidator.WarnExactlyOneOfChildren( - path.MatchRelative().AtName(names.AttrFilter), - path.MatchRelative().AtName(names.AttrPrefix), - ), - }, Attributes: map[string]schema.Attribute{ names.AttrID: schema.StringAttribute{ Required: true, @@ -298,11 +292,6 @@ func (r *bucketLifecycleConfigurationResource) Schema(ctx context.Context, reque Attributes: map[string]schema.Attribute{ "newer_noncurrent_versions": schema.Int32Attribute{ Optional: true, - Computed: true, // Because of schema change - PlanModifiers: []planmodifier.Int32{ - tfint32planmodifier.NullValue(), - int32planmodifier.UseStateForUnknown(), - }, Validators: []validator.Int32{ int32validator.AtLeast(1), }, @@ -325,11 +314,6 @@ func (r *bucketLifecycleConfigurationResource) Schema(ctx context.Context, reque Attributes: map[string]schema.Attribute{ "newer_noncurrent_versions": schema.Int32Attribute{ Optional: true, - Computed: true, // Because of schema change - PlanModifiers: []planmodifier.Int32{ - tfint32planmodifier.NullValue(), - int32planmodifier.UseStateForUnknown(), - }, Validators: []validator.Int32{ int32validator.AtLeast(1), }, @@ -423,7 +407,7 @@ func (r *bucketLifecycleConfigurationResource) Create(ctx context.Context, reque input.LifecycleConfiguration = &lifecycleConfiguraton - _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, bucketPropagationTimeout, func() (any, error) { + _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, bucketPropagationTimeout, func(ctx context.Context) (any, error) { return conn.PutBucketLifecycleConfiguration(ctx, &input) }, errCodeNoSuchBucket) if tfawserr.ErrMessageContains(err, errCodeInvalidArgument, "LifecycleConfiguration is not valid, expected CreateBucketConfiguration") { @@ -469,24 +453,21 @@ func (r *bucketLifecycleConfigurationResource) Read(ctx context.Context, request lifecycleConfigurationRulesSteadyTimeout = 2 * time.Minute ) var lastOutput, output *s3.GetBucketLifecycleConfigurationOutput - err := retry.RetryContext(ctx, 
lifecycleConfigurationRulesSteadyTimeout, func() *retry.RetryError { + err := tfresource.Retry(ctx, lifecycleConfigurationRulesSteadyTimeout, func(ctx context.Context) *tfresource.RetryError { var err error output, err = findBucketLifecycleConfiguration(ctx, conn, bucket, expectedBucketOwner) if err != nil { - return retry.NonRetryableError(err) + return tfresource.NonRetryableError(err) } if lastOutput == nil || !lifecycleConfigEqual(lastOutput.TransitionDefaultMinimumObjectSize, lastOutput.Rules, output.TransitionDefaultMinimumObjectSize, output.Rules) { lastOutput = output - return retry.RetryableError(fmt.Errorf("S3 Bucket Lifecycle Configuration (%s) has not stablized; retrying", bucket)) + return tfresource.RetryableError(fmt.Errorf("S3 Bucket Lifecycle Configuration (%s) has not stablized; retrying", bucket)) } return nil }) - if tfresource.TimedOut(err) { - output, err = findBucketLifecycleConfiguration(ctx, conn, bucket, expectedBucketOwner) - } if tfresource.NotFound(err) { response.Diagnostics.Append(fwdiag.NewResourceNotFoundWarningDiagnostic(err)) response.State.RemoveResource(ctx) @@ -542,7 +523,7 @@ func (r *bucketLifecycleConfigurationResource) Update(ctx context.Context, reque input.LifecycleConfiguration = &lifecycleConfiguraton - _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, bucketPropagationTimeout, func() (any, error) { + _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, bucketPropagationTimeout, func(ctx context.Context) (any, error) { return conn.PutBucketLifecycleConfiguration(ctx, &input) }, errCodeNoSuchBucket) if err != nil { @@ -588,7 +569,7 @@ func (r *bucketLifecycleConfigurationResource) Delete(ctx context.Context, reque } _, err := conn.DeleteBucketLifecycle(ctx, &input) - if tfawserr.ErrCodeEquals(err, errCodeNoSuchBucket, errCodeNoSuchLifecycleConfiguration) { + if tfawserr.ErrCodeEquals(err, errCodeNoSuchBucket, errCodeNoSuchLifecycleConfiguration, errCodeMethodNotAllowed) { return } if err != nil { @@ -596,7 +577,7 @@ 
func (r *bucketLifecycleConfigurationResource) Delete(ctx context.Context, reque return } - _, err = tfresource.RetryUntilNotFound(ctx, bucketPropagationTimeout, func() (any, error) { + _, err = tfresource.RetryUntilNotFound(ctx, bucketPropagationTimeout, func(ctx context.Context) (any, error) { return findBucketLifecycleConfiguration(ctx, conn, bucket, expectedBucketOwner) }) if err != nil { diff --git a/internal/service/s3/bucket_lifecycle_configuration_test.go b/internal/service/s3/bucket_lifecycle_configuration_test.go index 22d7562e8638..0ff0d3d23d33 100644 --- a/internal/service/s3/bucket_lifecycle_configuration_test.go +++ b/internal/service/s3/bucket_lifecycle_configuration_test.go @@ -12,7 +12,6 @@ import ( "github.com/aws/aws-sdk-go-v2/service/s3" "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-testing/compare" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" @@ -1666,6 +1665,112 @@ func TestAccS3BucketLifecycleConfiguration_nonCurrentVersionExpiration(t *testin }) } +func TestAccS3BucketLifecycleConfiguration_nonCurrentVersionExpiration_RemoveNoncurrentVersions(t *testing.T) { + ctx := acctest.Context(t) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_s3_bucket_lifecycle_configuration.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckBucketLifecycleConfigurationDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccBucketLifecycleConfigurationConfig_nonCurrentVersionExpiration_RemoveNoncurrentVersions_Setup(rName, 5, 90), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckBucketLifecycleConfigurationExists(ctx, 
resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrBucket), "aws_s3_bucket.test", tfjsonpath.New(names.AttrBucket), compare.ValuesSame()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrExpectedBucketOwner), knownvalue.StringExact("")), + statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New(names.AttrBucket), compare.ValuesSame()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRule), knownvalue.ListExact([]knownvalue.Check{ + knownvalue.ObjectExact(map[string]knownvalue.Check{ + "abort_incomplete_multipart_upload": checkAbortIncompleteMultipartUpload_None(), + "expiration": checkExpiration_None(), + names.AttrFilter: checkFilter_Prefix("config/"), + names.AttrID: knownvalue.StringExact(rName), + "noncurrent_version_expiration": checkNoncurrentVersionExpiration_VersionsAndDays(5, 90), + "noncurrent_version_transition": checkNoncurrentVersionTransitions(), + names.AttrPrefix: knownvalue.StringExact(""), + names.AttrStatus: knownvalue.StringExact(tfs3.LifecycleRuleStatusEnabled), + "transition": checkTransitions(), + }), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("transition_default_minimum_object_size"), knownvalue.StringExact("all_storage_classes_128K")), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRule), knownvalue.ListExact([]knownvalue.Check{ + knownvalue.ObjectExact(map[string]knownvalue.Check{ + "abort_incomplete_multipart_upload": checkAbortIncompleteMultipartUpload_None(), + "expiration": checkExpiration_None(), + names.AttrFilter: checkFilter_Prefix("config/"), + names.AttrID: knownvalue.StringExact(rName), + "noncurrent_version_expiration": 
checkNoncurrentVersionExpiration_VersionsAndDays(5, 90), + "noncurrent_version_transition": checkNoncurrentVersionTransitions(), + names.AttrPrefix: knownvalue.StringExact(""), + names.AttrStatus: knownvalue.StringExact(tfs3.LifecycleRuleStatusEnabled), + "transition": checkTransitions(), + }), + })), + }, + }, + }, + { + Config: testAccBucketLifecycleConfigurationConfig_nonCurrentVersionExpiration_RemoveNoncurrentVersions_Apply(rName, 90), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckBucketLifecycleConfigurationExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrBucket), "aws_s3_bucket.test", tfjsonpath.New(names.AttrBucket), compare.ValuesSame()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrExpectedBucketOwner), knownvalue.StringExact("")), + statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New(names.AttrBucket), compare.ValuesSame()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRule), knownvalue.ListExact([]knownvalue.Check{ + knownvalue.ObjectExact(map[string]knownvalue.Check{ + "abort_incomplete_multipart_upload": checkAbortIncompleteMultipartUpload_None(), + "expiration": checkExpiration_None(), + names.AttrFilter: checkFilter_Prefix("config/"), + names.AttrID: knownvalue.StringExact(rName), + "noncurrent_version_expiration": checkNoncurrentVersionExpiration_Days(90), + "noncurrent_version_transition": checkNoncurrentVersionTransitions(), + names.AttrPrefix: knownvalue.StringExact(""), + names.AttrStatus: knownvalue.StringExact(tfs3.LifecycleRuleStatusEnabled), + "transition": checkTransitions(), + }), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("transition_default_minimum_object_size"), knownvalue.StringExact("all_storage_classes_128K")), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + 
plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRule), knownvalue.ListExact([]knownvalue.Check{ + knownvalue.ObjectExact(map[string]knownvalue.Check{ + "abort_incomplete_multipart_upload": checkAbortIncompleteMultipartUpload_None(), + "expiration": checkExpiration_None(), + names.AttrFilter: checkFilter_Prefix("config/"), + names.AttrID: knownvalue.StringExact(rName), + "noncurrent_version_expiration": checkNoncurrentVersionExpiration_Days(90), + "noncurrent_version_transition": checkNoncurrentVersionTransitions(), + names.AttrPrefix: knownvalue.StringExact(""), + names.AttrStatus: knownvalue.StringExact(tfs3.LifecycleRuleStatusEnabled), + "transition": checkTransitions(), + }), + })), + }, + }, + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + func TestAccS3BucketLifecycleConfiguration_nonCurrentVersionTransition(t *testing.T) { ctx := acctest.Context(t) rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -3978,27 +4083,24 @@ func testAccCheckBucketLifecycleConfigurationExists(ctx context.Context, n strin lifecycleConfigurationRulesSteadyTimeout = 2 * time.Minute ) var lastOutput, output *s3.GetBucketLifecycleConfigurationOutput - err = retry.RetryContext(ctx, lifecycleConfigurationRulesSteadyTimeout, func() *retry.RetryError { + err = tfresource.Retry(ctx, lifecycleConfigurationRulesSteadyTimeout, func(ctx context.Context) *tfresource.RetryError { var err error output, err = tfs3.FindBucketLifecycleConfiguration(ctx, conn, bucket, expectedBucketOwner) if tfresource.NotFound(err) { - return retry.RetryableError(err) + return tfresource.RetryableError(err) } if err != nil { - return retry.NonRetryableError(err) + return tfresource.NonRetryableError(err) } if lastOutput == nil || !tfs3.LifecycleConfigEqual(lastOutput.TransitionDefaultMinimumObjectSize, lastOutput.Rules, 
output.TransitionDefaultMinimumObjectSize, output.Rules) { lastOutput = output - return retry.RetryableError(fmt.Errorf("S3 Bucket Lifecycle Configuration (%s) has not stablized; retrying", bucket)) + return tfresource.RetryableError(fmt.Errorf("S3 Bucket Lifecycle Configuration (%s) has not stablized; retrying", bucket)) } return nil }) - if tfresource.TimedOut(err) { - output, err = tfs3.FindBucketLifecycleConfiguration(ctx, conn, bucket, expectedBucketOwner) - } return err } @@ -4597,6 +4699,59 @@ resource "aws_s3_bucket_lifecycle_configuration" "test" { `, rName, nonCurrentDays) } +func testAccBucketLifecycleConfigurationConfig_nonCurrentVersionExpiration_RemoveNoncurrentVersions_Setup(rName string, nonCurrentVersions, nonCurrentDays int) string { + return fmt.Sprintf(` +resource "aws_s3_bucket" "test" { + bucket = %[1]q +} + +resource "aws_s3_bucket_lifecycle_configuration" "test" { + bucket = aws_s3_bucket.test.bucket + + rule { + id = %[1]q + + filter { + prefix = "config/" + } + + noncurrent_version_expiration { + newer_noncurrent_versions = %[2]d + noncurrent_days = %[3]d + } + + status = "Enabled" + } +} +`, rName, nonCurrentVersions, nonCurrentDays) +} + +func testAccBucketLifecycleConfigurationConfig_nonCurrentVersionExpiration_RemoveNoncurrentVersions_Apply(rName string, nonCurrentDays int) string { + return fmt.Sprintf(` +resource "aws_s3_bucket" "test" { + bucket = %[1]q +} + +resource "aws_s3_bucket_lifecycle_configuration" "test" { + bucket = aws_s3_bucket.test.bucket + + rule { + id = %[1]q + + filter { + prefix = "config/" + } + + noncurrent_version_expiration { + noncurrent_days = %[2]d + } + + status = "Enabled" + } +} +`, rName, nonCurrentDays) +} + func testAccBucketLifecycleConfigurationConfig_nonCurrentVersionTransition(rName string, standardDays, glacierDays int) string { return fmt.Sprintf(` resource "aws_s3_bucket" "test" { diff --git a/internal/service/s3/bucket_logging.go b/internal/service/s3/bucket_logging.go index 
05df1e7d5c49..38f61d9d20ab 100644 --- a/internal/service/s3/bucket_logging.go +++ b/internal/service/s3/bucket_logging.go @@ -24,6 +24,10 @@ import ( ) // @SDKResource("aws_s3_bucket_logging", name="Bucket Logging") +// @IdentityAttribute("bucket") +// @IdentityAttribute("expected_bucket_owner", optional="true") +// @ImportIDHandler("resourceImportID") +// @Testing(preIdentityVersion="v6.9.0") func resourceBucketLogging() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceBucketLoggingCreate, @@ -31,10 +35,6 @@ func resourceBucketLogging() *schema.Resource { UpdateWithoutTimeout: resourceBucketLoggingUpdate, DeleteWithoutTimeout: resourceBucketLoggingDelete, - Importer: &schema.ResourceImporter{ - StateContext: schema.ImportStatePassthroughContext, - }, - Schema: map[string]*schema.Schema{ names.AttrBucket: { Type: schema.TypeString, @@ -66,6 +66,8 @@ func resourceBucketLogging() *schema.Resource { names.AttrDisplayName: { Type: schema.TypeString, Computed: true, + Deprecated: "display_name is deprecated. 
This attribute is no longer returned by " + + "AWS and will be removed in a future major version.", }, "email_address": { Type: schema.TypeString, @@ -166,7 +168,7 @@ func resourceBucketLoggingCreate(ctx context.Context, d *schema.ResourceData, me input.BucketLoggingStatus.LoggingEnabled.TargetObjectKeyFormat = expandTargetObjectKeyFormat(v.([]any)[0].(map[string]any)) } - _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, bucketPropagationTimeout, func() (any, error) { + _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, bucketPropagationTimeout, func(ctx context.Context) (any, error) { return conn.PutBucketLogging(ctx, input) }, errCodeNoSuchBucket) @@ -180,7 +182,7 @@ func resourceBucketLoggingCreate(ctx context.Context, d *schema.ResourceData, me d.SetId(createResourceID(bucket, expectedBucketOwner)) - _, err = tfresource.RetryWhenNotFound(ctx, bucketPropagationTimeout, func() (any, error) { + _, err = tfresource.RetryWhenNotFound(ctx, bucketPropagationTimeout, func(ctx context.Context) (any, error) { return findLoggingEnabled(ctx, conn, bucket, expectedBucketOwner) }) diff --git a/internal/service/s3/bucket_logging_identity_gen_test.go b/internal/service/s3/bucket_logging_identity_gen_test.go new file mode 100644 index 000000000000..584953ad2fa0 --- /dev/null +++ b/internal/service/s3/bucket_logging_identity_gen_test.go @@ -0,0 +1,312 @@ +// Code generated by internal/generate/identitytests/main.go; DO NOT EDIT. 
+ +package s3_test + +import ( + "testing" + + "github.com/hashicorp/terraform-plugin-testing/config" + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/knownvalue" + "github.com/hashicorp/terraform-plugin-testing/plancheck" + "github.com/hashicorp/terraform-plugin-testing/statecheck" + "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" + "github.com/hashicorp/terraform-plugin-testing/tfversion" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + tfknownvalue "github.com/hashicorp/terraform-provider-aws/internal/acctest/knownvalue" + tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccS3BucketLogging_Identity_Basic(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_s3_bucket_logging.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), + CheckDestroy: testAccCheckBucketLoggingDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + // Step 1: Setup + { + ConfigDirectory: config.StaticDirectory("testdata/BucketLogging/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckBucketLoggingExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + statecheck.ExpectIdentity(resourceName, 
map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + names.AttrBucket: knownvalue.NotNull(), + names.AttrExpectedBucketOwner: knownvalue.Null(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrBucket)), + }, + }, + + // Step 2: Import command + { + ConfigDirectory: config.StaticDirectory("testdata/BucketLogging/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ImportStateKind: resource.ImportCommandWithID, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + + // Step 3: Import block with Import ID + { + ConfigDirectory: config.StaticDirectory("testdata/BucketLogging/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrBucket), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrExpectedBucketOwner), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + }, + }, + }, + + // Step 4: Import block with Resource Identity + { + ConfigDirectory: config.StaticDirectory("testdata/BucketLogging/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithResourceIdentity, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrBucket), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, 
tfjsonpath.New(names.AttrExpectedBucketOwner), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + }, + }, + }, + }, + }) +} + +func TestAccS3BucketLogging_Identity_RegionOverride(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_s3_bucket_logging.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), + CheckDestroy: acctest.CheckDestroyNoop, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + // Step 1: Setup + { + ConfigDirectory: config.StaticDirectory("testdata/BucketLogging/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.AlternateRegion()), + names.AttrBucket: knownvalue.NotNull(), + names.AttrExpectedBucketOwner: knownvalue.Null(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrBucket)), + }, + }, + + // Step 2: Import command + { + ConfigDirectory: config.StaticDirectory("testdata/BucketLogging/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ImportStateKind: resource.ImportCommandWithID, + 
ImportStateIdFunc: acctest.CrossRegionImportStateIdFunc(resourceName), + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + + // Step 3: Import block with Import ID + { + ConfigDirectory: config.StaticDirectory("testdata/BucketLogging/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportStateIdFunc: acctest.CrossRegionImportStateIdFunc(resourceName), + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrBucket), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrExpectedBucketOwner), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + }, + + // Step 4: Import block with Resource Identity + { + ConfigDirectory: config.StaticDirectory("testdata/BucketLogging/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithResourceIdentity, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrBucket), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrExpectedBucketOwner), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + }, + }, + }) +} + +// Resource Identity was added after v6.9.0 +func 
TestAccS3BucketLogging_Identity_ExistingResource(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_s3_bucket_logging.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), + CheckDestroy: testAccCheckBucketLoggingDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/BucketLogging/basic_v6.9.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckBucketLoggingExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/BucketLogging/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + names.AttrBucket: knownvalue.NotNull(), + names.AttrExpectedBucketOwner: knownvalue.Null(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrBucket)), + }, + }, + }, + }) +} + +// 
Resource Identity was added after v6.9.0 +func TestAccS3BucketLogging_Identity_ExistingResource_NoRefresh_NoChange(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_s3_bucket_logging.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), + CheckDestroy: testAccCheckBucketLoggingDestroy(ctx), + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/BucketLogging/basic_v6.9.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckBucketLoggingExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/BucketLogging/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + }, + }) +} diff --git a/internal/service/s3/bucket_logging_test.go b/internal/service/s3/bucket_logging_test.go index 10c38958ed0b..90699704e689 100644 --- 
a/internal/service/s3/bucket_logging_test.go +++ b/internal/service/s3/bucket_logging_test.go @@ -135,13 +135,14 @@ func TestAccS3BucketLogging_TargetGrantByID(t *testing.T) { "permission": string(types.BucketLogsPermissionFullControl), }), resource.TestCheckTypeSetElemAttrPair(resourceName, "target_grant.*.grantee.0.id", "data.aws_canonical_user_id.current", names.AttrID), - resource.TestCheckTypeSetElemAttrPair(resourceName, "target_grant.*.grantee.0.display_name", "data.aws_canonical_user_id.current", names.AttrDisplayName), ), }, { ResourceName: resourceName, ImportState: true, ImportStateVerify: true, + // DisplayName is deprecated and will be inconsistently returned between July and November 2025. + ImportStateVerifyIgnore: []string{"target_grant.0.grantee.0.display_name"}, }, { Config: testAccBucketLoggingConfig_targetGrantByID(rName, string(types.BucketLogsPermissionRead)), @@ -153,13 +154,15 @@ func TestAccS3BucketLogging_TargetGrantByID(t *testing.T) { "grantee.0.type": string(types.TypeCanonicalUser), "permission": string(types.BucketLogsPermissionRead), }), - resource.TestCheckTypeSetElemAttrPair(resourceName, "target_grant.*.grantee.0.display_name", "data.aws_canonical_user_id.current", names.AttrDisplayName), + resource.TestCheckTypeSetElemAttrPair(resourceName, "target_grant.*.grantee.0.id", "data.aws_canonical_user_id.current", names.AttrID), ), }, { ResourceName: resourceName, ImportState: true, ImportStateVerify: true, + // DisplayName is deprecated and will be inconsistently returned between July and November 2025. + ImportStateVerifyIgnore: []string{"target_grant.0.grantee.0.display_name"}, }, { Config: testAccBucketLoggingConfig_basic(rName), diff --git a/internal/service/s3/bucket_metadata_configuration.go b/internal/service/s3/bucket_metadata_configuration.go new file mode 100644 index 000000000000..d69aec88c408 --- /dev/null +++ b/internal/service/s3/bucket_metadata_configuration.go @@ -0,0 +1,709 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package s3 + +import ( + "context" + "fmt" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/s3" + awstypes "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/hashicorp/aws-sdk-go-base/v2/tfawserr" + "github.com/hashicorp/terraform-plugin-framework-timeouts/resource/timeouts" + "github.com/hashicorp/terraform-plugin-framework-validators/int32validator" + "github.com/hashicorp/terraform-plugin-framework-validators/listvalidator" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/listplanmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-provider-aws/internal/errs/fwdiag" + "github.com/hashicorp/terraform-provider-aws/internal/framework" + fwflex "github.com/hashicorp/terraform-provider-aws/internal/framework/flex" + fwtypes "github.com/hashicorp/terraform-provider-aws/internal/framework/types" + fwvalidators "github.com/hashicorp/terraform-provider-aws/internal/framework/validators" + "github.com/hashicorp/terraform-provider-aws/internal/retry" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + "github.com/hashicorp/terraform-provider-aws/names" +) + +// @FrameworkResource("aws_s3_bucket_metadata_configuration", name="Bucket Metadata Configuration") +func newBucketMetadataConfigurationResource(_ context.Context) (resource.ResourceWithConfigure, error) { + r := &bucketMetadataConfigurationResource{} + + 
r.SetDefaultCreateTimeout(30 * time.Minute) + + return r, nil +} + +type bucketMetadataConfigurationResource struct { + framework.ResourceWithModel[bucketMetadataConfigurationResourceModel] + framework.WithTimeouts +} + +func (r *bucketMetadataConfigurationResource) Schema(ctx context.Context, request resource.SchemaRequest, response *resource.SchemaResponse) { + response.Schema = schema.Schema{ + Attributes: map[string]schema.Attribute{ + names.AttrBucket: schema.StringAttribute{ + Required: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + }, + names.AttrExpectedBucketOwner: schema.StringAttribute{ + Optional: true, + Validators: []validator.String{ + fwvalidators.AWSAccountID(), + }, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + }, + }, + Blocks: map[string]schema.Block{ + "metadata_configuration": schema.ListNestedBlock{ + CustomType: fwtypes.NewListNestedObjectTypeOf[metadataConfigurationModel](ctx), + Validators: []validator.List{ + listvalidator.IsRequired(), + listvalidator.SizeAtLeast(1), + listvalidator.SizeAtMost(1), + }, + NestedObject: schema.NestedBlockObject{ + Attributes: map[string]schema.Attribute{ + names.AttrDestination: framework.ResourceComputedListOfObjectsAttribute[destinationResultModel](ctx, listplanmodifier.UseStateForUnknown()), + }, + Blocks: map[string]schema.Block{ + "inventory_table_configuration": schema.ListNestedBlock{ + CustomType: fwtypes.NewListNestedObjectTypeOf[inventoryTableConfigurationModel](ctx), + Validators: []validator.List{ + listvalidator.IsRequired(), + listvalidator.SizeAtLeast(1), + listvalidator.SizeAtMost(1), + }, + NestedObject: schema.NestedBlockObject{ + Attributes: map[string]schema.Attribute{ + "configuration_state": schema.StringAttribute{ + CustomType: fwtypes.StringEnumType[awstypes.InventoryConfigurationState](), + Required: true, + }, + "table_arn": schema.StringAttribute{ + Computed: true, + }, + names.AttrTableName: 
schema.StringAttribute{ + Computed: true, + }, + }, + Blocks: map[string]schema.Block{ + names.AttrEncryptionConfiguration: schema.ListNestedBlock{ + CustomType: fwtypes.NewListNestedObjectTypeOf[metadataTableEncryptionConfigurationModel](ctx), + Validators: []validator.List{ + listvalidator.SizeAtMost(1), + }, + NestedObject: schema.NestedBlockObject{ + Attributes: map[string]schema.Attribute{ + names.AttrKMSKeyARN: schema.StringAttribute{ + CustomType: fwtypes.ARNType, + Optional: true, + }, + "sse_algorithm": schema.StringAttribute{ + CustomType: fwtypes.StringEnumType[awstypes.TableSseAlgorithm](), + Required: true, + }, + }, + }, + }, + }, + }, + }, + "journal_table_configuration": schema.ListNestedBlock{ + CustomType: fwtypes.NewListNestedObjectTypeOf[journalTableConfigurationModel](ctx), + Validators: []validator.List{ + listvalidator.IsRequired(), + listvalidator.SizeAtLeast(1), + listvalidator.SizeAtMost(1), + }, + NestedObject: schema.NestedBlockObject{ + Attributes: map[string]schema.Attribute{ + "table_arn": schema.StringAttribute{ + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + }, + names.AttrTableName: schema.StringAttribute{ + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + }, + }, + Blocks: map[string]schema.Block{ + names.AttrEncryptionConfiguration: schema.ListNestedBlock{ + CustomType: fwtypes.NewListNestedObjectTypeOf[metadataTableEncryptionConfigurationModel](ctx), + Validators: []validator.List{ + listvalidator.SizeAtMost(1), + }, + NestedObject: schema.NestedBlockObject{ + Attributes: map[string]schema.Attribute{ + names.AttrKMSKeyARN: schema.StringAttribute{ + CustomType: fwtypes.ARNType, + Optional: true, + }, + "sse_algorithm": schema.StringAttribute{ + CustomType: fwtypes.StringEnumType[awstypes.TableSseAlgorithm](), + Required: true, + }, + }, + }, + }, + "record_expiration": schema.ListNestedBlock{ + CustomType: 
fwtypes.NewListNestedObjectTypeOf[recordExpirationModel](ctx), + Validators: []validator.List{ + listvalidator.IsRequired(), + listvalidator.SizeAtLeast(1), + listvalidator.SizeAtMost(1), + }, + NestedObject: schema.NestedBlockObject{ + Attributes: map[string]schema.Attribute{ + "days": schema.Int32Attribute{ + Optional: true, + Validators: []validator.Int32{ + int32validator.Between(7, 2147483647), + }, + }, + "expiration": schema.StringAttribute{ + CustomType: fwtypes.StringEnumType[awstypes.ExpirationState](), + Required: true, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + names.AttrTimeouts: timeouts.Block(ctx, timeouts.Opts{ + Create: true, + }), + }, + } +} + +func (r *bucketMetadataConfigurationResource) Create(ctx context.Context, request resource.CreateRequest, response *resource.CreateResponse) { + var data bucketMetadataConfigurationResourceModel + response.Diagnostics.Append(request.Plan.Get(ctx, &data)...) + if response.Diagnostics.HasError() { + return + } + + conn := r.Meta().S3Client(ctx) + + bucket, expectedBucketOwner := fwflex.StringValueFromFramework(ctx, data.Bucket), fwflex.StringValueFromFramework(ctx, data.ExpectedBucketOwner) + var input s3.CreateBucketMetadataConfigurationInput + response.Diagnostics.Append(fwflex.Expand(ctx, data, &input)...) 
+ if response.Diagnostics.HasError() { + return + } + + _, err := conn.CreateBucketMetadataConfiguration(ctx, &input) + + if err != nil { + response.Diagnostics.AddError(fmt.Sprintf("creating S3 Bucket Metadata Configuration (%s)", bucket), err.Error()) + + return + } + + if _, err := waitBucketMetadataJournalTableConfigurationCreated(ctx, conn, bucket, expectedBucketOwner, r.CreateTimeout(ctx, data.Timeouts)); err != nil { + response.Diagnostics.AddError(fmt.Sprintf("waiting for S3 Bucket Metadata journal table configuration (%s) create", bucket), err.Error()) + + return + } + + if input.MetadataConfiguration.InventoryTableConfiguration.ConfigurationState == awstypes.InventoryConfigurationStateEnabled { + if _, err := waitBucketMetadataInventoryTableConfigurationCreated(ctx, conn, bucket, expectedBucketOwner, r.CreateTimeout(ctx, data.Timeouts)); err != nil { + response.Diagnostics.AddError(fmt.Sprintf("waiting for S3 Bucket Metadata inventory table configuration (%s) create", bucket), err.Error()) + + return + } + } + + // Set values for unknowns. + output, err := findBucketMetadataConfigurationByTwoPartKey(ctx, conn, bucket, expectedBucketOwner) + + if err != nil { + response.Diagnostics.AddError(fmt.Sprintf("reading S3 Bucket Metadata Configuration (%s)", bucket), err.Error()) + + return + } + + // Encryption configurations are not returned via the API. + // Propagate from Plan. + inventoryEncryptionConfiguration, journalEncryptionConfiguration, diags := getMetadataTableEncryptionConfigurationModels(ctx, &data) + response.Diagnostics.Append(diags...) + if response.Diagnostics.HasError() { + return + } + + response.Diagnostics.Append(fwflex.Flatten(ctx, output, &data.MetadataConfiguration, fwflex.WithFieldNameSuffix("Result"))...) + if response.Diagnostics.HasError() { + return + } + + diags = setMetadataTableEncryptionConfigurationModels(ctx, &data, inventoryEncryptionConfiguration, journalEncryptionConfiguration) + response.Diagnostics.Append(diags...) 
+ if response.Diagnostics.HasError() { + return + } + + response.Diagnostics.Append(response.State.Set(ctx, data)...) +} + +func (r *bucketMetadataConfigurationResource) Read(ctx context.Context, request resource.ReadRequest, response *resource.ReadResponse) { + var data bucketMetadataConfigurationResourceModel + response.Diagnostics.Append(request.State.Get(ctx, &data)...) + if response.Diagnostics.HasError() { + return + } + + conn := r.Meta().S3Client(ctx) + + bucket, expectedBucketOwner := fwflex.StringValueFromFramework(ctx, data.Bucket), fwflex.StringValueFromFramework(ctx, data.ExpectedBucketOwner) + output, err := findBucketMetadataConfigurationByTwoPartKey(ctx, conn, bucket, expectedBucketOwner) + + if tfresource.NotFound(err) { + response.Diagnostics.Append(fwdiag.NewResourceNotFoundWarningDiagnostic(err)) + response.State.RemoveResource(ctx) + + return + } + + if err != nil { + response.Diagnostics.AddError(fmt.Sprintf("reading S3 Bucket Metadata Configuration (%s)", bucket), err.Error()) + + return + } + + // Encryption configurations are not returned via the API. + // Propagate from State. + inventoryEncryptionConfiguration, journalEncryptionConfiguration, diags := getMetadataTableEncryptionConfigurationModels(ctx, &data) + response.Diagnostics.Append(diags...) + if response.Diagnostics.HasError() { + return + } + + // Set attributes for import. + response.Diagnostics.Append(fwflex.Flatten(ctx, output, &data.MetadataConfiguration, fwflex.WithFieldNameSuffix("Result"))...) + if response.Diagnostics.HasError() { + return + } + + diags = setMetadataTableEncryptionConfigurationModels(ctx, &data, inventoryEncryptionConfiguration, journalEncryptionConfiguration) + response.Diagnostics.Append(diags...) + if response.Diagnostics.HasError() { + return + } + + response.Diagnostics.Append(response.State.Set(ctx, &data)...) 
+} + +func (r *bucketMetadataConfigurationResource) Update(ctx context.Context, request resource.UpdateRequest, response *resource.UpdateResponse) { + var old, new bucketMetadataConfigurationResourceModel + response.Diagnostics.Append(request.Plan.Get(ctx, &new)...) + if response.Diagnostics.HasError() { + return + } + response.Diagnostics.Append(request.State.Get(ctx, &old)...) + if response.Diagnostics.HasError() { + return + } + + conn := r.Meta().S3Client(ctx) + + bucket, expectedBucketOwner := fwflex.StringValueFromFramework(ctx, new.Bucket), fwflex.StringValueFromFramework(ctx, new.ExpectedBucketOwner) + + newMetadataConfigurationModel, diags := new.MetadataConfiguration.ToPtr(ctx) + response.Diagnostics.Append(diags...) + if response.Diagnostics.HasError() { + return + } + oldMetadataConfigurationModel, diags := old.MetadataConfiguration.ToPtr(ctx) + response.Diagnostics.Append(diags...) + if response.Diagnostics.HasError() { + return + } + + if !newMetadataConfigurationModel.InventoryTableConfiguration.Equal(oldMetadataConfigurationModel.InventoryTableConfiguration) { + var input s3.UpdateBucketMetadataInventoryTableConfigurationInput + response.Diagnostics.Append(fwflex.Expand(ctx, new.MetadataConfiguration, &input)...) + if response.Diagnostics.HasError() { + return + } + input.Bucket = aws.String(bucket) + if expectedBucketOwner != "" { + input.ExpectedBucketOwner = aws.String(expectedBucketOwner) + } + + _, err := conn.UpdateBucketMetadataInventoryTableConfiguration(ctx, &input) + + if err != nil { + response.Diagnostics.AddError(fmt.Sprintf("updating S3 Bucket Metadata inventory table configuration (%s)", bucket), err.Error()) + + return + } + } + + if !newMetadataConfigurationModel.JournalTableConfiguration.Equal(oldMetadataConfigurationModel.JournalTableConfiguration) { + var input s3.UpdateBucketMetadataJournalTableConfigurationInput + response.Diagnostics.Append(fwflex.Expand(ctx, new.MetadataConfiguration, &input)...) 
+ if response.Diagnostics.HasError() { + return + } + input.Bucket = aws.String(bucket) + if expectedBucketOwner != "" { + input.ExpectedBucketOwner = aws.String(expectedBucketOwner) + } + + _, err := conn.UpdateBucketMetadataJournalTableConfiguration(ctx, &input) + + if err != nil { + response.Diagnostics.AddError(fmt.Sprintf("updating S3 Bucket Metadata journal table configuration (%s)", bucket), err.Error()) + + return + } + } + + // Set values for unknowns. + output, err := findBucketMetadataConfigurationByTwoPartKey(ctx, conn, bucket, expectedBucketOwner) + + if err != nil { + response.Diagnostics.AddError(fmt.Sprintf("reading S3 Bucket Metadata Configuration (%s)", bucket), err.Error()) + + return + } + + // Encryption configurations are not returned via the API. + // Propagate from Plan. + inventoryEncryptionConfiguration, journalEncryptionConfiguration, diags := getMetadataTableEncryptionConfigurationModels(ctx, &new) + response.Diagnostics.Append(diags...) + if response.Diagnostics.HasError() { + return + } + + response.Diagnostics.Append(fwflex.Flatten(ctx, output, &new.MetadataConfiguration, fwflex.WithFieldNameSuffix("Result"))...) + if response.Diagnostics.HasError() { + return + } + + diags = setMetadataTableEncryptionConfigurationModels(ctx, &new, inventoryEncryptionConfiguration, journalEncryptionConfiguration) + response.Diagnostics.Append(diags...) + if response.Diagnostics.HasError() { + return + } + + response.Diagnostics.Append(response.State.Set(ctx, new)...) +} + +func (r *bucketMetadataConfigurationResource) Delete(ctx context.Context, request resource.DeleteRequest, response *resource.DeleteResponse) { + var data bucketMetadataConfigurationResourceModel + response.Diagnostics.Append(request.State.Get(ctx, &data)...) 
+ if response.Diagnostics.HasError() { + return + } + + conn := r.Meta().S3Client(ctx) + + bucket, expectedBucketOwner := fwflex.StringValueFromFramework(ctx, data.Bucket), fwflex.StringValueFromFramework(ctx, data.ExpectedBucketOwner) + input := s3.DeleteBucketMetadataConfigurationInput{ + Bucket: aws.String(bucket), + } + if expectedBucketOwner != "" { + input.ExpectedBucketOwner = aws.String(expectedBucketOwner) + } + _, err := conn.DeleteBucketMetadataConfiguration(ctx, &input) + + if tfawserr.ErrCodeEquals(err, errCodeNoSuchBucket, errCodeMetadataConfigurationNotFound) { + return + } + + if err != nil { + response.Diagnostics.AddError(fmt.Sprintf("deleting S3 Bucket Metadata Configuration (%s)", bucket), err.Error()) + + return + } + + _, err = tfresource.RetryUntilNotFound(ctx, bucketPropagationTimeout, func(ctx context.Context) (any, error) { + return findBucketMetadataConfigurationByTwoPartKey(ctx, conn, bucket, expectedBucketOwner) + }) + + if err != nil { + response.Diagnostics.AddError(fmt.Sprintf("waiting for S3 Bucket Metadata Configuration (%s) delete", bucket), err.Error()) + + return + } +} + +func (r *bucketMetadataConfigurationResource) ImportState(ctx context.Context, request resource.ImportStateRequest, response *resource.ImportStateResponse) { + bucket, expectedBucketOwner, err := parseResourceID(request.ID) + if err != nil { + response.Diagnostics.Append(fwdiag.NewParsingResourceIDErrorDiagnostic(err)) + + return + } + + response.Diagnostics.Append(response.State.SetAttribute(ctx, path.Root(names.AttrBucket), bucket)...) + if expectedBucketOwner != "" { + response.Diagnostics.Append(response.State.SetAttribute(ctx, path.Root(names.AttrExpectedBucketOwner), expectedBucketOwner)...) 
+ } +} + +func waitBucketMetadataInventoryTableConfigurationCreated(ctx context.Context, conn *s3.Client, bucket, expectedBucketOwner string, timeout time.Duration) (*awstypes.InventoryTableConfigurationResult, error) { + stateConf := &retry.StateChangeConf{ + Pending: []string{inventoryTableConfigurationStatusCreating}, + Target: []string{inventoryTableConfigurationStatusActive, inventoryTableConfigurationStatusBackfilling}, + Refresh: statusBucketMetadataInventoryTableConfiguration(conn, bucket, expectedBucketOwner), + Timeout: timeout, + ContinuousTargetOccurence: 2, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + + if output, ok := outputRaw.(*awstypes.InventoryTableConfigurationResult); ok { + if v := output.Error; v != nil { + tfresource.SetLastError(err, fmt.Errorf("%s: %s", aws.ToString(v.ErrorCode), aws.ToString(v.ErrorMessage))) + } + + return output, err + } + + return nil, err +} + +func waitBucketMetadataJournalTableConfigurationCreated(ctx context.Context, conn *s3.Client, bucket, expectedBucketOwner string, timeout time.Duration) (*awstypes.JournalTableConfigurationResult, error) { + stateConf := &retry.StateChangeConf{ + Pending: []string{journalTableConfigurationStatusCreating}, + Target: []string{journalTableConfigurationStatusActive}, + Refresh: statusBucketMetadataJournalTableConfiguration(conn, bucket, expectedBucketOwner), + Timeout: timeout, + ContinuousTargetOccurence: 2, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + + if output, ok := outputRaw.(*awstypes.JournalTableConfigurationResult); ok { + if v := output.Error; v != nil { + tfresource.SetLastError(err, fmt.Errorf("%s: %s", aws.ToString(v.ErrorCode), aws.ToString(v.ErrorMessage))) + } + + return output, err + } + + return nil, err +} + +func statusBucketMetadataInventoryTableConfiguration(conn *s3.Client, bucket, expectedBucketOwner string) retry.StateRefreshFunc { + return func(ctx context.Context) (any, string, error) { + mcr, err := 
findBucketMetadataConfigurationByTwoPartKey(ctx, conn, bucket, expectedBucketOwner) + + if tfresource.NotFound(err) { + return nil, "", nil + } + + if err != nil { + return nil, "", err + } + + output := mcr.InventoryTableConfigurationResult + if output == nil { + return nil, "", nil + } + + return output, aws.ToString(output.TableStatus), nil + } +} + +func statusBucketMetadataJournalTableConfiguration(conn *s3.Client, bucket, expectedBucketOwner string) retry.StateRefreshFunc { + return func(ctx context.Context) (any, string, error) { + mcr, err := findBucketMetadataConfigurationByTwoPartKey(ctx, conn, bucket, expectedBucketOwner) + + if tfresource.NotFound(err) { + return nil, "", nil + } + + if err != nil { + return nil, "", err + } + + output := mcr.JournalTableConfigurationResult + if output == nil { + return nil, "", nil + } + + return output, aws.ToString(output.TableStatus), nil + } +} + +func findBucketMetadataConfigurationByTwoPartKey(ctx context.Context, conn *s3.Client, bucket, expectedBucketOwner string) (*awstypes.MetadataConfigurationResult, error) { + input := s3.GetBucketMetadataConfigurationInput{ + Bucket: aws.String(bucket), + } + if expectedBucketOwner != "" { + input.ExpectedBucketOwner = aws.String(expectedBucketOwner) + } + + return findBucketMetadataConfiguration(ctx, conn, &input) +} + +func findBucketMetadataConfiguration(ctx context.Context, conn *s3.Client, input *s3.GetBucketMetadataConfigurationInput) (*awstypes.MetadataConfigurationResult, error) { + output, err := conn.GetBucketMetadataConfiguration(ctx, input) + + if tfawserr.ErrCodeEquals(err, errCodeNoSuchBucket, errCodeMetadataConfigurationNotFound) { + return nil, &retry.NotFoundError{ + LastError: err, + } + } + + if err != nil { + return nil, err + } + + if output == nil || output.GetBucketMetadataConfigurationResult == nil || output.GetBucketMetadataConfigurationResult.MetadataConfigurationResult == nil { + return nil, tfresource.NewEmptyResultError(input) + } + + return 
output.GetBucketMetadataConfigurationResult.MetadataConfigurationResult, nil +} + +func getMetadataTableEncryptionConfigurationModels(ctx context.Context, data *bucketMetadataConfigurationResourceModel) (fwtypes.ListNestedObjectValueOf[metadataTableEncryptionConfigurationModel], fwtypes.ListNestedObjectValueOf[metadataTableEncryptionConfigurationModel], diag.Diagnostics) { + var diags diag.Diagnostics + nullMetadataTableEncryptionConfigurationModel := fwtypes.NewListNestedObjectValueOfNull[metadataTableEncryptionConfigurationModel](ctx) + + metadataConfigurationModel, d := data.MetadataConfiguration.ToPtr(ctx) + diags.Append(d...) + if diags.HasError() { + return nullMetadataTableEncryptionConfigurationModel, nullMetadataTableEncryptionConfigurationModel, diags + } + + if metadataConfigurationModel == nil { + return nullMetadataTableEncryptionConfigurationModel, nullMetadataTableEncryptionConfigurationModel, diags + } + + inventoryTableConfigurationModel, d := metadataConfigurationModel.InventoryTableConfiguration.ToPtr(ctx) + diags.Append(d...) + if diags.HasError() { + return nullMetadataTableEncryptionConfigurationModel, nullMetadataTableEncryptionConfigurationModel, diags + } + + if inventoryTableConfigurationModel == nil { + return nullMetadataTableEncryptionConfigurationModel, nullMetadataTableEncryptionConfigurationModel, diags + } + + journalTableConfigurationModel, d := metadataConfigurationModel.JournalTableConfiguration.ToPtr(ctx) + diags.Append(d...) 
+ if diags.HasError() { + return nullMetadataTableEncryptionConfigurationModel, nullMetadataTableEncryptionConfigurationModel, diags + } + + if journalTableConfigurationModel == nil { + return inventoryTableConfigurationModel.EncryptionConfiguration, nullMetadataTableEncryptionConfigurationModel, diags + } + + return inventoryTableConfigurationModel.EncryptionConfiguration, journalTableConfigurationModel.EncryptionConfiguration, diags +} + +func setMetadataTableEncryptionConfigurationModels(ctx context.Context, data *bucketMetadataConfigurationResourceModel, inventoryEncryptionConfiguration fwtypes.ListNestedObjectValueOf[metadataTableEncryptionConfigurationModel], journalEncryptionConfiguration fwtypes.ListNestedObjectValueOf[metadataTableEncryptionConfigurationModel]) diag.Diagnostics { + var diags diag.Diagnostics + + metadataConfigurationModel, d := data.MetadataConfiguration.ToPtr(ctx) + diags.Append(d...) + if diags.HasError() { + return diags + } + + inventoryTableConfigurationModel, d := metadataConfigurationModel.InventoryTableConfiguration.ToPtr(ctx) + diags.Append(d...) + if diags.HasError() { + return diags + } + + journalTableConfigurationModel, d := metadataConfigurationModel.JournalTableConfiguration.ToPtr(ctx) + diags.Append(d...) + if diags.HasError() { + return diags + } + + inventoryTableConfigurationModel.EncryptionConfiguration = inventoryEncryptionConfiguration + journalTableConfigurationModel.EncryptionConfiguration = journalEncryptionConfiguration + + metadataConfigurationModel.InventoryTableConfiguration, d = fwtypes.NewListNestedObjectValueOfPtr(ctx, inventoryTableConfigurationModel) + diags.Append(d...) + if diags.HasError() { + return diags + } + + metadataConfigurationModel.JournalTableConfiguration, d = fwtypes.NewListNestedObjectValueOfPtr(ctx, journalTableConfigurationModel) + diags.Append(d...) 
+ if diags.HasError() { + return diags + } + + data.MetadataConfiguration, d = fwtypes.NewListNestedObjectValueOfPtr(ctx, metadataConfigurationModel) + diags.Append(d...) + if diags.HasError() { + return diags + } + + return diags +} + +type bucketMetadataConfigurationResourceModel struct { + framework.WithRegionModel + Bucket types.String `tfsdk:"bucket"` + ExpectedBucketOwner types.String `tfsdk:"expected_bucket_owner"` + MetadataConfiguration fwtypes.ListNestedObjectValueOf[metadataConfigurationModel] `tfsdk:"metadata_configuration"` + Timeouts timeouts.Value `tfsdk:"timeouts"` +} + +type metadataConfigurationModel struct { + Destination fwtypes.ListNestedObjectValueOf[destinationResultModel] `tfsdk:"destination"` + InventoryTableConfiguration fwtypes.ListNestedObjectValueOf[inventoryTableConfigurationModel] `tfsdk:"inventory_table_configuration"` + JournalTableConfiguration fwtypes.ListNestedObjectValueOf[journalTableConfigurationModel] `tfsdk:"journal_table_configuration"` +} + +type destinationResultModel struct { + TableBucketARN fwtypes.ARN `tfsdk:"table_bucket_arn"` + TableBucketType fwtypes.StringEnum[awstypes.S3TablesBucketType] `tfsdk:"table_bucket_type"` + TableNamespace types.String `tfsdk:"table_namespace"` +} + +type inventoryTableConfigurationModel struct { + ConfigurationState fwtypes.StringEnum[awstypes.InventoryConfigurationState] `tfsdk:"configuration_state"` + EncryptionConfiguration fwtypes.ListNestedObjectValueOf[metadataTableEncryptionConfigurationModel] `tfsdk:"encryption_configuration"` + TableARN fwtypes.ARN `tfsdk:"table_arn"` + TableName types.String `tfsdk:"table_name"` +} + +type journalTableConfigurationModel struct { + EncryptionConfiguration fwtypes.ListNestedObjectValueOf[metadataTableEncryptionConfigurationModel] `tfsdk:"encryption_configuration"` + RecordExpiration fwtypes.ListNestedObjectValueOf[recordExpirationModel] `tfsdk:"record_expiration"` + TableARN fwtypes.ARN `tfsdk:"table_arn"` + TableName types.String 
`tfsdk:"table_name"` +} + +type metadataTableEncryptionConfigurationModel struct { + KMSKeyARN fwtypes.ARN `tfsdk:"kms_key_arn"` + SSEAlgorithm fwtypes.StringEnum[awstypes.TableSseAlgorithm] `tfsdk:"sse_algorithm"` +} + +type recordExpirationModel struct { + Days types.Int32 `tfsdk:"days"` + Expiration fwtypes.StringEnum[awstypes.ExpirationState] `tfsdk:"expiration"` +} diff --git a/internal/service/s3/bucket_metadata_configuration_test.go b/internal/service/s3/bucket_metadata_configuration_test.go new file mode 100644 index 000000000000..b89526148267 --- /dev/null +++ b/internal/service/s3/bucket_metadata_configuration_test.go @@ -0,0 +1,363 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package s3_test + +import ( + "context" + "fmt" + "testing" + + "github.com/YakDriver/regexache" + awstypes "github.com/aws/aws-sdk-go-v2/service/s3/types" + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/knownvalue" + "github.com/hashicorp/terraform-plugin-testing/plancheck" + "github.com/hashicorp/terraform-plugin-testing/statecheck" + "github.com/hashicorp/terraform-plugin-testing/terraform" + "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + tfknownvalue "github.com/hashicorp/terraform-provider-aws/internal/acctest/knownvalue" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + tfs3 "github.com/hashicorp/terraform-provider-aws/internal/service/s3" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccS3BucketMetadataConfiguration_basic(t *testing.T) { + ctx := acctest.Context(t) + var v awstypes.MetadataConfigurationResult + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := 
"aws_s3_bucket_metadata_configuration.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckBucketMetadataConfigurationDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccBucketMetadataConfigurationConfig_basic(rName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckBucketMetadataConfigurationExists(ctx, resourceName, &v), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("metadata_configuration"), knownvalue.ListExact([]knownvalue.Check{ + knownvalue.ObjectExact(map[string]knownvalue.Check{ + names.AttrDestination: knownvalue.ListExact([]knownvalue.Check{ + knownvalue.ObjectExact(map[string]knownvalue.Check{ + "table_bucket_arn": tfknownvalue.RegionalARNExact("s3tables", "bucket/aws-s3"), + "table_bucket_type": tfknownvalue.StringExact(awstypes.S3TablesBucketTypeAws), + "table_namespace": knownvalue.NotNull(), + }), + }), + "inventory_table_configuration": knownvalue.ListExact([]knownvalue.Check{ + knownvalue.ObjectPartial(map[string]knownvalue.Check{ + "table_arn": knownvalue.Null(), + names.AttrTableName: knownvalue.Null(), + }), + }), + "journal_table_configuration": knownvalue.ListExact([]knownvalue.Check{ + knownvalue.ObjectPartial(map[string]knownvalue.Check{ + "table_arn": tfknownvalue.RegionalARNRegexp("s3tables", regexache.MustCompile(`bucket/aws-s3/table/.+`)), + names.AttrTableName: knownvalue.NotNull(), + }), + }), + }), + })), + }, + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, names.AttrBucket), + 
ImportStateVerifyIdentifierAttribute: names.AttrBucket, + }, + }, + }) +} + +func TestAccS3BucketMetadataConfiguration_update(t *testing.T) { + ctx := acctest.Context(t) + var v awstypes.MetadataConfigurationResult + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_s3_bucket_metadata_configuration.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckBucketMetadataConfigurationDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccBucketMetadataConfigurationConfig_encryption1(rName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckBucketMetadataConfigurationExists(ctx, resourceName, &v), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("metadata_configuration"), knownvalue.ListExact([]knownvalue.Check{ + knownvalue.ObjectPartial(map[string]knownvalue.Check{ + "inventory_table_configuration": knownvalue.ListExact([]knownvalue.Check{ + knownvalue.ObjectExact(map[string]knownvalue.Check{ + "configuration_state": tfknownvalue.StringExact(awstypes.InventoryConfigurationStateEnabled), + names.AttrEncryptionConfiguration: knownvalue.ListExact([]knownvalue.Check{ + knownvalue.ObjectExact(map[string]knownvalue.Check{ + names.AttrKMSKeyARN: knownvalue.Null(), + "sse_algorithm": tfknownvalue.StringExact(awstypes.TableSseAlgorithmAes256), + }), + }), + "table_arn": tfknownvalue.RegionalARNRegexp("s3tables", regexache.MustCompile(`bucket/aws-s3/table/.+`)), + names.AttrTableName: knownvalue.NotNull(), + }), + }), + "journal_table_configuration": knownvalue.ListExact([]knownvalue.Check{ + 
knownvalue.ObjectPartial(map[string]knownvalue.Check{ + "record_expiration": knownvalue.ListExact([]knownvalue.Check{ + knownvalue.ObjectExact(map[string]knownvalue.Check{ + "days": knownvalue.Null(), + "expiration": tfknownvalue.StringExact(awstypes.ExpirationStateDisabled), + }), + }), + "table_arn": tfknownvalue.RegionalARNRegexp("s3tables", regexache.MustCompile(`bucket/aws-s3/table/.+`)), + names.AttrTableName: knownvalue.NotNull(), + }), + }), + }), + })), + }, + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, names.AttrBucket), + ImportStateVerifyIdentifierAttribute: names.AttrBucket, + ImportStateVerifyIgnore: []string{ + "metadata_configuration.0.inventory_table_configuration.0.encryption_configuration", + "metadata_configuration.0.journal_table_configuration.0.encryption_configuration", + }, + }, + { + Config: testAccBucketMetadataConfigurationConfig_encryption2(rName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckBucketMetadataConfigurationExists(ctx, resourceName, &v), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("metadata_configuration"), knownvalue.ListExact([]knownvalue.Check{ + knownvalue.ObjectPartial(map[string]knownvalue.Check{ + "inventory_table_configuration": knownvalue.ListExact([]knownvalue.Check{ + knownvalue.ObjectExact(map[string]knownvalue.Check{ + "configuration_state": tfknownvalue.StringExact(awstypes.InventoryConfigurationStateDisabled), + names.AttrEncryptionConfiguration: knownvalue.ListSizeExact(0), + "table_arn": knownvalue.Null(), + names.AttrTableName: knownvalue.Null(), + }), + }), + "journal_table_configuration": knownvalue.ListExact([]knownvalue.Check{ + 
knownvalue.ObjectPartial(map[string]knownvalue.Check{ + names.AttrEncryptionConfiguration: knownvalue.ListExact([]knownvalue.Check{ + knownvalue.ObjectExact(map[string]knownvalue.Check{ + names.AttrKMSKeyARN: knownvalue.NotNull(), + "sse_algorithm": tfknownvalue.StringExact(awstypes.TableSseAlgorithmAwsKms), + }), + }), + "record_expiration": knownvalue.ListExact([]knownvalue.Check{ + knownvalue.ObjectExact(map[string]knownvalue.Check{ + "days": knownvalue.Int32Exact(30), + "expiration": tfknownvalue.StringExact(awstypes.ExpirationStateEnabled), + }), + }), + "table_arn": tfknownvalue.RegionalARNRegexp("s3tables", regexache.MustCompile(`bucket/aws-s3/table/.+`)), + names.AttrTableName: knownvalue.NotNull(), + }), + }), + }), + })), + }, + }, + }, + }) +} + +func TestAccS3BucketMetadataConfiguration_disappears(t *testing.T) { + ctx := acctest.Context(t) + var v awstypes.MetadataConfigurationResult + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_s3_bucket_metadata_configuration.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckBucketMetadataConfigurationDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccBucketMetadataConfigurationConfig_basic(rName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckBucketMetadataConfigurationExists(ctx, resourceName, &v), + acctest.CheckFrameworkResourceDisappears(ctx, acctest.Provider, tfs3.ResourceBucketMetadataConfiguration, resourceName), + ), + ExpectNonEmptyPlan: true, + }, + }, + }) +} + +func testAccCheckBucketMetadataConfigurationDestroy(ctx context.Context) resource.TestCheckFunc { + return func(s *terraform.State) error { + conn := acctest.Provider.Meta().(*conns.AWSClient).S3Client(ctx) + + for _, rs := range s.RootModule().Resources { + if rs.Type != 
"aws_s3_bucket_metadata_configuration" { + continue + } + + _, err := tfs3.FindBucketMetadataConfigurationByTwoPartKey(ctx, conn, rs.Primary.Attributes[names.AttrBucket], rs.Primary.Attributes[names.AttrExpectedBucketOwner]) + + if tfresource.NotFound(err) { + continue + } + + if err != nil { + return err + } + + return fmt.Errorf("S3 Bucket Metadata Configuration %s still exists", rs.Primary.Attributes[names.AttrBucket]) + } + + return nil + } +} + +func testAccCheckBucketMetadataConfigurationExists(ctx context.Context, n string, v *awstypes.MetadataConfigurationResult) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + conn := acctest.Provider.Meta().(*conns.AWSClient).S3Client(ctx) + + output, err := tfs3.FindBucketMetadataConfigurationByTwoPartKey(ctx, conn, rs.Primary.Attributes[names.AttrBucket], rs.Primary.Attributes[names.AttrExpectedBucketOwner]) + + if err != nil { + return err + } + + *v = *output + + return nil + } +} + +func testAccBucketMetadataConfigurationConfig_basic(rName string) string { + return fmt.Sprintf(` +resource "aws_s3_bucket" "test" { + bucket = %[1]q +} + +resource "aws_s3_bucket_metadata_configuration" "test" { + bucket = aws_s3_bucket.test.bucket + + metadata_configuration { + inventory_table_configuration { + configuration_state = "DISABLED" + } + + journal_table_configuration { + record_expiration { + days = 7 + expiration = "ENABLED" + } + } + } +} +`, rName) +} + +func testAccBucketMetadataConfigurationConfig_encryption1(rName string) string { + return fmt.Sprintf(` +resource "aws_s3_bucket" "test" { + bucket = %[1]q +} + +resource "aws_kms_key" "test" { + description = %[1]q + deletion_window_in_days = 7 +} + +resource "aws_s3_bucket_metadata_configuration" "test" { + bucket = aws_s3_bucket.test.bucket + + metadata_configuration { + inventory_table_configuration { + configuration_state = "ENABLED" + + 
encryption_configuration { + sse_algorithm = "AES256" + } + } + + journal_table_configuration { + record_expiration { + expiration = "DISABLED" + } + } + } +} +`, rName) +} + +func testAccBucketMetadataConfigurationConfig_encryption2(rName string) string { + return fmt.Sprintf(` +resource "aws_s3_bucket" "test" { + bucket = %[1]q +} + +resource "aws_kms_key" "test" { + description = %[1]q + deletion_window_in_days = 7 +} + +resource "aws_s3_bucket_metadata_configuration" "test" { + bucket = aws_s3_bucket.test.bucket + + metadata_configuration { + inventory_table_configuration { + configuration_state = "DISABLED" + } + + journal_table_configuration { + record_expiration { + days = 30 + expiration = "ENABLED" + } + + encryption_configuration { + sse_algorithm = "aws:kms" + kms_key_arn = aws_kms_key.test.arn + } + } + } +} +`, rName) +} diff --git a/internal/service/s3/bucket_metric.go b/internal/service/s3/bucket_metric.go index c4a03cc2a45a..35ae683ae752 100644 --- a/internal/service/s3/bucket_metric.go +++ b/internal/service/s3/bucket_metric.go @@ -104,7 +104,7 @@ func resourceBucketMetricPut(ctx context.Context, d *schema.ResourceData, meta a MetricsConfiguration: metricsConfiguration, } - _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, bucketPropagationTimeout, func() (any, error) { + _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, bucketPropagationTimeout, func(ctx context.Context) (any, error) { return conn.PutBucketMetricsConfiguration(ctx, input) }, errCodeNoSuchBucket) @@ -119,7 +119,7 @@ func resourceBucketMetricPut(ctx context.Context, d *schema.ResourceData, meta a if d.IsNewResource() { d.SetId(fmt.Sprintf("%s:%s", bucket, name)) - _, err = tfresource.RetryWhenNotFound(ctx, bucketPropagationTimeout, func() (any, error) { + _, err = tfresource.RetryWhenNotFound(ctx, bucketPropagationTimeout, func(ctx context.Context) (any, error) { return findMetricsConfiguration(ctx, conn, bucket, name) }) @@ -194,7 +194,7 @@ func resourceBucketMetricDelete(ctx 
context.Context, d *schema.ResourceData, met return sdkdiag.AppendErrorf(diags, "deleting S3 Bucket Metric (%s): %s", d.Id(), err) } - _, err = tfresource.RetryUntilNotFound(ctx, bucketPropagationTimeout, func() (any, error) { + _, err = tfresource.RetryUntilNotFound(ctx, bucketPropagationTimeout, func(ctx context.Context) (any, error) { return findMetricsConfiguration(ctx, conn, bucket, name) }) diff --git a/internal/service/s3/bucket_notification.go b/internal/service/s3/bucket_notification.go index 9b39ef2a9b53..48441d4dd1e4 100644 --- a/internal/service/s3/bucket_notification.go +++ b/internal/service/s3/bucket_notification.go @@ -26,6 +26,9 @@ import ( ) // @SDKResource("aws_s3_bucket_notification", name="Bucket Notification") +// @IdentityAttribute("bucket") +// @Testing(preIdentityVersion="v6.9.0") +// @Testing(existsType="github.com/aws/aws-sdk-go-v2/service/s3;s3.GetBucketNotificationConfigurationOutput") func resourceBucketNotification() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceBucketNotificationPut, @@ -33,10 +36,6 @@ func resourceBucketNotification() *schema.Resource { UpdateWithoutTimeout: resourceBucketNotificationPut, DeleteWithoutTimeout: resourceBucketNotificationDelete, - Importer: &schema.ResourceImporter{ - StateContext: schema.ImportStatePassthroughContext, - }, - Schema: map[string]*schema.Schema{ names.AttrBucket: { Type: schema.TypeString, @@ -309,7 +308,7 @@ func resourceBucketNotificationPut(ctx context.Context, d *schema.ResourceData, NotificationConfiguration: notificationConfiguration, } - _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, bucketPropagationTimeout, func() (any, error) { + _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, bucketPropagationTimeout, func(ctx context.Context) (any, error) { return conn.PutBucketNotificationConfiguration(ctx, input) }, errCodeNoSuchBucket) @@ -324,7 +323,7 @@ func resourceBucketNotificationPut(ctx context.Context, d *schema.ResourceData, if 
d.IsNewResource() { d.SetId(bucket) - _, err = tfresource.RetryWhenNotFound(ctx, bucketPropagationTimeout, func() (any, error) { + _, err = tfresource.RetryWhenNotFound(ctx, bucketPropagationTimeout, func(ctx context.Context) (any, error) { return findBucketNotificationConfiguration(ctx, conn, bucket, "") }) diff --git a/internal/service/s3/bucket_notification_identity_gen_test.go b/internal/service/s3/bucket_notification_identity_gen_test.go new file mode 100644 index 000000000000..c8343dd96738 --- /dev/null +++ b/internal/service/s3/bucket_notification_identity_gen_test.go @@ -0,0 +1,309 @@ +// Code generated by internal/generate/identitytests/main.go; DO NOT EDIT. + +package s3_test + +import ( + "testing" + + "github.com/aws/aws-sdk-go-v2/service/s3" + "github.com/hashicorp/terraform-plugin-testing/config" + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/knownvalue" + "github.com/hashicorp/terraform-plugin-testing/plancheck" + "github.com/hashicorp/terraform-plugin-testing/statecheck" + "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" + "github.com/hashicorp/terraform-plugin-testing/tfversion" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + tfknownvalue "github.com/hashicorp/terraform-provider-aws/internal/acctest/knownvalue" + tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccS3BucketNotification_Identity_Basic(t *testing.T) { + ctx := acctest.Context(t) + + var v s3.GetBucketNotificationConfigurationOutput + resourceName := "aws_s3_bucket_notification.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + 
PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), + CheckDestroy: testAccCheckBucketNotificationDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + // Step 1: Setup + { + ConfigDirectory: config.StaticDirectory("testdata/BucketNotification/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckBucketNotificationExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + names.AttrBucket: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrBucket)), + }, + }, + + // Step 2: Import command + { + ConfigDirectory: config.StaticDirectory("testdata/BucketNotification/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ImportStateKind: resource.ImportCommandWithID, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + + // Step 3: Import block with Import ID + { + ConfigDirectory: config.StaticDirectory("testdata/BucketNotification/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrBucket), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), 
knownvalue.StringExact(acctest.Region())), + }, + }, + }, + + // Step 4: Import block with Resource Identity + { + ConfigDirectory: config.StaticDirectory("testdata/BucketNotification/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithResourceIdentity, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrBucket), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + }, + }, + }, + }, + }) +} + +func TestAccS3BucketNotification_Identity_RegionOverride(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_s3_bucket_notification.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), + CheckDestroy: acctest.CheckDestroyNoop, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + // Step 1: Setup + { + ConfigDirectory: config.StaticDirectory("testdata/BucketNotification/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.AlternateRegion()), + 
names.AttrBucket: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrBucket)), + }, + }, + + // Step 2: Import command + { + ConfigDirectory: config.StaticDirectory("testdata/BucketNotification/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ImportStateKind: resource.ImportCommandWithID, + ImportStateIdFunc: acctest.CrossRegionImportStateIdFunc(resourceName), + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + + // Step 3: Import block with Import ID + { + ConfigDirectory: config.StaticDirectory("testdata/BucketNotification/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportStateIdFunc: acctest.CrossRegionImportStateIdFunc(resourceName), + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrBucket), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + }, + + // Step 4: Import block with Resource Identity + { + ConfigDirectory: config.StaticDirectory("testdata/BucketNotification/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithResourceIdentity, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrBucket), 
knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + }, + }, + }) +} + +// Resource Identity was added after v6.9.0 +func TestAccS3BucketNotification_Identity_ExistingResource(t *testing.T) { + ctx := acctest.Context(t) + + var v s3.GetBucketNotificationConfigurationOutput + resourceName := "aws_s3_bucket_notification.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), + CheckDestroy: testAccCheckBucketNotificationDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/BucketNotification/basic_v6.9.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckBucketNotificationExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/BucketNotification/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + 
names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + names.AttrBucket: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrBucket)), + }, + }, + }, + }) +} + +// Resource Identity was added after v6.9.0 +func TestAccS3BucketNotification_Identity_ExistingResource_NoRefresh_NoChange(t *testing.T) { + ctx := acctest.Context(t) + + var v s3.GetBucketNotificationConfigurationOutput + resourceName := "aws_s3_bucket_notification.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), + CheckDestroy: testAccCheckBucketNotificationDestroy(ctx), + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/BucketNotification/basic_v6.9.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckBucketNotificationExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/BucketNotification/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: 
[]plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + }, + }) +} diff --git a/internal/service/s3/bucket_object.go b/internal/service/s3/bucket_object.go index 92e8071bbd7c..84953e82f4b6 100644 --- a/internal/service/s3/bucket_object.go +++ b/internal/service/s3/bucket_object.go @@ -45,6 +45,7 @@ import ( // @ImportIDHandler("bucketObjectImportID") // @Testing(existsType="github.com/aws/aws-sdk-go-v2/service/s3;s3.GetObjectOutput") // @Testing(importIgnore="acl;force_destroy") +// @Testing(preIdentityVersion="6.0.0") func resourceBucketObject() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceBucketObjectCreate, diff --git a/internal/service/s3/bucket_object_data_source_tags_gen_test.go b/internal/service/s3/bucket_object_data_source_tags_gen_test.go index 397799c425aa..04b8b3bada42 100644 --- a/internal/service/s3/bucket_object_data_source_tags_gen_test.go +++ b/internal/service/s3/bucket_object_data_source_tags_gen_test.go @@ -6,7 +6,6 @@ import ( "testing" "github.com/hashicorp/terraform-plugin-testing/config" - sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/knownvalue" "github.com/hashicorp/terraform-plugin-testing/statecheck" @@ -17,10 +16,11 @@ import ( func TestAccS3BucketObjectDataSource_tags(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_s3_bucket_object.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), ProtoV5ProviderFactories: 
acctest.ProtoV5ProviderFactories, @@ -45,10 +45,11 @@ func TestAccS3BucketObjectDataSource_tags(t *testing.T) { func TestAccS3BucketObjectDataSource_tags_NullMap(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_s3_bucket_object.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -69,10 +70,11 @@ func TestAccS3BucketObjectDataSource_tags_NullMap(t *testing.T) { func TestAccS3BucketObjectDataSource_tags_EmptyMap(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_s3_bucket_object.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -93,10 +95,11 @@ func TestAccS3BucketObjectDataSource_tags_EmptyMap(t *testing.T) { func TestAccS3BucketObjectDataSource_tags_DefaultTags_nonOverlapping(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_s3_bucket_object.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), Steps: []resource.TestStep{ @@ -125,10 +128,11 @@ func TestAccS3BucketObjectDataSource_tags_DefaultTags_nonOverlapping(t *testing. 
func TestAccS3BucketObjectDataSource_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_s3_bucket_object.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), Steps: []resource.TestStep{ @@ -163,10 +167,11 @@ func TestAccS3BucketObjectDataSource_tags_IgnoreTags_Overlap_DefaultTag(t *testi func TestAccS3BucketObjectDataSource_tags_IgnoreTags_Overlap_ResourceTag(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_s3_bucket_object.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), Steps: []resource.TestStep{ diff --git a/internal/service/s3/bucket_object_identity_gen_test.go b/internal/service/s3/bucket_object_identity_gen_test.go index c04f21252f0f..383ecea4b03d 100644 --- a/internal/service/s3/bucket_object_identity_gen_test.go +++ b/internal/service/s3/bucket_object_identity_gen_test.go @@ -27,7 +27,7 @@ func TestAccS3BucketObject_Identity_Basic(t *testing.T) { resourceName := "aws_s3_bucket_object.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ TerraformVersionChecks: []tfversion.TerraformVersionCheck{ tfversion.SkipBelow(tfversion.Version1_12_0), }, @@ -123,7 +123,7 @@ func TestAccS3BucketObject_Identity_RegionOverride(t *testing.T) { resourceName := "aws_s3_bucket_object.test" rName := 
sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ TerraformVersionChecks: []tfversion.TerraformVersionCheck{ tfversion.SkipBelow(tfversion.Version1_12_0), }, @@ -215,3 +215,121 @@ func TestAccS3BucketObject_Identity_RegionOverride(t *testing.T) { }, }) } + +// Resource Identity was added after v6.0.0 +func TestAccS3BucketObject_Identity_ExistingResource(t *testing.T) { + ctx := acctest.Context(t) + + var v s3.GetObjectOutput + resourceName := "aws_s3_bucket_object.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), + CheckDestroy: testAccCheckBucketObjectDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/BucketObject/basic_v6.0.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckBucketObjectExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/BucketObject/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: 
[]statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + names.AttrBucket: knownvalue.NotNull(), + names.AttrKey: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrBucket)), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrKey)), + }, + }, + }, + }) +} + +// Resource Identity was added after v6.0.0 +func TestAccS3BucketObject_Identity_ExistingResource_NoRefresh_NoChange(t *testing.T) { + ctx := acctest.Context(t) + + var v s3.GetObjectOutput + resourceName := "aws_s3_bucket_object.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), + CheckDestroy: testAccCheckBucketObjectDestroy(ctx), + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/BucketObject/basic_v6.0.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckBucketObjectExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/BucketObject/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: 
resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + }, + }) +} diff --git a/internal/service/s3/bucket_object_lock_configuration.go b/internal/service/s3/bucket_object_lock_configuration.go index dedd664ce6d6..fc0e86e16d07 100644 --- a/internal/service/s3/bucket_object_lock_configuration.go +++ b/internal/service/s3/bucket_object_lock_configuration.go @@ -128,7 +128,7 @@ func resourceBucketObjectLockConfigurationCreate(ctx context.Context, d *schema. input.Token = aws.String(v.(string)) } - _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, bucketPropagationTimeout, func() (any, error) { + _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, bucketPropagationTimeout, func(ctx context.Context) (any, error) { return conn.PutObjectLockConfiguration(ctx, input) }, errCodeNoSuchBucket) @@ -142,7 +142,7 @@ func resourceBucketObjectLockConfigurationCreate(ctx context.Context, d *schema. 
d.SetId(createResourceID(bucket, expectedBucketOwner)) - _, err = tfresource.RetryWhenNotFound(ctx, bucketPropagationTimeout, func() (any, error) { + _, err = tfresource.RetryWhenNotFound(ctx, bucketPropagationTimeout, func(ctx context.Context) (any, error) { return findObjectLockConfiguration(ctx, conn, bucket, expectedBucketOwner) }) diff --git a/internal/service/s3/bucket_object_tags_gen_test.go b/internal/service/s3/bucket_object_tags_gen_test.go index f78a42ab6a0c..cd71f1404235 100644 --- a/internal/service/s3/bucket_object_tags_gen_test.go +++ b/internal/service/s3/bucket_object_tags_gen_test.go @@ -7,7 +7,6 @@ import ( "github.com/aws/aws-sdk-go-v2/service/s3" "github.com/hashicorp/terraform-plugin-testing/config" - sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/knownvalue" "github.com/hashicorp/terraform-plugin-testing/plancheck" @@ -19,11 +18,12 @@ import ( func TestAccS3BucketObject_tags(t *testing.T) { ctx := acctest.Context(t) + var v s3.GetObjectOutput resourceName := "aws_s3_bucket_object.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), CheckDestroy: testAccCheckBucketObjectDestroy(ctx), @@ -213,11 +213,12 @@ func TestAccS3BucketObject_tags(t *testing.T) { func TestAccS3BucketObject_tags_null(t *testing.T) { ctx := acctest.Context(t) + var v s3.GetObjectOutput resourceName := "aws_s3_bucket_object.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { 
acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), CheckDestroy: testAccCheckBucketObjectDestroy(ctx), @@ -283,11 +284,12 @@ func TestAccS3BucketObject_tags_null(t *testing.T) { func TestAccS3BucketObject_tags_EmptyMap(t *testing.T) { ctx := acctest.Context(t) + var v s3.GetObjectOutput resourceName := "aws_s3_bucket_object.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), CheckDestroy: testAccCheckBucketObjectDestroy(ctx), @@ -349,11 +351,12 @@ func TestAccS3BucketObject_tags_EmptyMap(t *testing.T) { func TestAccS3BucketObject_tags_AddOnUpdate(t *testing.T) { ctx := acctest.Context(t) + var v s3.GetObjectOutput resourceName := "aws_s3_bucket_object.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), CheckDestroy: testAccCheckBucketObjectDestroy(ctx), @@ -433,11 +436,12 @@ func TestAccS3BucketObject_tags_AddOnUpdate(t *testing.T) { func TestAccS3BucketObject_tags_EmptyTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) + var v s3.GetObjectOutput resourceName := "aws_s3_bucket_object.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), CheckDestroy: testAccCheckBucketObjectDestroy(ctx), @@ -528,11 +532,12 @@ func 
TestAccS3BucketObject_tags_EmptyTag_OnCreate(t *testing.T) { func TestAccS3BucketObject_tags_EmptyTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + var v s3.GetObjectOutput resourceName := "aws_s3_bucket_object.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), CheckDestroy: testAccCheckBucketObjectDestroy(ctx), @@ -671,11 +676,12 @@ func TestAccS3BucketObject_tags_EmptyTag_OnUpdate_Add(t *testing.T) { func TestAccS3BucketObject_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + var v s3.GetObjectOutput resourceName := "aws_s3_bucket_object.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), CheckDestroy: testAccCheckBucketObjectDestroy(ctx), @@ -763,11 +769,12 @@ func TestAccS3BucketObject_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { func TestAccS3BucketObject_tags_DefaultTags_providerOnly(t *testing.T) { ctx := acctest.Context(t) + var v s3.GetObjectOutput resourceName := "aws_s3_bucket_object.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), CheckDestroy: testAccCheckBucketObjectDestroy(ctx), @@ -956,11 +963,12 @@ func TestAccS3BucketObject_tags_DefaultTags_providerOnly(t *testing.T) { func 
TestAccS3BucketObject_tags_DefaultTags_nonOverlapping(t *testing.T) { ctx := acctest.Context(t) + var v s3.GetObjectOutput resourceName := "aws_s3_bucket_object.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), CheckDestroy: testAccCheckBucketObjectDestroy(ctx), @@ -1125,11 +1133,12 @@ func TestAccS3BucketObject_tags_DefaultTags_nonOverlapping(t *testing.T) { func TestAccS3BucketObject_tags_DefaultTags_overlapping(t *testing.T) { ctx := acctest.Context(t) + var v s3.GetObjectOutput resourceName := "aws_s3_bucket_object.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), CheckDestroy: testAccCheckBucketObjectDestroy(ctx), @@ -1310,11 +1319,12 @@ func TestAccS3BucketObject_tags_DefaultTags_overlapping(t *testing.T) { func TestAccS3BucketObject_tags_DefaultTags_updateToProviderOnly(t *testing.T) { ctx := acctest.Context(t) + var v s3.GetObjectOutput resourceName := "aws_s3_bucket_object.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), CheckDestroy: testAccCheckBucketObjectDestroy(ctx), @@ -1403,11 +1413,12 @@ func TestAccS3BucketObject_tags_DefaultTags_updateToProviderOnly(t *testing.T) { func 
TestAccS3BucketObject_tags_DefaultTags_updateToResourceOnly(t *testing.T) { ctx := acctest.Context(t) + var v s3.GetObjectOutput resourceName := "aws_s3_bucket_object.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), CheckDestroy: testAccCheckBucketObjectDestroy(ctx), @@ -1495,11 +1506,12 @@ func TestAccS3BucketObject_tags_DefaultTags_updateToResourceOnly(t *testing.T) { func TestAccS3BucketObject_tags_DefaultTags_emptyResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v s3.GetObjectOutput resourceName := "aws_s3_bucket_object.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), CheckDestroy: testAccCheckBucketObjectDestroy(ctx), @@ -1563,11 +1575,12 @@ func TestAccS3BucketObject_tags_DefaultTags_emptyResourceTag(t *testing.T) { func TestAccS3BucketObject_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T) { ctx := acctest.Context(t) + var v s3.GetObjectOutput resourceName := "aws_s3_bucket_object.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), CheckDestroy: testAccCheckBucketObjectDestroy(ctx), @@ -1623,11 +1636,12 @@ func TestAccS3BucketObject_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T) { func 
TestAccS3BucketObject_tags_DefaultTags_nullOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v s3.GetObjectOutput resourceName := "aws_s3_bucket_object.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), CheckDestroy: testAccCheckBucketObjectDestroy(ctx), @@ -1688,11 +1702,12 @@ func TestAccS3BucketObject_tags_DefaultTags_nullOverlappingResourceTag(t *testin func TestAccS3BucketObject_tags_DefaultTags_nullNonOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v s3.GetObjectOutput resourceName := "aws_s3_bucket_object.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), CheckDestroy: testAccCheckBucketObjectDestroy(ctx), @@ -1753,11 +1768,12 @@ func TestAccS3BucketObject_tags_DefaultTags_nullNonOverlappingResourceTag(t *tes func TestAccS3BucketObject_tags_ComputedTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) + var v s3.GetObjectOutput resourceName := "aws_s3_bucket_object.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), CheckDestroy: testAccCheckBucketObjectDestroy(ctx), @@ -1811,11 +1827,12 @@ func TestAccS3BucketObject_tags_ComputedTag_OnCreate(t *testing.T) { func 
TestAccS3BucketObject_tags_ComputedTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + var v s3.GetObjectOutput resourceName := "aws_s3_bucket_object.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), CheckDestroy: testAccCheckBucketObjectDestroy(ctx), @@ -1911,11 +1928,12 @@ func TestAccS3BucketObject_tags_ComputedTag_OnUpdate_Add(t *testing.T) { func TestAccS3BucketObject_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + var v s3.GetObjectOutput resourceName := "aws_s3_bucket_object.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), CheckDestroy: testAccCheckBucketObjectDestroy(ctx), @@ -2001,11 +2019,12 @@ func TestAccS3BucketObject_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { func TestAccS3BucketObject_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { ctx := acctest.Context(t) + var v s3.GetObjectOutput resourceName := "aws_s3_bucket_object.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), CheckDestroy: testAccCheckBucketObjectDestroy(ctx), @@ -2163,11 +2182,12 @@ func TestAccS3BucketObject_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { func 
TestAccS3BucketObject_tags_IgnoreTags_Overlap_ResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v s3.GetObjectOutput resourceName := "aws_s3_bucket_object.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), CheckDestroy: testAccCheckBucketObjectDestroy(ctx), diff --git a/internal/service/s3/bucket_object_test.go b/internal/service/s3/bucket_object_test.go index 5fdc77d8941a..6187686c10f9 100644 --- a/internal/service/s3/bucket_object_test.go +++ b/internal/service/s3/bucket_object_test.go @@ -22,15 +22,8 @@ import ( "github.com/hashicorp/aws-sdk-go-base/v2/endpoints" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" - "github.com/hashicorp/terraform-plugin-testing/knownvalue" - "github.com/hashicorp/terraform-plugin-testing/plancheck" - "github.com/hashicorp/terraform-plugin-testing/statecheck" "github.com/hashicorp/terraform-plugin-testing/terraform" - "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" - "github.com/hashicorp/terraform-plugin-testing/tfversion" "github.com/hashicorp/terraform-provider-aws/internal/acctest" - tfknownvalue "github.com/hashicorp/terraform-provider-aws/internal/acctest/knownvalue" - tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" "github.com/hashicorp/terraform-provider-aws/internal/conns" tfs3 "github.com/hashicorp/terraform-provider-aws/internal/service/s3" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" @@ -125,66 +118,6 @@ func TestAccS3BucketObject_basic(t *testing.T) { }) } -// Resource Identity was added in v6.1 -func TestAccS3BucketObject_Identity_ExistingResource(t *testing.T) { - ctx := 
acctest.Context(t) - - var v s3.GetObjectOutput - resourceName := "aws_s3_bucket_object.object" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - - resource.ParallelTest(t, resource.TestCase{ - TerraformVersionChecks: []tfversion.TerraformVersionCheck{ - tfversion.SkipBelow(tfversion.Version1_12_0), - }, - PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), - CheckDestroy: testAccCheckBucketObjectDestroy(ctx), - Steps: []resource.TestStep{ - { - ExternalProviders: map[string]resource.ExternalProvider{ - "aws": { - Source: "hashicorp/aws", - VersionConstraint: "6.0.0", - }, - }, - Config: testAccBucketObjectConfig_basic(rName), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckBucketObjectExists(ctx, resourceName, &v), - ), - ConfigStateChecks: []statecheck.StateCheck{ - tfstatecheck.ExpectNoIdentity(resourceName), - }, - }, - { - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - Config: testAccBucketObjectConfig_basic(rName), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckBucketObjectExists(ctx, resourceName, &v), - ), - ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - PostApplyPostRefresh: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - }, - ConfigStateChecks: []statecheck.StateCheck{ - statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ - names.AttrAccountID: tfknownvalue.AccountID(), - names.AttrRegion: knownvalue.StringExact(acctest.Region()), - names.AttrBucket: knownvalue.NotNull(), - names.AttrKey: knownvalue.NotNull(), - }), - statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrBucket)), - statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrKey)), - }, - }, - }, - }) -} - func 
TestAccS3BucketObject_source(t *testing.T) { ctx := acctest.Context(t) var obj s3.GetObjectOutput diff --git a/internal/service/s3/bucket_ownership_controls.go b/internal/service/s3/bucket_ownership_controls.go index e6c31eb0a7cc..54b83e3d932a 100644 --- a/internal/service/s3/bucket_ownership_controls.go +++ b/internal/service/s3/bucket_ownership_controls.go @@ -24,6 +24,8 @@ import ( ) // @SDKResource("aws_s3_bucket_ownership_controls", name="Bucket Ownership Controls") +// @IdentityAttribute("bucket") +// @Testing(preIdentityVersion="v6.9.0") func resourceBucketOwnershipControls() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceBucketOwnershipControlsCreate, @@ -31,10 +33,6 @@ func resourceBucketOwnershipControls() *schema.Resource { UpdateWithoutTimeout: resourceBucketOwnershipControlsUpdate, DeleteWithoutTimeout: resourceBucketOwnershipControlsDelete, - Importer: &schema.ResourceImporter{ - StateContext: schema.ImportStatePassthroughContext, - }, - Schema: map[string]*schema.Schema{ names.AttrBucket: { Type: schema.TypeString, @@ -88,7 +86,7 @@ func resourceBucketOwnershipControlsCreate(ctx context.Context, d *schema.Resour d.SetId(bucket) - _, err = tfresource.RetryWhenNotFound(ctx, bucketPropagationTimeout, func() (any, error) { + _, err = tfresource.RetryWhenNotFound(ctx, bucketPropagationTimeout, func(ctx context.Context) (any, error) { return findOwnershipControls(ctx, conn, bucket) }) @@ -163,7 +161,7 @@ func resourceBucketOwnershipControlsDelete(ctx context.Context, d *schema.Resour } log.Printf("[DEBUG] Deleting S3 Bucket Ownership Controls: %s", d.Id()) - _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, 5*time.Minute, func() (any, error) { + _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, 5*time.Minute, func(ctx context.Context) (any, error) { return conn.DeleteBucketOwnershipControls(ctx, &s3.DeleteBucketOwnershipControlsInput{ Bucket: aws.String(bucket), }) @@ -177,7 +175,7 @@ func 
resourceBucketOwnershipControlsDelete(ctx context.Context, d *schema.Resour return sdkdiag.AppendErrorf(diags, "deleting S3 Bucket Ownership Controls (%s): %s", d.Id(), err) } - _, err = tfresource.RetryUntilNotFound(ctx, bucketPropagationTimeout, func() (any, error) { + _, err = tfresource.RetryUntilNotFound(ctx, bucketPropagationTimeout, func(ctx context.Context) (any, error) { return findOwnershipControls(ctx, conn, bucket) }) diff --git a/internal/service/s3/bucket_ownership_controls_identity_gen_test.go b/internal/service/s3/bucket_ownership_controls_identity_gen_test.go new file mode 100644 index 000000000000..39afdb838289 --- /dev/null +++ b/internal/service/s3/bucket_ownership_controls_identity_gen_test.go @@ -0,0 +1,305 @@ +// Code generated by internal/generate/identitytests/main.go; DO NOT EDIT. + +package s3_test + +import ( + "testing" + + "github.com/hashicorp/terraform-plugin-testing/config" + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/knownvalue" + "github.com/hashicorp/terraform-plugin-testing/plancheck" + "github.com/hashicorp/terraform-plugin-testing/statecheck" + "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" + "github.com/hashicorp/terraform-plugin-testing/tfversion" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + tfknownvalue "github.com/hashicorp/terraform-provider-aws/internal/acctest/knownvalue" + tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccS3BucketOwnershipControls_Identity_Basic(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_s3_bucket_ownership_controls.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: 
[]tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), + CheckDestroy: testAccCheckBucketOwnershipControlsDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + // Step 1: Setup + { + ConfigDirectory: config.StaticDirectory("testdata/BucketOwnershipControls/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckBucketOwnershipControlsExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + names.AttrBucket: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrBucket)), + }, + }, + + // Step 2: Import command + { + ConfigDirectory: config.StaticDirectory("testdata/BucketOwnershipControls/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ImportStateKind: resource.ImportCommandWithID, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + + // Step 3: Import block with Import ID + { + ConfigDirectory: config.StaticDirectory("testdata/BucketOwnershipControls/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrBucket), 
knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + }, + }, + }, + + // Step 4: Import block with Resource Identity + { + ConfigDirectory: config.StaticDirectory("testdata/BucketOwnershipControls/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithResourceIdentity, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrBucket), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + }, + }, + }, + }, + }) +} + +func TestAccS3BucketOwnershipControls_Identity_RegionOverride(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_s3_bucket_ownership_controls.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), + CheckDestroy: acctest.CheckDestroyNoop, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + // Step 1: Setup + { + ConfigDirectory: config.StaticDirectory("testdata/BucketOwnershipControls/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + 
names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.AlternateRegion()), + names.AttrBucket: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrBucket)), + }, + }, + + // Step 2: Import command + { + ConfigDirectory: config.StaticDirectory("testdata/BucketOwnershipControls/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ImportStateKind: resource.ImportCommandWithID, + ImportStateIdFunc: acctest.CrossRegionImportStateIdFunc(resourceName), + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + + // Step 3: Import block with Import ID + { + ConfigDirectory: config.StaticDirectory("testdata/BucketOwnershipControls/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportStateIdFunc: acctest.CrossRegionImportStateIdFunc(resourceName), + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrBucket), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + }, + + // Step 4: Import block with Resource Identity + { + ConfigDirectory: config.StaticDirectory("testdata/BucketOwnershipControls/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithResourceIdentity, + ImportPlanChecks: 
resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrBucket), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + }, + }, + }) +} + +// Resource Identity was added after v6.9.0 +func TestAccS3BucketOwnershipControls_Identity_ExistingResource(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_s3_bucket_ownership_controls.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), + CheckDestroy: testAccCheckBucketOwnershipControlsDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/BucketOwnershipControls/basic_v6.9.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckBucketOwnershipControlsExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/BucketOwnershipControls/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + 
ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + names.AttrBucket: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrBucket)), + }, + }, + }, + }) +} + +// Resource Identity was added after v6.9.0 +func TestAccS3BucketOwnershipControls_Identity_ExistingResource_NoRefresh_NoChange(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_s3_bucket_ownership_controls.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), + CheckDestroy: testAccCheckBucketOwnershipControlsDestroy(ctx), + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/BucketOwnershipControls/basic_v6.9.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckBucketOwnershipControlsExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/BucketOwnershipControls/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + 
plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + }, + }) +} diff --git a/internal/service/s3/bucket_policy.go b/internal/service/s3/bucket_policy.go index f40451769601..6f045b8bf8ee 100644 --- a/internal/service/s3/bucket_policy.go +++ b/internal/service/s3/bucket_policy.go @@ -23,6 +23,8 @@ import ( ) // @SDKResource("aws_s3_bucket_policy", name="Bucket Policy") +// @IdentityAttribute("bucket") +// @Testing(preIdentityVersion="v6.9.0") func resourceBucketPolicy() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceBucketPolicyPut, @@ -30,10 +32,6 @@ func resourceBucketPolicy() *schema.Resource { UpdateWithoutTimeout: resourceBucketPolicyPut, DeleteWithoutTimeout: resourceBucketPolicyDelete, - Importer: &schema.ResourceImporter{ - StateContext: schema.ImportStatePassthroughContext, - }, - Schema: map[string]*schema.Schema{ names.AttrBucket: { Type: schema.TypeString, @@ -63,7 +61,7 @@ func resourceBucketPolicyPut(ctx context.Context, d *schema.ResourceData, meta a Policy: aws.String(policy), } - _, err = tfresource.RetryWhenAWSErrCodeEquals(ctx, bucketPropagationTimeout, func() (any, error) { + _, err = tfresource.RetryWhenAWSErrCodeEquals(ctx, bucketPropagationTimeout, func(ctx context.Context) (any, error) { return conn.PutBucketPolicy(ctx, input) }, errCodeMalformedPolicy, errCodeNoSuchBucket) @@ -74,7 +72,7 @@ func resourceBucketPolicyPut(ctx context.Context, d *schema.ResourceData, meta a if d.IsNewResource() { d.SetId(bucket) - _, err = tfresource.RetryWhenNotFound(ctx, bucketPropagationTimeout, func() (any, error) { + _, err = tfresource.RetryWhenNotFound(ctx, bucketPropagationTimeout, func(ctx context.Context) (any, error) { return findBucketPolicy(ctx, conn, bucket) 
}) @@ -140,7 +138,7 @@ func resourceBucketPolicyDelete(ctx context.Context, d *schema.ResourceData, met return sdkdiag.AppendErrorf(diags, "deleting S3 Bucket Policy (%s): %s", d.Id(), err) } - _, err = tfresource.RetryUntilNotFound(ctx, bucketPropagationTimeout, func() (any, error) { + _, err = tfresource.RetryUntilNotFound(ctx, bucketPropagationTimeout, func(ctx context.Context) (any, error) { return findBucketPolicy(ctx, conn, bucket) }) diff --git a/internal/service/s3/bucket_policy_data_source_test.go b/internal/service/s3/bucket_policy_data_source_test.go index 1af409f6f8e8..096c53c5bc8a 100644 --- a/internal/service/s3/bucket_policy_data_source_test.go +++ b/internal/service/s3/bucket_policy_data_source_test.go @@ -60,7 +60,7 @@ func testAccCheckBucketPolicyMatch(nameFirst, keyFirst, nameSecond, keySecond st areEquivalent, err := awspolicy.PoliciesAreEquivalent(policy1, policy2) if err != nil { - return fmt.Errorf("comparing IAM Policies failed: %s", err) + return fmt.Errorf("comparing IAM Policies failed: %w", err) } if !areEquivalent { diff --git a/internal/service/s3/bucket_policy_identity_gen_test.go b/internal/service/s3/bucket_policy_identity_gen_test.go new file mode 100644 index 000000000000..d8c278dfddc8 --- /dev/null +++ b/internal/service/s3/bucket_policy_identity_gen_test.go @@ -0,0 +1,305 @@ +// Code generated by internal/generate/identitytests/main.go; DO NOT EDIT. 
+ +package s3_test + +import ( + "testing" + + "github.com/hashicorp/terraform-plugin-testing/config" + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/knownvalue" + "github.com/hashicorp/terraform-plugin-testing/plancheck" + "github.com/hashicorp/terraform-plugin-testing/statecheck" + "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" + "github.com/hashicorp/terraform-plugin-testing/tfversion" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + tfknownvalue "github.com/hashicorp/terraform-provider-aws/internal/acctest/knownvalue" + tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccS3BucketPolicy_Identity_Basic(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_s3_bucket_policy.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), + CheckDestroy: testAccCheckBucketPolicyDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + // Step 1: Setup + { + ConfigDirectory: config.StaticDirectory("testdata/BucketPolicy/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckBucketPolicyExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + 
names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + names.AttrBucket: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrBucket)), + }, + }, + + // Step 2: Import command + { + ConfigDirectory: config.StaticDirectory("testdata/BucketPolicy/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ImportStateKind: resource.ImportCommandWithID, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + + // Step 3: Import block with Import ID + { + ConfigDirectory: config.StaticDirectory("testdata/BucketPolicy/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrBucket), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + }, + }, + }, + + // Step 4: Import block with Resource Identity + { + ConfigDirectory: config.StaticDirectory("testdata/BucketPolicy/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithResourceIdentity, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrBucket), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + }, + }, + }, + }, + }) +} + +func TestAccS3BucketPolicy_Identity_RegionOverride(t *testing.T) { + ctx := acctest.Context(t) + + resourceName 
:= "aws_s3_bucket_policy.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), + CheckDestroy: acctest.CheckDestroyNoop, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + // Step 1: Setup + { + ConfigDirectory: config.StaticDirectory("testdata/BucketPolicy/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.AlternateRegion()), + names.AttrBucket: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrBucket)), + }, + }, + + // Step 2: Import command + { + ConfigDirectory: config.StaticDirectory("testdata/BucketPolicy/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ImportStateKind: resource.ImportCommandWithID, + ImportStateIdFunc: acctest.CrossRegionImportStateIdFunc(resourceName), + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + + // Step 3: Import block with Import ID + { + ConfigDirectory: config.StaticDirectory("testdata/BucketPolicy/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": 
config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportStateIdFunc: acctest.CrossRegionImportStateIdFunc(resourceName), + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrBucket), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + }, + + // Step 4: Import block with Resource Identity + { + ConfigDirectory: config.StaticDirectory("testdata/BucketPolicy/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithResourceIdentity, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrBucket), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + }, + }, + }) +} + +// Resource Identity was added after v6.9.0 +func TestAccS3BucketPolicy_Identity_ExistingResource(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_s3_bucket_policy.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), + CheckDestroy: testAccCheckBucketPolicyDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: 
config.StaticDirectory("testdata/BucketPolicy/basic_v6.9.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckBucketPolicyExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/BucketPolicy/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + names.AttrBucket: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrBucket)), + }, + }, + }, + }) +} + +// Resource Identity was added after v6.9.0 +func TestAccS3BucketPolicy_Identity_ExistingResource_NoRefresh_NoChange(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_s3_bucket_policy.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), + CheckDestroy: testAccCheckBucketPolicyDestroy(ctx), + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: 
resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/BucketPolicy/basic_v6.9.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckBucketPolicyExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/BucketPolicy/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + }, + }) +} diff --git a/internal/service/s3/bucket_policy_test.go b/internal/service/s3/bucket_policy_test.go index 73fd1a976704..768525f66c9a 100644 --- a/internal/service/s3/bucket_policy_test.go +++ b/internal/service/s3/bucket_policy_test.go @@ -519,7 +519,7 @@ func testAccCheckBucketHasPolicy(ctx context.Context, n string, expectedPolicyTe expectedPolicyText := fmt.Sprintf(expectedPolicyTemplate, acctest.AccountID(ctx), acctest.Partition(), bucketName) equivalent, err := awspolicy.PoliciesAreEquivalent(policy, expectedPolicyText) if err != nil { - return fmt.Errorf("Error testing policy equivalence: %s", err) + return fmt.Errorf("Error testing policy equivalence: %w", err) } if !equivalent { return fmt.Errorf("Non-equivalent policy error:\n\nexpected: %s\n\n got: %s\n", @@ -530,6 +530,23 @@ func 
testAccCheckBucketHasPolicy(ctx context.Context, n string, expectedPolicyTe } } +func testAccCheckBucketPolicyExists(ctx context.Context, n string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + conn := acctest.Provider.Meta().(*conns.AWSClient).S3Client(ctx) + if tfs3.IsDirectoryBucket(rs.Primary.ID) { + conn = acctest.Provider.Meta().(*conns.AWSClient).S3ExpressClient(ctx) + } + + _, err := tfs3.FindBucketPolicy(ctx, conn, rs.Primary.ID) + return err + } +} + func testAccBucketPolicyConfig_basic(bucketName string) string { return fmt.Sprintf(` data "aws_partition" "current" {} diff --git a/internal/service/s3/bucket_public_access_block.go b/internal/service/s3/bucket_public_access_block.go index d78d040c552b..3089034f69e9 100644 --- a/internal/service/s3/bucket_public_access_block.go +++ b/internal/service/s3/bucket_public_access_block.go @@ -21,6 +21,9 @@ import ( ) // @SDKResource("aws_s3_bucket_public_access_block", name="Bucket Public Access Block") +// @IdentityAttribute("bucket") +// @Testing(preIdentityVersion="v6.9.0") +// @Testing(existsType="github.com/aws/aws-sdk-go-v2/service/s3/types;types.PublicAccessBlockConfiguration") func resourceBucketPublicAccessBlock() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceBucketPublicAccessBlockCreate, @@ -28,10 +31,6 @@ func resourceBucketPublicAccessBlock() *schema.Resource { UpdateWithoutTimeout: resourceBucketPublicAccessBlockUpdate, DeleteWithoutTimeout: resourceBucketPublicAccessBlockDelete, - Importer: &schema.ResourceImporter{ - StateContext: schema.ImportStatePassthroughContext, - }, - Schema: map[string]*schema.Schema{ "block_public_acls": { Type: schema.TypeBool, @@ -58,6 +57,10 @@ func resourceBucketPublicAccessBlock() *schema.Resource { Optional: true, Default: false, }, + names.AttrSkipDestroy: { + Type: schema.TypeBool, + Optional: true, + }, }, } } @@ -80,7 
+83,7 @@ func resourceBucketPublicAccessBlockCreate(ctx context.Context, d *schema.Resour }, } - _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, bucketPropagationTimeout, func() (any, error) { + _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, bucketPropagationTimeout, func(ctx context.Context) (any, error) { return conn.PutPublicAccessBlock(ctx, input) }, errCodeNoSuchBucket) @@ -94,7 +97,7 @@ func resourceBucketPublicAccessBlockCreate(ctx context.Context, d *schema.Resour d.SetId(bucket) - _, err = tfresource.RetryWhenNotFound(ctx, bucketPropagationTimeout, func() (any, error) { + _, err = tfresource.RetryWhenNotFound(ctx, bucketPropagationTimeout, func(ctx context.Context) (any, error) { return findPublicAccessBlockConfiguration(ctx, conn, bucket) }) @@ -182,6 +185,11 @@ func resourceBucketPublicAccessBlockDelete(ctx context.Context, d *schema.Resour conn = meta.(*conns.AWSClient).S3ExpressClient(ctx) } + if v, ok := d.GetOk(names.AttrSkipDestroy); ok && v.(bool) { + log.Printf("[DEBUG] Skipping destruction of S3 Bucket Public Access Block: %s", d.Id()) + return diags + } + log.Printf("[DEBUG] Deleting S3 Bucket Public Access Block: %s", d.Id()) _, err := conn.DeletePublicAccessBlock(ctx, &s3.DeletePublicAccessBlockInput{ Bucket: aws.String(bucket), @@ -195,7 +203,7 @@ func resourceBucketPublicAccessBlockDelete(ctx context.Context, d *schema.Resour return sdkdiag.AppendErrorf(diags, "deleting S3 Bucket Public Access Block (%s): %s", d.Id(), err) } - _, err = tfresource.RetryUntilNotFound(ctx, bucketPropagationTimeout, func() (any, error) { + _, err = tfresource.RetryUntilNotFound(ctx, bucketPropagationTimeout, func(ctx context.Context) (any, error) { return findPublicAccessBlockConfiguration(ctx, conn, bucket) }) diff --git a/internal/service/s3/bucket_public_access_block_identity_gen_test.go b/internal/service/s3/bucket_public_access_block_identity_gen_test.go new file mode 100644 index 000000000000..9a81ed22edd7 --- /dev/null +++ 
b/internal/service/s3/bucket_public_access_block_identity_gen_test.go @@ -0,0 +1,309 @@ +// Code generated by internal/generate/identitytests/main.go; DO NOT EDIT. + +package s3_test + +import ( + "testing" + + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/hashicorp/terraform-plugin-testing/config" + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/knownvalue" + "github.com/hashicorp/terraform-plugin-testing/plancheck" + "github.com/hashicorp/terraform-plugin-testing/statecheck" + "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" + "github.com/hashicorp/terraform-plugin-testing/tfversion" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + tfknownvalue "github.com/hashicorp/terraform-provider-aws/internal/acctest/knownvalue" + tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccS3BucketPublicAccessBlock_Identity_Basic(t *testing.T) { + ctx := acctest.Context(t) + + var v types.PublicAccessBlockConfiguration + resourceName := "aws_s3_bucket_public_access_block.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), + CheckDestroy: testAccCheckBucketPublicAccessBlockDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + // Step 1: Setup + { + ConfigDirectory: config.StaticDirectory("testdata/BucketPublicAccessBlock/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: 
resource.ComposeAggregateTestCheckFunc( + testAccCheckBucketPublicAccessBlockExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + names.AttrBucket: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrBucket)), + }, + }, + + // Step 2: Import command + { + ConfigDirectory: config.StaticDirectory("testdata/BucketPublicAccessBlock/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ImportStateKind: resource.ImportCommandWithID, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + + // Step 3: Import block with Import ID + { + ConfigDirectory: config.StaticDirectory("testdata/BucketPublicAccessBlock/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrBucket), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + }, + }, + }, + + // Step 4: Import block with Resource Identity + { + ConfigDirectory: config.StaticDirectory("testdata/BucketPublicAccessBlock/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithResourceIdentity, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: 
[]plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrBucket), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + }, + }, + }, + }, + }) +} + +func TestAccS3BucketPublicAccessBlock_Identity_RegionOverride(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_s3_bucket_public_access_block.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), + CheckDestroy: acctest.CheckDestroyNoop, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + // Step 1: Setup + { + ConfigDirectory: config.StaticDirectory("testdata/BucketPublicAccessBlock/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.AlternateRegion()), + names.AttrBucket: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrBucket)), + }, + }, + + // Step 2: Import command + { + ConfigDirectory: config.StaticDirectory("testdata/BucketPublicAccessBlock/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + 
ImportStateKind: resource.ImportCommandWithID, + ImportStateIdFunc: acctest.CrossRegionImportStateIdFunc(resourceName), + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + + // Step 3: Import block with Import ID + { + ConfigDirectory: config.StaticDirectory("testdata/BucketPublicAccessBlock/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportStateIdFunc: acctest.CrossRegionImportStateIdFunc(resourceName), + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrBucket), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + }, + + // Step 4: Import block with Resource Identity + { + ConfigDirectory: config.StaticDirectory("testdata/BucketPublicAccessBlock/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithResourceIdentity, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrBucket), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + }, + }, + }) +} + +// Resource Identity was added after v6.9.0 +func TestAccS3BucketPublicAccessBlock_Identity_ExistingResource(t *testing.T) { + ctx := acctest.Context(t) + + var v types.PublicAccessBlockConfiguration + resourceName := 
"aws_s3_bucket_public_access_block.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), + CheckDestroy: testAccCheckBucketPublicAccessBlockDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/BucketPublicAccessBlock/basic_v6.9.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckBucketPublicAccessBlockExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/BucketPublicAccessBlock/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + names.AttrBucket: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrBucket)), + }, + }, + }, + }) +} + +// Resource Identity was added after v6.9.0 +func 
TestAccS3BucketPublicAccessBlock_Identity_ExistingResource_NoRefresh_NoChange(t *testing.T) { + ctx := acctest.Context(t) + + var v types.PublicAccessBlockConfiguration + resourceName := "aws_s3_bucket_public_access_block.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), + CheckDestroy: testAccCheckBucketPublicAccessBlockDestroy(ctx), + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/BucketPublicAccessBlock/basic_v6.9.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckBucketPublicAccessBlockExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/BucketPublicAccessBlock/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + }, + }) +} diff --git a/internal/service/s3/bucket_public_access_block_test.go 
b/internal/service/s3/bucket_public_access_block_test.go index 364f4b9860d4..19549b95e144 100644 --- a/internal/service/s3/bucket_public_access_block_test.go +++ b/internal/service/s3/bucket_public_access_block_test.go @@ -270,6 +270,52 @@ func TestAccS3BucketPublicAccessBlock_restrictPublicBuckets(t *testing.T) { }) } +// This test can be safely run at all times as the dangling public access +// block left behind by skipped destruction will ultimately be cleaned up +// by destruction of the associated bucket. +func TestAccS3BucketPublicAccessBlock_skipDestroy(t *testing.T) { + ctx := acctest.Context(t) + var config types.PublicAccessBlockConfiguration + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_s3_bucket_public_access_block.test" + bucketResourceName := "aws_s3_bucket.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckBucketPublicAccessBlockDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccBucketPublicAccessBlockConfig_skipDestroy(rName, false), + Check: resource.ComposeTestCheckFunc( + testAccCheckBucketExists(ctx, bucketResourceName), + testAccCheckBucketPublicAccessBlockExists(ctx, resourceName, &config), + resource.TestCheckResourceAttr(resourceName, names.AttrBucket, rName), + resource.TestCheckResourceAttr(resourceName, names.AttrSkipDestroy, acctest.CtFalse), + ), + }, + { + Config: testAccBucketPublicAccessBlockConfig_skipDestroy(rName, true), + Check: resource.ComposeTestCheckFunc( + testAccCheckBucketExists(ctx, bucketResourceName), + testAccCheckBucketPublicAccessBlockExists(ctx, resourceName, &config), + resource.TestCheckResourceAttr(resourceName, names.AttrBucket, rName), + resource.TestCheckResourceAttr(resourceName, names.AttrSkipDestroy, acctest.CtTrue), + ), + }, + // Remove the public access 
block resource from configuration + { + Config: testAccBucketPublicAccessBlockConfig_skipDestroy_postRemoval(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckBucketExists(ctx, bucketResourceName), + testAccCheckBucketPublicAccessBlockExistsByName(ctx, rName), + ), + }, + }, + }) +} + func TestAccS3BucketPublicAccessBlock_directoryBucket(t *testing.T) { ctx := acctest.Context(t) name := fmt.Sprintf("tf-test-bucket-%d", sdkacctest.RandInt()) @@ -342,6 +388,21 @@ func testAccCheckBucketPublicAccessBlockExists(ctx context.Context, n string, v } } +// testAccCheckBucketPublicAccessBlockExistsByName is a helper to verify a +// public access block is in place for a given bucket. +// +// This variant of the exists check function accepts the bucket name +// directly to support skip_destroy checks where the public access block +// resource is removed from state, but should still exist remotely. +func testAccCheckBucketPublicAccessBlockExistsByName(ctx context.Context, rName string) resource.TestCheckFunc { + return func(s *terraform.State) error { + conn := acctest.Provider.Meta().(*conns.AWSClient).S3Client(ctx) + + _, err := tfs3.FindPublicAccessBlockConfiguration(ctx, conn, rName) + return err + } +} + func testAccBucketPublicAccessBlockConfig_basic(bucketName string, blockPublicAcls, blockPublicPolicy, ignorePublicAcls, restrictPublicBuckets bool) string { return fmt.Sprintf(` resource "aws_s3_bucket" "test" { @@ -359,6 +420,33 @@ resource "aws_s3_bucket_public_access_block" "test" { `, bucketName, blockPublicAcls, blockPublicPolicy, ignorePublicAcls, restrictPublicBuckets) } +func testAccBucketPublicAccessBlockConfig_skipDestroy(bucketName string, skipDestroy bool) string { + return fmt.Sprintf(` +resource "aws_s3_bucket" "test" { + bucket = %[1]q +} + +resource "aws_s3_bucket_public_access_block" "test" { + bucket = aws_s3_bucket.test.bucket + + block_public_acls = true + block_public_policy = true + ignore_public_acls = true + 
restrict_public_buckets = true + + skip_destroy = %[2]t +} +`, bucketName, skipDestroy) +} + +func testAccBucketPublicAccessBlockConfig_skipDestroy_postRemoval(bucketName string) string { + return fmt.Sprintf(` +resource "aws_s3_bucket" "test" { + bucket = %[1]q +} +`, bucketName) +} + func testAccBucketPublicAccessBlockConfig_directoryBucket(bucketName, blockPublicAcls, blockPublicPolicy, ignorePublicAcls, restrictPublicBuckets string) string { return acctest.ConfigCompose(testAccDirectoryBucketConfig_baseAZ(bucketName), fmt.Sprintf(` resource "aws_s3_directory_bucket" "test" { diff --git a/internal/service/s3/bucket_replication_configuration.go b/internal/service/s3/bucket_replication_configuration.go index 0882184b2ec6..ff3b0877257a 100644 --- a/internal/service/s3/bucket_replication_configuration.go +++ b/internal/service/s3/bucket_replication_configuration.go @@ -332,22 +332,22 @@ func resourceBucketReplicationConfigurationCreate(ctx context.Context, d *schema input.Token = aws.String(v.(string)) } - err := retry.RetryContext(ctx, bucketPropagationTimeout, func() *retry.RetryError { + err := tfresource.Retry(ctx, bucketPropagationTimeout, func(ctx context.Context) *tfresource.RetryError { _, err := conn.PutBucketReplication(ctx, input) if tfawserr.ErrCodeEquals(err, errCodeNoSuchBucket) || tfawserr.ErrMessageContains(err, errCodeInvalidRequest, "Versioning must be 'Enabled' on the bucket") { - return retry.RetryableError(err) + return tfresource.RetryableError(err) } if err != nil { - return retry.NonRetryableError(err) + return tfresource.NonRetryableError(err) } return nil }) - if tfresource.TimedOut(err) { - _, err = conn.PutBucketReplication(ctx, input) + if err != nil { + return sdkdiag.AppendErrorf(diags, "creating S3 Bucket (%s) Replication Configuration: %s", bucket, err) } if tfawserr.ErrMessageContains(err, errCodeInvalidArgument, "ReplicationConfiguration is not valid, expected CreateBucketConfiguration") { @@ -360,7 +360,7 @@ func 
resourceBucketReplicationConfigurationCreate(ctx context.Context, d *schema d.SetId(bucket) - _, err = tfresource.RetryWhenNotFound(ctx, bucketPropagationTimeout, func() (any, error) { + _, err = tfresource.RetryWhenNotFound(ctx, bucketPropagationTimeout, func(ctx context.Context) (any, error) { return findReplicationConfiguration(ctx, conn, bucket) }) @@ -453,7 +453,7 @@ func resourceBucketReplicationConfigurationDelete(ctx context.Context, d *schema return sdkdiag.AppendErrorf(diags, "deleting S3 Bucket Replication Configuration (%s): %s", d.Id(), err) } - _, err = tfresource.RetryUntilNotFound(ctx, bucketPropagationTimeout, func() (any, error) { + _, err = tfresource.RetryUntilNotFound(ctx, bucketPropagationTimeout, func(ctx context.Context) (any, error) { return findReplicationConfiguration(ctx, conn, bucket) }) diff --git a/internal/service/s3/bucket_request_payment_configuration.go b/internal/service/s3/bucket_request_payment_configuration.go index b4cbdd0bf3ad..dfbd7a572d4a 100644 --- a/internal/service/s3/bucket_request_payment_configuration.go +++ b/internal/service/s3/bucket_request_payment_configuration.go @@ -76,7 +76,7 @@ func resourceBucketRequestPaymentConfigurationCreate(ctx context.Context, d *sch input.ExpectedBucketOwner = aws.String(expectedBucketOwner) } - _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, bucketPropagationTimeout, func() (any, error) { + _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, bucketPropagationTimeout, func(ctx context.Context) (any, error) { return conn.PutBucketRequestPayment(ctx, input) }, errCodeNoSuchBucket) @@ -90,7 +90,7 @@ func resourceBucketRequestPaymentConfigurationCreate(ctx context.Context, d *sch d.SetId(createResourceID(bucket, expectedBucketOwner)) - _, err = tfresource.RetryWhenNotFound(ctx, bucketPropagationTimeout, func() (any, error) { + _, err = tfresource.RetryWhenNotFound(ctx, bucketPropagationTimeout, func(ctx context.Context) (any, error) { return findBucketRequestPayment(ctx, conn, 
bucket, expectedBucketOwner) }) diff --git a/internal/service/s3/bucket_server_side_encryption_configuration.go b/internal/service/s3/bucket_server_side_encryption_configuration.go index 67b9e5cc73ab..8c3eccbf72b3 100644 --- a/internal/service/s3/bucket_server_side_encryption_configuration.go +++ b/internal/service/s3/bucket_server_side_encryption_configuration.go @@ -23,7 +23,14 @@ import ( "github.com/hashicorp/terraform-provider-aws/names" ) -// @SDKResource("aws_s3_bucket_server_side_encryption_configuration", name="Bucket Server-side Encryption Configuration") +// @SDKResource("aws_s3_bucket_server_side_encryption_configuration", name="Bucket Server Side Encryption Configuration") +// @IdentityAttribute("bucket") +// @IdentityAttribute("expected_bucket_owner", optional="true") +// @ImportIDHandler("resourceImportID") +// @Testing(preIdentityVersion="v6.9.0") +// @Testing(checkDestroyNoop=true) +// @Testing(importIgnore="rule.0.bucket_key_enabled") +// @Testing(plannableImportAction="NoOp") func resourceBucketServerSideEncryptionConfiguration() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceBucketServerSideEncryptionConfigurationCreate, @@ -31,10 +38,6 @@ func resourceBucketServerSideEncryptionConfiguration() *schema.Resource { UpdateWithoutTimeout: resourceBucketServerSideEncryptionConfigurationUpdate, DeleteWithoutTimeout: resourceBucketServerSideEncryptionConfigurationDelete, - Importer: &schema.ResourceImporter{ - StateContext: schema.ImportStatePassthroughContext, - }, - Schema: map[string]*schema.Schema{ names.AttrBucket: { Type: schema.TypeString, @@ -101,7 +104,7 @@ func resourceBucketServerSideEncryptionConfigurationCreate(ctx context.Context, input.ExpectedBucketOwner = aws.String(expectedBucketOwner) } - _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, bucketPropagationTimeout, func() (any, error) { + _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, bucketPropagationTimeout, func(ctx context.Context) (any, error) { 
return conn.PutBucketEncryption(ctx, input) }, errCodeNoSuchBucket, errCodeOperationAborted) @@ -111,7 +114,7 @@ func resourceBucketServerSideEncryptionConfigurationCreate(ctx context.Context, d.SetId(createResourceID(bucket, expectedBucketOwner)) - _, err = tfresource.RetryWhenNotFound(ctx, bucketPropagationTimeout, func() (any, error) { + _, err = tfresource.RetryWhenNotFound(ctx, bucketPropagationTimeout, func(ctx context.Context) (any, error) { return findServerSideEncryptionConfiguration(ctx, conn, bucket, expectedBucketOwner) }) @@ -179,7 +182,7 @@ func resourceBucketServerSideEncryptionConfigurationUpdate(ctx context.Context, input.ExpectedBucketOwner = aws.String(expectedBucketOwner) } - _, err = tfresource.RetryWhenAWSErrCodeEquals(ctx, bucketPropagationTimeout, func() (any, error) { + _, err = tfresource.RetryWhenAWSErrCodeEquals(ctx, bucketPropagationTimeout, func(ctx context.Context) (any, error) { return conn.PutBucketEncryption(ctx, input) }, errCodeNoSuchBucket, errCodeOperationAborted) diff --git a/internal/service/s3/bucket_server_side_encryption_configuration_identity_gen_test.go b/internal/service/s3/bucket_server_side_encryption_configuration_identity_gen_test.go new file mode 100644 index 000000000000..6942b2431318 --- /dev/null +++ b/internal/service/s3/bucket_server_side_encryption_configuration_identity_gen_test.go @@ -0,0 +1,318 @@ +// Code generated by internal/generate/identitytests/main.go; DO NOT EDIT. 
+ +package s3_test + +import ( + "testing" + + "github.com/hashicorp/terraform-plugin-testing/config" + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/knownvalue" + "github.com/hashicorp/terraform-plugin-testing/plancheck" + "github.com/hashicorp/terraform-plugin-testing/statecheck" + "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" + "github.com/hashicorp/terraform-plugin-testing/tfversion" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + tfknownvalue "github.com/hashicorp/terraform-provider-aws/internal/acctest/knownvalue" + tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccS3BucketServerSideEncryptionConfiguration_Identity_Basic(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_s3_bucket_server_side_encryption_configuration.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), + CheckDestroy: acctest.CheckDestroyNoop, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + // Step 1: Setup + { + ConfigDirectory: config.StaticDirectory("testdata/BucketServerSideEncryptionConfiguration/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckBucketServerSideEncryptionConfigurationExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), 
knownvalue.StringExact(acctest.Region())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + names.AttrBucket: knownvalue.NotNull(), + names.AttrExpectedBucketOwner: knownvalue.Null(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrBucket)), + }, + }, + + // Step 2: Import command + { + ConfigDirectory: config.StaticDirectory("testdata/BucketServerSideEncryptionConfiguration/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ImportStateKind: resource.ImportCommandWithID, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "rule.0.bucket_key_enabled", + }, + }, + + // Step 3: Import block with Import ID + { + ConfigDirectory: config.StaticDirectory("testdata/BucketServerSideEncryptionConfiguration/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrBucket), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrExpectedBucketOwner), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + }, + }, + }, + + // Step 4: Import block with Resource Identity + { + ConfigDirectory: config.StaticDirectory("testdata/BucketServerSideEncryptionConfiguration/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithResourceIdentity, + 
ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrBucket), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrExpectedBucketOwner), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + }, + }, + }, + }, + }) +} + +func TestAccS3BucketServerSideEncryptionConfiguration_Identity_RegionOverride(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_s3_bucket_server_side_encryption_configuration.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), + CheckDestroy: acctest.CheckDestroyNoop, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + // Step 1: Setup + { + ConfigDirectory: config.StaticDirectory("testdata/BucketServerSideEncryptionConfiguration/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.AlternateRegion()), + names.AttrBucket: knownvalue.NotNull(), + names.AttrExpectedBucketOwner: knownvalue.Null(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrBucket)), + }, + }, + + // Step 2: Import 
command + { + ConfigDirectory: config.StaticDirectory("testdata/BucketServerSideEncryptionConfiguration/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ImportStateKind: resource.ImportCommandWithID, + ImportStateIdFunc: acctest.CrossRegionImportStateIdFunc(resourceName), + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "rule.0.bucket_key_enabled", + }, + }, + + // Step 3: Import block with Import ID + { + ConfigDirectory: config.StaticDirectory("testdata/BucketServerSideEncryptionConfiguration/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportStateIdFunc: acctest.CrossRegionImportStateIdFunc(resourceName), + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrBucket), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrExpectedBucketOwner), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + }, + + // Step 4: Import block with Resource Identity + { + ConfigDirectory: config.StaticDirectory("testdata/BucketServerSideEncryptionConfiguration/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithResourceIdentity, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + 
plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrBucket), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrExpectedBucketOwner), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + }, + }, + }) +} + +// Resource Identity was added after v6.9.0 +func TestAccS3BucketServerSideEncryptionConfiguration_Identity_ExistingResource(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_s3_bucket_server_side_encryption_configuration.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), + CheckDestroy: acctest.CheckDestroyNoop, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/BucketServerSideEncryptionConfiguration/basic_v6.9.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckBucketServerSideEncryptionConfigurationExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/BucketServerSideEncryptionConfiguration/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ 
+ plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + names.AttrBucket: knownvalue.NotNull(), + names.AttrExpectedBucketOwner: knownvalue.Null(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrBucket)), + }, + }, + }, + }) +} + +// Resource Identity was added after v6.9.0 +func TestAccS3BucketServerSideEncryptionConfiguration_Identity_ExistingResource_NoRefresh_NoChange(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_s3_bucket_server_side_encryption_configuration.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), + CheckDestroy: acctest.CheckDestroyNoop, + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/BucketServerSideEncryptionConfiguration/basic_v6.9.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckBucketServerSideEncryptionConfigurationExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/BucketServerSideEncryptionConfiguration/basic/"), + 
ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + }, + }) +} diff --git a/internal/service/s3/bucket_tags_gen_test.go b/internal/service/s3/bucket_tags_gen_test.go index 7062ba77fbe8..6ec02a57caa4 100644 --- a/internal/service/s3/bucket_tags_gen_test.go +++ b/internal/service/s3/bucket_tags_gen_test.go @@ -6,7 +6,6 @@ import ( "testing" "github.com/hashicorp/terraform-plugin-testing/config" - sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/knownvalue" "github.com/hashicorp/terraform-plugin-testing/plancheck" @@ -18,10 +17,11 @@ import ( func TestAccS3Bucket_tags(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_s3_bucket.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), CheckDestroy: testAccCheckBucketDestroy(ctx), @@ -199,10 +199,11 @@ func TestAccS3Bucket_tags(t *testing.T) { func TestAccS3Bucket_tags_null(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_s3_bucket.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ 
PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), CheckDestroy: testAccCheckBucketDestroy(ctx), @@ -265,10 +266,11 @@ func TestAccS3Bucket_tags_null(t *testing.T) { func TestAccS3Bucket_tags_EmptyMap(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_s3_bucket.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), CheckDestroy: testAccCheckBucketDestroy(ctx), @@ -327,10 +329,11 @@ func TestAccS3Bucket_tags_EmptyMap(t *testing.T) { func TestAccS3Bucket_tags_AddOnUpdate(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_s3_bucket.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), CheckDestroy: testAccCheckBucketDestroy(ctx), @@ -407,10 +410,11 @@ func TestAccS3Bucket_tags_AddOnUpdate(t *testing.T) { func TestAccS3Bucket_tags_EmptyTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_s3_bucket.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), CheckDestroy: testAccCheckBucketDestroy(ctx), @@ -495,10 +499,11 @@ func TestAccS3Bucket_tags_EmptyTag_OnCreate(t *testing.T) { func TestAccS3Bucket_tags_EmptyTag_OnUpdate_Add(t *testing.T) { ctx := 
acctest.Context(t) + resourceName := "aws_s3_bucket.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), CheckDestroy: testAccCheckBucketDestroy(ctx), @@ -631,10 +636,11 @@ func TestAccS3Bucket_tags_EmptyTag_OnUpdate_Add(t *testing.T) { func TestAccS3Bucket_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_s3_bucket.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), CheckDestroy: testAccCheckBucketDestroy(ctx), @@ -719,10 +725,11 @@ func TestAccS3Bucket_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { func TestAccS3Bucket_tags_DefaultTags_providerOnly(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_s3_bucket.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), CheckDestroy: testAccCheckBucketDestroy(ctx), @@ -899,10 +906,11 @@ func TestAccS3Bucket_tags_DefaultTags_providerOnly(t *testing.T) { func TestAccS3Bucket_tags_DefaultTags_nonOverlapping(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_s3_bucket.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, 
resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), CheckDestroy: testAccCheckBucketDestroy(ctx), @@ -1058,10 +1066,11 @@ func TestAccS3Bucket_tags_DefaultTags_nonOverlapping(t *testing.T) { func TestAccS3Bucket_tags_DefaultTags_overlapping(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_s3_bucket.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), CheckDestroy: testAccCheckBucketDestroy(ctx), @@ -1233,10 +1242,11 @@ func TestAccS3Bucket_tags_DefaultTags_overlapping(t *testing.T) { func TestAccS3Bucket_tags_DefaultTags_updateToProviderOnly(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_s3_bucket.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), CheckDestroy: testAccCheckBucketDestroy(ctx), @@ -1322,10 +1332,11 @@ func TestAccS3Bucket_tags_DefaultTags_updateToProviderOnly(t *testing.T) { func TestAccS3Bucket_tags_DefaultTags_updateToResourceOnly(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_s3_bucket.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), CheckDestroy: 
testAccCheckBucketDestroy(ctx), @@ -1410,10 +1421,11 @@ func TestAccS3Bucket_tags_DefaultTags_updateToResourceOnly(t *testing.T) { func TestAccS3Bucket_tags_DefaultTags_emptyResourceTag(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_s3_bucket.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), CheckDestroy: testAccCheckBucketDestroy(ctx), @@ -1474,10 +1486,11 @@ func TestAccS3Bucket_tags_DefaultTags_emptyResourceTag(t *testing.T) { func TestAccS3Bucket_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_s3_bucket.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), CheckDestroy: testAccCheckBucketDestroy(ctx), @@ -1530,10 +1543,11 @@ func TestAccS3Bucket_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T) { func TestAccS3Bucket_tags_DefaultTags_nullOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_s3_bucket.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), CheckDestroy: testAccCheckBucketDestroy(ctx), @@ -1591,10 +1605,11 @@ func TestAccS3Bucket_tags_DefaultTags_nullOverlappingResourceTag(t *testing.T) { func 
TestAccS3Bucket_tags_DefaultTags_nullNonOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_s3_bucket.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), CheckDestroy: testAccCheckBucketDestroy(ctx), @@ -1652,10 +1667,11 @@ func TestAccS3Bucket_tags_DefaultTags_nullNonOverlappingResourceTag(t *testing.T func TestAccS3Bucket_tags_ComputedTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_s3_bucket.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), CheckDestroy: testAccCheckBucketDestroy(ctx), @@ -1706,10 +1722,11 @@ func TestAccS3Bucket_tags_ComputedTag_OnCreate(t *testing.T) { func TestAccS3Bucket_tags_ComputedTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_s3_bucket.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), CheckDestroy: testAccCheckBucketDestroy(ctx), @@ -1802,10 +1819,11 @@ func TestAccS3Bucket_tags_ComputedTag_OnUpdate_Add(t *testing.T) { func TestAccS3Bucket_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_s3_bucket.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + 
rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), CheckDestroy: testAccCheckBucketDestroy(ctx), @@ -1888,10 +1906,11 @@ func TestAccS3Bucket_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { func TestAccS3Bucket_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_s3_bucket.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), CheckDestroy: testAccCheckBucketDestroy(ctx), @@ -2049,10 +2068,11 @@ func TestAccS3Bucket_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { func TestAccS3Bucket_tags_IgnoreTags_Overlap_ResourceTag(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_s3_bucket.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), CheckDestroy: testAccCheckBucketDestroy(ctx), diff --git a/internal/service/s3/bucket_test.go b/internal/service/s3/bucket_test.go index dba9ffb7b104..5e4956c1e94d 100644 --- a/internal/service/s3/bucket_test.go +++ b/internal/service/s3/bucket_test.go @@ -31,9 +31,7 @@ import ( "github.com/hashicorp/terraform-plugin-testing/statecheck" "github.com/hashicorp/terraform-plugin-testing/terraform" "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" - "github.com/hashicorp/terraform-plugin-testing/tfversion" 
"github.com/hashicorp/terraform-provider-aws/internal/acctest" - tfknownvalue "github.com/hashicorp/terraform-provider-aws/internal/acctest/knownvalue" tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" "github.com/hashicorp/terraform-provider-aws/internal/conns" tfcloudformation "github.com/hashicorp/terraform-provider-aws/internal/service/cloudformation" @@ -514,88 +512,6 @@ func TestAccS3Bucket_Basic_upgradeFromV5(t *testing.T) { }) } -func TestAccS3Bucket_Identity_ExistingResource(t *testing.T) { - ctx := acctest.Context(t) - rName := sdkacctest.RandomWithPrefix("tf-test-bucket") - resourceName := "aws_s3_bucket.test" - - resource.ParallelTest(t, resource.TestCase{ - TerraformVersionChecks: []tfversion.TerraformVersionCheck{ - tfversion.SkipBelow(tfversion.Version1_12_0), - }, - PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), - CheckDestroy: testAccCheckBucketDestroy(ctx), - Steps: []resource.TestStep{ - { - ExternalProviders: map[string]resource.ExternalProvider{ - "aws": { - Source: "hashicorp/aws", - VersionConstraint: "5.100.0", - }, - }, - Config: testAccBucketConfig_basic(rName), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckBucketExists(ctx, resourceName), - ), - ConfigStateChecks: []statecheck.StateCheck{ - tfstatecheck.ExpectNoIdentity(resourceName), - }, - }, - { - ExternalProviders: map[string]resource.ExternalProvider{ - "aws": { - Source: "hashicorp/aws", - VersionConstraint: "6.0.0", - }, - }, - Config: testAccBucketConfig_basic(rName), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckBucketExists(ctx, resourceName), - ), - ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - PostApplyPostRefresh: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - }, - 
ConfigStateChecks: []statecheck.StateCheck{ - statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ - names.AttrAccountID: knownvalue.Null(), - names.AttrRegion: knownvalue.Null(), - names.AttrBucket: knownvalue.Null(), - }), - }, - }, - { - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - Config: testAccBucketConfig_basic(rName), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckBucketExists(ctx, resourceName), - ), - ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - PostApplyPostRefresh: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - }, - ConfigStateChecks: []statecheck.StateCheck{ - statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ - names.AttrAccountID: tfknownvalue.AccountID(), - names.AttrRegion: knownvalue.StringExact(acctest.Region()), - names.AttrBucket: knownvalue.NotNull(), - }), - statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrBucket)), - }, - }, - }, - }) -} - // Test TestAccS3Bucket_disappears is designed to fail with a "plan // not empty" error in Terraform, to check against regressions. // See https://github.com/hashicorp/terraform/pull/2925 @@ -713,7 +629,7 @@ func TestAccS3Bucket_tags_withSystemTags(t *testing.T) { } if _, err := tfcloudformation.WaitStackDeleted(ctx, conn, stackID, requestToken, 10*time.Minute); err != nil { - return fmt.Errorf("Error waiting for CloudFormation stack deletion: %s", err) + return fmt.Errorf("Error waiting for CloudFormation stack deletion: %w", err) } return nil @@ -2663,7 +2579,7 @@ func testAccCheckBucketDestroyWithProvider(ctx context.Context) acctest.TestChec // S3 seems to be highly eventually consistent. Even if one connection reports that the queue is gone, // another connection may still report it as present. 
- _, err := tfresource.RetryUntilNotFound(ctx, tfs3.BucketPropagationTimeout, func() (any, error) { + _, err := tfresource.RetryUntilNotFound(ctx, tfs3.BucketPropagationTimeout, func(ctx context.Context) (any, error) { return tfs3.FindBucket(ctx, conn, rs.Primary.ID) }) diff --git a/internal/service/s3/bucket_versioning.go b/internal/service/s3/bucket_versioning.go index dd7ddc45904e..43887f069e5f 100644 --- a/internal/service/s3/bucket_versioning.go +++ b/internal/service/s3/bucket_versioning.go @@ -28,6 +28,10 @@ import ( ) // @SDKResource("aws_s3_bucket_versioning", name="Bucket Versioning") +// @IdentityAttribute("bucket") +// @IdentityAttribute("expected_bucket_owner", optional="true") +// @ImportIDHandler("resourceImportID") +// @Testing(preIdentityVersion="v6.9.0") func resourceBucketVersioning() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceBucketVersioningCreate, @@ -35,10 +39,6 @@ func resourceBucketVersioning() *schema.Resource { UpdateWithoutTimeout: resourceBucketVersioningUpdate, DeleteWithoutTimeout: resourceBucketVersioningDelete, - Importer: &schema.ResourceImporter{ - StateContext: schema.ImportStatePassthroughContext, - }, - Schema: map[string]*schema.Schema{ names.AttrBucket: { Type: schema.TypeString, @@ -129,7 +129,7 @@ func resourceBucketVersioningCreate(ctx context.Context, d *schema.ResourceData, input.MFA = aws.String(v.(string)) } - _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, bucketPropagationTimeout, func() (any, error) { + _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, bucketPropagationTimeout, func(ctx context.Context) (any, error) { return conn.PutBucketVersioning(ctx, input) }, errCodeNoSuchBucket) diff --git a/internal/service/s3/bucket_versioning_identity_gen_test.go b/internal/service/s3/bucket_versioning_identity_gen_test.go new file mode 100644 index 000000000000..577efc059d25 --- /dev/null +++ b/internal/service/s3/bucket_versioning_identity_gen_test.go @@ -0,0 +1,312 @@ +// Code 
generated by internal/generate/identitytests/main.go; DO NOT EDIT. + +package s3_test + +import ( + "testing" + + "github.com/hashicorp/terraform-plugin-testing/config" + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/knownvalue" + "github.com/hashicorp/terraform-plugin-testing/plancheck" + "github.com/hashicorp/terraform-plugin-testing/statecheck" + "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" + "github.com/hashicorp/terraform-plugin-testing/tfversion" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + tfknownvalue "github.com/hashicorp/terraform-provider-aws/internal/acctest/knownvalue" + tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccS3BucketVersioning_Identity_Basic(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_s3_bucket_versioning.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), + CheckDestroy: testAccCheckBucketVersioningDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + // Step 1: Setup + { + ConfigDirectory: config.StaticDirectory("testdata/BucketVersioning/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckBucketVersioningExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), 
knownvalue.StringExact(acctest.Region())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + names.AttrBucket: knownvalue.NotNull(), + names.AttrExpectedBucketOwner: knownvalue.Null(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrBucket)), + }, + }, + + // Step 2: Import command + { + ConfigDirectory: config.StaticDirectory("testdata/BucketVersioning/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ImportStateKind: resource.ImportCommandWithID, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + + // Step 3: Import block with Import ID + { + ConfigDirectory: config.StaticDirectory("testdata/BucketVersioning/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrBucket), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrExpectedBucketOwner), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + }, + }, + }, + + // Step 4: Import block with Resource Identity + { + ConfigDirectory: config.StaticDirectory("testdata/BucketVersioning/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithResourceIdentity, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, 
tfjsonpath.New(names.AttrBucket), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrExpectedBucketOwner), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + }, + }, + }, + }, + }) +} + +func TestAccS3BucketVersioning_Identity_RegionOverride(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_s3_bucket_versioning.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), + CheckDestroy: acctest.CheckDestroyNoop, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + // Step 1: Setup + { + ConfigDirectory: config.StaticDirectory("testdata/BucketVersioning/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.AlternateRegion()), + names.AttrBucket: knownvalue.NotNull(), + names.AttrExpectedBucketOwner: knownvalue.Null(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrBucket)), + }, + }, + + // Step 2: Import command + { + ConfigDirectory: config.StaticDirectory("testdata/BucketVersioning/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": 
config.StringVariable(acctest.AlternateRegion()), + }, + ImportStateKind: resource.ImportCommandWithID, + ImportStateIdFunc: acctest.CrossRegionImportStateIdFunc(resourceName), + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + + // Step 3: Import block with Import ID + { + ConfigDirectory: config.StaticDirectory("testdata/BucketVersioning/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportStateIdFunc: acctest.CrossRegionImportStateIdFunc(resourceName), + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrBucket), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrExpectedBucketOwner), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + }, + + // Step 4: Import block with Resource Identity + { + ConfigDirectory: config.StaticDirectory("testdata/BucketVersioning/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithResourceIdentity, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrBucket), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrExpectedBucketOwner), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + 
}, + }, + }, + }) +} + +// Resource Identity was added after v6.9.0 +func TestAccS3BucketVersioning_Identity_ExistingResource(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_s3_bucket_versioning.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), + CheckDestroy: testAccCheckBucketVersioningDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/BucketVersioning/basic_v6.9.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckBucketVersioningExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/BucketVersioning/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + names.AttrBucket: knownvalue.NotNull(), + names.AttrExpectedBucketOwner: knownvalue.Null(), + }), + 
statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrBucket)), + }, + }, + }, + }) +} + +// Resource Identity was added after v6.9.0 +func TestAccS3BucketVersioning_Identity_ExistingResource_NoRefresh_NoChange(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_s3_bucket_versioning.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), + CheckDestroy: testAccCheckBucketVersioningDestroy(ctx), + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/BucketVersioning/basic_v6.9.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckBucketVersioningExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/BucketVersioning/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + }, + }) +} diff --git 
a/internal/service/s3/bucket_website_configuration.go b/internal/service/s3/bucket_website_configuration.go index 7d664873aa2b..1311fa5aae37 100644 --- a/internal/service/s3/bucket_website_configuration.go +++ b/internal/service/s3/bucket_website_configuration.go @@ -26,6 +26,10 @@ import ( ) // @SDKResource("aws_s3_bucket_website_configuration", name="Bucket Website Configuration") +// @IdentityAttribute("bucket") +// @IdentityAttribute("expected_bucket_owner", optional="true") +// @ImportIDHandler("resourceImportID") +// @Testing(preIdentityVersion="v6.9.0") func resourceBucketWebsiteConfiguration() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceBucketWebsiteConfigurationCreate, @@ -33,10 +37,6 @@ func resourceBucketWebsiteConfiguration() *schema.Resource { UpdateWithoutTimeout: resourceBucketWebsiteConfigurationUpdate, DeleteWithoutTimeout: resourceBucketWebsiteConfigurationDelete, - Importer: &schema.ResourceImporter{ - StateContext: schema.ImportStatePassthroughContext, - }, - Schema: map[string]*schema.Schema{ names.AttrBucket: { Type: schema.TypeString, @@ -223,7 +223,7 @@ func resourceBucketWebsiteConfigurationCreate(ctx context.Context, d *schema.Res input.ExpectedBucketOwner = aws.String(expectedBucketOwner) } - _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, bucketPropagationTimeout, func() (any, error) { + _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, bucketPropagationTimeout, func(ctx context.Context) (any, error) { return conn.PutBucketWebsite(ctx, input) }, errCodeNoSuchBucket) @@ -237,7 +237,7 @@ func resourceBucketWebsiteConfigurationCreate(ctx context.Context, d *schema.Res d.SetId(createResourceID(bucket, expectedBucketOwner)) - _, err = tfresource.RetryWhenNotFound(ctx, bucketPropagationTimeout, func() (any, error) { + _, err = tfresource.RetryWhenNotFound(ctx, bucketPropagationTimeout, func(ctx context.Context) (any, error) { return findBucketWebsite(ctx, conn, bucket, expectedBucketOwner) }) @@ -407,7 
+407,7 @@ func resourceBucketWebsiteConfigurationDelete(ctx context.Context, d *schema.Res return sdkdiag.AppendErrorf(diags, "deleting S3 Bucket Website Configuration (%s): %s", d.Id(), err) } - _, err = tfresource.RetryUntilNotFound(ctx, bucketPropagationTimeout, func() (any, error) { + _, err = tfresource.RetryUntilNotFound(ctx, bucketPropagationTimeout, func(ctx context.Context) (any, error) { return findBucketWebsite(ctx, conn, bucket, expectedBucketOwner) }) diff --git a/internal/service/s3/bucket_website_configuration_identity_gen_test.go b/internal/service/s3/bucket_website_configuration_identity_gen_test.go new file mode 100644 index 000000000000..a8977521db0d --- /dev/null +++ b/internal/service/s3/bucket_website_configuration_identity_gen_test.go @@ -0,0 +1,312 @@ +// Code generated by internal/generate/identitytests/main.go; DO NOT EDIT. + +package s3_test + +import ( + "testing" + + "github.com/hashicorp/terraform-plugin-testing/config" + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/knownvalue" + "github.com/hashicorp/terraform-plugin-testing/plancheck" + "github.com/hashicorp/terraform-plugin-testing/statecheck" + "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" + "github.com/hashicorp/terraform-plugin-testing/tfversion" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + tfknownvalue "github.com/hashicorp/terraform-provider-aws/internal/acctest/knownvalue" + tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccS3BucketWebsiteConfiguration_Identity_Basic(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_s3_bucket_website_configuration.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + 
TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), + CheckDestroy: testAccCheckBucketWebsiteConfigurationDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + // Step 1: Setup + { + ConfigDirectory: config.StaticDirectory("testdata/BucketWebsiteConfiguration/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckBucketWebsiteConfigurationExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + names.AttrBucket: knownvalue.NotNull(), + names.AttrExpectedBucketOwner: knownvalue.Null(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrBucket)), + }, + }, + + // Step 2: Import command + { + ConfigDirectory: config.StaticDirectory("testdata/BucketWebsiteConfiguration/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ImportStateKind: resource.ImportCommandWithID, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + + // Step 3: Import block with Import ID + { + ConfigDirectory: config.StaticDirectory("testdata/BucketWebsiteConfiguration/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: 
[]plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrBucket), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrExpectedBucketOwner), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + }, + }, + }, + + // Step 4: Import block with Resource Identity + { + ConfigDirectory: config.StaticDirectory("testdata/BucketWebsiteConfiguration/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithResourceIdentity, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrBucket), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrExpectedBucketOwner), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + }, + }, + }, + }, + }) +} + +func TestAccS3BucketWebsiteConfiguration_Identity_RegionOverride(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_s3_bucket_website_configuration.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), + CheckDestroy: acctest.CheckDestroyNoop, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + // Step 1: Setup + { + ConfigDirectory: config.StaticDirectory("testdata/BucketWebsiteConfiguration/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: 
config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.AlternateRegion()), + names.AttrBucket: knownvalue.NotNull(), + names.AttrExpectedBucketOwner: knownvalue.Null(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrBucket)), + }, + }, + + // Step 2: Import command + { + ConfigDirectory: config.StaticDirectory("testdata/BucketWebsiteConfiguration/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ImportStateKind: resource.ImportCommandWithID, + ImportStateIdFunc: acctest.CrossRegionImportStateIdFunc(resourceName), + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + + // Step 3: Import block with Import ID + { + ConfigDirectory: config.StaticDirectory("testdata/BucketWebsiteConfiguration/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportStateIdFunc: acctest.CrossRegionImportStateIdFunc(resourceName), + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrBucket), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrExpectedBucketOwner), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, 
tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + }, + + // Step 4: Import block with Resource Identity + { + ConfigDirectory: config.StaticDirectory("testdata/BucketWebsiteConfiguration/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithResourceIdentity, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrBucket), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrExpectedBucketOwner), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + }, + }, + }) +} + +// Resource Identity was added after v6.9.0 +func TestAccS3BucketWebsiteConfiguration_Identity_ExistingResource(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_s3_bucket_website_configuration.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), + CheckDestroy: testAccCheckBucketWebsiteConfigurationDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/BucketWebsiteConfiguration/basic_v6.9.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckBucketWebsiteConfigurationExists(ctx, resourceName), + ), + ConfigStateChecks: 
[]statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/BucketWebsiteConfiguration/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + names.AttrBucket: knownvalue.NotNull(), + names.AttrExpectedBucketOwner: knownvalue.Null(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrBucket)), + }, + }, + }, + }) +} + +// Resource Identity was added after v6.9.0 +func TestAccS3BucketWebsiteConfiguration_Identity_ExistingResource_NoRefresh_NoChange(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_s3_bucket_website_configuration.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), + CheckDestroy: testAccCheckBucketWebsiteConfigurationDestroy(ctx), + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: 
config.StaticDirectory("testdata/BucketWebsiteConfiguration/basic_v6.9.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckBucketWebsiteConfigurationExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/BucketWebsiteConfiguration/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + }, + }) +} diff --git a/internal/service/s3/consts.go b/internal/service/s3/consts.go new file mode 100644 index 000000000000..6798b84a8c18 --- /dev/null +++ b/internal/service/s3/consts.go @@ -0,0 +1,17 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package s3 + +const ( + inventoryTableConfigurationStatusActive = "ACTIVE" + inventoryTableConfigurationStatusBackfilling = "BACKFILLING" + inventoryTableConfigurationStatusCreating = "CREATING" + inventoryTableConfigurationStatusFailed = "FAILED" +) + +const ( + journalTableConfigurationStatusActive = "ACTIVE" + journalTableConfigurationStatusCreating = "CREATING" + journalTableConfigurationStatusFailed = "FAILED" +) diff --git a/internal/service/s3/directory_bucket_tags_gen_test.go b/internal/service/s3/directory_bucket_tags_gen_test.go index 3a9d2a025492..0d64ab62b4e5 100644 --- a/internal/service/s3/directory_bucket_tags_gen_test.go +++ b/internal/service/s3/directory_bucket_tags_gen_test.go @@ -6,7 +6,6 @@ import ( "testing" "github.com/hashicorp/terraform-plugin-testing/config" - sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/knownvalue" "github.com/hashicorp/terraform-plugin-testing/plancheck" @@ -18,10 +17,11 @@ import ( func TestAccS3DirectoryBucket_tags(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_s3_directory_bucket.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), CheckDestroy: testAccCheckDirectoryBucketDestroy(ctx), @@ -211,10 +211,11 @@ func TestAccS3DirectoryBucket_tags(t *testing.T) { func TestAccS3DirectoryBucket_tags_null(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_s3_directory_bucket.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, 
resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), CheckDestroy: testAccCheckDirectoryBucketDestroy(ctx), @@ -273,10 +274,11 @@ func TestAccS3DirectoryBucket_tags_null(t *testing.T) { func TestAccS3DirectoryBucket_tags_EmptyMap(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_s3_directory_bucket.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), CheckDestroy: testAccCheckDirectoryBucketDestroy(ctx), @@ -323,10 +325,11 @@ func TestAccS3DirectoryBucket_tags_EmptyMap(t *testing.T) { func TestAccS3DirectoryBucket_tags_AddOnUpdate(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_s3_directory_bucket.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), CheckDestroy: testAccCheckDirectoryBucketDestroy(ctx), @@ -405,10 +408,11 @@ func TestAccS3DirectoryBucket_tags_AddOnUpdate(t *testing.T) { func TestAccS3DirectoryBucket_tags_EmptyTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_s3_directory_bucket.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), CheckDestroy: 
testAccCheckDirectoryBucketDestroy(ctx), @@ -500,10 +504,11 @@ func TestAccS3DirectoryBucket_tags_EmptyTag_OnCreate(t *testing.T) { func TestAccS3DirectoryBucket_tags_EmptyTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_s3_directory_bucket.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), CheckDestroy: testAccCheckDirectoryBucketDestroy(ctx), @@ -644,10 +649,11 @@ func TestAccS3DirectoryBucket_tags_EmptyTag_OnUpdate_Add(t *testing.T) { func TestAccS3DirectoryBucket_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_s3_directory_bucket.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), CheckDestroy: testAccCheckDirectoryBucketDestroy(ctx), @@ -736,10 +742,11 @@ func TestAccS3DirectoryBucket_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { func TestAccS3DirectoryBucket_tags_DefaultTags_providerOnly(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_s3_directory_bucket.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), CheckDestroy: testAccCheckDirectoryBucketDestroy(ctx), @@ -928,10 +935,11 @@ func TestAccS3DirectoryBucket_tags_DefaultTags_providerOnly(t 
*testing.T) { func TestAccS3DirectoryBucket_tags_DefaultTags_nonOverlapping(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_s3_directory_bucket.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), CheckDestroy: testAccCheckDirectoryBucketDestroy(ctx), @@ -1096,10 +1104,11 @@ func TestAccS3DirectoryBucket_tags_DefaultTags_nonOverlapping(t *testing.T) { func TestAccS3DirectoryBucket_tags_DefaultTags_overlapping(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_s3_directory_bucket.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), CheckDestroy: testAccCheckDirectoryBucketDestroy(ctx), @@ -1280,10 +1289,11 @@ func TestAccS3DirectoryBucket_tags_DefaultTags_overlapping(t *testing.T) { func TestAccS3DirectoryBucket_tags_DefaultTags_updateToProviderOnly(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_s3_directory_bucket.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), CheckDestroy: testAccCheckDirectoryBucketDestroy(ctx), @@ -1372,10 +1382,11 @@ func TestAccS3DirectoryBucket_tags_DefaultTags_updateToProviderOnly(t *testing.T func TestAccS3DirectoryBucket_tags_DefaultTags_updateToResourceOnly(t *testing.T) { 
ctx := acctest.Context(t) + resourceName := "aws_s3_directory_bucket.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), CheckDestroy: testAccCheckDirectoryBucketDestroy(ctx), @@ -1463,10 +1474,11 @@ func TestAccS3DirectoryBucket_tags_DefaultTags_updateToResourceOnly(t *testing.T func TestAccS3DirectoryBucket_tags_DefaultTags_emptyResourceTag(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_s3_directory_bucket.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), CheckDestroy: testAccCheckDirectoryBucketDestroy(ctx), @@ -1531,10 +1543,11 @@ func TestAccS3DirectoryBucket_tags_DefaultTags_emptyResourceTag(t *testing.T) { func TestAccS3DirectoryBucket_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_s3_directory_bucket.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), CheckDestroy: testAccCheckDirectoryBucketDestroy(ctx), @@ -1591,10 +1604,11 @@ func TestAccS3DirectoryBucket_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T func TestAccS3DirectoryBucket_tags_DefaultTags_nullOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) + resourceName := 
"aws_s3_directory_bucket.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), CheckDestroy: testAccCheckDirectoryBucketDestroy(ctx), @@ -1660,10 +1674,11 @@ func TestAccS3DirectoryBucket_tags_DefaultTags_nullOverlappingResourceTag(t *tes func TestAccS3DirectoryBucket_tags_DefaultTags_nullNonOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_s3_directory_bucket.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), CheckDestroy: testAccCheckDirectoryBucketDestroy(ctx), @@ -1731,10 +1746,11 @@ func TestAccS3DirectoryBucket_tags_DefaultTags_nullNonOverlappingResourceTag(t * func TestAccS3DirectoryBucket_tags_ComputedTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_s3_directory_bucket.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), CheckDestroy: testAccCheckDirectoryBucketDestroy(ctx), @@ -1788,10 +1804,11 @@ func TestAccS3DirectoryBucket_tags_ComputedTag_OnCreate(t *testing.T) { func TestAccS3DirectoryBucket_tags_ComputedTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_s3_directory_bucket.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) 
+ rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), CheckDestroy: testAccCheckDirectoryBucketDestroy(ctx), @@ -1887,10 +1904,11 @@ func TestAccS3DirectoryBucket_tags_ComputedTag_OnUpdate_Add(t *testing.T) { func TestAccS3DirectoryBucket_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_s3_directory_bucket.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), CheckDestroy: testAccCheckDirectoryBucketDestroy(ctx), @@ -1976,10 +1994,11 @@ func TestAccS3DirectoryBucket_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { func TestAccS3DirectoryBucket_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_s3_directory_bucket.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), CheckDestroy: testAccCheckDirectoryBucketDestroy(ctx), @@ -2137,10 +2156,11 @@ func TestAccS3DirectoryBucket_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { func TestAccS3DirectoryBucket_tags_IgnoreTags_Overlap_ResourceTag(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_s3_directory_bucket.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - 
resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), CheckDestroy: testAccCheckDirectoryBucketDestroy(ctx), diff --git a/internal/service/s3/directory_bucket_test.go b/internal/service/s3/directory_bucket_test.go index efb6669ec8dd..fc24fea4bfa8 100644 --- a/internal/service/s3/directory_bucket_test.go +++ b/internal/service/s3/directory_bucket_test.go @@ -286,8 +286,9 @@ func testAccConfigDirectoryBucket_availableAZs() string { func testAccDirectoryBucketConfig_baseAZ(rName string) string { return acctest.ConfigCompose(testAccConfigDirectoryBucket_availableAZs(), fmt.Sprintf(` locals { - location_name = data.aws_availability_zones.available.zone_ids[0] - bucket = "%[1]s--${local.location_name}--x-s3" + location_name = data.aws_availability_zones.available.zone_ids[0] + bucket = "%[1]s--${local.location_name}--x-s3" + access_point_name = "%[1]s--${local.location_name}--xa-s3" } `, rName)) } diff --git a/internal/service/s3/errors.go b/internal/service/s3/errors.go index 8880de0f3e86..6aef197910ba 100644 --- a/internal/service/s3/errors.go +++ b/internal/service/s3/errors.go @@ -20,6 +20,7 @@ const ( errCodeInvalidBucketState = "InvalidBucketState" errCodeInvalidRequest = "InvalidRequest" errCodeMalformedPolicy = "MalformedPolicy" + errCodeMetadataConfigurationNotFound = "MetadataConfigurationNotFound" errCodeMethodNotAllowed = "MethodNotAllowed" errCodeNoSuchBucket = "NoSuchBucket" errCodeNoSuchBucketPolicy = "NoSuchBucketPolicy" @@ -29,6 +30,7 @@ const ( errCodeNoSuchKey = "NoSuchKey" errCodeNoSuchPublicAccessBlockConfiguration = "NoSuchPublicAccessBlockConfiguration" errCodeNoSuchTagSet = "NoSuchTagSet" + errCodeNoSuchTagSetError = "NoSuchTagSetError" errCodeNoSuchWebsiteConfiguration = "NoSuchWebsiteConfiguration" errCodeNotImplemented = "NotImplemented" // errCodeObjectLockConfigurationNotFound should be used with 
tfawserr.ErrCodeContains, not tfawserr.ErrCodeEquals. diff --git a/internal/service/s3/exports_test.go b/internal/service/s3/exports_test.go index 873829b3ecdc..f1b79813503a 100644 --- a/internal/service/s3/exports_test.go +++ b/internal/service/s3/exports_test.go @@ -13,6 +13,7 @@ var ( ResourceBucketInventory = resourceBucketInventory ResourceBucketLifecycleConfiguration = newBucketLifecycleConfigurationResource ResourceBucketLogging = resourceBucketLogging + ResourceBucketMetadataConfiguration = newBucketMetadataConfigurationResource ResourceBucketMetric = resourceBucketMetric ResourceBucketNotification = resourceBucketNotification ResourceBucketObjectLockConfiguration = resourceBucketObjectLockConfiguration @@ -28,38 +29,39 @@ var ( ResourceDirectoryBucket = newDirectoryBucketResource ResourceObjectCopy = resourceObjectCopy - BucketUpdateTags = bucketUpdateTags - BucketRegionalDomainName = bucketRegionalDomainName - BucketWebsiteEndpointAndDomain = bucketWebsiteEndpointAndDomain - DeleteAllObjectVersions = deleteAllObjectVersions - EmptyBucket = emptyBucket - FindAnalyticsConfiguration = findAnalyticsConfiguration - FindBucket = findBucket - FindBucketACL = findBucketACL - FindBucketAccelerateConfiguration = findBucketAccelerateConfiguration - FindBucketLifecycleConfiguration = findBucketLifecycleConfiguration - FindBucketNotificationConfiguration = findBucketNotificationConfiguration - FindBucketPolicy = findBucketPolicy - FindBucketRequestPayment = findBucketRequestPayment - FindBucketVersioning = findBucketVersioning - FindBucketWebsite = findBucketWebsite - FindCORSRules = findCORSRules - FindIntelligentTieringConfiguration = findIntelligentTieringConfiguration - FindInventoryConfiguration = findInventoryConfiguration - FindLoggingEnabled = findLoggingEnabled - FindMetricsConfiguration = findMetricsConfiguration - FindObjectByBucketAndKey = findObjectByBucketAndKey - FindObjectLockConfiguration = findObjectLockConfiguration - FindOwnershipControls = 
findOwnershipControls - FindPublicAccessBlockConfiguration = findPublicAccessBlockConfiguration - FindReplicationConfiguration = findReplicationConfiguration - FindServerSideEncryptionConfiguration = findServerSideEncryptionConfiguration - HostedZoneIDForRegion = hostedZoneIDForRegion - IsDirectoryBucket = isDirectoryBucket - ObjectListTags = objectListTags - ObjectUpdateTags = objectUpdateTags - SDKv1CompatibleCleanKey = sdkv1CompatibleCleanKey - ValidBucketName = validBucketName + BucketUpdateTags = bucketUpdateTags + BucketRegionalDomainName = bucketRegionalDomainName + BucketWebsiteEndpointAndDomain = bucketWebsiteEndpointAndDomain + DeleteAllObjectVersions = deleteAllObjectVersions + EmptyBucket = emptyBucket + FindAnalyticsConfiguration = findAnalyticsConfiguration + FindBucket = findBucket + FindBucketACL = findBucketACL + FindBucketAccelerateConfiguration = findBucketAccelerateConfiguration + FindBucketLifecycleConfiguration = findBucketLifecycleConfiguration + FindBucketMetadataConfigurationByTwoPartKey = findBucketMetadataConfigurationByTwoPartKey + FindBucketNotificationConfiguration = findBucketNotificationConfiguration + FindBucketPolicy = findBucketPolicy + FindBucketRequestPayment = findBucketRequestPayment + FindBucketVersioning = findBucketVersioning + FindBucketWebsite = findBucketWebsite + FindCORSRules = findCORSRules + FindIntelligentTieringConfiguration = findIntelligentTieringConfiguration + FindInventoryConfiguration = findInventoryConfiguration + FindLoggingEnabled = findLoggingEnabled + FindMetricsConfiguration = findMetricsConfiguration + FindObjectByBucketAndKey = findObjectByBucketAndKey + FindObjectLockConfiguration = findObjectLockConfiguration + FindOwnershipControls = findOwnershipControls + FindPublicAccessBlockConfiguration = findPublicAccessBlockConfiguration + FindReplicationConfiguration = findReplicationConfiguration + FindServerSideEncryptionConfiguration = findServerSideEncryptionConfiguration + HostedZoneIDForRegion = 
hostedZoneIDForRegion + IsDirectoryBucket = isDirectoryBucket + ObjectListTags = objectListTags + ObjectUpdateTags = objectUpdateTags + SDKv1CompatibleCleanKey = sdkv1CompatibleCleanKey + ValidBucketName = validBucketName BucketPropagationTimeout = bucketPropagationTimeout BucketVersioningStatusDisabled = bucketVersioningStatusDisabled @@ -72,8 +74,10 @@ var ( NewObjectARN = newObjectARN ParseObjectARN = parseObjectARN - CreateResourceID = createResourceID - ParseResourceID = parseResourceID + CreateResourceID = createResourceID + ParseResourceID = parseResourceID + CreateBucketACLResourceID = createBucketACLResourceID + ParseBucketACLResourceID = parseBucketACLResourceID DirectoryBucketNameRegex = directoryBucketNameRegex diff --git a/internal/service/s3/hosted_zones.go b/internal/service/s3/hosted_zones.go index 89b67229d64a..8d744355993d 100644 --- a/internal/service/s3/hosted_zones.go +++ b/internal/service/s3/hosted_zones.go @@ -24,6 +24,7 @@ var hostedZoneIDsMap = map[string]string{ endpoints.ApSoutheast3RegionID: "Z01846753K324LI26A3VV", endpoints.ApSoutheast4RegionID: "Z0312387243XT5FE14WFO", endpoints.ApSoutheast5RegionID: "Z08660063OXLMA7F1FJHU", + endpoints.ApSoutheast6RegionID: "Z05686083R66JX5C163TC", endpoints.ApSoutheast7RegionID: "Z0031014GXUMRZG6I14G", endpoints.CaCentral1RegionID: "Z1QDHH18159H29", endpoints.CaWest1RegionID: "Z03565811Z33SLEZTHOUL", diff --git a/internal/service/s3/id.go b/internal/service/s3/id.go index 92886ee9c0aa..ada738471bfb 100644 --- a/internal/service/s3/id.go +++ b/internal/service/s3/id.go @@ -6,6 +6,10 @@ package s3 import ( "fmt" "strings" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + inttypes "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/names" ) const resourceIDSeparator = "," @@ -25,20 +29,46 @@ func createResourceID(bucket, expectedBucketOwner string) string { // parseResourceID is a generic method for parsing an ID string // for a 
bucket name and accountID if provided. -func parseResourceID(id string) (bucket, expectedBucketOwner string, err error) { +func parseResourceID(id string) (string, string, error) { parts := strings.Split(id, resourceIDSeparator) if len(parts) == 1 && parts[0] != "" { - bucket = parts[0] - return + return parts[0], "", nil } if len(parts) == 2 && parts[0] != "" && parts[1] != "" { - bucket = parts[0] - expectedBucketOwner = parts[1] - return + return parts[0], parts[1], nil + } + + return "", "", fmt.Errorf("unexpected format for ID (%[1]s), expected BUCKET or BUCKET%[2]sEXPECTED_BUCKET_OWNER", id, resourceIDSeparator) +} + +var _ inttypes.SDKv2ImportID = resourceImportID{} + +// resourceImportID is a generic custom import type supporting bucket-related resources. +// +// Resources which expect a bucket name and an optional accountID for the identifier +// can use this custom importer when adding resource identity support. +type resourceImportID struct{} + +func (resourceImportID) Create(d *schema.ResourceData) string { + bucket := d.Get(names.AttrBucket).(string) + expectedBucketOwner := d.Get(names.AttrExpectedBucketOwner).(string) + return createResourceID(bucket, expectedBucketOwner) +} + +func (resourceImportID) Parse(id string) (string, map[string]string, error) { + bucket, expectedBucketOwner, err := parseResourceID(id) + if err != nil { + return id, nil, err + } + + results := map[string]string{ + names.AttrBucket: bucket, + } + if expectedBucketOwner != "" { + results[names.AttrExpectedBucketOwner] = expectedBucketOwner } - err = fmt.Errorf("unexpected format for ID (%s), expected BUCKET or BUCKET%sEXPECTED_BUCKET_OWNER", id, resourceIDSeparator) - return + return id, results, nil } diff --git a/internal/service/s3/object.go b/internal/service/s3/object.go index 71cbe4e04def..40e19c67e059 100644 --- a/internal/service/s3/object.go +++ b/internal/service/s3/object.go @@ -50,6 +50,7 @@ import ( // 
@Testing(existsType="github.com/aws/aws-sdk-go-v2/service/s3;s3.GetObjectOutput") // @Testing(importIgnore="force_destroy") // @Testing(plannableImportAction="NoOp") +// @Testing(preIdentityVersion="6.0.0") func resourceObject() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceObjectCreate, diff --git a/internal/service/s3/object_copy_tags_gen_test.go b/internal/service/s3/object_copy_tags_gen_test.go index d13121c432f4..29a621255fc8 100644 --- a/internal/service/s3/object_copy_tags_gen_test.go +++ b/internal/service/s3/object_copy_tags_gen_test.go @@ -6,7 +6,6 @@ import ( "testing" "github.com/hashicorp/terraform-plugin-testing/config" - sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/knownvalue" "github.com/hashicorp/terraform-plugin-testing/plancheck" @@ -18,10 +17,11 @@ import ( func TestAccS3ObjectCopy_tags(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_s3_object_copy.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), CheckDestroy: testAccCheckObjectCopyDestroy(ctx), @@ -155,10 +155,11 @@ func TestAccS3ObjectCopy_tags(t *testing.T) { func TestAccS3ObjectCopy_tags_null(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_s3_object_copy.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), CheckDestroy: 
testAccCheckObjectCopyDestroy(ctx), @@ -210,10 +211,11 @@ func TestAccS3ObjectCopy_tags_null(t *testing.T) { func TestAccS3ObjectCopy_tags_EmptyMap(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_s3_object_copy.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), CheckDestroy: testAccCheckObjectCopyDestroy(ctx), @@ -263,10 +265,11 @@ func TestAccS3ObjectCopy_tags_EmptyMap(t *testing.T) { func TestAccS3ObjectCopy_tags_AddOnUpdate(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_s3_object_copy.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), CheckDestroy: testAccCheckObjectCopyDestroy(ctx), @@ -331,10 +334,11 @@ func TestAccS3ObjectCopy_tags_AddOnUpdate(t *testing.T) { func TestAccS3ObjectCopy_tags_EmptyTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_s3_object_copy.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), CheckDestroy: testAccCheckObjectCopyDestroy(ctx), @@ -398,10 +402,11 @@ func TestAccS3ObjectCopy_tags_EmptyTag_OnCreate(t *testing.T) { func TestAccS3ObjectCopy_tags_EmptyTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_s3_object_copy.test" - 
rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), CheckDestroy: testAccCheckObjectCopyDestroy(ctx), @@ -510,10 +515,11 @@ func TestAccS3ObjectCopy_tags_EmptyTag_OnUpdate_Add(t *testing.T) { func TestAccS3ObjectCopy_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_s3_object_copy.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), CheckDestroy: testAccCheckObjectCopyDestroy(ctx), @@ -586,10 +592,11 @@ func TestAccS3ObjectCopy_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { func TestAccS3ObjectCopy_tags_DefaultTags_providerOnly(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_s3_object_copy.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), CheckDestroy: testAccCheckObjectCopyDestroy(ctx), @@ -715,10 +722,11 @@ func TestAccS3ObjectCopy_tags_DefaultTags_providerOnly(t *testing.T) { func TestAccS3ObjectCopy_tags_DefaultTags_nonOverlapping(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_s3_object_copy.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + 
acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), CheckDestroy: testAccCheckObjectCopyDestroy(ctx), @@ -832,10 +840,11 @@ func TestAccS3ObjectCopy_tags_DefaultTags_nonOverlapping(t *testing.T) { func TestAccS3ObjectCopy_tags_DefaultTags_overlapping(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_s3_object_copy.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), CheckDestroy: testAccCheckObjectCopyDestroy(ctx), @@ -959,10 +968,11 @@ func TestAccS3ObjectCopy_tags_DefaultTags_overlapping(t *testing.T) { func TestAccS3ObjectCopy_tags_DefaultTags_updateToProviderOnly(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_s3_object_copy.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), CheckDestroy: testAccCheckObjectCopyDestroy(ctx), @@ -1034,10 +1044,11 @@ func TestAccS3ObjectCopy_tags_DefaultTags_updateToProviderOnly(t *testing.T) { func TestAccS3ObjectCopy_tags_DefaultTags_updateToResourceOnly(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_s3_object_copy.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), 
CheckDestroy: testAccCheckObjectCopyDestroy(ctx), @@ -1109,10 +1120,11 @@ func TestAccS3ObjectCopy_tags_DefaultTags_updateToResourceOnly(t *testing.T) { func TestAccS3ObjectCopy_tags_DefaultTags_emptyResourceTag(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_s3_object_copy.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), CheckDestroy: testAccCheckObjectCopyDestroy(ctx), @@ -1157,10 +1169,11 @@ func TestAccS3ObjectCopy_tags_DefaultTags_emptyResourceTag(t *testing.T) { func TestAccS3ObjectCopy_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_s3_object_copy.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), CheckDestroy: testAccCheckObjectCopyDestroy(ctx), @@ -1199,10 +1212,11 @@ func TestAccS3ObjectCopy_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T) { func TestAccS3ObjectCopy_tags_DefaultTags_nullOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_s3_object_copy.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), CheckDestroy: testAccCheckObjectCopyDestroy(ctx), @@ -1244,10 +1258,11 @@ func 
TestAccS3ObjectCopy_tags_DefaultTags_nullOverlappingResourceTag(t *testing. func TestAccS3ObjectCopy_tags_DefaultTags_nullNonOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_s3_object_copy.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), CheckDestroy: testAccCheckObjectCopyDestroy(ctx), @@ -1289,10 +1304,11 @@ func TestAccS3ObjectCopy_tags_DefaultTags_nullNonOverlappingResourceTag(t *testi func TestAccS3ObjectCopy_tags_ComputedTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_s3_object_copy.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), CheckDestroy: testAccCheckObjectCopyDestroy(ctx), @@ -1332,10 +1348,11 @@ func TestAccS3ObjectCopy_tags_ComputedTag_OnCreate(t *testing.T) { func TestAccS3ObjectCopy_tags_ComputedTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_s3_object_copy.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), CheckDestroy: testAccCheckObjectCopyDestroy(ctx), @@ -1415,10 +1432,11 @@ func TestAccS3ObjectCopy_tags_ComputedTag_OnUpdate_Add(t *testing.T) { func TestAccS3ObjectCopy_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { 
ctx := acctest.Context(t) + resourceName := "aws_s3_object_copy.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), CheckDestroy: testAccCheckObjectCopyDestroy(ctx), @@ -1490,10 +1508,11 @@ func TestAccS3ObjectCopy_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { func TestAccS3ObjectCopy_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_s3_object_copy.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), CheckDestroy: testAccCheckObjectCopyDestroy(ctx), @@ -1651,10 +1670,11 @@ func TestAccS3ObjectCopy_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { func TestAccS3ObjectCopy_tags_IgnoreTags_Overlap_ResourceTag(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_s3_object_copy.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), CheckDestroy: testAccCheckObjectCopyDestroy(ctx), diff --git a/internal/service/s3/object_data_source_tags_gen_test.go b/internal/service/s3/object_data_source_tags_gen_test.go index 5a8527535818..d84a80401174 100644 --- a/internal/service/s3/object_data_source_tags_gen_test.go +++ b/internal/service/s3/object_data_source_tags_gen_test.go @@ -6,7 +6,6 @@ import ( 
"testing" "github.com/hashicorp/terraform-plugin-testing/config" - sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/knownvalue" "github.com/hashicorp/terraform-plugin-testing/statecheck" @@ -17,10 +16,11 @@ import ( func TestAccS3ObjectDataSource_tags(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_s3_object.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -45,10 +45,11 @@ func TestAccS3ObjectDataSource_tags(t *testing.T) { func TestAccS3ObjectDataSource_tags_NullMap(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_s3_object.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -69,10 +70,11 @@ func TestAccS3ObjectDataSource_tags_NullMap(t *testing.T) { func TestAccS3ObjectDataSource_tags_EmptyMap(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_s3_object.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), 
ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -93,10 +95,11 @@ func TestAccS3ObjectDataSource_tags_EmptyMap(t *testing.T) { func TestAccS3ObjectDataSource_tags_DefaultTags_nonOverlapping(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_s3_object.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), Steps: []resource.TestStep{ @@ -125,10 +128,11 @@ func TestAccS3ObjectDataSource_tags_DefaultTags_nonOverlapping(t *testing.T) { func TestAccS3ObjectDataSource_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_s3_object.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), Steps: []resource.TestStep{ @@ -163,10 +167,11 @@ func TestAccS3ObjectDataSource_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) func TestAccS3ObjectDataSource_tags_IgnoreTags_Overlap_ResourceTag(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_s3_object.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), Steps: []resource.TestStep{ diff --git a/internal/service/s3/object_identity_gen_test.go b/internal/service/s3/object_identity_gen_test.go index 
a0d40ffb7422..62e40ed20cd6 100644 --- a/internal/service/s3/object_identity_gen_test.go +++ b/internal/service/s3/object_identity_gen_test.go @@ -27,7 +27,7 @@ func TestAccS3Object_Identity_Basic(t *testing.T) { resourceName := "aws_s3_object.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ TerraformVersionChecks: []tfversion.TerraformVersionCheck{ tfversion.SkipBelow(tfversion.Version1_12_0), }, @@ -119,7 +119,7 @@ func TestAccS3Object_Identity_RegionOverride(t *testing.T) { resourceName := "aws_s3_object.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ TerraformVersionChecks: []tfversion.TerraformVersionCheck{ tfversion.SkipBelow(tfversion.Version1_12_0), }, @@ -207,3 +207,121 @@ func TestAccS3Object_Identity_RegionOverride(t *testing.T) { }, }) } + +// Resource Identity was added after v6.0.0 +func TestAccS3Object_Identity_ExistingResource(t *testing.T) { + ctx := acctest.Context(t) + + var v s3.GetObjectOutput + resourceName := "aws_s3_object.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), + CheckDestroy: testAccCheckObjectDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/Object/basic_v6.0.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckObjectExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + 
tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Object/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + names.AttrBucket: knownvalue.NotNull(), + names.AttrKey: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrBucket)), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrKey)), + }, + }, + }, + }) +} + +// Resource Identity was added after v6.0.0 +func TestAccS3Object_Identity_ExistingResource_NoRefresh_NoChange(t *testing.T) { + ctx := acctest.Context(t) + + var v s3.GetObjectOutput + resourceName := "aws_s3_object.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), + CheckDestroy: testAccCheckObjectDestroy(ctx), + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: 
config.StaticDirectory("testdata/Object/basic_v6.0.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckObjectExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Object/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + }, + }) +} diff --git a/internal/service/s3/object_tags_gen_test.go b/internal/service/s3/object_tags_gen_test.go index 8757dbcd11d7..dbbb1f436bdb 100644 --- a/internal/service/s3/object_tags_gen_test.go +++ b/internal/service/s3/object_tags_gen_test.go @@ -7,7 +7,6 @@ import ( "github.com/aws/aws-sdk-go-v2/service/s3" "github.com/hashicorp/terraform-plugin-testing/config" - sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/knownvalue" "github.com/hashicorp/terraform-plugin-testing/plancheck" @@ -19,11 +18,12 @@ import ( func TestAccS3Object_tags(t *testing.T) { ctx := acctest.Context(t) + var v s3.GetObjectOutput resourceName := "aws_s3_object.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + 
acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), CheckDestroy: testAccCheckObjectDestroy(ctx), @@ -213,11 +213,12 @@ func TestAccS3Object_tags(t *testing.T) { func TestAccS3Object_tags_null(t *testing.T) { ctx := acctest.Context(t) + var v s3.GetObjectOutput resourceName := "aws_s3_object.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), CheckDestroy: testAccCheckObjectDestroy(ctx), @@ -283,11 +284,12 @@ func TestAccS3Object_tags_null(t *testing.T) { func TestAccS3Object_tags_EmptyMap(t *testing.T) { ctx := acctest.Context(t) + var v s3.GetObjectOutput resourceName := "aws_s3_object.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), CheckDestroy: testAccCheckObjectDestroy(ctx), @@ -349,11 +351,12 @@ func TestAccS3Object_tags_EmptyMap(t *testing.T) { func TestAccS3Object_tags_AddOnUpdate(t *testing.T) { ctx := acctest.Context(t) + var v s3.GetObjectOutput resourceName := "aws_s3_object.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), CheckDestroy: testAccCheckObjectDestroy(ctx), @@ -433,11 +436,12 @@ func TestAccS3Object_tags_AddOnUpdate(t 
*testing.T) { func TestAccS3Object_tags_EmptyTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) + var v s3.GetObjectOutput resourceName := "aws_s3_object.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), CheckDestroy: testAccCheckObjectDestroy(ctx), @@ -528,11 +532,12 @@ func TestAccS3Object_tags_EmptyTag_OnCreate(t *testing.T) { func TestAccS3Object_tags_EmptyTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + var v s3.GetObjectOutput resourceName := "aws_s3_object.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), CheckDestroy: testAccCheckObjectDestroy(ctx), @@ -671,11 +676,12 @@ func TestAccS3Object_tags_EmptyTag_OnUpdate_Add(t *testing.T) { func TestAccS3Object_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + var v s3.GetObjectOutput resourceName := "aws_s3_object.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), CheckDestroy: testAccCheckObjectDestroy(ctx), @@ -763,11 +769,12 @@ func TestAccS3Object_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { func TestAccS3Object_tags_DefaultTags_providerOnly(t *testing.T) { ctx := acctest.Context(t) + var v s3.GetObjectOutput resourceName := "aws_s3_object.test" - 
rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), CheckDestroy: testAccCheckObjectDestroy(ctx), @@ -956,11 +963,12 @@ func TestAccS3Object_tags_DefaultTags_providerOnly(t *testing.T) { func TestAccS3Object_tags_DefaultTags_nonOverlapping(t *testing.T) { ctx := acctest.Context(t) + var v s3.GetObjectOutput resourceName := "aws_s3_object.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), CheckDestroy: testAccCheckObjectDestroy(ctx), @@ -1125,11 +1133,12 @@ func TestAccS3Object_tags_DefaultTags_nonOverlapping(t *testing.T) { func TestAccS3Object_tags_DefaultTags_overlapping(t *testing.T) { ctx := acctest.Context(t) + var v s3.GetObjectOutput resourceName := "aws_s3_object.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), CheckDestroy: testAccCheckObjectDestroy(ctx), @@ -1310,11 +1319,12 @@ func TestAccS3Object_tags_DefaultTags_overlapping(t *testing.T) { func TestAccS3Object_tags_DefaultTags_updateToProviderOnly(t *testing.T) { ctx := acctest.Context(t) + var v s3.GetObjectOutput resourceName := "aws_s3_object.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - 
resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), CheckDestroy: testAccCheckObjectDestroy(ctx), @@ -1403,11 +1413,12 @@ func TestAccS3Object_tags_DefaultTags_updateToProviderOnly(t *testing.T) { func TestAccS3Object_tags_DefaultTags_updateToResourceOnly(t *testing.T) { ctx := acctest.Context(t) + var v s3.GetObjectOutput resourceName := "aws_s3_object.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), CheckDestroy: testAccCheckObjectDestroy(ctx), @@ -1495,11 +1506,12 @@ func TestAccS3Object_tags_DefaultTags_updateToResourceOnly(t *testing.T) { func TestAccS3Object_tags_DefaultTags_emptyResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v s3.GetObjectOutput resourceName := "aws_s3_object.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), CheckDestroy: testAccCheckObjectDestroy(ctx), @@ -1563,11 +1575,12 @@ func TestAccS3Object_tags_DefaultTags_emptyResourceTag(t *testing.T) { func TestAccS3Object_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T) { ctx := acctest.Context(t) + var v s3.GetObjectOutput resourceName := "aws_s3_object.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: 
func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), CheckDestroy: testAccCheckObjectDestroy(ctx), @@ -1623,11 +1636,12 @@ func TestAccS3Object_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T) { func TestAccS3Object_tags_DefaultTags_nullOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v s3.GetObjectOutput resourceName := "aws_s3_object.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), CheckDestroy: testAccCheckObjectDestroy(ctx), @@ -1688,11 +1702,12 @@ func TestAccS3Object_tags_DefaultTags_nullOverlappingResourceTag(t *testing.T) { func TestAccS3Object_tags_DefaultTags_nullNonOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v s3.GetObjectOutput resourceName := "aws_s3_object.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), CheckDestroy: testAccCheckObjectDestroy(ctx), @@ -1753,11 +1768,12 @@ func TestAccS3Object_tags_DefaultTags_nullNonOverlappingResourceTag(t *testing.T func TestAccS3Object_tags_ComputedTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) + var v s3.GetObjectOutput resourceName := "aws_s3_object.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, 
names.S3ServiceID), CheckDestroy: testAccCheckObjectDestroy(ctx), @@ -1811,11 +1827,12 @@ func TestAccS3Object_tags_ComputedTag_OnCreate(t *testing.T) { func TestAccS3Object_tags_ComputedTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + var v s3.GetObjectOutput resourceName := "aws_s3_object.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), CheckDestroy: testAccCheckObjectDestroy(ctx), @@ -1911,11 +1928,12 @@ func TestAccS3Object_tags_ComputedTag_OnUpdate_Add(t *testing.T) { func TestAccS3Object_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + var v s3.GetObjectOutput resourceName := "aws_s3_object.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), CheckDestroy: testAccCheckObjectDestroy(ctx), @@ -2001,11 +2019,12 @@ func TestAccS3Object_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { func TestAccS3Object_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { ctx := acctest.Context(t) + var v s3.GetObjectOutput resourceName := "aws_s3_object.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), CheckDestroy: testAccCheckObjectDestroy(ctx), @@ -2163,11 +2182,12 @@ func 
TestAccS3Object_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { func TestAccS3Object_tags_IgnoreTags_Overlap_ResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v s3.GetObjectOutput resourceName := "aws_s3_object.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), CheckDestroy: testAccCheckObjectDestroy(ctx), diff --git a/internal/service/s3/object_test.go b/internal/service/s3/object_test.go index aff9178e4277..be1dab8b32c1 100644 --- a/internal/service/s3/object_test.go +++ b/internal/service/s3/object_test.go @@ -29,7 +29,6 @@ import ( "github.com/hashicorp/terraform-plugin-testing/statecheck" "github.com/hashicorp/terraform-plugin-testing/terraform" "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" - "github.com/hashicorp/terraform-plugin-testing/tfversion" "github.com/hashicorp/terraform-provider-aws/internal/acctest" tfknownvalue "github.com/hashicorp/terraform-provider-aws/internal/acctest/knownvalue" tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" @@ -230,71 +229,6 @@ func TestAccS3Object_Disappears_bucket(t *testing.T) { }) } -// Resource Identity was added in v6.1 -func TestAccS3Object_Identity_ExistingResource(t *testing.T) { - ctx := acctest.Context(t) - - var v s3.GetObjectOutput - resourceName := "aws_s3_object.object" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - - resource.ParallelTest(t, resource.TestCase{ - TerraformVersionChecks: []tfversion.TerraformVersionCheck{ - tfversion.SkipBelow(tfversion.Version1_12_0), - }, - PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), - CheckDestroy: testAccCheckObjectDestroy(ctx), - Steps: []resource.TestStep{ - 
{ - ExternalProviders: map[string]resource.ExternalProvider{ - "aws": { - Source: "hashicorp/aws", - VersionConstraint: "6.0.0", - }, - }, - Config: testAccObjectConfig_basic(rName), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckObjectExists(ctx, resourceName, &v), - ), - ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), - }, - }, - ConfigStateChecks: []statecheck.StateCheck{ - tfstatecheck.ExpectNoIdentity(resourceName), - }, - }, - { - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - Config: testAccObjectConfig_basic(rName), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckObjectExists(ctx, resourceName, &v), - ), - ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - PostApplyPostRefresh: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - }, - ConfigStateChecks: []statecheck.StateCheck{ - statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ - names.AttrAccountID: tfknownvalue.AccountID(), - names.AttrRegion: knownvalue.StringExact(acctest.Region()), - names.AttrBucket: knownvalue.NotNull(), - names.AttrKey: knownvalue.NotNull(), - }), - statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrBucket)), - statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrKey)), - }, - }, - }, - }) -} - func TestAccS3Object_upgradeFromV4(t *testing.T) { ctx := acctest.Context(t) var obj s3.GetObjectOutput @@ -1252,7 +1186,7 @@ func TestAccS3Object_tagsViaAccessPointAlias(t *testing.T) { }) } -func TestAccS3Object_tagsViaMultiRegionAccessPoint(t *testing.T) { +func TestAccS3Object_tagsViaMultiRegionAccessPointARN(t *testing.T) { ctx := acctest.Context(t) var obj1, obj2 s3.GetObjectOutput 
resourceName := "aws_s3_object.object" @@ -1266,7 +1200,7 @@ func TestAccS3Object_tagsViaMultiRegionAccessPoint(t *testing.T) { CheckDestroy: acctest.CheckDestroyNoop, // Cannot access the object via the access point alias after the access point is destroyed Steps: []resource.TestStep{ { - Config: testAccObjectConfig_tagsViaMultiRegionAccessPoint(rName, key, "stuff"), + Config: testAccObjectConfig_tagsViaMultiRegionAccessPointARN(rName, key, "stuff"), Check: resource.ComposeAggregateTestCheckFunc( testAccCheckObjectExists(ctx, resourceName, &obj1), testAccCheckObjectBody(&obj1, "stuff"), @@ -1277,7 +1211,7 @@ func TestAccS3Object_tagsViaMultiRegionAccessPoint(t *testing.T) { ), }, { - Config: testAccObjectConfig_updatedTagsViaMultiRegionAccessPoint(rName, key, "stuff"), + Config: testAccObjectConfig_updatedTagsViaMultiRegionAccessPointARN(rName, key, "stuff"), Check: resource.ComposeAggregateTestCheckFunc( testAccCheckObjectExists(ctx, resourceName, &obj2), testAccCheckObjectVersionIDEquals(&obj2, &obj1), @@ -1335,6 +1269,72 @@ func TestAccS3Object_tagsViaObjectLambdaAccessPointARN(t *testing.T) { }) } +func TestAccS3Object_viaDirectoryBucketAccessPointAlias(t *testing.T) { + ctx := acctest.Context(t) + var obj s3.GetObjectOutput + resourceName := "aws_s3_object.object" + key := "test-key" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: acctest.CheckDestroyNoop, // Cannot access the object via the access point alias after the access point is destroyed + Steps: []resource.TestStep{ + { + Config: testAccObjectConfig_viaDirectoryBucketAccessPointAlias(rName, key, "stuff"), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckObjectExists(ctx, resourceName, &obj), + testAccCheckObjectBody(&obj, "stuff"), + 
resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, "0"), + ), + }, + }, + }) +} + +func TestAccS3Object_tagsViaFSxAccessPointARN(t *testing.T) { + ctx := acctest.Context(t) + var obj1, obj2 s3.GetObjectOutput + resourceName := "aws_s3_object.object" + key := "test-key" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: acctest.CheckDestroyNoop, // Cannot access the object via the access point alias after the access point is destroyed + Steps: []resource.TestStep{ + { + Config: testAccObjectConfig_tagsViaFSxAccessPointARN(rName, key, "stuff"), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckObjectExists(ctx, resourceName, &obj1), + testAccCheckObjectBody(&obj1, "stuff"), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, "3"), + resource.TestCheckResourceAttr(resourceName, "tags.Key1", "A@AA"), + resource.TestCheckResourceAttr(resourceName, "tags.Key2", "BBB"), + resource.TestCheckResourceAttr(resourceName, "tags.Key3", "CCC"), + ), + }, + { + Config: testAccObjectConfig_updatedTagsViaFSxAccessPointARN(rName, key, "stuff"), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckObjectExists(ctx, resourceName, &obj2), + testAccCheckObjectVersionIDEquals(&obj2, &obj1), + testAccCheckObjectBody(&obj2, "stuff"), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, "4"), + resource.TestCheckResourceAttr(resourceName, "tags.Key2", "B@BB"), + resource.TestCheckResourceAttr(resourceName, "tags.Key3", "X X"), + resource.TestCheckResourceAttr(resourceName, "tags.Key4", "DDD"), + resource.TestCheckResourceAttr(resourceName, "tags.Key5", "E:/"), + ), + }, + }, + }) +} + func TestAccS3Object_objectLockLegalHoldStartWithNone(t *testing.T) { ctx := acctest.Context(t) 
var obj1, obj2, obj3 s3.GetObjectOutput @@ -2121,6 +2121,66 @@ func TestAccS3Object_basicUpgrade(t *testing.T) { }) } +func TestAccS3Object_Identity_ExistingResource_NoRefresh_WithChange(t *testing.T) { + ctx := acctest.Context(t) + var conf s3.GetObjectOutput + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_s3_object.object" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), + CheckDestroy: testAccCheckObjectDestroy(ctx), + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + { + ExternalProviders: map[string]resource.ExternalProvider{ + "aws": { + Source: "hashicorp/aws", + VersionConstraint: "6.0.0", + }, + }, + Config: testAccObjectConfig_content(rName, "initial"), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckObjectExists(ctx, resourceName, &conf), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Config: testAccObjectConfig_content(rName, "updated"), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckObjectExists(ctx, resourceName, &conf), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + names.AttrBucket: knownvalue.NotNull(), + names.AttrKey: knownvalue.NotNull(), + }), + 
statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrBucket)), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrKey)), + }, + }, + }, + }) +} + func testAccCheckObjectVersionIDDiffers(first, second *s3.GetObjectOutput) resource.TestCheckFunc { return func(s *terraform.State) error { if aws.ToString(first.VersionId) == aws.ToString(second.VersionId) { @@ -2478,6 +2538,54 @@ resource "aws_s3control_object_lambda_access_point" "test" { `, rName)) } +func testAccObjectConfig_baseDirectoryBucketAccessPoint(rName string) string { + return acctest.ConfigCompose(testAccDirectoryBucketConfig_basic(rName), ` +resource "aws_s3_access_point" "test" { + bucket = aws_s3_directory_bucket.test.bucket + name = local.access_point_name +} +`) +} + +func testAccObjectConfig_baseFSxAccessPoint(rName string) string { + return acctest.ConfigCompose(acctest.ConfigVPCWithSubnets(rName, 1), fmt.Sprintf(` +resource "aws_fsx_openzfs_file_system" "test" { + storage_capacity = 64 + subnet_ids = aws_subnet.test[*].id + deployment_type = "SINGLE_AZ_HA_2" + throughput_capacity = 320 + skip_final_backup = true + + tags = { + Name = %[1]q + } +} + +resource "aws_fsx_openzfs_volume" "test" { + name = %[1]q + parent_volume_id = aws_fsx_openzfs_file_system.test.root_volume_id +} + +resource "aws_fsx_s3_access_point_attachment" "test" { + name = %[1]q + type = "OPENZFS" + + openzfs_configuration { + volume_id = aws_fsx_openzfs_volume.test.id + + file_system_identity { + type = "POSIX" + + posix_user { + uid = 1001 + gid = 1001 + } + } + } +} +`, rName)) +} + func testAccObjectConfig_basic(rName string) string { return fmt.Sprintf(` resource "aws_s3_bucket" "test" { @@ -2856,7 +2964,7 @@ resource "aws_s3_object" "object" { `, key, content)) } -func testAccObjectConfig_tagsViaMultiRegionAccessPoint(rName, key, content string) string { +func testAccObjectConfig_tagsViaMultiRegionAccessPointARN(rName, key, content string) string { return 
acctest.ConfigCompose(testAccObjectConfig_baseMultiRegionAccessPoint(rName), fmt.Sprintf(` resource "aws_s3_object" "object" { bucket = aws_s3control_multi_region_access_point.test.arn @@ -2872,7 +2980,7 @@ resource "aws_s3_object" "object" { `, key, content)) } -func testAccObjectConfig_updatedTagsViaMultiRegionAccessPoint(rName, key, content string) string { +func testAccObjectConfig_updatedTagsViaMultiRegionAccessPointARN(rName, key, content string) string { return acctest.ConfigCompose(testAccObjectConfig_baseMultiRegionAccessPoint(rName), fmt.Sprintf(` resource "aws_s3_object" "object" { bucket = aws_s3control_multi_region_access_point.test.arn @@ -2922,6 +3030,49 @@ resource "aws_s3_object" "object" { `, key, content)) } +func testAccObjectConfig_viaDirectoryBucketAccessPointAlias(rName, key, content string) string { + return acctest.ConfigCompose(testAccObjectConfig_baseDirectoryBucketAccessPoint(rName), fmt.Sprintf(` +resource "aws_s3_object" "object" { + bucket = aws_s3_access_point.test.alias + key = %[1]q + content = %[2]q +} +`, key, content)) +} + +func testAccObjectConfig_tagsViaFSxAccessPointARN(rName, key, content string) string { + return acctest.ConfigCompose(testAccObjectConfig_baseFSxAccessPoint(rName), fmt.Sprintf(` +resource "aws_s3_object" "object" { + bucket = aws_fsx_s3_access_point_attachment.test.s3_access_point_arn + key = %[1]q + content = %[2]q + + tags = { + Key1 = "A@AA" + Key2 = "BBB" + Key3 = "CCC" + } +} +`, key, content)) +} + +func testAccObjectConfig_updatedTagsViaFSxAccessPointARN(rName, key, content string) string { + return acctest.ConfigCompose(testAccObjectConfig_baseFSxAccessPoint(rName), fmt.Sprintf(` +resource "aws_s3_object" "object" { + bucket = aws_fsx_s3_access_point_attachment.test.s3_access_point_arn + key = %[1]q + content = %[2]q + + tags = { + Key2 = "B@BB" + Key3 = "X X" + Key4 = "DDD" + Key5 = "E:/" + } +} +`, key, content)) +} + func testAccObjectConfig_metadata(rName string, metadataKey1, metadataValue1, 
metadataKey2, metadataValue2 string) string { return fmt.Sprintf(` resource "aws_s3_bucket" "test" { diff --git a/internal/service/s3/service_endpoint_resolver_gen.go b/internal/service/s3/service_endpoint_resolver_gen.go index 09052d858cf7..18e5b0555bf3 100644 --- a/internal/service/s3/service_endpoint_resolver_gen.go +++ b/internal/service/s3/service_endpoint_resolver_gen.go @@ -62,7 +62,7 @@ func (r resolverV2) ResolveEndpoint(ctx context.Context, params s3.EndpointParam }) params.UseFIPS = aws.Bool(false) } else { - err = fmt.Errorf("looking up s3 endpoint %q: %s", hostname, err) + err = fmt.Errorf("looking up s3 endpoint %q: %w", hostname, err) return } } else { diff --git a/internal/service/s3/service_endpoints_gen_test.go b/internal/service/s3/service_endpoints_gen_test.go index 6b9189d4ec56..5f8289c28b08 100644 --- a/internal/service/s3/service_endpoints_gen_test.go +++ b/internal/service/s3/service_endpoints_gen_test.go @@ -753,7 +753,7 @@ func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { ) } -var errCancelOperation = fmt.Errorf("Test: Canceling request") +var errCancelOperation = errors.New("Test: Canceling request") func addCancelRequestMiddleware() func(*middleware.Stack) error { return func(stack *middleware.Stack) error { diff --git a/internal/service/s3/service_package.go b/internal/service/s3/service_package.go index b7f8d5ab0ca6..b562f133d62e 100644 --- a/internal/service/s3/service_package.go +++ b/internal/service/s3/service_package.go @@ -13,6 +13,7 @@ import ( "github.com/hashicorp/aws-sdk-go-base/v2/tfawserr" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/vcr" ) func (p *servicePackage) withExtraOptions(ctx context.Context, config map[string]any) []func(*s3.Options) { @@ -38,12 +39,21 @@ func (p *servicePackage) withExtraOptions(ctx context.Context, config map[string o.UsePathStyle = 
config["s3_use_path_style"].(bool) }, func(o *s3.Options) { - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(func(err error) aws.Ternary { - if tfawserr.ErrMessageContains(err, errCodeOperationAborted, "A conflicting conditional operation is currently in progress against this resource. Please try again.") { - return aws.TrueTernary - } - return aws.UnknownTernary // Delegate to configured Retryer. - })) + retryables := []retry.IsErrorRetryable{ + retry.IsErrorRetryableFunc(func(err error) aws.Ternary { + if tfawserr.ErrMessageContains(err, errCodeOperationAborted, "A conflicting conditional operation is currently in progress against this resource. Please try again.") { + return aws.TrueTernary + } + return aws.UnknownTernary // Delegate to configured Retryer. + }), + } + // Include go-vcr retryable to prevent generated client retryer from being overridden + if inContext, ok := conns.FromContext(ctx); ok && inContext.VCREnabled() { + tflog.Info(ctx, "overriding retry behavior to immediately return VCR errors") + retryables = append(retryables, vcr.InteractionNotFoundRetryableFunc) + } + + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retryables...) 
}, } } diff --git a/internal/service/s3/service_package_gen.go b/internal/service/s3/service_package_gen.go index 5a78e262395e..a111efee92b4 100644 --- a/internal/service/s3/service_package_gen.go +++ b/internal/service/s3/service_package_gen.go @@ -7,7 +7,6 @@ import ( "unique" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/s3" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -37,6 +36,12 @@ func (p *servicePackage) FrameworkResources(ctx context.Context) []*inttypes.Ser Name: "Bucket Lifecycle Configuration", Region: unique.Make(inttypes.ResourceRegionDefault()), }, + { + Factory: newBucketMetadataConfigurationResource, + TypeName: "aws_s3_bucket_metadata_configuration", + Name: "Bucket Metadata Configuration", + Region: unique.Make(inttypes.ResourceRegionDefault()), + }, { Factory: newDirectoryBucketResource, TypeName: "aws_s3_directory_bucket", @@ -119,6 +124,9 @@ func (p *servicePackage) SDKResources(ctx context.Context) []*inttypes.ServicePa Identity: inttypes.RegionalSingleParameterIdentity(names.AttrBucket, inttypes.WithV6_0SDKv2Fix(), ), + Import: inttypes.SDKv2Import{ + CustomImport: true, + }, }, { Factory: resourceBucketAccelerateConfiguration, @@ -131,6 +139,17 @@ func (p *servicePackage) SDKResources(ctx context.Context) []*inttypes.ServicePa TypeName: "aws_s3_bucket_acl", Name: "Bucket ACL", Region: unique.Make(inttypes.ResourceRegionDefault()), + Identity: inttypes.RegionalParameterizedIdentity([]inttypes.IdentityAttribute{ + inttypes.StringIdentityAttribute(names.AttrBucket, true), + inttypes.StringIdentityAttribute(names.AttrExpectedBucketOwner, false), + inttypes.StringIdentityAttribute("acl", false), + }, + inttypes.WithMutableIdentity(), + ), + Import: inttypes.SDKv2Import{ + WrappedImport: true, + ImportID: bucketACLImportID{}, + }, }, { Factory: resourceBucketAnalyticsConfiguration, @@ -143,6 +162,14 @@ func (p 
*servicePackage) SDKResources(ctx context.Context) []*inttypes.ServicePa TypeName: "aws_s3_bucket_cors_configuration", Name: "Bucket CORS Configuration", Region: unique.Make(inttypes.ResourceRegionDefault()), + Identity: inttypes.RegionalParameterizedIdentity([]inttypes.IdentityAttribute{ + inttypes.StringIdentityAttribute(names.AttrBucket, true), + inttypes.StringIdentityAttribute(names.AttrExpectedBucketOwner, false), + }), + Import: inttypes.SDKv2Import{ + WrappedImport: true, + ImportID: resourceImportID{}, + }, }, { Factory: resourceBucketIntelligentTieringConfiguration, @@ -161,6 +188,14 @@ func (p *servicePackage) SDKResources(ctx context.Context) []*inttypes.ServicePa TypeName: "aws_s3_bucket_logging", Name: "Bucket Logging", Region: unique.Make(inttypes.ResourceRegionDefault()), + Identity: inttypes.RegionalParameterizedIdentity([]inttypes.IdentityAttribute{ + inttypes.StringIdentityAttribute(names.AttrBucket, true), + inttypes.StringIdentityAttribute(names.AttrExpectedBucketOwner, false), + }), + Import: inttypes.SDKv2Import{ + WrappedImport: true, + ImportID: resourceImportID{}, + }, }, { Factory: resourceBucketMetric, @@ -173,6 +208,10 @@ func (p *servicePackage) SDKResources(ctx context.Context) []*inttypes.ServicePa TypeName: "aws_s3_bucket_notification", Name: "Bucket Notification", Region: unique.Make(inttypes.ResourceRegionDefault()), + Identity: inttypes.RegionalSingleParameterIdentity(names.AttrBucket), + Import: inttypes.SDKv2Import{ + WrappedImport: true, + }, }, { Factory: resourceBucketObject, @@ -203,18 +242,30 @@ func (p *servicePackage) SDKResources(ctx context.Context) []*inttypes.ServicePa TypeName: "aws_s3_bucket_ownership_controls", Name: "Bucket Ownership Controls", Region: unique.Make(inttypes.ResourceRegionDefault()), + Identity: inttypes.RegionalSingleParameterIdentity(names.AttrBucket), + Import: inttypes.SDKv2Import{ + WrappedImport: true, + }, }, { Factory: resourceBucketPolicy, TypeName: "aws_s3_bucket_policy", Name: "Bucket 
Policy", Region: unique.Make(inttypes.ResourceRegionDefault()), + Identity: inttypes.RegionalSingleParameterIdentity(names.AttrBucket), + Import: inttypes.SDKv2Import{ + WrappedImport: true, + }, }, { Factory: resourceBucketPublicAccessBlock, TypeName: "aws_s3_bucket_public_access_block", Name: "Bucket Public Access Block", Region: unique.Make(inttypes.ResourceRegionDefault()), + Identity: inttypes.RegionalSingleParameterIdentity(names.AttrBucket), + Import: inttypes.SDKv2Import{ + WrappedImport: true, + }, }, { Factory: resourceBucketReplicationConfiguration, @@ -231,20 +282,44 @@ func (p *servicePackage) SDKResources(ctx context.Context) []*inttypes.ServicePa { Factory: resourceBucketServerSideEncryptionConfiguration, TypeName: "aws_s3_bucket_server_side_encryption_configuration", - Name: "Bucket Server-side Encryption Configuration", + Name: "Bucket Server Side Encryption Configuration", Region: unique.Make(inttypes.ResourceRegionDefault()), + Identity: inttypes.RegionalParameterizedIdentity([]inttypes.IdentityAttribute{ + inttypes.StringIdentityAttribute(names.AttrBucket, true), + inttypes.StringIdentityAttribute(names.AttrExpectedBucketOwner, false), + }), + Import: inttypes.SDKv2Import{ + WrappedImport: true, + ImportID: resourceImportID{}, + }, }, { Factory: resourceBucketVersioning, TypeName: "aws_s3_bucket_versioning", Name: "Bucket Versioning", Region: unique.Make(inttypes.ResourceRegionDefault()), + Identity: inttypes.RegionalParameterizedIdentity([]inttypes.IdentityAttribute{ + inttypes.StringIdentityAttribute(names.AttrBucket, true), + inttypes.StringIdentityAttribute(names.AttrExpectedBucketOwner, false), + }), + Import: inttypes.SDKv2Import{ + WrappedImport: true, + ImportID: resourceImportID{}, + }, }, { Factory: resourceBucketWebsiteConfiguration, TypeName: "aws_s3_bucket_website_configuration", Name: "Bucket Website Configuration", Region: unique.Make(inttypes.ResourceRegionDefault()), + Identity: 
inttypes.RegionalParameterizedIdentity([]inttypes.IdentityAttribute{ + inttypes.StringIdentityAttribute(names.AttrBucket, true), + inttypes.StringIdentityAttribute(names.AttrExpectedBucketOwner, false), + }), + Import: inttypes.SDKv2Import{ + WrappedImport: true, + ImportID: resourceImportID{}, + }, }, { Factory: resourceObject, @@ -300,7 +375,7 @@ func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) ( func(o *s3.Options) { if inContext, ok := conns.FromContext(ctx); ok && inContext.VCREnabled() { tflog.Info(ctx, "overriding retry behavior to immediately return VCR errors") - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(vcr.InteractionNotFoundRetryableFunc)) + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), vcr.InteractionNotFoundRetryableFunc) } }, withExtraOptions(ctx, p, config), diff --git a/internal/service/s3/sweep.go b/internal/service/s3/sweep.go index 09b0c30a247c..be25ff27f133 100644 --- a/internal/service/s3/sweep.go +++ b/internal/service/s3/sweep.go @@ -25,6 +25,7 @@ import ( func RegisterSweepers() { awsv2.Register("aws_s3_bucket", sweepBuckets, + "aws_datazone_domain", "aws_s3_access_point", "aws_s3_object_gp_bucket", "aws_s3control_access_grants_instance", diff --git a/internal/service/s3/tags.go b/internal/service/s3/tags.go index 858f5be879d8..94da8d8de8d8 100644 --- a/internal/service/s3/tags.go +++ b/internal/service/s3/tags.go @@ -36,13 +36,13 @@ func bucketCreateTags(ctx context.Context, conn *s3.Client, identifier string, t // bucketListTags lists S3 bucket tags. // The identifier is the bucket name. func bucketListTags(ctx context.Context, conn *s3.Client, identifier string, optFns ...func(*s3.Options)) (tftags.KeyValueTags, error) { - input := &s3.GetBucketTaggingInput{ + input := s3.GetBucketTaggingInput{ Bucket: aws.String(identifier), } - output, err := conn.GetBucketTagging(ctx, input, optFns...) 
+ output, err := conn.GetBucketTagging(ctx, &input, optFns...) - if tfawserr.ErrCodeEquals(err, errCodeNoSuchTagSet, errCodeMethodNotAllowed, errCodeNotImplemented, errCodeXNotImplemented, errCodeUnsupportedOperation) { + if tfawserr.ErrCodeEquals(err, errCodeNoSuchTagSet, errCodeNoSuchTagSetError, errCodeMethodNotAllowed, errCodeNotImplemented, errCodeXNotImplemented, errCodeUnsupportedOperation) { return tftags.New(ctx, nil), nil } if err != nil { @@ -68,24 +68,24 @@ func bucketUpdateTags(ctx context.Context, conn *s3.Client, identifier string, o ignoredTags := allTags.Ignore(oldTags).Ignore(newTags) if len(newTags)+len(ignoredTags) > 0 { - input := &s3.PutBucketTaggingInput{ + input := s3.PutBucketTaggingInput{ Bucket: aws.String(identifier), Tagging: &awstypes.Tagging{ TagSet: svcTags(newTags.Merge(ignoredTags)), }, } - _, err := conn.PutBucketTagging(ctx, input, optFns...) + _, err := conn.PutBucketTagging(ctx, &input, optFns...) if err != nil { return fmt.Errorf("setting resource tags (%s): %w", identifier, err) } } else if len(oldTags) > 0 && len(ignoredTags) == 0 { - input := &s3.DeleteBucketTaggingInput{ + input := s3.DeleteBucketTaggingInput{ Bucket: aws.String(identifier), } - _, err := conn.DeleteBucketTagging(ctx, input, optFns...) + _, err := conn.DeleteBucketTagging(ctx, &input, optFns...) if err != nil { return fmt.Errorf("deleting resource tags (%s): %w", identifier, err) @@ -97,14 +97,14 @@ func bucketUpdateTags(ctx context.Context, conn *s3.Client, identifier string, o // objectListTags lists S3 object tags. func objectListTags(ctx context.Context, conn *s3.Client, bucket, key string, optFns ...func(*s3.Options)) (tftags.KeyValueTags, error) { - input := &s3.GetObjectTaggingInput{ + input := s3.GetObjectTaggingInput{ Bucket: aws.String(bucket), Key: aws.String(key), } - output, err := conn.GetObjectTagging(ctx, input, optFns...) + output, err := conn.GetObjectTagging(ctx, &input, optFns...) 
- if tfawserr.ErrCodeEquals(err, errCodeNoSuchTagSet) { + if tfawserr.ErrCodeEquals(err, errCodeNoSuchTagSet, errCodeNoSuchTagSetError) { return tftags.New(ctx, nil), nil } @@ -134,7 +134,7 @@ func objectUpdateTags(ctx context.Context, conn *s3.Client, bucket, key string, ignoredTags := allTags.Ignore(oldTags).Ignore(newTags) if len(newTags)+len(ignoredTags) > 0 { - input := &s3.PutObjectTaggingInput{ + input := s3.PutObjectTaggingInput{ Bucket: aws.String(bucket), Key: aws.String(key), Tagging: &awstypes.Tagging{ @@ -142,18 +142,18 @@ func objectUpdateTags(ctx context.Context, conn *s3.Client, bucket, key string, }, } - _, err := conn.PutObjectTagging(ctx, input, optFns...) + _, err := conn.PutObjectTagging(ctx, &input, optFns...) if err != nil { return fmt.Errorf("setting resource tags (%s/%s): %w", bucket, key, err) } } else if len(oldTags) > 0 && len(ignoredTags) == 0 { - input := &s3.DeleteObjectTaggingInput{ + input := s3.DeleteObjectTaggingInput{ Bucket: aws.String(bucket), Key: aws.String(key), } - _, err := conn.DeleteObjectTagging(ctx, input, optFns...) + _, err := conn.DeleteObjectTagging(ctx, &input, optFns...) if err != nil { return fmt.Errorf("deleting resource tags (%s/%s): %w", bucket, key, err) diff --git a/internal/service/s3/testdata/Bucket/basic_v5.100.0/main_gen.tf b/internal/service/s3/testdata/Bucket/basic_v5.100.0/main_gen.tf new file mode 100644 index 000000000000..165ed59b578a --- /dev/null +++ b/internal/service/s3/testdata/Bucket/basic_v5.100.0/main_gen.tf @@ -0,0 +1,22 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_s3_bucket" "test" { + bucket = var.rName +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "5.100.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/s3/testdata/Bucket/basic_v6.0.0/main_gen.tf b/internal/service/s3/testdata/Bucket/basic_v6.0.0/main_gen.tf new file mode 100644 index 000000000000..e0b6e8492204 --- /dev/null +++ b/internal/service/s3/testdata/Bucket/basic_v6.0.0/main_gen.tf @@ -0,0 +1,22 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resource "aws_s3_bucket" "test" { + bucket = var.rName +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "6.0.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/s3/testdata/BucketACL/basic/main_gen.tf b/internal/service/s3/testdata/BucketACL/basic/main_gen.tf new file mode 100644 index 000000000000..1cf3bd2328cb --- /dev/null +++ b/internal/service/s3/testdata/BucketACL/basic/main_gen.tf @@ -0,0 +1,26 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_s3_bucket_acl" "test" { + depends_on = [aws_s3_bucket_ownership_controls.test] + + bucket = aws_s3_bucket.test.bucket + acl = "private" +} + +resource "aws_s3_bucket" "test" { + bucket = var.rName +} + +resource "aws_s3_bucket_ownership_controls" "test" { + bucket = aws_s3_bucket.test.bucket + rule { + object_ownership = "BucketOwnerPreferred" + } +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} diff --git a/internal/service/s3/testdata/BucketACL/basic_v6.10.0/main_gen.tf b/internal/service/s3/testdata/BucketACL/basic_v6.10.0/main_gen.tf new file mode 100644 index 000000000000..fc741cc89731 --- /dev/null +++ b/internal/service/s3/testdata/BucketACL/basic_v6.10.0/main_gen.tf @@ -0,0 +1,36 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resource "aws_s3_bucket_acl" "test" { + depends_on = [aws_s3_bucket_ownership_controls.test] + + bucket = aws_s3_bucket.test.bucket + acl = "private" +} + +resource "aws_s3_bucket" "test" { + bucket = var.rName +} + +resource "aws_s3_bucket_ownership_controls" "test" { + bucket = aws_s3_bucket.test.bucket + rule { + object_ownership = "BucketOwnerPreferred" + } +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "6.10.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/s3/testdata/BucketACL/region_override/main_gen.tf b/internal/service/s3/testdata/BucketACL/region_override/main_gen.tf new file mode 100644 index 000000000000..37f09f22fbd8 --- /dev/null +++ b/internal/service/s3/testdata/BucketACL/region_override/main_gen.tf @@ -0,0 +1,38 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_s3_bucket_acl" "test" { + region = var.region + + depends_on = [aws_s3_bucket_ownership_controls.test] + + bucket = aws_s3_bucket.test.bucket + acl = "private" +} + +resource "aws_s3_bucket" "test" { + region = var.region + + bucket = var.rName +} + +resource "aws_s3_bucket_ownership_controls" "test" { + region = var.region + + bucket = aws_s3_bucket.test.bucket + rule { + object_ownership = "BucketOwnerPreferred" + } +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} + +variable "region" { + description = "Region to deploy resource in" + type = string + nullable = false +} diff --git a/internal/service/s3/testdata/BucketCORSConfiguration/basic/main_gen.tf b/internal/service/s3/testdata/BucketCORSConfiguration/basic/main_gen.tf new file mode 100644 index 000000000000..962d4e7a7ca6 --- /dev/null +++ b/internal/service/s3/testdata/BucketCORSConfiguration/basic/main_gen.tf @@ -0,0 +1,22 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resource "aws_s3_bucket_cors_configuration" "test" { + bucket = aws_s3_bucket.test.id + + cors_rule { + allowed_methods = ["PUT"] + allowed_origins = ["https://www.example.com"] + } +} + +resource "aws_s3_bucket" "test" { + bucket = var.rName +} + + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} diff --git a/internal/service/s3/testdata/BucketCORSConfiguration/basic_v6.9.0/main_gen.tf b/internal/service/s3/testdata/BucketCORSConfiguration/basic_v6.9.0/main_gen.tf new file mode 100644 index 000000000000..eb2fe8cdd46f --- /dev/null +++ b/internal/service/s3/testdata/BucketCORSConfiguration/basic_v6.9.0/main_gen.tf @@ -0,0 +1,32 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_s3_bucket_cors_configuration" "test" { + bucket = aws_s3_bucket.test.id + + cors_rule { + allowed_methods = ["PUT"] + allowed_origins = ["https://www.example.com"] + } +} + +resource "aws_s3_bucket" "test" { + bucket = var.rName +} + + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "6.9.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/s3/testdata/BucketCORSConfiguration/region_override/main_gen.tf b/internal/service/s3/testdata/BucketCORSConfiguration/region_override/main_gen.tf new file mode 100644 index 000000000000..d3975f69b7e2 --- /dev/null +++ b/internal/service/s3/testdata/BucketCORSConfiguration/region_override/main_gen.tf @@ -0,0 +1,32 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resource "aws_s3_bucket_cors_configuration" "test" { + region = var.region + + bucket = aws_s3_bucket.test.id + + cors_rule { + allowed_methods = ["PUT"] + allowed_origins = ["https://www.example.com"] + } +} + +resource "aws_s3_bucket" "test" { + region = var.region + + bucket = var.rName +} + + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} + +variable "region" { + description = "Region to deploy resource in" + type = string + nullable = false +} diff --git a/internal/service/s3/testdata/BucketLogging/basic/main_gen.tf b/internal/service/s3/testdata/BucketLogging/basic/main_gen.tf new file mode 100644 index 000000000000..5f2ddf4c570d --- /dev/null +++ b/internal/service/s3/testdata/BucketLogging/basic/main_gen.tf @@ -0,0 +1,39 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_s3_bucket_logging" "test" { + bucket = aws_s3_bucket.test.id + + target_bucket = aws_s3_bucket.log_bucket.id + target_prefix = "log/" +} + +resource "aws_s3_bucket" "log_bucket" { + bucket = "${var.rName}-log" +} + +resource "aws_s3_bucket_ownership_controls" "log_bucket_ownership" { + bucket = aws_s3_bucket.log_bucket.id + rule { + object_ownership = "BucketOwnerPreferred" + } +} + +resource "aws_s3_bucket_acl" "log_bucket_acl" { + depends_on = [aws_s3_bucket_ownership_controls.log_bucket_ownership] + + bucket = aws_s3_bucket.log_bucket.id + acl = "log-delivery-write" +} + +resource "aws_s3_bucket" "test" { + depends_on = [aws_s3_bucket_acl.log_bucket_acl] + + bucket = var.rName +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} diff --git a/internal/service/s3/testdata/BucketLogging/basic_v6.9.0/main_gen.tf b/internal/service/s3/testdata/BucketLogging/basic_v6.9.0/main_gen.tf new file mode 100644 index 000000000000..eb3695a032c1 --- /dev/null +++ b/internal/service/s3/testdata/BucketLogging/basic_v6.9.0/main_gen.tf @@ -0,0 +1,49 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_s3_bucket_logging" "test" { + bucket = aws_s3_bucket.test.id + + target_bucket = aws_s3_bucket.log_bucket.id + target_prefix = "log/" +} + +resource "aws_s3_bucket" "log_bucket" { + bucket = "${var.rName}-log" +} + +resource "aws_s3_bucket_ownership_controls" "log_bucket_ownership" { + bucket = aws_s3_bucket.log_bucket.id + rule { + object_ownership = "BucketOwnerPreferred" + } +} + +resource "aws_s3_bucket_acl" "log_bucket_acl" { + depends_on = [aws_s3_bucket_ownership_controls.log_bucket_ownership] + + bucket = aws_s3_bucket.log_bucket.id + acl = "log-delivery-write" +} + +resource "aws_s3_bucket" "test" { + depends_on = [aws_s3_bucket_acl.log_bucket_acl] + + bucket = var.rName +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "6.9.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/s3/testdata/BucketLogging/region_override/main_gen.tf b/internal/service/s3/testdata/BucketLogging/region_override/main_gen.tf new file mode 100644 index 000000000000..565e56751817 --- /dev/null +++ b/internal/service/s3/testdata/BucketLogging/region_override/main_gen.tf @@ -0,0 +1,55 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_s3_bucket_logging" "test" { + region = var.region + + bucket = aws_s3_bucket.test.id + + target_bucket = aws_s3_bucket.log_bucket.id + target_prefix = "log/" +} + +resource "aws_s3_bucket" "log_bucket" { + region = var.region + + bucket = "${var.rName}-log" +} + +resource "aws_s3_bucket_ownership_controls" "log_bucket_ownership" { + region = var.region + + bucket = aws_s3_bucket.log_bucket.id + rule { + object_ownership = "BucketOwnerPreferred" + } +} + +resource "aws_s3_bucket_acl" "log_bucket_acl" { + region = var.region + + depends_on = [aws_s3_bucket_ownership_controls.log_bucket_ownership] + + bucket = aws_s3_bucket.log_bucket.id + acl = "log-delivery-write" +} + +resource "aws_s3_bucket" "test" { + region = var.region + + depends_on = [aws_s3_bucket_acl.log_bucket_acl] + + bucket = var.rName +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} + +variable "region" { + description = "Region to deploy resource in" + type = string + nullable = false +} diff --git a/internal/service/s3/testdata/BucketNotification/basic/main_gen.tf b/internal/service/s3/testdata/BucketNotification/basic/main_gen.tf new file mode 100644 index 000000000000..01e41135dcda --- /dev/null +++ b/internal/service/s3/testdata/BucketNotification/basic/main_gen.tf @@ -0,0 +1,44 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_s3_bucket_notification" "test" { + bucket = aws_s3_bucket.test.id + + eventbridge = true +} + +resource "aws_s3_bucket" "test" { + bucket = var.rName +} + +resource "aws_s3_bucket_public_access_block" "test" { + bucket = aws_s3_bucket.test.id + + block_public_acls = false + block_public_policy = false + ignore_public_acls = false + restrict_public_buckets = false +} + +resource "aws_s3_bucket_ownership_controls" "test" { + bucket = aws_s3_bucket.test.id + rule { + object_ownership = "BucketOwnerPreferred" + } +} + +resource "aws_s3_bucket_acl" "test" { + depends_on = [ + aws_s3_bucket_public_access_block.test, + aws_s3_bucket_ownership_controls.test, + ] + + bucket = aws_s3_bucket.test.id + acl = "public-read" +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} diff --git a/internal/service/s3/testdata/BucketNotification/basic_v6.9.0/main_gen.tf b/internal/service/s3/testdata/BucketNotification/basic_v6.9.0/main_gen.tf new file mode 100644 index 000000000000..36e5b8bfae9b --- /dev/null +++ b/internal/service/s3/testdata/BucketNotification/basic_v6.9.0/main_gen.tf @@ -0,0 +1,54 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_s3_bucket_notification" "test" { + bucket = aws_s3_bucket.test.id + + eventbridge = true +} + +resource "aws_s3_bucket" "test" { + bucket = var.rName +} + +resource "aws_s3_bucket_public_access_block" "test" { + bucket = aws_s3_bucket.test.id + + block_public_acls = false + block_public_policy = false + ignore_public_acls = false + restrict_public_buckets = false +} + +resource "aws_s3_bucket_ownership_controls" "test" { + bucket = aws_s3_bucket.test.id + rule { + object_ownership = "BucketOwnerPreferred" + } +} + +resource "aws_s3_bucket_acl" "test" { + depends_on = [ + aws_s3_bucket_public_access_block.test, + aws_s3_bucket_ownership_controls.test, + ] + + bucket = aws_s3_bucket.test.id + acl = "public-read" +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "6.9.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/s3/testdata/BucketNotification/region_override/main_gen.tf b/internal/service/s3/testdata/BucketNotification/region_override/main_gen.tf new file mode 100644 index 000000000000..76efcd023de8 --- /dev/null +++ b/internal/service/s3/testdata/BucketNotification/region_override/main_gen.tf @@ -0,0 +1,60 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_s3_bucket_notification" "test" { + region = var.region + + bucket = aws_s3_bucket.test.id + + eventbridge = true +} + +resource "aws_s3_bucket" "test" { + region = var.region + + bucket = var.rName +} + +resource "aws_s3_bucket_public_access_block" "test" { + region = var.region + + bucket = aws_s3_bucket.test.id + + block_public_acls = false + block_public_policy = false + ignore_public_acls = false + restrict_public_buckets = false +} + +resource "aws_s3_bucket_ownership_controls" "test" { + region = var.region + + bucket = aws_s3_bucket.test.id + rule { + object_ownership = "BucketOwnerPreferred" + } +} + +resource "aws_s3_bucket_acl" "test" { + region = var.region + + depends_on = [ + aws_s3_bucket_public_access_block.test, + aws_s3_bucket_ownership_controls.test, + ] + + bucket = aws_s3_bucket.test.id + acl = "public-read" +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} + +variable "region" { + description = "Region to deploy resource in" + type = string + nullable = false +} diff --git a/internal/service/s3/testdata/BucketObject/basic_v6.0.0/main_gen.tf b/internal/service/s3/testdata/BucketObject/basic_v6.0.0/main_gen.tf new file mode 100644 index 000000000000..8e6d93e6f3cc --- /dev/null +++ b/internal/service/s3/testdata/BucketObject/basic_v6.0.0/main_gen.tf @@ -0,0 +1,35 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_s3_bucket_object" "test" { + # Must have bucket versioning enabled first + bucket = aws_s3_bucket_versioning.test.bucket + key = var.rName +} + +resource "aws_s3_bucket" "test" { + bucket = var.rName +} + +resource "aws_s3_bucket_versioning" "test" { + bucket = aws_s3_bucket.test.bucket + versioning_configuration { + status = "Enabled" + } +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "6.0.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/s3/testdata/BucketOwnershipControls/basic/main_gen.tf b/internal/service/s3/testdata/BucketOwnershipControls/basic/main_gen.tf new file mode 100644 index 000000000000..bc8f119f0660 --- /dev/null +++ b/internal/service/s3/testdata/BucketOwnershipControls/basic/main_gen.tf @@ -0,0 +1,19 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resource "aws_s3_bucket_ownership_controls" "test" { + bucket = aws_s3_bucket.test.bucket + rule { + object_ownership = "BucketOwnerPreferred" + } +} + +resource "aws_s3_bucket" "test" { + bucket = var.rName +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} diff --git a/internal/service/s3/testdata/BucketOwnershipControls/basic_v6.9.0/main_gen.tf b/internal/service/s3/testdata/BucketOwnershipControls/basic_v6.9.0/main_gen.tf new file mode 100644 index 000000000000..3f15dca1d240 --- /dev/null +++ b/internal/service/s3/testdata/BucketOwnershipControls/basic_v6.9.0/main_gen.tf @@ -0,0 +1,29 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_s3_bucket_ownership_controls" "test" { + bucket = aws_s3_bucket.test.bucket + rule { + object_ownership = "BucketOwnerPreferred" + } +} + +resource "aws_s3_bucket" "test" { + bucket = var.rName +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "6.9.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/s3/testdata/BucketOwnershipControls/region_override/main_gen.tf b/internal/service/s3/testdata/BucketOwnershipControls/region_override/main_gen.tf new file mode 100644 index 000000000000..8ba9fd4cd37e --- /dev/null +++ b/internal/service/s3/testdata/BucketOwnershipControls/region_override/main_gen.tf @@ -0,0 +1,29 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resource "aws_s3_bucket_ownership_controls" "test" { + region = var.region + + bucket = aws_s3_bucket.test.bucket + rule { + object_ownership = "BucketOwnerPreferred" + } +} + +resource "aws_s3_bucket" "test" { + region = var.region + + bucket = var.rName +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} + +variable "region" { + description = "Region to deploy resource in" + type = string + nullable = false +} diff --git a/internal/service/s3/testdata/BucketPolicy/basic/main_gen.tf b/internal/service/s3/testdata/BucketPolicy/basic/main_gen.tf new file mode 100644 index 000000000000..363784afe51d --- /dev/null +++ b/internal/service/s3/testdata/BucketPolicy/basic/main_gen.tf @@ -0,0 +1,41 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_s3_bucket_policy" "test" { + bucket = aws_s3_bucket.test.bucket + policy = data.aws_iam_policy_document.test.json +} + +data "aws_iam_policy_document" "test" { + statement { + effect = "Allow" + + actions = [ + "s3:*", + ] + + resources = [ + aws_s3_bucket.test.arn, + "${aws_s3_bucket.test.arn}/*", + ] + + principals { + type = "AWS" + identifiers = ["arn:${data.aws_partition.current.partition}:iam::${data.aws_caller_identity.current.account_id}:root"] + } + } +} + +data "aws_partition" "current" {} +data "aws_caller_identity" "current" {} + +resource "aws_s3_bucket" "test" { + bucket = var.rName +} + + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} diff --git a/internal/service/s3/testdata/BucketPolicy/basic_v6.9.0/main_gen.tf b/internal/service/s3/testdata/BucketPolicy/basic_v6.9.0/main_gen.tf new file mode 100644 index 000000000000..b0df7d52635b --- /dev/null +++ b/internal/service/s3/testdata/BucketPolicy/basic_v6.9.0/main_gen.tf @@ -0,0 +1,51 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_s3_bucket_policy" "test" { + bucket = aws_s3_bucket.test.bucket + policy = data.aws_iam_policy_document.test.json +} + +data "aws_iam_policy_document" "test" { + statement { + effect = "Allow" + + actions = [ + "s3:*", + ] + + resources = [ + aws_s3_bucket.test.arn, + "${aws_s3_bucket.test.arn}/*", + ] + + principals { + type = "AWS" + identifiers = ["arn:${data.aws_partition.current.partition}:iam::${data.aws_caller_identity.current.account_id}:root"] + } + } +} + +data "aws_partition" "current" {} +data "aws_caller_identity" "current" {} + +resource "aws_s3_bucket" "test" { + bucket = var.rName +} + + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "6.9.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/s3/testdata/BucketPolicy/region_override/main_gen.tf b/internal/service/s3/testdata/BucketPolicy/region_override/main_gen.tf new file mode 100644 index 000000000000..c7a0b75d8799 --- /dev/null +++ b/internal/service/s3/testdata/BucketPolicy/region_override/main_gen.tf @@ -0,0 +1,51 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_s3_bucket_policy" "test" { + region = var.region + + bucket = aws_s3_bucket.test.bucket + policy = data.aws_iam_policy_document.test.json +} + +data "aws_iam_policy_document" "test" { + statement { + effect = "Allow" + + actions = [ + "s3:*", + ] + + resources = [ + aws_s3_bucket.test.arn, + "${aws_s3_bucket.test.arn}/*", + ] + + principals { + type = "AWS" + identifiers = ["arn:${data.aws_partition.current.partition}:iam::${data.aws_caller_identity.current.account_id}:root"] + } + } +} + +data "aws_partition" "current" {} +data "aws_caller_identity" "current" {} + +resource "aws_s3_bucket" "test" { + region = var.region + + bucket = var.rName +} + + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} + +variable "region" { + description = "Region to deploy resource in" + type = string + nullable = false +} diff --git a/internal/service/s3/testdata/BucketPublicAccessBlock/basic/main_gen.tf b/internal/service/s3/testdata/BucketPublicAccessBlock/basic/main_gen.tf new file mode 100644 index 000000000000..40eccaeb9700 --- /dev/null +++ b/internal/service/s3/testdata/BucketPublicAccessBlock/basic/main_gen.tf @@ -0,0 +1,21 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_s3_bucket_public_access_block" "test" { + bucket = aws_s3_bucket.test.bucket + + block_public_acls = false + block_public_policy = false + ignore_public_acls = false + restrict_public_buckets = false +} + +resource "aws_s3_bucket" "test" { + bucket = var.rName +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} diff --git a/internal/service/s3/testdata/BucketPublicAccessBlock/basic_v6.9.0/main_gen.tf b/internal/service/s3/testdata/BucketPublicAccessBlock/basic_v6.9.0/main_gen.tf new file mode 100644 index 000000000000..f97aa4fb500a --- /dev/null +++ b/internal/service/s3/testdata/BucketPublicAccessBlock/basic_v6.9.0/main_gen.tf @@ -0,0 +1,31 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resource "aws_s3_bucket_public_access_block" "test" { + bucket = aws_s3_bucket.test.bucket + + block_public_acls = false + block_public_policy = false + ignore_public_acls = false + restrict_public_buckets = false +} + +resource "aws_s3_bucket" "test" { + bucket = var.rName +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "6.9.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/s3/testdata/BucketPublicAccessBlock/region_override/main_gen.tf b/internal/service/s3/testdata/BucketPublicAccessBlock/region_override/main_gen.tf new file mode 100644 index 000000000000..e66c9e826362 --- /dev/null +++ b/internal/service/s3/testdata/BucketPublicAccessBlock/region_override/main_gen.tf @@ -0,0 +1,31 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_s3_bucket_public_access_block" "test" { + region = var.region + + bucket = aws_s3_bucket.test.bucket + + block_public_acls = false + block_public_policy = false + ignore_public_acls = false + restrict_public_buckets = false +} + +resource "aws_s3_bucket" "test" { + region = var.region + + bucket = var.rName +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} + +variable "region" { + description = "Region to deploy resource in" + type = string + nullable = false +} diff --git a/internal/service/s3/testdata/BucketServerSideEncryptionConfiguration/basic/main_gen.tf b/internal/service/s3/testdata/BucketServerSideEncryptionConfiguration/basic/main_gen.tf new file mode 100644 index 000000000000..0f3ad24cded5 --- /dev/null +++ b/internal/service/s3/testdata/BucketServerSideEncryptionConfiguration/basic/main_gen.tf @@ -0,0 +1,23 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resource "aws_s3_bucket_server_side_encryption_configuration" "test" { + bucket = aws_s3_bucket.test.bucket + + rule { + # This is Amazon S3 bucket default encryption. + apply_server_side_encryption_by_default { + sse_algorithm = "AES256" + } + } +} + +resource "aws_s3_bucket" "test" { + bucket = var.rName +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} diff --git a/internal/service/s3/testdata/BucketServerSideEncryptionConfiguration/basic_v6.9.0/main_gen.tf b/internal/service/s3/testdata/BucketServerSideEncryptionConfiguration/basic_v6.9.0/main_gen.tf new file mode 100644 index 000000000000..178b41e13e1b --- /dev/null +++ b/internal/service/s3/testdata/BucketServerSideEncryptionConfiguration/basic_v6.9.0/main_gen.tf @@ -0,0 +1,33 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_s3_bucket_server_side_encryption_configuration" "test" { + bucket = aws_s3_bucket.test.bucket + + rule { + # This is Amazon S3 bucket default encryption. + apply_server_side_encryption_by_default { + sse_algorithm = "AES256" + } + } +} + +resource "aws_s3_bucket" "test" { + bucket = var.rName +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "6.9.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/s3/testdata/BucketServerSideEncryptionConfiguration/region_override/main_gen.tf b/internal/service/s3/testdata/BucketServerSideEncryptionConfiguration/region_override/main_gen.tf new file mode 100644 index 000000000000..91dcc705657c --- /dev/null +++ b/internal/service/s3/testdata/BucketServerSideEncryptionConfiguration/region_override/main_gen.tf @@ -0,0 +1,33 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resource "aws_s3_bucket_server_side_encryption_configuration" "test" { + region = var.region + + bucket = aws_s3_bucket.test.bucket + + rule { + # This is Amazon S3 bucket default encryption. + apply_server_side_encryption_by_default { + sse_algorithm = "AES256" + } + } +} + +resource "aws_s3_bucket" "test" { + region = var.region + + bucket = var.rName +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} + +variable "region" { + description = "Region to deploy resource in" + type = string + nullable = false +} diff --git a/internal/service/s3/testdata/BucketVersioning/basic/main_gen.tf b/internal/service/s3/testdata/BucketVersioning/basic/main_gen.tf new file mode 100644 index 000000000000..a3bcd988f41b --- /dev/null +++ b/internal/service/s3/testdata/BucketVersioning/basic/main_gen.tf @@ -0,0 +1,19 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_s3_bucket_versioning" "test" { + bucket = aws_s3_bucket.test.id + versioning_configuration { + status = "Enabled" + } +} + +resource "aws_s3_bucket" "test" { + bucket = var.rName +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} diff --git a/internal/service/s3/testdata/BucketVersioning/basic_v6.9.0/main_gen.tf b/internal/service/s3/testdata/BucketVersioning/basic_v6.9.0/main_gen.tf new file mode 100644 index 000000000000..c1967e09416a --- /dev/null +++ b/internal/service/s3/testdata/BucketVersioning/basic_v6.9.0/main_gen.tf @@ -0,0 +1,29 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resource "aws_s3_bucket_versioning" "test" { + bucket = aws_s3_bucket.test.id + versioning_configuration { + status = "Enabled" + } +} + +resource "aws_s3_bucket" "test" { + bucket = var.rName +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "6.9.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/s3/testdata/BucketVersioning/region_override/main_gen.tf b/internal/service/s3/testdata/BucketVersioning/region_override/main_gen.tf new file mode 100644 index 000000000000..e4214ed8f370 --- /dev/null +++ b/internal/service/s3/testdata/BucketVersioning/region_override/main_gen.tf @@ -0,0 +1,29 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_s3_bucket_versioning" "test" { + region = var.region + + bucket = aws_s3_bucket.test.id + versioning_configuration { + status = "Enabled" + } +} + +resource "aws_s3_bucket" "test" { + region = var.region + + bucket = var.rName +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} + +variable "region" { + description = "Region to deploy resource in" + type = string + nullable = false +} diff --git a/internal/service/s3/testdata/BucketWebsiteConfiguration/basic/main_gen.tf b/internal/service/s3/testdata/BucketWebsiteConfiguration/basic/main_gen.tf new file mode 100644 index 000000000000..01b1b1170238 --- /dev/null +++ b/internal/service/s3/testdata/BucketWebsiteConfiguration/basic/main_gen.tf @@ -0,0 +1,19 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resource "aws_s3_bucket_website_configuration" "test" { + bucket = aws_s3_bucket.test.id + index_document { + suffix = "index.html" + } +} + +resource "aws_s3_bucket" "test" { + bucket = var.rName +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} diff --git a/internal/service/s3/testdata/BucketWebsiteConfiguration/basic_v6.9.0/main_gen.tf b/internal/service/s3/testdata/BucketWebsiteConfiguration/basic_v6.9.0/main_gen.tf new file mode 100644 index 000000000000..32e719fd39ba --- /dev/null +++ b/internal/service/s3/testdata/BucketWebsiteConfiguration/basic_v6.9.0/main_gen.tf @@ -0,0 +1,29 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_s3_bucket_website_configuration" "test" { + bucket = aws_s3_bucket.test.id + index_document { + suffix = "index.html" + } +} + +resource "aws_s3_bucket" "test" { + bucket = var.rName +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "6.9.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/s3/testdata/BucketWebsiteConfiguration/region_override/main_gen.tf b/internal/service/s3/testdata/BucketWebsiteConfiguration/region_override/main_gen.tf new file mode 100644 index 000000000000..416f8f3fa139 --- /dev/null +++ b/internal/service/s3/testdata/BucketWebsiteConfiguration/region_override/main_gen.tf @@ -0,0 +1,29 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resource "aws_s3_bucket_website_configuration" "test" { + region = var.region + + bucket = aws_s3_bucket.test.id + index_document { + suffix = "index.html" + } +} + +resource "aws_s3_bucket" "test" { + region = var.region + + bucket = var.rName +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} + +variable "region" { + description = "Region to deploy resource in" + type = string + nullable = false +} diff --git a/internal/service/s3/testdata/Object/basic_v6.0.0/main_gen.tf b/internal/service/s3/testdata/Object/basic_v6.0.0/main_gen.tf new file mode 100644 index 000000000000..f3a6744eaf2a --- /dev/null +++ b/internal/service/s3/testdata/Object/basic_v6.0.0/main_gen.tf @@ -0,0 +1,35 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_s3_object" "test" { + # Must have bucket versioning enabled first + bucket = aws_s3_bucket_versioning.test.bucket + key = var.rName +} + +resource "aws_s3_bucket" "test" { + bucket = var.rName +} + +resource "aws_s3_bucket_versioning" "test" { + bucket = aws_s3_bucket.test.bucket + versioning_configuration { + status = "Enabled" + } +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "6.0.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/s3/testdata/tmpl/bucket_acl_basic.gtpl b/internal/service/s3/testdata/tmpl/bucket_acl_basic.gtpl new file mode 100644 index 000000000000..1296061057c4 --- /dev/null +++ b/internal/service/s3/testdata/tmpl/bucket_acl_basic.gtpl @@ -0,0 +1,20 @@ +resource "aws_s3_bucket_acl" "test" { +{{- template "region" }} + depends_on = [aws_s3_bucket_ownership_controls.test] + + bucket = aws_s3_bucket.test.bucket + acl = "private" +} + +resource "aws_s3_bucket" "test" { +{{- template "region" }} + bucket = var.rName +} + +resource "aws_s3_bucket_ownership_controls" "test" { +{{- template "region" }} + bucket = aws_s3_bucket.test.bucket + rule { + object_ownership = "BucketOwnerPreferred" + } +} diff --git a/internal/service/s3/testdata/tmpl/bucket_cors_configuration_basic.gtpl b/internal/service/s3/testdata/tmpl/bucket_cors_configuration_basic.gtpl new file mode 100644 index 000000000000..172e5a69deac --- /dev/null +++ b/internal/service/s3/testdata/tmpl/bucket_cors_configuration_basic.gtpl @@ -0,0 +1,15 @@ +resource "aws_s3_bucket_cors_configuration" "test" { +{{- template "region" }} + bucket = aws_s3_bucket.test.id + + cors_rule { + allowed_methods = ["PUT"] + allowed_origins = ["https://www.example.com"] + } +} + +resource "aws_s3_bucket" "test" { +{{- template "region" }} + bucket = var.rName +} + diff --git 
a/internal/service/s3/testdata/tmpl/bucket_logging_basic.gtpl b/internal/service/s3/testdata/tmpl/bucket_logging_basic.gtpl new file mode 100644 index 000000000000..47bb7a7df51a --- /dev/null +++ b/internal/service/s3/testdata/tmpl/bucket_logging_basic.gtpl @@ -0,0 +1,35 @@ +resource "aws_s3_bucket_logging" "test" { +{{- template "region" }} + bucket = aws_s3_bucket.test.id + + target_bucket = aws_s3_bucket.log_bucket.id + target_prefix = "log/" +} + +resource "aws_s3_bucket" "log_bucket" { +{{- template "region" }} + bucket = "${var.rName}-log" +} + +resource "aws_s3_bucket_ownership_controls" "log_bucket_ownership" { +{{- template "region" }} + bucket = aws_s3_bucket.log_bucket.id + rule { + object_ownership = "BucketOwnerPreferred" + } +} + +resource "aws_s3_bucket_acl" "log_bucket_acl" { +{{- template "region" }} + depends_on = [aws_s3_bucket_ownership_controls.log_bucket_ownership] + + bucket = aws_s3_bucket.log_bucket.id + acl = "log-delivery-write" +} + +resource "aws_s3_bucket" "test" { +{{- template "region" }} + depends_on = [aws_s3_bucket_acl.log_bucket_acl] + + bucket = var.rName +} diff --git a/internal/service/s3/testdata/tmpl/bucket_notification_basic.gtpl b/internal/service/s3/testdata/tmpl/bucket_notification_basic.gtpl new file mode 100644 index 000000000000..c1ea042b9dac --- /dev/null +++ b/internal/service/s3/testdata/tmpl/bucket_notification_basic.gtpl @@ -0,0 +1,40 @@ +resource "aws_s3_bucket_notification" "test" { +{{- template "region" }} + bucket = aws_s3_bucket.test.id + + eventbridge = true +} + +resource "aws_s3_bucket" "test" { +{{- template "region" }} + bucket = var.rName +} + +resource "aws_s3_bucket_public_access_block" "test" { +{{- template "region" }} + bucket = aws_s3_bucket.test.id + + block_public_acls = false + block_public_policy = false + ignore_public_acls = false + restrict_public_buckets = false +} + +resource "aws_s3_bucket_ownership_controls" "test" { +{{- template "region" }} + bucket = aws_s3_bucket.test.id + rule { 
+ object_ownership = "BucketOwnerPreferred" + } +} + +resource "aws_s3_bucket_acl" "test" { +{{- template "region" }} + depends_on = [ + aws_s3_bucket_public_access_block.test, + aws_s3_bucket_ownership_controls.test, + ] + + bucket = aws_s3_bucket.test.id + acl = "public-read" +} diff --git a/internal/service/s3/testdata/tmpl/bucket_ownership_controls_basic.gtpl b/internal/service/s3/testdata/tmpl/bucket_ownership_controls_basic.gtpl new file mode 100644 index 000000000000..220e178916bc --- /dev/null +++ b/internal/service/s3/testdata/tmpl/bucket_ownership_controls_basic.gtpl @@ -0,0 +1,12 @@ +resource "aws_s3_bucket_ownership_controls" "test" { +{{- template "region" }} + bucket = aws_s3_bucket.test.bucket + rule { + object_ownership = "BucketOwnerPreferred" + } +} + +resource "aws_s3_bucket" "test" { +{{- template "region" }} + bucket = var.rName +} diff --git a/internal/service/s3/testdata/tmpl/bucket_policy_basic.gtpl b/internal/service/s3/testdata/tmpl/bucket_policy_basic.gtpl new file mode 100644 index 000000000000..20108aee9f27 --- /dev/null +++ b/internal/service/s3/testdata/tmpl/bucket_policy_basic.gtpl @@ -0,0 +1,34 @@ +resource "aws_s3_bucket_policy" "test" { +{{- template "region" }} + bucket = aws_s3_bucket.test.bucket + policy = data.aws_iam_policy_document.test.json +} + +data "aws_iam_policy_document" "test" { + statement { + effect = "Allow" + + actions = [ + "s3:*", + ] + + resources = [ + aws_s3_bucket.test.arn, + "${aws_s3_bucket.test.arn}/*", + ] + + principals { + type = "AWS" + identifiers = ["arn:${data.aws_partition.current.partition}:iam::${data.aws_caller_identity.current.account_id}:root"] + } + } +} + +data "aws_partition" "current" {} +data "aws_caller_identity" "current" {} + +resource "aws_s3_bucket" "test" { +{{- template "region" }} + bucket = var.rName +} + diff --git a/internal/service/s3/testdata/tmpl/bucket_public_access_block_basic.gtpl b/internal/service/s3/testdata/tmpl/bucket_public_access_block_basic.gtpl new file mode 
100644 index 000000000000..acfec5bf3d80 --- /dev/null +++ b/internal/service/s3/testdata/tmpl/bucket_public_access_block_basic.gtpl @@ -0,0 +1,14 @@ +resource "aws_s3_bucket_public_access_block" "test" { +{{- template "region" }} + bucket = aws_s3_bucket.test.bucket + + block_public_acls = false + block_public_policy = false + ignore_public_acls = false + restrict_public_buckets = false +} + +resource "aws_s3_bucket" "test" { +{{- template "region" }} + bucket = var.rName +} diff --git a/internal/service/s3/testdata/tmpl/bucket_server_side_encryption_configuration_basic.gtpl b/internal/service/s3/testdata/tmpl/bucket_server_side_encryption_configuration_basic.gtpl new file mode 100644 index 000000000000..72e6ab458f73 --- /dev/null +++ b/internal/service/s3/testdata/tmpl/bucket_server_side_encryption_configuration_basic.gtpl @@ -0,0 +1,16 @@ +resource "aws_s3_bucket_server_side_encryption_configuration" "test" { +{{- template "region" }} + bucket = aws_s3_bucket.test.bucket + + rule { + # This is Amazon S3 bucket default encryption. 
+ apply_server_side_encryption_by_default { + sse_algorithm = "AES256" + } + } +} + +resource "aws_s3_bucket" "test" { +{{- template "region" }} + bucket = var.rName +} diff --git a/internal/service/s3/testdata/tmpl/bucket_versioning_basic.gtpl b/internal/service/s3/testdata/tmpl/bucket_versioning_basic.gtpl new file mode 100644 index 000000000000..389d1211e1da --- /dev/null +++ b/internal/service/s3/testdata/tmpl/bucket_versioning_basic.gtpl @@ -0,0 +1,12 @@ +resource "aws_s3_bucket_versioning" "test" { +{{- template "region" }} + bucket = aws_s3_bucket.test.id + versioning_configuration { + status = "Enabled" + } +} + +resource "aws_s3_bucket" "test" { +{{- template "region" }} + bucket = var.rName +} diff --git a/internal/service/s3/testdata/tmpl/bucket_website_configuration_basic.gtpl b/internal/service/s3/testdata/tmpl/bucket_website_configuration_basic.gtpl new file mode 100644 index 000000000000..42b858cf3b52 --- /dev/null +++ b/internal/service/s3/testdata/tmpl/bucket_website_configuration_basic.gtpl @@ -0,0 +1,12 @@ +resource "aws_s3_bucket_website_configuration" "test" { +{{- template "region" }} + bucket = aws_s3_bucket.test.id + index_document { + suffix = "index.html" + } +} + +resource "aws_s3_bucket" "test" { +{{- template "region" }} + bucket = var.rName +} diff --git a/internal/service/s3control/access_grant.go b/internal/service/s3control/access_grant.go index 29a955167dbc..52c2db9ec177 100644 --- a/internal/service/s3control/access_grant.go +++ b/internal/service/s3control/access_grant.go @@ -33,7 +33,7 @@ import ( ) // @FrameworkResource("aws_s3control_access_grant", name="Access Grant") -// @Tags +// @Tags(identifierAttribute="access_grant_arn") func newAccessGrantResource(context.Context) (resource.ResourceWithConfigure, error) { r := &accessGrantResource{} @@ -82,6 +82,7 @@ func (r *accessGrantResource) Schema(ctx context.Context, request resource.Schem stringplanmodifier.UseStateForUnknown(), }, }, + names.AttrID: framework.IDAttribute(), 
"permission": schema.StringAttribute{ CustomType: fwtypes.StringEnumType[awstypes.Permission](), Required: true, @@ -89,7 +90,6 @@ func (r *accessGrantResource) Schema(ctx context.Context, request resource.Schem stringplanmodifier.RequiresReplace(), }, }, - names.AttrID: framework.IDAttribute(), "s3_prefix_type": schema.StringAttribute{ CustomType: fwtypes.StringEnumType[awstypes.S3PrefixType](), Optional: true, @@ -153,29 +153,28 @@ func (r *accessGrantResource) Schema(ctx context.Context, request resource.Schem func (r *accessGrantResource) Create(ctx context.Context, request resource.CreateRequest, response *resource.CreateResponse) { var data accessGrantResourceModel - response.Diagnostics.Append(request.Plan.Get(ctx, &data)...) - if response.Diagnostics.HasError() { return } + if data.AccountID.IsUnknown() { + data.AccountID = fwflex.StringValueToFramework(ctx, r.Meta().AccountID(ctx)) + } conn := r.Meta().S3ControlClient(ctx) - if data.AccountID.ValueString() == "" { - data.AccountID = types.StringValue(r.Meta().AccountID(ctx)) - } - input := &s3control.CreateAccessGrantInput{} - response.Diagnostics.Append(fwflex.Expand(ctx, data, input)...) + var input s3control.CreateAccessGrantInput + response.Diagnostics.Append(fwflex.Expand(ctx, data, &input)...) if response.Diagnostics.HasError() { return } + // Additional fields. input.Tags = getTagsIn(ctx) // "InvalidRequest: Invalid Grantee in the request". - outputRaw, err := tfresource.RetryWhenAWSErrMessageContains(ctx, s3PropagationTimeout, func() (any, error) { - return conn.CreateAccessGrant(ctx, input) + outputRaw, err := tfresource.RetryWhenAWSErrMessageContains(ctx, s3PropagationTimeout, func(ctx context.Context) (any, error) { + return conn.CreateAccessGrant(ctx, &input) }, errCodeInvalidRequest, "Invalid Grantee in the request") if err != nil { @@ -186,30 +185,29 @@ func (r *accessGrantResource) Create(ctx context.Context, request resource.Creat // Set values for unknowns. 
output := outputRaw.(*s3control.CreateAccessGrantOutput) - data.AccessGrantARN = fwflex.StringToFramework(ctx, output.AccessGrantArn) - data.AccessGrantID = fwflex.StringToFramework(ctx, output.AccessGrantId) - data.GrantScope = fwflex.StringToFramework(ctx, output.GrantScope) + response.Diagnostics.Append(fwflex.Flatten(ctx, output, &data)...) + if response.Diagnostics.HasError() { + return + } id, err := data.setID() if err != nil { - response.Diagnostics.AddError("creating S3 Access Grant", err.Error()) + response.Diagnostics.Append(fwdiag.NewCreatingResourceIDErrorDiagnostic(err)) return } - data.ID = types.StringValue(id) + data.ID = fwflex.StringValueToFramework(ctx, id) response.Diagnostics.Append(response.State.Set(ctx, &data)...) } func (r *accessGrantResource) Read(ctx context.Context, request resource.ReadRequest, response *resource.ReadResponse) { var data accessGrantResourceModel - response.Diagnostics.Append(request.State.Get(ctx, &data)...) - if response.Diagnostics.HasError() { return } if err := data.InitFromID(); err != nil { - response.Diagnostics.AddError("parsing resource ID", err.Error()) + response.Diagnostics.Append(fwdiag.NewParsingResourceIDErrorDiagnostic(err)) return } @@ -241,62 +239,23 @@ func (r *accessGrantResource) Read(ctx context.Context, request resource.ReadReq return } - tags, err := listTags(ctx, conn, data.AccessGrantARN.ValueString(), data.AccountID.ValueString()) - - if err != nil { - response.Diagnostics.AddError(fmt.Sprintf("listing tags for S3 Access Grant (%s)", data.ID.ValueString()), err.Error()) - - return - } - - setTagsOut(ctx, svcTags(tags)) - response.Diagnostics.Append(response.State.Set(ctx, &data)...) } -func (r *accessGrantResource) Update(ctx context.Context, request resource.UpdateRequest, response *resource.UpdateResponse) { - var old, new accessGrantResourceModel - - response.Diagnostics.Append(request.State.Get(ctx, &old)...) 
- - if response.Diagnostics.HasError() { - return - } - - response.Diagnostics.Append(request.Plan.Get(ctx, &new)...) - - if response.Diagnostics.HasError() { - return - } - - conn := r.Meta().S3ControlClient(ctx) - - if oldTagsAll, newTagsAll := old.TagsAll, new.TagsAll; !newTagsAll.Equal(oldTagsAll) { - if err := updateTags(ctx, conn, new.AccessGrantARN.ValueString(), new.AccountID.ValueString(), oldTagsAll, newTagsAll); err != nil { - response.Diagnostics.AddError(fmt.Sprintf("updating tags for S3 Access Grant (%s)", new.ID.ValueString()), err.Error()) - - return - } - } - - response.Diagnostics.Append(response.State.Set(ctx, &new)...) -} - func (r *accessGrantResource) Delete(ctx context.Context, request resource.DeleteRequest, response *resource.DeleteResponse) { var data accessGrantResourceModel - response.Diagnostics.Append(request.State.Get(ctx, &data)...) - if response.Diagnostics.HasError() { return } conn := r.Meta().S3ControlClient(ctx) - _, err := conn.DeleteAccessGrant(ctx, &s3control.DeleteAccessGrantInput{ + input := s3control.DeleteAccessGrantInput{ AccessGrantId: fwflex.StringFromFramework(ctx, data.AccessGrantID), AccountId: fwflex.StringFromFramework(ctx, data.AccountID), - }) + } + _, err := conn.DeleteAccessGrant(ctx, &input) if tfawserr.ErrHTTPStatusCodeEquals(err, http.StatusNotFound) { return @@ -310,11 +269,15 @@ func (r *accessGrantResource) Delete(ctx context.Context, request resource.Delet } func findAccessGrantByTwoPartKey(ctx context.Context, conn *s3control.Client, accountID, grantID string) (*s3control.GetAccessGrantOutput, error) { - input := &s3control.GetAccessGrantInput{ + input := s3control.GetAccessGrantInput{ AccessGrantId: aws.String(grantID), AccountId: aws.String(accountID), } + return findAccessGrant(ctx, conn, &input) +} + +func findAccessGrant(ctx context.Context, conn *s3control.Client, input *s3control.GetAccessGrantInput) (*s3control.GetAccessGrantOutput, error) { output, err := conn.GetAccessGrant(ctx, input) if 
tfawserr.ErrHTTPStatusCodeEquals(err, http.StatusNotFound) { diff --git a/internal/service/s3control/access_grants_instance.go b/internal/service/s3control/access_grants_instance.go index 887f2046f79e..0142a4480a42 100644 --- a/internal/service/s3control/access_grants_instance.go +++ b/internal/service/s3control/access_grants_instance.go @@ -20,7 +20,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-provider-aws/internal/errs/fwdiag" "github.com/hashicorp/terraform-provider-aws/internal/framework" - "github.com/hashicorp/terraform-provider-aws/internal/framework/flex" + fwflex "github.com/hashicorp/terraform-provider-aws/internal/framework/flex" fwtypes "github.com/hashicorp/terraform-provider-aws/internal/framework/types" fwvalidators "github.com/hashicorp/terraform-provider-aws/internal/framework/validators" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" @@ -29,7 +29,7 @@ import ( ) // @FrameworkResource("aws_s3control_access_grants_instance", name="Access Grants Instance") -// @Tags +// @Tags(identifierAttribute="access_grants_instance_arn") func newAccessGrantsInstanceResource(context.Context) (resource.ResourceWithConfigure, error) { r := &accessGrantsInstanceResource{} @@ -85,59 +85,56 @@ func (r *accessGrantsInstanceResource) Schema(ctx context.Context, request resou func (r *accessGrantsInstanceResource) Create(ctx context.Context, request resource.CreateRequest, response *resource.CreateResponse) { var data accessGrantsInstanceResourceModel - response.Diagnostics.Append(request.Plan.Get(ctx, &data)...) 
- if response.Diagnostics.HasError() { return } + if data.AccountID.IsUnknown() { + data.AccountID = fwflex.StringValueToFramework(ctx, r.Meta().AccountID(ctx)) + } conn := r.Meta().S3ControlClient(ctx) - if data.AccountID.ValueString() == "" { - data.AccountID = types.StringValue(r.Meta().AccountID(ctx)) - } - input := &s3control.CreateAccessGrantsInstanceInput{ - AccountId: flex.StringFromFramework(ctx, data.AccountID), - IdentityCenterArn: flex.StringFromFramework(ctx, data.IdentityCenterARN), - Tags: getTagsIn(ctx), + accountID := fwflex.StringValueFromFramework(ctx, data.AccountID) + var input s3control.CreateAccessGrantsInstanceInput + response.Diagnostics.Append(fwflex.Expand(ctx, data, &input)...) + if response.Diagnostics.HasError() { + return } - output, err := conn.CreateAccessGrantsInstance(ctx, input) + // Additional fields. + input.Tags = getTagsIn(ctx) + + output, err := conn.CreateAccessGrantsInstance(ctx, &input) if err != nil { - response.Diagnostics.AddError(fmt.Sprintf("creating S3 Access Grants Instance (%s)", data.AccountID.ValueString()), err.Error()) + response.Diagnostics.AddError(fmt.Sprintf("creating S3 Access Grants Instance (%s)", accountID), err.Error()) return } // Set values for unknowns. - data.AccessGrantsInstanceARN = flex.StringToFramework(ctx, output.AccessGrantsInstanceArn) - data.AccessGrantsInstanceID = flex.StringToFramework(ctx, output.AccessGrantsInstanceId) - data.IdentityCenterApplicationARN = flex.StringToFramework(ctx, output.IdentityCenterArn) - data.setID() + data.ID = fwflex.StringValueToFramework(ctx, accountID) + // Backwards compatibility, don't use AutoFlEx. + data.AccessGrantsInstanceARN = fwflex.StringToFramework(ctx, output.AccessGrantsInstanceArn) + data.AccessGrantsInstanceID = fwflex.StringToFramework(ctx, output.AccessGrantsInstanceId) + data.IdentityCenterApplicationARN = fwflex.StringToFramework(ctx, output.IdentityCenterArn) response.Diagnostics.Append(response.State.Set(ctx, &data)...) 
} func (r *accessGrantsInstanceResource) Read(ctx context.Context, request resource.ReadRequest, response *resource.ReadResponse) { var data accessGrantsInstanceResourceModel - response.Diagnostics.Append(request.State.Get(ctx, &data)...) - if response.Diagnostics.HasError() { return } - - if err := data.InitFromID(); err != nil { - response.Diagnostics.AddError("parsing resource ID", err.Error()) - - return - } + data.AccountID = data.ID // From import. conn := r.Meta().S3ControlClient(ctx) - output, err := findAccessGrantsInstance(ctx, conn, data.AccountID.ValueString()) + accountID := fwflex.StringValueFromFramework(ctx, data.AccountID) + output, err := findAccessGrantsInstanceByID(ctx, conn, accountID) if tfresource.NotFound(err) { response.Diagnostics.Append(fwdiag.NewResourceNotFoundWarningDiagnostic(err)) @@ -147,135 +144,118 @@ func (r *accessGrantsInstanceResource) Read(ctx context.Context, request resourc } if err != nil { - response.Diagnostics.AddError(fmt.Sprintf("reading S3 Access Grants Instance (%s)", data.ID.ValueString()), err.Error()) + response.Diagnostics.AddError(fmt.Sprintf("reading S3 Access Grants Instance (%s)", accountID), err.Error()) return } // Set attributes for import. - data.AccessGrantsInstanceARN = flex.StringToFramework(ctx, output.AccessGrantsInstanceArn) - data.AccessGrantsInstanceID = flex.StringToFramework(ctx, output.AccessGrantsInstanceId) - data.IdentityCenterApplicationARN = flex.StringToFramework(ctx, output.IdentityCenterArn) - - tags, err := listTags(ctx, conn, data.AccessGrantsInstanceARN.ValueString(), data.AccountID.ValueString()) - - if err != nil { - response.Diagnostics.AddError(fmt.Sprintf("listing tags for S3 Access Grants Instance (%s)", data.ID.ValueString()), err.Error()) - - return - } - - setTagsOut(ctx, svcTags(tags)) + // Backwards compatibility, don't use AutoFlEx. 
+ data.AccessGrantsInstanceARN = fwflex.StringToFramework(ctx, output.AccessGrantsInstanceArn) + data.AccessGrantsInstanceID = fwflex.StringToFramework(ctx, output.AccessGrantsInstanceId) + data.IdentityCenterApplicationARN = fwflex.StringToFramework(ctx, output.IdentityCenterArn) response.Diagnostics.Append(response.State.Set(ctx, &data)...) } func (r *accessGrantsInstanceResource) Update(ctx context.Context, request resource.UpdateRequest, response *resource.UpdateResponse) { var old, new accessGrantsInstanceResourceModel - response.Diagnostics.Append(request.State.Get(ctx, &old)...) - if response.Diagnostics.HasError() { return } - response.Diagnostics.Append(request.Plan.Get(ctx, &new)...) - if response.Diagnostics.HasError() { return } conn := r.Meta().S3ControlClient(ctx) - if oldARN, newARN := old.IdentityCenterARN, new.IdentityCenterARN; !newARN.Equal(oldARN) { + if accountID, oldARN, newARN := fwflex.StringValueFromFramework(ctx, new.AccountID), old.IdentityCenterARN, new.IdentityCenterARN; !newARN.Equal(oldARN) { if !oldARN.IsNull() { - if err := disassociateAccessGrantsInstanceIdentityCenterInstance(ctx, conn, old.ID.ValueString()); err != nil { - response.Diagnostics.AddError(fmt.Sprintf("dissociating S3 Access Grants Instance (%s) IAM Identity Center instance", old.ID.ValueString()), err.Error()) + if err := disassociateAccessGrantsInstanceIdentityCenterInstance(ctx, conn, accountID); err != nil { + response.Diagnostics.AddError(fmt.Sprintf("dissociating S3 Access Grants Instance (%s) IAM Identity Center instance", accountID), err.Error()) return } } if !newARN.IsNull() { - if err := associateAccessGrantsInstanceIdentityCenterInstance(ctx, conn, new.ID.ValueString(), newARN.ValueString()); err != nil { - response.Diagnostics.AddError(fmt.Sprintf("associating S3 Access Grants Instance (%s) IAM Identity Center instance (%s)", new.ID.ValueString(), newARN.ValueString()), err.Error()) + if err := associateAccessGrantsInstanceIdentityCenterInstance(ctx, 
conn, accountID, newARN.ValueString()); err != nil { + response.Diagnostics.AddError(fmt.Sprintf("associating S3 Access Grants Instance (%s) IAM Identity Center instance (%s)", accountID, newARN.ValueString()), err.Error()) return } } } - if oldTagsAll, newTagsAll := old.TagsAll, new.TagsAll; !newTagsAll.Equal(oldTagsAll) { - if err := updateTags(ctx, conn, new.AccessGrantsInstanceARN.ValueString(), new.AccountID.ValueString(), oldTagsAll, newTagsAll); err != nil { - response.Diagnostics.AddError(fmt.Sprintf("updating tags for S3 Access Grants Instance (%s)", new.ID.ValueString()), err.Error()) - - return - } - } - response.Diagnostics.Append(response.State.Set(ctx, &new)...) } func (r *accessGrantsInstanceResource) Delete(ctx context.Context, request resource.DeleteRequest, response *resource.DeleteResponse) { var data accessGrantsInstanceResourceModel - response.Diagnostics.Append(request.State.Get(ctx, &data)...) - if response.Diagnostics.HasError() { return } conn := r.Meta().S3ControlClient(ctx) + accountID := fwflex.StringValueFromFramework(ctx, data.AccountID) if !data.IdentityCenterARN.IsNull() { - if err := disassociateAccessGrantsInstanceIdentityCenterInstance(ctx, conn, data.ID.ValueString()); err != nil { - response.Diagnostics.AddError(fmt.Sprintf("dissociating S3 Access Grants Instance (%s) IAM Identity Center instance", data.ID.ValueString()), err.Error()) + if err := disassociateAccessGrantsInstanceIdentityCenterInstance(ctx, conn, accountID); err != nil { + response.Diagnostics.AddError(fmt.Sprintf("dissociating S3 Access Grants Instance (%s) IAM Identity Center instance", accountID), err.Error()) return } } - _, err := conn.DeleteAccessGrantsInstance(ctx, &s3control.DeleteAccessGrantsInstanceInput{ - AccountId: flex.StringFromFramework(ctx, data.AccountID), - }) + input := s3control.DeleteAccessGrantsInstanceInput{ + AccountId: aws.String(accountID), + } + _, err := conn.DeleteAccessGrantsInstance(ctx, &input) if 
tfawserr.ErrHTTPStatusCodeEquals(err, http.StatusNotFound) { return } if err != nil { - response.Diagnostics.AddError(fmt.Sprintf("deleting S3 Access Grants Instance (%s)", data.ID.ValueString()), err.Error()) + response.Diagnostics.AddError(fmt.Sprintf("deleting S3 Access Grants Instance (%s)", accountID), err.Error()) return } } func associateAccessGrantsInstanceIdentityCenterInstance(ctx context.Context, conn *s3control.Client, accountID, identityCenterARN string) error { - input := &s3control.AssociateAccessGrantsIdentityCenterInput{ + input := s3control.AssociateAccessGrantsIdentityCenterInput{ AccountId: aws.String(accountID), IdentityCenterArn: aws.String(identityCenterARN), } - _, err := conn.AssociateAccessGrantsIdentityCenter(ctx, input) + _, err := conn.AssociateAccessGrantsIdentityCenter(ctx, &input) return err } func disassociateAccessGrantsInstanceIdentityCenterInstance(ctx context.Context, conn *s3control.Client, accountID string) error { - input := &s3control.DissociateAccessGrantsIdentityCenterInput{ + input := s3control.DissociateAccessGrantsIdentityCenterInput{ AccountId: aws.String(accountID), } - _, err := conn.DissociateAccessGrantsIdentityCenter(ctx, input) + _, err := conn.DissociateAccessGrantsIdentityCenter(ctx, &input) return err } -func findAccessGrantsInstance(ctx context.Context, conn *s3control.Client, accountID string) (*s3control.GetAccessGrantsInstanceOutput, error) { - input := &s3control.GetAccessGrantsInstanceInput{ +func findAccessGrantsInstanceByID(ctx context.Context, conn *s3control.Client, accountID string) (*s3control.GetAccessGrantsInstanceOutput, error) { + input := s3control.GetAccessGrantsInstanceInput{ AccountId: aws.String(accountID), } + return findAccessGrantsInstance(ctx, conn, &input) +} + +func findAccessGrantsInstance(ctx context.Context, conn *s3control.Client, input *s3control.GetAccessGrantsInstanceInput) (*s3control.GetAccessGrantsInstanceOutput, error) { output, err := conn.GetAccessGrantsInstance(ctx, 
input) if tfawserr.ErrHTTPStatusCodeEquals(err, http.StatusNotFound) { @@ -307,13 +287,3 @@ type accessGrantsInstanceResourceModel struct { Tags tftags.Map `tfsdk:"tags"` TagsAll tftags.Map `tfsdk:"tags_all"` } - -func (data *accessGrantsInstanceResourceModel) InitFromID() error { - data.AccountID = data.ID - - return nil -} - -func (data *accessGrantsInstanceResourceModel) setID() { - data.ID = data.AccountID -} diff --git a/internal/service/s3control/access_grants_instance_test.go b/internal/service/s3control/access_grants_instance_test.go index 4c9b5e01d70b..c6aeec7441ad 100644 --- a/internal/service/s3control/access_grants_instance_test.go +++ b/internal/service/s3control/access_grants_instance_test.go @@ -167,7 +167,7 @@ func testAccCheckAccessGrantsInstanceDestroy(ctx context.Context) resource.TestC continue } - _, err := tfs3control.FindAccessGrantsInstance(ctx, conn, rs.Primary.ID) + _, err := tfs3control.FindAccessGrantsInstanceByID(ctx, conn, rs.Primary.ID) if tfresource.NotFound(err) { continue @@ -193,7 +193,7 @@ func testAccCheckAccessGrantsInstanceExists(ctx context.Context, n string) resou conn := acctest.Provider.Meta().(*conns.AWSClient).S3ControlClient(ctx) - _, err := tfs3control.FindAccessGrantsInstance(ctx, conn, rs.Primary.ID) + _, err := tfs3control.FindAccessGrantsInstanceByID(ctx, conn, rs.Primary.ID) return err } diff --git a/internal/service/s3control/access_grants_location.go b/internal/service/s3control/access_grants_location.go index 2fb2745d8d51..81bfb98492e3 100644 --- a/internal/service/s3control/access_grants_location.go +++ b/internal/service/s3control/access_grants_location.go @@ -30,7 +30,7 @@ import ( ) // @FrameworkResource("aws_s3control_access_grants_location", name="Access Grants Location") -// @Tags +// @Tags(identifierAttribute="access_grants_location_arn") func newAccessGrantsLocationResource(context.Context) (resource.ResourceWithConfigure, error) { r := &accessGrantsLocationResource{} @@ -71,13 +71,13 @@ func (r 
*accessGrantsLocationResource) Schema(ctx context.Context, request resou CustomType: fwtypes.ARNType, Required: true, }, + names.AttrID: framework.IDAttribute(), "location_scope": schema.StringAttribute{ Required: true, PlanModifiers: []planmodifier.String{ stringplanmodifier.RequiresReplace(), }, }, - names.AttrID: framework.IDAttribute(), names.AttrTags: tftags.TagsAttribute(), names.AttrTagsAll: tftags.TagsAttributeComputedOnly(), }, @@ -86,28 +86,27 @@ func (r *accessGrantsLocationResource) Schema(ctx context.Context, request resou func (r *accessGrantsLocationResource) Create(ctx context.Context, request resource.CreateRequest, response *resource.CreateResponse) { var data accessGrantsLocationResourceModel - response.Diagnostics.Append(request.Plan.Get(ctx, &data)...) - if response.Diagnostics.HasError() { return } + if data.AccountID.IsUnknown() { + data.AccountID = fwflex.StringValueToFramework(ctx, r.Meta().AccountID(ctx)) + } conn := r.Meta().S3ControlClient(ctx) - if data.AccountID.ValueString() == "" { - data.AccountID = types.StringValue(r.Meta().AccountID(ctx)) - } - input := &s3control.CreateAccessGrantsLocationInput{} - response.Diagnostics.Append(fwflex.Expand(ctx, data, input)...) + var input s3control.CreateAccessGrantsLocationInput + response.Diagnostics.Append(fwflex.Expand(ctx, data, &input)...) if response.Diagnostics.HasError() { return } + // Additional fields. input.Tags = getTagsIn(ctx) - outputRaw, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, s3PropagationTimeout, func() (any, error) { - return conn.CreateAccessGrantsLocation(ctx, input) + outputRaw, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, s3PropagationTimeout, func(ctx context.Context) (any, error) { + return conn.CreateAccessGrantsLocation(ctx, &input) }, errCodeInvalidIAMRole) if err != nil { @@ -118,29 +117,29 @@ func (r *accessGrantsLocationResource) Create(ctx context.Context, request resou // Set values for unknowns. 
output := outputRaw.(*s3control.CreateAccessGrantsLocationOutput) - data.AccessGrantsLocationARN = fwflex.StringToFramework(ctx, output.AccessGrantsLocationArn) - data.AccessGrantsLocationID = fwflex.StringToFramework(ctx, output.AccessGrantsLocationId) + response.Diagnostics.Append(fwflex.Flatten(ctx, output, &data)...) + if response.Diagnostics.HasError() { + return + } id, err := data.setID() if err != nil { - response.Diagnostics.AddError(fmt.Sprintf("creating S3 Access Grants Location (%s)", data.LocationScope.ValueString()), err.Error()) + response.Diagnostics.Append(fwdiag.NewCreatingResourceIDErrorDiagnostic(err)) return } - data.ID = types.StringValue(id) + data.ID = fwflex.StringValueToFramework(ctx, id) response.Diagnostics.Append(response.State.Set(ctx, &data)...) } func (r *accessGrantsLocationResource) Read(ctx context.Context, request resource.ReadRequest, response *resource.ReadResponse) { var data accessGrantsLocationResourceModel - response.Diagnostics.Append(request.State.Get(ctx, &data)...) - if response.Diagnostics.HasError() { return } if err := data.InitFromID(); err != nil { - response.Diagnostics.AddError("parsing resource ID", err.Error()) + response.Diagnostics.Append(fwdiag.NewParsingResourceIDErrorDiagnostic(err)) return } @@ -168,30 +167,16 @@ func (r *accessGrantsLocationResource) Read(ctx context.Context, request resourc return } - tags, err := listTags(ctx, conn, data.AccessGrantsLocationARN.ValueString(), data.AccountID.ValueString()) - - if err != nil { - response.Diagnostics.AddError(fmt.Sprintf("listing tags for S3 Access Grants Location (%s)", data.ID.ValueString()), err.Error()) - - return - } - - setTagsOut(ctx, svcTags(tags)) - response.Diagnostics.Append(response.State.Set(ctx, &data)...) 
} func (r *accessGrantsLocationResource) Update(ctx context.Context, request resource.UpdateRequest, response *resource.UpdateResponse) { var old, new accessGrantsLocationResourceModel - response.Diagnostics.Append(request.State.Get(ctx, &old)...) - if response.Diagnostics.HasError() { return } - response.Diagnostics.Append(request.Plan.Get(ctx, &new)...) - if response.Diagnostics.HasError() { return } @@ -199,14 +184,14 @@ func (r *accessGrantsLocationResource) Update(ctx context.Context, request resou conn := r.Meta().S3ControlClient(ctx) if !new.IAMRoleARN.Equal(old.IAMRoleARN) { - input := &s3control.UpdateAccessGrantsLocationInput{} - response.Diagnostics.Append(fwflex.Expand(ctx, new, input)...) + var input s3control.UpdateAccessGrantsLocationInput + response.Diagnostics.Append(fwflex.Expand(ctx, new, &input)...) if response.Diagnostics.HasError() { return } - _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, s3PropagationTimeout, func() (any, error) { - return conn.UpdateAccessGrantsLocation(ctx, input) + _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, s3PropagationTimeout, func(ctx context.Context) (any, error) { + return conn.UpdateAccessGrantsLocation(ctx, &input) }, errCodeInvalidIAMRole) if err != nil { @@ -216,36 +201,25 @@ func (r *accessGrantsLocationResource) Update(ctx context.Context, request resou } } - if oldTagsAll, newTagsAll := old.TagsAll, new.TagsAll; !newTagsAll.Equal(oldTagsAll) { - if err := updateTags(ctx, conn, new.AccessGrantsLocationARN.ValueString(), new.AccountID.ValueString(), oldTagsAll, newTagsAll); err != nil { - response.Diagnostics.AddError(fmt.Sprintf("updating tags for S3 Access Grants Location (%s)", new.ID.ValueString()), err.Error()) - - return - } - } - response.Diagnostics.Append(response.State.Set(ctx, &new)...) 
} func (r *accessGrantsLocationResource) Delete(ctx context.Context, request resource.DeleteRequest, response *resource.DeleteResponse) { var data accessGrantsLocationResourceModel - response.Diagnostics.Append(request.State.Get(ctx, &data)...) - if response.Diagnostics.HasError() { return } conn := r.Meta().S3ControlClient(ctx) - input := &s3control.DeleteAccessGrantsLocationInput{ + input := s3control.DeleteAccessGrantsLocationInput{ AccessGrantsLocationId: fwflex.StringFromFramework(ctx, data.AccessGrantsLocationID), AccountId: fwflex.StringFromFramework(ctx, data.AccountID), } - // "AccessGrantsLocationNotEmptyError: Please delete access grants before deleting access grants location". - _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, s3PropagationTimeout, func() (any, error) { - return conn.DeleteAccessGrantsLocation(ctx, input) + _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, s3PropagationTimeout, func(ctx context.Context) (any, error) { + return conn.DeleteAccessGrantsLocation(ctx, &input) }, errCodeAccessGrantsLocationNotEmptyError) if tfawserr.ErrHTTPStatusCodeEquals(err, http.StatusNotFound) { @@ -260,11 +234,15 @@ func (r *accessGrantsLocationResource) Delete(ctx context.Context, request resou } func findAccessGrantsLocationByTwoPartKey(ctx context.Context, conn *s3control.Client, accountID, locationID string) (*s3control.GetAccessGrantsLocationOutput, error) { - input := &s3control.GetAccessGrantsLocationInput{ + input := s3control.GetAccessGrantsLocationInput{ AccessGrantsLocationId: aws.String(locationID), AccountId: aws.String(accountID), } + return findAccessGrantsLocation(ctx, conn, &input) +} + +func findAccessGrantsLocation(ctx context.Context, conn *s3control.Client, input *s3control.GetAccessGrantsLocationInput) (*s3control.GetAccessGrantsLocationOutput, error) { output, err := conn.GetAccessGrantsLocation(ctx, input) if tfawserr.ErrHTTPStatusCodeEquals(err, http.StatusNotFound) { diff --git a/internal/service/s3control/access_point.go 
b/internal/service/s3control/access_point.go index 9d1638541bef..395d3dfa99ac 100644 --- a/internal/service/s3control/access_point.go +++ b/internal/service/s3control/access_point.go @@ -21,12 +21,15 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" + "github.com/hashicorp/terraform-provider-aws/internal/sdkv2" + tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/internal/verify" "github.com/hashicorp/terraform-provider-aws/names" ) // @SDKResource("aws_s3_access_point, name="Access Point") +// @Tags(identifierAttribute="arn") func resourceAccessPoint() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceAccessPointCreate, @@ -90,18 +93,7 @@ func resourceAccessPoint() *schema.Resource { Type: schema.TypeString, Computed: true, }, - names.AttrPolicy: { - Type: schema.TypeString, - Optional: true, - Computed: true, - ValidateFunc: validation.StringIsJSON, - DiffSuppressFunc: verify.SuppressEquivalentPolicyDiffs, - DiffSuppressOnRefresh: true, - StateFunc: func(v any) string { - json, _ := structure.NormalizeJsonString(v) - return json - }, - }, + names.AttrPolicy: sdkv2.IAMPolicyDocumentSchemaOptionalComputed(), "public_access_block_configuration": { Type: schema.TypeList, Optional: true, @@ -138,6 +130,8 @@ func resourceAccessPoint() *schema.Resource { }, }, }, + names.AttrTags: tftags.TagsSchema(), + names.AttrTagsAll: tftags.TagsSchemaComputed(), names.AttrVPCConfiguration: { Type: schema.TypeList, Optional: true, @@ -160,17 +154,19 @@ func resourceAccessPoint() *schema.Resource { func resourceAccessPointCreate(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).S3ControlClient(ctx) 
+ c := meta.(*conns.AWSClient) + conn := c.S3ControlClient(ctx) - accountID := meta.(*conns.AWSClient).AccountID(ctx) + accountID := c.AccountID(ctx) if v, ok := d.GetOk(names.AttrAccountID); ok { accountID = v.(string) } name := d.Get(names.AttrName).(string) - input := &s3control.CreateAccessPointInput{ + input := s3control.CreateAccessPointInput{ AccountId: aws.String(accountID), Bucket: aws.String(d.Get(names.AttrBucket).(string)), Name: aws.String(name), + Tags: getTagsIn(ctx), } if v, ok := d.GetOk("bucket_account_id"); ok { @@ -185,18 +181,18 @@ func resourceAccessPointCreate(ctx context.Context, d *schema.ResourceData, meta input.VpcConfiguration = expandVPCConfiguration(v.([]any)[0].(map[string]any)) } - output, err := conn.CreateAccessPoint(ctx, input) + output, err := conn.CreateAccessPoint(ctx, &input) if err != nil { return sdkdiag.AppendErrorf(diags, "creating S3 Access Point (%s): %s", name, err) } - resourceID, err := AccessPointCreateResourceID(aws.ToString(output.AccessPointArn)) + resourceID, err := accessPointCreateResourceID(aws.ToString(output.AccessPointArn)) if err != nil { return sdkdiag.AppendFromErr(diags, err) } - accountID, name, err = AccessPointParseResourceID(resourceID) + accountID, name, err = accessPointParseResourceID(resourceID) if err != nil { return sdkdiag.AppendFromErr(diags, err) } @@ -209,13 +205,13 @@ func resourceAccessPointCreate(ctx context.Context, d *schema.ResourceData, meta return sdkdiag.AppendFromErr(diags, err) } - input := &s3control.PutAccessPointPolicyInput{ + input := s3control.PutAccessPointPolicyInput{ AccountId: aws.String(accountID), Name: aws.String(name), Policy: aws.String(policy), } - _, err = conn.PutAccessPointPolicy(ctx, input) + _, err = conn.PutAccessPointPolicy(ctx, &input) if err != nil { return sdkdiag.AppendErrorf(diags, "creating S3 Access Point (%s) policy: %s", d.Id(), err) @@ -227,9 +223,10 @@ func resourceAccessPointCreate(ctx context.Context, d *schema.ResourceData, meta func 
resourceAccessPointRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).S3ControlClient(ctx) + c := meta.(*conns.AWSClient) + conn := c.S3ControlClient(ctx) - accountID, name, err := AccessPointParseResourceID(d.Id()) + accountID, name, err := accessPointParseResourceID(d.Id()) if err != nil { return sdkdiag.AppendFromErr(diags, err) } @@ -276,40 +273,23 @@ func resourceAccessPointRead(ctx context.Context, d *schema.ResourceData, meta a return sdkdiag.AppendErrorf(diags, "parsing S3 Access Point (%s): %s", d.Id(), err) } + // https://docs.aws.amazon.com/service-authorization/latest/reference/list_amazons3.html#amazons3-resources-for-iam-policies. switch service := apARN.Service; service { case "s3": - // https://docs.aws.amazon.com/service-authorization/latest/reference/list_amazons3.html#amazons3-resources-for-iam-policies. - accessPointARN := arn.ARN{ - Partition: meta.(*conns.AWSClient).Partition(ctx), - Service: "s3", - Region: meta.(*conns.AWSClient).Region(ctx), - AccountID: accountID, - Resource: fmt.Sprintf("accesspoint/%s", aws.ToString(output.Name)), - } - - d.Set(names.AttrARN, accessPointARN.String()) + d.Set(names.AttrARN, c.RegionalARNWithAccount(ctx, "s3", accountID, "accesspoint/"+aws.ToString(output.Name))) d.Set(names.AttrBucket, output.Bucket) case "s3express": - // https://docs.aws.amazon.com/service-authorization/latest/reference/list_amazons3.html#amazons3-resources-for-iam-policies. 
- accessPointARN := arn.ARN{ - Partition: meta.(*conns.AWSClient).Partition(ctx), - Service: "s3express", - Region: meta.(*conns.AWSClient).Region(ctx), - AccountID: accountID, - Resource: fmt.Sprintf("accesspoint/%s", aws.ToString(output.Name)), - } - - d.Set(names.AttrARN, accessPointARN.String()) + d.Set(names.AttrARN, c.RegionalARNWithAccount(ctx, "s3express", accountID, "accesspoint/"+aws.ToString(output.Name))) d.Set(names.AttrBucket, output.Bucket) default: - return sdkdiag.AppendErrorf(diags, "unknown S3 Access Point service %s", service) + return sdkdiag.AppendErrorf(diags, "unknown S3 Access Point service (%s)", service) } } d.Set(names.AttrAccountID, accountID) d.Set(names.AttrAlias, output.Alias) d.Set("bucket_account_id", output.BucketAccountId) - d.Set(names.AttrDomainName, meta.(*conns.AWSClient).RegionalHostname(ctx, fmt.Sprintf("%s-%s.s3-accesspoint", aws.ToString(output.Name), accountID))) + d.Set(names.AttrDomainName, c.RegionalHostname(ctx, fmt.Sprintf("%s-%s.s3-accesspoint", aws.ToString(output.Name), accountID))) d.Set(names.AttrEndpoints, output.Endpoints) d.Set(names.AttrName, output.Name) d.Set("network_origin", output.NetworkOrigin) @@ -357,7 +337,7 @@ func resourceAccessPointUpdate(ctx context.Context, d *schema.ResourceData, meta var diags diag.Diagnostics conn := meta.(*conns.AWSClient).S3ControlClient(ctx) - accountID, name, err := AccessPointParseResourceID(d.Id()) + accountID, name, err := accessPointParseResourceID(d.Id()) if err != nil { return sdkdiag.AppendFromErr(diags, err) } @@ -369,24 +349,24 @@ func resourceAccessPointUpdate(ctx context.Context, d *schema.ResourceData, meta return sdkdiag.AppendFromErr(diags, err) } - input := &s3control.PutAccessPointPolicyInput{ + input := s3control.PutAccessPointPolicyInput{ AccountId: aws.String(accountID), Name: aws.String(name), Policy: aws.String(policy), } - _, err = conn.PutAccessPointPolicy(ctx, input) + _, err = conn.PutAccessPointPolicy(ctx, &input) if err != nil { return 
sdkdiag.AppendErrorf(diags, "updating S3 Access Point (%s) policy: %s", d.Id(), err) } } else { - input := &s3control.DeleteAccessPointPolicyInput{ + input := s3control.DeleteAccessPointPolicyInput{ AccountId: aws.String(accountID), Name: aws.String(name), } - _, err := conn.DeleteAccessPointPolicy(ctx, input) + _, err := conn.DeleteAccessPointPolicy(ctx, &input) if err != nil { return sdkdiag.AppendErrorf(diags, "deleting S3 Access Point (%s) policy: %s", d.Id(), err) @@ -401,16 +381,17 @@ func resourceAccessPointDelete(ctx context.Context, d *schema.ResourceData, meta var diags diag.Diagnostics conn := meta.(*conns.AWSClient).S3ControlClient(ctx) - accountID, name, err := AccessPointParseResourceID(d.Id()) + accountID, name, err := accessPointParseResourceID(d.Id()) if err != nil { return sdkdiag.AppendFromErr(diags, err) } log.Printf("[DEBUG] Deleting S3 Access Point: %s", d.Id()) - _, err = conn.DeleteAccessPoint(ctx, &s3control.DeleteAccessPointInput{ + input := s3control.DeleteAccessPointInput{ AccountId: aws.String(accountID), Name: aws.String(name), - }) + } + _, err = conn.DeleteAccessPoint(ctx, &input) if tfawserr.ErrCodeEquals(err, errCodeNoSuchAccessPoint) { return diags @@ -424,12 +405,11 @@ func resourceAccessPointDelete(ctx context.Context, d *schema.ResourceData, meta } func findAccessPointByTwoPartKey(ctx context.Context, conn *s3control.Client, accountID, name string) (*s3control.GetAccessPointOutput, error) { - input := &s3control.GetAccessPointInput{ + input := s3control.GetAccessPointInput{ AccountId: aws.String(accountID), Name: aws.String(name), } - - output, err := conn.GetAccessPoint(ctx, input) + output, err := conn.GetAccessPoint(ctx, &input) if tfawserr.ErrCodeEquals(err, errCodeNoSuchAccessPoint) { return nil, &retry.NotFoundError{ @@ -451,7 +431,7 @@ func findAccessPointByTwoPartKey(ctx context.Context, conn *s3control.Client, ac const accessPointResourceIDSeparator = ":" -func AccessPointCreateResourceID(accessPointARN string) 
(string, error) { +func accessPointCreateResourceID(accessPointARN string) (string, error) { v, err := arn.Parse(accessPointARN) if err != nil { @@ -478,7 +458,7 @@ func AccessPointCreateResourceID(accessPointARN string) (string, error) { } } -func AccessPointParseResourceID(id string) (string, string, error) { +func accessPointParseResourceID(id string) (string, string, error) { if v, err := arn.Parse(id); err == nil { return v.AccountID, id, nil } diff --git a/internal/service/s3control/access_point_data_source.go b/internal/service/s3control/access_point_data_source.go new file mode 100644 index 000000000000..052611507424 --- /dev/null +++ b/internal/service/s3control/access_point_data_source.go @@ -0,0 +1,136 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package s3control + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-provider-aws/internal/framework" + fwflex "github.com/hashicorp/terraform-provider-aws/internal/framework/flex" + fwtypes "github.com/hashicorp/terraform-provider-aws/internal/framework/types" + fwvalidators "github.com/hashicorp/terraform-provider-aws/internal/framework/validators" + tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" + "github.com/hashicorp/terraform-provider-aws/names" +) + +// @FrameworkDataSource("aws_s3_access_point", name="Access Point") +// @Tags(identifierAttribute="arn") +func newAccessPointDataSource(context.Context) (datasource.DataSourceWithConfigure, error) { + return &accessPointDataSource{}, nil +} + +type accessPointDataSource struct { + framework.DataSourceWithModel[accessPointDataSourceModel] +} + +func (d *accessPointDataSource) Schema(ctx context.Context, request datasource.SchemaRequest, 
response *datasource.SchemaResponse) { + response.Schema = schema.Schema{ + Attributes: map[string]schema.Attribute{ + names.AttrAccountID: schema.StringAttribute{ + Optional: true, + Computed: true, + Validators: []validator.String{ + fwvalidators.AWSAccountID(), + }, + }, + names.AttrAlias: schema.StringAttribute{ + Computed: true, + }, + names.AttrARN: schema.StringAttribute{ + Computed: true, + }, + names.AttrBucket: schema.StringAttribute{ + Computed: true, + }, + "bucket_account_id": schema.StringAttribute{ + Computed: true, + }, + "data_source_id": schema.StringAttribute{ + Computed: true, + }, + "data_source_type": schema.StringAttribute{ + Computed: true, + }, + names.AttrEndpoints: schema.MapAttribute{ + CustomType: fwtypes.MapOfStringType, + Computed: true, + ElementType: types.StringType, + }, + names.AttrName: schema.StringAttribute{ + Required: true, + }, + "network_origin": schema.StringAttribute{ + Computed: true, + }, + "public_access_block_configuration": framework.DataSourceComputedListOfObjectAttribute[publicAccessBlockConfigurationModel](ctx), + names.AttrTags: tftags.TagsAttributeComputedOnly(), + names.AttrVPCConfiguration: framework.DataSourceComputedListOfObjectAttribute[vpcConfigurationModel](ctx), + }, + } +} + +func (d *accessPointDataSource) Read(ctx context.Context, request datasource.ReadRequest, response *datasource.ReadResponse) { + var data accessPointDataSourceModel + response.Diagnostics.Append(request.Config.Get(ctx, &data)...) 
+ if response.Diagnostics.HasError() { + return + } + + conn := d.Meta().S3ControlClient(ctx) + + name := fwflex.StringValueFromFramework(ctx, data.Name) + accountID := fwflex.StringValueFromFramework(ctx, data.AccountID) + if accountID == "" { + accountID = d.Meta().AccountID(ctx) + } + output, err := findAccessPointByTwoPartKey(ctx, conn, accountID, name) + + if err != nil { + response.Diagnostics.AddError(fmt.Sprintf("reading S3 Access Point (%s)", name), err.Error()) + + return + } + + response.Diagnostics.Append(fwflex.Flatten(ctx, output, &data)...) + if response.Diagnostics.HasError() { + return + } + + data.AccountID = fwflex.StringValueToFramework(ctx, accountID) + + response.Diagnostics.Append(response.State.Set(ctx, &data)...) +} + +type accessPointDataSourceModel struct { + framework.WithRegionModel + AccessPointARN types.String `tfsdk:"arn"` + AccountID types.String `tfsdk:"account_id"` + Alias types.String `tfsdk:"alias"` + Bucket types.String `tfsdk:"bucket"` + BucketAccountID types.String `tfsdk:"bucket_account_id"` + DataSourceID types.String `tfsdk:"data_source_id"` + DataSourceType types.String `tfsdk:"data_source_type"` + Endpoints fwtypes.MapOfString `tfsdk:"endpoints"` + Name types.String `tfsdk:"name"` + NetworkOrigin types.String `tfsdk:"network_origin"` + PublicAccessBlockConfiguration fwtypes.ListNestedObjectValueOf[publicAccessBlockConfigurationModel] `tfsdk:"public_access_block_configuration"` + Tags tftags.Map `tfsdk:"tags"` + VPCConfiguration fwtypes.ListNestedObjectValueOf[vpcConfigurationModel] `tfsdk:"vpc_configuration"` +} + +type publicAccessBlockConfigurationModel struct { + BlockPublicACLs types.Bool `tfsdk:"block_public_acls"` + BlockPublicPolicy types.Bool `tfsdk:"block_public_policy"` + IgnorePublicACLs types.Bool `tfsdk:"ignore_public_acls"` + RestrictPublicBuckets types.Bool `tfsdk:"restrict_public_buckets"` +} + +type vpcConfigurationModel struct { + VpcID types.String `tfsdk:"vpc_id"` +} diff --git 
a/internal/service/s3control/access_point_data_source_test.go b/internal/service/s3control/access_point_data_source_test.go new file mode 100644 index 000000000000..9b8f59c73184 --- /dev/null +++ b/internal/service/s3control/access_point_data_source_test.go @@ -0,0 +1,50 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package s3control_test + +import ( + "testing" + + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccS3ControlAccessPointDataSource_basic(t *testing.T) { + ctx := acctest.Context(t) + bucketName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + accessPointName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_s3_access_point.test" + dataSourceName := "data.aws_s3_access_point.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.S3ControlServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + Config: testAccAccessPointDataSourceConfig_basic(bucketName, accessPointName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrPair(resourceName, names.AttrAccountID, dataSourceName, names.AttrAccountID), + resource.TestCheckResourceAttrPair(resourceName, names.AttrAlias, dataSourceName, names.AttrAlias), + resource.TestCheckResourceAttrPair(resourceName, names.AttrARN, dataSourceName, names.AttrARN), + resource.TestCheckResourceAttrPair(resourceName, names.AttrBucket, dataSourceName, names.AttrBucket), + resource.TestCheckResourceAttrPair(resourceName, "bucket_account_id", dataSourceName, "bucket_account_id"), + resource.TestCheckResourceAttrPair(resourceName, names.AttrName, dataSourceName, names.AttrName), + 
resource.TestCheckResourceAttrPair(resourceName, acctest.CtTagsPercent, dataSourceName, acctest.CtTagsPercent), + resource.TestCheckResourceAttrPair(resourceName, "network_origin", dataSourceName, "network_origin"), + ), + }, + }, + }) +} + +func testAccAccessPointDataSourceConfig_basic(bucketName, accessPointName string) string { + return acctest.ConfigCompose(testAccAccessPointConfig_tags1(bucketName, accessPointName, acctest.CtKey1, acctest.CtValue1), ` +data "aws_s3_access_point" "test" { + name = aws_s3_access_point.test.name +} +`) +} diff --git a/internal/service/s3control/access_point_policy.go b/internal/service/s3control/access_point_policy.go index 634f3de63329..5485fce4963d 100644 --- a/internal/service/s3control/access_point_policy.go +++ b/internal/service/s3control/access_point_policy.go @@ -15,9 +15,9 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/structure" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" + "github.com/hashicorp/terraform-provider-aws/internal/sdkv2" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/internal/verify" "github.com/hashicorp/terraform-provider-aws/names" @@ -46,17 +46,7 @@ func resourceAccessPointPolicy() *schema.Resource { Type: schema.TypeBool, Computed: true, }, - names.AttrPolicy: { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringIsJSON, - DiffSuppressFunc: verify.SuppressEquivalentPolicyDiffs, - DiffSuppressOnRefresh: true, - StateFunc: func(v any) string { - json, _ := structure.NormalizeJsonString(v) - return json - }, - }, + names.AttrPolicy: sdkv2.IAMPolicyDocumentSchemaRequired(), }, } } @@ -65,12 +55,12 @@ func 
resourceAccessPointPolicyCreate(ctx context.Context, d *schema.ResourceData var diags diag.Diagnostics conn := meta.(*conns.AWSClient).S3ControlClient(ctx) - resourceID, err := AccessPointCreateResourceID(d.Get("access_point_arn").(string)) + resourceID, err := accessPointCreateResourceID(d.Get("access_point_arn").(string)) if err != nil { return sdkdiag.AppendFromErr(diags, err) } - accountID, name, err := AccessPointParseResourceID(resourceID) + accountID, name, err := accessPointParseResourceID(resourceID) if err != nil { return sdkdiag.AppendFromErr(diags, err) } @@ -80,13 +70,13 @@ func resourceAccessPointPolicyCreate(ctx context.Context, d *schema.ResourceData return sdkdiag.AppendFromErr(diags, err) } - input := &s3control.PutAccessPointPolicyInput{ + input := s3control.PutAccessPointPolicyInput{ AccountId: aws.String(accountID), Name: aws.String(name), Policy: aws.String(policy), } - _, err = conn.PutAccessPointPolicy(ctx, input) + _, err = conn.PutAccessPointPolicy(ctx, &input) if err != nil { return sdkdiag.AppendErrorf(diags, "creating S3 Access Point (%s) Policy: %s", resourceID, err) @@ -101,7 +91,7 @@ func resourceAccessPointPolicyRead(ctx context.Context, d *schema.ResourceData, var diags diag.Diagnostics conn := meta.(*conns.AWSClient).S3ControlClient(ctx) - accountID, name, err := AccessPointParseResourceID(d.Id()) + accountID, name, err := accessPointParseResourceID(d.Id()) if err != nil { return sdkdiag.AppendFromErr(diags, err) } @@ -119,7 +109,6 @@ func resourceAccessPointPolicyRead(ctx context.Context, d *schema.ResourceData, } d.Set("has_public_access_policy", status.IsPublic) - if policy != "" { policyToSet, err := verify.PolicyToSet(d.Get(names.AttrPolicy).(string), policy) if err != nil { @@ -138,7 +127,7 @@ func resourceAccessPointPolicyUpdate(ctx context.Context, d *schema.ResourceData var diags diag.Diagnostics conn := meta.(*conns.AWSClient).S3ControlClient(ctx) - accountID, name, err := AccessPointParseResourceID(d.Id()) + accountID, 
name, err := accessPointParseResourceID(d.Id()) if err != nil { return sdkdiag.AppendFromErr(diags, err) } @@ -148,13 +137,13 @@ func resourceAccessPointPolicyUpdate(ctx context.Context, d *schema.ResourceData return sdkdiag.AppendFromErr(diags, err) } - input := &s3control.PutAccessPointPolicyInput{ + input := s3control.PutAccessPointPolicyInput{ AccountId: aws.String(accountID), Name: aws.String(name), Policy: aws.String(policy), } - _, err = conn.PutAccessPointPolicy(ctx, input) + _, err = conn.PutAccessPointPolicy(ctx, &input) if err != nil { return sdkdiag.AppendErrorf(diags, "updating S3 Access Point Policy (%s): %s", d.Id(), err) @@ -167,16 +156,17 @@ func resourceAccessPointPolicyDelete(ctx context.Context, d *schema.ResourceData var diags diag.Diagnostics conn := meta.(*conns.AWSClient).S3ControlClient(ctx) - accountID, name, err := AccessPointParseResourceID(d.Id()) + accountID, name, err := accessPointParseResourceID(d.Id()) if err != nil { return sdkdiag.AppendFromErr(diags, err) } log.Printf("[DEBUG] Deleting S3 Access Point Policy: %s", d.Id()) - _, err = conn.DeleteAccessPointPolicy(ctx, &s3control.DeleteAccessPointPolicyInput{ + input := s3control.DeleteAccessPointPolicyInput{ AccountId: aws.String(accountID), Name: aws.String(name), - }) + } + _, err = conn.DeleteAccessPointPolicy(ctx, &input) if tfawserr.ErrCodeEquals(err, errCodeNoSuchAccessPoint, errCodeNoSuchAccessPointPolicy) { return diags @@ -190,8 +180,7 @@ func resourceAccessPointPolicyDelete(ctx context.Context, d *schema.ResourceData } func resourceAccessPointPolicyImport(ctx context.Context, d *schema.ResourceData, meta any) ([]*schema.ResourceData, error) { - resourceID, err := AccessPointCreateResourceID(d.Id()) - + resourceID, err := accessPointCreateResourceID(d.Id()) if err != nil { return nil, err } @@ -203,12 +192,11 @@ func resourceAccessPointPolicyImport(ctx context.Context, d *schema.ResourceData } func findAccessPointPolicyAndStatusByTwoPartKey(ctx context.Context, conn 
*s3control.Client, accountID, name string) (string, *types.PolicyStatus, error) { - inputGAPP := &s3control.GetAccessPointPolicyInput{ + inputGAPP := s3control.GetAccessPointPolicyInput{ AccountId: aws.String(accountID), Name: aws.String(name), } - - outputGAPP, err := conn.GetAccessPointPolicy(ctx, inputGAPP) + outputGAPP, err := conn.GetAccessPointPolicy(ctx, &inputGAPP) if tfawserr.ErrCodeEquals(err, errCodeNoSuchAccessPoint, errCodeNoSuchAccessPointPolicy) { return "", nil, &retry.NotFoundError{ @@ -231,12 +219,11 @@ func findAccessPointPolicyAndStatusByTwoPartKey(ctx context.Context, conn *s3con return "", nil, tfresource.NewEmptyResultError(inputGAPP) } - inputGAPPS := &s3control.GetAccessPointPolicyStatusInput{ + inputGAPPS := s3control.GetAccessPointPolicyStatusInput{ AccountId: aws.String(accountID), Name: aws.String(name), } - - outputGAPPS, err := conn.GetAccessPointPolicyStatus(ctx, inputGAPPS) + outputGAPPS, err := conn.GetAccessPointPolicyStatus(ctx, &inputGAPPS) if tfawserr.ErrCodeEquals(err, errCodeNoSuchAccessPoint, errCodeNoSuchAccessPointPolicy) { return "", nil, &retry.NotFoundError{ diff --git a/internal/service/s3control/access_point_test.go b/internal/service/s3control/access_point_test.go index ab4509db3b35..515fc1183da6 100644 --- a/internal/service/s3control/access_point_test.go +++ b/internal/service/s3control/access_point_test.go @@ -14,6 +14,7 @@ import ( sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/knownvalue" + "github.com/hashicorp/terraform-plugin-testing/plancheck" "github.com/hashicorp/terraform-plugin-testing/statecheck" "github.com/hashicorp/terraform-plugin-testing/terraform" "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" @@ -40,7 +41,7 @@ func TestAccS3ControlAccessPoint_basic(t *testing.T) { Steps: []resource.TestStep{ { Config: testAccAccessPointConfig_basic(bucketName, 
accessPointName), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( testAccCheckAccessPointExists(ctx, resourceName, &v), acctest.CheckResourceAttrAccountID(ctx, resourceName, names.AttrAccountID), // https://docs.aws.amazon.com/AmazonS3/latest/userguide/access-points-alias.html: @@ -59,6 +60,7 @@ func TestAccS3ControlAccessPoint_basic(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "public_access_block_configuration.0.block_public_policy", acctest.CtTrue), resource.TestCheckResourceAttr(resourceName, "public_access_block_configuration.0.ignore_public_acls", acctest.CtTrue), resource.TestCheckResourceAttr(resourceName, "public_access_block_configuration.0.restrict_public_buckets", acctest.CtTrue), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, "0"), resource.TestCheckResourceAttr(resourceName, "vpc_configuration.#", "0"), ), }, @@ -91,6 +93,11 @@ func TestAccS3ControlAccessPoint_disappears(t *testing.T) { acctest.CheckResourceDisappears(ctx, acctest.Provider, tfs3control.ResourceAccessPoint(), resourceName), ), ExpectNonEmptyPlan: true, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + }, + }, }, }, }) @@ -359,6 +366,53 @@ func TestAccS3ControlAccessPoint_directoryBucket_basic(t *testing.T) { }) } +func TestAccS3ControlAccessPoint_tags(t *testing.T) { + ctx := acctest.Context(t) + var v s3control.GetAccessPointOutput + bucketName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + accessPointName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_s3_access_point.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.S3ControlServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: 
testAccCheckAccessPointDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccAccessPointConfig_tags1(bucketName, accessPointName, acctest.CtKey1, acctest.CtValue1), + Check: resource.ComposeTestCheckFunc( + testAccCheckAccessPointExists(ctx, resourceName, &v), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, "1"), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey1, acctest.CtValue1), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccAccessPointConfig_tags2(bucketName, accessPointName, acctest.CtKey1, acctest.CtValue1Updated, acctest.CtKey2, acctest.CtValue2), + Check: resource.ComposeTestCheckFunc( + testAccCheckAccessPointExists(ctx, resourceName, &v), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, "2"), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey1, acctest.CtValue1Updated), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey2, acctest.CtValue2), + ), + }, + { + Config: testAccAccessPointConfig_tags1(bucketName, accessPointName, acctest.CtKey2, acctest.CtValue2), + Check: resource.ComposeTestCheckFunc( + testAccCheckAccessPointExists(ctx, resourceName, &v), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, "1"), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey2, acctest.CtValue2), + ), + }, + }, + }) +} + func testAccCheckAccessPointDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { conn := acctest.Provider.Meta().(*conns.AWSClient).S3ControlClient(ctx) @@ -440,7 +494,7 @@ func testAccCheckAccessPointHasPolicy(ctx context.Context, n string, fn func() s equivalent, err := awspolicy.PoliciesAreEquivalent(actualPolicyText, expectedPolicyText) if err != nil { - return fmt.Errorf("Error testing policy equivalence: %s", err) + return fmt.Errorf("Error testing policy equivalence: %w", err) } if !equivalent { return 
fmt.Errorf("Non-equivalent policy error:\n\nexpected: %s\n\n got: %s\n", @@ -676,3 +730,38 @@ resource "aws_s3_access_point" "test" { } `, rName)) } + +func testAccAccessPointConfig_tags1(bucketName, accessPointName, tagKey1, tagValue1 string) string { + return fmt.Sprintf(` +resource "aws_s3_bucket" "test" { + bucket = %[1]q +} + +resource "aws_s3_access_point" "test" { + bucket = aws_s3_bucket.test.bucket + name = %[2]q + + tags = { + %[3]q = %[4]q + } +} +`, bucketName, accessPointName, tagKey1, tagValue1) +} + +func testAccAccessPointConfig_tags2(bucketName, accessPointName, tagKey1, tagValue1, tagKey2, tagValue2 string) string { + return fmt.Sprintf(` +resource "aws_s3_bucket" "test" { + bucket = %[1]q +} + +resource "aws_s3_access_point" "test" { + bucket = aws_s3_bucket.test.bucket + name = %[2]q + + tags = { + %[3]q = %[4]q + %[5]q = %[6]q + } +} +`, bucketName, accessPointName, tagKey1, tagValue1, tagKey2, tagValue2) +} diff --git a/internal/service/s3control/account_public_access_block.go b/internal/service/s3control/account_public_access_block.go index 808fefadc692..782674ae3241 100644 --- a/internal/service/s3control/account_public_access_block.go +++ b/internal/service/s3control/account_public_access_block.go @@ -96,7 +96,7 @@ func resourceAccountPublicAccessBlockCreate(ctx context.Context, d *schema.Resou d.SetId(accountID) - _, err = tfresource.RetryWhenNotFound(ctx, s3PropagationTimeout, func() (any, error) { + _, err = tfresource.RetryWhenNotFound(ctx, s3PropagationTimeout, func(ctx context.Context) (any, error) { return findPublicAccessBlockByAccountID(ctx, conn, d.Id()) }) @@ -177,7 +177,7 @@ func resourceAccountPublicAccessBlockDelete(ctx context.Context, d *schema.Resou return sdkdiag.AppendErrorf(diags, "deleting S3 Account Public Access Block (%s): %s", d.Id(), err) } - _, err = tfresource.RetryUntilNotFound(ctx, s3PropagationTimeout, func() (any, error) { + _, err = tfresource.RetryUntilNotFound(ctx, s3PropagationTimeout, func(ctx 
context.Context) (any, error) { return findPublicAccessBlockByAccountID(ctx, conn, d.Id()) }) diff --git a/internal/service/s3control/account_public_access_block_identity_gen_test.go b/internal/service/s3control/account_public_access_block_identity_gen_test.go index 83afa58307da..1bd617e6966c 100644 --- a/internal/service/s3control/account_public_access_block_identity_gen_test.go +++ b/internal/service/s3control/account_public_access_block_identity_gen_test.go @@ -15,6 +15,7 @@ import ( "github.com/hashicorp/terraform-plugin-testing/tfversion" "github.com/hashicorp/terraform-provider-aws/internal/acctest" tfknownvalue "github.com/hashicorp/terraform-provider-aws/internal/acctest/knownvalue" + tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -22,8 +23,9 @@ func testAccS3ControlAccountPublicAccessBlock_IdentitySerial(t *testing.T) { t.Helper() testCases := map[string]func(t *testing.T){ - acctest.CtBasic: testAccS3ControlAccountPublicAccessBlock_Identity_Basic, - "ExistingResource": testAccS3ControlAccountPublicAccessBlock_Identity_ExistingResource, + acctest.CtBasic: testAccS3ControlAccountPublicAccessBlock_Identity_Basic, + "ExistingResource": testAccS3ControlAccountPublicAccessBlock_Identity_ExistingResource, + "ExistingResourceNoRefresh": testAccS3ControlAccountPublicAccessBlock_Identity_ExistingResource_NoRefresh_NoChange, } acctest.RunSerialTests1Level(t, testCases, 0) @@ -35,7 +37,7 @@ func testAccS3ControlAccountPublicAccessBlock_Identity_Basic(t *testing.T) { var v awstypes.PublicAccessBlockConfiguration resourceName := "aws_s3_account_public_access_block.test" - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ TerraformVersionChecks: []tfversion.TerraformVersionCheck{ tfversion.SkipBelow(tfversion.Version1_12_0), }, @@ -99,3 +101,118 @@ func testAccS3ControlAccountPublicAccessBlock_Identity_Basic(t *testing.T) { }, }) } + +func 
testAccS3ControlAccountPublicAccessBlock_Identity_ExistingResource(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.PublicAccessBlockConfiguration + resourceName := "aws_s3_account_public_access_block.test" + + acctest.Test(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.S3ControlServiceID), + CheckDestroy: testAccCheckAccountPublicAccessBlockDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/AccountPublicAccessBlock/basic_v5.100.0/"), + ConfigVariables: config.Variables{}, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckAccountPublicAccessBlockExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: v6.0 Identity error + { + ConfigDirectory: config.StaticDirectory("testdata/AccountPublicAccessBlock/basic_v6.0.0/"), + ConfigVariables: config.Variables{}, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckAccountPublicAccessBlockExists(ctx, resourceName, &v), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: knownvalue.Null(), + }), + }, + }, + + // Step 3: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/AccountPublicAccessBlock/basic/"), + ConfigVariables: config.Variables{}, + ConfigPlanChecks: 
resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + }), + }, + }, + }, + }) +} + +func testAccS3ControlAccountPublicAccessBlock_Identity_ExistingResource_NoRefresh_NoChange(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.PublicAccessBlockConfiguration + resourceName := "aws_s3_account_public_access_block.test" + + acctest.Test(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.S3ControlServiceID), + CheckDestroy: testAccCheckAccountPublicAccessBlockDestroy(ctx), + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/AccountPublicAccessBlock/basic_v5.100.0/"), + ConfigVariables: config.Variables{}, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckAccountPublicAccessBlockExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/AccountPublicAccessBlock/basic/"), + ConfigVariables: config.Variables{}, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckAccountPublicAccessBlockExists(ctx, resourceName, &v), + ), + }, + }, + }) +} diff --git 
a/internal/service/s3control/account_public_access_block_test.go b/internal/service/s3control/account_public_access_block_test.go index 0ef4c936e5c9..6de95c4c5147 100644 --- a/internal/service/s3control/account_public_access_block_test.go +++ b/internal/service/s3control/account_public_access_block_test.go @@ -11,14 +11,8 @@ import ( "github.com/aws/aws-sdk-go-v2/service/s3control/types" "github.com/hashicorp/terraform-plugin-testing/helper/resource" - "github.com/hashicorp/terraform-plugin-testing/knownvalue" - "github.com/hashicorp/terraform-plugin-testing/plancheck" - "github.com/hashicorp/terraform-plugin-testing/statecheck" "github.com/hashicorp/terraform-plugin-testing/terraform" - "github.com/hashicorp/terraform-plugin-testing/tfversion" "github.com/hashicorp/terraform-provider-aws/internal/acctest" - tfknownvalue "github.com/hashicorp/terraform-provider-aws/internal/acctest/knownvalue" - tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" "github.com/hashicorp/terraform-provider-aws/internal/conns" tfs3control "github.com/hashicorp/terraform-provider-aws/internal/service/s3control" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" @@ -79,83 +73,6 @@ func testAccAccountPublicAccessBlock_basic(t *testing.T) { }) } -func testAccS3ControlAccountPublicAccessBlock_Identity_ExistingResource(t *testing.T) { - ctx := acctest.Context(t) - var v types.PublicAccessBlockConfiguration - resourceName := "aws_s3_account_public_access_block.test" - - resource.Test(t, resource.TestCase{ - TerraformVersionChecks: []tfversion.TerraformVersionCheck{ - tfversion.SkipBelow(tfversion.Version1_12_0), - }, - PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, names.Route53ServiceID), - CheckDestroy: testAccCheckAccountPublicAccessBlockDestroy(ctx), - Steps: []resource.TestStep{ - { - ExternalProviders: map[string]resource.ExternalProvider{ - "aws": { - Source: "hashicorp/aws", - VersionConstraint: 
"5.100.0", - }, - }, - Config: testAccAccountPublicAccessBlockConfig_basic(), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckAccountPublicAccessBlockExists(ctx, resourceName, &v), - ), - ConfigStateChecks: []statecheck.StateCheck{ - tfstatecheck.ExpectNoIdentity(resourceName), - }, - }, - { - ExternalProviders: map[string]resource.ExternalProvider{ - "aws": { - Source: "hashicorp/aws", - VersionConstraint: "6.0.0", - }, - }, - Config: testAccAccountPublicAccessBlockConfig_basic(), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckAccountPublicAccessBlockExists(ctx, resourceName, &v), - ), - ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - PostApplyPostRefresh: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - }, - ConfigStateChecks: []statecheck.StateCheck{ - statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ - names.AttrAccountID: knownvalue.Null(), - }), - }, - }, - { - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - Config: testAccAccountPublicAccessBlockConfig_basic(), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckAccountPublicAccessBlockExists(ctx, resourceName, &v), - ), - ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - PostApplyPostRefresh: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - }, - ConfigStateChecks: []statecheck.StateCheck{ - statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ - names.AttrAccountID: tfknownvalue.AccountID(), - }), - }, - }, - }, - }) -} - func testAccAccountPublicAccessBlock_disappears(t *testing.T) { ctx := acctest.Context(t) var v types.PublicAccessBlockConfiguration diff --git 
a/internal/service/s3control/bucket.go b/internal/service/s3control/bucket.go index ef66af58e1d6..15c1739b76b8 100644 --- a/internal/service/s3control/bucket.go +++ b/internal/service/s3control/bucket.go @@ -5,7 +5,6 @@ package s3control import ( "context" - "fmt" "log" "strings" "time" @@ -14,7 +13,6 @@ import ( "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/aws/arn" "github.com/aws/aws-sdk-go-v2/service/s3control" - "github.com/aws/aws-sdk-go-v2/service/s3control/types" "github.com/hashicorp/aws-sdk-go-base/v2/tfawserr" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" @@ -34,6 +32,9 @@ const ( // @SDKResource("aws_s3control_bucket", name="Bucket") // @Tags +// @ArnIdentity +// @Testing(preIdentityVersion="v6.14.1") +// @Testing(preCheck="acctest.PreCheckOutpostsOutposts") func resourceBucket() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceBucketCreate, @@ -41,10 +42,6 @@ func resourceBucket() *schema.Resource { UpdateWithoutTimeout: resourceBucketUpdate, DeleteWithoutTimeout: resourceBucketDelete, - Importer: &schema.ResourceImporter{ - StateContext: schema.ImportStatePassthroughContext, - }, - Schema: map[string]*schema.Schema{ names.AttrARN: { Type: schema.TypeString, @@ -99,10 +96,8 @@ func resourceBucketCreate(ctx context.Context, d *schema.ResourceData, meta any) d.SetId(aws.ToString(output.BucketArn)) - if tags := keyValueTagsFromS3Tags(ctx, getS3TagsIn(ctx)); len(tags) > 0 { - if err := bucketUpdateTags(ctx, conn, d.Id(), nil, tags); err != nil { - return sdkdiag.AppendErrorf(diags, "adding S3 Control Bucket (%s) tags: %s", d.Id(), err) - } + if err := bucketCreateTags(ctx, conn, d.Id(), getS3TagsIn(ctx)); err != nil { + return sdkdiag.AppendErrorf(diags, "setting S3 Control Bucket (%s) tags: %s", d.Id(), err) } return append(diags, resourceBucketRead(ctx, d, meta)...) 
@@ -190,7 +185,7 @@ func resourceBucketDelete(ctx context.Context, d *schema.ResourceData, meta any) // can occur on deletion: // InvalidBucketState: Bucket is in an invalid state log.Printf("[DEBUG] Deleting S3 Control Bucket: %s", d.Id()) - _, err = tfresource.RetryWhenAWSErrCodeEquals(ctx, bucketStatePropagationTimeout, func() (any, error) { + _, err = tfresource.RetryWhenAWSErrCodeEquals(ctx, bucketStatePropagationTimeout, func(ctx context.Context) (any, error) { return conn.DeleteBucket(ctx, input) }, errCodeInvalidBucketState) @@ -230,83 +225,3 @@ func findBucketByTwoPartKey(ctx context.Context, conn *s3control.Client, account return output, nil } - -// Custom S3control tagging functions using similar formatting as other service generated code. - -// bucketListTags lists S3control bucket tags. -// The identifier is the bucket ARN. -func bucketListTags(ctx context.Context, conn *s3control.Client, identifier string) (tftags.KeyValueTags, error) { - parsedArn, err := arn.Parse(identifier) - - if err != nil { - return tftags.New(ctx, nil), err - } - - input := &s3control.GetBucketTaggingInput{ - AccountId: aws.String(parsedArn.AccountID), - Bucket: aws.String(identifier), - } - - output, err := conn.GetBucketTagging(ctx, input) - - if tfawserr.ErrCodeEquals(err, errCodeNoSuchTagSet) { - return tftags.New(ctx, nil), nil - } - - if err != nil { - return tftags.New(ctx, nil), err - } - - return keyValueTagsFromS3Tags(ctx, output.TagSet), nil -} - -// bucketUpdateTags updates S3control bucket tags. -// The identifier is the bucket ARN. -func bucketUpdateTags(ctx context.Context, conn *s3control.Client, identifier string, oldTagsMap, newTagsMap any) error { - parsedArn, err := arn.Parse(identifier) - - if err != nil { - return err - } - - oldTags := tftags.New(ctx, oldTagsMap) - newTags := tftags.New(ctx, newTagsMap) - - // We need to also consider any existing ignored tags. 
- allTags, err := bucketListTags(ctx, conn, identifier) - - if err != nil { - return fmt.Errorf("listing resource tags (%s): %w", identifier, err) - } - - ignoredTags := allTags.Ignore(oldTags).Ignore(newTags) - - if len(newTags)+len(ignoredTags) > 0 { - input := &s3control.PutBucketTaggingInput{ - AccountId: aws.String(parsedArn.AccountID), - Bucket: aws.String(identifier), - Tagging: &types.Tagging{ - TagSet: svcS3Tags(newTags.Merge(ignoredTags)), - }, - } - - _, err := conn.PutBucketTagging(ctx, input) - - if err != nil { - return fmt.Errorf("setting resource tags (%s): %s", identifier, err) - } - } else if len(oldTags) > 0 && len(ignoredTags) == 0 { - input := &s3control.DeleteBucketTaggingInput{ - AccountId: aws.String(parsedArn.AccountID), - Bucket: aws.String(identifier), - } - - _, err := conn.DeleteBucketTagging(ctx, input) - - if err != nil { - return fmt.Errorf("deleting resource tags (%s): %s", identifier, err) - } - } - - return nil -} diff --git a/internal/service/s3control/bucket_identity_gen_test.go b/internal/service/s3control/bucket_identity_gen_test.go new file mode 100644 index 000000000000..204b902541e7 --- /dev/null +++ b/internal/service/s3control/bucket_identity_gen_test.go @@ -0,0 +1,349 @@ +// Code generated by internal/generate/identitytests/main.go; DO NOT EDIT. 
+ +package s3control_test + +import ( + "testing" + + "github.com/hashicorp/terraform-plugin-testing/compare" + "github.com/hashicorp/terraform-plugin-testing/config" + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/knownvalue" + "github.com/hashicorp/terraform-plugin-testing/plancheck" + "github.com/hashicorp/terraform-plugin-testing/statecheck" + "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" + "github.com/hashicorp/terraform-plugin-testing/tfversion" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccS3ControlBucket_Identity_Basic(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_s3control_bucket.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckOutpostsOutposts(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.S3ControlServiceID), + CheckDestroy: testAccCheckBucketDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + // Step 1: Setup + { + ConfigDirectory: config.StaticDirectory("testdata/Bucket/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckBucketExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New(names.AttrARN), compare.ValuesSame()), + 
statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + + // Step 2: Import command + { + ConfigDirectory: config.StaticDirectory("testdata/Bucket/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ImportStateKind: resource.ImportCommandWithID, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + + // Step 3: Import block with Import ID + { + ConfigDirectory: config.StaticDirectory("testdata/Bucket/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + }, + }, + }, + + // Step 4: Import block with Resource Identity + { + ConfigDirectory: config.StaticDirectory("testdata/Bucket/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithResourceIdentity, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + 
plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + }, + }, + }, + }, + }) +} + +func TestAccS3ControlBucket_Identity_RegionOverride(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_s3control_bucket.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckOutpostsOutposts(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.S3ControlServiceID), + CheckDestroy: acctest.CheckDestroyNoop, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + // Step 1: Setup + { + ConfigDirectory: config.StaticDirectory("testdata/Bucket/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New(names.AttrARN), compare.ValuesSame()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + + // Step 2: Import command with appended "@" + { + ConfigDirectory: config.StaticDirectory("testdata/Bucket/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ImportStateKind: resource.ImportCommandWithID, + ImportStateIdFunc: 
acctest.CrossRegionImportStateIdFunc(resourceName), + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + + // Step 3: Import command without appended "@" + { + ConfigDirectory: config.StaticDirectory("testdata/Bucket/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ImportStateKind: resource.ImportCommandWithID, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + + // Step 4: Import block with Import ID and appended "@" + { + ConfigDirectory: config.StaticDirectory("testdata/Bucket/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportStateIdFunc: acctest.CrossRegionImportStateIdFunc(resourceName), + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + }, + + // Step 5: Import block with Import ID and no appended "@" + { + ConfigDirectory: config.StaticDirectory("testdata/Bucket/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), 
knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + }, + + // Step 6: Import block with Resource Identity + { + ConfigDirectory: config.StaticDirectory("testdata/Bucket/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithResourceIdentity, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + }, + }, + }) +} + +// Resource Identity was added after v6.14.1 +func TestAccS3ControlBucket_Identity_ExistingResource(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_s3control_bucket.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckOutpostsOutposts(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.S3ControlServiceID), + CheckDestroy: testAccCheckBucketDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/Bucket/basic_v6.14.1/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: 
resource.ComposeAggregateTestCheckFunc( + testAccCheckBucketExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Bucket/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + }, + }) +} + +// Resource Identity was added after v6.14.1 +func TestAccS3ControlBucket_Identity_ExistingResource_NoRefresh_NoChange(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_s3control_bucket.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckOutpostsOutposts(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.S3ControlServiceID), + CheckDestroy: testAccCheckBucketDestroy(ctx), + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/Bucket/basic_v6.14.1/"), + ConfigVariables: config.Variables{ + acctest.CtRName: 
config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckBucketExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Bucket/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + }, + }) +} diff --git a/internal/service/s3control/exports_test.go b/internal/service/s3control/exports_test.go index f2e053190213..2e6d46bda9d5 100644 --- a/internal/service/s3control/exports_test.go +++ b/internal/service/s3control/exports_test.go @@ -23,7 +23,7 @@ var ( ResourceStorageLensConfiguration = resourceStorageLensConfiguration FindAccessGrantByTwoPartKey = findAccessGrantByTwoPartKey - FindAccessGrantsInstance = findAccessGrantsInstance + FindAccessGrantsInstanceByID = findAccessGrantsInstanceByID FindAccessGrantsInstanceResourcePolicy = findAccessGrantsInstanceResourcePolicy FindAccessGrantsLocationByTwoPartKey = findAccessGrantsLocationByTwoPartKey FindAccessPointByTwoPartKey = findAccessPointByTwoPartKey @@ -39,4 +39,6 @@ var ( FindObjectLambdaAccessPointPolicyAndStatusByTwoPartKey = findObjectLambdaAccessPointPolicyAndStatusByTwoPartKey FindPublicAccessBlockByAccountID = findPublicAccessBlockByAccountID FindStorageLensConfigurationByAccountIDAndConfigID = findStorageLensConfigurationByAccountIDAndConfigID + + AccessPointParseResourceID = accessPointParseResourceID 
) diff --git a/internal/service/s3control/generate.go b/internal/service/s3control/generate.go index 9a446be97f0d..1124e7d7c418 100644 --- a/internal/service/s3control/generate.go +++ b/internal/service/s3control/generate.go @@ -1,7 +1,7 @@ // Copyright (c) HashiCorp, Inc. // SPDX-License-Identifier: MPL-2.0 -//go:generate go run ../../generate/tags/main.go -ListTags -ServiceTagsSlice -TagResTypeElem=AccountId -UpdateTags +//go:generate go run ../../generate/tags/main.go -ListTags -ServiceTagsSlice -TagResTypeIsAccountID -TagResTypeElem=AccountId -UpdateTags //go:generate go run ../../generate/tags/main.go -ServiceTagsSlice -TagsFunc=svcS3Tags -KeyValueTagsFunc=keyValueTagsFromS3Tags -GetTagsInFunc=getS3TagsIn -SetTagsOutFunc=setS3TagsOut -TagType=S3Tag -- s3_tags_gen.go //go:generate go run ../../generate/servicepackage/main.go //go:generate go run ../../generate/identitytests/main.go diff --git a/internal/service/s3control/s3_tags.go b/internal/service/s3control/s3_tags.go new file mode 100644 index 000000000000..4cf97ab6f449 --- /dev/null +++ b/internal/service/s3control/s3_tags.go @@ -0,0 +1,104 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package s3control + +import ( + "context" + "fmt" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/aws/arn" + "github.com/aws/aws-sdk-go-v2/service/s3control" + awstypes "github.com/aws/aws-sdk-go-v2/service/s3control/types" + "github.com/hashicorp/aws-sdk-go-base/v2/tfawserr" + tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" +) + +// Custom S3 tag functions using the same format as generated code. + +func bucketCreateTags(ctx context.Context, conn *s3control.Client, identifier string, tags []awstypes.S3Tag) error { + if len(tags) == 0 { + return nil + } + + return bucketUpdateTags(ctx, conn, identifier, nil, keyValueTagsFromS3Tags(ctx, tags)) +} + +// bucketListTags lists S3control bucket tags. +// The identifier is the bucket ARN. 
+func bucketListTags(ctx context.Context, conn *s3control.Client, identifier string, optFns ...func(*s3control.Options)) (tftags.KeyValueTags, error) { + parsedArn, err := arn.Parse(identifier) + + if err != nil { + return tftags.New(ctx, nil), err + } + + input := s3control.GetBucketTaggingInput{ + AccountId: aws.String(parsedArn.AccountID), + Bucket: aws.String(identifier), + } + + output, err := conn.GetBucketTagging(ctx, &input, optFns...) + + if tfawserr.ErrCodeEquals(err, errCodeNoSuchTagSet) { + return tftags.New(ctx, nil), nil + } + + if err != nil { + return tftags.New(ctx, nil), err + } + + return keyValueTagsFromS3Tags(ctx, output.TagSet), nil +} + +// bucketUpdateTags updates S3control bucket tags. +// The identifier is the bucket ARN. +func bucketUpdateTags(ctx context.Context, conn *s3control.Client, identifier string, oldTagsMap, newTagsMap any, optFns ...func(*s3control.Options)) error { + parsedArn, err := arn.Parse(identifier) + + if err != nil { + return err + } + + oldTags := tftags.New(ctx, oldTagsMap) + newTags := tftags.New(ctx, newTagsMap) + + // We need to also consider any existing ignored tags. + allTags, err := bucketListTags(ctx, conn, identifier) + + if err != nil { + return fmt.Errorf("listing resource tags (%s): %w", identifier, err) + } + + ignoredTags := allTags.Ignore(oldTags).Ignore(newTags) + + if len(newTags)+len(ignoredTags) > 0 { + input := s3control.PutBucketTaggingInput{ + AccountId: aws.String(parsedArn.AccountID), + Bucket: aws.String(identifier), + Tagging: &awstypes.Tagging{ + TagSet: svcS3Tags(newTags.Merge(ignoredTags)), + }, + } + + _, err := conn.PutBucketTagging(ctx, &input, optFns...) 
+ + if err != nil { + return fmt.Errorf("setting resource tags (%s): %w", identifier, err) + } + } else if len(oldTags) > 0 && len(ignoredTags) == 0 { + input := s3control.DeleteBucketTaggingInput{ + AccountId: aws.String(parsedArn.AccountID), + Bucket: aws.String(identifier), + } + + _, err := conn.DeleteBucketTagging(ctx, &input, optFns...) + + if err != nil { + return fmt.Errorf("deleting resource tags (%s): %w", identifier, err) + } + } + + return nil +} diff --git a/internal/service/s3control/service_endpoint_resolver_gen.go b/internal/service/s3control/service_endpoint_resolver_gen.go index 29593cd2fde3..f8cabada3ce8 100644 --- a/internal/service/s3control/service_endpoint_resolver_gen.go +++ b/internal/service/s3control/service_endpoint_resolver_gen.go @@ -62,7 +62,7 @@ func (r resolverV2) ResolveEndpoint(ctx context.Context, params s3control.Endpoi }) params.UseFIPS = aws.Bool(false) } else { - err = fmt.Errorf("looking up s3control endpoint %q: %s", hostname, err) + err = fmt.Errorf("looking up s3control endpoint %q: %w", hostname, err) return } } else { diff --git a/internal/service/s3control/service_package_gen.go b/internal/service/s3control/service_package_gen.go index 5533d06a0115..afef12c76f43 100644 --- a/internal/service/s3control/service_package_gen.go +++ b/internal/service/s3control/service_package_gen.go @@ -7,7 +7,6 @@ import ( "unique" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/s3control" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -19,7 +18,17 @@ import ( type servicePackage struct{} func (p *servicePackage) FrameworkDataSources(ctx context.Context) []*inttypes.ServicePackageFrameworkDataSource { - return []*inttypes.ServicePackageFrameworkDataSource{} + return []*inttypes.ServicePackageFrameworkDataSource{ + { + Factory: newAccessPointDataSource, + TypeName: "aws_s3_access_point", + Name: "Access 
Point", + Tags: unique.Make(inttypes.ServicePackageResourceTags{ + IdentifierAttribute: names.AttrARN, + }), + Region: unique.Make(inttypes.ResourceRegionDefault()), + }, + } } func (p *servicePackage) FrameworkResources(ctx context.Context) []*inttypes.ServicePackageFrameworkResource { @@ -28,15 +37,19 @@ func (p *servicePackage) FrameworkResources(ctx context.Context) []*inttypes.Ser Factory: newAccessGrantResource, TypeName: "aws_s3control_access_grant", Name: "Access Grant", - Tags: unique.Make(inttypes.ServicePackageResourceTags{}), - Region: unique.Make(inttypes.ResourceRegionDefault()), + Tags: unique.Make(inttypes.ServicePackageResourceTags{ + IdentifierAttribute: "access_grant_arn", + }), + Region: unique.Make(inttypes.ResourceRegionDefault()), }, { Factory: newAccessGrantsInstanceResource, TypeName: "aws_s3control_access_grants_instance", Name: "Access Grants Instance", - Tags: unique.Make(inttypes.ServicePackageResourceTags{}), - Region: unique.Make(inttypes.ResourceRegionDefault()), + Tags: unique.Make(inttypes.ServicePackageResourceTags{ + IdentifierAttribute: "access_grants_instance_arn", + }), + Region: unique.Make(inttypes.ResourceRegionDefault()), }, { Factory: newAccessGrantsInstanceResourcePolicyResource, @@ -48,8 +61,10 @@ func (p *servicePackage) FrameworkResources(ctx context.Context) []*inttypes.Ser Factory: newAccessGrantsLocationResource, TypeName: "aws_s3control_access_grants_location", Name: "Access Grants Location", - Tags: unique.Make(inttypes.ServicePackageResourceTags{}), - Region: unique.Make(inttypes.ResourceRegionDefault()), + Tags: unique.Make(inttypes.ServicePackageResourceTags{ + IdentifierAttribute: "access_grants_location_arn", + }), + Region: unique.Make(inttypes.ResourceRegionDefault()), }, { Factory: newDirectoryBucketAccessPointScopeResource, @@ -83,7 +98,10 @@ func (p *servicePackage) SDKResources(ctx context.Context) []*inttypes.ServicePa Factory: resourceAccessPoint, TypeName: "aws_s3_access_point", Name: "Access 
Point", - Region: unique.Make(inttypes.ResourceRegionDefault()), + Tags: unique.Make(inttypes.ServicePackageResourceTags{ + IdentifierAttribute: names.AttrARN, + }), + Region: unique.Make(inttypes.ResourceRegionDefault()), }, { Factory: resourceAccountPublicAccessBlock, @@ -109,6 +127,12 @@ func (p *servicePackage) SDKResources(ctx context.Context) []*inttypes.ServicePa Name: "Bucket", Tags: unique.Make(inttypes.ServicePackageResourceTags{}), Region: unique.Make(inttypes.ResourceRegionDefault()), + Identity: inttypes.RegionalARNIdentity( + inttypes.WithIdentityDuplicateAttrs(names.AttrID), + ), + Import: inttypes.SDKv2Import{ + WrappedImport: true, + }, }, { Factory: resourceBucketLifecycleConfiguration, @@ -179,7 +203,7 @@ func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) ( func(o *s3control.Options) { if inContext, ok := conns.FromContext(ctx); ok && inContext.VCREnabled() { tflog.Info(ctx, "overriding retry behavior to immediately return VCR errors") - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(vcr.InteractionNotFoundRetryableFunc)) + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), vcr.InteractionNotFoundRetryableFunc) } }, withExtraOptions(ctx, p, config), diff --git a/internal/service/s3control/storage_lens_configuration.go b/internal/service/s3control/storage_lens_configuration.go index 236c93f9b7cd..d19af7076f76 100644 --- a/internal/service/s3control/storage_lens_configuration.go +++ b/internal/service/s3control/storage_lens_configuration.go @@ -620,7 +620,7 @@ func storageLensConfigurationUpdateTags(ctx context.Context, conn *s3control.Cli allTags, err := storageLensConfigurationListTags(ctx, conn, accountID, configID) if err != nil { - return fmt.Errorf("listing tags: %s", err) + return fmt.Errorf("listing tags: %w", err) } ignoredTags := allTags.Ignore(oldTags).Ignore(newTags) @@ -635,7 +635,7 @@ func storageLensConfigurationUpdateTags(ctx 
context.Context, conn *s3control.Cli _, err := conn.PutStorageLensConfigurationTagging(ctx, input) if err != nil { - return fmt.Errorf("setting tags: %s", err) + return fmt.Errorf("setting tags: %w", err) } } else if len(oldTags) > 0 && len(ignoredTags) == 0 { input := &s3control.DeleteStorageLensConfigurationTaggingInput{ @@ -646,7 +646,7 @@ func storageLensConfigurationUpdateTags(ctx context.Context, conn *s3control.Cli _, err := conn.DeleteStorageLensConfigurationTagging(ctx, input) if err != nil { - return fmt.Errorf("deleting tags: %s", err) + return fmt.Errorf("deleting tags: %w", err) } } diff --git a/internal/service/s3control/sweep.go b/internal/service/s3control/sweep.go index d90419509dd5..91556fd1e10d 100644 --- a/internal/service/s3control/sweep.go +++ b/internal/service/s3control/sweep.go @@ -68,7 +68,7 @@ func sweepAccessGrants(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.S3ControlClient(ctx) accountID := client.AccountID(ctx) @@ -110,7 +110,7 @@ func sweepAccessGrantsInstances(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.S3ControlClient(ctx) accountID := client.AccountID(ctx) @@ -152,7 +152,7 @@ func sweepAccessGrantsLocations(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.S3ControlClient(ctx) accountID := client.AccountID(ctx) @@ -194,7 +194,7 @@ func sweepAccessPoints(region string) error { ctx := sweep.Context(region) client, err := 
sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.S3ControlClient(ctx) accountID := client.AccountID(ctx) @@ -218,7 +218,7 @@ func sweepAccessPoints(region string) error { for _, v := range page.AccessPointList { arn := aws.ToString(v.AccessPointArn) - id, err := AccessPointCreateResourceID(arn) + id, err := accessPointCreateResourceID(arn) if err != nil { log.Printf("[WARN] S3 Access Point (%s): %s", arn, err) continue @@ -249,7 +249,7 @@ func sweepMultiRegionAccessPoints(region string) error { } client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.S3ControlClient(ctx) accountID := client.AccountID(ctx) @@ -293,7 +293,7 @@ func sweepObjectLambdaAccessPoints(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.S3ControlClient(ctx) accountID := client.AccountID(ctx) @@ -341,7 +341,7 @@ func sweepStorageLensConfigurations(region string) error { } client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.S3ControlClient(ctx) accountID := client.AccountID(ctx) diff --git a/internal/service/s3control/tags_gen.go b/internal/service/s3control/tags_gen.go index 28ef2bcfafd4..0fa3e0b2d292 100644 --- a/internal/service/s3control/tags_gen.go +++ b/internal/service/s3control/tags_gen.go @@ -3,8 +3,8 @@ package s3control import ( "context" - "fmt" + "github.com/YakDriver/smarterr" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/s3control" awstypes 
"github.com/aws/aws-sdk-go-v2/service/s3control/types" @@ -28,7 +28,7 @@ func listTags(ctx context.Context, conn *s3control.Client, identifier, resourceT output, err := conn.ListTagsForResource(ctx, &input, optFns...) if err != nil { - return tftags.New(ctx, nil), err + return tftags.New(ctx, nil), smarterr.NewError(err) } return keyValueTags(ctx, output.Tags), nil @@ -36,11 +36,12 @@ func listTags(ctx context.Context, conn *s3control.Client, identifier, resourceT // ListTags lists s3control service tags and set them in Context. // It is called from outside this package. -func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier, resourceType string) error { - tags, err := listTags(ctx, meta.(*conns.AWSClient).S3ControlClient(ctx), identifier, resourceType) +func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier string) error { + c := meta.(*conns.AWSClient) + tags, err := listTags(ctx, c.S3ControlClient(ctx), identifier, c.AccountID(ctx)) if err != nil { - return err + return smarterr.NewError(err) } if inContext, ok := tftags.FromContext(ctx); ok { @@ -119,7 +120,7 @@ func updateTags(ctx context.Context, conn *s3control.Client, identifier, resourc _, err := conn.UntagResource(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("untagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } @@ -135,7 +136,7 @@ func updateTags(ctx context.Context, conn *s3control.Client, identifier, resourc _, err := conn.TagResource(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("tagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } @@ -144,6 +145,7 @@ func updateTags(ctx context.Context, conn *s3control.Client, identifier, resourc // UpdateTags updates s3control service tags. // It is called from outside this package. 
-func (p *servicePackage) UpdateTags(ctx context.Context, meta any, identifier, resourceType string, oldTags, newTags any) error { - return updateTags(ctx, meta.(*conns.AWSClient).S3ControlClient(ctx), identifier, resourceType, oldTags, newTags) +func (p *servicePackage) UpdateTags(ctx context.Context, meta any, identifier string, oldTags, newTags any) error { + c := meta.(*conns.AWSClient) + return updateTags(ctx, c.S3ControlClient(ctx), identifier, c.AccountID(ctx), oldTags, newTags) } diff --git a/internal/service/s3control/testdata/AccountPublicAccessBlock/basic_v5.100.0/main_gen.tf b/internal/service/s3control/testdata/AccountPublicAccessBlock/basic_v5.100.0/main_gen.tf new file mode 100644 index 000000000000..56cc28d5fb71 --- /dev/null +++ b/internal/service/s3control/testdata/AccountPublicAccessBlock/basic_v5.100.0/main_gen.tf @@ -0,0 +1,15 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resource "aws_s3_account_public_access_block" "test" {} + +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "5.100.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/s3control/testdata/AccountPublicAccessBlock/basic_v6.0.0/main_gen.tf b/internal/service/s3control/testdata/AccountPublicAccessBlock/basic_v6.0.0/main_gen.tf new file mode 100644 index 000000000000..d22b1d9f7a89 --- /dev/null +++ b/internal/service/s3control/testdata/AccountPublicAccessBlock/basic_v6.0.0/main_gen.tf @@ -0,0 +1,15 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_s3_account_public_access_block" "test" {} + +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "6.0.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/s3control/testdata/Bucket/basic/main_gen.tf b/internal/service/s3control/testdata/Bucket/basic/main_gen.tf new file mode 100644 index 000000000000..77ac4cfbab81 --- /dev/null +++ b/internal/service/s3control/testdata/Bucket/basic/main_gen.tf @@ -0,0 +1,18 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resource "aws_s3control_bucket" "test" { + bucket = var.rName + outpost_id = data.aws_outposts_outpost.test.id +} + +data "aws_outposts_outposts" "test" {} + +data "aws_outposts_outpost" "test" { + id = tolist(data.aws_outposts_outposts.test.ids)[0] +} +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} diff --git a/internal/service/s3control/testdata/Bucket/basic_v6.14.1/main_gen.tf b/internal/service/s3control/testdata/Bucket/basic_v6.14.1/main_gen.tf new file mode 100644 index 000000000000..06ddeff98ffa --- /dev/null +++ b/internal/service/s3control/testdata/Bucket/basic_v6.14.1/main_gen.tf @@ -0,0 +1,28 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_s3control_bucket" "test" { + bucket = var.rName + outpost_id = data.aws_outposts_outpost.test.id +} + +data "aws_outposts_outposts" "test" {} + +data "aws_outposts_outpost" "test" { + id = tolist(data.aws_outposts_outposts.test.ids)[0] +} +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "6.14.1" + } + } +} + +provider "aws" {} diff --git a/internal/service/s3control/testdata/Bucket/region_override/main_gen.tf b/internal/service/s3control/testdata/Bucket/region_override/main_gen.tf new file mode 100644 index 000000000000..f0459ad6c1a1 --- /dev/null +++ b/internal/service/s3control/testdata/Bucket/region_override/main_gen.tf @@ -0,0 +1,26 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resource "aws_s3control_bucket" "test" { + region = var.region + + bucket = var.rName + outpost_id = data.aws_outposts_outpost.test.id +} + +data "aws_outposts_outposts" "test" {} + +data "aws_outposts_outpost" "test" { + id = tolist(data.aws_outposts_outposts.test.ids)[0] +} +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} + +variable "region" { + description = "Region to deploy resource in" + type = string + nullable = false +} diff --git a/internal/service/s3control/testdata/tmpl/bucket_tags.gtpl b/internal/service/s3control/testdata/tmpl/bucket_tags.gtpl new file mode 100644 index 000000000000..f7a2d400688c --- /dev/null +++ b/internal/service/s3control/testdata/tmpl/bucket_tags.gtpl @@ -0,0 +1,12 @@ +resource "aws_s3control_bucket" "test" { +{{- template "region" }} + bucket = var.rName + outpost_id = data.aws_outposts_outpost.test.id +{{- template "tags" }} +} + +data "aws_outposts_outposts" "test" {} + +data "aws_outposts_outpost" "test" { + id = tolist(data.aws_outposts_outposts.test.ids)[0] +} \ No newline at end of file diff 
--git a/internal/service/s3outposts/endpoint.go b/internal/service/s3outposts/endpoint.go index 0d2d8837a507..4635ac2a78ec 100644 --- a/internal/service/s3outposts/endpoint.go +++ b/internal/service/s3outposts/endpoint.go @@ -15,13 +15,13 @@ import ( "github.com/aws/aws-sdk-go-v2/service/s3outposts" awstypes "github.com/aws/aws-sdk-go-v2/service/s3outposts/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/enum" "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" + "github.com/hashicorp/terraform-provider-aws/internal/retry" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -136,7 +136,7 @@ func resourceEndpointRead(ctx context.Context, d *schema.ResourceData, meta any) endpoint, err := findEndpointByARN(ctx, conn, d.Id()) - if !d.IsNewResource() && tfresource.NotFound(err) { + if !d.IsNewResource() && retry.NotFound(err) { log.Printf("[WARN] S3 Outposts Endpoint %s not found, removing from state", d.Id()) d.SetId("") return diags @@ -236,8 +236,7 @@ func findEndpoints(ctx context.Context, conn *s3outposts.Client, input *s3outpos if errs.IsA[*awstypes.ResourceNotFoundException](err) { return nil, &retry.NotFoundError{ - LastError: err, - LastRequest: input, + LastError: err, } } @@ -251,8 +250,8 @@ func findEndpoints(ctx context.Context, conn *s3outposts.Client, input *s3outpos return output, nil } -func statusEndpoint(ctx context.Context, conn *s3outposts.Client, arn string) retry.StateRefreshFunc { - return func() (any, string, error) { +func statusEndpoint(conn *s3outposts.Client, arn string) 
retry.StateRefreshFunc { + return func(ctx context.Context) (any, string, error) { output, err := findEndpointByARN(ctx, conn, arn) if tfresource.NotFound(err) { @@ -274,7 +273,7 @@ func waitEndpointStatusCreated(ctx context.Context, conn *s3outposts.Client, arn stateConf := &retry.StateChangeConf{ Pending: enum.Slice(awstypes.EndpointStatusPending), Target: enum.Slice(awstypes.EndpointStatusAvailable), - Refresh: statusEndpoint(ctx, conn, arn), + Refresh: statusEndpoint(conn, arn), Timeout: timeout, } @@ -282,7 +281,7 @@ func waitEndpointStatusCreated(ctx context.Context, conn *s3outposts.Client, arn if output, ok := outputRaw.(*awstypes.Endpoint); ok { if failedReason := output.FailedReason; failedReason != nil { - tfresource.SetLastError(err, fmt.Errorf("%s: %s", aws.ToString(failedReason.ErrorCode), aws.ToString(failedReason.Message))) + retry.SetLastError(err, fmt.Errorf("%s: %s", aws.ToString(failedReason.ErrorCode), aws.ToString(failedReason.Message))) } return output, err diff --git a/internal/service/s3outposts/endpoint_test.go b/internal/service/s3outposts/endpoint_test.go index 9f47d083eefa..9e71eb78d97d 100644 --- a/internal/service/s3outposts/endpoint_test.go +++ b/internal/service/s3outposts/endpoint_test.go @@ -13,28 +13,27 @@ import ( "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" "github.com/hashicorp/terraform-provider-aws/internal/acctest" - "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/retry" tfs3outposts "github.com/hashicorp/terraform-provider-aws/internal/service/s3outposts" - "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/names" ) func TestAccS3OutpostsEndpoint_basic(t *testing.T) { ctx := acctest.Context(t) resourceName := "aws_s3outposts_endpoint.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := 
acctest.RandomWithPrefix(t, acctest.ResourcePrefix) rInt := sdkacctest.RandIntRange(0, 255) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckOutpostsOutposts(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.S3OutpostsServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckEndpointDestroy(ctx), + CheckDestroy: testAccCheckEndpointDestroy(ctx, t), Steps: []resource.TestStep{ { Config: testAccEndpointConfig_basic(rName, rInt), Check: resource.ComposeTestCheckFunc( - testAccCheckEndpointExists(ctx, resourceName), + testAccCheckEndpointExists(ctx, t, resourceName), acctest.MatchResourceAttrRegionalARN(ctx, resourceName, names.AttrARN, "s3-outposts", regexache.MustCompile(`outpost/[^/]+/endpoint/[0-9a-z]+`)), resource.TestCheckResourceAttrSet(resourceName, names.AttrCreationTime), resource.TestCheckResourceAttrPair(resourceName, names.AttrCIDRBlock, "aws_vpc.test", names.AttrCIDRBlock), @@ -58,19 +57,19 @@ func TestAccS3OutpostsEndpoint_basic(t *testing.T) { func TestAccS3OutpostsEndpoint_private(t *testing.T) { ctx := acctest.Context(t) resourceName := "aws_s3outposts_endpoint.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) rInt := sdkacctest.RandIntRange(0, 255) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckOutpostsOutposts(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.S3OutpostsServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckEndpointDestroy(ctx), + CheckDestroy: testAccCheckEndpointDestroy(ctx, t), Steps: []resource.TestStep{ { Config: testAccEndpointConfig_private(rName, rInt), Check: resource.ComposeTestCheckFunc( - testAccCheckEndpointExists(ctx, resourceName), + 
testAccCheckEndpointExists(ctx, t, resourceName), acctest.MatchResourceAttrRegionalARN(ctx, resourceName, names.AttrARN, "s3-outposts", regexache.MustCompile(`outpost/[^/]+/endpoint/[0-9a-z]+`)), resource.TestCheckResourceAttrSet(resourceName, names.AttrCreationTime), resource.TestCheckResourceAttrPair(resourceName, names.AttrCIDRBlock, "aws_vpc.test", names.AttrCIDRBlock), @@ -94,19 +93,19 @@ func TestAccS3OutpostsEndpoint_private(t *testing.T) { func TestAccS3OutpostsEndpoint_customerOwnedIPv4Pool(t *testing.T) { ctx := acctest.Context(t) resourceName := "aws_s3outposts_endpoint.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) rInt := sdkacctest.RandIntRange(0, 255) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckOutpostsOutposts(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.S3OutpostsServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckEndpointDestroy(ctx), + CheckDestroy: testAccCheckEndpointDestroy(ctx, t), Steps: []resource.TestStep{ { Config: testAccEndpointConfig_customerOwnedIPv4Pool(rName, rInt), Check: resource.ComposeTestCheckFunc( - testAccCheckEndpointExists(ctx, resourceName), + testAccCheckEndpointExists(ctx, t, resourceName), acctest.MatchResourceAttrRegionalARN(ctx, resourceName, names.AttrARN, "s3-outposts", regexache.MustCompile(`outpost/[^/]+/endpoint/[0-9a-z]+`)), resource.TestCheckResourceAttrSet(resourceName, names.AttrCreationTime), resource.TestCheckResourceAttrPair(resourceName, names.AttrCIDRBlock, "aws_vpc.test", names.AttrCIDRBlock), @@ -131,19 +130,19 @@ func TestAccS3OutpostsEndpoint_customerOwnedIPv4Pool(t *testing.T) { func TestAccS3OutpostsEndpoint_disappears(t *testing.T) { ctx := acctest.Context(t) resourceName := "aws_s3outposts_endpoint.test" - rName := 
sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) rInt := sdkacctest.RandIntRange(0, 255) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckOutpostsOutposts(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.S3OutpostsServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckEndpointDestroy(ctx), + CheckDestroy: testAccCheckEndpointDestroy(ctx, t), Steps: []resource.TestStep{ { Config: testAccEndpointConfig_basic(rName, rInt), Check: resource.ComposeTestCheckFunc( - testAccCheckEndpointExists(ctx, resourceName), + testAccCheckEndpointExists(ctx, t, resourceName), acctest.CheckResourceDisappears(ctx, acctest.Provider, tfs3outposts.ResourceEndpoint(), resourceName), ), ExpectNonEmptyPlan: true, @@ -152,9 +151,9 @@ func TestAccS3OutpostsEndpoint_disappears(t *testing.T) { }) } -func testAccCheckEndpointDestroy(ctx context.Context) resource.TestCheckFunc { +func testAccCheckEndpointDestroy(ctx context.Context, t *testing.T) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).S3OutpostsClient(ctx) + conn := acctest.ProviderMeta(ctx, t).S3OutpostsClient(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != "aws_s3outposts_endpoint" { @@ -163,7 +162,7 @@ func testAccCheckEndpointDestroy(ctx context.Context) resource.TestCheckFunc { _, err := tfs3outposts.FindEndpointByARN(ctx, conn, rs.Primary.ID) - if tfresource.NotFound(err) { + if retry.NotFound(err) { continue } @@ -178,7 +177,7 @@ func testAccCheckEndpointDestroy(ctx context.Context) resource.TestCheckFunc { } } -func testAccCheckEndpointExists(ctx context.Context, n string) resource.TestCheckFunc { +func testAccCheckEndpointExists(ctx context.Context, t *testing.T, n string) resource.TestCheckFunc { return func(s *terraform.State) 
error { rs, ok := s.RootModule().Resources[n] if !ok { @@ -188,7 +187,7 @@ func testAccCheckEndpointExists(ctx context.Context, n string) resource.TestChec return fmt.Errorf("No S3 Outposts Endpoint ID is set") } - conn := acctest.Provider.Meta().(*conns.AWSClient).S3OutpostsClient(ctx) + conn := acctest.ProviderMeta(ctx, t).S3OutpostsClient(ctx) _, err := tfs3outposts.FindEndpointByARN(ctx, conn, rs.Primary.ID) diff --git a/internal/service/s3outposts/service_endpoint_resolver_gen.go b/internal/service/s3outposts/service_endpoint_resolver_gen.go index f457b581e30a..346ca329bd22 100644 --- a/internal/service/s3outposts/service_endpoint_resolver_gen.go +++ b/internal/service/s3outposts/service_endpoint_resolver_gen.go @@ -62,7 +62,7 @@ func (r resolverV2) ResolveEndpoint(ctx context.Context, params s3outposts.Endpo }) params.UseFIPS = aws.Bool(false) } else { - err = fmt.Errorf("looking up s3outposts endpoint %q: %s", hostname, err) + err = fmt.Errorf("looking up s3outposts endpoint %q: %w", hostname, err) return } } else { diff --git a/internal/service/s3outposts/service_endpoints_gen_test.go b/internal/service/s3outposts/service_endpoints_gen_test.go index 7bf7c0cd9a2a..2ba1a8a5cb95 100644 --- a/internal/service/s3outposts/service_endpoints_gen_test.go +++ b/internal/service/s3outposts/service_endpoints_gen_test.go @@ -521,7 +521,7 @@ func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { ) } -var errCancelOperation = fmt.Errorf("Test: Canceling request") +var errCancelOperation = errors.New("Test: Canceling request") func addCancelRequestMiddleware() func(*middleware.Stack) error { return func(stack *middleware.Stack) error { diff --git a/internal/service/s3outposts/service_package_gen.go b/internal/service/s3outposts/service_package_gen.go index 69aaf7382484..c95d9137063d 100644 --- a/internal/service/s3outposts/service_package_gen.go +++ b/internal/service/s3outposts/service_package_gen.go @@ -7,7 +7,6 @@ import ( "unique" 
"github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/s3outposts" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -64,7 +63,7 @@ func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) ( func(o *s3outposts.Options) { if inContext, ok := conns.FromContext(ctx); ok && inContext.VCREnabled() { tflog.Info(ctx, "overriding retry behavior to immediately return VCR errors") - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(vcr.InteractionNotFoundRetryableFunc)) + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), vcr.InteractionNotFoundRetryableFunc) } }, withExtraOptions(ctx, p, config), diff --git a/internal/service/s3tables/exports_test.go b/internal/service/s3tables/exports_test.go index d50b5c17dc29..087f059833b2 100644 --- a/internal/service/s3tables/exports_test.go +++ b/internal/service/s3tables/exports_test.go @@ -4,28 +4,21 @@ package s3tables var ( - NewResourceNamespace = newNamespaceResource - NewResourceTable = newTableResource - NewResourceTableBucket = newTableBucketResource - NewResourceTableBucketPolicy = newTableBucketPolicyResource - ResourceTablePolicy = newTablePolicyResource + ResourceNamespace = newNamespaceResource + ResourceTable = newTableResource + ResourceTableBucket = newTableBucketResource + ResourceTableBucketPolicy = newTableBucketPolicyResource + ResourceTablePolicy = newTablePolicyResource - FindNamespace = findNamespace - FindTable = findTable - FindTableBucket = findTableBucket - FindTableBucketPolicy = findTableBucketPolicy - FindTablePolicy = findTablePolicy + FindNamespaceByTwoPartKey = findNamespaceByTwoPartKey + FindTableByThreePartKey = findTableByThreePartKey + FindTableBucketByARN = findTableBucketByARN + FindTableBucketPolicyByARN = findTableBucketPolicyByARN + FindTablePolicyByThreePartKey = 
findTablePolicyByThreePartKey TableIDFromTableARN = tableIDFromTableARN ) -const ( - ResNameNamespace = resNameNamespace - ResNameTableBucket = resNameTableBucket - - NamespaceIDSeparator = namespaceIDSeparator -) - type ( TableIdentifier = tableIdentifier ) diff --git a/internal/service/s3tables/namespace.go b/internal/service/s3tables/namespace.go index 4f539b6f317f..b46d4ccfd8c8 100644 --- a/internal/service/s3tables/namespace.go +++ b/internal/service/s3tables/namespace.go @@ -24,10 +24,10 @@ import ( "github.com/hashicorp/terraform-plugin-framework/tfsdk" "github.com/hashicorp/terraform-plugin-framework/types" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" - "github.com/hashicorp/terraform-provider-aws/internal/create" "github.com/hashicorp/terraform-provider-aws/internal/errs" + "github.com/hashicorp/terraform-provider-aws/internal/errs/fwdiag" "github.com/hashicorp/terraform-provider-aws/internal/framework" - "github.com/hashicorp/terraform-provider-aws/internal/framework/flex" + fwflex "github.com/hashicorp/terraform-provider-aws/internal/framework/flex" fwtypes "github.com/hashicorp/terraform-provider-aws/internal/framework/types" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/names" @@ -38,17 +38,13 @@ func newNamespaceResource(_ context.Context) (resource.ResourceWithConfigure, er return &namespaceResource{}, nil } -const ( - resNameNamespace = "Namespace" -) - type namespaceResource struct { framework.ResourceWithModel[namespaceResourceModel] framework.WithNoUpdate } -func (r *namespaceResource) Schema(ctx context.Context, req resource.SchemaRequest, resp *resource.SchemaResponse) { - resp.Schema = schema.Schema{ +func (r *namespaceResource) Schema(ctx context.Context, request resource.SchemaRequest, response *resource.SchemaResponse) { + response.Schema = schema.Schema{ Attributes: map[string]schema.Attribute{ names.AttrCreatedAt: schema.StringAttribute{ CustomType: 
timetypes.RFC3339Type{}, @@ -87,151 +83,146 @@ func (r *namespaceResource) Schema(ctx context.Context, req resource.SchemaReque } } -func (r *namespaceResource) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { - conn := r.Meta().S3TablesClient(ctx) - - var plan namespaceResourceModel - resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...) - if resp.Diagnostics.HasError() { +func (r *namespaceResource) Create(ctx context.Context, request resource.CreateRequest, response *resource.CreateResponse) { + var data namespaceResourceModel + response.Diagnostics.Append(request.Plan.Get(ctx, &data)...) + if response.Diagnostics.HasError() { return } - var input s3tables.CreateNamespaceInput - resp.Diagnostics.Append(flex.Expand(ctx, plan, &input)...) - if resp.Diagnostics.HasError() { - return + conn := r.Meta().S3TablesClient(ctx) + + namespace, tableBucketARN := fwflex.StringValueFromFramework(ctx, data.Namespace), fwflex.StringValueFromFramework(ctx, data.TableBucketARN) + input := s3tables.CreateNamespaceInput{ + Namespace: []string{namespace}, + TableBucketARN: aws.String(tableBucketARN), } - input.Namespace = []string{plan.Namespace.ValueString()} - out, err := conn.CreateNamespace(ctx, &input) + _, err := conn.CreateNamespace(ctx, &input) + if err != nil { - resp.Diagnostics.AddError( - create.ProblemStandardMessage(names.S3Tables, create.ErrActionCreating, resNameNamespace, plan.Namespace.String(), err), - err.Error(), - ) - return - } - if out == nil { - resp.Diagnostics.AddError( - create.ProblemStandardMessage(names.S3Tables, create.ErrActionCreating, resNameNamespace, plan.Namespace.String(), nil), - errors.New("empty output").Error(), - ) + response.Diagnostics.AddError(fmt.Sprintf("creating S3 Tables Namespace (%s)", namespace), err.Error()) + return } - namespace, err := findNamespace(ctx, conn, plan.TableBucketARN.ValueString(), out.Namespace[0]) + output, err := findNamespaceByTwoPartKey(ctx, conn, tableBucketARN, 
namespace) + if err != nil { - resp.Diagnostics.AddError( - create.ProblemStandardMessage(names.S3Tables, create.ErrActionCreating, resNameNamespace, plan.Namespace.String(), err), - err.Error(), - ) + response.Diagnostics.AddError(fmt.Sprintf("reading S3 Tables Namespace (%s)", namespace), err.Error()) + return } - resp.Diagnostics.Append(flex.Flatten(ctx, namespace, &plan)...) - if resp.Diagnostics.HasError() { + response.Diagnostics.Append(fwflex.Flatten(ctx, output, &data)...) + if response.Diagnostics.HasError() { return } - plan.Namespace = types.StringValue(out.Namespace[0]) - resp.Diagnostics.Append(resp.State.Set(ctx, plan)...) + response.Diagnostics.Append(response.State.Set(ctx, data)...) } -func (r *namespaceResource) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { - conn := r.Meta().S3TablesClient(ctx) - - var state namespaceResourceModel - resp.Diagnostics.Append(req.State.Get(ctx, &state)...) - if resp.Diagnostics.HasError() { +func (r *namespaceResource) Read(ctx context.Context, request resource.ReadRequest, response *resource.ReadResponse) { + var data namespaceResourceModel + response.Diagnostics.Append(request.State.Get(ctx, &data)...) 
+ if response.Diagnostics.HasError() { return } - out, err := findNamespace(ctx, conn, state.TableBucketARN.ValueString(), state.Namespace.ValueString()) + conn := r.Meta().S3TablesClient(ctx) + + namespace, tableBucketARN := fwflex.StringValueFromFramework(ctx, data.Namespace), fwflex.StringValueFromFramework(ctx, data.TableBucketARN) + output, err := findNamespaceByTwoPartKey(ctx, conn, tableBucketARN, namespace) + if tfresource.NotFound(err) { - resp.State.RemoveResource(ctx) + response.Diagnostics.Append(fwdiag.NewResourceNotFoundWarningDiagnostic(err)) + response.State.RemoveResource(ctx) + return } + if err != nil { - resp.Diagnostics.AddError( - create.ProblemStandardMessage(names.S3Tables, create.ErrActionReading, resNameNamespace, state.Namespace.String(), err), - err.Error(), - ) + response.Diagnostics.AddError(fmt.Sprintf("reading S3 Tables Namespace (%s)", namespace), err.Error()) + return } - resp.Diagnostics.Append(flex.Flatten(ctx, out, &state)...) - if resp.Diagnostics.HasError() { + response.Diagnostics.Append(fwflex.Flatten(ctx, output, &data)...) + if response.Diagnostics.HasError() { return } - resp.Diagnostics.Append(resp.State.Set(ctx, &state)...) + response.Diagnostics.Append(response.State.Set(ctx, &data)...) } -func (r *namespaceResource) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { - conn := r.Meta().S3TablesClient(ctx) - - var state namespaceResourceModel - resp.Diagnostics.Append(req.State.Get(ctx, &state)...) - if resp.Diagnostics.HasError() { +func (r *namespaceResource) Delete(ctx context.Context, request resource.DeleteRequest, response *resource.DeleteResponse) { + var data namespaceResourceModel + response.Diagnostics.Append(request.State.Get(ctx, &data)...) 
+ if response.Diagnostics.HasError() { return } + conn := r.Meta().S3TablesClient(ctx) + + namespace, tableBucketARN := fwflex.StringValueFromFramework(ctx, data.Namespace), fwflex.StringValueFromFramework(ctx, data.TableBucketARN) input := s3tables.DeleteNamespaceInput{ - Namespace: state.Namespace.ValueStringPointer(), - TableBucketARN: state.TableBucketARN.ValueStringPointer(), + Namespace: aws.String(namespace), + TableBucketARN: aws.String(tableBucketARN), } - _, err := conn.DeleteNamespace(ctx, &input) + + if errs.IsA[*awstypes.NotFoundException](err) { + return + } + if err != nil { - if errs.IsA[*awstypes.NotFoundException](err) { - return - } + response.Diagnostics.AddError(fmt.Sprintf("deleting S3 Tables Namespace (%s)", namespace), err.Error()) - resp.Diagnostics.AddError( - create.ProblemStandardMessage(names.S3Tables, create.ErrActionDeleting, resNameNamespace, state.Namespace.String(), err), - err.Error(), - ) return } } -func (r *namespaceResource) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) { - identifier, err := parseNamespaceIdentifier(req.ID) +func (r *namespaceResource) ImportState(ctx context.Context, request resource.ImportStateRequest, response *resource.ImportStateResponse) { + identifier, err := parseNamespaceIdentifier(request.ID) if err != nil { - resp.Diagnostics.AddError( + response.Diagnostics.AddError( "Invalid Import ID", "Import IDs for S3 Tables Namespaces must use the format "+namespaceIDSeparator+".\n"+ - fmt.Sprintf("Had %q", req.ID), + fmt.Sprintf("Had %q", request.ID), ) return } - identifier.PopulateState(ctx, &resp.State, &resp.Diagnostics) + identifier.PopulateState(ctx, &response.State, &response.Diagnostics) } -func findNamespace(ctx context.Context, conn *s3tables.Client, bucketARN, name string) (*s3tables.GetNamespaceOutput, error) { - in := s3tables.GetNamespaceInput{ - Namespace: aws.String(name), - TableBucketARN: aws.String(bucketARN), +func 
findNamespaceByTwoPartKey(ctx context.Context, conn *s3tables.Client, tableBucketARN, namespace string) (*s3tables.GetNamespaceOutput, error) { + input := s3tables.GetNamespaceInput{ + Namespace: aws.String(namespace), + TableBucketARN: aws.String(tableBucketARN), } - out, err := conn.GetNamespace(ctx, &in) - if err != nil { - if errs.IsA[*awstypes.NotFoundException](err) { - return nil, &retry.NotFoundError{ - LastError: err, - LastRequest: in, - } + return findNamespace(ctx, conn, &input) +} + +func findNamespace(ctx context.Context, conn *s3tables.Client, input *s3tables.GetNamespaceInput) (*s3tables.GetNamespaceOutput, error) { + output, err := conn.GetNamespace(ctx, input) + + if errs.IsA[*awstypes.NotFoundException](err) { + return nil, &retry.NotFoundError{ + LastError: err, } + } + if err != nil { return nil, err } - if out == nil { - return nil, tfresource.NewEmptyResultError(in) + if output == nil { + return nil, tfresource.NewEmptyResultError(input) } - return out, nil + return output, nil } type namespaceResourceModel struct { diff --git a/internal/service/s3tables/namespace_test.go b/internal/service/s3tables/namespace_test.go index e570e9b378da..b8d9abb6fc38 100644 --- a/internal/service/s3tables/namespace_test.go +++ b/internal/service/s3tables/namespace_test.go @@ -18,7 +18,6 @@ import ( "github.com/hashicorp/terraform-plugin-testing/terraform" "github.com/hashicorp/terraform-provider-aws/internal/acctest" "github.com/hashicorp/terraform-provider-aws/internal/conns" - "github.com/hashicorp/terraform-provider-aws/internal/create" "github.com/hashicorp/terraform-provider-aws/internal/errs/fwdiag" tfs3tables "github.com/hashicorp/terraform-provider-aws/internal/service/s3tables" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" @@ -56,7 +55,7 @@ func TestAccS3TablesNamespace_basic(t *testing.T) { { ResourceName: resourceName, ImportState: true, - ImportStateIdFunc: testAccNamespaceImportStateIdFunc(resourceName), + ImportStateIdFunc: 
acctest.AttrsImportStateIdFunc(resourceName, ";", "table_bucket_arn", names.AttrNamespace), ImportStateVerify: true, ImportStateVerifyIdentifierAttribute: names.AttrNamespace, }, @@ -85,7 +84,7 @@ func TestAccS3TablesNamespace_disappears(t *testing.T) { Config: testAccNamespaceConfig_basic(rName, bucketName), Check: resource.ComposeAggregateTestCheckFunc( testAccCheckNamespaceExists(ctx, resourceName, &namespace), - acctest.CheckFrameworkResourceDisappearsWithStateFunc(ctx, acctest.Provider, tfs3tables.NewResourceNamespace, resourceName, namespaceDisappearsStateFunc), + acctest.CheckFrameworkResourceDisappearsWithStateFunc(ctx, acctest.Provider, tfs3tables.ResourceNamespace, resourceName, namespaceDisappearsStateFunc), ), ExpectNonEmptyPlan: true, }, @@ -102,56 +101,44 @@ func testAccCheckNamespaceDestroy(ctx context.Context) resource.TestCheckFunc { continue } - _, err := tfs3tables.FindNamespace(ctx, conn, rs.Primary.Attributes["table_bucket_arn"], rs.Primary.Attributes[names.AttrNamespace]) + _, err := tfs3tables.FindNamespaceByTwoPartKey(ctx, conn, rs.Primary.Attributes["table_bucket_arn"], rs.Primary.Attributes[names.AttrNamespace]) + if tfresource.NotFound(err) { - return nil + continue } + if err != nil { - return create.Error(names.S3Tables, create.ErrActionCheckingDestroyed, tfs3tables.ResNameNamespace, rs.Primary.ID, err) + return err } - return create.Error(names.S3Tables, create.ErrActionCheckingDestroyed, tfs3tables.ResNameNamespace, rs.Primary.ID, errors.New("not destroyed")) + return fmt.Errorf("S3 Tables Namespace %s still exists", rs.Primary.Attributes[names.AttrNamespace]) } return nil } } -func testAccCheckNamespaceExists(ctx context.Context, name string, namespace *s3tables.GetNamespaceOutput) resource.TestCheckFunc { +func testAccCheckNamespaceExists(ctx context.Context, n string, v *s3tables.GetNamespaceOutput) resource.TestCheckFunc { return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[name] + rs, ok := 
s.RootModule().Resources[n] if !ok { - return create.Error(names.S3Tables, create.ErrActionCheckingExistence, tfs3tables.ResNameNamespace, name, errors.New("not found")) - } - - if rs.Primary.Attributes["table_bucket_arn"] == "" || rs.Primary.Attributes[names.AttrNamespace] == "" { - return create.Error(names.S3Tables, create.ErrActionCheckingExistence, tfs3tables.ResNameNamespace, name, errors.New("not set")) + return fmt.Errorf("Not found: %s", n) } conn := acctest.Provider.Meta().(*conns.AWSClient).S3TablesClient(ctx) - resp, err := tfs3tables.FindNamespace(ctx, conn, rs.Primary.Attributes["table_bucket_arn"], rs.Primary.Attributes[names.AttrNamespace]) + output, err := tfs3tables.FindNamespaceByTwoPartKey(ctx, conn, rs.Primary.Attributes["table_bucket_arn"], rs.Primary.Attributes[names.AttrNamespace]) + if err != nil { - return create.Error(names.S3Tables, create.ErrActionCheckingExistence, tfs3tables.ResNameNamespace, rs.Primary.ID, err) + return err } - *namespace = *resp + *v = *output return nil } } -func testAccNamespaceImportStateIdFunc(resourceName string) resource.ImportStateIdFunc { - return func(s *terraform.State) (string, error) { - rs, ok := s.RootModule().Resources[resourceName] - if !ok { - return "", fmt.Errorf("not found: %s", resourceName) - } - - return rs.Primary.Attributes["table_bucket_arn"] + tfs3tables.NamespaceIDSeparator + rs.Primary.Attributes[names.AttrNamespace], nil - } -} - func namespaceDisappearsStateFunc(ctx context.Context, state *tfsdk.State, is *terraform.InstanceState) error { v, ok := is.Attributes[names.AttrNamespace] if !ok { diff --git a/internal/service/s3tables/service_endpoint_resolver_gen.go b/internal/service/s3tables/service_endpoint_resolver_gen.go index 5e03ccf49e00..69db7bbf695c 100644 --- a/internal/service/s3tables/service_endpoint_resolver_gen.go +++ b/internal/service/s3tables/service_endpoint_resolver_gen.go @@ -62,7 +62,7 @@ func (r resolverV2) ResolveEndpoint(ctx context.Context, params s3tables.Endpoin 
}) params.UseFIPS = aws.Bool(false) } else { - err = fmt.Errorf("looking up s3tables endpoint %q: %s", hostname, err) + err = fmt.Errorf("looking up s3tables endpoint %q: %w", hostname, err) return } } else { diff --git a/internal/service/s3tables/service_endpoints_gen_test.go b/internal/service/s3tables/service_endpoints_gen_test.go index c7f3191876ba..357a3ddbdc14 100644 --- a/internal/service/s3tables/service_endpoints_gen_test.go +++ b/internal/service/s3tables/service_endpoints_gen_test.go @@ -521,7 +521,7 @@ func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { ) } -var errCancelOperation = fmt.Errorf("Test: Canceling request") +var errCancelOperation = errors.New("Test: Canceling request") func addCancelRequestMiddleware() func(*middleware.Stack) error { return func(stack *middleware.Stack) error { diff --git a/internal/service/s3tables/service_package_gen.go b/internal/service/s3tables/service_package_gen.go index 970953aee87f..1db7296956b2 100644 --- a/internal/service/s3tables/service_package_gen.go +++ b/internal/service/s3tables/service_package_gen.go @@ -7,7 +7,6 @@ import ( "unique" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/s3tables" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -88,7 +87,7 @@ func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) ( func(o *s3tables.Options) { if inContext, ok := conns.FromContext(ctx); ok && inContext.VCREnabled() { tflog.Info(ctx, "overriding retry behavior to immediately return VCR errors") - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(vcr.InteractionNotFoundRetryableFunc)) + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), vcr.InteractionNotFoundRetryableFunc) } }, withExtraOptions(ctx, p, config), diff --git a/internal/service/s3tables/sweep.go 
b/internal/service/s3tables/sweep.go index a4b938162941..591e452a0b44 100644 --- a/internal/service/s3tables/sweep.go +++ b/internal/service/s3tables/sweep.go @@ -5,9 +5,11 @@ package s3tables import ( "context" + "log" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/s3tables" + awstypes "github.com/aws/aws-sdk-go-v2/service/s3tables/types" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/sweep" "github.com/hashicorp/terraform-provider-aws/internal/sweep/awsv2" @@ -16,43 +18,47 @@ import ( ) func RegisterSweepers() { - awsv2.Register("aws_s3tables_namespace", sweepNamespaces, - "aws_s3tables_table", - ) - + awsv2.Register("aws_s3tables_namespace", sweepNamespaces, "aws_s3tables_table") awsv2.Register("aws_s3tables_table", sweepTables) - - awsv2.Register("aws_s3tables_table_bucket", sweepTableBuckets, - "aws_s3tables_namespace", - ) + awsv2.Register("aws_s3tables_table_bucket", sweepTableBuckets, "aws_s3tables_namespace") } func sweepNamespaces(ctx context.Context, client *conns.AWSClient) ([]sweep.Sweepable, error) { conn := client.S3TablesClient(ctx) + var input s3tables.ListTableBucketsInput + sweepResources := make([]sweep.Sweepable, 0) - var sweepResources []sweep.Sweepable + pages := s3tables.NewListTableBucketsPaginator(conn, &input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) - tableBuckets := s3tables.NewListTableBucketsPaginator(conn, &s3tables.ListTableBucketsInput{}) - for tableBuckets.HasMorePages() { - page, err := tableBuckets.NextPage(ctx) if err != nil { return nil, err } - for _, bucket := range page.TableBuckets { - namespaces := s3tables.NewListNamespacesPaginator(conn, &s3tables.ListNamespacesInput{ - TableBucketARN: bucket.Arn, - }) - for namespaces.HasMorePages() { - page, err := namespaces.NextPage(ctx) + for _, v := range page.TableBuckets { + tableBucketARN := aws.ToString(v.Arn) + + if typ := v.Type; typ != 
awstypes.TableBucketTypeCustomer { + log.Printf("[INFO] Skipping S3 Tables Table Bucket %s: Type=%s", tableBucketARN, typ) + continue + } + + input := s3tables.ListNamespacesInput{ + TableBucketARN: aws.String(tableBucketARN), + } + pages := s3tables.NewListNamespacesPaginator(conn, &input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + if err != nil { return nil, err } - for _, namespace := range page.Namespaces { + for _, v := range page.Namespaces { sweepResources = append(sweepResources, framework.NewSweepResource(newNamespaceResource, client, - framework.NewAttribute("table_bucket_arn", aws.ToString(bucket.Arn)), - framework.NewAttribute(names.AttrNamespace, namespace.Namespace[0]), + framework.NewAttribute("table_bucket_arn", tableBucketARN), + framework.NewAttribute(names.AttrNamespace, v.Namespace[0]), )) } } @@ -64,42 +70,55 @@ func sweepNamespaces(ctx context.Context, client *conns.AWSClient) ([]sweep.Swee func sweepTables(ctx context.Context, client *conns.AWSClient) ([]sweep.Sweepable, error) { conn := client.S3TablesClient(ctx) + var input s3tables.ListTableBucketsInput + sweepResources := make([]sweep.Sweepable, 0) - var sweepResources []sweep.Sweepable + pages := s3tables.NewListTableBucketsPaginator(conn, &input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) - tableBuckets := s3tables.NewListTableBucketsPaginator(conn, &s3tables.ListTableBucketsInput{}) - for tableBuckets.HasMorePages() { - page, err := tableBuckets.NextPage(ctx) if err != nil { return nil, err } - for _, bucket := range page.TableBuckets { - namespaces := s3tables.NewListNamespacesPaginator(conn, &s3tables.ListNamespacesInput{ - TableBucketARN: bucket.Arn, - }) - for namespaces.HasMorePages() { - page, err := namespaces.NextPage(ctx) + for _, v := range page.TableBuckets { + tableBucketARN := aws.ToString(v.Arn) + + if typ := v.Type; typ != awstypes.TableBucketTypeCustomer { + log.Printf("[INFO] Skipping S3 Tables Table Bucket %s: Type=%s", 
tableBucketARN, typ) + continue + } + + input := s3tables.ListNamespacesInput{ + TableBucketARN: aws.String(tableBucketARN), + } + pages := s3tables.NewListNamespacesPaginator(conn, &input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + if err != nil { return nil, err } - for _, namespace := range page.Namespaces { - tables := s3tables.NewListTablesPaginator(conn, &s3tables.ListTablesInput{ - TableBucketARN: bucket.Arn, - Namespace: aws.String(namespace.Namespace[0]), - }) - for tables.HasMorePages() { - page, err := tables.NextPage(ctx) + for _, v := range page.Namespaces { + namespace := v.Namespace[0] + input := s3tables.ListTablesInput{ + Namespace: aws.String(namespace), + TableBucketARN: aws.String(tableBucketARN), + } + pages := s3tables.NewListTablesPaginator(conn, &input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + if err != nil { return nil, err } - for _, table := range page.Tables { + for _, v := range page.Tables { sweepResources = append(sweepResources, framework.NewSweepResource(newTableResource, client, - framework.NewAttribute("table_bucket_arn", aws.ToString(bucket.Arn)), - framework.NewAttribute(names.AttrNamespace, namespace.Namespace[0]), - framework.NewAttribute(names.AttrName, aws.ToString(table.Name)), + framework.NewAttribute("table_bucket_arn", tableBucketARN), + framework.NewAttribute(names.AttrNamespace, namespace), + framework.NewAttribute(names.AttrName, aws.ToString(v.Name)), )) } } @@ -113,19 +132,27 @@ func sweepTables(ctx context.Context, client *conns.AWSClient) ([]sweep.Sweepabl func sweepTableBuckets(ctx context.Context, client *conns.AWSClient) ([]sweep.Sweepable, error) { conn := client.S3TablesClient(ctx) + var input s3tables.ListTableBucketsInput + sweepResources := make([]sweep.Sweepable, 0) - var sweepResources []sweep.Sweepable - - pages := s3tables.NewListTableBucketsPaginator(conn, &s3tables.ListTableBucketsInput{}) + pages := s3tables.NewListTableBucketsPaginator(conn, &input) 
for pages.HasMorePages() { page, err := pages.NextPage(ctx) + if err != nil { return nil, err } - for _, bucket := range page.TableBuckets { + for _, v := range page.TableBuckets { + tableBucketARN := aws.ToString(v.Arn) + + if typ := v.Type; typ != awstypes.TableBucketTypeCustomer { + log.Printf("[INFO] Skipping S3 Tables Table Bucket %s: Type=%s", tableBucketARN, typ) + continue + } + sweepResources = append(sweepResources, framework.NewSweepResource(newTableBucketResource, client, - framework.NewAttribute(names.AttrARN, aws.ToString(bucket.Arn)), + framework.NewAttribute(names.AttrARN, tableBucketARN), )) } } diff --git a/internal/service/s3tables/table.go b/internal/service/s3tables/table.go index d0eb32add6fe..fdf15d2f7cae 100644 --- a/internal/service/s3tables/table.go +++ b/internal/service/s3tables/table.go @@ -31,10 +31,10 @@ import ( "github.com/hashicorp/terraform-plugin-framework/types" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" - "github.com/hashicorp/terraform-provider-aws/internal/create" "github.com/hashicorp/terraform-provider-aws/internal/errs" + "github.com/hashicorp/terraform-provider-aws/internal/errs/fwdiag" "github.com/hashicorp/terraform-provider-aws/internal/framework" - "github.com/hashicorp/terraform-provider-aws/internal/framework/flex" + fwflex "github.com/hashicorp/terraform-provider-aws/internal/framework/flex" fwtypes "github.com/hashicorp/terraform-provider-aws/internal/framework/types" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/names" @@ -45,16 +45,12 @@ func newTableResource(_ context.Context) (resource.ResourceWithConfigure, error) return &tableResource{}, nil } -const ( - ResNameTable = "Table" -) - type tableResource struct { framework.ResourceWithModel[tableResourceModel] } -func (r *tableResource) Schema(ctx context.Context, req resource.SchemaRequest, resp *resource.SchemaResponse) { - 
resp.Schema = schema.Schema{ +func (r *tableResource) Schema(ctx context.Context, request resource.SchemaRequest, response *resource.SchemaResponse) { + response.Schema = schema.Schema{ Attributes: map[string]schema.Attribute{ names.AttrARN: framework.ARNAttributeComputedOnly(), names.AttrCreatedAt: schema.StringAttribute{ @@ -235,438 +231,499 @@ func (r *tableResource) Schema(ctx context.Context, req resource.SchemaRequest, } } -func (r *tableResource) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { - conn := r.Meta().S3TablesClient(ctx) - - var plan tableResourceModel - resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...) - if resp.Diagnostics.HasError() { +func (r *tableResource) Create(ctx context.Context, request resource.CreateRequest, response *resource.CreateResponse) { + var data tableResourceModel + response.Diagnostics.Append(request.Plan.Get(ctx, &data)...) + if response.Diagnostics.HasError() { return } + conn := r.Meta().S3TablesClient(ctx) + + name, namespace, tableBucketARN := fwflex.StringValueFromFramework(ctx, data.Name), fwflex.StringValueFromFramework(ctx, data.Namespace), fwflex.StringValueFromFramework(ctx, data.TableBucketARN) var input s3tables.CreateTableInput - resp.Diagnostics.Append(flex.Expand(ctx, plan, &input)...) - if resp.Diagnostics.HasError() { + response.Diagnostics.Append(fwflex.Expand(ctx, data, &input)...) + if response.Diagnostics.HasError() { return } - // Handle metadata separately since it's an interface type - if !plan.Metadata.IsNull() && !plan.Metadata.IsUnknown() { - metadataModel, d := plan.Metadata.ToPtr(ctx) - resp.Diagnostics.Append(d...) - if resp.Diagnostics.HasError() { + // Handle metadata separately since it's an interface type. + if !data.Metadata.IsNull() && !data.Metadata.IsUnknown() { + metadataModel, diags := data.Metadata.ToPtr(ctx) + response.Diagnostics.Append(diags...) 
+ if response.Diagnostics.HasError() { return } - resp.Diagnostics.Append(flex.Expand(ctx, metadataModel, &input.Metadata)...) - if resp.Diagnostics.HasError() { + response.Diagnostics.Append(fwflex.Expand(ctx, metadataModel, &input.Metadata)...) + if response.Diagnostics.HasError() { return } } _, err := conn.CreateTable(ctx, &input) + if err != nil { - resp.Diagnostics.AddError( - create.ProblemStandardMessage(names.S3Tables, create.ErrActionCreating, ResNameTable, plan.Name.String(), err), - err.Error(), - ) + response.Diagnostics.AddError(fmt.Sprintf("creating S3 Tables Table (%s)", name), err.Error()) + return } - if !plan.MaintenanceConfiguration.IsUnknown() && !plan.MaintenanceConfiguration.IsNull() { - mc, d := plan.MaintenanceConfiguration.ToPtr(ctx) - resp.Diagnostics.Append(d...) - if resp.Diagnostics.HasError() { + if !data.MaintenanceConfiguration.IsUnknown() && !data.MaintenanceConfiguration.IsNull() { + mc, diags := data.MaintenanceConfiguration.ToPtr(ctx) + response.Diagnostics.Append(diags...) + if response.Diagnostics.HasError() { return } if !mc.IcebergCompaction.IsNull() { + typ := awstypes.TableMaintenanceTypeIcebergCompaction input := s3tables.PutTableMaintenanceConfigurationInput{ - Name: plan.Name.ValueStringPointer(), - Namespace: plan.Namespace.ValueStringPointer(), - TableBucketARN: plan.TableBucketARN.ValueStringPointer(), - Type: awstypes.TableMaintenanceTypeIcebergCompaction, + Name: aws.String(name), + Namespace: aws.String(namespace), + TableBucketARN: aws.String(tableBucketARN), + Type: typ, } - value, d := expandTableMaintenanceIcebergCompaction(ctx, mc.IcebergCompaction) - resp.Diagnostics.Append(d...) - if resp.Diagnostics.HasError() { + value, diags := expandTableMaintenanceIcebergCompaction(ctx, mc.IcebergCompaction) + response.Diagnostics.Append(diags...) 
+ if response.Diagnostics.HasError() { return } input.Value = &value _, err := conn.PutTableMaintenanceConfiguration(ctx, &input) + if err != nil { - resp.Diagnostics.AddError( - create.ProblemStandardMessage(names.S3Tables, create.ErrActionCreating, resNameTableBucket, plan.Name.String(), err), - err.Error(), - ) + response.Diagnostics.AddError(fmt.Sprintf("putting S3 Tables Table (%s) maintenance configuration (%s)", name, typ), err.Error()) + return } } if !mc.IcebergSnapshotManagement.IsNull() { + typ := awstypes.TableMaintenanceTypeIcebergSnapshotManagement input := s3tables.PutTableMaintenanceConfigurationInput{ - Name: plan.Name.ValueStringPointer(), - Namespace: plan.Namespace.ValueStringPointer(), - TableBucketARN: plan.TableBucketARN.ValueStringPointer(), - Type: awstypes.TableMaintenanceTypeIcebergSnapshotManagement, + Name: aws.String(name), + Namespace: aws.String(namespace), + TableBucketARN: aws.String(tableBucketARN), + Type: typ, } - value, d := expandTableMaintenanceIcebergSnapshotManagement(ctx, mc.IcebergSnapshotManagement) - resp.Diagnostics.Append(d...) - if resp.Diagnostics.HasError() { + value, diags := expandTableMaintenanceIcebergSnapshotManagement(ctx, mc.IcebergSnapshotManagement) + response.Diagnostics.Append(diags...) 
+ if response.Diagnostics.HasError() { return } input.Value = &value _, err := conn.PutTableMaintenanceConfiguration(ctx, &input) + if err != nil { - resp.Diagnostics.AddError( - create.ProblemStandardMessage(names.S3Tables, create.ErrActionCreating, resNameTableBucket, plan.Name.String(), err), - err.Error(), - ) + response.Diagnostics.AddError(fmt.Sprintf("putting S3 Tables Table (%s) maintenance configuration (%s)", name, typ), err.Error()) + return } } } - table, err := findTable(ctx, conn, plan.TableBucketARN.ValueString(), plan.Namespace.ValueString(), plan.Name.ValueString()) + outputGT, err := findTableByThreePartKey(ctx, conn, tableBucketARN, namespace, name) + if err != nil { - resp.Diagnostics.AddError( - create.ProblemStandardMessage(names.S3Tables, create.ErrActionCreating, ResNameTable, plan.Name.String(), err), - err.Error(), - ) + response.Diagnostics.AddError(fmt.Sprintf("reading S3 Tables Table (%s)", name), err.Error()) + return } - resp.Diagnostics.Append(flex.Flatten(ctx, table, &plan, flex.WithFieldNamePrefix("Table"))...) - if resp.Diagnostics.HasError() { + response.Diagnostics.Append(fwflex.Flatten(ctx, outputGT, &data, fwflex.WithFieldNamePrefix("Table"))...) 
+ if response.Diagnostics.HasError() { return } - plan.Namespace = types.StringValue(table.Namespace[0]) + data.Namespace = types.StringValue(outputGT.Namespace[0]) + + outputGTMC, err := findTableMaintenanceConfigurationByThreePartKey(ctx, conn, tableBucketARN, namespace, name) + + switch { + case tfresource.NotFound(err): + case err != nil: + response.Diagnostics.AddError(fmt.Sprintf("reading S3 Tables Table (%s) maintenance configuration", name), err.Error()) - awsMaintenanceConfig, err := conn.GetTableMaintenanceConfiguration(ctx, &s3tables.GetTableMaintenanceConfigurationInput{ - Name: plan.Name.ValueStringPointer(), - Namespace: plan.Namespace.ValueStringPointer(), - TableBucketARN: plan.TableBucketARN.ValueStringPointer(), - }) - if err != nil { - resp.Diagnostics.AddError( - create.ProblemStandardMessage(names.S3Tables, create.ErrActionCreating, resNameTableBucket, plan.Name.String(), err), - err.Error(), - ) - } - maintenanceConfiguration, d := flattenTableMaintenanceConfiguration(ctx, awsMaintenanceConfig) - resp.Diagnostics.Append(d...) - if resp.Diagnostics.HasError() { return + default: + value, diags := flattenTableMaintenanceConfiguration(ctx, outputGTMC) + response.Diagnostics.Append(diags...) 
+ if response.Diagnostics.HasError() { + return + } + data.MaintenanceConfiguration = value } - plan.MaintenanceConfiguration = maintenanceConfiguration - awsEncryptionConfig, err := conn.GetTableEncryption(ctx, &s3tables.GetTableEncryptionInput{ - Name: plan.Name.ValueStringPointer(), - Namespace: plan.Namespace.ValueStringPointer(), - TableBucketARN: plan.TableBucketARN.ValueStringPointer(), - }) + awsEncryptionConfig, err := findTableEncryptionByThreePartKey(ctx, conn, tableBucketARN, namespace, name) + switch { - case errs.IsA[*awstypes.NotFoundException](err): + case tfresource.NotFound(err): case err != nil: - resp.Diagnostics.AddError( - create.ProblemStandardMessage(names.S3Tables, create.ErrActionReading, resNameTableBucket, plan.Name.String(), err), - err.Error(), - ) + response.Diagnostics.AddError(fmt.Sprintf("reading S3 Tables Table (%s) encryption", name), err.Error()) + + return default: var encryptionConfiguration encryptionConfigurationModel - resp.Diagnostics.Append(flex.Flatten(ctx, awsEncryptionConfig.EncryptionConfiguration, &encryptionConfiguration)...) - if resp.Diagnostics.HasError() { + response.Diagnostics.Append(fwflex.Flatten(ctx, awsEncryptionConfig, &encryptionConfiguration)...) + if response.Diagnostics.HasError() { return } - plan.EncryptionConfiguration, d = fwtypes.NewObjectValueOf(ctx, &encryptionConfiguration) - resp.Diagnostics.Append(d...) - if resp.Diagnostics.HasError() { + var diags diag.Diagnostics + data.EncryptionConfiguration, diags = fwtypes.NewObjectValueOf(ctx, &encryptionConfiguration) + response.Diagnostics.Append(diags...) + if response.Diagnostics.HasError() { return } } - resp.Diagnostics.Append(resp.State.Set(ctx, plan)...) + response.Diagnostics.Append(response.State.Set(ctx, data)...) 
} -func (r *tableResource) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { - conn := r.Meta().S3TablesClient(ctx) - - var state tableResourceModel - resp.Diagnostics.Append(req.State.Get(ctx, &state)...) - if resp.Diagnostics.HasError() { +func (r *tableResource) Read(ctx context.Context, request resource.ReadRequest, response *resource.ReadResponse) { + var data tableResourceModel + response.Diagnostics.Append(request.State.Get(ctx, &data)...) + if response.Diagnostics.HasError() { return } - out, err := findTable(ctx, conn, state.TableBucketARN.ValueString(), state.Namespace.ValueString(), state.Name.ValueString()) + conn := r.Meta().S3TablesClient(ctx) + + name, namespace, tableBucketARN := fwflex.StringValueFromFramework(ctx, data.Name), fwflex.StringValueFromFramework(ctx, data.Namespace), fwflex.StringValueFromFramework(ctx, data.TableBucketARN) + outputGT, err := findTableByThreePartKey(ctx, conn, tableBucketARN, namespace, name) + if tfresource.NotFound(err) { - resp.State.RemoveResource(ctx) + response.Diagnostics.Append(fwdiag.NewResourceNotFoundWarningDiagnostic(err)) + response.State.RemoveResource(ctx) + return } + if err != nil { - resp.Diagnostics.AddError( - create.ProblemStandardMessage(names.S3Tables, create.ErrActionReading, ResNameTable, state.Name.String(), err), - err.Error(), - ) + response.Diagnostics.AddError(fmt.Sprintf("reading S3 Tables Table (%s)", name), err.Error()) + return } - resp.Diagnostics.Append(flex.Flatten(ctx, out, &state, flex.WithFieldNamePrefix("Table"))...) - if resp.Diagnostics.HasError() { + response.Diagnostics.Append(fwflex.Flatten(ctx, outputGT, &data, fwflex.WithFieldNamePrefix("Table"))...) 
+ if response.Diagnostics.HasError() { return } - state.Namespace = types.StringValue(out.Namespace[0]) + data.Namespace = types.StringValue(outputGT.Namespace[0]) + + outputGTMC, err := findTableMaintenanceConfigurationByThreePartKey(ctx, conn, tableBucketARN, namespace, name) + + switch { + case tfresource.NotFound(err): + case err != nil: + response.Diagnostics.AddError(fmt.Sprintf("reading S3 Tables Table (%s) maintenance configuration", name), err.Error()) - awsMaintenanceConfig, err := conn.GetTableMaintenanceConfiguration(ctx, &s3tables.GetTableMaintenanceConfigurationInput{ - Name: state.Name.ValueStringPointer(), - Namespace: state.Namespace.ValueStringPointer(), - TableBucketARN: state.TableBucketARN.ValueStringPointer(), - }) - if err != nil { - resp.Diagnostics.AddError( - create.ProblemStandardMessage(names.S3Tables, create.ErrActionReading, resNameTableBucket, state.Name.String(), err), - err.Error(), - ) - } - maintenanceConfiguration, d := flattenTableMaintenanceConfiguration(ctx, awsMaintenanceConfig) - resp.Diagnostics.Append(d...) - if resp.Diagnostics.HasError() { return + default: + value, diags := flattenTableMaintenanceConfiguration(ctx, outputGTMC) + response.Diagnostics.Append(diags...) 
+ if response.Diagnostics.HasError() { + return + } + data.MaintenanceConfiguration = value } - state.MaintenanceConfiguration = maintenanceConfiguration - awsEncryptionConfig, err := conn.GetTableEncryption(ctx, &s3tables.GetTableEncryptionInput{ - Name: state.Name.ValueStringPointer(), - Namespace: state.Namespace.ValueStringPointer(), - TableBucketARN: state.TableBucketARN.ValueStringPointer(), - }) - if err != nil { - if !errs.IsA[*awstypes.NotFoundException](err) { - resp.Diagnostics.AddError( - create.ProblemStandardMessage(names.S3Tables, create.ErrActionReading, resNameTableBucket, state.Name.String(), err), - err.Error(), - ) - } - } else { + awsEncryptionConfig, err := findTableEncryptionByThreePartKey(ctx, conn, tableBucketARN, namespace, name) + + switch { + case tfresource.NotFound(err): + case err != nil: + response.Diagnostics.AddError(fmt.Sprintf("reading S3 Tables Table (%s) encryption", name), err.Error()) + + return + default: var encryptionConfiguration encryptionConfigurationModel - resp.Diagnostics.Append(flex.Flatten(ctx, awsEncryptionConfig.EncryptionConfiguration, &encryptionConfiguration)...) - if resp.Diagnostics.HasError() { + response.Diagnostics.Append(fwflex.Flatten(ctx, awsEncryptionConfig, &encryptionConfiguration)...) + if response.Diagnostics.HasError() { return } - state.EncryptionConfiguration, d = fwtypes.NewObjectValueOf(ctx, &encryptionConfiguration) - resp.Diagnostics.Append(d...) - if resp.Diagnostics.HasError() { + var diags diag.Diagnostics + data.EncryptionConfiguration, diags = fwtypes.NewObjectValueOf(ctx, &encryptionConfiguration) + response.Diagnostics.Append(diags...) + if response.Diagnostics.HasError() { return } } - resp.Diagnostics.Append(resp.State.Set(ctx, &state)...) + response.Diagnostics.Append(response.State.Set(ctx, &data)...) 
} -func (r *tableResource) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) { - conn := r.Meta().S3TablesClient(ctx) - - var plan, state tableResourceModel - resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...) - resp.Diagnostics.Append(req.State.Get(ctx, &state)...) - if resp.Diagnostics.HasError() { +func (r *tableResource) Update(ctx context.Context, request resource.UpdateRequest, response *resource.UpdateResponse) { + var new, old tableResourceModel + response.Diagnostics.Append(request.Plan.Get(ctx, &new)...) + if response.Diagnostics.HasError() { + return + } + response.Diagnostics.Append(request.State.Get(ctx, &old)...) + if response.Diagnostics.HasError() { return } - if !plan.Name.Equal(state.Name) || !plan.Namespace.Equal(state.Namespace) { + conn := r.Meta().S3TablesClient(ctx) + + // New name and namespace. + name, namespace, tableBucketARN := fwflex.StringValueFromFramework(ctx, new.Name), fwflex.StringValueFromFramework(ctx, new.Namespace), fwflex.StringValueFromFramework(ctx, new.TableBucketARN) + + if !new.Name.Equal(old.Name) || !new.Namespace.Equal(old.Namespace) { input := s3tables.RenameTableInput{ - TableBucketARN: state.TableBucketARN.ValueStringPointer(), - Namespace: state.Namespace.ValueStringPointer(), - Name: state.Name.ValueStringPointer(), + Name: old.Name.ValueStringPointer(), + Namespace: old.Namespace.ValueStringPointer(), + TableBucketARN: aws.String(tableBucketARN), } - if !plan.Name.Equal(state.Name) { - input.NewName = plan.Name.ValueStringPointer() + if !new.Name.Equal(old.Name) { + input.NewName = aws.String(name) } - if !plan.Namespace.Equal(state.Namespace) { - input.NewNamespaceName = plan.Namespace.ValueStringPointer() + if !new.Namespace.Equal(old.Namespace) { + input.NewNamespaceName = aws.String(namespace) } _, err := conn.RenameTable(ctx, &input) + if err != nil { - resp.Diagnostics.AddError( - create.ProblemStandardMessage(names.S3Tables, create.ErrActionUpdating, ResNameTable, 
state.Name.String(), err), - err.Error(), - ) + response.Diagnostics.AddError(fmt.Sprintf("renaming S3 Tables Table (%s)", name), err.Error()) + + return } } - if !plan.MaintenanceConfiguration.Equal(state.MaintenanceConfiguration) { - planMC, d := plan.MaintenanceConfiguration.ToPtr(ctx) - resp.Diagnostics.Append(d...) - if resp.Diagnostics.HasError() { + if !new.MaintenanceConfiguration.Equal(old.MaintenanceConfiguration) { + newMC, d := new.MaintenanceConfiguration.ToPtr(ctx) + response.Diagnostics.Append(d...) + if response.Diagnostics.HasError() { return } - stateMC, d := state.MaintenanceConfiguration.ToPtr(ctx) - resp.Diagnostics.Append(d...) - if resp.Diagnostics.HasError() { + oldMC, d := old.MaintenanceConfiguration.ToPtr(ctx) + response.Diagnostics.Append(d...) + if response.Diagnostics.HasError() { return } - if !planMC.IcebergCompaction.Equal(stateMC.IcebergCompaction) { + if !newMC.IcebergCompaction.Equal(oldMC.IcebergCompaction) { + typ := awstypes.TableMaintenanceTypeIcebergCompaction input := s3tables.PutTableMaintenanceConfigurationInput{ - Name: plan.Name.ValueStringPointer(), - Namespace: plan.Namespace.ValueStringPointer(), - TableBucketARN: plan.TableBucketARN.ValueStringPointer(), - Type: awstypes.TableMaintenanceTypeIcebergCompaction, + Name: aws.String(name), + Namespace: aws.String(namespace), + TableBucketARN: aws.String(tableBucketARN), + Type: typ, } - value, d := expandTableMaintenanceIcebergCompaction(ctx, planMC.IcebergCompaction) - resp.Diagnostics.Append(d...) - if resp.Diagnostics.HasError() { + value, diags := expandTableMaintenanceIcebergCompaction(ctx, newMC.IcebergCompaction) + response.Diagnostics.Append(diags...) 
+ if response.Diagnostics.HasError() { return } input.Value = &value _, err := conn.PutTableMaintenanceConfiguration(ctx, &input) + if err != nil { - resp.Diagnostics.AddError( - create.ProblemStandardMessage(names.S3Tables, create.ErrActionUpdating, resNameTableBucket, plan.Name.String(), err), - err.Error(), - ) + response.Diagnostics.AddError(fmt.Sprintf("putting S3 Tables Table (%s) maintenance configuration (%s)", name, typ), err.Error()) + return } } - if !planMC.IcebergSnapshotManagement.Equal(stateMC.IcebergSnapshotManagement) { + if !newMC.IcebergSnapshotManagement.Equal(oldMC.IcebergSnapshotManagement) { + typ := awstypes.TableMaintenanceTypeIcebergSnapshotManagement input := s3tables.PutTableMaintenanceConfigurationInput{ - Name: plan.Name.ValueStringPointer(), - Namespace: plan.Namespace.ValueStringPointer(), - TableBucketARN: plan.TableBucketARN.ValueStringPointer(), - Type: awstypes.TableMaintenanceTypeIcebergSnapshotManagement, + Name: aws.String(name), + Namespace: aws.String(namespace), + TableBucketARN: aws.String(tableBucketARN), + Type: typ, } - value, d := expandTableMaintenanceIcebergSnapshotManagement(ctx, planMC.IcebergSnapshotManagement) - resp.Diagnostics.Append(d...) - if resp.Diagnostics.HasError() { + value, d := expandTableMaintenanceIcebergSnapshotManagement(ctx, newMC.IcebergSnapshotManagement) + response.Diagnostics.Append(d...) 
+ if response.Diagnostics.HasError() { return } input.Value = &value _, err := conn.PutTableMaintenanceConfiguration(ctx, &input) + if err != nil { - resp.Diagnostics.AddError( - create.ProblemStandardMessage(names.S3Tables, create.ErrActionUpdating, resNameTableBucket, plan.Name.String(), err), - err.Error(), - ) + response.Diagnostics.AddError(fmt.Sprintf("putting S3 Tables Table (%s) maintenance configuration (%s)", name, typ), err.Error()) + return } } } - table, err := findTable(ctx, conn, plan.TableBucketARN.ValueString(), plan.Namespace.ValueString(), plan.Name.ValueString()) + outputGT, err := findTableByThreePartKey(ctx, conn, tableBucketARN, namespace, name) + if err != nil { - resp.Diagnostics.AddError( - create.ProblemStandardMessage(names.S3Tables, create.ErrActionUpdating, ResNameTable, plan.Name.String(), err), - err.Error(), - ) + response.Diagnostics.AddError(fmt.Sprintf("reading S3 Tables Table (%s)", name), err.Error()) + return } - resp.Diagnostics.Append(flex.Flatten(ctx, table, &plan, flex.WithFieldNamePrefix("Table"))...) - if resp.Diagnostics.HasError() { + response.Diagnostics.Append(fwflex.Flatten(ctx, outputGT, &new, fwflex.WithFieldNamePrefix("Table"))...) 
+ if response.Diagnostics.HasError() { return } - plan.Namespace = types.StringValue(table.Namespace[0]) + new.Namespace = types.StringValue(outputGT.Namespace[0]) + + outputGTMC, err := findTableMaintenanceConfigurationByThreePartKey(ctx, conn, tableBucketARN, namespace, name) + + switch { + case tfresource.NotFound(err): + case err != nil: + response.Diagnostics.AddError(fmt.Sprintf("reading S3 Tables Table (%s) maintenance configuration", name), err.Error()) - awsMaintenanceConfig, err := conn.GetTableMaintenanceConfiguration(ctx, &s3tables.GetTableMaintenanceConfigurationInput{ - Name: plan.Name.ValueStringPointer(), - Namespace: plan.Namespace.ValueStringPointer(), - TableBucketARN: plan.TableBucketARN.ValueStringPointer(), - }) - if err != nil { - resp.Diagnostics.AddError( - create.ProblemStandardMessage(names.S3Tables, create.ErrActionUpdating, resNameTableBucket, plan.Name.String(), err), - err.Error(), - ) - } - maintenanceConfiguration, d := flattenTableMaintenanceConfiguration(ctx, awsMaintenanceConfig) - resp.Diagnostics.Append(d...) - if resp.Diagnostics.HasError() { return + default: + value, diags := flattenTableMaintenanceConfiguration(ctx, outputGTMC) + response.Diagnostics.Append(diags...) + if response.Diagnostics.HasError() { + return + } + new.MaintenanceConfiguration = value } - plan.MaintenanceConfiguration = maintenanceConfiguration - resp.Diagnostics.Append(resp.State.Set(ctx, &plan)...) + response.Diagnostics.Append(response.State.Set(ctx, &new)...) } -func (r *tableResource) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { - conn := r.Meta().S3TablesClient(ctx) - - var state tableResourceModel - resp.Diagnostics.Append(req.State.Get(ctx, &state)...) - if resp.Diagnostics.HasError() { +func (r *tableResource) Delete(ctx context.Context, request resource.DeleteRequest, response *resource.DeleteResponse) { + var data tableResourceModel + response.Diagnostics.Append(request.State.Get(ctx, &data)...) 
+ if response.Diagnostics.HasError() { return } + conn := r.Meta().S3TablesClient(ctx) + + name, namespace, tableBucketARN := fwflex.StringValueFromFramework(ctx, data.Name), fwflex.StringValueFromFramework(ctx, data.Namespace), fwflex.StringValueFromFramework(ctx, data.TableBucketARN) input := s3tables.DeleteTableInput{ - Name: state.Name.ValueStringPointer(), - Namespace: state.Namespace.ValueStringPointer(), - TableBucketARN: state.TableBucketARN.ValueStringPointer(), + Name: aws.String(name), + Namespace: aws.String(namespace), + TableBucketARN: aws.String(tableBucketARN), } - _, err := conn.DeleteTable(ctx, &input) + + if errs.IsA[*awstypes.NotFoundException](err) { + return + } + if err != nil { - if errs.IsA[*awstypes.NotFoundException](err) { - return - } + response.Diagnostics.AddError(fmt.Sprintf("deleting S3 Tables Table (%s)", name), err.Error()) - resp.Diagnostics.AddError( - create.ProblemStandardMessage(names.S3Tables, create.ErrActionDeleting, ResNameTable, state.Name.String(), err), - err.Error(), - ) return } } -func (r *tableResource) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) { - identifier, err := parseTableIdentifier(req.ID) +func (r *tableResource) ImportState(ctx context.Context, request resource.ImportStateRequest, response *resource.ImportStateResponse) { + identifier, err := parseTableIdentifier(request.ID) if err != nil { - resp.Diagnostics.AddError( + response.Diagnostics.AddError( "Invalid Import ID", "Import IDs for S3 Tables Tables must use the format
"+tableIDSeparator+""+tableIDSeparator+"
.\n"+ - fmt.Sprintf("Had %q", req.ID), + fmt.Sprintf("Had %q", request.ID), ) return } - identifier.PopulateState(ctx, &resp.State, &resp.Diagnostics) + identifier.PopulateState(ctx, &response.State, &response.Diagnostics) +} + +func findTableByThreePartKey(ctx context.Context, conn *s3tables.Client, tableBucketARN, namespace, name string) (*s3tables.GetTableOutput, error) { + input := s3tables.GetTableInput{ + Name: aws.String(name), + Namespace: aws.String(namespace), + TableBucketARN: aws.String(tableBucketARN), + } + + return findTable(ctx, conn, &input) } -func findTable(ctx context.Context, conn *s3tables.Client, bucketARN, namespace, name string) (*s3tables.GetTableOutput, error) { - in := s3tables.GetTableInput{ +func findTable(ctx context.Context, conn *s3tables.Client, input *s3tables.GetTableInput) (*s3tables.GetTableOutput, error) { + output, err := conn.GetTable(ctx, input) + + if errs.IsA[*awstypes.NotFoundException](err) { + return nil, &retry.NotFoundError{ + LastError: err, + } + } + + if err != nil { + return nil, err + } + + if output == nil { + return nil, tfresource.NewEmptyResultError(input) + } + + return output, nil +} + +func findTableEncryptionByThreePartKey(ctx context.Context, conn *s3tables.Client, tableBucketARN, namespace, name string) (*awstypes.EncryptionConfiguration, error) { + input := s3tables.GetTableEncryptionInput{ Name: aws.String(name), Namespace: aws.String(namespace), - TableBucketARN: aws.String(bucketARN), + TableBucketARN: aws.String(tableBucketARN), + } + + return findTableEncryption(ctx, conn, &input) +} + +func findTableEncryption(ctx context.Context, conn *s3tables.Client, input *s3tables.GetTableEncryptionInput) (*awstypes.EncryptionConfiguration, error) { + output, err := conn.GetTableEncryption(ctx, input) + + if errs.IsA[*awstypes.NotFoundException](err) { + return nil, &retry.NotFoundError{ + LastError: err, + } } - out, err := conn.GetTable(ctx, &in) if err != nil { - if 
errs.IsA[*awstypes.NotFoundException](err) { - return nil, &retry.NotFoundError{ - LastError: err, - LastRequest: in, - } + return nil, err + } + + if output == nil || output.EncryptionConfiguration == nil { + return nil, tfresource.NewEmptyResultError(input) + } + + return output.EncryptionConfiguration, nil +} + +func findTableMaintenanceConfigurationByThreePartKey(ctx context.Context, conn *s3tables.Client, tableBucketARN, namespace, name string) (*s3tables.GetTableMaintenanceConfigurationOutput, error) { + input := s3tables.GetTableMaintenanceConfigurationInput{ + Name: aws.String(name), + Namespace: aws.String(namespace), + TableBucketARN: aws.String(tableBucketARN), + } + + return findTableMaintenanceConfiguration(ctx, conn, &input) +} + +func findTableMaintenanceConfiguration(ctx context.Context, conn *s3tables.Client, input *s3tables.GetTableMaintenanceConfigurationInput) (*s3tables.GetTableMaintenanceConfigurationOutput, error) { + output, err := conn.GetTableMaintenanceConfiguration(ctx, input) + + if errs.IsA[*awstypes.NotFoundException](err) { + return nil, &retry.NotFoundError{ + LastError: err, } + } + if err != nil { return nil, err } - if out == nil { - return nil, tfresource.NewEmptyResultError(in) + if output == nil { + return nil, tfresource.NewEmptyResultError(input) } - return out, nil + return output, nil } type tableResourceModel struct { @@ -779,7 +836,7 @@ func expandIcebergCompactionSettings(ctx context.Context, in fwtypes.ObjectValue var value awstypes.IcebergCompactionSettings - diags.Append(flex.Expand(ctx, model, &value)...) + diags.Append(fwflex.Expand(ctx, model, &value)...) return &awstypes.TableMaintenanceSettingsMemberIcebergCompaction{ Value: value, @@ -790,7 +847,7 @@ func flattenIcebergCompactionSettings(ctx context.Context, in awstypes.TableMain switch t := in.(type) { case *awstypes.TableMaintenanceSettingsMemberIcebergCompaction: var model icebergCompactionSettingsModel - diags.Append(flex.Flatten(ctx, t.Value, &model)...) 
+ diags.Append(fwflex.Flatten(ctx, t.Value, &model)...) result = fwtypes.NewObjectValueOfMust(ctx, &model) case *awstypes.UnknownUnionMember: @@ -849,7 +906,7 @@ func expandIcebergSnapshotManagementSettings(ctx context.Context, in fwtypes.Obj var value awstypes.IcebergSnapshotManagementSettings - diags.Append(flex.Expand(ctx, model, &value)...) + diags.Append(fwflex.Expand(ctx, model, &value)...) return &awstypes.TableMaintenanceSettingsMemberIcebergSnapshotManagement{ Value: value, @@ -860,7 +917,7 @@ func flattenIcebergSnapshotManagementSettings(ctx context.Context, in awstypes.T switch t := in.(type) { case *awstypes.TableMaintenanceSettingsMemberIcebergSnapshotManagement: var model icebergSnapshotManagementSettingsModel - diags.Append(flex.Flatten(ctx, t.Value, &model)...) + diags.Append(fwflex.Flatten(ctx, t.Value, &model)...) result = fwtypes.NewObjectValueOfMust(ctx, &model) case *awstypes.UnknownUnionMember: @@ -955,7 +1012,7 @@ type icebergSchemaFieldModel struct { } var ( - _ flex.Expander = tableMetadataModel{} + _ fwflex.Expander = tableMetadataModel{} ) func (m tableMetadataModel) Expand(ctx context.Context) (out any, diags diag.Diagnostics) { @@ -970,7 +1027,7 @@ func (m tableMetadataModel) Expand(ctx context.Context) (out any, diags diag.Dia // Create Iceberg schema var schema awstypes.IcebergMetadata - diags.Append(flex.Expand(ctx, icebergModel, &schema)...) + diags.Append(fwflex.Expand(ctx, icebergModel, &schema)...) 
if diags.HasError() { return nil, diags } diff --git a/internal/service/s3tables/table_bucket.go b/internal/service/s3tables/table_bucket.go index a4ecd768546c..445550a5017c 100644 --- a/internal/service/s3tables/table_bucket.go +++ b/internal/service/s3tables/table_bucket.go @@ -5,7 +5,7 @@ package s3tables import ( "context" - "errors" + "fmt" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/s3tables" @@ -16,6 +16,7 @@ import ( "github.com/hashicorp/terraform-plugin-framework/path" "github.com/hashicorp/terraform-plugin-framework/resource" "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/booldefault" "github.com/hashicorp/terraform-plugin-framework/resource/schema/objectplanmodifier" "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" @@ -23,10 +24,10 @@ import ( "github.com/hashicorp/terraform-plugin-framework/types" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" - "github.com/hashicorp/terraform-provider-aws/internal/create" "github.com/hashicorp/terraform-provider-aws/internal/errs" + "github.com/hashicorp/terraform-provider-aws/internal/errs/fwdiag" "github.com/hashicorp/terraform-provider-aws/internal/framework" - "github.com/hashicorp/terraform-provider-aws/internal/framework/flex" + fwflex "github.com/hashicorp/terraform-provider-aws/internal/framework/flex" fwtypes "github.com/hashicorp/terraform-provider-aws/internal/framework/types" "github.com/hashicorp/terraform-provider-aws/internal/framework/validators" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" @@ -38,16 +39,12 @@ func newTableBucketResource(_ context.Context) (resource.ResourceWithConfigure, return &tableBucketResource{}, nil } -const ( - resNameTableBucket = "Table Bucket" -) - type 
tableBucketResource struct { framework.ResourceWithModel[tableBucketResourceModel] } -func (r *tableBucketResource) Schema(ctx context.Context, req resource.SchemaRequest, resp *resource.SchemaResponse) { - resp.Schema = schema.Schema{ +func (r *tableBucketResource) Schema(ctx context.Context, request resource.SchemaRequest, response *resource.SchemaResponse) { + response.Schema = schema.Schema{ Attributes: map[string]schema.Attribute{ names.AttrARN: framework.ARNAttributeComputedOnly(), names.AttrCreatedAt: schema.StringAttribute{ @@ -61,6 +58,11 @@ func (r *tableBucketResource) Schema(ctx context.Context, req resource.SchemaReq CustomType: fwtypes.NewObjectTypeOf[encryptionConfigurationModel](ctx), Optional: true, }, + names.AttrForceDestroy: schema.BoolAttribute{ + Optional: true, + Computed: true, + Default: booldefault.StaticBool(false), + }, // TODO: Once Protocol v6 is supported, convert this to a `schema.SingleNestedAttribute` with full schema information // Validations needed: // * iceberg_unreferenced_file_removal.settings.non_current_days: int32validator.AtLeast(1) @@ -107,347 +109,417 @@ func (r *tableBucketResource) Schema(ctx context.Context, req resource.SchemaReq } } -func (r *tableBucketResource) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { - conn := r.Meta().S3TablesClient(ctx) - - var plan tableBucketResourceModel - resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...) - if resp.Diagnostics.HasError() { +func (r *tableBucketResource) Create(ctx context.Context, request resource.CreateRequest, response *resource.CreateResponse) { + var data tableBucketResourceModel + response.Diagnostics.Append(request.Plan.Get(ctx, &data)...) + if response.Diagnostics.HasError() { return } + conn := r.Meta().S3TablesClient(ctx) + + name := fwflex.StringValueFromFramework(ctx, data.Name) var input s3tables.CreateTableBucketInput - resp.Diagnostics.Append(flex.Expand(ctx, plan, &input)...) 
- if resp.Diagnostics.HasError() { + response.Diagnostics.Append(fwflex.Expand(ctx, data, &input)...) + if response.Diagnostics.HasError() { return } - out, err := conn.CreateTableBucket(ctx, &input) + outputCTB, err := conn.CreateTableBucket(ctx, &input) + if err != nil { - resp.Diagnostics.AddError( - create.ProblemStandardMessage(names.S3Tables, create.ErrActionCreating, resNameTableBucket, plan.Name.String(), err), - err.Error(), - ) - return - } - if out == nil || out.Arn == nil { - resp.Diagnostics.AddError( - create.ProblemStandardMessage(names.S3Tables, create.ErrActionCreating, resNameTableBucket, plan.Name.String(), nil), - errors.New("empty output").Error(), - ) + response.Diagnostics.AddError(fmt.Sprintf("creating S3 Tables Table Bucket (%s)", name), err.Error()) + return } - if !plan.MaintenanceConfiguration.IsUnknown() && !plan.MaintenanceConfiguration.IsNull() { - mc, d := plan.MaintenanceConfiguration.ToPtr(ctx) - resp.Diagnostics.Append(d...) - if resp.Diagnostics.HasError() { + tableBucketARN := aws.ToString(outputCTB.Arn) + if !data.MaintenanceConfiguration.IsUnknown() && !data.MaintenanceConfiguration.IsNull() { + mc, diags := data.MaintenanceConfiguration.ToPtr(ctx) + response.Diagnostics.Append(diags...) + if response.Diagnostics.HasError() { return } if !mc.IcebergUnreferencedFileRemovalSettings.IsNull() { + typ := awstypes.TableBucketMaintenanceTypeIcebergUnreferencedFileRemoval input := s3tables.PutTableBucketMaintenanceConfigurationInput{ - TableBucketARN: out.Arn, - Type: awstypes.TableBucketMaintenanceTypeIcebergUnreferencedFileRemoval, + TableBucketARN: aws.String(tableBucketARN), + Type: typ, } - value, d := expandTableBucketMaintenanceIcebergUnreferencedFileRemoval(ctx, mc.IcebergUnreferencedFileRemovalSettings) - resp.Diagnostics.Append(d...) 
- if resp.Diagnostics.HasError() { + value, diags := expandTableBucketMaintenanceIcebergUnreferencedFileRemoval(ctx, mc.IcebergUnreferencedFileRemovalSettings) + response.Diagnostics.Append(diags...) + if response.Diagnostics.HasError() { return } - input.Value = &value _, err := conn.PutTableBucketMaintenanceConfiguration(ctx, &input) + if err != nil { - resp.Diagnostics.AddError( - create.ProblemStandardMessage(names.S3Tables, create.ErrActionCreating, resNameTableBucket, plan.Name.String(), err), - err.Error(), - ) + response.Diagnostics.AddError(fmt.Sprintf("putting S3 Tables Table Bucket (%s) maintenance configuration (%s)", name, typ), err.Error()) + return } } } - bucket, err := findTableBucket(ctx, conn, aws.ToString(out.Arn)) + outputGTB, err := findTableBucketByARN(ctx, conn, tableBucketARN) + if err != nil { - resp.Diagnostics.AddError( - create.ProblemStandardMessage(names.S3Tables, create.ErrActionCreating, resNameTableBucket, plan.Name.String(), err), - err.Error(), - ) + response.Diagnostics.AddError(fmt.Sprintf("reading S3 Tables Table Bucket (%s)", name), err.Error()) + + return } - resp.Diagnostics.Append(flex.Flatten(ctx, bucket, &plan)...) - if resp.Diagnostics.HasError() { + response.Diagnostics.Append(fwflex.Flatten(ctx, outputGTB, &data)...) + if response.Diagnostics.HasError() { return } - awsMaintenanceConfig, err := conn.GetTableBucketMaintenanceConfiguration(ctx, &s3tables.GetTableBucketMaintenanceConfigurationInput{ - TableBucketARN: bucket.Arn, - }) - if err != nil { - resp.Diagnostics.AddError( - create.ProblemStandardMessage(names.S3Tables, create.ErrActionCreating, resNameTableBucket, plan.Name.String(), err), - err.Error(), - ) - } - maintenanceConfiguration, d := flattenTableBucketMaintenanceConfiguration(ctx, awsMaintenanceConfig) - resp.Diagnostics.Append(d...) 
- if resp.Diagnostics.HasError() { + outputGTBMC, err := findTableBucketMaintenanceConfigurationByARN(ctx, conn, tableBucketARN) + + switch { + case tfresource.NotFound(err): + case err != nil: + response.Diagnostics.AddError(fmt.Sprintf("reading S3 Tables Table Bucket (%s) maintenance configuration", name), err.Error()) + return + default: + value, diags := flattenTableBucketMaintenanceConfiguration(ctx, outputGTBMC) + response.Diagnostics.Append(diags...) + if response.Diagnostics.HasError() { + return + } + data.MaintenanceConfiguration = value } - plan.MaintenanceConfiguration = maintenanceConfiguration - awsEncryptionConfig, err := findTableBucketEncryptionConfiguration(ctx, conn, plan.ARN.ValueString()) + awsEncryptionConfig, err := findTableBucketEncryptionConfigurationByARN(ctx, conn, tableBucketARN) + switch { case tfresource.NotFound(err): case err != nil: - resp.Diagnostics.AddError( - create.ProblemStandardMessage(names.S3Tables, create.ErrActionReading, resNameTableBucket, plan.Name.String(), err), - err.Error(), - ) + response.Diagnostics.AddError(fmt.Sprintf("reading S3 Tables Table Bucket (%s) encryption", name), err.Error()) + + return default: var encryptionConfiguration encryptionConfigurationModel - resp.Diagnostics.Append(flex.Flatten(ctx, awsEncryptionConfig, &encryptionConfiguration)...) - if resp.Diagnostics.HasError() { + response.Diagnostics.Append(fwflex.Flatten(ctx, awsEncryptionConfig, &encryptionConfiguration)...) + if response.Diagnostics.HasError() { + return + } + var diags diag.Diagnostics + data.EncryptionConfiguration, diags = fwtypes.NewObjectValueOf(ctx, &encryptionConfiguration) + response.Diagnostics.Append(diags...) + if response.Diagnostics.HasError() { return } - plan.EncryptionConfiguration = fwtypes.NewObjectValueOfMust(ctx, &encryptionConfiguration) } - resp.Diagnostics.Append(resp.State.Set(ctx, plan)...) + response.Diagnostics.Append(response.State.Set(ctx, data)...) 
} -func (r *tableBucketResource) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { - conn := r.Meta().S3TablesClient(ctx) - - var state tableBucketResourceModel - resp.Diagnostics.Append(req.State.Get(ctx, &state)...) - if resp.Diagnostics.HasError() { +func (r *tableBucketResource) Read(ctx context.Context, request resource.ReadRequest, response *resource.ReadResponse) { + var data tableBucketResourceModel + response.Diagnostics.Append(request.State.Get(ctx, &data)...) + if response.Diagnostics.HasError() { return } - out, err := findTableBucket(ctx, conn, state.ARN.ValueString()) + conn := r.Meta().S3TablesClient(ctx) + + name, tableBucketARN := fwflex.StringValueFromFramework(ctx, data.Name), fwflex.StringValueFromFramework(ctx, data.ARN) + outputGTB, err := findTableBucketByARN(ctx, conn, tableBucketARN) + if tfresource.NotFound(err) { - resp.State.RemoveResource(ctx) + response.Diagnostics.Append(fwdiag.NewResourceNotFoundWarningDiagnostic(err)) + response.State.RemoveResource(ctx) + return } + if err != nil { - resp.Diagnostics.AddError( - create.ProblemStandardMessage(names.S3Tables, create.ErrActionReading, resNameTableBucket, state.Name.String(), err), - err.Error(), - ) + response.Diagnostics.AddError(fmt.Sprintf("reading S3 Tables Table Bucket (%s)", name), err.Error()) + return } - resp.Diagnostics.Append(flex.Flatten(ctx, out, &state)...) - if resp.Diagnostics.HasError() { + response.Diagnostics.Append(fwflex.Flatten(ctx, outputGTB, &data)...) 
+ if response.Diagnostics.HasError() { return } - awsMaintenanceConfig, err := conn.GetTableBucketMaintenanceConfiguration(ctx, &s3tables.GetTableBucketMaintenanceConfigurationInput{ - TableBucketARN: state.ARN.ValueStringPointer(), - }) - if err != nil { - resp.Diagnostics.AddError( - create.ProblemStandardMessage(names.S3Tables, create.ErrActionReading, resNameTableBucket, state.Name.String(), err), - err.Error(), - ) - } - maintenanceConfiguration, d := flattenTableBucketMaintenanceConfiguration(ctx, awsMaintenanceConfig) - resp.Diagnostics.Append(d...) - if resp.Diagnostics.HasError() { + outputGTBMC, err := findTableBucketMaintenanceConfigurationByARN(ctx, conn, tableBucketARN) + + switch { + case tfresource.NotFound(err): + case err != nil: + response.Diagnostics.AddError(fmt.Sprintf("reading S3 Tables Table Bucket (%s) maintenance configuration", name), err.Error()) + return + default: + value, diags := flattenTableBucketMaintenanceConfiguration(ctx, outputGTBMC) + response.Diagnostics.Append(diags...) + if response.Diagnostics.HasError() { + return + } + data.MaintenanceConfiguration = value } - state.MaintenanceConfiguration = maintenanceConfiguration - awsEncryptionConfig, err := findTableBucketEncryptionConfiguration(ctx, conn, state.ARN.ValueString()) + awsEncryptionConfig, err := findTableBucketEncryptionConfigurationByARN(ctx, conn, tableBucketARN) + switch { case tfresource.NotFound(err): case err != nil: - resp.Diagnostics.AddError( - create.ProblemStandardMessage(names.S3Tables, create.ErrActionReading, resNameTableBucket, state.Name.String(), err), - err.Error(), - ) + response.Diagnostics.AddError(fmt.Sprintf("reading S3 Tables Table Bucket (%s) encryption", name), err.Error()) + + return default: var encryptionConfiguration encryptionConfigurationModel - resp.Diagnostics.Append(flex.Flatten(ctx, awsEncryptionConfig, &encryptionConfiguration)...) 
- if resp.Diagnostics.HasError() { + response.Diagnostics.Append(fwflex.Flatten(ctx, awsEncryptionConfig, &encryptionConfiguration)...) + if response.Diagnostics.HasError() { + return + } + var diags diag.Diagnostics + data.EncryptionConfiguration, diags = fwtypes.NewObjectValueOf(ctx, &encryptionConfiguration) + response.Diagnostics.Append(diags...) + if response.Diagnostics.HasError() { return } - state.EncryptionConfiguration = fwtypes.NewObjectValueOfMust(ctx, &encryptionConfiguration) } - resp.Diagnostics.Append(resp.State.Set(ctx, &state)...) + response.Diagnostics.Append(response.State.Set(ctx, &data)...) } -func (r *tableBucketResource) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) { - var state, plan tableBucketResourceModel - resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...) - if resp.Diagnostics.HasError() { +func (r *tableBucketResource) Update(ctx context.Context, request resource.UpdateRequest, response *resource.UpdateResponse) { + var old, new tableBucketResourceModel + response.Diagnostics.Append(request.Plan.Get(ctx, &new)...) + if response.Diagnostics.HasError() { return } - resp.Diagnostics.Append(req.State.Get(ctx, &state)...) - if resp.Diagnostics.HasError() { + response.Diagnostics.Append(request.State.Get(ctx, &old)...) + if response.Diagnostics.HasError() { return } conn := r.Meta().S3TablesClient(ctx) - if !plan.EncryptionConfiguration.Equal(state.EncryptionConfiguration) { - ec, d := plan.EncryptionConfiguration.ToPtr(ctx) - resp.Diagnostics.Append(d...) - if resp.Diagnostics.HasError() { + name, tableBucketARN := fwflex.StringValueFromFramework(ctx, new.Name), fwflex.StringValueFromFramework(ctx, new.ARN) + + if !new.EncryptionConfiguration.Equal(old.EncryptionConfiguration) { + ec, diags := new.EncryptionConfiguration.ToPtr(ctx) + response.Diagnostics.Append(diags...) 
+ if response.Diagnostics.HasError() { return } input := s3tables.PutTableBucketEncryptionInput{ - TableBucketARN: plan.ARN.ValueStringPointer(), + TableBucketARN: aws.String(tableBucketARN), } var encryptionConfiguration awstypes.EncryptionConfiguration - - resp.Diagnostics.Append(flex.Expand(ctx, ec, &encryptionConfiguration)...) - if resp.Diagnostics.HasError() { + response.Diagnostics.Append(fwflex.Expand(ctx, ec, &encryptionConfiguration)...) + if response.Diagnostics.HasError() { return } - input.EncryptionConfiguration = &encryptionConfiguration _, err := conn.PutTableBucketEncryption(ctx, &input) + if err != nil { - resp.Diagnostics.AddError( - create.ProblemStandardMessage(names.S3Tables, create.ErrActionUpdating, resNameTableBucket, plan.Name.String(), err), - err.Error(), - ) + response.Diagnostics.AddError(fmt.Sprintf("putting S3 Tables Table Bucket (%s) encryption configuration", name), err.Error()) + return } } - if !state.MaintenanceConfiguration.Equal(plan.MaintenanceConfiguration) { - mc, d := plan.MaintenanceConfiguration.ToPtr(ctx) - resp.Diagnostics.Append(d...) - if resp.Diagnostics.HasError() { + if !old.MaintenanceConfiguration.Equal(new.MaintenanceConfiguration) { + mc, d := new.MaintenanceConfiguration.ToPtr(ctx) + response.Diagnostics.Append(d...) + if response.Diagnostics.HasError() { return } if !mc.IcebergUnreferencedFileRemovalSettings.IsNull() { + typ := awstypes.TableBucketMaintenanceTypeIcebergUnreferencedFileRemoval input := s3tables.PutTableBucketMaintenanceConfigurationInput{ - TableBucketARN: state.ARN.ValueStringPointer(), - Type: awstypes.TableBucketMaintenanceTypeIcebergUnreferencedFileRemoval, + TableBucketARN: aws.String(tableBucketARN), + Type: typ, } value, d := expandTableBucketMaintenanceIcebergUnreferencedFileRemoval(ctx, mc.IcebergUnreferencedFileRemovalSettings) - resp.Diagnostics.Append(d...) - if resp.Diagnostics.HasError() { + response.Diagnostics.Append(d...) 
+ if response.Diagnostics.HasError() { return } - input.Value = &value _, err := conn.PutTableBucketMaintenanceConfiguration(ctx, &input) + if err != nil { - resp.Diagnostics.AddError( - create.ProblemStandardMessage(names.S3Tables, create.ErrActionUpdating, resNameTableBucket, plan.Name.String(), err), - err.Error(), - ) + response.Diagnostics.AddError(fmt.Sprintf("putting S3 Tables Table Bucket (%s) maintenance configuration (%s)", name, typ), err.Error()) + return } } - awsMaintenanceConfig, err := conn.GetTableBucketMaintenanceConfiguration(ctx, &s3tables.GetTableBucketMaintenanceConfigurationInput{ - TableBucketARN: state.ARN.ValueStringPointer(), - }) - if err != nil { - resp.Diagnostics.AddError( - create.ProblemStandardMessage(names.S3Tables, create.ErrActionUpdating, resNameTableBucket, plan.Name.String(), err), - err.Error(), - ) - } - maintenanceConfiguration, d := flattenTableBucketMaintenanceConfiguration(ctx, awsMaintenanceConfig) - resp.Diagnostics.Append(d...) - if resp.Diagnostics.HasError() { + outputGTBMC, err := findTableBucketMaintenanceConfigurationByARN(ctx, conn, tableBucketARN) + + switch { + case tfresource.NotFound(err): + case err != nil: + response.Diagnostics.AddError(fmt.Sprintf("reading S3 Tables Table Bucket (%s) maintenance configuration", name), err.Error()) + return + default: + value, d := flattenTableBucketMaintenanceConfiguration(ctx, outputGTBMC) + response.Diagnostics.Append(d...) + if response.Diagnostics.HasError() { + return + } + new.MaintenanceConfiguration = value } - plan.MaintenanceConfiguration = maintenanceConfiguration } - resp.Diagnostics.Append(resp.State.Set(ctx, &plan)...) + response.Diagnostics.Append(response.State.Set(ctx, &new)...) } -func (r *tableBucketResource) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { - conn := r.Meta().S3TablesClient(ctx) - - var state tableBucketResourceModel - resp.Diagnostics.Append(req.State.Get(ctx, &state)...) 
- if resp.Diagnostics.HasError() { +func (r *tableBucketResource) Delete(ctx context.Context, request resource.DeleteRequest, response *resource.DeleteResponse) { + var data tableBucketResourceModel + response.Diagnostics.Append(request.State.Get(ctx, &data)...) + if response.Diagnostics.HasError() { return } - input := &s3tables.DeleteTableBucketInput{ - TableBucketARN: state.ARN.ValueStringPointer(), + conn := r.Meta().S3TablesClient(ctx) + + name, tableBucketARN := fwflex.StringValueFromFramework(ctx, data.Name), fwflex.StringValueFromFramework(ctx, data.ARN) + input := s3tables.DeleteTableBucketInput{ + TableBucketARN: aws.String(tableBucketARN), } + _, err := conn.DeleteTableBucket(ctx, &input) - _, err := conn.DeleteTableBucket(ctx, input) if errs.IsA[*awstypes.NotFoundException](err) { return } + + // If deletion fails due to bucket not being empty and force_destroy is enabled. + if err != nil && data.ForceDestroy.ValueBool() { + // Check if the error indicates the bucket is not empty. + if errs.IsA[*awstypes.ConflictException](err) || errs.IsA[*awstypes.BadRequestException](err) { + tflog.Debug(ctx, "Table bucket not empty, attempting to empty it", map[string]any{ + "table_bucket_arn": data.ARN.ValueString(), + }) + + // Empty the table bucket by deleting all tables and namespaces. + if err := emptyTableBucket(ctx, conn, tableBucketARN); err != nil { + response.Diagnostics.AddError(fmt.Sprintf("deleting S3 Tables Table Bucket (%s) (force_destroy = true)", name), err.Error()) + + return + } + + // Retry deletion after emptying. 
+ _, err = conn.DeleteTableBucket(ctx, &input) + } + } + if err != nil { - resp.Diagnostics.AddError( - create.ProblemStandardMessage(names.S3Tables, create.ErrActionDeleting, resNameTableBucket, state.Name.String(), err), - err.Error(), - ) + response.Diagnostics.AddError(fmt.Sprintf("deleting S3 Tables Table Bucket (%s)", name), err.Error()) + return } } -func (r *tableBucketResource) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) { - resource.ImportStatePassthroughID(ctx, path.Root(names.AttrARN), req, resp) +func (r *tableBucketResource) ImportState(ctx context.Context, request resource.ImportStateRequest, response *resource.ImportStateResponse) { + resource.ImportStatePassthroughID(ctx, path.Root(names.AttrARN), request, response) + + // Set force_destroy to false on import to prevent accidental deletion + response.Diagnostics.Append(response.State.SetAttribute(ctx, path.Root(names.AttrForceDestroy), types.BoolValue(false))...) } -func findTableBucket(ctx context.Context, conn *s3tables.Client, arn string) (*s3tables.GetTableBucketOutput, error) { - in := s3tables.GetTableBucketInput{ +func findTableBucketByARN(ctx context.Context, conn *s3tables.Client, arn string) (*s3tables.GetTableBucketOutput, error) { + input := s3tables.GetTableBucketInput{ TableBucketARN: aws.String(arn), } - out, err := conn.GetTableBucket(ctx, &in) - if err != nil { - if errs.IsA[*awstypes.NotFoundException](err) { - return nil, &retry.NotFoundError{ - LastError: err, - LastRequest: in, - } + return findTableBucket(ctx, conn, &input) +} + +func findTableBucket(ctx context.Context, conn *s3tables.Client, input *s3tables.GetTableBucketInput) (*s3tables.GetTableBucketOutput, error) { + output, err := conn.GetTableBucket(ctx, input) + + if errs.IsA[*awstypes.NotFoundException](err) { + return nil, &retry.NotFoundError{ + LastError: err, } + } + if err != nil { return nil, err } - if out == nil { - return nil, 
tfresource.NewEmptyResultError(in) + if output == nil { + return nil, tfresource.NewEmptyResultError(input) } - return out, nil + return output, nil } -func findTableBucketEncryptionConfiguration(ctx context.Context, conn *s3tables.Client, arn string) (*awstypes.EncryptionConfiguration, error) { - in := s3tables.GetTableBucketEncryptionInput{ +func findTableBucketEncryptionConfigurationByARN(ctx context.Context, conn *s3tables.Client, arn string) (*awstypes.EncryptionConfiguration, error) { + input := s3tables.GetTableBucketEncryptionInput{ TableBucketARN: aws.String(arn), } - out, err := conn.GetTableBucketEncryption(ctx, &in) + return findTableBucketEncryptionConfiguration(ctx, conn, &input) +} + +func findTableBucketEncryptionConfiguration(ctx context.Context, conn *s3tables.Client, input *s3tables.GetTableBucketEncryptionInput) (*awstypes.EncryptionConfiguration, error) { + output, err := conn.GetTableBucketEncryption(ctx, input) + + if errs.IsA[*awstypes.NotFoundException](err) { + return nil, &retry.NotFoundError{ + LastError: err, + } + } + if err != nil { - if errs.IsA[*awstypes.NotFoundException](err) { - return nil, &retry.NotFoundError{ - LastError: err, - LastRequest: in, - } + return nil, err + } + + if output == nil || output.EncryptionConfiguration == nil { + return nil, tfresource.NewEmptyResultError(input) + } + + return output.EncryptionConfiguration, nil +} + +func findTableBucketMaintenanceConfigurationByARN(ctx context.Context, conn *s3tables.Client, arn string) (*s3tables.GetTableBucketMaintenanceConfigurationOutput, error) { + input := s3tables.GetTableBucketMaintenanceConfigurationInput{ + TableBucketARN: aws.String(arn), + } + + return findTableBucketMaintenanceConfiguration(ctx, conn, &input) +} + +func findTableBucketMaintenanceConfiguration(ctx context.Context, conn *s3tables.Client, input *s3tables.GetTableBucketMaintenanceConfigurationInput) (*s3tables.GetTableBucketMaintenanceConfigurationOutput, error) { + output, err := 
conn.GetTableBucketMaintenanceConfiguration(ctx, input) + + if errs.IsA[*awstypes.NotFoundException](err) { + return nil, &retry.NotFoundError{ + LastError: err, } + } + if err != nil { return nil, err } - return out.EncryptionConfiguration, nil + if output == nil { + return nil, tfresource.NewEmptyResultError(input) + } + + return output, nil } type tableBucketResourceModel struct { @@ -455,6 +527,7 @@ type tableBucketResourceModel struct { ARN types.String `tfsdk:"arn"` CreatedAt timetypes.RFC3339 `tfsdk:"created_at"` EncryptionConfiguration fwtypes.ObjectValueOf[encryptionConfigurationModel] `tfsdk:"encryption_configuration"` + ForceDestroy types.Bool `tfsdk:"force_destroy"` MaintenanceConfiguration fwtypes.ObjectValueOf[tableBucketMaintenanceConfigurationModel] `tfsdk:"maintenance_configuration" autoflex:"-"` Name types.String `tfsdk:"name"` OwnerAccountID types.String `tfsdk:"owner_account_id"` @@ -553,7 +626,7 @@ func expandIcebergUnreferencedFileRemovalSettings(ctx context.Context, in fwtype var value awstypes.IcebergUnreferencedFileRemovalSettings - diags.Append(flex.Expand(ctx, model, &value)...) + diags.Append(fwflex.Expand(ctx, model, &value)...) return &awstypes.TableBucketMaintenanceSettingsMemberIcebergUnreferencedFileRemoval{ Value: value, @@ -564,7 +637,7 @@ func flattenIcebergUnreferencedFileRemovalSettings(ctx context.Context, in awsty switch t := in.(type) { case *awstypes.TableBucketMaintenanceSettingsMemberIcebergUnreferencedFileRemoval: var model icebergUnreferencedFileRemovalSettingsModel - diags.Append(flex.Flatten(ctx, t.Value, &model)...) + diags.Append(fwflex.Flatten(ctx, t.Value, &model)...) result = fwtypes.NewObjectValueOfMust(ctx, &model) case *awstypes.UnknownUnionMember: @@ -577,3 +650,94 @@ func flattenIcebergUnreferencedFileRemovalSettings(ctx context.Context, in awsty } return result, diags } + +// emptyTableBucket deletes all tables in all namespaces within the specified table bucket. 
+// This is used when force_destroy is enabled to allow deletion of non-empty table buckets. +func emptyTableBucket(ctx context.Context, conn *s3tables.Client, tableBucketARN string) error { + tflog.Debug(ctx, "Starting to empty table bucket", map[string]any{ + "table_bucket_arn": tableBucketARN, + }) + + // First, list all namespaces in the table bucket. + input := s3tables.ListNamespacesInput{ + TableBucketARN: aws.String(tableBucketARN), + } + pages := s3tables.NewListNamespacesPaginator(conn, &input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + + if err != nil { + return fmt.Errorf("listing S3 Tables Table Bucket (%s) namespaces: %w", tableBucketARN, err) + } + + // For each namespace, list and delete all tables. + for _, v := range page.Namespaces { + namespace := v.Namespace[0] + tflog.Debug(ctx, "Processing namespace", map[string]any{ + names.AttrNamespace: namespace, + }) + + inputLT := s3tables.ListTablesInput{ + Namespace: aws.String(namespace), + TableBucketARN: aws.String(tableBucketARN), + } + pages := s3tables.NewListTablesPaginator(conn, &inputLT) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + + if err != nil { + return fmt.Errorf("listing S3 Tables Table Bucket (%s,%s) tables: %w", tableBucketARN, namespace, err) + } + + // Delete each table. + for _, v := range page.Tables { + name := aws.ToString(v.Name) + tflog.Debug(ctx, "Deleting table", map[string]any{ + names.AttrName: name, + names.AttrNamespace: namespace, + }) + + input := s3tables.DeleteTableInput{ + Name: aws.String(name), + Namespace: aws.String(namespace), + TableBucketARN: aws.String(tableBucketARN), + } + _, err := conn.DeleteTable(ctx, &input) + + if errs.IsA[*awstypes.NotFoundException](err) { + continue + } + + if err != nil { + return fmt.Errorf("deleting S3 Tables Table Bucket (%s,%s) table (%s): %w", tableBucketARN, namespace, name, err) + } + } + } + + // After deleting all tables in the namespace, delete the namespace itself. 
+ tflog.Debug(ctx, "Deleting namespace", map[string]any{ + names.AttrNamespace: namespace, + }) + + inputDN := s3tables.DeleteNamespaceInput{ + Namespace: aws.String(namespace), + TableBucketARN: aws.String(tableBucketARN), + } + _, err = conn.DeleteNamespace(ctx, &inputDN) + + if errs.IsA[*awstypes.NotFoundException](err) { + continue + } + + if err != nil { + return fmt.Errorf("deleting S3 Tables Table Bucket (%s) namespace (%s): %w", tableBucketARN, namespace, err) + } + } + } + + tflog.Debug(ctx, "Successfully emptied table bucket", map[string]any{ + "table_bucket_arn": tableBucketARN, + }) + + return nil +} diff --git a/internal/service/s3tables/table_bucket_policy.go b/internal/service/s3tables/table_bucket_policy.go index 575d4586ff1a..86db3d3b077a 100644 --- a/internal/service/s3tables/table_bucket_policy.go +++ b/internal/service/s3tables/table_bucket_policy.go @@ -5,6 +5,7 @@ package s3tables import ( "context" + "fmt" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/s3tables" @@ -15,13 +16,12 @@ import ( "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" - "github.com/hashicorp/terraform-provider-aws/internal/create" "github.com/hashicorp/terraform-provider-aws/internal/errs" + "github.com/hashicorp/terraform-provider-aws/internal/errs/fwdiag" "github.com/hashicorp/terraform-provider-aws/internal/framework" - "github.com/hashicorp/terraform-provider-aws/internal/framework/flex" + fwflex "github.com/hashicorp/terraform-provider-aws/internal/framework/flex" fwtypes "github.com/hashicorp/terraform-provider-aws/internal/framework/types" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" - "github.com/hashicorp/terraform-provider-aws/names" ) // @FrameworkResource("aws_s3tables_table_bucket_policy", name="Table Bucket Policy") @@ -29,16 +29,12 @@ func 
newTableBucketPolicyResource(_ context.Context) (resource.ResourceWithConfi return &tableBucketPolicyResource{}, nil } -const ( - ResNameTableBucketPolicy = "Table Bucket Policy" -) - type tableBucketPolicyResource struct { framework.ResourceWithModel[tableBucketPolicyResourceModel] } -func (r *tableBucketPolicyResource) Schema(ctx context.Context, req resource.SchemaRequest, resp *resource.SchemaResponse) { - resp.Schema = schema.Schema{ +func (r *tableBucketPolicyResource) Schema(ctx context.Context, request resource.SchemaRequest, response *resource.SchemaResponse) { + response.Schema = schema.Schema{ Attributes: map[string]schema.Attribute{ "resource_policy": schema.StringAttribute{ CustomType: fwtypes.IAMPolicyType, @@ -55,167 +51,146 @@ func (r *tableBucketPolicyResource) Schema(ctx context.Context, req resource.Sch } } -func (r *tableBucketPolicyResource) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { - conn := r.Meta().S3TablesClient(ctx) - - var plan tableBucketPolicyResourceModel - resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...) - if resp.Diagnostics.HasError() { +func (r *tableBucketPolicyResource) Create(ctx context.Context, request resource.CreateRequest, response *resource.CreateResponse) { + var data tableBucketPolicyResourceModel + response.Diagnostics.Append(request.Plan.Get(ctx, &data)...) + if response.Diagnostics.HasError() { return } + conn := r.Meta().S3TablesClient(ctx) + + tableBucketARN := fwflex.StringValueFromFramework(ctx, data.TableBucketARN) var input s3tables.PutTableBucketPolicyInput - resp.Diagnostics.Append(flex.Expand(ctx, plan, &input)...) - if resp.Diagnostics.HasError() { + response.Diagnostics.Append(fwflex.Expand(ctx, data, &input)...) 
+ if response.Diagnostics.HasError() { return } _, err := conn.PutTableBucketPolicy(ctx, &input) - if err != nil { - resp.Diagnostics.AddError( - create.ProblemStandardMessage(names.S3Tables, create.ErrActionCreating, ResNameTableBucketPolicy, plan.TableBucketARN.String(), err), - err.Error(), - ) - return - } - out, err := findTableBucketPolicy(ctx, conn, plan.TableBucketARN.ValueString()) if err != nil { - resp.Diagnostics.AddError( - create.ProblemStandardMessage(names.S3Tables, create.ErrActionCreating, ResNameTableBucketPolicy, plan.TableBucketARN.String(), err), - err.Error(), - ) - return - } + response.Diagnostics.AddError(fmt.Sprintf("creating S3 Tables Table Bucket Policy (%s)", tableBucketARN), err.Error()) - resp.Diagnostics.Append(flex.Flatten(ctx, out, &plan)...) - if resp.Diagnostics.HasError() { return } - resp.Diagnostics.Append(resp.State.Set(ctx, plan)...) + response.Diagnostics.Append(response.State.Set(ctx, data)...) } -func (r *tableBucketPolicyResource) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { - conn := r.Meta().S3TablesClient(ctx) - - var state tableBucketPolicyResourceModel - resp.Diagnostics.Append(req.State.Get(ctx, &state)...) - if resp.Diagnostics.HasError() { +func (r *tableBucketPolicyResource) Read(ctx context.Context, request resource.ReadRequest, response *resource.ReadResponse) { + var data tableBucketPolicyResourceModel + response.Diagnostics.Append(request.State.Get(ctx, &data)...) 
+ if response.Diagnostics.HasError() { return } - out, err := findTableBucketPolicy(ctx, conn, state.TableBucketARN.ValueString()) + conn := r.Meta().S3TablesClient(ctx) + + tableBucketARN := fwflex.StringValueFromFramework(ctx, data.TableBucketARN) + output, err := findTableBucketPolicyByARN(ctx, conn, tableBucketARN) + if tfresource.NotFound(err) { - resp.State.RemoveResource(ctx) + response.Diagnostics.Append(fwdiag.NewResourceNotFoundWarningDiagnostic(err)) + response.State.RemoveResource(ctx) + return } + if err != nil { - resp.Diagnostics.AddError( - create.ProblemStandardMessage(names.S3Tables, create.ErrActionReading, ResNameTableBucketPolicy, state.TableBucketARN.String(), err), - err.Error(), - ) - return - } + response.Diagnostics.AddError(fmt.Sprintf("reading S3 Tables Table Bucket Policy (%s)", tableBucketARN), err.Error()) - resp.Diagnostics.Append(flex.Flatten(ctx, out, &state)...) - if resp.Diagnostics.HasError() { return } - resp.Diagnostics.Append(resp.State.Set(ctx, &state)...) -} + data.ResourcePolicy = fwtypes.IAMPolicyValue(aws.ToString(output.ResourcePolicy)) -func (r *tableBucketPolicyResource) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) { - conn := r.Meta().S3TablesClient(ctx) + response.Diagnostics.Append(response.State.Set(ctx, &data)...) +} - var plan tableBucketPolicyResourceModel - resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...) - if resp.Diagnostics.HasError() { +func (r *tableBucketPolicyResource) Update(ctx context.Context, request resource.UpdateRequest, response *resource.UpdateResponse) { + var new tableBucketPolicyResourceModel + response.Diagnostics.Append(request.Plan.Get(ctx, &new)...) + if response.Diagnostics.HasError() { return } + conn := r.Meta().S3TablesClient(ctx) + + tableBucketARN := fwflex.StringValueFromFramework(ctx, new.TableBucketARN) var input s3tables.PutTableBucketPolicyInput - resp.Diagnostics.Append(flex.Expand(ctx, plan, &input)...) 
- if resp.Diagnostics.HasError() { + response.Diagnostics.Append(fwflex.Expand(ctx, new, &input)...) + if response.Diagnostics.HasError() { return } _, err := conn.PutTableBucketPolicy(ctx, &input) - if err != nil { - resp.Diagnostics.AddError( - create.ProblemStandardMessage(names.S3Tables, create.ErrActionCreating, ResNameTableBucketPolicy, plan.TableBucketARN.String(), err), - err.Error(), - ) - return - } - out, err := findTableBucketPolicy(ctx, conn, plan.TableBucketARN.ValueString()) if err != nil { - resp.Diagnostics.AddError( - create.ProblemStandardMessage(names.S3Tables, create.ErrActionCreating, ResNameTableBucketPolicy, plan.TableBucketARN.String(), err), - err.Error(), - ) - return - } + response.Diagnostics.AddError(fmt.Sprintf("updating S3 Tables Table Bucket Policy (%s)", tableBucketARN), err.Error()) - resp.Diagnostics.Append(flex.Flatten(ctx, out, &plan)...) - if resp.Diagnostics.HasError() { return } - resp.Diagnostics.Append(resp.State.Set(ctx, plan)...) + response.Diagnostics.Append(response.State.Set(ctx, new)...) } -func (r *tableBucketPolicyResource) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { - conn := r.Meta().S3TablesClient(ctx) - - var state tableBucketPolicyResourceModel - resp.Diagnostics.Append(req.State.Get(ctx, &state)...) - if resp.Diagnostics.HasError() { +func (r *tableBucketPolicyResource) Delete(ctx context.Context, request resource.DeleteRequest, response *resource.DeleteResponse) { + var data tableBucketPolicyResourceModel + response.Diagnostics.Append(request.State.Get(ctx, &data)...) 
+ if response.Diagnostics.HasError() { return } + conn := r.Meta().S3TablesClient(ctx) + + tableBucketARN := fwflex.StringValueFromFramework(ctx, data.TableBucketARN) input := s3tables.DeleteTableBucketPolicyInput{ - TableBucketARN: state.TableBucketARN.ValueStringPointer(), + TableBucketARN: aws.String(tableBucketARN), } - _, err := conn.DeleteTableBucketPolicy(ctx, &input) + + if errs.IsA[*awstypes.NotFoundException](err) { + return + } + if err != nil { - if errs.IsA[*awstypes.NotFoundException](err) { - return - } + response.Diagnostics.AddError(fmt.Sprintf("deleting S3 Tables Table Bucket Policy (%s)", tableBucketARN), err.Error()) - resp.Diagnostics.AddError( - create.ProblemStandardMessage(names.S3Tables, create.ErrActionDeleting, ResNameTableBucketPolicy, state.TableBucketARN.String(), err), - err.Error(), - ) return } } -func (r *tableBucketPolicyResource) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) { - resource.ImportStatePassthroughID(ctx, path.Root("table_bucket_arn"), req, resp) +func (r *tableBucketPolicyResource) ImportState(ctx context.Context, request resource.ImportStateRequest, response *resource.ImportStateResponse) { + resource.ImportStatePassthroughID(ctx, path.Root("table_bucket_arn"), request, response) } -func findTableBucketPolicy(ctx context.Context, conn *s3tables.Client, tableBucketARN string) (*s3tables.GetTableBucketPolicyOutput, error) { - in := s3tables.GetTableBucketPolicyInput{ +func findTableBucketPolicyByARN(ctx context.Context, conn *s3tables.Client, tableBucketARN string) (*s3tables.GetTableBucketPolicyOutput, error) { + input := s3tables.GetTableBucketPolicyInput{ TableBucketARN: aws.String(tableBucketARN), } - out, err := conn.GetTableBucketPolicy(ctx, &in) - if err != nil { - if errs.IsA[*awstypes.NotFoundException](err) { - return nil, &retry.NotFoundError{ - LastError: err, - LastRequest: in, - } + return findTableBucketPolicy(ctx, conn, &input) +} + +func 
findTableBucketPolicy(ctx context.Context, conn *s3tables.Client, input *s3tables.GetTableBucketPolicyInput) (*s3tables.GetTableBucketPolicyOutput, error) { + output, err := conn.GetTableBucketPolicy(ctx, input) + + if errs.IsA[*awstypes.NotFoundException](err) { + return nil, &retry.NotFoundError{ + LastError: err, } + } + if err != nil { return nil, err } - return out, nil + if output == nil || aws.ToString(output.ResourcePolicy) == "" { + return nil, tfresource.NewEmptyResultError(input) + } + + return output, nil } type tableBucketPolicyResourceModel struct { diff --git a/internal/service/s3tables/table_bucket_policy_test.go b/internal/service/s3tables/table_bucket_policy_test.go index 5e08ba3b5aea..82b0b4424cf0 100644 --- a/internal/service/s3tables/table_bucket_policy_test.go +++ b/internal/service/s3tables/table_bucket_policy_test.go @@ -5,7 +5,6 @@ package s3tables_test import ( "context" - "errors" "fmt" "testing" @@ -15,7 +14,6 @@ import ( "github.com/hashicorp/terraform-plugin-testing/terraform" "github.com/hashicorp/terraform-provider-aws/internal/acctest" "github.com/hashicorp/terraform-provider-aws/internal/conns" - "github.com/hashicorp/terraform-provider-aws/internal/create" tfs3tables "github.com/hashicorp/terraform-provider-aws/internal/service/s3tables" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/names" @@ -77,7 +75,7 @@ func TestAccS3TablesTableBucketPolicy_disappears(t *testing.T) { Config: testAccTableBucketPolicyConfig_basic(rName), Check: resource.ComposeAggregateTestCheckFunc( testAccCheckTableBucketPolicyExists(ctx, resourceName, &tablebucketpolicy), - acctest.CheckFrameworkResourceDisappears(ctx, acctest.Provider, tfs3tables.NewResourceTableBucketPolicy, resourceName), + acctest.CheckFrameworkResourceDisappears(ctx, acctest.Provider, tfs3tables.ResourceTableBucketPolicy, resourceName), ), ExpectNonEmptyPlan: true, }, @@ -94,40 +92,39 @@ func 
testAccCheckTableBucketPolicyDestroy(ctx context.Context) resource.TestChec continue } - _, err := tfs3tables.FindTableBucketPolicy(ctx, conn, rs.Primary.Attributes["table_bucket_arn"]) + _, err := tfs3tables.FindTableBucketPolicyByARN(ctx, conn, rs.Primary.Attributes["table_bucket_arn"]) + if tfresource.NotFound(err) { - return nil + continue } + if err != nil { - return create.Error(names.S3Tables, create.ErrActionCheckingDestroyed, tfs3tables.ResNameTableBucketPolicy, rs.Primary.ID, err) + return err } - return create.Error(names.S3Tables, create.ErrActionCheckingDestroyed, tfs3tables.ResNameTableBucketPolicy, rs.Primary.ID, errors.New("not destroyed")) + return fmt.Errorf("S3 Tables Table Bucket Policy %s still exists", rs.Primary.Attributes["table_bucket_arn"]) } return nil } } -func testAccCheckTableBucketPolicyExists(ctx context.Context, name string, tablebucketpolicy *s3tables.GetTableBucketPolicyOutput) resource.TestCheckFunc { +func testAccCheckTableBucketPolicyExists(ctx context.Context, n string, v *s3tables.GetTableBucketPolicyOutput) resource.TestCheckFunc { return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[name] + rs, ok := s.RootModule().Resources[n] if !ok { - return create.Error(names.S3Tables, create.ErrActionCheckingExistence, tfs3tables.ResNameTableBucketPolicy, name, errors.New("not found")) - } - - if rs.Primary.Attributes["table_bucket_arn"] == "" { - return create.Error(names.S3Tables, create.ErrActionCheckingExistence, tfs3tables.ResNameTableBucketPolicy, name, errors.New("not set")) + return fmt.Errorf("Not found: %s", n) } conn := acctest.Provider.Meta().(*conns.AWSClient).S3TablesClient(ctx) - resp, err := tfs3tables.FindTableBucketPolicy(ctx, conn, rs.Primary.Attributes["table_bucket_arn"]) + output, err := tfs3tables.FindTableBucketPolicyByARN(ctx, conn, rs.Primary.Attributes["table_bucket_arn"]) + if err != nil { - return create.Error(names.S3Tables, create.ErrActionCheckingExistence, 
tfs3tables.ResNameTableBucketPolicy, rs.Primary.ID, err) + return err } - *tablebucketpolicy = *resp + *v = *output return nil } diff --git a/internal/service/s3tables/table_bucket_test.go b/internal/service/s3tables/table_bucket_test.go index 19cf66336fe4..e6584e324228 100644 --- a/internal/service/s3tables/table_bucket_test.go +++ b/internal/service/s3tables/table_bucket_test.go @@ -5,10 +5,10 @@ package s3tables_test import ( "context" - "errors" "fmt" "testing" + "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/s3tables" awstypes "github.com/aws/aws-sdk-go-v2/service/s3tables/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" @@ -20,7 +20,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/acctest" tfknownvalue "github.com/hashicorp/terraform-provider-aws/internal/acctest/knownvalue" "github.com/hashicorp/terraform-provider-aws/internal/conns" - "github.com/hashicorp/terraform-provider-aws/internal/create" + "github.com/hashicorp/terraform-provider-aws/internal/errs" tfs3tables "github.com/hashicorp/terraform-provider-aws/internal/service/s3tables" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/names" @@ -28,8 +28,7 @@ import ( func TestAccS3TablesTableBucket_basic(t *testing.T) { ctx := acctest.Context(t) - - var tablebucket s3tables.GetTableBucketOutput + var v s3tables.GetTableBucketOutput rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_s3tables_table_bucket.test" @@ -45,9 +44,10 @@ func TestAccS3TablesTableBucket_basic(t *testing.T) { { Config: testAccTableBucketConfig_basic(rName), Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckTableBucketExists(ctx, resourceName, &tablebucket), + testAccCheckTableBucketExists(ctx, resourceName, &v), acctest.CheckResourceAttrRegionalARN(ctx, resourceName, names.AttrARN, "s3tables", "bucket/"+rName), 
resource.TestCheckResourceAttrSet(resourceName, names.AttrCreatedAt), + resource.TestCheckResourceAttr(resourceName, names.AttrForceDestroy, acctest.CtFalse), resource.TestCheckResourceAttr(resourceName, names.AttrName, rName), acctest.CheckResourceAttrAccountID(ctx, resourceName, names.AttrOwnerAccountID), ), @@ -69,6 +69,7 @@ func TestAccS3TablesTableBucket_basic(t *testing.T) { ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, names.AttrARN), ImportStateVerify: true, ImportStateVerifyIdentifierAttribute: names.AttrARN, + ImportStateVerifyIgnore: []string{names.AttrForceDestroy}, }, }, }) @@ -76,8 +77,7 @@ func TestAccS3TablesTableBucket_basic(t *testing.T) { func TestAccS3TablesTableBucket_encryptionConfiguration(t *testing.T) { ctx := acctest.Context(t) - - var tablebucket s3tables.GetTableBucketOutput + var v s3tables.GetTableBucketOutput rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_s3tables_table_bucket.test" resourceKeyOne := "aws_kms_key.test" @@ -95,9 +95,10 @@ func TestAccS3TablesTableBucket_encryptionConfiguration(t *testing.T) { { Config: testAccTableBucketConfig_encryptionConfiguration(rName), Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckTableBucketExists(ctx, resourceName, &tablebucket), + testAccCheckTableBucketExists(ctx, resourceName, &v), acctest.CheckResourceAttrRegionalARN(ctx, resourceName, names.AttrARN, "s3tables", "bucket/"+rName), resource.TestCheckResourceAttrSet(resourceName, names.AttrCreatedAt), + resource.TestCheckResourceAttr(resourceName, names.AttrForceDestroy, acctest.CtFalse), resource.TestCheckResourceAttr(resourceName, names.AttrName, rName), resource.TestCheckResourceAttrPair(resourceName, "encryption_configuration.kms_key_arn", resourceKeyOne, names.AttrARN), resource.TestCheckResourceAttr(resourceName, "encryption_configuration.sse_algorithm", "aws:kms"), @@ -118,9 +119,10 @@ func TestAccS3TablesTableBucket_encryptionConfiguration(t *testing.T) { { Config: 
testAccTableBucketConfig_encryptionConfigurationUpdate(rName), Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckTableBucketExists(ctx, resourceName, &tablebucket), + testAccCheckTableBucketExists(ctx, resourceName, &v), acctest.CheckResourceAttrRegionalARN(ctx, resourceName, names.AttrARN, "s3tables", "bucket/"+rName), resource.TestCheckResourceAttrSet(resourceName, names.AttrCreatedAt), + resource.TestCheckResourceAttr(resourceName, names.AttrForceDestroy, acctest.CtFalse), resource.TestCheckResourceAttr(resourceName, names.AttrName, rName), resource.TestCheckResourceAttrPair(resourceName, "encryption_configuration.kms_key_arn", resourceKeyTwo, names.AttrARN), resource.TestCheckResourceAttr(resourceName, "encryption_configuration.sse_algorithm", "aws:kms"), @@ -144,6 +146,7 @@ func TestAccS3TablesTableBucket_encryptionConfiguration(t *testing.T) { ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, names.AttrARN), ImportStateVerify: true, ImportStateVerifyIdentifierAttribute: names.AttrARN, + ImportStateVerifyIgnore: []string{names.AttrForceDestroy}, }, }, }) @@ -151,8 +154,7 @@ func TestAccS3TablesTableBucket_encryptionConfiguration(t *testing.T) { func TestAccS3TablesTableBucket_disappears(t *testing.T) { ctx := acctest.Context(t) - - var tablebucket s3tables.GetTableBucketOutput + var v s3tables.GetTableBucketOutput rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_s3tables_table_bucket.test" @@ -168,8 +170,8 @@ func TestAccS3TablesTableBucket_disappears(t *testing.T) { { Config: testAccTableBucketConfig_basic(rName), Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckTableBucketExists(ctx, resourceName, &tablebucket), - acctest.CheckFrameworkResourceDisappears(ctx, acctest.Provider, tfs3tables.NewResourceTableBucket, resourceName), + testAccCheckTableBucketExists(ctx, resourceName, &v), + acctest.CheckFrameworkResourceDisappears(ctx, acctest.Provider, tfs3tables.ResourceTableBucket, 
resourceName), ), ExpectNonEmptyPlan: true, }, @@ -179,8 +181,7 @@ func TestAccS3TablesTableBucket_disappears(t *testing.T) { func TestAccS3TablesTableBucket_maintenanceConfiguration(t *testing.T) { ctx := acctest.Context(t) - - var tablebucket s3tables.GetTableBucketOutput + var v s3tables.GetTableBucketOutput rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_s3tables_table_bucket.test" @@ -196,7 +197,7 @@ func TestAccS3TablesTableBucket_maintenanceConfiguration(t *testing.T) { { Config: testAccTableBucketConfig_maintenanceConfiguration(rName, awstypes.MaintenanceStatusEnabled, 20, 6), Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckTableBucketExists(ctx, resourceName, &tablebucket), + testAccCheckTableBucketExists(ctx, resourceName, &v), ), ConfigStateChecks: []statecheck.StateCheck{ statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("maintenance_configuration"), knownvalue.ObjectExact(map[string]knownvalue.Check{ @@ -216,11 +217,12 @@ func TestAccS3TablesTableBucket_maintenanceConfiguration(t *testing.T) { ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, names.AttrARN), ImportStateVerify: true, ImportStateVerifyIdentifierAttribute: names.AttrARN, + ImportStateVerifyIgnore: []string{names.AttrForceDestroy}, }, { Config: testAccTableBucketConfig_maintenanceConfiguration(rName, awstypes.MaintenanceStatusEnabled, 15, 4), Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckTableBucketExists(ctx, resourceName, &tablebucket), + testAccCheckTableBucketExists(ctx, resourceName, &v), ), ConfigStateChecks: []statecheck.StateCheck{ statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("maintenance_configuration"), knownvalue.ObjectExact(map[string]knownvalue.Check{ @@ -240,11 +242,12 @@ func TestAccS3TablesTableBucket_maintenanceConfiguration(t *testing.T) { ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, names.AttrARN), ImportStateVerify: true, ImportStateVerifyIdentifierAttribute: 
names.AttrARN, + ImportStateVerifyIgnore: []string{names.AttrForceDestroy}, }, { Config: testAccTableBucketConfig_maintenanceConfiguration(rName, awstypes.MaintenanceStatusDisabled, 15, 4), Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckTableBucketExists(ctx, resourceName, &tablebucket), + testAccCheckTableBucketExists(ctx, resourceName, &v), ), ConfigStateChecks: []statecheck.StateCheck{ statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("maintenance_configuration"), knownvalue.ObjectExact(map[string]knownvalue.Check{ @@ -264,11 +267,105 @@ func TestAccS3TablesTableBucket_maintenanceConfiguration(t *testing.T) { ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, names.AttrARN), ImportStateVerify: true, ImportStateVerifyIdentifierAttribute: names.AttrARN, + ImportStateVerifyIgnore: []string{names.AttrForceDestroy}, }, }, }) } +func TestAccS3TablesTableBucket_forceDestroy(t *testing.T) { + ctx := acctest.Context(t) + var v s3tables.GetTableBucketOutput + resourceName := "aws_s3tables_table_bucket.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.S3TablesServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckTableBucketDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccTableBucketConfig_forceDestroy(rName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTableBucketExists(ctx, resourceName, &v), + resource.TestCheckResourceAttr(resourceName, names.AttrForceDestroy, acctest.CtTrue), + testAccCheckTableBucketAddTables(ctx, resourceName, "namespace1", "table1"), + ), + }, + }, + }) +} + +func TestAccS3TablesTableBucket_forceDestroyMultipleNamespacesAndTables(t *testing.T) { + ctx := acctest.Context(t) + var v s3tables.GetTableBucketOutput + resourceName := 
"aws_s3tables_table_bucket.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.S3TablesServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckTableBucketDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccTableBucketConfig_forceDestroy(rName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTableBucketExists(ctx, resourceName, &v), + resource.TestCheckResourceAttr(resourceName, names.AttrForceDestroy, acctest.CtTrue), + testAccCheckTableBucketAddTables(ctx, resourceName, "namespace1", "table1"), + testAccCheckTableBucketAddTables(ctx, resourceName, "namespace2", "table2", "table3"), + testAccCheckTableBucketAddTables(ctx, resourceName, "namespace3", "table4", "table5", "table6"), + ), + }, + }, + }) +} + +func testAccCheckTableBucketAddTables(ctx context.Context, n string, namespace string, tableNames ...string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs := s.RootModule().Resources[n] + conn := acctest.Provider.Meta().(*conns.AWSClient).S3TablesClient(ctx) + + // First, create the namespace if it doesn't exist + _, err := conn.CreateNamespace(ctx, &s3tables.CreateNamespaceInput{ + TableBucketARN: aws.String(rs.Primary.Attributes[names.AttrARN]), + Namespace: []string{namespace}, + }) + if err != nil { + // Ignore if namespace already exists + if !errs.IsA[*awstypes.ConflictException](err) { + return fmt.Errorf("CreateNamespace error: %w", err) + } + } + + // Create each table + for _, tableName := range tableNames { + _, err := conn.CreateTable(ctx, &s3tables.CreateTableInput{ + TableBucketARN: aws.String(rs.Primary.Attributes[names.AttrARN]), + Namespace: aws.String(namespace), + Name: aws.String(tableName), + Format: awstypes.OpenTableFormatIceberg, + }) + if err != nil { + // 
Ignore if table already exists + if !errs.IsA[*awstypes.ConflictException](err) { + return fmt.Errorf("CreateTable error for table %s: %w", tableName, err) + } + } + } + + return nil + } +} + func testAccCheckTableBucketDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { conn := acctest.Provider.Meta().(*conns.AWSClient).S3TablesClient(ctx) @@ -278,40 +375,39 @@ func testAccCheckTableBucketDestroy(ctx context.Context) resource.TestCheckFunc continue } - _, err := tfs3tables.FindTableBucket(ctx, conn, rs.Primary.Attributes[names.AttrARN]) + _, err := tfs3tables.FindTableBucketByARN(ctx, conn, rs.Primary.Attributes[names.AttrARN]) + if tfresource.NotFound(err) { - return nil + continue } + if err != nil { - return create.Error(names.S3Tables, create.ErrActionCheckingDestroyed, tfs3tables.ResNameTableBucket, rs.Primary.ID, err) + return err } - return create.Error(names.S3Tables, create.ErrActionCheckingDestroyed, tfs3tables.ResNameTableBucket, rs.Primary.ID, errors.New("not destroyed")) + return fmt.Errorf("S3 Tables Table Bucket %s still exists", rs.Primary.Attributes[names.AttrARN]) } return nil } } -func testAccCheckTableBucketExists(ctx context.Context, name string, tablebucket *s3tables.GetTableBucketOutput) resource.TestCheckFunc { +func testAccCheckTableBucketExists(ctx context.Context, n string, v *s3tables.GetTableBucketOutput) resource.TestCheckFunc { return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[name] + rs, ok := s.RootModule().Resources[n] if !ok { - return create.Error(names.S3Tables, create.ErrActionCheckingExistence, tfs3tables.ResNameTableBucket, name, errors.New("not found")) - } - - if rs.Primary.Attributes[names.AttrARN] == "" { - return create.Error(names.S3Tables, create.ErrActionCheckingExistence, tfs3tables.ResNameTableBucket, name, errors.New("not set")) + return fmt.Errorf("Not found: %s", n) } conn := acctest.Provider.Meta().(*conns.AWSClient).S3TablesClient(ctx) - 
resp, err := tfs3tables.FindTableBucket(ctx, conn, rs.Primary.Attributes[names.AttrARN]) + output, err := tfs3tables.FindTableBucketByARN(ctx, conn, rs.Primary.Attributes[names.AttrARN]) + if err != nil { - return create.Error(names.S3Tables, create.ErrActionCheckingExistence, tfs3tables.ResNameTableBucket, rs.Primary.ID, err) + return err } - *tablebucket = *resp + *v = *output return nil } @@ -376,3 +472,12 @@ resource "aws_s3tables_table_bucket" "test" { } `, rName, status, nonCurrentDays, unreferencedDays) } + +func testAccTableBucketConfig_forceDestroy(rName string) string { + return fmt.Sprintf(` +resource "aws_s3tables_table_bucket" "test" { + name = %[1]q + force_destroy = true +} +`, rName) +} diff --git a/internal/service/s3tables/table_policy.go b/internal/service/s3tables/table_policy.go index 49831fe0ef94..cc942c8e8d60 100644 --- a/internal/service/s3tables/table_policy.go +++ b/internal/service/s3tables/table_policy.go @@ -16,10 +16,10 @@ import ( "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" "github.com/hashicorp/terraform-plugin-framework/types" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" - "github.com/hashicorp/terraform-provider-aws/internal/create" "github.com/hashicorp/terraform-provider-aws/internal/errs" + "github.com/hashicorp/terraform-provider-aws/internal/errs/fwdiag" "github.com/hashicorp/terraform-provider-aws/internal/framework" - "github.com/hashicorp/terraform-provider-aws/internal/framework/flex" + fwflex "github.com/hashicorp/terraform-provider-aws/internal/framework/flex" fwtypes "github.com/hashicorp/terraform-provider-aws/internal/framework/types" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/names" @@ -30,27 +30,21 @@ func newTablePolicyResource(_ context.Context) (resource.ResourceWithConfigure, return &tablePolicyResource{}, nil } -const ( - ResNameTablePolicy = "Table Policy" -) - type tablePolicyResource 
struct { framework.ResourceWithModel[tablePolicyResourceModel] } -func (r *tablePolicyResource) Schema(ctx context.Context, req resource.SchemaRequest, resp *resource.SchemaResponse) { - resp.Schema = schema.Schema{ +func (r *tablePolicyResource) Schema(ctx context.Context, request resource.SchemaRequest, response *resource.SchemaResponse) { + response.Schema = schema.Schema{ Attributes: map[string]schema.Attribute{ names.AttrName: schema.StringAttribute{ - Required: true, - Validators: tableNameValidator, + Required: true, PlanModifiers: []planmodifier.String{ stringplanmodifier.RequiresReplace(), }, }, names.AttrNamespace: schema.StringAttribute{ - Required: true, - Validators: namespaceNameValidator, + Required: true, PlanModifiers: []planmodifier.String{ stringplanmodifier.RequiresReplace(), }, @@ -70,185 +64,163 @@ func (r *tablePolicyResource) Schema(ctx context.Context, req resource.SchemaReq } } -func (r *tablePolicyResource) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { - conn := r.Meta().S3TablesClient(ctx) - - var plan tablePolicyResourceModel - resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...) - if resp.Diagnostics.HasError() { +func (r *tablePolicyResource) Create(ctx context.Context, request resource.CreateRequest, response *resource.CreateResponse) { + var data tablePolicyResourceModel + response.Diagnostics.Append(request.Plan.Get(ctx, &data)...) + if response.Diagnostics.HasError() { return } + conn := r.Meta().S3TablesClient(ctx) + + name := fwflex.StringValueFromFramework(ctx, data.Name) var input s3tables.PutTablePolicyInput - resp.Diagnostics.Append(flex.Expand(ctx, plan, &input)...) - if resp.Diagnostics.HasError() { + response.Diagnostics.Append(fwflex.Expand(ctx, data, &input)...) 
+ if response.Diagnostics.HasError() { return } _, err := conn.PutTablePolicy(ctx, &input) - if err != nil { - resp.Diagnostics.AddError( - create.ProblemStandardMessage(names.S3Tables, create.ErrActionCreating, ResNameTablePolicy, plan.Name.String(), err), - err.Error(), - ) - return - } - out, err := findTablePolicy(ctx, conn, plan.TableBucketARN.ValueString(), plan.Namespace.ValueString(), plan.Name.ValueString()) if err != nil { - resp.Diagnostics.AddError( - create.ProblemStandardMessage(names.S3Tables, create.ErrActionCreating, ResNameTableBucketPolicy, plan.Name.String(), err), - err.Error(), - ) - return - } + response.Diagnostics.AddError(fmt.Sprintf("creating S3 Tables Table Policy (%s)", name), err.Error()) - resp.Diagnostics.Append(flex.Flatten(ctx, out, &plan)...) - if resp.Diagnostics.HasError() { return } - resp.Diagnostics.Append(resp.State.Set(ctx, plan)...) + response.Diagnostics.Append(response.State.Set(ctx, data)...) } -func (r *tablePolicyResource) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { - conn := r.Meta().S3TablesClient(ctx) - - var state tablePolicyResourceModel - resp.Diagnostics.Append(req.State.Get(ctx, &state)...) - if resp.Diagnostics.HasError() { +func (r *tablePolicyResource) Read(ctx context.Context, request resource.ReadRequest, response *resource.ReadResponse) { + var data tablePolicyResourceModel + response.Diagnostics.Append(request.State.Get(ctx, &data)...) 
+ if response.Diagnostics.HasError() { return } - out, err := findTablePolicy(ctx, conn, state.TableBucketARN.ValueString(), state.Namespace.ValueString(), state.Name.ValueString()) + conn := r.Meta().S3TablesClient(ctx) + + name, namespace, tableBucketARN := fwflex.StringValueFromFramework(ctx, data.Name), fwflex.StringValueFromFramework(ctx, data.Namespace), fwflex.StringValueFromFramework(ctx, data.TableBucketARN) + output, err := findTablePolicyByThreePartKey(ctx, conn, tableBucketARN, namespace, name) + if tfresource.NotFound(err) { - resp.State.RemoveResource(ctx) + response.Diagnostics.Append(fwdiag.NewResourceNotFoundWarningDiagnostic(err)) + response.State.RemoveResource(ctx) + return } + if err != nil { - resp.Diagnostics.AddError( - create.ProblemStandardMessage(names.S3Tables, create.ErrActionReading, ResNameTablePolicy, state.Name.String(), err), - err.Error(), - ) + response.Diagnostics.AddError(fmt.Sprintf("reading S3 Tables Table Policy (%s)", name), err.Error()) + return } - resp.Diagnostics.Append(flex.Flatten(ctx, out, &state)...) - if resp.Diagnostics.HasError() { + response.Diagnostics.Append(fwflex.Flatten(ctx, output, &data)...) + if response.Diagnostics.HasError() { return } - resp.Diagnostics.Append(resp.State.Set(ctx, &state)...) + response.Diagnostics.Append(response.State.Set(ctx, &data)...) } -func (r *tablePolicyResource) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) { - conn := r.Meta().S3TablesClient(ctx) - - var plan tablePolicyResourceModel - resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...) - if resp.Diagnostics.HasError() { +func (r *tablePolicyResource) Update(ctx context.Context, request resource.UpdateRequest, response *resource.UpdateResponse) { + var new tablePolicyResourceModel + response.Diagnostics.Append(request.Plan.Get(ctx, &new)...) 
+ if response.Diagnostics.HasError() { return } + conn := r.Meta().S3TablesClient(ctx) + + name := fwflex.StringValueFromFramework(ctx, new.Name) var input s3tables.PutTablePolicyInput - resp.Diagnostics.Append(flex.Expand(ctx, plan, &input)...) - if resp.Diagnostics.HasError() { + response.Diagnostics.Append(fwflex.Expand(ctx, new, &input)...) + if response.Diagnostics.HasError() { return } _, err := conn.PutTablePolicy(ctx, &input) - if err != nil { - resp.Diagnostics.AddError( - create.ProblemStandardMessage(names.S3Tables, create.ErrActionUpdating, ResNameTablePolicy, plan.Name.String(), err), - err.Error(), - ) - return - } - out, err := findTablePolicy(ctx, conn, plan.TableBucketARN.ValueString(), plan.Namespace.ValueString(), plan.Name.ValueString()) if err != nil { - resp.Diagnostics.AddError( - create.ProblemStandardMessage(names.S3Tables, create.ErrActionCreating, ResNameTableBucketPolicy, plan.Name.String(), err), - err.Error(), - ) - return - } + response.Diagnostics.AddError(fmt.Sprintf("creating S3 Tables Table Policy (%s)", name), err.Error()) - resp.Diagnostics.Append(flex.Flatten(ctx, out, &plan)...) - if resp.Diagnostics.HasError() { return } - resp.Diagnostics.Append(resp.State.Set(ctx, &plan)...) + response.Diagnostics.Append(response.State.Set(ctx, &new)...) } -func (r *tablePolicyResource) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { - conn := r.Meta().S3TablesClient(ctx) - - var state tablePolicyResourceModel - resp.Diagnostics.Append(req.State.Get(ctx, &state)...) - if resp.Diagnostics.HasError() { +func (r *tablePolicyResource) Delete(ctx context.Context, request resource.DeleteRequest, response *resource.DeleteResponse) { + var data tablePolicyResourceModel + response.Diagnostics.Append(request.State.Get(ctx, &data)...) 
+ if response.Diagnostics.HasError() { return } + conn := r.Meta().S3TablesClient(ctx) + + name, namespace, tableBucketARN := fwflex.StringValueFromFramework(ctx, data.Name), fwflex.StringValueFromFramework(ctx, data.Namespace), fwflex.StringValueFromFramework(ctx, data.TableBucketARN) input := s3tables.DeleteTablePolicyInput{ - Name: state.Name.ValueStringPointer(), - Namespace: state.Namespace.ValueStringPointer(), - TableBucketARN: state.TableBucketARN.ValueStringPointer(), + Name: aws.String(name), + Namespace: aws.String(namespace), + TableBucketARN: aws.String(tableBucketARN), } - _, err := conn.DeleteTablePolicy(ctx, &input) + + if errs.IsA[*awstypes.NotFoundException](err) { + return + } + if err != nil { - if errs.IsA[*awstypes.NotFoundException](err) { - return - } + response.Diagnostics.AddError(fmt.Sprintf("deleting S3 Tables Table Policy (%s)", name), err.Error()) - resp.Diagnostics.AddError( - create.ProblemStandardMessage(names.S3Tables, create.ErrActionDeleting, ResNameTablePolicy, state.Name.String(), err), - err.Error(), - ) return } } -func (r *tablePolicyResource) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) { - identifier, err := parseTableIdentifier(req.ID) +func (r *tablePolicyResource) ImportState(ctx context.Context, request resource.ImportStateRequest, response *resource.ImportStateResponse) { + identifier, err := parseTableIdentifier(request.ID) if err != nil { - resp.Diagnostics.AddError( + response.Diagnostics.AddError( "Invalid Import ID", "Import IDs for S3 Tables Table Policies must use the format
"+tableIDSeparator+""+tableIDSeparator+"
.\n"+ - fmt.Sprintf("Had %q", req.ID), + fmt.Sprintf("Had %q", request.ID), ) return } - identifier.PopulateState(ctx, &resp.State, &resp.Diagnostics) + identifier.PopulateState(ctx, &response.State, &response.Diagnostics) } -func findTablePolicy(ctx context.Context, conn *s3tables.Client, bucketARN, namespace, name string) (*s3tables.GetTablePolicyOutput, error) { - in := s3tables.GetTablePolicyInput{ +func findTablePolicyByThreePartKey(ctx context.Context, conn *s3tables.Client, tableBucketARN, namespace, name string) (*s3tables.GetTablePolicyOutput, error) { + input := s3tables.GetTablePolicyInput{ Name: aws.String(name), Namespace: aws.String(namespace), - TableBucketARN: aws.String(bucketARN), + TableBucketARN: aws.String(tableBucketARN), } - out, err := conn.GetTablePolicy(ctx, &in) - if err != nil { - if errs.IsA[*awstypes.NotFoundException](err) { - return nil, &retry.NotFoundError{ - LastError: err, - LastRequest: in, - } + return findTablePolicy(ctx, conn, &input) +} + +func findTablePolicy(ctx context.Context, conn *s3tables.Client, input *s3tables.GetTablePolicyInput) (*s3tables.GetTablePolicyOutput, error) { + output, err := conn.GetTablePolicy(ctx, input) + + if errs.IsA[*awstypes.NotFoundException](err) { + return nil, &retry.NotFoundError{ + LastError: err, } + } + if err != nil { return nil, err } - if out == nil { - return nil, tfresource.NewEmptyResultError(in) + if output == nil || aws.ToString(output.ResourcePolicy) == "" { + return nil, tfresource.NewEmptyResultError(input) } - return out, nil + return output, nil } type tablePolicyResourceModel struct { diff --git a/internal/service/s3tables/table_policy_test.go b/internal/service/s3tables/table_policy_test.go index 8af7c3d86da2..7e2c8827297f 100644 --- a/internal/service/s3tables/table_policy_test.go +++ b/internal/service/s3tables/table_policy_test.go @@ -5,7 +5,6 @@ package s3tables_test import ( "context" - "errors" "fmt" "strings" "testing" @@ -16,7 +15,6 @@ import ( 
"github.com/hashicorp/terraform-plugin-testing/terraform" "github.com/hashicorp/terraform-provider-aws/internal/acctest" "github.com/hashicorp/terraform-provider-aws/internal/conns" - "github.com/hashicorp/terraform-provider-aws/internal/create" tfs3tables "github.com/hashicorp/terraform-provider-aws/internal/service/s3tables" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/names" @@ -101,48 +99,47 @@ func testAccCheckTablePolicyDestroy(ctx context.Context) resource.TestCheckFunc continue } - _, err := tfs3tables.FindTablePolicy(ctx, conn, + _, err := tfs3tables.FindTablePolicyByThreePartKey(ctx, conn, rs.Primary.Attributes["table_bucket_arn"], rs.Primary.Attributes[names.AttrNamespace], rs.Primary.Attributes[names.AttrName], ) + if tfresource.NotFound(err) { - return nil + continue } + if err != nil { - return create.Error(names.S3Tables, create.ErrActionCheckingDestroyed, tfs3tables.ResNameTablePolicy, rs.Primary.ID, err) + return err } - return create.Error(names.S3Tables, create.ErrActionCheckingDestroyed, tfs3tables.ResNameTablePolicy, rs.Primary.ID, errors.New("not destroyed")) + return fmt.Errorf("S3 Tables Table Policy %s still exists", rs.Primary.Attributes[names.AttrName]) } return nil } } -func testAccCheckTablePolicyExists(ctx context.Context, name string, tablepolicy *s3tables.GetTablePolicyOutput) resource.TestCheckFunc { +func testAccCheckTablePolicyExists(ctx context.Context, n string, v *s3tables.GetTablePolicyOutput) resource.TestCheckFunc { return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[name] + rs, ok := s.RootModule().Resources[n] if !ok { - return create.Error(names.S3Tables, create.ErrActionCheckingExistence, tfs3tables.ResNameTablePolicy, name, errors.New("not found")) - } - - if rs.Primary.Attributes["table_bucket_arn"] == "" || rs.Primary.Attributes[names.AttrNamespace] == "" || rs.Primary.Attributes[names.AttrName] == "" { - return 
create.Error(names.S3Tables, create.ErrActionCheckingExistence, tfs3tables.ResNameTablePolicy, name, errors.New("not set")) + return fmt.Errorf("Not found: %s", n) } conn := acctest.Provider.Meta().(*conns.AWSClient).S3TablesClient(ctx) - resp, err := tfs3tables.FindTablePolicy(ctx, conn, + output, err := tfs3tables.FindTablePolicyByThreePartKey(ctx, conn, rs.Primary.Attributes["table_bucket_arn"], rs.Primary.Attributes[names.AttrNamespace], rs.Primary.Attributes[names.AttrName], ) + if err != nil { - return create.Error(names.S3Tables, create.ErrActionCheckingExistence, tfs3tables.ResNameTablePolicy, rs.Primary.ID, err) + return err } - *tablepolicy = *resp + *v = *output return nil } diff --git a/internal/service/s3tables/table_test.go b/internal/service/s3tables/table_test.go index aaa6c18833cd..77539b6fbde3 100644 --- a/internal/service/s3tables/table_test.go +++ b/internal/service/s3tables/table_test.go @@ -5,7 +5,6 @@ package s3tables_test import ( "context" - "errors" "fmt" "strings" "testing" @@ -25,7 +24,6 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/acctest" tfknownvalue "github.com/hashicorp/terraform-provider-aws/internal/acctest/knownvalue" "github.com/hashicorp/terraform-provider-aws/internal/conns" - "github.com/hashicorp/terraform-provider-aws/internal/create" tfs3tables "github.com/hashicorp/terraform-provider-aws/internal/service/s3tables" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/internal/verify" @@ -126,9 +124,14 @@ func TestAccS3TablesTable_disappears(t *testing.T) { Config: testAccTableConfig_basic(rName, namespace, bucketName), Check: resource.ComposeAggregateTestCheckFunc( testAccCheckTableExists(ctx, resourceName, &table), - acctest.CheckFrameworkResourceDisappears(ctx, acctest.Provider, tfs3tables.NewResourceTable, resourceName), + acctest.CheckFrameworkResourceDisappears(ctx, acctest.Provider, tfs3tables.ResourceTable, resourceName), ), 
ExpectNonEmptyPlan: true, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + }, + }, }, }, }) @@ -592,48 +595,47 @@ func testAccCheckTableDestroy(ctx context.Context) resource.TestCheckFunc { continue } - _, err := tfs3tables.FindTable(ctx, conn, + _, err := tfs3tables.FindTableByThreePartKey(ctx, conn, rs.Primary.Attributes["table_bucket_arn"], rs.Primary.Attributes[names.AttrNamespace], rs.Primary.Attributes[names.AttrName], ) + if tfresource.NotFound(err) { - return nil + continue } + if err != nil { - return create.Error(names.S3Tables, create.ErrActionCheckingDestroyed, tfs3tables.ResNameTable, rs.Primary.ID, err) + return err } - return create.Error(names.S3Tables, create.ErrActionCheckingDestroyed, tfs3tables.ResNameTable, rs.Primary.ID, errors.New("not destroyed")) + return fmt.Errorf("S3 Tables Table %s still exists", rs.Primary.Attributes[names.AttrName]) } return nil } } -func testAccCheckTableExists(ctx context.Context, name string, table *s3tables.GetTableOutput) resource.TestCheckFunc { +func testAccCheckTableExists(ctx context.Context, n string, v *s3tables.GetTableOutput) resource.TestCheckFunc { return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[name] + rs, ok := s.RootModule().Resources[n] if !ok { - return create.Error(names.S3Tables, create.ErrActionCheckingExistence, tfs3tables.ResNameTable, name, errors.New("not found")) - } - - if rs.Primary.Attributes["table_bucket_arn"] == "" || rs.Primary.Attributes[names.AttrNamespace] == "" || rs.Primary.Attributes[names.AttrName] == "" { - return create.Error(names.S3Tables, create.ErrActionCheckingExistence, tfs3tables.ResNameTable, name, errors.New("not set")) + return fmt.Errorf("Not found: %s", n) } conn := acctest.Provider.Meta().(*conns.AWSClient).S3TablesClient(ctx) - resp, err := tfs3tables.FindTable(ctx, conn, + output, err := 
tfs3tables.FindTableByThreePartKey(ctx, conn, rs.Primary.Attributes["table_bucket_arn"], rs.Primary.Attributes[names.AttrNamespace], rs.Primary.Attributes[names.AttrName], ) + if err != nil { - return create.Error(names.S3Tables, create.ErrActionCheckingExistence, tfs3tables.ResNameTable, rs.Primary.ID, err) + return err } - *table = *resp + *v = *output return nil } diff --git a/internal/service/s3vectors/generate.go b/internal/service/s3vectors/generate.go new file mode 100644 index 000000000000..b5fabf66b3e9 --- /dev/null +++ b/internal/service/s3vectors/generate.go @@ -0,0 +1,7 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +//go:generate go run ../../generate/servicepackage/main.go +// ONLY generate directives and package declaration! Do not add anything else to this file. + +package s3vectors diff --git a/internal/service/s3vectors/service_endpoint_resolver_gen.go b/internal/service/s3vectors/service_endpoint_resolver_gen.go new file mode 100644 index 000000000000..1db1fb115fb0 --- /dev/null +++ b/internal/service/s3vectors/service_endpoint_resolver_gen.go @@ -0,0 +1,82 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. 
+ +package s3vectors + +import ( + "context" + "fmt" + "net" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/s3vectors" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ s3vectors.EndpointResolverV2 = resolverV2{} + +type resolverV2 struct { + defaultResolver s3vectors.EndpointResolverV2 +} + +func newEndpointResolverV2() resolverV2 { + return resolverV2{ + defaultResolver: s3vectors.NewDefaultEndpointResolverV2(), + } +} + +func (r resolverV2) ResolveEndpoint(ctx context.Context, params s3vectors.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws.ToBool(params.UseFIPS) + + if eps := params.Endpoint; aws.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": endpoint, + }) + + if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + params.UseFIPS = aws.Bool(false) + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URI.String(), + }) + + hostname := endpoint.URI.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + params.UseFIPS = aws.Bool(false) + } else { + err = fmt.Errorf("looking up s3vectors endpoint %q: %w", hostname, err) + return + } + } else { + return endpoint, err + } + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) +} + +func withBaseEndpoint(endpoint 
string) func(*s3vectors.Options) { + return func(o *s3vectors.Options) { + if endpoint != "" { + o.BaseEndpoint = aws.String(endpoint) + } + } +} diff --git a/internal/service/s3vectors/service_endpoints_gen_test.go b/internal/service/s3vectors/service_endpoints_gen_test.go new file mode 100644 index 000000000000..8bf60804b3f3 --- /dev/null +++ b/internal/service/s3vectors/service_endpoints_gen_test.go @@ -0,0 +1,602 @@ +// Code generated by internal/generate/serviceendpointtests/main.go; DO NOT EDIT. + +package s3vectors_test + +import ( + "context" + "errors" + "fmt" + "maps" + "net" + "net/url" + "os" + "path/filepath" + "reflect" + "strings" + "testing" + + "github.com/aws/aws-sdk-go-v2/aws" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/s3vectors" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "github.com/google/go-cmp/cmp" + "github.com/hashicorp/aws-sdk-go-base/v2/servicemocks" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + terraformsdk "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/errs" + "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" + "github.com/hashicorp/terraform-provider-aws/internal/provider/sdkv2" + "github.com/hashicorp/terraform-provider-aws/names" +) + +type endpointTestCase struct { + with []setupFunc + expected caseExpectations +} + +type caseSetup struct { + config map[string]any + configFile configFile + environmentVariables map[string]string +} + +type configFile struct { + baseUrl string + serviceUrl string +} + +type caseExpectations struct { + diags diag.Diagnostics + endpoint string + region string +} + +type apiCallParams struct { + endpoint string + region string +} + +type setupFunc func(setup *caseSetup) + +type callFunc func(ctx context.Context, t *testing.T, meta 
*conns.AWSClient) apiCallParams + +const ( + packageNameConfigEndpoint = "https://packagename-config.endpoint.test/" + awsServiceEnvvarEndpoint = "https://service-envvar.endpoint.test/" + baseEnvvarEndpoint = "https://base-envvar.endpoint.test/" + serviceConfigFileEndpoint = "https://service-configfile.endpoint.test/" + baseConfigFileEndpoint = "https://base-configfile.endpoint.test/" +) + +const ( + packageName = "s3vectors" + awsEnvVar = "AWS_ENDPOINT_URL_S3VECTORS" + baseEnvVar = "AWS_ENDPOINT_URL" + configParam = "s3vectors" +) + +const ( + expectedCallRegion = "us-west-2" //lintignore:AWSAT003 +) + +func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.Setenv + ctx := t.Context() + const providerRegion = "us-west-2" //lintignore:AWSAT003 + const expectedEndpointRegion = providerRegion + + testcases := map[string]endpointTestCase{ + "no config": { + with: []setupFunc{withNoConfig}, + expected: expectDefaultEndpoint(ctx, t, expectedEndpointRegion), + }, + + // Package name endpoint on Config + + "package name endpoint config": { + with: []setupFunc{ + withPackageNameEndpointInConfig, + }, + expected: expectPackageNameConfigEndpoint(), + }, + + "package name endpoint config overrides aws service envvar": { + with: []setupFunc{ + withPackageNameEndpointInConfig, + withAwsEnvVar, + }, + expected: expectPackageNameConfigEndpoint(), + }, + + "package name endpoint config overrides base envvar": { + with: []setupFunc{ + withPackageNameEndpointInConfig, + withBaseEnvVar, + }, + expected: expectPackageNameConfigEndpoint(), + }, + + "package name endpoint config overrides service config file": { + with: []setupFunc{ + withPackageNameEndpointInConfig, + withServiceEndpointInConfigFile, + }, + expected: expectPackageNameConfigEndpoint(), + }, + + "package name endpoint config overrides base config file": { + with: []setupFunc{ + withPackageNameEndpointInConfig, + withBaseEndpointInConfigFile, + }, + expected: expectPackageNameConfigEndpoint(), + }, 
+ + // Service endpoint in AWS envvar + + "service aws envvar": { + with: []setupFunc{ + withAwsEnvVar, + }, + expected: expectAwsEnvVarEndpoint(), + }, + + "service aws envvar overrides base envvar": { + with: []setupFunc{ + withAwsEnvVar, + withBaseEnvVar, + }, + expected: expectAwsEnvVarEndpoint(), + }, + + "service aws envvar overrides service config file": { + with: []setupFunc{ + withAwsEnvVar, + withServiceEndpointInConfigFile, + }, + expected: expectAwsEnvVarEndpoint(), + }, + + "service aws envvar overrides base config file": { + with: []setupFunc{ + withAwsEnvVar, + withBaseEndpointInConfigFile, + }, + expected: expectAwsEnvVarEndpoint(), + }, + + // Base endpoint in envvar + + "base endpoint envvar": { + with: []setupFunc{ + withBaseEnvVar, + }, + expected: expectBaseEnvVarEndpoint(), + }, + + "base endpoint envvar overrides service config file": { + with: []setupFunc{ + withBaseEnvVar, + withServiceEndpointInConfigFile, + }, + expected: expectBaseEnvVarEndpoint(), + }, + + "base endpoint envvar overrides base config file": { + with: []setupFunc{ + withBaseEnvVar, + withBaseEndpointInConfigFile, + }, + expected: expectBaseEnvVarEndpoint(), + }, + + // Service endpoint in config file + + "service config file": { + with: []setupFunc{ + withServiceEndpointInConfigFile, + }, + expected: expectServiceConfigFileEndpoint(), + }, + + "service config file overrides base config file": { + with: []setupFunc{ + withServiceEndpointInConfigFile, + withBaseEndpointInConfigFile, + }, + expected: expectServiceConfigFileEndpoint(), + }, + + // Base endpoint in config file + + "base endpoint config file": { + with: []setupFunc{ + withBaseEndpointInConfigFile, + }, + expected: expectBaseConfigFileEndpoint(), + }, + + // Use FIPS endpoint on Config + + "use fips config": { + with: []setupFunc{ + withUseFIPSInConfig, + }, + expected: expectDefaultFIPSEndpoint(ctx, t, expectedEndpointRegion), + }, + + "use fips config with package name endpoint config": { + with: []setupFunc{ 
+ withUseFIPSInConfig, + withPackageNameEndpointInConfig, + }, + expected: expectPackageNameConfigEndpoint(), + }, + } + + for name, testcase := range testcases { //nolint:paralleltest // uses t.Setenv + t.Run(name, func(t *testing.T) { + testEndpointCase(ctx, t, providerRegion, testcase, callService) + }) + } +} + +func defaultEndpoint(ctx context.Context, region string) (url.URL, error) { + r := s3vectors.NewDefaultEndpointResolverV2() + + ep, err := r.ResolveEndpoint(ctx, s3vectors.EndpointParameters{ + Region: aws.String(region), + }) + if err != nil { + return url.URL{}, err + } + + if ep.URI.Path == "" { + ep.URI.Path = "/" + } + + return ep.URI, nil +} + +func defaultFIPSEndpoint(ctx context.Context, region string) (url.URL, error) { + r := s3vectors.NewDefaultEndpointResolverV2() + + ep, err := r.ResolveEndpoint(ctx, s3vectors.EndpointParameters{ + Region: aws.String(region), + UseFIPS: aws.Bool(true), + }) + if err != nil { + return url.URL{}, err + } + + if ep.URI.Path == "" { + ep.URI.Path = "/" + } + + return ep.URI, nil +} + +func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { + t.Helper() + + client := meta.S3VectorsClient(ctx) + + var result apiCallParams + + input := s3vectors.ListVectorBucketsInput{} + _, err := client.ListVectorBuckets(ctx, &input, + func(opts *s3vectors.Options) { + opts.APIOptions = append(opts.APIOptions, + addRetrieveEndpointURLMiddleware(t, &result.endpoint), + addRetrieveRegionMiddleware(&result.region), + addCancelRequestMiddleware(), + ) + }, + ) + if err == nil { + t.Fatal("Expected an error, got none") + } else if !errors.Is(err, errCancelOperation) { + t.Fatalf("Unexpected error: %s", err) + } + + return result +} + +func withNoConfig(_ *caseSetup) { + // no-op +} + +func withPackageNameEndpointInConfig(setup *caseSetup) { + if _, ok := setup.config[names.AttrEndpoints]; !ok { + setup.config[names.AttrEndpoints] = []any{ + map[string]any{}, + } + } + endpoints := 
setup.config[names.AttrEndpoints].([]any)[0].(map[string]any) + endpoints[packageName] = packageNameConfigEndpoint +} + +func withAwsEnvVar(setup *caseSetup) { + setup.environmentVariables[awsEnvVar] = awsServiceEnvvarEndpoint +} + +func withBaseEnvVar(setup *caseSetup) { + setup.environmentVariables[baseEnvVar] = baseEnvvarEndpoint +} + +func withServiceEndpointInConfigFile(setup *caseSetup) { + setup.configFile.serviceUrl = serviceConfigFileEndpoint +} + +func withBaseEndpointInConfigFile(setup *caseSetup) { + setup.configFile.baseUrl = baseConfigFileEndpoint +} + +func withUseFIPSInConfig(setup *caseSetup) { + setup.config["use_fips_endpoint"] = true +} + +func expectDefaultEndpoint(ctx context.Context, t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(ctx, region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + + return caseExpectations{ + endpoint: endpoint.String(), + region: expectedCallRegion, + } +} + +func expectDefaultFIPSEndpoint(ctx context.Context, t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(ctx, region) + if err != nil { + t.Fatalf("resolving accessanalyzer FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(ctx, t, region) + } else if err != nil { + t.Fatalf("looking up accessanalyzer endpoint %q: %s", hostname, err) + } + + return caseExpectations{ + endpoint: endpoint.String(), + region: expectedCallRegion, + } +} + +func expectPackageNameConfigEndpoint() caseExpectations { + return caseExpectations{ + endpoint: packageNameConfigEndpoint, + region: expectedCallRegion, + } +} + +func expectAwsEnvVarEndpoint() caseExpectations { + return caseExpectations{ + endpoint: awsServiceEnvvarEndpoint, + region: expectedCallRegion, + } +} + +func 
expectBaseEnvVarEndpoint() caseExpectations { + return caseExpectations{ + endpoint: baseEnvvarEndpoint, + region: expectedCallRegion, + } +} + +func expectServiceConfigFileEndpoint() caseExpectations { + return caseExpectations{ + endpoint: serviceConfigFileEndpoint, + region: expectedCallRegion, + } +} + +func expectBaseConfigFileEndpoint() caseExpectations { + return caseExpectations{ + endpoint: baseConfigFileEndpoint, + region: expectedCallRegion, + } +} + +func testEndpointCase(ctx context.Context, t *testing.T, region string, testcase endpointTestCase, callF callFunc) { + t.Helper() + + setup := caseSetup{ + config: map[string]any{}, + environmentVariables: map[string]string{}, + } + + for _, f := range testcase.with { + f(&setup) + } + + config := map[string]any{ + names.AttrAccessKey: servicemocks.MockStaticAccessKey, + names.AttrSecretKey: servicemocks.MockStaticSecretKey, + names.AttrRegion: region, + names.AttrSkipCredentialsValidation: true, + names.AttrSkipRequestingAccountID: true, + } + + maps.Copy(config, setup.config) + + if setup.configFile.baseUrl != "" || setup.configFile.serviceUrl != "" { + config[names.AttrProfile] = "default" + tempDir := t.TempDir() + writeSharedConfigFile(t, &config, tempDir, generateSharedConfigFile(setup.configFile)) + } + + for k, v := range setup.environmentVariables { + t.Setenv(k, v) + } + + p, err := sdkv2.NewProvider(ctx) + if err != nil { + t.Fatal(err) + } + + p.TerraformVersion = "1.0.0" + + expectedDiags := testcase.expected.diags + diags := p.Configure(ctx, terraformsdk.NewResourceConfigRaw(config)) + + if diff := cmp.Diff(diags, expectedDiags, cmp.Comparer(sdkdiag.Comparer)); diff != "" { + t.Errorf("unexpected diagnostics difference: %s", diff) + } + + if diags.HasError() { + return + } + + meta := p.Meta().(*conns.AWSClient) + + callParams := callF(ctx, t, meta) + + if e, a := testcase.expected.endpoint, callParams.endpoint; e != a { + t.Errorf("expected endpoint %q, got %q", e, a) + } + + if e, a := 
testcase.expected.region, callParams.region; e != a { + t.Errorf("expected region %q, got %q", e, a) + } +} + +func addRetrieveEndpointURLMiddleware(t *testing.T, endpoint *string) func(*middleware.Stack) error { + return func(stack *middleware.Stack) error { + return stack.Finalize.Add( + retrieveEndpointURLMiddleware(t, endpoint), + middleware.After, + ) + } +} + +func retrieveEndpointURLMiddleware(t *testing.T, endpoint *string) middleware.FinalizeMiddleware { + return middleware.FinalizeMiddlewareFunc( + "Test: Retrieve Endpoint", + func(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (middleware.FinalizeOutput, middleware.Metadata, error) { + t.Helper() + + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + t.Fatalf("Expected *github.com/aws/smithy-go/transport/http.Request, got %s", fullTypeName(in.Request)) + } + + url := request.URL + url.RawQuery = "" + url.Path = "/" + + *endpoint = url.String() + + return next.HandleFinalize(ctx, in) + }) +} + +func addRetrieveRegionMiddleware(region *string) func(*middleware.Stack) error { + return func(stack *middleware.Stack) error { + return stack.Serialize.Add( + retrieveRegionMiddleware(region), + middleware.After, + ) + } +} + +func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { + return middleware.SerializeMiddlewareFunc( + "Test: Retrieve Region", + func(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (middleware.SerializeOutput, middleware.Metadata, error) { + *region = awsmiddleware.GetRegion(ctx) + + return next.HandleSerialize(ctx, in) + }, + ) +} + +var errCancelOperation = errors.New("Test: Canceling request") + +func addCancelRequestMiddleware() func(*middleware.Stack) error { + return func(stack *middleware.Stack) error { + return stack.Finalize.Add( + cancelRequestMiddleware(), + middleware.After, + ) + } +} + +// cancelRequestMiddleware creates a Smithy middleware that intercepts the request 
before sending and cancels it +func cancelRequestMiddleware() middleware.FinalizeMiddleware { + return middleware.FinalizeMiddlewareFunc( + "Test: Cancel Requests", + func(_ context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (middleware.FinalizeOutput, middleware.Metadata, error) { + return middleware.FinalizeOutput{}, middleware.Metadata{}, errCancelOperation + }) +} + +func fullTypeName(i any) string { + return fullValueTypeName(reflect.ValueOf(i)) +} + +func fullValueTypeName(v reflect.Value) string { + if v.Kind() == reflect.Ptr { + return "*" + fullValueTypeName(reflect.Indirect(v)) + } + + requestType := v.Type() + return fmt.Sprintf("%s.%s", requestType.PkgPath(), requestType.Name()) +} + +func generateSharedConfigFile(config configFile) string { + var buf strings.Builder + + buf.WriteString(` +[default] +aws_access_key_id = DefaultSharedCredentialsAccessKey +aws_secret_access_key = DefaultSharedCredentialsSecretKey +`) + if config.baseUrl != "" { + fmt.Fprintf(&buf, "endpoint_url = %s\n", config.baseUrl) + } + + if config.serviceUrl != "" { + fmt.Fprintf(&buf, ` +services = endpoint-test + +[services endpoint-test] +%[1]s = + endpoint_url = %[2]s +`, configParam, serviceConfigFileEndpoint) + } + + return buf.String() +} + +func writeSharedConfigFile(t *testing.T, config *map[string]any, tempDir, content string) string { + t.Helper() + + file, err := os.Create(filepath.Join(tempDir, "aws-sdk-go-base-shared-configuration-file")) + if err != nil { + t.Fatalf("creating shared configuration file: %s", err) + } + + _, err = file.WriteString(content) + if err != nil { + t.Fatalf(" writing shared configuration file: %s", err) + } + + if v, ok := (*config)[names.AttrSharedConfigFiles]; !ok { + (*config)[names.AttrSharedConfigFiles] = []any{file.Name()} + } else { + (*config)[names.AttrSharedConfigFiles] = append(v.([]any), file.Name()) + } + + return file.Name() +} diff --git a/internal/service/s3vectors/service_package_gen.go 
b/internal/service/s3vectors/service_package_gen.go new file mode 100644 index 000000000000..d5ee9e741b74 --- /dev/null +++ b/internal/service/s3vectors/service_package_gen.go @@ -0,0 +1,87 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. + +package s3vectors + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/s3vectors" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + inttypes "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/vcr" + "github.com/hashicorp/terraform-provider-aws/names" +) + +type servicePackage struct{} + +func (p *servicePackage) FrameworkDataSources(ctx context.Context) []*inttypes.ServicePackageFrameworkDataSource { + return []*inttypes.ServicePackageFrameworkDataSource{} +} + +func (p *servicePackage) FrameworkResources(ctx context.Context) []*inttypes.ServicePackageFrameworkResource { + return []*inttypes.ServicePackageFrameworkResource{} +} + +func (p *servicePackage) SDKDataSources(ctx context.Context) []*inttypes.ServicePackageSDKDataSource { + return []*inttypes.ServicePackageSDKDataSource{} +} + +func (p *servicePackage) SDKResources(ctx context.Context) []*inttypes.ServicePackageSDKResource { + return []*inttypes.ServicePackageSDKResource{} +} + +func (p *servicePackage) ServicePackageName() string { + return names.S3Vectors +} + +// NewClient returns a new AWS SDK for Go v2 client for this service package's AWS API. 
+func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*s3vectors.Client, error) { + cfg := *(config["aws_sdkv2_config"].(*aws.Config)) + optFns := []func(*s3vectors.Options){ + s3vectors.WithEndpointResolverV2(newEndpointResolverV2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + func(o *s3vectors.Options) { + if region := config[names.AttrRegion].(string); o.Region != region { + tflog.Info(ctx, "overriding provider-configured AWS API region", map[string]any{ + "service": p.ServicePackageName(), + "original_region": o.Region, + "override_region": region, + }) + o.Region = region + } + }, + func(o *s3vectors.Options) { + if inContext, ok := conns.FromContext(ctx); ok && inContext.VCREnabled() { + tflog.Info(ctx, "overriding retry behavior to immediately return VCR errors") + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), vcr.InteractionNotFoundRetryableFunc) + } + }, + withExtraOptions(ctx, p, config), + } + + return s3vectors.NewFromConfig(cfg, optFns...), nil +} + +// withExtraOptions returns a functional option that allows this service package to specify extra API client options. +// This option is always called after any generated options. 
+func withExtraOptions(ctx context.Context, sp conns.ServicePackage, config map[string]any) func(*s3vectors.Options) { + if v, ok := sp.(interface { + withExtraOptions(context.Context, map[string]any) []func(*s3vectors.Options) + }); ok { + optFns := v.withExtraOptions(ctx, config) + + return func(o *s3vectors.Options) { + for _, optFn := range optFns { + optFn(o) + } + } + } + + return func(*s3vectors.Options) {} +} + +func ServicePackage(ctx context.Context) conns.ServicePackage { + return &servicePackage{} +} diff --git a/internal/service/sagemaker/device_fleet.go b/internal/service/sagemaker/device_fleet.go index 5f807aacb7a1..2192e12a11c8 100644 --- a/internal/service/sagemaker/device_fleet.go +++ b/internal/service/sagemaker/device_fleet.go @@ -114,7 +114,7 @@ func resourceDeviceFleetCreate(ctx context.Context, d *schema.ResourceData, meta input.Description = aws.String(v.(string)) } - _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, 2*time.Minute, func() (any, error) { + _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, 2*time.Minute, func(ctx context.Context) (any, error) { return conn.CreateDeviceFleet(ctx, input) }, ErrCodeValidationException) if err != nil { diff --git a/internal/service/sagemaker/endpoint_configuration.go b/internal/service/sagemaker/endpoint_configuration.go index 544ed48baefd..c64f2ad67fc0 100644 --- a/internal/service/sagemaker/endpoint_configuration.go +++ b/internal/service/sagemaker/endpoint_configuration.go @@ -1107,6 +1107,9 @@ func expandEndpointConfigNotificationConfig(configured []any) *awstypes.AsyncInf if len(configured) == 0 { return nil } + if configured[0] == nil { + return &awstypes.AsyncInferenceNotificationConfig{} + } m := configured[0].(map[string]any) diff --git a/internal/service/sagemaker/endpoint_configuration_test.go b/internal/service/sagemaker/endpoint_configuration_test.go index 50b6d1dbb26a..753f82681527 100644 --- a/internal/service/sagemaker/endpoint_configuration_test.go +++ 
b/internal/service/sagemaker/endpoint_configuration_test.go @@ -769,6 +769,24 @@ func TestAccSageMakerEndpointConfiguration_Async_notif(t *testing.T) { ImportState: true, ImportStateVerify: true, }, + { + Config: testAccEndpointConfigurationConfig_asyncNotifEmpty(rName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckEndpointConfigurationExists(ctx, resourceName), + resource.TestCheckResourceAttr(resourceName, names.AttrName, rName), + resource.TestCheckResourceAttr(resourceName, "async_inference_config.#", "1"), + resource.TestCheckResourceAttr(resourceName, "async_inference_config.0.client_config.#", "0"), + resource.TestCheckResourceAttr(resourceName, "async_inference_config.0.output_config.#", "1"), + resource.TestCheckResourceAttrSet(resourceName, "async_inference_config.0.output_config.0.s3_output_path"), + resource.TestCheckResourceAttr(resourceName, "async_inference_config.0.output_config.0.notification_config.#", "1"), + resource.TestCheckResourceAttr(resourceName, "async_inference_config.0.output_config.0.notification_config.%", "0"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, }, }) } @@ -1523,6 +1541,46 @@ resource "aws_sagemaker_endpoint_configuration" "test" { `, rName)) } +func testAccEndpointConfigurationConfig_asyncNotifEmpty(rName string) string { + return acctest.ConfigCompose(testAccEndpointConfigurationConfig_base(rName), fmt.Sprintf(` +resource "aws_s3_bucket" "test" { + bucket = %[1]q + force_destroy = true +} + +resource "aws_sns_topic" "test" { + name = %[1]q +} + +resource "aws_kms_key" "test" { + description = %[1]q + deletion_window_in_days = 7 + enable_key_rotation = true +} + +resource "aws_sagemaker_endpoint_configuration" "test" { + name = %[1]q + + production_variants { + variant_name = "variant-1" + model_name = aws_sagemaker_model.test.name + initial_instance_count = 2 + instance_type = "ml.t2.medium" + initial_variant_weight = 1 + } + + async_inference_config { 
+ output_config { + s3_output_path = "s3://${aws_s3_bucket.test.bucket}/" + kms_key_id = aws_kms_key.test.arn + + notification_config {} + } + } +} +`, rName)) +} + func testAccEndpointConfigurationConfig_asyncNotifInferenceIn(rName string) string { return acctest.ConfigCompose(testAccEndpointConfigurationConfig_base(rName), fmt.Sprintf(` resource "aws_s3_bucket" "test" { diff --git a/internal/service/sagemaker/feature_group.go b/internal/service/sagemaker/feature_group.go index ebcbe5a76afb..dbe2e3bf63fe 100644 --- a/internal/service/sagemaker/feature_group.go +++ b/internal/service/sagemaker/feature_group.go @@ -344,23 +344,20 @@ func resourceFeatureGroupCreate(ctx context.Context, d *schema.ResourceData, met } log.Printf("[DEBUG] SageMaker AI Feature Group create config: %#v", *input) - err := retry.RetryContext(ctx, propagationTimeout, func() *retry.RetryError { + err := tfresource.Retry(ctx, propagationTimeout, func(ctx context.Context) *tfresource.RetryError { _, err := conn.CreateFeatureGroup(ctx, input) if err != nil { if tfawserr.ErrMessageContains(err, "ValidationException", "The execution role ARN is invalid.") { - return retry.RetryableError(err) + return tfresource.RetryableError(err) } if tfawserr.ErrMessageContains(err, "ValidationException", "Invalid S3Uri provided") { - return retry.RetryableError(err) + return tfresource.RetryableError(err) } - return retry.NonRetryableError(err) + return tfresource.NonRetryableError(err) } return nil }) - if tfresource.TimedOut(err) { - _, err = conn.CreateFeatureGroup(ctx, input) - } if err != nil { return sdkdiag.AppendErrorf(diags, "creating SageMaker AI Feature Group: %s", err) diff --git a/internal/service/sagemaker/flow_definition.go b/internal/service/sagemaker/flow_definition.go index f52133ea08f7..1a359bed24a9 100644 --- a/internal/service/sagemaker/flow_definition.go +++ b/internal/service/sagemaker/flow_definition.go @@ -274,7 +274,7 @@ func resourceFlowDefinitionCreate(ctx context.Context, d 
*schema.ResourceData, m } log.Printf("[DEBUG] Creating SageMaker AI Flow Definition: %#v", input) - _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, propagationTimeout, func() (any, error) { + _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, propagationTimeout, func(ctx context.Context) (any, error) { return conn.CreateFlowDefinition(ctx, input) }, ErrCodeValidationException) diff --git a/internal/service/sagemaker/image.go b/internal/service/sagemaker/image.go index 9c1bafbd181f..da1e9e1cd724 100644 --- a/internal/service/sagemaker/image.go +++ b/internal/service/sagemaker/image.go @@ -49,7 +49,7 @@ func resourceImage() *schema.Resource { ForceNew: true, ValidateFunc: validation.All( validation.StringLenBetween(1, 63), - validation.StringMatch(regexache.MustCompile(`^[0-9A-Za-z](-*[0-9A-Za-z])*$`), "Valid characters are a-z, A-Z, 0-9, and - (hyphen)."), + validation.StringMatch(regexache.MustCompile(`^[0-9A-Za-z]([-.]?[0-9A-Za-z])*$`), "Valid characters are a-z, A-Z, 0-9, - (hyphen), and . 
(dot)."), ), }, names.AttrRoleARN: { diff --git a/internal/service/sagemaker/model.go b/internal/service/sagemaker/model.go index c1df6adba83b..6ae9810e152f 100644 --- a/internal/service/sagemaker/model.go +++ b/internal/service/sagemaker/model.go @@ -445,7 +445,7 @@ func resourceModelCreate(ctx context.Context, d *schema.ResourceData, meta any) } log.Printf("[DEBUG] SageMaker AI model create config: %#v", *createOpts) - _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, 2*time.Minute, func() (any, error) { + _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, 2*time.Minute, func(ctx context.Context) (any, error) { return conn.CreateModel(ctx, createOpts) }, ErrCodeValidationException) @@ -514,7 +514,7 @@ func resourceModelDelete(ctx context.Context, d *schema.ResourceData, meta any) } log.Printf("[INFO] Deleting SageMaker AI model: %s", d.Id()) - err := retry.RetryContext(ctx, 5*time.Minute, func() *retry.RetryError { + err := tfresource.Retry(ctx, 5*time.Minute, func(ctx context.Context) *tfresource.RetryError { _, err := conn.DeleteModel(ctx, deleteOpts) if err != nil { @@ -523,17 +523,15 @@ func resourceModelDelete(ctx context.Context, d *schema.ResourceData, meta any) } if errs.IsA[*awstypes.ResourceNotFound](err) { - return retry.RetryableError(err) + return tfresource.RetryableError(err) } - return retry.NonRetryableError(err) + return tfresource.NonRetryableError(err) } return nil }) - if tfresource.TimedOut(err) { - _, err = conn.DeleteModel(ctx, deleteOpts) - } + if err != nil { return sdkdiag.AppendErrorf(diags, "deleting sagemaker model: %s", err) } diff --git a/internal/service/sagemaker/notebook_instance.go b/internal/service/sagemaker/notebook_instance.go index 919c89e21e18..0bb3527eb2c9 100644 --- a/internal/service/sagemaker/notebook_instance.go +++ b/internal/service/sagemaker/notebook_instance.go @@ -417,33 +417,26 @@ func startNotebookInstance(ctx context.Context, conn *sagemaker.Client, id strin } // StartNotebookInstance sometimes doesn't 
take so we'll check for a state change and if // it doesn't change we'll send another request - err := retry.RetryContext(ctx, 5*time.Minute, func() *retry.RetryError { + err := tfresource.Retry(ctx, 5*time.Minute, func(ctx context.Context) *tfresource.RetryError { _, err := conn.StartNotebookInstance(ctx, startOpts) if err != nil { - return retry.NonRetryableError(fmt.Errorf("starting: %s", err)) + return tfresource.NonRetryableError(fmt.Errorf("starting: %w", err)) } err = waitNotebookInstanceStarted(ctx, conn, id) if err != nil { - return retry.RetryableError(fmt.Errorf("starting: waiting for completion: %s", err)) + return tfresource.RetryableError(fmt.Errorf("starting: waiting for completion: %w", err)) } return nil }) - if tfresource.TimedOut(err) { - _, err = conn.StartNotebookInstance(ctx, startOpts) - if err != nil { - return fmt.Errorf("starting: %s", err) - } - err = waitNotebookInstanceStarted(ctx, conn, id) - if err != nil { - return fmt.Errorf("starting: waiting for completion: %s", err) - } + if err != nil { + return fmt.Errorf("starting: %w", err) } if err := waitNotebookInstanceInService(ctx, conn, id); err != nil { - return fmt.Errorf("starting: waiting to be in service: %s", err) + return fmt.Errorf("starting: waiting to be in service: %w", err) } return nil } @@ -467,11 +460,11 @@ func stopNotebookInstance(ctx context.Context, conn *sagemaker.Client, id string } if _, err := conn.StopNotebookInstance(ctx, stopOpts); err != nil { - return fmt.Errorf("stopping: %s", err) + return fmt.Errorf("stopping: %w", err) } if err := waitNotebookInstanceStopped(ctx, conn, id); err != nil { - return fmt.Errorf("stopping: waiting for completion: %s", err) + return fmt.Errorf("stopping: waiting for completion: %w", err) } return nil diff --git a/internal/service/sagemaker/notebook_instance_test.go b/internal/service/sagemaker/notebook_instance_test.go index 08371e955046..606efd497d47 100644 --- a/internal/service/sagemaker/notebook_instance_test.go +++ 
b/internal/service/sagemaker/notebook_instance_test.go @@ -461,7 +461,7 @@ func TestAccSageMakerNotebookInstance_DirectInternet_access(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "direct_internet_access", "Disabled"), resource.TestCheckResourceAttrPair(resourceName, names.AttrSubnetID, "aws_subnet.test", names.AttrID), resource.TestCheckResourceAttr(resourceName, "security_groups.#", "1"), - resource.TestMatchResourceAttr(resourceName, names.AttrNetworkInterfaceID, regexache.MustCompile("eni-.*")), + resource.TestMatchResourceAttr(resourceName, names.AttrNetworkInterfaceID, regexache.MustCompile(`^eni-[0-9a-f]+$`)), ), }, { @@ -476,7 +476,7 @@ func TestAccSageMakerNotebookInstance_DirectInternet_access(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "direct_internet_access", "Enabled"), resource.TestCheckResourceAttrPair(resourceName, names.AttrSubnetID, "aws_subnet.test", names.AttrID), resource.TestCheckResourceAttr(resourceName, "security_groups.#", "1"), - resource.TestMatchResourceAttr(resourceName, names.AttrNetworkInterfaceID, regexache.MustCompile("eni-.*")), + resource.TestMatchResourceAttr(resourceName, names.AttrNetworkInterfaceID, regexache.MustCompile(`^eni-[0-9a-f]+$`)), ), }, }, diff --git a/internal/service/sagemaker/project.go b/internal/service/sagemaker/project.go index 8c6e0fae5202..094c1c290cc9 100644 --- a/internal/service/sagemaker/project.go +++ b/internal/service/sagemaker/project.go @@ -121,7 +121,7 @@ func resourceProjectCreate(ctx context.Context, d *schema.ResourceData, meta any input.ProjectDescription = aws.String(v.(string)) } - _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, 2*time.Minute, func() (any, error) { + _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, 2*time.Minute, func(ctx context.Context) (any, error) { return conn.CreateProject(ctx, input) }, ErrCodeValidationException) if err != nil { diff --git a/internal/service/sagemaker/service_endpoint_resolver_gen.go 
b/internal/service/sagemaker/service_endpoint_resolver_gen.go index 59805710c056..bc7779c6046a 100644 --- a/internal/service/sagemaker/service_endpoint_resolver_gen.go +++ b/internal/service/sagemaker/service_endpoint_resolver_gen.go @@ -62,7 +62,7 @@ func (r resolverV2) ResolveEndpoint(ctx context.Context, params sagemaker.Endpoi }) params.UseFIPS = aws.Bool(false) } else { - err = fmt.Errorf("looking up sagemaker endpoint %q: %s", hostname, err) + err = fmt.Errorf("looking up sagemaker endpoint %q: %w", hostname, err) return } } else { diff --git a/internal/service/sagemaker/service_endpoints_gen_test.go b/internal/service/sagemaker/service_endpoints_gen_test.go index ad85533825ce..12646ba4423f 100644 --- a/internal/service/sagemaker/service_endpoints_gen_test.go +++ b/internal/service/sagemaker/service_endpoints_gen_test.go @@ -521,7 +521,7 @@ func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { ) } -var errCancelOperation = fmt.Errorf("Test: Canceling request") +var errCancelOperation = errors.New("Test: Canceling request") func addCancelRequestMiddleware() func(*middleware.Stack) error { return func(stack *middleware.Stack) error { diff --git a/internal/service/sagemaker/service_package_gen.go b/internal/service/sagemaker/service_package_gen.go index fcf56e2ecdec..8d0c645a48c5 100644 --- a/internal/service/sagemaker/service_package_gen.go +++ b/internal/service/sagemaker/service_package_gen.go @@ -7,7 +7,6 @@ import ( "unique" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/sagemaker" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -337,7 +336,7 @@ func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) ( func(o *sagemaker.Options) { if inContext, ok := conns.FromContext(ctx); ok && inContext.VCREnabled() { tflog.Info(ctx, "overriding retry behavior to immediately return VCR 
errors") - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(vcr.InteractionNotFoundRetryableFunc)) + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), vcr.InteractionNotFoundRetryableFunc) } }, withExtraOptions(ctx, p, config), diff --git a/internal/service/sagemaker/servicecatalog_portfolio_status_identity_gen_test.go b/internal/service/sagemaker/servicecatalog_portfolio_status_identity_gen_test.go index bb732e76abda..e3051b80d806 100644 --- a/internal/service/sagemaker/servicecatalog_portfolio_status_identity_gen_test.go +++ b/internal/service/sagemaker/servicecatalog_portfolio_status_identity_gen_test.go @@ -16,6 +16,7 @@ import ( "github.com/hashicorp/terraform-plugin-testing/tfversion" "github.com/hashicorp/terraform-provider-aws/internal/acctest" tfknownvalue "github.com/hashicorp/terraform-provider-aws/internal/acctest/knownvalue" + tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -23,9 +24,10 @@ func testAccSageMakerServicecatalogPortfolioStatus_IdentitySerial(t *testing.T) t.Helper() testCases := map[string]func(t *testing.T){ - acctest.CtBasic: testAccSageMakerServicecatalogPortfolioStatus_Identity_Basic, - "ExistingResource": testAccSageMakerServicecatalogPortfolioStatus_Identity_ExistingResource, - "RegionOverride": testAccSageMakerServicecatalogPortfolioStatus_Identity_RegionOverride, + acctest.CtBasic: testAccSageMakerServicecatalogPortfolioStatus_Identity_Basic, + "ExistingResource": testAccSageMakerServicecatalogPortfolioStatus_Identity_ExistingResource, + "ExistingResourceNoRefresh": testAccSageMakerServicecatalogPortfolioStatus_Identity_ExistingResource_NoRefresh_NoChange, + "RegionOverride": testAccSageMakerServicecatalogPortfolioStatus_Identity_RegionOverride, } acctest.RunSerialTests1Level(t, testCases, 0) @@ -37,7 +39,7 @@ func 
testAccSageMakerServicecatalogPortfolioStatus_Identity_Basic(t *testing.T) var v sagemaker.GetSagemakerServicecatalogPortfolioStatusOutput resourceName := "aws_sagemaker_servicecatalog_portfolio_status.test" - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ TerraformVersionChecks: []tfversion.TerraformVersionCheck{ tfversion.SkipBelow(tfversion.Version1_12_0), }, @@ -111,7 +113,7 @@ func testAccSageMakerServicecatalogPortfolioStatus_Identity_RegionOverride(t *te resourceName := "aws_sagemaker_servicecatalog_portfolio_status.test" - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ TerraformVersionChecks: []tfversion.TerraformVersionCheck{ tfversion.SkipBelow(tfversion.Version1_12_0), }, @@ -215,3 +217,120 @@ func testAccSageMakerServicecatalogPortfolioStatus_Identity_RegionOverride(t *te }, }) } + +func testAccSageMakerServicecatalogPortfolioStatus_Identity_ExistingResource(t *testing.T) { + ctx := acctest.Context(t) + + var v sagemaker.GetSagemakerServicecatalogPortfolioStatusOutput + resourceName := "aws_sagemaker_servicecatalog_portfolio_status.test" + + acctest.Test(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.SageMakerServiceID), + CheckDestroy: acctest.CheckDestroyNoop, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/ServicecatalogPortfolioStatus/basic_v5.100.0/"), + ConfigVariables: config.Variables{}, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckServicecatalogPortfolioStatusExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: v6.0 Identity error + { + ConfigDirectory: 
config.StaticDirectory("testdata/ServicecatalogPortfolioStatus/basic_v6.0.0/"), + ConfigVariables: config.Variables{}, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckServicecatalogPortfolioStatusExists(ctx, resourceName, &v), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: knownvalue.Null(), + names.AttrRegion: knownvalue.Null(), + }), + }, + }, + + // Step 3: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/ServicecatalogPortfolioStatus/basic/"), + ConfigVariables: config.Variables{}, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + }), + }, + }, + }, + }) +} + +func testAccSageMakerServicecatalogPortfolioStatus_Identity_ExistingResource_NoRefresh_NoChange(t *testing.T) { + ctx := acctest.Context(t) + + var v sagemaker.GetSagemakerServicecatalogPortfolioStatusOutput + resourceName := "aws_sagemaker_servicecatalog_portfolio_status.test" + + acctest.Test(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: 
func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.SageMakerServiceID), + CheckDestroy: acctest.CheckDestroyNoop, + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/ServicecatalogPortfolioStatus/basic_v5.100.0/"), + ConfigVariables: config.Variables{}, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckServicecatalogPortfolioStatusExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/ServicecatalogPortfolioStatus/basic/"), + ConfigVariables: config.Variables{}, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckServicecatalogPortfolioStatusExists(ctx, resourceName, &v), + ), + }, + }, + }) +} diff --git a/internal/service/sagemaker/servicecatalog_portfolio_status_test.go b/internal/service/sagemaker/servicecatalog_portfolio_status_test.go index e5d7ffbe5bee..a3ec8e4d22a2 100644 --- a/internal/service/sagemaker/servicecatalog_portfolio_status_test.go +++ b/internal/service/sagemaker/servicecatalog_portfolio_status_test.go @@ -10,14 +10,8 @@ import ( "github.com/aws/aws-sdk-go-v2/service/sagemaker" "github.com/hashicorp/terraform-plugin-testing/helper/resource" - "github.com/hashicorp/terraform-plugin-testing/knownvalue" - "github.com/hashicorp/terraform-plugin-testing/plancheck" - "github.com/hashicorp/terraform-plugin-testing/statecheck" "github.com/hashicorp/terraform-plugin-testing/terraform" - "github.com/hashicorp/terraform-plugin-testing/tfversion" "github.com/hashicorp/terraform-provider-aws/internal/acctest" - tfknownvalue "github.com/hashicorp/terraform-provider-aws/internal/acctest/knownvalue" - 
tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" "github.com/hashicorp/terraform-provider-aws/internal/conns" tfsagemaker "github.com/hashicorp/terraform-provider-aws/internal/service/sagemaker" "github.com/hashicorp/terraform-provider-aws/names" @@ -64,75 +58,6 @@ func testAccServicecatalogPortfolioStatus_basic(t *testing.T) { }) } -func testAccSageMakerServicecatalogPortfolioStatus_Identity_ExistingResource(t *testing.T) { - ctx := acctest.Context(t) - resourceName := "aws_sagemaker_servicecatalog_portfolio_status.test" - - resource.Test(t, resource.TestCase{ - TerraformVersionChecks: []tfversion.TerraformVersionCheck{ - tfversion.SkipBelow(tfversion.Version1_12_0), - }, - PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, names.SageMakerServiceID), - CheckDestroy: acctest.CheckDestroyNoop, - Steps: []resource.TestStep{ - { - ExternalProviders: map[string]resource.ExternalProvider{ - "aws": { - Source: "hashicorp/aws", - VersionConstraint: "5.100.0", - }, - }, - Config: testAccServicecatalogPortfolioStatusConfigConfig_basic("Enabled"), - ConfigStateChecks: []statecheck.StateCheck{ - tfstatecheck.ExpectNoIdentity(resourceName), - }, - }, - { - ExternalProviders: map[string]resource.ExternalProvider{ - "aws": { - Source: "hashicorp/aws", - VersionConstraint: "6.0.0", - }, - }, - Config: testAccServicecatalogPortfolioStatusConfigConfig_basic("Enabled"), - ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - PostApplyPostRefresh: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - }, - ConfigStateChecks: []statecheck.StateCheck{ - statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ - names.AttrAccountID: knownvalue.Null(), - names.AttrRegion: knownvalue.Null(), - }), - }, - }, - { - ProtoV5ProviderFactories: 
acctest.ProtoV5ProviderFactories, - Config: testAccServicecatalogPortfolioStatusConfigConfig_basic("Enabled"), - ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - PostApplyPostRefresh: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - }, - ConfigStateChecks: []statecheck.StateCheck{ - statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ - names.AttrAccountID: tfknownvalue.AccountID(), - names.AttrRegion: knownvalue.StringExact(acctest.Region()), - }), - }, - }, - }, - }) -} - func testAccCheckServicecatalogPortfolioStatusExists(ctx context.Context, n string, config *sagemaker.GetSagemakerServicecatalogPortfolioStatusOutput) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] diff --git a/internal/service/sagemaker/tags_gen.go b/internal/service/sagemaker/tags_gen.go index d2a7dca9652c..dc40450464fa 100644 --- a/internal/service/sagemaker/tags_gen.go +++ b/internal/service/sagemaker/tags_gen.go @@ -3,8 +3,8 @@ package sagemaker import ( "context" - "fmt" + "github.com/YakDriver/smarterr" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/sagemaker" awstypes "github.com/aws/aws-sdk-go-v2/service/sagemaker/types" @@ -31,7 +31,7 @@ func listTags(ctx context.Context, conn *sagemaker.Client, identifier string, op page, err := pages.NextPage(ctx, optFns...) if err != nil { - return tftags.New(ctx, nil), err + return tftags.New(ctx, nil), smarterr.NewError(err) } output = append(output, page.Tags...) 
@@ -46,7 +46,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri tags, err := listTags(ctx, meta.(*conns.AWSClient).SageMakerClient(ctx), identifier) if err != nil { - return err + return smarterr.NewError(err) } if inContext, ok := tftags.FromContext(ctx); ok { @@ -124,7 +124,7 @@ func updateTags(ctx context.Context, conn *sagemaker.Client, identifier string, _, err := conn.DeleteTags(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("untagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } @@ -139,7 +139,7 @@ func updateTags(ctx context.Context, conn *sagemaker.Client, identifier string, _, err := conn.AddTags(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("tagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } diff --git a/internal/service/sagemaker/testdata/ServicecatalogPortfolioStatus/basic_v5.100.0/main_gen.tf b/internal/service/sagemaker/testdata/ServicecatalogPortfolioStatus/basic_v5.100.0/main_gen.tf new file mode 100644 index 000000000000..b249bc0d1c74 --- /dev/null +++ b/internal/service/sagemaker/testdata/ServicecatalogPortfolioStatus/basic_v5.100.0/main_gen.tf @@ -0,0 +1,17 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resource "aws_sagemaker_servicecatalog_portfolio_status" "test" { + status = "Enabled" +} + +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "5.100.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/sagemaker/testdata/ServicecatalogPortfolioStatus/basic_v6.0.0/main_gen.tf b/internal/service/sagemaker/testdata/ServicecatalogPortfolioStatus/basic_v6.0.0/main_gen.tf new file mode 100644 index 000000000000..2a509b90d3b8 --- /dev/null +++ b/internal/service/sagemaker/testdata/ServicecatalogPortfolioStatus/basic_v6.0.0/main_gen.tf @@ -0,0 +1,17 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_sagemaker_servicecatalog_portfolio_status" "test" { + status = "Enabled" +} + +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "6.0.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/sagemaker/testdata/UserProfile/basic_v6.2.0/main_gen.tf b/internal/service/sagemaker/testdata/UserProfile/basic_v6.2.0/main_gen.tf new file mode 100644 index 000000000000..004c5b1596e3 --- /dev/null +++ b/internal/service/sagemaker/testdata/UserProfile/basic_v6.2.0/main_gen.tf @@ -0,0 +1,87 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resource "aws_sagemaker_user_profile" "test" { + domain_id = aws_sagemaker_domain.test.id + user_profile_name = var.rName +} + +# testAccUserProfileConfig_base + +resource "aws_sagemaker_domain" "test" { + domain_name = var.rName + auth_mode = "IAM" + vpc_id = aws_vpc.test.id + subnet_ids = aws_subnet.test[*].id + + default_user_settings { + execution_role = aws_iam_role.test.arn + } + + retention_policy { + home_efs_file_system = "Delete" + } +} + +resource "aws_iam_role" "test" { + name = var.rName + path = "/" + assume_role_policy = data.aws_iam_policy_document.test.json +} + +data "aws_iam_policy_document" "test" { + statement { + actions = ["sts:AssumeRole"] + + principals { + type = "Service" + identifiers = ["sagemaker.amazonaws.com"] + } + } +} + +# acctest.ConfigVPCWithSubnets(rName, 1) + +resource "aws_vpc" "test" { + cidr_block = "10.0.0.0/16" +} + +resource "aws_subnet" "test" { + count = 1 + + vpc_id = aws_vpc.test.id + availability_zone = data.aws_availability_zones.available.names[count.index] + cidr_block = cidrsubnet(aws_vpc.test.cidr_block, 8, count.index) +} + +# acctest.ConfigAvailableAZsNoOptInDefaultExclude + +data "aws_availability_zones" "available" { + exclude_zone_ids = local.default_exclude_zone_ids + state = "available" + + filter { + name = "opt-in-status" + values = ["opt-in-not-required"] + } 
+} + +locals { + default_exclude_zone_ids = ["usw2-az4", "usgw1-az2"] +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "6.2.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/sagemaker/user_profile.go b/internal/service/sagemaker/user_profile.go index b04ceb0df7e9..66e94cf659a6 100644 --- a/internal/service/sagemaker/user_profile.go +++ b/internal/service/sagemaker/user_profile.go @@ -38,6 +38,7 @@ import ( // @ImportIDHandler("userProfileImportID") // @Testing(serialize=true) // @Testing(existsType="github.com/aws/aws-sdk-go-v2/service/sagemaker;sagemaker.DescribeUserProfileOutput") +// @Testing(preIdentityVersion="6.2.0") func resourceUserProfile() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceUserProfileCreate, @@ -77,7 +78,7 @@ func resourceUserProfile() *schema.Resource { ForceNew: true, ValidateFunc: validation.All( validation.StringLenBetween(1, 63), - validation.StringMatch(regexache.MustCompile(`^[0-9A-Za-z](-*[0-9A-Za-z]){0,62}`), "Valid characters are a-z, A-Z, 0-9, and - (hyphen)."), + validation.StringMatch(regexache.MustCompile(`^[0-9A-Za-z](-*[0-9A-Za-z]){0,62}$`), "Valid characters are a-z, A-Z, 0-9, and - (hyphen)."), ), }, "user_settings": { diff --git a/internal/service/sagemaker/user_profile_identity_gen_test.go b/internal/service/sagemaker/user_profile_identity_gen_test.go index ab861d647306..39177e6c9eba 100644 --- a/internal/service/sagemaker/user_profile_identity_gen_test.go +++ b/internal/service/sagemaker/user_profile_identity_gen_test.go @@ -24,9 +24,10 @@ func testAccSageMakerUserProfile_IdentitySerial(t *testing.T) { t.Helper() testCases := map[string]func(t *testing.T){ - acctest.CtBasic: testAccSageMakerUserProfile_Identity_Basic, - "ExistingResource": testAccSageMakerUserProfile_Identity_ExistingResource, - "RegionOverride": 
testAccSageMakerUserProfile_Identity_RegionOverride, + acctest.CtBasic: testAccSageMakerUserProfile_Identity_Basic, + "ExistingResource": testAccSageMakerUserProfile_Identity_ExistingResource, + "ExistingResourceNoRefresh": testAccSageMakerUserProfile_Identity_ExistingResource_NoRefresh_NoChange, + "RegionOverride": testAccSageMakerUserProfile_Identity_RegionOverride, } acctest.RunSerialTests1Level(t, testCases, 0) @@ -39,7 +40,7 @@ func testAccSageMakerUserProfile_Identity_Basic(t *testing.T) { resourceName := "aws_sagemaker_user_profile.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ TerraformVersionChecks: []tfversion.TerraformVersionCheck{ tfversion.SkipBelow(tfversion.Version1_12_0), }, @@ -128,7 +129,7 @@ func testAccSageMakerUserProfile_Identity_RegionOverride(t *testing.T) { resourceName := "aws_sagemaker_user_profile.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ TerraformVersionChecks: []tfversion.TerraformVersionCheck{ tfversion.SkipBelow(tfversion.Version1_12_0), }, @@ -213,3 +214,121 @@ func testAccSageMakerUserProfile_Identity_RegionOverride(t *testing.T) { }, }) } + +// Resource Identity was added after v6.2.0 +func testAccSageMakerUserProfile_Identity_ExistingResource(t *testing.T) { + ctx := acctest.Context(t) + + var v sagemaker.DescribeUserProfileOutput + resourceName := "aws_sagemaker_user_profile.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.Test(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.SageMakerServiceID), + CheckDestroy: testAccCheckUserProfileDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + 
ConfigDirectory: config.StaticDirectory("testdata/UserProfile/basic_v6.2.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckUserProfileExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/UserProfile/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + "domain_id": knownvalue.NotNull(), + "user_profile_name": knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New("domain_id")), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New("user_profile_name")), + }, + }, + }, + }) +} + +// Resource Identity was added after v6.2.0 +func testAccSageMakerUserProfile_Identity_ExistingResource_NoRefresh_NoChange(t *testing.T) { + ctx := acctest.Context(t) + + var v sagemaker.DescribeUserProfileOutput + resourceName := "aws_sagemaker_user_profile.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.Test(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { 
acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.SageMakerServiceID), + CheckDestroy: testAccCheckUserProfileDestroy(ctx), + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/UserProfile/basic_v6.2.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckUserProfileExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/UserProfile/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + }, + }) +} diff --git a/internal/service/sagemaker/user_profile_test.go b/internal/service/sagemaker/user_profile_test.go index 886aa4651507..f814edcb9d43 100644 --- a/internal/service/sagemaker/user_profile_test.go +++ b/internal/service/sagemaker/user_profile_test.go @@ -11,18 +11,10 @@ import ( "github.com/YakDriver/regexache" "github.com/aws/aws-sdk-go-v2/service/sagemaker" - "github.com/hashicorp/terraform-plugin-testing/compare" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" - 
"github.com/hashicorp/terraform-plugin-testing/knownvalue" - "github.com/hashicorp/terraform-plugin-testing/plancheck" - "github.com/hashicorp/terraform-plugin-testing/statecheck" "github.com/hashicorp/terraform-plugin-testing/terraform" - "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" - "github.com/hashicorp/terraform-plugin-testing/tfversion" "github.com/hashicorp/terraform-provider-aws/internal/acctest" - tfknownvalue "github.com/hashicorp/terraform-provider-aws/internal/acctest/knownvalue" - tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" "github.com/hashicorp/terraform-provider-aws/internal/conns" tfsagemaker "github.com/hashicorp/terraform-provider-aws/internal/service/sagemaker" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" @@ -461,66 +453,6 @@ func testAccUserProfile_disappears(t *testing.T) { }) } -func testAccSageMakerUserProfile_Identity_ExistingResource(t *testing.T) { - ctx := acctest.Context(t) - var v sagemaker.DescribeUserProfileOutput - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resourceName := "aws_sagemaker_user_profile.test" - - resource.Test(t, resource.TestCase{ - TerraformVersionChecks: []tfversion.TerraformVersionCheck{ - tfversion.SkipBelow(tfversion.Version1_12_0), - }, - PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, names.ACMPCAServiceID), - CheckDestroy: acctest.CheckDestroyNoop, - Steps: []resource.TestStep{ - { - ExternalProviders: map[string]resource.ExternalProvider{ - "aws": { - Source: "hashicorp/aws", - VersionConstraint: "6.2.0", - }, - }, - Config: testAccUserProfileConfig_basic(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckUserProfileExists(ctx, resourceName, &v), - ), - ConfigStateChecks: []statecheck.StateCheck{ - statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New(names.AttrARN), compare.ValuesSame()), - 
tfstatecheck.ExpectNoIdentity(resourceName), - }, - }, - { - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - Config: testAccUserProfileConfig_basic(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckUserProfileExists(ctx, resourceName, &v), - ), - ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - PostApplyPostRefresh: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - }, - ConfigStateChecks: []statecheck.StateCheck{ - tfstatecheck.ExpectAttributeFormat(resourceName, tfjsonpath.New(names.AttrID), "{domain_id}/{user_profile_name}"), - statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ - names.AttrAccountID: tfknownvalue.AccountID(), - names.AttrRegion: knownvalue.StringExact(acctest.Region()), - "domain_id": knownvalue.NotNull(), - "user_profile_name": knownvalue.NotNull(), - }), - statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New("domain_id")), - statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New("user_profile_name")), - }, - }, - }, - }) -} - func testAccCheckUserProfileDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { conn := acctest.Provider.Meta().(*conns.AWSClient).SageMakerClient(ctx) diff --git a/internal/service/sagemaker/workteam.go b/internal/service/sagemaker/workteam.go index efb05ffe2a00..0ba7029f9782 100644 --- a/internal/service/sagemaker/workteam.go +++ b/internal/service/sagemaker/workteam.go @@ -206,7 +206,7 @@ func resourceWorkteamCreate(ctx context.Context, d *schema.ResourceData, meta an input.WorkforceName = aws.String(v.(string)) } - _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, 2*time.Minute, func() (any, error) { + _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, 2*time.Minute, func(ctx context.Context) (any, error) { return 
conn.CreateWorkteam(ctx, input) }, ErrCodeValidationException) diff --git a/internal/service/scheduler/retry.go b/internal/service/scheduler/retry.go index 4694b4da7e1f..2f1493ab0d64 100644 --- a/internal/service/scheduler/retry.go +++ b/internal/service/scheduler/retry.go @@ -20,7 +20,7 @@ func retryWhenIAMNotPropagated[T any](ctx context.Context, f func() (T, error)) v, err := tfresource.RetryWhen( ctx, iamPropagationTimeout, - func() (any, error) { + func(ctx context.Context) (any, error) { return f() }, func(err error) (bool, error) { diff --git a/internal/service/scheduler/schedule.go b/internal/service/scheduler/schedule.go index f0653dbf2b7d..5375e02ac1c5 100644 --- a/internal/service/scheduler/schedule.go +++ b/internal/service/scheduler/schedule.go @@ -45,6 +45,12 @@ func resourceSchedule() *schema.Resource { }, Schema: map[string]*schema.Schema{ + "action_after_completion": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateDiagFunc: enum.Validate[types.ActionAfterCompletion](), + }, names.AttrARN: { Type: schema.TypeString, Computed: true, @@ -442,6 +448,10 @@ func resourceScheduleCreate(ctx context.Context, d *schema.ResourceData, meta an ScheduleExpression: aws.String(d.Get(names.AttrScheduleExpression).(string)), } + if v, ok := d.Get("action_after_completion").(string); ok && v != "" { + in.ActionAfterCompletion = types.ActionAfterCompletion(v) + } + if v, ok := d.Get(names.AttrDescription).(string); ok && v != "" { in.Description = aws.String(v) } @@ -532,6 +542,7 @@ func resourceScheduleRead(ctx context.Context, d *schema.ResourceData, meta any) return create.AppendDiagError(diags, names.Scheduler, create.ErrActionReading, ResNameSchedule, d.Id(), err) } + d.Set("action_after_completion", out.ActionAfterCompletion) d.Set(names.AttrARN, out.Arn) d.Set(names.AttrDescription, out.Description) @@ -579,6 +590,10 @@ func resourceScheduleUpdate(ctx context.Context, d *schema.ResourceData, meta an Target: expandTarget(ctx, 
d.Get(names.AttrTarget).([]any)[0].(map[string]any)), } + if v, ok := d.Get("action_after_completion").(string); ok && v != "" { + in.ActionAfterCompletion = types.ActionAfterCompletion(v) + } + if v, ok := d.Get(names.AttrDescription).(string); ok && v != "" { in.Description = aws.String(v) } diff --git a/internal/service/scheduler/schedule_test.go b/internal/service/scheduler/schedule_test.go index 155c2e89f1a0..76359a5a8352 100644 --- a/internal/service/scheduler/schedule_test.go +++ b/internal/service/scheduler/schedule_test.go @@ -12,8 +12,10 @@ import ( "github.com/YakDriver/regexache" "github.com/aws/aws-sdk-go-v2/service/scheduler" + awstypes "github.com/aws/aws-sdk-go-v2/service/scheduler/types" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/plancheck" "github.com/hashicorp/terraform-plugin-testing/terraform" "github.com/hashicorp/terraform-provider-aws/internal/acctest" "github.com/hashicorp/terraform-provider-aws/internal/create" @@ -201,6 +203,7 @@ func TestAccSchedulerSchedule_basic(t *testing.T) { Check: resource.ComposeTestCheckFunc( testAccCheckScheduleExists(ctx, t, resourceName, &schedule), acctest.MatchResourceAttrRegionalARN(ctx, resourceName, names.AttrARN, "scheduler", regexache.MustCompile(regexp.QuoteMeta(`schedule/default/`+name))), + resource.TestCheckResourceAttr(resourceName, "action_after_completion", string(awstypes.ActionAfterCompletionNone)), resource.TestCheckResourceAttr(resourceName, names.AttrDescription, ""), resource.TestCheckResourceAttr(resourceName, "end_date", ""), resource.TestCheckResourceAttr(resourceName, "flexible_time_window.0.maximum_window_in_minutes", "0"), @@ -261,12 +264,62 @@ func TestAccSchedulerSchedule_disappears(t *testing.T) { testAccCheckScheduleExists(ctx, t, resourceName, &schedule), acctest.CheckResourceDisappears(ctx, acctest.Provider, tfscheduler.ResourceSchedule(), resourceName), 
), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + }, + }, ExpectNonEmptyPlan: true, }, }, }) } +func TestAccSchedulerSchedule_actionAfterCompletion(t *testing.T) { + ctx := acctest.Context(t) + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + + var schedule scheduler.GetScheduleOutput + name := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + resourceName := "aws_scheduler_schedule.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, names.SchedulerEndpointID) + testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.SchedulerServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckScheduleDestroy(ctx, t), + Steps: []resource.TestStep{ + { + Config: testAccScheduleConfig_actionAfterCompletion(name, string(awstypes.ActionAfterCompletionNone)), + Check: resource.ComposeTestCheckFunc( + testAccCheckScheduleExists(ctx, t, resourceName, &schedule), + resource.TestCheckResourceAttr(resourceName, "action_after_completion", string(awstypes.ActionAfterCompletionNone)), + ), + }, + { + Config: testAccScheduleConfig_actionAfterCompletion(name, string(awstypes.ActionAfterCompletionDelete)), + Check: resource.ComposeTestCheckFunc( + testAccCheckScheduleExists(ctx, t, resourceName, &schedule), + resource.TestCheckResourceAttr(resourceName, "action_after_completion", string(awstypes.ActionAfterCompletionDelete)), + ), + }, + { + Config: testAccScheduleConfig_actionAfterCompletion(name, string(awstypes.ActionAfterCompletionNone)), + Check: resource.ComposeTestCheckFunc( + testAccCheckScheduleExists(ctx, t, resourceName, &schedule), + resource.TestCheckResourceAttr(resourceName, "action_after_completion", string(awstypes.ActionAfterCompletionNone)), + ), + }, + }, + }) 
+} + func TestAccSchedulerSchedule_description(t *testing.T) { ctx := acctest.Context(t) if testing.Short() { @@ -1695,6 +1748,32 @@ resource "aws_scheduler_schedule" "test" { ) } +func testAccScheduleConfig_actionAfterCompletion(rName, actionAfterCompletion string) string { + return acctest.ConfigCompose( + testAccScheduleConfig_base, + fmt.Sprintf(` +resource "aws_sqs_queue" "test" {} + +resource "aws_scheduler_schedule" "test" { + name = %[1]q + + action_after_completion = %[2]q + + flexible_time_window { + mode = "OFF" + } + + schedule_expression = "rate(1 hour)" + + target { + arn = aws_sqs_queue.test.arn + role_arn = aws_iam_role.test.arn + } +} +`, rName, actionAfterCompletion), + ) +} + func testAccScheduleConfig_description(name, description string) string { return acctest.ConfigCompose( testAccScheduleConfig_base, diff --git a/internal/service/scheduler/service_endpoint_resolver_gen.go b/internal/service/scheduler/service_endpoint_resolver_gen.go index 17dae0750335..b2e5d62a0525 100644 --- a/internal/service/scheduler/service_endpoint_resolver_gen.go +++ b/internal/service/scheduler/service_endpoint_resolver_gen.go @@ -62,7 +62,7 @@ func (r resolverV2) ResolveEndpoint(ctx context.Context, params scheduler.Endpoi }) params.UseFIPS = aws.Bool(false) } else { - err = fmt.Errorf("looking up scheduler endpoint %q: %s", hostname, err) + err = fmt.Errorf("looking up scheduler endpoint %q: %w", hostname, err) return } } else { diff --git a/internal/service/scheduler/service_endpoints_gen_test.go b/internal/service/scheduler/service_endpoints_gen_test.go index f07d545e26bf..d4a4b4dfe8b0 100644 --- a/internal/service/scheduler/service_endpoints_gen_test.go +++ b/internal/service/scheduler/service_endpoints_gen_test.go @@ -521,7 +521,7 @@ func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { ) } -var errCancelOperation = fmt.Errorf("Test: Canceling request") +var errCancelOperation = errors.New("Test: Canceling request") func 
addCancelRequestMiddleware() func(*middleware.Stack) error { return func(stack *middleware.Stack) error { diff --git a/internal/service/scheduler/service_package_gen.go b/internal/service/scheduler/service_package_gen.go index 031c2894a366..c70b61ff1e25 100644 --- a/internal/service/scheduler/service_package_gen.go +++ b/internal/service/scheduler/service_package_gen.go @@ -7,7 +7,6 @@ import ( "unique" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/scheduler" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -73,7 +72,7 @@ func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) ( func(o *scheduler.Options) { if inContext, ok := conns.FromContext(ctx); ok && inContext.VCREnabled() { tflog.Info(ctx, "overriding retry behavior to immediately return VCR errors") - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(vcr.InteractionNotFoundRetryableFunc)) + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), vcr.InteractionNotFoundRetryableFunc) } }, withExtraOptions(ctx, p, config), diff --git a/internal/service/scheduler/tags_gen.go b/internal/service/scheduler/tags_gen.go index c0e0b58785df..b839a877a6bf 100644 --- a/internal/service/scheduler/tags_gen.go +++ b/internal/service/scheduler/tags_gen.go @@ -3,8 +3,8 @@ package scheduler import ( "context" - "fmt" + "github.com/YakDriver/smarterr" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/scheduler" awstypes "github.com/aws/aws-sdk-go-v2/service/scheduler/types" @@ -27,7 +27,7 @@ func listTags(ctx context.Context, conn *scheduler.Client, identifier string, op output, err := conn.ListTagsForResource(ctx, &input, optFns...) 
if err != nil { - return tftags.New(ctx, nil), err + return tftags.New(ctx, nil), smarterr.NewError(err) } return keyValueTags(ctx, output.Tags), nil @@ -39,7 +39,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri tags, err := listTags(ctx, meta.(*conns.AWSClient).SchedulerClient(ctx), identifier) if err != nil { - return err + return smarterr.NewError(err) } if inContext, ok := tftags.FromContext(ctx); ok { @@ -117,7 +117,7 @@ func updateTags(ctx context.Context, conn *scheduler.Client, identifier string, _, err := conn.UntagResource(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("untagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } @@ -132,7 +132,7 @@ func updateTags(ctx context.Context, conn *scheduler.Client, identifier string, _, err := conn.TagResource(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("tagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } diff --git a/internal/service/schemas/registry_policy_test.go b/internal/service/schemas/registry_policy_test.go index d3a59177aff1..277ed9e63d92 100644 --- a/internal/service/schemas/registry_policy_test.go +++ b/internal/service/schemas/registry_policy_test.go @@ -213,7 +213,7 @@ func testAccCheckRegistryPolicy(ctx context.Context, name string, expectedSid st equivalent, err := awspolicy.PoliciesAreEquivalent(actualPolicyText, expectedPolicyText) if err != nil { - return fmt.Errorf("Error testing policy equivalence: %s", err) + return fmt.Errorf("Error testing policy equivalence: %w", err) } if !equivalent { return fmt.Errorf("Non-equivalent policy error:\n\nexpected: %s\n\n got: %s\n", diff --git a/internal/service/schemas/service_endpoint_resolver_gen.go b/internal/service/schemas/service_endpoint_resolver_gen.go index 7168432f8af8..f8a8f3fe733d 100644 --- a/internal/service/schemas/service_endpoint_resolver_gen.go +++ b/internal/service/schemas/service_endpoint_resolver_gen.go @@ -62,7 
+62,7 @@ func (r resolverV2) ResolveEndpoint(ctx context.Context, params schemas.Endpoint }) params.UseFIPS = aws.Bool(false) } else { - err = fmt.Errorf("looking up schemas endpoint %q: %s", hostname, err) + err = fmt.Errorf("looking up schemas endpoint %q: %w", hostname, err) return } } else { diff --git a/internal/service/schemas/service_endpoints_gen_test.go b/internal/service/schemas/service_endpoints_gen_test.go index 81b8c5187c0f..fde853fd8979 100644 --- a/internal/service/schemas/service_endpoints_gen_test.go +++ b/internal/service/schemas/service_endpoints_gen_test.go @@ -521,7 +521,7 @@ func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { ) } -var errCancelOperation = fmt.Errorf("Test: Canceling request") +var errCancelOperation = errors.New("Test: Canceling request") func addCancelRequestMiddleware() func(*middleware.Stack) error { return func(stack *middleware.Stack) error { diff --git a/internal/service/schemas/service_package.go b/internal/service/schemas/service_package.go index 2570bcfb675f..1190063fbdb5 100644 --- a/internal/service/schemas/service_package.go +++ b/internal/service/schemas/service_package.go @@ -10,21 +10,32 @@ import ( "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/schemas" awstypes "github.com/aws/aws-sdk-go-v2/service/schemas/types" + "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/errs" + "github.com/hashicorp/terraform-provider-aws/internal/vcr" ) -func (p *servicePackage) withExtraOptions(_ context.Context, config map[string]any) []func(*schemas.Options) { +func (p *servicePackage) withExtraOptions(ctx context.Context, config map[string]any) []func(*schemas.Options) { cfg := *(config["aws_sdkv2_config"].(*aws.Config)) return []func(*schemas.Options){ func(o *schemas.Options) { - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), 
retry.IsErrorRetryableFunc(func(err error) aws.Ternary { - if errs.IsAErrorMessageContains[*awstypes.TooManyRequestsException](err, "Too Many Requests") { - return aws.TrueTernary - } - return aws.UnknownTernary // Delegate to configured Retryer. - })) + retryables := []retry.IsErrorRetryable{ + retry.IsErrorRetryableFunc(func(err error) aws.Ternary { + if errs.IsAErrorMessageContains[*awstypes.TooManyRequestsException](err, "Too Many Requests") { + return aws.TrueTernary + } + return aws.UnknownTernary // Delegate to configured Retryer. + }), + } + // Include go-vcr retryable to prevent generated client retryer from being overridden + if inContext, ok := conns.FromContext(ctx); ok && inContext.VCREnabled() { + tflog.Info(ctx, "overriding retry behavior to immediately return VCR errors") + retryables = append(retryables, vcr.InteractionNotFoundRetryableFunc) + } + + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retryables...) }, } } diff --git a/internal/service/schemas/service_package_gen.go b/internal/service/schemas/service_package_gen.go index 439f492b0de1..e75cec37061c 100644 --- a/internal/service/schemas/service_package_gen.go +++ b/internal/service/schemas/service_package_gen.go @@ -7,7 +7,6 @@ import ( "unique" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/schemas" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -91,7 +90,7 @@ func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) ( func(o *schemas.Options) { if inContext, ok := conns.FromContext(ctx); ok && inContext.VCREnabled() { tflog.Info(ctx, "overriding retry behavior to immediately return VCR errors") - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(vcr.InteractionNotFoundRetryableFunc)) + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), 
vcr.InteractionNotFoundRetryableFunc) } }, withExtraOptions(ctx, p, config), diff --git a/internal/service/schemas/tags_gen.go b/internal/service/schemas/tags_gen.go index 3f0ad14c48df..09d76747a760 100644 --- a/internal/service/schemas/tags_gen.go +++ b/internal/service/schemas/tags_gen.go @@ -3,8 +3,8 @@ package schemas import ( "context" - "fmt" + "github.com/YakDriver/smarterr" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/schemas" "github.com/hashicorp/terraform-plugin-log/tflog" @@ -26,7 +26,7 @@ func listTags(ctx context.Context, conn *schemas.Client, identifier string, optF output, err := conn.ListTagsForResource(ctx, &input, optFns...) if err != nil { - return tftags.New(ctx, nil), err + return tftags.New(ctx, nil), smarterr.NewError(err) } return keyValueTags(ctx, output.Tags), nil @@ -38,7 +38,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri tags, err := listTags(ctx, meta.(*conns.AWSClient).SchemasClient(ctx), identifier) if err != nil { - return err + return smarterr.NewError(err) } if inContext, ok := tftags.FromContext(ctx); ok { @@ -99,7 +99,7 @@ func updateTags(ctx context.Context, conn *schemas.Client, identifier string, ol _, err := conn.UntagResource(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("untagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } @@ -114,7 +114,7 @@ func updateTags(ctx context.Context, conn *schemas.Client, identifier string, ol _, err := conn.TagResource(ctx, &input, optFns...) 
if err != nil { - return fmt.Errorf("tagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } diff --git a/internal/service/secretsmanager/consts.go b/internal/service/secretsmanager/consts.go index c17ad53267f6..eb0edea78598 100644 --- a/internal/service/secretsmanager/consts.go +++ b/internal/service/secretsmanager/consts.go @@ -8,5 +8,5 @@ import ( ) const ( - PropagationTimeout = 2 * time.Minute + propagationTimeout = 2 * time.Minute ) diff --git a/internal/service/secretsmanager/generate.go b/internal/service/secretsmanager/generate.go index a90febca32cf..1eed653fc132 100644 --- a/internal/service/secretsmanager/generate.go +++ b/internal/service/secretsmanager/generate.go @@ -4,6 +4,7 @@ //go:generate go run ../../generate/tags/main.go -ListTagsInIDElem=SecretId -ServiceTagsSlice -TagInIDElem=SecretId -UpdateTags //go:generate go run ../../generate/servicepackage/main.go //go:generate go run ../../generate/tagstests/main.go +//go:generate go run ../../generate/identitytests/main.go // ONLY generate directives and package declaration! Do not add anything else to this file. 
package secretsmanager diff --git a/internal/service/secretsmanager/secret.go b/internal/service/secretsmanager/secret.go index a78186c38d5a..ae18e7916300 100644 --- a/internal/service/secretsmanager/secret.go +++ b/internal/service/secretsmanager/secret.go @@ -9,6 +9,7 @@ import ( "errors" "fmt" "log" + "strings" "time" "github.com/aws/aws-sdk-go-v2/aws" @@ -35,7 +36,9 @@ import ( // @SDKResource("aws_secretsmanager_secret", name="Secret") // @Tags(identifierAttribute="arn") // @Testing(existsType="github.com/aws/aws-sdk-go-v2/service/secretsmanager;secretsmanager.DescribeSecretOutput") +// @Testing(preIdentityVersion="v6.8.0") // @Testing(importIgnore="force_overwrite_replica_secret;recovery_window_in_days") +// @ArnIdentity func resourceSecret() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceSecretCreate, @@ -43,10 +46,6 @@ func resourceSecret() *schema.Resource { UpdateWithoutTimeout: resourceSecretUpdate, DeleteWithoutTimeout: resourceSecretDelete, - Importer: &schema.ResourceImporter{ - StateContext: schema.ImportStatePassthroughContext, - }, - Schema: map[string]*schema.Schema{ names.AttrARN: { Type: schema.TypeString, @@ -175,8 +174,8 @@ func resourceSecretCreate(ctx context.Context, d *schema.ResourceData, meta any) } // Retry for secret recreation after deletion. 
- outputRaw, err := tfresource.RetryWhen(ctx, PropagationTimeout, - func() (any, error) { + outputRaw, err := tfresource.RetryWhen(ctx, propagationTimeout, + func(ctx context.Context) (any, error) { return conn.CreateSecret(ctx, input) }, func(err error) (bool, error) { @@ -196,7 +195,7 @@ func resourceSecretCreate(ctx context.Context, d *schema.ResourceData, meta any) d.SetId(aws.ToString(outputRaw.(*secretsmanager.CreateSecretOutput).ARN)) - _, err = tfresource.RetryWhenNotFound(ctx, PropagationTimeout, func() (any, error) { + _, err = tfresource.RetryWhenNotFound(ctx, propagationTimeout, func(ctx context.Context) (any, error) { return findSecretByID(ctx, conn, d.Id()) }) @@ -249,19 +248,19 @@ func resourceSecretRead(ctx context.Context, d *schema.ResourceData, meta any) d } var policy *secretsmanager.GetResourcePolicyOutput - err = tfresource.Retry(ctx, PropagationTimeout, func() *retry.RetryError { + err = tfresource.Retry(ctx, propagationTimeout, func(ctx context.Context) *tfresource.RetryError { output, err := findSecretPolicyByID(ctx, conn, d.Id()) if err != nil { - return retry.NonRetryableError(err) + return tfresource.NonRetryableError(err) } if v := output.ResourcePolicy; v != nil { if valid, err := tfiam.PolicyHasValidAWSPrincipals(aws.ToString(v)); err != nil { - return retry.NonRetryableError(err) + return tfresource.NonRetryableError(err) } else if !valid { log.Printf("[DEBUG] Retrying because of invalid principals") - return retry.RetryableError(errors.New("contains invalid principals")) + return tfresource.RetryableError(errors.New("contains invalid principals")) } } @@ -271,9 +270,15 @@ func resourceSecretRead(ctx context.Context, d *schema.ResourceData, meta any) d }) if err != nil { - return sdkdiag.AppendErrorf(diags, "reading Secrets Manager Secret (%s) policy: %s", d.Id(), err) - } else if v := policy.ResourcePolicy; v != nil { - policyToSet, err := verify.PolicyToSet(d.Get(names.AttrPolicy).(string), aws.ToString(v)) + if 
strings.Contains(err.Error(), "contains invalid principals") { + diags = sdkdiag.AppendWarningf(diags, "reading Secrets Manager Secret (%s) policy: %s", d.Id(), err) + } else { + return sdkdiag.AppendErrorf(diags, "reading Secrets Manager Secret (%s) policy: %s", d.Id(), err) + } + } + + if policy != nil && policy.ResourcePolicy != nil { + policyToSet, err := verify.PolicyToSet(d.Get(names.AttrPolicy).(string), aws.ToString(policy.ResourcePolicy)) if err != nil { return sdkdiag.AppendFromErr(diags, err) } @@ -383,7 +388,7 @@ func resourceSecretDelete(ctx context.Context, d *schema.ResourceData, meta any) return sdkdiag.AppendErrorf(diags, "deleting Secrets Manager Secret (%s): %s", d.Id(), err) } - _, err = tfresource.RetryUntilNotFound(ctx, PropagationTimeout, func() (any, error) { + _, err = tfresource.RetryUntilNotFound(ctx, propagationTimeout, func(ctx context.Context) (any, error) { return findSecretByID(ctx, conn, d.Id()) }) @@ -442,7 +447,7 @@ func removeSecretReplicas(ctx context.Context, conn *secretsmanager.Client, id s } func putSecretPolicy(ctx context.Context, conn *secretsmanager.Client, input *secretsmanager.PutResourcePolicyInput) (*secretsmanager.PutResourcePolicyOutput, error) { - outputRaw, err := tfresource.RetryWhenIsAErrorMessageContains[*types.MalformedPolicyDocumentException](ctx, PropagationTimeout, func() (any, error) { + outputRaw, err := tfresource.RetryWhenIsAErrorMessageContains[any, *types.MalformedPolicyDocumentException](ctx, propagationTimeout, func(ctx context.Context) (any, error) { return conn.PutResourcePolicy(ctx, input) }, "This resource policy contains an unsupported principal") diff --git a/internal/service/secretsmanager/secret_data_source_tags_gen_test.go b/internal/service/secretsmanager/secret_data_source_tags_gen_test.go index 9d4e3a1fe07b..3cd0b2e0e048 100644 --- a/internal/service/secretsmanager/secret_data_source_tags_gen_test.go +++ b/internal/service/secretsmanager/secret_data_source_tags_gen_test.go @@ -6,7 +6,6 
@@ import ( "testing" "github.com/hashicorp/terraform-plugin-testing/config" - sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/knownvalue" "github.com/hashicorp/terraform-plugin-testing/statecheck" @@ -17,10 +16,11 @@ import ( func TestAccSecretsManagerSecretDataSource_tags(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_secretsmanager_secret.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SecretsManagerServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -45,10 +45,11 @@ func TestAccSecretsManagerSecretDataSource_tags(t *testing.T) { func TestAccSecretsManagerSecretDataSource_tags_NullMap(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_secretsmanager_secret.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SecretsManagerServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -69,10 +70,11 @@ func TestAccSecretsManagerSecretDataSource_tags_NullMap(t *testing.T) { func TestAccSecretsManagerSecretDataSource_tags_EmptyMap(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_secretsmanager_secret.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, 
resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SecretsManagerServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -93,10 +95,11 @@ func TestAccSecretsManagerSecretDataSource_tags_EmptyMap(t *testing.T) { func TestAccSecretsManagerSecretDataSource_tags_DefaultTags_nonOverlapping(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_secretsmanager_secret.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SecretsManagerServiceID), Steps: []resource.TestStep{ @@ -125,10 +128,11 @@ func TestAccSecretsManagerSecretDataSource_tags_DefaultTags_nonOverlapping(t *te func TestAccSecretsManagerSecretDataSource_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_secretsmanager_secret.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SecretsManagerServiceID), Steps: []resource.TestStep{ @@ -163,10 +167,11 @@ func TestAccSecretsManagerSecretDataSource_tags_IgnoreTags_Overlap_DefaultTag(t func TestAccSecretsManagerSecretDataSource_tags_IgnoreTags_Overlap_ResourceTag(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_secretsmanager_secret.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { 
acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SecretsManagerServiceID), Steps: []resource.TestStep{ diff --git a/internal/service/secretsmanager/secret_identity_gen_test.go b/internal/service/secretsmanager/secret_identity_gen_test.go new file mode 100644 index 000000000000..080c32f217a8 --- /dev/null +++ b/internal/service/secretsmanager/secret_identity_gen_test.go @@ -0,0 +1,360 @@ +// Code generated by internal/generate/identitytests/main.go; DO NOT EDIT. + +package secretsmanager_test + +import ( + "testing" + + "github.com/aws/aws-sdk-go-v2/service/secretsmanager" + "github.com/hashicorp/terraform-plugin-testing/compare" + "github.com/hashicorp/terraform-plugin-testing/config" + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/knownvalue" + "github.com/hashicorp/terraform-plugin-testing/plancheck" + "github.com/hashicorp/terraform-plugin-testing/statecheck" + "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" + "github.com/hashicorp/terraform-plugin-testing/tfversion" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccSecretsManagerSecret_Identity_Basic(t *testing.T) { + ctx := acctest.Context(t) + + var v secretsmanager.DescribeSecretOutput + resourceName := "aws_secretsmanager_secret.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.SecretsManagerServiceID), + CheckDestroy: testAccCheckSecretDestroy(ctx), + ProtoV5ProviderFactories: 
acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + // Step 1: Setup + { + ConfigDirectory: config.StaticDirectory("testdata/Secret/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSecretExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New(names.AttrARN), compare.ValuesSame()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + + // Step 2: Import command + { + ConfigDirectory: config.StaticDirectory("testdata/Secret/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ImportStateKind: resource.ImportCommandWithID, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "force_overwrite_replica_secret", "recovery_window_in_days", + }, + }, + + // Step 3: Import block with Import ID + { + ConfigDirectory: config.StaticDirectory("testdata/Secret/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + 
plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + }, + }, + ExpectNonEmptyPlan: true, + }, + + // Step 4: Import block with Resource Identity + { + ConfigDirectory: config.StaticDirectory("testdata/Secret/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithResourceIdentity, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + }, + }, + ExpectNonEmptyPlan: true, + }, + }, + }) +} + +func TestAccSecretsManagerSecret_Identity_RegionOverride(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_secretsmanager_secret.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.SecretsManagerServiceID), + CheckDestroy: acctest.CheckDestroyNoop, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + // Step 1: Setup + { + ConfigDirectory: config.StaticDirectory("testdata/Secret/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ConfigStateChecks: []statecheck.StateCheck{ + 
statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New(names.AttrARN), compare.ValuesSame()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + + // Step 2: Import command with appended "@" + { + ConfigDirectory: config.StaticDirectory("testdata/Secret/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ImportStateKind: resource.ImportCommandWithID, + ImportStateIdFunc: acctest.CrossRegionImportStateIdFunc(resourceName), + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "force_overwrite_replica_secret", "recovery_window_in_days", + }, + }, + + // Step 3: Import command without appended "@" + { + ConfigDirectory: config.StaticDirectory("testdata/Secret/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ImportStateKind: resource.ImportCommandWithID, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "force_overwrite_replica_secret", "recovery_window_in_days", + }, + }, + + // Step 4: Import block with Import ID and appended "@" + { + ConfigDirectory: config.StaticDirectory("testdata/Secret/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, 
+ ImportStateIdFunc: acctest.CrossRegionImportStateIdFunc(resourceName), + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + ExpectNonEmptyPlan: true, + }, + + // Step 5: Import block with Import ID and no appended "@" + { + ConfigDirectory: config.StaticDirectory("testdata/Secret/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + ExpectNonEmptyPlan: true, + }, + + // Step 6: Import block with Resource Identity + { + ConfigDirectory: config.StaticDirectory("testdata/Secret/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithResourceIdentity, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: 
[]plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + ExpectNonEmptyPlan: true, + }, + }, + }) +} + +// Resource Identity was added after v6.8.0 +func TestAccSecretsManagerSecret_Identity_ExistingResource(t *testing.T) { + ctx := acctest.Context(t) + + var v secretsmanager.DescribeSecretOutput + resourceName := "aws_secretsmanager_secret.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.SecretsManagerServiceID), + CheckDestroy: testAccCheckSecretDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/Secret/basic_v6.8.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSecretExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Secret/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + 
PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + }, + }) +} + +// Resource Identity was added after v6.8.0 +func TestAccSecretsManagerSecret_Identity_ExistingResource_NoRefresh_NoChange(t *testing.T) { + ctx := acctest.Context(t) + + var v secretsmanager.DescribeSecretOutput + resourceName := "aws_secretsmanager_secret.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.SecretsManagerServiceID), + CheckDestroy: testAccCheckSecretDestroy(ctx), + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/Secret/basic_v6.8.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSecretExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Secret/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + 
plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + }, + }) +} diff --git a/internal/service/secretsmanager/secret_policy.go b/internal/service/secretsmanager/secret_policy.go index 2b2b23e1d18a..e4071f7f9aad 100644 --- a/internal/service/secretsmanager/secret_policy.go +++ b/internal/service/secretsmanager/secret_policy.go @@ -24,6 +24,9 @@ import ( ) // @SDKResource("aws_secretsmanager_secret_policy", name="Secret Policy") +// @ArnIdentity("secret_arn") +// @Testing(preIdentityVersion="v6.8.0") +// @Testing(existsType="github.com/aws/aws-sdk-go-v2/service/secretsmanager;secretsmanager.GetResourcePolicyOutput") func resourceSecretPolicy() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceSecretPolicyCreate, @@ -31,10 +34,6 @@ func resourceSecretPolicy() *schema.Resource { UpdateWithoutTimeout: resourceSecretPolicyUpdate, DeleteWithoutTimeout: resourceSecretPolicyDelete, - Importer: &schema.ResourceImporter{ - StateContext: schema.ImportStatePassthroughContext, - }, - Schema: map[string]*schema.Schema{ "block_public_policy": { Type: schema.TypeBool, @@ -87,7 +86,7 @@ func resourceSecretPolicyCreate(ctx context.Context, d *schema.ResourceData, met d.SetId(aws.ToString(output.ARN)) - _, err = tfresource.RetryWhenNotFound(ctx, PropagationTimeout, func() (any, error) { + _, err = tfresource.RetryWhenNotFound(ctx, propagationTimeout, func(ctx context.Context) (any, error) { return findSecretPolicyByID(ctx, conn, d.Id()) }) @@ -169,7 +168,7 @@ func resourceSecretPolicyDelete(ctx context.Context, d *schema.ResourceData, met return sdkdiag.AppendFromErr(diags, err) } - _, err = tfresource.RetryUntilNotFound(ctx, PropagationTimeout, func() (any, error) { + _, err = 
tfresource.RetryUntilNotFound(ctx, propagationTimeout, func(ctx context.Context) (any, error) { output, err := findSecretPolicyByID(ctx, conn, d.Id()) if err != nil { diff --git a/internal/service/secretsmanager/secret_policy_identity_gen_test.go b/internal/service/secretsmanager/secret_policy_identity_gen_test.go new file mode 100644 index 000000000000..61ba1054f4f3 --- /dev/null +++ b/internal/service/secretsmanager/secret_policy_identity_gen_test.go @@ -0,0 +1,341 @@ +// Code generated by internal/generate/identitytests/main.go; DO NOT EDIT. + +package secretsmanager_test + +import ( + "testing" + + "github.com/aws/aws-sdk-go-v2/service/secretsmanager" + "github.com/hashicorp/terraform-plugin-testing/compare" + "github.com/hashicorp/terraform-plugin-testing/config" + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/knownvalue" + "github.com/hashicorp/terraform-plugin-testing/plancheck" + "github.com/hashicorp/terraform-plugin-testing/statecheck" + "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" + "github.com/hashicorp/terraform-plugin-testing/tfversion" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccSecretsManagerSecretPolicy_Identity_Basic(t *testing.T) { + ctx := acctest.Context(t) + + var v secretsmanager.GetResourcePolicyOutput + resourceName := "aws_secretsmanager_secret_policy.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.SecretsManagerServiceID), + 
CheckDestroy: testAccCheckSecretPolicyDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + // Step 1: Setup + { + ConfigDirectory: config.StaticDirectory("testdata/SecretPolicy/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSecretPolicyExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New("secret_arn"), compare.ValuesSame()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + "secret_arn": knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New("secret_arn")), + }, + }, + + // Step 2: Import command + { + ConfigDirectory: config.StaticDirectory("testdata/SecretPolicy/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ImportStateKind: resource.ImportCommandWithID, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + + // Step 3: Import block with Import ID + { + ConfigDirectory: config.StaticDirectory("testdata/SecretPolicy/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New("secret_arn"), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), 
knownvalue.StringExact(acctest.Region())), + }, + }, + }, + + // Step 4: Import block with Resource Identity + { + ConfigDirectory: config.StaticDirectory("testdata/SecretPolicy/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithResourceIdentity, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New("secret_arn"), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + }, + }, + }, + }, + }) +} + +func TestAccSecretsManagerSecretPolicy_Identity_RegionOverride(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_secretsmanager_secret_policy.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.SecretsManagerServiceID), + CheckDestroy: acctest.CheckDestroyNoop, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + // Step 1: Setup + { + ConfigDirectory: config.StaticDirectory("testdata/SecretPolicy/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New("secret_arn"), compare.ValuesSame()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), 
knownvalue.StringExact(acctest.AlternateRegion())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + "secret_arn": knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New("secret_arn")), + }, + }, + + // Step 2: Import command with appended "@" + { + ConfigDirectory: config.StaticDirectory("testdata/SecretPolicy/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ImportStateKind: resource.ImportCommandWithID, + ImportStateIdFunc: acctest.CrossRegionImportStateIdFunc(resourceName), + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + + // Step 3: Import command without appended "@" + { + ConfigDirectory: config.StaticDirectory("testdata/SecretPolicy/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ImportStateKind: resource.ImportCommandWithID, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + + // Step 4: Import block with Import ID and appended "@" + { + ConfigDirectory: config.StaticDirectory("testdata/SecretPolicy/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportStateIdFunc: acctest.CrossRegionImportStateIdFunc(resourceName), + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New("secret_arn"), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, 
tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + }, + + // Step 5: Import block with Import ID and no appended "@" + { + ConfigDirectory: config.StaticDirectory("testdata/SecretPolicy/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New("secret_arn"), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + }, + + // Step 6: Import block with Resource Identity + { + ConfigDirectory: config.StaticDirectory("testdata/SecretPolicy/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithResourceIdentity, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New("secret_arn"), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + }, + }, + }) +} + +// Resource Identity was added after v6.8.0 +func TestAccSecretsManagerSecretPolicy_Identity_ExistingResource(t *testing.T) { + ctx := acctest.Context(t) + + var v secretsmanager.GetResourcePolicyOutput + resourceName := 
"aws_secretsmanager_secret_policy.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.SecretsManagerServiceID), + CheckDestroy: testAccCheckSecretPolicyDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/SecretPolicy/basic_v6.8.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSecretPolicyExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/SecretPolicy/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + "secret_arn": knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New("secret_arn")), + }, + }, + }, + }) +} + +// Resource Identity was added after v6.8.0 +func TestAccSecretsManagerSecretPolicy_Identity_ExistingResource_NoRefresh_NoChange(t *testing.T) { + ctx := acctest.Context(t) + + var v secretsmanager.GetResourcePolicyOutput + resourceName := 
"aws_secretsmanager_secret_policy.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.SecretsManagerServiceID), + CheckDestroy: testAccCheckSecretPolicyDestroy(ctx), + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/SecretPolicy/basic_v6.8.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSecretPolicyExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/SecretPolicy/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + }, + }) +} diff --git a/internal/service/secretsmanager/secret_rotation.go b/internal/service/secretsmanager/secret_rotation.go index 8e8e71cb1675..46c88406455a 100644 --- a/internal/service/secretsmanager/secret_rotation.go +++ b/internal/service/secretsmanager/secret_rotation.go @@ -24,6 +24,10 @@ import ( ) // 
@SDKResource("aws_secretsmanager_secret_rotation", name="Secret Rotation") +// @ArnIdentity("secret_id") +// @Testing(preIdentityVersion="v6.8.0") +// @Testing(existsType="github.com/aws/aws-sdk-go-v2/service/secretsmanager;secretsmanager.DescribeSecretOutput") +// @Testing(importIgnore="rotate_immediately") func resourceSecretRotation() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceSecretRotationCreate, @@ -31,10 +35,6 @@ func resourceSecretRotation() *schema.Resource { UpdateWithoutTimeout: resourceSecretRotationUpdate, DeleteWithoutTimeout: resourceSecretRotationDelete, - Importer: &schema.ResourceImporter{ - StateContext: schema.ImportStatePassthroughContext, - }, - SchemaVersion: 1, StateUpgraders: []schema.StateUpgrader{ { @@ -113,7 +113,7 @@ func resourceSecretRotationCreate(ctx context.Context, d *schema.ResourceData, m } // AccessDeniedException: Secrets Manager cannot invoke the specified Lambda function. - outputRaw, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, 1*time.Minute, func() (any, error) { + outputRaw, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, 1*time.Minute, func(ctx context.Context) (any, error) { return conn.RotateSecret(ctx, input) }, "AccessDeniedException") @@ -176,7 +176,7 @@ func resourceSecretRotationUpdate(ctx context.Context, d *schema.ResourceData, m } // AccessDeniedException: Secrets Manager cannot invoke the specified Lambda function. 
- _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, 1*time.Minute, func() (any, error) { + _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, 1*time.Minute, func(ctx context.Context) (any, error) { return conn.RotateSecret(ctx, input) }, "AccessDeniedException", "InvalidRequestException") diff --git a/internal/service/secretsmanager/secret_rotation_identity_gen_test.go b/internal/service/secretsmanager/secret_rotation_identity_gen_test.go new file mode 100644 index 000000000000..c87048e9c5b0 --- /dev/null +++ b/internal/service/secretsmanager/secret_rotation_identity_gen_test.go @@ -0,0 +1,360 @@ +// Code generated by internal/generate/identitytests/main.go; DO NOT EDIT. + +package secretsmanager_test + +import ( + "testing" + + "github.com/aws/aws-sdk-go-v2/service/secretsmanager" + "github.com/hashicorp/terraform-plugin-testing/compare" + "github.com/hashicorp/terraform-plugin-testing/config" + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/knownvalue" + "github.com/hashicorp/terraform-plugin-testing/plancheck" + "github.com/hashicorp/terraform-plugin-testing/statecheck" + "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" + "github.com/hashicorp/terraform-plugin-testing/tfversion" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccSecretsManagerSecretRotation_Identity_Basic(t *testing.T) { + ctx := acctest.Context(t) + + var v secretsmanager.DescribeSecretOutput + resourceName := "aws_secretsmanager_secret_rotation.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), 
+ }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.SecretsManagerServiceID), + CheckDestroy: testAccCheckSecretRotationDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + // Step 1: Setup + { + ConfigDirectory: config.StaticDirectory("testdata/SecretRotation/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSecretRotationExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New("secret_id"), compare.ValuesSame()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + "secret_id": knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New("secret_id")), + }, + }, + + // Step 2: Import command + { + ConfigDirectory: config.StaticDirectory("testdata/SecretRotation/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ImportStateKind: resource.ImportCommandWithID, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "rotate_immediately", + }, + }, + + // Step 3: Import block with Import ID + { + ConfigDirectory: config.StaticDirectory("testdata/SecretRotation/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + 
plancheck.ExpectKnownValue(resourceName, tfjsonpath.New("secret_id"), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + }, + }, + ExpectNonEmptyPlan: true, + }, + + // Step 4: Import block with Resource Identity + { + ConfigDirectory: config.StaticDirectory("testdata/SecretRotation/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithResourceIdentity, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New("secret_id"), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + }, + }, + ExpectNonEmptyPlan: true, + }, + }, + }) +} + +func TestAccSecretsManagerSecretRotation_Identity_RegionOverride(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_secretsmanager_secret_rotation.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.SecretsManagerServiceID), + CheckDestroy: acctest.CheckDestroyNoop, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + // Step 1: Setup + { + ConfigDirectory: config.StaticDirectory("testdata/SecretRotation/region_override/"), + ConfigVariables: 
config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New("secret_id"), compare.ValuesSame()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + "secret_id": knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New("secret_id")), + }, + }, + + // Step 2: Import command with appended "@" + { + ConfigDirectory: config.StaticDirectory("testdata/SecretRotation/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ImportStateKind: resource.ImportCommandWithID, + ImportStateIdFunc: acctest.CrossRegionImportStateIdFunc(resourceName), + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "rotate_immediately", + }, + }, + + // Step 3: Import command without appended "@" + { + ConfigDirectory: config.StaticDirectory("testdata/SecretRotation/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ImportStateKind: resource.ImportCommandWithID, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "rotate_immediately", + }, + }, + + // Step 4: Import block with Import ID and appended "@" + { + ConfigDirectory: config.StaticDirectory("testdata/SecretRotation/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": 
config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportStateIdFunc: acctest.CrossRegionImportStateIdFunc(resourceName), + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New("secret_id"), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + ExpectNonEmptyPlan: true, + }, + + // Step 5: Import block with Import ID and no appended "@" + { + ConfigDirectory: config.StaticDirectory("testdata/SecretRotation/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New("secret_id"), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + ExpectNonEmptyPlan: true, + }, + + // Step 6: Import block with Resource Identity + { + ConfigDirectory: config.StaticDirectory("testdata/SecretRotation/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: 
resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithResourceIdentity, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New("secret_id"), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + ExpectNonEmptyPlan: true, + }, + }, + }) +} + +// Resource Identity was added after v6.8.0 +func TestAccSecretsManagerSecretRotation_Identity_ExistingResource(t *testing.T) { + ctx := acctest.Context(t) + + var v secretsmanager.DescribeSecretOutput + resourceName := "aws_secretsmanager_secret_rotation.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.SecretsManagerServiceID), + CheckDestroy: testAccCheckSecretRotationDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/SecretRotation/basic_v6.8.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSecretRotationExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/SecretRotation/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: 
config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + "secret_id": knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New("secret_id")), + }, + }, + }, + }) +} + +// Resource Identity was added after v6.8.0 +func TestAccSecretsManagerSecretRotation_Identity_ExistingResource_NoRefresh_NoChange(t *testing.T) { + ctx := acctest.Context(t) + + var v secretsmanager.DescribeSecretOutput + resourceName := "aws_secretsmanager_secret_rotation.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.SecretsManagerServiceID), + CheckDestroy: testAccCheckSecretRotationDestroy(ctx), + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/SecretRotation/basic_v6.8.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSecretRotationExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: 
config.StaticDirectory("testdata/SecretRotation/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + }, + }) +} diff --git a/internal/service/secretsmanager/secret_tags_gen_test.go b/internal/service/secretsmanager/secret_tags_gen_test.go index d94c9eaf35b9..503b16f982d9 100644 --- a/internal/service/secretsmanager/secret_tags_gen_test.go +++ b/internal/service/secretsmanager/secret_tags_gen_test.go @@ -7,7 +7,6 @@ import ( "github.com/aws/aws-sdk-go-v2/service/secretsmanager" "github.com/hashicorp/terraform-plugin-testing/config" - sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/knownvalue" "github.com/hashicorp/terraform-plugin-testing/plancheck" @@ -19,11 +18,12 @@ import ( func TestAccSecretsManagerSecret_tags(t *testing.T) { ctx := acctest.Context(t) + var v secretsmanager.DescribeSecretOutput resourceName := "aws_secretsmanager_secret.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SecretsManagerServiceID), CheckDestroy: testAccCheckSecretDestroy(ctx), @@ -213,11 +213,12 @@ func TestAccSecretsManagerSecret_tags(t *testing.T) { func TestAccSecretsManagerSecret_tags_null(t *testing.T) { ctx := acctest.Context(t) + var v 
secretsmanager.DescribeSecretOutput resourceName := "aws_secretsmanager_secret.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SecretsManagerServiceID), CheckDestroy: testAccCheckSecretDestroy(ctx), @@ -283,11 +284,12 @@ func TestAccSecretsManagerSecret_tags_null(t *testing.T) { func TestAccSecretsManagerSecret_tags_EmptyMap(t *testing.T) { ctx := acctest.Context(t) + var v secretsmanager.DescribeSecretOutput resourceName := "aws_secretsmanager_secret.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SecretsManagerServiceID), CheckDestroy: testAccCheckSecretDestroy(ctx), @@ -349,11 +351,12 @@ func TestAccSecretsManagerSecret_tags_EmptyMap(t *testing.T) { func TestAccSecretsManagerSecret_tags_AddOnUpdate(t *testing.T) { ctx := acctest.Context(t) + var v secretsmanager.DescribeSecretOutput resourceName := "aws_secretsmanager_secret.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SecretsManagerServiceID), CheckDestroy: testAccCheckSecretDestroy(ctx), @@ -433,11 +436,12 @@ func TestAccSecretsManagerSecret_tags_AddOnUpdate(t *testing.T) { func TestAccSecretsManagerSecret_tags_EmptyTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) + var v secretsmanager.DescribeSecretOutput 
resourceName := "aws_secretsmanager_secret.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SecretsManagerServiceID), CheckDestroy: testAccCheckSecretDestroy(ctx), @@ -528,11 +532,12 @@ func TestAccSecretsManagerSecret_tags_EmptyTag_OnCreate(t *testing.T) { func TestAccSecretsManagerSecret_tags_EmptyTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + var v secretsmanager.DescribeSecretOutput resourceName := "aws_secretsmanager_secret.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SecretsManagerServiceID), CheckDestroy: testAccCheckSecretDestroy(ctx), @@ -671,11 +676,12 @@ func TestAccSecretsManagerSecret_tags_EmptyTag_OnUpdate_Add(t *testing.T) { func TestAccSecretsManagerSecret_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + var v secretsmanager.DescribeSecretOutput resourceName := "aws_secretsmanager_secret.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SecretsManagerServiceID), CheckDestroy: testAccCheckSecretDestroy(ctx), @@ -763,11 +769,12 @@ func TestAccSecretsManagerSecret_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { func TestAccSecretsManagerSecret_tags_DefaultTags_providerOnly(t *testing.T) { ctx := acctest.Context(t) + var v 
secretsmanager.DescribeSecretOutput resourceName := "aws_secretsmanager_secret.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SecretsManagerServiceID), CheckDestroy: testAccCheckSecretDestroy(ctx), @@ -956,11 +963,12 @@ func TestAccSecretsManagerSecret_tags_DefaultTags_providerOnly(t *testing.T) { func TestAccSecretsManagerSecret_tags_DefaultTags_nonOverlapping(t *testing.T) { ctx := acctest.Context(t) + var v secretsmanager.DescribeSecretOutput resourceName := "aws_secretsmanager_secret.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SecretsManagerServiceID), CheckDestroy: testAccCheckSecretDestroy(ctx), @@ -1125,11 +1133,12 @@ func TestAccSecretsManagerSecret_tags_DefaultTags_nonOverlapping(t *testing.T) { func TestAccSecretsManagerSecret_tags_DefaultTags_overlapping(t *testing.T) { ctx := acctest.Context(t) + var v secretsmanager.DescribeSecretOutput resourceName := "aws_secretsmanager_secret.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SecretsManagerServiceID), CheckDestroy: testAccCheckSecretDestroy(ctx), @@ -1310,11 +1319,12 @@ func TestAccSecretsManagerSecret_tags_DefaultTags_overlapping(t *testing.T) { func 
TestAccSecretsManagerSecret_tags_DefaultTags_updateToProviderOnly(t *testing.T) { ctx := acctest.Context(t) + var v secretsmanager.DescribeSecretOutput resourceName := "aws_secretsmanager_secret.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SecretsManagerServiceID), CheckDestroy: testAccCheckSecretDestroy(ctx), @@ -1403,11 +1413,12 @@ func TestAccSecretsManagerSecret_tags_DefaultTags_updateToProviderOnly(t *testin func TestAccSecretsManagerSecret_tags_DefaultTags_updateToResourceOnly(t *testing.T) { ctx := acctest.Context(t) + var v secretsmanager.DescribeSecretOutput resourceName := "aws_secretsmanager_secret.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SecretsManagerServiceID), CheckDestroy: testAccCheckSecretDestroy(ctx), @@ -1495,11 +1506,12 @@ func TestAccSecretsManagerSecret_tags_DefaultTags_updateToResourceOnly(t *testin func TestAccSecretsManagerSecret_tags_DefaultTags_emptyResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v secretsmanager.DescribeSecretOutput resourceName := "aws_secretsmanager_secret.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SecretsManagerServiceID), CheckDestroy: testAccCheckSecretDestroy(ctx), @@ -1563,11 +1575,12 @@ func 
TestAccSecretsManagerSecret_tags_DefaultTags_emptyResourceTag(t *testing.T) func TestAccSecretsManagerSecret_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T) { ctx := acctest.Context(t) + var v secretsmanager.DescribeSecretOutput resourceName := "aws_secretsmanager_secret.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SecretsManagerServiceID), CheckDestroy: testAccCheckSecretDestroy(ctx), @@ -1623,11 +1636,12 @@ func TestAccSecretsManagerSecret_tags_DefaultTags_emptyProviderOnlyTag(t *testin func TestAccSecretsManagerSecret_tags_DefaultTags_nullOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v secretsmanager.DescribeSecretOutput resourceName := "aws_secretsmanager_secret.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SecretsManagerServiceID), CheckDestroy: testAccCheckSecretDestroy(ctx), @@ -1688,11 +1702,12 @@ func TestAccSecretsManagerSecret_tags_DefaultTags_nullOverlappingResourceTag(t * func TestAccSecretsManagerSecret_tags_DefaultTags_nullNonOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v secretsmanager.DescribeSecretOutput resourceName := "aws_secretsmanager_secret.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, 
names.SecretsManagerServiceID), CheckDestroy: testAccCheckSecretDestroy(ctx), @@ -1753,11 +1768,12 @@ func TestAccSecretsManagerSecret_tags_DefaultTags_nullNonOverlappingResourceTag( func TestAccSecretsManagerSecret_tags_ComputedTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) + var v secretsmanager.DescribeSecretOutput resourceName := "aws_secretsmanager_secret.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SecretsManagerServiceID), CheckDestroy: testAccCheckSecretDestroy(ctx), @@ -1811,11 +1827,12 @@ func TestAccSecretsManagerSecret_tags_ComputedTag_OnCreate(t *testing.T) { func TestAccSecretsManagerSecret_tags_ComputedTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + var v secretsmanager.DescribeSecretOutput resourceName := "aws_secretsmanager_secret.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SecretsManagerServiceID), CheckDestroy: testAccCheckSecretDestroy(ctx), @@ -1911,11 +1928,12 @@ func TestAccSecretsManagerSecret_tags_ComputedTag_OnUpdate_Add(t *testing.T) { func TestAccSecretsManagerSecret_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + var v secretsmanager.DescribeSecretOutput resourceName := "aws_secretsmanager_secret.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { 
acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SecretsManagerServiceID), CheckDestroy: testAccCheckSecretDestroy(ctx), @@ -2001,11 +2019,12 @@ func TestAccSecretsManagerSecret_tags_ComputedTag_OnUpdate_Replace(t *testing.T) func TestAccSecretsManagerSecret_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { ctx := acctest.Context(t) + var v secretsmanager.DescribeSecretOutput resourceName := "aws_secretsmanager_secret.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SecretsManagerServiceID), CheckDestroy: testAccCheckSecretDestroy(ctx), @@ -2163,11 +2182,12 @@ func TestAccSecretsManagerSecret_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T func TestAccSecretsManagerSecret_tags_IgnoreTags_Overlap_ResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v secretsmanager.DescribeSecretOutput resourceName := "aws_secretsmanager_secret.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SecretsManagerServiceID), CheckDestroy: testAccCheckSecretDestroy(ctx), diff --git a/internal/service/secretsmanager/secret_version.go b/internal/service/secretsmanager/secret_version.go index 84e5690764f5..8a1867d7d268 100644 --- a/internal/service/secretsmanager/secret_version.go +++ b/internal/service/secretsmanager/secret_version.go @@ -22,7 +22,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" "github.com/hashicorp/terraform-provider-aws/internal/flex" 
"github.com/hashicorp/terraform-provider-aws/internal/tfresource" - itypes "github.com/hashicorp/terraform-provider-aws/internal/types" + inttypes "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/internal/verify" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -33,6 +33,14 @@ const ( ) // @SDKResource("aws_secretsmanager_secret_version", name="Secret Version") +// @IdentityAttribute("secret_id") +// @IdentityAttribute("version_id") +// @Testing(preIdentityVersion="v6.10.0") +// @ImportIDHandler("secretVersionImportID") +// @Testing(existsType="github.com/aws/aws-sdk-go-v2/service/secretsmanager;secretsmanager.GetSecretValueOutput") +// @Testing(importStateIdFunc="testAccSecretVersionImportStateIdFunc") +// @Testing(importIgnore="has_secret_string_wo") +// @Testing(plannableImportAction="NoOp") func resourceSecretVersion() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceSecretVersionCreate, @@ -40,13 +48,6 @@ func resourceSecretVersion() *schema.Resource { UpdateWithoutTimeout: resourceSecretVersionUpdate, DeleteWithoutTimeout: resourceSecretVersionDelete, - Importer: &schema.ResourceImporter{ - StateContext: func(ctx context.Context, d *schema.ResourceData, meta any) ([]*schema.ResourceData, error) { - d.Set("has_secret_string_wo", false) - return []*schema.ResourceData{d}, nil - }, - }, - Schema: map[string]*schema.Schema{ names.AttrARN: { Type: schema.TypeString, @@ -116,7 +117,7 @@ func resourceSecretVersionCreate(ctx context.Context, d *schema.ResourceData, me if v, ok := d.GetOk("secret_binary"); ok { var err error - input.SecretBinary, err = itypes.Base64Decode(v.(string)) + input.SecretBinary, err = inttypes.Base64Decode(v.(string)) if err != nil { return sdkdiag.AppendFromErr(diags, err) } @@ -149,7 +150,7 @@ func resourceSecretVersionCreate(ctx context.Context, d *schema.ResourceData, me versionID := aws.ToString(output.VersionId) 
d.SetId(secretVersionCreateResourceID(secretID, versionID)) - _, err = tfresource.RetryWhenNotFound(ctx, PropagationTimeout, func() (any, error) { + _, err = tfresource.RetryWhenNotFound(ctx, propagationTimeout, func(ctx context.Context) (any, error) { return findSecretVersionByTwoPartKey(ctx, conn, secretID, versionID) }) @@ -182,7 +183,7 @@ func resourceSecretVersionRead(ctx context.Context, d *schema.ResourceData, meta } d.Set(names.AttrARN, output.ARN) - d.Set("secret_binary", itypes.Base64EncodeOnce(output.SecretBinary)) + d.Set("secret_binary", inttypes.Base64EncodeOnce(output.SecretBinary)) d.Set("secret_id", secretID) d.Set("secret_string", output.SecretString) d.Set("version_id", output.VersionId) @@ -333,7 +334,7 @@ func resourceSecretVersionDelete(ctx context.Context, d *schema.ResourceData, me } } - _, err = tfresource.RetryUntilNotFound(ctx, PropagationTimeout, func() (any, error) { + _, err = tfresource.RetryUntilNotFound(ctx, propagationTimeout, func(ctx context.Context) (any, error) { output, err := findSecretVersionByTwoPartKey(ctx, conn, secretID, versionID) if err != nil { @@ -354,25 +355,6 @@ func resourceSecretVersionDelete(ctx context.Context, d *schema.ResourceData, me return diags } -const secretVersionIDSeparator = "|" - -func secretVersionCreateResourceID(secretID, versionID string) string { - parts := []string{secretID, versionID} - id := strings.Join(parts, secretVersionIDSeparator) - - return id -} - -func secretVersionParseResourceID(id string) (string, string, error) { - parts := strings.SplitN(id, secretVersionIDSeparator, 2) - - if len(parts) != 2 || parts[0] == "" || parts[1] == "" { - return "", "", fmt.Errorf("unexpected format of ID (%[1]s), expected SecretID%[2]sVersionID", id, secretVersionIDSeparator) - } - - return parts[0], parts[1], nil -} - func findSecretVersion(ctx context.Context, conn *secretsmanager.Client, input *secretsmanager.GetSecretValueInput) (*secretsmanager.GetSecretValueOutput, error) { output, err := 
conn.GetSecretValue(ctx, input) @@ -404,3 +386,46 @@ func findSecretVersionByTwoPartKey(ctx context.Context, conn *secretsmanager.Cli return findSecretVersion(ctx, conn, input) } + +const secretVersionIDSeparator = "|" + +func secretVersionCreateResourceID(secretID, versionID string) string { + parts := []string{secretID, versionID} + id := strings.Join(parts, secretVersionIDSeparator) + + return id +} + +func secretVersionParseResourceID(id string) (string, string, error) { + parts := strings.SplitN(id, secretVersionIDSeparator, 2) + + if len(parts) != 2 || parts[0] == "" || parts[1] == "" { + return "", "", fmt.Errorf("unexpected format of ID (%[1]s), expected SecretID%[2]sVersionID", id, secretVersionIDSeparator) + } + + return parts[0], parts[1], nil +} + +var _ inttypes.SDKv2ImportID = secretVersionImportID{} + +type secretVersionImportID struct{} + +func (secretVersionImportID) Create(d *schema.ResourceData) string { + secretID := d.Get("secret_id").(string) + versionID := d.Get("version_id").(string) + return secretVersionCreateResourceID(secretID, versionID) +} + +func (secretVersionImportID) Parse(id string) (string, map[string]string, error) { + secretID, versionID, err := secretVersionParseResourceID(id) + if err != nil { + return id, nil, err + } + + results := map[string]string{ + "secret_id": secretID, + "version_id": versionID, + } + + return id, results, nil +} diff --git a/internal/service/secretsmanager/secret_version_identity_gen_test.go b/internal/service/secretsmanager/secret_version_identity_gen_test.go new file mode 100644 index 000000000000..1c347e67a4da --- /dev/null +++ b/internal/service/secretsmanager/secret_version_identity_gen_test.go @@ -0,0 +1,327 @@ +// Code generated by internal/generate/identitytests/main.go; DO NOT EDIT. 
+ +package secretsmanager_test + +import ( + "testing" + + "github.com/aws/aws-sdk-go-v2/service/secretsmanager" + "github.com/hashicorp/terraform-plugin-testing/config" + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/knownvalue" + "github.com/hashicorp/terraform-plugin-testing/plancheck" + "github.com/hashicorp/terraform-plugin-testing/statecheck" + "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" + "github.com/hashicorp/terraform-plugin-testing/tfversion" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + tfknownvalue "github.com/hashicorp/terraform-provider-aws/internal/acctest/knownvalue" + tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccSecretsManagerSecretVersion_Identity_Basic(t *testing.T) { + ctx := acctest.Context(t) + + var v secretsmanager.GetSecretValueOutput + resourceName := "aws_secretsmanager_secret_version.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.SecretsManagerServiceID), + CheckDestroy: testAccCheckSecretVersionDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + // Step 1: Setup + { + ConfigDirectory: config.StaticDirectory("testdata/SecretVersion/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSecretVersionExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + 
statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + "secret_id": knownvalue.NotNull(), + "version_id": knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New("secret_id")), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New("version_id")), + }, + }, + + // Step 2: Import command + { + ConfigDirectory: config.StaticDirectory("testdata/SecretVersion/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ImportStateKind: resource.ImportCommandWithID, + ImportStateIdFunc: testAccSecretVersionImportStateIdFunc(resourceName), + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "has_secret_string_wo", + }, + }, + + // Step 3: Import block with Import ID + { + ConfigDirectory: config.StaticDirectory("testdata/SecretVersion/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportStateIdFunc: testAccSecretVersionImportStateIdFunc(resourceName), + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New("secret_id"), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New("version_id"), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + }, + }, + }, + + // Step 4: Import block with Resource Identity + { + ConfigDirectory: config.StaticDirectory("testdata/SecretVersion/basic/"), + ConfigVariables: 
config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithResourceIdentity, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New("secret_id"), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New("version_id"), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + }, + }, + }, + }, + }) +} + +func TestAccSecretsManagerSecretVersion_Identity_RegionOverride(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_secretsmanager_secret_version.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.SecretsManagerServiceID), + CheckDestroy: acctest.CheckDestroyNoop, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + // Step 1: Setup + { + ConfigDirectory: config.StaticDirectory("testdata/SecretVersion/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.AlternateRegion()), + "secret_id": knownvalue.NotNull(), + "version_id": knownvalue.NotNull(), + }), + 
statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New("secret_id")), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New("version_id")), + }, + }, + + // Step 2: Import command + { + ConfigDirectory: config.StaticDirectory("testdata/SecretVersion/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ImportStateKind: resource.ImportCommandWithID, + ImportStateIdFunc: acctest.CrossRegionImportStateIdFuncAdapter(resourceName, testAccSecretVersionImportStateIdFunc), + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "has_secret_string_wo", + }, + }, + + // Step 3: Import block with Import ID + { + ConfigDirectory: config.StaticDirectory("testdata/SecretVersion/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportStateIdFunc: acctest.CrossRegionImportStateIdFuncAdapter(resourceName, testAccSecretVersionImportStateIdFunc), + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New("secret_id"), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New("version_id"), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + }, + + // Step 4: Import block with Resource Identity + { + ConfigDirectory: config.StaticDirectory("testdata/SecretVersion/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + 
ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithResourceIdentity, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New("secret_id"), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New("version_id"), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + }, + }, + }) +} + +// Resource Identity was added after v6.10.0 +func TestAccSecretsManagerSecretVersion_Identity_ExistingResource(t *testing.T) { + ctx := acctest.Context(t) + + var v secretsmanager.GetSecretValueOutput + resourceName := "aws_secretsmanager_secret_version.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.SecretsManagerServiceID), + CheckDestroy: testAccCheckSecretVersionDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/SecretVersion/basic_v6.10.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSecretVersionExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/SecretVersion/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: 
[]plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + "secret_id": knownvalue.NotNull(), + "version_id": knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New("secret_id")), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New("version_id")), + }, + }, + }, + }) +} + +// Resource Identity was added after v6.10.0 +func TestAccSecretsManagerSecretVersion_Identity_ExistingResource_NoRefresh_NoChange(t *testing.T) { + ctx := acctest.Context(t) + + var v secretsmanager.GetSecretValueOutput + resourceName := "aws_secretsmanager_secret_version.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.SecretsManagerServiceID), + CheckDestroy: testAccCheckSecretVersionDestroy(ctx), + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/SecretVersion/basic_v6.10.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSecretVersionExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + 
}, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/SecretVersion/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + }, + }) +} diff --git a/internal/service/secretsmanager/secret_version_test.go b/internal/service/secretsmanager/secret_version_test.go index 8754c62f310a..3c01c5bd5db2 100644 --- a/internal/service/secretsmanager/secret_version_test.go +++ b/internal/service/secretsmanager/secret_version_test.go @@ -363,6 +363,17 @@ func testAccCheckSecretVersionExists(ctx context.Context, n string, v *secretsma } } +func testAccSecretVersionImportStateIdFunc(resourceName string) resource.ImportStateIdFunc { + return func(s *terraform.State) (string, error) { + rs, ok := s.RootModule().Resources[resourceName] + if !ok { + return "", fmt.Errorf("Not found: %s", resourceName) + } + + return fmt.Sprintf("%s|%s", rs.Primary.Attributes["secret_id"], rs.Primary.Attributes["version_id"]), nil + } +} + func testAccCheckSecretVersionWriteOnlyValueEqual(t *testing.T, param *secretsmanager.GetSecretValueOutput, writeOnlyValue string) resource.TestCheckFunc { return func(s *terraform.State) error { if aws.ToString(param.SecretString) != writeOnlyValue { diff --git a/internal/service/secretsmanager/service_endpoint_resolver_gen.go b/internal/service/secretsmanager/service_endpoint_resolver_gen.go index 41ea6fde8b01..2ef0c39780ca 100644 --- a/internal/service/secretsmanager/service_endpoint_resolver_gen.go +++ 
b/internal/service/secretsmanager/service_endpoint_resolver_gen.go @@ -62,7 +62,7 @@ func (r resolverV2) ResolveEndpoint(ctx context.Context, params secretsmanager.E }) params.UseFIPS = aws.Bool(false) } else { - err = fmt.Errorf("looking up secretsmanager endpoint %q: %s", hostname, err) + err = fmt.Errorf("looking up secretsmanager endpoint %q: %w", hostname, err) return } } else { diff --git a/internal/service/secretsmanager/service_endpoints_gen_test.go b/internal/service/secretsmanager/service_endpoints_gen_test.go index 223699aef5d5..46e5ebee758e 100644 --- a/internal/service/secretsmanager/service_endpoints_gen_test.go +++ b/internal/service/secretsmanager/service_endpoints_gen_test.go @@ -521,7 +521,7 @@ func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { ) } -var errCancelOperation = fmt.Errorf("Test: Canceling request") +var errCancelOperation = errors.New("Test: Canceling request") func addCancelRequestMiddleware() func(*middleware.Stack) error { return func(stack *middleware.Stack) error { diff --git a/internal/service/secretsmanager/service_package_gen.go b/internal/service/secretsmanager/service_package_gen.go index 683f9b9c3d2f..14d9de4e91b6 100644 --- a/internal/service/secretsmanager/service_package_gen.go +++ b/internal/service/secretsmanager/service_package_gen.go @@ -7,7 +7,6 @@ import ( "unique" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/secretsmanager" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -98,24 +97,50 @@ func (p *servicePackage) SDKResources(ctx context.Context) []*inttypes.ServicePa IdentifierAttribute: names.AttrARN, }), Region: unique.Make(inttypes.ResourceRegionDefault()), + Identity: inttypes.RegionalARNIdentity( + inttypes.WithIdentityDuplicateAttrs(names.AttrID), + ), + Import: inttypes.SDKv2Import{ + WrappedImport: true, + }, }, { Factory: resourceSecretPolicy, 
TypeName: "aws_secretsmanager_secret_policy", Name: "Secret Policy", Region: unique.Make(inttypes.ResourceRegionDefault()), + Identity: inttypes.RegionalARNIdentityNamed("secret_arn", + inttypes.WithIdentityDuplicateAttrs(names.AttrID), + ), + Import: inttypes.SDKv2Import{ + WrappedImport: true, + }, }, { Factory: resourceSecretRotation, TypeName: "aws_secretsmanager_secret_rotation", Name: "Secret Rotation", Region: unique.Make(inttypes.ResourceRegionDefault()), + Identity: inttypes.RegionalARNIdentityNamed("secret_id", + inttypes.WithIdentityDuplicateAttrs(names.AttrID), + ), + Import: inttypes.SDKv2Import{ + WrappedImport: true, + }, }, { Factory: resourceSecretVersion, TypeName: "aws_secretsmanager_secret_version", Name: "Secret Version", Region: unique.Make(inttypes.ResourceRegionDefault()), + Identity: inttypes.RegionalParameterizedIdentity([]inttypes.IdentityAttribute{ + inttypes.StringIdentityAttribute("secret_id", true), + inttypes.StringIdentityAttribute("version_id", true), + }), + Import: inttypes.SDKv2Import{ + WrappedImport: true, + ImportID: secretVersionImportID{}, + }, }, } } @@ -143,7 +168,7 @@ func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) ( func(o *secretsmanager.Options) { if inContext, ok := conns.FromContext(ctx); ok && inContext.VCREnabled() { tflog.Info(ctx, "overriding retry behavior to immediately return VCR errors") - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(vcr.InteractionNotFoundRetryableFunc)) + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), vcr.InteractionNotFoundRetryableFunc) } }, withExtraOptions(ctx, p, config), diff --git a/internal/service/secretsmanager/sweep.go b/internal/service/secretsmanager/sweep.go index 6024431cb4c9..e2678bb90d82 100644 --- a/internal/service/secretsmanager/sweep.go +++ b/internal/service/secretsmanager/sweep.go @@ -31,7 +31,7 @@ func sweepSecretPolicies(region string) error { ctx := 
sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.SecretsManagerClient(ctx) input := &secretsmanager.ListSecretsInput{} @@ -79,7 +79,7 @@ func sweepSecrets(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.SecretsManagerClient(ctx) input := &secretsmanager.ListSecretsInput{} diff --git a/internal/service/secretsmanager/tags_gen.go b/internal/service/secretsmanager/tags_gen.go index 32741dfd7665..31a16d326024 100644 --- a/internal/service/secretsmanager/tags_gen.go +++ b/internal/service/secretsmanager/tags_gen.go @@ -3,8 +3,8 @@ package secretsmanager import ( "context" - "fmt" + "github.com/YakDriver/smarterr" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/secretsmanager" awstypes "github.com/aws/aws-sdk-go-v2/service/secretsmanager/types" @@ -84,7 +84,7 @@ func updateTags(ctx context.Context, conn *secretsmanager.Client, identifier str _, err := conn.UntagResource(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("untagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } @@ -99,7 +99,7 @@ func updateTags(ctx context.Context, conn *secretsmanager.Client, identifier str _, err := conn.TagResource(ctx, &input, optFns...) 
if err != nil { - return fmt.Errorf("tagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } diff --git a/internal/service/secretsmanager/testdata/Secret/basic/main_gen.tf b/internal/service/secretsmanager/testdata/Secret/basic/main_gen.tf new file mode 100644 index 000000000000..054c39ce7a93 --- /dev/null +++ b/internal/service/secretsmanager/testdata/Secret/basic/main_gen.tf @@ -0,0 +1,12 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resource "aws_secretsmanager_secret" "test" { + name = var.rName +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} diff --git a/internal/service/secretsmanager/testdata/Secret/basic_v6.8.0/main_gen.tf b/internal/service/secretsmanager/testdata/Secret/basic_v6.8.0/main_gen.tf new file mode 100644 index 000000000000..4c3fd98e435e --- /dev/null +++ b/internal/service/secretsmanager/testdata/Secret/basic_v6.8.0/main_gen.tf @@ -0,0 +1,22 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resource "aws_secretsmanager_secret" "test" { + name = var.rName +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "6.8.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/secretsmanager/testdata/Secret/region_override/main_gen.tf b/internal/service/secretsmanager/testdata/Secret/region_override/main_gen.tf new file mode 100644 index 000000000000..cd074c4983aa --- /dev/null +++ b/internal/service/secretsmanager/testdata/Secret/region_override/main_gen.tf @@ -0,0 +1,20 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_secretsmanager_secret" "test" { + region = var.region + + name = var.rName +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} + +variable "region" { + description = "Region to deploy resource in" + type = string + nullable = false +} diff --git a/internal/service/secretsmanager/testdata/SecretPolicy/basic/main_gen.tf b/internal/service/secretsmanager/testdata/SecretPolicy/basic/main_gen.tf new file mode 100644 index 000000000000..a2198c371dcf --- /dev/null +++ b/internal/service/secretsmanager/testdata/SecretPolicy/basic/main_gen.tf @@ -0,0 +1,30 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resource "aws_secretsmanager_secret_policy" "test" { + secret_arn = aws_secretsmanager_secret.test.arn + + policy = jsonencode({ + Version = "2012-10-17" + Statement = [{ + Sid = "EnableAllPermissions" + Effect = "Allow" + Principal = { + AWS = "*" + } + Action = "secretsmanager:GetSecretValue" + Resource = "*" + }] + }) +} + +resource "aws_secretsmanager_secret" "test" { + name = var.rName +} + + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} diff --git a/internal/service/secretsmanager/testdata/SecretPolicy/basic_v6.8.0/main_gen.tf b/internal/service/secretsmanager/testdata/SecretPolicy/basic_v6.8.0/main_gen.tf new file mode 100644 index 000000000000..ce90b4b87248 --- /dev/null +++ b/internal/service/secretsmanager/testdata/SecretPolicy/basic_v6.8.0/main_gen.tf @@ -0,0 +1,40 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_secretsmanager_secret_policy" "test" { + secret_arn = aws_secretsmanager_secret.test.arn + + policy = jsonencode({ + Version = "2012-10-17" + Statement = [{ + Sid = "EnableAllPermissions" + Effect = "Allow" + Principal = { + AWS = "*" + } + Action = "secretsmanager:GetSecretValue" + Resource = "*" + }] + }) +} + +resource "aws_secretsmanager_secret" "test" { + name = var.rName +} + + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "6.8.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/secretsmanager/testdata/SecretPolicy/region_override/main_gen.tf b/internal/service/secretsmanager/testdata/SecretPolicy/region_override/main_gen.tf new file mode 100644 index 000000000000..64e35d2db90a --- /dev/null +++ b/internal/service/secretsmanager/testdata/SecretPolicy/region_override/main_gen.tf @@ -0,0 +1,40 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_secretsmanager_secret_policy" "test" { + region = var.region + + secret_arn = aws_secretsmanager_secret.test.arn + + policy = jsonencode({ + Version = "2012-10-17" + Statement = [{ + Sid = "EnableAllPermissions" + Effect = "Allow" + Principal = { + AWS = "*" + } + Action = "secretsmanager:GetSecretValue" + Resource = "*" + }] + }) +} + +resource "aws_secretsmanager_secret" "test" { + region = var.region + + name = var.rName +} + + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} + +variable "region" { + description = "Region to deploy resource in" + type = string + nullable = false +} diff --git a/internal/service/secretsmanager/testdata/SecretRotation/basic/main_gen.tf b/internal/service/secretsmanager/testdata/SecretRotation/basic/main_gen.tf new file mode 100644 index 000000000000..0e19bc1763c5 --- /dev/null +++ b/internal/service/secretsmanager/testdata/SecretRotation/basic/main_gen.tf @@ -0,0 +1,85 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_secretsmanager_secret_rotation" "test" { + secret_id = aws_secretsmanager_secret.test.id + rotation_lambda_arn = aws_lambda_function.test.arn + + rotation_rules { + automatically_after_days = 7 + } + + depends_on = [aws_lambda_permission.test] +} + +data "aws_partition" "current" { +} + +resource "aws_iam_role" "test" { + name = var.rName + + assume_role_policy = jsonencode({ + Version = "2012-10-17" + Statement = [{ + Action = "sts:AssumeRole" + Effect = "Allow" + Principal = { + Service = "lambda.amazonaws.com" + } + }] + }) +} + +resource "aws_iam_role_policy" "test" { + name = var.rName + role = aws_iam_role.test.id + + policy = jsonencode({ + Version = "2012-10-17" + Statement = [{ + Action = [ + "secretsmanager:DescribeSecret", + "secretsmanager:GetSecretValue", + "secretsmanager:PutSecretValue", + "secretsmanager:UpdateSecretVersionStage", + ] + Effect = "Allow" + Resource = aws_secretsmanager_secret.test.arn + }] + }) +} + +resource "aws_iam_role_policy_attachment" "test" { + policy_arn = "arn:${data.aws_partition.current.partition}:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole" + role = aws_iam_role.test.name +} + +resource "aws_lambda_function" "test" { + filename = "test-fixtures/lambdatest.zip" + function_name = var.rName + handler = "exports.example" + role = aws_iam_role.test.arn + runtime = "nodejs20.x" +} + +resource "aws_lambda_permission" "test" { + action = "lambda:InvokeFunction" + function_name = aws_lambda_function.test.function_name + principal = "secretsmanager.amazonaws.com" + statement_id = "AllowExecutionFromSecretsManager1" +} + +resource "aws_secretsmanager_secret" "test" { + name = var.rName +} + +resource "aws_secretsmanager_secret_version" "test" { + secret_id = aws_secretsmanager_secret.test.id + secret_string = "test-string" +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} diff --git 
a/internal/service/secretsmanager/testdata/SecretRotation/basic_v6.8.0/main_gen.tf b/internal/service/secretsmanager/testdata/SecretRotation/basic_v6.8.0/main_gen.tf new file mode 100644 index 000000000000..06c70e9db689 --- /dev/null +++ b/internal/service/secretsmanager/testdata/SecretRotation/basic_v6.8.0/main_gen.tf @@ -0,0 +1,95 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resource "aws_secretsmanager_secret_rotation" "test" { + secret_id = aws_secretsmanager_secret.test.id + rotation_lambda_arn = aws_lambda_function.test.arn + + rotation_rules { + automatically_after_days = 7 + } + + depends_on = [aws_lambda_permission.test] +} + +data "aws_partition" "current" { +} + +resource "aws_iam_role" "test" { + name = var.rName + + assume_role_policy = jsonencode({ + Version = "2012-10-17" + Statement = [{ + Action = "sts:AssumeRole" + Effect = "Allow" + Principal = { + Service = "lambda.amazonaws.com" + } + }] + }) +} + +resource "aws_iam_role_policy" "test" { + name = var.rName + role = aws_iam_role.test.id + + policy = jsonencode({ + Version = "2012-10-17" + Statement = [{ + Action = [ + "secretsmanager:DescribeSecret", + "secretsmanager:GetSecretValue", + "secretsmanager:PutSecretValue", + "secretsmanager:UpdateSecretVersionStage", + ] + Effect = "Allow" + Resource = aws_secretsmanager_secret.test.arn + }] + }) +} + +resource "aws_iam_role_policy_attachment" "test" { + policy_arn = "arn:${data.aws_partition.current.partition}:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole" + role = aws_iam_role.test.name +} + +resource "aws_lambda_function" "test" { + filename = "test-fixtures/lambdatest.zip" + function_name = var.rName + handler = "exports.example" + role = aws_iam_role.test.arn + runtime = "nodejs20.x" +} + +resource "aws_lambda_permission" "test" { + action = "lambda:InvokeFunction" + function_name = aws_lambda_function.test.function_name + principal = "secretsmanager.amazonaws.com" + statement_id = 
"AllowExecutionFromSecretsManager1" +} + +resource "aws_secretsmanager_secret" "test" { + name = var.rName +} + +resource "aws_secretsmanager_secret_version" "test" { + secret_id = aws_secretsmanager_secret.test.id + secret_string = "test-string" +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "6.8.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/secretsmanager/testdata/SecretRotation/region_override/main_gen.tf b/internal/service/secretsmanager/testdata/SecretRotation/region_override/main_gen.tf new file mode 100644 index 000000000000..3e9df6d639bb --- /dev/null +++ b/internal/service/secretsmanager/testdata/SecretRotation/region_override/main_gen.tf @@ -0,0 +1,101 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resource "aws_secretsmanager_secret_rotation" "test" { + region = var.region + + secret_id = aws_secretsmanager_secret.test.id + rotation_lambda_arn = aws_lambda_function.test.arn + + rotation_rules { + automatically_after_days = 7 + } + + depends_on = [aws_lambda_permission.test] +} + +data "aws_partition" "current" { +} + +resource "aws_iam_role" "test" { + name = var.rName + + assume_role_policy = jsonencode({ + Version = "2012-10-17" + Statement = [{ + Action = "sts:AssumeRole" + Effect = "Allow" + Principal = { + Service = "lambda.amazonaws.com" + } + }] + }) +} + +resource "aws_iam_role_policy" "test" { + name = var.rName + role = aws_iam_role.test.id + + policy = jsonencode({ + Version = "2012-10-17" + Statement = [{ + Action = [ + "secretsmanager:DescribeSecret", + "secretsmanager:GetSecretValue", + "secretsmanager:PutSecretValue", + "secretsmanager:UpdateSecretVersionStage", + ] + Effect = "Allow" + Resource = aws_secretsmanager_secret.test.arn + }] + }) +} + +resource "aws_iam_role_policy_attachment" "test" { + policy_arn = 
"arn:${data.aws_partition.current.partition}:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole" + role = aws_iam_role.test.name +} + +resource "aws_lambda_function" "test" { + region = var.region + + filename = "test-fixtures/lambdatest.zip" + function_name = var.rName + handler = "exports.example" + role = aws_iam_role.test.arn + runtime = "nodejs20.x" +} + +resource "aws_lambda_permission" "test" { + region = var.region + + action = "lambda:InvokeFunction" + function_name = aws_lambda_function.test.function_name + principal = "secretsmanager.amazonaws.com" + statement_id = "AllowExecutionFromSecretsManager1" +} + +resource "aws_secretsmanager_secret" "test" { + region = var.region + + name = var.rName +} + +resource "aws_secretsmanager_secret_version" "test" { + region = var.region + + secret_id = aws_secretsmanager_secret.test.id + secret_string = "test-string" +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} + +variable "region" { + description = "Region to deploy resource in" + type = string + nullable = false +} diff --git a/internal/service/secretsmanager/testdata/SecretVersion/basic/main_gen.tf b/internal/service/secretsmanager/testdata/SecretVersion/basic/main_gen.tf new file mode 100644 index 000000000000..b1a06680d7d0 --- /dev/null +++ b/internal/service/secretsmanager/testdata/SecretVersion/basic/main_gen.tf @@ -0,0 +1,17 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_secretsmanager_secret_version" "test" { + secret_id = aws_secretsmanager_secret.test.id + secret_string = "test-string" +} + +resource "aws_secretsmanager_secret" "test" { + name = var.rName +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} diff --git a/internal/service/secretsmanager/testdata/SecretVersion/basic_v6.10.0/main_gen.tf b/internal/service/secretsmanager/testdata/SecretVersion/basic_v6.10.0/main_gen.tf new file mode 100644 index 000000000000..e0fd8c952298 --- /dev/null +++ b/internal/service/secretsmanager/testdata/SecretVersion/basic_v6.10.0/main_gen.tf @@ -0,0 +1,27 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resource "aws_secretsmanager_secret_version" "test" { + secret_id = aws_secretsmanager_secret.test.id + secret_string = "test-string" +} + +resource "aws_secretsmanager_secret" "test" { + name = var.rName +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "6.10.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/secretsmanager/testdata/SecretVersion/region_override/main_gen.tf b/internal/service/secretsmanager/testdata/SecretVersion/region_override/main_gen.tf new file mode 100644 index 000000000000..edba78da0591 --- /dev/null +++ b/internal/service/secretsmanager/testdata/SecretVersion/region_override/main_gen.tf @@ -0,0 +1,27 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_secretsmanager_secret_version" "test" { + region = var.region + + secret_id = aws_secretsmanager_secret.test.id + secret_string = "test-string" +} + +resource "aws_secretsmanager_secret" "test" { + region = var.region + + name = var.rName +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} + +variable "region" { + description = "Region to deploy resource in" + type = string + nullable = false +} diff --git a/internal/service/secretsmanager/testdata/tmpl/secret_policy_tags.gtpl b/internal/service/secretsmanager/testdata/tmpl/secret_policy_tags.gtpl new file mode 100644 index 000000000000..e564687934d8 --- /dev/null +++ b/internal/service/secretsmanager/testdata/tmpl/secret_policy_tags.gtpl @@ -0,0 +1,23 @@ +resource "aws_secretsmanager_secret_policy" "test" { +{{- template "region" }} + secret_arn = aws_secretsmanager_secret.test.arn + + policy = jsonencode({ + Version = "2012-10-17" + Statement = [{ + Sid = "EnableAllPermissions" + Effect = "Allow" + Principal = { + AWS = "*" + } + Action = "secretsmanager:GetSecretValue" + Resource = "*" + }] + }) +} + +resource "aws_secretsmanager_secret" "test" { +{{- template "region" }} + name = var.rName +} + diff --git a/internal/service/secretsmanager/testdata/tmpl/secret_rotation_tags.gtpl b/internal/service/secretsmanager/testdata/tmpl/secret_rotation_tags.gtpl new file mode 100644 index 000000000000..371c71adbbd9 --- /dev/null +++ b/internal/service/secretsmanager/testdata/tmpl/secret_rotation_tags.gtpl @@ -0,0 +1,81 @@ +resource "aws_secretsmanager_secret_rotation" "test" { +{{- template "region" }} + secret_id = aws_secretsmanager_secret.test.id + rotation_lambda_arn = aws_lambda_function.test.arn + + rotation_rules { + automatically_after_days = 7 + } + + depends_on = [aws_lambda_permission.test] +} + +data "aws_partition" "current" { +} + +resource "aws_iam_role" "test" { + name = var.rName + + assume_role_policy = 
jsonencode({ + Version = "2012-10-17" + Statement = [{ + Action = "sts:AssumeRole" + Effect = "Allow" + Principal = { + Service = "lambda.amazonaws.com" + } + }] + }) +} + +resource "aws_iam_role_policy" "test" { + name = var.rName + role = aws_iam_role.test.id + + policy = jsonencode({ + Version = "2012-10-17" + Statement = [{ + Action = [ + "secretsmanager:DescribeSecret", + "secretsmanager:GetSecretValue", + "secretsmanager:PutSecretValue", + "secretsmanager:UpdateSecretVersionStage", + ] + Effect = "Allow" + Resource = aws_secretsmanager_secret.test.arn + }] + }) +} + +resource "aws_iam_role_policy_attachment" "test" { + policy_arn = "arn:${data.aws_partition.current.partition}:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole" + role = aws_iam_role.test.name +} + +resource "aws_lambda_function" "test" { +{{- template "region" }} + filename = "test-fixtures/lambdatest.zip" + function_name = var.rName + handler = "exports.example" + role = aws_iam_role.test.arn + runtime = "nodejs20.x" +} + +resource "aws_lambda_permission" "test" { +{{- template "region" }} + action = "lambda:InvokeFunction" + function_name = aws_lambda_function.test.function_name + principal = "secretsmanager.amazonaws.com" + statement_id = "AllowExecutionFromSecretsManager1" +} + +resource "aws_secretsmanager_secret" "test" { +{{- template "region" }} + name = var.rName +} + +resource "aws_secretsmanager_secret_version" "test" { +{{- template "region" }} + secret_id = aws_secretsmanager_secret.test.id + secret_string = "test-string" +} diff --git a/internal/service/secretsmanager/testdata/tmpl/secret_tags.gtpl b/internal/service/secretsmanager/testdata/tmpl/secret_tags.gtpl index 44f92d6ad62e..3e1e7624d0dd 100644 --- a/internal/service/secretsmanager/testdata/tmpl/secret_tags.gtpl +++ b/internal/service/secretsmanager/testdata/tmpl/secret_tags.gtpl @@ -1,4 +1,5 @@ resource "aws_secretsmanager_secret" "test" { +{{- template "region" }} name = var.rName {{- template "tags" . 
}} diff --git a/internal/service/secretsmanager/testdata/tmpl/secret_version_basic.gtpl b/internal/service/secretsmanager/testdata/tmpl/secret_version_basic.gtpl new file mode 100644 index 000000000000..1bbc02f019cc --- /dev/null +++ b/internal/service/secretsmanager/testdata/tmpl/secret_version_basic.gtpl @@ -0,0 +1,10 @@ +resource "aws_secretsmanager_secret_version" "test" { +{{- template "region" }} + secret_id = aws_secretsmanager_secret.test.id + secret_string = "test-string" +} + +resource "aws_secretsmanager_secret" "test" { +{{- template "region" }} + name = var.rName +} diff --git a/internal/service/securityhub/account.go b/internal/service/securityhub/account.go index 7fa8ee2fd51c..eef020e2cb43 100644 --- a/internal/service/securityhub/account.go +++ b/internal/service/securityhub/account.go @@ -181,7 +181,7 @@ func resourceAccountDelete(ctx context.Context, d *schema.ResourceData, meta any conn := meta.(*conns.AWSClient).SecurityHubClient(ctx) log.Printf("[DEBUG] Deleting Security Hub Account: %s", d.Id()) - _, err := tfresource.RetryWhenAWSErrMessageContains(ctx, adminAccountDeletedTimeout, func() (any, error) { + _, err := tfresource.RetryWhenAWSErrMessageContains(ctx, adminAccountDeletedTimeout, func(ctx context.Context) (any, error) { return conn.DisableSecurityHub(ctx, &securityhub.DisableSecurityHubInput{}) }, errCodeInvalidInputException, "Cannot disable Security Hub on the Security Hub administrator") diff --git a/internal/service/securityhub/automation_rule.go b/internal/service/securityhub/automation_rule.go index 314f8d9c56d5..a790e1f208b1 100644 --- a/internal/service/securityhub/automation_rule.go +++ b/internal/service/securityhub/automation_rule.go @@ -37,6 +37,7 @@ import ( // @ArnIdentity(identityDuplicateAttributes="id") // @Testing(existsType="github.com/aws/aws-sdk-go-v2/service/securityhub/types;awstypes;awstypes.AutomationRulesConfig") // @Testing(serialize=true) +// @Testing(preIdentityVersion="v5.100.0") func 
newAutomationRuleResource(_ context.Context) (resource.ResourceWithConfigure, error) { return &automationRuleResource{}, nil } diff --git a/internal/service/securityhub/automation_rule_identity_gen_test.go b/internal/service/securityhub/automation_rule_identity_gen_test.go index ead6ebd2e5f9..f0e1f306a717 100644 --- a/internal/service/securityhub/automation_rule_identity_gen_test.go +++ b/internal/service/securityhub/automation_rule_identity_gen_test.go @@ -16,6 +16,7 @@ import ( "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" "github.com/hashicorp/terraform-plugin-testing/tfversion" "github.com/hashicorp/terraform-provider-aws/internal/acctest" + tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -23,9 +24,10 @@ func testAccSecurityHubAutomationRule_IdentitySerial(t *testing.T) { t.Helper() testCases := map[string]func(t *testing.T){ - acctest.CtBasic: testAccSecurityHubAutomationRule_Identity_Basic, - "ExistingResource": testAccSecurityHubAutomationRule_Identity_ExistingResource, - "RegionOverride": testAccSecurityHubAutomationRule_Identity_RegionOverride, + acctest.CtBasic: testAccSecurityHubAutomationRule_Identity_Basic, + "ExistingResource": testAccSecurityHubAutomationRule_Identity_ExistingResource, + "ExistingResourceNoRefresh": testAccSecurityHubAutomationRule_Identity_ExistingResource_NoRefresh_NoChange, + "RegionOverride": testAccSecurityHubAutomationRule_Identity_RegionOverride, } acctest.RunSerialTests1Level(t, testCases, 0) @@ -38,7 +40,7 @@ func testAccSecurityHubAutomationRule_Identity_Basic(t *testing.T) { resourceName := "aws_securityhub_automation_rule.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ TerraformVersionChecks: []tfversion.TerraformVersionCheck{ tfversion.SkipBelow(tfversion.Version1_12_0), }, @@ -59,6 +61,9 @@ func 
testAccSecurityHubAutomationRule_Identity_Basic(t *testing.T) { ConfigStateChecks: []statecheck.StateCheck{ statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New(names.AttrARN), compare.ValuesSame()), statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), }, }, @@ -120,7 +125,7 @@ func testAccSecurityHubAutomationRule_Identity_RegionOverride(t *testing.T) { resourceName := "aws_securityhub_automation_rule.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ TerraformVersionChecks: []tfversion.TerraformVersionCheck{ tfversion.SkipBelow(tfversion.Version1_12_0), }, @@ -139,6 +144,9 @@ func testAccSecurityHubAutomationRule_Identity_RegionOverride(t *testing.T) { ConfigStateChecks: []statecheck.StateCheck{ statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New(names.AttrARN), compare.ValuesSame()), statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), }, }, @@ -230,3 +238,129 @@ func testAccSecurityHubAutomationRule_Identity_RegionOverride(t *testing.T) { }, }) } + +func testAccSecurityHubAutomationRule_Identity_ExistingResource(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.AutomationRulesConfig + resourceName := "aws_securityhub_automation_rule.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + 
acctest.Test(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.SecurityHubServiceID), + CheckDestroy: testAccCheckAutomationRuleDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/AutomationRule/basic_v5.100.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckAutomationRuleExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: v6.0 Identity set on refresh + { + ConfigDirectory: config.StaticDirectory("testdata/AutomationRule/basic_v6.0.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckAutomationRuleExists(ctx, resourceName, &v), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + + // Step 3: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/AutomationRule/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + 
PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + }, + }) +} + +func testAccSecurityHubAutomationRule_Identity_ExistingResource_NoRefresh_NoChange(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.AutomationRulesConfig + resourceName := "aws_securityhub_automation_rule.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.Test(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.SecurityHubServiceID), + CheckDestroy: testAccCheckAutomationRuleDestroy(ctx), + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/AutomationRule/basic_v5.100.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckAutomationRuleExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/AutomationRule/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + }, + }, + 
}) +} diff --git a/internal/service/securityhub/automation_rule_test.go b/internal/service/securityhub/automation_rule_test.go index dec5d91c7d65..6570f5d2cde8 100644 --- a/internal/service/securityhub/automation_rule_test.go +++ b/internal/service/securityhub/automation_rule_test.go @@ -13,13 +13,8 @@ import ( "github.com/aws/aws-sdk-go-v2/service/securityhub/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" - "github.com/hashicorp/terraform-plugin-testing/plancheck" - "github.com/hashicorp/terraform-plugin-testing/statecheck" "github.com/hashicorp/terraform-plugin-testing/terraform" - "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" - "github.com/hashicorp/terraform-plugin-testing/tfversion" "github.com/hashicorp/terraform-provider-aws/internal/acctest" - tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" "github.com/hashicorp/terraform-provider-aws/internal/conns" tfsecurityhub "github.com/hashicorp/terraform-provider-aws/internal/service/securityhub" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" @@ -345,70 +340,6 @@ func testAccAutomationRule_tags(t *testing.T) { }) } -func testAccSecurityHubAutomationRule_Identity_ExistingResource(t *testing.T) { - ctx := acctest.Context(t) - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resourceName := "aws_securityhub_automation_rule.test" - - resource.Test(t, resource.TestCase{ - TerraformVersionChecks: []tfversion.TerraformVersionCheck{ - tfversion.SkipBelow(tfversion.Version1_12_0), - }, - PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, names.SecurityHubServiceID), - CheckDestroy: testAccCheckAutomationRuleDestroy(ctx), - Steps: []resource.TestStep{ - { - ExternalProviders: map[string]resource.ExternalProvider{ - "aws": { - Source: "hashicorp/aws", - VersionConstraint: "5.100.0", - }, - }, - Config: 
testAccAutomationRuleConfig_basic(rName), - ConfigStateChecks: []statecheck.StateCheck{ - tfstatecheck.ExpectNoIdentity(resourceName), - }, - }, - { - ExternalProviders: map[string]resource.ExternalProvider{ - "aws": { - Source: "hashicorp/aws", - VersionConstraint: "6.0.0", - }, - }, - Config: testAccAutomationRuleConfig_basic(rName), - ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - PostApplyPostRefresh: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - }, - ConfigStateChecks: []statecheck.StateCheck{ - statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), - }, - }, - { - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - Config: testAccAutomationRuleConfig_basic(rName), - ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - PostApplyPostRefresh: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - }, - ConfigStateChecks: []statecheck.StateCheck{ - statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), - }, - }, - }, - }) -} - func testAccCheckAutomationRuleExists(ctx context.Context, n string, v *types.AutomationRulesConfig) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] diff --git a/internal/service/securityhub/insight.go b/internal/service/securityhub/insight.go index 7a335e620c56..3670cba68e4e 100644 --- a/internal/service/securityhub/insight.go +++ b/internal/service/securityhub/insight.go @@ -6,7 +6,6 @@ package securityhub import ( "context" "log" - "strconv" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/securityhub" @@ -18,6 +17,8 @@ import ( 
"github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/enum" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" + "github.com/hashicorp/terraform-provider-aws/internal/flex" + "github.com/hashicorp/terraform-provider-aws/internal/sdkv2/types/nullable" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/internal/verify" "github.com/hashicorp/terraform-provider-aws/names" @@ -397,19 +398,19 @@ func numberFilterSchema() *schema.Schema { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "eq": { - Type: schema.TypeString, + Type: nullable.TypeNullableFloat, Optional: true, - ValidateFunc: verify.ValidTypeStringNullableFloat, + ValidateFunc: nullable.ValidateTypeStringNullableFloat, }, "gte": { - Type: schema.TypeString, + Type: nullable.TypeNullableFloat, Optional: true, - ValidateFunc: verify.ValidTypeStringNullableFloat, + ValidateFunc: nullable.ValidateTypeStringNullableFloat, }, "lte": { - Type: schema.TypeString, + Type: nullable.TypeNullableFloat, Optional: true, - ValidateFunc: verify.ValidTypeStringNullableFloat, + ValidateFunc: nullable.ValidateTypeStringNullableFloat, }, }, }, @@ -966,24 +967,21 @@ func expandNumberFilters(l []any) []types.NumberFilter { nf := types.NumberFilter{} - if v, ok := tfMap["eq"].(string); ok && v != "" { - val, err := strconv.ParseFloat(v, 64) - if err == nil { - nf.Eq = aws.Float64(val) + if v, ok := tfMap["eq"].(string); ok { + if v, null, _ := nullable.Float(v).ValueFloat64(); !null { + nf.Eq = aws.Float64(v) } } - if v, ok := tfMap["gte"].(string); ok && v != "" { - val, err := strconv.ParseFloat(v, 64) - if err == nil { - nf.Gte = aws.Float64(val) + if v, ok := tfMap["gte"].(string); ok { + if v, null, _ := nullable.Float(v).ValueFloat64(); !null { + nf.Gte = aws.Float64(v) } } - if v, ok := tfMap["lte"].(string); ok && v != "" { - val, err := strconv.ParseFloat(v, 64) - if err == nil 
{ - nf.Lte = aws.Float64(val) + if v, ok := tfMap["lte"].(string); ok { + if v, null, _ := nullable.Float(v).ValueFloat64(); !null { + nf.Lte = aws.Float64(v) } } @@ -1122,15 +1120,15 @@ func flattenNumberFilters(filters []types.NumberFilter) []any { m := map[string]any{} if filter.Eq != nil { - m["eq"] = strconv.FormatFloat(aws.ToFloat64(filter.Eq), 'f', -1, 64) + m["eq"] = flex.Float64ToStringValue(filter.Eq) } if filter.Gte != nil { - m["gte"] = strconv.FormatFloat(aws.ToFloat64(filter.Gte), 'f', -1, 64) + m["gte"] = flex.Float64ToStringValue(filter.Gte) } if filter.Lte != nil { - m["lte"] = strconv.FormatFloat(aws.ToFloat64(filter.Lte), 'f', -1, 64) + m["lte"] = flex.Float64ToStringValue(filter.Lte) } numFilters = append(numFilters, m) diff --git a/internal/service/securityhub/organization_admin_account.go b/internal/service/securityhub/organization_admin_account.go index 03068c055446..2415aef6d762 100644 --- a/internal/service/securityhub/organization_admin_account.go +++ b/internal/service/securityhub/organization_admin_account.go @@ -57,7 +57,7 @@ func resourceOrganizationAdminAccountCreate(ctx context.Context, d *schema.Resou const ( timeout = 2 * time.Minute ) - _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, timeout, func() (any, error) { + _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, timeout, func(ctx context.Context) (any, error) { return conn.EnableOrganizationAdminAccount(ctx, input) }, errCodeResourceConflictException) diff --git a/internal/service/securityhub/organization_configuration.go b/internal/service/securityhub/organization_configuration.go index a17b9def3be5..55ba2861bf43 100644 --- a/internal/service/securityhub/organization_configuration.go +++ b/internal/service/securityhub/organization_configuration.go @@ -93,7 +93,7 @@ func resourceOrganizationConfigurationUpdate(ctx context.Context, d *schema.Reso } // e.g. 
"DataUnavailableException: Central configuration couldn't be enabled because data from organization o-ira6i4k380 is still syncing. Retry later." - _, err := tfresource.RetryWhenAWSErrMessageContains(ctx, timeout, func() (any, error) { + _, err := tfresource.RetryWhenAWSErrMessageContains(ctx, timeout, func(ctx context.Context) (any, error) { return conn.UpdateOrganizationConfiguration(ctx, input) }, errCodeDataUnavailableException, "Retry later") diff --git a/internal/service/securityhub/service_endpoint_resolver_gen.go b/internal/service/securityhub/service_endpoint_resolver_gen.go index 0df275daeb97..fc23da696d12 100644 --- a/internal/service/securityhub/service_endpoint_resolver_gen.go +++ b/internal/service/securityhub/service_endpoint_resolver_gen.go @@ -62,7 +62,7 @@ func (r resolverV2) ResolveEndpoint(ctx context.Context, params securityhub.Endp }) params.UseFIPS = aws.Bool(false) } else { - err = fmt.Errorf("looking up securityhub endpoint %q: %s", hostname, err) + err = fmt.Errorf("looking up securityhub endpoint %q: %w", hostname, err) return } } else { diff --git a/internal/service/securityhub/service_endpoints_gen_test.go b/internal/service/securityhub/service_endpoints_gen_test.go index db311162d392..380b6941b96a 100644 --- a/internal/service/securityhub/service_endpoints_gen_test.go +++ b/internal/service/securityhub/service_endpoints_gen_test.go @@ -521,7 +521,7 @@ func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { ) } -var errCancelOperation = fmt.Errorf("Test: Canceling request") +var errCancelOperation = errors.New("Test: Canceling request") func addCancelRequestMiddleware() func(*middleware.Stack) error { return func(stack *middleware.Stack) error { diff --git a/internal/service/securityhub/service_package_gen.go b/internal/service/securityhub/service_package_gen.go index 541ac7b523a0..c847a9d3240f 100644 --- a/internal/service/securityhub/service_package_gen.go +++ b/internal/service/securityhub/service_package_gen.go 
@@ -7,7 +7,6 @@ import ( "unique" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/securityhub" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -163,7 +162,7 @@ func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) ( func(o *securityhub.Options) { if inContext, ok := conns.FromContext(ctx); ok && inContext.VCREnabled() { tflog.Info(ctx, "overriding retry behavior to immediately return VCR errors") - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(vcr.InteractionNotFoundRetryableFunc)) + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), vcr.InteractionNotFoundRetryableFunc) } }, withExtraOptions(ctx, p, config), diff --git a/internal/service/securityhub/tags_gen.go b/internal/service/securityhub/tags_gen.go index 16dd2f4bc5e9..df22731c1090 100644 --- a/internal/service/securityhub/tags_gen.go +++ b/internal/service/securityhub/tags_gen.go @@ -3,8 +3,8 @@ package securityhub import ( "context" - "fmt" + "github.com/YakDriver/smarterr" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/securityhub" "github.com/hashicorp/terraform-plugin-log/tflog" @@ -26,7 +26,7 @@ func listTags(ctx context.Context, conn *securityhub.Client, identifier string, output, err := conn.ListTagsForResource(ctx, &input, optFns...) 
if err != nil { - return tftags.New(ctx, nil), err + return tftags.New(ctx, nil), smarterr.NewError(err) } return keyValueTags(ctx, output.Tags), nil @@ -38,7 +38,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri tags, err := listTags(ctx, meta.(*conns.AWSClient).SecurityHubClient(ctx), identifier) if err != nil { - return err + return smarterr.NewError(err) } if inContext, ok := tftags.FromContext(ctx); ok { @@ -99,7 +99,7 @@ func updateTags(ctx context.Context, conn *securityhub.Client, identifier string _, err := conn.UntagResource(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("untagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } @@ -114,7 +114,7 @@ func updateTags(ctx context.Context, conn *securityhub.Client, identifier string _, err := conn.TagResource(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("tagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } diff --git a/internal/service/securityhub/testdata/AutomationRule/basic_v5.100.0/main_gen.tf b/internal/service/securityhub/testdata/AutomationRule/basic_v5.100.0/main_gen.tf new file mode 100644 index 000000000000..b5d75c7b7b13 --- /dev/null +++ b/internal/service/securityhub/testdata/AutomationRule/basic_v5.100.0/main_gen.tf @@ -0,0 +1,51 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_securityhub_automation_rule" "test" { + description = "test description" + rule_name = var.rName + rule_order = 1 + + actions { + finding_fields_update { + severity { + label = "LOW" + product = "0.0" + } + + types = ["Software and Configuration Checks/Industry and Regulatory Standards"] + + user_defined_fields = { + key = "value" + } + } + type = "FINDING_FIELDS_UPDATE" + } + + criteria { + aws_account_id { + comparison = "EQUALS" + value = "1234567890" + } + } + + depends_on = [aws_securityhub_account.test] +} + +resource "aws_securityhub_account" "test" {} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "5.100.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/securityhub/testdata/AutomationRule/basic_v6.0.0/main_gen.tf b/internal/service/securityhub/testdata/AutomationRule/basic_v6.0.0/main_gen.tf new file mode 100644 index 000000000000..a46428388f9c --- /dev/null +++ b/internal/service/securityhub/testdata/AutomationRule/basic_v6.0.0/main_gen.tf @@ -0,0 +1,51 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_securityhub_automation_rule" "test" { + description = "test description" + rule_name = var.rName + rule_order = 1 + + actions { + finding_fields_update { + severity { + label = "LOW" + product = "0.0" + } + + types = ["Software and Configuration Checks/Industry and Regulatory Standards"] + + user_defined_fields = { + key = "value" + } + } + type = "FINDING_FIELDS_UPDATE" + } + + criteria { + aws_account_id { + comparison = "EQUALS" + value = "1234567890" + } + } + + depends_on = [aws_securityhub_account.test] +} + +resource "aws_securityhub_account" "test" {} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "6.0.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/securitylake/aws_log_source_test.go b/internal/service/securitylake/aws_log_source_test.go index b80d2f2b2069..13b09c1c815f 100644 --- a/internal/service/securitylake/aws_log_source_test.go +++ b/internal/service/securitylake/aws_log_source_test.go @@ -25,12 +25,15 @@ func testAccAWSLogSource_basic(t *testing.T) { resourceName := "aws_securitylake_aws_log_source.test" var logSource types.AwsLogSourceConfiguration + t.Cleanup(func() { + testAccDeleteGlueDatabases(ctx, t, acctest.Region()) + }) + resource.Test(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) acctest.PreCheckPartitionHasService(t, names.SecurityLake) testAccPreCheck(ctx, t) - testAccDeleteGlueDatabase(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SecurityLakeServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -74,12 +77,15 @@ func testAccAWSLogSource_sourceVersion(t *testing.T) { resourceName := "aws_securitylake_aws_log_source.test" var logSource types.AwsLogSourceConfiguration + t.Cleanup(func() { + testAccDeleteGlueDatabases(ctx, t, acctest.Region()) + }) + resource.Test(t, resource.TestCase{ 
PreCheck: func() { acctest.PreCheck(ctx, t) acctest.PreCheckPartitionHasService(t, names.SecurityLake) testAccPreCheck(ctx, t) - testAccDeleteGlueDatabase(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SecurityLakeServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -129,13 +135,16 @@ func testAccAWSLogSource_multiRegion(t *testing.T) { rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) var logSource types.AwsLogSourceConfiguration + t.Cleanup(func() { + testAccDeleteGlueDatabases(ctx, t, acctest.Region(), acctest.AlternateRegion()) + }) + resource.Test(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) acctest.PreCheckPartitionHasService(t, names.SecurityLake) acctest.PreCheckMultipleRegion(t, 2) testAccPreCheck(ctx, t) - testAccDeleteGlueDatabase(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SecurityLakeServiceID), ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), @@ -166,12 +175,15 @@ func testAccAWSLogSource_disappears(t *testing.T) { resourceName := "aws_securitylake_aws_log_source.test" var logSource types.AwsLogSourceConfiguration + t.Cleanup(func() { + testAccDeleteGlueDatabases(ctx, t, acctest.Region()) + }) + resource.Test(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) acctest.PreCheckPartitionHasService(t, names.SecurityLake) testAccPreCheck(ctx, t) - testAccDeleteGlueDatabase(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SecurityLakeServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -195,12 +207,15 @@ func testAccAWSLogSource_multiple(t *testing.T) { resourceName2 := "aws_securitylake_aws_log_source.test2" var logSource, logSource2 types.AwsLogSourceConfiguration + t.Cleanup(func() { + testAccDeleteGlueDatabases(ctx, t, acctest.Region()) + }) + resource.Test(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) acctest.PreCheckPartitionHasService(t, names.SecurityLake) testAccPreCheck(ctx, t) - 
testAccDeleteGlueDatabase(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SecurityLakeServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, diff --git a/internal/service/securitylake/custom_log_source_test.go b/internal/service/securitylake/custom_log_source_test.go index ae7e938f7402..976961f25079 100644 --- a/internal/service/securitylake/custom_log_source_test.go +++ b/internal/service/securitylake/custom_log_source_test.go @@ -28,12 +28,15 @@ func testAccCustomLogSource_basic(t *testing.T) { rName := randomCustomLogSourceName() var customLogSource types.CustomLogSourceResource + t.Cleanup(func() { + testAccDeleteGlueDatabases(ctx, t, acctest.Region()) + }) + resource.Test(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) acctest.PreCheckPartitionHasService(t, names.SecurityLake) testAccPreCheck(ctx, t) - testAccDeleteGlueDatabase(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SecurityLakeServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -76,12 +79,15 @@ func testAccCustomLogSource_sourceVersion(t *testing.T) { rName := randomCustomLogSourceName() var customLogSource types.CustomLogSourceResource + t.Cleanup(func() { + testAccDeleteGlueDatabases(ctx, t, acctest.Region()) + }) + resource.Test(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) acctest.PreCheckPartitionHasService(t, names.SecurityLake) testAccPreCheck(ctx, t) - testAccDeleteGlueDatabase(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SecurityLakeServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -125,12 +131,15 @@ func testAccCustomLogSource_multiple(t *testing.T) { rName2 := randomCustomLogSourceName() var customLogSource, customLogSource2 types.CustomLogSourceResource + t.Cleanup(func() { + testAccDeleteGlueDatabases(ctx, t, acctest.Region()) + }) + resource.Test(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) acctest.PreCheckPartitionHasService(t, names.SecurityLake) 
testAccPreCheck(ctx, t) - testAccDeleteGlueDatabase(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SecurityLakeServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -163,12 +172,15 @@ func testAccCustomLogSource_eventClasses(t *testing.T) { rName := randomCustomLogSourceName() var customLogSource types.CustomLogSourceResource + t.Cleanup(func() { + testAccDeleteGlueDatabases(ctx, t, acctest.Region()) + }) + resource.Test(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) acctest.PreCheckPartitionHasService(t, names.SecurityLake) testAccPreCheck(ctx, t) - testAccDeleteGlueDatabase(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SecurityLakeServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -227,12 +239,15 @@ func testAccCustomLogSource_disappears(t *testing.T) { rName := randomCustomLogSourceName() var customLogSource types.CustomLogSourceResource + t.Cleanup(func() { + testAccDeleteGlueDatabases(ctx, t, acctest.Region()) + }) + resource.Test(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) acctest.PreCheckPartitionHasService(t, names.SecurityLake) testAccPreCheck(ctx, t) - testAccDeleteGlueDatabase(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SecurityLakeServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, diff --git a/internal/service/securitylake/data_lake.go b/internal/service/securitylake/data_lake.go index 130001d39c00..fdaf200cbd7b 100644 --- a/internal/service/securitylake/data_lake.go +++ b/internal/service/securitylake/data_lake.go @@ -71,7 +71,7 @@ func (r *dataLakeResource) Schema(ctx context.Context, request resource.SchemaRe CustomType: fwtypes.ARNType, Required: true, PlanModifiers: []planmodifier.String{ - stringplanmodifier.RequiresReplace(), + stringplanmodifier.UseStateForUnknown(), }, }, "s3_bucket_arn": framework.ARNAttributeComputedOnly(), @@ -294,7 +294,7 @@ func (r *dataLakeResource) Update(ctx context.Context, request 
resource.UpdateRe conn := r.Meta().SecurityLakeClient(ctx) - if !new.Configurations.Equal(old.Configurations) { + if !new.Configurations.Equal(old.Configurations) || !new.MetaStoreManagerRoleARN.Equal(old.MetaStoreManagerRoleARN) { input := &securitylake.UpdateDataLakeInput{} response.Diagnostics.Append(fwflex.Expand(ctx, new, input)...) if response.Diagnostics.HasError() { @@ -585,7 +585,7 @@ func retryDataLakeConflictWithMutex[T any](ctx context.Context, f func() (T, err const dataLakeTimeout = 2 * time.Minute - raw, err := tfresource.RetryWhenIsA[*awstypes.ConflictException](ctx, dataLakeTimeout, func() (any, error) { + raw, err := tfresource.RetryWhenIsA[any, *awstypes.ConflictException](ctx, dataLakeTimeout, func(ctx context.Context) (any, error) { return f() }) if err != nil { diff --git a/internal/service/securitylake/data_lake_test.go b/internal/service/securitylake/data_lake_test.go index 4c7f3744e287..e9a32b0df12e 100644 --- a/internal/service/securitylake/data_lake_test.go +++ b/internal/service/securitylake/data_lake_test.go @@ -32,12 +32,15 @@ func testAccDataLake_basic(t *testing.T) { var datalake types.DataLakeResource resourceName := "aws_securitylake_data_lake.test" + t.Cleanup(func() { + testAccDeleteGlueDatabases(ctx, t, acctest.Region()) + }) + resource.Test(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) acctest.PreCheckPartitionHasService(t, names.SecurityLake) testAccPreCheck(ctx, t) - testAccDeleteGlueDatabase(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SecurityLakeServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -54,6 +57,7 @@ func testAccDataLake_basic(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "configuration.0.region", acctest.Region()), resource.TestCheckResourceAttr(resourceName, "configuration.0.replication_configuration.#", "0"), resource.TestCheckResourceAttrPair(resourceName, "meta_store_manager_role_arn", "aws_iam_role.meta_store_manager", names.AttrARN), + 
resource.TestCheckResourceAttr(resourceName, names.AttrRegion, acctest.Region()), resource.TestCheckResourceAttrSet(resourceName, "s3_bucket_arn"), resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, "0"), ), @@ -85,12 +89,15 @@ func testAccDataLake_Identity_Basic(t *testing.T) { var datalake types.DataLakeResource resourceName := "aws_securitylake_data_lake.test" + t.Cleanup(func() { + testAccDeleteGlueDatabases(ctx, t, acctest.Region()) + }) + resource.Test(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) acctest.PreCheckPartitionHasService(t, names.SecurityLake) testAccPreCheck(ctx, t) - testAccDeleteGlueDatabase(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SecurityLakeServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -103,7 +110,7 @@ func testAccDataLake_Identity_Basic(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "configuration.0.region", acctest.Region()), ), ConfigStateChecks: []statecheck.StateCheck{ - statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), tfknownvalue.RegionalARNExact("securityhub", "data-lake/default")), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), tfknownvalue.RegionalARNExact("securitylake", "data-lake/default")), statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New(names.AttrARN), compare.ValuesSame()), statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), @@ -127,12 +134,15 @@ func testAccDataLake_Identity_RegionOverride(t *testing.T) { resourceName := "aws_securitylake_data_lake.test" + t.Cleanup(func() { + testAccDeleteGlueDatabases(ctx, t, acctest.Region(), acctest.AlternateRegion()) + }) + resource.Test(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) acctest.PreCheckPartitionHasService(t, 
names.SecurityLake) testAccPreCheck(ctx, t) - testAccDeleteGlueDatabase(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SecurityLakeServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -144,7 +154,7 @@ func testAccDataLake_Identity_RegionOverride(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "configuration.0.region", acctest.AlternateRegion()), ), ConfigStateChecks: []statecheck.StateCheck{ - statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), tfknownvalue.RegionalARNAlternateRegionExact("securityhub", "data-lake/default")), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), tfknownvalue.RegionalARNAlternateRegionExact("securitylake", "data-lake/default")), statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New(names.AttrARN), compare.ValuesSame()), statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), @@ -175,12 +185,15 @@ func testAccDataLake_disappears(t *testing.T) { var datalake types.DataLakeResource resourceName := "aws_securitylake_data_lake.test" + t.Cleanup(func() { + testAccDeleteGlueDatabases(ctx, t, acctest.Region()) + }) + resource.Test(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) acctest.PreCheckPartitionHasService(t, names.SecurityLake) testAccPreCheck(ctx, t) - testAccDeleteGlueDatabase(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SecurityLakeServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -203,12 +216,15 @@ func testAccDataLake_tags(t *testing.T) { var datalake types.DataLakeResource resourceName := "aws_securitylake_data_lake.test" + t.Cleanup(func() { + testAccDeleteGlueDatabases(ctx, t, acctest.Region()) + }) + resource.Test(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) 
acctest.PreCheckPartitionHasService(t, names.SecurityLake) testAccPreCheck(ctx, t) - testAccDeleteGlueDatabase(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SecurityLakeServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -255,12 +271,15 @@ func testAccDataLake_lifeCycle(t *testing.T) { rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_securitylake_data_lake.test" + t.Cleanup(func() { + testAccDeleteGlueDatabases(ctx, t, acctest.Region()) + }) + resource.Test(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) acctest.PreCheckPartitionHasService(t, names.SecurityLake) testAccPreCheck(ctx, t) - testAccDeleteGlueDatabase(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SecurityLakeServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -290,43 +309,57 @@ func testAccDataLake_lifeCycle(t *testing.T) { ImportStateVerify: true, ImportStateVerifyIgnore: []string{"meta_store_manager_role_arn"}, }, + { + Config: testAccDataLakeConfig_lifeCycleUpdate(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckDataLakeExists(ctx, resourceName, &datalake), + resource.TestCheckResourceAttrPair(resourceName, "meta_store_manager_role_arn", "aws_iam_role.meta_store_manager", names.AttrARN), + resource.TestCheckResourceAttr(resourceName, "configuration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "configuration.0.encryption_configuration.#", "1"), + resource.TestCheckResourceAttrPair(resourceName, "configuration.0.encryption_configuration.0.kms_key_id", "aws_kms_key.test", names.AttrID), + resource.TestCheckResourceAttr(resourceName, "configuration.0.lifecycle_configuration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "configuration.0.lifecycle_configuration.0.transition.#", "1"), + resource.TestCheckResourceAttr(resourceName, "configuration.0.lifecycle_configuration.0.transition.0.days", "31"), + resource.TestCheckResourceAttr(resourceName, 
"configuration.0.lifecycle_configuration.0.transition.0.storage_class", "STANDARD_IA"), + resource.TestCheckResourceAttr(resourceName, "configuration.0.lifecycle_configuration.0.expiration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "configuration.0.lifecycle_configuration.0.expiration.0.days", "300"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"meta_store_manager_role_arn"}, + }, }, }) } -func testAccDataLake_lifeCycleUpdate(t *testing.T) { +func testAccDataLake_metaStoreUpdate(t *testing.T) { ctx := acctest.Context(t) var datalake types.DataLakeResource rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_securitylake_data_lake.test" + t.Cleanup(func() { + testAccDeleteGlueDatabases(ctx, t, acctest.Region()) + }) + resource.Test(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) acctest.PreCheckPartitionHasService(t, names.SecurityLake) testAccPreCheck(ctx, t) - testAccDeleteGlueDatabase(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SecurityLakeServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckDataLakeDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccDataLakeConfig_lifeCycle(rName), + Config: testAccDataLakeConfig_metaStore(rName), Check: resource.ComposeTestCheckFunc( testAccCheckDataLakeExists(ctx, resourceName, &datalake), resource.TestCheckResourceAttrPair(resourceName, "meta_store_manager_role_arn", "aws_iam_role.meta_store_manager", names.AttrARN), - resource.TestCheckResourceAttr(resourceName, "configuration.#", "1"), - resource.TestCheckResourceAttr(resourceName, "configuration.0.encryption_configuration.#", "1"), - resource.TestCheckResourceAttrPair(resourceName, "configuration.0.encryption_configuration.0.kms_key_id", "aws_kms_key.test", names.AttrID), - resource.TestCheckResourceAttr(resourceName, "configuration.0.lifecycle_configuration.#", "1"), - 
resource.TestCheckResourceAttr(resourceName, "configuration.0.lifecycle_configuration.0.transition.#", "2"), - resource.TestCheckResourceAttr(resourceName, "configuration.0.lifecycle_configuration.0.transition.0.days", "31"), - resource.TestCheckResourceAttr(resourceName, "configuration.0.lifecycle_configuration.0.transition.0.storage_class", "STANDARD_IA"), - resource.TestCheckResourceAttr(resourceName, "configuration.0.lifecycle_configuration.0.transition.1.days", "80"), - resource.TestCheckResourceAttr(resourceName, "configuration.0.lifecycle_configuration.0.transition.1.storage_class", "ONEZONE_IA"), - resource.TestCheckResourceAttr(resourceName, "configuration.0.lifecycle_configuration.0.expiration.#", "1"), - resource.TestCheckResourceAttr(resourceName, "configuration.0.lifecycle_configuration.0.expiration.0.days", "300"), ), }, { @@ -336,19 +369,10 @@ func testAccDataLake_lifeCycleUpdate(t *testing.T) { ImportStateVerifyIgnore: []string{"meta_store_manager_role_arn"}, }, { - Config: testAccDataLakeConfig_lifeCycleUpdate(rName), + Config: testAccDataLakeConfig_metaStoreUpdate(rName), Check: resource.ComposeTestCheckFunc( testAccCheckDataLakeExists(ctx, resourceName, &datalake), - resource.TestCheckResourceAttrPair(resourceName, "meta_store_manager_role_arn", "aws_iam_role.meta_store_manager", names.AttrARN), - resource.TestCheckResourceAttr(resourceName, "configuration.#", "1"), - resource.TestCheckResourceAttr(resourceName, "configuration.0.encryption_configuration.#", "1"), - resource.TestCheckResourceAttrPair(resourceName, "configuration.0.encryption_configuration.0.kms_key_id", "aws_kms_key.test", names.AttrID), - resource.TestCheckResourceAttr(resourceName, "configuration.0.lifecycle_configuration.#", "1"), - resource.TestCheckResourceAttr(resourceName, "configuration.0.lifecycle_configuration.0.transition.#", "1"), - resource.TestCheckResourceAttr(resourceName, "configuration.0.lifecycle_configuration.0.transition.0.days", "31"), - 
resource.TestCheckResourceAttr(resourceName, "configuration.0.lifecycle_configuration.0.transition.0.storage_class", "STANDARD_IA"), - resource.TestCheckResourceAttr(resourceName, "configuration.0.lifecycle_configuration.0.expiration.#", "1"), - resource.TestCheckResourceAttr(resourceName, "configuration.0.lifecycle_configuration.0.expiration.0.days", "300"), + resource.TestCheckResourceAttrPair(resourceName, "meta_store_manager_role_arn", "aws_iam_role.meta_store_manager_updated", names.AttrARN), ), }, { @@ -367,13 +391,16 @@ func testAccDataLake_replication(t *testing.T) { rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_securitylake_data_lake.region_2" + t.Cleanup(func() { + testAccDeleteGlueDatabases(ctx, t, acctest.Region(), acctest.AlternateRegion()) + }) + resource.Test(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) acctest.PreCheckPartitionHasService(t, names.SecurityLake) testAccPreCheck(ctx, t) acctest.PreCheckMultipleRegion(t, 2) - testAccDeleteGlueDatabase(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SecurityLakeServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -393,10 +420,12 @@ func testAccDataLake_replication(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "configuration.0.lifecycle_configuration.0.transition.0.storage_class", "STANDARD_IA"), resource.TestCheckResourceAttr(resourceName, "configuration.0.lifecycle_configuration.0.expiration.#", "1"), resource.TestCheckResourceAttr(resourceName, "configuration.0.lifecycle_configuration.0.expiration.0.days", "300"), + resource.TestCheckResourceAttr(resourceName, "configuration.0.region", acctest.AlternateRegion()), resource.TestCheckResourceAttr(resourceName, "configuration.0.replication_configuration.#", "1"), resource.TestCheckResourceAttrPair(resourceName, "configuration.0.replication_configuration.0.role_arn", "aws_iam_role.datalake_s3_replication", names.AttrARN), 
resource.TestCheckResourceAttr(resourceName, "configuration.0.replication_configuration.0.regions.#", "1"), resource.TestCheckTypeSetElemAttr(resourceName, "configuration.0.replication_configuration.0.regions.*", acctest.Region()), + resource.TestCheckResourceAttr(resourceName, names.AttrRegion, acctest.Region()), ), }, { @@ -413,12 +442,17 @@ func testAccDataLake_Identity_ExistingResource(t *testing.T) { ctx := acctest.Context(t) resourceName := "aws_securitylake_data_lake.test" + t.Cleanup(func() { + testAccDeleteGlueDatabases(ctx, t) + }) + resource.Test(t, resource.TestCase{ TerraformVersionChecks: []tfversion.TerraformVersionCheck{ tfversion.SkipBelow(tfversion.Version1_12_0), }, PreCheck: func() { acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, names.SecurityLake) testAccPreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SecurityLakeServiceID), @@ -522,7 +556,7 @@ func testAccCheckDataLakeExists(ctx context.Context, n string, v *types.DataLake } } -const testAccDataLakeConfigConfig_base = testAccDataLakeConfigConfig_base_iam + ` +const testAccDataLakeConfigConfig_base_kmsKey = ` resource "aws_kms_key" "test" { deletion_window_in_days = 7 enable_key_rotation = true @@ -545,7 +579,7 @@ POLICY func testAccDataLakeConfigConfig_base_regionOverride() string { return acctest.ConfigCompose( - testAccDataLakeConfigConfig_base_iam, + testAccDataLakeConfigConfig_base, fmt.Sprintf(` resource "aws_kms_key" "test" { region = %[1]q @@ -570,7 +604,7 @@ POLICY `, acctest.AlternateRegion())) } -const testAccDataLakeConfigConfig_base_iam = ` +const testAccDataLakeConfigConfig_base = ` data "aws_caller_identity" "current" {} data "aws_partition" "current" {} @@ -616,6 +650,33 @@ resource "aws_iam_role" "datalake_s3_replication" { POLICY } +# These are required in all configurations because the role stays registered with the Lake Formation Data Lake +# after the MetaStoreManager role is updated. 
+resource "aws_iam_role" "meta_store_manager_updated" { + name = "AmazonSecurityLakeMetaStoreManagerV1" + path = "/service-role/" + assume_role_policy = < 0 { + // To enable debugging of potential issues, log as a warning instead of exiting prematurely. + // For example, errors can be present after a failed version update, and the stack rolled back + // to the current version. + if v := outputDR.RecordDetail.RecordErrors; len(v) > 0 { var errs []error for _, err := range v { @@ -472,13 +442,14 @@ func resourceProvisionedProductRead(ctx context.Context, d *schema.ResourceData, log.Printf("[WARN] Errors found when describing Service Catalog Provisioned Product (%s) Record (%s): %s", d.Id(), aws.ToString(detail.LastProvisioningRecordId), errors.Join(errs...)) } - if err := d.Set("outputs", flattenRecordOutputs(recordOutput.RecordOutputs)); err != nil { + if err := d.Set("outputs", flattenRecordOutputs(outputDR.RecordOutputs)); err != nil { return sdkdiag.AppendErrorf(diags, "setting outputs: %s", err) } - d.Set("path_id", recordOutput.RecordDetail.PathId) + d.Set("path_id", outputDR.RecordDetail.PathId) + d.Set("provisioning_artifact_id", outputDR.RecordDetail.ProvisioningArtifactId) - setTagsOut(ctx, svcTags(recordKeyValueTags(ctx, recordOutput.RecordDetail.RecordTags))) + setTagsOut(ctx, svcTags(recordKeyValueTags(ctx, outputDR.RecordDetail.RecordTags))) return diags } @@ -487,9 +458,9 @@ func resourceProvisionedProductUpdate(ctx context.Context, d *schema.ResourceDat var diags diag.Diagnostics conn := meta.(*conns.AWSClient).ServiceCatalogClient(ctx) - input := &servicecatalog.UpdateProvisionedProductInput{ - UpdateToken: aws.String(id.UniqueId()), + input := servicecatalog.UpdateProvisionedProductInput{ ProvisionedProductId: aws.String(d.Id()), + UpdateToken: aws.String(id.UniqueId()), } if v, ok := d.GetOk("accept_language"); ok { @@ -531,29 +502,43 @@ func resourceProvisionedProductUpdate(ctx context.Context, d *schema.ResourceDat // to provisioned AWS objects 
during update if the tags don't change. input.Tags = getTagsIn(ctx) - err := retry.RetryContext(ctx, d.Timeout(schema.TimeoutUpdate), func() *retry.RetryError { - _, err := conn.UpdateProvisionedProduct(ctx, input) - - if errs.IsAErrorMessageContains[*awstypes.InvalidParametersException](err, "profile does not exist") { - return retry.RetryableError(err) - } - - if err != nil { - return retry.NonRetryableError(err) - } - - return nil - }) + _, err := tfresource.RetryWhen(ctx, d.Timeout(schema.TimeoutUpdate), + func(ctx context.Context) (any, error) { + return conn.UpdateProvisionedProduct(ctx, &input) + }, + func(err error) (bool, error) { + if errs.IsAErrorMessageContains[*awstypes.InvalidParametersException](err, "profile does not exist") { + return true, err + } - if tfresource.TimedOut(err) { - _, err = conn.UpdateProvisionedProduct(ctx, input) - } + return false, err + }, + ) if err != nil { return sdkdiag.AppendErrorf(diags, "updating Service Catalog Provisioned Product (%s): %s", d.Id(), err) } - if _, err := waitProvisionedProductReady(ctx, conn, d.Get("accept_language").(string), d.Id(), "", d.Timeout(schema.TimeoutUpdate)); err != nil { + if _, err := waitProvisionedProductReady(ctx, conn, d.Id(), d.Get("accept_language").(string), d.Timeout(schema.TimeoutUpdate)); err != nil { + if failureErr, ok := errs.As[*provisionedProductFailureError](err); ok { + log.Printf("[WARN] Service Catalog Provisioned Product (%s) update failed with status %s, refreshing state", d.Id(), failureErr.Status) + refreshDiags := resourceProvisionedProductRead(ctx, d, meta) + if refreshDiags.HasError() { + // If refresh fails, return both errors + return append(refreshDiags, sdkdiag.AppendErrorf(diags, "waiting for Service Catalog Provisioned Product (%s) update: %s", d.Id(), err)...) + } + + if d.HasChange("provisioning_parameters") { + // If parameters were changed, rollback to previous values. 
+ // + // The read APIs used to refresh state above do not return parameter values, and therefore + // will not reflect that the planned updates did not take effect. Explicitly rolling back + // ensures the planned parameter changes are attempted again on a subsequent apply. + oldParams, _ := d.GetChange("provisioning_parameters") + d.Set("provisioning_parameters", oldParams) + } + } + return sdkdiag.AppendErrorf(diags, "waiting for Service Catalog Provisioned Product (%s) update: %s", d.Id(), err) } @@ -564,7 +549,7 @@ func resourceProvisionedProductDelete(ctx context.Context, d *schema.ResourceDat var diags diag.Diagnostics conn := meta.(*conns.AWSClient).ServiceCatalogClient(ctx) - input := &servicecatalog.TerminateProvisionedProductInput{ + input := servicecatalog.TerminateProvisionedProductInput{ TerminateToken: aws.String(id.UniqueId()), ProvisionedProductId: aws.String(d.Id()), } @@ -581,7 +566,7 @@ func resourceProvisionedProductDelete(ctx context.Context, d *schema.ResourceDat input.RetainPhysicalResources = v.(bool) } - _, err := conn.TerminateProvisionedProduct(ctx, input) + _, err := conn.TerminateProvisionedProduct(ctx, &input) if errs.IsA[*awstypes.ResourceNotFoundException](err) { return diags @@ -591,19 +576,209 @@ func resourceProvisionedProductDelete(ctx context.Context, d *schema.ResourceDat return sdkdiag.AppendErrorf(diags, "terminating Service Catalog Provisioned Product (%s): %s", d.Id(), err) } - err = waitProvisionedProductTerminated(ctx, conn, d.Get("accept_language").(string), d.Id(), "", d.Timeout(schema.TimeoutDelete)) + _, err = waitProvisionedProductTerminated(ctx, conn, d.Id(), d.Get("accept_language").(string), d.Timeout(schema.TimeoutDelete)) + + if errs.IsA[*provisionedProductFailureError](err) { + input.IgnoreErrors = true + + _, err = conn.TerminateProvisionedProduct(ctx, &input) + if errs.IsA[*awstypes.ResourceNotFoundException](err) { + return diags + } + if err != nil { + return sdkdiag.AppendErrorf(diags, "terminating 
Service Catalog Provisioned Product (%s): %s", d.Id(), err) + } + + _, err = waitProvisionedProductTerminated(ctx, conn, d.Id(), d.Get("accept_language").(string), d.Timeout(schema.TimeoutDelete)) + } if errs.IsA[*awstypes.ResourceNotFoundException](err) { return diags } if err != nil { - return sdkdiag.AppendErrorf(diags, "waiting for Service Catalog Provisioned Product (%s) to be terminated: %s", d.Id(), err) + return sdkdiag.AppendErrorf(diags, "waiting for Service Catalog Provisioned Product (%s) terminate: %s", d.Id(), err) } return diags } +func findProvisionedProductByTwoPartKey(ctx context.Context, conn *servicecatalog.Client, id, acceptLanguage string) (*servicecatalog.DescribeProvisionedProductOutput, error) { + input := servicecatalog.DescribeProvisionedProductInput{ + Id: aws.String(id), + } + if acceptLanguage != "" { + input.AcceptLanguage = aws.String(acceptLanguage) + } + + return findProvisionedProduct(ctx, conn, &input) +} + +func findProvisionedProduct(ctx context.Context, conn *servicecatalog.Client, input *servicecatalog.DescribeProvisionedProductInput) (*servicecatalog.DescribeProvisionedProductOutput, error) { + output, err := conn.DescribeProvisionedProduct(ctx, input) + + if errs.IsA[*awstypes.ResourceNotFoundException](err) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, + } + } + + if err != nil { + return nil, err + } + + if output == nil || output.ProvisionedProductDetail == nil { + return nil, tfresource.NewEmptyResultError(input) + } + + return output, nil +} + +func findRecordByTwoPartKey(ctx context.Context, conn *servicecatalog.Client, id, acceptLanguage string) (*servicecatalog.DescribeRecordOutput, error) { + input := servicecatalog.DescribeRecordInput{ + Id: aws.String(id), + } + if acceptLanguage != "" { + input.AcceptLanguage = aws.String(acceptLanguage) + } + + return findRecord(ctx, conn, &input) +} + +func findRecord(ctx context.Context, conn *servicecatalog.Client, input 
*servicecatalog.DescribeRecordInput) (*servicecatalog.DescribeRecordOutput, error) { + var output *servicecatalog.DescribeRecordOutput + + for { + page, err := conn.DescribeRecord(ctx, input) + + if errs.IsA[*awstypes.ResourceNotFoundException](err) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, + } + } + + if err != nil { + return nil, err + } + + if page == nil { + break + } + + if output == nil { + output = page + } else { + output.RecordOutputs = append(output.RecordOutputs, page.RecordOutputs...) + } + + nextPageToken := aws.ToString(page.NextPageToken) + if nextPageToken == "" { + break + } + input.PageToken = aws.String(nextPageToken) + } + + if output == nil || output.RecordDetail == nil { + return nil, tfresource.NewEmptyResultError(input) + } + + return output, nil +} + +func statusProvisionedProduct(ctx context.Context, conn *servicecatalog.Client, id, acceptLanguage string) retry.StateRefreshFunc { + return func() (any, string, error) { + output, err := findProvisionedProductByTwoPartKey(ctx, conn, id, acceptLanguage) + + if tfresource.NotFound(err) { + return nil, "", nil + } + + if err != nil { + return nil, "", err + } + + return output, string(output.ProvisionedProductDetail.Status), err + } +} + +func waitProvisionedProductReady(ctx context.Context, conn *servicecatalog.Client, id, acceptLanguage string, timeout time.Duration) (*servicecatalog.DescribeProvisionedProductOutput, error) { //nolint:unparam + stateConf := &retry.StateChangeConf{ + Pending: enum.Slice(awstypes.ProvisionedProductStatusUnderChange, awstypes.ProvisionedProductStatusPlanInProgress), + Target: enum.Slice(awstypes.ProvisionedProductStatusAvailable), + Refresh: statusProvisionedProduct(ctx, conn, id, acceptLanguage), + Timeout: timeout, + ContinuousTargetOccurence: continuousTargetOccurrence, + NotFoundChecks: notFoundChecks, + MinTimeout: minTimeout, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + + if output, ok := 
outputRaw.(*servicecatalog.DescribeProvisionedProductOutput); ok { + if detail := output.ProvisionedProductDetail; detail != nil { + if errs.IsA[*retry.UnexpectedStateError](err) { + // The statuses `ERROR` and `TAINTED` are equivalent: the application of the requested change has failed. + // The difference is that, in the case of `TAINTED`, there is a previous version to roll back to. + if status := detail.Status; status == awstypes.ProvisionedProductStatusError || status == awstypes.ProvisionedProductStatusTainted { + // Create a custom error type that signals state refresh is needed + return output, &provisionedProductFailureError{ + StatusMessage: aws.ToString(detail.StatusMessage), + Status: status, + } + } + } + } + return output, err + } + + return nil, err +} + +func waitProvisionedProductTerminated(ctx context.Context, conn *servicecatalog.Client, id, acceptLanguage string, timeout time.Duration) (*servicecatalog.DescribeProvisionedProductOutput, error) { //nolint:unparam + stateConf := &retry.StateChangeConf{ + Pending: enum.Slice( + awstypes.ProvisionedProductStatusAvailable, + awstypes.ProvisionedProductStatusUnderChange, + ), + Target: []string{}, + Refresh: statusProvisionedProduct(ctx, conn, id, acceptLanguage), + Timeout: timeout, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + + if output, ok := outputRaw.(*servicecatalog.DescribeProvisionedProductOutput); ok { + if detail := output.ProvisionedProductDetail; detail != nil { + if errs.IsA[*retry.UnexpectedStateError](err) { + // If the status is `TAINTED`, we can retry with `IgnoreErrors` + if status := detail.Status; status == awstypes.ProvisionedProductStatusTainted { + // Create a custom error type that signals state refresh is needed + return output, &provisionedProductFailureError{ + StatusMessage: aws.ToString(detail.StatusMessage), + Status: status, + } + } + } + } + return output, err + } + + return nil, err +} + +// provisionedProductFailureError represents a provisioned 
product operation failure +// that requires state refresh to recover from inconsistent state. +type provisionedProductFailureError struct { + StatusMessage string + Status awstypes.ProvisionedProductStatus +} + +func (e *provisionedProductFailureError) Error() string { + return e.StatusMessage +} + func expandProvisioningParameter(tfMap map[string]any) awstypes.ProvisioningParameter { apiObject := awstypes.ProvisioningParameter{} diff --git a/internal/service/servicecatalog/provisioned_product_tags_gen_test.go b/internal/service/servicecatalog/provisioned_product_tags_gen_test.go index 3ebe0ae7af99..9b7fd5ad543a 100644 --- a/internal/service/servicecatalog/provisioned_product_tags_gen_test.go +++ b/internal/service/servicecatalog/provisioned_product_tags_gen_test.go @@ -9,7 +9,6 @@ import ( awstypes "github.com/aws/aws-sdk-go-v2/service/servicecatalog/types" "github.com/hashicorp/terraform-plugin-testing/config" - sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/knownvalue" "github.com/hashicorp/terraform-plugin-testing/plancheck" @@ -24,11 +23,12 @@ import ( func TestAccServiceCatalogProvisionedProduct_tags(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.ProvisionedProductDetail resourceName := "aws_servicecatalog_provisioned_product.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ServiceCatalogServiceID), CheckDestroy: testAccCheckProvisionedProductDestroy(ctx), @@ -222,11 +222,12 @@ func TestAccServiceCatalogProvisionedProduct_tags(t *testing.T) { func TestAccServiceCatalogProvisionedProduct_tags_null(t *testing.T) { ctx := acctest.Context(t) + var v 
awstypes.ProvisionedProductDetail resourceName := "aws_servicecatalog_provisioned_product.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ServiceCatalogServiceID), CheckDestroy: testAccCheckProvisionedProductDestroy(ctx), @@ -292,11 +293,12 @@ func TestAccServiceCatalogProvisionedProduct_tags_null(t *testing.T) { func TestAccServiceCatalogProvisionedProduct_tags_EmptyMap(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.ProvisionedProductDetail resourceName := "aws_servicecatalog_provisioned_product.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ServiceCatalogServiceID), CheckDestroy: testAccCheckProvisionedProductDestroy(ctx), @@ -358,11 +360,12 @@ func TestAccServiceCatalogProvisionedProduct_tags_EmptyMap(t *testing.T) { func TestAccServiceCatalogProvisionedProduct_tags_AddOnUpdate(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.ProvisionedProductDetail resourceName := "aws_servicecatalog_provisioned_product.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ServiceCatalogServiceID), CheckDestroy: testAccCheckProvisionedProductDestroy(ctx), @@ -444,11 +447,12 @@ func TestAccServiceCatalogProvisionedProduct_tags_EmptyTag_OnCreate(t *testing.T t.Skip("Resource 
ProvisionedProduct does not support empty tags") ctx := acctest.Context(t) + var v awstypes.ProvisionedProductDetail resourceName := "aws_servicecatalog_provisioned_product.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ServiceCatalogServiceID), CheckDestroy: testAccCheckProvisionedProductDestroy(ctx), @@ -541,11 +545,12 @@ func TestAccServiceCatalogProvisionedProduct_tags_EmptyTag_OnUpdate_Add(t *testi t.Skip("Resource ProvisionedProduct does not support empty tags") ctx := acctest.Context(t) + var v awstypes.ProvisionedProductDetail resourceName := "aws_servicecatalog_provisioned_product.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ServiceCatalogServiceID), CheckDestroy: testAccCheckProvisionedProductDestroy(ctx), @@ -686,11 +691,12 @@ func TestAccServiceCatalogProvisionedProduct_tags_EmptyTag_OnUpdate_Replace(t *t t.Skip("Resource ProvisionedProduct does not support empty tags") ctx := acctest.Context(t) + var v awstypes.ProvisionedProductDetail resourceName := "aws_servicecatalog_provisioned_product.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ServiceCatalogServiceID), CheckDestroy: testAccCheckProvisionedProductDestroy(ctx), @@ -778,11 +784,12 @@ func 
TestAccServiceCatalogProvisionedProduct_tags_EmptyTag_OnUpdate_Replace(t *t func TestAccServiceCatalogProvisionedProduct_tags_DefaultTags_providerOnly(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.ProvisionedProductDetail resourceName := "aws_servicecatalog_provisioned_product.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ServiceCatalogServiceID), CheckDestroy: testAccCheckProvisionedProductDestroy(ctx), @@ -975,11 +982,12 @@ func TestAccServiceCatalogProvisionedProduct_tags_DefaultTags_providerOnly(t *te func TestAccServiceCatalogProvisionedProduct_tags_DefaultTags_nonOverlapping(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.ProvisionedProductDetail resourceName := "aws_servicecatalog_provisioned_product.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ServiceCatalogServiceID), CheckDestroy: testAccCheckProvisionedProductDestroy(ctx), @@ -1146,11 +1154,12 @@ func TestAccServiceCatalogProvisionedProduct_tags_DefaultTags_nonOverlapping(t * func TestAccServiceCatalogProvisionedProduct_tags_DefaultTags_overlapping(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.ProvisionedProductDetail resourceName := "aws_servicecatalog_provisioned_product.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) 
}, ErrorCheck: acctest.ErrorCheck(t, names.ServiceCatalogServiceID), CheckDestroy: testAccCheckProvisionedProductDestroy(ctx), @@ -1331,11 +1340,12 @@ func TestAccServiceCatalogProvisionedProduct_tags_DefaultTags_overlapping(t *tes func TestAccServiceCatalogProvisionedProduct_tags_DefaultTags_updateToProviderOnly(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.ProvisionedProductDetail resourceName := "aws_servicecatalog_provisioned_product.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ServiceCatalogServiceID), CheckDestroy: testAccCheckProvisionedProductDestroy(ctx), @@ -1424,11 +1434,12 @@ func TestAccServiceCatalogProvisionedProduct_tags_DefaultTags_updateToProviderOn func TestAccServiceCatalogProvisionedProduct_tags_DefaultTags_updateToResourceOnly(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.ProvisionedProductDetail resourceName := "aws_servicecatalog_provisioned_product.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ServiceCatalogServiceID), CheckDestroy: testAccCheckProvisionedProductDestroy(ctx), @@ -1518,11 +1529,12 @@ func TestAccServiceCatalogProvisionedProduct_tags_DefaultTags_emptyResourceTag(t t.Skip("Resource ProvisionedProduct does not support empty tags") ctx := acctest.Context(t) + var v awstypes.ProvisionedProductDetail resourceName := "aws_servicecatalog_provisioned_product.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, 
acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ServiceCatalogServiceID), CheckDestroy: testAccCheckProvisionedProductDestroy(ctx), @@ -1588,11 +1600,12 @@ func TestAccServiceCatalogProvisionedProduct_tags_DefaultTags_emptyProviderOnlyT t.Skip("Resource ProvisionedProduct does not support empty tags") ctx := acctest.Context(t) + var v awstypes.ProvisionedProductDetail resourceName := "aws_servicecatalog_provisioned_product.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ServiceCatalogServiceID), CheckDestroy: testAccCheckProvisionedProductDestroy(ctx), @@ -1648,11 +1661,12 @@ func TestAccServiceCatalogProvisionedProduct_tags_DefaultTags_emptyProviderOnlyT func TestAccServiceCatalogProvisionedProduct_tags_DefaultTags_nullOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.ProvisionedProductDetail resourceName := "aws_servicecatalog_provisioned_product.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ServiceCatalogServiceID), CheckDestroy: testAccCheckProvisionedProductDestroy(ctx), @@ -1713,11 +1727,12 @@ func TestAccServiceCatalogProvisionedProduct_tags_DefaultTags_nullOverlappingRes func TestAccServiceCatalogProvisionedProduct_tags_DefaultTags_nullNonOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.ProvisionedProductDetail 
resourceName := "aws_servicecatalog_provisioned_product.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ServiceCatalogServiceID), CheckDestroy: testAccCheckProvisionedProductDestroy(ctx), @@ -1778,11 +1793,12 @@ func TestAccServiceCatalogProvisionedProduct_tags_DefaultTags_nullNonOverlapping func TestAccServiceCatalogProvisionedProduct_tags_ComputedTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.ProvisionedProductDetail resourceName := "aws_servicecatalog_provisioned_product.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ServiceCatalogServiceID), CheckDestroy: testAccCheckProvisionedProductDestroy(ctx), @@ -1836,11 +1852,12 @@ func TestAccServiceCatalogProvisionedProduct_tags_ComputedTag_OnCreate(t *testin func TestAccServiceCatalogProvisionedProduct_tags_ComputedTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.ProvisionedProductDetail resourceName := "aws_servicecatalog_provisioned_product.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ServiceCatalogServiceID), CheckDestroy: testAccCheckProvisionedProductDestroy(ctx), @@ -1936,11 +1953,12 @@ func TestAccServiceCatalogProvisionedProduct_tags_ComputedTag_OnUpdate_Add(t *te func 
TestAccServiceCatalogProvisionedProduct_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.ProvisionedProductDetail resourceName := "aws_servicecatalog_provisioned_product.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ServiceCatalogServiceID), CheckDestroy: testAccCheckProvisionedProductDestroy(ctx), @@ -2026,11 +2044,12 @@ func TestAccServiceCatalogProvisionedProduct_tags_ComputedTag_OnUpdate_Replace(t func TestAccServiceCatalogProvisionedProduct_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.ProvisionedProductDetail resourceName := "aws_servicecatalog_provisioned_product.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ServiceCatalogServiceID), CheckDestroy: testAccCheckProvisionedProductDestroy(ctx), @@ -2188,11 +2207,12 @@ func TestAccServiceCatalogProvisionedProduct_tags_IgnoreTags_Overlap_DefaultTag( func TestAccServiceCatalogProvisionedProduct_tags_IgnoreTags_Overlap_ResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.ProvisionedProductDetail resourceName := "aws_servicecatalog_provisioned_product.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, 
names.ServiceCatalogServiceID), CheckDestroy: testAccCheckProvisionedProductDestroy(ctx), diff --git a/internal/service/servicecatalog/provisioned_product_test.go b/internal/service/servicecatalog/provisioned_product_test.go index 048b9294b70b..2984edf4b08d 100644 --- a/internal/service/servicecatalog/provisioned_product_test.go +++ b/internal/service/servicecatalog/provisioned_product_test.go @@ -11,15 +11,18 @@ import ( "github.com/YakDriver/regexache" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/service/servicecatalog" awstypes "github.com/aws/aws-sdk-go-v2/service/servicecatalog/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/knownvalue" + "github.com/hashicorp/terraform-plugin-testing/plancheck" + "github.com/hashicorp/terraform-plugin-testing/statecheck" "github.com/hashicorp/terraform-plugin-testing/terraform" + "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" "github.com/hashicorp/terraform-provider-aws/internal/acctest" "github.com/hashicorp/terraform-provider-aws/internal/conns" - "github.com/hashicorp/terraform-provider-aws/internal/errs" tfservicecatalog "github.com/hashicorp/terraform-provider-aws/internal/service/servicecatalog" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -457,6 +460,127 @@ func TestAccServiceCatalogProvisionedProduct_productTagUpdateAfterError(t *testi }) } +// Validates that a provisioned product in tainted status properly triggers an update +// on subsequent applies. 
+// Ref: https://github.com/hashicorp/terraform-provider-aws/issues/42585 +func TestAccServiceCatalogProvisionedProduct_retryTaintedUpdate(t *testing.T) { + ctx := acctest.Context(t) + resourceName := "aws_servicecatalog_provisioned_product.test" + artifactsDataSourceName := "data.aws_servicecatalog_provisioning_artifacts.product_artifacts" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + initialArtifactID := "provisioning_artifact_details.0.id" + newArtifactID := "provisioning_artifact_details.1.id" + var v awstypes.ProvisionedProductDetail + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.ServiceCatalogServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckProvisionedProductDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1 - Setup + { + Config: testAccProvisionedProductConfig_retryTaintedUpdate(rName, false, false, "original"), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckProvisionedProductExists(ctx, resourceName, &v), + resource.TestCheckResourceAttrPair(resourceName, "provisioning_artifact_id", artifactsDataSourceName, initialArtifactID), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrStatus), knownvalue.StringExact("AVAILABLE")), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("provisioning_parameters"), knownvalue.ListExact([]knownvalue.Check{ + knownvalue.ObjectExact(map[string]knownvalue.Check{ + names.AttrKey: knownvalue.StringExact("FailureSimulation"), + "use_previous_value": knownvalue.Bool(false), + names.AttrValue: knownvalue.StringExact(acctest.CtFalse), + }), + knownvalue.ObjectExact(map[string]knownvalue.Check{ + names.AttrKey: knownvalue.StringExact("ExtraParam"), + "use_previous_value": knownvalue.Bool(false), + names.AttrValue: knownvalue.StringExact("original"), + }), + })), + }, + }, + 
// Step 2 - Trigger a failure, leaving the provisioned product tainted + { + Config: testAccProvisionedProductConfig_retryTaintedUpdate(rName, true, true, "updated"), + ExpectError: regexache.MustCompile(`The following resource\(s\) failed to update:`), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New("provisioning_parameters"), knownvalue.ListExact([]knownvalue.Check{ + knownvalue.ObjectExact(map[string]knownvalue.Check{ + names.AttrKey: knownvalue.StringExact("FailureSimulation"), + "use_previous_value": knownvalue.Bool(false), + names.AttrValue: knownvalue.StringExact(acctest.CtTrue), + }), + knownvalue.ObjectExact(map[string]knownvalue.Check{ + names.AttrKey: knownvalue.StringExact("ExtraParam"), + "use_previous_value": knownvalue.Bool(false), + names.AttrValue: knownvalue.StringExact("updated"), + }), + })), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrStatus), knownvalue.StringExact("TAINTED")), + // Verify state is rolled back to the parameters from the original setup run + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("provisioning_parameters"), knownvalue.ListExact([]knownvalue.Check{ + knownvalue.ObjectExact(map[string]knownvalue.Check{ + names.AttrKey: knownvalue.StringExact("FailureSimulation"), + "use_previous_value": knownvalue.Bool(false), + names.AttrValue: knownvalue.StringExact(acctest.CtFalse), + }), + knownvalue.ObjectExact(map[string]knownvalue.Check{ + names.AttrKey: knownvalue.StringExact("ExtraParam"), + "use_previous_value": knownvalue.Bool(false), + names.AttrValue: knownvalue.StringExact("original"), + }), + })), + }, + }, + // Step 3 - Verify an update is planned, even without configuration changes + { + Config: testAccProvisionedProductConfig_retryTaintedUpdate(rName, true, true, 
"updated"), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + }, + }, + ExpectError: regexache.MustCompile(`The following resource\(s\) failed to update:`), + }, + // Step 4 - Resolve the failure, verifying an update is completed + { + Config: testAccProvisionedProductConfig_retryTaintedUpdate(rName, true, false, "updated"), + Check: resource.ComposeTestCheckFunc( + testAccCheckProvisionedProductExists(ctx, resourceName, &v), + resource.TestCheckResourceAttrPair(resourceName, "provisioning_artifact_id", artifactsDataSourceName, newArtifactID), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrStatus), knownvalue.StringExact("AVAILABLE")), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("provisioning_parameters"), knownvalue.ListExact([]knownvalue.Check{ + knownvalue.ObjectExact(map[string]knownvalue.Check{ + names.AttrKey: knownvalue.StringExact("FailureSimulation"), + "use_previous_value": knownvalue.Bool(false), + names.AttrValue: knownvalue.StringExact(acctest.CtFalse), + }), + knownvalue.ObjectExact(map[string]knownvalue.Check{ + names.AttrKey: knownvalue.StringExact("ExtraParam"), + "use_previous_value": knownvalue.Bool(false), + names.AttrValue: knownvalue.StringExact("updated"), + }), + })), + }, + }, + }, + }) +} + func testAccCheckProvisionedProductDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { conn := acctest.Provider.Meta().(*conns.AWSClient).ServiceCatalogClient(ctx) @@ -466,13 +590,9 @@ func testAccCheckProvisionedProductDestroy(ctx context.Context) resource.TestChe continue } - input := &servicecatalog.DescribeProvisionedProductInput{ - Id: 
aws.String(rs.Primary.ID), - AcceptLanguage: aws.String(rs.Primary.Attributes["accept_language"]), - } - _, err := conn.DescribeProvisionedProduct(ctx, input) + _, err := tfservicecatalog.FindProvisionedProductByTwoPartKey(ctx, conn, rs.Primary.ID, rs.Primary.Attributes["accept_language"]) - if errs.IsA[*awstypes.ResourceNotFoundException](err) { + if tfresource.NotFound(err) { continue } @@ -480,29 +600,29 @@ func testAccCheckProvisionedProductDestroy(ctx context.Context) resource.TestChe return err } - return fmt.Errorf("Service Catalog Provisioned Product (%s) still exists", rs.Primary.ID) + return fmt.Errorf("Service Catalog Provisioned Product %s still exists", rs.Primary.ID) } return nil } } -func testAccCheckProvisionedProductExists(ctx context.Context, resourceName string, pprod *awstypes.ProvisionedProductDetail) resource.TestCheckFunc { +func testAccCheckProvisionedProductExists(ctx context.Context, n string, v *awstypes.ProvisionedProductDetail) resource.TestCheckFunc { return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[resourceName] - + rs, ok := s.RootModule().Resources[n] if !ok { - return fmt.Errorf("resource not found: %s", resourceName) + return fmt.Errorf("Not found: %s", n) } conn := acctest.Provider.Meta().(*conns.AWSClient).ServiceCatalogClient(ctx) - out, err := tfservicecatalog.WaitProvisionedProductReady(ctx, conn, tfservicecatalog.AcceptLanguageEnglish, rs.Primary.ID, "", tfservicecatalog.ProvisionedProductReadyTimeout) + output, err := tfservicecatalog.FindProvisionedProductByTwoPartKey(ctx, conn, rs.Primary.ID, rs.Primary.Attributes["accept_language"]) + if err != nil { - return fmt.Errorf("describing Service Catalog Provisioned Product (%s): %w", rs.Primary.ID, err) + return err } - *pprod = *out.ProvisionedProductDetail + *v = *output.ProvisionedProductDetail return nil } @@ -988,3 +1108,71 @@ resource "aws_s3_bucket" "conflict" { } `, rName, conflictingBucketName, tagValue)) } + +func 
testAccProvisionedProductConfig_retryTaintedUpdate(rName string, useNewVersion bool, simulateFailure bool, extraParam string) string { + return acctest.ConfigCompose( + testAccProvisionedProductPortfolioBaseConfig(rName), + fmt.Sprintf(` +locals { + initial_provisioning_artifact = data.aws_servicecatalog_provisioning_artifacts.product_artifacts.provisioning_artifact_details[0] + new_provisioning_artifact = data.aws_servicecatalog_provisioning_artifacts.product_artifacts.provisioning_artifact_details[1] +} + +resource "aws_servicecatalog_provisioned_product" "test" { + name = %[1]q + product_id = aws_servicecatalog_product.test.id + provisioning_artifact_id = %[2]t ? local.new_provisioning_artifact.id : local.initial_provisioning_artifact.id + + provisioning_parameters { + key = "FailureSimulation" + value = "%[3]t" + } + + provisioning_parameters { + key = "ExtraParam" + value = %[4]q + } +} + +resource "aws_servicecatalog_product" "test" { + description = %[1]q + name = %[1]q + owner = "test" + type = "CLOUD_FORMATION_TEMPLATE" + + provisioning_artifact_parameters { + name = "%[1]s - Initial" + description = "Initial" + template_url = "https://${aws_s3_bucket.test.bucket_regional_domain_name}/${aws_s3_object.test.key}" + type = "CLOUD_FORMATION_TEMPLATE" + } +} + +resource "aws_servicecatalog_provisioning_artifact" "new_version" { + product_id = aws_servicecatalog_product.test.id + + name = "%[1]s - New" + description = "New" + template_url = "https://${aws_s3_bucket.test.bucket_regional_domain_name}/${aws_s3_object.test.key}" + type = "CLOUD_FORMATION_TEMPLATE" +} + +data "aws_servicecatalog_provisioning_artifacts" "product_artifacts" { + product_id = aws_servicecatalog_product.test.id + + depends_on = [aws_servicecatalog_provisioning_artifact.new_version] +} + +resource "aws_s3_bucket" "test" { + bucket = %[1]q + force_destroy = true +} + +resource "aws_s3_object" "test" { + bucket = aws_s3_bucket.test.id + key = "product_template.yaml" + + source = 
"${path.module}/testdata/retry-tainted-update/product_template.yaml" +} +`, rName, useNewVersion, simulateFailure, extraParam)) +} diff --git a/internal/service/servicecatalog/provisioning_artifact.go b/internal/service/servicecatalog/provisioning_artifact.go index 1793be1e7aa5..ac0100a04d9c 100644 --- a/internal/service/servicecatalog/provisioning_artifact.go +++ b/internal/service/servicecatalog/provisioning_artifact.go @@ -13,7 +13,6 @@ import ( awstypes "github.com/aws/aws-sdk-go-v2/service/servicecatalog/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -139,26 +138,22 @@ func resourceProvisioningArtifactCreate(ctx context.Context, d *schema.ResourceD } var output *servicecatalog.CreateProvisioningArtifactOutput - err := retry.RetryContext(ctx, d.Timeout(schema.TimeoutCreate), func() *retry.RetryError { + err := tfresource.Retry(ctx, d.Timeout(schema.TimeoutCreate), func(ctx context.Context) *tfresource.RetryError { var err error output, err = conn.CreateProvisioningArtifact(ctx, input) if errs.IsAErrorMessageContains[*awstypes.InvalidParametersException](err, "profile does not exist") { - return retry.RetryableError(err) + return tfresource.RetryableError(err) } if err != nil { - return retry.NonRetryableError(err) + return tfresource.NonRetryableError(err) } return nil }) - if tfresource.TimedOut(err) { - output, err = conn.CreateProvisioningArtifact(ctx, input) - } - if err != nil { return sdkdiag.AppendErrorf(diags, "creating Service Catalog Provisioning Artifact: %s", err) } @@ -258,24 +253,20 @@ func resourceProvisioningArtifactUpdate(ctx context.Context, d *schema.ResourceD input.Name = aws.String(v.(string)) } - err = 
retry.RetryContext(ctx, d.Timeout(schema.TimeoutUpdate), func() *retry.RetryError { + err = tfresource.Retry(ctx, d.Timeout(schema.TimeoutUpdate), func(ctx context.Context) *tfresource.RetryError { _, err := conn.UpdateProvisioningArtifact(ctx, input) if errs.IsAErrorMessageContains[*awstypes.InvalidParametersException](err, "profile does not exist") { - return retry.RetryableError(err) + return tfresource.RetryableError(err) } if err != nil { - return retry.NonRetryableError(err) + return tfresource.NonRetryableError(err) } return nil }) - if tfresource.TimedOut(err) { - _, err = conn.UpdateProvisioningArtifact(ctx, input) - } - if err != nil { return sdkdiag.AppendErrorf(diags, "updating Service Catalog Provisioning Artifact (%s): %s", d.Id(), err) } diff --git a/internal/service/servicecatalog/service_action.go b/internal/service/servicecatalog/service_action.go index e85c7706e018..08691e11aca6 100644 --- a/internal/service/servicecatalog/service_action.go +++ b/internal/service/servicecatalog/service_action.go @@ -12,7 +12,6 @@ import ( awstypes "github.com/aws/aws-sdk-go-v2/service/servicecatalog/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -115,26 +114,22 @@ func resourceServiceActionCreate(ctx context.Context, d *schema.ResourceData, me } var output *servicecatalog.CreateServiceActionOutput - err := retry.RetryContext(ctx, d.Timeout(schema.TimeoutCreate), func() *retry.RetryError { + err := tfresource.Retry(ctx, d.Timeout(schema.TimeoutCreate), func(ctx context.Context) *tfresource.RetryError { var err error output, err = conn.CreateServiceAction(ctx, input) if errs.IsAErrorMessageContains[*awstypes.InvalidParametersException](err, "profile 
does not exist") { - return retry.RetryableError(err) + return tfresource.RetryableError(err) } if err != nil { - return retry.NonRetryableError(err) + return tfresource.NonRetryableError(err) } return nil }) - if tfresource.TimedOut(err) { - output, err = conn.CreateServiceAction(ctx, input) - } - if err != nil { return sdkdiag.AppendErrorf(diags, "creating Service Catalog Service Action: %s", err) } @@ -206,24 +201,20 @@ func resourceServiceActionUpdate(ctx context.Context, d *schema.ResourceData, me input.Name = aws.String(d.Get(names.AttrName).(string)) } - err := retry.RetryContext(ctx, d.Timeout(schema.TimeoutUpdate), func() *retry.RetryError { + err := tfresource.Retry(ctx, d.Timeout(schema.TimeoutUpdate), func(ctx context.Context) *tfresource.RetryError { _, err := conn.UpdateServiceAction(ctx, input) if errs.IsAErrorMessageContains[*awstypes.InvalidParametersException](err, "profile does not exist") { - return retry.RetryableError(err) + return tfresource.RetryableError(err) } if err != nil { - return retry.NonRetryableError(err) + return tfresource.NonRetryableError(err) } return nil }) - if tfresource.TimedOut(err) { - _, err = conn.UpdateServiceAction(ctx, input) - } - if err != nil { return sdkdiag.AppendErrorf(diags, "updating Service Catalog Service Action (%s): %s", d.Id(), err) } @@ -239,24 +230,20 @@ func resourceServiceActionDelete(ctx context.Context, d *schema.ResourceData, me Id: aws.String(d.Id()), } - err := retry.RetryContext(ctx, d.Timeout(schema.TimeoutDelete), func() *retry.RetryError { + err := tfresource.Retry(ctx, d.Timeout(schema.TimeoutDelete), func(ctx context.Context) *tfresource.RetryError { _, err := conn.DeleteServiceAction(ctx, input) if errs.IsA[*awstypes.ResourceInUseException](err) { - return retry.RetryableError(err) + return tfresource.RetryableError(err) } if err != nil { - return retry.NonRetryableError(err) + return tfresource.NonRetryableError(err) } return nil }) - if tfresource.TimedOut(err) { - _, err = 
conn.DeleteServiceAction(ctx, input) - } - if errs.IsA[*awstypes.ResourceNotFoundException](err) { log.Printf("[INFO] Attempted to delete Service Action (%s) but does not exist", d.Id()) return diags diff --git a/internal/service/servicecatalog/service_endpoint_resolver_gen.go b/internal/service/servicecatalog/service_endpoint_resolver_gen.go index 2bb71adfe74a..0d54ece32b50 100644 --- a/internal/service/servicecatalog/service_endpoint_resolver_gen.go +++ b/internal/service/servicecatalog/service_endpoint_resolver_gen.go @@ -62,7 +62,7 @@ func (r resolverV2) ResolveEndpoint(ctx context.Context, params servicecatalog.E }) params.UseFIPS = aws.Bool(false) } else { - err = fmt.Errorf("looking up servicecatalog endpoint %q: %s", hostname, err) + err = fmt.Errorf("looking up servicecatalog endpoint %q: %w", hostname, err) return } } else { diff --git a/internal/service/servicecatalog/service_endpoints_gen_test.go b/internal/service/servicecatalog/service_endpoints_gen_test.go index 5558bede9de2..eae203f9cff8 100644 --- a/internal/service/servicecatalog/service_endpoints_gen_test.go +++ b/internal/service/servicecatalog/service_endpoints_gen_test.go @@ -521,7 +521,7 @@ func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { ) } -var errCancelOperation = fmt.Errorf("Test: Canceling request") +var errCancelOperation = errors.New("Test: Canceling request") func addCancelRequestMiddleware() func(*middleware.Stack) error { return func(stack *middleware.Stack) error { diff --git a/internal/service/servicecatalog/service_package_gen.go b/internal/service/servicecatalog/service_package_gen.go index 4665dbb4268a..396327b20798 100644 --- a/internal/service/servicecatalog/service_package_gen.go +++ b/internal/service/servicecatalog/service_package_gen.go @@ -7,7 +7,6 @@ import ( "unique" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/servicecatalog" 
"github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -178,7 +177,7 @@ func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) ( func(o *servicecatalog.Options) { if inContext, ok := conns.FromContext(ctx); ok && inContext.VCREnabled() { tflog.Info(ctx, "overriding retry behavior to immediately return VCR errors") - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(vcr.InteractionNotFoundRetryableFunc)) + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), vcr.InteractionNotFoundRetryableFunc) } }, withExtraOptions(ctx, p, config), diff --git a/internal/service/servicecatalog/status.go b/internal/service/servicecatalog/status.go index 08543cedd860..0b19f76ebaf7 100644 --- a/internal/service/servicecatalog/status.go +++ b/internal/service/servicecatalog/status.go @@ -90,7 +90,13 @@ func statusPortfolioShareWithToken(ctx context.Context, conn *servicecatalog.Cli return nil, statusUnavailable, fmt.Errorf("describing portfolio share status: empty response") } - return output, string(output.Status), err + status := output.Status + if (status == awstypes.ShareStatusCompletedWithErrors || status == awstypes.ShareStatusError) && + output.ShareDetails != nil && output.ShareDetails.ShareErrors != nil && len(output.ShareDetails.ShareErrors) > 0 { + return output, string(status), fmt.Errorf("portfolio share status: %+v", output.ShareDetails.ShareErrors) + } + + return output, string(status), err } } @@ -321,39 +327,6 @@ func statusLaunchPaths(ctx context.Context, conn *servicecatalog.Client, acceptL } } -func statusProvisionedProduct(ctx context.Context, conn *servicecatalog.Client, acceptLanguage, id, name string) retry.StateRefreshFunc { - return func() (any, string, error) { - input := &servicecatalog.DescribeProvisionedProductInput{} - - if acceptLanguage != "" { - input.AcceptLanguage = aws.String(acceptLanguage) - } - - // 
one or the other but not both - if id != "" { - input.Id = aws.String(id) - } else if name != "" { - input.Name = aws.String(name) - } - - output, err := conn.DescribeProvisionedProduct(ctx, input) - - if errs.IsA[*awstypes.ResourceNotFoundException](err) { - return nil, "", nil - } - - if err != nil { - return nil, "", err - } - - if output == nil || output.ProvisionedProductDetail == nil { - return nil, "", nil - } - - return output, string(output.ProvisionedProductDetail.Status), err - } -} - func statusPortfolioConstraints(ctx context.Context, conn *servicecatalog.Client, acceptLanguage, portfolioID, productID string) retry.StateRefreshFunc { return func() (any, string, error) { input := &servicecatalog.ListConstraintsForPortfolioInput{ diff --git a/internal/service/servicecatalog/sweep.go b/internal/service/servicecatalog/sweep.go index 2b6a3d189dfc..0e3d0ac0c365 100644 --- a/internal/service/servicecatalog/sweep.go +++ b/internal/service/servicecatalog/sweep.go @@ -88,7 +88,7 @@ func sweepBudgetResourceAssociations(region string) error { client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %w", err) + return fmt.Errorf("getting client: %w", err) } conn := client.ServiceCatalogClient(ctx) @@ -180,7 +180,7 @@ func sweepConstraints(region string) error { client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %w", err) + return fmt.Errorf("getting client: %w", err) } conn := client.ServiceCatalogClient(ctx) @@ -237,7 +237,7 @@ func sweepPrincipalPortfolioAssociations(region string) error { client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %w", err) + return fmt.Errorf("getting client: %w", err) } conn := client.ServiceCatalogClient(ctx) @@ -294,7 +294,7 @@ func sweepProductPortfolioAssociations(region string) error { client, err := sweep.SharedRegionalSweepClient(ctx, 
region) if err != nil { - return fmt.Errorf("error getting client: %w", err) + return fmt.Errorf("getting client: %w", err) } conn := client.ServiceCatalogClient(ctx) @@ -369,7 +369,7 @@ func sweepProducts(region string) error { client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %w", err) + return fmt.Errorf("getting client: %w", err) } conn := client.ServiceCatalogClient(ctx) @@ -416,7 +416,7 @@ func sweepProvisionedProducts(region string) error { client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %w", err) + return fmt.Errorf("getting client: %w", err) } conn := client.ServiceCatalogClient(ctx) @@ -465,7 +465,7 @@ func sweepProvisioningArtifacts(region string) error { client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %w", err) + return fmt.Errorf("getting client: %w", err) } conn := client.ServiceCatalogClient(ctx) @@ -525,7 +525,7 @@ func sweepServiceActions(region string) error { client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %w", err) + return fmt.Errorf("getting client: %w", err) } conn := client.ServiceCatalogClient(ctx) @@ -568,7 +568,7 @@ func sweepTagOptionResourceAssociations(region string) error { client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %w", err) + return fmt.Errorf("getting client: %w", err) } conn := client.ServiceCatalogClient(ctx) @@ -634,7 +634,7 @@ func sweepTagOptions(region string) error { client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %w", err) + return fmt.Errorf("getting client: %w", err) } conn := client.ServiceCatalogClient(ctx) diff --git a/internal/service/servicecatalog/tag_option.go 
b/internal/service/servicecatalog/tag_option.go index 2828d91d4886..cbb61af619fb 100644 --- a/internal/service/servicecatalog/tag_option.go +++ b/internal/service/servicecatalog/tag_option.go @@ -11,7 +11,6 @@ import ( "github.com/aws/aws-sdk-go-v2/service/servicecatalog" awstypes "github.com/aws/aws-sdk-go-v2/service/servicecatalog/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/errs" @@ -71,26 +70,22 @@ func resourceTagOptionCreate(ctx context.Context, d *schema.ResourceData, meta a } var output *servicecatalog.CreateTagOptionOutput - err := retry.RetryContext(ctx, d.Timeout(schema.TimeoutCreate), func() *retry.RetryError { + err := tfresource.Retry(ctx, d.Timeout(schema.TimeoutCreate), func(ctx context.Context) *tfresource.RetryError { var err error output, err = conn.CreateTagOption(ctx, input) if errs.IsAErrorMessageContains[*awstypes.InvalidParametersException](err, "profile does not exist") { - return retry.RetryableError(err) + return tfresource.RetryableError(err) } if err != nil { - return retry.NonRetryableError(err) + return tfresource.NonRetryableError(err) } return nil }) - if tfresource.TimedOut(err) { - output, err = conn.CreateTagOption(ctx, input) - } - if err != nil { return sdkdiag.AppendErrorf(diags, "creating Service Catalog Tag Option: %s", err) } @@ -165,24 +160,20 @@ func resourceTagOptionUpdate(ctx context.Context, d *schema.ResourceData, meta a input.Value = aws.String(d.Get(names.AttrValue).(string)) } - err := retry.RetryContext(ctx, d.Timeout(schema.TimeoutUpdate), func() *retry.RetryError { + err := tfresource.Retry(ctx, d.Timeout(schema.TimeoutUpdate), func(ctx context.Context) *tfresource.RetryError { _, err := conn.UpdateTagOption(ctx, input) if 
errs.IsAErrorMessageContains[*awstypes.InvalidParametersException](err, "profile does not exist") { - return retry.RetryableError(err) + return tfresource.RetryableError(err) } if err != nil { - return retry.NonRetryableError(err) + return tfresource.NonRetryableError(err) } return nil }) - if tfresource.TimedOut(err) { - _, err = conn.UpdateTagOption(ctx, input) - } - if err != nil { return sdkdiag.AppendErrorf(diags, "updating Service Catalog Tag Option (%s): %s", d.Id(), err) } diff --git a/internal/service/servicecatalog/tag_option_resource_association.go b/internal/service/servicecatalog/tag_option_resource_association.go index e2a31cf734ed..0848c9d8b873 100644 --- a/internal/service/servicecatalog/tag_option_resource_association.go +++ b/internal/service/servicecatalog/tag_option_resource_association.go @@ -12,7 +12,6 @@ import ( "github.com/aws/aws-sdk-go-v2/service/servicecatalog" awstypes "github.com/aws/aws-sdk-go-v2/service/servicecatalog/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/errs" @@ -78,26 +77,22 @@ func resourceTagOptionResourceAssociationCreate(ctx context.Context, d *schema.R } var output *servicecatalog.AssociateTagOptionWithResourceOutput - err := retry.RetryContext(ctx, d.Timeout(schema.TimeoutCreate), func() *retry.RetryError { + err := tfresource.Retry(ctx, d.Timeout(schema.TimeoutCreate), func(ctx context.Context) *tfresource.RetryError { var err error output, err = conn.AssociateTagOptionWithResource(ctx, input) if errs.IsAErrorMessageContains[*awstypes.InvalidParametersException](err, "profile does not exist") { - return retry.RetryableError(err) + return tfresource.RetryableError(err) } if err != nil { - return retry.NonRetryableError(err) + return tfresource.NonRetryableError(err) } 
return nil }) - if tfresource.TimedOut(err) { - output, err = conn.AssociateTagOptionWithResource(ctx, input) - } - if err != nil { return sdkdiag.AppendErrorf(diags, "associating Service Catalog Tag Option with Resource: %s", err) } diff --git a/internal/service/servicecatalog/testdata/retry-tainted-update/product_template.yaml b/internal/service/servicecatalog/testdata/retry-tainted-update/product_template.yaml new file mode 100644 index 000000000000..af531995bdfa --- /dev/null +++ b/internal/service/servicecatalog/testdata/retry-tainted-update/product_template.yaml @@ -0,0 +1,169 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +AWSTemplateFormatVersion: '2010-09-09' +Description: 'Test product template for taint reproduction' + +Parameters: + ExtraParam: + Type: String + Description: Extra parameter + Default: 'none' + + FailureSimulation: + Type: String + Description: Boolean to simulate failure + Default: 'false' + AllowedValues: + - 'true' + - 'false' + +Resources: + TestLambdaFunction: + Type: AWS::Lambda::Function + Properties: + FunctionName: !Sub '${AWS::StackName}-test-function' + Handler: index.handler + Role: !GetAtt LambdaExecutionRole.Arn + Runtime: nodejs18.x + Code: + ZipFile: | + // cfnresponse module inline + const cfnresponse = (() => { + const SUCCESS = "SUCCESS"; + const FAILED = "FAILED"; + + function send(event, context, responseStatus, responseData, physicalResourceId, noEcho) { + return new Promise((resolve, reject) => { + const responseBody = JSON.stringify({ + Status: responseStatus, + Reason: `See the details in CloudWatch Log Stream: ${context.logStreamName}`, + PhysicalResourceId: physicalResourceId || context.logStreamName, + StackId: event.StackId, + RequestId: event.RequestId, + LogicalResourceId: event.LogicalResourceId, + NoEcho: noEcho || false, + Data: responseData || {} + }); + + console.log("Response body:", responseBody); + + const https = require("https"); + const url = require("url"); + + const 
parsedUrl = url.parse(event.ResponseURL); + const options = { + hostname: parsedUrl.hostname, + port: 443, + path: parsedUrl.path, + method: "PUT", + headers: { + "content-type": "", + "content-length": responseBody.length + } + }; + + const request = https.request(options, (response) => { + console.log(`Status code: ${response.statusCode}`); + console.log(`Status message: ${response.statusMessage}`); + resolve(); + }); + + request.on("error", (error) => { + console.log(`Send CFN response failed: ${error}`); + reject(error); + }); + + request.write(responseBody); + request.end(); + }); + } + + return { + SUCCESS, + FAILED, + send + }; + })(); + + exports.handler = async (event, context) => { + console.log('Event:', JSON.stringify(event)); + + // For CloudFormation custom resources + if (event.RequestType) { + try { + // Simulate failure if parameter is set to true + if (event.ResourceProperties.FailureSimulation === 'true' && + (event.RequestType === 'Create' || event.RequestType === 'Update')) { + console.log('Simulating failure as requested'); + await cfnresponse.send(event, context, cfnresponse.FAILED, { + Message: 'Simulated failure' + }); + return; + } + + // Process the request based on the RequestType + let responseData = {}; + + if (event.RequestType === 'Create' || event.RequestType === 'Update') { + // Perform create or update actions + responseData = { + Message: 'Resource created/updated successfully', + Timestamp: new Date().toISOString() + }; + } else if (event.RequestType === 'Delete') { + // Perform delete actions + responseData = { + Message: 'Resource deleted successfully' + }; + } + + // Send success response + await cfnresponse.send(event, context, cfnresponse.SUCCESS, responseData); + } catch (error) { + console.error('Error:', error); + await cfnresponse.send(event, context, cfnresponse.FAILED, { + Error: error.message + }); + } + } else { + // For direct Lambda invocations + // Simulate failure if parameter is set to true + if 
(event.FailureSimulation === 'true') { + throw new Error('Simulated failure'); + } + + return { + statusCode: 200, + body: JSON.stringify('Success'), + }; + } + }; + Environment: + Variables: + FailureSimulation: !Ref FailureSimulation + ExtraParam: !Ref ExtraParam + + LambdaExecutionRole: + Type: AWS::IAM::Role + Properties: + AssumeRolePolicyDocument: + Version: '2012-10-17' + Statement: + - Effect: Allow + Principal: + Service: lambda.amazonaws.com + Action: 'sts:AssumeRole' + ManagedPolicyArns: + - 'arn:aws:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole' + + TestInvocation: + Type: Custom::TestInvocation + Properties: + ServiceToken: !GetAtt TestLambdaFunction.Arn + FailureSimulation: !Ref FailureSimulation + +Outputs: + LambdaArn: + Description: ARN of the Lambda function + Value: !GetAtt TestLambdaFunction.Arn diff --git a/internal/service/servicecatalog/wait.go b/internal/service/servicecatalog/wait.go index d3600429438a..ac1c408f74ba 100644 --- a/internal/service/servicecatalog/wait.go +++ b/internal/service/servicecatalog/wait.go @@ -5,10 +5,8 @@ package servicecatalog import ( "context" - "errors" "time" - "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/servicecatalog" awstypes "github.com/aws/aws-sdk-go-v2/service/servicecatalog/types" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" @@ -42,10 +40,6 @@ const ( ProductReadTimeout = 10 * time.Minute ProductReadyTimeout = 5 * time.Minute ProductUpdateTimeout = 5 * time.Minute - ProvisionedProductDeleteTimeout = 30 * time.Minute - ProvisionedProductReadTimeout = 10 * time.Minute - ProvisionedProductReadyTimeout = 30 * time.Minute - ProvisionedProductUpdateTimeout = 30 * time.Minute ProvisioningArtifactDeleteTimeout = 3 * time.Minute ProvisioningArtifactReadTimeout = 10 * time.Minute ProvisioningArtifactReadyTimeout = 3 * time.Minute @@ -173,10 +167,10 @@ func waitPortfolioShareReady(ctx context.Context, conn *servicecatalog.Client, p return nil, err } -func 
waitPortfolioShareCreatedWithToken(ctx context.Context, conn *servicecatalog.Client, token string, acceptRequired bool, timeout time.Duration) (*servicecatalog.DescribePortfolioShareStatusOutput, error) { +func waitPortfolioShareCreatedWithToken(ctx context.Context, conn *servicecatalog.Client, token string, waitForAcceptance bool, timeout time.Duration) (*servicecatalog.DescribePortfolioShareStatusOutput, error) { targets := enum.Slice(awstypes.ShareStatusCompleted) - if !acceptRequired { + if !waitForAcceptance { targets = append(targets, string(awstypes.ShareStatusInProgress)) } @@ -464,50 +458,6 @@ func waitLaunchPathsReady(ctx context.Context, conn *servicecatalog.Client, acce return nil, err } -func waitProvisionedProductReady(ctx context.Context, conn *servicecatalog.Client, acceptLanguage, id, name string, timeout time.Duration) (*servicecatalog.DescribeProvisionedProductOutput, error) { - stateConf := &retry.StateChangeConf{ - Pending: enum.Slice(awstypes.ProvisionedProductStatusUnderChange, awstypes.ProvisionedProductStatusPlanInProgress), - Target: enum.Slice(awstypes.ProvisionedProductStatusAvailable), - Refresh: statusProvisionedProduct(ctx, conn, acceptLanguage, id, name), - Timeout: timeout, - ContinuousTargetOccurence: continuousTargetOccurrence, - NotFoundChecks: notFoundChecks, - MinTimeout: minTimeout, - } - - outputRaw, err := stateConf.WaitForStateContext(ctx) - - if output, ok := outputRaw.(*servicecatalog.DescribeProvisionedProductOutput); ok { - if detail := output.ProvisionedProductDetail; detail != nil { - var foo *retry.UnexpectedStateError - if errors.As(err, &foo) { - // The statuses `ERROR` and `TAINTED` are equivalent: the application of the requested change has failed. - // The difference is that, in the case of `TAINTED`, there is a previous version to roll back to. 
- status := string(detail.Status) - if status == string(awstypes.ProvisionedProductStatusError) || status == string(awstypes.ProvisionedProductStatusTainted) { - return output, errors.New(aws.ToString(detail.StatusMessage)) - } - } - } - return output, err - } - - return nil, err -} - -func waitProvisionedProductTerminated(ctx context.Context, conn *servicecatalog.Client, acceptLanguage, id, name string, timeout time.Duration) error { - stateConf := &retry.StateChangeConf{ - Pending: enum.Slice(awstypes.ProvisionedProductStatusAvailable, awstypes.ProvisionedProductStatusUnderChange), - Target: []string{}, - Refresh: statusProvisionedProduct(ctx, conn, acceptLanguage, id, name), - Timeout: timeout, - } - - _, err := stateConf.WaitForStateContext(ctx) - - return err -} - func waitPortfolioConstraintsReady(ctx context.Context, conn *servicecatalog.Client, acceptLanguage, portfolioID, productID string, timeout time.Duration) ([]awstypes.ConstraintDetail, error) { stateConf := &retry.StateChangeConf{ Pending: []string{statusNotFound}, diff --git a/internal/service/servicecatalogappregistry/application_data_source_tags_gen_test.go b/internal/service/servicecatalogappregistry/application_data_source_tags_gen_test.go index 899a8b27ac8b..d86b140550ec 100644 --- a/internal/service/servicecatalogappregistry/application_data_source_tags_gen_test.go +++ b/internal/service/servicecatalogappregistry/application_data_source_tags_gen_test.go @@ -6,7 +6,6 @@ import ( "testing" "github.com/hashicorp/terraform-plugin-testing/config" - sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/knownvalue" "github.com/hashicorp/terraform-plugin-testing/statecheck" @@ -17,10 +16,11 @@ import ( func TestAccServiceCatalogAppRegistryApplicationDataSource_tags(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := 
"data.aws_servicecatalogappregistry_application.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ServiceCatalogAppRegistryServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -45,10 +45,11 @@ func TestAccServiceCatalogAppRegistryApplicationDataSource_tags(t *testing.T) { func TestAccServiceCatalogAppRegistryApplicationDataSource_tags_NullMap(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_servicecatalogappregistry_application.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ServiceCatalogAppRegistryServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -69,10 +70,11 @@ func TestAccServiceCatalogAppRegistryApplicationDataSource_tags_NullMap(t *testi func TestAccServiceCatalogAppRegistryApplicationDataSource_tags_EmptyMap(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_servicecatalogappregistry_application.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ServiceCatalogAppRegistryServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -93,10 +95,11 @@ func TestAccServiceCatalogAppRegistryApplicationDataSource_tags_EmptyMap(t *test func 
TestAccServiceCatalogAppRegistryApplicationDataSource_tags_DefaultTags_nonOverlapping(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_servicecatalogappregistry_application.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ServiceCatalogAppRegistryServiceID), Steps: []resource.TestStep{ @@ -125,10 +128,11 @@ func TestAccServiceCatalogAppRegistryApplicationDataSource_tags_DefaultTags_nonO func TestAccServiceCatalogAppRegistryApplicationDataSource_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_servicecatalogappregistry_application.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ServiceCatalogAppRegistryServiceID), Steps: []resource.TestStep{ @@ -163,10 +167,11 @@ func TestAccServiceCatalogAppRegistryApplicationDataSource_tags_IgnoreTags_Overl func TestAccServiceCatalogAppRegistryApplicationDataSource_tags_IgnoreTags_Overlap_ResourceTag(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_servicecatalogappregistry_application.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ServiceCatalogAppRegistryServiceID), Steps: []resource.TestStep{ diff --git 
a/internal/service/servicecatalogappregistry/application_tags_gen_test.go b/internal/service/servicecatalogappregistry/application_tags_gen_test.go index 0b0df136b7d0..ffa1845339e9 100644 --- a/internal/service/servicecatalogappregistry/application_tags_gen_test.go +++ b/internal/service/servicecatalogappregistry/application_tags_gen_test.go @@ -6,7 +6,6 @@ import ( "testing" "github.com/hashicorp/terraform-plugin-testing/config" - sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/knownvalue" "github.com/hashicorp/terraform-plugin-testing/plancheck" @@ -18,10 +17,11 @@ import ( func TestAccServiceCatalogAppRegistryApplication_tags(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_servicecatalogappregistry_application.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ServiceCatalogAppRegistryServiceID), CheckDestroy: testAccCheckApplicationDestroy(ctx), @@ -199,10 +199,11 @@ func TestAccServiceCatalogAppRegistryApplication_tags(t *testing.T) { func TestAccServiceCatalogAppRegistryApplication_tags_null(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_servicecatalogappregistry_application.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ServiceCatalogAppRegistryServiceID), CheckDestroy: testAccCheckApplicationDestroy(ctx), @@ -260,10 +261,11 @@ func 
TestAccServiceCatalogAppRegistryApplication_tags_null(t *testing.T) { func TestAccServiceCatalogAppRegistryApplication_tags_EmptyMap(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_servicecatalogappregistry_application.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ServiceCatalogAppRegistryServiceID), CheckDestroy: testAccCheckApplicationDestroy(ctx), @@ -309,10 +311,11 @@ func TestAccServiceCatalogAppRegistryApplication_tags_EmptyMap(t *testing.T) { func TestAccServiceCatalogAppRegistryApplication_tags_AddOnUpdate(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_servicecatalogappregistry_application.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ServiceCatalogAppRegistryServiceID), CheckDestroy: testAccCheckApplicationDestroy(ctx), @@ -388,10 +391,11 @@ func TestAccServiceCatalogAppRegistryApplication_tags_AddOnUpdate(t *testing.T) func TestAccServiceCatalogAppRegistryApplication_tags_EmptyTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_servicecatalogappregistry_application.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ServiceCatalogAppRegistryServiceID), CheckDestroy: testAccCheckApplicationDestroy(ctx), @@ 
-477,10 +481,11 @@ func TestAccServiceCatalogAppRegistryApplication_tags_EmptyTag_OnCreate(t *testi func TestAccServiceCatalogAppRegistryApplication_tags_EmptyTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_servicecatalogappregistry_application.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ServiceCatalogAppRegistryServiceID), CheckDestroy: testAccCheckApplicationDestroy(ctx), @@ -615,10 +620,11 @@ func TestAccServiceCatalogAppRegistryApplication_tags_EmptyTag_OnUpdate_Add(t *t func TestAccServiceCatalogAppRegistryApplication_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_servicecatalogappregistry_application.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ServiceCatalogAppRegistryServiceID), CheckDestroy: testAccCheckApplicationDestroy(ctx), @@ -704,10 +710,11 @@ func TestAccServiceCatalogAppRegistryApplication_tags_EmptyTag_OnUpdate_Replace( func TestAccServiceCatalogAppRegistryApplication_tags_DefaultTags_providerOnly(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_servicecatalogappregistry_application.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, 
names.ServiceCatalogAppRegistryServiceID), CheckDestroy: testAccCheckApplicationDestroy(ctx), @@ -884,10 +891,11 @@ func TestAccServiceCatalogAppRegistryApplication_tags_DefaultTags_providerOnly(t func TestAccServiceCatalogAppRegistryApplication_tags_DefaultTags_nonOverlapping(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_servicecatalogappregistry_application.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ServiceCatalogAppRegistryServiceID), CheckDestroy: testAccCheckApplicationDestroy(ctx), @@ -1043,10 +1051,11 @@ func TestAccServiceCatalogAppRegistryApplication_tags_DefaultTags_nonOverlapping func TestAccServiceCatalogAppRegistryApplication_tags_DefaultTags_overlapping(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_servicecatalogappregistry_application.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ServiceCatalogAppRegistryServiceID), CheckDestroy: testAccCheckApplicationDestroy(ctx), @@ -1218,10 +1227,11 @@ func TestAccServiceCatalogAppRegistryApplication_tags_DefaultTags_overlapping(t func TestAccServiceCatalogAppRegistryApplication_tags_DefaultTags_updateToProviderOnly(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_servicecatalogappregistry_application.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ 
PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ServiceCatalogAppRegistryServiceID), CheckDestroy: testAccCheckApplicationDestroy(ctx), @@ -1307,10 +1317,11 @@ func TestAccServiceCatalogAppRegistryApplication_tags_DefaultTags_updateToProvid func TestAccServiceCatalogAppRegistryApplication_tags_DefaultTags_updateToResourceOnly(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_servicecatalogappregistry_application.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ServiceCatalogAppRegistryServiceID), CheckDestroy: testAccCheckApplicationDestroy(ctx), @@ -1395,10 +1406,11 @@ func TestAccServiceCatalogAppRegistryApplication_tags_DefaultTags_updateToResour func TestAccServiceCatalogAppRegistryApplication_tags_DefaultTags_emptyResourceTag(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_servicecatalogappregistry_application.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ServiceCatalogAppRegistryServiceID), CheckDestroy: testAccCheckApplicationDestroy(ctx), @@ -1460,10 +1472,11 @@ func TestAccServiceCatalogAppRegistryApplication_tags_DefaultTags_emptyResourceT func TestAccServiceCatalogAppRegistryApplication_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_servicecatalogappregistry_application.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - 
resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ServiceCatalogAppRegistryServiceID), CheckDestroy: testAccCheckApplicationDestroy(ctx), @@ -1517,10 +1530,11 @@ func TestAccServiceCatalogAppRegistryApplication_tags_DefaultTags_emptyProviderO func TestAccServiceCatalogAppRegistryApplication_tags_DefaultTags_nullOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_servicecatalogappregistry_application.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ServiceCatalogAppRegistryServiceID), CheckDestroy: testAccCheckApplicationDestroy(ctx), @@ -1585,10 +1599,11 @@ func TestAccServiceCatalogAppRegistryApplication_tags_DefaultTags_nullOverlappin func TestAccServiceCatalogAppRegistryApplication_tags_DefaultTags_nullNonOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_servicecatalogappregistry_application.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ServiceCatalogAppRegistryServiceID), CheckDestroy: testAccCheckApplicationDestroy(ctx), @@ -1655,10 +1670,11 @@ func TestAccServiceCatalogAppRegistryApplication_tags_DefaultTags_nullNonOverlap func TestAccServiceCatalogAppRegistryApplication_tags_ComputedTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_servicecatalogappregistry_application.test" - rName := 
sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ServiceCatalogAppRegistryServiceID), CheckDestroy: testAccCheckApplicationDestroy(ctx), @@ -1709,10 +1725,11 @@ func TestAccServiceCatalogAppRegistryApplication_tags_ComputedTag_OnCreate(t *te func TestAccServiceCatalogAppRegistryApplication_tags_ComputedTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_servicecatalogappregistry_application.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ServiceCatalogAppRegistryServiceID), CheckDestroy: testAccCheckApplicationDestroy(ctx), @@ -1805,10 +1822,11 @@ func TestAccServiceCatalogAppRegistryApplication_tags_ComputedTag_OnUpdate_Add(t func TestAccServiceCatalogAppRegistryApplication_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_servicecatalogappregistry_application.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ServiceCatalogAppRegistryServiceID), CheckDestroy: testAccCheckApplicationDestroy(ctx), @@ -1891,10 +1909,11 @@ func TestAccServiceCatalogAppRegistryApplication_tags_ComputedTag_OnUpdate_Repla func TestAccServiceCatalogAppRegistryApplication_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { ctx := 
acctest.Context(t) + resourceName := "aws_servicecatalogappregistry_application.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ServiceCatalogAppRegistryServiceID), CheckDestroy: testAccCheckApplicationDestroy(ctx), @@ -2052,10 +2071,11 @@ func TestAccServiceCatalogAppRegistryApplication_tags_IgnoreTags_Overlap_Default func TestAccServiceCatalogAppRegistryApplication_tags_IgnoreTags_Overlap_ResourceTag(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_servicecatalogappregistry_application.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ServiceCatalogAppRegistryServiceID), CheckDestroy: testAccCheckApplicationDestroy(ctx), diff --git a/internal/service/servicecatalogappregistry/attribute_group_data_source_tags_gen_test.go b/internal/service/servicecatalogappregistry/attribute_group_data_source_tags_gen_test.go index af6424b04b87..ba13fbefea05 100644 --- a/internal/service/servicecatalogappregistry/attribute_group_data_source_tags_gen_test.go +++ b/internal/service/servicecatalogappregistry/attribute_group_data_source_tags_gen_test.go @@ -6,7 +6,6 @@ import ( "testing" "github.com/hashicorp/terraform-plugin-testing/config" - sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/knownvalue" "github.com/hashicorp/terraform-plugin-testing/statecheck" @@ -17,10 +16,11 @@ import ( func 
TestAccServiceCatalogAppRegistryAttributeGroupDataSource_tags(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_servicecatalogappregistry_attribute_group.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ServiceCatalogAppRegistryServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -45,10 +45,11 @@ func TestAccServiceCatalogAppRegistryAttributeGroupDataSource_tags(t *testing.T) func TestAccServiceCatalogAppRegistryAttributeGroupDataSource_tags_NullMap(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_servicecatalogappregistry_attribute_group.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ServiceCatalogAppRegistryServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -69,10 +70,11 @@ func TestAccServiceCatalogAppRegistryAttributeGroupDataSource_tags_NullMap(t *te func TestAccServiceCatalogAppRegistryAttributeGroupDataSource_tags_EmptyMap(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_servicecatalogappregistry_attribute_group.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ServiceCatalogAppRegistryServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ 
-93,10 +95,11 @@ func TestAccServiceCatalogAppRegistryAttributeGroupDataSource_tags_EmptyMap(t *t func TestAccServiceCatalogAppRegistryAttributeGroupDataSource_tags_DefaultTags_nonOverlapping(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_servicecatalogappregistry_attribute_group.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ServiceCatalogAppRegistryServiceID), Steps: []resource.TestStep{ @@ -125,10 +128,11 @@ func TestAccServiceCatalogAppRegistryAttributeGroupDataSource_tags_DefaultTags_n func TestAccServiceCatalogAppRegistryAttributeGroupDataSource_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_servicecatalogappregistry_attribute_group.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ServiceCatalogAppRegistryServiceID), Steps: []resource.TestStep{ @@ -163,10 +167,11 @@ func TestAccServiceCatalogAppRegistryAttributeGroupDataSource_tags_IgnoreTags_Ov func TestAccServiceCatalogAppRegistryAttributeGroupDataSource_tags_IgnoreTags_Overlap_ResourceTag(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_servicecatalogappregistry_attribute_group.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: 
acctest.ErrorCheck(t, names.ServiceCatalogAppRegistryServiceID), Steps: []resource.TestStep{ diff --git a/internal/service/servicecatalogappregistry/attribute_group_tags_gen_test.go b/internal/service/servicecatalogappregistry/attribute_group_tags_gen_test.go index b837cfac444f..a6c5be1c3428 100644 --- a/internal/service/servicecatalogappregistry/attribute_group_tags_gen_test.go +++ b/internal/service/servicecatalogappregistry/attribute_group_tags_gen_test.go @@ -7,7 +7,6 @@ import ( "github.com/aws/aws-sdk-go-v2/service/servicecatalogappregistry" "github.com/hashicorp/terraform-plugin-testing/config" - sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/knownvalue" "github.com/hashicorp/terraform-plugin-testing/plancheck" @@ -19,11 +18,12 @@ import ( func TestAccServiceCatalogAppRegistryAttributeGroup_tags(t *testing.T) { ctx := acctest.Context(t) + var v servicecatalogappregistry.GetAttributeGroupOutput resourceName := "aws_servicecatalogappregistry_attribute_group.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ServiceCatalogAppRegistryServiceID), CheckDestroy: testAccCheckAttributeGroupDestroy(ctx), @@ -201,11 +201,12 @@ func TestAccServiceCatalogAppRegistryAttributeGroup_tags(t *testing.T) { func TestAccServiceCatalogAppRegistryAttributeGroup_tags_null(t *testing.T) { ctx := acctest.Context(t) + var v servicecatalogappregistry.GetAttributeGroupOutput resourceName := "aws_servicecatalogappregistry_attribute_group.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, 
resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ServiceCatalogAppRegistryServiceID), CheckDestroy: testAccCheckAttributeGroupDestroy(ctx), @@ -263,11 +264,12 @@ func TestAccServiceCatalogAppRegistryAttributeGroup_tags_null(t *testing.T) { func TestAccServiceCatalogAppRegistryAttributeGroup_tags_EmptyMap(t *testing.T) { ctx := acctest.Context(t) + var v servicecatalogappregistry.GetAttributeGroupOutput resourceName := "aws_servicecatalogappregistry_attribute_group.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ServiceCatalogAppRegistryServiceID), CheckDestroy: testAccCheckAttributeGroupDestroy(ctx), @@ -313,11 +315,12 @@ func TestAccServiceCatalogAppRegistryAttributeGroup_tags_EmptyMap(t *testing.T) func TestAccServiceCatalogAppRegistryAttributeGroup_tags_AddOnUpdate(t *testing.T) { ctx := acctest.Context(t) + var v servicecatalogappregistry.GetAttributeGroupOutput resourceName := "aws_servicecatalogappregistry_attribute_group.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ServiceCatalogAppRegistryServiceID), CheckDestroy: testAccCheckAttributeGroupDestroy(ctx), @@ -393,11 +396,12 @@ func TestAccServiceCatalogAppRegistryAttributeGroup_tags_AddOnUpdate(t *testing. 
func TestAccServiceCatalogAppRegistryAttributeGroup_tags_EmptyTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) + var v servicecatalogappregistry.GetAttributeGroupOutput resourceName := "aws_servicecatalogappregistry_attribute_group.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ServiceCatalogAppRegistryServiceID), CheckDestroy: testAccCheckAttributeGroupDestroy(ctx), @@ -483,11 +487,12 @@ func TestAccServiceCatalogAppRegistryAttributeGroup_tags_EmptyTag_OnCreate(t *te func TestAccServiceCatalogAppRegistryAttributeGroup_tags_EmptyTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + var v servicecatalogappregistry.GetAttributeGroupOutput resourceName := "aws_servicecatalogappregistry_attribute_group.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ServiceCatalogAppRegistryServiceID), CheckDestroy: testAccCheckAttributeGroupDestroy(ctx), @@ -622,11 +627,12 @@ func TestAccServiceCatalogAppRegistryAttributeGroup_tags_EmptyTag_OnUpdate_Add(t func TestAccServiceCatalogAppRegistryAttributeGroup_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + var v servicecatalogappregistry.GetAttributeGroupOutput resourceName := "aws_servicecatalogappregistry_attribute_group.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { 
acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ServiceCatalogAppRegistryServiceID), CheckDestroy: testAccCheckAttributeGroupDestroy(ctx), @@ -712,11 +718,12 @@ func TestAccServiceCatalogAppRegistryAttributeGroup_tags_EmptyTag_OnUpdate_Repla func TestAccServiceCatalogAppRegistryAttributeGroup_tags_DefaultTags_providerOnly(t *testing.T) { ctx := acctest.Context(t) + var v servicecatalogappregistry.GetAttributeGroupOutput resourceName := "aws_servicecatalogappregistry_attribute_group.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ServiceCatalogAppRegistryServiceID), CheckDestroy: testAccCheckAttributeGroupDestroy(ctx), @@ -893,11 +900,12 @@ func TestAccServiceCatalogAppRegistryAttributeGroup_tags_DefaultTags_providerOnl func TestAccServiceCatalogAppRegistryAttributeGroup_tags_DefaultTags_nonOverlapping(t *testing.T) { ctx := acctest.Context(t) + var v servicecatalogappregistry.GetAttributeGroupOutput resourceName := "aws_servicecatalogappregistry_attribute_group.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ServiceCatalogAppRegistryServiceID), CheckDestroy: testAccCheckAttributeGroupDestroy(ctx), @@ -1053,11 +1061,12 @@ func TestAccServiceCatalogAppRegistryAttributeGroup_tags_DefaultTags_nonOverlapp func TestAccServiceCatalogAppRegistryAttributeGroup_tags_DefaultTags_overlapping(t *testing.T) { ctx := acctest.Context(t) + var v servicecatalogappregistry.GetAttributeGroupOutput resourceName := 
"aws_servicecatalogappregistry_attribute_group.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ServiceCatalogAppRegistryServiceID), CheckDestroy: testAccCheckAttributeGroupDestroy(ctx), @@ -1229,11 +1238,12 @@ func TestAccServiceCatalogAppRegistryAttributeGroup_tags_DefaultTags_overlapping func TestAccServiceCatalogAppRegistryAttributeGroup_tags_DefaultTags_updateToProviderOnly(t *testing.T) { ctx := acctest.Context(t) + var v servicecatalogappregistry.GetAttributeGroupOutput resourceName := "aws_servicecatalogappregistry_attribute_group.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ServiceCatalogAppRegistryServiceID), CheckDestroy: testAccCheckAttributeGroupDestroy(ctx), @@ -1319,11 +1329,12 @@ func TestAccServiceCatalogAppRegistryAttributeGroup_tags_DefaultTags_updateToPro func TestAccServiceCatalogAppRegistryAttributeGroup_tags_DefaultTags_updateToResourceOnly(t *testing.T) { ctx := acctest.Context(t) + var v servicecatalogappregistry.GetAttributeGroupOutput resourceName := "aws_servicecatalogappregistry_attribute_group.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ServiceCatalogAppRegistryServiceID), CheckDestroy: testAccCheckAttributeGroupDestroy(ctx), @@ -1408,11 
+1419,12 @@ func TestAccServiceCatalogAppRegistryAttributeGroup_tags_DefaultTags_updateToRes func TestAccServiceCatalogAppRegistryAttributeGroup_tags_DefaultTags_emptyResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v servicecatalogappregistry.GetAttributeGroupOutput resourceName := "aws_servicecatalogappregistry_attribute_group.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ServiceCatalogAppRegistryServiceID), CheckDestroy: testAccCheckAttributeGroupDestroy(ctx), @@ -1474,11 +1486,12 @@ func TestAccServiceCatalogAppRegistryAttributeGroup_tags_DefaultTags_emptyResour func TestAccServiceCatalogAppRegistryAttributeGroup_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T) { ctx := acctest.Context(t) + var v servicecatalogappregistry.GetAttributeGroupOutput resourceName := "aws_servicecatalogappregistry_attribute_group.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ServiceCatalogAppRegistryServiceID), CheckDestroy: testAccCheckAttributeGroupDestroy(ctx), @@ -1532,11 +1545,12 @@ func TestAccServiceCatalogAppRegistryAttributeGroup_tags_DefaultTags_emptyProvid func TestAccServiceCatalogAppRegistryAttributeGroup_tags_DefaultTags_nullOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v servicecatalogappregistry.GetAttributeGroupOutput resourceName := "aws_servicecatalogappregistry_attribute_group.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, 
acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ServiceCatalogAppRegistryServiceID), CheckDestroy: testAccCheckAttributeGroupDestroy(ctx), @@ -1601,11 +1615,12 @@ func TestAccServiceCatalogAppRegistryAttributeGroup_tags_DefaultTags_nullOverlap func TestAccServiceCatalogAppRegistryAttributeGroup_tags_DefaultTags_nullNonOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v servicecatalogappregistry.GetAttributeGroupOutput resourceName := "aws_servicecatalogappregistry_attribute_group.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ServiceCatalogAppRegistryServiceID), CheckDestroy: testAccCheckAttributeGroupDestroy(ctx), @@ -1672,11 +1687,12 @@ func TestAccServiceCatalogAppRegistryAttributeGroup_tags_DefaultTags_nullNonOver func TestAccServiceCatalogAppRegistryAttributeGroup_tags_ComputedTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) + var v servicecatalogappregistry.GetAttributeGroupOutput resourceName := "aws_servicecatalogappregistry_attribute_group.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ServiceCatalogAppRegistryServiceID), CheckDestroy: testAccCheckAttributeGroupDestroy(ctx), @@ -1727,11 +1743,12 @@ func TestAccServiceCatalogAppRegistryAttributeGroup_tags_ComputedTag_OnCreate(t func 
TestAccServiceCatalogAppRegistryAttributeGroup_tags_ComputedTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + var v servicecatalogappregistry.GetAttributeGroupOutput resourceName := "aws_servicecatalogappregistry_attribute_group.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ServiceCatalogAppRegistryServiceID), CheckDestroy: testAccCheckAttributeGroupDestroy(ctx), @@ -1824,11 +1841,12 @@ func TestAccServiceCatalogAppRegistryAttributeGroup_tags_ComputedTag_OnUpdate_Ad func TestAccServiceCatalogAppRegistryAttributeGroup_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + var v servicecatalogappregistry.GetAttributeGroupOutput resourceName := "aws_servicecatalogappregistry_attribute_group.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ServiceCatalogAppRegistryServiceID), CheckDestroy: testAccCheckAttributeGroupDestroy(ctx), @@ -1911,11 +1929,12 @@ func TestAccServiceCatalogAppRegistryAttributeGroup_tags_ComputedTag_OnUpdate_Re func TestAccServiceCatalogAppRegistryAttributeGroup_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { ctx := acctest.Context(t) + var v servicecatalogappregistry.GetAttributeGroupOutput resourceName := "aws_servicecatalogappregistry_attribute_group.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: 
func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ServiceCatalogAppRegistryServiceID), CheckDestroy: testAccCheckAttributeGroupDestroy(ctx), @@ -2073,11 +2092,12 @@ func TestAccServiceCatalogAppRegistryAttributeGroup_tags_IgnoreTags_Overlap_Defa func TestAccServiceCatalogAppRegistryAttributeGroup_tags_IgnoreTags_Overlap_ResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v servicecatalogappregistry.GetAttributeGroupOutput resourceName := "aws_servicecatalogappregistry_attribute_group.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ServiceCatalogAppRegistryServiceID), CheckDestroy: testAccCheckAttributeGroupDestroy(ctx), diff --git a/internal/service/servicecatalogappregistry/service_endpoint_resolver_gen.go b/internal/service/servicecatalogappregistry/service_endpoint_resolver_gen.go index f83021f4124b..f5b5ee8b8eaa 100644 --- a/internal/service/servicecatalogappregistry/service_endpoint_resolver_gen.go +++ b/internal/service/servicecatalogappregistry/service_endpoint_resolver_gen.go @@ -62,7 +62,7 @@ func (r resolverV2) ResolveEndpoint(ctx context.Context, params servicecatalogap }) params.UseFIPS = aws.Bool(false) } else { - err = fmt.Errorf("looking up servicecatalogappregistry endpoint %q: %s", hostname, err) + err = fmt.Errorf("looking up servicecatalogappregistry endpoint %q: %w", hostname, err) return } } else { diff --git a/internal/service/servicecatalogappregistry/service_endpoints_gen_test.go b/internal/service/servicecatalogappregistry/service_endpoints_gen_test.go index b6dd634bc4e6..19bd9d786181 100644 --- a/internal/service/servicecatalogappregistry/service_endpoints_gen_test.go +++ 
b/internal/service/servicecatalogappregistry/service_endpoints_gen_test.go @@ -601,7 +601,7 @@ func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { ) } -var errCancelOperation = fmt.Errorf("Test: Canceling request") +var errCancelOperation = errors.New("Test: Canceling request") func addCancelRequestMiddleware() func(*middleware.Stack) error { return func(stack *middleware.Stack) error { diff --git a/internal/service/servicecatalogappregistry/service_package_gen.go b/internal/service/servicecatalogappregistry/service_package_gen.go index 42c6c00e05b5..b4aaff1c8382 100644 --- a/internal/service/servicecatalogappregistry/service_package_gen.go +++ b/internal/service/servicecatalogappregistry/service_package_gen.go @@ -7,7 +7,6 @@ import ( "unique" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/servicecatalogappregistry" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -107,7 +106,7 @@ func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) ( func(o *servicecatalogappregistry.Options) { if inContext, ok := conns.FromContext(ctx); ok && inContext.VCREnabled() { tflog.Info(ctx, "overriding retry behavior to immediately return VCR errors") - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(vcr.InteractionNotFoundRetryableFunc)) + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), vcr.InteractionNotFoundRetryableFunc) } }, withExtraOptions(ctx, p, config), diff --git a/internal/service/servicecatalogappregistry/tags_gen.go b/internal/service/servicecatalogappregistry/tags_gen.go index 1e87544a5caf..3a6667a5bb54 100644 --- a/internal/service/servicecatalogappregistry/tags_gen.go +++ b/internal/service/servicecatalogappregistry/tags_gen.go @@ -3,8 +3,8 @@ package servicecatalogappregistry import ( "context" - "fmt" + 
"github.com/YakDriver/smarterr" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/servicecatalogappregistry" "github.com/hashicorp/terraform-plugin-log/tflog" @@ -26,7 +26,7 @@ func listTags(ctx context.Context, conn *servicecatalogappregistry.Client, ident output, err := conn.ListTagsForResource(ctx, &input, optFns...) if err != nil { - return tftags.New(ctx, nil), err + return tftags.New(ctx, nil), smarterr.NewError(err) } return keyValueTags(ctx, output.Tags), nil @@ -38,7 +38,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri tags, err := listTags(ctx, meta.(*conns.AWSClient).ServiceCatalogAppRegistryClient(ctx), identifier) if err != nil { - return err + return smarterr.NewError(err) } if inContext, ok := tftags.FromContext(ctx); ok { @@ -99,7 +99,7 @@ func updateTags(ctx context.Context, conn *servicecatalogappregistry.Client, ide _, err := conn.UntagResource(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("untagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } @@ -114,7 +114,7 @@ func updateTags(ctx context.Context, conn *servicecatalogappregistry.Client, ide _, err := conn.TagResource(ctx, &input, optFns...) 
if err != nil { - return fmt.Errorf("tagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } diff --git a/internal/service/servicediscovery/http_namespace.go b/internal/service/servicediscovery/http_namespace.go index 71c4dd1b7a14..7f608f4a9964 100644 --- a/internal/service/servicediscovery/http_namespace.go +++ b/internal/service/servicediscovery/http_namespace.go @@ -138,7 +138,7 @@ func resourceHTTPNamespaceDelete(ctx context.Context, d *schema.ResourceData, me const ( timeout = 2 * time.Minute ) - outputRaw, err := tfresource.RetryWhenIsA[*awstypes.ResourceInUse](ctx, timeout, func() (any, error) { + outputRaw, err := tfresource.RetryWhenIsA[any, *awstypes.ResourceInUse](ctx, timeout, func(ctx context.Context) (any, error) { return conn.DeleteNamespace(ctx, &servicediscovery.DeleteNamespaceInput{ Id: aws.String(d.Id()), }) diff --git a/internal/service/servicediscovery/service_endpoint_resolver_gen.go b/internal/service/servicediscovery/service_endpoint_resolver_gen.go index fd594064d493..082187efb41b 100644 --- a/internal/service/servicediscovery/service_endpoint_resolver_gen.go +++ b/internal/service/servicediscovery/service_endpoint_resolver_gen.go @@ -62,7 +62,7 @@ func (r resolverV2) ResolveEndpoint(ctx context.Context, params servicediscovery }) params.UseFIPS = aws.Bool(false) } else { - err = fmt.Errorf("looking up servicediscovery endpoint %q: %s", hostname, err) + err = fmt.Errorf("looking up servicediscovery endpoint %q: %w", hostname, err) return } } else { diff --git a/internal/service/servicediscovery/service_endpoints_gen_test.go b/internal/service/servicediscovery/service_endpoints_gen_test.go index 871fc92f3cb1..533625ee1ec3 100644 --- a/internal/service/servicediscovery/service_endpoints_gen_test.go +++ b/internal/service/servicediscovery/service_endpoints_gen_test.go @@ -521,7 +521,7 @@ func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { ) } -var errCancelOperation = fmt.Errorf("Test: 
Canceling request") +var errCancelOperation = errors.New("Test: Canceling request") func addCancelRequestMiddleware() func(*middleware.Stack) error { return func(stack *middleware.Stack) error { diff --git a/internal/service/servicediscovery/service_package_gen.go b/internal/service/servicediscovery/service_package_gen.go index bbadf661222f..86da74ba25b6 100644 --- a/internal/service/servicediscovery/service_package_gen.go +++ b/internal/service/servicediscovery/service_package_gen.go @@ -7,7 +7,6 @@ import ( "unique" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/servicediscovery" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -119,7 +118,7 @@ func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) ( func(o *servicediscovery.Options) { if inContext, ok := conns.FromContext(ctx); ok && inContext.VCREnabled() { tflog.Info(ctx, "overriding retry behavior to immediately return VCR errors") - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(vcr.InteractionNotFoundRetryableFunc)) + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), vcr.InteractionNotFoundRetryableFunc) } }, withExtraOptions(ctx, p, config), diff --git a/internal/service/servicediscovery/sweep.go b/internal/service/servicediscovery/sweep.go index 111ee345f0dc..0e50b806e0d6 100644 --- a/internal/service/servicediscovery/sweep.go +++ b/internal/service/servicediscovery/sweep.go @@ -52,7 +52,7 @@ func sweepHTTPNamespaces(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.ServiceDiscoveryClient(ctx) sweepResources := make([]sweep.Sweepable, 0) @@ -89,7 +89,7 @@ func 
sweepPrivateDNSNamespaces(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.ServiceDiscoveryClient(ctx) sweepResources := make([]sweep.Sweepable, 0) @@ -126,7 +126,7 @@ func sweepPublicDNSNamespaces(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.ServiceDiscoveryClient(ctx) sweepResources := make([]sweep.Sweepable, 0) @@ -163,7 +163,7 @@ func sweepServices(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.ServiceDiscoveryClient(ctx) input := &servicediscovery.ListServicesInput{} diff --git a/internal/service/servicediscovery/tags_gen.go b/internal/service/servicediscovery/tags_gen.go index 9e2a3bf620ee..0c98678be2d3 100644 --- a/internal/service/servicediscovery/tags_gen.go +++ b/internal/service/servicediscovery/tags_gen.go @@ -3,8 +3,8 @@ package servicediscovery import ( "context" - "fmt" + "github.com/YakDriver/smarterr" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/servicediscovery" awstypes "github.com/aws/aws-sdk-go-v2/service/servicediscovery/types" @@ -27,7 +27,7 @@ func listTags(ctx context.Context, conn *servicediscovery.Client, identifier str output, err := conn.ListTagsForResource(ctx, &input, optFns...) 
if err != nil { - return tftags.New(ctx, nil), err + return tftags.New(ctx, nil), smarterr.NewError(err) } return keyValueTags(ctx, output.Tags), nil @@ -39,7 +39,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri tags, err := listTags(ctx, meta.(*conns.AWSClient).ServiceDiscoveryClient(ctx), identifier) if err != nil { - return err + return smarterr.NewError(err) } if inContext, ok := tftags.FromContext(ctx); ok { @@ -117,7 +117,7 @@ func updateTags(ctx context.Context, conn *servicediscovery.Client, identifier s _, err := conn.UntagResource(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("untagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } @@ -132,7 +132,7 @@ func updateTags(ctx context.Context, conn *servicediscovery.Client, identifier s _, err := conn.TagResource(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("tagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } diff --git a/internal/service/servicequotas/service_endpoint_resolver_gen.go b/internal/service/servicequotas/service_endpoint_resolver_gen.go index c8782ad4935f..0ffafbd04b51 100644 --- a/internal/service/servicequotas/service_endpoint_resolver_gen.go +++ b/internal/service/servicequotas/service_endpoint_resolver_gen.go @@ -62,7 +62,7 @@ func (r resolverV2) ResolveEndpoint(ctx context.Context, params servicequotas.En }) params.UseFIPS = aws.Bool(false) } else { - err = fmt.Errorf("looking up servicequotas endpoint %q: %s", hostname, err) + err = fmt.Errorf("looking up servicequotas endpoint %q: %w", hostname, err) return } } else { diff --git a/internal/service/servicequotas/service_endpoints_gen_test.go b/internal/service/servicequotas/service_endpoints_gen_test.go index 4fbad913abd8..cdf98b309055 100644 --- a/internal/service/servicequotas/service_endpoints_gen_test.go +++ b/internal/service/servicequotas/service_endpoints_gen_test.go @@ -521,7 +521,7 @@ func 
retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { ) } -var errCancelOperation = fmt.Errorf("Test: Canceling request") +var errCancelOperation = errors.New("Test: Canceling request") func addCancelRequestMiddleware() func(*middleware.Stack) error { return func(stack *middleware.Stack) error { diff --git a/internal/service/servicequotas/service_package_gen.go b/internal/service/servicequotas/service_package_gen.go index 43bebb7d6b7a..e7b33f4f887c 100644 --- a/internal/service/servicequotas/service_package_gen.go +++ b/internal/service/servicequotas/service_package_gen.go @@ -7,7 +7,6 @@ import ( "unique" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/servicequotas" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -97,7 +96,7 @@ func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) ( func(o *servicequotas.Options) { if inContext, ok := conns.FromContext(ctx); ok && inContext.VCREnabled() { tflog.Info(ctx, "overriding retry behavior to immediately return VCR errors") - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(vcr.InteractionNotFoundRetryableFunc)) + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), vcr.InteractionNotFoundRetryableFunc) } }, withExtraOptions(ctx, p, config), diff --git a/internal/service/servicequotas/service_quota.go b/internal/service/servicequotas/service_quota.go index 51a3781ea44d..eb7656f0c4ec 100644 --- a/internal/service/servicequotas/service_quota.go +++ b/internal/service/servicequotas/service_quota.go @@ -146,7 +146,6 @@ func resourceServiceQuotaCreate(ctx context.Context, d *schema.ResourceData, met // A Service Quota will always have a default value, but will only have a current value if it has been set. 
defaultQuota, err := findDefaultServiceQuotaByServiceCodeAndQuotaCode(ctx, conn, serviceCode, quotaCode) - if err != nil { return sdkdiag.AppendErrorf(diags, "reading Service Quotas default Service Quota (%s/%s): %s", serviceCode, quotaCode, err) } @@ -164,7 +163,13 @@ func resourceServiceQuotaCreate(ctx context.Context, d *schema.ResourceData, met } id := serviceQuotaCreateResourceID(serviceCode, quotaCode) - if value := d.Get(names.AttrValue).(float64); value > quotaValue { + value := d.Get(names.AttrValue).(float64) + + if value < quotaValue { + return sdkdiag.AppendErrorf(diags, "requesting Service Quotas Service Quota (%s) with value less than current", id) + } + + if value > quotaValue { input := servicequotas.RequestServiceQuotaIncreaseInput{ DesiredValue: aws.Float64(value), QuotaCode: aws.String(quotaCode), @@ -172,7 +177,6 @@ func resourceServiceQuotaCreate(ctx context.Context, d *schema.ResourceData, met } output, err := conn.RequestServiceQuotaIncrease(ctx, &input) - if err != nil { return sdkdiag.AppendErrorf(diags, "requesting Service Quotas Service Quota (%s) increase: %s", id, err) } @@ -276,6 +280,10 @@ func resourceServiceQuotaUpdate(ctx context.Context, d *schema.ResourceData, met output, err := conn.RequestServiceQuotaIncrease(ctx, &input) + if errs.IsAErrorMessageContains[*awstypes.ResourceAlreadyExistsException](err, "Only one open service quota increase request is allowed per quota") { + return sdkdiag.AppendWarningf(diags, "resource service quota %s already exists", d.Id()) + } + if err != nil { return sdkdiag.AppendErrorf(diags, "requesting Service Quotas Service Quota (%s) increase: %s", d.Id(), err) } diff --git a/internal/service/servicequotas/service_quota_data_source.go b/internal/service/servicequotas/service_quota_data_source.go index 8fd65ca1d60b..de2e029458b7 100644 --- a/internal/service/servicequotas/service_quota_data_source.go +++ b/internal/service/servicequotas/service_quota_data_source.go @@ -128,7 +128,6 @@ func 
dataSourceServiceQuotaRead(ctx context.Context, d *schema.ResourceData, met // A Service Quota will always have a default value, but will only have a current value if it has been set. if quotaName != "" { defaultQuota, err = findDefaultServiceQuotaByServiceCodeAndQuotaName(ctx, conn, serviceCode, quotaName) - quotaCode = aws.ToString(defaultQuota.QuotaCode) } else { defaultQuota, err = findDefaultServiceQuotaByServiceCodeAndQuotaCode(ctx, conn, serviceCode, quotaCode) } @@ -137,6 +136,10 @@ func dataSourceServiceQuotaRead(ctx context.Context, d *schema.ResourceData, met return sdkdiag.AppendFromErr(diags, tfresource.SingularDataSourceFindError("Service Quotas Service Quota", err)) } + if quotaName != "" { + quotaCode = aws.ToString(defaultQuota.QuotaCode) + } + arn := aws.ToString(defaultQuota.QuotaArn) d.SetId(arn) d.Set("adjustable", defaultQuota.Adjustable) diff --git a/internal/service/servicequotas/service_quota_data_source_test.go b/internal/service/servicequotas/service_quota_data_source_test.go index 665203a7ca12..a573014d8379 100644 --- a/internal/service/servicequotas/service_quota_data_source_test.go +++ b/internal/service/servicequotas/service_quota_data_source_test.go @@ -151,6 +151,10 @@ func TestAccServiceQuotasServiceQuotaDataSource_quotaName(t *testing.T) { ErrorCheck: acctest.ErrorCheck(t, names.ServiceQuotasServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, Steps: []resource.TestStep{ + { + Config: testAccServiceQuotaDataSourceConfig_name("vpc", setQuotaQuotaName+"nonexist"), + ExpectError: regexache.MustCompile(`Service Quotas Service Quota`), + }, { Config: testAccServiceQuotaDataSourceConfig_name("vpc", setQuotaQuotaName), Check: resource.ComposeAggregateTestCheckFunc( diff --git a/internal/service/servicequotas/service_quota_test.go b/internal/service/servicequotas/service_quota_test.go index 6b98f356d40e..11a8a715d5c1 100644 --- a/internal/service/servicequotas/service_quota_test.go +++ 
b/internal/service/servicequotas/service_quota_test.go @@ -253,6 +253,26 @@ func TestAccServiceQuotasServiceQuota_permissionError(t *testing.T) { }) } +func TestAccServiceQuotasServiceQuota_valueLessThanCurrent(t *testing.T) { + ctx := acctest.Context(t) + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + testAccPreCheck(ctx, t) + testAccPreCheckServiceQuotaSet(ctx, t, setQuotaServiceCode, setQuotaQuotaCode) + }, + ErrorCheck: acctest.ErrorCheck(t, names.ServiceQuotasServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: acctest.CheckDestroyNoop, + Steps: []resource.TestStep{ + { + Config: testAccServiceQuotaConfig_valueLessThanCurrent(setQuotaServiceCode, setQuotaQuotaCode), + ExpectError: regexache.MustCompile(`requesting Service Quotas Service Quota \([^)]+\) with value less than current`), + }, + }, + }) +} + // nosemgrep:ci.servicequotas-in-func-name func testAccServiceQuotaConfig_sameValue(serviceCode, quotaCode string) string { return fmt.Sprintf(` @@ -310,3 +330,18 @@ resource "aws_servicequotas_service_quota" "test" { } `, serviceCode, quotaCode)) } + +func testAccServiceQuotaConfig_valueLessThanCurrent(serviceCode, quotaCode string) string { + return fmt.Sprintf(` +data "aws_servicequotas_service_quota" "test" { + quota_code = %[1]q + service_code = %[2]q +} + +resource "aws_servicequotas_service_quota" "test" { + quota_code = data.aws_servicequotas_service_quota.test.quota_code + service_code = data.aws_servicequotas_service_quota.test.service_code + value = data.aws_servicequotas_service_quota.test.value - 1 +} +`, quotaCode, serviceCode) +} diff --git a/internal/service/ses/receipt_rule.go b/internal/service/ses/receipt_rule.go index a26cdcd01b63..0b563d070b74 100644 --- a/internal/service/ses/receipt_rule.go +++ b/internal/service/ses/receipt_rule.go @@ -296,7 +296,7 @@ func resourceReceiptRuleCreate(ctx context.Context, d *schema.ResourceData, meta } _, err := 
tfresource.RetryWhen(ctx, d.Timeout(schema.TimeoutCreate), - func() (any, error) { + func(ctx context.Context) (any, error) { return conn.CreateReceiptRule(ctx, input) }, func(err error) (bool, error) { @@ -498,7 +498,7 @@ func resourceReceiptRuleUpdate(ctx context.Context, d *schema.ResourceData, meta } _, err := tfresource.RetryWhen(ctx, d.Timeout(schema.TimeoutUpdate), - func() (any, error) { + func(ctx context.Context) (any, error) { return conn.UpdateReceiptRule(ctx, input) }, func(err error) (bool, error) { diff --git a/internal/service/ses/send_email_action.go b/internal/service/ses/send_email_action.go new file mode 100644 index 000000000000..c084da17f860 --- /dev/null +++ b/internal/service/ses/send_email_action.go @@ -0,0 +1,199 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package ses + +import ( + "context" + "fmt" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/ses" + awstypes "github.com/aws/aws-sdk-go-v2/service/ses/types" + "github.com/hashicorp/terraform-plugin-framework/action" + "github.com/hashicorp/terraform-plugin-framework/action/schema" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/framework" + fwflex "github.com/hashicorp/terraform-provider-aws/internal/framework/flex" + fwtypes "github.com/hashicorp/terraform-provider-aws/internal/framework/types" + "github.com/hashicorp/terraform-provider-aws/names" +) + +// @Action(aws_ses_send_email, name="Send Email") +func newSendEmailAction(_ context.Context) (action.ActionWithConfigure, error) { + return &sendEmailAction{}, nil +} + +var ( + _ action.Action = (*sendEmailAction)(nil) +) + +type sendEmailAction struct { + framework.ActionWithModel[sendEmailActionModel] +} + +type sendEmailActionModel struct { + framework.WithRegionModel + Source types.String `tfsdk:"source"` + ToAddresses fwtypes.ListValueOf[types.String] 
`tfsdk:"to_addresses"` + CcAddresses fwtypes.ListValueOf[types.String] `tfsdk:"cc_addresses"` + BccAddresses fwtypes.ListValueOf[types.String] `tfsdk:"bcc_addresses"` + Subject types.String `tfsdk:"subject"` + TextBody types.String `tfsdk:"text_body"` + HtmlBody types.String `tfsdk:"html_body"` + ReplyToAddresses fwtypes.ListValueOf[types.String] `tfsdk:"reply_to_addresses"` + ReturnPath types.String `tfsdk:"return_path"` +} + +func (a *sendEmailAction) Schema(ctx context.Context, req action.SchemaRequest, resp *action.SchemaResponse) { + resp.Schema = schema.Schema{ + Description: "Sends an email using Amazon SES. This action allows for imperative email sending with full control over recipients, content, and formatting.", + Attributes: map[string]schema.Attribute{ + names.AttrSource: schema.StringAttribute{ + Description: "The email address that is sending the email. This address must be either individually verified with Amazon SES, or from a domain that has been verified with Amazon SES.", + Required: true, + }, + "to_addresses": schema.ListAttribute{ + CustomType: fwtypes.ListOfStringType, + ElementType: types.StringType, + Description: "The To: field(s) of the message.", + Optional: true, + }, + "cc_addresses": schema.ListAttribute{ + CustomType: fwtypes.ListOfStringType, + ElementType: types.StringType, + Description: "The CC: field(s) of the message.", + Optional: true, + }, + "bcc_addresses": schema.ListAttribute{ + CustomType: fwtypes.ListOfStringType, + ElementType: types.StringType, + Description: "The BCC: field(s) of the message.", + Optional: true, + }, + "subject": schema.StringAttribute{ + Description: "The subject of the message: A short summary of the content, which will appear in the recipient's inbox.", + Required: true, + }, + "text_body": schema.StringAttribute{ + Description: "The message body in text format. 
Either text_body or html_body must be specified.", + Optional: true, + }, + "html_body": schema.StringAttribute{ + Description: "The message body in HTML format. Either text_body or html_body must be specified.", + Optional: true, + }, + "reply_to_addresses": schema.ListAttribute{ + CustomType: fwtypes.ListOfStringType, + ElementType: types.StringType, + Description: "The reply-to email address(es) for the message. If the recipient replies to the message, each reply-to address will receive the reply.", + Optional: true, + }, + "return_path": schema.StringAttribute{ + Description: "The email address that bounces and complaints will be forwarded to when feedback forwarding is enabled.", + Optional: true, + }, + }, + } +} + +func (a *sendEmailAction) Invoke(ctx context.Context, req action.InvokeRequest, resp *action.InvokeResponse) { + var config sendEmailActionModel + + resp.Diagnostics.Append(req.Config.Get(ctx, &config)...) + if resp.Diagnostics.HasError() { + return + } + + // Validate that at least one body type is provided + if config.TextBody.IsNull() && config.HtmlBody.IsNull() { + resp.Diagnostics.AddError( + "Missing Email Body", + "Either text_body or html_body must be specified", + ) + return + } + + conn := a.Meta().SESClient(ctx) + + source := config.Source.ValueString() + subject := config.Subject.ValueString() + + tflog.Info(ctx, "Starting SES send email action", map[string]any{ + names.AttrSource: source, + "subject": subject, + "has_text_body": !config.TextBody.IsNull(), + "has_html_body": !config.HtmlBody.IsNull(), + }) + + resp.SendProgress(action.InvokeProgressEvent{ + Message: fmt.Sprintf("Sending email from %s...", source), + }) + + // Build destination + destination := &awstypes.Destination{} + if !config.ToAddresses.IsNull() { + destination.ToAddresses = fwflex.ExpandFrameworkStringValueList(ctx, config.ToAddresses) + } + if !config.CcAddresses.IsNull() { + destination.CcAddresses = fwflex.ExpandFrameworkStringValueList(ctx, 
config.CcAddresses) + } + if !config.BccAddresses.IsNull() { + destination.BccAddresses = fwflex.ExpandFrameworkStringValueList(ctx, config.BccAddresses) + } + + // Build message + message := &awstypes.Message{ + Subject: &awstypes.Content{ + Data: aws.String(subject), + }, + Body: &awstypes.Body{}, + } + + if !config.TextBody.IsNull() { + message.Body.Text = &awstypes.Content{ + Data: config.TextBody.ValueStringPointer(), + } + } + if !config.HtmlBody.IsNull() { + message.Body.Html = &awstypes.Content{ + Data: config.HtmlBody.ValueStringPointer(), + } + } + + // Build input + input := &ses.SendEmailInput{ + Source: aws.String(source), + Destination: destination, + Message: message, + } + + if !config.ReplyToAddresses.IsNull() { + input.ReplyToAddresses = fwflex.ExpandFrameworkStringValueList(ctx, config.ReplyToAddresses) + } + + if !config.ReturnPath.IsNull() { + input.ReturnPath = config.ReturnPath.ValueStringPointer() + } + + // Send email + output, err := conn.SendEmail(ctx, input) + if err != nil { + resp.Diagnostics.AddError( + "Failed to Send Email", + fmt.Sprintf("Could not send email from %s: %s", source, err), + ) + return + } + + messageId := aws.ToString(output.MessageId) + resp.SendProgress(action.InvokeProgressEvent{ + Message: fmt.Sprintf("Email sent successfully (Message ID: %s)", messageId), + }) + + tflog.Info(ctx, "SES send email action completed successfully", map[string]any{ + names.AttrSource: source, + "message_id": messageId, + }) +} diff --git a/internal/service/ses/send_email_action_test.go b/internal/service/ses/send_email_action_test.go new file mode 100644 index 000000000000..5d938d0884c5 --- /dev/null +++ b/internal/service/ses/send_email_action_test.go @@ -0,0 +1,204 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package ses_test + +import ( + "context" + "fmt" + "testing" + + "github.com/aws/aws-sdk-go-v2/service/ses" + awstypes "github.com/aws/aws-sdk-go-v2/service/ses/types" + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/terraform" + "github.com/hashicorp/terraform-plugin-testing/tfversion" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccSESSendEmailAction_basic(t *testing.T) { + ctx := acctest.Context(t) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + testEmail := acctest.SkipIfEnvVarNotSet(t, "SES_VERIFIED_EMAIL") + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, names.SESEndpointID) + }, + ErrorCheck: acctest.ErrorCheck(t, names.SESServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_14_0), + }, + CheckDestroy: acctest.CheckDestroyNoop, + Steps: []resource.TestStep{ + { + Config: testAccSendEmailActionConfig_basic(rName, testEmail), + Check: resource.ComposeTestCheckFunc( + testAccCheckSendEmailAction(ctx, testEmail), + ), + }, + }, + }) +} + +func TestAccSESSendEmailAction_htmlBody(t *testing.T) { + ctx := acctest.Context(t) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + testEmail := acctest.SkipIfEnvVarNotSet(t, "SES_VERIFIED_EMAIL") + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, names.SESEndpointID) + }, + ErrorCheck: acctest.ErrorCheck(t, names.SESServiceID), + ProtoV5ProviderFactories: 
acctest.ProtoV5ProviderFactories, + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_14_0), + }, + CheckDestroy: acctest.CheckDestroyNoop, + Steps: []resource.TestStep{ + { + Config: testAccSendEmailActionConfig_htmlBody(rName, testEmail), + Check: resource.ComposeTestCheckFunc( + testAccCheckSendEmailAction(ctx, testEmail), + ), + }, + }, + }) +} + +func TestAccSESSendEmailAction_multipleRecipients(t *testing.T) { + ctx := acctest.Context(t) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + testEmail := acctest.SkipIfEnvVarNotSet(t, "SES_VERIFIED_EMAIL") + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, names.SESEndpointID) + }, + ErrorCheck: acctest.ErrorCheck(t, names.SESServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_14_0), + }, + CheckDestroy: acctest.CheckDestroyNoop, + Steps: []resource.TestStep{ + { + Config: testAccSendEmailActionConfig_multipleRecipients(rName, testEmail), + Check: resource.ComposeTestCheckFunc( + testAccCheckSendEmailAction(ctx, testEmail), + ), + }, + }, + }) +} + +// testAccCheckSendEmailAction verifies the action can send emails +func testAccCheckSendEmailAction(ctx context.Context, sourceEmail string) resource.TestCheckFunc { + return func(s *terraform.State) error { + conn := acctest.Provider.Meta().(*conns.AWSClient).SESClient(ctx) + + // Verify the source email is verified in SES + input := &ses.GetIdentityVerificationAttributesInput{ + Identities: []string{sourceEmail}, + } + + output, err := conn.GetIdentityVerificationAttributes(ctx, input) + if err != nil { + return fmt.Errorf("Failed to get identity verification attributes: %w", err) + } + + if attrs, ok := output.VerificationAttributes[sourceEmail]; ok { + if attrs.VerificationStatus != 
awstypes.VerificationStatusSuccess { + return fmt.Errorf("Email %s is not verified in SES (status: %s)", sourceEmail, string(attrs.VerificationStatus)) + } + } else { + return fmt.Errorf("Email %s not found in SES identities", sourceEmail) + } + + return nil + } +} + +// Configuration functions + +func testAccSendEmailActionConfig_basic(rName, testEmail string) string { + return fmt.Sprintf(` +action "aws_ses_send_email" "test" { + config { + source = %[2]q + subject = "Test Email from %[1]s" + text_body = "This is a test email sent from Terraform action test." + to_addresses = [%[2]q] + } +} + +resource "terraform_data" "trigger" { + input = "trigger" + lifecycle { + action_trigger { + events = [before_create] + actions = [action.aws_ses_send_email.test] + } + } +} +`, rName, testEmail) +} + +func testAccSendEmailActionConfig_htmlBody(rName, testEmail string) string { + return fmt.Sprintf(` +action "aws_ses_send_email" "test" { + config { + source = %[2]q + subject = "HTML Test Email from %[1]s" + html_body = "

Test Email

This is a test email sent from Terraform action test.

" + to_addresses = [%[2]q] + } +} + +resource "terraform_data" "trigger" { + input = "trigger" + lifecycle { + action_trigger { + events = [before_create] + actions = [action.aws_ses_send_email.test] + } + } +} +`, rName, testEmail) +} + +func testAccSendEmailActionConfig_multipleRecipients(rName, testEmail string) string { + return fmt.Sprintf(` +action "aws_ses_send_email" "test" { + config { + source = %[2]q + subject = "Multi-recipient Test Email from %[1]s" + text_body = "This is a test email sent to multiple recipients." + to_addresses = [%[2]q] + cc_addresses = [%[2]q] + reply_to_addresses = [%[2]q] + } +} + +resource "terraform_data" "trigger" { + input = "trigger" + lifecycle { + action_trigger { + events = [before_create] + actions = [action.aws_ses_send_email.test] + } + } +} +`, rName, testEmail) +} diff --git a/internal/service/ses/service_endpoint_resolver_gen.go b/internal/service/ses/service_endpoint_resolver_gen.go index 18d0abf603bb..4f87d10155a2 100644 --- a/internal/service/ses/service_endpoint_resolver_gen.go +++ b/internal/service/ses/service_endpoint_resolver_gen.go @@ -62,7 +62,7 @@ func (r resolverV2) ResolveEndpoint(ctx context.Context, params ses.EndpointPara }) params.UseFIPS = aws.Bool(false) } else { - err = fmt.Errorf("looking up ses endpoint %q: %s", hostname, err) + err = fmt.Errorf("looking up ses endpoint %q: %w", hostname, err) return } } else { diff --git a/internal/service/ses/service_endpoints_gen_test.go b/internal/service/ses/service_endpoints_gen_test.go index 213f54471fda..c5667e17313d 100644 --- a/internal/service/ses/service_endpoints_gen_test.go +++ b/internal/service/ses/service_endpoints_gen_test.go @@ -521,7 +521,7 @@ func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { ) } -var errCancelOperation = fmt.Errorf("Test: Canceling request") +var errCancelOperation = errors.New("Test: Canceling request") func addCancelRequestMiddleware() func(*middleware.Stack) error { return func(stack 
*middleware.Stack) error { diff --git a/internal/service/ses/service_package_gen.go b/internal/service/ses/service_package_gen.go index beb44c783d7b..92ab96308930 100644 --- a/internal/service/ses/service_package_gen.go +++ b/internal/service/ses/service_package_gen.go @@ -7,7 +7,6 @@ import ( "unique" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/ses" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -18,6 +17,17 @@ import ( type servicePackage struct{} +func (p *servicePackage) Actions(ctx context.Context) []*inttypes.ServicePackageAction { + return []*inttypes.ServicePackageAction{ + { + Factory: newSendEmailAction, + TypeName: "aws_ses_send_email", + Name: "Send Email", + Region: unique.Make(inttypes.ResourceRegionDefault()), + }, + } +} + func (p *servicePackage) FrameworkDataSources(ctx context.Context) []*inttypes.ServicePackageFrameworkDataSource { return []*inttypes.ServicePackageFrameworkDataSource{} } @@ -161,7 +171,7 @@ func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) ( func(o *ses.Options) { if inContext, ok := conns.FromContext(ctx); ok && inContext.VCREnabled() { tflog.Info(ctx, "overriding retry behavior to immediately return VCR errors") - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(vcr.InteractionNotFoundRetryableFunc)) + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), vcr.InteractionNotFoundRetryableFunc) } }, withExtraOptions(ctx, p, config), diff --git a/internal/service/sesv2/configuration_set_data_source_tags_gen_test.go b/internal/service/sesv2/configuration_set_data_source_tags_gen_test.go index ebc7eac9da08..ec9dadf55a47 100644 --- a/internal/service/sesv2/configuration_set_data_source_tags_gen_test.go +++ b/internal/service/sesv2/configuration_set_data_source_tags_gen_test.go @@ -6,7 +6,6 @@ import ( 
"testing" "github.com/hashicorp/terraform-plugin-testing/config" - sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/knownvalue" "github.com/hashicorp/terraform-plugin-testing/statecheck" @@ -17,10 +16,11 @@ import ( func TestAccSESV2ConfigurationSetDataSource_tags(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_sesv2_configuration_set.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SESV2ServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -45,10 +45,11 @@ func TestAccSESV2ConfigurationSetDataSource_tags(t *testing.T) { func TestAccSESV2ConfigurationSetDataSource_tags_NullMap(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_sesv2_configuration_set.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SESV2ServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -69,10 +70,11 @@ func TestAccSESV2ConfigurationSetDataSource_tags_NullMap(t *testing.T) { func TestAccSESV2ConfigurationSetDataSource_tags_EmptyMap(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_sesv2_configuration_set.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ 
PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SESV2ServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -93,10 +95,11 @@ func TestAccSESV2ConfigurationSetDataSource_tags_EmptyMap(t *testing.T) { func TestAccSESV2ConfigurationSetDataSource_tags_DefaultTags_nonOverlapping(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_sesv2_configuration_set.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SESV2ServiceID), Steps: []resource.TestStep{ @@ -125,10 +128,11 @@ func TestAccSESV2ConfigurationSetDataSource_tags_DefaultTags_nonOverlapping(t *t func TestAccSESV2ConfigurationSetDataSource_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_sesv2_configuration_set.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SESV2ServiceID), Steps: []resource.TestStep{ @@ -163,10 +167,11 @@ func TestAccSESV2ConfigurationSetDataSource_tags_IgnoreTags_Overlap_DefaultTag(t func TestAccSESV2ConfigurationSetDataSource_tags_IgnoreTags_Overlap_ResourceTag(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_sesv2_configuration_set.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: 
acctest.ErrorCheck(t, names.SESV2ServiceID), Steps: []resource.TestStep{ diff --git a/internal/service/sesv2/configuration_set_event_destination.go b/internal/service/sesv2/configuration_set_event_destination.go index da7beecf99f5..b737268b4818 100644 --- a/internal/service/sesv2/configuration_set_event_destination.go +++ b/internal/service/sesv2/configuration_set_event_destination.go @@ -223,7 +223,7 @@ func resourceConfigurationSetEventDestinationCreate(ctx context.Context, d *sche configurationSetEventDestinationID := configurationSetEventDestinationCreateResourceID(d.Get("configuration_set_name").(string), d.Get("event_destination_name").(string)) out, err := tfresource.RetryWhen(ctx, propagationTimeout, - func() (any, error) { + func(ctx context.Context) (any, error) { return conn.CreateConfigurationSetEventDestination(ctx, in) }, func(err error) (bool, error) { @@ -297,7 +297,7 @@ func resourceConfigurationSetEventDestinationUpdate(ctx context.Context, d *sche log.Printf("[DEBUG] Updating SESV2 ConfigurationSetEventDestination (%s): %#v", d.Id(), in) _, err := tfresource.RetryWhen(ctx, propagationTimeout, - func() (any, error) { + func(ctx context.Context) (any, error) { return conn.UpdateConfigurationSetEventDestination(ctx, in) }, func(err error) (bool, error) { diff --git a/internal/service/sesv2/configuration_set_tags_gen_test.go b/internal/service/sesv2/configuration_set_tags_gen_test.go index 52655d946f7d..3032e40f7904 100644 --- a/internal/service/sesv2/configuration_set_tags_gen_test.go +++ b/internal/service/sesv2/configuration_set_tags_gen_test.go @@ -6,7 +6,6 @@ import ( "testing" "github.com/hashicorp/terraform-plugin-testing/config" - sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/knownvalue" "github.com/hashicorp/terraform-plugin-testing/plancheck" @@ -18,10 +17,11 @@ import ( func 
TestAccSESV2ConfigurationSet_tags(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_sesv2_configuration_set.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SESV2ServiceID), CheckDestroy: testAccCheckConfigurationSetDestroy(ctx), @@ -199,10 +199,11 @@ func TestAccSESV2ConfigurationSet_tags(t *testing.T) { func TestAccSESV2ConfigurationSet_tags_null(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_sesv2_configuration_set.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SESV2ServiceID), CheckDestroy: testAccCheckConfigurationSetDestroy(ctx), @@ -265,10 +266,11 @@ func TestAccSESV2ConfigurationSet_tags_null(t *testing.T) { func TestAccSESV2ConfigurationSet_tags_EmptyMap(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_sesv2_configuration_set.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SESV2ServiceID), CheckDestroy: testAccCheckConfigurationSetDestroy(ctx), @@ -327,10 +329,11 @@ func TestAccSESV2ConfigurationSet_tags_EmptyMap(t *testing.T) { func TestAccSESV2ConfigurationSet_tags_AddOnUpdate(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_sesv2_configuration_set.test" - rName := 
sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SESV2ServiceID), CheckDestroy: testAccCheckConfigurationSetDestroy(ctx), @@ -407,10 +410,11 @@ func TestAccSESV2ConfigurationSet_tags_AddOnUpdate(t *testing.T) { func TestAccSESV2ConfigurationSet_tags_EmptyTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_sesv2_configuration_set.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SESV2ServiceID), CheckDestroy: testAccCheckConfigurationSetDestroy(ctx), @@ -495,10 +499,11 @@ func TestAccSESV2ConfigurationSet_tags_EmptyTag_OnCreate(t *testing.T) { func TestAccSESV2ConfigurationSet_tags_EmptyTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_sesv2_configuration_set.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SESV2ServiceID), CheckDestroy: testAccCheckConfigurationSetDestroy(ctx), @@ -631,10 +636,11 @@ func TestAccSESV2ConfigurationSet_tags_EmptyTag_OnUpdate_Add(t *testing.T) { func TestAccSESV2ConfigurationSet_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_sesv2_configuration_set.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, 
acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SESV2ServiceID), CheckDestroy: testAccCheckConfigurationSetDestroy(ctx), @@ -719,10 +725,11 @@ func TestAccSESV2ConfigurationSet_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { func TestAccSESV2ConfigurationSet_tags_DefaultTags_providerOnly(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_sesv2_configuration_set.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SESV2ServiceID), CheckDestroy: testAccCheckConfigurationSetDestroy(ctx), @@ -899,10 +906,11 @@ func TestAccSESV2ConfigurationSet_tags_DefaultTags_providerOnly(t *testing.T) { func TestAccSESV2ConfigurationSet_tags_DefaultTags_nonOverlapping(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_sesv2_configuration_set.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SESV2ServiceID), CheckDestroy: testAccCheckConfigurationSetDestroy(ctx), @@ -1058,10 +1066,11 @@ func TestAccSESV2ConfigurationSet_tags_DefaultTags_nonOverlapping(t *testing.T) func TestAccSESV2ConfigurationSet_tags_DefaultTags_overlapping(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_sesv2_configuration_set.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, 
resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SESV2ServiceID), CheckDestroy: testAccCheckConfigurationSetDestroy(ctx), @@ -1233,10 +1242,11 @@ func TestAccSESV2ConfigurationSet_tags_DefaultTags_overlapping(t *testing.T) { func TestAccSESV2ConfigurationSet_tags_DefaultTags_updateToProviderOnly(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_sesv2_configuration_set.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SESV2ServiceID), CheckDestroy: testAccCheckConfigurationSetDestroy(ctx), @@ -1322,10 +1332,11 @@ func TestAccSESV2ConfigurationSet_tags_DefaultTags_updateToProviderOnly(t *testi func TestAccSESV2ConfigurationSet_tags_DefaultTags_updateToResourceOnly(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_sesv2_configuration_set.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SESV2ServiceID), CheckDestroy: testAccCheckConfigurationSetDestroy(ctx), @@ -1410,10 +1421,11 @@ func TestAccSESV2ConfigurationSet_tags_DefaultTags_updateToResourceOnly(t *testi func TestAccSESV2ConfigurationSet_tags_DefaultTags_emptyResourceTag(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_sesv2_configuration_set.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + 
acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SESV2ServiceID), CheckDestroy: testAccCheckConfigurationSetDestroy(ctx), @@ -1474,10 +1486,11 @@ func TestAccSESV2ConfigurationSet_tags_DefaultTags_emptyResourceTag(t *testing.T func TestAccSESV2ConfigurationSet_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_sesv2_configuration_set.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SESV2ServiceID), CheckDestroy: testAccCheckConfigurationSetDestroy(ctx), @@ -1530,10 +1543,11 @@ func TestAccSESV2ConfigurationSet_tags_DefaultTags_emptyProviderOnlyTag(t *testi func TestAccSESV2ConfigurationSet_tags_DefaultTags_nullOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_sesv2_configuration_set.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SESV2ServiceID), CheckDestroy: testAccCheckConfigurationSetDestroy(ctx), @@ -1591,10 +1605,11 @@ func TestAccSESV2ConfigurationSet_tags_DefaultTags_nullOverlappingResourceTag(t func TestAccSESV2ConfigurationSet_tags_DefaultTags_nullNonOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_sesv2_configuration_set.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + 
acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SESV2ServiceID), CheckDestroy: testAccCheckConfigurationSetDestroy(ctx), @@ -1652,10 +1667,11 @@ func TestAccSESV2ConfigurationSet_tags_DefaultTags_nullNonOverlappingResourceTag func TestAccSESV2ConfigurationSet_tags_ComputedTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_sesv2_configuration_set.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SESV2ServiceID), CheckDestroy: testAccCheckConfigurationSetDestroy(ctx), @@ -1706,10 +1722,11 @@ func TestAccSESV2ConfigurationSet_tags_ComputedTag_OnCreate(t *testing.T) { func TestAccSESV2ConfigurationSet_tags_ComputedTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_sesv2_configuration_set.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SESV2ServiceID), CheckDestroy: testAccCheckConfigurationSetDestroy(ctx), @@ -1802,10 +1819,11 @@ func TestAccSESV2ConfigurationSet_tags_ComputedTag_OnUpdate_Add(t *testing.T) { func TestAccSESV2ConfigurationSet_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_sesv2_configuration_set.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { 
acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SESV2ServiceID), CheckDestroy: testAccCheckConfigurationSetDestroy(ctx), @@ -1888,10 +1906,11 @@ func TestAccSESV2ConfigurationSet_tags_ComputedTag_OnUpdate_Replace(t *testing.T func TestAccSESV2ConfigurationSet_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_sesv2_configuration_set.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SESV2ServiceID), CheckDestroy: testAccCheckConfigurationSetDestroy(ctx), @@ -2049,10 +2068,11 @@ func TestAccSESV2ConfigurationSet_tags_IgnoreTags_Overlap_DefaultTag(t *testing. func TestAccSESV2ConfigurationSet_tags_IgnoreTags_Overlap_ResourceTag(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_sesv2_configuration_set.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SESV2ServiceID), CheckDestroy: testAccCheckConfigurationSetDestroy(ctx), diff --git a/internal/service/sesv2/contact_list_tags_gen_test.go b/internal/service/sesv2/contact_list_tags_gen_test.go index 5eae5ca551db..ccd7ff2ad9c5 100644 --- a/internal/service/sesv2/contact_list_tags_gen_test.go +++ b/internal/service/sesv2/contact_list_tags_gen_test.go @@ -6,7 +6,6 @@ import ( "testing" "github.com/hashicorp/terraform-plugin-testing/config" - sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" 
"github.com/hashicorp/terraform-plugin-testing/knownvalue" "github.com/hashicorp/terraform-plugin-testing/plancheck" @@ -47,10 +46,11 @@ func testAccSESV2ContactList_tagsSerial(t *testing.T) { func testAccSESV2ContactList_tags(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_sesv2_contact_list.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SESV2ServiceID), CheckDestroy: testAccCheckContactListDestroy(ctx), @@ -228,10 +228,11 @@ func testAccSESV2ContactList_tags(t *testing.T) { func testAccSESV2ContactList_tags_null(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_sesv2_contact_list.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SESV2ServiceID), CheckDestroy: testAccCheckContactListDestroy(ctx), @@ -294,10 +295,11 @@ func testAccSESV2ContactList_tags_null(t *testing.T) { func testAccSESV2ContactList_tags_EmptyMap(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_sesv2_contact_list.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SESV2ServiceID), CheckDestroy: testAccCheckContactListDestroy(ctx), @@ -356,10 +358,11 @@ func testAccSESV2ContactList_tags_EmptyMap(t *testing.T) { func testAccSESV2ContactList_tags_AddOnUpdate(t *testing.T) { ctx := acctest.Context(t) + resourceName := 
"aws_sesv2_contact_list.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SESV2ServiceID), CheckDestroy: testAccCheckContactListDestroy(ctx), @@ -436,10 +439,11 @@ func testAccSESV2ContactList_tags_AddOnUpdate(t *testing.T) { func testAccSESV2ContactList_tags_EmptyTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_sesv2_contact_list.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SESV2ServiceID), CheckDestroy: testAccCheckContactListDestroy(ctx), @@ -524,10 +528,11 @@ func testAccSESV2ContactList_tags_EmptyTag_OnCreate(t *testing.T) { func testAccSESV2ContactList_tags_EmptyTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_sesv2_contact_list.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SESV2ServiceID), CheckDestroy: testAccCheckContactListDestroy(ctx), @@ -660,10 +665,11 @@ func testAccSESV2ContactList_tags_EmptyTag_OnUpdate_Add(t *testing.T) { func testAccSESV2ContactList_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_sesv2_contact_list.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + 
acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SESV2ServiceID), CheckDestroy: testAccCheckContactListDestroy(ctx), @@ -748,10 +754,11 @@ func testAccSESV2ContactList_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { func testAccSESV2ContactList_tags_DefaultTags_providerOnly(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_sesv2_contact_list.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SESV2ServiceID), CheckDestroy: testAccCheckContactListDestroy(ctx), @@ -928,10 +935,11 @@ func testAccSESV2ContactList_tags_DefaultTags_providerOnly(t *testing.T) { func testAccSESV2ContactList_tags_DefaultTags_nonOverlapping(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_sesv2_contact_list.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SESV2ServiceID), CheckDestroy: testAccCheckContactListDestroy(ctx), @@ -1087,10 +1095,11 @@ func testAccSESV2ContactList_tags_DefaultTags_nonOverlapping(t *testing.T) { func testAccSESV2ContactList_tags_DefaultTags_overlapping(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_sesv2_contact_list.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SESV2ServiceID), CheckDestroy: 
testAccCheckContactListDestroy(ctx), @@ -1262,10 +1271,11 @@ func testAccSESV2ContactList_tags_DefaultTags_overlapping(t *testing.T) { func testAccSESV2ContactList_tags_DefaultTags_updateToProviderOnly(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_sesv2_contact_list.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SESV2ServiceID), CheckDestroy: testAccCheckContactListDestroy(ctx), @@ -1351,10 +1361,11 @@ func testAccSESV2ContactList_tags_DefaultTags_updateToProviderOnly(t *testing.T) func testAccSESV2ContactList_tags_DefaultTags_updateToResourceOnly(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_sesv2_contact_list.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SESV2ServiceID), CheckDestroy: testAccCheckContactListDestroy(ctx), @@ -1439,10 +1450,11 @@ func testAccSESV2ContactList_tags_DefaultTags_updateToResourceOnly(t *testing.T) func testAccSESV2ContactList_tags_DefaultTags_emptyResourceTag(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_sesv2_contact_list.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SESV2ServiceID), CheckDestroy: testAccCheckContactListDestroy(ctx), @@ -1503,10 +1515,11 @@ func testAccSESV2ContactList_tags_DefaultTags_emptyResourceTag(t *testing.T) { 
func testAccSESV2ContactList_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_sesv2_contact_list.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SESV2ServiceID), CheckDestroy: testAccCheckContactListDestroy(ctx), @@ -1559,10 +1572,11 @@ func testAccSESV2ContactList_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T) func testAccSESV2ContactList_tags_DefaultTags_nullOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_sesv2_contact_list.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SESV2ServiceID), CheckDestroy: testAccCheckContactListDestroy(ctx), @@ -1620,10 +1634,11 @@ func testAccSESV2ContactList_tags_DefaultTags_nullOverlappingResourceTag(t *test func testAccSESV2ContactList_tags_DefaultTags_nullNonOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_sesv2_contact_list.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SESV2ServiceID), CheckDestroy: testAccCheckContactListDestroy(ctx), @@ -1681,10 +1696,11 @@ func testAccSESV2ContactList_tags_DefaultTags_nullNonOverlappingResourceTag(t *t func testAccSESV2ContactList_tags_ComputedTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) + resourceName := 
"aws_sesv2_contact_list.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SESV2ServiceID), CheckDestroy: testAccCheckContactListDestroy(ctx), @@ -1735,10 +1751,11 @@ func testAccSESV2ContactList_tags_ComputedTag_OnCreate(t *testing.T) { func testAccSESV2ContactList_tags_ComputedTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_sesv2_contact_list.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SESV2ServiceID), CheckDestroy: testAccCheckContactListDestroy(ctx), @@ -1831,10 +1848,11 @@ func testAccSESV2ContactList_tags_ComputedTag_OnUpdate_Add(t *testing.T) { func testAccSESV2ContactList_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_sesv2_contact_list.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SESV2ServiceID), CheckDestroy: testAccCheckContactListDestroy(ctx), @@ -1917,10 +1935,11 @@ func testAccSESV2ContactList_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { func testAccSESV2ContactList_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_sesv2_contact_list.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - 
resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SESV2ServiceID), CheckDestroy: testAccCheckContactListDestroy(ctx), @@ -2078,10 +2097,11 @@ func testAccSESV2ContactList_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { func testAccSESV2ContactList_tags_IgnoreTags_Overlap_ResourceTag(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_sesv2_contact_list.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SESV2ServiceID), CheckDestroy: testAccCheckContactListDestroy(ctx), diff --git a/internal/service/sesv2/dedicated_ip_pool_data_source_tags_gen_test.go b/internal/service/sesv2/dedicated_ip_pool_data_source_tags_gen_test.go index 4a4ae413e137..7c068cb08f85 100644 --- a/internal/service/sesv2/dedicated_ip_pool_data_source_tags_gen_test.go +++ b/internal/service/sesv2/dedicated_ip_pool_data_source_tags_gen_test.go @@ -6,7 +6,6 @@ import ( "testing" "github.com/hashicorp/terraform-plugin-testing/config" - sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/knownvalue" "github.com/hashicorp/terraform-plugin-testing/statecheck" @@ -17,10 +16,11 @@ import ( func TestAccSESV2DedicatedIPPoolDataSource_tags(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_sesv2_dedicated_ip_pool.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, 
ErrorCheck: acctest.ErrorCheck(t, names.SESV2ServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -45,10 +45,11 @@ func TestAccSESV2DedicatedIPPoolDataSource_tags(t *testing.T) { func TestAccSESV2DedicatedIPPoolDataSource_tags_NullMap(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_sesv2_dedicated_ip_pool.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SESV2ServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -69,10 +70,11 @@ func TestAccSESV2DedicatedIPPoolDataSource_tags_NullMap(t *testing.T) { func TestAccSESV2DedicatedIPPoolDataSource_tags_EmptyMap(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_sesv2_dedicated_ip_pool.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SESV2ServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -93,10 +95,11 @@ func TestAccSESV2DedicatedIPPoolDataSource_tags_EmptyMap(t *testing.T) { func TestAccSESV2DedicatedIPPoolDataSource_tags_DefaultTags_nonOverlapping(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_sesv2_dedicated_ip_pool.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SESV2ServiceID), Steps: 
[]resource.TestStep{ @@ -125,10 +128,11 @@ func TestAccSESV2DedicatedIPPoolDataSource_tags_DefaultTags_nonOverlapping(t *te func TestAccSESV2DedicatedIPPoolDataSource_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_sesv2_dedicated_ip_pool.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SESV2ServiceID), Steps: []resource.TestStep{ @@ -163,10 +167,11 @@ func TestAccSESV2DedicatedIPPoolDataSource_tags_IgnoreTags_Overlap_DefaultTag(t func TestAccSESV2DedicatedIPPoolDataSource_tags_IgnoreTags_Overlap_ResourceTag(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_sesv2_dedicated_ip_pool.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SESV2ServiceID), Steps: []resource.TestStep{ diff --git a/internal/service/sesv2/dedicated_ip_pool_tags_gen_test.go b/internal/service/sesv2/dedicated_ip_pool_tags_gen_test.go index 8306c45d63cf..03fa4e6a3799 100644 --- a/internal/service/sesv2/dedicated_ip_pool_tags_gen_test.go +++ b/internal/service/sesv2/dedicated_ip_pool_tags_gen_test.go @@ -6,7 +6,6 @@ import ( "testing" "github.com/hashicorp/terraform-plugin-testing/config" - sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/knownvalue" "github.com/hashicorp/terraform-plugin-testing/plancheck" @@ -18,10 +17,11 @@ import ( func 
TestAccSESV2DedicatedIPPool_tags(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_sesv2_dedicated_ip_pool.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SESV2ServiceID), CheckDestroy: testAccCheckDedicatedIPPoolDestroy(ctx), @@ -199,10 +199,11 @@ func TestAccSESV2DedicatedIPPool_tags(t *testing.T) { func TestAccSESV2DedicatedIPPool_tags_null(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_sesv2_dedicated_ip_pool.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SESV2ServiceID), CheckDestroy: testAccCheckDedicatedIPPoolDestroy(ctx), @@ -265,10 +266,11 @@ func TestAccSESV2DedicatedIPPool_tags_null(t *testing.T) { func TestAccSESV2DedicatedIPPool_tags_EmptyMap(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_sesv2_dedicated_ip_pool.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SESV2ServiceID), CheckDestroy: testAccCheckDedicatedIPPoolDestroy(ctx), @@ -327,10 +329,11 @@ func TestAccSESV2DedicatedIPPool_tags_EmptyMap(t *testing.T) { func TestAccSESV2DedicatedIPPool_tags_AddOnUpdate(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_sesv2_dedicated_ip_pool.test" - rName := 
sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SESV2ServiceID), CheckDestroy: testAccCheckDedicatedIPPoolDestroy(ctx), @@ -407,10 +410,11 @@ func TestAccSESV2DedicatedIPPool_tags_AddOnUpdate(t *testing.T) { func TestAccSESV2DedicatedIPPool_tags_EmptyTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_sesv2_dedicated_ip_pool.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SESV2ServiceID), CheckDestroy: testAccCheckDedicatedIPPoolDestroy(ctx), @@ -495,10 +499,11 @@ func TestAccSESV2DedicatedIPPool_tags_EmptyTag_OnCreate(t *testing.T) { func TestAccSESV2DedicatedIPPool_tags_EmptyTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_sesv2_dedicated_ip_pool.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SESV2ServiceID), CheckDestroy: testAccCheckDedicatedIPPoolDestroy(ctx), @@ -631,10 +636,11 @@ func TestAccSESV2DedicatedIPPool_tags_EmptyTag_OnUpdate_Add(t *testing.T) { func TestAccSESV2DedicatedIPPool_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_sesv2_dedicated_ip_pool.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, 
acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SESV2ServiceID), CheckDestroy: testAccCheckDedicatedIPPoolDestroy(ctx), @@ -719,10 +725,11 @@ func TestAccSESV2DedicatedIPPool_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { func TestAccSESV2DedicatedIPPool_tags_DefaultTags_providerOnly(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_sesv2_dedicated_ip_pool.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SESV2ServiceID), CheckDestroy: testAccCheckDedicatedIPPoolDestroy(ctx), @@ -899,10 +906,11 @@ func TestAccSESV2DedicatedIPPool_tags_DefaultTags_providerOnly(t *testing.T) { func TestAccSESV2DedicatedIPPool_tags_DefaultTags_nonOverlapping(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_sesv2_dedicated_ip_pool.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SESV2ServiceID), CheckDestroy: testAccCheckDedicatedIPPoolDestroy(ctx), @@ -1058,10 +1066,11 @@ func TestAccSESV2DedicatedIPPool_tags_DefaultTags_nonOverlapping(t *testing.T) { func TestAccSESV2DedicatedIPPool_tags_DefaultTags_overlapping(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_sesv2_dedicated_ip_pool.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + 
acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SESV2ServiceID), CheckDestroy: testAccCheckDedicatedIPPoolDestroy(ctx), @@ -1233,10 +1242,11 @@ func TestAccSESV2DedicatedIPPool_tags_DefaultTags_overlapping(t *testing.T) { func TestAccSESV2DedicatedIPPool_tags_DefaultTags_updateToProviderOnly(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_sesv2_dedicated_ip_pool.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SESV2ServiceID), CheckDestroy: testAccCheckDedicatedIPPoolDestroy(ctx), @@ -1322,10 +1332,11 @@ func TestAccSESV2DedicatedIPPool_tags_DefaultTags_updateToProviderOnly(t *testin func TestAccSESV2DedicatedIPPool_tags_DefaultTags_updateToResourceOnly(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_sesv2_dedicated_ip_pool.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SESV2ServiceID), CheckDestroy: testAccCheckDedicatedIPPoolDestroy(ctx), @@ -1410,10 +1421,11 @@ func TestAccSESV2DedicatedIPPool_tags_DefaultTags_updateToResourceOnly(t *testin func TestAccSESV2DedicatedIPPool_tags_DefaultTags_emptyResourceTag(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_sesv2_dedicated_ip_pool.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ 
PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SESV2ServiceID), CheckDestroy: testAccCheckDedicatedIPPoolDestroy(ctx), @@ -1474,10 +1486,11 @@ func TestAccSESV2DedicatedIPPool_tags_DefaultTags_emptyResourceTag(t *testing.T) func TestAccSESV2DedicatedIPPool_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_sesv2_dedicated_ip_pool.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SESV2ServiceID), CheckDestroy: testAccCheckDedicatedIPPoolDestroy(ctx), @@ -1530,10 +1543,11 @@ func TestAccSESV2DedicatedIPPool_tags_DefaultTags_emptyProviderOnlyTag(t *testin func TestAccSESV2DedicatedIPPool_tags_DefaultTags_nullOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_sesv2_dedicated_ip_pool.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SESV2ServiceID), CheckDestroy: testAccCheckDedicatedIPPoolDestroy(ctx), @@ -1591,10 +1605,11 @@ func TestAccSESV2DedicatedIPPool_tags_DefaultTags_nullOverlappingResourceTag(t * func TestAccSESV2DedicatedIPPool_tags_DefaultTags_nullNonOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_sesv2_dedicated_ip_pool.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { 
acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SESV2ServiceID), CheckDestroy: testAccCheckDedicatedIPPoolDestroy(ctx), @@ -1652,10 +1667,11 @@ func TestAccSESV2DedicatedIPPool_tags_DefaultTags_nullNonOverlappingResourceTag( func TestAccSESV2DedicatedIPPool_tags_ComputedTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_sesv2_dedicated_ip_pool.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SESV2ServiceID), CheckDestroy: testAccCheckDedicatedIPPoolDestroy(ctx), @@ -1706,10 +1722,11 @@ func TestAccSESV2DedicatedIPPool_tags_ComputedTag_OnCreate(t *testing.T) { func TestAccSESV2DedicatedIPPool_tags_ComputedTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_sesv2_dedicated_ip_pool.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SESV2ServiceID), CheckDestroy: testAccCheckDedicatedIPPoolDestroy(ctx), @@ -1802,10 +1819,11 @@ func TestAccSESV2DedicatedIPPool_tags_ComputedTag_OnUpdate_Add(t *testing.T) { func TestAccSESV2DedicatedIPPool_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_sesv2_dedicated_ip_pool.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, 
names.SESV2ServiceID), CheckDestroy: testAccCheckDedicatedIPPoolDestroy(ctx), @@ -1888,10 +1906,11 @@ func TestAccSESV2DedicatedIPPool_tags_ComputedTag_OnUpdate_Replace(t *testing.T) func TestAccSESV2DedicatedIPPool_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_sesv2_dedicated_ip_pool.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SESV2ServiceID), CheckDestroy: testAccCheckDedicatedIPPoolDestroy(ctx), @@ -2049,10 +2068,11 @@ func TestAccSESV2DedicatedIPPool_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T func TestAccSESV2DedicatedIPPool_tags_IgnoreTags_Overlap_ResourceTag(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_sesv2_dedicated_ip_pool.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SESV2ServiceID), CheckDestroy: testAccCheckDedicatedIPPoolDestroy(ctx), diff --git a/internal/service/sesv2/email_identity.go b/internal/service/sesv2/email_identity.go index fddaecc4d192..0e964b7a62ae 100644 --- a/internal/service/sesv2/email_identity.go +++ b/internal/service/sesv2/email_identity.go @@ -6,7 +6,6 @@ package sesv2 import ( "context" "errors" - "fmt" "log" "time" @@ -115,6 +114,10 @@ func resourceEmailIdentity() *schema.Resource { }, names.AttrTags: tftags.TagsSchema(), names.AttrTagsAll: tftags.TagsSchemaComputed(), + "verification_status": { + Type: schema.TypeString, + Computed: true, + }, "verified_for_sending_status": { Type: schema.TypeBool, Computed: true, @@ 
-160,7 +163,8 @@ func resourceEmailIdentityCreate(ctx context.Context, d *schema.ResourceData, me func resourceEmailIdentityRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).SESV2Client(ctx) + c := meta.(*conns.AWSClient) + conn := c.SESV2Client(ctx) out, err := findEmailIdentityByID(ctx, conn, d.Id()) @@ -174,10 +178,9 @@ func resourceEmailIdentityRead(ctx context.Context, d *schema.ResourceData, meta return create.AppendDiagError(diags, names.SESV2, create.ErrActionReading, resNameEmailIdentity, d.Id(), err) } - d.Set(names.AttrARN, emailIdentityARN(ctx, meta.(*conns.AWSClient), d.Id())) + d.Set(names.AttrARN, emailIdentityARN(ctx, c, d.Id())) d.Set("configuration_set_name", out.ConfigurationSetName) d.Set("email_identity", d.Id()) - if out.DkimAttributes != nil { tfMap := flattenDKIMAttributes(out.DkimAttributes) tfMap["domain_signing_private_key"] = d.Get("dkim_signing_attributes.0.domain_signing_private_key").(string) @@ -189,8 +192,8 @@ func resourceEmailIdentityRead(ctx context.Context, d *schema.ResourceData, meta } else { d.Set("dkim_signing_attributes", nil) } - - d.Set("identity_type", string(out.IdentityType)) + d.Set("identity_type", out.IdentityType) + d.Set("verification_status", out.VerificationStatus) d.Set("verified_for_sending_status", out.VerifiedForSendingStatus) return diags @@ -350,5 +353,5 @@ func flattenDKIMAttributes(apiObject *types.DkimAttributes) map[string]any { } func emailIdentityARN(ctx context.Context, c *conns.AWSClient, emailIdentityName string) string { - return c.RegionalARN(ctx, "ses", fmt.Sprintf("identity/%s", emailIdentityName)) + return c.RegionalARN(ctx, "ses", "identity/"+emailIdentityName) } diff --git a/internal/service/sesv2/email_identity_data_source.go b/internal/service/sesv2/email_identity_data_source.go index 119f29bfc67b..3bee9e58135c 100644 --- a/internal/service/sesv2/email_identity_data_source.go +++ 
b/internal/service/sesv2/email_identity_data_source.go @@ -80,6 +80,10 @@ func dataSourceEmailIdentity() *schema.Resource { Computed: true, }, names.AttrTags: tftags.TagsSchemaComputed(), + "verification_status": { + Type: schema.TypeString, + Computed: true, + }, "verified_for_sending_status": { Type: schema.TypeBool, Computed: true, @@ -94,7 +98,8 @@ const ( func dataSourceEmailIdentityRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).SESV2Client(ctx) + c := meta.(*conns.AWSClient) + conn := c.SESV2Client(ctx) name := d.Get("email_identity").(string) @@ -104,10 +109,9 @@ func dataSourceEmailIdentityRead(ctx context.Context, d *schema.ResourceData, me } d.SetId(name) - d.Set(names.AttrARN, emailIdentityARN(ctx, meta.(*conns.AWSClient), name)) + d.Set(names.AttrARN, emailIdentityARN(ctx, c, name)) d.Set("configuration_set_name", out.ConfigurationSetName) d.Set("email_identity", name) - if out.DkimAttributes != nil { tfMap := flattenDKIMAttributes(out.DkimAttributes) tfMap["domain_signing_private_key"] = d.Get("dkim_signing_attributes.0.domain_signing_private_key").(string) @@ -119,8 +123,8 @@ func dataSourceEmailIdentityRead(ctx context.Context, d *schema.ResourceData, me } else { d.Set("dkim_signing_attributes", nil) } - - d.Set("identity_type", string(out.IdentityType)) + d.Set("identity_type", out.IdentityType) + d.Set("verification_status", out.VerificationStatus) d.Set("verified_for_sending_status", out.VerifiedForSendingStatus) return diags diff --git a/internal/service/sesv2/email_identity_data_source_tags_gen_test.go b/internal/service/sesv2/email_identity_data_source_tags_gen_test.go index 6ac19ba8dac0..9f716b49899d 100644 --- a/internal/service/sesv2/email_identity_data_source_tags_gen_test.go +++ b/internal/service/sesv2/email_identity_data_source_tags_gen_test.go @@ -6,7 +6,6 @@ import ( "testing" "github.com/hashicorp/terraform-plugin-testing/config" - sdkacctest 
"github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/knownvalue" "github.com/hashicorp/terraform-plugin-testing/statecheck" @@ -17,10 +16,11 @@ import ( func TestAccSESV2EmailIdentityDataSource_tags(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_sesv2_email_identity.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SESV2ServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -45,10 +45,11 @@ func TestAccSESV2EmailIdentityDataSource_tags(t *testing.T) { func TestAccSESV2EmailIdentityDataSource_tags_NullMap(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_sesv2_email_identity.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SESV2ServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -69,10 +70,11 @@ func TestAccSESV2EmailIdentityDataSource_tags_NullMap(t *testing.T) { func TestAccSESV2EmailIdentityDataSource_tags_EmptyMap(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_sesv2_email_identity.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SESV2ServiceID), 
ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -93,10 +95,11 @@ func TestAccSESV2EmailIdentityDataSource_tags_EmptyMap(t *testing.T) { func TestAccSESV2EmailIdentityDataSource_tags_DefaultTags_nonOverlapping(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_sesv2_email_identity.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SESV2ServiceID), Steps: []resource.TestStep{ @@ -125,10 +128,11 @@ func TestAccSESV2EmailIdentityDataSource_tags_DefaultTags_nonOverlapping(t *test func TestAccSESV2EmailIdentityDataSource_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_sesv2_email_identity.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SESV2ServiceID), Steps: []resource.TestStep{ @@ -163,10 +167,11 @@ func TestAccSESV2EmailIdentityDataSource_tags_IgnoreTags_Overlap_DefaultTag(t *t func TestAccSESV2EmailIdentityDataSource_tags_IgnoreTags_Overlap_ResourceTag(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_sesv2_email_identity.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SESV2ServiceID), Steps: []resource.TestStep{ diff --git 
a/internal/service/sesv2/email_identity_data_source_test.go b/internal/service/sesv2/email_identity_data_source_test.go index 61d9d1465993..d34e5994573a 100644 --- a/internal/service/sesv2/email_identity_data_source_test.go +++ b/internal/service/sesv2/email_identity_data_source_test.go @@ -39,6 +39,7 @@ func TestAccSESV2EmailIdentityDataSource_basic(t *testing.T) { resource.TestCheckResourceAttrPair(resourceName, "dkim_signing_attributes.0.status", dataSourceName, "dkim_signing_attributes.0.status"), resource.TestCheckResourceAttrPair(resourceName, "dkim_signing_attributes.0.tokens.#", dataSourceName, "dkim_signing_attributes.0.tokens.#"), resource.TestCheckResourceAttrPair(resourceName, "identity_type", dataSourceName, "identity_type"), + resource.TestCheckResourceAttrPair(resourceName, "verification_status", dataSourceName, "verification_status"), resource.TestCheckResourceAttrPair(resourceName, "verified_for_sending_status", dataSourceName, "verified_for_sending_status"), ), }, diff --git a/internal/service/sesv2/email_identity_tags_gen_test.go b/internal/service/sesv2/email_identity_tags_gen_test.go index 8aa06db87d6b..f97f8c88eb56 100644 --- a/internal/service/sesv2/email_identity_tags_gen_test.go +++ b/internal/service/sesv2/email_identity_tags_gen_test.go @@ -6,7 +6,6 @@ import ( "testing" "github.com/hashicorp/terraform-plugin-testing/config" - sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/knownvalue" "github.com/hashicorp/terraform-plugin-testing/plancheck" @@ -18,10 +17,11 @@ import ( func TestAccSESV2EmailIdentity_tags(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_sesv2_email_identity.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, 
resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SESV2ServiceID), CheckDestroy: testAccCheckEmailIdentityDestroy(ctx), @@ -199,10 +199,11 @@ func TestAccSESV2EmailIdentity_tags(t *testing.T) { func TestAccSESV2EmailIdentity_tags_null(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_sesv2_email_identity.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SESV2ServiceID), CheckDestroy: testAccCheckEmailIdentityDestroy(ctx), @@ -265,10 +266,11 @@ func TestAccSESV2EmailIdentity_tags_null(t *testing.T) { func TestAccSESV2EmailIdentity_tags_EmptyMap(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_sesv2_email_identity.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SESV2ServiceID), CheckDestroy: testAccCheckEmailIdentityDestroy(ctx), @@ -327,10 +329,11 @@ func TestAccSESV2EmailIdentity_tags_EmptyMap(t *testing.T) { func TestAccSESV2EmailIdentity_tags_AddOnUpdate(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_sesv2_email_identity.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SESV2ServiceID), CheckDestroy: testAccCheckEmailIdentityDestroy(ctx), @@ -407,10 +410,11 @@ func 
TestAccSESV2EmailIdentity_tags_AddOnUpdate(t *testing.T) { func TestAccSESV2EmailIdentity_tags_EmptyTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_sesv2_email_identity.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SESV2ServiceID), CheckDestroy: testAccCheckEmailIdentityDestroy(ctx), @@ -495,10 +499,11 @@ func TestAccSESV2EmailIdentity_tags_EmptyTag_OnCreate(t *testing.T) { func TestAccSESV2EmailIdentity_tags_EmptyTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_sesv2_email_identity.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SESV2ServiceID), CheckDestroy: testAccCheckEmailIdentityDestroy(ctx), @@ -631,10 +636,11 @@ func TestAccSESV2EmailIdentity_tags_EmptyTag_OnUpdate_Add(t *testing.T) { func TestAccSESV2EmailIdentity_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_sesv2_email_identity.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SESV2ServiceID), CheckDestroy: testAccCheckEmailIdentityDestroy(ctx), @@ -719,10 +725,11 @@ func TestAccSESV2EmailIdentity_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { func TestAccSESV2EmailIdentity_tags_DefaultTags_providerOnly(t 
*testing.T) { ctx := acctest.Context(t) + resourceName := "aws_sesv2_email_identity.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SESV2ServiceID), CheckDestroy: testAccCheckEmailIdentityDestroy(ctx), @@ -899,10 +906,11 @@ func TestAccSESV2EmailIdentity_tags_DefaultTags_providerOnly(t *testing.T) { func TestAccSESV2EmailIdentity_tags_DefaultTags_nonOverlapping(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_sesv2_email_identity.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SESV2ServiceID), CheckDestroy: testAccCheckEmailIdentityDestroy(ctx), @@ -1058,10 +1066,11 @@ func TestAccSESV2EmailIdentity_tags_DefaultTags_nonOverlapping(t *testing.T) { func TestAccSESV2EmailIdentity_tags_DefaultTags_overlapping(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_sesv2_email_identity.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SESV2ServiceID), CheckDestroy: testAccCheckEmailIdentityDestroy(ctx), @@ -1233,10 +1242,11 @@ func TestAccSESV2EmailIdentity_tags_DefaultTags_overlapping(t *testing.T) { func TestAccSESV2EmailIdentity_tags_DefaultTags_updateToProviderOnly(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_sesv2_email_identity.test" - 
rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SESV2ServiceID), CheckDestroy: testAccCheckEmailIdentityDestroy(ctx), @@ -1322,10 +1332,11 @@ func TestAccSESV2EmailIdentity_tags_DefaultTags_updateToProviderOnly(t *testing. func TestAccSESV2EmailIdentity_tags_DefaultTags_updateToResourceOnly(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_sesv2_email_identity.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SESV2ServiceID), CheckDestroy: testAccCheckEmailIdentityDestroy(ctx), @@ -1410,10 +1421,11 @@ func TestAccSESV2EmailIdentity_tags_DefaultTags_updateToResourceOnly(t *testing. 
func TestAccSESV2EmailIdentity_tags_DefaultTags_emptyResourceTag(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_sesv2_email_identity.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SESV2ServiceID), CheckDestroy: testAccCheckEmailIdentityDestroy(ctx), @@ -1474,10 +1486,11 @@ func TestAccSESV2EmailIdentity_tags_DefaultTags_emptyResourceTag(t *testing.T) { func TestAccSESV2EmailIdentity_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_sesv2_email_identity.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SESV2ServiceID), CheckDestroy: testAccCheckEmailIdentityDestroy(ctx), @@ -1530,10 +1543,11 @@ func TestAccSESV2EmailIdentity_tags_DefaultTags_emptyProviderOnlyTag(t *testing. 
func TestAccSESV2EmailIdentity_tags_DefaultTags_nullOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_sesv2_email_identity.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SESV2ServiceID), CheckDestroy: testAccCheckEmailIdentityDestroy(ctx), @@ -1591,10 +1605,11 @@ func TestAccSESV2EmailIdentity_tags_DefaultTags_nullOverlappingResourceTag(t *te func TestAccSESV2EmailIdentity_tags_DefaultTags_nullNonOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_sesv2_email_identity.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SESV2ServiceID), CheckDestroy: testAccCheckEmailIdentityDestroy(ctx), @@ -1652,10 +1667,11 @@ func TestAccSESV2EmailIdentity_tags_DefaultTags_nullNonOverlappingResourceTag(t func TestAccSESV2EmailIdentity_tags_ComputedTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_sesv2_email_identity.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SESV2ServiceID), CheckDestroy: testAccCheckEmailIdentityDestroy(ctx), @@ -1706,10 +1722,11 @@ func TestAccSESV2EmailIdentity_tags_ComputedTag_OnCreate(t *testing.T) { func TestAccSESV2EmailIdentity_tags_ComputedTag_OnUpdate_Add(t 
*testing.T) { ctx := acctest.Context(t) + resourceName := "aws_sesv2_email_identity.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SESV2ServiceID), CheckDestroy: testAccCheckEmailIdentityDestroy(ctx), @@ -1802,10 +1819,11 @@ func TestAccSESV2EmailIdentity_tags_ComputedTag_OnUpdate_Add(t *testing.T) { func TestAccSESV2EmailIdentity_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_sesv2_email_identity.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SESV2ServiceID), CheckDestroy: testAccCheckEmailIdentityDestroy(ctx), @@ -1888,10 +1906,11 @@ func TestAccSESV2EmailIdentity_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { func TestAccSESV2EmailIdentity_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_sesv2_email_identity.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SESV2ServiceID), CheckDestroy: testAccCheckEmailIdentityDestroy(ctx), @@ -2049,10 +2068,11 @@ func TestAccSESV2EmailIdentity_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) func TestAccSESV2EmailIdentity_tags_IgnoreTags_Overlap_ResourceTag(t *testing.T) { ctx := acctest.Context(t) + resourceName := 
"aws_sesv2_email_identity.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SESV2ServiceID), CheckDestroy: testAccCheckEmailIdentityDestroy(ctx), diff --git a/internal/service/sesv2/email_identity_test.go b/internal/service/sesv2/email_identity_test.go index 495b84386a45..51f647b3655f 100644 --- a/internal/service/sesv2/email_identity_test.go +++ b/internal/service/sesv2/email_identity_test.go @@ -46,6 +46,7 @@ func TestAccSESV2EmailIdentity_basic_emailAddress(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "dkim_signing_attributes.0.status", "NOT_STARTED"), resource.TestCheckResourceAttr(resourceName, "dkim_signing_attributes.0.tokens.#", "0"), resource.TestCheckResourceAttr(resourceName, "identity_type", "EMAIL_ADDRESS"), + resource.TestCheckResourceAttr(resourceName, "verification_status", "PENDING"), resource.TestCheckResourceAttr(resourceName, "verified_for_sending_status", acctest.CtFalse), ), }, @@ -83,6 +84,7 @@ func TestAccSESV2EmailIdentity_basic_domain(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "dkim_signing_attributes.0.status", "PENDING"), resource.TestCheckResourceAttr(resourceName, "dkim_signing_attributes.0.tokens.#", "3"), resource.TestCheckResourceAttr(resourceName, "identity_type", "DOMAIN"), + resource.TestCheckResourceAttr(resourceName, "verification_status", "PENDING"), resource.TestCheckResourceAttr(resourceName, "verified_for_sending_status", acctest.CtFalse), ), }, diff --git a/internal/service/sesv2/service_endpoint_resolver_gen.go b/internal/service/sesv2/service_endpoint_resolver_gen.go index 7bb572de1434..4d55cfe2378e 100644 --- a/internal/service/sesv2/service_endpoint_resolver_gen.go +++ b/internal/service/sesv2/service_endpoint_resolver_gen.go 
@@ -62,7 +62,7 @@ func (r resolverV2) ResolveEndpoint(ctx context.Context, params sesv2.EndpointPa }) params.UseFIPS = aws.Bool(false) } else { - err = fmt.Errorf("looking up sesv2 endpoint %q: %s", hostname, err) + err = fmt.Errorf("looking up sesv2 endpoint %q: %w", hostname, err) return } } else { diff --git a/internal/service/sesv2/service_endpoints_gen_test.go b/internal/service/sesv2/service_endpoints_gen_test.go index 96705e14b9e6..eef108538fc2 100644 --- a/internal/service/sesv2/service_endpoints_gen_test.go +++ b/internal/service/sesv2/service_endpoints_gen_test.go @@ -521,7 +521,7 @@ func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { ) } -var errCancelOperation = fmt.Errorf("Test: Canceling request") +var errCancelOperation = errors.New("Test: Canceling request") func addCancelRequestMiddleware() func(*middleware.Stack) error { return func(stack *middleware.Stack) error { diff --git a/internal/service/sesv2/service_package_gen.go b/internal/service/sesv2/service_package_gen.go index c05b298ce07c..178e1cb33c1a 100644 --- a/internal/service/sesv2/service_package_gen.go +++ b/internal/service/sesv2/service_package_gen.go @@ -7,7 +7,6 @@ import ( "unique" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/sesv2" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -171,7 +170,7 @@ func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) ( func(o *sesv2.Options) { if inContext, ok := conns.FromContext(ctx); ok && inContext.VCREnabled() { tflog.Info(ctx, "overriding retry behavior to immediately return VCR errors") - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(vcr.InteractionNotFoundRetryableFunc)) + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), vcr.InteractionNotFoundRetryableFunc) } }, withExtraOptions(ctx, p, 
config), diff --git a/internal/service/sesv2/tags_gen.go b/internal/service/sesv2/tags_gen.go index 225cbfe24c95..ee861673bb62 100644 --- a/internal/service/sesv2/tags_gen.go +++ b/internal/service/sesv2/tags_gen.go @@ -3,8 +3,8 @@ package sesv2 import ( "context" - "fmt" + "github.com/YakDriver/smarterr" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/sesv2" awstypes "github.com/aws/aws-sdk-go-v2/service/sesv2/types" @@ -27,7 +27,7 @@ func listTags(ctx context.Context, conn *sesv2.Client, identifier string, optFns output, err := conn.ListTagsForResource(ctx, &input, optFns...) if err != nil { - return tftags.New(ctx, nil), err + return tftags.New(ctx, nil), smarterr.NewError(err) } return keyValueTags(ctx, output.Tags), nil @@ -39,7 +39,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri tags, err := listTags(ctx, meta.(*conns.AWSClient).SESV2Client(ctx), identifier) if err != nil { - return err + return smarterr.NewError(err) } if inContext, ok := tftags.FromContext(ctx); ok { @@ -117,7 +117,7 @@ func updateTags(ctx context.Context, conn *sesv2.Client, identifier string, oldT _, err := conn.UntagResource(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("untagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } @@ -132,7 +132,7 @@ func updateTags(ctx context.Context, conn *sesv2.Client, identifier string, oldT _, err := conn.TagResource(ctx, &input, optFns...) 
if err != nil { - return fmt.Errorf("tagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } diff --git a/internal/service/sfn/activity.go b/internal/service/sfn/activity.go index 83843031adb8..72263c3d533b 100644 --- a/internal/service/sfn/activity.go +++ b/internal/service/sfn/activity.go @@ -27,6 +27,8 @@ import ( // @SDKResource("aws_sfn_activity", name="Activity") // @Tags(identifierAttribute="id") +// @ArnIdentity +// @Testing(preIdentityVersion="v6.14.1") func resourceActivity() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceActivityCreate, @@ -34,11 +36,11 @@ func resourceActivity() *schema.Resource { UpdateWithoutTimeout: resourceActivityUpdate, DeleteWithoutTimeout: resourceActivityDelete, - Importer: &schema.ResourceImporter{ - StateContext: schema.ImportStatePassthroughContext, - }, - Schema: map[string]*schema.Schema{ + names.AttrARN: { + Type: schema.TypeString, + Computed: true, + }, names.AttrCreationDate: { Type: schema.TypeString, Computed: true, @@ -129,6 +131,7 @@ func resourceActivityRead(ctx context.Context, d *schema.ResourceData, meta any) } else { d.Set(names.AttrEncryptionConfiguration, nil) } + d.Set(names.AttrARN, output.ActivityArn) d.Set(names.AttrName, output.Name) return diags diff --git a/internal/service/sfn/activity_identity_gen_test.go b/internal/service/sfn/activity_identity_gen_test.go new file mode 100644 index 000000000000..1f6f83989d19 --- /dev/null +++ b/internal/service/sfn/activity_identity_gen_test.go @@ -0,0 +1,337 @@ +// Code generated by internal/generate/identitytests/main.go; DO NOT EDIT. 
+ +package sfn_test + +import ( + "testing" + + "github.com/hashicorp/terraform-plugin-testing/compare" + "github.com/hashicorp/terraform-plugin-testing/config" + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/knownvalue" + "github.com/hashicorp/terraform-plugin-testing/plancheck" + "github.com/hashicorp/terraform-plugin-testing/statecheck" + "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" + "github.com/hashicorp/terraform-plugin-testing/tfversion" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccSFNActivity_Identity_Basic(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_sfn_activity.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.SFNServiceID), + CheckDestroy: testAccCheckActivityDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + // Step 1: Setup + { + ConfigDirectory: config.StaticDirectory("testdata/Activity/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckActivityExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New(names.AttrARN), compare.ValuesSame()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), 
knownvalue.StringExact(acctest.Region())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + + // Step 2: Import command + { + ConfigDirectory: config.StaticDirectory("testdata/Activity/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ImportStateKind: resource.ImportCommandWithID, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + + // Step 3: Import block with Import ID + { + ConfigDirectory: config.StaticDirectory("testdata/Activity/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + }, + }, + }, + + // Step 4: Import block with Resource Identity + { + ConfigDirectory: config.StaticDirectory("testdata/Activity/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithResourceIdentity, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), 
knownvalue.StringExact(acctest.Region())), + }, + }, + }, + }, + }) +} + +func TestAccSFNActivity_Identity_RegionOverride(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_sfn_activity.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.SFNServiceID), + CheckDestroy: acctest.CheckDestroyNoop, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + // Step 1: Setup + { + ConfigDirectory: config.StaticDirectory("testdata/Activity/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New(names.AttrARN), compare.ValuesSame()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + + // Step 2: Import command with appended "@" + { + ConfigDirectory: config.StaticDirectory("testdata/Activity/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ImportStateKind: resource.ImportCommandWithID, + ImportStateIdFunc: acctest.CrossRegionImportStateIdFunc(resourceName), + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + + // Step 3: Import command 
without appended "@" + { + ConfigDirectory: config.StaticDirectory("testdata/Activity/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ImportStateKind: resource.ImportCommandWithID, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + + // Step 4: Import block with Import ID and appended "@" + { + ConfigDirectory: config.StaticDirectory("testdata/Activity/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportStateIdFunc: acctest.CrossRegionImportStateIdFunc(resourceName), + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + }, + + // Step 5: Import block with Import ID and no appended "@" + { + ConfigDirectory: config.StaticDirectory("testdata/Activity/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, 
tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + }, + + // Step 6: Import block with Resource Identity + { + ConfigDirectory: config.StaticDirectory("testdata/Activity/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithResourceIdentity, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + }, + }, + }) +} + +// Resource Identity was added after v6.14.1 +func TestAccSFNActivity_Identity_ExistingResource(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_sfn_activity.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.SFNServiceID), + CheckDestroy: testAccCheckActivityDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/Activity/basic_v6.14.1/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckActivityExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + 
ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Activity/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + }, + }) +} + +// Resource Identity was added after v6.14.1 +func TestAccSFNActivity_Identity_ExistingResource_NoRefresh_NoChange(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_sfn_activity.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.SFNServiceID), + CheckDestroy: testAccCheckActivityDestroy(ctx), + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/Activity/basic_v6.14.1/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckActivityExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + 
ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Activity/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + }, + }) +} diff --git a/internal/service/sfn/activity_test.go b/internal/service/sfn/activity_test.go index 4695baf97d71..aea11b5ab1a6 100644 --- a/internal/service/sfn/activity_test.go +++ b/internal/service/sfn/activity_test.go @@ -11,7 +11,6 @@ import ( "time" awstypes "github.com/aws/aws-sdk-go-v2/service/sfn/types" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" @@ -35,7 +34,7 @@ func TestAccSFNActivity_basic(t *testing.T) { Steps: []resource.TestStep{ { Config: testAccActivityConfig_basic(rName), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( testAccCheckActivityExists(ctx, resourceName), resource.TestCheckResourceAttrSet(resourceName, names.AttrCreationDate), resource.TestCheckResourceAttr(resourceName, names.AttrName, rName), @@ -64,7 +63,7 @@ func TestAccSFNActivity_disappears(t *testing.T) { Steps: []resource.TestStep{ { Config: testAccActivityConfig_basic(rName), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( testAccCheckActivityExists(ctx, resourceName), acctest.CheckResourceDisappears(ctx, acctest.Provider, tfsfn.ResourceActivity(), resourceName), 
), @@ -87,7 +86,7 @@ func TestAccSFNActivity_tags(t *testing.T) { Steps: []resource.TestStep{ { Config: testAccActivityConfig_basicTags1(rName, acctest.CtKey1, acctest.CtValue1), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( testAccCheckActivityExists(ctx, resourceName), resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, "1"), resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey1, acctest.CtValue1), @@ -100,7 +99,7 @@ func TestAccSFNActivity_tags(t *testing.T) { }, { Config: testAccActivityConfig_basicTags2(rName, acctest.CtKey1, acctest.CtValue1Updated, acctest.CtKey2, acctest.CtValue2), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( testAccCheckActivityExists(ctx, resourceName), resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, "2"), resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey1, acctest.CtValue1Updated), @@ -109,7 +108,7 @@ func TestAccSFNActivity_tags(t *testing.T) { }, { Config: testAccActivityConfig_basicTags1(rName, acctest.CtKey2, acctest.CtValue2), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( testAccCheckActivityExists(ctx, resourceName), resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, "1"), resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey2, acctest.CtValue2), @@ -126,7 +125,7 @@ func TestAccSFNActivity_encryptionConfigurationCustomerManagedKMSKey(t *testing. reusePeriodSeconds := 900 kmsKeyResource := "aws_kms_key.kms_key_for_sfn" - resource.Test(t, resource.TestCase{ + resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SFNServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -134,7 +133,7 @@ func TestAccSFNActivity_encryptionConfigurationCustomerManagedKMSKey(t *testing. 
Steps: []resource.TestStep{ { Config: testAccActivityConfig_encryptionConfigurationCustomerManagedKMSKey(rName, string(awstypes.EncryptionTypeCustomerManagedKmsKey), reusePeriodSeconds), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( testAccCheckActivityExists(ctx, resourceName), resource.TestCheckResourceAttr(resourceName, "encryption_configuration.#", "1"), resource.TestCheckResourceAttr(resourceName, "encryption_configuration.0.type", string(awstypes.EncryptionTypeCustomerManagedKmsKey)), @@ -157,7 +156,7 @@ func TestAccSFNActivity_encryptionConfigurationServiceOwnedKey(t *testing.T) { rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_sfn_activity.test" - resource.Test(t, resource.TestCase{ + resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SFNServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -165,7 +164,7 @@ func TestAccSFNActivity_encryptionConfigurationServiceOwnedKey(t *testing.T) { Steps: []resource.TestStep{ { Config: testAccActivityConfig_encryptionConfigurationServiceOwnedKey(rName, string(awstypes.EncryptionTypeAwsOwnedKey)), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( testAccCheckActivityExists(ctx, resourceName), resource.TestCheckResourceAttr(resourceName, "encryption_configuration.#", "1"), resource.TestCheckResourceAttr(resourceName, "encryption_configuration.0.type", string(awstypes.EncryptionTypeAwsOwnedKey)), @@ -205,7 +204,7 @@ func testAccCheckActivityDestroy(ctx context.Context) resource.TestCheckFunc { } // Retrying as Read after Delete is not always consistent. 
- err := retry.RetryContext(ctx, 1*time.Minute, func() *retry.RetryError { + err := tfresource.Retry(ctx, 1*time.Minute, func(ctx context.Context) *tfresource.RetryError { _, err := tfsfn.FindActivityByARN(ctx, conn, rs.Primary.ID) if tfresource.NotFound(err) { @@ -213,10 +212,10 @@ func testAccCheckActivityDestroy(ctx context.Context) resource.TestCheckFunc { } if err != nil { - return retry.NonRetryableError(err) + return tfresource.NonRetryableError(err) } - return retry.RetryableError(fmt.Errorf("Step Functions Activity still exists: %s", rs.Primary.ID)) + return tfresource.RetryableError(fmt.Errorf("Step Functions Activity still exists: %s", rs.Primary.ID)) }) return err diff --git a/internal/service/sfn/alias.go b/internal/service/sfn/alias.go index 27b1c0525520..0c4201a7f4d7 100644 --- a/internal/service/sfn/alias.go +++ b/internal/service/sfn/alias.go @@ -23,6 +23,9 @@ import ( ) // @SDKResource("aws_sfn_alias", name="Alias") +// @ArnIdentity +// @Testing(existsType="github.com/aws/aws-sdk-go-v2/service/sfn;sfn.DescribeStateMachineAliasOutput") +// @Testing(preIdentityVersion="v6.14.1") func resourceAlias() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceAliasCreate, @@ -30,10 +33,6 @@ func resourceAlias() *schema.Resource { UpdateWithoutTimeout: resourceAliasUpdate, DeleteWithoutTimeout: resourceAliasDelete, - Importer: &schema.ResourceImporter{ - StateContext: schema.ImportStatePassthroughContext, - }, - Timeouts: &schema.ResourceTimeout{ Create: schema.DefaultTimeout(30 * time.Minute), Update: schema.DefaultTimeout(30 * time.Minute), diff --git a/internal/service/sfn/alias_identity_gen_test.go b/internal/service/sfn/alias_identity_gen_test.go new file mode 100644 index 000000000000..55ab0b14342c --- /dev/null +++ b/internal/service/sfn/alias_identity_gen_test.go @@ -0,0 +1,341 @@ +// Code generated by internal/generate/identitytests/main.go; DO NOT EDIT. 
+ +package sfn_test + +import ( + "testing" + + "github.com/aws/aws-sdk-go-v2/service/sfn" + "github.com/hashicorp/terraform-plugin-testing/compare" + "github.com/hashicorp/terraform-plugin-testing/config" + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/knownvalue" + "github.com/hashicorp/terraform-plugin-testing/plancheck" + "github.com/hashicorp/terraform-plugin-testing/statecheck" + "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" + "github.com/hashicorp/terraform-plugin-testing/tfversion" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccSFNAlias_Identity_Basic(t *testing.T) { + ctx := acctest.Context(t) + + var v sfn.DescribeStateMachineAliasOutput + resourceName := "aws_sfn_alias.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.SFNServiceID), + CheckDestroy: testAccCheckAliasDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + // Step 1: Setup + { + ConfigDirectory: config.StaticDirectory("testdata/Alias/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckAliasExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New(names.AttrARN), compare.ValuesSame()), + 
statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + + // Step 2: Import command + { + ConfigDirectory: config.StaticDirectory("testdata/Alias/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ImportStateKind: resource.ImportCommandWithID, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + + // Step 3: Import block with Import ID + { + ConfigDirectory: config.StaticDirectory("testdata/Alias/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + }, + }, + }, + + // Step 4: Import block with Resource Identity + { + ConfigDirectory: config.StaticDirectory("testdata/Alias/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithResourceIdentity, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + 
plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + }, + }, + }, + }, + }) +} + +func TestAccSFNAlias_Identity_RegionOverride(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_sfn_alias.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.SFNServiceID), + CheckDestroy: acctest.CheckDestroyNoop, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + // Step 1: Setup + { + ConfigDirectory: config.StaticDirectory("testdata/Alias/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New(names.AttrARN), compare.ValuesSame()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + + // Step 2: Import command with appended "@" + { + ConfigDirectory: config.StaticDirectory("testdata/Alias/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ImportStateKind: resource.ImportCommandWithID, + ImportStateIdFunc: acctest.CrossRegionImportStateIdFunc(resourceName), + ResourceName: resourceName, + ImportState: true, + 
ImportStateVerify: true, + }, + + // Step 3: Import command without appended "@" + { + ConfigDirectory: config.StaticDirectory("testdata/Alias/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ImportStateKind: resource.ImportCommandWithID, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + + // Step 4: Import block with Import ID and appended "@" + { + ConfigDirectory: config.StaticDirectory("testdata/Alias/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportStateIdFunc: acctest.CrossRegionImportStateIdFunc(resourceName), + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + }, + + // Step 5: Import block with Import ID and no appended "@" + { + ConfigDirectory: config.StaticDirectory("testdata/Alias/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), 
knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + }, + + // Step 6: Import block with Resource Identity + { + ConfigDirectory: config.StaticDirectory("testdata/Alias/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithResourceIdentity, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + }, + }, + }) +} + +// Resource Identity was added after v6.14.1 +func TestAccSFNAlias_Identity_ExistingResource(t *testing.T) { + ctx := acctest.Context(t) + + var v sfn.DescribeStateMachineAliasOutput + resourceName := "aws_sfn_alias.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.SFNServiceID), + CheckDestroy: testAccCheckAliasDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/Alias/basic_v6.14.1/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckAliasExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + 
tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Alias/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + }, + }) +} + +// Resource Identity was added after v6.14.1 +func TestAccSFNAlias_Identity_ExistingResource_NoRefresh_NoChange(t *testing.T) { + ctx := acctest.Context(t) + + var v sfn.DescribeStateMachineAliasOutput + resourceName := "aws_sfn_alias.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.SFNServiceID), + CheckDestroy: testAccCheckAliasDestroy(ctx), + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/Alias/basic_v6.14.1/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckAliasExists(ctx, resourceName, &v), + ), + ConfigStateChecks: 
[]statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Alias/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + }, + }) +} diff --git a/internal/service/sfn/generate.go b/internal/service/sfn/generate.go index 5b3d8d3fe089..9bbaf20e5b13 100644 --- a/internal/service/sfn/generate.go +++ b/internal/service/sfn/generate.go @@ -3,6 +3,7 @@ //go:generate go run ../../generate/listpages/main.go -ListOps=ListStateMachineVersions //go:generate go run ../../generate/tags/main.go -ListTags -ServiceTagsSlice -UpdateTags +//go:generate go run ../../generate/identitytests/main.go //go:generate go run ../../generate/servicepackage/main.go // ONLY generate directives and package declaration! Do not add anything else to this file. 
diff --git a/internal/service/sfn/service_endpoint_resolver_gen.go b/internal/service/sfn/service_endpoint_resolver_gen.go index 8a46a815fabf..81f5a58b9bb0 100644 --- a/internal/service/sfn/service_endpoint_resolver_gen.go +++ b/internal/service/sfn/service_endpoint_resolver_gen.go @@ -62,7 +62,7 @@ func (r resolverV2) ResolveEndpoint(ctx context.Context, params sfn.EndpointPara }) params.UseFIPS = aws.Bool(false) } else { - err = fmt.Errorf("looking up sfn endpoint %q: %s", hostname, err) + err = fmt.Errorf("looking up sfn endpoint %q: %w", hostname, err) return } } else { diff --git a/internal/service/sfn/service_endpoints_gen_test.go b/internal/service/sfn/service_endpoints_gen_test.go index 573a1e97cd72..1db7d442bbda 100644 --- a/internal/service/sfn/service_endpoints_gen_test.go +++ b/internal/service/sfn/service_endpoints_gen_test.go @@ -601,7 +601,7 @@ func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { ) } -var errCancelOperation = fmt.Errorf("Test: Canceling request") +var errCancelOperation = errors.New("Test: Canceling request") func addCancelRequestMiddleware() func(*middleware.Stack) error { return func(stack *middleware.Stack) error { diff --git a/internal/service/sfn/service_package_gen.go b/internal/service/sfn/service_package_gen.go index a93d3c35c1fb..a90121de97bb 100644 --- a/internal/service/sfn/service_package_gen.go +++ b/internal/service/sfn/service_package_gen.go @@ -7,7 +7,6 @@ import ( "unique" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/sfn" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -18,6 +17,17 @@ import ( type servicePackage struct{} +func (p *servicePackage) Actions(ctx context.Context) []*inttypes.ServicePackageAction { + return []*inttypes.ServicePackageAction{ + { + Factory: newStartExecutionAction, + TypeName: "aws_sfn_start_execution", + Name: "Start Execution", 
+ Region: unique.Make(inttypes.ResourceRegionDefault()), + }, + } +} + func (p *servicePackage) FrameworkDataSources(ctx context.Context) []*inttypes.ServicePackageFrameworkDataSource { return []*inttypes.ServicePackageFrameworkDataSource{} } @@ -65,12 +75,24 @@ func (p *servicePackage) SDKResources(ctx context.Context) []*inttypes.ServicePa IdentifierAttribute: names.AttrID, }), Region: unique.Make(inttypes.ResourceRegionDefault()), + Identity: inttypes.RegionalARNIdentity( + inttypes.WithIdentityDuplicateAttrs(names.AttrID), + ), + Import: inttypes.SDKv2Import{ + WrappedImport: true, + }, }, { Factory: resourceAlias, TypeName: "aws_sfn_alias", Name: "Alias", Region: unique.Make(inttypes.ResourceRegionDefault()), + Identity: inttypes.RegionalARNIdentity( + inttypes.WithIdentityDuplicateAttrs(names.AttrID), + ), + Import: inttypes.SDKv2Import{ + WrappedImport: true, + }, }, { Factory: resourceStateMachine, @@ -80,6 +102,12 @@ func (p *servicePackage) SDKResources(ctx context.Context) []*inttypes.ServicePa IdentifierAttribute: names.AttrID, }), Region: unique.Make(inttypes.ResourceRegionDefault()), + Identity: inttypes.RegionalARNIdentity( + inttypes.WithIdentityDuplicateAttrs(names.AttrID), + ), + Import: inttypes.SDKv2Import{ + WrappedImport: true, + }, }, } } @@ -107,7 +135,7 @@ func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) ( func(o *sfn.Options) { if inContext, ok := conns.FromContext(ctx); ok && inContext.VCREnabled() { tflog.Info(ctx, "overriding retry behavior to immediately return VCR errors") - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(vcr.InteractionNotFoundRetryableFunc)) + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), vcr.InteractionNotFoundRetryableFunc) } }, withExtraOptions(ctx, p, config), diff --git a/internal/service/sfn/start_execution_action.go b/internal/service/sfn/start_execution_action.go new file mode 100644 index 
000000000000..3bb9a1a60ba9 --- /dev/null +++ b/internal/service/sfn/start_execution_action.go @@ -0,0 +1,129 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package sfn + +import ( + "context" + "fmt" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/sfn" + "github.com/hashicorp/terraform-plugin-framework/action" + "github.com/hashicorp/terraform-plugin-framework/action/schema" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/framework" + "github.com/hashicorp/terraform-provider-aws/internal/framework/validators" + "github.com/hashicorp/terraform-provider-aws/names" +) + +// @Action(aws_sfn_start_execution, name="Start Execution") +func newStartExecutionAction(_ context.Context) (action.ActionWithConfigure, error) { + return &startExecutionAction{}, nil +} + +var ( + _ action.Action = (*startExecutionAction)(nil) +) + +type startExecutionAction struct { + framework.ActionWithModel[startExecutionActionModel] +} + +type startExecutionActionModel struct { + framework.WithRegionModel + StateMachineArn types.String `tfsdk:"state_machine_arn"` + Input types.String `tfsdk:"input"` + Name types.String `tfsdk:"name"` + TraceHeader types.String `tfsdk:"trace_header"` +} + +func (a *startExecutionAction) Schema(ctx context.Context, req action.SchemaRequest, resp *action.SchemaResponse) { + resp.Schema = schema.Schema{ + Description: "Starts a Step Functions state machine execution with the specified input data.", + Attributes: map[string]schema.Attribute{ + "state_machine_arn": schema.StringAttribute{ + Description: "The ARN of the state machine to execute. Can be unqualified, version-qualified, or alias-qualified.", + Required: true, + }, + "input": schema.StringAttribute{ + Description: "JSON input data for the execution. 
Defaults to '{}'.", + Optional: true, + Validators: []validator.String{ + validators.JSON(), + }, + }, + names.AttrName: schema.StringAttribute{ + Description: "Name of the execution. Must be unique within the account/region/state machine for 90 days. Auto-generated if not provided.", + Optional: true, + }, + "trace_header": schema.StringAttribute{ + Description: "AWS X-Ray trace header for distributed tracing.", + Optional: true, + }, + }, + } +} + +func (a *startExecutionAction) Invoke(ctx context.Context, req action.InvokeRequest, resp *action.InvokeResponse) { + var config startExecutionActionModel + + resp.Diagnostics.Append(req.Config.Get(ctx, &config)...) + if resp.Diagnostics.HasError() { + return + } + + conn := a.Meta().SFNClient(ctx) + + stateMachineArn := config.StateMachineArn.ValueString() + input := "{}" + if !config.Input.IsNull() { + input = config.Input.ValueString() + } + + tflog.Info(ctx, "Starting Step Functions execution", map[string]any{ + "state_machine_arn": stateMachineArn, + "input_length": len(input), + "has_name": !config.Name.IsNull(), + "has_trace_header": !config.TraceHeader.IsNull(), + }) + + resp.SendProgress(action.InvokeProgressEvent{ + Message: fmt.Sprintf("Starting execution for state machine %s...", stateMachineArn), + }) + + startInput := &sfn.StartExecutionInput{ + StateMachineArn: aws.String(stateMachineArn), + Input: aws.String(input), + } + + if !config.Name.IsNull() { + startInput.Name = config.Name.ValueStringPointer() + } + + if !config.TraceHeader.IsNull() { + startInput.TraceHeader = config.TraceHeader.ValueStringPointer() + } + + output, err := conn.StartExecution(ctx, startInput) + if err != nil { + resp.Diagnostics.AddError( + "Failed to Start Step Functions Execution", + fmt.Sprintf("Could not start execution for state machine %s: %s", stateMachineArn, err), + ) + return + } + + executionArn := aws.ToString(output.ExecutionArn) + resp.SendProgress(action.InvokeProgressEvent{ + Message: fmt.Sprintf("Execution 
started successfully with ARN %s", executionArn), + }) + + tflog.Info(ctx, "Step Functions execution started successfully", map[string]any{ + "state_machine_arn": stateMachineArn, + "execution_arn": executionArn, + "start_date": output.StartDate, + }) +} diff --git a/internal/service/sfn/start_execution_action_test.go b/internal/service/sfn/start_execution_action_test.go new file mode 100644 index 000000000000..b1b5087b2a38 --- /dev/null +++ b/internal/service/sfn/start_execution_action_test.go @@ -0,0 +1,370 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package sfn_test + +import ( + "context" + "fmt" + "testing" + + "github.com/aws/aws-sdk-go-v2/service/sfn" + awstypes "github.com/aws/aws-sdk-go-v2/service/sfn/types" + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/terraform" + "github.com/hashicorp/terraform-plugin-testing/tfversion" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccSFNStartExecutionAction_basic(t *testing.T) { + ctx := acctest.Context(t) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + inputJSON := `{"key1":"value1","key2":"value2"}` + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.SFNServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_14_0), + }, + CheckDestroy: acctest.CheckDestroyNoop, + Steps: []resource.TestStep{ + { + Config: testAccStartExecutionActionConfig_basic(rName, inputJSON), + Check: resource.ComposeTestCheckFunc( + testAccCheckStartExecutionAction(ctx, rName, inputJSON), + ), + }, + }, 
+ }) +} + +func TestAccSFNStartExecutionAction_withName(t *testing.T) { + ctx := acctest.Context(t) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + executionName := sdkacctest.RandomWithPrefix("execution") + inputJSON := `{"test":"data"}` + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.SFNServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_14_0), + }, + CheckDestroy: acctest.CheckDestroyNoop, + Steps: []resource.TestStep{ + { + Config: testAccStartExecutionActionConfig_withName(rName, executionName, inputJSON), + Check: resource.ComposeTestCheckFunc( + testAccCheckStartExecutionActionWithName(ctx, rName, executionName, inputJSON), + ), + }, + }, + }) +} + +func TestAccSFNStartExecutionAction_emptyInput(t *testing.T) { + ctx := acctest.Context(t) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.SFNServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_14_0), + }, + CheckDestroy: acctest.CheckDestroyNoop, + Steps: []resource.TestStep{ + { + Config: testAccStartExecutionActionConfig_emptyInput(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckStartExecutionAction(ctx, rName, "{}"), + ), + }, + }, + }) +} + +// Test helper functions + +func testAccCheckStartExecutionAction(ctx context.Context, stateMachineName, expectedInput string) resource.TestCheckFunc { + return func(s *terraform.State) error { + conn := acctest.Provider.Meta().(*conns.AWSClient).SFNClient(ctx) + + // Get the state machine ARN + stateMachines, err := conn.ListStateMachines(ctx, 
&sfn.ListStateMachinesInput{}) + if err != nil { + return fmt.Errorf("failed to list state machines: %w", err) + } + + var stateMachineArn string + for _, sm := range stateMachines.StateMachines { + if *sm.Name == stateMachineName { + stateMachineArn = *sm.StateMachineArn + break + } + } + + if stateMachineArn == "" { + return fmt.Errorf("state machine %s not found", stateMachineName) + } + + // List executions to verify one was created (check all statuses) + executions, err := conn.ListExecutions(ctx, &sfn.ListExecutionsInput{ + StateMachineArn: &stateMachineArn, + }) + if err != nil { + return fmt.Errorf("failed to list executions for state machine %s: %w", stateMachineName, err) + } + + if len(executions.Executions) == 0 { + return fmt.Errorf("no executions found for state machine %s", stateMachineName) + } + + // Verify the execution input matches expected + execution := executions.Executions[0] + executionDetails, err := conn.DescribeExecution(ctx, &sfn.DescribeExecutionInput{ + ExecutionArn: execution.ExecutionArn, + }) + if err != nil { + return fmt.Errorf("failed to describe execution %s: %w", *execution.ExecutionArn, err) + } + + if *executionDetails.Input != expectedInput { + return fmt.Errorf("execution input mismatch. 
Expected: %s, Got: %s", expectedInput, *executionDetails.Input) + } + + return nil + } +} + +func testAccCheckStartExecutionActionWithName(ctx context.Context, stateMachineName, executionName, expectedInput string) resource.TestCheckFunc { + return func(s *terraform.State) error { + conn := acctest.Provider.Meta().(*conns.AWSClient).SFNClient(ctx) + + // Get the state machine ARN + stateMachines, err := conn.ListStateMachines(ctx, &sfn.ListStateMachinesInput{}) + if err != nil { + return fmt.Errorf("failed to list state machines: %w", err) + } + + var stateMachineArn string + for _, sm := range stateMachines.StateMachines { + if *sm.Name == stateMachineName { + stateMachineArn = *sm.StateMachineArn + break + } + } + + if stateMachineArn == "" { + return fmt.Errorf("state machine %s not found", stateMachineName) + } + + // Find execution by name (check all statuses) + executions, err := conn.ListExecutions(ctx, &sfn.ListExecutionsInput{ + StateMachineArn: &stateMachineArn, + }) + if err != nil { + return fmt.Errorf("failed to list executions for state machine %s: %w", stateMachineName, err) + } + + var foundExecution *awstypes.ExecutionListItem + for _, execution := range executions.Executions { + if *execution.Name == executionName { + foundExecution = &execution + break + } + } + + if foundExecution == nil { + return fmt.Errorf("execution with name %s not found for state machine %s", executionName, stateMachineName) + } + + // Verify the execution input + executionDetails, err := conn.DescribeExecution(ctx, &sfn.DescribeExecutionInput{ + ExecutionArn: foundExecution.ExecutionArn, + }) + if err != nil { + return fmt.Errorf("failed to describe execution %s: %w", *foundExecution.ExecutionArn, err) + } + + if *executionDetails.Input != expectedInput { + return fmt.Errorf("execution input mismatch. 
Expected: %s, Got: %s", expectedInput, *executionDetails.Input) + } + + return nil + } +} + +// Configuration functions + +func testAccStartExecutionActionConfig_basic(rName, inputJSON string) string { + return acctest.ConfigCompose( + testAccStartExecutionActionConfig_base(rName), + fmt.Sprintf(` +action "aws_sfn_start_execution" "test" { + config { + state_machine_arn = aws_sfn_state_machine.test.arn + input = %[1]q + } +} + +resource "terraform_data" "trigger" { + input = "trigger" + lifecycle { + action_trigger { + events = [before_create, before_update] + actions = [action.aws_sfn_start_execution.test] + } + } +} +`, inputJSON)) +} + +func testAccStartExecutionActionConfig_withName(rName, executionName, inputJSON string) string { + return acctest.ConfigCompose( + testAccStartExecutionActionConfig_base(rName), + fmt.Sprintf(` +action "aws_sfn_start_execution" "test" { + config { + state_machine_arn = aws_sfn_state_machine.test.arn + name = %[1]q + input = %[2]q + } +} + +resource "terraform_data" "trigger" { + input = "trigger" + lifecycle { + action_trigger { + events = [before_create, before_update] + actions = [action.aws_sfn_start_execution.test] + } + } +} +`, executionName, inputJSON)) +} + +func testAccStartExecutionActionConfig_emptyInput(rName string) string { + return acctest.ConfigCompose( + testAccStartExecutionActionConfig_base(rName), + ` +action "aws_sfn_start_execution" "test" { + config { + state_machine_arn = aws_sfn_state_machine.test.arn + } +} + +resource "terraform_data" "trigger" { + input = "trigger" + lifecycle { + action_trigger { + events = [before_create, before_update] + actions = [action.aws_sfn_start_execution.test] + } + } +} +`) +} + +func testAccStartExecutionActionConfig_base(rName string) string { + return fmt.Sprintf(` +resource "aws_iam_role" "for_lambda" { + name = "%[1]s-lambda" + + assume_role_policy = jsonencode({ + Version = "2012-10-17" + Statement = [{ + Action = "sts:AssumeRole" + Principal = { + Service = 
"lambda.amazonaws.com" + } + Effect = "Allow" + }] + }) +} + +resource "aws_iam_role_policy" "for_lambda" { + name = "%[1]s-lambda" + role = aws_iam_role.for_lambda.id + + policy = jsonencode({ + Version = "2012-10-17" + Statement = [{ + Effect = "Allow" + Action = [ + "logs:CreateLogGroup", + "logs:CreateLogStream", + "logs:PutLogEvents" + ] + Resource = "arn:${data.aws_partition.current.partition}:logs:*:*:*" + }] + }) +} + +resource "aws_lambda_function" "test" { + filename = "test-fixtures/lambdatest.zip" + function_name = %[1]q + role = aws_iam_role.for_lambda.arn + handler = "exports.example" + runtime = "nodejs20.x" +} + +resource "aws_iam_role" "for_sfn" { + name = "%[1]s-sfn" + + assume_role_policy = jsonencode({ + Version = "2012-10-17" + Statement = [{ + Effect = "Allow" + Principal = { + Service = "states.${data.aws_region.current.region}.amazonaws.com" + } + Action = "sts:AssumeRole" + }] + }) +} + +resource "aws_iam_role_policy" "for_sfn" { + name = "%[1]s-sfn" + role = aws_iam_role.for_sfn.id + + policy = jsonencode({ + Version = "2012-10-17" + Statement = [{ + Effect = "Allow" + Action = [ + "lambda:InvokeFunction" + ] + Resource = "*" + }] + }) +} + +resource "aws_sfn_state_machine" "test" { + name = %[1]q + role_arn = aws_iam_role.for_sfn.arn + + definition = jsonencode({ + Comment = "A simple minimal example" + StartAt = "Hello" + States = { + Hello = { + Type = "Task" + Resource = aws_lambda_function.test.arn + End = true + } + } + }) +} + +data "aws_region" "current" {} +data "aws_partition" "current" {} +`, rName) +} diff --git a/internal/service/sfn/state_machine.go b/internal/service/sfn/state_machine.go index db3755d887c0..9b9e10739b05 100644 --- a/internal/service/sfn/state_machine.go +++ b/internal/service/sfn/state_machine.go @@ -35,6 +35,9 @@ import ( // @SDKResource("aws_sfn_state_machine", name="State Machine") // @Tags(identifierAttribute="id") +// @ArnIdentity +// 
@Testing(existsType="github.com/aws/aws-sdk-go-v2/service/sfn;sfn.DescribeStateMachineOutput") +// @Testing(preIdentityVersion="v6.13.0") func resourceStateMachine() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceStateMachineCreate, @@ -42,10 +45,6 @@ func resourceStateMachine() *schema.Resource { UpdateWithoutTimeout: resourceStateMachineUpdate, DeleteWithoutTimeout: resourceStateMachineDelete, - Importer: &schema.ResourceImporter{ - StateContext: schema.ImportStatePassthroughContext, - }, - Timeouts: &schema.ResourceTimeout{ Create: schema.DefaultTimeout(5 * time.Minute), Update: schema.DefaultTimeout(1 * time.Minute), @@ -230,7 +229,7 @@ func resourceStateMachineCreate(ctx context.Context, d *schema.ResourceData, met // Note: the instance may be in a deleting mode, hence the retry // when creating the step function. This can happen when we are // updating the resource (since there is no update API call). - outputRaw, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, d.Timeout(schema.TimeoutCreate), func() (any, error) { + outputRaw, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, d.Timeout(schema.TimeoutCreate), func(ctx context.Context) (any, error) { return conn.CreateStateMachine(ctx, input) }, "StateMachineDeleting", "AccessDeniedException") @@ -359,11 +358,11 @@ func resourceStateMachineUpdate(ctx context.Context, d *schema.ResourceData, met } // Handle eventual consistency after update. 
- err = retry.RetryContext(ctx, d.Timeout(schema.TimeoutUpdate), func() *retry.RetryError { // nosemgrep:ci.helper-schema-retry-RetryContext-without-TimeoutError-check + err = tfresource.Retry(ctx, d.Timeout(schema.TimeoutUpdate), func(ctx context.Context) *tfresource.RetryError { output, err := findStateMachineByARN(ctx, conn, d.Id()) if err != nil { - return retry.NonRetryableError(err) + return tfresource.NonRetryableError(err) } if d.HasChange("definition") && !verify.JSONBytesEqual([]byte(aws.ToString(output.Definition)), []byte(d.Get("definition").(string))) || @@ -375,7 +374,7 @@ func resourceStateMachineUpdate(ctx context.Context, d *schema.ResourceData, met d.HasChange("encryption_configuration.0.kms_key_id") && output.EncryptionConfiguration != nil && output.EncryptionConfiguration.KmsKeyId != nil && aws.ToString(output.EncryptionConfiguration.KmsKeyId) != d.Get("encryption_configuration.0.kms_key_id") || d.HasChange("encryption_configuration.0.encryption_type") && output.EncryptionConfiguration != nil && string(output.EncryptionConfiguration.Type) != d.Get("encryption_configuration.0.encryption_type").(string) || d.HasChange("encryption_configuration.0.kms_data_key_reuse_period_seconds") && output.EncryptionConfiguration != nil && output.EncryptionConfiguration.KmsDataKeyReusePeriodSeconds != nil && aws.ToInt32(output.EncryptionConfiguration.KmsDataKeyReusePeriodSeconds) != int32(d.Get("encryption_configuration.0.kms_data_key_reuse_period_seconds").(int)) { - return retry.RetryableError(fmt.Errorf("Step Functions State Machine (%s) eventual consistency", d.Id())) + return tfresource.RetryableError(fmt.Errorf("Step Functions State Machine (%s) eventual consistency", d.Id())) } return nil diff --git a/internal/service/sfn/state_machine_identity_gen_test.go b/internal/service/sfn/state_machine_identity_gen_test.go new file mode 100644 index 000000000000..b6a308e93a67 --- /dev/null +++ b/internal/service/sfn/state_machine_identity_gen_test.go @@ -0,0 +1,341 
@@ +// Code generated by internal/generate/identitytests/main.go; DO NOT EDIT. + +package sfn_test + +import ( + "testing" + + "github.com/aws/aws-sdk-go-v2/service/sfn" + "github.com/hashicorp/terraform-plugin-testing/compare" + "github.com/hashicorp/terraform-plugin-testing/config" + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/knownvalue" + "github.com/hashicorp/terraform-plugin-testing/plancheck" + "github.com/hashicorp/terraform-plugin-testing/statecheck" + "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" + "github.com/hashicorp/terraform-plugin-testing/tfversion" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccSFNStateMachine_Identity_Basic(t *testing.T) { + ctx := acctest.Context(t) + + var v sfn.DescribeStateMachineOutput + resourceName := "aws_sfn_state_machine.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.SFNServiceID), + CheckDestroy: testAccCheckStateMachineDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + // Step 1: Setup + { + ConfigDirectory: config.StaticDirectory("testdata/StateMachine/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckStateMachineExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.CompareValuePairs(resourceName, 
tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New(names.AttrARN), compare.ValuesSame()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + + // Step 2: Import command + { + ConfigDirectory: config.StaticDirectory("testdata/StateMachine/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ImportStateKind: resource.ImportCommandWithID, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + + // Step 3: Import block with Import ID + { + ConfigDirectory: config.StaticDirectory("testdata/StateMachine/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + }, + }, + }, + + // Step 4: Import block with Resource Identity + { + ConfigDirectory: config.StaticDirectory("testdata/StateMachine/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithResourceIdentity, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), 
knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + }, + }, + }, + }, + }) +} + +func TestAccSFNStateMachine_Identity_RegionOverride(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_sfn_state_machine.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.SFNServiceID), + CheckDestroy: acctest.CheckDestroyNoop, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + // Step 1: Setup + { + ConfigDirectory: config.StaticDirectory("testdata/StateMachine/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New(names.AttrARN), compare.ValuesSame()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + + // Step 2: Import command with appended "@" + { + ConfigDirectory: config.StaticDirectory("testdata/StateMachine/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ImportStateKind: 
resource.ImportCommandWithID, + ImportStateIdFunc: acctest.CrossRegionImportStateIdFunc(resourceName), + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + + // Step 3: Import command without appended "@" + { + ConfigDirectory: config.StaticDirectory("testdata/StateMachine/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ImportStateKind: resource.ImportCommandWithID, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + + // Step 4: Import block with Import ID and appended "@" + { + ConfigDirectory: config.StaticDirectory("testdata/StateMachine/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportStateIdFunc: acctest.CrossRegionImportStateIdFunc(resourceName), + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + }, + + // Step 5: Import block with Import ID and no appended "@" + { + ConfigDirectory: config.StaticDirectory("testdata/StateMachine/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + 
plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + }, + + // Step 6: Import block with Resource Identity + { + ConfigDirectory: config.StaticDirectory("testdata/StateMachine/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithResourceIdentity, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + }, + }, + }) +} + +// Resource Identity was added after v6.13.0 +func TestAccSFNStateMachine_Identity_ExistingResource(t *testing.T) { + ctx := acctest.Context(t) + + var v sfn.DescribeStateMachineOutput + resourceName := "aws_sfn_state_machine.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.SFNServiceID), + CheckDestroy: testAccCheckStateMachineDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/StateMachine/basic_v6.13.0/"), + ConfigVariables: config.Variables{ + 
acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckStateMachineExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/StateMachine/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + }, + }) +} + +// Resource Identity was added after v6.13.0 +func TestAccSFNStateMachine_Identity_ExistingResource_NoRefresh_NoChange(t *testing.T) { + ctx := acctest.Context(t) + + var v sfn.DescribeStateMachineOutput + resourceName := "aws_sfn_state_machine.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.SFNServiceID), + CheckDestroy: testAccCheckStateMachineDestroy(ctx), + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: 
config.StaticDirectory("testdata/StateMachine/basic_v6.13.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckStateMachineExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/StateMachine/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + }, + }) +} diff --git a/internal/service/sfn/state_machine_test.go b/internal/service/sfn/state_machine_test.go index ab0d5f08ded0..69d3b2b4b075 100644 --- a/internal/service/sfn/state_machine_test.go +++ b/internal/service/sfn/state_machine_test.go @@ -40,7 +40,7 @@ func TestAccSFNStateMachine_createUpdate(t *testing.T) { { Config: testAccStateMachineConfig_basic(rName, 5), Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckExists(ctx, resourceName, &sm), + testAccCheckStateMachineExists(ctx, resourceName, &sm), acctest.CheckResourceAttrRegionalARN(ctx, resourceName, names.AttrARN, "states", fmt.Sprintf("stateMachine:%s", rName)), resource.TestCheckResourceAttr(resourceName, names.AttrStatus, string(awstypes.StateMachineStatusActive)), resource.TestCheckResourceAttr(resourceName, names.AttrName, rName), @@ -70,7 +70,7 @@ func TestAccSFNStateMachine_createUpdate(t *testing.T) { { Config: testAccStateMachineConfig_basic(rName, 10), Check: 
resource.ComposeAggregateTestCheckFunc( - testAccCheckExists(ctx, resourceName, &sm), + testAccCheckStateMachineExists(ctx, resourceName, &sm), acctest.CheckResourceAttrRegionalARN(ctx, resourceName, names.AttrARN, "states", fmt.Sprintf("stateMachine:%s", rName)), resource.TestCheckResourceAttr(resourceName, names.AttrStatus, string(awstypes.StateMachineStatusActive)), resource.TestCheckResourceAttr(resourceName, names.AttrName, rName), @@ -108,7 +108,7 @@ func TestAccSFNStateMachine_expressUpdate(t *testing.T) { { Config: testAccStateMachineConfig_typed(rName, "EXPRESS", 5), Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckExists(ctx, resourceName, &sm), + testAccCheckStateMachineExists(ctx, resourceName, &sm), resource.TestCheckResourceAttr(resourceName, names.AttrStatus, string(awstypes.StateMachineStatusActive)), resource.TestCheckResourceAttr(resourceName, names.AttrName, rName), resource.TestCheckResourceAttrSet(resourceName, names.AttrCreationDate), @@ -129,7 +129,7 @@ func TestAccSFNStateMachine_expressUpdate(t *testing.T) { { Config: testAccStateMachineConfig_typed(rName, "EXPRESS", 10), Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckExists(ctx, resourceName, &sm), + testAccCheckStateMachineExists(ctx, resourceName, &sm), resource.TestCheckResourceAttr(resourceName, names.AttrStatus, string(awstypes.StateMachineStatusActive)), resource.TestCheckResourceAttr(resourceName, names.AttrName, rName), resource.TestCheckResourceAttrSet(resourceName, names.AttrCreationDate), @@ -164,7 +164,7 @@ func TestAccSFNStateMachine_standardUpdate(t *testing.T) { { Config: testAccStateMachineConfig_typed(rName, "STANDARD", 5), Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckExists(ctx, resourceName, &sm), + testAccCheckStateMachineExists(ctx, resourceName, &sm), resource.TestCheckResourceAttr(resourceName, names.AttrStatus, string(awstypes.StateMachineStatusActive)), resource.TestCheckResourceAttr(resourceName, names.AttrName, rName), 
resource.TestCheckResourceAttrSet(resourceName, names.AttrCreationDate), @@ -186,7 +186,7 @@ func TestAccSFNStateMachine_standardUpdate(t *testing.T) { { Config: testAccStateMachineConfig_typed(rName, "STANDARD", 10), Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckExists(ctx, resourceName, &sm), + testAccCheckStateMachineExists(ctx, resourceName, &sm), resource.TestCheckResourceAttr(resourceName, names.AttrStatus, string(awstypes.StateMachineStatusActive)), resource.TestCheckResourceAttr(resourceName, names.AttrName, rName), resource.TestCheckResourceAttrSet(resourceName, names.AttrCreationDate), @@ -221,8 +221,8 @@ func TestAccSFNStateMachine_nameGenerated(t *testing.T) { Steps: []resource.TestStep{ { Config: testAccStateMachineConfig_nameGenerated(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckExists(ctx, resourceName, &sm), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckStateMachineExists(ctx, resourceName, &sm), acctest.CheckResourceAttrNameGenerated(resourceName, names.AttrName), resource.TestCheckResourceAttr(resourceName, names.AttrNamePrefix, id.UniqueIdPrefix), ), @@ -250,8 +250,8 @@ func TestAccSFNStateMachine_namePrefix(t *testing.T) { Steps: []resource.TestStep{ { Config: testAccStateMachineConfig_namePrefix(rName, "tf-acc-test-prefix-"), - Check: resource.ComposeTestCheckFunc( - testAccCheckExists(ctx, resourceName, &sm), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckStateMachineExists(ctx, resourceName, &sm), acctest.CheckResourceAttrNameFromPrefix(resourceName, names.AttrName, "tf-acc-test-prefix-"), resource.TestCheckResourceAttr(resourceName, names.AttrNamePrefix, "tf-acc-test-prefix-"), ), @@ -279,8 +279,8 @@ func TestAccSFNStateMachine_publish(t *testing.T) { Steps: []resource.TestStep{ { Config: testAccStateMachineConfig_publish(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckExists(ctx, resourceName, &sm), + Check: resource.ComposeAggregateTestCheckFunc( + 
testAccCheckStateMachineExists(ctx, resourceName, &sm), resource.TestCheckResourceAttr(resourceName, "publish", acctest.CtTrue), resource.TestCheckResourceAttrSet(resourceName, "state_machine_version_arn"), ), @@ -309,8 +309,8 @@ func TestAccSFNStateMachine_tags(t *testing.T) { Steps: []resource.TestStep{ { Config: testAccStateMachineConfig_tags1(rName, acctest.CtKey1, acctest.CtValue1), - Check: resource.ComposeTestCheckFunc( - testAccCheckExists(ctx, resourceName, &sm), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckStateMachineExists(ctx, resourceName, &sm), resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, "1"), resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey1, acctest.CtValue1), ), @@ -322,8 +322,8 @@ func TestAccSFNStateMachine_tags(t *testing.T) { }, { Config: testAccStateMachineConfig_tags2(rName, acctest.CtKey1, acctest.CtValue1Updated, acctest.CtKey2, acctest.CtValue2), - Check: resource.ComposeTestCheckFunc( - testAccCheckExists(ctx, resourceName, &sm), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckStateMachineExists(ctx, resourceName, &sm), resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, "2"), resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey1, acctest.CtValue1Updated), resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey2, acctest.CtValue2), @@ -331,8 +331,8 @@ func TestAccSFNStateMachine_tags(t *testing.T) { }, { Config: testAccStateMachineConfig_tags1(rName, acctest.CtKey2, acctest.CtValue2), - Check: resource.ComposeTestCheckFunc( - testAccCheckExists(ctx, resourceName, &sm), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckStateMachineExists(ctx, resourceName, &sm), resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, "1"), resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey2, acctest.CtValue2), ), @@ -355,8 +355,8 @@ func TestAccSFNStateMachine_tracing(t *testing.T) { Steps: []resource.TestStep{ { 
Config: testAccStateMachineConfig_tracingDisable(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckExists(ctx, resourceName, &sm), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckStateMachineExists(ctx, resourceName, &sm), resource.TestCheckResourceAttr(resourceName, "tracing_configuration.#", "1"), resource.TestCheckResourceAttr(resourceName, "tracing_configuration.0.enabled", acctest.CtFalse), ), @@ -368,8 +368,8 @@ func TestAccSFNStateMachine_tracing(t *testing.T) { }, { Config: testAccStateMachineConfig_tracingEnable(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckExists(ctx, resourceName, &sm), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckStateMachineExists(ctx, resourceName, &sm), resource.TestCheckResourceAttr(resourceName, "tracing_configuration.#", "1"), resource.TestCheckResourceAttr(resourceName, "tracing_configuration.0.enabled", acctest.CtTrue), ), @@ -392,8 +392,8 @@ func TestAccSFNStateMachine_disappears(t *testing.T) { Steps: []resource.TestStep{ { Config: testAccStateMachineConfig_basic(rName, 5), - Check: resource.ComposeTestCheckFunc( - testAccCheckExists(ctx, resourceName, &sm), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckStateMachineExists(ctx, resourceName, &sm), acctest.CheckResourceDisappears(ctx, acctest.Provider, tfsfn.ResourceStateMachine(), resourceName), ), ExpectNonEmptyPlan: true, @@ -416,8 +416,8 @@ func TestAccSFNStateMachine_expressLogging(t *testing.T) { Steps: []resource.TestStep{ { Config: testAccStateMachineConfig_expressLogConfiguration(rName, string(awstypes.LogLevelError)), - Check: resource.ComposeTestCheckFunc( - testAccCheckExists(ctx, resourceName, &sm), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckStateMachineExists(ctx, resourceName, &sm), resource.TestCheckResourceAttr(resourceName, names.AttrStatus, string(awstypes.StateMachineStatusActive)), resource.TestCheckResourceAttr(resourceName, names.AttrName, rName), 
resource.TestCheckResourceAttrSet(resourceName, names.AttrCreationDate), @@ -430,8 +430,8 @@ func TestAccSFNStateMachine_expressLogging(t *testing.T) { }, { Config: testAccStateMachineConfig_expressLogConfiguration(rName, string(awstypes.LogLevelAll)), - Check: resource.ComposeTestCheckFunc( - testAccCheckExists(ctx, resourceName, &sm), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckStateMachineExists(ctx, resourceName, &sm), resource.TestCheckResourceAttr(resourceName, names.AttrStatus, string(awstypes.StateMachineStatusActive)), resource.TestCheckResourceAttr(resourceName, names.AttrName, rName), resource.TestCheckResourceAttrSet(resourceName, names.AttrCreationDate), @@ -456,7 +456,7 @@ func TestAccSFNStateMachine_encryptionConfigurationCustomerManagedKMSKey(t *test reusePeriodSeconds1 := 900 reusePeriodSeconds2 := 450 - resource.Test(t, resource.TestCase{ + resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SFNServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -464,8 +464,8 @@ func TestAccSFNStateMachine_encryptionConfigurationCustomerManagedKMSKey(t *test Steps: []resource.TestStep{ { Config: testAccStateMachineConfig_encryptionConfigurationCustomerManagedKMSKey_1(rName, string(awstypes.EncryptionTypeCustomerManagedKmsKey), reusePeriodSeconds1), - Check: resource.ComposeTestCheckFunc( - testAccCheckExists(ctx, resourceName, &sm), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckStateMachineExists(ctx, resourceName, &sm), resource.TestCheckResourceAttr(resourceName, names.AttrStatus, string(awstypes.StateMachineStatusActive)), resource.TestCheckResourceAttr(resourceName, names.AttrName, rName), resource.TestCheckResourceAttrSet(resourceName, names.AttrCreationDate), @@ -487,8 +487,8 @@ func TestAccSFNStateMachine_encryptionConfigurationCustomerManagedKMSKey(t *test //Update periodReuseSeconds { Config: 
testAccStateMachineConfig_encryptionConfigurationCustomerManagedKMSKey_1(rName, string(awstypes.EncryptionTypeCustomerManagedKmsKey), reusePeriodSeconds2), - Check: resource.ComposeTestCheckFunc( - testAccCheckExists(ctx, resourceName, &sm), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckStateMachineExists(ctx, resourceName, &sm), resource.TestCheckResourceAttr(resourceName, names.AttrStatus, string(awstypes.StateMachineStatusActive)), resource.TestCheckResourceAttr(resourceName, names.AttrName, rName), resource.TestCheckResourceAttrSet(resourceName, names.AttrCreationDate), @@ -505,8 +505,8 @@ func TestAccSFNStateMachine_encryptionConfigurationCustomerManagedKMSKey(t *test //Update kmsKeyId { Config: testAccStateMachineConfig_encryptionConfigurationCustomerManagedKMSKey_2(rName, string(awstypes.EncryptionTypeCustomerManagedKmsKey), reusePeriodSeconds2), - Check: resource.ComposeTestCheckFunc( - testAccCheckExists(ctx, resourceName, &sm), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckStateMachineExists(ctx, resourceName, &sm), resource.TestCheckResourceAttr(resourceName, names.AttrStatus, string(awstypes.StateMachineStatusActive)), resource.TestCheckResourceAttr(resourceName, names.AttrName, rName), resource.TestCheckResourceAttrSet(resourceName, names.AttrCreationDate), @@ -523,8 +523,8 @@ func TestAccSFNStateMachine_encryptionConfigurationCustomerManagedKMSKey(t *test //Update Encryption Key Type { Config: testAccStateMachineConfig_encryptionConfigurationServiceOwnedKey(rName, string(awstypes.EncryptionTypeAwsOwnedKey)), - Check: resource.ComposeTestCheckFunc( - testAccCheckExists(ctx, resourceName, &sm), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckStateMachineExists(ctx, resourceName, &sm), resource.TestCheckResourceAttr(resourceName, names.AttrStatus, string(awstypes.StateMachineStatusActive)), resource.TestCheckResourceAttr(resourceName, names.AttrName, rName), resource.TestCheckResourceAttrSet(resourceName, 
names.AttrCreationDate), @@ -547,7 +547,7 @@ func TestAccSFNStateMachine_encryptionConfigurationServiceOwnedKey(t *testing.T) rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) reusePeriodSeconds := 900 - resource.Test(t, resource.TestCase{ + resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SFNServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -555,8 +555,8 @@ func TestAccSFNStateMachine_encryptionConfigurationServiceOwnedKey(t *testing.T) Steps: []resource.TestStep{ { Config: testAccStateMachineConfig_encryptionConfigurationServiceOwnedKey(rName, string(awstypes.EncryptionTypeAwsOwnedKey)), - Check: resource.ComposeTestCheckFunc( - testAccCheckExists(ctx, resourceName, &sm), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckStateMachineExists(ctx, resourceName, &sm), resource.TestCheckResourceAttr(resourceName, names.AttrStatus, string(awstypes.StateMachineStatusActive)), resource.TestCheckResourceAttr(resourceName, names.AttrName, rName), resource.TestCheckResourceAttrSet(resourceName, names.AttrCreationDate), @@ -575,8 +575,8 @@ func TestAccSFNStateMachine_encryptionConfigurationServiceOwnedKey(t *testing.T) //Update Encryption Type { Config: testAccStateMachineConfig_encryptionConfigurationCustomerManagedKMSKey_1(rName, string(awstypes.EncryptionTypeCustomerManagedKmsKey), reusePeriodSeconds), - Check: resource.ComposeTestCheckFunc( - testAccCheckExists(ctx, resourceName, &sm), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckStateMachineExists(ctx, resourceName, &sm), resource.TestCheckResourceAttr(resourceName, names.AttrStatus, string(awstypes.StateMachineStatusActive)), resource.TestCheckResourceAttr(resourceName, names.AttrName, rName), resource.TestCheckResourceAttrSet(resourceName, names.AttrCreationDate), @@ -612,7 +612,7 @@ func TestAccSFNStateMachine_definitionValidation(t *testing.T) { }) } -func 
testAccCheckExists(ctx context.Context, n string, v *sfn.DescribeStateMachineOutput) resource.TestCheckFunc { +func testAccCheckStateMachineExists(ctx context.Context, n string, v *sfn.DescribeStateMachineOutput) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] if !ok { diff --git a/internal/service/sfn/sweep.go b/internal/service/sfn/sweep.go index 5428d5510275..e2a2f6bfa3ac 100644 --- a/internal/service/sfn/sweep.go +++ b/internal/service/sfn/sweep.go @@ -30,7 +30,7 @@ func sweepActivities(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.SFNClient(ctx) input := &sfn.ListActivitiesInput{} @@ -71,7 +71,7 @@ func sweepStateMachines(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.SFNClient(ctx) input := &sfn.ListStateMachinesInput{} diff --git a/internal/service/sfn/tags_gen.go b/internal/service/sfn/tags_gen.go index f452927b6e6b..f29003fd0275 100644 --- a/internal/service/sfn/tags_gen.go +++ b/internal/service/sfn/tags_gen.go @@ -3,8 +3,8 @@ package sfn import ( "context" - "fmt" + "github.com/YakDriver/smarterr" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/sfn" awstypes "github.com/aws/aws-sdk-go-v2/service/sfn/types" @@ -27,7 +27,7 @@ func listTags(ctx context.Context, conn *sfn.Client, identifier string, optFns . output, err := conn.ListTagsForResource(ctx, &input, optFns...) 
if err != nil { - return tftags.New(ctx, nil), err + return tftags.New(ctx, nil), smarterr.NewError(err) } return keyValueTags(ctx, output.Tags), nil @@ -39,7 +39,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri tags, err := listTags(ctx, meta.(*conns.AWSClient).SFNClient(ctx), identifier) if err != nil { - return err + return smarterr.NewError(err) } if inContext, ok := tftags.FromContext(ctx); ok { @@ -117,7 +117,7 @@ func updateTags(ctx context.Context, conn *sfn.Client, identifier string, oldTag _, err := conn.UntagResource(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("untagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } @@ -132,7 +132,7 @@ func updateTags(ctx context.Context, conn *sfn.Client, identifier string, oldTag _, err := conn.TagResource(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("tagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } diff --git a/internal/service/sfn/testdata/Activity/basic/main_gen.tf b/internal/service/sfn/testdata/Activity/basic/main_gen.tf new file mode 100644 index 000000000000..ba0fa7d0576d --- /dev/null +++ b/internal/service/sfn/testdata/Activity/basic/main_gen.tf @@ -0,0 +1,11 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resource "aws_sfn_activity" "test" { + name = var.rName +} +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} diff --git a/internal/service/sfn/testdata/Activity/basic_v6.14.1/main_gen.tf b/internal/service/sfn/testdata/Activity/basic_v6.14.1/main_gen.tf new file mode 100644 index 000000000000..771095df68ad --- /dev/null +++ b/internal/service/sfn/testdata/Activity/basic_v6.14.1/main_gen.tf @@ -0,0 +1,21 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_sfn_activity" "test" { + name = var.rName +} +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "6.14.1" + } + } +} + +provider "aws" {} diff --git a/internal/service/sfn/testdata/Activity/region_override/main_gen.tf b/internal/service/sfn/testdata/Activity/region_override/main_gen.tf new file mode 100644 index 000000000000..75aadfb1b3a3 --- /dev/null +++ b/internal/service/sfn/testdata/Activity/region_override/main_gen.tf @@ -0,0 +1,19 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resource "aws_sfn_activity" "test" { + region = var.region + + name = var.rName +} +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} + +variable "region" { + description = "Region to deploy resource in" + type = string + nullable = false +} diff --git a/internal/service/sfn/testdata/Alias/basic/main_gen.tf b/internal/service/sfn/testdata/Alias/basic/main_gen.tf new file mode 100644 index 000000000000..259ad7490d7b --- /dev/null +++ b/internal/service/sfn/testdata/Alias/basic/main_gen.tf @@ -0,0 +1,145 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_sfn_alias" "test" { + name = var.rName + + routing_configuration { + state_machine_version_arn = aws_sfn_state_machine.test.state_machine_version_arn + weight = 100 + } +} + +resource "aws_sfn_state_machine" "test" { + name = var.rName + role_arn = aws_iam_role.for_sfn.arn + publish = true + + definition = < 0 { + input.SigningParameters = flex.ExpandStringValueMap(v) + } + _, err := conn.PutSigningProfile(ctx, input) if err != nil { @@ -223,6 +236,11 @@ func resourceSigningProfileRead(ctx context.Context, d *schema.ResourceData, met return sdkdiag.AppendErrorf(diags, "setting signing_material: %s", err) } } + if output.SigningParameters != nil { + if err := d.Set("signing_parameters", flex.FlattenStringValueMap(output.SigningParameters)); err != nil { + return sdkdiag.AppendErrorf(diags, "setting signing_parameters: %s", err) + } + } d.Set(names.AttrStatus, output.Status) d.Set(names.AttrVersion, output.ProfileVersion) d.Set("version_arn", output.ProfileVersionArn) diff --git a/internal/service/signer/signing_profile_data_source.go b/internal/service/signer/signing_profile_data_source.go index 0c264888ef2e..0f644f471577 100644 --- a/internal/service/signer/signing_profile_data_source.go +++ b/internal/service/signer/signing_profile_data_source.go @@ -12,6 +12,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" + "github.com/hashicorp/terraform-provider-aws/internal/flex" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -74,6 +75,25 @@ func dataSourceSigningProfile() *schema.Resource { }, }, }, + "signing_material": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + names.AttrCertificateARN: { + Type: schema.TypeString, + Computed: 
true, + }, + }, + }, + }, + "signing_parameters": { + Type: schema.TypeMap, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, names.AttrStatus: { Type: schema.TypeString, Computed: true, @@ -109,13 +129,15 @@ func dataSourceSigningProfileRead(ctx context.Context, d *schema.ResourceData, m return sdkdiag.AppendErrorf(diags, "setting signer signing profile platform id: %s", err) } - if err := d.Set("signature_validity_period", []any{ - map[string]any{ - names.AttrValue: signingProfileOutput.SignatureValidityPeriod.Value, - names.AttrType: signingProfileOutput.SignatureValidityPeriod.Type, - }, - }); err != nil { - return sdkdiag.AppendErrorf(diags, "setting signer signing profile signature validity period: %s", err) + if v := signingProfileOutput.SignatureValidityPeriod; v != nil { + if err := d.Set("signature_validity_period", []any{ + map[string]any{ + names.AttrValue: v.Value, + names.AttrType: v.Type, + }, + }); err != nil { + return sdkdiag.AppendErrorf(diags, "setting signature_validity_period: %s", err) + } } if err := d.Set("platform_display_name", signingProfileOutput.PlatformDisplayName); err != nil { @@ -134,6 +156,18 @@ func dataSourceSigningProfileRead(ctx context.Context, d *schema.ResourceData, m return sdkdiag.AppendErrorf(diags, "setting signer signing profile version arn: %s", err) } + if signingProfileOutput.SigningMaterial != nil { + if err := d.Set("signing_material", flattenSigningMaterial(signingProfileOutput.SigningMaterial)); err != nil { + return sdkdiag.AppendErrorf(diags, "setting signing_material: %s", err) + } + } + + if signingProfileOutput.SigningParameters != nil { + if err := d.Set("signing_parameters", flex.FlattenStringValueMap(signingProfileOutput.SigningParameters)); err != nil { + return sdkdiag.AppendErrorf(diags, "setting signing_parameters: %s", err) + } + } + if err := d.Set(names.AttrStatus, signingProfileOutput.Status); err != nil { return sdkdiag.AppendErrorf(diags, "setting signer signing 
profile status: %s", err) } diff --git a/internal/service/signer/signing_profile_data_source_test.go b/internal/service/signer/signing_profile_data_source_test.go index 5d928f1bc812..e35703a17458 100644 --- a/internal/service/signer/signing_profile_data_source_test.go +++ b/internal/service/signer/signing_profile_data_source_test.go @@ -46,6 +46,39 @@ func TestAccSignerSigningProfileDataSource_basic(t *testing.T) { }) } +func TestAccSignerSigningProfileDataSource_signingParameters(t *testing.T) { + ctx := acctest.Context(t) + rootDomain := acctest.ACMCertificateDomainFromEnv(t) + domainName := acctest.ACMCertificateRandomSubDomain(rootDomain) + var conf signer.GetSigningProfileOutput + rName := fmt.Sprintf("tf_acc_test_%d", sdkacctest.RandInt()) + dataSourceName := "data.aws_signer_signing_profile.test" + resourceName := "aws_signer_signing_profile.test_sp" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + testAccPreCheckSingerSigningProfile(ctx, t, "AmazonFreeRTOS-Default") + }, + ErrorCheck: acctest.ErrorCheck(t, signer.ServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + Config: testAccSigningProfileDataSourceConfig_signingParameters(rName, rootDomain, domainName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSigningProfileExists(ctx, resourceName, &conf), + resource.TestCheckResourceAttrPair(dataSourceName, "platform_id", resourceName, "platform_id"), + resource.TestCheckResourceAttrPair(dataSourceName, "signing_material.#", resourceName, "signing_material.#"), + resource.TestCheckResourceAttrPair(dataSourceName, "signing_material.0.certificate_arn", resourceName, "signing_material.0.certificate_arn"), + resource.TestCheckResourceAttrPair(dataSourceName, "signing_parameters.%", resourceName, "signing_parameters.%"), + resource.TestCheckResourceAttrPair(dataSourceName, "signing_parameters.param1", resourceName, "signing_parameters.param1"), 
+ resource.TestCheckResourceAttrPair(dataSourceName, "signing_parameters.param2", resourceName, "signing_parameters.param2"), + ), + }, + }, + }) +} + func testAccSigningProfileDataSourceConfig_basic(profileName string) string { return fmt.Sprintf(` resource "aws_signer_signing_profile" "test" { @@ -57,3 +90,48 @@ data "aws_signer_signing_profile" "test" { name = aws_signer_signing_profile.test.name }`, profileName) } + +func testAccSigningProfileDataSourceConfig_signingParameters(rName, rootDomain, domainName string) string { + return fmt.Sprintf(` +data "aws_route53_zone" "test" { + name = %[2]q + private_zone = false +} + +resource "aws_acm_certificate" "test" { + domain_name = %[3]q + validation_method = "DNS" +} + +resource "aws_route53_record" "test" { + allow_overwrite = true + name = tolist(aws_acm_certificate.test.domain_validation_options)[0].resource_record_name + records = [tolist(aws_acm_certificate.test.domain_validation_options)[0].resource_record_value] + ttl = 60 + type = tolist(aws_acm_certificate.test.domain_validation_options)[0].resource_record_type + zone_id = data.aws_route53_zone.test.zone_id +} + +resource "aws_acm_certificate_validation" "test" { + certificate_arn = aws_acm_certificate.test.arn + validation_record_fqdns = [aws_route53_record.test.fqdn] +} + +resource "aws_signer_signing_profile" "test_sp" { + platform_id = "AmazonFreeRTOS-Default" + name = %[1]q + + signing_material { + certificate_arn = aws_acm_certificate.test.arn + } + signing_parameters = { + "param1" = "value1" + "param2" = "value2" + } + depends_on = [aws_acm_certificate_validation.test] +} + +data "aws_signer_signing_profile" "test" { + name = aws_signer_signing_profile.test_sp.name +}`, rName, rootDomain, domainName) +} diff --git a/internal/service/signer/signing_profile_permission.go b/internal/service/signer/signing_profile_permission.go index 84ee4b13d4dc..18d237cd5533 100644 --- a/internal/service/signer/signing_profile_permission.go +++ 
b/internal/service/signer/signing_profile_permission.go @@ -121,7 +121,7 @@ func resourceSigningProfilePermissionCreate(ctx context.Context, d *schema.Resou } _, err = tfresource.RetryWhen(ctx, propagationTimeout, - func() (any, error) { + func(ctx context.Context) (any, error) { return conn.AddProfilePermission(ctx, input) }, func(err error) (bool, error) { @@ -139,7 +139,7 @@ func resourceSigningProfilePermissionCreate(ctx context.Context, d *schema.Resou d.SetId(fmt.Sprintf("%s/%s", profileName, statementID)) - _, err = tfresource.RetryWhenNotFound(ctx, propagationTimeout, func() (any, error) { + _, err = tfresource.RetryWhenNotFound(ctx, propagationTimeout, func(ctx context.Context) (any, error) { return findPermissionByTwoPartKey(ctx, conn, profileName, statementID) }) @@ -217,7 +217,7 @@ func resourceSigningProfilePermissionDelete(ctx context.Context, d *schema.Resou return sdkdiag.AppendErrorf(diags, "deleting Signer Signing Profile Permission (%s): %s", d.Id(), err) } - _, err = tfresource.RetryUntilNotFound(ctx, propagationTimeout, func() (any, error) { + _, err = tfresource.RetryUntilNotFound(ctx, propagationTimeout, func(ctx context.Context) (any, error) { return findPermissionByTwoPartKey(ctx, conn, profileName, statementID) }) diff --git a/internal/service/signer/signing_profile_test.go b/internal/service/signer/signing_profile_test.go index 9937043107ea..88121a5f4f45 100644 --- a/internal/service/signer/signing_profile_test.go +++ b/internal/service/signer/signing_profile_test.go @@ -49,6 +49,7 @@ func TestAccSignerSigningProfile_basic(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "revocation_record.#", "0"), resource.TestCheckResourceAttr(resourceName, "signature_validity_period.#", "1"), resource.TestCheckNoResourceAttr(resourceName, "signing_material"), + resource.TestCheckResourceAttr(resourceName, "signing_parameters.%", "0"), resource.TestCheckResourceAttr(resourceName, names.AttrStatus, "Active"), 
resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, "0"), resource.TestCheckResourceAttrSet(resourceName, names.AttrVersion), @@ -231,6 +232,49 @@ func TestAccSignerSigningProfile_signatureValidityPeriod(t *testing.T) { }) } +func TestAccSignerSigningProfile_signingParameters(t *testing.T) { + ctx := acctest.Context(t) + rootDomain := acctest.ACMCertificateDomainFromEnv(t) + domainName := acctest.ACMCertificateRandomSubDomain(rootDomain) + var conf signer.GetSigningProfileOutput + rName := fmt.Sprintf("tf_acc_test_%d", sdkacctest.RandInt()) + resourceName := "aws_signer_signing_profile.test_sp" + certificateName := "aws_acm_certificate.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + testAccPreCheckSingerSigningProfile(ctx, t, "AmazonFreeRTOS-Default") + }, + ErrorCheck: acctest.ErrorCheck(t, signer.ServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckSigningProfileDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccSigningProfileConfig_signingParameters(rName, rootDomain, domainName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSigningProfileExists(ctx, resourceName, &conf), + acctest.CheckResourceAttrRegionalARNFormat(ctx, resourceName, names.AttrARN, "signer", "/signing-profiles/{name}"), + resource.TestCheckResourceAttrPair(resourceName, names.AttrID, resourceName, names.AttrName), + resource.TestCheckResourceAttr(resourceName, names.AttrName, rName), + resource.TestCheckResourceAttr(resourceName, "platform_id", "AmazonFreeRTOS-Default"), + resource.TestCheckResourceAttrPair(resourceName, "signing_material.0.certificate_arn", certificateName, names.AttrARN), + resource.TestCheckResourceAttr(resourceName, "signing_parameters.%", "2"), + resource.TestCheckResourceAttr(resourceName, "signing_parameters.param1", acctest.CtValue1), + resource.TestCheckResourceAttr(resourceName, "signing_parameters.param2", 
acctest.CtValue2), + resource.TestCheckResourceAttrSet(resourceName, names.AttrVersion), + resource.TestCheckResourceAttrSet(resourceName, "version_arn"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + func testAccPreCheckSingerSigningProfile(ctx context.Context, t *testing.T, platformID string) { conn := acctest.Provider.Meta().(*conns.AWSClient).SignerClient(ctx) @@ -370,3 +414,44 @@ resource "aws_signer_signing_profile" "test_sp" { } `, rName) } + +func testAccSigningProfileConfig_signingParameters(rName, rootDomain, domainName string) string { + return fmt.Sprintf(` +data "aws_route53_zone" "test" { + name = %[2]q + private_zone = false +} + +resource "aws_acm_certificate" "test" { + domain_name = %[3]q + validation_method = "DNS" +} + +resource "aws_route53_record" "test" { + allow_overwrite = true + name = tolist(aws_acm_certificate.test.domain_validation_options)[0].resource_record_name + records = [tolist(aws_acm_certificate.test.domain_validation_options)[0].resource_record_value] + ttl = 60 + type = tolist(aws_acm_certificate.test.domain_validation_options)[0].resource_record_type + zone_id = data.aws_route53_zone.test.zone_id +} + +resource "aws_acm_certificate_validation" "test" { + certificate_arn = aws_acm_certificate.test.arn + validation_record_fqdns = [aws_route53_record.test.fqdn] +} + +resource "aws_signer_signing_profile" "test_sp" { + platform_id = "AmazonFreeRTOS-Default" + name = %[1]q + + signing_material { + certificate_arn = aws_acm_certificate.test.arn + } + signing_parameters = { + "param1" = "value1" + "param2" = "value2" + } + depends_on = [aws_acm_certificate_validation.test] +}`, rName, rootDomain, domainName) +} diff --git a/internal/service/signer/tags_gen.go b/internal/service/signer/tags_gen.go index 7df177e2c805..1d3c8dceeaa9 100644 --- a/internal/service/signer/tags_gen.go +++ b/internal/service/signer/tags_gen.go @@ -3,8 +3,8 @@ package signer import ( "context" 
- "fmt" + "github.com/YakDriver/smarterr" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/signer" "github.com/hashicorp/terraform-plugin-log/tflog" @@ -26,7 +26,7 @@ func listTags(ctx context.Context, conn *signer.Client, identifier string, optFn output, err := conn.ListTagsForResource(ctx, &input, optFns...) if err != nil { - return tftags.New(ctx, nil), err + return tftags.New(ctx, nil), smarterr.NewError(err) } return keyValueTags(ctx, output.Tags), nil @@ -38,7 +38,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri tags, err := listTags(ctx, meta.(*conns.AWSClient).SignerClient(ctx), identifier) if err != nil { - return err + return smarterr.NewError(err) } if inContext, ok := tftags.FromContext(ctx); ok { @@ -99,7 +99,7 @@ func updateTags(ctx context.Context, conn *signer.Client, identifier string, old _, err := conn.UntagResource(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("untagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } @@ -114,7 +114,7 @@ func updateTags(ctx context.Context, conn *signer.Client, identifier string, old _, err := conn.TagResource(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("tagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } diff --git a/internal/service/smarterr.hcl b/internal/service/smarterr.hcl new file mode 100644 index 000000000000..cce5fb445b92 --- /dev/null +++ b/internal/service/smarterr.hcl @@ -0,0 +1,191 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +smarterr { + debug = false + hint_match_mode = "first" + hint_join_char = "\n" +} + +template "error_summary" { + format = "{{.happening}} {{.service}} {{.resource}}" +} + +template "error_detail" { + format = < 0 { + // Clean up message + _, err := conn.DeleteMessage(ctx, &sqs.DeleteMessageInput{ + QueueUrl: &queueURL, + ReceiptHandle: output.Messages[0].ReceiptHandle, + }) + if err != nil { + return fmt.Errorf("error deleting message from SQS: %w", err) + } + return nil + } + } + } + } +} + +func testAccPublishActionConfig_basic(rName string) string { + return fmt.Sprintf(` +resource "aws_sns_topic" "test" { + name = %[1]q +} + +resource "aws_sqs_queue" "test" { + name = %[1]q +} + +resource "aws_sns_topic_subscription" "test" { + topic_arn = aws_sns_topic.test.arn + protocol = "sqs" + endpoint = aws_sqs_queue.test.arn +} + +resource "aws_sqs_queue_policy" "test" { + queue_url = aws_sqs_queue.test.id + policy = jsonencode({ + Version = "2008-10-17" + Statement = [{ + Effect = "Allow" + Principal = "*" + Action = "sqs:SendMessage" + Resource = aws_sqs_queue.test.arn + Condition = { + ArnEquals = { + "aws:SourceArn" = aws_sns_topic.test.arn + } + } + }] + }) +} + +action "aws_sns_publish" "test" { + config { + topic_arn = aws_sns_topic.test.arn + message = "Test message from Terraform" + } +} + +resource "terraform_data" "trigger" { + lifecycle { + action_trigger { + events = [after_create] + actions = [action.aws_sns_publish.test] + } + } + + depends_on = [ + aws_sns_topic_subscription.test, + aws_sqs_queue_policy.test + ] +} +`, rName) +} + +func testAccPublishActionConfig_withAttributes(rName string) string { + return fmt.Sprintf(` +resource "aws_sns_topic" "test" { + name = %[1]q +} + +resource "aws_sqs_queue" "test" { + name = %[1]q +} + +resource "aws_sns_topic_subscription" "test" { + topic_arn = aws_sns_topic.test.arn + protocol = "sqs" + endpoint = aws_sqs_queue.test.arn +} + +resource "aws_sqs_queue_policy" "test" { 
+ queue_url = aws_sqs_queue.test.id + policy = jsonencode({ + Version = "2008-10-17" + Statement = [{ + Effect = "Allow" + Principal = "*" + Action = "sqs:SendMessage" + Resource = aws_sqs_queue.test.arn + Condition = { + ArnEquals = { + "aws:SourceArn" = aws_sns_topic.test.arn + } + } + }] + }) +} + +action "aws_sns_publish" "test" { + config { + topic_arn = aws_sns_topic.test.arn + subject = "Test Subject" + message = "Test message with attributes" + + message_attributes { + map_block_key = "priority" + data_type = "String" + string_value = "high" + } + + message_attributes { + map_block_key = "source" + data_type = "String" + string_value = "terraform" + } + } +} + +resource "terraform_data" "trigger" { + lifecycle { + action_trigger { + events = [after_create] + actions = [action.aws_sns_publish.test] + } + } + + depends_on = [ + aws_sns_topic_subscription.test, + aws_sqs_queue_policy.test + ] +} +`, rName) +} diff --git a/internal/service/sns/service_endpoint_resolver_gen.go b/internal/service/sns/service_endpoint_resolver_gen.go index 6fb32a73767c..f6104976a8a8 100644 --- a/internal/service/sns/service_endpoint_resolver_gen.go +++ b/internal/service/sns/service_endpoint_resolver_gen.go @@ -62,7 +62,7 @@ func (r resolverV2) ResolveEndpoint(ctx context.Context, params sns.EndpointPara }) params.UseFIPS = aws.Bool(false) } else { - err = fmt.Errorf("looking up sns endpoint %q: %s", hostname, err) + err = fmt.Errorf("looking up sns endpoint %q: %w", hostname, err) return } } else { diff --git a/internal/service/sns/service_endpoints_gen_test.go b/internal/service/sns/service_endpoints_gen_test.go index 2deb34002e64..c1c5eb590bf2 100644 --- a/internal/service/sns/service_endpoints_gen_test.go +++ b/internal/service/sns/service_endpoints_gen_test.go @@ -521,7 +521,7 @@ func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { ) } -var errCancelOperation = fmt.Errorf("Test: Canceling request") +var errCancelOperation = errors.New("Test: 
Canceling request") func addCancelRequestMiddleware() func(*middleware.Stack) error { return func(stack *middleware.Stack) error { diff --git a/internal/service/sns/service_package_gen.go b/internal/service/sns/service_package_gen.go index cde4f0a3d220..c11ebeb455d8 100644 --- a/internal/service/sns/service_package_gen.go +++ b/internal/service/sns/service_package_gen.go @@ -7,7 +7,6 @@ import ( "unique" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/sns" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -18,6 +17,17 @@ import ( type servicePackage struct{} +func (p *servicePackage) Actions(ctx context.Context) []*inttypes.ServicePackageAction { + return []*inttypes.ServicePackageAction{ + { + Factory: newPublishAction, + TypeName: "aws_sns_publish", + Name: "Publish", + Region: unique.Make(inttypes.ResourceRegionDefault()), + }, + } +} + func (p *servicePackage) FrameworkDataSources(ctx context.Context) []*inttypes.ServicePackageFrameworkDataSource { return []*inttypes.ServicePackageFrameworkDataSource{} } @@ -62,24 +72,48 @@ func (p *servicePackage) SDKResources(ctx context.Context) []*inttypes.ServicePa IdentifierAttribute: names.AttrARN, }), Region: unique.Make(inttypes.ResourceRegionDefault()), + Identity: inttypes.RegionalARNIdentity( + inttypes.WithIdentityDuplicateAttrs(names.AttrID), + ), + Import: inttypes.SDKv2Import{ + WrappedImport: true, + }, }, { Factory: resourceTopicDataProtectionPolicy, TypeName: "aws_sns_topic_data_protection_policy", Name: "Topic Data Protection Policy", Region: unique.Make(inttypes.ResourceRegionDefault()), + Identity: inttypes.RegionalARNIdentity( + inttypes.WithIdentityDuplicateAttrs(names.AttrID), + ), + Import: inttypes.SDKv2Import{ + WrappedImport: true, + }, }, { Factory: resourceTopicPolicy, TypeName: "aws_sns_topic_policy", Name: "Topic Policy", Region: 
unique.Make(inttypes.ResourceRegionDefault()), + Identity: inttypes.RegionalARNIdentity( + inttypes.WithIdentityDuplicateAttrs(names.AttrID), + ), + Import: inttypes.SDKv2Import{ + WrappedImport: true, + }, }, { Factory: resourceTopicSubscription, TypeName: "aws_sns_topic_subscription", Name: "Topic Subscription", Region: unique.Make(inttypes.ResourceRegionDefault()), + Identity: inttypes.RegionalARNIdentity( + inttypes.WithIdentityDuplicateAttrs(names.AttrID), + ), + Import: inttypes.SDKv2Import{ + WrappedImport: true, + }, }, } } @@ -107,7 +141,7 @@ func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) ( func(o *sns.Options) { if inContext, ok := conns.FromContext(ctx); ok && inContext.VCREnabled() { tflog.Info(ctx, "overriding retry behavior to immediately return VCR errors") - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(vcr.InteractionNotFoundRetryableFunc)) + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), vcr.InteractionNotFoundRetryableFunc) } }, withExtraOptions(ctx, p, config), diff --git a/internal/service/sns/sweep.go b/internal/service/sns/sweep.go index eb32eb81a73a..b4cad0785065 100644 --- a/internal/service/sns/sweep.go +++ b/internal/service/sns/sweep.go @@ -55,7 +55,7 @@ func sweepPlatformApplications(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %w", err) + return fmt.Errorf("getting client: %w", err) } input := &sns.ListPlatformApplicationsInput{} conn := client.SNSClient(ctx) @@ -96,7 +96,7 @@ func sweepTopics(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %w", err) + return fmt.Errorf("getting client: %w", err) } input := &sns.ListTopicsInput{} conn := client.SNSClient(ctx) @@ -137,7 +137,7 @@ func 
sweepTopicSubscriptions(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %w", err) + return fmt.Errorf("getting client: %w", err) } input := &sns.ListSubscriptionsInput{} conn := client.SNSClient(ctx) diff --git a/internal/service/sns/tags_gen.go b/internal/service/sns/tags_gen.go index c0322ee2e899..cf5b116f2eeb 100644 --- a/internal/service/sns/tags_gen.go +++ b/internal/service/sns/tags_gen.go @@ -3,8 +3,8 @@ package sns import ( "context" - "fmt" + "github.com/YakDriver/smarterr" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/sns" awstypes "github.com/aws/aws-sdk-go-v2/service/sns/types" @@ -27,7 +27,7 @@ func listTags(ctx context.Context, conn *sns.Client, identifier string, optFns . output, err := conn.ListTagsForResource(ctx, &input, optFns...) if err != nil { - return tftags.New(ctx, nil), err + return tftags.New(ctx, nil), smarterr.NewError(err) } return keyValueTags(ctx, output.Tags), nil @@ -39,7 +39,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri tags, err := listTags(ctx, meta.(*conns.AWSClient).SNSClient(ctx), identifier) if err != nil { - return err + return smarterr.NewError(err) } if inContext, ok := tftags.FromContext(ctx); ok { @@ -126,7 +126,7 @@ func updateTags(ctx context.Context, conn *sns.Client, identifier string, oldTag _, err := conn.UntagResource(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("untagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } @@ -141,7 +141,7 @@ func updateTags(ctx context.Context, conn *sns.Client, identifier string, oldTag _, err := conn.TagResource(ctx, &input, optFns...) 
if err != nil { - return fmt.Errorf("tagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } diff --git a/internal/service/sns/testdata/Topic/basic/main_gen.tf b/internal/service/sns/testdata/Topic/basic/main_gen.tf new file mode 100644 index 000000000000..671fdc2a03f4 --- /dev/null +++ b/internal/service/sns/testdata/Topic/basic/main_gen.tf @@ -0,0 +1,12 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resource "aws_sns_topic" "test" { + name = var.rName +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} diff --git a/internal/service/sns/testdata/Topic/basic_v6.4.0/main_gen.tf b/internal/service/sns/testdata/Topic/basic_v6.4.0/main_gen.tf new file mode 100644 index 000000000000..d0c31bf1439b --- /dev/null +++ b/internal/service/sns/testdata/Topic/basic_v6.4.0/main_gen.tf @@ -0,0 +1,22 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resource "aws_sns_topic" "test" { + name = var.rName +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "6.4.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/sns/testdata/Topic/region_override/main_gen.tf b/internal/service/sns/testdata/Topic/region_override/main_gen.tf new file mode 100644 index 000000000000..5a6e231945e1 --- /dev/null +++ b/internal/service/sns/testdata/Topic/region_override/main_gen.tf @@ -0,0 +1,20 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_sns_topic" "test" { + region = var.region + + name = var.rName +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} + +variable "region" { + description = "Region to deploy resource in" + type = string + nullable = false +} diff --git a/internal/service/sns/testdata/TopicDataProtectionPolicy/basic/main_gen.tf b/internal/service/sns/testdata/TopicDataProtectionPolicy/basic/main_gen.tf new file mode 100644 index 000000000000..31f3580b7193 --- /dev/null +++ b/internal/service/sns/testdata/TopicDataProtectionPolicy/basic/main_gen.tf @@ -0,0 +1,40 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +data "aws_partition" "current" {} + +resource "aws_sns_topic" "test" { + name = var.rName +} + +resource "aws_sns_topic_data_protection_policy" "test" { + arn = aws_sns_topic.test.arn + policy = jsonencode( + { + "Description" = "Default data protection policy" + "Name" = "__default_data_protection_policy" + "Statement" = [ + { + "DataDirection" = "Inbound" + "DataIdentifier" = [ + "arn:${data.aws_partition.current.partition}:dataprotection::aws:data-identifier/EmailAddress", + ] + "Operation" = { + "Deny" = {} + } + "Principal" = [ + "*", + ] + "Sid" = var.rName + }, + ] + "Version" = "2021-06-01" + } + ) +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} diff --git a/internal/service/sns/testdata/TopicDataProtectionPolicy/basic_v6.8.0/main_gen.tf b/internal/service/sns/testdata/TopicDataProtectionPolicy/basic_v6.8.0/main_gen.tf new file mode 100644 index 000000000000..08425fc4a7ae --- /dev/null +++ b/internal/service/sns/testdata/TopicDataProtectionPolicy/basic_v6.8.0/main_gen.tf @@ -0,0 +1,50 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +data "aws_partition" "current" {} + +resource "aws_sns_topic" "test" { + name = var.rName +} + +resource "aws_sns_topic_data_protection_policy" "test" { + arn = aws_sns_topic.test.arn + policy = jsonencode( + { + "Description" = "Default data protection policy" + "Name" = "__default_data_protection_policy" + "Statement" = [ + { + "DataDirection" = "Inbound" + "DataIdentifier" = [ + "arn:${data.aws_partition.current.partition}:dataprotection::aws:data-identifier/EmailAddress", + ] + "Operation" = { + "Deny" = {} + } + "Principal" = [ + "*", + ] + "Sid" = var.rName + }, + ] + "Version" = "2021-06-01" + } + ) +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "6.8.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/sns/testdata/TopicDataProtectionPolicy/region_override/main_gen.tf b/internal/service/sns/testdata/TopicDataProtectionPolicy/region_override/main_gen.tf new file mode 100644 index 000000000000..fc36c2fa89ce --- /dev/null +++ b/internal/service/sns/testdata/TopicDataProtectionPolicy/region_override/main_gen.tf @@ -0,0 +1,50 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +data "aws_partition" "current" {} + +resource "aws_sns_topic" "test" { + region = var.region + + name = var.rName +} + +resource "aws_sns_topic_data_protection_policy" "test" { + region = var.region + + arn = aws_sns_topic.test.arn + policy = jsonencode( + { + "Description" = "Default data protection policy" + "Name" = "__default_data_protection_policy" + "Statement" = [ + { + "DataDirection" = "Inbound" + "DataIdentifier" = [ + "arn:${data.aws_partition.current.partition}:dataprotection::aws:data-identifier/EmailAddress", + ] + "Operation" = { + "Deny" = {} + } + "Principal" = [ + "*", + ] + "Sid" = var.rName + }, + ] + "Version" = "2021-06-01" + } + ) +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} + +variable "region" { + description = "Region to deploy resource in" + type = string + nullable = false +} diff --git a/internal/service/sns/testdata/TopicPolicy/basic/main_gen.tf b/internal/service/sns/testdata/TopicPolicy/basic/main_gen.tf new file mode 100644 index 000000000000..b120913d4cc7 --- /dev/null +++ b/internal/service/sns/testdata/TopicPolicy/basic/main_gen.tf @@ -0,0 +1,38 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_sns_topic" "test" { + name = var.rName +} + +resource "aws_sns_topic_policy" "test" { + arn = aws_sns_topic.test.arn + policy = <" + { + ConfigDirectory: config.StaticDirectory("testdata/TopicDataProtectionPolicy/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ImportStateKind: resource.ImportCommandWithID, + ImportStateIdFunc: acctest.CrossRegionImportStateIdFunc(resourceName), + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + + // Step 3: Import command without appended "@" + { + ConfigDirectory: config.StaticDirectory("testdata/TopicDataProtectionPolicy/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ImportStateKind: resource.ImportCommandWithID, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + + // Step 4: Import block with Import ID and appended "@" + { + ConfigDirectory: config.StaticDirectory("testdata/TopicDataProtectionPolicy/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportStateIdFunc: acctest.CrossRegionImportStateIdFunc(resourceName), + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + }, + + // Step 
5: Import block with Import ID and no appended "@" + { + ConfigDirectory: config.StaticDirectory("testdata/TopicDataProtectionPolicy/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + }, + + // Step 6: Import block with Resource Identity + { + ConfigDirectory: config.StaticDirectory("testdata/TopicDataProtectionPolicy/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithResourceIdentity, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + }, + }, + }) +} + +// Resource Identity was added after v6.8.0 +func TestAccSNSTopicDataProtectionPolicy_Identity_ExistingResource(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_sns_topic_data_protection_policy.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, 
resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.SNSServiceID), + CheckDestroy: testAccCheckTopicDataProtectionPolicyDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/TopicDataProtectionPolicy/basic_v6.8.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTopicDataProtectionPolicyExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/TopicDataProtectionPolicy/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + }, + }) +} + +// Resource Identity was added after v6.8.0 +func TestAccSNSTopicDataProtectionPolicy_Identity_ExistingResource_NoRefresh_NoChange(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_sns_topic_data_protection_policy.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + 
TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.SNSServiceID), + CheckDestroy: testAccCheckTopicDataProtectionPolicyDestroy(ctx), + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/TopicDataProtectionPolicy/basic_v6.8.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTopicDataProtectionPolicyExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/TopicDataProtectionPolicy/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + }, + }) +} diff --git a/internal/service/sns/topic_data_protection_policy_test.go b/internal/service/sns/topic_data_protection_policy_test.go index a34ae9492cc2..b54038d9ad46 100644 --- a/internal/service/sns/topic_data_protection_policy_test.go +++ b/internal/service/sns/topic_data_protection_policy_test.go @@ -133,3 +133,17 @@ resource "aws_sns_topic_data_protection_policy" "test" { } `, rName) } + +func 
testAccCheckTopicDataProtectionPolicyExists(ctx context.Context, n string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + conn := acctest.Provider.Meta().(*conns.AWSClient).SNSClient(ctx) + _, err := tfsns.FindDataProtectionPolicyByARN(ctx, conn, rs.Primary.ID) + + return err + } +} diff --git a/internal/service/sns/topic_data_source_tags_gen_test.go b/internal/service/sns/topic_data_source_tags_gen_test.go index e6c4acbd5cc4..3c7dcc1602cc 100644 --- a/internal/service/sns/topic_data_source_tags_gen_test.go +++ b/internal/service/sns/topic_data_source_tags_gen_test.go @@ -6,7 +6,6 @@ import ( "testing" "github.com/hashicorp/terraform-plugin-testing/config" - sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/knownvalue" "github.com/hashicorp/terraform-plugin-testing/statecheck" @@ -17,10 +16,11 @@ import ( func TestAccSNSTopicDataSource_tags(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_sns_topic.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SNSServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -45,10 +45,11 @@ func TestAccSNSTopicDataSource_tags(t *testing.T) { func TestAccSNSTopicDataSource_tags_NullMap(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_sns_topic.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, 
resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SNSServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -69,10 +70,11 @@ func TestAccSNSTopicDataSource_tags_NullMap(t *testing.T) { func TestAccSNSTopicDataSource_tags_EmptyMap(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_sns_topic.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SNSServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -93,10 +95,11 @@ func TestAccSNSTopicDataSource_tags_EmptyMap(t *testing.T) { func TestAccSNSTopicDataSource_tags_DefaultTags_nonOverlapping(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_sns_topic.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SNSServiceID), Steps: []resource.TestStep{ @@ -125,10 +128,11 @@ func TestAccSNSTopicDataSource_tags_DefaultTags_nonOverlapping(t *testing.T) { func TestAccSNSTopicDataSource_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_sns_topic.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SNSServiceID), Steps: []resource.TestStep{ @@ -163,10 +167,11 @@ func 
TestAccSNSTopicDataSource_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) func TestAccSNSTopicDataSource_tags_IgnoreTags_Overlap_ResourceTag(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_sns_topic.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SNSServiceID), Steps: []resource.TestStep{ diff --git a/internal/service/sns/topic_identity_gen_test.go b/internal/service/sns/topic_identity_gen_test.go new file mode 100644 index 000000000000..0f717373c44c --- /dev/null +++ b/internal/service/sns/topic_identity_gen_test.go @@ -0,0 +1,340 @@ +// Code generated by internal/generate/identitytests/main.go; DO NOT EDIT. + +package sns_test + +import ( + "testing" + + "github.com/hashicorp/terraform-plugin-testing/compare" + "github.com/hashicorp/terraform-plugin-testing/config" + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/knownvalue" + "github.com/hashicorp/terraform-plugin-testing/plancheck" + "github.com/hashicorp/terraform-plugin-testing/statecheck" + "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" + "github.com/hashicorp/terraform-plugin-testing/tfversion" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccSNSTopic_Identity_Basic(t *testing.T) { + ctx := acctest.Context(t) + + var v map[string]string + resourceName := "aws_sns_topic.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + 
TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.SNSServiceID), + CheckDestroy: testAccCheckTopicDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + // Step 1: Setup + { + ConfigDirectory: config.StaticDirectory("testdata/Topic/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTopicExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New(names.AttrARN), compare.ValuesSame()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + + // Step 2: Import command + { + ConfigDirectory: config.StaticDirectory("testdata/Topic/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ImportStateKind: resource.ImportCommandWithID, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + + // Step 3: Import block with Import ID + { + ConfigDirectory: config.StaticDirectory("testdata/Topic/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + 
plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + }, + }, + }, + + // Step 4: Import block with Resource Identity + { + ConfigDirectory: config.StaticDirectory("testdata/Topic/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithResourceIdentity, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + }, + }, + }, + }, + }) +} + +func TestAccSNSTopic_Identity_RegionOverride(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_sns_topic.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.SNSServiceID), + CheckDestroy: acctest.CheckDestroyNoop, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + // Step 1: Setup + { + ConfigDirectory: config.StaticDirectory("testdata/Topic/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New(names.AttrARN), 
compare.ValuesSame()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + + // Step 2: Import command with appended "@" + { + ConfigDirectory: config.StaticDirectory("testdata/Topic/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ImportStateKind: resource.ImportCommandWithID, + ImportStateIdFunc: acctest.CrossRegionImportStateIdFunc(resourceName), + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + + // Step 3: Import command without appended "@" + { + ConfigDirectory: config.StaticDirectory("testdata/Topic/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ImportStateKind: resource.ImportCommandWithID, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + + // Step 4: Import block with Import ID and appended "@" + { + ConfigDirectory: config.StaticDirectory("testdata/Topic/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportStateIdFunc: acctest.CrossRegionImportStateIdFunc(resourceName), + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), 
knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + }, + + // Step 5: Import block with Import ID and no appended "@" + { + ConfigDirectory: config.StaticDirectory("testdata/Topic/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + }, + + // Step 6: Import block with Resource Identity + { + ConfigDirectory: config.StaticDirectory("testdata/Topic/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithResourceIdentity, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + }, + }, + }) +} + +// Resource Identity was added after v6.4.0 +func TestAccSNSTopic_Identity_ExistingResource(t *testing.T) { + ctx := acctest.Context(t) + + var v map[string]string + resourceName := 
"aws_sns_topic.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.SNSServiceID), + CheckDestroy: testAccCheckTopicDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/Topic/basic_v6.4.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTopicExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Topic/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + }, + }) +} + +// Resource Identity was added after v6.4.0 +func TestAccSNSTopic_Identity_ExistingResource_NoRefresh_NoChange(t *testing.T) { + ctx := acctest.Context(t) + + var v map[string]string + resourceName := "aws_sns_topic.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, 
resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.SNSServiceID), + CheckDestroy: testAccCheckTopicDestroy(ctx), + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/Topic/basic_v6.4.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTopicExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Topic/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + }, + }) +} diff --git a/internal/service/sns/topic_policy.go b/internal/service/sns/topic_policy.go index ff8a4823ec6f..70430b20f81c 100644 --- a/internal/service/sns/topic_policy.go +++ b/internal/service/sns/topic_policy.go @@ -23,6 +23,8 @@ import ( ) // @SDKResource("aws_sns_topic_policy", name="Topic Policy") +// @ArnIdentity +// @Testing(preIdentityVersion="v6.8.0") func resourceTopicPolicy() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceTopicPolicyUpsert, @@ 
-30,10 +32,6 @@ func resourceTopicPolicy() *schema.Resource { UpdateWithoutTimeout: resourceTopicPolicyUpsert, DeleteWithoutTimeout: resourceTopicPolicyDelete, - Importer: &schema.ResourceImporter{ - StateContext: schema.ImportStatePassthroughContext, - }, - Schema: map[string]*schema.Schema{ names.AttrARN: { Type: schema.TypeString, diff --git a/internal/service/sns/topic_policy_identity_gen_test.go b/internal/service/sns/topic_policy_identity_gen_test.go new file mode 100644 index 000000000000..efbb3ca4ced8 --- /dev/null +++ b/internal/service/sns/topic_policy_identity_gen_test.go @@ -0,0 +1,337 @@ +// Code generated by internal/generate/identitytests/main.go; DO NOT EDIT. + +package sns_test + +import ( + "testing" + + "github.com/hashicorp/terraform-plugin-testing/compare" + "github.com/hashicorp/terraform-plugin-testing/config" + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/knownvalue" + "github.com/hashicorp/terraform-plugin-testing/plancheck" + "github.com/hashicorp/terraform-plugin-testing/statecheck" + "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" + "github.com/hashicorp/terraform-plugin-testing/tfversion" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccSNSTopicPolicy_Identity_Basic(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_sns_topic_policy.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.SNSServiceID), + CheckDestroy: 
testAccCheckTopicPolicyDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + // Step 1: Setup + { + ConfigDirectory: config.StaticDirectory("testdata/TopicPolicy/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTopicPolicyExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New(names.AttrARN), compare.ValuesSame()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + + // Step 2: Import command + { + ConfigDirectory: config.StaticDirectory("testdata/TopicPolicy/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ImportStateKind: resource.ImportCommandWithID, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + + // Step 3: Import block with Import ID + { + ConfigDirectory: config.StaticDirectory("testdata/TopicPolicy/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + }, + }, + 
}, + + // Step 4: Import block with Resource Identity + { + ConfigDirectory: config.StaticDirectory("testdata/TopicPolicy/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithResourceIdentity, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + }, + }, + }, + }, + }) +} + +func TestAccSNSTopicPolicy_Identity_RegionOverride(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_sns_topic_policy.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.SNSServiceID), + CheckDestroy: acctest.CheckDestroyNoop, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + // Step 1: Setup + { + ConfigDirectory: config.StaticDirectory("testdata/TopicPolicy/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New(names.AttrARN), compare.ValuesSame()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + statecheck.ExpectIdentity(resourceName, 
map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + + // Step 2: Import command with appended "@" + { + ConfigDirectory: config.StaticDirectory("testdata/TopicPolicy/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ImportStateKind: resource.ImportCommandWithID, + ImportStateIdFunc: acctest.CrossRegionImportStateIdFunc(resourceName), + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + + // Step 3: Import command without appended "@" + { + ConfigDirectory: config.StaticDirectory("testdata/TopicPolicy/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ImportStateKind: resource.ImportCommandWithID, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + + // Step 4: Import block with Import ID and appended "@" + { + ConfigDirectory: config.StaticDirectory("testdata/TopicPolicy/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportStateIdFunc: acctest.CrossRegionImportStateIdFunc(resourceName), + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + }, + + // Step 5: 
Import block with Import ID and no appended "@" + { + ConfigDirectory: config.StaticDirectory("testdata/TopicPolicy/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + }, + + // Step 6: Import block with Resource Identity + { + ConfigDirectory: config.StaticDirectory("testdata/TopicPolicy/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithResourceIdentity, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + }, + }, + }) +} + +// Resource Identity was added after v6.8.0 +func TestAccSNSTopicPolicy_Identity_ExistingResource(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_sns_topic_policy.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: 
[]tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.SNSServiceID), + CheckDestroy: testAccCheckTopicPolicyDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/TopicPolicy/basic_v6.8.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTopicPolicyExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/TopicPolicy/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + }, + }) +} + +// Resource Identity was added after v6.8.0 +func TestAccSNSTopicPolicy_Identity_ExistingResource_NoRefresh_NoChange(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_sns_topic_policy.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { 
acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.SNSServiceID), + CheckDestroy: testAccCheckTopicPolicyDestroy(ctx), + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/TopicPolicy/basic_v6.8.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTopicPolicyExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/TopicPolicy/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + }, + }) +} diff --git a/internal/service/sns/topic_policy_test.go b/internal/service/sns/topic_policy_test.go index de3a743c18cd..b42edfdebdac 100644 --- a/internal/service/sns/topic_policy_test.go +++ b/internal/service/sns/topic_policy_test.go @@ -331,3 +331,28 @@ resource "aws_sns_topic_policy" "test" { } `, rName) } + +func testAccCheckTopicPolicyExists(ctx context.Context, n string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No SNS Topic ID is set") + } 
+ + conn := acctest.Provider.Meta().(*conns.AWSClient).SNSClient(ctx) + output, err := tfsns.FindTopicAttributesByARN(ctx, conn, rs.Primary.ID) + if err != nil { + return err + } + + if output[tfsns.TopicAttributeNamePolicy] == "" { + return fmt.Errorf("Topic policy not found") + } + + return nil + } +} diff --git a/internal/service/sns/topic_subscription.go b/internal/service/sns/topic_subscription.go index d0fcaa812120..0414158410a6 100644 --- a/internal/service/sns/topic_subscription.go +++ b/internal/service/sns/topic_subscription.go @@ -157,6 +157,10 @@ var ( ) // @SDKResource("aws_sns_topic_subscription", name="Topic Subscription") +// @ArnIdentity +// @Testing(existsType="map[string]string") +// @Testing(preIdentityVersion="v6.8.0") +// @Testing(importIgnore="confirmation_timeout_in_minutes;endpoint_auto_confirms") func resourceTopicSubscription() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceTopicSubscriptionCreate, @@ -164,10 +168,6 @@ func resourceTopicSubscription() *schema.Resource { UpdateWithoutTimeout: resourceTopicSubscriptionUpdate, DeleteWithoutTimeout: resourceTopicSubscriptionDelete, - Importer: &schema.ResourceImporter{ - StateContext: schema.ImportStatePassthroughContext, - }, - CustomizeDiff: resourceTopicSubscriptionCustomizeDiff, Schema: subscriptionSchema, @@ -252,7 +252,7 @@ func resourceTopicSubscriptionRead(ctx context.Context, d *schema.ResourceData, } } - outputRaw, err := tfresource.RetryWhenNewResourceNotFound(ctx, subscriptionCreateTimeout, func() (any, error) { + attributes, err := tfresource.RetryWhenNewResourceNotFound(ctx, subscriptionCreateTimeout, func(ctx context.Context) (map[string]string, error) { return findSubscriptionAttributesByARN(ctx, conn, d.Id()) }, d.IsNewResource()) @@ -266,8 +266,6 @@ func resourceTopicSubscriptionRead(ctx context.Context, d *schema.ResourceData, return sdkdiag.AppendErrorf(diags, "reading SNS Topic Subscription (%s): %s", d.Id(), err) } - attributes := 
outputRaw.(map[string]string) - return sdkdiag.AppendFromErr(diags, subscriptionAttributeMap.APIAttributesToResourceData(attributes, d)) } @@ -593,18 +591,18 @@ func normalizeTopicSubscriptionDeliveryPolicy(policy string) ([]byte, error) { var deliveryPolicy TopicSubscriptionDeliveryPolicy if err := json.Unmarshal([]byte(policy), &deliveryPolicy); err != nil { - return nil, fmt.Errorf("[WARN] Unable to unmarshal SNS Topic Subscription delivery policy JSON: %s", err) + return nil, fmt.Errorf("[WARN] Unable to unmarshal SNS Topic Subscription delivery policy JSON: %w", err) } normalizedDeliveryPolicy, err := json.Marshal(deliveryPolicy) if err != nil { - return nil, fmt.Errorf("[WARN] Unable to marshal SNS Topic Subscription delivery policy back to JSON: %s", err) + return nil, fmt.Errorf("[WARN] Unable to marshal SNS Topic Subscription delivery policy back to JSON: %w", err) } b := bytes.NewBufferString("") if err := json.Compact(b, normalizedDeliveryPolicy); err != nil { - return nil, fmt.Errorf("[WARN] Unable to marshal SNS Topic Subscription delivery policy back to JSON: %s", err) + return nil, fmt.Errorf("[WARN] Unable to marshal SNS Topic Subscription delivery policy back to JSON: %w", err) } return b.Bytes(), nil diff --git a/internal/service/sns/topic_subscription_identity_gen_test.go b/internal/service/sns/topic_subscription_identity_gen_test.go new file mode 100644 index 000000000000..b77f677e02ae --- /dev/null +++ b/internal/service/sns/topic_subscription_identity_gen_test.go @@ -0,0 +1,359 @@ +// Code generated by internal/generate/identitytests/main.go; DO NOT EDIT. 
+ +package sns_test + +import ( + "testing" + + "github.com/hashicorp/terraform-plugin-testing/compare" + "github.com/hashicorp/terraform-plugin-testing/config" + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/knownvalue" + "github.com/hashicorp/terraform-plugin-testing/plancheck" + "github.com/hashicorp/terraform-plugin-testing/statecheck" + "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" + "github.com/hashicorp/terraform-plugin-testing/tfversion" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccSNSTopicSubscription_Identity_Basic(t *testing.T) { + ctx := acctest.Context(t) + + var v map[string]string + resourceName := "aws_sns_topic_subscription.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.SNSServiceID), + CheckDestroy: testAccCheckTopicSubscriptionDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + // Step 1: Setup + { + ConfigDirectory: config.StaticDirectory("testdata/TopicSubscription/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTopicSubscriptionExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New(names.AttrARN), compare.ValuesSame()), + 
statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + + // Step 2: Import command + { + ConfigDirectory: config.StaticDirectory("testdata/TopicSubscription/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ImportStateKind: resource.ImportCommandWithID, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "confirmation_timeout_in_minutes", "endpoint_auto_confirms", + }, + }, + + // Step 3: Import block with Import ID + { + ConfigDirectory: config.StaticDirectory("testdata/TopicSubscription/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + }, + }, + ExpectNonEmptyPlan: true, + }, + + // Step 4: Import block with Resource Identity + { + ConfigDirectory: config.StaticDirectory("testdata/TopicSubscription/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithResourceIdentity, + ImportPlanChecks: resource.ImportPlanChecks{ + 
PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + }, + }, + ExpectNonEmptyPlan: true, + }, + }, + }) +} + +func TestAccSNSTopicSubscription_Identity_RegionOverride(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_sns_topic_subscription.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.SNSServiceID), + CheckDestroy: acctest.CheckDestroyNoop, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + // Step 1: Setup + { + ConfigDirectory: config.StaticDirectory("testdata/TopicSubscription/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New(names.AttrARN), compare.ValuesSame()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + + // Step 2: Import command with appended "@" + { + ConfigDirectory: 
config.StaticDirectory("testdata/TopicSubscription/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ImportStateKind: resource.ImportCommandWithID, + ImportStateIdFunc: acctest.CrossRegionImportStateIdFunc(resourceName), + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "confirmation_timeout_in_minutes", "endpoint_auto_confirms", + }, + }, + + // Step 3: Import command without appended "@" + { + ConfigDirectory: config.StaticDirectory("testdata/TopicSubscription/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ImportStateKind: resource.ImportCommandWithID, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "confirmation_timeout_in_minutes", "endpoint_auto_confirms", + }, + }, + + // Step 4: Import block with Import ID and appended "@" + { + ConfigDirectory: config.StaticDirectory("testdata/TopicSubscription/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportStateIdFunc: acctest.CrossRegionImportStateIdFunc(resourceName), + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), 
knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + ExpectNonEmptyPlan: true, + }, + + // Step 5: Import block with Import ID and no appended "@" + { + ConfigDirectory: config.StaticDirectory("testdata/TopicSubscription/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + ExpectNonEmptyPlan: true, + }, + + // Step 6: Import block with Resource Identity + { + ConfigDirectory: config.StaticDirectory("testdata/TopicSubscription/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithResourceIdentity, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + ExpectNonEmptyPlan: true, + }, + }, + }) +} + +// Resource 
Identity was added after v6.8.0 +func TestAccSNSTopicSubscription_Identity_ExistingResource(t *testing.T) { + ctx := acctest.Context(t) + + var v map[string]string + resourceName := "aws_sns_topic_subscription.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.SNSServiceID), + CheckDestroy: testAccCheckTopicSubscriptionDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/TopicSubscription/basic_v6.8.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTopicSubscriptionExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/TopicSubscription/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + }, + }) +} + +// Resource Identity was added after v6.8.0 +func 
TestAccSNSTopicSubscription_Identity_ExistingResource_NoRefresh_NoChange(t *testing.T) { + ctx := acctest.Context(t) + + var v map[string]string + resourceName := "aws_sns_topic_subscription.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.SNSServiceID), + CheckDestroy: testAccCheckTopicSubscriptionDestroy(ctx), + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/TopicSubscription/basic_v6.8.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTopicSubscriptionExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/TopicSubscription/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + }, + }) +} diff --git a/internal/service/sns/topic_subscription_test.go b/internal/service/sns/topic_subscription_test.go index b59c4ca76c59..b52128b61da5 
100644 --- a/internal/service/sns/topic_subscription_test.go +++ b/internal/service/sns/topic_subscription_test.go @@ -866,7 +866,7 @@ func testAccCheckTopicSubscriptionDeliveryPolicyAttribute(attributes *map[string var apiDeliveryPolicy tfsns.TopicSubscriptionDeliveryPolicy if err := json.Unmarshal([]byte(apiDeliveryPolicyJSONString), &apiDeliveryPolicy); err != nil { - return fmt.Errorf("unable to unmarshal SNS Topic Subscription delivery policy JSON (%s): %s", apiDeliveryPolicyJSONString, err) + return fmt.Errorf("unable to unmarshal SNS Topic Subscription delivery policy JSON (%s): %w", apiDeliveryPolicyJSONString, err) } if reflect.DeepEqual(apiDeliveryPolicy, *expectedDeliveryPolicy) { @@ -887,7 +887,7 @@ func testAccCheckTopicSubscriptionRedrivePolicyAttribute(ctx context.Context, at var apiRedrivePolicy tfsns.TopicSubscriptionRedrivePolicy if err := json.Unmarshal([]byte(apiRedrivePolicyJSONString), &apiRedrivePolicy); err != nil { - return fmt.Errorf("unable to unmarshal SNS Topic Subscription redrive policy JSON (%s): %s", apiRedrivePolicyJSONString, err) + return fmt.Errorf("unable to unmarshal SNS Topic Subscription redrive policy JSON (%s): %w", apiRedrivePolicyJSONString, err) } expectedRedrivePolicy := tfsns.TopicSubscriptionRedrivePolicy{ @@ -1165,7 +1165,7 @@ resource "aws_lambda_permission" "apigw_lambda" { action = "lambda:InvokeFunction" function_name = aws_lambda_function.lambda.arn principal = "apigateway.${data.aws_partition.current.dns_suffix}" - source_arn = "${aws_api_gateway_deployment.test.execution_arn}/*" + source_arn = "${aws_api_gateway_stage.test.execution_arn}/*" } resource "aws_lambda_function" "lambda" { @@ -1180,14 +1180,19 @@ resource "aws_lambda_function" "lambda" { resource "aws_api_gateway_deployment" "test" { depends_on = [aws_api_gateway_integration_response.test] rest_api_id = aws_api_gateway_rest_api.test.id - stage_name = "acctest" +} + +resource "aws_api_gateway_stage" "test" { + stage_name = "acctest" + rest_api_id = 
aws_api_gateway_rest_api.test.id + deployment_id = aws_api_gateway_deployment.test.id } resource "aws_sns_topic_subscription" "test" { depends_on = [aws_lambda_permission.apigw_lambda] topic_arn = aws_sns_topic.test.arn protocol = "https" - endpoint = aws_api_gateway_deployment.test.invoke_url + endpoint = aws_api_gateway_stage.test.invoke_url endpoint_auto_confirms = true } `, rName) @@ -1291,7 +1296,7 @@ resource "aws_lambda_permission" "apigw_lambda" { action = "lambda:InvokeFunction" function_name = aws_lambda_function.lambda.arn principal = "apigateway.${data.aws_partition.current.dns_suffix}" - source_arn = "${aws_api_gateway_deployment.test.execution_arn}/*" + source_arn = "${aws_api_gateway_stage.test.execution_arn}/*" } resource "aws_lambda_function" "lambda" { @@ -1306,7 +1311,12 @@ resource "aws_lambda_function" "lambda" { resource "aws_api_gateway_deployment" "test" { depends_on = [aws_api_gateway_integration_response.test] rest_api_id = aws_api_gateway_rest_api.test.id - stage_name = "acctest" +} + +resource "aws_api_gateway_stage" "test" { + stage_name = "acctest" + rest_api_id = aws_api_gateway_rest_api.test.id + deployment_id = aws_api_gateway_deployment.test.id } resource "aws_iam_role" "invocation_role" { @@ -1389,7 +1399,7 @@ resource "aws_sns_topic_subscription" "test" { depends_on = [aws_lambda_permission.apigw_lambda] topic_arn = aws_sns_topic.test.arn protocol = "https" - endpoint = replace(aws_api_gateway_deployment.test.invoke_url, "https://", "https://davematthews:granny@") + endpoint = replace(aws_api_gateway_stage.test.invoke_url, "https://", "https://davematthews:granny@") endpoint_auto_confirms = true confirmation_timeout_in_minutes = 3 diff --git a/internal/service/sns/topic_tags_gen_test.go b/internal/service/sns/topic_tags_gen_test.go index fd9d10136ba1..721b2576a8e4 100644 --- a/internal/service/sns/topic_tags_gen_test.go +++ b/internal/service/sns/topic_tags_gen_test.go @@ -6,7 +6,6 @@ import ( "testing" 
"github.com/hashicorp/terraform-plugin-testing/config" - sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/knownvalue" "github.com/hashicorp/terraform-plugin-testing/plancheck" @@ -18,11 +17,12 @@ import ( func TestAccSNSTopic_tags(t *testing.T) { ctx := acctest.Context(t) + var v map[string]string resourceName := "aws_sns_topic.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SNSServiceID), CheckDestroy: testAccCheckTopicDestroy(ctx), @@ -200,11 +200,12 @@ func TestAccSNSTopic_tags(t *testing.T) { func TestAccSNSTopic_tags_null(t *testing.T) { ctx := acctest.Context(t) + var v map[string]string resourceName := "aws_sns_topic.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SNSServiceID), CheckDestroy: testAccCheckTopicDestroy(ctx), @@ -267,11 +268,12 @@ func TestAccSNSTopic_tags_null(t *testing.T) { func TestAccSNSTopic_tags_EmptyMap(t *testing.T) { ctx := acctest.Context(t) + var v map[string]string resourceName := "aws_sns_topic.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SNSServiceID), CheckDestroy: testAccCheckTopicDestroy(ctx), @@ -330,11 
+332,12 @@ func TestAccSNSTopic_tags_EmptyMap(t *testing.T) { func TestAccSNSTopic_tags_AddOnUpdate(t *testing.T) { ctx := acctest.Context(t) + var v map[string]string resourceName := "aws_sns_topic.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SNSServiceID), CheckDestroy: testAccCheckTopicDestroy(ctx), @@ -411,11 +414,12 @@ func TestAccSNSTopic_tags_AddOnUpdate(t *testing.T) { func TestAccSNSTopic_tags_EmptyTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) + var v map[string]string resourceName := "aws_sns_topic.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SNSServiceID), CheckDestroy: testAccCheckTopicDestroy(ctx), @@ -500,11 +504,12 @@ func TestAccSNSTopic_tags_EmptyTag_OnCreate(t *testing.T) { func TestAccSNSTopic_tags_EmptyTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + var v map[string]string resourceName := "aws_sns_topic.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SNSServiceID), CheckDestroy: testAccCheckTopicDestroy(ctx), @@ -637,11 +642,12 @@ func TestAccSNSTopic_tags_EmptyTag_OnUpdate_Add(t *testing.T) { func TestAccSNSTopic_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + var v map[string]string resourceName := 
"aws_sns_topic.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SNSServiceID), CheckDestroy: testAccCheckTopicDestroy(ctx), @@ -726,11 +732,12 @@ func TestAccSNSTopic_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { func TestAccSNSTopic_tags_DefaultTags_providerOnly(t *testing.T) { ctx := acctest.Context(t) + var v map[string]string resourceName := "aws_sns_topic.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SNSServiceID), CheckDestroy: testAccCheckTopicDestroy(ctx), @@ -907,11 +914,12 @@ func TestAccSNSTopic_tags_DefaultTags_providerOnly(t *testing.T) { func TestAccSNSTopic_tags_DefaultTags_nonOverlapping(t *testing.T) { ctx := acctest.Context(t) + var v map[string]string resourceName := "aws_sns_topic.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SNSServiceID), CheckDestroy: testAccCheckTopicDestroy(ctx), @@ -1067,11 +1075,12 @@ func TestAccSNSTopic_tags_DefaultTags_nonOverlapping(t *testing.T) { func TestAccSNSTopic_tags_DefaultTags_overlapping(t *testing.T) { ctx := acctest.Context(t) + var v map[string]string resourceName := "aws_sns_topic.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - 
resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SNSServiceID), CheckDestroy: testAccCheckTopicDestroy(ctx), @@ -1243,11 +1252,12 @@ func TestAccSNSTopic_tags_DefaultTags_overlapping(t *testing.T) { func TestAccSNSTopic_tags_DefaultTags_updateToProviderOnly(t *testing.T) { ctx := acctest.Context(t) + var v map[string]string resourceName := "aws_sns_topic.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SNSServiceID), CheckDestroy: testAccCheckTopicDestroy(ctx), @@ -1333,11 +1343,12 @@ func TestAccSNSTopic_tags_DefaultTags_updateToProviderOnly(t *testing.T) { func TestAccSNSTopic_tags_DefaultTags_updateToResourceOnly(t *testing.T) { ctx := acctest.Context(t) + var v map[string]string resourceName := "aws_sns_topic.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SNSServiceID), CheckDestroy: testAccCheckTopicDestroy(ctx), @@ -1422,11 +1433,12 @@ func TestAccSNSTopic_tags_DefaultTags_updateToResourceOnly(t *testing.T) { func TestAccSNSTopic_tags_DefaultTags_emptyResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v map[string]string resourceName := "aws_sns_topic.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { 
acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SNSServiceID), CheckDestroy: testAccCheckTopicDestroy(ctx), @@ -1487,11 +1499,12 @@ func TestAccSNSTopic_tags_DefaultTags_emptyResourceTag(t *testing.T) { func TestAccSNSTopic_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T) { ctx := acctest.Context(t) + var v map[string]string resourceName := "aws_sns_topic.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SNSServiceID), CheckDestroy: testAccCheckTopicDestroy(ctx), @@ -1544,11 +1557,12 @@ func TestAccSNSTopic_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T) { func TestAccSNSTopic_tags_DefaultTags_nullOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v map[string]string resourceName := "aws_sns_topic.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SNSServiceID), CheckDestroy: testAccCheckTopicDestroy(ctx), @@ -1606,11 +1620,12 @@ func TestAccSNSTopic_tags_DefaultTags_nullOverlappingResourceTag(t *testing.T) { func TestAccSNSTopic_tags_DefaultTags_nullNonOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v map[string]string resourceName := "aws_sns_topic.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SNSServiceID), 
CheckDestroy: testAccCheckTopicDestroy(ctx), @@ -1668,11 +1683,12 @@ func TestAccSNSTopic_tags_DefaultTags_nullNonOverlappingResourceTag(t *testing.T func TestAccSNSTopic_tags_ComputedTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) + var v map[string]string resourceName := "aws_sns_topic.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SNSServiceID), CheckDestroy: testAccCheckTopicDestroy(ctx), @@ -1723,11 +1739,12 @@ func TestAccSNSTopic_tags_ComputedTag_OnCreate(t *testing.T) { func TestAccSNSTopic_tags_ComputedTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + var v map[string]string resourceName := "aws_sns_topic.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SNSServiceID), CheckDestroy: testAccCheckTopicDestroy(ctx), @@ -1820,11 +1837,12 @@ func TestAccSNSTopic_tags_ComputedTag_OnUpdate_Add(t *testing.T) { func TestAccSNSTopic_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + var v map[string]string resourceName := "aws_sns_topic.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SNSServiceID), CheckDestroy: testAccCheckTopicDestroy(ctx), @@ -1907,11 +1925,12 @@ func TestAccSNSTopic_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { 
func TestAccSNSTopic_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { ctx := acctest.Context(t) + var v map[string]string resourceName := "aws_sns_topic.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SNSServiceID), CheckDestroy: testAccCheckTopicDestroy(ctx), @@ -2069,11 +2088,12 @@ func TestAccSNSTopic_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { func TestAccSNSTopic_tags_IgnoreTags_Overlap_ResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v map[string]string resourceName := "aws_sns_topic.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SNSServiceID), CheckDestroy: testAccCheckTopicDestroy(ctx), diff --git a/internal/service/sns/topic_test.go b/internal/service/sns/topic_test.go index 8635fec43a43..3855eb34d64f 100644 --- a/internal/service/sns/topic_test.go +++ b/internal/service/sns/topic_test.go @@ -648,7 +648,7 @@ func testAccCheckTopicHasPolicy(ctx context.Context, n string, expectedPolicyTex equivalent, err := awspolicy.PoliciesAreEquivalent(actualPolicyText, expectedPolicyText) if err != nil { - return fmt.Errorf("testing policy equivalence: %s", err) + return fmt.Errorf("testing policy equivalence: %w", err) } if !equivalent { diff --git a/internal/service/sqs/attribute_funcs.go b/internal/service/sqs/attribute_funcs.go index e6eda838b9f9..db799f2fd136 100644 --- a/internal/service/sqs/attribute_funcs.go +++ b/internal/service/sqs/attribute_funcs.go @@ -49,7 +49,7 @@ func (h *queueAttributeHandler) Upsert(ctx 
context.Context, d *schema.ResourceDa deadline := inttypes.NewDeadline(d.Timeout(schema.TimeoutCreate)) - _, err = tfresource.RetryWhenAWSErrMessageContains(ctx, d.Timeout(schema.TimeoutCreate)/2, func() (any, error) { + _, err = tfresource.RetryWhenAWSErrMessageContains(ctx, d.Timeout(schema.TimeoutCreate)/2, func(ctx context.Context) (any, error) { return conn.SetQueueAttributes(ctx, input) }, errCodeInvalidAttributeValue, "Invalid value for the parameter Policy") @@ -70,7 +70,7 @@ func (h *queueAttributeHandler) Read(ctx context.Context, d *schema.ResourceData var diags diag.Diagnostics conn := meta.(*conns.AWSClient).SQSClient(ctx) - outputRaw, err := tfresource.RetryWhenNotFound(ctx, queueAttributeReadTimeout, func() (any, error) { + output, err := tfresource.RetryWhenNotFound(ctx, queueAttributeReadTimeout, func(ctx context.Context) (*string, error) { return findQueueAttributeByTwoPartKey(ctx, conn, d.Id(), h.AttributeName) }) @@ -84,7 +84,7 @@ func (h *queueAttributeHandler) Read(ctx context.Context, d *schema.ResourceData return sdkdiag.AppendErrorf(diags, "reading SQS Queue (%s) attribute (%s): %s", d.Id(), h.AttributeName, err) } - newValue, err := h.ToSet(d.Get(h.SchemaKey).(string), aws.ToString(outputRaw.(*string))) + newValue, err := h.ToSet(d.Get(h.SchemaKey).(string), aws.ToString(output)) if err != nil { return sdkdiag.AppendFromErr(diags, err) } diff --git a/internal/service/sqs/generate.go b/internal/service/sqs/generate.go index 0de929a54271..8c2428e6e1e1 100644 --- a/internal/service/sqs/generate.go +++ b/internal/service/sqs/generate.go @@ -4,6 +4,7 @@ //go:generate go run ../../generate/tags/main.go -ListTags -ListTagsOp=ListQueueTags -ListTagsInIDElem=QueueUrl -ServiceTagsMap -KVTValues -TagOp=TagQueue -TagInIDElem=QueueUrl -UntagOp=UntagQueue -UpdateTags -CreateTags //go:generate go run ../../generate/servicepackage/main.go //go:generate go run ../../generate/tagstests/main.go +//go:generate go run ../../generate/identitytests/main.go // 
ONLY generate directives and package declaration! Do not add anything else to this file. package sqs diff --git a/internal/service/sqs/queue.go b/internal/service/sqs/queue.go index 1299d1f3d81a..1491c8c5330b 100644 --- a/internal/service/sqs/queue.go +++ b/internal/service/sqs/queue.go @@ -93,7 +93,7 @@ var ( Type: schema.TypeInt, Optional: true, Default: defaultQueueMaximumMessageSize, - ValidateFunc: validation.IntBetween(1024, 262_144), + ValidateFunc: validation.IntBetween(1024, 1_048_576), }, "message_retention_seconds": { Type: schema.TypeInt, @@ -195,6 +195,9 @@ var ( // @SDKResource("aws_sqs_queue", name="Queue") // @Tags(identifierAttribute="id") // @Testing(existsType="github.com/aws/aws-sdk-go-v2/service/sqs/types;awstypes;map[awstypes.QueueAttributeName]string") +// @IdentityAttribute("url") +// @Testing(preIdentityVersion="v6.9.0") +// @Testing(idAttrDuplicates="url") func resourceQueue() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceQueueCreate, @@ -202,10 +205,6 @@ func resourceQueue() *schema.Resource { UpdateWithoutTimeout: resourceQueueUpdate, DeleteWithoutTimeout: resourceQueueDelete, - Importer: &schema.ResourceImporter{ - StateContext: schema.ImportStatePassthroughContext, - }, - CustomizeDiff: resourceQueueCustomizeDiff, Schema: queueSchema, @@ -238,7 +237,7 @@ func resourceQueueCreate(ctx context.Context, d *schema.ResourceData, meta any) // create is 2 phase: 1. create, 2. 
wait for propagation deadline := inttypes.NewDeadline(d.Timeout(schema.TimeoutCreate)) - outputRaw, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, d.Timeout(schema.TimeoutCreate)/2, func() (any, error) { + outputRaw, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, d.Timeout(schema.TimeoutCreate)/2, func(ctx context.Context) (any, error) { return conn.CreateQueue(ctx, input) }, errCodeQueueDeletedRecently) @@ -246,7 +245,7 @@ func resourceQueueCreate(ctx context.Context, d *schema.ResourceData, meta any) if input.Tags != nil && errs.IsUnsupportedOperationInPartitionError(meta.(*conns.AWSClient).Partition(ctx), err) { input.Tags = nil - outputRaw, err = tfresource.RetryWhenAWSErrCodeEquals(ctx, d.Timeout(schema.TimeoutCreate)/2, func() (any, error) { + outputRaw, err = tfresource.RetryWhenAWSErrCodeEquals(ctx, d.Timeout(schema.TimeoutCreate)/2, func(ctx context.Context) (any, error) { return conn.CreateQueue(ctx, input) }, errCodeQueueDeletedRecently) } @@ -282,7 +281,7 @@ func resourceQueueRead(ctx context.Context, d *schema.ResourceData, meta any) di var diags diag.Diagnostics conn := meta.(*conns.AWSClient).SQSClient(ctx) - outputRaw, err := tfresource.RetryWhenNotFound(ctx, queueReadTimeout, func() (any, error) { + output, err := tfresource.RetryWhenNotFound(ctx, queueReadTimeout, func(ctx context.Context) (map[types.QueueAttributeName]string, error) { return findQueueAttributesByURL(ctx, conn, d.Id()) }) @@ -301,7 +300,7 @@ func resourceQueueRead(ctx context.Context, d *schema.ResourceData, meta any) di return sdkdiag.AppendFromErr(diags, err) } - err = queueAttributeMap.APIAttributesToResourceData(outputRaw.(map[types.QueueAttributeName]string), d) + err = queueAttributeMap.APIAttributesToResourceData(output, d) if err != nil { return sdkdiag.AppendFromErr(diags, err) } diff --git a/internal/service/sqs/queue_data_source_tags_gen_test.go b/internal/service/sqs/queue_data_source_tags_gen_test.go index 2e4341ec417a..5c45c75ac5ff 100644 --- 
a/internal/service/sqs/queue_data_source_tags_gen_test.go +++ b/internal/service/sqs/queue_data_source_tags_gen_test.go @@ -6,7 +6,6 @@ import ( "testing" "github.com/hashicorp/terraform-plugin-testing/config" - sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/knownvalue" "github.com/hashicorp/terraform-plugin-testing/statecheck" @@ -17,10 +16,11 @@ import ( func TestAccSQSQueueDataSource_tags(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_sqs_queue.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SQSServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -45,10 +45,11 @@ func TestAccSQSQueueDataSource_tags(t *testing.T) { func TestAccSQSQueueDataSource_tags_NullMap(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_sqs_queue.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SQSServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -69,10 +70,11 @@ func TestAccSQSQueueDataSource_tags_NullMap(t *testing.T) { func TestAccSQSQueueDataSource_tags_EmptyMap(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_sqs_queue.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + 
acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SQSServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -93,10 +95,11 @@ func TestAccSQSQueueDataSource_tags_EmptyMap(t *testing.T) { func TestAccSQSQueueDataSource_tags_DefaultTags_nonOverlapping(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_sqs_queue.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SQSServiceID), Steps: []resource.TestStep{ @@ -125,10 +128,11 @@ func TestAccSQSQueueDataSource_tags_DefaultTags_nonOverlapping(t *testing.T) { func TestAccSQSQueueDataSource_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_sqs_queue.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SQSServiceID), Steps: []resource.TestStep{ @@ -163,10 +167,11 @@ func TestAccSQSQueueDataSource_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) func TestAccSQSQueueDataSource_tags_IgnoreTags_Overlap_ResourceTag(t *testing.T) { ctx := acctest.Context(t) + dataSourceName := "data.aws_sqs_queue.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SQSServiceID), Steps: 
[]resource.TestStep{ diff --git a/internal/service/sqs/queue_identity_gen_test.go b/internal/service/sqs/queue_identity_gen_test.go new file mode 100644 index 000000000000..6a6417089656 --- /dev/null +++ b/internal/service/sqs/queue_identity_gen_test.go @@ -0,0 +1,314 @@ +// Code generated by internal/generate/identitytests/main.go; DO NOT EDIT. + +package sqs_test + +import ( + "testing" + + awstypes "github.com/aws/aws-sdk-go-v2/service/sqs/types" + "github.com/hashicorp/terraform-plugin-testing/compare" + "github.com/hashicorp/terraform-plugin-testing/config" + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/knownvalue" + "github.com/hashicorp/terraform-plugin-testing/plancheck" + "github.com/hashicorp/terraform-plugin-testing/statecheck" + "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" + "github.com/hashicorp/terraform-plugin-testing/tfversion" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + tfknownvalue "github.com/hashicorp/terraform-provider-aws/internal/acctest/knownvalue" + tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccSQSQueue_Identity_Basic(t *testing.T) { + ctx := acctest.Context(t) + + var v map[awstypes.QueueAttributeName]string + resourceName := "aws_sqs_queue.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.SQSServiceID), + CheckDestroy: testAccCheckQueueDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + // Step 1: Setup + { + 
ConfigDirectory: config.StaticDirectory("testdata/Queue/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckQueueExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New(names.AttrURL), compare.ValuesSame()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + names.AttrURL: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrURL)), + }, + }, + + // Step 2: Import command + { + ConfigDirectory: config.StaticDirectory("testdata/Queue/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ImportStateKind: resource.ImportCommandWithID, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + + // Step 3: Import block with Import ID + { + ConfigDirectory: config.StaticDirectory("testdata/Queue/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrURL), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + }, + }, + }, + + // Step 4: Import block with Resource Identity + { + 
ConfigDirectory: config.StaticDirectory("testdata/Queue/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithResourceIdentity, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrURL), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + }, + }, + }, + }, + }) +} + +func TestAccSQSQueue_Identity_RegionOverride(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_sqs_queue.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.SQSServiceID), + CheckDestroy: acctest.CheckDestroyNoop, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + // Step 1: Setup + { + ConfigDirectory: config.StaticDirectory("testdata/Queue/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New(names.AttrURL), compare.ValuesSame()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + 
names.AttrRegion: knownvalue.StringExact(acctest.AlternateRegion()), + names.AttrURL: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrURL)), + }, + }, + + // Step 2: Import command + { + ConfigDirectory: config.StaticDirectory("testdata/Queue/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ImportStateKind: resource.ImportCommandWithID, + ImportStateIdFunc: acctest.CrossRegionImportStateIdFunc(resourceName), + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + + // Step 3: Import block with Import ID + { + ConfigDirectory: config.StaticDirectory("testdata/Queue/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportStateIdFunc: acctest.CrossRegionImportStateIdFunc(resourceName), + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrURL), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + }, + + // Step 4: Import block with Resource Identity + { + ConfigDirectory: config.StaticDirectory("testdata/Queue/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithResourceIdentity, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, 
tfjsonpath.New(names.AttrURL), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + }, + }, + }) +} + +// Resource Identity was added after v6.9.0 +func TestAccSQSQueue_Identity_ExistingResource(t *testing.T) { + ctx := acctest.Context(t) + + var v map[awstypes.QueueAttributeName]string + resourceName := "aws_sqs_queue.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.SQSServiceID), + CheckDestroy: testAccCheckQueueDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/Queue/basic_v6.9.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckQueueExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Queue/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + 
names.AttrRegion: knownvalue.StringExact(acctest.Region()), + names.AttrURL: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrURL)), + }, + }, + }, + }) +} + +// Resource Identity was added after v6.9.0 +func TestAccSQSQueue_Identity_ExistingResource_NoRefresh_NoChange(t *testing.T) { + ctx := acctest.Context(t) + + var v map[awstypes.QueueAttributeName]string + resourceName := "aws_sqs_queue.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.SQSServiceID), + CheckDestroy: testAccCheckQueueDestroy(ctx), + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/Queue/basic_v6.9.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckQueueExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Queue/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: 
[]statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + }, + }) +} diff --git a/internal/service/sqs/queue_policy.go b/internal/service/sqs/queue_policy.go index 3031510863d1..1c5a4ca0be28 100644 --- a/internal/service/sqs/queue_policy.go +++ b/internal/service/sqs/queue_policy.go @@ -12,6 +12,10 @@ import ( ) // @SDKResource("aws_sqs_queue_policy", name="Queue Policy") +// @IdentityAttribute("queue_url") +// @Testing(preIdentityVersion="v6.9.0") +// @Testing(idAttrDuplicates="queue_url") +// @Testing(existsType="github.com/aws/aws-sdk-go-v2/service/sqs/types;awstypes;map[awstypes.QueueAttributeName]string") func resourceQueuePolicy() *schema.Resource { h := &queueAttributeHandler{ AttributeName: types.QueueAttributeNamePolicy, @@ -26,10 +30,6 @@ func resourceQueuePolicy() *schema.Resource { UpdateWithoutTimeout: h.Upsert, DeleteWithoutTimeout: h.Delete, - Importer: &schema.ResourceImporter{ - StateContext: schema.ImportStatePassthroughContext, - }, - MigrateState: QueuePolicyMigrateState, SchemaVersion: 1, diff --git a/internal/service/sqs/queue_policy_identity_gen_test.go b/internal/service/sqs/queue_policy_identity_gen_test.go new file mode 100644 index 000000000000..d1879b768a0a --- /dev/null +++ b/internal/service/sqs/queue_policy_identity_gen_test.go @@ -0,0 +1,314 @@ +// Code generated by internal/generate/identitytests/main.go; DO NOT EDIT. 
+ +package sqs_test + +import ( + "testing" + + awstypes "github.com/aws/aws-sdk-go-v2/service/sqs/types" + "github.com/hashicorp/terraform-plugin-testing/compare" + "github.com/hashicorp/terraform-plugin-testing/config" + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/knownvalue" + "github.com/hashicorp/terraform-plugin-testing/plancheck" + "github.com/hashicorp/terraform-plugin-testing/statecheck" + "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" + "github.com/hashicorp/terraform-plugin-testing/tfversion" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + tfknownvalue "github.com/hashicorp/terraform-provider-aws/internal/acctest/knownvalue" + tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccSQSQueuePolicy_Identity_Basic(t *testing.T) { + ctx := acctest.Context(t) + + var v map[awstypes.QueueAttributeName]string + resourceName := "aws_sqs_queue_policy.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.SQSServiceID), + CheckDestroy: testAccCheckQueuePolicyDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + // Step 1: Setup + { + ConfigDirectory: config.StaticDirectory("testdata/QueuePolicy/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckQueuePolicyExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + 
statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New("queue_url"), compare.ValuesSame()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + "queue_url": knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New("queue_url")), + }, + }, + + // Step 2: Import command + { + ConfigDirectory: config.StaticDirectory("testdata/QueuePolicy/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ImportStateKind: resource.ImportCommandWithID, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + + // Step 3: Import block with Import ID + { + ConfigDirectory: config.StaticDirectory("testdata/QueuePolicy/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New("queue_url"), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + }, + }, + }, + + // Step 4: Import block with Resource Identity + { + ConfigDirectory: config.StaticDirectory("testdata/QueuePolicy/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithResourceIdentity, + ImportPlanChecks: 
resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New("queue_url"), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + }, + }, + }, + }, + }) +} + +func TestAccSQSQueuePolicy_Identity_RegionOverride(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_sqs_queue_policy.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.SQSServiceID), + CheckDestroy: acctest.CheckDestroyNoop, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + // Step 1: Setup + { + ConfigDirectory: config.StaticDirectory("testdata/QueuePolicy/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New("queue_url"), compare.ValuesSame()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.AlternateRegion()), + "queue_url": knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New("queue_url")), + }, + }, + + // Step 2: Import command + { + ConfigDirectory: 
config.StaticDirectory("testdata/QueuePolicy/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ImportStateKind: resource.ImportCommandWithID, + ImportStateIdFunc: acctest.CrossRegionImportStateIdFunc(resourceName), + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + + // Step 3: Import block with Import ID + { + ConfigDirectory: config.StaticDirectory("testdata/QueuePolicy/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportStateIdFunc: acctest.CrossRegionImportStateIdFunc(resourceName), + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New("queue_url"), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + }, + + // Step 4: Import block with Resource Identity + { + ConfigDirectory: config.StaticDirectory("testdata/QueuePolicy/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithResourceIdentity, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New("queue_url"), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + }, + }, + }) +} + +// Resource Identity was added after v6.9.0 +func 
TestAccSQSQueuePolicy_Identity_ExistingResource(t *testing.T) { + ctx := acctest.Context(t) + + var v map[awstypes.QueueAttributeName]string + resourceName := "aws_sqs_queue_policy.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.SQSServiceID), + CheckDestroy: testAccCheckQueuePolicyDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/QueuePolicy/basic_v6.9.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckQueuePolicyExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/QueuePolicy/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + "queue_url": knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New("queue_url")), + }, + }, + }, + }) +} + +// Resource Identity was 
added after v6.9.0 +func TestAccSQSQueuePolicy_Identity_ExistingResource_NoRefresh_NoChange(t *testing.T) { + ctx := acctest.Context(t) + + var v map[awstypes.QueueAttributeName]string + resourceName := "aws_sqs_queue_policy.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.SQSServiceID), + CheckDestroy: testAccCheckQueuePolicyDestroy(ctx), + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/QueuePolicy/basic_v6.9.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckQueuePolicyExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/QueuePolicy/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + }, + }) +} diff --git a/internal/service/sqs/queue_policy_test.go b/internal/service/sqs/queue_policy_test.go index dc685ec9e9f5..7a9c713df2a9 
100644 --- a/internal/service/sqs/queue_policy_test.go +++ b/internal/service/sqs/queue_policy_test.go @@ -145,6 +145,17 @@ func TestAccSQSQueuePolicy_update(t *testing.T) { }) } +// Satisfy generated identity test function names by aliasing to queue checks +// +// This mimics the standard policy acceptance test behavior, but in the +// future we may consider replacing this approach with custom checks +// to validate the presence/content of the policy rather than just +// the parent queue. +var ( + testAccCheckQueuePolicyExists = testAccCheckQueueExists + testAccCheckQueuePolicyDestroy = testAccCheckQueueDestroy +) + func testAccQueuePolicyConfig_basic(rName string) string { return fmt.Sprintf(` resource "aws_sqs_queue" "test" { diff --git a/internal/service/sqs/queue_redrive_allow_policy.go b/internal/service/sqs/queue_redrive_allow_policy.go index 6ae5ae30cbd0..8b1f0d69f923 100644 --- a/internal/service/sqs/queue_redrive_allow_policy.go +++ b/internal/service/sqs/queue_redrive_allow_policy.go @@ -11,6 +11,10 @@ import ( ) // @SDKResource("aws_sqs_queue_redrive_allow_policy", name="Queue Redrive Allow Policy") +// @IdentityAttribute("queue_url") +// @Testing(preIdentityVersion="v6.9.0") +// @Testing(idAttrDuplicates="queue_url") +// @Testing(existsType="github.com/aws/aws-sdk-go-v2/service/sqs/types;awstypes;map[awstypes.QueueAttributeName]string") func resourceQueueRedriveAllowPolicy() *schema.Resource { h := &queueAttributeHandler{ AttributeName: types.QueueAttributeNameRedriveAllowPolicy, @@ -33,10 +37,6 @@ func resourceQueueRedriveAllowPolicy() *schema.Resource { "redrive_allow_policy": sdkv2.JSONDocumentSchemaRequired(), }, - Importer: &schema.ResourceImporter{ - StateContext: schema.ImportStatePassthroughContext, - }, - CreateWithoutTimeout: h.Upsert, ReadWithoutTimeout: h.Read, UpdateWithoutTimeout: h.Upsert, diff --git a/internal/service/sqs/queue_redrive_allow_policy_identity_gen_test.go 
b/internal/service/sqs/queue_redrive_allow_policy_identity_gen_test.go new file mode 100644 index 000000000000..2e04c239cb72 --- /dev/null +++ b/internal/service/sqs/queue_redrive_allow_policy_identity_gen_test.go @@ -0,0 +1,314 @@ +// Code generated by internal/generate/identitytests/main.go; DO NOT EDIT. + +package sqs_test + +import ( + "testing" + + awstypes "github.com/aws/aws-sdk-go-v2/service/sqs/types" + "github.com/hashicorp/terraform-plugin-testing/compare" + "github.com/hashicorp/terraform-plugin-testing/config" + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/knownvalue" + "github.com/hashicorp/terraform-plugin-testing/plancheck" + "github.com/hashicorp/terraform-plugin-testing/statecheck" + "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" + "github.com/hashicorp/terraform-plugin-testing/tfversion" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + tfknownvalue "github.com/hashicorp/terraform-provider-aws/internal/acctest/knownvalue" + tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccSQSQueueRedriveAllowPolicy_Identity_Basic(t *testing.T) { + ctx := acctest.Context(t) + + var v map[awstypes.QueueAttributeName]string + resourceName := "aws_sqs_queue_redrive_allow_policy.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.SQSServiceID), + CheckDestroy: testAccCheckQueueRedriveAllowPolicyDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + // Step 1: Setup + { 
+ ConfigDirectory: config.StaticDirectory("testdata/QueueRedriveAllowPolicy/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckQueueRedriveAllowPolicyExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New("queue_url"), compare.ValuesSame()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + "queue_url": knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New("queue_url")), + }, + }, + + // Step 2: Import command + { + ConfigDirectory: config.StaticDirectory("testdata/QueueRedriveAllowPolicy/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ImportStateKind: resource.ImportCommandWithID, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + + // Step 3: Import block with Import ID + { + ConfigDirectory: config.StaticDirectory("testdata/QueueRedriveAllowPolicy/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New("queue_url"), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + }, + }, 
+ }, + + // Step 4: Import block with Resource Identity + { + ConfigDirectory: config.StaticDirectory("testdata/QueueRedriveAllowPolicy/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithResourceIdentity, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New("queue_url"), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + }, + }, + }, + }, + }) +} + +func TestAccSQSQueueRedriveAllowPolicy_Identity_RegionOverride(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_sqs_queue_redrive_allow_policy.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.SQSServiceID), + CheckDestroy: acctest.CheckDestroyNoop, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + // Step 1: Setup + { + ConfigDirectory: config.StaticDirectory("testdata/QueueRedriveAllowPolicy/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New("queue_url"), compare.ValuesSame()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), 
+ statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.AlternateRegion()), + "queue_url": knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New("queue_url")), + }, + }, + + // Step 2: Import command + { + ConfigDirectory: config.StaticDirectory("testdata/QueueRedriveAllowPolicy/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ImportStateKind: resource.ImportCommandWithID, + ImportStateIdFunc: acctest.CrossRegionImportStateIdFunc(resourceName), + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + + // Step 3: Import block with Import ID + { + ConfigDirectory: config.StaticDirectory("testdata/QueueRedriveAllowPolicy/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportStateIdFunc: acctest.CrossRegionImportStateIdFunc(resourceName), + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New("queue_url"), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + }, + + // Step 4: Import block with Resource Identity + { + ConfigDirectory: config.StaticDirectory("testdata/QueueRedriveAllowPolicy/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: 
resource.ImportBlockWithResourceIdentity, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New("queue_url"), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + }, + }, + }) +} + +// Resource Identity was added after v6.9.0 +func TestAccSQSQueueRedriveAllowPolicy_Identity_ExistingResource(t *testing.T) { + ctx := acctest.Context(t) + + var v map[awstypes.QueueAttributeName]string + resourceName := "aws_sqs_queue_redrive_allow_policy.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.SQSServiceID), + CheckDestroy: testAccCheckQueueRedriveAllowPolicyDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/QueueRedriveAllowPolicy/basic_v6.9.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckQueueRedriveAllowPolicyExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/QueueRedriveAllowPolicy/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: 
[]plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + "queue_url": knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New("queue_url")), + }, + }, + }, + }) +} + +// Resource Identity was added after v6.9.0 +func TestAccSQSQueueRedriveAllowPolicy_Identity_ExistingResource_NoRefresh_NoChange(t *testing.T) { + ctx := acctest.Context(t) + + var v map[awstypes.QueueAttributeName]string + resourceName := "aws_sqs_queue_redrive_allow_policy.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.SQSServiceID), + CheckDestroy: testAccCheckQueueRedriveAllowPolicyDestroy(ctx), + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/QueueRedriveAllowPolicy/basic_v6.9.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckQueueRedriveAllowPolicyExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/QueueRedriveAllowPolicy/basic/"), + ConfigVariables: config.Variables{ + 
acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + }, + }) +} diff --git a/internal/service/sqs/queue_redrive_allow_policy_test.go b/internal/service/sqs/queue_redrive_allow_policy_test.go index 22fab997a567..5c94623be0b6 100644 --- a/internal/service/sqs/queue_redrive_allow_policy_test.go +++ b/internal/service/sqs/queue_redrive_allow_policy_test.go @@ -162,6 +162,17 @@ func TestAccSQSQueueRedriveAllowPolicy_byQueue(t *testing.T) { }) } +// Satisfy generated identity test function names by aliasing to queue checks +// +// This mimics the standard policy acceptance test behavior, but in the +// future we may consider replacing this approach with custom checks +// to validate the presence/content of the redrive allow policy rather than just +// the parent queue. 
+var ( + testAccCheckQueueRedriveAllowPolicyExists = testAccCheckQueueExists + testAccCheckQueueRedriveAllowPolicyDestroy = testAccCheckQueueDestroy +) + func testAccQueueRedriveAllowPolicyConfig_basic(rName string) string { return fmt.Sprintf(` resource "aws_sqs_queue" "test" { diff --git a/internal/service/sqs/queue_redrive_policy.go b/internal/service/sqs/queue_redrive_policy.go index 315564fd905d..29b2ba135604 100644 --- a/internal/service/sqs/queue_redrive_policy.go +++ b/internal/service/sqs/queue_redrive_policy.go @@ -11,6 +11,10 @@ import ( ) // @SDKResource("aws_sqs_queue_redrive_policy", name="Queue Redrive Policy") +// @IdentityAttribute("queue_url") +// @Testing(preIdentityVersion="v6.9.0") +// @Testing(idAttrDuplicates="queue_url") +// @Testing(existsType="github.com/aws/aws-sdk-go-v2/service/sqs/types;awstypes;map[awstypes.QueueAttributeName]string") func resourceQueueRedrivePolicy() *schema.Resource { h := &queueAttributeHandler{ AttributeName: types.QueueAttributeNameRedrivePolicy, @@ -33,10 +37,6 @@ func resourceQueueRedrivePolicy() *schema.Resource { "redrive_policy": sdkv2.JSONDocumentSchemaRequired(), }, - Importer: &schema.ResourceImporter{ - StateContext: schema.ImportStatePassthroughContext, - }, - CreateWithoutTimeout: h.Upsert, ReadWithoutTimeout: h.Read, UpdateWithoutTimeout: h.Upsert, diff --git a/internal/service/sqs/queue_redrive_policy_identity_gen_test.go b/internal/service/sqs/queue_redrive_policy_identity_gen_test.go new file mode 100644 index 000000000000..7873fdc695e2 --- /dev/null +++ b/internal/service/sqs/queue_redrive_policy_identity_gen_test.go @@ -0,0 +1,314 @@ +// Code generated by internal/generate/identitytests/main.go; DO NOT EDIT. 
+ +package sqs_test + +import ( + "testing" + + awstypes "github.com/aws/aws-sdk-go-v2/service/sqs/types" + "github.com/hashicorp/terraform-plugin-testing/compare" + "github.com/hashicorp/terraform-plugin-testing/config" + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/knownvalue" + "github.com/hashicorp/terraform-plugin-testing/plancheck" + "github.com/hashicorp/terraform-plugin-testing/statecheck" + "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" + "github.com/hashicorp/terraform-plugin-testing/tfversion" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + tfknownvalue "github.com/hashicorp/terraform-provider-aws/internal/acctest/knownvalue" + tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccSQSQueueRedrivePolicy_Identity_Basic(t *testing.T) { + ctx := acctest.Context(t) + + var v map[awstypes.QueueAttributeName]string + resourceName := "aws_sqs_queue_redrive_policy.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.SQSServiceID), + CheckDestroy: testAccCheckQueueRedrivePolicyDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + // Step 1: Setup + { + ConfigDirectory: config.StaticDirectory("testdata/QueueRedrivePolicy/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckQueueRedrivePolicyExists(ctx, resourceName, &v), + ), + ConfigStateChecks: 
[]statecheck.StateCheck{ + statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New("queue_url"), compare.ValuesSame()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + "queue_url": knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New("queue_url")), + }, + }, + + // Step 2: Import command + { + ConfigDirectory: config.StaticDirectory("testdata/QueueRedrivePolicy/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ImportStateKind: resource.ImportCommandWithID, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + + // Step 3: Import block with Import ID + { + ConfigDirectory: config.StaticDirectory("testdata/QueueRedrivePolicy/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New("queue_url"), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + }, + }, + }, + + // Step 4: Import block with Resource Identity + { + ConfigDirectory: config.StaticDirectory("testdata/QueueRedrivePolicy/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: 
resource.ImportBlockWithResourceIdentity, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New("queue_url"), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + }, + }, + }, + }, + }) +} + +func TestAccSQSQueueRedrivePolicy_Identity_RegionOverride(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_sqs_queue_redrive_policy.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.SQSServiceID), + CheckDestroy: acctest.CheckDestroyNoop, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + // Step 1: Setup + { + ConfigDirectory: config.StaticDirectory("testdata/QueueRedrivePolicy/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New("queue_url"), compare.ValuesSame()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.AlternateRegion()), + "queue_url": knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New("queue_url")), 
+ }, + }, + + // Step 2: Import command + { + ConfigDirectory: config.StaticDirectory("testdata/QueueRedrivePolicy/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ImportStateKind: resource.ImportCommandWithID, + ImportStateIdFunc: acctest.CrossRegionImportStateIdFunc(resourceName), + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + + // Step 3: Import block with Import ID + { + ConfigDirectory: config.StaticDirectory("testdata/QueueRedrivePolicy/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportStateIdFunc: acctest.CrossRegionImportStateIdFunc(resourceName), + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New("queue_url"), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + }, + + // Step 4: Import block with Resource Identity + { + ConfigDirectory: config.StaticDirectory("testdata/QueueRedrivePolicy/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithResourceIdentity, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New("queue_url"), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + 
}, + }, + }, + }) +} + +// Resource Identity was added after v6.9.0 +func TestAccSQSQueueRedrivePolicy_Identity_ExistingResource(t *testing.T) { + ctx := acctest.Context(t) + + var v map[awstypes.QueueAttributeName]string + resourceName := "aws_sqs_queue_redrive_policy.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.SQSServiceID), + CheckDestroy: testAccCheckQueueRedrivePolicyDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/QueueRedrivePolicy/basic_v6.9.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckQueueRedrivePolicyExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/QueueRedrivePolicy/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + "queue_url": knownvalue.NotNull(), + }), + 
statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New("queue_url")), + }, + }, + }, + }) +} + +// Resource Identity was added after v6.9.0 +func TestAccSQSQueueRedrivePolicy_Identity_ExistingResource_NoRefresh_NoChange(t *testing.T) { + ctx := acctest.Context(t) + + var v map[awstypes.QueueAttributeName]string + resourceName := "aws_sqs_queue_redrive_policy.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.SQSServiceID), + CheckDestroy: testAccCheckQueueRedrivePolicyDestroy(ctx), + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/QueueRedrivePolicy/basic_v6.9.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckQueueRedrivePolicyExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/QueueRedrivePolicy/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + 
tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + }, + }) +} diff --git a/internal/service/sqs/queue_redrive_policy_test.go b/internal/service/sqs/queue_redrive_policy_test.go index 7b737f293330..e29cf9fae6fb 100644 --- a/internal/service/sqs/queue_redrive_policy_test.go +++ b/internal/service/sqs/queue_redrive_policy_test.go @@ -147,6 +147,17 @@ func TestAccSQSQueueRedrivePolicy_update(t *testing.T) { }) } +// Satisfy generated identity test function names by aliasing to queue checks +// +// This mimics the standard policy acceptance test behavior, but in the +// future we may consider replacing this approach with custom checks +// to validate the presence/content of the redrive policy rather than just +// the parent queue. +var ( + testAccCheckQueueRedrivePolicyExists = testAccCheckQueueExists + testAccCheckQueueRedrivePolicyDestroy = testAccCheckQueueDestroy +) + func testAccQueueRedrivePolicyConfig_basic(rName string) string { return fmt.Sprintf(` resource "aws_sqs_queue" "test" { diff --git a/internal/service/sqs/queue_tags_gen_test.go b/internal/service/sqs/queue_tags_gen_test.go index 8170bf726ee4..0d83bafa6e04 100644 --- a/internal/service/sqs/queue_tags_gen_test.go +++ b/internal/service/sqs/queue_tags_gen_test.go @@ -7,7 +7,6 @@ import ( awstypes "github.com/aws/aws-sdk-go-v2/service/sqs/types" "github.com/hashicorp/terraform-plugin-testing/config" - sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/knownvalue" "github.com/hashicorp/terraform-plugin-testing/plancheck" @@ -19,11 +18,12 @@ import ( func TestAccSQSQueue_tags(t *testing.T) { ctx := acctest.Context(t) + var v map[awstypes.QueueAttributeName]string resourceName := "aws_sqs_queue.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + 
acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SQSServiceID), CheckDestroy: testAccCheckQueueDestroy(ctx), @@ -201,11 +201,12 @@ func TestAccSQSQueue_tags(t *testing.T) { func TestAccSQSQueue_tags_null(t *testing.T) { ctx := acctest.Context(t) + var v map[awstypes.QueueAttributeName]string resourceName := "aws_sqs_queue.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SQSServiceID), CheckDestroy: testAccCheckQueueDestroy(ctx), @@ -268,11 +269,12 @@ func TestAccSQSQueue_tags_null(t *testing.T) { func TestAccSQSQueue_tags_EmptyMap(t *testing.T) { ctx := acctest.Context(t) + var v map[awstypes.QueueAttributeName]string resourceName := "aws_sqs_queue.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SQSServiceID), CheckDestroy: testAccCheckQueueDestroy(ctx), @@ -331,11 +333,12 @@ func TestAccSQSQueue_tags_EmptyMap(t *testing.T) { func TestAccSQSQueue_tags_AddOnUpdate(t *testing.T) { ctx := acctest.Context(t) + var v map[awstypes.QueueAttributeName]string resourceName := "aws_sqs_queue.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SQSServiceID), CheckDestroy: testAccCheckQueueDestroy(ctx), @@ 
-412,11 +415,12 @@ func TestAccSQSQueue_tags_AddOnUpdate(t *testing.T) { func TestAccSQSQueue_tags_EmptyTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) + var v map[awstypes.QueueAttributeName]string resourceName := "aws_sqs_queue.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SQSServiceID), CheckDestroy: testAccCheckQueueDestroy(ctx), @@ -501,11 +505,12 @@ func TestAccSQSQueue_tags_EmptyTag_OnCreate(t *testing.T) { func TestAccSQSQueue_tags_EmptyTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + var v map[awstypes.QueueAttributeName]string resourceName := "aws_sqs_queue.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SQSServiceID), CheckDestroy: testAccCheckQueueDestroy(ctx), @@ -638,11 +643,12 @@ func TestAccSQSQueue_tags_EmptyTag_OnUpdate_Add(t *testing.T) { func TestAccSQSQueue_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + var v map[awstypes.QueueAttributeName]string resourceName := "aws_sqs_queue.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SQSServiceID), CheckDestroy: testAccCheckQueueDestroy(ctx), @@ -727,11 +733,12 @@ func TestAccSQSQueue_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { func 
TestAccSQSQueue_tags_DefaultTags_providerOnly(t *testing.T) { ctx := acctest.Context(t) + var v map[awstypes.QueueAttributeName]string resourceName := "aws_sqs_queue.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SQSServiceID), CheckDestroy: testAccCheckQueueDestroy(ctx), @@ -908,11 +915,12 @@ func TestAccSQSQueue_tags_DefaultTags_providerOnly(t *testing.T) { func TestAccSQSQueue_tags_DefaultTags_nonOverlapping(t *testing.T) { ctx := acctest.Context(t) + var v map[awstypes.QueueAttributeName]string resourceName := "aws_sqs_queue.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SQSServiceID), CheckDestroy: testAccCheckQueueDestroy(ctx), @@ -1068,11 +1076,12 @@ func TestAccSQSQueue_tags_DefaultTags_nonOverlapping(t *testing.T) { func TestAccSQSQueue_tags_DefaultTags_overlapping(t *testing.T) { ctx := acctest.Context(t) + var v map[awstypes.QueueAttributeName]string resourceName := "aws_sqs_queue.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SQSServiceID), CheckDestroy: testAccCheckQueueDestroy(ctx), @@ -1244,11 +1253,12 @@ func TestAccSQSQueue_tags_DefaultTags_overlapping(t *testing.T) { func TestAccSQSQueue_tags_DefaultTags_updateToProviderOnly(t *testing.T) { ctx := 
acctest.Context(t) + var v map[awstypes.QueueAttributeName]string resourceName := "aws_sqs_queue.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SQSServiceID), CheckDestroy: testAccCheckQueueDestroy(ctx), @@ -1334,11 +1344,12 @@ func TestAccSQSQueue_tags_DefaultTags_updateToProviderOnly(t *testing.T) { func TestAccSQSQueue_tags_DefaultTags_updateToResourceOnly(t *testing.T) { ctx := acctest.Context(t) + var v map[awstypes.QueueAttributeName]string resourceName := "aws_sqs_queue.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SQSServiceID), CheckDestroy: testAccCheckQueueDestroy(ctx), @@ -1423,11 +1434,12 @@ func TestAccSQSQueue_tags_DefaultTags_updateToResourceOnly(t *testing.T) { func TestAccSQSQueue_tags_DefaultTags_emptyResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v map[awstypes.QueueAttributeName]string resourceName := "aws_sqs_queue.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SQSServiceID), CheckDestroy: testAccCheckQueueDestroy(ctx), @@ -1488,11 +1500,12 @@ func TestAccSQSQueue_tags_DefaultTags_emptyResourceTag(t *testing.T) { func TestAccSQSQueue_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T) { ctx := acctest.Context(t) + var v 
map[awstypes.QueueAttributeName]string resourceName := "aws_sqs_queue.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SQSServiceID), CheckDestroy: testAccCheckQueueDestroy(ctx), @@ -1545,11 +1558,12 @@ func TestAccSQSQueue_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T) { func TestAccSQSQueue_tags_DefaultTags_nullOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v map[awstypes.QueueAttributeName]string resourceName := "aws_sqs_queue.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SQSServiceID), CheckDestroy: testAccCheckQueueDestroy(ctx), @@ -1607,11 +1621,12 @@ func TestAccSQSQueue_tags_DefaultTags_nullOverlappingResourceTag(t *testing.T) { func TestAccSQSQueue_tags_DefaultTags_nullNonOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v map[awstypes.QueueAttributeName]string resourceName := "aws_sqs_queue.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SQSServiceID), CheckDestroy: testAccCheckQueueDestroy(ctx), @@ -1669,11 +1684,12 @@ func TestAccSQSQueue_tags_DefaultTags_nullNonOverlappingResourceTag(t *testing.T func TestAccSQSQueue_tags_ComputedTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) + var v 
map[awstypes.QueueAttributeName]string resourceName := "aws_sqs_queue.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SQSServiceID), CheckDestroy: testAccCheckQueueDestroy(ctx), @@ -1724,11 +1740,12 @@ func TestAccSQSQueue_tags_ComputedTag_OnCreate(t *testing.T) { func TestAccSQSQueue_tags_ComputedTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + var v map[awstypes.QueueAttributeName]string resourceName := "aws_sqs_queue.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SQSServiceID), CheckDestroy: testAccCheckQueueDestroy(ctx), @@ -1821,11 +1838,12 @@ func TestAccSQSQueue_tags_ComputedTag_OnUpdate_Add(t *testing.T) { func TestAccSQSQueue_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + var v map[awstypes.QueueAttributeName]string resourceName := "aws_sqs_queue.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SQSServiceID), CheckDestroy: testAccCheckQueueDestroy(ctx), @@ -1908,11 +1926,12 @@ func TestAccSQSQueue_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { func TestAccSQSQueue_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { ctx := acctest.Context(t) + var v map[awstypes.QueueAttributeName]string resourceName := "aws_sqs_queue.test" - 
rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SQSServiceID), CheckDestroy: testAccCheckQueueDestroy(ctx), @@ -2070,11 +2089,12 @@ func TestAccSQSQueue_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { func TestAccSQSQueue_tags_IgnoreTags_Overlap_ResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v map[awstypes.QueueAttributeName]string resourceName := "aws_sqs_queue.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SQSServiceID), CheckDestroy: testAccCheckQueueDestroy(ctx), diff --git a/internal/service/sqs/queue_test.go b/internal/service/sqs/queue_test.go index c99e0f675363..8b68b06d3838 100644 --- a/internal/service/sqs/queue_test.go +++ b/internal/service/sqs/queue_test.go @@ -921,7 +921,7 @@ func testAccCheckQueuePolicyAttribute(ctx context.Context, queueAttributes *map[ equivalent, err := awspolicy.PoliciesAreEquivalent(actualPolicyText, expectedPolicy) if err != nil { - return fmt.Errorf("Error testing policy equivalence: %s", err) + return fmt.Errorf("Error testing policy equivalence: %w", err) } if !equivalent { return fmt.Errorf("Non-equivalent policy error:\n\nexpected: %s\n\n got: %s\n", expectedPolicy, actualPolicyText) @@ -967,7 +967,7 @@ func testAccCheckQueueDestroy(ctx context.Context) resource.TestCheckFunc { // SQS seems to be highly eventually consistent. Even if one connection reports that the queue is gone, // another connection may still report it as present. 
- _, err := tfresource.RetryUntilNotFound(ctx, tfsqs.QueueDeletedTimeout, func() (any, error) { + _, err := tfresource.RetryUntilNotFound(ctx, tfsqs.QueueDeletedTimeout, func(ctx context.Context) (any, error) { return tfsqs.FindQueueAttributesByURL(ctx, conn, rs.Primary.ID) }) if errors.Is(err, tfresource.ErrFoundResource) { @@ -1317,7 +1317,7 @@ func testAccQueueConfig_managedEncryptionKMSDataKeyReusePeriodSeconds(rName stri return fmt.Sprintf(` resource "aws_sqs_queue" "test" { kms_data_key_reuse_period_seconds = "60" - max_message_size = "261244" + max_message_size = "1048576" message_retention_seconds = "60" name = %[1]q sqs_managed_sse_enabled = true @@ -1371,7 +1371,7 @@ func testAccQueueConfig_noManagedEncryptionKMSDataKeyReusePeriodSeconds(rName st resource "aws_sqs_queue" "test" { fifo_queue = true kms_data_key_reuse_period_seconds = "60" - max_message_size = "261244" + max_message_size = "1048576" message_retention_seconds = "60" name = "%[1]s.fifo" receive_wait_time_seconds = "10" diff --git a/internal/service/sqs/service_endpoint_resolver_gen.go b/internal/service/sqs/service_endpoint_resolver_gen.go index 7f2413f026c3..71bfc54d879f 100644 --- a/internal/service/sqs/service_endpoint_resolver_gen.go +++ b/internal/service/sqs/service_endpoint_resolver_gen.go @@ -62,7 +62,7 @@ func (r resolverV2) ResolveEndpoint(ctx context.Context, params sqs.EndpointPara }) params.UseFIPS = aws.Bool(false) } else { - err = fmt.Errorf("looking up sqs endpoint %q: %s", hostname, err) + err = fmt.Errorf("looking up sqs endpoint %q: %w", hostname, err) return } } else { diff --git a/internal/service/sqs/service_endpoints_gen_test.go b/internal/service/sqs/service_endpoints_gen_test.go index 61aaae0baafe..076779b7829a 100644 --- a/internal/service/sqs/service_endpoints_gen_test.go +++ b/internal/service/sqs/service_endpoints_gen_test.go @@ -521,7 +521,7 @@ func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { ) } -var errCancelOperation = 
fmt.Errorf("Test: Canceling request") +var errCancelOperation = errors.New("Test: Canceling request") func addCancelRequestMiddleware() func(*middleware.Stack) error { return func(stack *middleware.Stack) error { diff --git a/internal/service/sqs/service_package_gen.go b/internal/service/sqs/service_package_gen.go index 7581abddbfbf..a2c866b75dfd 100644 --- a/internal/service/sqs/service_package_gen.go +++ b/internal/service/sqs/service_package_gen.go @@ -7,7 +7,6 @@ import ( "unique" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/sqs" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -55,25 +54,41 @@ func (p *servicePackage) SDKResources(ctx context.Context) []*inttypes.ServicePa Tags: unique.Make(inttypes.ServicePackageResourceTags{ IdentifierAttribute: names.AttrID, }), - Region: unique.Make(inttypes.ResourceRegionDefault()), + Region: unique.Make(inttypes.ResourceRegionDefault()), + Identity: inttypes.RegionalSingleParameterIdentity(names.AttrURL), + Import: inttypes.SDKv2Import{ + WrappedImport: true, + }, }, { Factory: resourceQueuePolicy, TypeName: "aws_sqs_queue_policy", Name: "Queue Policy", Region: unique.Make(inttypes.ResourceRegionDefault()), + Identity: inttypes.RegionalSingleParameterIdentity("queue_url"), + Import: inttypes.SDKv2Import{ + WrappedImport: true, + }, }, { Factory: resourceQueueRedriveAllowPolicy, TypeName: "aws_sqs_queue_redrive_allow_policy", Name: "Queue Redrive Allow Policy", Region: unique.Make(inttypes.ResourceRegionDefault()), + Identity: inttypes.RegionalSingleParameterIdentity("queue_url"), + Import: inttypes.SDKv2Import{ + WrappedImport: true, + }, }, { Factory: resourceQueueRedrivePolicy, TypeName: "aws_sqs_queue_redrive_policy", Name: "Queue Redrive Policy", Region: unique.Make(inttypes.ResourceRegionDefault()), + Identity: inttypes.RegionalSingleParameterIdentity("queue_url"), + Import: 
inttypes.SDKv2Import{ + WrappedImport: true, + }, }, } } @@ -101,7 +116,7 @@ func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) ( func(o *sqs.Options) { if inContext, ok := conns.FromContext(ctx); ok && inContext.VCREnabled() { tflog.Info(ctx, "overriding retry behavior to immediately return VCR errors") - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(vcr.InteractionNotFoundRetryableFunc)) + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), vcr.InteractionNotFoundRetryableFunc) } }, withExtraOptions(ctx, p, config), diff --git a/internal/service/sqs/tags_gen.go b/internal/service/sqs/tags_gen.go index 7628e94d408a..5fc80380790b 100644 --- a/internal/service/sqs/tags_gen.go +++ b/internal/service/sqs/tags_gen.go @@ -3,8 +3,8 @@ package sqs import ( "context" - "fmt" + "github.com/YakDriver/smarterr" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/sqs" "github.com/hashicorp/terraform-plugin-log/tflog" @@ -26,7 +26,7 @@ func listTags(ctx context.Context, conn *sqs.Client, identifier string, optFns . output, err := conn.ListQueueTags(ctx, &input, optFns...) if err != nil { - return tftags.New(ctx, nil), err + return tftags.New(ctx, nil), smarterr.NewError(err) } return keyValueTags(ctx, output.Tags), nil @@ -38,7 +38,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri tags, err := listTags(ctx, meta.(*conns.AWSClient).SQSClient(ctx), identifier) if err != nil { - return err + return smarterr.NewError(err) } if inContext, ok := tftags.FromContext(ctx); ok { @@ -108,7 +108,7 @@ func updateTags(ctx context.Context, conn *sqs.Client, identifier string, oldTag _, err := conn.UntagQueue(ctx, &input, optFns...) 
if err != nil { - return fmt.Errorf("untagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } @@ -123,7 +123,7 @@ func updateTags(ctx context.Context, conn *sqs.Client, identifier string, oldTag _, err := conn.TagQueue(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("tagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } diff --git a/internal/service/sqs/testdata/Queue/basic/main_gen.tf b/internal/service/sqs/testdata/Queue/basic/main_gen.tf new file mode 100644 index 000000000000..e5ac13e70bf5 --- /dev/null +++ b/internal/service/sqs/testdata/Queue/basic/main_gen.tf @@ -0,0 +1,12 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resource "aws_sqs_queue" "test" { + name = var.rName +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} diff --git a/internal/service/sqs/testdata/Queue/basic_v6.9.0/main_gen.tf b/internal/service/sqs/testdata/Queue/basic_v6.9.0/main_gen.tf new file mode 100644 index 000000000000..da55752b22e2 --- /dev/null +++ b/internal/service/sqs/testdata/Queue/basic_v6.9.0/main_gen.tf @@ -0,0 +1,22 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resource "aws_sqs_queue" "test" { + name = var.rName +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "6.9.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/sqs/testdata/Queue/region_override/main_gen.tf b/internal/service/sqs/testdata/Queue/region_override/main_gen.tf new file mode 100644 index 000000000000..a2a371e382ec --- /dev/null +++ b/internal/service/sqs/testdata/Queue/region_override/main_gen.tf @@ -0,0 +1,20 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_sqs_queue" "test" { + region = var.region + + name = var.rName +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} + +variable "region" { + description = "Region to deploy resource in" + type = string + nullable = false +} diff --git a/internal/service/sqs/testdata/QueuePolicy/basic/main_gen.tf b/internal/service/sqs/testdata/QueuePolicy/basic/main_gen.tf new file mode 100644 index 000000000000..4d63ea93a972 --- /dev/null +++ b/internal/service/sqs/testdata/QueuePolicy/basic/main_gen.tf @@ -0,0 +1,33 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resource "aws_sqs_queue_policy" "test" { + queue_url = aws_sqs_queue.test.id + + policy = </") + } + + windowID := parts[0] + targetID := parts[1] + + result := map[string]string{ + "window_id": windowID, + } + return targetID, result, nil +} diff --git a/internal/service/ssm/maintenance_window_target_identity_gen_test.go b/internal/service/ssm/maintenance_window_target_identity_gen_test.go new file mode 100644 index 000000000000..1bded146a262 --- /dev/null +++ b/internal/service/ssm/maintenance_window_target_identity_gen_test.go @@ -0,0 +1,321 @@ +// Code generated by internal/generate/identitytests/main.go; DO NOT EDIT. 
+ +package ssm_test + +import ( + "testing" + + "github.com/aws/aws-sdk-go-v2/service/ssm/types" + "github.com/hashicorp/terraform-plugin-testing/config" + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/knownvalue" + "github.com/hashicorp/terraform-plugin-testing/plancheck" + "github.com/hashicorp/terraform-plugin-testing/statecheck" + "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" + "github.com/hashicorp/terraform-plugin-testing/tfversion" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + tfknownvalue "github.com/hashicorp/terraform-provider-aws/internal/acctest/knownvalue" + tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccSSMMaintenanceWindowTarget_Identity_Basic(t *testing.T) { + ctx := acctest.Context(t) + + var v types.MaintenanceWindowTarget + resourceName := "aws_ssm_maintenance_window_target.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.SSMServiceID), + CheckDestroy: testAccCheckMaintenanceWindowTargetDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + // Step 1: Setup + { + ConfigDirectory: config.StaticDirectory("testdata/MaintenanceWindowTarget/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckMaintenanceWindowTargetExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + 
statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + "window_id": knownvalue.NotNull(), + names.AttrID: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New("window_id")), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrID)), + }, + }, + + // Step 2: Import command + { + ConfigDirectory: config.StaticDirectory("testdata/MaintenanceWindowTarget/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ImportStateKind: resource.ImportCommandWithID, + ImportStateIdFunc: testAccMaintenanceWindowTargetImportStateIdFunc(resourceName), + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + + // Step 3: Import block with Import ID + { + ConfigDirectory: config.StaticDirectory("testdata/MaintenanceWindowTarget/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportStateIdFunc: testAccMaintenanceWindowTargetImportStateIdFunc(resourceName), + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New("window_id"), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + }, + }, + }, + + // Step 4: Import block with Resource Identity + { + ConfigDirectory: config.StaticDirectory("testdata/MaintenanceWindowTarget/basic/"), + ConfigVariables: config.Variables{ + 
acctest.CtRName: config.StringVariable(rName), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithResourceIdentity, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New("window_id"), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + }, + }, + }, + }, + }) +} + +func TestAccSSMMaintenanceWindowTarget_Identity_RegionOverride(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_ssm_maintenance_window_target.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.SSMServiceID), + CheckDestroy: acctest.CheckDestroyNoop, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + // Step 1: Setup + { + ConfigDirectory: config.StaticDirectory("testdata/MaintenanceWindowTarget/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.AlternateRegion()), + "window_id": knownvalue.NotNull(), + names.AttrID: knownvalue.NotNull(), + }), + 
statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New("window_id")), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrID)), + }, + }, + + // Step 2: Import command + { + ConfigDirectory: config.StaticDirectory("testdata/MaintenanceWindowTarget/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ImportStateKind: resource.ImportCommandWithID, + ImportStateIdFunc: acctest.CrossRegionImportStateIdFuncAdapter(resourceName, testAccMaintenanceWindowTargetImportStateIdFunc), + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + + // Step 3: Import block with Import ID + { + ConfigDirectory: config.StaticDirectory("testdata/MaintenanceWindowTarget/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportStateIdFunc: acctest.CrossRegionImportStateIdFuncAdapter(resourceName, testAccMaintenanceWindowTargetImportStateIdFunc), + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New("window_id"), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + }, + + // Step 4: Import block with Resource Identity + { + ConfigDirectory: config.StaticDirectory("testdata/MaintenanceWindowTarget/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: 
resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithResourceIdentity, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New("window_id"), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + }, + }, + }) +} + +// Resource Identity was added after v6.10.0 +func TestAccSSMMaintenanceWindowTarget_Identity_ExistingResource(t *testing.T) { + ctx := acctest.Context(t) + + var v types.MaintenanceWindowTarget + resourceName := "aws_ssm_maintenance_window_target.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.SSMServiceID), + CheckDestroy: testAccCheckMaintenanceWindowTargetDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/MaintenanceWindowTarget/basic_v6.10.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckMaintenanceWindowTargetExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/MaintenanceWindowTarget/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: 
[]plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + "window_id": knownvalue.NotNull(), + names.AttrID: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New("window_id")), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrID)), + }, + }, + }, + }) +} + +// Resource Identity was added after v6.10.0 +func TestAccSSMMaintenanceWindowTarget_Identity_ExistingResource_NoRefresh_NoChange(t *testing.T) { + ctx := acctest.Context(t) + + var v types.MaintenanceWindowTarget + resourceName := "aws_ssm_maintenance_window_target.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.SSMServiceID), + CheckDestroy: testAccCheckMaintenanceWindowTargetDestroy(ctx), + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/MaintenanceWindowTarget/basic_v6.10.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckMaintenanceWindowTargetExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + 
tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/MaintenanceWindowTarget/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + }, + }) +} diff --git a/internal/service/ssm/maintenance_window_task.go b/internal/service/ssm/maintenance_window_task.go index ee54f9b8d545..5438f26aaca2 100644 --- a/internal/service/ssm/maintenance_window_task.go +++ b/internal/service/ssm/maintenance_window_task.go @@ -26,11 +26,18 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/flex" tfmaps "github.com/hashicorp/terraform-provider-aws/internal/maps" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + inttypes "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/internal/verify" "github.com/hashicorp/terraform-provider-aws/names" ) // @SDKResource("aws_ssm_maintenance_window_task", name="Maintenance Window Task") +// @IdentityAttribute("window_id") +// @IdentityAttribute("id") +// @ImportIDHandler("maintenanceWindowTaskImportID") +// @Testing(existsType="github.com/aws/aws-sdk-go-v2/service/ssm;ssm.GetMaintenanceWindowTaskOutput") +// @Testing(preIdentityVersion="v6.10.0") +// @Testing(importStateIdFunc="testAccMaintenanceWindowTaskImportStateIdFunc") func resourceMaintenanceWindowTask() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceMaintenanceWindowTaskCreate, @@ -38,10 +45,6 
@@ func resourceMaintenanceWindowTask() *schema.Resource { UpdateWithoutTimeout: resourceMaintenanceWindowTaskUpdate, DeleteWithoutTimeout: resourceMaintenanceWindowTaskDelete, - Importer: &schema.ResourceImporter{ - StateContext: resourceMaintenanceWindowTaskImport, - }, - Schema: map[string]*schema.Schema{ names.AttrARN: { Type: schema.TypeString, @@ -504,21 +507,6 @@ func resourceMaintenanceWindowTaskDelete(ctx context.Context, d *schema.Resource return diags } -func resourceMaintenanceWindowTaskImport(ctx context.Context, d *schema.ResourceData, meta any) ([]*schema.ResourceData, error) { - idParts := strings.SplitN(d.Id(), "/", 2) - if len(idParts) != 2 || idParts[0] == "" || idParts[1] == "" { - return nil, fmt.Errorf("unexpected format of ID (%q), expected <window_id>/<window_task_id>", d.Id()) - } - - windowID := idParts[0] - windowTaskID := idParts[1] - - d.Set("window_id", windowID) - d.SetId(windowTaskID) - - return []*schema.ResourceData{d}, nil -} - func findMaintenanceWindowTaskByTwoPartKey(ctx context.Context, conn *ssm.Client, windowID, windowTaskID string) (*ssm.GetMaintenanceWindowTaskOutput, error) { input := &ssm.GetMaintenanceWindowTaskInput{ WindowId: aws.String(windowID), @@ -869,3 +857,26 @@ func flattenTaskInvocationCommonParameters(apiObject map[string][]string) []any return tfList } + +var _ inttypes.SDKv2ImportID = maintenanceWindowTaskImportID{} + +type maintenanceWindowTaskImportID struct{} + +func (maintenanceWindowTaskImportID) Create(d *schema.ResourceData) string { + return d.Id() +} + +func (maintenanceWindowTaskImportID) Parse(id string) (string, map[string]string, error) { + parts := strings.SplitN(id, "/", 2) + if len(parts) != 2 { + return id, nil, fmt.Errorf("maintenance_window_task id must be of the form <window_id>/<window_task_id>") + } + + windowID := parts[0] + taskID := parts[1] + + result := map[string]string{ + "window_id": windowID, + } + return taskID, result, nil +} diff --git a/internal/service/ssm/maintenance_window_task_identity_gen_test.go 
b/internal/service/ssm/maintenance_window_task_identity_gen_test.go new file mode 100644 index 000000000000..05aa464305e8 --- /dev/null +++ b/internal/service/ssm/maintenance_window_task_identity_gen_test.go @@ -0,0 +1,321 @@ +// Code generated by internal/generate/identitytests/main.go; DO NOT EDIT. + +package ssm_test + +import ( + "testing" + + "github.com/aws/aws-sdk-go-v2/service/ssm" + "github.com/hashicorp/terraform-plugin-testing/config" + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/knownvalue" + "github.com/hashicorp/terraform-plugin-testing/plancheck" + "github.com/hashicorp/terraform-plugin-testing/statecheck" + "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" + "github.com/hashicorp/terraform-plugin-testing/tfversion" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + tfknownvalue "github.com/hashicorp/terraform-provider-aws/internal/acctest/knownvalue" + tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccSSMMaintenanceWindowTask_Identity_Basic(t *testing.T) { + ctx := acctest.Context(t) + + var v ssm.GetMaintenanceWindowTaskOutput + resourceName := "aws_ssm_maintenance_window_task.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.SSMServiceID), + CheckDestroy: testAccCheckMaintenanceWindowTaskDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + // Step 1: Setup + { + ConfigDirectory: config.StaticDirectory("testdata/MaintenanceWindowTask/basic/"), + 
ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckMaintenanceWindowTaskExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + "window_id": knownvalue.NotNull(), + names.AttrID: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New("window_id")), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrID)), + }, + }, + + // Step 2: Import command + { + ConfigDirectory: config.StaticDirectory("testdata/MaintenanceWindowTask/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ImportStateKind: resource.ImportCommandWithID, + ImportStateIdFunc: testAccMaintenanceWindowTaskImportStateIdFunc(resourceName), + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + + // Step 3: Import block with Import ID + { + ConfigDirectory: config.StaticDirectory("testdata/MaintenanceWindowTask/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportStateIdFunc: testAccMaintenanceWindowTaskImportStateIdFunc(resourceName), + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New("window_id"), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, 
tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + }, + }, + }, + + // Step 4: Import block with Resource Identity + { + ConfigDirectory: config.StaticDirectory("testdata/MaintenanceWindowTask/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithResourceIdentity, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New("window_id"), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + }, + }, + }, + }, + }) +} + +func TestAccSSMMaintenanceWindowTask_Identity_RegionOverride(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_ssm_maintenance_window_task.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.SSMServiceID), + CheckDestroy: acctest.CheckDestroyNoop, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + // Step 1: Setup + { + ConfigDirectory: config.StaticDirectory("testdata/MaintenanceWindowTask/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + statecheck.ExpectIdentity(resourceName, 
map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.AlternateRegion()), + "window_id": knownvalue.NotNull(), + names.AttrID: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New("window_id")), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrID)), + }, + }, + + // Step 2: Import command + { + ConfigDirectory: config.StaticDirectory("testdata/MaintenanceWindowTask/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ImportStateKind: resource.ImportCommandWithID, + ImportStateIdFunc: acctest.CrossRegionImportStateIdFuncAdapter(resourceName, testAccMaintenanceWindowTaskImportStateIdFunc), + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + + // Step 3: Import block with Import ID + { + ConfigDirectory: config.StaticDirectory("testdata/MaintenanceWindowTask/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportStateIdFunc: acctest.CrossRegionImportStateIdFuncAdapter(resourceName, testAccMaintenanceWindowTaskImportStateIdFunc), + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New("window_id"), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + }, + + // Step 4: Import block with Resource Identity + { + ConfigDirectory: 
config.StaticDirectory("testdata/MaintenanceWindowTask/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithResourceIdentity, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New("window_id"), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + }, + }, + }) +} + +// Resource Identity was added after v6.10.0 +func TestAccSSMMaintenanceWindowTask_Identity_ExistingResource(t *testing.T) { + ctx := acctest.Context(t) + + var v ssm.GetMaintenanceWindowTaskOutput + resourceName := "aws_ssm_maintenance_window_task.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.SSMServiceID), + CheckDestroy: testAccCheckMaintenanceWindowTaskDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/MaintenanceWindowTask/basic_v6.10.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckMaintenanceWindowTaskExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + 
ConfigDirectory: config.StaticDirectory("testdata/MaintenanceWindowTask/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + "window_id": knownvalue.NotNull(), + names.AttrID: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New("window_id")), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrID)), + }, + }, + }, + }) +} + +// Resource Identity was added after v6.10.0 +func TestAccSSMMaintenanceWindowTask_Identity_ExistingResource_NoRefresh_NoChange(t *testing.T) { + ctx := acctest.Context(t) + + var v ssm.GetMaintenanceWindowTaskOutput + resourceName := "aws_ssm_maintenance_window_task.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.SSMServiceID), + CheckDestroy: testAccCheckMaintenanceWindowTaskDestroy(ctx), + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/MaintenanceWindowTask/basic_v6.10.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: 
config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckMaintenanceWindowTaskExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/MaintenanceWindowTask/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + }, + }) +} diff --git a/internal/service/ssm/parameter.go b/internal/service/ssm/parameter.go index 333983f24351..a53c1a11d480 100644 --- a/internal/service/ssm/parameter.go +++ b/internal/service/ssm/parameter.go @@ -23,6 +23,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" "github.com/hashicorp/terraform-provider-aws/internal/flex" + "github.com/hashicorp/terraform-provider-aws/internal/provider/sdkv2/importer" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/names" @@ -32,6 +33,11 @@ import ( // @Tags(identifierAttribute="id", resourceType="Parameter") // @Testing(existsType="github.com/aws/aws-sdk-go-v2/service/ssm/types;awstypes;awstypes.Parameter") // @Testing(importIgnore="has_value_wo") +// @IdentityAttribute("name") +// @Testing(idAttrDuplicates="name") +// @Testing(preIdentityVersion="v6.7.0") +// 
@Testing(plannableImportAction="NoOp") +// @CustomImport func resourceParameter() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceParameterCreate, @@ -41,6 +47,12 @@ func resourceParameter() *schema.Resource { Importer: &schema.ResourceImporter{ StateContext: func(ctx context.Context, d *schema.ResourceData, meta any) ([]*schema.ResourceData, error) { + identitySpec := importer.IdentitySpec(ctx) + + if err := importer.RegionalSingleParameterized(ctx, d, identitySpec, meta.(importer.AWSClient)); err != nil { + return nil, err + } + d.Set("has_value_wo", false) return []*schema.ResourceData{d}, nil }, @@ -150,16 +162,19 @@ func resourceParameter() *schema.Resource { return awstypes.ParameterTier(old.(string)) == awstypes.ParameterTierAdvanced && awstypes.ParameterTier(new.(string)) == awstypes.ParameterTierStandard }), customdiff.ComputedIf(names.AttrVersion, func(_ context.Context, diff *schema.ResourceDiff, meta any) bool { - return diff.HasChange(names.AttrValue) + return diff.HasChange(names.AttrValue) || !diff.NewValueKnown(names.AttrValue) || diff.HasChange(names.AttrDescription) }), customdiff.ComputedIf(names.AttrValue, func(_ context.Context, diff *schema.ResourceDiff, meta any) bool { return diff.HasChange("insecure_value") }), customdiff.ComputedIf("insecure_value", func(_ context.Context, diff *schema.ResourceDiff, meta any) bool { - return diff.HasChange(names.AttrValue) + if diff.NewValueKnown("insecure_value") { + return false + } + return diff.HasChange(names.AttrValue) || !diff.NewValueKnown(names.AttrValue) }), customdiff.ComputedIf("has_value_wo", func(_ context.Context, diff *schema.ResourceDiff, meta any) bool { - return diff.HasChange("value_wo_version") + return diff.HasChange("value_wo_version") || !diff.NewValueKnown("value_wo_version") }), ), } @@ -253,7 +268,7 @@ func resourceParameterRead(ctx context.Context, d *schema.ResourceData, meta any timeout = 2 * time.Minute ) outputRaw, err := tfresource.RetryWhen(ctx, 
timeout, - func() (any, error) { + func(ctx context.Context) (any, error) { return findParameterByName(ctx, conn, d.Id(), true) }, func(err error) (bool, error) { @@ -292,6 +307,9 @@ func resourceParameterRead(ctx context.Context, d *schema.ResourceData, meta any if valueWO != "" { hasWriteOnly = true + } else { + hasWriteOnly = false + d.Set("has_value_wo", nil) } } diff --git a/internal/service/ssm/parameter_identity_gen_test.go b/internal/service/ssm/parameter_identity_gen_test.go new file mode 100644 index 000000000000..817e10e4e1eb --- /dev/null +++ b/internal/service/ssm/parameter_identity_gen_test.go @@ -0,0 +1,320 @@ +// Code generated by internal/generate/identitytests/main.go; DO NOT EDIT. + +package ssm_test + +import ( + "testing" + + awstypes "github.com/aws/aws-sdk-go-v2/service/ssm/types" + "github.com/hashicorp/terraform-plugin-testing/compare" + "github.com/hashicorp/terraform-plugin-testing/config" + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/knownvalue" + "github.com/hashicorp/terraform-plugin-testing/plancheck" + "github.com/hashicorp/terraform-plugin-testing/statecheck" + "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" + "github.com/hashicorp/terraform-plugin-testing/tfversion" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + tfknownvalue "github.com/hashicorp/terraform-provider-aws/internal/acctest/knownvalue" + tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccSSMParameter_Identity_Basic(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.Parameter + resourceName := "aws_ssm_parameter.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: 
[]tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.SSMServiceID), + CheckDestroy: testAccCheckParameterDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + // Step 1: Setup + { + ConfigDirectory: config.StaticDirectory("testdata/Parameter/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckParameterExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New(names.AttrName), compare.ValuesSame()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + names.AttrName: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrName)), + }, + }, + + // Step 2: Import command + { + ConfigDirectory: config.StaticDirectory("testdata/Parameter/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ImportStateKind: resource.ImportCommandWithID, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "has_value_wo", + }, + }, + + // Step 3: Import block with Import ID + { + ConfigDirectory: config.StaticDirectory("testdata/Parameter/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportPlanChecks: 
resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrName), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + }, + }, + }, + + // Step 4: Import block with Resource Identity + { + ConfigDirectory: config.StaticDirectory("testdata/Parameter/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithResourceIdentity, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrName), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + }, + }, + }, + }, + }) +} + +func TestAccSSMParameter_Identity_RegionOverride(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_ssm_parameter.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.SSMServiceID), + CheckDestroy: acctest.CheckDestroyNoop, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + // Step 1: Setup + { + ConfigDirectory: config.StaticDirectory("testdata/Parameter/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": 
config.StringVariable(acctest.AlternateRegion()), + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New(names.AttrName), compare.ValuesSame()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.AlternateRegion()), + names.AttrName: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrName)), + }, + }, + + // Step 2: Import command + { + ConfigDirectory: config.StaticDirectory("testdata/Parameter/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ImportStateKind: resource.ImportCommandWithID, + ImportStateIdFunc: acctest.CrossRegionImportStateIdFunc(resourceName), + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "has_value_wo", + }, + }, + + // Step 3: Import block with Import ID + { + ConfigDirectory: config.StaticDirectory("testdata/Parameter/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportStateIdFunc: acctest.CrossRegionImportStateIdFunc(resourceName), + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrName), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), 
knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + }, + + // Step 4: Import block with Resource Identity + { + ConfigDirectory: config.StaticDirectory("testdata/Parameter/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithResourceIdentity, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrName), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + }, + }, + }) +} + +// Resource Identity was added after v6.7.0 +func TestAccSSMParameter_Identity_ExistingResource(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.Parameter + resourceName := "aws_ssm_parameter.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.SSMServiceID), + CheckDestroy: testAccCheckParameterDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/Parameter/basic_v6.7.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckParameterExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: 
config.StaticDirectory("testdata/Parameter/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + names.AttrName: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrName)), + }, + }, + }, + }) +} + +// Resource Identity was added after v6.7.0 +func TestAccSSMParameter_Identity_ExistingResource_NoRefresh_NoChange(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.Parameter + resourceName := "aws_ssm_parameter.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.SSMServiceID), + CheckDestroy: testAccCheckParameterDestroy(ctx), + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/Parameter/basic_v6.7.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckParameterExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + 
tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Parameter/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + }, + }) +} diff --git a/internal/service/ssm/parameter_tags_gen_test.go b/internal/service/ssm/parameter_tags_gen_test.go index 8e8b7819989a..b87d7515181a 100644 --- a/internal/service/ssm/parameter_tags_gen_test.go +++ b/internal/service/ssm/parameter_tags_gen_test.go @@ -7,7 +7,6 @@ import ( awstypes "github.com/aws/aws-sdk-go-v2/service/ssm/types" "github.com/hashicorp/terraform-plugin-testing/config" - sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/knownvalue" "github.com/hashicorp/terraform-plugin-testing/plancheck" @@ -19,11 +18,12 @@ import ( func TestAccSSMParameter_tags(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Parameter resourceName := "aws_ssm_parameter.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SSMServiceID), CheckDestroy: testAccCheckParameterDestroy(ctx), @@ -213,11 +213,12 @@ func TestAccSSMParameter_tags(t *testing.T) { func 
TestAccSSMParameter_tags_null(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Parameter resourceName := "aws_ssm_parameter.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SSMServiceID), CheckDestroy: testAccCheckParameterDestroy(ctx), @@ -283,11 +284,12 @@ func TestAccSSMParameter_tags_null(t *testing.T) { func TestAccSSMParameter_tags_EmptyMap(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Parameter resourceName := "aws_ssm_parameter.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SSMServiceID), CheckDestroy: testAccCheckParameterDestroy(ctx), @@ -349,11 +351,12 @@ func TestAccSSMParameter_tags_EmptyMap(t *testing.T) { func TestAccSSMParameter_tags_AddOnUpdate(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Parameter resourceName := "aws_ssm_parameter.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SSMServiceID), CheckDestroy: testAccCheckParameterDestroy(ctx), @@ -433,11 +436,12 @@ func TestAccSSMParameter_tags_AddOnUpdate(t *testing.T) { func TestAccSSMParameter_tags_EmptyTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Parameter resourceName := "aws_ssm_parameter.test" - rName := 
sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SSMServiceID), CheckDestroy: testAccCheckParameterDestroy(ctx), @@ -528,11 +532,12 @@ func TestAccSSMParameter_tags_EmptyTag_OnCreate(t *testing.T) { func TestAccSSMParameter_tags_EmptyTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Parameter resourceName := "aws_ssm_parameter.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SSMServiceID), CheckDestroy: testAccCheckParameterDestroy(ctx), @@ -671,11 +676,12 @@ func TestAccSSMParameter_tags_EmptyTag_OnUpdate_Add(t *testing.T) { func TestAccSSMParameter_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Parameter resourceName := "aws_ssm_parameter.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SSMServiceID), CheckDestroy: testAccCheckParameterDestroy(ctx), @@ -763,11 +769,12 @@ func TestAccSSMParameter_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { func TestAccSSMParameter_tags_DefaultTags_providerOnly(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Parameter resourceName := "aws_ssm_parameter.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - 
resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SSMServiceID), CheckDestroy: testAccCheckParameterDestroy(ctx), @@ -956,11 +963,12 @@ func TestAccSSMParameter_tags_DefaultTags_providerOnly(t *testing.T) { func TestAccSSMParameter_tags_DefaultTags_nonOverlapping(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Parameter resourceName := "aws_ssm_parameter.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SSMServiceID), CheckDestroy: testAccCheckParameterDestroy(ctx), @@ -1125,11 +1133,12 @@ func TestAccSSMParameter_tags_DefaultTags_nonOverlapping(t *testing.T) { func TestAccSSMParameter_tags_DefaultTags_overlapping(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Parameter resourceName := "aws_ssm_parameter.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SSMServiceID), CheckDestroy: testAccCheckParameterDestroy(ctx), @@ -1310,11 +1319,12 @@ func TestAccSSMParameter_tags_DefaultTags_overlapping(t *testing.T) { func TestAccSSMParameter_tags_DefaultTags_updateToProviderOnly(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Parameter resourceName := "aws_ssm_parameter.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, 
resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SSMServiceID), CheckDestroy: testAccCheckParameterDestroy(ctx), @@ -1403,11 +1413,12 @@ func TestAccSSMParameter_tags_DefaultTags_updateToProviderOnly(t *testing.T) { func TestAccSSMParameter_tags_DefaultTags_updateToResourceOnly(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Parameter resourceName := "aws_ssm_parameter.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SSMServiceID), CheckDestroy: testAccCheckParameterDestroy(ctx), @@ -1495,11 +1506,12 @@ func TestAccSSMParameter_tags_DefaultTags_updateToResourceOnly(t *testing.T) { func TestAccSSMParameter_tags_DefaultTags_emptyResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Parameter resourceName := "aws_ssm_parameter.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SSMServiceID), CheckDestroy: testAccCheckParameterDestroy(ctx), @@ -1563,11 +1575,12 @@ func TestAccSSMParameter_tags_DefaultTags_emptyResourceTag(t *testing.T) { func TestAccSSMParameter_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Parameter resourceName := "aws_ssm_parameter.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, 
t) }, ErrorCheck: acctest.ErrorCheck(t, names.SSMServiceID), CheckDestroy: testAccCheckParameterDestroy(ctx), @@ -1623,11 +1636,12 @@ func TestAccSSMParameter_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T) { func TestAccSSMParameter_tags_DefaultTags_nullOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Parameter resourceName := "aws_ssm_parameter.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SSMServiceID), CheckDestroy: testAccCheckParameterDestroy(ctx), @@ -1688,11 +1702,12 @@ func TestAccSSMParameter_tags_DefaultTags_nullOverlappingResourceTag(t *testing. func TestAccSSMParameter_tags_DefaultTags_nullNonOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Parameter resourceName := "aws_ssm_parameter.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SSMServiceID), CheckDestroy: testAccCheckParameterDestroy(ctx), @@ -1753,11 +1768,12 @@ func TestAccSSMParameter_tags_DefaultTags_nullNonOverlappingResourceTag(t *testi func TestAccSSMParameter_tags_ComputedTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Parameter resourceName := "aws_ssm_parameter.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, 
names.SSMServiceID), CheckDestroy: testAccCheckParameterDestroy(ctx), @@ -1811,11 +1827,12 @@ func TestAccSSMParameter_tags_ComputedTag_OnCreate(t *testing.T) { func TestAccSSMParameter_tags_ComputedTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Parameter resourceName := "aws_ssm_parameter.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SSMServiceID), CheckDestroy: testAccCheckParameterDestroy(ctx), @@ -1911,11 +1928,12 @@ func TestAccSSMParameter_tags_ComputedTag_OnUpdate_Add(t *testing.T) { func TestAccSSMParameter_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Parameter resourceName := "aws_ssm_parameter.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SSMServiceID), CheckDestroy: testAccCheckParameterDestroy(ctx), @@ -2001,11 +2019,12 @@ func TestAccSSMParameter_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { func TestAccSSMParameter_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Parameter resourceName := "aws_ssm_parameter.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SSMServiceID), CheckDestroy: testAccCheckParameterDestroy(ctx), @@ -2163,11 +2182,12 
@@ func TestAccSSMParameter_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { func TestAccSSMParameter_tags_IgnoreTags_Overlap_ResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v awstypes.Parameter resourceName := "aws_ssm_parameter.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SSMServiceID), CheckDestroy: testAccCheckParameterDestroy(ctx), diff --git a/internal/service/ssm/parameter_test.go b/internal/service/ssm/parameter_test.go index 24e2009dc8a2..d6b714d659cb 100644 --- a/internal/service/ssm/parameter_test.go +++ b/internal/service/ssm/parameter_test.go @@ -6,6 +6,7 @@ package ssm_test import ( "context" "fmt" + "math/big" "testing" "github.com/YakDriver/regexache" @@ -234,6 +235,114 @@ func TestAccSSMParameter_writeOnly(t *testing.T) { }) } +func TestAccSSMParameter_changeValueToWriteOnly(t *testing.T) { + ctx := acctest.Context(t) + var param awstypes.Parameter + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_ssm_parameter.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.SSMServiceID), + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(version.Must(version.NewVersion("1.11.0"))), + }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckParameterDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccParameterConfig_changeValueToWriteOnly1(rName, "SecureString", "test"), + Check: resource.ComposeTestCheckFunc( + testAccCheckParameterExists(ctx, resourceName, ¶m), + ), + }, + { + Config: testAccParameterConfig_changeValueToWriteOnly2(rName, "SecureString", "testUpdated"), + 
Check: resource.ComposeTestCheckFunc( + testAccCheckParameterExists(ctx, resourceName, ¶m), + testAccCheckParameterWriteOnlyValueEqual(t, ¶m, "testUpdated"), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("has_value_wo"), knownvalue.Bool(true)), + statecheck.ExpectKnownValue( + resourceName, + tfjsonpath.New("value_wo_version"), + knownvalue.NumberFunc(func(v *big.Float) error { + if v.IsInt() { + if v == nil { + return fmt.Errorf("version is nil") + } + if v.Cmp(big.NewFloat(0)) <= 0 { // Si v <= 0 + return fmt.Errorf("expected version to be greater than 0, got %s", v.String()) + } + return nil + } else { + return fmt.Errorf("expected version to be an int value") + } + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("value_wo"), knownvalue.Null()), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New("has_value_wo")), + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New("value_wo_version")), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New("value_wo"), knownvalue.Null()), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + }, + { + Config: testAccParameterConfig_changeValueToWriteOnly1(rName, "SecureString", "test"), + Check: resource.ComposeTestCheckFunc( + testAccCheckParameterExists(ctx, resourceName, ¶m), + resource.TestCheckResourceAttr(resourceName, names.AttrType, "SecureString"), + resource.TestCheckResourceAttr(resourceName, names.AttrValue, "test"), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue( + resourceName, + 
tfjsonpath.New("has_value_wo"), + knownvalue.Bool(false), + ), + statecheck.ExpectKnownValue( + resourceName, + tfjsonpath.New("value_wo_version"), + knownvalue.NumberExact(big.NewFloat(float64(0))), + ), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("value_wo"), knownvalue.Null()), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New("has_value_wo")), + plancheck.ExpectKnownValue( + resourceName, + tfjsonpath.New("value_wo_version"), + knownvalue.Null(), + ), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New("value_wo"), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New("insecure_value"), knownvalue.Null()), + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrValue)), + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrVersion)), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + }, + }, + }) +} + func TestAccSSMParameter_tier(t *testing.T) { ctx := acctest.Context(t) var parameter1, parameter2, parameter3 awstypes.Parameter @@ -1595,3 +1704,44 @@ resource "aws_ssm_parameter" "test" { } `, rName, value, valueVersion) } + +func testAccParameterConfig_changeValueToWriteOnly1(rName, typ, value string) string { + return fmt.Sprintf(` +resource "aws_ssm_parameter" "prereq" { + name = "%[1]s-prereq" + type = %[2]q + value = %[3]q +} + +data "aws_ssm_parameter" "prereq" { + name = aws_ssm_parameter.prereq.name +} + +resource "aws_ssm_parameter" "test" { + name = %[1]q + type = %[2]q + value = data.aws_ssm_parameter.prereq.value +} +`, rName, typ, value) +} + +func 
testAccParameterConfig_changeValueToWriteOnly2(rName, typ, value string) string { + return fmt.Sprintf(` +resource "aws_ssm_parameter" "prereq" { + name = "%[1]s-prereq" + type = %[2]q + value = %[3]q +} + +data "aws_ssm_parameter" "prereq" { + name = aws_ssm_parameter.prereq.name +} + +resource "aws_ssm_parameter" "test" { + name = %[1]q + type = %[2]q + value_wo = data.aws_ssm_parameter.prereq.value + value_wo_version = data.aws_ssm_parameter.prereq.version +} +`, rName, typ, value) +} diff --git a/internal/service/ssm/patch_baseline.go b/internal/service/ssm/patch_baseline.go index f51c28be539b..32cf34ebca58 100644 --- a/internal/service/ssm/patch_baseline.go +++ b/internal/service/ssm/patch_baseline.go @@ -32,7 +32,9 @@ import ( // @SDKResource("aws_ssm_patch_baseline", name="Patch Baseline") // @Tags(identifierAttribute="id", resourceType="PatchBaseline") +// @IdentityAttribute("id") // @Testing(existsType="github.com/aws/aws-sdk-go-v2/service/ssm;ssm.GetPatchBaselineOutput") +// @Testing(preIdentityVersion="v6.10.0") func resourcePatchBaseline() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourcePatchBaselineCreate, @@ -40,10 +42,6 @@ func resourcePatchBaseline() *schema.Resource { UpdateWithoutTimeout: resourcePatchBaselineUpdate, DeleteWithoutTimeout: resourcePatchBaselineDelete, - Importer: &schema.ResourceImporter{ - StateContext: schema.ImportStatePassthroughContext, - }, - Schema: map[string]*schema.Schema{ "approval_rule": { Type: schema.TypeList, @@ -121,6 +119,12 @@ func resourcePatchBaseline() *schema.Resource { Type: schema.TypeString, Computed: true, }, + "available_security_updates_compliance_status": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateDiagFunc: enum.Validate[awstypes.PatchComplianceStatus](), + }, names.AttrDescription: { Type: schema.TypeString, Optional: true, @@ -266,6 +270,10 @@ func resourcePatchBaselineCreate(ctx context.Context, d *schema.ResourceData, me 
input.ApprovedPatchesEnableNonSecurity = aws.Bool(v.(bool)) } + if v, ok := d.GetOk("available_security_updates_compliance_status"); ok { + input.AvailableSecurityUpdatesComplianceStatus = awstypes.PatchComplianceStatus(v.(string)) + } + if v, ok := d.GetOk(names.AttrDescription); ok { input.Description = aws.String(v.(string)) } @@ -333,6 +341,7 @@ func resourcePatchBaselineRead(ctx context.Context, d *schema.ResourceData, meta Resource: "patchbaseline/" + strings.TrimPrefix(d.Id(), "/"), }.String() d.Set(names.AttrARN, arn) + d.Set("available_security_updates_compliance_status", output.AvailableSecurityUpdatesComplianceStatus) d.Set(names.AttrDescription, output.Description) if err := d.Set("global_filter", flattenPatchFilterGroup(output.GlobalFilters)); err != nil { return sdkdiag.AppendErrorf(diags, "setting global_filter: %s", err) @@ -374,6 +383,10 @@ func resourcePatchBaselineUpdate(ctx context.Context, d *schema.ResourceData, me input.ApprovedPatchesEnableNonSecurity = aws.Bool(d.Get("approved_patches_enable_non_security").(bool)) } + if d.HasChange("available_security_updates_compliance_status") { + input.AvailableSecurityUpdatesComplianceStatus = awstypes.PatchComplianceStatus(d.Get("available_security_updates_compliance_status").(string)) + } + if d.HasChange(names.AttrDescription) { input.Description = aws.String(d.Get(names.AttrDescription).(string)) } diff --git a/internal/service/ssm/patch_baseline_data_source.go b/internal/service/ssm/patch_baseline_data_source.go index e90c624da3de..b185de9fa9fa 100644 --- a/internal/service/ssm/patch_baseline_data_source.go +++ b/internal/service/ssm/patch_baseline_data_source.go @@ -81,6 +81,10 @@ func dataSourcePatchBaseline() *schema.Resource { }, }, }, + "available_security_updates_compliance_status": { + Type: schema.TypeString, + Computed: true, + }, "default_baseline": { Type: schema.TypeBool, Optional: true, @@ -240,6 +244,7 @@ func dataPatchBaselineRead(ctx context.Context, d *schema.ResourceData, meta 
any if err := d.Set("approval_rule", flattenPatchRuleGroup(output.ApprovalRules)); err != nil { return sdkdiag.AppendErrorf(diags, "setting approval_rule: %s", err) } + d.Set("available_security_updates_compliance_status", output.AvailableSecurityUpdatesComplianceStatus) d.Set("default_baseline", baseline.DefaultBaseline) d.Set(names.AttrDescription, baseline.BaselineDescription) if err := d.Set("global_filter", flattenPatchFilterGroup(output.GlobalFilters)); err != nil { diff --git a/internal/service/ssm/patch_baseline_data_source_test.go b/internal/service/ssm/patch_baseline_data_source_test.go index 3798b3aa3a05..ed8fe4797511 100644 --- a/internal/service/ssm/patch_baseline_data_source_test.go +++ b/internal/service/ssm/patch_baseline_data_source_test.go @@ -75,6 +75,15 @@ func TestAccSSMPatchBaselineDataSource_newBaseline(t *testing.T) { resource.TestCheckResourceAttrPair(dataSourceName, names.AttrSource, resourceName, names.AttrSource), ), }, + { + Config: testAccPatchBaselineDataSourceConfig_availableSecurityUpdatesComplianceStatus(rName, "COMPLIANT"), + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttrPair(dataSourceName, "approval_rule", resourceName, "approval_rule"), + resource.TestCheckResourceAttrPair(dataSourceName, "available_security_updates_compliance_status", resourceName, "available_security_updates_compliance_status"), + resource.TestCheckResourceAttrPair(dataSourceName, names.AttrDescription, resourceName, names.AttrDescription), + resource.TestCheckResourceAttrPair(dataSourceName, names.AttrName, resourceName, names.AttrName), + ), + }, }, }) } @@ -114,3 +123,38 @@ data "aws_ssm_patch_baseline" "test" { } `, name) } + +func testAccPatchBaselineDataSourceConfig_availableSecurityUpdatesComplianceStatus(rName, complianceStatus string) string { + return fmt.Sprintf(` +resource "aws_ssm_patch_baseline" "test" { + name = "patch-baseline-%[1]s" + operating_system = "WINDOWS" + description = "Baseline" + 
approved_patches_compliance_level = "CRITICAL" + available_security_updates_compliance_status = "%[2]s" + approval_rule { + approve_after_days = 7 + compliance_level = "CRITICAL" + patch_filter { + key = "PRODUCT" + values = ["WindowsServer2019", "WindowsServer2022", "MicrosoftDefenderAntivirus"] + } + patch_filter { + key = "CLASSIFICATION" + values = ["CriticalUpdates", "FeaturePacks", "SecurityUpdates", "Updates", "UpdateRollups"] + } + patch_filter { + key = "MSRC_SEVERITY" + values = ["*"] + } + } +} + +data "aws_ssm_patch_baseline" "test" { + owner = "Self" + name_prefix = aws_ssm_patch_baseline.test.name + operating_system = "WINDOWS" +} + +`, rName, complianceStatus) +} diff --git a/internal/service/ssm/patch_baseline_identity_gen_test.go b/internal/service/ssm/patch_baseline_identity_gen_test.go new file mode 100644 index 000000000000..1af811fe64c9 --- /dev/null +++ b/internal/service/ssm/patch_baseline_identity_gen_test.go @@ -0,0 +1,309 @@ +// Code generated by internal/generate/identitytests/main.go; DO NOT EDIT. 
+ +package ssm_test + +import ( + "testing" + + "github.com/aws/aws-sdk-go-v2/service/ssm" + "github.com/hashicorp/terraform-plugin-testing/config" + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/knownvalue" + "github.com/hashicorp/terraform-plugin-testing/plancheck" + "github.com/hashicorp/terraform-plugin-testing/statecheck" + "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" + "github.com/hashicorp/terraform-plugin-testing/tfversion" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + tfknownvalue "github.com/hashicorp/terraform-provider-aws/internal/acctest/knownvalue" + tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccSSMPatchBaseline_Identity_Basic(t *testing.T) { + ctx := acctest.Context(t) + + var v ssm.GetPatchBaselineOutput + resourceName := "aws_ssm_patch_baseline.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.SSMServiceID), + CheckDestroy: testAccCheckPatchBaselineDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + // Step 1: Setup + { + ConfigDirectory: config.StaticDirectory("testdata/PatchBaseline/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckPatchBaselineExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), 
knownvalue.StringExact(acctest.Region())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + names.AttrID: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrID)), + }, + }, + + // Step 2: Import command + { + ConfigDirectory: config.StaticDirectory("testdata/PatchBaseline/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ImportStateKind: resource.ImportCommandWithID, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + + // Step 3: Import block with Import ID + { + ConfigDirectory: config.StaticDirectory("testdata/PatchBaseline/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + }, + }, + }, + + // Step 4: Import block with Resource Identity + { + ConfigDirectory: config.StaticDirectory("testdata/PatchBaseline/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithResourceIdentity, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + }, + }, + }, + }, + }) +} + +func 
TestAccSSMPatchBaseline_Identity_RegionOverride(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_ssm_patch_baseline.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.SSMServiceID), + CheckDestroy: acctest.CheckDestroyNoop, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + // Step 1: Setup + { + ConfigDirectory: config.StaticDirectory("testdata/PatchBaseline/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.AlternateRegion()), + names.AttrID: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrID)), + }, + }, + + // Step 2: Import command + { + ConfigDirectory: config.StaticDirectory("testdata/PatchBaseline/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ImportStateKind: resource.ImportCommandWithID, + ImportStateIdFunc: acctest.CrossRegionImportStateIdFunc(resourceName), + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + + // Step 3: Import block with Import ID + { + ConfigDirectory: config.StaticDirectory("testdata/PatchBaseline/region_override/"), + 
ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportStateIdFunc: acctest.CrossRegionImportStateIdFunc(resourceName), + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + }, + + // Step 4: Import block with Resource Identity + { + ConfigDirectory: config.StaticDirectory("testdata/PatchBaseline/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithResourceIdentity, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + }, + }, + }) +} + +// Resource Identity was added after v6.10.0 +func TestAccSSMPatchBaseline_Identity_ExistingResource(t *testing.T) { + ctx := acctest.Context(t) + + var v ssm.GetPatchBaselineOutput + resourceName := "aws_ssm_patch_baseline.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.SSMServiceID), + CheckDestroy: 
testAccCheckPatchBaselineDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/PatchBaseline/basic_v6.10.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckPatchBaselineExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/PatchBaseline/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + names.AttrID: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrID)), + }, + }, + }, + }) +} + +// Resource Identity was added after v6.10.0 +func TestAccSSMPatchBaseline_Identity_ExistingResource_NoRefresh_NoChange(t *testing.T) { + ctx := acctest.Context(t) + + var v ssm.GetPatchBaselineOutput + resourceName := "aws_ssm_patch_baseline.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: 
acctest.ErrorCheck(t, names.SSMServiceID), + CheckDestroy: testAccCheckPatchBaselineDestroy(ctx), + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/PatchBaseline/basic_v6.10.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckPatchBaselineExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/PatchBaseline/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + }, + }) +} diff --git a/internal/service/ssm/patch_baseline_tags_gen_test.go b/internal/service/ssm/patch_baseline_tags_gen_test.go index 7f46c3f54131..ed800765bcf9 100644 --- a/internal/service/ssm/patch_baseline_tags_gen_test.go +++ b/internal/service/ssm/patch_baseline_tags_gen_test.go @@ -7,7 +7,6 @@ import ( "github.com/aws/aws-sdk-go-v2/service/ssm" "github.com/hashicorp/terraform-plugin-testing/config" - sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/knownvalue" 
"github.com/hashicorp/terraform-plugin-testing/plancheck" @@ -19,11 +18,12 @@ import ( func TestAccSSMPatchBaseline_tags(t *testing.T) { ctx := acctest.Context(t) + var v ssm.GetPatchBaselineOutput resourceName := "aws_ssm_patch_baseline.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SSMServiceID), CheckDestroy: testAccCheckPatchBaselineDestroy(ctx), @@ -201,11 +201,12 @@ func TestAccSSMPatchBaseline_tags(t *testing.T) { func TestAccSSMPatchBaseline_tags_null(t *testing.T) { ctx := acctest.Context(t) + var v ssm.GetPatchBaselineOutput resourceName := "aws_ssm_patch_baseline.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SSMServiceID), CheckDestroy: testAccCheckPatchBaselineDestroy(ctx), @@ -268,11 +269,12 @@ func TestAccSSMPatchBaseline_tags_null(t *testing.T) { func TestAccSSMPatchBaseline_tags_EmptyMap(t *testing.T) { ctx := acctest.Context(t) + var v ssm.GetPatchBaselineOutput resourceName := "aws_ssm_patch_baseline.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SSMServiceID), CheckDestroy: testAccCheckPatchBaselineDestroy(ctx), @@ -331,11 +333,12 @@ func TestAccSSMPatchBaseline_tags_EmptyMap(t *testing.T) { func TestAccSSMPatchBaseline_tags_AddOnUpdate(t *testing.T) { ctx 
:= acctest.Context(t) + var v ssm.GetPatchBaselineOutput resourceName := "aws_ssm_patch_baseline.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SSMServiceID), CheckDestroy: testAccCheckPatchBaselineDestroy(ctx), @@ -412,11 +415,12 @@ func TestAccSSMPatchBaseline_tags_AddOnUpdate(t *testing.T) { func TestAccSSMPatchBaseline_tags_EmptyTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) + var v ssm.GetPatchBaselineOutput resourceName := "aws_ssm_patch_baseline.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SSMServiceID), CheckDestroy: testAccCheckPatchBaselineDestroy(ctx), @@ -501,11 +505,12 @@ func TestAccSSMPatchBaseline_tags_EmptyTag_OnCreate(t *testing.T) { func TestAccSSMPatchBaseline_tags_EmptyTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + var v ssm.GetPatchBaselineOutput resourceName := "aws_ssm_patch_baseline.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SSMServiceID), CheckDestroy: testAccCheckPatchBaselineDestroy(ctx), @@ -638,11 +643,12 @@ func TestAccSSMPatchBaseline_tags_EmptyTag_OnUpdate_Add(t *testing.T) { func TestAccSSMPatchBaseline_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + var v ssm.GetPatchBaselineOutput 
resourceName := "aws_ssm_patch_baseline.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SSMServiceID), CheckDestroy: testAccCheckPatchBaselineDestroy(ctx), @@ -727,11 +733,12 @@ func TestAccSSMPatchBaseline_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { func TestAccSSMPatchBaseline_tags_DefaultTags_providerOnly(t *testing.T) { ctx := acctest.Context(t) + var v ssm.GetPatchBaselineOutput resourceName := "aws_ssm_patch_baseline.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SSMServiceID), CheckDestroy: testAccCheckPatchBaselineDestroy(ctx), @@ -908,11 +915,12 @@ func TestAccSSMPatchBaseline_tags_DefaultTags_providerOnly(t *testing.T) { func TestAccSSMPatchBaseline_tags_DefaultTags_nonOverlapping(t *testing.T) { ctx := acctest.Context(t) + var v ssm.GetPatchBaselineOutput resourceName := "aws_ssm_patch_baseline.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SSMServiceID), CheckDestroy: testAccCheckPatchBaselineDestroy(ctx), @@ -1068,11 +1076,12 @@ func TestAccSSMPatchBaseline_tags_DefaultTags_nonOverlapping(t *testing.T) { func TestAccSSMPatchBaseline_tags_DefaultTags_overlapping(t *testing.T) { ctx := acctest.Context(t) + var v ssm.GetPatchBaselineOutput resourceName := 
"aws_ssm_patch_baseline.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SSMServiceID), CheckDestroy: testAccCheckPatchBaselineDestroy(ctx), @@ -1244,11 +1253,12 @@ func TestAccSSMPatchBaseline_tags_DefaultTags_overlapping(t *testing.T) { func TestAccSSMPatchBaseline_tags_DefaultTags_updateToProviderOnly(t *testing.T) { ctx := acctest.Context(t) + var v ssm.GetPatchBaselineOutput resourceName := "aws_ssm_patch_baseline.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SSMServiceID), CheckDestroy: testAccCheckPatchBaselineDestroy(ctx), @@ -1334,11 +1344,12 @@ func TestAccSSMPatchBaseline_tags_DefaultTags_updateToProviderOnly(t *testing.T) func TestAccSSMPatchBaseline_tags_DefaultTags_updateToResourceOnly(t *testing.T) { ctx := acctest.Context(t) + var v ssm.GetPatchBaselineOutput resourceName := "aws_ssm_patch_baseline.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SSMServiceID), CheckDestroy: testAccCheckPatchBaselineDestroy(ctx), @@ -1423,11 +1434,12 @@ func TestAccSSMPatchBaseline_tags_DefaultTags_updateToResourceOnly(t *testing.T) func TestAccSSMPatchBaseline_tags_DefaultTags_emptyResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v ssm.GetPatchBaselineOutput 
resourceName := "aws_ssm_patch_baseline.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SSMServiceID), CheckDestroy: testAccCheckPatchBaselineDestroy(ctx), @@ -1488,11 +1500,12 @@ func TestAccSSMPatchBaseline_tags_DefaultTags_emptyResourceTag(t *testing.T) { func TestAccSSMPatchBaseline_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T) { ctx := acctest.Context(t) + var v ssm.GetPatchBaselineOutput resourceName := "aws_ssm_patch_baseline.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SSMServiceID), CheckDestroy: testAccCheckPatchBaselineDestroy(ctx), @@ -1545,11 +1558,12 @@ func TestAccSSMPatchBaseline_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T) func TestAccSSMPatchBaseline_tags_DefaultTags_nullOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v ssm.GetPatchBaselineOutput resourceName := "aws_ssm_patch_baseline.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SSMServiceID), CheckDestroy: testAccCheckPatchBaselineDestroy(ctx), @@ -1607,11 +1621,12 @@ func TestAccSSMPatchBaseline_tags_DefaultTags_nullOverlappingResourceTag(t *test func TestAccSSMPatchBaseline_tags_DefaultTags_nullNonOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) + var 
v ssm.GetPatchBaselineOutput resourceName := "aws_ssm_patch_baseline.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SSMServiceID), CheckDestroy: testAccCheckPatchBaselineDestroy(ctx), @@ -1669,11 +1684,12 @@ func TestAccSSMPatchBaseline_tags_DefaultTags_nullNonOverlappingResourceTag(t *t func TestAccSSMPatchBaseline_tags_ComputedTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) + var v ssm.GetPatchBaselineOutput resourceName := "aws_ssm_patch_baseline.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SSMServiceID), CheckDestroy: testAccCheckPatchBaselineDestroy(ctx), @@ -1724,11 +1740,12 @@ func TestAccSSMPatchBaseline_tags_ComputedTag_OnCreate(t *testing.T) { func TestAccSSMPatchBaseline_tags_ComputedTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + var v ssm.GetPatchBaselineOutput resourceName := "aws_ssm_patch_baseline.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SSMServiceID), CheckDestroy: testAccCheckPatchBaselineDestroy(ctx), @@ -1821,11 +1838,12 @@ func TestAccSSMPatchBaseline_tags_ComputedTag_OnUpdate_Add(t *testing.T) { func TestAccSSMPatchBaseline_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + var v 
ssm.GetPatchBaselineOutput resourceName := "aws_ssm_patch_baseline.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SSMServiceID), CheckDestroy: testAccCheckPatchBaselineDestroy(ctx), @@ -1908,11 +1926,12 @@ func TestAccSSMPatchBaseline_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { func TestAccSSMPatchBaseline_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { ctx := acctest.Context(t) + var v ssm.GetPatchBaselineOutput resourceName := "aws_ssm_patch_baseline.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SSMServiceID), CheckDestroy: testAccCheckPatchBaselineDestroy(ctx), @@ -2070,11 +2089,12 @@ func TestAccSSMPatchBaseline_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { func TestAccSSMPatchBaseline_tags_IgnoreTags_Overlap_ResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v ssm.GetPatchBaselineOutput resourceName := "aws_ssm_patch_baseline.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SSMServiceID), CheckDestroy: testAccCheckPatchBaselineDestroy(ctx), diff --git a/internal/service/ssm/patch_baseline_test.go b/internal/service/ssm/patch_baseline_test.go index ed27ddee2de2..760454617d8b 100644 --- a/internal/service/ssm/patch_baseline_test.go +++ 
b/internal/service/ssm/patch_baseline_test.go @@ -405,6 +405,47 @@ func TestAccSSMPatchBaseline_rejectPatchesAction(t *testing.T) { }) } +func TestAccSSMPatchBaseline_availableSecurityUpdatesComplianceStatus(t *testing.T) { + ctx := acctest.Context(t) + var before, after ssm.GetPatchBaselineOutput + name := sdkacctest.RandString(10) + resourceName := "aws_ssm_patch_baseline.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.SSMServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckPatchBaselineDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccPatchBaselineConfig_availableSecurityUpdatesComplianceStatus(name, string(awstypes.PatchComplianceStatusCompliant)), + Check: resource.ComposeTestCheckFunc( + testAccCheckPatchBaselineExists(ctx, resourceName, &before), + acctest.MatchResourceAttrRegionalARN(ctx, resourceName, names.AttrARN, "ssm", regexache.MustCompile(`patchbaseline/pb-.+`)), + resource.TestCheckResourceAttr(resourceName, "available_security_updates_compliance_status", string(awstypes.PatchComplianceStatusCompliant)), + resource.TestCheckResourceAttr(resourceName, names.AttrDescription, "Baseline"), + resource.TestCheckResourceAttr(resourceName, names.AttrName, fmt.Sprintf("patch-baseline-%s", name)), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccPatchBaselineConfig_availableSecurityUpdatesComplianceStatus(name, string(awstypes.PatchComplianceStatusNonCompliant)), + Check: resource.ComposeTestCheckFunc( + testAccCheckPatchBaselineExists(ctx, resourceName, &after), + acctest.MatchResourceAttrRegionalARN(ctx, resourceName, names.AttrARN, "ssm", regexache.MustCompile(`patchbaseline/pb-.+`)), + resource.TestCheckResourceAttr(resourceName, "available_security_updates_compliance_status", string(awstypes.PatchComplianceStatusNonCompliant)), 
+ resource.TestCheckResourceAttr(resourceName, names.AttrDescription, "Baseline"), + resource.TestCheckResourceAttr(resourceName, names.AttrName, fmt.Sprintf("patch-baseline-%s", name)), + ), + }, + }, + }) +} + // testAccSSMPatchBaseline_deleteDefault needs to be serialized with the other // Default Patch Baseline acceptance tests because it sets the default patch baseline func testAccSSMPatchBaseline_deleteDefault(t *testing.T) { @@ -768,3 +809,31 @@ resource "aws_ssm_patch_baseline" "test" { } `, rName) } + +func testAccPatchBaselineConfig_availableSecurityUpdatesComplianceStatus(rName, complianceStatus string) string { + return fmt.Sprintf(` +resource "aws_ssm_patch_baseline" "test" { + name = "patch-baseline-%[1]s" + operating_system = "WINDOWS" + description = "Baseline" + approved_patches_compliance_level = "CRITICAL" + available_security_updates_compliance_status = "%[2]s" + approval_rule { + approve_after_days = 7 + compliance_level = "CRITICAL" + patch_filter { + key = "PRODUCT" + values = ["WindowsServer2019", "WindowsServer2022", "MicrosoftDefenderAntivirus"] + } + patch_filter { + key = "CLASSIFICATION" + values = ["CriticalUpdates", "FeaturePacks", "SecurityUpdates", "Updates", "UpdateRollups"] + } + patch_filter { + key = "MSRC_SEVERITY" + values = ["*"] + } + } +} +`, rName, complianceStatus) +} diff --git a/internal/service/ssm/resource_data_sync.go b/internal/service/ssm/resource_data_sync.go index 9194445b7dbc..7fb14f57d7bf 100644 --- a/internal/service/ssm/resource_data_sync.go +++ b/internal/service/ssm/resource_data_sync.go @@ -96,7 +96,7 @@ func resourceResourceDataSyncCreate(ctx context.Context, d *schema.ResourceData, const ( timeout = 1 * time.Minute ) - _, err := tfresource.RetryWhenIsAErrorMessageContains[*awstypes.ResourceDataSyncInvalidConfigurationException](ctx, timeout, func() (any, error) { + _, err := tfresource.RetryWhenIsAErrorMessageContains[any, *awstypes.ResourceDataSyncInvalidConfigurationException](ctx, timeout, func(ctx 
context.Context) (any, error) { return conn.CreateResourceDataSync(ctx, input) }, "S3 write failed for bucket") diff --git a/internal/service/ssm/service_endpoint_resolver_gen.go b/internal/service/ssm/service_endpoint_resolver_gen.go index 3c998074a290..137dfb9b2542 100644 --- a/internal/service/ssm/service_endpoint_resolver_gen.go +++ b/internal/service/ssm/service_endpoint_resolver_gen.go @@ -62,7 +62,7 @@ func (r resolverV2) ResolveEndpoint(ctx context.Context, params ssm.EndpointPara }) params.UseFIPS = aws.Bool(false) } else { - err = fmt.Errorf("looking up ssm endpoint %q: %s", hostname, err) + err = fmt.Errorf("looking up ssm endpoint %q: %w", hostname, err) return } } else { diff --git a/internal/service/ssm/service_endpoints_gen_test.go b/internal/service/ssm/service_endpoints_gen_test.go index 81db15a0519c..65cdfb7411e7 100644 --- a/internal/service/ssm/service_endpoints_gen_test.go +++ b/internal/service/ssm/service_endpoints_gen_test.go @@ -521,7 +521,7 @@ func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { ) } -var errCancelOperation = fmt.Errorf("Test: Canceling request") +var errCancelOperation = errors.New("Test: Canceling request") func addCancelRequestMiddleware() func(*middleware.Stack) error { return func(stack *middleware.Stack) error { diff --git a/internal/service/ssm/service_package_gen.go b/internal/service/ssm/service_package_gen.go index 9e60e69c0d40..4c89804593fb 100644 --- a/internal/service/ssm/service_package_gen.go +++ b/internal/service/ssm/service_package_gen.go @@ -7,7 +7,6 @@ import ( "unique" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/ssm" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -102,7 +101,11 @@ func (p *servicePackage) SDKResources(ctx context.Context) []*inttypes.ServicePa IdentifierAttribute: names.AttrID, ResourceType: "Association", }), - Region: 
unique.Make(inttypes.ResourceRegionDefault()), + Region: unique.Make(inttypes.ResourceRegionDefault()), + Identity: inttypes.RegionalSingleParameterIdentity(names.AttrAssociationID), + Import: inttypes.SDKv2Import{ + WrappedImport: true, + }, }, { Factory: resourceDefaultPatchBaseline, @@ -118,7 +121,11 @@ func (p *servicePackage) SDKResources(ctx context.Context) []*inttypes.ServicePa IdentifierAttribute: names.AttrID, ResourceType: "Document", }), - Region: unique.Make(inttypes.ResourceRegionDefault()), + Region: unique.Make(inttypes.ResourceRegionDefault()), + Identity: inttypes.RegionalSingleParameterIdentity(names.AttrName), + Import: inttypes.SDKv2Import{ + WrappedImport: true, + }, }, { Factory: resourceMaintenanceWindow, @@ -128,19 +135,39 @@ func (p *servicePackage) SDKResources(ctx context.Context) []*inttypes.ServicePa IdentifierAttribute: names.AttrID, ResourceType: "MaintenanceWindow", }), - Region: unique.Make(inttypes.ResourceRegionDefault()), + Region: unique.Make(inttypes.ResourceRegionDefault()), + Identity: inttypes.RegionalSingleParameterIdentity(names.AttrID), + Import: inttypes.SDKv2Import{ + WrappedImport: true, + }, }, { Factory: resourceMaintenanceWindowTarget, TypeName: "aws_ssm_maintenance_window_target", Name: "Maintenance Window Target", Region: unique.Make(inttypes.ResourceRegionDefault()), + Identity: inttypes.RegionalParameterizedIdentity([]inttypes.IdentityAttribute{ + inttypes.StringIdentityAttribute("window_id", true), + inttypes.StringIdentityAttribute(names.AttrID, true), + }), + Import: inttypes.SDKv2Import{ + WrappedImport: true, + ImportID: maintenanceWindowTargetImportID{}, + }, }, { Factory: resourceMaintenanceWindowTask, TypeName: "aws_ssm_maintenance_window_task", Name: "Maintenance Window Task", Region: unique.Make(inttypes.ResourceRegionDefault()), + Identity: inttypes.RegionalParameterizedIdentity([]inttypes.IdentityAttribute{ + inttypes.StringIdentityAttribute("window_id", true), + 
inttypes.StringIdentityAttribute(names.AttrID, true), + }), + Import: inttypes.SDKv2Import{ + WrappedImport: true, + ImportID: maintenanceWindowTaskImportID{}, + }, }, { Factory: resourceParameter, @@ -150,7 +177,11 @@ func (p *servicePackage) SDKResources(ctx context.Context) []*inttypes.ServicePa IdentifierAttribute: names.AttrID, ResourceType: "Parameter", }), - Region: unique.Make(inttypes.ResourceRegionDefault()), + Region: unique.Make(inttypes.ResourceRegionDefault()), + Identity: inttypes.RegionalSingleParameterIdentity(names.AttrName), + Import: inttypes.SDKv2Import{ + CustomImport: true, + }, }, { Factory: resourcePatchBaseline, @@ -160,7 +191,11 @@ func (p *servicePackage) SDKResources(ctx context.Context) []*inttypes.ServicePa IdentifierAttribute: names.AttrID, ResourceType: "PatchBaseline", }), - Region: unique.Make(inttypes.ResourceRegionDefault()), + Region: unique.Make(inttypes.ResourceRegionDefault()), + Identity: inttypes.RegionalSingleParameterIdentity(names.AttrID), + Import: inttypes.SDKv2Import{ + WrappedImport: true, + }, }, { Factory: resourcePatchGroup, @@ -206,7 +241,7 @@ func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) ( func(o *ssm.Options) { if inContext, ok := conns.FromContext(ctx); ok && inContext.VCREnabled() { tflog.Info(ctx, "overriding retry behavior to immediately return VCR errors") - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(vcr.InteractionNotFoundRetryableFunc)) + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), vcr.InteractionNotFoundRetryableFunc) } }, withExtraOptions(ctx, p, config), diff --git a/internal/service/ssm/service_setting.go b/internal/service/ssm/service_setting.go index 46db7c594364..470a374b10a7 100644 --- a/internal/service/ssm/service_setting.go +++ b/internal/service/ssm/service_setting.go @@ -8,16 +8,20 @@ import ( "log" "time" + "github.com/YakDriver/regexache" "github.com/aws/aws-sdk-go-v2/aws" + 
"github.com/aws/aws-sdk-go-v2/aws/arn" "github.com/aws/aws-sdk-go-v2/service/ssm" awstypes "github.com/aws/aws-sdk-go-v2/service/ssm/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + "github.com/hashicorp/terraform-provider-aws/internal/verify" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -41,6 +45,10 @@ func resourceServiceSetting() *schema.Resource { "setting_id": { Type: schema.TypeString, Required: true, + ValidateFunc: validation.Any( + verify.ValidARN, + validation.StringMatch(regexache.MustCompile(`^/ssm/`), "setting_id must begin with '/ssm/'"), + ), }, "setting_value": { Type: schema.TypeString, @@ -56,21 +64,27 @@ func resourceServiceSetting() *schema.Resource { func resourceServiceSettingUpdate(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).SSMClient(ctx) + c := meta.(*conns.AWSClient) + conn := c.SSMClient(ctx) settingID := d.Get("setting_id").(string) - input := &ssm.UpdateServiceSettingInput{ + input := ssm.UpdateServiceSettingInput{ SettingId: aws.String(settingID), SettingValue: aws.String(d.Get("setting_value").(string)), } - _, err := conn.UpdateServiceSetting(ctx, input) + _, err := conn.UpdateServiceSetting(ctx, &input) if err != nil { return sdkdiag.AppendErrorf(diags, "updating SSM Service Setting (%s): %s", settingID, err) } - d.SetId(settingID) + // While settingID can be either a full ARN or an ID with "/ssm/" prefix, id is always ARN. 
+ if arn.IsARN(settingID) { + d.SetId(settingID) + } else { + d.SetId(c.RegionalARN(ctx, "ssm", "servicesetting"+settingID)) + } if _, err := waitServiceSettingUpdated(ctx, conn, d.Id(), d.Timeout(schema.TimeoutUpdate)); err != nil { return sdkdiag.AppendErrorf(diags, "waiting for SSM Service Setting (%s) update: %s", d.Id(), err) @@ -96,9 +110,15 @@ func resourceServiceSettingRead(ctx context.Context, d *schema.ResourceData, met } d.Set(names.AttrARN, output.ARN) - // AWS SSM service setting API requires the entire ARN as input, - // but setting_id in the output is only a part of ARN. - d.Set("setting_id", output.ARN) + // setting_id begins with "/ssm/" prefix, according to the AWS documentation + // https://docs.aws.amazon.com/systems-manager/latest/APIReference/API_GetServiceSetting.html#API_GetServiceSetting_RequestSyntax + // However, the full ARN format can be accepted by the AWS API as well and the first implementation of this resource assumed the full ARN format for setting_id. + // For backwards compatibility, support both formats. 
+ if arn.IsARN(d.Get("setting_id").(string)) { + d.Set("setting_id", output.ARN) + } else { + d.Set("setting_id", output.SettingId) + } d.Set("setting_value", output.SettingValue) d.Set(names.AttrStatus, output.Status) @@ -110,9 +130,10 @@ func resourceServiceSettingDelete(ctx context.Context, d *schema.ResourceData, m conn := meta.(*conns.AWSClient).SSMClient(ctx) log.Printf("[DEBUG] Deleting SSM Service Setting: %s", d.Id()) - _, err := conn.ResetServiceSetting(ctx, &ssm.ResetServiceSettingInput{ + input := ssm.ResetServiceSettingInput{ SettingId: aws.String(d.Id()), - }) + } + _, err := conn.ResetServiceSetting(ctx, &input) if err != nil { return sdkdiag.AppendErrorf(diags, "deleting SSM Service Setting (%s): %s", d.Id(), err) @@ -126,11 +147,11 @@ func resourceServiceSettingDelete(ctx context.Context, d *schema.ResourceData, m } func findServiceSettingByID(ctx context.Context, conn *ssm.Client, id string) (*awstypes.ServiceSetting, error) { - input := &ssm.GetServiceSettingInput{ + input := ssm.GetServiceSettingInput{ SettingId: aws.String(id), } - output, err := conn.GetServiceSetting(ctx, input) + output, err := conn.GetServiceSetting(ctx, &input) if errs.IsA[*awstypes.ServiceSettingNotFound](err) { return nil, &retry.NotFoundError{ diff --git a/internal/service/ssm/service_setting_test.go b/internal/service/ssm/service_setting_test.go index 59abcde855c9..bdb6cde2562b 100644 --- a/internal/service/ssm/service_setting_test.go +++ b/internal/service/ssm/service_setting_test.go @@ -11,6 +11,7 @@ import ( "github.com/aws/aws-sdk-go-v2/aws" awstypes "github.com/aws/aws-sdk-go-v2/service/ssm/types" "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/plancheck" "github.com/hashicorp/terraform-plugin-testing/terraform" "github.com/hashicorp/terraform-provider-aws/internal/acctest" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -19,12 +20,24 @@ import ( 
"github.com/hashicorp/terraform-provider-aws/names" ) -func TestAccSSMServiceSetting_basic(t *testing.T) { +func TestAccSSMServiceSetting_serial(t *testing.T) { + t.Parallel() + + testCases := map[string]func(t *testing.T){ + acctest.CtBasic: testAccServiceSetting_basic, + "upgradeFromV6_5_0": testAccServiceSetting_upgradeFromV6_5_0, + } + + acctest.RunSerialTests1Level(t, testCases, 0) +} + +func testAccServiceSetting_basic(t *testing.T) { ctx := acctest.Context(t) var setting awstypes.ServiceSetting resourceName := "aws_ssm_service_setting.test" + settingID := "/ssm/parameter-store/high-throughput-enabled" - resource.ParallelTest(t, resource.TestCase{ + resource.Test(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SSMServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -34,7 +47,9 @@ func TestAccSSMServiceSetting_basic(t *testing.T) { Config: testAccServiceSettingConfig_basic(acctest.CtFalse), Check: resource.ComposeTestCheckFunc( testAccServiceSettingExists(ctx, resourceName, &setting), + resource.TestCheckResourceAttr(resourceName, "setting_id", settingID), resource.TestCheckResourceAttr(resourceName, "setting_value", acctest.CtFalse), + resource.TestCheckResourceAttrPair(resourceName, names.AttrID, resourceName, names.AttrARN), ), }, { @@ -46,8 +61,83 @@ func TestAccSSMServiceSetting_basic(t *testing.T) { Config: testAccServiceSettingConfig_basic(acctest.CtTrue), Check: resource.ComposeTestCheckFunc( testAccServiceSettingExists(ctx, resourceName, &setting), + resource.TestCheckResourceAttr(resourceName, "setting_id", settingID), + resource.TestCheckResourceAttr(resourceName, "setting_value", acctest.CtTrue), + resource.TestCheckResourceAttrPair(resourceName, names.AttrID, resourceName, names.AttrARN), + ), + }, + { + Config: testAccServiceSettingConfig_settingIDByARN(acctest.CtFalse), + Check: resource.ComposeTestCheckFunc( + testAccServiceSettingExists(ctx, resourceName, 
&setting), + acctest.CheckResourceAttrRegionalARN(ctx, resourceName, "setting_id", "ssm", "servicesetting"+settingID), + resource.TestCheckResourceAttr(resourceName, "setting_value", acctest.CtFalse), + resource.TestCheckResourceAttrPair(resourceName, names.AttrID, resourceName, names.AttrARN), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateCheck: acctest.ImportCheckResourceAttr("setting_id", settingID), + ImportStateVerifyIgnore: []string{ + "setting_id", + }, + }, + { + Config: testAccServiceSettingConfig_settingIDByARN(acctest.CtTrue), + Check: resource.ComposeTestCheckFunc( + testAccServiceSettingExists(ctx, resourceName, &setting), + acctest.CheckResourceAttrRegionalARN(ctx, resourceName, "setting_id", "ssm", "servicesetting"+settingID), resource.TestCheckResourceAttr(resourceName, "setting_value", acctest.CtTrue), + resource.TestCheckResourceAttrPair(resourceName, names.AttrID, resourceName, names.AttrARN), + ), + }, + }, + }) +} + +func testAccServiceSetting_upgradeFromV6_5_0(t *testing.T) { + ctx := acctest.Context(t) + var setting awstypes.ServiceSetting + resourceName := "aws_ssm_service_setting.test" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.SSMServiceID), + CheckDestroy: testAccCheckServiceSettingDestroy(ctx), + Steps: []resource.TestStep{ + { + ExternalProviders: map[string]resource.ExternalProvider{ + "aws": { + Source: "hashicorp/aws", + VersionConstraint: "6.5.0", + }, + }, + Config: testAccServiceSettingConfig_settingIDByARN(acctest.CtFalse), + Check: resource.ComposeTestCheckFunc( + testAccServiceSettingExists(ctx, resourceName, &setting), ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Config: 
testAccServiceSettingConfig_settingIDByARN(acctest.CtFalse), + Check: resource.ComposeTestCheckFunc( + testAccServiceSettingExists(ctx, resourceName, &setting), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, }, }, }) @@ -106,6 +196,15 @@ func testAccServiceSettingExists(ctx context.Context, n string, v *awstypes.Serv func testAccServiceSettingConfig_basic(settingValue string) string { return fmt.Sprintf(` +resource "aws_ssm_service_setting" "test" { + setting_id = "/ssm/parameter-store/high-throughput-enabled" + setting_value = %[1]q +} +`, settingValue) +} + +func testAccServiceSettingConfig_settingIDByARN(settingValue string) string { + return fmt.Sprintf(` data "aws_partition" "current" {} data "aws_region" "current" {} data "aws_caller_identity" "current" {} diff --git a/internal/service/ssm/sweep.go b/internal/service/ssm/sweep.go index fc03c4702ebf..28450d725e7d 100644 --- a/internal/service/ssm/sweep.go +++ b/internal/service/ssm/sweep.go @@ -117,7 +117,7 @@ func sweepMaintenanceWindows(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.SSMClient(ctx) input := &ssm.DescribeMaintenanceWindowsInput{} diff --git a/internal/service/ssm/tags_gen.go b/internal/service/ssm/tags_gen.go index f8370e5d49f8..21e0c0997101 100644 --- a/internal/service/ssm/tags_gen.go +++ b/internal/service/ssm/tags_gen.go @@ -3,8 +3,8 @@ package ssm import ( "context" - "fmt" + "github.com/YakDriver/smarterr" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/ssm" awstypes "github.com/aws/aws-sdk-go-v2/service/ssm/types" @@ 
-94,7 +94,7 @@ func updateTags(ctx context.Context, conn *ssm.Client, identifier, resourceType _, err := conn.RemoveTagsFromResource(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("untagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } @@ -110,7 +110,7 @@ func updateTags(ctx context.Context, conn *ssm.Client, identifier, resourceType _, err := conn.AddTagsToResource(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("tagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } diff --git a/internal/service/ssm/testdata/Association/basic/main_gen.tf b/internal/service/ssm/testdata/Association/basic/main_gen.tf new file mode 100644 index 000000000000..76597c7b91c3 --- /dev/null +++ b/internal/service/ssm/testdata/Association/basic/main_gen.tf @@ -0,0 +1,43 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resource "aws_ssm_association" "test" { + name = aws_ssm_document.test.name + schedule_expression = "cron(0 16 ? * WED *)" + + targets { + key = "tag:Name" + values = ["acceptanceTest"] + } +} + +resource "aws_ssm_document" "test" { + name = var.rName + document_type = "Command" + + content = < 0 { + return tags + } + } + + return nil +} + +// setTagsOutSlice sets ssmquicksetup service tags in Context. 
+func setTagsOutSlice(ctx context.Context, tags []awstypes.TagEntry) { + if inContext, ok := tftags.FromContext(ctx); ok { + inContext.TagsOut = option.Some(KeyValueTagsSlice(ctx, tags)) + } +} diff --git a/internal/service/ssmsap/service_endpoint_resolver_gen.go b/internal/service/ssmsap/service_endpoint_resolver_gen.go index bb2cb6c76f46..10e408a653c3 100644 --- a/internal/service/ssmsap/service_endpoint_resolver_gen.go +++ b/internal/service/ssmsap/service_endpoint_resolver_gen.go @@ -62,7 +62,7 @@ func (r resolverV2) ResolveEndpoint(ctx context.Context, params ssmsap.EndpointP }) params.UseFIPS = aws.Bool(false) } else { - err = fmt.Errorf("looking up ssmsap endpoint %q: %s", hostname, err) + err = fmt.Errorf("looking up ssmsap endpoint %q: %w", hostname, err) return } } else { diff --git a/internal/service/ssmsap/service_endpoints_gen_test.go b/internal/service/ssmsap/service_endpoints_gen_test.go index d810d7b96549..6f7ace60fed2 100644 --- a/internal/service/ssmsap/service_endpoints_gen_test.go +++ b/internal/service/ssmsap/service_endpoints_gen_test.go @@ -521,7 +521,7 @@ func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { ) } -var errCancelOperation = fmt.Errorf("Test: Canceling request") +var errCancelOperation = errors.New("Test: Canceling request") func addCancelRequestMiddleware() func(*middleware.Stack) error { return func(stack *middleware.Stack) error { diff --git a/internal/service/ssmsap/service_package_gen.go b/internal/service/ssmsap/service_package_gen.go index 329e2f5fa3c9..7915f024cc9c 100644 --- a/internal/service/ssmsap/service_package_gen.go +++ b/internal/service/ssmsap/service_package_gen.go @@ -6,7 +6,6 @@ import ( "context" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/ssmsap" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -56,7 +55,7 @@ func (p *servicePackage) NewClient(ctx 
context.Context, config map[string]any) ( func(o *ssmsap.Options) { if inContext, ok := conns.FromContext(ctx); ok && inContext.VCREnabled() { tflog.Info(ctx, "overriding retry behavior to immediately return VCR errors") - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(vcr.InteractionNotFoundRetryableFunc)) + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), vcr.InteractionNotFoundRetryableFunc) } }, withExtraOptions(ctx, p, config), diff --git a/internal/service/sso/service_endpoint_resolver_gen.go b/internal/service/sso/service_endpoint_resolver_gen.go index 95ed5d597c30..554798a11407 100644 --- a/internal/service/sso/service_endpoint_resolver_gen.go +++ b/internal/service/sso/service_endpoint_resolver_gen.go @@ -62,7 +62,7 @@ func (r resolverV2) ResolveEndpoint(ctx context.Context, params sso.EndpointPara }) params.UseFIPS = aws.Bool(false) } else { - err = fmt.Errorf("looking up sso endpoint %q: %s", hostname, err) + err = fmt.Errorf("looking up sso endpoint %q: %w", hostname, err) return } } else { diff --git a/internal/service/sso/service_endpoints_gen_test.go b/internal/service/sso/service_endpoints_gen_test.go index 3384ce400090..5a8842f2eee8 100644 --- a/internal/service/sso/service_endpoints_gen_test.go +++ b/internal/service/sso/service_endpoints_gen_test.go @@ -523,7 +523,7 @@ func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { ) } -var errCancelOperation = fmt.Errorf("Test: Canceling request") +var errCancelOperation = errors.New("Test: Canceling request") func addCancelRequestMiddleware() func(*middleware.Stack) error { return func(stack *middleware.Stack) error { diff --git a/internal/service/sso/service_package_gen.go b/internal/service/sso/service_package_gen.go index 57892476c81e..c93ace081806 100644 --- a/internal/service/sso/service_package_gen.go +++ b/internal/service/sso/service_package_gen.go @@ -6,7 +6,6 @@ import ( "context" 
"github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/sso" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -56,7 +55,7 @@ func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) ( func(o *sso.Options) { if inContext, ok := conns.FromContext(ctx); ok && inContext.VCREnabled() { tflog.Info(ctx, "overriding retry behavior to immediately return VCR errors") - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(vcr.InteractionNotFoundRetryableFunc)) + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), vcr.InteractionNotFoundRetryableFunc) } }, withExtraOptions(ctx, p, config), diff --git a/internal/service/ssoadmin/application.go b/internal/service/ssoadmin/application.go index f07ea6b4391b..bab9d359adfa 100644 --- a/internal/service/ssoadmin/application.go +++ b/internal/service/ssoadmin/application.go @@ -37,6 +37,7 @@ import ( // @IdentityFix // @Testing(existsType="github.com/aws/aws-sdk-go-v2/service/ssoadmin;ssoadmin.DescribeApplicationOutput") // @Testing(preCheckWithRegion="github.com/hashicorp/terraform-provider-aws/internal/acctest;acctest.PreCheckSSOAdminInstancesWithRegion") +// @Testing(v60NullValuesError=true) func newApplicationResource(_ context.Context) (resource.ResourceWithConfigure, error) { return &applicationResource{}, nil } diff --git a/internal/service/ssoadmin/application_assignment_configuration.go b/internal/service/ssoadmin/application_assignment_configuration.go index 2d95c9481320..8aa27e56ca82 100644 --- a/internal/service/ssoadmin/application_assignment_configuration.go +++ b/internal/service/ssoadmin/application_assignment_configuration.go @@ -28,6 +28,7 @@ import ( // @ArnIdentity("application_arn", identityDuplicateAttributes="id") // @ArnFormat(global=true) // 
@Testing(preCheckWithRegion="github.com/hashicorp/terraform-provider-aws/internal/acctest;acctest.PreCheckSSOAdminInstancesWithRegion") +// @Testing(v60RefreshError=true) func newApplicationAssignmentConfigurationResource(_ context.Context) (resource.ResourceWithConfigure, error) { return &applicationAssignmentConfigurationResource{}, nil } diff --git a/internal/service/ssoadmin/application_assignment_configuration_identity_gen_test.go b/internal/service/ssoadmin/application_assignment_configuration_identity_gen_test.go index 9f3f59e38f42..da6149411244 100644 --- a/internal/service/ssoadmin/application_assignment_configuration_identity_gen_test.go +++ b/internal/service/ssoadmin/application_assignment_configuration_identity_gen_test.go @@ -15,15 +15,17 @@ import ( "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" "github.com/hashicorp/terraform-plugin-testing/tfversion" "github.com/hashicorp/terraform-provider-aws/internal/acctest" + tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" "github.com/hashicorp/terraform-provider-aws/names" ) func TestAccSSOAdminApplicationAssignmentConfiguration_Identity_Basic(t *testing.T) { ctx := acctest.Context(t) + resourceName := "aws_ssoadmin_application_assignment_configuration.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ TerraformVersionChecks: []tfversion.TerraformVersionCheck{ tfversion.SkipBelow(tfversion.Version1_12_0), }, @@ -112,7 +114,7 @@ func TestAccSSOAdminApplicationAssignmentConfiguration_Identity_RegionOverride(t resourceName := "aws_ssoadmin_application_assignment_configuration.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ TerraformVersionChecks: []tfversion.TerraformVersionCheck{ tfversion.SkipBelow(tfversion.Version1_12_0), }, @@ -229,3 
+231,129 @@ func TestAccSSOAdminApplicationAssignmentConfiguration_Identity_RegionOverride(t }, }) } + +func TestAccSSOAdminApplicationAssignmentConfiguration_Identity_ExistingResource_fromV5(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_ssoadmin_application_assignment_configuration.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckSSOAdminInstancesWithRegion(ctx, t, acctest.Region()) + }, + ErrorCheck: acctest.ErrorCheck(t, names.SSOAdminServiceID), + CheckDestroy: testAccCheckApplicationAssignmentConfigurationDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/ApplicationAssignmentConfiguration/basic_v5.100.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckApplicationAssignmentConfigurationExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/ApplicationAssignmentConfiguration/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckApplicationAssignmentConfigurationExists(ctx, resourceName), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, 
plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + "application_arn": knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New("application_arn")), + }, + }, + }, + }) +} + +func TestAccSSOAdminApplicationAssignmentConfiguration_Identity_ExistingResource_fromV6(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_ssoadmin_application_assignment_configuration.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckSSOAdminInstancesWithRegion(ctx, t, acctest.Region()) + }, + ErrorCheck: acctest.ErrorCheck(t, names.SSOAdminServiceID), + CheckDestroy: testAccCheckApplicationAssignmentConfigurationDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Create in v6.0 + { + ConfigDirectory: config.StaticDirectory("testdata/ApplicationAssignmentConfiguration/basic_v6.0.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckApplicationAssignmentConfigurationExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + "application_arn": knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New("application_arn")), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: 
config.StaticDirectory("testdata/ApplicationAssignmentConfiguration/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckApplicationAssignmentConfigurationExists(ctx, resourceName), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + "application_arn": knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New("application_arn")), + }, + }, + }, + }) +} diff --git a/internal/service/ssoadmin/application_assignment_configuration_test.go b/internal/service/ssoadmin/application_assignment_configuration_test.go index 9c7f15d95c38..135c6fbed5ad 100644 --- a/internal/service/ssoadmin/application_assignment_configuration_test.go +++ b/internal/service/ssoadmin/application_assignment_configuration_test.go @@ -12,13 +12,8 @@ import ( "github.com/aws/aws-sdk-go-v2/service/ssoadmin/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" - "github.com/hashicorp/terraform-plugin-testing/plancheck" - "github.com/hashicorp/terraform-plugin-testing/statecheck" "github.com/hashicorp/terraform-plugin-testing/terraform" - "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" - "github.com/hashicorp/terraform-plugin-testing/tfversion" "github.com/hashicorp/terraform-provider-aws/internal/acctest" - tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" 
"github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/create" "github.com/hashicorp/terraform-provider-aws/internal/errs" @@ -132,94 +127,6 @@ func TestAccSSOAdminApplicationAssignmentConfiguration_update(t *testing.T) { }) } -func TestAccSSOAdminApplicationAssignmentConfiguration_Identity_ExistingResource_fromV5(t *testing.T) { - ctx := acctest.Context(t) - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resourceName := "aws_ssoadmin_application_assignment_configuration.test" - - resource.ParallelTest(t, resource.TestCase{ - TerraformVersionChecks: []tfversion.TerraformVersionCheck{ - tfversion.SkipBelow(tfversion.Version1_12_0), - }, - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckSSOAdminInstances(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, names.SSOAdminServiceID), - CheckDestroy: testAccCheckApplicationAssignmentConfigurationDestroy(ctx), - Steps: []resource.TestStep{ - { - ExternalProviders: map[string]resource.ExternalProvider{ - "aws": { - Source: "hashicorp/aws", - VersionConstraint: "5.100.0", - }, - }, - Config: testAccApplicationAssignmentConfigurationConfig_basic_v5(rName, true), - ConfigStateChecks: []statecheck.StateCheck{ - tfstatecheck.ExpectNoIdentity(resourceName), - }, - }, - { - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - Config: testAccApplicationAssignmentConfigurationConfig_basic(rName, true), - ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - PostApplyPostRefresh: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - }, - ConfigStateChecks: []statecheck.StateCheck{ - statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New("application_arn")), - }, - }, - }, - }) -} - -func 
TestAccSSOAdminApplicationAssignmentConfiguration_Identity_ExistingResource_fromV6(t *testing.T) { - ctx := acctest.Context(t) - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resourceName := "aws_ssoadmin_application_assignment_configuration.test" - - resource.ParallelTest(t, resource.TestCase{ - TerraformVersionChecks: []tfversion.TerraformVersionCheck{ - tfversion.SkipBelow(tfversion.Version1_12_0), - }, - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckSSOAdminInstances(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, names.SSOAdminServiceID), - CheckDestroy: testAccCheckApplicationAssignmentConfigurationDestroy(ctx), - Steps: []resource.TestStep{ - { - ExternalProviders: map[string]resource.ExternalProvider{ - "aws": { - Source: "hashicorp/aws", - VersionConstraint: "6.0.0", - }, - }, - Config: testAccApplicationAssignmentConfigurationConfig_basic(rName, true), - ConfigStateChecks: []statecheck.StateCheck{ - statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New("application_arn")), - }, - }, - { - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - Config: testAccApplicationAssignmentConfigurationConfig_basic(rName, true), - ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - PostApplyPostRefresh: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - }, - ConfigStateChecks: []statecheck.StateCheck{ - statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New("application_arn")), - }, - }, - }, - }) -} - func testAccCheckApplicationAssignmentConfigurationDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { conn := acctest.Provider.Meta().(*conns.AWSClient).SSOAdminClient(ctx) @@ -282,20 +189,3 @@ resource "aws_ssoadmin_application_assignment_configuration" "test" { } `, rName, 
testAccApplicationProviderARN, assignmentRequired) } - -func testAccApplicationAssignmentConfigurationConfig_basic_v5(rName string, assignmentRequired bool) string { - return fmt.Sprintf(` -data "aws_ssoadmin_instances" "test" {} - -resource "aws_ssoadmin_application" "test" { - name = %[1]q - application_provider_arn = %[2]q - instance_arn = tolist(data.aws_ssoadmin_instances.test.arns)[0] -} - -resource "aws_ssoadmin_application_assignment_configuration" "test" { - application_arn = aws_ssoadmin_application.test.application_arn - assignment_required = %[3]t -} -`, rName, testAccApplicationProviderARN, assignmentRequired) -} diff --git a/internal/service/ssoadmin/application_identity_gen_test.go b/internal/service/ssoadmin/application_identity_gen_test.go index 170a4da0b78c..f33b86f5212f 100644 --- a/internal/service/ssoadmin/application_identity_gen_test.go +++ b/internal/service/ssoadmin/application_identity_gen_test.go @@ -16,6 +16,7 @@ import ( "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" "github.com/hashicorp/terraform-plugin-testing/tfversion" "github.com/hashicorp/terraform-provider-aws/internal/acctest" + tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -26,7 +27,7 @@ func TestAccSSOAdminApplication_Identity_Basic(t *testing.T) { resourceName := "aws_ssoadmin_application.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ TerraformVersionChecks: []tfversion.TerraformVersionCheck{ tfversion.SkipBelow(tfversion.Version1_12_0), }, @@ -118,7 +119,7 @@ func TestAccSSOAdminApplication_Identity_RegionOverride(t *testing.T) { resourceName := "aws_ssoadmin_application.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ 
TerraformVersionChecks: []tfversion.TerraformVersionCheck{ tfversion.SkipBelow(tfversion.Version1_12_0), }, @@ -239,3 +240,139 @@ func TestAccSSOAdminApplication_Identity_RegionOverride(t *testing.T) { }, }) } + +func TestAccSSOAdminApplication_Identity_ExistingResource(t *testing.T) { + ctx := acctest.Context(t) + + var v ssoadmin.DescribeApplicationOutput + resourceName := "aws_ssoadmin_application.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckSSOAdminInstancesWithRegion(ctx, t, acctest.Region()) + }, + ErrorCheck: acctest.ErrorCheck(t, names.SSOAdminServiceID), + CheckDestroy: testAccCheckApplicationDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/Application/basic_v5.100.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckApplicationExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: v6.0 Identity error + { + ConfigDirectory: config.StaticDirectory("testdata/Application/basic_v6.0.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckApplicationExists(ctx, resourceName, &v), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ 
+ statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + names.AttrARN: knownvalue.Null(), + }), + }, + }, + + // Step 3: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Application/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + }, + }) +} + +func TestAccSSOAdminApplication_Identity_ExistingResource_NoRefresh_NoChange(t *testing.T) { + ctx := acctest.Context(t) + + var v ssoadmin.DescribeApplicationOutput + resourceName := "aws_ssoadmin_application.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckSSOAdminInstancesWithRegion(ctx, t, acctest.Region()) + }, + ErrorCheck: acctest.ErrorCheck(t, names.SSOAdminServiceID), + CheckDestroy: testAccCheckApplicationDestroy(ctx), + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: 
config.StaticDirectory("testdata/Application/basic_v5.100.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckApplicationExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Application/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckApplicationExists(ctx, resourceName, &v), + ), + }, + }, + }) +} diff --git a/internal/service/ssoadmin/application_test.go b/internal/service/ssoadmin/application_test.go index a9c08be1677e..020baa56dd80 100644 --- a/internal/service/ssoadmin/application_test.go +++ b/internal/service/ssoadmin/application_test.go @@ -14,15 +14,11 @@ import ( "github.com/hashicorp/terraform-plugin-testing/compare" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" - "github.com/hashicorp/terraform-plugin-testing/knownvalue" - "github.com/hashicorp/terraform-plugin-testing/plancheck" "github.com/hashicorp/terraform-plugin-testing/statecheck" "github.com/hashicorp/terraform-plugin-testing/terraform" "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" - "github.com/hashicorp/terraform-plugin-testing/tfversion" "github.com/hashicorp/terraform-provider-aws/internal/acctest" tfknownvalue "github.com/hashicorp/terraform-provider-aws/internal/acctest/knownvalue" - tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" "github.com/hashicorp/terraform-provider-aws/internal/conns" tfssoadmin "github.com/hashicorp/terraform-provider-aws/internal/service/ssoadmin" 
"github.com/hashicorp/terraform-provider-aws/internal/tfresource" @@ -295,113 +291,6 @@ func TestAccSSOAdminApplication_tags(t *testing.T) { }) } -func TestAccSSOAdminApplication_Identity_ExistingResource(t *testing.T) { - ctx := acctest.Context(t) - var v ssoadmin.DescribeApplicationOutput - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resourceName := "aws_ssoadmin_application.test" - - resource.ParallelTest(t, resource.TestCase{ - TerraformVersionChecks: []tfversion.TerraformVersionCheck{ - tfversion.SkipBelow(tfversion.Version1_12_0), - }, - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckSSOAdminInstances(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, names.SSOAdminServiceID), - CheckDestroy: testAccCheckApplicationDestroy(ctx), - Steps: []resource.TestStep{ - { - ExternalProviders: map[string]resource.ExternalProvider{ - "aws": { - Source: "hashicorp/aws", - VersionConstraint: "5.100.0", - }, - }, - Config: testAccApplicationConfig_basic(rName, testAccApplicationProviderARN), - Check: resource.ComposeTestCheckFunc( - testAccCheckApplicationExists(ctx, resourceName, &v), - ), - ConfigStateChecks: []statecheck.StateCheck{ - tfstatecheck.ExpectNoIdentity(resourceName), - }, - }, - { - ExternalProviders: map[string]resource.ExternalProvider{ - "aws": { - Source: "hashicorp/aws", - VersionConstraint: "6.0.0", - }, - }, - Config: testAccApplicationConfig_basic(rName, testAccApplicationProviderARN), - Check: resource.ComposeTestCheckFunc( - testAccCheckApplicationExists(ctx, resourceName, &v), - ), - ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - PostApplyPostRefresh: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - }, - ConfigStateChecks: []statecheck.StateCheck{ - statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ - names.AttrRegion: 
knownvalue.StringExact(acctest.Region()), - names.AttrARN: knownvalue.Null(), - }), - }, - }, - { - ExternalProviders: map[string]resource.ExternalProvider{ - "aws": { - Source: "hashicorp/aws", - VersionConstraint: "6.2.0", - }, - }, - Config: testAccApplicationConfig_basic(rName, testAccApplicationProviderARN), - Check: resource.ComposeTestCheckFunc( - testAccCheckApplicationExists(ctx, resourceName, &v), - ), - ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - PostApplyPostRefresh: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - }, - ConfigStateChecks: []statecheck.StateCheck{ - statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ - names.AttrRegion: knownvalue.StringExact(acctest.Region()), - names.AttrARN: knownvalue.Null(), - }), - }, - }, - { - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - Config: testAccApplicationConfig_basic(rName, testAccApplicationProviderARN), - Check: resource.ComposeTestCheckFunc( - testAccCheckApplicationExists(ctx, resourceName, &v), - ), - ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - PostApplyPostRefresh: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - }, - ConfigStateChecks: []statecheck.StateCheck{ - statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ - names.AttrRegion: knownvalue.StringExact(acctest.Region()), - names.AttrARN: knownvalue.NotNull(), - }), - statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), - }, - }, - }, - }) -} - func testAccCheckApplicationDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { conn := 
acctest.Provider.Meta().(*conns.AWSClient).SSOAdminClient(ctx) diff --git a/internal/service/ssoadmin/service_endpoint_resolver_gen.go b/internal/service/ssoadmin/service_endpoint_resolver_gen.go index b05be0b05206..21af504a6558 100644 --- a/internal/service/ssoadmin/service_endpoint_resolver_gen.go +++ b/internal/service/ssoadmin/service_endpoint_resolver_gen.go @@ -62,7 +62,7 @@ func (r resolverV2) ResolveEndpoint(ctx context.Context, params ssoadmin.Endpoin }) params.UseFIPS = aws.Bool(false) } else { - err = fmt.Errorf("looking up ssoadmin endpoint %q: %s", hostname, err) + err = fmt.Errorf("looking up ssoadmin endpoint %q: %w", hostname, err) return } } else { diff --git a/internal/service/ssoadmin/service_endpoints_gen_test.go b/internal/service/ssoadmin/service_endpoints_gen_test.go index 3bf09b559832..30aaea57a856 100644 --- a/internal/service/ssoadmin/service_endpoints_gen_test.go +++ b/internal/service/ssoadmin/service_endpoints_gen_test.go @@ -521,7 +521,7 @@ func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { ) } -var errCancelOperation = fmt.Errorf("Test: Canceling request") +var errCancelOperation = errors.New("Test: Canceling request") func addCancelRequestMiddleware() func(*middleware.Stack) error { return func(stack *middleware.Stack) error { diff --git a/internal/service/ssoadmin/service_package.go b/internal/service/ssoadmin/service_package.go index 573b993a1c0e..ae8f467eb22b 100644 --- a/internal/service/ssoadmin/service_package.go +++ b/internal/service/ssoadmin/service_package.go @@ -10,21 +10,32 @@ import ( "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/ssoadmin" "github.com/aws/aws-sdk-go-v2/service/ssoadmin/types" + "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/errs" + "github.com/hashicorp/terraform-provider-aws/internal/vcr" ) -func (p *servicePackage) 
withExtraOptions(_ context.Context, config map[string]any) []func(*ssoadmin.Options) { +func (p *servicePackage) withExtraOptions(ctx context.Context, config map[string]any) []func(*ssoadmin.Options) { cfg := *(config["aws_sdkv2_config"].(*aws.Config)) return []func(*ssoadmin.Options){ func(o *ssoadmin.Options) { - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(func(err error) aws.Ternary { - if errs.IsA[*types.ConflictException](err) || errs.IsA[*types.ThrottlingException](err) { - return aws.TrueTernary - } - return aws.UnknownTernary // Delegate to configured Retryer. - })) + retryables := []retry.IsErrorRetryable{ + retry.IsErrorRetryableFunc(func(err error) aws.Ternary { + if errs.IsA[*types.ConflictException](err) || errs.IsA[*types.ThrottlingException](err) { + return aws.TrueTernary + } + return aws.UnknownTernary // Delegate to configured Retryer. + }), + } + // Include go-vcr retryable to prevent generated client retryer from being overridden + if inContext, ok := conns.FromContext(ctx); ok && inContext.VCREnabled() { + tflog.Info(ctx, "overriding retry behavior to immediately return VCR errors") + retryables = append(retryables, vcr.InteractionNotFoundRetryableFunc) + } + + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retryables...) 
}, } } diff --git a/internal/service/ssoadmin/service_package_gen.go b/internal/service/ssoadmin/service_package_gen.go index 110661e68e38..a4d6c45d2510 100644 --- a/internal/service/ssoadmin/service_package_gen.go +++ b/internal/service/ssoadmin/service_package_gen.go @@ -7,7 +7,6 @@ import ( "unique" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/ssoadmin" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -192,7 +191,7 @@ func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) ( func(o *ssoadmin.Options) { if inContext, ok := conns.FromContext(ctx); ok && inContext.VCREnabled() { tflog.Info(ctx, "overriding retry behavior to immediately return VCR errors") - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(vcr.InteractionNotFoundRetryableFunc)) + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), vcr.InteractionNotFoundRetryableFunc) } }, withExtraOptions(ctx, p, config), diff --git a/internal/service/ssoadmin/sweep.go b/internal/service/ssoadmin/sweep.go index 444eb5b963c4..078297fe47e5 100644 --- a/internal/service/ssoadmin/sweep.go +++ b/internal/service/ssoadmin/sweep.go @@ -42,7 +42,7 @@ func sweepAccountAssignments(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %w", err) + return fmt.Errorf("getting client: %w", err) } conn := client.SSOAdminClient(ctx) @@ -134,7 +134,7 @@ func sweepApplications(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %w", err) + return fmt.Errorf("getting client: %w", err) } conn := client.SSOAdminClient(ctx) var sweepResources []sweep.Sweepable @@ -196,7 +196,7 @@ func 
sweepPermissionSets(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %w", err) + return fmt.Errorf("getting client: %w", err) } conn := client.SSOAdminClient(ctx) var sweepResources []sweep.Sweepable diff --git a/internal/service/ssoadmin/tags_gen.go b/internal/service/ssoadmin/tags_gen.go index 51bdb5114dad..52bbc6b449ae 100644 --- a/internal/service/ssoadmin/tags_gen.go +++ b/internal/service/ssoadmin/tags_gen.go @@ -3,8 +3,8 @@ package ssoadmin import ( "context" - "fmt" + "github.com/YakDriver/smarterr" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/ssoadmin" awstypes "github.com/aws/aws-sdk-go-v2/service/ssoadmin/types" @@ -32,7 +32,7 @@ func listTags(ctx context.Context, conn *ssoadmin.Client, identifier, resourceTy page, err := pages.NextPage(ctx, optFns...) if err != nil { - return tftags.New(ctx, nil), err + return tftags.New(ctx, nil), smarterr.NewError(err) } output = append(output, page.Tags...) @@ -47,7 +47,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier, res tags, err := listTags(ctx, meta.(*conns.AWSClient).SSOAdminClient(ctx), identifier, resourceType) if err != nil { - return err + return smarterr.NewError(err) } if inContext, ok := tftags.FromContext(ctx); ok { @@ -126,7 +126,7 @@ func updateTags(ctx context.Context, conn *ssoadmin.Client, identifier, resource _, err := conn.UntagResource(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("untagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } @@ -142,7 +142,7 @@ func updateTags(ctx context.Context, conn *ssoadmin.Client, identifier, resource _, err := conn.TagResource(ctx, &input, optFns...) 
if err != nil { - return fmt.Errorf("tagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } diff --git a/internal/service/ssoadmin/testdata/Application/basic_v5.100.0/main_gen.tf b/internal/service/ssoadmin/testdata/Application/basic_v5.100.0/main_gen.tf new file mode 100644 index 000000000000..cccb454e7bc8 --- /dev/null +++ b/internal/service/ssoadmin/testdata/Application/basic_v5.100.0/main_gen.tf @@ -0,0 +1,30 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resource "aws_ssoadmin_application" "test" { + name = var.rName + application_provider_arn = local.test_application_provider_arn + instance_arn = tolist(data.aws_ssoadmin_instances.test.arns)[0] +} + +data "aws_ssoadmin_instances" "test" {} + +locals { + test_application_provider_arn = "arn:aws:sso::aws:applicationProvider/custom" +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "5.100.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/ssoadmin/testdata/Application/basic_v6.0.0/main_gen.tf b/internal/service/ssoadmin/testdata/Application/basic_v6.0.0/main_gen.tf new file mode 100644 index 000000000000..f5276b04f6be --- /dev/null +++ b/internal/service/ssoadmin/testdata/Application/basic_v6.0.0/main_gen.tf @@ -0,0 +1,30 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_ssoadmin_application" "test" { + name = var.rName + application_provider_arn = local.test_application_provider_arn + instance_arn = tolist(data.aws_ssoadmin_instances.test.arns)[0] +} + +data "aws_ssoadmin_instances" "test" {} + +locals { + test_application_provider_arn = "arn:aws:sso::aws:applicationProvider/custom" +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "6.0.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/ssoadmin/testdata/ApplicationAssignmentConfiguration/basic_v5.100.0/main_gen.tf b/internal/service/ssoadmin/testdata/ApplicationAssignmentConfiguration/basic_v5.100.0/main_gen.tf new file mode 100644 index 000000000000..6756f1834853 --- /dev/null +++ b/internal/service/ssoadmin/testdata/ApplicationAssignmentConfiguration/basic_v5.100.0/main_gen.tf @@ -0,0 +1,35 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_ssoadmin_application_assignment_configuration" "test" { + application_arn = aws_ssoadmin_application.test.application_arn + assignment_required = true +} + +resource "aws_ssoadmin_application" "test" { + name = var.rName + application_provider_arn = local.test_application_provider_arn + instance_arn = tolist(data.aws_ssoadmin_instances.test.arns)[0] +} + +data "aws_ssoadmin_instances" "test" {} + +locals { + test_application_provider_arn = "arn:aws:sso::aws:applicationProvider/custom" +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "5.100.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/ssoadmin/testdata/ApplicationAssignmentConfiguration/basic_v6.0.0/main_gen.tf b/internal/service/ssoadmin/testdata/ApplicationAssignmentConfiguration/basic_v6.0.0/main_gen.tf new file mode 100644 index 000000000000..2ef5462bae5a --- /dev/null +++ b/internal/service/ssoadmin/testdata/ApplicationAssignmentConfiguration/basic_v6.0.0/main_gen.tf @@ -0,0 +1,35 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_ssoadmin_application_assignment_configuration" "test" { + application_arn = aws_ssoadmin_application.test.arn + assignment_required = true +} + +resource "aws_ssoadmin_application" "test" { + name = var.rName + application_provider_arn = local.test_application_provider_arn + instance_arn = tolist(data.aws_ssoadmin_instances.test.arns)[0] +} + +data "aws_ssoadmin_instances" "test" {} + +locals { + test_application_provider_arn = "arn:aws:sso::aws:applicationProvider/custom" +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "6.0.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/ssoadmin/testdata/TrustedTokenIssuer/basic_v5.100.0/main_gen.tf b/internal/service/ssoadmin/testdata/TrustedTokenIssuer/basic_v5.100.0/main_gen.tf new file mode 100644 index 000000000000..6cc6a0c7a43a --- /dev/null +++ b/internal/service/ssoadmin/testdata/TrustedTokenIssuer/basic_v5.100.0/main_gen.tf @@ -0,0 +1,35 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_ssoadmin_trusted_token_issuer" "test" { + name = var.rName + instance_arn = tolist(data.aws_ssoadmin_instances.test.arns)[0] + trusted_token_issuer_type = "OIDC_JWT" + + trusted_token_issuer_configuration { + oidc_jwt_configuration { + claim_attribute_path = "email" + identity_store_attribute_path = "emails.value" + issuer_url = "https://example.com" + jwks_retrieval_option = "OPEN_ID_DISCOVERY" + } + } +} + +data "aws_ssoadmin_instances" "test" {} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "5.100.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/ssoadmin/testdata/TrustedTokenIssuer/basic_v6.0.0/main_gen.tf b/internal/service/ssoadmin/testdata/TrustedTokenIssuer/basic_v6.0.0/main_gen.tf new file mode 100644 index 000000000000..8b9b780bddc3 --- /dev/null +++ b/internal/service/ssoadmin/testdata/TrustedTokenIssuer/basic_v6.0.0/main_gen.tf @@ -0,0 +1,35 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_ssoadmin_trusted_token_issuer" "test" { + name = var.rName + instance_arn = tolist(data.aws_ssoadmin_instances.test.arns)[0] + trusted_token_issuer_type = "OIDC_JWT" + + trusted_token_issuer_configuration { + oidc_jwt_configuration { + claim_attribute_path = "email" + identity_store_attribute_path = "emails.value" + issuer_url = "https://example.com" + jwks_retrieval_option = "OPEN_ID_DISCOVERY" + } + } +} + +data "aws_ssoadmin_instances" "test" {} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "6.0.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/ssoadmin/testdata/tmpl/application_assignment_configuration_basic_v5.100.0.gtpl b/internal/service/ssoadmin/testdata/tmpl/application_assignment_configuration_basic_v5.100.0.gtpl new file mode 100644 index 000000000000..71b3755725c0 --- /dev/null +++ b/internal/service/ssoadmin/testdata/tmpl/application_assignment_configuration_basic_v5.100.0.gtpl @@ -0,0 +1,20 @@ +resource "aws_ssoadmin_application_assignment_configuration" "test" { +{{- template "region" }} + application_arn = aws_ssoadmin_application.test.application_arn + assignment_required = true +} + +resource "aws_ssoadmin_application" "test" { +{{- template "region" }} + name = var.rName + application_provider_arn = local.test_application_provider_arn + instance_arn = tolist(data.aws_ssoadmin_instances.test.arns)[0] +} + +data "aws_ssoadmin_instances" "test" { +{{- template "region" -}} +} + +locals { + test_application_provider_arn = "arn:aws:sso::aws:applicationProvider/custom" +} diff --git a/internal/service/ssoadmin/trusted_token_issuer.go b/internal/service/ssoadmin/trusted_token_issuer.go index cd0a202b9e33..090c054948ca 100644 --- a/internal/service/ssoadmin/trusted_token_issuer.go +++ b/internal/service/ssoadmin/trusted_token_issuer.go @@ -39,6 +39,7 @@ 
import ( // @Testing(existsType="github.com/aws/aws-sdk-go-v2/service/ssoadmin;ssoadmin.DescribeTrustedTokenIssuerOutput") // @Testing(preCheckWithRegion="github.com/hashicorp/terraform-provider-aws/internal/acctest;acctest.PreCheckSSOAdminInstancesWithRegion") // @Testing(serialize=true) +// @Testing(preIdentityVersion="v5.100.0") func newTrustedTokenIssuerResource(_ context.Context) (resource.ResourceWithConfigure, error) { return &trustedTokenIssuerResource{}, nil } diff --git a/internal/service/ssoadmin/trusted_token_issuer_identity_gen_test.go b/internal/service/ssoadmin/trusted_token_issuer_identity_gen_test.go index 39b2f9677dd9..3259e6c225c9 100644 --- a/internal/service/ssoadmin/trusted_token_issuer_identity_gen_test.go +++ b/internal/service/ssoadmin/trusted_token_issuer_identity_gen_test.go @@ -16,6 +16,7 @@ import ( "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" "github.com/hashicorp/terraform-plugin-testing/tfversion" "github.com/hashicorp/terraform-provider-aws/internal/acctest" + tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -23,9 +24,10 @@ func testAccSSOAdminTrustedTokenIssuer_IdentitySerial(t *testing.T) { t.Helper() testCases := map[string]func(t *testing.T){ - acctest.CtBasic: testAccSSOAdminTrustedTokenIssuer_Identity_Basic, - "ExistingResource": testAccSSOAdminTrustedTokenIssuer_Identity_ExistingResource, - "RegionOverride": testAccSSOAdminTrustedTokenIssuer_Identity_RegionOverride, + acctest.CtBasic: testAccSSOAdminTrustedTokenIssuer_Identity_Basic, + "ExistingResource": testAccSSOAdminTrustedTokenIssuer_Identity_ExistingResource, + "ExistingResourceNoRefresh": testAccSSOAdminTrustedTokenIssuer_Identity_ExistingResource_NoRefresh_NoChange, + "RegionOverride": testAccSSOAdminTrustedTokenIssuer_Identity_RegionOverride, } acctest.RunSerialTests1Level(t, testCases, 0) @@ -38,7 +40,7 @@ func 
testAccSSOAdminTrustedTokenIssuer_Identity_Basic(t *testing.T) { resourceName := "aws_ssoadmin_trusted_token_issuer.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ TerraformVersionChecks: []tfversion.TerraformVersionCheck{ tfversion.SkipBelow(tfversion.Version1_12_0), }, @@ -127,7 +129,7 @@ func testAccSSOAdminTrustedTokenIssuer_Identity_RegionOverride(t *testing.T) { resourceName := "aws_ssoadmin_trusted_token_issuer.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ TerraformVersionChecks: []tfversion.TerraformVersionCheck{ tfversion.SkipBelow(tfversion.Version1_12_0), }, @@ -244,3 +246,137 @@ func testAccSSOAdminTrustedTokenIssuer_Identity_RegionOverride(t *testing.T) { }, }) } + +func testAccSSOAdminTrustedTokenIssuer_Identity_ExistingResource(t *testing.T) { + ctx := acctest.Context(t) + + var v ssoadmin.DescribeTrustedTokenIssuerOutput + resourceName := "aws_ssoadmin_trusted_token_issuer.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.Test(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckSSOAdminInstancesWithRegion(ctx, t, acctest.Region()) + }, + ErrorCheck: acctest.ErrorCheck(t, names.SSOAdminServiceID), + CheckDestroy: testAccCheckTrustedTokenIssuerDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/TrustedTokenIssuer/basic_v5.100.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTrustedTokenIssuerExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + 
tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: v6.0 Identity set on refresh + { + ConfigDirectory: config.StaticDirectory("testdata/TrustedTokenIssuer/basic_v6.0.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTrustedTokenIssuerExists(ctx, resourceName, &v), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + + // Step 3: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/TrustedTokenIssuer/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + }, + }) +} + +func 
testAccSSOAdminTrustedTokenIssuer_Identity_ExistingResource_NoRefresh_NoChange(t *testing.T) { + ctx := acctest.Context(t) + + var v ssoadmin.DescribeTrustedTokenIssuerOutput + resourceName := "aws_ssoadmin_trusted_token_issuer.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.Test(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckSSOAdminInstancesWithRegion(ctx, t, acctest.Region()) + }, + ErrorCheck: acctest.ErrorCheck(t, names.SSOAdminServiceID), + CheckDestroy: testAccCheckTrustedTokenIssuerDestroy(ctx), + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/TrustedTokenIssuer/basic_v5.100.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTrustedTokenIssuerExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/TrustedTokenIssuer/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + }, + }, + }) +} diff --git a/internal/service/ssoadmin/trusted_token_issuer_test.go b/internal/service/ssoadmin/trusted_token_issuer_test.go index 8aaa46803f9e..708989ca8138 100644 --- a/internal/service/ssoadmin/trusted_token_issuer_test.go +++ b/internal/service/ssoadmin/trusted_token_issuer_test.go @@ -8,19 +8,12 @@ import ( "fmt" "testing" - "github.com/YakDriver/regexache" "github.com/aws/aws-sdk-go-v2/service/ssoadmin" 
"github.com/aws/aws-sdk-go-v2/service/ssoadmin/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" - "github.com/hashicorp/terraform-plugin-testing/knownvalue" - "github.com/hashicorp/terraform-plugin-testing/plancheck" - "github.com/hashicorp/terraform-plugin-testing/statecheck" "github.com/hashicorp/terraform-plugin-testing/terraform" - "github.com/hashicorp/terraform-plugin-testing/tfversion" "github.com/hashicorp/terraform-provider-aws/internal/acctest" - tfknownvalue "github.com/hashicorp/terraform-provider-aws/internal/acctest/knownvalue" - tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" "github.com/hashicorp/terraform-provider-aws/internal/conns" tfssoadmin "github.com/hashicorp/terraform-provider-aws/internal/service/ssoadmin" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" @@ -184,86 +177,6 @@ func testAccSSOAdminTrustedTokenIssuer_tags(t *testing.T) { }) } -func testAccSSOAdminTrustedTokenIssuer_Identity_ExistingResource(t *testing.T) { - ctx := acctest.Context(t) - var application ssoadmin.DescribeTrustedTokenIssuerOutput - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resourceName := "aws_ssoadmin_trusted_token_issuer.test" - - resource.Test(t, resource.TestCase{ - TerraformVersionChecks: []tfversion.TerraformVersionCheck{ - tfversion.SkipBelow(tfversion.Version1_12_0), - }, - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckSSOAdminInstances(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, names.SSOAdminServiceID), - CheckDestroy: testAccCheckTrustedTokenIssuerDestroy(ctx), - Steps: []resource.TestStep{ - { - ExternalProviders: map[string]resource.ExternalProvider{ - "aws": { - Source: "hashicorp/aws", - VersionConstraint: "5.100.0", - }, - }, - Config: testAccTrustedTokenIssuerConfigBase_basic(rName), - Check: resource.ComposeTestCheckFunc( - 
testAccCheckTrustedTokenIssuerExists(ctx, resourceName, &application), - ), - ConfigStateChecks: []statecheck.StateCheck{ - tfstatecheck.ExpectNoIdentity(resourceName), - }, - }, - { - ExternalProviders: map[string]resource.ExternalProvider{ - "aws": { - Source: "hashicorp/aws", - VersionConstraint: "6.0.0", - }, - }, - Config: testAccTrustedTokenIssuerConfigBase_basic(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckTrustedTokenIssuerExists(ctx, resourceName, &application), - ), - ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - PostApplyPostRefresh: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - }, - ConfigStateChecks: []statecheck.StateCheck{ - statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ - names.AttrARN: tfknownvalue.GlobalARNRegexp("sso", regexache.MustCompile(`trustedTokenIssuer/.+`)), - names.AttrRegion: knownvalue.StringExact(acctest.Region()), - }), - }, - }, - { - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - Config: testAccTrustedTokenIssuerConfigBase_basic(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckTrustedTokenIssuerExists(ctx, resourceName, &application), - ), - ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - PostApplyPostRefresh: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - }, - ConfigStateChecks: []statecheck.StateCheck{ - statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ - names.AttrARN: tfknownvalue.GlobalARNRegexp("sso", regexache.MustCompile(`trustedTokenIssuer/.+`)), - names.AttrRegion: knownvalue.StringExact(acctest.Region()), - }), - }, - }, - }, - }) -} - func testAccCheckTrustedTokenIssuerDestroy(ctx 
context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { conn := acctest.Provider.Meta().(*conns.AWSClient).SSOAdminClient(ctx) diff --git a/internal/service/storagegateway/cache_test.go b/internal/service/storagegateway/cache_test.go index a53ceed120c4..0474a448e1cd 100644 --- a/internal/service/storagegateway/cache_test.go +++ b/internal/service/storagegateway/cache_test.go @@ -153,7 +153,7 @@ func testAccCheckCacheExists(ctx context.Context, n string) resource.TestCheckFu } func testAccCacheConfig_fileGateway(rName string) string { - return acctest.ConfigCompose(testAccGatewayConfig_typeFileS3(rName), fmt.Sprintf(` + return acctest.ConfigCompose(testAccGatewayConfig_typeFileS3(rName, rName), fmt.Sprintf(` resource "aws_ebs_volume" "test" { availability_zone = aws_instance.test.availability_zone size = "10" diff --git a/internal/service/storagegateway/cached_iscsi_volume.go b/internal/service/storagegateway/cached_iscsi_volume.go index 2c906b599539..86575edbc0b3 100644 --- a/internal/service/storagegateway/cached_iscsi_volume.go +++ b/internal/service/storagegateway/cached_iscsi_volume.go @@ -220,7 +220,7 @@ func resourceCachediSCSIVolumeDelete(ctx context.Context, d *schema.ResourceData const ( timeout = 2 * time.Minute ) - _, err := tfresource.RetryWhenIsAErrorMessageContains[*awstypes.InvalidGatewayRequestException](ctx, timeout, func() (any, error) { + _, err := tfresource.RetryWhenIsAErrorMessageContains[any, *awstypes.InvalidGatewayRequestException](ctx, timeout, func(ctx context.Context) (any, error) { return conn.DeleteVolume(ctx, &storagegateway.DeleteVolumeInput{ VolumeARN: aws.String(d.Id()), }) diff --git a/internal/service/storagegateway/errors.go b/internal/service/storagegateway/errors.go index 99f064a8cf40..0a911feefdc7 100644 --- a/internal/service/storagegateway/errors.go +++ b/internal/service/storagegateway/errors.go @@ -14,7 +14,6 @@ import ( const ( operationErrCodeFileShareNotFound awstypes.ErrorCode = 
"FileShareNotFound" operationErrCodeFileSystemAssociationNotFound awstypes.ErrorCode = "FileSystemAssociationNotFound" - operationErrCodeGatewayNotFound awstypes.ErrorCode = "GatewayNotFound" ) // operationErrorCode returns the operation error code from the specified error: @@ -34,9 +33,26 @@ func operationErrorCode(err error) awstypes.ErrorCode { return "" } +// The API returns multiple responses for a disconnected gateway. +func isGatewayNotConnectedErr(err error) bool { + if operationErrorCode(err) == awstypes.ErrorCodeGatewayNotConnected { + return true + } + + if tfawserr.ErrCodeEquals(err, string(awstypes.ErrorCodeGatewayNotConnected)) { + return true + } + + if errs.IsAErrorMessageContains[*awstypes.InvalidGatewayRequestException](err, "The specified gateway is not connected") { + return true + } + + return false +} + // The API returns multiple responses for a missing gateway. func isGatewayNotFoundErr(err error) bool { - if operationErrorCode(err) == operationErrCodeGatewayNotFound { + if operationErrorCode(err) == awstypes.ErrorCodeGatewayNotFound { return true } diff --git a/internal/service/storagegateway/exports_test.go b/internal/service/storagegateway/exports_test.go index 83ae1d93f243..dea67e85b820 100644 --- a/internal/service/storagegateway/exports_test.go +++ b/internal/service/storagegateway/exports_test.go @@ -19,6 +19,7 @@ var ( FindCachediSCSIVolumeByARN = findCachediSCSIVolumeByARN FindFileSystemAssociationByARN = findFileSystemAssociationByARN FindGatewayByARN = findGatewayByARN + FindGatewayInfoByARN = findGatewayInfoByARN FindNFSFileShareByARN = findNFSFileShareByARN FindSMBFileShareByARN = findSMBFileShareByARN FindStorediSCSIVolumeByARN = findStorediSCSIVolumeByARN diff --git a/internal/service/storagegateway/gateway.go b/internal/service/storagegateway/gateway.go index d5de019b38a7..dd408b91fc9a 100644 --- a/internal/service/storagegateway/gateway.go +++ b/internal/service/storagegateway/gateway.go @@ -27,6 +27,7 @@ import ( 
"github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" "github.com/hashicorp/terraform-provider-aws/internal/flex" "github.com/hashicorp/terraform-provider-aws/internal/sdkv2/types/nullable" + tfslices "github.com/hashicorp/terraform-provider-aws/internal/slices" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/internal/verify" @@ -304,32 +305,26 @@ func resourceGatewayCreate(ctx context.Context, d *schema.ResourceData, meta any } var response *http.Response - err = retry.RetryContext(ctx, d.Timeout(schema.TimeoutCreate), func() *retry.RetryError { + err = tfresource.Retry(ctx, d.Timeout(schema.TimeoutCreate), func(ctx context.Context) *tfresource.RetryError { response, err = client.Do(request) if err != nil { + errReturn := fmt.Errorf("making HTTP request: %w", err) if errs.IsA[net.Error](err) { - errMessage := fmt.Errorf("making HTTP request: %w", err) - log.Printf("[DEBUG] retryable %s", errMessage) - return retry.RetryableError(errMessage) + return tfresource.RetryableError(errReturn) } - return retry.NonRetryableError(fmt.Errorf("making HTTP request: %w", err)) + return tfresource.NonRetryableError(errReturn) } - if slices.Contains([]int{504}, response.StatusCode) { - errMessage := fmt.Errorf("status code in HTTP response: %d", response.StatusCode) - log.Printf("[DEBUG] retryable %s", errMessage) - return retry.RetryableError(errMessage) + if slices.Contains([]int{http.StatusGatewayTimeout}, response.StatusCode) { + errReturn := fmt.Errorf("status code in HTTP response: %d", response.StatusCode) + return tfresource.RetryableError(errReturn) } return nil }) - if tfresource.TimedOut(err) { - response, err = client.Do(request) - } - if err != nil { return sdkdiag.AppendErrorf(diags, "retrieving activation key from IP Address (%s): %s", gatewayIPAddress, err) } @@ -351,7 +346,7 @@ func resourceGatewayCreate(ctx 
context.Context, d *schema.ResourceData, meta any } name := d.Get("gateway_name").(string) - input := &storagegateway.ActivateGatewayInput{ + input := storagegateway.ActivateGatewayInput{ ActivationKey: aws.String(activationKey), GatewayRegion: aws.String(region), GatewayName: aws.String(name), @@ -368,7 +363,7 @@ func resourceGatewayCreate(ctx context.Context, d *schema.ResourceData, meta any input.TapeDriveType = aws.String(v.(string)) } - output, err := conn.ActivateGateway(ctx, input) + output, err := conn.ActivateGateway(ctx, &input) if err != nil { return sdkdiag.AppendErrorf(diags, "activating Storage Gateway Gateway (%s): %s", name, err) @@ -376,17 +371,17 @@ func resourceGatewayCreate(ctx context.Context, d *schema.ResourceData, meta any d.SetId(aws.ToString(output.GatewayARN)) - if _, err = waitGatewayConnected(ctx, conn, d.Id(), d.Timeout(schema.TimeoutCreate)); err != nil { + if _, err := waitGatewayConnected(ctx, conn, d.Id(), d.Timeout(schema.TimeoutCreate)); err != nil { return sdkdiag.AppendErrorf(diags, "waiting for Storage Gateway Gateway (%s) connect: %s", d.Id(), err) } if v, ok := d.GetOk(names.AttrCloudWatchLogGroupARN); ok && v.(string) != "" { - input := &storagegateway.UpdateGatewayInformationInput{ + input := storagegateway.UpdateGatewayInformationInput{ CloudWatchLogGroupARN: aws.String(v.(string)), GatewayARN: aws.String(d.Id()), } - _, err := conn.UpdateGatewayInformation(ctx, input) + _, err := conn.UpdateGatewayInformation(ctx, &input) if err != nil { return sdkdiag.AppendErrorf(diags, "updating Storage Gateway Gateway (%s) CloudWatch log group: %s", d.Id(), err) @@ -419,12 +414,12 @@ func resourceGatewayCreate(ctx context.Context, d *schema.ResourceData, meta any } if v, ok := d.GetOk("smb_guest_password"); ok && v.(string) != "" { - input := &storagegateway.SetSMBGuestPasswordInput{ + input := storagegateway.SetSMBGuestPasswordInput{ GatewayARN: aws.String(d.Id()), Password: aws.String(v.(string)), } - _, err := 
conn.SetSMBGuestPassword(ctx, input) + _, err := conn.SetSMBGuestPassword(ctx, &input) if err != nil { return sdkdiag.AppendErrorf(diags, "setting Storage Gateway Gateway (%s) SMB guest password: %s", d.Id(), err) @@ -432,12 +427,12 @@ func resourceGatewayCreate(ctx context.Context, d *schema.ResourceData, meta any } if v, ok := d.GetOk("smb_security_strategy"); ok { - input := &storagegateway.UpdateSMBSecurityStrategyInput{ + input := storagegateway.UpdateSMBSecurityStrategyInput{ GatewayARN: aws.String(d.Id()), SMBSecurityStrategy: awstypes.SMBSecurityStrategy(v.(string)), } - _, err := conn.UpdateSMBSecurityStrategy(ctx, input) + _, err := conn.UpdateSMBSecurityStrategy(ctx, &input) if err != nil { return sdkdiag.AppendErrorf(diags, "setting Storage Gateway Gateway (%s) SMB security strategy: %s", d.Id(), err) @@ -445,12 +440,12 @@ func resourceGatewayCreate(ctx context.Context, d *schema.ResourceData, meta any } if v, ok := d.GetOk("smb_file_share_visibility"); ok { - input := &storagegateway.UpdateSMBFileShareVisibilityInput{ + input := storagegateway.UpdateSMBFileShareVisibilityInput{ FileSharesVisible: aws.Bool(v.(bool)), GatewayARN: aws.String(d.Id()), } - _, err := conn.UpdateSMBFileShareVisibility(ctx, input) + _, err := conn.UpdateSMBFileShareVisibility(ctx, &input) if err != nil { return sdkdiag.AppendErrorf(diags, "updating Storage Gateway Gateway (%s) SMB file share visibility: %s", d.Id(), err) @@ -459,7 +454,7 @@ func resourceGatewayCreate(ctx context.Context, d *schema.ResourceData, meta any switch d.Get("gateway_type").(string) { case gatewayTypeCached, gatewayTypeStored, gatewayTypeVTL, gatewayTypeVTLSnow: - input := &storagegateway.UpdateBandwidthRateLimitInput{ + input := storagegateway.UpdateBandwidthRateLimitInput{ GatewayARN: aws.String(d.Id()), } @@ -472,7 +467,7 @@ func resourceGatewayCreate(ctx context.Context, d *schema.ResourceData, meta any } if input.AverageDownloadRateLimitInBitsPerSec != nil || 
input.AverageUploadRateLimitInBitsPerSec != nil { - _, err := conn.UpdateBandwidthRateLimit(ctx, input) + _, err := conn.UpdateBandwidthRateLimit(ctx, &input) if err != nil { return sdkdiag.AppendErrorf(diags, "updating Storage Gateway Gateway (%s) bandwidth rate limits: %s", d.Id(), err) @@ -495,6 +490,18 @@ func resourceGatewayRead(ctx context.Context, d *schema.ResourceData, meta any) return diags } + if isGatewayNotConnectedErr(err) { + if gatewayInfo, err := findGatewayInfoByARN(ctx, conn, d.Id()); err == nil { + d.Set(names.AttrARN, gatewayInfo.GatewayARN) + d.Set("ec2_instance_id", gatewayInfo.Ec2InstanceId) + d.Set("gateway_name", gatewayInfo.GatewayName) + d.Set("gateway_type", gatewayInfo.GatewayType) + d.Set("host_environment", gatewayInfo.HostEnvironment) + + return diags + } + } + if err != nil { return sdkdiag.AppendErrorf(diags, "reading Storage Gateway Gateway (%s): %s", d.Id(), err) } @@ -570,11 +577,7 @@ func resourceGatewayRead(ctx context.Context, d *schema.ResourceData, meta any) switch aws.ToString(outputDGI.GatewayType) { case gatewayTypeCached, gatewayTypeStored, gatewayTypeVTL, gatewayTypeVTLSnow: - input := &storagegateway.DescribeBandwidthRateLimitInput{ - GatewayARN: aws.String(d.Id()), - } - - outputDBRL, err := conn.DescribeBandwidthRateLimit(ctx, input) + outputDBRL, err := findBandwidthRateLimitByARN(ctx, conn, d.Id()) switch { case errs.IsAErrorMessageContains[*awstypes.InvalidGatewayRequestException](err, "not supported"): @@ -587,11 +590,7 @@ func resourceGatewayRead(ctx context.Context, d *schema.ResourceData, meta any) } } - input := &storagegateway.DescribeMaintenanceStartTimeInput{ - GatewayARN: aws.String(d.Id()), - } - - outputDMST, err := conn.DescribeMaintenanceStartTime(ctx, input) + outputDMST, err := findMaintenanceStartTimeByARN(ctx, conn, d.Id()) switch { case errs.IsAErrorMessageContains[*awstypes.InvalidGatewayRequestException](err, "The specified operation is not supported"): @@ -614,14 +613,14 @@ func 
resourceGatewayUpdate(ctx context.Context, d *schema.ResourceData, meta any conn := meta.(*conns.AWSClient).StorageGatewayClient(ctx) if d.HasChanges(names.AttrCloudWatchLogGroupARN, "gateway_name", "gateway_timezone") { - input := &storagegateway.UpdateGatewayInformationInput{ + input := storagegateway.UpdateGatewayInformationInput{ CloudWatchLogGroupARN: aws.String(d.Get(names.AttrCloudWatchLogGroupARN).(string)), GatewayARN: aws.String(d.Id()), GatewayName: aws.String(d.Get("gateway_name").(string)), GatewayTimezone: aws.String(d.Get("gateway_timezone").(string)), } - _, err := conn.UpdateGatewayInformation(ctx, input) + _, err := conn.UpdateGatewayInformation(ctx, &input) if err != nil { return sdkdiag.AppendErrorf(diags, "updating Storage Gateway Gateway (%s): %s", d.Id(), err) @@ -656,12 +655,12 @@ func resourceGatewayUpdate(ctx context.Context, d *schema.ResourceData, meta any } if d.HasChange("smb_guest_password") { - input := &storagegateway.SetSMBGuestPasswordInput{ + input := storagegateway.SetSMBGuestPasswordInput{ GatewayARN: aws.String(d.Id()), Password: aws.String(d.Get("smb_guest_password").(string)), } - _, err := conn.SetSMBGuestPassword(ctx, input) + _, err := conn.SetSMBGuestPassword(ctx, &input) if err != nil { return sdkdiag.AppendErrorf(diags, "setting Storage Gateway Gateway (%s) SMB guest password: %s", d.Id(), err) @@ -669,12 +668,12 @@ func resourceGatewayUpdate(ctx context.Context, d *schema.ResourceData, meta any } if d.HasChange("smb_security_strategy") { - input := &storagegateway.UpdateSMBSecurityStrategyInput{ + input := storagegateway.UpdateSMBSecurityStrategyInput{ GatewayARN: aws.String(d.Id()), SMBSecurityStrategy: awstypes.SMBSecurityStrategy(d.Get("smb_security_strategy").(string)), } - _, err := conn.UpdateSMBSecurityStrategy(ctx, input) + _, err := conn.UpdateSMBSecurityStrategy(ctx, &input) if err != nil { return sdkdiag.AppendErrorf(diags, "updating Storage Gateway Gateway (%s) SMB security strategy: %s", d.Id(), err) @@ 
-682,12 +681,12 @@ func resourceGatewayUpdate(ctx context.Context, d *schema.ResourceData, meta any } if d.HasChange("smb_file_share_visibility") { - input := &storagegateway.UpdateSMBFileShareVisibilityInput{ + input := storagegateway.UpdateSMBFileShareVisibilityInput{ FileSharesVisible: aws.Bool(d.Get("smb_file_share_visibility").(bool)), GatewayARN: aws.String(d.Id()), } - _, err := conn.UpdateSMBFileShareVisibility(ctx, input) + _, err := conn.UpdateSMBFileShareVisibility(ctx, &input) if err != nil { return sdkdiag.AppendErrorf(diags, "updating Storage Gateway Gateway (%s) SMB file share visibility: %s", d.Id(), err) @@ -695,11 +694,11 @@ func resourceGatewayUpdate(ctx context.Context, d *schema.ResourceData, meta any } if d.HasChanges("average_download_rate_limit_in_bits_per_sec", "average_upload_rate_limit_in_bits_per_sec") { - inputD := &storagegateway.DeleteBandwidthRateLimitInput{ + inputD := storagegateway.DeleteBandwidthRateLimitInput{ GatewayARN: aws.String(d.Id()), } needsDelete := false - inputU := &storagegateway.UpdateBandwidthRateLimitInput{ + inputU := storagegateway.UpdateBandwidthRateLimitInput{ GatewayARN: aws.String(d.Id()), } needsUpdate := false @@ -725,7 +724,7 @@ func resourceGatewayUpdate(ctx context.Context, d *schema.ResourceData, meta any } if needsUpdate { - _, err := conn.UpdateBandwidthRateLimit(ctx, inputU) + _, err := conn.UpdateBandwidthRateLimit(ctx, &inputU) if err != nil { return sdkdiag.AppendErrorf(diags, "updating Storage Gateway Gateway (%s) bandwidth rate limits: %s", d.Id(), err) @@ -733,7 +732,7 @@ func resourceGatewayUpdate(ctx context.Context, d *schema.ResourceData, meta any } if needsDelete { - _, err := conn.DeleteBandwidthRateLimit(ctx, inputD) + _, err := conn.DeleteBandwidthRateLimit(ctx, &inputD) if err != nil { return sdkdiag.AppendErrorf(diags, "deleting Storage Gateway Gateway (%s) bandwidth rate limits: %s", d.Id(), err) @@ -749,9 +748,10 @@ func resourceGatewayDelete(ctx context.Context, d 
*schema.ResourceData, meta any conn := meta.(*conns.AWSClient).StorageGatewayClient(ctx) log.Printf("[DEBUG] Deleting Storage Gateway Gateway: %s", d.Id()) - _, err := conn.DeleteGateway(ctx, &storagegateway.DeleteGatewayInput{ + input := storagegateway.DeleteGatewayInput{ GatewayARN: aws.String(d.Id()), - }) + } + _, err := conn.DeleteGateway(ctx, &input) if isGatewayNotFoundErr(err) { return diags @@ -765,11 +765,11 @@ func resourceGatewayDelete(ctx context.Context, d *schema.ResourceData, meta any } func findGatewayByARN(ctx context.Context, conn *storagegateway.Client, arn string) (*storagegateway.DescribeGatewayInformationOutput, error) { - input := &storagegateway.DescribeGatewayInformationInput{ + input := storagegateway.DescribeGatewayInformationInput{ GatewayARN: aws.String(arn), } - return findGateway(ctx, conn, input) + return findGateway(ctx, conn, &input) } func findGateway(ctx context.Context, conn *storagegateway.Client, input *storagegateway.DescribeGatewayInformationInput) (*storagegateway.DescribeGatewayInformationOutput, error) { @@ -793,12 +793,51 @@ func findGateway(ctx context.Context, conn *storagegateway.Client, input *storag return output, nil } +func findGatewayInfoByARN(ctx context.Context, conn *storagegateway.Client, arn string) (*awstypes.GatewayInfo, error) { + var input storagegateway.ListGatewaysInput + + return findGatewayInfo(ctx, conn, &input, func(v *awstypes.GatewayInfo) bool { + return aws.ToString(v.GatewayARN) == arn + }) +} + +func findGatewayInfo(ctx context.Context, conn *storagegateway.Client, input *storagegateway.ListGatewaysInput, filter tfslices.Predicate[*awstypes.GatewayInfo]) (*awstypes.GatewayInfo, error) { + output, err := findGateways(ctx, conn, input, filter) + + if err != nil { + return nil, err + } + + return tfresource.AssertSingleValueResult(output) +} + +func findGateways(ctx context.Context, conn *storagegateway.Client, input *storagegateway.ListGatewaysInput, filter 
tfslices.Predicate[*awstypes.GatewayInfo]) ([]awstypes.GatewayInfo, error) { + var output []awstypes.GatewayInfo + + pages := storagegateway.NewListGatewaysPaginator(conn, input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + + if err != nil { + return nil, err + } + + for _, v := range page.Gateways { + if filter(&v) { + output = append(output, v) + } + } + } + + return output, nil +} + func findSMBSettingsByARN(ctx context.Context, conn *storagegateway.Client, arn string) (*storagegateway.DescribeSMBSettingsOutput, error) { - input := &storagegateway.DescribeSMBSettingsInput{ + input := storagegateway.DescribeSMBSettingsInput{ GatewayARN: aws.String(arn), } - return findSMBSettings(ctx, conn, input) + return findSMBSettings(ctx, conn, &input) } func findSMBSettings(ctx context.Context, conn *storagegateway.Client, input *storagegateway.DescribeSMBSettingsInput) (*storagegateway.DescribeSMBSettingsOutput, error) { @@ -822,6 +861,64 @@ func findSMBSettings(ctx context.Context, conn *storagegateway.Client, input *st return output, nil } +func findBandwidthRateLimitByARN(ctx context.Context, conn *storagegateway.Client, arn string) (*storagegateway.DescribeBandwidthRateLimitOutput, error) { + input := storagegateway.DescribeBandwidthRateLimitInput{ + GatewayARN: aws.String(arn), + } + + return findBandwidthRateLimit(ctx, conn, &input) +} + +func findBandwidthRateLimit(ctx context.Context, conn *storagegateway.Client, input *storagegateway.DescribeBandwidthRateLimitInput) (*storagegateway.DescribeBandwidthRateLimitOutput, error) { + output, err := conn.DescribeBandwidthRateLimit(ctx, input) + + if isGatewayNotFoundErr(err) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, + } + } + + if err != nil { + return nil, err + } + + if output == nil { + return nil, tfresource.NewEmptyResultError(input) + } + + return output, nil +} + +func findMaintenanceStartTimeByARN(ctx context.Context, conn *storagegateway.Client, arn string) 
(*storagegateway.DescribeMaintenanceStartTimeOutput, error) { + input := storagegateway.DescribeMaintenanceStartTimeInput{ + GatewayARN: aws.String(arn), + } + + return findMaintenanceStartTime(ctx, conn, &input) +} + +func findMaintenanceStartTime(ctx context.Context, conn *storagegateway.Client, input *storagegateway.DescribeMaintenanceStartTimeInput) (*storagegateway.DescribeMaintenanceStartTimeOutput, error) { + output, err := conn.DescribeMaintenanceStartTime(ctx, input) + + if isGatewayNotFoundErr(err) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, + } + } + + if err != nil { + return nil, err + } + + if output == nil { + return nil, tfresource.NewEmptyResultError(input) + } + + return output, nil +} + const ( gatewayStatusConnected = "GatewayConnected" gatewayStatusNotConnected = "GatewayNotConnected" @@ -835,7 +932,7 @@ func statusGatewayConnected(ctx context.Context, conn *storagegateway.Client, ga return nil, "", nil } - if errs.IsAErrorMessageContains[*awstypes.InvalidGatewayRequestException](err, "The specified gateway is not connected") { + if isGatewayNotConnectedErr(err) { return output, gatewayStatusNotConnected, nil } diff --git a/internal/service/storagegateway/gateway_test.go b/internal/service/storagegateway/gateway_test.go index d099f8afc05a..8afdb7dcf284 100644 --- a/internal/service/storagegateway/gateway_test.go +++ b/internal/service/storagegateway/gateway_test.go @@ -10,7 +10,7 @@ import ( "testing" "github.com/YakDriver/regexache" - "github.com/aws/aws-sdk-go-v2/service/storagegateway" + awstypes "github.com/aws/aws-sdk-go-v2/service/storagegateway/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" @@ -23,7 +23,7 @@ import ( func TestAccStorageGatewayGateway_GatewayType_cached(t *testing.T) { ctx := acctest.Context(t) - var gateway 
storagegateway.DescribeGatewayInformationOutput + var gateway awstypes.GatewayInfo rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_storagegateway_gateway.test" @@ -66,7 +66,7 @@ func TestAccStorageGatewayGateway_GatewayType_cached(t *testing.T) { func TestAccStorageGatewayGateway_GatewayType_fileFSxSMB(t *testing.T) { ctx := acctest.Context(t) - var gateway storagegateway.DescribeGatewayInformationOutput + var gateway awstypes.GatewayInfo rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_storagegateway_gateway.test" @@ -109,7 +109,7 @@ func TestAccStorageGatewayGateway_GatewayType_fileFSxSMB(t *testing.T) { func TestAccStorageGatewayGateway_GatewayType_fileS3(t *testing.T) { ctx := acctest.Context(t) - var gateway storagegateway.DescribeGatewayInformationOutput + var gateway awstypes.GatewayInfo rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_storagegateway_gateway.test" @@ -120,7 +120,7 @@ func TestAccStorageGatewayGateway_GatewayType_fileS3(t *testing.T) { CheckDestroy: testAccCheckGatewayDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccGatewayConfig_typeFileS3(rName), + Config: testAccGatewayConfig_typeFileS3(rName, rName), Check: resource.ComposeAggregateTestCheckFunc( testAccCheckGatewayExists(ctx, resourceName, &gateway), acctest.MatchResourceAttrRegionalARN(ctx, resourceName, names.AttrARN, "storagegateway", regexache.MustCompile(`gateway/sgw-.+`)), @@ -152,7 +152,7 @@ func TestAccStorageGatewayGateway_GatewayType_fileS3(t *testing.T) { func TestAccStorageGatewayGateway_GatewayType_stored(t *testing.T) { ctx := acctest.Context(t) - var gateway storagegateway.DescribeGatewayInformationOutput + var gateway awstypes.GatewayInfo rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_storagegateway_gateway.test" @@ -195,7 +195,7 @@ func TestAccStorageGatewayGateway_GatewayType_stored(t *testing.T) { func 
TestAccStorageGatewayGateway_GatewayType_vtl(t *testing.T) { ctx := acctest.Context(t) - var gateway storagegateway.DescribeGatewayInformationOutput + var gateway awstypes.GatewayInfo rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_storagegateway_gateway.test" @@ -236,7 +236,7 @@ func TestAccStorageGatewayGateway_GatewayType_vtl(t *testing.T) { func TestAccStorageGatewayGateway_tags(t *testing.T) { ctx := acctest.Context(t) - var gateway storagegateway.DescribeGatewayInformationOutput + var gateway awstypes.GatewayInfo rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_storagegateway_gateway.test" @@ -284,9 +284,10 @@ func TestAccStorageGatewayGateway_tags(t *testing.T) { func TestAccStorageGatewayGateway_gatewayName(t *testing.T) { ctx := acctest.Context(t) - var gateway storagegateway.DescribeGatewayInformationOutput - rName1 := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - rName2 := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + var gateway awstypes.GatewayInfo + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rNameGateway1 := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rNameGateway2 := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_storagegateway_gateway.test" resource.ParallelTest(t, resource.TestCase{ @@ -296,17 +297,17 @@ func TestAccStorageGatewayGateway_gatewayName(t *testing.T) { CheckDestroy: testAccCheckGatewayDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccGatewayConfig_typeFileS3(rName1), + Config: testAccGatewayConfig_typeFileS3(rName, rNameGateway1), Check: resource.ComposeTestCheckFunc( testAccCheckGatewayExists(ctx, resourceName, &gateway), - resource.TestCheckResourceAttr(resourceName, "gateway_name", rName1), + resource.TestCheckResourceAttr(resourceName, "gateway_name", rNameGateway1), ), }, { - Config: testAccGatewayConfig_typeFileS3(rName2), + Config: testAccGatewayConfig_typeFileS3(rName, 
rNameGateway2), Check: resource.ComposeTestCheckFunc( testAccCheckGatewayExists(ctx, resourceName, &gateway), - resource.TestCheckResourceAttr(resourceName, "gateway_name", rName2), + resource.TestCheckResourceAttr(resourceName, "gateway_name", rNameGateway2), ), }, { @@ -321,7 +322,7 @@ func TestAccStorageGatewayGateway_gatewayName(t *testing.T) { func TestAccStorageGatewayGateway_cloudWatchLogs(t *testing.T) { ctx := acctest.Context(t) - var gateway storagegateway.DescribeGatewayInformationOutput + var gateway awstypes.GatewayInfo rName1 := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_storagegateway_gateway.test" resourceName2 := "aws_cloudwatch_log_group.test" @@ -351,7 +352,7 @@ func TestAccStorageGatewayGateway_cloudWatchLogs(t *testing.T) { func TestAccStorageGatewayGateway_gatewayTimezone(t *testing.T) { ctx := acctest.Context(t) - var gateway storagegateway.DescribeGatewayInformationOutput + var gateway awstypes.GatewayInfo rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_storagegateway_gateway.test" @@ -387,7 +388,7 @@ func TestAccStorageGatewayGateway_gatewayTimezone(t *testing.T) { func TestAccStorageGatewayGateway_gatewayVPCEndpoint(t *testing.T) { ctx := acctest.Context(t) - var gateway storagegateway.DescribeGatewayInformationOutput + var gateway awstypes.GatewayInfo rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_storagegateway_gateway.test" vpcEndpointResourceName := "aws_vpc_endpoint.test" @@ -417,7 +418,7 @@ func TestAccStorageGatewayGateway_gatewayVPCEndpoint(t *testing.T) { func TestAccStorageGatewayGateway_smbActiveDirectorySettings(t *testing.T) { ctx := acctest.Context(t) - var gateway storagegateway.DescribeGatewayInformationOutput + var gateway awstypes.GatewayInfo rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_storagegateway_gateway.test" domainName := acctest.RandomDomainName() @@ -450,7 +451,7 @@ func 
TestAccStorageGatewayGateway_smbActiveDirectorySettings(t *testing.T) { func TestAccStorageGatewayGateway_SMBActiveDirectorySettings_timeout(t *testing.T) { ctx := acctest.Context(t) - var gateway storagegateway.DescribeGatewayInformationOutput + var gateway awstypes.GatewayInfo rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_storagegateway_gateway.test" domainName := acctest.RandomDomainName() @@ -482,7 +483,7 @@ func TestAccStorageGatewayGateway_SMBActiveDirectorySettings_timeout(t *testing. func TestAccStorageGatewayGateway_smbMicrosoftActiveDirectorySettings(t *testing.T) { ctx := acctest.Context(t) - var gateway storagegateway.DescribeGatewayInformationOutput + var gateway awstypes.GatewayInfo rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_storagegateway_gateway.test" domainName := acctest.RandomDomainName() @@ -516,7 +517,7 @@ func TestAccStorageGatewayGateway_smbMicrosoftActiveDirectorySettings(t *testing func TestAccStorageGatewayGateway_SMBMicrosoftActiveDirectorySettings_timeout(t *testing.T) { ctx := acctest.Context(t) - var gateway storagegateway.DescribeGatewayInformationOutput + var gateway awstypes.GatewayInfo rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_storagegateway_gateway.test" domainName := acctest.RandomDomainName() @@ -548,7 +549,7 @@ func TestAccStorageGatewayGateway_SMBMicrosoftActiveDirectorySettings_timeout(t func TestAccStorageGatewayGateway_smbGuestPassword(t *testing.T) { ctx := acctest.Context(t) - var gateway storagegateway.DescribeGatewayInformationOutput + var gateway awstypes.GatewayInfo rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_storagegateway_gateway.test" @@ -584,7 +585,7 @@ func TestAccStorageGatewayGateway_smbGuestPassword(t *testing.T) { func TestAccStorageGatewayGateway_smbSecurityStrategy(t *testing.T) { ctx := acctest.Context(t) - var gateway 
storagegateway.DescribeGatewayInformationOutput + var gateway awstypes.GatewayInfo rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_storagegateway_gateway.test" @@ -621,7 +622,7 @@ func TestAccStorageGatewayGateway_smbSecurityStrategy(t *testing.T) { func TestAccStorageGatewayGateway_smbVisibility(t *testing.T) { ctx := acctest.Context(t) - var gateway storagegateway.DescribeGatewayInformationOutput + var gateway awstypes.GatewayInfo rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_storagegateway_gateway.test" @@ -664,7 +665,7 @@ func TestAccStorageGatewayGateway_smbVisibility(t *testing.T) { func TestAccStorageGatewayGateway_disappears(t *testing.T) { ctx := acctest.Context(t) - var gateway storagegateway.DescribeGatewayInformationOutput + var gateway awstypes.GatewayInfo rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_storagegateway_gateway.test" @@ -688,7 +689,7 @@ func TestAccStorageGatewayGateway_disappears(t *testing.T) { func TestAccStorageGatewayGateway_bandwidthUpload(t *testing.T) { ctx := acctest.Context(t) - var gateway storagegateway.DescribeGatewayInformationOutput + var gateway awstypes.GatewayInfo rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_storagegateway_gateway.test" @@ -731,7 +732,7 @@ func TestAccStorageGatewayGateway_bandwidthUpload(t *testing.T) { func TestAccStorageGatewayGateway_bandwidthDownload(t *testing.T) { ctx := acctest.Context(t) - var gateway storagegateway.DescribeGatewayInformationOutput + var gateway awstypes.GatewayInfo rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_storagegateway_gateway.test" @@ -774,7 +775,7 @@ func TestAccStorageGatewayGateway_bandwidthDownload(t *testing.T) { func TestAccStorageGatewayGateway_bandwidthAll(t *testing.T) { ctx := acctest.Context(t) - var gateway storagegateway.DescribeGatewayInformationOutput + var gateway 
awstypes.GatewayInfo rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_storagegateway_gateway.test" @@ -820,7 +821,7 @@ func TestAccStorageGatewayGateway_bandwidthAll(t *testing.T) { func TestAccStorageGatewayGateway_maintenanceStartTime(t *testing.T) { ctx := acctest.Context(t) - var gateway storagegateway.DescribeGatewayInformationOutput + var gateway awstypes.GatewayInfo rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_storagegateway_gateway.test" @@ -871,6 +872,36 @@ func TestAccStorageGatewayGateway_maintenanceStartTime(t *testing.T) { }) } +// https://github.com/hashicorp/terraform-provider-aws/issues/27523. +func TestAccStorageGatewayGateway_offline(t *testing.T) { + ctx := acctest.Context(t) + var gateway awstypes.GatewayInfo + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_storagegateway_gateway.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.StorageGatewayServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckGatewayDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccGatewayConfig_typeCached(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckGatewayExists(ctx, resourceName, &gateway), + ), + }, + { + Config: testAccGatewayConfig_offline(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckGatewayExists(ctx, resourceName, &gateway), + ), + ExpectNonEmptyPlan: true, + }, + }, + }) +} + func testAccCheckGatewayDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { conn := acctest.Provider.Meta().(*conns.AWSClient).StorageGatewayClient(ctx) @@ -897,7 +928,7 @@ func testAccCheckGatewayDestroy(ctx context.Context) resource.TestCheckFunc { } } -func testAccCheckGatewayExists(ctx context.Context, n string, v 
*storagegateway.DescribeGatewayInformationOutput) resource.TestCheckFunc { +func testAccCheckGatewayExists(ctx context.Context, n string, v *awstypes.GatewayInfo) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] if !ok { @@ -906,7 +937,7 @@ func testAccCheckGatewayExists(ctx context.Context, n string, v *storagegateway. conn := acctest.Provider.Meta().(*conns.AWSClient).StorageGatewayClient(ctx) - output, err := tfstoragegateway.FindGatewayByARN(ctx, conn, rs.Primary.ID) + output, err := tfstoragegateway.FindGatewayInfoByARN(ctx, conn, rs.Primary.ID) if err != nil { return err @@ -979,7 +1010,7 @@ resource "aws_security_group" "test" { `, rName)) } -func testAccGatewayConfig_baseFile(rName string) string { +func testAccGatewayConfig_baseFileS3(rName string) string { return acctest.ConfigCompose( testAccGatewayConfig_baseVPC(rName), // Reference: https://docs.aws.amazon.com/storagegateway/latest/userguide/Requirements.html @@ -1006,6 +1037,33 @@ resource "aws_instance" "test" { `, rName)) } +func testAccGatewayConfig_baseFileFSx(rName string) string { + return acctest.ConfigCompose( + testAccGatewayConfig_baseVPC(rName), + // Reference: https://docs.aws.amazon.com/storagegateway/latest/userguide/Requirements.html + acctest.AvailableEC2InstanceTypeForAvailabilityZone("aws_subnet.test.availability_zone", "m5.xlarge", "m4.xlarge"), + fmt.Sprintf(` +# Reference: https://docs.aws.amazon.com/storagegateway/latest/userguide/ec2-gateway-file.html +data "aws_ssm_parameter" "aws_service_storagegateway_ami_FILE_FSX_SMB_latest" { + name = "/aws/service/storagegateway/ami/FILE_FSX_SMB/latest" +} + +resource "aws_instance" "test" { + depends_on = [aws_route.test] + + ami = data.aws_ssm_parameter.aws_service_storagegateway_ami_FILE_FSX_SMB_latest.value + associate_public_ip_address = true + instance_type = data.aws_ec2_instance_type_offering.available.instance_type + vpc_security_group_ids = [aws_security_group.test.id] + 
subnet_id = aws_subnet.test.id + + tags = { + Name = %[1]q + } +} +`, rName)) +} + func testAccGatewayConfig_baseTapeAndVolume(rName string) string { return acctest.ConfigCompose( testAccGatewayConfig_baseVPC(rName), @@ -1046,7 +1104,7 @@ resource "aws_storagegateway_gateway" "test" { } func testAccGatewayConfig_typeFileFSxSMB(rName string) string { - return acctest.ConfigCompose(testAccGatewayConfig_baseFile(rName), fmt.Sprintf(` + return acctest.ConfigCompose(testAccGatewayConfig_baseFileFSx(rName), fmt.Sprintf(` resource "aws_storagegateway_gateway" "test" { gateway_ip_address = aws_instance.test.public_ip gateway_name = %[1]q @@ -1056,19 +1114,19 @@ resource "aws_storagegateway_gateway" "test" { `, rName)) } -func testAccGatewayConfig_typeFileS3(rName string) string { - return acctest.ConfigCompose(testAccGatewayConfig_baseFile(rName), fmt.Sprintf(` +func testAccGatewayConfig_typeFileS3(rName, gatewayName string) string { + return acctest.ConfigCompose(testAccGatewayConfig_baseFileS3(rName), fmt.Sprintf(` resource "aws_storagegateway_gateway" "test" { gateway_ip_address = aws_instance.test.public_ip gateway_name = %[1]q gateway_timezone = "GMT" gateway_type = "FILE_S3" } -`, rName)) +`, gatewayName)) } func testAccGatewayConfig_logGroup(rName string) string { - return acctest.ConfigCompose(testAccGatewayConfig_baseFile(rName), fmt.Sprintf(` + return acctest.ConfigCompose(testAccGatewayConfig_baseFileS3(rName), fmt.Sprintf(` resource "aws_cloudwatch_log_group" "test" { name = %[1]q } @@ -1106,7 +1164,7 @@ resource "aws_storagegateway_gateway" "test" { } func testAccGatewayConfig_timezone(rName, gatewayTimezone string) string { - return acctest.ConfigCompose(testAccGatewayConfig_baseFile(rName), fmt.Sprintf(` + return acctest.ConfigCompose(testAccGatewayConfig_baseFileS3(rName), fmt.Sprintf(` resource "aws_storagegateway_gateway" "test" { gateway_ip_address = aws_instance.test.public_ip gateway_name = %[1]q @@ -1367,7 +1425,7 @@ resource 
"aws_storagegateway_gateway" "test" { } func testAccGatewayConfig_smbGuestPassword(rName, smbGuestPassword string) string { - return acctest.ConfigCompose(testAccGatewayConfig_baseFile(rName), fmt.Sprintf(` + return acctest.ConfigCompose(testAccGatewayConfig_baseFileS3(rName), fmt.Sprintf(` resource "aws_storagegateway_gateway" "test" { gateway_ip_address = aws_instance.test.public_ip gateway_name = %[1]q @@ -1379,7 +1437,7 @@ resource "aws_storagegateway_gateway" "test" { } func testAccGatewayConfig_smbSecurityStrategy(rName, strategy string) string { - return acctest.ConfigCompose(testAccGatewayConfig_baseFile(rName), fmt.Sprintf(` + return acctest.ConfigCompose(testAccGatewayConfig_baseFileS3(rName), fmt.Sprintf(` resource "aws_storagegateway_gateway" "test" { gateway_ip_address = aws_instance.test.public_ip gateway_name = %[1]q @@ -1391,7 +1449,7 @@ resource "aws_storagegateway_gateway" "test" { } func testAccGatewayConfig_smbVisibility(rName string, visible bool) string { - return acctest.ConfigCompose(testAccGatewayConfig_baseFile(rName), fmt.Sprintf(` + return acctest.ConfigCompose(testAccGatewayConfig_baseFileS3(rName), fmt.Sprintf(` resource "aws_storagegateway_gateway" "test" { gateway_ip_address = aws_instance.test.public_ip gateway_name = %[1]q @@ -1494,3 +1552,13 @@ resource "aws_storagegateway_gateway" "test" { } `, rName, hourOfDay, minuteOfHour, dayOfWeek, dayOfMonth)) } + +func testAccGatewayConfig_offline(rName string) string { + return acctest.ConfigCompose(testAccGatewayConfig_typeCached(rName), ` +resource "aws_ec2_instance_state" "test" { + instance_id = aws_instance.test.id + state = "stopped" + force = true +} +`) +} diff --git a/internal/service/storagegateway/local_disk_data_source_test.go b/internal/service/storagegateway/local_disk_data_source_test.go index a97cd02e8bf3..ddf59915132a 100644 --- a/internal/service/storagegateway/local_disk_data_source_test.go +++ b/internal/service/storagegateway/local_disk_data_source_test.go @@ -68,7 
+68,7 @@ func TestAccStorageGatewayLocalDiskDataSource_diskPath(t *testing.T) { func testAccLocalDiskDataSourceConfig_base(rName string) string { return acctest.ConfigCompose( - testAccGatewayConfig_typeFileS3(rName), + testAccGatewayConfig_typeFileS3(rName, rName), fmt.Sprintf(` resource "aws_ebs_volume" "test" { availability_zone = aws_instance.test.availability_zone diff --git a/internal/service/storagegateway/nfs_file_share_test.go b/internal/service/storagegateway/nfs_file_share_test.go index 8db2c66f0dbb..b40d91befb5a 100644 --- a/internal/service/storagegateway/nfs_file_share_test.go +++ b/internal/service/storagegateway/nfs_file_share_test.go @@ -731,7 +731,7 @@ func testAccCheckNFSFileShareExists(ctx context.Context, n string, v *awstypes.N } func testAccNFSFileShareConfig_baseS3(rName string) string { - return acctest.ConfigCompose(testAccGatewayConfig_baseFile(rName), fmt.Sprintf(` + return acctest.ConfigCompose(testAccGatewayConfig_baseFileS3(rName), fmt.Sprintf(` resource "aws_iam_role" "test" { name = %[1]q diff --git a/internal/service/storagegateway/service_endpoint_resolver_gen.go b/internal/service/storagegateway/service_endpoint_resolver_gen.go index 53a0721391a2..46fb7e873ce3 100644 --- a/internal/service/storagegateway/service_endpoint_resolver_gen.go +++ b/internal/service/storagegateway/service_endpoint_resolver_gen.go @@ -62,7 +62,7 @@ func (r resolverV2) ResolveEndpoint(ctx context.Context, params storagegateway.E }) params.UseFIPS = aws.Bool(false) } else { - err = fmt.Errorf("looking up storagegateway endpoint %q: %s", hostname, err) + err = fmt.Errorf("looking up storagegateway endpoint %q: %w", hostname, err) return } } else { diff --git a/internal/service/storagegateway/service_endpoints_gen_test.go b/internal/service/storagegateway/service_endpoints_gen_test.go index 03c531a5ecae..3c3b1783424d 100644 --- a/internal/service/storagegateway/service_endpoints_gen_test.go +++ b/internal/service/storagegateway/service_endpoints_gen_test.go 
@@ -521,7 +521,7 @@ func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { ) } -var errCancelOperation = fmt.Errorf("Test: Canceling request") +var errCancelOperation = errors.New("Test: Canceling request") func addCancelRequestMiddleware() func(*middleware.Stack) error { return func(stack *middleware.Stack) error { diff --git a/internal/service/storagegateway/service_package_gen.go b/internal/service/storagegateway/service_package_gen.go index a8022c01ff59..23531aa2effe 100644 --- a/internal/service/storagegateway/service_package_gen.go +++ b/internal/service/storagegateway/service_package_gen.go @@ -7,7 +7,6 @@ import ( "unique" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/storagegateway" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -146,7 +145,7 @@ func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) ( func(o *storagegateway.Options) { if inContext, ok := conns.FromContext(ctx); ok && inContext.VCREnabled() { tflog.Info(ctx, "overriding retry behavior to immediately return VCR errors") - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(vcr.InteractionNotFoundRetryableFunc)) + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), vcr.InteractionNotFoundRetryableFunc) } }, withExtraOptions(ctx, p, config), diff --git a/internal/service/storagegateway/smb_file_share.go b/internal/service/storagegateway/smb_file_share.go index 4fe718d9401d..b0ee77406062 100644 --- a/internal/service/storagegateway/smb_file_share.go +++ b/internal/service/storagegateway/smb_file_share.go @@ -319,7 +319,7 @@ func resourceSMBFileShareRead(ctx context.Context, d *schema.ResourceData, meta } d.Set("access_based_enumeration", fileshare.AccessBasedEnumeration) - d.Set("admin_user_list", aws.StringSlice(fileshare.AdminUserList)) + 
d.Set("admin_user_list", fileshare.AdminUserList) d.Set(names.AttrARN, fileshare.FileShareARN) d.Set("audit_destination_arn", fileshare.AuditDestinationARN) d.Set("authentication", fileshare.Authentication) @@ -337,7 +337,7 @@ func resourceSMBFileShareRead(ctx context.Context, d *schema.ResourceData, meta d.Set("file_share_name", fileshare.FileShareName) d.Set("gateway_arn", fileshare.GatewayARN) d.Set("guess_mime_type_enabled", fileshare.GuessMIMETypeEnabled) - d.Set("invalid_user_list", aws.StringSlice(fileshare.InvalidUserList)) + d.Set("invalid_user_list", fileshare.InvalidUserList) d.Set("kms_encrypted", fileshare.KMSEncrypted) //nolint:staticcheck // deprecated by AWS, but must remain for backward compatibility d.Set(names.AttrKMSKeyARN, fileshare.KMSKey) d.Set("location_arn", fileshare.LocationARN) @@ -349,7 +349,7 @@ func resourceSMBFileShareRead(ctx context.Context, d *schema.ResourceData, meta d.Set("requester_pays", fileshare.RequesterPays) d.Set(names.AttrRoleARN, fileshare.Role) d.Set("smb_acl_enabled", fileshare.SMBACLEnabled) - d.Set("valid_user_list", aws.StringSlice(fileshare.ValidUserList)) + d.Set("valid_user_list", fileshare.ValidUserList) d.Set("vpc_endpoint_dns_name", fileshare.VPCEndpointDNSName) setTagsOut(ctx, fileshare.Tags) diff --git a/internal/service/storagegateway/stored_iscsi_volume.go b/internal/service/storagegateway/stored_iscsi_volume.go index 14e7cbed93b6..a8c7d2de68bf 100644 --- a/internal/service/storagegateway/stored_iscsi_volume.go +++ b/internal/service/storagegateway/stored_iscsi_volume.go @@ -231,7 +231,7 @@ func resourceStorediSCSIVolumeDelete(ctx context.Context, d *schema.ResourceData const ( timeout = 2 * time.Minute ) - _, err := tfresource.RetryWhenIsAErrorMessageContains[*awstypes.InvalidGatewayRequestException](ctx, timeout, func() (any, error) { + _, err := tfresource.RetryWhenIsAErrorMessageContains[any, *awstypes.InvalidGatewayRequestException](ctx, timeout, func(ctx context.Context) (any, error) { return 
conn.DeleteVolume(ctx, &storagegateway.DeleteVolumeInput{ VolumeARN: aws.String(d.Id()), }) diff --git a/internal/service/storagegateway/sweep.go b/internal/service/storagegateway/sweep.go index f869458078a1..fef47fde910f 100644 --- a/internal/service/storagegateway/sweep.go +++ b/internal/service/storagegateway/sweep.go @@ -38,7 +38,7 @@ func sweepGateways(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.StorageGatewayClient(ctx) sweepResources := make([]sweep.Sweepable, 0) @@ -78,7 +78,7 @@ func sweepTapePools(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.StorageGatewayClient(ctx) sweepResources := make([]sweep.Sweepable, 0) @@ -118,7 +118,7 @@ func sweepFileSystemAssociations(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.StorageGatewayClient(ctx) sweepResources := make([]sweep.Sweepable, 0) diff --git a/internal/service/storagegateway/tags_gen.go b/internal/service/storagegateway/tags_gen.go index c52e3cc62a92..3931b1151ffb 100644 --- a/internal/service/storagegateway/tags_gen.go +++ b/internal/service/storagegateway/tags_gen.go @@ -3,8 +3,8 @@ package storagegateway import ( "context" - "fmt" + "github.com/YakDriver/smarterr" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/storagegateway" awstypes "github.com/aws/aws-sdk-go-v2/service/storagegateway/types" @@ -31,7 +31,7 @@ func listTags(ctx context.Context, conn *storagegateway.Client, identifier strin 
page, err := pages.NextPage(ctx, optFns...) if err != nil { - return tftags.New(ctx, nil), err + return tftags.New(ctx, nil), smarterr.NewError(err) } output = append(output, page.Tags...) @@ -46,7 +46,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri tags, err := listTags(ctx, meta.(*conns.AWSClient).StorageGatewayClient(ctx), identifier) if err != nil { - return err + return smarterr.NewError(err) } if inContext, ok := tftags.FromContext(ctx); ok { @@ -124,7 +124,7 @@ func updateTags(ctx context.Context, conn *storagegateway.Client, identifier str _, err := conn.RemoveTagsFromResource(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("untagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } @@ -139,7 +139,7 @@ func updateTags(ctx context.Context, conn *storagegateway.Client, identifier str _, err := conn.AddTagsToResource(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("tagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } diff --git a/internal/service/sts/caller_identity_data_source_test.go b/internal/service/sts/caller_identity_data_source_test.go index 931fb6e97c14..29257dd52b96 100644 --- a/internal/service/sts/caller_identity_data_source_test.go +++ b/internal/service/sts/caller_identity_data_source_test.go @@ -17,7 +17,7 @@ import ( func TestAccSTSCallerIdentityDataSource_basic(t *testing.T) { ctx := acctest.Context(t) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.STSServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -45,7 +45,7 @@ func TestAccSTSCallerIdentityDataSource_alternateRegion(t *testing.T) { t.Skipf("Skipping test due to missing %s", envvar.AlternateRegion) } - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { 
acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.STSServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, diff --git a/internal/service/sts/service_endpoint_resolver_gen.go b/internal/service/sts/service_endpoint_resolver_gen.go index a004a0d5457b..6e2c9c3ea911 100644 --- a/internal/service/sts/service_endpoint_resolver_gen.go +++ b/internal/service/sts/service_endpoint_resolver_gen.go @@ -62,7 +62,7 @@ func (r resolverV2) ResolveEndpoint(ctx context.Context, params sts.EndpointPara }) params.UseFIPS = aws.Bool(false) } else { - err = fmt.Errorf("looking up sts endpoint %q: %s", hostname, err) + err = fmt.Errorf("looking up sts endpoint %q: %w", hostname, err) return } } else { diff --git a/internal/service/sts/service_endpoints_gen_test.go b/internal/service/sts/service_endpoints_gen_test.go index 93831e69cd49..c134e51647b2 100644 --- a/internal/service/sts/service_endpoints_gen_test.go +++ b/internal/service/sts/service_endpoints_gen_test.go @@ -659,7 +659,7 @@ func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { ) } -var errCancelOperation = fmt.Errorf("Test: Canceling request") +var errCancelOperation = errors.New("Test: Canceling request") func addCancelRequestMiddleware() func(*middleware.Stack) error { return func(stack *middleware.Stack) error { diff --git a/internal/service/sts/service_package_gen.go b/internal/service/sts/service_package_gen.go index 8c4260fbefa9..db20465c77f0 100644 --- a/internal/service/sts/service_package_gen.go +++ b/internal/service/sts/service_package_gen.go @@ -7,7 +7,6 @@ import ( "unique" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/sts" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -64,7 +63,7 @@ func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) ( func(o *sts.Options) { if inContext, ok := 
conns.FromContext(ctx); ok && inContext.VCREnabled() { tflog.Info(ctx, "overriding retry behavior to immediately return VCR errors") - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(vcr.InteractionNotFoundRetryableFunc)) + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), vcr.InteractionNotFoundRetryableFunc) } }, withExtraOptions(ctx, p, config), diff --git a/internal/service/swf/domain_test.go b/internal/service/swf/domain_test.go index 1c75ba4b9fe6..da36f3d99490 100644 --- a/internal/service/swf/domain_test.go +++ b/internal/service/swf/domain_test.go @@ -240,7 +240,7 @@ func testAccCheckDomainDestroy(ctx context.Context) resource.TestCheckFunc { } // Retrying as Read after Delete is not always consistent. - _, err := tfresource.RetryUntilNotFound(ctx, 2*time.Minute, func() (any, error) { + _, err := tfresource.RetryUntilNotFound(ctx, 2*time.Minute, func(ctx context.Context) (any, error) { return tfswf.FindDomainByName(ctx, conn, rs.Primary.ID) }) diff --git a/internal/service/swf/service_endpoint_resolver_gen.go b/internal/service/swf/service_endpoint_resolver_gen.go index 42f649d27d2b..537b2bb7e386 100644 --- a/internal/service/swf/service_endpoint_resolver_gen.go +++ b/internal/service/swf/service_endpoint_resolver_gen.go @@ -62,7 +62,7 @@ func (r resolverV2) ResolveEndpoint(ctx context.Context, params swf.EndpointPara }) params.UseFIPS = aws.Bool(false) } else { - err = fmt.Errorf("looking up swf endpoint %q: %s", hostname, err) + err = fmt.Errorf("looking up swf endpoint %q: %w", hostname, err) return } } else { diff --git a/internal/service/swf/service_endpoints_gen_test.go b/internal/service/swf/service_endpoints_gen_test.go index 72b2922f62a1..f517e5999474 100644 --- a/internal/service/swf/service_endpoints_gen_test.go +++ b/internal/service/swf/service_endpoints_gen_test.go @@ -523,7 +523,7 @@ func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { ) } -var 
errCancelOperation = fmt.Errorf("Test: Canceling request") +var errCancelOperation = errors.New("Test: Canceling request") func addCancelRequestMiddleware() func(*middleware.Stack) error { return func(stack *middleware.Stack) error { diff --git a/internal/service/swf/service_package_gen.go b/internal/service/swf/service_package_gen.go index c59664252c4f..d1d2a0e0c16a 100644 --- a/internal/service/swf/service_package_gen.go +++ b/internal/service/swf/service_package_gen.go @@ -7,7 +7,6 @@ import ( "unique" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/swf" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -67,7 +66,7 @@ func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) ( func(o *swf.Options) { if inContext, ok := conns.FromContext(ctx); ok && inContext.VCREnabled() { tflog.Info(ctx, "overriding retry behavior to immediately return VCR errors") - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(vcr.InteractionNotFoundRetryableFunc)) + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), vcr.InteractionNotFoundRetryableFunc) } }, withExtraOptions(ctx, p, config), diff --git a/internal/service/swf/sweep.go b/internal/service/swf/sweep.go index 470fa832b3cf..c8f42a3f2c65 100644 --- a/internal/service/swf/sweep.go +++ b/internal/service/swf/sweep.go @@ -26,7 +26,7 @@ func sweepDomains(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.SWFClient(ctx) input := &swf.ListDomainsInput{ diff --git a/internal/service/swf/tags_gen.go b/internal/service/swf/tags_gen.go index a0a90a6f9541..fb4425d07e63 100644 --- a/internal/service/swf/tags_gen.go +++ 
b/internal/service/swf/tags_gen.go @@ -3,8 +3,8 @@ package swf import ( "context" - "fmt" + "github.com/YakDriver/smarterr" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/swf" awstypes "github.com/aws/aws-sdk-go-v2/service/swf/types" @@ -27,7 +27,7 @@ func listTags(ctx context.Context, conn *swf.Client, identifier string, optFns . output, err := conn.ListTagsForResource(ctx, &input, optFns...) if err != nil { - return tftags.New(ctx, nil), err + return tftags.New(ctx, nil), smarterr.NewError(err) } return keyValueTags(ctx, output.Tags), nil @@ -39,7 +39,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri tags, err := listTags(ctx, meta.(*conns.AWSClient).SWFClient(ctx), identifier) if err != nil { - return err + return smarterr.NewError(err) } if inContext, ok := tftags.FromContext(ctx); ok { @@ -117,7 +117,7 @@ func updateTags(ctx context.Context, conn *swf.Client, identifier string, oldTag _, err := conn.UntagResource(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("untagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } @@ -132,7 +132,7 @@ func updateTags(ctx context.Context, conn *swf.Client, identifier string, oldTag _, err := conn.TagResource(ctx, &input, optFns...) 
if err != nil { - return fmt.Errorf("tagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } diff --git a/internal/service/synthetics/canary.go b/internal/service/synthetics/canary.go index 0dab5a4d20b6..1cc095dcb73b 100644 --- a/internal/service/synthetics/canary.go +++ b/internal/service/synthetics/canary.go @@ -133,6 +133,12 @@ func ResourceCanary() *schema.Resource { Optional: true, Elem: &schema.Schema{Type: schema.TypeString}, }, + "ephemeral_storage": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + ValidateFunc: validation.IntBetween(1024, 5120), + }, "memory_in_mb": { Type: schema.TypeInt, Optional: true, @@ -189,6 +195,21 @@ func ResourceCanary() *schema.Resource { return (new == "rate(0 minute)" || new == "rate(0 minutes)") && old == "rate(0 hour)" }, }, + "retry_config": { + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "max_retries": { + Type: schema.TypeInt, + Required: true, + ValidateFunc: validation.IntBetween(0, 2), + }, + }, + }, + }, }, }, }, @@ -243,6 +264,10 @@ func ResourceCanary() *schema.Resource { Optional: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ + "ipv6_allowed_for_dual_stack": { + Type: schema.TypeBool, + Optional: true, + }, names.AttrSecurityGroupIDs: { Type: schema.TypeSet, Elem: &schema.Schema{Type: schema.TypeString}, @@ -332,7 +357,7 @@ func resourceCanaryCreate(ctx context.Context, d *schema.ResourceData, meta any) iamwaiterStopTime := time.Now().Add(propagationTimeout) _, err = tfresource.RetryWhen(ctx, propagationTimeout+canaryCreatedTimeout, - func() (any, error) { + func(ctx context.Context) (any, error) { return retryCreateCanary(ctx, conn, d, input) }, func(err error) (bool, error) { @@ -384,7 +409,11 @@ func resourceCanaryRead(ctx context.Context, d *schema.ResourceData, meta any) d }.String() d.Set(names.AttrARN, canaryArn) d.Set("artifact_s3_location", 
canary.ArtifactS3Location) - d.Set("engine_arn", canary.EngineArn) + if len(canary.EngineConfigs) > 0 { + d.Set("engine_arn", canary.EngineConfigs[0].EngineArn) + } else { + d.Set("engine_arn", canary.EngineArn) + } d.Set(names.AttrExecutionRoleARN, canary.ExecutionRoleArn) d.Set("failure_retention_period", canary.FailureRetentionPeriodInDays) d.Set("handler", canary.Code.Handler) @@ -673,9 +702,26 @@ func expandCanarySchedule(l []any) *awstypes.CanaryScheduleInput { codeConfig.DurationInSeconds = aws.Int64(int64(v.(int))) } + if v, ok := m["retry_config"]; ok { + codeConfig.RetryConfig = expandCanaryScheduleRetryConfig(v.([]any)) + } + return codeConfig } +func expandCanaryScheduleRetryConfig(l []any) *awstypes.RetryConfigInput { + if len(l) == 0 || l[0] == nil { + return nil + } + m := l[0].(map[string]any) + + config := &awstypes.RetryConfigInput{ + MaxRetries: aws.Int32(int32(m["max_retries"].(int))), + } + + return config +} + func flattenCanarySchedule(canarySchedule *awstypes.CanaryScheduleOutput) []any { if canarySchedule == nil { return []any{} @@ -686,6 +732,21 @@ func flattenCanarySchedule(canarySchedule *awstypes.CanaryScheduleOutput) []any "duration_in_seconds": aws.ToInt64(canarySchedule.DurationInSeconds), } + if canarySchedule.RetryConfig != nil { + m["retry_config"] = flattenCanaryScheduleRetryConfig(canarySchedule.RetryConfig) + } + + return []any{m} +} + +func flattenCanaryScheduleRetryConfig(retryConfig *awstypes.RetryConfigOutput) []any { + if retryConfig == nil { + return []any{} + } + m := map[string]any{ + "max_retries": aws.ToInt32(retryConfig.MaxRetries), + } + return []any{m} } @@ -714,6 +775,10 @@ func expandCanaryRunConfig(l []any) *awstypes.CanaryRunConfigInput { codeConfig.EnvironmentVariables = flex.ExpandStringValueMap(vars) } + if v, ok := m["ephemeral_storage"].(int); ok && v > 0 { + codeConfig.EphemeralStorage = aws.Int32(int32(v)) + } + return codeConfig } @@ -732,6 +797,10 @@ func flattenCanaryRunConfig(canaryCodeOut 
*awstypes.CanaryRunConfigOutput, envVa m["environment_variables"] = envVars } + if canaryCodeOut.EphemeralStorage != nil { + m["ephemeral_storage"] = aws.ToInt32(canaryCodeOut.EphemeralStorage) + } + return []any{m} } @@ -746,6 +815,10 @@ func flattenCanaryVPCConfig(canaryVpcOutput *awstypes.VpcConfigOutput) []any { names.AttrVPCID: aws.ToString(canaryVpcOutput.VpcId), } + if canaryVpcOutput.Ipv6AllowedForDualStack != nil { + m["ipv6_allowed_for_dual_stack"] = aws.ToBool(canaryVpcOutput.Ipv6AllowedForDualStack) + } + return []any{m} } @@ -761,6 +834,10 @@ func expandCanaryVPCConfig(l []any) *awstypes.VpcConfigInput { SecurityGroupIds: flex.ExpandStringValueSet(m[names.AttrSecurityGroupIDs].(*schema.Set)), } + if v, ok := m["ipv6_allowed_for_dual_stack"]; ok { + codeConfig.Ipv6AllowedForDualStack = aws.Bool(v.(bool)) + } + return codeConfig } diff --git a/internal/service/synthetics/canary_test.go b/internal/service/synthetics/canary_test.go index f01972f94439..7c8bc6f00734 100644 --- a/internal/service/synthetics/canary_test.go +++ b/internal/service/synthetics/canary_test.go @@ -41,7 +41,8 @@ func TestAccSyntheticsCanary_basic(t *testing.T) { resource.TestCheckResourceAttr(resourceName, names.AttrName, rName), resource.TestCheckResourceAttrPair(resourceName, "runtime_version", runtimeVersionDataSourceName, "version_name"), resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, "0"), - resource.TestCheckResourceAttr(resourceName, "run_config.0.memory_in_mb", "1000"), + resource.TestCheckResourceAttr(resourceName, "run_config.0.ephemeral_storage", "1024"), + resource.TestCheckResourceAttr(resourceName, "run_config.0.memory_in_mb", "1500"), resource.TestCheckResourceAttr(resourceName, "run_config.0.timeout_in_seconds", "840"), resource.TestCheckResourceAttr(resourceName, "failure_retention_period", "31"), resource.TestCheckResourceAttr(resourceName, "success_retention_period", "31"), @@ -49,8 +50,10 @@ func TestAccSyntheticsCanary_basic(t *testing.T) { 
resource.TestCheckResourceAttr(resourceName, "vpc_config.#", "0"), resource.TestCheckResourceAttr(resourceName, "schedule.0.duration_in_seconds", "0"), resource.TestCheckResourceAttr(resourceName, "schedule.0.expression", "rate(0 hour)"), + resource.TestCheckResourceAttr(resourceName, "schedule.0.retry_config.#", "1"), + resource.TestCheckResourceAttr(resourceName, "schedule.0.retry_config.0.max_retries", "0"), acctest.MatchResourceAttrRegionalARN(ctx, resourceName, "engine_arn", "lambda", regexache.MustCompile(fmt.Sprintf(`function:cwsyn-%s.+`, rName))), - acctest.MatchResourceAttrRegionalARN(ctx, resourceName, "source_location_arn", "lambda", regexache.MustCompile(fmt.Sprintf(`layer:cwsyn-%s.+`, rName))), + //acctest.MatchResourceAttrRegionalARN(ctx, resourceName, "source_location_arn", "lambda", regexache.MustCompile(fmt.Sprintf(`layer:cwsyn-%s.+`, rName))), resource.TestCheckResourceAttrPair(resourceName, names.AttrExecutionRoleARN, "aws_iam_role.test", names.AttrARN), resource.TestCheckResourceAttr(resourceName, "artifact_s3_location", fmt.Sprintf("%s/", rName)), resource.TestCheckResourceAttr(resourceName, "timeline.#", "1"), @@ -73,7 +76,8 @@ func TestAccSyntheticsCanary_basic(t *testing.T) { resource.TestCheckResourceAttr(resourceName, names.AttrName, rName), resource.TestCheckResourceAttrPair(resourceName, "runtime_version", runtimeVersionDataSourceName, "version_name"), resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, "0"), - resource.TestCheckResourceAttr(resourceName, "run_config.0.memory_in_mb", "1000"), + resource.TestCheckResourceAttr(resourceName, "run_config.0.ephemeral_storage", "1024"), + resource.TestCheckResourceAttr(resourceName, "run_config.0.memory_in_mb", "1500"), resource.TestCheckResourceAttr(resourceName, "run_config.0.timeout_in_seconds", "840"), resource.TestCheckResourceAttr(resourceName, "failure_retention_period", "31"), resource.TestCheckResourceAttr(resourceName, "success_retention_period", "31"), @@ -82,7 +86,7 
@@ func TestAccSyntheticsCanary_basic(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "schedule.0.duration_in_seconds", "0"), resource.TestCheckResourceAttr(resourceName, "schedule.0.expression", "rate(0 hour)"), acctest.MatchResourceAttrRegionalARN(ctx, resourceName, "engine_arn", "lambda", regexache.MustCompile(fmt.Sprintf(`function:cwsyn-%s.+`, rName))), - acctest.MatchResourceAttrRegionalARN(ctx, resourceName, "source_location_arn", "lambda", regexache.MustCompile(fmt.Sprintf(`layer:cwsyn-%s.+`, rName))), + //acctest.MatchResourceAttrRegionalARN(ctx, resourceName, "source_location_arn", "lambda", regexache.MustCompile(fmt.Sprintf(`layer:cwsyn-%s.+`, rName))), resource.TestCheckResourceAttrPair(resourceName, names.AttrExecutionRoleARN, "aws_iam_role.test", names.AttrARN), resource.TestCheckResourceAttr(resourceName, "artifact_s3_location", fmt.Sprintf("%s/test/", rName)), resource.TestCheckResourceAttr(resourceName, "timeline.#", "1"), @@ -330,7 +334,7 @@ func TestAccSyntheticsCanary_s3(t *testing.T) { resource.TestCheckResourceAttr(resourceName, names.AttrName, rName), resource.TestCheckResourceAttrPair(resourceName, "runtime_version", runtimeVersionDataSourceName, "version_name"), resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, "0"), - resource.TestCheckResourceAttr(resourceName, "run_config.0.memory_in_mb", "1000"), + resource.TestCheckResourceAttr(resourceName, "run_config.0.memory_in_mb", "1500"), resource.TestCheckResourceAttr(resourceName, "run_config.0.timeout_in_seconds", "840"), resource.TestCheckResourceAttr(resourceName, "run_config.0.active_tracing", acctest.CtFalse), resource.TestCheckResourceAttr(resourceName, "failure_retention_period", "31"), @@ -340,7 +344,7 @@ func TestAccSyntheticsCanary_s3(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "schedule.0.duration_in_seconds", "0"), resource.TestCheckResourceAttr(resourceName, "schedule.0.expression", "rate(0 hour)"), 
acctest.MatchResourceAttrRegionalARN(ctx, resourceName, "engine_arn", "lambda", regexache.MustCompile(fmt.Sprintf(`function:cwsyn-%s.+`, rName))), - acctest.MatchResourceAttrRegionalARN(ctx, resourceName, "source_location_arn", "lambda", regexache.MustCompile(fmt.Sprintf(`layer:cwsyn-%s.+`, rName))), + //acctest.MatchResourceAttrRegionalARN(ctx, resourceName, "source_location_arn", "lambda", regexache.MustCompile(fmt.Sprintf(`layer:cwsyn-%s.+`, rName))), resource.TestCheckResourceAttrPair(resourceName, names.AttrExecutionRoleARN, "aws_iam_role.test", names.AttrARN), resource.TestCheckResourceAttr(resourceName, "artifact_s3_location", fmt.Sprintf("%s/", rName)), ), @@ -372,7 +376,7 @@ func TestAccSyntheticsCanary_run(t *testing.T) { Check: resource.ComposeAggregateTestCheckFunc( testAccCheckCanaryExists(ctx, resourceName, &conf), resource.TestCheckResourceAttr(resourceName, names.AttrName, rName), - resource.TestCheckResourceAttr(resourceName, "run_config.0.memory_in_mb", "1000"), + resource.TestCheckResourceAttr(resourceName, "run_config.0.memory_in_mb", "1500"), resource.TestCheckResourceAttr(resourceName, "run_config.0.timeout_in_seconds", "60"), ), }, @@ -506,6 +510,7 @@ func TestAccSyntheticsCanary_vpc(t *testing.T) { testAccCheckCanaryExists(ctx, resourceName, &conf), resource.TestCheckResourceAttr(resourceName, "vpc_config.0.subnet_ids.#", "1"), resource.TestCheckResourceAttr(resourceName, "vpc_config.0.security_group_ids.#", "1"), + resource.TestCheckResourceAttr(resourceName, "vpc_config.0.ipv6_allowed_for_dual_stack", acctest.CtFalse), resource.TestCheckResourceAttrPair(resourceName, "vpc_config.0.vpc_id", "aws_vpc.test", names.AttrID), ), }, @@ -521,6 +526,7 @@ func TestAccSyntheticsCanary_vpc(t *testing.T) { testAccCheckCanaryExists(ctx, resourceName, &conf), resource.TestCheckResourceAttr(resourceName, "vpc_config.0.subnet_ids.#", "2"), resource.TestCheckResourceAttr(resourceName, "vpc_config.0.security_group_ids.#", "2"), + 
resource.TestCheckResourceAttr(resourceName, "vpc_config.0.ipv6_allowed_for_dual_stack", acctest.CtFalse), resource.TestCheckResourceAttrPair(resourceName, "vpc_config.0.vpc_id", "aws_vpc.test", names.AttrID), ), }, @@ -530,6 +536,7 @@ func TestAccSyntheticsCanary_vpc(t *testing.T) { testAccCheckCanaryExists(ctx, resourceName, &conf), resource.TestCheckResourceAttr(resourceName, "vpc_config.0.subnet_ids.#", "1"), resource.TestCheckResourceAttr(resourceName, "vpc_config.0.security_group_ids.#", "1"), + resource.TestCheckResourceAttr(resourceName, "vpc_config.0.ipv6_allowed_for_dual_stack", acctest.CtFalse), resource.TestCheckResourceAttrPair(resourceName, "vpc_config.0.vpc_id", "aws_vpc.test", names.AttrID), ), }, @@ -537,6 +544,194 @@ func TestAccSyntheticsCanary_vpc(t *testing.T) { }) } +func TestAccSyntheticsCanary_vpcIPv6AllowedForDualStack(t *testing.T) { + ctx := acctest.Context(t) + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + + var conf awstypes.Canary + rName := fmt.Sprintf("tf-acc-test-%s", sdkacctest.RandString(8)) + resourceName := "aws_synthetics_canary.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.SyntheticsServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckCanaryDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccCanaryConfig_vpcIPv6AllowedForDualStack(rName, true), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckCanaryExists(ctx, resourceName, &conf), + resource.TestCheckResourceAttr(resourceName, "vpc_config.0.subnet_ids.#", "2"), + resource.TestCheckResourceAttr(resourceName, "vpc_config.0.security_group_ids.#", "2"), + resource.TestCheckResourceAttrPair(resourceName, "vpc_config.0.vpc_id", "aws_vpc.test", names.AttrID), + resource.TestCheckResourceAttr(resourceName, "vpc_config.0.ipv6_allowed_for_dual_stack", acctest.CtTrue), + ), + }, + 
{ + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"zip_file", "start_canary", "delete_lambda"}, + }, + { + Config: testAccCanaryConfig_vpcIPv6AllowedForDualStack(rName, false), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckCanaryExists(ctx, resourceName, &conf), + resource.TestCheckResourceAttr(resourceName, "vpc_config.0.subnet_ids.#", "2"), + resource.TestCheckResourceAttr(resourceName, "vpc_config.0.security_group_ids.#", "2"), + resource.TestCheckResourceAttrPair(resourceName, "vpc_config.0.vpc_id", "aws_vpc.test", names.AttrID), + resource.TestCheckResourceAttr(resourceName, "vpc_config.0.ipv6_allowed_for_dual_stack", acctest.CtFalse), + ), + }, + { + Config: testAccCanaryConfig_vpcIPv6AllowedForDualStack(rName, true), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckCanaryExists(ctx, resourceName, &conf), + resource.TestCheckResourceAttr(resourceName, "vpc_config.0.subnet_ids.#", "2"), + resource.TestCheckResourceAttr(resourceName, "vpc_config.0.security_group_ids.#", "2"), + resource.TestCheckResourceAttrPair(resourceName, "vpc_config.0.vpc_id", "aws_vpc.test", names.AttrID), + resource.TestCheckResourceAttr(resourceName, "vpc_config.0.ipv6_allowed_for_dual_stack", acctest.CtTrue), + ), + }, + { + Config: testAccCanaryConfig_vpcIPv6AllowedForDualStackUpdated(rName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckCanaryExists(ctx, resourceName, &conf), + resource.TestCheckResourceAttr(resourceName, "vpc_config.0.subnet_ids.#", "2"), + resource.TestCheckResourceAttr(resourceName, "vpc_config.0.security_group_ids.#", "2"), + resource.TestCheckResourceAttrPair(resourceName, "vpc_config.0.vpc_id", "aws_vpc.test", names.AttrID), + resource.TestCheckResourceAttr(resourceName, "vpc_config.0.ipv6_allowed_for_dual_stack", acctest.CtFalse), + ), + }, + }, + }) +} + +func TestAccSyntheticsCanary_runConfigEphemeralStorage(t *testing.T) { + ctx := 
acctest.Context(t) + var conf1, conf2 awstypes.Canary + rName := fmt.Sprintf("tf-acc-test-%s", sdkacctest.RandString(8)) + resourceName := "aws_synthetics_canary.test" + runtimeVersionDataSourceName := "data.aws_synthetics_runtime_version.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.SyntheticsServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckCanaryDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccCanaryConfig_runConfigEphemeralStorage(rName, 1024), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckCanaryExists(ctx, resourceName, &conf1), + acctest.MatchResourceAttrRegionalARN(ctx, resourceName, names.AttrARN, "synthetics", regexache.MustCompile(`canary:.+`)), + resource.TestCheckResourceAttr(resourceName, names.AttrName, rName), + resource.TestCheckResourceAttrPair(resourceName, "runtime_version", runtimeVersionDataSourceName, "version_name"), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, "0"), + resource.TestCheckResourceAttr(resourceName, "run_config.0.memory_in_mb", "1500"), + resource.TestCheckResourceAttr(resourceName, "run_config.0.timeout_in_seconds", "840"), + resource.TestCheckResourceAttr(resourceName, "run_config.0.ephemeral_storage", "1024"), + resource.TestCheckResourceAttr(resourceName, "schedule.0.duration_in_seconds", "0"), + resource.TestCheckResourceAttr(resourceName, "schedule.0.expression", "rate(0 hour)"), + acctest.MatchResourceAttrRegionalARN(ctx, resourceName, "engine_arn", "lambda", regexache.MustCompile(fmt.Sprintf(`function:cwsyn-%s.+`, rName))), + //acctest.MatchResourceAttrRegionalARN(ctx, resourceName, "source_location_arn", "lambda", regexache.MustCompile(fmt.Sprintf(`layer:cwsyn-%s.+`, rName))), + resource.TestCheckResourceAttrPair(resourceName, names.AttrExecutionRoleARN, "aws_iam_role.test", names.AttrARN), + 
resource.TestCheckResourceAttr(resourceName, "artifact_s3_location", fmt.Sprintf("%s/", rName)), + resource.TestCheckResourceAttr(resourceName, "timeline.#", "1"), + resource.TestCheckResourceAttrSet(resourceName, "timeline.0.created"), + resource.TestCheckResourceAttr(resourceName, names.AttrStatus, "READY"), + resource.TestCheckResourceAttr(resourceName, "artifact_config.#", "0"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"zip_file", "start_canary", "delete_lambda"}, + }, + { + Config: testAccCanaryConfig_runConfigEphemeralStorage(rName, 2048), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckCanaryExists(ctx, resourceName, &conf2), + acctest.MatchResourceAttrRegionalARN(ctx, resourceName, names.AttrARN, "synthetics", regexache.MustCompile(`canary:.+`)), + resource.TestCheckResourceAttr(resourceName, names.AttrName, rName), + resource.TestCheckResourceAttrPair(resourceName, "runtime_version", runtimeVersionDataSourceName, "version_name"), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, "0"), + resource.TestCheckResourceAttr(resourceName, "run_config.0.memory_in_mb", "1500"), + resource.TestCheckResourceAttr(resourceName, "run_config.0.timeout_in_seconds", "840"), + resource.TestCheckResourceAttr(resourceName, "run_config.0.ephemeral_storage", "2048"), + resource.TestCheckResourceAttr(resourceName, "schedule.0.duration_in_seconds", "0"), + resource.TestCheckResourceAttr(resourceName, "schedule.0.expression", "rate(0 hour)"), + acctest.MatchResourceAttrRegionalARN(ctx, resourceName, "engine_arn", "lambda", regexache.MustCompile(fmt.Sprintf(`function:cwsyn-%s.+`, rName))), + //acctest.MatchResourceAttrRegionalARN(ctx, resourceName, "source_location_arn", "lambda", regexache.MustCompile(fmt.Sprintf(`layer:cwsyn-%s.+`, rName))), + resource.TestCheckResourceAttrPair(resourceName, names.AttrExecutionRoleARN, "aws_iam_role.test", names.AttrARN), + 
resource.TestCheckResourceAttr(resourceName, "artifact_s3_location", fmt.Sprintf("%s/", rName)), + resource.TestCheckResourceAttr(resourceName, "timeline.#", "1"), + resource.TestCheckResourceAttrSet(resourceName, "timeline.0.created"), + resource.TestCheckResourceAttr(resourceName, names.AttrStatus, "READY"), + resource.TestCheckResourceAttr(resourceName, "artifact_config.#", "0"), + ), + }, + }, + }) +} + +func TestAccSyntheticsCanary_scheduleRetryConfig(t *testing.T) { + ctx := acctest.Context(t) + var conf1, conf2 awstypes.Canary + rName := fmt.Sprintf("tf-acc-test-%s", sdkacctest.RandString(8)) + resourceName := "aws_synthetics_canary.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.SyntheticsServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckCanaryDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccCanaryConfig_scheduleRetryConfig(rName, 1), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckCanaryExists(ctx, resourceName, &conf1), + resource.TestCheckResourceAttr(resourceName, "schedule.0.duration_in_seconds", "0"), + resource.TestCheckResourceAttr(resourceName, "schedule.0.expression", "rate(0 hour)"), + resource.TestCheckResourceAttr(resourceName, "schedule.0.retry_config.#", "1"), + resource.TestCheckResourceAttr(resourceName, "schedule.0.retry_config.0.max_retries", "1"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"zip_file", "start_canary", "delete_lambda"}, + }, + { + Config: testAccCanaryConfig_scheduleRetryConfig(rName, 2), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckCanaryExists(ctx, resourceName, &conf2), + resource.TestCheckResourceAttr(resourceName, "schedule.0.duration_in_seconds", "0"), + resource.TestCheckResourceAttr(resourceName, "schedule.0.expression", "rate(0 hour)"), 
+ resource.TestCheckResourceAttr(resourceName, "schedule.0.retry_config.#", "1"), + resource.TestCheckResourceAttr(resourceName, "schedule.0.retry_config.0.max_retries", "2"), + testAccCheckCanaryIsUpdated(&conf1, &conf2), + ), + }, + { + Config: testAccCanaryConfig_basic(rName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckCanaryExists(ctx, resourceName, &conf2), + resource.TestCheckResourceAttr(resourceName, "schedule.0.duration_in_seconds", "0"), + resource.TestCheckResourceAttr(resourceName, "schedule.0.expression", "rate(0 hour)"), + resource.TestCheckResourceAttr(resourceName, "schedule.0.retry_config.#", "1"), + resource.TestCheckResourceAttr(resourceName, "schedule.0.retry_config.0.max_retries", "2"), // unchanged + ), + }, + }, + }) +} + func TestAccSyntheticsCanary_tags(t *testing.T) { ctx := acctest.Context(t) var conf awstypes.Canary @@ -1263,6 +1458,121 @@ resource "aws_synthetics_canary" "test" { `, rName)) } +func testAccCanaryConfig_vpcIPv6AllowedForDualStack(rName string, ipv6 bool) string { + return acctest.ConfigCompose( + testAccCanaryConfig_base(rName), + acctest.ConfigVPCWithSubnetsIPv6(rName, 2), + testAccCanarySecurityGroupBaseConfig(rName, 2), + fmt.Sprintf(` +resource "aws_synthetics_canary" "test" { + name = %[1]q + artifact_s3_location = "s3://${aws_s3_bucket.test.bucket}/" + execution_role_arn = aws_iam_role.test.arn + handler = "exports.handler" + zip_file = "test-fixtures/lambdatest.zip" + runtime_version = data.aws_synthetics_runtime_version.test.version_name + delete_lambda = true + + schedule { + expression = "rate(0 minute)" + } + + vpc_config { + subnet_ids = aws_subnet.test[*].id + security_group_ids = aws_security_group.test[*].id + + ipv6_allowed_for_dual_stack = %[2]t + } + + depends_on = [aws_iam_role_policy_attachment.test] +} +`, rName, ipv6)) +} + +func testAccCanaryConfig_vpcIPv6AllowedForDualStackUpdated(rName string) string { + return acctest.ConfigCompose( + testAccCanaryConfig_base(rName), + 
acctest.ConfigVPCWithSubnetsIPv6(rName, 2), + testAccCanarySecurityGroupBaseConfig(rName, 2), + fmt.Sprintf(` +resource "aws_synthetics_canary" "test" { + name = %[1]q + artifact_s3_location = "s3://${aws_s3_bucket.test.bucket}/" + execution_role_arn = aws_iam_role.test.arn + handler = "exports.handler" + zip_file = "test-fixtures/lambdatest.zip" + runtime_version = data.aws_synthetics_runtime_version.test.version_name + delete_lambda = true + + schedule { + expression = "rate(0 minute)" + } + + vpc_config { + subnet_ids = aws_subnet.test[*].id + security_group_ids = aws_security_group.test[*].id + } + + depends_on = [aws_iam_role_policy_attachment.test] +} +`, rName)) +} + +func testAccCanaryConfig_runConfigEphemeralStorage(rName string, ephemeralStorage int) string { + return acctest.ConfigCompose( + testAccCanaryConfig_base(rName), + fmt.Sprintf(` +resource "aws_synthetics_canary" "test" { + # Must have bucket versioning enabled first + depends_on = [aws_s3_bucket_versioning.test, aws_iam_role.test, aws_iam_role_policy.test] + + name = %[1]q + artifact_s3_location = "s3://${aws_s3_bucket.test.bucket}/" + execution_role_arn = aws_iam_role.test.arn + handler = "exports.handler" + zip_file = "test-fixtures/lambdatest.zip" + runtime_version = data.aws_synthetics_runtime_version.test.version_name + delete_lambda = true + + schedule { + expression = "rate(0 minute)" + } + run_config { + ephemeral_storage = %[2]d + } +} +`, rName, ephemeralStorage)) +} + +func testAccCanaryConfig_scheduleRetryConfig(rName string, maxRetries int) string { + return acctest.ConfigCompose( + testAccCanaryConfig_base(rName), + fmt.Sprintf(` +resource "aws_synthetics_canary" "test" { + # Must have bucket versioning enabled first + depends_on = [aws_s3_bucket_versioning.test, aws_iam_role.test, aws_iam_role_policy.test] + + name = %[1]q + artifact_s3_location = "s3://${aws_s3_bucket.test.bucket}/" + execution_role_arn = aws_iam_role.test.arn + handler = "exports.handler" + zip_file = 
"test-fixtures/lambdatest.zip" + runtime_version = data.aws_synthetics_runtime_version.test.version_name + delete_lambda = true + + schedule { + expression = "rate(0 minute)" + retry_config { + max_retries = %[2]d + } + } + run_config { + timeout_in_seconds = 60 + } +} +`, rName, maxRetries)) +} + func testAccCanaryConfig_tags1(rName, tagKey1, tagValue1 string) string { return acctest.ConfigCompose( testAccCanaryConfig_base(rName), diff --git a/internal/service/synthetics/service_endpoint_resolver_gen.go b/internal/service/synthetics/service_endpoint_resolver_gen.go index c9f76544d5c2..c2c5bb5924ea 100644 --- a/internal/service/synthetics/service_endpoint_resolver_gen.go +++ b/internal/service/synthetics/service_endpoint_resolver_gen.go @@ -62,7 +62,7 @@ func (r resolverV2) ResolveEndpoint(ctx context.Context, params synthetics.Endpo }) params.UseFIPS = aws.Bool(false) } else { - err = fmt.Errorf("looking up synthetics endpoint %q: %s", hostname, err) + err = fmt.Errorf("looking up synthetics endpoint %q: %w", hostname, err) return } } else { diff --git a/internal/service/synthetics/service_endpoints_gen_test.go b/internal/service/synthetics/service_endpoints_gen_test.go index 646cf8a7fe16..a0e57a9d0e04 100644 --- a/internal/service/synthetics/service_endpoints_gen_test.go +++ b/internal/service/synthetics/service_endpoints_gen_test.go @@ -521,7 +521,7 @@ func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { ) } -var errCancelOperation = fmt.Errorf("Test: Canceling request") +var errCancelOperation = errors.New("Test: Canceling request") func addCancelRequestMiddleware() func(*middleware.Stack) error { return func(stack *middleware.Stack) error { diff --git a/internal/service/synthetics/service_package_gen.go b/internal/service/synthetics/service_package_gen.go index a9ac74725bcd..37a22925e6fa 100644 --- a/internal/service/synthetics/service_package_gen.go +++ b/internal/service/synthetics/service_package_gen.go @@ -7,7 +7,6 @@ import ( 
"unique" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/synthetics" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -95,7 +94,7 @@ func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) ( func(o *synthetics.Options) { if inContext, ok := conns.FromContext(ctx); ok && inContext.VCREnabled() { tflog.Info(ctx, "overriding retry behavior to immediately return VCR errors") - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(vcr.InteractionNotFoundRetryableFunc)) + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), vcr.InteractionNotFoundRetryableFunc) } }, withExtraOptions(ctx, p, config), diff --git a/internal/service/synthetics/sweep.go b/internal/service/synthetics/sweep.go index d2d54aeeb26f..f40148acc021 100644 --- a/internal/service/synthetics/sweep.go +++ b/internal/service/synthetics/sweep.go @@ -31,7 +31,7 @@ func sweepCanaries(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.SyntheticsClient(ctx) diff --git a/internal/service/synthetics/tags_gen.go b/internal/service/synthetics/tags_gen.go index a96bd0775410..aaa857ee272b 100644 --- a/internal/service/synthetics/tags_gen.go +++ b/internal/service/synthetics/tags_gen.go @@ -3,8 +3,8 @@ package synthetics import ( "context" - "fmt" + "github.com/YakDriver/smarterr" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/synthetics" "github.com/hashicorp/terraform-plugin-log/tflog" @@ -26,7 +26,7 @@ func listTags(ctx context.Context, conn *synthetics.Client, identifier string, o output, err := conn.ListTagsForResource(ctx, &input, optFns...) 
if err != nil { - return tftags.New(ctx, nil), err + return tftags.New(ctx, nil), smarterr.NewError(err) } return keyValueTags(ctx, output.Tags), nil @@ -38,7 +38,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri tags, err := listTags(ctx, meta.(*conns.AWSClient).SyntheticsClient(ctx), identifier) if err != nil { - return err + return smarterr.NewError(err) } if inContext, ok := tftags.FromContext(ctx); ok { @@ -99,7 +99,7 @@ func updateTags(ctx context.Context, conn *synthetics.Client, identifier string, _, err := conn.UntagResource(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("untagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } @@ -114,7 +114,7 @@ func updateTags(ctx context.Context, conn *synthetics.Client, identifier string, _, err := conn.TagResource(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("tagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } diff --git a/internal/service/taxsettings/service_endpoint_resolver_gen.go b/internal/service/taxsettings/service_endpoint_resolver_gen.go index ec3e69fc5394..5fd9a8163ea4 100644 --- a/internal/service/taxsettings/service_endpoint_resolver_gen.go +++ b/internal/service/taxsettings/service_endpoint_resolver_gen.go @@ -62,7 +62,7 @@ func (r resolverV2) ResolveEndpoint(ctx context.Context, params taxsettings.Endp }) params.UseFIPS = aws.Bool(false) } else { - err = fmt.Errorf("looking up taxsettings endpoint %q: %s", hostname, err) + err = fmt.Errorf("looking up taxsettings endpoint %q: %w", hostname, err) return } } else { diff --git a/internal/service/taxsettings/service_endpoints_gen_test.go b/internal/service/taxsettings/service_endpoints_gen_test.go index 08c2bb5380fa..6d858cc22aa7 100644 --- a/internal/service/taxsettings/service_endpoints_gen_test.go +++ b/internal/service/taxsettings/service_endpoints_gen_test.go @@ -521,7 +521,7 @@ func retrieveRegionMiddleware(region *string) 
middleware.SerializeMiddleware { ) } -var errCancelOperation = fmt.Errorf("Test: Canceling request") +var errCancelOperation = errors.New("Test: Canceling request") func addCancelRequestMiddleware() func(*middleware.Stack) error { return func(stack *middleware.Stack) error { diff --git a/internal/service/taxsettings/service_package_gen.go b/internal/service/taxsettings/service_package_gen.go index ad2253cc3814..064115b70cd3 100644 --- a/internal/service/taxsettings/service_package_gen.go +++ b/internal/service/taxsettings/service_package_gen.go @@ -6,7 +6,6 @@ import ( "context" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/taxsettings" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -56,7 +55,7 @@ func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) ( func(o *taxsettings.Options) { if inContext, ok := conns.FromContext(ctx); ok && inContext.VCREnabled() { tflog.Info(ctx, "overriding retry behavior to immediately return VCR errors") - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(vcr.InteractionNotFoundRetryableFunc)) + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), vcr.InteractionNotFoundRetryableFunc) } }, withExtraOptions(ctx, p, config), diff --git a/internal/service/timestreaminfluxdb/db_cluster.go b/internal/service/timestreaminfluxdb/db_cluster.go new file mode 100644 index 000000000000..578f941e03d9 --- /dev/null +++ b/internal/service/timestreaminfluxdb/db_cluster.go @@ -0,0 +1,713 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package timestreaminfluxdb + +import ( + "context" + "errors" + "math" + "time" + + "github.com/YakDriver/regexache" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/timestreaminfluxdb" + awstypes "github.com/aws/aws-sdk-go-v2/service/timestreaminfluxdb/types" + "github.com/hashicorp/terraform-plugin-framework-timeouts/resource/timeouts" + "github.com/hashicorp/terraform-plugin-framework-validators/int32validator" + "github.com/hashicorp/terraform-plugin-framework-validators/int64validator" + "github.com/hashicorp/terraform-plugin-framework-validators/listvalidator" + "github.com/hashicorp/terraform-plugin-framework-validators/setvalidator" + "github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/boolplanmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/int32planmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/setplanmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringdefault" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-provider-aws/internal/create" + "github.com/hashicorp/terraform-provider-aws/internal/enum" + "github.com/hashicorp/terraform-provider-aws/internal/errs" + "github.com/hashicorp/terraform-provider-aws/internal/errs/fwdiag" + "github.com/hashicorp/terraform-provider-aws/internal/framework" + fwflex 
"github.com/hashicorp/terraform-provider-aws/internal/framework/flex" + fwtypes "github.com/hashicorp/terraform-provider-aws/internal/framework/types" + "github.com/hashicorp/terraform-provider-aws/internal/retry" + tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + "github.com/hashicorp/terraform-provider-aws/names" +) + +// @FrameworkResource("aws_timestreaminfluxdb_db_cluster", name="DB Cluster") +// @Tags(identifierAttribute="arn") +// @Testing(existsType="github.com/aws/aws-sdk-go-v2/service/timestreaminfluxdb;timestreaminfluxdb.GetDbClusterOutput") +// @Testing(importIgnore="bucket;username;organization;password") +// @Testing(existsTakesT=true) +// @Testing(destroyTakesT=true) +func newDBClusterResource(_ context.Context) (resource.ResourceWithConfigure, error) { + r := &dbClusterResource{} + + r.SetDefaultCreateTimeout(30 * time.Minute) + r.SetDefaultUpdateTimeout(30 * time.Minute) + r.SetDefaultDeleteTimeout(30 * time.Minute) + + return r, nil +} + +const ( + ResNameDBCluster = "DB Cluster" +) + +type dbClusterResource struct { + framework.ResourceWithModel[dbClusterResourceModel] + framework.WithTimeouts + framework.WithImportByID +} + +func (r *dbClusterResource) Schema(ctx context.Context, req resource.SchemaRequest, resp *resource.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: map[string]schema.Attribute{ + names.AttrAllocatedStorage: schema.Int64Attribute{ + Required: true, + Validators: []validator.Int64{ + int64validator.Between(20, 16384), + }, + Description: `The amount of storage to allocate for your DB storage type in GiB (gibibytes).`, + }, + names.AttrARN: framework.ARNAttributeComputedOnly(), + names.AttrBucket: schema.StringAttribute{ + Required: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + Validators: []validator.String{ + stringvalidator.LengthBetween(2, 64), + stringvalidator.RegexMatches( + 
regexache.MustCompile("^[^_][^\"]*$"), + "", + ), + }, + Description: `The name of the initial InfluxDB bucket. All InfluxDB data is stored in a bucket. + A bucket combines the concept of a database and a retention period (the duration of time + that each data point persists). A bucket belongs to an organization.`, + }, + "db_instance_type": schema.StringAttribute{ + CustomType: fwtypes.StringEnumType[awstypes.DbInstanceType](), + Required: true, + Description: `The Timestream for InfluxDB DB instance type to run InfluxDB on.`, + }, + "db_parameter_group_identifier": schema.StringAttribute{ + Optional: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplaceIf( + dbClusterDBParameterGroupIdentifierReplaceIf, "Replace db_parameter_group_identifier diff", "Replace db_parameter_group_identifier diff", + ), + }, + Validators: []validator.String{ + stringvalidator.LengthBetween(3, 64), + stringvalidator.RegexMatches( + regexache.MustCompile("^[a-zA-Z0-9]+$"), + "", + ), + }, + Description: `The ID of the DB parameter group to assign to your DB cluster. + DB parameter groups specify how the database is configured. For example, DB parameter groups + can specify the limit for query concurrency.`, + }, + "db_storage_type": schema.StringAttribute{ + CustomType: fwtypes.StringEnumType[awstypes.DbStorageType](), + Optional: true, + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + stringplanmodifier.UseStateForUnknown(), + }, + Description: `The Timestream for InfluxDB DB storage type to read and write InfluxDB data. 
+ You can choose between 3 different types of provisioned Influx IOPS included storage according + to your workloads requirements: Influx IO Included 3000 IOPS, Influx IO Included 12000 IOPS, + Influx IO Included 16000 IOPS.`, + }, + "deployment_type": schema.StringAttribute{ + CustomType: fwtypes.StringEnumType[awstypes.ClusterDeploymentType](), + Optional: true, + Computed: true, + Default: stringdefault.StaticString(string(awstypes.ClusterDeploymentTypeMultiNodeReadReplicas)), + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + stringplanmodifier.UseStateForUnknown(), + }, + Description: `Specifies the type of cluster to create.`, + }, + names.AttrEndpoint: schema.StringAttribute{ + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + Description: `The endpoint used to connect to InfluxDB. The default InfluxDB port is 8086.`, + }, + "failover_mode": schema.StringAttribute{ + CustomType: fwtypes.StringEnumType[awstypes.FailoverMode](), + Optional: true, + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + Description: `Specifies the behavior of failure recovery when the primary node of the cluster + fails.`, + }, + names.AttrID: framework.IDAttribute(), + "influx_auth_parameters_secret_arn": schema.StringAttribute{ + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + Description: `The Amazon Resource Name (ARN) of the AWS Secrets Manager secret containing the + initial InfluxDB authorization parameters. 
The secret value is a JSON formatted + key-value pair holding InfluxDB authorization values: organization, bucket, + username, and password.`, + }, + names.AttrName: schema.StringAttribute{ + Required: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + Validators: []validator.String{ + stringvalidator.LengthBetween(3, 40), + stringvalidator.RegexMatches( + regexache.MustCompile("^[a-zA-z][a-zA-Z0-9]*(-[a-zA-Z0-9]+)*$"), + "", + ), + }, + Description: `The name that uniquely identifies the DB cluster when interacting with the + Amazon Timestream for InfluxDB API and CLI commands. This name will also be a + prefix included in the endpoint. DB cluster names must be unique per customer + and per region.`, + }, + "network_type": schema.StringAttribute{ + CustomType: fwtypes.StringEnumType[awstypes.NetworkType](), + Optional: true, + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + stringplanmodifier.UseStateForUnknown(), + }, + Description: `Specifies whether the networkType of the Timestream for InfluxDB cluster is + IPV4, which can communicate over IPv4 protocol only, or DUAL, which can communicate + over both IPv4 and IPv6 protocols.`, + }, + names.AttrTags: tftags.TagsAttribute(), + names.AttrTagsAll: tftags.TagsAttributeComputedOnly(), + "organization": schema.StringAttribute{ + Required: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + Validators: []validator.String{ + stringvalidator.LengthBetween(1, 64), + }, + Description: `The name of the initial organization for the initial admin user in InfluxDB. 
An + InfluxDB organization is a workspace for a group of users.`, + }, + names.AttrPassword: schema.StringAttribute{ + Required: true, + Sensitive: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + Validators: []validator.String{ + stringvalidator.LengthBetween(8, 64), + stringvalidator.RegexMatches(regexache.MustCompile("^[a-zA-Z0-9]+$"), ""), + }, + Description: `The password of the initial admin user created in InfluxDB. This password will + allow you to access the InfluxDB UI to perform various administrative tasks and + also use the InfluxDB CLI to create an operator token. These attributes will be + stored in a Secret created in AWS SecretManager in your account.`, + }, + names.AttrPort: schema.Int32Attribute{ + Optional: true, + Computed: true, + PlanModifiers: []planmodifier.Int32{ + int32planmodifier.UseStateForUnknown(), + }, + Validators: []validator.Int32{ + int32validator.Between(1024, 65535), + int32validator.NoneOf(2375, 2376, 7788, 7789, 7790, 7791, 7792, 7793, 7794, 7795, 7796, 7797, 7798, 7799, 8090, 51678, 51679, 51680), + }, + Description: `The port number on which InfluxDB accepts connections.`, + }, + names.AttrPubliclyAccessible: schema.BoolAttribute{ + Optional: true, + Computed: true, + PlanModifiers: []planmodifier.Bool{ + boolplanmodifier.RequiresReplace(), + boolplanmodifier.UseStateForUnknown(), + }, + Description: `Configures the Timestream for InfluxDB cluster with a public IP to facilitate access.`, + }, + "reader_endpoint": schema.StringAttribute{ + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + Description: `The endpoint used to connect to the Timestream for InfluxDB cluster for + read-only operations.`, + }, + names.AttrUsername: schema.StringAttribute{ + Required: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + Validators: []validator.String{ + stringvalidator.RegexMatches( + 
regexache.MustCompile("^[a-zA-Z]([a-zA-Z0-9]*(-[a-zA-Z0-9]+)*)?$"), + `Must start with a letter and can't end with a hyphen or contain two + consecutive hyphens`, + ), + }, + Description: `The username of the initial admin user created in InfluxDB. + Must start with a letter and can't end with a hyphen or contain two + consecutive hyphens. For example, my-user1. This username will allow + you to access the InfluxDB UI to perform various administrative tasks + and also use the InfluxDB CLI to create an operator token. These + attributes will be stored in a Secret created in Amazon Secrets + Manager in your account.`, + }, + names.AttrVPCSecurityGroupIDs: schema.SetAttribute{ + CustomType: fwtypes.SetOfStringType, + Required: true, + PlanModifiers: []planmodifier.Set{ + setplanmodifier.RequiresReplace(), + }, + Validators: []validator.Set{ + setvalidator.SizeBetween(1, 5), + setvalidator.ValueStringsAre( + stringvalidator.LengthAtMost(64), + stringvalidator.RegexMatches(regexache.MustCompile("^sg-[a-z0-9]+$"), ""), + ), + }, + Description: `A list of VPC security group IDs to associate with the Timestream for InfluxDB cluster.`, + }, + "vpc_subnet_ids": schema.SetAttribute{ + CustomType: fwtypes.SetOfStringType, + Required: true, + PlanModifiers: []planmodifier.Set{ + setplanmodifier.RequiresReplace(), + }, + Validators: []validator.Set{ + setvalidator.SizeBetween(1, 3), + setvalidator.ValueStringsAre( + stringvalidator.LengthAtMost(64), + stringvalidator.RegexMatches(regexache.MustCompile("^subnet-[a-z0-9]+$"), ""), + ), + }, + Description: `A list of VPC subnet IDs to associate with the DB cluster. 
Provide at least + two VPC subnet IDs in different availability zones when deploying with a Multi-AZ standby.`, + }, + }, + Blocks: map[string]schema.Block{ + "log_delivery_configuration": schema.ListNestedBlock{ + CustomType: fwtypes.NewListNestedObjectTypeOf[dbClusterLogDeliveryConfigurationData](ctx), + Validators: []validator.List{ + listvalidator.SizeAtMost(1), + }, + Description: `Configuration for sending InfluxDB engine logs to a specified S3 bucket.`, + NestedObject: schema.NestedBlockObject{ + Blocks: map[string]schema.Block{ + "s3_configuration": schema.ListNestedBlock{ + CustomType: fwtypes.NewListNestedObjectTypeOf[dbClusterS3ConfigurationData](ctx), + Validators: []validator.List{ + listvalidator.SizeAtMost(1), + }, + NestedObject: schema.NestedBlockObject{ + Attributes: map[string]schema.Attribute{ + names.AttrBucketName: schema.StringAttribute{ + Required: true, + Validators: []validator.String{ + stringvalidator.LengthBetween(3, 63), + stringvalidator.RegexMatches(regexache.MustCompile("^[0-9a-z]+[0-9a-z\\.\\-]*[0-9a-z]+$"), ""), + }, + Description: `The name of the S3 bucket to deliver logs to.`, + }, + names.AttrEnabled: schema.BoolAttribute{ + Required: true, + Description: `Indicates whether log delivery to the S3 bucket is enabled.`, + }, + }, + }, + Description: `Configuration for S3 bucket log delivery.`, + }, + }, + }, + }, + names.AttrTimeouts: timeouts.Block(ctx, timeouts.Opts{ + Create: true, + Update: true, + Delete: true, + }), + }, + } +} + +func dbClusterDBParameterGroupIdentifierReplaceIf(ctx context.Context, req planmodifier.StringRequest, resp *stringplanmodifier.RequiresReplaceIfFuncResponse) { + if req.State.Raw.IsNull() || req.Plan.Raw.IsNull() { + return + } + var plan, state dbClusterResourceModel + resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...) + resp.Diagnostics.Append(req.State.Get(ctx, &state)...) 
+ if resp.Diagnostics.HasError() { + return + } + + // If the DBParameterGroupIdentifier is being removed, the cluster must be recreated. + dbParameterGroupIdentifierRemoved := !state.DBParameterGroupIdentifier.IsNull() && plan.DBParameterGroupIdentifier.IsNull() + + resp.RequiresReplace = dbParameterGroupIdentifierRemoved +} + +func (r *dbClusterResource) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { + conn := r.Meta().TimestreamInfluxDBClient(ctx) + + var plan dbClusterResourceModel + resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...) + if resp.Diagnostics.HasError() { + return + } + + input := timestreaminfluxdb.CreateDbClusterInput{} + resp.Diagnostics.Append(fwflex.Expand(ctx, plan, &input)...) + if resp.Diagnostics.HasError() { + return + } + + input.Tags = getTagsIn(ctx) + + out, err := conn.CreateDbCluster(ctx, &input) + if err != nil { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.TimestreamInfluxDB, create.ErrActionCreating, ResNameDBCluster, plan.Name.String(), err), + err.Error(), + ) + return + } + + if out == nil { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.TimestreamInfluxDB, create.ErrActionCreating, ResNameDBCluster, plan.Name.String(), nil), + errors.New("empty output").Error(), + ) + return + } + + if out.DbClusterId == nil { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.TimestreamInfluxDB, create.ErrActionCreating, ResNameDBCluster, plan.Name.String(), nil), + errors.New("received response with nil DbClusterId").Error(), + ) + return + } + + state := plan + state.ID = fwflex.StringToFramework(ctx, out.DbClusterId) + + createTimeout := r.CreateTimeout(ctx, plan.Timeouts) + output, err := waitDBClusterCreated(ctx, conn, state.ID.ValueString(), createTimeout) + if err != nil { + resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root(names.AttrID), out.DbClusterId)...) 
+ resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.TimestreamInfluxDB, create.ErrActionWaitingForCreation, ResNameDBCluster, plan.Name.String(), err), + err.Error(), + ) + return + } + + resp.Diagnostics.Append(fwflex.Flatten(ctx, output, &state)...) + if resp.Diagnostics.HasError() { + return + } + + resp.Diagnostics.Append(resp.State.Set(ctx, &state)...) +} + +func (r *dbClusterResource) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { + conn := r.Meta().TimestreamInfluxDBClient(ctx) + + var state dbClusterResourceModel + resp.Diagnostics.Append(req.State.Get(ctx, &state)...) + if resp.Diagnostics.HasError() { + return + } + + output, err := findDBClusterByID(ctx, conn, state.ID.ValueString()) + if retry.NotFound(err) { + resp.Diagnostics.Append(fwdiag.NewResourceNotFoundWarningDiagnostic(err)) + resp.State.RemoveResource(ctx) + return + } + + if err != nil { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.TimestreamInfluxDB, create.ErrActionReading, ResNameDBCluster, state.ID.String(), err), + err.Error(), + ) + return + } + + resp.Diagnostics.Append(fwflex.Flatten(ctx, output, &state)...) + if resp.Diagnostics.HasError() { + return + } + + resp.Diagnostics.Append(resp.State.Set(ctx, &state)...) +} + +func (r *dbClusterResource) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) { + conn := r.Meta().TimestreamInfluxDBClient(ctx) + + var plan, state dbClusterResourceModel + resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...) + resp.Diagnostics.Append(req.State.Get(ctx, &state)...) + if resp.Diagnostics.HasError() { + return + } + + diff, d := fwflex.Diff(ctx, plan, state) + resp.Diagnostics.Append(d...) 
+ if resp.Diagnostics.HasError() { + return + } + + if diff.HasChanges() { + input := timestreaminfluxdb.UpdateDbClusterInput{ + DbClusterId: plan.ID.ValueStringPointer(), + } + resp.Diagnostics.Append(fwflex.Expand(ctx, plan, &input, diff.IgnoredFieldNamesOpts()...)...) + if resp.Diagnostics.HasError() { + return + } + + _, err := conn.UpdateDbCluster(ctx, &input) + if err != nil { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.TimestreamInfluxDB, create.ErrActionUpdating, ResNameDBCluster, plan.ID.String(), err), + err.Error(), + ) + return + } + + updateTimeout := r.UpdateTimeout(ctx, plan.Timeouts) + output, err := waitDBClusterUpdated(ctx, conn, plan.ID.ValueString(), updateTimeout) + if err != nil { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.TimestreamInfluxDB, create.ErrActionWaitingForUpdate, ResNameDBCluster, plan.ID.String(), err), + err.Error(), + ) + return + } + + resp.Diagnostics.Append(fwflex.Flatten(ctx, output, &plan)...) + if resp.Diagnostics.HasError() { + return + } + } + + resp.Diagnostics.Append(resp.State.Set(ctx, &plan)...) +} + +func (r *dbClusterResource) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { + conn := r.Meta().TimestreamInfluxDBClient(ctx) + + var state dbClusterResourceModel + resp.Diagnostics.Append(req.State.Get(ctx, &state)...) 
+ if resp.Diagnostics.HasError() { + return + } + + in := &timestreaminfluxdb.DeleteDbClusterInput{ + DbClusterId: state.ID.ValueStringPointer(), + } + + _, err := conn.DeleteDbCluster(ctx, in) + if err != nil { + if errs.IsA[*awstypes.ResourceNotFoundException](err) { + return + } + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.TimestreamInfluxDB, create.ErrActionDeleting, ResNameDBCluster, state.ID.String(), err), + err.Error(), + ) + return + } + + deleteTimeout := r.DeleteTimeout(ctx, state.Timeouts) + _, err = waitDBClusterDeleted(ctx, conn, state.ID.ValueString(), deleteTimeout) + if err != nil { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.TimestreamInfluxDB, create.ErrActionWaitingForDeletion, ResNameDBCluster, state.ID.String(), err), + err.Error(), + ) + return + } +} + +func (r *dbClusterResource) ValidateConfig(ctx context.Context, req resource.ValidateConfigRequest, resp *resource.ValidateConfigResponse) { + var allocatedStorage types.Int64 + resp.Diagnostics.Append(req.Config.GetAttribute(ctx, path.Root(names.AttrAllocatedStorage), &allocatedStorage)...) 
+ if resp.Diagnostics.HasError() { + return + } + + if allocatedStorage.IsNull() || allocatedStorage.IsUnknown() { + return + } + + if allocatedStorage.ValueInt64() > math.MaxInt32 { + resp.Diagnostics.AddError( + "Invalid value for allocated_storage", + "allocated_storage was greater than the maximum allowed value for int32", + ) + return + } + + if allocatedStorage.ValueInt64() < math.MinInt32 { + resp.Diagnostics.AddError( + "Invalid value for allocated_storage", + "allocated_storage was less than the minimum allowed value for int32", + ) + return + } +} + +func waitDBClusterCreated(ctx context.Context, conn *timestreaminfluxdb.Client, id string, timeout time.Duration) (*timestreaminfluxdb.GetDbClusterOutput, error) { + stateConf := &retry.StateChangeConf{ + Pending: enum.Slice(awstypes.ClusterStatusCreating), + Target: enum.Slice(awstypes.ClusterStatusAvailable), + Refresh: statusDBCluster(conn, id), + Timeout: timeout, + NotFoundChecks: 20, + ContinuousTargetOccurence: 2, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + if out, ok := outputRaw.(*timestreaminfluxdb.GetDbClusterOutput); ok { + return out, err + } + + return nil, err +} + +func waitDBClusterUpdated(ctx context.Context, conn *timestreaminfluxdb.Client, id string, timeout time.Duration) (*timestreaminfluxdb.GetDbClusterOutput, error) { + stateConf := &retry.StateChangeConf{ + Pending: enum.Slice(string(awstypes.ClusterStatusUpdating), string(awstypes.StatusUpdatingInstanceType)), + Target: enum.Slice(awstypes.ClusterStatusAvailable), + Refresh: statusDBCluster(conn, id), + Timeout: timeout, + NotFoundChecks: 20, + ContinuousTargetOccurence: 2, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + if out, ok := outputRaw.(*timestreaminfluxdb.GetDbClusterOutput); ok { + return out, err + } + + return nil, err +} + +func waitDBClusterDeleted(ctx context.Context, conn *timestreaminfluxdb.Client, id string, timeout time.Duration) (*timestreaminfluxdb.GetDbClusterOutput, error) { + 
stateConf := &retry.StateChangeConf{ + Pending: enum.Slice(awstypes.ClusterStatusDeleting, awstypes.ClusterStatusDeleted), + Target: []string{}, + Refresh: statusDBCluster(conn, id), + Timeout: timeout, + Delay: 30 * time.Second, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + if out, ok := outputRaw.(*timestreaminfluxdb.GetDbClusterOutput); ok { + return out, err + } + + return nil, err +} + +func statusDBCluster(conn *timestreaminfluxdb.Client, id string) retry.StateRefreshFunc { + return func(ctx context.Context) (any, string, error) { + out, err := findDBClusterByID(ctx, conn, id) + if retry.NotFound(err) { + return nil, "", nil + } + + if err != nil { + return nil, "", err + } + return out, string(out.Status), nil + } +} + +func findDBClusterByID(ctx context.Context, conn *timestreaminfluxdb.Client, id string) (*timestreaminfluxdb.GetDbClusterOutput, error) { + in := &timestreaminfluxdb.GetDbClusterInput{ + DbClusterId: aws.String(id), + } + + out, err := conn.GetDbCluster(ctx, in) + + if errs.IsA[*awstypes.ResourceNotFoundException](err) { + return nil, &retry.NotFoundError{ + LastError: err, + } + } + + if err != nil { + return nil, err + } + + if out == nil || out.Id == nil { + return nil, tfresource.NewEmptyResultError(in) + } + + return out, nil +} + +type dbClusterResourceModel struct { + framework.WithRegionModel + AllocatedStorage types.Int64 `tfsdk:"allocated_storage"` + ARN types.String `tfsdk:"arn"` + Bucket types.String `tfsdk:"bucket"` + DBInstanceType fwtypes.StringEnum[awstypes.DbInstanceType] `tfsdk:"db_instance_type"` + DBParameterGroupIdentifier types.String `tfsdk:"db_parameter_group_identifier"` + DBStorageType fwtypes.StringEnum[awstypes.DbStorageType] `tfsdk:"db_storage_type"` + DeploymentType fwtypes.StringEnum[awstypes.ClusterDeploymentType] `tfsdk:"deployment_type"` + Endpoint types.String `tfsdk:"endpoint"` + FailoverMode fwtypes.StringEnum[awstypes.FailoverMode] `tfsdk:"failover_mode"` + ID types.String `tfsdk:"id"` + 
InfluxAuthParametersSecretARN types.String `tfsdk:"influx_auth_parameters_secret_arn"` + LogDeliveryConfiguration fwtypes.ListNestedObjectValueOf[dbClusterLogDeliveryConfigurationData] `tfsdk:"log_delivery_configuration"` + Name types.String `tfsdk:"name"` + NetworkType fwtypes.StringEnum[awstypes.NetworkType] `tfsdk:"network_type"` + Organization types.String `tfsdk:"organization"` + Password types.String `tfsdk:"password"` + Port types.Int32 `tfsdk:"port"` + PubliclyAccessible types.Bool `tfsdk:"publicly_accessible"` + ReaderEndpoint types.String `tfsdk:"reader_endpoint"` + Tags tftags.Map `tfsdk:"tags"` + TagsAll tftags.Map `tfsdk:"tags_all"` + Timeouts timeouts.Value `tfsdk:"timeouts"` + Username types.String `tfsdk:"username"` + VPCSecurityGroupIDs fwtypes.SetOfString `tfsdk:"vpc_security_group_ids"` + VPCSubnetIDs fwtypes.SetOfString `tfsdk:"vpc_subnet_ids"` +} + +type dbClusterLogDeliveryConfigurationData struct { + S3Configuration fwtypes.ListNestedObjectValueOf[dbClusterS3ConfigurationData] `tfsdk:"s3_configuration"` +} + +type dbClusterS3ConfigurationData struct { + BucketName types.String `tfsdk:"bucket_name"` + Enabled types.Bool `tfsdk:"enabled"` +} diff --git a/internal/service/timestreaminfluxdb/db_cluster_tags_gen_test.go b/internal/service/timestreaminfluxdb/db_cluster_tags_gen_test.go new file mode 100644 index 000000000000..3d73b909834d --- /dev/null +++ b/internal/service/timestreaminfluxdb/db_cluster_tags_gen_test.go @@ -0,0 +1,2364 @@ +// Code generated by internal/generate/tagstests/main.go; DO NOT EDIT. 
+ +package timestreaminfluxdb_test + +import ( + "testing" + + "github.com/aws/aws-sdk-go-v2/service/timestreaminfluxdb" + "github.com/hashicorp/terraform-plugin-testing/config" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/knownvalue" + "github.com/hashicorp/terraform-plugin-testing/plancheck" + "github.com/hashicorp/terraform-plugin-testing/statecheck" + "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccTimestreamInfluxDBDBCluster_tags(t *testing.T) { + ctx := acctest.Context(t) + + var v timestreaminfluxdb.GetDbClusterOutput + resourceName := "aws_timestreaminfluxdb_db_cluster.test" + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.TimestreamInfluxDBServiceID), + CheckDestroy: testAccCheckDBClusterDestroy(ctx, t), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + ConfigDirectory: config.StaticDirectory("testdata/DBCluster/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDBClusterExists(ctx, t, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: 
knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/DBCluster/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + names.AttrBucket, names.AttrUsername, "organization", names.AttrPassword, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/DBCluster/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1Updated), + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDBClusterExists(ctx, t, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1Updated), + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), 
knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1Updated), + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1Updated), + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1Updated), + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/DBCluster/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1Updated), + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + names.AttrBucket, names.AttrUsername, "organization", names.AttrPassword, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/DBCluster/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDBClusterExists(ctx, t, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), 
knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/DBCluster/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + names.AttrBucket, names.AttrUsername, "organization", names.AttrPassword, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/DBCluster/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDBClusterExists(ctx, t, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + ConfigPlanChecks: 
resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/DBCluster/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + names.AttrBucket, names.AttrUsername, "organization", names.AttrPassword, + }, + }, + }, + }) +} + +func TestAccTimestreamInfluxDBDBCluster_tags_null(t *testing.T) { + ctx := acctest.Context(t) + + var v timestreaminfluxdb.GetDbClusterOutput + resourceName := "aws_timestreaminfluxdb_db_cluster.test" + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.TimestreamInfluxDBServiceID), + CheckDestroy: testAccCheckDBClusterDestroy(ctx, t), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + ConfigDirectory: config.StaticDirectory("testdata/DBCluster/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: nil, + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDBClusterExists(ctx, t, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.Null(), + })), + 
statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.Null(), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/DBCluster/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: nil, + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + acctest.CtTagsKey1, // The canonical value returned by the AWS API is "" + names.AttrBucket, names.AttrUsername, "organization", names.AttrPassword, + }, + }, + }, + }) +} + +func TestAccTimestreamInfluxDBDBCluster_tags_EmptyMap(t *testing.T) { + ctx := acctest.Context(t) + + var v timestreaminfluxdb.GetDbClusterOutput + resourceName := "aws_timestreaminfluxdb_db_cluster.test" + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.TimestreamInfluxDBServiceID), + CheckDestroy: testAccCheckDBClusterDestroy(ctx, t), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + ConfigDirectory: config.StaticDirectory("testdata/DBCluster/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: 
config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{}), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDBClusterExists(ctx, t, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/DBCluster/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{}), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + acctest.CtTagsKey1, // The canonical value returned by the AWS API is "" + names.AttrBucket, names.AttrUsername, "organization", names.AttrPassword, + }, + }, + }, + }) +} + +func TestAccTimestreamInfluxDBDBCluster_tags_AddOnUpdate(t *testing.T) { + ctx := acctest.Context(t) + + var v timestreaminfluxdb.GetDbClusterOutput + resourceName := "aws_timestreaminfluxdb_db_cluster.test" + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.TimestreamInfluxDBServiceID), + CheckDestroy: testAccCheckDBClusterDestroy(ctx, 
t), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + ConfigDirectory: config.StaticDirectory("testdata/DBCluster/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDBClusterExists(ctx, t, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/DBCluster/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDBClusterExists(ctx, t, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: 
[]plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/DBCluster/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + names.AttrBucket, names.AttrUsername, "organization", names.AttrPassword, + }, + }, + }, + }) +} + +func TestAccTimestreamInfluxDBDBCluster_tags_EmptyTag_OnCreate(t *testing.T) { + ctx := acctest.Context(t) + + var v timestreaminfluxdb.GetDbClusterOutput + resourceName := "aws_timestreaminfluxdb_db_cluster.test" + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.TimestreamInfluxDBServiceID), + CheckDestroy: testAccCheckDBClusterDestroy(ctx, t), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + ConfigDirectory: config.StaticDirectory("testdata/DBCluster/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDBClusterExists(ctx, t, 
resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/DBCluster/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + names.AttrBucket, names.AttrUsername, "organization", names.AttrPassword, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/DBCluster/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDBClusterExists(ctx, t, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), 
knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/DBCluster/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + names.AttrBucket, names.AttrUsername, "organization", names.AttrPassword, + }, + }, + }, + }) +} + +func TestAccTimestreamInfluxDBDBCluster_tags_EmptyTag_OnUpdate_Add(t *testing.T) { + ctx := acctest.Context(t) + + var v timestreaminfluxdb.GetDbClusterOutput + resourceName := "aws_timestreaminfluxdb_db_cluster.test" + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.TimestreamInfluxDBServiceID), + CheckDestroy: testAccCheckDBClusterDestroy(ctx, t), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + ConfigDirectory: config.StaticDirectory("testdata/DBCluster/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDBClusterExists(ctx, t, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, 
tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/DBCluster/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + acctest.CtKey2: config.StringVariable(""), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDBClusterExists(ctx, t, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + acctest.CtKey2: knownvalue.StringExact(""), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + acctest.CtKey2: knownvalue.StringExact(""), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + 
plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + acctest.CtKey2: knownvalue.StringExact(""), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + acctest.CtKey2: knownvalue.StringExact(""), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/DBCluster/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + acctest.CtKey2: config.StringVariable(""), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + names.AttrBucket, names.AttrUsername, "organization", names.AttrPassword, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/DBCluster/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDBClusterExists(ctx, t, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: 
resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/DBCluster/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + names.AttrBucket, names.AttrUsername, "organization", names.AttrPassword, + }, + }, + }, + }) +} + +func TestAccTimestreamInfluxDBDBCluster_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { + ctx := acctest.Context(t) + + var v timestreaminfluxdb.GetDbClusterOutput + resourceName := "aws_timestreaminfluxdb_db_cluster.test" + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.TimestreamInfluxDBServiceID), + CheckDestroy: testAccCheckDBClusterDestroy(ctx, t), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + ConfigDirectory: config.StaticDirectory("testdata/DBCluster/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: 
resource.ComposeAggregateTestCheckFunc( + testAccCheckDBClusterExists(ctx, t, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/DBCluster/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDBClusterExists(ctx, t, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + 
plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/DBCluster/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + names.AttrBucket, names.AttrUsername, "organization", names.AttrPassword, + }, + }, + }, + }) +} + +func TestAccTimestreamInfluxDBDBCluster_tags_DefaultTags_providerOnly(t *testing.T) { + ctx := acctest.Context(t) + + var v timestreaminfluxdb.GetDbClusterOutput + resourceName := "aws_timestreaminfluxdb_db_cluster.test" + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.TimestreamInfluxDBServiceID), + CheckDestroy: testAccCheckDBClusterDestroy(ctx, t), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/DBCluster/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDBClusterExists(ctx, t, 
resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/DBCluster/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + names.AttrBucket, names.AttrUsername, "organization", names.AttrPassword, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/DBCluster/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1Updated), + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDBClusterExists(ctx, t, 
resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1Updated), + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1Updated), + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/DBCluster/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1Updated), + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + names.AttrBucket, names.AttrUsername, "organization", names.AttrPassword, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/DBCluster/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey2: 
config.StringVariable(acctest.CtValue2), + }), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDBClusterExists(ctx, t, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/DBCluster/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + names.AttrBucket, names.AttrUsername, "organization", names.AttrPassword, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/DBCluster/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDBClusterExists(ctx, t, resourceName, &v), + ), + ConfigStateChecks: 
[]statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/DBCluster/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + names.AttrBucket, names.AttrUsername, "organization", names.AttrPassword, + }, + }, + }, + }) +} + +func TestAccTimestreamInfluxDBDBCluster_tags_DefaultTags_nonOverlapping(t *testing.T) { + ctx := acctest.Context(t) + + var v timestreaminfluxdb.GetDbClusterOutput + resourceName := "aws_timestreaminfluxdb_db_cluster.test" + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.TimestreamInfluxDBServiceID), + CheckDestroy: testAccCheckDBClusterDestroy(ctx, t), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/DBCluster/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + 
acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDBClusterExists(ctx, t, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1), + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1), + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/DBCluster/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: 
config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + names.AttrBucket, names.AttrUsername, "organization", names.AttrPassword, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/DBCluster/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1Updated), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: config.StringVariable(acctest.CtResourceValue2), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDBClusterExists(ctx, t, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1Updated), + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + 
plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1Updated), + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/DBCluster/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1Updated), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: config.StringVariable(acctest.CtResourceValue2), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + names.AttrBucket, names.AttrUsername, "organization", names.AttrPassword, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/DBCluster/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDBClusterExists(ctx, t, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, 
tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/DBCluster/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + names.AttrBucket, names.AttrUsername, "organization", names.AttrPassword, + }, + }, + }, + }) +} + +func TestAccTimestreamInfluxDBDBCluster_tags_DefaultTags_overlapping(t *testing.T) { + ctx := acctest.Context(t) + + var v timestreaminfluxdb.GetDbClusterOutput + resourceName := "aws_timestreaminfluxdb_db_cluster.test" + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.TimestreamInfluxDBServiceID), + CheckDestroy: testAccCheckDBClusterDestroy(ctx, t), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/DBCluster/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtProviderValue1), + }), + 
acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtResourceValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDBClusterExists(ctx, t, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/DBCluster/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtResourceValue1), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + 
names.AttrBucket, names.AttrUsername, "organization", names.AttrPassword, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/DBCluster/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtProviderValue1), + acctest.CtOverlapKey2: config.StringVariable("providervalue2"), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtResourceValue1), + acctest.CtOverlapKey2: config.StringVariable(acctest.CtResourceValue2), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDBClusterExists(ctx, t, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + acctest.CtOverlapKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + acctest.CtOverlapKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + acctest.CtOverlapKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, 
tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + acctest.CtOverlapKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/DBCluster/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtProviderValue1), + acctest.CtOverlapKey2: config.StringVariable("providervalue2"), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtResourceValue1), + acctest.CtOverlapKey2: config.StringVariable(acctest.CtResourceValue2), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + names.AttrBucket, names.AttrUsername, "organization", names.AttrPassword, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/DBCluster/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtResourceValue2), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDBClusterExists(ctx, t, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: 
knownvalue.StringExact(acctest.CtResourceValue2), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/DBCluster/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtResourceValue2), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + names.AttrBucket, names.AttrUsername, "organization", names.AttrPassword, + }, + }, + }, + }) +} + +func TestAccTimestreamInfluxDBDBCluster_tags_DefaultTags_updateToProviderOnly(t *testing.T) { + ctx := acctest.Context(t) + + var v timestreaminfluxdb.GetDbClusterOutput + resourceName := "aws_timestreaminfluxdb_db_cluster.test" + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { 
acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.TimestreamInfluxDBServiceID), + CheckDestroy: testAccCheckDBClusterDestroy(ctx, t), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/DBCluster/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDBClusterExists(ctx, t, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/DBCluster/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: 
config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDBClusterExists(ctx, t, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/DBCluster/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + names.AttrBucket, names.AttrUsername, "organization", names.AttrPassword, + }, + }, + }, + }) +} + +func TestAccTimestreamInfluxDBDBCluster_tags_DefaultTags_updateToResourceOnly(t *testing.T) { + ctx := acctest.Context(t) + + var v timestreaminfluxdb.GetDbClusterOutput + resourceName := "aws_timestreaminfluxdb_db_cluster.test" + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { 
acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.TimestreamInfluxDBServiceID), + CheckDestroy: testAccCheckDBClusterDestroy(ctx, t), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/DBCluster/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDBClusterExists(ctx, t, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/DBCluster/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDBClusterExists(ctx, t, resourceName, &v), + ), + 
ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/DBCluster/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + names.AttrBucket, names.AttrUsername, "organization", names.AttrPassword, + }, + }, + }, + }) +} + +func TestAccTimestreamInfluxDBDBCluster_tags_DefaultTags_emptyResourceTag(t *testing.T) { + ctx := acctest.Context(t) + + var v timestreaminfluxdb.GetDbClusterOutput + resourceName := "aws_timestreaminfluxdb_db_cluster.test" + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: 
acctest.ErrorCheck(t, names.TimestreamInfluxDBServiceID), + CheckDestroy: testAccCheckDBClusterDestroy(ctx, t), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/DBCluster/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDBClusterExists(ctx, t, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/DBCluster/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: 
config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + names.AttrBucket, names.AttrUsername, "organization", names.AttrPassword, + }, + }, + }, + }) +} + +func TestAccTimestreamInfluxDBDBCluster_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T) { + ctx := acctest.Context(t) + + var v timestreaminfluxdb.GetDbClusterOutput + resourceName := "aws_timestreaminfluxdb_db_cluster.test" + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.TimestreamInfluxDBServiceID), + CheckDestroy: testAccCheckDBClusterDestroy(ctx, t), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/DBCluster/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDBClusterExists(ctx, t, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), 
+ plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/DBCluster/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + names.AttrBucket, names.AttrUsername, "organization", names.AttrPassword, + }, + }, + }, + }) +} + +func TestAccTimestreamInfluxDBDBCluster_tags_DefaultTags_nullOverlappingResourceTag(t *testing.T) { + ctx := acctest.Context(t) + + var v timestreaminfluxdb.GetDbClusterOutput + resourceName := "aws_timestreaminfluxdb_db_cluster.test" + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.TimestreamInfluxDBServiceID), + CheckDestroy: testAccCheckDBClusterDestroy(ctx, t), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/DBCluster/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: nil, + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + 
testAccCheckDBClusterExists(ctx, t, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.Null(), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.Null(), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/DBCluster/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: nil, + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + acctest.CtTagsKey1, // The canonical value returned by the AWS API is "" + names.AttrBucket, names.AttrUsername, "organization", names.AttrPassword, + }, + }, + }, + }) +} + +func TestAccTimestreamInfluxDBDBCluster_tags_DefaultTags_nullNonOverlappingResourceTag(t *testing.T) { + ctx := acctest.Context(t) + + var v timestreaminfluxdb.GetDbClusterOutput + resourceName := 
"aws_timestreaminfluxdb_db_cluster.test" + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.TimestreamInfluxDBServiceID), + CheckDestroy: testAccCheckDBClusterDestroy(ctx, t), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/DBCluster/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: nil, + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDBClusterExists(ctx, t, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.Null(), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(""), + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.Null(), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(""), + 
acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/DBCluster/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: nil, + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "tags.resourcekey1", // The canonical value returned by the AWS API is "" + names.AttrBucket, names.AttrUsername, "organization", names.AttrPassword, + }, + }, + }, + }) +} + +func TestAccTimestreamInfluxDBDBCluster_tags_ComputedTag_OnCreate(t *testing.T) { + ctx := acctest.Context(t) + + var v timestreaminfluxdb.GetDbClusterOutput + resourceName := "aws_timestreaminfluxdb_db_cluster.test" + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.TimestreamInfluxDBServiceID), + CheckDestroy: testAccCheckDBClusterDestroy(ctx, t), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/DBCluster/tagsComputed1/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "unknownTagKey": config.StringVariable("computedkey1"), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDBClusterExists(ctx, t, resourceName, &v), + resource.TestCheckResourceAttrPair(resourceName, "tags.computedkey1", "null_resource.test", names.AttrID), + ), + ConfigStateChecks: []statecheck.StateCheck{ + 
statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapSizeExact(1)), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapSizeExact(1)), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTags).AtMapKey("computedkey1")), + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/DBCluster/tagsComputed1/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "unknownTagKey": config.StringVariable("computedkey1"), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + names.AttrBucket, names.AttrUsername, "organization", names.AttrPassword, + }, + }, + }, + }) +} + +func TestAccTimestreamInfluxDBDBCluster_tags_ComputedTag_OnUpdate_Add(t *testing.T) { + ctx := acctest.Context(t) + + var v timestreaminfluxdb.GetDbClusterOutput + resourceName := "aws_timestreaminfluxdb_db_cluster.test" + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.TimestreamInfluxDBServiceID), + CheckDestroy: testAccCheckDBClusterDestroy(ctx, t), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: 
config.StaticDirectory("testdata/DBCluster/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDBClusterExists(ctx, t, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/DBCluster/tagsComputed2/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "unknownTagKey": config.StringVariable("computedkey1"), + "knownTagKey": config.StringVariable(acctest.CtKey1), + "knownTagValue": config.StringVariable(acctest.CtValue1), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDBClusterExists(ctx, t, resourceName, &v), + resource.TestCheckResourceAttrPair(resourceName, "tags.computedkey1", 
"null_resource.test", names.AttrID), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapSizeExact(2)), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapPartial(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapSizeExact(2)), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapPartial(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTags).AtMapKey("computedkey1")), + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/DBCluster/tagsComputed2/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "unknownTagKey": config.StringVariable("computedkey1"), + "knownTagKey": config.StringVariable(acctest.CtKey1), + "knownTagValue": config.StringVariable(acctest.CtValue1), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + names.AttrBucket, names.AttrUsername, "organization", names.AttrPassword, + }, + }, + }, + }) +} + +func 
TestAccTimestreamInfluxDBDBCluster_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { + ctx := acctest.Context(t) + + var v timestreaminfluxdb.GetDbClusterOutput + resourceName := "aws_timestreaminfluxdb_db_cluster.test" + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.TimestreamInfluxDBServiceID), + CheckDestroy: testAccCheckDBClusterDestroy(ctx, t), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/DBCluster/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDBClusterExists(ctx, t, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + 
})), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/DBCluster/tagsComputed1/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "unknownTagKey": config.StringVariable(acctest.CtKey1), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDBClusterExists(ctx, t, resourceName, &v), + resource.TestCheckResourceAttrPair(resourceName, acctest.CtTagsKey1, "null_resource.test", names.AttrID), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapSizeExact(1)), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapSizeExact(1)), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTags).AtMapKey(acctest.CtKey1)), + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/DBCluster/tagsComputed1/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "unknownTagKey": config.StringVariable(acctest.CtKey1), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + names.AttrBucket, names.AttrUsername, "organization", names.AttrPassword, + }, + }, + }, + }) +} + +func 
TestAccTimestreamInfluxDBDBCluster_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { + ctx := acctest.Context(t) + + var v timestreaminfluxdb.GetDbClusterOutput + resourceName := "aws_timestreaminfluxdb_db_cluster.test" + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.TimestreamInfluxDBServiceID), + CheckDestroy: testAccCheckDBClusterDestroy(ctx, t), + Steps: []resource.TestStep{ + // 1: Create + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/DBCluster/tags_ignore/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1), + }), + "ignore_tag_keys": config.SetVariable( + config.StringVariable(acctest.CtProviderKey1), + ), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDBClusterExists(ctx, t, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + expectFullResourceTags(ctx, resourceName, knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1), // TODO: Should not be set + acctest.CtResourceKey1: 
knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectEmptyPlan(), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectEmptyPlan(), + }, + }, + }, + // 2: Update ignored tag only + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/DBCluster/tags_ignore/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1Updated), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1), + }), + "ignore_tag_keys": config.SetVariable( + config.StringVariable(acctest.CtProviderKey1), + ), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDBClusterExists(ctx, t, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + 
acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + expectFullResourceTags(ctx, resourceName, knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1), // TODO: Should not be set + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectEmptyPlan(), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectEmptyPlan(), + }, + }, + }, + // 3: Update both tags + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/DBCluster/tags_ignore/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1Again), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1Updated), + }), + "ignore_tag_keys": config.SetVariable( + config.StringVariable(acctest.CtProviderKey1), + ), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDBClusterExists(ctx, t, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, 
tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + })), + expectFullResourceTags(ctx, resourceName, knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1), // TODO: Should not be set + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + })), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectEmptyPlan(), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectEmptyPlan(), + }, + }, + }, + }, + }) +} + +func TestAccTimestreamInfluxDBDBCluster_tags_IgnoreTags_Overlap_ResourceTag(t *testing.T) { + ctx := acctest.Context(t) + + var v timestreaminfluxdb.GetDbClusterOutput + resourceName := "aws_timestreaminfluxdb_db_cluster.test" + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.TimestreamInfluxDBServiceID), + CheckDestroy: testAccCheckDBClusterDestroy(ctx, t), + Steps: 
[]resource.TestStep{ + // 1: Create + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/DBCluster/tags_ignore/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1), + acctest.CtResourceKey2: config.StringVariable(acctest.CtResourceValue2), + }), + "ignore_tag_keys": config.SetVariable( + config.StringVariable(acctest.CtResourceKey1), + ), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDBClusterExists(ctx, t, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + expectFullResourceTags(ctx, resourceName, knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), // TODO: Should not be set + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + 
plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectEmptyPlan(), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), // TODO: Should be NoOp + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + }, + ExpectNonEmptyPlan: true, + }, + // 2: Update ignored tag + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/DBCluster/tags_ignore/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: config.StringVariable(acctest.CtResourceValue2), + }), + "ignore_tag_keys": config.SetVariable( + config.StringVariable(acctest.CtResourceKey1), + ), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDBClusterExists(ctx, t, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: 
knownvalue.StringExact(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + expectFullResourceTags(ctx, resourceName, knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), // TODO: Should not be set + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectEmptyPlan(), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), // TODO: Should be NoOp + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: 
knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + }, + ExpectNonEmptyPlan: true, + }, + // 3: Update both tags + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/DBCluster/tags_ignore/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1Again), + acctest.CtResourceKey2: config.StringVariable(acctest.CtResourceValue2Updated), + }), + "ignore_tag_keys": config.SetVariable( + config.StringVariable(acctest.CtResourceKey1), + ), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDBClusterExists(ctx, t, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Again), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + expectFullResourceTags(ctx, resourceName, knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), // TODO: Should not be set + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: 
knownvalue.StringExact(acctest.CtResourceValue1Again), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectEmptyPlan(), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), // TODO: Should be NoOp + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Again), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + }, + }, + ExpectNonEmptyPlan: true, + }, + }, + }) +} diff --git a/internal/service/timestreaminfluxdb/db_cluster_test.go b/internal/service/timestreaminfluxdb/db_cluster_test.go new file mode 100644 index 000000000000..4c93ba9a1b80 --- /dev/null +++ b/internal/service/timestreaminfluxdb/db_cluster_test.go @@ -0,0 +1,813 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package timestreaminfluxdb_test + +import ( + "context" + "errors" + "fmt" + "testing" + + "github.com/YakDriver/regexache" + "github.com/aws/aws-sdk-go-v2/service/timestreaminfluxdb" + awstypes "github.com/aws/aws-sdk-go-v2/service/timestreaminfluxdb/types" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/plancheck" + "github.com/hashicorp/terraform-plugin-testing/terraform" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + "github.com/hashicorp/terraform-provider-aws/internal/create" + "github.com/hashicorp/terraform-provider-aws/internal/retry" + tftimestreaminfluxdb "github.com/hashicorp/terraform-provider-aws/internal/service/timestreaminfluxdb" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccTimestreamInfluxDBDBCluster_basic(t *testing.T) { + ctx := acctest.Context(t) + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + + var dbCluster timestreaminfluxdb.GetDbClusterOutput + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + resourceName := "aws_timestreaminfluxdb_db_cluster.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + testAccPreCheckDBClusters(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.TimestreamInfluxDBServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckDBClusterDestroy(ctx, t), + Steps: []resource.TestStep{ + { + Config: testAccDBClusterConfig_basic(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckDBClusterExists(ctx, t, resourceName, &dbCluster), + acctest.MatchResourceAttrRegionalARN(ctx, resourceName, names.AttrARN, "timestream-influxdb", regexache.MustCompile(`db-cluster/.+$`)), + resource.TestCheckResourceAttr(resourceName, "db_storage_type", string(awstypes.DbStorageTypeInfluxIoIncludedT1)), + 
resource.TestCheckResourceAttr(resourceName, "deployment_type", string(awstypes.ClusterDeploymentTypeMultiNodeReadReplicas)), + resource.TestCheckResourceAttr(resourceName, "failover_mode", string(awstypes.FailoverModeAutomatic)), + resource.TestCheckResourceAttrSet(resourceName, "influx_auth_parameters_secret_arn"), + resource.TestCheckResourceAttr(resourceName, "network_type", string(awstypes.NetworkTypeIpv4)), + resource.TestCheckResourceAttr(resourceName, names.AttrPort, "8086"), + resource.TestCheckResourceAttr(resourceName, names.AttrPubliclyAccessible, acctest.CtFalse), + resource.TestCheckResourceAttrSet(resourceName, "reader_endpoint"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{names.AttrBucket, names.AttrUsername, names.AttrPassword, "organization"}, + }, + }, + }) +} + +func TestAccTimestreamInfluxDBDBCluster_disappears(t *testing.T) { + ctx := acctest.Context(t) + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + + var dbCluster timestreaminfluxdb.GetDbClusterOutput + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + resourceName := "aws_timestreaminfluxdb_db_cluster.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + testAccPreCheckDBClusters(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.TimestreamInfluxDBServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckDBClusterDestroy(ctx, t), + Steps: []resource.TestStep{ + { + Config: testAccDBClusterConfig_basic(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckDBClusterExists(ctx, t, resourceName, &dbCluster), + acctest.CheckFrameworkResourceDisappears(ctx, acctest.Provider, tftimestreaminfluxdb.ResourceDBCluster, resourceName), + ), + ExpectNonEmptyPlan: true, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PostApplyPostRefresh: []plancheck.PlanCheck{ + 
plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + }, + }, + }, + }, + }) +} + +func TestAccTimestreamInfluxDBDBCluster_dbInstanceType(t *testing.T) { + ctx := acctest.Context(t) + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + + var dbCluster1, dbCluster2 timestreaminfluxdb.GetDbClusterOutput + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + resourceName := "aws_timestreaminfluxdb_db_cluster.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + testAccPreCheckDBClusters(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.TimestreamInfluxDBServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckDBClusterDestroy(ctx, t), + Steps: []resource.TestStep{ + { + Config: testAccDBClusterConfig_dbInstanceType(rName, string(awstypes.DbInstanceTypeDbInfluxMedium)), + Check: resource.ComposeTestCheckFunc( + testAccCheckDBClusterExists(ctx, t, resourceName, &dbCluster1), + resource.TestCheckResourceAttr(resourceName, "db_instance_type", string(awstypes.DbInstanceTypeDbInfluxMedium)), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{names.AttrBucket, names.AttrUsername, names.AttrPassword, "organization"}, + }, + { + Config: testAccDBClusterConfig_dbInstanceType(rName, string(awstypes.DbInstanceTypeDbInfluxLarge)), + Check: resource.ComposeTestCheckFunc( + testAccCheckDBClusterExists(ctx, t, resourceName, &dbCluster2), + resource.TestCheckResourceAttr(resourceName, "db_instance_type", string(awstypes.DbInstanceTypeDbInfluxLarge)), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + }, + }, + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: 
[]string{names.AttrBucket, names.AttrUsername, names.AttrPassword, "organization"}, + }, + }, + }) +} + +func TestAccTimestreamInfluxDBDBCluster_logDeliveryConfiguration(t *testing.T) { + ctx := acctest.Context(t) + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + + var dbCluster1, dbCluster2 timestreaminfluxdb.GetDbClusterOutput + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + resourceName := "aws_timestreaminfluxdb_db_cluster.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + testAccPreCheckDBClusters(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.TimestreamInfluxDBServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckDBClusterDestroy(ctx, t), + Steps: []resource.TestStep{ + { + Config: testAccDBClusterConfig_logDeliveryConfigurationEnabled(rName, true), + Check: resource.ComposeTestCheckFunc( + testAccCheckDBClusterExists(ctx, t, resourceName, &dbCluster1), + resource.TestCheckResourceAttr(resourceName, "log_delivery_configuration.0.s3_configuration.0.%", "2"), + resource.TestCheckResourceAttr(resourceName, "log_delivery_configuration.0.s3_configuration.0.bucket_name", rName), + resource.TestCheckResourceAttr(resourceName, "log_delivery_configuration.0.s3_configuration.0.enabled", acctest.CtTrue), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{names.AttrBucket, names.AttrUsername, names.AttrPassword, "organization"}, + }, + { + Config: testAccDBClusterConfig_logDeliveryConfigurationEnabled(rName, false), + Check: resource.ComposeTestCheckFunc( + testAccCheckDBClusterExists(ctx, t, resourceName, &dbCluster2), + resource.TestCheckResourceAttr(resourceName, "log_delivery_configuration.0.s3_configuration.0.%", "2"), + resource.TestCheckResourceAttr(resourceName, "log_delivery_configuration.0.s3_configuration.0.bucket_name", 
rName), + resource.TestCheckResourceAttr(resourceName, "log_delivery_configuration.0.s3_configuration.0.enabled", acctest.CtFalse), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + }, + }, + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{names.AttrBucket, names.AttrUsername, names.AttrPassword, "organization"}, + }, + }, + }) +} + +func TestAccTimestreamInfluxDBDBCluster_networkType(t *testing.T) { + ctx := acctest.Context(t) + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + + var dbCluster timestreaminfluxdb.GetDbClusterOutput + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + resourceName := "aws_timestreaminfluxdb_db_cluster.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + testAccPreCheckDBClusters(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.TimestreamInfluxDBServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckDBClusterDestroy(ctx, t), + Steps: []resource.TestStep{ + { + Config: testAccDBClusterConfig_networkTypeIPV4(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckDBClusterExists(ctx, t, resourceName, &dbCluster), + resource.TestCheckResourceAttr(resourceName, "network_type", string(awstypes.NetworkTypeIpv4)), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{names.AttrBucket, names.AttrUsername, names.AttrPassword, "organization"}, + }, + }, + }) +} + +func TestAccTimestreamInfluxDBDBCluster_port(t *testing.T) { + ctx := acctest.Context(t) + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + + var dbCluster1, dbCluster2 timestreaminfluxdb.GetDbClusterOutput + port1 := "8086" + port2 := 
"8087" + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + resourceName := "aws_timestreaminfluxdb_db_cluster.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + testAccPreCheckDBClusters(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.TimestreamInfluxDBServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckDBClusterDestroy(ctx, t), + Steps: []resource.TestStep{ + { + Config: testAccDBClusterConfig_port(rName, port1), + Check: resource.ComposeTestCheckFunc( + testAccCheckDBClusterExists(ctx, t, resourceName, &dbCluster1), + resource.TestCheckResourceAttr(resourceName, names.AttrPort, port1), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{names.AttrBucket, names.AttrUsername, names.AttrPassword, "organization"}, + }, + { + Config: testAccDBClusterConfig_port(rName, port2), + Check: resource.ComposeTestCheckFunc( + testAccCheckDBClusterExists(ctx, t, resourceName, &dbCluster2), + resource.TestCheckResourceAttr(resourceName, names.AttrPort, port2), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + }, + }, + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{names.AttrBucket, names.AttrUsername, names.AttrPassword, "organization"}, + }, + }, + }) +} + +func TestAccTimestreamInfluxDBDBCluster_allocatedStorage(t *testing.T) { + ctx := acctest.Context(t) + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + + var dbCluster timestreaminfluxdb.GetDbClusterOutput + allocatedStorage := "20" + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + resourceName := "aws_timestreaminfluxdb_db_cluster.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + 
PreCheck: func() { + acctest.PreCheck(ctx, t) + testAccPreCheckDBClusters(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.TimestreamInfluxDBServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckDBClusterDestroy(ctx, t), + Steps: []resource.TestStep{ + { + Config: testAccDBClusterConfig_allocatedStorage(rName, allocatedStorage), + Check: resource.ComposeTestCheckFunc( + testAccCheckDBClusterExists(ctx, t, resourceName, &dbCluster), + resource.TestCheckResourceAttr(resourceName, names.AttrAllocatedStorage, allocatedStorage), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{names.AttrBucket, names.AttrUsername, names.AttrPassword, "organization"}, + }, + }, + }) +} + +func TestAccTimestreamInfluxDBDBCluster_dbStorageType(t *testing.T) { + ctx := acctest.Context(t) + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + + var dbCluster timestreaminfluxdb.GetDbClusterOutput + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + resourceName := "aws_timestreaminfluxdb_db_cluster.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + testAccPreCheckDBClusters(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.TimestreamInfluxDBServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckDBClusterDestroy(ctx, t), + Steps: []resource.TestStep{ + { + Config: testAccDBClusterConfig_dbStorageType(rName, string(awstypes.DbStorageTypeInfluxIoIncludedT1)), + Check: resource.ComposeTestCheckFunc( + testAccCheckDBClusterExists(ctx, t, resourceName, &dbCluster), + resource.TestCheckResourceAttr(resourceName, "db_storage_type", string(awstypes.DbStorageTypeInfluxIoIncludedT1)), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{names.AttrBucket, 
names.AttrUsername, names.AttrPassword, "organization"}, + }, + }, + }) +} + +func TestAccTimestreamInfluxDBDBCluster_publiclyAccessible(t *testing.T) { + ctx := acctest.Context(t) + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + + var dbCluster timestreaminfluxdb.GetDbClusterOutput + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + resourceName := "aws_timestreaminfluxdb_db_cluster.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + testAccPreCheckDBClusters(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.TimestreamInfluxDBServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckDBClusterDestroy(ctx, t), + Steps: []resource.TestStep{ + { + Config: testAccDBClusterConfig_publiclyAccessible(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckDBClusterExists(ctx, t, resourceName, &dbCluster), + resource.TestCheckResourceAttrSet(resourceName, names.AttrEndpoint), + resource.TestCheckResourceAttrSet(resourceName, "reader_endpoint"), + resource.TestCheckResourceAttr(resourceName, names.AttrPubliclyAccessible, acctest.CtTrue), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{names.AttrBucket, names.AttrUsername, names.AttrPassword, "organization"}, + }, + }, + }) +} + +func TestAccTimestreamInfluxDBDBCluster_deploymentType(t *testing.T) { + ctx := acctest.Context(t) + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + + var dbCluster timestreaminfluxdb.GetDbClusterOutput + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + resourceName := "aws_timestreaminfluxdb_db_cluster.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + testAccPreCheckDBClusters(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.TimestreamInfluxDBServiceID), + 
ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckDBClusterDestroy(ctx, t), + Steps: []resource.TestStep{ + { + Config: testAccDBClusterConfig_deploymentType(rName, string(awstypes.ClusterDeploymentTypeMultiNodeReadReplicas)), + Check: resource.ComposeTestCheckFunc( + testAccCheckDBClusterExists(ctx, t, resourceName, &dbCluster), + resource.TestCheckResourceAttr(resourceName, "deployment_type", string(awstypes.ClusterDeploymentTypeMultiNodeReadReplicas)), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{names.AttrBucket, names.AttrUsername, names.AttrPassword, "organization"}, + }, + }, + }) +} + +func TestAccTimestreamInfluxDBDBCluster_failoverMode(t *testing.T) { + ctx := acctest.Context(t) + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + + var dbCluster1, dbCluster2 timestreaminfluxdb.GetDbClusterOutput + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + resourceName := "aws_timestreaminfluxdb_db_cluster.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + testAccPreCheckDBClusters(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.TimestreamInfluxDBServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckDBClusterDestroy(ctx, t), + Steps: []resource.TestStep{ + { + Config: testAccDBClusterConfig_failoverMode(rName, string(awstypes.FailoverModeAutomatic)), + Check: resource.ComposeTestCheckFunc( + testAccCheckDBClusterExists(ctx, t, resourceName, &dbCluster1), + resource.TestCheckResourceAttr(resourceName, "failover_mode", string(awstypes.FailoverModeAutomatic)), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{names.AttrBucket, names.AttrUsername, names.AttrPassword, "organization"}, + }, + { + Config: 
testAccDBClusterConfig_failoverMode(rName, string(awstypes.FailoverModeNoFailover)), + Check: resource.ComposeTestCheckFunc( + testAccCheckDBClusterExists(ctx, t, resourceName, &dbCluster2), + resource.TestCheckResourceAttr(resourceName, "failover_mode", string(awstypes.FailoverModeNoFailover)), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + }, + }, + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{names.AttrBucket, names.AttrUsername, names.AttrPassword, "organization"}, + }, + }, + }) +} + +func testAccCheckDBClusterDestroy(ctx context.Context, t *testing.T) resource.TestCheckFunc { + return func(s *terraform.State) error { + conn := acctest.ProviderMeta(ctx, t).TimestreamInfluxDBClient(ctx) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "aws_timestreaminfluxdb_db_cluster" { + continue + } + + _, err := tftimestreaminfluxdb.FindDBClusterByID(ctx, conn, rs.Primary.ID) + + if retry.NotFound(err) { + continue + } + + if err != nil { + return create.Error(names.TimestreamInfluxDB, create.ErrActionCheckingDestroyed, tftimestreaminfluxdb.ResNameDBCluster, rs.Primary.ID, err) + } + + return create.Error(names.TimestreamInfluxDB, create.ErrActionCheckingDestroyed, tftimestreaminfluxdb.ResNameDBCluster, rs.Primary.ID, errors.New("not destroyed")) + } + + return nil + } +} + +func testAccCheckDBClusterExists(ctx context.Context, t *testing.T, name string, dbCluster *timestreaminfluxdb.GetDbClusterOutput) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[name] + if !ok { + return create.Error(names.TimestreamInfluxDB, create.ErrActionCheckingExistence, tftimestreaminfluxdb.ResNameDBCluster, name, errors.New("not found")) + } + + if rs.Primary.ID == "" { + return create.Error(names.TimestreamInfluxDB, 
create.ErrActionCheckingExistence, tftimestreaminfluxdb.ResNameDBCluster, name, errors.New("not set")) + } + + conn := acctest.ProviderMeta(ctx, t).TimestreamInfluxDBClient(ctx) + resp, err := tftimestreaminfluxdb.FindDBClusterByID(ctx, conn, rs.Primary.ID) + + if err != nil { + return create.Error(names.TimestreamInfluxDB, create.ErrActionCheckingExistence, tftimestreaminfluxdb.ResNameDBCluster, rs.Primary.ID, err) + } + + *dbCluster = *resp + + return nil + } +} + +func testAccPreCheckDBClusters(ctx context.Context, t *testing.T) { + conn := acctest.ProviderMeta(ctx, t).TimestreamInfluxDBClient(ctx) + + input := ×treaminfluxdb.ListDbClustersInput{} + _, err := conn.ListDbClusters(ctx, input) + + if acctest.PreCheckSkipError(err) { + t.Skipf("skipping acceptance testing: %s", err) + } + if err != nil { + t.Fatalf("unexpected PreCheck error: %s", err) + } +} + +func testAccDBClusterConfig_base(rName string, subnetCount int) string { + return acctest.ConfigCompose(acctest.ConfigVPCWithSubnets(rName, subnetCount), ` +resource "aws_security_group" "test" { + vpc_id = aws_vpc.test.id +} +`) +} + +// Minimal configuration. 
+func testAccDBClusterConfig_basic(rName string) string { + return acctest.ConfigCompose(testAccDBClusterConfig_base(rName, 2), fmt.Sprintf(` +resource "aws_timestreaminfluxdb_db_cluster" "test" { + name = %[1]q + allocated_storage = 20 + username = "admin" + password = "testpassword" + vpc_subnet_ids = aws_subnet.test[*].id + vpc_security_group_ids = [aws_security_group.test.id] + db_instance_type = "db.influx.medium" + port = 8086 + bucket = "initial" + organization = "organization" +} +`, rName)) +} + +func testAccDBClusterConfig_dbInstanceType(rName string, instanceType string) string { + return acctest.ConfigCompose(testAccDBClusterConfig_base(rName, 2), fmt.Sprintf(` +resource "aws_timestreaminfluxdb_db_cluster" "test" { + name = %[1]q + allocated_storage = 20 + username = "admin" + password = "testpassword" + vpc_subnet_ids = aws_subnet.test[*].id + vpc_security_group_ids = [aws_security_group.test.id] + db_instance_type = %[2]q + port = 8086 + bucket = "initial" + organization = "organization" +} +`, rName, instanceType)) +} + +// Configuration with log_delivery_configuration set and enabled. 
+func testAccDBClusterConfig_logDeliveryConfigurationEnabled(rName string, enabled bool) string { + return acctest.ConfigCompose(testAccDBClusterConfig_base(rName, 2), fmt.Sprintf(` +resource "aws_s3_bucket" "test" { + bucket = %[1]q + force_destroy = true +} + +data "aws_iam_policy_document" "test" { + statement { + actions = ["s3:PutObject"] + principals { + type = "Service" + identifiers = ["timestream-influxdb.amazonaws.com"] + } + resources = [ + "${aws_s3_bucket.test.arn}/*" + ] + } +} + +resource "aws_s3_bucket_policy" "test" { + bucket = aws_s3_bucket.test.id + policy = data.aws_iam_policy_document.test.json +} + +resource "aws_timestreaminfluxdb_db_cluster" "test" { + name = %[1]q + allocated_storage = 20 + username = "admin" + password = "testpassword" + vpc_subnet_ids = aws_subnet.test[*].id + vpc_security_group_ids = [aws_security_group.test.id] + db_instance_type = "db.influx.medium" + publicly_accessible = false + port = 8086 + bucket = "initial" + organization = "organization" + + log_delivery_configuration { + s3_configuration { + bucket_name = aws_s3_bucket.test.bucket + enabled = %[2]t + } + } +} +`, rName, enabled)) +} + +func testAccDBClusterConfig_publiclyAccessible(rName string) string { + return acctest.ConfigCompose(testAccDBClusterConfig_base(rName, 2), fmt.Sprintf(` +resource "aws_internet_gateway" "test" { + vpc_id = aws_vpc.test.id +} + +resource "aws_route" "test" { + route_table_id = aws_vpc.test.main_route_table_id + destination_cidr_block = "0.0.0.0/0" + gateway_id = aws_internet_gateway.test.id +} + +resource "aws_route_table_association" "test" { + subnet_id = aws_subnet.test[0].id + route_table_id = aws_vpc.test.main_route_table_id +} + +resource "aws_vpc_security_group_ingress_rule" "test" { + security_group_id = aws_security_group.test.id + referenced_security_group_id = aws_security_group.test.id + ip_protocol = -1 +} + +resource "aws_timestreaminfluxdb_db_cluster" "test" { + name = %[1]q + allocated_storage = 20 + username = 
"admin" + password = "testpassword" + db_storage_type = "InfluxIOIncludedT1" + vpc_subnet_ids = aws_subnet.test[*].id + vpc_security_group_ids = [aws_security_group.test.id] + db_instance_type = "db.influx.medium" + bucket = "initial" + organization = "organization" + + publicly_accessible = true +} +`, rName)) +} + +func testAccDBClusterConfig_deploymentType(rName string, deploymentType string) string { + return acctest.ConfigCompose(testAccDBClusterConfig_base(rName, 2), fmt.Sprintf(` +resource "aws_timestreaminfluxdb_db_cluster" "test" { + name = %[1]q + allocated_storage = 20 + username = "admin" + password = "testpassword" + db_storage_type = "InfluxIOIncludedT1" + vpc_subnet_ids = aws_subnet.test[*].id + vpc_security_group_ids = [aws_security_group.test.id] + db_instance_type = "db.influx.medium" + bucket = "initial" + organization = "organization" + + deployment_type = %[2]q +} +`, rName, deploymentType)) +} + +func testAccDBClusterConfig_networkTypeIPV4(rName string) string { + return acctest.ConfigCompose(testAccDBClusterConfig_base(rName, 2), fmt.Sprintf(` +resource "aws_timestreaminfluxdb_db_cluster" "test" { + name = %[1]q + allocated_storage = 20 + username = "admin" + password = "testpassword" + vpc_subnet_ids = aws_subnet.test[*].id + vpc_security_group_ids = [aws_security_group.test.id] + db_instance_type = "db.influx.medium" + port = 8086 + bucket = "initial" + organization = "organization" + + network_type = "IPV4" +} +`, rName)) +} + +func testAccDBClusterConfig_port(rName string, port string) string { + return acctest.ConfigCompose(testAccDBClusterConfig_base(rName, 2), fmt.Sprintf(` +resource "aws_timestreaminfluxdb_db_cluster" "test" { + name = %[1]q + allocated_storage = 20 + username = "admin" + password = "testpassword" + vpc_subnet_ids = aws_subnet.test[*].id + vpc_security_group_ids = [aws_security_group.test.id] + db_instance_type = "db.influx.medium" + bucket = "initial" + organization = "organization" + + port = %[2]s +} +`, rName, 
port)) +} + +func testAccDBClusterConfig_allocatedStorage(rName string, storageAmount string) string { + return acctest.ConfigCompose(testAccDBClusterConfig_base(rName, 2), fmt.Sprintf(` +resource "aws_timestreaminfluxdb_db_cluster" "test" { + name = %[1]q + username = "admin" + password = "testpassword" + vpc_subnet_ids = aws_subnet.test[*].id + vpc_security_group_ids = [aws_security_group.test.id] + db_instance_type = "db.influx.medium" + bucket = "initial" + organization = "organization" + + allocated_storage = %[2]s +} +`, rName, storageAmount)) +} + +func testAccDBClusterConfig_dbStorageType(rName string, dbStorageType string) string { + return acctest.ConfigCompose(testAccDBClusterConfig_base(rName, 2), fmt.Sprintf(` +resource "aws_timestreaminfluxdb_db_cluster" "test" { + name = %[1]q + allocated_storage = 400 + username = "admin" + password = "testpassword" + vpc_subnet_ids = aws_subnet.test[*].id + vpc_security_group_ids = [aws_security_group.test.id] + db_instance_type = "db.influx.medium" + bucket = "initial" + organization = "organization" + + db_storage_type = %[2]q +} +`, rName, dbStorageType)) +} + +func testAccDBClusterConfig_failoverMode(rName string, failoverMode string) string { + return acctest.ConfigCompose(testAccDBClusterConfig_base(rName, 2), fmt.Sprintf(` +resource "aws_timestreaminfluxdb_db_cluster" "test" { + name = %[1]q + allocated_storage = 400 + username = "admin" + password = "testpassword" + vpc_subnet_ids = aws_subnet.test[*].id + vpc_security_group_ids = [aws_security_group.test.id] + db_instance_type = "db.influx.medium" + bucket = "initial" + organization = "organization" + + failover_mode = %[2]q +} +`, rName, failoverMode)) +} diff --git a/internal/service/timestreaminfluxdb/db_instance.go b/internal/service/timestreaminfluxdb/db_instance.go index 185b45cace9a..6e1e36ebfe3d 100644 --- a/internal/service/timestreaminfluxdb/db_instance.go +++ b/internal/service/timestreaminfluxdb/db_instance.go @@ -29,7 +29,6 @@ import ( 
"github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" "github.com/hashicorp/terraform-plugin-framework/schema/validator" "github.com/hashicorp/terraform-plugin-framework/types" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-provider-aws/internal/create" "github.com/hashicorp/terraform-provider-aws/internal/enum" "github.com/hashicorp/terraform-provider-aws/internal/errs" @@ -37,6 +36,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/framework" fwflex "github.com/hashicorp/terraform-provider-aws/internal/framework/flex" fwtypes "github.com/hashicorp/terraform-provider-aws/internal/framework/types" + "github.com/hashicorp/terraform-provider-aws/internal/retry" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/names" @@ -46,6 +46,8 @@ import ( // @Tags(identifierAttribute="arn") // @Testing(existsType="github.com/aws/aws-sdk-go-v2/service/timestreaminfluxdb;timestreaminfluxdb.GetDbInstanceOutput") // @Testing(importIgnore="bucket;username;organization;password") +// @Testing(existsTakesT=true) +// @Testing(destroyTakesT=true) func newDBInstanceResource(_ context.Context) (resource.ResourceWithConfigure, error) { r := &dbInstanceResource{} @@ -109,7 +111,7 @@ func (r *dbInstanceResource) Schema(ctx context.Context, req resource.SchemaRequ Optional: true, PlanModifiers: []planmodifier.String{ stringplanmodifier.RequiresReplaceIf( - dbParameterGroupIdentifierReplaceIf, "Replace db_parameter_group_identifier diff", "Replace db_parameter_group_identifier diff", + dbInstanceDBParameterGroupIdentifierReplaceIf, "Replace db_parameter_group_identifier diff", "Replace db_parameter_group_identifier diff", ), }, Validators: []validator.String{ @@ -183,7 +185,7 @@ func (r *dbInstanceResource) Schema(ctx context.Context, req resource.SchemaRequ Optional: true, 
Computed: true, PlanModifiers: []planmodifier.String{ - stringplanmodifier.RequiresReplace(), + stringplanmodifier.RequiresReplaceIfConfigured(), stringplanmodifier.UseStateForUnknown(), }, Description: `Specifies whether the networkType of the Timestream for InfluxDB instance is @@ -298,7 +300,7 @@ func (r *dbInstanceResource) Schema(ctx context.Context, req resource.SchemaRequ }, Blocks: map[string]schema.Block{ "log_delivery_configuration": schema.ListNestedBlock{ - CustomType: fwtypes.NewListNestedObjectTypeOf[logDeliveryConfigurationData](ctx), + CustomType: fwtypes.NewListNestedObjectTypeOf[dbInstanceLogDeliveryConfigurationData](ctx), Validators: []validator.List{ listvalidator.SizeAtMost(1), }, @@ -306,7 +308,7 @@ func (r *dbInstanceResource) Schema(ctx context.Context, req resource.SchemaRequ NestedObject: schema.NestedBlockObject{ Blocks: map[string]schema.Block{ "s3_configuration": schema.ListNestedBlock{ - CustomType: fwtypes.NewListNestedObjectTypeOf[s3ConfigurationData](ctx), + CustomType: fwtypes.NewListNestedObjectTypeOf[dbInstanceS3ConfigurationData](ctx), Validators: []validator.List{ listvalidator.SizeAtMost(1), }, @@ -340,7 +342,7 @@ func (r *dbInstanceResource) Schema(ctx context.Context, req resource.SchemaRequ } } -func dbParameterGroupIdentifierReplaceIf(ctx context.Context, req planmodifier.StringRequest, resp *stringplanmodifier.RequiresReplaceIfFuncResponse) { +func dbInstanceDBParameterGroupIdentifierReplaceIf(ctx context.Context, req planmodifier.StringRequest, resp *stringplanmodifier.RequiresReplaceIfFuncResponse) { if req.State.Raw.IsNull() || req.Plan.Raw.IsNull() { return } @@ -425,7 +427,7 @@ func (r *dbInstanceResource) Read(ctx context.Context, req resource.ReadRequest, } output, err := findDBInstanceByID(ctx, conn, state.ID.ValueString()) - if tfresource.NotFound(err) { + if retry.NotFound(err) { resp.Diagnostics.Append(fwdiag.NewResourceNotFoundWarningDiagnostic(err)) resp.State.RemoveResource(ctx) return @@ -460,7 +462,7 @@ 
func (r *dbInstanceResource) Update(ctx context.Context, req resource.UpdateRequ return } - diff, d := fwflex.Diff(ctx, plan, state) + diff, d := fwflex.Diff(ctx, plan, state, fwflex.WithIgnoredField("SecondaryAvailabilityZone")) resp.Diagnostics.Append(d...) if resp.Diagnostics.HasError() { return @@ -501,6 +503,8 @@ func (r *dbInstanceResource) Update(ctx context.Context, req resource.UpdateRequ plan.SecondaryAvailabilityZone = fwflex.StringToFrameworkLegacy(ctx, output.SecondaryAvailabilityZone) } else { + plan.NetworkType = state.NetworkType + plan.Port = state.Port plan.SecondaryAvailabilityZone = state.SecondaryAvailabilityZone } @@ -575,7 +579,7 @@ func waitDBInstanceCreated(ctx context.Context, conn *timestreaminfluxdb.Client, stateConf := &retry.StateChangeConf{ Pending: enum.Slice(awstypes.StatusCreating), Target: enum.Slice(awstypes.StatusAvailable), - Refresh: statusDBInstance(ctx, conn, id), + Refresh: statusDBInstance(conn, id), Timeout: timeout, NotFoundChecks: 20, ContinuousTargetOccurence: 2, @@ -593,7 +597,7 @@ func waitDBInstanceUpdated(ctx context.Context, conn *timestreaminfluxdb.Client, stateConf := &retry.StateChangeConf{ Pending: enum.Slice(awstypes.StatusModifying, awstypes.StatusUpdating, awstypes.StatusUpdatingInstanceType, awstypes.StatusUpdatingDeploymentType), Target: enum.Slice(awstypes.StatusAvailable), - Refresh: statusDBInstance(ctx, conn, id), + Refresh: statusDBInstance(conn, id), Timeout: timeout, NotFoundChecks: 20, ContinuousTargetOccurence: 2, @@ -611,7 +615,7 @@ func waitDBInstanceDeleted(ctx context.Context, conn *timestreaminfluxdb.Client, stateConf := &retry.StateChangeConf{ Pending: enum.Slice(awstypes.StatusDeleting, awstypes.StatusDeleted), Target: []string{}, - Refresh: statusDBInstance(ctx, conn, id), + Refresh: statusDBInstance(conn, id), Timeout: timeout, Delay: 30 * time.Second, } @@ -624,10 +628,10 @@ func waitDBInstanceDeleted(ctx context.Context, conn *timestreaminfluxdb.Client, return nil, err } -func 
statusDBInstance(ctx context.Context, conn *timestreaminfluxdb.Client, id string) retry.StateRefreshFunc { - return func() (any, string, error) { +func statusDBInstance(conn *timestreaminfluxdb.Client, id string) retry.StateRefreshFunc { + return func(ctx context.Context) (any, string, error) { out, err := findDBInstanceByID(ctx, conn, id) - if tfresource.NotFound(err) { + if retry.NotFound(err) { return nil, "", nil } @@ -647,8 +651,7 @@ func findDBInstanceByID(ctx context.Context, conn *timestreaminfluxdb.Client, id if errs.IsA[*awstypes.ResourceNotFoundException](err) { return nil, &retry.NotFoundError{ - LastError: err, - LastRequest: in, + LastError: err, } } @@ -665,38 +668,38 @@ func findDBInstanceByID(ctx context.Context, conn *timestreaminfluxdb.Client, id type dbInstanceResourceModel struct { framework.WithRegionModel - AllocatedStorage types.Int64 `tfsdk:"allocated_storage"` - ARN types.String `tfsdk:"arn"` - AvailabilityZone types.String `tfsdk:"availability_zone"` - Bucket types.String `tfsdk:"bucket"` - DBInstanceType fwtypes.StringEnum[awstypes.DbInstanceType] `tfsdk:"db_instance_type"` - DBParameterGroupIdentifier types.String `tfsdk:"db_parameter_group_identifier"` - DBStorageType fwtypes.StringEnum[awstypes.DbStorageType] `tfsdk:"db_storage_type"` - DeploymentType fwtypes.StringEnum[awstypes.DeploymentType] `tfsdk:"deployment_type"` - Endpoint types.String `tfsdk:"endpoint"` - ID types.String `tfsdk:"id"` - InfluxAuthParametersSecretARN types.String `tfsdk:"influx_auth_parameters_secret_arn"` - LogDeliveryConfiguration fwtypes.ListNestedObjectValueOf[logDeliveryConfigurationData] `tfsdk:"log_delivery_configuration"` - Name types.String `tfsdk:"name"` - NetworkType fwtypes.StringEnum[awstypes.NetworkType] `tfsdk:"network_type"` - Organization types.String `tfsdk:"organization"` - Password types.String `tfsdk:"password"` - Port types.Int32 `tfsdk:"port"` - PubliclyAccessible types.Bool `tfsdk:"publicly_accessible"` - SecondaryAvailabilityZone 
types.String `tfsdk:"secondary_availability_zone"` - Tags tftags.Map `tfsdk:"tags"` - TagsAll tftags.Map `tfsdk:"tags_all"` - Timeouts timeouts.Value `tfsdk:"timeouts"` - Username types.String `tfsdk:"username"` - VPCSecurityGroupIDs fwtypes.SetOfString `tfsdk:"vpc_security_group_ids"` - VPCSubnetIDs fwtypes.SetOfString `tfsdk:"vpc_subnet_ids"` + AllocatedStorage types.Int64 `tfsdk:"allocated_storage"` + ARN types.String `tfsdk:"arn"` + AvailabilityZone types.String `tfsdk:"availability_zone"` + Bucket types.String `tfsdk:"bucket"` + DBInstanceType fwtypes.StringEnum[awstypes.DbInstanceType] `tfsdk:"db_instance_type"` + DBParameterGroupIdentifier types.String `tfsdk:"db_parameter_group_identifier"` + DBStorageType fwtypes.StringEnum[awstypes.DbStorageType] `tfsdk:"db_storage_type"` + DeploymentType fwtypes.StringEnum[awstypes.DeploymentType] `tfsdk:"deployment_type"` + Endpoint types.String `tfsdk:"endpoint"` + ID types.String `tfsdk:"id"` + InfluxAuthParametersSecretARN types.String `tfsdk:"influx_auth_parameters_secret_arn"` + LogDeliveryConfiguration fwtypes.ListNestedObjectValueOf[dbInstanceLogDeliveryConfigurationData] `tfsdk:"log_delivery_configuration"` + Name types.String `tfsdk:"name"` + NetworkType fwtypes.StringEnum[awstypes.NetworkType] `tfsdk:"network_type"` + Organization types.String `tfsdk:"organization"` + Password types.String `tfsdk:"password"` + Port types.Int32 `tfsdk:"port"` + PubliclyAccessible types.Bool `tfsdk:"publicly_accessible"` + SecondaryAvailabilityZone types.String `tfsdk:"secondary_availability_zone"` + Tags tftags.Map `tfsdk:"tags"` + TagsAll tftags.Map `tfsdk:"tags_all"` + Timeouts timeouts.Value `tfsdk:"timeouts"` + Username types.String `tfsdk:"username"` + VPCSecurityGroupIDs fwtypes.SetOfString `tfsdk:"vpc_security_group_ids"` + VPCSubnetIDs fwtypes.SetOfString `tfsdk:"vpc_subnet_ids"` } -type logDeliveryConfigurationData struct { - S3Configuration fwtypes.ListNestedObjectValueOf[s3ConfigurationData] 
`tfsdk:"s3_configuration"` +type dbInstanceLogDeliveryConfigurationData struct { + S3Configuration fwtypes.ListNestedObjectValueOf[dbInstanceS3ConfigurationData] `tfsdk:"s3_configuration"` } -type s3ConfigurationData struct { +type dbInstanceS3ConfigurationData struct { BucketName types.String `tfsdk:"bucket_name"` Enabled types.Bool `tfsdk:"enabled"` } diff --git a/internal/service/timestreaminfluxdb/db_instance_tags_gen_test.go b/internal/service/timestreaminfluxdb/db_instance_tags_gen_test.go index 5633e6ca9e95..2857471b769b 100644 --- a/internal/service/timestreaminfluxdb/db_instance_tags_gen_test.go +++ b/internal/service/timestreaminfluxdb/db_instance_tags_gen_test.go @@ -7,7 +7,6 @@ import ( "github.com/aws/aws-sdk-go-v2/service/timestreaminfluxdb" "github.com/hashicorp/terraform-plugin-testing/config" - sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/knownvalue" "github.com/hashicorp/terraform-plugin-testing/plancheck" @@ -19,14 +18,15 @@ import ( func TestAccTimestreamInfluxDBDBInstance_tags(t *testing.T) { ctx := acctest.Context(t) + var v timestreaminfluxdb.GetDbInstanceOutput resourceName := "aws_timestreaminfluxdb_db_instance.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.TimestreamInfluxDBServiceID), - CheckDestroy: testAccCheckDBInstanceDestroy(ctx), + CheckDestroy: testAccCheckDBInstanceDestroy(ctx, t), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, Steps: []resource.TestStep{ { @@ -38,7 +38,7 @@ func TestAccTimestreamInfluxDBDBInstance_tags(t *testing.T) { }), }, Check: resource.ComposeAggregateTestCheckFunc( - 
testAccCheckDBInstanceExists(ctx, resourceName, &v), + testAccCheckDBInstanceExists(ctx, t, resourceName, &v), ), ConfigStateChecks: []statecheck.StateCheck{ statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ @@ -85,7 +85,7 @@ func TestAccTimestreamInfluxDBDBInstance_tags(t *testing.T) { }), }, Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckDBInstanceExists(ctx, resourceName, &v), + testAccCheckDBInstanceExists(ctx, t, resourceName, &v), ), ConfigStateChecks: []statecheck.StateCheck{ statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ @@ -136,7 +136,7 @@ func TestAccTimestreamInfluxDBDBInstance_tags(t *testing.T) { }), }, Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckDBInstanceExists(ctx, resourceName, &v), + testAccCheckDBInstanceExists(ctx, t, resourceName, &v), ), ConfigStateChecks: []statecheck.StateCheck{ statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ @@ -180,7 +180,7 @@ func TestAccTimestreamInfluxDBDBInstance_tags(t *testing.T) { acctest.CtResourceTags: nil, }, Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckDBInstanceExists(ctx, resourceName, &v), + testAccCheckDBInstanceExists(ctx, t, resourceName, &v), ), ConfigStateChecks: []statecheck.StateCheck{ statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), @@ -213,14 +213,15 @@ func TestAccTimestreamInfluxDBDBInstance_tags(t *testing.T) { func TestAccTimestreamInfluxDBDBInstance_tags_null(t *testing.T) { ctx := acctest.Context(t) + var v timestreaminfluxdb.GetDbInstanceOutput resourceName := "aws_timestreaminfluxdb_db_instance.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + 
acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.TimestreamInfluxDBServiceID), - CheckDestroy: testAccCheckDBInstanceDestroy(ctx), + CheckDestroy: testAccCheckDBInstanceDestroy(ctx, t), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, Steps: []resource.TestStep{ { @@ -232,7 +233,7 @@ func TestAccTimestreamInfluxDBDBInstance_tags_null(t *testing.T) { }), }, Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckDBInstanceExists(ctx, resourceName, &v), + testAccCheckDBInstanceExists(ctx, t, resourceName, &v), ), ConfigStateChecks: []statecheck.StateCheck{ statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ @@ -276,14 +277,15 @@ func TestAccTimestreamInfluxDBDBInstance_tags_null(t *testing.T) { func TestAccTimestreamInfluxDBDBInstance_tags_EmptyMap(t *testing.T) { ctx := acctest.Context(t) + var v timestreaminfluxdb.GetDbInstanceOutput resourceName := "aws_timestreaminfluxdb_db_instance.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.TimestreamInfluxDBServiceID), - CheckDestroy: testAccCheckDBInstanceDestroy(ctx), + CheckDestroy: testAccCheckDBInstanceDestroy(ctx, t), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, Steps: []resource.TestStep{ { @@ -293,7 +295,7 @@ func TestAccTimestreamInfluxDBDBInstance_tags_EmptyMap(t *testing.T) { acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{}), }, Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckDBInstanceExists(ctx, resourceName, &v), + testAccCheckDBInstanceExists(ctx, t, resourceName, &v), ), ConfigStateChecks: []statecheck.StateCheck{ 
statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), @@ -327,14 +329,15 @@ func TestAccTimestreamInfluxDBDBInstance_tags_EmptyMap(t *testing.T) { func TestAccTimestreamInfluxDBDBInstance_tags_AddOnUpdate(t *testing.T) { ctx := acctest.Context(t) + var v timestreaminfluxdb.GetDbInstanceOutput resourceName := "aws_timestreaminfluxdb_db_instance.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.TimestreamInfluxDBServiceID), - CheckDestroy: testAccCheckDBInstanceDestroy(ctx), + CheckDestroy: testAccCheckDBInstanceDestroy(ctx, t), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, Steps: []resource.TestStep{ { @@ -344,7 +347,7 @@ func TestAccTimestreamInfluxDBDBInstance_tags_AddOnUpdate(t *testing.T) { acctest.CtResourceTags: nil, }, Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckDBInstanceExists(ctx, resourceName, &v), + testAccCheckDBInstanceExists(ctx, t, resourceName, &v), ), ConfigStateChecks: []statecheck.StateCheck{ statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), @@ -367,7 +370,7 @@ func TestAccTimestreamInfluxDBDBInstance_tags_AddOnUpdate(t *testing.T) { }), }, Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckDBInstanceExists(ctx, resourceName, &v), + testAccCheckDBInstanceExists(ctx, t, resourceName, &v), ), ConfigStateChecks: []statecheck.StateCheck{ statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ @@ -410,14 +413,15 @@ func TestAccTimestreamInfluxDBDBInstance_tags_AddOnUpdate(t *testing.T) { func TestAccTimestreamInfluxDBDBInstance_tags_EmptyTag_OnCreate(t *testing.T) { ctx 
:= acctest.Context(t) + var v timestreaminfluxdb.GetDbInstanceOutput resourceName := "aws_timestreaminfluxdb_db_instance.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.TimestreamInfluxDBServiceID), - CheckDestroy: testAccCheckDBInstanceDestroy(ctx), + CheckDestroy: testAccCheckDBInstanceDestroy(ctx, t), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, Steps: []resource.TestStep{ { @@ -429,7 +433,7 @@ func TestAccTimestreamInfluxDBDBInstance_tags_EmptyTag_OnCreate(t *testing.T) { }), }, Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckDBInstanceExists(ctx, resourceName, &v), + testAccCheckDBInstanceExists(ctx, t, resourceName, &v), ), ConfigStateChecks: []statecheck.StateCheck{ statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ @@ -473,7 +477,7 @@ func TestAccTimestreamInfluxDBDBInstance_tags_EmptyTag_OnCreate(t *testing.T) { acctest.CtResourceTags: nil, }, Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckDBInstanceExists(ctx, resourceName, &v), + testAccCheckDBInstanceExists(ctx, t, resourceName, &v), ), ConfigStateChecks: []statecheck.StateCheck{ statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), @@ -506,14 +510,15 @@ func TestAccTimestreamInfluxDBDBInstance_tags_EmptyTag_OnCreate(t *testing.T) { func TestAccTimestreamInfluxDBDBInstance_tags_EmptyTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + var v timestreaminfluxdb.GetDbInstanceOutput resourceName := "aws_timestreaminfluxdb_db_instance.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - 
resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.TimestreamInfluxDBServiceID), - CheckDestroy: testAccCheckDBInstanceDestroy(ctx), + CheckDestroy: testAccCheckDBInstanceDestroy(ctx, t), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, Steps: []resource.TestStep{ { @@ -525,7 +530,7 @@ func TestAccTimestreamInfluxDBDBInstance_tags_EmptyTag_OnUpdate_Add(t *testing.T }), }, Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckDBInstanceExists(ctx, resourceName, &v), + testAccCheckDBInstanceExists(ctx, t, resourceName, &v), ), ConfigStateChecks: []statecheck.StateCheck{ statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ @@ -557,7 +562,7 @@ func TestAccTimestreamInfluxDBDBInstance_tags_EmptyTag_OnUpdate_Add(t *testing.T }), }, Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckDBInstanceExists(ctx, resourceName, &v), + testAccCheckDBInstanceExists(ctx, t, resourceName, &v), ), ConfigStateChecks: []statecheck.StateCheck{ statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ @@ -608,7 +613,7 @@ func TestAccTimestreamInfluxDBDBInstance_tags_EmptyTag_OnUpdate_Add(t *testing.T }), }, Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckDBInstanceExists(ctx, resourceName, &v), + testAccCheckDBInstanceExists(ctx, t, resourceName, &v), ), ConfigStateChecks: []statecheck.StateCheck{ statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ @@ -651,14 +656,15 @@ func TestAccTimestreamInfluxDBDBInstance_tags_EmptyTag_OnUpdate_Add(t *testing.T func TestAccTimestreamInfluxDBDBInstance_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + var v timestreaminfluxdb.GetDbInstanceOutput resourceName 
:= "aws_timestreaminfluxdb_db_instance.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.TimestreamInfluxDBServiceID), - CheckDestroy: testAccCheckDBInstanceDestroy(ctx), + CheckDestroy: testAccCheckDBInstanceDestroy(ctx, t), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, Steps: []resource.TestStep{ { @@ -670,7 +676,7 @@ func TestAccTimestreamInfluxDBDBInstance_tags_EmptyTag_OnUpdate_Replace(t *testi }), }, Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckDBInstanceExists(ctx, resourceName, &v), + testAccCheckDBInstanceExists(ctx, t, resourceName, &v), ), ConfigStateChecks: []statecheck.StateCheck{ statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ @@ -701,7 +707,7 @@ func TestAccTimestreamInfluxDBDBInstance_tags_EmptyTag_OnUpdate_Replace(t *testi }), }, Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckDBInstanceExists(ctx, resourceName, &v), + testAccCheckDBInstanceExists(ctx, t, resourceName, &v), ), ConfigStateChecks: []statecheck.StateCheck{ statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ @@ -744,14 +750,15 @@ func TestAccTimestreamInfluxDBDBInstance_tags_EmptyTag_OnUpdate_Replace(t *testi func TestAccTimestreamInfluxDBDBInstance_tags_DefaultTags_providerOnly(t *testing.T) { ctx := acctest.Context(t) + var v timestreaminfluxdb.GetDbInstanceOutput resourceName := "aws_timestreaminfluxdb_db_instance.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ 
PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.TimestreamInfluxDBServiceID), - CheckDestroy: testAccCheckDBInstanceDestroy(ctx), + CheckDestroy: testAccCheckDBInstanceDestroy(ctx, t), Steps: []resource.TestStep{ { ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -764,7 +771,7 @@ func TestAccTimestreamInfluxDBDBInstance_tags_DefaultTags_providerOnly(t *testin acctest.CtResourceTags: nil, }, Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckDBInstanceExists(ctx, resourceName, &v), + testAccCheckDBInstanceExists(ctx, t, resourceName, &v), ), ConfigStateChecks: []statecheck.StateCheck{ statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), @@ -811,7 +818,7 @@ func TestAccTimestreamInfluxDBDBInstance_tags_DefaultTags_providerOnly(t *testin acctest.CtResourceTags: nil, }, Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckDBInstanceExists(ctx, resourceName, &v), + testAccCheckDBInstanceExists(ctx, t, resourceName, &v), ), ConfigStateChecks: []statecheck.StateCheck{ statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), @@ -860,7 +867,7 @@ func TestAccTimestreamInfluxDBDBInstance_tags_DefaultTags_providerOnly(t *testin acctest.CtResourceTags: nil, }, Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckDBInstanceExists(ctx, resourceName, &v), + testAccCheckDBInstanceExists(ctx, t, resourceName, &v), ), ConfigStateChecks: []statecheck.StateCheck{ statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), @@ -903,7 +910,7 @@ func TestAccTimestreamInfluxDBDBInstance_tags_DefaultTags_providerOnly(t *testin acctest.CtResourceTags: nil, }, Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckDBInstanceExists(ctx, resourceName, &v), + testAccCheckDBInstanceExists(ctx, t, resourceName, &v), ), ConfigStateChecks: []statecheck.StateCheck{ statecheck.ExpectKnownValue(resourceName, 
tfjsonpath.New(names.AttrTags), knownvalue.Null()), @@ -937,14 +944,15 @@ func TestAccTimestreamInfluxDBDBInstance_tags_DefaultTags_providerOnly(t *testin func TestAccTimestreamInfluxDBDBInstance_tags_DefaultTags_nonOverlapping(t *testing.T) { ctx := acctest.Context(t) + var v timestreaminfluxdb.GetDbInstanceOutput resourceName := "aws_timestreaminfluxdb_db_instance.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.TimestreamInfluxDBServiceID), - CheckDestroy: testAccCheckDBInstanceDestroy(ctx), + CheckDestroy: testAccCheckDBInstanceDestroy(ctx, t), Steps: []resource.TestStep{ { ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -959,7 +967,7 @@ func TestAccTimestreamInfluxDBDBInstance_tags_DefaultTags_nonOverlapping(t *test }), }, Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckDBInstanceExists(ctx, resourceName, &v), + testAccCheckDBInstanceExists(ctx, t, resourceName, &v), ), ConfigStateChecks: []statecheck.StateCheck{ statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ @@ -1016,7 +1024,7 @@ func TestAccTimestreamInfluxDBDBInstance_tags_DefaultTags_nonOverlapping(t *test }), }, Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckDBInstanceExists(ctx, resourceName, &v), + testAccCheckDBInstanceExists(ctx, t, resourceName, &v), ), ConfigStateChecks: []statecheck.StateCheck{ statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ @@ -1072,7 +1080,7 @@ func TestAccTimestreamInfluxDBDBInstance_tags_DefaultTags_nonOverlapping(t *test acctest.CtResourceTags: nil, }, Check: resource.ComposeAggregateTestCheckFunc( - 
testAccCheckDBInstanceExists(ctx, resourceName, &v), + testAccCheckDBInstanceExists(ctx, t, resourceName, &v), ), ConfigStateChecks: []statecheck.StateCheck{ statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), @@ -1106,14 +1114,15 @@ func TestAccTimestreamInfluxDBDBInstance_tags_DefaultTags_nonOverlapping(t *test func TestAccTimestreamInfluxDBDBInstance_tags_DefaultTags_overlapping(t *testing.T) { ctx := acctest.Context(t) + var v timestreaminfluxdb.GetDbInstanceOutput resourceName := "aws_timestreaminfluxdb_db_instance.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.TimestreamInfluxDBServiceID), - CheckDestroy: testAccCheckDBInstanceDestroy(ctx), + CheckDestroy: testAccCheckDBInstanceDestroy(ctx, t), Steps: []resource.TestStep{ { ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -1128,7 +1137,7 @@ func TestAccTimestreamInfluxDBDBInstance_tags_DefaultTags_overlapping(t *testing }), }, Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckDBInstanceExists(ctx, resourceName, &v), + testAccCheckDBInstanceExists(ctx, t, resourceName, &v), ), ConfigStateChecks: []statecheck.StateCheck{ statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ @@ -1184,7 +1193,7 @@ func TestAccTimestreamInfluxDBDBInstance_tags_DefaultTags_overlapping(t *testing }), }, Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckDBInstanceExists(ctx, resourceName, &v), + testAccCheckDBInstanceExists(ctx, t, resourceName, &v), ), ConfigStateChecks: []statecheck.StateCheck{ statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ @@ 
-1244,7 +1253,7 @@ func TestAccTimestreamInfluxDBDBInstance_tags_DefaultTags_overlapping(t *testing }), }, Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckDBInstanceExists(ctx, resourceName, &v), + testAccCheckDBInstanceExists(ctx, t, resourceName, &v), ), ConfigStateChecks: []statecheck.StateCheck{ statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ @@ -1291,14 +1300,15 @@ func TestAccTimestreamInfluxDBDBInstance_tags_DefaultTags_overlapping(t *testing func TestAccTimestreamInfluxDBDBInstance_tags_DefaultTags_updateToProviderOnly(t *testing.T) { ctx := acctest.Context(t) + var v timestreaminfluxdb.GetDbInstanceOutput resourceName := "aws_timestreaminfluxdb_db_instance.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.TimestreamInfluxDBServiceID), - CheckDestroy: testAccCheckDBInstanceDestroy(ctx), + CheckDestroy: testAccCheckDBInstanceDestroy(ctx, t), Steps: []resource.TestStep{ { ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -1310,7 +1320,7 @@ func TestAccTimestreamInfluxDBDBInstance_tags_DefaultTags_updateToProviderOnly(t }), }, Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckDBInstanceExists(ctx, resourceName, &v), + testAccCheckDBInstanceExists(ctx, t, resourceName, &v), ), ConfigStateChecks: []statecheck.StateCheck{ statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ @@ -1343,7 +1353,7 @@ func TestAccTimestreamInfluxDBDBInstance_tags_DefaultTags_updateToProviderOnly(t acctest.CtResourceTags: nil, }, Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckDBInstanceExists(ctx, resourceName, &v), + 
testAccCheckDBInstanceExists(ctx, t, resourceName, &v), ), ConfigStateChecks: []statecheck.StateCheck{ statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), @@ -1384,14 +1394,15 @@ func TestAccTimestreamInfluxDBDBInstance_tags_DefaultTags_updateToProviderOnly(t func TestAccTimestreamInfluxDBDBInstance_tags_DefaultTags_updateToResourceOnly(t *testing.T) { ctx := acctest.Context(t) + var v timestreaminfluxdb.GetDbInstanceOutput resourceName := "aws_timestreaminfluxdb_db_instance.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.TimestreamInfluxDBServiceID), - CheckDestroy: testAccCheckDBInstanceDestroy(ctx), + CheckDestroy: testAccCheckDBInstanceDestroy(ctx, t), Steps: []resource.TestStep{ { ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -1404,7 +1415,7 @@ func TestAccTimestreamInfluxDBDBInstance_tags_DefaultTags_updateToResourceOnly(t acctest.CtResourceTags: nil, }, Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckDBInstanceExists(ctx, resourceName, &v), + testAccCheckDBInstanceExists(ctx, t, resourceName, &v), ), ConfigStateChecks: []statecheck.StateCheck{ statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), @@ -1432,7 +1443,7 @@ func TestAccTimestreamInfluxDBDBInstance_tags_DefaultTags_updateToResourceOnly(t }), }, Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckDBInstanceExists(ctx, resourceName, &v), + testAccCheckDBInstanceExists(ctx, t, resourceName, &v), ), ConfigStateChecks: []statecheck.StateCheck{ statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ @@ -1476,14 +1487,15 @@ func 
TestAccTimestreamInfluxDBDBInstance_tags_DefaultTags_updateToResourceOnly(t func TestAccTimestreamInfluxDBDBInstance_tags_DefaultTags_emptyResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v timestreaminfluxdb.GetDbInstanceOutput resourceName := "aws_timestreaminfluxdb_db_instance.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.TimestreamInfluxDBServiceID), - CheckDestroy: testAccCheckDBInstanceDestroy(ctx), + CheckDestroy: testAccCheckDBInstanceDestroy(ctx, t), Steps: []resource.TestStep{ { ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -1498,7 +1510,7 @@ func TestAccTimestreamInfluxDBDBInstance_tags_DefaultTags_emptyResourceTag(t *te }), }, Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckDBInstanceExists(ctx, resourceName, &v), + testAccCheckDBInstanceExists(ctx, t, resourceName, &v), ), ConfigStateChecks: []statecheck.StateCheck{ statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ @@ -1545,14 +1557,15 @@ func TestAccTimestreamInfluxDBDBInstance_tags_DefaultTags_emptyResourceTag(t *te func TestAccTimestreamInfluxDBDBInstance_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T) { ctx := acctest.Context(t) + var v timestreaminfluxdb.GetDbInstanceOutput resourceName := "aws_timestreaminfluxdb_db_instance.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.TimestreamInfluxDBServiceID), - CheckDestroy: testAccCheckDBInstanceDestroy(ctx), + 
CheckDestroy: testAccCheckDBInstanceDestroy(ctx, t), Steps: []resource.TestStep{ { ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -1565,7 +1578,7 @@ func TestAccTimestreamInfluxDBDBInstance_tags_DefaultTags_emptyProviderOnlyTag(t acctest.CtResourceTags: nil, }, Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckDBInstanceExists(ctx, resourceName, &v), + testAccCheckDBInstanceExists(ctx, t, resourceName, &v), ), ConfigStateChecks: []statecheck.StateCheck{ statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), @@ -1606,14 +1619,15 @@ func TestAccTimestreamInfluxDBDBInstance_tags_DefaultTags_emptyProviderOnlyTag(t func TestAccTimestreamInfluxDBDBInstance_tags_DefaultTags_nullOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v timestreaminfluxdb.GetDbInstanceOutput resourceName := "aws_timestreaminfluxdb_db_instance.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.TimestreamInfluxDBServiceID), - CheckDestroy: testAccCheckDBInstanceDestroy(ctx), + CheckDestroy: testAccCheckDBInstanceDestroy(ctx, t), Steps: []resource.TestStep{ { ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -1628,7 +1642,7 @@ func TestAccTimestreamInfluxDBDBInstance_tags_DefaultTags_nullOverlappingResourc }), }, Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckDBInstanceExists(ctx, resourceName, &v), + testAccCheckDBInstanceExists(ctx, t, resourceName, &v), ), ConfigStateChecks: []statecheck.StateCheck{ statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ @@ -1676,14 +1690,15 @@ func TestAccTimestreamInfluxDBDBInstance_tags_DefaultTags_nullOverlappingResourc 
func TestAccTimestreamInfluxDBDBInstance_tags_DefaultTags_nullNonOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v timestreaminfluxdb.GetDbInstanceOutput resourceName := "aws_timestreaminfluxdb_db_instance.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.TimestreamInfluxDBServiceID), - CheckDestroy: testAccCheckDBInstanceDestroy(ctx), + CheckDestroy: testAccCheckDBInstanceDestroy(ctx, t), Steps: []resource.TestStep{ { ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -1698,7 +1713,7 @@ func TestAccTimestreamInfluxDBDBInstance_tags_DefaultTags_nullNonOverlappingReso }), }, Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckDBInstanceExists(ctx, resourceName, &v), + testAccCheckDBInstanceExists(ctx, t, resourceName, &v), ), ConfigStateChecks: []statecheck.StateCheck{ statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ @@ -1748,14 +1763,15 @@ func TestAccTimestreamInfluxDBDBInstance_tags_DefaultTags_nullNonOverlappingReso func TestAccTimestreamInfluxDBDBInstance_tags_ComputedTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) + var v timestreaminfluxdb.GetDbInstanceOutput resourceName := "aws_timestreaminfluxdb_db_instance.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.TimestreamInfluxDBServiceID), - CheckDestroy: testAccCheckDBInstanceDestroy(ctx), + CheckDestroy: testAccCheckDBInstanceDestroy(ctx, t), Steps: 
[]resource.TestStep{ { ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -1765,7 +1781,7 @@ func TestAccTimestreamInfluxDBDBInstance_tags_ComputedTag_OnCreate(t *testing.T) "unknownTagKey": config.StringVariable("computedkey1"), }, Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckDBInstanceExists(ctx, resourceName, &v), + testAccCheckDBInstanceExists(ctx, t, resourceName, &v), resource.TestCheckResourceAttrPair(resourceName, "tags.computedkey1", "null_resource.test", names.AttrID), ), ConfigStateChecks: []statecheck.StateCheck{ @@ -1806,14 +1822,15 @@ func TestAccTimestreamInfluxDBDBInstance_tags_ComputedTag_OnCreate(t *testing.T) func TestAccTimestreamInfluxDBDBInstance_tags_ComputedTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + var v timestreaminfluxdb.GetDbInstanceOutput resourceName := "aws_timestreaminfluxdb_db_instance.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.TimestreamInfluxDBServiceID), - CheckDestroy: testAccCheckDBInstanceDestroy(ctx), + CheckDestroy: testAccCheckDBInstanceDestroy(ctx, t), Steps: []resource.TestStep{ { ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -1825,7 +1842,7 @@ func TestAccTimestreamInfluxDBDBInstance_tags_ComputedTag_OnUpdate_Add(t *testin }), }, Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckDBInstanceExists(ctx, resourceName, &v), + testAccCheckDBInstanceExists(ctx, t, resourceName, &v), ), ConfigStateChecks: []statecheck.StateCheck{ statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ @@ -1857,7 +1874,7 @@ func TestAccTimestreamInfluxDBDBInstance_tags_ComputedTag_OnUpdate_Add(t *testin "knownTagValue": 
config.StringVariable(acctest.CtValue1), }, Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckDBInstanceExists(ctx, resourceName, &v), + testAccCheckDBInstanceExists(ctx, t, resourceName, &v), resource.TestCheckResourceAttrPair(resourceName, "tags.computedkey1", "null_resource.test", names.AttrID), ), ConfigStateChecks: []statecheck.StateCheck{ @@ -1906,14 +1923,15 @@ func TestAccTimestreamInfluxDBDBInstance_tags_ComputedTag_OnUpdate_Add(t *testin func TestAccTimestreamInfluxDBDBInstance_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + var v timestreaminfluxdb.GetDbInstanceOutput resourceName := "aws_timestreaminfluxdb_db_instance.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.TimestreamInfluxDBServiceID), - CheckDestroy: testAccCheckDBInstanceDestroy(ctx), + CheckDestroy: testAccCheckDBInstanceDestroy(ctx, t), Steps: []resource.TestStep{ { ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -1925,7 +1943,7 @@ func TestAccTimestreamInfluxDBDBInstance_tags_ComputedTag_OnUpdate_Replace(t *te }), }, Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckDBInstanceExists(ctx, resourceName, &v), + testAccCheckDBInstanceExists(ctx, t, resourceName, &v), ), ConfigStateChecks: []statecheck.StateCheck{ statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ @@ -1955,7 +1973,7 @@ func TestAccTimestreamInfluxDBDBInstance_tags_ComputedTag_OnUpdate_Replace(t *te "unknownTagKey": config.StringVariable(acctest.CtKey1), }, Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckDBInstanceExists(ctx, resourceName, &v), + testAccCheckDBInstanceExists(ctx, t, resourceName, &v), 
resource.TestCheckResourceAttrPair(resourceName, acctest.CtTagsKey1, "null_resource.test", names.AttrID), ), ConfigStateChecks: []statecheck.StateCheck{ @@ -1996,14 +2014,15 @@ func TestAccTimestreamInfluxDBDBInstance_tags_ComputedTag_OnUpdate_Replace(t *te func TestAccTimestreamInfluxDBDBInstance_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { ctx := acctest.Context(t) + var v timestreaminfluxdb.GetDbInstanceOutput resourceName := "aws_timestreaminfluxdb_db_instance.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.TimestreamInfluxDBServiceID), - CheckDestroy: testAccCheckDBInstanceDestroy(ctx), + CheckDestroy: testAccCheckDBInstanceDestroy(ctx, t), Steps: []resource.TestStep{ // 1: Create { @@ -2022,7 +2041,7 @@ func TestAccTimestreamInfluxDBDBInstance_tags_IgnoreTags_Overlap_DefaultTag(t *t ), }, Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckDBInstanceExists(ctx, resourceName, &v), + testAccCheckDBInstanceExists(ctx, t, resourceName, &v), ), ConfigStateChecks: []statecheck.StateCheck{ statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ @@ -2071,7 +2090,7 @@ func TestAccTimestreamInfluxDBDBInstance_tags_IgnoreTags_Overlap_DefaultTag(t *t ), }, Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckDBInstanceExists(ctx, resourceName, &v), + testAccCheckDBInstanceExists(ctx, t, resourceName, &v), ), ConfigStateChecks: []statecheck.StateCheck{ statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ @@ -2120,7 +2139,7 @@ func TestAccTimestreamInfluxDBDBInstance_tags_IgnoreTags_Overlap_DefaultTag(t *t ), }, Check: 
resource.ComposeAggregateTestCheckFunc( - testAccCheckDBInstanceExists(ctx, resourceName, &v), + testAccCheckDBInstanceExists(ctx, t, resourceName, &v), ), ConfigStateChecks: []statecheck.StateCheck{ statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ @@ -2158,14 +2177,15 @@ func TestAccTimestreamInfluxDBDBInstance_tags_IgnoreTags_Overlap_DefaultTag(t *t func TestAccTimestreamInfluxDBDBInstance_tags_IgnoreTags_Overlap_ResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v timestreaminfluxdb.GetDbInstanceOutput resourceName := "aws_timestreaminfluxdb_db_instance.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.TimestreamInfluxDBServiceID), - CheckDestroy: testAccCheckDBInstanceDestroy(ctx), + CheckDestroy: testAccCheckDBInstanceDestroy(ctx, t), Steps: []resource.TestStep{ // 1: Create { @@ -2182,7 +2202,7 @@ func TestAccTimestreamInfluxDBDBInstance_tags_IgnoreTags_Overlap_ResourceTag(t * ), }, Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckDBInstanceExists(ctx, resourceName, &v), + testAccCheckDBInstanceExists(ctx, t, resourceName, &v), ), ConfigStateChecks: []statecheck.StateCheck{ statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ @@ -2240,7 +2260,7 @@ func TestAccTimestreamInfluxDBDBInstance_tags_IgnoreTags_Overlap_ResourceTag(t * ), }, Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckDBInstanceExists(ctx, resourceName, &v), + testAccCheckDBInstanceExists(ctx, t, resourceName, &v), ), ConfigStateChecks: []statecheck.StateCheck{ statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), 
knownvalue.MapExact(map[string]knownvalue.Check{ @@ -2297,7 +2317,7 @@ func TestAccTimestreamInfluxDBDBInstance_tags_IgnoreTags_Overlap_ResourceTag(t * ), }, Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckDBInstanceExists(ctx, resourceName, &v), + testAccCheckDBInstanceExists(ctx, t, resourceName, &v), ), ConfigStateChecks: []statecheck.StateCheck{ statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ diff --git a/internal/service/timestreaminfluxdb/db_instance_test.go b/internal/service/timestreaminfluxdb/db_instance_test.go index 22c413b3a2db..a970a68e8eef 100644 --- a/internal/service/timestreaminfluxdb/db_instance_test.go +++ b/internal/service/timestreaminfluxdb/db_instance_test.go @@ -13,7 +13,6 @@ import ( "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/timestreaminfluxdb" awstypes "github.com/aws/aws-sdk-go-v2/service/timestreaminfluxdb/types" - sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/knownvalue" "github.com/hashicorp/terraform-plugin-testing/plancheck" @@ -22,10 +21,9 @@ import ( "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" "github.com/hashicorp/terraform-provider-aws/internal/acctest" tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" - "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/create" + "github.com/hashicorp/terraform-provider-aws/internal/retry" tftimestreaminfluxdb "github.com/hashicorp/terraform-provider-aws/internal/service/timestreaminfluxdb" - "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -36,22 +34,22 @@ func TestAccTimestreamInfluxDBDBInstance_basic(t *testing.T) { } var dbInstance 
timestreaminfluxdb.GetDbInstanceOutput - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) resourceName := "aws_timestreaminfluxdb_db_instance.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) - testAccPreCheck(ctx, t) + testAccPreCheckDBInstances(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.TimestreamInfluxDBServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckDBInstanceDestroy(ctx), + CheckDestroy: testAccCheckDBInstanceDestroy(ctx, t), Steps: []resource.TestStep{ { Config: testAccDBInstanceConfig_basic(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckDBInstanceExists(ctx, resourceName, &dbInstance), + testAccCheckDBInstanceExists(ctx, t, resourceName, &dbInstance), acctest.MatchResourceAttrRegionalARN(ctx, resourceName, names.AttrARN, "timestream-influxdb", regexache.MustCompile(`db-instance/.+$`)), resource.TestCheckResourceAttrSet(resourceName, names.AttrAvailabilityZone), resource.TestCheckResourceAttr(resourceName, "db_storage_type", string(awstypes.DbStorageTypeInfluxIoIncludedT1)), @@ -79,22 +77,22 @@ func TestAccTimestreamInfluxDBDBInstance_disappears(t *testing.T) { } var dbInstance timestreaminfluxdb.GetDbInstanceOutput - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) resourceName := "aws_timestreaminfluxdb_db_instance.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) - testAccPreCheck(ctx, t) + testAccPreCheckDBInstances(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.TimestreamInfluxDBServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckDBInstanceDestroy(ctx), + CheckDestroy: 
testAccCheckDBInstanceDestroy(ctx, t), Steps: []resource.TestStep{ { Config: testAccDBInstanceConfig_basic(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckDBInstanceExists(ctx, resourceName, &dbInstance), + testAccCheckDBInstanceExists(ctx, t, resourceName, &dbInstance), acctest.CheckFrameworkResourceDisappears(ctx, acctest.Provider, tftimestreaminfluxdb.ResourceDBInstance, resourceName), ), ExpectNonEmptyPlan: true, @@ -110,22 +108,22 @@ func TestAccTimestreamInfluxDBDBInstance_dbInstanceType(t *testing.T) { } var dbInstance1, dbInstance2 timestreaminfluxdb.GetDbInstanceOutput - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) resourceName := "aws_timestreaminfluxdb_db_instance.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) - testAccPreCheck(ctx, t) + testAccPreCheckDBInstances(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.TimestreamInfluxDBServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckDBInstanceDestroy(ctx), + CheckDestroy: testAccCheckDBInstanceDestroy(ctx, t), Steps: []resource.TestStep{ { Config: testAccDBInstanceConfig_dbInstanceType(rName, string(awstypes.DbInstanceTypeDbInfluxMedium)), Check: resource.ComposeTestCheckFunc( - testAccCheckDBInstanceExists(ctx, resourceName, &dbInstance1), + testAccCheckDBInstanceExists(ctx, t, resourceName, &dbInstance1), resource.TestCheckResourceAttr(resourceName, "db_instance_type", string(awstypes.DbInstanceTypeDbInfluxMedium)), ), }, @@ -138,7 +136,7 @@ func TestAccTimestreamInfluxDBDBInstance_dbInstanceType(t *testing.T) { { Config: testAccDBInstanceConfig_dbInstanceType(rName, string(awstypes.DbInstanceTypeDbInfluxLarge)), Check: resource.ComposeTestCheckFunc( - testAccCheckDBInstanceExists(ctx, resourceName, &dbInstance2), + testAccCheckDBInstanceExists(ctx, t, resourceName, 
&dbInstance2), testAccCheckDBInstanceNotRecreated(&dbInstance1, &dbInstance2), resource.TestCheckResourceAttr(resourceName, "db_instance_type", string(awstypes.DbInstanceTypeDbInfluxLarge)), ), @@ -160,22 +158,22 @@ func TestAccTimestreamInfluxDBDBInstance_logDeliveryConfiguration(t *testing.T) } var dbInstance1, dbInstance2 timestreaminfluxdb.GetDbInstanceOutput - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) resourceName := "aws_timestreaminfluxdb_db_instance.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) - testAccPreCheck(ctx, t) + testAccPreCheckDBInstances(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.TimestreamInfluxDBServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckDBInstanceDestroy(ctx), + CheckDestroy: testAccCheckDBInstanceDestroy(ctx, t), Steps: []resource.TestStep{ { Config: testAccDBInstanceConfig_logDeliveryConfigurationEnabled(rName, true), Check: resource.ComposeTestCheckFunc( - testAccCheckDBInstanceExists(ctx, resourceName, &dbInstance1), + testAccCheckDBInstanceExists(ctx, t, resourceName, &dbInstance1), resource.TestCheckResourceAttr(resourceName, "log_delivery_configuration.0.s3_configuration.0.%", "2"), resource.TestCheckResourceAttr(resourceName, "log_delivery_configuration.0.s3_configuration.0.bucket_name", rName), resource.TestCheckResourceAttr(resourceName, "log_delivery_configuration.0.s3_configuration.0.enabled", acctest.CtTrue), @@ -190,7 +188,7 @@ func TestAccTimestreamInfluxDBDBInstance_logDeliveryConfiguration(t *testing.T) { Config: testAccDBInstanceConfig_logDeliveryConfigurationEnabled(rName, false), Check: resource.ComposeTestCheckFunc( - testAccCheckDBInstanceExists(ctx, resourceName, &dbInstance2), + testAccCheckDBInstanceExists(ctx, t, resourceName, &dbInstance2), 
testAccCheckDBInstanceNotRecreated(&dbInstance1, &dbInstance2), resource.TestCheckResourceAttr(resourceName, "log_delivery_configuration.0.s3_configuration.0.%", "2"), resource.TestCheckResourceAttr(resourceName, "log_delivery_configuration.0.s3_configuration.0.bucket_name", rName), @@ -214,22 +212,22 @@ func TestAccTimestreamInfluxDBDBInstance_networkType(t *testing.T) { } var dbInstance1, dbInstance2 timestreaminfluxdb.GetDbInstanceOutput - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) resourceName := "aws_timestreaminfluxdb_db_instance.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) - testAccPreCheck(ctx, t) + testAccPreCheckDBInstances(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.TimestreamInfluxDBServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckDBInstanceDestroy(ctx), + CheckDestroy: testAccCheckDBInstanceDestroy(ctx, t), Steps: []resource.TestStep{ { Config: testAccDBInstanceConfig_networkTypeIPV4(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckDBInstanceExists(ctx, resourceName, &dbInstance1), + testAccCheckDBInstanceExists(ctx, t, resourceName, &dbInstance1), resource.TestCheckResourceAttr(resourceName, "network_type", string(awstypes.NetworkTypeIpv4)), ), }, @@ -242,7 +240,7 @@ func TestAccTimestreamInfluxDBDBInstance_networkType(t *testing.T) { { Config: testAccDBInstanceConfig_networkTypeDual(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckDBInstanceExists(ctx, resourceName, &dbInstance2), + testAccCheckDBInstanceExists(ctx, t, resourceName, &dbInstance2), resource.TestCheckResourceAttr(resourceName, "network_type", string(awstypes.NetworkTypeDual)), ), }, @@ -265,22 +263,22 @@ func TestAccTimestreamInfluxDBDBInstance_port(t *testing.T) { var dbInstance1, dbInstance2 
timestreaminfluxdb.GetDbInstanceOutput port1 := "8086" port2 := "8087" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) resourceName := "aws_timestreaminfluxdb_db_instance.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) - testAccPreCheck(ctx, t) + testAccPreCheckDBInstances(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.TimestreamInfluxDBServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckDBInstanceDestroy(ctx), + CheckDestroy: testAccCheckDBInstanceDestroy(ctx, t), Steps: []resource.TestStep{ { Config: testAccDBInstanceConfig_port(rName, port1), Check: resource.ComposeTestCheckFunc( - testAccCheckDBInstanceExists(ctx, resourceName, &dbInstance1), + testAccCheckDBInstanceExists(ctx, t, resourceName, &dbInstance1), resource.TestCheckResourceAttr(resourceName, names.AttrPort, port1), ), }, @@ -293,7 +291,7 @@ func TestAccTimestreamInfluxDBDBInstance_port(t *testing.T) { { Config: testAccDBInstanceConfig_port(rName, port2), Check: resource.ComposeTestCheckFunc( - testAccCheckDBInstanceExists(ctx, resourceName, &dbInstance2), + testAccCheckDBInstanceExists(ctx, t, resourceName, &dbInstance2), testAccCheckDBInstanceNotRecreated(&dbInstance1, &dbInstance2), resource.TestCheckResourceAttr(resourceName, names.AttrPort, port2), ), @@ -317,22 +315,22 @@ func TestAccTimestreamInfluxDBDBInstance_allocatedStorage(t *testing.T) { var dbInstance1, dbInstance2 timestreaminfluxdb.GetDbInstanceOutput allocatedStorage1 := "20" allocatedStorage2 := "40" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) resourceName := "aws_timestreaminfluxdb_db_instance.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { 
acctest.PreCheck(ctx, t) - testAccPreCheck(ctx, t) + testAccPreCheckDBInstances(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.TimestreamInfluxDBServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckDBInstanceDestroy(ctx), + CheckDestroy: testAccCheckDBInstanceDestroy(ctx, t), Steps: []resource.TestStep{ { Config: testAccDBInstanceConfig_allocatedStorage(rName, allocatedStorage1), Check: resource.ComposeTestCheckFunc( - testAccCheckDBInstanceExists(ctx, resourceName, &dbInstance1), + testAccCheckDBInstanceExists(ctx, t, resourceName, &dbInstance1), resource.TestCheckResourceAttr(resourceName, names.AttrAllocatedStorage, allocatedStorage1), ), }, @@ -345,7 +343,7 @@ func TestAccTimestreamInfluxDBDBInstance_allocatedStorage(t *testing.T) { { Config: testAccDBInstanceConfig_allocatedStorage(rName, allocatedStorage2), Check: resource.ComposeTestCheckFunc( - testAccCheckDBInstanceExists(ctx, resourceName, &dbInstance2), + testAccCheckDBInstanceExists(ctx, t, resourceName, &dbInstance2), testAccCheckDBInstanceNotRecreated(&dbInstance1, &dbInstance2), resource.TestCheckResourceAttr(resourceName, names.AttrAllocatedStorage, allocatedStorage2), ), @@ -367,22 +365,22 @@ func TestAccTimestreamInfluxDBDBInstance_dbStorageType(t *testing.T) { } var dbInstance1, dbInstance2 timestreaminfluxdb.GetDbInstanceOutput - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) resourceName := "aws_timestreaminfluxdb_db_instance.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) - testAccPreCheck(ctx, t) + testAccPreCheckDBInstances(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.TimestreamInfluxDBServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckDBInstanceDestroy(ctx), + CheckDestroy: testAccCheckDBInstanceDestroy(ctx, t), 
Steps: []resource.TestStep{ { Config: testAccDBInstanceConfig_dbStorageType(rName, string(awstypes.DbStorageTypeInfluxIoIncludedT1)), Check: resource.ComposeTestCheckFunc( - testAccCheckDBInstanceExists(ctx, resourceName, &dbInstance1), + testAccCheckDBInstanceExists(ctx, t, resourceName, &dbInstance1), resource.TestCheckResourceAttr(resourceName, "db_storage_type", string(awstypes.DbStorageTypeInfluxIoIncludedT1)), ), }, @@ -395,7 +393,7 @@ func TestAccTimestreamInfluxDBDBInstance_dbStorageType(t *testing.T) { { Config: testAccDBInstanceConfig_dbStorageType(rName, string(awstypes.DbStorageTypeInfluxIoIncludedT2)), Check: resource.ComposeTestCheckFunc( - testAccCheckDBInstanceExists(ctx, resourceName, &dbInstance2), + testAccCheckDBInstanceExists(ctx, t, resourceName, &dbInstance2), testAccCheckDBInstanceNotRecreated(&dbInstance1, &dbInstance2), resource.TestCheckResourceAttr(resourceName, "db_storage_type", string(awstypes.DbStorageTypeInfluxIoIncludedT2)), ), @@ -417,22 +415,22 @@ func TestAccTimestreamInfluxDBDBInstance_publiclyAccessible(t *testing.T) { } var dbInstance timestreaminfluxdb.GetDbInstanceOutput - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) resourceName := "aws_timestreaminfluxdb_db_instance.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) - testAccPreCheck(ctx, t) + testAccPreCheckDBInstances(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.TimestreamInfluxDBServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckDBInstanceDestroy(ctx), + CheckDestroy: testAccCheckDBInstanceDestroy(ctx, t), Steps: []resource.TestStep{ { Config: testAccDBInstanceConfig_publiclyAccessible(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckDBInstanceExists(ctx, resourceName, &dbInstance), + testAccCheckDBInstanceExists(ctx, t, 
resourceName, &dbInstance), resource.TestCheckResourceAttrSet(resourceName, names.AttrEndpoint), resource.TestCheckResourceAttr(resourceName, names.AttrPubliclyAccessible, acctest.CtTrue), ), @@ -454,22 +452,22 @@ func TestAccTimestreamInfluxDBDBInstance_deploymentType(t *testing.T) { } var dbInstance1, dbInstance2 timestreaminfluxdb.GetDbInstanceOutput - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) resourceName := "aws_timestreaminfluxdb_db_instance.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) - testAccPreCheck(ctx, t) + testAccPreCheckDBInstances(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.TimestreamInfluxDBServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckDBInstanceDestroy(ctx), + CheckDestroy: testAccCheckDBInstanceDestroy(ctx, t), Steps: []resource.TestStep{ { Config: testAccDBInstanceConfig_deploymentType(rName, string(awstypes.DeploymentTypeWithMultiazStandby)), Check: resource.ComposeTestCheckFunc( - testAccCheckDBInstanceExists(ctx, resourceName, &dbInstance1), + testAccCheckDBInstanceExists(ctx, t, resourceName, &dbInstance1), // DB instance will not be publicly accessible and will not have an endpoint. // DB instance will have a secondary availability zone. 
resource.TestCheckResourceAttrSet(resourceName, "secondary_availability_zone"), @@ -485,7 +483,7 @@ func TestAccTimestreamInfluxDBDBInstance_deploymentType(t *testing.T) { { Config: testAccDBInstanceConfig_deploymentType(rName, string(awstypes.DeploymentTypeSingleAz)), Check: resource.ComposeTestCheckFunc( - testAccCheckDBInstanceExists(ctx, resourceName, &dbInstance2), + testAccCheckDBInstanceExists(ctx, t, resourceName, &dbInstance2), testAccCheckDBInstanceNotRecreated(&dbInstance1, &dbInstance2), resource.TestCheckResourceAttr(resourceName, "secondary_availability_zone", ""), resource.TestCheckResourceAttr(resourceName, "deployment_type", string(awstypes.DeploymentTypeSingleAz)), @@ -509,16 +507,21 @@ func TestAccTimestreamInfluxDBDBInstance_upgradeV5_90_0(t *testing.T) { } var dbInstance timestreaminfluxdb.GetDbInstanceOutput - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) resourceName := "aws_timestreaminfluxdb_db_instance.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) - testAccPreCheck(ctx, t) + testAccPreCheckDBInstances(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.TimestreamInfluxDBServiceID), - CheckDestroy: testAccCheckDBInstanceDestroy(ctx), + CheckDestroy: testAccCheckDBInstanceDestroy(ctx, t), + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, Steps: []resource.TestStep{ { ExternalProviders: map[string]resource.ExternalProvider{ @@ -529,7 +532,7 @@ func TestAccTimestreamInfluxDBDBInstance_upgradeV5_90_0(t *testing.T) { }, Config: testAccDBInstanceConfig_basicV5_90_0(rName, rName), Check: resource.ComposeTestCheckFunc( - testAccCheckDBInstanceExists(ctx, resourceName, &dbInstance), + testAccCheckDBInstanceExists(ctx, t, resourceName, &dbInstance), ), ConfigPlanChecks: resource.ConfigPlanChecks{ PreApply: 
[]plancheck.PlanCheck{ @@ -543,35 +546,11 @@ func TestAccTimestreamInfluxDBDBInstance_upgradeV5_90_0(t *testing.T) { })), }, }, - { - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - Config: testAccDBInstanceConfig_basicV5_90_0(rName, rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckDBInstanceExists(ctx, resourceName, &dbInstance), - ), - ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - PostApplyPreRefresh: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - PostApplyPostRefresh: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - }, - ConfigStateChecks: []statecheck.StateCheck{ - statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("network_type"), knownvalue.StringExact(string(awstypes.NetworkTypeIpv4))), - statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ - "Name": knownvalue.StringExact(rName), - })), - }, - }, { ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, Config: testAccDBInstanceConfig_basicV5_90_0(rName, rName+"-updated"), Check: resource.ComposeTestCheckFunc( - testAccCheckDBInstanceExists(ctx, resourceName, &dbInstance), + testAccCheckDBInstanceExists(ctx, t, resourceName, &dbInstance), ), ConfigPlanChecks: resource.ConfigPlanChecks{ PreApply: []plancheck.PlanCheck{ @@ -585,7 +564,7 @@ func TestAccTimestreamInfluxDBDBInstance_upgradeV5_90_0(t *testing.T) { }, }, ConfigStateChecks: []statecheck.StateCheck{ - statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("network_type"), knownvalue.StringExact(string(awstypes.NetworkTypeIpv4))), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("network_type"), knownvalue.Null()), statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), 
knownvalue.MapExact(map[string]knownvalue.Check{ "Name": knownvalue.StringExact(rName + "-updated"), })), @@ -595,9 +574,9 @@ func TestAccTimestreamInfluxDBDBInstance_upgradeV5_90_0(t *testing.T) { }) } -func testAccCheckDBInstanceDestroy(ctx context.Context) resource.TestCheckFunc { +func testAccCheckDBInstanceDestroy(ctx context.Context, t *testing.T) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).TimestreamInfluxDBClient(ctx) + conn := acctest.ProviderMeta(ctx, t).TimestreamInfluxDBClient(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != "aws_timestreaminfluxdb_db_instance" { @@ -606,7 +585,7 @@ func testAccCheckDBInstanceDestroy(ctx context.Context) resource.TestCheckFunc { _, err := tftimestreaminfluxdb.FindDBInstanceByID(ctx, conn, rs.Primary.ID) - if tfresource.NotFound(err) { + if retry.NotFound(err) { continue } @@ -621,7 +600,7 @@ func testAccCheckDBInstanceDestroy(ctx context.Context) resource.TestCheckFunc { } } -func testAccCheckDBInstanceExists(ctx context.Context, name string, dbInstance *timestreaminfluxdb.GetDbInstanceOutput) resource.TestCheckFunc { +func testAccCheckDBInstanceExists(ctx context.Context, t *testing.T, name string, dbInstance *timestreaminfluxdb.GetDbInstanceOutput) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[name] if !ok { @@ -632,7 +611,7 @@ func testAccCheckDBInstanceExists(ctx context.Context, name string, dbInstance * return create.Error(names.TimestreamInfluxDB, create.ErrActionCheckingExistence, tftimestreaminfluxdb.ResNameDBInstance, name, errors.New("not set")) } - conn := acctest.Provider.Meta().(*conns.AWSClient).TimestreamInfluxDBClient(ctx) + conn := acctest.ProviderMeta(ctx, t).TimestreamInfluxDBClient(ctx) resp, err := tftimestreaminfluxdb.FindDBInstanceByID(ctx, conn, rs.Primary.ID) if err != nil { @@ -645,8 +624,8 @@ func testAccCheckDBInstanceExists(ctx context.Context, name 
string, dbInstance * } } -func testAccPreCheck(ctx context.Context, t *testing.T) { - conn := acctest.Provider.Meta().(*conns.AWSClient).TimestreamInfluxDBClient(ctx) +func testAccPreCheckDBInstances(ctx context.Context, t *testing.T) { + conn := acctest.ProviderMeta(ctx, t).TimestreamInfluxDBClient(ctx) input := ×treaminfluxdb.ListDbInstancesInput{} _, err := conn.ListDbInstances(ctx, input) diff --git a/internal/service/timestreaminfluxdb/exports_test.go b/internal/service/timestreaminfluxdb/exports_test.go index e6e2d702637c..3a0de223d589 100644 --- a/internal/service/timestreaminfluxdb/exports_test.go +++ b/internal/service/timestreaminfluxdb/exports_test.go @@ -5,7 +5,9 @@ package timestreaminfluxdb // Exports for use in tests only. var ( + ResourceDBCluster = newDBClusterResource ResourceDBInstance = newDBInstanceResource + FindDBClusterByID = findDBClusterByID FindDBInstanceByID = findDBInstanceByID ) diff --git a/internal/service/timestreaminfluxdb/service_endpoint_resolver_gen.go b/internal/service/timestreaminfluxdb/service_endpoint_resolver_gen.go index 6e6d8616c872..1327183e38bd 100644 --- a/internal/service/timestreaminfluxdb/service_endpoint_resolver_gen.go +++ b/internal/service/timestreaminfluxdb/service_endpoint_resolver_gen.go @@ -62,7 +62,7 @@ func (r resolverV2) ResolveEndpoint(ctx context.Context, params timestreaminflux }) params.UseFIPS = aws.Bool(false) } else { - err = fmt.Errorf("looking up timestreaminfluxdb endpoint %q: %s", hostname, err) + err = fmt.Errorf("looking up timestreaminfluxdb endpoint %q: %w", hostname, err) return } } else { diff --git a/internal/service/timestreaminfluxdb/service_endpoints_gen_test.go b/internal/service/timestreaminfluxdb/service_endpoints_gen_test.go index a0c0877814a0..c46716b1bb4a 100644 --- a/internal/service/timestreaminfluxdb/service_endpoints_gen_test.go +++ b/internal/service/timestreaminfluxdb/service_endpoints_gen_test.go @@ -521,7 +521,7 @@ func retrieveRegionMiddleware(region *string) 
middleware.SerializeMiddleware { ) } -var errCancelOperation = fmt.Errorf("Test: Canceling request") +var errCancelOperation = errors.New("Test: Canceling request") func addCancelRequestMiddleware() func(*middleware.Stack) error { return func(stack *middleware.Stack) error { diff --git a/internal/service/timestreaminfluxdb/service_package_gen.go b/internal/service/timestreaminfluxdb/service_package_gen.go index d216dc6a6f78..07887fbf777a 100644 --- a/internal/service/timestreaminfluxdb/service_package_gen.go +++ b/internal/service/timestreaminfluxdb/service_package_gen.go @@ -7,7 +7,6 @@ import ( "unique" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/timestreaminfluxdb" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -24,6 +23,15 @@ func (p *servicePackage) FrameworkDataSources(ctx context.Context) []*inttypes.S func (p *servicePackage) FrameworkResources(ctx context.Context) []*inttypes.ServicePackageFrameworkResource { return []*inttypes.ServicePackageFrameworkResource{ + { + Factory: newDBClusterResource, + TypeName: "aws_timestreaminfluxdb_db_cluster", + Name: "DB Cluster", + Tags: unique.Make(inttypes.ServicePackageResourceTags{ + IdentifierAttribute: names.AttrARN, + }), + Region: unique.Make(inttypes.ResourceRegionDefault()), + }, { Factory: newDBInstanceResource, TypeName: "aws_timestreaminfluxdb_db_instance", @@ -67,7 +75,7 @@ func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) ( func(o *timestreaminfluxdb.Options) { if inContext, ok := conns.FromContext(ctx); ok && inContext.VCREnabled() { tflog.Info(ctx, "overriding retry behavior to immediately return VCR errors") - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(vcr.InteractionNotFoundRetryableFunc)) + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), 
vcr.InteractionNotFoundRetryableFunc) } }, withExtraOptions(ctx, p, config), diff --git a/internal/service/timestreaminfluxdb/sweep.go b/internal/service/timestreaminfluxdb/sweep.go index ec77b6bc762d..2873c45b155e 100644 --- a/internal/service/timestreaminfluxdb/sweep.go +++ b/internal/service/timestreaminfluxdb/sweep.go @@ -16,9 +16,33 @@ import ( ) func RegisterSweepers() { + awsv2.Register("aws_timestreaminfluxdb_db_cluster", sweepDBClusters) awsv2.Register("aws_timestreaminfluxdb_db_instance", sweepDBInstances) } +func sweepDBClusters(ctx context.Context, client *conns.AWSClient) ([]sweep.Sweepable, error) { + conn := client.TimestreamInfluxDBClient(ctx) + var input timestreaminfluxdb.ListDbClustersInput + sweepResources := make([]sweep.Sweepable, 0) + + pages := timestreaminfluxdb.NewListDbClustersPaginator(conn, &input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + + if err != nil { + return nil, err + } + + for _, v := range page.Items { + sweepResources = append(sweepResources, framework.NewSweepResource(newDBClusterResource, client, + framework.NewAttribute(names.AttrID, aws.ToString(v.Id)), + )) + } + } + + return sweepResources, nil +} + func sweepDBInstances(ctx context.Context, client *conns.AWSClient) ([]sweep.Sweepable, error) { conn := client.TimestreamInfluxDBClient(ctx) var input timestreaminfluxdb.ListDbInstancesInput diff --git a/internal/service/timestreaminfluxdb/tags_gen.go b/internal/service/timestreaminfluxdb/tags_gen.go index 1f6a82d58d0e..c434c1aa9ab5 100644 --- a/internal/service/timestreaminfluxdb/tags_gen.go +++ b/internal/service/timestreaminfluxdb/tags_gen.go @@ -3,8 +3,8 @@ package timestreaminfluxdb import ( "context" - "fmt" + "github.com/YakDriver/smarterr" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/timestreaminfluxdb" "github.com/hashicorp/terraform-plugin-log/tflog" @@ -26,7 +26,7 @@ func listTags(ctx context.Context, conn *timestreaminfluxdb.Client, identifier s output, err := 
conn.ListTagsForResource(ctx, &input, optFns...) if err != nil { - return tftags.New(ctx, nil), err + return tftags.New(ctx, nil), smarterr.NewError(err) } return keyValueTags(ctx, output.Tags), nil @@ -38,7 +38,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri tags, err := listTags(ctx, meta.(*conns.AWSClient).TimestreamInfluxDBClient(ctx), identifier) if err != nil { - return err + return smarterr.NewError(err) } if inContext, ok := tftags.FromContext(ctx); ok { @@ -99,7 +99,7 @@ func updateTags(ctx context.Context, conn *timestreaminfluxdb.Client, identifier _, err := conn.UntagResource(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("untagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } @@ -114,7 +114,7 @@ func updateTags(ctx context.Context, conn *timestreaminfluxdb.Client, identifier _, err := conn.TagResource(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("tagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } diff --git a/internal/service/timestreaminfluxdb/testdata/DBCluster/tags/main_gen.tf b/internal/service/timestreaminfluxdb/testdata/DBCluster/tags/main_gen.tf new file mode 100644 index 000000000000..8131c416ab85 --- /dev/null +++ b/internal/service/timestreaminfluxdb/testdata/DBCluster/tags/main_gen.tf @@ -0,0 +1,64 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_timestreaminfluxdb_db_cluster" "test" { + name = var.rName + allocated_storage = 20 + username = "admin" + password = "testpassword" + vpc_subnet_ids = aws_subnet.test[*].id + vpc_security_group_ids = [aws_security_group.test.id] + db_instance_type = "db.influx.medium" + bucket = "initial" + organization = "organization" + failover_mode = "AUTOMATIC" + + tags = var.resource_tags +} + +# acctest.ConfigVPCWithSubnets(rName, 2) + +resource "aws_vpc" "test" { + cidr_block = "10.0.0.0/16" +} + +resource "aws_subnet" "test" { + count = 2 + + vpc_id = aws_vpc.test.id + availability_zone = data.aws_availability_zones.available.names[count.index] + cidr_block = cidrsubnet(aws_vpc.test.cidr_block, 8, count.index) +} + +# acctest.ConfigAvailableAZsNoOptInDefaultExclude + +data "aws_availability_zones" "available" { + exclude_zone_ids = local.default_exclude_zone_ids + state = "available" + + filter { + name = "opt-in-status" + values = ["opt-in-not-required"] + } +} + +locals { + default_exclude_zone_ids = ["usw2-az4", "usgw1-az2"] +} + +resource "aws_security_group" "test" { + vpc_id = aws_vpc.test.id +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} + +variable "resource_tags" { + description = "Tags to set on resource. To specify no tags, set to `null`" + # Not setting a default, so that this must explicitly be set to `null` to specify no tags + type = map(string) + nullable = true +} diff --git a/internal/service/timestreaminfluxdb/testdata/DBCluster/tagsComputed1/main_gen.tf b/internal/service/timestreaminfluxdb/testdata/DBCluster/tagsComputed1/main_gen.tf new file mode 100644 index 000000000000..d0e8aaf2498f --- /dev/null +++ b/internal/service/timestreaminfluxdb/testdata/DBCluster/tagsComputed1/main_gen.tf @@ -0,0 +1,68 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +provider "null" {} + +resource "aws_timestreaminfluxdb_db_cluster" "test" { + name = var.rName + allocated_storage = 20 + username = "admin" + password = "testpassword" + vpc_subnet_ids = aws_subnet.test[*].id + vpc_security_group_ids = [aws_security_group.test.id] + db_instance_type = "db.influx.medium" + bucket = "initial" + organization = "organization" + failover_mode = "AUTOMATIC" + + tags = { + (var.unknownTagKey) = null_resource.test.id + } +} + +# acctest.ConfigVPCWithSubnets(rName, 2) + +resource "aws_vpc" "test" { + cidr_block = "10.0.0.0/16" +} + +resource "aws_subnet" "test" { + count = 2 + + vpc_id = aws_vpc.test.id + availability_zone = data.aws_availability_zones.available.names[count.index] + cidr_block = cidrsubnet(aws_vpc.test.cidr_block, 8, count.index) +} + +# acctest.ConfigAvailableAZsNoOptInDefaultExclude + +data "aws_availability_zones" "available" { + exclude_zone_ids = local.default_exclude_zone_ids + state = "available" + + filter { + name = "opt-in-status" + values = ["opt-in-not-required"] + } +} + +locals { + default_exclude_zone_ids = ["usw2-az4", "usgw1-az2"] +} + +resource "aws_security_group" "test" { + vpc_id = aws_vpc.test.id +} + +resource "null_resource" "test" {} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} + +variable "unknownTagKey" { + type = string + nullable = false +} diff --git a/internal/service/timestreaminfluxdb/testdata/DBCluster/tagsComputed2/main_gen.tf b/internal/service/timestreaminfluxdb/testdata/DBCluster/tagsComputed2/main_gen.tf new file mode 100644 index 000000000000..f5ce67b05544 --- /dev/null +++ b/internal/service/timestreaminfluxdb/testdata/DBCluster/tagsComputed2/main_gen.tf @@ -0,0 +1,79 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +provider "null" {} + +resource "aws_timestreaminfluxdb_db_cluster" "test" { + name = var.rName + allocated_storage = 20 + username = "admin" + password = "testpassword" + vpc_subnet_ids = aws_subnet.test[*].id + vpc_security_group_ids = [aws_security_group.test.id] + db_instance_type = "db.influx.medium" + bucket = "initial" + organization = "organization" + failover_mode = "AUTOMATIC" + + tags = { + (var.unknownTagKey) = null_resource.test.id + (var.knownTagKey) = var.knownTagValue + } +} + +# acctest.ConfigVPCWithSubnets(rName, 2) + +resource "aws_vpc" "test" { + cidr_block = "10.0.0.0/16" +} + +resource "aws_subnet" "test" { + count = 2 + + vpc_id = aws_vpc.test.id + availability_zone = data.aws_availability_zones.available.names[count.index] + cidr_block = cidrsubnet(aws_vpc.test.cidr_block, 8, count.index) +} + +# acctest.ConfigAvailableAZsNoOptInDefaultExclude + +data "aws_availability_zones" "available" { + exclude_zone_ids = local.default_exclude_zone_ids + state = "available" + + filter { + name = "opt-in-status" + values = ["opt-in-not-required"] + } +} + +locals { + default_exclude_zone_ids = ["usw2-az4", "usgw1-az2"] +} + +resource "aws_security_group" "test" { + vpc_id = aws_vpc.test.id +} + +resource "null_resource" "test" {} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} + +variable "unknownTagKey" { + type = string + nullable = false +} + +variable "knownTagKey" { + type = string + nullable = false +} + +variable "knownTagValue" { + type = string + nullable = false +} diff --git a/internal/service/timestreaminfluxdb/testdata/DBCluster/tags_defaults/main_gen.tf b/internal/service/timestreaminfluxdb/testdata/DBCluster/tags_defaults/main_gen.tf new file mode 100644 index 000000000000..c625761a9717 --- /dev/null +++ b/internal/service/timestreaminfluxdb/testdata/DBCluster/tags_defaults/main_gen.tf @@ -0,0 +1,75 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +provider "aws" { + default_tags { + tags = var.provider_tags + } +} + +resource "aws_timestreaminfluxdb_db_cluster" "test" { + name = var.rName + allocated_storage = 20 + username = "admin" + password = "testpassword" + vpc_subnet_ids = aws_subnet.test[*].id + vpc_security_group_ids = [aws_security_group.test.id] + db_instance_type = "db.influx.medium" + bucket = "initial" + organization = "organization" + failover_mode = "AUTOMATIC" + + tags = var.resource_tags +} + +# acctest.ConfigVPCWithSubnets(rName, 2) + +resource "aws_vpc" "test" { + cidr_block = "10.0.0.0/16" +} + +resource "aws_subnet" "test" { + count = 2 + + vpc_id = aws_vpc.test.id + availability_zone = data.aws_availability_zones.available.names[count.index] + cidr_block = cidrsubnet(aws_vpc.test.cidr_block, 8, count.index) +} + +# acctest.ConfigAvailableAZsNoOptInDefaultExclude + +data "aws_availability_zones" "available" { + exclude_zone_ids = local.default_exclude_zone_ids + state = "available" + + filter { + name = "opt-in-status" + values = ["opt-in-not-required"] + } +} + +locals { + default_exclude_zone_ids = ["usw2-az4", "usgw1-az2"] +} + +resource "aws_security_group" "test" { + vpc_id = aws_vpc.test.id +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} + +variable "resource_tags" { + description = "Tags to set on resource. 
To specify no tags, set to `null`" + # Not setting a default, so that this must explicitly be set to `null` to specify no tags + type = map(string) + nullable = true +} + +variable "provider_tags" { + type = map(string) + nullable = false +} diff --git a/internal/service/timestreaminfluxdb/testdata/DBCluster/tags_ignore/main_gen.tf b/internal/service/timestreaminfluxdb/testdata/DBCluster/tags_ignore/main_gen.tf new file mode 100644 index 000000000000..7776c2d666af --- /dev/null +++ b/internal/service/timestreaminfluxdb/testdata/DBCluster/tags_ignore/main_gen.tf @@ -0,0 +1,84 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +provider "aws" { + default_tags { + tags = var.provider_tags + } + ignore_tags { + keys = var.ignore_tag_keys + } +} + +resource "aws_timestreaminfluxdb_db_cluster" "test" { + name = var.rName + allocated_storage = 20 + username = "admin" + password = "testpassword" + vpc_subnet_ids = aws_subnet.test[*].id + vpc_security_group_ids = [aws_security_group.test.id] + db_instance_type = "db.influx.medium" + bucket = "initial" + organization = "organization" + failover_mode = "AUTOMATIC" + + tags = var.resource_tags +} + +# acctest.ConfigVPCWithSubnets(rName, 2) + +resource "aws_vpc" "test" { + cidr_block = "10.0.0.0/16" +} + +resource "aws_subnet" "test" { + count = 2 + + vpc_id = aws_vpc.test.id + availability_zone = data.aws_availability_zones.available.names[count.index] + cidr_block = cidrsubnet(aws_vpc.test.cidr_block, 8, count.index) +} + +# acctest.ConfigAvailableAZsNoOptInDefaultExclude + +data "aws_availability_zones" "available" { + exclude_zone_ids = local.default_exclude_zone_ids + state = "available" + + filter { + name = "opt-in-status" + values = ["opt-in-not-required"] + } +} + +locals { + default_exclude_zone_ids = ["usw2-az4", "usgw1-az2"] +} + +resource "aws_security_group" "test" { + vpc_id = aws_vpc.test.id +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} 
+ +variable "resource_tags" { + description = "Tags to set on resource. To specify no tags, set to `null`" + # Not setting a default, so that this must explicitly be set to `null` to specify no tags + type = map(string) + nullable = true +} + +variable "provider_tags" { + type = map(string) + nullable = true + default = null +} + +variable "ignore_tag_keys" { + type = set(string) + nullable = false +} diff --git a/internal/service/timestreaminfluxdb/testdata/DBInstance/tags/main_gen.tf b/internal/service/timestreaminfluxdb/testdata/DBInstance/tags/main_gen.tf index a933a60a994c..ecff113c73d2 100644 --- a/internal/service/timestreaminfluxdb/testdata/DBInstance/tags/main_gen.tf +++ b/internal/service/timestreaminfluxdb/testdata/DBInstance/tags/main_gen.tf @@ -1,12 +1,38 @@ # Copyright (c) HashiCorp, Inc. # SPDX-License-Identifier: MPL-2.0 +resource "aws_timestreaminfluxdb_db_instance" "test" { + name = var.rName + allocated_storage = 20 + username = "admin" + password = "testpassword" + vpc_subnet_ids = aws_subnet.test[*].id + vpc_security_group_ids = [aws_security_group.test.id] + db_instance_type = "db.influx.medium" + bucket = "initial" + organization = "organization" + + tags = var.resource_tags +} + +# acctest.ConfigVPCWithSubnets(rName, 1) + resource "aws_vpc" "test" { cidr_block = "10.0.0.0/16" } +resource "aws_subnet" "test" { + count = 1 + + vpc_id = aws_vpc.test.id + availability_zone = data.aws_availability_zones.available.names[count.index] + cidr_block = cidrsubnet(aws_vpc.test.cidr_block, 8, count.index) +} + +# acctest.ConfigAvailableAZsNoOptInDefaultExclude + data "aws_availability_zones" "available" { - exclude_zone_ids = ["usw2-az4", "usgw1-az2"] + exclude_zone_ids = local.default_exclude_zone_ids state = "available" filter { @@ -15,31 +41,14 @@ data "aws_availability_zones" "available" { } } -resource "aws_subnet" "test" { - count = 1 - - vpc_id = aws_vpc.test.id - availability_zone = data.aws_availability_zones.available.names[count.index] - 
cidr_block = cidrsubnet(aws_vpc.test.cidr_block, 8, count.index) +locals { + default_exclude_zone_ids = ["usw2-az4", "usgw1-az2"] } resource "aws_security_group" "test" { vpc_id = aws_vpc.test.id } -resource "aws_timestreaminfluxdb_db_instance" "test" { - name = var.rName - allocated_storage = 20 - username = "admin" - password = "testpassword" - vpc_subnet_ids = aws_subnet.test[*].id - vpc_security_group_ids = [aws_security_group.test.id] - db_instance_type = "db.influx.medium" - bucket = "initial" - organization = "organization" - - tags = var.resource_tags -} variable "rName" { description = "Name for resource" type = string diff --git a/internal/service/timestreaminfluxdb/testdata/DBInstance/tagsComputed1/main_gen.tf b/internal/service/timestreaminfluxdb/testdata/DBInstance/tagsComputed1/main_gen.tf index 729d296dafe4..7e1d53008bfa 100644 --- a/internal/service/timestreaminfluxdb/testdata/DBInstance/tagsComputed1/main_gen.tf +++ b/internal/service/timestreaminfluxdb/testdata/DBInstance/tagsComputed1/main_gen.tf @@ -3,12 +3,40 @@ provider "null" {} +resource "aws_timestreaminfluxdb_db_instance" "test" { + name = var.rName + allocated_storage = 20 + username = "admin" + password = "testpassword" + vpc_subnet_ids = aws_subnet.test[*].id + vpc_security_group_ids = [aws_security_group.test.id] + db_instance_type = "db.influx.medium" + bucket = "initial" + organization = "organization" + + tags = { + (var.unknownTagKey) = null_resource.test.id + } +} + +# acctest.ConfigVPCWithSubnets(rName, 1) + resource "aws_vpc" "test" { cidr_block = "10.0.0.0/16" } +resource "aws_subnet" "test" { + count = 1 + + vpc_id = aws_vpc.test.id + availability_zone = data.aws_availability_zones.available.names[count.index] + cidr_block = cidrsubnet(aws_vpc.test.cidr_block, 8, count.index) +} + +# acctest.ConfigAvailableAZsNoOptInDefaultExclude + data "aws_availability_zones" "available" { - exclude_zone_ids = ["usw2-az4", "usgw1-az2"] + exclude_zone_ids = local.default_exclude_zone_ids 
state = "available" filter { @@ -17,33 +45,14 @@ data "aws_availability_zones" "available" { } } -resource "aws_subnet" "test" { - count = 1 - - vpc_id = aws_vpc.test.id - availability_zone = data.aws_availability_zones.available.names[count.index] - cidr_block = cidrsubnet(aws_vpc.test.cidr_block, 8, count.index) +locals { + default_exclude_zone_ids = ["usw2-az4", "usgw1-az2"] } resource "aws_security_group" "test" { vpc_id = aws_vpc.test.id } -resource "aws_timestreaminfluxdb_db_instance" "test" { - name = var.rName - allocated_storage = 20 - username = "admin" - password = "testpassword" - vpc_subnet_ids = aws_subnet.test[*].id - vpc_security_group_ids = [aws_security_group.test.id] - db_instance_type = "db.influx.medium" - bucket = "initial" - organization = "organization" - - tags = { - (var.unknownTagKey) = null_resource.test.id - } -} resource "null_resource" "test" {} variable "rName" { diff --git a/internal/service/timestreaminfluxdb/testdata/DBInstance/tagsComputed2/main_gen.tf b/internal/service/timestreaminfluxdb/testdata/DBInstance/tagsComputed2/main_gen.tf index 31bcbc6eb1c5..d8ef9a7594bc 100644 --- a/internal/service/timestreaminfluxdb/testdata/DBInstance/tagsComputed2/main_gen.tf +++ b/internal/service/timestreaminfluxdb/testdata/DBInstance/tagsComputed2/main_gen.tf @@ -3,12 +3,41 @@ provider "null" {} +resource "aws_timestreaminfluxdb_db_instance" "test" { + name = var.rName + allocated_storage = 20 + username = "admin" + password = "testpassword" + vpc_subnet_ids = aws_subnet.test[*].id + vpc_security_group_ids = [aws_security_group.test.id] + db_instance_type = "db.influx.medium" + bucket = "initial" + organization = "organization" + + tags = { + (var.unknownTagKey) = null_resource.test.id + (var.knownTagKey) = var.knownTagValue + } +} + +# acctest.ConfigVPCWithSubnets(rName, 1) + resource "aws_vpc" "test" { cidr_block = "10.0.0.0/16" } +resource "aws_subnet" "test" { + count = 1 + + vpc_id = aws_vpc.test.id + availability_zone = 
data.aws_availability_zones.available.names[count.index] + cidr_block = cidrsubnet(aws_vpc.test.cidr_block, 8, count.index) +} + +# acctest.ConfigAvailableAZsNoOptInDefaultExclude + data "aws_availability_zones" "available" { - exclude_zone_ids = ["usw2-az4", "usgw1-az2"] + exclude_zone_ids = local.default_exclude_zone_ids state = "available" filter { @@ -17,34 +46,14 @@ data "aws_availability_zones" "available" { } } -resource "aws_subnet" "test" { - count = 1 - - vpc_id = aws_vpc.test.id - availability_zone = data.aws_availability_zones.available.names[count.index] - cidr_block = cidrsubnet(aws_vpc.test.cidr_block, 8, count.index) +locals { + default_exclude_zone_ids = ["usw2-az4", "usgw1-az2"] } resource "aws_security_group" "test" { vpc_id = aws_vpc.test.id } -resource "aws_timestreaminfluxdb_db_instance" "test" { - name = var.rName - allocated_storage = 20 - username = "admin" - password = "testpassword" - vpc_subnet_ids = aws_subnet.test[*].id - vpc_security_group_ids = [aws_security_group.test.id] - db_instance_type = "db.influx.medium" - bucket = "initial" - organization = "organization" - - tags = { - (var.unknownTagKey) = null_resource.test.id - (var.knownTagKey) = var.knownTagValue - } -} resource "null_resource" "test" {} variable "rName" { diff --git a/internal/service/timestreaminfluxdb/testdata/DBInstance/tags_defaults/main_gen.tf b/internal/service/timestreaminfluxdb/testdata/DBInstance/tags_defaults/main_gen.tf index 015c46bd72f2..26d17732378b 100644 --- a/internal/service/timestreaminfluxdb/testdata/DBInstance/tags_defaults/main_gen.tf +++ b/internal/service/timestreaminfluxdb/testdata/DBInstance/tags_defaults/main_gen.tf @@ -7,12 +7,38 @@ provider "aws" { } } +resource "aws_timestreaminfluxdb_db_instance" "test" { + name = var.rName + allocated_storage = 20 + username = "admin" + password = "testpassword" + vpc_subnet_ids = aws_subnet.test[*].id + vpc_security_group_ids = [aws_security_group.test.id] + db_instance_type = "db.influx.medium" + 
bucket = "initial" + organization = "organization" + + tags = var.resource_tags +} + +# acctest.ConfigVPCWithSubnets(rName, 1) + resource "aws_vpc" "test" { cidr_block = "10.0.0.0/16" } +resource "aws_subnet" "test" { + count = 1 + + vpc_id = aws_vpc.test.id + availability_zone = data.aws_availability_zones.available.names[count.index] + cidr_block = cidrsubnet(aws_vpc.test.cidr_block, 8, count.index) +} + +# acctest.ConfigAvailableAZsNoOptInDefaultExclude + data "aws_availability_zones" "available" { - exclude_zone_ids = ["usw2-az4", "usgw1-az2"] + exclude_zone_ids = local.default_exclude_zone_ids state = "available" filter { @@ -21,31 +47,14 @@ data "aws_availability_zones" "available" { } } -resource "aws_subnet" "test" { - count = 1 - - vpc_id = aws_vpc.test.id - availability_zone = data.aws_availability_zones.available.names[count.index] - cidr_block = cidrsubnet(aws_vpc.test.cidr_block, 8, count.index) +locals { + default_exclude_zone_ids = ["usw2-az4", "usgw1-az2"] } resource "aws_security_group" "test" { vpc_id = aws_vpc.test.id } -resource "aws_timestreaminfluxdb_db_instance" "test" { - name = var.rName - allocated_storage = 20 - username = "admin" - password = "testpassword" - vpc_subnet_ids = aws_subnet.test[*].id - vpc_security_group_ids = [aws_security_group.test.id] - db_instance_type = "db.influx.medium" - bucket = "initial" - organization = "organization" - - tags = var.resource_tags -} variable "rName" { description = "Name for resource" type = string diff --git a/internal/service/timestreaminfluxdb/testdata/DBInstance/tags_ignore/main_gen.tf b/internal/service/timestreaminfluxdb/testdata/DBInstance/tags_ignore/main_gen.tf index 579f735e71d7..bee3f86058f8 100644 --- a/internal/service/timestreaminfluxdb/testdata/DBInstance/tags_ignore/main_gen.tf +++ b/internal/service/timestreaminfluxdb/testdata/DBInstance/tags_ignore/main_gen.tf @@ -10,12 +10,38 @@ provider "aws" { } } +resource "aws_timestreaminfluxdb_db_instance" "test" { + name = var.rName + 
allocated_storage = 20 + username = "admin" + password = "testpassword" + vpc_subnet_ids = aws_subnet.test[*].id + vpc_security_group_ids = [aws_security_group.test.id] + db_instance_type = "db.influx.medium" + bucket = "initial" + organization = "organization" + + tags = var.resource_tags +} + +# acctest.ConfigVPCWithSubnets(rName, 1) + resource "aws_vpc" "test" { cidr_block = "10.0.0.0/16" } +resource "aws_subnet" "test" { + count = 1 + + vpc_id = aws_vpc.test.id + availability_zone = data.aws_availability_zones.available.names[count.index] + cidr_block = cidrsubnet(aws_vpc.test.cidr_block, 8, count.index) +} + +# acctest.ConfigAvailableAZsNoOptInDefaultExclude + data "aws_availability_zones" "available" { - exclude_zone_ids = ["usw2-az4", "usgw1-az2"] + exclude_zone_ids = local.default_exclude_zone_ids state = "available" filter { @@ -24,31 +50,14 @@ data "aws_availability_zones" "available" { } } -resource "aws_subnet" "test" { - count = 1 - - vpc_id = aws_vpc.test.id - availability_zone = data.aws_availability_zones.available.names[count.index] - cidr_block = cidrsubnet(aws_vpc.test.cidr_block, 8, count.index) +locals { + default_exclude_zone_ids = ["usw2-az4", "usgw1-az2"] } resource "aws_security_group" "test" { vpc_id = aws_vpc.test.id } -resource "aws_timestreaminfluxdb_db_instance" "test" { - name = var.rName - allocated_storage = 20 - username = "admin" - password = "testpassword" - vpc_subnet_ids = aws_subnet.test[*].id - vpc_security_group_ids = [aws_security_group.test.id] - db_instance_type = "db.influx.medium" - bucket = "initial" - organization = "organization" - - tags = var.resource_tags -} variable "rName" { description = "Name for resource" type = string diff --git a/internal/service/timestreaminfluxdb/testdata/tmpl/db_cluster_tags.gtpl b/internal/service/timestreaminfluxdb/testdata/tmpl/db_cluster_tags.gtpl new file mode 100644 index 000000000000..d999718ac693 --- /dev/null +++ 
b/internal/service/timestreaminfluxdb/testdata/tmpl/db_cluster_tags.gtpl @@ -0,0 +1,20 @@ +resource "aws_timestreaminfluxdb_db_cluster" "test" { + name = var.rName + allocated_storage = 20 + username = "admin" + password = "testpassword" + vpc_subnet_ids = aws_subnet.test[*].id + vpc_security_group_ids = [aws_security_group.test.id] + db_instance_type = "db.influx.medium" + bucket = "initial" + organization = "organization" + failover_mode = "AUTOMATIC" + +{{- template "tags" . }} +} + +{{ template "acctest.ConfigVPCWithSubnets" 2 }} + +resource "aws_security_group" "test" { + vpc_id = aws_vpc.test.id +} diff --git a/internal/service/timestreaminfluxdb/testdata/tmpl/db_instance_tags.gtpl b/internal/service/timestreaminfluxdb/testdata/tmpl/db_instance_tags.gtpl index dfc8a6fd24b3..97436bf16be9 100644 --- a/internal/service/timestreaminfluxdb/testdata/tmpl/db_instance_tags.gtpl +++ b/internal/service/timestreaminfluxdb/testdata/tmpl/db_instance_tags.gtpl @@ -1,29 +1,3 @@ -resource "aws_vpc" "test" { - cidr_block = "10.0.0.0/16" -} - -data "aws_availability_zones" "available" { - exclude_zone_ids = ["usw2-az4", "usgw1-az2"] - state = "available" - - filter { - name = "opt-in-status" - values = ["opt-in-not-required"] - } -} - -resource "aws_subnet" "test" { - count = 1 - - vpc_id = aws_vpc.test.id - availability_zone = data.aws_availability_zones.available.names[count.index] - cidr_block = cidrsubnet(aws_vpc.test.cidr_block, 8, count.index) -} - -resource "aws_security_group" "test" { - vpc_id = aws_vpc.test.id -} - resource "aws_timestreaminfluxdb_db_instance" "test" { name = var.rName allocated_storage = 20 @@ -36,4 +10,10 @@ resource "aws_timestreaminfluxdb_db_instance" "test" { organization = "organization" {{- template "tags" . 
}} -} \ No newline at end of file +} + +{{ template "acctest.ConfigVPCWithSubnets" 1 }} + +resource "aws_security_group" "test" { + vpc_id = aws_vpc.test.id +} diff --git a/internal/service/timestreamquery/scheduled_query.go b/internal/service/timestreamquery/scheduled_query.go index e553d3e15ce6..421a54c2a79f 100644 --- a/internal/service/timestreamquery/scheduled_query.go +++ b/internal/service/timestreamquery/scheduled_query.go @@ -20,13 +20,13 @@ import ( "github.com/hashicorp/terraform-plugin-framework/schema/validator" "github.com/hashicorp/terraform-plugin-framework/types" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-provider-aws/internal/create" "github.com/hashicorp/terraform-provider-aws/internal/enum" "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/framework" "github.com/hashicorp/terraform-provider-aws/internal/framework/flex" fwtypes "github.com/hashicorp/terraform-provider-aws/internal/framework/types" + "github.com/hashicorp/terraform-provider-aws/internal/retry" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/names" @@ -634,7 +634,7 @@ func (r *scheduledQueryResource) Read(ctx context.Context, req resource.ReadRequ } out, err := findScheduledQueryByARN(ctx, conn, state.ARN.ValueString()) - if tfresource.NotFound(err) { + if retry.NotFound(err) { resp.State.RemoveResource(ctx) return } @@ -753,7 +753,7 @@ func waitScheduledQueryCreated(ctx context.Context, conn *timestreamquery.Client stateConf := &retry.StateChangeConf{ Pending: []string{}, Target: enum.Slice(awstypes.ScheduledQueryStateEnabled), - Refresh: statusScheduledQuery(ctx, conn, id), + Refresh: statusScheduledQuery(conn, id), Timeout: timeout, NotFoundChecks: 20, ContinuousTargetOccurence: 2, 
@@ -771,7 +771,7 @@ func waitScheduledQueryUpdated(ctx context.Context, conn *timestreamquery.Client stateConf := &retry.StateChangeConf{ Pending: enum.Slice(awstypes.ScheduledQueryStateDisabled), Target: enum.Slice(awstypes.ScheduledQueryStateEnabled), - Refresh: statusScheduledQuery(ctx, conn, arn), + Refresh: statusScheduledQuery(conn, arn), Timeout: timeout, NotFoundChecks: 20, ContinuousTargetOccurence: 2, @@ -789,7 +789,7 @@ func waitScheduledQueryDeleted(ctx context.Context, conn *timestreamquery.Client stateConf := &retry.StateChangeConf{ Pending: enum.Slice(awstypes.ScheduledQueryStateEnabled, awstypes.ScheduledQueryStateDisabled), Target: []string{}, - Refresh: statusScheduledQuery(ctx, conn, arn), + Refresh: statusScheduledQuery(conn, arn), Timeout: timeout, } @@ -804,8 +804,8 @@ func waitScheduledQueryDeleted(ctx context.Context, conn *timestreamquery.Client // statusScheduledQuery is a state refresh function that queries the service // and returns the state of the scheduled query, not the run status of the most // recent run. 
-func statusScheduledQuery(ctx context.Context, conn *timestreamquery.Client, arn string) retry.StateRefreshFunc { - return func() (any, string, error) { +func statusScheduledQuery(conn *timestreamquery.Client, arn string) retry.StateRefreshFunc { + return func(ctx context.Context) (any, string, error) { out, err := findScheduledQueryByARN(ctx, conn, arn) if tfresource.NotFound(err) { return nil, "", nil @@ -828,8 +828,7 @@ func findScheduledQueryByARN(ctx context.Context, conn *timestreamquery.Client, if err != nil { if errs.IsA[*awstypes.ResourceNotFoundException](err) { return nil, &retry.NotFoundError{ - LastError: err, - LastRequest: in, + LastError: err, } } diff --git a/internal/service/timestreamquery/scheduled_query_test.go b/internal/service/timestreamquery/scheduled_query_test.go index 16d4c6a270e1..bbce9608eaa0 100644 --- a/internal/service/timestreamquery/scheduled_query_test.go +++ b/internal/service/timestreamquery/scheduled_query_test.go @@ -17,14 +17,12 @@ import ( awstypes "github.com/aws/aws-sdk-go-v2/service/timestreamquery/types" "github.com/aws/aws-sdk-go-v2/service/timestreamwrite" awswritetypes "github.com/aws/aws-sdk-go-v2/service/timestreamwrite/types" - sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" "github.com/hashicorp/terraform-provider-aws/internal/acctest" - "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/create" + "github.com/hashicorp/terraform-provider-aws/internal/retry" tftimestreamquery "github.com/hashicorp/terraform-provider-aws/internal/service/timestreamquery" - "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -32,24 +30,24 @@ func TestAccTimestreamQueryScheduledQuery_basic(t *testing.T) { ctx := acctest.Context(t) var scheduledquery 
awstypes.ScheduledQueryDescription - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) resourceName := "aws_timestreamquery_scheduled_query.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) testAccPreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.TimestreamQueryServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckScheduledQueryDestroy(ctx), + CheckDestroy: testAccCheckScheduledQueryDestroy(ctx, t), Steps: []resource.TestStep{ { // Must be done in 2 steps because the scheduled query requires data to be ingested first // which creates the columns. Otherwise, the SQL will always be invalid because no columns exist. Config: testAccScheduledQueryConfig_base(rName), Check: resource.ComposeAggregateTestCheckFunc( - testAccWriteRecords(ctx, "aws_timestreamwrite_table.test", rName, rName), + testAccWriteRecords(ctx, t, "aws_timestreamwrite_table.test", rName, rName), ), }, { @@ -58,7 +56,7 @@ func TestAccTimestreamQueryScheduledQuery_basic(t *testing.T) { testAccScheduledQueryConfig_basic(rName), ), Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckScheduledQueryExists(ctx, resourceName, &scheduledquery), + testAccCheckScheduledQueryExists(ctx, t, resourceName, &scheduledquery), acctest.MatchResourceAttrRegionalARN(ctx, resourceName, names.AttrARN, "timestream", regexache.MustCompile(`scheduled-query/.+$`)), acctest.CheckResourceAttrRFC3339(resourceName, names.AttrCreationTime), resource.TestCheckResourceAttr(resourceName, "error_report_configuration.#", "1"), @@ -134,24 +132,24 @@ func TestAccTimestreamQueryScheduledQuery_disappears(t *testing.T) { ctx := acctest.Context(t) var scheduledquery awstypes.ScheduledQueryDescription - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, 
acctest.ResourcePrefix) resourceName := "aws_timestreamquery_scheduled_query.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) testAccPreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.TimestreamQueryServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckScheduledQueryDestroy(ctx), + CheckDestroy: testAccCheckScheduledQueryDestroy(ctx, t), Steps: []resource.TestStep{ { // Must be done in 2 steps because the scheduled query requires data to be ingested first // which creates the columns. Otherwise, the SQL will always be invalid because no columns exist. Config: testAccScheduledQueryConfig_base(rName), Check: resource.ComposeAggregateTestCheckFunc( - testAccWriteRecords(ctx, "aws_timestreamwrite_table.test", rName, rName), + testAccWriteRecords(ctx, t, "aws_timestreamwrite_table.test", rName, rName), ), }, { @@ -160,7 +158,7 @@ func TestAccTimestreamQueryScheduledQuery_disappears(t *testing.T) { testAccScheduledQueryConfig_basic(rName), ), Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckScheduledQueryExists(ctx, resourceName, &scheduledquery), + testAccCheckScheduledQueryExists(ctx, t, resourceName, &scheduledquery), acctest.CheckFrameworkResourceDisappears(ctx, acctest.Provider, tftimestreamquery.ResourceScheduledQuery, resourceName), ), ExpectNonEmptyPlan: true, @@ -180,7 +178,7 @@ func testAccScheduledQueryImportStateIDFunc(resourceName string) resource.Import } } -func testAccWriteRecords(ctx context.Context, name, database, table string) resource.TestCheckFunc { +func testAccWriteRecords(ctx context.Context, t *testing.T, name, database, table string) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[name] if !ok { @@ -235,7 +233,7 @@ func testAccWriteRecords(ctx context.Context, name, database, table string) reso Records: records, } - conn := 
acctest.Provider.Meta().(*conns.AWSClient).TimestreamWriteClient(ctx) + conn := acctest.ProviderMeta(ctx, t).TimestreamWriteClient(ctx) _, err := conn.WriteRecords(ctx, input) if err != nil { return create.Error(names.TimestreamQuery, create.ErrActionChecking, tftimestreamquery.ResNameScheduledQuery, rs.Primary.Attributes[names.AttrARN], err) @@ -245,9 +243,9 @@ func testAccWriteRecords(ctx context.Context, name, database, table string) reso } } -func testAccCheckScheduledQueryDestroy(ctx context.Context) resource.TestCheckFunc { +func testAccCheckScheduledQueryDestroy(ctx context.Context, t *testing.T) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).TimestreamQueryClient(ctx) + conn := acctest.ProviderMeta(ctx, t).TimestreamQueryClient(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != "aws_timestreamquery_scheduled_query" { @@ -255,7 +253,7 @@ func testAccCheckScheduledQueryDestroy(ctx context.Context) resource.TestCheckFu } _, err := tftimestreamquery.FindScheduledQueryByARN(ctx, conn, rs.Primary.Attributes[names.AttrARN]) - if tfresource.NotFound(err) { + if retry.NotFound(err) { return nil } if err != nil { @@ -269,7 +267,7 @@ func testAccCheckScheduledQueryDestroy(ctx context.Context) resource.TestCheckFu } } -func testAccCheckScheduledQueryExists(ctx context.Context, name string, scheduledquery *awstypes.ScheduledQueryDescription) resource.TestCheckFunc { +func testAccCheckScheduledQueryExists(ctx context.Context, t *testing.T, name string, scheduledquery *awstypes.ScheduledQueryDescription) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[name] if !ok { @@ -280,7 +278,7 @@ func testAccCheckScheduledQueryExists(ctx context.Context, name string, schedule return create.Error(names.TimestreamQuery, create.ErrActionCheckingExistence, tftimestreamquery.ResNameScheduledQuery, name, errors.New("not set")) } - conn := 
acctest.Provider.Meta().(*conns.AWSClient).TimestreamQueryClient(ctx) + conn := acctest.ProviderMeta(ctx, t).TimestreamQueryClient(ctx) resp, err := tftimestreamquery.FindScheduledQueryByARN(ctx, conn, rs.Primary.Attributes[names.AttrARN]) if err != nil { @@ -294,7 +292,7 @@ func testAccCheckScheduledQueryExists(ctx context.Context, name string, schedule } func testAccPreCheck(ctx context.Context, t *testing.T) { - conn := acctest.Provider.Meta().(*conns.AWSClient).TimestreamQueryClient(ctx) + conn := acctest.ProviderMeta(ctx, t).TimestreamQueryClient(ctx) input := ×treamquery.ListScheduledQueriesInput{} diff --git a/internal/service/timestreamquery/service_endpoint_resolver_gen.go b/internal/service/timestreamquery/service_endpoint_resolver_gen.go index d4a8f7fb7911..0f32b9ea726e 100644 --- a/internal/service/timestreamquery/service_endpoint_resolver_gen.go +++ b/internal/service/timestreamquery/service_endpoint_resolver_gen.go @@ -62,7 +62,7 @@ func (r resolverV2) ResolveEndpoint(ctx context.Context, params timestreamquery. 
}) params.UseFIPS = aws.Bool(false) } else { - err = fmt.Errorf("looking up timestreamquery endpoint %q: %s", hostname, err) + err = fmt.Errorf("looking up timestreamquery endpoint %q: %w", hostname, err) return } } else { diff --git a/internal/service/timestreamquery/service_endpoints_gen_test.go b/internal/service/timestreamquery/service_endpoints_gen_test.go index 738c9604a73c..576fffe499a0 100644 --- a/internal/service/timestreamquery/service_endpoints_gen_test.go +++ b/internal/service/timestreamquery/service_endpoints_gen_test.go @@ -521,7 +521,7 @@ func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { ) } -var errCancelOperation = fmt.Errorf("Test: Canceling request") +var errCancelOperation = errors.New("Test: Canceling request") func addCancelRequestMiddleware() func(*middleware.Stack) error { return func(stack *middleware.Stack) error { diff --git a/internal/service/timestreamquery/service_package_gen.go b/internal/service/timestreamquery/service_package_gen.go index daf6509ba0b3..045d27c32c5f 100644 --- a/internal/service/timestreamquery/service_package_gen.go +++ b/internal/service/timestreamquery/service_package_gen.go @@ -7,7 +7,6 @@ import ( "unique" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/timestreamquery" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -67,7 +66,7 @@ func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) ( func(o *timestreamquery.Options) { if inContext, ok := conns.FromContext(ctx); ok && inContext.VCREnabled() { tflog.Info(ctx, "overriding retry behavior to immediately return VCR errors") - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(vcr.InteractionNotFoundRetryableFunc)) + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), vcr.InteractionNotFoundRetryableFunc) } }, 
withExtraOptions(ctx, p, config), diff --git a/internal/service/timestreamquery/tags_gen.go b/internal/service/timestreamquery/tags_gen.go index 4350540de1f2..a0c4ed84fe3e 100644 --- a/internal/service/timestreamquery/tags_gen.go +++ b/internal/service/timestreamquery/tags_gen.go @@ -3,8 +3,8 @@ package timestreamquery import ( "context" - "fmt" + "github.com/YakDriver/smarterr" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/timestreamquery" awstypes "github.com/aws/aws-sdk-go-v2/service/timestreamquery/types" @@ -31,7 +31,7 @@ func listTags(ctx context.Context, conn *timestreamquery.Client, identifier stri page, err := pages.NextPage(ctx, optFns...) if err != nil { - return tftags.New(ctx, nil), err + return tftags.New(ctx, nil), smarterr.NewError(err) } output = append(output, page.Tags...) @@ -46,7 +46,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri tags, err := listTags(ctx, meta.(*conns.AWSClient).TimestreamQueryClient(ctx), identifier) if err != nil { - return err + return smarterr.NewError(err) } if inContext, ok := tftags.FromContext(ctx); ok { @@ -124,7 +124,7 @@ func updateTags(ctx context.Context, conn *timestreamquery.Client, identifier st _, err := conn.UntagResource(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("untagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } @@ -139,7 +139,7 @@ func updateTags(ctx context.Context, conn *timestreamquery.Client, identifier st _, err := conn.TagResource(ctx, &input, optFns...) 
if err != nil { - return fmt.Errorf("tagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } diff --git a/internal/service/timestreamwrite/service_endpoint_resolver_gen.go b/internal/service/timestreamwrite/service_endpoint_resolver_gen.go index 60605316f15d..d84bc7dec4d9 100644 --- a/internal/service/timestreamwrite/service_endpoint_resolver_gen.go +++ b/internal/service/timestreamwrite/service_endpoint_resolver_gen.go @@ -62,7 +62,7 @@ func (r resolverV2) ResolveEndpoint(ctx context.Context, params timestreamwrite. }) params.UseFIPS = aws.Bool(false) } else { - err = fmt.Errorf("looking up timestreamwrite endpoint %q: %s", hostname, err) + err = fmt.Errorf("looking up timestreamwrite endpoint %q: %w", hostname, err) return } } else { diff --git a/internal/service/timestreamwrite/service_package_gen.go b/internal/service/timestreamwrite/service_package_gen.go index da35247ae895..68168b71ae65 100644 --- a/internal/service/timestreamwrite/service_package_gen.go +++ b/internal/service/timestreamwrite/service_package_gen.go @@ -7,7 +7,6 @@ import ( "unique" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/timestreamwrite" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -89,7 +88,7 @@ func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) ( func(o *timestreamwrite.Options) { if inContext, ok := conns.FromContext(ctx); ok && inContext.VCREnabled() { tflog.Info(ctx, "overriding retry behavior to immediately return VCR errors") - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(vcr.InteractionNotFoundRetryableFunc)) + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), vcr.InteractionNotFoundRetryableFunc) } }, withExtraOptions(ctx, p, config), diff --git a/internal/service/timestreamwrite/sweep.go 
b/internal/service/timestreamwrite/sweep.go index e74f59bff902..f1e2b8eb6cef 100644 --- a/internal/service/timestreamwrite/sweep.go +++ b/internal/service/timestreamwrite/sweep.go @@ -31,7 +31,7 @@ func sweepDatabases(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } input := &timestreamwrite.ListDatabasesInput{} conn := client.TimestreamWriteClient(ctx) @@ -72,7 +72,7 @@ func sweepTables(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } input := &timestreamwrite.ListTablesInput{} conn := client.TimestreamWriteClient(ctx) diff --git a/internal/service/timestreamwrite/tags_gen.go b/internal/service/timestreamwrite/tags_gen.go index e68baf082f38..8110617ffa42 100644 --- a/internal/service/timestreamwrite/tags_gen.go +++ b/internal/service/timestreamwrite/tags_gen.go @@ -3,8 +3,8 @@ package timestreamwrite import ( "context" - "fmt" + "github.com/YakDriver/smarterr" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/timestreamwrite" awstypes "github.com/aws/aws-sdk-go-v2/service/timestreamwrite/types" @@ -27,7 +27,7 @@ func listTags(ctx context.Context, conn *timestreamwrite.Client, identifier stri output, err := conn.ListTagsForResource(ctx, &input, optFns...) 
if err != nil { - return tftags.New(ctx, nil), err + return tftags.New(ctx, nil), smarterr.NewError(err) } return keyValueTags(ctx, output.Tags), nil @@ -39,7 +39,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri tags, err := listTags(ctx, meta.(*conns.AWSClient).TimestreamWriteClient(ctx), identifier) if err != nil { - return err + return smarterr.NewError(err) } if inContext, ok := tftags.FromContext(ctx); ok { @@ -117,7 +117,7 @@ func updateTags(ctx context.Context, conn *timestreamwrite.Client, identifier st _, err := conn.UntagResource(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("untagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } @@ -132,7 +132,7 @@ func updateTags(ctx context.Context, conn *timestreamwrite.Client, identifier st _, err := conn.TagResource(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("tagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } diff --git a/internal/service/transcribe/language_model.go b/internal/service/transcribe/language_model.go index f7cd556e3ffb..feb01d3eea8f 100644 --- a/internal/service/transcribe/language_model.go +++ b/internal/service/transcribe/language_model.go @@ -122,7 +122,7 @@ func resourceLanguageModelCreate(ctx context.Context, d *schema.ResourceData, me } outputRaw, err := tfresource.RetryWhen(ctx, propagationTimeout, - func() (any, error) { + func(ctx context.Context) (any, error) { return conn.CreateLanguageModel(ctx, in) }, func(err error) (bool, error) { diff --git a/internal/service/transcribe/service_endpoint_resolver_gen.go b/internal/service/transcribe/service_endpoint_resolver_gen.go index 408d39860981..85896a85b311 100644 --- a/internal/service/transcribe/service_endpoint_resolver_gen.go +++ b/internal/service/transcribe/service_endpoint_resolver_gen.go @@ -62,7 +62,7 @@ func (r resolverV2) ResolveEndpoint(ctx context.Context, params transcribe.Endpo }) params.UseFIPS = 
aws.Bool(false) } else { - err = fmt.Errorf("looking up transcribe endpoint %q: %s", hostname, err) + err = fmt.Errorf("looking up transcribe endpoint %q: %w", hostname, err) return } } else { diff --git a/internal/service/transcribe/service_endpoints_gen_test.go b/internal/service/transcribe/service_endpoints_gen_test.go index 0458e43a62da..7a267da5b392 100644 --- a/internal/service/transcribe/service_endpoints_gen_test.go +++ b/internal/service/transcribe/service_endpoints_gen_test.go @@ -601,7 +601,7 @@ func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { ) } -var errCancelOperation = fmt.Errorf("Test: Canceling request") +var errCancelOperation = errors.New("Test: Canceling request") func addCancelRequestMiddleware() func(*middleware.Stack) error { return func(stack *middleware.Stack) error { diff --git a/internal/service/transcribe/service_package_gen.go b/internal/service/transcribe/service_package_gen.go index f0314c6c8074..6e52d43f88bc 100644 --- a/internal/service/transcribe/service_package_gen.go +++ b/internal/service/transcribe/service_package_gen.go @@ -7,7 +7,6 @@ import ( "unique" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/transcribe" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -18,6 +17,17 @@ import ( type servicePackage struct{} +func (p *servicePackage) Actions(ctx context.Context) []*inttypes.ServicePackageAction { + return []*inttypes.ServicePackageAction{ + { + Factory: newStartTranscriptionJobAction, + TypeName: "aws_transcribe_start_transcription_job", + Name: "Start Transcription Job", + Region: unique.Make(inttypes.ResourceRegionDefault()), + }, + } +} + func (p *servicePackage) FrameworkDataSources(ctx context.Context) []*inttypes.ServicePackageFrameworkDataSource { return []*inttypes.ServicePackageFrameworkDataSource{} } @@ -94,7 +104,7 @@ func (p *servicePackage) 
NewClient(ctx context.Context, config map[string]any) ( func(o *transcribe.Options) { if inContext, ok := conns.FromContext(ctx); ok && inContext.VCREnabled() { tflog.Info(ctx, "overriding retry behavior to immediately return VCR errors") - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(vcr.InteractionNotFoundRetryableFunc)) + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), vcr.InteractionNotFoundRetryableFunc) } }, withExtraOptions(ctx, p, config), diff --git a/internal/service/transcribe/start_transcription_job_action.go b/internal/service/transcribe/start_transcription_job_action.go new file mode 100644 index 000000000000..7ca04345ba7d --- /dev/null +++ b/internal/service/transcribe/start_transcription_job_action.go @@ -0,0 +1,284 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package transcribe + +import ( + "context" + "errors" + "fmt" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/transcribe" + awstypes "github.com/aws/aws-sdk-go-v2/service/transcribe/types" + "github.com/hashicorp/terraform-plugin-framework/action" + "github.com/hashicorp/terraform-plugin-framework/action/schema" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/actionwait" + "github.com/hashicorp/terraform-provider-aws/internal/framework" + fwtypes "github.com/hashicorp/terraform-provider-aws/internal/framework/types" + "github.com/hashicorp/terraform-provider-aws/names" +) + +const ( + transcriptionJobPollInterval = 5 * time.Second + transcriptionJobProgressInterval = 30 * time.Second +) + +// @Action(aws_transcribe_start_transcription_job, name="Start Transcription Job") +func newStartTranscriptionJobAction(_ context.Context) (action.ActionWithConfigure, error) { + return &startTranscriptionJobAction{}, nil +} + +var ( + _ 
action.Action = (*startTranscriptionJobAction)(nil) +) + +type startTranscriptionJobAction struct { + framework.ActionWithModel[startTranscriptionJobActionModel] +} + +type startTranscriptionJobActionModel struct { + framework.WithRegionModel + TranscriptionJobName types.String `tfsdk:"transcription_job_name"` + MediaFileUri types.String `tfsdk:"media_file_uri"` + LanguageCode fwtypes.StringEnum[awstypes.LanguageCode] `tfsdk:"language_code"` + IdentifyLanguage types.Bool `tfsdk:"identify_language"` + IdentifyMultipleLanguages types.Bool `tfsdk:"identify_multiple_languages"` + MediaFormat fwtypes.StringEnum[awstypes.MediaFormat] `tfsdk:"media_format"` + MediaSampleRateHertz types.Int64 `tfsdk:"media_sample_rate_hertz"` + OutputBucketName types.String `tfsdk:"output_bucket_name"` + OutputKey types.String `tfsdk:"output_key"` + Timeout types.Int64 `tfsdk:"timeout"` +} + +func (a *startTranscriptionJobAction) Schema(ctx context.Context, req action.SchemaRequest, resp *action.SchemaResponse) { + resp.Schema = schema.Schema{ + Description: "Starts an Amazon Transcribe transcription job to transcribe audio from a media file. The media file must be uploaded to an Amazon S3 bucket before starting the transcription job.", + Attributes: map[string]schema.Attribute{ + "transcription_job_name": schema.StringAttribute{ + Description: "A unique name for the transcription job within your AWS account.", + Required: true, + }, + "media_file_uri": schema.StringAttribute{ + Description: "The Amazon S3 location of the media file to transcribe (e.g., s3://bucket-name/file.mp3).", + Required: true, + }, + names.AttrLanguageCode: schema.StringAttribute{ + CustomType: fwtypes.StringEnumType[awstypes.LanguageCode](), + Description: "The language code for the language used in the input media file. 
Required if identify_language and identify_multiple_languages are both false.", + Optional: true, + }, + "identify_language": schema.BoolAttribute{ + Description: "Enable automatic language identification for single-language media files. Cannot be used with identify_multiple_languages.", + Optional: true, + }, + "identify_multiple_languages": schema.BoolAttribute{ + Description: "Enable automatic language identification for multi-language media files. Cannot be used with identify_language.", + Optional: true, + }, + "media_format": schema.StringAttribute{ + CustomType: fwtypes.StringEnumType[awstypes.MediaFormat](), + Description: "The format of the input media file. If not specified, Amazon Transcribe will attempt to determine the format automatically.", + Optional: true, + }, + "media_sample_rate_hertz": schema.Int64Attribute{ + Description: "The sample rate of the input media file in Hertz. If not specified, Amazon Transcribe will attempt to determine the sample rate automatically.", + Optional: true, + }, + "output_bucket_name": schema.StringAttribute{ + Description: "The name of the Amazon S3 bucket where you want your transcription output stored. If not specified, output is stored in a service-managed bucket.", + Optional: true, + }, + "output_key": schema.StringAttribute{ + Description: "The Amazon S3 object key for your transcription output. If not specified, a default key is generated.", + Optional: true, + }, + names.AttrTimeout: schema.Int64Attribute{ + Description: "Maximum time in seconds to wait for the transcription job to start. Defaults to 300 seconds (5 minutes).", + Optional: true, + }, + }, + } +} + +func (a *startTranscriptionJobAction) Invoke(ctx context.Context, req action.InvokeRequest, resp *action.InvokeResponse) { + var config startTranscriptionJobActionModel + + // Parse configuration + resp.Diagnostics.Append(req.Config.Get(ctx, &config)...) 
+ if resp.Diagnostics.HasError() { + return + } + + // Get AWS client + conn := a.Meta().TranscribeClient(ctx) + + transcriptionJobName := config.TranscriptionJobName.ValueString() + mediaFileUri := config.MediaFileUri.ValueString() + + // Set default timeout + timeout := 5 * time.Minute + if !config.Timeout.IsNull() { + timeout = time.Duration(config.Timeout.ValueInt64()) * time.Second + } + + tflog.Info(ctx, "Starting transcription job action", map[string]any{ + "transcription_job_name": transcriptionJobName, + "media_file_uri": mediaFileUri, + "timeout_seconds": int64(timeout.Seconds()), + }) + + // Send initial progress update + resp.SendProgress(action.InvokeProgressEvent{ + Message: fmt.Sprintf("Starting transcription job %s...", transcriptionJobName), + }) + + // Build the start transcription job input + input := &transcribe.StartTranscriptionJobInput{ + TranscriptionJobName: aws.String(transcriptionJobName), + Media: &awstypes.Media{ + MediaFileUri: aws.String(mediaFileUri), + }, + } + + // Validate language configuration - exactly one must be specified + languageOptions := []bool{ + !config.LanguageCode.IsNull() && !config.LanguageCode.IsUnknown(), + !config.IdentifyLanguage.IsNull() && config.IdentifyLanguage.ValueBool(), + !config.IdentifyMultipleLanguages.IsNull() && config.IdentifyMultipleLanguages.ValueBool(), + } + + activeCount := 0 + for _, active := range languageOptions { + if active { + activeCount++ + } + } + + switch activeCount { + case 0: + resp.Diagnostics.AddError( + "Missing Language Configuration", + "You must specify exactly one of: language_code, identify_language, or identify_multiple_languages", + ) + return + case 1: + // Valid - continue + default: + resp.Diagnostics.AddError( + "Conflicting Language Configuration", + "You can only specify one of: language_code, identify_language, or identify_multiple_languages", + ) + return + } + + // Set language configuration + if languageOptions[0] { + input.LanguageCode = 
config.LanguageCode.ValueEnum() + } + if languageOptions[1] { + input.IdentifyLanguage = aws.Bool(true) + } + if languageOptions[2] { + input.IdentifyMultipleLanguages = aws.Bool(true) + } + + // Set optional parameters + if !config.MediaFormat.IsNull() && !config.MediaFormat.IsUnknown() { + input.MediaFormat = config.MediaFormat.ValueEnum() + } + + if !config.MediaSampleRateHertz.IsNull() { + input.MediaSampleRateHertz = aws.Int32(int32(config.MediaSampleRateHertz.ValueInt64())) + } + + if !config.OutputBucketName.IsNull() { + input.OutputBucketName = config.OutputBucketName.ValueStringPointer() + } + + if !config.OutputKey.IsNull() { + input.OutputKey = config.OutputKey.ValueStringPointer() + } + + // Start the transcription job + _, err := conn.StartTranscriptionJob(ctx, input) + if err != nil { + resp.Diagnostics.AddError( + "Failed to Start Transcription Job", + fmt.Sprintf("Could not start transcription job %s: %s", transcriptionJobName, err), + ) + return + } + + // Wait for job to move beyond QUEUED: treat IN_PROGRESS or COMPLETED as success, FAILED as failure, QUEUED transitional. 
+ fr, err := actionwait.WaitForStatus(ctx, func(ctx context.Context) (actionwait.FetchResult[*awstypes.TranscriptionJob], error) { + input := transcribe.GetTranscriptionJobInput{TranscriptionJobName: aws.String(transcriptionJobName)} + getOutput, gerr := conn.GetTranscriptionJob(ctx, &input) + if gerr != nil { + return actionwait.FetchResult[*awstypes.TranscriptionJob]{}, fmt.Errorf("get transcription job: %w", gerr) + } + if getOutput.TranscriptionJob == nil { + return actionwait.FetchResult[*awstypes.TranscriptionJob]{}, fmt.Errorf("transcription job %s not found", transcriptionJobName) + } + status := getOutput.TranscriptionJob.TranscriptionJobStatus + return actionwait.FetchResult[*awstypes.TranscriptionJob]{Status: actionwait.Status(status), Value: getOutput.TranscriptionJob}, nil + }, actionwait.Options[*awstypes.TranscriptionJob]{ + Timeout: timeout, + Interval: actionwait.FixedInterval(transcriptionJobPollInterval), + ProgressInterval: transcriptionJobProgressInterval, + SuccessStates: []actionwait.Status{ + actionwait.Status(awstypes.TranscriptionJobStatusInProgress), + actionwait.Status(awstypes.TranscriptionJobStatusCompleted), + }, + TransitionalStates: []actionwait.Status{ + actionwait.Status(awstypes.TranscriptionJobStatusQueued), + }, + FailureStates: []actionwait.Status{ + actionwait.Status(awstypes.TranscriptionJobStatusFailed), + }, + ProgressSink: func(fr actionwait.FetchResult[any], meta actionwait.ProgressMeta) { + resp.SendProgress(action.InvokeProgressEvent{Message: fmt.Sprintf("Transcription job %s is currently %s", transcriptionJobName, fr.Status)}) + }, + }) + if err != nil { + var timeoutErr *actionwait.TimeoutError + var failureErr *actionwait.FailureStateError + var unexpectedErr *actionwait.UnexpectedStateError + + if errors.As(err, &timeoutErr) { + resp.Diagnostics.AddError( + "Timeout Waiting for Transcription Job", + fmt.Sprintf("Transcription job %s did not reach a running state within %v", transcriptionJobName, timeout), + ) + } 
else if errors.As(err, &failureErr) { + resp.Diagnostics.AddError( + "Transcription Job Failed", + fmt.Sprintf("Transcription job %s failed: %s", transcriptionJobName, failureErr.Status), + ) + } else if errors.As(err, &unexpectedErr) { + resp.Diagnostics.AddError( + "Unexpected Transcription Job Status", + fmt.Sprintf("Transcription job %s entered unexpected status: %s", transcriptionJobName, unexpectedErr.Status), + ) + } else { + resp.Diagnostics.AddError( + "Error Waiting for Transcription Job", + fmt.Sprintf("Error while waiting for transcription job %s: %s", transcriptionJobName, err), + ) + } + return + } + + resp.SendProgress(action.InvokeProgressEvent{Message: fmt.Sprintf("Transcription job %s started successfully and is %s", transcriptionJobName, fr.Status)}) + logFields := map[string]any{ + "transcription_job_name": transcriptionJobName, + "job_status": fr.Status, + } + if fr.Value != nil { + logFields[names.AttrCreationTime] = fr.Value.CreationTime + } + tflog.Info(ctx, "Transcription job started successfully", logFields) +} diff --git a/internal/service/transcribe/start_transcription_job_action_test.go b/internal/service/transcribe/start_transcription_job_action_test.go new file mode 100644 index 000000000000..3ca225096593 --- /dev/null +++ b/internal/service/transcribe/start_transcription_job_action_test.go @@ -0,0 +1,270 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package transcribe_test + +import ( + "context" + "fmt" + "slices" + "testing" + + "github.com/aws/aws-sdk-go-v2/service/transcribe" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/terraform" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccTranscribeStartTranscriptionJobAction_basic(t *testing.T) { + ctx := acctest.Context(t) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + bucketName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.TranscribeServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + Config: testAccStartTranscriptionJobActionConfig_basic(rName, bucketName), + Check: resource.ComposeTestCheckFunc( + testAccCheckTranscriptionJobExists(ctx, rName), + testAccCheckTranscriptionJobStatus(ctx, rName, "IN_PROGRESS", "COMPLETED"), + ), + }, + }, + }) +} + +func TestAccTranscribeStartTranscriptionJobAction_identifyLanguage(t *testing.T) { + ctx := acctest.Context(t) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + bucketName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.TranscribeServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + Config: testAccStartTranscriptionJobActionConfig_identifyLanguage(rName, bucketName), + Check: resource.ComposeTestCheckFunc( + testAccCheckTranscriptionJobExists(ctx, rName), + testAccCheckTranscriptionJobStatus(ctx, rName, "IN_PROGRESS", 
"COMPLETED"), + testAccCheckTranscriptionJobIdentifyLanguage(ctx, rName, true), + ), + }, + }, + }) +} + +func TestAccTranscribeStartTranscriptionJobAction_withOutputLocation(t *testing.T) { + ctx := acctest.Context(t) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + bucketName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.TranscribeServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + Config: testAccStartTranscriptionJobActionConfig_withOutputLocation(rName, bucketName), + Check: resource.ComposeTestCheckFunc( + testAccCheckTranscriptionJobExists(ctx, rName), + testAccCheckTranscriptionJobStatus(ctx, rName, "IN_PROGRESS", "COMPLETED"), + ), + }, + }, + }) +} + +func testAccCheckTranscriptionJobExists(ctx context.Context, jobName string) resource.TestCheckFunc { + return func(s *terraform.State) error { + conn := acctest.Provider.Meta().(*conns.AWSClient).TranscribeClient(ctx) + + input := &transcribe.GetTranscriptionJobInput{ + TranscriptionJobName: &jobName, + } + + _, err := conn.GetTranscriptionJob(ctx, input) + if err != nil { + return fmt.Errorf("transcription job %s not found: %w", jobName, err) + } + + return nil + } +} + +func testAccCheckTranscriptionJobStatus(ctx context.Context, jobName string, expectedStatuses ...string) resource.TestCheckFunc { + return func(s *terraform.State) error { + conn := acctest.Provider.Meta().(*conns.AWSClient).TranscribeClient(ctx) + + input := &transcribe.GetTranscriptionJobInput{ + TranscriptionJobName: &jobName, + } + + output, err := conn.GetTranscriptionJob(ctx, input) + if err != nil { + return fmt.Errorf("error getting transcription job %s: %w", jobName, err) + } + + if output.TranscriptionJob == nil { + return fmt.Errorf("transcription job %s not found", jobName) + } + + actualStatus := 
string(output.TranscriptionJob.TranscriptionJobStatus) + if slices.Contains(expectedStatuses, actualStatus) { + return nil + } + + return fmt.Errorf("expected transcription job %s status to be one of %v, got %s", jobName, expectedStatuses, actualStatus) + } +} + +func testAccCheckTranscriptionJobIdentifyLanguage(ctx context.Context, jobName string, expected bool) resource.TestCheckFunc { + return func(s *terraform.State) error { + conn := acctest.Provider.Meta().(*conns.AWSClient).TranscribeClient(ctx) + + input := &transcribe.GetTranscriptionJobInput{ + TranscriptionJobName: &jobName, + } + + output, err := conn.GetTranscriptionJob(ctx, input) + if err != nil { + return fmt.Errorf("error getting transcription job %s: %w", jobName, err) + } + + if output.TranscriptionJob == nil { + return fmt.Errorf("transcription job %s not found", jobName) + } + + actual := output.TranscriptionJob.IdentifyLanguage != nil && *output.TranscriptionJob.IdentifyLanguage + if actual != expected { + return fmt.Errorf("expected transcription job %s identify_language to be %t, got %t", jobName, expected, actual) + } + + return nil + } +} + +func testAccStartTranscriptionJobActionConfig_basic(rName, bucketName string) string { + return fmt.Sprintf(` +resource "aws_s3_bucket" "test" { + bucket = %[2]q + force_destroy = true +} + +resource "aws_s3_object" "test" { + bucket = aws_s3_bucket.test.bucket + key = "test-audio.wav" + source = "test-fixtures/test-audio.wav" +} + +action "aws_transcribe_start_transcription_job" "test" { + config { + transcription_job_name = %[1]q + media_file_uri = "s3://${aws_s3_bucket.test.bucket}/${aws_s3_object.test.key}" + language_code = "en-US" + timeout = 600 + } +} + +resource "terraform_data" "test" { + triggers_replace = [ + aws_s3_object.test.etag + ] + + input = "completed" + + lifecycle { + action_trigger { + events = [before_create, before_update] + actions = [action.aws_transcribe_start_transcription_job.test] + } + } +} +`, rName, bucketName) +} + 
+func testAccStartTranscriptionJobActionConfig_identifyLanguage(rName, bucketName string) string { + return fmt.Sprintf(` +resource "aws_s3_bucket" "test" { + bucket = %[2]q + force_destroy = true +} + +resource "aws_s3_object" "test" { + bucket = aws_s3_bucket.test.bucket + key = "test-audio.wav" + source = "test-fixtures/test-audio.wav" +} + +action "aws_transcribe_start_transcription_job" "test" { + config { + transcription_job_name = %[1]q + media_file_uri = "s3://${aws_s3_bucket.test.bucket}/${aws_s3_object.test.key}" + identify_language = true + timeout = 600 + } +} + +resource "terraform_data" "test" { + triggers_replace = [ + aws_s3_object.test.etag + ] + + input = "completed" + + lifecycle { + action_trigger { + events = [before_create, before_update] + actions = [action.aws_transcribe_start_transcription_job.test] + } + } +} +`, rName, bucketName) +} + +func testAccStartTranscriptionJobActionConfig_withOutputLocation(rName, bucketName string) string { + return fmt.Sprintf(` +resource "aws_s3_bucket" "test" { + bucket = %[2]q + force_destroy = true +} + +resource "aws_s3_object" "test" { + bucket = aws_s3_bucket.test.bucket + key = "test-audio.wav" + source = "test-fixtures/test-audio.wav" +} + +action "aws_transcribe_start_transcription_job" "test" { + config { + transcription_job_name = %[1]q + media_file_uri = "s3://${aws_s3_bucket.test.bucket}/${aws_s3_object.test.key}" + language_code = "en-US" + output_bucket_name = aws_s3_bucket.test.bucket + output_key = "transcripts/%[1]s.json" + timeout = 600 + } +} + +resource "terraform_data" "test" { + triggers_replace = [ + aws_s3_object.test.etag + ] + + input = "completed" + + lifecycle { + action_trigger { + events = [before_create, before_update] + actions = [action.aws_transcribe_start_transcription_job.test] + } + } +} +`, rName, bucketName) +} diff --git a/internal/service/transcribe/sweep.go b/internal/service/transcribe/sweep.go index 6ab1591ae9cd..b63eaada075e 100644 --- 
a/internal/service/transcribe/sweep.go +++ b/internal/service/transcribe/sweep.go @@ -53,7 +53,7 @@ func sweepLanguageModels(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.TranscribeClient(ctx) @@ -97,7 +97,7 @@ func sweepMedicalVocabularies(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.TranscribeClient(ctx) @@ -142,7 +142,7 @@ func sweepVocabularies(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.TranscribeClient(ctx) @@ -187,7 +187,7 @@ func sweepVocabularyFilters(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.TranscribeClient(ctx) diff --git a/internal/service/transcribe/tags_gen.go b/internal/service/transcribe/tags_gen.go index 1052bbdf3ab4..ba857aa3d243 100644 --- a/internal/service/transcribe/tags_gen.go +++ b/internal/service/transcribe/tags_gen.go @@ -3,8 +3,8 @@ package transcribe import ( "context" - "fmt" + "github.com/YakDriver/smarterr" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/transcribe" awstypes "github.com/aws/aws-sdk-go-v2/service/transcribe/types" @@ -27,7 +27,7 @@ func listTags(ctx context.Context, conn *transcribe.Client, identifier string, o output, err := conn.ListTagsForResource(ctx, &input, optFns...) 
if err != nil { - return tftags.New(ctx, nil), err + return tftags.New(ctx, nil), smarterr.NewError(err) } return keyValueTags(ctx, output.Tags), nil @@ -39,7 +39,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri tags, err := listTags(ctx, meta.(*conns.AWSClient).TranscribeClient(ctx), identifier) if err != nil { - return err + return smarterr.NewError(err) } if inContext, ok := tftags.FromContext(ctx); ok { @@ -117,7 +117,7 @@ func updateTags(ctx context.Context, conn *transcribe.Client, identifier string, _, err := conn.UntagResource(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("untagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } @@ -132,7 +132,7 @@ func updateTags(ctx context.Context, conn *transcribe.Client, identifier string, _, err := conn.TagResource(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("tagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } diff --git a/internal/service/transcribe/test-fixtures/test-audio.wav b/internal/service/transcribe/test-fixtures/test-audio.wav new file mode 100644 index 000000000000..b33814b96b91 Binary files /dev/null and b/internal/service/transcribe/test-fixtures/test-audio.wav differ diff --git a/internal/service/transfer/exports_test.go b/internal/service/transfer/exports_test.go index bee2d12e1deb..a09ed3d4839c 100644 --- a/internal/service/transfer/exports_test.go +++ b/internal/service/transfer/exports_test.go @@ -5,25 +5,31 @@ package transfer // Exports for use in tests only. 
var ( - ResourceAccess = resourceAccess - ResourceAgreement = resourceAgreement - ResourceCertificate = resourceCertificate - ResourceConnector = resourceConnector - ResourceProfile = resourceProfile - ResourceServer = resourceServer - ResourceSSHKey = resourceSSHKey - ResourceTag = resourceTag - ResourceUser = resourceUser - ResourceWorkflow = resourceWorkflow + ResourceAccess = resourceAccess + ResourceAgreement = resourceAgreement + ResourceCertificate = resourceCertificate + ResourceConnector = resourceConnector + ResourceHostKey = newHostKeyResource + ResourceProfile = resourceProfile + ResourceServer = resourceServer + ResourceSSHKey = resourceSSHKey + ResourceTag = resourceTag + ResourceUser = resourceUser + ResourceWebApp = newWebAppResource + ResourceWebAppCustomization = newWebAppCustomizationResource + ResourceWorkflow = resourceWorkflow FindAccessByTwoPartKey = findAccessByTwoPartKey FindAgreementByTwoPartKey = findAgreementByTwoPartKey FindCertificateByID = findCertificateByID FindConnectorByID = findConnectorByID + FindHostKeyByTwoPartKey = findHostKeyByTwoPartKey FindProfileByID = findProfileByID FindServerByID = findServerByID FindTag = findTag FindUserByTwoPartKey = findUserByTwoPartKey FindUserSSHKeyByThreePartKey = findUserSSHKeyByThreePartKey FindWorkflowByID = findWorkflowByID + FindWebAppByID = findWebAppByID + FindWebAppCustomizationByID = findWebAppCustomizationByID ) diff --git a/internal/service/transfer/host_key.go b/internal/service/transfer/host_key.go new file mode 100644 index 000000000000..33f688bd85b3 --- /dev/null +++ b/internal/service/transfer/host_key.go @@ -0,0 +1,315 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package transfer + +import ( + "context" + "fmt" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/transfer" + awstypes "github.com/aws/aws-sdk-go-v2/service/transfer/types" + "github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" + "github.com/hashicorp/terraform-provider-aws/internal/errs" + "github.com/hashicorp/terraform-provider-aws/internal/errs/fwdiag" + intflex "github.com/hashicorp/terraform-provider-aws/internal/flex" + "github.com/hashicorp/terraform-provider-aws/internal/framework" + fwflex "github.com/hashicorp/terraform-provider-aws/internal/framework/flex" + tfstringplanmodifier "github.com/hashicorp/terraform-provider-aws/internal/framework/planmodifiers/stringplanmodifier" + "github.com/hashicorp/terraform-provider-aws/internal/framework/privatestate" + tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + "github.com/hashicorp/terraform-provider-aws/names" +) + +// @FrameworkResource("aws_transfer_host_key", name="Host Key") +// @Tags(identifierAttribute="arn") +func newHostKeyResource(_ context.Context) (resource.ResourceWithConfigure, error) { + r := &hostKeyResource{} + + return r, nil +} + +type hostKeyResource struct { + framework.ResourceWithModel[hostKeyResourceModel] +} + +const ( + hostKeyBodyWOKey = "host_key_body_wo" +) + +func (r 
*hostKeyResource) Schema(ctx context.Context, request resource.SchemaRequest, response *resource.SchemaResponse) { + response.Schema = schema.Schema{ + Attributes: map[string]schema.Attribute{ + names.AttrARN: framework.ARNAttributeComputedOnly(), + names.AttrDescription: schema.StringAttribute{ + Optional: true, + Validators: []validator.String{ + stringvalidator.LengthBetween(0, 200), + }, + }, + "host_key_body": schema.StringAttribute{ + Optional: true, + Sensitive: true, + Validators: []validator.String{ + stringvalidator.LengthBetween(0, 4096), + stringvalidator.ExactlyOneOf( + path.MatchRoot("host_key_body"), + path.MatchRoot("host_key_body_wo"), + ), + stringvalidator.PreferWriteOnlyAttribute(path.MatchRoot("host_key_body_wo")), + }, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + }, + "host_key_body_wo": schema.StringAttribute{ + Optional: true, + Sensitive: true, + WriteOnly: true, + Validators: []validator.String{ + stringvalidator.LengthBetween(0, 4096), + }, + PlanModifiers: []planmodifier.String{ + tfstringplanmodifier.RequiresReplaceWO(hostKeyBodyWOKey), + }, + }, + "host_key_fingerprint": schema.StringAttribute{ + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + }, + "host_key_id": framework.IDAttribute(), + "server_id": schema.StringAttribute{ + Required: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + }, + names.AttrTags: tftags.TagsAttribute(), + names.AttrTagsAll: tftags.TagsAttributeComputedOnly(), + }, + } +} + +func (r *hostKeyResource) Create(ctx context.Context, request resource.CreateRequest, response *resource.CreateResponse) { + var plan, config hostKeyResourceModel + response.Diagnostics.Append(request.Plan.Get(ctx, &plan)...) + if response.Diagnostics.HasError() { + return + } + response.Diagnostics.Append(request.Config.Get(ctx, &config)...) 
+ if response.Diagnostics.HasError() { + return + } + + conn := r.Meta().TransferClient(ctx) + + serverID := fwflex.StringValueFromFramework(ctx, plan.ServerID) + var input transfer.ImportHostKeyInput + response.Diagnostics.Append(fwflex.Expand(ctx, plan, &input)...) + if response.Diagnostics.HasError() { + return + } + + // Prefer write-only value. It's only in Config, not Plan. + if !config.HostKeyBodyWO.IsNull() { + input.HostKeyBody = fwflex.StringFromFramework(ctx, config.HostKeyBodyWO) + } + + // Additional fields. + input.Tags = getTagsIn(ctx) + + out, err := conn.ImportHostKey(ctx, &input) + if err != nil { + response.Diagnostics.AddError(fmt.Sprintf("creating Transfer Host Key (%s)", serverID), err.Error()) + + return + } + + // Store hash of write-only value. + if !config.HostKeyBodyWO.IsNull() { + woStore := privatestate.NewWriteOnlyValueStore(response.Private, hostKeyBodyWOKey) + response.Diagnostics.Append(woStore.SetValue(ctx, config.HostKeyBodyWO)...) + if response.Diagnostics.HasError() { + return + } + } + + hostKeyID := aws.ToString(out.HostKeyId) + hostKey, err := findHostKeyByTwoPartKey(ctx, conn, serverID, hostKeyID) + if err != nil { + response.Diagnostics.AddError(fmt.Sprintf("reading Transfer Host Key (%s)", hostKeyID), err.Error()) + + return + } + + // Set values for unknowns. + response.Diagnostics.Append(fwflex.Flatten(ctx, hostKey, &plan)...) + if response.Diagnostics.HasError() { + return + } + + response.Diagnostics.Append(response.State.Set(ctx, plan)...) +} + +func (r *hostKeyResource) Read(ctx context.Context, request resource.ReadRequest, response *resource.ReadResponse) { + var data hostKeyResourceModel + response.Diagnostics.Append(request.State.Get(ctx, &data)...) 
+ if response.Diagnostics.HasError() { + return + } + + conn := r.Meta().TransferClient(ctx) + + serverID, hostKeyID := fwflex.StringValueFromFramework(ctx, data.ServerID), fwflex.StringValueFromFramework(ctx, data.HostKeyID) + out, err := findHostKeyByTwoPartKey(ctx, conn, serverID, hostKeyID) + if tfresource.NotFound(err) { + response.Diagnostics.Append(fwdiag.NewResourceNotFoundWarningDiagnostic(err)) + response.State.RemoveResource(ctx) + return + } + if err != nil { + response.Diagnostics.AddError(fmt.Sprintf("reading Transfer Host Key (%s)", hostKeyID), err.Error()) + + return + } + + response.Diagnostics.Append(fwflex.Flatten(ctx, out, &data)...) + if response.Diagnostics.HasError() { + return + } + + setTagsOut(ctx, out.Tags) + + response.Diagnostics.Append(response.State.Set(ctx, &data)...) +} + +func (r *hostKeyResource) Update(ctx context.Context, request resource.UpdateRequest, response *resource.UpdateResponse) { + var new, old hostKeyResourceModel + response.Diagnostics.Append(request.Plan.Get(ctx, &new)...) + response.Diagnostics.Append(request.State.Get(ctx, &old)...) + if response.Diagnostics.HasError() { + return + } + + conn := r.Meta().TransferClient(ctx) + + diff, d := fwflex.Diff(ctx, new, old) + response.Diagnostics.Append(d...) + if response.Diagnostics.HasError() { + return + } + + if diff.HasChanges() { + hostKeyID := fwflex.StringValueFromFramework(ctx, new.HostKeyID) + var input transfer.UpdateHostKeyInput + response.Diagnostics.Append(fwflex.Expand(ctx, new, &input)...) + if response.Diagnostics.HasError() { + return + } + + _, err := conn.UpdateHostKey(ctx, &input) + if err != nil { + response.Diagnostics.AddError(fmt.Sprintf("updating Transfer Host Key (%s)", hostKeyID), err.Error()) + + return + } + } + + response.Diagnostics.Append(response.State.Set(ctx, &new)...) 
+} + +func (r *hostKeyResource) Delete(ctx context.Context, request resource.DeleteRequest, response *resource.DeleteResponse) { + var data hostKeyResourceModel + response.Diagnostics.Append(request.State.Get(ctx, &data)...) + if response.Diagnostics.HasError() { + return + } + + conn := r.Meta().TransferClient(ctx) + + serverID, hostKeyID := fwflex.StringValueFromFramework(ctx, data.ServerID), fwflex.StringValueFromFramework(ctx, data.HostKeyID) + input := transfer.DeleteHostKeyInput{ + HostKeyId: aws.String(hostKeyID), + ServerId: aws.String(serverID), + } + _, err := conn.DeleteHostKey(ctx, &input) + if errs.IsA[*awstypes.ResourceNotFoundException](err) { + return + } + if err != nil { + response.Diagnostics.AddError(fmt.Sprintf("deleting Transfer Host Key (%s)", hostKeyID), err.Error()) + + return + } +} + +func (r *hostKeyResource) ImportState(ctx context.Context, request resource.ImportStateRequest, response *resource.ImportStateResponse) { + const ( + hostKeyIDParts = 2 + ) + parts, err := intflex.ExpandResourceId(request.ID, hostKeyIDParts, true) + + if err != nil { + response.Diagnostics.Append(fwdiag.NewParsingResourceIDErrorDiagnostic(err)) + + return + } + + response.Diagnostics.Append(response.State.SetAttribute(ctx, path.Root("server_id"), parts[0])...) + response.Diagnostics.Append(response.State.SetAttribute(ctx, path.Root("host_key_id"), parts[1])...) 
+} + +func findHostKeyByTwoPartKey(ctx context.Context, conn *transfer.Client, serverID, hostKeyID string) (*awstypes.DescribedHostKey, error) { + input := transfer.DescribeHostKeyInput{ + HostKeyId: aws.String(hostKeyID), + ServerId: aws.String(serverID), + } + + return findHostKey(ctx, conn, &input) +} + +func findHostKey(ctx context.Context, conn *transfer.Client, input *transfer.DescribeHostKeyInput) (*awstypes.DescribedHostKey, error) { + output, err := conn.DescribeHostKey(ctx, input) + + if errs.IsA[*awstypes.ResourceNotFoundException](err) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, + } + } + + if err != nil { + return nil, err + } + + if output == nil || output.HostKey == nil { + return nil, tfresource.NewEmptyResultError(input) + } + + return output.HostKey, nil +} + +type hostKeyResourceModel struct { + framework.WithRegionModel + ARN types.String `tfsdk:"arn"` + Description types.String `tfsdk:"description"` + HostKeyBody types.String `tfsdk:"host_key_body"` + HostKeyBodyWO types.String `tfsdk:"host_key_body_wo"` + HostKeyFingerprint types.String `tfsdk:"host_key_fingerprint"` + HostKeyID types.String `tfsdk:"host_key_id"` + ServerID types.String `tfsdk:"server_id"` + Tags tftags.Map `tfsdk:"tags"` + TagsAll tftags.Map `tfsdk:"tags_all"` +} diff --git a/internal/service/transfer/host_key_test.go b/internal/service/transfer/host_key_test.go new file mode 100644 index 000000000000..096a19a6a859 --- /dev/null +++ b/internal/service/transfer/host_key_test.go @@ -0,0 +1,504 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package transfer_test + +import ( + "context" + "fmt" + "testing" + + "github.com/YakDriver/regexache" + awstypes "github.com/aws/aws-sdk-go-v2/service/transfer/types" + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/knownvalue" + "github.com/hashicorp/terraform-plugin-testing/plancheck" + "github.com/hashicorp/terraform-plugin-testing/statecheck" + "github.com/hashicorp/terraform-plugin-testing/terraform" + "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + tfknownvalue "github.com/hashicorp/terraform-provider-aws/internal/acctest/knownvalue" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + tftransfer "github.com/hashicorp/terraform-provider-aws/internal/service/transfer" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func testAccHostKey_basic(t *testing.T) { + ctx := acctest.Context(t) + var v awstypes.DescribedHostKey + resourceName := "aws_transfer_host_key.test" + _, privateKey, err := sdkacctest.RandSSHKeyPair(acctest.DefaultEmailAddress) + if err != nil { + t.Fatalf("error generating random SSH key: %s", err) + } + + resource.Test(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.TransferServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckHostKeyDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccHostKeyConfig_basic(privateKey), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckHostKeyExists(ctx, resourceName, &v), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, 
plancheck.ResourceActionCreate), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), tfknownvalue.RegionalARNRegexp("transfer", regexache.MustCompile(`host-key/.+/.+`))), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrDescription), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("host_key_fingerprint"), knownvalue.NotNull()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("host_key_id"), knownvalue.NotNull()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + }, + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "host_key_id", + ImportStateIdFunc: acctest.AttrsImportStateIdFunc(resourceName, ",", "server_id", "host_key_id"), + ImportStateVerifyIgnore: []string{"host_key_body"}, + }, + }, + }) +} + +func testAccHostKey_disappears(t *testing.T) { + ctx := acctest.Context(t) + var v awstypes.DescribedHostKey + resourceName := "aws_transfer_host_key.test" + _, privateKey, err := sdkacctest.RandSSHKeyPair(acctest.DefaultEmailAddress) + if err != nil { + t.Fatalf("error generating random SSH key: %s", err) + } + + resource.Test(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.TransferServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckHostKeyDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccHostKeyConfig_basic(privateKey), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckHostKeyExists(ctx, resourceName, &v), + acctest.CheckFrameworkResourceDisappears(ctx, acctest.Provider, tftransfer.ResourceHostKey, resourceName), + ), + ExpectNonEmptyPlan: true, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + 
plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + }, + }, + }, + }, + }) +} + +func testAccHostKey_tags(t *testing.T) { + ctx := acctest.Context(t) + var v awstypes.DescribedHostKey + resourceName := "aws_transfer_host_key.test" + _, privateKey, err := sdkacctest.RandSSHKeyPair(acctest.DefaultEmailAddress) + if err != nil { + t.Fatalf("error generating random SSH key: %s", err) + } + + resource.Test(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.TransferServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckHostKeyDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccHostKeyConfig_tags1(privateKey, acctest.CtKey1, acctest.CtValue1), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckHostKeyExists(ctx, resourceName, &v), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "host_key_id", + ImportStateIdFunc: acctest.AttrsImportStateIdFunc(resourceName, ",", "server_id", "host_key_id"), + ImportStateVerifyIgnore: []string{"host_key_body"}, + }, + { + Config: testAccHostKeyConfig_tags2(privateKey, acctest.CtKey1, acctest.CtValue1Updated, acctest.CtKey2, acctest.CtValue2), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckHostKeyExists(ctx, resourceName, 
&v), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1Updated), + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + }, + { + Config: testAccHostKeyConfig_tags1(privateKey, acctest.CtKey2, acctest.CtValue2), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckHostKeyExists(ctx, resourceName, &v), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + }, + }, + }) +} + +func testAccHostKey_description(t *testing.T) { + ctx := acctest.Context(t) + var v awstypes.DescribedHostKey + resourceName := "aws_transfer_host_key.test" + _, privateKey, err := sdkacctest.RandSSHKeyPair(acctest.DefaultEmailAddress) + if err != nil { + t.Fatalf("error generating random SSH key: %s", err) + } + + resource.Test(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.TransferServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckHostKeyDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccHostKeyConfig_description(privateKey, "description1"), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckHostKeyExists(ctx, resourceName, &v), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: 
[]plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrDescription), knownvalue.StringExact("description1")), + }, + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "host_key_id", + ImportStateIdFunc: acctest.AttrsImportStateIdFunc(resourceName, ",", "server_id", "host_key_id"), + ImportStateVerifyIgnore: []string{"host_key_body"}, + }, + { + Config: testAccHostKeyConfig_description(privateKey, "description2"), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckHostKeyExists(ctx, resourceName, &v), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrDescription), knownvalue.StringExact("description2")), + }, + }, + }, + }) +} + +func testAccHostKey_updateHostKeyBody(t *testing.T) { + ctx := acctest.Context(t) + var v awstypes.DescribedHostKey + resourceName := "aws_transfer_host_key.test" + _, privateKey1, err := sdkacctest.RandSSHKeyPair(acctest.DefaultEmailAddress) + if err != nil { + t.Fatalf("error generating random SSH key: %s", err) + } + _, privateKey2, err := sdkacctest.RandSSHKeyPair(acctest.DefaultEmailAddress) + if err != nil { + t.Fatalf("error generating random SSH key: %s", err) + } + + resource.Test(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.TransferServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckHostKeyDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccHostKeyConfig_basic(privateKey1), + Check: 
resource.ComposeAggregateTestCheckFunc( + testAccCheckHostKeyExists(ctx, resourceName, &v), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + }, + }, + }, + { + Config: testAccHostKeyConfig_basic(privateKey2), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckHostKeyExists(ctx, resourceName, &v), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionReplace), + }, + }, + }, + }, + }) +} + +func testAccHostKey_hostKeyBodyWO(t *testing.T) { + ctx := acctest.Context(t) + var v awstypes.DescribedHostKey + resourceName := "aws_transfer_host_key.test" + _, privateKey, err := sdkacctest.RandSSHKeyPair(acctest.DefaultEmailAddress) + if err != nil { + t.Fatalf("error generating random SSH key: %s", err) + } + + resource.Test(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.TransferServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckHostKeyDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccHostKeyConfig_hostKeyBodyWO(privateKey), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckHostKeyExists(ctx, resourceName, &v), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), tfknownvalue.RegionalARNRegexp("transfer", regexache.MustCompile(`host-key/.+/.+`))), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrDescription), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("host_key_fingerprint"), 
knownvalue.NotNull()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("host_key_id"), knownvalue.NotNull()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + }, + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "host_key_id", + ImportStateIdFunc: acctest.AttrsImportStateIdFunc(resourceName, ",", "server_id", "host_key_id"), + ImportStateVerifyIgnore: []string{"host_key_body"}, + }, + }, + }) +} + +func testAccHostKey_updateHostKeyBodyWO(t *testing.T) { + ctx := acctest.Context(t) + var v awstypes.DescribedHostKey + resourceName := "aws_transfer_host_key.test" + _, privateKey1, err := sdkacctest.RandSSHKeyPair(acctest.DefaultEmailAddress) + if err != nil { + t.Fatalf("error generating random SSH key: %s", err) + } + _, privateKey2, err := sdkacctest.RandSSHKeyPair(acctest.DefaultEmailAddress) + if err != nil { + t.Fatalf("error generating random SSH key: %s", err) + } + + resource.Test(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.TransferServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckHostKeyDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccHostKeyConfig_hostKeyBodyWO(privateKey1), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckHostKeyExists(ctx, resourceName, &v), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + }, + }, + }, + { + Config: testAccHostKeyConfig_hostKeyBodyWO(privateKey2), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckHostKeyExists(ctx, resourceName, &v), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, 
plancheck.ResourceActionReplace), + }, + }, + }, + }, + }) +} + +func testAccCheckHostKeyExists(ctx context.Context, n string, v *awstypes.DescribedHostKey) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + conn := acctest.Provider.Meta().(*conns.AWSClient).TransferClient(ctx) + + output, err := tftransfer.FindHostKeyByTwoPartKey(ctx, conn, rs.Primary.Attributes["server_id"], rs.Primary.Attributes["host_key_id"]) + + if err != nil { + return err + } + + *v = *output + + return nil + } +} + +func testAccCheckHostKeyDestroy(ctx context.Context) resource.TestCheckFunc { + return func(s *terraform.State) error { + conn := acctest.Provider.Meta().(*conns.AWSClient).TransferClient(ctx) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "aws_transfer_host_key" { + continue + } + + _, err := tftransfer.FindHostKeyByTwoPartKey(ctx, conn, rs.Primary.Attributes["server_id"], rs.Primary.Attributes["host_key_id"]) + + if tfresource.NotFound(err) { + continue + } + + if err != nil { + return err + } + + return fmt.Errorf("Transfer Host Key %s still exists", rs.Primary.Attributes["host_key_id"]) + } + + return nil + } +} + +func testAccHostKeyConfig_basic(privateKey string) string { + return fmt.Sprintf(` +resource "aws_transfer_server" "test" { + identity_provider_type = "SERVICE_MANAGED" +} + +resource "aws_transfer_host_key" "test" { + server_id = aws_transfer_server.test.id + host_key_body = < 0 && len(policyState) > 0 && (len(policyPlan[0].Principal.Entity.Path) > 0 && (len(policyState[0].Principal.Entity.Path)) > 0) { - policyPrincipal = (policyPlan[0].Principal.Entity.String() != policyState[0].Principal.Entity.String()) || (policyPlan[0].Principal.Type != policyState[0].Principal.Type) - } - - var policyResource bool - if len(policyPlan) > 0 && len(policyState) > 0 && (len(policyPlan[0].Resource.Entity.Path) > 0 && 
(len(policyState[0].Resource.Entity.Path)) > 0) { - policyResource = (policyPlan[0].Resource.Entity.String() != policyState[0].Resource.Entity.String()) || (policyPlan[0].Resource.Type != policyState[0].Resource.Type) - } + var policyPrincipal, policyResource, policyEffect bool + if len(planPolicies) > 0 && len(statePolicies) > 0 { + planPolicyAST := planPolicies[0].AST() + statePolicyAST := statePolicies[0].AST() - var policyEffect bool - if len(policyPlan) > 0 && len(policyState) > 0 { - policyEffect = policyPlan[0].Effect != policyState[0].Effect + policyEffect = planPolicyAST.Effect != statePolicyAST.Effect + policyPrincipal = planPolicyAST.Principal != statePolicyAST.Principal + policyResource = planPolicyAST.Resource != statePolicyAST.Resource } - resp.RequiresReplace = policyEffect || policyResource || policyPrincipal + resp.RequiresReplace = policyEffect || policyPrincipal || policyResource } const ( diff --git a/internal/service/verifiedpermissions/policy_store.go b/internal/service/verifiedpermissions/policy_store.go index 5e9f0d88a0b8..9ec2b7f17fd3 100644 --- a/internal/service/verifiedpermissions/policy_store.go +++ b/internal/service/verifiedpermissions/policy_store.go @@ -52,6 +52,14 @@ func (r *policyStoreResource) Schema(ctx context.Context, request resource.Schem s := schema.Schema{ Attributes: map[string]schema.Attribute{ names.AttrARN: framework.ARNAttributeComputedOnly(), + names.AttrDeletionProtection: schema.StringAttribute{ + CustomType: fwtypes.StringEnumType[awstypes.DeletionProtection](), + Optional: true, + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + }, names.AttrDescription: schema.StringAttribute{ Optional: true, }, @@ -118,11 +126,22 @@ func (r *policyStoreResource) Create(ctx context.Context, request resource.Creat } // Set values for unknowns. - response.Diagnostics.Append(fwflex.Flatten(ctx, output, &data)...) 
+ data.ID = fwflex.StringToFramework(ctx, output.PolicyStoreId) + + policyStore, err := findPolicyStoreByID(ctx, conn, data.ID.ValueString()) + + if err != nil { + response.Diagnostics.AddError( + create.ProblemStandardMessage(names.VerifiedPermissions, create.ErrActionReading, ResNamePolicyStore, data.PolicyStoreID.ValueString(), err), + err.Error(), + ) + return + } + + response.Diagnostics.Append(fwflex.Flatten(ctx, policyStore, &data)...) if response.Diagnostics.HasError() { return } - data.ID = fwflex.StringToFramework(ctx, output.PolicyStoreId) response.Diagnostics.Append(response.State.Set(ctx, &data)...) } @@ -175,7 +194,7 @@ func (r *policyStoreResource) Update(ctx context.Context, request resource.Updat conn := r.Meta().VerifiedPermissionsClient(ctx) - if !new.Description.Equal(old.Description) || !new.ValidationSettings.Equal(old.ValidationSettings) { + if !new.DeletionProtection.Equal(old.DeletionProtection) || !new.Description.Equal(old.Description) || !new.ValidationSettings.Equal(old.ValidationSettings) { var input verifiedpermissions.UpdatePolicyStoreInput response.Diagnostics.Append(fwflex.Expand(ctx, new, &input)...) 
if response.Diagnostics.HasError() { @@ -231,6 +250,7 @@ type policyStoreResourceModel struct { framework.WithRegionModel ARN types.String `tfsdk:"arn"` Description types.String `tfsdk:"description"` + DeletionProtection fwtypes.StringEnum[awstypes.DeletionProtection] `tfsdk:"deletion_protection"` ID types.String `tfsdk:"id"` PolicyStoreID types.String `tfsdk:"policy_store_id"` Tags tftags.Map `tfsdk:"tags"` diff --git a/internal/service/verifiedpermissions/policy_store_data_source.go b/internal/service/verifiedpermissions/policy_store_data_source.go index 3f6d7f8eb971..272ea5e532d0 100644 --- a/internal/service/verifiedpermissions/policy_store_data_source.go +++ b/internal/service/verifiedpermissions/policy_store_data_source.go @@ -41,6 +41,10 @@ func (d *policyStoreDataSource) Schema(ctx context.Context, request datasource.S CustomType: timetypes.RFC3339Type{}, Computed: true, }, + names.AttrDeletionProtection: schema.StringAttribute{ + Computed: true, + CustomType: fwtypes.StringEnumType[awstypes.DeletionProtection](), + }, names.AttrDescription: schema.StringAttribute{ Computed: true, }, @@ -87,6 +91,7 @@ type policyStoreDataSourceModel struct { framework.WithRegionModel ARN types.String `tfsdk:"arn"` CreatedDate timetypes.RFC3339 `tfsdk:"created_date"` + DeletionProtection fwtypes.StringEnum[awstypes.DeletionProtection] `tfsdk:"deletion_protection"` Description types.String `tfsdk:"description"` ID types.String `tfsdk:"id"` LastUpdatedDate timetypes.RFC3339 `tfsdk:"last_updated_date"` diff --git a/internal/service/verifiedpermissions/policy_store_data_source_test.go b/internal/service/verifiedpermissions/policy_store_data_source_test.go index 6ed4b4a466fd..c93f54029187 100644 --- a/internal/service/verifiedpermissions/policy_store_data_source_test.go +++ b/internal/service/verifiedpermissions/policy_store_data_source_test.go @@ -38,6 +38,7 @@ func TestAccVerifiedPermissionsPolicyStoreDataSource_basic(t *testing.T) { Check: resource.ComposeTestCheckFunc( 
testAccCheckPolicyStoreExists(ctx, dataSourceName, &policystore), resource.TestCheckResourceAttrPair(resourceName, "validation_settings.0.mode", dataSourceName, "validation_settings.0.mode"), + resource.TestCheckResourceAttr(resourceName, names.AttrDeletionProtection, "DISABLED"), resource.TestCheckResourceAttrPair(resourceName, names.AttrDescription, dataSourceName, names.AttrDescription), resource.TestCheckResourceAttrPair(resourceName, names.AttrARN, dataSourceName, names.AttrARN), resource.TestCheckResourceAttrSet(dataSourceName, names.AttrCreatedDate), diff --git a/internal/service/verifiedpermissions/policy_store_test.go b/internal/service/verifiedpermissions/policy_store_test.go index d4dc48b363d4..f13bc7f8baa4 100644 --- a/internal/service/verifiedpermissions/policy_store_test.go +++ b/internal/service/verifiedpermissions/policy_store_test.go @@ -45,6 +45,7 @@ func TestAccVerifiedPermissionsPolicyStore_basic(t *testing.T) { Check: resource.ComposeTestCheckFunc( testAccCheckPolicyStoreExists(ctx, resourceName, &policystore), resource.TestCheckResourceAttr(resourceName, "validation_settings.0.mode", "OFF"), + resource.TestCheckResourceAttr(resourceName, names.AttrDeletionProtection, "DISABLED"), resource.TestCheckResourceAttr(resourceName, names.AttrDescription, "Terraform acceptance test"), resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, "0"), resource.TestCheckResourceAttr(resourceName, acctest.CtTagsAllPercent, "0"), @@ -95,6 +96,47 @@ func TestAccVerifiedPermissionsPolicyStore_update(t *testing.T) { }, }) } +func TestAccVerifiedPermissionsPolicyStore_deletionProtection(t *testing.T) { + ctx := acctest.Context(t) + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + + var policystore verifiedpermissions.GetPolicyStoreOutput + resourceName := "aws_verifiedpermissions_policy_store.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + 
acctest.PreCheckPartitionHasService(t, names.VerifiedPermissionsEndpointID) + testAccPolicyStoresPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.VerifiedPermissionsServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckPolicyStoreDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccPolicyStoreConfig_deletion_protection("DISABLED"), + Check: resource.ComposeTestCheckFunc( + testAccCheckPolicyStoreExists(ctx, resourceName, &policystore), + resource.TestCheckResourceAttr(resourceName, names.AttrDeletionProtection, "DISABLED"), + ), + }, + { + Config: testAccPolicyStoreConfig_deletion_protection("ENABLED"), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr(resourceName, names.AttrDeletionProtection, "ENABLED"), + ), + }, + { + Config: testAccPolicyStoreConfig_deletion_protection("DISABLED"), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr(resourceName, names.AttrDeletionProtection, "DISABLED"), + ), + }, + }, + }) +} func TestAccVerifiedPermissionsPolicyStore_disappears(t *testing.T) { ctx := acctest.Context(t) @@ -254,6 +296,17 @@ resource "aws_verifiedpermissions_policy_store" "test" { }`, mode) } +func testAccPolicyStoreConfig_deletion_protection(deletionProtection string) string { + return fmt.Sprintf(` +resource "aws_verifiedpermissions_policy_store" "test" { + description = "Terraform acceptance test" + deletion_protection = %[1]q + validation_settings { + mode = "OFF" + } +}`, deletionProtection) +} + func testAccPolicyStoreConfig_tags1(mode, tagKey1, tagValue1 string) string { return fmt.Sprintf(` resource "aws_verifiedpermissions_policy_store" "test" { diff --git a/internal/service/verifiedpermissions/policy_test.go b/internal/service/verifiedpermissions/policy_test.go index 975260ab2a13..297cbf25390b 100644 --- a/internal/service/verifiedpermissions/policy_test.go +++ b/internal/service/verifiedpermissions/policy_test.go @@ -13,6 +13,7 @@ 
import ( awstypes "github.com/aws/aws-sdk-go-v2/service/verifiedpermissions/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/plancheck" "github.com/hashicorp/terraform-plugin-testing/terraform" "github.com/hashicorp/terraform-provider-aws/internal/acctest" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -115,6 +116,7 @@ func TestAccVerifiedPermissionsPolicy_update(t *testing.T) { policyStatement := "permit (principal, action == Action::\"view\", resource in Album:: \"test_album\");" policyStatementActionUpdated := "permit (principal, action == Action::\"write\", resource in Album:: \"test_album\");" policyStatementEffectUpdated := "forbid (principal, action == Action::\"view\", resource in Album:: \"test_album\");" + policyStatementResourceUpdated := "forbid (principal, action == Action::\"view\", resource in Album:: \"test_album_updated\");" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { @@ -136,6 +138,11 @@ func TestAccVerifiedPermissionsPolicy_update(t *testing.T) { }, { Config: testAccPolicyConfig_basic(rName, policyStatementActionUpdated), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + }, + }, Check: resource.ComposeTestCheckFunc( testAccCheckPolicyExists(ctx, resourceName, &policy), resource.TestCheckResourceAttr(resourceName, "definition.0.static.0.description", rName), @@ -145,6 +152,11 @@ func TestAccVerifiedPermissionsPolicy_update(t *testing.T) { }, { Config: testAccPolicyConfig_basic(rName, policyStatementEffectUpdated), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionReplace), + }, + }, Check: resource.ComposeTestCheckFunc( testAccCheckPolicyExists(ctx, 
resourceName, &policy), resource.TestCheckResourceAttr(resourceName, "definition.0.static.0.description", rName), @@ -152,6 +164,20 @@ func TestAccVerifiedPermissionsPolicy_update(t *testing.T) { resource.TestCheckResourceAttrSet(resourceName, "policy_id"), ), }, + { + Config: testAccPolicyConfig_basic(rName, policyStatementResourceUpdated), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionReplace), + }, + }, + Check: resource.ComposeTestCheckFunc( + testAccCheckPolicyExists(ctx, resourceName, &policy), + resource.TestCheckResourceAttr(resourceName, "definition.0.static.0.description", rName), + resource.TestCheckResourceAttr(resourceName, "definition.0.static.0.statement", policyStatementResourceUpdated), + resource.TestCheckResourceAttrSet(resourceName, "policy_id"), + ), + }, }, }) } diff --git a/internal/service/verifiedpermissions/service_endpoint_resolver_gen.go b/internal/service/verifiedpermissions/service_endpoint_resolver_gen.go index db962ad42bf1..1c91ab63da98 100644 --- a/internal/service/verifiedpermissions/service_endpoint_resolver_gen.go +++ b/internal/service/verifiedpermissions/service_endpoint_resolver_gen.go @@ -62,7 +62,7 @@ func (r resolverV2) ResolveEndpoint(ctx context.Context, params verifiedpermissi }) params.UseFIPS = aws.Bool(false) } else { - err = fmt.Errorf("looking up verifiedpermissions endpoint %q: %s", hostname, err) + err = fmt.Errorf("looking up verifiedpermissions endpoint %q: %w", hostname, err) return } } else { diff --git a/internal/service/verifiedpermissions/service_endpoints_gen_test.go b/internal/service/verifiedpermissions/service_endpoints_gen_test.go index 253d24ec3c71..a85465607c9f 100644 --- a/internal/service/verifiedpermissions/service_endpoints_gen_test.go +++ b/internal/service/verifiedpermissions/service_endpoints_gen_test.go @@ -521,7 +521,7 @@ func retrieveRegionMiddleware(region *string) 
middleware.SerializeMiddleware { ) } -var errCancelOperation = fmt.Errorf("Test: Canceling request") +var errCancelOperation = errors.New("Test: Canceling request") func addCancelRequestMiddleware() func(*middleware.Stack) error { return func(stack *middleware.Stack) error { diff --git a/internal/service/verifiedpermissions/service_package_gen.go b/internal/service/verifiedpermissions/service_package_gen.go index 99aeef68000c..f50383b90ec5 100644 --- a/internal/service/verifiedpermissions/service_package_gen.go +++ b/internal/service/verifiedpermissions/service_package_gen.go @@ -7,7 +7,6 @@ import ( "unique" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/verifiedpermissions" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -101,7 +100,7 @@ func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) ( func(o *verifiedpermissions.Options) { if inContext, ok := conns.FromContext(ctx); ok && inContext.VCREnabled() { tflog.Info(ctx, "overriding retry behavior to immediately return VCR errors") - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(vcr.InteractionNotFoundRetryableFunc)) + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), vcr.InteractionNotFoundRetryableFunc) } }, withExtraOptions(ctx, p, config), diff --git a/internal/service/verifiedpermissions/sweep.go b/internal/service/verifiedpermissions/sweep.go index 152fed964778..55386d21cea6 100644 --- a/internal/service/verifiedpermissions/sweep.go +++ b/internal/service/verifiedpermissions/sweep.go @@ -27,7 +27,7 @@ func sweepPolicyStores(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn := 
client.VerifiedPermissionsClient(ctx) diff --git a/internal/service/verifiedpermissions/tags_gen.go b/internal/service/verifiedpermissions/tags_gen.go index 0b4febbb8980..a676845dacff 100644 --- a/internal/service/verifiedpermissions/tags_gen.go +++ b/internal/service/verifiedpermissions/tags_gen.go @@ -3,8 +3,8 @@ package verifiedpermissions import ( "context" - "fmt" + "github.com/YakDriver/smarterr" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/verifiedpermissions" "github.com/hashicorp/terraform-plugin-log/tflog" @@ -26,7 +26,7 @@ func listTags(ctx context.Context, conn *verifiedpermissions.Client, identifier output, err := conn.ListTagsForResource(ctx, &input, optFns...) if err != nil { - return tftags.New(ctx, nil), err + return tftags.New(ctx, nil), smarterr.NewError(err) } return keyValueTags(ctx, output.Tags), nil @@ -38,7 +38,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri tags, err := listTags(ctx, meta.(*conns.AWSClient).VerifiedPermissionsClient(ctx), identifier) if err != nil { - return err + return smarterr.NewError(err) } if inContext, ok := tftags.FromContext(ctx); ok { @@ -99,7 +99,7 @@ func updateTags(ctx context.Context, conn *verifiedpermissions.Client, identifie _, err := conn.UntagResource(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("untagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } @@ -114,7 +114,7 @@ func updateTags(ctx context.Context, conn *verifiedpermissions.Client, identifie _, err := conn.TagResource(ctx, &input, optFns...) 
if err != nil { - return fmt.Errorf("tagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } diff --git a/internal/service/vpclattice/resource_configuration.go b/internal/service/vpclattice/resource_configuration.go index f9120eff9036..105894f747d5 100644 --- a/internal/service/vpclattice/resource_configuration.go +++ b/internal/service/vpclattice/resource_configuration.go @@ -372,7 +372,7 @@ func (r *resourceConfigurationResource) Delete(ctx context.Context, request reso const ( timeout = 1 * time.Minute ) - _, err := tfresource.RetryWhenIsAErrorMessageContains[*awstypes.ValidationException](ctx, timeout, func() (any, error) { + _, err := tfresource.RetryWhenIsAErrorMessageContains[any, *awstypes.ValidationException](ctx, timeout, func(ctx context.Context) (any, error) { return conn.DeleteResourceConfiguration(ctx, &vpclattice.DeleteResourceConfigurationInput{ ResourceConfigurationIdentifier: fwflex.StringFromFramework(ctx, data.ID), }) diff --git a/internal/service/vpclattice/resource_configuration_tags_gen_test.go b/internal/service/vpclattice/resource_configuration_tags_gen_test.go index 97405552cb07..1e82568cd9bb 100644 --- a/internal/service/vpclattice/resource_configuration_tags_gen_test.go +++ b/internal/service/vpclattice/resource_configuration_tags_gen_test.go @@ -7,7 +7,6 @@ import ( "github.com/aws/aws-sdk-go-v2/service/vpclattice" "github.com/hashicorp/terraform-plugin-testing/config" - sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/knownvalue" "github.com/hashicorp/terraform-plugin-testing/plancheck" @@ -19,11 +18,12 @@ import ( func TestAccVPCLatticeResourceConfiguration_tags(t *testing.T) { ctx := acctest.Context(t) + var v vpclattice.GetResourceConfigurationOutput resourceName := "aws_vpclattice_resource_configuration.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + 
rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.VPCLatticeServiceID), CheckDestroy: testAccCheckResourceConfigurationDestroy(ctx), @@ -201,11 +201,12 @@ func TestAccVPCLatticeResourceConfiguration_tags(t *testing.T) { func TestAccVPCLatticeResourceConfiguration_tags_null(t *testing.T) { ctx := acctest.Context(t) + var v vpclattice.GetResourceConfigurationOutput resourceName := "aws_vpclattice_resource_configuration.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.VPCLatticeServiceID), CheckDestroy: testAccCheckResourceConfigurationDestroy(ctx), @@ -263,11 +264,12 @@ func TestAccVPCLatticeResourceConfiguration_tags_null(t *testing.T) { func TestAccVPCLatticeResourceConfiguration_tags_EmptyMap(t *testing.T) { ctx := acctest.Context(t) + var v vpclattice.GetResourceConfigurationOutput resourceName := "aws_vpclattice_resource_configuration.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.VPCLatticeServiceID), CheckDestroy: testAccCheckResourceConfigurationDestroy(ctx), @@ -313,11 +315,12 @@ func TestAccVPCLatticeResourceConfiguration_tags_EmptyMap(t *testing.T) { func TestAccVPCLatticeResourceConfiguration_tags_AddOnUpdate(t *testing.T) { ctx := acctest.Context(t) + var v vpclattice.GetResourceConfigurationOutput resourceName := 
"aws_vpclattice_resource_configuration.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.VPCLatticeServiceID), CheckDestroy: testAccCheckResourceConfigurationDestroy(ctx), @@ -393,11 +396,12 @@ func TestAccVPCLatticeResourceConfiguration_tags_AddOnUpdate(t *testing.T) { func TestAccVPCLatticeResourceConfiguration_tags_EmptyTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) + var v vpclattice.GetResourceConfigurationOutput resourceName := "aws_vpclattice_resource_configuration.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.VPCLatticeServiceID), CheckDestroy: testAccCheckResourceConfigurationDestroy(ctx), @@ -483,11 +487,12 @@ func TestAccVPCLatticeResourceConfiguration_tags_EmptyTag_OnCreate(t *testing.T) func TestAccVPCLatticeResourceConfiguration_tags_EmptyTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + var v vpclattice.GetResourceConfigurationOutput resourceName := "aws_vpclattice_resource_configuration.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.VPCLatticeServiceID), CheckDestroy: testAccCheckResourceConfigurationDestroy(ctx), @@ -622,11 +627,12 @@ func TestAccVPCLatticeResourceConfiguration_tags_EmptyTag_OnUpdate_Add(t *testin func 
TestAccVPCLatticeResourceConfiguration_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + var v vpclattice.GetResourceConfigurationOutput resourceName := "aws_vpclattice_resource_configuration.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.VPCLatticeServiceID), CheckDestroy: testAccCheckResourceConfigurationDestroy(ctx), @@ -712,11 +718,12 @@ func TestAccVPCLatticeResourceConfiguration_tags_EmptyTag_OnUpdate_Replace(t *te func TestAccVPCLatticeResourceConfiguration_tags_DefaultTags_providerOnly(t *testing.T) { ctx := acctest.Context(t) + var v vpclattice.GetResourceConfigurationOutput resourceName := "aws_vpclattice_resource_configuration.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.VPCLatticeServiceID), CheckDestroy: testAccCheckResourceConfigurationDestroy(ctx), @@ -893,11 +900,12 @@ func TestAccVPCLatticeResourceConfiguration_tags_DefaultTags_providerOnly(t *tes func TestAccVPCLatticeResourceConfiguration_tags_DefaultTags_nonOverlapping(t *testing.T) { ctx := acctest.Context(t) + var v vpclattice.GetResourceConfigurationOutput resourceName := "aws_vpclattice_resource_configuration.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.VPCLatticeServiceID), 
CheckDestroy: testAccCheckResourceConfigurationDestroy(ctx), @@ -1053,11 +1061,12 @@ func TestAccVPCLatticeResourceConfiguration_tags_DefaultTags_nonOverlapping(t *t func TestAccVPCLatticeResourceConfiguration_tags_DefaultTags_overlapping(t *testing.T) { ctx := acctest.Context(t) + var v vpclattice.GetResourceConfigurationOutput resourceName := "aws_vpclattice_resource_configuration.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.VPCLatticeServiceID), CheckDestroy: testAccCheckResourceConfigurationDestroy(ctx), @@ -1229,11 +1238,12 @@ func TestAccVPCLatticeResourceConfiguration_tags_DefaultTags_overlapping(t *test func TestAccVPCLatticeResourceConfiguration_tags_DefaultTags_updateToProviderOnly(t *testing.T) { ctx := acctest.Context(t) + var v vpclattice.GetResourceConfigurationOutput resourceName := "aws_vpclattice_resource_configuration.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.VPCLatticeServiceID), CheckDestroy: testAccCheckResourceConfigurationDestroy(ctx), @@ -1319,11 +1329,12 @@ func TestAccVPCLatticeResourceConfiguration_tags_DefaultTags_updateToProviderOnl func TestAccVPCLatticeResourceConfiguration_tags_DefaultTags_updateToResourceOnly(t *testing.T) { ctx := acctest.Context(t) + var v vpclattice.GetResourceConfigurationOutput resourceName := "aws_vpclattice_resource_configuration.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - 
resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.VPCLatticeServiceID), CheckDestroy: testAccCheckResourceConfigurationDestroy(ctx), @@ -1408,11 +1419,12 @@ func TestAccVPCLatticeResourceConfiguration_tags_DefaultTags_updateToResourceOnl func TestAccVPCLatticeResourceConfiguration_tags_DefaultTags_emptyResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v vpclattice.GetResourceConfigurationOutput resourceName := "aws_vpclattice_resource_configuration.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.VPCLatticeServiceID), CheckDestroy: testAccCheckResourceConfigurationDestroy(ctx), @@ -1474,11 +1486,12 @@ func TestAccVPCLatticeResourceConfiguration_tags_DefaultTags_emptyResourceTag(t func TestAccVPCLatticeResourceConfiguration_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T) { ctx := acctest.Context(t) + var v vpclattice.GetResourceConfigurationOutput resourceName := "aws_vpclattice_resource_configuration.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.VPCLatticeServiceID), CheckDestroy: testAccCheckResourceConfigurationDestroy(ctx), @@ -1532,11 +1545,12 @@ func TestAccVPCLatticeResourceConfiguration_tags_DefaultTags_emptyProviderOnlyTa func TestAccVPCLatticeResourceConfiguration_tags_DefaultTags_nullOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v 
vpclattice.GetResourceConfigurationOutput resourceName := "aws_vpclattice_resource_configuration.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.VPCLatticeServiceID), CheckDestroy: testAccCheckResourceConfigurationDestroy(ctx), @@ -1601,11 +1615,12 @@ func TestAccVPCLatticeResourceConfiguration_tags_DefaultTags_nullOverlappingReso func TestAccVPCLatticeResourceConfiguration_tags_DefaultTags_nullNonOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v vpclattice.GetResourceConfigurationOutput resourceName := "aws_vpclattice_resource_configuration.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.VPCLatticeServiceID), CheckDestroy: testAccCheckResourceConfigurationDestroy(ctx), @@ -1672,11 +1687,12 @@ func TestAccVPCLatticeResourceConfiguration_tags_DefaultTags_nullNonOverlappingR func TestAccVPCLatticeResourceConfiguration_tags_ComputedTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) + var v vpclattice.GetResourceConfigurationOutput resourceName := "aws_vpclattice_resource_configuration.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.VPCLatticeServiceID), CheckDestroy: testAccCheckResourceConfigurationDestroy(ctx), @@ -1727,11 +1743,12 @@ func 
TestAccVPCLatticeResourceConfiguration_tags_ComputedTag_OnCreate(t *testing func TestAccVPCLatticeResourceConfiguration_tags_ComputedTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + var v vpclattice.GetResourceConfigurationOutput resourceName := "aws_vpclattice_resource_configuration.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.VPCLatticeServiceID), CheckDestroy: testAccCheckResourceConfigurationDestroy(ctx), @@ -1824,11 +1841,12 @@ func TestAccVPCLatticeResourceConfiguration_tags_ComputedTag_OnUpdate_Add(t *tes func TestAccVPCLatticeResourceConfiguration_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + var v vpclattice.GetResourceConfigurationOutput resourceName := "aws_vpclattice_resource_configuration.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.VPCLatticeServiceID), CheckDestroy: testAccCheckResourceConfigurationDestroy(ctx), @@ -1911,11 +1929,12 @@ func TestAccVPCLatticeResourceConfiguration_tags_ComputedTag_OnUpdate_Replace(t func TestAccVPCLatticeResourceConfiguration_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { ctx := acctest.Context(t) + var v vpclattice.GetResourceConfigurationOutput resourceName := "aws_vpclattice_resource_configuration.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { 
acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.VPCLatticeServiceID), CheckDestroy: testAccCheckResourceConfigurationDestroy(ctx), @@ -2073,11 +2092,12 @@ func TestAccVPCLatticeResourceConfiguration_tags_IgnoreTags_Overlap_DefaultTag(t func TestAccVPCLatticeResourceConfiguration_tags_IgnoreTags_Overlap_ResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v vpclattice.GetResourceConfigurationOutput resourceName := "aws_vpclattice_resource_configuration.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.VPCLatticeServiceID), CheckDestroy: testAccCheckResourceConfigurationDestroy(ctx), diff --git a/internal/service/vpclattice/resource_gateway.go b/internal/service/vpclattice/resource_gateway.go index d29c7a214f3e..e4517a17f5a7 100644 --- a/internal/service/vpclattice/resource_gateway.go +++ b/internal/service/vpclattice/resource_gateway.go @@ -12,10 +12,12 @@ import ( "github.com/aws/aws-sdk-go-v2/service/vpclattice" awstypes "github.com/aws/aws-sdk-go-v2/service/vpclattice/types" "github.com/hashicorp/terraform-plugin-framework-timeouts/resource/timeouts" + "github.com/hashicorp/terraform-plugin-framework-validators/int32validator" "github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator" "github.com/hashicorp/terraform-plugin-framework/path" "github.com/hashicorp/terraform-plugin-framework/resource" "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/int32planmodifier" "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" "github.com/hashicorp/terraform-plugin-framework/resource/schema/setplanmodifier" 
"github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" @@ -67,6 +69,18 @@ func (r *resourceGatewayResource) Schema(ctx context.Context, request resource.S stringplanmodifier.UseStateForUnknown(), }, }, + "ipv4_addresses_per_eni": schema.Int32Attribute{ + Optional: true, + Computed: true, + Validators: []validator.Int32{ + int32validator.AtLeast(1), + int32validator.AtMost(62), + }, + PlanModifiers: []planmodifier.Int32{ + int32planmodifier.RequiresReplace(), + int32planmodifier.UseStateForUnknown(), + }, + }, names.AttrName: schema.StringAttribute{ Required: true, Validators: []validator.String{ @@ -136,6 +150,11 @@ func (r *resourceGatewayResource) Create(ctx context.Context, request resource.C input.Tags = getTagsIn(ctx) input.VpcIdentifier = fwflex.StringFromFramework(ctx, data.VPCID) + // Ipv4AddressesPerEni is irrelevant if IPAddressType is IPv6 + if data.IPAddressType.ValueEnum() != awstypes.ResourceGatewayIpAddressTypeIpv6 { + input.Ipv4AddressesPerEni = fwflex.Int32FromFramework(ctx, data.IPV4AddressesPerEni) + } + outputCRG, err := conn.CreateResourceGateway(ctx, &input) if err != nil { @@ -354,15 +373,16 @@ func waitResourceGatewayDeleted(ctx context.Context, conn *vpclattice.Client, id type resourceGatewayResourceModel struct { framework.WithRegionModel - ARN types.String `tfsdk:"arn"` - ID types.String `tfsdk:"id"` - IPAddressType fwtypes.StringEnum[awstypes.ResourceGatewayIpAddressType] `tfsdk:"ip_address_type"` - Name types.String `tfsdk:"name"` - SecurityGroupIDs fwtypes.SetOfString `tfsdk:"security_group_ids"` - Status fwtypes.StringEnum[awstypes.ResourceGatewayStatus] `tfsdk:"status"` - SubnetIDs fwtypes.SetOfString `tfsdk:"subnet_ids"` - Tags tftags.Map `tfsdk:"tags"` - TagsAll tftags.Map `tfsdk:"tags_all"` - Timeouts timeouts.Value `tfsdk:"timeouts"` - VPCID types.String `tfsdk:"vpc_id"` + ARN types.String `tfsdk:"arn"` + ID types.String `tfsdk:"id"` + IPAddressType 
fwtypes.StringEnum[awstypes.ResourceGatewayIpAddressType] `tfsdk:"ip_address_type"` + IPV4AddressesPerEni types.Int32 `tfsdk:"ipv4_addresses_per_eni"` + Name types.String `tfsdk:"name"` + SecurityGroupIDs fwtypes.SetOfString `tfsdk:"security_group_ids"` + Status fwtypes.StringEnum[awstypes.ResourceGatewayStatus] `tfsdk:"status"` + SubnetIDs fwtypes.SetOfString `tfsdk:"subnet_ids"` + Tags tftags.Map `tfsdk:"tags"` + TagsAll tftags.Map `tfsdk:"tags_all"` + Timeouts timeouts.Value `tfsdk:"timeouts"` + VPCID types.String `tfsdk:"vpc_id"` } diff --git a/internal/service/vpclattice/resource_gateway_tags_gen_test.go b/internal/service/vpclattice/resource_gateway_tags_gen_test.go index f81ef1515c15..430b174cac3b 100644 --- a/internal/service/vpclattice/resource_gateway_tags_gen_test.go +++ b/internal/service/vpclattice/resource_gateway_tags_gen_test.go @@ -7,7 +7,6 @@ import ( "github.com/aws/aws-sdk-go-v2/service/vpclattice" "github.com/hashicorp/terraform-plugin-testing/config" - sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/knownvalue" "github.com/hashicorp/terraform-plugin-testing/plancheck" @@ -19,11 +18,12 @@ import ( func TestAccVPCLatticeResourceGateway_tags(t *testing.T) { ctx := acctest.Context(t) + var v vpclattice.GetResourceGatewayOutput resourceName := "aws_vpclattice_resource_gateway.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.VPCLatticeServiceID), CheckDestroy: testAccCheckResourceGatewayDestroy(ctx), @@ -201,11 +201,12 @@ func TestAccVPCLatticeResourceGateway_tags(t *testing.T) { func TestAccVPCLatticeResourceGateway_tags_null(t *testing.T) { ctx := 
acctest.Context(t) + var v vpclattice.GetResourceGatewayOutput resourceName := "aws_vpclattice_resource_gateway.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.VPCLatticeServiceID), CheckDestroy: testAccCheckResourceGatewayDestroy(ctx), @@ -263,11 +264,12 @@ func TestAccVPCLatticeResourceGateway_tags_null(t *testing.T) { func TestAccVPCLatticeResourceGateway_tags_EmptyMap(t *testing.T) { ctx := acctest.Context(t) + var v vpclattice.GetResourceGatewayOutput resourceName := "aws_vpclattice_resource_gateway.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.VPCLatticeServiceID), CheckDestroy: testAccCheckResourceGatewayDestroy(ctx), @@ -313,11 +315,12 @@ func TestAccVPCLatticeResourceGateway_tags_EmptyMap(t *testing.T) { func TestAccVPCLatticeResourceGateway_tags_AddOnUpdate(t *testing.T) { ctx := acctest.Context(t) + var v vpclattice.GetResourceGatewayOutput resourceName := "aws_vpclattice_resource_gateway.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.VPCLatticeServiceID), CheckDestroy: testAccCheckResourceGatewayDestroy(ctx), @@ -393,11 +396,12 @@ func TestAccVPCLatticeResourceGateway_tags_AddOnUpdate(t *testing.T) { func TestAccVPCLatticeResourceGateway_tags_EmptyTag_OnCreate(t 
*testing.T) { ctx := acctest.Context(t) + var v vpclattice.GetResourceGatewayOutput resourceName := "aws_vpclattice_resource_gateway.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.VPCLatticeServiceID), CheckDestroy: testAccCheckResourceGatewayDestroy(ctx), @@ -483,11 +487,12 @@ func TestAccVPCLatticeResourceGateway_tags_EmptyTag_OnCreate(t *testing.T) { func TestAccVPCLatticeResourceGateway_tags_EmptyTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + var v vpclattice.GetResourceGatewayOutput resourceName := "aws_vpclattice_resource_gateway.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.VPCLatticeServiceID), CheckDestroy: testAccCheckResourceGatewayDestroy(ctx), @@ -622,11 +627,12 @@ func TestAccVPCLatticeResourceGateway_tags_EmptyTag_OnUpdate_Add(t *testing.T) { func TestAccVPCLatticeResourceGateway_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + var v vpclattice.GetResourceGatewayOutput resourceName := "aws_vpclattice_resource_gateway.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.VPCLatticeServiceID), CheckDestroy: testAccCheckResourceGatewayDestroy(ctx), @@ -712,11 +718,12 @@ func 
TestAccVPCLatticeResourceGateway_tags_EmptyTag_OnUpdate_Replace(t *testing. func TestAccVPCLatticeResourceGateway_tags_DefaultTags_providerOnly(t *testing.T) { ctx := acctest.Context(t) + var v vpclattice.GetResourceGatewayOutput resourceName := "aws_vpclattice_resource_gateway.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.VPCLatticeServiceID), CheckDestroy: testAccCheckResourceGatewayDestroy(ctx), @@ -893,11 +900,12 @@ func TestAccVPCLatticeResourceGateway_tags_DefaultTags_providerOnly(t *testing.T func TestAccVPCLatticeResourceGateway_tags_DefaultTags_nonOverlapping(t *testing.T) { ctx := acctest.Context(t) + var v vpclattice.GetResourceGatewayOutput resourceName := "aws_vpclattice_resource_gateway.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.VPCLatticeServiceID), CheckDestroy: testAccCheckResourceGatewayDestroy(ctx), @@ -1053,11 +1061,12 @@ func TestAccVPCLatticeResourceGateway_tags_DefaultTags_nonOverlapping(t *testing func TestAccVPCLatticeResourceGateway_tags_DefaultTags_overlapping(t *testing.T) { ctx := acctest.Context(t) + var v vpclattice.GetResourceGatewayOutput resourceName := "aws_vpclattice_resource_gateway.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, 
names.VPCLatticeServiceID), CheckDestroy: testAccCheckResourceGatewayDestroy(ctx), @@ -1229,11 +1238,12 @@ func TestAccVPCLatticeResourceGateway_tags_DefaultTags_overlapping(t *testing.T) func TestAccVPCLatticeResourceGateway_tags_DefaultTags_updateToProviderOnly(t *testing.T) { ctx := acctest.Context(t) + var v vpclattice.GetResourceGatewayOutput resourceName := "aws_vpclattice_resource_gateway.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.VPCLatticeServiceID), CheckDestroy: testAccCheckResourceGatewayDestroy(ctx), @@ -1319,11 +1329,12 @@ func TestAccVPCLatticeResourceGateway_tags_DefaultTags_updateToProviderOnly(t *t func TestAccVPCLatticeResourceGateway_tags_DefaultTags_updateToResourceOnly(t *testing.T) { ctx := acctest.Context(t) + var v vpclattice.GetResourceGatewayOutput resourceName := "aws_vpclattice_resource_gateway.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.VPCLatticeServiceID), CheckDestroy: testAccCheckResourceGatewayDestroy(ctx), @@ -1408,11 +1419,12 @@ func TestAccVPCLatticeResourceGateway_tags_DefaultTags_updateToResourceOnly(t *t func TestAccVPCLatticeResourceGateway_tags_DefaultTags_emptyResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v vpclattice.GetResourceGatewayOutput resourceName := "aws_vpclattice_resource_gateway.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + 
acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.VPCLatticeServiceID), CheckDestroy: testAccCheckResourceGatewayDestroy(ctx), @@ -1474,11 +1486,12 @@ func TestAccVPCLatticeResourceGateway_tags_DefaultTags_emptyResourceTag(t *testi func TestAccVPCLatticeResourceGateway_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T) { ctx := acctest.Context(t) + var v vpclattice.GetResourceGatewayOutput resourceName := "aws_vpclattice_resource_gateway.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.VPCLatticeServiceID), CheckDestroy: testAccCheckResourceGatewayDestroy(ctx), @@ -1532,11 +1545,12 @@ func TestAccVPCLatticeResourceGateway_tags_DefaultTags_emptyProviderOnlyTag(t *t func TestAccVPCLatticeResourceGateway_tags_DefaultTags_nullOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v vpclattice.GetResourceGatewayOutput resourceName := "aws_vpclattice_resource_gateway.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.VPCLatticeServiceID), CheckDestroy: testAccCheckResourceGatewayDestroy(ctx), @@ -1601,11 +1615,12 @@ func TestAccVPCLatticeResourceGateway_tags_DefaultTags_nullOverlappingResourceTa func TestAccVPCLatticeResourceGateway_tags_DefaultTags_nullNonOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v vpclattice.GetResourceGatewayOutput resourceName := "aws_vpclattice_resource_gateway.test" - rName := 
sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.VPCLatticeServiceID), CheckDestroy: testAccCheckResourceGatewayDestroy(ctx), @@ -1672,11 +1687,12 @@ func TestAccVPCLatticeResourceGateway_tags_DefaultTags_nullNonOverlappingResourc func TestAccVPCLatticeResourceGateway_tags_ComputedTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) + var v vpclattice.GetResourceGatewayOutput resourceName := "aws_vpclattice_resource_gateway.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.VPCLatticeServiceID), CheckDestroy: testAccCheckResourceGatewayDestroy(ctx), @@ -1727,11 +1743,12 @@ func TestAccVPCLatticeResourceGateway_tags_ComputedTag_OnCreate(t *testing.T) { func TestAccVPCLatticeResourceGateway_tags_ComputedTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + var v vpclattice.GetResourceGatewayOutput resourceName := "aws_vpclattice_resource_gateway.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.VPCLatticeServiceID), CheckDestroy: testAccCheckResourceGatewayDestroy(ctx), @@ -1824,11 +1841,12 @@ func TestAccVPCLatticeResourceGateway_tags_ComputedTag_OnUpdate_Add(t *testing.T func TestAccVPCLatticeResourceGateway_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + var v 
vpclattice.GetResourceGatewayOutput resourceName := "aws_vpclattice_resource_gateway.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.VPCLatticeServiceID), CheckDestroy: testAccCheckResourceGatewayDestroy(ctx), @@ -1911,11 +1929,12 @@ func TestAccVPCLatticeResourceGateway_tags_ComputedTag_OnUpdate_Replace(t *testi func TestAccVPCLatticeResourceGateway_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { ctx := acctest.Context(t) + var v vpclattice.GetResourceGatewayOutput resourceName := "aws_vpclattice_resource_gateway.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.VPCLatticeServiceID), CheckDestroy: testAccCheckResourceGatewayDestroy(ctx), @@ -2073,11 +2092,12 @@ func TestAccVPCLatticeResourceGateway_tags_IgnoreTags_Overlap_DefaultTag(t *test func TestAccVPCLatticeResourceGateway_tags_IgnoreTags_Overlap_ResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v vpclattice.GetResourceGatewayOutput resourceName := "aws_vpclattice_resource_gateway.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.VPCLatticeServiceID), CheckDestroy: testAccCheckResourceGatewayDestroy(ctx), diff --git a/internal/service/vpclattice/resource_gateway_test.go 
b/internal/service/vpclattice/resource_gateway_test.go index 67b43d052cae..975798df5c01 100644 --- a/internal/service/vpclattice/resource_gateway_test.go +++ b/internal/service/vpclattice/resource_gateway_test.go @@ -165,6 +165,43 @@ func TestAccVPCLatticeResourceGateway_multipleSubnets(t *testing.T) { }) } +func TestAccVPCLatticeResourceGateway_ipv4AddressesPerEni(t *testing.T) { + ctx := acctest.Context(t) + var resourcegateway vpclattice.GetResourceGatewayOutput + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_vpclattice_resource_gateway.test" + addressType := "IPV4" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, names.VPCLatticeEndpointID) + }, + ErrorCheck: acctest.ErrorCheck(t, names.VPCLatticeServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckResourceGatewayDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccResourceGatewayConfig_ipv4AddressesPerEni(rName, 5), + Check: resource.ComposeTestCheckFunc( + testAccCheckResourceGatewayExists(ctx, resourceName, &resourcegateway), + resource.TestCheckResourceAttr(resourceName, names.AttrIPAddressType, addressType), + resource.TestCheckResourceAttr(resourceName, names.AttrStatus, "ACTIVE"), + resource.TestCheckResourceAttr(resourceName, "security_group_ids.#", "1"), + resource.TestCheckResourceAttr(resourceName, "subnet_ids.#", "1"), + resource.TestCheckResourceAttr(resourceName, "ipv4_addresses_per_eni", "5"), + acctest.MatchResourceAttrRegionalARN(ctx, resourceName, names.AttrARN, "vpc-lattice", regexache.MustCompile(`resourcegateway/rgw-.+`)), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + func TestAccVPCLatticeResourceGateway_update(t *testing.T) { ctx := acctest.Context(t) var resourcegateway vpclattice.GetResourceGatewayOutput @@ -385,6 +422,18 @@ resource 
"aws_vpclattice_resource_gateway" "test" { `, rName)) } +func testAccResourceGatewayConfig_ipv4AddressesPerEni(rName string, ipAddressesPerEni int32) string { + return acctest.ConfigCompose(testAccResourceGatewayConfig_base(rName), fmt.Sprintf(` +resource "aws_vpclattice_resource_gateway" "test" { + name = %[1]q + vpc_id = aws_vpc.test.id + security_group_ids = [aws_security_group.test.id] + subnet_ids = [aws_subnet.test.id] + ipv4_addresses_per_eni = %[2]d +} +`, rName, ipAddressesPerEni)) +} + func testAccResourceGatewayConfig_update1(rName string) string { return acctest.ConfigCompose(testAccResourceGatewayConfig_base(rName), fmt.Sprintf(` resource "aws_security_group" "test2" { diff --git a/internal/service/vpclattice/service_endpoint_resolver_gen.go b/internal/service/vpclattice/service_endpoint_resolver_gen.go index 72d43fd38087..74ec0b088651 100644 --- a/internal/service/vpclattice/service_endpoint_resolver_gen.go +++ b/internal/service/vpclattice/service_endpoint_resolver_gen.go @@ -62,7 +62,7 @@ func (r resolverV2) ResolveEndpoint(ctx context.Context, params vpclattice.Endpo }) params.UseFIPS = aws.Bool(false) } else { - err = fmt.Errorf("looking up vpclattice endpoint %q: %s", hostname, err) + err = fmt.Errorf("looking up vpclattice endpoint %q: %w", hostname, err) return } } else { diff --git a/internal/service/vpclattice/service_endpoints_gen_test.go b/internal/service/vpclattice/service_endpoints_gen_test.go index 43657905d5ef..d0beb9adebbc 100644 --- a/internal/service/vpclattice/service_endpoints_gen_test.go +++ b/internal/service/vpclattice/service_endpoints_gen_test.go @@ -521,7 +521,7 @@ func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { ) } -var errCancelOperation = fmt.Errorf("Test: Canceling request") +var errCancelOperation = errors.New("Test: Canceling request") func addCancelRequestMiddleware() func(*middleware.Stack) error { return func(stack *middleware.Stack) error { diff --git 
a/internal/service/vpclattice/service_network_resource_association_tags_gen_test.go b/internal/service/vpclattice/service_network_resource_association_tags_gen_test.go index e758dfc55d0c..a751da8167d4 100644 --- a/internal/service/vpclattice/service_network_resource_association_tags_gen_test.go +++ b/internal/service/vpclattice/service_network_resource_association_tags_gen_test.go @@ -7,7 +7,6 @@ import ( "github.com/aws/aws-sdk-go-v2/service/vpclattice" "github.com/hashicorp/terraform-plugin-testing/config" - sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/knownvalue" "github.com/hashicorp/terraform-plugin-testing/plancheck" @@ -19,11 +18,12 @@ import ( func TestAccVPCLatticeServiceNetworkResourceAssociation_tags(t *testing.T) { ctx := acctest.Context(t) + var v vpclattice.GetServiceNetworkResourceAssociationOutput resourceName := "aws_vpclattice_service_network_resource_association.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.VPCLatticeServiceID), CheckDestroy: testAccCheckServiceNetworkResourceAssociationDestroy(ctx), @@ -201,11 +201,12 @@ func TestAccVPCLatticeServiceNetworkResourceAssociation_tags(t *testing.T) { func TestAccVPCLatticeServiceNetworkResourceAssociation_tags_null(t *testing.T) { ctx := acctest.Context(t) + var v vpclattice.GetServiceNetworkResourceAssociationOutput resourceName := "aws_vpclattice_service_network_resource_association.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, 
resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.VPCLatticeServiceID), CheckDestroy: testAccCheckServiceNetworkResourceAssociationDestroy(ctx), @@ -263,11 +264,12 @@ func TestAccVPCLatticeServiceNetworkResourceAssociation_tags_null(t *testing.T) func TestAccVPCLatticeServiceNetworkResourceAssociation_tags_EmptyMap(t *testing.T) { ctx := acctest.Context(t) + var v vpclattice.GetServiceNetworkResourceAssociationOutput resourceName := "aws_vpclattice_service_network_resource_association.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.VPCLatticeServiceID), CheckDestroy: testAccCheckServiceNetworkResourceAssociationDestroy(ctx), @@ -313,11 +315,12 @@ func TestAccVPCLatticeServiceNetworkResourceAssociation_tags_EmptyMap(t *testing func TestAccVPCLatticeServiceNetworkResourceAssociation_tags_AddOnUpdate(t *testing.T) { ctx := acctest.Context(t) + var v vpclattice.GetServiceNetworkResourceAssociationOutput resourceName := "aws_vpclattice_service_network_resource_association.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.VPCLatticeServiceID), CheckDestroy: testAccCheckServiceNetworkResourceAssociationDestroy(ctx), @@ -393,11 +396,12 @@ func TestAccVPCLatticeServiceNetworkResourceAssociation_tags_AddOnUpdate(t *test func TestAccVPCLatticeServiceNetworkResourceAssociation_tags_EmptyTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) + var v 
vpclattice.GetServiceNetworkResourceAssociationOutput resourceName := "aws_vpclattice_service_network_resource_association.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.VPCLatticeServiceID), CheckDestroy: testAccCheckServiceNetworkResourceAssociationDestroy(ctx), @@ -483,11 +487,12 @@ func TestAccVPCLatticeServiceNetworkResourceAssociation_tags_EmptyTag_OnCreate(t func TestAccVPCLatticeServiceNetworkResourceAssociation_tags_EmptyTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + var v vpclattice.GetServiceNetworkResourceAssociationOutput resourceName := "aws_vpclattice_service_network_resource_association.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.VPCLatticeServiceID), CheckDestroy: testAccCheckServiceNetworkResourceAssociationDestroy(ctx), @@ -622,11 +627,12 @@ func TestAccVPCLatticeServiceNetworkResourceAssociation_tags_EmptyTag_OnUpdate_A func TestAccVPCLatticeServiceNetworkResourceAssociation_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + var v vpclattice.GetServiceNetworkResourceAssociationOutput resourceName := "aws_vpclattice_service_network_resource_association.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, 
names.VPCLatticeServiceID), CheckDestroy: testAccCheckServiceNetworkResourceAssociationDestroy(ctx), @@ -712,11 +718,12 @@ func TestAccVPCLatticeServiceNetworkResourceAssociation_tags_EmptyTag_OnUpdate_R func TestAccVPCLatticeServiceNetworkResourceAssociation_tags_DefaultTags_providerOnly(t *testing.T) { ctx := acctest.Context(t) + var v vpclattice.GetServiceNetworkResourceAssociationOutput resourceName := "aws_vpclattice_service_network_resource_association.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.VPCLatticeServiceID), CheckDestroy: testAccCheckServiceNetworkResourceAssociationDestroy(ctx), @@ -893,11 +900,12 @@ func TestAccVPCLatticeServiceNetworkResourceAssociation_tags_DefaultTags_provide func TestAccVPCLatticeServiceNetworkResourceAssociation_tags_DefaultTags_nonOverlapping(t *testing.T) { ctx := acctest.Context(t) + var v vpclattice.GetServiceNetworkResourceAssociationOutput resourceName := "aws_vpclattice_service_network_resource_association.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.VPCLatticeServiceID), CheckDestroy: testAccCheckServiceNetworkResourceAssociationDestroy(ctx), @@ -1053,11 +1061,12 @@ func TestAccVPCLatticeServiceNetworkResourceAssociation_tags_DefaultTags_nonOver func TestAccVPCLatticeServiceNetworkResourceAssociation_tags_DefaultTags_overlapping(t *testing.T) { ctx := acctest.Context(t) + var v vpclattice.GetServiceNetworkResourceAssociationOutput resourceName := 
"aws_vpclattice_service_network_resource_association.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.VPCLatticeServiceID), CheckDestroy: testAccCheckServiceNetworkResourceAssociationDestroy(ctx), @@ -1229,11 +1238,12 @@ func TestAccVPCLatticeServiceNetworkResourceAssociation_tags_DefaultTags_overlap func TestAccVPCLatticeServiceNetworkResourceAssociation_tags_DefaultTags_updateToProviderOnly(t *testing.T) { ctx := acctest.Context(t) + var v vpclattice.GetServiceNetworkResourceAssociationOutput resourceName := "aws_vpclattice_service_network_resource_association.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.VPCLatticeServiceID), CheckDestroy: testAccCheckServiceNetworkResourceAssociationDestroy(ctx), @@ -1319,11 +1329,12 @@ func TestAccVPCLatticeServiceNetworkResourceAssociation_tags_DefaultTags_updateT func TestAccVPCLatticeServiceNetworkResourceAssociation_tags_DefaultTags_updateToResourceOnly(t *testing.T) { ctx := acctest.Context(t) + var v vpclattice.GetServiceNetworkResourceAssociationOutput resourceName := "aws_vpclattice_service_network_resource_association.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.VPCLatticeServiceID), CheckDestroy: 
testAccCheckServiceNetworkResourceAssociationDestroy(ctx), @@ -1408,11 +1419,12 @@ func TestAccVPCLatticeServiceNetworkResourceAssociation_tags_DefaultTags_updateT func TestAccVPCLatticeServiceNetworkResourceAssociation_tags_DefaultTags_emptyResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v vpclattice.GetServiceNetworkResourceAssociationOutput resourceName := "aws_vpclattice_service_network_resource_association.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.VPCLatticeServiceID), CheckDestroy: testAccCheckServiceNetworkResourceAssociationDestroy(ctx), @@ -1474,11 +1486,12 @@ func TestAccVPCLatticeServiceNetworkResourceAssociation_tags_DefaultTags_emptyRe func TestAccVPCLatticeServiceNetworkResourceAssociation_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T) { ctx := acctest.Context(t) + var v vpclattice.GetServiceNetworkResourceAssociationOutput resourceName := "aws_vpclattice_service_network_resource_association.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.VPCLatticeServiceID), CheckDestroy: testAccCheckServiceNetworkResourceAssociationDestroy(ctx), @@ -1532,11 +1545,12 @@ func TestAccVPCLatticeServiceNetworkResourceAssociation_tags_DefaultTags_emptyPr func TestAccVPCLatticeServiceNetworkResourceAssociation_tags_DefaultTags_nullOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v vpclattice.GetServiceNetworkResourceAssociationOutput resourceName := 
"aws_vpclattice_service_network_resource_association.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.VPCLatticeServiceID), CheckDestroy: testAccCheckServiceNetworkResourceAssociationDestroy(ctx), @@ -1601,11 +1615,12 @@ func TestAccVPCLatticeServiceNetworkResourceAssociation_tags_DefaultTags_nullOve func TestAccVPCLatticeServiceNetworkResourceAssociation_tags_DefaultTags_nullNonOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v vpclattice.GetServiceNetworkResourceAssociationOutput resourceName := "aws_vpclattice_service_network_resource_association.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.VPCLatticeServiceID), CheckDestroy: testAccCheckServiceNetworkResourceAssociationDestroy(ctx), @@ -1672,11 +1687,12 @@ func TestAccVPCLatticeServiceNetworkResourceAssociation_tags_DefaultTags_nullNon func TestAccVPCLatticeServiceNetworkResourceAssociation_tags_ComputedTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) + var v vpclattice.GetServiceNetworkResourceAssociationOutput resourceName := "aws_vpclattice_service_network_resource_association.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.VPCLatticeServiceID), CheckDestroy: 
testAccCheckServiceNetworkResourceAssociationDestroy(ctx), @@ -1727,11 +1743,12 @@ func TestAccVPCLatticeServiceNetworkResourceAssociation_tags_ComputedTag_OnCreat func TestAccVPCLatticeServiceNetworkResourceAssociation_tags_ComputedTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + var v vpclattice.GetServiceNetworkResourceAssociationOutput resourceName := "aws_vpclattice_service_network_resource_association.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.VPCLatticeServiceID), CheckDestroy: testAccCheckServiceNetworkResourceAssociationDestroy(ctx), @@ -1824,11 +1841,12 @@ func TestAccVPCLatticeServiceNetworkResourceAssociation_tags_ComputedTag_OnUpdat func TestAccVPCLatticeServiceNetworkResourceAssociation_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + var v vpclattice.GetServiceNetworkResourceAssociationOutput resourceName := "aws_vpclattice_service_network_resource_association.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.VPCLatticeServiceID), CheckDestroy: testAccCheckServiceNetworkResourceAssociationDestroy(ctx), @@ -1911,11 +1929,12 @@ func TestAccVPCLatticeServiceNetworkResourceAssociation_tags_ComputedTag_OnUpdat func TestAccVPCLatticeServiceNetworkResourceAssociation_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { ctx := acctest.Context(t) + var v vpclattice.GetServiceNetworkResourceAssociationOutput resourceName := "aws_vpclattice_service_network_resource_association.test" - rName := 
sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.VPCLatticeServiceID), CheckDestroy: testAccCheckServiceNetworkResourceAssociationDestroy(ctx), @@ -2073,11 +2092,12 @@ func TestAccVPCLatticeServiceNetworkResourceAssociation_tags_IgnoreTags_Overlap_ func TestAccVPCLatticeServiceNetworkResourceAssociation_tags_IgnoreTags_Overlap_ResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v vpclattice.GetServiceNetworkResourceAssociationOutput resourceName := "aws_vpclattice_service_network_resource_association.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.VPCLatticeServiceID), CheckDestroy: testAccCheckServiceNetworkResourceAssociationDestroy(ctx), diff --git a/internal/service/vpclattice/service_package_gen.go b/internal/service/vpclattice/service_package_gen.go index a98a20f96213..57313208f26d 100644 --- a/internal/service/vpclattice/service_package_gen.go +++ b/internal/service/vpclattice/service_package_gen.go @@ -7,7 +7,6 @@ import ( "unique" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/vpclattice" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -212,7 +211,7 @@ func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) ( func(o *vpclattice.Options) { if inContext, ok := conns.FromContext(ctx); ok && inContext.VCREnabled() { tflog.Info(ctx, "overriding retry behavior to immediately return VCR 
errors") - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(vcr.InteractionNotFoundRetryableFunc)) + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), vcr.InteractionNotFoundRetryableFunc) } }, withExtraOptions(ctx, p, config), diff --git a/internal/service/vpclattice/sweep.go b/internal/service/vpclattice/sweep.go index 344847581fa0..608e2d9f8268 100644 --- a/internal/service/vpclattice/sweep.go +++ b/internal/service/vpclattice/sweep.go @@ -21,7 +21,8 @@ func RegisterSweepers() { awsv2.Register("aws_vpclattice_service", sweepServices) awsv2.Register("aws_vpclattice_service_network", sweepServiceNetworks, "aws_vpclattice_service") awsv2.Register("aws_vpclattice_service_network_resource_association", sweepServiceNetworkResourceAssociations) - awsv2.Register("aws_vpclattice_target_group", sweepTargetGroups) + awsv2.Register("aws_vpclattice_target_group", sweepTargetGroups, "aws_vpclattice_target_group_attachment") + awsv2.Register("aws_vpclattice_target_group_attachment", sweepTargetGroupAttachments) } func sweepResourceConfigurations(ctx context.Context, client *conns.AWSClient) ([]sweep.Sweepable, error) { @@ -168,3 +169,43 @@ func sweepTargetGroups(ctx context.Context, client *conns.AWSClient) ([]sweep.Sw return sweepResources, nil } + +func sweepTargetGroupAttachments(ctx context.Context, client *conns.AWSClient) ([]sweep.Sweepable, error) { + conn := client.VPCLatticeClient(ctx) + var sweepResources []sweep.Sweepable + + var input vpclattice.ListTargetGroupsInput + pages := vpclattice.NewListTargetGroupsPaginator(conn, &input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + if err != nil { + return nil, err + } + + for _, targetGroup := range page.Items { + input := vpclattice.ListTargetsInput{ + TargetGroupIdentifier: targetGroup.Id, + } + pages := vpclattice.NewListTargetsPaginator(conn, &input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + if err != nil 
{ + return nil, err + } + + for _, target := range page.Items { + r := resourceTargetGroupAttachment() + d := r.Data(nil) + + d.SetId(targetGroupAttachmentCreateResourceID(aws.ToString(targetGroup.Id), aws.ToString(target.Id), aws.ToInt32(target.Port))) + d.Set("target_group_identifier", targetGroup.Id) + d.Set(names.AttrTarget, []any{flattenTargetSummary(&target)}) + + sweepResources = append(sweepResources, sweep.NewSweepResource(r, d, client)) + } + } + } + } + + return sweepResources, nil +} diff --git a/internal/service/vpclattice/tags_gen.go b/internal/service/vpclattice/tags_gen.go index b99d4a7547dd..511a94304bc9 100644 --- a/internal/service/vpclattice/tags_gen.go +++ b/internal/service/vpclattice/tags_gen.go @@ -3,8 +3,8 @@ package vpclattice import ( "context" - "fmt" + "github.com/YakDriver/smarterr" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/vpclattice" "github.com/hashicorp/terraform-plugin-log/tflog" @@ -26,7 +26,7 @@ func listTags(ctx context.Context, conn *vpclattice.Client, identifier string, o output, err := conn.ListTagsForResource(ctx, &input, optFns...) if err != nil { - return tftags.New(ctx, nil), err + return tftags.New(ctx, nil), smarterr.NewError(err) } return keyValueTags(ctx, output.Tags), nil @@ -38,7 +38,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri tags, err := listTags(ctx, meta.(*conns.AWSClient).VPCLatticeClient(ctx), identifier) if err != nil { - return err + return smarterr.NewError(err) } if inContext, ok := tftags.FromContext(ctx); ok { @@ -99,7 +99,7 @@ func updateTags(ctx context.Context, conn *vpclattice.Client, identifier string, _, err := conn.UntagResource(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("untagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } @@ -114,7 +114,7 @@ func updateTags(ctx context.Context, conn *vpclattice.Client, identifier string, _, err := conn.TagResource(ctx, &input, optFns...) 
if err != nil { - return fmt.Errorf("tagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } diff --git a/internal/service/vpclattice/target_group.go b/internal/service/vpclattice/target_group.go index 98e2a0ebf6e3..0e2fe4fcf3db 100644 --- a/internal/service/vpclattice/target_group.go +++ b/internal/service/vpclattice/target_group.go @@ -309,7 +309,7 @@ func resourceTargetGroupDelete(ctx context.Context, d *schema.ResourceData, meta } // Draining the targets can take a moment, so we need to retry on conflict. - _, err := tfresource.RetryWhenIsA[*types.ConflictException](ctx, d.Timeout(schema.TimeoutDelete), func() (any, error) { + _, err := tfresource.RetryWhenIsA[any, *types.ConflictException](ctx, d.Timeout(schema.TimeoutDelete), func(ctx context.Context) (any, error) { return conn.DeleteTargetGroup(ctx, &input) }) diff --git a/internal/service/waf/rule.go b/internal/service/waf/rule.go index 917a5a8bdce9..efb70f6d6058 100644 --- a/internal/service/waf/rule.go +++ b/internal/service/waf/rule.go @@ -182,7 +182,7 @@ func resourceRuleDelete(ctx context.Context, d *schema.ResourceData, meta any) d const ( timeout = 1 * time.Minute ) - _, err := tfresource.RetryWhenIsA[*awstypes.WAFReferencedItemException](ctx, timeout, func() (any, error) { + _, err := tfresource.RetryWhenIsA[any, *awstypes.WAFReferencedItemException](ctx, timeout, func(ctx context.Context) (any, error) { return newRetryer(conn).RetryWithToken(ctx, func(token *string) (any, error) { input := &waf.DeleteRuleInput{ ChangeToken: token, diff --git a/internal/service/waf/service_endpoint_resolver_gen.go b/internal/service/waf/service_endpoint_resolver_gen.go index 7250bd6d46eb..22327bd30ddf 100644 --- a/internal/service/waf/service_endpoint_resolver_gen.go +++ b/internal/service/waf/service_endpoint_resolver_gen.go @@ -62,7 +62,7 @@ func (r resolverV2) ResolveEndpoint(ctx context.Context, params waf.EndpointPara }) params.UseFIPS = aws.Bool(false) } else { - err = 
fmt.Errorf("looking up waf endpoint %q: %s", hostname, err) + err = fmt.Errorf("looking up waf endpoint %q: %w", hostname, err) return } } else { diff --git a/internal/service/waf/service_endpoints_gen_test.go b/internal/service/waf/service_endpoints_gen_test.go index df3f447c06dc..c4e894895926 100644 --- a/internal/service/waf/service_endpoints_gen_test.go +++ b/internal/service/waf/service_endpoints_gen_test.go @@ -521,7 +521,7 @@ func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { ) } -var errCancelOperation = fmt.Errorf("Test: Canceling request") +var errCancelOperation = errors.New("Test: Canceling request") func addCancelRequestMiddleware() func(*middleware.Stack) error { return func(stack *middleware.Stack) error { diff --git a/internal/service/waf/service_package_gen.go b/internal/service/waf/service_package_gen.go index 8269a63a1648..1f71fc0ff938 100644 --- a/internal/service/waf/service_package_gen.go +++ b/internal/service/waf/service_package_gen.go @@ -7,7 +7,6 @@ import ( "unique" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/waf" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -173,7 +172,7 @@ func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) ( func(o *waf.Options) { if inContext, ok := conns.FromContext(ctx); ok && inContext.VCREnabled() { tflog.Info(ctx, "overriding retry behavior to immediately return VCR errors") - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(vcr.InteractionNotFoundRetryableFunc)) + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), vcr.InteractionNotFoundRetryableFunc) } }, withExtraOptions(ctx, p, config), diff --git a/internal/service/waf/sweep.go b/internal/service/waf/sweep.go index 9cb8c1c7f46c..e1a886ac3214 100644 --- a/internal/service/waf/sweep.go +++ 
b/internal/service/waf/sweep.go @@ -132,7 +132,7 @@ func sweepByteMatchSet(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.WAFClient(ctx) input := &waf.ListByteMatchSetsInput{} @@ -185,7 +185,7 @@ func sweepGeoMatchSet(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.WAFClient(ctx) input := &waf.ListGeoMatchSetsInput{} @@ -238,7 +238,7 @@ func sweepIPSet(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.WAFClient(ctx) input := &waf.ListIPSetsInput{} @@ -291,7 +291,7 @@ func sweepRateBasedRules(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.WAFClient(ctx) input := &waf.ListRateBasedRulesInput{} @@ -344,7 +344,7 @@ func sweepRegexMatchSet(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.WAFClient(ctx) input := &waf.ListRegexMatchSetsInput{} @@ -397,7 +397,7 @@ func sweepRegexPatternSet(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", 
err) } conn := client.WAFClient(ctx) input := &waf.ListRegexPatternSetsInput{} @@ -450,7 +450,7 @@ func sweepRuleGroups(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.WAFClient(ctx) input := &waf.ListRuleGroupsInput{} @@ -503,7 +503,7 @@ func sweepRules(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.WAFClient(ctx) input := &waf.ListRulesInput{} @@ -556,7 +556,7 @@ func sweepSizeConstraintSet(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.WAFClient(ctx) input := &waf.ListSizeConstraintSetsInput{} @@ -609,7 +609,7 @@ func sweepSQLInjectionMatchSet(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.WAFClient(ctx) input := &waf.ListSqlInjectionMatchSetsInput{} @@ -662,7 +662,7 @@ func sweepWebACLs(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %w", err) + return fmt.Errorf("getting client: %w", err) } conn := client.WAFClient(ctx) input := &waf.ListWebACLsInput{} @@ -715,7 +715,7 @@ func sweepXSSMatchSet(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: 
%s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.WAFClient(ctx) input := &waf.ListXssMatchSetsInput{} diff --git a/internal/service/waf/tags_gen.go b/internal/service/waf/tags_gen.go index a5af982dfb76..fd2244981380 100644 --- a/internal/service/waf/tags_gen.go +++ b/internal/service/waf/tags_gen.go @@ -3,8 +3,8 @@ package waf import ( "context" - "fmt" + "github.com/YakDriver/smarterr" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/waf" awstypes "github.com/aws/aws-sdk-go-v2/service/waf/types" @@ -27,7 +27,7 @@ func listTags(ctx context.Context, conn *waf.Client, identifier string, optFns . output, err := conn.ListTagsForResource(ctx, &input, optFns...) if err != nil { - return tftags.New(ctx, nil), err + return tftags.New(ctx, nil), smarterr.NewError(err) } return keyValueTags(ctx, output.TagInfoForResource.TagList), nil @@ -39,7 +39,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri tags, err := listTags(ctx, meta.(*conns.AWSClient).WAFClient(ctx), identifier) if err != nil { - return err + return smarterr.NewError(err) } if inContext, ok := tftags.FromContext(ctx); ok { @@ -117,7 +117,7 @@ func updateTags(ctx context.Context, conn *waf.Client, identifier string, oldTag _, err := conn.UntagResource(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("untagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } @@ -132,7 +132,7 @@ func updateTags(ctx context.Context, conn *waf.Client, identifier string, oldTag _, err := conn.TagResource(ctx, &input, optFns...) 
if err != nil { - return fmt.Errorf("tagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } diff --git a/internal/service/waf/token_handlers.go b/internal/service/waf/token_handlers.go index d3cc0174f397..392d30cfd12b 100644 --- a/internal/service/waf/token_handlers.go +++ b/internal/service/waf/token_handlers.go @@ -30,7 +30,7 @@ func (t *retryer) RetryWithToken(ctx context.Context, f withTokenFunc) (any, err const ( timeout = 15 * time.Minute ) - return tfresource.RetryWhenIsA[*awstypes.WAFStaleDataException](ctx, timeout, func() (any, error) { + return tfresource.RetryWhenIsA[any, *awstypes.WAFStaleDataException](ctx, timeout, func(ctx context.Context) (any, error) { input := &waf.GetChangeTokenInput{} output, err := t.connection.GetChangeToken(ctx, input) diff --git a/internal/service/wafregional/service_endpoint_resolver_gen.go b/internal/service/wafregional/service_endpoint_resolver_gen.go index 1baaa5ef61c1..e03de5759dc0 100644 --- a/internal/service/wafregional/service_endpoint_resolver_gen.go +++ b/internal/service/wafregional/service_endpoint_resolver_gen.go @@ -62,7 +62,7 @@ func (r resolverV2) ResolveEndpoint(ctx context.Context, params wafregional.Endp }) params.UseFIPS = aws.Bool(false) } else { - err = fmt.Errorf("looking up wafregional endpoint %q: %s", hostname, err) + err = fmt.Errorf("looking up wafregional endpoint %q: %w", hostname, err) return } } else { diff --git a/internal/service/wafregional/service_endpoints_gen_test.go b/internal/service/wafregional/service_endpoints_gen_test.go index 4d64f5677ae5..c89fa4a35c11 100644 --- a/internal/service/wafregional/service_endpoints_gen_test.go +++ b/internal/service/wafregional/service_endpoints_gen_test.go @@ -521,7 +521,7 @@ func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { ) } -var errCancelOperation = fmt.Errorf("Test: Canceling request") +var errCancelOperation = errors.New("Test: Canceling request") func addCancelRequestMiddleware() 
func(*middleware.Stack) error { return func(stack *middleware.Stack) error { diff --git a/internal/service/wafregional/service_package_gen.go b/internal/service/wafregional/service_package_gen.go index bc8db1c0044c..f63717f8fce6 100644 --- a/internal/service/wafregional/service_package_gen.go +++ b/internal/service/wafregional/service_package_gen.go @@ -7,7 +7,6 @@ import ( "unique" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/wafregional" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -179,7 +178,7 @@ func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) ( func(o *wafregional.Options) { if inContext, ok := conns.FromContext(ctx); ok && inContext.VCREnabled() { tflog.Info(ctx, "overriding retry behavior to immediately return VCR errors") - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(vcr.InteractionNotFoundRetryableFunc)) + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), vcr.InteractionNotFoundRetryableFunc) } }, withExtraOptions(ctx, p, config), diff --git a/internal/service/wafregional/sweep.go b/internal/service/wafregional/sweep.go index fd3a811c3bb7..931f404f202c 100644 --- a/internal/service/wafregional/sweep.go +++ b/internal/service/wafregional/sweep.go @@ -132,7 +132,7 @@ func sweepByteMatchSet(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.WAFRegionalClient(ctx) input := &wafregional.ListByteMatchSetsInput{} @@ -185,7 +185,7 @@ func sweepGeoMatchSet(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + 
return fmt.Errorf("getting client: %w", err) } conn := client.WAFRegionalClient(ctx) input := &wafregional.ListGeoMatchSetsInput{} @@ -238,7 +238,7 @@ func sweepIPSet(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.WAFRegionalClient(ctx) input := &wafregional.ListIPSetsInput{} @@ -291,7 +291,7 @@ func sweepRateBasedRules(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.WAFRegionalClient(ctx) input := &wafregional.ListRateBasedRulesInput{} @@ -344,7 +344,7 @@ func sweepRegexMatchSet(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.WAFRegionalClient(ctx) input := &wafregional.ListRegexMatchSetsInput{} @@ -397,7 +397,7 @@ func sweepRegexPatternSet(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.WAFRegionalClient(ctx) input := &wafregional.ListRegexPatternSetsInput{} @@ -450,7 +450,7 @@ func sweepRuleGroups(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.WAFRegionalClient(ctx) input := &wafregional.ListRuleGroupsInput{} @@ -503,7 +503,7 @@ func sweepRules(region string) error { ctx := 
sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.WAFRegionalClient(ctx) input := &wafregional.ListRulesInput{} @@ -556,7 +556,7 @@ func sweepSizeConstraintSet(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.WAFRegionalClient(ctx) input := &wafregional.ListSizeConstraintSetsInput{} @@ -609,7 +609,7 @@ func sweepSQLInjectionMatchSet(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.WAFRegionalClient(ctx) input := &wafregional.ListSqlInjectionMatchSetsInput{} @@ -662,7 +662,7 @@ func sweepWebACLs(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %w", err) + return fmt.Errorf("getting client: %w", err) } conn := client.WAFRegionalClient(ctx) input := &wafregional.ListWebACLsInput{} @@ -715,7 +715,7 @@ func sweepXSSMatchSet(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.WAFRegionalClient(ctx) input := &wafregional.ListXssMatchSetsInput{} diff --git a/internal/service/wafregional/tags_gen.go b/internal/service/wafregional/tags_gen.go index 60edb0febf3c..d91bc7fdfdee 100644 --- a/internal/service/wafregional/tags_gen.go +++ b/internal/service/wafregional/tags_gen.go @@ -3,8 +3,8 @@ package wafregional import ( 
"context" - "fmt" + "github.com/YakDriver/smarterr" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/wafregional" awstypes "github.com/aws/aws-sdk-go-v2/service/wafregional/types" @@ -27,7 +27,7 @@ func listTags(ctx context.Context, conn *wafregional.Client, identifier string, output, err := conn.ListTagsForResource(ctx, &input, optFns...) if err != nil { - return tftags.New(ctx, nil), err + return tftags.New(ctx, nil), smarterr.NewError(err) } return keyValueTags(ctx, output.TagInfoForResource.TagList), nil @@ -39,7 +39,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri tags, err := listTags(ctx, meta.(*conns.AWSClient).WAFRegionalClient(ctx), identifier) if err != nil { - return err + return smarterr.NewError(err) } if inContext, ok := tftags.FromContext(ctx); ok { @@ -117,7 +117,7 @@ func updateTags(ctx context.Context, conn *wafregional.Client, identifier string _, err := conn.UntagResource(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("untagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } @@ -132,7 +132,7 @@ func updateTags(ctx context.Context, conn *wafregional.Client, identifier string _, err := conn.TagResource(ctx, &input, optFns...) 
if err != nil { - return fmt.Errorf("tagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } diff --git a/internal/service/wafregional/token_handlers.go b/internal/service/wafregional/token_handlers.go index 9160175e8372..ad1fcce5d652 100644 --- a/internal/service/wafregional/token_handlers.go +++ b/internal/service/wafregional/token_handlers.go @@ -29,7 +29,7 @@ func (t *retryer) RetryWithToken(ctx context.Context, f withTokenFunc) (any, err const ( timeout = 15 * time.Minute ) - return tfresource.RetryWhenIsA[*awstypes.WAFStaleDataException](ctx, timeout, func() (any, error) { + return tfresource.RetryWhenIsA[any, *awstypes.WAFStaleDataException](ctx, timeout, func(ctx context.Context) (any, error) { input := &wafregional.GetChangeTokenInput{} output, err := t.connection.GetChangeToken(ctx, input) diff --git a/internal/service/wafregional/web_acl_association.go b/internal/service/wafregional/web_acl_association.go index 4173ea2e069c..b7150fc0f2c0 100644 --- a/internal/service/wafregional/web_acl_association.go +++ b/internal/service/wafregional/web_acl_association.go @@ -67,7 +67,7 @@ func resourceWebACLAssociationCreate(ctx context.Context, d *schema.ResourceData WebACLId: aws.String(webACLID), } - _, err := tfresource.RetryWhenIsA[*awstypes.WAFUnavailableEntityException](ctx, d.Timeout(schema.TimeoutCreate), func() (any, error) { + _, err := tfresource.RetryWhenIsA[any, *awstypes.WAFUnavailableEntityException](ctx, d.Timeout(schema.TimeoutCreate), func(ctx context.Context) (any, error) { return conn.AssociateWebACL(ctx, input) }) diff --git a/internal/service/wafv2/exports_test.go b/internal/service/wafv2/exports_test.go index 660620dda906..54a2f993ba72 100644 --- a/internal/service/wafv2/exports_test.go +++ b/internal/service/wafv2/exports_test.go @@ -12,7 +12,9 @@ var ( ResourceWebACLAssociation = resourceWebACLAssociation ResourceWebACLLoggingConfiguration = resourceWebACLLoggingConfiguration ResourceAPIKey = newAPIKeyResource + 
ResourceWebACLRuleGroupAssociation = newResourceWebACLRuleGroupAssociation + CloudFrontDistributionIDFromARN = cloudFrontDistributionIDFromARN FindAPIKeyByTwoPartKey = findAPIKeyByTwoPartKey FindIPSetByThreePartKey = findIPSetByThreePartKey FindLoggingConfigurationByARN = findLoggingConfigurationByARN @@ -20,6 +22,8 @@ var ( FindRuleGroupByThreePartKey = findRuleGroupByThreePartKey FindWebACLByResourceARN = findWebACLByResourceARN FindWebACLByThreePartKey = findWebACLByThreePartKey + IsCloudFrontDistributionARN = isCloudFrontDistributionARN ListRuleGroupsPages = listRuleGroupsPages ListWebACLsPages = listWebACLsPages + ParseWebACLARN = parseWebACLARN ) diff --git a/internal/service/wafv2/flex.go b/internal/service/wafv2/flex.go index 65a84b57a3c8..c0809dbf0af3 100644 --- a/internal/service/wafv2/flex.go +++ b/internal/service/wafv2/flex.go @@ -1078,6 +1078,41 @@ func expandWebACLRulesJSON(rawRules string) ([]awstypes.Rule, error) { return rules, nil } +func expandRuleGroupRulesJSON(rawRules string) ([]awstypes.Rule, error) { + // Backwards compatibility. 
+ if rawRules == "" { + return nil, errors.New("decoding JSON: unexpected end of JSON input") + } + + var temp []any + err := tfjson.DecodeFromBytes([]byte(rawRules), &temp) + if err != nil { + return nil, fmt.Errorf("decoding JSON: %w", err) + } + + for _, v := range temp { + walkRulesGroupJSON(reflect.ValueOf(v)) + } + + out, err := tfjson.EncodeToBytes(temp) + if err != nil { + return nil, err + } + + var rules []awstypes.Rule + err = tfjson.DecodeFromBytes(out, &rules) + if err != nil { + return nil, err + } + + for i, r := range rules { + if reflect.ValueOf(r).IsZero() { + return nil, fmt.Errorf("invalid Rule Group Rule supplied at index (%d)", i) + } + } + return rules, nil +} + func walkWebACLJSON(v reflect.Value) { m := map[string][]struct { key string @@ -1125,6 +1160,53 @@ func walkWebACLJSON(v reflect.Value) { } } +func walkRulesGroupJSON(v reflect.Value) { + m := map[string][]struct { + key string + outputType any + }{ + "ByteMatchStatement": { + {key: "SearchString", outputType: []byte{}}, + }, + } + + for v.Kind() == reflect.Ptr || v.Kind() == reflect.Interface { + v = v.Elem() + } + + switch v.Kind() { + case reflect.Map: + for _, k := range v.MapKeys() { + if val, ok := m[k.String()]; ok { + st := v.MapIndex(k).Interface().(map[string]any) + for _, va := range val { + if st[va.key] == nil { + continue + } + str := st[va.key] + switch reflect.ValueOf(va.outputType).Kind() { + case reflect.Slice, reflect.Array: + switch reflect.ValueOf(va.outputType).Type().Elem().Kind() { + case reflect.Uint8: + base64String := itypes.Base64Encode([]byte(str.(string))) + st[va.key] = base64String + default: + } + default: + } + } + } else { + walkRulesGroupJSON(v.MapIndex(k)) + } + } + case reflect.Array, reflect.Slice: + for i := range v.Len() { + walkRulesGroupJSON(v.Index(i)) + } + default: + } +} + func expandWebACLRules(l []any) []awstypes.Rule { if len(l) == 0 || l[0] == nil { return nil @@ -1774,6 +1856,9 @@ func expandRateBasedStatementCustomKeys(l []any) 
[]awstypes.RateBasedStatementCu for _, ck := range l { r := awstypes.RateBasedStatementCustomKey{} m := ck.(map[string]any) + if v, ok := m["asn"]; ok && len(v.([]any)) > 0 { + r.ASN = &awstypes.RateLimitAsn{} + } if v, ok := m["cookie"]; ok { r.Cookie = expandRateLimitCookie(v.([]any)) } @@ -2454,8 +2539,8 @@ func flattenCookiesMatchPattern(c *awstypes.CookieMatchPattern) any { } m := map[string]any{ - "included_cookies": aws.StringSlice(c.IncludedCookies), - "excluded_cookies": aws.StringSlice(c.ExcludedCookies), + "included_cookies": c.IncludedCookies, + "excluded_cookies": c.ExcludedCookies, } if c.All != nil { @@ -3246,6 +3331,7 @@ func flattenHeader(apiObject *awstypes.ResponseInspectionHeader) []any { m := map[string]any{ "failure_values": apiObject.FailureValues, + names.AttrName: apiObject.Name, "success_values": apiObject.SuccessValues, } @@ -3379,6 +3465,11 @@ func flattenRateBasedStatementCustomKeys(apiObject []awstypes.RateBasedStatement for i, o := range apiObject { tfMap := map[string]any{} + if o.ASN != nil { + tfMap["asn"] = []any{ + map[string]any{}, + } + } if o.Cookie != nil { tfMap["cookie"] = flattenRateLimitCookie(o.Cookie) } diff --git a/internal/service/wafv2/ip_set.go b/internal/service/wafv2/ip_set.go index 6bc74a860e86..3e01e46c9191 100644 --- a/internal/service/wafv2/ip_set.go +++ b/internal/service/wafv2/ip_set.go @@ -249,7 +249,7 @@ func resourceIPSetDelete(ctx context.Context, d *schema.ResourceData, meta any) const ( timeout = 5 * time.Minute ) - _, err := tfresource.RetryWhenIsA[*awstypes.WAFAssociatedItemException](ctx, timeout, func() (any, error) { + _, err := tfresource.RetryWhenIsA[any, *awstypes.WAFAssociatedItemException](ctx, timeout, func(ctx context.Context) (any, error) { return conn.DeleteIPSet(ctx, input) }) diff --git a/internal/service/wafv2/regex_pattern_set.go b/internal/service/wafv2/regex_pattern_set.go index 64d259211293..9119a0d8b538 100644 --- a/internal/service/wafv2/regex_pattern_set.go +++ 
b/internal/service/wafv2/regex_pattern_set.go @@ -94,7 +94,6 @@ func resourceRegexPatternSet() *schema.Resource { "regular_expression": { Type: schema.TypeSet, Optional: true, - MaxItems: 10, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "regex_string": { @@ -229,7 +228,7 @@ func resourceRegexPatternSetDelete(ctx context.Context, d *schema.ResourceData, const ( timeout = 5 * time.Minute ) - _, err := tfresource.RetryWhenIsA[*awstypes.WAFAssociatedItemException](ctx, timeout, func() (any, error) { + _, err := tfresource.RetryWhenIsA[any, *awstypes.WAFAssociatedItemException](ctx, timeout, func(ctx context.Context) (any, error) { return conn.DeleteRegexPatternSet(ctx, input) }) diff --git a/internal/service/wafv2/rule_group.go b/internal/service/wafv2/rule_group.go index 0636374fffe0..89cc54d2c0e7 100644 --- a/internal/service/wafv2/rule_group.go +++ b/internal/service/wafv2/rule_group.go @@ -18,6 +18,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/structure" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/create" @@ -26,6 +27,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + "github.com/hashicorp/terraform-provider-aws/internal/verify" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -98,9 +100,21 @@ func resourceRuleGroup() *schema.Resource { validation.StringMatch(regexache.MustCompile(`^[0-9A-Za-z_-]+$`), "must contain only alphanumeric hyphen and underscore characters"), ), }, + "rules_json": { + Type: schema.TypeString, + Optional: true, + 
ConflictsWith: []string{names.AttrRule}, + ValidateFunc: validation.StringIsJSON, + DiffSuppressFunc: verify.SuppressEquivalentJSONDiffs, + StateFunc: func(v any) string { + json, _ := structure.NormalizeJsonString(v) + return json + }, + }, names.AttrRule: { - Type: schema.TypeSet, - Optional: true, + Type: schema.TypeSet, + Optional: true, + ConflictsWith: []string{"rules_json"}, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ names.AttrAction: { @@ -155,12 +169,23 @@ func resourceRuleGroupCreate(ctx context.Context, d *schema.ResourceData, meta a input := &wafv2.CreateRuleGroupInput{ Capacity: aws.Int64(int64(d.Get("capacity").(int))), Name: aws.String(name), - Rules: expandRules(d.Get(names.AttrRule).(*schema.Set).List()), Scope: awstypes.Scope(d.Get(names.AttrScope).(string)), Tags: getTagsIn(ctx), VisibilityConfig: expandVisibilityConfig(d.Get("visibility_config").([]any)), } + if v, ok := d.GetOk(names.AttrRule); ok { + input.Rules = expandRules(v.(*schema.Set).List()) + } + + if v, ok := d.GetOk("rules_json"); ok { + rules, err := expandRuleGroupRulesJSON(v.(string)) + if err != nil { + return sdkdiag.AppendErrorf(diags, "setting rule: %s", err) + } + input.Rules = rules + } + if v, ok := d.GetOk("custom_response_body"); ok && v.(*schema.Set).Len() > 0 { input.CustomResponseBodies = expandCustomResponseBodies(v.(*schema.Set).List()) } @@ -172,7 +197,7 @@ func resourceRuleGroupCreate(ctx context.Context, d *schema.ResourceData, meta a const ( timeout = 5 * time.Minute ) - outputRaw, err := tfresource.RetryWhenIsA[*awstypes.WAFUnavailableEntityException](ctx, timeout, func() (any, error) { + outputRaw, err := tfresource.RetryWhenIsA[any, *awstypes.WAFUnavailableEntityException](ctx, timeout, func(ctx context.Context) (any, error) { return conn.CreateRuleGroup(ctx, input) }) @@ -212,8 +237,13 @@ func resourceRuleGroupRead(ctx context.Context, d *schema.ResourceData, meta any d.Set("lock_token", output.LockToken) d.Set(names.AttrName, ruleGroup.Name) 
d.Set(names.AttrNamePrefix, create.NamePrefixFromName(aws.ToString(ruleGroup.Name))) - if err := d.Set(names.AttrRule, flattenRules(ruleGroup.Rules)); err != nil { - return sdkdiag.AppendErrorf(diags, "setting rule: %s", err) + if _, ok := d.GetOk("rules_json"); !ok { + if err := d.Set(names.AttrRule, flattenRules(ruleGroup.Rules)); err != nil { + return sdkdiag.AppendErrorf(diags, "setting rule: %s", err) + } + } else { + d.Set("rules_json", d.Get("rules_json")) + d.Set(names.AttrRule, nil) } if err := d.Set("visibility_config", flattenVisibilityConfig(ruleGroup.VisibilityConfig)); err != nil { return sdkdiag.AppendErrorf(diags, "setting visibility_config: %s", err) @@ -231,11 +261,22 @@ func resourceRuleGroupUpdate(ctx context.Context, d *schema.ResourceData, meta a Id: aws.String(d.Id()), LockToken: aws.String(d.Get("lock_token").(string)), Name: aws.String(d.Get(names.AttrName).(string)), - Rules: expandRules(d.Get(names.AttrRule).(*schema.Set).List()), Scope: awstypes.Scope(d.Get(names.AttrScope).(string)), VisibilityConfig: expandVisibilityConfig(d.Get("visibility_config").([]any)), } + if v, ok := d.GetOk(names.AttrRule); ok { + input.Rules = expandRules(v.(*schema.Set).List()) + } + + if v, ok := d.GetOk("rules_json"); ok { + rules, err := expandRuleGroupRulesJSON(v.(string)) + if err != nil { + return sdkdiag.AppendErrorf(diags, "expanding WAFv2 RuleGroup JSON rule (%s): %s", d.Id(), err) + } + input.Rules = rules + } + if v, ok := d.GetOk("custom_response_body"); ok && v.(*schema.Set).Len() > 0 { input.CustomResponseBodies = expandCustomResponseBodies(v.(*schema.Set).List()) } @@ -247,7 +288,7 @@ func resourceRuleGroupUpdate(ctx context.Context, d *schema.ResourceData, meta a const ( timeout = 5 * time.Minute ) - _, err := tfresource.RetryWhenIsA[*awstypes.WAFUnavailableEntityException](ctx, timeout, func() (any, error) { + _, err := tfresource.RetryWhenIsA[any, *awstypes.WAFUnavailableEntityException](ctx, timeout, func(ctx context.Context) (any, error) 
{ return conn.UpdateRuleGroup(ctx, input) }) @@ -274,7 +315,7 @@ func resourceRuleGroupDelete(ctx context.Context, d *schema.ResourceData, meta a const ( timeout = 5 * time.Minute ) - _, err := tfresource.RetryWhenIsOneOf2[*awstypes.WAFAssociatedItemException, *awstypes.WAFUnavailableEntityException](ctx, timeout, func() (any, error) { + _, err := tfresource.RetryWhenIsOneOf2[any, *awstypes.WAFAssociatedItemException, *awstypes.WAFUnavailableEntityException](ctx, timeout, func(ctx context.Context) (any, error) { return conn.DeleteRuleGroup(ctx, input) }) diff --git a/internal/service/wafv2/rule_group_test.go b/internal/service/wafv2/rule_group_test.go index c0de0d1bafed..eefb718991d3 100644 --- a/internal/service/wafv2/rule_group_test.go +++ b/internal/service/wafv2/rule_group_test.go @@ -5701,3 +5701,75 @@ resource "aws_wafv2_rule_group" "test" { } `, rName) } +func TestAccWAFV2RuleGroup_rulesJSON(t *testing.T) { + ctx := acctest.Context(t) + var v awstypes.RuleGroup + ruleGroupName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_wafv2_rule_group.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheckScopeRegional(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.WAFV2ServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckRuleGroupDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccRuleGroupConfig_rulesJSON(ruleGroupName), + Check: resource.ComposeTestCheckFunc( + testAccCheckRuleGroupExists(ctx, resourceName, &v), + acctest.MatchResourceAttrRegionalARN(ctx, resourceName, names.AttrARN, "wafv2", regexache.MustCompile(`regional/rulegroup/.+$`)), + resource.TestCheckResourceAttrSet(resourceName, "rules_json"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"rules_json", names.AttrRule}, + ImportStateIdFunc: 
testAccRuleGroupImportStateIdFunc(resourceName), + }, + }, + }) +} + +func testAccRuleGroupConfig_rulesJSON(rName string) string { + return fmt.Sprintf(` +resource "aws_wafv2_rule_group" "test" { + capacity = 100 + name = %[1]q + scope = "REGIONAL" + + rules_json = jsonencode([{ + Name = "rule-1" + Priority = 1 + Action = { + Count = {} + } + Statement = { + ByteMatchStatement = { + SearchString = "badbot" + FieldToMatch = { + UriPath = {} + } + TextTransformations = [{ + Priority = 1 + Type = "NONE" + }] + PositionalConstraint = "CONTAINS" + } + } + VisibilityConfig = { + CloudwatchMetricsEnabled = false + MetricName = "friendly-rule-metric-name" + SampledRequestsEnabled = false + } + }]) + + visibility_config { + cloudwatch_metrics_enabled = false + metric_name = "friendly-metric-name" + sampled_requests_enabled = false + } +} +`, rName) +} diff --git a/internal/service/wafv2/schemas.go b/internal/service/wafv2/schemas.go index 5a33db78f5df..c7b7bcf73801 100644 --- a/internal/service/wafv2/schemas.go +++ b/internal/service/wafv2/schemas.go @@ -1083,6 +1083,7 @@ func rateBasedStatementSchema(level int) *schema.Schema { MaxItems: 5, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ + "asn": emptySchema(), "cookie": { Type: schema.TypeList, Optional: true, diff --git a/internal/service/wafv2/service_endpoint_resolver_gen.go b/internal/service/wafv2/service_endpoint_resolver_gen.go index bfaf551e2ca8..6228c2e10bf4 100644 --- a/internal/service/wafv2/service_endpoint_resolver_gen.go +++ b/internal/service/wafv2/service_endpoint_resolver_gen.go @@ -62,7 +62,7 @@ func (r resolverV2) ResolveEndpoint(ctx context.Context, params wafv2.EndpointPa }) params.UseFIPS = aws.Bool(false) } else { - err = fmt.Errorf("looking up wafv2 endpoint %q: %s", hostname, err) + err = fmt.Errorf("looking up wafv2 endpoint %q: %w", hostname, err) return } } else { diff --git a/internal/service/wafv2/service_endpoints_gen_test.go b/internal/service/wafv2/service_endpoints_gen_test.go 
index ac0a4f95fc9c..7a91683f4fc5 100644 --- a/internal/service/wafv2/service_endpoints_gen_test.go +++ b/internal/service/wafv2/service_endpoints_gen_test.go @@ -524,7 +524,7 @@ func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { ) } -var errCancelOperation = fmt.Errorf("Test: Canceling request") +var errCancelOperation = errors.New("Test: Canceling request") func addCancelRequestMiddleware() func(*middleware.Stack) error { return func(stack *middleware.Stack) error { diff --git a/internal/service/wafv2/service_package_gen.go b/internal/service/wafv2/service_package_gen.go index 8c27e48c5823..1ef5b620ce6f 100644 --- a/internal/service/wafv2/service_package_gen.go +++ b/internal/service/wafv2/service_package_gen.go @@ -7,7 +7,6 @@ import ( "unique" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/wafv2" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -30,6 +29,12 @@ func (p *servicePackage) FrameworkResources(ctx context.Context) []*inttypes.Ser Name: "API Key", Region: unique.Make(inttypes.ResourceRegionDefault()), }, + { + Factory: newResourceWebACLRuleGroupAssociation, + TypeName: "aws_wafv2_web_acl_rule_group_association", + Name: "Web ACL Rule Group Association", + Region: unique.Make(inttypes.ResourceRegionDefault()), + }, } } @@ -138,7 +143,7 @@ func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) ( func(o *wafv2.Options) { if inContext, ok := conns.FromContext(ctx); ok && inContext.VCREnabled() { tflog.Info(ctx, "overriding retry behavior to immediately return VCR errors") - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(vcr.InteractionNotFoundRetryableFunc)) + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), vcr.InteractionNotFoundRetryableFunc) } }, withExtraOptions(ctx, p, config), diff --git 
a/internal/service/wafv2/tags_gen.go b/internal/service/wafv2/tags_gen.go index ccd41d0ae818..f65a34788014 100644 --- a/internal/service/wafv2/tags_gen.go +++ b/internal/service/wafv2/tags_gen.go @@ -3,8 +3,8 @@ package wafv2 import ( "context" - "fmt" + "github.com/YakDriver/smarterr" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/wafv2" awstypes "github.com/aws/aws-sdk-go-v2/service/wafv2/types" @@ -27,7 +27,7 @@ func listTags(ctx context.Context, conn *wafv2.Client, identifier string, optFns output, err := conn.ListTagsForResource(ctx, &input, optFns...) if err != nil { - return tftags.New(ctx, nil), err + return tftags.New(ctx, nil), smarterr.NewError(err) } return keyValueTags(ctx, output.TagInfoForResource.TagList), nil @@ -39,7 +39,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri tags, err := listTags(ctx, meta.(*conns.AWSClient).WAFV2Client(ctx), identifier) if err != nil { - return err + return smarterr.NewError(err) } if inContext, ok := tftags.FromContext(ctx); ok { @@ -117,7 +117,7 @@ func updateTags(ctx context.Context, conn *wafv2.Client, identifier string, oldT _, err := conn.UntagResource(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("untagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } @@ -132,7 +132,7 @@ func updateTags(ctx context.Context, conn *wafv2.Client, identifier string, oldT _, err := conn.TagResource(ctx, &input, optFns...) 
if err != nil { - return fmt.Errorf("tagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } diff --git a/internal/service/wafv2/web_acl.go b/internal/service/wafv2/web_acl.go index 3179ce0fcc1a..da1e87828573 100644 --- a/internal/service/wafv2/web_acl.go +++ b/internal/service/wafv2/web_acl.go @@ -305,7 +305,7 @@ func resourceWebACLCreate(ctx context.Context, d *schema.ResourceData, meta any) const ( timeout = 5 * time.Minute ) - outputRaw, err := tfresource.RetryWhenIsA[*awstypes.WAFUnavailableEntityException](ctx, timeout, func() (any, error) { + outputRaw, err := tfresource.RetryWhenIsA[any, *awstypes.WAFUnavailableEntityException](ctx, timeout, func(ctx context.Context) (any, error) { return conn.CreateWebACL(ctx, input) }) @@ -384,7 +384,21 @@ func resourceWebACLUpdate(ctx context.Context, d *schema.ResourceData, meta any) var diags diag.Diagnostics conn := meta.(*conns.AWSClient).WAFV2Client(ctx) - if d.HasChangesExcept(names.AttrTags, names.AttrTagsAll) { + // https://github.com/hashicorp/terraform-provider-aws/pull/42740. 
+ // if d.HasChangesExcept(names.AttrTags, names.AttrTagsAll) { + if d.HasChanges( + "association_config", + "captcha_config", + "challenge_config", + "custom_response_body", + "data_protection_config", + names.AttrDefaultAction, + names.AttrDescription, + "rule_json", + names.AttrRule, + "token_domains", + "visibility_config", + ) { aclName := d.Get(names.AttrName).(string) aclScope := d.Get(names.AttrScope).(string) aclLockToken := d.Get("lock_token").(string) @@ -449,7 +463,7 @@ func resourceWebACLUpdate(ctx context.Context, d *schema.ResourceData, meta any) const ( timeout = 5 * time.Minute ) - _, err := tfresource.RetryWhenIsA[*awstypes.WAFUnavailableEntityException](ctx, timeout, func() (any, error) { + _, err := tfresource.RetryWhenIsA[any, *awstypes.WAFUnavailableEntityException](ctx, timeout, func(ctx context.Context) (any, error) { return conn.UpdateWebACL(ctx, input) }) @@ -464,7 +478,7 @@ func resourceWebACLUpdate(ctx context.Context, d *schema.ResourceData, meta any) if newLockToken := aws.ToString(output.LockToken); newLockToken != aclLockToken { // Retrieved a new lock token, retry due to other processes modifying the web acl out of band (See: https://docs.aws.amazon.com/sdk-for-go/api/service/shield/#Shield.EnableApplicationLayerAutomaticResponse) input.LockToken = aws.String(newLockToken) - _, err = tfresource.RetryWhenIsOneOf2[*awstypes.WAFAssociatedItemException, *awstypes.WAFUnavailableEntityException](ctx, timeout, func() (any, error) { + _, err = tfresource.RetryWhenIsOneOf2[any, *awstypes.WAFAssociatedItemException, *awstypes.WAFUnavailableEntityException](ctx, timeout, func(ctx context.Context) (any, error) { return conn.UpdateWebACL(ctx, input) }) @@ -500,7 +514,7 @@ func resourceWebACLDelete(ctx context.Context, d *schema.ResourceData, meta any) const ( timeout = 5 * time.Minute ) - _, err := tfresource.RetryWhenIsOneOf2[*awstypes.WAFAssociatedItemException, *awstypes.WAFUnavailableEntityException](ctx, timeout, func() (any, error) { + _, 
err := tfresource.RetryWhenIsOneOf2[any, *awstypes.WAFAssociatedItemException, *awstypes.WAFUnavailableEntityException](ctx, timeout, func(ctx context.Context) (any, error) { return conn.DeleteWebACL(ctx, input) }) @@ -515,7 +529,7 @@ func resourceWebACLDelete(ctx context.Context, d *schema.ResourceData, meta any) if newLockToken := aws.ToString(output.LockToken); newLockToken != aclLockToken { // Retrieved a new lock token, retry due to other processes modifying the web acl out of band (See: https://docs.aws.amazon.com/sdk-for-go/api/service/shield/#Shield.EnableApplicationLayerAutomaticResponse) input.LockToken = aws.String(newLockToken) - _, err = tfresource.RetryWhenIsOneOf2[*awstypes.WAFAssociatedItemException, *awstypes.WAFUnavailableEntityException](ctx, timeout, func() (any, error) { + _, err = tfresource.RetryWhenIsOneOf2[any, *awstypes.WAFAssociatedItemException, *awstypes.WAFUnavailableEntityException](ctx, timeout, func(ctx context.Context) (any, error) { return conn.DeleteWebACL(ctx, input) }) @@ -536,13 +550,7 @@ func resourceWebACLDelete(ctx context.Context, d *schema.ResourceData, meta any) return diags } -func findWebACLByThreePartKey(ctx context.Context, conn *wafv2.Client, id, name, scope string) (*wafv2.GetWebACLOutput, error) { - input := &wafv2.GetWebACLInput{ - Id: aws.String(id), - Name: aws.String(name), - Scope: awstypes.Scope(scope), - } - +func findWebACL(ctx context.Context, conn *wafv2.Client, input *wafv2.GetWebACLInput) (*wafv2.GetWebACLOutput, error) { output, err := conn.GetWebACL(ctx, input) if errs.IsA[*awstypes.WAFNonexistentItemException](err) { @@ -563,6 +571,16 @@ func findWebACLByThreePartKey(ctx context.Context, conn *wafv2.Client, id, name, return output, nil } +func findWebACLByThreePartKey(ctx context.Context, conn *wafv2.Client, id, name, scope string) (*wafv2.GetWebACLOutput, error) { + input := wafv2.GetWebACLInput{ + Id: aws.String(id), + Name: aws.String(name), + Scope: awstypes.Scope(scope), + } + + return 
findWebACL(ctx, conn, &input) +} + // filterWebACLRules removes the AWS-added Shield Advanced auto mitigation rule here // so that the provider will not report diff and/or attempt to remove the rule as it is // owned and managed by AWS. diff --git a/internal/service/wafv2/web_acl_association.go b/internal/service/wafv2/web_acl_association.go index 88aae4c21385..a677893798df 100644 --- a/internal/service/wafv2/web_acl_association.go +++ b/internal/service/wafv2/web_acl_association.go @@ -78,7 +78,7 @@ func resourceWebACLAssociationCreate(ctx context.Context, d *schema.ResourceData } log.Printf("[INFO] Creating WAFv2 WebACL Association: %s", d.Id()) - if _, err = tfresource.RetryWhenIsA[*awstypes.WAFUnavailableEntityException](ctx, d.Timeout(schema.TimeoutCreate), func() (any, error) { + if _, err = tfresource.RetryWhenIsA[any, *awstypes.WAFUnavailableEntityException](ctx, d.Timeout(schema.TimeoutCreate), func(ctx context.Context) (any, error) { return conn.AssociateWebACL(ctx, input) }); err != nil { return sdkdiag.AppendErrorf(diags, "creating WAFv2 WebACL Association (%s): %s", id, err) diff --git a/internal/service/wafv2/web_acl_data_source.go b/internal/service/wafv2/web_acl_data_source.go index 6ef0d75eb906..3c8c97d22a74 100644 --- a/internal/service/wafv2/web_acl_data_source.go +++ b/internal/service/wafv2/web_acl_data_source.go @@ -5,15 +5,22 @@ package wafv2 import ( "context" + "fmt" + "strings" "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/aws/arn" "github.com/aws/aws-sdk-go-v2/service/wafv2" awstypes "github.com/aws/aws-sdk-go-v2/service/wafv2/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + sdkretry "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/enum" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" + 
"github.com/hashicorp/terraform-provider-aws/internal/retry" + tfcloudfront "github.com/hashicorp/terraform-provider-aws/internal/service/cloudfront" + "github.com/hashicorp/terraform-provider-aws/internal/verify" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -33,8 +40,15 @@ func dataSourceWebACL() *schema.Resource { Computed: true, }, names.AttrName: { - Type: schema.TypeString, - Required: true, + Type: schema.TypeString, + Optional: true, + ExactlyOneOf: []string{names.AttrName, names.AttrResourceARN}, + }, + names.AttrResourceARN: { + Type: schema.TypeString, + Optional: true, + ExactlyOneOf: []string{names.AttrName, names.AttrResourceARN}, + ValidateFunc: verify.ValidARN, }, names.AttrScope: { Type: schema.TypeString, @@ -49,44 +63,144 @@ func dataSourceWebACL() *schema.Resource { func dataSourceWebACLRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { var diags diag.Diagnostics conn := meta.(*conns.AWSClient).WAFV2Client(ctx) + name := d.Get(names.AttrName).(string) + resourceArn := d.Get(names.AttrResourceARN).(string) + scope := awstypes.Scope(d.Get(names.AttrScope).(string)) - var foundWebACL awstypes.WebACLSummary - input := &wafv2.ListWebACLsInput{ - Scope: awstypes.Scope(d.Get(names.AttrScope).(string)), - Limit: aws.Int32(100), - } + var webACL *awstypes.WebACL + var err error - for { - resp, err := conn.ListWebACLs(ctx, input) + if resourceArn != "" { + // Check if this is a CloudFront distribution ARN and scope is CLOUDFRONT + if scope == awstypes.ScopeCloudfront && isCloudFrontDistributionARN(resourceArn) { + webACL, err = findWebACLByCloudFrontDistributionARN(ctx, meta.(*conns.AWSClient), resourceArn) + } else { + // Use GetWebACLForResource API for regional resources + webACL, err = findWebACLByResourceARN(ctx, conn, resourceArn) + } if err != nil { - return sdkdiag.AppendErrorf(diags, "reading WAFv2 WebACLs: %s", err) + if retry.NotFound(err) { + return sdkdiag.AppendErrorf(diags, "WAFv2 WebACL not found for 
resource_arn: %s", resourceArn) + } + return sdkdiag.AppendErrorf(diags, "reading WAFv2 WebACL for resource_arn (%s): %s", resourceArn, err) } - - if resp == nil || resp.WebACLs == nil { - return sdkdiag.AppendErrorf(diags, "reading WAFv2 WebACLs") + } else { + // Use existing ListWebACLs + filter by name logic + var foundWebACL awstypes.WebACLSummary + input := wafv2.ListWebACLsInput{ + Scope: scope, + Limit: aws.Int32(100), } - for _, webACL := range resp.WebACLs { - if aws.ToString(webACL.Name) == name { - foundWebACL = webACL - break + err := listWebACLsPages(ctx, conn, &input, func(page *wafv2.ListWebACLsOutput, lastPage bool) bool { + if page == nil { + return !lastPage + } + + for _, acl := range page.WebACLs { + if aws.ToString(acl.Name) == name { + foundWebACL = acl + return false + } } + + return !lastPage + }) + + if err != nil { + return sdkdiag.AppendErrorf(diags, "list WAFv2 WebACLs: %s", err) + } + + if foundWebACL.Id == nil { + return sdkdiag.AppendErrorf(diags, "WAFv2 WebACL not found for name: %s", name) } - if resp.NextMarker == nil { - break + // Get full WebACL details using GetWebACL + getResp, err := findWebACLByThreePartKey(ctx, conn, aws.ToString(foundWebACL.Id), aws.ToString(foundWebACL.Name), string(scope)) + + if err != nil { + return sdkdiag.AppendErrorf(diags, "reading WAFv2 WebACL (%s): %s", aws.ToString(foundWebACL.Id), err) } - input.NextMarker = resp.NextMarker + + webACL = getResp.WebACL } - if foundWebACL.Id == nil { - return sdkdiag.AppendErrorf(diags, "WAFv2 WebACL not found for name: %s", name) + if webACL == nil { + return sdkdiag.AppendErrorf(diags, "WAFv2 WebACL not found") } - d.SetId(aws.ToString(foundWebACL.Id)) - d.Set(names.AttrARN, foundWebACL.ARN) - d.Set(names.AttrDescription, foundWebACL.Description) + d.SetId(aws.ToString(webACL.Id)) + d.Set(names.AttrARN, webACL.ARN) + d.Set(names.AttrDescription, webACL.Description) + d.Set(names.AttrName, webACL.Name) return diags } + +// Helper function to detect CloudFront 
distribution ARNs +func isCloudFrontDistributionARN(s string) bool { + // CloudFront distribution ARNs: arn:partition:cloudfront::account:distribution/ID + return strings.Contains(s, ":cloudfront::") && strings.Contains(s, ":distribution/") && arn.IsARN(s) +} + +// Helper function to extract distribution ID from CloudFront ARN +func cloudFrontDistributionIDFromARN(arn string) (string, error) { + parts := strings.Split(arn, "/") + if len(parts) < 2 { + return "", fmt.Errorf("invalid CloudFront distribution ARN format: %s", arn) + } + return parts[len(parts)-1], nil +} + +// Helper function to find WebACL by CloudFront distribution ARN +func findWebACLByCloudFrontDistributionARN(ctx context.Context, client *conns.AWSClient, distributionARN string) (*awstypes.WebACL, error) { + // Extract distribution ID from ARN + distributionID, err := cloudFrontDistributionIDFromARN(distributionARN) + if err != nil { + return nil, err + } + + output, err := tfcloudfront.FindDistributionByID(ctx, client.CloudFrontClient(ctx), distributionID) + + if err != nil { + return nil, fmt.Errorf("getting CloudFront distribution (%s): %w", distributionID, err) + } + + webACLARN := aws.ToString(output.Distribution.DistributionConfig.WebACLId) + if webACLARN == "" { + return nil, &sdkretry.NotFoundError{ + Message: fmt.Sprintf("no WebACL associated with CloudFront distribution: %s", distributionID), + } + } + + // Now get the actual WebACL using WAFv2 API + wafConn := client.WAFV2Client(ctx) + + if !strings.Contains(webACLARN, ":wafv2:") || !arn.IsARN(webACLARN) { + // This would be a WAF Classic ID, not supported by this data source + return nil, fmt.Errorf("CloudFront distribution (%s) is associated with WAF Classic WebACL (%s), which is not supported by this data source. 
Use aws_waf_web_acl data source instead", distributionID, webACLARN) + } + + // Parse the ARN to extract name and ID + // WAFv2 ARN format: arn:partition:wafv2:region:account:global/webacl/name/id + parts := strings.Split(webACLARN, "/") + if len(parts) < 4 { + return nil, fmt.Errorf("invalid WAFv2 WebACL ARN format: %s", webACLARN) + } + + webACLName := parts[len(parts)-2] + webACLID := parts[len(parts)-1] + + var webACLOut *wafv2.GetWebACLOutput + if webACLOut, err = findWebACLByThreePartKey(ctx, wafConn, webACLID, webACLName, string(awstypes.ScopeCloudfront)); err != nil { + return nil, fmt.Errorf("finding WAFv2 WebACL (%s): %w", webACLARN, err) + } + if webACLOut == nil { + return nil, &sdkretry.NotFoundError{ + Message: fmt.Sprintf("no WAFv2 WebACL found: %s", webACLARN), + } + } + + return webACLOut.WebACL, nil +} diff --git a/internal/service/wafv2/web_acl_data_source_test.go b/internal/service/wafv2/web_acl_data_source_test.go index 4a957073b1ac..8945cc69e1ff 100644 --- a/internal/service/wafv2/web_acl_data_source_test.go +++ b/internal/service/wafv2/web_acl_data_source_test.go @@ -11,9 +11,135 @@ import ( sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-provider-aws/internal/acctest" + tfwafv2 "github.com/hashicorp/terraform-provider-aws/internal/service/wafv2" "github.com/hashicorp/terraform-provider-aws/names" ) +func TestIsCloudFrontDistributionARN(t *testing.T) { + t.Parallel() + tests := []struct { + name string + arn string + expected bool + }{ + { + name: "standard AWS partition", + arn: "arn:aws:cloudfront::123456789012:distribution/E12345678901234", //lintignore:AWSAT005 + expected: true, + }, + { + name: "AWS GovCloud partition", + arn: "arn:aws-us-gov:cloudfront::123456789012:distribution/E12345678901234", //lintignore:AWSAT005 + expected: true, + }, + { + name: "AWS China partition", + arn: 
"arn:aws-cn:cloudfront::123456789012:distribution/E12345678901234", //lintignore:AWSAT005 + expected: true, + }, + { + name: "ISOB partition", + arn: "arn:isob:cloudfront::123456789012:distribution/E12345678901234", //lintignore:AWSAT005 + expected: true, + }, + { + name: "unknown future partition", + arn: "arn:aws-new-region:cloudfront::123456789012:distribution/E12345678901234", //lintignore:AWSAT005 + expected: true, + }, + { + name: "ALB ARN", + arn: "arn:aws:elasticloadbalancing:us-west-2:123456789012:loadbalancer/app/my-load-balancer/50dc6c495c0c9188", //lintignore:AWSAT003,AWSAT005 + expected: false, + }, + { + name: "CloudFront origin access identity", + arn: "arn:aws:cloudfront::123456789012:origin-access-identity/E12345678901234", //lintignore:AWSAT005 + expected: false, + }, + { + name: "not an ARN", + arn: "not-an-arn", + expected: false, + }, + { + name: "invalid ARN format", + arn: "arn:aws:cloudfront:123456789012:distribution/E12345678901234", //lintignore:AWSAT005 + expected: false, + }, + } + + for _, tt := range tests { + t.Run(tt.arn, func(t *testing.T) { + t.Parallel() + result := tfwafv2.IsCloudFrontDistributionARN(tt.arn) + if result != tt.expected { + t.Errorf("isCloudFrontDistributionARN(%q) = %v, want %v", tt.arn, result, tt.expected) + } + }) + } +} + +func TestCloudFrontDistributionIDFromARN(t *testing.T) { + t.Parallel() + tests := []struct { + name string + arn string + expectedID string + expectError bool + }{ + { + name: "standard AWS CloudFront ARN", + arn: "arn:aws:cloudfront::123456789012:distribution/E12345678901234", //lintignore:AWSAT005 + expectedID: "E12345678901234", + }, + { + name: "GovCloud CloudFront ARN", + arn: "arn:aws-us-gov:cloudfront::123456789012:distribution/E12345678901234", //lintignore:AWSAT005 + expectedID: "E12345678901234", + }, + { + name: "China CloudFront ARN", + arn: "arn:aws-cn:cloudfront::123456789012:distribution/E12345678901234", //lintignore:AWSAT005 + expectedID: "E12345678901234", + }, + { + name: 
"ISOB CloudFront ARN", + arn: "arn:isob:cloudfront::123456789012:distribution/E12345678901234", //lintignore:AWSAT005 + expectedID: "E12345678901234", + }, + { + name: "invalid ARN - no slash", + arn: "invalid-arn", + expectError: true, + }, + { + name: "invalid ARN - missing distribution ID", + arn: "arn:aws:cloudfront::123456789012:distribution", //lintignore:AWSAT005 + expectError: true, + }, + } + + for _, tt := range tests { + t.Run(tt.arn, func(t *testing.T) { + t.Parallel() + id, err := tfwafv2.CloudFrontDistributionIDFromARN(tt.arn) + if tt.expectError { + if err == nil { + t.Errorf("cloudFrontDistributionIDFromARN(%q) expected error, got nil", tt.arn) + } + } else { + if err != nil { + t.Errorf("cloudFrontDistributionIDFromARN(%q) unexpected error: %v", tt.arn, err) + } + if id != tt.expectedID { + t.Errorf("cloudFrontDistributionIDFromARN(%q) = %q, want %q", tt.arn, id, tt.expectedID) + } + } + }) + } +} + func TestAccWAFV2WebACLDataSource_basic(t *testing.T) { ctx := acctest.Context(t) name := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -44,10 +170,81 @@ func TestAccWAFV2WebACLDataSource_basic(t *testing.T) { }) } +func TestAccWAFV2WebACLDataSource_resource(t *testing.T) { + ctx := acctest.Context(t) + name := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_wafv2_web_acl.test" + datasourceName := "data.aws_wafv2_web_acl.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheckScopeRegional(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.WAFV2ServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + Config: testAccWebACLDataSourceConfig_resource(name), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrPair(datasourceName, names.AttrARN, resourceName, names.AttrARN), + acctest.MatchResourceAttrRegionalARN(ctx, datasourceName, names.AttrARN, "wafv2", 
regexache.MustCompile(fmt.Sprintf("regional/webacl/%v/.+$", name))), + resource.TestCheckResourceAttrPair(datasourceName, names.AttrDescription, resourceName, names.AttrDescription), + resource.TestCheckResourceAttrPair(datasourceName, names.AttrID, resourceName, names.AttrID), + resource.TestCheckResourceAttrPair(datasourceName, names.AttrName, resourceName, names.AttrName), + resource.TestCheckResourceAttrPair(datasourceName, names.AttrScope, resourceName, names.AttrScope), + ), + }, + }, + }) +} + +func TestAccWAFV2WebACLDataSource_resourceNotFound(t *testing.T) { + ctx := acctest.Context(t) + name := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheckScopeRegional(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.WAFV2ServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + Config: testAccWebACLDataSourceConfig_resourceNotFound(name), + ExpectError: regexache.MustCompile(`WAFv2 WebACL not found for`), + }, + }, + }) +} + +func TestAccWAFV2WebACLDataSource_cloudfront(t *testing.T) { + ctx := acctest.Context(t) + name := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_wafv2_web_acl.test" + datasourceName := "data.aws_wafv2_web_acl.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckWAFV2CloudFrontScope(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.WAFV2ServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + Config: testAccWebACLDataSourceConfig_cloudfront(name), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrPair(datasourceName, names.AttrARN, resourceName, names.AttrARN), + resource.TestCheckResourceAttrPair(datasourceName, names.AttrDescription, resourceName, names.AttrDescription), + 
resource.TestCheckResourceAttrPair(datasourceName, names.AttrID, resourceName, names.AttrID), + resource.TestCheckResourceAttrPair(datasourceName, names.AttrName, resourceName, names.AttrName), + resource.TestCheckResourceAttr(datasourceName, names.AttrScope, "CLOUDFRONT"), + ), + }, + }, + }) +} + func testAccWebACLDataSourceConfig_name(name string) string { return fmt.Sprintf(` resource "aws_wafv2_web_acl" "test" { - name = "%s" + name = %[1]q scope = "REGIONAL" default_action { @@ -71,7 +268,7 @@ data "aws_wafv2_web_acl" "test" { func testAccWebACLDataSourceConfig_nonExistent(name string) string { return fmt.Sprintf(` resource "aws_wafv2_web_acl" "test" { - name = "%s" + name = %[1]q scope = "REGIONAL" default_action { @@ -91,3 +288,201 @@ data "aws_wafv2_web_acl" "test" { } `, name) } + +func testAccWebACLDataSourceConfig_resource(name string) string { + return fmt.Sprintf(` +resource "aws_lb" "test" { + name = %[1]q + internal = false + load_balancer_type = "application" + subnets = aws_subnet.test[*].id + + enable_deletion_protection = false +} + +resource "aws_vpc" "test" { + cidr_block = "10.0.0.0/16" + + tags = { + Name = %[1]q + } +} + +resource "aws_subnet" "test" { + count = 2 + + vpc_id = aws_vpc.test.id + cidr_block = "10.0.${count.index}.0/24" + availability_zone = data.aws_availability_zones.available.names[count.index] + + tags = { + Name = "%[1]s-${count.index}" + } +} + +data "aws_availability_zones" "available" { + state = "available" + + filter { + name = "opt-in-status" + values = ["opt-in-not-required"] + } +} + +resource "aws_internet_gateway" "test" { + vpc_id = aws_vpc.test.id + + tags = { + Name = %[1]q + } +} + +resource "aws_wafv2_web_acl" "test" { + name = %[1]q + scope = "REGIONAL" + + default_action { + block {} + } + + visibility_config { + cloudwatch_metrics_enabled = false + metric_name = "friendly-rule-metric-name" + sampled_requests_enabled = false + } +} + +resource "aws_wafv2_web_acl_association" "test" { + resource_arn = 
aws_lb.test.arn + web_acl_arn = aws_wafv2_web_acl.test.arn +} + +data "aws_wafv2_web_acl" "test" { + resource_arn = aws_lb.test.arn + scope = "REGIONAL" + + depends_on = [aws_wafv2_web_acl_association.test] +} +`, name) +} + +func testAccWebACLDataSourceConfig_resourceNotFound(name string) string { + return fmt.Sprintf(` +resource "aws_lb" "test" { + name = %[1]q + internal = false + load_balancer_type = "application" + subnets = aws_subnet.test[*].id + + enable_deletion_protection = false +} + +resource "aws_vpc" "test" { + cidr_block = "10.0.0.0/16" + + tags = { + Name = %[1]q + } +} + +resource "aws_subnet" "test" { + count = 2 + + vpc_id = aws_vpc.test.id + cidr_block = "10.0.${count.index}.0/24" + availability_zone = data.aws_availability_zones.available.names[count.index] + + tags = { + Name = "%[1]s-${count.index}" + } +} + +data "aws_availability_zones" "available" { + state = "available" + + filter { + name = "opt-in-status" + values = ["opt-in-not-required"] + } +} + +resource "aws_internet_gateway" "test" { + vpc_id = aws_vpc.test.id + + tags = { + Name = %[1]q + } +} + +data "aws_wafv2_web_acl" "test" { + resource_arn = aws_lb.test.arn + scope = "REGIONAL" +} +`, name) +} + +func testAccWebACLDataSourceConfig_cloudfront(name string) string { + return fmt.Sprintf(` +resource "aws_wafv2_web_acl" "test" { + name = %[1]q + scope = "CLOUDFRONT" + + default_action { + block {} + } + + visibility_config { + cloudwatch_metrics_enabled = false + metric_name = "friendly-rule-metric-name" + sampled_requests_enabled = false + } +} + +resource "aws_cloudfront_distribution" "test" { + web_acl_id = aws_wafv2_web_acl.test.arn + + origin { + domain_name = "www.example.com" + origin_id = "test" + + custom_origin_config { + http_port = 80 + https_port = 443 + origin_protocol_policy = "https-only" + origin_ssl_protocols = ["TLSv1.2"] + } + } + + enabled = true + + default_cache_behavior { + allowed_methods = ["GET", "HEAD"] + cached_methods = ["GET", "HEAD"] + 
target_origin_id = "test" + viewer_protocol_policy = "allow-all" + + forwarded_values { + query_string = false + cookies { + forward = "all" + } + } + } + + restrictions { + geo_restriction { + restriction_type = "none" + } + } + + viewer_certificate { + cloudfront_default_certificate = true + } +} + +data "aws_wafv2_web_acl" "test" { + resource_arn = aws_cloudfront_distribution.test.arn + scope = "CLOUDFRONT" +} +`, name) +} diff --git a/internal/service/wafv2/web_acl_rule_group_association.go b/internal/service/wafv2/web_acl_rule_group_association.go new file mode 100644 index 000000000000..a21361204a9c --- /dev/null +++ b/internal/service/wafv2/web_acl_rule_group_association.go @@ -0,0 +1,1263 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package wafv2 + +import ( + "context" + "fmt" + "strings" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/aws/arn" + "github.com/aws/aws-sdk-go-v2/service/wafv2" + awstypes "github.com/aws/aws-sdk-go-v2/service/wafv2/types" + "github.com/hashicorp/terraform-plugin-framework-timeouts/resource/timeouts" + "github.com/hashicorp/terraform-plugin-framework-validators/int32validator" + "github.com/hashicorp/terraform-plugin-framework-validators/listvalidator" + "github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-framework/types/basetypes" + "github.com/hashicorp/terraform-provider-aws/internal/create" + 
"github.com/hashicorp/terraform-provider-aws/internal/errs/fwdiag" + intflex "github.com/hashicorp/terraform-provider-aws/internal/flex" + "github.com/hashicorp/terraform-provider-aws/internal/framework" + fwflex "github.com/hashicorp/terraform-provider-aws/internal/framework/flex" + fwtypes "github.com/hashicorp/terraform-provider-aws/internal/framework/types" + fwvalidators "github.com/hashicorp/terraform-provider-aws/internal/framework/validators" + "github.com/hashicorp/terraform-provider-aws/internal/retry" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + "github.com/hashicorp/terraform-provider-aws/names" +) + +const ( + webACLRuleGroupAssociationResourceIDPartCount = 4 + overrideActionNone = "none" + overrideActionCount = "count" +) + +// Function annotations are used for resource registration to the Provider. DO NOT EDIT. +// @FrameworkResource("aws_wafv2_web_acl_rule_group_association", name="Web ACL Rule Group Association") +func newResourceWebACLRuleGroupAssociation(_ context.Context) (resource.ResourceWithConfigure, error) { + r := &resourceWebACLRuleGroupAssociation{} + + r.SetDefaultCreateTimeout(30 * time.Minute) + r.SetDefaultDeleteTimeout(30 * time.Minute) + + return r, nil +} + +const ( + ResNameWebACLRuleGroupAssociation = "Web ACL Rule Group Association" +) + +type resourceWebACLRuleGroupAssociation struct { + framework.ResourceWithModel[resourceWebACLRuleGroupAssociationModel] + framework.WithTimeouts +} + +func (r *resourceWebACLRuleGroupAssociation) Schema(ctx context.Context, req resource.SchemaRequest, resp *resource.SchemaResponse) { + ruleActionOverrideLNB := schema.ListNestedBlock{ + CustomType: fwtypes.NewListNestedObjectTypeOf[ruleActionOverrideModel](ctx), + Validators: []validator.List{ + listvalidator.SizeAtMost(100), + }, + NestedObject: schema.NestedBlockObject{ + Attributes: map[string]schema.Attribute{ + names.AttrName: schema.StringAttribute{ + Required: true, + Validators: []validator.String{ + 
stringvalidator.LengthBetween(1, 128), + }, + Description: "Name of the rule to override.", + }, + }, + Blocks: map[string]schema.Block{ + "action_to_use": schema.ListNestedBlock{ + CustomType: fwtypes.NewListNestedObjectTypeOf[actionToUseModel](ctx), + Validators: []validator.List{ + listvalidator.SizeAtMost(1), + listvalidator.SizeAtLeast(1), + }, + NestedObject: schema.NestedBlockObject{ + Blocks: map[string]schema.Block{ + "allow": schema.ListNestedBlock{ + CustomType: fwtypes.NewListNestedObjectTypeOf[allowActionModel](ctx), + Validators: []validator.List{ + listvalidator.SizeAtMost(1), + }, + NestedObject: schema.NestedBlockObject{ + Blocks: map[string]schema.Block{ + "custom_request_handling": schema.ListNestedBlock{ + CustomType: fwtypes.NewListNestedObjectTypeOf[customRequestHandlingModel](ctx), + Validators: []validator.List{ + listvalidator.SizeAtMost(1), + }, + NestedObject: schema.NestedBlockObject{ + Blocks: map[string]schema.Block{ + "insert_header": schema.ListNestedBlock{ + CustomType: fwtypes.NewListNestedObjectTypeOf[insertHeaderModel](ctx), + Validators: []validator.List{ + listvalidator.SizeAtLeast(1), + }, + NestedObject: schema.NestedBlockObject{ + Attributes: map[string]schema.Attribute{ + names.AttrName: schema.StringAttribute{ + Required: true, + Validators: []validator.String{ + stringvalidator.LengthBetween(1, 64), + }, + }, + names.AttrValue: schema.StringAttribute{ + Required: true, + Validators: []validator.String{ + stringvalidator.LengthBetween(1, 255), + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + "block": schema.ListNestedBlock{ + CustomType: fwtypes.NewListNestedObjectTypeOf[blockActionModel](ctx), + Validators: []validator.List{ + listvalidator.SizeAtMost(1), + }, + NestedObject: schema.NestedBlockObject{ + Blocks: map[string]schema.Block{ + "custom_response": schema.ListNestedBlock{ + CustomType: fwtypes.NewListNestedObjectTypeOf[customResponseModel](ctx), + Validators: []validator.List{ + 
listvalidator.SizeAtMost(1), + }, + NestedObject: schema.NestedBlockObject{ + Attributes: map[string]schema.Attribute{ + "custom_response_body_key": schema.StringAttribute{ + Optional: true, + Validators: []validator.String{ + stringvalidator.LengthBetween(1, 128), + }, + }, + "response_code": schema.Int32Attribute{ + Required: true, + Validators: []validator.Int32{ + int32validator.Between(200, 600), + }, + }, + }, + Blocks: map[string]schema.Block{ + "response_header": schema.ListNestedBlock{ + CustomType: fwtypes.NewListNestedObjectTypeOf[responseHeaderModel](ctx), + NestedObject: schema.NestedBlockObject{ + Attributes: map[string]schema.Attribute{ + names.AttrName: schema.StringAttribute{ + Required: true, + Validators: []validator.String{ + stringvalidator.LengthBetween(1, 64), + }, + }, + names.AttrValue: schema.StringAttribute{ + Required: true, + Validators: []validator.String{ + stringvalidator.LengthBetween(1, 255), + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + "captcha": schema.ListNestedBlock{ + CustomType: fwtypes.NewListNestedObjectTypeOf[captchaActionModel](ctx), + Validators: []validator.List{ + listvalidator.SizeAtMost(1), + }, + NestedObject: schema.NestedBlockObject{ + Blocks: map[string]schema.Block{ + "custom_request_handling": schema.ListNestedBlock{ + CustomType: fwtypes.NewListNestedObjectTypeOf[customRequestHandlingModel](ctx), + Validators: []validator.List{ + listvalidator.SizeAtMost(1), + }, + NestedObject: schema.NestedBlockObject{ + Blocks: map[string]schema.Block{ + "insert_header": schema.ListNestedBlock{ + CustomType: fwtypes.NewListNestedObjectTypeOf[insertHeaderModel](ctx), + Validators: []validator.List{ + listvalidator.SizeAtLeast(1), + }, + NestedObject: schema.NestedBlockObject{ + Attributes: map[string]schema.Attribute{ + names.AttrName: schema.StringAttribute{ + Required: true, + Validators: []validator.String{ + stringvalidator.LengthBetween(1, 64), + }, + }, + names.AttrValue: schema.StringAttribute{ + Required: 
true, + Validators: []validator.String{ + stringvalidator.LengthBetween(1, 255), + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + "challenge": schema.ListNestedBlock{ + CustomType: fwtypes.NewListNestedObjectTypeOf[challengeActionModel](ctx), + Validators: []validator.List{ + listvalidator.SizeAtMost(1), + }, + NestedObject: schema.NestedBlockObject{ + Blocks: map[string]schema.Block{ + "custom_request_handling": schema.ListNestedBlock{ + CustomType: fwtypes.NewListNestedObjectTypeOf[customRequestHandlingModel](ctx), + Validators: []validator.List{ + listvalidator.SizeAtMost(1), + }, + NestedObject: schema.NestedBlockObject{ + Blocks: map[string]schema.Block{ + "insert_header": schema.ListNestedBlock{ + CustomType: fwtypes.NewListNestedObjectTypeOf[insertHeaderModel](ctx), + Validators: []validator.List{ + listvalidator.SizeAtLeast(1), + }, + NestedObject: schema.NestedBlockObject{ + Attributes: map[string]schema.Attribute{ + names.AttrName: schema.StringAttribute{ + Required: true, + Validators: []validator.String{ + stringvalidator.LengthBetween(1, 64), + }, + }, + names.AttrValue: schema.StringAttribute{ + Required: true, + Validators: []validator.String{ + stringvalidator.LengthBetween(1, 255), + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + "count": schema.ListNestedBlock{ + CustomType: fwtypes.NewListNestedObjectTypeOf[countActionModel](ctx), + Validators: []validator.List{ + listvalidator.SizeAtMost(1), + }, + NestedObject: schema.NestedBlockObject{ + Blocks: map[string]schema.Block{ + "custom_request_handling": schema.ListNestedBlock{ + CustomType: fwtypes.NewListNestedObjectTypeOf[customRequestHandlingModel](ctx), + Validators: []validator.List{ + listvalidator.SizeAtMost(1), + }, + NestedObject: schema.NestedBlockObject{ + Blocks: map[string]schema.Block{ + "insert_header": schema.ListNestedBlock{ + CustomType: fwtypes.NewListNestedObjectTypeOf[insertHeaderModel](ctx), + Validators: []validator.List{ + listvalidator.SizeAtLeast(1), + }, + 
NestedObject: schema.NestedBlockObject{ + Attributes: map[string]schema.Attribute{ + names.AttrName: schema.StringAttribute{ + Required: true, + Validators: []validator.String{ + stringvalidator.LengthBetween(1, 64), + }, + }, + names.AttrValue: schema.StringAttribute{ + Required: true, + Validators: []validator.String{ + stringvalidator.LengthBetween(1, 255), + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + Description: "Action to use in place of the rule action.", + }, + }, + }, + Description: "Action settings to use in place of rule actions configured inside the rule group. You can specify up to 100 overrides.", + } + resp.Schema = schema.Schema{ + Attributes: map[string]schema.Attribute{ + "rule_name": schema.StringAttribute{ + Required: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + Validators: []validator.String{ + stringvalidator.LengthBetween(1, 128), + }, + Description: "Name of the rule to create in the Web ACL that references the rule group.", + }, + names.AttrPriority: schema.Int32Attribute{ + Required: true, + Validators: []validator.Int32{ + int32validator.AtLeast(0), + }, + Description: "Priority of the rule within the Web ACL.", + }, + "web_acl_arn": schema.StringAttribute{ + Required: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + Validators: []validator.String{ + fwvalidators.ARN(), + }, + Description: "ARN of the Web ACL to associate the Rule Group with.", + }, + "override_action": schema.StringAttribute{ + Optional: true, + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + Validators: []validator.String{ + stringvalidator.OneOf(overrideActionNone, overrideActionCount), + }, + Description: "Override action for the rule group. Valid values are 'none' and 'count'. 
Defaults to 'none'.", + }, + }, + Blocks: map[string]schema.Block{ + "rule_group_reference": schema.ListNestedBlock{ + CustomType: fwtypes.NewListNestedObjectTypeOf[ruleGroupReferenceModel](ctx), + Validators: []validator.List{ + listvalidator.SizeAtMost(1), + listvalidator.SizeAtLeast(0), + listvalidator.ExactlyOneOf(path.MatchRelative().AtParent().AtName("managed_rule_group")), + }, + NestedObject: schema.NestedBlockObject{ + Attributes: map[string]schema.Attribute{ + names.AttrARN: schema.StringAttribute{ + Required: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + Validators: []validator.String{ + fwvalidators.ARN(), + }, + Description: "ARN of the Rule Group to associate with the Web ACL.", + }, + }, + Blocks: map[string]schema.Block{ + "rule_action_override": ruleActionOverrideLNB, + }, + }, + Description: "Rule Group reference configuration.", + }, + "managed_rule_group": schema.ListNestedBlock{ + CustomType: fwtypes.NewListNestedObjectTypeOf[managedRuleGroupModel](ctx), + Validators: []validator.List{ + listvalidator.SizeAtMost(1), + listvalidator.SizeAtLeast(0), + listvalidator.ExactlyOneOf(path.MatchRelative().AtParent().AtName("rule_group_reference")), + }, + NestedObject: schema.NestedBlockObject{ + Attributes: map[string]schema.Attribute{ + names.AttrName: schema.StringAttribute{ + Required: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + Validators: []validator.String{ + stringvalidator.LengthBetween(1, 128), + }, + Description: "Name of the managed rule group.", + }, + "vendor_name": schema.StringAttribute{ + Required: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + Validators: []validator.String{ + stringvalidator.LengthBetween(1, 128), + }, + Description: "Name of the managed rule group vendor.", + }, + names.AttrVersion: schema.StringAttribute{ + Optional: true, + PlanModifiers: []planmodifier.String{ + 
stringplanmodifier.RequiresReplace(), + }, + Validators: []validator.String{ + stringvalidator.LengthBetween(1, 64), + }, + Description: "Version of the managed rule group. Omit this to use the default version.", + }, + }, + Blocks: map[string]schema.Block{ + "rule_action_override": ruleActionOverrideLNB, + }, + }, + Description: "Managed rule group configuration.", + }, + names.AttrTimeouts: timeouts.Block(ctx, timeouts.Opts{ + Create: true, + Update: true, + Delete: true, + }), + }, + Description: "Associates a WAFv2 Rule Group (custom or managed) with a Web ACL by adding a rule that references the Rule Group.", + } +} + +func (r *resourceWebACLRuleGroupAssociation) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { + conn := r.Meta().WAFV2Client(ctx) + + var plan resourceWebACLRuleGroupAssociationModel + resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...) + if resp.Diagnostics.HasError() { + return + } + + // Parse Web ACL ARN to get ID, name, and scope + webACLID, webACLName, webACLScope, err := parseWebACLARN(plan.WebACLARN.ValueString()) + if err != nil { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.WAFV2, create.ErrActionCreating, ResNameWebACLRuleGroupAssociation, plan.RuleName.String(), err), + err.Error(), + ) + return + } + + // Get current Web ACL configuration + webACL, err := findWebACLByThreePartKey(ctx, conn, webACLID, webACLName, webACLScope) + if err != nil { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.WAFV2, create.ErrActionCreating, ResNameWebACLRuleGroupAssociation, plan.RuleName.String(), err), + err.Error(), + ) + return + } + + // Check if rule with same priority or name already exists + for _, rule := range webACL.WebACL.Rules { + if rule.Priority == plan.Priority.ValueInt32() { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.WAFV2, create.ErrActionCreating, ResNameWebACLRuleGroupAssociation, plan.RuleName.String(), nil), + 
fmt.Sprintf("Rule with priority %d already exists in Web ACL", plan.Priority.ValueInt32()), + ) + return + } + if aws.ToString(rule.Name) == plan.RuleName.ValueString() { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.WAFV2, create.ErrActionCreating, ResNameWebACLRuleGroupAssociation, plan.RuleName.String(), nil), + fmt.Sprintf("Rule with name %s already exists in Web ACL", plan.RuleName.ValueString()), + ) + return + } + } + + // Get rule configuration from either custom or managed rule group + var ruleGroupARN string + var ruleGroupName string + var ruleGroupVendorName string + var ruleGroupVersion string + var ruleActionOverrides []awstypes.RuleActionOverride + var ruleStatement *awstypes.Statement + + // Check for custom rule group reference + if !plan.RuleGroupReference.IsNull() && !plan.RuleGroupReference.IsUnknown() { + ruleGroupRefs := plan.RuleGroupReference.Elements() + if len(ruleGroupRefs) > 0 { + var ruleGroupRefModel ruleGroupReferenceModel + resp.Diagnostics.Append(ruleGroupRefs[0].(fwtypes.ObjectValueOf[ruleGroupReferenceModel]).As(ctx, &ruleGroupRefModel, basetypes.ObjectAsOptions{})...) + if resp.Diagnostics.HasError() { + return + } + ruleGroupARN = ruleGroupRefModel.ARN.ValueString() + + // Create rule group reference statement + ruleGroupRefStatement := &awstypes.RuleGroupReferenceStatement{ + ARN: aws.String(ruleGroupARN), + } + + // Add rule action overrides if specified + if !ruleGroupRefModel.RuleActionOverride.IsNull() && !ruleGroupRefModel.RuleActionOverride.IsUnknown() { + resp.Diagnostics.Append(fwflex.Expand(ctx, ruleGroupRefModel.RuleActionOverride, &ruleActionOverrides)...) 
+ if resp.Diagnostics.HasError() { + return + } + ruleGroupRefStatement.RuleActionOverrides = ruleActionOverrides + } + + ruleStatement = &awstypes.Statement{ + RuleGroupReferenceStatement: ruleGroupRefStatement, + } + } + } + + // Check for managed rule group (mutually exclusive with custom) + if !plan.ManagedRuleGroup.IsNull() && !plan.ManagedRuleGroup.IsUnknown() { + managedRuleGroups := plan.ManagedRuleGroup.Elements() + if len(managedRuleGroups) > 0 { + var managedRuleGroupRef managedRuleGroupModel + resp.Diagnostics.Append(managedRuleGroups[0].(fwtypes.ObjectValueOf[managedRuleGroupModel]).As(ctx, &managedRuleGroupRef, basetypes.ObjectAsOptions{})...) + if resp.Diagnostics.HasError() { + return + } + ruleGroupName = managedRuleGroupRef.Name.ValueString() + ruleGroupVendorName = managedRuleGroupRef.VendorName.ValueString() + if !managedRuleGroupRef.Version.IsNull() && !managedRuleGroupRef.Version.IsUnknown() { + ruleGroupVersion = managedRuleGroupRef.Version.ValueString() + } + + // Create managed rule group statement + managedRuleGroupStatement := &awstypes.ManagedRuleGroupStatement{ + Name: aws.String(ruleGroupName), + VendorName: aws.String(ruleGroupVendorName), + } + if ruleGroupVersion != "" { + managedRuleGroupStatement.Version = aws.String(ruleGroupVersion) + } + + // Add rule action overrides if specified + if !managedRuleGroupRef.RuleActionOverride.IsNull() && !managedRuleGroupRef.RuleActionOverride.IsUnknown() { + resp.Diagnostics.Append(fwflex.Expand(ctx, managedRuleGroupRef.RuleActionOverride, &ruleActionOverrides)...) 
+ if resp.Diagnostics.HasError() { + return + } + managedRuleGroupStatement.RuleActionOverrides = ruleActionOverrides + } + + ruleStatement = &awstypes.Statement{ + ManagedRuleGroupStatement: managedRuleGroupStatement, + } + } + } + + if ruleStatement == nil { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.WAFV2, create.ErrActionCreating, ResNameWebACLRuleGroupAssociation, plan.RuleName.String(), nil), + "Either rule_group_reference or managed_rule_group block is required", + ) + return + } + + // Create new rule with the appropriate statement type + newRule := awstypes.Rule{ + Name: plan.RuleName.ValueStringPointer(), + Priority: plan.Priority.ValueInt32(), + Statement: ruleStatement, + VisibilityConfig: &awstypes.VisibilityConfig{ + SampledRequestsEnabled: true, + CloudWatchMetricsEnabled: true, + MetricName: plan.RuleName.ValueStringPointer(), + }, + } + + // Set override action + overrideAction := plan.OverrideAction.ValueString() + if overrideAction == "" { + overrideAction = overrideActionNone + plan.OverrideAction = types.StringValue(overrideActionNone) // Set the default in the plan + } + + switch overrideAction { + case overrideActionNone: + newRule.OverrideAction = &awstypes.OverrideAction{ + None: &awstypes.NoneAction{}, + } + case overrideActionCount: + newRule.OverrideAction = &awstypes.OverrideAction{ + Count: &awstypes.CountAction{}, + } + } + + // Add the new rule to existing rules + webACL.WebACL.Rules = append(webACL.WebACL.Rules, newRule) + + // Update the Web ACL + updateInput := &wafv2.UpdateWebACLInput{ + Id: aws.String(webACLID), + Name: aws.String(webACLName), + Scope: awstypes.Scope(webACLScope), + DefaultAction: webACL.WebACL.DefaultAction, + Rules: webACL.WebACL.Rules, + VisibilityConfig: webACL.WebACL.VisibilityConfig, + LockToken: webACL.LockToken, + AssociationConfig: webACL.WebACL.AssociationConfig, + CaptchaConfig: webACL.WebACL.CaptchaConfig, + ChallengeConfig: webACL.WebACL.ChallengeConfig, + 
CustomResponseBodies: webACL.WebACL.CustomResponseBodies, + TokenDomains: webACL.WebACL.TokenDomains, + } + + // Only set description if it's not empty + if webACL.WebACL.Description != nil && aws.ToString(webACL.WebACL.Description) != "" { + updateInput.Description = webACL.WebACL.Description + } + + const timeout = 5 * time.Minute + _, err = tfresource.RetryWhenIsA[any, *awstypes.WAFUnavailableEntityException](ctx, timeout, func(ctx context.Context) (any, error) { + return conn.UpdateWebACL(ctx, updateInput) + }) + + if err != nil { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.WAFV2, create.ErrActionCreating, ResNameWebACLRuleGroupAssociation, plan.RuleName.String(), err), + err.Error(), + ) + return + } + + resp.Diagnostics.Append(resp.State.Set(ctx, plan)...) +} + +func (r *resourceWebACLRuleGroupAssociation) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { + var state resourceWebACLRuleGroupAssociationModel + resp.Diagnostics.Append(req.State.Get(ctx, &state)...) 
+ if resp.Diagnostics.HasError() { + return + } + + conn := r.Meta().WAFV2Client(ctx) + + // Use attributes directly instead of parsing ID + webACLARN := state.WebACLARN.ValueString() + ruleName := state.RuleName.ValueString() + + // Parse Web ACL ARN to get ID, name, and scope + webACLID, webACLName, webACLScope, err := parseWebACLARN(webACLARN) + if err != nil { + resp.Diagnostics.AddError( + "Reading WAFv2 Web ACL Rule Group Association", + fmt.Sprintf("Error parsing Web ACL ARN: %s", err), + ) + return + } + + // Get the Web ACL and check if the rule group is associated + webACL, err := findWebACLByThreePartKey(ctx, conn, webACLID, webACLName, webACLScope) + if err != nil { + if retry.NotFound(err) { + resp.Diagnostics.Append(fwdiag.NewResourceNotFoundWarningDiagnostic(err)) + resp.State.RemoveResource(ctx) + return + } + + resp.Diagnostics.AddError( + "Reading WAFv2 Web ACL Rule Group Association", + fmt.Sprintf("Error reading Web ACL: %s", err), + ) + return + } + + // Find the rule group in the Web ACL rules + found := false + for _, rule := range webACL.WebACL.Rules { + if aws.ToString(rule.Name) != ruleName { + continue + } + + // Check if this rule matches our rule group configuration from state + if rule.Statement != nil { + var matchesRuleGroup bool + var ruleActionOverrides fwtypes.ListNestedObjectValueOf[ruleActionOverrideModel] + + // Check if we have a custom rule group in state + if !state.RuleGroupReference.IsNull() && !state.RuleGroupReference.IsUnknown() && rule.Statement.RuleGroupReferenceStatement != nil { + // Get the ARN from state for comparison + ruleGroupRefs := state.RuleGroupReference.Elements() + if len(ruleGroupRefs) > 0 { + var ruleGroupRefModel ruleGroupReferenceModel + resp.Diagnostics.Append(ruleGroupRefs[0].(fwtypes.ObjectValueOf[ruleGroupReferenceModel]).As(ctx, &ruleGroupRefModel, basetypes.ObjectAsOptions{})...) 
+ if resp.Diagnostics.HasError() { + return + } + + if aws.ToString(rule.Statement.RuleGroupReferenceStatement.ARN) == ruleGroupRefModel.ARN.ValueString() { + matchesRuleGroup = true + // Handle rule action overrides with autoflex + if rule.Statement.RuleGroupReferenceStatement.RuleActionOverrides != nil { + resp.Diagnostics.Append(fwflex.Flatten(ctx, rule.Statement.RuleGroupReferenceStatement.RuleActionOverrides, &ruleActionOverrides)...) + if resp.Diagnostics.HasError() { + return + } + } else { + ruleActionOverrides = fwtypes.NewListNestedObjectValueOfNull[ruleActionOverrideModel](ctx) + } + + // Update the rule group reference nested structure + ruleGroupRefModel.RuleActionOverride = ruleActionOverrides + listValue, diags := fwtypes.NewListNestedObjectValueOfSlice(ctx, []*ruleGroupReferenceModel{&ruleGroupRefModel}, nil) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + state.RuleGroupReference = listValue + state.ManagedRuleGroup = fwtypes.NewListNestedObjectValueOfNull[managedRuleGroupModel](ctx) + } + } + } else if !state.ManagedRuleGroup.IsNull() && !state.ManagedRuleGroup.IsUnknown() && rule.Statement.ManagedRuleGroupStatement != nil { + // Check if we have a managed rule group in state + managedRuleGroups := state.ManagedRuleGroup.Elements() + if len(managedRuleGroups) > 0 { + var managedRuleGroupRef managedRuleGroupModel + resp.Diagnostics.Append(managedRuleGroups[0].(fwtypes.ObjectValueOf[managedRuleGroupModel]).As(ctx, &managedRuleGroupRef, basetypes.ObjectAsOptions{})...) 
+ if resp.Diagnostics.HasError() { + return + } + + managedStmt := rule.Statement.ManagedRuleGroupStatement + // Check if this matches our managed rule group from state + if aws.ToString(managedStmt.Name) == managedRuleGroupRef.Name.ValueString() && + aws.ToString(managedStmt.VendorName) == managedRuleGroupRef.VendorName.ValueString() { + // Check version match (both can be empty/null) + stateVersion := managedRuleGroupRef.Version.ValueString() + ruleVersion := aws.ToString(managedStmt.Version) + if stateVersion == ruleVersion { + matchesRuleGroup = true + // Handle rule action overrides with autoflex + if managedStmt.RuleActionOverrides != nil { + resp.Diagnostics.Append(fwflex.Flatten(ctx, managedStmt.RuleActionOverrides, &ruleActionOverrides)...) + if resp.Diagnostics.HasError() { + return + } + } else { + ruleActionOverrides = fwtypes.NewListNestedObjectValueOfNull[ruleActionOverrideModel](ctx) + } + + // Update the managed rule group nested structure + managedRuleGroupRef.RuleActionOverride = ruleActionOverrides + listValue, diags := fwtypes.NewListNestedObjectValueOfSlice(ctx, []*managedRuleGroupModel{&managedRuleGroupRef}, nil) + resp.Diagnostics.Append(diags...) 
+			if resp.Diagnostics.HasError() {
+				return
+			}
+			state.ManagedRuleGroup = listValue
+			state.RuleGroupReference = fwtypes.NewListNestedObjectValueOfNull[ruleGroupReferenceModel](ctx)
+				}
+			}
+		}
+	}
+
+			if matchesRuleGroup {
+				found = true
+				state.Priority = types.Int32Value(rule.Priority)
+
+				// Determine override action
+				overrideAction := overrideActionNone
+				if rule.OverrideAction != nil {
+					if rule.OverrideAction.Count != nil {
+						overrideAction = overrideActionCount
+					} else if rule.OverrideAction.None != nil {
+						overrideAction = overrideActionNone
+					}
+				}
+				state.OverrideAction = types.StringValue(overrideAction)
+				break
+			}
+		}
+	}
+
+	if !found {
+		resp.Diagnostics.AddWarning(
+			"Rule Group Association Not Found",
+			"Rule group association was not found in Web ACL, removing from state",
+		)
+		resp.State.RemoveResource(ctx)
+		return
+	}
+
+	// Update state with current values (WebACLARN and RuleName should already be set from current state)
+	resp.Diagnostics.Append(resp.State.Set(ctx, &state)...)
+}
+
+// Update edits the associated rule in place: it reads the whole Web ACL,
+// rewrites the matching rule's priority, override action, and rule action
+// overrides, then calls UpdateWebACL with the full (modified) rule list.
+// NOTE(review): concurrency safety relies on the Web ACL LockToken passed
+// back to UpdateWebACL — confirm behavior under parallel rule updates.
+func (r *resourceWebACLRuleGroupAssociation) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) {
+	var plan, state resourceWebACLRuleGroupAssociationModel
+	resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...)
+	resp.Diagnostics.Append(req.State.Get(ctx, &state)...)
+	if resp.Diagnostics.HasError() {
+		return
+	}
+
+	conn := r.Meta().WAFV2Client(ctx)
+
+	// Parse Web ACL ARN to get ID, name, and scope
+	webACLARN := plan.WebACLARN.ValueString()
+	webACLID, webACLName, webACLScope, err := parseWebACLARN(webACLARN)
+	if err != nil {
+		resp.Diagnostics.AddError(
+			create.ProblemStandardMessage(names.WAFV2, create.ErrActionUpdating, ResNameWebACLRuleGroupAssociation, plan.RuleName.String(), err),
+			err.Error(),
+		)
+		return
+	}
+
+	// Get current Web ACL configuration
+	webACL, err := findWebACLByThreePartKey(ctx, conn, webACLID, webACLName, webACLScope)
+	if err != nil {
+		resp.Diagnostics.AddError(
+			create.ProblemStandardMessage(names.WAFV2, create.ErrActionUpdating, ResNameWebACLRuleGroupAssociation, plan.RuleName.String(), err),
+			err.Error(),
+		)
+		return
+	}
+
+	// Find the rule to update
+	ruleName := plan.RuleName.ValueString()
+	ruleFound := false
+	for i, rule := range webACL.WebACL.Rules {
+		if aws.ToString(rule.Name) == ruleName {
+			ruleFound = true
+
+			// Update the rule's priority
+			webACL.WebACL.Rules[i].Priority = plan.Priority.ValueInt32()
+
+			// Update override action
+			overrideAction := plan.OverrideAction.ValueString()
+			if overrideAction == "" {
+				overrideAction = overrideActionNone // Default value
+			}
+
+			switch overrideAction {
+			case overrideActionNone:
+				webACL.WebACL.Rules[i].OverrideAction = &awstypes.OverrideAction{
+					None: &awstypes.NoneAction{},
+				}
+			case overrideActionCount:
+				webACL.WebACL.Rules[i].OverrideAction = &awstypes.OverrideAction{
+					Count: &awstypes.CountAction{},
+				}
+			}
+
+			// Update rule action overrides from nested structure (both custom and managed)
+			var overrides []awstypes.RuleActionOverride
+			if !plan.RuleGroupReference.IsNull() && !plan.RuleGroupReference.IsUnknown() {
+				ruleGroupRefs := plan.RuleGroupReference.Elements()
+				if len(ruleGroupRefs) > 0 {
+					var ruleGroupRefModel ruleGroupReferenceModel
+					resp.Diagnostics.Append(ruleGroupRefs[0].(fwtypes.ObjectValueOf[ruleGroupReferenceModel]).As(ctx, &ruleGroupRefModel, basetypes.ObjectAsOptions{})...)
+					if resp.Diagnostics.HasError() {
+						return
+					}
+
+					if !ruleGroupRefModel.RuleActionOverride.IsNull() && !ruleGroupRefModel.RuleActionOverride.IsUnknown() {
+						resp.Diagnostics.Append(fwflex.Expand(ctx, ruleGroupRefModel.RuleActionOverride, &overrides)...)
+						if resp.Diagnostics.HasError() {
+							return
+						}
+					}
+				}
+			} else if !plan.ManagedRuleGroup.IsNull() && !plan.ManagedRuleGroup.IsUnknown() {
+				managedRuleGroups := plan.ManagedRuleGroup.Elements()
+				if len(managedRuleGroups) > 0 {
+					var managedRuleGroupRef managedRuleGroupModel
+					resp.Diagnostics.Append(managedRuleGroups[0].(fwtypes.ObjectValueOf[managedRuleGroupModel]).As(ctx, &managedRuleGroupRef, basetypes.ObjectAsOptions{})...)
+					if resp.Diagnostics.HasError() {
+						return
+					}
+
+					if !managedRuleGroupRef.RuleActionOverride.IsNull() && !managedRuleGroupRef.RuleActionOverride.IsUnknown() {
+						resp.Diagnostics.Append(fwflex.Expand(ctx, managedRuleGroupRef.RuleActionOverride, &overrides)...)
+						if resp.Diagnostics.HasError() {
+							return
+						}
+					}
+				}
+			}
+
+			// Update the appropriate statement type with new overrides
+			if webACL.WebACL.Rules[i].Statement != nil {
+				if webACL.WebACL.Rules[i].Statement.RuleGroupReferenceStatement != nil {
+					webACL.WebACL.Rules[i].Statement.RuleGroupReferenceStatement.RuleActionOverrides = overrides
+				} else if webACL.WebACL.Rules[i].Statement.ManagedRuleGroupStatement != nil {
+					webACL.WebACL.Rules[i].Statement.ManagedRuleGroupStatement.RuleActionOverrides = overrides
+				}
+			}
+
+			break
+		}
+	}
+
+	if !ruleFound {
+		resp.Diagnostics.AddError(
+			create.ProblemStandardMessage(names.WAFV2, create.ErrActionUpdating, ResNameWebACLRuleGroupAssociation, plan.RuleName.String(), nil),
+			fmt.Sprintf("Rule %s not found in Web ACL", ruleName),
+		)
+		return
+	}
+
+	// Check for priority conflicts with other rules
+	for _, rule := range webACL.WebACL.Rules {
+		if aws.ToString(rule.Name) != ruleName && rule.Priority == plan.Priority.ValueInt32() {
+			resp.Diagnostics.AddError(
+				create.ProblemStandardMessage(names.WAFV2, create.ErrActionUpdating, ResNameWebACLRuleGroupAssociation, plan.RuleName.String(), nil),
+				fmt.Sprintf("Rule with priority %d already exists in Web ACL", plan.Priority.ValueInt32()),
+			)
+			return
+		}
+	}
+
+	// Update the Web ACL with the modified rule
+	updateInput := &wafv2.UpdateWebACLInput{
+		Id:                   aws.String(webACLID),
+		Name:                 aws.String(webACLName),
+		Scope:                awstypes.Scope(webACLScope),
+		DefaultAction:        webACL.WebACL.DefaultAction,
+		Rules:                webACL.WebACL.Rules,
+		VisibilityConfig:     webACL.WebACL.VisibilityConfig,
+		LockToken:            webACL.LockToken,
+		AssociationConfig:    webACL.WebACL.AssociationConfig,
+		CaptchaConfig:        webACL.WebACL.CaptchaConfig,
+		ChallengeConfig:      webACL.WebACL.ChallengeConfig,
+		CustomResponseBodies: webACL.WebACL.CustomResponseBodies,
+		TokenDomains:         webACL.WebACL.TokenDomains,
+	}
+
+	// Only set description if it's not empty
+	if webACL.WebACL.Description != nil && aws.ToString(webACL.WebACL.Description) != "" {
+		updateInput.Description = webACL.WebACL.Description
+	}
+
+	updateTimeout := r.UpdateTimeout(ctx, plan.Timeouts)
+	_, err = tfresource.RetryWhenIsA[any, *awstypes.WAFUnavailableEntityException](ctx, updateTimeout, func(ctx context.Context) (any, error) {
+		return conn.UpdateWebACL(ctx, updateInput)
+	})
+
+	if err != nil {
+		resp.Diagnostics.AddError(
+			create.ProblemStandardMessage(names.WAFV2, create.ErrActionUpdating, ResNameWebACLRuleGroupAssociation, plan.RuleName.String(), err),
+			err.Error(),
+		)
+		return
+	}
+
+	resp.Diagnostics.Append(resp.State.Set(ctx, &plan)...)
+}
+
+// Delete removes the associated rule from the Web ACL by rewriting the Web ACL
+// with a rule list that excludes it. A missing Web ACL or missing rule is
+// treated as already deleted (no error).
+func (r *resourceWebACLRuleGroupAssociation) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) {
+	var state resourceWebACLRuleGroupAssociationModel
+	resp.Diagnostics.Append(req.State.Get(ctx, &state)...)
+	if resp.Diagnostics.HasError() {
+		return
+	}
+
+	conn := r.Meta().WAFV2Client(ctx)
+
+	// Use attributes directly instead of parsing ID
+	webACLARN := state.WebACLARN.ValueString()
+	ruleName := state.RuleName.ValueString()
+
+	// Parse Web ACL ARN to get ID, name, and scope
+	webACLID, webACLName, webACLScope, err := parseWebACLARN(webACLARN)
+	if err != nil {
+		resp.Diagnostics.AddError(
+			"Deleting WAFv2 Web ACL Rule Group Association",
+			fmt.Sprintf("Error parsing Web ACL ARN: %s", err),
+		)
+		return
+	}
+
+	// Get the Web ACL
+	webACL, err := findWebACLByThreePartKey(ctx, conn, webACLID, webACLName, webACLScope)
+	if err != nil {
+		if retry.NotFound(err) {
+			// Web ACL is already gone, nothing to do
+			return
+		}
+
+		resp.Diagnostics.AddError(
+			create.ProblemStandardMessage(names.WAFV2, create.ErrActionDeleting, ResNameWebACLRuleGroupAssociation, state.RuleName.String(), err),
+			err.Error(),
+		)
+		return
+	}
+
+	// Filter out the rule we want to remove
+	var updatedRules []awstypes.Rule
+	ruleFound := false
+	for _, rule := range webACL.WebACL.Rules {
+		if aws.ToString(rule.Name) != ruleName {
+			updatedRules = append(updatedRules, rule)
+		} else {
+			ruleFound = true
+		}
+	}
+
+	if !ruleFound {
+		// Rule is already gone, nothing to do
+		return
+	}
+
+	// Update the Web ACL without the rule
+	updateInput := &wafv2.UpdateWebACLInput{
+		Id:                   aws.String(webACLID),
+		Name:                 aws.String(webACLName),
+		Scope:                awstypes.Scope(webACLScope),
+		DefaultAction:        webACL.WebACL.DefaultAction,
+		Rules:                updatedRules,
+		VisibilityConfig:     webACL.WebACL.VisibilityConfig,
+		LockToken:            webACL.LockToken,
+		AssociationConfig:    webACL.WebACL.AssociationConfig,
+		CaptchaConfig:        webACL.WebACL.CaptchaConfig,
+		ChallengeConfig:      webACL.WebACL.ChallengeConfig,
+		CustomResponseBodies: webACL.WebACL.CustomResponseBodies,
+		TokenDomains:         webACL.WebACL.TokenDomains,
+	}
+
+	// Only set description if it's not empty
+	if webACL.WebACL.Description != nil && aws.ToString(webACL.WebACL.Description) != "" {
+		updateInput.Description = webACL.WebACL.Description
+	}
+
+	const timeout = 5 * time.Minute
+	_, err = tfresource.RetryWhenIsA[any, *awstypes.WAFUnavailableEntityException](ctx, timeout, func(ctx context.Context) (any, error) {
+		return conn.UpdateWebACL(ctx, updateInput)
+	})
+
+	if err != nil {
+		resp.Diagnostics.AddError(
+			create.ProblemStandardMessage(names.WAFV2, create.ErrActionDeleting, ResNameWebACLRuleGroupAssociation, state.RuleName.String(), err),
+			err.Error(),
+		)
+		return
+	}
+}
+
+// ImportState accepts a comma-separated 4-part ID
+// (web_acl_arn,rule_name,rule_group_type,rule_group_identifier) and seeds
+// state with either the custom (ARN) or managed (vendor:name[:version])
+// nested rule group block, nulling the other one.
+func (r *resourceWebACLRuleGroupAssociation) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) {
+	parts, err := intflex.ExpandResourceId(req.ID, webACLRuleGroupAssociationResourceIDPartCount, true)
+	if err != nil {
+		resp.Diagnostics.AddError(
+			"Unexpected Import Identifier",
+			fmt.Sprintf("Expected import identifier with format: web_acl_arn,rule_name,rule_group_type,rule_group_identifier. Got: %q", req.ID),
+		)
+		return
+	}
+
+	webACLARN := parts[0]
+	ruleName := parts[1]
+	ruleGroupType := parts[2]
+	ruleGroupIdentifier := parts[3]
+
+	// Parse Web ACL ARN to get ID, name, and scope
+	_, _, _, err = parseWebACLARN(webACLARN)
+	if err != nil {
+		resp.Diagnostics.AddError(
+			"Invalid Web ACL ARN",
+			fmt.Sprintf("Error parsing Web ACL ARN: %s", err),
+		)
+		return
+	}
+
+	// Set basic attributes
+	resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("web_acl_arn"), webACLARN)...)
+	resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("rule_name"), ruleName)...)
+
+	// Set the appropriate rule group nested structure based on type
+	switch ruleGroupType {
+	case "custom":
+		// Custom rule group (ARN format)
+		if !arn.IsARN(ruleGroupIdentifier) {
+			resp.Diagnostics.AddError(
+				"Invalid Custom Rule Group Identifier",
+				"Custom rule group identifier should be an ARN",
+			)
+			return
+		}
+
+		ruleGroupRefModel := &ruleGroupReferenceModel{
+			ARN:                types.StringValue(ruleGroupIdentifier),
+			RuleActionOverride: fwtypes.NewListNestedObjectValueOfNull[ruleActionOverrideModel](ctx),
+		}
+
+		listValue, diags := fwtypes.NewListNestedObjectValueOfSlice(ctx, []*ruleGroupReferenceModel{ruleGroupRefModel}, nil)
+		resp.Diagnostics.Append(diags...)
+		if resp.Diagnostics.HasError() {
+			return
+		}
+		resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("rule_group_reference"), listValue)...)
+		resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("managed_rule_group"), fwtypes.NewListNestedObjectValueOfNull[managedRuleGroupModel](ctx))...)
+	case "managed":
+		// Managed rule group (vendorName:ruleName[:version] format)
+		identifierParts := strings.Split(ruleGroupIdentifier, ":")
+		if len(identifierParts) < 2 {
+			resp.Diagnostics.AddError(
+				"Invalid Managed Rule Group Identifier",
+				"Managed rule group identifier should be in format 'vendorName:ruleName[:version]'",
+			)
+			return
+		}
+
+		vendorName := identifierParts[0]
+		ruleGroupName := identifierParts[1]
+		var version string
+		if len(identifierParts) > 2 {
+			version = identifierParts[2]
+		}
+
+		managedRuleGroupRef := &managedRuleGroupModel{
+			Name:               types.StringValue(ruleGroupName),
+			VendorName:         types.StringValue(vendorName),
+			RuleActionOverride: fwtypes.NewListNestedObjectValueOfNull[ruleActionOverrideModel](ctx),
+		}
+		if version != "" {
+			managedRuleGroupRef.Version = types.StringValue(version)
+		} else {
+			managedRuleGroupRef.Version = types.StringNull()
+		}
+
+		listValue, diags := fwtypes.NewListNestedObjectValueOfSlice(ctx, []*managedRuleGroupModel{managedRuleGroupRef}, nil)
+		resp.Diagnostics.Append(diags...)
+		if resp.Diagnostics.HasError() {
+			return
+		}
+		resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("managed_rule_group"), listValue)...)
+		resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("rule_group_reference"), fwtypes.NewListNestedObjectValueOfNull[ruleGroupReferenceModel](ctx))...)
+	default:
+		resp.Diagnostics.AddError(
+			"Invalid Rule Group Type",
+			fmt.Sprintf("Rule group type must be 'custom' or 'managed', got: %s", ruleGroupType),
+		)
+		return
+	}
+}
+
+// parseWebACLARN extracts the Web ACL ID, name, and scope from the ARN
+// (returned scope is the WAFv2 API string "REGIONAL" or "CLOUDFRONT").
+// Name and ID are taken as the last two path segments, so extra segments
+// before "webacl" are tolerated.
+func parseWebACLARN(arn string) (id, name, scope string, err error) {
+	// ARN format: arn:aws:wafv2:region:account-id:scope/webacl/name/id
+	// or for CloudFront: arn:aws:wafv2:global:account-id:global/webacl/name/id
+	parts := strings.Split(arn, ":")
+	if len(parts) < 6 {
+		return "", "", "", fmt.Errorf("invalid Web ACL ARN format: %s", arn)
+	}
+
+	resourceParts := strings.Split(parts[5], "/")
+	if len(resourceParts) < 4 {
+		return "", "", "", fmt.Errorf("invalid Web ACL ARN resource format: %s", parts[5])
+	}
+
+	// Validate that this is a webacl ARN
+	if resourceParts[1] != "webacl" {
+		return "", "", "", fmt.Errorf("invalid Web ACL ARN: expected webacl resource type, got %s", resourceParts[1])
+	}
+
+	// Determine scope
+	scopeValue := "REGIONAL"
+	if parts[3] == "global" || resourceParts[0] == "global" {
+		scopeValue = "CLOUDFRONT"
+	}
+
+	// Extract name and ID
+	nameIndex := len(resourceParts) - 2
+	idIndex := len(resourceParts) - 1
+
+	return resourceParts[idIndex], resourceParts[nameIndex], scopeValue, nil
+}
+
+// resourceWebACLRuleGroupAssociationModel is the Terraform state/plan model
+// for the association resource. Exactly one of RuleGroupReference (custom)
+// or ManagedRuleGroup is expected to be populated.
+type resourceWebACLRuleGroupAssociationModel struct {
+	framework.WithRegionModel
+	RuleName           types.String                                             `tfsdk:"rule_name"`
+	Priority           types.Int32                                              `tfsdk:"priority"`
+	RuleGroupReference fwtypes.ListNestedObjectValueOf[ruleGroupReferenceModel] `tfsdk:"rule_group_reference"`
+	ManagedRuleGroup   fwtypes.ListNestedObjectValueOf[managedRuleGroupModel]   `tfsdk:"managed_rule_group"`
+	WebACLARN          types.String                                             `tfsdk:"web_acl_arn"`
+	OverrideAction     types.String                                             `tfsdk:"override_action"`
+	Timeouts           timeouts.Value                                           `tfsdk:"timeouts"`
+}
+
+// ruleGroupReferenceModel mirrors a custom rule group reference statement.
+type ruleGroupReferenceModel struct {
+	ARN                types.String                                             `tfsdk:"arn"`
+	RuleActionOverride fwtypes.ListNestedObjectValueOf[ruleActionOverrideModel] `tfsdk:"rule_action_override"`
+}
+
+// managedRuleGroupModel mirrors a managed rule group statement.
+type managedRuleGroupModel struct {
+	Name               types.String                                             `tfsdk:"name"`
+	VendorName         types.String                                             `tfsdk:"vendor_name"`
+	Version            types.String                                             `tfsdk:"version"`
+	RuleActionOverride fwtypes.ListNestedObjectValueOf[ruleActionOverrideModel] `tfsdk:"rule_action_override"`
+}
+
+// ruleActionOverrideModel overrides the action of one named rule inside the group.
+type ruleActionOverrideModel struct {
+	Name        types.String                                       `tfsdk:"name"`
+	ActionToUse fwtypes.ListNestedObjectValueOf[actionToUseModel] `tfsdk:"action_to_use"`
+}
+
+// actionToUseModel selects exactly one WAFv2 action type.
+type actionToUseModel struct {
+	Allow     fwtypes.ListNestedObjectValueOf[allowActionModel]     `tfsdk:"allow"`
+	Block     fwtypes.ListNestedObjectValueOf[blockActionModel]     `tfsdk:"block"`
+	Captcha   fwtypes.ListNestedObjectValueOf[captchaActionModel]   `tfsdk:"captcha"`
+	Challenge fwtypes.ListNestedObjectValueOf[challengeActionModel] `tfsdk:"challenge"`
+	Count     fwtypes.ListNestedObjectValueOf[countActionModel]     `tfsdk:"count"`
+}
+
+type allowActionModel struct {
+	CustomRequestHandling fwtypes.ListNestedObjectValueOf[customRequestHandlingModel] `tfsdk:"custom_request_handling"`
+}
+
+type blockActionModel struct {
+	CustomResponse fwtypes.ListNestedObjectValueOf[customResponseModel] `tfsdk:"custom_response"`
+}
+
+type captchaActionModel struct {
+	CustomRequestHandling fwtypes.ListNestedObjectValueOf[customRequestHandlingModel] `tfsdk:"custom_request_handling"`
+}
+
+type challengeActionModel struct {
+	CustomRequestHandling fwtypes.ListNestedObjectValueOf[customRequestHandlingModel] `tfsdk:"custom_request_handling"`
+}
+
+type countActionModel struct {
+	CustomRequestHandling fwtypes.ListNestedObjectValueOf[customRequestHandlingModel] `tfsdk:"custom_request_handling"`
+}
+
+type customRequestHandlingModel struct {
+	InsertHeader fwtypes.ListNestedObjectValueOf[insertHeaderModel] `tfsdk:"insert_header"`
+}
+
+type customResponseModel struct {
+	CustomResponseBodyKey types.String                                        `tfsdk:"custom_response_body_key"`
+	ResponseCode          types.Int32                                         `tfsdk:"response_code"`
+	ResponseHeader        fwtypes.ListNestedObjectValueOf[responseHeaderModel] `tfsdk:"response_header"`
+}
+
+type insertHeaderModel struct {
+	Name  types.String `tfsdk:"name"`
+	Value types.String `tfsdk:"value"`
+}
+
+type responseHeaderModel struct {
+	Name  types.String `tfsdk:"name"`
+	Value types.String `tfsdk:"value"`
+}
diff --git a/internal/service/wafv2/web_acl_rule_group_association_test.go b/internal/service/wafv2/web_acl_rule_group_association_test.go
new file mode 100644
index 000000000000..b87471a9da8e
--- /dev/null
+++ b/internal/service/wafv2/web_acl_rule_group_association_test.go
@@ -0,0 +1,1504 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package wafv2_test
+
+import (
+	"context"
+	"fmt"
+	"testing"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/service/wafv2"
+	sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest"
+	"github.com/hashicorp/terraform-plugin-testing/helper/resource"
+	"github.com/hashicorp/terraform-plugin-testing/plancheck"
+	"github.com/hashicorp/terraform-plugin-testing/terraform"
+	"github.com/hashicorp/terraform-provider-aws/internal/acctest"
+	"github.com/hashicorp/terraform-provider-aws/internal/conns"
+	tfwafv2 "github.com/hashicorp/terraform-provider-aws/internal/service/wafv2"
+	"github.com/hashicorp/terraform-provider-aws/internal/tfresource"
+	"github.com/hashicorp/terraform-provider-aws/names"
+)
+
+// TestParseWebACLARN is a pure unit test of the exported ARN parser: each
+// case pins the extracted ID/name/scope or expects a parse error.
+func TestParseWebACLARN(t *testing.T) {
+	t.Parallel()
+
+	testCases := map[string]struct {
+		arn           string
+		expectedID    string
+		expectedName  string
+		expectedScope string
+		expectError   bool
+	}{
+		"valid regional ARN": {
+			arn:           "arn:aws:wafv2:us-east-1:123456789012:regional/webacl/test-web-acl/12345678-1234-1234-1234-123456789012", //lintignore:AWSAT003,AWSAT005
+			expectedID:    "12345678-1234-1234-1234-123456789012",
+			expectedName:  "test-web-acl",
+			expectedScope: "REGIONAL",
+			expectError:   false,
+		},
+		"valid CloudFront ARN with global region": {
+			arn:
"arn:aws:wafv2:global:123456789012:global/webacl/test-web-acl/12345678-1234-1234-1234-123456789012", //lintignore:AWSAT003,AWSAT005 + expectedID: "12345678-1234-1234-1234-123456789012", + expectedName: "test-web-acl", + expectedScope: "CLOUDFRONT", + expectError: false, + }, + "valid CloudFront ARN with specific region": { + arn: "arn:aws:wafv2:us-east-1:123456789012:global/webacl/test-web-acl/12345678-1234-1234-1234-123456789012", //lintignore:AWSAT003,AWSAT005 + expectedID: "12345678-1234-1234-1234-123456789012", + expectedName: "test-web-acl", + expectedScope: "CLOUDFRONT", + expectError: false, + }, + "web ACL name with hyphens": { + arn: "arn:aws:wafv2:us-west-2:123456789012:regional/webacl/my-test-web-acl-name/12345678-1234-1234-1234-123456789012", //lintignore:AWSAT003,AWSAT005 + expectedID: "12345678-1234-1234-1234-123456789012", + expectedName: "my-test-web-acl-name", + expectedScope: "REGIONAL", + expectError: false, + }, + "web ACL name with underscores": { + arn: "arn:aws:wafv2:eu-west-1:123456789012:regional/webacl/my_test_web_acl_name/12345678-1234-1234-1234-123456789012", //lintignore:AWSAT003,AWSAT005 + expectedID: "12345678-1234-1234-1234-123456789012", + expectedName: "my_test_web_acl_name", + expectedScope: "REGIONAL", + expectError: false, + }, + "invalid ARN - too few parts": { + arn: "arn:aws:wafv2:us-east-1:123456789012", //lintignore:AWSAT003,AWSAT005 + expectError: true, + }, + "invalid ARN - empty": { + arn: "", + expectError: true, + }, + "invalid ARN - not an ARN": { + arn: "not-an-arn", + expectError: true, + }, + "invalid resource format - too few parts": { + arn: "arn:aws:wafv2:us-east-1:123456789012:regional/webacl/test-web-acl", //lintignore:AWSAT003,AWSAT005 + expectError: true, + }, + "invalid resource format - wrong resource type": { + arn: "arn:aws:wafv2:us-east-1:123456789012:regional/rulegroup/test-rule-group/12345678-1234-1234-1234-123456789012", //lintignore:AWSAT003,AWSAT005 + expectError: true, + }, + "different AWS 
partition": { + arn: "arn:aws-us-gov:wafv2:us-gov-east-1:123456789012:regional/webacl/test-web-acl/12345678-1234-1234-1234-123456789012", //lintignore:AWSAT003,AWSAT005 + expectedID: "12345678-1234-1234-1234-123456789012", + expectedName: "test-web-acl", + expectedScope: "REGIONAL", + expectError: false, + }, + "different AWS partition with CloudFront": { + arn: "arn:aws-cn:wafv2:global:123456789012:global/webacl/test-web-acl/12345678-1234-1234-1234-123456789012", //lintignore:AWSAT003,AWSAT005 + expectedID: "12345678-1234-1234-1234-123456789012", + expectedName: "test-web-acl", + expectedScope: "CLOUDFRONT", + expectError: false, + }, + } + + for name, testCase := range testCases { + t.Run(name, func(t *testing.T) { + t.Parallel() + + id, name, scope, err := tfwafv2.ParseWebACLARN(testCase.arn) + + if testCase.expectError { + if err == nil { + t.Errorf("expected error but got none") + } + return + } + + if err != nil { + t.Errorf("unexpected error: %v", err) + return + } + + if id != testCase.expectedID { + t.Errorf("expected ID %q, got %q", testCase.expectedID, id) + } + + if name != testCase.expectedName { + t.Errorf("expected name %q, got %q", testCase.expectedName, name) + } + + if scope != testCase.expectedScope { + t.Errorf("expected scope %q, got %q", testCase.expectedScope, scope) + } + }) + } +} + +func TestAccWAFV2WebACLRuleGroupAssociation_basic(t *testing.T) { + ctx := acctest.Context(t) + var v wafv2.GetWebACLOutput + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_wafv2_web_acl_rule_group_association.test" + webACLResourceName := "aws_wafv2_web_acl.test" + ruleGroupResourceName := "aws_wafv2_rule_group.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.WAFV2ServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckWebACLRuleGroupAssociationDestroy(ctx), + Steps: 
[]resource.TestStep{
+			{
+				Config: testAccWebACLRuleGroupAssociationConfig_RuleGroupReference_basic(rName),
+				Check: resource.ComposeTestCheckFunc(
+					testAccCheckWebACLRuleGroupAssociationExists(ctx, resourceName, &v),
+					resource.TestCheckResourceAttr(resourceName, "rule_name", fmt.Sprintf("%s-association", rName)),
+					resource.TestCheckResourceAttr(resourceName, names.AttrPriority, "10"),
+					resource.TestCheckResourceAttr(resourceName, "override_action", "none"),
+					resource.TestCheckResourceAttrPair(resourceName, "web_acl_arn", webACLResourceName, names.AttrARN),
+					resource.TestCheckResourceAttrPair(resourceName, "rule_group_reference.0.arn", ruleGroupResourceName, names.AttrARN),
+				),
+			},
+			{
+				ResourceName:                         resourceName,
+				ImportState:                          true,
+				ImportStateVerify:                    true,
+				ImportStateIdFunc:                    testAccWebACLRuleGroupAssociationImportStateIDFunc(resourceName),
+				ImportStateVerifyIdentifierAttribute: "web_acl_arn",
+			},
+		},
+	})
+}
+
+// Acceptance test: out-of-band removal of the association is detected
+// (plan is non-empty after the resource "disappears").
+func TestAccWAFV2WebACLRuleGroupAssociation_disappears(t *testing.T) {
+	ctx := acctest.Context(t)
+	var v wafv2.GetWebACLOutput
+	rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix)
+	resourceName := "aws_wafv2_web_acl_rule_group_association.test"
+
+	resource.ParallelTest(t, resource.TestCase{
+		PreCheck:                 func() { acctest.PreCheck(ctx, t) },
+		ErrorCheck:               acctest.ErrorCheck(t, names.WAFV2ServiceID),
+		ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories,
+		CheckDestroy:             testAccCheckWebACLRuleGroupAssociationDestroy(ctx),
+		Steps: []resource.TestStep{
+			{
+				Config: testAccWebACLRuleGroupAssociationConfig_RuleGroupReference_basic(rName),
+				Check: resource.ComposeTestCheckFunc(
+					testAccCheckWebACLRuleGroupAssociationExists(ctx, resourceName, &v),
+					acctest.CheckFrameworkResourceDisappears(ctx, acctest.Provider, tfwafv2.ResourceWebACLRuleGroupAssociation, resourceName),
+				),
+				ExpectNonEmptyPlan: true,
+			},
+		},
+	})
+}
+
+// Acceptance test: override_action = "count" is applied for a custom rule group.
+func TestAccWAFV2WebACLRuleGroupAssociation_RuleGroupReference_overrideAction(t *testing.T) {
+	ctx := acctest.Context(t)
+	var v wafv2.GetWebACLOutput
+	rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix)
+	resourceName := "aws_wafv2_web_acl_rule_group_association.test"
+
+	resource.ParallelTest(t, resource.TestCase{
+		PreCheck:                 func() { acctest.PreCheck(ctx, t) },
+		ErrorCheck:               acctest.ErrorCheck(t, names.WAFV2ServiceID),
+		ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories,
+		CheckDestroy:             testAccCheckWebACLRuleGroupAssociationDestroy(ctx),
+		Steps: []resource.TestStep{
+			{
+				Config: testAccWebACLRuleGroupAssociationConfig_RuleGroupReference_overrideAction(rName, "count"),
+				Check: resource.ComposeTestCheckFunc(
+					testAccCheckWebACLRuleGroupAssociationExists(ctx, resourceName, &v),
+					resource.TestCheckResourceAttr(resourceName, "override_action", "count"),
+				),
+			},
+		},
+	})
+}
+
+// Acceptance test: per-rule action overrides (allow with custom request
+// handling, block with custom response) on a custom rule group reference.
+func TestAccWAFV2WebACLRuleGroupAssociation_RuleGroupReference_ruleActionOverride(t *testing.T) {
+	ctx := acctest.Context(t)
+	var v wafv2.GetWebACLOutput
+	rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix)
+	resourceName := "aws_wafv2_web_acl_rule_group_association.test"
+
+	resource.ParallelTest(t, resource.TestCase{
+		PreCheck:                 func() { acctest.PreCheck(ctx, t) },
+		ErrorCheck:               acctest.ErrorCheck(t, names.WAFV2ServiceID),
+		ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories,
+		CheckDestroy:             testAccCheckWebACLRuleGroupAssociationDestroy(ctx),
+		Steps: []resource.TestStep{
+			{
+				Config: testAccWebACLRuleGroupAssociationConfig_RuleGroupReference_ruleActionOverride(rName),
+				Check: resource.ComposeTestCheckFunc(
+					testAccCheckWebACLRuleGroupAssociationExists(ctx, resourceName, &v),
+					resource.TestCheckResourceAttr(resourceName, "rule_group_reference.0.rule_action_override.#", "2"),
+					resource.TestCheckResourceAttr(resourceName, "rule_group_reference.0.rule_action_override.0.name", "rule-1"),
+					resource.TestCheckResourceAttr(resourceName, "rule_group_reference.0.rule_action_override.0.action_to_use.#", "1"),
+					resource.TestCheckResourceAttr(resourceName, "rule_group_reference.0.rule_action_override.0.action_to_use.0.allow.#", "1"),
+					resource.TestCheckResourceAttr(resourceName, "rule_group_reference.0.rule_action_override.0.action_to_use.0.allow.0.custom_request_handling.#", "1"),
+					resource.TestCheckResourceAttr(resourceName, "rule_group_reference.0.rule_action_override.0.action_to_use.0.allow.0.custom_request_handling.0.insert_header.#", "1"),
+					resource.TestCheckResourceAttr(resourceName, "rule_group_reference.0.rule_action_override.0.action_to_use.0.allow.0.custom_request_handling.0.insert_header.0.name", "X-Custom-Header"),
+					resource.TestCheckResourceAttr(resourceName, "rule_group_reference.0.rule_action_override.0.action_to_use.0.allow.0.custom_request_handling.0.insert_header.0.value", "custom-value"),
+					resource.TestCheckResourceAttr(resourceName, "rule_group_reference.0.rule_action_override.1.name", "rule-2"),
+					resource.TestCheckResourceAttr(resourceName, "rule_group_reference.0.rule_action_override.1.action_to_use.#", "1"),
+					resource.TestCheckResourceAttr(resourceName, "rule_group_reference.0.rule_action_override.1.action_to_use.0.block.#", "1"),
+					resource.TestCheckResourceAttr(resourceName, "rule_group_reference.0.rule_action_override.1.action_to_use.0.block.0.custom_response.#", "1"),
+					resource.TestCheckResourceAttr(resourceName, "rule_group_reference.0.rule_action_override.1.action_to_use.0.block.0.custom_response.0.response_code", "403"),
+					resource.TestCheckResourceAttr(resourceName, "rule_group_reference.0.rule_action_override.1.action_to_use.0.block.0.custom_response.0.response_header.#", "1"),
+					resource.TestCheckResourceAttr(resourceName, "rule_group_reference.0.rule_action_override.1.action_to_use.0.block.0.custom_response.0.response_header.0.name", "X-Block-Reason"),
+					resource.TestCheckResourceAttr(resourceName, "rule_group_reference.0.rule_action_override.1.action_to_use.0.block.0.custom_response.0.response_header.0.value", "rule-override"),
),
+			},
+			{
+				ResourceName:                         resourceName,
+				ImportState:                          true,
+				ImportStateVerify:                    true,
+				ImportStateIdFunc:                    testAccWebACLRuleGroupAssociationImportStateIDFunc(resourceName),
+				ImportStateVerifyIdentifierAttribute: "web_acl_arn",
+			},
+		},
+	})
+}
+
+// Acceptance test: updating a rule action override (count -> captcha) is an
+// in-place update.
+func TestAccWAFV2WebACLRuleGroupAssociation_RuleGroupReference_ruleActionOverrideUpdate(t *testing.T) {
+	ctx := acctest.Context(t)
+	var v wafv2.GetWebACLOutput
+	rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix)
+	resourceName := "aws_wafv2_web_acl_rule_group_association.test"
+
+	resource.ParallelTest(t, resource.TestCase{
+		PreCheck:                 func() { acctest.PreCheck(ctx, t) },
+		ErrorCheck:               acctest.ErrorCheck(t, names.WAFV2ServiceID),
+		ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories,
+		CheckDestroy:             testAccCheckWebACLRuleGroupAssociationDestroy(ctx),
+		Steps: []resource.TestStep{
+			{
+				Config: testAccWebACLRuleGroupAssociationConfig_RuleGroupReference_ruleActionOverrideCount(rName),
+				Check: resource.ComposeTestCheckFunc(
+					testAccCheckWebACLRuleGroupAssociationExists(ctx, resourceName, &v),
+					resource.TestCheckResourceAttr(resourceName, "rule_group_reference.0.rule_action_override.#", "1"),
+					resource.TestCheckResourceAttr(resourceName, "rule_group_reference.0.rule_action_override.0.name", "rule-1"),
+					resource.TestCheckResourceAttr(resourceName, "rule_group_reference.0.rule_action_override.0.action_to_use.0.count.#", "1"),
+				),
+			},
+			{
+				Config: testAccWebACLRuleGroupAssociationConfig_RuleGroupReference_ruleActionOverrideCaptcha(rName),
+				Check: resource.ComposeTestCheckFunc(
+					testAccCheckWebACLRuleGroupAssociationExists(ctx, resourceName, &v),
+					resource.TestCheckResourceAttr(resourceName, "rule_group_reference.0.rule_action_override.#", "1"),
+					resource.TestCheckResourceAttr(resourceName, "rule_group_reference.0.rule_action_override.0.name", "rule-1"),
+					resource.TestCheckResourceAttr(resourceName, "rule_group_reference.0.rule_action_override.0.action_to_use.0.captcha.#", "1"),
+				),
+			},
+		},
+	})
+}
+
+// Acceptance test: changing priority is an in-place update (no replacement).
+func TestAccWAFV2WebACLRuleGroupAssociation_RuleGroupReference_priorityUpdate(t *testing.T) {
+	ctx := acctest.Context(t)
+	var v wafv2.GetWebACLOutput
+	rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix)
+	resourceName := "aws_wafv2_web_acl_rule_group_association.test"
+
+	resource.ParallelTest(t, resource.TestCase{
+		PreCheck:                 func() { acctest.PreCheck(ctx, t) },
+		ErrorCheck:               acctest.ErrorCheck(t, names.WAFV2ServiceID),
+		ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories,
+		CheckDestroy:             testAccCheckWebACLRuleGroupAssociationDestroy(ctx),
+		Steps: []resource.TestStep{
+			{
+				Config: testAccWebACLRuleGroupAssociationConfig_RuleGroupReference_priority(rName, 10),
+				Check: resource.ComposeTestCheckFunc(
+					testAccCheckWebACLRuleGroupAssociationExists(ctx, resourceName, &v),
+					resource.TestCheckResourceAttr(resourceName, names.AttrPriority, "10"),
+				),
+			},
+			{
+				Config: testAccWebACLRuleGroupAssociationConfig_RuleGroupReference_priority(rName, 20),
+				ConfigPlanChecks: resource.ConfigPlanChecks{
+					PreApply: []plancheck.PlanCheck{
+						plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate),
+					},
+				},
+				Check: resource.ComposeTestCheckFunc(
+					testAccCheckWebACLRuleGroupAssociationExists(ctx, resourceName, &v),
+					resource.TestCheckResourceAttr(resourceName, names.AttrPriority, "20"),
+				),
+			},
+			{
+				ResourceName:                         resourceName,
+				ImportState:                          true,
+				ImportStateVerify:                    true,
+				ImportStateIdFunc:                    testAccWebACLRuleGroupAssociationImportStateIDFunc(resourceName),
+				ImportStateVerifyIdentifierAttribute: "web_acl_arn",
+			},
+		},
+	})
+}
+
+// Acceptance test: changing override_action (none -> count) is an in-place update.
+func TestAccWAFV2WebACLRuleGroupAssociation_RuleGroupReference_overrideActionUpdate(t *testing.T) {
+	ctx := acctest.Context(t)
+	var v wafv2.GetWebACLOutput
+	rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix)
+	resourceName := "aws_wafv2_web_acl_rule_group_association.test"
+
+	resource.ParallelTest(t, resource.TestCase{
+		PreCheck:                 func() { acctest.PreCheck(ctx, t) },
+		ErrorCheck:               acctest.ErrorCheck(t, names.WAFV2ServiceID),
+		ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories,
+		CheckDestroy:             testAccCheckWebACLRuleGroupAssociationDestroy(ctx),
+		Steps: []resource.TestStep{
+			{
+				Config: testAccWebACLRuleGroupAssociationConfig_RuleGroupReference_overrideAction(rName, "none"),
+				Check: resource.ComposeTestCheckFunc(
+					testAccCheckWebACLRuleGroupAssociationExists(ctx, resourceName, &v),
+					resource.TestCheckResourceAttr(resourceName, "override_action", "none"),
+				),
+			},
+			{
+				Config: testAccWebACLRuleGroupAssociationConfig_RuleGroupReference_overrideAction(rName, "count"),
+				ConfigPlanChecks: resource.ConfigPlanChecks{
+					PreApply: []plancheck.PlanCheck{
+						plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate),
+					},
+				},
+				Check: resource.ComposeTestCheckFunc(
+					testAccCheckWebACLRuleGroupAssociationExists(ctx, resourceName, &v),
+					resource.TestCheckResourceAttr(resourceName, "override_action", "count"),
+				),
+			},
+			{
+				ResourceName:                         resourceName,
+				ImportState:                          true,
+				ImportStateVerify:                    true,
+				ImportStateIdFunc:                    testAccWebACLRuleGroupAssociationImportStateIDFunc(resourceName),
+				ImportStateVerifyIdentifierAttribute: "web_acl_arn",
+			},
+		},
+	})
+}
+
+// Acceptance test: changing rule_name forces destroy-before-create replacement.
+func TestAccWAFV2WebACLRuleGroupAssociation_RuleGroupReference_ruleNameRequiresReplace(t *testing.T) {
+	ctx := acctest.Context(t)
+	var v wafv2.GetWebACLOutput
+	rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix)
+	resourceName := "aws_wafv2_web_acl_rule_group_association.test"
+
+	resource.ParallelTest(t, resource.TestCase{
+		PreCheck:                 func() { acctest.PreCheck(ctx, t) },
+		ErrorCheck:               acctest.ErrorCheck(t, names.WAFV2ServiceID),
+		ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories,
+		CheckDestroy:             testAccCheckWebACLRuleGroupAssociationDestroy(ctx),
+		Steps: []resource.TestStep{
+			{
+				Config: testAccWebACLRuleGroupAssociationConfig_RuleGroupReference_ruleName(rName, "original-rule"),
+				Check: resource.ComposeTestCheckFunc(
testAccCheckWebACLRuleGroupAssociationExists(ctx, resourceName, &v),
+					resource.TestCheckResourceAttr(resourceName, "rule_name", "original-rule"),
+				),
+			},
+			{
+				Config: testAccWebACLRuleGroupAssociationConfig_RuleGroupReference_ruleName(rName, "updated-rule"),
+				ConfigPlanChecks: resource.ConfigPlanChecks{
+					PreApply: []plancheck.PlanCheck{
+						plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionDestroyBeforeCreate),
+					},
+				},
+				Check: resource.ComposeTestCheckFunc(
+					testAccCheckWebACLRuleGroupAssociationExists(ctx, resourceName, &v),
+					resource.TestCheckResourceAttr(resourceName, "rule_name", "updated-rule"),
+				),
+			},
+		},
+	})
+}
+
+// Acceptance test: changing web_acl_arn forces destroy-before-create replacement.
+func TestAccWAFV2WebACLRuleGroupAssociation_RuleGroupReference_webACLARNRequiresReplace(t *testing.T) {
+	ctx := acctest.Context(t)
+	var v wafv2.GetWebACLOutput
+	rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix)
+	resourceName := "aws_wafv2_web_acl_rule_group_association.test"
+
+	resource.ParallelTest(t, resource.TestCase{
+		PreCheck:                 func() { acctest.PreCheck(ctx, t) },
+		ErrorCheck:               acctest.ErrorCheck(t, names.WAFV2ServiceID),
+		ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories,
+		CheckDestroy:             testAccCheckWebACLRuleGroupAssociationDestroy(ctx),
+		Steps: []resource.TestStep{
+			{
+				Config: testAccWebACLRuleGroupAssociationConfig_RuleGroupReference_webACL(rName, "first"),
+				Check: resource.ComposeTestCheckFunc(
+					testAccCheckWebACLRuleGroupAssociationExists(ctx, resourceName, &v),
+				),
+			},
+			{
+				Config: testAccWebACLRuleGroupAssociationConfig_RuleGroupReference_webACL(rName, "second"),
+				ConfigPlanChecks: resource.ConfigPlanChecks{
+					PreApply: []plancheck.PlanCheck{
+						plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionDestroyBeforeCreate),
+					},
+				},
+				Check: resource.ComposeTestCheckFunc(
+					testAccCheckWebACLRuleGroupAssociationExists(ctx, resourceName, &v),
+				),
+			},
+		},
+	})
+}
+
+// Acceptance test: basic lifecycle and import of a managed (AWS vendor) rule
+// group association.
+func TestAccWAFV2WebACLRuleGroupAssociation_ManagedRuleGroup_basic(t *testing.T) {
+	ctx := acctest.Context(t)
+	var webACL wafv2.GetWebACLOutput
+	rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix)
+	resourceName := "aws_wafv2_web_acl_rule_group_association.test"
+
+	resource.ParallelTest(t, resource.TestCase{
+		PreCheck:                 func() { acctest.PreCheck(ctx, t) },
+		ErrorCheck:               acctest.ErrorCheck(t, names.WAFV2ServiceID),
+		ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories,
+		CheckDestroy:             testAccCheckWebACLRuleGroupAssociationDestroy(ctx),
+		Steps: []resource.TestStep{
+			{
+				Config: testAccWebACLRuleGroupAssociationConfig_ManagedRuleGroup_basic(rName),
+				Check: resource.ComposeTestCheckFunc(
+					testAccCheckWebACLRuleGroupAssociationExists(ctx, resourceName, &webACL),
+					resource.TestCheckResourceAttr(resourceName, "rule_name", "test-rule"),
+					resource.TestCheckResourceAttr(resourceName, names.AttrPriority, "1"),
+					resource.TestCheckResourceAttr(resourceName, "override_action", "none"),
+					resource.TestCheckResourceAttr(resourceName, "managed_rule_group.#", "1"),
+					resource.TestCheckResourceAttr(resourceName, "managed_rule_group.0.name", "AWSManagedRulesCommonRuleSet"),
+					resource.TestCheckResourceAttr(resourceName, "managed_rule_group.0.vendor_name", "AWS"),
+					resource.TestCheckResourceAttr(resourceName, "rule_group_reference.#", "0"),
+				),
+			},
+			{
+				ResourceName:                         resourceName,
+				ImportState:                          true,
+				ImportStateVerify:                    true,
+				ImportStateIdFunc:                    testAccWebACLRuleGroupAssociationManagedRuleGroupImportStateIDFunc(resourceName),
+				ImportStateVerifyIdentifierAttribute: "web_acl_arn",
+			},
+		},
+	})
+}
+
+// Acceptance test: managed rule group association pinned to an explicit version.
+func TestAccWAFV2WebACLRuleGroupAssociation_ManagedRuleGroup_withVersion(t *testing.T) {
+	ctx := acctest.Context(t)
+	var webACL wafv2.GetWebACLOutput
+	rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix)
+	resourceName := "aws_wafv2_web_acl_rule_group_association.test"
+
+	resource.ParallelTest(t, resource.TestCase{
+		PreCheck:                 func() { acctest.PreCheck(ctx, t) },
+		ErrorCheck:               acctest.ErrorCheck(t, names.WAFV2ServiceID),
+		ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories,
+		CheckDestroy:             testAccCheckWebACLRuleGroupAssociationDestroy(ctx),
+		Steps: []resource.TestStep{
+			{
+				Config: testAccWebACLRuleGroupAssociationConfig_ManagedRuleGroup_withVersion(rName),
+				Check: resource.ComposeTestCheckFunc(
+					testAccCheckWebACLRuleGroupAssociationExists(ctx, resourceName, &webACL),
+					resource.TestCheckResourceAttr(resourceName, "rule_name", "test-rule"),
+					resource.TestCheckResourceAttr(resourceName, names.AttrPriority, "1"),
+					resource.TestCheckResourceAttr(resourceName, "override_action", "none"),
+					resource.TestCheckResourceAttr(resourceName, "managed_rule_group.#", "1"),
+					resource.TestCheckResourceAttr(resourceName, "managed_rule_group.0.name", "AWSManagedRulesCommonRuleSet"),
+					resource.TestCheckResourceAttr(resourceName, "managed_rule_group.0.vendor_name", "AWS"),
+					resource.TestCheckResourceAttr(resourceName, "managed_rule_group.0.version", "Version_1.0"),
+					resource.TestCheckResourceAttr(resourceName, "rule_group_reference.#", "0"),
+				),
+			},
+			{
+				ResourceName:                         resourceName,
+				ImportState:                          true,
+				ImportStateVerify:                    true,
+				ImportStateIdFunc:                    testAccWebACLRuleGroupAssociationManagedRuleGroupImportStateIDFunc(resourceName),
+				ImportStateVerifyIdentifierAttribute: "web_acl_arn",
+			},
+		},
+	})
+}
+
+// Acceptance test: rule action overrides on a managed rule group association.
+// (Body continues beyond this view.)
+func TestAccWAFV2WebACLRuleGroupAssociation_ManagedRuleGroup_ruleActionOverride(t *testing.T) {
+	ctx := acctest.Context(t)
+	var webACL wafv2.GetWebACLOutput
+	rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix)
+	resourceName := "aws_wafv2_web_acl_rule_group_association.test"
+
+	resource.ParallelTest(t, resource.TestCase{
+		PreCheck:                 func() { acctest.PreCheck(ctx, t) },
+		ErrorCheck:               acctest.ErrorCheck(t, names.WAFV2ServiceID),
+		ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories,
+		CheckDestroy:             testAccCheckWebACLRuleGroupAssociationDestroy(ctx),
+		Steps: []resource.TestStep{
+			{
+				Config:
testAccWebACLRuleGroupAssociationConfig_ManagedRuleGroup_ruleActionOverride(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckWebACLRuleGroupAssociationExists(ctx, resourceName, &webACL), + resource.TestCheckResourceAttr(resourceName, "rule_name", "test-rule"), + resource.TestCheckResourceAttr(resourceName, names.AttrPriority, "1"), + resource.TestCheckResourceAttr(resourceName, "override_action", "none"), + resource.TestCheckResourceAttr(resourceName, "managed_rule_group.#", "1"), + resource.TestCheckResourceAttr(resourceName, "managed_rule_group.0.name", "AWSManagedRulesCommonRuleSet"), + resource.TestCheckResourceAttr(resourceName, "managed_rule_group.0.vendor_name", "AWS"), + resource.TestCheckResourceAttr(resourceName, "managed_rule_group.0.rule_action_override.#", "1"), + resource.TestCheckResourceAttr(resourceName, "managed_rule_group.0.rule_action_override.0.name", "GenericRFI_BODY"), + resource.TestCheckResourceAttr(resourceName, "managed_rule_group.0.rule_action_override.0.action_to_use.#", "1"), + resource.TestCheckResourceAttr(resourceName, "managed_rule_group.0.rule_action_override.0.action_to_use.0.count.#", "1"), + resource.TestCheckResourceAttr(resourceName, "rule_group_reference.#", "0"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateIdFunc: testAccWebACLRuleGroupAssociationManagedRuleGroupImportStateIDFunc(resourceName), + ImportStateVerifyIdentifierAttribute: "web_acl_arn", + }, + }, + }) +} + +func testAccCheckWebACLRuleGroupAssociationDestroy(ctx context.Context) resource.TestCheckFunc { + return func(s *terraform.State) error { + conn := acctest.Provider.Meta().(*conns.AWSClient).WAFV2Client(ctx) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "aws_wafv2_web_acl_rule_group_association" { + continue + } + + // Use resource attributes directly instead of parsing ID + webACLARN := rs.Primary.Attributes["web_acl_arn"] + ruleName := 
rs.Primary.Attributes["rule_name"] + + // Determine rule group type and identifier from attributes + var ruleGroupType, ruleGroupIdentifier string + if rs.Primary.Attributes["rule_group_reference.0.arn"] != "" { + ruleGroupType = "custom" + ruleGroupIdentifier = rs.Primary.Attributes["rule_group_reference.0.arn"] + } else if rs.Primary.Attributes["managed_rule_group.0.name"] != "" { + ruleGroupType = "managed" + vendorName := rs.Primary.Attributes["managed_rule_group.0.vendor_name"] + ruleGroupName := rs.Primary.Attributes["managed_rule_group.0.name"] + version := rs.Primary.Attributes["managed_rule_group.0.version"] + ruleGroupIdentifier = fmt.Sprintf("%s:%s", vendorName, ruleGroupName) + if version != "" { + ruleGroupIdentifier += ":" + version + } + } else { + continue // Skip if no rule group configuration found + } + + // Parse Web ACL ARN to get ID, name, and scope + webACLID, webACLName, webACLScope, err := tfwafv2.ParseWebACLARN(webACLARN) + if err != nil { + continue + } + + // Get the Web ACL + webACL, err := tfwafv2.FindWebACLByThreePartKey(ctx, conn, webACLID, webACLName, webACLScope) + if tfresource.NotFound(err) { + // Web ACL is gone, so the association is definitely destroyed + continue + } + if err != nil { + return fmt.Errorf("error reading Web ACL (%s): %w", webACLARN, err) + } + + // Check if the rule still exists in the Web ACL + for _, rule := range webACL.WebACL.Rules { + if aws.ToString(rule.Name) != ruleName || rule.Statement == nil { + continue + } + + // Check if this rule matches our rule group type and identifier + var matchesRuleGroup bool + if ruleGroupType == "custom" && rule.Statement.RuleGroupReferenceStatement != nil { + // For custom rule groups, the identifier is the ARN + if aws.ToString(rule.Statement.RuleGroupReferenceStatement.ARN) == ruleGroupIdentifier { + matchesRuleGroup = true + } + } else if ruleGroupType == "managed" && rule.Statement.ManagedRuleGroupStatement != nil { + // For managed rule groups, construct 
identifier and compare + managedStmt := rule.Statement.ManagedRuleGroupStatement + managedIdentifier := fmt.Sprintf("%s:%s", aws.ToString(managedStmt.VendorName), aws.ToString(managedStmt.Name)) + if managedStmt.Version != nil && aws.ToString(managedStmt.Version) != "" { + managedIdentifier += ":" + aws.ToString(managedStmt.Version) + } + if managedIdentifier == ruleGroupIdentifier { + matchesRuleGroup = true + } + } + + if matchesRuleGroup { + return fmt.Errorf("WAFv2 Web ACL Rule Group Association still exists in Web ACL %s for rule %s", webACLARN, ruleName) + } + } + } + + return nil + } +} + +func testAccCheckWebACLRuleGroupAssociationExists(ctx context.Context, n string, v *wafv2.GetWebACLOutput) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + // Use resource attributes directly instead of parsing ID + webACLARN := rs.Primary.Attributes["web_acl_arn"] + ruleName := rs.Primary.Attributes["rule_name"] + + if webACLARN == "" || ruleName == "" { + return fmt.Errorf("Missing required attributes: web_acl_arn=%s, rule_name=%s", webACLARN, ruleName) + } + + // Determine rule group type and identifier from attributes + var ruleGroupType, ruleGroupIdentifier string + if rs.Primary.Attributes["rule_group_reference.0.arn"] != "" { + ruleGroupType = "custom" + ruleGroupIdentifier = rs.Primary.Attributes["rule_group_reference.0.arn"] + } else if rs.Primary.Attributes["managed_rule_group.0.name"] != "" { + ruleGroupType = "managed" + vendorName := rs.Primary.Attributes["managed_rule_group.0.vendor_name"] + ruleGroupName := rs.Primary.Attributes["managed_rule_group.0.name"] + version := rs.Primary.Attributes["managed_rule_group.0.version"] + ruleGroupIdentifier = fmt.Sprintf("%s:%s", vendorName, ruleGroupName) + if version != "" { + ruleGroupIdentifier += ":" + version + } + } else { + return fmt.Errorf("No rule group configuration found in state") + } + + // 
Parse Web ACL ARN to get ID, name, and scope + webACLID, webACLName, webACLScope, err := tfwafv2.ParseWebACLARN(webACLARN) + if err != nil { + return fmt.Errorf("error parsing Web ACL ARN: %w", err) + } + + conn := acctest.Provider.Meta().(*conns.AWSClient).WAFV2Client(ctx) + + // Get the Web ACL + webACL, err := tfwafv2.FindWebACLByThreePartKey(ctx, conn, webACLID, webACLName, webACLScope) + if err != nil { + return fmt.Errorf("error reading Web ACL (%s): %w", webACLARN, err) + } + + // Check if the rule exists in the Web ACL with the correct configuration + found := false + for _, rule := range webACL.WebACL.Rules { + if aws.ToString(rule.Name) != ruleName || rule.Statement == nil { + continue + } + + // Check if this rule matches our rule group type and identifier + var matchesRuleGroup bool + if ruleGroupType == "custom" && rule.Statement.RuleGroupReferenceStatement != nil { + // For custom rule groups, the identifier is the ARN + if aws.ToString(rule.Statement.RuleGroupReferenceStatement.ARN) == ruleGroupIdentifier { + matchesRuleGroup = true + } + } else if ruleGroupType == "managed" && rule.Statement.ManagedRuleGroupStatement != nil { + // For managed rule groups, construct identifier and compare + managedStmt := rule.Statement.ManagedRuleGroupStatement + managedIdentifier := fmt.Sprintf("%s:%s", aws.ToString(managedStmt.VendorName), aws.ToString(managedStmt.Name)) + if managedStmt.Version != nil && aws.ToString(managedStmt.Version) != "" { + managedIdentifier += ":" + aws.ToString(managedStmt.Version) + } + if managedIdentifier == ruleGroupIdentifier { + matchesRuleGroup = true + } + } + + if matchesRuleGroup { + found = true + break + } + } + + if !found { + return fmt.Errorf("WAFv2 Web ACL Rule Group Association not found in Web ACL %s for rule %s", webACLARN, ruleName) + } + + *v = *webACL + + return nil + } +} + +func testAccWebACLRuleGroupAssociationImportStateIDFunc(resourceName string) resource.ImportStateIdFunc { + return func(s *terraform.State) 
(string, error) { + rs, ok := s.RootModule().Resources[resourceName] + if !ok { + return "", fmt.Errorf("Not found: %s", resourceName) + } + + webACLARN := rs.Primary.Attributes["web_acl_arn"] + ruleGroupARN := rs.Primary.Attributes["rule_group_reference.0.arn"] + ruleName := rs.Primary.Attributes["rule_name"] + + // Format: webACLARN,ruleName,ruleGroupType,ruleGroupIdentifier + return fmt.Sprintf("%s,%s,%s,%s", webACLARN, ruleName, "custom", ruleGroupARN), nil + } +} + +func testAccWebACLRuleGroupAssociationManagedRuleGroupImportStateIDFunc(resourceName string) resource.ImportStateIdFunc { + return func(s *terraform.State) (string, error) { + rs, ok := s.RootModule().Resources[resourceName] + if !ok { + return "", fmt.Errorf("Not found: %s", resourceName) + } + + webACLARN := rs.Primary.Attributes["web_acl_arn"] + vendorName := rs.Primary.Attributes["managed_rule_group.0.vendor_name"] + ruleGroupName := rs.Primary.Attributes["managed_rule_group.0.name"] + version := rs.Primary.Attributes["managed_rule_group.0.version"] + ruleName := rs.Primary.Attributes["rule_name"] + + // Build managed rule group identifier: vendorName:ruleGroupName[:version] + ruleGroupIdentifier := fmt.Sprintf("%s:%s", vendorName, ruleGroupName) + if version != "" { + ruleGroupIdentifier += ":" + version + } + + // Format: webACLARN,ruleName,ruleGroupType,ruleGroupIdentifier + return fmt.Sprintf("%s,%s,%s,%s", webACLARN, ruleName, "managed", ruleGroupIdentifier), nil + } +} + +func testAccWebACLRuleGroupAssociationConfig_RuleGroupReference_basic(rName string) string { + return fmt.Sprintf(` +resource "aws_wafv2_rule_group" "test" { + name = %[1]q + scope = "REGIONAL" + capacity = 10 + + rule { + name = "rule-1" + priority = 1 + + action { + count {} + } + + statement { + geo_match_statement { + country_codes = ["US", "CA"] + } + } + + visibility_config { + cloudwatch_metrics_enabled = false + metric_name = "rule-1" + sampled_requests_enabled = false + } + } + + visibility_config { + 
cloudwatch_metrics_enabled = false + metric_name = %[1]q + sampled_requests_enabled = false + } +} + +resource "aws_wafv2_web_acl" "test" { + name = %[1]q + scope = "REGIONAL" + + default_action { + allow {} + } + + visibility_config { + cloudwatch_metrics_enabled = false + metric_name = %[1]q + sampled_requests_enabled = false + } + + lifecycle { + ignore_changes = [rule] + } +} + +resource "aws_wafv2_web_acl_rule_group_association" "test" { + rule_name = "%[1]s-association" + priority = 10 + web_acl_arn = aws_wafv2_web_acl.test.arn + + rule_group_reference { + arn = aws_wafv2_rule_group.test.arn + } +} +`, rName) +} + +func testAccWebACLRuleGroupAssociationConfig_RuleGroupReference_overrideAction(rName, overrideAction string) string { + return fmt.Sprintf(` +resource "aws_wafv2_rule_group" "test" { + name = %[1]q + scope = "REGIONAL" + capacity = 10 + + rule { + name = "rule-1" + priority = 1 + + action { + block {} + } + + statement { + geo_match_statement { + country_codes = ["US", "CA"] + } + } + + visibility_config { + cloudwatch_metrics_enabled = false + metric_name = "rule-1" + sampled_requests_enabled = false + } + } + + visibility_config { + cloudwatch_metrics_enabled = false + metric_name = %[1]q + sampled_requests_enabled = false + } +} + +resource "aws_wafv2_web_acl" "test" { + name = %[1]q + scope = "REGIONAL" + + default_action { + allow {} + } + + visibility_config { + cloudwatch_metrics_enabled = false + metric_name = %[1]q + sampled_requests_enabled = false + } + + lifecycle { + ignore_changes = [rule] + } +} + +resource "aws_wafv2_web_acl_rule_group_association" "test" { + rule_name = "%[1]s-association" + priority = 10 + web_acl_arn = aws_wafv2_web_acl.test.arn + override_action = %[2]q + + rule_group_reference { + arn = aws_wafv2_rule_group.test.arn + } +} +`, rName, overrideAction) +} + +func testAccWebACLRuleGroupAssociationConfig_RuleGroupReference_ruleActionOverride(rName string) string { + return fmt.Sprintf(` +resource 
"aws_wafv2_rule_group" "test" { + name = %[1]q + scope = "REGIONAL" + capacity = 10 + + rule { + name = "rule-1" + priority = 1 + + action { + block {} + } + + statement { + geo_match_statement { + country_codes = ["US", "CA"] + } + } + + visibility_config { + cloudwatch_metrics_enabled = false + metric_name = "rule-1" + sampled_requests_enabled = false + } + } + + rule { + name = "rule-2" + priority = 2 + + action { + allow {} + } + + statement { + ip_set_reference_statement { + arn = aws_wafv2_ip_set.test.arn + } + } + + visibility_config { + cloudwatch_metrics_enabled = false + metric_name = "rule-2" + sampled_requests_enabled = false + } + } + + visibility_config { + cloudwatch_metrics_enabled = false + metric_name = %[1]q + sampled_requests_enabled = false + } +} + +resource "aws_wafv2_ip_set" "test" { + name = %[1]q + scope = "REGIONAL" + + ip_address_version = "IPV4" + addresses = ["192.0.2.0/24"] +} + +resource "aws_wafv2_web_acl" "test" { + name = %[1]q + scope = "REGIONAL" + + default_action { + allow {} + } + + visibility_config { + cloudwatch_metrics_enabled = false + metric_name = %[1]q + sampled_requests_enabled = false + } + + lifecycle { + ignore_changes = [rule] + } +} + +resource "aws_wafv2_web_acl_rule_group_association" "test" { + rule_name = "%[1]s-association" + priority = 10 + web_acl_arn = aws_wafv2_web_acl.test.arn + + rule_group_reference { + arn = aws_wafv2_rule_group.test.arn + + rule_action_override { + name = "rule-1" + action_to_use { + allow { + custom_request_handling { + insert_header { + name = "X-Custom-Header" + value = "custom-value" + } + } + } + } + } + + rule_action_override { + name = "rule-2" + action_to_use { + block { + custom_response { + response_code = 403 + response_header { + name = "X-Block-Reason" + value = "rule-override" + } + } + } + } + } + } +} +`, rName) +} + +func testAccWebACLRuleGroupAssociationConfig_RuleGroupReference_ruleActionOverrideCount(rName string) string { + return fmt.Sprintf(` +resource 
"aws_wafv2_rule_group" "test" { + name = %[1]q + scope = "REGIONAL" + capacity = 10 + + rule { + name = "rule-1" + priority = 1 + + action { + block {} + } + + statement { + geo_match_statement { + country_codes = ["US", "CA"] + } + } + + visibility_config { + cloudwatch_metrics_enabled = false + metric_name = "rule-1" + sampled_requests_enabled = false + } + } + + visibility_config { + cloudwatch_metrics_enabled = false + metric_name = %[1]q + sampled_requests_enabled = false + } +} + +resource "aws_wafv2_web_acl" "test" { + name = %[1]q + scope = "REGIONAL" + + default_action { + allow {} + } + + visibility_config { + cloudwatch_metrics_enabled = false + metric_name = %[1]q + sampled_requests_enabled = false + } + + lifecycle { + ignore_changes = [rule] + } +} + +resource "aws_wafv2_web_acl_rule_group_association" "test" { + rule_name = "%[1]s-association" + priority = 10 + web_acl_arn = aws_wafv2_web_acl.test.arn + + rule_group_reference { + arn = aws_wafv2_rule_group.test.arn + + rule_action_override { + name = "rule-1" + action_to_use { + count { + custom_request_handling { + insert_header { + name = "X-Count-Header" + value = "counted" + } + } + } + } + } + } +} +`, rName) +} + +func testAccWebACLRuleGroupAssociationConfig_RuleGroupReference_ruleActionOverrideCaptcha(rName string) string { + return fmt.Sprintf(` +resource "aws_wafv2_rule_group" "test" { + name = %[1]q + scope = "REGIONAL" + capacity = 10 + + rule { + name = "rule-1" + priority = 1 + + action { + block {} + } + + statement { + geo_match_statement { + country_codes = ["US", "CA"] + } + } + + visibility_config { + cloudwatch_metrics_enabled = false + metric_name = "rule-1" + sampled_requests_enabled = false + } + } + + visibility_config { + cloudwatch_metrics_enabled = false + metric_name = %[1]q + sampled_requests_enabled = false + } +} + +resource "aws_wafv2_web_acl" "test" { + name = %[1]q + scope = "REGIONAL" + + default_action { + allow {} + } + + visibility_config { + 
cloudwatch_metrics_enabled = false + metric_name = %[1]q + sampled_requests_enabled = false + } + + lifecycle { + ignore_changes = [rule] + } +} + +resource "aws_wafv2_web_acl_rule_group_association" "test" { + rule_name = "%[1]s-association" + priority = 10 + rule_group_reference { + arn = aws_wafv2_rule_group.test.arn + + rule_action_override { + name = "rule-1" + action_to_use { + captcha { + custom_request_handling { + insert_header { + name = "X-Captcha-Header" + value = "captcha-required" + } + } + } + } + } + } + web_acl_arn = aws_wafv2_web_acl.test.arn +} +`, rName) +} + +func testAccWebACLRuleGroupAssociationConfig_RuleGroupReference_priority(rName string, priority int) string { + return fmt.Sprintf(` +resource "aws_wafv2_rule_group" "test" { + name = %[1]q + scope = "REGIONAL" + capacity = 10 + + rule { + name = "rule-1" + priority = 1 + + action { + block {} + } + + statement { + geo_match_statement { + country_codes = ["US"] + } + } + + visibility_config { + cloudwatch_metrics_enabled = false + metric_name = "rule-1" + sampled_requests_enabled = false + } + } + + visibility_config { + cloudwatch_metrics_enabled = false + metric_name = %[1]q + sampled_requests_enabled = false + } +} + +resource "aws_wafv2_web_acl" "test" { + name = %[1]q + scope = "REGIONAL" + + default_action { + allow {} + } + + visibility_config { + cloudwatch_metrics_enabled = false + metric_name = %[1]q + sampled_requests_enabled = false + } + + lifecycle { + ignore_changes = [rule] + } +} + +resource "aws_wafv2_web_acl_rule_group_association" "test" { + rule_name = "%[1]s-association" + priority = %[2]d + rule_group_reference { + arn = aws_wafv2_rule_group.test.arn + } + web_acl_arn = aws_wafv2_web_acl.test.arn + override_action = "none" +} +`, rName, priority) +} + +func testAccWebACLRuleGroupAssociationConfig_RuleGroupReference_ruleName(rName, ruleName string) string { + return fmt.Sprintf(` +resource "aws_wafv2_rule_group" "test" { + name = %[1]q + scope = "REGIONAL" + capacity 
= 10 + + rule { + name = "rule-1" + priority = 1 + + action { + block {} + } + + statement { + geo_match_statement { + country_codes = ["US"] + } + } + + visibility_config { + cloudwatch_metrics_enabled = false + metric_name = "rule-1" + sampled_requests_enabled = false + } + } + + visibility_config { + cloudwatch_metrics_enabled = false + metric_name = %[1]q + sampled_requests_enabled = false + } +} + +resource "aws_wafv2_web_acl" "test" { + name = %[1]q + scope = "REGIONAL" + + default_action { + allow {} + } + + visibility_config { + cloudwatch_metrics_enabled = false + metric_name = %[1]q + sampled_requests_enabled = false + } + + lifecycle { + ignore_changes = [rule] + } +} + +resource "aws_wafv2_web_acl_rule_group_association" "test" { + rule_name = %[2]q + priority = 10 + rule_group_reference { + arn = aws_wafv2_rule_group.test.arn + } + web_acl_arn = aws_wafv2_web_acl.test.arn + override_action = "none" +} +`, rName, ruleName) +} + +func testAccWebACLRuleGroupAssociationConfig_RuleGroupReference_webACL(rName, webACLSuffix string) string { + return fmt.Sprintf(` +resource "aws_wafv2_rule_group" "test" { + name = %[1]q + scope = "REGIONAL" + capacity = 10 + + rule { + name = "rule-1" + priority = 1 + + action { + block {} + } + + statement { + geo_match_statement { + country_codes = ["US"] + } + } + + visibility_config { + cloudwatch_metrics_enabled = false + metric_name = "rule-1" + sampled_requests_enabled = false + } + } + + visibility_config { + cloudwatch_metrics_enabled = false + metric_name = %[1]q + sampled_requests_enabled = false + } +} + +resource "aws_wafv2_web_acl" "test" { + name = "%[1]s-%[2]s" + scope = "REGIONAL" + + default_action { + allow {} + } + + visibility_config { + cloudwatch_metrics_enabled = false + metric_name = "%[1]s-%[2]s" + sampled_requests_enabled = false + } + + lifecycle { + ignore_changes = [rule] + } +} + +resource "aws_wafv2_web_acl_rule_group_association" "test" { + rule_name = "%[1]s-association" + priority = 10 + 
rule_group_reference { + arn = aws_wafv2_rule_group.test.arn + } + web_acl_arn = aws_wafv2_web_acl.test.arn + override_action = "none" +} +`, rName, webACLSuffix) +} + +func testAccWebACLRuleGroupAssociationConfig_ManagedRuleGroup_basic(rName string) string { + return fmt.Sprintf(` +resource "aws_wafv2_web_acl" "test" { + name = %[1]q + scope = "REGIONAL" + + default_action { + allow {} + } + + visibility_config { + cloudwatch_metrics_enabled = false + metric_name = %[1]q + sampled_requests_enabled = false + } + + lifecycle { + ignore_changes = [rule] + } +} + +resource "aws_wafv2_web_acl_rule_group_association" "test" { + rule_name = "test-rule" + priority = 1 + web_acl_arn = aws_wafv2_web_acl.test.arn + + managed_rule_group { + name = "AWSManagedRulesCommonRuleSet" + vendor_name = "AWS" + } + + override_action = "none" +} +`, rName) +} + +func testAccWebACLRuleGroupAssociationConfig_ManagedRuleGroup_withVersion(rName string) string { + return fmt.Sprintf(` +resource "aws_wafv2_web_acl" "test" { + name = %[1]q + scope = "REGIONAL" + + default_action { + allow {} + } + + visibility_config { + cloudwatch_metrics_enabled = false + metric_name = %[1]q + sampled_requests_enabled = false + } + + lifecycle { + ignore_changes = [rule] + } +} + +resource "aws_wafv2_web_acl_rule_group_association" "test" { + rule_name = "test-rule" + priority = 1 + web_acl_arn = aws_wafv2_web_acl.test.arn + + managed_rule_group { + name = "AWSManagedRulesCommonRuleSet" + vendor_name = "AWS" + version = "Version_1.0" + } + + override_action = "none" +} +`, rName) +} + +func testAccWebACLRuleGroupAssociationConfig_ManagedRuleGroup_ruleActionOverride(rName string) string { + return fmt.Sprintf(` +resource "aws_wafv2_web_acl" "test" { + name = %[1]q + scope = "REGIONAL" + + default_action { + allow {} + } + + visibility_config { + cloudwatch_metrics_enabled = false + metric_name = %[1]q + sampled_requests_enabled = false + } + + lifecycle { + ignore_changes = [rule] + } +} + +resource 
"aws_wafv2_web_acl_rule_group_association" "test" { + rule_name = "test-rule" + priority = 1 + web_acl_arn = aws_wafv2_web_acl.test.arn + + managed_rule_group { + name = "AWSManagedRulesCommonRuleSet" + vendor_name = "AWS" + + rule_action_override { + name = "GenericRFI_BODY" + action_to_use { + count {} + } + } + } + + override_action = "none" +} +`, rName) +} diff --git a/internal/service/wafv2/web_acl_test.go b/internal/service/wafv2/web_acl_test.go index 7eaf3e23dcaf..ca5d3d0dbebb 100644 --- a/internal/service/wafv2/web_acl_test.go +++ b/internal/service/wafv2/web_acl_test.go @@ -2069,6 +2069,7 @@ func TestAccWAFV2WebACL_RateBased_customKeys(t *testing.T) { "statement.0.rate_based_statement.0.forwarded_ip_config.#": "0", "statement.0.rate_based_statement.0.limit": "50000", "statement.0.rate_based_statement.0.scope_down_statement.#": "0", + "statement.0.rate_based_statement.0.custom_key.0.asn.#": "0", "statement.0.rate_based_statement.0.custom_key.0.cookie.#": "1", "statement.0.rate_based_statement.0.custom_key.0.forwarded_ip.#": "0", "statement.0.rate_based_statement.0.custom_key.0.http_method.#": "0", @@ -2098,6 +2099,7 @@ func TestAccWAFV2WebACL_RateBased_customKeys(t *testing.T) { "statement.0.rate_based_statement.0.forwarded_ip_config.#": "1", "statement.0.rate_based_statement.0.limit": "50000", "statement.0.rate_based_statement.0.scope_down_statement.#": "0", + "statement.0.rate_based_statement.0.custom_key.0.asn.#": "0", "statement.0.rate_based_statement.0.custom_key.0.cookie.#": "1", "statement.0.rate_based_statement.0.custom_key.0.forwarded_ip.#": "0", "statement.0.rate_based_statement.0.custom_key.0.http_method.#": "0", @@ -2127,6 +2129,7 @@ func TestAccWAFV2WebACL_RateBased_customKeys(t *testing.T) { "statement.0.rate_based_statement.0.forwarded_ip_config.#": "0", "statement.0.rate_based_statement.0.limit": "50000", "statement.0.rate_based_statement.0.scope_down_statement.#": "0", + "statement.0.rate_based_statement.0.custom_key.0.asn.#": "0", 
"statement.0.rate_based_statement.0.custom_key.0.cookie.#": "0", "statement.0.rate_based_statement.0.custom_key.0.forwarded_ip.#": "0", "statement.0.rate_based_statement.0.custom_key.0.http_method.#": "1", @@ -2155,6 +2158,7 @@ func TestAccWAFV2WebACL_RateBased_customKeys(t *testing.T) { "statement.0.rate_based_statement.0.forwarded_ip_config.#": "0", "statement.0.rate_based_statement.0.limit": "50000", "statement.0.rate_based_statement.0.scope_down_statement.#": "0", + "statement.0.rate_based_statement.0.custom_key.0.asn.#": "0", "statement.0.rate_based_statement.0.custom_key.0.cookie.#": "0", "statement.0.rate_based_statement.0.custom_key.0.forwarded_ip.#": "0", "statement.0.rate_based_statement.0.custom_key.0.http_method.#": "0", @@ -2184,6 +2188,7 @@ func TestAccWAFV2WebACL_RateBased_customKeys(t *testing.T) { "statement.0.rate_based_statement.0.forwarded_ip_config.#": "0", "statement.0.rate_based_statement.0.limit": "50000", "statement.0.rate_based_statement.0.scope_down_statement.#": "0", + "statement.0.rate_based_statement.0.custom_key.0.asn.#": "0", "statement.0.rate_based_statement.0.custom_key.0.cookie.#": "1", "statement.0.rate_based_statement.0.custom_key.0.forwarded_ip.#": "0", "statement.0.rate_based_statement.0.custom_key.0.http_method.#": "0", @@ -2213,6 +2218,7 @@ func TestAccWAFV2WebACL_RateBased_customKeys(t *testing.T) { "statement.0.rate_based_statement.0.forwarded_ip_config.#": "0", "statement.0.rate_based_statement.0.limit": "50000", "statement.0.rate_based_statement.0.scope_down_statement.#": "0", + "statement.0.rate_based_statement.0.custom_key.0.asn.#": "0", "statement.0.rate_based_statement.0.custom_key.0.cookie.#": "0", "statement.0.rate_based_statement.0.custom_key.0.forwarded_ip.#": "0", "statement.0.rate_based_statement.0.custom_key.0.http_method.#": "0", @@ -2242,6 +2248,7 @@ func TestAccWAFV2WebACL_RateBased_customKeys(t *testing.T) { "statement.0.rate_based_statement.0.forwarded_ip_config.#": "0", 
"statement.0.rate_based_statement.0.limit": "50000", "statement.0.rate_based_statement.0.scope_down_statement.#": "0", + "statement.0.rate_based_statement.0.custom_key.0.asn.#": "0", "statement.0.rate_based_statement.0.custom_key.0.cookie.#": "0", "statement.0.rate_based_statement.0.custom_key.0.forwarded_ip.#": "0", "statement.0.rate_based_statement.0.custom_key.0.http_method.#": "0", @@ -2271,6 +2278,7 @@ func TestAccWAFV2WebACL_RateBased_customKeys(t *testing.T) { "statement.0.rate_based_statement.0.forwarded_ip_config.#": "0", "statement.0.rate_based_statement.0.limit": "50000", "statement.0.rate_based_statement.0.scope_down_statement.#": "0", + "statement.0.rate_based_statement.0.custom_key.0.asn.#": "0", "statement.0.rate_based_statement.0.custom_key.0.cookie.#": "0", "statement.0.rate_based_statement.0.custom_key.0.forwarded_ip.#": "0", "statement.0.rate_based_statement.0.custom_key.0.http_method.#": "0", @@ -2319,6 +2327,7 @@ func TestAccWAFV2WebACL_RateBased_customKeys(t *testing.T) { "statement.0.rate_based_statement.0.forwarded_ip_config.#": "0", "statement.0.rate_based_statement.0.limit": "50000", "statement.0.rate_based_statement.0.scope_down_statement.#": "0", + "statement.0.rate_based_statement.0.custom_key.0.asn.#": "0", "statement.0.rate_based_statement.0.custom_key.0.cookie.#": "1", "statement.0.rate_based_statement.0.custom_key.0.forwarded_ip.#": "0", "statement.0.rate_based_statement.0.custom_key.0.http_method.#": "0", @@ -2347,6 +2356,7 @@ func TestAccWAFV2WebACL_RateBased_customKeys(t *testing.T) { "statement.0.rate_based_statement.0.evaluation_window_sec": "300", "statement.0.rate_based_statement.0.limit": "50000", "statement.0.rate_based_statement.0.scope_down_statement.#": "0", + "statement.0.rate_based_statement.0.custom_key.0.asn.#": "0", "statement.0.rate_based_statement.0.custom_key.0.cookie.#": "1", "statement.0.rate_based_statement.0.custom_key.0.forwarded_ip.#": "0", "statement.0.rate_based_statement.0.custom_key.0.http_method.#": 
"0", @@ -2377,6 +2387,7 @@ func TestAccWAFV2WebACL_RateBased_customKeys(t *testing.T) { "statement.0.rate_based_statement.0.forwarded_ip_config.#": "0", "statement.0.rate_based_statement.0.limit": "50000", "statement.0.rate_based_statement.0.scope_down_statement.#": "0", + "statement.0.rate_based_statement.0.custom_key.0.asn.#": "0", "statement.0.rate_based_statement.0.custom_key.0.cookie.#": "1", "statement.0.rate_based_statement.0.custom_key.0.forwarded_ip.#": "0", "statement.0.rate_based_statement.0.custom_key.0.http_method.#": "0", @@ -2405,6 +2416,7 @@ func TestAccWAFV2WebACL_RateBased_customKeys(t *testing.T) { "statement.0.rate_based_statement.0.evaluation_window_sec": "300", "statement.0.rate_based_statement.0.limit": "50000", "statement.0.rate_based_statement.0.scope_down_statement.#": "0", + "statement.0.rate_based_statement.0.custom_key.0.asn.#": "0", "statement.0.rate_based_statement.0.custom_key.0.cookie.#": "1", "statement.0.rate_based_statement.0.custom_key.0.forwarded_ip.#": "0", "statement.0.rate_based_statement.0.custom_key.0.http_method.#": "0", @@ -2419,6 +2431,65 @@ func TestAccWAFV2WebACL_RateBased_customKeys(t *testing.T) { }), ), }, + { + Config: testAccWebACLConfig_rateBasedStatement_customKeysBasic(webACLName, names.AttrHeader, "x-forwrded-for"), + Check: resource.ComposeTestCheckFunc( + testAccCheckWebACLExists(ctx, resourceName, &v), + acctest.MatchResourceAttrRegionalARN(ctx, resourceName, names.AttrARN, "wafv2", regexache.MustCompile(`regional/webacl/.+$`)), + resource.TestCheckResourceAttr(resourceName, names.AttrName, webACLName), + resource.TestCheckResourceAttr(resourceName, acctest.CtRulePound, "1"), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "rule.*", map[string]string{ + "statement.#": "1", + "statement.0.rate_based_statement.#": "1", + "statement.0.rate_based_statement.0.custom_key.#": "1", + "statement.0.rate_based_statement.0.aggregate_key_type": "CUSTOM_KEYS", + 
"statement.0.rate_based_statement.0.evaluation_window_sec": "300", + "statement.0.rate_based_statement.0.forwarded_ip_config.#": "0", + "statement.0.rate_based_statement.0.limit": "50000", + "statement.0.rate_based_statement.0.scope_down_statement.#": "0", + "statement.0.rate_based_statement.0.custom_key.0.asn.#": "0", + "statement.0.rate_based_statement.0.custom_key.0.cookie.#": "0", + "statement.0.rate_based_statement.0.custom_key.0.forwarded_ip.#": "0", + "statement.0.rate_based_statement.0.custom_key.0.http_method.#": "0", + "statement.0.rate_based_statement.0.custom_key.0.header.#": "1", + "statement.0.rate_based_statement.0.custom_key.0.ip.#": "0", + "statement.0.rate_based_statement.0.custom_key.0.label_namespace.#": "0", + "statement.0.rate_based_statement.0.custom_key.0.query_argument.#": "0", + "statement.0.rate_based_statement.0.custom_key.0.query_string.#": "0", + "statement.0.rate_based_statement.0.custom_key.0.uri_path.#": "0", + "statement.0.rate_based_statement.0.custom_key.0.header.0.text_transformation.#": "1", + }), + ), + }, + { + Config: testAccWebACLConfig_rateBasedStatement_customKeysASN(webACLName), + Check: resource.ComposeTestCheckFunc( + testAccCheckWebACLExists(ctx, resourceName, &v), + acctest.MatchResourceAttrRegionalARN(ctx, resourceName, names.AttrARN, "wafv2", regexache.MustCompile(`regional/webacl/.+$`)), + resource.TestCheckResourceAttr(resourceName, names.AttrName, webACLName), + resource.TestCheckResourceAttr(resourceName, acctest.CtRulePound, "1"), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "rule.*", map[string]string{ + "statement.#": "1", + "statement.0.rate_based_statement.#": "1", + "statement.0.rate_based_statement.0.custom_key.#": "1", + "statement.0.rate_based_statement.0.aggregate_key_type": "CUSTOM_KEYS", + "statement.0.rate_based_statement.0.evaluation_window_sec": "300", + "statement.0.rate_based_statement.0.forwarded_ip_config.#": "0", + "statement.0.rate_based_statement.0.limit": "50000", + 
"statement.0.rate_based_statement.0.scope_down_statement.#": "0", + "statement.0.rate_based_statement.0.custom_key.0.asn.#": "1", + "statement.0.rate_based_statement.0.custom_key.0.cookie.#": "0", + "statement.0.rate_based_statement.0.custom_key.0.forwarded_ip.#": "0", + "statement.0.rate_based_statement.0.custom_key.0.http_method.#": "0", + "statement.0.rate_based_statement.0.custom_key.0.header.#": "0", + "statement.0.rate_based_statement.0.custom_key.0.ip.#": "0", + "statement.0.rate_based_statement.0.custom_key.0.label_namespace.#": "0", + "statement.0.rate_based_statement.0.custom_key.0.query_argument.#": "0", + "statement.0.rate_based_statement.0.custom_key.0.query_string.#": "0", + "statement.0.rate_based_statement.0.custom_key.0.uri_path.#": "0", + }), + ), + }, { ResourceName: resourceName, ImportState: true, @@ -3370,6 +3441,61 @@ func TestAccWAFV2WebACL_CloudFrontScope(t *testing.T) { }) } +func TestAccWAFV2WebACL_CloudFrontScopeResponseInspectionHeader(t *testing.T) { + ctx := acctest.Context(t) + var v awstypes.WebACL + webACLName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_wafv2_web_acl.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckWAFV2CloudFrontScope(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.WAFV2ServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckWebACLDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccWebACLConfig_CloudFrontScopeResponseInspectionHeader(webACLName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckWebACLExists(ctx, resourceName, &v), + acctest.MatchResourceAttrRegionalARN(ctx, resourceName, names.AttrARN, "wafv2", regexache.MustCompile(`global/webacl/.+$`)), + resource.TestCheckResourceAttr(resourceName, names.AttrName, webACLName), + resource.TestCheckResourceAttr(resourceName, names.AttrScope, string(awstypes.ScopeCloudfront)), + 
resource.TestCheckResourceAttr(resourceName, acctest.CtRulePound, "1"), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "rule.*", map[string]string{ + names.AttrName: "rule-1", + "statement.#": "1", + "statement.0.managed_rule_group_statement.#": "1", + "statement.0.managed_rule_group_statement.0.managed_rule_group_configs.0.aws_managed_rules_atp_rule_set.#": "1", + "statement.0.managed_rule_group_statement.0.managed_rule_group_configs.0.aws_managed_rules_atp_rule_set.0.login_path": "/api/1/signin", + "statement.0.managed_rule_group_statement.0.managed_rule_group_configs.0.aws_managed_rules_atp_rule_set.0.request_inspection.#": "1", + "statement.0.managed_rule_group_statement.0.managed_rule_group_configs.0.aws_managed_rules_atp_rule_set.0.request_inspection.0.password_field.#": "1", + "statement.0.managed_rule_group_statement.0.managed_rule_group_configs.0.aws_managed_rules_atp_rule_set.0.request_inspection.0.password_field.0.identifier": "/password", + "statement.0.managed_rule_group_statement.0.managed_rule_group_configs.0.aws_managed_rules_atp_rule_set.0.request_inspection.0.payload_type": "JSON", + "statement.0.managed_rule_group_statement.0.managed_rule_group_configs.0.aws_managed_rules_atp_rule_set.0.request_inspection.0.username_field.#": "1", + "statement.0.managed_rule_group_statement.0.managed_rule_group_configs.0.aws_managed_rules_atp_rule_set.0.request_inspection.0.username_field.0.identifier": "/username", + "statement.0.managed_rule_group_statement.0.managed_rule_group_configs.0.aws_managed_rules_atp_rule_set.0.response_inspection.#": "1", + "statement.0.managed_rule_group_statement.0.managed_rule_group_configs.0.aws_managed_rules_atp_rule_set.0.response_inspection.0.header.#": "1", + "statement.0.managed_rule_group_statement.0.managed_rule_group_configs.0.aws_managed_rules_atp_rule_set.0.response_inspection.0.header.0.name": "sample-header", + 
"statement.0.managed_rule_group_statement.0.managed_rule_group_configs.0.aws_managed_rules_atp_rule_set.0.response_inspection.0.header.0.success_values.#": "1", + "statement.0.managed_rule_group_statement.0.managed_rule_group_configs.0.aws_managed_rules_atp_rule_set.0.response_inspection.0.header.0.success_values.0": "f1", + "statement.0.managed_rule_group_statement.0.managed_rule_group_configs.0.aws_managed_rules_atp_rule_set.0.response_inspection.0.header.0.failure_values.#": "1", + "statement.0.managed_rule_group_statement.0.managed_rule_group_configs.0.aws_managed_rules_atp_rule_set.0.response_inspection.0.header.0.failure_values.0": "f2", + }), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateIdFunc: testAccWebACLImportStateIdFunc(resourceName), + }, + }, + }) +} + func TestAccWAFV2WebACL_ruleJSON(t *testing.T) { ctx := acctest.Context(t) var v awstypes.WebACL @@ -6433,6 +6559,57 @@ resource "aws_wafv2_web_acl" "test" { `, rName) } +func testAccWebACLConfig_rateBasedStatement_customKeysASN(rName string) string { + return fmt.Sprintf(` +resource "aws_wafv2_web_acl" "test" { + name = %[1]q + description = %[1]q + scope = "REGIONAL" + + default_action { + allow {} + } + + rule { + name = "rule-1" + priority = 1 + + action { + count {} + } + + statement { + rate_based_statement { + aggregate_key_type = "CUSTOM_KEYS" + limit = 50000 + + custom_key { + asn {} + } + } + } + + visibility_config { + cloudwatch_metrics_enabled = false + metric_name = "friendly-rule-metric-name" + sampled_requests_enabled = false + } + } + + tags = { + Tag1 = "Value1" + Tag2 = "Value2" + } + + visibility_config { + cloudwatch_metrics_enabled = false + metric_name = "friendly-metric-name" + sampled_requests_enabled = false + } +} +`, rName) +} + func testAccWebACLConfig_rateBasedStatementUpdate(rName string) string { return fmt.Sprintf(` resource "aws_wafv2_web_acl" "test" { @@ -7127,6 +7304,69 @@ resource "aws_wafv2_web_acl" "test" 
{ `, rName) } +func testAccWebACLConfig_CloudFrontScopeResponseInspectionHeader(rName string) string { + return fmt.Sprintf(` +resource "aws_wafv2_web_acl" "test" { + name = %[1]q + description = %[1]q + scope = "CLOUDFRONT" + + default_action { + allow {} + } + + rule { + name = "rule-1" + priority = 1 + + override_action { + count {} + } + + statement { + managed_rule_group_statement { + name = "AWSManagedRulesATPRuleSet" + vendor_name = "AWS" + + managed_rule_group_configs { + aws_managed_rules_atp_rule_set { + login_path = "/api/1/signin" + request_inspection { + password_field { + identifier = "/password" + } + payload_type = "JSON" + username_field { + identifier = "/username" + } + } + response_inspection { + header { + name = "sample-header" + success_values = ["f1"] + failure_values = ["f2"] + } + } + } + } + } + } + visibility_config { + cloudwatch_metrics_enabled = true + metric_name = "AWSManagedRulesATPRuleSet_json" + sampled_requests_enabled = true + } + } + + visibility_config { + cloudwatch_metrics_enabled = false + metric_name = "friendly-metric-name" + sampled_requests_enabled = false + } +} +`, rName) +} + func testAccWebACLConfig_associationConfigCloudFront(rName string) string { return fmt.Sprintf(` resource "aws_wafv2_web_acl" "test" { diff --git a/internal/service/wellarchitected/service_endpoint_resolver_gen.go b/internal/service/wellarchitected/service_endpoint_resolver_gen.go index 37d1771b72af..ccce2685f172 100644 --- a/internal/service/wellarchitected/service_endpoint_resolver_gen.go +++ b/internal/service/wellarchitected/service_endpoint_resolver_gen.go @@ -62,7 +62,7 @@ func (r resolverV2) ResolveEndpoint(ctx context.Context, params wellarchitected. 
}) params.UseFIPS = aws.Bool(false) } else { - err = fmt.Errorf("looking up wellarchitected endpoint %q: %s", hostname, err) + err = fmt.Errorf("looking up wellarchitected endpoint %q: %w", hostname, err) return } } else { diff --git a/internal/service/wellarchitected/service_endpoints_gen_test.go b/internal/service/wellarchitected/service_endpoints_gen_test.go index b6bd0d8a32a2..7043a627348a 100644 --- a/internal/service/wellarchitected/service_endpoints_gen_test.go +++ b/internal/service/wellarchitected/service_endpoints_gen_test.go @@ -521,7 +521,7 @@ func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { ) } -var errCancelOperation = fmt.Errorf("Test: Canceling request") +var errCancelOperation = errors.New("Test: Canceling request") func addCancelRequestMiddleware() func(*middleware.Stack) error { return func(stack *middleware.Stack) error { diff --git a/internal/service/wellarchitected/service_package_gen.go b/internal/service/wellarchitected/service_package_gen.go index 7fcc663c2497..d6b0b8ef68a7 100644 --- a/internal/service/wellarchitected/service_package_gen.go +++ b/internal/service/wellarchitected/service_package_gen.go @@ -6,7 +6,6 @@ import ( "context" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/wellarchitected" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -56,7 +55,7 @@ func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) ( func(o *wellarchitected.Options) { if inContext, ok := conns.FromContext(ctx); ok && inContext.VCREnabled() { tflog.Info(ctx, "overriding retry behavior to immediately return VCR errors") - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(vcr.InteractionNotFoundRetryableFunc)) + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), vcr.InteractionNotFoundRetryableFunc) } }, 
withExtraOptions(ctx, p, config), diff --git a/internal/service/wellarchitected/tags_gen.go b/internal/service/wellarchitected/tags_gen.go index 071fdb5d1c94..7dec99bba995 100644 --- a/internal/service/wellarchitected/tags_gen.go +++ b/internal/service/wellarchitected/tags_gen.go @@ -3,8 +3,8 @@ package wellarchitected import ( "context" - "fmt" + "github.com/YakDriver/smarterr" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/wellarchitected" "github.com/hashicorp/terraform-plugin-log/tflog" @@ -26,7 +26,7 @@ func listTags(ctx context.Context, conn *wellarchitected.Client, identifier stri output, err := conn.ListTagsForResource(ctx, &input, optFns...) if err != nil { - return tftags.New(ctx, nil), err + return tftags.New(ctx, nil), smarterr.NewError(err) } return keyValueTags(ctx, output.Tags), nil @@ -38,7 +38,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri tags, err := listTags(ctx, meta.(*conns.AWSClient).WellArchitectedClient(ctx), identifier) if err != nil { - return err + return smarterr.NewError(err) } if inContext, ok := tftags.FromContext(ctx); ok { @@ -99,7 +99,7 @@ func updateTags(ctx context.Context, conn *wellarchitected.Client, identifier st _, err := conn.UntagResource(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("untagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } @@ -114,7 +114,7 @@ func updateTags(ctx context.Context, conn *wellarchitected.Client, identifier st _, err := conn.TagResource(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("tagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } diff --git a/internal/service/workmail/generate.go b/internal/service/workmail/generate.go new file mode 100644 index 000000000000..568013754638 --- /dev/null +++ b/internal/service/workmail/generate.go @@ -0,0 +1,8 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +//go:generate go run ../../generate/servicepackage/main.go +//go:generate go run ../../generate/tags/main.go -ServiceTagsSlice -ListTags -ListTagsInIDElem=ResourceARN -UpdateTags -TagInIDElem=ResourceARN +// ONLY generate directives and package declaration! Do not add anything else to this file. + +package workmail diff --git a/internal/service/workmail/service_endpoint_resolver_gen.go b/internal/service/workmail/service_endpoint_resolver_gen.go new file mode 100644 index 000000000000..285133ce4852 --- /dev/null +++ b/internal/service/workmail/service_endpoint_resolver_gen.go @@ -0,0 +1,82 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. + +package workmail + +import ( + "context" + "fmt" + "net" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/workmail" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ workmail.EndpointResolverV2 = resolverV2{} + +type resolverV2 struct { + defaultResolver workmail.EndpointResolverV2 +} + +func newEndpointResolverV2() resolverV2 { + return resolverV2{ + defaultResolver: workmail.NewDefaultEndpointResolverV2(), + } +} + +func (r resolverV2) ResolveEndpoint(ctx context.Context, params workmail.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws.ToBool(params.UseFIPS) + + if eps := params.Endpoint; aws.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": endpoint, + }) + + if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + params.UseFIPS = aws.Bool(false) + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) + if 
err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URI.String(), + }) + + hostname := endpoint.URI.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + params.UseFIPS = aws.Bool(false) + } else { + err = fmt.Errorf("looking up workmail endpoint %q: %w", hostname, err) + return + } + } else { + return endpoint, err + } + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) +} + +func withBaseEndpoint(endpoint string) func(*workmail.Options) { + return func(o *workmail.Options) { + if endpoint != "" { + o.BaseEndpoint = aws.String(endpoint) + } + } +} diff --git a/internal/service/workmail/service_endpoints_gen_test.go b/internal/service/workmail/service_endpoints_gen_test.go new file mode 100644 index 000000000000..7a5ea3fe0ca3 --- /dev/null +++ b/internal/service/workmail/service_endpoints_gen_test.go @@ -0,0 +1,604 @@ +// Code generated by internal/generate/serviceendpointtests/main.go; DO NOT EDIT. 
+ +package workmail_test + +import ( + "context" + "errors" + "fmt" + "maps" + "net" + "net/url" + "os" + "path/filepath" + "reflect" + "strings" + "testing" + + "github.com/aws/aws-sdk-go-v2/aws" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/workmail" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "github.com/google/go-cmp/cmp" + "github.com/hashicorp/aws-sdk-go-base/v2/servicemocks" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + terraformsdk "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/errs" + "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" + "github.com/hashicorp/terraform-provider-aws/internal/provider/sdkv2" + "github.com/hashicorp/terraform-provider-aws/names" +) + +type endpointTestCase struct { + with []setupFunc + expected caseExpectations +} + +type caseSetup struct { + config map[string]any + configFile configFile + environmentVariables map[string]string +} + +type configFile struct { + baseUrl string + serviceUrl string +} + +type caseExpectations struct { + diags diag.Diagnostics + endpoint string + region string +} + +type apiCallParams struct { + endpoint string + region string +} + +type setupFunc func(setup *caseSetup) + +type callFunc func(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams + +const ( + packageNameConfigEndpoint = "https://packagename-config.endpoint.test/" + awsServiceEnvvarEndpoint = "https://service-envvar.endpoint.test/" + baseEnvvarEndpoint = "https://base-envvar.endpoint.test/" + serviceConfigFileEndpoint = "https://service-configfile.endpoint.test/" + baseConfigFileEndpoint = "https://base-configfile.endpoint.test/" +) + +const ( + packageName = "workmail" + awsEnvVar = "AWS_ENDPOINT_URL_WORKMAIL" + baseEnvVar = "AWS_ENDPOINT_URL" + 
configParam = "workmail" +) + +const ( + expectedCallRegion = "us-west-2" //lintignore:AWSAT003 +) + +func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.Setenv + ctx := t.Context() + const providerRegion = "us-west-2" //lintignore:AWSAT003 + const expectedEndpointRegion = providerRegion + + testcases := map[string]endpointTestCase{ + "no config": { + with: []setupFunc{withNoConfig}, + expected: expectDefaultEndpoint(ctx, t, expectedEndpointRegion), + }, + + // Package name endpoint on Config + + "package name endpoint config": { + with: []setupFunc{ + withPackageNameEndpointInConfig, + }, + expected: expectPackageNameConfigEndpoint(), + }, + + "package name endpoint config overrides aws service envvar": { + with: []setupFunc{ + withPackageNameEndpointInConfig, + withAwsEnvVar, + }, + expected: expectPackageNameConfigEndpoint(), + }, + + "package name endpoint config overrides base envvar": { + with: []setupFunc{ + withPackageNameEndpointInConfig, + withBaseEnvVar, + }, + expected: expectPackageNameConfigEndpoint(), + }, + + "package name endpoint config overrides service config file": { + with: []setupFunc{ + withPackageNameEndpointInConfig, + withServiceEndpointInConfigFile, + }, + expected: expectPackageNameConfigEndpoint(), + }, + + "package name endpoint config overrides base config file": { + with: []setupFunc{ + withPackageNameEndpointInConfig, + withBaseEndpointInConfigFile, + }, + expected: expectPackageNameConfigEndpoint(), + }, + + // Service endpoint in AWS envvar + + "service aws envvar": { + with: []setupFunc{ + withAwsEnvVar, + }, + expected: expectAwsEnvVarEndpoint(), + }, + + "service aws envvar overrides base envvar": { + with: []setupFunc{ + withAwsEnvVar, + withBaseEnvVar, + }, + expected: expectAwsEnvVarEndpoint(), + }, + + "service aws envvar overrides service config file": { + with: []setupFunc{ + withAwsEnvVar, + withServiceEndpointInConfigFile, + }, + expected: expectAwsEnvVarEndpoint(), + }, + + "service aws 
envvar overrides base config file": { + with: []setupFunc{ + withAwsEnvVar, + withBaseEndpointInConfigFile, + }, + expected: expectAwsEnvVarEndpoint(), + }, + + // Base endpoint in envvar + + "base endpoint envvar": { + with: []setupFunc{ + withBaseEnvVar, + }, + expected: expectBaseEnvVarEndpoint(), + }, + + "base endpoint envvar overrides service config file": { + with: []setupFunc{ + withBaseEnvVar, + withServiceEndpointInConfigFile, + }, + expected: expectBaseEnvVarEndpoint(), + }, + + "base endpoint envvar overrides base config file": { + with: []setupFunc{ + withBaseEnvVar, + withBaseEndpointInConfigFile, + }, + expected: expectBaseEnvVarEndpoint(), + }, + + // Service endpoint in config file + + "service config file": { + with: []setupFunc{ + withServiceEndpointInConfigFile, + }, + expected: expectServiceConfigFileEndpoint(), + }, + + "service config file overrides base config file": { + with: []setupFunc{ + withServiceEndpointInConfigFile, + withBaseEndpointInConfigFile, + }, + expected: expectServiceConfigFileEndpoint(), + }, + + // Base endpoint in config file + + "base endpoint config file": { + with: []setupFunc{ + withBaseEndpointInConfigFile, + }, + expected: expectBaseConfigFileEndpoint(), + }, + + // Use FIPS endpoint on Config + + "use fips config": { + with: []setupFunc{ + withUseFIPSInConfig, + }, + expected: expectDefaultFIPSEndpoint(ctx, t, expectedEndpointRegion), + }, + + "use fips config with package name endpoint config": { + with: []setupFunc{ + withUseFIPSInConfig, + withPackageNameEndpointInConfig, + }, + expected: expectPackageNameConfigEndpoint(), + }, + } + + for name, testcase := range testcases { //nolint:paralleltest // uses t.Setenv + t.Run(name, func(t *testing.T) { + testEndpointCase(ctx, t, providerRegion, testcase, callService) + }) + } +} + +func defaultEndpoint(ctx context.Context, region string) (url.URL, error) { + r := workmail.NewDefaultEndpointResolverV2() + + ep, err := r.ResolveEndpoint(ctx, 
workmail.EndpointParameters{ + Region: aws.String(region), + }) + if err != nil { + return url.URL{}, err + } + + if ep.URI.Path == "" { + ep.URI.Path = "/" + } + + return ep.URI, nil +} + +func defaultFIPSEndpoint(ctx context.Context, region string) (url.URL, error) { + r := workmail.NewDefaultEndpointResolverV2() + + ep, err := r.ResolveEndpoint(ctx, workmail.EndpointParameters{ + Region: aws.String(region), + UseFIPS: aws.Bool(true), + }) + if err != nil { + return url.URL{}, err + } + + if ep.URI.Path == "" { + ep.URI.Path = "/" + } + + return ep.URI, nil +} + +func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { + t.Helper() + + client := meta.WorkMailClient(ctx) + + var result apiCallParams + + input := workmail.ListResourcesInput{ + OrganizationId: aws.String("m-12345678901234567890123456789012"), + } + _, err := client.ListResources(ctx, &input, + func(opts *workmail.Options) { + opts.APIOptions = append(opts.APIOptions, + addRetrieveEndpointURLMiddleware(t, &result.endpoint), + addRetrieveRegionMiddleware(&result.region), + addCancelRequestMiddleware(), + ) + }, + ) + if err == nil { + t.Fatal("Expected an error, got none") + } else if !errors.Is(err, errCancelOperation) { + t.Fatalf("Unexpected error: %s", err) + } + + return result +} + +func withNoConfig(_ *caseSetup) { + // no-op +} + +func withPackageNameEndpointInConfig(setup *caseSetup) { + if _, ok := setup.config[names.AttrEndpoints]; !ok { + setup.config[names.AttrEndpoints] = []any{ + map[string]any{}, + } + } + endpoints := setup.config[names.AttrEndpoints].([]any)[0].(map[string]any) + endpoints[packageName] = packageNameConfigEndpoint +} + +func withAwsEnvVar(setup *caseSetup) { + setup.environmentVariables[awsEnvVar] = awsServiceEnvvarEndpoint +} + +func withBaseEnvVar(setup *caseSetup) { + setup.environmentVariables[baseEnvVar] = baseEnvvarEndpoint +} + +func withServiceEndpointInConfigFile(setup *caseSetup) { + setup.configFile.serviceUrl = 
serviceConfigFileEndpoint +} + +func withBaseEndpointInConfigFile(setup *caseSetup) { + setup.configFile.baseUrl = baseConfigFileEndpoint +} + +func withUseFIPSInConfig(setup *caseSetup) { + setup.config["use_fips_endpoint"] = true +} + +func expectDefaultEndpoint(ctx context.Context, t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(ctx, region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + + return caseExpectations{ + endpoint: endpoint.String(), + region: expectedCallRegion, + } +} + +func expectDefaultFIPSEndpoint(ctx context.Context, t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(ctx, region) + if err != nil { + t.Fatalf("resolving accessanalyzer FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(ctx, t, region) + } else if err != nil { + t.Fatalf("looking up accessanalyzer endpoint %q: %s", hostname, err) + } + + return caseExpectations{ + endpoint: endpoint.String(), + region: expectedCallRegion, + } +} + +func expectPackageNameConfigEndpoint() caseExpectations { + return caseExpectations{ + endpoint: packageNameConfigEndpoint, + region: expectedCallRegion, + } +} + +func expectAwsEnvVarEndpoint() caseExpectations { + return caseExpectations{ + endpoint: awsServiceEnvvarEndpoint, + region: expectedCallRegion, + } +} + +func expectBaseEnvVarEndpoint() caseExpectations { + return caseExpectations{ + endpoint: baseEnvvarEndpoint, + region: expectedCallRegion, + } +} + +func expectServiceConfigFileEndpoint() caseExpectations { + return caseExpectations{ + endpoint: serviceConfigFileEndpoint, + region: expectedCallRegion, + } +} + +func expectBaseConfigFileEndpoint() caseExpectations { + return caseExpectations{ + endpoint: baseConfigFileEndpoint, + region: 
expectedCallRegion, + } +} + +func testEndpointCase(ctx context.Context, t *testing.T, region string, testcase endpointTestCase, callF callFunc) { + t.Helper() + + setup := caseSetup{ + config: map[string]any{}, + environmentVariables: map[string]string{}, + } + + for _, f := range testcase.with { + f(&setup) + } + + config := map[string]any{ + names.AttrAccessKey: servicemocks.MockStaticAccessKey, + names.AttrSecretKey: servicemocks.MockStaticSecretKey, + names.AttrRegion: region, + names.AttrSkipCredentialsValidation: true, + names.AttrSkipRequestingAccountID: true, + } + + maps.Copy(config, setup.config) + + if setup.configFile.baseUrl != "" || setup.configFile.serviceUrl != "" { + config[names.AttrProfile] = "default" + tempDir := t.TempDir() + writeSharedConfigFile(t, &config, tempDir, generateSharedConfigFile(setup.configFile)) + } + + for k, v := range setup.environmentVariables { + t.Setenv(k, v) + } + + p, err := sdkv2.NewProvider(ctx) + if err != nil { + t.Fatal(err) + } + + p.TerraformVersion = "1.0.0" + + expectedDiags := testcase.expected.diags + diags := p.Configure(ctx, terraformsdk.NewResourceConfigRaw(config)) + + if diff := cmp.Diff(diags, expectedDiags, cmp.Comparer(sdkdiag.Comparer)); diff != "" { + t.Errorf("unexpected diagnostics difference: %s", diff) + } + + if diags.HasError() { + return + } + + meta := p.Meta().(*conns.AWSClient) + + callParams := callF(ctx, t, meta) + + if e, a := testcase.expected.endpoint, callParams.endpoint; e != a { + t.Errorf("expected endpoint %q, got %q", e, a) + } + + if e, a := testcase.expected.region, callParams.region; e != a { + t.Errorf("expected region %q, got %q", e, a) + } +} + +func addRetrieveEndpointURLMiddleware(t *testing.T, endpoint *string) func(*middleware.Stack) error { + return func(stack *middleware.Stack) error { + return stack.Finalize.Add( + retrieveEndpointURLMiddleware(t, endpoint), + middleware.After, + ) + } +} + +func retrieveEndpointURLMiddleware(t *testing.T, endpoint *string) 
middleware.FinalizeMiddleware { + return middleware.FinalizeMiddlewareFunc( + "Test: Retrieve Endpoint", + func(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (middleware.FinalizeOutput, middleware.Metadata, error) { + t.Helper() + + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + t.Fatalf("Expected *github.com/aws/smithy-go/transport/http.Request, got %s", fullTypeName(in.Request)) + } + + url := request.URL + url.RawQuery = "" + url.Path = "/" + + *endpoint = url.String() + + return next.HandleFinalize(ctx, in) + }) +} + +func addRetrieveRegionMiddleware(region *string) func(*middleware.Stack) error { + return func(stack *middleware.Stack) error { + return stack.Serialize.Add( + retrieveRegionMiddleware(region), + middleware.After, + ) + } +} + +func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { + return middleware.SerializeMiddlewareFunc( + "Test: Retrieve Region", + func(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (middleware.SerializeOutput, middleware.Metadata, error) { + *region = awsmiddleware.GetRegion(ctx) + + return next.HandleSerialize(ctx, in) + }, + ) +} + +var errCancelOperation = errors.New("Test: Canceling request") + +func addCancelRequestMiddleware() func(*middleware.Stack) error { + return func(stack *middleware.Stack) error { + return stack.Finalize.Add( + cancelRequestMiddleware(), + middleware.After, + ) + } +} + +// cancelRequestMiddleware creates a Smithy middleware that intercepts the request before sending and cancels it +func cancelRequestMiddleware() middleware.FinalizeMiddleware { + return middleware.FinalizeMiddlewareFunc( + "Test: Cancel Requests", + func(_ context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (middleware.FinalizeOutput, middleware.Metadata, error) { + return middleware.FinalizeOutput{}, middleware.Metadata{}, errCancelOperation + }) +} + +func fullTypeName(i any) string { + 
return fullValueTypeName(reflect.ValueOf(i)) +} + +func fullValueTypeName(v reflect.Value) string { + if v.Kind() == reflect.Ptr { + return "*" + fullValueTypeName(reflect.Indirect(v)) + } + + requestType := v.Type() + return fmt.Sprintf("%s.%s", requestType.PkgPath(), requestType.Name()) +} + +func generateSharedConfigFile(config configFile) string { + var buf strings.Builder + + buf.WriteString(` +[default] +aws_access_key_id = DefaultSharedCredentialsAccessKey +aws_secret_access_key = DefaultSharedCredentialsSecretKey +`) + if config.baseUrl != "" { + fmt.Fprintf(&buf, "endpoint_url = %s\n", config.baseUrl) + } + + if config.serviceUrl != "" { + fmt.Fprintf(&buf, ` +services = endpoint-test + +[services endpoint-test] +%[1]s = + endpoint_url = %[2]s +`, configParam, serviceConfigFileEndpoint) + } + + return buf.String() +} + +func writeSharedConfigFile(t *testing.T, config *map[string]any, tempDir, content string) string { + t.Helper() + + file, err := os.Create(filepath.Join(tempDir, "aws-sdk-go-base-shared-configuration-file")) + if err != nil { + t.Fatalf("creating shared configuration file: %s", err) + } + + _, err = file.WriteString(content) + if err != nil { + t.Fatalf(" writing shared configuration file: %s", err) + } + + if v, ok := (*config)[names.AttrSharedConfigFiles]; !ok { + (*config)[names.AttrSharedConfigFiles] = []any{file.Name()} + } else { + (*config)[names.AttrSharedConfigFiles] = append(v.([]any), file.Name()) + } + + return file.Name() +} diff --git a/internal/service/workmail/service_package_gen.go b/internal/service/workmail/service_package_gen.go new file mode 100644 index 000000000000..72e816d639f8 --- /dev/null +++ b/internal/service/workmail/service_package_gen.go @@ -0,0 +1,87 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. 
+ +package workmail + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/workmail" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + inttypes "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/vcr" + "github.com/hashicorp/terraform-provider-aws/names" +) + +type servicePackage struct{} + +func (p *servicePackage) FrameworkDataSources(ctx context.Context) []*inttypes.ServicePackageFrameworkDataSource { + return []*inttypes.ServicePackageFrameworkDataSource{} +} + +func (p *servicePackage) FrameworkResources(ctx context.Context) []*inttypes.ServicePackageFrameworkResource { + return []*inttypes.ServicePackageFrameworkResource{} +} + +func (p *servicePackage) SDKDataSources(ctx context.Context) []*inttypes.ServicePackageSDKDataSource { + return []*inttypes.ServicePackageSDKDataSource{} +} + +func (p *servicePackage) SDKResources(ctx context.Context) []*inttypes.ServicePackageSDKResource { + return []*inttypes.ServicePackageSDKResource{} +} + +func (p *servicePackage) ServicePackageName() string { + return names.WorkMail +} + +// NewClient returns a new AWS SDK for Go v2 client for this service package's AWS API. 
+func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*workmail.Client, error) { + cfg := *(config["aws_sdkv2_config"].(*aws.Config)) + optFns := []func(*workmail.Options){ + workmail.WithEndpointResolverV2(newEndpointResolverV2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + func(o *workmail.Options) { + if region := config[names.AttrRegion].(string); o.Region != region { + tflog.Info(ctx, "overriding provider-configured AWS API region", map[string]any{ + "service": p.ServicePackageName(), + "original_region": o.Region, + "override_region": region, + }) + o.Region = region + } + }, + func(o *workmail.Options) { + if inContext, ok := conns.FromContext(ctx); ok && inContext.VCREnabled() { + tflog.Info(ctx, "overriding retry behavior to immediately return VCR errors") + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), vcr.InteractionNotFoundRetryableFunc) + } + }, + withExtraOptions(ctx, p, config), + } + + return workmail.NewFromConfig(cfg, optFns...), nil +} + +// withExtraOptions returns a functional option that allows this service package to specify extra API client options. +// This option is always called after any generated options. 
+func withExtraOptions(ctx context.Context, sp conns.ServicePackage, config map[string]any) func(*workmail.Options) { + if v, ok := sp.(interface { + withExtraOptions(context.Context, map[string]any) []func(*workmail.Options) + }); ok { + optFns := v.withExtraOptions(ctx, config) + + return func(o *workmail.Options) { + for _, optFn := range optFns { + optFn(o) + } + } + } + + return func(*workmail.Options) {} +} + +func ServicePackage(ctx context.Context) conns.ServicePackage { + return &servicePackage{} +} diff --git a/internal/service/workmail/tags_gen.go b/internal/service/workmail/tags_gen.go new file mode 100644 index 000000000000..7e555869e7de --- /dev/null +++ b/internal/service/workmail/tags_gen.go @@ -0,0 +1,146 @@ +// Code generated by internal/generate/tags/main.go; DO NOT EDIT. +package workmail + +import ( + "context" + + "github.com/YakDriver/smarterr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/workmail" + awstypes "github.com/aws/aws-sdk-go-v2/service/workmail/types" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/logging" + tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" + "github.com/hashicorp/terraform-provider-aws/names" +) + +// listTags lists workmail service tags. +// The identifier is typically the Amazon Resource Name (ARN), although +// it may also be a different identifier depending on the service. +func listTags(ctx context.Context, conn *workmail.Client, identifier string, optFns ...func(*workmail.Options)) (tftags.KeyValueTags, error) { + input := workmail.ListTagsForResourceInput{ + ResourceARN: aws.String(identifier), + } + + output, err := conn.ListTagsForResource(ctx, &input, optFns...) 
+ + if err != nil { + return tftags.New(ctx, nil), smarterr.NewError(err) + } + + return keyValueTags(ctx, output.Tags), nil +} + +// ListTags lists workmail service tags and set them in Context. +// It is called from outside this package. +func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier string) error { + tags, err := listTags(ctx, meta.(*conns.AWSClient).WorkMailClient(ctx), identifier) + + if err != nil { + return smarterr.NewError(err) + } + + if inContext, ok := tftags.FromContext(ctx); ok { + inContext.TagsOut = option.Some(tags) + } + + return nil +} + +// []*SERVICE.Tag handling + +// svcTags returns workmail service tags. +func svcTags(tags tftags.KeyValueTags) []awstypes.Tag { + result := make([]awstypes.Tag, 0, len(tags)) + + for k, v := range tags.Map() { + tag := awstypes.Tag{ + Key: aws.String(k), + Value: aws.String(v), + } + + result = append(result, tag) + } + + return result +} + +// keyValueTags creates tftags.KeyValueTags from workmail service tags. +func keyValueTags(ctx context.Context, tags []awstypes.Tag) tftags.KeyValueTags { + m := make(map[string]*string, len(tags)) + + for _, tag := range tags { + m[aws.ToString(tag.Key)] = tag.Value + } + + return tftags.New(ctx, m) +} + +// getTagsIn returns workmail service tags from Context. +// nil is returned if there are no input tags. +func getTagsIn(ctx context.Context) []awstypes.Tag { + if inContext, ok := tftags.FromContext(ctx); ok { + if tags := svcTags(inContext.TagsIn.UnwrapOrDefault()); len(tags) > 0 { + return tags + } + } + + return nil +} + +// setTagsOut sets workmail service tags in Context. +func setTagsOut(ctx context.Context, tags []awstypes.Tag) { + if inContext, ok := tftags.FromContext(ctx); ok { + inContext.TagsOut = option.Some(keyValueTags(ctx, tags)) + } +} + +// updateTags updates workmail service tags. +// The identifier is typically the Amazon Resource Name (ARN), although +// it may also be a different identifier depending on the service. 
+func updateTags(ctx context.Context, conn *workmail.Client, identifier string, oldTagsMap, newTagsMap any, optFns ...func(*workmail.Options)) error { + oldTags := tftags.New(ctx, oldTagsMap) + newTags := tftags.New(ctx, newTagsMap) + + ctx = tflog.SetField(ctx, logging.KeyResourceId, identifier) + + removedTags := oldTags.Removed(newTags) + removedTags = removedTags.IgnoreSystem(names.WorkMail) + if len(removedTags) > 0 { + input := workmail.UntagResourceInput{ + ResourceARN: aws.String(identifier), + TagKeys: removedTags.Keys(), + } + + _, err := conn.UntagResource(ctx, &input, optFns...) + + if err != nil { + return smarterr.NewError(err) + } + } + + updatedTags := oldTags.Updated(newTags) + updatedTags = updatedTags.IgnoreSystem(names.WorkMail) + if len(updatedTags) > 0 { + input := workmail.TagResourceInput{ + ResourceARN: aws.String(identifier), + Tags: svcTags(updatedTags), + } + + _, err := conn.TagResource(ctx, &input, optFns...) + + if err != nil { + return smarterr.NewError(err) + } + } + + return nil +} + +// UpdateTags updates workmail service tags. +// It is called from outside this package. 
+func (p *servicePackage) UpdateTags(ctx context.Context, meta any, identifier string, oldTags, newTags any) error { + return updateTags(ctx, meta.(*conns.AWSClient).WorkMailClient(ctx), identifier, oldTags, newTags) +} diff --git a/internal/service/workspaces/directory.go b/internal/service/workspaces/directory.go index 4323ba6a2ef0..7e481847bea2 100644 --- a/internal/service/workspaces/directory.go +++ b/internal/service/workspaces/directory.go @@ -384,8 +384,8 @@ func resourceDirectoryCreate(ctx context.Context, d *schema.ResourceData, meta a const ( timeout = 2 * time.Minute ) - output, err := tfresource.RetryWhenIsA[*types.InvalidResourceStateException](ctx, timeout, - func() (any, error) { + output, err := tfresource.RetryWhenIsA[any, *types.InvalidResourceStateException](ctx, timeout, + func(ctx context.Context) (any, error) { return conn.RegisterWorkspaceDirectory(ctx, &input) }) @@ -675,8 +675,8 @@ func resourceDirectoryDelete(ctx context.Context, d *schema.ResourceData, meta a const ( timeout = 2 * time.Minute ) - _, err := tfresource.RetryWhenIsA[*types.InvalidResourceStateException](ctx, timeout, - func() (any, error) { + _, err := tfresource.RetryWhenIsA[any, *types.InvalidResourceStateException](ctx, timeout, + func(ctx context.Context) (any, error) { return conn.DeregisterWorkspaceDirectory(ctx, &input) }) diff --git a/internal/service/workspaces/service_endpoint_resolver_gen.go b/internal/service/workspaces/service_endpoint_resolver_gen.go index f490c10c28e2..c15acea60cff 100644 --- a/internal/service/workspaces/service_endpoint_resolver_gen.go +++ b/internal/service/workspaces/service_endpoint_resolver_gen.go @@ -62,7 +62,7 @@ func (r resolverV2) ResolveEndpoint(ctx context.Context, params workspaces.Endpo }) params.UseFIPS = aws.Bool(false) } else { - err = fmt.Errorf("looking up workspaces endpoint %q: %s", hostname, err) + err = fmt.Errorf("looking up workspaces endpoint %q: %w", hostname, err) return } } else { diff --git 
a/internal/service/workspaces/service_endpoints_gen_test.go b/internal/service/workspaces/service_endpoints_gen_test.go index 09554602d634..75ff50a05311 100644 --- a/internal/service/workspaces/service_endpoints_gen_test.go +++ b/internal/service/workspaces/service_endpoints_gen_test.go @@ -521,7 +521,7 @@ func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { ) } -var errCancelOperation = fmt.Errorf("Test: Canceling request") +var errCancelOperation = errors.New("Test: Canceling request") func addCancelRequestMiddleware() func(*middleware.Stack) error { return func(stack *middleware.Stack) error { diff --git a/internal/service/workspaces/service_package_gen.go b/internal/service/workspaces/service_package_gen.go index f25ee5dbfade..09a82280d487 100644 --- a/internal/service/workspaces/service_package_gen.go +++ b/internal/service/workspaces/service_package_gen.go @@ -7,7 +7,6 @@ import ( "unique" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/workspaces" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -126,7 +125,7 @@ func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) ( func(o *workspaces.Options) { if inContext, ok := conns.FromContext(ctx); ok && inContext.VCREnabled() { tflog.Info(ctx, "overriding retry behavior to immediately return VCR errors") - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(vcr.InteractionNotFoundRetryableFunc)) + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), vcr.InteractionNotFoundRetryableFunc) } }, withExtraOptions(ctx, p, config), diff --git a/internal/service/workspaces/sweep.go b/internal/service/workspaces/sweep.go index ac07adc543c5..ee41d0341708 100644 --- a/internal/service/workspaces/sweep.go +++ b/internal/service/workspaces/sweep.go @@ -39,7 +39,7 @@ func 
sweepDirectories(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } input := &workspaces.DescribeWorkspaceDirectoriesInput{} conn := client.WorkSpacesClient(ctx) @@ -80,7 +80,7 @@ func sweepIPGroups(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("getting client: %w", err) } conn := client.WorkSpacesClient(ctx) input := &workspaces.DescribeIpGroupsInput{} @@ -124,7 +124,7 @@ func sweepWorkspace(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %w", err) + return fmt.Errorf("getting client: %w", err) } input := &workspaces.DescribeWorkspacesInput{} conn := client.WorkSpacesClient(ctx) diff --git a/internal/service/workspaces/tags_gen.go b/internal/service/workspaces/tags_gen.go index a24adaa37626..31c9ccf0ab70 100644 --- a/internal/service/workspaces/tags_gen.go +++ b/internal/service/workspaces/tags_gen.go @@ -3,8 +3,8 @@ package workspaces import ( "context" - "fmt" + "github.com/YakDriver/smarterr" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/workspaces" awstypes "github.com/aws/aws-sdk-go-v2/service/workspaces/types" @@ -27,7 +27,7 @@ func listTags(ctx context.Context, conn *workspaces.Client, identifier string, o output, err := conn.DescribeTags(ctx, &input, optFns...) 
if err != nil { - return tftags.New(ctx, nil), err + return tftags.New(ctx, nil), smarterr.NewError(err) } return keyValueTags(ctx, output.TagList), nil @@ -39,7 +39,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri tags, err := listTags(ctx, meta.(*conns.AWSClient).WorkSpacesClient(ctx), identifier) if err != nil { - return err + return smarterr.NewError(err) } if inContext, ok := tftags.FromContext(ctx); ok { @@ -117,7 +117,7 @@ func updateTags(ctx context.Context, conn *workspaces.Client, identifier string, _, err := conn.DeleteTags(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("untagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } @@ -132,7 +132,7 @@ func updateTags(ctx context.Context, conn *workspaces.Client, identifier string, _, err := conn.CreateTags(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("tagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } diff --git a/internal/service/workspacesweb/browser_settings_association.go b/internal/service/workspacesweb/browser_settings_association.go new file mode 100644 index 000000000000..fb3af29f3bee --- /dev/null +++ b/internal/service/workspacesweb/browser_settings_association.go @@ -0,0 +1,164 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package workspacesweb + +import ( + "context" + "fmt" + "slices" + + "github.com/aws/aws-sdk-go-v2/service/workspacesweb" + awstypes "github.com/aws/aws-sdk-go-v2/service/workspacesweb/types" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" + "github.com/hashicorp/terraform-provider-aws/internal/errs" + "github.com/hashicorp/terraform-provider-aws/internal/errs/fwdiag" + intflex "github.com/hashicorp/terraform-provider-aws/internal/flex" + "github.com/hashicorp/terraform-provider-aws/internal/framework" + fwtypes "github.com/hashicorp/terraform-provider-aws/internal/framework/types" + tfretry "github.com/hashicorp/terraform-provider-aws/internal/retry" +) + +// @FrameworkResource("aws_workspacesweb_browser_settings_association", name="Browser Settings Association") + // @Testing(tagsTest=false) + // @Testing(existsType="github.com/aws/aws-sdk-go-v2/service/workspacesweb/types;types.BrowserSettings") + // @Testing(importStateIdFunc="testAccBrowserSettingsAssociationImportStateIdFunc") +func newBrowserSettingsAssociationResource(_ context.Context) (resource.ResourceWithConfigure, error) { + return &browserSettingsAssociationResource{}, nil +} + +type browserSettingsAssociationResource struct { + framework.ResourceWithModel[browserSettingsAssociationResourceModel] + framework.WithNoUpdate +} + +func (r *browserSettingsAssociationResource) Schema(ctx context.Context, request resource.SchemaRequest, response *resource.SchemaResponse) { + response.Schema = schema.Schema{ + Attributes: map[string]schema.Attribute{ + "browser_settings_arn": schema.StringAttribute{ + CustomType: fwtypes.ARNType, + Required: true, + PlanModifiers: 
[]planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + }, + "portal_arn": schema.StringAttribute{ + CustomType: fwtypes.ARNType, + Required: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + }, + }, + } +} + +func (r *browserSettingsAssociationResource) Create(ctx context.Context, request resource.CreateRequest, response *resource.CreateResponse) { + var data browserSettingsAssociationResourceModel + response.Diagnostics.Append(request.Plan.Get(ctx, &data)...) + if response.Diagnostics.HasError() { + return + } + + conn := r.Meta().WorkSpacesWebClient(ctx) + + input := workspacesweb.AssociateBrowserSettingsInput{ + BrowserSettingsArn: data.BrowserSettingsARN.ValueStringPointer(), + PortalArn: data.PortalARN.ValueStringPointer(), + } + + _, err := conn.AssociateBrowserSettings(ctx, &input) + + if err != nil { + response.Diagnostics.AddError("creating WorkSpacesWeb Browser Settings Association", err.Error()) + return + } + + response.Diagnostics.Append(response.State.Set(ctx, data)...) +} + +func (r *browserSettingsAssociationResource) Read(ctx context.Context, request resource.ReadRequest, response *resource.ReadResponse) { + var data browserSettingsAssociationResourceModel + response.Diagnostics.Append(request.State.Get(ctx, &data)...) 
+ if response.Diagnostics.HasError() { + return + } + + conn := r.Meta().WorkSpacesWebClient(ctx) + + // Check if the association exists by getting the browser settings and checking associated portals + output, err := findBrowserSettingsByARN(ctx, conn, data.BrowserSettingsARN.ValueString()) + if tfretry.NotFound(err) { + response.Diagnostics.Append(fwdiag.NewResourceNotFoundWarningDiagnostic(err)) + response.State.RemoveResource(ctx) + return + } + + if err != nil { + response.Diagnostics.AddError(fmt.Sprintf("reading WorkSpacesWeb Browser Settings Association (%s)", data.BrowserSettingsARN.ValueString()), err.Error()) + return + } + + // Check if the portal is in the associated portals list + if !slices.Contains(output.AssociatedPortalArns, data.PortalARN.ValueString()) { + response.Diagnostics.Append(fwdiag.NewResourceNotFoundWarningDiagnostic(fmt.Errorf("association not found"))) + response.State.RemoveResource(ctx) + return + } + + response.Diagnostics.Append(response.State.Set(ctx, &data)...) +} + +func (r *browserSettingsAssociationResource) Delete(ctx context.Context, request resource.DeleteRequest, response *resource.DeleteResponse) { + var data browserSettingsAssociationResourceModel + response.Diagnostics.Append(request.State.Get(ctx, &data)...) 
+ if response.Diagnostics.HasError() { + return + } + + conn := r.Meta().WorkSpacesWebClient(ctx) + + input := workspacesweb.DisassociateBrowserSettingsInput{ + PortalArn: data.PortalARN.ValueStringPointer(), + } + + _, err := conn.DisassociateBrowserSettings(ctx, &input) + + if errs.IsA[*awstypes.ResourceNotFoundException](err) { + return + } + + if err != nil { + response.Diagnostics.AddError(fmt.Sprintf("deleting WorkSpacesWeb Browser Settings Association (%s)", data.BrowserSettingsARN.ValueString()), err.Error()) + return + } +} + +func (r *browserSettingsAssociationResource) ImportState(ctx context.Context, request resource.ImportStateRequest, response *resource.ImportStateResponse) { + const ( + browserSettingsAssociationIDParts = 2 + ) + parts, err := intflex.ExpandResourceId(request.ID, browserSettingsAssociationIDParts, true) + if err != nil { + response.Diagnostics.AddError( + "Unexpected Import Identifier", + fmt.Sprintf("Expected import identifier with format: browser_settings_arn,portal_arn. Got: %q", request.ID), + ) + return + } + browserSettingsARN := parts[0] + portalARN := parts[1] + + response.Diagnostics.Append(response.State.SetAttribute(ctx, path.Root("browser_settings_arn"), browserSettingsARN)...) + response.Diagnostics.Append(response.State.SetAttribute(ctx, path.Root("portal_arn"), portalARN)...) +} + +type browserSettingsAssociationResourceModel struct { + framework.WithRegionModel + BrowserSettingsARN fwtypes.ARN `tfsdk:"browser_settings_arn"` + PortalARN fwtypes.ARN `tfsdk:"portal_arn"` +} diff --git a/internal/service/workspacesweb/browser_settings_association_test.go b/internal/service/workspacesweb/browser_settings_association_test.go new file mode 100644 index 000000000000..e4850625d04c --- /dev/null +++ b/internal/service/workspacesweb/browser_settings_association_test.go @@ -0,0 +1,195 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package workspacesweb_test + +import ( + "context" + "fmt" + "slices" + "testing" + + awstypes "github.com/aws/aws-sdk-go-v2/service/workspacesweb/types" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/terraform" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + tfworkspacesweb "github.com/hashicorp/terraform-provider-aws/internal/service/workspacesweb" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccWorkSpacesWebBrowserSettingsAssociation_basic(t *testing.T) { + ctx := acctest.Context(t) + var browserSettings awstypes.BrowserSettings + resourceName := "aws_workspacesweb_browser_settings_association.test" + browserSettingsResourceName := "aws_workspacesweb_browser_settings.test" + portalResourceName := "aws_workspacesweb_portal.test" + browserPolicy1 := `{ + "chromePolicies": + { + "DefaultDownloadDirectory": { + "value": "/home/as2-streaming-user/MyFiles/TemporaryFiles1" + } + } + } ` + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, names.WorkSpacesWebEndpointID) + testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckBrowserSettingsAssociationDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccBrowserSettingsAssociationConfig_basic(browserPolicy1), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckBrowserSettingsAssociationExists(ctx, resourceName, &browserSettings), + resource.TestCheckResourceAttrPair(resourceName, "browser_settings_arn", browserSettingsResourceName, "browser_settings_arn"), + resource.TestCheckResourceAttrPair(resourceName, 
"portal_arn", portalResourceName, "portal_arn"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateIdFunc: testAccBrowserSettingsAssociationImportStateIdFunc(resourceName), + ImportStateVerifyIdentifierAttribute: "browser_settings_arn", + }, + { + ResourceName: resourceName, + RefreshState: true, + }, + { + Config: testAccBrowserSettingsAssociationConfig_basic(browserPolicy1), + Check: resource.ComposeAggregateTestCheckFunc( + //The following checks are for the BrowserSettings Resource and the PortalResource (and not for the association resource). + resource.TestCheckResourceAttr(browserSettingsResourceName, "associated_portal_arns.#", "1"), + resource.TestCheckResourceAttrPair(browserSettingsResourceName, "associated_portal_arns.0", portalResourceName, "portal_arn"), + resource.TestCheckResourceAttrPair(portalResourceName, "browser_settings_arn", browserSettingsResourceName, "browser_settings_arn"), + ), + }, + }, + }) +} + +func TestAccWorkSpacesWebBrowserSettingsAssociation_disappears(t *testing.T) { + ctx := acctest.Context(t) + var browserSettings awstypes.BrowserSettings + resourceName := "aws_workspacesweb_browser_settings_association.test" + browserPolicy1 := `{ + "chromePolicies": + { + "DefaultDownloadDirectory": { + "value": "/home/as2-streaming-user/MyFiles/TemporaryFiles1" + } + } + } ` + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, names.WorkSpacesWebEndpointID) + testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckBrowserSettingsAssociationDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccBrowserSettingsAssociationConfig_basic(browserPolicy1), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckBrowserSettingsAssociationExists(ctx, resourceName, 
&browserSettings), + acctest.CheckFrameworkResourceDisappears(ctx, acctest.Provider, tfworkspacesweb.ResourceBrowserSettingsAssociation, resourceName), + ), + ExpectNonEmptyPlan: true, + }, + }, + }) +} + +func testAccCheckBrowserSettingsAssociationDestroy(ctx context.Context) resource.TestCheckFunc { + return func(s *terraform.State) error { + conn := acctest.Provider.Meta().(*conns.AWSClient).WorkSpacesWebClient(ctx) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "aws_workspacesweb_browser_settings_association" { + continue + } + + browserSettings, err := tfworkspacesweb.FindBrowserSettingsByARN(ctx, conn, rs.Primary.Attributes["browser_settings_arn"]) + + if tfresource.NotFound(err) { + continue + } + + if err != nil { + return err + } + + // Check if the portal is still associated + portalARN := rs.Primary.Attributes["portal_arn"] + if slices.Contains(browserSettings.AssociatedPortalArns, portalARN) { + return fmt.Errorf("WorkSpaces Web Browser Settings Association %s still exists", rs.Primary.Attributes["browser_settings_arn"]) + } + } + + return nil + } +} + +func testAccCheckBrowserSettingsAssociationExists(ctx context.Context, n string, v *awstypes.BrowserSettings) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + conn := acctest.Provider.Meta().(*conns.AWSClient).WorkSpacesWebClient(ctx) + + output, err := tfworkspacesweb.FindBrowserSettingsByARN(ctx, conn, rs.Primary.Attributes["browser_settings_arn"]) + + if err != nil { + return err + } + + // Check if the portal is associated + portalARN := rs.Primary.Attributes["portal_arn"] + if !slices.Contains(output.AssociatedPortalArns, portalARN) { + return fmt.Errorf("Association not found") + } + + *v = *output + + return nil + } +} + +func testAccBrowserSettingsAssociationImportStateIdFunc(resourceName string) resource.ImportStateIdFunc { + return func(s *terraform.State) 
(string, error) { + rs, ok := s.RootModule().Resources[resourceName] + if !ok { + return "", fmt.Errorf("Not found: %s", resourceName) + } + + return fmt.Sprintf("%s,%s", rs.Primary.Attributes["browser_settings_arn"], rs.Primary.Attributes["portal_arn"]), nil + } +} + +func testAccBrowserSettingsAssociationConfig_basic(browserPolicy string) string { + return fmt.Sprintf(` +resource "aws_workspacesweb_portal" "test" { + display_name = "test" +} + +resource "aws_workspacesweb_browser_settings" "test" { + browser_policy = %[1]q +} + +resource "aws_workspacesweb_browser_settings_association" "test" { + browser_settings_arn = aws_workspacesweb_browser_settings.test.browser_settings_arn + portal_arn = aws_workspacesweb_portal.test.portal_arn +} +`, browserPolicy) +} diff --git a/internal/service/workspacesweb/browser_settings_tags_gen_test.go b/internal/service/workspacesweb/browser_settings_tags_gen_test.go index e030fabff7b9..2e5d717491aa 100644 --- a/internal/service/workspacesweb/browser_settings_tags_gen_test.go +++ b/internal/service/workspacesweb/browser_settings_tags_gen_test.go @@ -18,10 +18,11 @@ import ( func TestAccWorkSpacesWebBrowserSettings_tags(t *testing.T) { ctx := acctest.Context(t) + var v types.BrowserSettings resourceName := "aws_workspacesweb_browser_settings.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), CheckDestroy: testAccCheckBrowserSettingsDestroy(ctx), @@ -199,10 +200,11 @@ func TestAccWorkSpacesWebBrowserSettings_tags(t *testing.T) { func TestAccWorkSpacesWebBrowserSettings_tags_null(t *testing.T) { ctx := acctest.Context(t) + var v types.BrowserSettings resourceName := "aws_workspacesweb_browser_settings.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: 
acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), CheckDestroy: testAccCheckBrowserSettingsDestroy(ctx), @@ -260,10 +262,11 @@ func TestAccWorkSpacesWebBrowserSettings_tags_null(t *testing.T) { func TestAccWorkSpacesWebBrowserSettings_tags_EmptyMap(t *testing.T) { ctx := acctest.Context(t) + var v types.BrowserSettings resourceName := "aws_workspacesweb_browser_settings.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), CheckDestroy: testAccCheckBrowserSettingsDestroy(ctx), @@ -309,10 +312,11 @@ func TestAccWorkSpacesWebBrowserSettings_tags_EmptyMap(t *testing.T) { func TestAccWorkSpacesWebBrowserSettings_tags_AddOnUpdate(t *testing.T) { ctx := acctest.Context(t) + var v types.BrowserSettings resourceName := "aws_workspacesweb_browser_settings.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), CheckDestroy: testAccCheckBrowserSettingsDestroy(ctx), @@ -387,10 +391,11 @@ func TestAccWorkSpacesWebBrowserSettings_tags_AddOnUpdate(t *testing.T) { func TestAccWorkSpacesWebBrowserSettings_tags_EmptyTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) + var v types.BrowserSettings resourceName := "aws_workspacesweb_browser_settings.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), CheckDestroy: testAccCheckBrowserSettingsDestroy(ctx), @@ -476,10 +481,11 @@ func TestAccWorkSpacesWebBrowserSettings_tags_EmptyTag_OnCreate(t *testing.T) { func TestAccWorkSpacesWebBrowserSettings_tags_EmptyTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + var v types.BrowserSettings resourceName := 
"aws_workspacesweb_browser_settings.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), CheckDestroy: testAccCheckBrowserSettingsDestroy(ctx), @@ -613,10 +619,11 @@ func TestAccWorkSpacesWebBrowserSettings_tags_EmptyTag_OnUpdate_Add(t *testing.T func TestAccWorkSpacesWebBrowserSettings_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + var v types.BrowserSettings resourceName := "aws_workspacesweb_browser_settings.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), CheckDestroy: testAccCheckBrowserSettingsDestroy(ctx), @@ -701,10 +708,11 @@ func TestAccWorkSpacesWebBrowserSettings_tags_EmptyTag_OnUpdate_Replace(t *testi func TestAccWorkSpacesWebBrowserSettings_tags_DefaultTags_providerOnly(t *testing.T) { ctx := acctest.Context(t) + var v types.BrowserSettings resourceName := "aws_workspacesweb_browser_settings.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), CheckDestroy: testAccCheckBrowserSettingsDestroy(ctx), @@ -881,10 +889,11 @@ func TestAccWorkSpacesWebBrowserSettings_tags_DefaultTags_providerOnly(t *testin func TestAccWorkSpacesWebBrowserSettings_tags_DefaultTags_nonOverlapping(t *testing.T) { ctx := acctest.Context(t) + var v types.BrowserSettings resourceName := "aws_workspacesweb_browser_settings.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), CheckDestroy: 
testAccCheckBrowserSettingsDestroy(ctx), @@ -1040,10 +1049,11 @@ func TestAccWorkSpacesWebBrowserSettings_tags_DefaultTags_nonOverlapping(t *test func TestAccWorkSpacesWebBrowserSettings_tags_DefaultTags_overlapping(t *testing.T) { ctx := acctest.Context(t) + var v types.BrowserSettings resourceName := "aws_workspacesweb_browser_settings.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), CheckDestroy: testAccCheckBrowserSettingsDestroy(ctx), @@ -1215,10 +1225,11 @@ func TestAccWorkSpacesWebBrowserSettings_tags_DefaultTags_overlapping(t *testing func TestAccWorkSpacesWebBrowserSettings_tags_DefaultTags_updateToProviderOnly(t *testing.T) { ctx := acctest.Context(t) + var v types.BrowserSettings resourceName := "aws_workspacesweb_browser_settings.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), CheckDestroy: testAccCheckBrowserSettingsDestroy(ctx), @@ -1303,10 +1314,11 @@ func TestAccWorkSpacesWebBrowserSettings_tags_DefaultTags_updateToProviderOnly(t func TestAccWorkSpacesWebBrowserSettings_tags_DefaultTags_updateToResourceOnly(t *testing.T) { ctx := acctest.Context(t) + var v types.BrowserSettings resourceName := "aws_workspacesweb_browser_settings.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), CheckDestroy: testAccCheckBrowserSettingsDestroy(ctx), @@ -1390,10 +1402,11 @@ func TestAccWorkSpacesWebBrowserSettings_tags_DefaultTags_updateToResourceOnly(t func TestAccWorkSpacesWebBrowserSettings_tags_DefaultTags_emptyResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v 
types.BrowserSettings resourceName := "aws_workspacesweb_browser_settings.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), CheckDestroy: testAccCheckBrowserSettingsDestroy(ctx), @@ -1455,10 +1468,11 @@ func TestAccWorkSpacesWebBrowserSettings_tags_DefaultTags_emptyResourceTag(t *te func TestAccWorkSpacesWebBrowserSettings_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T) { ctx := acctest.Context(t) + var v types.BrowserSettings resourceName := "aws_workspacesweb_browser_settings.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), CheckDestroy: testAccCheckBrowserSettingsDestroy(ctx), @@ -1512,10 +1526,11 @@ func TestAccWorkSpacesWebBrowserSettings_tags_DefaultTags_emptyProviderOnlyTag(t func TestAccWorkSpacesWebBrowserSettings_tags_DefaultTags_nullOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v types.BrowserSettings resourceName := "aws_workspacesweb_browser_settings.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), CheckDestroy: testAccCheckBrowserSettingsDestroy(ctx), @@ -1580,10 +1595,11 @@ func TestAccWorkSpacesWebBrowserSettings_tags_DefaultTags_nullOverlappingResourc func TestAccWorkSpacesWebBrowserSettings_tags_DefaultTags_nullNonOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v types.BrowserSettings resourceName := "aws_workspacesweb_browser_settings.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: 
acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), CheckDestroy: testAccCheckBrowserSettingsDestroy(ctx), @@ -1650,10 +1666,11 @@ func TestAccWorkSpacesWebBrowserSettings_tags_DefaultTags_nullNonOverlappingReso func TestAccWorkSpacesWebBrowserSettings_tags_ComputedTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) + var v types.BrowserSettings resourceName := "aws_workspacesweb_browser_settings.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), CheckDestroy: testAccCheckBrowserSettingsDestroy(ctx), @@ -1704,10 +1721,11 @@ func TestAccWorkSpacesWebBrowserSettings_tags_ComputedTag_OnCreate(t *testing.T) func TestAccWorkSpacesWebBrowserSettings_tags_ComputedTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + var v types.BrowserSettings resourceName := "aws_workspacesweb_browser_settings.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), CheckDestroy: testAccCheckBrowserSettingsDestroy(ctx), @@ -1799,10 +1817,11 @@ func TestAccWorkSpacesWebBrowserSettings_tags_ComputedTag_OnUpdate_Add(t *testin func TestAccWorkSpacesWebBrowserSettings_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + var v types.BrowserSettings resourceName := "aws_workspacesweb_browser_settings.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), CheckDestroy: testAccCheckBrowserSettingsDestroy(ctx), @@ -1884,10 +1903,11 @@ func TestAccWorkSpacesWebBrowserSettings_tags_ComputedTag_OnUpdate_Replace(t *te func TestAccWorkSpacesWebBrowserSettings_tags_IgnoreTags_Overlap_DefaultTag(t 
*testing.T) { ctx := acctest.Context(t) + var v types.BrowserSettings resourceName := "aws_workspacesweb_browser_settings.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), CheckDestroy: testAccCheckBrowserSettingsDestroy(ctx), @@ -2042,10 +2062,11 @@ func TestAccWorkSpacesWebBrowserSettings_tags_IgnoreTags_Overlap_DefaultTag(t *t func TestAccWorkSpacesWebBrowserSettings_tags_IgnoreTags_Overlap_ResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v types.BrowserSettings resourceName := "aws_workspacesweb_browser_settings.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), CheckDestroy: testAccCheckBrowserSettingsDestroy(ctx), diff --git a/internal/service/workspacesweb/data_protection_settings_association.go b/internal/service/workspacesweb/data_protection_settings_association.go new file mode 100644 index 000000000000..694f3e62e803 --- /dev/null +++ b/internal/service/workspacesweb/data_protection_settings_association.go @@ -0,0 +1,164 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package workspacesweb + +import ( + "context" + "fmt" + "slices" + + "github.com/aws/aws-sdk-go-v2/service/workspacesweb" + awstypes "github.com/aws/aws-sdk-go-v2/service/workspacesweb/types" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" + "github.com/hashicorp/terraform-provider-aws/internal/errs" + "github.com/hashicorp/terraform-provider-aws/internal/errs/fwdiag" + intflex "github.com/hashicorp/terraform-provider-aws/internal/flex" + "github.com/hashicorp/terraform-provider-aws/internal/framework" + fwtypes "github.com/hashicorp/terraform-provider-aws/internal/framework/types" + tfretry "github.com/hashicorp/terraform-provider-aws/internal/retry" +) + +// @FrameworkResource("aws_workspacesweb_data_protection_settings_association", name="Data Protection Settings Association") +// @Testing(tagsTest=false) +// @Testing(existsType="github.com/aws/aws-sdk-go-v2/service/workspacesweb/types;types.DataProtectionSettings") +// @Testing(importStateIdAttribute="data_protection_settings_arn,portal_arn") +func newDataProtectionSettingsAssociationResource(_ context.Context) (resource.ResourceWithConfigure, error) { + return &dataProtectionSettingsAssociationResource{}, nil +} + +type dataProtectionSettingsAssociationResource struct { + framework.ResourceWithModel[dataProtectionSettingsAssociationResourceModel] + framework.WithNoUpdate +} + +func (r *dataProtectionSettingsAssociationResource) Schema(ctx context.Context, request resource.SchemaRequest, response *resource.SchemaResponse) { + response.Schema = schema.Schema{ + Attributes: map[string]schema.Attribute{ + "data_protection_settings_arn": schema.StringAttribute{ + CustomType: 
fwtypes.ARNType, + Required: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + }, + "portal_arn": schema.StringAttribute{ + CustomType: fwtypes.ARNType, + Required: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + }, + }, + } +} + +func (r *dataProtectionSettingsAssociationResource) Create(ctx context.Context, request resource.CreateRequest, response *resource.CreateResponse) { + var data dataProtectionSettingsAssociationResourceModel + response.Diagnostics.Append(request.Plan.Get(ctx, &data)...) + if response.Diagnostics.HasError() { + return + } + + conn := r.Meta().WorkSpacesWebClient(ctx) + + input := workspacesweb.AssociateDataProtectionSettingsInput{ + DataProtectionSettingsArn: data.DataProtectionSettingsARN.ValueStringPointer(), + PortalArn: data.PortalARN.ValueStringPointer(), + } + + _, err := conn.AssociateDataProtectionSettings(ctx, &input) + + if err != nil { + response.Diagnostics.AddError("creating WorkSpacesWeb Data Protection Settings Association", err.Error()) + return + } + + response.Diagnostics.Append(response.State.Set(ctx, data)...) +} + +func (r *dataProtectionSettingsAssociationResource) Read(ctx context.Context, request resource.ReadRequest, response *resource.ReadResponse) { + var data dataProtectionSettingsAssociationResourceModel + response.Diagnostics.Append(request.State.Get(ctx, &data)...) 
+ if response.Diagnostics.HasError() { + return + } + + conn := r.Meta().WorkSpacesWebClient(ctx) + + // Check if the association exists by getting the data protection settings and checking associated portals + output, err := findDataProtectionSettingsByARN(ctx, conn, data.DataProtectionSettingsARN.ValueString()) + if tfretry.NotFound(err) { + response.Diagnostics.Append(fwdiag.NewResourceNotFoundWarningDiagnostic(err)) + response.State.RemoveResource(ctx) + return + } + + if err != nil { + response.Diagnostics.AddError(fmt.Sprintf("reading WorkSpacesWeb Data Protection Settings Association (%s)", data.DataProtectionSettingsARN.ValueString()), err.Error()) + return + } + + // Check if the portal is in the associated portals list + if !slices.Contains(output.AssociatedPortalArns, data.PortalARN.ValueString()) { + response.Diagnostics.Append(fwdiag.NewResourceNotFoundWarningDiagnostic(fmt.Errorf("association not found"))) + response.State.RemoveResource(ctx) + return + } + + response.Diagnostics.Append(response.State.Set(ctx, &data)...) +} + +func (r *dataProtectionSettingsAssociationResource) Delete(ctx context.Context, request resource.DeleteRequest, response *resource.DeleteResponse) { + var data dataProtectionSettingsAssociationResourceModel + response.Diagnostics.Append(request.State.Get(ctx, &data)...) 
+ if response.Diagnostics.HasError() { + return + } + + conn := r.Meta().WorkSpacesWebClient(ctx) + + input := workspacesweb.DisassociateDataProtectionSettingsInput{ + PortalArn: data.PortalARN.ValueStringPointer(), + } + + _, err := conn.DisassociateDataProtectionSettings(ctx, &input) + + if errs.IsA[*awstypes.ResourceNotFoundException](err) { + return + } + + if err != nil { + response.Diagnostics.AddError(fmt.Sprintf("deleting WorkSpacesWeb Data Protection Settings Association (%s)", data.DataProtectionSettingsARN.ValueString()), err.Error()) + return + } +} + +func (r *dataProtectionSettingsAssociationResource) ImportState(ctx context.Context, request resource.ImportStateRequest, response *resource.ImportStateResponse) { + const ( + dataProtectionSettingsAssociationIDParts = 2 + ) + parts, err := intflex.ExpandResourceId(request.ID, dataProtectionSettingsAssociationIDParts, true) + if err != nil { + response.Diagnostics.AddError( + "Unexpected Import Identifier", + fmt.Sprintf("Expected import identifier with format: data_protection_settings_arn,portal_arn. Got: %q", request.ID), + ) + return + } + dataProtectionSettingsARN := parts[0] + portalARN := parts[1] + + response.Diagnostics.Append(response.State.SetAttribute(ctx, path.Root("data_protection_settings_arn"), dataProtectionSettingsARN)...) + response.Diagnostics.Append(response.State.SetAttribute(ctx, path.Root("portal_arn"), portalARN)...) 
+} + +type dataProtectionSettingsAssociationResourceModel struct { + framework.WithRegionModel + DataProtectionSettingsARN fwtypes.ARN `tfsdk:"data_protection_settings_arn"` + PortalARN fwtypes.ARN `tfsdk:"portal_arn"` +} diff --git a/internal/service/workspacesweb/data_protection_settings_association_test.go b/internal/service/workspacesweb/data_protection_settings_association_test.go new file mode 100644 index 000000000000..951bf87fac40 --- /dev/null +++ b/internal/service/workspacesweb/data_protection_settings_association_test.go @@ -0,0 +1,181 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package workspacesweb_test + +import ( + "context" + "fmt" + "slices" + "testing" + + awstypes "github.com/aws/aws-sdk-go-v2/service/workspacesweb/types" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/terraform" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + tfworkspacesweb "github.com/hashicorp/terraform-provider-aws/internal/service/workspacesweb" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccWorkSpacesWebDataProtectionSettingsAssociation_basic(t *testing.T) { + ctx := acctest.Context(t) + var dataProtectionSettings awstypes.DataProtectionSettings + resourceName := "aws_workspacesweb_data_protection_settings_association.test" + dataProtectionSettingsResourceName := "aws_workspacesweb_data_protection_settings.test" + portalResourceName := "aws_workspacesweb_portal.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, names.WorkSpacesWebEndpointID) + testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + 
CheckDestroy: testAccCheckDataProtectionSettingsAssociationDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccDataProtectionSettingsAssociationConfig_basic(), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDataProtectionSettingsAssociationExists(ctx, resourceName, &dataProtectionSettings), + resource.TestCheckResourceAttrPair(resourceName, "data_protection_settings_arn", dataProtectionSettingsResourceName, "data_protection_settings_arn"), + resource.TestCheckResourceAttrPair(resourceName, "portal_arn", portalResourceName, "portal_arn"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateIdFunc: testAccDataProtectionSettingsAssociationImportStateIdFunc(resourceName), + ImportStateVerifyIdentifierAttribute: "data_protection_settings_arn", + }, + { + ResourceName: resourceName, + RefreshState: true, + }, + { + Config: testAccDataProtectionSettingsAssociationConfig_basic(), + Check: resource.ComposeAggregateTestCheckFunc( + //The following checks are for the DataProtectionSettings Resource and the PortalResource (and not for the association resource). 
+ resource.TestCheckResourceAttr(dataProtectionSettingsResourceName, "associated_portal_arns.#", "1"), + resource.TestCheckResourceAttrPair(dataProtectionSettingsResourceName, "associated_portal_arns.0", portalResourceName, "portal_arn"), + resource.TestCheckResourceAttrPair(portalResourceName, "data_protection_settings_arn", dataProtectionSettingsResourceName, "data_protection_settings_arn"), + ), + }, + }, + }) +} + +func TestAccWorkSpacesWebDataProtectionSettingsAssociation_disappears(t *testing.T) { + ctx := acctest.Context(t) + var dataProtectionSettings awstypes.DataProtectionSettings + resourceName := "aws_workspacesweb_data_protection_settings_association.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, names.WorkSpacesWebEndpointID) + testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckDataProtectionSettingsAssociationDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccDataProtectionSettingsAssociationConfig_basic(), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDataProtectionSettingsAssociationExists(ctx, resourceName, &dataProtectionSettings), + acctest.CheckFrameworkResourceDisappears(ctx, acctest.Provider, tfworkspacesweb.ResourceDataProtectionSettingsAssociation, resourceName), + ), + ExpectNonEmptyPlan: true, + }, + }, + }) +} + +func testAccCheckDataProtectionSettingsAssociationDestroy(ctx context.Context) resource.TestCheckFunc { + return func(s *terraform.State) error { + conn := acctest.Provider.Meta().(*conns.AWSClient).WorkSpacesWebClient(ctx) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "aws_workspacesweb_data_protection_settings_association" { + continue + } + + dataProtectionSettings, err := tfworkspacesweb.FindDataProtectionSettingsByARN(ctx, conn, 
rs.Primary.Attributes["data_protection_settings_arn"]) + + if tfresource.NotFound(err) { + continue + } + + if err != nil { + return err + } + + // Check if the portal is still associated + portalARN := rs.Primary.Attributes["portal_arn"] + if slices.Contains(dataProtectionSettings.AssociatedPortalArns, portalARN) { + return fmt.Errorf("WorkSpaces Web Data Protection Settings Association %s still exists", rs.Primary.Attributes["data_protection_settings_arn"]) + } + } + + return nil + } +} + +func testAccCheckDataProtectionSettingsAssociationExists(ctx context.Context, n string, v *awstypes.DataProtectionSettings) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + conn := acctest.Provider.Meta().(*conns.AWSClient).WorkSpacesWebClient(ctx) + + output, err := tfworkspacesweb.FindDataProtectionSettingsByARN(ctx, conn, rs.Primary.Attributes["data_protection_settings_arn"]) + + if err != nil { + return err + } + + // Check if the portal is associated + portalARN := rs.Primary.Attributes["portal_arn"] + if !slices.Contains(output.AssociatedPortalArns, portalARN) { + return fmt.Errorf("Association not found") + } + + *v = *output + + return nil + } +} + +func testAccDataProtectionSettingsAssociationImportStateIdFunc(resourceName string) resource.ImportStateIdFunc { + return func(s *terraform.State) (string, error) { + rs, ok := s.RootModule().Resources[resourceName] + if !ok { + return "", fmt.Errorf("Not found: %s", resourceName) + } + + return fmt.Sprintf("%s,%s", rs.Primary.Attributes["data_protection_settings_arn"], rs.Primary.Attributes["portal_arn"]), nil + } +} + +func testAccDataProtectionSettingsAssociationConfig_basic() string { + return ` +resource "aws_workspacesweb_portal" "test" { + display_name = "test" +} + +resource "aws_workspacesweb_data_protection_settings" "test" { + display_name = "test" +} + +resource 
"aws_workspacesweb_data_protection_settings_association" "test" { + data_protection_settings_arn = aws_workspacesweb_data_protection_settings.test.data_protection_settings_arn + portal_arn = aws_workspacesweb_portal.test.portal_arn +} +` +} diff --git a/internal/service/workspacesweb/data_protection_settings_tags_gen_test.go b/internal/service/workspacesweb/data_protection_settings_tags_gen_test.go index 67232508e4ab..595d702d121c 100644 --- a/internal/service/workspacesweb/data_protection_settings_tags_gen_test.go +++ b/internal/service/workspacesweb/data_protection_settings_tags_gen_test.go @@ -18,10 +18,11 @@ import ( func TestAccWorkSpacesWebDataProtectionSettings_tags(t *testing.T) { ctx := acctest.Context(t) + var v types.DataProtectionSettings resourceName := "aws_workspacesweb_data_protection_settings.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), CheckDestroy: testAccCheckDataProtectionSettingsDestroy(ctx), @@ -199,10 +200,11 @@ func TestAccWorkSpacesWebDataProtectionSettings_tags(t *testing.T) { func TestAccWorkSpacesWebDataProtectionSettings_tags_null(t *testing.T) { ctx := acctest.Context(t) + var v types.DataProtectionSettings resourceName := "aws_workspacesweb_data_protection_settings.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), CheckDestroy: testAccCheckDataProtectionSettingsDestroy(ctx), @@ -260,10 +262,11 @@ func TestAccWorkSpacesWebDataProtectionSettings_tags_null(t *testing.T) { func TestAccWorkSpacesWebDataProtectionSettings_tags_EmptyMap(t *testing.T) { ctx := acctest.Context(t) + var v types.DataProtectionSettings resourceName := "aws_workspacesweb_data_protection_settings.test" - resource.ParallelTest(t, 
resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), CheckDestroy: testAccCheckDataProtectionSettingsDestroy(ctx), @@ -309,10 +312,11 @@ func TestAccWorkSpacesWebDataProtectionSettings_tags_EmptyMap(t *testing.T) { func TestAccWorkSpacesWebDataProtectionSettings_tags_AddOnUpdate(t *testing.T) { ctx := acctest.Context(t) + var v types.DataProtectionSettings resourceName := "aws_workspacesweb_data_protection_settings.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), CheckDestroy: testAccCheckDataProtectionSettingsDestroy(ctx), @@ -387,10 +391,11 @@ func TestAccWorkSpacesWebDataProtectionSettings_tags_AddOnUpdate(t *testing.T) { func TestAccWorkSpacesWebDataProtectionSettings_tags_EmptyTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) + var v types.DataProtectionSettings resourceName := "aws_workspacesweb_data_protection_settings.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), CheckDestroy: testAccCheckDataProtectionSettingsDestroy(ctx), @@ -476,10 +481,11 @@ func TestAccWorkSpacesWebDataProtectionSettings_tags_EmptyTag_OnCreate(t *testin func TestAccWorkSpacesWebDataProtectionSettings_tags_EmptyTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + var v types.DataProtectionSettings resourceName := "aws_workspacesweb_data_protection_settings.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), CheckDestroy: 
testAccCheckDataProtectionSettingsDestroy(ctx), @@ -613,10 +619,11 @@ func TestAccWorkSpacesWebDataProtectionSettings_tags_EmptyTag_OnUpdate_Add(t *te func TestAccWorkSpacesWebDataProtectionSettings_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + var v types.DataProtectionSettings resourceName := "aws_workspacesweb_data_protection_settings.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), CheckDestroy: testAccCheckDataProtectionSettingsDestroy(ctx), @@ -701,10 +708,11 @@ func TestAccWorkSpacesWebDataProtectionSettings_tags_EmptyTag_OnUpdate_Replace(t func TestAccWorkSpacesWebDataProtectionSettings_tags_DefaultTags_providerOnly(t *testing.T) { ctx := acctest.Context(t) + var v types.DataProtectionSettings resourceName := "aws_workspacesweb_data_protection_settings.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), CheckDestroy: testAccCheckDataProtectionSettingsDestroy(ctx), @@ -881,10 +889,11 @@ func TestAccWorkSpacesWebDataProtectionSettings_tags_DefaultTags_providerOnly(t func TestAccWorkSpacesWebDataProtectionSettings_tags_DefaultTags_nonOverlapping(t *testing.T) { ctx := acctest.Context(t) + var v types.DataProtectionSettings resourceName := "aws_workspacesweb_data_protection_settings.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), CheckDestroy: testAccCheckDataProtectionSettingsDestroy(ctx), @@ -1040,10 +1049,11 @@ func TestAccWorkSpacesWebDataProtectionSettings_tags_DefaultTags_nonOverlapping( func 
TestAccWorkSpacesWebDataProtectionSettings_tags_DefaultTags_overlapping(t *testing.T) { ctx := acctest.Context(t) + var v types.DataProtectionSettings resourceName := "aws_workspacesweb_data_protection_settings.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), CheckDestroy: testAccCheckDataProtectionSettingsDestroy(ctx), @@ -1215,10 +1225,11 @@ func TestAccWorkSpacesWebDataProtectionSettings_tags_DefaultTags_overlapping(t * func TestAccWorkSpacesWebDataProtectionSettings_tags_DefaultTags_updateToProviderOnly(t *testing.T) { ctx := acctest.Context(t) + var v types.DataProtectionSettings resourceName := "aws_workspacesweb_data_protection_settings.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), CheckDestroy: testAccCheckDataProtectionSettingsDestroy(ctx), @@ -1303,10 +1314,11 @@ func TestAccWorkSpacesWebDataProtectionSettings_tags_DefaultTags_updateToProvide func TestAccWorkSpacesWebDataProtectionSettings_tags_DefaultTags_updateToResourceOnly(t *testing.T) { ctx := acctest.Context(t) + var v types.DataProtectionSettings resourceName := "aws_workspacesweb_data_protection_settings.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), CheckDestroy: testAccCheckDataProtectionSettingsDestroy(ctx), @@ -1390,10 +1402,11 @@ func TestAccWorkSpacesWebDataProtectionSettings_tags_DefaultTags_updateToResourc func TestAccWorkSpacesWebDataProtectionSettings_tags_DefaultTags_emptyResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v types.DataProtectionSettings resourceName := 
"aws_workspacesweb_data_protection_settings.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), CheckDestroy: testAccCheckDataProtectionSettingsDestroy(ctx), @@ -1455,10 +1468,11 @@ func TestAccWorkSpacesWebDataProtectionSettings_tags_DefaultTags_emptyResourceTa func TestAccWorkSpacesWebDataProtectionSettings_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T) { ctx := acctest.Context(t) + var v types.DataProtectionSettings resourceName := "aws_workspacesweb_data_protection_settings.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), CheckDestroy: testAccCheckDataProtectionSettingsDestroy(ctx), @@ -1512,10 +1526,11 @@ func TestAccWorkSpacesWebDataProtectionSettings_tags_DefaultTags_emptyProviderOn func TestAccWorkSpacesWebDataProtectionSettings_tags_DefaultTags_nullOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v types.DataProtectionSettings resourceName := "aws_workspacesweb_data_protection_settings.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), CheckDestroy: testAccCheckDataProtectionSettingsDestroy(ctx), @@ -1580,10 +1595,11 @@ func TestAccWorkSpacesWebDataProtectionSettings_tags_DefaultTags_nullOverlapping func TestAccWorkSpacesWebDataProtectionSettings_tags_DefaultTags_nullNonOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v types.DataProtectionSettings resourceName := "aws_workspacesweb_data_protection_settings.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { 
acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), CheckDestroy: testAccCheckDataProtectionSettingsDestroy(ctx), @@ -1650,10 +1666,11 @@ func TestAccWorkSpacesWebDataProtectionSettings_tags_DefaultTags_nullNonOverlapp func TestAccWorkSpacesWebDataProtectionSettings_tags_ComputedTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) + var v types.DataProtectionSettings resourceName := "aws_workspacesweb_data_protection_settings.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), CheckDestroy: testAccCheckDataProtectionSettingsDestroy(ctx), @@ -1704,10 +1721,11 @@ func TestAccWorkSpacesWebDataProtectionSettings_tags_ComputedTag_OnCreate(t *tes func TestAccWorkSpacesWebDataProtectionSettings_tags_ComputedTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + var v types.DataProtectionSettings resourceName := "aws_workspacesweb_data_protection_settings.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), CheckDestroy: testAccCheckDataProtectionSettingsDestroy(ctx), @@ -1799,10 +1817,11 @@ func TestAccWorkSpacesWebDataProtectionSettings_tags_ComputedTag_OnUpdate_Add(t func TestAccWorkSpacesWebDataProtectionSettings_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + var v types.DataProtectionSettings resourceName := "aws_workspacesweb_data_protection_settings.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), CheckDestroy: testAccCheckDataProtectionSettingsDestroy(ctx), @@ -1884,10 +1903,11 @@ func 
TestAccWorkSpacesWebDataProtectionSettings_tags_ComputedTag_OnUpdate_Replac func TestAccWorkSpacesWebDataProtectionSettings_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { ctx := acctest.Context(t) + var v types.DataProtectionSettings resourceName := "aws_workspacesweb_data_protection_settings.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), CheckDestroy: testAccCheckDataProtectionSettingsDestroy(ctx), @@ -2042,10 +2062,11 @@ func TestAccWorkSpacesWebDataProtectionSettings_tags_IgnoreTags_Overlap_DefaultT func TestAccWorkSpacesWebDataProtectionSettings_tags_IgnoreTags_Overlap_ResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v types.DataProtectionSettings resourceName := "aws_workspacesweb_data_protection_settings.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), CheckDestroy: testAccCheckDataProtectionSettingsDestroy(ctx), diff --git a/internal/service/workspacesweb/exports_test.go b/internal/service/workspacesweb/exports_test.go index 61ec728a6790..e51c5e630f72 100644 --- a/internal/service/workspacesweb/exports_test.go +++ b/internal/service/workspacesweb/exports_test.go @@ -5,17 +5,35 @@ package workspacesweb // Exports for use in tests only. 
var ( - ResourceBrowserSettings = newBrowserSettingsResource - ResourceDataProtectionSettings = newDataProtectionSettingsResource - ResourceIPAccessSettings = newIPAccessSettingsResource - ResourceNetworkSettings = newNetworkSettingsResource - ResourceUserAccessLoggingSettings = newUserAccessLoggingSettingsResource - ResourceUserSettings = newUserSettingsResource + ResourceBrowserSettings = newBrowserSettingsResource + ResourceBrowserSettingsAssociation = newBrowserSettingsAssociationResource + ResourceDataProtectionSettings = newDataProtectionSettingsResource + ResourceDataProtectionSettingsAssociation = newDataProtectionSettingsAssociationResource + ResourceIdentityProvider = newIdentityProviderResource + ResourceIPAccessSettings = newIPAccessSettingsResource + ResourceIPAccessSettingsAssociation = newIPAccessSettingsAssociationResource + ResourceNetworkSettings = newNetworkSettingsResource + ResourceNetworkSettingsAssociation = newNetworkSettingsAssociationResource + ResourcePortal = newPortalResource + ResourceSessionLogger = newSessionLoggerResource + ResourceSessionLoggerAssociation = newSessionLoggerAssociationResource + ResourceTrustStore = newTrustStoreResource + ResourceTrustStoreAssociation = newTrustStoreAssociationResource + ResourceUserAccessLoggingSettings = newUserAccessLoggingSettingsResource + ResourceUserAccessLoggingSettingsAssociation = newUserAccessLoggingSettingsAssociationResource + ResourceUserSettings = newUserSettingsResource + ResourceUserSettingsAssociation = newUserSettingsAssociationResource FindBrowserSettingsByARN = findBrowserSettingsByARN FindDataProtectionSettingsByARN = findDataProtectionSettingsByARN + FindIdentityProviderByARN = findIdentityProviderByARN FindIPAccessSettingsByARN = findIPAccessSettingsByARN FindNetworkSettingsByARN = findNetworkSettingsByARN + FindPortalByARN = findPortalByARN + FindSessionLoggerByARN = findSessionLoggerByARN + FindTrustStoreByARN = findTrustStoreByARN FindUserAccessLoggingSettingsByARN = 
findUserAccessLoggingSettingsByARN FindUserSettingsByARN = findUserSettingsByARN + + PortalARNFromIdentityProviderARN = portalARNFromIdentityProviderARN ) diff --git a/internal/service/workspacesweb/identity_provider.go b/internal/service/workspacesweb/identity_provider.go new file mode 100644 index 000000000000..bbdad81309e5 --- /dev/null +++ b/internal/service/workspacesweb/identity_provider.go @@ -0,0 +1,304 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package workspacesweb + +import ( + "context" + "fmt" + "strings" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/aws/arn" + "github.com/aws/aws-sdk-go-v2/service/workspacesweb" + awstypes "github.com/aws/aws-sdk-go-v2/service/workspacesweb/types" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" + "github.com/hashicorp/terraform-plugin-framework/types" + sdkid "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" + "github.com/hashicorp/terraform-provider-aws/internal/errs" + "github.com/hashicorp/terraform-provider-aws/internal/errs/fwdiag" + "github.com/hashicorp/terraform-provider-aws/internal/framework" + fwflex "github.com/hashicorp/terraform-provider-aws/internal/framework/flex" + fwtypes "github.com/hashicorp/terraform-provider-aws/internal/framework/types" + tfretry "github.com/hashicorp/terraform-provider-aws/internal/retry" + tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + "github.com/hashicorp/terraform-provider-aws/names" +) + +// @FrameworkResource("aws_workspacesweb_identity_provider", 
name="Identity Provider") +// @Tags(identifierAttribute="identity_provider_arn") +// @Testing(tagsTest=true) +// @Testing(generator=false) +// @Testing(existsType="github.com/aws/aws-sdk-go-v2/service/workspacesweb/types;types.IdentityProvider") +// @Testing(importStateIdAttribute="identity_provider_arn") +func newIdentityProviderResource(_ context.Context) (resource.ResourceWithConfigure, error) { + return &identityProviderResource{}, nil +} + +type identityProviderResource struct { + framework.ResourceWithModel[identityProviderResourceModel] +} + +func (r *identityProviderResource) Schema(ctx context.Context, request resource.SchemaRequest, response *resource.SchemaResponse) { + response.Schema = schema.Schema{ + Attributes: map[string]schema.Attribute{ + "identity_provider_arn": schema.StringAttribute{ + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + }, + "identity_provider_details": schema.MapAttribute{ + CustomType: fwtypes.MapOfStringType, + ElementType: types.StringType, + Required: true, + }, + "identity_provider_name": schema.StringAttribute{ + Required: true, + }, + "identity_provider_type": schema.StringAttribute{ + CustomType: fwtypes.StringEnumType[awstypes.IdentityProviderType](), + Required: true, + }, + "portal_arn": schema.StringAttribute{ + CustomType: fwtypes.ARNType, + Required: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + }, + names.AttrTags: tftags.TagsAttribute(), + names.AttrTagsAll: tftags.TagsAttributeComputedOnly(), + }, + } +} + +func (r *identityProviderResource) Create(ctx context.Context, request resource.CreateRequest, response *resource.CreateResponse) { + var data identityProviderResourceModel + response.Diagnostics.Append(request.Plan.Get(ctx, &data)...) 
+ if response.Diagnostics.HasError() { + return + } + + conn := r.Meta().WorkSpacesWebClient(ctx) + + name := fwflex.StringValueFromFramework(ctx, data.IdentityProviderName) + var input workspacesweb.CreateIdentityProviderInput + response.Diagnostics.Append(fwflex.Expand(ctx, data, &input)...) + if response.Diagnostics.HasError() { + return + } + + // Additional fields. + input.ClientToken = aws.String(sdkid.UniqueId()) + input.Tags = getTagsIn(ctx) + + output, err := conn.CreateIdentityProvider(ctx, &input) + + if err != nil { + response.Diagnostics.AddError(fmt.Sprintf("creating WorkSpacesWeb Identity Provider (%s)", name), err.Error()) + return + } + + data.IdentityProviderARN = fwflex.StringToFramework(ctx, output.IdentityProviderArn) + + // Get the identity provider details to populate other fields + identityProvider, portalARN, err := findIdentityProviderByARN(ctx, conn, data.IdentityProviderARN.ValueString()) + if err != nil { + response.Diagnostics.AddError(fmt.Sprintf("reading WorkSpacesWeb Identity Provider (%s)", data.IdentityProviderARN.ValueString()), err.Error()) + return + } + + data.PortalARN = fwtypes.ARNValue(portalARN) + + response.Diagnostics.Append(fwflex.Flatten(ctx, identityProvider, &data)...) + if response.Diagnostics.HasError() { + return + } + + response.Diagnostics.Append(response.State.Set(ctx, data)...) +} + +func (r *identityProviderResource) Read(ctx context.Context, request resource.ReadRequest, response *resource.ReadResponse) { + var data identityProviderResourceModel + response.Diagnostics.Append(request.State.Get(ctx, &data)...) 
+ if response.Diagnostics.HasError() { + return + } + + conn := r.Meta().WorkSpacesWebClient(ctx) + + output, portalARN, err := findIdentityProviderByARN(ctx, conn, data.IdentityProviderARN.ValueString()) + if tfretry.NotFound(err) { + response.Diagnostics.Append(fwdiag.NewResourceNotFoundWarningDiagnostic(err)) + response.State.RemoveResource(ctx) + return + } + + if err != nil { + response.Diagnostics.AddError(fmt.Sprintf("reading WorkSpacesWeb Identity Provider (%s)", data.IdentityProviderARN.ValueString()), err.Error()) + return + } + + data.PortalARN = fwtypes.ARNValue(portalARN) + response.Diagnostics.Append(fwflex.Flatten(ctx, output, &data)...) + if response.Diagnostics.HasError() { + return + } + + response.Diagnostics.Append(response.State.Set(ctx, &data)...) +} + +func (r *identityProviderResource) Update(ctx context.Context, request resource.UpdateRequest, response *resource.UpdateResponse) { + var new, old identityProviderResourceModel + response.Diagnostics.Append(request.Plan.Get(ctx, &new)...) + if response.Diagnostics.HasError() { + return + } + response.Diagnostics.Append(request.State.Get(ctx, &old)...) + if response.Diagnostics.HasError() { + return + } + + conn := r.Meta().WorkSpacesWebClient(ctx) + + if !new.IdentityProviderDetails.Equal(old.IdentityProviderDetails) || + !new.IdentityProviderName.Equal(old.IdentityProviderName) || + !new.IdentityProviderType.Equal(old.IdentityProviderType) { + var input workspacesweb.UpdateIdentityProviderInput + response.Diagnostics.Append(fwflex.Expand(ctx, new, &input)...) + if response.Diagnostics.HasError() { + return + } + + // Additional fields. 
+ input.ClientToken = aws.String(sdkid.UniqueId()) + + output, err := conn.UpdateIdentityProvider(ctx, &input) + + if err != nil { + response.Diagnostics.AddError(fmt.Sprintf("updating WorkSpacesWeb Identity Provider (%s)", new.IdentityProviderARN.ValueString()), err.Error()) + return + } + + response.Diagnostics.Append(fwflex.Flatten(ctx, output.IdentityProvider, &new)...) + if response.Diagnostics.HasError() { + return + } + } + + response.Diagnostics.Append(response.State.Set(ctx, &new)...) +} + +func (r *identityProviderResource) Delete(ctx context.Context, request resource.DeleteRequest, response *resource.DeleteResponse) { + var data identityProviderResourceModel + response.Diagnostics.Append(request.State.Get(ctx, &data)...) + if response.Diagnostics.HasError() { + return + } + + conn := r.Meta().WorkSpacesWebClient(ctx) + + input := workspacesweb.DeleteIdentityProviderInput{ + IdentityProviderArn: data.IdentityProviderARN.ValueStringPointer(), + } + _, err := conn.DeleteIdentityProvider(ctx, &input) + + if errs.IsA[*awstypes.ResourceNotFoundException](err) { + return + } + + if err != nil { + response.Diagnostics.AddError(fmt.Sprintf("deleting WorkSpacesWeb Identity Provider (%s)", data.IdentityProviderARN.ValueString()), err.Error()) + return + } +} + +func (r *identityProviderResource) ImportState(ctx context.Context, request resource.ImportStateRequest, response *resource.ImportStateResponse) { + resource.ImportStatePassthroughID(ctx, path.Root("identity_provider_arn"), request, response) +} + +const ( + arnResourceSeparator = "/" + arnService = "workspaces-web" +) + +func portalARNFromIdentityProviderARN(identityProviderARN string) (string, error) { + // Identity Provider ARN format: arn:{PARTITION}:workspaces-web:{REGION}:{ACCOUNT_ID}:identityProvider/{PORTAL_ID}/{IDP_RESOURCE_ID} + // Portal ARN format: arn:{PARTITION}:workspaces-web:{REGION}:{ACCOUNT_ID}:portal/{PORTAL_ID} + parsedARN, err := arn.Parse(identityProviderARN) + + if err != nil { + 
return "", fmt.Errorf("parsing ARN (%s): %w", identityProviderARN, err) + } + + if actual, expected := parsedARN.Service, arnService; actual != expected { + return "", fmt.Errorf("expected service %s in ARN (%s), got: %s", expected, identityProviderARN, actual) + } + + resourceParts := strings.Split(parsedARN.Resource, arnResourceSeparator) + + if actual, expected := len(resourceParts), 3; actual != expected { + return "", fmt.Errorf("expected %d resource parts in ARN (%s), got: %d", expected, identityProviderARN, actual) + } + + if actual, expected := resourceParts[0], "identityProvider"; actual != expected { + return "", fmt.Errorf("expected %s in ARN (%s), got: %s", expected, identityProviderARN, actual) + } + + portalARN := arn.ARN{ + Partition: parsedARN.Partition, + Service: parsedARN.Service, + Region: parsedARN.Region, + AccountID: parsedARN.AccountID, + Resource: "portal" + arnResourceSeparator + resourceParts[1], + }.String() + + return portalARN, nil +} + +func findIdentityProviderByARN(ctx context.Context, conn *workspacesweb.Client, arn string) (*awstypes.IdentityProvider, string, error) { + input := workspacesweb.GetIdentityProviderInput{ + IdentityProviderArn: &arn, + } + output, err := conn.GetIdentityProvider(ctx, &input) + + if errs.IsA[*awstypes.ResourceNotFoundException](err) { + return nil, "", &retry.NotFoundError{ + LastError: err, + LastRequest: input, + } + } + + if err != nil { + return nil, "", err + } + + if output == nil || output.IdentityProvider == nil { + return nil, "", tfresource.NewEmptyResultError(input) + } + + portalARN, err := portalARNFromIdentityProviderARN(arn) + if err != nil { + return nil, "", err + } + + return output.IdentityProvider, portalARN, nil +} + +type identityProviderResourceModel struct { + framework.WithRegionModel + IdentityProviderARN types.String `tfsdk:"identity_provider_arn"` + IdentityProviderDetails fwtypes.MapOfString `tfsdk:"identity_provider_details"` + IdentityProviderName types.String 
`tfsdk:"identity_provider_name"` + IdentityProviderType fwtypes.StringEnum[awstypes.IdentityProviderType] `tfsdk:"identity_provider_type"` + PortalARN fwtypes.ARN `tfsdk:"portal_arn"` + Tags tftags.Map `tfsdk:"tags"` + TagsAll tftags.Map `tfsdk:"tags_all"` +} diff --git a/internal/service/workspacesweb/identity_provider_tags_gen_test.go b/internal/service/workspacesweb/identity_provider_tags_gen_test.go new file mode 100644 index 000000000000..ac27d02aba67 --- /dev/null +++ b/internal/service/workspacesweb/identity_provider_tags_gen_test.go @@ -0,0 +1,2245 @@ +// Code generated by internal/generate/tagstests/main.go; DO NOT EDIT. + +package workspacesweb_test + +import ( + "testing" + + "github.com/aws/aws-sdk-go-v2/service/workspacesweb/types" + "github.com/hashicorp/terraform-plugin-testing/config" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/knownvalue" + "github.com/hashicorp/terraform-plugin-testing/plancheck" + "github.com/hashicorp/terraform-plugin-testing/statecheck" + "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccWorkSpacesWebIdentityProvider_tags(t *testing.T) { + ctx := acctest.Context(t) + + var v types.IdentityProvider + resourceName := "aws_workspacesweb_identity_provider.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), + CheckDestroy: testAccCheckIdentityProviderDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + ConfigDirectory: config.StaticDirectory("testdata/IdentityProvider/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), 
+ }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckIdentityProviderExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/IdentityProvider/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, "identity_provider_arn"), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "identity_provider_arn", + }, + { + ConfigDirectory: config.StaticDirectory("testdata/IdentityProvider/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1Updated), + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + }, + Check: 
resource.ComposeAggregateTestCheckFunc( + testAccCheckIdentityProviderExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1Updated), + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1Updated), + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1Updated), + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1Updated), + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/IdentityProvider/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1Updated), + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, "identity_provider_arn"), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "identity_provider_arn", + }, + { + ConfigDirectory: 
config.StaticDirectory("testdata/IdentityProvider/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckIdentityProviderExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/IdentityProvider/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, "identity_provider_arn"), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "identity_provider_arn", + }, + { + ConfigDirectory: config.StaticDirectory("testdata/IdentityProvider/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: 
nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckIdentityProviderExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/IdentityProvider/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, "identity_provider_arn"), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "identity_provider_arn", + }, + }, + }) +} + +func TestAccWorkSpacesWebIdentityProvider_tags_null(t *testing.T) { + ctx := acctest.Context(t) + + var v types.IdentityProvider + resourceName := "aws_workspacesweb_identity_provider.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), + CheckDestroy: testAccCheckIdentityProviderDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + ConfigDirectory: config.StaticDirectory("testdata/IdentityProvider/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: nil, + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( 
+ testAccCheckIdentityProviderExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.Null(), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.Null(), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/IdentityProvider/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: nil, + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, "identity_provider_arn"), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "identity_provider_arn", + ImportStateVerifyIgnore: []string{ + acctest.CtTagsKey1, // The canonical value returned by the AWS API is "" + }, + }, + }, + }) +} + +func TestAccWorkSpacesWebIdentityProvider_tags_EmptyMap(t *testing.T) { + ctx := acctest.Context(t) + + var v types.IdentityProvider + resourceName := "aws_workspacesweb_identity_provider.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), + CheckDestroy: 
testAccCheckIdentityProviderDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + ConfigDirectory: config.StaticDirectory("testdata/IdentityProvider/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{}), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckIdentityProviderExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/IdentityProvider/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{}), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, "identity_provider_arn"), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "identity_provider_arn", + ImportStateVerifyIgnore: []string{ + acctest.CtTagsKey1, // The canonical value returned by the AWS API is "" + }, + }, + }, + }) +} + +func TestAccWorkSpacesWebIdentityProvider_tags_AddOnUpdate(t *testing.T) { + ctx := acctest.Context(t) + + var v types.IdentityProvider + resourceName := "aws_workspacesweb_identity_provider.test" + + acctest.ParallelTest(ctx, t, 
resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), + CheckDestroy: testAccCheckIdentityProviderDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + ConfigDirectory: config.StaticDirectory("testdata/IdentityProvider/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckIdentityProviderExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/IdentityProvider/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckIdentityProviderExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: 
knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/IdentityProvider/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, "identity_provider_arn"), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "identity_provider_arn", + }, + }, + }) +} + +func TestAccWorkSpacesWebIdentityProvider_tags_EmptyTag_OnCreate(t *testing.T) { + ctx := acctest.Context(t) + + var v types.IdentityProvider + resourceName := "aws_workspacesweb_identity_provider.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), + CheckDestroy: testAccCheckIdentityProviderDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + ConfigDirectory: config.StaticDirectory("testdata/IdentityProvider/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckIdentityProviderExists(ctx, 
resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/IdentityProvider/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, "identity_provider_arn"), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "identity_provider_arn", + }, + { + ConfigDirectory: config.StaticDirectory("testdata/IdentityProvider/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckIdentityProviderExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), 
+ }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/IdentityProvider/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, "identity_provider_arn"), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "identity_provider_arn", + }, + }, + }) +} + +func TestAccWorkSpacesWebIdentityProvider_tags_EmptyTag_OnUpdate_Add(t *testing.T) { + ctx := acctest.Context(t) + + var v types.IdentityProvider + resourceName := "aws_workspacesweb_identity_provider.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), + CheckDestroy: testAccCheckIdentityProviderDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + ConfigDirectory: config.StaticDirectory("testdata/IdentityProvider/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckIdentityProviderExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + 
statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/IdentityProvider/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + acctest.CtKey2: config.StringVariable(""), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckIdentityProviderExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + acctest.CtKey2: knownvalue.StringExact(""), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + acctest.CtKey2: knownvalue.StringExact(""), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + 
acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + acctest.CtKey2: knownvalue.StringExact(""), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + acctest.CtKey2: knownvalue.StringExact(""), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/IdentityProvider/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + acctest.CtKey2: config.StringVariable(""), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, "identity_provider_arn"), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "identity_provider_arn", + }, + { + ConfigDirectory: config.StaticDirectory("testdata/IdentityProvider/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckIdentityProviderExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), 
knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/IdentityProvider/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, "identity_provider_arn"), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "identity_provider_arn", + }, + }, + }) +} + +func TestAccWorkSpacesWebIdentityProvider_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { + ctx := acctest.Context(t) + + var v types.IdentityProvider + resourceName := "aws_workspacesweb_identity_provider.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), + CheckDestroy: testAccCheckIdentityProviderDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + ConfigDirectory: config.StaticDirectory("testdata/IdentityProvider/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckIdentityProviderExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + 
statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/IdentityProvider/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckIdentityProviderExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ 
+ acctest.CtKey1: knownvalue.StringExact(""), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/IdentityProvider/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, "identity_provider_arn"), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "identity_provider_arn", + }, + }, + }) +} + +func TestAccWorkSpacesWebIdentityProvider_tags_DefaultTags_providerOnly(t *testing.T) { + ctx := acctest.Context(t) + + var v types.IdentityProvider + resourceName := "aws_workspacesweb_identity_provider.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), + CheckDestroy: testAccCheckIdentityProviderDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/IdentityProvider/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckIdentityProviderExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, 
plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/IdentityProvider/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, "identity_provider_arn"), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "identity_provider_arn", + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/IdentityProvider/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1Updated), + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckIdentityProviderExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1Updated), + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + 
plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1Updated), + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/IdentityProvider/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1Updated), + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, "identity_provider_arn"), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "identity_provider_arn", + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/IdentityProvider/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckIdentityProviderExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: 
[]plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/IdentityProvider/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, "identity_provider_arn"), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "identity_provider_arn", + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/IdentityProvider/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckIdentityProviderExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), 
knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/IdentityProvider/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, "identity_provider_arn"), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "identity_provider_arn", + }, + }, + }) +} + +func TestAccWorkSpacesWebIdentityProvider_tags_DefaultTags_nonOverlapping(t *testing.T) { + ctx := acctest.Context(t) + + var v types.IdentityProvider + resourceName := "aws_workspacesweb_identity_provider.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), + CheckDestroy: testAccCheckIdentityProviderDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/IdentityProvider/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckIdentityProviderExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), 
knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1), + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1), + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/IdentityProvider/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, "identity_provider_arn"), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "identity_provider_arn", + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/IdentityProvider/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1Updated), + }), + 
acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: config.StringVariable(acctest.CtResourceValue2), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckIdentityProviderExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1Updated), + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1Updated), + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + 
ConfigDirectory: config.StaticDirectory("testdata/IdentityProvider/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1Updated), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: config.StringVariable(acctest.CtResourceValue2), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, "identity_provider_arn"), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "identity_provider_arn", + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/IdentityProvider/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckIdentityProviderExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/IdentityProvider/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: nil, + }, + 
ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, "identity_provider_arn"), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "identity_provider_arn", + }, + }, + }) +} + +func TestAccWorkSpacesWebIdentityProvider_tags_DefaultTags_overlapping(t *testing.T) { + ctx := acctest.Context(t) + + var v types.IdentityProvider + resourceName := "aws_workspacesweb_identity_provider.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), + CheckDestroy: testAccCheckIdentityProviderDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/IdentityProvider/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtResourceValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckIdentityProviderExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + 
plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/IdentityProvider/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtResourceValue1), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, "identity_provider_arn"), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "identity_provider_arn", + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/IdentityProvider/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtProviderValue1), + acctest.CtOverlapKey2: config.StringVariable("providervalue2"), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtResourceValue1), + acctest.CtOverlapKey2: config.StringVariable(acctest.CtResourceValue2), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckIdentityProviderExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + 
statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + acctest.CtOverlapKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + acctest.CtOverlapKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + acctest.CtOverlapKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + acctest.CtOverlapKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/IdentityProvider/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtProviderValue1), + acctest.CtOverlapKey2: config.StringVariable("providervalue2"), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtResourceValue1), + acctest.CtOverlapKey2: config.StringVariable(acctest.CtResourceValue2), + }), + }, + ResourceName: resourceName, + ImportState: 
true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, "identity_provider_arn"), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "identity_provider_arn", + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/IdentityProvider/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtResourceValue2), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckIdentityProviderExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue2), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: 
config.StaticDirectory("testdata/IdentityProvider/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtResourceValue2), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, "identity_provider_arn"), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "identity_provider_arn", + }, + }, + }) +} + +func TestAccWorkSpacesWebIdentityProvider_tags_DefaultTags_updateToProviderOnly(t *testing.T) { + ctx := acctest.Context(t) + + var v types.IdentityProvider + resourceName := "aws_workspacesweb_identity_provider.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), + CheckDestroy: testAccCheckIdentityProviderDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/IdentityProvider/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckIdentityProviderExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: 
knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/IdentityProvider/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckIdentityProviderExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: 
config.StaticDirectory("testdata/IdentityProvider/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, "identity_provider_arn"), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "identity_provider_arn", + }, + }, + }) +} + +func TestAccWorkSpacesWebIdentityProvider_tags_DefaultTags_updateToResourceOnly(t *testing.T) { + ctx := acctest.Context(t) + + var v types.IdentityProvider + resourceName := "aws_workspacesweb_identity_provider.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), + CheckDestroy: testAccCheckIdentityProviderDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/IdentityProvider/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckIdentityProviderExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + 
plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/IdentityProvider/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckIdentityProviderExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/IdentityProvider/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + 
acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, "identity_provider_arn"), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "identity_provider_arn", + }, + }, + }) +} + +func TestAccWorkSpacesWebIdentityProvider_tags_DefaultTags_emptyResourceTag(t *testing.T) { + ctx := acctest.Context(t) + + var v types.IdentityProvider + resourceName := "aws_workspacesweb_identity_provider.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), + CheckDestroy: testAccCheckIdentityProviderDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/IdentityProvider/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckIdentityProviderExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, 
tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/IdentityProvider/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, "identity_provider_arn"), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "identity_provider_arn", + }, + }, + }) +} + +func TestAccWorkSpacesWebIdentityProvider_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T) { + ctx := acctest.Context(t) + + var v types.IdentityProvider + resourceName := "aws_workspacesweb_identity_provider.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), + CheckDestroy: testAccCheckIdentityProviderDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/IdentityProvider/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckIdentityProviderExists(ctx, resourceName, &v), + ), + 
ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/IdentityProvider/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, "identity_provider_arn"), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "identity_provider_arn", + }, + }, + }) +} + +func TestAccWorkSpacesWebIdentityProvider_tags_DefaultTags_nullOverlappingResourceTag(t *testing.T) { + ctx := acctest.Context(t) + + var v types.IdentityProvider + resourceName := "aws_workspacesweb_identity_provider.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), + CheckDestroy: testAccCheckIdentityProviderDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: 
config.StaticDirectory("testdata/IdentityProvider/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: nil, + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckIdentityProviderExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.Null(), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.Null(), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/IdentityProvider/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: nil, + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, "identity_provider_arn"), + 
ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "identity_provider_arn", + ImportStateVerifyIgnore: []string{ + acctest.CtTagsKey1, // The canonical value returned by the AWS API is "" + }, + }, + }, + }) +} + +func TestAccWorkSpacesWebIdentityProvider_tags_DefaultTags_nullNonOverlappingResourceTag(t *testing.T) { + ctx := acctest.Context(t) + + var v types.IdentityProvider + resourceName := "aws_workspacesweb_identity_provider.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), + CheckDestroy: testAccCheckIdentityProviderDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/IdentityProvider/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: nil, + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckIdentityProviderExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.Null(), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(""), + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, 
tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.Null(), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(""), + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/IdentityProvider/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: nil, + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, "identity_provider_arn"), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "identity_provider_arn", + ImportStateVerifyIgnore: []string{ + "tags.resourcekey1", // The canonical value returned by the AWS API is "" + }, + }, + }, + }) +} + +func TestAccWorkSpacesWebIdentityProvider_tags_ComputedTag_OnCreate(t *testing.T) { + ctx := acctest.Context(t) + + var v types.IdentityProvider + resourceName := "aws_workspacesweb_identity_provider.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), + CheckDestroy: testAccCheckIdentityProviderDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/IdentityProvider/tagsComputed1/"), + ConfigVariables: config.Variables{ + "unknownTagKey": config.StringVariable("computedkey1"), + }, + Check: 
resource.ComposeAggregateTestCheckFunc( + testAccCheckIdentityProviderExists(ctx, resourceName, &v), + resource.TestCheckResourceAttrPair(resourceName, "tags.computedkey1", "null_resource.test", names.AttrID), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapSizeExact(1)), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapSizeExact(1)), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTags).AtMapKey("computedkey1")), + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/IdentityProvider/tagsComputed1/"), + ConfigVariables: config.Variables{ + "unknownTagKey": config.StringVariable("computedkey1"), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, "identity_provider_arn"), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "identity_provider_arn", + }, + }, + }) +} + +func TestAccWorkSpacesWebIdentityProvider_tags_ComputedTag_OnUpdate_Add(t *testing.T) { + ctx := acctest.Context(t) + + var v types.IdentityProvider + resourceName := "aws_workspacesweb_identity_provider.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), + 
CheckDestroy: testAccCheckIdentityProviderDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/IdentityProvider/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckIdentityProviderExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/IdentityProvider/tagsComputed2/"), + ConfigVariables: config.Variables{ + "unknownTagKey": config.StringVariable("computedkey1"), + "knownTagKey": config.StringVariable(acctest.CtKey1), + "knownTagValue": config.StringVariable(acctest.CtValue1), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckIdentityProviderExists(ctx, resourceName, 
&v), + resource.TestCheckResourceAttrPair(resourceName, "tags.computedkey1", "null_resource.test", names.AttrID), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapSizeExact(2)), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapPartial(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapSizeExact(2)), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapPartial(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTags).AtMapKey("computedkey1")), + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/IdentityProvider/tagsComputed2/"), + ConfigVariables: config.Variables{ + "unknownTagKey": config.StringVariable("computedkey1"), + "knownTagKey": config.StringVariable(acctest.CtKey1), + "knownTagValue": config.StringVariable(acctest.CtValue1), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, "identity_provider_arn"), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "identity_provider_arn", + 
}, + }, + }) +} + +func TestAccWorkSpacesWebIdentityProvider_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { + ctx := acctest.Context(t) + + var v types.IdentityProvider + resourceName := "aws_workspacesweb_identity_provider.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), + CheckDestroy: testAccCheckIdentityProviderDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/IdentityProvider/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckIdentityProviderExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + 
ConfigDirectory: config.StaticDirectory("testdata/IdentityProvider/tagsComputed1/"), + ConfigVariables: config.Variables{ + "unknownTagKey": config.StringVariable(acctest.CtKey1), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckIdentityProviderExists(ctx, resourceName, &v), + resource.TestCheckResourceAttrPair(resourceName, acctest.CtTagsKey1, "null_resource.test", names.AttrID), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapSizeExact(1)), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapSizeExact(1)), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTags).AtMapKey(acctest.CtKey1)), + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/IdentityProvider/tagsComputed1/"), + ConfigVariables: config.Variables{ + "unknownTagKey": config.StringVariable(acctest.CtKey1), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, "identity_provider_arn"), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "identity_provider_arn", + }, + }, + }) +} + +func TestAccWorkSpacesWebIdentityProvider_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { + ctx := acctest.Context(t) + + var v types.IdentityProvider + resourceName := 
"aws_workspacesweb_identity_provider.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), + CheckDestroy: testAccCheckIdentityProviderDestroy(ctx), + Steps: []resource.TestStep{ + // 1: Create + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/IdentityProvider/tags_ignore/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1), + }), + "ignore_tag_keys": config.SetVariable( + config.StringVariable(acctest.CtProviderKey1), + ), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckIdentityProviderExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + expectFullResourceTags(ctx, resourceName, knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1), // TODO: Should not be set + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, 
tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectEmptyPlan(), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectEmptyPlan(), + }, + }, + }, + // 2: Update ignored tag only + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/IdentityProvider/tags_ignore/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1Updated), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1), + }), + "ignore_tag_keys": config.SetVariable( + config.StringVariable(acctest.CtProviderKey1), + ), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckIdentityProviderExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + expectFullResourceTags(ctx, resourceName, knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1), // TODO: Should not be set + acctest.CtResourceKey1: 
knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectEmptyPlan(), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectEmptyPlan(), + }, + }, + }, + // 3: Update both tags + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/IdentityProvider/tags_ignore/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1Again), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1Updated), + }), + "ignore_tag_keys": config.SetVariable( + config.StringVariable(acctest.CtProviderKey1), + ), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckIdentityProviderExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: 
knownvalue.StringExact(acctest.CtResourceValue1Updated), + })), + expectFullResourceTags(ctx, resourceName, knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1), // TODO: Should not be set + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + })), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectEmptyPlan(), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectEmptyPlan(), + }, + }, + }, + }, + }) +} + +func TestAccWorkSpacesWebIdentityProvider_tags_IgnoreTags_Overlap_ResourceTag(t *testing.T) { + ctx := acctest.Context(t) + + var v types.IdentityProvider + resourceName := "aws_workspacesweb_identity_provider.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), + CheckDestroy: testAccCheckIdentityProviderDestroy(ctx), + Steps: []resource.TestStep{ + // 1: Create + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/IdentityProvider/tags_ignore/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1), + acctest.CtResourceKey2: 
config.StringVariable(acctest.CtResourceValue2), + }), + "ignore_tag_keys": config.SetVariable( + config.StringVariable(acctest.CtResourceKey1), + ), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckIdentityProviderExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + expectFullResourceTags(ctx, resourceName, knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), // TODO: Should not be set + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectEmptyPlan(), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), // TODO: Should be NoOp + 
plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + }, + ExpectNonEmptyPlan: true, + }, + // 2: Update ignored tag + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/IdentityProvider/tags_ignore/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: config.StringVariable(acctest.CtResourceValue2), + }), + "ignore_tag_keys": config.SetVariable( + config.StringVariable(acctest.CtResourceKey1), + ), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckIdentityProviderExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + expectFullResourceTags(ctx, resourceName, knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), // 
TODO: Should not be set + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectEmptyPlan(), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), // TODO: Should be NoOp + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + }, + ExpectNonEmptyPlan: true, + }, + // 3: Update both tags + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/IdentityProvider/tags_ignore/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1Again), + acctest.CtResourceKey2: 
config.StringVariable(acctest.CtResourceValue2Updated), + }), + "ignore_tag_keys": config.SetVariable( + config.StringVariable(acctest.CtResourceKey1), + ), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckIdentityProviderExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Again), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + expectFullResourceTags(ctx, resourceName, knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), // TODO: Should not be set + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Again), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectEmptyPlan(), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, 
plancheck.ResourceActionUpdate), // TODO: Should be NoOp + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Again), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + }, + }, + ExpectNonEmptyPlan: true, + }, + }, + }) +} diff --git a/internal/service/workspacesweb/identity_provider_test.go b/internal/service/workspacesweb/identity_provider_test.go new file mode 100644 index 000000000000..31e6b0f84d31 --- /dev/null +++ b/internal/service/workspacesweb/identity_provider_test.go @@ -0,0 +1,307 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package workspacesweb_test + +import ( + "context" + "fmt" + "testing" + + "github.com/YakDriver/regexache" + awstypes "github.com/aws/aws-sdk-go-v2/service/workspacesweb/types" + "github.com/google/go-cmp/cmp" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/terraform" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/retry" + tfworkspacesweb "github.com/hashicorp/terraform-provider-aws/internal/service/workspacesweb" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestPortalARNFromIdentityProviderARN(t *testing.T) { + t.Parallel() + + testCases := map[string]struct { + identityProviderARN string + wantPortalARN string + wantErr bool + }{ + "empty ARN": { + wantErr: true, + }, + "unparsable ARN": { + identityProviderARN: "test", + wantErr: true, + }, + "invalid ARN service": { + 
identityProviderARN: "arn:aws:workspaces:us-west-2:123456789012:identityProvider/portal-123/ip-456", //lintignore:AWSAT003,AWSAT005 + wantErr: true, + }, + "invalid ARN resource parts": { + identityProviderARN: "arn:aws:workspaces-web:us-west-2:123456789012:browserSettings/bs-789", //lintignore:AWSAT003,AWSAT005 + wantErr: true, + }, + "valid ARN": { + identityProviderARN: "arn:aws:workspaces-web:us-west-2:123456789012:identityProvider/portal-123/ip-456", //lintignore:AWSAT003,AWSAT005 + wantPortalARN: "arn:aws:workspaces-web:us-west-2:123456789012:portal/portal-123", //lintignore:AWSAT003,AWSAT005 + }, + } + + for name, testCase := range testCases { + t.Run(name, func(t *testing.T) { + t.Parallel() + + got, err := tfworkspacesweb.PortalARNFromIdentityProviderARN(testCase.identityProviderARN) + + if got, want := err != nil, testCase.wantErr; !cmp.Equal(got, want) { + t.Errorf("PortalARNFromIdentityProviderARN(%s) err %t, want %t", testCase.identityProviderARN, got, want) + } + if err == nil { + if diff := cmp.Diff(got, testCase.wantPortalARN); diff != "" { + t.Errorf("unexpected diff (+wanted, -got): %s", diff) + } + } + }) + } +} + +func TestAccWorkSpacesWebIdentityProvider_basic(t *testing.T) { + ctx := acctest.Context(t) + var identityProvider awstypes.IdentityProvider + resourceName := "aws_workspacesweb_identity_provider.test" + portalResourceName := "aws_workspacesweb_portal.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, names.WorkSpacesWebEndpointID) + testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckIdentityProviderDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccIdentityProviderConfig_basic(), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckIdentityProviderExists(ctx, resourceName, 
&identityProvider), + resource.TestCheckResourceAttr(resourceName, "identity_provider_name", "test"), + resource.TestCheckResourceAttr(resourceName, "identity_provider_type", string(awstypes.IdentityProviderTypeSaml)), + resource.TestCheckResourceAttrSet(resourceName, "identity_provider_details.MetadataFile"), + resource.TestCheckResourceAttrPair(resourceName, "portal_arn", portalResourceName, "portal_arn"), + acctest.MatchResourceAttrRegionalARN(ctx, resourceName, "identity_provider_arn", "workspaces-web", regexache.MustCompile(`identityProvider/.+$`)), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, "identity_provider_arn"), + ImportStateVerifyIdentifierAttribute: "identity_provider_arn", + }, + }, + }) +} + +func TestAccWorkSpacesWebIdentityProvider_disappears(t *testing.T) { + ctx := acctest.Context(t) + var identityProvider awstypes.IdentityProvider + resourceName := "aws_workspacesweb_identity_provider.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, names.WorkSpacesWebEndpointID) + testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckIdentityProviderDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccIdentityProviderConfig_basic(), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckIdentityProviderExists(ctx, resourceName, &identityProvider), + acctest.CheckFrameworkResourceDisappears(ctx, acctest.Provider, tfworkspacesweb.ResourceIdentityProvider, resourceName), + ), + ExpectNonEmptyPlan: true, + }, + }, + }) +} + +func TestAccWorkSpacesWebIdentityProvider_oidc_basic(t *testing.T) { + ctx := acctest.Context(t) + var identityProvider awstypes.IdentityProvider + resourceName := 
"aws_workspacesweb_identity_provider.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, names.WorkSpacesWebEndpointID) + testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckIdentityProviderDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccIdentityProviderConfig_updated(), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckIdentityProviderExists(ctx, resourceName, &identityProvider), + resource.TestCheckResourceAttr(resourceName, "identity_provider_name", "test-updated"), + resource.TestCheckResourceAttr(resourceName, "identity_provider_type", string(awstypes.IdentityProviderTypeOidc)), + resource.TestCheckResourceAttr(resourceName, "identity_provider_details.client_id", "test-client-id"), + resource.TestCheckResourceAttr(resourceName, "identity_provider_details.client_secret", "test-client-secret"), + resource.TestCheckResourceAttr(resourceName, "identity_provider_details.oidc_issuer", "https://accounts.google.com"), + resource.TestCheckResourceAttr(resourceName, "identity_provider_details.authorize_scopes", "openid, email"), + resource.TestCheckResourceAttr(resourceName, "identity_provider_details.attributes_request_method", "POST"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, "identity_provider_arn"), + ImportStateVerifyIdentifierAttribute: "identity_provider_arn", + }, + }, + }) +} + +func TestAccWorkSpacesWebIdentityProvider_update(t *testing.T) { + ctx := acctest.Context(t) + var identityProvider awstypes.IdentityProvider + resourceName := "aws_workspacesweb_identity_provider.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + 
acctest.PreCheckPartitionHasService(t, names.WorkSpacesWebEndpointID) + testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckIdentityProviderDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccIdentityProviderConfig_basic(), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckIdentityProviderExists(ctx, resourceName, &identityProvider), + resource.TestCheckResourceAttr(resourceName, "identity_provider_name", "test"), + resource.TestCheckResourceAttr(resourceName, "identity_provider_type", string(awstypes.IdentityProviderTypeSaml)), + resource.TestCheckResourceAttrSet(resourceName, "identity_provider_details.MetadataFile"), + ), + }, + { + Config: testAccIdentityProviderConfig_updated(), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckIdentityProviderExists(ctx, resourceName, &identityProvider), + resource.TestCheckResourceAttr(resourceName, "identity_provider_name", "test-updated"), + resource.TestCheckResourceAttr(resourceName, "identity_provider_type", string(awstypes.IdentityProviderTypeOidc)), + resource.TestCheckResourceAttr(resourceName, "identity_provider_details.client_id", "test-client-id"), + resource.TestCheckResourceAttr(resourceName, "identity_provider_details.client_secret", "test-client-secret"), + resource.TestCheckResourceAttr(resourceName, "identity_provider_details.oidc_issuer", "https://accounts.google.com"), + resource.TestCheckResourceAttr(resourceName, "identity_provider_details.authorize_scopes", "openid, email"), + resource.TestCheckResourceAttr(resourceName, "identity_provider_details.attributes_request_method", "POST"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, "identity_provider_arn"), + ImportStateVerifyIdentifierAttribute: "identity_provider_arn", + }, + 
}, + }) +} + +func testAccCheckIdentityProviderDestroy(ctx context.Context) resource.TestCheckFunc { + return func(s *terraform.State) error { + conn := acctest.Provider.Meta().(*conns.AWSClient).WorkSpacesWebClient(ctx) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "aws_workspacesweb_identity_provider" { + continue + } + + _, _, err := tfworkspacesweb.FindIdentityProviderByARN(ctx, conn, rs.Primary.Attributes["identity_provider_arn"]) + + if retry.NotFound(err) { + continue + } + + if err != nil { + return err + } + + return fmt.Errorf("WorkSpaces Web Identity Provider %s still exists", rs.Primary.Attributes["identity_provider_arn"]) + } + + return nil + } +} + +func testAccCheckIdentityProviderExists(ctx context.Context, n string, v *awstypes.IdentityProvider) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + conn := acctest.Provider.Meta().(*conns.AWSClient).WorkSpacesWebClient(ctx) + + output, _, err := tfworkspacesweb.FindIdentityProviderByARN(ctx, conn, rs.Primary.Attributes["identity_provider_arn"]) + + if err != nil { + return err + } + + *v = *output + + return nil + } +} + +func testAccIdentityProviderConfig_basic() string { + return ` +resource "aws_workspacesweb_portal" "test" { + display_name = "test" +} + +resource "aws_workspacesweb_identity_provider" "test" { + identity_provider_name = "test" + identity_provider_type = "SAML" + portal_arn = aws_workspacesweb_portal.test.portal_arn + + identity_provider_details = { + MetadataFile = file("./testfixtures/saml-metadata.xml") + } +} +` +} + +func testAccIdentityProviderConfig_updated() string { + return ` +resource "aws_workspacesweb_portal" "test" { + display_name = "test" +} + +resource "aws_workspacesweb_identity_provider" "test" { + identity_provider_name = "test-updated" + identity_provider_type = "OIDC" + portal_arn = aws_workspacesweb_portal.test.portal_arn + + 
identity_provider_details = { + client_id = "test-client-id" + client_secret = "test-client-secret" + oidc_issuer = "https://accounts.google.com" + attributes_request_method = "POST" + authorize_scopes = "openid, email" + } +} +` +} diff --git a/internal/service/workspacesweb/ip_access_settings_association.go b/internal/service/workspacesweb/ip_access_settings_association.go new file mode 100644 index 000000000000..afc3ed4cf439 --- /dev/null +++ b/internal/service/workspacesweb/ip_access_settings_association.go @@ -0,0 +1,164 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package workspacesweb + +import ( + "context" + "fmt" + "slices" + + "github.com/aws/aws-sdk-go-v2/service/workspacesweb" + awstypes "github.com/aws/aws-sdk-go-v2/service/workspacesweb/types" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" + "github.com/hashicorp/terraform-provider-aws/internal/errs" + "github.com/hashicorp/terraform-provider-aws/internal/errs/fwdiag" + intflex "github.com/hashicorp/terraform-provider-aws/internal/flex" + "github.com/hashicorp/terraform-provider-aws/internal/framework" + fwtypes "github.com/hashicorp/terraform-provider-aws/internal/framework/types" + tfretry "github.com/hashicorp/terraform-provider-aws/internal/retry" +) + +// @FrameworkResource("aws_workspacesweb_ip_access_settings_association", name="IP Access Settings Association") +// @Testing(tagsTest=false) +// @Testing(existsType="github.com/aws/aws-sdk-go-v2/service/workspacesweb/types;types.IpAccessSettings") +// @Testing(importStateIdAttribute="ip_access_settings_arn,portal_arn") +func newIPAccessSettingsAssociationResource(_ context.Context) (resource.ResourceWithConfigure, 
error) { + return &ipAccessSettingsAssociationResource{}, nil +} + +type ipAccessSettingsAssociationResource struct { + framework.ResourceWithModel[ipAccessSettingsAssociationResourceModel] + framework.WithNoUpdate +} + +func (r *ipAccessSettingsAssociationResource) Schema(ctx context.Context, request resource.SchemaRequest, response *resource.SchemaResponse) { + response.Schema = schema.Schema{ + Attributes: map[string]schema.Attribute{ + "ip_access_settings_arn": schema.StringAttribute{ + CustomType: fwtypes.ARNType, + Required: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + }, + "portal_arn": schema.StringAttribute{ + CustomType: fwtypes.ARNType, + Required: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + }, + }, + } +} + +func (r *ipAccessSettingsAssociationResource) Create(ctx context.Context, request resource.CreateRequest, response *resource.CreateResponse) { + var data ipAccessSettingsAssociationResourceModel + response.Diagnostics.Append(request.Plan.Get(ctx, &data)...) + if response.Diagnostics.HasError() { + return + } + + conn := r.Meta().WorkSpacesWebClient(ctx) + + input := workspacesweb.AssociateIpAccessSettingsInput{ + IpAccessSettingsArn: data.IPAccessSettingsARN.ValueStringPointer(), + PortalArn: data.PortalARN.ValueStringPointer(), + } + + _, err := conn.AssociateIpAccessSettings(ctx, &input) + + if err != nil { + response.Diagnostics.AddError("creating WorkSpacesWeb IP Access Settings Association", err.Error()) + return + } + + response.Diagnostics.Append(response.State.Set(ctx, data)...) +} + +func (r *ipAccessSettingsAssociationResource) Read(ctx context.Context, request resource.ReadRequest, response *resource.ReadResponse) { + var data ipAccessSettingsAssociationResourceModel + response.Diagnostics.Append(request.State.Get(ctx, &data)...) 
+ if response.Diagnostics.HasError() { + return + } + + conn := r.Meta().WorkSpacesWebClient(ctx) + + // Check if the association exists by getting the IP access settings and checking associated portals + output, err := findIPAccessSettingsByARN(ctx, conn, data.IPAccessSettingsARN.ValueString()) + if tfretry.NotFound(err) { + response.Diagnostics.Append(fwdiag.NewResourceNotFoundWarningDiagnostic(err)) + response.State.RemoveResource(ctx) + return + } + + if err != nil { + response.Diagnostics.AddError(fmt.Sprintf("reading WorkSpacesWeb IP Access Settings Association (%s)", data.IPAccessSettingsARN.ValueString()), err.Error()) + return + } + + // Check if the portal is in the associated portals list + if !slices.Contains(output.AssociatedPortalArns, data.PortalARN.ValueString()) { + response.Diagnostics.Append(fwdiag.NewResourceNotFoundWarningDiagnostic(fmt.Errorf("association not found"))) + response.State.RemoveResource(ctx) + return + } + + response.Diagnostics.Append(response.State.Set(ctx, &data)...) +} + +func (r *ipAccessSettingsAssociationResource) Delete(ctx context.Context, request resource.DeleteRequest, response *resource.DeleteResponse) { + var data ipAccessSettingsAssociationResourceModel + response.Diagnostics.Append(request.State.Get(ctx, &data)...) 
+ if response.Diagnostics.HasError() { + return + } + + conn := r.Meta().WorkSpacesWebClient(ctx) + + input := workspacesweb.DisassociateIpAccessSettingsInput{ + PortalArn: data.PortalARN.ValueStringPointer(), + } + + _, err := conn.DisassociateIpAccessSettings(ctx, &input) + + if errs.IsA[*awstypes.ResourceNotFoundException](err) { + return + } + + if err != nil { + response.Diagnostics.AddError(fmt.Sprintf("deleting WorkSpacesWeb IP Access Settings Association (%s)", data.IPAccessSettingsARN.ValueString()), err.Error()) + return + } +} + +func (r *ipAccessSettingsAssociationResource) ImportState(ctx context.Context, request resource.ImportStateRequest, response *resource.ImportStateResponse) { + const ( + ipAccessSettingsAssociationIDParts = 2 + ) + parts, err := intflex.ExpandResourceId(request.ID, ipAccessSettingsAssociationIDParts, true) + if err != nil { + response.Diagnostics.AddError( + "Unexpected Import Identifier", + fmt.Sprintf("Expected import identifier with format: ip_access_settings_arn,portal_arn. Got: %q", request.ID), + ) + return + } + ipAccessSettingsARN := parts[0] + portalARN := parts[1] + + response.Diagnostics.Append(response.State.SetAttribute(ctx, path.Root("ip_access_settings_arn"), ipAccessSettingsARN)...) + response.Diagnostics.Append(response.State.SetAttribute(ctx, path.Root("portal_arn"), portalARN)...) +} + +type ipAccessSettingsAssociationResourceModel struct { + framework.WithRegionModel + IPAccessSettingsARN fwtypes.ARN `tfsdk:"ip_access_settings_arn"` + PortalARN fwtypes.ARN `tfsdk:"portal_arn"` +} diff --git a/internal/service/workspacesweb/ip_access_settings_association_test.go b/internal/service/workspacesweb/ip_access_settings_association_test.go new file mode 100644 index 000000000000..a83269cbdc25 --- /dev/null +++ b/internal/service/workspacesweb/ip_access_settings_association_test.go @@ -0,0 +1,185 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package workspacesweb_test + +import ( + "context" + "fmt" + "slices" + "testing" + + awstypes "github.com/aws/aws-sdk-go-v2/service/workspacesweb/types" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/terraform" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + tfworkspacesweb "github.com/hashicorp/terraform-provider-aws/internal/service/workspacesweb" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccWorkSpacesWebIPAccessSettingsAssociation_basic(t *testing.T) { + ctx := acctest.Context(t) + var ipAccessSettings awstypes.IpAccessSettings + resourceName := "aws_workspacesweb_ip_access_settings_association.test" + ipAccessSettingsResourceName := "aws_workspacesweb_ip_access_settings.test" + portalResourceName := "aws_workspacesweb_portal.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, names.WorkSpacesWebEndpointID) + testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckIPAccessSettingsAssociationDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccIPAccessSettingsAssociationConfig_basic(), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckIPAccessSettingsAssociationExists(ctx, resourceName, &ipAccessSettings), + resource.TestCheckResourceAttrPair(resourceName, "ip_access_settings_arn", ipAccessSettingsResourceName, "ip_access_settings_arn"), + resource.TestCheckResourceAttrPair(resourceName, "portal_arn", portalResourceName, "portal_arn"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + 
ImportStateIdFunc: testAccIPAccessSettingsAssociationImportStateIdFunc(resourceName), + ImportStateVerifyIdentifierAttribute: "ip_access_settings_arn", + }, + { + ResourceName: resourceName, + RefreshState: true, + }, + { + Config: testAccIPAccessSettingsAssociationConfig_basic(), + Check: resource.ComposeAggregateTestCheckFunc( + //The following checks are for the IPAccessSettings Resource and the PortalResource (and not for the association resource). + resource.TestCheckResourceAttr(ipAccessSettingsResourceName, "associated_portal_arns.#", "1"), + resource.TestCheckResourceAttrPair(ipAccessSettingsResourceName, "associated_portal_arns.0", portalResourceName, "portal_arn"), + resource.TestCheckResourceAttrPair(portalResourceName, "ip_access_settings_arn", ipAccessSettingsResourceName, "ip_access_settings_arn"), + ), + }, + }, + }) +} + +func TestAccWorkSpacesWebIPAccessSettingsAssociation_disappears(t *testing.T) { + ctx := acctest.Context(t) + var ipAccessSettings awstypes.IpAccessSettings + resourceName := "aws_workspacesweb_ip_access_settings_association.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, names.WorkSpacesWebEndpointID) + testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckIPAccessSettingsAssociationDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccIPAccessSettingsAssociationConfig_basic(), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckIPAccessSettingsAssociationExists(ctx, resourceName, &ipAccessSettings), + acctest.CheckFrameworkResourceDisappears(ctx, acctest.Provider, tfworkspacesweb.ResourceIPAccessSettingsAssociation, resourceName), + ), + ExpectNonEmptyPlan: true, + }, + }, + }) +} + +func testAccCheckIPAccessSettingsAssociationDestroy(ctx context.Context) resource.TestCheckFunc { + 
return func(s *terraform.State) error { + conn := acctest.Provider.Meta().(*conns.AWSClient).WorkSpacesWebClient(ctx) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "aws_workspacesweb_ip_access_settings_association" { + continue + } + + ipAccessSettings, err := tfworkspacesweb.FindIPAccessSettingsByARN(ctx, conn, rs.Primary.Attributes["ip_access_settings_arn"]) + + if tfresource.NotFound(err) { + continue + } + + if err != nil { + return err + } + + // Check if the portal is still associated + portalARN := rs.Primary.Attributes["portal_arn"] + if slices.Contains(ipAccessSettings.AssociatedPortalArns, portalARN) { + return fmt.Errorf("WorkSpaces Web IP Access Settings Association %s still exists", rs.Primary.Attributes["ip_access_settings_arn"]) + } + } + + return nil + } +} + +func testAccCheckIPAccessSettingsAssociationExists(ctx context.Context, n string, v *awstypes.IpAccessSettings) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + conn := acctest.Provider.Meta().(*conns.AWSClient).WorkSpacesWebClient(ctx) + + output, err := tfworkspacesweb.FindIPAccessSettingsByARN(ctx, conn, rs.Primary.Attributes["ip_access_settings_arn"]) + + if err != nil { + return err + } + + // Check if the portal is associated + portalARN := rs.Primary.Attributes["portal_arn"] + if !slices.Contains(output.AssociatedPortalArns, portalARN) { + return fmt.Errorf("Association not found") + } + + *v = *output + + return nil + } +} + +func testAccIPAccessSettingsAssociationImportStateIdFunc(resourceName string) resource.ImportStateIdFunc { + return func(s *terraform.State) (string, error) { + rs, ok := s.RootModule().Resources[resourceName] + if !ok { + return "", fmt.Errorf("Not found: %s", resourceName) + } + + return fmt.Sprintf("%s,%s", rs.Primary.Attributes["ip_access_settings_arn"], rs.Primary.Attributes["portal_arn"]), nil + } +} + +func 
testAccIPAccessSettingsAssociationConfig_basic() string { + return ` +resource "aws_workspacesweb_portal" "test" { + display_name = "test" +} + +resource "aws_workspacesweb_ip_access_settings" "test" { + display_name = "test" + + ip_rule { + ip_range = "10.0.0.0/16" + } +} + +resource "aws_workspacesweb_ip_access_settings_association" "test" { + ip_access_settings_arn = aws_workspacesweb_ip_access_settings.test.ip_access_settings_arn + portal_arn = aws_workspacesweb_portal.test.portal_arn +} +` +} diff --git a/internal/service/workspacesweb/ip_access_settings_tags_gen_test.go b/internal/service/workspacesweb/ip_access_settings_tags_gen_test.go index 8b04b247552b..96833b1b0757 100644 --- a/internal/service/workspacesweb/ip_access_settings_tags_gen_test.go +++ b/internal/service/workspacesweb/ip_access_settings_tags_gen_test.go @@ -18,10 +18,11 @@ import ( func TestAccWorkSpacesWebIPAccessSettings_tags(t *testing.T) { ctx := acctest.Context(t) + var v types.IpAccessSettings resourceName := "aws_workspacesweb_ip_access_settings.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), CheckDestroy: testAccCheckIPAccessSettingsDestroy(ctx), @@ -199,10 +200,11 @@ func TestAccWorkSpacesWebIPAccessSettings_tags(t *testing.T) { func TestAccWorkSpacesWebIPAccessSettings_tags_null(t *testing.T) { ctx := acctest.Context(t) + var v types.IpAccessSettings resourceName := "aws_workspacesweb_ip_access_settings.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), CheckDestroy: testAccCheckIPAccessSettingsDestroy(ctx), @@ -260,10 +262,11 @@ func TestAccWorkSpacesWebIPAccessSettings_tags_null(t *testing.T) { func TestAccWorkSpacesWebIPAccessSettings_tags_EmptyMap(t 
*testing.T) { ctx := acctest.Context(t) + var v types.IpAccessSettings resourceName := "aws_workspacesweb_ip_access_settings.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), CheckDestroy: testAccCheckIPAccessSettingsDestroy(ctx), @@ -309,10 +312,11 @@ func TestAccWorkSpacesWebIPAccessSettings_tags_EmptyMap(t *testing.T) { func TestAccWorkSpacesWebIPAccessSettings_tags_AddOnUpdate(t *testing.T) { ctx := acctest.Context(t) + var v types.IpAccessSettings resourceName := "aws_workspacesweb_ip_access_settings.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), CheckDestroy: testAccCheckIPAccessSettingsDestroy(ctx), @@ -387,10 +391,11 @@ func TestAccWorkSpacesWebIPAccessSettings_tags_AddOnUpdate(t *testing.T) { func TestAccWorkSpacesWebIPAccessSettings_tags_EmptyTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) + var v types.IpAccessSettings resourceName := "aws_workspacesweb_ip_access_settings.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), CheckDestroy: testAccCheckIPAccessSettingsDestroy(ctx), @@ -476,10 +481,11 @@ func TestAccWorkSpacesWebIPAccessSettings_tags_EmptyTag_OnCreate(t *testing.T) { func TestAccWorkSpacesWebIPAccessSettings_tags_EmptyTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + var v types.IpAccessSettings resourceName := "aws_workspacesweb_ip_access_settings.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, 
names.WorkSpacesWebServiceID), CheckDestroy: testAccCheckIPAccessSettingsDestroy(ctx), @@ -613,10 +619,11 @@ func TestAccWorkSpacesWebIPAccessSettings_tags_EmptyTag_OnUpdate_Add(t *testing. func TestAccWorkSpacesWebIPAccessSettings_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + var v types.IpAccessSettings resourceName := "aws_workspacesweb_ip_access_settings.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), CheckDestroy: testAccCheckIPAccessSettingsDestroy(ctx), @@ -701,10 +708,11 @@ func TestAccWorkSpacesWebIPAccessSettings_tags_EmptyTag_OnUpdate_Replace(t *test func TestAccWorkSpacesWebIPAccessSettings_tags_DefaultTags_providerOnly(t *testing.T) { ctx := acctest.Context(t) + var v types.IpAccessSettings resourceName := "aws_workspacesweb_ip_access_settings.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), CheckDestroy: testAccCheckIPAccessSettingsDestroy(ctx), @@ -881,10 +889,11 @@ func TestAccWorkSpacesWebIPAccessSettings_tags_DefaultTags_providerOnly(t *testi func TestAccWorkSpacesWebIPAccessSettings_tags_DefaultTags_nonOverlapping(t *testing.T) { ctx := acctest.Context(t) + var v types.IpAccessSettings resourceName := "aws_workspacesweb_ip_access_settings.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), CheckDestroy: testAccCheckIPAccessSettingsDestroy(ctx), @@ -1040,10 +1049,11 @@ func TestAccWorkSpacesWebIPAccessSettings_tags_DefaultTags_nonOverlapping(t *tes func TestAccWorkSpacesWebIPAccessSettings_tags_DefaultTags_overlapping(t *testing.T) { 
ctx := acctest.Context(t) + var v types.IpAccessSettings resourceName := "aws_workspacesweb_ip_access_settings.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), CheckDestroy: testAccCheckIPAccessSettingsDestroy(ctx), @@ -1215,10 +1225,11 @@ func TestAccWorkSpacesWebIPAccessSettings_tags_DefaultTags_overlapping(t *testin func TestAccWorkSpacesWebIPAccessSettings_tags_DefaultTags_updateToProviderOnly(t *testing.T) { ctx := acctest.Context(t) + var v types.IpAccessSettings resourceName := "aws_workspacesweb_ip_access_settings.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), CheckDestroy: testAccCheckIPAccessSettingsDestroy(ctx), @@ -1303,10 +1314,11 @@ func TestAccWorkSpacesWebIPAccessSettings_tags_DefaultTags_updateToProviderOnly( func TestAccWorkSpacesWebIPAccessSettings_tags_DefaultTags_updateToResourceOnly(t *testing.T) { ctx := acctest.Context(t) + var v types.IpAccessSettings resourceName := "aws_workspacesweb_ip_access_settings.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), CheckDestroy: testAccCheckIPAccessSettingsDestroy(ctx), @@ -1390,10 +1402,11 @@ func TestAccWorkSpacesWebIPAccessSettings_tags_DefaultTags_updateToResourceOnly( func TestAccWorkSpacesWebIPAccessSettings_tags_DefaultTags_emptyResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v types.IpAccessSettings resourceName := "aws_workspacesweb_ip_access_settings.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) 
}, ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), CheckDestroy: testAccCheckIPAccessSettingsDestroy(ctx), @@ -1455,10 +1468,11 @@ func TestAccWorkSpacesWebIPAccessSettings_tags_DefaultTags_emptyResourceTag(t *t func TestAccWorkSpacesWebIPAccessSettings_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T) { ctx := acctest.Context(t) + var v types.IpAccessSettings resourceName := "aws_workspacesweb_ip_access_settings.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), CheckDestroy: testAccCheckIPAccessSettingsDestroy(ctx), @@ -1512,10 +1526,11 @@ func TestAccWorkSpacesWebIPAccessSettings_tags_DefaultTags_emptyProviderOnlyTag( func TestAccWorkSpacesWebIPAccessSettings_tags_DefaultTags_nullOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v types.IpAccessSettings resourceName := "aws_workspacesweb_ip_access_settings.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), CheckDestroy: testAccCheckIPAccessSettingsDestroy(ctx), @@ -1580,10 +1595,11 @@ func TestAccWorkSpacesWebIPAccessSettings_tags_DefaultTags_nullOverlappingResour func TestAccWorkSpacesWebIPAccessSettings_tags_DefaultTags_nullNonOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v types.IpAccessSettings resourceName := "aws_workspacesweb_ip_access_settings.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), CheckDestroy: testAccCheckIPAccessSettingsDestroy(ctx), @@ -1650,10 +1666,11 @@ func TestAccWorkSpacesWebIPAccessSettings_tags_DefaultTags_nullNonOverlappingRes func 
TestAccWorkSpacesWebIPAccessSettings_tags_ComputedTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) + var v types.IpAccessSettings resourceName := "aws_workspacesweb_ip_access_settings.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), CheckDestroy: testAccCheckIPAccessSettingsDestroy(ctx), @@ -1704,10 +1721,11 @@ func TestAccWorkSpacesWebIPAccessSettings_tags_ComputedTag_OnCreate(t *testing.T func TestAccWorkSpacesWebIPAccessSettings_tags_ComputedTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + var v types.IpAccessSettings resourceName := "aws_workspacesweb_ip_access_settings.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), CheckDestroy: testAccCheckIPAccessSettingsDestroy(ctx), @@ -1799,10 +1817,11 @@ func TestAccWorkSpacesWebIPAccessSettings_tags_ComputedTag_OnUpdate_Add(t *testi func TestAccWorkSpacesWebIPAccessSettings_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + var v types.IpAccessSettings resourceName := "aws_workspacesweb_ip_access_settings.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), CheckDestroy: testAccCheckIPAccessSettingsDestroy(ctx), @@ -1884,10 +1903,11 @@ func TestAccWorkSpacesWebIPAccessSettings_tags_ComputedTag_OnUpdate_Replace(t *t func TestAccWorkSpacesWebIPAccessSettings_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { ctx := acctest.Context(t) + var v types.IpAccessSettings resourceName := "aws_workspacesweb_ip_access_settings.test" - resource.ParallelTest(t, resource.TestCase{ + 
acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), CheckDestroy: testAccCheckIPAccessSettingsDestroy(ctx), @@ -2042,10 +2062,11 @@ func TestAccWorkSpacesWebIPAccessSettings_tags_IgnoreTags_Overlap_DefaultTag(t * func TestAccWorkSpacesWebIPAccessSettings_tags_IgnoreTags_Overlap_ResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v types.IpAccessSettings resourceName := "aws_workspacesweb_ip_access_settings.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), CheckDestroy: testAccCheckIPAccessSettingsDestroy(ctx), diff --git a/internal/service/workspacesweb/network_settings_association.go b/internal/service/workspacesweb/network_settings_association.go new file mode 100644 index 000000000000..a0515d5866ad --- /dev/null +++ b/internal/service/workspacesweb/network_settings_association.go @@ -0,0 +1,164 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package workspacesweb + +import ( + "context" + "fmt" + "slices" + + "github.com/aws/aws-sdk-go-v2/service/workspacesweb" + awstypes "github.com/aws/aws-sdk-go-v2/service/workspacesweb/types" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" + "github.com/hashicorp/terraform-provider-aws/internal/errs" + "github.com/hashicorp/terraform-provider-aws/internal/errs/fwdiag" + intflex "github.com/hashicorp/terraform-provider-aws/internal/flex" + "github.com/hashicorp/terraform-provider-aws/internal/framework" + fwtypes "github.com/hashicorp/terraform-provider-aws/internal/framework/types" + tfretry "github.com/hashicorp/terraform-provider-aws/internal/retry" +) + +// @FrameworkResource("aws_workspacesweb_network_settings_association", name="Network Settings Association") +// @Testing(tagsTest=false) +// @Testing(existsType="github.com/aws/aws-sdk-go-v2/service/workspacesweb/types;types.NetworkSettings") +// @Testing(importStateIdAttribute="network_settings_arn,portal_arn") +func newNetworkSettingsAssociationResource(_ context.Context) (resource.ResourceWithConfigure, error) { + return &networkSettingsAssociationResource{}, nil +} + +type networkSettingsAssociationResource struct { + framework.ResourceWithModel[networkSettingsAssociationResourceModel] + framework.WithNoUpdate +} + +func (r *networkSettingsAssociationResource) Schema(ctx context.Context, request resource.SchemaRequest, response *resource.SchemaResponse) { + response.Schema = schema.Schema{ + Attributes: map[string]schema.Attribute{ + "network_settings_arn": schema.StringAttribute{ + CustomType: fwtypes.ARNType, + Required: true, + PlanModifiers: []planmodifier.String{ 
+ stringplanmodifier.RequiresReplace(), + }, + }, + "portal_arn": schema.StringAttribute{ + CustomType: fwtypes.ARNType, + Required: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + }, + }, + } +} + +func (r *networkSettingsAssociationResource) Create(ctx context.Context, request resource.CreateRequest, response *resource.CreateResponse) { + var data networkSettingsAssociationResourceModel + response.Diagnostics.Append(request.Plan.Get(ctx, &data)...) + if response.Diagnostics.HasError() { + return + } + + conn := r.Meta().WorkSpacesWebClient(ctx) + + input := workspacesweb.AssociateNetworkSettingsInput{ + NetworkSettingsArn: data.NetworkSettingsARN.ValueStringPointer(), + PortalArn: data.PortalARN.ValueStringPointer(), + } + + _, err := conn.AssociateNetworkSettings(ctx, &input) + + if err != nil { + response.Diagnostics.AddError("creating WorkSpacesWeb Network Settings Association", err.Error()) + return + } + + response.Diagnostics.Append(response.State.Set(ctx, data)...) +} + +func (r *networkSettingsAssociationResource) Read(ctx context.Context, request resource.ReadRequest, response *resource.ReadResponse) { + var data networkSettingsAssociationResourceModel + response.Diagnostics.Append(request.State.Get(ctx, &data)...) 
+ if response.Diagnostics.HasError() { + return + } + + conn := r.Meta().WorkSpacesWebClient(ctx) + + // Check if the association exists by getting the network settings and checking associated portals + output, err := findNetworkSettingsByARN(ctx, conn, data.NetworkSettingsARN.ValueString()) + if tfretry.NotFound(err) { + response.Diagnostics.Append(fwdiag.NewResourceNotFoundWarningDiagnostic(err)) + response.State.RemoveResource(ctx) + return + } + + if err != nil { + response.Diagnostics.AddError(fmt.Sprintf("reading WorkSpacesWeb Network Settings Association (%s)", data.NetworkSettingsARN.ValueString()), err.Error()) + return + } + + // Check if the portal is in the associated portals list + if !slices.Contains(output.AssociatedPortalArns, data.PortalARN.ValueString()) { + response.Diagnostics.Append(fwdiag.NewResourceNotFoundWarningDiagnostic(fmt.Errorf("association not found"))) + response.State.RemoveResource(ctx) + return + } + + response.Diagnostics.Append(response.State.Set(ctx, &data)...) +} + +func (r *networkSettingsAssociationResource) Delete(ctx context.Context, request resource.DeleteRequest, response *resource.DeleteResponse) { + var data networkSettingsAssociationResourceModel + response.Diagnostics.Append(request.State.Get(ctx, &data)...) 
+ if response.Diagnostics.HasError() { + return + } + + conn := r.Meta().WorkSpacesWebClient(ctx) + + input := workspacesweb.DisassociateNetworkSettingsInput{ + PortalArn: data.PortalARN.ValueStringPointer(), + } + + _, err := conn.DisassociateNetworkSettings(ctx, &input) + + if errs.IsA[*awstypes.ResourceNotFoundException](err) { + return + } + + if err != nil { + response.Diagnostics.AddError(fmt.Sprintf("deleting WorkSpacesWeb Network Settings Association (%s)", data.NetworkSettingsARN.ValueString()), err.Error()) + return + } +} + +func (r *networkSettingsAssociationResource) ImportState(ctx context.Context, request resource.ImportStateRequest, response *resource.ImportStateResponse) { + const ( + networkSettingsAssociationIDParts = 2 + ) + parts, err := intflex.ExpandResourceId(request.ID, networkSettingsAssociationIDParts, true) + if err != nil { + response.Diagnostics.AddError( + "Unexpected Import Identifier", + fmt.Sprintf("Expected import identifier with format: network_settings_arn,portal_arn. Got: %q", request.ID), + ) + return + } + networkSettingsARN := parts[0] + portalARN := parts[1] + + response.Diagnostics.Append(response.State.SetAttribute(ctx, path.Root("network_settings_arn"), networkSettingsARN)...) + response.Diagnostics.Append(response.State.SetAttribute(ctx, path.Root("portal_arn"), portalARN)...) +} + +type networkSettingsAssociationResourceModel struct { + framework.WithRegionModel + NetworkSettingsARN fwtypes.ARN `tfsdk:"network_settings_arn"` + PortalARN fwtypes.ARN `tfsdk:"portal_arn"` +} diff --git a/internal/service/workspacesweb/network_settings_association_test.go b/internal/service/workspacesweb/network_settings_association_test.go new file mode 100644 index 000000000000..ac780eba9f87 --- /dev/null +++ b/internal/service/workspacesweb/network_settings_association_test.go @@ -0,0 +1,186 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package workspacesweb_test + +import ( + "context" + "fmt" + "slices" + "testing" + + awstypes "github.com/aws/aws-sdk-go-v2/service/workspacesweb/types" + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/terraform" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + tfworkspacesweb "github.com/hashicorp/terraform-provider-aws/internal/service/workspacesweb" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccWorkSpacesWebNetworkSettingsAssociation_basic(t *testing.T) { + ctx := acctest.Context(t) + var networkSettings awstypes.NetworkSettings + resourceName := "aws_workspacesweb_network_settings_association.test" + networkSettingsResourceName := "aws_workspacesweb_network_settings.test" + portalResourceName := "aws_workspacesweb_portal.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, names.WorkSpacesWebEndpointID) + testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckNetworkSettingsAssociationDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccNetworkSettingsAssociationConfig_basic(rName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckNetworkSettingsAssociationExists(ctx, resourceName, &networkSettings), + resource.TestCheckResourceAttrPair(resourceName, "network_settings_arn", networkSettingsResourceName, "network_settings_arn"), + resource.TestCheckResourceAttrPair(resourceName, "portal_arn", 
portalResourceName, "portal_arn"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateIdFunc: testAccNetworkSettingsAssociationImportStateIdFunc(resourceName), + ImportStateVerifyIdentifierAttribute: "network_settings_arn", + }, + { + ResourceName: resourceName, + RefreshState: true, + }, + { + Config: testAccNetworkSettingsAssociationConfig_basic(rName), + Check: resource.ComposeAggregateTestCheckFunc( + //The following checks are for the NetworkSettings Resource and the PortalResource (and not for the association resource). + resource.TestCheckResourceAttr(networkSettingsResourceName, "associated_portal_arns.#", "1"), + resource.TestCheckResourceAttrPair(networkSettingsResourceName, "associated_portal_arns.0", portalResourceName, "portal_arn"), + resource.TestCheckResourceAttrPair(portalResourceName, "network_settings_arn", networkSettingsResourceName, "network_settings_arn"), + ), + }, + }, + }) +} + +func TestAccWorkSpacesWebNetworkSettingsAssociation_disappears(t *testing.T) { + ctx := acctest.Context(t) + var networkSettings awstypes.NetworkSettings + resourceName := "aws_workspacesweb_network_settings_association.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, names.WorkSpacesWebEndpointID) + testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckNetworkSettingsAssociationDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccNetworkSettingsAssociationConfig_basic(rName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckNetworkSettingsAssociationExists(ctx, resourceName, &networkSettings), + acctest.CheckFrameworkResourceDisappears(ctx, acctest.Provider, 
tfworkspacesweb.ResourceNetworkSettingsAssociation, resourceName), + ), + ExpectNonEmptyPlan: true, + }, + }, + }) +} + +func testAccCheckNetworkSettingsAssociationDestroy(ctx context.Context) resource.TestCheckFunc { + return func(s *terraform.State) error { + conn := acctest.Provider.Meta().(*conns.AWSClient).WorkSpacesWebClient(ctx) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "aws_workspacesweb_network_settings_association" { + continue + } + + networkSettings, err := tfworkspacesweb.FindNetworkSettingsByARN(ctx, conn, rs.Primary.Attributes["network_settings_arn"]) + + if tfresource.NotFound(err) { + continue + } + + if err != nil { + return err + } + + // Check if the portal is still associated + portalARN := rs.Primary.Attributes["portal_arn"] + if slices.Contains(networkSettings.AssociatedPortalArns, portalARN) { + return fmt.Errorf("WorkSpaces Web Network Settings Association %s still exists", rs.Primary.Attributes["network_settings_arn"]) + } + } + + return nil + } +} + +func testAccCheckNetworkSettingsAssociationExists(ctx context.Context, n string, v *awstypes.NetworkSettings) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + conn := acctest.Provider.Meta().(*conns.AWSClient).WorkSpacesWebClient(ctx) + + output, err := tfworkspacesweb.FindNetworkSettingsByARN(ctx, conn, rs.Primary.Attributes["network_settings_arn"]) + + if err != nil { + return err + } + + // Check if the portal is associated + portalARN := rs.Primary.Attributes["portal_arn"] + if !slices.Contains(output.AssociatedPortalArns, portalARN) { + return fmt.Errorf("Association not found") + } + + *v = *output + + return nil + } +} + +func testAccNetworkSettingsAssociationImportStateIdFunc(resourceName string) resource.ImportStateIdFunc { + return func(s *terraform.State) (string, error) { + rs, ok := s.RootModule().Resources[resourceName] + if !ok { + return 
"", fmt.Errorf("Not found: %s", resourceName) + } + + return fmt.Sprintf("%s,%s", rs.Primary.Attributes["network_settings_arn"], rs.Primary.Attributes["portal_arn"]), nil + } +} + +func testAccNetworkSettingsAssociationConfig_basic(rName string) string { + return acctest.ConfigCompose(testAccNetworkSettingsConfig_base(rName), ` +resource "aws_workspacesweb_portal" "test" { + display_name = "test" +} + +resource "aws_workspacesweb_network_settings" "test" { + vpc_id = aws_vpc.test.id + subnet_ids = [aws_subnet.test[0].id, aws_subnet.test[1].id] + security_group_ids = [aws_security_group.test[0].id, aws_security_group.test[1].id] +} + +resource "aws_workspacesweb_network_settings_association" "test" { + network_settings_arn = aws_workspacesweb_network_settings.test.network_settings_arn + portal_arn = aws_workspacesweb_portal.test.portal_arn +} +`) +} diff --git a/internal/service/workspacesweb/network_settings_tags_gen_test.go b/internal/service/workspacesweb/network_settings_tags_gen_test.go index b6a60e0e46f0..dae18ed395e8 100644 --- a/internal/service/workspacesweb/network_settings_tags_gen_test.go +++ b/internal/service/workspacesweb/network_settings_tags_gen_test.go @@ -18,10 +18,11 @@ import ( func TestAccWorkSpacesWebNetworkSettings_tags(t *testing.T) { ctx := acctest.Context(t) + var v types.NetworkSettings resourceName := "aws_workspacesweb_network_settings.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), CheckDestroy: testAccCheckNetworkSettingsDestroy(ctx), @@ -199,10 +200,11 @@ func TestAccWorkSpacesWebNetworkSettings_tags(t *testing.T) { func TestAccWorkSpacesWebNetworkSettings_tags_null(t *testing.T) { ctx := acctest.Context(t) + var v types.NetworkSettings resourceName := "aws_workspacesweb_network_settings.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, 
resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), CheckDestroy: testAccCheckNetworkSettingsDestroy(ctx), @@ -260,10 +262,11 @@ func TestAccWorkSpacesWebNetworkSettings_tags_null(t *testing.T) { func TestAccWorkSpacesWebNetworkSettings_tags_EmptyMap(t *testing.T) { ctx := acctest.Context(t) + var v types.NetworkSettings resourceName := "aws_workspacesweb_network_settings.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), CheckDestroy: testAccCheckNetworkSettingsDestroy(ctx), @@ -309,10 +312,11 @@ func TestAccWorkSpacesWebNetworkSettings_tags_EmptyMap(t *testing.T) { func TestAccWorkSpacesWebNetworkSettings_tags_AddOnUpdate(t *testing.T) { ctx := acctest.Context(t) + var v types.NetworkSettings resourceName := "aws_workspacesweb_network_settings.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), CheckDestroy: testAccCheckNetworkSettingsDestroy(ctx), @@ -387,10 +391,11 @@ func TestAccWorkSpacesWebNetworkSettings_tags_AddOnUpdate(t *testing.T) { func TestAccWorkSpacesWebNetworkSettings_tags_EmptyTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) + var v types.NetworkSettings resourceName := "aws_workspacesweb_network_settings.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), CheckDestroy: testAccCheckNetworkSettingsDestroy(ctx), @@ -476,10 +481,11 @@ func TestAccWorkSpacesWebNetworkSettings_tags_EmptyTag_OnCreate(t *testing.T) { func TestAccWorkSpacesWebNetworkSettings_tags_EmptyTag_OnUpdate_Add(t 
*testing.T) { ctx := acctest.Context(t) + var v types.NetworkSettings resourceName := "aws_workspacesweb_network_settings.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), CheckDestroy: testAccCheckNetworkSettingsDestroy(ctx), @@ -613,10 +619,11 @@ func TestAccWorkSpacesWebNetworkSettings_tags_EmptyTag_OnUpdate_Add(t *testing.T func TestAccWorkSpacesWebNetworkSettings_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + var v types.NetworkSettings resourceName := "aws_workspacesweb_network_settings.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), CheckDestroy: testAccCheckNetworkSettingsDestroy(ctx), @@ -701,10 +708,11 @@ func TestAccWorkSpacesWebNetworkSettings_tags_EmptyTag_OnUpdate_Replace(t *testi func TestAccWorkSpacesWebNetworkSettings_tags_DefaultTags_providerOnly(t *testing.T) { ctx := acctest.Context(t) + var v types.NetworkSettings resourceName := "aws_workspacesweb_network_settings.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), CheckDestroy: testAccCheckNetworkSettingsDestroy(ctx), @@ -881,10 +889,11 @@ func TestAccWorkSpacesWebNetworkSettings_tags_DefaultTags_providerOnly(t *testin func TestAccWorkSpacesWebNetworkSettings_tags_DefaultTags_nonOverlapping(t *testing.T) { ctx := acctest.Context(t) + var v types.NetworkSettings resourceName := "aws_workspacesweb_network_settings.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: 
acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), CheckDestroy: testAccCheckNetworkSettingsDestroy(ctx), @@ -1040,10 +1049,11 @@ func TestAccWorkSpacesWebNetworkSettings_tags_DefaultTags_nonOverlapping(t *test func TestAccWorkSpacesWebNetworkSettings_tags_DefaultTags_overlapping(t *testing.T) { ctx := acctest.Context(t) + var v types.NetworkSettings resourceName := "aws_workspacesweb_network_settings.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), CheckDestroy: testAccCheckNetworkSettingsDestroy(ctx), @@ -1215,10 +1225,11 @@ func TestAccWorkSpacesWebNetworkSettings_tags_DefaultTags_overlapping(t *testing func TestAccWorkSpacesWebNetworkSettings_tags_DefaultTags_updateToProviderOnly(t *testing.T) { ctx := acctest.Context(t) + var v types.NetworkSettings resourceName := "aws_workspacesweb_network_settings.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), CheckDestroy: testAccCheckNetworkSettingsDestroy(ctx), @@ -1303,10 +1314,11 @@ func TestAccWorkSpacesWebNetworkSettings_tags_DefaultTags_updateToProviderOnly(t func TestAccWorkSpacesWebNetworkSettings_tags_DefaultTags_updateToResourceOnly(t *testing.T) { ctx := acctest.Context(t) + var v types.NetworkSettings resourceName := "aws_workspacesweb_network_settings.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), CheckDestroy: testAccCheckNetworkSettingsDestroy(ctx), @@ -1390,10 +1402,11 @@ func TestAccWorkSpacesWebNetworkSettings_tags_DefaultTags_updateToResourceOnly(t func 
TestAccWorkSpacesWebNetworkSettings_tags_DefaultTags_emptyResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v types.NetworkSettings resourceName := "aws_workspacesweb_network_settings.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), CheckDestroy: testAccCheckNetworkSettingsDestroy(ctx), @@ -1455,10 +1468,11 @@ func TestAccWorkSpacesWebNetworkSettings_tags_DefaultTags_emptyResourceTag(t *te func TestAccWorkSpacesWebNetworkSettings_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T) { ctx := acctest.Context(t) + var v types.NetworkSettings resourceName := "aws_workspacesweb_network_settings.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), CheckDestroy: testAccCheckNetworkSettingsDestroy(ctx), @@ -1512,10 +1526,11 @@ func TestAccWorkSpacesWebNetworkSettings_tags_DefaultTags_emptyProviderOnlyTag(t func TestAccWorkSpacesWebNetworkSettings_tags_DefaultTags_nullOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v types.NetworkSettings resourceName := "aws_workspacesweb_network_settings.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), CheckDestroy: testAccCheckNetworkSettingsDestroy(ctx), @@ -1580,10 +1595,11 @@ func TestAccWorkSpacesWebNetworkSettings_tags_DefaultTags_nullOverlappingResourc func TestAccWorkSpacesWebNetworkSettings_tags_DefaultTags_nullNonOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v types.NetworkSettings resourceName := "aws_workspacesweb_network_settings.test" - resource.ParallelTest(t, resource.TestCase{ + 
acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), CheckDestroy: testAccCheckNetworkSettingsDestroy(ctx), @@ -1650,10 +1666,11 @@ func TestAccWorkSpacesWebNetworkSettings_tags_DefaultTags_nullNonOverlappingReso func TestAccWorkSpacesWebNetworkSettings_tags_ComputedTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) + var v types.NetworkSettings resourceName := "aws_workspacesweb_network_settings.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), CheckDestroy: testAccCheckNetworkSettingsDestroy(ctx), @@ -1704,10 +1721,11 @@ func TestAccWorkSpacesWebNetworkSettings_tags_ComputedTag_OnCreate(t *testing.T) func TestAccWorkSpacesWebNetworkSettings_tags_ComputedTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + var v types.NetworkSettings resourceName := "aws_workspacesweb_network_settings.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), CheckDestroy: testAccCheckNetworkSettingsDestroy(ctx), @@ -1799,10 +1817,11 @@ func TestAccWorkSpacesWebNetworkSettings_tags_ComputedTag_OnUpdate_Add(t *testin func TestAccWorkSpacesWebNetworkSettings_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + var v types.NetworkSettings resourceName := "aws_workspacesweb_network_settings.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), CheckDestroy: testAccCheckNetworkSettingsDestroy(ctx), @@ -1884,10 +1903,11 @@ func 
TestAccWorkSpacesWebNetworkSettings_tags_ComputedTag_OnUpdate_Replace(t *te func TestAccWorkSpacesWebNetworkSettings_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { ctx := acctest.Context(t) + var v types.NetworkSettings resourceName := "aws_workspacesweb_network_settings.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), CheckDestroy: testAccCheckNetworkSettingsDestroy(ctx), @@ -2042,10 +2062,11 @@ func TestAccWorkSpacesWebNetworkSettings_tags_IgnoreTags_Overlap_DefaultTag(t *t func TestAccWorkSpacesWebNetworkSettings_tags_IgnoreTags_Overlap_ResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v types.NetworkSettings resourceName := "aws_workspacesweb_network_settings.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), CheckDestroy: testAccCheckNetworkSettingsDestroy(ctx), diff --git a/internal/service/workspacesweb/portal.go b/internal/service/workspacesweb/portal.go new file mode 100644 index 000000000000..74547db355f1 --- /dev/null +++ b/internal/service/workspacesweb/portal.go @@ -0,0 +1,486 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package workspacesweb + +import ( + "context" + "fmt" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/workspacesweb" + awstypes "github.com/aws/aws-sdk-go-v2/service/workspacesweb/types" + "github.com/hashicorp/terraform-plugin-framework-timeouts/resource/timeouts" + "github.com/hashicorp/terraform-plugin-framework-timetypes/timetypes" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/mapplanmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" + "github.com/hashicorp/terraform-plugin-framework/types" + sdkid "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" + "github.com/hashicorp/terraform-provider-aws/internal/enum" + "github.com/hashicorp/terraform-provider-aws/internal/errs" + "github.com/hashicorp/terraform-provider-aws/internal/errs/fwdiag" + "github.com/hashicorp/terraform-provider-aws/internal/framework" + fwflex "github.com/hashicorp/terraform-provider-aws/internal/framework/flex" + fwtypes "github.com/hashicorp/terraform-provider-aws/internal/framework/types" + tfretry "github.com/hashicorp/terraform-provider-aws/internal/retry" + tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + "github.com/hashicorp/terraform-provider-aws/names" +) + +// @FrameworkResource("aws_workspacesweb_portal", name="Portal") +// @Tags(identifierAttribute="portal_arn") +// @Testing(tagsTest=true) +// @Testing(generator=false) +// @Testing(existsType="github.com/aws/aws-sdk-go-v2/service/workspacesweb/types;types.Portal") +// 
@Testing(importStateIdAttribute="portal_arn") +func newPortalResource(_ context.Context) (resource.ResourceWithConfigure, error) { + r := &portalResource{} + + r.SetDefaultCreateTimeout(5 * time.Minute) + r.SetDefaultUpdateTimeout(5 * time.Minute) + r.SetDefaultDeleteTimeout(5 * time.Minute) + + return r, nil +} + +type portalResource struct { + framework.ResourceWithModel[portalResourceModel] + framework.WithTimeouts +} + +func (r *portalResource) Schema(ctx context.Context, request resource.SchemaRequest, response *resource.SchemaResponse) { + response.Schema = schema.Schema{ + Attributes: map[string]schema.Attribute{ + "additional_encryption_context": schema.MapAttribute{ + CustomType: fwtypes.MapOfStringType, + ElementType: types.StringType, + Optional: true, + PlanModifiers: []planmodifier.Map{ + mapplanmodifier.RequiresReplace(), + }, + }, + "authentication_type": schema.StringAttribute{ + CustomType: fwtypes.StringEnumType[awstypes.AuthenticationType](), + Optional: true, + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + }, + "browser_settings_arn": schema.StringAttribute{ + CustomType: fwtypes.ARNType, + Optional: true, + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + }, + "browser_type": schema.StringAttribute{ + CustomType: fwtypes.StringEnumType[awstypes.BrowserType](), + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + }, + names.AttrCreationDate: schema.StringAttribute{ + CustomType: timetypes.RFC3339Type{}, + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + }, + "customer_managed_key": schema.StringAttribute{ + Optional: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + }, + "data_protection_settings_arn": schema.StringAttribute{ + Computed: true, + PlanModifiers: 
[]planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + }, + names.AttrDisplayName: schema.StringAttribute{ + Optional: true, + Computed: true, + }, + names.AttrInstanceType: schema.StringAttribute{ + CustomType: fwtypes.StringEnumType[awstypes.InstanceType](), + Optional: true, + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + }, + "ip_access_settings_arn": schema.StringAttribute{ + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + }, + "max_concurrent_sessions": schema.Int64Attribute{ + Optional: true, + Computed: true, + }, + "network_settings_arn": schema.StringAttribute{ + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + }, + "portal_arn": schema.StringAttribute{ + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + }, + "portal_endpoint": schema.StringAttribute{ + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + }, + "portal_status": schema.StringAttribute{ + CustomType: fwtypes.StringEnumType[awstypes.PortalStatus](), + Computed: true, + }, + "renderer_type": schema.StringAttribute{ + CustomType: fwtypes.StringEnumType[awstypes.RendererType](), + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + }, + "session_logger_arn": schema.StringAttribute{ + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + }, + names.AttrStatusReason: schema.StringAttribute{ + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + }, + names.AttrTags: tftags.TagsAttribute(), + names.AttrTagsAll: tftags.TagsAttributeComputedOnly(), + "trust_store_arn": schema.StringAttribute{ + Computed: true, + PlanModifiers: []planmodifier.String{ 
+ stringplanmodifier.UseStateForUnknown(), + }, + }, + "user_access_logging_settings_arn": schema.StringAttribute{ + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + }, + "user_settings_arn": schema.StringAttribute{ + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + }, + }, + Blocks: map[string]schema.Block{ + names.AttrTimeouts: timeouts.Block(ctx, timeouts.Opts{ + Create: true, + Update: true, + Delete: true, + }), + }, + } +} + +func (r *portalResource) Create(ctx context.Context, request resource.CreateRequest, response *resource.CreateResponse) { + var data portalResourceModel + response.Diagnostics.Append(request.Plan.Get(ctx, &data)...) + if response.Diagnostics.HasError() { + return + } + + conn := r.Meta().WorkSpacesWebClient(ctx) + + var input workspacesweb.CreatePortalInput + response.Diagnostics.Append(fwflex.Expand(ctx, data, &input)...) + if response.Diagnostics.HasError() { + return + } + + // Additional fields. + input.ClientToken = aws.String(sdkid.UniqueId()) + input.Tags = getTagsIn(ctx) + + output, err := conn.CreatePortal(ctx, &input) + + if err != nil { + response.Diagnostics.AddError("creating WorkSpacesWeb Portal", err.Error()) + return + } + + data.PortalARN = fwflex.StringToFramework(ctx, output.PortalArn) + data.PortalEndpoint = fwflex.StringToFramework(ctx, output.PortalEndpoint) + + // Wait for portal to be created + portal, err := waitPortalCreated(ctx, conn, data.PortalARN.ValueString(), r.CreateTimeout(ctx, data.Timeouts)) + if err != nil { + response.Diagnostics.AddError(fmt.Sprintf("waiting for WorkSpacesWeb Portal (%s) create", data.PortalARN.ValueString()), err.Error()) + return + } + + response.Diagnostics.Append(fwflex.Flatten(ctx, portal, &data)...) + if response.Diagnostics.HasError() { + return + } + + response.Diagnostics.Append(response.State.Set(ctx, data)...) 
+} + +func (r *portalResource) Read(ctx context.Context, request resource.ReadRequest, response *resource.ReadResponse) { + var data portalResourceModel + response.Diagnostics.Append(request.State.Get(ctx, &data)...) + if response.Diagnostics.HasError() { + return + } + + conn := r.Meta().WorkSpacesWebClient(ctx) + + output, err := findPortalByARN(ctx, conn, data.PortalARN.ValueString()) + if tfretry.NotFound(err) { + response.Diagnostics.Append(fwdiag.NewResourceNotFoundWarningDiagnostic(err)) + response.State.RemoveResource(ctx) + return + } + + if err != nil { + response.Diagnostics.AddError(fmt.Sprintf("reading WorkSpacesWeb Portal (%s)", data.PortalARN.ValueString()), err.Error()) + return + } + + response.Diagnostics.Append(fwflex.Flatten(ctx, output, &data)...) + if response.Diagnostics.HasError() { + return + } + + response.Diagnostics.Append(response.State.Set(ctx, &data)...) +} + +func (r *portalResource) Update(ctx context.Context, request resource.UpdateRequest, response *resource.UpdateResponse) { + var new, old portalResourceModel + response.Diagnostics.Append(request.Plan.Get(ctx, &new)...) + if response.Diagnostics.HasError() { + return + } + response.Diagnostics.Append(request.State.Get(ctx, &old)...) 
+ if response.Diagnostics.HasError() { + return + } + + conn := r.Meta().WorkSpacesWebClient(ctx) + + if !new.AuthenticationType.Equal(old.AuthenticationType) || + !new.BrowserSettingsARN.Equal(old.BrowserSettingsARN) || + !new.DataProtectionSettingsARN.Equal(old.DataProtectionSettingsARN) || + !new.DisplayName.Equal(old.DisplayName) || + !new.InstanceType.Equal(old.InstanceType) || + !new.IPAccessSettingsARN.Equal(old.IPAccessSettingsARN) || + !new.MaxConcurrentSessions.Equal(old.MaxConcurrentSessions) || + !new.NetworkSettingsARN.Equal(old.NetworkSettingsARN) || + !new.TrustStoreARN.Equal(old.TrustStoreARN) || + !new.UserAccessLoggingSettingsARN.Equal(old.UserAccessLoggingSettingsARN) || + !new.UserSettingsARN.Equal(old.UserSettingsARN) { + var input workspacesweb.UpdatePortalInput + response.Diagnostics.Append(fwflex.Expand(ctx, new, &input)...) + if response.Diagnostics.HasError() { + return + } + + _, err := conn.UpdatePortal(ctx, &input) + + if err != nil { + response.Diagnostics.AddError(fmt.Sprintf("updating WorkSpacesWeb Portal (%s)", new.PortalARN.ValueString()), err.Error()) + return + } + + // Wait for portal to be updated + portal, err := waitPortalUpdated(ctx, conn, new.PortalARN.ValueString(), r.UpdateTimeout(ctx, new.Timeouts)) + if err != nil { + response.Diagnostics.AddError(fmt.Sprintf("waiting for WorkSpacesWeb Portal (%s) update", new.PortalARN.ValueString()), err.Error()) + return + } + + response.Diagnostics.Append(fwflex.Flatten(ctx, portal, &new)...) + if response.Diagnostics.HasError() { + return + } + } + + response.Diagnostics.Append(response.State.Set(ctx, &new)...) +} + +func (r *portalResource) Delete(ctx context.Context, request resource.DeleteRequest, response *resource.DeleteResponse) { + var data portalResourceModel + response.Diagnostics.Append(request.State.Get(ctx, &data)...) 
+ if response.Diagnostics.HasError() { + return + } + + conn := r.Meta().WorkSpacesWebClient(ctx) + + input := workspacesweb.DeletePortalInput{ + PortalArn: data.PortalARN.ValueStringPointer(), + } + _, err := conn.DeletePortal(ctx, &input) + + if errs.IsA[*awstypes.ResourceNotFoundException](err) { + return + } + + if err != nil { + response.Diagnostics.AddError(fmt.Sprintf("deleting WorkSpacesWeb Portal (%s)", data.PortalARN.ValueString()), err.Error()) + return + } + + // Wait for portal to be deleted + _, err = waitPortalDeleted(ctx, conn, data.PortalARN.ValueString(), r.DeleteTimeout(ctx, data.Timeouts)) + if err != nil { + response.Diagnostics.AddError(fmt.Sprintf("waiting for WorkSpacesWeb Portal (%s) delete", data.PortalARN.ValueString()), err.Error()) + return + } +} + +func (r *portalResource) ImportState(ctx context.Context, request resource.ImportStateRequest, response *resource.ImportStateResponse) { + resource.ImportStatePassthroughID(ctx, path.Root("portal_arn"), request, response) +} + +// Waiters +func waitPortalCreated(ctx context.Context, conn *workspacesweb.Client, arn string, timeout time.Duration) (*awstypes.Portal, error) { + stateConf := &retry.StateChangeConf{ + Pending: enum.Slice(awstypes.PortalStatusPending), + Target: enum.Slice(awstypes.PortalStatusIncomplete, awstypes.PortalStatusActive), + Refresh: statusPortal(ctx, conn, arn), + Timeout: timeout, + ContinuousTargetOccurence: 2, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + if out, ok := outputRaw.(*awstypes.Portal); ok { + return out, err + } + + return nil, err +} + +func waitPortalUpdated(ctx context.Context, conn *workspacesweb.Client, arn string, timeout time.Duration) (*awstypes.Portal, error) { + stateConf := &retry.StateChangeConf{ + Pending: enum.Slice(awstypes.PortalStatusPending), + Target: enum.Slice(awstypes.PortalStatusIncomplete, awstypes.PortalStatusActive), + Refresh: statusPortal(ctx, conn, arn), + Timeout: timeout, + ContinuousTargetOccurence: 2, + 
} + + outputRaw, err := stateConf.WaitForStateContext(ctx) + if out, ok := outputRaw.(*awstypes.Portal); ok { + return out, err + } + + return nil, err +} + +func waitPortalDeleted(ctx context.Context, conn *workspacesweb.Client, arn string, timeout time.Duration) (*awstypes.Portal, error) { + stateConf := &retry.StateChangeConf{ + Pending: enum.Slice(awstypes.PortalStatusActive, awstypes.PortalStatusIncomplete, awstypes.PortalStatusPending), + Target: []string{}, + Refresh: statusPortal(ctx, conn, arn), + Timeout: timeout, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + if out, ok := outputRaw.(*awstypes.Portal); ok { + return out, err + } + + return nil, err +} + +// Status function +func statusPortal(ctx context.Context, conn *workspacesweb.Client, arn string) retry.StateRefreshFunc { + return func() (any, string, error) { + out, err := findPortalByARN(ctx, conn, arn) + if tfretry.NotFound(err) { + return nil, "", nil + } + + if err != nil { + return nil, "", err + } + + return out, string(out.PortalStatus), nil + } +} + +// Finder function +func findPortalByARN(ctx context.Context, conn *workspacesweb.Client, arn string) (*awstypes.Portal, error) { + input := workspacesweb.GetPortalInput{ + PortalArn: aws.String(arn), + } + + output, err := conn.GetPortal(ctx, &input) + + if errs.IsA[*awstypes.ResourceNotFoundException](err) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: &input, + } + } + + if err != nil { + return nil, err + } + + if output == nil || output.Portal == nil { + return nil, tfresource.NewEmptyResultError(&input) + } + + return output.Portal, nil +} + +// Data model +type portalResourceModel struct { + framework.WithRegionModel + AdditionalEncryptionContext fwtypes.MapOfString `tfsdk:"additional_encryption_context"` + AuthenticationType fwtypes.StringEnum[awstypes.AuthenticationType] `tfsdk:"authentication_type"` + BrowserSettingsARN fwtypes.ARN `tfsdk:"browser_settings_arn"` + BrowserType 
fwtypes.StringEnum[awstypes.BrowserType] `tfsdk:"browser_type"` + CreationDate timetypes.RFC3339 `tfsdk:"creation_date"` + CustomerManagedKey types.String `tfsdk:"customer_managed_key"` + DataProtectionSettingsARN types.String `tfsdk:"data_protection_settings_arn"` + DisplayName types.String `tfsdk:"display_name"` + InstanceType fwtypes.StringEnum[awstypes.InstanceType] `tfsdk:"instance_type"` + IPAccessSettingsARN types.String `tfsdk:"ip_access_settings_arn"` + MaxConcurrentSessions types.Int64 `tfsdk:"max_concurrent_sessions"` + NetworkSettingsARN types.String `tfsdk:"network_settings_arn"` + PortalARN types.String `tfsdk:"portal_arn"` + PortalEndpoint types.String `tfsdk:"portal_endpoint"` + PortalStatus fwtypes.StringEnum[awstypes.PortalStatus] `tfsdk:"portal_status"` + RendererType fwtypes.StringEnum[awstypes.RendererType] `tfsdk:"renderer_type"` + SessionLoggerARN types.String `tfsdk:"session_logger_arn"` + StatusReason types.String `tfsdk:"status_reason"` + Tags tftags.Map `tfsdk:"tags"` + TagsAll tftags.Map `tfsdk:"tags_all"` + Timeouts timeouts.Value `tfsdk:"timeouts"` + TrustStoreARN types.String `tfsdk:"trust_store_arn"` + UserAccessLoggingSettingsARN types.String `tfsdk:"user_access_logging_settings_arn"` + UserSettingsARN types.String `tfsdk:"user_settings_arn"` +} diff --git a/internal/service/workspacesweb/portal_tags_gen_test.go b/internal/service/workspacesweb/portal_tags_gen_test.go new file mode 100644 index 000000000000..75bd3f583e1a --- /dev/null +++ b/internal/service/workspacesweb/portal_tags_gen_test.go @@ -0,0 +1,2245 @@ +// Code generated by internal/generate/tagstests/main.go; DO NOT EDIT. 
+ +package workspacesweb_test + +import ( + "testing" + + "github.com/aws/aws-sdk-go-v2/service/workspacesweb/types" + "github.com/hashicorp/terraform-plugin-testing/config" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/knownvalue" + "github.com/hashicorp/terraform-plugin-testing/plancheck" + "github.com/hashicorp/terraform-plugin-testing/statecheck" + "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccWorkSpacesWebPortal_tags(t *testing.T) { + ctx := acctest.Context(t) + + var v types.Portal + resourceName := "aws_workspacesweb_portal.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), + CheckDestroy: testAccCheckPortalDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + ConfigDirectory: config.StaticDirectory("testdata/Portal/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckPortalExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, 
plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Portal/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, "portal_arn"), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "portal_arn", + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Portal/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1Updated), + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckPortalExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1Updated), + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1Updated), + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + 
plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1Updated), + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1Updated), + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Portal/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1Updated), + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, "portal_arn"), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "portal_arn", + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Portal/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckPortalExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + 
PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Portal/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, "portal_arn"), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "portal_arn", + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Portal/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckPortalExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Portal/tags/"), + 
ConfigVariables: config.Variables{ + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, "portal_arn"), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "portal_arn", + }, + }, + }) +} + +func TestAccWorkSpacesWebPortal_tags_null(t *testing.T) { + ctx := acctest.Context(t) + + var v types.Portal + resourceName := "aws_workspacesweb_portal.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), + CheckDestroy: testAccCheckPortalDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + ConfigDirectory: config.StaticDirectory("testdata/Portal/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: nil, + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckPortalExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.Null(), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.Null(), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + 
})), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Portal/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: nil, + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, "portal_arn"), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "portal_arn", + ImportStateVerifyIgnore: []string{ + acctest.CtTagsKey1, // The canonical value returned by the AWS API is "" + }, + }, + }, + }) +} + +func TestAccWorkSpacesWebPortal_tags_EmptyMap(t *testing.T) { + ctx := acctest.Context(t) + + var v types.Portal + resourceName := "aws_workspacesweb_portal.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), + CheckDestroy: testAccCheckPortalDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + ConfigDirectory: config.StaticDirectory("testdata/Portal/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{}), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckPortalExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + plancheck.ExpectKnownValue(resourceName, 
tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Portal/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{}), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, "portal_arn"), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "portal_arn", + ImportStateVerifyIgnore: []string{ + acctest.CtTagsKey1, // The canonical value returned by the AWS API is "" + }, + }, + }, + }) +} + +func TestAccWorkSpacesWebPortal_tags_AddOnUpdate(t *testing.T) { + ctx := acctest.Context(t) + + var v types.Portal + resourceName := "aws_workspacesweb_portal.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), + CheckDestroy: testAccCheckPortalDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + ConfigDirectory: config.StaticDirectory("testdata/Portal/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckPortalExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), 
knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Portal/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckPortalExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Portal/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, "portal_arn"), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "portal_arn", + }, + }, + }) +} + +func TestAccWorkSpacesWebPortal_tags_EmptyTag_OnCreate(t *testing.T) { + ctx := 
acctest.Context(t) + + var v types.Portal + resourceName := "aws_workspacesweb_portal.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), + CheckDestroy: testAccCheckPortalDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + ConfigDirectory: config.StaticDirectory("testdata/Portal/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckPortalExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Portal/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: 
acctest.AttrImportStateIdFunc(resourceName, "portal_arn"), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "portal_arn", + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Portal/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckPortalExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Portal/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, "portal_arn"), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "portal_arn", + }, + }, + }) +} + +func TestAccWorkSpacesWebPortal_tags_EmptyTag_OnUpdate_Add(t *testing.T) { + ctx := acctest.Context(t) + + var v types.Portal + resourceName := "aws_workspacesweb_portal.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), + CheckDestroy: testAccCheckPortalDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + ConfigDirectory: 
config.StaticDirectory("testdata/Portal/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckPortalExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Portal/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + acctest.CtKey2: config.StringVariable(""), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckPortalExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + acctest.CtKey2: 
knownvalue.StringExact(""), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + acctest.CtKey2: knownvalue.StringExact(""), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + acctest.CtKey2: knownvalue.StringExact(""), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + acctest.CtKey2: knownvalue.StringExact(""), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Portal/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + acctest.CtKey2: config.StringVariable(""), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, "portal_arn"), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "portal_arn", + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Portal/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckPortalExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: 
knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Portal/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, "portal_arn"), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "portal_arn", + }, + }, + }) +} + +func TestAccWorkSpacesWebPortal_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { + ctx := acctest.Context(t) + + var v types.Portal + resourceName := "aws_workspacesweb_portal.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), + CheckDestroy: testAccCheckPortalDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + ConfigDirectory: config.StaticDirectory("testdata/Portal/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: 
config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckPortalExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Portal/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckPortalExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + 
plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Portal/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, "portal_arn"), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "portal_arn", + }, + }, + }) +} + +func TestAccWorkSpacesWebPortal_tags_DefaultTags_providerOnly(t *testing.T) { + ctx := acctest.Context(t) + + var v types.Portal + resourceName := "aws_workspacesweb_portal.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), + CheckDestroy: testAccCheckPortalDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Portal/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckPortalExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, 
tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Portal/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, "portal_arn"), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "portal_arn", + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Portal/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1Updated), + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckPortalExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + 
acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1Updated), + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1Updated), + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Portal/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1Updated), + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, "portal_arn"), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "portal_arn", + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Portal/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckPortalExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), 
knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Portal/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, "portal_arn"), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "portal_arn", + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Portal/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckPortalExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), 
knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Portal/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, "portal_arn"), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "portal_arn", + }, + }, + }) +} + +func TestAccWorkSpacesWebPortal_tags_DefaultTags_nonOverlapping(t *testing.T) { + ctx := acctest.Context(t) + + var v types.Portal + resourceName := "aws_workspacesweb_portal.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), + CheckDestroy: testAccCheckPortalDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Portal/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckPortalExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), 
knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1), + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1), + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Portal/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, "portal_arn"), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "portal_arn", + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Portal/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1Updated), + }), + acctest.CtResourceTags: 
config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: config.StringVariable(acctest.CtResourceValue2), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckPortalExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1Updated), + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1Updated), + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: 
config.StaticDirectory("testdata/Portal/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1Updated), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: config.StringVariable(acctest.CtResourceValue2), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, "portal_arn"), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "portal_arn", + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Portal/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckPortalExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Portal/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: 
acctest.AttrImportStateIdFunc(resourceName, "portal_arn"), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "portal_arn", + }, + }, + }) +} + +func TestAccWorkSpacesWebPortal_tags_DefaultTags_overlapping(t *testing.T) { + ctx := acctest.Context(t) + + var v types.Portal + resourceName := "aws_workspacesweb_portal.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), + CheckDestroy: testAccCheckPortalDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Portal/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtResourceValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckPortalExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: 
knownvalue.StringExact(acctest.CtResourceValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Portal/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtResourceValue1), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, "portal_arn"), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "portal_arn", + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Portal/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtProviderValue1), + acctest.CtOverlapKey2: config.StringVariable("providervalue2"), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtResourceValue1), + acctest.CtOverlapKey2: config.StringVariable(acctest.CtResourceValue2), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckPortalExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + acctest.CtOverlapKey2: 
knownvalue.StringExact(acctest.CtResourceValue2), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + acctest.CtOverlapKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + acctest.CtOverlapKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + acctest.CtOverlapKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Portal/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtProviderValue1), + acctest.CtOverlapKey2: config.StringVariable("providervalue2"), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtResourceValue1), + acctest.CtOverlapKey2: config.StringVariable(acctest.CtResourceValue2), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, "portal_arn"), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "portal_arn", + }, + { + ProtoV5ProviderFactories: 
acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Portal/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtResourceValue2), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckPortalExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue2), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Portal/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: 
config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtResourceValue2), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, "portal_arn"), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "portal_arn", + }, + }, + }) +} + +func TestAccWorkSpacesWebPortal_tags_DefaultTags_updateToProviderOnly(t *testing.T) { + ctx := acctest.Context(t) + + var v types.Portal + resourceName := "aws_workspacesweb_portal.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), + CheckDestroy: testAccCheckPortalDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Portal/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckPortalExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: 
knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Portal/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckPortalExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Portal/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, 
"portal_arn"), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "portal_arn", + }, + }, + }) +} + +func TestAccWorkSpacesWebPortal_tags_DefaultTags_updateToResourceOnly(t *testing.T) { + ctx := acctest.Context(t) + + var v types.Portal + resourceName := "aws_workspacesweb_portal.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), + CheckDestroy: testAccCheckPortalDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Portal/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckPortalExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Portal/tags/"), + ConfigVariables: config.Variables{ + 
acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckPortalExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Portal/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, "portal_arn"), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "portal_arn", + }, + }, + }) +} + +func TestAccWorkSpacesWebPortal_tags_DefaultTags_emptyResourceTag(t *testing.T) { + ctx := acctest.Context(t) + + var v types.Portal + resourceName := "aws_workspacesweb_portal.test" + + 
acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), + CheckDestroy: testAccCheckPortalDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Portal/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckPortalExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Portal/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + 
acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, "portal_arn"), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "portal_arn", + }, + }, + }) +} + +func TestAccWorkSpacesWebPortal_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T) { + ctx := acctest.Context(t) + + var v types.Portal + resourceName := "aws_workspacesweb_portal.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), + CheckDestroy: testAccCheckPortalDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Portal/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckPortalExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), 
knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Portal/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, "portal_arn"), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "portal_arn", + }, + }, + }) +} + +func TestAccWorkSpacesWebPortal_tags_DefaultTags_nullOverlappingResourceTag(t *testing.T) { + ctx := acctest.Context(t) + + var v types.Portal + resourceName := "aws_workspacesweb_portal.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), + CheckDestroy: testAccCheckPortalDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Portal/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: nil, + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckPortalExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.Null(), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), 
knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.Null(), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Portal/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: nil, + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, "portal_arn"), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "portal_arn", + ImportStateVerifyIgnore: []string{ + acctest.CtTagsKey1, // The canonical value returned by the AWS API is "" + }, + }, + }, + }) +} + +func TestAccWorkSpacesWebPortal_tags_DefaultTags_nullNonOverlappingResourceTag(t *testing.T) { + ctx := acctest.Context(t) + + var v types.Portal + resourceName := "aws_workspacesweb_portal.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), + CheckDestroy: testAccCheckPortalDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Portal/tags_defaults/"), 
+ ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: nil, + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckPortalExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.Null(), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(""), + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.Null(), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(""), + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Portal/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: nil, + }), + }, + ResourceName: resourceName, 
+ ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, "portal_arn"), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "portal_arn", + ImportStateVerifyIgnore: []string{ + "tags.resourcekey1", // The canonical value returned by the AWS API is "" + }, + }, + }, + }) +} + +func TestAccWorkSpacesWebPortal_tags_ComputedTag_OnCreate(t *testing.T) { + ctx := acctest.Context(t) + + var v types.Portal + resourceName := "aws_workspacesweb_portal.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), + CheckDestroy: testAccCheckPortalDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Portal/tagsComputed1/"), + ConfigVariables: config.Variables{ + "unknownTagKey": config.StringVariable("computedkey1"), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckPortalExists(ctx, resourceName, &v), + resource.TestCheckResourceAttrPair(resourceName, "tags.computedkey1", "null_resource.test", names.AttrID), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapSizeExact(1)), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapSizeExact(1)), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTags).AtMapKey("computedkey1")), + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + 
plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Portal/tagsComputed1/"), + ConfigVariables: config.Variables{ + "unknownTagKey": config.StringVariable("computedkey1"), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, "portal_arn"), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "portal_arn", + }, + }, + }) +} + +func TestAccWorkSpacesWebPortal_tags_ComputedTag_OnUpdate_Add(t *testing.T) { + ctx := acctest.Context(t) + + var v types.Portal + resourceName := "aws_workspacesweb_portal.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), + CheckDestroy: testAccCheckPortalDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Portal/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckPortalExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, 
plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Portal/tagsComputed2/"), + ConfigVariables: config.Variables{ + "unknownTagKey": config.StringVariable("computedkey1"), + "knownTagKey": config.StringVariable(acctest.CtKey1), + "knownTagValue": config.StringVariable(acctest.CtValue1), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckPortalExists(ctx, resourceName, &v), + resource.TestCheckResourceAttrPair(resourceName, "tags.computedkey1", "null_resource.test", names.AttrID), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapSizeExact(2)), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapPartial(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapSizeExact(2)), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapPartial(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTags).AtMapKey("computedkey1")), + plancheck.ExpectUnknownValue(resourceName, 
tfjsonpath.New(names.AttrTagsAll)), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Portal/tagsComputed2/"), + ConfigVariables: config.Variables{ + "unknownTagKey": config.StringVariable("computedkey1"), + "knownTagKey": config.StringVariable(acctest.CtKey1), + "knownTagValue": config.StringVariable(acctest.CtValue1), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, "portal_arn"), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "portal_arn", + }, + }, + }) +} + +func TestAccWorkSpacesWebPortal_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { + ctx := acctest.Context(t) + + var v types.Portal + resourceName := "aws_workspacesweb_portal.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), + CheckDestroy: testAccCheckPortalDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Portal/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckPortalExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + 
statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Portal/tagsComputed1/"), + ConfigVariables: config.Variables{ + "unknownTagKey": config.StringVariable(acctest.CtKey1), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckPortalExists(ctx, resourceName, &v), + resource.TestCheckResourceAttrPair(resourceName, acctest.CtTagsKey1, "null_resource.test", names.AttrID), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapSizeExact(1)), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapSizeExact(1)), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTags).AtMapKey(acctest.CtKey1)), + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: 
[]plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Portal/tagsComputed1/"), + ConfigVariables: config.Variables{ + "unknownTagKey": config.StringVariable(acctest.CtKey1), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, "portal_arn"), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "portal_arn", + }, + }, + }) +} + +func TestAccWorkSpacesWebPortal_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { + ctx := acctest.Context(t) + + var v types.Portal + resourceName := "aws_workspacesweb_portal.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), + CheckDestroy: testAccCheckPortalDestroy(ctx), + Steps: []resource.TestStep{ + // 1: Create + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Portal/tags_ignore/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1), + }), + "ignore_tag_keys": config.SetVariable( + config.StringVariable(acctest.CtProviderKey1), + ), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckPortalExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + 
statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + expectFullResourceTags(ctx, resourceName, knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1), // TODO: Should not be set + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectEmptyPlan(), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectEmptyPlan(), + }, + }, + }, + // 2: Update ignored tag only + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Portal/tags_ignore/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1Updated), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1), + }), + "ignore_tag_keys": config.SetVariable( + config.StringVariable(acctest.CtProviderKey1), + ), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckPortalExists(ctx, resourceName, &v), + ), + 
ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + expectFullResourceTags(ctx, resourceName, knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1), // TODO: Should not be set + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectEmptyPlan(), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectEmptyPlan(), + }, + }, + }, + // 3: Update both tags + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Portal/tags_ignore/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1Again), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: 
config.StringVariable(acctest.CtResourceValue1Updated), + }), + "ignore_tag_keys": config.SetVariable( + config.StringVariable(acctest.CtProviderKey1), + ), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckPortalExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + })), + expectFullResourceTags(ctx, resourceName, knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1), // TODO: Should not be set + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + })), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectEmptyPlan(), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectEmptyPlan(), + }, + }, + }, + }, + }) +} + +func TestAccWorkSpacesWebPortal_tags_IgnoreTags_Overlap_ResourceTag(t *testing.T) { + ctx := acctest.Context(t) + + var v types.Portal + resourceName := 
"aws_workspacesweb_portal.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), + CheckDestroy: testAccCheckPortalDestroy(ctx), + Steps: []resource.TestStep{ + // 1: Create + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Portal/tags_ignore/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1), + acctest.CtResourceKey2: config.StringVariable(acctest.CtResourceValue2), + }), + "ignore_tag_keys": config.SetVariable( + config.StringVariable(acctest.CtResourceKey1), + ), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckPortalExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + expectFullResourceTags(ctx, resourceName, knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), // TODO: Should not be set + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), 
knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectEmptyPlan(), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), // TODO: Should be NoOp + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + }, + ExpectNonEmptyPlan: true, + }, + // 2: Update ignored tag + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Portal/tags_ignore/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: config.StringVariable(acctest.CtResourceValue2), + }), + "ignore_tag_keys": config.SetVariable( + config.StringVariable(acctest.CtResourceKey1), + ), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckPortalExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + 
statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + expectFullResourceTags(ctx, resourceName, knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), // TODO: Should not be set + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectEmptyPlan(), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), // TODO: Should be NoOp + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + 
plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + }, + ExpectNonEmptyPlan: true, + }, + // 3: Update both tags + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Portal/tags_ignore/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1Again), + acctest.CtResourceKey2: config.StringVariable(acctest.CtResourceValue2Updated), + }), + "ignore_tag_keys": config.SetVariable( + config.StringVariable(acctest.CtResourceKey1), + ), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckPortalExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Again), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + expectFullResourceTags(ctx, resourceName, knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), // TODO: Should not be set + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), 
knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Again), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectEmptyPlan(), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), // TODO: Should be NoOp + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Again), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + }, + }, + ExpectNonEmptyPlan: true, + }, + }, + }) +} diff --git a/internal/service/workspacesweb/portal_test.go b/internal/service/workspacesweb/portal_test.go new file mode 100644 index 000000000000..68986eb12d36 --- /dev/null +++ b/internal/service/workspacesweb/portal_test.go @@ -0,0 +1,302 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package workspacesweb_test + +import ( + "context" + "fmt" + "testing" + + "github.com/YakDriver/regexache" + awstypes "github.com/aws/aws-sdk-go-v2/service/workspacesweb/types" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/terraform" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + tfworkspacesweb "github.com/hashicorp/terraform-provider-aws/internal/service/workspacesweb" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccWorkSpacesWebPortal_basic(t *testing.T) { + ctx := acctest.Context(t) + var portal awstypes.Portal + resourceName := "aws_workspacesweb_portal.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, names.WorkSpacesWebEndpointID) + testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckPortalDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccPortalConfig_basic(), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckPortalExists(ctx, resourceName, &portal), + resource.TestCheckResourceAttr(resourceName, names.AttrDisplayName, "test"), + resource.TestCheckResourceAttr(resourceName, names.AttrInstanceType, string(awstypes.InstanceTypeStandardRegular)), + acctest.MatchResourceAttrRegionalARN(ctx, resourceName, "portal_arn", "workspaces-web", regexache.MustCompile(`portal/.+$`)), + resource.TestCheckResourceAttrSet(resourceName, "portal_endpoint"), + resource.TestCheckResourceAttr(resourceName, "portal_status", string(awstypes.PortalStatusIncomplete)), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: 
true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, "portal_arn"), + ImportStateVerifyIdentifierAttribute: "portal_arn", + }, + }, + }) +} + +func TestAccWorkSpacesWebPortal_disappears(t *testing.T) { + ctx := acctest.Context(t) + var portal awstypes.Portal + resourceName := "aws_workspacesweb_portal.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, names.WorkSpacesWebEndpointID) + testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckPortalDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccPortalConfig_basic(), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckPortalExists(ctx, resourceName, &portal), + acctest.CheckFrameworkResourceDisappears(ctx, acctest.Provider, tfworkspacesweb.ResourcePortal, resourceName), + ), + ExpectNonEmptyPlan: true, + }, + }, + }) +} + +func TestAccWorkSpacesWebPortal_update(t *testing.T) { + ctx := acctest.Context(t) + var portal awstypes.Portal + resourceName := "aws_workspacesweb_portal.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, names.WorkSpacesWebEndpointID) + acctest.PreCheckSSOAdminInstances(ctx, t) + testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckPortalDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccPortalConfig_updateBefore(), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckPortalExists(ctx, resourceName, &portal), + resource.TestCheckResourceAttr(resourceName, names.AttrDisplayName, "test-before"), + resource.TestCheckResourceAttr(resourceName, names.AttrInstanceType, 
string(awstypes.InstanceTypeStandardRegular)), + resource.TestCheckResourceAttr(resourceName, "max_concurrent_sessions", "1"), + resource.TestCheckResourceAttr(resourceName, "authentication_type", string(awstypes.AuthenticationTypeStandard)), + resource.TestCheckResourceAttr(resourceName, "portal_status", string(awstypes.PortalStatusIncomplete)), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, "portal_arn"), + ImportStateVerifyIdentifierAttribute: "portal_arn", + }, + { + Config: testAccPortalConfig_updateAfter(), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckPortalExists(ctx, resourceName, &portal), + resource.TestCheckResourceAttr(resourceName, names.AttrDisplayName, "test-after"), + resource.TestCheckResourceAttr(resourceName, names.AttrInstanceType, string(awstypes.InstanceTypeStandardLarge)), + resource.TestCheckResourceAttr(resourceName, "max_concurrent_sessions", "2"), + resource.TestCheckResourceAttr(resourceName, "authentication_type", string(awstypes.AuthenticationTypeIamIdentityCenter)), + resource.TestCheckResourceAttr(resourceName, "portal_status", string(awstypes.PortalStatusActive)), + ), + }, + }, + }) +} + +func TestAccWorkSpacesWebPortal_complete(t *testing.T) { + ctx := acctest.Context(t) + var portal awstypes.Portal + resourceName := "aws_workspacesweb_portal.test" + kmsKeyResourceName := "aws_kms_key.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, names.WorkSpacesWebEndpointID) + testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckPortalDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccPortalConfig_complete(), + Check: resource.ComposeAggregateTestCheckFunc( + 
testAccCheckPortalExists(ctx, resourceName, &portal), + resource.TestCheckResourceAttr(resourceName, names.AttrDisplayName, "test-complete"), + resource.TestCheckResourceAttr(resourceName, names.AttrInstanceType, string(awstypes.InstanceTypeStandardLarge)), + resource.TestCheckResourceAttr(resourceName, "max_concurrent_sessions", "2"), + resource.TestCheckResourceAttr(resourceName, "authentication_type", string(awstypes.AuthenticationTypeStandard)), + resource.TestCheckResourceAttrPair(resourceName, "customer_managed_key", kmsKeyResourceName, names.AttrARN), + resource.TestCheckResourceAttr(resourceName, "additional_encryption_context.Environment", "Production"), + resource.TestCheckResourceAttr(resourceName, "portal_status", string(awstypes.PortalStatusIncomplete)), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, "portal_arn"), + ImportStateVerifyIdentifierAttribute: "portal_arn", + }, + }, + }) +} + +func testAccCheckPortalDestroy(ctx context.Context) resource.TestCheckFunc { + return func(s *terraform.State) error { + conn := acctest.Provider.Meta().(*conns.AWSClient).WorkSpacesWebClient(ctx) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "aws_workspacesweb_portal" { + continue + } + + _, err := tfworkspacesweb.FindPortalByARN(ctx, conn, rs.Primary.Attributes["portal_arn"]) + + if tfresource.NotFound(err) { + continue + } + + if err != nil { + return err + } + + return fmt.Errorf("WorkSpaces Web Portal %s still exists", rs.Primary.Attributes["portal_arn"]) + } + + return nil + } +} + +func testAccCheckPortalExists(ctx context.Context, n string, v *awstypes.Portal) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + conn := acctest.Provider.Meta().(*conns.AWSClient).WorkSpacesWebClient(ctx) + + output, err := 
tfworkspacesweb.FindPortalByARN(ctx, conn, rs.Primary.Attributes["portal_arn"]) + + if err != nil { + return err + } + + *v = *output + + return nil + } +} + +func testAccPortalConfig_basic() string { + return fmt.Sprintf(` +resource "aws_workspacesweb_portal" "test" { + display_name = "test" + instance_type = %q +} +`, string(awstypes.InstanceTypeStandardRegular)) +} + +func testAccPortalConfig_updateBefore() string { + return testAccPortalConfig_template("test-before", string(awstypes.InstanceTypeStandardRegular), 1, string(awstypes.AuthenticationTypeStandard)) +} + +func testAccPortalConfig_updateAfter() string { + return testAccPortalConfig_template("test-after", string(awstypes.InstanceTypeStandardLarge), 2, string(awstypes.AuthenticationTypeIamIdentityCenter)) +} + +func testAccPortalConfig_template(displayName, instanceType string, maxConcurrentSessions int, authenticationType string) string { + return fmt.Sprintf(` +resource "aws_workspacesweb_portal" "test" { + display_name = %q + instance_type = %q + max_concurrent_sessions = %d + authentication_type = %q +} +`, displayName, instanceType, maxConcurrentSessions, authenticationType) +} + +func testAccPortalConfig_complete() string { + return fmt.Sprintf(` + +data "aws_caller_identity" "current" {} +data "aws_partition" "current" {} + +resource "aws_kms_key" "test" { + deletion_window_in_days = 7 + policy = jsonencode({ + Version = "2012-10-17" + Statement = [ + { + Sid = "Enable IAM User Permissions" + Effect = "Allow" + Principal = { + AWS = "arn:${data.aws_partition.current.partition}:iam::${data.aws_caller_identity.current.account_id}:root" + } + Action = "kms:*" + Resource = "*" + }, + { + Sid = "Allow WorkSpacesWeb to use the key" + Effect = "Allow" + Principal = { + Service = "workspaces-web.amazonaws.com" + } + Action = [ + "kms:DescribeKey", + "kms:GenerateDataKey", + "kms:GenerateDataKeyWithoutPlaintext", + "kms:Decrypt", + "kms:ReEncryptTo", + "kms:ReEncryptFrom" + ] + Resource = "*" + } + ] + }) 
+} + +resource "aws_workspacesweb_portal" "test" { + display_name = "test-complete" + instance_type = %q + max_concurrent_sessions = 2 + authentication_type = %q + customer_managed_key = aws_kms_key.test.arn + + additional_encryption_context = { + Environment = "Production" + } +} +`, string(awstypes.InstanceTypeStandardLarge), string(awstypes.AuthenticationTypeStandard)) +} diff --git a/internal/service/workspacesweb/service_endpoint_resolver_gen.go b/internal/service/workspacesweb/service_endpoint_resolver_gen.go index cdb424937fcc..814afc3d7e90 100644 --- a/internal/service/workspacesweb/service_endpoint_resolver_gen.go +++ b/internal/service/workspacesweb/service_endpoint_resolver_gen.go @@ -62,7 +62,7 @@ func (r resolverV2) ResolveEndpoint(ctx context.Context, params workspacesweb.En }) params.UseFIPS = aws.Bool(false) } else { - err = fmt.Errorf("looking up workspacesweb endpoint %q: %s", hostname, err) + err = fmt.Errorf("looking up workspacesweb endpoint %q: %w", hostname, err) return } } else { diff --git a/internal/service/workspacesweb/service_endpoints_gen_test.go b/internal/service/workspacesweb/service_endpoints_gen_test.go index 389e3634c1b3..6ffb9506201d 100644 --- a/internal/service/workspacesweb/service_endpoints_gen_test.go +++ b/internal/service/workspacesweb/service_endpoints_gen_test.go @@ -521,7 +521,7 @@ func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { ) } -var errCancelOperation = fmt.Errorf("Test: Canceling request") +var errCancelOperation = errors.New("Test: Canceling request") func addCancelRequestMiddleware() func(*middleware.Stack) error { return func(stack *middleware.Stack) error { diff --git a/internal/service/workspacesweb/service_package_gen.go b/internal/service/workspacesweb/service_package_gen.go index bd38debde273..e8212d5ccbcc 100644 --- a/internal/service/workspacesweb/service_package_gen.go +++ b/internal/service/workspacesweb/service_package_gen.go @@ -7,7 +7,6 @@ import ( "unique" 
"github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/workspacesweb" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -33,6 +32,12 @@ func (p *servicePackage) FrameworkResources(ctx context.Context) []*inttypes.Ser }), Region: unique.Make(inttypes.ResourceRegionDefault()), }, + { + Factory: newBrowserSettingsAssociationResource, + TypeName: "aws_workspacesweb_browser_settings_association", + Name: "Browser Settings Association", + Region: unique.Make(inttypes.ResourceRegionDefault()), + }, { Factory: newDataProtectionSettingsResource, TypeName: "aws_workspacesweb_data_protection_settings", @@ -42,6 +47,21 @@ func (p *servicePackage) FrameworkResources(ctx context.Context) []*inttypes.Ser }), Region: unique.Make(inttypes.ResourceRegionDefault()), }, + { + Factory: newDataProtectionSettingsAssociationResource, + TypeName: "aws_workspacesweb_data_protection_settings_association", + Name: "Data Protection Settings Association", + Region: unique.Make(inttypes.ResourceRegionDefault()), + }, + { + Factory: newIdentityProviderResource, + TypeName: "aws_workspacesweb_identity_provider", + Name: "Identity Provider", + Tags: unique.Make(inttypes.ServicePackageResourceTags{ + IdentifierAttribute: "identity_provider_arn", + }), + Region: unique.Make(inttypes.ResourceRegionDefault()), + }, { Factory: newIPAccessSettingsResource, TypeName: "aws_workspacesweb_ip_access_settings", @@ -51,6 +71,12 @@ func (p *servicePackage) FrameworkResources(ctx context.Context) []*inttypes.Ser }), Region: unique.Make(inttypes.ResourceRegionDefault()), }, + { + Factory: newIPAccessSettingsAssociationResource, + TypeName: "aws_workspacesweb_ip_access_settings_association", + Name: "IP Access Settings Association", + Region: unique.Make(inttypes.ResourceRegionDefault()), + }, { Factory: newNetworkSettingsResource, TypeName: "aws_workspacesweb_network_settings", @@ -60,6 
+86,51 @@ func (p *servicePackage) FrameworkResources(ctx context.Context) []*inttypes.Ser }), Region: unique.Make(inttypes.ResourceRegionDefault()), }, + { + Factory: newNetworkSettingsAssociationResource, + TypeName: "aws_workspacesweb_network_settings_association", + Name: "Network Settings Association", + Region: unique.Make(inttypes.ResourceRegionDefault()), + }, + { + Factory: newPortalResource, + TypeName: "aws_workspacesweb_portal", + Name: "Portal", + Tags: unique.Make(inttypes.ServicePackageResourceTags{ + IdentifierAttribute: "portal_arn", + }), + Region: unique.Make(inttypes.ResourceRegionDefault()), + }, + { + Factory: newSessionLoggerResource, + TypeName: "aws_workspacesweb_session_logger", + Name: "Session Logger", + Tags: unique.Make(inttypes.ServicePackageResourceTags{ + IdentifierAttribute: "session_logger_arn", + }), + Region: unique.Make(inttypes.ResourceRegionDefault()), + }, + { + Factory: newSessionLoggerAssociationResource, + TypeName: "aws_workspacesweb_session_logger_association", + Name: "Session Logger Association", + Region: unique.Make(inttypes.ResourceRegionDefault()), + }, + { + Factory: newTrustStoreResource, + TypeName: "aws_workspacesweb_trust_store", + Name: "Trust Store", + Tags: unique.Make(inttypes.ServicePackageResourceTags{ + IdentifierAttribute: "trust_store_arn", + }), + Region: unique.Make(inttypes.ResourceRegionDefault()), + }, + { + Factory: newTrustStoreAssociationResource, + TypeName: "aws_workspacesweb_trust_store_association", + Name: "Trust Store Association", + Region: unique.Make(inttypes.ResourceRegionDefault()), + }, { Factory: newUserAccessLoggingSettingsResource, TypeName: "aws_workspacesweb_user_access_logging_settings", @@ -69,6 +140,12 @@ func (p *servicePackage) FrameworkResources(ctx context.Context) []*inttypes.Ser }), Region: unique.Make(inttypes.ResourceRegionDefault()), }, + { + Factory: newUserAccessLoggingSettingsAssociationResource, + TypeName: 
"aws_workspacesweb_user_access_logging_settings_association", + Name: "User Access Logging Settings Association", + Region: unique.Make(inttypes.ResourceRegionDefault()), + }, { Factory: newUserSettingsResource, TypeName: "aws_workspacesweb_user_settings", @@ -78,6 +155,12 @@ func (p *servicePackage) FrameworkResources(ctx context.Context) []*inttypes.Ser }), Region: unique.Make(inttypes.ResourceRegionDefault()), }, + { + Factory: newUserSettingsAssociationResource, + TypeName: "aws_workspacesweb_user_settings_association", + Name: "User Settings Association", + Region: unique.Make(inttypes.ResourceRegionDefault()), + }, } } @@ -112,7 +195,7 @@ func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) ( func(o *workspacesweb.Options) { if inContext, ok := conns.FromContext(ctx); ok && inContext.VCREnabled() { tflog.Info(ctx, "overriding retry behavior to immediately return VCR errors") - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(vcr.InteractionNotFoundRetryableFunc)) + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), vcr.InteractionNotFoundRetryableFunc) } }, withExtraOptions(ctx, p, config), diff --git a/internal/service/workspacesweb/session_logger.go b/internal/service/workspacesweb/session_logger.go new file mode 100644 index 000000000000..98f027165a5b --- /dev/null +++ b/internal/service/workspacesweb/session_logger.go @@ -0,0 +1,396 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package workspacesweb + +import ( + "context" + "fmt" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/workspacesweb" + awstypes "github.com/aws/aws-sdk-go-v2/service/workspacesweb/types" + "github.com/hashicorp/terraform-plugin-framework-validators/listvalidator" + "github.com/hashicorp/terraform-plugin-framework-validators/setvalidator" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/listplanmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/mapplanmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/types" + sdkid "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" + "github.com/hashicorp/terraform-provider-aws/internal/errs" + "github.com/hashicorp/terraform-provider-aws/internal/errs/fwdiag" + "github.com/hashicorp/terraform-provider-aws/internal/framework" + fwflex "github.com/hashicorp/terraform-provider-aws/internal/framework/flex" + fwtypes "github.com/hashicorp/terraform-provider-aws/internal/framework/types" + tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + "github.com/hashicorp/terraform-provider-aws/names" +) + +// @FrameworkResource("aws_workspacesweb_session_logger", name="Session Logger") +// @Tags(identifierAttribute="session_logger_arn") +// @Testing(tagsTest=true) +// 
@Testing(existsType="github.com/aws/aws-sdk-go-v2/service/workspacesweb/types;types.SessionLogger") +// @Testing(importStateIdAttribute="session_logger_arn") +func newSessionLoggerResource(_ context.Context) (resource.ResourceWithConfigure, error) { + return &sessionLoggerResource{}, nil +} + +type sessionLoggerResource struct { + framework.ResourceWithModel[sessionLoggerResourceModel] +} + +func (r *sessionLoggerResource) Schema(ctx context.Context, request resource.SchemaRequest, response *resource.SchemaResponse) { + response.Schema = schema.Schema{ + Attributes: map[string]schema.Attribute{ + "additional_encryption_context": schema.MapAttribute{ + CustomType: fwtypes.MapOfStringType, + ElementType: types.StringType, + Optional: true, + PlanModifiers: []planmodifier.Map{ + mapplanmodifier.RequiresReplace(), + }, + }, + "associated_portal_arns": schema.ListAttribute{ + CustomType: fwtypes.ListOfStringType, + ElementType: types.StringType, + Computed: true, + PlanModifiers: []planmodifier.List{ + listplanmodifier.UseStateForUnknown(), + }, + }, + "customer_managed_key": schema.StringAttribute{ + CustomType: fwtypes.ARNType, + Optional: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + }, + names.AttrDisplayName: schema.StringAttribute{ + Optional: true, + }, + "session_logger_arn": schema.StringAttribute{ + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + }, + names.AttrTags: tftags.TagsAttribute(), + names.AttrTagsAll: tftags.TagsAttributeComputedOnly(), + }, + Blocks: map[string]schema.Block{ + "event_filter": schema.ListNestedBlock{ + CustomType: fwtypes.NewListNestedObjectTypeOf[eventFilterModel](ctx), + Validators: []validator.List{ + listvalidator.IsRequired(), + listvalidator.SizeAtLeast(1), + listvalidator.SizeAtMost(1), + }, + NestedObject: schema.NestedBlockObject{ + Attributes: map[string]schema.Attribute{ + "include": schema.SetAttribute{ + CustomType: 
fwtypes.SetOfStringEnumType[awstypes.Event](), + Validators: []validator.Set{ + setvalidator.ExactlyOneOf( + path.MatchRelative().AtParent().AtName("all"), + path.MatchRelative().AtParent().AtName("include"), + ), + }, + Optional: true, + }, + }, + Blocks: map[string]schema.Block{ + "all": schema.ListNestedBlock{ + CustomType: fwtypes.NewListNestedObjectTypeOf[eventFilterAllModel](ctx), + Validators: []validator.List{ + listvalidator.SizeAtMost(1), + }, + NestedObject: schema.NestedBlockObject{}, + }, + }, + }, + }, + "log_configuration": schema.ListNestedBlock{ + CustomType: fwtypes.NewListNestedObjectTypeOf[logConfigurationModel](ctx), + Validators: []validator.List{ + listvalidator.IsRequired(), + listvalidator.SizeAtLeast(1), + listvalidator.SizeAtMost(1), + }, + NestedObject: schema.NestedBlockObject{ + Blocks: map[string]schema.Block{ + "s3": schema.ListNestedBlock{ + CustomType: fwtypes.NewListNestedObjectTypeOf[s3LogConfigurationModel](ctx), + Validators: []validator.List{ + listvalidator.SizeAtMost(1), + }, + NestedObject: schema.NestedBlockObject{ + Attributes: map[string]schema.Attribute{ + names.AttrBucket: schema.StringAttribute{ + Required: true, + }, + "bucket_owner": schema.StringAttribute{ + Optional: true, + Computed: true, + }, + "folder_structure": schema.StringAttribute{ + CustomType: fwtypes.StringEnumType[awstypes.FolderStructure](), + Required: true, + }, + "key_prefix": schema.StringAttribute{ + Optional: true, + }, + "log_file_format": schema.StringAttribute{ + CustomType: fwtypes.StringEnumType[awstypes.LogFileFormat](), + Required: true, + }, + }, + }, + }, + }, + }, + }, + }, + } +} + +func (r *sessionLoggerResource) Create(ctx context.Context, request resource.CreateRequest, response *resource.CreateResponse) { + var data sessionLoggerResourceModel + response.Diagnostics.Append(request.Plan.Get(ctx, &data)...) 
+ if response.Diagnostics.HasError() { + return + } + + conn := r.Meta().WorkSpacesWebClient(ctx) + + var input workspacesweb.CreateSessionLoggerInput + response.Diagnostics.Append(fwflex.Expand(ctx, data, &input)...) + if response.Diagnostics.HasError() { + return + } + + // Additional fields. + input.ClientToken = aws.String(sdkid.UniqueId()) + input.Tags = getTagsIn(ctx) + + output, err := conn.CreateSessionLogger(ctx, &input) + + if err != nil { + response.Diagnostics.AddError("creating WorkSpacesWeb Session Logger", err.Error()) + return + } + + data.SessionLoggerARN = fwflex.StringToFramework(ctx, output.SessionLoggerArn) + + // Get the session logger details to populate other fields + sessionLogger, err := findSessionLoggerByARN(ctx, conn, data.SessionLoggerARN.ValueString()) + + if err != nil { + response.Diagnostics.AddError(fmt.Sprintf("reading WorkSpacesWeb Session Logger (%s)", data.SessionLoggerARN.ValueString()), err.Error()) + return + } + + response.Diagnostics.Append(fwflex.Flatten(ctx, sessionLogger, &data)...) + if response.Diagnostics.HasError() { + return + } + + response.Diagnostics.Append(response.State.Set(ctx, data)...) +} + +func (r *sessionLoggerResource) Read(ctx context.Context, request resource.ReadRequest, response *resource.ReadResponse) { + var data sessionLoggerResourceModel + response.Diagnostics.Append(request.State.Get(ctx, &data)...) + if response.Diagnostics.HasError() { + return + } + + conn := r.Meta().WorkSpacesWebClient(ctx) + + output, err := findSessionLoggerByARN(ctx, conn, data.SessionLoggerARN.ValueString()) + + if tfresource.NotFound(err) { + response.Diagnostics.Append(fwdiag.NewResourceNotFoundWarningDiagnostic(err)) + response.State.RemoveResource(ctx) + return + } + + if err != nil { + response.Diagnostics.AddError(fmt.Sprintf("reading WorkSpacesWeb Session Logger (%s)", data.SessionLoggerARN.ValueString()), err.Error()) + return + } + + response.Diagnostics.Append(fwflex.Flatten(ctx, output, &data)...) 
+ if response.Diagnostics.HasError() { + return + } + + response.Diagnostics.Append(response.State.Set(ctx, data)...) +} + +func (r *sessionLoggerResource) Update(ctx context.Context, request resource.UpdateRequest, response *resource.UpdateResponse) { + var old, new sessionLoggerResourceModel + response.Diagnostics.Append(request.State.Get(ctx, &old)...) + if response.Diagnostics.HasError() { + return + } + response.Diagnostics.Append(request.Plan.Get(ctx, &new)...) + if response.Diagnostics.HasError() { + return + } + + if !new.DisplayName.Equal(old.DisplayName) || + !new.EventFilter.Equal(old.EventFilter) || + !new.LogConfiguration.Equal(old.LogConfiguration) { + conn := r.Meta().WorkSpacesWebClient(ctx) + + var input workspacesweb.UpdateSessionLoggerInput + response.Diagnostics.Append(fwflex.Expand(ctx, new, &input)...) + if response.Diagnostics.HasError() { + return + } + + output, err := conn.UpdateSessionLogger(ctx, &input) + + if err != nil { + response.Diagnostics.AddError(fmt.Sprintf("updating WorkSpacesWeb Session Logger (%s)", old.SessionLoggerARN.ValueString()), err.Error()) + return + } + + response.Diagnostics.Append(fwflex.Flatten(ctx, output.SessionLogger, &new)...) + if response.Diagnostics.HasError() { + return + } + } else { + new.LogConfiguration = old.LogConfiguration + } + + response.Diagnostics.Append(response.State.Set(ctx, new)...) +} + +func (r *sessionLoggerResource) Delete(ctx context.Context, request resource.DeleteRequest, response *resource.DeleteResponse) { + var data sessionLoggerResourceModel + response.Diagnostics.Append(request.State.Get(ctx, &data)...) 
+ if response.Diagnostics.HasError() { + return + } + + conn := r.Meta().WorkSpacesWebClient(ctx) + + input := workspacesweb.DeleteSessionLoggerInput{ + SessionLoggerArn: data.SessionLoggerARN.ValueStringPointer(), + } + _, err := conn.DeleteSessionLogger(ctx, &input) + + if errs.IsA[*awstypes.ResourceNotFoundException](err) { + return + } + + if err != nil { + response.Diagnostics.AddError(fmt.Sprintf("deleting WorkSpacesWeb Session Logger (%s)", data.SessionLoggerARN.ValueString()), err.Error()) + return + } +} + +func (r *sessionLoggerResource) ImportState(ctx context.Context, request resource.ImportStateRequest, response *resource.ImportStateResponse) { + resource.ImportStatePassthroughID(ctx, path.Root("session_logger_arn"), request, response) +} + +func findSessionLoggerByARN(ctx context.Context, conn *workspacesweb.Client, arn string) (*awstypes.SessionLogger, error) { + input := workspacesweb.GetSessionLoggerInput{ + SessionLoggerArn: &arn, + } + output, err := conn.GetSessionLogger(ctx, &input) + + if errs.IsA[*awstypes.ResourceNotFoundException](err) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, + } + } + + if err != nil { + return nil, err + } + + if output == nil || output.SessionLogger == nil { + return nil, tfresource.NewEmptyResultError(input) + } + + return output.SessionLogger, nil +} + +type sessionLoggerResourceModel struct { + framework.WithRegionModel + AdditionalEncryptionContext fwtypes.MapOfString `tfsdk:"additional_encryption_context"` + AssociatedPortalARNs fwtypes.ListOfString `tfsdk:"associated_portal_arns"` + CustomerManagedKey fwtypes.ARN `tfsdk:"customer_managed_key"` + DisplayName types.String `tfsdk:"display_name"` + EventFilter fwtypes.ListNestedObjectValueOf[eventFilterModel] `tfsdk:"event_filter"` + LogConfiguration fwtypes.ListNestedObjectValueOf[logConfigurationModel] `tfsdk:"log_configuration"` + SessionLoggerARN types.String `tfsdk:"session_logger_arn"` + Tags tftags.Map `tfsdk:"tags"` + 
TagsAll tftags.Map `tfsdk:"tags_all"` +} + +type logConfigurationModel struct { + S3 fwtypes.ListNestedObjectValueOf[s3LogConfigurationModel] `tfsdk:"s3"` +} + +type eventFilterModel struct { + All fwtypes.ListNestedObjectValueOf[eventFilterAllModel] `tfsdk:"all"` + Include fwtypes.SetOfStringEnum[awstypes.Event] `tfsdk:"include"` +} + +type eventFilterAllModel struct{} + +var ( + _ fwflex.Expander = eventFilterModel{} + _ fwflex.Flattener = &eventFilterModel{} +) + +func (m eventFilterModel) Expand(ctx context.Context) (any, diag.Diagnostics) { + var diags diag.Diagnostics + var v awstypes.EventFilter + + switch { + case !m.All.IsNull(): + v = &awstypes.EventFilterMemberAll{Value: awstypes.Unit{}} + case !m.Include.IsNull(): + v = &awstypes.EventFilterMemberInclude{Value: fwflex.ExpandFrameworkStringyValueSet[awstypes.Event](ctx, m.Include)} + } + + return v, diags +} + +func (m *eventFilterModel) Flatten(ctx context.Context, v any) diag.Diagnostics { + var diags diag.Diagnostics + + switch t := v.(type) { + case awstypes.EventFilterMemberAll: + var data eventFilterAllModel + diags.Append(fwflex.Flatten(ctx, t.Value, &data)...) 
+ if diags.HasError() { + return diags + } + m.All = fwtypes.NewListNestedObjectValueOfPtrMust(ctx, &data) + case awstypes.EventFilterMemberInclude: + m.Include = fwflex.FlattenFrameworkStringyValueSetOfStringEnum(ctx, t.Value) + } + return diags +} + +type s3LogConfigurationModel struct { + Bucket types.String `tfsdk:"bucket"` + BucketOwner types.String `tfsdk:"bucket_owner"` + FolderStructure fwtypes.StringEnum[awstypes.FolderStructure] `tfsdk:"folder_structure"` + KeyPrefix types.String `tfsdk:"key_prefix"` + LogFileFormat fwtypes.StringEnum[awstypes.LogFileFormat] `tfsdk:"log_file_format"` +} diff --git a/internal/service/workspacesweb/session_logger_association.go b/internal/service/workspacesweb/session_logger_association.go new file mode 100644 index 000000000000..ac09647b0bd7 --- /dev/null +++ b/internal/service/workspacesweb/session_logger_association.go @@ -0,0 +1,164 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package workspacesweb + +import ( + "context" + "fmt" + "slices" + + "github.com/aws/aws-sdk-go-v2/service/workspacesweb" + awstypes "github.com/aws/aws-sdk-go-v2/service/workspacesweb/types" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" + "github.com/hashicorp/terraform-provider-aws/internal/errs" + "github.com/hashicorp/terraform-provider-aws/internal/errs/fwdiag" + intflex "github.com/hashicorp/terraform-provider-aws/internal/flex" + "github.com/hashicorp/terraform-provider-aws/internal/framework" + fwtypes "github.com/hashicorp/terraform-provider-aws/internal/framework/types" + tfretry "github.com/hashicorp/terraform-provider-aws/internal/retry" +) + +// 
@FrameworkResource("aws_workspacesweb_session_logger_association", name="Session Logger Association") +// @Testing(tagsTest=false) +// @Testing(existsType="github.com/aws/aws-sdk-go-v2/service/workspacesweb/types;types.SessionLogger") +// @Testing(importStateIdAttribute="session_logger_arn,portal_arn") +func newSessionLoggerAssociationResource(_ context.Context) (resource.ResourceWithConfigure, error) { + return &sessionLoggerAssociationResource{}, nil +} + +type sessionLoggerAssociationResource struct { + framework.ResourceWithModel[sessionLoggerAssociationResourceModel] + framework.WithNoUpdate +} + +func (r *sessionLoggerAssociationResource) Schema(ctx context.Context, request resource.SchemaRequest, response *resource.SchemaResponse) { + response.Schema = schema.Schema{ + Attributes: map[string]schema.Attribute{ + "portal_arn": schema.StringAttribute{ + CustomType: fwtypes.ARNType, + Required: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + }, + "session_logger_arn": schema.StringAttribute{ + CustomType: fwtypes.ARNType, + Required: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + }, + }, + } +} + +func (r *sessionLoggerAssociationResource) Create(ctx context.Context, request resource.CreateRequest, response *resource.CreateResponse) { + var data sessionLoggerAssociationResourceModel + response.Diagnostics.Append(request.Plan.Get(ctx, &data)...) + if response.Diagnostics.HasError() { + return + } + + conn := r.Meta().WorkSpacesWebClient(ctx) + + input := workspacesweb.AssociateSessionLoggerInput{ + PortalArn: data.PortalARN.ValueStringPointer(), + SessionLoggerArn: data.SessionLoggerARN.ValueStringPointer(), + } + + _, err := conn.AssociateSessionLogger(ctx, &input) + + if err != nil { + response.Diagnostics.AddError("creating WorkSpacesWeb Session Logger Association", err.Error()) + return + } + + response.Diagnostics.Append(response.State.Set(ctx, data)...) 
+} + +func (r *sessionLoggerAssociationResource) Read(ctx context.Context, request resource.ReadRequest, response *resource.ReadResponse) { + var data sessionLoggerAssociationResourceModel + response.Diagnostics.Append(request.State.Get(ctx, &data)...) + if response.Diagnostics.HasError() { + return + } + + conn := r.Meta().WorkSpacesWebClient(ctx) + + // Check if the association exists by getting the session logger and checking associated portals + output, err := findSessionLoggerByARN(ctx, conn, data.SessionLoggerARN.ValueString()) + if tfretry.NotFound(err) { + response.Diagnostics.Append(fwdiag.NewResourceNotFoundWarningDiagnostic(err)) + response.State.RemoveResource(ctx) + return + } + + if err != nil { + response.Diagnostics.AddError(fmt.Sprintf("reading WorkSpacesWeb Session Logger Association (%s)", data.SessionLoggerARN.ValueString()), err.Error()) + return + } + + // Check if the portal is in the associated portals list + if !slices.Contains(output.AssociatedPortalArns, data.PortalARN.ValueString()) { + response.Diagnostics.Append(fwdiag.NewResourceNotFoundWarningDiagnostic(fmt.Errorf("association not found"))) + response.State.RemoveResource(ctx) + return + } + + response.Diagnostics.Append(response.State.Set(ctx, &data)...) +} + +func (r *sessionLoggerAssociationResource) Delete(ctx context.Context, request resource.DeleteRequest, response *resource.DeleteResponse) { + var data sessionLoggerAssociationResourceModel + response.Diagnostics.Append(request.State.Get(ctx, &data)...) 
+ if response.Diagnostics.HasError() { + return + } + + conn := r.Meta().WorkSpacesWebClient(ctx) + + input := workspacesweb.DisassociateSessionLoggerInput{ + PortalArn: data.PortalARN.ValueStringPointer(), + } + + _, err := conn.DisassociateSessionLogger(ctx, &input) + + if errs.IsA[*awstypes.ResourceNotFoundException](err) { + return + } + + if err != nil { + response.Diagnostics.AddError(fmt.Sprintf("deleting WorkSpacesWeb Session Logger Association (%s)", data.SessionLoggerARN.ValueString()), err.Error()) + return + } +} + +func (r *sessionLoggerAssociationResource) ImportState(ctx context.Context, request resource.ImportStateRequest, response *resource.ImportStateResponse) { + const ( + sessionLoggerAssociationIDParts = 2 + ) + parts, err := intflex.ExpandResourceId(request.ID, sessionLoggerAssociationIDParts, true) + if err != nil { + response.Diagnostics.AddError( + "Unexpected Import Identifier", + fmt.Sprintf("Expected import identifier with format: session_logger_arn,portal_arn. Got: %q", request.ID), + ) + return + } + sessionLoggerARN := parts[0] + portalARN := parts[1] + + response.Diagnostics.Append(response.State.SetAttribute(ctx, path.Root("session_logger_arn"), sessionLoggerARN)...) + response.Diagnostics.Append(response.State.SetAttribute(ctx, path.Root("portal_arn"), portalARN)...) +} + +type sessionLoggerAssociationResourceModel struct { + framework.WithRegionModel + PortalARN fwtypes.ARN `tfsdk:"portal_arn"` + SessionLoggerARN fwtypes.ARN `tfsdk:"session_logger_arn"` +} diff --git a/internal/service/workspacesweb/session_logger_association_test.go b/internal/service/workspacesweb/session_logger_association_test.go new file mode 100644 index 000000000000..f34474e7a163 --- /dev/null +++ b/internal/service/workspacesweb/session_logger_association_test.go @@ -0,0 +1,230 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package workspacesweb_test + +import ( + "context" + "fmt" + "slices" + "testing" + + awstypes "github.com/aws/aws-sdk-go-v2/service/workspacesweb/types" + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/terraform" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + tfworkspacesweb "github.com/hashicorp/terraform-provider-aws/internal/service/workspacesweb" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccWorkSpacesWebSessionLoggerAssociation_basic(t *testing.T) { + ctx := acctest.Context(t) + var sessionLogger awstypes.SessionLogger + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_workspacesweb_session_logger_association.test" + sessionLoggerResourceName := "aws_workspacesweb_session_logger.test" + portalResourceName := "aws_workspacesweb_portal.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, names.WorkSpacesWebEndpointID) + testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckSessionLoggerAssociationDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccSessionLoggerAssociationConfig_basic(rName, rName, string(awstypes.FolderStructureFlat), string(awstypes.LogFileFormatJson)), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSessionLoggerAssociationExists(ctx, resourceName, &sessionLogger), + resource.TestCheckResourceAttrPair(resourceName, "session_logger_arn", sessionLoggerResourceName, "session_logger_arn"), + 
resource.TestCheckResourceAttrPair(resourceName, "portal_arn", portalResourceName, "portal_arn"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateIdFunc: testAccSessionLoggerAssociationImportStateIdFunc(resourceName), + ImportStateVerifyIdentifierAttribute: "session_logger_arn", + }, + { + ResourceName: resourceName, + RefreshState: true, + }, + { + Config: testAccSessionLoggerAssociationConfig_basic(rName, rName, string(awstypes.FolderStructureFlat), string(awstypes.LogFileFormatJson)), + Check: resource.ComposeAggregateTestCheckFunc( + // The following checks verify the session logger and portal resources themselves, not the association resource. + resource.TestCheckResourceAttr(sessionLoggerResourceName, "associated_portal_arns.#", "1"), + resource.TestCheckResourceAttrPair(sessionLoggerResourceName, "associated_portal_arns.0", portalResourceName, "portal_arn"), + resource.TestCheckResourceAttrPair(portalResourceName, "session_logger_arn", sessionLoggerResourceName, "session_logger_arn"), + ), + }, + }, + }) +} + +func TestAccWorkSpacesWebSessionLoggerAssociation_disappears(t *testing.T) { + ctx := acctest.Context(t) + var sessionLogger awstypes.SessionLogger + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_workspacesweb_session_logger_association.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, names.WorkSpacesWebEndpointID) + testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckSessionLoggerAssociationDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccSessionLoggerAssociationConfig_basic(rName, rName, string(awstypes.FolderStructureFlat), string(awstypes.LogFileFormatJson)), + Check: resource.ComposeAggregateTestCheckFunc(
+ testAccCheckSessionLoggerAssociationExists(ctx, resourceName, &sessionLogger), + acctest.CheckFrameworkResourceDisappears(ctx, acctest.Provider, tfworkspacesweb.ResourceSessionLoggerAssociation, resourceName), + ), + ExpectNonEmptyPlan: true, + }, + }, + }) +} + +func testAccCheckSessionLoggerAssociationDestroy(ctx context.Context) resource.TestCheckFunc { + return func(s *terraform.State) error { + conn := acctest.Provider.Meta().(*conns.AWSClient).WorkSpacesWebClient(ctx) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "aws_workspacesweb_session_logger_association" { + continue + } + + sessionLogger, err := tfworkspacesweb.FindSessionLoggerByARN(ctx, conn, rs.Primary.Attributes["session_logger_arn"]) + + if tfresource.NotFound(err) { + continue + } + + if err != nil { + return err + } + + // Check if the portal is still associated + portalARN := rs.Primary.Attributes["portal_arn"] + if slices.Contains(sessionLogger.AssociatedPortalArns, portalARN) { + return fmt.Errorf("WorkSpaces Web Session Logger Association %s still exists", rs.Primary.Attributes["session_logger_arn"]) + } + } + + return nil + } +} + +func testAccCheckSessionLoggerAssociationExists(ctx context.Context, n string, v *awstypes.SessionLogger) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + conn := acctest.Provider.Meta().(*conns.AWSClient).WorkSpacesWebClient(ctx) + + output, err := tfworkspacesweb.FindSessionLoggerByARN(ctx, conn, rs.Primary.Attributes["session_logger_arn"]) + + if err != nil { + return err + } + + // Check if the portal is associated + portalARN := rs.Primary.Attributes["portal_arn"] + if !slices.Contains(output.AssociatedPortalArns, portalARN) { + return fmt.Errorf("Association not found") + } + + *v = *output + + return nil + } +} + +func testAccSessionLoggerAssociationImportStateIdFunc(resourceName string) resource.ImportStateIdFunc { + 
return func(s *terraform.State) (string, error) { + rs, ok := s.RootModule().Resources[resourceName] + if !ok { + return "", fmt.Errorf("Not found: %s", resourceName) + } + + return fmt.Sprintf("%s,%s", rs.Primary.Attributes["session_logger_arn"], rs.Primary.Attributes["portal_arn"]), nil + } +} + +func testAccSessionLoggerAssociationConfig_base(rName string) string { + return fmt.Sprintf(` +resource "aws_workspacesweb_portal" "test" { + display_name = %[1]q +} + +resource "aws_s3_bucket" "test" { + bucket = %[1]q + force_destroy = true +} + +data "aws_iam_policy_document" "allow_write_access" { + statement { + principals { + type = "Service" + identifiers = ["workspaces-web.amazonaws.com"] + } + + actions = [ + "s3:PutObject" + ] + + resources = [ + aws_s3_bucket.test.arn, + "${aws_s3_bucket.test.arn}/*" + ] + } +} + +resource "aws_s3_bucket_policy" "allow_write_access" { + bucket = aws_s3_bucket.test.id + policy = data.aws_iam_policy_document.allow_write_access.json +} +`, rName) +} + +func testAccSessionLoggerAssociationConfig_basic(rName, sessionLoggerName, folderStructureType, logFileFormat string) string { + return testAccSessionLoggerAssociationConfig_base(rName) + fmt.Sprintf(` +resource "aws_workspacesweb_session_logger" "test" { + display_name = %[1]q + + event_filter { + all {} + } + + log_configuration { + s3 { + bucket = aws_s3_bucket.test.id + folder_structure = %[2]q + log_file_format = %[3]q + } + } + + depends_on = [aws_s3_bucket_policy.allow_write_access] +} + +resource "aws_workspacesweb_session_logger_association" "test" { + portal_arn = aws_workspacesweb_portal.test.portal_arn + session_logger_arn = aws_workspacesweb_session_logger.test.session_logger_arn +} +`, sessionLoggerName, folderStructureType, logFileFormat) +} diff --git a/internal/service/workspacesweb/session_logger_tags_gen_test.go b/internal/service/workspacesweb/session_logger_tags_gen_test.go new file mode 100644 index 000000000000..32abfcb85a18 --- /dev/null +++ 
b/internal/service/workspacesweb/session_logger_tags_gen_test.go @@ -0,0 +1,2341 @@ +// Code generated by internal/generate/tagstests/main.go; DO NOT EDIT. + +package workspacesweb_test + +import ( + "testing" + + "github.com/aws/aws-sdk-go-v2/service/workspacesweb/types" + "github.com/hashicorp/terraform-plugin-testing/config" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/knownvalue" + "github.com/hashicorp/terraform-plugin-testing/plancheck" + "github.com/hashicorp/terraform-plugin-testing/statecheck" + "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccWorkSpacesWebSessionLogger_tags(t *testing.T) { + ctx := acctest.Context(t) + + var v types.SessionLogger + resourceName := "aws_workspacesweb_session_logger.test" + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), + CheckDestroy: testAccCheckSessionLoggerDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + ConfigDirectory: config.StaticDirectory("testdata/SessionLogger/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSessionLoggerExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + 
statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/SessionLogger/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, "session_logger_arn"), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "session_logger_arn", + }, + { + ConfigDirectory: config.StaticDirectory("testdata/SessionLogger/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1Updated), + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSessionLoggerExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: 
knownvalue.StringExact(acctest.CtValue1Updated), + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1Updated), + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1Updated), + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1Updated), + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/SessionLogger/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1Updated), + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, "session_logger_arn"), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "session_logger_arn", + }, + { + ConfigDirectory: config.StaticDirectory("testdata/SessionLogger/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + }, + 
Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSessionLoggerExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/SessionLogger/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, "session_logger_arn"), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "session_logger_arn", + }, + { + ConfigDirectory: config.StaticDirectory("testdata/SessionLogger/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSessionLoggerExists(ctx, resourceName, &v), + ), + ConfigStateChecks: 
[]statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/SessionLogger/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, "session_logger_arn"), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "session_logger_arn", + }, + }, + }) +} + +func TestAccWorkSpacesWebSessionLogger_tags_null(t *testing.T) { + ctx := acctest.Context(t) + + var v types.SessionLogger + resourceName := "aws_workspacesweb_session_logger.test" + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), + CheckDestroy: testAccCheckSessionLoggerDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + ConfigDirectory: config.StaticDirectory("testdata/SessionLogger/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: nil, + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + 
testAccCheckSessionLoggerExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.Null(), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.Null(), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/SessionLogger/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: nil, + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, "session_logger_arn"), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "session_logger_arn", + ImportStateVerifyIgnore: []string{ + acctest.CtTagsKey1, // The canonical value returned by the AWS API is "" + }, + }, + }, + }) +} + +func TestAccWorkSpacesWebSessionLogger_tags_EmptyMap(t *testing.T) { + ctx := acctest.Context(t) + + var v types.SessionLogger + resourceName := "aws_workspacesweb_session_logger.test" + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: 
acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), + CheckDestroy: testAccCheckSessionLoggerDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + ConfigDirectory: config.StaticDirectory("testdata/SessionLogger/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{}), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSessionLoggerExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/SessionLogger/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{}), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, "session_logger_arn"), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "session_logger_arn", + ImportStateVerifyIgnore: []string{ + acctest.CtTagsKey1, // The canonical value returned by the AWS API is "" + }, + }, + }, + }) +} + +func TestAccWorkSpacesWebSessionLogger_tags_AddOnUpdate(t *testing.T) { + ctx := 
acctest.Context(t) + + var v types.SessionLogger + resourceName := "aws_workspacesweb_session_logger.test" + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), + CheckDestroy: testAccCheckSessionLoggerDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + ConfigDirectory: config.StaticDirectory("testdata/SessionLogger/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSessionLoggerExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/SessionLogger/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSessionLoggerExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, 
tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/SessionLogger/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, "session_logger_arn"), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "session_logger_arn", + }, + }, + }) +} + +func TestAccWorkSpacesWebSessionLogger_tags_EmptyTag_OnCreate(t *testing.T) { + ctx := acctest.Context(t) + + var v types.SessionLogger + resourceName := "aws_workspacesweb_session_logger.test" + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), + CheckDestroy: testAccCheckSessionLoggerDestroy(ctx), + ProtoV5ProviderFactories: 
acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + ConfigDirectory: config.StaticDirectory("testdata/SessionLogger/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSessionLoggerExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/SessionLogger/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, "session_logger_arn"), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "session_logger_arn", + }, + { + ConfigDirectory: 
config.StaticDirectory("testdata/SessionLogger/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSessionLoggerExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/SessionLogger/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, "session_logger_arn"), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "session_logger_arn", + }, + }, + }) +} + +func TestAccWorkSpacesWebSessionLogger_tags_EmptyTag_OnUpdate_Add(t *testing.T) { + ctx := acctest.Context(t) + + var v types.SessionLogger + resourceName := "aws_workspacesweb_session_logger.test" + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), + CheckDestroy: testAccCheckSessionLoggerDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + 
{ + ConfigDirectory: config.StaticDirectory("testdata/SessionLogger/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSessionLoggerExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/SessionLogger/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + acctest.CtKey2: config.StringVariable(""), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSessionLoggerExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, 
tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + acctest.CtKey2: knownvalue.StringExact(""), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + acctest.CtKey2: knownvalue.StringExact(""), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + acctest.CtKey2: knownvalue.StringExact(""), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + acctest.CtKey2: knownvalue.StringExact(""), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/SessionLogger/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + acctest.CtKey2: config.StringVariable(""), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, "session_logger_arn"), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "session_logger_arn", + }, + { + ConfigDirectory: config.StaticDirectory("testdata/SessionLogger/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: 
resource.ComposeAggregateTestCheckFunc( + testAccCheckSessionLoggerExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/SessionLogger/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, "session_logger_arn"), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "session_logger_arn", + }, + }, + }) +} + +func TestAccWorkSpacesWebSessionLogger_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { + ctx := acctest.Context(t) + + var v types.SessionLogger + resourceName := "aws_workspacesweb_session_logger.test" + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { 
acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), + CheckDestroy: testAccCheckSessionLoggerDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + ConfigDirectory: config.StaticDirectory("testdata/SessionLogger/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSessionLoggerExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/SessionLogger/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + }, + Check: 
resource.ComposeAggregateTestCheckFunc( + testAccCheckSessionLoggerExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/SessionLogger/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, "session_logger_arn"), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "session_logger_arn", + }, + }, + }) +} + +func TestAccWorkSpacesWebSessionLogger_tags_DefaultTags_providerOnly(t *testing.T) { + ctx := acctest.Context(t) + + var v types.SessionLogger + resourceName := "aws_workspacesweb_session_logger.test" + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, 
names.WorkSpacesWebServiceID), + CheckDestroy: testAccCheckSessionLoggerDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/SessionLogger/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSessionLoggerExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/SessionLogger/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, "session_logger_arn"), + 
ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "session_logger_arn", + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/SessionLogger/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1Updated), + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSessionLoggerExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1Updated), + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1Updated), + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/SessionLogger/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1Updated), + 
acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, "session_logger_arn"), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "session_logger_arn", + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/SessionLogger/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSessionLoggerExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/SessionLogger/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey2: 
config.StringVariable(acctest.CtValue2), + }), + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, "session_logger_arn"), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "session_logger_arn", + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/SessionLogger/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSessionLoggerExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/SessionLogger/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, "session_logger_arn"), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "session_logger_arn", + }, + }, + }) +} + +func TestAccWorkSpacesWebSessionLogger_tags_DefaultTags_nonOverlapping(t *testing.T) { + ctx := acctest.Context(t) 
+ + var v types.SessionLogger + resourceName := "aws_workspacesweb_session_logger.test" + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), + CheckDestroy: testAccCheckSessionLoggerDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/SessionLogger/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSessionLoggerExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1), + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + 
plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1), + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/SessionLogger/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, "session_logger_arn"), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "session_logger_arn", + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/SessionLogger/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1Updated), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: config.StringVariable(acctest.CtResourceValue2), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSessionLoggerExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), 
knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1Updated), + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1Updated), + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/SessionLogger/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1Updated), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1Updated), + 
acctest.CtResourceKey2: config.StringVariable(acctest.CtResourceValue2), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, "session_logger_arn"), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "session_logger_arn", + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/SessionLogger/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSessionLoggerExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/SessionLogger/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, "session_logger_arn"), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "session_logger_arn", + }, + }, + }) +} + +func TestAccWorkSpacesWebSessionLogger_tags_DefaultTags_overlapping(t *testing.T) { + ctx := acctest.Context(t) + 
+ var v types.SessionLogger + resourceName := "aws_workspacesweb_session_logger.test" + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), + CheckDestroy: testAccCheckSessionLoggerDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/SessionLogger/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtResourceValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSessionLoggerExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), 
knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/SessionLogger/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtResourceValue1), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, "session_logger_arn"), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "session_logger_arn", + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/SessionLogger/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtProviderValue1), + acctest.CtOverlapKey2: config.StringVariable("providervalue2"), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtResourceValue1), + acctest.CtOverlapKey2: config.StringVariable(acctest.CtResourceValue2), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSessionLoggerExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + acctest.CtOverlapKey2: 
knownvalue.StringExact(acctest.CtResourceValue2), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + acctest.CtOverlapKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + acctest.CtOverlapKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + acctest.CtOverlapKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/SessionLogger/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtProviderValue1), + acctest.CtOverlapKey2: config.StringVariable("providervalue2"), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtResourceValue1), + acctest.CtOverlapKey2: config.StringVariable(acctest.CtResourceValue2), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, "session_logger_arn"), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: 
"session_logger_arn", + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/SessionLogger/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtResourceValue2), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSessionLoggerExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue2), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/SessionLogger/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + 
acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtResourceValue2), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, "session_logger_arn"), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "session_logger_arn", + }, + }, + }) +} + +func TestAccWorkSpacesWebSessionLogger_tags_DefaultTags_updateToProviderOnly(t *testing.T) { + ctx := acctest.Context(t) + + var v types.SessionLogger + resourceName := "aws_workspacesweb_session_logger.test" + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), + CheckDestroy: testAccCheckSessionLoggerDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/SessionLogger/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSessionLoggerExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + 
}, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/SessionLogger/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSessionLoggerExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: 
config.StaticDirectory("testdata/SessionLogger/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, "session_logger_arn"), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "session_logger_arn", + }, + }, + }) +} + +func TestAccWorkSpacesWebSessionLogger_tags_DefaultTags_updateToResourceOnly(t *testing.T) { + ctx := acctest.Context(t) + + var v types.SessionLogger + resourceName := "aws_workspacesweb_session_logger.test" + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), + CheckDestroy: testAccCheckSessionLoggerDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/SessionLogger/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSessionLoggerExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: 
resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/SessionLogger/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSessionLoggerExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + 
ConfigDirectory: config.StaticDirectory("testdata/SessionLogger/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, "session_logger_arn"), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "session_logger_arn", + }, + }, + }) +} + +func TestAccWorkSpacesWebSessionLogger_tags_DefaultTags_emptyResourceTag(t *testing.T) { + ctx := acctest.Context(t) + + var v types.SessionLogger + resourceName := "aws_workspacesweb_session_logger.test" + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), + CheckDestroy: testAccCheckSessionLoggerDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/SessionLogger/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSessionLoggerExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), 
knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/SessionLogger/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, "session_logger_arn"), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "session_logger_arn", + }, + }, + }) +} + +func TestAccWorkSpacesWebSessionLogger_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T) { + ctx := acctest.Context(t) + + var v types.SessionLogger + resourceName := "aws_workspacesweb_session_logger.test" + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), + CheckDestroy: testAccCheckSessionLoggerDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + 
ConfigDirectory: config.StaticDirectory("testdata/SessionLogger/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSessionLoggerExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/SessionLogger/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, "session_logger_arn"), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "session_logger_arn", + }, + }, + }) +} + +func TestAccWorkSpacesWebSessionLogger_tags_DefaultTags_nullOverlappingResourceTag(t *testing.T) { + ctx := acctest.Context(t) + + var 
v types.SessionLogger + resourceName := "aws_workspacesweb_session_logger.test" + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), + CheckDestroy: testAccCheckSessionLoggerDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/SessionLogger/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: nil, + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSessionLoggerExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.Null(), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.Null(), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + 
ConfigDirectory: config.StaticDirectory("testdata/SessionLogger/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: nil, + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, "session_logger_arn"), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "session_logger_arn", + ImportStateVerifyIgnore: []string{ + acctest.CtTagsKey1, // The canonical value returned by the AWS API is "" + }, + }, + }, + }) +} + +func TestAccWorkSpacesWebSessionLogger_tags_DefaultTags_nullNonOverlappingResourceTag(t *testing.T) { + ctx := acctest.Context(t) + + var v types.SessionLogger + resourceName := "aws_workspacesweb_session_logger.test" + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), + CheckDestroy: testAccCheckSessionLoggerDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/SessionLogger/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: nil, + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSessionLoggerExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + 
statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.Null(), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(""), + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.Null(), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(""), + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/SessionLogger/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: nil, + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, "session_logger_arn"), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "session_logger_arn", + ImportStateVerifyIgnore: []string{ + "tags.resourcekey1", // The canonical value returned by the AWS API is "" + }, + }, + }, + }) +} + +func 
TestAccWorkSpacesWebSessionLogger_tags_ComputedTag_OnCreate(t *testing.T) { + ctx := acctest.Context(t) + + var v types.SessionLogger + resourceName := "aws_workspacesweb_session_logger.test" + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), + CheckDestroy: testAccCheckSessionLoggerDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/SessionLogger/tagsComputed1/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "unknownTagKey": config.StringVariable("computedkey1"), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSessionLoggerExists(ctx, resourceName, &v), + resource.TestCheckResourceAttrPair(resourceName, "tags.computedkey1", "null_resource.test", names.AttrID), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapSizeExact(1)), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapSizeExact(1)), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTags).AtMapKey("computedkey1")), + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: 
config.StaticDirectory("testdata/SessionLogger/tagsComputed1/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "unknownTagKey": config.StringVariable("computedkey1"), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, "session_logger_arn"), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "session_logger_arn", + }, + }, + }) +} + +func TestAccWorkSpacesWebSessionLogger_tags_ComputedTag_OnUpdate_Add(t *testing.T) { + ctx := acctest.Context(t) + + var v types.SessionLogger + resourceName := "aws_workspacesweb_session_logger.test" + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), + CheckDestroy: testAccCheckSessionLoggerDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/SessionLogger/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSessionLoggerExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + 
plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/SessionLogger/tagsComputed2/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "unknownTagKey": config.StringVariable("computedkey1"), + "knownTagKey": config.StringVariable(acctest.CtKey1), + "knownTagValue": config.StringVariable(acctest.CtValue1), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSessionLoggerExists(ctx, resourceName, &v), + resource.TestCheckResourceAttrPair(resourceName, "tags.computedkey1", "null_resource.test", names.AttrID), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapSizeExact(2)), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapPartial(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapSizeExact(2)), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapPartial(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectUnknownValue(resourceName, 
tfjsonpath.New(names.AttrTags).AtMapKey("computedkey1")), + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/SessionLogger/tagsComputed2/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "unknownTagKey": config.StringVariable("computedkey1"), + "knownTagKey": config.StringVariable(acctest.CtKey1), + "knownTagValue": config.StringVariable(acctest.CtValue1), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, "session_logger_arn"), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "session_logger_arn", + }, + }, + }) +} + +func TestAccWorkSpacesWebSessionLogger_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { + ctx := acctest.Context(t) + + var v types.SessionLogger + resourceName := "aws_workspacesweb_session_logger.test" + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), + CheckDestroy: testAccCheckSessionLoggerDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/SessionLogger/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: 
resource.ComposeAggregateTestCheckFunc( + testAccCheckSessionLoggerExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/SessionLogger/tagsComputed1/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "unknownTagKey": config.StringVariable(acctest.CtKey1), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSessionLoggerExists(ctx, resourceName, &v), + resource.TestCheckResourceAttrPair(resourceName, acctest.CtTagsKey1, "null_resource.test", names.AttrID), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapSizeExact(1)), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapSizeExact(1)), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + 
plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTags).AtMapKey(acctest.CtKey1)), + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/SessionLogger/tagsComputed1/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "unknownTagKey": config.StringVariable(acctest.CtKey1), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, "session_logger_arn"), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "session_logger_arn", + }, + }, + }) +} + +func TestAccWorkSpacesWebSessionLogger_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { + ctx := acctest.Context(t) + + var v types.SessionLogger + resourceName := "aws_workspacesweb_session_logger.test" + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), + CheckDestroy: testAccCheckSessionLoggerDestroy(ctx), + Steps: []resource.TestStep{ + // 1: Create + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/SessionLogger/tags_ignore/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: 
config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1), + }), + "ignore_tag_keys": config.SetVariable( + config.StringVariable(acctest.CtProviderKey1), + ), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSessionLoggerExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + expectFullResourceTags(ctx, resourceName, knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1), // TODO: Should not be set + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectEmptyPlan(), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectEmptyPlan(), + }, + }, + }, + // 2: Update ignored tag only + { + ProtoV5ProviderFactories: 
acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/SessionLogger/tags_ignore/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1Updated), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1), + }), + "ignore_tag_keys": config.SetVariable( + config.StringVariable(acctest.CtProviderKey1), + ), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSessionLoggerExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + expectFullResourceTags(ctx, resourceName, knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1), // TODO: Should not be set + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: 
knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectEmptyPlan(), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectEmptyPlan(), + }, + }, + }, + // 3: Update both tags + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/SessionLogger/tags_ignore/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1Again), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1Updated), + }), + "ignore_tag_keys": config.SetVariable( + config.StringVariable(acctest.CtProviderKey1), + ), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSessionLoggerExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + })), + expectFullResourceTags(ctx, resourceName, knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1), // TODO: Should not be set + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + 
plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + })), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectEmptyPlan(), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectEmptyPlan(), + }, + }, + }, + }, + }) +} + +func TestAccWorkSpacesWebSessionLogger_tags_IgnoreTags_Overlap_ResourceTag(t *testing.T) { + ctx := acctest.Context(t) + + var v types.SessionLogger + resourceName := "aws_workspacesweb_session_logger.test" + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), + CheckDestroy: testAccCheckSessionLoggerDestroy(ctx), + Steps: []resource.TestStep{ + // 1: Create + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/SessionLogger/tags_ignore/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1), + acctest.CtResourceKey2: config.StringVariable(acctest.CtResourceValue2), + }), + "ignore_tag_keys": config.SetVariable( + config.StringVariable(acctest.CtResourceKey1), + ), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSessionLoggerExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), 
knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + expectFullResourceTags(ctx, resourceName, knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), // TODO: Should not be set + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectEmptyPlan(), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), // TODO: Should be NoOp + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + 
plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + }, + ExpectNonEmptyPlan: true, + }, + // 2: Update ignored tag + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/SessionLogger/tags_ignore/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: config.StringVariable(acctest.CtResourceValue2), + }), + "ignore_tag_keys": config.SetVariable( + config.StringVariable(acctest.CtResourceKey1), + ), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSessionLoggerExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + expectFullResourceTags(ctx, resourceName, knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), // TODO: Should not be set + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, 
tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectEmptyPlan(), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), // TODO: Should be NoOp + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + }, + ExpectNonEmptyPlan: true, + }, + // 3: Update both tags + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/SessionLogger/tags_ignore/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1Again), + acctest.CtResourceKey2: config.StringVariable(acctest.CtResourceValue2Updated), + }), + "ignore_tag_keys": config.SetVariable( + config.StringVariable(acctest.CtResourceKey1), + ), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSessionLoggerExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + 
statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Again), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + expectFullResourceTags(ctx, resourceName, knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), // TODO: Should not be set + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Again), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectEmptyPlan(), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), // TODO: Should be NoOp + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Again), + acctest.CtResourceKey2: 
knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + }, + }, + ExpectNonEmptyPlan: true, + }, + }, + }) +} diff --git a/internal/service/workspacesweb/session_logger_test.go b/internal/service/workspacesweb/session_logger_test.go new file mode 100644 index 000000000000..024aca7bfeb5 --- /dev/null +++ b/internal/service/workspacesweb/session_logger_test.go @@ -0,0 +1,464 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package workspacesweb_test + +import ( + "context" + "fmt" + "testing" + + "github.com/YakDriver/regexache" + awstypes "github.com/aws/aws-sdk-go-v2/service/workspacesweb/types" + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/terraform" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + tfworkspacesweb "github.com/hashicorp/terraform-provider-aws/internal/service/workspacesweb" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccWorkSpacesWebSessionLogger_basic(t *testing.T) { + ctx := acctest.Context(t) + var sessionLogger awstypes.SessionLogger + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_workspacesweb_session_logger.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, names.WorkSpacesWebEndpointID) + testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + 
CheckDestroy: testAccCheckSessionLoggerDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccSessionLoggerConfig_basic(rName, string(awstypes.FolderStructureFlat), string(awstypes.LogFileFormatJson)), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSessionLoggerExists(ctx, resourceName, &sessionLogger), + resource.TestCheckResourceAttr(resourceName, names.AttrDisplayName, rName), + resource.TestCheckResourceAttr(resourceName, "log_configuration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "log_configuration.0.s3.#", "1"), + resource.TestCheckResourceAttrPair(resourceName, "log_configuration.0.s3.0.bucket", "aws_s3_bucket.test", names.AttrID), + resource.TestCheckResourceAttr(resourceName, "log_configuration.0.s3.0.folder_structure", string(awstypes.FolderStructureFlat)), + resource.TestCheckResourceAttr(resourceName, "log_configuration.0.s3.0.log_file_format", string(awstypes.LogFileFormatJson)), + resource.TestCheckResourceAttr(resourceName, "event_filter.0.all.#", "1"), + acctest.MatchResourceAttrRegionalARN(ctx, resourceName, "session_logger_arn", "workspaces-web", regexache.MustCompile(`sessionLogger/.+$`)), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, "session_logger_arn"), + ImportStateVerifyIdentifierAttribute: "session_logger_arn", + }, + }, + }) +} + +func TestAccWorkSpacesWebSessionLogger_complete(t *testing.T) { + ctx := acctest.Context(t) + var sessionLogger awstypes.SessionLogger + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_workspacesweb_session_logger.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, names.WorkSpacesWebEndpointID) + testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), + ProtoV5ProviderFactories: 
acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckSessionLoggerDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccSessionLoggerConfig_complete(rName, string(awstypes.FolderStructureFlat), string(awstypes.LogFileFormatJson), string(awstypes.EventSessionStart)), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSessionLoggerExists(ctx, resourceName, &sessionLogger), + resource.TestCheckResourceAttr(resourceName, names.AttrDisplayName, rName), + resource.TestCheckResourceAttrSet(resourceName, "customer_managed_key"), + resource.TestCheckResourceAttr(resourceName, "additional_encryption_context.%", "1"), + resource.TestCheckResourceAttr(resourceName, "additional_encryption_context.test", names.AttrValue), + resource.TestCheckResourceAttrSet(resourceName, "log_configuration.0.s3.0.bucket_owner"), + resource.TestCheckResourceAttr(resourceName, "log_configuration.0.s3.0.key_prefix", "logs/"), + resource.TestCheckResourceAttr(resourceName, "event_filter.0.include.#", "1"), + resource.TestCheckTypeSetElemAttr(resourceName, "event_filter.0.include.*", string(awstypes.EventSessionStart)), + ), + }, + }, + }) +} + +func TestAccWorkSpacesWebSessionLogger_update(t *testing.T) { + ctx := acctest.Context(t) + var sessionLogger awstypes.SessionLogger + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName2 := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_workspacesweb_session_logger.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, names.WorkSpacesWebEndpointID) + testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckSessionLoggerDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccSessionLoggerConfig_basic(rName, string(awstypes.FolderStructureFlat), 
string(awstypes.LogFileFormatJson)), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSessionLoggerExists(ctx, resourceName, &sessionLogger), + resource.TestCheckResourceAttr(resourceName, names.AttrDisplayName, rName), + resource.TestCheckResourceAttr(resourceName, "log_configuration.0.s3.0.folder_structure", string(awstypes.FolderStructureFlat)), + resource.TestCheckResourceAttr(resourceName, "log_configuration.0.s3.0.log_file_format", string(awstypes.LogFileFormatJson)), + resource.TestCheckResourceAttr(resourceName, "event_filter.0.all.#", "1"), + resource.TestCheckResourceAttr(resourceName, "event_filter.0.include.#", "0"), + ), + }, + { + Config: testAccSessionLoggerConfig_update(rName2, string(awstypes.FolderStructureNestedByDate), string(awstypes.LogFileFormatJsonLines), string(awstypes.EventSessionStart), string(awstypes.EventSessionEnd)), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSessionLoggerExists(ctx, resourceName, &sessionLogger), + resource.TestCheckResourceAttr(resourceName, names.AttrDisplayName, rName2), + resource.TestCheckResourceAttr(resourceName, "log_configuration.0.s3.0.folder_structure", string(awstypes.FolderStructureNestedByDate)), + resource.TestCheckResourceAttr(resourceName, "log_configuration.0.s3.0.log_file_format", string(awstypes.LogFileFormatJsonLines)), + resource.TestCheckResourceAttr(resourceName, "log_configuration.0.s3.0.key_prefix", "updated-logs/"), + resource.TestCheckResourceAttr(resourceName, "event_filter.0.all.#", "0"), + resource.TestCheckResourceAttr(resourceName, "event_filter.0.include.#", "2"), + resource.TestCheckTypeSetElemAttr(resourceName, "event_filter.0.include.*", string(awstypes.EventSessionStart)), + resource.TestCheckTypeSetElemAttr(resourceName, "event_filter.0.include.*", string(awstypes.EventSessionEnd)), + ), + }, + }, + }) +} + +func TestAccWorkSpacesWebSessionLogger_customerManagedKey(t *testing.T) { + ctx := acctest.Context(t) + var sessionLogger 
awstypes.SessionLogger + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_workspacesweb_session_logger.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, names.WorkSpacesWebEndpointID) + testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckSessionLoggerDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccSessionLoggerConfig_customerManagedKey(rName, string(awstypes.FolderStructureFlat), string(awstypes.LogFileFormatJson)), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSessionLoggerExists(ctx, resourceName, &sessionLogger), + resource.TestCheckResourceAttr(resourceName, names.AttrDisplayName, rName), + resource.TestCheckResourceAttrPair(resourceName, "customer_managed_key", "aws_kms_key.test", names.AttrARN), + ), + }, + }, + }) +} + +func TestAccWorkSpacesWebSessionLogger_additionalEncryptionContext(t *testing.T) { + ctx := acctest.Context(t) + var sessionLogger awstypes.SessionLogger + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_workspacesweb_session_logger.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, names.WorkSpacesWebEndpointID) + testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckSessionLoggerDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccSessionLoggerConfig_additionalEncryptionContext(rName, string(awstypes.FolderStructureFlat), string(awstypes.LogFileFormatJson)), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSessionLoggerExists(ctx, resourceName, &sessionLogger), + 
resource.TestCheckResourceAttr(resourceName, names.AttrDisplayName, rName), + resource.TestCheckResourceAttr(resourceName, "additional_encryption_context.%", "1"), + resource.TestCheckResourceAttr(resourceName, "additional_encryption_context.test", names.AttrValue), + ), + }, + }, + }) +} + +func TestAccWorkSpacesWebSessionLogger_disappears(t *testing.T) { + ctx := acctest.Context(t) + var sessionLogger awstypes.SessionLogger + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_workspacesweb_session_logger.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, names.WorkSpacesWebEndpointID) + testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckSessionLoggerDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccSessionLoggerConfig_basic(rName, string(awstypes.FolderStructureFlat), string(awstypes.LogFileFormatJson)), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSessionLoggerExists(ctx, resourceName, &sessionLogger), + acctest.CheckFrameworkResourceDisappears(ctx, acctest.Provider, tfworkspacesweb.ResourceSessionLogger, resourceName), + ), + ExpectNonEmptyPlan: true, + }, + }, + }) +} + +func testAccCheckSessionLoggerDestroy(ctx context.Context) resource.TestCheckFunc { + return func(s *terraform.State) error { + conn := acctest.Provider.Meta().(*conns.AWSClient).WorkSpacesWebClient(ctx) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "aws_workspacesweb_session_logger" { + continue + } + + _, err := tfworkspacesweb.FindSessionLoggerByARN(ctx, conn, rs.Primary.Attributes["session_logger_arn"]) + + if tfresource.NotFound(err) { + continue + } + + if err != nil { + return err + } + + return fmt.Errorf("WorkSpaces Web Session Logger %s still exists", 
rs.Primary.Attributes["session_logger_arn"]) + } + + return nil + } +} + +func testAccCheckSessionLoggerExists(ctx context.Context, n string, v *awstypes.SessionLogger) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + conn := acctest.Provider.Meta().(*conns.AWSClient).WorkSpacesWebClient(ctx) + + output, err := tfworkspacesweb.FindSessionLoggerByARN(ctx, conn, rs.Primary.Attributes["session_logger_arn"]) + + if err != nil { + return err + } + + *v = *output + + return nil + } +} + +func testAccSessionLoggerConfig_s3Base(rName string) string { + return fmt.Sprintf(` +resource "aws_s3_bucket" "test" { + bucket = %[1]q + force_destroy = true +} + +data "aws_iam_policy_document" "allow_write_access" { + statement { + principals { + type = "Service" + identifiers = ["workspaces-web.amazonaws.com"] + } + + actions = [ + "s3:PutObject" + ] + + resources = [ + aws_s3_bucket.test.arn, + "${aws_s3_bucket.test.arn}/*" + ] + } +} + +resource "aws_s3_bucket_policy" "allow_write_access" { + bucket = aws_s3_bucket.test.id + policy = data.aws_iam_policy_document.allow_write_access.json +} +`, rName) +} + +func testAccSessionLoggerConfig_kmsBase(rName string) string { + return testAccSessionLoggerConfig_s3Base(rName) + ` +data "aws_partition" "current" {} + +data "aws_caller_identity" "current" {} + +data "aws_iam_policy_document" "kms_key_policy" { + statement { + principals { + type = "AWS" + identifiers = ["arn:${data.aws_partition.current.partition}:iam::${data.aws_caller_identity.current.account_id}:root"] + } + actions = ["kms:*"] + resources = ["*"] + } + + statement { + principals { + type = "Service" + identifiers = ["workspaces-web.amazonaws.com"] + } + actions = [ + "kms:Encrypt", + "kms:GenerateDataKey*", + "kms:ReEncrypt*", + "kms:Decrypt" + ] + resources = ["*"] + } +} + +resource "aws_kms_key" "test" { + description = "Test key for session logger" + 
policy = data.aws_iam_policy_document.kms_key_policy.json +} +` +} + +func testAccSessionLoggerConfig_basic(rName, folderStructureType, logFileFormat string) string { + return testAccSessionLoggerConfig_s3Base(rName) + fmt.Sprintf(` +resource "aws_workspacesweb_session_logger" "test" { + display_name = %[1]q + + log_configuration { + s3 { + bucket = aws_s3_bucket.test.id + folder_structure = %[2]q + log_file_format = %[3]q + } + } + + event_filter { + all {} + } + + depends_on = [aws_s3_bucket_policy.allow_write_access] +} +`, rName, folderStructureType, logFileFormat) +} + +func testAccSessionLoggerConfig_complete(rName, folderStructureType, logFileFormat, event string) string { + return testAccSessionLoggerConfig_kmsBase(rName) + fmt.Sprintf(` +resource "aws_workspacesweb_session_logger" "test" { + display_name = %[1]q + customer_managed_key = aws_kms_key.test.arn + additional_encryption_context = { + test = "value" + } + + log_configuration { + s3 { + bucket = aws_s3_bucket.test.id + bucket_owner = data.aws_caller_identity.current.account_id + folder_structure = %[2]q + key_prefix = "logs/" + log_file_format = %[3]q + } + } + + event_filter { + include = [%[4]q] + } + + depends_on = [aws_s3_bucket_policy.allow_write_access] +} +`, rName, folderStructureType, logFileFormat, event) +} + +func testAccSessionLoggerConfig_update(rName, folderStructureType, logFileFormat, event1, event2 string) string { + return testAccSessionLoggerConfig_s3Base(rName) + fmt.Sprintf(` +resource "aws_workspacesweb_session_logger" "test" { + display_name = %[1]q + + log_configuration { + s3 { + bucket = aws_s3_bucket.test.id + folder_structure = %[2]q + key_prefix = "updated-logs/" + log_file_format = %[3]q + } + } + + event_filter { + include = [%[4]q, %[5]q] + } + + depends_on = [aws_s3_bucket_policy.allow_write_access] +} +`, rName, folderStructureType, logFileFormat, event1, event2) +} + +func testAccSessionLoggerConfig_customerManagedKey(rName, folderStructureType, logFileFormat 
string) string { + return testAccSessionLoggerConfig_kmsBase(rName) + fmt.Sprintf(` +resource "aws_workspacesweb_session_logger" "test" { + display_name = %[1]q + customer_managed_key = aws_kms_key.test.arn + + log_configuration { + s3 { + bucket = aws_s3_bucket.test.id + folder_structure = %[2]q + log_file_format = %[3]q + } + } + + event_filter { + all {} + } + + depends_on = [aws_s3_bucket_policy.allow_write_access] +} +`, rName, folderStructureType, logFileFormat) +} + +func testAccSessionLoggerConfig_additionalEncryptionContext(rName, folderStructureType, logFileFormat string) string { + return testAccSessionLoggerConfig_s3Base(rName) + fmt.Sprintf(` +resource "aws_workspacesweb_session_logger" "test" { + display_name = %[1]q + additional_encryption_context = { + test = "value" + } + + log_configuration { + s3 { + bucket = aws_s3_bucket.test.id + folder_structure = %[2]q + log_file_format = %[3]q + } + } + + event_filter { + all {} + } + + depends_on = [aws_s3_bucket_policy.allow_write_access] +} +`, rName, folderStructureType, logFileFormat) +} diff --git a/internal/service/workspacesweb/tags_gen.go b/internal/service/workspacesweb/tags_gen.go index a1ed6e64548a..e7e0153da379 100644 --- a/internal/service/workspacesweb/tags_gen.go +++ b/internal/service/workspacesweb/tags_gen.go @@ -3,8 +3,8 @@ package workspacesweb import ( "context" - "fmt" + "github.com/YakDriver/smarterr" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/workspacesweb" awstypes "github.com/aws/aws-sdk-go-v2/service/workspacesweb/types" @@ -27,7 +27,7 @@ func listTags(ctx context.Context, conn *workspacesweb.Client, identifier string output, err := conn.ListTagsForResource(ctx, &input, optFns...) 
if err != nil { - return tftags.New(ctx, nil), err + return tftags.New(ctx, nil), smarterr.NewError(err) } return keyValueTags(ctx, output.Tags), nil @@ -39,7 +39,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri tags, err := listTags(ctx, meta.(*conns.AWSClient).WorkSpacesWebClient(ctx), identifier) if err != nil { - return err + return smarterr.NewError(err) } if inContext, ok := tftags.FromContext(ctx); ok { @@ -117,7 +117,7 @@ func updateTags(ctx context.Context, conn *workspacesweb.Client, identifier stri _, err := conn.UntagResource(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("untagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } @@ -132,7 +132,7 @@ func updateTags(ctx context.Context, conn *workspacesweb.Client, identifier stri _, err := conn.TagResource(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("tagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } diff --git a/internal/service/workspacesweb/testdata/IdentityProvider/tags/main_gen.tf b/internal/service/workspacesweb/testdata/IdentityProvider/tags/main_gen.tf new file mode 100644 index 000000000000..ffc430fda93a --- /dev/null +++ b/internal/service/workspacesweb/testdata/IdentityProvider/tags/main_gen.tf @@ -0,0 +1,26 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resource "aws_workspacesweb_identity_provider" "test" { + identity_provider_name = "test" + identity_provider_type = "SAML" + portal_arn = aws_workspacesweb_portal.test.portal_arn + + identity_provider_details = { + MetadataFile = file("./testfixtures/saml-metadata.xml") + } + + tags = var.resource_tags + +} + +resource "aws_workspacesweb_portal" "test" { + display_name = "test" +} + +variable "resource_tags" { + description = "Tags to set on resource. 
To specify no tags, set to `null`" + # Not setting a default, so that this must explicitly be set to `null` to specify no tags + type = map(string) + nullable = true +} diff --git a/internal/service/workspacesweb/testdata/IdentityProvider/tagsComputed1/main_gen.tf b/internal/service/workspacesweb/testdata/IdentityProvider/tagsComputed1/main_gen.tf new file mode 100644 index 000000000000..8d416e77279b --- /dev/null +++ b/internal/service/workspacesweb/testdata/IdentityProvider/tagsComputed1/main_gen.tf @@ -0,0 +1,30 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +provider "null" {} + +resource "aws_workspacesweb_identity_provider" "test" { + identity_provider_name = "test" + identity_provider_type = "SAML" + portal_arn = aws_workspacesweb_portal.test.portal_arn + + identity_provider_details = { + MetadataFile = file("./testfixtures/saml-metadata.xml") + } + + tags = { + (var.unknownTagKey) = null_resource.test.id + } + +} + +resource "aws_workspacesweb_portal" "test" { + display_name = "test" +} + +resource "null_resource" "test" {} + +variable "unknownTagKey" { + type = string + nullable = false +} diff --git a/internal/service/workspacesweb/testdata/IdentityProvider/tagsComputed2/main_gen.tf b/internal/service/workspacesweb/testdata/IdentityProvider/tagsComputed2/main_gen.tf new file mode 100644 index 000000000000..5d7fc45282c9 --- /dev/null +++ b/internal/service/workspacesweb/testdata/IdentityProvider/tagsComputed2/main_gen.tf @@ -0,0 +1,41 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +provider "null" {} + +resource "aws_workspacesweb_identity_provider" "test" { + identity_provider_name = "test" + identity_provider_type = "SAML" + portal_arn = aws_workspacesweb_portal.test.portal_arn + + identity_provider_details = { + MetadataFile = file("./testfixtures/saml-metadata.xml") + } + + tags = { + (var.unknownTagKey) = null_resource.test.id + (var.knownTagKey) = var.knownTagValue + } + +} + +resource "aws_workspacesweb_portal" "test" { + display_name = "test" +} + +resource "null_resource" "test" {} + +variable "unknownTagKey" { + type = string + nullable = false +} + +variable "knownTagKey" { + type = string + nullable = false +} + +variable "knownTagValue" { + type = string + nullable = false +} diff --git a/internal/service/workspacesweb/testdata/IdentityProvider/tags_defaults/main_gen.tf b/internal/service/workspacesweb/testdata/IdentityProvider/tags_defaults/main_gen.tf new file mode 100644 index 000000000000..f6ad7382aae1 --- /dev/null +++ b/internal/service/workspacesweb/testdata/IdentityProvider/tags_defaults/main_gen.tf @@ -0,0 +1,37 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +provider "aws" { + default_tags { + tags = var.provider_tags + } +} + +resource "aws_workspacesweb_identity_provider" "test" { + identity_provider_name = "test" + identity_provider_type = "SAML" + portal_arn = aws_workspacesweb_portal.test.portal_arn + + identity_provider_details = { + MetadataFile = file("./testfixtures/saml-metadata.xml") + } + + tags = var.resource_tags + +} + +resource "aws_workspacesweb_portal" "test" { + display_name = "test" +} + +variable "resource_tags" { + description = "Tags to set on resource. 
To specify no tags, set to `null`" + # Not setting a default, so that this must explicitly be set to `null` to specify no tags + type = map(string) + nullable = true +} + +variable "provider_tags" { + type = map(string) + nullable = false +} diff --git a/internal/service/workspacesweb/testdata/IdentityProvider/tags_ignore/main_gen.tf b/internal/service/workspacesweb/testdata/IdentityProvider/tags_ignore/main_gen.tf new file mode 100644 index 000000000000..401b72e64cc4 --- /dev/null +++ b/internal/service/workspacesweb/testdata/IdentityProvider/tags_ignore/main_gen.tf @@ -0,0 +1,46 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +provider "aws" { + default_tags { + tags = var.provider_tags + } + ignore_tags { + keys = var.ignore_tag_keys + } +} + +resource "aws_workspacesweb_identity_provider" "test" { + identity_provider_name = "test" + identity_provider_type = "SAML" + portal_arn = aws_workspacesweb_portal.test.portal_arn + + identity_provider_details = { + MetadataFile = file("./testfixtures/saml-metadata.xml") + } + + tags = var.resource_tags + +} + +resource "aws_workspacesweb_portal" "test" { + display_name = "test" +} + +variable "resource_tags" { + description = "Tags to set on resource. To specify no tags, set to `null`" + # Not setting a default, so that this must explicitly be set to `null` to specify no tags + type = map(string) + nullable = true +} + +variable "provider_tags" { + type = map(string) + nullable = true + default = null +} + +variable "ignore_tag_keys" { + type = set(string) + nullable = false +} diff --git a/internal/service/workspacesweb/testdata/Portal/tags/main_gen.tf b/internal/service/workspacesweb/testdata/Portal/tags/main_gen.tf new file mode 100644 index 000000000000..df9d9fce75b8 --- /dev/null +++ b/internal/service/workspacesweb/testdata/Portal/tags/main_gen.tf @@ -0,0 +1,14 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_workspacesweb_portal" "test" { + + tags = var.resource_tags + +} +variable "resource_tags" { + description = "Tags to set on resource. To specify no tags, set to `null`" + # Not setting a default, so that this must explicitly be set to `null` to specify no tags + type = map(string) + nullable = true +} diff --git a/internal/service/workspacesweb/testdata/Portal/tagsComputed1/main_gen.tf b/internal/service/workspacesweb/testdata/Portal/tagsComputed1/main_gen.tf new file mode 100644 index 000000000000..06ca9e3767de --- /dev/null +++ b/internal/service/workspacesweb/testdata/Portal/tagsComputed1/main_gen.tf @@ -0,0 +1,18 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +provider "null" {} + +resource "aws_workspacesweb_portal" "test" { + + tags = { + (var.unknownTagKey) = null_resource.test.id + } + +} +resource "null_resource" "test" {} + +variable "unknownTagKey" { + type = string + nullable = false +} diff --git a/internal/service/workspacesweb/testdata/Portal/tagsComputed2/main_gen.tf b/internal/service/workspacesweb/testdata/Portal/tagsComputed2/main_gen.tf new file mode 100644 index 000000000000..3b0b2bd5429d --- /dev/null +++ b/internal/service/workspacesweb/testdata/Portal/tagsComputed2/main_gen.tf @@ -0,0 +1,29 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +provider "null" {} + +resource "aws_workspacesweb_portal" "test" { + + tags = { + (var.unknownTagKey) = null_resource.test.id + (var.knownTagKey) = var.knownTagValue + } + +} +resource "null_resource" "test" {} + +variable "unknownTagKey" { + type = string + nullable = false +} + +variable "knownTagKey" { + type = string + nullable = false +} + +variable "knownTagValue" { + type = string + nullable = false +} diff --git a/internal/service/workspacesweb/testdata/Portal/tags_defaults/main_gen.tf b/internal/service/workspacesweb/testdata/Portal/tags_defaults/main_gen.tf new file mode 100644 index 000000000000..b06385ac778d --- /dev/null +++ b/internal/service/workspacesweb/testdata/Portal/tags_defaults/main_gen.tf @@ -0,0 +1,25 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +provider "aws" { + default_tags { + tags = var.provider_tags + } +} + +resource "aws_workspacesweb_portal" "test" { + + tags = var.resource_tags + +} +variable "resource_tags" { + description = "Tags to set on resource. To specify no tags, set to `null`" + # Not setting a default, so that this must explicitly be set to `null` to specify no tags + type = map(string) + nullable = true +} + +variable "provider_tags" { + type = map(string) + nullable = false +} diff --git a/internal/service/workspacesweb/testdata/Portal/tags_ignore/main_gen.tf b/internal/service/workspacesweb/testdata/Portal/tags_ignore/main_gen.tf new file mode 100644 index 000000000000..7961dbff780f --- /dev/null +++ b/internal/service/workspacesweb/testdata/Portal/tags_ignore/main_gen.tf @@ -0,0 +1,34 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +provider "aws" { + default_tags { + tags = var.provider_tags + } + ignore_tags { + keys = var.ignore_tag_keys + } +} + +resource "aws_workspacesweb_portal" "test" { + + tags = var.resource_tags + +} +variable "resource_tags" { + description = "Tags to set on resource. 
To specify no tags, set to `null`" + # Not setting a default, so that this must explicitly be set to `null` to specify no tags + type = map(string) + nullable = true +} + +variable "provider_tags" { + type = map(string) + nullable = true + default = null +} + +variable "ignore_tag_keys" { + type = set(string) + nullable = false +} diff --git a/internal/service/workspacesweb/testdata/SessionLogger/tags/main_gen.tf b/internal/service/workspacesweb/testdata/SessionLogger/tags/main_gen.tf new file mode 100644 index 000000000000..c31d47baa011 --- /dev/null +++ b/internal/service/workspacesweb/testdata/SessionLogger/tags/main_gen.tf @@ -0,0 +1,64 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resource "aws_workspacesweb_session_logger" "test" { + display_name = var.rName + + event_filter { + all {} + } + + log_configuration { + s3 { + bucket = aws_s3_bucket.test.bucket + folder_structure = "Flat" + log_file_format = "Json" + } + } + + tags = var.resource_tags + + depends_on = [aws_s3_bucket_policy.allow_write_access] + +} + +resource "aws_s3_bucket" "test" { + bucket = var.rName + force_destroy = true +} + +data "aws_iam_policy_document" "allow_write_access" { + statement { + principals { + type = "Service" + identifiers = ["workspaces-web.amazonaws.com"] + } + + actions = [ + "s3:PutObject" + ] + + resources = [ + aws_s3_bucket.test.arn, + "${aws_s3_bucket.test.arn}/*" + ] + } +} + +resource "aws_s3_bucket_policy" "allow_write_access" { + bucket = aws_s3_bucket.test.id + policy = data.aws_iam_policy_document.allow_write_access.json +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} + +variable "resource_tags" { + description = "Tags to set on resource. 
To specify no tags, set to `null`" + # Not setting a default, so that this must explicitly be set to `null` to specify no tags + type = map(string) + nullable = true +} diff --git a/internal/service/workspacesweb/testdata/SessionLogger/tagsComputed1/main_gen.tf b/internal/service/workspacesweb/testdata/SessionLogger/tagsComputed1/main_gen.tf new file mode 100644 index 000000000000..332c93d689ea --- /dev/null +++ b/internal/service/workspacesweb/testdata/SessionLogger/tagsComputed1/main_gen.tf @@ -0,0 +1,68 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +provider "null" {} + +resource "aws_workspacesweb_session_logger" "test" { + display_name = var.rName + + event_filter { + all {} + } + + log_configuration { + s3 { + bucket = aws_s3_bucket.test.bucket + folder_structure = "Flat" + log_file_format = "Json" + } + } + + tags = { + (var.unknownTagKey) = null_resource.test.id + } + + depends_on = [aws_s3_bucket_policy.allow_write_access] + +} + +resource "aws_s3_bucket" "test" { + bucket = var.rName + force_destroy = true +} + +data "aws_iam_policy_document" "allow_write_access" { + statement { + principals { + type = "Service" + identifiers = ["workspaces-web.amazonaws.com"] + } + + actions = [ + "s3:PutObject" + ] + + resources = [ + aws_s3_bucket.test.arn, + "${aws_s3_bucket.test.arn}/*" + ] + } +} + +resource "aws_s3_bucket_policy" "allow_write_access" { + bucket = aws_s3_bucket.test.id + policy = data.aws_iam_policy_document.allow_write_access.json +} + +resource "null_resource" "test" {} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} + +variable "unknownTagKey" { + type = string + nullable = false +} diff --git a/internal/service/workspacesweb/testdata/SessionLogger/tagsComputed2/main_gen.tf b/internal/service/workspacesweb/testdata/SessionLogger/tagsComputed2/main_gen.tf new file mode 100644 index 000000000000..7d43ca5e7a23 --- /dev/null +++ 
b/internal/service/workspacesweb/testdata/SessionLogger/tagsComputed2/main_gen.tf @@ -0,0 +1,79 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +provider "null" {} + +resource "aws_workspacesweb_session_logger" "test" { + display_name = var.rName + + event_filter { + all {} + } + + log_configuration { + s3 { + bucket = aws_s3_bucket.test.bucket + folder_structure = "Flat" + log_file_format = "Json" + } + } + + tags = { + (var.unknownTagKey) = null_resource.test.id + (var.knownTagKey) = var.knownTagValue + } + + depends_on = [aws_s3_bucket_policy.allow_write_access] + +} + +resource "aws_s3_bucket" "test" { + bucket = var.rName + force_destroy = true +} + +data "aws_iam_policy_document" "allow_write_access" { + statement { + principals { + type = "Service" + identifiers = ["workspaces-web.amazonaws.com"] + } + + actions = [ + "s3:PutObject" + ] + + resources = [ + aws_s3_bucket.test.arn, + "${aws_s3_bucket.test.arn}/*" + ] + } +} + +resource "aws_s3_bucket_policy" "allow_write_access" { + bucket = aws_s3_bucket.test.id + policy = data.aws_iam_policy_document.allow_write_access.json +} + +resource "null_resource" "test" {} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} + +variable "unknownTagKey" { + type = string + nullable = false +} + +variable "knownTagKey" { + type = string + nullable = false +} + +variable "knownTagValue" { + type = string + nullable = false +} diff --git a/internal/service/workspacesweb/testdata/SessionLogger/tags_defaults/main_gen.tf b/internal/service/workspacesweb/testdata/SessionLogger/tags_defaults/main_gen.tf new file mode 100644 index 000000000000..3bb168b11271 --- /dev/null +++ b/internal/service/workspacesweb/testdata/SessionLogger/tags_defaults/main_gen.tf @@ -0,0 +1,75 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +provider "aws" { + default_tags { + tags = var.provider_tags + } +} + +resource "aws_workspacesweb_session_logger" "test" { + display_name = var.rName + + event_filter { + all {} + } + + log_configuration { + s3 { + bucket = aws_s3_bucket.test.bucket + folder_structure = "Flat" + log_file_format = "Json" + } + } + + tags = var.resource_tags + + depends_on = [aws_s3_bucket_policy.allow_write_access] + +} + +resource "aws_s3_bucket" "test" { + bucket = var.rName + force_destroy = true +} + +data "aws_iam_policy_document" "allow_write_access" { + statement { + principals { + type = "Service" + identifiers = ["workspaces-web.amazonaws.com"] + } + + actions = [ + "s3:PutObject" + ] + + resources = [ + aws_s3_bucket.test.arn, + "${aws_s3_bucket.test.arn}/*" + ] + } +} + +resource "aws_s3_bucket_policy" "allow_write_access" { + bucket = aws_s3_bucket.test.id + policy = data.aws_iam_policy_document.allow_write_access.json +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} + +variable "resource_tags" { + description = "Tags to set on resource. To specify no tags, set to `null`" + # Not setting a default, so that this must explicitly be set to `null` to specify no tags + type = map(string) + nullable = true +} + +variable "provider_tags" { + type = map(string) + nullable = false +} diff --git a/internal/service/workspacesweb/testdata/SessionLogger/tags_ignore/main_gen.tf b/internal/service/workspacesweb/testdata/SessionLogger/tags_ignore/main_gen.tf new file mode 100644 index 000000000000..91930b5d53f0 --- /dev/null +++ b/internal/service/workspacesweb/testdata/SessionLogger/tags_ignore/main_gen.tf @@ -0,0 +1,84 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +provider "aws" { + default_tags { + tags = var.provider_tags + } + ignore_tags { + keys = var.ignore_tag_keys + } +} + +resource "aws_workspacesweb_session_logger" "test" { + display_name = var.rName + + event_filter { + all {} + } + + log_configuration { + s3 { + bucket = aws_s3_bucket.test.bucket + folder_structure = "Flat" + log_file_format = "Json" + } + } + + tags = var.resource_tags + + depends_on = [aws_s3_bucket_policy.allow_write_access] + +} + +resource "aws_s3_bucket" "test" { + bucket = var.rName + force_destroy = true +} + +data "aws_iam_policy_document" "allow_write_access" { + statement { + principals { + type = "Service" + identifiers = ["workspaces-web.amazonaws.com"] + } + + actions = [ + "s3:PutObject" + ] + + resources = [ + aws_s3_bucket.test.arn, + "${aws_s3_bucket.test.arn}/*" + ] + } +} + +resource "aws_s3_bucket_policy" "allow_write_access" { + bucket = aws_s3_bucket.test.id + policy = data.aws_iam_policy_document.allow_write_access.json +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} + +variable "resource_tags" { + description = "Tags to set on resource. To specify no tags, set to `null`" + # Not setting a default, so that this must explicitly be set to `null` to specify no tags + type = map(string) + nullable = true +} + +variable "provider_tags" { + type = map(string) + nullable = true + default = null +} + +variable "ignore_tag_keys" { + type = set(string) + nullable = false +} diff --git a/internal/service/workspacesweb/testdata/TrustStore/tags/main_gen.tf b/internal/service/workspacesweb/testdata/TrustStore/tags/main_gen.tf new file mode 100644 index 000000000000..bad3f7af8411 --- /dev/null +++ b/internal/service/workspacesweb/testdata/TrustStore/tags/main_gen.tf @@ -0,0 +1,43 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_acmpca_certificate_authority" "test" { + type = "ROOT" + + certificate_authority_configuration { + key_algorithm = "RSA_2048" + signing_algorithm = "SHA256WITHRSA" + + subject { + common_name = "example.com" + } + } +} + +resource "aws_acmpca_certificate" "test" { + certificate_authority_arn = aws_acmpca_certificate_authority.test.arn + certificate_signing_request = aws_acmpca_certificate_authority.test.certificate_signing_request + signing_algorithm = "SHA256WITHRSA" + + template_arn = "arn:aws:acm-pca:::template/RootCACertificate/V1" + + validity { + type = "YEARS" + value = 1 + } +} + +resource "aws_workspacesweb_trust_store" "test" { + certificate { + body = aws_acmpca_certificate.test.certificate + } + + tags = var.resource_tags +} + +variable "resource_tags" { + description = "Tags to set on resource. To specify no tags, set to `null`" + # Not setting a default, so that this must explicitly be set to `null` to specify no tags + type = map(string) + nullable = true +} diff --git a/internal/service/workspacesweb/testdata/TrustStore/tagsComputed1/main_gen.tf b/internal/service/workspacesweb/testdata/TrustStore/tagsComputed1/main_gen.tf new file mode 100644 index 000000000000..f3df14a6aff7 --- /dev/null +++ b/internal/service/workspacesweb/testdata/TrustStore/tagsComputed1/main_gen.tf @@ -0,0 +1,47 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +provider "null" {} + +resource "aws_acmpca_certificate_authority" "test" { + type = "ROOT" + + certificate_authority_configuration { + key_algorithm = "RSA_2048" + signing_algorithm = "SHA256WITHRSA" + + subject { + common_name = "example.com" + } + } +} + +resource "aws_acmpca_certificate" "test" { + certificate_authority_arn = aws_acmpca_certificate_authority.test.arn + certificate_signing_request = aws_acmpca_certificate_authority.test.certificate_signing_request + signing_algorithm = "SHA256WITHRSA" + + template_arn = "arn:aws:acm-pca:::template/RootCACertificate/V1" + + validity { + type = "YEARS" + value = 1 + } +} + +resource "aws_workspacesweb_trust_store" "test" { + certificate { + body = aws_acmpca_certificate.test.certificate + } + + tags = { + (var.unknownTagKey) = null_resource.test.id + } +} + +resource "null_resource" "test" {} + +variable "unknownTagKey" { + type = string + nullable = false +} diff --git a/internal/service/workspacesweb/testdata/TrustStore/tagsComputed2/main_gen.tf b/internal/service/workspacesweb/testdata/TrustStore/tagsComputed2/main_gen.tf new file mode 100644 index 000000000000..9919aee9fb88 --- /dev/null +++ b/internal/service/workspacesweb/testdata/TrustStore/tagsComputed2/main_gen.tf @@ -0,0 +1,58 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +provider "null" {} + +resource "aws_acmpca_certificate_authority" "test" { + type = "ROOT" + + certificate_authority_configuration { + key_algorithm = "RSA_2048" + signing_algorithm = "SHA256WITHRSA" + + subject { + common_name = "example.com" + } + } +} + +resource "aws_acmpca_certificate" "test" { + certificate_authority_arn = aws_acmpca_certificate_authority.test.arn + certificate_signing_request = aws_acmpca_certificate_authority.test.certificate_signing_request + signing_algorithm = "SHA256WITHRSA" + + template_arn = "arn:aws:acm-pca:::template/RootCACertificate/V1" + + validity { + type = "YEARS" + value = 1 + } +} + +resource "aws_workspacesweb_trust_store" "test" { + certificate { + body = aws_acmpca_certificate.test.certificate + } + + tags = { + (var.unknownTagKey) = null_resource.test.id + (var.knownTagKey) = var.knownTagValue + } +} + +resource "null_resource" "test" {} + +variable "unknownTagKey" { + type = string + nullable = false +} + +variable "knownTagKey" { + type = string + nullable = false +} + +variable "knownTagValue" { + type = string + nullable = false +} diff --git a/internal/service/workspacesweb/testdata/TrustStore/tags_defaults/main_gen.tf b/internal/service/workspacesweb/testdata/TrustStore/tags_defaults/main_gen.tf new file mode 100644 index 000000000000..62fa9ecde689 --- /dev/null +++ b/internal/service/workspacesweb/testdata/TrustStore/tags_defaults/main_gen.tf @@ -0,0 +1,54 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +provider "aws" { + default_tags { + tags = var.provider_tags + } +} + +resource "aws_acmpca_certificate_authority" "test" { + type = "ROOT" + + certificate_authority_configuration { + key_algorithm = "RSA_2048" + signing_algorithm = "SHA256WITHRSA" + + subject { + common_name = "example.com" + } + } +} + +resource "aws_acmpca_certificate" "test" { + certificate_authority_arn = aws_acmpca_certificate_authority.test.arn + certificate_signing_request = aws_acmpca_certificate_authority.test.certificate_signing_request + signing_algorithm = "SHA256WITHRSA" + + template_arn = "arn:aws:acm-pca:::template/RootCACertificate/V1" + + validity { + type = "YEARS" + value = 1 + } +} + +resource "aws_workspacesweb_trust_store" "test" { + certificate { + body = aws_acmpca_certificate.test.certificate + } + + tags = var.resource_tags +} + +variable "resource_tags" { + description = "Tags to set on resource. To specify no tags, set to `null`" + # Not setting a default, so that this must explicitly be set to `null` to specify no tags + type = map(string) + nullable = true +} + +variable "provider_tags" { + type = map(string) + nullable = false +} diff --git a/internal/service/workspacesweb/testdata/TrustStore/tags_ignore/main_gen.tf b/internal/service/workspacesweb/testdata/TrustStore/tags_ignore/main_gen.tf new file mode 100644 index 000000000000..fb60b0ee3c65 --- /dev/null +++ b/internal/service/workspacesweb/testdata/TrustStore/tags_ignore/main_gen.tf @@ -0,0 +1,63 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +provider "aws" { + default_tags { + tags = var.provider_tags + } + ignore_tags { + keys = var.ignore_tag_keys + } +} + +resource "aws_acmpca_certificate_authority" "test" { + type = "ROOT" + + certificate_authority_configuration { + key_algorithm = "RSA_2048" + signing_algorithm = "SHA256WITHRSA" + + subject { + common_name = "example.com" + } + } +} + +resource "aws_acmpca_certificate" "test" { + certificate_authority_arn = aws_acmpca_certificate_authority.test.arn + certificate_signing_request = aws_acmpca_certificate_authority.test.certificate_signing_request + signing_algorithm = "SHA256WITHRSA" + + template_arn = "arn:aws:acm-pca:::template/RootCACertificate/V1" + + validity { + type = "YEARS" + value = 1 + } +} + +resource "aws_workspacesweb_trust_store" "test" { + certificate { + body = aws_acmpca_certificate.test.certificate + } + + tags = var.resource_tags +} + +variable "resource_tags" { + description = "Tags to set on resource. To specify no tags, set to `null`" + # Not setting a default, so that this must explicitly be set to `null` to specify no tags + type = map(string) + nullable = true +} + +variable "provider_tags" { + type = map(string) + nullable = true + default = null +} + +variable "ignore_tag_keys" { + type = set(string) + nullable = false +} diff --git a/internal/service/workspacesweb/testdata/tmpl/identity_provider_tags.gtpl b/internal/service/workspacesweb/testdata/tmpl/identity_provider_tags.gtpl new file mode 100644 index 000000000000..ea73cdd94725 --- /dev/null +++ b/internal/service/workspacesweb/testdata/tmpl/identity_provider_tags.gtpl @@ -0,0 +1,16 @@ +resource "aws_workspacesweb_identity_provider" "test" { + identity_provider_name = "test" + identity_provider_type = "SAML" + portal_arn = aws_workspacesweb_portal.test.portal_arn + + identity_provider_details = { + MetadataFile = file("./testfixtures/saml-metadata.xml") + } + +{{- template "tags" . 
}} + +} + +resource "aws_workspacesweb_portal" "test" { + display_name = "test" +} diff --git a/internal/service/workspacesweb/testdata/tmpl/portal_tags.gtpl b/internal/service/workspacesweb/testdata/tmpl/portal_tags.gtpl new file mode 100644 index 000000000000..4589075edacf --- /dev/null +++ b/internal/service/workspacesweb/testdata/tmpl/portal_tags.gtpl @@ -0,0 +1,5 @@ +resource "aws_workspacesweb_portal" "test" { + +{{- template "tags" . }} + +} \ No newline at end of file diff --git a/internal/service/workspacesweb/testdata/tmpl/session_logger_tags.gtpl b/internal/service/workspacesweb/testdata/tmpl/session_logger_tags.gtpl new file mode 100644 index 000000000000..6a5a4bbf986a --- /dev/null +++ b/internal/service/workspacesweb/testdata/tmpl/session_logger_tags.gtpl @@ -0,0 +1,48 @@ +resource "aws_workspacesweb_session_logger" "test" { + display_name = var.rName + + event_filter { + all {} + } + + log_configuration { + s3 { + bucket = aws_s3_bucket.test.bucket + folder_structure = "Flat" + log_file_format = "Json" + } + } + +{{- template "tags" . 
}} + + depends_on = [aws_s3_bucket_policy.allow_write_access] + +} + +resource "aws_s3_bucket" "test" { + bucket = var.rName + force_destroy = true +} + +data "aws_iam_policy_document" "allow_write_access" { + statement { + principals { + type = "Service" + identifiers = ["workspaces-web.amazonaws.com"] + } + + actions = [ + "s3:PutObject" + ] + + resources = [ + aws_s3_bucket.test.arn, + "${aws_s3_bucket.test.arn}/*" + ] + } +} + +resource "aws_s3_bucket_policy" "allow_write_access" { + bucket = aws_s3_bucket.test.id + policy = data.aws_iam_policy_document.allow_write_access.json +} diff --git a/internal/service/workspacesweb/testdata/tmpl/trust_store_tags.gtpl b/internal/service/workspacesweb/testdata/tmpl/trust_store_tags.gtpl new file mode 100644 index 000000000000..e1b5497a5769 --- /dev/null +++ b/internal/service/workspacesweb/testdata/tmpl/trust_store_tags.gtpl @@ -0,0 +1,32 @@ +resource "aws_acmpca_certificate_authority" "test" { + type = "ROOT" + + certificate_authority_configuration { + key_algorithm = "RSA_2048" + signing_algorithm = "SHA256WITHRSA" + + subject { + common_name = "example.com" + } + } +} + +resource "aws_acmpca_certificate" "test" { + certificate_authority_arn = aws_acmpca_certificate_authority.test.arn + certificate_signing_request = aws_acmpca_certificate_authority.test.certificate_signing_request + signing_algorithm = "SHA256WITHRSA" + + template_arn = "arn:aws:acm-pca:::template/RootCACertificate/V1" + + validity { + type = "YEARS" + value = 1 + } +} + +resource "aws_workspacesweb_trust_store" "test" { + certificate { + body = aws_acmpca_certificate.test.certificate + } +{{- template "tags" . 
}} +} diff --git a/internal/service/workspacesweb/testfixtures/saml-metadata.xml b/internal/service/workspacesweb/testfixtures/saml-metadata.xml new file mode 100644 index 000000000000..f961397cf528 --- /dev/null +++ b/internal/service/workspacesweb/testfixtures/saml-metadata.xml @@ -0,0 +1,20 @@ + + + + + + + + + MIICfjCCAeegAwIBAgIBADANBgkqhkiG9w0BAQ0FADBbMQswCQYDVQQGEwJ1czELMAkGA1UECAwCQ0ExEjAQBgNVBAoMCVRlcnJhZm9ybTErMCkGA1UEAwwidGVycmFmb3JtLWRldi1lZC5teS5zYWxlc2ZvcmNlLmNvbTAgFw0yMDA4MjkxNDQ4MzlaGA8yMDcwMDgxNzE0NDgzOVowWzELMAkGA1UEBhMCdXMxCzAJBgNVBAgMAkNBMRIwEAYDVQQKDAlUZXJyYWZvcm0xKzApBgNVBAMMInRlcnJhZm9ybS1kZXYtZWQubXkuc2FsZXNmb3JjZS5jb20wgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBAOxUTzEKdivVjfZ/BERGpX/ZWQsBKHut17dQTKW/3jox1N9EJ3ULj9qEDen6zQ74Ce8hSEkrG7MP9mcP1oEhQZSca5tTAop1GejJG+bfF4v6cXM9pqHlllrYrmXMfESiahqhBhE8VvoGJkvp393TcB1lX+WxO8Q74demTrQn5tgvAgMBAAGjUDBOMB0GA1UdDgQWBBREKZt4Av70WKQE4aLD2tvbSLnBlzAfBgNVHSMEGDAWgBREKZt4Av70WKQE4aLD2tvbSLnBlzAMBgNVHRMEBTADAQH/MA0GCSqGSIb3DQEBDQUAA4GBACxeC29WMGqeOlQF4JWwsYwIC82SUaZvMDqjAm9ieIrAZRH6J6Cu40c/rvsUGUjQ9logKX15RAyI7Rn0jBUgopRkNL71HyyM7ug4qN5An05VmKQWIbVfxkNVB2Ipb/ICMc5UE38G4y4VbANZFvbFbkVq6OAP2GGNl22o/XSnhFY8 + + + + urn:oasis:names:tc:SAML:1.1:nameid-format:unspecified + + + + diff --git a/internal/service/workspacesweb/trust_store.go b/internal/service/workspacesweb/trust_store.go new file mode 100644 index 000000000000..c26449c7af76 --- /dev/null +++ b/internal/service/workspacesweb/trust_store.go @@ -0,0 +1,432 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package workspacesweb + +import ( + "context" + "encoding/base64" + "fmt" + "strings" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/workspacesweb" + awstypes "github.com/aws/aws-sdk-go-v2/service/workspacesweb/types" + "github.com/hashicorp/terraform-plugin-framework-validators/setvalidator" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/listplanmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/setplanmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" + sdkid "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" + "github.com/hashicorp/terraform-provider-aws/internal/errs" + "github.com/hashicorp/terraform-provider-aws/internal/errs/fwdiag" + "github.com/hashicorp/terraform-provider-aws/internal/framework" + fwflex "github.com/hashicorp/terraform-provider-aws/internal/framework/flex" + fwtypes "github.com/hashicorp/terraform-provider-aws/internal/framework/types" + tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + "github.com/hashicorp/terraform-provider-aws/names" +) + +// @FrameworkResource("aws_workspacesweb_trust_store", name="Trust Store") +// @Tags(identifierAttribute="trust_store_arn") +// @Testing(tagsTest=true) +// 
@Testing(generator=false) +// @Testing(existsType="github.com/aws/aws-sdk-go-v2/service/workspacesweb/types;types.TrustStore") +// @Testing(importStateIdAttribute="trust_store_arn") +// @Testing(importIgnore="certificate_list") +func newTrustStoreResource(_ context.Context) (resource.ResourceWithConfigure, error) { + return &trustStoreResource{}, nil +} + +type trustStoreResource struct { + framework.ResourceWithModel[trustStoreResourceModel] +} + +func (r *trustStoreResource) Schema(ctx context.Context, request resource.SchemaRequest, response *resource.SchemaResponse) { + response.Schema = schema.Schema{ + Attributes: map[string]schema.Attribute{ + "associated_portal_arns": schema.ListAttribute{ + CustomType: fwtypes.ListOfStringType, + ElementType: types.StringType, + Computed: true, + PlanModifiers: []planmodifier.List{ + listplanmodifier.UseStateForUnknown(), + }, + }, + names.AttrTags: tftags.TagsAttribute(), + names.AttrTagsAll: tftags.TagsAttributeComputedOnly(), + "trust_store_arn": schema.StringAttribute{ + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + }, + }, + Blocks: map[string]schema.Block{ + names.AttrCertificate: schema.SetNestedBlock{ + CustomType: fwtypes.NewSetNestedObjectTypeOf[certificateModel](ctx), + Validators: []validator.Set{ + setvalidator.SizeAtLeast(0), + }, + PlanModifiers: []planmodifier.Set{ + setplanmodifier.UseStateForUnknown(), + }, + NestedObject: schema.NestedBlockObject{ + Attributes: map[string]schema.Attribute{ + "body": schema.StringAttribute{ + Required: true, + }, + names.AttrIssuer: schema.StringAttribute{ + Computed: true, + }, + "not_valid_after": schema.StringAttribute{ + Computed: true, + }, + "not_valid_before": schema.StringAttribute{ + Computed: true, + }, + "subject": schema.StringAttribute{ + Computed: true, + }, + "thumbprint": schema.StringAttribute{ + Computed: true, + }, + }, + }, + }, + }, + } +} + +func (r *trustStoreResource) Create(ctx 
context.Context, request resource.CreateRequest, response *resource.CreateResponse) { + var data trustStoreResourceModel + response.Diagnostics.Append(request.Plan.Get(ctx, &data)...) + if response.Diagnostics.HasError() { + return + } + + conn := r.Meta().WorkSpacesWebClient(ctx) + + input := workspacesweb.CreateTrustStoreInput{ + ClientToken: aws.String(sdkid.UniqueId()), + Tags: getTagsIn(ctx), + } + + // Convert string certificates to byte slices + for _, certificate := range data.Certificates.Elements() { + var cert certificateModel + response.Diagnostics.Append(tfsdk.ValueAs(ctx, certificate, &cert)...) + if response.Diagnostics.HasError() { + return + } + + formattedCert := strings.ReplaceAll(strings.Trim(cert.Body.ValueString(), "\""), `\n`, "\n") + input.CertificateList = append(input.CertificateList, []byte(formattedCert)) + } + + output, err := conn.CreateTrustStore(ctx, &input) + + if err != nil { + response.Diagnostics.AddError("creating WorkSpacesWeb Trust Store", err.Error()) + return + } + + data.TrustStoreARN = fwflex.StringToFramework(ctx, output.TrustStoreArn) + + // Get the trust store details to populate other fields + trustStore, err := findTrustStoreByARN(ctx, conn, data.TrustStoreARN.ValueString()) + if err != nil { + response.Diagnostics.AddError(fmt.Sprintf("reading WorkSpacesWeb Trust Store (%s)", data.TrustStoreARN.ValueString()), err.Error()) + return + } + + response.Diagnostics.Append(fwflex.Flatten(ctx, trustStore, &data)...) 
+ if response.Diagnostics.HasError() { + return + } + + // Populate certificate details + certificates, err := listTrustStoreCertificates(ctx, conn, data.TrustStoreARN.ValueString()) + if err != nil { + response.Diagnostics.AddError(fmt.Sprintf("listing WorkSpacesWeb Trust Store Certificates (%s)", data.TrustStoreARN.ValueString()), err.Error()) + return + } + + var diags diag.Diagnostics + data.Certificates, diags = fwtypes.NewSetNestedObjectValueOfValueSlice(ctx, certificates) + response.Diagnostics.Append(diags...) + if response.Diagnostics.HasError() { + return + } + + response.Diagnostics.Append(response.State.Set(ctx, data)...) +} + +func (r *trustStoreResource) Read(ctx context.Context, request resource.ReadRequest, response *resource.ReadResponse) { + var data trustStoreResourceModel + response.Diagnostics.Append(request.State.Get(ctx, &data)...) + if response.Diagnostics.HasError() { + return + } + + conn := r.Meta().WorkSpacesWebClient(ctx) + + output, err := findTrustStoreByARN(ctx, conn, data.TrustStoreARN.ValueString()) + if tfresource.NotFound(err) { + response.Diagnostics.Append(fwdiag.NewResourceNotFoundWarningDiagnostic(err)) + response.State.RemoveResource(ctx) + return + } + + if err != nil { + response.Diagnostics.AddError(fmt.Sprintf("reading WorkSpacesWeb Trust Store (%s)", data.TrustStoreARN.ValueString()), err.Error()) + return + } + + response.Diagnostics.Append(fwflex.Flatten(ctx, output, &data)...) 
+ if response.Diagnostics.HasError() { + return + } + + // Populate certificate details by merging existing state with computed values + certificates, err := listTrustStoreCertificates(ctx, conn, data.TrustStoreARN.ValueString()) + if err != nil { + response.Diagnostics.AddError(fmt.Sprintf("listing WorkSpacesWeb Trust Store Certificates (%s)", data.TrustStoreARN.ValueString()), err.Error()) + return + } + + var diags diag.Diagnostics + data.Certificates, diags = fwtypes.NewSetNestedObjectValueOfValueSlice(ctx, certificates) + response.Diagnostics.Append(diags...) + if response.Diagnostics.HasError() { + return + } + + response.Diagnostics.Append(response.State.Set(ctx, &data)...) +} + +func (r *trustStoreResource) Update(ctx context.Context, request resource.UpdateRequest, response *resource.UpdateResponse) { + var new, old trustStoreResourceModel + response.Diagnostics.Append(request.Plan.Get(ctx, &new)...) + if response.Diagnostics.HasError() { + return + } + response.Diagnostics.Append(request.State.Get(ctx, &old)...) + if response.Diagnostics.HasError() { + return + } + + conn := r.Meta().WorkSpacesWebClient(ctx) + + if !new.Certificates.Equal(old.Certificates) { + input := workspacesweb.UpdateTrustStoreInput{ + ClientToken: aws.String(sdkid.UniqueId()), + TrustStoreArn: new.TrustStoreARN.ValueStringPointer(), + } + + // Handle certificate additions and deletions + oldCerts := make(map[string]string) // cert content -> thumbprint + for _, certificate := range old.Certificates.Elements() { + var cert certificateModel + response.Diagnostics.Append(tfsdk.ValueAs(ctx, certificate, &cert)...) + if response.Diagnostics.HasError() { + return + } + + oldCerts[base64.StdEncoding.EncodeToString([]byte(cert.Body.ValueString()))] = cert.Thumbprint.ValueString() + } + + newCertContents := make(map[string]bool) + for _, certificate := range new.Certificates.Elements() { + var cert certificateModel + response.Diagnostics.Append(tfsdk.ValueAs(ctx, certificate, &cert)...) 
+ if response.Diagnostics.HasError() { + return + } + + formattedCert := strings.ReplaceAll(strings.Trim(cert.Body.ValueString(), "\""), `\n`, "\n") + newCertContents[base64.StdEncoding.EncodeToString([]byte(formattedCert))] = true + } + + // Find certificates to add + for _, certificate := range new.Certificates.Elements() { + var cert certificateModel + response.Diagnostics.Append(tfsdk.ValueAs(ctx, certificate, &cert)...) + if response.Diagnostics.HasError() { + return + } + + formattedCert := strings.ReplaceAll(strings.Trim(cert.Body.ValueString(), "\""), `\n`, "\n") + certEncoded := base64.StdEncoding.EncodeToString([]byte(formattedCert)) + if _, exists := oldCerts[certEncoded]; !exists { + input.CertificatesToAdd = append(input.CertificatesToAdd, []byte(formattedCert)) + } + } + + // Find certificates to delete (by thumbprint) + for certEncoded, thumbprint := range oldCerts { + if !newCertContents[certEncoded] { + input.CertificatesToDelete = append(input.CertificatesToDelete, thumbprint) + } + } + + _, err := conn.UpdateTrustStore(ctx, &input) + + if err != nil { + response.Diagnostics.AddError(fmt.Sprintf("updating WorkSpacesWeb Trust Store (%s)", new.TrustStoreARN.ValueString()), err.Error()) + return + } + } + + // Read the updated state to get computed values + updatedTrustStore, err := findTrustStoreByARN(ctx, conn, new.TrustStoreARN.ValueString()) + if err != nil { + response.Diagnostics.AddError(fmt.Sprintf("reading WorkSpacesWeb Trust Store (%s) after update", new.TrustStoreARN.ValueString()), err.Error()) + return + } + + response.Diagnostics.Append(fwflex.Flatten(ctx, updatedTrustStore, &new)...)
+ if response.Diagnostics.HasError() { + return + } + + // Populate certificate details by merging planned data with computed values + certificates, err := listTrustStoreCertificates(ctx, conn, new.TrustStoreARN.ValueString()) + if err != nil { + response.Diagnostics.AddError(fmt.Sprintf("listing WorkSpacesWeb Trust Store Certificates (%s) after update", new.TrustStoreARN.ValueString()), err.Error()) + return + } + + var diags diag.Diagnostics + new.Certificates, diags = fwtypes.NewSetNestedObjectValueOfValueSlice(ctx, certificates) + response.Diagnostics.Append(diags...) + if response.Diagnostics.HasError() { + return + } + + response.Diagnostics.Append(response.State.Set(ctx, &new)...) +} + +func (r *trustStoreResource) Delete(ctx context.Context, request resource.DeleteRequest, response *resource.DeleteResponse) { + var data trustStoreResourceModel + response.Diagnostics.Append(request.State.Get(ctx, &data)...) + if response.Diagnostics.HasError() { + return + } + + conn := r.Meta().WorkSpacesWebClient(ctx) + + input := workspacesweb.DeleteTrustStoreInput{ + TrustStoreArn: data.TrustStoreARN.ValueStringPointer(), + } + _, err := conn.DeleteTrustStore(ctx, &input) + + if errs.IsA[*awstypes.ResourceNotFoundException](err) { + return + } + + if err != nil { + response.Diagnostics.AddError(fmt.Sprintf("deleting WorkSpacesWeb Trust Store (%s)", data.TrustStoreARN.ValueString()), err.Error()) + return + } +} + +func (r *trustStoreResource) ImportState(ctx context.Context, request resource.ImportStateRequest, response *resource.ImportStateResponse) { + resource.ImportStatePassthroughID(ctx, path.Root("trust_store_arn"), request, response) +} + +func findTrustStoreByARN(ctx context.Context, conn *workspacesweb.Client, arn string) (*awstypes.TrustStore, error) { + input := workspacesweb.GetTrustStoreInput{ + TrustStoreArn: aws.String(arn), + } + output, err := conn.GetTrustStore(ctx, &input) + + if errs.IsA[*awstypes.ResourceNotFoundException](err) { + return nil, 
&retry.NotFoundError{ + LastError: err, + LastRequest: input, + } + } + + if err != nil { + return nil, err + } + + if output == nil || output.TrustStore == nil { + return nil, tfresource.NewEmptyResultError(input) + } + + return output.TrustStore, nil +} + +func listTrustStoreCertificates(ctx context.Context, conn *workspacesweb.Client, arn string) ([]certificateModel, error) { + input := workspacesweb.ListTrustStoreCertificatesInput{ + TrustStoreArn: aws.String(arn), + } + + var certificates []certificateModel + pages := workspacesweb.NewListTrustStoreCertificatesPaginator(conn, &input) + for pages.HasMorePages() { + output, err := pages.NextPage(ctx) + + if err != nil { + return nil, err + } + + for _, certSummary := range output.CertificateList { + // Get detailed certificate information + input := workspacesweb.GetTrustStoreCertificateInput{ + Thumbprint: certSummary.Thumbprint, + TrustStoreArn: aws.String(arn), + } + + output, err := conn.GetTrustStoreCertificate(ctx, &input) + + if err != nil { + return nil, err + } + + if output.Certificate != nil { + cert := certificateModel{ + Body: types.StringValue(string(output.Certificate.Body)), + Issuer: types.StringPointerValue(output.Certificate.Issuer), + NotValidAfter: types.StringValue(aws.ToTime(output.Certificate.NotValidAfter).Format(time.RFC3339)), + NotValidBefore: types.StringValue(aws.ToTime(output.Certificate.NotValidBefore).Format(time.RFC3339)), + Subject: types.StringPointerValue(output.Certificate.Subject), + Thumbprint: types.StringPointerValue(output.Certificate.Thumbprint), + } + certificates = append(certificates, cert) + } + } + } + + return certificates, nil +} + +type trustStoreResourceModel struct { + framework.WithRegionModel + AssociatedPortalARNs fwtypes.ListOfString `tfsdk:"associated_portal_arns"` + Certificates fwtypes.SetNestedObjectValueOf[certificateModel] `tfsdk:"certificate"` + Tags tftags.Map `tfsdk:"tags"` + TagsAll tftags.Map `tfsdk:"tags_all"` + TrustStoreARN types.String 
`tfsdk:"trust_store_arn"` +} + +type certificateModel struct { + Body types.String `tfsdk:"body"` + Issuer types.String `tfsdk:"issuer"` + NotValidAfter types.String `tfsdk:"not_valid_after"` + NotValidBefore types.String `tfsdk:"not_valid_before"` + Subject types.String `tfsdk:"subject"` + Thumbprint types.String `tfsdk:"thumbprint"` +} diff --git a/internal/service/workspacesweb/trust_store_association.go b/internal/service/workspacesweb/trust_store_association.go new file mode 100644 index 000000000000..fda6773b7890 --- /dev/null +++ b/internal/service/workspacesweb/trust_store_association.go @@ -0,0 +1,164 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package workspacesweb + +import ( + "context" + "fmt" + "slices" + + "github.com/aws/aws-sdk-go-v2/service/workspacesweb" + awstypes "github.com/aws/aws-sdk-go-v2/service/workspacesweb/types" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" + "github.com/hashicorp/terraform-provider-aws/internal/errs" + "github.com/hashicorp/terraform-provider-aws/internal/errs/fwdiag" + intflex "github.com/hashicorp/terraform-provider-aws/internal/flex" + "github.com/hashicorp/terraform-provider-aws/internal/framework" + fwtypes "github.com/hashicorp/terraform-provider-aws/internal/framework/types" + tfretry "github.com/hashicorp/terraform-provider-aws/internal/retry" +) + +// @FrameworkResource("aws_workspacesweb_trust_store_association", name="Trust Store Association") +// @Testing(tagsTest=false) +// @Testing(existsType="github.com/aws/aws-sdk-go-v2/service/workspacesweb/types;types.TrustStore") +// @Testing(importStateIdAttribute="trust_store_arn,portal_arn") +func 
newTrustStoreAssociationResource(_ context.Context) (resource.ResourceWithConfigure, error) {
	return &trustStoreAssociationResource{}, nil
}

// trustStoreAssociationResource manages the association between a WorkSpaces
// Web trust store and a portal. There is no in-place update
// (framework.WithNoUpdate); changing either ARN forces replacement.
type trustStoreAssociationResource struct {
	framework.ResourceWithModel[trustStoreAssociationResourceModel]
	framework.WithNoUpdate
}

// Schema defines the two required ARN attributes, both replace-on-change.
func (r *trustStoreAssociationResource) Schema(ctx context.Context, request resource.SchemaRequest, response *resource.SchemaResponse) {
	response.Schema = schema.Schema{
		Attributes: map[string]schema.Attribute{
			"portal_arn": schema.StringAttribute{
				CustomType: fwtypes.ARNType,
				Required:   true,
				PlanModifiers: []planmodifier.String{
					stringplanmodifier.RequiresReplace(),
				},
			},
			"trust_store_arn": schema.StringAttribute{
				CustomType: fwtypes.ARNType,
				Required:   true,
				PlanModifiers: []planmodifier.String{
					stringplanmodifier.RequiresReplace(),
				},
			},
		},
	}
}

// Create associates the trust store with the portal via a single
// AssociateTrustStore call; no asynchronous state is waited on.
func (r *trustStoreAssociationResource) Create(ctx context.Context, request resource.CreateRequest, response *resource.CreateResponse) {
	var data trustStoreAssociationResourceModel
	response.Diagnostics.Append(request.Plan.Get(ctx, &data)...)
	if response.Diagnostics.HasError() {
		return
	}

	conn := r.Meta().WorkSpacesWebClient(ctx)

	input := workspacesweb.AssociateTrustStoreInput{
		TrustStoreArn: data.TrustStoreARN.ValueStringPointer(),
		PortalArn:     data.PortalARN.ValueStringPointer(),
	}

	_, err := conn.AssociateTrustStore(ctx, &input)

	if err != nil {
		response.Diagnostics.AddError("creating WorkSpacesWeb Trust Store Association", err.Error())
		return
	}

	response.Diagnostics.Append(response.State.Set(ctx, data)...)
}

// Read verifies the association still exists. There is no "get association"
// API here, so the trust store is fetched and its associated portal ARNs are
// searched for the portal tracked in state; the resource is removed from
// state when either the trust store or the association is gone.
func (r *trustStoreAssociationResource) Read(ctx context.Context, request resource.ReadRequest, response *resource.ReadResponse) {
	var data trustStoreAssociationResourceModel
	response.Diagnostics.Append(request.State.Get(ctx, &data)...)
	if response.Diagnostics.HasError() {
		return
	}

	conn := r.Meta().WorkSpacesWebClient(ctx)

	// Check if the association exists by getting the trust store and checking associated portals.
	output, err := findTrustStoreByARN(ctx, conn, data.TrustStoreARN.ValueString())
	if tfretry.NotFound(err) {
		response.Diagnostics.Append(fwdiag.NewResourceNotFoundWarningDiagnostic(err))
		response.State.RemoveResource(ctx)
		return
	}

	if err != nil {
		response.Diagnostics.AddError(fmt.Sprintf("reading WorkSpacesWeb Trust Store Association (%s)", data.TrustStoreARN.ValueString()), err.Error())
		return
	}

	// Check if the portal is in the associated portals list. A trust store
	// that exists but no longer references the portal means the association
	// was removed out of band.
	if !slices.Contains(output.AssociatedPortalArns, data.PortalARN.ValueString()) {
		response.Diagnostics.Append(fwdiag.NewResourceNotFoundWarningDiagnostic(fmt.Errorf("association not found")))
		response.State.RemoveResource(ctx)
		return
	}

	response.Diagnostics.Append(response.State.Set(ctx, &data)...)
}

// Delete disassociates the trust store from the portal. Only the portal ARN
// is passed — as used here, DisassociateTrustStore is keyed by portal alone
// (TODO confirm against the API reference). ResourceNotFoundException is
// treated as already deleted.
func (r *trustStoreAssociationResource) Delete(ctx context.Context, request resource.DeleteRequest, response *resource.DeleteResponse) {
	var data trustStoreAssociationResourceModel
	response.Diagnostics.Append(request.State.Get(ctx, &data)...)
	if response.Diagnostics.HasError() {
		return
	}

	conn := r.Meta().WorkSpacesWebClient(ctx)

	input := workspacesweb.DisassociateTrustStoreInput{
		PortalArn: data.PortalARN.ValueStringPointer(),
	}

	_, err := conn.DisassociateTrustStore(ctx, &input)

	if errs.IsA[*awstypes.ResourceNotFoundException](err) {
		return
	}

	if err != nil {
		response.Diagnostics.AddError(fmt.Sprintf("deleting WorkSpacesWeb Trust Store Association (%s)", data.TrustStoreARN.ValueString()), err.Error())
		return
	}
}

// ImportState splits a comma-separated "trust_store_arn,portal_arn" import ID
// into the two state attributes.
func (r *trustStoreAssociationResource) ImportState(ctx context.Context, request resource.ImportStateRequest, response *resource.ImportStateResponse) {
	const (
		trustStoreAssociationIDParts = 2
	)
	parts, err := intflex.ExpandResourceId(request.ID, trustStoreAssociationIDParts, true)
	if err != nil {
		response.Diagnostics.AddError(
			"Unexpected Import Identifier",
			fmt.Sprintf("Expected import identifier with format: trust_store_arn,portal_arn. Got: %q", request.ID),
		)
		return
	}
	trustStoreARN := parts[0]
	portalARN := parts[1]

	response.Diagnostics.Append(response.State.SetAttribute(ctx, path.Root("trust_store_arn"), trustStoreARN)...)
	response.Diagnostics.Append(response.State.SetAttribute(ctx, path.Root("portal_arn"), portalARN)...)
}

// trustStoreAssociationResourceModel is the Terraform state model for
// aws_workspacesweb_trust_store_association.
type trustStoreAssociationResourceModel struct {
	framework.WithRegionModel
	PortalARN     fwtypes.ARN `tfsdk:"portal_arn"`
	TrustStoreARN fwtypes.ARN `tfsdk:"trust_store_arn"`
}
diff --git a/internal/service/workspacesweb/trust_store_association_test.go b/internal/service/workspacesweb/trust_store_association_test.go
new file mode 100644
index 000000000000..3e7d63309863
--- /dev/null
+++ b/internal/service/workspacesweb/trust_store_association_test.go
@@ -0,0 +1,231 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0

package workspacesweb_test

import (
	"context"
	"fmt"
	"slices"
	"testing"

	awstypes "github.com/aws/aws-sdk-go-v2/service/workspacesweb/types"
	"github.com/hashicorp/terraform-plugin-testing/helper/resource"
	"github.com/hashicorp/terraform-plugin-testing/terraform"
	"github.com/hashicorp/terraform-provider-aws/internal/acctest"
	"github.com/hashicorp/terraform-provider-aws/internal/conns"
	tfworkspacesweb "github.com/hashicorp/terraform-provider-aws/internal/service/workspacesweb"
	"github.com/hashicorp/terraform-provider-aws/internal/tfresource"
	"github.com/hashicorp/terraform-provider-aws/names"
)

// TestAccWorkSpacesWebTrustStoreAssociation_basic creates an association,
// verifies its attributes and import behavior, then re-applies and checks
// that the trust store and portal resources reflect the association.
func TestAccWorkSpacesWebTrustStoreAssociation_basic(t *testing.T) {
	ctx := acctest.Context(t)
	var trustStore awstypes.TrustStore
	resourceName := "aws_workspacesweb_trust_store_association.test"
	trustStoreResourceName := "aws_workspacesweb_trust_store.test"
	portalResourceName := "aws_workspacesweb_portal.test"

	resource.ParallelTest(t, resource.TestCase{
		PreCheck: func() {
			acctest.PreCheck(ctx, t)
			acctest.PreCheckPartitionHasService(t, names.WorkSpacesWebEndpointID)
			testAccPreCheck(ctx, t)
		},
		ErrorCheck:               acctest.ErrorCheck(t, names.WorkSpacesWebServiceID),
		ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories,
		CheckDestroy:             testAccCheckTrustStoreAssociationDestroy(ctx),
		Steps: []resource.TestStep{
			{
				Config: testAccTrustStoreAssociationConfig_basic(),
				Check: resource.ComposeAggregateTestCheckFunc(
					testAccCheckTrustStoreAssociationExists(ctx, resourceName, &trustStore),
					resource.TestCheckResourceAttrPair(resourceName, "trust_store_arn", trustStoreResourceName, "trust_store_arn"),
					resource.TestCheckResourceAttrPair(resourceName, "portal_arn", portalResourceName, "portal_arn"),
				),
			},
			{
				ResourceName:                         resourceName,
				ImportState:                          true,
				ImportStateVerify:                    true,
				ImportStateIdFunc:                    testAccTrustStoreAssociationImportStateIdFunc(resourceName),
				ImportStateVerifyIdentifierAttribute: "trust_store_arn",
			},
			{
				ResourceName: resourceName,
				RefreshState: true,
			},
			{
				Config: testAccTrustStoreAssociationConfig_basic(),
				Check: resource.ComposeAggregateTestCheckFunc(
					// The following checks are for the trust store and portal
					// resources (not for the association resource itself).
					resource.TestCheckResourceAttr(trustStoreResourceName, "associated_portal_arns.#", "1"),
					resource.TestCheckResourceAttrPair(trustStoreResourceName, "associated_portal_arns.0", portalResourceName, "portal_arn"),
					resource.TestCheckResourceAttrPair(portalResourceName, "trust_store_arn", trustStoreResourceName, "trust_store_arn"),
				),
			},
		},
	})
}

// TestAccWorkSpacesWebTrustStoreAssociation_disappears deletes the
// association out of band and expects Terraform to plan its re-creation.
func TestAccWorkSpacesWebTrustStoreAssociation_disappears(t *testing.T) {
	ctx := acctest.Context(t)
	var trustStore awstypes.TrustStore
	resourceName := "aws_workspacesweb_trust_store_association.test"

	resource.ParallelTest(t, resource.TestCase{
		PreCheck: func() {
			acctest.PreCheck(ctx, t)
			acctest.PreCheckPartitionHasService(t, names.WorkSpacesWebEndpointID)
			testAccPreCheck(ctx, t)
		},
		ErrorCheck:               acctest.ErrorCheck(t, names.WorkSpacesWebServiceID),
		ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories,
		CheckDestroy:             testAccCheckTrustStoreAssociationDestroy(ctx),
		Steps: []resource.TestStep{
			{
				Config: testAccTrustStoreAssociationConfig_basic(),
				Check: resource.ComposeAggregateTestCheckFunc(
					testAccCheckTrustStoreAssociationExists(ctx, resourceName, &trustStore),
					acctest.CheckFrameworkResourceDisappears(ctx, acctest.Provider, tfworkspacesweb.ResourceTrustStoreAssociation, resourceName),
				),
				ExpectNonEmptyPlan: true,
			},
		},
	})
}

// testAccCheckTrustStoreAssociationDestroy verifies no tracked association
// survives: either the trust store itself is gone, or its associated portal
// list no longer contains the portal from state.
func testAccCheckTrustStoreAssociationDestroy(ctx context.Context) resource.TestCheckFunc {
	return func(s *terraform.State) error {
		conn := acctest.Provider.Meta().(*conns.AWSClient).WorkSpacesWebClient(ctx)

		for _, rs := range s.RootModule().Resources {
			if rs.Type != "aws_workspacesweb_trust_store_association" {
				continue
			}

			trustStore, err := tfworkspacesweb.FindTrustStoreByARN(ctx, conn, rs.Primary.Attributes["trust_store_arn"])

			if tfresource.NotFound(err) {
				continue
			}

			if err != nil {
				return err
			}

			// Check if the portal is still associated.
			portalARN := rs.Primary.Attributes["portal_arn"]
			if slices.Contains(trustStore.AssociatedPortalArns, portalARN) {
				return fmt.Errorf("WorkSpaces Web Trust Store Association %s still exists", rs.Primary.Attributes["trust_store_arn"])
			}
		}

		return nil
	}
}

// testAccCheckTrustStoreAssociationExists fetches the trust store, asserts
// that the portal in state appears among its associated portal ARNs, and
// captures the trust store into v.
func testAccCheckTrustStoreAssociationExists(ctx context.Context, n string, v *awstypes.TrustStore) resource.TestCheckFunc {
	return func(s *terraform.State) error {
		rs, ok := s.RootModule().Resources[n]
		if !ok {
			return fmt.Errorf("Not found: %s", n)
		}

		conn := acctest.Provider.Meta().(*conns.AWSClient).WorkSpacesWebClient(ctx)

		output, err := tfworkspacesweb.FindTrustStoreByARN(ctx, conn, rs.Primary.Attributes["trust_store_arn"])

		if err != nil {
			return err
		}

		// Check if the portal is associated.
		portalARN := rs.Primary.Attributes["portal_arn"]
		if !slices.Contains(output.AssociatedPortalArns, portalARN) {
			return fmt.Errorf("Association not found")
		}

		*v = *output

		return nil
	}
}

// testAccTrustStoreAssociationImportStateIdFunc builds the
// "trust_store_arn,portal_arn" import ID expected by ImportState.
func testAccTrustStoreAssociationImportStateIdFunc(resourceName string) resource.ImportStateIdFunc {
	return func(s *terraform.State) (string, error) {
		rs, ok := s.RootModule().Resources[resourceName]
		if !ok {
			return "", fmt.Errorf("Not found: %s", resourceName)
		}

		return fmt.Sprintf("%s,%s", rs.Primary.Attributes["trust_store_arn"], rs.Primary.Attributes["portal_arn"]), nil
	}
}

// testAccTrustStoreAssociationConfig_acmBase provisions a private root CA and
// two root certificates whose bodies can be loaded into a trust store.
func testAccTrustStoreAssociationConfig_acmBase() string {
	return `
data "aws_partition" "current" {}

resource "aws_acmpca_certificate_authority" "test" {
  type = "ROOT"

  certificate_authority_configuration {
    key_algorithm     = "RSA_2048"
    signing_algorithm = "SHA256WITHRSA"

    subject {
      common_name = "example.com"
    }
  }
  permanent_deletion_time_in_days = 7
}

resource "aws_acmpca_certificate" "test1" {
  certificate_authority_arn   = aws_acmpca_certificate_authority.test.arn
  certificate_signing_request = aws_acmpca_certificate_authority.test.certificate_signing_request
  signing_algorithm           = "SHA256WITHRSA"

  template_arn = "arn:${data.aws_partition.current.partition}:acm-pca:::template/RootCACertificate/V1"

  validity {
    type  = "YEARS"
    value = 1
  }
}

resource "aws_acmpca_certificate" "test2" {
  certificate_authority_arn   = aws_acmpca_certificate_authority.test.arn
  certificate_signing_request = aws_acmpca_certificate_authority.test.certificate_signing_request
  signing_algorithm           = "SHA256WITHRSA"

  template_arn = "arn:${data.aws_partition.current.partition}:acm-pca:::template/RootCACertificate/V1"

  validity {
    type  = "YEARS"
    value = 1
  }
}
`
}

// testAccTrustStoreAssociationConfig_basic composes the ACM PCA base with a
// portal, a trust store holding one certificate, and the association under
// test.
func testAccTrustStoreAssociationConfig_basic() string {
	return acctest.ConfigCompose(
		testAccTrustStoreAssociationConfig_acmBase(),
		`
resource "aws_workspacesweb_portal" "test" {
  display_name = "test"
}

resource "aws_workspacesweb_trust_store" "test" {
  certificate {
    body = aws_acmpca_certificate.test1.certificate
  }
}

resource "aws_workspacesweb_trust_store_association" "test" {
  trust_store_arn = aws_workspacesweb_trust_store.test.trust_store_arn
  portal_arn      = aws_workspacesweb_portal.test.portal_arn
}
`)
}
diff --git a/internal/service/workspacesweb/trust_store_tags_gen_test.go b/internal/service/workspacesweb/trust_store_tags_gen_test.go
new file mode 100644
index 000000000000..dd06651003cf
--- /dev/null
+++ b/internal/service/workspacesweb/trust_store_tags_gen_test.go
@@ -0,0 +1,2330 @@
// Code generated by internal/generate/tagstests/main.go; DO NOT EDIT.
+ +package workspacesweb_test + +import ( + "testing" + + "github.com/aws/aws-sdk-go-v2/service/workspacesweb/types" + "github.com/hashicorp/terraform-plugin-testing/config" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/knownvalue" + "github.com/hashicorp/terraform-plugin-testing/plancheck" + "github.com/hashicorp/terraform-plugin-testing/statecheck" + "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccWorkSpacesWebTrustStore_tags(t *testing.T) { + ctx := acctest.Context(t) + + var v types.TrustStore + resourceName := "aws_workspacesweb_trust_store.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), + CheckDestroy: testAccCheckTrustStoreDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + ConfigDirectory: config.StaticDirectory("testdata/TrustStore/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTrustStoreExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + 
plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/TrustStore/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, "trust_store_arn"), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "trust_store_arn", + ImportStateVerifyIgnore: []string{ + "certificate_list", + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/TrustStore/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1Updated), + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTrustStoreExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1Updated), + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1Updated), + acctest.CtKey2: 
knownvalue.StringExact(acctest.CtValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1Updated), + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1Updated), + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/TrustStore/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1Updated), + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, "trust_store_arn"), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "trust_store_arn", + ImportStateVerifyIgnore: []string{ + "certificate_list", + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/TrustStore/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTrustStoreExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + 
statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/TrustStore/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, "trust_store_arn"), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "trust_store_arn", + ImportStateVerifyIgnore: []string{ + "certificate_list", + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/TrustStore/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTrustStoreExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, 
plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/TrustStore/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, "trust_store_arn"), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "trust_store_arn", + ImportStateVerifyIgnore: []string{ + "certificate_list", + }, + }, + }, + }) +} + +func TestAccWorkSpacesWebTrustStore_tags_null(t *testing.T) { + ctx := acctest.Context(t) + + var v types.TrustStore + resourceName := "aws_workspacesweb_trust_store.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), + CheckDestroy: testAccCheckTrustStoreDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + ConfigDirectory: config.StaticDirectory("testdata/TrustStore/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: nil, + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTrustStoreExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.Null(), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + 
PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.Null(), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/TrustStore/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: nil, + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, "trust_store_arn"), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "trust_store_arn", + ImportStateVerifyIgnore: []string{ + acctest.CtTagsKey1, // The canonical value returned by the AWS API is "" + "certificate_list", + }, + }, + }, + }) +} + +func TestAccWorkSpacesWebTrustStore_tags_EmptyMap(t *testing.T) { + ctx := acctest.Context(t) + + var v types.TrustStore + resourceName := "aws_workspacesweb_trust_store.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), + CheckDestroy: testAccCheckTrustStoreDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + ConfigDirectory: config.StaticDirectory("testdata/TrustStore/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{}), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTrustStoreExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), 
knownvalue.MapExact(map[string]knownvalue.Check{})), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/TrustStore/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{}), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, "trust_store_arn"), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "trust_store_arn", + ImportStateVerifyIgnore: []string{ + acctest.CtTagsKey1, // The canonical value returned by the AWS API is "" + "certificate_list", + }, + }, + }, + }) +} + +func TestAccWorkSpacesWebTrustStore_tags_AddOnUpdate(t *testing.T) { + ctx := acctest.Context(t) + + var v types.TrustStore + resourceName := "aws_workspacesweb_trust_store.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), + CheckDestroy: testAccCheckTrustStoreDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + ConfigDirectory: config.StaticDirectory("testdata/TrustStore/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTrustStoreExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + 
statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/TrustStore/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTrustStoreExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, 
+ { + ConfigDirectory: config.StaticDirectory("testdata/TrustStore/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, "trust_store_arn"), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "trust_store_arn", + ImportStateVerifyIgnore: []string{ + "certificate_list", + }, + }, + }, + }) +} + +func TestAccWorkSpacesWebTrustStore_tags_EmptyTag_OnCreate(t *testing.T) { + ctx := acctest.Context(t) + + var v types.TrustStore + resourceName := "aws_workspacesweb_trust_store.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), + CheckDestroy: testAccCheckTrustStoreDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + ConfigDirectory: config.StaticDirectory("testdata/TrustStore/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTrustStoreExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + 
plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/TrustStore/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, "trust_store_arn"), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "trust_store_arn", + ImportStateVerifyIgnore: []string{ + "certificate_list", + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/TrustStore/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTrustStoreExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/TrustStore/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: nil, + }, + 
ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, "trust_store_arn"), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "trust_store_arn", + ImportStateVerifyIgnore: []string{ + "certificate_list", + }, + }, + }, + }) +} + +func TestAccWorkSpacesWebTrustStore_tags_EmptyTag_OnUpdate_Add(t *testing.T) { + ctx := acctest.Context(t) + + var v types.TrustStore + resourceName := "aws_workspacesweb_trust_store.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), + CheckDestroy: testAccCheckTrustStoreDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + ConfigDirectory: config.StaticDirectory("testdata/TrustStore/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTrustStoreExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + 
plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/TrustStore/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + acctest.CtKey2: config.StringVariable(""), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTrustStoreExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + acctest.CtKey2: knownvalue.StringExact(""), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + acctest.CtKey2: knownvalue.StringExact(""), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + acctest.CtKey2: knownvalue.StringExact(""), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + acctest.CtKey2: knownvalue.StringExact(""), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/TrustStore/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + 
acctest.CtKey1: config.StringVariable(acctest.CtValue1), + acctest.CtKey2: config.StringVariable(""), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, "trust_store_arn"), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "trust_store_arn", + ImportStateVerifyIgnore: []string{ + "certificate_list", + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/TrustStore/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTrustStoreExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/TrustStore/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: 
config.StringVariable(acctest.CtValue1), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, "trust_store_arn"), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "trust_store_arn", + ImportStateVerifyIgnore: []string{ + "certificate_list", + }, + }, + }, + }) +} + +func TestAccWorkSpacesWebTrustStore_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { + ctx := acctest.Context(t) + + var v types.TrustStore + resourceName := "aws_workspacesweb_trust_store.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), + CheckDestroy: testAccCheckTrustStoreDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + ConfigDirectory: config.StaticDirectory("testdata/TrustStore/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTrustStoreExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: 
knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/TrustStore/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTrustStoreExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/TrustStore/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, "trust_store_arn"), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: 
"trust_store_arn", + ImportStateVerifyIgnore: []string{ + "certificate_list", + }, + }, + }, + }) +} + +func TestAccWorkSpacesWebTrustStore_tags_DefaultTags_providerOnly(t *testing.T) { + ctx := acctest.Context(t) + + var v types.TrustStore + resourceName := "aws_workspacesweb_trust_store.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), + CheckDestroy: testAccCheckTrustStoreDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/TrustStore/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTrustStoreExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/TrustStore/tags_defaults/"), + ConfigVariables: 
config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, "trust_store_arn"), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "trust_store_arn", + ImportStateVerifyIgnore: []string{ + "certificate_list", + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/TrustStore/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1Updated), + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTrustStoreExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1Updated), + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1Updated), + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: 
acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/TrustStore/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1Updated), + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, "trust_store_arn"), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "trust_store_arn", + ImportStateVerifyIgnore: []string{ + "certificate_list", + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/TrustStore/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTrustStoreExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: 
acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/TrustStore/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, "trust_store_arn"), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "trust_store_arn", + ImportStateVerifyIgnore: []string{ + "certificate_list", + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/TrustStore/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTrustStoreExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/TrustStore/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, "trust_store_arn"), + ImportStateVerify: true, + 
ImportStateVerifyIdentifierAttribute: "trust_store_arn", + ImportStateVerifyIgnore: []string{ + "certificate_list", + }, + }, + }, + }) +} + +func TestAccWorkSpacesWebTrustStore_tags_DefaultTags_nonOverlapping(t *testing.T) { + ctx := acctest.Context(t) + + var v types.TrustStore + resourceName := "aws_workspacesweb_trust_store.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), + CheckDestroy: testAccCheckTrustStoreDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/TrustStore/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTrustStoreExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1), + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), 
knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1), + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/TrustStore/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, "trust_store_arn"), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "trust_store_arn", + ImportStateVerifyIgnore: []string{ + "certificate_list", + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/TrustStore/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1Updated), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: config.StringVariable(acctest.CtResourceValue2), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTrustStoreExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + 
statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1Updated), + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1Updated), + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/TrustStore/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1Updated), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: 
config.StringVariable(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: config.StringVariable(acctest.CtResourceValue2), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, "trust_store_arn"), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "trust_store_arn", + ImportStateVerifyIgnore: []string{ + "certificate_list", + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/TrustStore/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTrustStoreExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/TrustStore/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, "trust_store_arn"), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "trust_store_arn", + ImportStateVerifyIgnore: []string{ + "certificate_list", + }, + }, + }, + }) +} + +func 
TestAccWorkSpacesWebTrustStore_tags_DefaultTags_overlapping(t *testing.T) { + ctx := acctest.Context(t) + + var v types.TrustStore + resourceName := "aws_workspacesweb_trust_store.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), + CheckDestroy: testAccCheckTrustStoreDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/TrustStore/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtResourceValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTrustStoreExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), 
knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/TrustStore/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtResourceValue1), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, "trust_store_arn"), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "trust_store_arn", + ImportStateVerifyIgnore: []string{ + "certificate_list", + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/TrustStore/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtProviderValue1), + acctest.CtOverlapKey2: config.StringVariable("providervalue2"), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtResourceValue1), + acctest.CtOverlapKey2: config.StringVariable(acctest.CtResourceValue2), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTrustStoreExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + acctest.CtOverlapKey2: knownvalue.StringExact(acctest.CtResourceValue2), + 
})), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + acctest.CtOverlapKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + acctest.CtOverlapKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + acctest.CtOverlapKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/TrustStore/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtProviderValue1), + acctest.CtOverlapKey2: config.StringVariable("providervalue2"), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtResourceValue1), + acctest.CtOverlapKey2: config.StringVariable(acctest.CtResourceValue2), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, "trust_store_arn"), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "trust_store_arn", + ImportStateVerifyIgnore: []string{ + "certificate_list", + }, + }, + { + ProtoV5ProviderFactories: 
acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/TrustStore/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtResourceValue2), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTrustStoreExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue2), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/TrustStore/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: 
config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtResourceValue2), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, "trust_store_arn"), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "trust_store_arn", + ImportStateVerifyIgnore: []string{ + "certificate_list", + }, + }, + }, + }) +} + +func TestAccWorkSpacesWebTrustStore_tags_DefaultTags_updateToProviderOnly(t *testing.T) { + ctx := acctest.Context(t) + + var v types.TrustStore + resourceName := "aws_workspacesweb_trust_store.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), + CheckDestroy: testAccCheckTrustStoreDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/TrustStore/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTrustStoreExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), 
knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/TrustStore/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTrustStoreExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/TrustStore/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + 
ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, "trust_store_arn"), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "trust_store_arn", + ImportStateVerifyIgnore: []string{ + "certificate_list", + }, + }, + }, + }) +} + +func TestAccWorkSpacesWebTrustStore_tags_DefaultTags_updateToResourceOnly(t *testing.T) { + ctx := acctest.Context(t) + + var v types.TrustStore + resourceName := "aws_workspacesweb_trust_store.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), + CheckDestroy: testAccCheckTrustStoreDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/TrustStore/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTrustStoreExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, 
+ { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/TrustStore/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTrustStoreExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/TrustStore/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, "trust_store_arn"), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "trust_store_arn", + ImportStateVerifyIgnore: 
[]string{ + "certificate_list", + }, + }, + }, + }) +} + +func TestAccWorkSpacesWebTrustStore_tags_DefaultTags_emptyResourceTag(t *testing.T) { + ctx := acctest.Context(t) + + var v types.TrustStore + resourceName := "aws_workspacesweb_trust_store.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), + CheckDestroy: testAccCheckTrustStoreDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/TrustStore/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTrustStoreExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + }, + 
}, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/TrustStore/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, "trust_store_arn"), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "trust_store_arn", + ImportStateVerifyIgnore: []string{ + "certificate_list", + }, + }, + }, + }) +} + +func TestAccWorkSpacesWebTrustStore_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T) { + ctx := acctest.Context(t) + + var v types.TrustStore + resourceName := "aws_workspacesweb_trust_store.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), + CheckDestroy: testAccCheckTrustStoreDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/TrustStore/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTrustStoreExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + }, + 
ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/TrustStore/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, "trust_store_arn"), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "trust_store_arn", + ImportStateVerifyIgnore: []string{ + "certificate_list", + }, + }, + }, + }) +} + +func TestAccWorkSpacesWebTrustStore_tags_DefaultTags_nullOverlappingResourceTag(t *testing.T) { + ctx := acctest.Context(t) + + var v types.TrustStore + resourceName := "aws_workspacesweb_trust_store.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), + CheckDestroy: testAccCheckTrustStoreDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/TrustStore/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: nil, + }), + }, + 
Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTrustStoreExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.Null(), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.Null(), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/TrustStore/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: nil, + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, "trust_store_arn"), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "trust_store_arn", + ImportStateVerifyIgnore: []string{ + acctest.CtTagsKey1, // The canonical value returned by the AWS API is "" + "certificate_list", + }, + }, + }, + }) +} + +func TestAccWorkSpacesWebTrustStore_tags_DefaultTags_nullNonOverlappingResourceTag(t *testing.T) { + ctx := acctest.Context(t) + + var v 
types.TrustStore + resourceName := "aws_workspacesweb_trust_store.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), + CheckDestroy: testAccCheckTrustStoreDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/TrustStore/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: nil, + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTrustStoreExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.Null(), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(""), + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.Null(), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(""), + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1), + })), + }, + }, + }, + { + 
ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/TrustStore/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: nil, + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, "trust_store_arn"), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "trust_store_arn", + ImportStateVerifyIgnore: []string{ + "tags.resourcekey1", // The canonical value returned by the AWS API is "" + "certificate_list", + }, + }, + }, + }) +} + +func TestAccWorkSpacesWebTrustStore_tags_ComputedTag_OnCreate(t *testing.T) { + ctx := acctest.Context(t) + + var v types.TrustStore + resourceName := "aws_workspacesweb_trust_store.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), + CheckDestroy: testAccCheckTrustStoreDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/TrustStore/tagsComputed1/"), + ConfigVariables: config.Variables{ + "unknownTagKey": config.StringVariable("computedkey1"), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTrustStoreExists(ctx, resourceName, &v), + resource.TestCheckResourceAttrPair(resourceName, "tags.computedkey1", "null_resource.test", names.AttrID), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapSizeExact(1)), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapSizeExact(1)), + }, + 
ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTags).AtMapKey("computedkey1")), + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/TrustStore/tagsComputed1/"), + ConfigVariables: config.Variables{ + "unknownTagKey": config.StringVariable("computedkey1"), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, "trust_store_arn"), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "trust_store_arn", + ImportStateVerifyIgnore: []string{ + "certificate_list", + }, + }, + }, + }) +} + +func TestAccWorkSpacesWebTrustStore_tags_ComputedTag_OnUpdate_Add(t *testing.T) { + ctx := acctest.Context(t) + + var v types.TrustStore + resourceName := "aws_workspacesweb_trust_store.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), + CheckDestroy: testAccCheckTrustStoreDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/TrustStore/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + 
testAccCheckTrustStoreExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/TrustStore/tagsComputed2/"), + ConfigVariables: config.Variables{ + "unknownTagKey": config.StringVariable("computedkey1"), + "knownTagKey": config.StringVariable(acctest.CtKey1), + "knownTagValue": config.StringVariable(acctest.CtValue1), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTrustStoreExists(ctx, resourceName, &v), + resource.TestCheckResourceAttrPair(resourceName, "tags.computedkey1", "null_resource.test", names.AttrID), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapSizeExact(2)), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapPartial(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + 
statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapSizeExact(2)), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapPartial(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTags).AtMapKey("computedkey1")), + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/TrustStore/tagsComputed2/"), + ConfigVariables: config.Variables{ + "unknownTagKey": config.StringVariable("computedkey1"), + "knownTagKey": config.StringVariable(acctest.CtKey1), + "knownTagValue": config.StringVariable(acctest.CtValue1), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, "trust_store_arn"), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "trust_store_arn", + ImportStateVerifyIgnore: []string{ + "certificate_list", + }, + }, + }, + }) +} + +func TestAccWorkSpacesWebTrustStore_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { + ctx := acctest.Context(t) + + var v types.TrustStore + resourceName := "aws_workspacesweb_trust_store.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), + CheckDestroy: 
testAccCheckTrustStoreDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/TrustStore/tags/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTrustStoreExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/TrustStore/tagsComputed1/"), + ConfigVariables: config.Variables{ + "unknownTagKey": config.StringVariable(acctest.CtKey1), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTrustStoreExists(ctx, resourceName, &v), + resource.TestCheckResourceAttrPair(resourceName, acctest.CtTagsKey1, "null_resource.test", names.AttrID), + ), + ConfigStateChecks: 
[]statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapSizeExact(1)), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapSizeExact(1)), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTags).AtMapKey(acctest.CtKey1)), + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/TrustStore/tagsComputed1/"), + ConfigVariables: config.Variables{ + "unknownTagKey": config.StringVariable(acctest.CtKey1), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, "trust_store_arn"), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "trust_store_arn", + ImportStateVerifyIgnore: []string{ + "certificate_list", + }, + }, + }, + }) +} + +func TestAccWorkSpacesWebTrustStore_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { + ctx := acctest.Context(t) + + var v types.TrustStore + resourceName := "aws_workspacesweb_trust_store.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), + CheckDestroy: testAccCheckTrustStoreDestroy(ctx), + Steps: []resource.TestStep{ + // 1: Create + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: 
config.StaticDirectory("testdata/TrustStore/tags_ignore/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1), + }), + "ignore_tag_keys": config.SetVariable( + config.StringVariable(acctest.CtProviderKey1), + ), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTrustStoreExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + expectFullResourceTags(ctx, resourceName, knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1), // TODO: Should not be set + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + 
plancheck.ExpectEmptyPlan(), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectEmptyPlan(), + }, + }, + }, + // 2: Update ignored tag only + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/TrustStore/tags_ignore/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1Updated), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1), + }), + "ignore_tag_keys": config.SetVariable( + config.StringVariable(acctest.CtProviderKey1), + ), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTrustStoreExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + expectFullResourceTags(ctx, resourceName, knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1), // TODO: Should not be set + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + 
plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectEmptyPlan(), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectEmptyPlan(), + }, + }, + }, + // 3: Update both tags + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/TrustStore/tags_ignore/"), + ConfigVariables: config.Variables{ + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1Again), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1Updated), + }), + "ignore_tag_keys": config.SetVariable( + config.StringVariable(acctest.CtProviderKey1), + ), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTrustStoreExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + })), + expectFullResourceTags(ctx, resourceName, knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1), // TODO: Should not be set + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + 
plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + })), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectEmptyPlan(), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectEmptyPlan(), + }, + }, + }, + }, + }) +} + +func TestAccWorkSpacesWebTrustStore_tags_IgnoreTags_Overlap_ResourceTag(t *testing.T) { + ctx := acctest.Context(t) + + var v types.TrustStore + resourceName := "aws_workspacesweb_trust_store.test" + + acctest.ParallelTest(ctx, t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), + CheckDestroy: testAccCheckTrustStoreDestroy(ctx), + Steps: []resource.TestStep{ + // 1: Create + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/TrustStore/tags_ignore/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1), + acctest.CtResourceKey2: config.StringVariable(acctest.CtResourceValue2), + }), + "ignore_tag_keys": config.SetVariable( + config.StringVariable(acctest.CtResourceKey1), + ), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTrustStoreExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: 
knownvalue.StringExact(acctest.CtResourceValue1), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + expectFullResourceTags(ctx, resourceName, knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), // TODO: Should not be set + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectEmptyPlan(), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), // TODO: Should be NoOp + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), 
knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + }, + ExpectNonEmptyPlan: true, + }, + // 2: Update ignored tag + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/TrustStore/tags_ignore/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: config.StringVariable(acctest.CtResourceValue2), + }), + "ignore_tag_keys": config.SetVariable( + config.StringVariable(acctest.CtResourceKey1), + ), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTrustStoreExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + expectFullResourceTags(ctx, resourceName, knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), // TODO: Should not be set + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: 
knownvalue.StringExact(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectEmptyPlan(), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), // TODO: Should be NoOp + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + }, + ExpectNonEmptyPlan: true, + }, + // 3: Update both tags + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/TrustStore/tags_ignore/"), + ConfigVariables: config.Variables{ + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1Again), + acctest.CtResourceKey2: config.StringVariable(acctest.CtResourceValue2Updated), + }), + "ignore_tag_keys": config.SetVariable( + config.StringVariable(acctest.CtResourceKey1), + ), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTrustStoreExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: 
knownvalue.StringExact(acctest.CtResourceValue1Again), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + expectFullResourceTags(ctx, resourceName, knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), // TODO: Should not be set + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Again), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectEmptyPlan(), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), // TODO: Should be NoOp + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Again), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + 
acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2Updated), + })), + }, + }, + ExpectNonEmptyPlan: true, + }, + }, + }) +} diff --git a/internal/service/workspacesweb/trust_store_test.go b/internal/service/workspacesweb/trust_store_test.go new file mode 100644 index 000000000000..4b4bae7e7434 --- /dev/null +++ b/internal/service/workspacesweb/trust_store_test.go @@ -0,0 +1,359 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package workspacesweb_test + +import ( + "context" + "fmt" + "testing" + + "github.com/YakDriver/regexache" + awstypes "github.com/aws/aws-sdk-go-v2/service/workspacesweb/types" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/terraform" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + tfworkspacesweb "github.com/hashicorp/terraform-provider-aws/internal/service/workspacesweb" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccWorkSpacesWebTrustStore_basic(t *testing.T) { + ctx := acctest.Context(t) + var trustStore awstypes.TrustStore + resourceName := "aws_workspacesweb_trust_store.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, names.WorkSpacesWebEndpointID) + testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckTrustStoreDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccTrustStoreConfig_basic(), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTrustStoreExists(ctx, resourceName, &trustStore), + resource.TestCheckResourceAttr(resourceName, "certificate.#", "1"), + acctest.MatchResourceAttrRegionalARN(ctx, 
resourceName, "trust_store_arn", "workspaces-web", regexache.MustCompile(`trustStore/.+$`)), + resource.TestCheckResourceAttrPair(resourceName, "certificate.0.body", "aws_acmpca_certificate.test1", names.AttrCertificate), + resource.TestCheckResourceAttrSet(resourceName, "certificate.0.issuer"), + resource.TestCheckResourceAttrSet(resourceName, "certificate.0.not_valid_after"), + resource.TestCheckResourceAttrSet(resourceName, "certificate.0.not_valid_before"), + resource.TestCheckResourceAttrSet(resourceName, "certificate.0.subject"), + resource.TestCheckResourceAttrSet(resourceName, "certificate.0.thumbprint"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, "trust_store_arn"), + ImportStateVerifyIdentifierAttribute: "trust_store_arn", + }, + }, + }) +} + +func TestAccWorkSpacesWebTrustStore_multipleCerts(t *testing.T) { + ctx := acctest.Context(t) + var trustStore awstypes.TrustStore + resourceName := "aws_workspacesweb_trust_store.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, names.WorkSpacesWebEndpointID) + testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckTrustStoreDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccTrustStoreConfig_multipleCerts(), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTrustStoreExists(ctx, resourceName, &trustStore), + resource.TestCheckResourceAttr(resourceName, "certificate.#", "2"), + acctest.MatchResourceAttrRegionalARN(ctx, resourceName, "trust_store_arn", "workspaces-web", regexache.MustCompile(`trustStore/.+$`)), + resource.TestCheckTypeSetElemAttrPair(resourceName, "certificate.*.body", "aws_acmpca_certificate.test1", names.AttrCertificate), + 
resource.TestCheckResourceAttrSet(resourceName, "certificate.0.issuer"), + resource.TestCheckResourceAttrSet(resourceName, "certificate.0.not_valid_after"), + resource.TestCheckResourceAttrSet(resourceName, "certificate.0.not_valid_before"), + resource.TestCheckResourceAttrSet(resourceName, "certificate.0.subject"), + resource.TestCheckResourceAttrSet(resourceName, "certificate.0.thumbprint"), + resource.TestCheckTypeSetElemAttrPair(resourceName, "certificate.*.body", "aws_acmpca_certificate.test2", names.AttrCertificate), + resource.TestCheckResourceAttrSet(resourceName, "certificate.1.issuer"), + resource.TestCheckResourceAttrSet(resourceName, "certificate.1.not_valid_after"), + resource.TestCheckResourceAttrSet(resourceName, "certificate.1.not_valid_before"), + resource.TestCheckResourceAttrSet(resourceName, "certificate.1.subject"), + resource.TestCheckResourceAttrSet(resourceName, "certificate.1.thumbprint"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, "trust_store_arn"), + ImportStateVerifyIdentifierAttribute: "trust_store_arn", + }, + }, + }) +} + +func TestAccWorkSpacesWebTrustStore_disappears(t *testing.T) { + ctx := acctest.Context(t) + var trustStore awstypes.TrustStore + resourceName := "aws_workspacesweb_trust_store.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, names.WorkSpacesWebEndpointID) + testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckTrustStoreDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccTrustStoreConfig_basic(), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTrustStoreExists(ctx, resourceName, &trustStore), + acctest.CheckFrameworkResourceDisappears(ctx, 
acctest.Provider, tfworkspacesweb.ResourceTrustStore, resourceName), + ), + ExpectNonEmptyPlan: true, + }, + }, + }) +} + +func TestAccWorkSpacesWebTrustStore_update(t *testing.T) { + ctx := acctest.Context(t) + var trustStore awstypes.TrustStore + resourceName := "aws_workspacesweb_trust_store.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, names.WorkSpacesWebEndpointID) + testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckTrustStoreDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccTrustStoreConfig_basic(), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTrustStoreExists(ctx, resourceName, &trustStore), + resource.TestCheckResourceAttr(resourceName, "certificate.#", "1"), + acctest.MatchResourceAttrRegionalARN(ctx, resourceName, "trust_store_arn", "workspaces-web", regexache.MustCompile(`trustStore/.+$`)), + resource.TestCheckResourceAttrPair(resourceName, "certificate.0.body", "aws_acmpca_certificate.test1", names.AttrCertificate), + resource.TestCheckResourceAttrSet(resourceName, "certificate.0.issuer"), + resource.TestCheckResourceAttrSet(resourceName, "certificate.0.not_valid_after"), + resource.TestCheckResourceAttrSet(resourceName, "certificate.0.not_valid_before"), + resource.TestCheckResourceAttrSet(resourceName, "certificate.0.subject"), + resource.TestCheckResourceAttrSet(resourceName, "certificate.0.thumbprint"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, "trust_store_arn"), + ImportStateVerifyIdentifierAttribute: "trust_store_arn", + }, + { + Config: testAccTrustStoreConfig_updatedAdd(), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTrustStoreExists(ctx, resourceName, 
&trustStore), + resource.TestCheckResourceAttr(resourceName, "certificate.#", "2"), + acctest.MatchResourceAttrRegionalARN(ctx, resourceName, "trust_store_arn", "workspaces-web", regexache.MustCompile(`trustStore/.+$`)), + resource.TestCheckTypeSetElemAttrPair(resourceName, "certificate.*.body", "aws_acmpca_certificate.test1", names.AttrCertificate), + resource.TestCheckResourceAttrSet(resourceName, "certificate.0.issuer"), + resource.TestCheckResourceAttrSet(resourceName, "certificate.0.not_valid_after"), + resource.TestCheckResourceAttrSet(resourceName, "certificate.0.not_valid_before"), + resource.TestCheckResourceAttrSet(resourceName, "certificate.0.subject"), + resource.TestCheckResourceAttrSet(resourceName, "certificate.0.thumbprint"), + resource.TestCheckTypeSetElemAttrPair(resourceName, "certificate.*.body", "aws_acmpca_certificate.test2", names.AttrCertificate), + resource.TestCheckResourceAttrSet(resourceName, "certificate.1.issuer"), + resource.TestCheckResourceAttrSet(resourceName, "certificate.1.not_valid_after"), + resource.TestCheckResourceAttrSet(resourceName, "certificate.1.not_valid_before"), + resource.TestCheckResourceAttrSet(resourceName, "certificate.1.subject"), + resource.TestCheckResourceAttrSet(resourceName, "certificate.1.thumbprint"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, "trust_store_arn"), + ImportStateVerifyIdentifierAttribute: "trust_store_arn", + }, + { + Config: testAccTrustStoreConfig_updatedRemove(), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTrustStoreExists(ctx, resourceName, &trustStore), + resource.TestCheckResourceAttr(resourceName, "certificate.#", "1"), + acctest.MatchResourceAttrRegionalARN(ctx, resourceName, "trust_store_arn", "workspaces-web", regexache.MustCompile(`trustStore/.+$`)), + resource.TestCheckResourceAttrPair(resourceName, "certificate.0.body", 
"aws_acmpca_certificate.test2", names.AttrCertificate), + resource.TestCheckResourceAttrSet(resourceName, "certificate.0.issuer"), + resource.TestCheckResourceAttrSet(resourceName, "certificate.0.not_valid_after"), + resource.TestCheckResourceAttrSet(resourceName, "certificate.0.not_valid_before"), + resource.TestCheckResourceAttrSet(resourceName, "certificate.0.subject"), + resource.TestCheckResourceAttrSet(resourceName, "certificate.0.thumbprint"), + ), + }, + }, + }) +} + +func testAccCheckTrustStoreDestroy(ctx context.Context) resource.TestCheckFunc { + return func(s *terraform.State) error { + conn := acctest.Provider.Meta().(*conns.AWSClient).WorkSpacesWebClient(ctx) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "aws_workspacesweb_trust_store" { + continue + } + + _, err := tfworkspacesweb.FindTrustStoreByARN(ctx, conn, rs.Primary.Attributes["trust_store_arn"]) + + if tfresource.NotFound(err) { + continue + } + + if err != nil { + return err + } + + return fmt.Errorf("WorkSpaces Web Trust Store %s still exists", rs.Primary.Attributes["trust_store_arn"]) + } + + return nil + } +} + +func testAccCheckTrustStoreExists(ctx context.Context, n string, v *awstypes.TrustStore) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + conn := acctest.Provider.Meta().(*conns.AWSClient).WorkSpacesWebClient(ctx) + + output, err := tfworkspacesweb.FindTrustStoreByARN(ctx, conn, rs.Primary.Attributes["trust_store_arn"]) + + if err != nil { + return err + } + + *v = *output + + return nil + } +} +func testAccTrustStoreConfig_acmBase() string { + return (` + +data "aws_partition" "current" {} + +resource "aws_acmpca_certificate_authority" "test" { + type = "ROOT" + + certificate_authority_configuration { + key_algorithm = "RSA_2048" + signing_algorithm = "SHA256WITHRSA" + + subject { + common_name = "example.com" + } + } +} + +resource 
"aws_acmpca_certificate" "test1" { + certificate_authority_arn = aws_acmpca_certificate_authority.test.arn + certificate_signing_request = aws_acmpca_certificate_authority.test.certificate_signing_request + signing_algorithm = "SHA256WITHRSA" + + template_arn = "arn:${data.aws_partition.current.partition}:acm-pca:::template/RootCACertificate/V1" + + validity { + type = "YEARS" + value = 1 + } +} + +resource "aws_acmpca_certificate" "test2" { + certificate_authority_arn = aws_acmpca_certificate_authority.test.arn + certificate_signing_request = aws_acmpca_certificate_authority.test.certificate_signing_request + signing_algorithm = "SHA256WITHRSA" + + template_arn = "arn:${data.aws_partition.current.partition}:acm-pca:::template/RootCACertificate/V1" + + validity { + type = "YEARS" + value = 1 + } +} +`) +} + +func testAccTrustStoreConfig_basic() string { + return acctest.ConfigCompose( + testAccTrustStoreConfig_acmBase(), + ` +resource "aws_workspacesweb_trust_store" "test" { + certificate { + body = aws_acmpca_certificate.test1.certificate + } +} +`) +} + +func testAccTrustStoreConfig_multipleCerts() string { + return acctest.ConfigCompose( + testAccTrustStoreConfig_acmBase(), + ` +resource "aws_workspacesweb_trust_store" "test" { + certificate { + body = aws_acmpca_certificate.test1.certificate + } + certificate { + body = aws_acmpca_certificate.test2.certificate + } +} +`) +} + +func testAccTrustStoreConfig_updatedAdd() string { + return acctest.ConfigCompose( + testAccTrustStoreConfig_acmBase(), + ` +resource "aws_workspacesweb_trust_store" "test" { + certificate { + body = aws_acmpca_certificate.test1.certificate + } + certificate { + body = aws_acmpca_certificate.test2.certificate + } +} +`) +} + +func testAccTrustStoreConfig_updatedRemove() string { + return acctest.ConfigCompose( + testAccTrustStoreConfig_acmBase(), + ` +resource "aws_workspacesweb_trust_store" "test" { + certificate { + body = aws_acmpca_certificate.test2.certificate + } +} +`) +} diff 
--git a/internal/service/workspacesweb/user_access_logging_settings_association.go b/internal/service/workspacesweb/user_access_logging_settings_association.go new file mode 100644 index 000000000000..ac37bbe162e5 --- /dev/null +++ b/internal/service/workspacesweb/user_access_logging_settings_association.go @@ -0,0 +1,164 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package workspacesweb + +import ( + "context" + "fmt" + "slices" + + "github.com/aws/aws-sdk-go-v2/service/workspacesweb" + awstypes "github.com/aws/aws-sdk-go-v2/service/workspacesweb/types" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" + "github.com/hashicorp/terraform-provider-aws/internal/errs" + "github.com/hashicorp/terraform-provider-aws/internal/errs/fwdiag" + intflex "github.com/hashicorp/terraform-provider-aws/internal/flex" + "github.com/hashicorp/terraform-provider-aws/internal/framework" + fwtypes "github.com/hashicorp/terraform-provider-aws/internal/framework/types" + tfretry "github.com/hashicorp/terraform-provider-aws/internal/retry" +) + +// @FrameworkResource("aws_workspacesweb_user_access_logging_settings_association", name="User Access Logging Settings Association") +// @Testing(tagsTest=false) +// @Testing(existsType="github.com/aws/aws-sdk-go-v2/service/workspacesweb/types;types.UserAccessLoggingSettings") +// @Testing(importStateIdAttribute="user_access_logging_settings_arn,portal_arn") +func newUserAccessLoggingSettingsAssociationResource(_ context.Context) (resource.ResourceWithConfigure, error) { + return &userAccessLoggingSettingsAssociationResource{}, nil +} + +type userAccessLoggingSettingsAssociationResource struct { + 
framework.ResourceWithModel[userAccessLoggingSettingsAssociationResourceModel] + framework.WithNoUpdate +} + +func (r *userAccessLoggingSettingsAssociationResource) Schema(ctx context.Context, request resource.SchemaRequest, response *resource.SchemaResponse) { + response.Schema = schema.Schema{ + Attributes: map[string]schema.Attribute{ + "portal_arn": schema.StringAttribute{ + CustomType: fwtypes.ARNType, + Required: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + }, + "user_access_logging_settings_arn": schema.StringAttribute{ + CustomType: fwtypes.ARNType, + Required: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + }, + }, + } +} + +func (r *userAccessLoggingSettingsAssociationResource) Create(ctx context.Context, request resource.CreateRequest, response *resource.CreateResponse) { + var data userAccessLoggingSettingsAssociationResourceModel + response.Diagnostics.Append(request.Plan.Get(ctx, &data)...) + if response.Diagnostics.HasError() { + return + } + + conn := r.Meta().WorkSpacesWebClient(ctx) + + input := workspacesweb.AssociateUserAccessLoggingSettingsInput{ + UserAccessLoggingSettingsArn: data.UserAccessLoggingSettingsARN.ValueStringPointer(), + PortalArn: data.PortalARN.ValueStringPointer(), + } + + _, err := conn.AssociateUserAccessLoggingSettings(ctx, &input) + + if err != nil { + response.Diagnostics.AddError("creating WorkSpacesWeb User Access Logging Settings Association", err.Error()) + return + } + + response.Diagnostics.Append(response.State.Set(ctx, data)...) +} + +func (r *userAccessLoggingSettingsAssociationResource) Read(ctx context.Context, request resource.ReadRequest, response *resource.ReadResponse) { + var data userAccessLoggingSettingsAssociationResourceModel + response.Diagnostics.Append(request.State.Get(ctx, &data)...) 
+ if response.Diagnostics.HasError() { + return + } + + conn := r.Meta().WorkSpacesWebClient(ctx) + + // Check if the association exists by getting the user access logging settings and checking associated portals + output, err := findUserAccessLoggingSettingsByARN(ctx, conn, data.UserAccessLoggingSettingsARN.ValueString()) + if tfretry.NotFound(err) { + response.Diagnostics.Append(fwdiag.NewResourceNotFoundWarningDiagnostic(err)) + response.State.RemoveResource(ctx) + return + } + + if err != nil { + response.Diagnostics.AddError(fmt.Sprintf("reading WorkSpacesWeb User Access Logging Settings Association (%s)", data.UserAccessLoggingSettingsARN.ValueString()), err.Error()) + return + } + + // Check if the portal is in the associated portals list + if !slices.Contains(output.AssociatedPortalArns, data.PortalARN.ValueString()) { + response.Diagnostics.Append(fwdiag.NewResourceNotFoundWarningDiagnostic(fmt.Errorf("association not found"))) + response.State.RemoveResource(ctx) + return + } + + response.Diagnostics.Append(response.State.Set(ctx, &data)...) +} + +func (r *userAccessLoggingSettingsAssociationResource) Delete(ctx context.Context, request resource.DeleteRequest, response *resource.DeleteResponse) { + var data userAccessLoggingSettingsAssociationResourceModel + response.Diagnostics.Append(request.State.Get(ctx, &data)...) 
+ if response.Diagnostics.HasError() { + return + } + + conn := r.Meta().WorkSpacesWebClient(ctx) + + input := workspacesweb.DisassociateUserAccessLoggingSettingsInput{ + PortalArn: data.PortalARN.ValueStringPointer(), + } + + _, err := conn.DisassociateUserAccessLoggingSettings(ctx, &input) + + if errs.IsA[*awstypes.ResourceNotFoundException](err) { + return + } + + if err != nil { + response.Diagnostics.AddError(fmt.Sprintf("deleting WorkSpacesWeb User Access Logging Settings Association (%s)", data.UserAccessLoggingSettingsARN.ValueString()), err.Error()) + return + } +} + +func (r *userAccessLoggingSettingsAssociationResource) ImportState(ctx context.Context, request resource.ImportStateRequest, response *resource.ImportStateResponse) { + const ( + userAccessLoggingSettingsAssociationIDParts = 2 + ) + parts, err := intflex.ExpandResourceId(request.ID, userAccessLoggingSettingsAssociationIDParts, true) + if err != nil { + response.Diagnostics.AddError( + "Unexpected Import Identifier", + fmt.Sprintf("Expected import identifier with format: user_access_logging_settings_arn,portal_arn. Got: %q", request.ID), + ) + return + } + userAccessLoggingSettingsARN := parts[0] + portalARN := parts[1] + + response.Diagnostics.Append(response.State.SetAttribute(ctx, path.Root("user_access_logging_settings_arn"), userAccessLoggingSettingsARN)...) + response.Diagnostics.Append(response.State.SetAttribute(ctx, path.Root("portal_arn"), portalARN)...) 
+} + +type userAccessLoggingSettingsAssociationResourceModel struct { + framework.WithRegionModel + PortalARN fwtypes.ARN `tfsdk:"portal_arn"` + UserAccessLoggingSettingsARN fwtypes.ARN `tfsdk:"user_access_logging_settings_arn"` +} diff --git a/internal/service/workspacesweb/user_access_logging_settings_association_test.go b/internal/service/workspacesweb/user_access_logging_settings_association_test.go new file mode 100644 index 000000000000..8fd7d7ad9c30 --- /dev/null +++ b/internal/service/workspacesweb/user_access_logging_settings_association_test.go @@ -0,0 +1,189 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package workspacesweb_test + +import ( + "context" + "fmt" + "slices" + "testing" + + awstypes "github.com/aws/aws-sdk-go-v2/service/workspacesweb/types" + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/terraform" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + tfworkspacesweb "github.com/hashicorp/terraform-provider-aws/internal/service/workspacesweb" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccWorkSpacesWebUserAccessLoggingSettingsAssociation_basic(t *testing.T) { + ctx := acctest.Context(t) + var userAccessLoggingSettings awstypes.UserAccessLoggingSettings + resourceName := "aws_workspacesweb_user_access_logging_settings_association.test" + userAccessLoggingSettingsResourceName := "aws_workspacesweb_user_access_logging_settings.test" + portalResourceName := "aws_workspacesweb_portal.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, 
				names.WorkSpacesWebEndpointID)
			testAccPreCheck(ctx, t)
		},
		ErrorCheck:               acctest.ErrorCheck(t, names.WorkSpacesWebServiceID),
		ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories,
		CheckDestroy:             testAccCheckUserAccessLoggingSettingsAssociationDestroy(ctx),
		Steps: []resource.TestStep{
			{
				Config: testAccUserAccessLoggingSettingsAssociationConfig_basic(rName),
				Check: resource.ComposeAggregateTestCheckFunc(
					testAccCheckUserAccessLoggingSettingsAssociationExists(ctx, resourceName, &userAccessLoggingSettings),
					resource.TestCheckResourceAttrPair(resourceName, "user_access_logging_settings_arn", userAccessLoggingSettingsResourceName, "user_access_logging_settings_arn"),
					resource.TestCheckResourceAttrPair(resourceName, "portal_arn", portalResourceName, "portal_arn"),
				),
			},
			{
				ResourceName:                         resourceName,
				ImportState:                          true,
				ImportStateVerify:                    true,
				ImportStateIdFunc:                    testAccUserAccessLoggingSettingsAssociationImportStateIdFunc(resourceName),
				ImportStateVerifyIdentifierAttribute: "user_access_logging_settings_arn",
			},
			{
				ResourceName: resourceName,
				RefreshState: true,
			},
			{
				Config: testAccUserAccessLoggingSettingsAssociationConfig_basic(rName),
				Check: resource.ComposeAggregateTestCheckFunc(
					// The following checks are for the UserAccessLoggingSettings resource and the Portal resource (and not for the association resource).
					resource.TestCheckResourceAttr(userAccessLoggingSettingsResourceName, "associated_portal_arns.#", "1"),
					resource.TestCheckResourceAttrPair(userAccessLoggingSettingsResourceName, "associated_portal_arns.0", portalResourceName, "portal_arn"),
					resource.TestCheckResourceAttrPair(portalResourceName, "user_access_logging_settings_arn", userAccessLoggingSettingsResourceName, "user_access_logging_settings_arn"),
				),
			},
		},
	})
}

// Acceptance test: delete the association out of band and verify Terraform
// plans its recreation (ExpectNonEmptyPlan).
func TestAccWorkSpacesWebUserAccessLoggingSettingsAssociation_disappears(t *testing.T) {
	ctx := acctest.Context(t)
	var userAccessLoggingSettings awstypes.UserAccessLoggingSettings
	resourceName := "aws_workspacesweb_user_access_logging_settings_association.test"
	rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix)

	resource.ParallelTest(t, resource.TestCase{
		PreCheck: func() {
			acctest.PreCheck(ctx, t)
			acctest.PreCheckPartitionHasService(t, names.WorkSpacesWebEndpointID)
			testAccPreCheck(ctx, t)
		},
		ErrorCheck:               acctest.ErrorCheck(t, names.WorkSpacesWebServiceID),
		ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories,
		CheckDestroy:             testAccCheckUserAccessLoggingSettingsAssociationDestroy(ctx),
		Steps: []resource.TestStep{
			{
				Config: testAccUserAccessLoggingSettingsAssociationConfig_basic(rName),
				Check: resource.ComposeAggregateTestCheckFunc(
					testAccCheckUserAccessLoggingSettingsAssociationExists(ctx, resourceName, &userAccessLoggingSettings),
					acctest.CheckFrameworkResourceDisappears(ctx, acctest.Provider, tfworkspacesweb.ResourceUserAccessLoggingSettingsAssociation, resourceName),
				),
				ExpectNonEmptyPlan: true,
			},
		},
	})
}

// testAccCheckUserAccessLoggingSettingsAssociationDestroy verifies no portal is
// still associated with the settings after destroy. A NotFound on the settings
// themselves also counts as destroyed.
func testAccCheckUserAccessLoggingSettingsAssociationDestroy(ctx context.Context) resource.TestCheckFunc {
	return func(s *terraform.State) error {
		conn := acctest.Provider.Meta().(*conns.AWSClient).WorkSpacesWebClient(ctx)

		for _, rs := range s.RootModule().Resources {
			if rs.Type != "aws_workspacesweb_user_access_logging_settings_association" {
				continue
			}

			userAccessLoggingSettings, err := tfworkspacesweb.FindUserAccessLoggingSettingsByARN(ctx, conn, rs.Primary.Attributes["user_access_logging_settings_arn"])

			if tfresource.NotFound(err) {
				continue
			}

			if err != nil {
				return err
			}

			// Check if the portal is still associated
			portalARN := rs.Primary.Attributes["portal_arn"]
			if slices.Contains(userAccessLoggingSettings.AssociatedPortalArns, portalARN) {
				return fmt.Errorf("WorkSpaces Web User Access Logging Settings Association %s still exists", rs.Primary.Attributes["user_access_logging_settings_arn"])
			}
		}

		return nil
	}
}

// testAccCheckUserAccessLoggingSettingsAssociationExists confirms the portal is
// listed in the settings' AssociatedPortalArns and captures the settings object.
func testAccCheckUserAccessLoggingSettingsAssociationExists(ctx context.Context, n string, v *awstypes.UserAccessLoggingSettings) resource.TestCheckFunc {
	return func(s *terraform.State) error {
		rs, ok := s.RootModule().Resources[n]
		if !ok {
			return fmt.Errorf("Not found: %s", n)
		}

		conn := acctest.Provider.Meta().(*conns.AWSClient).WorkSpacesWebClient(ctx)

		output, err := tfworkspacesweb.FindUserAccessLoggingSettingsByARN(ctx, conn, rs.Primary.Attributes["user_access_logging_settings_arn"])

		if err != nil {
			return err
		}

		// Check if the portal is associated
		portalARN := rs.Primary.Attributes["portal_arn"]
		if !slices.Contains(output.AssociatedPortalArns, portalARN) {
			return fmt.Errorf("Association not found")
		}

		*v = *output

		return nil
	}
}

// testAccUserAccessLoggingSettingsAssociationImportStateIdFunc builds the
// comma-separated import ID expected by ImportState.
func testAccUserAccessLoggingSettingsAssociationImportStateIdFunc(resourceName string) resource.ImportStateIdFunc {
	return func(s *terraform.State) (string, error) {
		rs, ok := s.RootModule().Resources[resourceName]
		if !ok {
			return "", fmt.Errorf("Not found: %s", resourceName)
		}

		return fmt.Sprintf("%s,%s", rs.Primary.Attributes["user_access_logging_settings_arn"], rs.Primary.Attributes["portal_arn"]), nil
	}
}

func testAccUserAccessLoggingSettingsAssociationConfig_basic(rName string) string {
	return fmt.Sprintf(`
resource "aws_workspacesweb_portal"
"test" { + display_name = "test" +} + +resource "aws_kinesis_stream" "test" { + name = "amazon-workspaces-web-%[1]s" + shard_count = 1 +} + +resource "aws_workspacesweb_user_access_logging_settings" "test" { + kinesis_stream_arn = aws_kinesis_stream.test.arn +} + +resource "aws_workspacesweb_user_access_logging_settings_association" "test" { + user_access_logging_settings_arn = aws_workspacesweb_user_access_logging_settings.test.user_access_logging_settings_arn + portal_arn = aws_workspacesweb_portal.test.portal_arn +} +`, rName) +} diff --git a/internal/service/workspacesweb/user_access_logging_settings_tags_gen_test.go b/internal/service/workspacesweb/user_access_logging_settings_tags_gen_test.go index d52ffbf79da6..6da331c4d75c 100644 --- a/internal/service/workspacesweb/user_access_logging_settings_tags_gen_test.go +++ b/internal/service/workspacesweb/user_access_logging_settings_tags_gen_test.go @@ -7,7 +7,6 @@ import ( "github.com/aws/aws-sdk-go-v2/service/workspacesweb/types" "github.com/hashicorp/terraform-plugin-testing/config" - sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/knownvalue" "github.com/hashicorp/terraform-plugin-testing/plancheck" @@ -19,11 +18,12 @@ import ( func TestAccWorkSpacesWebUserAccessLoggingSettings_tags(t *testing.T) { ctx := acctest.Context(t) + var v types.UserAccessLoggingSettings resourceName := "aws_workspacesweb_user_access_logging_settings.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), CheckDestroy: testAccCheckUserAccessLoggingSettingsDestroy(ctx), @@ -209,11 +209,12 @@ func 
TestAccWorkSpacesWebUserAccessLoggingSettings_tags(t *testing.T) { func TestAccWorkSpacesWebUserAccessLoggingSettings_tags_null(t *testing.T) { ctx := acctest.Context(t) + var v types.UserAccessLoggingSettings resourceName := "aws_workspacesweb_user_access_logging_settings.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), CheckDestroy: testAccCheckUserAccessLoggingSettingsDestroy(ctx), @@ -273,11 +274,12 @@ func TestAccWorkSpacesWebUserAccessLoggingSettings_tags_null(t *testing.T) { func TestAccWorkSpacesWebUserAccessLoggingSettings_tags_EmptyMap(t *testing.T) { ctx := acctest.Context(t) + var v types.UserAccessLoggingSettings resourceName := "aws_workspacesweb_user_access_logging_settings.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), CheckDestroy: testAccCheckUserAccessLoggingSettingsDestroy(ctx), @@ -325,11 +327,12 @@ func TestAccWorkSpacesWebUserAccessLoggingSettings_tags_EmptyMap(t *testing.T) { func TestAccWorkSpacesWebUserAccessLoggingSettings_tags_AddOnUpdate(t *testing.T) { ctx := acctest.Context(t) + var v types.UserAccessLoggingSettings resourceName := "aws_workspacesweb_user_access_logging_settings.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: 
acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), CheckDestroy: testAccCheckUserAccessLoggingSettingsDestroy(ctx), @@ -407,11 +410,12 @@ func TestAccWorkSpacesWebUserAccessLoggingSettings_tags_AddOnUpdate(t *testing.T func TestAccWorkSpacesWebUserAccessLoggingSettings_tags_EmptyTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) + var v types.UserAccessLoggingSettings resourceName := "aws_workspacesweb_user_access_logging_settings.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), CheckDestroy: testAccCheckUserAccessLoggingSettingsDestroy(ctx), @@ -501,11 +505,12 @@ func TestAccWorkSpacesWebUserAccessLoggingSettings_tags_EmptyTag_OnCreate(t *tes func TestAccWorkSpacesWebUserAccessLoggingSettings_tags_EmptyTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + var v types.UserAccessLoggingSettings resourceName := "aws_workspacesweb_user_access_logging_settings.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), CheckDestroy: testAccCheckUserAccessLoggingSettingsDestroy(ctx), @@ -644,11 +649,12 @@ func TestAccWorkSpacesWebUserAccessLoggingSettings_tags_EmptyTag_OnUpdate_Add(t func TestAccWorkSpacesWebUserAccessLoggingSettings_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + var v types.UserAccessLoggingSettings resourceName := "aws_workspacesweb_user_access_logging_settings.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := 
acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), CheckDestroy: testAccCheckUserAccessLoggingSettingsDestroy(ctx), @@ -736,11 +742,12 @@ func TestAccWorkSpacesWebUserAccessLoggingSettings_tags_EmptyTag_OnUpdate_Replac func TestAccWorkSpacesWebUserAccessLoggingSettings_tags_DefaultTags_providerOnly(t *testing.T) { ctx := acctest.Context(t) + var v types.UserAccessLoggingSettings resourceName := "aws_workspacesweb_user_access_logging_settings.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), CheckDestroy: testAccCheckUserAccessLoggingSettingsDestroy(ctx), @@ -925,11 +932,12 @@ func TestAccWorkSpacesWebUserAccessLoggingSettings_tags_DefaultTags_providerOnly func TestAccWorkSpacesWebUserAccessLoggingSettings_tags_DefaultTags_nonOverlapping(t *testing.T) { ctx := acctest.Context(t) + var v types.UserAccessLoggingSettings resourceName := "aws_workspacesweb_user_access_logging_settings.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), CheckDestroy: testAccCheckUserAccessLoggingSettingsDestroy(ctx), @@ -1091,11 +1099,12 @@ func TestAccWorkSpacesWebUserAccessLoggingSettings_tags_DefaultTags_nonOverlappi func TestAccWorkSpacesWebUserAccessLoggingSettings_tags_DefaultTags_overlapping(t *testing.T) { 
ctx := acctest.Context(t) + var v types.UserAccessLoggingSettings resourceName := "aws_workspacesweb_user_access_logging_settings.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), CheckDestroy: testAccCheckUserAccessLoggingSettingsDestroy(ctx), @@ -1273,11 +1282,12 @@ func TestAccWorkSpacesWebUserAccessLoggingSettings_tags_DefaultTags_overlapping( func TestAccWorkSpacesWebUserAccessLoggingSettings_tags_DefaultTags_updateToProviderOnly(t *testing.T) { ctx := acctest.Context(t) + var v types.UserAccessLoggingSettings resourceName := "aws_workspacesweb_user_access_logging_settings.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), CheckDestroy: testAccCheckUserAccessLoggingSettingsDestroy(ctx), @@ -1365,11 +1375,12 @@ func TestAccWorkSpacesWebUserAccessLoggingSettings_tags_DefaultTags_updateToProv func TestAccWorkSpacesWebUserAccessLoggingSettings_tags_DefaultTags_updateToResourceOnly(t *testing.T) { ctx := acctest.Context(t) + var v types.UserAccessLoggingSettings resourceName := "aws_workspacesweb_user_access_logging_settings.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), CheckDestroy: 
testAccCheckUserAccessLoggingSettingsDestroy(ctx), @@ -1456,11 +1467,12 @@ func TestAccWorkSpacesWebUserAccessLoggingSettings_tags_DefaultTags_updateToReso func TestAccWorkSpacesWebUserAccessLoggingSettings_tags_DefaultTags_emptyResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v types.UserAccessLoggingSettings resourceName := "aws_workspacesweb_user_access_logging_settings.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), CheckDestroy: testAccCheckUserAccessLoggingSettingsDestroy(ctx), @@ -1524,11 +1536,12 @@ func TestAccWorkSpacesWebUserAccessLoggingSettings_tags_DefaultTags_emptyResourc func TestAccWorkSpacesWebUserAccessLoggingSettings_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T) { ctx := acctest.Context(t) + var v types.UserAccessLoggingSettings resourceName := "aws_workspacesweb_user_access_logging_settings.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), CheckDestroy: testAccCheckUserAccessLoggingSettingsDestroy(ctx), @@ -1584,11 +1597,12 @@ func TestAccWorkSpacesWebUserAccessLoggingSettings_tags_DefaultTags_emptyProvide func TestAccWorkSpacesWebUserAccessLoggingSettings_tags_DefaultTags_nullOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v types.UserAccessLoggingSettings resourceName := "aws_workspacesweb_user_access_logging_settings.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, 
acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), CheckDestroy: testAccCheckUserAccessLoggingSettingsDestroy(ctx), @@ -1655,11 +1669,12 @@ func TestAccWorkSpacesWebUserAccessLoggingSettings_tags_DefaultTags_nullOverlapp func TestAccWorkSpacesWebUserAccessLoggingSettings_tags_DefaultTags_nullNonOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v types.UserAccessLoggingSettings resourceName := "aws_workspacesweb_user_access_logging_settings.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), CheckDestroy: testAccCheckUserAccessLoggingSettingsDestroy(ctx), @@ -1728,11 +1743,12 @@ func TestAccWorkSpacesWebUserAccessLoggingSettings_tags_DefaultTags_nullNonOverl func TestAccWorkSpacesWebUserAccessLoggingSettings_tags_ComputedTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) + var v types.UserAccessLoggingSettings resourceName := "aws_workspacesweb_user_access_logging_settings.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), CheckDestroy: testAccCheckUserAccessLoggingSettingsDestroy(ctx), @@ -1785,11 +1801,12 @@ func TestAccWorkSpacesWebUserAccessLoggingSettings_tags_ComputedTag_OnCreate(t * func TestAccWorkSpacesWebUserAccessLoggingSettings_tags_ComputedTag_OnUpdate_Add(t *testing.T) { ctx := 
acctest.Context(t) + var v types.UserAccessLoggingSettings resourceName := "aws_workspacesweb_user_access_logging_settings.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), CheckDestroy: testAccCheckUserAccessLoggingSettingsDestroy(ctx), @@ -1884,11 +1901,12 @@ func TestAccWorkSpacesWebUserAccessLoggingSettings_tags_ComputedTag_OnUpdate_Add func TestAccWorkSpacesWebUserAccessLoggingSettings_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + var v types.UserAccessLoggingSettings resourceName := "aws_workspacesweb_user_access_logging_settings.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), CheckDestroy: testAccCheckUserAccessLoggingSettingsDestroy(ctx), @@ -1973,11 +1991,12 @@ func TestAccWorkSpacesWebUserAccessLoggingSettings_tags_ComputedTag_OnUpdate_Rep func TestAccWorkSpacesWebUserAccessLoggingSettings_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { ctx := acctest.Context(t) + var v types.UserAccessLoggingSettings resourceName := "aws_workspacesweb_user_access_logging_settings.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), CheckDestroy: 
testAccCheckUserAccessLoggingSettingsDestroy(ctx), @@ -2135,11 +2154,12 @@ func TestAccWorkSpacesWebUserAccessLoggingSettings_tags_IgnoreTags_Overlap_Defau func TestAccWorkSpacesWebUserAccessLoggingSettings_tags_IgnoreTags_Overlap_ResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v types.UserAccessLoggingSettings resourceName := "aws_workspacesweb_user_access_logging_settings.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), CheckDestroy: testAccCheckUserAccessLoggingSettingsDestroy(ctx), diff --git a/internal/service/workspacesweb/user_settings_association.go b/internal/service/workspacesweb/user_settings_association.go new file mode 100644 index 000000000000..80676655ffe7 --- /dev/null +++ b/internal/service/workspacesweb/user_settings_association.go @@ -0,0 +1,164 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package workspacesweb + +import ( + "context" + "fmt" + "slices" + + "github.com/aws/aws-sdk-go-v2/service/workspacesweb" + awstypes "github.com/aws/aws-sdk-go-v2/service/workspacesweb/types" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" + "github.com/hashicorp/terraform-provider-aws/internal/errs" + "github.com/hashicorp/terraform-provider-aws/internal/errs/fwdiag" + intflex "github.com/hashicorp/terraform-provider-aws/internal/flex" + "github.com/hashicorp/terraform-provider-aws/internal/framework" + fwtypes "github.com/hashicorp/terraform-provider-aws/internal/framework/types" + tfretry "github.com/hashicorp/terraform-provider-aws/internal/retry" +) + +// @FrameworkResource("aws_workspacesweb_user_settings_association", name="User Settings Association") +// @Testing(tagsTest=false) +// @Testing(existsType="github.com/aws/aws-sdk-go-v2/service/workspacesweb/types;types.UserSettings") +// @Testing(importStateIdAttribute="user_settings_arn,portal_arn") +func newUserSettingsAssociationResource(_ context.Context) (resource.ResourceWithConfigure, error) { + return &userSettingsAssociationResource{}, nil +} + +type userSettingsAssociationResource struct { + framework.ResourceWithModel[userSettingsAssociationResourceModel] + framework.WithNoUpdate +} + +func (r *userSettingsAssociationResource) Schema(ctx context.Context, request resource.SchemaRequest, response *resource.SchemaResponse) { + response.Schema = schema.Schema{ + Attributes: map[string]schema.Attribute{ + "portal_arn": schema.StringAttribute{ + CustomType: fwtypes.ARNType, + Required: true, + PlanModifiers: []planmodifier.String{ + 
stringplanmodifier.RequiresReplace(), + }, + }, + "user_settings_arn": schema.StringAttribute{ + CustomType: fwtypes.ARNType, + Required: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + }, + }, + } +} + +func (r *userSettingsAssociationResource) Create(ctx context.Context, request resource.CreateRequest, response *resource.CreateResponse) { + var data userSettingsAssociationResourceModel + response.Diagnostics.Append(request.Plan.Get(ctx, &data)...) + if response.Diagnostics.HasError() { + return + } + + conn := r.Meta().WorkSpacesWebClient(ctx) + + input := workspacesweb.AssociateUserSettingsInput{ + UserSettingsArn: data.UserSettingsARN.ValueStringPointer(), + PortalArn: data.PortalARN.ValueStringPointer(), + } + + _, err := conn.AssociateUserSettings(ctx, &input) + + if err != nil { + response.Diagnostics.AddError("creating WorkSpacesWeb User Settings Association", err.Error()) + return + } + + response.Diagnostics.Append(response.State.Set(ctx, data)...) +} + +func (r *userSettingsAssociationResource) Read(ctx context.Context, request resource.ReadRequest, response *resource.ReadResponse) { + var data userSettingsAssociationResourceModel + response.Diagnostics.Append(request.State.Get(ctx, &data)...) 
+ if response.Diagnostics.HasError() { + return + } + + conn := r.Meta().WorkSpacesWebClient(ctx) + + // Check if the association exists by getting the user settings and checking associated portals + output, err := findUserSettingsByARN(ctx, conn, data.UserSettingsARN.ValueString()) + if tfretry.NotFound(err) { + response.Diagnostics.Append(fwdiag.NewResourceNotFoundWarningDiagnostic(err)) + response.State.RemoveResource(ctx) + return + } + + if err != nil { + response.Diagnostics.AddError(fmt.Sprintf("reading WorkSpacesWeb User Settings Association (%s)", data.UserSettingsARN.ValueString()), err.Error()) + return + } + + // Check if the portal is in the associated portals list + if !slices.Contains(output.AssociatedPortalArns, data.PortalARN.ValueString()) { + response.Diagnostics.Append(fwdiag.NewResourceNotFoundWarningDiagnostic(fmt.Errorf("association not found"))) + response.State.RemoveResource(ctx) + return + } + + response.Diagnostics.Append(response.State.Set(ctx, &data)...) +} + +func (r *userSettingsAssociationResource) Delete(ctx context.Context, request resource.DeleteRequest, response *resource.DeleteResponse) { + var data userSettingsAssociationResourceModel + response.Diagnostics.Append(request.State.Get(ctx, &data)...) 
+ if response.Diagnostics.HasError() { + return + } + + conn := r.Meta().WorkSpacesWebClient(ctx) + + input := workspacesweb.DisassociateUserSettingsInput{ + PortalArn: data.PortalARN.ValueStringPointer(), + } + + _, err := conn.DisassociateUserSettings(ctx, &input) + + if errs.IsA[*awstypes.ResourceNotFoundException](err) { + return + } + + if err != nil { + response.Diagnostics.AddError(fmt.Sprintf("deleting WorkSpacesWeb User Settings Association (%s)", data.UserSettingsARN.ValueString()), err.Error()) + return + } +} + +func (r *userSettingsAssociationResource) ImportState(ctx context.Context, request resource.ImportStateRequest, response *resource.ImportStateResponse) { + const ( + userSettingsAssociationIDParts = 2 + ) + parts, err := intflex.ExpandResourceId(request.ID, userSettingsAssociationIDParts, true) + if err != nil { + response.Diagnostics.AddError( + "Unexpected Import Identifier", + fmt.Sprintf("Expected import identifier with format: user_settings_arn,portal_arn. Got: %q", request.ID), + ) + return + } + userSettingsARN := parts[0] + portalARN := parts[1] + + response.Diagnostics.Append(response.State.SetAttribute(ctx, path.Root("user_settings_arn"), userSettingsARN)...) + response.Diagnostics.Append(response.State.SetAttribute(ctx, path.Root("portal_arn"), portalARN)...) +} + +type userSettingsAssociationResourceModel struct { + framework.WithRegionModel + PortalARN fwtypes.ARN `tfsdk:"portal_arn"` + UserSettingsARN fwtypes.ARN `tfsdk:"user_settings_arn"` +} diff --git a/internal/service/workspacesweb/user_settings_association_test.go b/internal/service/workspacesweb/user_settings_association_test.go new file mode 100644 index 000000000000..075e0a8de316 --- /dev/null +++ b/internal/service/workspacesweb/user_settings_association_test.go @@ -0,0 +1,185 @@ +// Copyright (c) HashiCorp, Inc. 
// SPDX-License-Identifier: MPL-2.0

package workspacesweb_test

import (
	"context"
	"fmt"
	"slices"
	"testing"

	awstypes "github.com/aws/aws-sdk-go-v2/service/workspacesweb/types"
	"github.com/hashicorp/terraform-plugin-testing/helper/resource"
	"github.com/hashicorp/terraform-plugin-testing/terraform"
	"github.com/hashicorp/terraform-provider-aws/internal/acctest"
	"github.com/hashicorp/terraform-provider-aws/internal/conns"
	tfworkspacesweb "github.com/hashicorp/terraform-provider-aws/internal/service/workspacesweb"
	"github.com/hashicorp/terraform-provider-aws/internal/tfresource"
	"github.com/hashicorp/terraform-provider-aws/names"
)

// Acceptance test: create the association, verify its attributes against the
// settings and portal resources, then exercise import and a refresh-only step.
func TestAccWorkSpacesWebUserSettingsAssociation_basic(t *testing.T) {
	ctx := acctest.Context(t)
	var userSettings awstypes.UserSettings
	resourceName := "aws_workspacesweb_user_settings_association.test"
	userSettingsResourceName := "aws_workspacesweb_user_settings.test"
	portalResourceName := "aws_workspacesweb_portal.test"

	resource.ParallelTest(t, resource.TestCase{
		PreCheck: func() {
			acctest.PreCheck(ctx, t)
			acctest.PreCheckPartitionHasService(t, names.WorkSpacesWebEndpointID)
			testAccPreCheck(ctx, t)
		},
		ErrorCheck:               acctest.ErrorCheck(t, names.WorkSpacesWebServiceID),
		ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories,
		CheckDestroy:             testAccCheckUserSettingsAssociationDestroy(ctx),
		Steps: []resource.TestStep{
			{
				Config: testAccUserSettingsAssociationConfig_basic(),
				Check: resource.ComposeAggregateTestCheckFunc(
					testAccCheckUserSettingsAssociationExists(ctx, resourceName, &userSettings),
					resource.TestCheckResourceAttrPair(resourceName, "user_settings_arn", userSettingsResourceName, "user_settings_arn"),
					resource.TestCheckResourceAttrPair(resourceName, "portal_arn", portalResourceName, "portal_arn"),
				),
			},
			{
				ResourceName:                         resourceName,
				ImportState:                          true,
				ImportStateVerify:                    true,
				ImportStateIdFunc:                    testAccUserSettingsAssociationImportStateIdFunc(resourceName),
				ImportStateVerifyIdentifierAttribute: "user_settings_arn",
			},
			{
				ResourceName: resourceName,
				RefreshState: true,
			},
			{
				Config: testAccUserSettingsAssociationConfig_basic(),
				Check: resource.ComposeAggregateTestCheckFunc(
					// The following checks are for the UserSettings resource and the Portal resource (and not for the association resource).
					resource.TestCheckResourceAttr(userSettingsResourceName, "associated_portal_arns.#", "1"),
					resource.TestCheckResourceAttrPair(userSettingsResourceName, "associated_portal_arns.0", portalResourceName, "portal_arn"),
					resource.TestCheckResourceAttrPair(portalResourceName, "user_settings_arn", userSettingsResourceName, "user_settings_arn"),
				),
			},
		},
	})
}

// Acceptance test: delete the association out of band and verify Terraform
// plans its recreation (ExpectNonEmptyPlan).
func TestAccWorkSpacesWebUserSettingsAssociation_disappears(t *testing.T) {
	ctx := acctest.Context(t)
	var userSettings awstypes.UserSettings
	resourceName := "aws_workspacesweb_user_settings_association.test"

	resource.ParallelTest(t, resource.TestCase{
		PreCheck: func() {
			acctest.PreCheck(ctx, t)
			acctest.PreCheckPartitionHasService(t, names.WorkSpacesWebEndpointID)
			testAccPreCheck(ctx, t)
		},
		ErrorCheck:               acctest.ErrorCheck(t, names.WorkSpacesWebServiceID),
		ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories,
		CheckDestroy:             testAccCheckUserSettingsAssociationDestroy(ctx),
		Steps: []resource.TestStep{
			{
				Config: testAccUserSettingsAssociationConfig_basic(),
				Check: resource.ComposeAggregateTestCheckFunc(
					testAccCheckUserSettingsAssociationExists(ctx, resourceName, &userSettings),
					acctest.CheckFrameworkResourceDisappears(ctx, acctest.Provider, tfworkspacesweb.ResourceUserSettingsAssociation, resourceName),
				),
				ExpectNonEmptyPlan: true,
			},
		},
	})
}

// testAccCheckUserSettingsAssociationDestroy verifies no portal is still
// associated with the user settings after destroy. A NotFound on the settings
// themselves also counts as destroyed.
func testAccCheckUserSettingsAssociationDestroy(ctx context.Context) resource.TestCheckFunc {
	return func(s *terraform.State) error {
		conn := acctest.Provider.Meta().(*conns.AWSClient).WorkSpacesWebClient(ctx)

		for _, rs := range s.RootModule().Resources {
			if rs.Type != "aws_workspacesweb_user_settings_association" {
				continue
			}

			userSettings, err := tfworkspacesweb.FindUserSettingsByARN(ctx, conn, rs.Primary.Attributes["user_settings_arn"])

			if tfresource.NotFound(err) {
				continue
			}

			if err != nil {
				return err
			}

			// Check if the portal is still associated
			portalARN := rs.Primary.Attributes["portal_arn"]
			if slices.Contains(userSettings.AssociatedPortalArns, portalARN) {
				return fmt.Errorf("WorkSpaces Web User Settings Association %s still exists", rs.Primary.Attributes["user_settings_arn"])
			}
		}

		return nil
	}
}

// testAccCheckUserSettingsAssociationExists confirms the portal is listed in
// the settings' AssociatedPortalArns and captures the settings object.
func testAccCheckUserSettingsAssociationExists(ctx context.Context, n string, v *awstypes.UserSettings) resource.TestCheckFunc {
	return func(s *terraform.State) error {
		rs, ok := s.RootModule().Resources[n]
		if !ok {
			return fmt.Errorf("Not found: %s", n)
		}

		conn := acctest.Provider.Meta().(*conns.AWSClient).WorkSpacesWebClient(ctx)

		output, err := tfworkspacesweb.FindUserSettingsByARN(ctx, conn, rs.Primary.Attributes["user_settings_arn"])

		if err != nil {
			return err
		}

		// Check if the portal is associated
		portalARN := rs.Primary.Attributes["portal_arn"]
		if !slices.Contains(output.AssociatedPortalArns, portalARN) {
			return fmt.Errorf("Association not found")
		}

		*v = *output

		return nil
	}
}

// testAccUserSettingsAssociationImportStateIdFunc builds the comma-separated
// import ID expected by ImportState.
func testAccUserSettingsAssociationImportStateIdFunc(resourceName string) resource.ImportStateIdFunc {
	return func(s *terraform.State) (string, error) {
		rs, ok := s.RootModule().Resources[resourceName]
		if !ok {
			return "", fmt.Errorf("Not found: %s", resourceName)
		}

		return fmt.Sprintf("%s,%s", rs.Primary.Attributes["user_settings_arn"], rs.Primary.Attributes["portal_arn"]), nil
	}
}

func testAccUserSettingsAssociationConfig_basic() string {
	return `
resource "aws_workspacesweb_portal" "test" {
display_name = "test" +} + +resource "aws_workspacesweb_user_settings" "test" { + copy_allowed = "Enabled" + download_allowed = "Enabled" + paste_allowed = "Enabled" + print_allowed = "Enabled" + upload_allowed = "Enabled" +} + +resource "aws_workspacesweb_user_settings_association" "test" { + user_settings_arn = aws_workspacesweb_user_settings.test.user_settings_arn + portal_arn = aws_workspacesweb_portal.test.portal_arn +} +` +} diff --git a/internal/service/workspacesweb/user_settings_tags_gen_test.go b/internal/service/workspacesweb/user_settings_tags_gen_test.go index e9d57e3e9dd2..1510985e51a6 100644 --- a/internal/service/workspacesweb/user_settings_tags_gen_test.go +++ b/internal/service/workspacesweb/user_settings_tags_gen_test.go @@ -18,10 +18,11 @@ import ( func TestAccWorkSpacesWebUserSettings_tags(t *testing.T) { ctx := acctest.Context(t) + var v types.UserSettings resourceName := "aws_workspacesweb_user_settings.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), CheckDestroy: testAccCheckUserSettingsDestroy(ctx), @@ -199,10 +200,11 @@ func TestAccWorkSpacesWebUserSettings_tags(t *testing.T) { func TestAccWorkSpacesWebUserSettings_tags_null(t *testing.T) { ctx := acctest.Context(t) + var v types.UserSettings resourceName := "aws_workspacesweb_user_settings.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), CheckDestroy: testAccCheckUserSettingsDestroy(ctx), @@ -260,10 +262,11 @@ func TestAccWorkSpacesWebUserSettings_tags_null(t *testing.T) { func TestAccWorkSpacesWebUserSettings_tags_EmptyMap(t *testing.T) { ctx := acctest.Context(t) + var v types.UserSettings resourceName := "aws_workspacesweb_user_settings.test" - 
resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), CheckDestroy: testAccCheckUserSettingsDestroy(ctx), @@ -309,10 +312,11 @@ func TestAccWorkSpacesWebUserSettings_tags_EmptyMap(t *testing.T) { func TestAccWorkSpacesWebUserSettings_tags_AddOnUpdate(t *testing.T) { ctx := acctest.Context(t) + var v types.UserSettings resourceName := "aws_workspacesweb_user_settings.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), CheckDestroy: testAccCheckUserSettingsDestroy(ctx), @@ -387,10 +391,11 @@ func TestAccWorkSpacesWebUserSettings_tags_AddOnUpdate(t *testing.T) { func TestAccWorkSpacesWebUserSettings_tags_EmptyTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) + var v types.UserSettings resourceName := "aws_workspacesweb_user_settings.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), CheckDestroy: testAccCheckUserSettingsDestroy(ctx), @@ -476,10 +481,11 @@ func TestAccWorkSpacesWebUserSettings_tags_EmptyTag_OnCreate(t *testing.T) { func TestAccWorkSpacesWebUserSettings_tags_EmptyTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + var v types.UserSettings resourceName := "aws_workspacesweb_user_settings.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), CheckDestroy: testAccCheckUserSettingsDestroy(ctx), @@ -613,10 +619,11 @@ func TestAccWorkSpacesWebUserSettings_tags_EmptyTag_OnUpdate_Add(t *testing.T) { func 
TestAccWorkSpacesWebUserSettings_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + var v types.UserSettings resourceName := "aws_workspacesweb_user_settings.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), CheckDestroy: testAccCheckUserSettingsDestroy(ctx), @@ -701,10 +708,11 @@ func TestAccWorkSpacesWebUserSettings_tags_EmptyTag_OnUpdate_Replace(t *testing. func TestAccWorkSpacesWebUserSettings_tags_DefaultTags_providerOnly(t *testing.T) { ctx := acctest.Context(t) + var v types.UserSettings resourceName := "aws_workspacesweb_user_settings.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), CheckDestroy: testAccCheckUserSettingsDestroy(ctx), @@ -881,10 +889,11 @@ func TestAccWorkSpacesWebUserSettings_tags_DefaultTags_providerOnly(t *testing.T func TestAccWorkSpacesWebUserSettings_tags_DefaultTags_nonOverlapping(t *testing.T) { ctx := acctest.Context(t) + var v types.UserSettings resourceName := "aws_workspacesweb_user_settings.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), CheckDestroy: testAccCheckUserSettingsDestroy(ctx), @@ -1040,10 +1049,11 @@ func TestAccWorkSpacesWebUserSettings_tags_DefaultTags_nonOverlapping(t *testing func TestAccWorkSpacesWebUserSettings_tags_DefaultTags_overlapping(t *testing.T) { ctx := acctest.Context(t) + var v types.UserSettings resourceName := "aws_workspacesweb_user_settings.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, 
ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), CheckDestroy: testAccCheckUserSettingsDestroy(ctx), @@ -1215,10 +1225,11 @@ func TestAccWorkSpacesWebUserSettings_tags_DefaultTags_overlapping(t *testing.T) func TestAccWorkSpacesWebUserSettings_tags_DefaultTags_updateToProviderOnly(t *testing.T) { ctx := acctest.Context(t) + var v types.UserSettings resourceName := "aws_workspacesweb_user_settings.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), CheckDestroy: testAccCheckUserSettingsDestroy(ctx), @@ -1303,10 +1314,11 @@ func TestAccWorkSpacesWebUserSettings_tags_DefaultTags_updateToProviderOnly(t *t func TestAccWorkSpacesWebUserSettings_tags_DefaultTags_updateToResourceOnly(t *testing.T) { ctx := acctest.Context(t) + var v types.UserSettings resourceName := "aws_workspacesweb_user_settings.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), CheckDestroy: testAccCheckUserSettingsDestroy(ctx), @@ -1390,10 +1402,11 @@ func TestAccWorkSpacesWebUserSettings_tags_DefaultTags_updateToResourceOnly(t *t func TestAccWorkSpacesWebUserSettings_tags_DefaultTags_emptyResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v types.UserSettings resourceName := "aws_workspacesweb_user_settings.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), CheckDestroy: testAccCheckUserSettingsDestroy(ctx), @@ -1455,10 +1468,11 @@ func TestAccWorkSpacesWebUserSettings_tags_DefaultTags_emptyResourceTag(t *testi func TestAccWorkSpacesWebUserSettings_tags_DefaultTags_emptyProviderOnlyTag(t 
*testing.T) { ctx := acctest.Context(t) + var v types.UserSettings resourceName := "aws_workspacesweb_user_settings.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), CheckDestroy: testAccCheckUserSettingsDestroy(ctx), @@ -1512,10 +1526,11 @@ func TestAccWorkSpacesWebUserSettings_tags_DefaultTags_emptyProviderOnlyTag(t *t func TestAccWorkSpacesWebUserSettings_tags_DefaultTags_nullOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v types.UserSettings resourceName := "aws_workspacesweb_user_settings.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), CheckDestroy: testAccCheckUserSettingsDestroy(ctx), @@ -1580,10 +1595,11 @@ func TestAccWorkSpacesWebUserSettings_tags_DefaultTags_nullOverlappingResourceTa func TestAccWorkSpacesWebUserSettings_tags_DefaultTags_nullNonOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v types.UserSettings resourceName := "aws_workspacesweb_user_settings.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), CheckDestroy: testAccCheckUserSettingsDestroy(ctx), @@ -1650,10 +1666,11 @@ func TestAccWorkSpacesWebUserSettings_tags_DefaultTags_nullNonOverlappingResourc func TestAccWorkSpacesWebUserSettings_tags_ComputedTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) + var v types.UserSettings resourceName := "aws_workspacesweb_user_settings.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, 
names.WorkSpacesWebServiceID), CheckDestroy: testAccCheckUserSettingsDestroy(ctx), @@ -1704,10 +1721,11 @@ func TestAccWorkSpacesWebUserSettings_tags_ComputedTag_OnCreate(t *testing.T) { func TestAccWorkSpacesWebUserSettings_tags_ComputedTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + var v types.UserSettings resourceName := "aws_workspacesweb_user_settings.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), CheckDestroy: testAccCheckUserSettingsDestroy(ctx), @@ -1799,10 +1817,11 @@ func TestAccWorkSpacesWebUserSettings_tags_ComputedTag_OnUpdate_Add(t *testing.T func TestAccWorkSpacesWebUserSettings_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + var v types.UserSettings resourceName := "aws_workspacesweb_user_settings.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), CheckDestroy: testAccCheckUserSettingsDestroy(ctx), @@ -1884,10 +1903,11 @@ func TestAccWorkSpacesWebUserSettings_tags_ComputedTag_OnUpdate_Replace(t *testi func TestAccWorkSpacesWebUserSettings_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { ctx := acctest.Context(t) + var v types.UserSettings resourceName := "aws_workspacesweb_user_settings.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), CheckDestroy: testAccCheckUserSettingsDestroy(ctx), @@ -2042,10 +2062,11 @@ func TestAccWorkSpacesWebUserSettings_tags_IgnoreTags_Overlap_DefaultTag(t *test func TestAccWorkSpacesWebUserSettings_tags_IgnoreTags_Overlap_ResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v 
types.UserSettings resourceName := "aws_workspacesweb_user_settings.test" - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.WorkSpacesWebServiceID), CheckDestroy: testAccCheckUserSettingsDestroy(ctx), diff --git a/internal/service/xray/encryption_config_identity_gen_test.go b/internal/service/xray/encryption_config_identity_gen_test.go index ffb581300be3..bf9d47f15204 100644 --- a/internal/service/xray/encryption_config_identity_gen_test.go +++ b/internal/service/xray/encryption_config_identity_gen_test.go @@ -16,6 +16,7 @@ import ( "github.com/hashicorp/terraform-plugin-testing/tfversion" "github.com/hashicorp/terraform-provider-aws/internal/acctest" tfknownvalue "github.com/hashicorp/terraform-provider-aws/internal/acctest/knownvalue" + tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -23,9 +24,10 @@ func testAccXRayEncryptionConfig_IdentitySerial(t *testing.T) { t.Helper() testCases := map[string]func(t *testing.T){ - acctest.CtBasic: testAccXRayEncryptionConfig_Identity_Basic, - "ExistingResource": testAccXRayEncryptionConfig_Identity_ExistingResource, - "RegionOverride": testAccXRayEncryptionConfig_Identity_RegionOverride, + acctest.CtBasic: testAccXRayEncryptionConfig_Identity_Basic, + "ExistingResource": testAccXRayEncryptionConfig_Identity_ExistingResource, + "ExistingResourceNoRefresh": testAccXRayEncryptionConfig_Identity_ExistingResource_NoRefresh_NoChange, + "RegionOverride": testAccXRayEncryptionConfig_Identity_RegionOverride, } acctest.RunSerialTests1Level(t, testCases, 0) @@ -37,7 +39,7 @@ func testAccXRayEncryptionConfig_Identity_Basic(t *testing.T) { var v awstypes.EncryptionConfig resourceName := "aws_xray_encryption_config.test" - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ 
TerraformVersionChecks: []tfversion.TerraformVersionCheck{ tfversion.SkipBelow(tfversion.Version1_12_0), }, @@ -111,7 +113,7 @@ func testAccXRayEncryptionConfig_Identity_RegionOverride(t *testing.T) { resourceName := "aws_xray_encryption_config.test" - resource.Test(t, resource.TestCase{ + acctest.Test(ctx, t, resource.TestCase{ TerraformVersionChecks: []tfversion.TerraformVersionCheck{ tfversion.SkipBelow(tfversion.Version1_12_0), }, @@ -215,3 +217,120 @@ func testAccXRayEncryptionConfig_Identity_RegionOverride(t *testing.T) { }, }) } + +func testAccXRayEncryptionConfig_Identity_ExistingResource(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.EncryptionConfig + resourceName := "aws_xray_encryption_config.test" + + acctest.Test(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.XRayServiceID), + CheckDestroy: acctest.CheckDestroyNoop, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/EncryptionConfig/basic_v5.100.0/"), + ConfigVariables: config.Variables{}, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckEncryptionConfigExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: v6.0 Identity error + { + ConfigDirectory: config.StaticDirectory("testdata/EncryptionConfig/basic_v6.0.0/"), + ConfigVariables: config.Variables{}, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckEncryptionConfigExists(ctx, resourceName, &v), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, 
plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: knownvalue.Null(), + names.AttrRegion: knownvalue.Null(), + }), + }, + }, + + // Step 3: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/EncryptionConfig/basic/"), + ConfigVariables: config.Variables{}, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + }), + }, + }, + }, + }) +} + +func testAccXRayEncryptionConfig_Identity_ExistingResource_NoRefresh_NoChange(t *testing.T) { + ctx := acctest.Context(t) + + var v awstypes.EncryptionConfig + resourceName := "aws_xray_encryption_config.test" + + acctest.Test(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.XRayServiceID), + CheckDestroy: acctest.CheckDestroyNoop, + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/EncryptionConfig/basic_v5.100.0/"), + ConfigVariables: config.Variables{}, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckEncryptionConfigExists(ctx, resourceName, &v), + ), + ConfigStateChecks: 
[]statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/EncryptionConfig/basic/"), + ConfigVariables: config.Variables{}, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckEncryptionConfigExists(ctx, resourceName, &v), + ), + }, + }, + }) +} diff --git a/internal/service/xray/encryption_config_test.go b/internal/service/xray/encryption_config_test.go index 73ea9afc7374..9e43eeb5d3b1 100644 --- a/internal/service/xray/encryption_config_test.go +++ b/internal/service/xray/encryption_config_test.go @@ -11,14 +11,8 @@ import ( "github.com/aws/aws-sdk-go-v2/service/xray/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" - "github.com/hashicorp/terraform-plugin-testing/knownvalue" - "github.com/hashicorp/terraform-plugin-testing/plancheck" - "github.com/hashicorp/terraform-plugin-testing/statecheck" "github.com/hashicorp/terraform-plugin-testing/terraform" - "github.com/hashicorp/terraform-plugin-testing/tfversion" "github.com/hashicorp/terraform-provider-aws/internal/acctest" - tfknownvalue "github.com/hashicorp/terraform-provider-aws/internal/acctest/knownvalue" - tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" "github.com/hashicorp/terraform-provider-aws/internal/conns" tfxray "github.com/hashicorp/terraform-provider-aws/internal/service/xray" "github.com/hashicorp/terraform-provider-aws/names" @@ -104,75 +98,6 @@ func testAccCheckEncryptionConfigExists(ctx context.Context, n string, v *types. 
} } -func testAccXRayEncryptionConfig_Identity_ExistingResource(t *testing.T) { - ctx := acctest.Context(t) - resourceName := "aws_xray_encryption_config.test" - - resource.Test(t, resource.TestCase{ - TerraformVersionChecks: []tfversion.TerraformVersionCheck{ - tfversion.SkipBelow(tfversion.Version1_12_0), - }, - PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, names.XRayServiceID), - CheckDestroy: acctest.CheckDestroyNoop, - Steps: []resource.TestStep{ - { - ExternalProviders: map[string]resource.ExternalProvider{ - "aws": { - Source: "hashicorp/aws", - VersionConstraint: "5.100.0", - }, - }, - Config: testAccEncryptionConfigConfig_basic(), - ConfigStateChecks: []statecheck.StateCheck{ - tfstatecheck.ExpectNoIdentity(resourceName), - }, - }, - { - ExternalProviders: map[string]resource.ExternalProvider{ - "aws": { - Source: "hashicorp/aws", - VersionConstraint: "6.0.0", - }, - }, - Config: testAccEncryptionConfigConfig_basic(), - ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - PostApplyPostRefresh: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - }, - ConfigStateChecks: []statecheck.StateCheck{ - statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ - names.AttrAccountID: knownvalue.Null(), - names.AttrRegion: knownvalue.Null(), - }), - }, - }, - { - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - Config: testAccEncryptionConfigConfig_basic(), - ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - PostApplyPostRefresh: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - }, - ConfigStateChecks: []statecheck.StateCheck{ - statecheck.ExpectIdentity(resourceName, 
map[string]knownvalue.Check{ - names.AttrAccountID: tfknownvalue.AccountID(), - names.AttrRegion: knownvalue.StringExact(acctest.Region()), - }), - }, - }, - }, - }) -} - func testAccEncryptionConfigConfig_basic() string { return ` resource "aws_xray_encryption_config" "test" { diff --git a/internal/service/xray/group_identity_gen_test.go b/internal/service/xray/group_identity_gen_test.go index ca551a6fa087..de5e9b1696ac 100644 --- a/internal/service/xray/group_identity_gen_test.go +++ b/internal/service/xray/group_identity_gen_test.go @@ -16,6 +16,7 @@ import ( "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" "github.com/hashicorp/terraform-plugin-testing/tfversion" "github.com/hashicorp/terraform-provider-aws/internal/acctest" + tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -26,7 +27,7 @@ func TestAccXRayGroup_Identity_Basic(t *testing.T) { resourceName := "aws_xray_group.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ TerraformVersionChecks: []tfversion.TerraformVersionCheck{ tfversion.SkipBelow(tfversion.Version1_12_0), }, @@ -47,6 +48,9 @@ func TestAccXRayGroup_Identity_Basic(t *testing.T) { ConfigStateChecks: []statecheck.StateCheck{ statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New(names.AttrARN), compare.ValuesSame()), statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), }, }, @@ -108,7 +112,7 @@ func TestAccXRayGroup_Identity_RegionOverride(t *testing.T) { resourceName := "aws_xray_group.test" rName := 
sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ TerraformVersionChecks: []tfversion.TerraformVersionCheck{ tfversion.SkipBelow(tfversion.Version1_12_0), }, @@ -127,6 +131,9 @@ func TestAccXRayGroup_Identity_RegionOverride(t *testing.T) { ConfigStateChecks: []statecheck.StateCheck{ statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New(names.AttrARN), compare.ValuesSame()), statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), }, }, @@ -218,3 +225,131 @@ func TestAccXRayGroup_Identity_RegionOverride(t *testing.T) { }, }) } + +func TestAccXRayGroup_Identity_ExistingResource(t *testing.T) { + ctx := acctest.Context(t) + + var v types.Group + resourceName := "aws_xray_group.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.XRayServiceID), + CheckDestroy: testAccCheckGroupDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/Group/basic_v5.100.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckGroupExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: v6.0 Identity error + { + ConfigDirectory: 
config.StaticDirectory("testdata/Group/basic_v6.0.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckGroupExists(ctx, resourceName, &v), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.Null(), + }), + }, + }, + + // Step 3: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Group/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + }, + }) +} + +func TestAccXRayGroup_Identity_ExistingResource_NoRefresh_NoChange(t *testing.T) { + ctx := acctest.Context(t) + + var v types.Group + resourceName := "aws_xray_group.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, 
t) }, + ErrorCheck: acctest.ErrorCheck(t, names.XRayServiceID), + CheckDestroy: testAccCheckGroupDestroy(ctx), + AdditionalCLIOptions: &resource.AdditionalCLIOptions{ + Plan: resource.PlanOptions{ + NoRefresh: true, + }, + }, + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: config.StaticDirectory("testdata/Group/basic_v5.100.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckGroupExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Group/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckGroupExists(ctx, resourceName, &v), + ), + }, + }, + }) +} diff --git a/internal/service/xray/group_tags_gen_test.go b/internal/service/xray/group_tags_gen_test.go index f8ccf209a26e..0f83c3d5785a 100644 --- a/internal/service/xray/group_tags_gen_test.go +++ b/internal/service/xray/group_tags_gen_test.go @@ -7,7 +7,6 @@ import ( "github.com/aws/aws-sdk-go-v2/service/xray/types" "github.com/hashicorp/terraform-plugin-testing/config" - sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/knownvalue" "github.com/hashicorp/terraform-plugin-testing/plancheck" @@ -19,11 +18,12 @@ import ( func TestAccXRayGroup_tags(t *testing.T) { ctx := acctest.Context(t) + var v types.Group resourceName := "aws_xray_group.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, 
resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.XRayServiceID), CheckDestroy: testAccCheckGroupDestroy(ctx), @@ -201,11 +201,12 @@ func TestAccXRayGroup_tags(t *testing.T) { func TestAccXRayGroup_tags_null(t *testing.T) { ctx := acctest.Context(t) + var v types.Group resourceName := "aws_xray_group.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.XRayServiceID), CheckDestroy: testAccCheckGroupDestroy(ctx), @@ -268,11 +269,12 @@ func TestAccXRayGroup_tags_null(t *testing.T) { func TestAccXRayGroup_tags_EmptyMap(t *testing.T) { ctx := acctest.Context(t) + var v types.Group resourceName := "aws_xray_group.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.XRayServiceID), CheckDestroy: testAccCheckGroupDestroy(ctx), @@ -331,11 +333,12 @@ func TestAccXRayGroup_tags_EmptyMap(t *testing.T) { func TestAccXRayGroup_tags_AddOnUpdate(t *testing.T) { ctx := acctest.Context(t) + var v types.Group resourceName := "aws_xray_group.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.XRayServiceID), CheckDestroy: testAccCheckGroupDestroy(ctx), @@ -412,11 +415,12 @@ func 
TestAccXRayGroup_tags_AddOnUpdate(t *testing.T) { func TestAccXRayGroup_tags_EmptyTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) + var v types.Group resourceName := "aws_xray_group.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.XRayServiceID), CheckDestroy: testAccCheckGroupDestroy(ctx), @@ -501,11 +505,12 @@ func TestAccXRayGroup_tags_EmptyTag_OnCreate(t *testing.T) { func TestAccXRayGroup_tags_EmptyTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + var v types.Group resourceName := "aws_xray_group.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.XRayServiceID), CheckDestroy: testAccCheckGroupDestroy(ctx), @@ -638,11 +643,12 @@ func TestAccXRayGroup_tags_EmptyTag_OnUpdate_Add(t *testing.T) { func TestAccXRayGroup_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + var v types.Group resourceName := "aws_xray_group.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.XRayServiceID), CheckDestroy: testAccCheckGroupDestroy(ctx), @@ -727,11 +733,12 @@ func TestAccXRayGroup_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { func TestAccXRayGroup_tags_DefaultTags_providerOnly(t *testing.T) { ctx := acctest.Context(t) + var v types.Group resourceName := 
"aws_xray_group.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.XRayServiceID), CheckDestroy: testAccCheckGroupDestroy(ctx), @@ -908,11 +915,12 @@ func TestAccXRayGroup_tags_DefaultTags_providerOnly(t *testing.T) { func TestAccXRayGroup_tags_DefaultTags_nonOverlapping(t *testing.T) { ctx := acctest.Context(t) + var v types.Group resourceName := "aws_xray_group.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.XRayServiceID), CheckDestroy: testAccCheckGroupDestroy(ctx), @@ -1068,11 +1076,12 @@ func TestAccXRayGroup_tags_DefaultTags_nonOverlapping(t *testing.T) { func TestAccXRayGroup_tags_DefaultTags_overlapping(t *testing.T) { ctx := acctest.Context(t) + var v types.Group resourceName := "aws_xray_group.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.XRayServiceID), CheckDestroy: testAccCheckGroupDestroy(ctx), @@ -1244,11 +1253,12 @@ func TestAccXRayGroup_tags_DefaultTags_overlapping(t *testing.T) { func TestAccXRayGroup_tags_DefaultTags_updateToProviderOnly(t *testing.T) { ctx := acctest.Context(t) + var v types.Group resourceName := "aws_xray_group.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - 
resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.XRayServiceID), CheckDestroy: testAccCheckGroupDestroy(ctx), @@ -1334,11 +1344,12 @@ func TestAccXRayGroup_tags_DefaultTags_updateToProviderOnly(t *testing.T) { func TestAccXRayGroup_tags_DefaultTags_updateToResourceOnly(t *testing.T) { ctx := acctest.Context(t) + var v types.Group resourceName := "aws_xray_group.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.XRayServiceID), CheckDestroy: testAccCheckGroupDestroy(ctx), @@ -1423,11 +1434,12 @@ func TestAccXRayGroup_tags_DefaultTags_updateToResourceOnly(t *testing.T) { func TestAccXRayGroup_tags_DefaultTags_emptyResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v types.Group resourceName := "aws_xray_group.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.XRayServiceID), CheckDestroy: testAccCheckGroupDestroy(ctx), @@ -1488,11 +1500,12 @@ func TestAccXRayGroup_tags_DefaultTags_emptyResourceTag(t *testing.T) { func TestAccXRayGroup_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T) { ctx := acctest.Context(t) + var v types.Group resourceName := "aws_xray_group.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { 
acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.XRayServiceID), CheckDestroy: testAccCheckGroupDestroy(ctx), @@ -1545,11 +1558,12 @@ func TestAccXRayGroup_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T) { func TestAccXRayGroup_tags_DefaultTags_nullOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v types.Group resourceName := "aws_xray_group.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.XRayServiceID), CheckDestroy: testAccCheckGroupDestroy(ctx), @@ -1607,11 +1621,12 @@ func TestAccXRayGroup_tags_DefaultTags_nullOverlappingResourceTag(t *testing.T) func TestAccXRayGroup_tags_DefaultTags_nullNonOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v types.Group resourceName := "aws_xray_group.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.XRayServiceID), CheckDestroy: testAccCheckGroupDestroy(ctx), @@ -1669,11 +1684,12 @@ func TestAccXRayGroup_tags_DefaultTags_nullNonOverlappingResourceTag(t *testing. 
func TestAccXRayGroup_tags_ComputedTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) + var v types.Group resourceName := "aws_xray_group.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.XRayServiceID), CheckDestroy: testAccCheckGroupDestroy(ctx), @@ -1724,11 +1740,12 @@ func TestAccXRayGroup_tags_ComputedTag_OnCreate(t *testing.T) { func TestAccXRayGroup_tags_ComputedTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + var v types.Group resourceName := "aws_xray_group.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.XRayServiceID), CheckDestroy: testAccCheckGroupDestroy(ctx), @@ -1821,11 +1838,12 @@ func TestAccXRayGroup_tags_ComputedTag_OnUpdate_Add(t *testing.T) { func TestAccXRayGroup_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + var v types.Group resourceName := "aws_xray_group.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.XRayServiceID), CheckDestroy: testAccCheckGroupDestroy(ctx), @@ -1908,11 +1926,12 @@ func TestAccXRayGroup_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { func TestAccXRayGroup_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { ctx := acctest.Context(t) + var v types.Group resourceName := "aws_xray_group.test" 
- rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.XRayServiceID), CheckDestroy: testAccCheckGroupDestroy(ctx), @@ -2070,11 +2089,12 @@ func TestAccXRayGroup_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { func TestAccXRayGroup_tags_IgnoreTags_Overlap_ResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v types.Group resourceName := "aws_xray_group.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.XRayServiceID), CheckDestroy: testAccCheckGroupDestroy(ctx), diff --git a/internal/service/xray/group_test.go b/internal/service/xray/group_test.go index cdf25f295097..ef72943c9292 100644 --- a/internal/service/xray/group_test.go +++ b/internal/service/xray/group_test.go @@ -12,14 +12,8 @@ import ( "github.com/aws/aws-sdk-go-v2/service/xray/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" - "github.com/hashicorp/terraform-plugin-testing/knownvalue" - "github.com/hashicorp/terraform-plugin-testing/plancheck" - "github.com/hashicorp/terraform-plugin-testing/statecheck" "github.com/hashicorp/terraform-plugin-testing/terraform" - "github.com/hashicorp/terraform-plugin-testing/tfversion" "github.com/hashicorp/terraform-provider-aws/internal/acctest" - tfknownvalue "github.com/hashicorp/terraform-provider-aws/internal/acctest/knownvalue" - tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" 
"github.com/hashicorp/terraform-provider-aws/internal/conns" tfxray "github.com/hashicorp/terraform-provider-aws/internal/service/xray" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" @@ -134,84 +128,6 @@ func TestAccXRayGroup_disappears(t *testing.T) { }) } -func TestAccXRayGroup_Identity_ExistingResource(t *testing.T) { - ctx := acctest.Context(t) - var v types.Group - resourceName := "aws_xray_group.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - - resource.ParallelTest(t, resource.TestCase{ - TerraformVersionChecks: []tfversion.TerraformVersionCheck{ - tfversion.SkipBelow(tfversion.Version1_12_0), - }, - PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, names.XRayServiceID), - CheckDestroy: testAccCheckGroupDestroy(ctx), - Steps: []resource.TestStep{ - { - ExternalProviders: map[string]resource.ExternalProvider{ - "aws": { - Source: "hashicorp/aws", - VersionConstraint: "5.100.0", - }, - }, - Config: testAccGroupConfig_basic(rName, "responsetime > 5"), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckGroupExists(ctx, resourceName, &v), - ), - ConfigStateChecks: []statecheck.StateCheck{ - tfstatecheck.ExpectNoIdentity(resourceName), - }, - }, - { - ExternalProviders: map[string]resource.ExternalProvider{ - "aws": { - Source: "hashicorp/aws", - VersionConstraint: "6.0.0", - }, - }, - Config: testAccGroupConfig_basic(rName, "responsetime > 5"), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckGroupExists(ctx, resourceName, &v), - ), - ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - PostApplyPostRefresh: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - }, - ConfigStateChecks: []statecheck.StateCheck{ - statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ - names.AttrARN: 
knownvalue.Null(), - }), - }, - }, - { - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - Config: testAccGroupConfig_basic(rName, "responsetime > 5"), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckGroupExists(ctx, resourceName, &v), - ), - ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - PostApplyPostRefresh: []plancheck.PlanCheck{ - plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), - }, - }, - ConfigStateChecks: []statecheck.StateCheck{ - statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ - names.AttrARN: tfknownvalue.RegionalARNRegexp("xray", regexache.MustCompile(`group/.+`)), - }), - }, - }, - }, - }) -} - func testAccCheckGroupExists(ctx context.Context, n string, v *types.Group) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] diff --git a/internal/service/xray/sampling_rule_tags_gen_test.go b/internal/service/xray/sampling_rule_tags_gen_test.go index 6bcaea61bff8..beee9b442d84 100644 --- a/internal/service/xray/sampling_rule_tags_gen_test.go +++ b/internal/service/xray/sampling_rule_tags_gen_test.go @@ -7,7 +7,6 @@ import ( "github.com/aws/aws-sdk-go-v2/service/xray/types" "github.com/hashicorp/terraform-plugin-testing/config" - sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/knownvalue" "github.com/hashicorp/terraform-plugin-testing/plancheck" @@ -19,11 +18,12 @@ import ( func TestAccXRaySamplingRule_tags(t *testing.T) { ctx := acctest.Context(t) + var v types.SamplingRule resourceName := "aws_xray_sampling_rule.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, 
resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.XRayServiceID), CheckDestroy: testAccCheckSamplingRuleDestroy(ctx), @@ -201,11 +201,12 @@ func TestAccXRaySamplingRule_tags(t *testing.T) { func TestAccXRaySamplingRule_tags_null(t *testing.T) { ctx := acctest.Context(t) + var v types.SamplingRule resourceName := "aws_xray_sampling_rule.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.XRayServiceID), CheckDestroy: testAccCheckSamplingRuleDestroy(ctx), @@ -268,11 +269,12 @@ func TestAccXRaySamplingRule_tags_null(t *testing.T) { func TestAccXRaySamplingRule_tags_EmptyMap(t *testing.T) { ctx := acctest.Context(t) + var v types.SamplingRule resourceName := "aws_xray_sampling_rule.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.XRayServiceID), CheckDestroy: testAccCheckSamplingRuleDestroy(ctx), @@ -331,11 +333,12 @@ func TestAccXRaySamplingRule_tags_EmptyMap(t *testing.T) { func TestAccXRaySamplingRule_tags_AddOnUpdate(t *testing.T) { ctx := acctest.Context(t) + var v types.SamplingRule resourceName := "aws_xray_sampling_rule.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, 
names.XRayServiceID), CheckDestroy: testAccCheckSamplingRuleDestroy(ctx), @@ -412,11 +415,12 @@ func TestAccXRaySamplingRule_tags_AddOnUpdate(t *testing.T) { func TestAccXRaySamplingRule_tags_EmptyTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) + var v types.SamplingRule resourceName := "aws_xray_sampling_rule.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.XRayServiceID), CheckDestroy: testAccCheckSamplingRuleDestroy(ctx), @@ -501,11 +505,12 @@ func TestAccXRaySamplingRule_tags_EmptyTag_OnCreate(t *testing.T) { func TestAccXRaySamplingRule_tags_EmptyTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + var v types.SamplingRule resourceName := "aws_xray_sampling_rule.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.XRayServiceID), CheckDestroy: testAccCheckSamplingRuleDestroy(ctx), @@ -638,11 +643,12 @@ func TestAccXRaySamplingRule_tags_EmptyTag_OnUpdate_Add(t *testing.T) { func TestAccXRaySamplingRule_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + var v types.SamplingRule resourceName := "aws_xray_sampling_rule.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.XRayServiceID), CheckDestroy: testAccCheckSamplingRuleDestroy(ctx), @@ -727,11 
+733,12 @@ func TestAccXRaySamplingRule_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { func TestAccXRaySamplingRule_tags_DefaultTags_providerOnly(t *testing.T) { ctx := acctest.Context(t) + var v types.SamplingRule resourceName := "aws_xray_sampling_rule.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.XRayServiceID), CheckDestroy: testAccCheckSamplingRuleDestroy(ctx), @@ -908,11 +915,12 @@ func TestAccXRaySamplingRule_tags_DefaultTags_providerOnly(t *testing.T) { func TestAccXRaySamplingRule_tags_DefaultTags_nonOverlapping(t *testing.T) { ctx := acctest.Context(t) + var v types.SamplingRule resourceName := "aws_xray_sampling_rule.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.XRayServiceID), CheckDestroy: testAccCheckSamplingRuleDestroy(ctx), @@ -1068,11 +1076,12 @@ func TestAccXRaySamplingRule_tags_DefaultTags_nonOverlapping(t *testing.T) { func TestAccXRaySamplingRule_tags_DefaultTags_overlapping(t *testing.T) { ctx := acctest.Context(t) + var v types.SamplingRule resourceName := "aws_xray_sampling_rule.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.XRayServiceID), CheckDestroy: testAccCheckSamplingRuleDestroy(ctx), @@ -1244,11 +1253,12 @@ func 
TestAccXRaySamplingRule_tags_DefaultTags_overlapping(t *testing.T) { func TestAccXRaySamplingRule_tags_DefaultTags_updateToProviderOnly(t *testing.T) { ctx := acctest.Context(t) + var v types.SamplingRule resourceName := "aws_xray_sampling_rule.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.XRayServiceID), CheckDestroy: testAccCheckSamplingRuleDestroy(ctx), @@ -1334,11 +1344,12 @@ func TestAccXRaySamplingRule_tags_DefaultTags_updateToProviderOnly(t *testing.T) func TestAccXRaySamplingRule_tags_DefaultTags_updateToResourceOnly(t *testing.T) { ctx := acctest.Context(t) + var v types.SamplingRule resourceName := "aws_xray_sampling_rule.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.XRayServiceID), CheckDestroy: testAccCheckSamplingRuleDestroy(ctx), @@ -1423,11 +1434,12 @@ func TestAccXRaySamplingRule_tags_DefaultTags_updateToResourceOnly(t *testing.T) func TestAccXRaySamplingRule_tags_DefaultTags_emptyResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v types.SamplingRule resourceName := "aws_xray_sampling_rule.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.XRayServiceID), CheckDestroy: testAccCheckSamplingRuleDestroy(ctx), @@ -1488,11 +1500,12 @@ func 
TestAccXRaySamplingRule_tags_DefaultTags_emptyResourceTag(t *testing.T) { func TestAccXRaySamplingRule_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T) { ctx := acctest.Context(t) + var v types.SamplingRule resourceName := "aws_xray_sampling_rule.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.XRayServiceID), CheckDestroy: testAccCheckSamplingRuleDestroy(ctx), @@ -1545,11 +1558,12 @@ func TestAccXRaySamplingRule_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T) func TestAccXRaySamplingRule_tags_DefaultTags_nullOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v types.SamplingRule resourceName := "aws_xray_sampling_rule.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.XRayServiceID), CheckDestroy: testAccCheckSamplingRuleDestroy(ctx), @@ -1607,11 +1621,12 @@ func TestAccXRaySamplingRule_tags_DefaultTags_nullOverlappingResourceTag(t *test func TestAccXRaySamplingRule_tags_DefaultTags_nullNonOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v types.SamplingRule resourceName := "aws_xray_sampling_rule.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.XRayServiceID), CheckDestroy: testAccCheckSamplingRuleDestroy(ctx), @@ -1669,11 +1684,12 @@ 
func TestAccXRaySamplingRule_tags_DefaultTags_nullNonOverlappingResourceTag(t *t func TestAccXRaySamplingRule_tags_ComputedTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) + var v types.SamplingRule resourceName := "aws_xray_sampling_rule.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.XRayServiceID), CheckDestroy: testAccCheckSamplingRuleDestroy(ctx), @@ -1724,11 +1740,12 @@ func TestAccXRaySamplingRule_tags_ComputedTag_OnCreate(t *testing.T) { func TestAccXRaySamplingRule_tags_ComputedTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) + var v types.SamplingRule resourceName := "aws_xray_sampling_rule.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.XRayServiceID), CheckDestroy: testAccCheckSamplingRuleDestroy(ctx), @@ -1821,11 +1838,12 @@ func TestAccXRaySamplingRule_tags_ComputedTag_OnUpdate_Add(t *testing.T) { func TestAccXRaySamplingRule_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) + var v types.SamplingRule resourceName := "aws_xray_sampling_rule.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.XRayServiceID), CheckDestroy: testAccCheckSamplingRuleDestroy(ctx), @@ -1908,11 +1926,12 @@ func 
TestAccXRaySamplingRule_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { func TestAccXRaySamplingRule_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { ctx := acctest.Context(t) + var v types.SamplingRule resourceName := "aws_xray_sampling_rule.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.XRayServiceID), CheckDestroy: testAccCheckSamplingRuleDestroy(ctx), @@ -2070,11 +2089,12 @@ func TestAccXRaySamplingRule_tags_IgnoreTags_Overlap_DefaultTag(t *testing.T) { func TestAccXRaySamplingRule_tags_IgnoreTags_Overlap_ResourceTag(t *testing.T) { ctx := acctest.Context(t) + var v types.SamplingRule resourceName := "aws_xray_sampling_rule.test" - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) - resource.ParallelTest(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.XRayServiceID), CheckDestroy: testAccCheckSamplingRuleDestroy(ctx), diff --git a/internal/service/xray/service_endpoint_resolver_gen.go b/internal/service/xray/service_endpoint_resolver_gen.go index 5af164b38b69..d4a8fa69db91 100644 --- a/internal/service/xray/service_endpoint_resolver_gen.go +++ b/internal/service/xray/service_endpoint_resolver_gen.go @@ -62,7 +62,7 @@ func (r resolverV2) ResolveEndpoint(ctx context.Context, params xray.EndpointPar }) params.UseFIPS = aws.Bool(false) } else { - err = fmt.Errorf("looking up xray endpoint %q: %s", hostname, err) + err = fmt.Errorf("looking up xray endpoint %q: %w", hostname, err) return } } else { diff --git a/internal/service/xray/service_endpoints_gen_test.go b/internal/service/xray/service_endpoints_gen_test.go 
index 59ba212a4d31..a2cb72fc6955 100644 --- a/internal/service/xray/service_endpoints_gen_test.go +++ b/internal/service/xray/service_endpoints_gen_test.go @@ -521,7 +521,7 @@ func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { ) } -var errCancelOperation = fmt.Errorf("Test: Canceling request") +var errCancelOperation = errors.New("Test: Canceling request") func addCancelRequestMiddleware() func(*middleware.Stack) error { return func(stack *middleware.Stack) error { diff --git a/internal/service/xray/service_package_gen.go b/internal/service/xray/service_package_gen.go index 9988c8415649..9f0ffb3cad60 100644 --- a/internal/service/xray/service_package_gen.go +++ b/internal/service/xray/service_package_gen.go @@ -7,7 +7,6 @@ import ( "unique" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/xray" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -102,7 +101,7 @@ func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) ( func(o *xray.Options) { if inContext, ok := conns.FromContext(ctx); ok && inContext.VCREnabled() { tflog.Info(ctx, "overriding retry behavior to immediately return VCR errors") - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(vcr.InteractionNotFoundRetryableFunc)) + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), vcr.InteractionNotFoundRetryableFunc) } }, withExtraOptions(ctx, p, config), diff --git a/internal/service/xray/tags_gen.go b/internal/service/xray/tags_gen.go index e1effcc979c1..8c3c8d31fb65 100644 --- a/internal/service/xray/tags_gen.go +++ b/internal/service/xray/tags_gen.go @@ -3,8 +3,8 @@ package xray import ( "context" - "fmt" + "github.com/YakDriver/smarterr" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/xray" awstypes 
"github.com/aws/aws-sdk-go-v2/service/xray/types" @@ -31,7 +31,7 @@ func listTags(ctx context.Context, conn *xray.Client, identifier string, optFns page, err := pages.NextPage(ctx, optFns...) if err != nil { - return tftags.New(ctx, nil), err + return tftags.New(ctx, nil), smarterr.NewError(err) } output = append(output, page.Tags...) @@ -46,7 +46,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri tags, err := listTags(ctx, meta.(*conns.AWSClient).XRayClient(ctx), identifier) if err != nil { - return err + return smarterr.NewError(err) } if inContext, ok := tftags.FromContext(ctx); ok { @@ -124,7 +124,7 @@ func updateTags(ctx context.Context, conn *xray.Client, identifier string, oldTa _, err := conn.UntagResource(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("untagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } @@ -139,7 +139,7 @@ func updateTags(ctx context.Context, conn *xray.Client, identifier string, oldTa _, err := conn.TagResource(ctx, &input, optFns...) if err != nil { - return fmt.Errorf("tagging resource (%s): %w", identifier, err) + return smarterr.NewError(err) } } diff --git a/internal/service/xray/testdata/EncryptionConfig/basic_v5.100.0/main_gen.tf b/internal/service/xray/testdata/EncryptionConfig/basic_v5.100.0/main_gen.tf new file mode 100644 index 000000000000..0c690ca844b0 --- /dev/null +++ b/internal/service/xray/testdata/EncryptionConfig/basic_v5.100.0/main_gen.tf @@ -0,0 +1,17 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_xray_encryption_config" "test" { + type = "NONE" +} + +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "5.100.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/xray/testdata/EncryptionConfig/basic_v6.0.0/main_gen.tf b/internal/service/xray/testdata/EncryptionConfig/basic_v6.0.0/main_gen.tf new file mode 100644 index 000000000000..6b10f571f947 --- /dev/null +++ b/internal/service/xray/testdata/EncryptionConfig/basic_v6.0.0/main_gen.tf @@ -0,0 +1,17 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resource "aws_xray_encryption_config" "test" { + type = "NONE" +} + +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "6.0.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/xray/testdata/Group/basic_v5.100.0/main_gen.tf b/internal/service/xray/testdata/Group/basic_v5.100.0/main_gen.tf new file mode 100644 index 000000000000..2e2b51da5a62 --- /dev/null +++ b/internal/service/xray/testdata/Group/basic_v5.100.0/main_gen.tf @@ -0,0 +1,23 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resource "aws_xray_group" "test" { + group_name = var.rName + filter_expression = "responsetime > 5" +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "5.100.0" + } + } +} + +provider "aws" {} diff --git a/internal/service/xray/testdata/Group/basic_v6.0.0/main_gen.tf b/internal/service/xray/testdata/Group/basic_v6.0.0/main_gen.tf new file mode 100644 index 000000000000..fcc8df2c6414 --- /dev/null +++ b/internal/service/xray/testdata/Group/basic_v6.0.0/main_gen.tf @@ -0,0 +1,23 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resource "aws_xray_group" "test" { + group_name = var.rName + filter_expression = "responsetime > 5" +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "6.0.0" + } + } +} + +provider "aws" {} diff --git a/internal/slices/iter.go b/internal/slices/iter.go new file mode 100644 index 000000000000..f0a987de277f --- /dev/null +++ b/internal/slices/iter.go @@ -0,0 +1,31 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package slices + +import ( + "iter" +) + +// AppliedToEach returns an iterator that yields the slice elements transformed by the function `f`. +func AppliedToEach[S ~[]E, E any, T any](s S, f func(E) T) iter.Seq[T] { + return func(yield func(T) bool) { + for _, v := range s { + if !yield(f(v)) { + return + } + } + } +} + +// BackwardValues returns an iterator that yields the slice elements in reverse order. +// It is a values-only equivalent of `slices.Backward`. +func BackwardValues[Slice ~[]E, E any](s Slice) iter.Seq[E] { + return func(yield func(E) bool) { + for i := len(s) - 1; i >= 0; i-- { + if !yield(s[i]) { + return + } + } + } +} diff --git a/internal/slices/iter_test.go b/internal/slices/iter_test.go new file mode 100644 index 000000000000..f8c5ea0263df --- /dev/null +++ b/internal/slices/iter_test.go @@ -0,0 +1,73 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package slices + +import ( + "slices" + "strings" + "testing" + + "github.com/google/go-cmp/cmp" +) + +func TestAppliedToEach(t *testing.T) { + t.Parallel() + + type testCase struct { + input []string + expected []string + } + tests := map[string]testCase{ + "three elements": { + input: []string{"one", "two", "3"}, + expected: []string{"ONE", "TWO", "3"}, + }, + "one element": { + input: []string{"abcdEFGH"}, + expected: []string{"ABCDEFGH"}, + }, + "zero elements": { + input: []string{}, + expected: nil, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + t.Parallel() + + iter := AppliedToEach(test.input, strings.ToUpper) + + got := slices.Collect(iter) + + if diff := cmp.Diff(got, test.expected); diff != "" { + t.Errorf("unexpected diff (+wanted, -got): %s", diff) + } + }) + } +} + +// Copied and adapted from stdlib slices package +func TestBackwardValues(t *testing.T) { + t.Parallel() + + for size := range 10 { + var s []int + for i := range size { + s = append(s, i) + } + ev := size - 1 + cnt := 0 + for v := range BackwardValues(s) { + if v != ev { + t.Errorf("at iteration %d got %d want %d", cnt, v, ev) + } + ev-- + cnt++ + } + if cnt != size { + t.Errorf("read %d values expected %d", cnt, size) + } + } +} diff --git a/internal/slices/slices.go b/internal/slices/slices.go index fb8880d9fb77..727d072542e6 100644 --- a/internal/slices/slices.go +++ b/internal/slices/slices.go @@ -4,6 +4,7 @@ package slices import ( + "iter" "slices" ) @@ -69,6 +70,10 @@ type Predicate[T any] func(T) bool // Filter returns a new slice containing all values that return `true` for the filter function `f`. 
func Filter[S ~[]E, E any](s S, f Predicate[E]) S { + if len(s) == 0 { + return nil + } + v := S(make([]E, 0, len(s))) for _, e := range s { @@ -163,3 +168,23 @@ func Strings[S ~[]E, E stringable](s S) []string { return string(e) }) } + +// CollectWithError collects values from seq into a new slice and returns it. +// The first non-nil error in seq is returned. +// If seq is empty, the result is nil. +func CollectWithError[E any](seq iter.Seq2[E, error]) ([]E, error) { + return AppendSeqWithError([]E(nil), seq) +} + +// AppendSeqWithError appends the values from seq to the slice and returns the extended slice. +// The first non-nil error in seq is returned. +// If seq is empty, the result preserves the nilness of s. +func AppendSeqWithError[S ~[]E, E any](s S, seq iter.Seq2[E, error]) (S, error) { + for v, err := range seq { + if err != nil { + return nil, err + } + s = append(s, v) + } + return s, nil +} diff --git a/internal/slices/slices_test.go b/internal/slices/slices_test.go index 0537d29f2cfd..4fbc705250b1 100644 --- a/internal/slices/slices_test.go +++ b/internal/slices/slices_test.go @@ -4,6 +4,8 @@ package slices import ( + "errors" + "maps" "strings" "testing" @@ -171,7 +173,7 @@ func TestFilter(t *testing.T) { }, "zero elements": { input: []string{}, - expected: []string{}, + expected: nil, }, } @@ -365,3 +367,46 @@ func TestRange(t *testing.T) { }) } } + +func TestCollectWithError(t *testing.T) { + t.Parallel() + + type testCase struct { + input map[int]error + wantErr bool + } + tests := map[string]testCase{ + "no error": { + input: map[int]error{ + 1: nil, + 2: nil, + 3: nil, + }, + }, + "has error": { + input: map[int]error{ + 1: nil, + 2: errors.New("test error"), + 3: nil, + }, + wantErr: true, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + t.Parallel() + + got, err := CollectWithError(maps.All(test.input)) + + if got, want := err != nil, test.wantErr; !cmp.Equal(got, want) { + t.Errorf("CollectWithError() err 
%t, want %t", got, want) + } + if err == nil { + if got, want := len(got), len(test.input); !cmp.Equal(got, want) { + t.Errorf("CollectWithError() len %d, want %d", got, want) + } + } + }) + } +} diff --git a/internal/smerr/smarterr.go b/internal/smerr/smarterr.go new file mode 100644 index 000000000000..98b80f1c295b --- /dev/null +++ b/internal/smerr/smarterr.go @@ -0,0 +1,46 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package smerr + +import ( + "context" + + "github.com/YakDriver/smarterr" + fwdiag "github.com/hashicorp/terraform-plugin-framework/diag" + sdkdiag "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/names" +) + +const ( + ID = smarterr.ID +) + +// This is smarterr wrapping to inject private context into keyvals for the SDK and Framework diagnostics. + +// Append enriches smarterr.Append with resource and service context if available. +func Append(ctx context.Context, diags sdkdiag.Diagnostics, err error, keyvals ...any) sdkdiag.Diagnostics { + return smarterr.Append(ctx, diags, err, injectContext(ctx, keyvals...)...) +} + +// AddError enriches smarterr.AddError with resource and service context if available. +func AddError(ctx context.Context, diags *fwdiag.Diagnostics, err error, keyvals ...any) { + smarterr.AddError(ctx, diags, err, injectContext(ctx, keyvals...)...) +} + +// EnrichAppend enriches smarterr.EnrichAppend with resource and service context if available. +func EnrichAppend(ctx context.Context, existing *fwdiag.Diagnostics, incoming fwdiag.Diagnostics, keyvals ...any) { + smarterr.EnrichAppend(ctx, existing, incoming, injectContext(ctx, keyvals...)...) 
+} + +func injectContext(ctx context.Context, keyvals ...any) []any { + if inctx, ok := conns.FromContext(ctx); ok { + srv := inctx.ServicePackageName() + if v, err := names.HumanFriendly(srv); err == nil { + srv = v + } + keyvals = append(keyvals, smarterr.ResourceName, inctx.ResourceName(), smarterr.ServiceName, srv) + } + return keyvals +} diff --git a/internal/smithy/README.md b/internal/smithy/README.md new file mode 100644 index 000000000000..20d5bf99f0ee --- /dev/null +++ b/internal/smithy/README.md @@ -0,0 +1,3 @@ +# Smithy Helpers + +[Smithy](https://smithy.io/) helpers. diff --git a/internal/smithy/json.go b/internal/smithy/json.go new file mode 100644 index 000000000000..71d5ef4f90e5 --- /dev/null +++ b/internal/smithy/json.go @@ -0,0 +1,40 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package smithy + +import ( + "strings" + + smithydocument "github.com/aws/smithy-go/document" + tfjson "github.com/hashicorp/terraform-provider-aws/internal/json" +) + +// DocumentFromJSONString converts a JSON string to a [Smithy document](https://smithy.io/2.0/spec/simple-types.html#document). +func DocumentFromJSONString[T any](s string, f func(any) T) (T, error) { + var v any + + err := tfjson.DecodeFromString(s, &v) + if err != nil { + var zero T + return zero, err + } + + return f(v), nil +} + +// DocumentToJSONString converts a [Smithy document](https://smithy.io/2.0/spec/simple-types.html#document) to a JSON string. +func DocumentToJSONString(document smithydocument.Marshaler) (string, error) { + bytes, err := document.MarshalSmithyDocument() + if err != nil { + return "", err + } + + return strings.TrimSpace(string(bytes)), nil +} + +// JSONStringer interface is used to marshal and unmarshal JSON interface objects. 
+type JSONStringer interface { + smithydocument.Marshaler + smithydocument.Unmarshaler +} diff --git a/internal/smithy/json_test.go b/internal/smithy/json_test.go new file mode 100644 index 000000000000..479d0719e09b --- /dev/null +++ b/internal/smithy/json_test.go @@ -0,0 +1,67 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package smithy_test + +import ( + "testing" + + "github.com/aws/aws-sdk-go-v2/service/cognitoidentityprovider/document" + "github.com/google/go-cmp/cmp" + tfsmithy "github.com/hashicorp/terraform-provider-aws/internal/smithy" +) + +func TestDocumentToFromJSONString(t *testing.T) { + t.Parallel() + + testCases := []struct { + testName string + input string + wantOutput string + wantInputErr bool + wantOutputErr bool + }{ + { + testName: "empty string", + input: ``, + wantOutput: `null`, + }, + { + testName: "empty JSON", + input: `{}`, + wantOutput: `{}`, + }, + { + testName: "valid JSON", + input: `{"Field1": 42}`, + wantOutput: `{"Field1":42}`, + }, + { + testName: "invalid JSON", + input: `{"Field1"=42}`, + wantInputErr: true, + }, + } + + for _, testCase := range testCases { + t.Run(testCase.testName, func(t *testing.T) { + t.Parallel() + + json, err := tfsmithy.DocumentFromJSONString(testCase.input, document.NewLazyDocument) + if got, want := err != nil, testCase.wantInputErr; !cmp.Equal(got, want) { + t.Errorf("DocumentFromJSONString(%s) err %t (%v), want %t", testCase.input, got, err, want) + } + if err == nil { + output, err := tfsmithy.DocumentToJSONString(json) + if got, want := err != nil, testCase.wantOutputErr; !cmp.Equal(got, want) { + t.Errorf("DocumentToJSONString err %t (%v), want %t", got, err, want) + } + if err == nil { + if diff := cmp.Diff(output, testCase.wantOutput); diff != "" { + t.Errorf("unexpected diff (+wanted, -got): %s", diff) + } + } + } + }) + } +} diff --git a/internal/sweep/awsv2/skip.go b/internal/sweep/awsv2/skip.go index 4bb052f30c2c..c4fbcfa1a509 100644 --- 
a/internal/sweep/awsv2/skip.go +++ b/internal/sweep/awsv2/skip.go @@ -45,6 +45,10 @@ func SkipSweepError(err error) bool { if tfawserr.ErrMessageContains(err, "HttpConnectionTimeoutException", "Failed to connect to") { return true } + // Example (amp): InternalServerErrorException: Internal server error + if tfawserr.ErrMessageContains(err, "InternalServerErrorException", "Internal server error") { + return true + } // Example (GovCloud): InvalidAction: DescribeDBProxies is not available in this region if tfawserr.ErrMessageContains(err, "InvalidAction", "is not available") { return true @@ -90,6 +94,10 @@ func SkipSweepError(err error) bool { if tfawserr.ErrMessageContains(err, "InvalidParameterValueException", "Access Denied to API Version") { return true } + // Example (GovCloud): InvalidParameterException: The DATA_PROTECTION_POLICY policy type is not supported in this region + if tfawserr.ErrMessageContains(err, "InvalidParameterException", "DATA_PROTECTION_POLICY policy type is not supported in this region") { + return true + } // Example (GovCloud): The AppStream 2.0 user pool feature is not supported in the us-gov-west-1 AWS Region if tfawserr.ErrMessageContains(err, "InvalidParameterValueException", "feature is not supported") { return true @@ -98,6 +106,10 @@ func SkipSweepError(err error) bool { if tfawserr.ErrMessageContains(err, "InvalidParameterValueException", "This API operation is currently unavailable") { return true } + // Example (GovCloud): InvalidSignatureException: Credential should be scoped to a valid region + if tfawserr.ErrMessageContains(err, "InvalidSignatureException", "Credential should be scoped to a valid region") { + return true + } // For example from us-west-2 Route53 zone if tfawserr.ErrMessageContains(err, "KeySigningKeyInParentDSRecord", "Due to DNS lookup failure") { return true @@ -110,6 +122,10 @@ func SkipSweepError(err error) bool { if tfawserr.ErrMessageContains(err, "ResourceNotFoundException", "The subscription does 
not exist") { return true } + // Example (GovCloud): SignatureDoesNotMatch: Credential should be scoped to a valid region + if tfawserr.ErrMessageContains(err, "SignatureDoesNotMatch", "Credential should be scoped to a valid region") { + return true + } // For example from us-gov-east-1 IoT domain configuration if tfawserr.ErrMessageContains(err, "UnauthorizedException", "API is not available in") { return true @@ -130,6 +146,10 @@ func SkipSweepError(err error) bool { if tfawserr.ErrMessageContains(err, "UnsupportedOperation", "The functionality you requested is not available in this region") { return true } + // Example (fsx): UnsupportedOperation: This operation is unsupported. + if tfawserr.ErrMessageContains(err, "UnsupportedOperation", "This operation is unsupported") { + return true + } // For example from us-west-1 EMR studio if tfawserr.ErrMessageContains(err, "ValidationException", "Account is not whitelisted to use this feature") { return true diff --git a/internal/sweep/framework/resource.go b/internal/sweep/framework/resource.go index ae6ee40066a7..12672a1c3fda 100644 --- a/internal/sweep/framework/resource.go +++ b/internal/sweep/framework/resource.go @@ -6,6 +6,7 @@ package framework import ( "context" + "github.com/aws/aws-sdk-go-v2/aws" "github.com/hashicorp/terraform-plugin-framework/path" fwresource "github.com/hashicorp/terraform-plugin-framework/resource" rschema "github.com/hashicorp/terraform-plugin-framework/resource/schema" @@ -72,7 +73,13 @@ func (sr *sweepResource) Delete(ctx context.Context, optFns ...tfresource.Option if d.HasError() { return fwdiag.DiagnosticsError(d) } - ctx = tflog.SetField(ctx, attr.path, attr.value) + switch v := attr.value.(type) { + case *string: + ctx = tflog.SetField(ctx, attr.path, aws.ToString(v)) + + default: + ctx = tflog.SetField(ctx, attr.path, v) + } } tflog.Info(ctx, "Sweeping resource") diff --git a/internal/sweep/register_gen_test.go b/internal/sweep/register_gen_test.go index 
cb24319a4c0e..9d948704d3b5 100644 --- a/internal/sweep/register_gen_test.go +++ b/internal/sweep/register_gen_test.go @@ -27,6 +27,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/service/batch" "github.com/hashicorp/terraform-provider-aws/internal/service/bcmdataexports" "github.com/hashicorp/terraform-provider-aws/internal/service/bedrockagent" + "github.com/hashicorp/terraform-provider-aws/internal/service/bedrockagentcore" "github.com/hashicorp/terraform-provider-aws/internal/service/budgets" "github.com/hashicorp/terraform-provider-aws/internal/service/chime" "github.com/hashicorp/terraform-provider-aws/internal/service/cleanrooms" @@ -207,6 +208,7 @@ func registerSweepers() { batch.RegisterSweepers() bcmdataexports.RegisterSweepers() bedrockagent.RegisterSweepers() + bedrockagentcore.RegisterSweepers() budgets.RegisterSweepers() chime.RegisterSweepers() cleanrooms.RegisterSweepers() diff --git a/internal/sweep/service_packages_gen_test.go b/internal/sweep/service_packages_gen_test.go index a063d088e82d..b65cd9455036 100644 --- a/internal/sweep/service_packages_gen_test.go +++ b/internal/sweep/service_packages_gen_test.go @@ -26,6 +26,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/service/apprunner" "github.com/hashicorp/terraform-provider-aws/internal/service/appstream" "github.com/hashicorp/terraform-provider-aws/internal/service/appsync" + "github.com/hashicorp/terraform-provider-aws/internal/service/arcregionswitch" "github.com/hashicorp/terraform-provider-aws/internal/service/athena" "github.com/hashicorp/terraform-provider-aws/internal/service/auditmanager" "github.com/hashicorp/terraform-provider-aws/internal/service/autoscaling" @@ -35,6 +36,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/service/bcmdataexports" "github.com/hashicorp/terraform-provider-aws/internal/service/bedrock" "github.com/hashicorp/terraform-provider-aws/internal/service/bedrockagent" + 
"github.com/hashicorp/terraform-provider-aws/internal/service/bedrockagentcore" "github.com/hashicorp/terraform-provider-aws/internal/service/billing" "github.com/hashicorp/terraform-provider-aws/internal/service/budgets" "github.com/hashicorp/terraform-provider-aws/internal/service/ce" @@ -175,6 +177,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/service/notifications" "github.com/hashicorp/terraform-provider-aws/internal/service/notificationscontacts" "github.com/hashicorp/terraform-provider-aws/internal/service/oam" + "github.com/hashicorp/terraform-provider-aws/internal/service/odb" "github.com/hashicorp/terraform-provider-aws/internal/service/opensearch" "github.com/hashicorp/terraform-provider-aws/internal/service/opensearchserverless" "github.com/hashicorp/terraform-provider-aws/internal/service/organizations" @@ -214,6 +217,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/service/s3control" "github.com/hashicorp/terraform-provider-aws/internal/service/s3outposts" "github.com/hashicorp/terraform-provider-aws/internal/service/s3tables" + "github.com/hashicorp/terraform-provider-aws/internal/service/s3vectors" "github.com/hashicorp/terraform-provider-aws/internal/service/sagemaker" "github.com/hashicorp/terraform-provider-aws/internal/service/scheduler" "github.com/hashicorp/terraform-provider-aws/internal/service/schemas" @@ -255,6 +259,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/service/wafregional" "github.com/hashicorp/terraform-provider-aws/internal/service/wafv2" "github.com/hashicorp/terraform-provider-aws/internal/service/wellarchitected" + "github.com/hashicorp/terraform-provider-aws/internal/service/workmail" "github.com/hashicorp/terraform-provider-aws/internal/service/workspaces" "github.com/hashicorp/terraform-provider-aws/internal/service/workspacesweb" "github.com/hashicorp/terraform-provider-aws/internal/service/xray" @@ -281,6 +286,7 @@ func servicePackages(ctx context.Context) 
[]conns.ServicePackage { apprunner.ServicePackage(ctx), appstream.ServicePackage(ctx), appsync.ServicePackage(ctx), + arcregionswitch.ServicePackage(ctx), athena.ServicePackage(ctx), auditmanager.ServicePackage(ctx), autoscaling.ServicePackage(ctx), @@ -290,6 +296,7 @@ func servicePackages(ctx context.Context) []conns.ServicePackage { bcmdataexports.ServicePackage(ctx), bedrock.ServicePackage(ctx), bedrockagent.ServicePackage(ctx), + bedrockagentcore.ServicePackage(ctx), billing.ServicePackage(ctx), budgets.ServicePackage(ctx), ce.ServicePackage(ctx), @@ -430,6 +437,7 @@ func servicePackages(ctx context.Context) []conns.ServicePackage { notifications.ServicePackage(ctx), notificationscontacts.ServicePackage(ctx), oam.ServicePackage(ctx), + odb.ServicePackage(ctx), opensearch.ServicePackage(ctx), opensearchserverless.ServicePackage(ctx), organizations.ServicePackage(ctx), @@ -469,6 +477,7 @@ func servicePackages(ctx context.Context) []conns.ServicePackage { s3control.ServicePackage(ctx), s3outposts.ServicePackage(ctx), s3tables.ServicePackage(ctx), + s3vectors.ServicePackage(ctx), sagemaker.ServicePackage(ctx), scheduler.ServicePackage(ctx), schemas.ServicePackage(ctx), @@ -510,6 +519,7 @@ func servicePackages(ctx context.Context) []conns.ServicePackage { wafregional.ServicePackage(ctx), wafv2.ServicePackage(ctx), wellarchitected.ServicePackage(ctx), + workmail.ServicePackage(ctx), workspaces.ServicePackage(ctx), workspacesweb.ServicePackage(ctx), xray.ServicePackage(ctx), diff --git a/internal/tags/key_value_tags.go b/internal/tags/key_value_tags.go index 566b4ab6e82e..9c92be4db62d 100644 --- a/internal/tags/key_value_tags.go +++ b/internal/tags/key_value_tags.go @@ -15,8 +15,6 @@ import ( "github.com/hashicorp/go-cty/cty" fwdiag "github.com/hashicorp/terraform-plugin-framework/diag" - "github.com/hashicorp/terraform-plugin-framework/path" - "github.com/hashicorp/terraform-plugin-framework/resource" "github.com/hashicorp/terraform-plugin-framework/types" 
"github.com/hashicorp/terraform-provider-aws/internal/create" "github.com/hashicorp/terraform-provider-aws/internal/framework/flex" @@ -887,13 +885,10 @@ func (tags KeyValueTags) ResolveDuplicates(ctx context.Context, defaultConfig *D } // ResolveDuplicatesFramework resolves differences between incoming tags, defaultTags, and ignoreConfig -func (tags KeyValueTags) ResolveDuplicatesFramework(ctx context.Context, defaultConfig *DefaultConfig, ignoreConfig *IgnoreConfig, resp *resource.ReadResponse, diags *fwdiag.Diagnostics) KeyValueTags { +func (tags KeyValueTags) ResolveDuplicatesFramework(ctx context.Context, defaultConfig *DefaultConfig, ignoreConfig *IgnoreConfig, tagsAll Map, diags *fwdiag.Diagnostics) KeyValueTags { // remove default config. t := tags.RemoveDefaultConfig(defaultConfig) - var tagsAll Map - diags.Append(resp.State.GetAttribute(ctx, path.Root("tags"), &tagsAll)...) - if diags.HasError() { return KeyValueTags{} } diff --git a/internal/tfresource/errors.go b/internal/tfresource/errors.go index f40eaf738fe7..bac461c584aa 100644 --- a/internal/tfresource/errors.go +++ b/internal/tfresource/errors.go @@ -4,6 +4,8 @@ package tfresource import ( + "errors" + "github.com/hashicorp/terraform-provider-aws/internal/retry" ) @@ -33,3 +35,49 @@ var TimedOut = retry.TimedOut // which handles both Plugin SDK V2 and internal error types. For net-new usage, // prefer calling retry.SetLastError directly. var SetLastError = retry.SetLastError + +// From github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry: + +// RetryError forces client code to choose whether or not a given error is retryable. +type RetryError struct { + err error + isRetryable bool +} + +func (e *RetryError) Error() string { + return e.err.Error() +} + +func (e *RetryError) Unwrap() error { + return e.err +} + +// RetryableError is a helper to create a RetryError that's retryable from a +// given error. To prevent logic errors, will return an error when passed a +// nil error. 
+func RetryableError(err error) *RetryError { + if err == nil { + return &RetryError{ + err: errors.New("empty retryable error received. " + + "This is a bug with the Terraform AWS Provider and should be " + + "reported as a GitHub issue in the provider repository."), + isRetryable: false, + } + } + return &RetryError{err: err, isRetryable: true} +} + +// NonRetryableError is a helper to create a RetryError that's _not_ retryable +// from a given error. To prevent logic errors, will return an error when +// passed a nil error. +func NonRetryableError(err error) *RetryError { + if err == nil { + return &RetryError{ + err: errors.New("empty non-retryable error received. " + + "This is a bug with the Terraform AWS Provider and should be " + + "reported as a GitHub issue in the provider repository."), + isRetryable: false, + } + } + return &RetryError{err: err, isRetryable: false} +} diff --git a/internal/tfresource/not_found_error.go b/internal/tfresource/not_found_error.go index 787b710bb5c2..f423cbb989ce 100644 --- a/internal/tfresource/not_found_error.go +++ b/internal/tfresource/not_found_error.go @@ -6,6 +6,7 @@ package tfresource import ( "errors" "fmt" + "iter" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" tfslices "github.com/hashicorp/terraform-provider-aws/internal/slices" @@ -130,6 +131,43 @@ func AssertSingleValueResult[T any](a []T, fs ...foundFunc[T]) (*T, error) { } } +// AssertSingleValueResultIterErr returns either a pointer to the single value in the iterator or the error value from the iterator. +// If there are not exactly one value, returns a `NotFound` error. 
+func AssertSingleValueResultIterErr[T any](i iter.Seq2[T, error]) (*T, error) { + next, stop := iter.Pull2(i) + defer stop() + + v, err, ok := next() + if !ok { + return nil, NewEmptyResultError(nil) + } + + if err != nil { + return nil, err + } + + _, err, ok = next() + if !ok { + return &v, nil + } + + if err != nil { + return nil, err + } + n := 2 + for { + _, err, ok = next() + if !ok { + break + } + if err != nil { + return nil, err + } + n++ + } + return nil, NewTooManyResultsError(n, nil) +} + // AssertFirstValueResult returns a pointer to the first value in the specified slice of values. // Returns a `NotFound` error otherwise. func AssertFirstValueResult[T any](a []T) (*T, error) { diff --git a/internal/tfresource/not_found_error_test.go b/internal/tfresource/not_found_error_test.go index 5c170a998208..a8bd08fd004a 100644 --- a/internal/tfresource/not_found_error_test.go +++ b/internal/tfresource/not_found_error_test.go @@ -6,9 +6,11 @@ package tfresource import ( "errors" "fmt" + "iter" "testing" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" + tfiter "github.com/hashicorp/terraform-provider-aws/internal/iter" ) func TestEmptyResultErrorAsNotFoundError(t *testing.T) { @@ -169,3 +171,125 @@ func TestTooManyResultsErrorIs(t *testing.T) { }) } } + +func TestAssertSingleValueResult(t *testing.T) { + t.Parallel() + + testCases := map[string]struct { + input []int + expectedValue int + expectedError error + }{ + "empty slice": { + input: []int{}, + expectedError: NewEmptyResultError(nil), + }, + "single element": { + input: []int{42}, + expectedValue: 42, + }, + "multiple elements": { + input: []int{42, 43}, + expectedError: NewTooManyResultsError(2, nil), + }, + } + + for name, testCase := range testCases { + t.Run(name, func(t *testing.T) { + t.Parallel() + + result, err := AssertSingleValueResult(testCase.input) + + if testCase.expectedError != nil { + if err == nil { + t.Errorf("expected error: %v, got nil", testCase.expectedError) + } else if 
err.Error() != testCase.expectedError.Error() { + t.Errorf("expected error: %v, got %v", testCase.expectedError, err) + } + } else if err != nil { + t.Errorf("unexpected error: %v", err) + } + + if result == nil { + if testCase.expectedError == nil { + t.Errorf("expected %d, got nil", testCase.expectedValue) + } + return + } else if *result != testCase.expectedValue { + t.Errorf("expected %d, got %d", testCase.expectedValue, *result) + } + }) + } +} + +func TestAssertSingleValueResultIterErr(t *testing.T) { + t.Parallel() + + testCases := map[string]struct { + input iter.Seq2[int, error] + expectedValue int + expectedError error + }{ + "empty slice": { + input: tfiter.Null2[int, error](), + expectedError: NewEmptyResultError(nil), + }, + "single element": { + input: valuesWithErrors([]int{42}), + expectedValue: 42, + }, + "multiple elements": { + input: valuesWithErrors([]int{42, 43}), + expectedError: NewTooManyResultsError(2, nil), + }, + "with error": { + input: valueError(errors.New("test error")), + expectedError: errors.New("test error"), + }, + } + + for name, testCase := range testCases { + t.Run(name, func(t *testing.T) { + t.Parallel() + + result, err := AssertSingleValueResultIterErr(testCase.input) + + if testCase.expectedError != nil { + if err == nil { + t.Errorf("expected error: %v, got nil", testCase.expectedError) + } else if err.Error() != testCase.expectedError.Error() { + t.Errorf("expected error: %v, got %v", testCase.expectedError, err) + } + } else if err != nil { + t.Errorf("unexpected error: %v", err) + } + + if result == nil { + if testCase.expectedError == nil { + t.Errorf("expected %d, got nil", testCase.expectedValue) + } + return + } else if *result != testCase.expectedValue { + t.Errorf("expected %d, got %d", testCase.expectedValue, *result) + } + }) + } +} + +func valuesWithErrors(values []int) iter.Seq2[int, error] { + return func(yield func(int, error) bool) { + for _, v := range values { + if !yield(v, nil) { + break + } + } + } 
+} + +func valueError(err error) iter.Seq2[int, error] { + return func(yield func(int, error) bool) { + if !yield(0, err) { + return + } + } +} diff --git a/internal/tfresource/retry.go b/internal/tfresource/retry.go index 6af38c365c0c..3ebd030f7ed9 100644 --- a/internal/tfresource/retry.go +++ b/internal/tfresource/retry.go @@ -7,11 +7,9 @@ import ( "context" "fmt" "math/rand" - "sync" "time" "github.com/hashicorp/aws-sdk-go-base/v2/tfawserr" - sdkretry "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-provider-aws/internal/backoff" "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/retry" @@ -27,88 +25,13 @@ type Retryable func(error) (bool, error) // RetryWhen retries the function `f` when the error it returns satisfies `retryable`. // `f` is retried until `timeout` expires. -func RetryWhen(ctx context.Context, timeout time.Duration, f func() (any, error), retryable Retryable) (any, error) { - var output any - - err := Retry(ctx, timeout, func() *sdkretry.RetryError { - var err error - var again bool - - output, err = f() - again, err = retryable(err) - - if again { - return sdkretry.RetryableError(err) - } - - if err != nil { - return sdkretry.NonRetryableError(err) - } - - return nil - }) - - if TimedOut(err) { - output, err = f() - } - - if err != nil { - return nil, err - } - - return output, nil -} - -// RetryGWhen is the generic version of RetryWhen which obviates the need for a type -// assertion after the call. It retries the function `f` when the error it returns -// satisfies `retryable`. `f` is retried until `timeout` expires. 
-func RetryGWhen[T any](ctx context.Context, timeout time.Duration, f func() (T, error), retryable Retryable) (T, error) { - var output T - - err := Retry(ctx, timeout, func() *sdkretry.RetryError { - var err error - var again bool - - output, err = f() - again, err = retryable(err) - - if again { - return sdkretry.RetryableError(err) - } - - if err != nil { - return sdkretry.NonRetryableError(err) - } - - return nil - }) - - if TimedOut(err) { - output, err = f() - } - - if err != nil { - var zero T - return zero, err - } - - return output, nil +func RetryWhen[T any](ctx context.Context, timeout time.Duration, f func(context.Context) (T, error), retryable Retryable) (T, error) { + return retryWhen(ctx, timeout, f, retryable) } // RetryWhenAWSErrCodeEquals retries the specified function when it returns one of the specified AWS error codes. -func RetryWhenAWSErrCodeEquals(ctx context.Context, timeout time.Duration, f func() (any, error), codes ...string) (any, error) { // nosemgrep:ci.aws-in-func-name - return RetryWhen(ctx, timeout, f, func(err error) (bool, error) { - if tfawserr.ErrCodeEquals(err, codes...) { - return true, err - } - - return false, err - }) -} - -// RetryGWhenAWSErrCodeEquals retries the specified function when it returns one of the specified AWS error codes. -func RetryGWhenAWSErrCodeEquals[T any](ctx context.Context, timeout time.Duration, f func() (T, error), codes ...string) (T, error) { // nosemgrep:ci.aws-in-func-name - return RetryGWhen(ctx, timeout, f, func(err error) (bool, error) { +func RetryWhenAWSErrCodeEquals[T any](ctx context.Context, timeout time.Duration, f func(context.Context) (T, error), codes ...string) (T, error) { // nosemgrep:ci.aws-in-func-name + return retryWhen(ctx, timeout, f, func(err error) (bool, error) { if tfawserr.ErrCodeEquals(err, codes...) 
{ return true, err } @@ -118,8 +41,8 @@ func RetryGWhenAWSErrCodeEquals[T any](ctx context.Context, timeout time.Duratio } // RetryWhenAWSErrCodeContains retries the specified function when it returns an AWS error containing the specified code. -func RetryWhenAWSErrCodeContains(ctx context.Context, timeout time.Duration, f func() (any, error), code string) (any, error) { // nosemgrep:ci.aws-in-func-name - return RetryWhen(ctx, timeout, f, func(err error) (bool, error) { +func RetryWhenAWSErrCodeContains[T any](ctx context.Context, timeout time.Duration, f func(context.Context) (T, error), code string) (T, error) { // nosemgrep:ci.aws-in-func-name + return retryWhen(ctx, timeout, f, func(err error) (bool, error) { if tfawserr.ErrCodeContains(err, code) { return true, err } @@ -129,8 +52,8 @@ func RetryWhenAWSErrCodeContains(ctx context.Context, timeout time.Duration, f f } // RetryWhenAWSErrMessageContains retries the specified function when it returns an AWS error containing the specified message. 
-func RetryWhenAWSErrMessageContains(ctx context.Context, timeout time.Duration, f func() (any, error), code, message string) (any, error) { // nosemgrep:ci.aws-in-func-name - return RetryWhen(ctx, timeout, f, func(err error) (bool, error) { +func RetryWhenAWSErrMessageContains[T any](ctx context.Context, timeout time.Duration, f func(context.Context) (T, error), code, message string) (T, error) { // nosemgrep:ci.aws-in-func-name + return retryWhen(ctx, timeout, f, func(err error) (bool, error) { if tfawserr.ErrMessageContains(err, code, message) { return true, err } @@ -139,19 +62,9 @@ func RetryWhenAWSErrMessageContains(ctx context.Context, timeout time.Duration, }) } -func RetryWhenIsA[T error](ctx context.Context, timeout time.Duration, f func() (any, error)) (any, error) { - return RetryWhen(ctx, timeout, f, func(err error) (bool, error) { - if errs.IsA[T](err) { - return true, err - } - - return false, err - }) -} - -func RetryWhenIsOneOf2[T1, T2 error](ctx context.Context, timeout time.Duration, f func() (any, error)) (any, error) { - return RetryWhen(ctx, timeout, f, func(err error) (bool, error) { - if errs.IsA[T1](err) || errs.IsA[T2](err) { +func RetryWhenIsA[T any, E error](ctx context.Context, timeout time.Duration, f func(context.Context) (T, error)) (T, error) { + return retryWhen(ctx, timeout, f, func(err error) (bool, error) { + if errs.IsA[E](err) { return true, err } @@ -159,9 +72,9 @@ func RetryWhenIsOneOf2[T1, T2 error](ctx context.Context, timeout time.Duration, }) } -func RetryWhenIsOneOf3[T1, T2, T3 error](ctx context.Context, timeout time.Duration, f func() (any, error)) (any, error) { - return RetryWhen(ctx, timeout, f, func(err error) (bool, error) { - if errs.IsA[T1](err) || errs.IsA[T2](err) || errs.IsA[T3](err) { +func RetryWhenIsOneOf2[T any, E1, E2 error](ctx context.Context, timeout time.Duration, f func(context.Context) (T, error)) (T, error) { + return retryWhen(ctx, timeout, f, func(err error) (bool, error) { + if 
errs.IsA[E1](err) || errs.IsA[E2](err) { return true, err } @@ -169,9 +82,9 @@ func RetryWhenIsOneOf3[T1, T2, T3 error](ctx context.Context, timeout time.Durat }) } -func RetryWhenIsOneOf4[T1, T2, T3, T4 error](ctx context.Context, timeout time.Duration, f func() (any, error)) (any, error) { - return RetryWhen(ctx, timeout, f, func(err error) (bool, error) { - if errs.IsA[T1](err) || errs.IsA[T2](err) || errs.IsA[T3](err) || errs.IsA[T4](err) { +func RetryWhenIsOneOf3[T any, E1, E2, E3 error](ctx context.Context, timeout time.Duration, f func(context.Context) (T, error)) (T, error) { + return retryWhen(ctx, timeout, f, func(err error) (bool, error) { + if errs.IsA[E1](err) || errs.IsA[E2](err) || errs.IsA[E3](err) { return true, err } @@ -179,9 +92,9 @@ func RetryWhenIsOneOf4[T1, T2, T3, T4 error](ctx context.Context, timeout time.D }) } -func RetryWhenIsAErrorMessageContains[T errs.ErrorWithErrorMessage](ctx context.Context, timeout time.Duration, f func() (any, error), needle string) (any, error) { - return RetryWhen(ctx, timeout, f, func(err error) (bool, error) { - if errs.IsAErrorMessageContains[T](err, needle) { +func RetryWhenIsOneOf4[T any, E1, E2, E3, E4 error](ctx context.Context, timeout time.Duration, f func(context.Context) (T, error)) (T, error) { + return retryWhen(ctx, timeout, f, func(err error) (bool, error) { + if errs.IsA[E1](err) || errs.IsA[E2](err) || errs.IsA[E3](err) || errs.IsA[E4](err) { return true, err } @@ -189,8 +102,8 @@ func RetryWhenIsAErrorMessageContains[T errs.ErrorWithErrorMessage](ctx context. 
}) } -func RetryGWhenIsAErrorMessageContains[T any, E errs.ErrorWithErrorMessage](ctx context.Context, timeout time.Duration, f func() (T, error), needle string) (T, error) { - return RetryGWhen(ctx, timeout, f, func(err error) (bool, error) { +func RetryWhenIsAErrorMessageContains[T any, E errs.ErrorWithErrorMessage](ctx context.Context, timeout time.Duration, f func(context.Context) (T, error), needle string) (T, error) { + return retryWhen(ctx, timeout, f, func(err error) (bool, error) { if errs.IsAErrorMessageContains[E](err, needle) { return true, err } @@ -216,51 +129,24 @@ func RetryUntilEqual[T comparable](ctx context.Context, timeout time.Duration, t } // RetryUntilNotFound retries the specified function until it returns a retry.NotFoundError. -func RetryUntilNotFound(ctx context.Context, timeout time.Duration, f func() (any, error)) (any, error) { - return RetryWhen(ctx, timeout, f, func(err error) (bool, error) { - if NotFound(err) { - return false, nil - } - - if err != nil { - return false, err - } - - return true, ErrFoundResource - }) +func RetryUntilNotFound(ctx context.Context, timeout time.Duration, f func(context.Context) (any, error)) (any, error) { + return retry.Op(f).UntilNotFound()(ctx, timeout) } // RetryWhenNotFound retries the specified function when it returns a retry.NotFoundError. -func RetryWhenNotFound(ctx context.Context, timeout time.Duration, f func() (any, error)) (any, error) { - return RetryWhen(ctx, timeout, f, func(err error) (bool, error) { - if NotFound(err) { - return true, err - } - - return false, err - }) -} - -// RetryGWhenNotFound retries the specified function when it returns a retry.NotFoundError. 
-func RetryGWhenNotFound[T any](ctx context.Context, timeout time.Duration, f func() (T, error)) (T, error) { - return RetryGWhen(ctx, timeout, f, func(err error) (bool, error) { - if NotFound(err) { - return true, err - } - - return false, err - }) +func RetryWhenNotFound[T any](ctx context.Context, timeout time.Duration, f func(context.Context) (T, error)) (T, error) { + return retry.Op(f).UntilFoundN(1)(ctx, timeout) } // RetryWhenNewResourceNotFound retries the specified function when it returns a retry.NotFoundError and `isNewResource` is true. -func RetryWhenNewResourceNotFound(ctx context.Context, timeout time.Duration, f func() (any, error), isNewResource bool) (any, error) { - return RetryWhen(ctx, timeout, f, func(err error) (bool, error) { +func RetryWhenNewResourceNotFound[T any](ctx context.Context, timeout time.Duration, f func(context.Context) (T, error), isNewResource bool) (T, error) { + return retry.Op(f).If(func(_ T, err error) (bool, error) { if isNewResource && NotFound(err) { return true, err } return false, err - }) + })(ctx, timeout) } type Options struct { @@ -271,7 +157,7 @@ type Options struct { ContinuousTargetOccurence int // Number of times the Target state has to occur continuously } -func (o Options) Apply(c *sdkretry.StateChangeConf) { +func (o Options) Apply(c *retry.StateChangeConf) { if o.Delay > 0 { c.Delay = o.Delay } @@ -335,58 +221,36 @@ func WithContinuousTargetOccurence(continuousTargetOccurence int) OptionsFunc { // Retry allows configuration of StateChangeConf's various time arguments. // This is especially useful for AWS services that are prone to throttling, such as Route53, where // the default durations cause problems. -func Retry(ctx context.Context, timeout time.Duration, f sdkretry.RetryFunc, optFns ...OptionsFunc) error { - // These are used to pull the error out of the function; need a mutex to - // avoid a data race. 
- var resultErr error - var resultErrMu sync.Mutex - - options := Options{} +func Retry(ctx context.Context, timeout time.Duration, f func(context.Context) *RetryError, optFns ...OptionsFunc) error { + options := Options{ + MinPollInterval: 500 * time.Millisecond, //nolint:mnd // 500ms is the Plugin SDKv2 default + } for _, fn := range optFns { fn(&options) } - c := &sdkretry.StateChangeConf{ - Pending: []string{"retryableerror"}, - Target: []string{"success"}, - Timeout: timeout, - MinTimeout: 500 * time.Millisecond, - Refresh: func() (any, string, error) { - rerr := f() - - resultErrMu.Lock() - defer resultErrMu.Unlock() - - if rerr == nil { - resultErr = nil - return 42, "success", nil - } - - resultErr = rerr.Err - - if rerr.Retryable { - return 42, "retryableerror", nil + _, err := retryWhen(ctx, timeout, + func(ctx context.Context) (any, error) { + return nil, f(ctx) + }, + func(err error) (bool, error) { + if err, ok := errs.As[*RetryError](err); ok { + if err != nil { + return err.isRetryable, err.err + } + return false, nil } - return nil, "quit", rerr.Err + return false, err }, - } + backoff.WithDelay(backoff.SDKv2HelperRetryCompatibleDelay(options.Delay, options.PollInterval, options.MinPollInterval)), + ) - options.Apply(c) - - _, waitErr := c.WaitForStateContext(ctx) - - // Need to acquire the lock here to be able to avoid race using resultErr as - // the return value - resultErrMu.Lock() - defer resultErrMu.Unlock() + return err +} - // resultErr may be nil because the wait timed out and resultErr was never - // set; this is still an error - if resultErr == nil { - return waitErr - } - // resultErr takes precedence over waitErr if both are set because it is - // more likely to be useful - return resultErr +func retryWhen[T any](ctx context.Context, timeout time.Duration, f func(context.Context) (T, error), retryable Retryable, opts ...backoff.Option) (T, error) { + return retry.Op(f).If(func(_ T, err error) (bool, error) { + return retryable(err) + 
})(ctx, timeout, opts...) } diff --git a/internal/tfresource/retry_test.go b/internal/tfresource/retry_test.go index 5693153a5e6b..e7ba630190d3 100644 --- a/internal/tfresource/retry_test.go +++ b/internal/tfresource/retry_test.go @@ -11,7 +11,7 @@ import ( "testing" "time" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" + "github.com/hashicorp/terraform-provider-aws/internal/retry" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" ) @@ -22,18 +22,18 @@ func TestRetryWhenAWSErrCodeEquals(t *testing.T) { // nosemgrep:ci.aws-in-func-n ctx := t.Context() testCases := []struct { Name string - F func() (any, error) + F func(context.Context) (any, error) ExpectError bool }{ { Name: "no error", - F: func() (any, error) { + F: func(context.Context) (any, error) { return nil, nil }, }, { Name: "non-retryable other error", - F: func() (any, error) { + F: func(context.Context) (any, error) { return nil, errors.New("TestCode") }, ExpectError: true, @@ -60,18 +60,18 @@ func TestRetryWhenAWSErrMessageContains(t *testing.T) { // nosemgrep:ci.aws-in-f ctx := t.Context() testCases := []struct { Name string - F func() (any, error) + F func(context.Context) (any, error) ExpectError bool }{ { Name: "no error", - F: func() (any, error) { + F: func(context.Context) (any, error) { return nil, nil }, }, { Name: "non-retryable other error", - F: func() (any, error) { + F: func(context.Context) (any, error) { return nil, errors.New("TestCode") }, ExpectError: true, @@ -99,33 +99,33 @@ func TestRetryWhenNewResourceNotFound(t *testing.T) { var retryCount int32 testCases := []struct { Name string - F func() (any, error) + F func(context.Context) (any, error) NewResource bool ExpectError bool }{ { Name: "no error", - F: func() (any, error) { + F: func(context.Context) (any, error) { return nil, nil }, }, { Name: "no error new resource", - F: func() (any, error) { + F: func(context.Context) (any, error) { return nil, nil }, NewResource: true, }, { Name: 
"non-retryable other error", - F: func() (any, error) { + F: func(context.Context) (any, error) { return nil, errors.New("TestCode") }, ExpectError: true, }, { Name: "non-retryable other error new resource", - F: func() (any, error) { + F: func(context.Context) (any, error) { return nil, errors.New("TestCode") }, NewResource: true, @@ -133,14 +133,14 @@ func TestRetryWhenNewResourceNotFound(t *testing.T) { }, { Name: "retryable NotFoundError not new resource", - F: func() (any, error) { + F: func(context.Context) (any, error) { return nil, &retry.NotFoundError{} }, ExpectError: true, }, { Name: "retryable NotFoundError new resource timeout", - F: func() (any, error) { + F: func(context.Context) (any, error) { return nil, &retry.NotFoundError{} }, NewResource: true, @@ -148,7 +148,7 @@ func TestRetryWhenNewResourceNotFound(t *testing.T) { }, { Name: "retryable NotFoundError success new resource", - F: func() (any, error) { + F: func(context.Context) (any, error) { if atomic.CompareAndSwapInt32(&retryCount, 0, 1) { return nil, &retry.NotFoundError{} } @@ -182,32 +182,32 @@ func TestRetryWhenNotFound(t *testing.T) { var retryCount int32 testCases := []struct { Name string - F func() (any, error) + F func(context.Context) (any, error) ExpectError bool }{ { Name: "no error", - F: func() (any, error) { + F: func(ctx context.Context) (any, error) { return nil, nil }, }, { Name: "non-retryable other error", - F: func() (any, error) { + F: func(ctx context.Context) (any, error) { return nil, errors.New("TestCode") }, ExpectError: true, }, { Name: "retryable NotFoundError timeout", - F: func() (any, error) { + F: func(ctx context.Context) (any, error) { return nil, &retry.NotFoundError{} }, ExpectError: true, }, { Name: "retryable NotFoundError success", - F: func() (any, error) { + F: func(ctx context.Context) (any, error) { if atomic.CompareAndSwapInt32(&retryCount, 0, 1) { return nil, &retry.NotFoundError{} } @@ -299,32 +299,32 @@ func TestRetryUntilNotFound(t *testing.T) 
{ var retryCount int32 testCases := []struct { Name string - F func() (any, error) + F func(context.Context) (any, error) ExpectError bool }{ { Name: "no error", - F: func() (any, error) { + F: func(context.Context) (any, error) { return nil, nil }, ExpectError: true, }, { Name: "other error", - F: func() (any, error) { + F: func(context.Context) (any, error) { return nil, errors.New("TestCode") }, ExpectError: true, }, { Name: "NotFoundError", - F: func() (any, error) { + F: func(context.Context) (any, error) { return nil, &retry.NotFoundError{} }, }, { Name: "retryable NotFoundError", - F: func() (any, error) { + F: func(context.Context) (any, error) { if atomic.CompareAndSwapInt32(&retryCount, 0, 1) { return nil, nil } @@ -349,13 +349,61 @@ func TestRetryUntilNotFound(t *testing.T) { } } -func TestRetryContext_error(t *testing.T) { +func TestRetryContext_nil(t *testing.T) { + t.Parallel() + + ctx := t.Context() + var expected error + f := func(context.Context) *tfresource.RetryError { + return nil + } + + errCh := make(chan error) + go func() { + errCh <- tfresource.Retry(ctx, 1*time.Second, f) + }() + + select { + case err := <-errCh: + if err != expected { //nolint:errorlint // We are actually comparing equality + t.Fatalf("bad: %#v", err) + } + case <-time.After(5 * time.Second): + t.Fatal("timeout") + } +} + +func TestRetryContext_nonRetryableError(t *testing.T) { + t.Parallel() + + ctx := t.Context() + expected := fmt.Errorf("nope") + f := func(context.Context) *tfresource.RetryError { + return tfresource.NonRetryableError(expected) + } + + errCh := make(chan error) + go func() { + errCh <- tfresource.Retry(ctx, 1*time.Second, f) + }() + + select { + case err := <-errCh: + if err != expected { //nolint:errorlint // We are actually comparing equality + t.Fatalf("bad: %#v", err) + } + case <-time.After(5 * time.Second): + t.Fatal("timeout") + } +} + +func TestRetryContext_retryableError(t *testing.T) { t.Parallel() ctx := t.Context() expected := 
fmt.Errorf("nope") - f := func() *retry.RetryError { - return retry.NonRetryableError(expected) + f := func(context.Context) *tfresource.RetryError { + return tfresource.RetryableError(expected) } errCh := make(chan error) diff --git a/internal/types/aws_region.go b/internal/types/aws_region.go index 227c157b9390..17df8f75c87b 100644 --- a/internal/types/aws_region.go +++ b/internal/types/aws_region.go @@ -9,5 +9,5 @@ import ( // IsAWSRegion returns whether or not the specified string is a valid AWS Region. func IsAWSRegion(s string) bool { // nosemgrep:ci.aws-in-func-name - return regexache.MustCompile(`^[a-z]{2}(-[a-z]+)+-\d{1,2}$`).MatchString(s) + return regexache.MustCompile(`^[a-z]{2,4}(-[a-z]+)+-\d{1,2}$`).MatchString(s) } diff --git a/internal/types/aws_region_test.go b/internal/types/aws_region_test.go index bbcb4665163d..8cd9969d2a76 100644 --- a/internal/types/aws_region_test.go +++ b/internal/types/aws_region_test.go @@ -18,6 +18,7 @@ func TestIsAWSRegion(t *testing.T) { // nosemgrep:ci.aws-in-func-name {"", false}, {"eu-isoe-west-1", true}, {"mars", false}, + {"eusc-de-east-1", true}, } { ok := IsAWSRegion(tc.id) if got, want := ok, tc.valid; got != want { diff --git a/internal/types/service_package.go b/internal/types/service_package.go index 59910edac05d..d966e651e8f3 100644 --- a/internal/types/service_package.go +++ b/internal/types/service_package.go @@ -8,8 +8,10 @@ import ( "slices" "unique" + "github.com/hashicorp/terraform-plugin-framework/action" "github.com/hashicorp/terraform-plugin-framework/datasource" "github.com/hashicorp/terraform-plugin-framework/ephemeral" + "github.com/hashicorp/terraform-plugin-framework/list" "github.com/hashicorp/terraform-plugin-framework/resource" "github.com/hashicorp/terraform-plugin-framework/tfsdk" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" @@ -42,6 +44,15 @@ type ServicePackageResourceTags struct { ResourceType string // Extra resourceType parameter value for UpdateTags etc. 
} +// ServicePackageAction represents a Terraform Plugin Framework action +// implemented by a service package. +type ServicePackageAction struct { + Factory func(context.Context) (action.ActionWithConfigure, error) + TypeName string + Name string + Region unique.Handle[ServicePackageResourceRegion] +} + // ServicePackageEphemeralResource represents a Terraform Plugin Framework ephemeral resource // implemented by a service package. type ServicePackageEphemeralResource struct { @@ -73,6 +84,15 @@ type ServicePackageFrameworkResource struct { Import FrameworkImport } +type ServicePackageFrameworkListResource struct { + Factory func() list.ListResourceWithConfigure + TypeName string + Name string + Tags unique.Handle[ServicePackageResourceTags] + Region unique.Handle[ServicePackageResourceRegion] + Identity Identity +} + // ServicePackageSDKDataSource represents a Terraform Plugin SDK data source // implemented by a service package. type ServicePackageSDKDataSource struct { @@ -95,17 +115,32 @@ type ServicePackageSDKResource struct { Import SDKv2Import } +type ListResourceForSDK interface { + list.ListResourceWithRawV5Schemas + list.ListResourceWithConfigure +} + +type ServicePackageSDKListResource struct { + Factory func() ListResourceForSDK + TypeName string + Name string + Tags unique.Handle[ServicePackageResourceTags] + Region unique.Handle[ServicePackageResourceRegion] + Identity Identity +} + type Identity struct { IsGlobalResource bool // All IsSingleton bool // Singleton IsARN bool // ARN IsGlobalARNFormat bool // ARN - IdentityAttribute string // ARN, Single-Parameter + IdentityAttribute string // ARN IDAttrShadowsAttr string Attributes []IdentityAttribute IdentityDuplicateAttrs []string IsSingleParameter bool IsMutable bool + IsSetOnUpdate bool } func (i Identity) HasInherentRegion() bool { @@ -123,21 +158,15 @@ func (i Identity) HasInherentRegion() bool { func RegionalParameterizedIdentity(attributes []IdentityAttribute, opts ...IdentityOptsFunc) Identity 
{ baseAttributes := []IdentityAttribute{ - { - Name: "account_id", - Required: false, - }, - { - Name: "region", - Required: false, - }, + StringIdentityAttribute("account_id", false), + StringIdentityAttribute("region", false), } baseAttributes = slices.Grow(baseAttributes, len(attributes)) identity := Identity{ Attributes: append(baseAttributes, attributes...), } if len(attributes) == 1 { - identity.IDAttrShadowsAttr = attributes[0].Name + identity.IDAttrShadowsAttr = attributes[0].Name() } for _, opt := range opts { @@ -148,14 +177,38 @@ func RegionalParameterizedIdentity(attributes []IdentityAttribute, opts ...Ident } type IdentityAttribute struct { - Name string - Required bool + name string + required bool + resourceAttributeName string +} + +func (ia IdentityAttribute) Name() string { + return ia.name +} + +func (ia IdentityAttribute) Required() bool { + return ia.required +} + +func (ia IdentityAttribute) ResourceAttributeName() string { + if ia.resourceAttributeName == "" { + return ia.name + } + return ia.resourceAttributeName } func StringIdentityAttribute(name string, required bool) IdentityAttribute { return IdentityAttribute{ - Name: name, - Required: required, + name: name, + required: required, + } +} + +func StringIdentityAttributeWithMappedName(name string, required bool, resourceAttributeName string) IdentityAttribute { + return IdentityAttribute{ + name: name, + required: required, + resourceAttributeName: resourceAttributeName, } } @@ -182,10 +235,7 @@ func arnIdentity(isGlobalResource bool, name string, opts []IdentityOptsFunc) Id IsGlobalARNFormat: isGlobalResource, IdentityAttribute: name, Attributes: []IdentityAttribute{ - { - Name: name, - Required: true, - }, + StringIdentityAttribute(name, true), }, } @@ -204,30 +254,36 @@ func RegionalResourceWithGlobalARNFormatNamed(name string, opts ...IdentityOptsF identity := RegionalARNIdentityNamed(name, opts...) 
identity.IsGlobalARNFormat = true - identity.Attributes = slices.Insert(identity.Attributes, 0, IdentityAttribute{ - Name: "region", - Required: false, - }) + identity.Attributes = slices.Insert(identity.Attributes, 0, + StringIdentityAttribute("region", false), + ) return identity } func RegionalSingleParameterIdentity(name string, opts ...IdentityOptsFunc) Identity { identity := Identity{ - IdentityAttribute: name, Attributes: []IdentityAttribute{ - { - Name: "account_id", - Required: false, - }, - { - Name: "region", - Required: false, - }, - { - Name: name, - Required: true, - }, + StringIdentityAttribute("account_id", false), + StringIdentityAttribute("region", false), + StringIdentityAttribute(name, true), + }, + IsSingleParameter: true, + } + + for _, opt := range opts { + opt(&identity) + } + + return identity +} + +func RegionalSingleParameterIdentityWithMappedName(name string, resourceAttributeName string, opts ...IdentityOptsFunc) Identity { + identity := Identity{ + Attributes: []IdentityAttribute{ + StringIdentityAttribute("account_id", false), + StringIdentityAttribute("region", false), + StringIdentityAttributeWithMappedName(name, true, resourceAttributeName), }, IsSingleParameter: true, } @@ -241,17 +297,27 @@ func RegionalSingleParameterIdentity(name string, opts ...IdentityOptsFunc) Iden func GlobalSingleParameterIdentity(name string, opts ...IdentityOptsFunc) Identity { identity := Identity{ - IsGlobalResource: true, - IdentityAttribute: name, + IsGlobalResource: true, Attributes: []IdentityAttribute{ - { - Name: "account_id", - Required: false, - }, - { - Name: name, - Required: true, - }, + StringIdentityAttribute("account_id", false), + StringIdentityAttribute(name, true), + }, + IsSingleParameter: true, + } + + for _, opt := range opts { + opt(&identity) + } + + return identity +} + +func GlobalSingleParameterIdentityWithMappedName(name string, resourceAttributeName string, opts ...IdentityOptsFunc) Identity { + identity := Identity{ + 
IsGlobalResource: true, + Attributes: []IdentityAttribute{ + StringIdentityAttribute("account_id", false), + StringIdentityAttributeWithMappedName(name, true, resourceAttributeName), }, IsSingleParameter: true, } @@ -265,10 +331,7 @@ func GlobalSingleParameterIdentity(name string, opts ...IdentityOptsFunc) Identi func GlobalParameterizedIdentity(attributes []IdentityAttribute, opts ...IdentityOptsFunc) Identity { baseAttributes := []IdentityAttribute{ - { - Name: "account_id", - Required: false, - }, + StringIdentityAttribute("account_id", false), } baseAttributes = slices.Grow(baseAttributes, len(attributes)) identity := Identity{ @@ -276,7 +339,7 @@ func GlobalParameterizedIdentity(attributes []IdentityAttribute, opts ...Identit Attributes: append(baseAttributes, attributes...), } if len(attributes) == 1 { - identity.IDAttrShadowsAttr = attributes[0].Name + identity.IDAttrShadowsAttr = attributes[0].Name() } for _, opt := range opts { @@ -291,10 +354,7 @@ func GlobalSingletonIdentity(opts ...IdentityOptsFunc) Identity { IsGlobalResource: true, IsSingleton: true, Attributes: []IdentityAttribute{ - { - Name: "account_id", - Required: false, - }, + StringIdentityAttribute("account_id", false), }, } @@ -310,14 +370,8 @@ func RegionalSingletonIdentity(opts ...IdentityOptsFunc) Identity { IsGlobalResource: false, IsSingleton: true, Attributes: []IdentityAttribute{ - { - Name: "account_id", - Required: false, - }, - { - Name: "region", - Required: false, - }, + StringIdentityAttribute("account_id", false), + StringIdentityAttribute("region", false), }, } @@ -336,10 +390,12 @@ func WithIdentityDuplicateAttrs(attrs ...string) IdentityOptsFunc { } } -// WithV6_0SDKv2Fix is for use ONLY for resource types affected by the v6.0 SDKv2 existing resource issue -func WithV6_0SDKv2Fix() IdentityOptsFunc { +// WithMutableIdentity is for use for resource types that normally have a mutable identity +// If Identity must be mutable to fix potential errors, use WithIdentityFix() +func 
WithMutableIdentity() IdentityOptsFunc { return func(opts *Identity) { opts.IsMutable = true + opts.IsSetOnUpdate = true } } @@ -350,6 +406,13 @@ func WithIdentityFix() IdentityOptsFunc { } } +// WithV6_0SDKv2Fix is for use ONLY for resource types affected by the v6.0 SDKv2 existing resource issue +func WithV6_0SDKv2Fix() IdentityOptsFunc { + return func(opts *Identity) { + opts.IsMutable = true + } +} + type ImportIDParser interface { Parse(id string) (string, map[string]string, error) } @@ -371,5 +434,10 @@ type SDKv2ImportID interface { type SDKv2Import struct { WrappedImport bool + CustomImport bool ImportID SDKv2ImportID // Multi-Parameter } + +type SDKv2Tagger interface { + SetTagsSpec(tags unique.Handle[ServicePackageResourceTags]) +} diff --git a/internal/types/timestamp/timestamp.go b/internal/types/timestamp/timestamp.go index e33b72f2d57b..1578abc5667d 100644 --- a/internal/types/timestamp/timestamp.go +++ b/internal/types/timestamp/timestamp.go @@ -53,7 +53,7 @@ func (t Timestamp) ValidateOnceAWeekWindowFormat() error { func (t Timestamp) ValidateUTCFormat() error { _, err := time.Parse(time.RFC3339, t.String()) if err != nil { - return fmt.Errorf("must be in RFC3339 time format %q. 
Example: %s", time.RFC3339, err) + return fmt.Errorf("must be in RFC3339 time format %q: %w", time.RFC3339, err) } return nil diff --git a/internal/vcr/retry.go b/internal/vcr/retry.go index f52f336df8d9..ece73000fd9c 100644 --- a/internal/vcr/retry.go +++ b/internal/vcr/retry.go @@ -8,14 +8,15 @@ import ( "strings" "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/hashicorp/terraform-provider-aws/internal/errs" ) // InteractionNotFoundRetryableFunc is a retryable function to augment retry behavior for AWS service clients // when VCR testing is enabled -var InteractionNotFoundRetryableFunc = func(err error) aws.Ternary { +var InteractionNotFoundRetryableFunc = retry.IsErrorRetryableFunc(func(err error) aws.Ternary { if errs.IsA[*url.Error](err) && strings.Contains(err.Error(), "requested interaction not found") { return aws.FalseTernary } return aws.UnknownTernary // Delegate to configured Retryer. -} +}) diff --git a/internal/verify/validate.go b/internal/verify/validate.go index 5406bccd2fc3..5238ab7d18b5 100644 --- a/internal/verify/validate.go +++ b/internal/verify/validate.go @@ -109,7 +109,7 @@ func ValidARNCheck(f ...ARNCheckFunc) schema.SchemaValidateFunc { parsedARN, err := arn.Parse(value) if err != nil { - errors = append(errors, fmt.Errorf("%q (%s) is an invalid ARN: %s", k, value, err)) + errors = append(errors, fmt.Errorf("%q (%s) is an invalid ARN: %w", k, value, err)) return ws, errors } @@ -224,7 +224,7 @@ func ValidIAMPolicyJSON(v any, k string) (ws []string, errors []error) { } if err := basevalidation.JSONNoDuplicateKeys(value); err != nil { - errors = append(errors, fmt.Errorf("%q contains duplicate JSON keys: %s", k, err)) + errors = append(errors, fmt.Errorf("%q contains duplicate JSON keys: %w", k, err)) return //nolint:nakedret // Naked return due to legacy, non-idiomatic Go function, error handling } @@ -330,7 +330,7 @@ func ValidLaunchTemplateID(v any, k string) (ws []string, errors []error) { 
errors = append(errors, fmt.Errorf("%q cannot be longer than 255 characters", k)) } else if !regexache.MustCompile(`^lt\-[0-9a-z]+$`).MatchString(value) { errors = append(errors, fmt.Errorf( - "%q must begin with 'lt-' and be comprised of only alphanumeric characters: %v", k, value)) + "%q must begin with 'lt-' and only contain alphanumeric characters: %v", k, value)) } return } @@ -411,36 +411,16 @@ func ValidRegionName(v any, k string) (ws []string, errors []error) { func ValidStringIsJSONOrYAML(v any, k string) (ws []string, errors []error) { if looksLikeJSONString(v) { if _, err := structure.NormalizeJsonString(v); err != nil { - errors = append(errors, fmt.Errorf("%q contains an invalid JSON: %s", k, err)) + errors = append(errors, fmt.Errorf("%q contains an invalid JSON: %w", k, err)) } } else { if _, err := checkYAMLString(v); err != nil { - errors = append(errors, fmt.Errorf("%q contains an invalid YAML: %s", k, err)) + errors = append(errors, fmt.Errorf("%q contains an invalid YAML: %w", k, err)) } } return } -// ValidTypeStringNullableFloat provides custom error messaging for TypeString floats -// Some arguments require a floating point value or an unspecified, empty field. 
-func ValidTypeStringNullableFloat(v any, k string) (ws []string, es []error) { - value, ok := v.(string) - if !ok { - es = append(es, fmt.Errorf("expected type of %s to be string", k)) - return - } - - if value == "" { - return - } - - if _, err := strconv.ParseFloat(value, 64); err != nil { - es = append(es, fmt.Errorf("%s: cannot parse '%s' as float: %s", k, value, err)) - } - - return -} - // ValidUTCTimestamp validates a string in UTC Format required by APIs including: // https://docs.aws.amazon.com/iot/latest/apireference/API_CloudwatchMetricAction.html // https://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_RestoreDBInstanceToPointInTime.html @@ -465,7 +445,7 @@ func ValidDuration(v any, k string) (ws []string, errors []error) { value := v.(string) duration, err := time.ParseDuration(value) if err != nil { - errors = append(errors, fmt.Errorf("%q cannot be parsed as a duration: %s", k, err)) + errors = append(errors, fmt.Errorf("%q cannot be parsed as a duration: %w", k, err)) } if duration < 0 { errors = append(errors, fmt.Errorf("%q must be greater than zero", k)) diff --git a/internal/verify/validate_test.go b/internal/verify/validate_test.go index 0fb200e1eef0..8c67824dfd43 100644 --- a/internal/verify/validate_test.go +++ b/internal/verify/validate_test.go @@ -4,7 +4,6 @@ package verify import ( - "regexp" "strings" "testing" @@ -93,59 +92,6 @@ func TestValid4ByteASNString(t *testing.T) { } } -func TestValidTypeStringNullableFloat(t *testing.T) { - t.Parallel() - - testCases := []struct { - val any - expectedErr *regexp.Regexp - }{ - { - val: "", - }, - { - val: "0", - }, - { - val: "1", - }, - { - val: "42.0", - }, - { - val: "threeve", - expectedErr: regexache.MustCompile(`cannot parse`), - }, - } - - matchErr := func(errs []error, r *regexp.Regexp) bool { - // err must match one provided - for _, err := range errs { - if r.MatchString(err.Error()) { - return true - } - } - - return false - } - - for i, tc := range testCases { - _, errs := 
ValidTypeStringNullableFloat(tc.val, "test_property") - - if len(errs) == 0 && tc.expectedErr == nil { - continue - } - - if len(errs) != 0 && tc.expectedErr == nil { - t.Fatalf("expected test case %d to produce no errors, got %v", i, errs) - } - - if !matchErr(errs, tc.expectedErr) { - t.Fatalf("expected test case %d to produce error matching \"%s\", got %v", i, tc.expectedErr, errs) - } - } -} - func TestValidAccountID(t *testing.T) { t.Parallel() @@ -230,6 +176,7 @@ func TestValidARN(t *testing.T) { "arn:aws-us-gov:s3:::bucket/object", // lintignore:AWSAT005 // GovCloud S3 ARN "arn:aws:cloudwatch::cw0000000000:alarm:my-alarm", // lintignore:AWSAT005 // CloudWatch Alarm "arn:aws:imagebuilder:eu-central-1:aws-marketplace:component/crowdstrike-falcon-install-linux-prod-nhzsem4gwwfja/1.2.2/1", // lintignore:AWSAT003,AWSAT005 // EC2 image builder marketplace subscription ARN + "arn:aws-eusc:acm-pca:eusc-de-east-1:123456789012:certificate-authority/12345678-1234-1234-1234-123456789012", // lintignore:AWSAT003,AWSAT005 // ESC ACMPCA ARN } for _, v := range validNames { _, errors := ValidARN(v, "arn") diff --git a/internal/verify/verify.go b/internal/verify/verify.go index 2b1d69fa3a09..8f1b7ed5b632 100644 --- a/internal/verify/verify.go +++ b/internal/verify/verify.go @@ -4,7 +4,7 @@ package verify import ( - "gopkg.in/yaml.v3" + tfyaml "github.com/hashicorp/terraform-provider-aws/internal/yaml" ) const UUIDRegexPattern = `[0-9a-f]{8}-[0-9a-f]{4}-[1-5][0-9a-f]{3}-[ab89][0-9a-f]{3}-[0-9a-f]{12}` @@ -13,15 +13,15 @@ const UUIDRegexPattern = `[0-9a-f]{8}-[0-9a-f]{4}-[1-5][0-9a-f]{3}-[ab89][0-9a-f // the YAML parser. Returns either a parsing // error or original YAML string. 
func checkYAMLString(yamlString any) (string, error) { - var y any - if yamlString == nil || yamlString.(string) == "" { return "", nil } + var y any + s := yamlString.(string) - err := yaml.Unmarshal([]byte(s), &y) + err := tfyaml.DecodeFromString(s, &y) return s, err } diff --git a/internal/yaml/decode.go b/internal/yaml/decode.go index bc9082bd898b..d65abb678cc9 100644 --- a/internal/yaml/decode.go +++ b/internal/yaml/decode.go @@ -8,7 +8,7 @@ import ( "io" "strings" - "gopkg.in/yaml.v3" + yaml "github.com/goccy/go-yaml" ) // DecodeFromBytes decodes (unmarshals) the given byte slice, containing valid YAML, into `to`. diff --git a/mkdocs.yml b/mkdocs.yml index b2f6648238c7..3cde35fc4e52 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -32,13 +32,20 @@ nav: - How We Prioritize: prioritization.md - Developer Reference: - Acceptance Test Environment Variables: acc-test-environment-variables.md + - AI Agents: ai-agents.md + - AI Agent Guides: + - ARN-Based Resource Identity: ai-agent-guides/arn-based-resource-identity.md + - Parameterized Resource Identity: ai-agent-guides/parameterized-resource-identity.md + - Smarterr: ai-agent-guides/smarterr.md - AWS SDK Go Base: aws-sdk-go-base.md - Core Services: core-services.md - Data Handling and Conversion: data-handling-and-conversion.md - Debugging: debugging.md - Dependency Updates: dependency-updates.md - Design Decision Log: design-decision-log.md + - Enhanced Region Support: enhanced-region-support.md - Error Handling: error-handling.md + - Go-VCR: go-vcr.md - ID Attributes: id-attributes.md - Makefile Cheat Sheet: makefile-cheat-sheet.md - Naming Standards: naming.md diff --git a/names/consts_gen.go b/names/consts_gen.go index aa6170eb2602..065cdebdd073 100644 --- a/names/consts_gen.go +++ b/names/consts_gen.go @@ -7,6 +7,7 @@ const ( AMP = "amp" APIGateway = "apigateway" APIGatewayV2 = "apigatewayv2" + ARCRegionSwitch = "arcregionswitch" AccessAnalyzer = "accessanalyzer" Account = "account" Amplify = "amplify" @@ -30,6 
+31,7 @@ const ( Batch = "batch" Bedrock = "bedrock" BedrockAgent = "bedrockagent" + BedrockAgentCore = "bedrockagentcore" Billing = "billing" Budgets = "budgets" CE = "ce" @@ -168,6 +170,7 @@ const ( NetworkMonitor = "networkmonitor" Notifications = "notifications" NotificationsContacts = "notificationscontacts" + ODB = "odb" ObservabilityAccessManager = "oam" OpenSearch = "opensearch" OpenSearchIngestion = "osis" @@ -208,6 +211,7 @@ const ( S3Control = "s3control" S3Outposts = "s3outposts" S3Tables = "s3tables" + S3Vectors = "s3vectors" SES = "ses" SESV2 = "sesv2" SFN = "sfn" @@ -249,6 +253,7 @@ const ( WAFRegional = "wafregional" WAFV2 = "wafv2" WellArchitected = "wellarchitected" + WorkMail = "workmail" WorkSpaces = "workspaces" WorkSpacesWeb = "workspacesweb" XRay = "xray" @@ -262,6 +267,7 @@ const ( AMPServiceID = "amp" APIGatewayServiceID = "API Gateway" APIGatewayV2ServiceID = "ApiGatewayV2" + ARCRegionSwitchServiceID = "ARC Region Switch" AccessAnalyzerServiceID = "AccessAnalyzer" AccountServiceID = "Account" AmplifyServiceID = "Amplify" @@ -285,6 +291,7 @@ const ( BatchServiceID = "Batch" BedrockServiceID = "Bedrock" BedrockAgentServiceID = "Bedrock Agent" + BedrockAgentCoreServiceID = "Bedrock AgentCore Control" BillingServiceID = "Billing" BudgetsServiceID = "Budgets" CEServiceID = "Cost Explorer" @@ -423,6 +430,7 @@ const ( NetworkMonitorServiceID = "NetworkMonitor" NotificationsServiceID = "notifications" NotificationsContactsServiceID = "notificationscontacts" + ODBServiceID = "ODB" ObservabilityAccessManagerServiceID = "OAM" OpenSearchServiceID = "OpenSearch" OpenSearchIngestionServiceID = "OSIS" @@ -463,6 +471,7 @@ const ( S3ControlServiceID = "S3 Control" S3OutpostsServiceID = "S3Outposts" S3TablesServiceID = "S3Tables" + S3VectorsServiceID = "S3Vectors" SESServiceID = "SES" SESV2ServiceID = "SESv2" SFNServiceID = "SFN" @@ -504,6 +513,7 @@ const ( WAFRegionalServiceID = "WAF Regional" WAFV2ServiceID = "WAFV2" WellArchitectedServiceID = 
"WellArchitected" + WorkMailServiceID = "WorkMail" WorkSpacesServiceID = "WorkSpaces" WorkSpacesWebServiceID = "WorkSpaces Web" XRayServiceID = "XRay" diff --git a/names/data/lookup.go b/names/data/lookup.go index f646d66b0c8a..9e3432f85d33 100644 --- a/names/data/lookup.go +++ b/names/data/lookup.go @@ -8,7 +8,7 @@ import "fmt" func LookupService(name string) (result ServiceRecord, err error) { serviceData, err := ReadAllServiceData() if err != nil { - return result, fmt.Errorf("error reading service data: %s", err) + return result, fmt.Errorf("error reading service data: %w", err) } for _, s := range serviceData { diff --git a/names/data/names_data.hcl b/names/data/names_data.hcl index d104ebbb58cd..9ec22f8d3924 100644 --- a/names/data/names_data.hcl +++ b/names/data/names_data.hcl @@ -648,6 +648,38 @@ service "appsync" { brand = "AWS" } +service "arcregionswitch" { + cli_v2_command { + aws_cli_v2_command = "arc-region-switch" + aws_cli_v2_command_no_dashes = "arcregionswitch" + } + + sdk { + id = "ARC Region Switch" + arn_namespace = "arcregionswitch" + } + + names { + provider_name_upper = "ARCRegionSwitch" + human_friendly = "Application Resilience Controller Region Switch" + } + + endpoint_info { + endpoint_api_call = "ListPlans" + endpoint_region_overrides = { + "aws" = "us-east-1" + } + } + + resource_prefix { + correct = "aws_arcregionswitch_" + } + + provider_package_correct = "arcregionswitch" + doc_prefix = ["arcregionswitch_"] + brand = "AWS" +} + service "athena" { sdk { id = "Athena" @@ -875,6 +907,39 @@ service "bedrockagent" { brand = "Amazon" } +service "bedrockagentcore" { + cli_v2_command { + aws_cli_v2_command = "bedrock-agentcore-control" + aws_cli_v2_command_no_dashes = "bedrockagentcorecontrol" + } + + go_packages { + v2_package = "bedrockagentcorecontrol" + } + + sdk { + id = "Bedrock AgentCore Control" + arn_namespace = "bedrock-agentcore" + } + + names { + provider_name_upper = "BedrockAgentCore" + human_friendly = "Bedrock AgentCore" + } 
+ + endpoint_info { + endpoint_api_call = "ListAgentRuntimes" + } + + resource_prefix { + correct = "aws_bedrockagentcore_" + } + + provider_package_correct = "bedrockagentcore" + doc_prefix = ["bedrockagentcore_"] + brand = "Amazon" +} + service "bcmdataexports" { sdk { id = "BCM Data Exports" @@ -2068,13 +2133,21 @@ service "cognitoidp" { } resource_prefix { - actual = "aws_cognito_(identity_provider|resource|user|risk)" + actual = "aws_cognito_(identity_provider|log|managed_login_branding|managed_user|resource|risk|user)" correct = "aws_cognitoidp_" } provider_package_correct = "cognitoidp" - doc_prefix = ["cognito_identity_provider", "cognito_managed_user", "cognito_resource_", "cognito_user", "cognito_risk"] - brand = "AWS" + doc_prefix = [ + "cognito_identity_provider", + "cognito_log", + "cognito_managed_login_branding", + "cognito_managed_user", + "cognito_resource_", + "cognito_risk", + "cognito_user" + ] + brand = "AWS" } service "cognitosync" { @@ -5997,6 +6070,29 @@ service "oam" { brand = "AWS" } +service "odb" { + sdk { + id = "ODB" + arn_namespace = "odb" + } + + names { + provider_name_upper = "ODB" + human_friendly = "Oracle Database@AWS" + } + + endpoint_info { + endpoint_api_call = "ListCloudExadataInfrastructures" + } + + resource_prefix { + correct = "aws_odb_" + } + + doc_prefix = ["odb_"] + brand = "AWS" +} + service "opensearch" { go_packages { v1_package = "opensearchservice" @@ -7341,6 +7437,29 @@ service "s3tables" { brand = "Amazon" } +service "s3vectors" { + sdk { + id = "S3Vectors" + arn_namespace = "s3vectors" + } + + names { + provider_name_upper = "S3Vectors" + human_friendly = "S3 Vectors" + } + + endpoint_info { + endpoint_api_call = "ListVectorBuckets" + } + + resource_prefix { + correct = "aws_s3vectors_" + } + + doc_prefix = ["s3vectors_"] + brand = "Amazon" +} + service "glacier" { sdk { id = "Glacier" @@ -8855,6 +8974,11 @@ service "workmail" { human_friendly = "WorkMail" } + endpoint_info { + endpoint_api_call = 
"ListResources" + endpoint_api_params = "OrganizationId: aws.String(\"m-12345678901234567890123456789012\")" + } + resource_prefix { correct = "aws_workmail_" } @@ -8862,7 +8986,6 @@ service "workmail" { provider_package_correct = "workmail" doc_prefix = ["workmail_"] brand = "Amazon" - not_implemented = true } service "workmailmessageflow" { diff --git a/names/names.go b/names/names.go index 706dde0bcd65..9316b49a8c1c 100644 --- a/names/names.go +++ b/names/names.go @@ -66,6 +66,7 @@ const ( ComputeOptimizerEndpointID = "compute-optimizer" ConfigServiceEndpointID = "config" ConnectEndpointID = "connect" + ControlTowerEndpointID = "controltower" DataExchangeEndpointID = "dataexchange" DataPipelineEndpointID = "datapipeline" DataZoneEndpointID = "datazone" @@ -295,5 +296,9 @@ func HumanFriendly(service string) (string, error) { } const ( - TopLevelRegionAttributeDescription = `Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference).` + ResourceTopLevelRegionAttributeDescription = `Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). ` + topLevelRegionDefaultDescription + ListResourceTopLevelRegionAttributeDescription = `Region to [query](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints) for resources of this type. ` + topLevelRegionDefaultDescription + ActionTopLevelRegionAttributeDescription = `Region where this action will be [executed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
` + topLevelRegionDefaultDescription + + topLevelRegionDefaultDescription = `Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference).` ) diff --git a/skaff/datasource/datasource.gtpl b/skaff/datasource/datasource.gtpl index 9087bea14d6b..f8521721d68e 100644 --- a/skaff/datasource/datasource.gtpl +++ b/skaff/datasource/datasource.gtpl @@ -57,6 +57,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/create" "github.com/hashicorp/terraform-provider-aws/internal/flex" + "github.com/hashicorp/terraform-provider-aws/internal/smerr" {{- if .IncludeTags }} tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" {{- end }} @@ -111,7 +112,7 @@ func DataSource{{ .DataSource }}() *schema.Resource { // https://pkg.go.dev/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema#Schema {{- end }} Schema: map[string]*schema.Schema{ - "arn": { {{- if .IncludeComments }} // TIP: Many, but not all, data sources have an `arn` attribute.{{- end }} + names.AttrARN: { {{- if .IncludeComments }} // TIP: Many, but not all, data sources have an `arn` attribute.{{- end }} Type: schema.TypeString, Computed: true, }, @@ -138,7 +139,7 @@ func DataSource{{ .DataSource }}() *schema.Resource { }, }, {{- if .IncludeTags }} - "tags": tftags.TagsSchemaComputed(), {{- if .IncludeComments }} // TIP: Many, but not all, data sources have `tags` attributes.{{- end }} + names.AttrTags: tftags.TagsSchemaComputed(), {{- if .IncludeComments }} // TIP: Many, but not all, data sources have `tags` attributes.{{- end }} {{- end }} }, } @@ -174,11 +175,12 @@ func dataSource{{ .DataSource }}Read(ctx context.Context, d *schema.ResourceData // elements. However, a data source will have perhaps one or a few arguments // that are key to finding the relevant information, such as 'name' below. 
{{- end }} - name := d.Get("name").(string) + name := d.Get(names.AttrName).(string) out, err := find{{ .DataSource }}ByName(ctx, conn, name) if err != nil { - return create.AppendDiagError(diags, names.{{ .Service }}, create.ErrActionReading, DSName{{ .DataSource }}, name, err) + smerr.Append(ctx, diags, err, smerr.ID, name) + return diags } {{ if .IncludeComments }} // TIP: -- 3. Set the ID @@ -196,7 +198,7 @@ func dataSource{{ .DataSource }}Read(ctx context.Context, d *schema.ResourceData // // For simple data types (i.e., schema.TypeString, schema.TypeBool, // schema.TypeInt, and schema.TypeFloat), a simple Set call (e.g., - // d.Set("arn", out.Arn) is sufficient. No error or nil checking is + // d.Set(names.AttrARN, out.Arn) is sufficient. No error or nil checking is // necessary. // // However, there are some situations where more handling is needed. @@ -206,27 +208,30 @@ func dataSource{{ .DataSource }}Read(ctx context.Context, d *schema.ResourceData // is equivalent to what is already set. In that case, you may check if // it is equivalent before setting the different JSON. {{- end }} - d.Set("arn", out.ARN) - d.Set("name", out.Name) + d.Set(names.AttrARN, out.ARN) + d.Set(names.AttrName, out.Name) {{ if .IncludeComments }} // TIP: Setting a complex type. // For more information, see: // https://hashicorp.github.io/terraform-provider-aws/data-handling-and-conversion/ {{- end }} if err := d.Set("complex_argument", flattenComplexArguments(out.ComplexArguments)); err != nil { - return create.AppendDiagError(diags, names.{{ .Service }}, create.ErrActionSetting, DSName{{ .DataSource }}, d.Id(), err) + smerr.Append(ctx, diags, err, smerr.ID, d.Id()) + return diags } {{ if .IncludeComments }} // TIP: Setting a JSON string to avoid errorneous diffs. 
{{- end }} p, err := verify.SecondJSONUnlessEquivalent(d.Get("policy").(string), aws.ToString(out.Policy)) if err != nil { - return create.AppendDiagError(diags, names.{{ .Service }}, create.ErrActionSetting, DSName{{ .DataSource }}, d.Id(), err) + smerr.Append(ctx, diags, err, smerr.ID, d.Id()) + return diags } p, err = structure.NormalizeJsonString(p) if err != nil { - return create.AppendDiagError(diags, names.{{ .Service }}, create.ErrActionReading, DSName{{ .DataSource }}, d.Id(), err) + smerr.Append(ctx, diags, err, smerr.ID, d.Id()) + return diags } d.Set("policy", p) @@ -241,8 +246,9 @@ func dataSource{{ .DataSource }}Read(ctx context.Context, d *schema.ResourceData {{- if .IncludeTags }} ignoreTagsConfig := meta.(*conns.AWSClient).IgnoreTagsConfig(ctx) - if err := d.Set("tags", KeyValueTags(out.Tags).IgnoreAWS().IgnoreConfig(ignoreTagsConfig).Map()); err != nil { - return create.AppendDiagError(diags, names.{{ .Service }}, create.ErrActionSetting, DSName{{ .DataSource }}, d.Id(), err) + if err := d.Set(names.AttrTags, KeyValueTags(out.Tags).IgnoreAWS().IgnoreConfig(ignoreTagsConfig).Map()); err != nil { + smerr.Append(ctx, diags, err, smerr.ID, d.Id()) + return diags } {{- end }} {{ if .IncludeComments }} diff --git a/skaff/datasource/datasourcefw.gtpl b/skaff/datasource/datasourcefw.gtpl index 9ff7327d24d0..24241624d53c 100644 --- a/skaff/datasource/datasourcefw.gtpl +++ b/skaff/datasource/datasourcefw.gtpl @@ -44,10 +44,10 @@ import ( "github.com/hashicorp/terraform-plugin-framework/datasource" "github.com/hashicorp/terraform-plugin-framework/datasource/schema" "github.com/hashicorp/terraform-plugin-framework/types" - "github.com/hashicorp/terraform-provider-aws/internal/create" "github.com/hashicorp/terraform-provider-aws/internal/framework" "github.com/hashicorp/terraform-provider-aws/internal/framework/flex" fwtypes "github.com/hashicorp/terraform-provider-aws/internal/framework/types" + "github.com/hashicorp/terraform-provider-aws/internal/smerr" {{- 
if .IncludeTags }} tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" {{- end }} @@ -174,7 +174,7 @@ func (d *dataSource{{ .DataSource }}) Read(ctx context.Context, req datasource.R // TIP: -- 2. Fetch the config {{- end }} var data dataSource{{ .DataSource }}Model - resp.Diagnostics.Append(req.Config.Get(ctx, &data)...) + smerr.EnrichAppend(ctx, &resp.Diagnostics, req.Config.Get(ctx, &data)) if resp.Diagnostics.HasError() { return } @@ -183,10 +183,7 @@ func (d *dataSource{{ .DataSource }}) Read(ctx context.Context, req datasource.R {{- end }} out, err := find{{ .DataSource }}ByName(ctx, conn, data.Name.ValueString()) if err != nil { - resp.Diagnostics.AddError( - create.ProblemStandardMessage(names.{{ .Service }}, create.ErrActionReading, DSName{{ .DataSource }}, data.Name.String(), err), - err.Error(), - ) + smerr.AddError(ctx, &resp.Diagnostics, err, smerr.ID, data.Name.String()) return } @@ -194,7 +191,7 @@ func (d *dataSource{{ .DataSource }}) Read(ctx context.Context, req datasource.R // TIP: -- 4. Set the ID, arguments, and attributes // Using a field name prefix allows mapping fields such as `{{ .DataSource }}Id` to `ID` {{- end }} - resp.Diagnostics.Append(flex.Flatten(ctx, out, &data, flex.WithFieldNamePrefix("{{ .DataSource }}"))...) + smerr.EnrichAppend(ctx, &resp.Diagnostics, flex.Flatten(ctx, out, &data, flex.WithFieldNamePrefix("{{ .DataSource }}")), smerr.ID, data.Name.String()) if resp.Diagnostics.HasError() { return } @@ -211,7 +208,7 @@ func (d *dataSource{{ .DataSource }}) Read(ctx context.Context, req datasource.R {{ if .IncludeComments -}} // TIP: -- 6. Set the state {{- end }} - resp.Diagnostics.Append(resp.State.Set(ctx, &data)...) 
+ smerr.EnrichAppend(ctx, &resp.Diagnostics, resp.State.Set(ctx, &data), smerr.ID, data.Name.String()) } {{ if .IncludeComments }} diff --git a/skaff/go.mod b/skaff/go.mod index 1ed49c886bb1..ddd4d4341da5 100644 --- a/skaff/go.mod +++ b/skaff/go.mod @@ -1,11 +1,11 @@ module github.com/hashicorp/terraform-provider-aws/skaff -go 1.24.4 +go 1.24.8 require ( github.com/YakDriver/regexache v0.24.0 github.com/hashicorp/terraform-provider-aws v1.60.1-0.20220322001452-8f7a597d0c24 - github.com/spf13/cobra v1.9.1 + github.com/spf13/cobra v1.10.1 ) require ( @@ -13,16 +13,16 @@ require ( github.com/apparentlymart/go-textseg/v15 v15.0.0 // indirect github.com/google/go-cmp v0.7.0 // indirect github.com/google/uuid v1.6.0 // indirect - github.com/hashicorp/aws-sdk-go-base/v2 v2.0.0-beta.65 // indirect - github.com/hashicorp/hcl/v2 v2.23.0 // indirect + github.com/hashicorp/aws-sdk-go-base/v2 v2.0.0-beta.67 // indirect + github.com/hashicorp/hcl/v2 v2.24.0 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/mitchellh/go-wordwrap v1.0.1 // indirect - github.com/spf13/pflag v1.0.6 // indirect - github.com/zclconf/go-cty v1.16.3 // indirect - golang.org/x/mod v0.25.0 // indirect - golang.org/x/sync v0.16.0 // indirect - golang.org/x/text v0.27.0 // indirect - golang.org/x/tools v0.34.0 // indirect + github.com/spf13/pflag v1.0.9 // indirect + github.com/zclconf/go-cty v1.17.0 // indirect + golang.org/x/mod v0.29.0 // indirect + golang.org/x/sync v0.17.0 // indirect + golang.org/x/text v0.30.0 // indirect + golang.org/x/tools v0.38.0 // indirect ) replace github.com/hashicorp/terraform-provider-aws => ../ diff --git a/skaff/go.sum b/skaff/go.sum index 2b68792ef7fd..54d60c58fd14 100644 --- a/skaff/go.sum +++ b/skaff/go.sum @@ -5,38 +5,38 @@ github.com/agext/levenshtein v1.2.3/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki github.com/apparentlymart/go-textseg/v15 v15.0.0 h1:uYvfpb3DyLSCGWnctWKGj857c6ew1u1fNQOlOtuGxQY= github.com/apparentlymart/go-textseg/v15 
v15.0.0/go.mod h1:K8XmNZdhEBkdlyDdvbmmsvpAG721bKi0joRfFdHIWJ4= github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/go-test/deep v1.0.3 h1:ZrJSEWsXzPOxaZnFteGEfooLba+ju3FYIbOrS+rQd68= github.com/go-test/deep v1.0.3/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/hashicorp/aws-sdk-go-base/v2 v2.0.0-beta.65 h1:81+kWbE1yErFBMjME0I5k3x3kojjKsWtPYHEAutoPow= -github.com/hashicorp/aws-sdk-go-base/v2 v2.0.0-beta.65/go.mod h1:WtMzv9T++tfWVea+qB2MXoaqxw33S8bpJslzUike2mQ= -github.com/hashicorp/hcl/v2 v2.23.0 h1:Fphj1/gCylPxHutVSEOf2fBOh1VE4AuLV7+kbJf3qos= -github.com/hashicorp/hcl/v2 v2.23.0/go.mod h1:62ZYHrXgPoX8xBnzl8QzbWq4dyDsDtfCRgIq1rbJEvA= +github.com/hashicorp/aws-sdk-go-base/v2 v2.0.0-beta.67 h1:IS4mjtvkLHXWI5yn/t9ILOUiBqPePMFaO4IRh5pcMk4= +github.com/hashicorp/aws-sdk-go-base/v2 v2.0.0-beta.67/go.mod h1:l81jrdpcZSWUsJs4BGFfdGScefSYEFQRLMQRG3uyvT0= +github.com/hashicorp/hcl/v2 v2.24.0 h1:2QJdZ454DSsYGoaE6QheQZjtKZSUs9Nh2izTWiwQxvE= +github.com/hashicorp/hcl/v2 v2.24.0/go.mod h1:oGoO1FIQYfn/AgyOhlg9qLC6/nOJPX3qGbkZpYAcqfM= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod 
h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/mitchellh/go-wordwrap v1.0.1 h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQflz0v0= github.com/mitchellh/go-wordwrap v1.0.1/go.mod h1:R62XHJLzvMFRBbcrT7m7WgmE1eOyTSsCt+hzestvNj0= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo= -github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wxYW0= -github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o= -github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/zclconf/go-cty v1.16.3 h1:osr++gw2T61A8KVYHoQiFbFd1Lh3JOCXc/jFLJXKTxk= -github.com/zclconf/go-cty v1.16.3/go.mod h1:VvMs5i0vgZdhYawQNq5kePSpLAoz8u1xvZgrPIxfnZE= +github.com/spf13/cobra v1.10.1 h1:lJeBwCfmrnXthfAupyUTzJ/J4Nc1RsHC/mSRU2dll/s= +github.com/spf13/cobra v1.10.1/go.mod h1:7SmJGaTHFVBY0jW4NXGluQoLvhqFQM+6XSKD+P4XaB0= +github.com/spf13/pflag v1.0.9 h1:9exaQaMOCwffKiiiYk6/BndUBv+iRViNW+4lEMi0PvY= +github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/zclconf/go-cty v1.17.0 h1:seZvECve6XX4tmnvRzWtJNHdscMtYEx5R7bnnVyd/d0= +github.com/zclconf/go-cty v1.17.0/go.mod h1:wqFzcImaLTI6A5HfsRwB0nj5n0MRZFwmey8YoFPPs3U= github.com/zclconf/go-cty-debug v0.0.0-20240509010212-0d6042c53940 h1:4r45xpDWB6ZMSMNJFMOjqrGHynW3DIBuR2H9j0ug+Mo= github.com/zclconf/go-cty-debug v0.0.0-20240509010212-0d6042c53940/go.mod h1:CmBdvvj3nqzfzJ6nTCIwDTPZ56aVGvDrmztiO5g3qrM= -golang.org/x/mod v0.25.0 h1:n7a+ZbQKQA/Ysbyb0/6IbB1H/X41mKgbhfv7AfG/44w= -golang.org/x/mod v0.25.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww= -golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw= -golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= -golang.org/x/text v0.27.0 h1:4fGWRpyh641NLlecmyl4LOe6yDdfaYNrGb2zdfo4JV4= -golang.org/x/text v0.27.0/go.mod 
h1:1D28KMCvyooCX9hBiosv5Tz/+YLxj0j7XhWjpSUF7CU= -golang.org/x/tools v0.34.0 h1:qIpSLOxeCYGg9TrcJokLBG4KFA6d795g0xkBkiESGlo= -golang.org/x/tools v0.34.0/go.mod h1:pAP9OwEaY1CAW3HOmg3hLZC5Z0CCmzjAF2UQMSqNARg= +golang.org/x/mod v0.29.0 h1:HV8lRxZC4l2cr3Zq1LvtOsi/ThTgWnUk/y64QSs8GwA= +golang.org/x/mod v0.29.0/go.mod h1:NyhrlYXJ2H4eJiRy/WDBO6HMqZQ6q9nk4JzS3NuCK+w= +golang.org/x/sync v0.17.0 h1:l60nONMj9l5drqw6jlhIELNv9I0A4OFgRsG9k2oT9Ug= +golang.org/x/sync v0.17.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= +golang.org/x/text v0.30.0 h1:yznKA/E9zq54KzlzBEAWn1NXSQ8DIp/NYMy88xJjl4k= +golang.org/x/text v0.30.0/go.mod h1:yDdHFIX9t+tORqspjENWgzaCVXgk0yYnYuSZ8UzzBVM= +golang.org/x/tools v0.38.0 h1:Hx2Xv8hISq8Lm16jvBZ2VQf+RLmbd7wVUsALibYI/IQ= +golang.org/x/tools v0.38.0/go.mod h1:yEsQ/d/YK8cjh0L6rZlY8tgtlKiBNTL14pGDJPJpYQs= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/skaff/resource/resource.gtpl b/skaff/resource/resource.gtpl index 72e1564afc2a..607ec583428b 100644 --- a/skaff/resource/resource.gtpl +++ b/skaff/resource/resource.gtpl @@ -55,9 +55,10 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/structure" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" - "github.com/hashicorp/terraform-provider-aws/internal/create" "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/flex" + "github.com/YakDriver/smarterr" + "github.com/hashicorp/terraform-provider-aws/internal/smerr" {{- if .IncludeTags }} tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" {{- end }} @@ -164,7 +165,7 @@ func Resource{{ .Resource }}() *schema.Resource { // https://pkg.go.dev/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema#Schema {{- end }} Schema: 
map[string]*schema.Schema{ - "arn": { {{- if .IncludeComments }} // TIP: Many, but not all, resources have an `arn` attribute.{{- end }} + names.AttrARN: { {{- if .IncludeComments }} // TIP: Many, but not all, resources have an `arn` attribute.{{- end }} Type: schema.TypeString, Computed: true, }, @@ -231,8 +232,8 @@ func resource{{ .Resource }}Create(ctx context.Context, d *schema.ResourceData, // TIP: Mandatory or fields that will always be present can be set when // you create the Input structure. (Replace these with real fields.) {{- end }} - {{ .Resource }}Name: aws.String(d.Get("name").(string)), - {{ .Resource }}Type: aws.String(d.Get("type").(string)), + {{ .Resource }}Name: aws.String(d.Get(names.AttrName).(string)), + {{ .Resource }}Type: aws.String(d.Get(names.AttrType).(string)), {{ if .IncludeComments }} // TIP: Not all resources support tags and tags don't always make sense. If // your resource doesn't need tags, you can remove the tags lines here and @@ -268,11 +269,11 @@ func resource{{ .Resource }}Create(ctx context.Context, d *schema.ResourceData, // TIP: Since d.SetId() has not been called yet, you cannot use d.Id() // in error messages at this point. {{- end }} - return create.AppendDiagError(diags, names.{{ .Service }}, create.ErrActionCreating, ResName{{ .Resource }}, d.Get("name").(string), err) + return smerr.Append(ctx, diags, err, smerr.ID, d.Get(names.AttrName).(string)) } if out == nil || out.{{ .Resource }} == nil { - return create.AppendDiagError(diags, names.{{ .Service }}, create.ErrActionCreating, ResName{{ .Resource }}, d.Get("name").(string), errors.New("empty output")) + return smerr.Append(ctx, diags, errors.New("empty output"), smerr.ID, d.Get(names.AttrName).(string)) } {{ if .IncludeComments }} // TIP: -- 4. Set the minimum arguments and/or attributes for the Read function to @@ -283,7 +284,7 @@ func resource{{ .Resource }}Create(ctx context.Context, d *schema.ResourceData, // TIP: -- 5. 
Use a waiter to wait for create to complete {{- end }} if _, err := wait{{ .Resource }}Created(ctx, conn, d.Id(), d.Timeout(schema.TimeoutCreate)); err != nil { - return create.AppendDiagError(diags, names.{{ .Service }}, create.ErrActionWaitingForCreation, ResName{{ .Resource }}, d.Id(), err) + return smerr.Append(ctx, diags, err, smerr.ID, d.Id()) } {{ if .IncludeComments }} // TIP: -- 6. Call the Read function in the Create return @@ -325,14 +326,14 @@ func resource{{ .Resource }}Read(ctx context.Context, d *schema.ResourceData, me } if err != nil { - return create.AppendDiagError(diags, names.{{ .Service }}, create.ErrActionReading, ResName{{ .Resource }}, d.Id(), err) + return smerr.Append(ctx, diags, err, smerr.ID, d.Id()) } {{ if .IncludeComments }} // TIP: -- 4. Set the arguments and attributes // // For simple data types (i.e., schema.TypeString, schema.TypeBool, // schema.TypeInt, and schema.TypeFloat), a simple Set call (e.g., - // d.Set("arn", out.Arn) is sufficient. No error or nil checking is + // d.Set(names.AttrARN, out.Arn) is sufficient. No error or nil checking is // necessary. // // However, there are some situations where more handling is needed. @@ -342,8 +343,8 @@ func resource{{ .Resource }}Read(ctx context.Context, d *schema.ResourceData, me // is equivalent to what is already set. In that case, you may check if // it is equivalent before setting the different JSON. {{- end }} - d.Set("arn", out.Arn) - d.Set("name", out.Name) + d.Set(names.AttrARN, out.Arn) + d.Set(names.AttrName, out.Name) {{ if .IncludeComments }} // TIP: Setting a complex type. 
// For more information, see: @@ -352,19 +353,19 @@ func resource{{ .Resource }}Read(ctx context.Context, d *schema.ResourceData, me // https://hashicorp.github.io/terraform-provider-aws/data-handling-and-conversion/#root-typeset-of-resource-and-aws-list-of-structure {{- end }} if err := d.Set("complex_argument", flattenComplexArguments(out.ComplexArguments)); err != nil { - return create.AppendDiagError(diags, names.{{ .Service }}, create.ErrActionSetting, ResName{{ .Resource }}, d.Id(), err) + return smerr.Append(ctx, diags, err, smerr.ID, d.Id()) } {{ if .IncludeComments }} // TIP: Setting a JSON string to avoid errorneous diffs. {{- end }} p, err := verify.SecondJSONUnlessEquivalent(d.Get("policy").(string), aws.ToString(out.Policy)) if err != nil { - return create.AppendDiagError(diags, names.{{ .Service }}, create.ErrActionSetting, ResName{{ .Resource }}, d.Id(), err) + return smerr.Append(ctx, diags, err, smerr.ID, d.Id()) } p, err = structure.NormalizeJsonString(p) if err != nil { - return create.AppendDiagError(diags, names.{{ .Service }}, create.ErrActionSetting, ResName{{ .Resource }}, d.Id(), err) + return smerr.Append(ctx, diags, err, smerr.ID, d.Id()) } d.Set("policy", p) @@ -435,13 +436,13 @@ func resource{{ .Resource }}Update(ctx context.Context, d *schema.ResourceData, log.Printf("[DEBUG] Updating {{ .Service }} {{ .Resource }} (%s): %#v", d.Id(), in) out, err := conn.Update{{ .Resource }}(ctx, in) if err != nil { - return create.AppendDiagError(diags, names.{{ .Service }}, create.ErrActionUpdating, ResName{{ .Resource }}, d.Id(), err) + return smerr.Append(ctx, diags, err, smerr.ID, d.Id()) } {{ if .IncludeComments }} // TIP: -- 4. 
Use a waiter to wait for update to complete {{- end }} if _, err := wait{{ .Resource }}Updated(ctx, conn, aws.ToString(out.OperationId), d.Timeout(schema.TimeoutUpdate)); err != nil { - return create.AppendDiagError(diags, names.{{ .Service }}, create.ErrActionWaitingForUpdate, ResName{{ .Resource }}, d.Id(), err) + return smerr.Append(ctx, diags, err, smerr.ID, d.Id()) } {{ if .IncludeComments }} // TIP: -- 5. Call the Read function in the Update return @@ -491,13 +492,13 @@ func resource{{ .Resource }}Delete(ctx context.Context, d *schema.ResourceData, return diags } if err != nil { - return create.AppendDiagError(diags, names.{{ .Service }}, create.ErrActionDeleting, ResName{{ .Resource }}, d.Id(), err) + return smerr.Append(ctx, diags, err, smerr.ID, d.Id()) } {{ if .IncludeComments }} // TIP: -- 4. Use a waiter to wait for delete to complete {{- end }} if _, err := wait{{ .Resource }}Deleted(ctx, conn, d.Id(), d.Timeout(schema.TimeoutDelete)); err != nil { - return create.AppendDiagError(diags, names.{{ .Service }}, create.ErrActionWaitingForDeletion, ResName{{ .Resource }}, d.Id(), err) + return smerr.Append(ctx, diags, err, smerr.ID, d.Id()) } {{ if .IncludeComments }} // TIP: -- 5. 
Return diags @@ -543,10 +544,10 @@ func wait{{ .Resource }}Created(ctx context.Context, conn *{{ .ServiceLower }}.C outputRaw, err := stateConf.WaitForStateContext(ctx) if out, ok := outputRaw.(*{{ .ServiceLower }}.{{ .Resource }}); ok { - return out, err + return out, smarterr.NewError(err) } - return nil, err + return nil, smarterr.NewError(err) } {{ if .IncludeComments }} // TIP: It is easier to determine whether a resource is updated for some @@ -566,10 +567,10 @@ func wait{{ .Resource }}Updated(ctx context.Context, conn *{{ .ServiceLower }}.C outputRaw, err := stateConf.WaitForStateContext(ctx) if out, ok := outputRaw.(*{{ .ServiceLower }}.{{ .Resource }}); ok { - return out, err + return out, smarterr.NewError(err) } - return nil, err + return nil, smarterr.NewError(err) } {{ if .IncludeComments }} // TIP: A deleted waiter is almost like a backwards created waiter. There may @@ -585,10 +586,10 @@ func wait{{ .Resource }}Deleted(ctx context.Context, conn *{{ .ServiceLower }}.C outputRaw, err := stateConf.WaitForStateContext(ctx) if out, ok := outputRaw.(*{{ .ServiceLower }}.{{ .Resource }}); ok { - return out, err + return out, smarterr.NewError(err) } - return nil, err + return nil, smarterr.NewError(err) } {{ if .IncludeComments }} // TIP: ==== STATUS ==== @@ -607,7 +608,7 @@ func status{{ .Resource }}(ctx context.Context, conn *{{ .ServiceLower }}.Client } if err != nil { - return nil, "", err + return nil, "", smarterr.NewError(err) } return out, aws.ToString(out.Status), nil @@ -627,17 +628,17 @@ func find{{ .Resource }}ByID(ctx context.Context, conn *{{ .ServiceLower }}.Clie out, err := conn.Get{{ .Resource }}(ctx, in) if errs.IsA[*types.ResourceNotFoundException](err){ - return nil, &retry.NotFoundError{ + return nil, smarterr.NewError(&retry.NotFoundError{ LastError: err, LastRequest: in, - } + }) } if err != nil { - return nil, err + return nil, smarterr.NewError(err) } if out == nil || out.{{ .Resource }} == nil { - return nil, 
tfresource.NewEmptyResultError(in) + return nil, smarterr.NewError(tfresource.NewEmptyResultError(in)) } return out.{{ .Resource }}, nil diff --git a/skaff/resource/resourcefw.gtpl b/skaff/resource/resourcefw.gtpl index 23f658a21721..a2f534395ac3 100644 --- a/skaff/resource/resourcefw.gtpl +++ b/skaff/resource/resourcefw.gtpl @@ -60,6 +60,8 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/framework" "github.com/hashicorp/terraform-provider-aws/internal/framework/flex" fwtypes "github.com/hashicorp/terraform-provider-aws/internal/framework/types" + "github.com/hashicorp/terraform-provider-aws/internal/smerr" + "github.com/YakDriver/smarterr" "github.com/hashicorp/terraform-provider-aws/internal/sweep" sweepfw "github.com/hashicorp/terraform-provider-aws/internal/sweep/framework" {{- if .IncludeTags }} @@ -262,7 +264,7 @@ func (r *resource{{ .Resource }}) Create(ctx context.Context, req resource.Creat // TIP: -- 2. Fetch the plan {{- end }} var plan resource{{ .Resource }}Model - resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...) + smerr.EnrichAppend(ctx, &resp.Diagnostics, req.Plan.Get(ctx, &plan)) if resp.Diagnostics.HasError() { return } @@ -274,7 +276,7 @@ func (r *resource{{ .Resource }}) Create(ctx context.Context, req resource.Creat {{ if .IncludeComments -}} // TIP: Using a field name prefix allows mapping fields such as `ID` to `{{ .Resource }}Id` {{- end }} - resp.Diagnostics.Append(flex.Expand(ctx, plan, &input, flex.WithFieldNamePrefix("{{ .Resource }}"))...) + smerr.EnrichAppend(ctx, &resp.Diagnostics, flex.Expand(ctx, plan, &input, flex.WithFieldNamePrefix("{{ .Resource }}"))) if resp.Diagnostics.HasError() { return } @@ -291,24 +293,18 @@ func (r *resource{{ .Resource }}) Create(ctx context.Context, req resource.Creat // TIP: Since ID has not been set yet, you cannot use plan.ID.String() // in error messages at this point. 
{{- end }} - resp.Diagnostics.AddError( - create.ProblemStandardMessage(names.{{ .Service }}, create.ErrActionCreating, ResName{{ .Resource }}, plan.Name.String(), err), - err.Error(), - ) + smerr.AddError(ctx, &resp.Diagnostics, err, smerr.ID, plan.Name.String()) return } if out == nil || out.{{ .Resource }} == nil { - resp.Diagnostics.AddError( - create.ProblemStandardMessage(names.{{ .Service }}, create.ErrActionCreating, ResName{{ .Resource }}, plan.Name.String(), nil), - errors.New("empty output").Error(), - ) + smerr.AddError(ctx, &resp.Diagnostics, errors.New("empty output"), smerr.ID, plan.Name.String()) return } {{ if .IncludeComments -}} // TIP: -- 5. Using the output from the create function, set attributes {{- end }} - resp.Diagnostics.Append(flex.Flatten(ctx, out, &plan)...) + smerr.EnrichAppend(ctx, &resp.Diagnostics, flex.Flatten(ctx, out, &plan)) if resp.Diagnostics.HasError() { return } @@ -319,16 +315,13 @@ func (r *resource{{ .Resource }}) Create(ctx context.Context, req resource.Creat createTimeout := r.CreateTimeout(ctx, plan.Timeouts) _, err = wait{{ .Resource }}Created(ctx, conn, plan.ID.ValueString(), createTimeout) if err != nil { - resp.Diagnostics.AddError( - create.ProblemStandardMessage(names.{{ .Service }}, create.ErrActionWaitingForCreation, ResName{{ .Resource }}, plan.Name.String(), err), - err.Error(), - ) + smerr.AddError(ctx, &resp.Diagnostics, err, smerr.ID, plan.Name.String()) return } {{ if .IncludeComments }} // TIP: -- 7. Save the request plan to response state {{- end }} - resp.Diagnostics.Append(resp.State.Set(ctx, plan)...) + smerr.EnrichAppend(ctx, &resp.Diagnostics, resp.State.Set(ctx, plan)) } func (r *resource{{ .Resource }}) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { @@ -353,7 +346,7 @@ func (r *resource{{ .Resource }}) Read(ctx context.Context, req resource.ReadReq // TIP: -- 2. 
Fetch the state {{- end }} var state resource{{ .Resource }}Model - resp.Diagnostics.Append(req.State.Get(ctx, &state)...) + smerr.EnrichAppend(ctx, &resp.Diagnostics, req.State.Get(ctx, &state)) if resp.Diagnostics.HasError() { return } @@ -371,23 +364,20 @@ func (r *resource{{ .Resource }}) Read(ctx context.Context, req resource.ReadReq return } if err != nil { - resp.Diagnostics.AddError( - create.ProblemStandardMessage(names.{{ .Service }}, create.ErrActionReading, ResName{{ .Resource }}, state.ID.String(), err), - err.Error(), - ) + smerr.AddError(ctx, &resp.Diagnostics, err, smerr.ID, state.ID.String()) return } {{ if .IncludeComments }} // TIP: -- 5. Set the arguments and attributes {{- end }} - resp.Diagnostics.Append(flex.Flatten(ctx, out, &state)...) + smerr.EnrichAppend(ctx, &resp.Diagnostics, flex.Flatten(ctx, out, &state)) if resp.Diagnostics.HasError() { return } {{ if .IncludeComments }} // TIP: -- 6. Set the state {{- end }} - resp.Diagnostics.Append(resp.State.Set(ctx, &state)...) + smerr.EnrichAppend(ctx, &resp.Diagnostics, resp.State.Set(ctx, &state)) } func (r *resource{{ .Resource }}) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) { @@ -422,8 +412,8 @@ func (r *resource{{ .Resource }}) Update(ctx context.Context, req resource.Updat // TIP: -- 2. Fetch the plan {{- end }} var plan, state resource{{ .Resource }}Model - resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...) - resp.Diagnostics.Append(req.State.Get(ctx, &state)...) + smerr.EnrichAppend(ctx, &resp.Diagnostics, req.Plan.Get(ctx, &plan)) + smerr.EnrichAppend(ctx, &resp.Diagnostics, req.State.Get(ctx, &state)) if resp.Diagnostics.HasError() { return } @@ -431,14 +421,14 @@ func (r *resource{{ .Resource }}) Update(ctx context.Context, req resource.Updat // TIP: -- 3. Get the difference between the plan and state, if any {{- end }} diff, d := flex.Diff(ctx, plan, state) - resp.Diagnostics.Append(d...) 
+ smerr.EnrichAppend(ctx, &resp.Diagnostics, d) if resp.Diagnostics.HasError() { return } if diff.HasChanges() { var input {{ .SDKPackage }}.Update{{ .Resource }}Input - resp.Diagnostics.Append(flex.Expand(ctx, plan, &input, flex.WithFieldNamePrefix("Test"))...) + smerr.EnrichAppend(ctx, &resp.Diagnostics, flex.Expand(ctx, plan, &input, flex.WithFieldNamePrefix("Test"))) if resp.Diagnostics.HasError() { return } @@ -447,23 +437,17 @@ func (r *resource{{ .Resource }}) Update(ctx context.Context, req resource.Updat {{- end }} out, err := conn.Update{{ .Resource }}(ctx, &input) if err != nil { - resp.Diagnostics.AddError( - create.ProblemStandardMessage(names.{{ .Service }}, create.ErrActionUpdating, ResName{{ .Resource }}, plan.ID.String(), err), - err.Error(), - ) + smerr.AddError(ctx, &resp.Diagnostics, err, smerr.ID, plan.ID.String()) return } if out == nil || out.{{ .Resource }} == nil { - resp.Diagnostics.AddError( - create.ProblemStandardMessage(names.{{ .Service }}, create.ErrActionUpdating, ResName{{ .Resource }}, plan.ID.String(), nil), - errors.New("empty output").Error(), - ) + smerr.AddError(ctx, &resp.Diagnostics, errors.New("empty output"), smerr.ID, plan.ID.String()) return } {{ if .IncludeComments }} // TIP: Using the output from the update function, re-set any computed attributes {{- end }} - resp.Diagnostics.Append(flex.Flatten(ctx, out, &plan)...) 
+ smerr.EnrichAppend(ctx, &resp.Diagnostics, flex.Flatten(ctx, out, &plan)) if resp.Diagnostics.HasError() { return } @@ -475,17 +459,14 @@ func (r *resource{{ .Resource }}) Update(ctx context.Context, req resource.Updat updateTimeout := r.UpdateTimeout(ctx, plan.Timeouts) _, err := wait{{ .Resource }}Updated(ctx, conn, plan.ID.ValueString(), updateTimeout) if err != nil { - resp.Diagnostics.AddError( - create.ProblemStandardMessage(names.{{ .Service }}, create.ErrActionWaitingForUpdate, ResName{{ .Resource }}, plan.ID.String(), err), - err.Error(), - ) + smerr.AddError(ctx, &resp.Diagnostics, err, smerr.ID, plan.ID.String()) return } {{ if .IncludeComments -}} // TIP: -- 6. Save the request plan to response state {{- end }} - resp.Diagnostics.Append(resp.State.Set(ctx, &plan)...) + smerr.EnrichAppend(ctx, &resp.Diagnostics, resp.State.Set(ctx, &plan)) } func (r *resource{{ .Resource }}) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { @@ -515,7 +496,7 @@ func (r *resource{{ .Resource }}) Delete(ctx context.Context, req resource.Delet // TIP: -- 2. Fetch the state {{- end }} var state resource{{ .Resource }}Model - resp.Diagnostics.Append(req.State.Get(ctx, &state)...) 
+ smerr.EnrichAppend(ctx, &resp.Diagnostics, req.State.Get(ctx, &state)) if resp.Diagnostics.HasError() { return } @@ -538,10 +519,7 @@ func (r *resource{{ .Resource }}) Delete(ctx context.Context, req resource.Delet return } - resp.Diagnostics.AddError( - create.ProblemStandardMessage(names.{{ .Service }}, create.ErrActionDeleting, ResName{{ .Resource }}, state.ID.String(), err), - err.Error(), - ) + smerr.AddError(ctx, &resp.Diagnostics, err, smerr.ID, state.ID.String()) return } {{ if .IncludeComments }} @@ -550,10 +528,7 @@ func (r *resource{{ .Resource }}) Delete(ctx context.Context, req resource.Delet deleteTimeout := r.DeleteTimeout(ctx, state.Timeouts) _, err = wait{{ .Resource }}Deleted(ctx, conn, state.ID.ValueString(), deleteTimeout) if err != nil { - resp.Diagnostics.AddError( - create.ProblemStandardMessage(names.{{ .Service }}, create.ErrActionWaitingForDeletion, ResName{{ .Resource }}, state.ID.String(), err), - err.Error(), - ) + smerr.AddError(ctx, &resp.Diagnostics, err, smerr.ID, state.ID.String()) return } } @@ -609,10 +584,10 @@ func wait{{ .Resource }}Created(ctx context.Context, conn *{{ .ServiceLower }}.C outputRaw, err := stateConf.WaitForStateContext(ctx) if out, ok := outputRaw.(*{{ .ServiceLower }}.{{ .Resource }}); ok { - return out, err + return out, smarterr.NewError(err) } - return nil, err + return nil, smarterr.NewError(err) } {{ if .IncludeComments }} // TIP: It is easier to determine whether a resource is updated for some @@ -632,10 +607,10 @@ func wait{{ .Resource }}Updated(ctx context.Context, conn *{{ .ServiceLower }}.C outputRaw, err := stateConf.WaitForStateContext(ctx) if out, ok := outputRaw.(*{{ .ServiceLower }}.{{ .Resource }}); ok { - return out, err + return out, smarterr.NewError(err) } - return nil, err + return nil, smarterr.NewError(err) } {{ if .IncludeComments }} // TIP: A deleted waiter is almost like a backwards created waiter. 
There may @@ -651,10 +626,10 @@ func wait{{ .Resource }}Deleted(ctx context.Context, conn *{{ .ServiceLower }}.C outputRaw, err := stateConf.WaitForStateContext(ctx) if out, ok := outputRaw.(*{{ .ServiceLower }}.{{ .Resource }}); ok { - return out, err + return out, smarterr.NewError(err) } - return nil, err + return nil, smarterr.NewError(err) } {{ if .IncludeComments }} // TIP: ==== STATUS ==== @@ -673,7 +648,7 @@ func status{{ .Resource }}(ctx context.Context, conn *{{ .ServiceLower }}.Client } if err != nil { - return nil, "", err + return nil, "", smarterr.NewError(err) } return out, aws.ToString(out.Status), nil @@ -694,17 +669,17 @@ func find{{ .Resource }}ByID(ctx context.Context, conn *{{ .ServiceLower }}.Clie out, err := conn.Get{{ .Resource }}(ctx, &input) if err != nil { if errs.IsA[*awstypes.ResourceNotFoundException](err) { - return nil, &retry.NotFoundError{ + return nil, smarterr.NewError(&retry.NotFoundError{ LastError: err, LastRequest: &input, - } + }) } - return nil, err + return nil, smarterr.NewError(err) } if out == nil || out.{{ .Resource }} == nil { - return nil, tfresource.NewEmptyResultError(&input) + return nil, smarterr.NewError(tfresource.NewEmptyResultError(&input)) } return out.{{ .Resource }}, nil @@ -771,7 +746,7 @@ func sweep{{ .Resource }}s(ctx context.Context, client *conns.AWSClient) ([]swee for pages.HasMorePages() { page, err := pages.NextPage(ctx) if err != nil { - return nil, err + return nil, smarterr.NewError(err) } for _, v := range page.{{ .Resource }}s { diff --git a/tools/literally/go.mod b/tools/literally/go.mod index eb4d30b0ac73..e6bccd071b06 100644 --- a/tools/literally/go.mod +++ b/tools/literally/go.mod @@ -1,3 +1,3 @@ module github.com/hashicorp/terraform-provider-aws/tools/literally -go 1.24.4 +go 1.24.8 diff --git a/tools/tfsdk2fw/go.mod b/tools/tfsdk2fw/go.mod index c64060213254..cf6bab46842f 100644 --- a/tools/tfsdk2fw/go.mod +++ b/tools/tfsdk2fw/go.mod @@ -1,11 +1,10 @@ module 
github.com/hashicorp/terraform-provider-aws/tools/tfsdk2fw -go 1.24.4 +go 1.24.8 require ( - github.com/hashicorp/terraform-plugin-sdk/v2 v2.37.0 + github.com/hashicorp/terraform-plugin-sdk/v2 v2.38.1 github.com/hashicorp/terraform-provider-aws v1.60.1-0.20220322001452-8f7a597d0c24 - golang.org/x/exp v0.0.0-20250506013437-ce4c2cf36ca6 ) require ( @@ -15,290 +14,297 @@ require ( github.com/ProtonMail/go-crypto v1.3.0 // indirect github.com/YakDriver/go-version v0.1.0 // indirect github.com/YakDriver/regexache v0.24.0 // indirect - github.com/agext/levenshtein v1.2.2 // indirect + github.com/YakDriver/smarterr v0.6.0 // indirect + github.com/agext/levenshtein v1.2.3 // indirect github.com/apparentlymart/go-textseg/v15 v15.0.0 // indirect github.com/armon/go-radix v1.0.0 // indirect - github.com/aws/aws-sdk-go-v2 v1.36.5 // indirect - github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.11 // indirect - github.com/aws/aws-sdk-go-v2/config v1.29.17 // indirect - github.com/aws/aws-sdk-go-v2/credentials v1.17.70 // indirect - github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.32 // indirect - github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.83 // indirect - github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.36 // indirect - github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.36 // indirect + github.com/aws/aws-sdk-go-v2 v1.39.2 // indirect + github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.1 // indirect + github.com/aws/aws-sdk-go-v2/config v1.31.12 // indirect + github.com/aws/aws-sdk-go-v2/credentials v1.18.16 // indirect + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.9 // indirect + github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.19.12 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.9 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.9 // indirect github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 // indirect - github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.36 // indirect - 
github.com/aws/aws-sdk-go-v2/service/accessanalyzer v1.40.0 // indirect - github.com/aws/aws-sdk-go-v2/service/account v1.24.2 // indirect - github.com/aws/aws-sdk-go-v2/service/acm v1.33.0 // indirect - github.com/aws/aws-sdk-go-v2/service/acmpca v1.40.5 // indirect - github.com/aws/aws-sdk-go-v2/service/amp v1.34.3 // indirect - github.com/aws/aws-sdk-go-v2/service/amplify v1.33.3 // indirect - github.com/aws/aws-sdk-go-v2/service/apigateway v1.31.4 // indirect - github.com/aws/aws-sdk-go-v2/service/apigatewayv2 v1.28.4 // indirect - github.com/aws/aws-sdk-go-v2/service/appconfig v1.38.3 // indirect - github.com/aws/aws-sdk-go-v2/service/appfabric v1.12.4 // indirect - github.com/aws/aws-sdk-go-v2/service/appflow v1.46.4 // indirect - github.com/aws/aws-sdk-go-v2/service/appintegrations v1.31.4 // indirect - github.com/aws/aws-sdk-go-v2/service/applicationautoscaling v1.36.4 // indirect - github.com/aws/aws-sdk-go-v2/service/applicationinsights v1.30.6 // indirect - github.com/aws/aws-sdk-go-v2/service/applicationsignals v1.11.3 // indirect - github.com/aws/aws-sdk-go-v2/service/appmesh v1.30.4 // indirect - github.com/aws/aws-sdk-go-v2/service/apprunner v1.34.2 // indirect - github.com/aws/aws-sdk-go-v2/service/appstream v1.45.5 // indirect - github.com/aws/aws-sdk-go-v2/service/appsync v1.47.3 // indirect - github.com/aws/aws-sdk-go-v2/service/athena v1.51.3 // indirect - github.com/aws/aws-sdk-go-v2/service/auditmanager v1.39.2 // indirect - github.com/aws/aws-sdk-go-v2/service/autoscaling v1.54.0 // indirect - github.com/aws/aws-sdk-go-v2/service/autoscalingplans v1.25.4 // indirect - github.com/aws/aws-sdk-go-v2/service/backup v1.43.1 // indirect - github.com/aws/aws-sdk-go-v2/service/batch v1.53.0 // indirect - github.com/aws/aws-sdk-go-v2/service/bcmdataexports v1.8.4 // indirect - github.com/aws/aws-sdk-go-v2/service/bedrock v1.38.0 // indirect - github.com/aws/aws-sdk-go-v2/service/bedrockagent v1.44.2 // indirect - 
github.com/aws/aws-sdk-go-v2/service/billing v1.2.4 // indirect - github.com/aws/aws-sdk-go-v2/service/budgets v1.31.2 // indirect - github.com/aws/aws-sdk-go-v2/service/chatbot v1.10.4 // indirect - github.com/aws/aws-sdk-go-v2/service/chime v1.36.4 // indirect - github.com/aws/aws-sdk-go-v2/service/chimesdkmediapipelines v1.22.4 // indirect - github.com/aws/aws-sdk-go-v2/service/chimesdkvoice v1.22.2 // indirect - github.com/aws/aws-sdk-go-v2/service/cleanrooms v1.25.2 // indirect - github.com/aws/aws-sdk-go-v2/service/cloud9 v1.29.4 // indirect - github.com/aws/aws-sdk-go-v2/service/cloudcontrol v1.24.6 // indirect - github.com/aws/aws-sdk-go-v2/service/cloudformation v1.61.0 // indirect - github.com/aws/aws-sdk-go-v2/service/cloudfront v1.46.3 // indirect - github.com/aws/aws-sdk-go-v2/service/cloudfrontkeyvaluestore v1.9.4 // indirect - github.com/aws/aws-sdk-go-v2/service/cloudhsmv2 v1.30.5 // indirect - github.com/aws/aws-sdk-go-v2/service/cloudsearch v1.27.4 // indirect - github.com/aws/aws-sdk-go-v2/service/cloudtrail v1.49.3 // indirect - github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.45.3 // indirect - github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs v1.51.0 // indirect - github.com/aws/aws-sdk-go-v2/service/codeartifact v1.34.4 // indirect - github.com/aws/aws-sdk-go-v2/service/codebuild v1.61.2 // indirect - github.com/aws/aws-sdk-go-v2/service/codecatalyst v1.17.21 // indirect - github.com/aws/aws-sdk-go-v2/service/codecommit v1.28.4 // indirect - github.com/aws/aws-sdk-go-v2/service/codeconnections v1.6.4 // indirect - github.com/aws/aws-sdk-go-v2/service/codedeploy v1.30.6 // indirect - github.com/aws/aws-sdk-go-v2/service/codeguruprofiler v1.25.4 // indirect - github.com/aws/aws-sdk-go-v2/service/codegurureviewer v1.30.4 // indirect - github.com/aws/aws-sdk-go-v2/service/codepipeline v1.42.2 // indirect - github.com/aws/aws-sdk-go-v2/service/codestarconnections v1.30.4 // indirect - github.com/aws/aws-sdk-go-v2/service/codestarnotifications 
v1.27.4 // indirect - github.com/aws/aws-sdk-go-v2/service/cognitoidentity v1.29.6 // indirect - github.com/aws/aws-sdk-go-v2/service/cognitoidentityprovider v1.53.2 // indirect - github.com/aws/aws-sdk-go-v2/service/comprehend v1.36.6 // indirect - github.com/aws/aws-sdk-go-v2/service/computeoptimizer v1.43.2 // indirect - github.com/aws/aws-sdk-go-v2/service/configservice v1.53.0 // indirect - github.com/aws/aws-sdk-go-v2/service/connect v1.131.0 // indirect - github.com/aws/aws-sdk-go-v2/service/connectcases v1.26.0 // indirect - github.com/aws/aws-sdk-go-v2/service/controltower v1.22.3 // indirect - github.com/aws/aws-sdk-go-v2/service/costandusagereportservice v1.29.4 // indirect - github.com/aws/aws-sdk-go-v2/service/costexplorer v1.51.2 // indirect - github.com/aws/aws-sdk-go-v2/service/costoptimizationhub v1.16.2 // indirect - github.com/aws/aws-sdk-go-v2/service/customerprofiles v1.47.0 // indirect - github.com/aws/aws-sdk-go-v2/service/databasemigrationservice v1.53.0 // indirect - github.com/aws/aws-sdk-go-v2/service/databrew v1.34.4 // indirect - github.com/aws/aws-sdk-go-v2/service/dataexchange v1.35.2 // indirect - github.com/aws/aws-sdk-go-v2/service/datapipeline v1.26.4 // indirect - github.com/aws/aws-sdk-go-v2/service/datasync v1.49.3 // indirect - github.com/aws/aws-sdk-go-v2/service/datazone v1.31.0 // indirect - github.com/aws/aws-sdk-go-v2/service/dax v1.24.4 // indirect - github.com/aws/aws-sdk-go-v2/service/detective v1.33.2 // indirect - github.com/aws/aws-sdk-go-v2/service/devicefarm v1.31.2 // indirect - github.com/aws/aws-sdk-go-v2/service/devopsguru v1.35.4 // indirect - github.com/aws/aws-sdk-go-v2/service/directconnect v1.32.5 // indirect - github.com/aws/aws-sdk-go-v2/service/directoryservice v1.31.7 // indirect - github.com/aws/aws-sdk-go-v2/service/dlm v1.30.7 // indirect - github.com/aws/aws-sdk-go-v2/service/docdb v1.41.6 // indirect - github.com/aws/aws-sdk-go-v2/service/docdbelastic v1.15.4 // indirect - 
github.com/aws/aws-sdk-go-v2/service/drs v1.31.4 // indirect - github.com/aws/aws-sdk-go-v2/service/dsql v1.5.2 // indirect - github.com/aws/aws-sdk-go-v2/service/dynamodb v1.44.0 // indirect - github.com/aws/aws-sdk-go-v2/service/ec2 v1.231.0 // indirect - github.com/aws/aws-sdk-go-v2/service/ecr v1.45.1 // indirect - github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.33.2 // indirect - github.com/aws/aws-sdk-go-v2/service/ecs v1.58.1 // indirect - github.com/aws/aws-sdk-go-v2/service/efs v1.36.2 // indirect - github.com/aws/aws-sdk-go-v2/service/eks v1.66.1 // indirect - github.com/aws/aws-sdk-go-v2/service/elasticache v1.46.3 // indirect - github.com/aws/aws-sdk-go-v2/service/elasticbeanstalk v1.29.5 // indirect - github.com/aws/aws-sdk-go-v2/service/elasticloadbalancing v1.29.6 // indirect - github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2 v1.46.0 // indirect - github.com/aws/aws-sdk-go-v2/service/elasticsearchservice v1.33.6 // indirect - github.com/aws/aws-sdk-go-v2/service/elastictranscoder v1.28.4 // indirect - github.com/aws/aws-sdk-go-v2/service/emr v1.49.3 // indirect - github.com/aws/aws-sdk-go-v2/service/emrcontainers v1.35.4 // indirect - github.com/aws/aws-sdk-go-v2/service/emrserverless v1.32.0 // indirect - github.com/aws/aws-sdk-go-v2/service/eventbridge v1.40.0 // indirect - github.com/aws/aws-sdk-go-v2/service/evidently v1.24.4 // indirect - github.com/aws/aws-sdk-go-v2/service/evs v1.0.2 // indirect - github.com/aws/aws-sdk-go-v2/service/finspace v1.29.4 // indirect - github.com/aws/aws-sdk-go-v2/service/firehose v1.37.7 // indirect - github.com/aws/aws-sdk-go-v2/service/fis v1.33.4 // indirect - github.com/aws/aws-sdk-go-v2/service/fms v1.40.5 // indirect - github.com/aws/aws-sdk-go-v2/service/fsx v1.55.0 // indirect - github.com/aws/aws-sdk-go-v2/service/gamelift v1.42.1 // indirect - github.com/aws/aws-sdk-go-v2/service/glacier v1.27.5 // indirect - github.com/aws/aws-sdk-go-v2/service/globalaccelerator v1.30.4 // indirect - 
github.com/aws/aws-sdk-go-v2/service/glue v1.117.0 // indirect - github.com/aws/aws-sdk-go-v2/service/grafana v1.27.4 // indirect - github.com/aws/aws-sdk-go-v2/service/greengrass v1.28.4 // indirect - github.com/aws/aws-sdk-go-v2/service/groundstation v1.33.2 // indirect - github.com/aws/aws-sdk-go-v2/service/guardduty v1.56.0 // indirect - github.com/aws/aws-sdk-go-v2/service/healthlake v1.30.5 // indirect - github.com/aws/aws-sdk-go-v2/service/iam v1.43.0 // indirect - github.com/aws/aws-sdk-go-v2/service/identitystore v1.28.6 // indirect - github.com/aws/aws-sdk-go-v2/service/imagebuilder v1.42.3 // indirect - github.com/aws/aws-sdk-go-v2/service/inspector v1.26.4 // indirect - github.com/aws/aws-sdk-go-v2/service/inspector2 v1.38.1 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.4 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.7.4 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.10.17 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.17 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.17 // indirect - github.com/aws/aws-sdk-go-v2/service/internetmonitor v1.21.5 // indirect - github.com/aws/aws-sdk-go-v2/service/invoicing v1.2.2 // indirect - github.com/aws/aws-sdk-go-v2/service/iot v1.64.4 // indirect - github.com/aws/aws-sdk-go-v2/service/ivs v1.43.4 // indirect - github.com/aws/aws-sdk-go-v2/service/ivschat v1.17.4 // indirect - github.com/aws/aws-sdk-go-v2/service/kafka v1.39.5 // indirect - github.com/aws/aws-sdk-go-v2/service/kafkaconnect v1.23.5 // indirect - github.com/aws/aws-sdk-go-v2/service/kendra v1.56.4 // indirect - github.com/aws/aws-sdk-go-v2/service/keyspaces v1.19.0 // indirect - github.com/aws/aws-sdk-go-v2/service/kinesis v1.35.3 // indirect - github.com/aws/aws-sdk-go-v2/service/kinesisanalytics v1.26.7 // indirect - github.com/aws/aws-sdk-go-v2/service/kinesisanalyticsv2 v1.32.7 // indirect - 
github.com/aws/aws-sdk-go-v2/service/kinesisvideo v1.28.4 // indirect - github.com/aws/aws-sdk-go-v2/service/kms v1.41.2 // indirect - github.com/aws/aws-sdk-go-v2/service/lakeformation v1.41.8 // indirect - github.com/aws/aws-sdk-go-v2/service/lambda v1.72.0 // indirect - github.com/aws/aws-sdk-go-v2/service/launchwizard v1.9.4 // indirect - github.com/aws/aws-sdk-go-v2/service/lexmodelbuildingservice v1.29.4 // indirect - github.com/aws/aws-sdk-go-v2/service/lexmodelsv2 v1.52.1 // indirect - github.com/aws/aws-sdk-go-v2/service/licensemanager v1.32.0 // indirect - github.com/aws/aws-sdk-go-v2/service/lightsail v1.43.4 // indirect - github.com/aws/aws-sdk-go-v2/service/location v1.44.4 // indirect - github.com/aws/aws-sdk-go-v2/service/lookoutmetrics v1.32.4 // indirect - github.com/aws/aws-sdk-go-v2/service/m2 v1.21.2 // indirect - github.com/aws/aws-sdk-go-v2/service/macie2 v1.45.4 // indirect - github.com/aws/aws-sdk-go-v2/service/mediaconnect v1.40.2 // indirect - github.com/aws/aws-sdk-go-v2/service/mediaconvert v1.75.0 // indirect - github.com/aws/aws-sdk-go-v2/service/medialive v1.76.2 // indirect - github.com/aws/aws-sdk-go-v2/service/mediapackage v1.35.4 // indirect - github.com/aws/aws-sdk-go-v2/service/mediapackagev2 v1.24.0 // indirect - github.com/aws/aws-sdk-go-v2/service/mediapackagevod v1.35.4 // indirect - github.com/aws/aws-sdk-go-v2/service/mediastore v1.25.4 // indirect - github.com/aws/aws-sdk-go-v2/service/memorydb v1.27.2 // indirect - github.com/aws/aws-sdk-go-v2/service/mgn v1.33.4 // indirect - github.com/aws/aws-sdk-go-v2/service/mq v1.29.2 // indirect - github.com/aws/aws-sdk-go-v2/service/mwaa v1.35.3 // indirect - github.com/aws/aws-sdk-go-v2/service/neptune v1.37.3 // indirect - github.com/aws/aws-sdk-go-v2/service/neptunegraph v1.17.5 // indirect - github.com/aws/aws-sdk-go-v2/service/networkfirewall v1.51.0 // indirect - github.com/aws/aws-sdk-go-v2/service/networkmanager v1.35.1 // indirect - 
github.com/aws/aws-sdk-go-v2/service/networkmonitor v1.8.4 // indirect - github.com/aws/aws-sdk-go-v2/service/notifications v1.2.5 // indirect - github.com/aws/aws-sdk-go-v2/service/notificationscontacts v1.1.4 // indirect - github.com/aws/aws-sdk-go-v2/service/oam v1.18.3 // indirect - github.com/aws/aws-sdk-go-v2/service/opensearch v1.46.6 // indirect - github.com/aws/aws-sdk-go-v2/service/opensearchserverless v1.19.6 // indirect - github.com/aws/aws-sdk-go-v2/service/organizations v1.39.0 // indirect - github.com/aws/aws-sdk-go-v2/service/osis v1.15.5 // indirect - github.com/aws/aws-sdk-go-v2/service/outposts v1.51.0 // indirect - github.com/aws/aws-sdk-go-v2/service/paymentcryptography v1.19.0 // indirect - github.com/aws/aws-sdk-go-v2/service/pcaconnectorad v1.11.3 // indirect - github.com/aws/aws-sdk-go-v2/service/pcs v1.6.2 // indirect - github.com/aws/aws-sdk-go-v2/service/pinpoint v1.35.4 // indirect - github.com/aws/aws-sdk-go-v2/service/pinpointsmsvoicev2 v1.20.3 // indirect - github.com/aws/aws-sdk-go-v2/service/pipes v1.19.5 // indirect - github.com/aws/aws-sdk-go-v2/service/polly v1.48.4 // indirect - github.com/aws/aws-sdk-go-v2/service/pricing v1.34.5 // indirect - github.com/aws/aws-sdk-go-v2/service/qbusiness v1.28.0 // indirect - github.com/aws/aws-sdk-go-v2/service/qldb v1.26.4 // indirect - github.com/aws/aws-sdk-go-v2/service/quicksight v1.87.0 // indirect - github.com/aws/aws-sdk-go-v2/service/ram v1.30.6 // indirect - github.com/aws/aws-sdk-go-v2/service/rbin v1.22.6 // indirect - github.com/aws/aws-sdk-go-v2/service/rds v1.99.1 // indirect - github.com/aws/aws-sdk-go-v2/service/redshift v1.54.6 // indirect - github.com/aws/aws-sdk-go-v2/service/redshiftdata v1.33.3 // indirect - github.com/aws/aws-sdk-go-v2/service/redshiftserverless v1.27.4 // indirect - github.com/aws/aws-sdk-go-v2/service/rekognition v1.47.2 // indirect - github.com/aws/aws-sdk-go-v2/service/resiliencehub v1.30.4 // indirect - 
github.com/aws/aws-sdk-go-v2/service/resourceexplorer2 v1.17.6 // indirect - github.com/aws/aws-sdk-go-v2/service/resourcegroups v1.29.3 // indirect - github.com/aws/aws-sdk-go-v2/service/resourcegroupstaggingapi v1.26.6 // indirect - github.com/aws/aws-sdk-go-v2/service/rolesanywhere v1.17.5 // indirect - github.com/aws/aws-sdk-go-v2/service/route53 v1.53.0 // indirect - github.com/aws/aws-sdk-go-v2/service/route53domains v1.29.4 // indirect - github.com/aws/aws-sdk-go-v2/service/route53profiles v1.5.9 // indirect - github.com/aws/aws-sdk-go-v2/service/route53recoverycontrolconfig v1.27.3 // indirect - github.com/aws/aws-sdk-go-v2/service/route53recoveryreadiness v1.22.4 // indirect - github.com/aws/aws-sdk-go-v2/service/route53resolver v1.36.0 // indirect - github.com/aws/aws-sdk-go-v2/service/rum v1.24.4 // indirect - github.com/aws/aws-sdk-go-v2/service/s3 v1.83.0 // indirect - github.com/aws/aws-sdk-go-v2/service/s3control v1.60.0 // indirect - github.com/aws/aws-sdk-go-v2/service/s3outposts v1.29.4 // indirect - github.com/aws/aws-sdk-go-v2/service/s3tables v1.5.0 // indirect - github.com/aws/aws-sdk-go-v2/service/sagemaker v1.200.1 // indirect - github.com/aws/aws-sdk-go-v2/service/scheduler v1.13.10 // indirect - github.com/aws/aws-sdk-go-v2/service/schemas v1.29.5 // indirect - github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.35.7 // indirect - github.com/aws/aws-sdk-go-v2/service/securityhub v1.58.0 // indirect - github.com/aws/aws-sdk-go-v2/service/securitylake v1.20.5 // indirect - github.com/aws/aws-sdk-go-v2/service/serverlessapplicationrepository v1.25.4 // indirect - github.com/aws/aws-sdk-go-v2/service/servicecatalog v1.34.2 // indirect - github.com/aws/aws-sdk-go-v2/service/servicecatalogappregistry v1.31.4 // indirect - github.com/aws/aws-sdk-go-v2/service/servicediscovery v1.35.7 // indirect - github.com/aws/aws-sdk-go-v2/service/servicequotas v1.28.3 // indirect - github.com/aws/aws-sdk-go-v2/service/ses v1.30.5 // indirect - 
github.com/aws/aws-sdk-go-v2/service/sesv2 v1.46.0 // indirect - github.com/aws/aws-sdk-go-v2/service/sfn v1.35.7 // indirect - github.com/aws/aws-sdk-go-v2/service/shield v1.30.4 // indirect - github.com/aws/aws-sdk-go-v2/service/signer v1.27.4 // indirect - github.com/aws/aws-sdk-go-v2/service/sns v1.34.7 // indirect - github.com/aws/aws-sdk-go-v2/service/sqs v1.38.8 // indirect - github.com/aws/aws-sdk-go-v2/service/ssm v1.60.0 // indirect - github.com/aws/aws-sdk-go-v2/service/ssmcontacts v1.27.4 // indirect - github.com/aws/aws-sdk-go-v2/service/ssmincidents v1.35.4 // indirect - github.com/aws/aws-sdk-go-v2/service/ssmquicksetup v1.4.4 // indirect - github.com/aws/aws-sdk-go-v2/service/ssmsap v1.20.4 // indirect - github.com/aws/aws-sdk-go-v2/service/sso v1.25.5 // indirect - github.com/aws/aws-sdk-go-v2/service/ssoadmin v1.31.2 // indirect - github.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.3 // indirect - github.com/aws/aws-sdk-go-v2/service/storagegateway v1.38.0 // indirect - github.com/aws/aws-sdk-go-v2/service/sts v1.34.0 // indirect - github.com/aws/aws-sdk-go-v2/service/swf v1.28.6 // indirect - github.com/aws/aws-sdk-go-v2/service/synthetics v1.35.3 // indirect - github.com/aws/aws-sdk-go-v2/service/taxsettings v1.12.2 // indirect - github.com/aws/aws-sdk-go-v2/service/timestreaminfluxdb v1.10.5 // indirect - github.com/aws/aws-sdk-go-v2/service/timestreamquery v1.31.2 // indirect - github.com/aws/aws-sdk-go-v2/service/timestreamwrite v1.31.2 // indirect - github.com/aws/aws-sdk-go-v2/service/transcribe v1.47.0 // indirect - github.com/aws/aws-sdk-go-v2/service/transfer v1.61.0 // indirect - github.com/aws/aws-sdk-go-v2/service/verifiedpermissions v1.24.2 // indirect - github.com/aws/aws-sdk-go-v2/service/vpclattice v1.14.4 // indirect - github.com/aws/aws-sdk-go-v2/service/waf v1.26.4 // indirect - github.com/aws/aws-sdk-go-v2/service/wafregional v1.26.4 // indirect - github.com/aws/aws-sdk-go-v2/service/wafv2 v1.63.1 // indirect - 
github.com/aws/aws-sdk-go-v2/service/wellarchitected v1.35.4 // indirect - github.com/aws/aws-sdk-go-v2/service/workspaces v1.58.0 // indirect - github.com/aws/aws-sdk-go-v2/service/workspacesweb v1.27.4 // indirect - github.com/aws/aws-sdk-go-v2/service/xray v1.31.7 // indirect - github.com/aws/smithy-go v1.22.4 // indirect - github.com/beevik/etree v1.5.1 // indirect + github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.9 // indirect + github.com/aws/aws-sdk-go-v2/service/accessanalyzer v1.44.6 // indirect + github.com/aws/aws-sdk-go-v2/service/account v1.28.6 // indirect + github.com/aws/aws-sdk-go-v2/service/acm v1.37.6 // indirect + github.com/aws/aws-sdk-go-v2/service/acmpca v1.44.5 // indirect + github.com/aws/aws-sdk-go-v2/service/amp v1.40.3 // indirect + github.com/aws/aws-sdk-go-v2/service/amplify v1.37.5 // indirect + github.com/aws/aws-sdk-go-v2/service/apigateway v1.35.6 // indirect + github.com/aws/aws-sdk-go-v2/service/apigatewayv2 v1.32.6 // indirect + github.com/aws/aws-sdk-go-v2/service/appconfig v1.42.6 // indirect + github.com/aws/aws-sdk-go-v2/service/appfabric v1.16.6 // indirect + github.com/aws/aws-sdk-go-v2/service/appflow v1.50.6 // indirect + github.com/aws/aws-sdk-go-v2/service/appintegrations v1.36.6 // indirect + github.com/aws/aws-sdk-go-v2/service/applicationautoscaling v1.40.5 // indirect + github.com/aws/aws-sdk-go-v2/service/applicationinsights v1.34.5 // indirect + github.com/aws/aws-sdk-go-v2/service/applicationsignals v1.16.0 // indirect + github.com/aws/aws-sdk-go-v2/service/appmesh v1.34.6 // indirect + github.com/aws/aws-sdk-go-v2/service/apprunner v1.38.7 // indirect + github.com/aws/aws-sdk-go-v2/service/appstream v1.50.0 // indirect + github.com/aws/aws-sdk-go-v2/service/appsync v1.51.6 // indirect + github.com/aws/aws-sdk-go-v2/service/arcregionswitch v1.2.8 // indirect + github.com/aws/aws-sdk-go-v2/service/athena v1.55.6 // indirect + github.com/aws/aws-sdk-go-v2/service/auditmanager v1.45.6 // indirect + 
github.com/aws/aws-sdk-go-v2/service/autoscaling v1.59.3 // indirect + github.com/aws/aws-sdk-go-v2/service/autoscalingplans v1.29.5 // indirect + github.com/aws/aws-sdk-go-v2/service/backup v1.49.0 // indirect + github.com/aws/aws-sdk-go-v2/service/batch v1.57.10 // indirect + github.com/aws/aws-sdk-go-v2/service/bcmdataexports v1.11.8 // indirect + github.com/aws/aws-sdk-go-v2/service/bedrock v1.48.0 // indirect + github.com/aws/aws-sdk-go-v2/service/bedrockagent v1.50.6 // indirect + github.com/aws/aws-sdk-go-v2/service/bedrockagentcorecontrol v1.10.0 // indirect + github.com/aws/aws-sdk-go-v2/service/billing v1.8.0 // indirect + github.com/aws/aws-sdk-go-v2/service/budgets v1.39.2 // indirect + github.com/aws/aws-sdk-go-v2/service/chatbot v1.14.6 // indirect + github.com/aws/aws-sdk-go-v2/service/chime v1.40.5 // indirect + github.com/aws/aws-sdk-go-v2/service/chimesdkmediapipelines v1.26.6 // indirect + github.com/aws/aws-sdk-go-v2/service/chimesdkvoice v1.27.0 // indirect + github.com/aws/aws-sdk-go-v2/service/cleanrooms v1.36.0 // indirect + github.com/aws/aws-sdk-go-v2/service/cloud9 v1.33.5 // indirect + github.com/aws/aws-sdk-go-v2/service/cloudcontrol v1.28.6 // indirect + github.com/aws/aws-sdk-go-v2/service/cloudformation v1.67.0 // indirect + github.com/aws/aws-sdk-go-v2/service/cloudfront v1.55.0 // indirect + github.com/aws/aws-sdk-go-v2/service/cloudfrontkeyvaluestore v1.12.8 // indirect + github.com/aws/aws-sdk-go-v2/service/cloudhsmv2 v1.34.5 // indirect + github.com/aws/aws-sdk-go-v2/service/cloudsearch v1.31.6 // indirect + github.com/aws/aws-sdk-go-v2/service/cloudtrail v1.53.6 // indirect + github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.51.1 // indirect + github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs v1.58.2 // indirect + github.com/aws/aws-sdk-go-v2/service/codeartifact v1.38.6 // indirect + github.com/aws/aws-sdk-go-v2/service/codebuild v1.67.5 // indirect + github.com/aws/aws-sdk-go-v2/service/codecatalyst v1.20.8 // indirect + 
github.com/aws/aws-sdk-go-v2/service/codecommit v1.32.6 // indirect + github.com/aws/aws-sdk-go-v2/service/codeconnections v1.10.5 // indirect + github.com/aws/aws-sdk-go-v2/service/codedeploy v1.34.6 // indirect + github.com/aws/aws-sdk-go-v2/service/codeguruprofiler v1.29.5 // indirect + github.com/aws/aws-sdk-go-v2/service/codegurureviewer v1.34.5 // indirect + github.com/aws/aws-sdk-go-v2/service/codepipeline v1.46.6 // indirect + github.com/aws/aws-sdk-go-v2/service/codestarconnections v1.34.6 // indirect + github.com/aws/aws-sdk-go-v2/service/codestarnotifications v1.31.6 // indirect + github.com/aws/aws-sdk-go-v2/service/cognitoidentity v1.33.6 // indirect + github.com/aws/aws-sdk-go-v2/service/cognitoidentityprovider v1.57.7 // indirect + github.com/aws/aws-sdk-go-v2/service/comprehend v1.40.6 // indirect + github.com/aws/aws-sdk-go-v2/service/computeoptimizer v1.47.5 // indirect + github.com/aws/aws-sdk-go-v2/service/configservice v1.58.2 // indirect + github.com/aws/aws-sdk-go-v2/service/connect v1.142.0 // indirect + github.com/aws/aws-sdk-go-v2/service/connectcases v1.32.0 // indirect + github.com/aws/aws-sdk-go-v2/service/controltower v1.26.6 // indirect + github.com/aws/aws-sdk-go-v2/service/costandusagereportservice v1.33.6 // indirect + github.com/aws/aws-sdk-go-v2/service/costexplorer v1.57.0 // indirect + github.com/aws/aws-sdk-go-v2/service/costoptimizationhub v1.20.6 // indirect + github.com/aws/aws-sdk-go-v2/service/customerprofiles v1.53.0 // indirect + github.com/aws/aws-sdk-go-v2/service/databasemigrationservice v1.57.7 // indirect + github.com/aws/aws-sdk-go-v2/service/databrew v1.38.5 // indirect + github.com/aws/aws-sdk-go-v2/service/dataexchange v1.39.6 // indirect + github.com/aws/aws-sdk-go-v2/service/datapipeline v1.30.5 // indirect + github.com/aws/aws-sdk-go-v2/service/datasync v1.55.0 // indirect + github.com/aws/aws-sdk-go-v2/service/datazone v1.43.0 // indirect + github.com/aws/aws-sdk-go-v2/service/dax v1.29.1 // indirect + 
github.com/aws/aws-sdk-go-v2/service/detective v1.37.7 // indirect + github.com/aws/aws-sdk-go-v2/service/devicefarm v1.35.6 // indirect + github.com/aws/aws-sdk-go-v2/service/devopsguru v1.39.6 // indirect + github.com/aws/aws-sdk-go-v2/service/directconnect v1.37.6 // indirect + github.com/aws/aws-sdk-go-v2/service/directoryservice v1.38.0 // indirect + github.com/aws/aws-sdk-go-v2/service/dlm v1.34.6 // indirect + github.com/aws/aws-sdk-go-v2/service/docdb v1.47.0 // indirect + github.com/aws/aws-sdk-go-v2/service/docdbelastic v1.19.6 // indirect + github.com/aws/aws-sdk-go-v2/service/drs v1.35.6 // indirect + github.com/aws/aws-sdk-go-v2/service/dsql v1.9.8 // indirect + github.com/aws/aws-sdk-go-v2/service/dynamodb v1.51.0 // indirect + github.com/aws/aws-sdk-go-v2/service/ec2 v1.257.0 // indirect + github.com/aws/aws-sdk-go-v2/service/ecr v1.50.5 // indirect + github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.37.6 // indirect + github.com/aws/aws-sdk-go-v2/service/ecs v1.65.1 // indirect + github.com/aws/aws-sdk-go-v2/service/efs v1.40.8 // indirect + github.com/aws/aws-sdk-go-v2/service/eks v1.74.2 // indirect + github.com/aws/aws-sdk-go-v2/service/elasticache v1.50.5 // indirect + github.com/aws/aws-sdk-go-v2/service/elasticbeanstalk v1.33.7 // indirect + github.com/aws/aws-sdk-go-v2/service/elasticloadbalancing v1.33.6 // indirect + github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2 v1.51.0 // indirect + github.com/aws/aws-sdk-go-v2/service/elasticsearchservice v1.37.6 // indirect + github.com/aws/aws-sdk-go-v2/service/elastictranscoder v1.32.6 // indirect + github.com/aws/aws-sdk-go-v2/service/emr v1.54.5 // indirect + github.com/aws/aws-sdk-go-v2/service/emrcontainers v1.40.2 // indirect + github.com/aws/aws-sdk-go-v2/service/emrserverless v1.36.6 // indirect + github.com/aws/aws-sdk-go-v2/service/eventbridge v1.45.5 // indirect + github.com/aws/aws-sdk-go-v2/service/evidently v1.28.5 // indirect + github.com/aws/aws-sdk-go-v2/service/evs v1.5.2 
// indirect + github.com/aws/aws-sdk-go-v2/service/finspace v1.33.6 // indirect + github.com/aws/aws-sdk-go-v2/service/firehose v1.41.6 // indirect + github.com/aws/aws-sdk-go-v2/service/fis v1.37.5 // indirect + github.com/aws/aws-sdk-go-v2/service/fms v1.44.6 // indirect + github.com/aws/aws-sdk-go-v2/service/fsx v1.62.0 // indirect + github.com/aws/aws-sdk-go-v2/service/gamelift v1.46.6 // indirect + github.com/aws/aws-sdk-go-v2/service/glacier v1.31.6 // indirect + github.com/aws/aws-sdk-go-v2/service/globalaccelerator v1.34.6 // indirect + github.com/aws/aws-sdk-go-v2/service/glue v1.131.0 // indirect + github.com/aws/aws-sdk-go-v2/service/grafana v1.31.6 // indirect + github.com/aws/aws-sdk-go-v2/service/greengrass v1.32.6 // indirect + github.com/aws/aws-sdk-go-v2/service/groundstation v1.37.6 // indirect + github.com/aws/aws-sdk-go-v2/service/guardduty v1.65.0 // indirect + github.com/aws/aws-sdk-go-v2/service/healthlake v1.35.5 // indirect + github.com/aws/aws-sdk-go-v2/service/iam v1.47.7 // indirect + github.com/aws/aws-sdk-go-v2/service/identitystore v1.32.7 // indirect + github.com/aws/aws-sdk-go-v2/service/imagebuilder v1.48.0 // indirect + github.com/aws/aws-sdk-go-v2/service/inspector v1.30.5 // indirect + github.com/aws/aws-sdk-go-v2/service/inspector2 v1.44.6 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.1 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.0 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.11.9 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.9 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.9 // indirect + github.com/aws/aws-sdk-go-v2/service/internetmonitor v1.25.5 // indirect + github.com/aws/aws-sdk-go-v2/service/invoicing v1.6.8 // indirect + github.com/aws/aws-sdk-go-v2/service/iot v1.69.5 // indirect + github.com/aws/aws-sdk-go-v2/service/ivs v1.47.6 // indirect + 
github.com/aws/aws-sdk-go-v2/service/ivschat v1.21.5 // indirect + github.com/aws/aws-sdk-go-v2/service/kafka v1.43.6 // indirect + github.com/aws/aws-sdk-go-v2/service/kafkaconnect v1.27.6 // indirect + github.com/aws/aws-sdk-go-v2/service/kendra v1.60.6 // indirect + github.com/aws/aws-sdk-go-v2/service/keyspaces v1.23.6 // indirect + github.com/aws/aws-sdk-go-v2/service/kinesis v1.40.5 // indirect + github.com/aws/aws-sdk-go-v2/service/kinesisanalytics v1.30.6 // indirect + github.com/aws/aws-sdk-go-v2/service/kinesisanalyticsv2 v1.36.7 // indirect + github.com/aws/aws-sdk-go-v2/service/kinesisvideo v1.32.5 // indirect + github.com/aws/aws-sdk-go-v2/service/kms v1.45.6 // indirect + github.com/aws/aws-sdk-go-v2/service/lakeformation v1.45.5 // indirect + github.com/aws/aws-sdk-go-v2/service/lambda v1.78.0 // indirect + github.com/aws/aws-sdk-go-v2/service/launchwizard v1.13.6 // indirect + github.com/aws/aws-sdk-go-v2/service/lexmodelbuildingservice v1.33.5 // indirect + github.com/aws/aws-sdk-go-v2/service/lexmodelsv2 v1.56.6 // indirect + github.com/aws/aws-sdk-go-v2/service/licensemanager v1.36.6 // indirect + github.com/aws/aws-sdk-go-v2/service/lightsail v1.50.0 // indirect + github.com/aws/aws-sdk-go-v2/service/location v1.49.6 // indirect + github.com/aws/aws-sdk-go-v2/service/lookoutmetrics v1.36.6 // indirect + github.com/aws/aws-sdk-go-v2/service/m2 v1.25.6 // indirect + github.com/aws/aws-sdk-go-v2/service/macie2 v1.49.6 // indirect + github.com/aws/aws-sdk-go-v2/service/mediaconnect v1.45.0 // indirect + github.com/aws/aws-sdk-go-v2/service/mediaconvert v1.82.6 // indirect + github.com/aws/aws-sdk-go-v2/service/medialive v1.84.0 // indirect + github.com/aws/aws-sdk-go-v2/service/mediapackage v1.39.6 // indirect + github.com/aws/aws-sdk-go-v2/service/mediapackagev2 v1.31.3 // indirect + github.com/aws/aws-sdk-go-v2/service/mediapackagevod v1.39.6 // indirect + github.com/aws/aws-sdk-go-v2/service/mediastore v1.29.6 // indirect + 
github.com/aws/aws-sdk-go-v2/service/memorydb v1.32.0 // indirect + github.com/aws/aws-sdk-go-v2/service/mgn v1.37.5 // indirect + github.com/aws/aws-sdk-go-v2/service/mq v1.34.4 // indirect + github.com/aws/aws-sdk-go-v2/service/mwaa v1.39.6 // indirect + github.com/aws/aws-sdk-go-v2/service/neptune v1.42.5 // indirect + github.com/aws/aws-sdk-go-v2/service/neptunegraph v1.21.5 // indirect + github.com/aws/aws-sdk-go-v2/service/networkfirewall v1.57.1 // indirect + github.com/aws/aws-sdk-go-v2/service/networkmanager v1.39.7 // indirect + github.com/aws/aws-sdk-go-v2/service/networkmonitor v1.12.6 // indirect + github.com/aws/aws-sdk-go-v2/service/notifications v1.7.4 // indirect + github.com/aws/aws-sdk-go-v2/service/notificationscontacts v1.5.8 // indirect + github.com/aws/aws-sdk-go-v2/service/oam v1.22.5 // indirect + github.com/aws/aws-sdk-go-v2/service/odb v1.5.0 // indirect + github.com/aws/aws-sdk-go-v2/service/opensearch v1.52.5 // indirect + github.com/aws/aws-sdk-go-v2/service/opensearchserverless v1.26.4 // indirect + github.com/aws/aws-sdk-go-v2/service/organizations v1.45.3 // indirect + github.com/aws/aws-sdk-go-v2/service/osis v1.20.2 // indirect + github.com/aws/aws-sdk-go-v2/service/outposts v1.57.0 // indirect + github.com/aws/aws-sdk-go-v2/service/paymentcryptography v1.25.2 // indirect + github.com/aws/aws-sdk-go-v2/service/pcaconnectorad v1.15.6 // indirect + github.com/aws/aws-sdk-go-v2/service/pcs v1.14.0 // indirect + github.com/aws/aws-sdk-go-v2/service/pinpoint v1.39.6 // indirect + github.com/aws/aws-sdk-go-v2/service/pinpointsmsvoicev2 v1.25.5 // indirect + github.com/aws/aws-sdk-go-v2/service/pipes v1.23.5 // indirect + github.com/aws/aws-sdk-go-v2/service/polly v1.53.7 // indirect + github.com/aws/aws-sdk-go-v2/service/pricing v1.39.6 // indirect + github.com/aws/aws-sdk-go-v2/service/qbusiness v1.33.6 // indirect + github.com/aws/aws-sdk-go-v2/service/qldb v1.30.6 // indirect + github.com/aws/aws-sdk-go-v2/service/quicksight v1.95.0 
// indirect + github.com/aws/aws-sdk-go-v2/service/ram v1.34.6 // indirect + github.com/aws/aws-sdk-go-v2/service/rbin v1.26.6 // indirect + github.com/aws/aws-sdk-go-v2/service/rds v1.108.2 // indirect + github.com/aws/aws-sdk-go-v2/service/redshift v1.59.0 // indirect + github.com/aws/aws-sdk-go-v2/service/redshiftdata v1.37.6 // indirect + github.com/aws/aws-sdk-go-v2/service/redshiftserverless v1.31.8 // indirect + github.com/aws/aws-sdk-go-v2/service/rekognition v1.51.5 // indirect + github.com/aws/aws-sdk-go-v2/service/resiliencehub v1.34.6 // indirect + github.com/aws/aws-sdk-go-v2/service/resourceexplorer2 v1.22.0 // indirect + github.com/aws/aws-sdk-go-v2/service/resourcegroups v1.33.7 // indirect + github.com/aws/aws-sdk-go-v2/service/resourcegroupstaggingapi v1.30.6 // indirect + github.com/aws/aws-sdk-go-v2/service/rolesanywhere v1.21.6 // indirect + github.com/aws/aws-sdk-go-v2/service/route53 v1.58.4 // indirect + github.com/aws/aws-sdk-go-v2/service/route53domains v1.34.4 // indirect + github.com/aws/aws-sdk-go-v2/service/route53profiles v1.9.6 // indirect + github.com/aws/aws-sdk-go-v2/service/route53recoverycontrolconfig v1.31.7 // indirect + github.com/aws/aws-sdk-go-v2/service/route53recoveryreadiness v1.26.6 // indirect + github.com/aws/aws-sdk-go-v2/service/route53resolver v1.40.6 // indirect + github.com/aws/aws-sdk-go-v2/service/rum v1.28.7 // indirect + github.com/aws/aws-sdk-go-v2/service/s3 v1.88.4 // indirect + github.com/aws/aws-sdk-go-v2/service/s3control v1.66.2 // indirect + github.com/aws/aws-sdk-go-v2/service/s3outposts v1.33.6 // indirect + github.com/aws/aws-sdk-go-v2/service/s3tables v1.10.5 // indirect + github.com/aws/aws-sdk-go-v2/service/s3vectors v1.4.8 // indirect + github.com/aws/aws-sdk-go-v2/service/sagemaker v1.215.3 // indirect + github.com/aws/aws-sdk-go-v2/service/scheduler v1.17.5 // indirect + github.com/aws/aws-sdk-go-v2/service/schemas v1.33.5 // indirect + github.com/aws/aws-sdk-go-v2/service/secretsmanager 
v1.39.6 // indirect + github.com/aws/aws-sdk-go-v2/service/securityhub v1.64.4 // indirect + github.com/aws/aws-sdk-go-v2/service/securitylake v1.24.6 // indirect + github.com/aws/aws-sdk-go-v2/service/serverlessapplicationrepository v1.29.6 // indirect + github.com/aws/aws-sdk-go-v2/service/servicecatalog v1.38.6 // indirect + github.com/aws/aws-sdk-go-v2/service/servicecatalogappregistry v1.35.6 // indirect + github.com/aws/aws-sdk-go-v2/service/servicediscovery v1.39.9 // indirect + github.com/aws/aws-sdk-go-v2/service/servicequotas v1.33.0 // indirect + github.com/aws/aws-sdk-go-v2/service/ses v1.34.5 // indirect + github.com/aws/aws-sdk-go-v2/service/sesv2 v1.53.5 // indirect + github.com/aws/aws-sdk-go-v2/service/sfn v1.39.6 // indirect + github.com/aws/aws-sdk-go-v2/service/shield v1.34.6 // indirect + github.com/aws/aws-sdk-go-v2/service/signer v1.31.6 // indirect + github.com/aws/aws-sdk-go-v2/service/sns v1.38.5 // indirect + github.com/aws/aws-sdk-go-v2/service/sqs v1.42.8 // indirect + github.com/aws/aws-sdk-go-v2/service/ssm v1.65.1 // indirect + github.com/aws/aws-sdk-go-v2/service/ssmcontacts v1.30.8 // indirect + github.com/aws/aws-sdk-go-v2/service/ssmincidents v1.39.5 // indirect + github.com/aws/aws-sdk-go-v2/service/ssmquicksetup v1.8.6 // indirect + github.com/aws/aws-sdk-go-v2/service/ssmsap v1.25.5 // indirect + github.com/aws/aws-sdk-go-v2/service/sso v1.29.6 // indirect + github.com/aws/aws-sdk-go-v2/service/ssoadmin v1.36.2 // indirect + github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.1 // indirect + github.com/aws/aws-sdk-go-v2/service/storagegateway v1.42.7 // indirect + github.com/aws/aws-sdk-go-v2/service/sts v1.38.6 // indirect + github.com/aws/aws-sdk-go-v2/service/swf v1.32.5 // indirect + github.com/aws/aws-sdk-go-v2/service/synthetics v1.41.0 // indirect + github.com/aws/aws-sdk-go-v2/service/taxsettings v1.16.6 // indirect + github.com/aws/aws-sdk-go-v2/service/timestreaminfluxdb v1.17.0 // indirect + 
github.com/aws/aws-sdk-go-v2/service/timestreamquery v1.35.5 // indirect + github.com/aws/aws-sdk-go-v2/service/timestreamwrite v1.35.5 // indirect + github.com/aws/aws-sdk-go-v2/service/transcribe v1.53.0 // indirect + github.com/aws/aws-sdk-go-v2/service/transfer v1.67.0 // indirect + github.com/aws/aws-sdk-go-v2/service/verifiedpermissions v1.29.5 // indirect + github.com/aws/aws-sdk-go-v2/service/vpclattice v1.19.0 // indirect + github.com/aws/aws-sdk-go-v2/service/waf v1.30.5 // indirect + github.com/aws/aws-sdk-go-v2/service/wafregional v1.30.6 // indirect + github.com/aws/aws-sdk-go-v2/service/wafv2 v1.68.0 // indirect + github.com/aws/aws-sdk-go-v2/service/wellarchitected v1.39.6 // indirect + github.com/aws/aws-sdk-go-v2/service/workmail v1.36.4 // indirect + github.com/aws/aws-sdk-go-v2/service/workspaces v1.63.6 // indirect + github.com/aws/aws-sdk-go-v2/service/workspacesweb v1.32.6 // indirect + github.com/aws/aws-sdk-go-v2/service/xray v1.36.4 // indirect + github.com/aws/smithy-go v1.23.0 // indirect + github.com/beevik/etree v1.6.0 // indirect github.com/bgentry/speakeasy v0.1.0 // indirect - github.com/cedar-policy/cedar-go v0.1.0 // indirect + github.com/cedar-policy/cedar-go v1.2.6 // indirect github.com/cloudflare/circl v1.6.1 // indirect - github.com/davecgh/go-spew v1.1.1 // indirect + github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/fatih/color v1.18.0 // indirect github.com/gertd/go-pluralize v0.2.1 // indirect - github.com/go-logr/logr v1.4.2 // indirect + github.com/go-logr/logr v1.4.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect + github.com/goccy/go-yaml v1.18.0 // indirect github.com/golang/protobuf v1.5.4 // indirect github.com/google/go-cmp v0.7.0 // indirect github.com/google/uuid v1.6.0 // indirect github.com/hashicorp/aws-cloudformation-resource-schema-sdk-go v0.23.0 // indirect - github.com/hashicorp/aws-sdk-go-base/v2 v2.0.0-beta.65 // indirect + 
github.com/hashicorp/aws-sdk-go-base/v2 v2.0.0-beta.67 // indirect github.com/hashicorp/awspolicyequivalence v1.7.0 // indirect github.com/hashicorp/cli v1.1.7 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect @@ -307,27 +313,26 @@ require ( github.com/hashicorp/go-cty v1.5.0 // indirect github.com/hashicorp/go-hclog v1.6.3 // indirect github.com/hashicorp/go-multierror v1.1.1 // indirect - github.com/hashicorp/go-plugin v1.6.3 // indirect + github.com/hashicorp/go-plugin v1.7.0 // indirect github.com/hashicorp/go-retryablehttp v0.7.7 // indirect github.com/hashicorp/go-uuid v1.0.3 // indirect github.com/hashicorp/go-version v1.7.0 // indirect github.com/hashicorp/hc-install v0.9.2 // indirect - github.com/hashicorp/hcl/v2 v2.23.0 // indirect + github.com/hashicorp/hcl/v2 v2.24.0 // indirect github.com/hashicorp/logutils v1.0.0 // indirect - github.com/hashicorp/terraform-exec v0.23.0 // indirect - github.com/hashicorp/terraform-json v0.25.0 // indirect - github.com/hashicorp/terraform-plugin-framework v1.15.0 // indirect + github.com/hashicorp/terraform-exec v0.24.0 // indirect + github.com/hashicorp/terraform-json v0.27.2 // indirect + github.com/hashicorp/terraform-plugin-framework v1.16.1 // indirect github.com/hashicorp/terraform-plugin-framework-jsontypes v0.2.0 // indirect - github.com/hashicorp/terraform-plugin-framework-timeouts v0.5.0 // indirect + github.com/hashicorp/terraform-plugin-framework-timeouts v0.6.0 // indirect github.com/hashicorp/terraform-plugin-framework-timetypes v0.5.0 // indirect - github.com/hashicorp/terraform-plugin-framework-validators v0.18.0 // indirect - github.com/hashicorp/terraform-plugin-go v0.28.0 // indirect + github.com/hashicorp/terraform-plugin-framework-validators v0.19.0 // indirect + github.com/hashicorp/terraform-plugin-go v0.29.0 // indirect github.com/hashicorp/terraform-plugin-log v0.9.0 // indirect - github.com/hashicorp/terraform-plugin-mux v0.20.0 // indirect - 
github.com/hashicorp/terraform-plugin-testing v1.13.2 // indirect - github.com/hashicorp/terraform-registry-address v0.2.5 // indirect + github.com/hashicorp/terraform-plugin-testing v1.14.0-beta.1.0.20251013071646-7ed2ee242705 // indirect + github.com/hashicorp/terraform-registry-address v0.4.0 // indirect github.com/hashicorp/terraform-svchost v0.1.1 // indirect - github.com/hashicorp/yamux v0.1.1 // indirect + github.com/hashicorp/yamux v0.1.2 // indirect github.com/huandu/xstrings v1.3.3 // indirect github.com/imdario/mergo v0.3.15 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect @@ -337,10 +342,10 @@ require ( github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/go-homedir v1.1.0 // indirect github.com/mitchellh/go-testing-interface v1.14.1 // indirect - github.com/mitchellh/go-wordwrap v1.0.0 // indirect + github.com/mitchellh/go-wordwrap v1.0.1 // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect - github.com/oklog/run v1.0.0 // indirect + github.com/oklog/run v1.1.0 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/posener/complete v1.2.3 // indirect github.com/shopspring/decimal v1.4.0 // indirect @@ -351,25 +356,25 @@ require ( github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f // indirect github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect github.com/xeipuuv/gojsonschema v1.2.0 // indirect - github.com/zclconf/go-cty v1.16.3 // indirect - go.opentelemetry.io/auto/sdk v1.1.0 // indirect - go.opentelemetry.io/contrib/instrumentation/github.com/aws/aws-sdk-go-v2/otelaws v0.61.0 // indirect - go.opentelemetry.io/otel v1.36.0 // indirect - go.opentelemetry.io/otel/metric v1.36.0 // indirect - go.opentelemetry.io/otel/trace v1.36.0 // indirect - golang.org/x/crypto v0.39.0 // indirect - golang.org/x/mod v0.25.0 // indirect - golang.org/x/net v0.41.0 // indirect - golang.org/x/sync v0.16.0 // indirect - 
golang.org/x/sys v0.33.0 // indirect - golang.org/x/text v0.27.0 // indirect - golang.org/x/tools v0.34.0 // indirect + github.com/zclconf/go-cty v1.17.0 // indirect + go.opentelemetry.io/auto/sdk v1.2.1 // indirect + go.opentelemetry.io/contrib/instrumentation/github.com/aws/aws-sdk-go-v2/otelaws v0.63.0 // indirect + go.opentelemetry.io/otel v1.38.0 // indirect + go.opentelemetry.io/otel/metric v1.38.0 // indirect + go.opentelemetry.io/otel/trace v1.38.0 // indirect + golang.org/x/crypto v0.43.0 // indirect + golang.org/x/exp v0.0.0-20240222234643-814bf88cf225 // indirect + golang.org/x/mod v0.29.0 // indirect + golang.org/x/net v0.46.0 // indirect + golang.org/x/sync v0.17.0 // indirect + golang.org/x/sys v0.37.0 // indirect + golang.org/x/text v0.30.0 // indirect + golang.org/x/tools v0.38.0 // indirect google.golang.org/appengine v1.6.8 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20250218202821-56aae31c358a // indirect - google.golang.org/grpc v1.72.1 // indirect - google.golang.org/protobuf v1.36.6 // indirect - gopkg.in/dnaeon/go-vcr.v4 v4.0.4 // indirect - gopkg.in/yaml.v3 v3.0.1 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20250804133106-a7a43d27e69b // indirect + google.golang.org/grpc v1.75.1 // indirect + google.golang.org/protobuf v1.36.9 // indirect + gopkg.in/dnaeon/go-vcr.v4 v4.0.5 // indirect ) replace github.com/hashicorp/terraform-provider-aws => ../.. 
diff --git a/tools/tfsdk2fw/go.sum b/tools/tfsdk2fw/go.sum index 6be13a63a5e7..6cf8ae58fe55 100644 --- a/tools/tfsdk2fw/go.sum +++ b/tools/tfsdk2fw/go.sum @@ -14,564 +14,577 @@ github.com/YakDriver/go-version v0.1.0 h1:/x+Xg2+l89Mjtxl0VRf2+ue8cnHkw6jfYv49j6 github.com/YakDriver/go-version v0.1.0/go.mod h1:LXwFAp1E3KBhS7FHO/FE8r3XCmvKizs/VXXXFWfoSYY= github.com/YakDriver/regexache v0.24.0 h1:zUKaixelkswzdqsqPc2sveiV//Mi/msJn0teG8zBDiA= github.com/YakDriver/regexache v0.24.0/go.mod h1:awcd8uBj614F3ScW06JqlfSGqq2/7vdJHy+RiKzVC+g= -github.com/agext/levenshtein v1.2.2 h1:0S/Yg6LYmFJ5stwQeRp6EeOcCbj7xiqQSdNelsXvaqE= -github.com/agext/levenshtein v1.2.2/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= +github.com/YakDriver/smarterr v0.6.0 h1:BFJ09GTAVcGfyzMUk7/yiS0rBEPXTzUxpP67bbyVLoo= +github.com/YakDriver/smarterr v0.6.0/go.mod h1:Sg1LUzBronueGfhn2yalB2iVMXl24TIGam/mS5cZh5c= +github.com/agext/levenshtein v1.2.3 h1:YB2fHEn0UJagG8T1rrWknE3ZQzWM06O8AMAatNn7lmo= +github.com/agext/levenshtein v1.2.3/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= github.com/apparentlymart/go-textseg/v12 v12.0.0/go.mod h1:S/4uRK2UtaQttw1GenVJEynmyUenKwP++x/+DdGV/Ec= github.com/apparentlymart/go-textseg/v15 v15.0.0 h1:uYvfpb3DyLSCGWnctWKGj857c6ew1u1fNQOlOtuGxQY= github.com/apparentlymart/go-textseg/v15 v15.0.0/go.mod h1:K8XmNZdhEBkdlyDdvbmmsvpAG721bKi0joRfFdHIWJ4= github.com/armon/go-radix v1.0.0 h1:F4z6KzEeeQIMeLFa97iZU6vupzoecKdU5TX24SNppXI= github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= -github.com/aws/aws-sdk-go-v2 v1.36.5 h1:0OF9RiEMEdDdZEMqF9MRjevyxAQcf6gY+E7vwBILFj0= -github.com/aws/aws-sdk-go-v2 v1.36.5/go.mod h1:EYrzvCCN9CMUTa5+6lf6MM4tq3Zjp8UhSGR/cBsjai0= -github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.11 h1:12SpdwU8Djs+YGklkinSSlcrPyj3H4VifVsKf78KbwA= -github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.11/go.mod h1:dd+Lkp6YmMryke+qxW/VnKyhMBDTYP41Q2Bb+6gNZgY= -github.com/aws/aws-sdk-go-v2/config v1.29.17 
h1:jSuiQ5jEe4SAMH6lLRMY9OVC+TqJLP5655pBGjmnjr0= -github.com/aws/aws-sdk-go-v2/config v1.29.17/go.mod h1:9P4wwACpbeXs9Pm9w1QTh6BwWwJjwYvJ1iCt5QbCXh8= -github.com/aws/aws-sdk-go-v2/credentials v1.17.70 h1:ONnH5CM16RTXRkS8Z1qg7/s2eDOhHhaXVd72mmyv4/0= -github.com/aws/aws-sdk-go-v2/credentials v1.17.70/go.mod h1:M+lWhhmomVGgtuPOhO85u4pEa3SmssPTdcYpP/5J/xc= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.32 h1:KAXP9JSHO1vKGCr5f4O6WmlVKLFFXgWYAGoJosorxzU= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.32/go.mod h1:h4Sg6FQdexC1yYG9RDnOvLbW1a/P986++/Y/a+GyEM8= -github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.83 h1:08otkOELsIi0toRRGMytlJhOctcN8xfKfKFR2NXz3kE= -github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.83/go.mod h1:dGsGb2wI8JDWeMAhjVPP+z+dqvYjL6k6o+EujcRNk5c= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.36 h1:SsytQyTMHMDPspp+spo7XwXTP44aJZZAC7fBV2C5+5s= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.36/go.mod h1:Q1lnJArKRXkenyog6+Y+zr7WDpk4e6XlR6gs20bbeNo= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.36 h1:i2vNHQiXUvKhs3quBR6aqlgJaiaexz/aNvdCktW/kAM= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.36/go.mod h1:UdyGa7Q91id/sdyHPwth+043HhmP6yP9MBHgbZM0xo8= +github.com/aws/aws-sdk-go-v2 v1.39.2 h1:EJLg8IdbzgeD7xgvZ+I8M1e0fL0ptn/M47lianzth0I= +github.com/aws/aws-sdk-go-v2 v1.39.2/go.mod h1:sDioUELIUO9Znk23YVmIk86/9DOpkbyyVb1i/gUNFXY= +github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.1 h1:i8p8P4diljCr60PpJp6qZXNlgX4m2yQFpYk+9ZT+J4E= +github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.1/go.mod h1:ddqbooRZYNoJ2dsTwOty16rM+/Aqmk/GOXrK8cg7V00= +github.com/aws/aws-sdk-go-v2/config v1.31.12 h1:pYM1Qgy0dKZLHX2cXslNacbcEFMkDMl+Bcj5ROuS6p8= +github.com/aws/aws-sdk-go-v2/config v1.31.12/go.mod h1:/MM0dyD7KSDPR+39p9ZNVKaHDLb9qnfDurvVS2KAhN8= +github.com/aws/aws-sdk-go-v2/credentials v1.18.16 h1:4JHirI4zp958zC026Sm+V4pSDwW4pwLefKrc0bF2lwI= +github.com/aws/aws-sdk-go-v2/credentials v1.18.16/go.mod 
h1:qQMtGx9OSw7ty1yLclzLxXCRbrkjWAM7JnObZjmCB7I= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.9 h1:Mv4Bc0mWmv6oDuSWTKnk+wgeqPL5DRFu5bQL9BGPQ8Y= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.9/go.mod h1:IKlKfRppK2a1y0gy1yH6zD+yX5uplJ6UuPlgd48dJiQ= +github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.19.12 h1:ofHawDLJTI6ytDIji+g4dXQ6u2idzTb04tDlN9AS614= +github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.19.12/go.mod h1:f5pL4iLDfbcxj1SZcdRdIokBB5eHbuYPS/Fs9DwUPRQ= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.9 h1:se2vOWGD3dWQUtfn4wEjRQJb1HK1XsNIt825gskZ970= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.9/go.mod h1:hijCGH2VfbZQxqCDN7bwz/4dzxV+hkyhjawAtdPWKZA= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.9 h1:6RBnKZLkJM4hQ+kN6E7yWFveOTg8NLPHAkqrs4ZPlTU= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.9/go.mod h1:V9rQKRmK7AWuEsOMnHzKj8WyrIir1yUJbZxDuZLFvXI= github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 h1:bIqFDwgGXXN1Kpp99pDOdKMTTb5d2KyU5X/BZxjOkRo= github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3/go.mod h1:H5O/EsxDWyU+LP/V8i5sm8cxoZgc2fdNR9bxlOFrQTo= -github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.36 h1:GMYy2EOWfzdP3wfVAGXBNKY5vK4K8vMET4sYOYltmqs= -github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.36/go.mod h1:gDhdAV6wL3PmPqBhiPbnlS447GoWs8HTTOYef9/9Inw= -github.com/aws/aws-sdk-go-v2/service/accessanalyzer v1.40.0 h1:xYryxpwtCZxukhjSd0O26zT3CbGDlzoYFBWqY0DoK3A= -github.com/aws/aws-sdk-go-v2/service/accessanalyzer v1.40.0/go.mod h1:mwjv8LM1RN5WJNOPTKspM0AnCxFoTjMopGI19k0Hb4k= -github.com/aws/aws-sdk-go-v2/service/account v1.24.2 h1:1ItkqDExKIDsS8NoIBq7OxQOJnQNOVjC25CYa9RzOos= -github.com/aws/aws-sdk-go-v2/service/account v1.24.2/go.mod h1:NShtay87juyMTb3c6bHN6Bai5dUFmTX7NzURY4/Jyb0= -github.com/aws/aws-sdk-go-v2/service/acm v1.33.0 h1:Z3MHBWR1KiviwaAiG7MTPB6T5gLYRPhUECuKLgltCwA= -github.com/aws/aws-sdk-go-v2/service/acm v1.33.0/go.mod h1:t3jPqKBnySV3qsU40cj1TWleOYx5vyz1xBeZiplAVcs= 
-github.com/aws/aws-sdk-go-v2/service/acmpca v1.40.5 h1:wO4AWPJlnLRbLgQnrVKG/HTy9qDCxFVMjPFkqr2IKRA= -github.com/aws/aws-sdk-go-v2/service/acmpca v1.40.5/go.mod h1:Jhu06Hov5+oM1+zkhDGCZBp8yoVCSiFHSnkSC0KIzDs= -github.com/aws/aws-sdk-go-v2/service/amp v1.34.3 h1:xH65YCH77WzkxqdzDl6PfX2TaYK/8YiZwy6UqNkFkv4= -github.com/aws/aws-sdk-go-v2/service/amp v1.34.3/go.mod h1:SulhOciRP/ZvQQdU9cNuE9OAfnD7+itzfKPiyBx0I1I= -github.com/aws/aws-sdk-go-v2/service/amplify v1.33.3 h1:6rZkMM5S/fSnIP02Q/paqszlyp/kKNhl+hHV9WuuH7I= -github.com/aws/aws-sdk-go-v2/service/amplify v1.33.3/go.mod h1:Ir47WZbig8znnUdUx5YPxwjt92xXZSQKu2+Y+NjGzBM= -github.com/aws/aws-sdk-go-v2/service/apigateway v1.31.4 h1:XFKyI5HLJwV0HBKuUTIE19yaKHOvgZK/sDSj3HmE8dM= -github.com/aws/aws-sdk-go-v2/service/apigateway v1.31.4/go.mod h1:b7jjY+ZgE+CzV8iX9d2ose6aPKkpA7a7RIi9mHEFlqM= -github.com/aws/aws-sdk-go-v2/service/apigatewayv2 v1.28.4 h1:H4WoC79VAg7e5PrK6ta1ua7aNg5bj6JKrWRL45hAawA= -github.com/aws/aws-sdk-go-v2/service/apigatewayv2 v1.28.4/go.mod h1:NomAJQ/SaEj3KlzfxI4V8y3CJNv1Mr2ynTv7lbYePp0= -github.com/aws/aws-sdk-go-v2/service/appconfig v1.38.3 h1:tjAPEEHH7V7YX7fxdklhs9Vg9K8aXBosKutnRPrhYKY= -github.com/aws/aws-sdk-go-v2/service/appconfig v1.38.3/go.mod h1:NiWNkf2XdzzN6fWWwB6RtHqmT9SoFCXQJU9zg7tS5TE= -github.com/aws/aws-sdk-go-v2/service/appfabric v1.12.4 h1:NOpFPNcu8Ao3Sqk+zJ6R92Zv7MUQ4xed5aqrauFlOBs= -github.com/aws/aws-sdk-go-v2/service/appfabric v1.12.4/go.mod h1:wRubXIGmzEbl2uPpPX/BZ6Tm/BxCtkXhUirkj0Q1F+A= -github.com/aws/aws-sdk-go-v2/service/appflow v1.46.4 h1:7B2B/QGEXHG4ayH9CgmVd7z+pHQtNGHfVx0T0TyHBCs= -github.com/aws/aws-sdk-go-v2/service/appflow v1.46.4/go.mod h1:EmHkVIWbPmvl3mvSOo/TF0DjSGFZ8+Db7aKiqhM8XIc= -github.com/aws/aws-sdk-go-v2/service/appintegrations v1.31.4 h1:AWrTD+eNmKOU1J7KV8TS3w+B9ZYdl7eVBOegEeVGlyY= -github.com/aws/aws-sdk-go-v2/service/appintegrations v1.31.4/go.mod h1:lrw4VUA85885klz/SHqwyu0A2V70w9kOH3LZdEuskj8= -github.com/aws/aws-sdk-go-v2/service/applicationautoscaling v1.36.4 
h1:JetyQYju/+q33qzbNAiuHVIX4zB/AX9nM65qD+eLKM8= -github.com/aws/aws-sdk-go-v2/service/applicationautoscaling v1.36.4/go.mod h1:T38DTrOzItEr+LJap6BHKrWN8wBrLP44+n/JY0wC2xI= -github.com/aws/aws-sdk-go-v2/service/applicationinsights v1.30.6 h1:wOKS3lH9adXnOPg4VJ0AQ56tmmcTO40WTgkHk1F9kJE= -github.com/aws/aws-sdk-go-v2/service/applicationinsights v1.30.6/go.mod h1:FEqLE3bBOwq2nE4NtVKUljFYcLTc6tVjYAOvDtWXKb4= -github.com/aws/aws-sdk-go-v2/service/applicationsignals v1.11.3 h1:qV6rPSVsIReOn1DTrvC0wi7rlG/IbQmEJQ//0DijU5A= -github.com/aws/aws-sdk-go-v2/service/applicationsignals v1.11.3/go.mod h1:EGKmN5VSpsjvJad12akh86dbFu/YoRa0qFiWzcPnXIk= -github.com/aws/aws-sdk-go-v2/service/appmesh v1.30.4 h1:1TT/4BO285m66cH5vOExvqvvaW/EpP4VngGw7xEvaGc= -github.com/aws/aws-sdk-go-v2/service/appmesh v1.30.4/go.mod h1:jFygkUlz2jEVPPQAq4OSqTTKjt20qx9N/5eR/gnyD7k= -github.com/aws/aws-sdk-go-v2/service/apprunner v1.34.2 h1:ZEkJkUCPdXrL3JOTpa3DuB879AtP5tNF/8i8415A8fY= -github.com/aws/aws-sdk-go-v2/service/apprunner v1.34.2/go.mod h1:p4kYzg6Gb1uqNc7m9/qB4aDycggCAv9mfFXX15S805U= -github.com/aws/aws-sdk-go-v2/service/appstream v1.45.5 h1:BuHTCRVfEACQ9YDVYHLiqEW7LWypFdcPAH07icAmgo0= -github.com/aws/aws-sdk-go-v2/service/appstream v1.45.5/go.mod h1:Kdkrr6TbMceLxOiRDJ6L1hdbv1/GuzGENPxylMzffcw= -github.com/aws/aws-sdk-go-v2/service/appsync v1.47.3 h1:Jc3/7ZWo4pjNhKp0B0WD4Av5QOMaJj6Xqzg0y0l6deA= -github.com/aws/aws-sdk-go-v2/service/appsync v1.47.3/go.mod h1:id62qP6jzhg3NWQ5zfBf12omt9Rm3yEcwI1rtj7+wbE= -github.com/aws/aws-sdk-go-v2/service/athena v1.51.3 h1:4X2/0GQiQBlAE9sGGKnouUI3yjtf9A/uTo7VPjD9/6c= -github.com/aws/aws-sdk-go-v2/service/athena v1.51.3/go.mod h1:q8KLas6BtgGYm695nQxAjFJvqRoj8Qcpig1291KQWok= -github.com/aws/aws-sdk-go-v2/service/auditmanager v1.39.2 h1:Pye3If+Jpe58EwCzH+CJZnqGK39w7nSAdBl+BNVv6qs= -github.com/aws/aws-sdk-go-v2/service/auditmanager v1.39.2/go.mod h1:zfdQum9cKCPEWF8g8CXfJgFZXJ/+QbvhXvesWOm9WnE= -github.com/aws/aws-sdk-go-v2/service/autoscaling v1.54.0 
h1:0BmpSm5x2rpB9D2K2OAoOc1cZTUJpw1OiQj86ZT8RTg= -github.com/aws/aws-sdk-go-v2/service/autoscaling v1.54.0/go.mod h1:6U/Xm5bBkZGCTxH3NE9+hPKEpCFCothGn/gwytsr1Mk= -github.com/aws/aws-sdk-go-v2/service/autoscalingplans v1.25.4 h1:V//LfMnazbS3Zh1O7rWL3v92yQW0kBpIXlkKGEV1Fmw= -github.com/aws/aws-sdk-go-v2/service/autoscalingplans v1.25.4/go.mod h1:jUiTKxG/so4swtdvfxlKgdEESCAZ1RDWIfyn3DrUVMk= -github.com/aws/aws-sdk-go-v2/service/backup v1.43.1 h1:IWL4JnLGXSFE094fHbveF/Lm+zYgBdoD0zBelyKRKII= -github.com/aws/aws-sdk-go-v2/service/backup v1.43.1/go.mod h1:qDBAiArrJPrmcHvpgCQ4lhM5zV/sf0Iou7nP7Zm2mc8= -github.com/aws/aws-sdk-go-v2/service/batch v1.53.0 h1:uf+Mr9I0l5Eo3aTaunHTJsfTnewLvzqGRPG4DrYabv8= -github.com/aws/aws-sdk-go-v2/service/batch v1.53.0/go.mod h1:3kzOFBSr7kWjiPQFZPqanUTxFwdMiA5UFe/O4NN7fsI= -github.com/aws/aws-sdk-go-v2/service/bcmdataexports v1.8.4 h1:BjeegkJ3Ha6VlzhQdqxViNIUkJNi6seZwHp5pqpYHaI= -github.com/aws/aws-sdk-go-v2/service/bcmdataexports v1.8.4/go.mod h1:0Rs3YH1xh3qTgiy0VP+UR6GibZUVATPAtvr3n58b3d4= -github.com/aws/aws-sdk-go-v2/service/bedrock v1.38.0 h1:wBlJMfquOKOMdSzZezhtzoTuVXc8kkkteymE/bBEXcg= -github.com/aws/aws-sdk-go-v2/service/bedrock v1.38.0/go.mod h1:1GlpVDmL9pBaVwNfgPXR3zuJhhXtNOZoiBa16pNbINY= -github.com/aws/aws-sdk-go-v2/service/bedrockagent v1.44.2 h1:gedxMyluRPy1ENN1dlOM7rK8Jek1wUvpA9z1Cz2s9N4= -github.com/aws/aws-sdk-go-v2/service/bedrockagent v1.44.2/go.mod h1:8zZaELHNLx6LNNfMrzCtVVsOFFKP1905FKmsSFuhArM= -github.com/aws/aws-sdk-go-v2/service/billing v1.2.4 h1:QqtOYdXXtghWbPemcCf7x8y/CWlN950/1eRd13EpKuE= -github.com/aws/aws-sdk-go-v2/service/billing v1.2.4/go.mod h1:mP5IsfmMZhkwpGdQm2DKsU5elbGTizrO3vK98LG0vWc= -github.com/aws/aws-sdk-go-v2/service/budgets v1.31.2 h1:ZdjYaUVxxQeWZ5BoU82dF7BpUhNfmha11ya8K9AiPoc= -github.com/aws/aws-sdk-go-v2/service/budgets v1.31.2/go.mod h1:LnxG/U78Q4uws9jS+a9sTwV8OVTWzfsXuBIaAfwksyM= -github.com/aws/aws-sdk-go-v2/service/chatbot v1.10.4 h1:bq7jZuszo3+COUXlDbeiOnWXfRZGzJcNAZzpjEguBow= 
-github.com/aws/aws-sdk-go-v2/service/chatbot v1.10.4/go.mod h1:IDmqb/P9NQISRL+1vrUskvUaTOo7SaEyULTLp5QZbhc= -github.com/aws/aws-sdk-go-v2/service/chime v1.36.4 h1:RvqaquFRY71C0col7ydmbqmJsqBFpybWRsklPwOcIA0= -github.com/aws/aws-sdk-go-v2/service/chime v1.36.4/go.mod h1:BqpFNKJNnpT9huL8gCdIQpzeZi2+FK/Y5DoyQkDl+C0= -github.com/aws/aws-sdk-go-v2/service/chimesdkmediapipelines v1.22.4 h1:AXoWCQp+YYKsAX1FcUm5WOXhC9KNodEhjB2xuRc/i2E= -github.com/aws/aws-sdk-go-v2/service/chimesdkmediapipelines v1.22.4/go.mod h1:VK80ksSTmSe1wU33aY0E47R2A2I6v7Zyi4sgn94d9F4= -github.com/aws/aws-sdk-go-v2/service/chimesdkvoice v1.22.2 h1:FvJ0+3o1j/k8OejpUK/19BhyuoKlWS67n/hqzyhINfU= -github.com/aws/aws-sdk-go-v2/service/chimesdkvoice v1.22.2/go.mod h1:SXGQ5hmMJzWRJt1Mu3s6x15eldRft+xErnAL6CDBC0U= -github.com/aws/aws-sdk-go-v2/service/cleanrooms v1.25.2 h1:TN80R+dUKMq7xgqgbclW/uBPdgo4zoGJ4uVdzNBgwQo= -github.com/aws/aws-sdk-go-v2/service/cleanrooms v1.25.2/go.mod h1:81twhtDcStPNYEh9XCp89TyaTjq+4ciPUgSWEoVxpgM= -github.com/aws/aws-sdk-go-v2/service/cloud9 v1.29.4 h1:bIyRLJ+QVAE1GPI+9XBGpP1rRKKbHL4oUMOVw/EdUBs= -github.com/aws/aws-sdk-go-v2/service/cloud9 v1.29.4/go.mod h1:gdFyMvML9BinbLiHs795bR9rKRHTKxNsOCLfbDFIzB4= -github.com/aws/aws-sdk-go-v2/service/cloudcontrol v1.24.6 h1:ZTDJc/sruFHYXaTr4aNwuHEykFtjqT9hcFFDQceSlAs= -github.com/aws/aws-sdk-go-v2/service/cloudcontrol v1.24.6/go.mod h1:QarpKg2UqElY6gtj2Z3CFbJqP8Wmq//w0LwudfpY69w= -github.com/aws/aws-sdk-go-v2/service/cloudformation v1.61.0 h1:1nVq2bvAANTPAfipKBOtbP1ebqTpJrOsxNqwb6ybCG8= -github.com/aws/aws-sdk-go-v2/service/cloudformation v1.61.0/go.mod h1:xU79X14UC0F8sEJCRTWwINzlQ4jacpEFpRESLHRHfoY= -github.com/aws/aws-sdk-go-v2/service/cloudfront v1.46.3 h1:ULVZL6Ro+vqmXFVFgZ5Q92pqWnhJfwOnWlNtibQPnIs= -github.com/aws/aws-sdk-go-v2/service/cloudfront v1.46.3/go.mod h1:vudWcTOLhQf4lzRH0qHUszJh8Gpo+Lp6dqH/HgVR9Xg= -github.com/aws/aws-sdk-go-v2/service/cloudfrontkeyvaluestore v1.9.4 h1:b/akD5kwvx/NPXgYMPnaaZ7HWlgrDLg9NatQ2Tc8wVk= 
-github.com/aws/aws-sdk-go-v2/service/cloudfrontkeyvaluestore v1.9.4/go.mod h1:nAAHqFZISt7zseVgaPzYwMY4bbet/rTn/TFMYa3s6sU= -github.com/aws/aws-sdk-go-v2/service/cloudhsmv2 v1.30.5 h1:P5+wUNAOc2bjxIiQ+ZMVz/Mv5jirnh4nPI9VCLgvJUQ= -github.com/aws/aws-sdk-go-v2/service/cloudhsmv2 v1.30.5/go.mod h1:4MW0k8bmDdC8VHJf5Vxhp5zLXnvkDRERvfiEvXZDnoM= -github.com/aws/aws-sdk-go-v2/service/cloudsearch v1.27.4 h1:kJ2Sa4VsJoaPg1vQCFL91N/ZjMzzbEyo7CG6bgzCkbI= -github.com/aws/aws-sdk-go-v2/service/cloudsearch v1.27.4/go.mod h1:kbxooYiqH9It+k1z+iLiTKlompLUQmEgZY5sv9txU8Q= -github.com/aws/aws-sdk-go-v2/service/cloudtrail v1.49.3 h1:wSQwBOXa1EV81WiVWLZ8fCrJ7wlwcfqSexEiv9OjPrA= -github.com/aws/aws-sdk-go-v2/service/cloudtrail v1.49.3/go.mod h1:5N4LfimBXTCtqKr0tZKfcte5UswFb7SJZV+LiQUZsGk= -github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.45.3 h1:Nn3qce+OHZuMj/edx4its32uxedAmquCDxtZkrdeiD4= -github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.45.3/go.mod h1:aqsLGsPs+rJfwDBwWHLcIV8F7AFcikFTPLwUD4RwORQ= -github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs v1.51.0 h1:e5cbPZYTIY2nUEFieZUfVdINOiCTvChOMPfdLnmiLzs= -github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs v1.51.0/go.mod h1:UseIHRfrm7PqeZo6fcTb6FUCXzCnh1KJbQbmOfxArGM= -github.com/aws/aws-sdk-go-v2/service/codeartifact v1.34.4 h1:8E5noXcMI3cNsX1hcx/ORW6mtla6usxz4BcW1q+zheE= -github.com/aws/aws-sdk-go-v2/service/codeartifact v1.34.4/go.mod h1:8bXExDA212G0tJkUYMcxcFhsqcM+jSBtsmOugZe2j7o= -github.com/aws/aws-sdk-go-v2/service/codebuild v1.61.2 h1:efAyxbfGzzswonfsjj3porKv6Q1H98SOHdlZ6hF2NI4= -github.com/aws/aws-sdk-go-v2/service/codebuild v1.61.2/go.mod h1:THLcsyok0+f2SaN7/QZ7tlzNseoF1YB7PJuGc3yd3EQ= -github.com/aws/aws-sdk-go-v2/service/codecatalyst v1.17.21 h1:0jz43AWY1USrCZwMzxHOIfmoXy7M2ZJRaqCr56x/Rvc= -github.com/aws/aws-sdk-go-v2/service/codecatalyst v1.17.21/go.mod h1:ic53zDsOvg3DF95EpLCTeR4hf6Oxt6Dz6P9WQ3cvUvw= -github.com/aws/aws-sdk-go-v2/service/codecommit v1.28.4 h1:DyOb/MZoTswNwFhg55VR1rvLkn1S55T7q+P8EuR+A7M= 
-github.com/aws/aws-sdk-go-v2/service/codecommit v1.28.4/go.mod h1:PB41jkDc903DUreLzzJBB/rabkQqriNqPtv1L9vAIOI= -github.com/aws/aws-sdk-go-v2/service/codeconnections v1.6.4 h1:j1FZyc3Oj7W3dWgmO4cbtOOkCaixavGotkPnoZqrixQ= -github.com/aws/aws-sdk-go-v2/service/codeconnections v1.6.4/go.mod h1:b3xHt4pnrpRyj1i75f8gU3vUy4UKLCbatXjcNZdbB38= -github.com/aws/aws-sdk-go-v2/service/codedeploy v1.30.6 h1:A74AkCwB8DsBeJ9DVLtLif2nGuTiHGdZMOeo2yKsyB0= -github.com/aws/aws-sdk-go-v2/service/codedeploy v1.30.6/go.mod h1:wjqakZxOg31qrJsrwpkvUoELRhfSNToa8SA1u7PdSxU= -github.com/aws/aws-sdk-go-v2/service/codeguruprofiler v1.25.4 h1:gWXiqaKkd6fRF1qOs5DL0ME1cRep4KNAAGGc5J5Lw3Y= -github.com/aws/aws-sdk-go-v2/service/codeguruprofiler v1.25.4/go.mod h1:wp/JLha/UGGGklH6qYjzIrQWGM+ewdlrXlwCmi0JbOM= -github.com/aws/aws-sdk-go-v2/service/codegurureviewer v1.30.4 h1:BJqh9+QCaB74sJmi4KpCqrrqV/exeG+gA6hvLRchH6E= -github.com/aws/aws-sdk-go-v2/service/codegurureviewer v1.30.4/go.mod h1:iINMrnaDsPf5UwOXacV+xFBgXphzT2yvdSMBzbOlk4g= -github.com/aws/aws-sdk-go-v2/service/codepipeline v1.42.2 h1:IYZ2Prn/aHOGB9GRj7hS7GVHMtRTb/4wiDI5mf326GE= -github.com/aws/aws-sdk-go-v2/service/codepipeline v1.42.2/go.mod h1:RgaoO5gg3Pp1se22UalAX6oTusJgdlKwMOfMo/lObgw= -github.com/aws/aws-sdk-go-v2/service/codestarconnections v1.30.4 h1:wIFcc7VQQpPS15fXRM8WvTUmrYNP6vIjFSxTszDWPyo= -github.com/aws/aws-sdk-go-v2/service/codestarconnections v1.30.4/go.mod h1:vMiaujmCGuRMMx7k9LVHfr9M+4++LwDpVciiF362wDo= -github.com/aws/aws-sdk-go-v2/service/codestarnotifications v1.27.4 h1:E5SxPPUfnZYDoT765IjNVzhDHmLVvaQdhH/7kRm+ZJY= -github.com/aws/aws-sdk-go-v2/service/codestarnotifications v1.27.4/go.mod h1:7gyIYjHXPAOX3NERsiwOs4uPEtppi3C+PKgwSvrt9AY= -github.com/aws/aws-sdk-go-v2/service/cognitoidentity v1.29.6 h1:qAzPMhagtK5hAs9WWnnrWXkpYfVXBbrcrEO/al4wP7I= -github.com/aws/aws-sdk-go-v2/service/cognitoidentity v1.29.6/go.mod h1:Isbgk/cOSGoFwswAzibnEWm5lXXLOCWOTAxyKmMAOHk= -github.com/aws/aws-sdk-go-v2/service/cognitoidentityprovider 
v1.53.2 h1:3f3FZdZgMBMouhPizBI3i6EnpdyL3ttjObmvr+1kfzg= -github.com/aws/aws-sdk-go-v2/service/cognitoidentityprovider v1.53.2/go.mod h1:rwpoEr5M4DCNNxmXX75Ql5+KOW01DEvOE0KPo3iiNEs= -github.com/aws/aws-sdk-go-v2/service/comprehend v1.36.6 h1:rSAMOE0HndTsLBPnuh4YLm205D8+3W/7lwc9q6llhvE= -github.com/aws/aws-sdk-go-v2/service/comprehend v1.36.6/go.mod h1:0bQ8f9sR/AaJBBBnHO0lc7mREP8uqWGSXY6uY7GR37s= -github.com/aws/aws-sdk-go-v2/service/computeoptimizer v1.43.2 h1:eIHLQrO/u2P76oWA2m++l2sOTRNRrKRFKK189YO5XYY= -github.com/aws/aws-sdk-go-v2/service/computeoptimizer v1.43.2/go.mod h1:harX8fH+HCyhgvgzLgVjXomS2ZuQ9W7Mgcr11DXM41w= -github.com/aws/aws-sdk-go-v2/service/configservice v1.53.0 h1:lu97by/q8YJxGjEujMunX5Gel2tf2MfDkb7Rz26Lw1g= -github.com/aws/aws-sdk-go-v2/service/configservice v1.53.0/go.mod h1:BYXP4Mzkc+ki7WFebTIMvzP+2CPFqULpy5KlCPlVOO0= -github.com/aws/aws-sdk-go-v2/service/connect v1.131.0 h1:jNR9bUgK/ZLA5ymyoaGU/7XREyIz99Lx7PS6jMVFW9w= -github.com/aws/aws-sdk-go-v2/service/connect v1.131.0/go.mod h1:xU6tkVMTXQlkRdff/a3rB6RS/goEJjq7QJbQj2/tZO4= -github.com/aws/aws-sdk-go-v2/service/connectcases v1.26.0 h1:zd0G03x3Gsztv7g3P5OtuTVq8VrTCSidAzEsXy61/Ac= -github.com/aws/aws-sdk-go-v2/service/connectcases v1.26.0/go.mod h1:NjwcRfAn4H/Dbt+F6AHYpvpGSfj8ViI30SpL4L3danA= -github.com/aws/aws-sdk-go-v2/service/controltower v1.22.3 h1:C8FcMAc7DIsTGqvoNfhKtf8kCGCRGf+UFr/U/J8WcjQ= -github.com/aws/aws-sdk-go-v2/service/controltower v1.22.3/go.mod h1:maGRVPBBQenlVQo3oooIQ9rwJcrIjyqCKKZIGzxjhTk= -github.com/aws/aws-sdk-go-v2/service/costandusagereportservice v1.29.4 h1:G96u5BhFFCwr1o0jmn/9pG4uqWFs1jbMX78BzEwSh2c= -github.com/aws/aws-sdk-go-v2/service/costandusagereportservice v1.29.4/go.mod h1:mWXTvKnKJ30G5ZxiEBAaN2jFgzX69Jwwr0lDmx4/6js= -github.com/aws/aws-sdk-go-v2/service/costexplorer v1.51.2 h1:7zSsOpcOaTximKcYWlpbhgKSn22fzx3ZkkankTEBHpQ= -github.com/aws/aws-sdk-go-v2/service/costexplorer v1.51.2/go.mod h1:xbfTJfT0GwWB6ONGltxdQixqzk/5fD/J/KEeQjUUNI8= 
-github.com/aws/aws-sdk-go-v2/service/costoptimizationhub v1.16.2 h1:yJ9bmAq8pTTETtUjQpONk3hzFLFy4qnsGu8IzPJYW4s= -github.com/aws/aws-sdk-go-v2/service/costoptimizationhub v1.16.2/go.mod h1:2e/HlfOil/pDjSsn/P0VcpYxKX3rycKiR8FSVzsOfao= -github.com/aws/aws-sdk-go-v2/service/customerprofiles v1.47.0 h1:bJKZVmfIHuaI7h0w7Ra5FKtQaKLaarBlJZVfcaMYNh8= -github.com/aws/aws-sdk-go-v2/service/customerprofiles v1.47.0/go.mod h1:rm68C2eQGFimGGUdirf25ehBACurSxVmirlX2NsgMpQ= -github.com/aws/aws-sdk-go-v2/service/databasemigrationservice v1.53.0 h1:KPukzgWZnmdc4fZYFkA46orMsQJoeNeEh5wbSnrYCdE= -github.com/aws/aws-sdk-go-v2/service/databasemigrationservice v1.53.0/go.mod h1:YDWzt7f6AHa4WfyJDv3GcIiyY3969MfsuSX9ANUbZ+k= -github.com/aws/aws-sdk-go-v2/service/databrew v1.34.4 h1:4M8XfsTE92AisaKwV75xtfCVT3Xza3ImIqlZsvzxZ0w= -github.com/aws/aws-sdk-go-v2/service/databrew v1.34.4/go.mod h1:b2Cv3mZxp7bNPEzOQFsSCcPJivdNiHn8HmCA7rau1r8= -github.com/aws/aws-sdk-go-v2/service/dataexchange v1.35.2 h1:/0cE4Ng/7zrNuM7yL3ADTwqDjN8CcPClsDxW6s4Fxy4= -github.com/aws/aws-sdk-go-v2/service/dataexchange v1.35.2/go.mod h1:3KVz8qwswG8F7iJvqk1hijdyF296sqxxYBMYX3vqygk= -github.com/aws/aws-sdk-go-v2/service/datapipeline v1.26.4 h1:qW7fLEpklI16GTkOQOC4IeztsCK38gXAsOLo2On2jD0= -github.com/aws/aws-sdk-go-v2/service/datapipeline v1.26.4/go.mod h1:aLoUy+KtchN6tAwb7YJnPcsb2YEoultUKsx1s/QEz60= -github.com/aws/aws-sdk-go-v2/service/datasync v1.49.3 h1:yWMkk9hwUjpDVsS4h0713JK1gKzubaxmqcQk/9r40t8= -github.com/aws/aws-sdk-go-v2/service/datasync v1.49.3/go.mod h1:gTqSe98/eTBLBSli2OIVCCtZ2wJ2oNrDqK16A2LGWiM= -github.com/aws/aws-sdk-go-v2/service/datazone v1.31.0 h1:AFzCK9/krkZ1i7AZtreEf9uiU8lJ55wTQoXFFgst//8= -github.com/aws/aws-sdk-go-v2/service/datazone v1.31.0/go.mod h1:XBH6CAk0DGML9jXbQM8GQkBE+ER1wRXrm0GxQe783xU= -github.com/aws/aws-sdk-go-v2/service/dax v1.24.4 h1:lyH0fXwrV4nIytmoiz0rzrJSFv84ZJ8MdK83U/LUT/Q= -github.com/aws/aws-sdk-go-v2/service/dax v1.24.4/go.mod h1:D91Ak1sYOquLMDM2EPuBRL+2gQxEnzMhG+/s5iUInMw= 
-github.com/aws/aws-sdk-go-v2/service/detective v1.33.2 h1:ePaT5c+InRjskQmJYTXwvMmb3VxcKh9MjZ5PVwoBduo= -github.com/aws/aws-sdk-go-v2/service/detective v1.33.2/go.mod h1:RE7vENK3CjJmUV40rQQsgkB7DNHJ1hZraBS99K7A/QQ= -github.com/aws/aws-sdk-go-v2/service/devicefarm v1.31.2 h1:6KlUuNr0DmhQQm/g/q3a6swX6WalRpVve8Op2Fdpy30= -github.com/aws/aws-sdk-go-v2/service/devicefarm v1.31.2/go.mod h1:+HTd3s8wIGd5b5jSikh9Qd/J1kNfY6IqioLkwZisfvc= -github.com/aws/aws-sdk-go-v2/service/devopsguru v1.35.4 h1:e7qpCMdibnlsI0jO5UfGTRfg+0G+HBANsMVtAjc8Pro= -github.com/aws/aws-sdk-go-v2/service/devopsguru v1.35.4/go.mod h1:nIALOeX1Xmspm6NhjzznpGmbyBg5gV0hxYcFcSCIUEQ= -github.com/aws/aws-sdk-go-v2/service/directconnect v1.32.5 h1:8H+ZzO2Yez+PbYRzheZoxWmv03k+qKq71Ruhlx9khxE= -github.com/aws/aws-sdk-go-v2/service/directconnect v1.32.5/go.mod h1:DD3baYN1tN5iIxcPKVAlgnDh2ZkUcbzM/lH/j0l+lxI= -github.com/aws/aws-sdk-go-v2/service/directoryservice v1.31.7 h1:JV01vGZhXnOGI5mjrSaYs8toau+lPgXp6UlQNm+inFY= -github.com/aws/aws-sdk-go-v2/service/directoryservice v1.31.7/go.mod h1:mAPyxqoegn/QPFB2Zy65DiQ2y8MlTtzKvFvlz2rwaQk= -github.com/aws/aws-sdk-go-v2/service/dlm v1.30.7 h1:O2pUnDku0CyRC4kZxa88YCMf395tbCujoOCS423vlXw= -github.com/aws/aws-sdk-go-v2/service/dlm v1.30.7/go.mod h1:dQK5yb0IyYZOJ8paqSQu6csZtYTIIxmAgI4Y4rtL9C4= -github.com/aws/aws-sdk-go-v2/service/docdb v1.41.6 h1:3psRq1ftvPT02Gtnt2YjSa/hXWM0JuEy3uZu8hatWPA= -github.com/aws/aws-sdk-go-v2/service/docdb v1.41.6/go.mod h1:HKdINsFfdzTWR38qWzfMbMJmsXC8tvbdSis/kG1+lCM= -github.com/aws/aws-sdk-go-v2/service/docdbelastic v1.15.4 h1:ne+OVLZVBibPXOb4Hm9o3iZp3UB5oA175aCrOzVTtHk= -github.com/aws/aws-sdk-go-v2/service/docdbelastic v1.15.4/go.mod h1:gXnmPUfd/xGEIZ8WsMswLiSAyYkQ6gMC9Uj7zVguwbQ= -github.com/aws/aws-sdk-go-v2/service/drs v1.31.4 h1:/mnR2UVVHcGIrHf70g5nb3RyoUHuj9MAVUYH9JvThcA= -github.com/aws/aws-sdk-go-v2/service/drs v1.31.4/go.mod h1:yvvJJgvXZDPuf3g8F/0IloipIsnnsamkCyVQdxGR6Og= -github.com/aws/aws-sdk-go-v2/service/dsql v1.5.2 
h1:FCT/XJTmF+Rs9dpz8raISrEui75jLrF1hwYj2S5T7cw= -github.com/aws/aws-sdk-go-v2/service/dsql v1.5.2/go.mod h1:MFliW2mb4JEqLROEGWnf9o8mEpNjiyieKyOaUqa2ji0= -github.com/aws/aws-sdk-go-v2/service/dynamodb v1.44.0 h1:A99gjqZDbdhjtjJVZrmVzVKO2+p3MSg35bDWtbMQVxw= -github.com/aws/aws-sdk-go-v2/service/dynamodb v1.44.0/go.mod h1:mWB0GE1bqcVSvpW7OtFA0sKuHk52+IqtnsYU2jUfYAs= -github.com/aws/aws-sdk-go-v2/service/ec2 v1.231.0 h1:uhIwvt6crp2kQenKojfDShGw39WEIrtPRfYZ3FAFlJk= -github.com/aws/aws-sdk-go-v2/service/ec2 v1.231.0/go.mod h1:35jGWx7ECvCwTsApqicFYzZ7JFEnBc6oHUuOQ3xIS54= -github.com/aws/aws-sdk-go-v2/service/ecr v1.45.1 h1:Bwzh202Aq7/MYnAjXA9VawCf6u+hjwMdoYmZ4HYsdf8= -github.com/aws/aws-sdk-go-v2/service/ecr v1.45.1/go.mod h1:xZzWl9AXYa6zsLLH41HBFW8KRKJRIzlGmvSM0mVMIX4= -github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.33.2 h1:XJ/AEFYj9VFPJdF+VFi4SUPEDfz1akHwxxm07JfZJcs= -github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.33.2/go.mod h1:JUBHdhvKbbKmhaHjLsKJAWnQL80T6nURmhB/LEprV+4= -github.com/aws/aws-sdk-go-v2/service/ecs v1.58.1 h1:DTwVT1pmRYac0va8mb4A97bumBXZJeAov776TlsYqHw= -github.com/aws/aws-sdk-go-v2/service/ecs v1.58.1/go.mod h1:kq9VTFKJ68jqeYu1uVx6bR7VgWdQ0Kic/BstllTJJuU= -github.com/aws/aws-sdk-go-v2/service/efs v1.36.2 h1:u559lskjn8+5WRnLU+Aq0VCZLjgw+JXYHiwSfOpweBw= -github.com/aws/aws-sdk-go-v2/service/efs v1.36.2/go.mod h1:e6UrCp+V52p83QPNWC05I2N3vkg15XTfbQ0n4IvYDYQ= -github.com/aws/aws-sdk-go-v2/service/eks v1.66.1 h1:sD1y3G4WXw1GjK95L5dBXPFXNWl/O8GMradUojUYqCg= -github.com/aws/aws-sdk-go-v2/service/eks v1.66.1/go.mod h1:Qj90srO2HigGG5x8Ro6RxixxqiSjZjF91WTEVpnsjAs= -github.com/aws/aws-sdk-go-v2/service/elasticache v1.46.3 h1:K1KtI95Fkz+2PT0OtVRsZyUzb4zHFMWOXNPkXy7LYDY= -github.com/aws/aws-sdk-go-v2/service/elasticache v1.46.3/go.mod h1:kI+JDflKNLqdxVmdg2I8A3dmsCcJzAXXz5vKcHsyz9Y= -github.com/aws/aws-sdk-go-v2/service/elasticbeanstalk v1.29.5 h1:pMxyQ4h0JhnKOQoTRW6OyzKtsHKGzO3qTikBH7q5dr4= -github.com/aws/aws-sdk-go-v2/service/elasticbeanstalk v1.29.5/go.mod 
h1:BfDv/2Xok2pEg9VbiT7WkBIO3WFnAnuUcncn9QkOJko= -github.com/aws/aws-sdk-go-v2/service/elasticloadbalancing v1.29.6 h1:9grU/+HRwLXJV8XUjEPThJj/H+0oHkeNBFpSSfZekeg= -github.com/aws/aws-sdk-go-v2/service/elasticloadbalancing v1.29.6/go.mod h1:N4fs285CsnBHlAkzBpQapefR/noggTyF09fWs72EzB4= -github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2 v1.46.0 h1:3nrkDeiPreARHMoqvS+umxTKcDVkqnRPlz01/kVgG7U= -github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2 v1.46.0/go.mod h1:E+At5Cto6ntT+qaNs3RpJKsx1GaFaNB3zzNUFhHL8DE= -github.com/aws/aws-sdk-go-v2/service/elasticsearchservice v1.33.6 h1:uMMgBQYKsZn0kunyKsyUZyeIlBjt0tq8JmuSRhPF3k8= -github.com/aws/aws-sdk-go-v2/service/elasticsearchservice v1.33.6/go.mod h1:amHnCXfYgnnuX+DZsN/hSBbhKWA8ftDQN0QVVelGGoU= -github.com/aws/aws-sdk-go-v2/service/elastictranscoder v1.28.4 h1:0ScNqYCd3DPv6xfaKQkcCB06mWKI1eXQ5HbE4zeBo7M= -github.com/aws/aws-sdk-go-v2/service/elastictranscoder v1.28.4/go.mod h1:EqJAUs2nA9PHOBjrMpv+XmjbEdPx3COUMnEKzsc0PGU= -github.com/aws/aws-sdk-go-v2/service/emr v1.49.3 h1:bojA/Hy1JbiG84qjo0dKjzCSrlkGkqoZKivoSA3ZYyI= -github.com/aws/aws-sdk-go-v2/service/emr v1.49.3/go.mod h1:3Fb28r8m3+76JD3SGbN080pY53Zf8S+kraglAVRIucc= -github.com/aws/aws-sdk-go-v2/service/emrcontainers v1.35.4 h1:4DSQddd2X8DtQ7XkfoxgTQm9Ziqg7OMqTqYexZJiQsE= -github.com/aws/aws-sdk-go-v2/service/emrcontainers v1.35.4/go.mod h1:1wo3Ol0hdgtW5tnkHDSywVk1uGZgFz3GIczlHWigLSE= -github.com/aws/aws-sdk-go-v2/service/emrserverless v1.32.0 h1:lMEEo2u0vS4+xid38JaKIyjxIh8OCkDNtyt4wHqZ4Os= -github.com/aws/aws-sdk-go-v2/service/emrserverless v1.32.0/go.mod h1:DLlEeTpje5Jl1KXggBTphYGdTn+4VUgSOfPZOdQKwOg= -github.com/aws/aws-sdk-go-v2/service/eventbridge v1.40.0 h1:S2zUrIgbvBdHCWP5I5P3Wz8+YfDyp7rpCfGXBwmO3a8= -github.com/aws/aws-sdk-go-v2/service/eventbridge v1.40.0/go.mod h1:sIrUII6Z+hAVAgcpmsc2e9HvEr++m/v8aBPT7s4ZYUk= -github.com/aws/aws-sdk-go-v2/service/evidently v1.24.4 h1:LFq0twtI4iH7NoI8zqgom4RttSS//mKasAt4vbMbX3E= 
-github.com/aws/aws-sdk-go-v2/service/evidently v1.24.4/go.mod h1:xs4SqVz98n8Bxjt/NCG2G2Jm/qx8gx+i0euCyIaRZJA= -github.com/aws/aws-sdk-go-v2/service/evs v1.0.2 h1:jwSECr6+TScYZgbaVmL5WSMnjifRg8V0CGv+R/IU4I4= -github.com/aws/aws-sdk-go-v2/service/evs v1.0.2/go.mod h1:0a8Lc552uwJTFIRrlvqlR6dqvxlN6hk4GMYZRek0Se4= -github.com/aws/aws-sdk-go-v2/service/finspace v1.29.4 h1:MPXrTPT6nLbddVOivR+cZg3yC/qDZlf5Eta36oQGmzM= -github.com/aws/aws-sdk-go-v2/service/finspace v1.29.4/go.mod h1:hekaZTEQbeaS+WHd4BzQtu+nJS/E73xZocexPrPrArQ= -github.com/aws/aws-sdk-go-v2/service/firehose v1.37.7 h1:rDNxf0CQboBMqzm6WmhGL58pYpKMjU6Qs3/BfY3Em4Y= -github.com/aws/aws-sdk-go-v2/service/firehose v1.37.7/go.mod h1:E1yDRkUMwlVGmDYcu5UJuwfznGNuVW29sjr2xxM2Y0w= -github.com/aws/aws-sdk-go-v2/service/fis v1.33.4 h1:qHebHke5kT9KPhmKfqxWc3a9paffgRhbegNoORoxfCE= -github.com/aws/aws-sdk-go-v2/service/fis v1.33.4/go.mod h1:xwRN5ORzqRIf5IYIkcyAuEhKhVf4Cts5jd7j/fA8+LE= -github.com/aws/aws-sdk-go-v2/service/fms v1.40.5 h1:2hNJGW372nqz7HzMutbocRpZ3MARYm5kq2tvCFs6OHI= -github.com/aws/aws-sdk-go-v2/service/fms v1.40.5/go.mod h1:93wTShRibgZb1ELz8Pf81L3An0WHKHf9wRJ+6s2OLv0= -github.com/aws/aws-sdk-go-v2/service/fsx v1.55.0 h1:ZyAs2DqX6ksKM5dihLzrFseTygwaZWholin+VmN6Ob4= -github.com/aws/aws-sdk-go-v2/service/fsx v1.55.0/go.mod h1:yKSq9iW5hHBEpyYKpmH7bGVTBpE9Ki4xrfAWV99wXpE= -github.com/aws/aws-sdk-go-v2/service/gamelift v1.42.1 h1:a3b1XXHAg61yVO5oKuMN73LxUipPnY5FaV/+kAqvZn0= -github.com/aws/aws-sdk-go-v2/service/gamelift v1.42.1/go.mod h1:dnPoxIqQYnMMkAW1HYNKCF2Sc17CDR2sm+/L8o5FNe8= -github.com/aws/aws-sdk-go-v2/service/glacier v1.27.5 h1:Rp3lC3bHz78NMV6BlffdC/WlpNL/k060yi5FUGBj5po= -github.com/aws/aws-sdk-go-v2/service/glacier v1.27.5/go.mod h1:hSMtaqxpqY3qBEIStQISXDfbBQTcYLNjYn4OSVWKvdc= -github.com/aws/aws-sdk-go-v2/service/globalaccelerator v1.30.4 h1:idE6j2x7GKSosHJs8cUx8A6KUq3uBrHgjDlWX349fuM= -github.com/aws/aws-sdk-go-v2/service/globalaccelerator v1.30.4/go.mod 
h1:j/G2N1igocPCVsL7+KhmWI7Y9fiAaUtRdirSReCxDSA= -github.com/aws/aws-sdk-go-v2/service/glue v1.117.0 h1:Tl20k1TsdD8Ot+tfOgUt49EE9FyGla1e2LhuJe2Gkgk= -github.com/aws/aws-sdk-go-v2/service/glue v1.117.0/go.mod h1:AiOhaEmhCSVONWJ9Ul47qOzNNEBXG8saKz1K7vKbRg4= -github.com/aws/aws-sdk-go-v2/service/grafana v1.27.4 h1:XixrfgFR4zUxe2lqvQSp7VneDSjh1jVNdU2ebIWSydg= -github.com/aws/aws-sdk-go-v2/service/grafana v1.27.4/go.mod h1:2tlr8LcYq7dHoKzd0McU0r5Q408BwnpvPFyDIW6g6Cc= -github.com/aws/aws-sdk-go-v2/service/greengrass v1.28.4 h1:O0ymzTHd7bbwTjN4lJksKRM+g/WYOzGe2C0dCai1T+Y= -github.com/aws/aws-sdk-go-v2/service/greengrass v1.28.4/go.mod h1:33wl2N0a4HTF8TcfOpgbr057ZmSmdQM1odJnMXBEDn0= -github.com/aws/aws-sdk-go-v2/service/groundstation v1.33.2 h1:ISdFgeehbUcSmHuKnSXIiXbTCbktq3gQOmOJFKXTIuI= -github.com/aws/aws-sdk-go-v2/service/groundstation v1.33.2/go.mod h1:Yy51sCEGRTCe+WCXyGCtwPlr7cJq8gkV3pCr61IlxFo= -github.com/aws/aws-sdk-go-v2/service/guardduty v1.56.0 h1:9sDfWWFOLWf4iXJRmgA2KM44VqzKzBcYE/3lRxdfBac= -github.com/aws/aws-sdk-go-v2/service/guardduty v1.56.0/go.mod h1:NCwAyLptBGarEwV6HMo52eD4wIqiT+szUlI4WhfEeWM= -github.com/aws/aws-sdk-go-v2/service/healthlake v1.30.5 h1:wXVaLzbLWize/Cbpcz8bt3Z7JptSNjTiT3aLXacB3qA= -github.com/aws/aws-sdk-go-v2/service/healthlake v1.30.5/go.mod h1:KPnC/Zx3SFrNdp6MqngyzCuua9FwdR3gB37IZB19esU= -github.com/aws/aws-sdk-go-v2/service/iam v1.43.0 h1:/ZZo3N8iU/PLsRSCjjlT/J+n4N8kqfTO7BwW1GE+G50= -github.com/aws/aws-sdk-go-v2/service/iam v1.43.0/go.mod h1:QRtwvoAGc59uxv4vQHPKr75SLzhYCRSoETxAA98r6O4= -github.com/aws/aws-sdk-go-v2/service/identitystore v1.28.6 h1:kFlM9ljR/NV9tRbwLpenIdFjDAYFB23pLpcWpCDfkuc= -github.com/aws/aws-sdk-go-v2/service/identitystore v1.28.6/go.mod h1:z1GkhlOp50BHMgSkGFxwKR28G+ZvjykzUScuWhCdVco= -github.com/aws/aws-sdk-go-v2/service/imagebuilder v1.42.3 h1:TLul/XG5yo9fbIMtxEXHwKtjohZjTNVYwWNJR3CRVE0= -github.com/aws/aws-sdk-go-v2/service/imagebuilder v1.42.3/go.mod h1:PKGWYhnhQ3tDhM8W/1R7QUBmM9c7SEshBEewE7XPFPc= 
-github.com/aws/aws-sdk-go-v2/service/inspector v1.26.4 h1:HmmfKgLW6dj9ZF6LQjnyPr8JfgO5RKViUJZyr+3DyAs= -github.com/aws/aws-sdk-go-v2/service/inspector v1.26.4/go.mod h1:axRC0whrHPEaTEcJCL1FalY9KwwOhmKKdeLzLjqkTyc= -github.com/aws/aws-sdk-go-v2/service/inspector2 v1.38.1 h1:c1ggLklQ1C5Aoj99g/4/CCdB6D0oIPaETNYwY4z8/i4= -github.com/aws/aws-sdk-go-v2/service/inspector2 v1.38.1/go.mod h1:6usonUxMtrrQ1OuxxJeBR2tR1PZcwjc2/e//xK2rmtQ= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.4 h1:CXV68E2dNqhuynZJPB80bhPQwAKqBWVer887figW6Jc= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.4/go.mod h1:/xFi9KtvBXP97ppCz1TAEvU1Uf66qvid89rbem3wCzQ= -github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.7.4 h1:nAP2GYbfh8dd2zGZqFRSMlq+/F6cMPBUuCsGAMkN074= -github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.7.4/go.mod h1:LT10DsiGjLWh4GbjInf9LQejkYEhBgBCjLG5+lvk4EE= -github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.10.17 h1:x187MqiHwBGjMGAed8Y8K1VGuCtFvQvXb24r+bwmSdo= -github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.10.17/go.mod h1:mC9qMbA6e1pwEq6X3zDGtZRXMG2YaElJkbJlMVHLs5I= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.17 h1:t0E6FzREdtCsiLIoLCWsYliNsRBgyGD/MCK571qk4MI= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.17/go.mod h1:ygpklyoaypuyDvOM5ujWGrYWpAK3h7ugnmKCU/76Ys4= -github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.17 h1:qcLWgdhq45sDM9na4cvXax9dyLitn8EYBRl8Ak4XtG4= -github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.17/go.mod h1:M+jkjBFZ2J6DJrjMv2+vkBbuht6kxJYtJiwoVgX4p4U= -github.com/aws/aws-sdk-go-v2/service/internetmonitor v1.21.5 h1:/OevpXjFTKC13DuhlMoJmlVx246loRn4RehOXcaokYs= -github.com/aws/aws-sdk-go-v2/service/internetmonitor v1.21.5/go.mod h1:hzvUC8l6AJ26Yz6eYiKPClQkSEbukvkNDMMNNhCcM7M= -github.com/aws/aws-sdk-go-v2/service/invoicing v1.2.2 h1:l9h02nlsL71Z3AsiNYe3ok0sKf5FxYalBivi8dmroFo= 
-github.com/aws/aws-sdk-go-v2/service/invoicing v1.2.2/go.mod h1:qgx493y1oppVNw2khxgCCfmDRCH7xFaLzeHQPPIQcV4= -github.com/aws/aws-sdk-go-v2/service/iot v1.64.4 h1:PCIpXKj5E5SCsIICVb50mU8Ma7B+Yowd872E2x2GEKM= -github.com/aws/aws-sdk-go-v2/service/iot v1.64.4/go.mod h1:zoWywk4n+izQigMVgYQFCnASbAJ8uHv6RHKLrjAsocg= -github.com/aws/aws-sdk-go-v2/service/ivs v1.43.4 h1:o8i4lXojYxWkf1JO/4ZI42A+BqLQcVE7/R/PeSd6//Y= -github.com/aws/aws-sdk-go-v2/service/ivs v1.43.4/go.mod h1:eqKP1qnqzTTjRcIO6DK9HRiIwvbL67xAUZ3IGbQ0WOI= -github.com/aws/aws-sdk-go-v2/service/ivschat v1.17.4 h1:btA/5nMzQ5W9uYvXVfZoo+1MfIsnt8rHxfdeqqb/Hp4= -github.com/aws/aws-sdk-go-v2/service/ivschat v1.17.4/go.mod h1:wVqsjIZzpNfhcxzSEQ5Ex3MZTK6pK41Bnube0cQbklw= -github.com/aws/aws-sdk-go-v2/service/kafka v1.39.5 h1:N92rM/5cDDxhjRLQsiVuV+osgvjgxjlPWDfifwWZl+0= -github.com/aws/aws-sdk-go-v2/service/kafka v1.39.5/go.mod h1:O0aQB4mb7phy2B60/oRkEN2EeUdbWDOHhrnar8ZP1Dk= -github.com/aws/aws-sdk-go-v2/service/kafkaconnect v1.23.5 h1:6aVQyYo8DwhQknoluvQn3myUthiSvX7h0nf7r2nrxQU= -github.com/aws/aws-sdk-go-v2/service/kafkaconnect v1.23.5/go.mod h1:DIDIP4kbwO2APBMn4aH89FjL3JNeeDoOG37W15Tkk2o= -github.com/aws/aws-sdk-go-v2/service/kendra v1.56.4 h1:GmvdHpYX8gUIIrhVoZ3CVyES0M06FAoMAmwWSroWwDk= -github.com/aws/aws-sdk-go-v2/service/kendra v1.56.4/go.mod h1:UyEw38rFv1ab5iGITliJ76ercQ2W+uH6xGofzM/fWn8= -github.com/aws/aws-sdk-go-v2/service/keyspaces v1.19.0 h1:dUlvwCH/2NcG6vE87uBYtedvSqr38hvOMq2V7oNrGek= -github.com/aws/aws-sdk-go-v2/service/keyspaces v1.19.0/go.mod h1:6ToAMADrPoGAV7YNsJh8QHv/V9Rok9uPTvJmw0nxpj4= -github.com/aws/aws-sdk-go-v2/service/kinesis v1.35.3 h1:aAi9YBNpYMEX52Z9qy1YP2t3RhDqMcP67Ep/C4q5RiQ= -github.com/aws/aws-sdk-go-v2/service/kinesis v1.35.3/go.mod h1:DH0TzTbBG82HKNpBQlplRNSS4bGz0dsbJvxdK9f6rUY= -github.com/aws/aws-sdk-go-v2/service/kinesisanalytics v1.26.7 h1:2Yes4BbKaGPHb/bCdaWoDEC9YdAM51fuP5NC7Z3dNYU= -github.com/aws/aws-sdk-go-v2/service/kinesisanalytics v1.26.7/go.mod 
h1:eWKL85+D5+OcrfqvRpLF2x71btGZWur944vnaPmWE6E= -github.com/aws/aws-sdk-go-v2/service/kinesisanalyticsv2 v1.32.7 h1:Vt7/srA/qRWlIck03nC/kDGOITQZ5eJ2BlnXNEiPzeU= -github.com/aws/aws-sdk-go-v2/service/kinesisanalyticsv2 v1.32.7/go.mod h1:1FRspsThsK9y/KCnN6lF2ooSPFNw8TwGZf/3xpT3wEo= -github.com/aws/aws-sdk-go-v2/service/kinesisvideo v1.28.4 h1:dA0yAAnFje99NZqcHc0O/8rduXOe7e5R+qM798lq3s8= -github.com/aws/aws-sdk-go-v2/service/kinesisvideo v1.28.4/go.mod h1:CsOqYUjyz2UVrZ22fiKl+WdCRiXsO7kufv3P816Qo0I= -github.com/aws/aws-sdk-go-v2/service/kms v1.41.2 h1:zJeUxFP7+XP52u23vrp4zMcVhShTWbNO8dHV6xCSvFo= -github.com/aws/aws-sdk-go-v2/service/kms v1.41.2/go.mod h1:Pqd9k4TuespkireN206cK2QBsaBTL6X+VPAez5Qcijk= -github.com/aws/aws-sdk-go-v2/service/lakeformation v1.41.8 h1:WvMhnaMOJU9Q1xVmXDT6TT5V+0CyniFUIVS87XfvzFE= -github.com/aws/aws-sdk-go-v2/service/lakeformation v1.41.8/go.mod h1:NBaw/nPw3v62yWrxUOGkifYKkIeYoocc3O8lgrnvgxU= -github.com/aws/aws-sdk-go-v2/service/lambda v1.72.0 h1:2LerDz2Lz22IDfdpR/RpSZIFoBoAh1tdHUaiUzG2z0k= -github.com/aws/aws-sdk-go-v2/service/lambda v1.72.0/go.mod h1:vahA7MiX/fQE9J5o1PKbgn8KoXz7ogSFLAQQLdLUvM8= -github.com/aws/aws-sdk-go-v2/service/launchwizard v1.9.4 h1:zAxrTUh8ffwiunWoichOWc9tVVSzRpmU/dR6plwIiyE= -github.com/aws/aws-sdk-go-v2/service/launchwizard v1.9.4/go.mod h1:Q1KBC3ILbT5cYEAeWT8SSI4vrnNOqAK1mx5ru0Yk1V4= -github.com/aws/aws-sdk-go-v2/service/lexmodelbuildingservice v1.29.4 h1:emmwvPyyB36dp+c6hPHvn5vR+y/C85VUBKSqS+RhpFI= -github.com/aws/aws-sdk-go-v2/service/lexmodelbuildingservice v1.29.4/go.mod h1:y9wPFtue7AFgaZQUefO0j/l2SB7wtkFMlXmcdc/oG5I= -github.com/aws/aws-sdk-go-v2/service/lexmodelsv2 v1.52.1 h1:aLBLIBBVLoKXLjNy5EKh8kFndvawsoxvswsnKg4tXU0= -github.com/aws/aws-sdk-go-v2/service/lexmodelsv2 v1.52.1/go.mod h1:VGLvL1In57M4vlxHoro5WDGwlpzAMyix0XdwffuYOsI= -github.com/aws/aws-sdk-go-v2/service/licensemanager v1.32.0 h1:fyHzYkcQrD9+5gpLSQU5nkaZAIu1ZlsHzZ7MgMpzhic= -github.com/aws/aws-sdk-go-v2/service/licensemanager v1.32.0/go.mod 
h1:wgEK7i9V/WGv79dhmZOad0Sc3FcJhwgOJ2ihebLuVJY= -github.com/aws/aws-sdk-go-v2/service/lightsail v1.43.4 h1:0WHz7LVS1JHOMaJJ2uc7vvMERopVfNQE1Dil2yu6Wqw= -github.com/aws/aws-sdk-go-v2/service/lightsail v1.43.4/go.mod h1:2VS/H/N3xtI0VxFja/1Aqy1FscPkVyju4Uq9J08L6Ms= -github.com/aws/aws-sdk-go-v2/service/location v1.44.4 h1:oQhdGB0sDiV6DbHz2syreSdDE3IgpxyEYEexs8Fnjhg= -github.com/aws/aws-sdk-go-v2/service/location v1.44.4/go.mod h1:pkmmKXWZEw624lzTiL+3TzQsihEoqQGZpaYbWDjwvGU= -github.com/aws/aws-sdk-go-v2/service/lookoutmetrics v1.32.4 h1:C1BGDdGUvilwtTl0fymQ80x3a/ksZ9HrcDZe5ciHwgM= -github.com/aws/aws-sdk-go-v2/service/lookoutmetrics v1.32.4/go.mod h1:PNQsvph/5J9OZz4ns0mUL1myh+3suq6Maq4J/CewM4w= -github.com/aws/aws-sdk-go-v2/service/m2 v1.21.2 h1:xvYDXyQSCk3G7XTHJ/D+OobIcVxgo1ZABl0mrD16jGc= -github.com/aws/aws-sdk-go-v2/service/m2 v1.21.2/go.mod h1:6Ra+8YlUJvmrgRbiVrgvbB7UGa/8AlX6T9BgIqpDfbA= -github.com/aws/aws-sdk-go-v2/service/macie2 v1.45.4 h1:dUUeyfbXzT+0CIEa2cQT5BYLduPVOjLXbroYF/3DNyk= -github.com/aws/aws-sdk-go-v2/service/macie2 v1.45.4/go.mod h1:pUFG4pQ5NL+jDRwLRwiTCMMavh/+swy3be4NVQjyfx0= -github.com/aws/aws-sdk-go-v2/service/mediaconnect v1.40.2 h1:G6QfYIjydoQi5BRw3zkUP35aURuPgiMWsqda/vMSxxw= -github.com/aws/aws-sdk-go-v2/service/mediaconnect v1.40.2/go.mod h1:+JCqmRgWpEB6Gmkfb1UUyKQpkbuMo7KOCyZq3vg/xz4= -github.com/aws/aws-sdk-go-v2/service/mediaconvert v1.75.0 h1:Yw9/tZ1m3rqmcibR1h1TVKF3LKUXdGU1NMXrGzdnrCw= -github.com/aws/aws-sdk-go-v2/service/mediaconvert v1.75.0/go.mod h1:3DstUf6Py/5v01y1jf73ma6c3r+GbkFqyN2n1RTavRo= -github.com/aws/aws-sdk-go-v2/service/medialive v1.76.2 h1:rjwsjFC6SCrOFYbCCY8ULp5fHluwilZrzYVg2LPgeW8= -github.com/aws/aws-sdk-go-v2/service/medialive v1.76.2/go.mod h1:jExKUuHSh/WksIx3Vs3miOAOMpbF8rnvRNgtI+wH/4I= -github.com/aws/aws-sdk-go-v2/service/mediapackage v1.35.4 h1:ohFzCGSbvw7EX9XM8Oxtl9E0Ph2Rasmmuc+Xx8uf6Uo= -github.com/aws/aws-sdk-go-v2/service/mediapackage v1.35.4/go.mod h1:OenjZ9DGOXCsBuowIPErRHTsbGZC5jGBok+4V8teBko= 
-github.com/aws/aws-sdk-go-v2/service/mediapackagev2 v1.24.0 h1:hGCDJYqDm/XmIjLD0Pe7kcxUSLQZi6/lc6FD6AiVCrY= -github.com/aws/aws-sdk-go-v2/service/mediapackagev2 v1.24.0/go.mod h1:bO3GFTVx6m9gCIErec24aNup05CFQkTaXXl50BUJTDk= -github.com/aws/aws-sdk-go-v2/service/mediapackagevod v1.35.4 h1:JUfHo+paK88NAMjDmHQI5KhVybkduH/hAHbLrzz8guQ= -github.com/aws/aws-sdk-go-v2/service/mediapackagevod v1.35.4/go.mod h1:XW38yIsZNImizG/0v6CdP74lh6GvnZcaFQ9iwusvwMM= -github.com/aws/aws-sdk-go-v2/service/mediastore v1.25.4 h1:Z3sHyG46Hs1ZNUzQ9Z+psJoclcVB/iM6H7TLuOQ4HIA= -github.com/aws/aws-sdk-go-v2/service/mediastore v1.25.4/go.mod h1:kI9Qf+K599ZwzZzVwOqZJRk0gg9cFDots4NFzvfS148= -github.com/aws/aws-sdk-go-v2/service/memorydb v1.27.2 h1:IfwyIeg5ihdo0rgYPd5GLL7HoSleK+D+VKTQ90Ydvb4= -github.com/aws/aws-sdk-go-v2/service/memorydb v1.27.2/go.mod h1:/R8wCXLpL1wyd22zFfGoWei+JayKQGEGSWJ+FDNngu4= -github.com/aws/aws-sdk-go-v2/service/mgn v1.33.4 h1:A6g03tFkhPDXjiofvTxuvW2HH7DkwsdHuLEkGURj2uE= -github.com/aws/aws-sdk-go-v2/service/mgn v1.33.4/go.mod h1:gWtkzOxwXESKQGqsqICO3LIBA6PuOo/ZU4mMrMhxzo8= -github.com/aws/aws-sdk-go-v2/service/mq v1.29.2 h1:XhJW/ppQrd2J4T+TCxrv6sZWrSyRlZNYNq586EmSbg0= -github.com/aws/aws-sdk-go-v2/service/mq v1.29.2/go.mod h1:ESMOqV079mlqNnqaxin+UNKvPkn9e9Qew83YQMe+RDY= -github.com/aws/aws-sdk-go-v2/service/mwaa v1.35.3 h1:VcyYhv+EqCW3OwixgYpmNff6eJpSAjXtSjE0WLUogSY= -github.com/aws/aws-sdk-go-v2/service/mwaa v1.35.3/go.mod h1:QeKi1Tch8DJpKfsCNKvuXganHLH3XUt3sn22cfVSd2U= -github.com/aws/aws-sdk-go-v2/service/neptune v1.37.3 h1:T+EQnNg3h2IJbfg9M9OAZEiHO+xhVtpnV1IqtrGVFwI= -github.com/aws/aws-sdk-go-v2/service/neptune v1.37.3/go.mod h1://k6uK6wMNDdiPAjtlT4G+ln/yrRwiZCYRseUuaCpmM= -github.com/aws/aws-sdk-go-v2/service/neptunegraph v1.17.5 h1:4hLlfw7lQ0LfRqgDQTiuJ5l1z56mis4j0ncQjWipa/k= -github.com/aws/aws-sdk-go-v2/service/neptunegraph v1.17.5/go.mod h1:Ex4YrWM8XMVoK4nCZdWLjPA4KwrrVJnE/G8wIiVwRog= -github.com/aws/aws-sdk-go-v2/service/networkfirewall v1.51.0 
h1:CCNcctA+JRLbaOjsKSmMpkMhqh7yM9NSkUzGx4m6etM= -github.com/aws/aws-sdk-go-v2/service/networkfirewall v1.51.0/go.mod h1:Sdex/kw/DteUGYsSK3f4UtMBsHi9TBdxtVsJZaCg00k= -github.com/aws/aws-sdk-go-v2/service/networkmanager v1.35.1 h1:+WRM1yPx0OttOwWCg+fC0gIiRaYR3cAMqilFWGfKiJ8= -github.com/aws/aws-sdk-go-v2/service/networkmanager v1.35.1/go.mod h1:3yDKzKKBJPHeKau2EYAD/iFOd1E5XHXEjYOdShdhsgU= -github.com/aws/aws-sdk-go-v2/service/networkmonitor v1.8.4 h1:9I8hXa5RVl48APWv3xzQyj/VbU+V5TOaVj1tRhNbwzw= -github.com/aws/aws-sdk-go-v2/service/networkmonitor v1.8.4/go.mod h1:p2OtzahA9dYaLJB4zf/VMXWdfJhD5N6wHW6QcxUeF0k= -github.com/aws/aws-sdk-go-v2/service/notifications v1.2.5 h1:rSFeBvrGfRA4wAZYh8KaOJ/k0/JCvJr3l07n9tXSiGU= -github.com/aws/aws-sdk-go-v2/service/notifications v1.2.5/go.mod h1:tJBKodWS4tqyFCfsac9WE5Hm43e/IYDZbB2lax/QyGY= -github.com/aws/aws-sdk-go-v2/service/notificationscontacts v1.1.4 h1:8tAWBBRvHcnEucipGelVreFAqisi3Chhc1/ywio7/7U= -github.com/aws/aws-sdk-go-v2/service/notificationscontacts v1.1.4/go.mod h1:ZlMouvvOjPxSEcn08KswFDPzkDNA1339mJhvJHEq8Og= -github.com/aws/aws-sdk-go-v2/service/oam v1.18.3 h1:teOWtElLARLOhpYWwupjLbY9j5I/yZ/H1I8jg41An78= -github.com/aws/aws-sdk-go-v2/service/oam v1.18.3/go.mod h1:wGhpdyftHX6/1U4egowHkYdypwBMjpb+KjAAprv6z20= -github.com/aws/aws-sdk-go-v2/service/opensearch v1.46.6 h1:Od+ZuCqT6U0kJ1mjQSmo7FMJ90r1AcgJ/qYRoXG6wQo= -github.com/aws/aws-sdk-go-v2/service/opensearch v1.46.6/go.mod h1:0vIvvobMH8MY/GsR1hdcZPISLp16YwQ18D+cMG/3YEc= -github.com/aws/aws-sdk-go-v2/service/opensearchserverless v1.19.6 h1:bF3ZAHXA0INerCsCw+izReGUn8ZgYl61K77Y/X6xSU8= -github.com/aws/aws-sdk-go-v2/service/opensearchserverless v1.19.6/go.mod h1:FJYhjKoTlazvHMw/o+6UOPgejUyTtri14Z3GKzOCHDk= -github.com/aws/aws-sdk-go-v2/service/organizations v1.39.0 h1:8dPwqXepW7uF1+20KEXZMkVKxHsCUUt6Fc0Zypx9tPg= -github.com/aws/aws-sdk-go-v2/service/organizations v1.39.0/go.mod h1:5MRPiBYQXFmgqmnXbhAVtKk9SebdLGFRmaa8gz1K4cM= -github.com/aws/aws-sdk-go-v2/service/osis 
v1.15.5 h1:GKITYwhEre2s69oYPdtOKXca7TWf+nJVzIasQCqi+LA= -github.com/aws/aws-sdk-go-v2/service/osis v1.15.5/go.mod h1:Z4CSw4zWtSRQf2YUTFFm8DzccAwxYPZCoCRhgLMH9lE= -github.com/aws/aws-sdk-go-v2/service/outposts v1.51.0 h1:HLhXiT+SOlYunW0KlOUSS2jVy2OUQEdo54umLSf1Bmk= -github.com/aws/aws-sdk-go-v2/service/outposts v1.51.0/go.mod h1:XiGs3zv9ejL2VLM77wccs1qBnsmyAFnWs5Fs6iptvWY= -github.com/aws/aws-sdk-go-v2/service/paymentcryptography v1.19.0 h1:Lwws0exTQXDwOtnvHQgDTA4xOv6Fh3o9SfU0hTCa/gQ= -github.com/aws/aws-sdk-go-v2/service/paymentcryptography v1.19.0/go.mod h1:T1vNF1UfLFdQhuJmDLWlGNG2lo/OzX9xjjUSNnHW1OE= -github.com/aws/aws-sdk-go-v2/service/pcaconnectorad v1.11.3 h1:Kcd4PcPvUaNIffZP1O0Kr4Ki2n6WJJOGKgIUbZxMaDU= -github.com/aws/aws-sdk-go-v2/service/pcaconnectorad v1.11.3/go.mod h1:zkxvVWdC/LpE3YfN6hmdVXA+2NwIzHs5sItf6Obv73o= -github.com/aws/aws-sdk-go-v2/service/pcs v1.6.2 h1:b1iBwCTqJRqpy8FMv/0d049PLwCa3Jk8+UVAh7qIF+0= -github.com/aws/aws-sdk-go-v2/service/pcs v1.6.2/go.mod h1:C3xBB9K56xxpHoxjN3i60zbcwcjpNpJilYIGC87LWGc= -github.com/aws/aws-sdk-go-v2/service/pinpoint v1.35.4 h1:gvptUhrWhuZQBPFXei0IKyZHkNjcTUOh1BGL695Eens= -github.com/aws/aws-sdk-go-v2/service/pinpoint v1.35.4/go.mod h1:wXJlxfvejDIFeYJIlZv0djXvLAKY8a81OBH+mNrQcEw= -github.com/aws/aws-sdk-go-v2/service/pinpointsmsvoicev2 v1.20.3 h1:DpqKXU5uVGg+UBGTj6enBcTI41KO/z+fwmCR76rKml4= -github.com/aws/aws-sdk-go-v2/service/pinpointsmsvoicev2 v1.20.3/go.mod h1:klV/eNAO1c5q00dtuTEuLZZkQZgkO/NnkRG7dKRI76U= -github.com/aws/aws-sdk-go-v2/service/pipes v1.19.5 h1:KQnsuly2Ch7DJ9htsCdksI/tqFi7pQ0q69W5G+USmyY= -github.com/aws/aws-sdk-go-v2/service/pipes v1.19.5/go.mod h1:rBlgG8h2mfLBNrY7Z0gz9AYjbFqoqHpMVKUUH5YbBpA= -github.com/aws/aws-sdk-go-v2/service/polly v1.48.4 h1:HIqVbJqUkRNkDB/FfCvvck4GkYz/9X80pz0wt3/aR28= -github.com/aws/aws-sdk-go-v2/service/polly v1.48.4/go.mod h1:Yzmq1/XqHdnsMPyAlIoxnWGlpmkpAwZ4HmoEcBg3nAk= -github.com/aws/aws-sdk-go-v2/service/pricing v1.34.5 h1:VPKHJpSkYojMxD/nN//88/yVauw2lab1q3P6+J0dfvs= 
-github.com/aws/aws-sdk-go-v2/service/pricing v1.34.5/go.mod h1:21H9QmAqGSjeskZ7iZkuQ9GNuCOR3j2gt2FBct6wMyg= -github.com/aws/aws-sdk-go-v2/service/qbusiness v1.28.0 h1:3QtHatGoArrO2x3IMaKxYYj/tUQht/n18gezryaR7No= -github.com/aws/aws-sdk-go-v2/service/qbusiness v1.28.0/go.mod h1:FqAEEpHUKMoLeaFEJlsVYz0LmTyGzFW1QYH+DbK2WiA= -github.com/aws/aws-sdk-go-v2/service/qldb v1.26.4 h1:wA14NpU1FWcexAceWHCFPEkCtel9IbTrajBNIlxlgc8= -github.com/aws/aws-sdk-go-v2/service/qldb v1.26.4/go.mod h1:x5TT9jzcs+eoh14Xg2kCOix2jn/Je9cLiKUT5JPQnPc= -github.com/aws/aws-sdk-go-v2/service/quicksight v1.87.0 h1:tprZwg0iv7F48Ou6AKJqlmVrifP6wz6DYjNyvBFz5aI= -github.com/aws/aws-sdk-go-v2/service/quicksight v1.87.0/go.mod h1:2qi3N8xyA+QSqxlkwy9+tglelPujRpN0g74BUDqOuFI= -github.com/aws/aws-sdk-go-v2/service/ram v1.30.6 h1:0a/uXcdUNFS1CancSPzVRwl03Ut3lrDSyOJHwvTLmmU= -github.com/aws/aws-sdk-go-v2/service/ram v1.30.6/go.mod h1:qmavcnsJquTI5vYHDnKNNxbcy0C/c0PQZgLysBQwLEE= -github.com/aws/aws-sdk-go-v2/service/rbin v1.22.6 h1:7tsUhpKIsnK31UTnLER6u5bpYIkeIxCscQvzou6f240= -github.com/aws/aws-sdk-go-v2/service/rbin v1.22.6/go.mod h1:wIGDZidVXHKiPsFtKSKBpmDWt7vEZMcI4onWsQSrX0U= -github.com/aws/aws-sdk-go-v2/service/rds v1.99.1 h1:eiDDf+cf2fAxOF5XaGLlrdCZPsnr5BTcPW55UK92sY4= -github.com/aws/aws-sdk-go-v2/service/rds v1.99.1/go.mod h1:Xe+NMlf/DY/XTXSevASAjGRika9Qt2LnuCDLtos03ms= -github.com/aws/aws-sdk-go-v2/service/redshift v1.54.6 h1:5u13KKciWFrXs3pkiG45cZfjAxCxHHCbhTm/Dg3GRas= -github.com/aws/aws-sdk-go-v2/service/redshift v1.54.6/go.mod h1:CFY4v8m7Nd96aVuFyNU+ujY+1Uim7JrJnAd0jkLf2Zg= -github.com/aws/aws-sdk-go-v2/service/redshiftdata v1.33.3 h1:q3xxlF1/eZjmkfUxn4y2GTaYJTfbXBOIdbVLpfnJHcM= -github.com/aws/aws-sdk-go-v2/service/redshiftdata v1.33.3/go.mod h1:rOBWa0PxH6/EjgXOWWzPK38yYhBPfcnyKdkNdZYhBEk= -github.com/aws/aws-sdk-go-v2/service/redshiftserverless v1.27.4 h1:KIx8wB5F1QjXZ+RPuemTKLHMZgoVojeN9zOhfC+17F0= -github.com/aws/aws-sdk-go-v2/service/redshiftserverless v1.27.4/go.mod 
h1:mO00EfrGvLQ9TE+tQb6Y2CToVq//1jQHbQN4LD12zDw= -github.com/aws/aws-sdk-go-v2/service/rekognition v1.47.2 h1:jhI8d308+/rJ0/x/LIfBWC1KU3pcNxx3mc66HVbUddY= -github.com/aws/aws-sdk-go-v2/service/rekognition v1.47.2/go.mod h1:P1V4mtg5tYOQl0nGcDh4hP2KyIVowqz6YgLcehtAkQo= -github.com/aws/aws-sdk-go-v2/service/resiliencehub v1.30.4 h1:4r+dMPXSz/8/V1ZV7TXb9sT71z7iAcc0Y4wmJVjPLgc= -github.com/aws/aws-sdk-go-v2/service/resiliencehub v1.30.4/go.mod h1:E2eHCs6AP0Cbd/ybgu5o6GQzTPDDcZsyxufzbQOp2bY= -github.com/aws/aws-sdk-go-v2/service/resourceexplorer2 v1.17.6 h1:WYnJp7XLZv6vJ2Axgcn47DumaXgPSkWxKp+8hL5g5ZI= -github.com/aws/aws-sdk-go-v2/service/resourceexplorer2 v1.17.6/go.mod h1:rMeCGU1Fk8JtLMf9kWQxtaUaRDEGOJkGNedJuayjFTo= -github.com/aws/aws-sdk-go-v2/service/resourcegroups v1.29.3 h1:ydDDSNE36VbioP+xbfab1nYP5SDTOR5V8ZcUvZBImr4= -github.com/aws/aws-sdk-go-v2/service/resourcegroups v1.29.3/go.mod h1:pXO3jDiaYQ49dzcDP/Mtz1VoTLEtqjnuINWeJXv+ktk= -github.com/aws/aws-sdk-go-v2/service/resourcegroupstaggingapi v1.26.6 h1:PwbxovpcJvb25k019bkibvJfCpCmIANOFrXZIFPmRzk= -github.com/aws/aws-sdk-go-v2/service/resourcegroupstaggingapi v1.26.6/go.mod h1:Z4xLt5mXspLKjBV92i165wAJ/3T6TIv4n7RtIS8pWV0= -github.com/aws/aws-sdk-go-v2/service/rolesanywhere v1.17.5 h1:fYXMgp0V6C5ndZosonHNh8J/xs1aBMfz5qANMlphHV4= -github.com/aws/aws-sdk-go-v2/service/rolesanywhere v1.17.5/go.mod h1:fGrCQme6bxmDiu+Ppun1qOWmoNSIMbIy5UKFIOaTF8o= -github.com/aws/aws-sdk-go-v2/service/route53 v1.53.0 h1:UglIEyurCqfzZkjNdYAuXUGFu/FNWMKP5eorzggvXe8= -github.com/aws/aws-sdk-go-v2/service/route53 v1.53.0/go.mod h1:wi1naoiPnCQG3cyjsivwPON1ZmQt/EJGxFqXzubBTAw= -github.com/aws/aws-sdk-go-v2/service/route53domains v1.29.4 h1:8qeQjFNXdLd8+4YNVspNHjUrc0wmfrUievd+fOde838= -github.com/aws/aws-sdk-go-v2/service/route53domains v1.29.4/go.mod h1:/dfYzVaLi84gzj8D7RXrF7KIgOBJ4Zk7jp7gQVltBTg= -github.com/aws/aws-sdk-go-v2/service/route53profiles v1.5.9 h1:zDOaPWYn4k8yY8pRQUmJQUACPzRNu8ChPMvCA96XWlg= 
-github.com/aws/aws-sdk-go-v2/service/route53profiles v1.5.9/go.mod h1:f/B7apleFy+Nxs6wY0pzA9UbIx0ldX30ZMvy1SO7tAU= -github.com/aws/aws-sdk-go-v2/service/route53recoverycontrolconfig v1.27.3 h1:W7llNxOpVt0M0ToRkGXUs5UjMkntd6+DDesE5A4YXt8= -github.com/aws/aws-sdk-go-v2/service/route53recoverycontrolconfig v1.27.3/go.mod h1:yXZ+EM/v38MqqCHl2fTS7Ftv7vLuwxkR4SG6qAkKCdQ= -github.com/aws/aws-sdk-go-v2/service/route53recoveryreadiness v1.22.4 h1:mLYxsH/6tzncWzXTMt0SRp3BradtNrlM1va9Qa2AfQw= -github.com/aws/aws-sdk-go-v2/service/route53recoveryreadiness v1.22.4/go.mod h1:9GWWA+r8JCyTMm3X3xUBJxU7o/+v4SMlksksVyMEmkc= -github.com/aws/aws-sdk-go-v2/service/route53resolver v1.36.0 h1:gkR6ADqZBV4RzK+FZVI818Rula1i85/G3JlGnn6FDY0= -github.com/aws/aws-sdk-go-v2/service/route53resolver v1.36.0/go.mod h1:lQW5vqGKTvNpIJ0DVG7dVyJ02OZnSlcLFHgZUpZhEw8= -github.com/aws/aws-sdk-go-v2/service/rum v1.24.4 h1:PF+oU9cTdUFQ3nW+A2qarZQF5txhjRgu8xUotk6y2BA= -github.com/aws/aws-sdk-go-v2/service/rum v1.24.4/go.mod h1:0E3Cb8i2piw7fqp157xGd9tKYbc6r+V2UW7sKzNbw/k= -github.com/aws/aws-sdk-go-v2/service/s3 v1.83.0 h1:5Y75q0RPQoAbieyOuGLhjV9P3txvYgXv2lg0UwJOfmE= -github.com/aws/aws-sdk-go-v2/service/s3 v1.83.0/go.mod h1:kUklwasNoCn5YpyAqC/97r6dzTA1SRKJfKq16SXeoDU= -github.com/aws/aws-sdk-go-v2/service/s3control v1.60.0 h1:uVNDtWESoQ5Mm+O6FERGOaxLxcmUJ/gj5/2zmdznTsQ= -github.com/aws/aws-sdk-go-v2/service/s3control v1.60.0/go.mod h1:uZDSKJgJ3w3MOjtuvrYMTI7APdGNycg7srBGzaclI+s= -github.com/aws/aws-sdk-go-v2/service/s3outposts v1.29.4 h1:oZjDliGfblCLGHBlw1CTTHaVYB6MkD+ss5AxhqoX1K0= -github.com/aws/aws-sdk-go-v2/service/s3outposts v1.29.4/go.mod h1:E2HKzJfiZE7AfaaPKwKyuHsFCT6CMQx+xA+RBfvNMKY= -github.com/aws/aws-sdk-go-v2/service/s3tables v1.5.0 h1:Y4Jkb371eWF3VDKppy2OBFJqBm+wEXsmkHu9NB5Xvo8= -github.com/aws/aws-sdk-go-v2/service/s3tables v1.5.0/go.mod h1:fTauvBZjNMRnXoEDSo+FFAW0BuLiWpilnB7dz8lnqhY= -github.com/aws/aws-sdk-go-v2/service/sagemaker v1.200.1 h1:EdANB2MVaCwY/YPKyqsdBgHo8DgsGb+Zp6qo/6zfHOw= 
-github.com/aws/aws-sdk-go-v2/service/sagemaker v1.200.1/go.mod h1:uRG58IrTnRkk83JKfW9BgMpU1MKuHtcwdiBfQyC7agw= -github.com/aws/aws-sdk-go-v2/service/scheduler v1.13.10 h1:rehUqeN8NgQew7PvE/6XeaVyeDXj9fVhM2FMt/PNOM0= -github.com/aws/aws-sdk-go-v2/service/scheduler v1.13.10/go.mod h1:6g2NPTPm0cx1YV1zYJbWXz80wn+xyX0JSBixqRSC99o= -github.com/aws/aws-sdk-go-v2/service/schemas v1.29.5 h1:gCVa2/ufz9Wus7Tw3flUsqwUMyk8oEuTPDcYX9xWuVk= -github.com/aws/aws-sdk-go-v2/service/schemas v1.29.5/go.mod h1:tQVkDFNskR9bKFWpMUtgOMNM1hpL3oAuPRzESx/z73U= -github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.35.7 h1:d+mnMa4JbJlooSbYQfrJpit/YINaB30JEVgrhtjZneA= -github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.35.7/go.mod h1:1X1NotbcGHH7PCQJ98PsExSxsJj/VWzz8MfFz43+02M= -github.com/aws/aws-sdk-go-v2/service/securityhub v1.58.0 h1:5phjeFKLN8b67+CztpBzG9mUOPrsMVryJ9OToMOL21E= -github.com/aws/aws-sdk-go-v2/service/securityhub v1.58.0/go.mod h1:umtmPOd8goFeECUPe2Y1wigFIVrjwLR6GP5+eWmnUBw= -github.com/aws/aws-sdk-go-v2/service/securitylake v1.20.5 h1:Cqeb3ccjhi5YEOlqYP3BLtEcYM+SiZeKgPs2z6FLlvM= -github.com/aws/aws-sdk-go-v2/service/securitylake v1.20.5/go.mod h1:3TwtWEaAiv848bYEEiH9Yg79y5bXKyEDytGh7KUOeS0= -github.com/aws/aws-sdk-go-v2/service/serverlessapplicationrepository v1.25.4 h1:7eJSfME7No7WvRNFJI5o9fkBOOugNLXFqfn6AHHHguo= -github.com/aws/aws-sdk-go-v2/service/serverlessapplicationrepository v1.25.4/go.mod h1:9noDAe04msoEwCStlekEqsxzSj44udPquS2Zen4XS0k= -github.com/aws/aws-sdk-go-v2/service/servicecatalog v1.34.2 h1:24S4nRk43CjgWiOlzHDv42q+PyFBZh35q4hgT7d5+6E= -github.com/aws/aws-sdk-go-v2/service/servicecatalog v1.34.2/go.mod h1:O1PtvWmaeH2OMbGOpP0M717VrEtEm3L8s4t5Ehi844I= -github.com/aws/aws-sdk-go-v2/service/servicecatalogappregistry v1.31.4 h1:5LV110/+dsFA3aut0evkDAMxqYOEziZrmQnWo3+2vBQ= -github.com/aws/aws-sdk-go-v2/service/servicecatalogappregistry v1.31.4/go.mod h1:X4EuhIl3vZvJ8fIRTHOvFGblAeUnnZ9bsS5Awlyr1cU= -github.com/aws/aws-sdk-go-v2/service/servicediscovery v1.35.7 
h1:1eaP4/444jrv04HhJdwTHtgnyxWgxwdLjSYBGq+oMB4= -github.com/aws/aws-sdk-go-v2/service/servicediscovery v1.35.7/go.mod h1:czoZQabc2chvmV/ak4oGSNR9CbcUw2bef3tatmwtoIA= -github.com/aws/aws-sdk-go-v2/service/servicequotas v1.28.3 h1:FDzX6WOfsz45IVvbP5O987/hdzjciDPek+AO9BOfDXk= -github.com/aws/aws-sdk-go-v2/service/servicequotas v1.28.3/go.mod h1:y10lwaaUXvDg/W5tn2WN5WQEMw/2T4tg7AW5jISZVw0= -github.com/aws/aws-sdk-go-v2/service/ses v1.30.5 h1:MGqdFy1jSw9rBN5qxLpeFGtwLTev1LIbNX7v3mVPZ2U= -github.com/aws/aws-sdk-go-v2/service/ses v1.30.5/go.mod h1:Zftob00wu8O9xWSN1pdczm1U+E6yXk9znf+4lkt+3aQ= -github.com/aws/aws-sdk-go-v2/service/sesv2 v1.46.0 h1:uNAn3m1yFv+7j+tbsAh36kG8JvZlUgZbzdQPSC6W0m4= -github.com/aws/aws-sdk-go-v2/service/sesv2 v1.46.0/go.mod h1:dy6XqJdtxnu7f9sQVHFMnH1OSlAS62R5feiHQ8WsI4s= -github.com/aws/aws-sdk-go-v2/service/sfn v1.35.7 h1:W5ZFACjUxkIjjtMGG21GhJ3uJfV7ejEsOkJTQHMHrEY= -github.com/aws/aws-sdk-go-v2/service/sfn v1.35.7/go.mod h1:x82j2Ux2Qr9Qzdb47peCIIa8agq7z3k0Zf4TWHEAxjo= -github.com/aws/aws-sdk-go-v2/service/shield v1.30.4 h1:B0NxDxP+NI18kFZiMwUUKVSWEcBwviWjTl4KMfWa3X8= -github.com/aws/aws-sdk-go-v2/service/shield v1.30.4/go.mod h1:07i7GZpF9rdMNRPkfUa3ymRq63Liej297OCz6wiWmiM= -github.com/aws/aws-sdk-go-v2/service/signer v1.27.4 h1:nU51n8zv3mLn9wxZ0cxkToQRsrnqNLg5xJ0j//GF58c= -github.com/aws/aws-sdk-go-v2/service/signer v1.27.4/go.mod h1:6bQTKM4Ryk9vKxVd4fc7uNAw2TI+hfY+lMhkmmEmnWw= -github.com/aws/aws-sdk-go-v2/service/sns v1.34.7 h1:OBuZE9Wt8h2imuRktu+WfjiTGrnYdCIJg8IX92aalHE= -github.com/aws/aws-sdk-go-v2/service/sns v1.34.7/go.mod h1:4WYoZAhHt+dWYpoOQUgkUKfuQbE6Gg/hW4oXE0pKS9U= -github.com/aws/aws-sdk-go-v2/service/sqs v1.38.8 h1:80dpSqWMwx2dAm30Ib7J6ucz1ZHfiv5OCRwN/EnCOXQ= -github.com/aws/aws-sdk-go-v2/service/sqs v1.38.8/go.mod h1:IzNt/udsXlETCdvBOL0nmyMe2t9cGmXmZgsdoZGYYhI= -github.com/aws/aws-sdk-go-v2/service/ssm v1.60.0 h1:YuMspnzt8uHda7a6A/29WCbjMJygyiyTvq480lnsScQ= -github.com/aws/aws-sdk-go-v2/service/ssm v1.60.0/go.mod 
h1:IyVabkWrs8SNdOEZLyFFcW9bUltV4G6OQS0s6H20PHg= -github.com/aws/aws-sdk-go-v2/service/ssmcontacts v1.27.4 h1:HhwkyHRVIhGsBnezpwwH2wyrZQKooN9mYuW15/yM8rY= -github.com/aws/aws-sdk-go-v2/service/ssmcontacts v1.27.4/go.mod h1:bVvmYEJmT2xWBx269zEAWlQxJfkcfqyvB1JFjSRrzFc= -github.com/aws/aws-sdk-go-v2/service/ssmincidents v1.35.4 h1:u8qJueBRnlcWupt1Z6zXFDcHa4eGCV9REex7r9sQnhM= -github.com/aws/aws-sdk-go-v2/service/ssmincidents v1.35.4/go.mod h1:TwlNzbOPcE2NBuNLgZ1B6VfYJ0JG8WkEwOhKidrskW0= -github.com/aws/aws-sdk-go-v2/service/ssmquicksetup v1.4.4 h1:WcyN7tIJrpezkcj7c0WzlbjhOo6ojDa8QL5+jXvSZ24= -github.com/aws/aws-sdk-go-v2/service/ssmquicksetup v1.4.4/go.mod h1:kuwVH10c0+zEubkw7doHtNK6y5hsf6smmsRFBmK13Lo= -github.com/aws/aws-sdk-go-v2/service/ssmsap v1.20.4 h1:bzHaYrE7qNBohcfbhlXrBnV0/hk2J4fPysDxYwLCKok= -github.com/aws/aws-sdk-go-v2/service/ssmsap v1.20.4/go.mod h1:Z4RGgCEebqIsIhj6KJzTCJR7PmWwO9luAYplvGghIH0= -github.com/aws/aws-sdk-go-v2/service/sso v1.25.5 h1:AIRJ3lfb2w/1/8wOOSqYb9fUKGwQbtysJ2H1MofRUPg= -github.com/aws/aws-sdk-go-v2/service/sso v1.25.5/go.mod h1:b7SiVprpU+iGazDUqvRSLf5XmCdn+JtT1on7uNL6Ipc= -github.com/aws/aws-sdk-go-v2/service/ssoadmin v1.31.2 h1:3dryJFNlYa+kgSlHLAcFpQQOeE8g+h2XX3NoiLeB8Yw= -github.com/aws/aws-sdk-go-v2/service/ssoadmin v1.31.2/go.mod h1:EZSMWhfY55eXlAhKcQmkHMrRqwhOXWOiFcW9jrehv00= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.3 h1:BpOxT3yhLwSJ77qIY3DoHAQjZsc4HEGfMCE4NGy3uFg= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.3/go.mod h1:vq/GQR1gOFLquZMSrxUK/cpvKCNVYibNyJ1m7JrU88E= -github.com/aws/aws-sdk-go-v2/service/storagegateway v1.38.0 h1:VJuHn5d3gzArmJetVkngTKs0RxY6WhlWXt6RkYDPblA= -github.com/aws/aws-sdk-go-v2/service/storagegateway v1.38.0/go.mod h1:qtpDf/mpKyH0BYUVwct88hqiA9/znvnlxpoYcEZ0+Hw= -github.com/aws/aws-sdk-go-v2/service/sts v1.34.0 h1:NFOJ/NXEGV4Rq//71Hs1jC/NvPs1ezajK+yQmkwnPV0= -github.com/aws/aws-sdk-go-v2/service/sts v1.34.0/go.mod h1:7ph2tGpfQvwzgistp2+zga9f+bCjlQJPkPUmMgDSD7w= 
-github.com/aws/aws-sdk-go-v2/service/swf v1.28.6 h1:tKh4RXgqwnIV5+2LW53y0LAA/+sWUJSsSBUZqEQC7/I= -github.com/aws/aws-sdk-go-v2/service/swf v1.28.6/go.mod h1:uIxNj0mirk5vpL/vW1Ko/UwyxOigm+BAVgsM+l2psOA= -github.com/aws/aws-sdk-go-v2/service/synthetics v1.35.3 h1:CuUOM3i9r2U/kpqJDQj8p3Hi0if2N44gl5+qPXImpTM= -github.com/aws/aws-sdk-go-v2/service/synthetics v1.35.3/go.mod h1:xo1aJ/YLmmEMwVU9aOvN4E7jOKgoAAr+6VDAJv+MNl0= -github.com/aws/aws-sdk-go-v2/service/taxsettings v1.12.2 h1:WZPhlC3G/mYx99l/QHl95U/Ue+al6UfPFdTbhbbiRUs= -github.com/aws/aws-sdk-go-v2/service/taxsettings v1.12.2/go.mod h1:A77L7LITMEWcVhGBNUyJ0RZLNVdhTIkhfUSQiS85XZM= -github.com/aws/aws-sdk-go-v2/service/timestreaminfluxdb v1.10.5 h1:xmm2T4HJOkJL1SJwNh6xMEm6ocjE1Yh9YZTChHu98DY= -github.com/aws/aws-sdk-go-v2/service/timestreaminfluxdb v1.10.5/go.mod h1:L4tT63t++iYucM3oLQ5aUQcbvgunzP/xg+ztYfOd1EI= -github.com/aws/aws-sdk-go-v2/service/timestreamquery v1.31.2 h1:CjrXUjlaUS5MjPH6KMpZiFd3VNKDsgxQRSviE4TqWWc= -github.com/aws/aws-sdk-go-v2/service/timestreamquery v1.31.2/go.mod h1:HyCb70yWplefVU5tLdVevHVv1fK6XS11cltC8KX0B0s= -github.com/aws/aws-sdk-go-v2/service/timestreamwrite v1.31.2 h1:HF3f6gSaqLSvqsUVIV0yIPucA9LInGi0V1hK3zUAgxI= -github.com/aws/aws-sdk-go-v2/service/timestreamwrite v1.31.2/go.mod h1:IZWUn9UPCdqPKM+72yj4HxXMXpOCpP7vqW8dctO5Jlo= -github.com/aws/aws-sdk-go-v2/service/transcribe v1.47.0 h1:ASsg4ST0Lgr08AY5nT93g5/BrxJuezA7jI0XKiVK0y0= -github.com/aws/aws-sdk-go-v2/service/transcribe v1.47.0/go.mod h1:ezb4DgeVVNn4S7Wy8eRQ8sy+QHRtzbW7SAKHxZy4ndY= -github.com/aws/aws-sdk-go-v2/service/transfer v1.61.0 h1:5OkUYsglfPicnhv2WAgAzh4gR32iPiNZ2dPMtuzXCDE= -github.com/aws/aws-sdk-go-v2/service/transfer v1.61.0/go.mod h1:9RJji4Q+u/gu2Te56e+CUpUM2UTCt3sMxzLMXYSJ5Ok= -github.com/aws/aws-sdk-go-v2/service/verifiedpermissions v1.24.2 h1:d43lKGSX+AWhq5a8vpVuJNekcR5MtmB2JU22eaZZDRM= -github.com/aws/aws-sdk-go-v2/service/verifiedpermissions v1.24.2/go.mod h1:4Q5Mgk7BLvRrhwElOeMUlnx3K92I7b8HRNOhyTuousM= 
-github.com/aws/aws-sdk-go-v2/service/vpclattice v1.14.4 h1:01e650ADK6nHoSN4J/sFlblCXSiFITGHrkGPK+xG+Yw= -github.com/aws/aws-sdk-go-v2/service/vpclattice v1.14.4/go.mod h1:2gAi7UItKOn/1ccFbqRU+6ZtPo9b3ldnDRe9XqYtdYw= -github.com/aws/aws-sdk-go-v2/service/waf v1.26.4 h1:Fgu+w2R0151xwueAlfPYVaXlqWBi2TUUwfsUJrs++34= -github.com/aws/aws-sdk-go-v2/service/waf v1.26.4/go.mod h1:pSLiROd8QQ8WK5uEOOccapEjDwp1AOC5Ywt4d5D3I3w= -github.com/aws/aws-sdk-go-v2/service/wafregional v1.26.4 h1:+J6iG0+kp1vj5g5KhQHbZDHUidbwFK8LTUlI4t5tIL0= -github.com/aws/aws-sdk-go-v2/service/wafregional v1.26.4/go.mod h1:k6xElMGoSjEbhEpFJ/g+oP8f0/Eprf43xDr0kNG9Dug= -github.com/aws/aws-sdk-go-v2/service/wafv2 v1.63.1 h1:FqB3NmVKnZ/2oS9uv1AWunzCusEqSp9USs9BGx4EwSw= -github.com/aws/aws-sdk-go-v2/service/wafv2 v1.63.1/go.mod h1:zclPwcQ0Ju4OLYCUtaIp+BA5K5KdxjeBLpKd1HsMVqM= -github.com/aws/aws-sdk-go-v2/service/wellarchitected v1.35.4 h1:1oNo99IUfAPoMV/g1apd+J5QuYAunU788Wn4FmvzYt0= -github.com/aws/aws-sdk-go-v2/service/wellarchitected v1.35.4/go.mod h1:dkxQxiW/xGedseew2TBbkzEHQ6UHx1Op4ZiSv8dbuNg= -github.com/aws/aws-sdk-go-v2/service/workspaces v1.58.0 h1:NknK5ksEdnfMdPkhPedhoOQzb5bhd4/5ZNaYJTJRfaM= -github.com/aws/aws-sdk-go-v2/service/workspaces v1.58.0/go.mod h1:zzXFHVKbJU2FcSWXP2so1X/Ght2lrOrXUPt9M/kFOtI= -github.com/aws/aws-sdk-go-v2/service/workspacesweb v1.27.4 h1:XomoEUvUlwFKpmJ6qejWT+Gflkhe0WmSU3x5JGhGFYw= -github.com/aws/aws-sdk-go-v2/service/workspacesweb v1.27.4/go.mod h1:O46IBclbuIwlp3plLPOF+HHBDJdIDBqMycf6GPrISuE= -github.com/aws/aws-sdk-go-v2/service/xray v1.31.7 h1:zJL4lRhsNpSYggXij+GBfDmEVT809ElOkhElTKoxeTw= -github.com/aws/aws-sdk-go-v2/service/xray v1.31.7/go.mod h1:GJrs2NbUJi1iUwUjMC+OwC7H24YmDwyJVRUKzVIgA0c= -github.com/aws/smithy-go v1.22.4 h1:uqXzVZNuNexwc/xrh6Tb56u89WDlJY6HS+KC0S4QSjw= -github.com/aws/smithy-go v1.22.4/go.mod h1:t1ufH5HMublsJYulve2RKmHDC15xu1f26kHCp/HgceI= -github.com/beevik/etree v1.5.1 h1:TC3zyxYp+81wAmbsi8SWUpZCurbxa6S8RITYRSkNRwo= -github.com/beevik/etree 
v1.5.1/go.mod h1:gPNJNaBGVZ9AwsidazFZyygnd+0pAU38N4D+WemwKNs= +github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.9 h1:w9LnHqTq8MEdlnyhV4Bwfizd65lfNCNgdlNC6mM5paE= +github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.9/go.mod h1:LGEP6EK4nj+bwWNdrvX/FnDTFowdBNwcSPuZu/ouFys= +github.com/aws/aws-sdk-go-v2/service/accessanalyzer v1.44.6 h1:OXJuITvU8R/Npo5Wv2dgIFBYROm42kXAD16rk8qirs8= +github.com/aws/aws-sdk-go-v2/service/accessanalyzer v1.44.6/go.mod h1:PvvoZ5HHC38O5xDu4yKotP0ZLvnlbaJbHFOD8vppMQ8= +github.com/aws/aws-sdk-go-v2/service/account v1.28.6 h1:eEtL3V2CHjO4IDRwBelx1sZLRrz7vAsNUrDIb967FkI= +github.com/aws/aws-sdk-go-v2/service/account v1.28.6/go.mod h1:qi8Mmk5TSynuGi1KWkzrFIYfiKSaCv/lIxPPyPOlVfs= +github.com/aws/aws-sdk-go-v2/service/acm v1.37.6 h1:48oGbMpBSzihrU145gpjrxySIs+VNGCXu9kLTLAdJJg= +github.com/aws/aws-sdk-go-v2/service/acm v1.37.6/go.mod h1:4Xgg9iUMFMpWd19UokmUwBCU6fqNJ7LPo11YYt3/xl4= +github.com/aws/aws-sdk-go-v2/service/acmpca v1.44.5 h1:0aROQbnQ6nGlI1idLYuxx/mv4s+2I02RFyOA5MOlMQk= +github.com/aws/aws-sdk-go-v2/service/acmpca v1.44.5/go.mod h1:1whQS1vMFP9KQPLTc9dtqnJGjgJ6Sb80bkPoN8CPQ2k= +github.com/aws/aws-sdk-go-v2/service/amp v1.40.3 h1:pAbmvpyEwOX5OphEvNCjDMTZS+I4mNOBBK5Z6Ga6Zgo= +github.com/aws/aws-sdk-go-v2/service/amp v1.40.3/go.mod h1:Kaiyw5xthjYIWNvilHLlRiNwZa3owNXd+YgJs53hzDE= +github.com/aws/aws-sdk-go-v2/service/amplify v1.37.5 h1:mCxlw2Vuh5XZP6qwuUxr7bXWZ7drfbquJieS8VCIb+k= +github.com/aws/aws-sdk-go-v2/service/amplify v1.37.5/go.mod h1:HeH9qb/ftrO1k18S+BoWN3P/p83yS06x/Opny3ATXDs= +github.com/aws/aws-sdk-go-v2/service/apigateway v1.35.6 h1:v8RqEs++cq7uAYUusuwrHLNEFACv0nlICCBwV11p5sY= +github.com/aws/aws-sdk-go-v2/service/apigateway v1.35.6/go.mod h1:5EVcku5uDhMks5w1FwPL8hLKqJwCgIIbuF5th+vGQhE= +github.com/aws/aws-sdk-go-v2/service/apigatewayv2 v1.32.6 h1:k78ulhtPtIqMiZqq8bPkpJlx66VN8DmDIeRgrYpzehc= +github.com/aws/aws-sdk-go-v2/service/apigatewayv2 v1.32.6/go.mod h1:A5+OX0k1IIqRR4jR+zPgHpzKmEoLfpyY2xIrrJj8O98= 
+github.com/aws/aws-sdk-go-v2/service/appconfig v1.42.6 h1:e81OBhEpYUKh7Wg3hHiRE5zHpYPTgB4Sja0YWCBMivU= +github.com/aws/aws-sdk-go-v2/service/appconfig v1.42.6/go.mod h1:3lk8tz+bmjQEPEmdDF7zTDFHlqRFdn0zZvTa2cIe0r8= +github.com/aws/aws-sdk-go-v2/service/appfabric v1.16.6 h1:L50VB8yUNNequjYNhUm+MCjFCxfN6KMaIcpLgo679y8= +github.com/aws/aws-sdk-go-v2/service/appfabric v1.16.6/go.mod h1:v0gYYyI3wXm6R1nxhW068lcxbmh8wTBKIPafxXI7rf4= +github.com/aws/aws-sdk-go-v2/service/appflow v1.50.6 h1:Q30ADINfdo4matzYAGq4rPuQrjQKinuvdaHGnR9/Ksk= +github.com/aws/aws-sdk-go-v2/service/appflow v1.50.6/go.mod h1:2W0SilTCqSFglsMuZYKFmuZCOaBxGLnWvpik/GP+bT8= +github.com/aws/aws-sdk-go-v2/service/appintegrations v1.36.6 h1:kwnjEvDnDXPGC2yGF3ygvNs8EGnZFxzsX6bKWFA+j4c= +github.com/aws/aws-sdk-go-v2/service/appintegrations v1.36.6/go.mod h1:DqUWf8yC60AYaDfwq1zMLRxP9uT5R41FZwpZzIgEsWU= +github.com/aws/aws-sdk-go-v2/service/applicationautoscaling v1.40.5 h1:0t/Dr8fwxkc5fkhoeuYRpGiPowbLKi424s3oeLCusRU= +github.com/aws/aws-sdk-go-v2/service/applicationautoscaling v1.40.5/go.mod h1:NUciQYiEOln3pubY8iovZkWZdJrBTnoPPW3JTIk9QAI= +github.com/aws/aws-sdk-go-v2/service/applicationinsights v1.34.5 h1:HWvL7MWRel0n6W5msGcS2BllKX8OEH168656YH8IRNg= +github.com/aws/aws-sdk-go-v2/service/applicationinsights v1.34.5/go.mod h1:9jEkcPD8H2x5XTr4JKfuftpz4EoKAhrom5lQzLLCI6I= +github.com/aws/aws-sdk-go-v2/service/applicationsignals v1.16.0 h1:k5Gds31CrXttYeulwB6VjflGSXnRegRG2jKiWLimgHo= +github.com/aws/aws-sdk-go-v2/service/applicationsignals v1.16.0/go.mod h1:dB7ydHt6geh960yqkPjZZfA+qqLK577b0jifWU1ahy0= +github.com/aws/aws-sdk-go-v2/service/appmesh v1.34.6 h1:Wupdnc/3bA0GPzEEZInLvu4FHEmkHNHsG/xahSggcGw= +github.com/aws/aws-sdk-go-v2/service/appmesh v1.34.6/go.mod h1:yHte17Vasn4Ows3YO5zLC1MWX2Dw8by5KvgDm6XGSm8= +github.com/aws/aws-sdk-go-v2/service/apprunner v1.38.7 h1:gJCGw8gwiTYjLeTpCdwHFE60SRPN7tH2m0ScVYUZ4+Y= +github.com/aws/aws-sdk-go-v2/service/apprunner v1.38.7/go.mod 
h1:UiPYznwe6WwKIOwLlWgrjdKvfOVVQ7eaRzf+OC4BzM4= +github.com/aws/aws-sdk-go-v2/service/appstream v1.50.0 h1:W5ZoBalgNd/kh64XbSKhxzX49MsTuhJwoHsuT6fwcic= +github.com/aws/aws-sdk-go-v2/service/appstream v1.50.0/go.mod h1:aPmkM5vZVr/vBeP+czUKCYWAlewa3QCaCZGh6gWZfm8= +github.com/aws/aws-sdk-go-v2/service/appsync v1.51.6 h1:YsjIVoljoczbCUYFzTUhNkYjJlEreqXeuicq2wyvO9A= +github.com/aws/aws-sdk-go-v2/service/appsync v1.51.6/go.mod h1:j4cEEClULtta5LEg7OgxqGTz4k0ipCAvue7P7GGRLQI= +github.com/aws/aws-sdk-go-v2/service/arcregionswitch v1.2.8 h1:01m2bIxzwrVbFB6XADodX2JwSSlpKfarYZWczIdYNSU= +github.com/aws/aws-sdk-go-v2/service/arcregionswitch v1.2.8/go.mod h1:h5EaGwLxZGbeUEkwE9BWg+4lPwv42YgTqqQ/SH2bbB0= +github.com/aws/aws-sdk-go-v2/service/athena v1.55.6 h1:OC3hqQ29uyNsftVHwdbfHpDopEBViNFypjy9N5eDsMw= +github.com/aws/aws-sdk-go-v2/service/athena v1.55.6/go.mod h1:I1paYl0qAaXc+6AmLtylg4ApBC0/HEs5myhVIcy4Nng= +github.com/aws/aws-sdk-go-v2/service/auditmanager v1.45.6 h1:QD02o1P75R198cYX9Nt3flwM5HmXxsmWAhG+8Wef2ig= +github.com/aws/aws-sdk-go-v2/service/auditmanager v1.45.6/go.mod h1:ZZh2P2Vy29z/3Occ3o40d0P4IuwkaZJPKrSD1gukI6Y= +github.com/aws/aws-sdk-go-v2/service/autoscaling v1.59.3 h1:2tVkkifL19ZmmCRJyOudUuTNRzA1SYN7D32iEkB8CvE= +github.com/aws/aws-sdk-go-v2/service/autoscaling v1.59.3/go.mod h1:/Utcw7rzRwiW7C9ypYInnEtgyU7Nr8eG3+RFUUvuE1o= +github.com/aws/aws-sdk-go-v2/service/autoscalingplans v1.29.5 h1:YUHawBzbCFAqJzMjyIwHYRNyCJ2cF3cNmqZZcm2/Zqc= +github.com/aws/aws-sdk-go-v2/service/autoscalingplans v1.29.5/go.mod h1:3YNMqOSRPyr23RKCv8RRQz2B2xflT/nk1bZuogMnO8g= +github.com/aws/aws-sdk-go-v2/service/backup v1.49.0 h1:7hWlpBuCnlElrrJps5gmvr1zjPsNSXDdy8Qv2vYfEJI= +github.com/aws/aws-sdk-go-v2/service/backup v1.49.0/go.mod h1:5er5+2GO9YgfAvZ9VqDSf9HKrwKAtjVA5Fm83eXtkfM= +github.com/aws/aws-sdk-go-v2/service/batch v1.57.10 h1:C9unOW8pT063iGGpnNWonK+iRMnVR86iPnYdFaRmnqA= +github.com/aws/aws-sdk-go-v2/service/batch v1.57.10/go.mod h1:fl2yc8ac4mmMPh3ByJ6LRgdL25iPcQ3cUqhZl4R5chE= 
+github.com/aws/aws-sdk-go-v2/service/bcmdataexports v1.11.8 h1:4O1siNWkg2oMPNzma7AR1GZCQIkH233tl9bTtOaweUg= +github.com/aws/aws-sdk-go-v2/service/bcmdataexports v1.11.8/go.mod h1:Mm4OxLblLwMOAZjrNfDrltCqO/RKSa516DNDrapaZyw= +github.com/aws/aws-sdk-go-v2/service/bedrock v1.48.0 h1:PrP3JDj8+pMfjj6spKZ1Vwf9iSZC/+0NZYRBNXBu7hc= +github.com/aws/aws-sdk-go-v2/service/bedrock v1.48.0/go.mod h1:3sUHFSHdoib4v7JdqEGgxD2sIdTDikr4IpjBOgUAa0g= +github.com/aws/aws-sdk-go-v2/service/bedrockagent v1.50.6 h1:SQcm5+AnLYVNJP0K8yFRWTfEifhQenCaF+aPfqXf+fk= +github.com/aws/aws-sdk-go-v2/service/bedrockagent v1.50.6/go.mod h1:Jl3eDtXBZAze9w+aJO1oPzdk55CqOh+Tq9VhLTLQSRA= +github.com/aws/aws-sdk-go-v2/service/bedrockagentcorecontrol v1.10.0 h1:HhOMc4AhT430DBGfv5CGHvc4AQeGe/Yz4i8p/5xe6sE= +github.com/aws/aws-sdk-go-v2/service/bedrockagentcorecontrol v1.10.0/go.mod h1:Es+CYDVSPzyRIJaDDzxvoBNRc+AZbevIL8d+q1+3J5w= +github.com/aws/aws-sdk-go-v2/service/billing v1.8.0 h1:qffsTlqnTPtokF6Y4dlw4YUWPYtOw+PCQyv0gJ8o1PE= +github.com/aws/aws-sdk-go-v2/service/billing v1.8.0/go.mod h1:HaQjETFBieRL+1p0qWCYDzDe/JnI4oJM4UiO3qNEPTo= +github.com/aws/aws-sdk-go-v2/service/budgets v1.39.2 h1:HxSdjcZ9NPVG4ZdznJMUjqjR0DPBWSId0xKUbTfl/Eg= +github.com/aws/aws-sdk-go-v2/service/budgets v1.39.2/go.mod h1:+0hQkFGrrsp6x9hxk/n7EOscPVfwrBkTojUCthoHquM= +github.com/aws/aws-sdk-go-v2/service/chatbot v1.14.6 h1:QWlDo8QuBHtT6LYYf5opmQtUY4ntkcU0mjmmmbZiMoM= +github.com/aws/aws-sdk-go-v2/service/chatbot v1.14.6/go.mod h1:QSe+uEkQQHwIPKFfaZtbZWrNaRq5esdmdQspTPV4apY= +github.com/aws/aws-sdk-go-v2/service/chime v1.40.5 h1:kaAYFY5mvQHeyEX9pamOBly0Vx7f3Al3dCD9p3JJAnE= +github.com/aws/aws-sdk-go-v2/service/chime v1.40.5/go.mod h1:gXN/LFE/H9vql+trNeg5MwcHYB2brbgv4j0pnphrxXU= +github.com/aws/aws-sdk-go-v2/service/chimesdkmediapipelines v1.26.6 h1:JT7X1tDbHo/0D0UQh7zi2YlHbH8zaLTgH1zKEPx/kUo= +github.com/aws/aws-sdk-go-v2/service/chimesdkmediapipelines v1.26.6/go.mod h1:ROgSEKmD43CMB1KWQSPNovieWq6DPPSu/MCdVbwO6II= 
+github.com/aws/aws-sdk-go-v2/service/chimesdkvoice v1.27.0 h1:NMNhVPuxmv+8l/XktsHQTqyk7vhVsqzKEzePMdQWvgE= +github.com/aws/aws-sdk-go-v2/service/chimesdkvoice v1.27.0/go.mod h1:8y4H/7OXnf2YSf2ybz8aqQzxbl5pW/yiolNFSSaZ41g= +github.com/aws/aws-sdk-go-v2/service/cleanrooms v1.36.0 h1:AI06e0v0FtjcNk3XNsJmp8fiAAOceRzErDjdwN0WPj8= +github.com/aws/aws-sdk-go-v2/service/cleanrooms v1.36.0/go.mod h1:VyV0Il6a4RYvrqhA6tvNpV13LEBFk77Vu1FMTJs4qyA= +github.com/aws/aws-sdk-go-v2/service/cloud9 v1.33.5 h1:ZoUqKpdIPkGeGRY1v81GCaVoELHgtUYEV0WF67skUhk= +github.com/aws/aws-sdk-go-v2/service/cloud9 v1.33.5/go.mod h1:Q2RJfC6edAyk5hr3gJMS8WANFoTPGIo2SlAzQkZsT0A= +github.com/aws/aws-sdk-go-v2/service/cloudcontrol v1.28.6 h1:jqP2tyJOEj7qDoLyqyKGnDMAW+Lmi0WwNB2OruNao6w= +github.com/aws/aws-sdk-go-v2/service/cloudcontrol v1.28.6/go.mod h1:GIOHLcWXFDrHSzJJFMNRxLsfA++pOENXO2QVvMT0mJI= +github.com/aws/aws-sdk-go-v2/service/cloudformation v1.67.0 h1:dXbv06SZ39MYWL70KgFdMgFl9ZLfHe3AWIiTs0V2LAE= +github.com/aws/aws-sdk-go-v2/service/cloudformation v1.67.0/go.mod h1:/q63oDWCyO4xLLRiVYpwufJDwSkL0IbC5epFNJne8JQ= +github.com/aws/aws-sdk-go-v2/service/cloudfront v1.55.0 h1:NjW6Wq4xfGF3DVKBXj51dE6P7VXMYup/W8pAekNo91k= +github.com/aws/aws-sdk-go-v2/service/cloudfront v1.55.0/go.mod h1:dYwFVhUsRZt7COcGP23ei0lY8gX8ZSHrbyX49VB93MA= +github.com/aws/aws-sdk-go-v2/service/cloudfrontkeyvaluestore v1.12.8 h1:dlFZVF9TpiFvPsNO8uN20iHsrpJrALbQbwGbs7cVL9c= +github.com/aws/aws-sdk-go-v2/service/cloudfrontkeyvaluestore v1.12.8/go.mod h1:MylnqogyYEsq0wODWlXmewzDOLXvDuhPpyAORIDSOOc= +github.com/aws/aws-sdk-go-v2/service/cloudhsmv2 v1.34.5 h1:vjOGGSctnKWctwndBRg6fnUQnXiIQ/zuf5km/L4q/zg= +github.com/aws/aws-sdk-go-v2/service/cloudhsmv2 v1.34.5/go.mod h1:WvZiU3vTIX6sm3FLFNHe05MWjKM4cqOPkfwT1lSj7hw= +github.com/aws/aws-sdk-go-v2/service/cloudsearch v1.31.6 h1:WqphYeWJNaQRl5taLdy6ipI8EHsQGi8rxghXGvBSpkM= +github.com/aws/aws-sdk-go-v2/service/cloudsearch v1.31.6/go.mod h1:pOvrSeFE/QezgirkaSVZcEtEo1UvlnZy/XlYo5pAJ8c= 
+github.com/aws/aws-sdk-go-v2/service/cloudtrail v1.53.6 h1:lo/qOnIAmeBGsfXa92XpKFolYCEVRqxRYd2V171eU24= +github.com/aws/aws-sdk-go-v2/service/cloudtrail v1.53.6/go.mod h1:q4HzizMPYR4kPnUmcY7sjTCdB0hoxw84mQTgtjJ50ug= +github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.51.1 h1:GqVafesryYki8Lw/yRzLcoSeaT06qSAIbLoZLqeY0ks= +github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.51.1/go.mod h1:Kg/y+WTU5U8KtZ8vYYz0CyiR8UCBbZkpsT7TeqIkQ2M= +github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs v1.58.2 h1:JPW6ND8muLsBwALrf/VXikyokUmGWNKZa88qZWwFGWA= +github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs v1.58.2/go.mod h1:3Dh12t3s/KrpEm7HNfg5RH+XWzi9LW2QI7velkc61ac= +github.com/aws/aws-sdk-go-v2/service/codeartifact v1.38.6 h1:adRnHtafjEL6BdPyNvVvsljxGlI3wQALwnTLDGDyu3o= +github.com/aws/aws-sdk-go-v2/service/codeartifact v1.38.6/go.mod h1:Jo4nWheCppk/3QfXOcYBouw3XfQSLS/lqXn7GQIhYEQ= +github.com/aws/aws-sdk-go-v2/service/codebuild v1.67.5 h1:IjkLl7nLhE8w32Zv9NKBUdbB6YsFHIN0Y7qek4LO7wQ= +github.com/aws/aws-sdk-go-v2/service/codebuild v1.67.5/go.mod h1:1ayIXbJj20GhTn4zvTQ5mKmDYMg5gs9ICsqR+WvjWrw= +github.com/aws/aws-sdk-go-v2/service/codecatalyst v1.20.8 h1:ngiN4E8pNW15lffBIVfbO6IOSR/3NiRbBTL6XprV2UA= +github.com/aws/aws-sdk-go-v2/service/codecatalyst v1.20.8/go.mod h1:u8qstOf0Jhr2PB2Xko0PirjruTv4Cp/Rwhw7ZGxgpcI= +github.com/aws/aws-sdk-go-v2/service/codecommit v1.32.6 h1:11qvnjhmVnkb9UFQdagNFmAZV8CNb0hznYUGIEIVMZM= +github.com/aws/aws-sdk-go-v2/service/codecommit v1.32.6/go.mod h1:RLtIEolTsnW3TOw3fHTAXb4H2xNjcpKa/b1nKsTmAh8= +github.com/aws/aws-sdk-go-v2/service/codeconnections v1.10.5 h1:wcDfIGYi7pNS33qRzewQhvAs1FGZA+GrypDce+5m3TU= +github.com/aws/aws-sdk-go-v2/service/codeconnections v1.10.5/go.mod h1:XVNEBA5S5hDvYpzK0//pWFemUsx2LKxYa9Ymkg62Z5E= +github.com/aws/aws-sdk-go-v2/service/codedeploy v1.34.6 h1:TcWPqk5hTjCeMz8tWtLPV3nUBWZ7xTFCql8JeM+Jyxw= +github.com/aws/aws-sdk-go-v2/service/codedeploy v1.34.6/go.mod h1:00HnOuKp1Q/g5sCAzV8dDJWq6fts0D/1xC5DlLWjXwA= 
+github.com/aws/aws-sdk-go-v2/service/codeguruprofiler v1.29.5 h1:sQevsmx5Sg8WkyR/P+Vq/tqpJCDzKr+tvZYhuP6lMtE= +github.com/aws/aws-sdk-go-v2/service/codeguruprofiler v1.29.5/go.mod h1:6vjCrFSI1R02YCIFRqCqcKxOzKWSgib4Q9RPK8yhHS0= +github.com/aws/aws-sdk-go-v2/service/codegurureviewer v1.34.5 h1:WByNI1rera7rLq8qRSh+0uhQSVMDM228fZqOiUyeJb0= +github.com/aws/aws-sdk-go-v2/service/codegurureviewer v1.34.5/go.mod h1:oYDh1yjbugYgvcdCWMbsZcZmp8QQ1OBCqaX2qdXiPvI= +github.com/aws/aws-sdk-go-v2/service/codepipeline v1.46.6 h1:z/82UoTxxmA27/yygFEnx+uIdYY1zyK37vCPKZoXyb4= +github.com/aws/aws-sdk-go-v2/service/codepipeline v1.46.6/go.mod h1:8n32TPTWAAHJ0kAuD0z8TGR0z84ZfYFm9ILkHgkV5Do= +github.com/aws/aws-sdk-go-v2/service/codestarconnections v1.34.6 h1:jSXTzwJsreMbTdUaBRhB0PnB+sWfq+awXxZJorfP8U8= +github.com/aws/aws-sdk-go-v2/service/codestarconnections v1.34.6/go.mod h1:bkkAghnfsExMwlQ9u3NIoMbhUhpUDq1VL5vaaD6KrKI= +github.com/aws/aws-sdk-go-v2/service/codestarnotifications v1.31.6 h1:q0ma8a3t28BbHb0/DSMF6VXOouvdk42kqjLzP1YGMMM= +github.com/aws/aws-sdk-go-v2/service/codestarnotifications v1.31.6/go.mod h1:lOY7xQqacZtC8sN+BEH8S3NCBoSEvLSeMHBVZfCynsk= +github.com/aws/aws-sdk-go-v2/service/cognitoidentity v1.33.6 h1:75RJ5nNarn2EViDSYRPV18H4PXAkugQy1Xjr4HJ9R3M= +github.com/aws/aws-sdk-go-v2/service/cognitoidentity v1.33.6/go.mod h1:dN8D7VkYmVwbH+MVVxiqtldtkTO7ovQiVUkCWa8v6PU= +github.com/aws/aws-sdk-go-v2/service/cognitoidentityprovider v1.57.7 h1:1LPBlVrceFenrbWOZBGu8KTmX8TTMpZfRxX0HCnSjz0= +github.com/aws/aws-sdk-go-v2/service/cognitoidentityprovider v1.57.7/go.mod h1:l8KDrD4EZQwTuM69YK3LFZ4c9VbNHrzaQJjJsoIFqfo= +github.com/aws/aws-sdk-go-v2/service/comprehend v1.40.6 h1:LtBU4r66PzkAdivreTlrlNWH/CQ6PG7sAKlrcdz1d4Y= +github.com/aws/aws-sdk-go-v2/service/comprehend v1.40.6/go.mod h1:tbNB6UTE8b8fVgKsLl8IOc50jyxZ0fGqiVgQTWfNdLg= +github.com/aws/aws-sdk-go-v2/service/computeoptimizer v1.47.5 h1:dilS2NJ0F1Jwhi4A8NuZJAGq7HwFQ/GE4GJ+IoHWzx4= 
+github.com/aws/aws-sdk-go-v2/service/computeoptimizer v1.47.5/go.mod h1:GP4KTSWjdb7GofokIXNbVP9CQDIKTv13nfqSBiq2hnA= +github.com/aws/aws-sdk-go-v2/service/configservice v1.58.2 h1:sfLW2pTtZZHGM7Ksp3PdMqyoLjoD7dHzPblLLjcYnBk= +github.com/aws/aws-sdk-go-v2/service/configservice v1.58.2/go.mod h1:/+Y1FQ6hhvY+6moAqnf/lrSgNbckvrHoNmxTMJ5WhaU= +github.com/aws/aws-sdk-go-v2/service/connect v1.142.0 h1:2LYf+Q6UtACzAXZ+ylgDnimXdccqbrUlv01Tp9/BBBM= +github.com/aws/aws-sdk-go-v2/service/connect v1.142.0/go.mod h1:RlZrDWMyt5HH92j6fpBcBLjo5FiJw61jNAgTjCAQY5g= +github.com/aws/aws-sdk-go-v2/service/connectcases v1.32.0 h1:4nmhQ24WaJ4e38AKtFJzFSPvoiLDZCK0e2Edm7u+Tdk= +github.com/aws/aws-sdk-go-v2/service/connectcases v1.32.0/go.mod h1:pWZuObOfZSGHvL29N0S0JvGpsvk8xDlJPgX92QTxnTE= +github.com/aws/aws-sdk-go-v2/service/controltower v1.26.6 h1:xJchWovBC1h9lvvcysi4kjDT+ZxycuJc+jt/Y6YELho= +github.com/aws/aws-sdk-go-v2/service/controltower v1.26.6/go.mod h1:7T5FMpZ7QYi3p35ugZH2Wdebzw/bAAQ+HVsdtxT31LI= +github.com/aws/aws-sdk-go-v2/service/costandusagereportservice v1.33.6 h1:Fy5Lp0Gn0aHairTF8nj3HNsml9NuLGuKFXsGlSCXMK0= +github.com/aws/aws-sdk-go-v2/service/costandusagereportservice v1.33.6/go.mod h1:WjmUookbSIF13EUgmIm3iJbsOR4ig0BZtPtLojlmiEo= +github.com/aws/aws-sdk-go-v2/service/costexplorer v1.57.0 h1:OPm/yHm06nNtL47/ITE/TEUgB1yZV7GU20cmH4qUe2A= +github.com/aws/aws-sdk-go-v2/service/costexplorer v1.57.0/go.mod h1:5PEFaK4UypksO7xXX+aZ2zJkTA4WYOCaCJ7jfHtvlrs= +github.com/aws/aws-sdk-go-v2/service/costoptimizationhub v1.20.6 h1:bm/4K9y+tPlOm7LCw7Oul6j4+twkYN9pMZgf6czWEIE= +github.com/aws/aws-sdk-go-v2/service/costoptimizationhub v1.20.6/go.mod h1:Ind97CkUL/Sp8b9+eXlZoJzOyAjgSl+zX2NODYGl/5M= +github.com/aws/aws-sdk-go-v2/service/customerprofiles v1.53.0 h1:MgnY9bNxeOQ2jPCwkQ5PdNVNJtdLlGWsql4BCEA3oKs= +github.com/aws/aws-sdk-go-v2/service/customerprofiles v1.53.0/go.mod h1:Zp3IfPlmLCI1qU7It4GyqNKmTNLjNP33ZS9XdJSHY38= +github.com/aws/aws-sdk-go-v2/service/databasemigrationservice v1.57.7 
h1:ARnadIHN7MAAMkjNsBScWgV7pRhrhXtBnXMG8YDkDNE= +github.com/aws/aws-sdk-go-v2/service/databasemigrationservice v1.57.7/go.mod h1:ct/KZc7aF1iJDdvVtIMUBjbZrIespvcZDXfiobANsVw= +github.com/aws/aws-sdk-go-v2/service/databrew v1.38.5 h1:uAyzLnETV1vpvVakHdGNOSnpYtmCPbc8F3e+rjooC+E= +github.com/aws/aws-sdk-go-v2/service/databrew v1.38.5/go.mod h1:TyoXF8AvpXcKkxjlW7E+Aax/FBDLoObTyby6zRffi14= +github.com/aws/aws-sdk-go-v2/service/dataexchange v1.39.6 h1:ywlBAsu4TUhGcocmioq7k6709WHhVZx6yHHcuAma1C8= +github.com/aws/aws-sdk-go-v2/service/dataexchange v1.39.6/go.mod h1:uu4l98l3f19G6MGsNf3EWcbrpRTwyErJ9PLvI/XaXwg= +github.com/aws/aws-sdk-go-v2/service/datapipeline v1.30.5 h1:lIw4H3QLLfAV6OFUFNf2rSQOD8ufSfN9sXciRpUIsv8= +github.com/aws/aws-sdk-go-v2/service/datapipeline v1.30.5/go.mod h1:lOMJLtcZ8roDJadGeAVnqdvva6RpG66Rzl3qmyHibQU= +github.com/aws/aws-sdk-go-v2/service/datasync v1.55.0 h1:K2gDOAe8OdZ6lnau8ran0va1vL97/JxANxJ1d5VYHz4= +github.com/aws/aws-sdk-go-v2/service/datasync v1.55.0/go.mod h1:GN+XSZ4Gv+QAfsCkBTEqLlmI766xItwX1KIsNJlPCJo= +github.com/aws/aws-sdk-go-v2/service/datazone v1.43.0 h1:B8F31trY6utWMnh3n3bq9e13Nerz29FRzXT9ixRAJj0= +github.com/aws/aws-sdk-go-v2/service/datazone v1.43.0/go.mod h1:JtfS1guKOGCe3cKwSGrTm0grzQiMy1cfxfEAoMjygLM= +github.com/aws/aws-sdk-go-v2/service/dax v1.29.1 h1:sYEBub6ZSeElTUaelJkffTHj6HdmUsTF5H4B2XI/OiQ= +github.com/aws/aws-sdk-go-v2/service/dax v1.29.1/go.mod h1:FQ3H4KZGNJ7xNstwjgtKtWM99QtU1y2Y2vGdOSqEPZ8= +github.com/aws/aws-sdk-go-v2/service/detective v1.37.7 h1:VlbfflT4Weqvq2cRzhbGv3gKvG2T7rhdwLvl8QohkIU= +github.com/aws/aws-sdk-go-v2/service/detective v1.37.7/go.mod h1:JpUF7Kimgvqm5MBT3YiqVFmLRNqf+9xgzXzaJrCnlts= +github.com/aws/aws-sdk-go-v2/service/devicefarm v1.35.6 h1:HCNMZXY/HhpvwpesD0foAVzSqOqkK7QQdgRkIqrUbBM= +github.com/aws/aws-sdk-go-v2/service/devicefarm v1.35.6/go.mod h1:D2NbfDF3qEeaPwl+EDLGIhq5sD4jqoTkv8o1rw37IaE= +github.com/aws/aws-sdk-go-v2/service/devopsguru v1.39.6 h1:251cRFp3KrRyboVXOFhpurd9SlJ7GOk+lMxsRlfKb7Y= 
+github.com/aws/aws-sdk-go-v2/service/devopsguru v1.39.6/go.mod h1:XhFyJv1IDmCaKiPUwWlj9+gV1mgpoR4BspX8CpfRbR8= +github.com/aws/aws-sdk-go-v2/service/directconnect v1.37.6 h1:xlqasn95WDPq8rFwMuLft8K6EXiBXA4gbElNy3k1qAE= +github.com/aws/aws-sdk-go-v2/service/directconnect v1.37.6/go.mod h1:ihMttb6cmTsmRw8/jdBT0WSR2cmxP+IdU9gIBbDs5mc= +github.com/aws/aws-sdk-go-v2/service/directoryservice v1.38.0 h1:/SjJpaHDl2Tcjq7wu0BXBr3y+iVhJGCUySBd40C38dQ= +github.com/aws/aws-sdk-go-v2/service/directoryservice v1.38.0/go.mod h1:9OhFQ4k8x6wvJRY3T3qQe4F/YQLo0iZB0Opq+2Mh80o= +github.com/aws/aws-sdk-go-v2/service/dlm v1.34.6 h1:x1q9I5nwC6JBo/k0CHPRUOtLsMTBoKTYBWQXbS0s0lU= +github.com/aws/aws-sdk-go-v2/service/dlm v1.34.6/go.mod h1:GMoqS22ylKwRmUIqPv2yQiYyfi9p4sv7D345nawzTgk= +github.com/aws/aws-sdk-go-v2/service/docdb v1.47.0 h1:Q1lDF/tOln11iUOnnQJd9RM8M2tbqSHCOzQfCwqQRuE= +github.com/aws/aws-sdk-go-v2/service/docdb v1.47.0/go.mod h1:yK1MzY7O/rmmti02gkvk+IdJZ/tCvKpcGZU2YxoWUPg= +github.com/aws/aws-sdk-go-v2/service/docdbelastic v1.19.6 h1:ZDolNXobqGnz7sLKh1b8yI4T4BrMjFbtIbmZRKmMmrI= +github.com/aws/aws-sdk-go-v2/service/docdbelastic v1.19.6/go.mod h1:oOz1QSkosu6fWaSQPmS9HYIkeqPs7FH+jugGa/bGQdA= +github.com/aws/aws-sdk-go-v2/service/drs v1.35.6 h1:awl8S++TupDDTsCRvrdNHUicQljM6liiHIBAJk+ej2w= +github.com/aws/aws-sdk-go-v2/service/drs v1.35.6/go.mod h1:p72nRrztE6ntt9W54vgPV3M5b520x8kbxqiDmHjFyjA= +github.com/aws/aws-sdk-go-v2/service/dsql v1.9.8 h1:9SzhOaXCRSMmyKariyaeP7hYcAdFkQk/1x3Z88V5t6o= +github.com/aws/aws-sdk-go-v2/service/dsql v1.9.8/go.mod h1:2Oz6G8F+PlNW4RK40ISLe8fTyLRvSlFOjdaWFcaFl9c= +github.com/aws/aws-sdk-go-v2/service/dynamodb v1.51.0 h1:TfglMkeRNYNGkyJ+XOTQJJ/RQb+MBlkiMn2H7DYuZok= +github.com/aws/aws-sdk-go-v2/service/dynamodb v1.51.0/go.mod h1:AdM9p8Ytg90UaNYrZIsOivYeC5cDvTPC2Mqw4/2f2aM= +github.com/aws/aws-sdk-go-v2/service/ec2 v1.257.0 h1:YoBAUV2TU4O/0xnOarB+0wgdomnIby+lbPtuTpdS5D0= +github.com/aws/aws-sdk-go-v2/service/ec2 v1.257.0/go.mod 
h1:M8WWWIfXmxA4RgTXcI/5cSByxRqjgne32Sh0VIbrn0A= +github.com/aws/aws-sdk-go-v2/service/ecr v1.50.5 h1:jzjNyiIrXJHumV1hwofcQLpIZtcDw+vPQL00rLI3s4g= +github.com/aws/aws-sdk-go-v2/service/ecr v1.50.5/go.mod h1:UtPKcYVHY6RrV9EaaM1KZGNaf9dgviFdsT6xoFMLQsM= +github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.37.6 h1:pc4te9Px2oORmxWlJXaX/OkHQsdQ3RiPvuZU7525FZc= +github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.37.6/go.mod h1:BeseuedjcZNw+lGyqDIbapD3hvvsEVkjkISUIQLzem4= +github.com/aws/aws-sdk-go-v2/service/ecs v1.65.1 h1:pBbXc1fGRbrYl7NFujuubMmEFEp7CJiKTBsoDOIUkuk= +github.com/aws/aws-sdk-go-v2/service/ecs v1.65.1/go.mod h1:fu6WrWUHYyPRjzYO13UDXA7O6OShI8QbH5YSl9SOJwQ= +github.com/aws/aws-sdk-go-v2/service/efs v1.40.8 h1:vwqXyeluOHOgkonTOxvFqGgMNh0y5H6r23+8RA5ifZo= +github.com/aws/aws-sdk-go-v2/service/efs v1.40.8/go.mod h1:xJFehblB1voatQStn4hPPTnr+ueQ3UKxjSCro66JliE= +github.com/aws/aws-sdk-go-v2/service/eks v1.74.2 h1:GKqBur7gp6rnYbMZXh2+89f8g+/bu26ZKwpXfXrno80= +github.com/aws/aws-sdk-go-v2/service/eks v1.74.2/go.mod h1:f1/1x766rRjLVUk94exobjhggT1MR3vO4wxglqOvpY4= +github.com/aws/aws-sdk-go-v2/service/elasticache v1.50.5 h1:VEdPmtEs1EzHXOcKmKwaN6rwwatgw4k12n08U7qML5w= +github.com/aws/aws-sdk-go-v2/service/elasticache v1.50.5/go.mod h1:venvSIu8icYqJTZ2meX3NIQypX5t4R2E6Cr9wdgHCQ8= +github.com/aws/aws-sdk-go-v2/service/elasticbeanstalk v1.33.7 h1:zWmgdRblU92HDqT37r+kvORdWAZCiG3z6SvPKcE2D8M= +github.com/aws/aws-sdk-go-v2/service/elasticbeanstalk v1.33.7/go.mod h1:6hnLvLpLNgqMXL2uaEf/FacDYErGspeQHZn/3U+6H6k= +github.com/aws/aws-sdk-go-v2/service/elasticloadbalancing v1.33.6 h1:+YIp+dygyeHjUd7u9kv2MluNwnbiNeUITH4aZ4UgiPs= +github.com/aws/aws-sdk-go-v2/service/elasticloadbalancing v1.33.6/go.mod h1:iyqISGdbs/IFj3D7GyiRcVjNnbEYcF3NZrRlZnp7IWs= +github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2 v1.51.0 h1:Zy1yjx+R6cR4pAwzFFJ8nWJh4ri8I44H76PDJ77tcJo= +github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2 v1.51.0/go.mod 
h1:RuZwE3p8IrWqK1kZhwH2TymlHLPuiI/taBMb8vrD39Q= +github.com/aws/aws-sdk-go-v2/service/elasticsearchservice v1.37.6 h1:+f1A4QwqPiWy71nr5qlvLMeaR7UjpzDgCAG2MhhmJeo= +github.com/aws/aws-sdk-go-v2/service/elasticsearchservice v1.37.6/go.mod h1:pFAUfULfSY46LfS7WPd9q6IcdM/tWm3qTpEZhCSgtKI= +github.com/aws/aws-sdk-go-v2/service/elastictranscoder v1.32.6 h1:81IE+qNRipRKlwOUZzVI3NSOtewZnLqUqOA5UGAV3ME= +github.com/aws/aws-sdk-go-v2/service/elastictranscoder v1.32.6/go.mod h1:k9An7RySCxNbERamBuwDoXaXMTWXQqEusn3/eAoyN94= +github.com/aws/aws-sdk-go-v2/service/emr v1.54.5 h1:tA10GZKqcDLOD5JfeRTpu72X5KqxBDJBqWnn720HhzA= +github.com/aws/aws-sdk-go-v2/service/emr v1.54.5/go.mod h1:zESYrv3WuVUTyMIXwR8OoRAkcgj941Mdp154AXjONAY= +github.com/aws/aws-sdk-go-v2/service/emrcontainers v1.40.2 h1:DXc0q23esbZXny49LUg289Yoy6Vjd58z0TV6jsGdKgM= +github.com/aws/aws-sdk-go-v2/service/emrcontainers v1.40.2/go.mod h1:btaFcfwXxksqE0d6wBhIy3VopO0dWw1KWctELo7P+wk= +github.com/aws/aws-sdk-go-v2/service/emrserverless v1.36.6 h1:jBV+JfRW8laF4hQrPoVj7Xxd45hrXg6fvNn0/nOEm3s= +github.com/aws/aws-sdk-go-v2/service/emrserverless v1.36.6/go.mod h1:6jyzPmx8zLW3K5oP/CBMH3VFhQyf3G6vPR1vaz3HsTI= +github.com/aws/aws-sdk-go-v2/service/eventbridge v1.45.5 h1:MoTJpDDOR1gmfIC6Qc7gS+uS0hlqF7RcphMqAfp8r2U= +github.com/aws/aws-sdk-go-v2/service/eventbridge v1.45.5/go.mod h1:fgyvv0FpfhbcmGgcgyDltW9K2UMs1DOBBjnkyX9JC1I= +github.com/aws/aws-sdk-go-v2/service/evidently v1.28.5 h1:TCJCjCNhQ79VvthLKT3r4Ku3SU19rGpoAovI6rydRIs= +github.com/aws/aws-sdk-go-v2/service/evidently v1.28.5/go.mod h1:UOLThVkUgc5apzB1G4oemgrigr3BYpQEbD183CX1k5s= +github.com/aws/aws-sdk-go-v2/service/evs v1.5.2 h1:RrUB7uEIO4LYwaqRwK7KL+zH7irCQDfFOueZCHXFig8= +github.com/aws/aws-sdk-go-v2/service/evs v1.5.2/go.mod h1:0j+d5nDYF1oBpk7MWqkl5VIWnSNhWD9KiWTj+t/U7Y4= +github.com/aws/aws-sdk-go-v2/service/finspace v1.33.6 h1:H3llnOFZFz/g5v4cAA6gUQ54XUJf74SQCyKuLlCZfi4= +github.com/aws/aws-sdk-go-v2/service/finspace v1.33.6/go.mod 
h1:a2D/sV/YKWlPNmGYZ0OVmX2typzjwO7IwZ2NUfBgaEI= +github.com/aws/aws-sdk-go-v2/service/firehose v1.41.6 h1:BaLiLj0REx6fAxK6KYTeHXv9njpyqnLqrARYC8QhkLQ= +github.com/aws/aws-sdk-go-v2/service/firehose v1.41.6/go.mod h1:kKWlKjg9gI2uOLNQG1GnTBaYfBVQKJC0z99GIPQLFXw= +github.com/aws/aws-sdk-go-v2/service/fis v1.37.5 h1:yqaWoYLetwAKcnR74PvZjgaFRabbWDnllrFOYu6EEV0= +github.com/aws/aws-sdk-go-v2/service/fis v1.37.5/go.mod h1:htMJekf0GQU+ZgqHm5nkrpGrFQk9Sd/VX3mazLer3M4= +github.com/aws/aws-sdk-go-v2/service/fms v1.44.6 h1:Kkp6omiLoa7KDN8I/YesQzQ+Czi8a7iFsz18a2I0avE= +github.com/aws/aws-sdk-go-v2/service/fms v1.44.6/go.mod h1:0MmE+RS7FFf+ld2RVTLQSJumC56UPfnYj20jwC0F7IA= +github.com/aws/aws-sdk-go-v2/service/fsx v1.62.0 h1:by2Uy4YkY+kddlqUXziLUo+ORa5d5Zba7+9tDyB+nSc= +github.com/aws/aws-sdk-go-v2/service/fsx v1.62.0/go.mod h1:IYOHN0ZkhnOc76Wq3jA9p7EBmcyUrD7ovglUA7thwAA= +github.com/aws/aws-sdk-go-v2/service/gamelift v1.46.6 h1:gbD+Jd5bKvfkeieI9nBk4pyBEGUCKGuC3uubBcnfjPQ= +github.com/aws/aws-sdk-go-v2/service/gamelift v1.46.6/go.mod h1:qG2t3ko7BtX5Ix+c9V8xNiQbHyMhL3Cci8NemnNGU9M= +github.com/aws/aws-sdk-go-v2/service/glacier v1.31.6 h1:iwc7B/ZCzm8dhAunHXYU3ppf+OKjtxQmFaVWAi0KVCw= +github.com/aws/aws-sdk-go-v2/service/glacier v1.31.6/go.mod h1:diGbfsRR7oW+2CZPfdR/IC1LC9Vt33OVKHbSmmKaUo8= +github.com/aws/aws-sdk-go-v2/service/globalaccelerator v1.34.6 h1:1up3eQrlvZ0FEzNLFCpRa06ZnBO+w43MqgGjeQJVoXI= +github.com/aws/aws-sdk-go-v2/service/globalaccelerator v1.34.6/go.mod h1:z4vejjg7HKiZPR12s6irgnDOpFw0hTJukQm/tkwmgJU= +github.com/aws/aws-sdk-go-v2/service/glue v1.131.0 h1:ZqcfaqOBjTmdKbSK4FcTlFrUPiezJ/NTulfD5Pn5x5E= +github.com/aws/aws-sdk-go-v2/service/glue v1.131.0/go.mod h1:iH5M4d6X8IdmFUwOVdnoCEt7eqhjYZuw4gEI0ebsQjs= +github.com/aws/aws-sdk-go-v2/service/grafana v1.31.6 h1:SoVlnBHm+Gq5LI4Z4tIxLAfOG1wCFA5puE1vwB/ldHA= +github.com/aws/aws-sdk-go-v2/service/grafana v1.31.6/go.mod h1:ABsoTppDCXrP8CFfMIkaoYdC87U51t0mMxZbDFZGKkQ= +github.com/aws/aws-sdk-go-v2/service/greengrass v1.32.6 
h1:olwkT6lMeGYJ18lPObZKMaXOS3a69GoecEtGmR2Umyc= +github.com/aws/aws-sdk-go-v2/service/greengrass v1.32.6/go.mod h1:GS2vTGoqO4jHpYqP0avBbcVmkojcOYKtiBvCrVCg8Pc= +github.com/aws/aws-sdk-go-v2/service/groundstation v1.37.6 h1:LhXUztHSIjfmUHkahRMI+NeYBwv5XcFMyXAcw1+/5W0= +github.com/aws/aws-sdk-go-v2/service/groundstation v1.37.6/go.mod h1:h6rk6CTK+SoxaYWtdwyrjgWI01Q2+figfhS4fLJCtD4= +github.com/aws/aws-sdk-go-v2/service/guardduty v1.65.0 h1:dKlP/56A7vI4bN09mAlxIh9JaY/aZZnNLQkqot0io4U= +github.com/aws/aws-sdk-go-v2/service/guardduty v1.65.0/go.mod h1:0cFCtC9mK9eNAHpKNc5/A59dqjYdwPnE1vL5STupNsk= +github.com/aws/aws-sdk-go-v2/service/healthlake v1.35.5 h1:FP9XMTzx31mocJLJjPJEpaQIDy9cAfYRdclIV/YfRVw= +github.com/aws/aws-sdk-go-v2/service/healthlake v1.35.5/go.mod h1:kVyA+EB5+V1zoCKEd7DR2isRChxswqaafB3kFl5eM0Q= +github.com/aws/aws-sdk-go-v2/service/iam v1.47.7 h1:0EDAdmMTzsgXl++8a0JZ+Yx0/dOqT8o/EONknxlQK94= +github.com/aws/aws-sdk-go-v2/service/iam v1.47.7/go.mod h1:NkNbn/8/mFrPUq0Kg6EM6c0+GaTLG+aPzXxwB7RF5xo= +github.com/aws/aws-sdk-go-v2/service/identitystore v1.32.7 h1:k6s7ZccfZzFfRcko46b+wpiTihVSFb8oAM3zwRTNso0= +github.com/aws/aws-sdk-go-v2/service/identitystore v1.32.7/go.mod h1:4xOhHo77B1qfs09L1DJq5luMO2cSILnc+8UkLvzvtHw= +github.com/aws/aws-sdk-go-v2/service/imagebuilder v1.48.0 h1:F3LuF59HfxqQqWA8lrjZmRwvScpfc6pvkrzHwFZwryA= +github.com/aws/aws-sdk-go-v2/service/imagebuilder v1.48.0/go.mod h1:B44b3XYDjkYgLbEpyTWrK+0k8+N1PZoBO8PdJUF4Cn4= +github.com/aws/aws-sdk-go-v2/service/inspector v1.30.5 h1:a9Yl3PlsRSiOlfg7qCpAPTnL/yhfsEFrPuyMjnnmUkA= +github.com/aws/aws-sdk-go-v2/service/inspector v1.30.5/go.mod h1:WPIOZddPJtTqr0mjtd6YfwXyKJiSlOCb6ZWZ3f3xIac= +github.com/aws/aws-sdk-go-v2/service/inspector2 v1.44.6 h1:G3SqMciqPsatTbPmq2lLebpGjanwqfkBGCKStf4nSbE= +github.com/aws/aws-sdk-go-v2/service/inspector2 v1.44.6/go.mod h1:idr72RZY3+DwomnH2ZTYE0Y/+rwKdtdneJWWGLlylmU= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.1 
h1:oegbebPEMA/1Jny7kvwejowCaHz1FWZAQ94WXFNCyTM= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.1/go.mod h1:kemo5Myr9ac0U9JfSjMo9yHLtw+pECEHsFtJ9tqCEI8= +github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.0 h1:X0FveUndcZ3lKbSpIC6rMYGRiQTcUVRNH6X4yYtIrlU= +github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.0/go.mod h1:IWjQYlqw4EX9jw2g3qnEPPWvCE6bS8fKzhMed1OK7c8= +github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.11.9 h1:7ILIzhRlYbHmZDdkF15B+RGEO8sGbdSe0RelD0RcV6M= +github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.11.9/go.mod h1:6LLPgzztobazqK65Q5qYsFnxwsN0v6cktuIvLC5M7DM= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.9 h1:5r34CgVOD4WZudeEKZ9/iKpiT6cM1JyEROpXjOcdWv8= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.9/go.mod h1:dB12CEbNWPbzO2uC6QSWHteqOg4JfBVJOojbAoAUb5I= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.9 h1:wuZ5uW2uhJR63zwNlqWH2W4aL4ZjeJP3o92/W+odDY4= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.9/go.mod h1:/G58M2fGszCrOzvJUkDdY8O9kycodunH4VdT5oBAqls= +github.com/aws/aws-sdk-go-v2/service/internetmonitor v1.25.5 h1:1bnvwYxuKCTMiF/MavITDTRnCCOdCbmNWyFbfKMw2wA= +github.com/aws/aws-sdk-go-v2/service/internetmonitor v1.25.5/go.mod h1:Ok83qcqfCvpkKU655IHorvYG0NMPr30P5H8ng9uNaQk= +github.com/aws/aws-sdk-go-v2/service/invoicing v1.6.8 h1:rdiHnyg2/1Wu3/BVCY0o4a5RGs/bF5NTjqscInYWGJ4= +github.com/aws/aws-sdk-go-v2/service/invoicing v1.6.8/go.mod h1:TniL6d9prBubA9ZcfCJo9Q9r2cyh2c4C9csZMUDYuBw= +github.com/aws/aws-sdk-go-v2/service/iot v1.69.5 h1:ufbRtUcNLpfKjE4MXGnqNwF2gXh5s9CUlgfL3nDyd5I= +github.com/aws/aws-sdk-go-v2/service/iot v1.69.5/go.mod h1:xkUGPoYRFoe0i19cUfIMeocCOWG5Ona7MWMeMqqL8eE= +github.com/aws/aws-sdk-go-v2/service/ivs v1.47.6 h1:L4uWqGDzaapkNiPW1LUnHVGkrjynMG5vyGeu+YATgio= +github.com/aws/aws-sdk-go-v2/service/ivs v1.47.6/go.mod h1:i6n4c/4w7kw455UViuMob7/0YoWB24uXkzal1udFz8g= 
+github.com/aws/aws-sdk-go-v2/service/ivschat v1.21.5 h1:pwetfaLlSr67dBmlJtVNUsBgSdOzk02NfAo1MDqhyQM= +github.com/aws/aws-sdk-go-v2/service/ivschat v1.21.5/go.mod h1:mbBWB0NzEUOjY8FarIsbCWo6DXqylv1mf+B77uT/xlQ= +github.com/aws/aws-sdk-go-v2/service/kafka v1.43.6 h1:gd9n9V4YTRcg5VJfDYBRVJHQBaUMpbKOKWzAhHzyhcA= +github.com/aws/aws-sdk-go-v2/service/kafka v1.43.6/go.mod h1:061TSd3Z7fxrRzFbo8VniS3VErBjATTfC7+HsSUW11g= +github.com/aws/aws-sdk-go-v2/service/kafkaconnect v1.27.6 h1:YRPt0iTJeUfSFCnZMlIVokoSgotLHBYoKlaQnuclokM= +github.com/aws/aws-sdk-go-v2/service/kafkaconnect v1.27.6/go.mod h1:Z9w9e4XGxePy+tPjsgNKYiJZXPTFysEbKqpc72dzhO0= +github.com/aws/aws-sdk-go-v2/service/kendra v1.60.6 h1:rGhNWcIhP7DqFve8zlZItzX8UslsM26aSCRTC6M2hGs= +github.com/aws/aws-sdk-go-v2/service/kendra v1.60.6/go.mod h1:3Amyw8Cu+M3VliBNu6PkOvVLLGS9eyzmkwBBYIygr60= +github.com/aws/aws-sdk-go-v2/service/keyspaces v1.23.6 h1:VvcHwWDWJs6sbM62LI1UKo3ONVBXSP+sPiYduTOZ5Ug= +github.com/aws/aws-sdk-go-v2/service/keyspaces v1.23.6/go.mod h1:YjFnZVw57O46J972EzA4Ny7HObGqymOoFCymCiLXdDE= +github.com/aws/aws-sdk-go-v2/service/kinesis v1.40.5 h1:GWAVIxhYlkFX76WGG2gus5eyonXaKPv00VpiSqHzXDo= +github.com/aws/aws-sdk-go-v2/service/kinesis v1.40.5/go.mod h1:u/oFMSASsn9QNBRop5lrIpuNwHZwEXjYxNQp7sHFSxc= +github.com/aws/aws-sdk-go-v2/service/kinesisanalytics v1.30.6 h1:OL3s9Y927XoMxO4Jod29/eIl1vyS5NDnDesJaLkhjeE= +github.com/aws/aws-sdk-go-v2/service/kinesisanalytics v1.30.6/go.mod h1:1szjTKn1bM+Ce2Pf2g57WqudQXi+YZodbrYMZY2Awzw= +github.com/aws/aws-sdk-go-v2/service/kinesisanalyticsv2 v1.36.7 h1:lePrOEBRe3FMsApDx6QNfiVsUR0ePYdeE+KkIMM6vp0= +github.com/aws/aws-sdk-go-v2/service/kinesisanalyticsv2 v1.36.7/go.mod h1:AUtvJ7STwd00cd5mT3Vt9WH0LjF56nOWAZzPx+T5wUg= +github.com/aws/aws-sdk-go-v2/service/kinesisvideo v1.32.5 h1:8oO+Su+tqdsF1wll/Zm0eenGi/0lXQljG5sFerZvFXQ= +github.com/aws/aws-sdk-go-v2/service/kinesisvideo v1.32.5/go.mod h1:Mu9FDrPD7xsAZf9KhiL+WFEtnEgO7x6Kf8OJceaiJRU= +github.com/aws/aws-sdk-go-v2/service/kms 
v1.45.6 h1:Br3kil4j7RPW+7LoLVkYt8SuhIWlg6ylmbmzXJ7PgXY= +github.com/aws/aws-sdk-go-v2/service/kms v1.45.6/go.mod h1:FKXkHzw1fJZtg1P1qoAIiwen5thz/cDRTTDCIu8ljxc= +github.com/aws/aws-sdk-go-v2/service/lakeformation v1.45.5 h1:YlhZqR9Ma0x7q83cNpis7YJ1w4u532+ohJ7MSHqZno0= +github.com/aws/aws-sdk-go-v2/service/lakeformation v1.45.5/go.mod h1:e+RSq7q4W1pe3kt1kFBWQLvCsF3LEa6YF695iPjwUqo= +github.com/aws/aws-sdk-go-v2/service/lambda v1.78.0 h1:o6244M0Z5ryHuO05Fm+03CCZIQSh+qmZgYbnbOuaRGo= +github.com/aws/aws-sdk-go-v2/service/lambda v1.78.0/go.mod h1:LFNm6TvaFI2Li7U18hJB++k+qH5nK3TveIFD7x9TFHc= +github.com/aws/aws-sdk-go-v2/service/launchwizard v1.13.6 h1:aZ8MXpLB17q6THeWkvpYVheZTf2oOMgaeXYxQLfq8vY= +github.com/aws/aws-sdk-go-v2/service/launchwizard v1.13.6/go.mod h1:5IsG5hZ0YnGeIsZvB88ALqptUB5TmyA68Vh2JtOuolQ= +github.com/aws/aws-sdk-go-v2/service/lexmodelbuildingservice v1.33.5 h1:ArkKQMxVIaauILktZS/FMc9u52qGpC3OSAA9AQAnvgU= +github.com/aws/aws-sdk-go-v2/service/lexmodelbuildingservice v1.33.5/go.mod h1:LRYpBu4UZPuBggAl0Q62MaDRDlDYYE/DR/Q3Nr5HnTQ= +github.com/aws/aws-sdk-go-v2/service/lexmodelsv2 v1.56.6 h1:Ujpm8Qr81ge34jboS6NQu4WK/gpuwjNQI/cW8G2w1+g= +github.com/aws/aws-sdk-go-v2/service/lexmodelsv2 v1.56.6/go.mod h1:xiuNneOma5q5l0VtLAR6MuQ3K5sJlUCz51HB0IdJgvc= +github.com/aws/aws-sdk-go-v2/service/licensemanager v1.36.6 h1:jSPCSRdv3Ad2BZtaCO3PWJQmoOe6WXqrG79IoHrTpl4= +github.com/aws/aws-sdk-go-v2/service/licensemanager v1.36.6/go.mod h1:E+dz2RTwFIOG6cKRJiln5khKJmROa6RvP7DKiEEPCFE= +github.com/aws/aws-sdk-go-v2/service/lightsail v1.50.0 h1:JOLRYFWMMKUABCp94HHfo0JBVQDVTLXOvWWphjpBBiQ= +github.com/aws/aws-sdk-go-v2/service/lightsail v1.50.0/go.mod h1:WEOSRNyfIfvgrD9MuSIGrogKyuFahaVMziVq1pHI0NQ= +github.com/aws/aws-sdk-go-v2/service/location v1.49.6 h1:hNRkhRPvAHAqZapl7BPcjls1BAnykokUkF71E0iYgPU= +github.com/aws/aws-sdk-go-v2/service/location v1.49.6/go.mod h1:aRLVKgDTnlsf0moRfee8FTWv9SghW/x3W0W33Y//ZDY= +github.com/aws/aws-sdk-go-v2/service/lookoutmetrics v1.36.6 
h1:9yP3vAUac8JYDnenwuOuPmpIRBgCVidxWN6hZvab1lE= +github.com/aws/aws-sdk-go-v2/service/lookoutmetrics v1.36.6/go.mod h1:A6750m3A2OebBhSwoXKosN5Vciq/JiY2piPsmZauiwc= +github.com/aws/aws-sdk-go-v2/service/m2 v1.25.6 h1:1pA10Dziy1XrpFNf6aND3Y43imaLL9w6U6lfMBUNR0E= +github.com/aws/aws-sdk-go-v2/service/m2 v1.25.6/go.mod h1:X3NB31GJKffp5h+SnU4aMKMarKd9Bd7jRFs2y/Ihve8= +github.com/aws/aws-sdk-go-v2/service/macie2 v1.49.6 h1:0lg+Mhd61q16NUpxwnNpAhP7sxSOO5H5/l+QxerZuIc= +github.com/aws/aws-sdk-go-v2/service/macie2 v1.49.6/go.mod h1:hAUjN7Dlx1i1Sjbx67uumWB7iwXOA/PM8kNOiw4ygjY= +github.com/aws/aws-sdk-go-v2/service/mediaconnect v1.45.0 h1:4cBXNlo8XYFq/leCpTVuZX2qAp779SIg3wkMPd5FDjo= +github.com/aws/aws-sdk-go-v2/service/mediaconnect v1.45.0/go.mod h1:pyFeP6f26HHtJJeNU4LqcD3R1Zh9RMwZjiluEsgZlYE= +github.com/aws/aws-sdk-go-v2/service/mediaconvert v1.82.6 h1:gk0yVOnKaRKGyWifpqw3aGeEGB4EO77UYGXnucl93Ek= +github.com/aws/aws-sdk-go-v2/service/mediaconvert v1.82.6/go.mod h1:YZeaUGHZihZA8/pC2hi248p8Y8S4oPMZLXso2RF4hsQ= +github.com/aws/aws-sdk-go-v2/service/medialive v1.84.0 h1:4VnT0CicQgGzIkzbfIz9FcCvl/A25JclsZ/jkkP2sGs= +github.com/aws/aws-sdk-go-v2/service/medialive v1.84.0/go.mod h1:ZWOvuk7slOmdlSnDIY7gr00d/HUEKAYT15oPc2oMprw= +github.com/aws/aws-sdk-go-v2/service/mediapackage v1.39.6 h1:xF2FWETQbjkGKK8fcmaJ2bO7i53wwRbsnExg5uTswyI= +github.com/aws/aws-sdk-go-v2/service/mediapackage v1.39.6/go.mod h1:hSlgOOXXYOtXOH8PUE07ZctOeDR9doOvtvpM6oR7z54= +github.com/aws/aws-sdk-go-v2/service/mediapackagev2 v1.31.3 h1:Tbh1uS0VAEw75762wftgeXlrpK2AO2tZjObiilryUCQ= +github.com/aws/aws-sdk-go-v2/service/mediapackagev2 v1.31.3/go.mod h1:5w01h9/Nmf0FUimiQGY9bYPU/of1Nz9oxiGbNxzUYT8= +github.com/aws/aws-sdk-go-v2/service/mediapackagevod v1.39.6 h1:8QUyNYiWzhsbQJITt/v+SNwdA/wH7B8YnDO/9GLeX2g= +github.com/aws/aws-sdk-go-v2/service/mediapackagevod v1.39.6/go.mod h1:QHMKOy8M9YcyxacWIIije66JGOPn0Uv911y3QN5xvOE= +github.com/aws/aws-sdk-go-v2/service/mediastore v1.29.6 
h1:iafXeKlVqhC8/ScR2CzQlWzDm+B3BNcQD7SzZ+gE1LM= +github.com/aws/aws-sdk-go-v2/service/mediastore v1.29.6/go.mod h1:pyztXbSyAGD+TmvQhGva28W3KgwEsjZ39d/tM5E3WLk= +github.com/aws/aws-sdk-go-v2/service/memorydb v1.32.0 h1:R+jvAaitNKrnuBDpAxM/Pi/1JD5cRqwL3cQolngYf+M= +github.com/aws/aws-sdk-go-v2/service/memorydb v1.32.0/go.mod h1:ls5Htz+L0oFjuS/8Md/RLSLCFUpGkvlnZ2GLZ4NZguw= +github.com/aws/aws-sdk-go-v2/service/mgn v1.37.5 h1:BMu425Ntx40waGQ0/g6BeX1F/sYvKdIcO+ABys5Jv9s= +github.com/aws/aws-sdk-go-v2/service/mgn v1.37.5/go.mod h1:F3YMviBP/8gRnYBh8j+6MUw/c3ID0l3IMS37kHAo22Y= +github.com/aws/aws-sdk-go-v2/service/mq v1.34.4 h1:Oo18RmcBezamgeYgLQs0TvQte9qnBsT/h0FXaVR/su0= +github.com/aws/aws-sdk-go-v2/service/mq v1.34.4/go.mod h1:Ix0YBjTUQkaENu7moWEIOuRPvSXCankc9G8+6tCHPFE= +github.com/aws/aws-sdk-go-v2/service/mwaa v1.39.6 h1:E2/4c34w/DlacnWCB00i5vK84Q+R4THQekMFYKEQ6EU= +github.com/aws/aws-sdk-go-v2/service/mwaa v1.39.6/go.mod h1:VGfnBe0/1AmUklInis8fWYCsX1sytShIyJaAskYui8k= +github.com/aws/aws-sdk-go-v2/service/neptune v1.42.5 h1:tfn0wZ5FFDV8USRyR1pbwVuMjc/8lxCXGiXtj4pGP2Y= +github.com/aws/aws-sdk-go-v2/service/neptune v1.42.5/go.mod h1:31tyZ8ZVqFkyO8beZNHcEOQZGn/BkuSpj92xz0DV47M= +github.com/aws/aws-sdk-go-v2/service/neptunegraph v1.21.5 h1:M6POQvRc86IBNXMGMnigORWW7TuI4DQw6w9/7a22AmE= +github.com/aws/aws-sdk-go-v2/service/neptunegraph v1.21.5/go.mod h1:SDNZtDXmPLQgX3rhJKQilrATByCSvfefeGBzmyHWV9A= +github.com/aws/aws-sdk-go-v2/service/networkfirewall v1.57.1 h1:ft8fBc54sf9RPLzZ9C3R2ICWlsJI7gNXzhe4KM6hcMU= +github.com/aws/aws-sdk-go-v2/service/networkfirewall v1.57.1/go.mod h1:aR3+jhGdmzkcu69LUu3uEfWSz48rSWZpRZ1UiW1brzY= +github.com/aws/aws-sdk-go-v2/service/networkmanager v1.39.7 h1:UqDxJzpwgrEi/AuVaXCqN3g0zysr0K/RPY7kxj3kAFs= +github.com/aws/aws-sdk-go-v2/service/networkmanager v1.39.7/go.mod h1:2lpNczbmNGrUPnMa04jr4J2BxQ3jv2pYErTQlELWESg= +github.com/aws/aws-sdk-go-v2/service/networkmonitor v1.12.6 h1:5MKZrK70vz0m3A/IbE00XgWdX6VARrQMe/lTtlh1VIc= 
+github.com/aws/aws-sdk-go-v2/service/networkmonitor v1.12.6/go.mod h1:r99VUsxYLupfRyaZ517RoASgRyspKDvj+T3Ec2bsJLg= +github.com/aws/aws-sdk-go-v2/service/notifications v1.7.4 h1:Ur4HSvZrzDevCVcXHLzj/VYKLYznFsIuXXzsqggt0HE= +github.com/aws/aws-sdk-go-v2/service/notifications v1.7.4/go.mod h1:jp/DVjlMmlD2RaRCAYs0IRy2k5XkppwVNc9wo4oYkkQ= +github.com/aws/aws-sdk-go-v2/service/notificationscontacts v1.5.8 h1:SoLCt2Ig7kir4Vv8VtFVqADtaE1iSrC/f2U9vVgCe4M= +github.com/aws/aws-sdk-go-v2/service/notificationscontacts v1.5.8/go.mod h1:TqON/FD1E56TNUpLtwI6m1PbjmuZRhrvbo1ZGOIbzto= +github.com/aws/aws-sdk-go-v2/service/oam v1.22.5 h1:To+7SakfElByzTR10RrFGAXRH2uWBDvMPTFOYQY0Wrw= +github.com/aws/aws-sdk-go-v2/service/oam v1.22.5/go.mod h1:huNDbI1vKiFhIuo8Q4hK09wk1kN+RFdbxrrjSsCZtCQ= +github.com/aws/aws-sdk-go-v2/service/odb v1.5.0 h1:e7gPLy+UbKMdrAgbN/E06dAq/OyTMV3YCQH85rAlAXA= +github.com/aws/aws-sdk-go-v2/service/odb v1.5.0/go.mod h1:sNgPICtv0QshuEoMhFiRT5rBWnpXMFvLhly/Hu0MqYw= +github.com/aws/aws-sdk-go-v2/service/opensearch v1.52.5 h1:gkLP1OOn0/gBPD125+Ax+9DKuGGsu9TwvbZJ4bBgcsY= +github.com/aws/aws-sdk-go-v2/service/opensearch v1.52.5/go.mod h1:c1RKL9jCAUP+7ZtY+99yWcWxRFBsQ3LG5Klkj5PEoJs= +github.com/aws/aws-sdk-go-v2/service/opensearchserverless v1.26.4 h1:46xDV+bDfEaoI4CFYA/SASoD17PhdIfRcnybENoeA68= +github.com/aws/aws-sdk-go-v2/service/opensearchserverless v1.26.4/go.mod h1:a+I7XPLBv75d9aI6TvmcMn2osIxiZ8rxjSy/OZQQAlw= +github.com/aws/aws-sdk-go-v2/service/organizations v1.45.3 h1:JcKtlBBVZpu01E+WS5s6MerJezxVNW0arRinXwd8eMg= +github.com/aws/aws-sdk-go-v2/service/organizations v1.45.3/go.mod h1:oiUEFEALhJA54ODqgmRr3o5rZ+SOXARVOj4Gl3d935M= +github.com/aws/aws-sdk-go-v2/service/osis v1.20.2 h1:fmH/ayvn5AcQ2jnFMtbd57CwTtuOKcZLdfl3eGZP7oc= +github.com/aws/aws-sdk-go-v2/service/osis v1.20.2/go.mod h1:t3KwhHJvvtof6DJzL7JtGY4+cxPsQumV1snXgg0+aww= +github.com/aws/aws-sdk-go-v2/service/outposts v1.57.0 h1:1gMI04UYdiTWeebGwhcRRCmf4ypoGt0fwPa/z2J+vFU= 
+github.com/aws/aws-sdk-go-v2/service/outposts v1.57.0/go.mod h1:AX2swwJXvwgCE0695M12Vw8p/JU2PQNC/5J9ur1Zd9s= +github.com/aws/aws-sdk-go-v2/service/paymentcryptography v1.25.2 h1:jAVEnaD69Jhc3ePMMuKLj7Y6NNRNN4s1X/UT+SMlXag= +github.com/aws/aws-sdk-go-v2/service/paymentcryptography v1.25.2/go.mod h1:LtAy3qbryUglXiyAYdn+OCltWbMMMvYoUK6hAiFc73k= +github.com/aws/aws-sdk-go-v2/service/pcaconnectorad v1.15.6 h1:tZK9NedvW1WYWf+eZ437hUNETq4+eofZ9ja32FtFHLQ= +github.com/aws/aws-sdk-go-v2/service/pcaconnectorad v1.15.6/go.mod h1:fRBVcoZiTYjTywau+UcyTXAjtLxz20Jsaz0XXdG2950= +github.com/aws/aws-sdk-go-v2/service/pcs v1.14.0 h1:Dk/dj8EBPQBPawPMR2M9+tijanojSdoxJDSA5clenZo= +github.com/aws/aws-sdk-go-v2/service/pcs v1.14.0/go.mod h1:wyFACwTlB5ZUiOQAqs+5m7gj4xUCbojoUTaZYxa7BjM= +github.com/aws/aws-sdk-go-v2/service/pinpoint v1.39.6 h1:6vjEH7AL5aYC49apXto1dHgMBNDdZLh2L3Bve0vkE2o= +github.com/aws/aws-sdk-go-v2/service/pinpoint v1.39.6/go.mod h1:oNyevxM/xQifmv2yk482sM2isWXgloHLrOLoeOrOPHQ= +github.com/aws/aws-sdk-go-v2/service/pinpointsmsvoicev2 v1.25.5 h1:S/QLsL7GTbrtjrALjKWdab+UBUB7LIHnEJtMvdD9khk= +github.com/aws/aws-sdk-go-v2/service/pinpointsmsvoicev2 v1.25.5/go.mod h1:TlvbcCoDxToksnKXX+nmSi70Kn0aMcPo3qr2hgbO+yo= +github.com/aws/aws-sdk-go-v2/service/pipes v1.23.5 h1:QrMb0weKCfbPmFM8Z3tHXGDd8b/g5kkbYSGELgYteOE= +github.com/aws/aws-sdk-go-v2/service/pipes v1.23.5/go.mod h1:OYOBK8E3mCVkk/6bCQk+J0R2JgLYotiBd10P07i6CTk= +github.com/aws/aws-sdk-go-v2/service/polly v1.53.7 h1:xOKXUyIN722uc+FtqUIeapvlh0iBM+SXt29mB0L3CVc= +github.com/aws/aws-sdk-go-v2/service/polly v1.53.7/go.mod h1:4xoAju2Su1TJ1Q5Y6hxNFLb3kBzYOtgUN05dQj3VTp4= +github.com/aws/aws-sdk-go-v2/service/pricing v1.39.6 h1:SapAI7aLrvLNUCBeBhhR6cU7TFIrRC5KNeaj72hV+fc= +github.com/aws/aws-sdk-go-v2/service/pricing v1.39.6/go.mod h1:TtNWNQGg2WmSIS+j/ZqyJD3xY6zyAuYHBDGxFYQftjU= +github.com/aws/aws-sdk-go-v2/service/qbusiness v1.33.6 h1:kLTYFvi4+nsETUZpwqPcVVOfOX/lD7OvQ4aU+TSNGVE= +github.com/aws/aws-sdk-go-v2/service/qbusiness 
v1.33.6/go.mod h1:XxU8fY4XHMpkvrCDaylvGiaz1PSU1nntX3XasTEomDQ= +github.com/aws/aws-sdk-go-v2/service/qldb v1.30.6 h1:5FBEiFjL83odCokDLGauL1g5Noiapq8jRsqKN2/YaF8= +github.com/aws/aws-sdk-go-v2/service/qldb v1.30.6/go.mod h1:FZR8mKbaQK8xEyQmtZKEPYj6Rxgi3iLKFi7MutHzYNI= +github.com/aws/aws-sdk-go-v2/service/quicksight v1.95.0 h1:h4UqFZgvghZXPeP95J1bDDW/+51Ge/JwXge+fhFkXqw= +github.com/aws/aws-sdk-go-v2/service/quicksight v1.95.0/go.mod h1:aJPu5hqpBhcV4gXqbAuZhBiIZ+dXHrvJ176qTPADa2A= +github.com/aws/aws-sdk-go-v2/service/ram v1.34.6 h1:S/BivEPJDOKDEaLQuodznRu/9VscK2n24Oi464ySkao= +github.com/aws/aws-sdk-go-v2/service/ram v1.34.6/go.mod h1:IjW9GK9av7d2rdmmi3uze2erokbWAxUtMwDc1YOj+9M= +github.com/aws/aws-sdk-go-v2/service/rbin v1.26.6 h1:wKVcl95mVcHW1rJMsf5SsA9T2zrfOmC5WyDrqpFVnVE= +github.com/aws/aws-sdk-go-v2/service/rbin v1.26.6/go.mod h1:LCbTwbuAosB0UYOB4eMr7CmzwKPaO5ZD+UXEhJ6TPn4= +github.com/aws/aws-sdk-go-v2/service/rds v1.108.2 h1:zdlqufjtiEnoL6xdoDXem0reNh/ySUYJupUWEVBLshA= +github.com/aws/aws-sdk-go-v2/service/rds v1.108.2/go.mod h1:VOBL5tbhS7AF0m5YpfwLuRBpb5QVp4EWSPizUr/D6iE= +github.com/aws/aws-sdk-go-v2/service/redshift v1.59.0 h1:MtE4oUVeljvF2CWPZwzWERizY5uhZV7os1eJC9oA8BI= +github.com/aws/aws-sdk-go-v2/service/redshift v1.59.0/go.mod h1:ARgrCFhclWArEevJ/GAn+UBBVc9+f9oFurQlyjx262I= +github.com/aws/aws-sdk-go-v2/service/redshiftdata v1.37.6 h1:PC5iIPcOwMMqAocH4fuiyLKbEOKr9t75zhp7yysK0NY= +github.com/aws/aws-sdk-go-v2/service/redshiftdata v1.37.6/go.mod h1:u8BCO9VvZZqxHaCk4i17Js9WSGR45KPN35k/Gi79hng= +github.com/aws/aws-sdk-go-v2/service/redshiftserverless v1.31.8 h1:YJixVrWNAJYfCXcMVMppPA1RQaPtZ0oXGrLDRf5FHIU= +github.com/aws/aws-sdk-go-v2/service/redshiftserverless v1.31.8/go.mod h1:1T8W8J3Xiwhtikj4yLUXTFwOB6cWvukAzncJUV9A5uw= +github.com/aws/aws-sdk-go-v2/service/rekognition v1.51.5 h1:7XEUHyj3NhDxz8ogR9Zqj8SRA/5J2OJ+u4lpGu+qmJ0= +github.com/aws/aws-sdk-go-v2/service/rekognition v1.51.5/go.mod h1:2lepPReuRVIackBiaSO6c5ch3HXIROzHFxCCpMQgKJc= 
+github.com/aws/aws-sdk-go-v2/service/resiliencehub v1.34.6 h1:LvBVCmxDLAp3tNkAXNvedPjNw2DFJ9W0mwOpbkjaSUE= +github.com/aws/aws-sdk-go-v2/service/resiliencehub v1.34.6/go.mod h1:ZBunG0PHHt5TwsVfGyDpPtAeqmCnlo8SjVRsS/me+5Y= +github.com/aws/aws-sdk-go-v2/service/resourceexplorer2 v1.22.0 h1:+vdGkeg7koJ0MtMui392lmmF2gKISzqiUryQuq8HumQ= +github.com/aws/aws-sdk-go-v2/service/resourceexplorer2 v1.22.0/go.mod h1:WeUb0leMU6VjQkjVzmUa/DBuqgaMCgcaWoWNJy4Hg5M= +github.com/aws/aws-sdk-go-v2/service/resourcegroups v1.33.7 h1:aJEEtqhpU2Vr2zFQ6jJT3z6ryqNSJjQ3UqEUKIsCdU4= +github.com/aws/aws-sdk-go-v2/service/resourcegroups v1.33.7/go.mod h1:RD/9wH7u81Og53+2Vt7qAOA6PstpLcyiud5wCv0R/ds= +github.com/aws/aws-sdk-go-v2/service/resourcegroupstaggingapi v1.30.6 h1:c1gIOTNJ6gkocnL33DP1St++uv+f7ClFiUjR5/Pm40o= +github.com/aws/aws-sdk-go-v2/service/resourcegroupstaggingapi v1.30.6/go.mod h1:KJZ2lPXqxMULgTX/ldDAa2WeLAR2qz7vGqLEJLCP1RM= +github.com/aws/aws-sdk-go-v2/service/rolesanywhere v1.21.6 h1:1n0OZvoccoCuMn8GYI8/A78sWc0NKc3VTgTyO3fmasY= +github.com/aws/aws-sdk-go-v2/service/rolesanywhere v1.21.6/go.mod h1:+X3mqbUeamf2ANy4ppudqH0s6tuH2pFl04Cq8gFAikc= +github.com/aws/aws-sdk-go-v2/service/route53 v1.58.4 h1:KycXrohD5OxAZ5h02YechO2gevvoHfAPAaJM5l8zqb0= +github.com/aws/aws-sdk-go-v2/service/route53 v1.58.4/go.mod h1:xNLZLn4SusktBQ5moqUOgiDKGz3a7vHwF4W0KD+WBPc= +github.com/aws/aws-sdk-go-v2/service/route53domains v1.34.4 h1:mQ7ZPMQ2Dz4dl//dgMOWmApKXGZ9f9cHza7Qh9tnqSM= +github.com/aws/aws-sdk-go-v2/service/route53domains v1.34.4/go.mod h1:7q323bgF8xAtY1+rN/WVtUsbtSPZWWOVsIID9zAI5KA= +github.com/aws/aws-sdk-go-v2/service/route53profiles v1.9.6 h1:Tx7z/TsZ+OdtDtUeZFrzAU//NhnFMiGRxeAEEeTIZOI= +github.com/aws/aws-sdk-go-v2/service/route53profiles v1.9.6/go.mod h1:d46EQzstY7ltSyackMoYMJGzq+TrF1RYr3DU15t0mCo= +github.com/aws/aws-sdk-go-v2/service/route53recoverycontrolconfig v1.31.7 h1:JztKalb2lLUv07Ls1J4ePVmg0RUgyRBx1/k8maIkawE= +github.com/aws/aws-sdk-go-v2/service/route53recoverycontrolconfig 
v1.31.7/go.mod h1:PJXGu3IjcUbdL8taf9Zl9vB6ZmigpjAq+gFz6hDBmGc= +github.com/aws/aws-sdk-go-v2/service/route53recoveryreadiness v1.26.6 h1:dT53/rvqKgu4MMOzwhv6HVtxgrWp9SgYwkviQIBbmeQ= +github.com/aws/aws-sdk-go-v2/service/route53recoveryreadiness v1.26.6/go.mod h1:6d6uDK4yLgR+5jLqWdYejxBE2yS5NV/4FrOmNZclrm0= +github.com/aws/aws-sdk-go-v2/service/route53resolver v1.40.6 h1:lhnQ2Nkm3liKRxl4j3A18DYzGkxaixaSNF0fgXhtZDI= +github.com/aws/aws-sdk-go-v2/service/route53resolver v1.40.6/go.mod h1:hFCmtJyNyNNKxzX43Skr+l4JTpV/w8x470hIJBedcO0= +github.com/aws/aws-sdk-go-v2/service/rum v1.28.7 h1:WD3KNbMhPNIo6NeWIKvH+JyB+nlxA+3FP8T6AeoC8zY= +github.com/aws/aws-sdk-go-v2/service/rum v1.28.7/go.mod h1:d1TetEj0rCx4wEye6LeIjDCgHUkIIS/6cXu8UaI1aP8= +github.com/aws/aws-sdk-go-v2/service/s3 v1.88.4 h1:mUI3b885qJgfqKDUSj6RgbRqLdX0wGmg8ruM03zNfQA= +github.com/aws/aws-sdk-go-v2/service/s3 v1.88.4/go.mod h1:6v8ukAxc7z4x4oBjGUsLnH7KGLY9Uhcgij19UJNkiMg= +github.com/aws/aws-sdk-go-v2/service/s3control v1.66.2 h1:/ZonyP9GF0PKVTCLvnce+muPdS8REakUTHwkP8cyFFU= +github.com/aws/aws-sdk-go-v2/service/s3control v1.66.2/go.mod h1:m5ZEef7/rUTT4ed1B22b+MhYKWnp8Qkj4iIp465G6J0= +github.com/aws/aws-sdk-go-v2/service/s3outposts v1.33.6 h1:ISvhq3XY67cCOhHQNuERigQBjUNkr4gjM7f0MFRq9P0= +github.com/aws/aws-sdk-go-v2/service/s3outposts v1.33.6/go.mod h1:rLtMvFVwJRG+in5WrAQxgzDU2KBsenSsNrpLbRa0Xrw= +github.com/aws/aws-sdk-go-v2/service/s3tables v1.10.5 h1:lvhu7h0CC9vsL0kxghR6OeGJwF5VsXDHfeGEAwl6XWE= +github.com/aws/aws-sdk-go-v2/service/s3tables v1.10.5/go.mod h1:ZPE8QxN4+WylqmPew7p7G+J+h1qQo/pbBNLGKr/GJ/o= +github.com/aws/aws-sdk-go-v2/service/s3vectors v1.4.8 h1:ERb8DDNjGcCkDHblpHkSNzEs1ONBk+rCITYA6z+Yd1w= +github.com/aws/aws-sdk-go-v2/service/s3vectors v1.4.8/go.mod h1:gSvTmSFxwjt2k+U9eP8LQpR3sDYpwA/desV1WjaEGJ8= +github.com/aws/aws-sdk-go-v2/service/sagemaker v1.215.3 h1:7QukmIiqAnEoVfduk36whgv8YGtKjcZc1hilfxZxqYQ= +github.com/aws/aws-sdk-go-v2/service/sagemaker v1.215.3/go.mod 
h1:BSg+goTRoWiHkPwaU91RjaWtCB4+BAcbj6X6Ihvs8I8= +github.com/aws/aws-sdk-go-v2/service/scheduler v1.17.5 h1:QaBANQbMZMyyZ8UmuOaa533NCkgjtwuKyfJqd6fziUQ= +github.com/aws/aws-sdk-go-v2/service/scheduler v1.17.5/go.mod h1:9ulCU1KqL8XYYCu7Zj15WB2lSlSAb1sDzmwVl9LuMGI= +github.com/aws/aws-sdk-go-v2/service/schemas v1.33.5 h1:f94foSb0xp3flzTDe0qHRl/kwsp5RnUvMGH9jYophXc= +github.com/aws/aws-sdk-go-v2/service/schemas v1.33.5/go.mod h1:StI8kLU7UqwT4GUIyHwd4cmLXxglmNbT+faOZltSlA4= +github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.39.6 h1:9PWl450XOG+m5lKv+qg5BXso1eLxpsZLqq7VPug5km0= +github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.39.6/go.mod h1:hwt7auGsDcaNQ8pzLgE2kCNyIWouYlAKSjuUu5Dqr7I= +github.com/aws/aws-sdk-go-v2/service/securityhub v1.64.4 h1:56LRTpQSA6dqo2inwUwICUgnlCe3kAddCOhWggdDsYQ= +github.com/aws/aws-sdk-go-v2/service/securityhub v1.64.4/go.mod h1:whhpbyK81XOJWOiCmN4SbYv3X+kgNlMgHOQAnEMRXsM= +github.com/aws/aws-sdk-go-v2/service/securitylake v1.24.6 h1:HDDXTIW91VHxTgu+05f8n5HzJrDAUDqbjX1v3H2KV9Y= +github.com/aws/aws-sdk-go-v2/service/securitylake v1.24.6/go.mod h1:MY0oQdCHOaepsEJoN/WoaCHNoksmhrBrBlDvjUQYJmY= +github.com/aws/aws-sdk-go-v2/service/serverlessapplicationrepository v1.29.6 h1:70PEH4oDsU+YER8KUfSedHGlRBVihtcXxah3rokT9S0= +github.com/aws/aws-sdk-go-v2/service/serverlessapplicationrepository v1.29.6/go.mod h1:spm5LxERJz0IOpYf9fH6lBDDGgB3OXSwrV0Oj+fL7xU= +github.com/aws/aws-sdk-go-v2/service/servicecatalog v1.38.6 h1:ZGvb2y036q8v5bZhuqzmDnqdpo85u/3/B+9NnbYPcXE= +github.com/aws/aws-sdk-go-v2/service/servicecatalog v1.38.6/go.mod h1:eNS1O7ALYB0n1K6UJASh8kgw1KYGg26wHxx35VZgjFs= +github.com/aws/aws-sdk-go-v2/service/servicecatalogappregistry v1.35.6 h1:OFmbZQixBI0tnwxxoiZtdlGZSOqaOHq7wkxJGLcOIsk= +github.com/aws/aws-sdk-go-v2/service/servicecatalogappregistry v1.35.6/go.mod h1:zOpRzlMssUTM/YZ/JVuztNnGUMGTvOUHTCtyLEtOUnU= +github.com/aws/aws-sdk-go-v2/service/servicediscovery v1.39.9 h1:snXikqd2A2wiFwFoEjWVLE1p2hbRaVkSxHCcV/vxibg= 
+github.com/aws/aws-sdk-go-v2/service/servicediscovery v1.39.9/go.mod h1:D+QXio/b/Fxee/lnsYvajiEuWcPzCIc2B04YzIHX0/M= +github.com/aws/aws-sdk-go-v2/service/servicequotas v1.33.0 h1:l+Sd8288cwIW6MMq/qANtWNQzwR8qG8fru4KQl0edjY= +github.com/aws/aws-sdk-go-v2/service/servicequotas v1.33.0/go.mod h1:OzKW+2JATYOrFN/hai+5/4SezjqbEeLeZrQqNPLPe+s= +github.com/aws/aws-sdk-go-v2/service/ses v1.34.5 h1:NwOeuOFrWoh4xWKINrmaAK4Vh75jmmY0RAuNjQ6W5Es= +github.com/aws/aws-sdk-go-v2/service/ses v1.34.5/go.mod h1:m3BsMJZD0eqjGIniBzwrNUqG9ZUPquC4hY9FyE2qNFo= +github.com/aws/aws-sdk-go-v2/service/sesv2 v1.53.5 h1:ZHBssvFtrtfNCm5APnzFrkdCX4KPDKlSGZ2NbfPmISY= +github.com/aws/aws-sdk-go-v2/service/sesv2 v1.53.5/go.mod h1:eJP5lLTdqKwiQB5mKKaSjjJlLB0xcT3pTFF576PbdP0= +github.com/aws/aws-sdk-go-v2/service/sfn v1.39.6 h1:0kpMhSSBrZmYeeKmyM4RftA4XeiC0PDVcbUg3gXNqfk= +github.com/aws/aws-sdk-go-v2/service/sfn v1.39.6/go.mod h1:XyrAUQxv//wWMFyh2mvvTZL9vaYdpjM3Rg5A5QOFOaE= +github.com/aws/aws-sdk-go-v2/service/shield v1.34.6 h1:AWKt4pVqiqzLIT3xoOThd0xT6dY1lSB+7yDcn0N3I48= +github.com/aws/aws-sdk-go-v2/service/shield v1.34.6/go.mod h1:Io5NYTndCqsmL+vdfoQEkInZkbZn8gLloqEjGvng+7M= +github.com/aws/aws-sdk-go-v2/service/signer v1.31.6 h1:TnlG33tsUOBnu7rMicF8YFIC0pxkJdBJwo2R0W5L6Fw= +github.com/aws/aws-sdk-go-v2/service/signer v1.31.6/go.mod h1:a6U0A/LNWknEIS7Fmf4McuUwImMlo6qrKkhbEpSczP8= +github.com/aws/aws-sdk-go-v2/service/sns v1.38.5 h1:c0hINjMfDQvQLJJxfNNcIaLYVLC7E0W2zOQOVVKLnnU= +github.com/aws/aws-sdk-go-v2/service/sns v1.38.5/go.mod h1:E427ZzdOMWh/4KtD48AGfbWLX14iyw9URVOdIwtv80o= +github.com/aws/aws-sdk-go-v2/service/sqs v1.42.8 h1:cWiY+//XL5QOYKJyf4Pvt+oE/5wSIi095+bS+ME2lGw= +github.com/aws/aws-sdk-go-v2/service/sqs v1.42.8/go.mod h1:sLvnKf0p0sMQ33nkJGP2NpYyWHMojpL0O9neiCGc9lc= +github.com/aws/aws-sdk-go-v2/service/ssm v1.65.1 h1:TFg6XiS7EsHN0/jpV3eVNczZi/sPIVP5jxIs+euIESQ= +github.com/aws/aws-sdk-go-v2/service/ssm v1.65.1/go.mod h1:OIezd9K0sM/64DDP4kXx/i0NdgXu6R5KE6SCsIPJsjc= 
+github.com/aws/aws-sdk-go-v2/service/ssmcontacts v1.30.8 h1:Nqsc8EhmXUwGCLLxB1cCt/8sDyVUDaS9zpkXyd8zcD0= +github.com/aws/aws-sdk-go-v2/service/ssmcontacts v1.30.8/go.mod h1:AGjoKT5weHZ8oo4sFEorNVKsg9noEmEBiXsA5e9veEE= +github.com/aws/aws-sdk-go-v2/service/ssmincidents v1.39.5 h1:oGUMJl6Wf7vZWiaCRE4MPjtnet6aEjnpF/1WxoKlJ+A= +github.com/aws/aws-sdk-go-v2/service/ssmincidents v1.39.5/go.mod h1:5TeCNbB10rN3TUR7NWFdRWFLfrjebhMvvE0lQKS30aE= +github.com/aws/aws-sdk-go-v2/service/ssmquicksetup v1.8.6 h1:sGhOo5CZV1QV1gsAcrwXJVm2EcVaTCVLhEIr29eaihA= +github.com/aws/aws-sdk-go-v2/service/ssmquicksetup v1.8.6/go.mod h1:Y07XpBl1TPJFFfYf4OD1PPmui8rFba8k2u3gAH1H5YM= +github.com/aws/aws-sdk-go-v2/service/ssmsap v1.25.5 h1:D2bijFgTDf26Oizhsj5X6X1B8ibIaZVHgonPeLnNTlQ= +github.com/aws/aws-sdk-go-v2/service/ssmsap v1.25.5/go.mod h1:11+FpRI0DIr3RuA3pRCDwVA22LP4vymQB7MCTMCApKw= +github.com/aws/aws-sdk-go-v2/service/sso v1.29.6 h1:A1oRkiSQOWstGh61y4Wc/yQ04sqrQZr1Si/oAXj20/s= +github.com/aws/aws-sdk-go-v2/service/sso v1.29.6/go.mod h1:5PfYspyCU5Vw1wNPsxi15LZovOnULudOQuVxphSflQA= +github.com/aws/aws-sdk-go-v2/service/ssoadmin v1.36.2 h1:4O5fAx9BpoX5c+5BxUgOLJM7kS0K20JebxzLzfu+JIk= +github.com/aws/aws-sdk-go-v2/service/ssoadmin v1.36.2/go.mod h1:7iR/6+xIFUPl0LnAZ0RSBQ4A4R6CyA7WrxKyB9QncWc= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.1 h1:5fm5RTONng73/QA73LhCNR7UT9RpFH3hR6HWL6bIgVY= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.1/go.mod h1:xBEjWD13h+6nq+z4AkqSfSvqRKFgDIQeaMguAJndOWo= +github.com/aws/aws-sdk-go-v2/service/storagegateway v1.42.7 h1:Pr+heLI6opJl3ntVUqiIB6ehpCERmKvEsTq1JmRq18M= +github.com/aws/aws-sdk-go-v2/service/storagegateway v1.42.7/go.mod h1:e5HFdmxGXdN1LQ/a+twPR3PuQyvI/aPK38MWVU17QHc= +github.com/aws/aws-sdk-go-v2/service/sts v1.38.6 h1:p3jIvqYwUZgu/XYeI48bJxOhvm47hZb5HUQ0tn6Q9kA= +github.com/aws/aws-sdk-go-v2/service/sts v1.38.6/go.mod h1:WtKK+ppze5yKPkZ0XwqIVWD4beCwv056ZbPQNoeHqM8= +github.com/aws/aws-sdk-go-v2/service/swf v1.32.5 
h1:uGf0//B3vB5y2gqeP7qUtyKh8A+MrhWgGXlisdsCCsk= +github.com/aws/aws-sdk-go-v2/service/swf v1.32.5/go.mod h1:jY8XhNSBjSezBEJA5pEM9sW7nqBc6EDmHwIJ1hj5pq4= +github.com/aws/aws-sdk-go-v2/service/synthetics v1.41.0 h1:PhrW72CcuCEhPNItPyE7eO/gE5fx4QJiNQ2Hwo6gqRM= +github.com/aws/aws-sdk-go-v2/service/synthetics v1.41.0/go.mod h1:eCpO7DjOFxysY+P8dEFJMWCTnpMLt7IGbIhSk5yHDMA= +github.com/aws/aws-sdk-go-v2/service/taxsettings v1.16.6 h1:RS2HB6ey9KpPETo2pWoPNcedvZY6E6+TETJ+3qHmrVA= +github.com/aws/aws-sdk-go-v2/service/taxsettings v1.16.6/go.mod h1:f2PHOYOjNlFxzDo6eR6Zf89XmUancu6ORaoTVURTY7g= +github.com/aws/aws-sdk-go-v2/service/timestreaminfluxdb v1.17.0 h1:QvVSS9mo0AiMK9ndQFpaj6R7dM7LEpoo+nh/ZeeAxPc= +github.com/aws/aws-sdk-go-v2/service/timestreaminfluxdb v1.17.0/go.mod h1:7ObjtSvjDRJVBRhd9zxh6kgxYMbA9vtBQ24+RNjUdao= +github.com/aws/aws-sdk-go-v2/service/timestreamquery v1.35.5 h1:ivzKU64lfi9F5VwBLIEcoqQMtXKrn7iUHOmhU8I88pA= +github.com/aws/aws-sdk-go-v2/service/timestreamquery v1.35.5/go.mod h1:cSU9wIi0AjMmlo0ydUD839k1yh7fNnTdCZokIwol5Qg= +github.com/aws/aws-sdk-go-v2/service/timestreamwrite v1.35.5 h1:BjJ8HypXtGM+O5HP2rPfbxq50UuecvoZpRPdTAnILOM= +github.com/aws/aws-sdk-go-v2/service/timestreamwrite v1.35.5/go.mod h1:rnOKv/DJpfdiPPOyAdsHSommMuIHW6bmP2rrQJJYPdU= +github.com/aws/aws-sdk-go-v2/service/transcribe v1.53.0 h1:O8BRjUAD1Jf15RLBaAPHkShlAB+poKZdsAp+Tpa/txY= +github.com/aws/aws-sdk-go-v2/service/transcribe v1.53.0/go.mod h1:ZZN5Hh+s7Cr845LY5cWKJiCplzW/vZwpnpjxifgo4Ko= +github.com/aws/aws-sdk-go-v2/service/transfer v1.67.0 h1:1Z3X4hOfdiyJP+a/yZzDB577mzTzLB0m/JDu+1VD4LM= +github.com/aws/aws-sdk-go-v2/service/transfer v1.67.0/go.mod h1:28XXFJKdD8UJP9USN1DMtNNJpSt06CyozE/UaPbgjGA= +github.com/aws/aws-sdk-go-v2/service/verifiedpermissions v1.29.5 h1:U7NFjnobEmEFqF9DB4tMLqSIJmciwcGYajpOOJgRdgY= +github.com/aws/aws-sdk-go-v2/service/verifiedpermissions v1.29.5/go.mod h1:BkoBrVIzx/RT4x6XqY1o5iUqq9Hh62PKnBC9YBClDvk= +github.com/aws/aws-sdk-go-v2/service/vpclattice v1.19.0 
h1:3OyM+OTHo2c5u3lWQijbwDGF7jmnpZwya+IOAywsQQw= +github.com/aws/aws-sdk-go-v2/service/vpclattice v1.19.0/go.mod h1:DjBHb+rO7d7WLJKX73dO6fULvhHdJrXfv+k3Kkiespc= +github.com/aws/aws-sdk-go-v2/service/waf v1.30.5 h1:JUQsG3CPBtXGGdAril5Fois4wiFE1Z3Z/mkvkjd4TCQ= +github.com/aws/aws-sdk-go-v2/service/waf v1.30.5/go.mod h1:sRusTwVAJQtNy6vsrAHGXtd2WIgBGusW2waE6Kgc/So= +github.com/aws/aws-sdk-go-v2/service/wafregional v1.30.6 h1:DqojtTvnVwuylF6Ru08okb9UGNrq3qc67mZ8Gpz9sVk= +github.com/aws/aws-sdk-go-v2/service/wafregional v1.30.6/go.mod h1:uo8gzm2uqf+gYYa2blt1ITjIl59iTzOznUGaajPfT+g= +github.com/aws/aws-sdk-go-v2/service/wafv2 v1.68.0 h1:BUhKcwhfjDIUSA2+J9LLm+C2Z2tcBwFvRpEQAfuWlT4= +github.com/aws/aws-sdk-go-v2/service/wafv2 v1.68.0/go.mod h1:maJyEaarDIirG/MA0EYIxWc1ctk4sbc4+cEUVCIgorI= +github.com/aws/aws-sdk-go-v2/service/wellarchitected v1.39.6 h1:0vFMsxhs4763afIR7366ricWl+w1sVOeroRkMOV9BGA= +github.com/aws/aws-sdk-go-v2/service/wellarchitected v1.39.6/go.mod h1:zmHAn01szsTI7D4u2qgUs0CMklmz2af4EU12bDOWZrM= +github.com/aws/aws-sdk-go-v2/service/workmail v1.36.4 h1:hVmkAUyvH4OZkiW0HmNYYeDjVL7jyj3kCN/3r6nPiW4= +github.com/aws/aws-sdk-go-v2/service/workmail v1.36.4/go.mod h1:RlfJYDlvfjkqFAf3Fim2a5ryz6/gOYQJXJLJV5/c/xU= +github.com/aws/aws-sdk-go-v2/service/workspaces v1.63.6 h1:QHAuU6Tfq2k6Okb8cdZ98BkXzmUZflfKG64Fc44CnQQ= +github.com/aws/aws-sdk-go-v2/service/workspaces v1.63.6/go.mod h1:LTdwIWneoBQ7vVoD1gAGXvcNWq1gkgQqbCoKg1iDUZ0= +github.com/aws/aws-sdk-go-v2/service/workspacesweb v1.32.6 h1:h8PGWVBIctOlbBpupw4CorUI8gCtX7d5o2lSHi12XgM= +github.com/aws/aws-sdk-go-v2/service/workspacesweb v1.32.6/go.mod h1:nUz45LzKA733mQl1wIKcWK87SkJWXo+TiLYC5NUZf3M= +github.com/aws/aws-sdk-go-v2/service/xray v1.36.4 h1:G5VZW+21OPiOGoAFM+gBWPLKyuRaB2dC/RdYgL82ZS8= +github.com/aws/aws-sdk-go-v2/service/xray v1.36.4/go.mod h1:FYhPO/0+3jtQ10m0K1DnBTrJkNgXsrYhSHOt3/mCOnE= +github.com/aws/smithy-go v1.23.0 h1:8n6I3gXzWJB2DxBDnfxgBaSX6oe0d/t10qGz7OKqMCE= +github.com/aws/smithy-go v1.23.0/go.mod 
h1:t1ufH5HMublsJYulve2RKmHDC15xu1f26kHCp/HgceI= +github.com/beevik/etree v1.6.0 h1:u8Kwy8pp9D9XeITj2Z0XtA5qqZEmtJtuXZRQi+j03eE= +github.com/beevik/etree v1.6.0/go.mod h1:bh4zJxiIr62SOf9pRzN7UUYaEDa9HEKafK25+sLc0Gc= github.com/bgentry/speakeasy v0.1.0 h1:ByYyxL9InA1OWqxJqqp2A5pYHUrCiAL6K3J+LKSsQkY= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc h1:biVzkmvwrH8WK8raXaxBx6fRVTlJILwEwQGL1I/ByEI= github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8= -github.com/bufbuild/protocompile v0.4.0 h1:LbFKd2XowZvQ/kajzguUp2DC9UEIQhIq77fZZlaQsNA= -github.com/bufbuild/protocompile v0.4.0/go.mod h1:3v93+mbWn/v3xzN+31nwkJfrEpAUwp+BagBSZWx+TP8= -github.com/cedar-policy/cedar-go v0.1.0 h1:2tZwWn8tNO/896YAM7OQmH3vn98EeHEA3g9anwdVZvA= -github.com/cedar-policy/cedar-go v0.1.0/go.mod h1:pEgiK479O5dJfzXnTguOMm+bCplzy5rEEFPGdZKPWz4= +github.com/bufbuild/protocompile v0.14.1 h1:iA73zAf/fyljNjQKwYzUHD6AD4R8KMasmwa/FBatYVw= +github.com/bufbuild/protocompile v0.14.1/go.mod h1:ppVdAIhbr2H8asPk6k4pY7t9zB1OU5DoEw9xY/FUi1c= +github.com/cedar-policy/cedar-go v1.2.6 h1:q6f1sRxhoBG7lnK/fH6oBG33ruf2yIpcfcPXNExANa0= +github.com/cedar-policy/cedar-go v1.2.6/go.mod h1:h5+3CVW1oI5LXVskJG+my9TFCYI5yjh/+Ul3EJie6MI= github.com/cloudflare/circl v1.6.1 h1:zqIqSPIndyBh1bjLVVDHMPpVKqp8Su/V+6MeDzzQBQ0= github.com/cloudflare/circl v1.6.1/go.mod h1:uddAzsPgqdMAYatqJ0lsjX1oECcQLIlRpzZh3pJrofs= github.com/cyphar/filepath-securejoin v0.4.1 h1:JyxxyPEaktOD+GAnqIqTf9A8tHyAG22rowi7HkoSU1s= github.com/cyphar/filepath-securejoin v0.4.1/go.mod h1:Sdj7gXlvMcPZsbhwhQ33GguGLDGQL7h7bg04C/+u9jI= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 
+github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/emirpasic/gods v1.18.1 h1:FXtiHYKDGKCW2KzwZKx0iC0PQmdlorYgdFG9jPXJ1Bc= github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FMNAnJvWQ= github.com/evanphx/json-patch v0.5.2 h1:xVCHIVMUu1wtM/VkR9jVZ45N3FhZfYMMYGorLCR8P3k= @@ -590,12 +603,14 @@ github.com/go-git/go-billy/v5 v5.6.2/go.mod h1:rcFC2rAsp/erv7CMz9GczHcuD0D32fWzH github.com/go-git/go-git/v5 v5.14.0 h1:/MD3lCrGjCen5WfEAzKg00MJJffKhC8gzS80ycmCi60= github.com/go-git/go-git/v5 v5.14.0/go.mod h1:Z5Xhoia5PcWA3NF8vRLURn9E5FRhSl7dGj9ItW3Wk5k= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= -github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= +github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-test/deep v1.0.3 h1:ZrJSEWsXzPOxaZnFteGEfooLba+ju3FYIbOrS+rQd68= github.com/go-test/deep v1.0.3/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= +github.com/goccy/go-yaml v1.18.0 h1:8W7wMFS12Pcas7KU+VVkaiCng+kG8QiFeFwzFb+rwuw= +github.com/goccy/go-yaml v1.18.0/go.mod h1:XBurs7gK8ATbW4ZPGKgcbrY1Br56PdM69F7LkFRi1kA= github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 h1:f+oWsMOmNPc8JmEHVZIycC7hBoQxHH9pNKQORJNozsQ= github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8/go.mod h1:wcDNUvekVysuuOpQKo3191zZyTpiI6se1N1ULghS0sw= github.com/golang/protobuf v1.1.0/go.mod 
h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -612,8 +627,8 @@ github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/hashicorp/aws-cloudformation-resource-schema-sdk-go v0.23.0 h1:l16/Vrl0+x+HjHJWEjcKPwHYoxN9EC78gAFXKlH6m84= github.com/hashicorp/aws-cloudformation-resource-schema-sdk-go v0.23.0/go.mod h1:HAmscHyzSOfB1Dr16KLc177KNbn83wscnZC+N7WyaM8= -github.com/hashicorp/aws-sdk-go-base/v2 v2.0.0-beta.65 h1:81+kWbE1yErFBMjME0I5k3x3kojjKsWtPYHEAutoPow= -github.com/hashicorp/aws-sdk-go-base/v2 v2.0.0-beta.65/go.mod h1:WtMzv9T++tfWVea+qB2MXoaqxw33S8bpJslzUike2mQ= +github.com/hashicorp/aws-sdk-go-base/v2 v2.0.0-beta.67 h1:IS4mjtvkLHXWI5yn/t9ILOUiBqPePMFaO4IRh5pcMk4= +github.com/hashicorp/aws-sdk-go-base/v2 v2.0.0-beta.67/go.mod h1:l81jrdpcZSWUsJs4BGFfdGScefSYEFQRLMQRG3uyvT0= github.com/hashicorp/awspolicyequivalence v1.7.0 h1:HxwPEw2/31BqQa73PinGciTfG2uJ/ATelvDG8X1gScU= github.com/hashicorp/awspolicyequivalence v1.7.0/go.mod h1:+oCTxQEYt+GcRalqrqTCBcJf100SQYiWQ4aENNYxYe0= github.com/hashicorp/cli v1.1.7 h1:/fZJ+hNdwfTSfsxMBa9WWMlfjUZbX8/LnUxgAd7lCVU= @@ -633,8 +648,8 @@ github.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVH github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= -github.com/hashicorp/go-plugin v1.6.3 h1:xgHB+ZUSYeuJi96WtxEjzi23uh7YQpznjGh0U0UUrwg= -github.com/hashicorp/go-plugin v1.6.3/go.mod h1:MRobyh+Wc/nYy1V4KAXUiYfzxoYhs7V1mlH1Z7iY2h0= +github.com/hashicorp/go-plugin v1.7.0 h1:YghfQH/0QmPNc/AZMTFE3ac8fipZyZECHdDPshfk+mA= +github.com/hashicorp/go-plugin v1.7.0/go.mod h1:BExt6KEaIYx804z8k4gRzRLEvxKVb+kn0NMcihqOqb8= github.com/hashicorp/go-retryablehttp v0.7.7 
h1:C8hUCYzor8PIfXHa4UrZkU4VvK8o9ISHxT2Q8+VepXU= github.com/hashicorp/go-retryablehttp v0.7.7/go.mod h1:pkQpWZeYWskR+D1tR2O5OcBFOxfA7DoAO6xtkuQnHTk= github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= @@ -644,38 +659,38 @@ github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKe github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/hc-install v0.9.2 h1:v80EtNX4fCVHqzL9Lg/2xkp62bbvQMnvPQ0G+OmtO24= github.com/hashicorp/hc-install v0.9.2/go.mod h1:XUqBQNnuT4RsxoxiM9ZaUk0NX8hi2h+Lb6/c0OZnC/I= -github.com/hashicorp/hcl/v2 v2.23.0 h1:Fphj1/gCylPxHutVSEOf2fBOh1VE4AuLV7+kbJf3qos= -github.com/hashicorp/hcl/v2 v2.23.0/go.mod h1:62ZYHrXgPoX8xBnzl8QzbWq4dyDsDtfCRgIq1rbJEvA= +github.com/hashicorp/hcl/v2 v2.24.0 h1:2QJdZ454DSsYGoaE6QheQZjtKZSUs9Nh2izTWiwQxvE= +github.com/hashicorp/hcl/v2 v2.24.0/go.mod h1:oGoO1FIQYfn/AgyOhlg9qLC6/nOJPX3qGbkZpYAcqfM= github.com/hashicorp/logutils v1.0.0 h1:dLEQVugN8vlakKOUE3ihGLTZJRB4j+M2cdTm/ORI65Y= github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= -github.com/hashicorp/terraform-exec v0.23.0 h1:MUiBM1s0CNlRFsCLJuM5wXZrzA3MnPYEsiXmzATMW/I= -github.com/hashicorp/terraform-exec v0.23.0/go.mod h1:mA+qnx1R8eePycfwKkCRk3Wy65mwInvlpAeOwmA7vlY= -github.com/hashicorp/terraform-json v0.25.0 h1:rmNqc/CIfcWawGiwXmRuiXJKEiJu1ntGoxseG1hLhoQ= -github.com/hashicorp/terraform-json v0.25.0/go.mod h1:sMKS8fiRDX4rVlR6EJUMudg1WcanxCMoWwTLkgZP/vc= -github.com/hashicorp/terraform-plugin-framework v1.15.0 h1:LQ2rsOfmDLxcn5EeIwdXFtr03FVsNktbbBci8cOKdb4= -github.com/hashicorp/terraform-plugin-framework v1.15.0/go.mod h1:hxrNI/GY32KPISpWqlCoTLM9JZsGH3CyYlir09bD/fI= +github.com/hashicorp/terraform-exec v0.24.0 h1:mL0xlk9H5g2bn0pPF6JQZk5YlByqSqrO5VoaNtAf8OE= +github.com/hashicorp/terraform-exec v0.24.0/go.mod h1:lluc/rDYfAhYdslLJQg3J0oDqo88oGQAdHR+wDqFvo4= +github.com/hashicorp/terraform-json v0.27.2 
h1:BwGuzM6iUPqf9JYM/Z4AF1OJ5VVJEEzoKST/tRDBJKU= +github.com/hashicorp/terraform-json v0.27.2/go.mod h1:GzPLJ1PLdUG5xL6xn1OXWIjteQRT2CNT9o/6A9mi9hE= +github.com/hashicorp/terraform-plugin-framework v1.16.1 h1:1+zwFm3MEqd/0K3YBB2v9u9DtyYHyEuhVOfeIXbteWA= +github.com/hashicorp/terraform-plugin-framework v1.16.1/go.mod h1:0xFOxLy5lRzDTayc4dzK/FakIgBhNf/lC4499R9cV4Y= github.com/hashicorp/terraform-plugin-framework-jsontypes v0.2.0 h1:SJXL5FfJJm17554Kpt9jFXngdM6fXbnUnZ6iT2IeiYA= github.com/hashicorp/terraform-plugin-framework-jsontypes v0.2.0/go.mod h1:p0phD0IYhsu9bR4+6OetVvvH59I6LwjXGnTVEr8ox6E= -github.com/hashicorp/terraform-plugin-framework-timeouts v0.5.0 h1:I/N0g/eLZ1ZkLZXUQ0oRSXa8YG/EF0CEuQP1wXdrzKw= -github.com/hashicorp/terraform-plugin-framework-timeouts v0.5.0/go.mod h1:t339KhmxnaF4SzdpxmqW8HnQBHVGYazwtfxU0qCs4eE= +github.com/hashicorp/terraform-plugin-framework-timeouts v0.6.0 h1:Vv16e7EW4nT9668IV0RhdpEmnLl0im7BZx6J+QMlUkg= +github.com/hashicorp/terraform-plugin-framework-timeouts v0.6.0/go.mod h1:rpHo9hZLn4vEkvNL5xsSdLRdaDZKSinuc0xL+BdOpVA= github.com/hashicorp/terraform-plugin-framework-timetypes v0.5.0 h1:v3DapR8gsp3EM8fKMh6up9cJUFQ2iRaFsYLP8UJnCco= github.com/hashicorp/terraform-plugin-framework-timetypes v0.5.0/go.mod h1:c3PnGE9pHBDfdEVG9t1S1C9ia5LW+gkFR0CygXlM8ak= -github.com/hashicorp/terraform-plugin-framework-validators v0.18.0 h1:OQnlOt98ua//rCw+QhBbSqfW3QbwtVrcdWeQN5gI3Hw= -github.com/hashicorp/terraform-plugin-framework-validators v0.18.0/go.mod h1:lZvZvagw5hsJwuY7mAY6KUz45/U6fiDR0CzQAwWD0CA= -github.com/hashicorp/terraform-plugin-go v0.28.0 h1:zJmu2UDwhVN0J+J20RE5huiF3XXlTYVIleaevHZgKPA= -github.com/hashicorp/terraform-plugin-go v0.28.0/go.mod h1:FDa2Bb3uumkTGSkTFpWSOwWJDwA7bf3vdP3ltLDTH6o= -github.com/hashicorp/terraform-plugin-mux v0.20.0 h1:3QpBnI9uCuL0Yy2Rq/kR9cOdmOFNhw88A2GoZtk5aXM= -github.com/hashicorp/terraform-plugin-mux v0.20.0/go.mod h1:wSIZwJjSYk86NOTX3fKUlThMT4EAV1XpBHz9SAvjQr4= -github.com/hashicorp/terraform-plugin-sdk/v2 v2.37.0 
h1:NFPMacTrY/IdcIcnUB+7hsore1ZaRWU9cnB6jFoBnIM= -github.com/hashicorp/terraform-plugin-sdk/v2 v2.37.0/go.mod h1:QYmYnLfsosrxjCnGY1p9c7Zj6n9thnEE+7RObeYs3fA= -github.com/hashicorp/terraform-plugin-testing v1.13.2 h1:mSotG4Odl020vRjIenA3rggwo6Kg6XCKIwtRhYgp+/M= -github.com/hashicorp/terraform-plugin-testing v1.13.2/go.mod h1:WHQ9FDdiLoneey2/QHpGM/6SAYf4A7AZazVg7230pLE= -github.com/hashicorp/terraform-registry-address v0.2.5 h1:2GTftHqmUhVOeuu9CW3kwDkRe4pcBDq0uuK5VJngU1M= -github.com/hashicorp/terraform-registry-address v0.2.5/go.mod h1:PpzXWINwB5kuVS5CA7m1+eO2f1jKb5ZDIxrOPfpnGkg= +github.com/hashicorp/terraform-plugin-framework-validators v0.19.0 h1:Zz3iGgzxe/1XBkooZCewS0nJAaCFPFPHdNJd8FgE4Ow= +github.com/hashicorp/terraform-plugin-framework-validators v0.19.0/go.mod h1:GBKTNGbGVJohU03dZ7U8wHqc2zYnMUawgCN+gC0itLc= +github.com/hashicorp/terraform-plugin-go v0.29.0 h1:1nXKl/nSpaYIUBU1IG/EsDOX0vv+9JxAltQyDMpq5mU= +github.com/hashicorp/terraform-plugin-go v0.29.0/go.mod h1:vYZbIyvxyy0FWSmDHChCqKvI40cFTDGSb3D8D70i9GM= +github.com/hashicorp/terraform-plugin-mux v0.21.0 h1:QsEYnzSD2c3zT8zUrUGqaFGhV/Z8zRUlU7FY3ZPJFfw= +github.com/hashicorp/terraform-plugin-mux v0.21.0/go.mod h1:Qpt8+6AD7NmL0DS7ASkN0EXpDQ2J/FnnIgeUr1tzr5A= +github.com/hashicorp/terraform-plugin-sdk/v2 v2.38.1 h1:mlAq/OrMlg04IuJT7NpefI1wwtdpWudnEmjuQs04t/4= +github.com/hashicorp/terraform-plugin-sdk/v2 v2.38.1/go.mod h1:GQhpKVvvuwzD79e8/NZ+xzj+ZpWovdPAe8nfV/skwNU= +github.com/hashicorp/terraform-plugin-testing v1.14.0-beta.1.0.20251013071646-7ed2ee242705 h1:+Xi2Akrl1b7bs6VIOtA3Vm+cxx+byzP9U2r461/gL4g= +github.com/hashicorp/terraform-plugin-testing v1.14.0-beta.1.0.20251013071646-7ed2ee242705/go.mod h1:UrIjRAJLN0kygs0miY1Moy4PxUzy2e9R5WxyRk8aliI= +github.com/hashicorp/terraform-registry-address v0.4.0 h1:S1yCGomj30Sao4l5BMPjTGZmCNzuv7/GDTDX99E9gTk= +github.com/hashicorp/terraform-registry-address v0.4.0/go.mod h1:LRS1Ay0+mAiRkUyltGT+UHWkIqTFvigGn/LbMshfflE= github.com/hashicorp/terraform-svchost v0.1.1 
h1:EZZimZ1GxdqFRinZ1tpJwVxxt49xc/S52uzrw4x0jKQ= github.com/hashicorp/terraform-svchost v0.1.1/go.mod h1:mNsjQfZyf/Jhz35v6/0LWcv26+X7JPS+buii2c9/ctc= -github.com/hashicorp/yamux v0.1.1 h1:yrQxtgseBDrq9Y652vSRDvsKCJKOUD+GzTS4Y0Y8pvE= -github.com/hashicorp/yamux v0.1.1/go.mod h1:CtWFDAQgb7dxtzFs4tWbplKIe2jSi3+5vKbgIO0SLnQ= +github.com/hashicorp/yamux v0.1.2 h1:XtB8kyFOyHXYVFnwT5C3+Bdo8gArse7j2AQ0DA0Uey8= +github.com/hashicorp/yamux v0.1.2/go.mod h1:C+zze2n6e/7wshOZep2A70/aQU6QBRWJO/G6FT1wIns= github.com/huandu/xstrings v1.3.3 h1:/Gcsuc1x8JVbJ9/rlye4xZnVAbEkGauT8lbebqcQws4= github.com/huandu/xstrings v1.3.3/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= @@ -683,8 +698,8 @@ github.com/imdario/mergo v0.3.15 h1:M8XP7IuFNsqUx6VPK2P9OSmsYsI/YFaGil0uD21V3dM= github.com/imdario/mergo v0.3.15/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A= github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo= -github.com/jhump/protoreflect v1.15.1 h1:HUMERORf3I3ZdX05WaQ6MIpd/NJ434hTp5YiKgfCL6c= -github.com/jhump/protoreflect v1.15.1/go.mod h1:jD/2GMKKE6OqX8qTjhADU1e6DShO+gavG9e0Q693nKo= +github.com/jhump/protoreflect v1.17.0 h1:qOEr613fac2lOuTgWN4tPAtLL7fUSbuJL5X5XumQh94= +github.com/jhump/protoreflect v1.17.0/go.mod h1:h9+vUUL38jiBzck8ck+6G/aeMX8Z4QUY/NiJPwPNi+8= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= @@ -715,27 +730,28 @@ github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG github.com/mitchellh/go-homedir v1.1.0/go.mod 
h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-testing-interface v1.14.1 h1:jrgshOhYAUVNMAJiKbEu7EqAwgJJ2JqpQmpLJOu07cU= github.com/mitchellh/go-testing-interface v1.14.1/go.mod h1:gfgS7OtZj6MA4U1UrDRp04twqAjfvlZyCfX3sDjEym8= -github.com/mitchellh/go-wordwrap v1.0.0 h1:6GlHJ/LTGMrIJbwgdqdl2eEH8o+Exx/0m8ir9Gns0u4= -github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= +github.com/mitchellh/go-wordwrap v1.0.1 h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQflz0v0= +github.com/mitchellh/go-wordwrap v1.0.1/go.mod h1:R62XHJLzvMFRBbcrT7m7WgmE1eOyTSsCt+hzestvNj0= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= -github.com/oklog/run v1.0.0 h1:Ru7dDtJNOyC66gQ5dQmaCa0qIsAUFY3sFpK1Xk8igrw= -github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= +github.com/oklog/run v1.1.0 h1:GEenZ1cK0+q0+wsJew9qUg/DyD8k3JzYsZAi5gYi2mA= +github.com/oklog/run v1.1.0/go.mod h1:sVPdnTZT1zYwAJeCMu2Th4T21pA3FPOQRfWjQlk7DVU= github.com/pjbgf/sha1cd v0.3.2 h1:a9wb0bp1oC2TGwStyn0Umc/IGKQnEgF0vVaZ8QF8eo4= github.com/pjbgf/sha1cd v0.3.2/go.mod h1:zQWigSxVmsHEZow5qaLtPYxpcKMMQpa09ixqBxuCS6A= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 
h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/posener/complete v1.2.3 h1:NP0eAhjcjImqslEwo/1hq7gpajME0fTLTezBKDqfXqo= github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s= github.com/pquerna/otp v1.5.0 h1:NMMR+WrmaqXU4EzdGJEE1aUUI0AMRzsp96fFFWNPwxs= github.com/pquerna/otp v1.5.0/go.mod h1:dkJfzwRKNiegxyNb54X/3fLwhCynbMspSyWKnvi1AEg= -github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= -github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= +github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ= +github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc= github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 h1:n661drycOFuPLCN3Uc8sB6B/s6Z4t2xvBgU1htSHuq8= github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3/go.mod h1:A0bzQcvG0E7Rwjx0REVgAGH58e96+X0MeOfepqsbeW4= github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= @@ -751,8 +767,8 @@ github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UV github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= -github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= -github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= github.com/vmihailenco/msgpack v3.3.3+incompatible/go.mod 
h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk= github.com/vmihailenco/msgpack v4.0.4+incompatible h1:dSLoQfGFAo3F6OoNhwUmLwVgaUXK79GlxNBwueZn0xI= github.com/vmihailenco/msgpack v4.0.4+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk= @@ -769,46 +785,46 @@ github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1: github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74= github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= -github.com/zclconf/go-cty v1.16.3 h1:osr++gw2T61A8KVYHoQiFbFd1Lh3JOCXc/jFLJXKTxk= -github.com/zclconf/go-cty v1.16.3/go.mod h1:VvMs5i0vgZdhYawQNq5kePSpLAoz8u1xvZgrPIxfnZE= +github.com/zclconf/go-cty v1.17.0 h1:seZvECve6XX4tmnvRzWtJNHdscMtYEx5R7bnnVyd/d0= +github.com/zclconf/go-cty v1.17.0/go.mod h1:wqFzcImaLTI6A5HfsRwB0nj5n0MRZFwmey8YoFPPs3U= github.com/zclconf/go-cty-debug v0.0.0-20240509010212-0d6042c53940 h1:4r45xpDWB6ZMSMNJFMOjqrGHynW3DIBuR2H9j0ug+Mo= github.com/zclconf/go-cty-debug v0.0.0-20240509010212-0d6042c53940/go.mod h1:CmBdvvj3nqzfzJ6nTCIwDTPZ56aVGvDrmztiO5g3qrM= -go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= -go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= -go.opentelemetry.io/contrib/instrumentation/github.com/aws/aws-sdk-go-v2/otelaws v0.61.0 h1:lR4WnQLBC9XyTwKrz0327rq2QnIdJNpaVIGuW2yMvME= -go.opentelemetry.io/contrib/instrumentation/github.com/aws/aws-sdk-go-v2/otelaws v0.61.0/go.mod h1:UK49mXgwqIWFUDH8ibqTswbhy4fuwjEjj4VKMC7krUQ= -go.opentelemetry.io/otel v1.36.0 h1:UumtzIklRBY6cI/lllNZlALOF5nNIzJVb16APdvgTXg= -go.opentelemetry.io/otel v1.36.0/go.mod h1:/TcFMXYjyRNh8khOAO9ybYkqaDBb/70aVwkNML4pP8E= -go.opentelemetry.io/otel/metric v1.36.0 h1:MoWPKVhQvJ+eeXWHFBOPoBOi20jh6Iq2CcCREuTYufE= -go.opentelemetry.io/otel/metric v1.36.0/go.mod 
h1:zC7Ks+yeyJt4xig9DEw9kuUFe5C3zLbVjV2PzT6qzbs= -go.opentelemetry.io/otel/sdk v1.34.0 h1:95zS4k/2GOy069d321O8jWgYsW3MzVV+KuSPKp7Wr1A= -go.opentelemetry.io/otel/sdk v1.34.0/go.mod h1:0e/pNiaMAqaykJGKbi+tSjWfNNHMTxoC9qANsCzbyxU= -go.opentelemetry.io/otel/sdk/metric v1.34.0 h1:5CeK9ujjbFVL5c1PhLuStg1wxA7vQv7ce1EK0Gyvahk= -go.opentelemetry.io/otel/sdk/metric v1.34.0/go.mod h1:jQ/r8Ze28zRKoNRdkjCZxfs6YvBTG1+YIqyFVFYec5w= -go.opentelemetry.io/otel/trace v1.36.0 h1:ahxWNuqZjpdiFAyrIoQ4GIiAIhxAunQR6MUoKrsNd4w= -go.opentelemetry.io/otel/trace v1.36.0/go.mod h1:gQ+OnDZzrybY4k4seLzPAWNwVBBVlF2szhehOBB/tGA= +go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64= +go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y= +go.opentelemetry.io/contrib/instrumentation/github.com/aws/aws-sdk-go-v2/otelaws v0.63.0 h1:0W0GZvzQe514c3igO063tR0cFVStoABt1agKqlYToL8= +go.opentelemetry.io/contrib/instrumentation/github.com/aws/aws-sdk-go-v2/otelaws v0.63.0/go.mod h1:wIvTiRUU7Pbfqas/5JVjGZcftBeSAGSYVMOHWzWG0qE= +go.opentelemetry.io/otel v1.38.0 h1:RkfdswUDRimDg0m2Az18RKOsnI8UDzppJAtj01/Ymk8= +go.opentelemetry.io/otel v1.38.0/go.mod h1:zcmtmQ1+YmQM9wrNsTGV/q/uyusom3P8RxwExxkZhjM= +go.opentelemetry.io/otel/metric v1.38.0 h1:Kl6lzIYGAh5M159u9NgiRkmoMKjvbsKtYRwgfrA6WpA= +go.opentelemetry.io/otel/metric v1.38.0/go.mod h1:kB5n/QoRM8YwmUahxvI3bO34eVtQf2i4utNVLr9gEmI= +go.opentelemetry.io/otel/sdk v1.38.0 h1:l48sr5YbNf2hpCUj/FoGhW9yDkl+Ma+LrVl8qaM5b+E= +go.opentelemetry.io/otel/sdk v1.38.0/go.mod h1:ghmNdGlVemJI3+ZB5iDEuk4bWA3GkTpW+DOoZMYBVVg= +go.opentelemetry.io/otel/sdk/metric v1.37.0 h1:90lI228XrB9jCMuSdA0673aubgRobVZFhbjxHHspCPc= +go.opentelemetry.io/otel/sdk/metric v1.37.0/go.mod h1:cNen4ZWfiD37l5NhS+Keb5RXVWZWpRE+9WyVCpbo5ps= +go.opentelemetry.io/otel/trace v1.38.0 h1:Fxk5bKrDZJUH+AMyyIXGcFAPah0oRcT+LuNtJrmcNLE= +go.opentelemetry.io/otel/trace v1.38.0/go.mod h1:j1P9ivuFsTceSWe1oY+EeW3sc+Pp42sO++GHkg4wwhs= golang.org/x/crypto 
v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= -golang.org/x/crypto v0.39.0 h1:SHs+kF4LP+f+p14esP5jAoDpHU8Gu/v9lFRK6IT5imM= -golang.org/x/crypto v0.39.0/go.mod h1:L+Xg3Wf6HoL4Bn4238Z6ft6KfEpN0tJGo53AAPC632U= -golang.org/x/exp v0.0.0-20250506013437-ce4c2cf36ca6 h1:y5zboxd6LQAqYIhHnB48p0ByQ/GnQx2BE33L8BOHQkI= -golang.org/x/exp v0.0.0-20250506013437-ce4c2cf36ca6/go.mod h1:U6Lno4MTRCDY+Ba7aCcauB9T60gsv5s4ralQzP72ZoQ= +golang.org/x/crypto v0.43.0 h1:dduJYIi3A3KOfdGOHX8AVZ/jGiyPa3IbBozJ5kNuE04= +golang.org/x/crypto v0.43.0/go.mod h1:BFbav4mRNlXJL4wNeejLpWxB7wMbc79PdRGhWKncxR0= +golang.org/x/exp v0.0.0-20240222234643-814bf88cf225 h1:LfspQV/FYTatPTr/3HzIcmiUFH7PGP+OQ6mgDYo3yuQ= +golang.org/x/exp v0.0.0-20240222234643-814bf88cf225/go.mod h1:CxmFvTBINI24O/j8iY7H1xHzx2i4OsyguNBmN/uPtqc= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/mod v0.25.0 h1:n7a+ZbQKQA/Ysbyb0/6IbB1H/X41mKgbhfv7AfG/44w= -golang.org/x/mod v0.25.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww= +golang.org/x/mod v0.29.0 h1:HV8lRxZC4l2cr3Zq1LvtOsi/ThTgWnUk/y64QSs8GwA= +golang.org/x/mod v0.29.0/go.mod h1:NyhrlYXJ2H4eJiRy/WDBO6HMqZQ6q9nk4JzS3NuCK+w= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= -golang.org/x/net v0.41.0 
h1:vBTly1HeNPEn3wtREYfy4GZ/NECgw2Cnl+nK6Nz3uvw= -golang.org/x/net v0.41.0/go.mod h1:B/K4NNqkfmg07DQYrbwvSluqCJOOXwUjeb/5lOisjbA= +golang.org/x/net v0.46.0 h1:giFlY12I07fugqwPuWJi68oOnpfqFnJIJzaIIm2JVV4= +golang.org/x/net v0.46.0/go.mod h1:Q9BGdFy1y4nkUwiLvT5qtyhAnEHgnQ/zd8PfU6nc210= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw= -golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= +golang.org/x/sync v0.17.0 h1:l60nONMj9l5drqw6jlhIELNv9I0A4OFgRsG9k2oT9Ug= +golang.org/x/sync v0.17.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -821,44 +837,46 @@ golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw= -golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/sys v0.37.0 h1:fdNQudmxPjkdUTPnLn5mdQv7Zwvbvpaxqs831goi9kQ= +golang.org/x/sys v0.37.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod 
h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= -golang.org/x/term v0.32.0 h1:DR4lr0TjUs3epypdhTOkMmuF5CDFJ/8pOnbzMZPQ7bg= -golang.org/x/term v0.32.0/go.mod h1:uZG1FhGx848Sqfsq4/DlJr3xGGsYMu/L5GW4abiaEPQ= +golang.org/x/term v0.36.0 h1:zMPR+aF8gfksFprF/Nc/rd1wRS1EI6nDBGyWAvDzx2Q= +golang.org/x/term v0.36.0/go.mod h1:Qu394IJq6V6dCBRgwqshf3mPF85AqzYEzofzRdZkWss= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.27.0 h1:4fGWRpyh641NLlecmyl4LOe6yDdfaYNrGb2zdfo4JV4= -golang.org/x/text v0.27.0/go.mod h1:1D28KMCvyooCX9hBiosv5Tz/+YLxj0j7XhWjpSUF7CU= +golang.org/x/text v0.30.0 h1:yznKA/E9zq54KzlzBEAWn1NXSQ8DIp/NYMy88xJjl4k= +golang.org/x/text v0.30.0/go.mod h1:yDdHFIX9t+tORqspjENWgzaCVXgk0yYnYuSZ8UzzBVM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.34.0 h1:qIpSLOxeCYGg9TrcJokLBG4KFA6d795g0xkBkiESGlo= -golang.org/x/tools v0.34.0/go.mod h1:pAP9OwEaY1CAW3HOmg3hLZC5Z0CCmzjAF2UQMSqNARg= +golang.org/x/tools v0.38.0 h1:Hx2Xv8hISq8Lm16jvBZ2VQf+RLmbd7wVUsALibYI/IQ= +golang.org/x/tools v0.38.0/go.mod h1:yEsQ/d/YK8cjh0L6rZlY8tgtlKiBNTL14pGDJPJpYQs= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk= +gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM= google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250218202821-56aae31c358a h1:51aaUVRocpvUOSQKM6Q7VuoaktNIaMCLuhZB6DKksq4= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250218202821-56aae31c358a/go.mod h1:uRxBH1mhmO8PGhU89cMcHaXKZqO+OfakD8QQO0oYwlQ= -google.golang.org/grpc v1.72.1 h1:HR03wO6eyZ7lknl75XlxABNVLLFc2PAb6mHlYh756mA= -google.golang.org/grpc v1.72.1/go.mod h1:wH5Aktxcg25y1I3w7H69nHfXdOG3UiadoBtjh3izSDM= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250804133106-a7a43d27e69b h1:zPKJod4w6F1+nRGDI9ubnXYhU9NSWoFAijkHkUXeTK8= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250804133106-a7a43d27e69b/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A= +google.golang.org/grpc v1.75.1 h1:/ODCNEuf9VghjgO3rqLcfg8fiOP0nSluljWFlDxELLI= +google.golang.org/grpc v1.75.1/go.mod h1:JtPAzKiq4v1xcAB2hydNlWI2RnF85XXcV0mhKXr2ecQ= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY= -google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY= +google.golang.org/protobuf v1.36.9 h1:w2gp2mA27hUeUzj9Ex9FBjsBm40zfaDtEWow293U7Iw= +google.golang.org/protobuf v1.36.9/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod 
h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= -gopkg.in/dnaeon/go-vcr.v4 v4.0.4 h1:UNc8d1Ya2otEOU3DoUgnSLp0tXvBNE0FuFe86Nnzcbw= -gopkg.in/dnaeon/go-vcr.v4 v4.0.4/go.mod h1:65yxh9goQVrudqofKtHA4JNFWd6XZRkWfKN4YpMx7KI= +gopkg.in/dnaeon/go-vcr.v4 v4.0.5 h1:I0hpTIvD5rII+8LgYGrHMA2d4SQPoL6u7ZvJakWKsiA= +gopkg.in/dnaeon/go-vcr.v4 v4.0.5/go.mod h1:dRos81TkW9C1WJt6tTaE+uV2Lo8qJT3AG2b35+CB/nQ= gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME= gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/tools/tfsdk2fw/main.go b/tools/tfsdk2fw/main.go index cbfa767b83bd..f0201c4df04a 100644 --- a/tools/tfsdk2fw/main.go +++ b/tools/tfsdk2fw/main.go @@ -11,13 +11,13 @@ import ( "io" "os" "path" + "slices" "strings" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/generate/common" - "github.com/hashicorp/terraform-provider-aws/internal/provider" + "github.com/hashicorp/terraform-provider-aws/internal/provider/sdkv2" "github.com/hashicorp/terraform-provider-aws/tools/tfsdk2fw/naming" - "golang.org/x/exp/slices" ) var ( @@ -57,7 +57,7 @@ func main() { PackageName: packageName, } - p, err := provider.New(context.Background()) + p, err := sdkv2.NewProvider(context.Background()) if err != nil { g.Fatalf(err.Error()) diff --git a/version/VERSION b/version/VERSION new file mode 100644 index 000000000000..4e19b1d22a13 --- /dev/null +++ b/version/VERSION @@ -0,0 +1 @@ +6.17.0 \ No newline at end of file diff --git a/website/allowed-subcategories.txt 
b/website/allowed-subcategories.txt index 45c4b8b75701..d0fef3789c3b 100644 --- a/website/allowed-subcategories.txt +++ b/website/allowed-subcategories.txt @@ -17,6 +17,7 @@ AppStream 2.0 AppSync Application Auto Scaling Application Migration (Mgn) +Application Resilience Controller Region Switch Application Signals Athena Audit Manager @@ -26,6 +27,7 @@ BCM Data Exports Backup Batch Bedrock +Bedrock AgentCore Bedrock Agents Billing CE (Cost Explorer) @@ -174,6 +176,7 @@ Network Manager OpenSearch OpenSearch Ingestion OpenSearch Serverless +Oracle Database@AWS Organizations Outposts Outposts (EC2) @@ -207,6 +210,7 @@ S3 (Simple Storage) S3 Control S3 Glacier S3 Tables +S3 Vectors S3 on Outposts SES (Simple Email) SESv2 (Simple Email V2) @@ -255,6 +259,7 @@ WAF Classic Regional Wavelength Web Services Budgets Well-Architected Tool +WorkMail WorkSpaces WorkSpaces Web X-Ray diff --git a/website/docs/actions/cloudfront_create_invalidation.html.markdown b/website/docs/actions/cloudfront_create_invalidation.html.markdown new file mode 100644 index 000000000000..12d86e0a1024 --- /dev/null +++ b/website/docs/actions/cloudfront_create_invalidation.html.markdown @@ -0,0 +1,135 @@ +--- +subcategory: "CloudFront" +layout: "aws" +page_title: "AWS: aws_cloudfront_create_invalidation" +description: |- + Invalidates CloudFront distribution cache for specified paths. +--- + +# Action: aws_cloudfront_create_invalidation + +~> **Note:** `aws_cloudfront_create_invalidation` is in beta. Its interface and behavior may change as the feature evolves, and breaking changes are possible. It is offered as a technical preview without compatibility guarantees until Terraform 1.14 is generally available. + +Invalidates CloudFront distribution cache for specified paths. This action creates an invalidation request and waits for it to complete. 
+ +For information about CloudFront cache invalidation, see the [Amazon CloudFront Developer Guide](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/Invalidation.html). For specific information about creating invalidation requests, see the [CreateInvalidation](https://docs.aws.amazon.com/cloudfront/latest/APIReference/API_CreateInvalidation.html) page in the Amazon CloudFront API Reference. + +~> **Note:** CloudFront invalidation requests can take several minutes to complete. This action will wait for the invalidation to finish before continuing. You can only have a limited number of invalidation requests in progress at any given time. + +## Example Usage + +### Basic Usage + +```terraform +resource "aws_cloudfront_distribution" "example" { + # ... distribution configuration +} + +action "aws_cloudfront_create_invalidation" "example" { + config { + distribution_id = aws_cloudfront_distribution.example.id + paths = ["/*"] + } +} + +resource "terraform_data" "example" { + input = "trigger-invalidation" + + lifecycle { + action_trigger { + events = [before_create, before_update] + actions = [action.aws_cloudfront_create_invalidation.example] + } + } +} +``` + +### Invalidate Specific Paths + +```terraform +action "aws_cloudfront_create_invalidation" "assets" { + config { + distribution_id = aws_cloudfront_distribution.example.id + paths = [ + "/images/*", + "/css/*", + "/js/app.js", + "/index.html" + ] + timeout = 1200 # 20 minutes + } +} +``` + +### With Custom Caller Reference + +```terraform +action "aws_cloudfront_create_invalidation" "deployment" { + config { + distribution_id = aws_cloudfront_distribution.example.id + paths = ["/*"] + caller_reference = "deployment-${formatdate("YYYY-MM-DD-hhmm", timestamp())}" + timeout = 900 + } +} +``` + +### CI/CD Pipeline Integration + +Use this action in your deployment pipeline to invalidate cache after updating static assets: + +```terraform +# Trigger invalidation after S3 sync +resource 
"terraform_data" "deploy_complete" { + input = local.deployment_id + + lifecycle { + action_trigger { + events = [before_create, before_update] + actions = [action.aws_cloudfront_create_invalidation.post_deploy] + } + } + + depends_on = [aws_s3_object.assets] +} + +action "aws_cloudfront_create_invalidation" "post_deploy" { + config { + distribution_id = aws_cloudfront_distribution.main.id + paths = [ + "/index.html", + "/manifest.json", + "/static/js/*", + "/static/css/*" + ] + } +} +``` + +### Environment-Specific Invalidation + +```terraform +locals { + cache_paths = var.environment == "production" ? [ + "/api/*", + "/assets/*" + ] : ["/*"] +} + +action "aws_cloudfront_create_invalidation" "env_specific" { + config { + distribution_id = aws_cloudfront_distribution.app.id + paths = local.cache_paths + timeout = var.environment == "production" ? 1800 : 900 + } +} +``` + +## Argument Reference + +This action supports the following arguments: + +* `distribution_id` - (Required) ID of the CloudFront distribution to invalidate cache for. Must be a valid CloudFront distribution ID (e.g., E1GHKQ2EXAMPLE). +* `paths` - (Required) List of file paths or patterns to invalidate. Use `/*` to invalidate all files. Supports specific files (`/index.html`), directory wildcards (`/images/*`), or all files (`/*`). Maximum of 3000 paths per invalidation request. Note: The first 1,000 invalidation paths per month are free, additional paths are charged per path. +* `caller_reference` - (Optional) Unique identifier for the invalidation request. If not provided, one will be generated automatically. Maximum length of 128 characters. +* `timeout` - (Optional) Timeout in seconds to wait for the invalidation to complete. Defaults to 900 seconds (15 minutes). Must be between 60 and 3600 seconds. Invalidation requests typically take 5-15 minutes to process. 
diff --git a/website/docs/actions/codebuild_start_build.html.markdown b/website/docs/actions/codebuild_start_build.html.markdown new file mode 100644 index 000000000000..a8b0c14ef0d2 --- /dev/null +++ b/website/docs/actions/codebuild_start_build.html.markdown @@ -0,0 +1,101 @@ +--- +subcategory: "CodeBuild" +layout: "aws" +page_title: "AWS: aws_codebuild_start_build" +description: |- + Starts a CodeBuild project build. +--- + +# Action: aws_codebuild_start_build + +~> **Note:** `aws_codebuild_start_build` is in beta. Its interface and behavior may change as the feature evolves, and breaking changes are possible. It is offered as a technical preview without compatibility guarantees until Terraform 1.14 is generally available. + +Starts a CodeBuild project build. This action will initiate a build and wait for it to complete, providing progress updates during execution. + +For information about AWS CodeBuild, see the [AWS CodeBuild User Guide](https://docs.aws.amazon.com/codebuild/latest/userguide/). For specific information about starting builds, see the [StartBuild](https://docs.aws.amazon.com/codebuild/latest/APIReference/API_StartBuild.html) page in the AWS CodeBuild API Reference. 
+ +## Example Usage + +### Basic Usage + +```terraform +resource "aws_codebuild_project" "example" { + name = "example-project" + service_role = aws_iam_role.example.arn + + artifacts { + type = "NO_ARTIFACTS" + } + + environment { + compute_type = "BUILD_GENERAL1_SMALL" + image = "aws/codebuild/amazonlinux2-x86_64-standard:3.0" + type = "LINUX_CONTAINER" + } + + source { + type = "NO_SOURCE" + buildspec = "version: 0.2\nphases:\n build:\n commands:\n - echo 'Hello World'" + } +} + +action "aws_codebuild_start_build" "example" { + config { + project_name = aws_codebuild_project.example.name + } +} + +resource "terraform_data" "build_trigger" { + input = "trigger-build" + + lifecycle { + action_trigger { + events = [after_create] + actions = [action.aws_codebuild_start_build.example] + } + } +} +``` + +### Build with Environment Variables + +```terraform +action "aws_codebuild_start_build" "deploy" { + config { + project_name = aws_codebuild_project.deploy.name + source_version = "main" + timeout = 1800 + + environment_variables_override { + name = "ENVIRONMENT" + value = "production" + type = "PLAINTEXT" + } + + environment_variables_override { + name = "API_KEY" + value = "/prod/api-key" + type = "PARAMETER_STORE" + } + } +} +``` + +## Argument Reference + +The following arguments are required: + +* `project_name` - (Required) Name of the CodeBuild project to build. + +The following arguments are optional: + +* `region` - (Optional) Region where this action should be [run](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `source_version` - (Optional) Version of the build input to be built. For GitHub, this can be a commit SHA, branch name, or tag name. +* `timeout` - (Optional) Timeout in seconds for the build operation. Defaults to 1800 seconds (30 minutes). 
+* `environment_variables_override` - (Optional) Environment variables to override for this build. See [Environment Variables Override](#environment-variables-override) below. + +### Environment Variables Override + +* `name` - (Required) Environment variable name. +* `value` - (Required) Environment variable value. +* `type` - (Optional) Environment variable type. Valid values are `PLAINTEXT`, `PARAMETER_STORE`, or `SECRETS_MANAGER`. Defaults to `PLAINTEXT`. diff --git a/website/docs/actions/ec2_stop_instance.html.markdown b/website/docs/actions/ec2_stop_instance.html.markdown new file mode 100644 index 000000000000..8220cf22872b --- /dev/null +++ b/website/docs/actions/ec2_stop_instance.html.markdown @@ -0,0 +1,94 @@ +--- +subcategory: "EC2 (Elastic Compute Cloud)" +layout: "aws" +page_title: "AWS: aws_ec2_stop_instance" +description: |- + Stops an EC2 instance. +--- + +# Action: aws_ec2_stop_instance + +~> **Note:** `aws_ec2_stop_instance` is in alpha. Its interface and behavior may change as the feature evolves, and breaking changes are possible. It is offered as a technical preview without compatibility guarantees until Terraform 1.14 is generally available. + +!> **Warning:** This action may cause unintended consequences. When triggered, the `aws_ec2_stop_instance` action changes the instance state to `stopped`, and Terraform does not reconcile the change. With `aws_instance`, the `instance_state` attribute will be out of sync until the next refresh. With `aws_ec2_instance_state`, this action directly conflicts. With higher-level managers (Auto Scaling Groups, ECS/EKS node groups, EMR, Batch), it may trigger replacement or restart of instances. Use caution—this preview action should be limited to development environments. + +Stops an EC2 instance. This action will gracefully stop the instance and wait for it to reach the stopped state. + +For information about Amazon EC2, see the [Amazon EC2 User Guide](https://docs.aws.amazon.com/ec2/latest/userguide/). 
For specific information about stopping instances, see the [StopInstances](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_StopInstances.html) page in the Amazon EC2 API Reference. + +~> **Note:** This action directly stops EC2 instances which will interrupt running workloads. Ensure proper coordination with your applications before using this action. + +## Example Usage + +### Basic Usage + +```terraform +resource "aws_instance" "example" { + ami = data.aws_ami.amazon_linux.id + instance_type = "t3.micro" + + tags = { + Name = "example-instance" + } +} + +action "aws_ec2_stop_instance" "example" { + config { + instance_id = aws_instance.example.id + } +} +``` + +### Force Stop + +```terraform +action "aws_ec2_stop_instance" "force_stop" { + config { + instance_id = aws_instance.example.id + force = true + timeout = 300 + } +} +``` + +### Maintenance Window + +```terraform +resource "aws_instance" "web_server" { + ami = data.aws_ami.amazon_linux.id + instance_type = "t3.micro" + + tags = { + Name = "web-server" + } +} + +action "aws_ec2_stop_instance" "maintenance" { + config { + instance_id = aws_instance.web_server.id + timeout = 900 + } +} + +resource "terraform_data" "maintenance_trigger" { + input = var.maintenance_window + + lifecycle { + action_trigger { + events = [before_create, before_update] + actions = [action.aws_ec2_stop_instance.maintenance] + } + } + + depends_on = [aws_instance.web_server] +} +``` + +## Argument Reference + +This action supports the following arguments: + +* `instance_id` - (Required) ID of the EC2 instance to stop. Must be a valid EC2 instance ID (e.g., i-1234567890abcdef0). +* `force` - (Optional) Forces the instance to stop. The instance does not have an opportunity to flush file system caches or file system metadata. If you use this option, you must perform file system check and repair procedures. This option is not recommended for Windows instances. Default: `false`. 
+* `region` - (Optional) Region where this action should be [run](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `timeout` - (Optional) Timeout in seconds to wait for the instance to stop. Must be between 30 and 3600 seconds. Default: `600`. diff --git a/website/docs/actions/events_put_events.html.markdown b/website/docs/actions/events_put_events.html.markdown new file mode 100644 index 000000000000..1dde36fd87dd --- /dev/null +++ b/website/docs/actions/events_put_events.html.markdown @@ -0,0 +1,146 @@ +--- +subcategory: "EventBridge" +layout: "aws" +page_title: "AWS: aws_events_put_events" +description: |- + Sends custom events to Amazon EventBridge so that they can be matched to rules. +--- + +# Action: aws_events_put_events + +~> **Note:** `aws_events_put_events` is in beta. Its interface and behavior may change as the feature evolves, and breaking changes are possible. It is offered as a technical preview without compatibility guarantees until Terraform 1.14 is generally available. + +Sends custom events to Amazon EventBridge so that they can be matched to rules. This action provides an imperative way to emit events from Terraform plans (e.g., deployment notifications) while still allowing Terraform to manage when the emission occurs through `action_trigger` lifecycle events. 
+ +## Example Usage + +### Basic Event + +```terraform +action "aws_events_put_events" "example" { + config { + entry { + source = "mycompany.myapp" + detail_type = "User Action" + detail = jsonencode({ + user_id = "12345" + action = "login" + }) + } + } +} +``` + +### Multiple Events + +```terraform +action "aws_events_put_events" "batch" { + config { + entry { + source = "mycompany.orders" + detail_type = "Order Created" + detail = jsonencode({ + order_id = "order-123" + amount = 99.99 + }) + } + + entry { + source = "mycompany.orders" + detail_type = "Order Updated" + detail = jsonencode({ + order_id = "order-456" + status = "shipped" + }) + } + } +} +``` + +### Custom Event Bus + +```terraform +resource "aws_cloudwatch_event_bus" "example" { + name = "custom-bus" +} + +action "aws_events_put_events" "custom_bus" { + config { + entry { + source = "mycompany.analytics" + detail_type = "Page View" + event_bus_name = aws_cloudwatch_event_bus.example.name + detail = jsonencode({ + page = "/home" + user = "anonymous" + }) + } + } +} +``` + +### Event with Resources and Timestamp + +```terraform +action "aws_events_put_events" "detailed" { + config { + entry { + source = "aws.ec2" + detail_type = "EC2 Instance State-change Notification" + time = "2023-01-01T12:00:00Z" # RFC3339 + resources = ["arn:aws:ec2:us-east-1:123456789012:instance/i-1234567890abcdef0"] + detail = jsonencode({ + instance_id = "i-1234567890abcdef0" + state = "running" + }) + } + } +} +``` + +### Triggered by Terraform Data + +```terraform +resource "terraform_data" "deploy" { + input = var.deployment_id + + lifecycle { + action_trigger { + events = [before_create, before_update] + actions = [action.aws_events_put_events.deployment] + } + } +} + +action "aws_events_put_events" "deployment" { + config { + entry { + source = "mycompany.deployments" + detail_type = "Deployment Complete" + detail = jsonencode({ + deployment_id = var.deployment_id + environment = var.environment + timestamp = 
timestamp() + }) + } + } +} +``` + +## Argument Reference + +This action supports the following arguments: + +* `entry` - (Required) One or more `entry` blocks defining events to send. Multiple blocks may be specified. See [below](#entry-block). +* `region` - (Optional) Region where this action should be [run](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). + +### `entry` Block + +Each `entry` block supports: + +* `source` - (Required) The source identifier for the event (e.g., `mycompany.myapp`). +* `detail_type` - (Optional) Free-form string used to decide what fields to expect in the event detail. +* `detail` - (Optional) JSON string (use `jsonencode()`) representing the event detail payload. +* `event_bus_name` - (Optional) Name or ARN of the event bus. Defaults to the account's default bus. +* `resources` - (Optional) List of ARNs the event primarily concerns. +* `time` - (Optional) RFC3339 timestamp for the event. If omitted, the receive time is used. diff --git a/website/docs/actions/lambda_invoke.html.markdown b/website/docs/actions/lambda_invoke.html.markdown new file mode 100644 index 000000000000..4372d8008799 --- /dev/null +++ b/website/docs/actions/lambda_invoke.html.markdown @@ -0,0 +1,222 @@ +--- +subcategory: "Lambda" +layout: "aws" +page_title: "AWS: aws_lambda_invoke" +description: |- + Invokes an AWS Lambda function with the specified payload. +--- + +# Action: aws_lambda_invoke + +~> **Note:** `aws_lambda_invoke` is in beta. Its interface and behavior may change as the feature evolves, and breaking changes are possible. It is offered as a technical preview without compatibility guarantees until Terraform 1.14 is generally available. + +Invokes an AWS Lambda function with the specified payload. 
This action allows for imperative invocation of Lambda functions with full control over invocation parameters. + +For information about AWS Lambda functions, see the [AWS Lambda Developer Guide](https://docs.aws.amazon.com/lambda/latest/dg/). For specific information about invoking Lambda functions, see the [Invoke](https://docs.aws.amazon.com/lambda/latest/api/API_Invoke.html) page in the AWS Lambda API Reference. + +~> **Note:** Synchronous invocations will wait for the function to complete execution, while asynchronous invocations return immediately after the request is _accepted_. + +## Example Usage + +### Basic Usage + +```terraform +resource "aws_lambda_function" "example" { + # ... function configuration +} + +action "aws_lambda_invoke" "example" { + config { + function_name = aws_lambda_function.example.function_name + payload = jsonencode({ + key1 = "value1" + key2 = "value2" + }) + } +} + +resource "terraform_data" "example" { + input = "trigger-lambda" + + lifecycle { + action_trigger { + events = [before_create, before_update] + actions = [action.aws_lambda_invoke.example] + } + } +} +``` + +### Invoke with Function Version + +```terraform +action "aws_lambda_invoke" "versioned" { + config { + function_name = aws_lambda_function.example.function_name + qualifier = aws_lambda_function.example.version + payload = jsonencode({ + operation = "process" + data = var.processing_data + }) + } +} +``` + +### Asynchronous Invocation + +```terraform +action "aws_lambda_invoke" "async" { + config { + function_name = aws_lambda_function.worker.function_name + invocation_type = "Event" + payload = jsonencode({ + task_id = "background-job-${random_uuid.job_id.result}" + data = local.background_task_data + }) + } +} +``` + +### Dry Run Validation + +```terraform +action "aws_lambda_invoke" "validate" { + config { + function_name = aws_lambda_function.validator.function_name + invocation_type = "DryRun" + payload = jsonencode({ + config = var.validation_config + }) + } 
+} +``` + +### With Log Capture + +```terraform +action "aws_lambda_invoke" "debug" { + config { + function_name = aws_lambda_function.debug.function_name + log_type = "Tail" + payload = jsonencode({ + debug_level = "verbose" + component = "api-gateway" + }) + } +} +``` + +### Mobile Application Context + +```terraform +action "aws_lambda_invoke" "mobile" { + config { + function_name = aws_lambda_function.mobile_backend.function_name + client_context = base64encode(jsonencode({ + client = { + client_id = "mobile-app" + app_version = "1.2.3" + } + env = { + locale = "en_US" + } + })) + payload = jsonencode({ + user_id = var.user_id + action = "sync_data" + }) + } +} +``` + +### CI/CD Pipeline Integration + +Use this action in your deployment pipeline to trigger post-deployment functions: + +```terraform +# Trigger warmup after deployment +resource "terraform_data" "deploy_complete" { + input = local.deployment_id + + lifecycle { + action_trigger { + events = [before_create, before_update] + actions = [action.aws_lambda_invoke.warmup] + } + } + + depends_on = [aws_lambda_function.api] +} + +action "aws_lambda_invoke" "warmup" { + config { + function_name = aws_lambda_function.api.function_name + payload = jsonencode({ + action = "warmup" + source = "terraform-deployment" + }) + } +} +``` + +### Environment-Specific Processing + +```terraform +locals { + processing_config = var.environment == "production" ? 
{ + batch_size = 100 + timeout = 900 + } : { + batch_size = 10 + timeout = 60 + } +} + +action "aws_lambda_invoke" "process_data" { + config { + function_name = aws_lambda_function.processor.function_name + payload = jsonencode(merge(local.processing_config, { + data_source = var.data_source + environment = var.environment + })) + } +} +``` + +### Complex Payload with Dynamic Content + +```terraform +action "aws_lambda_invoke" "complex" { + config { + function_name = aws_lambda_function.orchestrator.function_name + payload = jsonencode({ + workflow = { + id = "workflow-${timestamp()}" + steps = var.workflow_steps + } + resources = { + s3_bucket = aws_s3_bucket.data.bucket + dynamodb = aws_dynamodb_table.state.name + sns_topic = aws_sns_topic.notifications.arn + } + metadata = { + created_by = "terraform" + environment = var.environment + version = var.app_version + } + }) + } +} +``` + +## Argument Reference + +This action supports the following arguments: + +* `client_context` - (Optional) Up to 3,583 bytes of base64-encoded data about the invoking client to pass to the function in the context object. This is only used for mobile applications and should contain information about the client application and device. +* `function_name` - (Required) Name, ARN, or partial ARN of the Lambda function to invoke. You can specify a function name (e.g., `my-function`), a qualified function name (e.g., `my-function:PROD`), or a partial ARN (e.g., `123456789012:function:my-function`). +* `invocation_type` - (Optional) Invocation type. Valid values are `RequestResponse` (default) for synchronous invocation that waits for the function to complete and returns the response, `Event` for asynchronous invocation that returns immediately after the request is accepted, and `DryRun` to validate parameters and verify permissions without actually executing the function. +* `log_type` - (Optional) Set to `Tail` to include the execution log in the response. 
Only applies to synchronous invocations (`RequestResponse` invocation type). Defaults to `None`. When set to `Tail`, the last 4 KB of the execution log is included in the response. +* `payload` - (Required) JSON payload to send to the Lambda function. This should be a valid JSON string that represents the event data for your function. The payload size limit is 6 MB for synchronous invocations and 256 KB for asynchronous invocations. +* `region` - (Optional) Region where this action should be [run](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `qualifier` - (Optional) Version or alias of the Lambda function to invoke. If not specified, the `$LATEST` version will be invoked. Can be a version number (e.g., `1`) or an alias (e.g., `PROD`). diff --git a/website/docs/actions/ses_send_email.html.markdown b/website/docs/actions/ses_send_email.html.markdown new file mode 100644 index 000000000000..9311d0a810fe --- /dev/null +++ b/website/docs/actions/ses_send_email.html.markdown @@ -0,0 +1,177 @@ +--- +subcategory: "SES (Simple Email)" +layout: "aws" +page_title: "AWS: aws_ses_send_email" +description: |- + Sends an email using Amazon SES. +--- + +# Action: aws_ses_send_email + +~> **Note:** `aws_ses_send_email` is in beta. Its interface and behavior may change as the feature evolves, and breaking changes are possible. It is offered as a technical preview without compatibility guarantees until Terraform 1.14 is generally available. + +Sends an email using Amazon SES. This action allows for imperative email sending with full control over recipients, content, and formatting. + +For information about Amazon SES, see the [Amazon SES Developer Guide](https://docs.aws.amazon.com/ses/latest/dg/). 
For specific information about sending emails, see the [SendEmail](https://docs.aws.amazon.com/ses/latest/APIReference/API_SendEmail.html) page in the Amazon SES API Reference. + +~> **Note:** All email addresses used must be verified in Amazon SES or belong to a verified domain. Due to the difficulty in testing, your help is important in discovering and reporting issues. + +## Example Usage + +### Basic Usage + +```terraform +resource "aws_ses_email_identity" "example" { + email = "sender@example.com" +} + +action "aws_ses_send_email" "example" { + config { + source = aws_ses_email_identity.example.email + subject = "Test Email" + text_body = "This is a test email sent from Terraform." + to_addresses = ["recipient@example.com"] + } +} + +resource "terraform_data" "example" { + input = "send-notification" + + lifecycle { + action_trigger { + events = [before_create, before_update] + actions = [action.aws_ses_send_email.example] + } + } +} +``` + +### HTML Email with Multiple Recipients + +```terraform +action "aws_ses_send_email" "newsletter" { + config { + source = aws_ses_email_identity.marketing.email + subject = "Monthly Newsletter - ${formatdate("MMMM YYYY", timestamp())}" + html_body = "

<h1>Welcome!</h1><p>This is our monthly newsletter.</p>
" + to_addresses = var.subscriber_emails + cc_addresses = ["manager@example.com"] + reply_to_addresses = ["support@example.com"] + return_path = "bounces@example.com" + } +} +``` + +### Deployment Notification + +```terraform +action "aws_ses_send_email" "deploy_notification" { + config { + source = "deployments@example.com" + subject = "Deployment Complete: ${var.environment}" + text_body = "Application ${var.app_name} has been successfully deployed to ${var.environment}." + to_addresses = var.team_emails + } +} + +resource "terraform_data" "deployment" { + input = var.deployment_id + + lifecycle { + action_trigger { + events = [after_create] + actions = [action.aws_ses_send_email.deploy_notification] + } + } + + depends_on = [aws_instance.app] +} +``` + +### Alert Email with Dynamic Content + +```terraform +locals { + alert_body = templatefile("${path.module}/templates/alert.txt", { + service = var.service_name + environment = var.environment + timestamp = timestamp() + details = var.alert_details + }) +} + +action "aws_ses_send_email" "alert" { + config { + source = "alerts@example.com" + subject = "ALERT: ${var.service_name} Issue Detected" + text_body = local.alert_body + to_addresses = var.oncall_emails + cc_addresses = var.manager_emails + } +} +``` + +### Multi-format Email + +```terraform +action "aws_ses_send_email" "welcome" { + config { + source = aws_ses_email_identity.noreply.email + subject = "Welcome to ${var.company_name}!" + text_body = "Welcome! Thank you for joining us. Visit our website for more information." + html_body = templatefile("${path.module}/templates/welcome.html", { + user_name = var.user_name + company_name = var.company_name + website_url = var.website_url + }) + to_addresses = [var.user_email] + } +} +``` + +### Conditional Email Sending + +```terraform +action "aws_ses_send_email" "conditional" { + config { + source = "notifications@example.com" + subject = var.environment == "production" ? 
"Production Alert" : "Test Alert" + text_body = "This is a ${var.environment} environment notification." + to_addresses = var.environment == "production" ? var.prod_emails : var.dev_emails + } +} +``` + +### Batch Processing Notification + +```terraform +action "aws_ses_send_email" "batch_complete" { + config { + source = "batch-jobs@example.com" + subject = "Batch Processing Complete - ${var.job_name}" + html_body = <<-HTML +

<h2>Batch Job Results</h2>
+      <p>Job: ${var.job_name}</p>
+      <p>Records Processed: ${var.records_processed}</p>
+      <p>Duration: ${var.processing_duration}</p>
+      <p>Status: ${var.job_status}</p>
+ HTML + to_addresses = var.admin_emails + } +} +``` + +## Argument Reference + +This action supports the following arguments: + +* `bcc_addresses` - (Optional) List of email addresses for the BCC: field of the message. Recipients in this list will receive the email but their addresses will not be visible to other recipients. +* `cc_addresses` - (Optional) List of email addresses for the CC: field of the message. Recipients in this list will receive the email and their addresses will be visible to all recipients. +* `html_body` - (Optional) Message body in HTML format. Either `text_body` or `html_body` (or both) must be specified. HTML content allows for rich formatting including links, images, and styling. +* `reply_to_addresses` - (Optional) List of reply-to email addresses for the message. If the recipient replies to the message, each reply-to address will receive the reply. If not specified, replies will go to the source address. +* `region` - (Optional) Region where this action should be [run](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `return_path` - (Optional) Email address that bounces and complaints will be forwarded to when feedback forwarding is enabled. This is useful for handling delivery failures and spam complaints. +* `source` - (Required) Email address that is sending the email. This address must be either individually verified with Amazon SES, or from a domain that has been verified with Amazon SES. +* `subject` - (Required) Subject of the message: A short summary of the content, which will appear in the recipient's inbox. +* `text_body` - (Optional) Message body in text format. Either `text_body` or `html_body` (or both) must be specified. Text format ensures compatibility with all email clients. 
+* `to_addresses` - (Optional) List of email addresses for the To: field of the message. These are the primary recipients of the email. diff --git a/website/docs/actions/sfn_start_execution.html.markdown b/website/docs/actions/sfn_start_execution.html.markdown new file mode 100644 index 000000000000..37ac7cc2cee4 --- /dev/null +++ b/website/docs/actions/sfn_start_execution.html.markdown @@ -0,0 +1,238 @@ +--- +subcategory: "SFN (Step Functions)" +layout: "aws" +page_title: "AWS: aws_sfn_start_execution" +description: |- + Starts a Step Functions state machine execution with the specified input data. +--- + +# Action: aws_sfn_start_execution + +~> **Note:** `aws_sfn_start_execution` is in beta. Its interface and behavior may change as the feature evolves, and breaking changes are possible. It is offered as a technical preview without compatibility guarantees until Terraform 1.14 is generally available. + +Starts a Step Functions state machine execution with the specified input data. This action allows for imperative execution of state machines with full control over execution parameters. + +For information about AWS Step Functions, see the [AWS Step Functions Developer Guide](https://docs.aws.amazon.com/step-functions/latest/dg/). For specific information about starting executions, see the [StartExecution](https://docs.aws.amazon.com/step-functions/latest/apireference/API_StartExecution.html) page in the AWS Step Functions API Reference. + +~> **Note:** For `STANDARD` workflows, executions with the same name and input are idempotent. For `EXPRESS` workflows, each execution is unique regardless of name and input. + +## Example Usage + +### Basic Usage + +```terraform +resource "aws_sfn_state_machine" "example" { + name = "example-state-machine" + role_arn = aws_iam_role.sfn.arn + + definition = jsonencode({ + Comment = "A simple minimal example" + StartAt = "Hello" + States = { + Hello = { + Type = "Pass" + Result = "Hello World!" 
+ End = true + } + } + }) +} + +action "aws_sfn_start_execution" "example" { + config { + state_machine_arn = aws_sfn_state_machine.example.arn + input = jsonencode({ + user_id = "12345" + action = "process" + }) + } +} + +resource "terraform_data" "example" { + input = "trigger-execution" + + lifecycle { + action_trigger { + events = [before_create, before_update] + actions = [action.aws_sfn_start_execution.example] + } + } +} +``` + +### Named Execution + +```terraform +action "aws_sfn_start_execution" "named" { + config { + state_machine_arn = aws_sfn_state_machine.processor.arn + name = "deployment-${var.deployment_id}" + input = jsonencode({ + deployment_id = var.deployment_id + environment = var.environment + }) + } +} +``` + +### Execution with Version + +```terraform +action "aws_sfn_start_execution" "versioned" { + config { + state_machine_arn = "${aws_sfn_state_machine.example.arn}:${aws_sfn_state_machine.example.version_number}" + input = jsonencode({ + version = "v2" + config = var.processing_config + }) + } +} +``` + +### Execution with Alias + +```terraform +resource "aws_sfn_alias" "prod" { + name = "PROD" + state_machine_arn = aws_sfn_state_machine.example.arn + routing_configuration { + state_machine_version_weight { + state_machine_version_arn = aws_sfn_state_machine.example.arn + weight = 100 + } + } +} + +action "aws_sfn_start_execution" "production" { + config { + state_machine_arn = aws_sfn_alias.prod.arn + input = jsonencode({ + environment = "production" + batch_size = 1000 + }) + } +} +``` + +### X-Ray Tracing + +```terraform +action "aws_sfn_start_execution" "traced" { + config { + state_machine_arn = aws_sfn_state_machine.example.arn + trace_header = "Root=1-${formatdate("YYYYMMDD", timestamp())}-${substr(uuid(), 0, 24)}" + input = jsonencode({ + trace_id = "custom-trace-${timestamp()}" + data = var.processing_data + }) + } +} +``` + +### CI/CD Pipeline Integration + +Use this action in your deployment pipeline to trigger post-deployment 
workflows: + +```terraform +resource "terraform_data" "deploy_complete" { + input = local.deployment_id + + lifecycle { + action_trigger { + events = [before_create, before_update] + actions = [action.aws_sfn_start_execution.post_deploy] + } + } + + depends_on = [aws_lambda_function.processors] +} + +action "aws_sfn_start_execution" "post_deploy" { + config { + state_machine_arn = aws_sfn_state_machine.data_pipeline.arn + name = "post-deploy-${local.deployment_id}" + input = jsonencode({ + deployment_id = local.deployment_id + environment = var.environment + resources = { + lambda_functions = [for f in aws_lambda_function.processors : f.arn] + s3_bucket = aws_s3_bucket.data.bucket + } + }) + } +} +``` + +### Environment-Specific Processing + +```terraform +locals { + execution_config = var.environment == "production" ? { + batch_size = 1000 + max_retries = 3 + timeout_hours = 24 + } : { + batch_size = 100 + max_retries = 1 + timeout_hours = 2 + } +} + +action "aws_sfn_start_execution" "batch_process" { + config { + state_machine_arn = aws_sfn_state_machine.batch_processor.arn + input = jsonencode(merge(local.execution_config, { + data_source = var.data_source + output_path = var.output_path + })) + } +} +``` + +### Complex Workflow Orchestration + +```terraform +action "aws_sfn_start_execution" "orchestrator" { + config { + state_machine_arn = aws_sfn_state_machine.orchestrator.arn + input = jsonencode({ + workflow = { + id = "workflow-${timestamp()}" + type = "data-processing" + steps = var.workflow_steps + } + resources = { + compute = { + lambda_functions = [for f in aws_lambda_function.workers : f.arn] + ecs_cluster = aws_ecs_cluster.processing.arn + } + storage = { + input_bucket = aws_s3_bucket.input.bucket + output_bucket = aws_s3_bucket.output.bucket + temp_bucket = aws_s3_bucket.temp.bucket + } + messaging = { + success_topic = aws_sns_topic.success.arn + error_topic = aws_sns_topic.errors.arn + } + } + metadata = { + created_by = "terraform" + environment 
= var.environment + version = var.app_version + tags = var.execution_tags + } + }) + } +} +``` + +## Argument Reference + +This action supports the following arguments: + +* `input` - (Optional) JSON input data for the execution. Must be valid JSON. Defaults to `{}` if not specified. The input size limit is 256 KB. +* `name` - (Optional) Name of the execution. Must be unique within the account/region/state machine for 90 days. If not provided, Step Functions automatically generates a UUID. Names must not contain whitespace, brackets, wildcards, or special characters. +* `region` - (Optional) Region where this action should be [run](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `state_machine_arn` - (Required) ARN of the state machine to execute. Can be an unqualified ARN, version-qualified ARN (e.g., `arn:aws:states:region:account:stateMachine:name:version`), or alias-qualified ARN (e.g., `arn:aws:states:region:account:stateMachine:name:alias`). +* `trace_header` - (Optional) AWS X-Ray trace header for distributed tracing. Used to correlate execution traces across services. diff --git a/website/docs/actions/sns_publish.html.markdown b/website/docs/actions/sns_publish.html.markdown new file mode 100644 index 000000000000..c238419bf170 --- /dev/null +++ b/website/docs/actions/sns_publish.html.markdown @@ -0,0 +1,151 @@ +--- +subcategory: "SNS (Simple Notification)" +layout: "aws" +page_title: "AWS: aws_sns_publish" +description: |- + Publishes a message to an Amazon SNS topic. +--- + +# Action: aws_sns_publish + +~> **Note:** `aws_sns_publish` is in beta. Its interface and behavior may change as the feature evolves, and breaking changes are possible. It is offered as a technical preview without compatibility guarantees until Terraform 1.14 is generally available. 
+ +Publishes a message to an Amazon SNS topic. This action allows for imperative message publishing with full control over message attributes and structure. + +For information about Amazon SNS, see the [Amazon SNS Developer Guide](https://docs.aws.amazon.com/sns/latest/dg/). For specific information about publishing messages, see the [Publish](https://docs.aws.amazon.com/sns/latest/api/API_Publish.html) page in the Amazon SNS API Reference. + +## Example Usage + +### Basic Usage + +```terraform +resource "aws_sns_topic" "example" { + name = "example-topic" +} + +action "aws_sns_publish" "example" { + config { + topic_arn = aws_sns_topic.example.arn + message = "Hello from Terraform!" + } +} + +resource "terraform_data" "example" { + input = "trigger-message" + + lifecycle { + action_trigger { + events = [before_create, before_update] + actions = [action.aws_sns_publish.example] + } + } +} +``` + +### Message with Subject + +```terraform +action "aws_sns_publish" "notification" { + config { + topic_arn = aws_sns_topic.alerts.arn + subject = "System Alert" + message = "Critical system event detected at ${timestamp()}" + } +} +``` + +### JSON Message Structure + +```terraform +action "aws_sns_publish" "structured" { + config { + topic_arn = aws_sns_topic.mobile.arn + message_structure = "json" + message = jsonencode({ + default = "Default message" + email = "Email version of the message" + sms = "SMS version" + GCM = jsonencode({ + data = { + message = "Push notification message" + } + }) + }) + } +} +``` + +### Message with Attributes + +```terraform +action "aws_sns_publish" "with_attributes" { + config { + topic_arn = aws_sns_topic.processing.arn + message = "Process this data" + + message_attributes { + map_block_key = "priority" + data_type = "String" + string_value = "high" + } + + message_attributes { + map_block_key = "source" + data_type = "String" + string_value = "terraform" + } + } +} +``` + +### Deployment Notification + +```terraform +action 
"aws_sns_publish" "deploy_complete" { + config { + topic_arn = aws_sns_topic.deployments.arn + subject = "Deployment Complete" + message = jsonencode({ + environment = var.environment + version = var.app_version + timestamp = timestamp() + resources = { + instances = length(aws_instance.app) + databases = length(aws_db_instance.main) + } + }) + } +} + +resource "terraform_data" "deploy_trigger" { + input = var.deployment_id + + lifecycle { + action_trigger { + events = [before_create, before_update] + actions = [action.aws_sns_publish.deploy_complete] + } + } + + depends_on = [aws_instance.app, aws_db_instance.main] +} +``` + +## Argument Reference + +This action supports the following arguments: + +* `message` - (Required) Message to publish. For JSON message structure, this should be a JSON object with protocol-specific messages. Maximum size is 256 KB. +* `message_attributes` - (Optional) Message attributes to include with the message. Each attribute consists of a name, data type, and value. Up to 10 attributes are allowed. [See below.](#message-attributes) +* `message_structure` - (Optional) Set to `json` if you want to send different messages for each protocol. If not specified, the message will be sent as-is to all protocols. +* `region` - (Optional) Region where this action should be [run](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `subject` - (Optional) Optional subject for the message. Only used for email and email-json protocols. Maximum length is 100 characters. +* `topic_arn` - (Required) ARN of the SNS topic to publish the message to. + +### Message Attributes + +The `message_attributes` block supports: + +* `data_type` - (Required) Data type of the message attribute. Valid values are `String`, `Number`, and `Binary`. 
+* `map_block_key` - (Required) Name of the message attribute (used as map key). Must be unique within the message. +* `string_value` - (Required) Value of the message attribute. diff --git a/website/docs/actions/transcribe_start_transcription_job.html.markdown b/website/docs/actions/transcribe_start_transcription_job.html.markdown new file mode 100644 index 000000000000..0feb65ab3a99 --- /dev/null +++ b/website/docs/actions/transcribe_start_transcription_job.html.markdown @@ -0,0 +1,124 @@ +--- +subcategory: "Transcribe" +layout: "aws" +page_title: "AWS: aws_transcribe_start_transcription_job" +description: |- + Starts an Amazon Transcribe transcription job. +--- + +# Action: aws_transcribe_start_transcription_job + +~> **Note:** `aws_transcribe_start_transcription_job` is in beta. Its interface and behavior may change as the feature evolves, and breaking changes are possible. It is offered as a technical preview without compatibility guarantees until Terraform 1.14 is generally available. + +Starts an Amazon Transcribe transcription job to transcribe audio from a media file. The media file must be uploaded to an Amazon S3 bucket before starting the transcription job. + +For information about Amazon Transcribe, see the [Amazon Transcribe Developer Guide](https://docs.aws.amazon.com/transcribe/latest/dg/). For specific information about starting transcription jobs, see the [StartTranscriptionJob](https://docs.aws.amazon.com/transcribe/latest/APIReference/API_StartTranscriptionJob.html) page in the Amazon Transcribe API Reference. + +~> **Note:** This action starts the transcription job and waits for it to begin processing, but does not wait for the transcription to complete. The job will continue running asynchronously after the action completes. 
+ +## Example Usage + +### Basic Usage + +```terraform +resource "aws_s3_bucket" "example" { + bucket = "my-transcription-bucket" +} + +resource "aws_s3_object" "audio" { + bucket = aws_s3_bucket.example.bucket + key = "audio/meeting.mp3" + source = "path/to/meeting.mp3" +} + +action "aws_transcribe_start_transcription_job" "example" { + config { + transcription_job_name = "meeting-transcription-${timestamp()}" + media_file_uri = "s3://${aws_s3_bucket.example.bucket}/${aws_s3_object.audio.key}" + language_code = "en-US" + } +} +``` + +### Automatic Language Detection + +```terraform +action "aws_transcribe_start_transcription_job" "auto_detect" { + config { + transcription_job_name = "auto-detect-transcription" + media_file_uri = "s3://my-bucket/audio/multilingual-meeting.mp3" + identify_language = true + timeout = 600 + } +} +``` + +### Multiple Language Detection + +```terraform +action "aws_transcribe_start_transcription_job" "multilingual" { + config { + transcription_job_name = "multilingual-transcription" + media_file_uri = "s3://my-bucket/audio/conference-call.mp3" + identify_multiple_languages = true + media_format = "mp3" + media_sample_rate_hertz = 44100 + } +} +``` + +### Custom Output Location + +```terraform +action "aws_transcribe_start_transcription_job" "custom_output" { + config { + transcription_job_name = "custom-output-transcription" + media_file_uri = "s3://my-bucket/audio/interview.wav" + language_code = "en-US" + output_bucket_name = aws_s3_bucket.transcripts.bucket + output_key = "transcripts/interview-transcript.json" + } +} +``` + +### CI/CD Pipeline Integration + +```terraform +resource "terraform_data" "process_audio" { + input = var.audio_files + + lifecycle { + action_trigger { + events = [before_create, before_update] + actions = [action.aws_transcribe_start_transcription_job.batch_process] + } + } + + depends_on = [aws_s3_object.uploaded_audio] +} + +action "aws_transcribe_start_transcription_job" "batch_process" { + config { + 
transcription_job_name = "batch-${formatdate("YYYY-MM-DD-hhmm", timestamp())}" + media_file_uri = "s3://${aws_s3_bucket.audio.bucket}/${aws_s3_object.uploaded_audio.key}" + language_code = var.audio_language + timeout = 900 + } +} +``` + +## Argument Reference + +This action supports the following arguments: + +* `transcription_job_name` - (Required) Unique name for the transcription job within your AWS account. Must be 1-200 characters and contain only alphanumeric characters, hyphens, periods, and underscores. +* `media_file_uri` - (Required) S3 location of the media file to transcribe (e.g., `s3://bucket-name/file.mp3`). The file must be accessible to Amazon Transcribe. +* `language_code` - (Optional) Language code for the language used in the input media file. Required if `identify_language` and `identify_multiple_languages` are both false. Valid values can be found in the [Amazon Transcribe supported languages documentation](https://docs.aws.amazon.com/transcribe/latest/dg/supported-languages.html). +* `identify_language` - (Optional) Enable automatic language identification for single-language media files. Cannot be used with `identify_multiple_languages`. Default: `false`. +* `identify_multiple_languages` - (Optional) Enable automatic language identification for multi-language media files. Cannot be used with `identify_language`. Default: `false`. +* `media_format` - (Optional) Format of the input media file. If not specified, Amazon Transcribe will attempt to determine the format automatically. Valid values: `mp3`, `mp4`, `wav`, `flac`, `ogg`, `amr`, `webm`, `m4a`. +* `media_sample_rate_hertz` - (Optional) Sample rate of the input media file in Hertz. If not specified, Amazon Transcribe will attempt to determine the sample rate automatically. Valid range: 8000-48000. +* `output_bucket_name` - (Optional) Name of the S3 bucket where you want your transcription output stored. If not specified, output is stored in a service-managed bucket. 
+* `output_key` - (Optional) S3 object key for your transcription output. If not specified, a default key is generated. +* `region` - (Optional) Region where this action should be [run](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `timeout` - (Optional) Maximum time in seconds to wait for the transcription job to start. Must be between 60 and 3600 seconds. Default: `300`. diff --git a/website/docs/cdktf/python/actions/cloudfront_create_invalidation.html.markdown b/website/docs/cdktf/python/actions/cloudfront_create_invalidation.html.markdown new file mode 100644 index 000000000000..ced7fc065b4d --- /dev/null +++ b/website/docs/cdktf/python/actions/cloudfront_create_invalidation.html.markdown @@ -0,0 +1,126 @@ +--- +subcategory: "CloudFront" +layout: "aws" +page_title: "AWS: aws_cloudfront_create_invalidation" +description: |- + Invalidates CloudFront distribution cache for specified paths. +--- + + + +# Action: aws_cloudfront_create_invalidation + +~> **Note:** `aws_cloudfront_create_invalidation` is in beta. Its interface and behavior may change as the feature evolves, and breaking changes are possible. It is offered as a technical preview without compatibility guarantees until Terraform 1.14 is generally available. + +Invalidates CloudFront distribution cache for specified paths. This action creates an invalidation request and waits for it to complete. + +For information about CloudFront cache invalidation, see the [Amazon CloudFront Developer Guide](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/Invalidation.html). For specific information about creating invalidation requests, see the [CreateInvalidation](https://docs.aws.amazon.com/cloudfront/latest/APIReference/API_CreateInvalidation.html) page in the Amazon CloudFront API Reference. 
+ +~> **Note:** CloudFront invalidation requests can take several minutes to complete. This action will wait for the invalidation to finish before continuing. You can only have a limited number of invalidation requests in progress at any given time. + +## Example Usage + +### Basic Usage + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from cdktf import TerraformResourceLifecycle +from constructs import Construct +from cdktf import DataResource, TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.cloudfront_distribution import CloudfrontDistribution +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name, *, defaultCacheBehavior, enabled, origin, restrictions, viewerCertificate): + super().__init__(scope, name) + CloudfrontDistribution(self, "example", + default_cache_behavior=default_cache_behavior, + enabled=enabled, + origin=origin, + restrictions=restrictions, + viewer_certificate=viewer_certificate + ) + terraform_data_example = DataResource(self, "example_1", + input="trigger-invalidation", + lifecycle=TerraformResourceLifecycle( + action_trigger=[{ + "actions": [aws_cloudfront_create_invalidation.example], + "events": [before_create, before_update] + } + ] + ) + ) + # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. + terraform_data_example.override_logical_id("example") +``` + +### Invalidate Specific Paths + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformStack +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) +``` + +### With Custom Caller Reference + +```python +# DO NOT EDIT. 
Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformStack +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) +``` + +### CI/CD Pipeline Integration + +Use this action in your deployment pipeline to invalidate cache after updating static assets: + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from cdktf import TerraformResourceLifecycle +from constructs import Construct +from cdktf import DataResource, TerraformStack +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + DataResource(self, "deploy_complete", + depends_on=[assets], + input=deployment_id, + lifecycle=TerraformResourceLifecycle( + action_trigger=[{ + "actions": [aws_cloudfront_create_invalidation.post_deploy], + "events": [before_create, before_update] + } + ] + ) + ) +``` + +### Environment-Specific Invalidation + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformStack +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) +``` + +## Argument Reference + +This action supports the following arguments: + +* `distribution_id` - (Required) ID of the CloudFront distribution to invalidate cache for. Must be a valid CloudFront distribution ID (e.g., E1GHKQ2EXAMPLE). +* `paths` - (Required) List of file paths or patterns to invalidate. Use `/*` to invalidate all files. Supports specific files (`/index.html`), directory wildcards (`/images/*`), or all files (`/*`). Maximum of 3000 paths per invalidation request. Note: The first 1,000 invalidation paths per month are free; additional paths are charged per path. +* `caller_reference` - (Optional) Unique identifier for the invalidation request. 
If not provided, one will be generated automatically. Maximum length of 128 characters. +* `timeout` - (Optional) Timeout in seconds to wait for the invalidation to complete. Defaults to 900 seconds (15 minutes). Must be between 60 and 3600 seconds. Invalidation requests typically take 5-15 minutes to process. + + \ No newline at end of file diff --git a/website/docs/cdktf/python/actions/codebuild_start_build.html.markdown b/website/docs/cdktf/python/actions/codebuild_start_build.html.markdown new file mode 100644 index 000000000000..39d4b8938ddf --- /dev/null +++ b/website/docs/cdktf/python/actions/codebuild_start_build.html.markdown @@ -0,0 +1,93 @@ +--- +subcategory: "CodeBuild" +layout: "aws" +page_title: "AWS: aws_codebuild_start_build" +description: |- + Starts a CodeBuild project build. +--- + + + +# Action: aws_codebuild_start_build + +~> **Note:** `aws_codebuild_start_build` is in beta. Its interface and behavior may change as the feature evolves, and breaking changes are possible. It is offered as a technical preview without compatibility guarantees until Terraform 1.14 is generally available. + +Starts a CodeBuild project build. This action will initiate a build and wait for it to complete, providing progress updates during execution. + +For information about AWS CodeBuild, see the [AWS CodeBuild User Guide](https://docs.aws.amazon.com/codebuild/latest/userguide/). For specific information about starting builds, see the [StartBuild](https://docs.aws.amazon.com/codebuild/latest/APIReference/API_StartBuild.html) page in the AWS CodeBuild API Reference. + +## Example Usage + +### Basic Usage + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from cdktf import TerraformResourceLifecycle +from constructs import Construct +from cdktf import Token, DataResource, TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. 
+# +from imports.aws.codebuild_project import CodebuildProject +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + CodebuildProject(self, "example", + artifacts=CodebuildProjectArtifacts( + type="NO_ARTIFACTS" + ), + environment=CodebuildProjectEnvironment( + compute_type="BUILD_GENERAL1_SMALL", + image="aws/codebuild/amazonlinux2-x86_64-standard:3.0", + type="LINUX_CONTAINER" + ), + name="example-project", + service_role=Token.as_string(aws_iam_role_example.arn), + source=CodebuildProjectSource( + buildspec="version: 0.2\nphases:\n build:\n commands:\n - echo 'Hello World'\n", + type="NO_SOURCE" + ) + ) + DataResource(self, "build_trigger", + input="trigger-build", + lifecycle=TerraformResourceLifecycle( + action_trigger=[{ + "actions": [aws_codebuild_start_build.example], + "events": [after_create] + } + ] + ) + ) +``` + +### Build with Environment Variables + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformStack +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) +``` + +## Argument Reference + +The following arguments are required: + +* `project_name` - (Required) Name of the CodeBuild project to build. + +The following arguments are optional: + +* `source_version` - (Optional) Version of the build input to be built. For GitHub, this can be a commit SHA, branch name, or tag name. +* `timeout` - (Optional) Timeout in seconds for the build operation. Defaults to 1800 seconds (30 minutes). +* `environment_variables_override` - (Optional) Environment variables to override for this build. See [Environment Variables Override](#environment-variables-override) below. + +### Environment Variables Override + +* `name` - (Required) Environment variable name. +* `value` - (Required) Environment variable value. 
+* `type` - (Optional) Environment variable type. Valid values are `PLAINTEXT`, `PARAMETER_STORE`, or `SECRETS_MANAGER`. Defaults to `PLAINTEXT`. + + \ No newline at end of file diff --git a/website/docs/cdktf/python/actions/ec2_stop_instance.html.markdown b/website/docs/cdktf/python/actions/ec2_stop_instance.html.markdown new file mode 100644 index 000000000000..225ec79feb9e --- /dev/null +++ b/website/docs/cdktf/python/actions/ec2_stop_instance.html.markdown @@ -0,0 +1,100 @@ +--- +subcategory: "EC2 (Elastic Compute Cloud)" +layout: "aws" +page_title: "AWS: aws_ec2_stop_instance" +description: |- + Stops an EC2 instance. +--- + + + +# Action: aws_ec2_stop_instance + +~> **Note:** `aws_ec2_stop_instance` is in alpha. Its interface and behavior may change as the feature evolves, and breaking changes are possible. It is offered as a technical preview without compatibility guarantees until Terraform 1.14 is generally available. + +Stops an EC2 instance. This action will gracefully stop the instance and wait for it to reach the stopped state. + +For information about Amazon EC2, see the [Amazon EC2 User Guide](https://docs.aws.amazon.com/ec2/latest/userguide/). For specific information about stopping instances, see the [StopInstances](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_StopInstances.html) page in the Amazon EC2 API Reference. + +~> **Note:** This action directly stops EC2 instances which will interrupt running workloads. Ensure proper coordination with your applications before using this action. + +## Example Usage + +### Basic Usage + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import Token, TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. 
+# +from imports.aws.instance import Instance +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + Instance(self, "example", + ami=Token.as_string(amazon_linux.id), + instance_type="t3.micro", + tags={ + "Name": "example-instance" + } + ) +``` + +### Force Stop + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformStack +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) +``` + +### Maintenance Window + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from cdktf import TerraformResourceLifecycle +from constructs import Construct +from cdktf import Token, DataResource, TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.instance import Instance +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + web_server = Instance(self, "web_server", + ami=Token.as_string(amazon_linux.id), + instance_type="t3.micro", + tags={ + "Name": "web-server" + } + ) + DataResource(self, "maintenance_trigger", + depends_on=[web_server], + input=maintenance_window.value, + lifecycle=TerraformResourceLifecycle( + action_trigger=[{ + "actions": [aws_ec2_stop_instance.maintenance], + "events": [before_create, before_update] + } + ] + ) + ) +``` + +## Argument Reference + +This action supports the following arguments: + +* `instance_id` - (Required) ID of the EC2 instance to stop. Must be a valid EC2 instance ID (e.g., i-1234567890abcdef0). +* `force` - (Optional) Forces the instance to stop. The instance does not have an opportunity to flush file system caches or file system metadata. 
If you use this option, you must perform file system check and repair procedures. This option is not recommended for Windows instances. Default: `false`. +* `timeout` - (Optional) Timeout in seconds to wait for the instance to stop. Must be between 30 and 3600 seconds. Default: `600`. + + \ No newline at end of file diff --git a/website/docs/cdktf/python/actions/lambda_invoke.html.markdown b/website/docs/cdktf/python/actions/lambda_invoke.html.markdown new file mode 100644 index 000000000000..da209f94ab3c --- /dev/null +++ b/website/docs/cdktf/python/actions/lambda_invoke.html.markdown @@ -0,0 +1,169 @@ +--- +subcategory: "Lambda" +layout: "aws" +page_title: "AWS: aws_lambda_invoke" +description: |- + Invokes an AWS Lambda function with the specified payload. +--- + + + +# Action: aws_lambda_invoke + +~> **Note:** `aws_lambda_invoke` is in beta. Its interface and behavior may change as the feature evolves, and breaking changes are possible. It is offered as a technical preview without compatibility guarantees until Terraform 1.14 is generally available. + +Invokes an AWS Lambda function with the specified payload. This action allows for imperative invocation of Lambda functions with full control over invocation parameters. + +For information about AWS Lambda functions, see the [AWS Lambda Developer Guide](https://docs.aws.amazon.com/lambda/latest/dg/). For specific information about invoking Lambda functions, see the [Invoke](https://docs.aws.amazon.com/lambda/latest/api/API_Invoke.html) page in the AWS Lambda API Reference. + +~> **Note:** Synchronous invocations will wait for the function to complete execution, while asynchronous invocations return immediately after the request is _accepted_. + +## Example Usage + +### Basic Usage + +```python +# DO NOT EDIT. 
Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from cdktf import TerraformResourceLifecycle +from constructs import Construct +from cdktf import DataResource, TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.lambda_function import LambdaFunction +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name, *, functionName, role): + super().__init__(scope, name) + LambdaFunction(self, "example", + function_name=function_name, + role=role + ) + terraform_data_example = DataResource(self, "example_1", + input="trigger-lambda", + lifecycle=TerraformResourceLifecycle( + action_trigger=[{ + "actions": [aws_lambda_invoke.example], + "events": [before_create, before_update] + } + ] + ) + ) + # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. + terraform_data_example.override_logical_id("example") +``` + +### Invoke with Function Version + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformStack +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) +``` + +### Asynchronous Invocation + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformStack +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) +``` + +### Dry Run Validation + +```python +# DO NOT EDIT. 
Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformStack +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) +``` + +### With Log Capture + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformStack +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) +``` + +### Mobile Application Context + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformStack +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) +``` + +### CI/CD Pipeline Integration + +Use this action in your deployment pipeline to trigger post-deployment functions: + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from cdktf import TerraformResourceLifecycle +from constructs import Construct +from cdktf import DataResource, TerraformStack +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + DataResource(self, "deploy_complete", + depends_on=[api], + input=deployment_id, + lifecycle=TerraformResourceLifecycle( + action_trigger=[{ + "actions": [aws_lambda_invoke.warmup], + "events": [before_create, before_update] + } + ] + ) + ) +``` + +### Environment-Specific Processing + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformStack +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) +``` + +### Complex Payload with Dynamic Content + +```python +# DO NOT EDIT. 
Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformStack +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) +``` + +## Argument Reference + +This action supports the following arguments: + +* `client_context` - (Optional) Up to 3,583 bytes of base64-encoded data about the invoking client to pass to the function in the context object. This is only used for mobile applications and should contain information about the client application and device. +* `function_name` - (Required) Name, ARN, or partial ARN of the Lambda function to invoke. You can specify a function name (e.g., `my-function`), a qualified function name (e.g., `my-function:PROD`), or a partial ARN (e.g., `123456789012:function:my-function`). +* `invocation_type` - (Optional) Invocation type. Valid values are `RequestResponse` (default) for synchronous invocation that waits for the function to complete and returns the response, `Event` for asynchronous invocation that returns immediately after the request is accepted, and `DryRun` to validate parameters and verify permissions without actually executing the function. +* `log_type` - (Optional) Set to `Tail` to include the execution log in the response. Only applies to synchronous invocations (`RequestResponse` invocation type). Defaults to `None`. When set to `Tail`, the last 4 KB of the execution log is included in the response. +* `payload` - (Required) JSON payload to send to the Lambda function. This should be a valid JSON string that represents the event data for your function. The payload size limit is 6 MB for synchronous invocations and 256 KB for asynchronous invocations. +* `qualifier` - (Optional) Version or alias of the Lambda function to invoke. If not specified, the `$LATEST` version will be invoked. Can be a version number (e.g., `1`) or an alias (e.g., `PROD`). 
+ + \ No newline at end of file diff --git a/website/docs/cdktf/python/actions/ses_send_email.html.markdown b/website/docs/cdktf/python/actions/ses_send_email.html.markdown new file mode 100644 index 000000000000..2e63e33c3c8f --- /dev/null +++ b/website/docs/cdktf/python/actions/ses_send_email.html.markdown @@ -0,0 +1,147 @@ +--- +subcategory: "SES (Simple Email)" +layout: "aws" +page_title: "AWS: aws_ses_send_email" +description: |- + Sends an email using Amazon SES. +--- + + + +# Action: aws_ses_send_email + +~> **Note:** `aws_ses_send_email` is in beta. Its interface and behavior may change as the feature evolves, and breaking changes are possible. It is offered as a technical preview without compatibility guarantees until Terraform 1.14 is generally available. + +Sends an email using Amazon SES. This action allows for imperative email sending with full control over recipients, content, and formatting. + +For information about Amazon SES, see the [Amazon SES Developer Guide](https://docs.aws.amazon.com/ses/latest/dg/). For specific information about sending emails, see the [SendEmail](https://docs.aws.amazon.com/ses/latest/APIReference/API_SendEmail.html) page in the Amazon SES API Reference. + +~> **Note:** All email addresses used must be verified in Amazon SES or belong to a verified domain. Due to the difficulty in testing, your help is important in discovering and reporting issues. + +## Example Usage + +### Basic Usage + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from cdktf import TerraformResourceLifecycle +from constructs import Construct +from cdktf import DataResource, TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. 
+# +from imports.aws.ses_email_identity import SesEmailIdentity +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + SesEmailIdentity(self, "example", + email="sender@example.com" + ) + terraform_data_example = DataResource(self, "example_1", + input="send-notification", + lifecycle=TerraformResourceLifecycle( + action_trigger=[{ + "actions": [aws_ses_send_email.example], + "events": [before_create, before_update] + } + ] + ) + ) + # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. + terraform_data_example.override_logical_id("example") +``` + +### HTML Email with Multiple Recipients + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformStack +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) +``` + +### Deployment Notification + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from cdktf import TerraformResourceLifecycle +from constructs import Construct +from cdktf import DataResource, TerraformStack +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + DataResource(self, "deployment", + depends_on=[app], + input=deployment_id.value, + lifecycle=TerraformResourceLifecycle( + action_trigger=[{ + "actions": [aws_ses_send_email.deploy_notification], + "events": [after_create] + } + ] + ) + ) +``` + +### Alert Email with Dynamic Content + +```python +# DO NOT EDIT. 
Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformStack +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) +``` + +### Multi-format Email + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformStack +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) +``` + +### Conditional Email Sending + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformStack +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) +``` + +### Batch Processing Notification + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformStack +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) +``` + +## Argument Reference + +This action supports the following arguments: + +* `bcc_addresses` - (Optional) List of email addresses for the BCC: field of the message. Recipients in this list will receive the email but their addresses will not be visible to other recipients. +* `cc_addresses` - (Optional) List of email addresses for the CC: field of the message. Recipients in this list will receive the email and their addresses will be visible to all recipients. +* `html_body` - (Optional) Message body in HTML format. Either `text_body` or `html_body` (or both) must be specified. HTML content allows for rich formatting including links, images, and styling. +* `reply_to_addresses` - (Optional) List of reply-to email addresses for the message. 
If the recipient replies to the message, each reply-to address will receive the reply. If not specified, replies will go to the source address. +* `return_path` - (Optional) Email address that bounces and complaints will be forwarded to when feedback forwarding is enabled. This is useful for handling delivery failures and spam complaints. +* `source` - (Required) Email address that is sending the email. This address must be either individually verified with Amazon SES, or from a domain that has been verified with Amazon SES. +* `subject` - (Required) Subject of the message: A short summary of the content, which will appear in the recipient's inbox. +* `text_body` - (Optional) Message body in text format. Either `text_body` or `html_body` (or both) must be specified. Text format ensures compatibility with all email clients. +* `to_addresses` - (Optional) List of email addresses for the To: field of the message. These are the primary recipients of the email. + + \ No newline at end of file diff --git a/website/docs/cdktf/python/actions/sfn_start_execution.html.markdown b/website/docs/cdktf/python/actions/sfn_start_execution.html.markdown new file mode 100644 index 000000000000..ca0769bd545a --- /dev/null +++ b/website/docs/cdktf/python/actions/sfn_start_execution.html.markdown @@ -0,0 +1,187 @@ +--- +subcategory: "SFN (Step Functions)" +layout: "aws" +page_title: "AWS: aws_sfn_start_execution" +description: |- + Starts a Step Functions state machine execution with the specified input data. +--- + + + +# Action: aws_sfn_start_execution + +~> **Note:** `aws_sfn_start_execution` is in beta. Its interface and behavior may change as the feature evolves, and breaking changes are possible. It is offered as a technical preview without compatibility guarantees until Terraform 1.14 is generally available. + +Starts a Step Functions state machine execution with the specified input data. 
This action allows for imperative execution of state machines with full control over execution parameters. + +For information about AWS Step Functions, see the [AWS Step Functions Developer Guide](https://docs.aws.amazon.com/step-functions/latest/dg/). For specific information about starting executions, see the [StartExecution](https://docs.aws.amazon.com/step-functions/latest/apireference/API_StartExecution.html) page in the AWS Step Functions API Reference. + +~> **Note:** For `STANDARD` workflows, executions with the same name and input are idempotent. For `EXPRESS` workflows, each execution is unique regardless of name and input. + +## Example Usage + +### Basic Usage + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from cdktf import TerraformResourceLifecycle +from constructs import Construct +from cdktf import Fn, Token, DataResource, TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.sfn_state_machine import SfnStateMachine +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + SfnStateMachine(self, "example", + definition=Token.as_string( + Fn.jsonencode({ + "Comment": "A simple minimal example", + "StartAt": "Hello", + "States": { + "Hello": { + "End": True, + "Result": "Hello World!", + "Type": "Pass" + } + } + })), + name="example-state-machine", + role_arn=sfn.arn + ) + terraform_data_example = DataResource(self, "example_1", + input="trigger-execution", + lifecycle=TerraformResourceLifecycle( + action_trigger=[{ + "actions": [aws_sfn_start_execution.example], + "events": [before_create, before_update] + } + ] + ) + ) + # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. 
+ terraform_data_example.override_logical_id("example") +``` + +### Named Execution + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformStack +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) +``` + +### Execution with Version + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformStack +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) +``` + +### Execution with Alias + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.sfn_alias import SfnAlias +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name, *, stateMachineVersionArn, weight): + super().__init__(scope, name) + SfnAlias(self, "prod", + name="PROD", + routing_configuration=[SfnAliasRoutingConfiguration( + state_machine_version_weight=[{ + "state_machine_version_arn": example.arn, + "weight": 100 + } + ], + state_machine_version_arn=state_machine_version_arn, + weight=weight + ) + ], + state_machine_arn=example.arn + ) +``` + +### X-Ray Tracing + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformStack +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) +``` + +### CI/CD Pipeline Integration + +Use this action in your deployment pipeline to trigger post-deployment workflows: + +```python +# DO NOT EDIT. 
Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from cdktf import TerraformResourceLifecycle +from constructs import Construct +from cdktf import DataResource, TerraformStack +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + DataResource(self, "deploy_complete", + depends_on=[processors], + input=deployment_id, + lifecycle=TerraformResourceLifecycle( + action_trigger=[{ + "actions": [aws_sfn_start_execution.post_deploy], + "events": [before_create, before_update] + } + ] + ) + ) +``` + +### Environment-Specific Processing + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformStack +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) +``` + +### Complex Workflow Orchestration + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformStack +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) +``` + +## Argument Reference + +This action supports the following arguments: + +* `input` - (Optional) JSON input data for the execution. Must be valid JSON. Defaults to `{}` if not specified. The input size limit is 256 KB. +* `name` - (Optional) Name of the execution. Must be unique within the account/region/state machine for 90 days. If not provided, Step Functions automatically generates a UUID. Names must not contain whitespace, brackets, wildcards, or special characters. +* `state_machine_arn` - (Required) ARN of the state machine to execute. Can be an unqualified ARN, version-qualified ARN (e.g., `arn:aws:states:region:account:stateMachine:name:version`), or alias-qualified ARN (e.g., `arn:aws:states:region:account:stateMachine:name:alias`). 
+* `trace_header` - (Optional) AWS X-Ray trace header for distributed tracing. Used to correlate execution traces across services. + + \ No newline at end of file diff --git a/website/docs/cdktf/python/actions/sns_publish.html.markdown b/website/docs/cdktf/python/actions/sns_publish.html.markdown new file mode 100644 index 000000000000..8a70c9e44eeb --- /dev/null +++ b/website/docs/cdktf/python/actions/sns_publish.html.markdown @@ -0,0 +1,127 @@ +--- +subcategory: "SNS (Simple Notification)" +layout: "aws" +page_title: "AWS: aws_sns_publish" +description: |- + Publishes a message to an Amazon SNS topic. +--- + + + +# Action: aws_sns_publish + +~> **Note:** `aws_sns_publish` is in beta. Its interface and behavior may change as the feature evolves, and breaking changes are possible. It is offered as a technical preview without compatibility guarantees until Terraform 1.14 is generally available. + +Publishes a message to an Amazon SNS topic. This action allows for imperative message publishing with full control over message attributes and structure. + +For information about Amazon SNS, see the [Amazon SNS Developer Guide](https://docs.aws.amazon.com/sns/latest/dg/). For specific information about publishing messages, see the [Publish](https://docs.aws.amazon.com/sns/latest/api/API_Publish.html) page in the Amazon SNS API Reference. + +## Example Usage + +### Basic Usage + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from cdktf import TerraformResourceLifecycle +from constructs import Construct +from cdktf import DataResource, TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. 
+# +from imports.aws.sns_topic import SnsTopic +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + SnsTopic(self, "example", + name="example-topic" + ) + terraform_data_example = DataResource(self, "example_1", + input="trigger-message", + lifecycle=TerraformResourceLifecycle( + action_trigger=[{ + "actions": [aws_sns_publish.example], + "events": [before_create, before_update] + } + ] + ) + ) + # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. + terraform_data_example.override_logical_id("example") +``` + +### Message with Subject + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformStack +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) +``` + +### JSON Message Structure + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformStack +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) +``` + +### Message with Attributes + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformStack +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) +``` + +### Deployment Notification + +```python +# DO NOT EDIT. 
Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from cdktf import TerraformResourceLifecycle +from constructs import Construct +from cdktf import DataResource, TerraformStack +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + DataResource(self, "deploy_trigger", + depends_on=[app, main], + input=deployment_id.value, + lifecycle=TerraformResourceLifecycle( + action_trigger=[{ + "actions": [aws_sns_publish.deploy_complete], + "events": [before_create, before_update] + } + ] + ) + ) +``` + +## Argument Reference + +This action supports the following arguments: + +* `message` - (Required) Message to publish. For JSON message structure, this should be a JSON object with protocol-specific messages. Maximum size is 256 KB. +* `message_attributes` - (Optional) Message attributes to include with the message. Each attribute consists of a name, data type, and value. Up to 10 attributes are allowed. [See below.](#message-attributes) +* `message_structure` - (Optional) Set to `json` if you want to send different messages for each protocol. If not specified, the message will be sent as-is to all protocols. +* `subject` - (Optional) Optional subject for the message. Only used for email and email-json protocols. Maximum length is 100 characters. +* `topic_arn` - (Required) ARN of the SNS topic to publish the message to. + +### Message Attributes + +The `message_attributes` block supports: + +* `data_type` - (Required) Data type of the message attribute. Valid values are `String`, `Number`, and `Binary`. +* `map_block_key` - (Required) Name of the message attribute (used as map key). Must be unique within the message. +* `string_value` - (Required) Value of the message attribute. 
+ + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/acm_certificate.html.markdown b/website/docs/cdktf/python/d/acm_certificate.html.markdown index 4d97d9a3e29b..d245f61563aa 100644 --- a/website/docs/cdktf/python/d/acm_certificate.html.markdown +++ b/website/docs/cdktf/python/d/acm_certificate.html.markdown @@ -46,6 +46,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `domain` - (Optional) Domain of the certificate to look up. If set and no certificate is found with this name, an error will be returned. * `key_types` - (Optional) List of key algorithms to filter certificates. By default, ACM does not return all certificate types when searching. See the [ACM API Reference](https://docs.aws.amazon.com/acm/latest/APIReference/API_CertificateDetail.html#ACM-Type-CertificateDetail-KeyAlgorithm) for supported key algorithms. * `statuses` - (Optional) List of statuses on which to filter the returned list. Valid values are `PENDING_VALIDATION`, `ISSUED`, @@ -66,4 +67,4 @@ This data source exports the following attributes in addition to the arguments a * `certificate_chain` - Certificates forming the requested ACM-issued certificate's chain of trust. The chain consists of the certificate of the issuing CA and the intermediate certificates of any other subordinate CAs. * `tags` - Mapping of tags for the resource. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/acmpca_certificate.html.markdown b/website/docs/cdktf/python/d/acmpca_certificate.html.markdown index 69f186493741..28c58ce9d609 100644 --- a/website/docs/cdktf/python/d/acmpca_certificate.html.markdown +++ b/website/docs/cdktf/python/d/acmpca_certificate.html.markdown @@ -36,6 +36,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `arn` - (Required) ARN of the certificate issued by the private certificate authority. * `certificate_authority_arn` - (Required) ARN of the certificate authority. @@ -46,4 +47,4 @@ This data source exports the following attributes in addition to the arguments a * `certificate` - PEM-encoded certificate value. * `certificate_chain` - PEM-encoded certificate chain that includes any intermediate certificates and chains up to root CA. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/acmpca_certificate_authority.html.markdown b/website/docs/cdktf/python/d/acmpca_certificate_authority.html.markdown index 90c4e80173cf..52ce1566c0d3 100644 --- a/website/docs/cdktf/python/d/acmpca_certificate_authority.html.markdown +++ b/website/docs/cdktf/python/d/acmpca_certificate_authority.html.markdown @@ -35,6 +35,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `arn` - (Required) ARN of the certificate authority. ## Attribute Reference @@ -62,4 +63,4 @@ This data source exports the following attributes in addition to the arguments a * `tags` - Key-value map of user-defined tags that are attached to the certificate authority. * `type` - Type of the certificate authority. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/ami.html.markdown b/website/docs/cdktf/python/d/ami.html.markdown index d49748ec9386..3105d186b747 100644 --- a/website/docs/cdktf/python/d/ami.html.markdown +++ b/website/docs/cdktf/python/d/ami.html.markdown @@ -50,6 +50,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `owners` - (Optional) List of AMI owners to limit search. Valid values: an AWS account ID, `self` (the current account), or an AWS owner alias (e.g., `amazon`, `aws-marketplace`, `microsoft`). * `most_recent` - (Optional) If more than one result is returned, use the most recent AMI. @@ -59,6 +60,10 @@ recent AMI. * `filter` - (Optional) One or more name/value pairs to filter off of. There are several valid keys, for a full reference, check out [describe-images in the AWS CLI reference][1]. +* `allow_unsafe_filter` - (Optional) If true, allow unsafe filter values. With unsafe +filters and `most_recent` set to `true`, a third party may introduce a new image which +will be returned by this data source. Consider filtering by owner or image ID rather +than setting this argument. 
* `name_regex` - (Optional) Regex string to apply to the AMI list returned by AWS. This allows more advanced filtering not supported from the AWS API. This filtering is done locally on what AWS returns, and could have a performance @@ -146,4 +151,4 @@ interpolation. [1]: http://docs.aws.amazon.com/cli/latest/reference/ec2/describe-images.html - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/ami_ids.html.markdown b/website/docs/cdktf/python/d/ami_ids.html.markdown index 3cdf905490ad..1153b8a7b4db 100644 --- a/website/docs/cdktf/python/d/ami_ids.html.markdown +++ b/website/docs/cdktf/python/d/ami_ids.html.markdown @@ -40,6 +40,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `owners` - (Required) List of AMI owners to limit search. At least 1 value must be specified. Valid values: an AWS account ID, `self` (the current account), or an AWS owner alias (e.g., `amazon`, `aws-marketplace`, `microsoft`). * `executable_users` - (Optional) Limit search to users with *explicit* launch permission on the image. Valid items are the numeric account ID or `self`. 
@@ -70,4 +71,4 @@ This data source exports the following attributes in addition to the arguments a - `read` - (Default `20m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/api_gateway_api_key.html.markdown b/website/docs/cdktf/python/d/api_gateway_api_key.html.markdown index 1ec46d6e7993..4595cf61d7ab 100644 --- a/website/docs/cdktf/python/d/api_gateway_api_key.html.markdown +++ b/website/docs/cdktf/python/d/api_gateway_api_key.html.markdown @@ -36,6 +36,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `id` - (Required) ID of the API Key to look up. ## Attribute Reference @@ -52,4 +53,4 @@ This data source exports the following attributes in addition to the arguments a * `enabled` - Whether the API Key is enabled. * `tags` - Map of tags for the resource. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/api_gateway_api_keys.html.markdown b/website/docs/cdktf/python/d/api_gateway_api_keys.html.markdown index d4cca4112f5a..0b216a2bebd3 100644 --- a/website/docs/cdktf/python/d/api_gateway_api_keys.html.markdown +++ b/website/docs/cdktf/python/d/api_gateway_api_keys.html.markdown @@ -33,6 +33,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
* `customer_id` - (Optional) Amazon Web Services Marketplace customer identifier, when integrating with the Amazon Web Services SaaS Marketplace. * `include_values` - (Optional) Set this value to `true` if you wish the result contains the key value. Defaults to `false`. @@ -55,4 +56,4 @@ This data source exports the following attributes in addition to the arguments a * `enabled` - Whether the API Key is enabled. * `tags` - Map of tags for the resource. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/api_gateway_authorizer.html.markdown b/website/docs/cdktf/python/d/api_gateway_authorizer.html.markdown index 9533c9036ce6..a89c56f30822 100644 --- a/website/docs/cdktf/python/d/api_gateway_authorizer.html.markdown +++ b/website/docs/cdktf/python/d/api_gateway_authorizer.html.markdown @@ -37,6 +37,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `authorizer_id` - (Required) Authorizer identifier. * `rest_api_id` - (Required) ID of the associated REST API. @@ -54,4 +55,4 @@ This data source exports the following attributes in addition to the arguments a * `provider_arns` - List of the Amazon Cognito user pool ARNs. * `type` - Type of the authorizer. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/api_gateway_authorizers.html.markdown b/website/docs/cdktf/python/d/api_gateway_authorizers.html.markdown index fe40d4bbe9ec..2a99cb2ccf2c 100644 --- a/website/docs/cdktf/python/d/api_gateway_authorizers.html.markdown +++ b/website/docs/cdktf/python/d/api_gateway_authorizers.html.markdown @@ -35,6 +35,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `rest_api_id` - (Required) ID of the associated REST API. ## Attribute Reference @@ -43,4 +44,4 @@ This data source exports the following attributes in addition to the arguments a * `ids` - List of Authorizer identifiers. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/api_gateway_domain_name.html.markdown b/website/docs/cdktf/python/d/api_gateway_domain_name.html.markdown index 462169536045..7270023a18ba 100644 --- a/website/docs/cdktf/python/d/api_gateway_domain_name.html.markdown +++ b/website/docs/cdktf/python/d/api_gateway_domain_name.html.markdown @@ -35,6 +35,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `domain_name` - (Required) Fully-qualified domain name to look up. If no domain name is found, an error will be returned. 
* `domain_name_id` - (Optional) The identifier for the domain name resource. Supported only for private custom domain names. @@ -59,4 +60,4 @@ This data source exports the following attributes in addition to the arguments a * `security_policy` - Security policy for the domain name. * `tags` - Key-value map of tags for the resource. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/api_gateway_export.html.markdown b/website/docs/cdktf/python/d/api_gateway_export.html.markdown index 57bdda1f2a87..000cf9af3577 100644 --- a/website/docs/cdktf/python/d/api_gateway_export.html.markdown +++ b/website/docs/cdktf/python/d/api_gateway_export.html.markdown @@ -35,6 +35,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `export_type` - (Required) Type of export. Acceptable values are `oas30` for OpenAPI 3.0.x and `swagger` for Swagger/OpenAPI 2.0. * `rest_api_id` - (Required) Identifier of the associated REST API. * `stage_name` - (Required) Name of the Stage that will be exported. @@ -50,4 +51,4 @@ This data source exports the following attributes in addition to the arguments a * `content_type` - Content-type header value in the HTTP response. * `content_disposition` - Content-disposition header value in the HTTP response. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/api_gateway_resource.html.markdown b/website/docs/cdktf/python/d/api_gateway_resource.html.markdown index d5745edec126..dabd42012a32 100644 --- a/website/docs/cdktf/python/d/api_gateway_resource.html.markdown +++ b/website/docs/cdktf/python/d/api_gateway_resource.html.markdown @@ -41,6 +41,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `rest_api_id` - (Required) REST API id that owns the resource. If no REST API is found, an error will be returned. * `path` - (Required) Full path of the resource. If no path is found, an error will be returned. @@ -52,4 +53,4 @@ This data source exports the following attributes in addition to the arguments a * `parent_id` - Set to the ID of the parent Resource. * `path_part` - Set to the path relative to the parent Resource. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/api_gateway_rest_api.html.markdown b/website/docs/cdktf/python/d/api_gateway_rest_api.html.markdown index 32867fe87df0..bfc51f13e5b7 100644 --- a/website/docs/cdktf/python/d/api_gateway_rest_api.html.markdown +++ b/website/docs/cdktf/python/d/api_gateway_rest_api.html.markdown @@ -38,6 +38,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the REST API to look up. If no REST API is found with this name, an error will be returned. If multiple REST APIs are found with this name, an error will be returned. ## Attribute Reference @@ -59,4 +60,4 @@ This data source exports the following attributes in addition to the arguments a * `root_resource_id` - Set to the ID of the API Gateway Resource on the found REST API where the route matches '/'. * `tags` - Key-value map of resource tags. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/api_gateway_sdk.html.markdown b/website/docs/cdktf/python/d/api_gateway_sdk.html.markdown index 6b481e133338..6cf8522f2946 100644 --- a/website/docs/cdktf/python/d/api_gateway_sdk.html.markdown +++ b/website/docs/cdktf/python/d/api_gateway_sdk.html.markdown @@ -41,6 +41,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `rest_api_id` - (Required) Identifier of the associated REST API. * `stage_name` - (Required) Name of the Stage that will be exported. * `sdk_type` - (Required) Language for the generated SDK. Currently `java`, `javascript`, `android`, `objectivec` (for iOS), `swift` (for iOS), and `ruby` are supported. @@ -55,4 +56,4 @@ This data source exports the following attributes in addition to the arguments a * `content_type` - Content-type header value in the HTTP response. * `content_disposition` - Content-disposition header value in the HTTP response. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/api_gateway_vpc_link.html.markdown b/website/docs/cdktf/python/d/api_gateway_vpc_link.html.markdown index 91cd9156813d..ef64927717fd 100644 --- a/website/docs/cdktf/python/d/api_gateway_vpc_link.html.markdown +++ b/website/docs/cdktf/python/d/api_gateway_vpc_link.html.markdown @@ -38,6 +38,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the API Gateway VPC Link to look up. If no API Gateway VPC Link is found with this name, an error will be returned. If multiple API Gateway VPC Links are found with this name, an error will be returned. @@ -52,4 +53,4 @@ This data source exports the following attributes in addition to the arguments a * `target_arns` - List of network load balancer arns in the VPC targeted by the VPC link. Currently AWS only supports 1 target. * `tags` - Key-value map of resource tags - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/apigatewayv2_api.html.markdown b/website/docs/cdktf/python/d/apigatewayv2_api.html.markdown index 4fec2be37fc9..0a5da12720f3 100644 --- a/website/docs/cdktf/python/d/apigatewayv2_api.html.markdown +++ b/website/docs/cdktf/python/d/apigatewayv2_api.html.markdown @@ -35,6 +35,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `api_id` - (Required) API identifier. ## Attribute Reference @@ -67,4 +68,4 @@ The `cors_configuration` object supports the following: * `expose_headers` - Set of exposed HTTP headers. * `max_age` - Number of seconds that the browser should cache preflight request results. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/apigatewayv2_apis.html.markdown b/website/docs/cdktf/python/d/apigatewayv2_apis.html.markdown index 6ffa2696d63a..ab49c87638d0 100644 --- a/website/docs/cdktf/python/d/apigatewayv2_apis.html.markdown +++ b/website/docs/cdktf/python/d/apigatewayv2_apis.html.markdown @@ -35,6 +35,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Optional) API name. * `protocol_type` - (Optional) API protocol. * `tags` - (Optional) Map of tags, each pair of which must exactly match @@ -46,4 +47,4 @@ This data source exports the following attributes in addition to the arguments a * `ids` - Set of API identifiers. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/apigatewayv2_export.html.markdown b/website/docs/cdktf/python/d/apigatewayv2_export.html.markdown index eb4709cc818b..0c25ff0dc8ef 100644 --- a/website/docs/cdktf/python/d/apigatewayv2_export.html.markdown +++ b/website/docs/cdktf/python/d/apigatewayv2_export.html.markdown @@ -37,6 +37,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `api_id` - (Required) API identifier. * `specification` - (Required) Version of the API specification to use. `OAS30`, for OpenAPI 3.0, is the only supported value. * `output_type` - (Required) Output type of the exported definition file. Valid values are `JSON` and `YAML`. @@ -51,4 +52,4 @@ This data source exports the following attributes in addition to the arguments a * `id` - API identifier. * `body` - ID of the API. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/apigatewayv2_vpc_link.html.markdown b/website/docs/cdktf/python/d/apigatewayv2_vpc_link.html.markdown index a101b129378e..80609c057922 100644 --- a/website/docs/cdktf/python/d/apigatewayv2_vpc_link.html.markdown +++ b/website/docs/cdktf/python/d/apigatewayv2_vpc_link.html.markdown @@ -35,8 +35,9 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -The following arguments are required: +This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `vpc_link_id` - (Required) VPC Link ID ## Attribute Reference @@ -50,4 +51,4 @@ This data source exports the following attributes in addition to the arguments a * `subnet_ids` - List of subnets attached to the VPC Link. * `tags` - VPC Link Tags. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/appconfig_application.html.markdown b/website/docs/cdktf/python/d/appconfig_application.html.markdown new file mode 100644 index 000000000000..aafaf8a0fc70 --- /dev/null +++ b/website/docs/cdktf/python/d/appconfig_application.html.markdown @@ -0,0 +1,51 @@ +--- +subcategory: "AppConfig" +layout: "aws" +page_title: "AWS: aws_appconfig_application" +description: |- + Retrieves an AWS AppConfig Application by name. +--- + + + +# Data Source: aws_appconfig_application + +Provides details about an AWS AppConfig Application. + +## Example Usage + +### Basic Usage + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.data_aws_appconfig_application import DataAwsAppconfigApplication +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + DataAwsAppconfigApplication(self, "example", + name="my-appconfig-application" + ) +``` + +## Argument Reference + +This data source supports the following arguments: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints).
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `id` - (Optional) ID of the Application. Either `id` or `name` must be specified. +* `name` - (Optional) AWS AppConfig Application name. Either `name` or `id` must be specified. + +## Attribute Reference + +This data source exports the following attributes in addition to the arguments above: + +* `arn` - ARN of the Application. +* `description` - Description of the Application. + + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/appconfig_configuration_profile.html.markdown b/website/docs/cdktf/python/d/appconfig_configuration_profile.html.markdown index 5cf70fdd6dca..4e74020caa67 100644 --- a/website/docs/cdktf/python/d/appconfig_configuration_profile.html.markdown +++ b/website/docs/cdktf/python/d/appconfig_configuration_profile.html.markdown @@ -36,8 +36,9 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -The following arguments are required: +This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `application_id` - (Required) ID of the AppConfig application to which this configuration profile belongs. * `configuration_profile_id` - (Required) ID of the Configuration Profile. @@ -56,4 +57,4 @@ This data source exports the following attributes in addition to the arguments a * `content` - Either the JSON Schema content or the ARN of an AWS Lambda function. * `type` - Type of validator. Valid values: JSON_SCHEMA and LAMBDA. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/appconfig_configuration_profiles.html.markdown b/website/docs/cdktf/python/d/appconfig_configuration_profiles.html.markdown index 30fbdc41107a..29d0fc755814 100644 --- a/website/docs/cdktf/python/d/appconfig_configuration_profiles.html.markdown +++ b/website/docs/cdktf/python/d/appconfig_configuration_profiles.html.markdown @@ -51,8 +51,9 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -The following arguments are required: +This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `application_id` - (Required) ID of the AppConfig Application. ## Attribute Reference @@ -61,4 +62,4 @@ This data source exports the following attributes in addition to the arguments a * `configuration_profile_ids` - Set of Configuration Profile IDs associated with the AppConfig Application. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/appconfig_environment.html.markdown b/website/docs/cdktf/python/d/appconfig_environment.html.markdown index 62eaa46a3d84..a766e5cd264b 100644 --- a/website/docs/cdktf/python/d/appconfig_environment.html.markdown +++ b/website/docs/cdktf/python/d/appconfig_environment.html.markdown @@ -36,8 +36,9 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -The following arguments are required: +This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `application_id` - (Required) ID of the AppConfig Application to which this Environment belongs. * `environment_id` - (Required) ID of the AppConfig Environment. @@ -55,4 +56,4 @@ This data source exports the following attributes in addition to the arguments a or `ROLLED_BACK`. * `tags` - Map of tags for the resource. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/appconfig_environments.html.markdown b/website/docs/cdktf/python/d/appconfig_environments.html.markdown index 152b434181f5..9490f3f19578 100644 --- a/website/docs/cdktf/python/d/appconfig_environments.html.markdown +++ b/website/docs/cdktf/python/d/appconfig_environments.html.markdown @@ -36,8 +36,9 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -The following arguments are required: +This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `application_id` - (Required) ID of the AppConfig Application. ## Attribute Reference @@ -46,4 +47,4 @@ This data source exports the following attributes in addition to the arguments a * `environment_ids` - Set of Environment IDs associated with this AppConfig Application. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/appintegrations_event_integration.html.markdown b/website/docs/cdktf/python/d/appintegrations_event_integration.html.markdown index de794bb54bc4..dee67f94d5ef 100644 --- a/website/docs/cdktf/python/d/appintegrations_event_integration.html.markdown +++ b/website/docs/cdktf/python/d/appintegrations_event_integration.html.markdown @@ -35,6 +35,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The AppIntegrations Event Integration name. ## Attribute Reference @@ -54,4 +55,4 @@ This data source exports the following attributes in addition to the arguments a * `source` - The source of the events. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/appmesh_gateway_route.html.markdown b/website/docs/cdktf/python/d/appmesh_gateway_route.html.markdown index c139ab4c6185..a3259f523f7c 100644 --- a/website/docs/cdktf/python/d/appmesh_gateway_route.html.markdown +++ b/website/docs/cdktf/python/d/appmesh_gateway_route.html.markdown @@ -37,6 +37,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the gateway route. 
* `mesh_name` - (Required) Name of the service mesh in which the virtual gateway exists. * `virtual_gateway_name` - (Required) Name of the virtual gateway in which the route exists. @@ -53,4 +54,4 @@ This data source exports the following attributes in addition to the arguments a * `spec` - Gateway route specification. See the [`aws_appmesh_gateway_route`](/docs/providers/aws/r/appmesh_gateway_route.html#spec) resource for details. * `tags` - Map of tags. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/appmesh_mesh.html.markdown b/website/docs/cdktf/python/d/appmesh_mesh.html.markdown index 8e352d30ac90..33c96b56a431 100644 --- a/website/docs/cdktf/python/d/appmesh_mesh.html.markdown +++ b/website/docs/cdktf/python/d/appmesh_mesh.html.markdown @@ -55,6 +55,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the service mesh. * `mesh_owner` - (Optional) AWS account ID of the service mesh's owner. @@ -69,4 +70,4 @@ This data source exports the following attributes in addition to the arguments a * `spec` - Service mesh specification. See the [`aws_appmesh_mesh`](/docs/providers/aws/r/appmesh_mesh.html#spec) resource for details. * `tags` - Map of tags. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/appmesh_route.html.markdown b/website/docs/cdktf/python/d/appmesh_route.html.markdown index b0452ff35a1a..b5a112a7f846 100644 --- a/website/docs/cdktf/python/d/appmesh_route.html.markdown +++ b/website/docs/cdktf/python/d/appmesh_route.html.markdown @@ -37,6 +37,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the route. * `mesh_name` - (Required) Name of the service mesh in which the virtual router exists. * `virtual_router_name` - (Required) Name of the virtual router in which the route exists. @@ -53,4 +54,4 @@ This data source exports the following attributes in addition to the arguments a * `spec` - Route specification. See the [`aws_appmesh_route`](/docs/providers/aws/r/appmesh_route.html#spec) resource for details. * `tags` - Map of tags. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/appmesh_virtual_gateway.html.markdown b/website/docs/cdktf/python/d/appmesh_virtual_gateway.html.markdown index 6b44d2fcb718..1c8055b2b01a 100644 --- a/website/docs/cdktf/python/d/appmesh_virtual_gateway.html.markdown +++ b/website/docs/cdktf/python/d/appmesh_virtual_gateway.html.markdown @@ -59,6 +59,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the virtual gateway. * `mesh_name` - (Required) Name of the service mesh in which the virtual gateway exists. * `mesh_owner` - (Optional) AWS account ID of the service mesh's owner. @@ -74,4 +75,4 @@ This data source exports the following attributes in addition to the arguments a * `spec` - Virtual gateway specification. See the [`aws_appmesh_virtual_gateway`](/docs/providers/aws/r/appmesh_virtual_gateway.html#spec) resource for details. * `tags` - Map of tags. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/appmesh_virtual_node.html.markdown b/website/docs/cdktf/python/d/appmesh_virtual_node.html.markdown index 3177f5d002a8..6af4be539112 100644 --- a/website/docs/cdktf/python/d/appmesh_virtual_node.html.markdown +++ b/website/docs/cdktf/python/d/appmesh_virtual_node.html.markdown @@ -36,6 +36,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the virtual node. * `mesh_name` - (Required) Name of the service mesh in which the virtual node exists. * `mesh_owner` - (Optional) AWS account ID of the service mesh's owner. @@ -51,4 +52,4 @@ This data source exports the following attributes in addition to the arguments a * `spec` - Virtual node specification. See the [`aws_appmesh_virtual_node`](/docs/providers/aws/r/appmesh_virtual_node.html#spec) resource for details. * `tags` - Map of tags. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/appmesh_virtual_router.html.markdown b/website/docs/cdktf/python/d/appmesh_virtual_router.html.markdown index 0223585bddfa..7e9b2dbc6dc3 100644 --- a/website/docs/cdktf/python/d/appmesh_virtual_router.html.markdown +++ b/website/docs/cdktf/python/d/appmesh_virtual_router.html.markdown @@ -36,6 +36,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the virtual router. * `mesh_name` - (Required) Name of the mesh in which the virtual router exists @@ -50,4 +51,4 @@ This data source exports the following attributes in addition to the arguments a * `spec` - Virtual routers specification. See the [`aws_appmesh_virtual_router`](/docs/providers/aws/r/appmesh_virtual_router.html#spec) resource for details. * `tags` - Map of tags. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/appmesh_virtual_service.html.markdown b/website/docs/cdktf/python/d/appmesh_virtual_service.html.markdown index 1ed72698a796..7639e496aa75 100644 --- a/website/docs/cdktf/python/d/appmesh_virtual_service.html.markdown +++ b/website/docs/cdktf/python/d/appmesh_virtual_service.html.markdown @@ -57,6 +57,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the virtual service. * `mesh_name` - (Required) Name of the service mesh in which the virtual service exists. * `mesh_owner` - (Optional) AWS account ID of the service mesh's owner. @@ -72,4 +73,4 @@ This data source exports the following attributes in addition to the arguments a * `spec` - Virtual service specification. See the [`aws_appmesh_virtual_service`](/docs/providers/aws/r/appmesh_virtual_service.html#spec) resource for details. * `tags` - Map of tags. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/apprunner_hosted_zone_id.html.markdown b/website/docs/cdktf/python/d/apprunner_hosted_zone_id.html.markdown index 76c1881bd852..eb4022e7f2a1 100644 --- a/website/docs/cdktf/python/d/apprunner_hosted_zone_id.html.markdown +++ b/website/docs/cdktf/python/d/apprunner_hosted_zone_id.html.markdown @@ -45,13 +45,12 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: -* `region` - (Optional) Name of the region whose AWS App Runner service HostedZoneId is desired. - Defaults to the region from the AWS provider configuration. +* `region` - (Optional) Name of the Region whose AWS App Runner service HostedZoneId is desired. Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). ## Attribute Reference This data source exports the following attributes in addition to the arguments above: -* `id` - ID of the AWS App Runner service HostedZoneId in the selected region. +* `id` - ID of the AWS App Runner service HostedZoneId in the selected Region. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/appstream_image.html.markdown b/website/docs/cdktf/python/d/appstream_image.html.markdown index 0f3471dcca6d..86a798c37d8c 100644 --- a/website/docs/cdktf/python/d/appstream_image.html.markdown +++ b/website/docs/cdktf/python/d/appstream_image.html.markdown @@ -37,6 +37,7 @@ class MyConvertedCode(TerraformStack): The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - Name of the image being searched for. Cannot be used with name_regex or arn. * `name_regex` - Regular expression name of the image being searched for. Cannot be used with arn or name. * `arn` - Arn of the image being searched for. Cannot be used with name_regex or name. @@ -85,4 +86,4 @@ This data source exports the following attributes in addition to the arguments a * `state` - Current state of image. Image starts in PENDING state which changes to AVAILABLE if creation passes and FAILED if it fails. Values will be from: PENDING | AVAILABLE | FAILED | COPYING | DELETING | CREATING | IMPORTING. * `visibility` - Visibility type enum indicating whether the image is PUBLIC, PRIVATE, or SHARED. Valid values include: PUBLIC | PRIVATE | SHARED. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/arn.html.markdown b/website/docs/cdktf/python/d/arn.html.markdown index de267f836b83..921ec6f0f1c1 100644 --- a/website/docs/cdktf/python/d/arn.html.markdown +++ b/website/docs/cdktf/python/d/arn.html.markdown @@ -42,15 +42,11 @@ This data source supports the following arguments: This data source exports the following attributes in addition to the arguments above: * `partition` - Partition that the resource is in. - * `service` - The [service namespace](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#genref-aws-service-namespaces) that identifies the AWS product. - * `region` - Region the resource resides in. -Note that the ARNs for some resources do not require a region, so this component might be omitted. - +Note that the ARNs for some resources do not include a Region, so this component might be omitted. * `account` - The [ID](https://docs.aws.amazon.com/general/latest/gr/acct-identifiers.html) of the AWS account that owns the resource, without the hyphens. - * `resource` - Content of this part of the ARN varies by service. It often includes an indicator of the type of resource—for example, an IAM user or Amazon RDS database —followed by a slash (/) or a colon (:), followed by the resource name itself. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/athena_named_query.html.markdown b/website/docs/cdktf/python/d/athena_named_query.html.markdown index c5e634651b45..ab074b0a1f91 100644 --- a/website/docs/cdktf/python/d/athena_named_query.html.markdown +++ b/website/docs/cdktf/python/d/athena_named_query.html.markdown @@ -35,6 +35,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The plain language name for the query. Maximum length of 128. * `workgroup` - (Optional) The workgroup to which the query belongs. Defaults to `primary`. @@ -47,4 +48,4 @@ This data source exports the following attributes in addition to the arguments a * `id` - The unique ID of the query. * `query` - Text of the query itself. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/auditmanager_control.html.markdown b/website/docs/cdktf/python/d/auditmanager_control.html.markdown index b8c65e837ace..0ed7a0cff834 100644 --- a/website/docs/cdktf/python/d/auditmanager_control.html.markdown +++ b/website/docs/cdktf/python/d/auditmanager_control.html.markdown @@ -82,6 +82,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the control. * `type` - (Required) Type of control. Valid values are `Custom` and `Standard`. @@ -91,4 +92,4 @@ This data source exports the following attributes in addition to the arguments a See the [`aws_auditmanager_control` resource](/docs/providers/aws/r/auditmanager_control.html) for details on the returned attributes - they are identical. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/auditmanager_framework.html.markdown b/website/docs/cdktf/python/d/auditmanager_framework.html.markdown index 1cb240f14f0e..4aeb002f4e03 100644 --- a/website/docs/cdktf/python/d/auditmanager_framework.html.markdown +++ b/website/docs/cdktf/python/d/auditmanager_framework.html.markdown @@ -38,6 +38,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the framework. * `type` - (Required) Type of framework. Valid values are `Custom` and `Standard`. @@ -47,4 +48,4 @@ This data source exports the following attributes in addition to the arguments a See the [`aws_auditmanager_framework` resource](/docs/providers/aws/r/auditmanager_framework.html) for details on the returned attributes - they are identical. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/autoscaling_group.html.markdown b/website/docs/cdktf/python/d/autoscaling_group.html.markdown index 019116b19f84..ef3ceceb5edf 100644 --- a/website/docs/cdktf/python/d/autoscaling_group.html.markdown +++ b/website/docs/cdktf/python/d/autoscaling_group.html.markdown @@ -35,6 +35,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
* `name` - Specify the exact name of the desired autoscaling group. ## Attribute Reference @@ -149,4 +150,4 @@ This data source exports the following attributes in addition to the arguments a ~> **NOTE:** Some values are not always set and may not be available for interpolation. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/autoscaling_groups.html.markdown b/website/docs/cdktf/python/d/autoscaling_groups.html.markdown index fbfaae0dd0ea..1cb28fb1b968 100644 --- a/website/docs/cdktf/python/d/autoscaling_groups.html.markdown +++ b/website/docs/cdktf/python/d/autoscaling_groups.html.markdown @@ -50,6 +50,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `names` - (Optional) List of autoscaling group names * `filter` - (Optional) Filter used to scope the list e.g., by tags. See [related docs](http://docs.aws.amazon.com/AutoScaling/latest/APIReference/API_Filter.html). * `name` - (Required) Name of the DescribeAutoScalingGroup filter. The recommended values are: `tag-key`, `tag-value`, and `tag:` @@ -63,4 +64,4 @@ This data source exports the following attributes in addition to the arguments a * `id` - AWS Region. * `names` - List of the Autoscaling Groups in the current region. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/availability_zone.html.markdown b/website/docs/cdktf/python/d/availability_zone.html.markdown index 08c475080149..40d3ac6256d8 100644 --- a/website/docs/cdktf/python/d/availability_zone.html.markdown +++ b/website/docs/cdktf/python/d/availability_zone.html.markdown @@ -11,10 +11,10 @@ description: |- # Data Source: aws_availability_zone `aws_availability_zone` provides details about a specific availability zone (AZ) -in the current region. +in the current Region. This can be used both to validate an availability zone given in a variable -and to split the AZ name into its component parts of an AWS region and an +and to split the AZ name into its component parts of an AWS Region and an AZ identifier letter. The latter may be useful e.g., for implementing a consistent subnet numbering scheme across several regions by mapping both the region and the subnet letter to network numbers. @@ -89,6 +89,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `all_availability_zones` - (Optional) Set to `true` to include all Availability Zones and Local Zones regardless of your opt in status. * `filter` - (Optional) Configuration block(s) for filtering. Detailed below. * `name` - (Optional) Full name of the availability zone to select. @@ -110,7 +111,8 @@ The `filter` configuration block supports the following arguments: This data source exports the following attributes in addition to the arguments above: -* `group_name` - For Availability Zones, this is the same value as the Region name. 
For Local Zones, the name of the associated group, for example `us-west-2-lax-1`. +* `group_long_name` - The long name of the Availability Zone group, Local Zone group, or Wavelength Zone group. +* `group_name` - The name of the zone group. For example: `us-east-1-zg-1`, `us-west-2-lax-1`, or `us-east-1-wl1-bos-wlz-1`. * `name_suffix` - Part of the AZ name that appears after the region name, uniquely identifying the AZ within its region. For Availability Zones this is usually a single letter, for example `a` for the `us-west-2a` zone. For Local and Wavelength Zones this is a longer string, for example `wl1-sfo-wlz-1` for the `us-west-2-wl1-sfo-wlz-1` zone. @@ -118,7 +120,6 @@ For Local and Wavelength Zones this is a longer string, for example `wl1-sfo-wlz * `opt_in_status` - For Availability Zones, this always has the value of `opt-in-not-required`. For Local Zones, this is the opt in status. The possible values are `opted-in` and `not-opted-in`. * `parent_zone_id` - ID of the zone that handles some of the Local Zone or Wavelength Zone control plane operations, such as API calls. * `parent_zone_name` - Name of the zone that handles some of the Local Zone or Wavelength Zone control plane operations, such as API calls. -* `region` - Region where the selected availability zone resides. This is always the region selected on the provider, since this data source searches only within that region. * `zone_type` - Type of zone. Values are `availability-zone`, `local-zone`, and `wavelength-zone`. 
## Timeouts @@ -127,4 +128,4 @@ For Local and Wavelength Zones this is a longer string, for example `wl1-sfo-wlz - `read` - (Default `20m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/availability_zones.html.markdown b/website/docs/cdktf/python/d/availability_zones.html.markdown index 3f2a04d100b8..2fefe275d7db 100644 --- a/website/docs/cdktf/python/d/availability_zones.html.markdown +++ b/website/docs/cdktf/python/d/availability_zones.html.markdown @@ -102,6 +102,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `all_availability_zones` - (Optional) Set to `true` to include all Availability Zones and Local Zones regardless of your opt in status. * `filter` - (Optional) Configuration block(s) for filtering. Detailed below. * `exclude_names` - (Optional) List of Availability Zone names to exclude. @@ -135,4 +136,4 @@ Note that the indexes of Availability Zone names and IDs correspond. - `read` - (Default `20m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/backup_framework.html.markdown b/website/docs/cdktf/python/d/backup_framework.html.markdown index 6d0f6da4b82c..1e46f6a316f0 100644 --- a/website/docs/cdktf/python/d/backup_framework.html.markdown +++ b/website/docs/cdktf/python/d/backup_framework.html.markdown @@ -35,6 +35,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Backup framework name. ## Attribute Reference @@ -73,4 +74,4 @@ This data source exports the following attributes in addition to the arguments a * `compliance_resource_types` - Describes whether the control scope includes one or more types of resources, such as EFS or RDS. * `tags` - Tag key-value pair applied to those AWS resources that you want to trigger an evaluation for a rule. A maximum of one key-value pair can be provided. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/backup_plan.html.markdown b/website/docs/cdktf/python/d/backup_plan.html.markdown index c8ba0f45f644..963262a3089f 100644 --- a/website/docs/cdktf/python/d/backup_plan.html.markdown +++ b/website/docs/cdktf/python/d/backup_plan.html.markdown @@ -35,6 +35,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `plan_id` - (Required) Backup plan ID. ## Attribute Reference @@ -47,4 +48,4 @@ This data source exports the following attributes in addition to the arguments a * `tags` - Metadata that you can assign to help organize the plans you create. * `version` - Unique, randomly generated, Unicode, UTF-8 encoded string that serves as the version ID of the backup plan. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/backup_report_plan.html.markdown b/website/docs/cdktf/python/d/backup_report_plan.html.markdown index f94ad8d0fafe..634c0da0724a 100644 --- a/website/docs/cdktf/python/d/backup_report_plan.html.markdown +++ b/website/docs/cdktf/python/d/backup_report_plan.html.markdown @@ -35,6 +35,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Backup report plan name. ## Attribute Reference @@ -69,4 +70,4 @@ This data source exports the following attributes in addition to the arguments a * `regions` - (Optional) Specifies the list of regions a report covers. * `report_template` - Identifies the report template for the report. Reports are built using a report template. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/backup_selection.html.markdown b/website/docs/cdktf/python/d/backup_selection.html.markdown index e8dc29df77ba..e8398e1d0ffd 100644 --- a/website/docs/cdktf/python/d/backup_selection.html.markdown +++ b/website/docs/cdktf/python/d/backup_selection.html.markdown @@ -36,6 +36,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `plan_id` - (Required) Backup plan ID associated with the selection of resources. 
* `selection_id` - (Required) Backup selection ID. @@ -47,4 +48,4 @@ This data source exports the following attributes in addition to the arguments a * `iam_role_arn` - ARN of the IAM role that AWS Backup uses to authenticate when restoring and backing up the target resource. See the [AWS Backup Developer Guide](https://docs.aws.amazon.com/aws-backup/latest/devguide/access-control.html#managed-policies) for additional information about using AWS managed policies or creating custom policies attached to the IAM role. * `resources` - An array of strings that either contain Amazon Resource Names (ARNs) or match patterns of resources to assign to a backup plan.. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/backup_vault.html.markdown b/website/docs/cdktf/python/d/backup_vault.html.markdown index 391b779cde1d..632a621e68a6 100644 --- a/website/docs/cdktf/python/d/backup_vault.html.markdown +++ b/website/docs/cdktf/python/d/backup_vault.html.markdown @@ -35,6 +35,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the backup vault. ## Attribute Reference @@ -46,4 +47,4 @@ This data source exports the following attributes in addition to the arguments a * `recovery_points` - Number of recovery points that are stored in a backup vault. * `tags` - Metadata that you can assign to help organize the resources that you create. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/batch_compute_environment.html.markdown b/website/docs/cdktf/python/d/batch_compute_environment.html.markdown index b29fc6e59a30..10c3b7992117 100644 --- a/website/docs/cdktf/python/d/batch_compute_environment.html.markdown +++ b/website/docs/cdktf/python/d/batch_compute_environment.html.markdown @@ -36,6 +36,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the Batch Compute Environment ## Attribute Reference @@ -52,4 +53,4 @@ This data source exports the following attributes in addition to the arguments a * `update_policy` - Specifies the infrastructure update policy for the compute environment. * `tags` - Key-value map of resource tags - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/batch_job_definition.html.markdown b/website/docs/cdktf/python/d/batch_job_definition.html.markdown index 29baa92d89a4..9ca6f2ae7298 100644 --- a/website/docs/cdktf/python/d/batch_job_definition.html.markdown +++ b/website/docs/cdktf/python/d/batch_job_definition.html.markdown @@ -57,6 +57,7 @@ class MyConvertedCode(TerraformStack): The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `arn` - ARN of the Job Definition. 
Do not begin the description with "An", "The", "Defines", "Indicates", or "Specifies," as these are verbose. In other words, "Indicates the amount of storage," can be rewritten as "Amount of storage," without losing any information. * `revision` - The revision of the job definition. * `name` - The name of the job definition to register. It can be up to 128 letters long. It can contain uppercase and lowercase letters, numbers, hyphens (-), and underscores (_). @@ -299,4 +300,4 @@ This data source exports the following attributes in addition to the arguments a * `attempt_duration_seconds` - The job timeout time (in seconds) that's measured from the job attempt's startedAt timestamp. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/batch_job_queue.html.markdown b/website/docs/cdktf/python/d/batch_job_queue.html.markdown index 046bd6a12a10..11738df69f56 100644 --- a/website/docs/cdktf/python/d/batch_job_queue.html.markdown +++ b/website/docs/cdktf/python/d/batch_job_queue.html.markdown @@ -36,6 +36,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the job queue. ## Attribute Reference @@ -61,4 +62,4 @@ This data source exports the following attributes in addition to the arguments a * `job_state_time_limit_action.#.reason` - The reason to log for the action being taken. * `job_state_time_limit_action.#.state` - The state of the job needed to trigger the action. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/batch_scheduling_policy.html.markdown b/website/docs/cdktf/python/d/batch_scheduling_policy.html.markdown index c1d02ec8b0ca..30df78d5dc1a 100644 --- a/website/docs/cdktf/python/d/batch_scheduling_policy.html.markdown +++ b/website/docs/cdktf/python/d/batch_scheduling_policy.html.markdown @@ -35,6 +35,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `arn` - (Required) ARN of the scheduling policy. ## Attribute Reference @@ -56,4 +57,4 @@ A `share_distribution` block supports the following arguments: * `share_identifier` - Fair share identifier or fair share identifier prefix. For more information, see [ShareAttributes](https://docs.aws.amazon.com/batch/latest/APIReference/API_ShareAttributes.html). * `weight_factor` - Weight factor for the fair share identifier. For more information, see [ShareAttributes](https://docs.aws.amazon.com/batch/latest/APIReference/API_ShareAttributes.html). - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/bedrock_custom_model.html.markdown b/website/docs/cdktf/python/d/bedrock_custom_model.html.markdown index c5bbfaa3db2c..e412eea68fe2 100644 --- a/website/docs/cdktf/python/d/bedrock_custom_model.html.markdown +++ b/website/docs/cdktf/python/d/bedrock_custom_model.html.markdown @@ -35,7 +35,8 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: -* `model_id` – (Required) Name or ARN of the custom model. 
+* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `model_id` - (Required) Name or ARN of the custom model. ## Attribute Reference @@ -62,5 +63,5 @@ This data source exports the following attributes in addition to the arguments a * `s3_uri` - The S3 URI where the validation data is stored.. * `validation_metrics` - The loss metric for each validator that you provided. * `validation_loss` - The validation loss associated with the validator. - - \ No newline at end of file + + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/bedrock_custom_models.html.markdown b/website/docs/cdktf/python/d/bedrock_custom_models.html.markdown index ebdfd103305d..331cb2e16e44 100644 --- a/website/docs/cdktf/python/d/bedrock_custom_models.html.markdown +++ b/website/docs/cdktf/python/d/bedrock_custom_models.html.markdown @@ -31,7 +31,9 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -This data source does not support any arguments. +This data source supports the following arguments: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). ## Attribute Reference @@ -42,4 +44,4 @@ This data source exports the following attributes in addition to the arguments a * `model_arn` - The ARN of the custom model. * `model_name` - The name of the custom model. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/bedrock_foundation_model.html.markdown b/website/docs/cdktf/python/d/bedrock_foundation_model.html.markdown index c0ee5b339158..72abbfc425f0 100644 --- a/website/docs/cdktf/python/d/bedrock_foundation_model.html.markdown +++ b/website/docs/cdktf/python/d/bedrock_foundation_model.html.markdown @@ -42,7 +42,8 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: -* `model_id` – (Required) Model identifier. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `model_id` - (Required) Model identifier. ## Attribute Reference @@ -57,4 +58,4 @@ This data source exports the following attributes in addition to the arguments a * `provider_name` - Model provider name. * `response_streaming_supported` - Indicates whether the model supports streaming. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/bedrock_foundation_models.html.markdown b/website/docs/cdktf/python/d/bedrock_foundation_models.html.markdown index fb06370d932e..d42b01054cfc 100644 --- a/website/docs/cdktf/python/d/bedrock_foundation_models.html.markdown +++ b/website/docs/cdktf/python/d/bedrock_foundation_models.html.markdown @@ -54,6 +54,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
* `by_customization_type` - (Optional) Customization type to filter on. Valid values are `FINE_TUNING`. * `by_inference_type` - (Optional) Inference type to filter on. Valid values are `ON_DEMAND` and `PROVISIONED`. * `by_output_modality` - (Optional) Output modality to filter on. Valid values are `TEXT`, `IMAGE`, and `EMBEDDING`. @@ -78,4 +79,4 @@ This data source exports the following attributes in addition to the arguments a * `provider_name` - Model provider name. * `response_streaming_supported` - Indicates whether the model supports streaming. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/bedrock_inference_profile.html.markdown b/website/docs/cdktf/python/d/bedrock_inference_profile.html.markdown index 0c1de139dfd5..d5c062821443 100644 --- a/website/docs/cdktf/python/d/bedrock_inference_profile.html.markdown +++ b/website/docs/cdktf/python/d/bedrock_inference_profile.html.markdown @@ -44,7 +44,8 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: -- `inference_profile_id` – (Required) Inference Profile identifier. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `inference_profile_id` - (Required) Inference Profile identifier. ## Attribute Reference @@ -63,4 +64,4 @@ This data source exports the following attributes in addition to the arguments a - `model_arn` - The Amazon Resource Name (ARN) of the model. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/bedrock_inference_profiles.html.markdown b/website/docs/cdktf/python/d/bedrock_inference_profiles.html.markdown index 2db16342b2b3..8786db121766 100644 --- a/website/docs/cdktf/python/d/bedrock_inference_profiles.html.markdown +++ b/website/docs/cdktf/python/d/bedrock_inference_profiles.html.markdown @@ -10,7 +10,7 @@ description: |- # Data Source: aws_bedrock_inference_profiles -Terraform data source for managing AWS Bedrock AWS Bedrock Inference Profiles. +Terraform data source for managing AWS Bedrock Inference Profiles. ## Example Usage @@ -31,9 +31,31 @@ class MyConvertedCode(TerraformStack): DataAwsBedrockInferenceProfiles(self, "test") ``` +### Filter by Type + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.data_aws_bedrock_inference_profiles import DataAwsBedrockInferenceProfiles +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + DataAwsBedrockInferenceProfiles(self, "test", + type="APPLICATION" + ) +``` + ## Argument Reference -This data source does not support any arguments. +This data source supports the following arguments: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `type` - (Optional) Filters for inference profiles that match the type you specify. Valid values are: `SYSTEM_DEFINED`, `APPLICATION`. 
## Attribute Reference @@ -43,18 +65,18 @@ This data source exports the following attributes in addition to the arguments a ### `inference_profile_summaries` -- `created_at` - The time at which the inference profile was created. -- `description` - The description of the inference profile. -- `inference_profile_arn` - The Amazon Resource Name (ARN) of the inference profile. -- `inference_profile_id` - The unique identifier of the inference profile. -- `inference_profile_name` - The name of the inference profile. -- `models` - A list of information about each model in the inference profile. See [`models`](#models). -- `status` - The status of the inference profile. `ACTIVE` means that the inference profile is available to use. -- `type` - The type of the inference profile. `SYSTEM_DEFINED` means that the inference profile is defined by Amazon Bedrock. -- `updated_at` - The time at which the inference profile was last updated. +- `created_at` - Time at which the inference profile was created. +- `description` - Description of the inference profile. +- `inference_profile_arn` - Amazon Resource Name (ARN) of the inference profile. +- `inference_profile_id` - Unique identifier of the inference profile. +- `inference_profile_name` - Name of the inference profile. +- `models` - List of information about each model in the inference profile. See [`models` Block](#models). +- `status` - Status of the inference profile. `ACTIVE` means that the inference profile is available to use. +- `type` - Type of the inference profile. `SYSTEM_DEFINED` means that the inference profile is defined by Amazon Bedrock. `APPLICATION` means the inference profile was created by a user. +- `updated_at` - Time at which the inference profile was last updated. ### `models` -- `model_arn` - The Amazon Resource Name (ARN) of the model. +- `model_arn` - Amazon Resource Name (ARN) of the model. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/bedrockagent_agent_versions.html.markdown b/website/docs/cdktf/python/d/bedrockagent_agent_versions.html.markdown index b18bacda40af..4f812534b44b 100644 --- a/website/docs/cdktf/python/d/bedrockagent_agent_versions.html.markdown +++ b/website/docs/cdktf/python/d/bedrockagent_agent_versions.html.markdown @@ -37,6 +37,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `agent_id` - (Required) Unique identifier of the agent. ## Attribute Reference @@ -60,4 +61,4 @@ This data source exports the following attributes in addition to the arguments a * `guardrail_identifier` - Unique identifier of the guardrail. * `guardrail_version` - Version of the guardrail. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/billing_views.html.markdown b/website/docs/cdktf/python/d/billing_views.html.markdown new file mode 100644 index 000000000000..b1bdc9cb08ee --- /dev/null +++ b/website/docs/cdktf/python/d/billing_views.html.markdown @@ -0,0 +1,76 @@ +--- +subcategory: "Billing" +layout: "aws" +page_title: "AWS: aws_billing_views" +description: |- + Retrieve a list of AWS Billing Views. +--- + + + +# Data Source: aws_billing_views + +Provides details about AWS Billing Views. + +## Example Usage + +### Basic Usage + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformOutput, Fn, TerraformStack +# +# Provider bindings are generated by running `cdktf get`.
+# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.data_aws_billing_views import DataAwsBillingViews +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + example = DataAwsBillingViews(self, "example", + billing_view_types=["PRIMARY"] + ) + TerraformOutput(self, "primary_view_arn_by_types", + value=Fn.lookup_nested(example.billing_view, ["0", "arn"]) + ) +``` + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformOutput, Fn, TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.data_aws_billing_views import DataAwsBillingViews +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + example = DataAwsBillingViews(self, "example") + TerraformOutput(self, "primary_view_arn_by_name", + value=Fn.lookup_nested("${[ for view in ${" + example.billing_view + "} : view.arn if view.name == \"Primary View\"]}", ["0"]) + ) + TerraformOutput(self, "view_arns", + value="${[ for view in ${" + example.billing_view + "} : view.arn]}" + ) +``` + +## Argument Reference + +The following arguments are optional: + +* `billing_view_types` - (Optional) List of billing view types to retrieve. Valid values are `PRIMARY`, `BILLING_GROUP`, `CUSTOM`. + +## Attribute Reference + +This data source exports the following attributes in addition to the arguments above: + +* `billing_view` - List of billing view objects with the following attributes: + * `arn` - ARN of the billing view. + * `description` - Description of the billing view. + * `name` - Name of the billing view. + * `owner_account_id` - Account ID of the billing view owner. 
+ + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/budgets_budget.html.markdown b/website/docs/cdktf/python/d/budgets_budget.html.markdown index 18b40c5f4c48..e3df3c200845 100644 --- a/website/docs/cdktf/python/d/budgets_budget.html.markdown +++ b/website/docs/cdktf/python/d/budgets_budget.html.markdown @@ -49,6 +49,7 @@ The following arguments are optional: This data source exports the following attributes in addition to the arguments above: * `auto_adjust_data` - Object containing [AutoAdjustData] which determines the budget amount for an auto-adjusting budget. +* `billing_view_arn` - ARN of the billing view. * `budget_exceeded` - Boolean indicating whether this budget has been exceeded. * `budget_limit` - The total amount of cost, usage, RI utilization, RI coverage, Savings Plans utilization, or Savings Plans coverage that you want to track with your budget. Contains object [Spend](#spend). * `budget_type` - Whether this budget tracks monetary cost or usage. @@ -147,4 +148,4 @@ Valid keys for `planned_limit` parameter. * `amount` - The cost or usage amount that's associated with a budget forecast, actual spend, or budget threshold. Length Constraints: Minimum length of `1`. Maximum length of `2147483647`. * `unit` - The unit of measurement that's used for the budget forecast, actual spend, or budget threshold, such as USD or GBP. Length Constraints: Minimum length of `1`. Maximum length of `2147483647`. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/ce_cost_category.html.markdown b/website/docs/cdktf/python/d/ce_cost_category.html.markdown index 4768e50cff5f..396328da4dce 100644 --- a/website/docs/cdktf/python/d/ce_cost_category.html.markdown +++ b/website/docs/cdktf/python/d/ce_cost_category.html.markdown @@ -8,7 +8,7 @@ description: |- -# Resource: aws_ce_cost_category +# Data Source: aws_ce_cost_category Provides details about a specific CostExplorer Cost Category. 
@@ -102,4 +102,4 @@ This data source exports the following attributes in addition to the arguments a * `type` - Parameter type. * `values` - Parameter values. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/chatbot_slack_workspace.html.markdown b/website/docs/cdktf/python/d/chatbot_slack_workspace.html.markdown index 1c45c0e64fba..54698636d056 100644 --- a/website/docs/cdktf/python/d/chatbot_slack_workspace.html.markdown +++ b/website/docs/cdktf/python/d/chatbot_slack_workspace.html.markdown @@ -35,8 +35,9 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -The following arguments are required: +This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `slack_team_name` - (Required) Slack workspace name configured with AWS Chatbot. ## Attribute Reference @@ -45,4 +46,4 @@ This data source exports the following attributes in addition to the arguments a * `slack_team_id` - ID of the Slack Workspace assigned by AWS Chatbot. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/cloudcontrolapi_resource.html.markdown b/website/docs/cdktf/python/d/cloudcontrolapi_resource.html.markdown index 9efdbdf423ec..847efda6085c 100644 --- a/website/docs/cdktf/python/d/cloudcontrolapi_resource.html.markdown +++ b/website/docs/cdktf/python/d/cloudcontrolapi_resource.html.markdown @@ -41,6 +41,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `role_arn` - (Optional) ARN of the IAM Role to assume for operations. * `type_version_id` - (Optional) Identifier of the CloudFormation resource type version. @@ -50,4 +51,4 @@ This data source exports the following attributes in addition to the arguments a * `properties` - JSON string matching the CloudFormation resource type schema with current configuration. Underlying attributes can be referenced via the [`jsondecode()` function](https://www.terraform.io/docs/language/functions/jsondecode.html), for example, `jsondecode(data.aws_cloudcontrolapi_resource.example.properties)["example"]`. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/cloudformation_export.html.markdown b/website/docs/cdktf/python/d/cloudformation_export.html.markdown index 9f7c99d57d82..3b91cdf1b156 100644 --- a/website/docs/cdktf/python/d/cloudformation_export.html.markdown +++ b/website/docs/cdktf/python/d/cloudformation_export.html.markdown @@ -44,6 +44,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
* `name` - (Required) Name of the export as it appears in the console or from [list-exports](http://docs.aws.amazon.com/cli/latest/reference/cloudformation/list-exports.html) ## Attribute Reference @@ -53,4 +54,4 @@ This data source exports the following attributes in addition to the arguments a * `value` - Value from Cloudformation export identified by the export name found from [list-exports](http://docs.aws.amazon.com/cli/latest/reference/cloudformation/list-exports.html) * `exporting_stack_id` - ARN of stack that contains the exported output name and value. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/cloudformation_stack.html.markdown b/website/docs/cdktf/python/d/cloudformation_stack.html.markdown index 7ffa92489081..fc792d43b9d3 100644 --- a/website/docs/cdktf/python/d/cloudformation_stack.html.markdown +++ b/website/docs/cdktf/python/d/cloudformation_stack.html.markdown @@ -46,6 +46,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the stack ## Attribute Reference @@ -63,4 +64,4 @@ This data source exports the following attributes in addition to the arguments a * `iam_role_arn` - ARN of the IAM role used to create the stack. 
* `timeout_in_minutes` - Amount of time that can pass before the stack status becomes `CREATE_FAILED` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/cloudformation_type.html.markdown b/website/docs/cdktf/python/d/cloudformation_type.html.markdown index 5be942bb266c..8f3fe4fcdf1a 100644 --- a/website/docs/cdktf/python/d/cloudformation_type.html.markdown +++ b/website/docs/cdktf/python/d/cloudformation_type.html.markdown @@ -36,6 +36,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `arn` - (Optional) ARN of the CloudFormation Type. For example, `arn:aws:cloudformation:us-west-2::type/resource/AWS-EC2-VPC`. * `type` - (Optional) CloudFormation Registry Type. For example, `RESOURCE`. * `type_name` - (Optional) CloudFormation Type name. For example, `AWS::EC2::VPC`. @@ -59,4 +60,4 @@ This data source exports the following attributes in addition to the arguments a * `source_url` - URL of the source code for the CloudFormation Type. * `visibility` - Scope of the CloudFormation Type. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/cloudfront_distribution.html.markdown b/website/docs/cdktf/python/d/cloudfront_distribution.html.markdown index 9b10fd6933b9..ddcfa75cd81a 100644 --- a/website/docs/cdktf/python/d/cloudfront_distribution.html.markdown +++ b/website/docs/cdktf/python/d/cloudfront_distribution.html.markdown @@ -45,6 +45,8 @@ This data source exports the following attributes in addition to the arguments a * `aliases` - List that contains information about CNAMEs (alternate domain names), if any, for this distribution. +* `anycast_ip_list_id` - ID of the Anycast static IP list that is associated with the distribution, if any. + * `arn` - ARN (Amazon Resource Name) for the distribution. For example: arn:aws:cloudfront::123456789012:distribution/EDFDVBD632BHDS5, where 123456789012 is your AWS account ID. * `status` - Current status of the distribution. `Deployed` if the @@ -67,4 +69,4 @@ This data source exports the following attributes in addition to the arguments a alias for the zone ID `Z2FDTNDATAQYW2`. * `web_acl_id` AWS WAF web ACL associated with this distribution. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/cloudfront_log_delivery_canonical_user_id.html.markdown b/website/docs/cdktf/python/d/cloudfront_log_delivery_canonical_user_id.html.markdown index 7650a408cf40..8de6c073784f 100644 --- a/website/docs/cdktf/python/d/cloudfront_log_delivery_canonical_user_id.html.markdown +++ b/website/docs/cdktf/python/d/cloudfront_log_delivery_canonical_user_id.html.markdown @@ -72,12 +72,12 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: -* `region` - (Optional) Region you'd like the zone for. By default, fetches the current region. +* `region` - (Optional) Name of the Region whose canonical user ID is desired. 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). ## Attribute Reference This data source exports the following attributes in addition to the arguments above: -* `id` - Canonical user ID for the AWS `awslogsdelivery` account in the region. +* `id` - Canonical user ID for the AWS `awslogsdelivery` account in the Region. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/cloudhsm_v2_cluster.html.markdown b/website/docs/cdktf/python/d/cloudhsm_v2_cluster.html.markdown index bc8899f9dd13..f9cc2876c035 100644 --- a/website/docs/cdktf/python/d/cloudhsm_v2_cluster.html.markdown +++ b/website/docs/cdktf/python/d/cloudhsm_v2_cluster.html.markdown @@ -35,6 +35,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `cluster_id` - (Required) ID of Cloud HSM v2 cluster. * `cluster_state` - (Optional) State of the cluster to be found. @@ -53,4 +54,4 @@ This data source exports the following attributes in addition to the arguments a * `cluster_certificates.0.manufacturer_hardware_certificate` - The HSM hardware certificate issued (signed) by the hardware manufacturer. The number of available cluster certificates may vary depending on state of the cluster. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/cloudtrail_service_account.html.markdown b/website/docs/cdktf/python/d/cloudtrail_service_account.html.markdown index 5f19e9343f6b..9601904ece1e 100644 --- a/website/docs/cdktf/python/d/cloudtrail_service_account.html.markdown +++ b/website/docs/cdktf/python/d/cloudtrail_service_account.html.markdown @@ -13,7 +13,7 @@ description: |- Use this data source to get the Account ID of the [AWS CloudTrail Service Account](http://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-supported-regions.html) in a given region for the purpose of allowing CloudTrail to store trail data in S3. -~> **Note:** AWS documentation [states that](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/create-s3-bucket-policy-for-cloudtrail.html#troubleshooting-s3-bucket-policy) a [service principal name](https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements_principal.html#principal-services) should be used instead of an AWS account ID in any relevant IAM policy. +~> **Warning:** This data source is deprecated. The AWS documentation [states that](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/create-s3-bucket-policy-for-cloudtrail.html#troubleshooting-s3-bucket-policy) a [service principal name](https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements_principal.html#principal-services) should be used instead of an AWS account ID in any relevant IAM policy. ## Example Usage @@ -73,14 +73,13 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: -* `region` - (Optional) Name of the region whose AWS CloudTrail account ID is desired. -Defaults to the region from the AWS provider configuration. +* `region` - (Optional) Name of the Region whose AWS CloudTrail account ID is desired. 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). ## Attribute Reference This data source exports the following attributes in addition to the arguments above: -* `id` - ID of the AWS CloudTrail service account in the selected region. -* `arn` - ARN of the AWS CloudTrail service account in the selected region. +* `id` - ID of the AWS CloudTrail service account in the selected Region. +* `arn` - ARN of the AWS CloudTrail service account in the selected Region. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/cloudwatch_contributor_managed_insight_rules.html.markdown b/website/docs/cdktf/python/d/cloudwatch_contributor_managed_insight_rules.html.markdown index 0029e9c6231a..7aee9eded9cf 100644 --- a/website/docs/cdktf/python/d/cloudwatch_contributor_managed_insight_rules.html.markdown +++ b/website/docs/cdktf/python/d/cloudwatch_contributor_managed_insight_rules.html.markdown @@ -37,6 +37,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `resource_arn` - (Required) ARN of an Amazon Web Services resource that has managed Contributor Insights rules. ## Attribute Reference @@ -56,4 +57,4 @@ This data source exports the following attributes in addition to the arguments a * `rule_name` - Name of the Contributor Insights rule that contains data for the specified Amazon Web Services resource. * `state` - Indicates whether the rule is enabled or disabled. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/cloudwatch_event_bus.html.markdown b/website/docs/cdktf/python/d/cloudwatch_event_bus.html.markdown index 5304d2801461..5306eb852123 100644 --- a/website/docs/cdktf/python/d/cloudwatch_event_bus.html.markdown +++ b/website/docs/cdktf/python/d/cloudwatch_event_bus.html.markdown @@ -37,6 +37,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the event bus. ## Attribute Reference @@ -49,5 +50,8 @@ This data source exports the following attributes in addition to the arguments a * `description` - Event bus description. * `id` - Name of the event bus. * `kms_key_identifier` - Identifier of the AWS KMS customer managed key for EventBridge to use to encrypt events on this event bus, if one has been specified. +* `log_config` - Block for logging configuration settings for the event bus. + * `include_detail` - Whether EventBridge includes detailed event information in the records it generates. + * `level` - Level of logging detail to include.
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/cloudwatch_event_buses.html.markdown b/website/docs/cdktf/python/d/cloudwatch_event_buses.html.markdown index 7143ab5bc1a7..2de054d3d49c 100644 --- a/website/docs/cdktf/python/d/cloudwatch_event_buses.html.markdown +++ b/website/docs/cdktf/python/d/cloudwatch_event_buses.html.markdown @@ -37,6 +37,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name_prefix` - (Optional) Specifying this limits the results to only those event buses with names that start with the specified prefix. ## Attribute Reference @@ -54,4 +55,4 @@ This data source exports the following attributes in addition to the arguments a * `name` - The name of the event bus. * `policy` - The permissions policy of the event bus, describing which other AWS accounts can write events to this event bus. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/cloudwatch_event_connection.html.markdown b/website/docs/cdktf/python/d/cloudwatch_event_connection.html.markdown index a7ddb72dde5f..d3763c2b0498 100644 --- a/website/docs/cdktf/python/d/cloudwatch_event_connection.html.markdown +++ b/website/docs/cdktf/python/d/cloudwatch_event_connection.html.markdown @@ -37,6 +37,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - Name of the connection. ## Attribute Reference @@ -48,4 +49,4 @@ This data source exports the following attributes in addition to the arguments a * `kms_key_identifier` - (Optional) Identifier of the AWS KMS customer managed key for EventBridge to use to encrypt the connection, if one has been specified. * `secret_arn` - ARN of the secret created from the authorization parameters specified for the connection. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/cloudwatch_event_source.html.markdown b/website/docs/cdktf/python/d/cloudwatch_event_source.html.markdown index 5491a8adfff9..03b9e93c4204 100644 --- a/website/docs/cdktf/python/d/cloudwatch_event_source.html.markdown +++ b/website/docs/cdktf/python/d/cloudwatch_event_source.html.markdown @@ -37,6 +37,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
* `name_prefix` - (Optional) Specifying this limits the results to only those partner event sources with names that start with the specified prefix ## Attribute Reference @@ -48,4 +49,4 @@ This data source exports the following attributes in addition to the arguments a * `name` - Name of the event source * `state` - State of the event source (`ACTIVE` or `PENDING`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/cloudwatch_log_group.html.markdown b/website/docs/cdktf/python/d/cloudwatch_log_group.html.markdown index 2b3ed983e6a9..0925f6568e95 100644 --- a/website/docs/cdktf/python/d/cloudwatch_log_group.html.markdown +++ b/website/docs/cdktf/python/d/cloudwatch_log_group.html.markdown @@ -35,6 +35,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the Cloudwatch log group ## Attribute Reference @@ -48,4 +49,4 @@ This data source exports the following attributes in addition to the arguments a * `retention_in_days` - Number of days log events retained in the specified log group. * `tags` - Map of tags to assign to the resource. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/cloudwatch_log_groups.html.markdown b/website/docs/cdktf/python/d/cloudwatch_log_groups.html.markdown index 3a207f8f8b43..bac5ffe36747 100644 --- a/website/docs/cdktf/python/d/cloudwatch_log_groups.html.markdown +++ b/website/docs/cdktf/python/d/cloudwatch_log_groups.html.markdown @@ -35,6 +35,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `log_group_name_prefix` - (Optional) Group prefix of the Cloudwatch log groups to list ## Attribute Reference @@ -44,4 +45,4 @@ This data source exports the following attributes in addition to the arguments a * `arns` - Set of ARNs of the Cloudwatch log groups * `log_group_names` - Set of names of the Cloudwatch log groups - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/codeartifact_authorization_token.html.markdown b/website/docs/cdktf/python/d/codeartifact_authorization_token.html.markdown index b77f8c417aca..7aa2eb4cd4e9 100644 --- a/website/docs/cdktf/python/d/codeartifact_authorization_token.html.markdown +++ b/website/docs/cdktf/python/d/codeartifact_authorization_token.html.markdown @@ -35,6 +35,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
* `domain` - (Required) Name of the domain that is in scope for the generated authorization token. * `domain_owner` - (Optional) Account number of the AWS account that owns the domain. * `duration_seconds` - (Optional) Time, in seconds, that the generated authorization token is valid. Valid values are `0` and between `900` and `43200`. @@ -46,4 +47,4 @@ This data source exports the following attributes in addition to the arguments a * `authorization_token` - Temporary authorization token. * `expiration` - Time in UTC RFC3339 format when the authorization token expires. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/codeartifact_repository_endpoint.html.markdown b/website/docs/cdktf/python/d/codeartifact_repository_endpoint.html.markdown index f049101bea47..469d854c2498 100644 --- a/website/docs/cdktf/python/d/codeartifact_repository_endpoint.html.markdown +++ b/website/docs/cdktf/python/d/codeartifact_repository_endpoint.html.markdown @@ -37,6 +37,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `domain` - (Required) Name of the domain that contains the repository. * `repository` - (Required) Name of the repository. * `format` - (Required) Which endpoint of a repository to return. A repository has one endpoint for each package format: `npm`, `pypi`, `maven`, and `nuget`. @@ -48,4 +49,4 @@ This data source exports the following attributes in addition to the arguments a * `repository_endpoint` - URL of the returned endpoint. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/codebuild_fleet.html.markdown b/website/docs/cdktf/python/d/codebuild_fleet.html.markdown index 29f20a2c89e9..0705a2fbc26f 100644 --- a/website/docs/cdktf/python/d/codebuild_fleet.html.markdown +++ b/website/docs/cdktf/python/d/codebuild_fleet.html.markdown @@ -71,8 +71,9 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -The following arguments are required: +This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Fleet name. ## Attribute Reference @@ -83,6 +84,7 @@ This data source exports the following attributes in addition to the arguments a * `base_capacity` - Number of machines allocated to the fleet. * `compute_configuration` - Compute configuration of the compute fleet. * `disk` - Amount of disk space of the instance type included in the fleet. + * `instance_type` - EC2 instance type in the fleet. * `machine_type` - Machine type of the instance type included in the fleet. * `memory` - Amount of memory of the instance type included in the fleet. * `vcpu` - Number of vCPUs of the instance type included in the fleet. @@ -111,4 +113,4 @@ This data source exports the following attributes in addition to the arguments a * `subnets` - A list of one or more subnet IDs in your Amazon VPC. * `vpc_id` - The ID of the Amazon VPC. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/codecatalyst_dev_environment.html.markdown b/website/docs/cdktf/python/d/codecatalyst_dev_environment.html.markdown index 517ac7f95c1b..9fce466052a7 100644 --- a/website/docs/cdktf/python/d/codecatalyst_dev_environment.html.markdown +++ b/website/docs/cdktf/python/d/codecatalyst_dev_environment.html.markdown @@ -36,8 +36,9 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -The following arguments are required: +This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `env_id` - - (Required) The system-generated unique ID of the Dev Environment for which you want to view information. To retrieve a list of Dev Environment IDs, use [ListDevEnvironments](https://docs.aws.amazon.com/codecatalyst/latest/APIReference/API_ListDevEnvironments.html). * `project_name` - (Required) The name of the project in the space. * `space_name` - (Required) The name of the space. @@ -57,4 +58,4 @@ This data source exports the following attributes in addition to the arguments a * `status` - The current status of the Dev Environment. From: PENDING | RUNNING | STARTING | STOPPING | STOPPED | FAILED | DELETING | DELETED. * `status_reason` - The reason for the status. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/codecommit_approval_rule_template.html.markdown b/website/docs/cdktf/python/d/codecommit_approval_rule_template.html.markdown index f67ddf275f92..0b337b1dfa85 100644 --- a/website/docs/cdktf/python/d/codecommit_approval_rule_template.html.markdown +++ b/website/docs/cdktf/python/d/codecommit_approval_rule_template.html.markdown @@ -35,6 +35,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name for the approval rule template. This needs to be less than 100 characters. ## Attribute Reference @@ -49,4 +50,4 @@ This data source exports the following attributes in addition to the arguments a * `last_modified_user` - ARN of the user who made the most recent changes to the approval rule template. * `rule_content_sha256` - SHA-256 hash signature for the content of the approval rule template. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/codecommit_repository.html.markdown b/website/docs/cdktf/python/d/codecommit_repository.html.markdown index 0d0b4357592f..5afa0ccb371d 100644 --- a/website/docs/cdktf/python/d/codecommit_repository.html.markdown +++ b/website/docs/cdktf/python/d/codecommit_repository.html.markdown @@ -35,6 +35,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `repository_name` - (Required) Name for the repository. This needs to be less than 100 characters. ## Attribute Reference @@ -47,4 +48,4 @@ This data source exports the following attributes in addition to the arguments a * `clone_url_http` - URL to use for cloning the repository over HTTPS. * `clone_url_ssh` - URL to use for cloning the repository over SSH. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/codeguruprofiler_profiling_group.html.markdown b/website/docs/cdktf/python/d/codeguruprofiler_profiling_group.html.markdown index f7265efe2f9a..5095d9d964c5 100644 --- a/website/docs/cdktf/python/d/codeguruprofiler_profiling_group.html.markdown +++ b/website/docs/cdktf/python/d/codeguruprofiler_profiling_group.html.markdown @@ -35,8 +35,9 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -The following arguments are required: +This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The name of the profiling group. ## Attribute Reference @@ -51,4 +52,4 @@ This data source exports the following attributes in addition to the arguments a * `tags` - Mapping of Key-Value tags for the resource. * `updated_at` - Timestamp when Profiling Group was updated. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/codestarconnections_connection.html.markdown b/website/docs/cdktf/python/d/codestarconnections_connection.html.markdown index 7d3dced7e970..6bf7a4bec8b5 100644 --- a/website/docs/cdktf/python/d/codestarconnections_connection.html.markdown +++ b/website/docs/cdktf/python/d/codestarconnections_connection.html.markdown @@ -56,6 +56,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `arn` - (Optional) CodeStar Connection ARN. * `name` - (Optional) CodeStar Connection name. @@ -72,4 +73,4 @@ This data source exports the following attributes in addition to the arguments a * `provider_type` - Name of the external provider where your third-party code repository is configured. Possible values are `Bitbucket`, `GitHub` and `GitLab`. For connections to GitHub Enterprise Server or GitLab Self-Managed instances, you must create an [aws_codestarconnections_host](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/codestarconnections_host) resource and use `host_arn` instead. * `tags` - Map of key-value resource tags to associate with the resource. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/cognito_identity_pool.html.markdown b/website/docs/cdktf/python/d/cognito_identity_pool.html.markdown index 47eedab81150..573126d56da4 100644 --- a/website/docs/cdktf/python/d/cognito_identity_pool.html.markdown +++ b/website/docs/cdktf/python/d/cognito_identity_pool.html.markdown @@ -37,6 +37,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `identity_pool_name` - (Required) The Cognito Identity Pool name. ## Attribute Reference @@ -54,4 +55,4 @@ This data source exports the following attributes in addition to the arguments a * `supported_login_providers` - Key-Value pairs mapping provider names to provider app IDs. * `tags` - A map of tags to assigned to the Identity Pool. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/cognito_user_group.html.markdown b/website/docs/cdktf/python/d/cognito_user_group.html.markdown index 17c25a0ae809..7d31da1ca785 100644 --- a/website/docs/cdktf/python/d/cognito_user_group.html.markdown +++ b/website/docs/cdktf/python/d/cognito_user_group.html.markdown @@ -38,6 +38,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the user group. 
* `user_pool_id` - (Required) User pool the client belongs to. @@ -50,4 +51,4 @@ This data source exports the following attributes in addition to the arguments a * `precedence` - Precedence of the user group. * `role_arn` - ARN of the IAM role to be associated with the user group. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/cognito_user_groups.html.markdown b/website/docs/cdktf/python/d/cognito_user_groups.html.markdown index a94daf53a634..5f98f1ae4f58 100644 --- a/website/docs/cdktf/python/d/cognito_user_groups.html.markdown +++ b/website/docs/cdktf/python/d/cognito_user_groups.html.markdown @@ -37,6 +37,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `user_pool_id` - (Required) User pool the client belongs to. ## Attribute Reference @@ -53,4 +54,4 @@ This data source exports the following attributes in addition to the arguments a * `precedence` - Precedence of the user group. * `role_arn` - ARN of the IAM role to be associated with the user group. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/cognito_user_pool.html.markdown b/website/docs/cdktf/python/d/cognito_user_pool.html.markdown index f127fd764739..c600dd8fa194 100644 --- a/website/docs/cdktf/python/d/cognito_user_pool.html.markdown +++ b/website/docs/cdktf/python/d/cognito_user_pool.html.markdown @@ -37,6 +37,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `user_pool_id` - (Required) The cognito pool ID ## Attribute Reference @@ -141,4 +142,4 @@ This data source exports the following attributes in addition to the arguments a * `custom_auth_mode` - Mode of threat protection operation in custom authentication. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/cognito_user_pool_client.html.markdown b/website/docs/cdktf/python/d/cognito_user_pool_client.html.markdown index 9d5296f6acfe..260700beb278 100644 --- a/website/docs/cdktf/python/d/cognito_user_pool_client.html.markdown +++ b/website/docs/cdktf/python/d/cognito_user_pool_client.html.markdown @@ -36,6 +36,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `client_id` - (Required) Client Id of the user pool. * `user_pool_id` - (Required) User pool the client belongs to. @@ -87,4 +88,4 @@ Valid values for the following arguments are: `seconds`, `minutes`, `hours` or ` * `id_token` - (Optional) Time unit in for the value in `id_token_validity`, defaults to `hours`. * `refresh_token` - (Optional) Time unit in for the value in `refresh_token_validity`, defaults to `days`. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/cognito_user_pool_clients.html.markdown b/website/docs/cdktf/python/d/cognito_user_pool_clients.html.markdown index 6e564fa35f88..d5b7d596c7a1 100644 --- a/website/docs/cdktf/python/d/cognito_user_pool_clients.html.markdown +++ b/website/docs/cdktf/python/d/cognito_user_pool_clients.html.markdown @@ -35,6 +35,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `user_pool_id` - (Required) Cognito user pool ID. ## Attribute Reference @@ -44,4 +45,4 @@ This data source exports the following attributes in addition to the arguments a * `client_ids` - List of Cognito user pool client IDs. * `client_names` - List of Cognito user pool client names. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/cognito_user_pool_signing_certificate.html.markdown b/website/docs/cdktf/python/d/cognito_user_pool_signing_certificate.html.markdown index c7882d874fe7..cf65b50c2ae6 100644 --- a/website/docs/cdktf/python/d/cognito_user_pool_signing_certificate.html.markdown +++ b/website/docs/cdktf/python/d/cognito_user_pool_signing_certificate.html.markdown @@ -35,6 +35,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
* `user_pool_id` - (Required) Cognito user pool ID. ## Attribute Reference @@ -43,4 +44,4 @@ This data source exports the following attributes in addition to the arguments a * `certificate` - Certificate string - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/cognito_user_pools.html.markdown b/website/docs/cdktf/python/d/cognito_user_pools.html.markdown index d76072146b83..318758241722 100644 --- a/website/docs/cdktf/python/d/cognito_user_pools.html.markdown +++ b/website/docs/cdktf/python/d/cognito_user_pools.html.markdown @@ -48,6 +48,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the cognito user pools. Name is not a unique attribute for cognito user pool, so multiple pools might be returned with given name. If the pool name is expected to be unique, you can reference the pool id via ```tolist(data.aws_cognito_user_pools.selected.ids)[0]``` ## Attribute Reference @@ -57,4 +58,4 @@ This data source exports the following attributes in addition to the arguments a * `ids` - Set of cognito user pool ids. * `arns` - Set of cognito user pool Amazon Resource Names (ARNs). 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/connect_bot_association.html.markdown b/website/docs/cdktf/python/d/connect_bot_association.html.markdown index 9d972df8a749..aacc524a00b4 100644 --- a/website/docs/cdktf/python/d/connect_bot_association.html.markdown +++ b/website/docs/cdktf/python/d/connect_bot_association.html.markdown @@ -40,6 +40,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `instance_id` - (Required) Identifier of the Amazon Connect instance. You can find the instanceId in the ARN of the instance. * `lex_bot` - (Required) Configuration information of an Amazon Lex (V1) bot. Detailed below. @@ -54,4 +55,4 @@ The `lex_bot` configuration block supports the following: This data source exports no additional attributes. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/connect_contact_flow.html.markdown b/website/docs/cdktf/python/d/connect_contact_flow.html.markdown index dd7d84d4cfa2..3dcb29b45b3d 100644 --- a/website/docs/cdktf/python/d/connect_contact_flow.html.markdown +++ b/website/docs/cdktf/python/d/connect_contact_flow.html.markdown @@ -58,6 +58,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
* `contact_flow_id` - (Optional) Returns information on a specific Contact Flow by contact flow id * `instance_id` - (Required) Reference to the hosting Amazon Connect Instance * `name` - (Optional) Returns information on a specific Contact Flow by name @@ -74,4 +75,4 @@ This data source exports the following attributes in addition to the arguments a * `tags` - Tags to assign to the Contact Flow. * `type` - Type of Contact Flow. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/connect_contact_flow_module.html.markdown b/website/docs/cdktf/python/d/connect_contact_flow_module.html.markdown index cd58db216458..3f30468e4787 100644 --- a/website/docs/cdktf/python/d/connect_contact_flow_module.html.markdown +++ b/website/docs/cdktf/python/d/connect_contact_flow_module.html.markdown @@ -58,6 +58,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `contact_flow_module_id` - (Optional) Returns information on a specific Contact Flow Module by contact flow module id * `instance_id` - (Required) Reference to the hosting Amazon Connect Instance * `name` - (Optional) Returns information on a specific Contact Flow Module by name @@ -75,4 +76,4 @@ This data source exports the following attributes in addition to the arguments a * `state` - Type of Contact Flow Module Module. Values are either `ACTIVE` or `ARCHIVED`. * `status` - Status of the Contact Flow Module Module. Values are either `PUBLISHED` or `SAVED`. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/connect_hours_of_operation.html.markdown b/website/docs/cdktf/python/d/connect_hours_of_operation.html.markdown index 2f67a06a5483..4c5d47945b58 100644 --- a/website/docs/cdktf/python/d/connect_hours_of_operation.html.markdown +++ b/website/docs/cdktf/python/d/connect_hours_of_operation.html.markdown @@ -58,6 +58,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `hours_of_operation_id` - (Optional) Returns information on a specific Hours of Operation by hours of operation id * `instance_id` - (Required) Reference to the hosting Amazon Connect Instance * `name` - (Optional) Returns information on a specific Hours of Operation by name @@ -93,4 +94,4 @@ A `start_time` block supports the following arguments: * `hours` - Hour of opening. * `minutes` - Minute of opening. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/connect_instance.html.markdown b/website/docs/cdktf/python/d/connect_instance.html.markdown index 0637c8e95746..50358a0a19f5 100644 --- a/website/docs/cdktf/python/d/connect_instance.html.markdown +++ b/website/docs/cdktf/python/d/connect_instance.html.markdown @@ -56,6 +56,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `instance_id` - (Optional) Returns information on a specific connect instance by id * `instance_alias` - (Optional) Returns information on a specific connect instance by alias @@ -80,4 +81,4 @@ This data source exports the following attributes in addition to the arguments a * `service_role` - Service role of the instance. * `tags` - A map of tags to assigned to the instance. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/connect_instance_storage_config.html.markdown b/website/docs/cdktf/python/d/connect_instance_storage_config.html.markdown index 2645ca26e979..2f4570084fcd 100644 --- a/website/docs/cdktf/python/d/connect_instance_storage_config.html.markdown +++ b/website/docs/cdktf/python/d/connect_instance_storage_config.html.markdown @@ -37,6 +37,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `association_id` - (Required) The existing association identifier that uniquely identifies the resource type and storage config for the given instance ID. * `instance_id` - (Required) Reference to the hosting Amazon Connect Instance * `resource_type` - (Required) A valid resource type. Valid Values: `AGENT_EVENTS` | `ATTACHMENTS` | `CALL_RECORDINGS` | `CHAT_TRANSCRIPTS` | `CONTACT_EVALUATIONS` | `CONTACT_TRACE_RECORDS` | `MEDIA_STREAMS` | `REAL_TIME_CONTACT_ANALYSIS_SEGMENTS` | `SCHEDULED_REPORTS` | `SCREEN_RECORDINGS`. 
@@ -93,4 +94,4 @@ The `encryption_config` configuration block supports the following arguments: * `encryption_type` - The type of encryption. Valid Values: `KMS`. * `key_id` - The full ARN of the encryption key. Be sure to provide the full ARN of the encryption key, not just the ID. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/connect_lambda_function_association.html.markdown b/website/docs/cdktf/python/d/connect_lambda_function_association.html.markdown index 415572fc186a..6b73feb3396c 100644 --- a/website/docs/cdktf/python/d/connect_lambda_function_association.html.markdown +++ b/website/docs/cdktf/python/d/connect_lambda_function_association.html.markdown @@ -36,6 +36,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `function_arn` - (Required) ARN of the Lambda Function, omitting any version or alias qualifier. * `instance_id` - (Required) Identifier of the Amazon Connect instance. You can find the instanceId in the ARN of the instance. @@ -45,4 +46,4 @@ This data source exports the following attributes in addition to the arguments a * `id` - AWS Region. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/connect_prompt.html.markdown b/website/docs/cdktf/python/d/connect_prompt.html.markdown index 75da3f06f63e..a09dbce7717c 100644 --- a/website/docs/cdktf/python/d/connect_prompt.html.markdown +++ b/website/docs/cdktf/python/d/connect_prompt.html.markdown @@ -38,6 +38,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `instance_id` - (Required) Reference to the hosting Amazon Connect Instance * `name` - (Required) Returns information on a specific Prompt by name @@ -48,4 +49,4 @@ This data source exports the following attributes in addition to the arguments a * `arn` - ARN of the Prompt. * `prompt_id` - Identifier for the prompt. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/connect_queue.html.markdown b/website/docs/cdktf/python/d/connect_queue.html.markdown index 5e8bb605f45a..a9e2bafb27bd 100644 --- a/website/docs/cdktf/python/d/connect_queue.html.markdown +++ b/website/docs/cdktf/python/d/connect_queue.html.markdown @@ -58,6 +58,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
* `queue_id` - (Optional) Returns information on a specific Queue by Queue id * `instance_id` - (Required) Reference to the hosting Amazon Connect Instance * `name` - (Optional) Returns information on a specific Queue by name @@ -84,4 +85,4 @@ A `outbound_caller_config` block supports the following arguments: * `outbound_caller_id_number_id` - Specifies the caller ID number. * `outbound_flow_id` - Outbound whisper flow to be used during an outbound call. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/connect_quick_connect.html.markdown b/website/docs/cdktf/python/d/connect_quick_connect.html.markdown index acbcff288dc0..b7b27546adc8 100644 --- a/website/docs/cdktf/python/d/connect_quick_connect.html.markdown +++ b/website/docs/cdktf/python/d/connect_quick_connect.html.markdown @@ -58,6 +58,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `quick_connect_id` - (Optional) Returns information on a specific Quick Connect by Quick Connect id * `instance_id` - (Required) Reference to the hosting Amazon Connect Instance * `name` - (Optional) Returns information on a specific Quick Connect by name @@ -96,4 +97,4 @@ A `user_config` block contains the following arguments: * `contact_flow_id` - Identifier of the contact flow. * `user_id` - Identifier for the user. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/connect_routing_profile.html.markdown b/website/docs/cdktf/python/d/connect_routing_profile.html.markdown index 6efa21a7c211..70208f9777c8 100644 --- a/website/docs/cdktf/python/d/connect_routing_profile.html.markdown +++ b/website/docs/cdktf/python/d/connect_routing_profile.html.markdown @@ -58,6 +58,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `instance_id` - Reference to the hosting Amazon Connect Instance * `name` - (Optional) Returns information on a specific Routing Profile by name * `routing_profile_id` - (Optional) Returns information on a specific Routing Profile by Routing Profile id @@ -90,4 +91,4 @@ A `queue_configs` block supports the following attributes: * `queue_id` - Identifier for the queue. * `queue_name` - Name for the queue. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/connect_security_profile.html.markdown b/website/docs/cdktf/python/d/connect_security_profile.html.markdown index 47376efbe19d..5a4296760a4e 100644 --- a/website/docs/cdktf/python/d/connect_security_profile.html.markdown +++ b/website/docs/cdktf/python/d/connect_security_profile.html.markdown @@ -58,6 +58,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `security_profile_id` - (Optional) Returns information on a specific Security Profile by Security Profile id * `instance_id` - (Required) Reference to the hosting Amazon Connect Instance * `name` - (Optional) Returns information on a specific Security Profile by name @@ -75,4 +76,4 @@ This data source exports the following attributes in addition to the arguments a * `permissions` - List of permissions assigned to the security profile. * `tags` - Map of tags to assign to the Security Profile. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/connect_user.html.markdown b/website/docs/cdktf/python/d/connect_user.html.markdown index f2806ffa25e7..94c9cc4c98f1 100644 --- a/website/docs/cdktf/python/d/connect_user.html.markdown +++ b/website/docs/cdktf/python/d/connect_user.html.markdown @@ -58,6 +58,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `instance_id` - (Required) Reference to the hosting Amazon Connect Instance * `name` - (Optional) Returns information on a specific User by name * `user_id` - (Optional) Returns information on a specific User by User id @@ -97,4 +98,4 @@ A `phone_config` block supports the following attributes: * `desk_phone_number` - The phone number for the user's desk phone. * `phone_type` - The phone type. Valid values are `DESK_PHONE` and `SOFT_PHONE`. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/connect_user_hierarchy_group.html.markdown b/website/docs/cdktf/python/d/connect_user_hierarchy_group.html.markdown index ede287c5127e..2d3676127202 100644 --- a/website/docs/cdktf/python/d/connect_user_hierarchy_group.html.markdown +++ b/website/docs/cdktf/python/d/connect_user_hierarchy_group.html.markdown @@ -58,6 +58,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `hierarchy_group_id` - (Optional) Returns information on a specific hierarchy group by hierarchy group id * `instance_id` - (Required) Reference to the hosting Amazon Connect Instance * `name` - (Optional) Returns information on a specific hierarchy group by name @@ -88,4 +89,4 @@ A level block supports the following attributes: * `id` - The identifier of the hierarchy group. * `name` - Name of the hierarchy group. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/connect_user_hierarchy_structure.html.markdown b/website/docs/cdktf/python/d/connect_user_hierarchy_structure.html.markdown index e8430e35a774..e5556e903fa7 100644 --- a/website/docs/cdktf/python/d/connect_user_hierarchy_structure.html.markdown +++ b/website/docs/cdktf/python/d/connect_user_hierarchy_structure.html.markdown @@ -35,6 +35,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `instance_id` - (Required) Reference to the hosting Amazon Connect Instance ## Attribute Reference @@ -57,4 +58,4 @@ Each level block supports the following attributes: * `id` - The identifier of the hierarchy level. * `name` - Name of the user hierarchy level. Must not be more than 50 characters. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/connect_vocabulary.html.markdown b/website/docs/cdktf/python/d/connect_vocabulary.html.markdown index 03a4f9839d9d..72d10c9da4a6 100644 --- a/website/docs/cdktf/python/d/connect_vocabulary.html.markdown +++ b/website/docs/cdktf/python/d/connect_vocabulary.html.markdown @@ -58,6 +58,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `instance_id` - (Required) Reference to the hosting Amazon Connect Instance * `name` - (Optional) Returns information on a specific Vocabulary by name * `vocabulary_id` - (Optional) Returns information on a specific Vocabulary by Vocabulary id @@ -79,4 +80,4 @@ separated by a colon (`:`). * `tags` - A map of tags to assign to the Vocabulary. * `vocabulary_id` - The identifier of the custom vocabulary. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/controltower_controls.html.markdown b/website/docs/cdktf/python/d/controltower_controls.html.markdown index 84dce0b2065b..48b7f8de8a23 100644 --- a/website/docs/cdktf/python/d/controltower_controls.html.markdown +++ b/website/docs/cdktf/python/d/controltower_controls.html.markdown @@ -45,8 +45,9 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -The following arguments are required: +This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `target_identifier` - (Required) The ARN of the organizational unit. ## Attribute Reference @@ -55,4 +56,4 @@ This data source exports the following attributes in addition to the arguments a * `enabled_controls` - List of all the ARNs for the controls applied to the `target_identifier`. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/customer_gateway.html.markdown b/website/docs/cdktf/python/d/customer_gateway.html.markdown index 6eedc46eaffe..656c59aa737d 100644 --- a/website/docs/cdktf/python/d/customer_gateway.html.markdown +++ b/website/docs/cdktf/python/d/customer_gateway.html.markdown @@ -51,6 +51,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `id` - (Optional) ID of the gateway. 
* `filter` - (Optional) One or more [name-value pairs][dcg-filters] to filter by. @@ -75,4 +76,4 @@ This data source exports the following attributes in addition to the arguments a - `read` - (Default `20m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/datapipeline_pipeline.html.markdown b/website/docs/cdktf/python/d/datapipeline_pipeline.html.markdown index 1efdc31bcef7..817e304811de 100644 --- a/website/docs/cdktf/python/d/datapipeline_pipeline.html.markdown +++ b/website/docs/cdktf/python/d/datapipeline_pipeline.html.markdown @@ -35,6 +35,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `pipeline_id` - (Required) ID of the pipeline. ## Attribute Reference @@ -45,4 +46,4 @@ This data source exports the following attributes in addition to the arguments a * `description` - Description of Pipeline. * `tags` - Map of tags assigned to the resource. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/datapipeline_pipeline_definition.html.markdown b/website/docs/cdktf/python/d/datapipeline_pipeline_definition.html.markdown index 50270a6054ff..55b321d5b18e 100644 --- a/website/docs/cdktf/python/d/datapipeline_pipeline_definition.html.markdown +++ b/website/docs/cdktf/python/d/datapipeline_pipeline_definition.html.markdown @@ -35,6 +35,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `pipeline_id` - (Required) ID of the pipeline. ## Attribute Reference @@ -72,4 +73,4 @@ This data source exports the following attributes in addition to the arguments a * `ref_value` - Field value, expressed as the identifier of another object * `string_value` - Field value, expressed as a String. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/datazone_domain.html.markdown b/website/docs/cdktf/python/d/datazone_domain.html.markdown index ca7e8ff8572b..17dedc5a1976 100644 --- a/website/docs/cdktf/python/d/datazone_domain.html.markdown +++ b/website/docs/cdktf/python/d/datazone_domain.html.markdown @@ -37,6 +37,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Optional) Name of the Domain. One of `name` or `id` is required. * `id` - (Optional) ID of the Domain. One of `name` or `id` is required @@ -53,4 +54,4 @@ This data source exports the following attributes in addition to the arguments a * `portal_url` - URL of the Domain. * `status` - Status of the Domain. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/datazone_environment_blueprint.html.markdown b/website/docs/cdktf/python/d/datazone_environment_blueprint.html.markdown index ba51fc0a7562..9edb4a1503bd 100644 --- a/website/docs/cdktf/python/d/datazone_environment_blueprint.html.markdown +++ b/website/docs/cdktf/python/d/datazone_environment_blueprint.html.markdown @@ -45,8 +45,9 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -The following arguments are required: +This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `domain_id` - (Required) ID of the domain. * `name` - (Required) Name of the blueprint. * `managed` (Required) Whether the blueprint is managed by Amazon DataZone. @@ -59,4 +60,4 @@ This data source exports the following attributes in addition to the arguments a * `description` - Description of the blueprint * `blueprint_provider` - Provider of the blueprint - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/db_cluster_snapshot.html.markdown b/website/docs/cdktf/python/d/db_cluster_snapshot.html.markdown index 50050c3a42c2..2dd2b1093d3f 100644 --- a/website/docs/cdktf/python/d/db_cluster_snapshot.html.markdown +++ b/website/docs/cdktf/python/d/db_cluster_snapshot.html.markdown @@ -59,6 +59,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `most_recent` - (Optional) If more than one result is returned, use the most recent Snapshot. * `db_cluster_identifier` - (Optional) Returns the list of snapshots created by the specific db_cluster * `db_cluster_snapshot_identifier` - (Optional) Returns information on a specific snapshot_id. @@ -94,4 +95,4 @@ This data source exports the following attributes in addition to the arguments a * `vpc_id` - VPC ID associated with the DB cluster snapshot. * `tags` - Map of tags for the resource. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/db_event_categories.html.markdown b/website/docs/cdktf/python/d/db_event_categories.html.markdown index 642c28e6d4d5..0baff8224604 100644 --- a/website/docs/cdktf/python/d/db_event_categories.html.markdown +++ b/website/docs/cdktf/python/d/db_event_categories.html.markdown @@ -62,6 +62,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `source_type` - (Optional) Type of source that will be generating the events. Valid options are db-instance, db-security-group, db-parameter-group, db-snapshot, db-cluster or db-cluster-snapshot. ## Attribute Reference @@ -71,4 +72,4 @@ This data source exports the following attributes in addition to the arguments a * `event_categories` - List of the event categories. * `id` - Region of the event categories. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/db_instance.html.markdown b/website/docs/cdktf/python/d/db_instance.html.markdown index 65b3a2c2468a..7021b9789c91 100644 --- a/website/docs/cdktf/python/d/db_instance.html.markdown +++ b/website/docs/cdktf/python/d/db_instance.html.markdown @@ -35,6 +35,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `db_instance_identifier` - (Optional) Name of the RDS instance. * `tags` - (Optional) Map of tags, each pair of which must exactly match a pair on the desired instance. @@ -94,4 +95,4 @@ The `master_user_secret` configuration block supports the following attributes: * `secret_arn` - The Amazon Resource Name (ARN) of the secret. * `secret_status` - The status of the secret. Valid Values: `creating` | `active` | `rotating` | `impaired`. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/db_instances.html.markdown b/website/docs/cdktf/python/d/db_instances.html.markdown index b67d3f817381..616b33c80d3c 100644 --- a/website/docs/cdktf/python/d/db_instances.html.markdown +++ b/website/docs/cdktf/python/d/db_instances.html.markdown @@ -62,6 +62,7 @@ class MyConvertedCode(TerraformStack): The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
* `filter` - (Optional) Configuration block(s) used to filter instances with AWS supported attributes, such as `engine`, `db-cluster-id` or `db-instance-id` for example. Detailed below. * `tags` - (Optional) Map of tags, each pair of which must exactly match a pair on the desired instances. @@ -79,4 +80,4 @@ This data source exports the following attributes in addition to the arguments a * `instance_arns` - ARNs of the matched RDS instances. * `instance_identifiers` - Identifiers of the matched RDS instances. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/db_parameter_group.html.markdown b/website/docs/cdktf/python/d/db_parameter_group.html.markdown new file mode 100644 index 000000000000..14cd4c4ea5ef --- /dev/null +++ b/website/docs/cdktf/python/d/db_parameter_group.html.markdown @@ -0,0 +1,49 @@ +--- +subcategory: "RDS (Relational Database)" +layout: "aws" +page_title: "AWS: aws_db_parameter_group" +description: |- + Information about a database parameter group. +--- + + + +# Data Source: aws_db_parameter_group + +Information about a database parameter group. + +## Example Usage + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.data_aws_db_parameter_group import DataAwsDbParameterGroup +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + DataAwsDbParameterGroup(self, "test", + name="default.postgres15" + ) +``` + +## Argument Reference + +This data source supports the following arguments: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `name` - (Required) DB parameter group name. + +## Attribute Reference + +This data source exports the following attributes in addition to the arguments above: + +* `arn` - ARN of the parameter group. +* `family` - Family of the parameter group. +* `description` - Description of the parameter group. + + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/db_proxy.html.markdown b/website/docs/cdktf/python/d/db_proxy.html.markdown index 5232e08bd783..f6d70ebe3ffb 100644 --- a/website/docs/cdktf/python/d/db_proxy.html.markdown +++ b/website/docs/cdktf/python/d/db_proxy.html.markdown @@ -35,6 +35,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the DB proxy. ## Attribute Reference @@ -44,6 +45,7 @@ This data source exports the following attributes in addition to the arguments a * `arn` - ARN of the DB Proxy. * `auth` - Configuration(s) with authorization mechanisms to connect to the associated instance or cluster. * `debug_logging` - Whether the proxy includes detailed information about SQL statements in its logs. +* `default_auth_scheme` - Default authentication scheme that the proxy uses for client connections to the proxy and connections from the proxy to the underlying database. * `endpoint` - Endpoint that you can use to connect to the DB proxy. * `engine_family` - Kinds of databases that the proxy can connect to. 
* `idle_client_timeout` - Number of seconds a connection to the proxy can have no activity before the proxy drops the client connection. @@ -53,4 +55,4 @@ This data source exports the following attributes in addition to the arguments a * `vpc_security_group_ids` - Provides a list of VPC security groups that the proxy belongs to. * `vpc_subnet_ids` - EC2 subnet IDs for the proxy. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/db_snapshot.html.markdown b/website/docs/cdktf/python/d/db_snapshot.html.markdown index a6442da6e040..177d134b443f 100644 --- a/website/docs/cdktf/python/d/db_snapshot.html.markdown +++ b/website/docs/cdktf/python/d/db_snapshot.html.markdown @@ -60,6 +60,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `most_recent` - (Optional) If more than one result is returned, use the most recent Snapshot. * `db_instance_identifier` - (Optional) Returns the list of snapshots created by the specific db_instance @@ -100,4 +101,4 @@ This data source exports the following attributes in addition to the arguments a * `snapshot_create_time` - Provides the time when the snapshot was taken, in Universal Coordinated Time (UTC). Changes for the copy when the snapshot is copied. * `original_snapshot_create_time` - Provides the time when the snapshot was taken, in Universal Coordinated Time (UTC). Doesn't change when the snapshot is copied. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/db_subnet_group.html.markdown b/website/docs/cdktf/python/d/db_subnet_group.html.markdown index 6cb2610c3768..81d0ab6b7d6f 100644 --- a/website/docs/cdktf/python/d/db_subnet_group.html.markdown +++ b/website/docs/cdktf/python/d/db_subnet_group.html.markdown @@ -35,6 +35,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the RDS database subnet group. ## Attribute Reference @@ -48,4 +49,4 @@ This data source exports the following attributes in addition to the arguments a * `supported_network_types` - The network type of the DB subnet group. * `vpc_id` - Provides the VPC ID of the DB subnet group. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/devopsguru_notification_channel.html.markdown b/website/docs/cdktf/python/d/devopsguru_notification_channel.html.markdown index a747d78babed..3a283b6f19cf 100644 --- a/website/docs/cdktf/python/d/devopsguru_notification_channel.html.markdown +++ b/website/docs/cdktf/python/d/devopsguru_notification_channel.html.markdown @@ -35,8 +35,9 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -The following arguments are required: +This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
* `id` - (Required) Unique identifier for the notification channel. ## Attribute Reference @@ -55,4 +56,4 @@ This data source exports the following attributes in addition to the arguments a * `message_types` - Events to receive notifications for. * `severities` - Severity levels to receive notifications for. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/devopsguru_resource_collection.html.markdown b/website/docs/cdktf/python/d/devopsguru_resource_collection.html.markdown index 3a899b7c6128..0384f5bc6937 100644 --- a/website/docs/cdktf/python/d/devopsguru_resource_collection.html.markdown +++ b/website/docs/cdktf/python/d/devopsguru_resource_collection.html.markdown @@ -35,8 +35,9 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -The following arguments are required: +This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `type` - (Required) Type of AWS resource collection to create. Valid values are `AWS_CLOUD_FORMATION`, `AWS_SERVICE`, and `AWS_TAGS`. ## Attribute Reference @@ -56,4 +57,4 @@ This data source exports the following attributes in addition to the arguments a * `app_boundary_key` - An AWS tag key that is used to identify the AWS resources that DevOps Guru analyzes. * `tag_values` - Array of tag values. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/directory_service_directory.html.markdown b/website/docs/cdktf/python/d/directory_service_directory.html.markdown index 01091cef20e7..70994172f82e 100644 --- a/website/docs/cdktf/python/d/directory_service_directory.html.markdown +++ b/website/docs/cdktf/python/d/directory_service_directory.html.markdown @@ -35,6 +35,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `directory_id` - (Required) ID of the directory. ## Attribute Reference @@ -53,7 +54,7 @@ This data source exports the following attributes in addition to the arguments a * `access_url` - Access URL for the directory/connector, such as http://alias.awsapps.com. * `dns_ip_addresses` - List of IP addresses of the DNS servers for the directory/connector. * `security_group_id` - ID of the security group created by the directory/connector. -* `tags` – A map of tags assigned to the directory/connector. +* `tags` - A map of tags assigned to the directory/connector. `vpc_settings` (for `SimpleAD` and `MicrosoftAD`) is also exported with the following attributes: @@ -78,4 +79,4 @@ This data source exports the following attributes in addition to the arguments a * `radius_timeout` - Amount of time, in seconds, to wait for the RADIUS server to respond. * `use_same_username` - Not currently used. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/dms_certificate.html.markdown b/website/docs/cdktf/python/d/dms_certificate.html.markdown index b9910fbb24b5..3818714f7f6d 100644 --- a/website/docs/cdktf/python/d/dms_certificate.html.markdown +++ b/website/docs/cdktf/python/d/dms_certificate.html.markdown @@ -35,8 +35,9 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -The following arguments are required: +This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `certificate_id` - (Required) A customer-assigned name for the certificate. Identifiers must begin with a letter and must contain only ASCII letters, digits, and hyphens. They can't end with a hyphen or contain two consecutive hyphens. ## Attribute Reference @@ -53,4 +54,4 @@ This data source exports the following attributes in addition to the arguments a * `valid_from_date` - The beginning date that the certificate is valid. * `valid_to_date` - The final date that the certificate is valid. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/dms_endpoint.html.markdown b/website/docs/cdktf/python/d/dms_endpoint.html.markdown index e0d1e4fbca9f..5b27fd406a03 100644 --- a/website/docs/cdktf/python/d/dms_endpoint.html.markdown +++ b/website/docs/cdktf/python/d/dms_endpoint.html.markdown @@ -37,6 +37,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `endpoint_id` - (Required) Database endpoint identifier. Identifiers must contain from 1 to 255 alphanumeric characters or hyphens, begin with a letter, contain only ASCII letters, digits, and hyphens, not end with a hyphen, and not contain two consecutive hyphens. ## Attribute Reference @@ -45,4 +46,4 @@ This data source exports the following attributes in addition to the arguments a See the [`aws_dms_endpoint` resource](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/dms_endpoint) for details on the returned attributes - they are identical. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/dms_replication_instance.html.markdown b/website/docs/cdktf/python/d/dms_replication_instance.html.markdown index 40763b59b882..335a698f89bf 100644 --- a/website/docs/cdktf/python/d/dms_replication_instance.html.markdown +++ b/website/docs/cdktf/python/d/dms_replication_instance.html.markdown @@ -33,8 +33,9 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -The following arguments are required: +This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `replication_instance_id` - (Required) The replication instance identifier. ## Attribute Reference @@ -57,4 +58,4 @@ This data source exports the following attributes in addition to the arguments a * `replication_subnet_group_id` - A subnet group to associate with the replication instance. 
* `vpc_security_group_ids` - A set of VPC security group IDs that are used with the replication instance. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/dms_replication_subnet_group.html.markdown b/website/docs/cdktf/python/d/dms_replication_subnet_group.html.markdown index f7c15bafeff8..0666c4254a6a 100644 --- a/website/docs/cdktf/python/d/dms_replication_subnet_group.html.markdown +++ b/website/docs/cdktf/python/d/dms_replication_subnet_group.html.markdown @@ -35,8 +35,9 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -The following arguments are required: +This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `replication_subnet_group_id` - (Required) Name for the replication subnet group. This value is stored as a lowercase string. It must contain no more than 255 alphanumeric characters, periods, spaces, underscores, or hyphens and cannot be `default`. ## Attribute Reference @@ -47,4 +48,4 @@ This data source exports the following attributes in addition to the arguments a * `subnet_ids` - List of at least 2 EC2 subnet IDs for the subnet group. The subnets must cover at least 2 availability zones. * `vpc_id` - The ID of the VPC the subnet group is in. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/dms_replication_task.html.markdown b/website/docs/cdktf/python/d/dms_replication_task.html.markdown index 17f81c79e58a..17db0d8cbdea 100644 --- a/website/docs/cdktf/python/d/dms_replication_task.html.markdown +++ b/website/docs/cdktf/python/d/dms_replication_task.html.markdown @@ -35,15 +35,11 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -The following arguments are required: +This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `replication_task_id` - (Required) The replication task identifier. - - Must contain from 1 to 255 alphanumeric characters or hyphens. - - First character must be a letter. - - Cannot end with a hyphen. - - Cannot contain two consecutive hyphens. - ## Attribute Reference This data source exports the following attributes in addition to the arguments above: @@ -60,4 +56,4 @@ This data source exports the following attributes in addition to the arguments a * `target_endpoint_arn` - The Amazon Resource Name (ARN) string that uniquely identifies the target endpoint. * `replication_task_arn` - The Amazon Resource Name (ARN) for the replication task. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/docdb_engine_version.html.markdown b/website/docs/cdktf/python/d/docdb_engine_version.html.markdown index 30a9a2322c12..4f6b754fe57b 100644 --- a/website/docs/cdktf/python/d/docdb_engine_version.html.markdown +++ b/website/docs/cdktf/python/d/docdb_engine_version.html.markdown @@ -35,6 +35,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `engine` - (Optional) DB engine. (Default: `docdb`) * `parameter_group_family` - (Optional) Name of a specific DB parameter group family. An example parameter group family is `docdb3.6`. * `preferred_versions` - (Optional) Ordered list of preferred engine versions. The first match in this list will be returned. If no preferred matches are found and the original search returned more than one result, an error is returned. If both the `version` and `preferred_versions` arguments are not configured, the data source will return the default version for the engine. @@ -50,4 +51,4 @@ This data source exports the following attributes in addition to the arguments a * `valid_upgrade_targets` - A set of engine versions that this database engine version can be upgraded to. * `version_description` - Description of the database engine version. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/docdb_orderable_db_instance.html.markdown b/website/docs/cdktf/python/d/docdb_orderable_db_instance.html.markdown index e5e0b6c88573..d3e2f4e2f791 100644 --- a/website/docs/cdktf/python/d/docdb_orderable_db_instance.html.markdown +++ b/website/docs/cdktf/python/d/docdb_orderable_db_instance.html.markdown @@ -38,6 +38,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `engine` - (Optional) DB engine. Default: `docdb` * `engine_version` - (Optional) Version of the DB engine. * `instance_class` - (Optional) DB instance class. Examples of classes are `db.r5.12xlarge`, `db.r5.24xlarge`, `db.r5.2xlarge`, `db.r5.4xlarge`, `db.r5.large`, `db.r5.xlarge`, and `db.t3.medium`. (Conflicts with `preferred_instance_classes`.) @@ -51,4 +52,4 @@ This data source exports the following attributes in addition to the arguments a * `availability_zones` - Availability zones where the instance is available. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/dx_connection.html.markdown b/website/docs/cdktf/python/d/dx_connection.html.markdown index d976630ca88c..d64d96df1b3a 100644 --- a/website/docs/cdktf/python/d/dx_connection.html.markdown +++ b/website/docs/cdktf/python/d/dx_connection.html.markdown @@ -35,6 +35,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the connection to retrieve. ## Attribute Reference @@ -53,4 +54,4 @@ This data source exports the following attributes in addition to the arguments a * `tags` - Map of tags for the resource. * `vlan_id` - The VLAN ID. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/dx_location.html.markdown b/website/docs/cdktf/python/d/dx_location.html.markdown index d210ee0d89aa..301d6981f3e7 100644 --- a/website/docs/cdktf/python/d/dx_location.html.markdown +++ b/website/docs/cdktf/python/d/dx_location.html.markdown @@ -38,6 +38,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `location_code` - (Required) Code for the location to retrieve. ## Attribute Reference @@ -49,4 +50,4 @@ This data source exports the following attributes in addition to the arguments a * `available_providers` - Names of the service providers for the location. * `location_name` - Name of the location. This includes the name of the colocation partner and the physical site of the building. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/dx_locations.html.markdown b/website/docs/cdktf/python/d/dx_locations.html.markdown index 546cb6f52353..0875ea1e944b 100644 --- a/website/docs/cdktf/python/d/dx_locations.html.markdown +++ b/website/docs/cdktf/python/d/dx_locations.html.markdown @@ -34,7 +34,9 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -This data source does not support any arguments. +This data source supports the following arguments: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). ## Attribute Reference @@ -42,4 +44,4 @@ This data source exports the following attributes in addition to the arguments a * `location_codes` - Code for the locations. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/dx_router_configuration.html.markdown b/website/docs/cdktf/python/d/dx_router_configuration.html.markdown index a07de6d8eee7..47d9b95ebc85 100644 --- a/website/docs/cdktf/python/d/dx_router_configuration.html.markdown +++ b/website/docs/cdktf/python/d/dx_router_configuration.html.markdown @@ -36,8 +36,9 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -This resource supports the following arguments: +This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
* `virtual_interface_id` - (Required) ID of the Direct Connect Virtual Interface * `router_type_identifier` - (Required) ID of the Router Type. For example: `CiscoSystemsInc-2900SeriesRouters-IOS124` @@ -74,4 +75,4 @@ A `router` block supports the following attributes: * `xslt_template_name` - Router XSLT Template Name * `xslt_template_name_for_mac` - Router XSLT Template Name for MacSec - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/dynamodb_table.html.markdown b/website/docs/cdktf/python/d/dynamodb_table.html.markdown index 4f19bda5bd87..26198cb64540 100644 --- a/website/docs/cdktf/python/d/dynamodb_table.html.markdown +++ b/website/docs/cdktf/python/d/dynamodb_table.html.markdown @@ -35,6 +35,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the DynamoDB table. ## Attribute Reference @@ -44,4 +45,4 @@ This data source exports the following attributes in addition to the arguments a See the [DynamoDB Table Resource](/docs/providers/aws/r/dynamodb_table.html) for details on the returned attributes - they are identical. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/dynamodb_table_item.html.markdown b/website/docs/cdktf/python/d/dynamodb_table_item.html.markdown index 42cc09349a0d..8a9b9f4f69d6 100644 --- a/website/docs/cdktf/python/d/dynamodb_table_item.html.markdown +++ b/website/docs/cdktf/python/d/dynamodb_table_item.html.markdown @@ -49,6 +49,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `expression_attribute_name` - (Optional) - One or more substitution tokens for attribute names in an expression. Use the `#` character in an expression to dereference an attribute name. * `projection_expression` - (Optional) A string that identifies one or more attributes to retrieve from the table. These attributes can include scalars, sets, or elements of a JSON document. The attributes in the expression must be separated by commas. If no attribute names are specified, then all attributes are returned. If any of the requested attributes are not found, they do not appear in the result. @@ -59,4 +60,4 @@ This data source exports the following attributes in addition to the arguments a * `item` - JSON representation of a map of attribute names to [AttributeValue](https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_AttributeValue.html) objects, as specified by ProjectionExpression. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/dynamodb_tables.html.markdown b/website/docs/cdktf/python/d/dynamodb_tables.html.markdown index effd3eb3257d..1539a3a5c33a 100644 --- a/website/docs/cdktf/python/d/dynamodb_tables.html.markdown +++ b/website/docs/cdktf/python/d/dynamodb_tables.html.markdown @@ -24,7 +24,7 @@ from cdktf import TerraformOutput, TerraformStack # Provider bindings are generated by running `cdktf get`. # See https://cdk.tf/provider-generation for more details. # -from imports.aws. import DataAwsDynamodbTables +from imports.aws.data_aws_dynamodb_tables import DataAwsDynamodbTables class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) @@ -36,7 +36,9 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -This data source does not support any arguments. +This data source supports the following arguments: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). ## Attribute Reference @@ -44,4 +46,4 @@ This data source exports the following attributes in addition to the arguments a * `names` - A list of all the DynamoDB table names found. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/ebs_default_kms_key.html.markdown b/website/docs/cdktf/python/d/ebs_default_kms_key.html.markdown index 98c9f2c2b984..bd191c2b7054 100644 --- a/website/docs/cdktf/python/d/ebs_default_kms_key.html.markdown +++ b/website/docs/cdktf/python/d/ebs_default_kms_key.html.markdown @@ -37,7 +37,9 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -This data source does not support any arguments. 
+This data source supports the following arguments: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). ## Attribute Reference @@ -52,4 +54,4 @@ This data source exports the following attributes in addition to the arguments a - `read` - (Default `20m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/ebs_encryption_by_default.html.markdown b/website/docs/cdktf/python/d/ebs_encryption_by_default.html.markdown index 9036e7e382b4..5493827dfbbb 100644 --- a/website/docs/cdktf/python/d/ebs_encryption_by_default.html.markdown +++ b/website/docs/cdktf/python/d/ebs_encryption_by_default.html.markdown @@ -31,7 +31,9 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -This data source does not support any arguments. +This data source supports the following arguments: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
## Attribute Reference @@ -46,4 +48,4 @@ This data source exports the following attributes in addition to the arguments a - `read` - (Default `20m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/ebs_snapshot.html.markdown b/website/docs/cdktf/python/d/ebs_snapshot.html.markdown index 43986c13734c..5ecbb9b3545c 100644 --- a/website/docs/cdktf/python/d/ebs_snapshot.html.markdown +++ b/website/docs/cdktf/python/d/ebs_snapshot.html.markdown @@ -44,17 +44,12 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `most_recent` - (Optional) If more than one result is returned, use the most recent snapshot. - * `owners` - (Optional) Returns the snapshots owned by the specified owner id. Multiple owners can be specified. - * `snapshot_ids` - (Optional) Returns information on a specific snapshot_id. - * `restorable_by_user_ids` - (Optional) One or more AWS accounts IDs that can create volumes from the snapshot. - -* `filter` - (Optional) One or more name/value pairs to filter off of. There are -several valid keys, for a full reference, check out -[describe-snapshots in the AWS CLI reference][1]. +* `filter` - (Optional) One or more name/value pairs to filter off of. There are several valid keys, for a full reference, check out [describe-snapshots in the AWS CLI reference][1]. 
## Attribute Reference @@ -85,4 +80,4 @@ This data source exports the following attributes in addition to the arguments a [1]: http://docs.aws.amazon.com/cli/latest/reference/ec2/describe-snapshots.html - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/ebs_snapshot_ids.html.markdown b/website/docs/cdktf/python/d/ebs_snapshot_ids.html.markdown index 5033fa6d9c20..cb68c081e20b 100644 --- a/website/docs/cdktf/python/d/ebs_snapshot_ids.html.markdown +++ b/website/docs/cdktf/python/d/ebs_snapshot_ids.html.markdown @@ -44,13 +44,10 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `owners` - (Optional) Returns the snapshots owned by the specified owner id. Multiple owners can be specified. - * `restorable_by_user_ids` - (Optional) One or more AWS accounts IDs that can create volumes from the snapshot. - -* `filter` - (Optional) One or more name/value pairs to filter off of. There are -several valid keys, for a full reference, check out -[describe-volumes in the AWS CLI reference][1]. +* `filter` - (Optional) One or more name/value pairs to filter off of. There are several valid keys, for a full reference, check out [describe-snapshots in the AWS CLI reference][1].
## Attribute Reference @@ -67,4 +64,4 @@ This data source exports the following attributes in addition to the arguments a [1]: http://docs.aws.amazon.com/cli/latest/reference/ec2/describe-snapshots.html - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/ebs_volume.html.markdown b/website/docs/cdktf/python/d/ebs_volume.html.markdown index c4d330cd9cab..0016f04830ea 100644 --- a/website/docs/cdktf/python/d/ebs_volume.html.markdown +++ b/website/docs/cdktf/python/d/ebs_volume.html.markdown @@ -44,6 +44,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `filter` - (Optional) One or more name/value pairs to filter off of. There are several valid keys, for a full reference, check out [describe-volumes in the AWS CLI reference][1]. @@ -69,6 +70,7 @@ This data source exports the following attributes in addition to the arguments a * `throughput` - Throughput that the volume supports, in MiB/s. * `volume_id` - Volume ID (e.g., vol-59fcb34e). * `volume_type` - Type of EBS volume. +* `volume_initialization_rate` - EBS provisioned rate for volume initialization, in MiB/s, at which to download the snapshot blocks from Amazon S3 to the volume. 
## Timeouts @@ -78,4 +80,4 @@ This data source exports the following attributes in addition to the arguments a [1]: http://docs.aws.amazon.com/cli/latest/reference/ec2/describe-volumes.html - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/ebs_volumes.html.markdown b/website/docs/cdktf/python/d/ebs_volumes.html.markdown index ede054265ab1..1d138ed52023 100644 --- a/website/docs/cdktf/python/d/ebs_volumes.html.markdown +++ b/website/docs/cdktf/python/d/ebs_volumes.html.markdown @@ -61,6 +61,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `filter` - (Optional) Custom filter block as described below. * `tags` - (Optional) Map of tags, each pair of which must exactly match a pair on the desired volumes. @@ -110,4 +111,4 @@ This data source exports the following attributes in addition to the arguments a - `read` - (Default `20m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/ec2_capacity_block_offering.html.markdown b/website/docs/cdktf/python/d/ec2_capacity_block_offering.html.markdown index 608a074a61d8..290b8f8c421c 100644 --- a/website/docs/cdktf/python/d/ec2_capacity_block_offering.html.markdown +++ b/website/docs/cdktf/python/d/ec2_capacity_block_offering.html.markdown @@ -37,8 +37,9 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -This resource supports the following arguments: +This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `capacity_duration_hours` - (Required) The amount of time of the Capacity Block reservation in hours. * `end_date_range` - (Optional) The date and time at which the Capacity Block Reservation expires. When a Capacity Reservation expires, the reserved capacity is released and you can no longer launch instances into it. Valid values: [RFC3339 time string](https://tools.ietf.org/html/rfc3339#section-5.8) (`YYYY-MM-DDTHH:MM:SSZ`) * `instance_count` - (Required) The number of instances for which to reserve capacity. @@ -55,4 +56,4 @@ This resource exports the following attributes in addition to the arguments abov * `upfront_fee` - The total price to be paid up front. * `tenancy` - Indicates the tenancy of the Capacity Reservation. Specify either `default` or `dedicated`. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/ec2_client_vpn_endpoint.html.markdown b/website/docs/cdktf/python/d/ec2_client_vpn_endpoint.html.markdown index 04a0c3b9ec11..f6ecee46fa93 100644 --- a/website/docs/cdktf/python/d/ec2_client_vpn_endpoint.html.markdown +++ b/website/docs/cdktf/python/d/ec2_client_vpn_endpoint.html.markdown @@ -60,6 +60,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `client_vpn_endpoint_id` - (Optional) ID of the Client VPN endpoint. * `filter` - (Optional) One or more configuration blocks containing name-values filters. Detailed below. 
* `tags` - (Optional) Map of tags, each pair of which must exactly match a pair on the desired endpoint. @@ -87,12 +88,14 @@ This data source exports the following attributes in addition to the arguments a * `description` - Brief description of the endpoint. * `dns_name` - DNS name to be used by clients when connecting to the Client VPN endpoint. * `dns_servers` - Information about the DNS servers to be used for DNS resolution. +* `endpoint_ip_address_type` - IP address type for the Client VPN endpoint. * `security_group_ids` - IDs of the security groups for the target network associated with the Client VPN endpoint. * `self_service_portal` - Whether the self-service portal for the Client VPN endpoint is enabled. * `self_service_portal_url` - The URL of the self-service portal. * `server_certificate_arn` - The ARN of the server certificate. * `session_timeout_hours` - The maximum VPN session duration time in hours. * `split_tunnel` - Whether split-tunnel is enabled in the AWS Client VPN endpoint. +* `traffic_ip_address_type` - IP address type for traffic within the Client VPN tunnel. * `transport_protocol` - Transport protocol used by the Client VPN endpoint. * `vpc_id` - ID of the VPC associated with the Client VPN endpoint. * `vpn_port` - Port number for the Client VPN endpoint. 
@@ -103,4 +106,4 @@ This data source exports the following attributes in addition to the arguments a - `read` - (Default `20m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/ec2_coip_pool.html.markdown b/website/docs/cdktf/python/d/ec2_coip_pool.html.markdown index 80ace3bd5591..d3d4e908a973 100644 --- a/website/docs/cdktf/python/d/ec2_coip_pool.html.markdown +++ b/website/docs/cdktf/python/d/ec2_coip_pool.html.markdown @@ -44,6 +44,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `local_gateway_route_table_id` - (Optional) Local Gateway Route Table Id assigned to desired COIP Pool * `pool_id` - (Optional) ID of the specific COIP Pool to retrieve. * `tags` - (Optional) Mapping of tags, each pair of which must exactly match @@ -75,4 +76,4 @@ This data source exports the following attributes in addition to the arguments a - `read` - (Default `20m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/ec2_coip_pools.html.markdown b/website/docs/cdktf/python/d/ec2_coip_pools.html.markdown index 0fece79482d8..24d8e4480b6f 100644 --- a/website/docs/cdktf/python/d/ec2_coip_pools.html.markdown +++ b/website/docs/cdktf/python/d/ec2_coip_pools.html.markdown @@ -40,6 +40,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `tags` - (Optional) Mapping of tags, each pair of which must exactly match a pair on the desired aws_ec2_coip_pools. * `filter` - (Optional) Custom filter block as described below. @@ -65,4 +66,4 @@ This data source exports the following attributes in addition to the arguments a - `read` - (Default `20m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/ec2_host.html.markdown b/website/docs/cdktf/python/d/ec2_host.html.markdown index 6f2640c9baf6..395a18f97e11 100644 --- a/website/docs/cdktf/python/d/ec2_host.html.markdown +++ b/website/docs/cdktf/python/d/ec2_host.html.markdown @@ -65,6 +65,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `filter` - (Optional) Configuration block. Detailed below. * `host_id` - (Optional) ID of the Dedicated Host. 
@@ -104,4 +105,4 @@ This data source exports the following attributes in addition to the arguments a - `read` - (Default `20m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/ec2_instance_type.html.markdown b/website/docs/cdktf/python/d/ec2_instance_type.html.markdown index d7da6fc96794..a84fef3d3753 100644 --- a/website/docs/cdktf/python/d/ec2_instance_type.html.markdown +++ b/website/docs/cdktf/python/d/ec2_instance_type.html.markdown @@ -36,6 +36,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `instance_type` - (Required) Instance ## Attribute Reference @@ -142,4 +143,4 @@ This data source exports the following attributes in addition to the arguments a - `read` - (Default `20m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/ec2_instance_type_offering.html.markdown b/website/docs/cdktf/python/d/ec2_instance_type_offering.html.markdown index ce0b0bc648af..0601531f8292 100644 --- a/website/docs/cdktf/python/d/ec2_instance_type_offering.html.markdown +++ b/website/docs/cdktf/python/d/ec2_instance_type_offering.html.markdown @@ -40,6 +40,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `filter` - (Optional) One or more configuration blocks containing name-values filters. 
See the [EC2 API Reference](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeInstanceTypeOfferings.html) for supported filters. Detailed below. * `location_type` - (Optional) Location type. Defaults to `region`. Valid values: `availability-zone`, `availability-zone-id`, and `region`. * `preferred_instance_types` - (Optional) Ordered list of preferred EC2 Instance Types. The first match in this list will be returned. If no preferred matches are found and the original search returned more than one result, an error is returned. @@ -55,6 +56,7 @@ This data source exports the following attributes in addition to the arguments a * `id` - EC2 Instance Type. * `instance_type` - EC2 Instance Type. +* `location` - Identifier for the location. ## Timeouts @@ -62,4 +64,4 @@ This data source exports the following attributes in addition to the arguments a - `read` - (Default `20m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/ec2_instance_type_offerings.html.markdown b/website/docs/cdktf/python/d/ec2_instance_type_offerings.html.markdown index 448552b802f2..0a2f3f0b7a12 100644 --- a/website/docs/cdktf/python/d/ec2_instance_type_offerings.html.markdown +++ b/website/docs/cdktf/python/d/ec2_instance_type_offerings.html.markdown @@ -43,6 +43,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `filter` - (Optional) One or more configuration blocks containing name-values filters. See the [EC2 API Reference](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeInstanceTypeOfferings.html) for supported filters. Detailed below. 
* `location_type` - (Optional) Location type. Defaults to `region`. Valid values: `availability-zone`, `availability-zone-id`, and `region`. @@ -68,4 +69,4 @@ Note that the indexes of Instance Type Offering instance types, locations and lo - `read` - (Default `20m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/ec2_instance_types.html.markdown b/website/docs/cdktf/python/d/ec2_instance_types.html.markdown index edb324c158f0..8d17ba582d1e 100644 --- a/website/docs/cdktf/python/d/ec2_instance_types.html.markdown +++ b/website/docs/cdktf/python/d/ec2_instance_types.html.markdown @@ -48,6 +48,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `filter` - (Optional) One or more configuration blocks containing name-values filters. See the [EC2 API Reference](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeInstanceTypes.html) for supported filters. Detailed below. 
### filter Argument Reference @@ -68,4 +69,4 @@ This data source exports the following attributes in addition to the arguments a - `read` - (Default `20m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/ec2_local_gateway.html.markdown b/website/docs/cdktf/python/d/ec2_local_gateway.html.markdown index 3e8ffeb6dfb5..1ad490e4c226 100644 --- a/website/docs/cdktf/python/d/ec2_local_gateway.html.markdown +++ b/website/docs/cdktf/python/d/ec2_local_gateway.html.markdown @@ -40,6 +40,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `filter` - (Optional) Custom filter block as described below. * `id` - (Optional) Id of the specific Local Gateway to retrieve. * `state` - (Optional) Current state of the desired Local Gateway. @@ -79,4 +80,4 @@ The following attributes are additionally exported: - `read` - (Default `20m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/ec2_local_gateway_route_table.html.markdown b/website/docs/cdktf/python/d/ec2_local_gateway_route_table.html.markdown index 177c8a5ce023..fe9e72a72eba 100644 --- a/website/docs/cdktf/python/d/ec2_local_gateway_route_table.html.markdown +++ b/website/docs/cdktf/python/d/ec2_local_gateway_route_table.html.markdown @@ -43,6 +43,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `local_gateway_route_table_id` - (Optional) Local Gateway Route Table Id assigned to desired local gateway route table * `local_gateway_id` - (Optional) ID of the specific local gateway route table to retrieve. * `outpost_arn` - (Optional) ARN of the Outpost the local gateway route table is associated with. @@ -73,4 +74,4 @@ This data source exports no additional attributes. - `read` - (Default `20m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/ec2_local_gateway_route_tables.html.markdown b/website/docs/cdktf/python/d/ec2_local_gateway_route_tables.html.markdown index 94bde1d043e5..6aa1092b7a2a 100644 --- a/website/docs/cdktf/python/d/ec2_local_gateway_route_tables.html.markdown +++ b/website/docs/cdktf/python/d/ec2_local_gateway_route_tables.html.markdown @@ -40,6 +40,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `tags` - (Optional) Mapping of tags, each pair of which must exactly match a pair on the desired local gateway route table. * `filter` - (Optional) Custom filter block as described below. 
@@ -66,4 +67,4 @@ This data source exports the following attributes in addition to the arguments a - `read` - (Default `20m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/ec2_local_gateway_virtual_interface.html.markdown b/website/docs/cdktf/python/d/ec2_local_gateway_virtual_interface.html.markdown index 3f756b150d16..ed3b960b89c1 100644 --- a/website/docs/cdktf/python/d/ec2_local_gateway_virtual_interface.html.markdown +++ b/website/docs/cdktf/python/d/ec2_local_gateway_virtual_interface.html.markdown @@ -42,6 +42,7 @@ class MyConvertedCode(TerraformStack): The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `filter` - (Optional) One or more configuration blocks containing name-values filters. See the [EC2 API Reference](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeLocalGatewayVirtualInterfaces.html) for supported filters. Detailed below. * `id` - (Optional) Identifier of EC2 Local Gateway Virtual Interface. * `tags` - (Optional) Key-value map of resource tags, each pair of which must exactly match a pair on the desired local gateway route table. 
@@ -70,4 +71,4 @@ This data source exports the following attributes in addition to the arguments a - `read` - (Default `20m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/ec2_local_gateway_virtual_interface_group.html.markdown b/website/docs/cdktf/python/d/ec2_local_gateway_virtual_interface_group.html.markdown index 7567f1faa1ed..d3dc17de1012 100644 --- a/website/docs/cdktf/python/d/ec2_local_gateway_virtual_interface_group.html.markdown +++ b/website/docs/cdktf/python/d/ec2_local_gateway_virtual_interface_group.html.markdown @@ -35,6 +35,7 @@ class MyConvertedCode(TerraformStack): The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `filter` - (Optional) One or more configuration blocks containing name-values filters. See the [EC2 API Reference](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeLocalGatewayVirtualInterfaceGroups.html) for supported filters. Detailed below. * `id` - (Optional) Identifier of EC2 Local Gateway Virtual Interface Group. * `local_gateway_id` - (Optional) Identifier of EC2 Local Gateway. 
@@ -59,4 +60,4 @@ This data source exports the following attributes in addition to the arguments a - `read` - (Default `20m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/ec2_local_gateway_virtual_interface_groups.html.markdown b/website/docs/cdktf/python/d/ec2_local_gateway_virtual_interface_groups.html.markdown index 530a7e604957..bcdfdac2934c 100644 --- a/website/docs/cdktf/python/d/ec2_local_gateway_virtual_interface_groups.html.markdown +++ b/website/docs/cdktf/python/d/ec2_local_gateway_virtual_interface_groups.html.markdown @@ -33,6 +33,7 @@ class MyConvertedCode(TerraformStack): The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `filter` - (Optional) One or more configuration blocks containing name-values filters. See the [EC2 API Reference](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeLocalGatewayVirtualInterfaceGroups.html) for supported filters. Detailed below. * `tags` - (Optional) Key-value map of resource tags, each pair of which must exactly match a pair on the desired local gateway route table. 
@@ -57,4 +58,4 @@ This data source exports the following attributes in addition to the arguments a - `read` - (Default `20m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/ec2_local_gateways.html.markdown b/website/docs/cdktf/python/d/ec2_local_gateways.html.markdown index 9664639b8ab2..9fe859af9061 100644 --- a/website/docs/cdktf/python/d/ec2_local_gateways.html.markdown +++ b/website/docs/cdktf/python/d/ec2_local_gateways.html.markdown @@ -44,6 +44,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `tags` - (Optional) Mapping of tags, each pair of which must exactly match a pair on the desired local_gateways. * `filter` - (Optional) Custom filter block as described below. 
@@ -70,4 +71,4 @@ This data source exports the following attributes in addition to the arguments a - `read` - (Default `20m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/ec2_managed_prefix_list.html.markdown b/website/docs/cdktf/python/d/ec2_managed_prefix_list.html.markdown index aeb57e5256e0..2f877f3f411a 100644 --- a/website/docs/cdktf/python/d/ec2_managed_prefix_list.html.markdown +++ b/website/docs/cdktf/python/d/ec2_managed_prefix_list.html.markdown @@ -32,7 +32,7 @@ class MyConvertedCode(TerraformStack): super().__init__(scope, name) current = DataAwsRegion(self, "current") DataAwsEc2ManagedPrefixList(self, "example", - name="com.amazonaws.${" + current.name + "}.dynamodb" + name="com.amazonaws.${" + current.region + "}.dynamodb" ) ``` @@ -63,6 +63,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `id` - (Optional) ID of the prefix list to select. * `name` - (Optional) Name of the prefix list to select. * `filter` - (Optional) Configuration block(s) for filtering. Detailed below. 
@@ -97,4 +98,4 @@ This data source exports the following attributes in addition to the arguments a - `read` - (Default `20m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/ec2_managed_prefix_lists.html.markdown b/website/docs/cdktf/python/d/ec2_managed_prefix_lists.html.markdown index 71f26a213a8a..e10274ce9a7d 100644 --- a/website/docs/cdktf/python/d/ec2_managed_prefix_lists.html.markdown +++ b/website/docs/cdktf/python/d/ec2_managed_prefix_lists.html.markdown @@ -53,6 +53,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `filter` - (Optional) Custom filter block as described below. * `tags` - (Optional) Map of tags, each pair of which must exactly match a pair on the desired . @@ -78,4 +79,4 @@ This data source exports the following attributes in addition to the arguments a - `read` - (Default `20m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/ec2_network_insights_analysis.html.markdown b/website/docs/cdktf/python/d/ec2_network_insights_analysis.html.markdown index 0a10774fba4a..5ab8e87dc573 100644 --- a/website/docs/cdktf/python/d/ec2_network_insights_analysis.html.markdown +++ b/website/docs/cdktf/python/d/ec2_network_insights_analysis.html.markdown @@ -35,6 +35,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `network_insights_analysis_id` - (Optional) ID of the Network Insights Analysis to select. * `filter` - (Optional) Configuration block(s) for filtering. Detailed below. @@ -66,4 +67,4 @@ This data source exports the following attributes in addition to the arguments a * `status_message` - Message to provide more context when the `status` is `failed`. * `warning_message` - Warning message. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/ec2_network_insights_path.html.markdown b/website/docs/cdktf/python/d/ec2_network_insights_path.html.markdown index d83ee592bc3c..e93d27373f25 100644 --- a/website/docs/cdktf/python/d/ec2_network_insights_path.html.markdown +++ b/website/docs/cdktf/python/d/ec2_network_insights_path.html.markdown @@ -35,6 +35,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `network_insights_path_id` - (Optional) ID of the Network Insights Path to select. * `filter` - (Optional) Configuration block(s) for filtering. Detailed below. @@ -66,4 +67,4 @@ This data source exports the following attributes in addition to the arguments a * `source_ip` - IP address of the AWS resource that is the source of the path. * `tags` - Map of tags assigned to the resource. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/ec2_public_ipv4_pool.html.markdown b/website/docs/cdktf/python/d/ec2_public_ipv4_pool.html.markdown index 9955fbfc571f..463a1eea97c1 100644 --- a/website/docs/cdktf/python/d/ec2_public_ipv4_pool.html.markdown +++ b/website/docs/cdktf/python/d/ec2_public_ipv4_pool.html.markdown @@ -35,8 +35,9 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -The following arguments are required: +This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `pool_id` - (Required) AWS resource IDs of a public IPv4 pool (as a string) for which this data source will fetch detailed information. ## Attribute Reference @@ -54,4 +55,4 @@ This data source exports the following attributes in addition to the arguments a * `total_address_count` - Total number of addresses in the pool. * `total_available_address_count` - Total number of available addresses in the pool. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/ec2_public_ipv4_pools.html.markdown b/website/docs/cdktf/python/d/ec2_public_ipv4_pools.html.markdown index 60be6e207ed2..36925dabf97e 100644 --- a/website/docs/cdktf/python/d/ec2_public_ipv4_pools.html.markdown +++ b/website/docs/cdktf/python/d/ec2_public_ipv4_pools.html.markdown @@ -58,6 +58,7 @@ class MyConvertedCode(TerraformStack): The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `filter` - (Optional) Custom filter block as described below. * `tags` - (Optional) Map of tags, each pair of which must exactly match a pair on the desired pools. @@ -73,4 +74,4 @@ This data source exports the following attributes in addition to the arguments a * `pool_ids` - List of all the pool IDs found. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/ec2_serial_console_access.html.markdown b/website/docs/cdktf/python/d/ec2_serial_console_access.html.markdown index 6a3149a6b512..eb162a0848e7 100644 --- a/website/docs/cdktf/python/d/ec2_serial_console_access.html.markdown +++ b/website/docs/cdktf/python/d/ec2_serial_console_access.html.markdown @@ -31,7 +31,9 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -This data source does not support any arguments. +This data source supports the following arguments: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
## Attribute Reference @@ -46,4 +48,4 @@ This data source exports the following attributes in addition to the arguments a - `read` - (Default `20m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/ec2_spot_price.html.markdown b/website/docs/cdktf/python/d/ec2_spot_price.html.markdown index 89ef4f79ea72..5bef03fabe25 100644 --- a/website/docs/cdktf/python/d/ec2_spot_price.html.markdown +++ b/website/docs/cdktf/python/d/ec2_spot_price.html.markdown @@ -41,6 +41,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `instance_type` - (Optional) Type of instance for which to query Spot Price information. * `availability_zone` - (Optional) Availability zone in which to query Spot price information. * `filter` - (Optional) One or more configuration blocks containing name-values filters. See the [EC2 API Reference](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeSpotPriceHistory.html) for supported filters. Detailed below. 
@@ -64,4 +65,4 @@ This data source exports the following attributes in addition to the arguments a - `read` - (Default `20m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/ec2_transit_gateway.html.markdown b/website/docs/cdktf/python/d/ec2_transit_gateway.html.markdown index 48178a22df76..5195ce00d3af 100644 --- a/website/docs/cdktf/python/d/ec2_transit_gateway.html.markdown +++ b/website/docs/cdktf/python/d/ec2_transit_gateway.html.markdown @@ -60,6 +60,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `filter` - (Optional) One or more configuration blocks containing name-values filters. Detailed below. * `id` - (Optional) Identifier of the EC2 Transit Gateway. @@ -95,4 +96,4 @@ This data source exports the following attributes in addition to the arguments a - `read` - (Default `20m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/ec2_transit_gateway_attachment.html.markdown b/website/docs/cdktf/python/d/ec2_transit_gateway_attachment.html.markdown index 5a9e128467b1..ca7737f62109 100644 --- a/website/docs/cdktf/python/d/ec2_transit_gateway_attachment.html.markdown +++ b/website/docs/cdktf/python/d/ec2_transit_gateway_attachment.html.markdown @@ -42,6 +42,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `filter` - (Optional) One or more configuration blocks containing name-values filters. Detailed below. * `transit_gateway_attachment_id` - (Optional) ID of the attachment. @@ -65,4 +66,4 @@ This data source exports the following attributes in addition to the arguments a * `transit_gateway_id` - ID of the transit gateway. * `transit_gateway_owner_id` - The ID of the AWS account that owns the transit gateway. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/ec2_transit_gateway_attachments.html.markdown b/website/docs/cdktf/python/d/ec2_transit_gateway_attachments.html.markdown index 1014453f5a6f..069df2885063 100644 --- a/website/docs/cdktf/python/d/ec2_transit_gateway_attachments.html.markdown +++ b/website/docs/cdktf/python/d/ec2_transit_gateway_attachments.html.markdown @@ -56,6 +56,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `filter` - (Optional) One or more configuration blocks containing name-values filters. Detailed below. 
### filter Argument Reference @@ -78,4 +79,4 @@ This data source exports the following attributes in addition to the arguments a - `read` - (Default `20m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/ec2_transit_gateway_connect.html.markdown b/website/docs/cdktf/python/d/ec2_transit_gateway_connect.html.markdown index cf9e568ac543..0bc551a41c11 100644 --- a/website/docs/cdktf/python/d/ec2_transit_gateway_connect.html.markdown +++ b/website/docs/cdktf/python/d/ec2_transit_gateway_connect.html.markdown @@ -60,6 +60,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `filter` - (Optional) One or more configuration blocks containing name-values filters. Detailed below. * `transit_gateway_connect_id` - (Optional) Identifier of the EC2 Transit Gateway Connect. @@ -83,4 +84,4 @@ This data source exports the following attributes in addition to the arguments a - `read` - (Default `20m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/ec2_transit_gateway_connect_peer.html.markdown b/website/docs/cdktf/python/d/ec2_transit_gateway_connect_peer.html.markdown index 2c95ac61597b..4f9182054c04 100644 --- a/website/docs/cdktf/python/d/ec2_transit_gateway_connect_peer.html.markdown +++ b/website/docs/cdktf/python/d/ec2_transit_gateway_connect_peer.html.markdown @@ -60,6 +60,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `filter` - (Optional) One or more configuration blocks containing name-values filters. Detailed below. * `transit_gateway_connect_peer_id` - (Optional) Identifier of the EC2 Transit Gateway Connect Peer. @@ -88,4 +89,4 @@ This data source exports the following attributes in addition to the arguments a - `read` - (Default `20m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/ec2_transit_gateway_dx_gateway_attachment.html.markdown b/website/docs/cdktf/python/d/ec2_transit_gateway_dx_gateway_attachment.html.markdown index 720af43238ee..c80088133022 100644 --- a/website/docs/cdktf/python/d/ec2_transit_gateway_dx_gateway_attachment.html.markdown +++ b/website/docs/cdktf/python/d/ec2_transit_gateway_dx_gateway_attachment.html.markdown @@ -38,6 +38,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `transit_gateway_id` - (Optional) Identifier of the EC2 Transit Gateway. * `dx_gateway_id` - (Optional) Identifier of the Direct Connect Gateway. * `filter` - (Optional) Configuration block(s) for filtering. Detailed below. 
@@ -64,4 +65,4 @@ This data source exports the following attributes in addition to the arguments a - `read` - (Default `20m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/ec2_transit_gateway_multicast_domain.html.markdown b/website/docs/cdktf/python/d/ec2_transit_gateway_multicast_domain.html.markdown index 2908d70de501..aec82a294168 100644 --- a/website/docs/cdktf/python/d/ec2_transit_gateway_multicast_domain.html.markdown +++ b/website/docs/cdktf/python/d/ec2_transit_gateway_multicast_domain.html.markdown @@ -60,6 +60,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `filter` - (Optional) One or more configuration blocks containing name-values filters. Detailed below. * `transit_gateway_multicast_domain_id` - (Optional) Identifier of the EC2 Transit Gateway Multicast Domain. 
@@ -100,4 +101,4 @@ This data source exports the following attributes in addition to the arguments a - `read` - (Default `20m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/ec2_transit_gateway_peering_attachment.html.markdown b/website/docs/cdktf/python/d/ec2_transit_gateway_peering_attachment.html.markdown index abfecd2ef391..ddee860cf08e 100644 --- a/website/docs/cdktf/python/d/ec2_transit_gateway_peering_attachment.html.markdown +++ b/website/docs/cdktf/python/d/ec2_transit_gateway_peering_attachment.html.markdown @@ -60,6 +60,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `filter` - (Optional) One or more configuration blocks containing name-values filters. Detailed below. * `id` - (Optional) Identifier of the EC2 Transit Gateway Peering Attachment. 
* `tags` - (Optional) Mapping of tags, each pair of which must exactly match @@ -89,4 +90,4 @@ This data source exports the following attributes in addition to the arguments a - `read` - (Default `20m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/ec2_transit_gateway_peering_attachments.html.markdown b/website/docs/cdktf/python/d/ec2_transit_gateway_peering_attachments.html.markdown index d3e6ad97a070..fc74b2c4967a 100644 --- a/website/docs/cdktf/python/d/ec2_transit_gateway_peering_attachments.html.markdown +++ b/website/docs/cdktf/python/d/ec2_transit_gateway_peering_attachments.html.markdown @@ -69,6 +69,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `filter` - (Optional) One or more configuration blocks containing name-values filters. Detailed below. 
### filter Argument Reference @@ -91,4 +92,4 @@ This data source exports the following attributes in addition to the arguments a - `read` - (Default `20m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/ec2_transit_gateway_route_table.html.markdown b/website/docs/cdktf/python/d/ec2_transit_gateway_route_table.html.markdown index ae8c4d40df6b..1dc908d3d152 100644 --- a/website/docs/cdktf/python/d/ec2_transit_gateway_route_table.html.markdown +++ b/website/docs/cdktf/python/d/ec2_transit_gateway_route_table.html.markdown @@ -63,6 +63,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `filter` - (Optional) One or more configuration blocks containing name-values filters. Detailed below. * `id` - (Optional) Identifier of the EC2 Transit Gateway Route Table. 
@@ -88,4 +89,4 @@ This data source exports the following attributes in addition to the arguments a - `read` - (Default `20m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/ec2_transit_gateway_route_table_associations.html.markdown b/website/docs/cdktf/python/d/ec2_transit_gateway_route_table_associations.html.markdown index d1dc9a49aa42..023b4679d974 100644 --- a/website/docs/cdktf/python/d/ec2_transit_gateway_route_table_associations.html.markdown +++ b/website/docs/cdktf/python/d/ec2_transit_gateway_route_table_associations.html.markdown @@ -41,6 +41,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `filter` - (Optional) Custom filter block as described below. More complex filters can be expressed using one or more `filter` sub-blocks, @@ -59,4 +60,4 @@ This data source exports the following attributes in addition to the arguments a * `id` - AWS Region. * `ids` - Set of Transit Gateway Route Table Association identifiers. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/ec2_transit_gateway_route_table_propagations.html.markdown b/website/docs/cdktf/python/d/ec2_transit_gateway_route_table_propagations.html.markdown index 9ef4c5a4bfe7..cd29a4720a8d 100644 --- a/website/docs/cdktf/python/d/ec2_transit_gateway_route_table_propagations.html.markdown +++ b/website/docs/cdktf/python/d/ec2_transit_gateway_route_table_propagations.html.markdown @@ -37,6 +37,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `transit_gateway_route_table_id` - (Required) Identifier of EC2 Transit Gateway Route Table. * `filter` - (Optional) Custom filter block as described below. @@ -56,4 +57,4 @@ This data source exports the following attributes in addition to the arguments a * `id` - AWS Region. * `ids` - Set of Transit Gateway Route Table Propagation identifiers. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/ec2_transit_gateway_route_table_routes.html.markdown b/website/docs/cdktf/python/d/ec2_transit_gateway_route_table_routes.html.markdown index daee72259f27..ef9533e731df 100644 --- a/website/docs/cdktf/python/d/ec2_transit_gateway_route_table_routes.html.markdown +++ b/website/docs/cdktf/python/d/ec2_transit_gateway_route_table_routes.html.markdown @@ -255,6 +255,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `filter` - (Required) Custom filter block as described below. * `transit_gateway_route_table_id` - (Required) Identifier of EC2 Transit Gateway Route Table. @@ -280,4 +281,4 @@ This data source exports the following attributes in addition to the arguments a * `transit_gateway_route_table_announcement_id` - The id of the transit gateway route table announcement, most of the time it is an empty string. * `type` - The type of the route, can be `propagated` or `static`. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/ec2_transit_gateway_route_tables.html.markdown b/website/docs/cdktf/python/d/ec2_transit_gateway_route_tables.html.markdown index 0d9a033c7c7d..af48d9190640 100644 --- a/website/docs/cdktf/python/d/ec2_transit_gateway_route_tables.html.markdown +++ b/website/docs/cdktf/python/d/ec2_transit_gateway_route_tables.html.markdown @@ -40,8 +40,8 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `filter` - (Optional) Custom filter block as described below. - * `tags` - (Optional) Mapping of tags, each pair of which must exactly match a pair on the desired transit gateway route table. @@ -50,7 +50,6 @@ which take the following arguments: * `name` - (Required) Name of the field to filter by, as defined by [the underlying AWS API](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeTransitGatewayRouteTables.html). - * `values` - (Required) Set of values that are accepted for the given field. 
A Transit Gateway Route Table will be selected if any one of the given values matches. @@ -67,4 +66,4 @@ This data source exports the following attributes in addition to the arguments a - `read` - (Default `20m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/ec2_transit_gateway_vpc_attachment.html.markdown b/website/docs/cdktf/python/d/ec2_transit_gateway_vpc_attachment.html.markdown index 015eb40f28ca..735d4497888b 100644 --- a/website/docs/cdktf/python/d/ec2_transit_gateway_vpc_attachment.html.markdown +++ b/website/docs/cdktf/python/d/ec2_transit_gateway_vpc_attachment.html.markdown @@ -60,6 +60,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `filter` - (Optional) One or more configuration blocks containing name-values filters. Detailed below. * `id` - (Optional) Identifier of the EC2 Transit Gateway VPC Attachment. 
@@ -90,4 +91,4 @@ This data source exports the following attributes in addition to the arguments a - `read` - (Default `20m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/ec2_transit_gateway_vpc_attachments.html.markdown b/website/docs/cdktf/python/d/ec2_transit_gateway_vpc_attachments.html.markdown index 528ac2f1c814..d16430112a16 100644 --- a/website/docs/cdktf/python/d/ec2_transit_gateway_vpc_attachments.html.markdown +++ b/website/docs/cdktf/python/d/ec2_transit_gateway_vpc_attachments.html.markdown @@ -52,6 +52,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `filter` - (Optional) One or more configuration blocks containing name-values filters. Detailed below. ### filter Argument Reference @@ -74,4 +75,4 @@ This data source exports the following attributes in addition to the arguments a - `read` - (Default `20m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/ec2_transit_gateway_vpn_attachment.html.markdown b/website/docs/cdktf/python/d/ec2_transit_gateway_vpn_attachment.html.markdown index b7a6809e5e41..9d77f1adef18 100644 --- a/website/docs/cdktf/python/d/ec2_transit_gateway_vpn_attachment.html.markdown +++ b/website/docs/cdktf/python/d/ec2_transit_gateway_vpn_attachment.html.markdown @@ -63,6 +63,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `transit_gateway_id` - (Optional) Identifier of the EC2 Transit Gateway. * `vpn_connection_id` - (Optional) Identifier of the EC2 VPN Connection. * `filter` - (Optional) Configuration block(s) for filtering. Detailed below. @@ -88,4 +89,4 @@ This data source exports the following attributes in addition to the arguments a - `read` - (Default `20m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/ecr_authorization_token.html.markdown b/website/docs/cdktf/python/d/ecr_authorization_token.html.markdown index 5da56c713aef..81a80048b5e3 100644 --- a/website/docs/cdktf/python/d/ecr_authorization_token.html.markdown +++ b/website/docs/cdktf/python/d/ecr_authorization_token.html.markdown @@ -33,6 +33,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `registry_id` - (Optional) AWS account ID of the ECR Repository. If not specified the default account is assumed. ## Attribute Reference @@ -46,4 +47,4 @@ This data source exports the following attributes in addition to the arguments a * `proxy_endpoint` - Registry URL to use in the docker login command. * `user_name` - User name decoded from the authorization token. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/ecr_image.html.markdown b/website/docs/cdktf/python/d/ecr_image.html.markdown index 797a82a90d89..5274900df8c5 100644 --- a/website/docs/cdktf/python/d/ecr_image.html.markdown +++ b/website/docs/cdktf/python/d/ecr_image.html.markdown @@ -36,6 +36,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `registry_id` - (Optional) ID of the Registry where the repository resides. * `repository_name` - (Required) Name of the ECR Repository. * `image_digest` - (Optional) Sha256 digest of the image manifest. At least one of `image_digest`, `image_tag`, or `most_recent` must be specified. @@ -52,4 +53,4 @@ This data source exports the following attributes in addition to the arguments a * `image_tags` - List of tags associated with this image. * `image_uri` - The URI for the specific image version specified by `image_tag` or `image_digest`. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/ecr_images.html.markdown b/website/docs/cdktf/python/d/ecr_images.html.markdown new file mode 100644 index 000000000000..2060120d64e2 --- /dev/null +++ b/website/docs/cdktf/python/d/ecr_images.html.markdown @@ -0,0 +1,56 @@ +--- +subcategory: "ECR (Elastic Container Registry)" +layout: "aws" +page_title: "AWS: aws_ecr_images" +description: |- + Provides a list of images for a specified ECR Repository +--- + + + +# Data Source: aws_ecr_images + +The ECR Images data source allows the list of images in a specified repository to be retrieved. + +## Example Usage + +```python +# DO NOT EDIT. 
Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformOutput, TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.data_aws_ecr_images import DataAwsEcrImages +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + example = DataAwsEcrImages(self, "example", + repository_name="my-repository" + ) + TerraformOutput(self, "image_digests", + value="${[ for img in ${" + example.image_ids + "} : img.image_digest if img.image_digest != null]}" + ) + TerraformOutput(self, "image_tags", + value="${[ for img in ${" + example.image_ids + "} : img.image_tag if img.image_tag != null]}" + ) +``` + +## Argument Reference + +This data source supports the following arguments: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `registry_id` - (Optional) ID of the Registry where the repository resides. +* `repository_name` - (Required) Name of the ECR Repository. + +## Attribute Reference + +This data source exports the following attributes in addition to the arguments above: + +* `image_ids` - List of image objects containing image digest and tags. Each object has the following attributes: + * `image_digest` - The sha256 digest of the image manifest. + * `image_tag` - The tag associated with the image. 
+ + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/ecr_pull_through_cache_rule.html.markdown b/website/docs/cdktf/python/d/ecr_pull_through_cache_rule.html.markdown index 40800a5803d4..159f970d671f 100644 --- a/website/docs/cdktf/python/d/ecr_pull_through_cache_rule.html.markdown +++ b/website/docs/cdktf/python/d/ecr_pull_through_cache_rule.html.markdown @@ -35,7 +35,8 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: -- `ecr_repository_prefix` - (Required) The repository name prefix to use when caching images from the source registry. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `ecr_repository_prefix` - (Required) The repository name prefix to use when caching images from the source registry. ## Attribute Reference @@ -48,4 +49,4 @@ This data source exports the following attributes in addition to the arguments a - `upstream_registry_url` - The registry URL of the upstream registry to use as the source. - `upstream_repository_prefix` - The upstream repository prefix associated with the pull through cache rule. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/ecr_repositories.html.markdown b/website/docs/cdktf/python/d/ecr_repositories.html.markdown index cbad8f5d9f6f..fbe0f4f6f1bf 100644 --- a/website/docs/cdktf/python/d/ecr_repositories.html.markdown +++ b/website/docs/cdktf/python/d/ecr_repositories.html.markdown @@ -33,7 +33,9 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -This data source does not support any arguments. 
+This data source supports the following arguments: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). ## Attribute Reference @@ -42,4 +44,4 @@ This data source exports the following attributes in addition to the arguments a * `id` - AWS Region. * `names` - A list of AWS Elastic Container Registries for the region. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/ecr_repository.html.markdown b/website/docs/cdktf/python/d/ecr_repository.html.markdown index 87c3da944fc0..48718261bc63 100644 --- a/website/docs/cdktf/python/d/ecr_repository.html.markdown +++ b/website/docs/cdktf/python/d/ecr_repository.html.markdown @@ -35,6 +35,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the ECR Repository. * `registry_id` - (Optional) Registry ID where the repository was created. @@ -46,6 +47,7 @@ This data source exports the following attributes in addition to the arguments a * `encryption_configuration` - Encryption configuration for the repository. See [Encryption Configuration](#encryption-configuration) below. * `image_scanning_configuration` - Configuration block that defines image scanning configuration for the repository. See [Image Scanning Configuration](#image-scanning-configuration) below. * `image_tag_mutability` - The tag mutability setting for the repository. 
+* `image_tag_mutability_exclusion_filter` - Block that defines filters to specify which image tags can override the default tag mutability setting. * `most_recent_image_tags` - List of image tags associated with the most recently pushed image in the repository. * `repository_url` - URL of the repository (in the form `aws_account_id.dkr.ecr.region.amazonaws.com/repositoryName`). * `tags` - Map of tags assigned to the resource. @@ -55,8 +57,13 @@ This data source exports the following attributes in addition to the arguments a * `encryption_type` - Encryption type to use for the repository, either `AES256` or `KMS`. * `kms_key` - If `encryption_type` is `KMS`, the ARN of the KMS key used. +### Image Tag Mutability Exclusion Filter + +* `filter` - The filter pattern to use for excluding image tags from the mutability setting. +* `filter_type` - The type of filter to use. + ### Image Scanning Configuration * `scan_on_push` - Whether images are scanned after being pushed to the repository. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/ecr_repository_creation_template.html.markdown b/website/docs/cdktf/python/d/ecr_repository_creation_template.html.markdown index 3704d0314a58..de2126b4f1b8 100644 --- a/website/docs/cdktf/python/d/ecr_repository_creation_template.html.markdown +++ b/website/docs/cdktf/python/d/ecr_repository_creation_template.html.markdown @@ -35,6 +35,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `prefix` - (Required) The repository name prefix that the template matches against. 
## Attribute Reference @@ -46,6 +47,7 @@ This data source exports the following attributes in addition to the arguments a * `description` - The description for this template. * `encryption_configuration` - Encryption configuration for any created repositories. See [Encryption Configuration](#encryption-configuration) below. * `image_tag_mutability` - The tag mutability setting for any created repositories. +* `image_tag_mutability_exclusion_filter` - Block that defines filters to specify which image tags can override the default tag mutability setting. * `lifecycle_policy` - The lifecycle policy document to apply to any created repositories. * `registry_id` - The registry ID the repository creation template applies to. * `repository_policy` - The registry policy document to apply to any created repositories. @@ -56,4 +58,9 @@ This data source exports the following attributes in addition to the arguments a * `encryption_type` - Encryption type to use for any created repositories, either `AES256` or `KMS`. * `kms_key` - If `encryption_type` is `KMS`, the ARN of the KMS key used. - \ No newline at end of file +### Image Tag Mutability Exclusion Filter + +* `filter` - The filter pattern to use for excluding image tags from the mutability setting. +* `filter_type` - The type of filter to use. + + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/ecrpublic_authorization_token.html.markdown b/website/docs/cdktf/python/d/ecrpublic_authorization_token.html.markdown index 5b319c881e95..1b2efef1ea4c 100644 --- a/website/docs/cdktf/python/d/ecrpublic_authorization_token.html.markdown +++ b/website/docs/cdktf/python/d/ecrpublic_authorization_token.html.markdown @@ -33,7 +33,9 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -This data source does not support any arguments. 
+This data source supports the following arguments: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). ## Attribute Reference @@ -45,4 +47,4 @@ This data source exports the following attributes in addition to the arguments a * `password` - Password decoded from the authorization token. * `user_name` - User name decoded from the authorization token. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/ecs_cluster.html.markdown b/website/docs/cdktf/python/d/ecs_cluster.html.markdown index 1d2bd71b2535..ecdcdccba62f 100644 --- a/website/docs/cdktf/python/d/ecs_cluster.html.markdown +++ b/website/docs/cdktf/python/d/ecs_cluster.html.markdown @@ -36,6 +36,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
* `cluster_name` - (Required) Name of the ECS Cluster ## Attribute Reference @@ -51,4 +52,4 @@ This data source exports the following attributes in addition to the arguments a * `setting` - Settings associated with the ECS Cluster * `tags` - Key-value map of resource tags - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/ecs_clusters.html.markdown b/website/docs/cdktf/python/d/ecs_clusters.html.markdown index a1266cedb562..770d9806b6d0 100644 --- a/website/docs/cdktf/python/d/ecs_clusters.html.markdown +++ b/website/docs/cdktf/python/d/ecs_clusters.html.markdown @@ -33,7 +33,9 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -This data source does not support any arguments. +This data source supports the following arguments: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). ## Attribute Reference @@ -41,4 +43,4 @@ This data source exports the following attributes in addition to the arguments a * `cluster_arns` - List of ECS cluster ARNs associated with the account. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/ecs_container_definition.html.markdown b/website/docs/cdktf/python/d/ecs_container_definition.html.markdown index 43547e91c2a1..8f7ae3133c57 100644 --- a/website/docs/cdktf/python/d/ecs_container_definition.html.markdown +++ b/website/docs/cdktf/python/d/ecs_container_definition.html.markdown @@ -37,6 +37,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `task_definition` - (Required) ARN of the task definition which contains the container * `container_name` - (Required) Name of the container definition @@ -53,4 +54,4 @@ This data source exports the following attributes in addition to the arguments a * `disable_networking` - Indicator if networking is disabled * `docker_labels` - Set docker labels - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/ecs_service.html.markdown b/website/docs/cdktf/python/d/ecs_service.html.markdown index dd44856b6225..02dd2c656a42 100644 --- a/website/docs/cdktf/python/d/ecs_service.html.markdown +++ b/website/docs/cdktf/python/d/ecs_service.html.markdown @@ -37,6 +37,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `service_name` - (Required) Name of the ECS Service * `cluster_arn` - (Required) ARN of the ECS Cluster @@ -47,8 +48,28 @@ This data source exports the following attributes in addition to the arguments a * `arn` - ARN of the ECS Service * `desired_count` - Number of tasks for the ECS Service * `launch_type` - Launch type for the ECS Service +* `load_balancer` - Load balancers for the ECS Service. See [`load_balancer` Block](#load_balancer-block) for details. * `scheduling_strategy` - Scheduling strategy for the ECS Service * `task_definition` - Family for the latest ACTIVE revision or full ARN of the task definition. * `tags` - Resource tags. 
- \ No newline at end of file +### `load_balancer` Block + +The `load_balancer` block exports the following attributes: + +* `advanced_configuration` - Settings for Blue/Green deployment. See [`advanced_configuration` Block](#advanced_configuration-block) for details. +* `container_name` - Name of the container to associate with the load balancer. +* `container_port` - Port on the container to associate with the load balancer. +* `elb_name` - Name of the load balancer. +* `target_group_arn` - ARN of the target group to associate with the load balancer. + +### `advanced_configuration` Block + +The `advanced_configuration` block exports the following attributes: + +* `alternate_target_group_arn` - ARN of the alternate target group to use for Blue/Green deployments. +* `production_listener_rule` - ARN of the listener rule that routes production traffic. +* `role_arn` - ARN of the IAM role that allows ECS to manage the target groups. +* `test_listener_rule` - ARN of the listener rule that routes test traffic. + + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/ecs_task_definition.html.markdown b/website/docs/cdktf/python/d/ecs_task_definition.html.markdown index a39fd5705fe6..302b6355ba7f 100644 --- a/website/docs/cdktf/python/d/ecs_task_definition.html.markdown +++ b/website/docs/cdktf/python/d/ecs_task_definition.html.markdown @@ -56,6 +56,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `task_definition` - (Required) Family for the latest ACTIVE revision, family and revision (family:revision) for a specific revision in the family, the ARN of the task definition to access to. 
## Attribute Reference @@ -71,7 +72,6 @@ This data source exports the following attributes in addition to the arguments a * `execution_role_arn` - ARN of the task execution role that the Amazon ECS container agent and the Docker daemon can assume. * `family` - A unique name for your task definition. The following arguments are optional: -* `inference_accelerator` - Configuration block(s) with Inference Accelerators settings. [Detailed below.](#inference_accelerator) * `ipc_mode` - IPC resource namespace to be used for the containers in the task The valid values are `host`, `task`, and `none`. * `memory` - Amount (in MiB) of memory used by the task. If the `requires_compatibilities` is `FARGATE` this field is required. * `network_mode` - Docker networking mode to use for the containers in the task. Valid values are `none`, `bridge`, `awsvpc`, and `host`. @@ -89,11 +89,6 @@ The following arguments are optional: * `size_in_gib` - The total amount, in GiB, of ephemeral storage to set for the task. The minimum supported value is `21` GiB and the maximum supported value is `200` GiB. -### inference_accelerator - -* `device_name` - Elastic Inference accelerator device name. The deviceName must also be referenced in a container definition as a ResourceRequirement. -* `device_type` - Elastic Inference accelerator type to use. - ### placement_constraints * `expression` - Cluster Query Language expression to apply to the constraint. For more information, see [Cluster Query Language in the Amazon EC2 Container Service Developer Guide](http://docs.aws.amazon.com/AmazonECS/latest/developerguide/cluster-query-language.html). @@ -158,4 +153,4 @@ For more information, see [Specifying an FSX Windows File Server volume in your * `credentials_parameter` - The authorization credential option to use. The authorization credential options can be provided using either the Amazon Resource Name (ARN) of an AWS Secrets Manager secret or AWS Systems Manager Parameter Store parameter. 
The ARNs refer to the stored credentials. * `domain` - A fully qualified domain name hosted by an AWS Directory Service Managed Microsoft AD (Active Directory) or self-hosted AD on Amazon EC2. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/ecs_task_execution.html.markdown b/website/docs/cdktf/python/d/ecs_task_execution.html.markdown index a16a126a95f8..4eb1030a9456 100644 --- a/website/docs/cdktf/python/d/ecs_task_execution.html.markdown +++ b/website/docs/cdktf/python/d/ecs_task_execution.html.markdown @@ -52,6 +52,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `capacity_provider_strategy` - (Optional) Set of capacity provider strategies to use for the cluster. See below. * `client_token` - (Optional) An identifier that you provide to ensure the idempotency of the request. It must be unique and is case sensitive. Up to 64 characters are allowed. The valid characters are characters in the range of 33-126, inclusive. For more information, see [Ensuring idempotency](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/ECS_Idempotency.html). * `desired_count` - (Optional) Number of instantiations of the specified task to place on your cluster. You can specify up to 10 tasks for each call. @@ -88,7 +89,6 @@ For more information, see the [Task Networking](https://docs.aws.amazon.com/Amaz * `container_overrides` - (Optional) One or more container overrides that are sent to a task. See below. * `cpu` - (Optional) The CPU override for the task. * `execution_role_arn` - (Optional) Amazon Resource Name (ARN) of the task execution role override for the task. 
-* `inference_accelerator_overrides` - (Optional) **DEPRECATED** Elastic Inference accelerator override for the task. See below. * `memory` - (Optional) The memory override for the task. * `task_role_arn` - (Optional) Amazon Resource Name (ARN) of the role that containers in this task can assume. @@ -109,13 +109,8 @@ For more information, see the [Task Networking](https://docs.aws.amazon.com/Amaz ### resource_requirements -* `type` - (Required) The type of resource to assign to a container. Valid values are `GPU` or `InferenceAccelerator`. -* `value` - (Required) The value for the specified resource type. If the `GPU` type is used, the value is the number of physical GPUs the Amazon ECS container agent reserves for the container. The number of GPUs that's reserved for all containers in a task can't exceed the number of available GPUs on the container instance that the task is launched on. If the `InferenceAccelerator` type is used, the value matches the `deviceName` for an InferenceAccelerator specified in a task definition. - -### inference_accelerator_overrides - -* `device_name` - (Optional) The Elastic Inference accelerator device name to override for the task. This parameter must match a deviceName specified in the task definition. -* `device_type` - (Optional) The Elastic Inference accelerator type to use. +* `type` - (Required) The type of resource to assign to a container. The only valid value is `GPU`. +* `value` - (Required) The value for the specified resource type. If the `GPU` type is used, the value is the number of physical GPUs the Amazon ECS container agent reserves for the container. The number of GPUs that's reserved for all containers in a task can't exceed the number of available GPUs on the container instance that the task is launched on. ### placement_constraints @@ -136,4 +131,4 @@ This data source exports the following attributes in addition to the arguments a * `task_arns` - A list of the provisioned task ARNs. 
* `id` - The unique identifier, which is a comma-delimited string joining the `cluster` and `task_definition` attributes. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/efs_access_point.html.markdown b/website/docs/cdktf/python/d/efs_access_point.html.markdown index 92dff5dc0876..f3efcd2c1ef3 100644 --- a/website/docs/cdktf/python/d/efs_access_point.html.markdown +++ b/website/docs/cdktf/python/d/efs_access_point.html.markdown @@ -35,6 +35,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `access_point_id` - (Required) ID that identifies the file system. ## Attribute Reference @@ -57,4 +58,4 @@ This data source exports the following attributes in addition to the arguments a * `path` - Path exposed as the root directory * `tags` - Key-value mapping of resource tags. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/efs_access_points.html.markdown b/website/docs/cdktf/python/d/efs_access_points.html.markdown index a42b558cf217..2e7a5d7d4f0e 100644 --- a/website/docs/cdktf/python/d/efs_access_points.html.markdown +++ b/website/docs/cdktf/python/d/efs_access_points.html.markdown @@ -35,6 +35,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
* `file_system_id` - (Required) EFS File System identifier. ## Attribute Reference @@ -45,4 +46,4 @@ This data source exports the following attributes in addition to the arguments a * `id` - EFS File System identifier. * `ids` - Set of identifiers. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/efs_file_system.html.markdown b/website/docs/cdktf/python/d/efs_file_system.html.markdown index 30fd124e15d0..941a4de770be 100644 --- a/website/docs/cdktf/python/d/efs_file_system.html.markdown +++ b/website/docs/cdktf/python/d/efs_file_system.html.markdown @@ -46,6 +46,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `file_system_id` - (Optional) ID that identifies the file system (e.g., fs-ccfc0d65). * `creation_token` - (Optional) Restricts the list to the file system with this creation token. * `tags` - (Optional) Restricts the list to the file system with these tags. @@ -68,4 +69,4 @@ This data source exports the following attributes in addition to the arguments a * `throughput_mode` - Throughput mode for the file system. * `size_in_bytes` - Current byte count used by the file system. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/efs_mount_target.html.markdown b/website/docs/cdktf/python/d/efs_mount_target.html.markdown index f06f0983c339..e94c19b06906 100644 --- a/website/docs/cdktf/python/d/efs_mount_target.html.markdown +++ b/website/docs/cdktf/python/d/efs_mount_target.html.markdown @@ -41,6 +41,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `access_point_id` - (Optional) ID or ARN of the access point whose mount target that you want to find. It must be included if a `file_system_id` and `mount_target_id` are not included. * `file_system_id` - (Optional) ID or ARN of the file system whose mount target that you want to find. It must be included if an `access_point_id` and `mount_target_id` are not included. * `mount_target_id` - (Optional) ID or ARN of the mount target that you want to find. It must be included in your request if an `access_point_id` and `file_system_id` are not included. @@ -52,6 +53,8 @@ This data source exports the following attributes in addition to the arguments a * `file_system_arn` - Amazon Resource Name of the file system for which the mount target is intended. * `subnet_id` - ID of the mount target's subnet. * `ip_address` - Address at which the file system may be mounted via the mount target. +* `ip_address_type` - IP address type for the mount target. +* `ipv6_address` - IPv6 address at which the file system may be mounted via the mount target. * `security_groups` - List of VPC security group IDs attached to the mount target. * `dns_name` - DNS name for the EFS file system. 
* `mount_target_dns_name` - The DNS name for the given subnet/AZ per [documented convention](http://docs.aws.amazon.com/efs/latest/ug/mounting-fs-mount-cmd-dns-name.html). @@ -60,4 +63,4 @@ This data source exports the following attributes in addition to the arguments a * `availability_zone_id` - The unique and consistent identifier of the Availability Zone (AZ) that the mount target resides in. * `owner_id` - AWS account ID that owns the resource. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/eip.html.markdown b/website/docs/cdktf/python/d/eip.html.markdown index f685371ba0d7..cecc306bb6c4 100644 --- a/website/docs/cdktf/python/d/eip.html.markdown +++ b/website/docs/cdktf/python/d/eip.html.markdown @@ -100,6 +100,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `filter` - (Optional) One or more name/value pairs to use as filters. There are several valid keys, for a full reference, check out the [EC2 API Reference](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeAddresses.html). * `id` - (Optional) Allocation ID of the specific VPC EIP to retrieve. If a classic EIP is required, do NOT set `id`, only set `public_ip` * `public_ip` - (Optional) Public IP of the specific EIP to retrieve. 
@@ -139,4 +140,4 @@ This data source exports the following attributes in addition to the arguments a - `read` - (Default `20m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/eips.html.markdown b/website/docs/cdktf/python/d/eips.html.markdown index dadb23ae8941..87195e803c95 100644 --- a/website/docs/cdktf/python/d/eips.html.markdown +++ b/website/docs/cdktf/python/d/eips.html.markdown @@ -45,6 +45,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `filter` - (Optional) Custom filter block as described below. * `tags` - (Optional) Map of tags, each pair of which must exactly match a pair on the desired Elastic IPs. @@ -68,4 +69,4 @@ This data source exports the following attributes in addition to the arguments a - `read` - (Default `20m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/eks_access_entry.html.markdown b/website/docs/cdktf/python/d/eks_access_entry.html.markdown index 253a5ff33479..199eddb8bdf1 100644 --- a/website/docs/cdktf/python/d/eks_access_entry.html.markdown +++ b/website/docs/cdktf/python/d/eks_access_entry.html.markdown @@ -39,8 +39,9 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: -* `cluster_name` – (Required) Name of the EKS Cluster. -* `principal_arn` – (Required) The IAM Principal ARN which requires Authentication access to the EKS cluster. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `cluster_name` - (Required) Name of the EKS Cluster. +* `principal_arn` - (Required) The IAM Principal ARN which requires Authentication access to the EKS cluster. ## Attribute Reference @@ -48,10 +49,10 @@ This data source exports the following attributes in addition to the arguments a * `access_entry_arn` - Amazon Resource Name (ARN) of the Access Entry. * `created_at` - Date and time in [RFC3339 format](https://tools.ietf.org/html/rfc3339#section-5.8) that the EKS add-on was created. -* `kubernetes_groups` – List of string which can optionally specify the Kubernetes groups the user would belong to when creating an access entry. +* `kubernetes_groups` - List of string which can optionally specify the Kubernetes groups the user would belong to when creating an access entry. * `modified_at` - Date and time in [RFC3339 format](https://tools.ietf.org/html/rfc3339#section-5.8) that the EKS add-on was updated. * `user_name` - Defaults to principal ARN if user is principal else defaults to assume-role/session-name is role is used. * `type` - Defaults to STANDARD which provides the standard workflow. EC2_LINUX, EC2_WINDOWS, FARGATE_LINUX types disallow users to input a username or groups, and prevent associations. * `tags_all` - (Optional) Key-value map of resource tags, including those inherited from the provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/eks_addon.html.markdown b/website/docs/cdktf/python/d/eks_addon.html.markdown index 206f9c874fac..dfcb942e6371 100644 --- a/website/docs/cdktf/python/d/eks_addon.html.markdown +++ b/website/docs/cdktf/python/d/eks_addon.html.markdown @@ -39,9 +39,10 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: -* `addon_name` – (Required) Name of the EKS add-on. The name must match one of +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `addon_name` - (Required) Name of the EKS add-on. The name must match one of the names returned by [list-addon](https://docs.aws.amazon.com/cli/latest/reference/eks/list-addons.html). -* `cluster_name` – (Required) Name of the EKS Cluster. +* `cluster_name` - (Required) Name of the EKS Cluster. ## Attribute Reference @@ -59,4 +60,4 @@ This data source exports the following attributes in addition to the arguments a * `created_at` - Date and time in [RFC3339 format](https://tools.ietf.org/html/rfc3339#section-5.8) that the EKS add-on was created. * `modified_at` - Date and time in [RFC3339 format](https://tools.ietf.org/html/rfc3339#section-5.8) that the EKS add-on was updated. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/eks_addon_version.html.markdown b/website/docs/cdktf/python/d/eks_addon_version.html.markdown index 2776b6635c0a..9632511aaeba 100644 --- a/website/docs/cdktf/python/d/eks_addon_version.html.markdown +++ b/website/docs/cdktf/python/d/eks_addon_version.html.markdown @@ -57,9 +57,10 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: -* `addon_name` – (Required) Name of the EKS add-on. The name must match one of +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `addon_name` - (Required) Name of the EKS add-on. The name must match one of the names returned by [list-addon](https://docs.aws.amazon.com/cli/latest/reference/eks/list-addons.html). -* `kubernetes_version` – (Required) Version of the EKS Cluster. Must be between 1-100 characters in length. Must begin with an alphanumeric character, and must only contain alphanumeric characters, dashes and underscores (`^[0-9A-Za-z][A-Za-z0-9\-_]+$`). +* `kubernetes_version` - (Required) Version of the EKS Cluster. Must be between 1-100 characters in length. Must begin with an alphanumeric character, and must only contain alphanumeric characters, dashes and underscores (`^[0-9A-Za-z][A-Za-z0-9\-_]+$`). * `most_recent` - (Optional) Determines if the most recent or default version of the addon should be returned. ## Attribute Reference @@ -69,4 +70,4 @@ This data source exports the following attributes in addition to the arguments a * `id` - Name of the add-on * `version` - Version of the EKS add-on. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/eks_cluster.html.markdown b/website/docs/cdktf/python/d/eks_cluster.html.markdown index cb75f5413a0d..5f70869a9040 100644 --- a/website/docs/cdktf/python/d/eks_cluster.html.markdown +++ b/website/docs/cdktf/python/d/eks_cluster.html.markdown @@ -41,6 +41,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the cluster. ## Attribute Reference @@ -60,6 +61,7 @@ This data source exports the following attributes in addition to the arguments a * `data` - The base64 encoded certificate data required to communicate with your cluster. Add this to the `certificate-authority-data` section of the `kubeconfig` file for your cluster. * `cluster_id` - The ID of your local Amazon EKS cluster on the AWS Outpost. This attribute isn't available for an AWS EKS cluster on AWS cloud. * `created_at` - Unix epoch time stamp in seconds for when the cluster was created. +* `deletion_protection` - Whether deletion protection for the cluster is enabled. * `enabled_cluster_log_types` - The enabled control plane logs. * `endpoint` - Endpoint for your Kubernetes API server. * `identity` - Nested attribute containing identity provider information for your cluster. Only available on Kubernetes version 1.13 and 1.14 clusters created or upgraded on or after September 3, 2019. For an example using this information to enable IAM Roles for Service Accounts, see the [`aws_eks_cluster` resource documentation](/docs/providers/aws/r/eks_cluster.html). 
@@ -96,10 +98,10 @@ This data source exports the following attributes in addition to the arguments a * `endpoint_private_access` - Indicates whether or not the Amazon EKS private API server endpoint is enabled. * `endpoint_public_access` - Indicates whether or not the Amazon EKS public API server endpoint is enabled. * `public_access_cidrs` - List of CIDR blocks. Indicates which CIDR blocks can access the Amazon EKS public API server endpoint. - * `security_group_ids` – List of security group IDs - * `subnet_ids` – List of subnet IDs - * `vpc_id` – The VPC associated with your cluster. + * `security_group_ids` - List of security group IDs + * `subnet_ids` - List of subnet IDs + * `vpc_id` - The VPC associated with your cluster. * `zonal_shift_config` - Contains Zonal Shift Configuration. * `enabled` - Whether zonal shift is enabled. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/eks_cluster_auth.html.markdown b/website/docs/cdktf/python/d/eks_cluster_auth.html.markdown index f09952137048..3faa6859754b 100644 --- a/website/docs/cdktf/python/d/eks_cluster_auth.html.markdown +++ b/website/docs/cdktf/python/d/eks_cluster_auth.html.markdown @@ -58,6 +58,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the cluster ## Attribute Reference @@ -67,4 +68,4 @@ This data source exports the following attributes in addition to the arguments a * `id` - Name of the cluster. * `token` - Token to use to authenticate with the cluster. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/eks_cluster_versions.html.markdown b/website/docs/cdktf/python/d/eks_cluster_versions.html.markdown index 77f102dafe13..4e34ce4c7a40 100644 --- a/website/docs/cdktf/python/d/eks_cluster_versions.html.markdown +++ b/website/docs/cdktf/python/d/eks_cluster_versions.html.markdown @@ -19,7 +19,7 @@ Terraform data source for managing AWS EKS (Elastic Kubernetes) Cluster Versions ```python # DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug from constructs import Construct -from cdktf import TerraformStack +from cdktf import TerraformOutput, TerraformStack # # Provider bindings are generated by running `cdktf get`. # See https://cdk.tf/provider-generation for more details. @@ -28,7 +28,16 @@ from imports.aws.data_aws_eks_cluster_versions import DataAwsEksClusterVersions class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) - DataAwsEksClusterVersions(self, "example") + example = DataAwsEksClusterVersions(self, "example") + TerraformOutput(self, "eks_cluster_version_filtered", + value="${[ for version in ${" + example.cluster_versions + "} : version if version.cluster_version == \"1.33\"]}" + ) + TerraformOutput(self, "eks_cluster_version_list", + value="${[ for version in ${" + example.cluster_versions + "} : version.cluster_version]}" + ) + TerraformOutput(self, "eks_cluster_versions", + value=example.cluster_versions + ) ``` ### Filter by Cluster Type @@ -73,9 +82,9 @@ class MyConvertedCode(TerraformStack): The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
* `cluster_type` - (Optional) Type of clusters to filter by. Currently, the only valid value is `eks`. -* `cluster_versions` - (Optional) A list of Kubernetes versions that you can use to check if EKS supports it. * `default_only` - (Optional) Whether to show only the default versions of Kubernetes supported by EKS. * `include_all` - (Optional) Whether to include all kubernetes versions in the response. * `version_status` - (Optional) Status of the EKS cluster versions to list. @@ -85,14 +94,15 @@ Valid values are `STANDARD_SUPPORT` or `UNSUPPORTED` or `EXTENDED_SUPPORT`. This data source exports the following attributes in addition to the arguments above: -* `cluster_type` - Type of cluster that the version belongs to. -* `cluster_version` - Kubernetes version supported by EKS. -* `default_platform_version` - Default eks platform version for the cluster version. -* `default_version` - Default Kubernetes version for the cluster version. -* `end_of_extended_support_date` - End of extended support date for the cluster version. -* `end_of_standard_support_date` - End of standard support date for the cluster version. -* `kubernetes_patch_version` - Kubernetes patch version for the cluster version. -* `release_date` - Release date of the cluster version. -* `version_status` - Status of the EKS cluster version. - - \ No newline at end of file +* `cluster_versions` - A list of Kubernetes version information. + * `cluster_type` - Type of cluster that the version belongs to. + * `cluster_version` - Kubernetes version supported by EKS. + * `default_platform_version` - Default EKS platform version for the cluster version. + * `default_version` - Default Kubernetes version for the cluster version. + * `end_of_extended_support_date` - End of extended support date for the cluster version. + * `end_of_standard_support_date` - End of standard support date for the cluster version. + * `kubernetes_patch_version` - Kubernetes patch version for the cluster version. 
+ * `release_date` - Release date of the cluster version. + * `version_status` - Status of the EKS cluster version. + + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/eks_clusters.html.markdown b/website/docs/cdktf/python/d/eks_clusters.html.markdown index 6e0462a4fd6b..bb1a8671a057 100644 --- a/website/docs/cdktf/python/d/eks_clusters.html.markdown +++ b/website/docs/cdktf/python/d/eks_clusters.html.markdown @@ -44,7 +44,9 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -This data source does not support any arguments. +This data source supports the following arguments: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). ## Attribute Reference @@ -53,4 +55,4 @@ This data source exports the following attributes in addition to the arguments a * `id` - AWS Region. * `names` - Set of EKS clusters names - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/eks_node_group.html.markdown b/website/docs/cdktf/python/d/eks_node_group.html.markdown index 6fcacf087a38..87eed32d5f5e 100644 --- a/website/docs/cdktf/python/d/eks_node_group.html.markdown +++ b/website/docs/cdktf/python/d/eks_node_group.html.markdown @@ -36,6 +36,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `cluster_name` - (Required) Name of the cluster. * `node_group_name` - (Required) Name of the node group. 
@@ -54,8 +55,8 @@ This data source exports the following attributes in addition to the arguments a * `id` - The ID of the launch template. * `name` - The name of the launch template. * `version` - The version number of the launch template. -* `node_role_arn` – ARN of the IAM Role that provides permissions for the EKS Node Group. -* `release_version` – AMI version of the EKS Node Group. +* `node_role_arn` - ARN of the IAM Role that provides permissions for the EKS Node Group. +* `release_version` - AMI version of the EKS Node Group. * `remote_access` - Configuration block with remote access settings. * `ec2_ssh_key` - EC2 Key Pair name that provides access for SSH communication with the worker nodes in the EKS Node Group. * `source_security_group_ids` - Set of EC2 Security Group IDs to allow SSH access (port 22) from on the worker nodes. @@ -68,12 +69,12 @@ This data source exports the following attributes in addition to the arguments a * `max_size` - Maximum number of worker nodes. * `min_size` - Minimum number of worker nodes. * `status` - Status of the EKS Node Group. -* `subnet_ids` – Identifiers of EC2 Subnets to associate with the EKS Node Group. +* `subnet_ids` - Identifiers of EC2 Subnets to associate with the EKS Node Group. * `taints` - List of objects containing information about taints applied to the nodes in the EKS Node Group. * `key` - The key of the taint. * `value` - The value of the taint. * `effect` - The effect of the taint. * `tags` - Key-value map of resource tags. -* `version` – Kubernetes version. +* `version` - Kubernetes version. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/eks_node_groups.html.markdown b/website/docs/cdktf/python/d/eks_node_groups.html.markdown index 7b431c2e364a..20f23e8963cf 100644 --- a/website/docs/cdktf/python/d/eks_node_groups.html.markdown +++ b/website/docs/cdktf/python/d/eks_node_groups.html.markdown @@ -49,6 +49,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `cluster_name` - (Required) Name of the cluster. ## Attribute Reference @@ -58,4 +59,4 @@ This data source exports the following attributes in addition to the arguments a * `id` - Cluster name. * `names` - Set of all node group names in an EKS Cluster. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/elastic_beanstalk_application.html.markdown b/website/docs/cdktf/python/d/elastic_beanstalk_application.html.markdown index ad28d70a7c3e..a8444f792aae 100644 --- a/website/docs/cdktf/python/d/elastic_beanstalk_application.html.markdown +++ b/website/docs/cdktf/python/d/elastic_beanstalk_application.html.markdown @@ -41,6 +41,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
* `name` - (Required) Name of the application ## Attribute Reference @@ -58,4 +59,4 @@ Application version lifecycle (`appversion_lifecycle`) supports the nested attri * `max_age_in_days` - Number of days to retain an application version. * `delete_source_from_s3` - Specifies whether delete a version's source bundle from S3 when the application version is deleted. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/elastic_beanstalk_hosted_zone.html.markdown b/website/docs/cdktf/python/d/elastic_beanstalk_hosted_zone.html.markdown index 0c93b3e0af69..c2f1760a41d4 100644 --- a/website/docs/cdktf/python/d/elastic_beanstalk_hosted_zone.html.markdown +++ b/website/docs/cdktf/python/d/elastic_beanstalk_hosted_zone.html.markdown @@ -33,7 +33,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: -* `region` - (Optional) Region you'd like the zone for. By default, fetches the current region. +* `region` - (Optional) Name of the Region whose hosted zone is desired. Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). ## Attribute Reference @@ -41,6 +41,4 @@ This data source exports the following attributes in addition to the arguments a * `id` - ID of the hosted zone. -* `region` - Region of the hosted zone. 
- - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/elastic_beanstalk_solution_stack.html.markdown b/website/docs/cdktf/python/d/elastic_beanstalk_solution_stack.html.markdown index f7ff8686515a..11081a620a5f 100644 --- a/website/docs/cdktf/python/d/elastic_beanstalk_solution_stack.html.markdown +++ b/website/docs/cdktf/python/d/elastic_beanstalk_solution_stack.html.markdown @@ -36,6 +36,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `most_recent` - (Optional) If more than one result is returned, use the most recent solution stack. * `name_regex` - Regex string to apply to the solution stack list returned @@ -54,4 +55,4 @@ This data source exports the following attributes in addition to the arguments a [beanstalk-platforms]: http://docs.aws.amazon.com/elasticbeanstalk/latest/dg/concepts.platforms.html "AWS Elastic Beanstalk Supported Platforms documentation" - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/elasticache_cluster.html.markdown b/website/docs/cdktf/python/d/elasticache_cluster.html.markdown index 095677c95c3d..d644b8d4b493 100644 --- a/website/docs/cdktf/python/d/elasticache_cluster.html.markdown +++ b/website/docs/cdktf/python/d/elasticache_cluster.html.markdown @@ -35,33 +35,34 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: -* `cluster_id` – (Required) Group identifier. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `cluster_id` - (Required) Group identifier. ## Attribute Reference This data source exports the following attributes in addition to the arguments above: -* `node_type` – The cluster node type. -* `num_cache_nodes` – The number of cache nodes that the cache cluster has. -* `engine` – Name of the cache engine. -* `engine_version` – Version number of the cache engine. +* `node_type` - The cluster node type. +* `num_cache_nodes` - The number of cache nodes that the cache cluster has. +* `engine` - Name of the cache engine. +* `engine_version` - Version number of the cache engine. * `ip_discovery` - The IP version advertised in the discovery protocol. * `network_type` - The IP versions for cache cluster connections. -* `subnet_group_name` – Name of the subnet group associated to the cache cluster. -* `security_group_ids` – List VPC security groups associated with the cache cluster. -* `parameter_group_name` – Name of the parameter group associated with this cache cluster. +* `subnet_group_name` - Name of the subnet group associated to the cache cluster. +* `security_group_ids` - List of VPC security groups associated with the cache cluster. +* `parameter_group_name` - Name of the parameter group associated with this cache cluster. * `replication_group_id` - The replication group to which this cache cluster belongs. * `log_delivery_configuration` - Redis [SLOWLOG](https://redis.io/commands/slowlog) or Redis [Engine Log](https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/Log_Delivery.html#Log_contents-engine-log) delivery settings. -* `maintenance_window` – Specifies the weekly time range for when maintenance +* `maintenance_window` - Specifies the weekly time range for when maintenance on the cache cluster is performed. 
* `snapshot_window` - Daily time range (in UTC) during which ElastiCache will begin taking a daily snapshot of the cache cluster. * `snapshot_retention_limit` - The number of days for which ElastiCache will retain automatic cache cluster snapshots before deleting them. * `availability_zone` - Availability Zone for the cache cluster. -* `notification_topic_arn` – An ARN of an +* `notification_topic_arn` - An ARN of an SNS topic that ElastiCache notifications get sent to. -* `port` – The port number on which each of the cache nodes will +* `port` - The port number on which each of the cache nodes will accept connections. * `configuration_endpoint` - (Memcached only) Configuration endpoint to allow host discovery. * `cluster_address` - (Memcached only) DNS name of the cache cluster without the port appended. @@ -70,4 +71,4 @@ accept connections. Referenceable e.g., as `${data.aws_elasticache_cluster.bar.cache_nodes.0.address}` * `tags` - Tags assigned to the resource - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/elasticache_replication_group.html.markdown b/website/docs/cdktf/python/d/elasticache_replication_group.html.markdown index 3fd1b560e6a3..36da1c79a9ef 100644 --- a/website/docs/cdktf/python/d/elasticache_replication_group.html.markdown +++ b/website/docs/cdktf/python/d/elasticache_replication_group.html.markdown @@ -35,7 +35,8 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: -* `replication_group_id` – (Required) Identifier for the replication group. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `replication_group_id` - (Required) Identifier for the replication group. 
## Attribute Reference @@ -46,8 +47,8 @@ This data source exports the following attributes in addition to the arguments a * `auth_token_enabled` - Whether an AuthToken (password) is enabled. * `automatic_failover_enabled` - A flag whether a read-only replica will be automatically promoted to read/write primary if the existing primary fails. * `cluster_mode` - Whether cluster mode is enabled or disabled. -* `node_type` – The cluster node type. -* `num_cache_clusters` – The number of cache clusters that the replication group has. +* `node_type` - The cluster node type. +* `num_cache_clusters` - The number of cache clusters that the replication group has. * `num_node_groups` - Number of node groups (shards) for the replication group. * `member_clusters` - Identifiers of all the nodes that are part of this replication group. * `multi_az_enabled` - Whether Multi-AZ Support is enabled for the replication group. @@ -55,9 +56,9 @@ This data source exports the following attributes in addition to the arguments a * `log_delivery_configuration` - Redis [SLOWLOG](https://redis.io/commands/slowlog) or Redis [Engine Log](https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/Log_Delivery.html#Log_contents-engine-log) delivery settings. * `snapshot_window` - Daily time range (in UTC) during which ElastiCache begins taking a daily snapshot of your node group (shard). * `snapshot_retention_limit` - The number of days for which ElastiCache retains automatic cache cluster snapshots before deleting them. -* `port` – The port number on which the configuration endpoint will accept connections. +* `port` - The port number on which the configuration endpoint will accept connections. * `configuration_endpoint_address` - The configuration endpoint address to allow host discovery. * `primary_endpoint_address` - The endpoint of the primary node in this node group (shard). * `reader_endpoint_address` - The endpoint of the reader node in this node group (shard). 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/elasticache_reserved_cache_node_offering.html.markdown b/website/docs/cdktf/python/d/elasticache_reserved_cache_node_offering.html.markdown index cff4bcef4ba1..8206781700f1 100644 --- a/website/docs/cdktf/python/d/elasticache_reserved_cache_node_offering.html.markdown +++ b/website/docs/cdktf/python/d/elasticache_reserved_cache_node_offering.html.markdown @@ -38,6 +38,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `cache_node_type` - (Required) Node type for the reserved cache node. See AWS documentation for information on [supported node types for Redis](https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/CacheNodes.SupportedTypes.html) and [guidance on selecting node types for Redis](https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/nodes-select-size.html). See AWS documentation for information on [supported node types for Memcached](https://docs.aws.amazon.com/AmazonElastiCache/latest/mem-ug/CacheNodes.SupportedTypes.html) and [guidance on selecting node types for Memcached](https://docs.aws.amazon.com/AmazonElastiCache/latest/mem-ug/nodes-select-size.html). @@ -58,4 +59,4 @@ This data source exports the following attributes in addition to the arguments a * `fixed_price` - Fixed price charged for this reserved cache node. * `offering_id` - Unique identifier for the reservation. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/elasticache_serverless_cache.html.markdown b/website/docs/cdktf/python/d/elasticache_serverless_cache.html.markdown index dd37f479cf86..ab9db064a96e 100644 --- a/website/docs/cdktf/python/d/elasticache_serverless_cache.html.markdown +++ b/website/docs/cdktf/python/d/elasticache_serverless_cache.html.markdown @@ -35,7 +35,8 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: -* `name` – (Required) Identifier for the serverless cache. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `name` - (Required) Identifier for the serverless cache. ## Attribute Reference @@ -47,7 +48,7 @@ This data source exports the following attributes in addition to the arguments a * `daily_snapshot_time` - The daily time that snapshots will be created from the new serverless cache. Only available for engine types `"redis"` and `"valkey"`. * `description` - Description of the serverless cache. * `endpoint` - Represents the information required for client programs to connect to the cache. See [`endpoint` Block](#endpoint-block) for details. -* `engine` – Name of the cache engine. +* `engine` - Name of the cache engine. * `full_engine_version` - The name and version number of the engine the serverless cache is compatible with. * `kms_key_id` - ARN of the customer managed key for encrypting the data at rest. * `major_engine_version` - The version number of the engine the serverless cache is compatible with. 
@@ -55,7 +56,7 @@ This data source exports the following attributes in addition to the arguments a * `security_group_ids` - A list of the one or more VPC security groups associated with the serverless cache. * `snapshot_retention_limit` - The number of snapshots that will be retained for the serverless cache. Available for Redis only. * `status` - The current status of the serverless cache. -* `subnet_ids` – A list of the identifiers of the subnets where the VPC endpoint for the serverless cache are deployed. +* `subnet_ids` - A list of the identifiers of the subnets where the VPC endpoint for the serverless cache are deployed. * `user_group_id` - The identifier of the UserGroup associated with the serverless cache. Available for Redis only. ### `cache_usage_limits` Block @@ -94,4 +95,4 @@ The `reader_endpoint` block exports the following attributes: * `address` - The DNS hostname of the cache node. * `port` - The port number that the cache engine is listening on. Set as integer. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/elasticache_subnet_group.html.markdown b/website/docs/cdktf/python/d/elasticache_subnet_group.html.markdown index 841f49777848..fcef7594807a 100644 --- a/website/docs/cdktf/python/d/elasticache_subnet_group.html.markdown +++ b/website/docs/cdktf/python/d/elasticache_subnet_group.html.markdown @@ -8,7 +8,7 @@ description: |- -# Resource: aws_elasticache_subnet_group +# Data Source: aws_elasticache_subnet_group Provides information about a ElastiCache Subnet Group. @@ -33,8 +33,9 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -The following arguments are required: +This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the subnet group. ## Attribute Reference @@ -48,4 +49,4 @@ This data source exports the following attributes in addition to the arguments a * `tags` - Map of tags assigned to the subnet group. * `vpc_id` - The Amazon Virtual Private Cloud identifier (VPC ID) of the cache subnet group. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/elasticache_user.html.markdown b/website/docs/cdktf/python/d/elasticache_user.html.markdown index 2657b0f8a120..2da36e3094c9 100644 --- a/website/docs/cdktf/python/d/elasticache_user.html.markdown +++ b/website/docs/cdktf/python/d/elasticache_user.html.markdown @@ -35,7 +35,8 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: -* `user_id` – (Required) Identifier for the user. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `user_id` - (Required) Identifier for the user. ## Attribute Reference @@ -45,4 +46,4 @@ This data source exports the following attributes in addition to the arguments a * `user_name` - User name of the user. * `access_string` - String for what access a user possesses within the associated ElastiCache replication groups or clusters. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/elasticsearch_domain.html.markdown b/website/docs/cdktf/python/d/elasticsearch_domain.html.markdown index 6b2d0f878051..0a29be89d04b 100644 --- a/website/docs/cdktf/python/d/elasticsearch_domain.html.markdown +++ b/website/docs/cdktf/python/d/elasticsearch_domain.html.markdown @@ -35,18 +35,19 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: -* `domain_name` – (Required) Name of the domain. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `domain_name` - (Required) Name of the domain. ## Attribute Reference This data source exports the following attributes in addition to the arguments above: -* `access_policies` – The policy document attached to the domain. +* `access_policies` - The policy document attached to the domain. * `advanced_options` - Key-value string pairs to specify advanced configuration options. * `advanced_security_options` - Status of the Elasticsearch domain's advanced security options. The block consists of the following attributes: * `enabled` - Whether advanced security is enabled. * `internal_user_database_enabled` - Whether the internal user database is enabled. -* `arn` – The ARN of the domain. +* `arn` - The ARN of the domain. * `auto_tune_options` - Configuration of the Auto-Tune options of the domain. * `desired_state` - The Auto-Tune desired state for the domain. * `maintenance_schedule` - A list of the nested configurations for the Auto-Tune maintenance windows of the domain. @@ -75,20 +76,20 @@ This data source exports the following attributes in addition to the arguments a * `user_pool_id` - The Cognito User pool used by the domain. 
* `identity_pool_id` - The Cognito Identity pool used by the domain. * `role_arn` - The IAM Role with the AmazonESCognitoAccess policy attached. -* `created` – Status of the creation of the domain. -* `deleted` – Status of the deletion of the domain. -* `domain_id` – Unique identifier for the domain. +* `created` - Status of the creation of the domain. +* `deleted` - Status of the deletion of the domain. +* `domain_id` - Unique identifier for the domain. * `ebs_options` - EBS Options for the instances in the domain. * `ebs_enabled` - Whether EBS volumes are attached to data nodes in the domain. * `throughput` - The throughput (in MiB/s) of the EBS volumes attached to data nodes. * `volume_type` - The type of EBS volumes attached to data nodes. * `volume_size` - The size of EBS volumes attached to data nodes (in GB). * `iops` - The baseline input/output (I/O) performance of EBS volumes attached to data nodes. -* `elasticsearch_version` – Elasticsearch version for the domain. +* `elasticsearch_version` - Elasticsearch version for the domain. * `encryption_at_rest` - Domain encryption at rest related options. * `enabled` - Whether encryption at rest is enabled in the domain. * `kms_key_id` - The KMS key id used to encrypt data at rest. -* `endpoint` – Domain-specific endpoint used to submit index, search, and data upload requests. +* `endpoint` - Domain-specific endpoint used to submit index, search, and data upload requests. * `kibana_endpoint` - Domain-specific endpoint used to access the Kibana application. * `log_publishing_options` - Domain log publishing related options. * `log_type` - The type of Elasticsearch log being published. @@ -96,7 +97,7 @@ This data source exports the following attributes in addition to the arguments a * `enabled` - Whether log publishing is enabled. * `node_to_node_encryption` - Domain in transit encryption related options. * `enabled` - Whether node to node encryption is enabled. 
-* `processing` – Status of a configuration change in the domain. +* `processing` - Status of a configuration change in the domain. -* `snapshot_options` – Domain snapshot related options. +* `snapshot_options` - Domain snapshot related options. * `automated_snapshot_start_hour` - Hour during which the service takes an automated daily snapshot of the indices in the domain. * `tags` - Tags assigned to the domain. @@ -106,4 +107,4 @@ This data source exports the following attributes in addition to the arguments a * `subnet_ids` - The subnets used by the domain. * `vpc_id` - The VPC used by the domain. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/elb.html.markdown b/website/docs/cdktf/python/d/elb.html.markdown index 546287d2cd8b..8ce4418e25f1 100644 --- a/website/docs/cdktf/python/d/elb.html.markdown +++ b/website/docs/cdktf/python/d/elb.html.markdown @@ -47,6 +47,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Unique name of the load balancer. ## Attribute Reference @@ -56,4 +57,4 @@ This data source exports the following attributes in addition to the arguments a See the [ELB Resource](/docs/providers/aws/r/elb.html) for details on the returned attributes - they are identical. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/elb_hosted_zone_id.html.markdown b/website/docs/cdktf/python/d/elb_hosted_zone_id.html.markdown index c62487b955e3..40141c517d05 100644 --- a/website/docs/cdktf/python/d/elb_hosted_zone_id.html.markdown +++ b/website/docs/cdktf/python/d/elb_hosted_zone_id.html.markdown @@ -45,13 +45,12 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: -* `region` - (Optional) Name of the region whose AWS ELB HostedZoneId is desired. - Defaults to the region from the AWS provider configuration. +* `region` - (Optional) Name of the Region whose AWS ELB HostedZoneId is desired. Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). ## Attribute Reference This data source exports the following attributes in addition to the arguments above: -* `id` - ID of the AWS ELB HostedZoneId in the selected region. +* `id` - ID of the AWS ELB HostedZoneId in the selected Region. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/elb_service_account.html.markdown b/website/docs/cdktf/python/d/elb_service_account.html.markdown index f1fdfa7640d7..c7e143de2946 100644 --- a/website/docs/cdktf/python/d/elb_service_account.html.markdown +++ b/website/docs/cdktf/python/d/elb_service_account.html.markdown @@ -82,14 +82,13 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: -* `region` - (Optional) Name of the region whose AWS ELB account ID is desired. - Defaults to the region from the AWS provider configuration. +* `region` - (Optional) Name of the Region whose AWS ELB account ID is desired. Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
## Attribute Reference This data source exports the following attributes in addition to the arguments above: -* `id` - ID of the AWS ELB service account in the selected region. -* `arn` - ARN of the AWS ELB service account in the selected region. +* `id` - ID of the AWS ELB service account in the selected Region. +* `arn` - ARN of the AWS ELB service account in the selected Region. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/emr_release_labels.html.markdown b/website/docs/cdktf/python/d/emr_release_labels.html.markdown index 3b02677d1dca..a006c0417c51 100644 --- a/website/docs/cdktf/python/d/emr_release_labels.html.markdown +++ b/website/docs/cdktf/python/d/emr_release_labels.html.markdown @@ -38,7 +38,8 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: -* `filters` – (Optional) Filters the results of the request. Prefix specifies the prefix of release labels to return. Application specifies the application (with/without version) of release labels to return. See [Filters](#filters). +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `filters` - (Optional) Filters the results of the request. Prefix specifies the prefix of release labels to return. Application specifies the application (with/without version) of release labels to return. See [Filters](#filters). ### Filters @@ -51,4 +52,4 @@ This data source exports the following attributes in addition to the arguments a * `release_labels` - Returned release labels. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/emr_supported_instance_types.html.markdown b/website/docs/cdktf/python/d/emr_supported_instance_types.html.markdown index 04d555093075..25ac9c0a5a2e 100644 --- a/website/docs/cdktf/python/d/emr_supported_instance_types.html.markdown +++ b/website/docs/cdktf/python/d/emr_supported_instance_types.html.markdown @@ -78,8 +78,9 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -The following arguments are required: +This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `release_label` - (Required) Amazon EMR release label. For more information about Amazon EMR releases and their included application versions and features, see the [Amazon EMR Release Guide](https://docs.aws.amazon.com/emr/latest/ReleaseGuide/emr-release-components.html). ## Attribute Reference @@ -102,4 +103,4 @@ This data source exports the following attributes in addition to the arguments a * `type` - Amazon EC2 instance type. For example, `m5.xlarge`. * `vcpu` - The number of vCPUs available for the instance type. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/emrcontainers_virtual_cluster.html.markdown b/website/docs/cdktf/python/d/emrcontainers_virtual_cluster.html.markdown index c0e51b08621d..ffc2d1c3deba 100644 --- a/website/docs/cdktf/python/d/emrcontainers_virtual_cluster.html.markdown +++ b/website/docs/cdktf/python/d/emrcontainers_virtual_cluster.html.markdown @@ -41,6 +41,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `virtual_cluster_id` - (Required) ID of the cluster. ## Attribute Reference @@ -60,4 +61,4 @@ This data source exports the following attributes in addition to the arguments a * `state` - Status of the EKS cluster. One of `RUNNING`, `TERMINATING`, `TERMINATED`, `ARRESTED`. * `tags` - Key-value mapping of resource tags. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/fis_experiment_templates.html.markdown b/website/docs/cdktf/python/d/fis_experiment_templates.html.markdown index 78da2803825c..42d34992f9c5 100644 --- a/website/docs/cdktf/python/d/fis_experiment_templates.html.markdown +++ b/website/docs/cdktf/python/d/fis_experiment_templates.html.markdown @@ -76,6 +76,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
* `tags` - (Optional) Map of tags, each pair of which must exactly match a pair on the desired experiment templates. @@ -85,4 +86,4 @@ This data source exports the following attributes in addition to the arguments a * `ids` - List of all the experiment template ids found. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/fsx_ontap_file_system.html.markdown b/website/docs/cdktf/python/d/fsx_ontap_file_system.html.markdown index 62d4cd68c8b2..cf3df5fb2b46 100644 --- a/website/docs/cdktf/python/d/fsx_ontap_file_system.html.markdown +++ b/website/docs/cdktf/python/d/fsx_ontap_file_system.html.markdown @@ -37,6 +37,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `id` - (Required) Identifier of the file system (e.g. `fs-12345678`). ## Attribute Reference @@ -84,4 +85,4 @@ This data source exports the following attributes in addition to the arguments a * `DNSName` - The file system's DNS name. You can mount your file system using its DNS name. * `IpAddresses` - IP addresses of the file system endpoint. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/fsx_ontap_storage_virtual_machine.html.markdown b/website/docs/cdktf/python/d/fsx_ontap_storage_virtual_machine.html.markdown index 672df44e1f0a..b8be58f5c0d2 100644 --- a/website/docs/cdktf/python/d/fsx_ontap_storage_virtual_machine.html.markdown +++ b/website/docs/cdktf/python/d/fsx_ontap_storage_virtual_machine.html.markdown @@ -60,6 +60,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `filter` - (Optional) Configuration block. Detailed below. * `id` - (Optional) Identifier of the storage virtual machine (e.g. `svm-12345678`). @@ -121,4 +122,4 @@ The following arguments are supported for `active_directory_configuration` confi * `DNSName` - The file system's DNS name. You can mount your file system using its DNS name. * `IpAddresses` - The SVM endpoint's IP addresses. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/fsx_ontap_storage_virtual_machines.html.markdown b/website/docs/cdktf/python/d/fsx_ontap_storage_virtual_machines.html.markdown index 7ab2e18ad2cb..17b9a2582490 100644 --- a/website/docs/cdktf/python/d/fsx_ontap_storage_virtual_machines.html.markdown +++ b/website/docs/cdktf/python/d/fsx_ontap_storage_virtual_machines.html.markdown @@ -41,6 +41,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `filter` - (Optional) Configuration block. Detailed below. ### filter @@ -58,4 +59,4 @@ This data source exports the following attributes in addition to the arguments a * `ids` - List of all SVM IDs found. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/fsx_openzfs_snapshot.html.markdown b/website/docs/cdktf/python/d/fsx_openzfs_snapshot.html.markdown index 8b9d50ad5af7..68834f957242 100644 --- a/website/docs/cdktf/python/d/fsx_openzfs_snapshot.html.markdown +++ b/website/docs/cdktf/python/d/fsx_openzfs_snapshot.html.markdown @@ -42,10 +42,9 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `most_recent` - (Optional) If more than one result is returned, use the most recent snapshot. - * `snapshot_ids` - (Optional) Returns information on a specific snapshot_id. - * `filter` - (Optional) One or more name/value pairs to filter off of. The supported names are file-system-id or volume-id. @@ -61,4 +60,4 @@ This data source exports the following attributes in addition to the arguments a * `tags` - List of Tag values, with a maximum of 50 elements. * `volume_id` - ID of the volume that the snapshot is of. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/fsx_windows_file_system.html.markdown b/website/docs/cdktf/python/d/fsx_windows_file_system.html.markdown index f7faacb1722a..5779e042e795 100644 --- a/website/docs/cdktf/python/d/fsx_windows_file_system.html.markdown +++ b/website/docs/cdktf/python/d/fsx_windows_file_system.html.markdown @@ -35,8 +35,9 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -The following arguments are required: +This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `id` - (Required) Identifier of the file system (e.g. `fs-12345678`). ## Attribute Reference @@ -66,4 +67,4 @@ This data source exports the following attributes in addition to the arguments a * `vpc_id` - The ID of the primary virtual private cloud (VPC) for the file system. * `weekly_maintenance_start_time` - The preferred start time (in `d:HH:MM` format) to perform weekly maintenance, in the UTC time zone. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/glue_catalog_table.html.markdown b/website/docs/cdktf/python/d/glue_catalog_table.html.markdown index b4978e41655e..558a02cf2929 100644 --- a/website/docs/cdktf/python/d/glue_catalog_table.html.markdown +++ b/website/docs/cdktf/python/d/glue_catalog_table.html.markdown @@ -36,6 +36,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the table. * `database_name` - (Required) Name of the metadata database where the table metadata resides. * `catalog_id` - (Optional) ID of the Glue Catalog and database where the table metadata resides. If omitted, this defaults to the current AWS Account ID. @@ -69,6 +70,7 @@ This data source exports the following attributes in addition to the arguments a * `comment` - Free-form text comment. * `name` - Name of the Partition Key. +* `parameters` - Map of key-value pairs. * `type` - Datatype of data in the Partition Key. ### storage_descriptor @@ -131,4 +133,4 @@ This data source exports the following attributes in addition to the arguments a * `name` - Name of the target table. * `region` - Region of the target table. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/glue_connection.html.markdown b/website/docs/cdktf/python/d/glue_connection.html.markdown index 9113eb122968..4e43534c66af 100644 --- a/website/docs/cdktf/python/d/glue_connection.html.markdown +++ b/website/docs/cdktf/python/d/glue_connection.html.markdown @@ -35,6 +35,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `id` - (Required) Concatenation of the catalog ID and connection name. For example, if your account ID is `123456789123` and the connection name is `conn` then the ID is `123456789123:conn`. 
@@ -47,10 +48,10 @@ This data source exports the following attributes in addition to the arguments a * `athena_properties` - A map of connection properties specific to the Athena compute environment. * `connection_properties` - A map of connection properties. * `connection_type` - Type of Glue Connection. -* `description` – Description of the connection. -* `match_criteria` – A list of criteria that can be used in selecting this connection. +* `description` - Description of the connection. +* `match_criteria` - A list of criteria that can be used in selecting this connection. * `name` - Name of the Glue Connection. * `physical_connection_requirements` - A map of physical connection requirements, such as VPC and SecurityGroup. * `tags` - Tags assigned to the resource - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/glue_data_catalog_encryption_settings.html.markdown b/website/docs/cdktf/python/d/glue_data_catalog_encryption_settings.html.markdown index 2c7ea2c5195d..accb41951eb5 100644 --- a/website/docs/cdktf/python/d/glue_data_catalog_encryption_settings.html.markdown +++ b/website/docs/cdktf/python/d/glue_data_catalog_encryption_settings.html.markdown @@ -36,14 +36,15 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `catalog_id` - (Required) ID of the Data Catalog. This is typically the AWS account ID. ## Attribute Reference This data source exports the following attributes in addition to the arguments above: -* `data_catalog_encryption_settings` – The security configuration to set. see [Data Catalog Encryption Settings](#data_catalog_encryption_settings). 
-* `id` – The ID of the Data Catalog to set the security configuration for. +* `data_catalog_encryption_settings` - The security configuration to set. See [Data Catalog Encryption Settings](#data_catalog_encryption_settings). +* `id` - The ID of the Data Catalog to set the security configuration for. ### data_catalog_encryption_settings @@ -61,4 +62,4 @@ This data source exports the following attributes in addition to the arguments a * `catalog_encryption_service_role` - The ARN of the AWS IAM role used for accessing encrypted Data Catalog data. * `sse_aws_kms_key_id` - ARN of the AWS KMS key to use for encryption at rest. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/glue_registry.html.markdown b/website/docs/cdktf/python/d/glue_registry.html.markdown index 2cc9eafff191..3d2fa2a06154 100644 --- a/website/docs/cdktf/python/d/glue_registry.html.markdown +++ b/website/docs/cdktf/python/d/glue_registry.html.markdown @@ -35,8 +35,9 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -The following arguments are required: +This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the Glue Registry. ## Attribute Reference @@ -46,4 +47,4 @@ This data source exports the following attributes in addition to the arguments a * `arn` - Amazon Resource Name (ARN) of Glue Registry. * `description` - A description of the registry.
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/glue_script.html.markdown b/website/docs/cdktf/python/d/glue_script.html.markdown index 55aa6890d7da..c4faef94ac51 100644 --- a/website/docs/cdktf/python/d/glue_script.html.markdown +++ b/website/docs/cdktf/python/d/glue_script.html.markdown @@ -222,6 +222,7 @@ output "scala_code" { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `dag_edge` - (Required) List of the edges in the DAG. Defined below. * `dag_node` - (Required) List of the nodes in the DAG. Defined below. * `language` - (Optional) Programming language of the resulting code from the DAG. Defaults to `PYTHON`. Valid values are `PYTHON` and `SCALA`. @@ -253,4 +254,4 @@ This data source exports the following attributes in addition to the arguments a * `python_script` - Python script generated from the DAG when the `language` argument is set to `PYTHON`. * `scala_code` - Scala code generated from the DAG when the `language` argument is set to `SCALA`. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/grafana_workspace.html.markdown b/website/docs/cdktf/python/d/grafana_workspace.html.markdown index 3b96eea006df..061720efd0a2 100644 --- a/website/docs/cdktf/python/d/grafana_workspace.html.markdown +++ b/website/docs/cdktf/python/d/grafana_workspace.html.markdown @@ -35,8 +35,9 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -The following arguments are required: +This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `workspace_id` - (Required) Grafana workspace ID. ## Attribute Reference @@ -62,4 +63,4 @@ This data source exports the following attributes in addition to the arguments a * `status` - Status of the Grafana workspace. * `tags` - Tags assigned to the resource - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/guardduty_detector.html.markdown b/website/docs/cdktf/python/d/guardduty_detector.html.markdown index 0bab4ad7c187..5e3ffdae021c 100644 --- a/website/docs/cdktf/python/d/guardduty_detector.html.markdown +++ b/website/docs/cdktf/python/d/guardduty_detector.html.markdown @@ -33,6 +33,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `id` - (Optional) ID of the detector. 
## Attribute Reference @@ -51,4 +52,4 @@ This data source exports the following attributes in addition to the arguments a * `status` - Current status of the detector. * `tags` - Map of tags for the resource. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/guardduty_finding_ids.html.markdown b/website/docs/cdktf/python/d/guardduty_finding_ids.html.markdown index 1c9ef551ac92..6a2e8fccab45 100644 --- a/website/docs/cdktf/python/d/guardduty_finding_ids.html.markdown +++ b/website/docs/cdktf/python/d/guardduty_finding_ids.html.markdown @@ -35,8 +35,9 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -The following arguments are required: +This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `detector_id` - (Required) ID of the GuardDuty detector. ## Attribute Reference @@ -46,4 +47,4 @@ This data source exports the following attributes in addition to the arguments a * `has_findings` - Indicates whether findings are present for the specified detector. * `finding_ids` - A list of finding IDs for the specified detector. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/iam_principal_policy_simulation.html.markdown b/website/docs/cdktf/python/d/iam_principal_policy_simulation.html.markdown index 2f55b46207f6..963fedc81346 100644 --- a/website/docs/cdktf/python/d/iam_principal_policy_simulation.html.markdown +++ b/website/docs/cdktf/python/d/iam_principal_policy_simulation.html.markdown @@ -65,11 +65,11 @@ from cdktf import TerraformStack # Provider bindings are generated by running `cdktf get`. # See https://cdk.tf/provider-generation for more details. 
# -from imports.aws.s3_bucket_object import S3BucketObject +from imports.aws.s3_object import S3Object class MyConvertedCode(TerraformStack): def __init__(self, scope, name, *, key): super().__init__(scope, name) - S3BucketObject(self, "example", + S3Object(self, "example", bucket="my-test-bucket", depends_on=[s3_object_access], key=key @@ -236,4 +236,4 @@ This data source exports the following attributes in addition to the arguments a * `missing_context_keys` - A set of context keys (or condition keys) that were needed by some of the policies contributing to this result but not specified using a `context` block in the configuration. Missing or incorrect context keys will typically cause a simulated request to be disallowed. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/iam_server_certificate.html.markdown b/website/docs/cdktf/python/d/iam_server_certificate.html.markdown index 4f5e74ee6afa..db797caf678a 100644 --- a/website/docs/cdktf/python/d/iam_server_certificate.html.markdown +++ b/website/docs/cdktf/python/d/iam_server_certificate.html.markdown @@ -65,31 +65,4 @@ This data source exports the following attributes in addition to the arguments a * `certificate_body` is the public key certificate (PEM-encoded). This is useful when [configuring back-end instance authentication](http://docs.aws.amazon.com/elasticloadbalancing/latest/classic/elb-create-https-ssl-load-balancer.html) policy for load balancer * `certificate_chain` is the public key certificate chain (PEM-encoded) if exists, empty otherwise -## Import - -In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import an IAM server certificate using `name`. For example: - -```python -# DO NOT EDIT. 
Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug -from constructs import Construct -from cdktf import TerraformStack -# -# Provider bindings are generated by running `cdktf get`. -# See https://cdk.tf/provider-generation for more details. -# -from imports.aws.iam_server_certificate import IamServerCertificate -class MyConvertedCode(TerraformStack): - def __init__(self, scope, name): - super().__init__(scope, name) - IamServerCertificate.generate_config_for_import(self, "example", "example") -``` - -Using `terraform import`, import an IAM server certificate using `name`. For example: - -```console -% terraform import aws_iam_server_certificate.example example -``` - -Import will read in the certificate body, certificate chain (if it exists), ID, name, path, and ARN. It will not retrieve the private key which is not available through the AWS API. - - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/identitystore_group.html.markdown b/website/docs/cdktf/python/d/identitystore_group.html.markdown index ae7a9e3a07f5..57236067fdf1 100644 --- a/website/docs/cdktf/python/d/identitystore_group.html.markdown +++ b/website/docs/cdktf/python/d/identitystore_group.html.markdown @@ -53,8 +53,8 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `alternate_identifier` (Optional) A unique identifier for the group that is not the primary identifier. Conflicts with `group_id` and `filter`. Detailed below. -* `filter` - (Optional, **Deprecated** use the `alternate_identifier` attribute instead) Configuration block for filtering by a unique attribute of the group. Detailed below. 
* `group_id` - (Optional) The identifier for a group in the Identity Store. -> Exactly one of the above arguments must be provided. Passing both `filter` and `group_id` is allowed for backwards compatibility. @@ -75,15 +75,6 @@ The `external_id` configuration block supports the following arguments: * `id` - (Required) The identifier issued to this resource by an external identity provider. * `issuer` - (Required) The issuer for an external identifier. -### `filter` Configuration Block - -~> The `filter` configuration block has been deprecated. Use `alternate_identifier` instead. - -The following arguments are supported by the `filter` configuration block: - -* `attribute_path` - (Required) Attribute path that is used to specify which attribute name to search. Currently, `DisplayName` is the only valid attribute path. -* `attribute_value` - (Required) Value for an attribute. - ### `unique_attribute` Configuration Block The `unique_attribute` configuration block supports the following arguments: @@ -102,4 +93,4 @@ This data source exports the following attributes in addition to the arguments a * `id` - The identifier issued to this resource by an external identity provider. * `issuer` - The issuer for an external identifier. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/identitystore_group_memberships.html.markdown b/website/docs/cdktf/python/d/identitystore_group_memberships.html.markdown index 8398eb9c51bb..63a264bfd3c7 100644 --- a/website/docs/cdktf/python/d/identitystore_group_memberships.html.markdown +++ b/website/docs/cdktf/python/d/identitystore_group_memberships.html.markdown @@ -57,6 +57,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `group_id` - (Required) The identifier for a group in the Identity Store. * `identity_store_id` - (Required) Identity Store ID associated with the Single Sign-On Instance. @@ -77,4 +78,4 @@ This data source exports the following attributes in addition to the arguments a * `user_id` - User identifier of the group member. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/identitystore_groups.html.markdown b/website/docs/cdktf/python/d/identitystore_groups.html.markdown index fd4b3410dce6..313f1b739bbd 100644 --- a/website/docs/cdktf/python/d/identitystore_groups.html.markdown +++ b/website/docs/cdktf/python/d/identitystore_groups.html.markdown @@ -40,8 +40,9 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -The following arguments are required: +This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `identity_store_id` - (Required) Identity Store ID associated with the Single Sign-On (SSO) Instance. ## Attribute Reference @@ -56,4 +57,4 @@ This data source exports the following attributes in addition to the arguments a * `id` - Identifier issued to this resource by an external identity provider. * `issuer` - Issuer for an external identifier. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/identitystore_user.html.markdown b/website/docs/cdktf/python/d/identitystore_user.html.markdown index 800caa4982f7..afb996bc67e2 100644 --- a/website/docs/cdktf/python/d/identitystore_user.html.markdown +++ b/website/docs/cdktf/python/d/identitystore_user.html.markdown @@ -53,8 +53,8 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `alternate_identifier` (Optional) A unique identifier for a user or group that is not the primary identifier. Conflicts with `user_id` and `filter`. Detailed below. -* `filter` - (Optional, **Deprecated** use the `alternate_identifier` attribute instead) Configuration block for filtering by a unique attribute of the user. Detailed below. * `user_id` - (Optional) The identifier for a user in the Identity Store. -> Exactly one of the above arguments must be provided. Passing both `filter` and `user_id` is allowed for backwards compatibility. @@ -75,15 +75,6 @@ The `external_id` configuration block supports the following arguments: * `id` - (Required) The identifier issued to this resource by an external identity provider. * `issuer` - (Required) The issuer for an external identifier. -### `filter` Configuration Block - -~> The `filter` configuration block has been deprecated. Use `alternate_identifier` instead. - -The following arguments are supported by the `filter` configuration block: - -* `attribute_path` - (Required) Attribute path that is used to specify which attribute name to search. Currently, `UserName` is the only valid attribute path. -* `attribute_value` - (Required) Value for an attribute. 
- ### `unique_attribute` Configuration Block The `unique_attribute` configuration block supports the following arguments: @@ -133,4 +124,4 @@ This data source exports the following attributes in addition to the arguments a * `user_name` - User's user name value. * `user_type` - The user type. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/identitystore_users.html.markdown b/website/docs/cdktf/python/d/identitystore_users.html.markdown index a0871c5a773f..cfedbd2bf617 100644 --- a/website/docs/cdktf/python/d/identitystore_users.html.markdown +++ b/website/docs/cdktf/python/d/identitystore_users.html.markdown @@ -42,6 +42,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `identity_store_id` - (Required) Identity Store ID associated with the Single Sign-On Instance. ## Attribute Reference @@ -87,4 +88,4 @@ This data source exports the following attributes in addition to the arguments a * `user_name` - User's user name value. * `user_type` - User type. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/imagebuilder_component.html.markdown b/website/docs/cdktf/python/d/imagebuilder_component.html.markdown index 23a898397be6..f3b5f0c9b200 100644 --- a/website/docs/cdktf/python/d/imagebuilder_component.html.markdown +++ b/website/docs/cdktf/python/d/imagebuilder_component.html.markdown @@ -35,6 +35,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `arn` - (Required) ARN of the component. ## Attribute Reference @@ -55,4 +56,4 @@ This data source exports the following attributes in addition to the arguments a * `type` - Type of the component. * `version` - Version of the component. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/imagebuilder_components.html.markdown b/website/docs/cdktf/python/d/imagebuilder_components.html.markdown index f06702e59bac..428288dcd89a 100644 --- a/website/docs/cdktf/python/d/imagebuilder_components.html.markdown +++ b/website/docs/cdktf/python/d/imagebuilder_components.html.markdown @@ -40,6 +40,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `owner` - (Optional) Owner of the image recipes. Valid values are `Self`, `Shared`, `Amazon` and `ThirdParty`. Defaults to `Self`. 
* `filter` - (Optional) Configuration block(s) for filtering. Detailed below. @@ -57,4 +58,4 @@ This data source exports the following attributes in addition to the arguments a * `arns` - Set of ARNs of the matched Image Builder Components. * `names` - Set of names of the matched Image Builder Components. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/imagebuilder_container_recipe.html.markdown b/website/docs/cdktf/python/d/imagebuilder_container_recipe.html.markdown index fad3260ce44f..a7d62de373ec 100644 --- a/website/docs/cdktf/python/d/imagebuilder_container_recipe.html.markdown +++ b/website/docs/cdktf/python/d/imagebuilder_container_recipe.html.markdown @@ -35,6 +35,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `arn` - (Required) ARN of the container recipe. ## Attribute Reference @@ -78,4 +79,4 @@ This data source exports the following attributes in addition to the arguments a * `version` - Version of the container recipe. * `working_directory` - Working directory used during build and test workflows. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/imagebuilder_container_recipes.html.markdown b/website/docs/cdktf/python/d/imagebuilder_container_recipes.html.markdown index e3121f121f52..9dcccf0e3ddd 100644 --- a/website/docs/cdktf/python/d/imagebuilder_container_recipes.html.markdown +++ b/website/docs/cdktf/python/d/imagebuilder_container_recipes.html.markdown @@ -40,6 +40,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `owner` - (Optional) Owner of the container recipes. Valid values are `Self`, `Shared`, `Amazon` and `ThirdParty`. Defaults to `Self`. * `filter` - (Optional) Configuration block(s) for filtering. Detailed below. @@ -57,4 +58,4 @@ This data source exports the following attributes in addition to the arguments a * `arns` - Set of ARNs of the matched Image Builder Container Recipes. * `names` - Set of names of the matched Image Builder Container Recipes. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/imagebuilder_distribution_configuration.html.markdown b/website/docs/cdktf/python/d/imagebuilder_distribution_configuration.html.markdown index 8130d8429c36..a2faa44ab759 100644 --- a/website/docs/cdktf/python/d/imagebuilder_distribution_configuration.html.markdown +++ b/website/docs/cdktf/python/d/imagebuilder_distribution_configuration.html.markdown @@ -35,6 +35,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `arn` - (Required) ARN of the distribution configuration. ## Attribute Reference @@ -89,4 +90,4 @@ This data source exports the following attributes in addition to the arguments a * `name` - Name of the distribution configuration. * `tags` - Key-value map of resource tags for the distribution configuration. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/imagebuilder_distribution_configurations.html.markdown b/website/docs/cdktf/python/d/imagebuilder_distribution_configurations.html.markdown index cb464d52e8d5..e0e0c31cf2ff 100644 --- a/website/docs/cdktf/python/d/imagebuilder_distribution_configurations.html.markdown +++ b/website/docs/cdktf/python/d/imagebuilder_distribution_configurations.html.markdown @@ -39,6 +39,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `filter` - (Optional) Configuration block(s) for filtering. Detailed below. ## filter Configuration Block @@ -55,4 +56,4 @@ This data source exports the following attributes in addition to the arguments a * `arns` - Set of ARNs of the matched Image Builder Distribution Configurations. * `names` - Set of names of the matched Image Builder Distribution Configurations. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/imagebuilder_image.html.markdown b/website/docs/cdktf/python/d/imagebuilder_image.html.markdown index 82ad8161f107..314d637c91df 100644 --- a/website/docs/cdktf/python/d/imagebuilder_image.html.markdown +++ b/website/docs/cdktf/python/d/imagebuilder_image.html.markdown @@ -37,6 +37,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `arn` - (Required) ARN of the image. The suffix can either be specified with wildcards (`x.x.x`) to fetch the latest build version or a full build version (e.g., `2020.11.26/1`) to fetch an exact version. ## Attribute Reference @@ -74,4 +75,4 @@ This data source exports the following attributes in addition to the arguments a * `tags` - Key-value map of resource tags for the image. * `version` - Version of the image. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/imagebuilder_image_pipeline.html.markdown b/website/docs/cdktf/python/d/imagebuilder_image_pipeline.html.markdown index 6330813eff53..f3d6d1ee4ecf 100644 --- a/website/docs/cdktf/python/d/imagebuilder_image_pipeline.html.markdown +++ b/website/docs/cdktf/python/d/imagebuilder_image_pipeline.html.markdown @@ -35,6 +35,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `arn` - (Required) ARN of the image pipeline. ## Attribute Reference @@ -67,4 +68,4 @@ This data source exports the following attributes in addition to the arguments a * `status` - Status of the image pipeline. * `tags` - Key-value map of resource tags for the image pipeline. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/imagebuilder_image_pipelines.html.markdown b/website/docs/cdktf/python/d/imagebuilder_image_pipelines.html.markdown index 98a3440c16c6..4c79bb242331 100644 --- a/website/docs/cdktf/python/d/imagebuilder_image_pipelines.html.markdown +++ b/website/docs/cdktf/python/d/imagebuilder_image_pipelines.html.markdown @@ -39,6 +39,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `filter` - (Optional) Configuration block(s) for filtering. Detailed below. ### filter Configuration Block @@ -55,4 +56,4 @@ This data source exports the following attributes in addition to the arguments a * `arns` - Set of ARNs of the matched Image Builder Image Pipelines. * `names` - Set of names of the matched Image Builder Image Pipelines. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/imagebuilder_image_recipe.html.markdown b/website/docs/cdktf/python/d/imagebuilder_image_recipe.html.markdown index 0b8d1eea856c..7c08d2e3c393 100644 --- a/website/docs/cdktf/python/d/imagebuilder_image_recipe.html.markdown +++ b/website/docs/cdktf/python/d/imagebuilder_image_recipe.html.markdown @@ -33,8 +33,9 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -The following arguments are required: +This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `arn` - (Required) ARN of the image recipe. ## Attribute Reference @@ -70,4 +71,4 @@ This data source exports the following attributes in addition to the arguments a * `version` - Version of the image recipe. * `working_directory` - Working directory used during build and test workflows. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/imagebuilder_image_recipes.html.markdown b/website/docs/cdktf/python/d/imagebuilder_image_recipes.html.markdown index b02c70abca4b..ff6eb626e0a7 100644 --- a/website/docs/cdktf/python/d/imagebuilder_image_recipes.html.markdown +++ b/website/docs/cdktf/python/d/imagebuilder_image_recipes.html.markdown @@ -40,6 +40,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
* `owner` - (Optional) Owner of the image recipes. Valid values are `Self`, `Shared`, `Amazon` and `ThirdParty`. Defaults to `Self`. * `filter` - (Optional) Configuration block(s) for filtering. Detailed below. @@ -57,4 +58,4 @@ This data source exports the following attributes in addition to the arguments a * `arns` - Set of ARNs of the matched Image Builder Image Recipes. * `names` - Set of names of the matched Image Builder Image Recipes. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/imagebuilder_infrastructure_configuration.html.markdown b/website/docs/cdktf/python/d/imagebuilder_infrastructure_configuration.html.markdown index 3eaeb6eae0c0..e177e95ef6e6 100644 --- a/website/docs/cdktf/python/d/imagebuilder_infrastructure_configuration.html.markdown +++ b/website/docs/cdktf/python/d/imagebuilder_infrastructure_configuration.html.markdown @@ -33,8 +33,9 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -The following arguments are required: +This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `arn` - (Required) ARN of the infrastructure configuration. ## Attribute Reference @@ -67,4 +68,4 @@ This data source exports the following attributes in addition to the arguments a * `tags` - Key-value map of resource tags for the infrastructure configuration. * `terminate_instance_on_failure` - Whether instances are terminated on failure. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/imagebuilder_infrastructure_configurations.html.markdown b/website/docs/cdktf/python/d/imagebuilder_infrastructure_configurations.html.markdown index f14cc1520282..fc2958480b09 100644 --- a/website/docs/cdktf/python/d/imagebuilder_infrastructure_configurations.html.markdown +++ b/website/docs/cdktf/python/d/imagebuilder_infrastructure_configurations.html.markdown @@ -39,6 +39,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `filter` - (Optional) Configuration block(s) for filtering. Detailed below. ## filter Configuration Block @@ -55,4 +56,4 @@ This data source exports the following attributes in addition to the arguments a * `arns` - Set of ARNs of the matched Image Builder Infrastructure Configurations. * `names` - Set of names of the matched Image Builder Infrastructure Configurations. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/inspector_rules_packages.html.markdown b/website/docs/cdktf/python/d/inspector_rules_packages.html.markdown index ce1e74d356db..4ddb8f010586 100644 --- a/website/docs/cdktf/python/d/inspector_rules_packages.html.markdown +++ b/website/docs/cdktf/python/d/inspector_rules_packages.html.markdown @@ -54,7 +54,9 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -This data source does not support any arguments. +This data source supports the following arguments: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). ## Attribute Reference @@ -63,4 +65,4 @@ This data source exports the following attributes in addition to the arguments a * `id` - AWS Region. * `arns` - List of the Amazon Inspector Classic Rules Packages arns available in the AWS region. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/instance.html.markdown b/website/docs/cdktf/python/d/instance.html.markdown index 31d709012182..7602e014adb0 100644 --- a/website/docs/cdktf/python/d/instance.html.markdown +++ b/website/docs/cdktf/python/d/instance.html.markdown @@ -43,12 +43,14 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `instance_id` - (Optional) Specify the exact Instance ID with which to populate the data source. * `instance_tags` - (Optional) Map of tags, each pair of which must exactly match a pair on the desired Instance. -* `filter` - (Optional) One or more name/value pairs to use as filters. There are -several valid keys, for a full reference, check out -[describe-instances in the AWS CLI reference][1]. +* `filter` - (Optional) One or more filters to apply to the search. + If multiple `filter` blocks are provided, they all must be true. + For a full reference of filter names, see [describe-instances in the AWS CLI reference][1]. + See [`filter` Block](#filter-block) below. * `get_password_data` - (Optional) If true, wait for password data to become available and retrieve it. 
Useful for getting the administrator password for instances running Microsoft Windows. The password data is exported to the `password_data` attribute. See [GetPasswordData](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetPasswordData.html) for more information. * `get_user_data` - (Optional) Retrieve Base64 encoded User Data contents into the `user_data_base64` attribute. A SHA-1 hash of the User Data contents will always be present in the `user_data` attribute. Defaults to `false`. @@ -58,6 +60,14 @@ several valid keys, for a full reference, check out Terraform will fail. Ensure that your search is specific enough to return a single Instance ID only. +### `filter` Block + +The `filter` block supports the following arguments: + +* `name` - (Required) Name of the filter. + For a full reference of filter names, see [describe-instances in the AWS CLI reference][1]. +* `values` - (Required) One or more values to match. + ## Attribute Reference `id` is set to the ID of the found Instance. In addition, the following attributes @@ -111,6 +121,7 @@ interpolation. * `outpost_arn` - ARN of the Outpost. * `password_data` - Base-64 encoded encrypted password data for the instance. Useful for getting the administrator password for instances running Microsoft Windows. This attribute is only exported if `get_password_data` is true. See [GetPasswordData](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetPasswordData.html) for more information. * `placement_group` - Placement group of the Instance. +* `placement_group_id` - Placement group ID of the Instance. * `placement_partition_number` - Number of the partition the instance is in. * `private_dns` - Private DNS name assigned to the Instance. Can only be used inside the Amazon EC2, and only available if you've enabled DNS hostnames for your VPC. * `private_dns_name_options` - Options for the instance hostname. @@ -147,4 +158,4 @@ interpolation. 
[1]: http://docs.aws.amazon.com/cli/latest/reference/ec2/describe-instances.html - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/instances.html.markdown b/website/docs/cdktf/python/d/instances.html.markdown index a66e4f5de63b..f46c13740387 100644 --- a/website/docs/cdktf/python/d/instances.html.markdown +++ b/website/docs/cdktf/python/d/instances.html.markdown @@ -66,12 +66,22 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `instance_tags` - (Optional) Map of tags, each pair of which must exactly match a pair on desired instances. * `instance_state_names` - (Optional) List of instance states that should be applicable to the desired instances. The permitted values are: `pending, running, shutting-down, stopped, stopping, terminated`. The default value is `running`. -* `filter` - (Optional) One or more name/value pairs to use as filters. There are -several valid keys, for a full reference, check out -[describe-instances in the AWS CLI reference][1]. +* `filter` - (Optional) One or more filters to apply to the search. + If multiple `filter` blocks are provided, they all must be true. + For a full reference of filter names, see [describe-instances in the AWS CLI reference][1]. + See [`filter` Block](#filter-block) below. + +### `filter` Block + +The `filter` block supports the following arguments: + +* `name` - (Required) Name of the filter. + For a full reference of filter names, see [describe-instances in the AWS CLI reference][1]. +* `values` - (Required) One or more values to match. 
## Attribute Reference @@ -91,4 +101,4 @@ This data source exports the following attributes in addition to the arguments a [1]: http://docs.aws.amazon.com/cli/latest/reference/ec2/describe-instances.html - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/internet_gateway.html.markdown b/website/docs/cdktf/python/d/internet_gateway.html.markdown index 99021ca3f32b..784d2368c2a8 100644 --- a/website/docs/cdktf/python/d/internet_gateway.html.markdown +++ b/website/docs/cdktf/python/d/internet_gateway.html.markdown @@ -42,6 +42,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `internet_gateway_id` - (Optional) ID of the specific Internet Gateway to retrieve. * `tags` - (Optional) Map of tags, each pair of which must exactly match a pair on the desired Internet Gateway. @@ -79,4 +80,4 @@ Each attachment supports the following: - `read` - (Default `20m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/iot_endpoint.html.markdown b/website/docs/cdktf/python/d/iot_endpoint.html.markdown index f497a5125c4e..69e6cf23f759 100644 --- a/website/docs/cdktf/python/d/iot_endpoint.html.markdown +++ b/website/docs/cdktf/python/d/iot_endpoint.html.markdown @@ -55,6 +55,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `endpoint_type` - (Optional) Endpoint type. Valid values: `iot:CredentialProvider`, `iot:Data`, `iot:Data-ATS`, `iot:Jobs`. ## Attribute Reference @@ -68,4 +69,4 @@ This data source exports the following attributes in addition to the arguments a * `iot:Data-ATS`: `IDENTIFIER-ats.iot.REGION.amazonaws.com` * `iot:Jobs`: `IDENTIFIER.jobs.iot.REGION.amazonaws.com` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/iot_registration_code.html.markdown b/website/docs/cdktf/python/d/iot_registration_code.html.markdown index c6c2de575cb8..b28b719fd00e 100644 --- a/website/docs/cdktf/python/d/iot_registration_code.html.markdown +++ b/website/docs/cdktf/python/d/iot_registration_code.html.markdown @@ -48,7 +48,9 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -This data source does not support any arguments. +This data source supports the following arguments: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). ## Attribute Reference @@ -56,4 +58,4 @@ This data source exports the following attributes in addition to the arguments a * `registration_code` - The CA certificate registration code. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/ivs_stream_key.html.markdown b/website/docs/cdktf/python/d/ivs_stream_key.html.markdown index 6143aa48ce55..4d49bcdf8c30 100644 --- a/website/docs/cdktf/python/d/ivs_stream_key.html.markdown +++ b/website/docs/cdktf/python/d/ivs_stream_key.html.markdown @@ -35,8 +35,9 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -The following arguments are required: +This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `channel_arn` - (Required) ARN of the Channel. ## Attribute Reference @@ -47,4 +48,4 @@ This data source exports the following attributes in addition to the arguments a * `tags` - Map of tags assigned to the resource. * `value` - Stream Key value. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/kendra_experience.html.markdown b/website/docs/cdktf/python/d/kendra_experience.html.markdown index ae601a6df671..ec4150a59804 100644 --- a/website/docs/cdktf/python/d/kendra_experience.html.markdown +++ b/website/docs/cdktf/python/d/kendra_experience.html.markdown @@ -36,6 +36,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `experience_id` - (Required) Identifier of the Experience. * `index_id` - (Required) Identifier of the index that contains the Experience. 
@@ -75,4 +76,4 @@ The `endpoints` block supports the following attributes: * `endpoint` - Endpoint of your Amazon Kendra Experience. * `endpoint_type` - Type of endpoint for your Amazon Kendra Experience. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/kendra_faq.html.markdown b/website/docs/cdktf/python/d/kendra_faq.html.markdown index 31f85b15b66c..bcdc204677a1 100644 --- a/website/docs/cdktf/python/d/kendra_faq.html.markdown +++ b/website/docs/cdktf/python/d/kendra_faq.html.markdown @@ -36,6 +36,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `faq_id` - (Required) Identifier of the FAQ. * `index_id` - (Required) Identifier of the index that contains the FAQ. @@ -62,4 +63,4 @@ The `s3_path` configuration block supports the following attributes: * `bucket` - Name of the S3 bucket that contains the file. * `key` - Name of the file. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/kendra_index.html.markdown b/website/docs/cdktf/python/d/kendra_index.html.markdown index 8f15a4f1f064..8bd124f7c283 100644 --- a/website/docs/cdktf/python/d/kendra_index.html.markdown +++ b/website/docs/cdktf/python/d/kendra_index.html.markdown @@ -35,6 +35,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `id` - (Required) Returns information on a specific Index by id. ## Attribute Reference @@ -129,4 +130,4 @@ A `jwt_token_type_configuration` block supports the following attributes: * `url` - Signing key URL. * `user_name_attribute_field` - The user name attribute field. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/kendra_query_suggestions_block_list.html.markdown b/website/docs/cdktf/python/d/kendra_query_suggestions_block_list.html.markdown index 575a63abdfc0..ea366591115e 100644 --- a/website/docs/cdktf/python/d/kendra_query_suggestions_block_list.html.markdown +++ b/website/docs/cdktf/python/d/kendra_query_suggestions_block_list.html.markdown @@ -36,6 +36,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `index_id` - (Required) Identifier of the index that contains the block list. * `query_suggestions_block_list_id` - (Required) Identifier of the block list. @@ -62,4 +63,4 @@ The `source_s3_path` configuration block supports the following attributes: * `bucket` - Name of the S3 bucket that contains the file. * `key` - Name of the file. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/kendra_thesaurus.html.markdown b/website/docs/cdktf/python/d/kendra_thesaurus.html.markdown index 403f02e820a3..50c8256c9014 100644 --- a/website/docs/cdktf/python/d/kendra_thesaurus.html.markdown +++ b/website/docs/cdktf/python/d/kendra_thesaurus.html.markdown @@ -36,6 +36,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `index_id` - (Required) Identifier of the index that contains the Thesaurus. * `thesaurus_id` - (Required) Identifier of the Thesaurus. @@ -63,4 +64,4 @@ The `source_s3_path` configuration block supports the following attributes: * `bucket` - Name of the S3 bucket that contains the file. * `key` - Name of the file. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/key_pair.html.markdown b/website/docs/cdktf/python/d/key_pair.html.markdown index 75fbbd121201..453320081b9c 100644 --- a/website/docs/cdktf/python/d/key_pair.html.markdown +++ b/website/docs/cdktf/python/d/key_pair.html.markdown @@ -52,6 +52,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `key_pair_id` - (Optional) Key Pair ID. * `key_name` - (Optional) Key Pair name. 
* `include_public_key` - (Optional) Whether to include the public key material in the response. @@ -86,4 +87,4 @@ This data source exports the following attributes in addition to the arguments a - `read` - (Default `20m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/kinesis_firehose_delivery_stream.html.markdown b/website/docs/cdktf/python/d/kinesis_firehose_delivery_stream.html.markdown index 7f6cf4b09c2f..a77c6df35024 100644 --- a/website/docs/cdktf/python/d/kinesis_firehose_delivery_stream.html.markdown +++ b/website/docs/cdktf/python/d/kinesis_firehose_delivery_stream.html.markdown @@ -37,6 +37,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the Kinesis Firehose Delivery Stream. ## Attribute Reference @@ -48,4 +49,4 @@ This data source exports the following attributes in addition to the arguments a [1]: https://aws.amazon.com/documentation/firehose/ - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/kinesis_stream.html.markdown b/website/docs/cdktf/python/d/kinesis_stream.html.markdown index 3cf729270a04..441d5f901da1 100644 --- a/website/docs/cdktf/python/d/kinesis_stream.html.markdown +++ b/website/docs/cdktf/python/d/kinesis_stream.html.markdown @@ -38,6 +38,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the Kinesis Stream. ## Attribute Reference @@ -67,4 +68,4 @@ This data source exports the following attributes in addition to the arguments a [3]: https://docs.aws.amazon.com/streams/latest/dev/monitoring-with-cloudwatch.html [4]: https://docs.aws.amazon.com/streams/latest/dev/how-do-i-size-a-stream.html - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/kinesis_stream_consumer.html.markdown b/website/docs/cdktf/python/d/kinesis_stream_consumer.html.markdown index 1983ef56971a..b6f9792ee061 100644 --- a/website/docs/cdktf/python/d/kinesis_stream_consumer.html.markdown +++ b/website/docs/cdktf/python/d/kinesis_stream_consumer.html.markdown @@ -38,6 +38,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `arn` - (Optional) ARN of the stream consumer. * `name` - (Optional) Name of the stream consumer. * `stream_arn` - (Required) ARN of the data stream the consumer is registered with. 
@@ -52,4 +53,4 @@ This data source exports the following attributes in addition to the arguments a [1]: https://docs.aws.amazon.com/streams/latest/dev/amazon-kinesis-consumers.html - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/kms_alias.html.markdown b/website/docs/cdktf/python/d/kms_alias.html.markdown index cd23f6455489..1215cb470123 100644 --- a/website/docs/cdktf/python/d/kms_alias.html.markdown +++ b/website/docs/cdktf/python/d/kms_alias.html.markdown @@ -37,6 +37,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Display name of the alias. The name must start with the word "alias" followed by a forward slash (alias/) ## Attribute Reference @@ -50,4 +51,4 @@ This data source exports the following attributes in addition to the arguments a * `name` - Name of the alias * `name_prefix` - Prefix of the alias - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/kms_ciphertext.html.markdown b/website/docs/cdktf/python/d/kms_ciphertext.html.markdown index 9ba68720d295..72c440cc0877 100644 --- a/website/docs/cdktf/python/d/kms_ciphertext.html.markdown +++ b/website/docs/cdktf/python/d/kms_ciphertext.html.markdown @@ -47,6 +47,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `plaintext` - (Required) Data to be encrypted. Note that this may show up in logs, and it will be stored in the state file. * `key_id` - (Required) Globally unique key ID for the customer master key. * `context` - (Optional) An optional mapping that makes up the encryption context. @@ -58,4 +59,4 @@ This data source exports the following attributes in addition to the arguments a * `id` - Globally unique key ID for the customer master key. * `ciphertext_blob` - Base64 encoded ciphertext - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/kms_custom_key_store.html.markdown b/website/docs/cdktf/python/d/kms_custom_key_store.html.markdown index 04f39717e42a..b0a4a324cc18 100644 --- a/website/docs/cdktf/python/d/kms_custom_key_store.html.markdown +++ b/website/docs/cdktf/python/d/kms_custom_key_store.html.markdown @@ -37,6 +37,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `custom_key_store_id` - (Optional) The ID for the custom key store. * `custom_key_store_name` - (Optional) The user-specified friendly name for the custom key store. @@ -50,4 +51,4 @@ This data source exports the following attributes in addition to the arguments a * `creation_date` - The date and time when the custom key store was created. * `trust_anchor_certificate` - The trust anchor certificate of the associated CloudHSM cluster. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/kms_key.html.markdown b/website/docs/cdktf/python/d/kms_key.html.markdown index 9a8457fc006a..3b5c50c1db68 100644 --- a/website/docs/cdktf/python/d/kms_key.html.markdown +++ b/website/docs/cdktf/python/d/kms_key.html.markdown @@ -47,6 +47,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `key_id` - (Required) Key identifier which can be one of the following format: * Key ID. E.g: `1234abcd-12ab-34cd-56ef-1234567890ab` * Key ARN. E.g.: `arn:aws:kms:us-east-1:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab` @@ -64,7 +65,7 @@ This data source exports the following attributes in addition to the arguments a * `cloud_hsm_cluster_id`: The cluster ID of the AWS CloudHSM cluster that contains the key material for the KMS key. * `creation_date`: The date and time when the key was created * `custom_key_store_id`: A unique identifier for the custom key store that contains the KMS key. -* `customer_master_key_spec`: Specifies whether the key contains a symmetric key or an asymmetric key pair and the encryption algorithms or signing algorithms that the key supports +* `customer_master_key_spec`: See `key_spec`. * `deletion_date`: The date and time after which AWS KMS deletes the key. This value is present only when `key_state` is `PendingDeletion`, otherwise this value is 0 * `description`: The description of the key. * `enabled`: Specifies whether the key is enabled. 
When `key_state` is `Enabled` this value is true, otherwise it is false @@ -91,4 +92,4 @@ The `primary_key` and `replica_keys` objects support the following: * `arn`: The key ARN of a primary or replica key of a multi-Region key. * `region`: The AWS Region of a primary or replica key in a multi-Region key. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/kms_public_key.html.markdown b/website/docs/cdktf/python/d/kms_public_key.html.markdown index 39cfe0236908..82167e6af77a 100644 --- a/website/docs/cdktf/python/d/kms_public_key.html.markdown +++ b/website/docs/cdktf/python/d/kms_public_key.html.markdown @@ -44,6 +44,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `key_id` - (Required) Key identifier which can be one of the following format: * Key ID. E.g - `1234abcd-12ab-34cd-56ef-1234567890ab` * Key ARN. E.g. - `arn:aws:kms:us-east-1:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab` @@ -64,4 +65,4 @@ This data source exports the following attributes in addition to the arguments a * `public_key_pem` - Exported public key. The value is Privacy Enhanced Mail (PEM) encoded. * `signing_algorithms` - Signing algorithms that AWS KMS supports for this key. Only set when the `key_usage` of the public key is `SIGN_VERIFY`. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/kms_secret.html.markdown b/website/docs/cdktf/python/d/kms_secret.html.markdown index 9edd17eb485e..3a43919f211e 100644 --- a/website/docs/cdktf/python/d/kms_secret.html.markdown +++ b/website/docs/cdktf/python/d/kms_secret.html.markdown @@ -10,6 +10,6 @@ description: |- # Data Source: aws_kms_secret -!> **WARNING:** This data source was removed in version 2.0.0 of the Terraform AWS Provider. You can migrate existing configurations to the [`aws_kms_secrets` data source](/docs/providers/aws/d/kms_secrets.html) following instructions available in the [Version 2 Upgrade Guide](../guides/version-2-upgrade.html#data-source-aws_kms_secret). +!> **WARNING:** This data source's functionality was removed in version 2.0.0 of the Terraform AWS Provider. You can migrate existing configurations to the [`aws_kms_secrets` data source](/docs/providers/aws/d/kms_secrets.html) following instructions available in the [Version 2 Upgrade Guide](../guides/version-2-upgrade.html#data-source-aws_kms_secret). This data source will be removed in a future version. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/lakeformation_data_lake_settings.html.markdown b/website/docs/cdktf/python/d/lakeformation_data_lake_settings.html.markdown index 5c4c7d842b27..b986fa7a497b 100644 --- a/website/docs/cdktf/python/d/lakeformation_data_lake_settings.html.markdown +++ b/website/docs/cdktf/python/d/lakeformation_data_lake_settings.html.markdown @@ -35,13 +35,14 @@ class MyConvertedCode(TerraformStack): The following arguments are optional: -* `catalog_id` – (Optional) Identifier for the Data Catalog. By default, the account ID. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `catalog_id` - (Optional) Identifier for the Data Catalog. By default, the account ID. ## Attribute Reference This data source exports the following attributes in addition to the arguments above: -* `admins` – List of ARNs of AWS Lake Formation principals (IAM users or roles). +* `admins` - List of ARNs of AWS Lake Formation principals (IAM users or roles). * `allow_external_data_filtering` - Whether to allow Amazon EMR clusters to access data managed by Lake Formation. * `allow_full_table_external_data_access` - Whether to allow a third-party query engine to get data access credentials without session tags when a caller has full data access permissions. * `authorized_session_tag_value_list` - Lake Formation relies on a privileged process secured by Amazon EMR or the third party integrator to tag the user's role while assuming it. @@ -49,8 +50,8 @@ This data source exports the following attributes in addition to the arguments a * `create_table_default_permissions` - Up to three configuration blocks of principal permissions for default create table permissions. Detailed below. * `external_data_filtering_allow_list` - A list of the account IDs of Amazon Web Services accounts with Amazon EMR clusters that are to perform data filtering. * `parameters` - Key-value map of additional configuration. `CROSS_ACCOUNT_VERSION` will be set to values `"1"`, `"2"`, `"3"`, or `"4"`. `SET_CONTEXT` will also be returned with a value of `TRUE`. In a fresh account, prior to configuring, `CROSS_ACCOUNT_VERSION` is `"1"`. -* `read_only_admins` – List of ARNs of AWS Lake Formation principals (IAM users or roles) with only view access to the resources. -* `trusted_resource_owners` – List of the resource-owning account IDs that the caller's account can use to share their user access details (user ARNs). 
+* `read_only_admins` - List of ARNs of AWS Lake Formation principals (IAM users or roles) with only view access to the resources. +* `trusted_resource_owners` - List of the resource-owning account IDs that the caller's account can use to share their user access details (user ARNs). ### create_database_default_permissions @@ -62,4 +63,4 @@ This data source exports the following attributes in addition to the arguments a * `permissions` - List of permissions granted to the principal. * `principal` - Principal who is granted permissions. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/lakeformation_permissions.html.markdown b/website/docs/cdktf/python/d/lakeformation_permissions.html.markdown index ab41d02617c8..dcdb5a3fa1a2 100644 --- a/website/docs/cdktf/python/d/lakeformation_permissions.html.markdown +++ b/website/docs/cdktf/python/d/lakeformation_permissions.html.markdown @@ -95,7 +95,8 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: -* `principal` – (Required) Principal to be granted the permissions on the resource. Supported principals are IAM users or IAM roles. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `principal` - (Required) Principal to be granted the permissions on the resource. Supported principals are IAM users or IAM roles. One of the following is required: @@ -110,7 +111,8 @@ One of the following is required: The following arguments are optional: -* `catalog_id` – (Optional) Identifier for the Data Catalog. By default, the account ID. The Data Catalog is the persistent metadata store. 
It contains database definitions, table definitions, and other control information to manage your Lake Formation environment. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `catalog_id` - (Optional) Identifier for the Data Catalog. By default, the account ID. The Data Catalog is the persistent metadata store. It contains database definitions, table definitions, and other control information to manage your Lake Formation environment. ### data_cells_filter @@ -123,7 +125,7 @@ The following arguments are optional: The following argument is required: -* `arn` – (Required) ARN that uniquely identifies the data location resource. +* `arn` - (Required) ARN that uniquely identifies the data location resource. The following argument is optional: @@ -133,7 +135,7 @@ The following argument is optional: The following argument is required: -* `name` – (Required) Name of the database resource. Unique to the Data Catalog. +* `name` - (Required) Name of the database resource. Unique to the Data Catalog. The following argument is optional: @@ -143,7 +145,7 @@ The following argument is optional: The following arguments are required: -* `key` – (Required) Key-name for the tag. +* `key` - (Required) Key-name for the tag. * `values` - (Required) List of possible values an attribute can take. The following argument is optional: @@ -154,7 +156,7 @@ The following argument is optional: The following arguments are required: -* `resource_type` – (Required) Resource type for which the tag policy applies. Valid values are `DATABASE` and `TABLE`. +* `resource_type` - (Required) Resource type for which the tag policy applies. Valid values are `DATABASE` and `TABLE`. 
* `expression` - (Required) List of tag conditions that apply to the resource's tag policy. Configuration block for tag conditions that apply to the policy. See [`expression`](#expression) below. The following argument is optional: @@ -163,17 +165,18 @@ The following argument is optional: #### expression -* `key` – (Required) Key-name of an LF-Tag. +* `key` - (Required) Key-name of an LF-Tag. * `values` - (Required) List of possible values of an LF-Tag. ### table The following argument is required: -* `database_name` – (Required) Name of the database for the table. Unique to a Data Catalog. +* `database_name` - (Required) Name of the database for the table. Unique to a Data Catalog. The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `catalog_id` - (Optional) Identifier for the Data Catalog. By default, it is the account ID of the caller. * `name` - (Optional) Name of the table. At least one of `name` or `wildcard` is required. * `wildcard` - (Optional) Whether to use a wildcard representing every table under a database. At least one of `name` or `wildcard` is required. Defaults to `false`. @@ -182,11 +185,12 @@ The following arguments are optional: The following arguments are required: -* `database_name` – (Required) Name of the database for the table with columns resource. Unique to the Data Catalog. -* `name` – (Required) Name of the table resource. +* `database_name` - (Required) Name of the database for the table with columns resource. Unique to the Data Catalog. +* `name` - (Required) Name of the table resource. 
The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `catalog_id` - (Optional) Identifier for the Data Catalog. By default, it is the account ID of the caller. * `column_names` - (Optional) Set of column names for the table. At least one of `column_names` or `excluded_column_names` is required. * `excluded_column_names` - (Optional) Set of column names for the table to exclude. At least one of `column_names` or `excluded_column_names` is required. @@ -195,7 +199,7 @@ The following arguments are optional: This data source exports the following attributes in addition to the arguments above: -* `permissions` – List of permissions granted to the principal. For details on permissions, see [Lake Formation Permissions Reference](https://docs.aws.amazon.com/lake-formation/latest/dg/lf-permissions-reference.html). +* `permissions` - List of permissions granted to the principal. For details on permissions, see [Lake Formation Permissions Reference](https://docs.aws.amazon.com/lake-formation/latest/dg/lf-permissions-reference.html). * `permissions_with_grant_option` - Subset of `permissions` which the principal can pass. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/lakeformation_resource.html.markdown b/website/docs/cdktf/python/d/lakeformation_resource.html.markdown index c7b0498983d8..949665730611 100644 --- a/website/docs/cdktf/python/d/lakeformation_resource.html.markdown +++ b/website/docs/cdktf/python/d/lakeformation_resource.html.markdown @@ -35,13 +35,17 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: -* `arn` – (Required) ARN of the resource, an S3 path. 
+* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `arn` - (Required) ARN of the resource, an S3 path. ## Attribute Reference This data source exports the following attributes in addition to the arguments above: +* `hybrid_access_enabled` - Flag to enable AWS LakeFormation hybrid access permission mode. * `last_modified` - Date and time the resource was last modified in [RFC 3339 format](https://tools.ietf.org/html/rfc3339#section-5.8). -* `role_arn` – Role that the resource was registered with. +* `role_arn` - Role that the resource was registered with. +* `with_federation` - Whether the resource is a federated resource. +* `with_privileged_access` - Boolean to grant the calling principal the permissions to perform all supported Lake Formation operations on the registered data location. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/lambda_alias.html.markdown b/website/docs/cdktf/python/d/lambda_alias.html.markdown index 61593eb5383b..cb535adad083 100644 --- a/website/docs/cdktf/python/d/lambda_alias.html.markdown +++ b/website/docs/cdktf/python/d/lambda_alias.html.markdown @@ -3,21 +3,84 @@ subcategory: "Lambda" layout: "aws" page_title: "AWS: aws_lambda_alias" description: |- - Provides a Lambda Alias data source. + Provides details about an AWS Lambda Alias. --- # Data Source: aws_lambda_alias -Provides information about a Lambda Alias. +Provides details about an AWS Lambda Alias. Use this data source to retrieve information about an existing Lambda function alias for traffic management, deployment strategies, or API integrations. ## Example Usage +### Basic Usage + +```python +# DO NOT EDIT. 
Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformOutput, TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.data_aws_lambda_alias import DataAwsLambdaAlias +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + example = DataAwsLambdaAlias(self, "example", + function_name="my-lambda-function", + name="production" + ) + TerraformOutput(self, "alias_arn", + value=example.arn + ) +``` + +### API Gateway Integration + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import Token, TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.api_gateway_integration import ApiGatewayIntegration +from imports.aws.data_aws_lambda_alias import DataAwsLambdaAlias +from imports.aws.lambda_permission import LambdaPermission +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + api_handler = DataAwsLambdaAlias(self, "api_handler", + function_name="api-handler", + name="live" + ) + ApiGatewayIntegration(self, "example", + http_method=Token.as_string(aws_api_gateway_method_example.http_method), + integration_http_method="POST", + resource_id=Token.as_string(aws_api_gateway_resource_example.id), + rest_api_id=Token.as_string(aws_api_gateway_rest_api_example.id), + type="AWS_PROXY", + uri=Token.as_string(api_handler.invoke_arn) + ) + LambdaPermission(self, "api_gateway", + action="lambda:InvokeFunction", + function_name=Token.as_string(api_handler.function_name), + principal="apigateway.amazonaws.com", + qualifier=Token.as_string(api_handler.name), + source_arn="${" + 
aws_api_gateway_rest_api_example.execution_arn + "}/*/*", + statement_id="AllowExecutionFromAPIGateway" + ) +``` + +### Deployment Version Tracking + ```python # DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug from constructs import Construct -from cdktf import TerraformStack +from cdktf import Op, TerraformOutput, TerraformStack # # Provider bindings are generated by running `cdktf get`. # See https://cdk.tf/provider-generation for more details. @@ -26,26 +89,78 @@ from imports.aws.data_aws_lambda_alias import DataAwsLambdaAlias class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) - DataAwsLambdaAlias(self, "production", - function_name="my-lambda-func", + production = DataAwsLambdaAlias(self, "production", + function_name="payment-processor", name="production" ) + staging = DataAwsLambdaAlias(self, "staging", + function_name="payment-processor", + name="staging" + ) + version_drift = Op.neq(production.function_version, staging.function_version) + TerraformOutput(self, "deployment_status", + value=[{ + "production_version": production.function_version, + "ready_for_promotion": Op.not(version_drift), + "staging_version": staging.function_version, + "version_drift": version_drift + } + ] + ) +``` + +### EventBridge Rule Target + +```terraform +data "aws_lambda_alias" "event_processor" { + function_name = "event-processor" + name = "stable" +} + +resource "aws_cloudwatch_event_rule" "example" { + name = "capture-events" + description = "Capture events for processing" + + event_pattern = jsonencode({ + source = ["myapp.orders"] + detail-type = ["Order Placed"] + }) +} + +resource "aws_cloudwatch_event_target" "lambda" { + rule = aws_cloudwatch_event_rule.example.name + target_id = "SendToLambda" + arn = data.aws_lambda_alias.event_processor.arn +} + +resource "aws_lambda_permission" "allow_eventbridge" { + statement_id = "AllowExecutionFromEventBridge" + action = 
"lambda:InvokeFunction" + function_name = data.aws_lambda_alias.event_processor.function_name + principal = "events.amazonaws.com" + qualifier = data.aws_lambda_alias.event_processor.name + source_arn = aws_cloudwatch_event_rule.example.arn +} ``` ## Argument Reference -This data source supports the following arguments: +The following arguments are required: * `function_name` - (Required) Name of the aliased Lambda function. * `name` - (Required) Name of the Lambda alias. +The following arguments are optional: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). + ## Attribute Reference This data source exports the following attributes in addition to the arguments above: * `arn` - ARN identifying the Lambda function alias. -* `description` - Description of alias. +* `description` - Description of the alias. * `function_version` - Lambda function version which the alias uses. -* `invoke_arn` - ARN to be used for invoking Lambda Function from API Gateway - to be used in aws_api_gateway_integration's `uri`. +* `invoke_arn` - ARN to be used for invoking Lambda Function from API Gateway - to be used in [`aws_api_gateway_integration`](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/api_gateway_integration)'s `uri`. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/lambda_code_signing_config.html.markdown b/website/docs/cdktf/python/d/lambda_code_signing_config.html.markdown index efea8545bba5..1a012451601d 100644 --- a/website/docs/cdktf/python/d/lambda_code_signing_config.html.markdown +++ b/website/docs/cdktf/python/d/lambda_code_signing_config.html.markdown @@ -3,60 +3,186 @@ subcategory: "Lambda" layout: "aws" page_title: "AWS: aws_lambda_code_signing_config" description: |- - Provides a Lambda Code Signing Config data source. + Provides details about an AWS Lambda Code Signing Config. --- # Data Source: aws_lambda_code_signing_config -Provides information about a Lambda Code Signing Config. A code signing configuration defines a list of allowed signing profiles and defines the code-signing validation policy (action to be taken if deployment validation checks fail). +Provides details about an AWS Lambda Code Signing Config. Use this data source to retrieve information about an existing code signing configuration for Lambda functions to ensure code integrity and authenticity. -For information about Lambda code signing configurations and how to use them, see [configuring code signing for Lambda functions][1] +For information about Lambda code signing configurations and how to use them, see [configuring code signing for Lambda functions](https://docs.aws.amazon.com/lambda/latest/dg/configuration-codesigning.html). ## Example Usage +### Basic Usage + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformOutput, Fn, TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. 
+# +from imports.aws.data_aws_lambda_code_signing_config import DataAwsLambdaCodeSigningConfig +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + example = DataAwsLambdaCodeSigningConfig(self, "example", + arn="arn:aws:lambda:us-west-2:123456789012:code-signing-config:csc-0f6c334abcdea4d8b" + ) + TerraformOutput(self, "config_details", + value=[{ + "config_id": example.config_id, + "description": example.description, + "policy": Fn.lookup_nested(example.policies, ["0", "untrusted_artifact_on_deployment" + ]) + } + ] + ) +``` + +### Use in Lambda Function + ```python # DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug from constructs import Construct -from cdktf import TerraformStack +from cdktf import Token, TerraformStack # # Provider bindings are generated by running `cdktf get`. # See https://cdk.tf/provider-generation for more details. # from imports.aws.data_aws_lambda_code_signing_config import DataAwsLambdaCodeSigningConfig +from imports.aws.lambda_function import LambdaFunction class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) - DataAwsLambdaCodeSigningConfig(self, "existing_csc", - arn="arn:aws:lambda:${" + aws_region.value + "}:${" + aws_account.value + "}:code-signing-config:csc-0f6c334abcdea4d8b" + security_config = DataAwsLambdaCodeSigningConfig(self, "security_config", + arn=code_signing_config_arn.string_value + ) + LambdaFunction(self, "example", + code_signing_config_arn=Token.as_string(security_config.arn), + filename="function.zip", + function_name="secure-function", + handler="index.handler", + role=lambda_role.arn, + runtime="nodejs20.x", + tags={ + "Environment": "production", + "Security": "code-signed" + } + ) +``` + +### Validate Signing Profiles + +```python +# DO NOT EDIT. 
Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import Fn, TerraformOutput, conditional, Token, TerraformCount, TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.data_aws_lambda_code_signing_config import DataAwsLambdaCodeSigningConfig +from imports.aws.lambda_function import LambdaFunction +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + required_profile = "arn:aws:signer:us-west-2:123456789012:/signing-profiles/MyProfile" + example = DataAwsLambdaCodeSigningConfig(self, "example", + arn=code_signing_config_arn.string_value + ) + allowed_profiles = Fn.lookup_nested(example.allowed_publishers, ["0", "signing_profile_version_arns" + ]) + profile_allowed = Fn.contains(allowed_profiles, required_profile) + TerraformOutput(self, "deployment_status", + value=[{ + "function_created": profile_allowed, + "message": conditional(profile_allowed, "Function deployed with valid signing profile", "Deployment blocked - signing profile not allowed"), + "profile_allowed": profile_allowed + } + ] + ) + # In most cases loops should be handled in the programming language context and + # not inside of the Terraform context. If you are looping over something external, e.g. a variable or a file input + # you should consider using a for loop. If you are looping over something only known to Terraform, e.g. a result of a data source + # you need to keep this like it is. 
+ conditional_count = TerraformCount.of( + Token.as_number(conditional(profile_allowed, 1, 0))) + LambdaFunction(self, "conditional", + code_signing_config_arn=Token.as_string(example.arn), + filename="function.zip", + function_name="conditional-function", + handler="index.handler", + role=lambda_role.arn, + runtime="python3.12", + count=conditional_count + ) +``` + +### Multi-Environment Configuration + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import Fn, Op, TerraformOutput, TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.data_aws_lambda_code_signing_config import DataAwsLambdaCodeSigningConfig +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + dev = DataAwsLambdaCodeSigningConfig(self, "dev", + arn="arn:aws:lambda:us-west-2:123456789012:code-signing-config:csc-dev-456" + ) + prod = DataAwsLambdaCodeSigningConfig(self, "prod", + arn="arn:aws:lambda:us-west-2:123456789012:code-signing-config:csc-prod-123" + ) + dev_policy = Fn.lookup_nested(dev.policies, ["0", "untrusted_artifact_on_deployment" + ]) + prod_policy = Fn.lookup_nested(prod.policies, ["0", "untrusted_artifact_on_deployment" + ]) + config_comparison = { + "dev_enforcement": dev_policy, + "policies_match": Op.eq(prod_policy, dev_policy), + "prod_enforcement": prod_policy + } + TerraformOutput(self, "environment_comparison", + value=config_comparison ) ``` ## Argument Reference -This data source supports the following arguments: +The following arguments are required: * `arn` - (Required) ARN of the code signing configuration. +The following arguments are optional: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). + ## Attribute Reference This data source exports the following attributes in addition to the arguments above: -* `allowed_publishers` - List of allowed publishers as signing profiles for this code signing configuration. +* `allowed_publishers` - List of allowed publishers as signing profiles for this code signing configuration. [See below](#allowed_publishers-attribute-reference). * `config_id` - Unique identifier for the code signing configuration. * `description` - Code signing configuration description. * `last_modified` - Date and time that the code signing configuration was last modified. -* `policies` - List of code signing policies that control the validation failure action for signature mismatch or expiry. - -`allowed_publishers` is exported with the following attribute: +* `policies` - List of code signing policies that control the validation failure action for signature mismatch or expiry. [See below](#policies-attribute-reference). -* `signing_profile_version_arns` - The ARN for each of the signing profiles. A signing profile defines a trusted user who can sign a code package. +### allowed_publishers Attribute Reference -`policies` is exported with the following attribute: +* `signing_profile_version_arns` - Set of ARNs for each of the signing profiles. A signing profile defines a trusted user who can sign a code package. -* `untrusted_artifact_on_deployment` - Code signing configuration policy for deployment validation failure. +### policies Attribute Reference -[1]: https://docs.aws.amazon.com/lambda/latest/dg/configuration-codesigning.html +* `untrusted_artifact_on_deployment` - Code signing configuration policy for deployment validation failure. Valid values: `Warn`, `Enforce`. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/lambda_function.html.markdown b/website/docs/cdktf/python/d/lambda_function.html.markdown index 1fca905d5b4d..864ece1352b5 100644 --- a/website/docs/cdktf/python/d/lambda_function.html.markdown +++ b/website/docs/cdktf/python/d/lambda_function.html.markdown @@ -3,45 +3,154 @@ subcategory: "Lambda" layout: "aws" page_title: "AWS: aws_lambda_function" description: |- - Provides a Lambda Function data source. + Provides details about an AWS Lambda Function. --- # Data Source: aws_lambda_function -Provides information about a Lambda Function. +Provides details about an AWS Lambda Function. Use this data source to obtain information about an existing Lambda function for use in other resources or as a reference for function configurations. + +~> **Note:** This data source returns information about the latest version or alias specified by the `qualifier`. If no `qualifier` is provided, it returns information about the most recent published version, or `$LATEST` if no published version exists. ## Example Usage +### Basic Usage + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformOutput, TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.data_aws_lambda_function import DataAwsLambdaFunction +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + example = DataAwsLambdaFunction(self, "example", + function_name="my-lambda-function" + ) + TerraformOutput(self, "function_arn", + value=example.arn + ) +``` + +### Using Function Alias + +```python +# DO NOT EDIT. 
Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import Token, TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.api_gateway_integration import ApiGatewayIntegration +from imports.aws.data_aws_lambda_function import DataAwsLambdaFunction +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + example = DataAwsLambdaFunction(self, "example", + function_name="api-handler", + qualifier="production" + ) + aws_api_gateway_integration_example = ApiGatewayIntegration(self, "example_1", + http_method=Token.as_string(aws_api_gateway_method_example.http_method), + integration_http_method="POST", + resource_id=Token.as_string(aws_api_gateway_resource_example.id), + rest_api_id=Token.as_string(aws_api_gateway_rest_api_example.id), + type="AWS_PROXY", + uri=Token.as_string(example.invoke_arn) + ) + # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. + aws_api_gateway_integration_example.override_logical_id("example") +``` + +### Function Configuration Reference + ```python # DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug from constructs import Construct -from cdktf import VariableType, TerraformVariable, TerraformStack +from cdktf import Token, Fn, TerraformStack # # Provider bindings are generated by running `cdktf get`. # See https://cdk.tf/provider-generation for more details. # from imports.aws.data_aws_lambda_function import DataAwsLambdaFunction +from imports.aws.lambda_function import LambdaFunction class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) - # Terraform Variables are not always the best fit for getting inputs in the context of Terraform CDK. 
- # You can read more about this at https://cdk.tf/variables - function_name = TerraformVariable(self, "function_name", - type=VariableType.STRING + reference = DataAwsLambdaFunction(self, "reference", + function_name="existing-function" ) - DataAwsLambdaFunction(self, "existing", - function_name=function_name.string_value + LambdaFunction(self, "example", + architectures=Token.as_list(reference.architectures), + environment=LambdaFunctionEnvironment( + variables=Token.as_string_map( + Fn.lookup_nested(reference.environment, ["0", "variables"])) + ), + filename="new-function.zip", + function_name="new-function", + handler=Token.as_string(reference.handler), + memory_size=Token.as_number(reference.memory_size), + role=Token.as_string(reference.role), + runtime=Token.as_string(reference.runtime), + timeout=Token.as_number(reference.timeout), + vpc_config=LambdaFunctionVpcConfig( + security_group_ids=Token.as_list( + Fn.lookup_nested(reference.vpc_config, ["0", "security_group_ids"])), + subnet_ids=Token.as_list( + Fn.lookup_nested(reference.vpc_config, ["0", "subnet_ids"])) + ) + ) +``` + +### Function Version Management + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformOutput, Op, TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. 
+# +from imports.aws.data_aws_lambda_function import DataAwsLambdaFunction +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + latest = DataAwsLambdaFunction(self, "latest", + function_name="my-function", + qualifier="$LATEST" + ) + version = DataAwsLambdaFunction(self, "version", + function_name="my-function", + qualifier="3" + ) + TerraformOutput(self, "version_comparison", + value=[{ + "code_difference": Op.neq(version.code_sha256, latest.code_sha256), + "latest_version": latest.version, + "specific_version": version.version + } + ] ) ``` ## Argument Reference -This data source supports the following arguments: +The following arguments are required: -* `function_name` - (Required) Name of the lambda function. -* `qualifier` - (Optional) Alias name or version number of the lambda functionE.g., `$LATEST`, `my-alias`, or `1`. When not included: the data source resolves to the most recent published version; if no published version exists: it resolves to the most recent unpublished version. +* `function_name` - (Required) Name of the Lambda function. + +The following arguments are optional: + +* `qualifier` - (Optional) Alias name or version number of the Lambda function. E.g., `$LATEST`, `my-alias`, or `1`. When not included: the data source resolves to the most recent published version; if no published version exists: it resolves to the most recent unpublished version. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). ## Attribute Reference @@ -51,31 +160,67 @@ This data source exports the following attributes in addition to the arguments a * `arn` - Unqualified (no `:QUALIFIER` or `:VERSION` suffix) ARN identifying your Lambda Function. 
See also `qualified_arn`. * `code_sha256` - Base64-encoded representation of raw SHA-256 sum of the zip file. * `code_signing_config_arn` - ARN for a Code Signing Configuration. -* `dead_letter_config` - Configure the function's *dead letter queue*. +* `dead_letter_config` - Configuration for the function's dead letter queue. [See below](#dead_letter_config-attribute-reference). * `description` - Description of what your Lambda Function does. -* `environment` - Lambda environment's configuration settings. -* `ephemeral_storage` - Amount of Ephemeral storage(`/tmp`) allocated for the Lambda Function. -* `file_system_config` - Connection settings for an Amazon EFS file system. +* `environment` - Lambda environment's configuration settings. [See below](#environment-attribute-reference). +* `ephemeral_storage` - Amount of ephemeral storage (`/tmp`) allocated for the Lambda Function. [See below](#ephemeral_storage-attribute-reference). +* `file_system_config` - Connection settings for an Amazon EFS file system. [See below](#file_system_config-attribute-reference). * `handler` - Function entrypoint in your code. * `image_uri` - URI of the container image. -* `invoke_arn` - ARN to be used for invoking Lambda Function from API Gateway. **NOTE:** Starting with `v4.51.0` of the provider, this will *not* include the qualifier. +* `invoke_arn` - ARN to be used for invoking Lambda Function from API Gateway. **Note:** Starting with `v4.51.0` of the provider, this will not include the qualifier. * `kms_key_arn` - ARN for the KMS encryption key. * `last_modified` - Date this resource was last modified. * `layers` - List of Lambda Layer ARNs attached to your Lambda Function. -* `logging_config` - Advanced logging settings. +* `logging_config` - Advanced logging settings. [See below](#logging_config-attribute-reference). * `memory_size` - Amount of memory in MB your Lambda Function can use at runtime. 
* `qualified_arn` - Qualified (`:QUALIFIER` or `:VERSION` suffix) ARN identifying your Lambda Function. See also `arn`. * `qualified_invoke_arn` - Qualified (`:QUALIFIER` or `:VERSION` suffix) ARN to be used for invoking Lambda Function from API Gateway. See also `invoke_arn`. -* `reserved_concurrent_executions` - The amount of reserved concurrent executions for this lambda function or `-1` if unreserved. +* `reserved_concurrent_executions` - Amount of reserved concurrent executions for this Lambda function or `-1` if unreserved. * `role` - IAM role attached to the Lambda Function. * `runtime` - Runtime environment for the Lambda function. * `signing_job_arn` - ARN of a signing job. -* `signing_profile_version_arn` - The ARN for a signing profile version. +* `signing_profile_version_arn` - ARN for a signing profile version. * `source_code_hash` - (**Deprecated** use `code_sha256` instead) Base64-encoded representation of raw SHA-256 sum of the zip file. * `source_code_size` - Size in bytes of the function .zip file. +* `source_kms_key_arn` - ARN of the AWS Key Management Service key used to encrypt the function's `.zip` deployment package. +* `tags` - Map of tags assigned to the Lambda Function. * `timeout` - Function execution time at which Lambda should terminate the function. -* `tracing_config` - Tracing settings of the function. -* `version` - The version of the Lambda function returned. If `qualifier` is not set, this will resolve to the most recent published version. If no published version of the function exists, `version` will resolve to `$LATEST`. -* `vpc_config` - VPC configuration associated with your Lambda function. +* `tracing_config` - Tracing settings of the function. [See below](#tracing_config-attribute-reference). +* `version` - Version of the Lambda function returned. If `qualifier` is not set, this will resolve to the most recent published version. If no published version of the function exists, `version` will resolve to `$LATEST`. 
+* `vpc_config` - VPC configuration associated with your Lambda function. [See below](#vpc_config-attribute-reference). + +### dead_letter_config + +* `target_arn` - ARN of an SNS topic or SQS queue to notify when an invocation fails. + +### environment + +* `variables` - Map of environment variables that are accessible from the function code during execution. + +### ephemeral_storage + +* `size` - Size of the Lambda function ephemeral storage (`/tmp`) in MB. + +### file_system_config + +* `arn` - ARN of the Amazon EFS Access Point that provides access to the file system. +* `local_mount_path` - Path where the function can access the file system, starting with `/mnt/`. + +### logging_config + +* `application_log_level` - Detail level of the logs your application sends to CloudWatch when using supported logging libraries. +* `log_format` - Format for your function's logs. Valid values: `Text`, `JSON`. +* `log_group` - CloudWatch log group your function sends logs to. +* `system_log_level` - Detail level of the Lambda platform event logs sent to CloudWatch. + +### tracing_config + +* `mode` - Tracing mode. Valid values: `Active`, `PassThrough`. + +### vpc_config + +* `security_group_ids` - List of security group IDs associated with the Lambda function. +* `subnet_ids` - List of subnet IDs associated with the Lambda function. +* `vpc_id` - ID of the VPC. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/lambda_function_url.html.markdown b/website/docs/cdktf/python/d/lambda_function_url.html.markdown index 422acb17ff0f..cc2fe192bb6a 100644 --- a/website/docs/cdktf/python/d/lambda_function_url.html.markdown +++ b/website/docs/cdktf/python/d/lambda_function_url.html.markdown @@ -3,21 +3,23 @@ subcategory: "Lambda" layout: "aws" page_title: "AWS: aws_lambda_function_url" description: |- - Provides a Lambda function URL data source. + Provides details about an AWS Lambda Function URL. 
--- # Data Source: aws_lambda_function_url -Provides information about a Lambda function URL. +Provides details about an AWS Lambda Function URL. Use this data source to retrieve information about an existing function URL configuration. ## Example Usage +### Basic Usage + ```python # DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug from constructs import Construct -from cdktf import VariableType, TerraformVariable, TerraformStack +from cdktf import TerraformOutput, TerraformStack # # Provider bindings are generated by running `cdktf get`. # See https://cdk.tf/provider-generation for more details. @@ -26,29 +28,90 @@ from imports.aws.data_aws_lambda_function_url import DataAwsLambdaFunctionUrl class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) - # Terraform Variables are not always the best fit for getting inputs in the context of Terraform CDK. - # You can read more about this at https://cdk.tf/variables - function_name = TerraformVariable(self, "function_name", - type=VariableType.STRING + example = DataAwsLambdaFunctionUrl(self, "example", + function_name="my_lambda_function" ) - DataAwsLambdaFunctionUrl(self, "existing", - function_name=function_name.string_value + TerraformOutput(self, "function_url", + value=example.function_url + ) +``` + +### With Qualifier + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import Token, Fn, TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. 
+# +from imports.aws.data_aws_lambda_function_url import DataAwsLambdaFunctionUrl +from imports.aws.route53_record import Route53Record +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + example = DataAwsLambdaFunctionUrl(self, "example", + function_name=Token.as_string(aws_lambda_function_example.function_name), + qualifier="production" + ) + Route53Record(self, "lambda_alias", + name="api.example.com", + records=[ + Token.as_string( + Fn.replace(Token.as_string(example.function_url), "https://", "")) + ], + ttl=300, + type="CNAME", + zone_id=Token.as_string(aws_route53_zone_example.zone_id) + ) +``` + +### Retrieve CORS Configuration + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import Fn, Op, conditional, TerraformOutput, TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.data_aws_lambda_function_url import DataAwsLambdaFunctionUrl +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + example = DataAwsLambdaFunctionUrl(self, "example", + function_name="api_function" + ) + cors_config = conditional( + Op.gt(Fn.length_of(example.cors), 0), + Fn.lookup_nested(example.cors, ["0"]), "null") + allowed_origins = conditional( + Op.neq(cors_config, "null"), + Fn.lookup_nested(cors_config, ["allow_origins"]), []) + TerraformOutput(self, "cors_allowed_origins", + value=allowed_origins ) ``` ## Argument Reference -This data source supports the following arguments: +The following arguments are required: -* `function_name` - (Required) The name (or ARN) of the Lambda function. -* `qualifier` - (Optional) Alias name or `"$LATEST"`. +* `function_name` - (Required) Name or ARN of the Lambda function. 
+ +The following arguments are optional: + +* `qualifier` - (Optional) Alias name or `$LATEST`. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). ## Attribute Reference This data source exports the following attributes in addition to the arguments above: * `authorization_type` - Type of authentication that the function URL uses. -* `cors` - The [cross-origin resource sharing (CORS)](https://developer.mozilla.org/en-US/docs/Web/HTTP/CORS) settings for the function URL. See the [`aws_lambda_function_url` resource](/docs/providers/aws/r/lambda_function_url.html) documentation for more details. +* `cors` - Cross-origin resource sharing (CORS) settings for the function URL. [See below](#cors-attribute-reference). * `creation_time` - When the function URL was created, in [ISO-8601 format](https://www.w3.org/TR/NOTE-datetime). * `function_arn` - ARN of the function. * `function_url` - HTTP URL endpoint for the function in the format `https://.lambda-url..on.aws/`. @@ -56,4 +119,13 @@ This data source exports the following attributes in addition to the arguments a * `last_modified_time` - When the function URL configuration was last updated, in [ISO-8601 format](https://www.w3.org/TR/NOTE-datetime). * `url_id` - Generated ID for the endpoint. - \ No newline at end of file +### cors Attribute Reference + +* `allow_credentials` - Whether credentials are included in the CORS request. +* `allow_headers` - List of headers that are specified in the Access-Control-Request-Headers header. +* `allow_methods` - List of HTTP methods that are allowed when calling the function URL. +* `allow_origins` - List of origins that are allowed to make requests to the function URL. 
+* `expose_headers` - List of headers in the response that you want to expose to the origin that called the function URL. +* `max_age` - Maximum amount of time, in seconds, that web browsers can cache results of a preflight request. + + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/lambda_functions.html.markdown b/website/docs/cdktf/python/d/lambda_functions.html.markdown index 4e2e0f364ac9..465dd7c25646 100644 --- a/website/docs/cdktf/python/d/lambda_functions.html.markdown +++ b/website/docs/cdktf/python/d/lambda_functions.html.markdown @@ -3,41 +3,155 @@ subcategory: "Lambda" layout: "aws" page_title: "AWS: aws_lambda_functions" description: |- - Terraform data resource to get a list of Lambda Functions. + Provides a list of AWS Lambda Functions. --- # Data Source: aws_lambda_functions -Terraform data resource to get a list of Lambda Functions. +Provides a list of AWS Lambda Functions in the current region. Use this data source to discover existing Lambda functions for inventory, monitoring, or bulk operations. ## Example Usage +### List All Functions + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformOutput, Fn, TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.data_aws_lambda_functions import DataAwsLambdaFunctions +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + all = DataAwsLambdaFunctions(self, "all") + TerraformOutput(self, "all_function_names", + value=all.function_names + ) + TerraformOutput(self, "function_count", + value=Fn.length_of(all.function_names) + ) +``` + +### Use Function List for Bulk Operations + +```python +# DO NOT EDIT. 
Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import Fn, Token, TerraformCount, TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.cloudwatch_metric_alarm import CloudwatchMetricAlarm +from imports.aws.data_aws_lambda_functions import DataAwsLambdaFunctions +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + all = DataAwsLambdaFunctions(self, "all") + # In most cases loops should be handled in the programming language context and + # not inside of the Terraform context. If you are looping over something external, e.g. a variable or a file input + # you should consider using a for loop. If you are looping over something only known to Terraform, e.g. a result of a data source + # you need to keep this like it is. + lambda_errors_count = TerraformCount.of( + Token.as_number(Fn.length_of(all.function_names))) + CloudwatchMetricAlarm(self, "lambda_errors", + alarm_description="This metric monitors lambda errors", + alarm_name= + Token.as_string( + Fn.lookup_nested(all.function_names, [lambda_errors_count.index])) + "-errors", + comparison_operator="GreaterThanThreshold", + dimensions={ + "FunctionName": Token.as_string( + Fn.lookup_nested(all.function_names, [lambda_errors_count.index])) + }, + evaluation_periods=Token.as_number("2"), + metric_name="Errors", + namespace="AWS/Lambda", + period=Token.as_number("300"), + statistic="Sum", + tags={ + "Environment": "monitoring", + "Purpose": "lambda-error-tracking" + }, + threshold=Token.as_number("5"), + count=lambda_errors_count + ) +``` + +### Filter Functions by Name Pattern + +```python +# DO NOT EDIT. 
Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformOutput, TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.data_aws_lambda_functions import DataAwsLambdaFunctions +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + all = DataAwsLambdaFunctions(self, "all") + api_functions = "${[ for name in ${" + all.function_names + "} : name if can(regex(\"^api-\", name))]}" + worker_functions = "${[ for name in ${" + all.function_names + "} : name if can(regex(\"^worker-\", name))]}" + TerraformOutput(self, "api_functions", + value=api_functions + ) + TerraformOutput(self, "worker_functions", + value=worker_functions + ) +``` + +### Create Function Inventory + ```python # DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug from constructs import Construct -from cdktf import TerraformStack +from cdktf import TerraformOutput, Fn, Token, TerraformCount, TerraformStack # # Provider bindings are generated by running `cdktf get`. # See https://cdk.tf/provider-generation for more details. 
# +from imports.aws.data_aws_lambda_function import DataAwsLambdaFunction from imports.aws.data_aws_lambda_functions import DataAwsLambdaFunctions class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) - DataAwsLambdaFunctions(self, "all") + all = DataAwsLambdaFunctions(self, "all") + function_inventory = "${[ for i, name in ${" + all.function_names + "} : {\n name = name\n arn = data.aws_lambda_functions.all.function_arns[i]\n runtime = data.aws_lambda_function.details[i].runtime\n memory_size = data.aws_lambda_function.details[i].memory_size\n timeout = data.aws_lambda_function.details[i].timeout\n handler = data.aws_lambda_function.details[i].handler\n }]}" + TerraformOutput(self, "function_inventory", + value=function_inventory + ) + # In most cases loops should be handled in the programming language context and + # not inside of the Terraform context. If you are looping over something external, e.g. a variable or a file input + # you should consider using a for loop. If you are looping over something only known to Terraform, e.g. a result of a data source + # you need to keep this like it is. + details_count = TerraformCount.of( + Token.as_number(Fn.length_of(all.function_names))) + DataAwsLambdaFunction(self, "details", + function_name=Token.as_string( + Fn.lookup_nested(all.function_names, [details_count.index])), + count=details_count + ) ``` ## Argument Reference -This data source does not support any arguments. +The following arguments are optional: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). ## Attribute Reference This data source exports the following attributes in addition to the arguments above: -* `function_names` - A list of Lambda Function names. 
-* `function_arns` - A list of Lambda Function ARNs. +* `function_arns` - List of Lambda Function ARNs. +* `function_names` - List of Lambda Function names. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/lambda_invocation.html.markdown b/website/docs/cdktf/python/d/lambda_invocation.html.markdown index 74a22a1935c1..c1009046b16a 100644 --- a/website/docs/cdktf/python/d/lambda_invocation.html.markdown +++ b/website/docs/cdktf/python/d/lambda_invocation.html.markdown @@ -3,27 +3,29 @@ subcategory: "Lambda" layout: "aws" page_title: "AWS: aws_lambda_invocation" description: |- - Invoke AWS Lambda Function as data source + Invokes an AWS Lambda Function and returns its results. --- # Data Source: aws_lambda_invocation -Use this data source to invoke custom lambda functions as data source. -The lambda function is invoked with [RequestResponse](https://docs.aws.amazon.com/lambda/latest/dg/API_Invoke.html#API_Invoke_RequestSyntax) -invocation type. +Invokes an AWS Lambda Function and returns its results. Use this data source to execute Lambda functions during Terraform operations and use their results in other resources or outputs. -~> **NOTE:** The `aws_lambda_invocation` data source invokes the function during the first `apply` and every subsequent `plan` when the function is known. +The Lambda function is invoked with [RequestResponse](https://docs.aws.amazon.com/lambda/latest/dg/API_Invoke.html#API_Invoke_RequestSyntax) invocation type. -~> **NOTE:** If you get a `KMSAccessDeniedException: Lambda was unable to decrypt the environment variables because KMS access was denied` error when invoking an [`aws_lambda_function`](/docs/providers/aws/r/lambda_function.html) with environment variables, the IAM role associated with the function may have been deleted and recreated _after_ the function was created. 
You can fix the problem two ways: 1) updating the function's role to another role and then updating it back again to the recreated role, or 2) by using Terraform to `taint` the function and `apply` your configuration again to recreate the function. (When you create a function, Lambda grants permissions on the KMS key to the function's IAM role. If the IAM role is recreated, the grant is no longer valid. Changing the function's role or recreating the function causes Lambda to update the grant.) +~> **Note:** The `aws_lambda_invocation` data source invokes the function during the first `apply` and every subsequent `plan` when the function is known. + +~> **Note:** If you get a `KMSAccessDeniedException: Lambda was unable to decrypt the environment variables because KMS access was denied` error when invoking a Lambda function with environment variables, the IAM role associated with the function may have been deleted and recreated after the function was created. You can fix the problem two ways: 1) updating the function's role to another role and then updating it back again to the recreated role, or 2) by using Terraform to `taint` the function and `apply` your configuration again to recreate the function. (When you create a function, Lambda grants permissions on the KMS key to the function's IAM role. If the IAM role is recreated, the grant is no longer valid. Changing the function's role or recreating the function causes Lambda to update the grant.) ## Example Usage +### Basic Invocation + ```python # DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug from constructs import Construct -from cdktf import TerraformOutput, Fn, Token, TerraformStack +from cdktf import Token, Fn, TerraformOutput, TerraformStack # # Provider bindings are generated by running `cdktf get`. # See https://cdk.tf/provider-generation for more details. 
@@ -33,28 +35,115 @@ class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) example = DataAwsLambdaInvocation(self, "example", - function_name=lambda_function_test.function_name, - input="{\n \"key1\": \"value1\",\n \"key2\": \"value2\"\n}\n\n" + function_name=Token.as_string(aws_lambda_function_example.function_name), + input=Token.as_string( + Fn.jsonencode({ + "id": "123456", + "operation": "getStatus" + })) ) - TerraformOutput(self, "result_entry", - value=Fn.lookup_nested(Fn.jsondecode(Token.as_string(example.result)), ["\"key1\"" - ]) + TerraformOutput(self, "result", + value=Fn.jsondecode(Token.as_string(example.result)) + ) +``` + +### Dynamic Resource Configuration + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import Fn, Token, TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. 
+# +from imports.aws.data_aws_lambda_invocation import DataAwsLambdaInvocation +from imports.aws.elasticache_cluster import ElasticacheCluster +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + resource_config = DataAwsLambdaInvocation(self, "resource_config", + function_name="resource-config-generator", + input=Token.as_string( + Fn.jsonencode({ + "environment": environment.value, + "region": current.region, + "service": "api" + })), + qualifier="production" + ) + config = Fn.jsondecode(Token.as_string(resource_config.result)) + ElasticacheCluster(self, "example", + cluster_id=Token.as_string( + Fn.lookup_nested(config, ["cache", "cluster_id"])), + engine=Token.as_string(Fn.lookup_nested(config, ["cache", "engine"])), + node_type=Token.as_string(Fn.lookup_nested(config, ["cache", "node_type"])), + num_cache_nodes=Token.as_number( + Fn.lookup_nested(config, ["cache", "nodes"])), + parameter_group_name=Token.as_string( + Fn.lookup_nested(config, ["cache", "parameter_group"])), + tags=Token.as_string_map(Fn.lookup_nested(config, ["tags"])) + ) +``` + +### Error Handling + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import Token, Fn, Op, conditional, TerraformCount, TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. 
+# +from imports.null.resource import Resource +from imports.aws.data_aws_lambda_invocation import DataAwsLambdaInvocation +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + example = DataAwsLambdaInvocation(self, "example", + function_name=Token.as_string(aws_lambda_function_example.function_name), + input=Token.as_string( + Fn.jsonencode({ + "action": "validate", + "payload": configuration.value + })) + ) + result = Fn.jsondecode(Token.as_string(example.result)) + has_errors = Fn.try([ + Op.neq(Fn.lookup_nested(result, ["errors"]), "null"), False + ]) + error_messages = conditional(has_errors, + Fn.join(", ", Token.as_list(Fn.lookup_nested(result, ["errors"]))), "null") + # In most cases loops should be handled in the programming language context and + # not inside of the Terraform context. If you are looping over something external, e.g. a variable or a file input + # you should consider using a for loop. If you are looping over something only known to Terraform, e.g. a result of a data source + # you need to keep this like it is. + validation_check_count = TerraformCount.of( + Token.as_number( + conditional(has_errors, + fail("Configuration validation failed: ${" + error_messages + "}"), 0))) + Resource(self, "validation_check", + count=validation_check_count ) ``` ## Argument Reference -This data source supports the following arguments: +The following arguments are required: + +* `function_name` - (Required) Name of the Lambda function. +* `input` - (Required) String in JSON format that is passed as payload to the Lambda function. + +The following arguments are optional: -* `function_name` - (Required) Name of the lambda function. -* `input` - (Required) String in JSON format that is passed as payload to the lambda function. -* `qualifier` - (Optional) Qualifier (a.k.a version) of the lambda function. Defaults - to `$LATEST`. +* `qualifier` - (Optional) Qualifier (a.k.a version) of the Lambda function. 
Defaults to `$LATEST`. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). ## Attribute Reference This data source exports the following attributes in addition to the arguments above: -* `result` - String result of the lambda function invocation. +* `result` - String result of the Lambda function invocation. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/lambda_layer_version.html.markdown b/website/docs/cdktf/python/d/lambda_layer_version.html.markdown index 25c8f90afc51..b483e400906d 100644 --- a/website/docs/cdktf/python/d/lambda_layer_version.html.markdown +++ b/website/docs/cdktf/python/d/lambda_layer_version.html.markdown @@ -3,21 +3,115 @@ subcategory: "Lambda" layout: "aws" page_title: "AWS: aws_lambda_layer_version" description: |- - Provides a Lambda Layer Version data source. + Provides details about an AWS Lambda Layer Version. --- # Data Source: aws_lambda_layer_version -Provides information about a Lambda Layer Version. +Provides details about an AWS Lambda Layer Version. Use this data source to retrieve information about a specific layer version or find the latest version compatible with your runtime and architecture requirements. ## Example Usage +### Get Latest Layer Version + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import Token, TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. 
+# +from imports.aws.data_aws_lambda_layer_version import DataAwsLambdaLayerVersion +from imports.aws.lambda_function import LambdaFunction +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + example = DataAwsLambdaLayerVersion(self, "example", + layer_name="my-shared-utilities" + ) + aws_lambda_function_example = LambdaFunction(self, "example_1", + filename="function.zip", + function_name="example_function", + handler="index.handler", + layers=[Token.as_string(example.arn)], + role=lambda_role.arn, + runtime="nodejs20.x" + ) + # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. + aws_lambda_function_example.override_logical_id("example") +``` + +### Get Specific Layer Version + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformOutput, TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.data_aws_lambda_layer_version import DataAwsLambdaLayerVersion +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + example = DataAwsLambdaLayerVersion(self, "example", + layer_name="production-utilities", + version=5 + ) + TerraformOutput(self, "layer_info", + value=[{ + "arn": example.arn, + "description": example.description, + "version": example.version + } + ] + ) +``` + +### Get Latest Compatible Layer Version + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import Token, TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. 
+# +from imports.aws.data_aws_lambda_layer_version import DataAwsLambdaLayerVersion +from imports.aws.lambda_function import LambdaFunction +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + arm_layer = DataAwsLambdaLayerVersion(self, "arm_layer", + compatible_architecture="arm64", + layer_name="optimized-libraries" + ) + python_layer = DataAwsLambdaLayerVersion(self, "python_layer", + compatible_runtime="python3.12", + layer_name="python-dependencies" + ) + LambdaFunction(self, "example", + architectures=["arm64"], + filename="function.zip", + function_name="multi_layer_function", + handler="app.handler", + layers=[Token.as_string(python_layer.arn), Token.as_string(arm_layer.arn)], + role=lambda_role.arn, + runtime="python3.12" + ) +``` + +### Compare Layer Versions + ```python # DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug from constructs import Construct -from cdktf import VariableType, TerraformVariable, TerraformStack +from cdktf import Op, TerraformOutput, conditional, TerraformStack # # Provider bindings are generated by running `cdktf get`. # See https://cdk.tf/provider-generation for more details. @@ -26,44 +120,48 @@ from imports.aws.data_aws_lambda_layer_version import DataAwsLambdaLayerVersion class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) - # Terraform Variables are not always the best fit for getting inputs in the context of Terraform CDK. 
- # You can read more about this at https://cdk.tf/variables - layer_name = TerraformVariable(self, "layer_name", - type=VariableType.STRING + latest = DataAwsLambdaLayerVersion(self, "latest", + layer_name="shared-layer" + ) + stable = DataAwsLambdaLayerVersion(self, "stable", + layer_name="shared-layer", + version=3 ) - DataAwsLambdaLayerVersion(self, "existing", - layer_name=layer_name.string_value + use_latest_layer = Op.gt(latest.version, 5) + TerraformOutput(self, "selected_layer_version", + value=conditional(use_latest_layer, latest.version, stable.version) ) ``` ## Argument Reference -This data source supports the following arguments: +The following arguments are required: + +* `layer_name` - (Required) Name of the Lambda layer. + +The following arguments are optional: -* `layer_name` - (Required) Name of the lambda layer. +* `compatible_architecture` - (Optional) Specific architecture the layer version must support. Conflicts with `version`. If specified, the latest available layer version supporting the provided architecture will be used. +* `compatible_runtime` - (Optional) Specific runtime the layer version must support. Conflicts with `version`. If specified, the latest available layer version supporting the provided runtime will be used. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `version` - (Optional) Specific layer version. Conflicts with `compatible_runtime` and `compatible_architecture`. If omitted, the latest available layer version will be used. -* `compatible_runtime` (Optional) Specific runtime the layer version must support. Conflicts with `version`. If specified, the latest available layer version supporting the provided runtime will be used. 
-* `compatible_architecture` (Optional) Specific architecture the layer version could support. Conflicts with `version`. If specified, the latest available layer version supporting the provided architecture will be used. ## Attribute Reference This data source exports the following attributes in addition to the arguments above: +* `arn` - ARN of the Lambda Layer with version. * `code_sha256` - Base64-encoded representation of raw SHA-256 sum of the zip file. +* `compatible_architectures` - List of [Architectures](https://docs.aws.amazon.com/lambda/latest/dg/API_GetLayerVersion.html#SSS-GetLayerVersion-response-CompatibleArchitectures) the specific Lambda Layer version is compatible with. +* `compatible_runtimes` - List of [Runtimes](https://docs.aws.amazon.com/lambda/latest/dg/API_GetLayerVersion.html#SSS-GetLayerVersion-response-CompatibleRuntimes) the specific Lambda Layer version is compatible with. +* `created_date` - Date this resource was created. * `description` - Description of the specific Lambda Layer version. -* `license_info` - License info associated with the specific Lambda Layer version. -* `compatible_runtimes` - List of [Runtimes][1] the specific Lambda Layer version is compatible with. -* `compatible_architectures` - A list of [Architectures][2] the specific Lambda Layer version is compatible with. -* `arn` - ARN of the Lambda Layer with version. * `layer_arn` - ARN of the Lambda Layer without version. -* `created_date` - Date this resource was created. +* `license_info` - License info associated with the specific Lambda Layer version. * `signing_job_arn` - ARN of a signing job. -* `signing_profile_version_arn` - The ARN for a signing profile version. +* `signing_profile_version_arn` - ARN for a signing profile version. * `source_code_hash` - (**Deprecated** use `code_sha256` instead) Base64-encoded representation of raw SHA-256 sum of the zip file. * `source_code_size` - Size in bytes of the function .zip file. 
-* `version` - This Lambda Layer version. - -[1]: https://docs.aws.amazon.com/lambda/latest/dg/API_GetLayerVersion.html#SSS-GetLayerVersion-response-CompatibleRuntimes -[2]: https://docs.aws.amazon.com/lambda/latest/dg/API_GetLayerVersion.html#SSS-GetLayerVersion-response-CompatibleArchitectures +* `version` - Lambda Layer version. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/launch_configuration.html.markdown b/website/docs/cdktf/python/d/launch_configuration.html.markdown index 23cafb7e73d5..886fa81d8ce5 100644 --- a/website/docs/cdktf/python/d/launch_configuration.html.markdown +++ b/website/docs/cdktf/python/d/launch_configuration.html.markdown @@ -35,6 +35,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the launch configuration. ## Attribute Reference @@ -89,4 +90,4 @@ This data source exports the following attributes in addition to the arguments a * `device_name` - Name of the device. * `virtual_name` - Virtual Name of the device. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/launch_template.html.markdown b/website/docs/cdktf/python/d/launch_template.html.markdown index d419ceef9901..939c606c330b 100644 --- a/website/docs/cdktf/python/d/launch_template.html.markdown +++ b/website/docs/cdktf/python/d/launch_template.html.markdown @@ -58,6 +58,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `filter` - (Optional) Configuration block(s) for filtering. Detailed below. * `id` - (Optional) ID of the specific launch template to retrieve. * `name` - (Optional) Name of the launch template. @@ -84,4 +85,4 @@ This resource also exports a full set of attributes corresponding to the argumen - `read` - (Default `20m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/lb.html.markdown b/website/docs/cdktf/python/d/lb.html.markdown index 06d6a7e33f96..03773db42507 100644 --- a/website/docs/cdktf/python/d/lb.html.markdown +++ b/website/docs/cdktf/python/d/lb.html.markdown @@ -52,6 +52,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `arn` - (Optional) Full ARN of the load balancer. * `name` - (Optional) Unique name of the load balancer. 
* `tags` - (Optional) Mapping of tags, each pair of which must exactly match a pair on the desired load balancer. @@ -71,4 +72,4 @@ returned attributes - they are identical. - `read` - (Default `20m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/lb_hosted_zone_id.html.markdown b/website/docs/cdktf/python/d/lb_hosted_zone_id.html.markdown index 1aa33da21d0c..b62e05f34d23 100644 --- a/website/docs/cdktf/python/d/lb_hosted_zone_id.html.markdown +++ b/website/docs/cdktf/python/d/lb_hosted_zone_id.html.markdown @@ -44,14 +44,13 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: -* `region` - (Optional) Name of the region whose AWS ELB HostedZoneId is desired. - Defaults to the region from the AWS provider configuration. +* `region` - (Optional) Name of the Region whose AWS ELB HostedZoneId is desired. Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `load_balancer_type` - (Optional) Type of load balancer to create. Possible values are `application` or `network`. The default value is `application`. ## Attribute Reference This data source exports the following attributes in addition to the arguments above: -* `id` - ID of the AWS ELB HostedZoneId in the selected region. +* `id` - ID of the AWS ELB HostedZoneId in the selected Region. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/lb_listener.html.markdown b/website/docs/cdktf/python/d/lb_listener.html.markdown index 0ff8e85baa94..6f5836013dce 100644 --- a/website/docs/cdktf/python/d/lb_listener.html.markdown +++ b/website/docs/cdktf/python/d/lb_listener.html.markdown @@ -52,6 +52,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `arn` - (Optional) ARN of the listener. Required if `load_balancer_arn` and `port` is not set. * `load_balancer_arn` - (Optional) ARN of the load balancer. Required if `arn` is not set. * `port` - (Optional) Port of the listener. Required if `arn` is not set. @@ -68,4 +69,4 @@ See the [LB Listener Resource](/docs/providers/aws/r/lb_listener.html) for detai - `read` - (Default `20m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/lb_listener_rule.html.markdown b/website/docs/cdktf/python/d/lb_listener_rule.html.markdown index f4c22860e933..af3fb347bc95 100644 --- a/website/docs/cdktf/python/d/lb_listener_rule.html.markdown +++ b/website/docs/cdktf/python/d/lb_listener_rule.html.markdown @@ -24,7 +24,7 @@ from cdktf import VariableType, TerraformVariable, TerraformStack # Provider bindings are generated by running `cdktf get`. # See https://cdk.tf/provider-generation for more details. # -from imports.aws. 
import DataAwsLbListenerRule +from imports.aws.data_aws_lb_listener_rule import DataAwsLbListenerRule class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) @@ -34,7 +34,7 @@ class MyConvertedCode(TerraformStack): type=VariableType.STRING ) DataAwsLbListenerRule(self, "example", - arn=lb_rule_arn.value + arn=lb_rule_arn.string_value ) ``` @@ -48,7 +48,7 @@ from cdktf import VariableType, TerraformVariable, TerraformStack # Provider bindings are generated by running `cdktf get`. # See https://cdk.tf/provider-generation for more details. # -from imports.aws. import DataAwsLbListenerRule +from imports.aws.data_aws_lb_listener_rule import DataAwsLbListenerRule class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) @@ -61,8 +61,8 @@ class MyConvertedCode(TerraformStack): type=VariableType.NUMBER ) DataAwsLbListenerRule(self, "example", - listener_arn=lb_listener_arn.value, - priority=lb_rule_priority.value + listener_arn=lb_listener_arn.string_value, + priority=lb_rule_priority.number_value ) ``` @@ -70,6 +70,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `arn` - (Optional) ARN of the Listener Rule. Either `arn` or `listener_arn` must be set. * `listener_arn` - (Optional) ARN of the associated Listener. @@ -185,4 +186,4 @@ This data source exports the following attributes in addition to the arguments a * `values` - Set of `key`-`value` pairs indicating the query string parameters to match. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/lb_target_group.html.markdown b/website/docs/cdktf/python/d/lb_target_group.html.markdown index 1771a87095a7..8db1d5557604 100644 --- a/website/docs/cdktf/python/d/lb_target_group.html.markdown +++ b/website/docs/cdktf/python/d/lb_target_group.html.markdown @@ -52,6 +52,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `arn` - (Optional) Full ARN of the target group. * `name` - (Optional) Unique name of the target group. * `tags` - (Optional) Mapping of tags, each pair of which must exactly match a pair on the desired target group. @@ -71,4 +72,4 @@ on the returned attributes - they are identical. - `read` - (Default `20m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/lb_trust_store.html.markdown b/website/docs/cdktf/python/d/lb_trust_store.html.markdown index cf97aecdef5a..f6cc24d80623 100644 --- a/website/docs/cdktf/python/d/lb_trust_store.html.markdown +++ b/website/docs/cdktf/python/d/lb_trust_store.html.markdown @@ -52,6 +52,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `arn` - (Optional) Full ARN of the trust store. * `name` - (Optional) Unique name of the trust store. 
@@ -70,4 +71,4 @@ on the returned attributes - they are identical. - `read` - (Default `20m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/lbs.html.markdown b/website/docs/cdktf/python/d/lbs.html.markdown index 876449d97b00..9fc308e46490 100644 --- a/website/docs/cdktf/python/d/lbs.html.markdown +++ b/website/docs/cdktf/python/d/lbs.html.markdown @@ -40,6 +40,7 @@ class MyConvertedCode(TerraformStack): The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `tags` - (Optional) Map of tags, each pair of which must exactly match a pair on the desired Load Balancers. @@ -49,4 +50,4 @@ This data source exports the following attributes in addition to the arguments a * `arns` - Set of Load Balancer ARNs. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/lex_bot.html.markdown b/website/docs/cdktf/python/d/lex_bot.html.markdown index 6d9e6dc1a1d7..cdae2e51e57d 100644 --- a/website/docs/cdktf/python/d/lex_bot.html.markdown +++ b/website/docs/cdktf/python/d/lex_bot.html.markdown @@ -36,6 +36,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the bot. The name is case sensitive. * `version` - (Optional) Version or alias of the bot. 
@@ -60,4 +61,4 @@ This data source exports the following attributes in addition to the arguments a * `version` - Version of the bot. For a new bot, the version is always `$LATEST`. * `voice_id` - Amazon Polly voice ID that the Amazon Lex Bot uses for voice interactions with the user. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/lex_bot_alias.html.markdown b/website/docs/cdktf/python/d/lex_bot_alias.html.markdown index f3b40f5f86b2..c80c1109433e 100644 --- a/website/docs/cdktf/python/d/lex_bot_alias.html.markdown +++ b/website/docs/cdktf/python/d/lex_bot_alias.html.markdown @@ -36,6 +36,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `bot_name` - (Required) Name of the bot. * `name` - (Required) Name of the bot alias. The name is case sensitive. @@ -52,4 +53,4 @@ This data source exports the following attributes in addition to the arguments a * `last_updated_date` - Date that the bot alias was updated. When you create a resource, the creation date and the last updated date are the same. * `name` - Name of the alias. The name is not case sensitive. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/lex_intent.html.markdown b/website/docs/cdktf/python/d/lex_intent.html.markdown index d1dde1e2153c..7cf827cdb701 100644 --- a/website/docs/cdktf/python/d/lex_intent.html.markdown +++ b/website/docs/cdktf/python/d/lex_intent.html.markdown @@ -36,6 +36,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the intent. The name is case sensitive. * `version` - (Optional) Version of the intent. @@ -56,4 +57,4 @@ intent on. To find the signature for an intent, see in the Alexa Skills Kit. * `version` - Version of the bot. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/lex_slot_type.html.markdown b/website/docs/cdktf/python/d/lex_slot_type.html.markdown index 4f714e2084fc..0b968f30c254 100644 --- a/website/docs/cdktf/python/d/lex_slot_type.html.markdown +++ b/website/docs/cdktf/python/d/lex_slot_type.html.markdown @@ -36,6 +36,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the slot type. The name is case sensitive. * `version` - (Optional) Version of the slot type. @@ -58,4 +59,4 @@ value is similar to the slot value. 
`TOP_RESOLUTION` returns the first value in if there is a resolution list for the slot, otherwise null is returned. * `version` - Version of the slot type. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/licensemanager_grants.html.markdown b/website/docs/cdktf/python/d/licensemanager_grants.html.markdown index 8090e28aadf7..f19680725df6 100644 --- a/website/docs/cdktf/python/d/licensemanager_grants.html.markdown +++ b/website/docs/cdktf/python/d/licensemanager_grants.html.markdown @@ -43,6 +43,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `filter` - (Optional) Custom filter block as described below. ### `filter` @@ -82,4 +83,4 @@ This data source exports the following attributes in addition to the arguments a * `arns` - List of all the license grant ARNs found. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/licensemanager_received_license.html.markdown b/website/docs/cdktf/python/d/licensemanager_received_license.html.markdown index be08e97c2729..7d05d4b2e902 100644 --- a/website/docs/cdktf/python/d/licensemanager_received_license.html.markdown +++ b/website/docs/cdktf/python/d/licensemanager_received_license.html.markdown @@ -37,6 +37,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `license_arn` - (Required) The ARN of the received license you want data for. ## Attribute Reference @@ -120,4 +121,4 @@ A list with a single map. * `begin` - Start of the validity time range. * `end` - End of the validity time range. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/licensemanager_received_licenses.html.markdown b/website/docs/cdktf/python/d/licensemanager_received_licenses.html.markdown index 59280997bfbd..d2b3d23d8b28 100644 --- a/website/docs/cdktf/python/d/licensemanager_received_licenses.html.markdown +++ b/website/docs/cdktf/python/d/licensemanager_received_licenses.html.markdown @@ -41,6 +41,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `filter` - (Optional) Custom filter block as described below. ### `filter` @@ -80,4 +81,4 @@ This data source exports the following attributes in addition to the arguments a * `arns` - List of all the license ARNs found. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/location_geofence_collection.html.markdown b/website/docs/cdktf/python/d/location_geofence_collection.html.markdown index 065b3d1a2a05..158438bbea97 100644 --- a/website/docs/cdktf/python/d/location_geofence_collection.html.markdown +++ b/website/docs/cdktf/python/d/location_geofence_collection.html.markdown @@ -35,8 +35,9 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -The following arguments are required: +This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `collection_name` - (Required) Name of the geofence collection. ## Attribute Reference @@ -50,4 +51,4 @@ This data source exports the following attributes in addition to the arguments a * `tags` - Key-value map of resource tags for the geofence collection. * `update_time` - Timestamp for when the geofence collection resource was last updated in ISO 8601 format. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/location_map.html.markdown b/website/docs/cdktf/python/d/location_map.html.markdown index 9b4bf1fe01c3..77874edebb08 100644 --- a/website/docs/cdktf/python/d/location_map.html.markdown +++ b/website/docs/cdktf/python/d/location_map.html.markdown @@ -35,6 +35,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `map_name` - (Required) Name of the map resource. ## Attribute Reference @@ -49,4 +50,4 @@ This data source exports the following attributes in addition to the arguments a * `tags` - Key-value map of resource tags for the map. * `update_time` - Timestamp for when the map resource was last updated in ISO 8601 format. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/location_place_index.html.markdown b/website/docs/cdktf/python/d/location_place_index.html.markdown index b56c9eb23d91..f0f6bb061ed9 100644 --- a/website/docs/cdktf/python/d/location_place_index.html.markdown +++ b/website/docs/cdktf/python/d/location_place_index.html.markdown @@ -35,6 +35,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `index_name` - (Required) Name of the place index resource. ## Attribute Reference @@ -49,4 +50,4 @@ This data source exports the following attributes in addition to the arguments a * `tags` - Key-value map of resource tags for the place index. * `update_time` - Timestamp for when the place index resource was last updated in ISO 8601 format. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/location_route_calculator.html.markdown b/website/docs/cdktf/python/d/location_route_calculator.html.markdown index c2df5c4ed156..f9af9d21d067 100644 --- a/website/docs/cdktf/python/d/location_route_calculator.html.markdown +++ b/website/docs/cdktf/python/d/location_route_calculator.html.markdown @@ -35,6 +35,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `calculator_name` - (Required) Name of the route calculator resource. ## Attribute Reference @@ -48,4 +49,4 @@ This data source exports the following attributes in addition to the arguments a * `tags` - Key-value map of resource tags for the route calculator. * `update_time` - Timestamp for when the route calculator resource was last updated in ISO 8601 format. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/location_tracker.html.markdown b/website/docs/cdktf/python/d/location_tracker.html.markdown index d7da7589af11..4a1a18cbb2a2 100644 --- a/website/docs/cdktf/python/d/location_tracker.html.markdown +++ b/website/docs/cdktf/python/d/location_tracker.html.markdown @@ -35,6 +35,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
* `tracker_name` - (Required) Name of the tracker resource. ## Attribute Reference @@ -49,4 +50,4 @@ This data source exports the following attributes in addition to the arguments a * `tracker_arn` - ARN for the tracker resource. Used when you need to specify a resource across all AWS. * `update_time` - Timestamp for when the tracker resource was last updated in ISO 8601 format. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/location_tracker_association.html.markdown b/website/docs/cdktf/python/d/location_tracker_association.html.markdown index d9a7a50ca571..d20c998b1175 100644 --- a/website/docs/cdktf/python/d/location_tracker_association.html.markdown +++ b/website/docs/cdktf/python/d/location_tracker_association.html.markdown @@ -36,8 +36,9 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -The following arguments are required: +This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `consumer_arn` - (Required) ARN of the geofence collection associated to tracker resource. * `tracker_name` - (Required) Name of the tracker resource associated with a geofence collection. @@ -45,4 +46,4 @@ The following arguments are required: This data source exports no additional attributes. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/location_tracker_associations.html.markdown b/website/docs/cdktf/python/d/location_tracker_associations.html.markdown index 3c5357332a92..672c304d9079 100644 --- a/website/docs/cdktf/python/d/location_tracker_associations.html.markdown +++ b/website/docs/cdktf/python/d/location_tracker_associations.html.markdown @@ -35,8 +35,9 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -The following arguments are required: +This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `tracker_name` - (Required) Name of the tracker resource associated with a geofence collection. ## Attribute Reference @@ -45,4 +46,4 @@ This data source exports the following attributes in addition to the arguments a * `consumer_arns` - List of geofence collection ARNs associated to the tracker resource. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/media_convert_queue.html.markdown b/website/docs/cdktf/python/d/media_convert_queue.html.markdown index c6d16e3c52be..a1246a43679d 100644 --- a/website/docs/cdktf/python/d/media_convert_queue.html.markdown +++ b/website/docs/cdktf/python/d/media_convert_queue.html.markdown @@ -8,7 +8,7 @@ description: |- -# Resource: aws_media_convert_queue +# Data Source: aws_media_convert_queue Retrieve information about a AWS Elemental MediaConvert Queue. @@ -35,6 +35,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `id` - (Required) Unique identifier of the queue. The same as `name`. ## Attribute Reference @@ -46,4 +47,4 @@ This data source exports the following attributes in addition to the arguments a * `status` - The status of the queue. * `tags` - A map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/medialive_input.html.markdown b/website/docs/cdktf/python/d/medialive_input.html.markdown index 625c43eb3a93..bb3732791a64 100644 --- a/website/docs/cdktf/python/d/medialive_input.html.markdown +++ b/website/docs/cdktf/python/d/medialive_input.html.markdown @@ -35,8 +35,9 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -The following arguments are required: +This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `id` - (Required) The ID of the Input. ## Attribute Reference @@ -59,4 +60,4 @@ This data source exports the following attributes in addition to the arguments a * `tags` - A map of tags assigned to the Input. * `type` - The type of the input. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/memorydb_acl.html.markdown b/website/docs/cdktf/python/d/memorydb_acl.html.markdown index 32b141af2b71..e2e64c3ba26d 100644 --- a/website/docs/cdktf/python/d/memorydb_acl.html.markdown +++ b/website/docs/cdktf/python/d/memorydb_acl.html.markdown @@ -8,7 +8,7 @@ description: |- -# Resource: aws_memorydb_acl +# Data Source: aws_memorydb_acl Provides information about a MemoryDB ACL. @@ -33,8 +33,9 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -The following arguments are required: +This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the ACL. ## Attribute Reference @@ -47,4 +48,4 @@ This data source exports the following attributes in addition to the arguments a * `tags` - Map of tags assigned to the ACL. * `user_names` - Set of MemoryDB user names included in this ACL. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/memorydb_cluster.html.markdown b/website/docs/cdktf/python/d/memorydb_cluster.html.markdown index cc84154b01bb..6ec1ad88f1c4 100644 --- a/website/docs/cdktf/python/d/memorydb_cluster.html.markdown +++ b/website/docs/cdktf/python/d/memorydb_cluster.html.markdown @@ -8,7 +8,7 @@ description: |- -# Resource: aws_memorydb_cluster +# Data Source: aws_memorydb_cluster Provides information about a MemoryDB Cluster. 
@@ -33,8 +33,9 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -The following arguments are required: +This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the cluster. ## Attribute Reference @@ -80,4 +81,4 @@ This data source exports the following attributes in addition to the arguments a * `tls_enabled` - When true, in-transit encryption is enabled for the cluster. * `tags` - Map of tags assigned to the cluster. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/memorydb_parameter_group.html.markdown b/website/docs/cdktf/python/d/memorydb_parameter_group.html.markdown index 94ccc96dbeb1..d57eab5f8028 100644 --- a/website/docs/cdktf/python/d/memorydb_parameter_group.html.markdown +++ b/website/docs/cdktf/python/d/memorydb_parameter_group.html.markdown @@ -8,7 +8,7 @@ description: |- -# Resource: aws_memorydb_parameter_group +# Data Source: aws_memorydb_parameter_group Provides information about a MemoryDB Parameter Group. @@ -33,8 +33,9 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -The following arguments are required: +This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the parameter group. 
## Attribute Reference @@ -50,4 +51,4 @@ This data source exports the following attributes in addition to the arguments a * `value` - Value of the parameter. * `tags` - Map of tags assigned to the parameter group. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/memorydb_snapshot.html.markdown b/website/docs/cdktf/python/d/memorydb_snapshot.html.markdown index e07bcacf55cf..76330f29869a 100644 --- a/website/docs/cdktf/python/d/memorydb_snapshot.html.markdown +++ b/website/docs/cdktf/python/d/memorydb_snapshot.html.markdown @@ -8,7 +8,7 @@ description: |- -# Resource: aws_memorydb_snapshot +# Data Source: aws_memorydb_snapshot Provides information about a MemoryDB Snapshot. @@ -33,8 +33,9 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -The following arguments are required: +This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the snapshot. ## Attribute Reference @@ -63,4 +64,4 @@ This data source exports the following attributes in addition to the arguments a * `source` - Whether the snapshot is from an automatic backup (`automated`) or was created manually (`manual`). * `tags` - Map of tags assigned to the snapshot. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/memorydb_subnet_group.html.markdown b/website/docs/cdktf/python/d/memorydb_subnet_group.html.markdown index 2612ca33647f..6c0413c3b32f 100644 --- a/website/docs/cdktf/python/d/memorydb_subnet_group.html.markdown +++ b/website/docs/cdktf/python/d/memorydb_subnet_group.html.markdown @@ -8,7 +8,7 @@ description: |- -# Resource: aws_memorydb_subnet_group +# Data Source: aws_memorydb_subnet_group Provides information about a MemoryDB Subnet Group. @@ -33,8 +33,9 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -The following arguments are required: +This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the subnet group. ## Attribute Reference @@ -48,4 +49,4 @@ This data source exports the following attributes in addition to the arguments a * `vpc_id` - VPC in which the subnet group exists. * `tags` - Map of tags assigned to the subnet group. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/memorydb_user.html.markdown b/website/docs/cdktf/python/d/memorydb_user.html.markdown index efe33dec939c..e568ed4b818d 100644 --- a/website/docs/cdktf/python/d/memorydb_user.html.markdown +++ b/website/docs/cdktf/python/d/memorydb_user.html.markdown @@ -8,7 +8,7 @@ description: |- -# Resource: aws_memorydb_user +# Data Source: aws_memorydb_user Provides information about a MemoryDB User. 
@@ -33,8 +33,9 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -The following arguments are required: +This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `user_name` - (Required) Name of the user. ## Attribute Reference @@ -50,4 +51,4 @@ This data source exports the following attributes in addition to the arguments a * `minimum_engine_version` - Minimum engine version supported for the user. * `tags` - Map of tags assigned to the user. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/mq_broker.html.markdown b/website/docs/cdktf/python/d/mq_broker.html.markdown index 89a1db0aa3bb..8b79e44664b6 100644 --- a/website/docs/cdktf/python/d/mq_broker.html.markdown +++ b/website/docs/cdktf/python/d/mq_broker.html.markdown @@ -3,21 +3,21 @@ subcategory: "MQ" layout: "aws" page_title: "AWS: aws_mq_broker" description: |- - Provides a MQ Broker data source. + Provides details about an existing Amazon MQ broker. --- # Data Source: aws_mq_broker -Provides information about a MQ Broker. +Provides details about an existing Amazon MQ broker. Use this data source to retrieve configuration and metadata for an Amazon MQ broker by ID or name. ## Example Usage ```python # DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug from constructs import Construct -from cdktf import VariableType, TerraformVariable, TerraformStack +from cdktf import TerraformStack # # Provider bindings are generated by running `cdktf get`. # See https://cdk.tf/provider-generation for more details. 
@@ -26,36 +26,90 @@ from imports.aws.data_aws_mq_broker import DataAwsMqBroker class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) - # Terraform Variables are not always the best fit for getting inputs in the context of Terraform CDK. - # You can read more about this at https://cdk.tf/variables - broker_id = TerraformVariable(self, "broker_id", - default="", - type=VariableType.STRING - ) - broker_name = TerraformVariable(self, "broker_name", - default="", - type=VariableType.STRING - ) - DataAwsMqBroker(self, "by_id", - broker_id=broker_id.string_value - ) - DataAwsMqBroker(self, "by_name", - broker_name=broker_name.string_value + DataAwsMqBroker(self, "example", + broker_id="b-1234a5b6-78cd-901e-2fgh-3i45j6k178l9" ) ``` ## Argument Reference -This data source supports the following arguments: +The following arguments are optional: -* `broker_id` - (Optional) Unique id of the mq broker. -* `broker_name` - (Optional) Unique name of the mq broker. +* `broker_id` - (Optional) Unique ID of the MQ broker. +* `broker_name` - (Optional) Unique name of the MQ broker. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +~> **Note:** Either `broker_id` or `broker_name` must be specified. ## Attribute Reference This data source exports the following attributes in addition to the arguments above: -See the [`aws_mq_broker` resource](/docs/providers/aws/r/mq_broker.html) for details on the returned attributes. -They are identical except for user password, which is not returned when describing broker. +* `arn` - ARN of the broker. +* `authentication_strategy` - Authentication strategy used to secure the broker. 
+* `auto_minor_version_upgrade` - Whether to automatically upgrade to new minor versions of brokers as Amazon MQ makes releases available. +* `configuration` - Configuration block for broker configuration. See [Configuration](#configuration) below. +* `deployment_mode` - Deployment mode of the broker. +* `encryption_options` - Configuration block containing encryption options. See [Encryption Options](#encryption-options) below. +* `engine_type` - Type of broker engine. +* `engine_version` - Version of the broker engine. +* `host_instance_type` - Broker's instance type. +* `instances` - List of information about allocated brokers (both active & standby). See [Instances](#instances) below. +* `ldap_server_metadata` - Configuration block for the LDAP server used to authenticate and authorize connections to the broker. See [LDAP Server Metadata](#ldap-server-metadata) below. +* `logs` - Configuration block for the logging configuration of the broker. See [Logs](#logs) below. +* `maintenance_window_start_time` - Configuration block for the maintenance window start time. See [Maintenance Window Start Time](#maintenance-window-start-time) below. +* `publicly_accessible` - Whether to enable connections from applications outside of the VPC that hosts the broker's subnets. +* `security_groups` - List of security group IDs assigned to the broker. +* `storage_type` - Storage type of the broker. +* `subnet_ids` - List of subnet IDs in which to launch the broker. +* `tags` - Map of tags assigned to the broker. +* `user` - Configuration block for broker users. See [User](#user) below. + +### Configuration + +* `id` - Configuration ID. +* `revision` - Revision of the Configuration. + +### Encryption Options + +* `kms_key_id` - Amazon Resource Name (ARN) of Key Management Service (KMS) Customer Master Key (CMK) to use for encryption at rest. +* `use_aws_owned_key` - Whether to enable an AWS-owned KMS CMK that is not in your account. 
+ +### Instances + +* `console_url` - URL of the ActiveMQ Web Console or the RabbitMQ Management UI depending on `engine_type`. +* `endpoints` - Broker's wire-level protocol endpoints. +* `ip_address` - IP address of the broker. + +### LDAP Server Metadata + +* `hosts` - List of fully qualified domain names of the LDAP server and an optional failover server. +* `role_base` - Fully qualified name of the directory to search for a user's groups. +* `role_name` - LDAP attribute that identifies the group name attribute in the object returned from the group membership query. +* `role_search_matching` - Search criteria for groups. +* `role_search_subtree` - Whether the directory search scope is the entire sub-tree. +* `service_account_password` - Service account password. +* `service_account_username` - Service account username. +* `user_base` - Fully qualified name of the directory where you want to search for users. +* `user_role_name` - Name of the LDAP attribute for the user group membership. +* `user_search_matching` - Search criteria for users. +* `user_search_subtree` - Whether the directory search scope is the entire sub-tree. + +### Logs + +* `audit` - Whether audit logging is enabled. +* `general` - Whether general logging is enabled. + +### Maintenance Window Start Time + +* `day_of_week` - Day of the week. +* `time_of_day` - Time, in 24-hour format. +* `time_zone` - Time zone in either the Country/City format or the UTC offset format. + +### User + +* `console_access` - Whether to enable access to the ActiveMQ Web Console for the user. +* `groups` - List of groups to which the ActiveMQ user belongs. +* `replication_user` - Whether the user is a replication user. +* `username` - Username of the user.
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/mq_broker_engine_types.html.markdown b/website/docs/cdktf/python/d/mq_broker_engine_types.html.markdown index 2665594885f8..b5f372cc277e 100644 --- a/website/docs/cdktf/python/d/mq_broker_engine_types.html.markdown +++ b/website/docs/cdktf/python/d/mq_broker_engine_types.html.markdown @@ -3,19 +3,17 @@ subcategory: "MQ" layout: "aws" page_title: "AWS: aws_mq_broker_engine_types" description: |- - Retrieve information about available broker engines. + Provides details about available MQ broker engine types. --- # Data Source: aws_mq_broker_engine_types -Retrieve information about available broker engines. +Provides details about available MQ broker engine types. Use this data source to retrieve supported engine types and their versions for Amazon MQ brokers. ## Example Usage -### Basic Usage - ```python # DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug from constructs import Construct @@ -37,17 +35,22 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: -* `engine_type` - (Optional) The MQ engine type to return version details for. +* `engine_type` - (Optional) MQ engine type to return version details for. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). ## Attribute Reference This data source exports the following attributes in addition to the arguments above: -* `broker_engine_types` - A list of available engine types and versions. See [Engine Types](#engine-types). +* `broker_engine_types` - List of available engine types and versions. See [Engine Types](#engine-types). + +### Engine Types + +* `engine_type` - Broker's engine type. 
+* `engine_versions` - List of engine versions. See [Engine Versions](#engine-versions). -### engine-types +### Engine Versions -* `engine_type` - The broker's engine type. -* `engine_versions` - The list of engine versions. +* `name` - Name of the engine version. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/mq_broker_instance_type_offerings.html.markdown b/website/docs/cdktf/python/d/mq_broker_instance_type_offerings.html.markdown index 9bd34eb6e55b..b278adc1afe4 100644 --- a/website/docs/cdktf/python/d/mq_broker_instance_type_offerings.html.markdown +++ b/website/docs/cdktf/python/d/mq_broker_instance_type_offerings.html.markdown @@ -3,14 +3,14 @@ subcategory: "MQ" layout: "aws" page_title: "AWS: aws_mq_broker_instance_type_offerings" description: |- - Provides a MQ Broker Instance Offerings data source. + Provides details about available MQ broker instance type offerings. --- # Data Source: aws_mq_broker_instance_type_offerings -Provides information about a MQ Broker Instance Offerings. +Provides details about available MQ broker instance type offerings. Use this data source to discover supported instance types, storage types, and deployment modes for Amazon MQ brokers. 
## Example Usage @@ -26,48 +26,49 @@ from imports.aws.data_aws_mq_broker_instance_type_offerings import DataAwsMqBrok class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) - DataAwsMqBrokerInstanceTypeOfferings(self, "all", + DataAwsMqBrokerInstanceTypeOfferings(self, "activemq", + engine_type="ACTIVEMQ" + ) + DataAwsMqBrokerInstanceTypeOfferings(self, "all") + DataAwsMqBrokerInstanceTypeOfferings(self, "ebs", + storage_type="EBS" + ) + DataAwsMqBrokerInstanceTypeOfferings(self, "filtered", engine_type="ACTIVEMQ", host_instance_type="mq.m5.large", storage_type="EBS" ) - DataAwsMqBrokerInstanceTypeOfferings(self, "empty") - DataAwsMqBrokerInstanceTypeOfferings(self, "engine", - engine_type="ACTIVEMQ" - ) - DataAwsMqBrokerInstanceTypeOfferings(self, "instance", + DataAwsMqBrokerInstanceTypeOfferings(self, "m5", host_instance_type="mq.m5.large" ) - DataAwsMqBrokerInstanceTypeOfferings(self, "storage", - storage_type="EBS" - ) ``` ## Argument Reference -This data source supports the following arguments: +The following arguments are optional: * `engine_type` - (Optional) Filter response by engine type. * `host_instance_type` - (Optional) Filter response by host instance type. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `storage_type` - (Optional) Filter response by storage type. ## Attribute Reference This data source exports the following attributes in addition to the arguments above: -* `broker_instance_options` - Option for host instance type. See Broker Instance Options below. +* `broker_instance_options` - List of broker instance options. See [Broker Instance Options](#broker-instance-options) below. 
### Broker Instance Options -* `availability_zones` - List of available AZs. See Availability Zones. below +* `availability_zones` - List of available Availability Zones. See [Availability Zones](#availability-zones) below. * `engine_type` - Broker's engine type. * `host_instance_type` - Broker's instance type. * `storage_type` - Broker's storage type. -* `supported_deployment_modes` - The list of supported deployment modes. -* `supported_engine_versions` - The list of supported engine versions. +* `supported_deployment_modes` - List of supported deployment modes. +* `supported_engine_versions` - List of supported engine versions. ### Availability Zones * `name` - Name of the Availability Zone. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/msk_bootstrap_brokers.html.markdown b/website/docs/cdktf/python/d/msk_bootstrap_brokers.html.markdown index 5093da9ee30a..ff12dfb7000b 100644 --- a/website/docs/cdktf/python/d/msk_bootstrap_brokers.html.markdown +++ b/website/docs/cdktf/python/d/msk_bootstrap_brokers.html.markdown @@ -35,6 +35,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `cluster_arn` - (Required) ARN of the cluster the nodes belong to. ## Attribute Reference @@ -52,4 +53,4 @@ This data source exports the following attributes in addition to the arguments a * `bootstrap_brokers_vpc_connectivity_sasl_scram` - A string containing one or more DNS names (or IP addresses) and SASL SCRAM port pairs for VPC connectivity. * `bootstrap_brokers_vpc_connectivity_tls` - A string containing one or more DNS names (or IP addresses) and TLS port pairs for VPC connectivity. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/msk_broker_nodes.html.markdown b/website/docs/cdktf/python/d/msk_broker_nodes.html.markdown index 85cb40ae6117..7d0feb19fc31 100644 --- a/website/docs/cdktf/python/d/msk_broker_nodes.html.markdown +++ b/website/docs/cdktf/python/d/msk_broker_nodes.html.markdown @@ -35,6 +35,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `cluster_arn` - (Required) ARN of the cluster the nodes belong to. ## Attribute Reference @@ -52,4 +53,4 @@ This data source exports the following attributes in addition to the arguments a * `endpoints` - Set of endpoints for accessing the broker. This does not include ports * `node_arn` - ARN of the node - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/msk_cluster.html.markdown b/website/docs/cdktf/python/d/msk_cluster.html.markdown index 5ded5ddf3be1..67448f0b07e0 100644 --- a/website/docs/cdktf/python/d/msk_cluster.html.markdown +++ b/website/docs/cdktf/python/d/msk_cluster.html.markdown @@ -37,6 +37,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `cluster_name` - (Required) Name of the cluster. 
## Attribute Reference @@ -59,4 +60,4 @@ This data source exports the following attributes in addition to the arguments a * `zookeeper_connect_string` - A comma separated list of one or more hostname:port pairs to use to connect to the Apache Zookeeper cluster. The returned values are sorted alphbetically. The AWS API may not return all endpoints, so this value is not guaranteed to be stable across applies. * `zookeeper_connect_string_tls` - A comma separated list of one or more hostname:port pairs to use to connect to the Apache Zookeeper cluster via TLS. The returned values are sorted alphabetically. The AWS API may not return all endpoints, so this value is not guaranteed to be stable across applies. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/msk_configuration.html.markdown b/website/docs/cdktf/python/d/msk_configuration.html.markdown index de87cb089664..7b313b12f02f 100644 --- a/website/docs/cdktf/python/d/msk_configuration.html.markdown +++ b/website/docs/cdktf/python/d/msk_configuration.html.markdown @@ -35,6 +35,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the configuration. ## Attribute Reference @@ -47,4 +48,4 @@ This data source exports the following attributes in addition to the arguments a * `kafka_versions` - List of Apache Kafka versions which can use this configuration. * `server_properties` - Contents of the server.properties file. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/msk_kafka_version.html.markdown b/website/docs/cdktf/python/d/msk_kafka_version.html.markdown index 83342b2c2452..461f754ef78f 100644 --- a/website/docs/cdktf/python/d/msk_kafka_version.html.markdown +++ b/website/docs/cdktf/python/d/msk_kafka_version.html.markdown @@ -38,6 +38,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `preferred_versions` - (Optional) Ordered list of preferred Kafka versions. The first match in this list will be returned. Either `preferred_versions` or `version` must be set. * `version` - (Optional) Version of MSK Kafka. For example 2.4.1.1 or "2.2.1" etc. Either `preferred_versions` or `version` must be set. @@ -47,4 +48,4 @@ This data source exports the following attributes in addition to the arguments a * `status` - Status of the MSK Kafka version eg. `ACTIVE` or `DEPRECATED`. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/msk_vpc_connection.html.markdown b/website/docs/cdktf/python/d/msk_vpc_connection.html.markdown index c4b52550d13f..b3b3b57e8306 100644 --- a/website/docs/cdktf/python/d/msk_vpc_connection.html.markdown +++ b/website/docs/cdktf/python/d/msk_vpc_connection.html.markdown @@ -34,6 +34,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `arn` - (Required) ARN of the VPC Connection. ## Attribute Reference @@ -47,4 +48,4 @@ This data source exports the following attributes in addition to the arguments a * `target_cluster_arn` - The Amazon Resource Name (ARN) of the cluster. * `vpc_id` - The VPC ID of the remote client. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/mskconnect_connector.html.markdown b/website/docs/cdktf/python/d/mskconnect_connector.html.markdown index 6efc668de6cf..8c2a11610e7e 100644 --- a/website/docs/cdktf/python/d/mskconnect_connector.html.markdown +++ b/website/docs/cdktf/python/d/mskconnect_connector.html.markdown @@ -35,6 +35,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the connector. ## Attribute Reference @@ -46,4 +47,4 @@ This data source exports the following attributes in addition to the arguments a * `tags` - A map of tags assigned to the resource. * `version` - Current version of the connector. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/mskconnect_custom_plugin.html.markdown b/website/docs/cdktf/python/d/mskconnect_custom_plugin.html.markdown index 417f974a09e1..d99ef5c5089a 100644 --- a/website/docs/cdktf/python/d/mskconnect_custom_plugin.html.markdown +++ b/website/docs/cdktf/python/d/mskconnect_custom_plugin.html.markdown @@ -35,6 +35,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the custom plugin. ## Attribute Reference @@ -47,4 +48,4 @@ This data source exports the following attributes in addition to the arguments a * `state` - the state of the custom plugin. * `tags` - A map of tags assigned to the resource. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/mskconnect_worker_configuration.html.markdown b/website/docs/cdktf/python/d/mskconnect_worker_configuration.html.markdown index 6267f3eda1d4..aba808760865 100644 --- a/website/docs/cdktf/python/d/mskconnect_worker_configuration.html.markdown +++ b/website/docs/cdktf/python/d/mskconnect_worker_configuration.html.markdown @@ -35,6 +35,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the worker configuration. 
## Attribute Reference @@ -47,4 +48,4 @@ This data source exports the following attributes in addition to the arguments a * `properties_file_content` - contents of connect-distributed.properties file. * `tags` - A map of tags assigned to the resource. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/nat_gateway.html.markdown b/website/docs/cdktf/python/d/nat_gateway.html.markdown index 47d72052e174..8022276747b9 100644 --- a/website/docs/cdktf/python/d/nat_gateway.html.markdown +++ b/website/docs/cdktf/python/d/nat_gateway.html.markdown @@ -57,6 +57,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `id` - (Optional) ID of the specific NAT Gateway to retrieve. * `subnet_id` - (Optional) ID of subnet that the NAT Gateway resides in. * `vpc_id` - (Optional) ID of the VPC that the NAT Gateway resides in. @@ -98,4 +99,4 @@ This data source exports the following attributes in addition to the arguments a - `read` - (Default `20m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/nat_gateways.html.markdown b/website/docs/cdktf/python/d/nat_gateways.html.markdown index 898d5b37cf1b..6194e2fc1193 100644 --- a/website/docs/cdktf/python/d/nat_gateways.html.markdown +++ b/website/docs/cdktf/python/d/nat_gateways.html.markdown @@ -53,6 +53,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `filter` - (Optional) Custom filter block as described below. * `vpc_id` - (Optional) VPC ID that you want to filter from. * `tags` - (Optional) Map of tags, each pair of which must exactly match @@ -80,4 +81,4 @@ This data source exports the following attributes in addition to the arguments a - `read` - (Default `20m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/neptune_engine_version.html.markdown b/website/docs/cdktf/python/d/neptune_engine_version.html.markdown index 09d8badaebb8..6e559f0977e6 100644 --- a/website/docs/cdktf/python/d/neptune_engine_version.html.markdown +++ b/website/docs/cdktf/python/d/neptune_engine_version.html.markdown @@ -46,6 +46,7 @@ This data source supports the following arguments: * `preferred_major_targets` - (Optional) Ordered list of preferred major engine versions. * `preferred_upgrade_targets` - (Optional) Ordered list of preferred upgrade engine versions. * `preferred_versions` - (Optional) Ordered list of preferred engine versions. The first match in this list will be returned. If no preferred matches are found and the original search returned more than one result, an error is returned. If both the `version` and `preferred_versions` arguments are not configured, the data source will return the default version for the engine. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `version` - (Optional) Version of the DB engine. For example, `1.0.1.0`, `1.0.2.2`, and `1.0.3.0`. 
If both the `version` and `preferred_versions` arguments are not configured, the data source will return the default version for the engine. ## Attribute Reference @@ -66,4 +67,4 @@ This data source exports the following attributes in addition to the arguments a * `version_actual` - Actual engine version returned by the API. * `version_description` - Description of the database engine version. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/neptune_orderable_db_instance.html.markdown b/website/docs/cdktf/python/d/neptune_orderable_db_instance.html.markdown index 14571598b5b0..710ee98264f8 100644 --- a/website/docs/cdktf/python/d/neptune_orderable_db_instance.html.markdown +++ b/website/docs/cdktf/python/d/neptune_orderable_db_instance.html.markdown @@ -36,6 +36,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `engine` - (Optional) DB engine. (Default: `neptune`) * `engine_version` - (Optional) Version of the DB engine. For example, `1.0.1.0`, `1.0.1.2`, `1.0.2.2`, and `1.0.3.0`. * `instance_class` - (Optional) DB instance class. Examples of classes are `db.r5.large`, `db.r5.xlarge`, `db.r4.large`, `db.r5.4xlarge`, `db.r5.12xlarge`, `db.r4.xlarge`, and `db.t3.medium`. @@ -63,4 +64,4 @@ This data source exports the following attributes in addition to the arguments a * `supports_performance_insights` - Whether a DB instance supports Performance Insights. * `supports_storage_encryption` - Whether a DB instance supports encrypted storage. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/network_acls.html.markdown b/website/docs/cdktf/python/d/network_acls.html.markdown index 3a3d88488772..e4ccbed48ef3 100644 --- a/website/docs/cdktf/python/d/network_acls.html.markdown +++ b/website/docs/cdktf/python/d/network_acls.html.markdown @@ -88,6 +88,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `vpc_id` - (Optional) VPC ID that you want to filter from. * `tags` - (Optional) Map of tags, each pair of which must exactly match a pair on the desired network ACLs. @@ -115,4 +116,4 @@ This data source exports the following attributes in addition to the arguments a - `read` - (Default `20m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/network_interface.html.markdown b/website/docs/cdktf/python/d/network_interface.html.markdown index f5c509a0ab9e..a31d42efda6b 100644 --- a/website/docs/cdktf/python/d/network_interface.html.markdown +++ b/website/docs/cdktf/python/d/network_interface.html.markdown @@ -35,15 +35,17 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: -* `id` – (Optional) Identifier for the network interface. -* `filter` – (Optional) One or more name/value pairs to filter off of. There are several valid keys, for a full reference, check out [describe-network-interfaces](https://docs.aws.amazon.com/cli/latest/reference/ec2/describe-network-interfaces.html) in the AWS CLI reference. 
+* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `id` - (Optional) Identifier for the network interface. +* `filter` - (Optional) One or more name/value pairs to filter off of. There are several valid keys, for a full reference, check out [describe-network-interfaces](https://docs.aws.amazon.com/cli/latest/reference/ec2/describe-network-interfaces.html) in the AWS CLI reference. ## Attribute Reference This data source exports the following attributes in addition to the arguments above: * `arn` - ARN of the network interface. -* `association` - Association information for an Elastic IP address (IPv4) associated with the network interface. See supported fields below. +* `association` - Association information for an Elastic IP address (IPv4) associated with the network interface. See [association](#association) below. +* `attachment` - Attachment of the ENI. See [attachment](#attachment) below. * `availability_zone` - Availability Zone. * `description` - Description of the network interface. * `interface_type` - Type of interface. @@ -70,10 +72,18 @@ This data source exports the following attributes in addition to the arguments a * `public_dns_name` - Public DNS name. * `public_ip` - Address of the Elastic IP address bound to the network interface. +### `attachment` + +* `attachment_id` - ID of the network interface attachment. +* `device_index` - Device index of the network interface attachment on the instance. +* `instance_id` - ID of the instance. +* `instance_owner_id` - AWS account ID of the owner of the instance. +* `network_card_index` - Index of the network card. 
+ ## Timeouts [Configuration options](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts): - `read` - (Default `20m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/network_interfaces.html.markdown b/website/docs/cdktf/python/d/network_interfaces.html.markdown index 646ca97d94fa..8ebcea51f062 100644 --- a/website/docs/cdktf/python/d/network_interfaces.html.markdown +++ b/website/docs/cdktf/python/d/network_interfaces.html.markdown @@ -91,6 +91,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `tags` - (Optional) Map of tags, each pair of which must exactly match a pair on the desired network interfaces. * `filter` - (Optional) Custom filter block as described below. @@ -116,4 +117,4 @@ This data source exports the following attributes in addition to the arguments a - `read` - (Default `20m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/networkfirewall_firewall.html.markdown b/website/docs/cdktf/python/d/networkfirewall_firewall.html.markdown index d5f14e1fe6fe..36c75ee9a732 100644 --- a/website/docs/cdktf/python/d/networkfirewall_firewall.html.markdown +++ b/website/docs/cdktf/python/d/networkfirewall_firewall.html.markdown @@ -76,6 +76,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `arn` - ARN of the firewall. * `name` - Descriptive name of the firewall. @@ -86,6 +87,9 @@ One or more of these arguments is required. This data source exports the following attributes in addition to the arguments above: * `arn` - ARN of the firewall. +* `availability_zone_change_protection` - Indicates whether the firewall is protected against changes to its Availability Zone configuration. +* `availability_zone_mapping` - Set of Availability Zones where the firewall endpoints are created for a transit gateway-attached firewall. + * `availability_zone_id` - The ID of the Availability Zone where the firewall endpoint is located. * `delete_protection` - A flag indicating whether the firewall is protected against deletion. * `description` - Description of the firewall. * `enabled_analysis_types` - Set of types for which to collect analysis metrics. @@ -98,6 +102,8 @@ This data source exports the following attributes in addition to the arguments a * `sync_states` - Set of subnets configured for use by the firewall. * `attachment` - Nested list describing the attachment status of the firewall's association with a single VPC subnet. * `endpoint_id` - The identifier of the firewall endpoint that AWS Network Firewall has instantiated in the subnet. You use this to identify the firewall endpoint in the VPC route tables, when you redirect the VPC traffic through the endpoint. + * `status` - The current status of the firewall endpoint instantiation in the subnet. + * `status_message` - A message populated with the reason for the error or failure and how to resolve it. A FAILED status indicates a non-recoverable state, and an ERROR status indicates an issue that you can fix. * `subnet_id` - The unique identifier of the subnet that you've specified to be used for a firewall endpoint.
* `availability_zone` - The Availability Zone where the subnet is configured. * `capacity_usage_summary` - Aggregated count of all resources used by reference sets in a firewall. @@ -107,6 +113,10 @@ This data source exports the following attributes in addition to the arguments a * `resolved_cidr_count` - Total number of CIDR blocks used by the IP set references in a firewall. * `utilized_cidr_count` - Number of CIDR blocks used by the IP set references in a firewall. * `configuration_sync_state_summary` - Summary of sync states for all availability zones in which the firewall is configured. + * `transit_gateway_attachment_sync_states` - Set of transit gateway attachments configured for use by the firewall. + * `attachment_id` - The unique identifier of the transit gateway attachment. + * `status_message` - A message providing additional information about the current status. + * `transit_gateway_attachment_status` - The current status of the transit gateway attachment. * `id` - ARN that identifies the firewall. * `name` - Descriptive name of the firewall. * `subnet_change_protection` - A flag indicating whether the firewall is protected against changes to the subnet associations. @@ -114,6 +124,8 @@ This data source exports the following attributes in addition to the arguments a * `subnet_id` - The unique identifier for the subnet. * `tags` - Map of resource tags to associate with the resource. If configured with a provider [`default_tags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. * `update_token` - String token used when updating a firewall. +* `transit_gateway_id` - The unique identifier of the transit gateway associated with this firewall. +* `transit_gateway_owner_account_id` - The AWS account ID that owns the transit gateway. * `vpc_id` - Unique identifier of the VPC where AWS Network Firewall should create the firewall.
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/networkfirewall_firewall_policy.html.markdown b/website/docs/cdktf/python/d/networkfirewall_firewall_policy.html.markdown index 3c9d987659b9..ab3751838915 100644 --- a/website/docs/cdktf/python/d/networkfirewall_firewall_policy.html.markdown +++ b/website/docs/cdktf/python/d/networkfirewall_firewall_policy.html.markdown @@ -80,6 +80,7 @@ AWS Network Firewall does not allow multiple firewall policies with the same nam This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `arn` - ARN of the firewall policy. * `name` - Descriptive name of the firewall policy. @@ -97,4 +98,4 @@ This data source exports the following attributes in addition to the arguments a [1]: https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/ram_resource_share [2]: https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/networkfirewall_firewall_policy - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/networkfirewall_resource_policy.html.markdown b/website/docs/cdktf/python/d/networkfirewall_resource_policy.html.markdown index 21a7ba3ca462..37fc0507011e 100644 --- a/website/docs/cdktf/python/d/networkfirewall_resource_policy.html.markdown +++ b/website/docs/cdktf/python/d/networkfirewall_resource_policy.html.markdown @@ -35,6 +35,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `resource_arn` - (Required) The Amazon Resource Name (ARN) that identifies the resource policy. ## Attribute Reference @@ -46,4 +47,4 @@ This data source exports the following attributes in addition to the arguments a [1]: https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/networkfirewall_resource_policy - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/networkmanager_connection.html.markdown b/website/docs/cdktf/python/d/networkmanager_connection.html.markdown index 028edca460fb..302e79bfd51e 100644 --- a/website/docs/cdktf/python/d/networkmanager_connection.html.markdown +++ b/website/docs/cdktf/python/d/networkmanager_connection.html.markdown @@ -3,14 +3,14 @@ subcategory: "Network Manager" layout: "aws" page_title: "AWS: aws_networkmanager_connection" description: |- - Retrieve information about a connection. + Provides details about an existing Network Manager connection. --- # Data Source: aws_networkmanager_connection -Retrieve information about a connection. +Provides details about an existing Network Manager connection. ## Example Usage @@ -51,4 +51,4 @@ This data source exports the following attributes in addition to the arguments a * `link_id` - ID of the link for the first device. * `tags` - Key-value tags for the connection. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/networkmanager_connections.html.markdown b/website/docs/cdktf/python/d/networkmanager_connections.html.markdown index d800f64ee99d..4f46452533d2 100644 --- a/website/docs/cdktf/python/d/networkmanager_connections.html.markdown +++ b/website/docs/cdktf/python/d/networkmanager_connections.html.markdown @@ -3,14 +3,14 @@ subcategory: "Network Manager" layout: "aws" page_title: "AWS: aws_networkmanager_connections" description: |- - Retrieve information about connections. + Provides details about existing Network Manager connections. --- # Data Source: aws_networkmanager_connections -Retrieve information about connections. +Provides details about existing Network Manager connections. ## Example Usage @@ -48,4 +48,4 @@ This data source exports the following attributes in addition to the arguments a * `ids` - IDs of the connections. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/networkmanager_core_network_policy_document.html.markdown b/website/docs/cdktf/python/d/networkmanager_core_network_policy_document.html.markdown index e9882bdb511e..47fdcf09d74f 100644 --- a/website/docs/cdktf/python/d/networkmanager_core_network_policy_document.html.markdown +++ b/website/docs/cdktf/python/d/networkmanager_core_network_policy_document.html.markdown @@ -3,7 +3,7 @@ subcategory: "Network Manager" layout: "aws" page_title: "AWS: aws_networkmanager_core_network_policy_document" description: |- - Generates an Core Network policy document in JSON format + Generates a Core Network policy document in JSON format --- @@ -223,6 +223,8 @@ The following arguments are available: * `inside_cidr_blocks` (Optional) - The Classless Inter-Domain Routing (CIDR) block range used to create tunnels for AWS Transit Gateway Connect. The format is standard AWS CIDR range (for example, `10.0.1.0/24`). 
You can optionally define the inside CIDR in the Core Network Edges section per Region. The minimum is a `/24` for IPv4 or `/64` for IPv6. You can provide multiple `/24` subnets or a larger CIDR range. If you define a larger CIDR range, new Core Network Edges will be automatically assigned `/24` and `/64` subnets from the larger CIDR. an Inside CIDR block is required for attaching Connect attachments to a Core Network Edge. * `vpn_ecmp_support` (Optional) - Indicates whether the core network forwards traffic over multiple equal-cost routes using VPN. The value can be either `true` or `false`. The default is `true`. * `edge_locations` (Required) - A block value of AWS Region locations where you're creating Core Network Edges. Detailed below. +* `dns_support` (Optional) - Indicates whether DNS resolution is enabled for the core network. The value can be either `true` or `false`. When set to `true`, DNS resolution is enabled for VPCs attached to the core network, allowing resources in different VPCs to resolve each other's domain names. The default is `true`. +* `security_group_referencing_support` - (Optional) Indicates whether security group referencing is enabled for the core network. The value can be either `true` or `false`. When set to `true`, security groups in one VPC can reference security groups in another VPC attached to the core network, enabling more flexible security configurations across your network. The default is `false`. ### `edge_locations` @@ -281,4 +283,4 @@ This data source exports the following attributes in addition to the arguments a * `json` - Standard JSON policy document rendered based on the arguments above. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/networkmanager_device.html.markdown b/website/docs/cdktf/python/d/networkmanager_device.html.markdown index 2950e68579a3..98eb4e99689f 100644 --- a/website/docs/cdktf/python/d/networkmanager_device.html.markdown +++ b/website/docs/cdktf/python/d/networkmanager_device.html.markdown @@ -3,14 +3,14 @@ subcategory: "Network Manager" layout: "aws" page_title: "AWS: aws_networkmanager_device" description: |- - Retrieve information about a device. + Provides details about an existing Network Manager device. --- # Data Source: aws_networkmanager_device -Retrieve information about a device. +Provides details about an existing Network Manager device. ## Example Usage @@ -24,12 +24,11 @@ from cdktf import TerraformStack # from imports.aws.data_aws_networkmanager_device import DataAwsNetworkmanagerDevice class MyConvertedCode(TerraformStack): - def __init__(self, scope, name, *, globalNetworkId): + def __init__(self, scope, name): super().__init__(scope, name) DataAwsNetworkmanagerDevice(self, "example", device_id=device_id.string_value, - global_network_id_id=global_network_id.value, - global_network_id=global_network_id + global_network_id=global_network_id.string_value ) ``` @@ -66,4 +65,4 @@ The `location` object supports the following: * `latitude` - Latitude. * `longitude` - Longitude. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/networkmanager_devices.html.markdown b/website/docs/cdktf/python/d/networkmanager_devices.html.markdown index 8dbc1aea273b..1e86a99ee377 100644 --- a/website/docs/cdktf/python/d/networkmanager_devices.html.markdown +++ b/website/docs/cdktf/python/d/networkmanager_devices.html.markdown @@ -3,14 +3,14 @@ subcategory: "Network Manager" layout: "aws" page_title: "AWS: aws_networkmanager_devices" description: |- - Retrieve information about devices. 
+ Provides details about existing Network Manager devices. --- # Data Source: aws_networkmanager_devices -Retrieve information about devices. +Provides details about existing Network Manager devices. ## Example Usage @@ -48,4 +48,4 @@ This data source exports the following attributes in addition to the arguments a * `ids` - IDs of the devices. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/networkmanager_global_network.html.markdown b/website/docs/cdktf/python/d/networkmanager_global_network.html.markdown index 6bfcecb1d4a0..4ddceb5d1d7e 100644 --- a/website/docs/cdktf/python/d/networkmanager_global_network.html.markdown +++ b/website/docs/cdktf/python/d/networkmanager_global_network.html.markdown @@ -3,14 +3,14 @@ subcategory: "Network Manager" layout: "aws" page_title: "AWS: aws_networkmanager_global_network" description: |- - Retrieve information about a global network. + Provides details about an existing Network Manager global network. --- # Data Source: aws_networkmanager_global_network -Retrieve information about a global network. +Provides details about an existing Network Manager global network. ## Example Usage @@ -45,4 +45,4 @@ This data source exports the following attributes in addition to the arguments a * `description` - Description of the global network. * `tags` - Map of resource tags. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/networkmanager_global_networks.html.markdown b/website/docs/cdktf/python/d/networkmanager_global_networks.html.markdown index effd5bcc84d9..ab42910ea852 100644 --- a/website/docs/cdktf/python/d/networkmanager_global_networks.html.markdown +++ b/website/docs/cdktf/python/d/networkmanager_global_networks.html.markdown @@ -3,14 +3,14 @@ subcategory: "Network Manager" layout: "aws" page_title: "AWS: aws_networkmanager_global_networks" description: |- - Retrieve information about global networks. 
+ Provides details about existing Network Manager global networks. --- # Data Source: aws_networkmanager_global_networks -Retrieve information about global networks. +Provides details about existing Network Manager global networks. ## Example Usage @@ -45,4 +45,4 @@ This data source exports the following attributes in addition to the arguments a * `ids` - IDs of the global networks. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/networkmanager_link.html.markdown b/website/docs/cdktf/python/d/networkmanager_link.html.markdown index 2e01f311f2b0..cfc11b59473f 100644 --- a/website/docs/cdktf/python/d/networkmanager_link.html.markdown +++ b/website/docs/cdktf/python/d/networkmanager_link.html.markdown @@ -3,14 +3,14 @@ subcategory: "Network Manager" layout: "aws" page_title: "AWS: aws_networkmanager_link" description: |- - Retrieve information about a link. + Provides details about an existing Network Manager link. --- # Data Source: aws_networkmanager_link -Retrieve information about a link. +Provides details about an existing Network Manager link. ## Example Usage @@ -56,4 +56,4 @@ The `bandwidth` object supports the following: * `download_speed` - Download speed in Mbps. * `upload_speed` - Upload speed in Mbps. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/networkmanager_links.html.markdown b/website/docs/cdktf/python/d/networkmanager_links.html.markdown index 5ace33bf7521..f3bc990cf19a 100644 --- a/website/docs/cdktf/python/d/networkmanager_links.html.markdown +++ b/website/docs/cdktf/python/d/networkmanager_links.html.markdown @@ -3,14 +3,14 @@ subcategory: "Network Manager" layout: "aws" page_title: "AWS: aws_networkmanager_links" description: |- - Retrieve information about links. + Provides details about existing Network Manager links. --- # Data Source: aws_networkmanager_links -Retrieve information about link. 
+Provides details about existing Network Manager links. ## Example Usage @@ -50,4 +50,4 @@ This data source exports the following attributes in addition to the arguments a * `ids` - IDs of the links. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/networkmanager_site.html.markdown b/website/docs/cdktf/python/d/networkmanager_site.html.markdown index 752b3f0f62fb..dcba66e00f35 100644 --- a/website/docs/cdktf/python/d/networkmanager_site.html.markdown +++ b/website/docs/cdktf/python/d/networkmanager_site.html.markdown @@ -3,14 +3,14 @@ subcategory: "Network Manager" layout: "aws" page_title: "AWS: aws_networkmanager_site" description: |- - Retrieve information about a site. + Provides details about an existing Network Manager site. --- # Data Source: aws_networkmanager_site -Retrieve information about a site. +Provides details about an existing Network Manager site. ## Example Usage @@ -54,4 +54,4 @@ The `location` object supports the following: * `latitude` - Latitude of the location. * `longitude` - Longitude of the location. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/networkmanager_sites.html.markdown b/website/docs/cdktf/python/d/networkmanager_sites.html.markdown index 8251eb48b832..89f51227592c 100644 --- a/website/docs/cdktf/python/d/networkmanager_sites.html.markdown +++ b/website/docs/cdktf/python/d/networkmanager_sites.html.markdown @@ -3,14 +3,14 @@ subcategory: "Network Manager" layout: "aws" page_title: "AWS: aws_networkmanager_sites" description: |- - Retrieve information about sites. + Provides details about existing Network Manager sites. --- # Data Source: aws_networkmanager_sites -Retrieve information about sites. +Provides details about existing Network Manager sites. ## Example Usage @@ -47,4 +47,4 @@ This data source exports the following attributes in addition to the arguments a * `ids` - IDs of the sites. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/oam_link.html.markdown b/website/docs/cdktf/python/d/oam_link.html.markdown index b94ec6ab07f0..88c90d04312b 100644 --- a/website/docs/cdktf/python/d/oam_link.html.markdown +++ b/website/docs/cdktf/python/d/oam_link.html.markdown @@ -35,8 +35,9 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -The following arguments are required: +This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `link_identifier` - (Required) ARN of the link. ## Attribute Reference @@ -71,4 +72,4 @@ The `metric_configuration` configuration block supports the following arguments: * `filter` - Filter string that specifies which metrics are to be shared with the monitoring account. See [MetricConfiguration](https://docs.aws.amazon.com/OAM/latest/APIReference/API_MetricConfiguration.html) for details. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/oam_links.html.markdown b/website/docs/cdktf/python/d/oam_links.html.markdown index 36ce819eb277..4b4d6571fd62 100644 --- a/website/docs/cdktf/python/d/oam_links.html.markdown +++ b/website/docs/cdktf/python/d/oam_links.html.markdown @@ -33,7 +33,9 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -This data source does not support any arguments. +This data source supports the following arguments: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). ## Attribute Reference @@ -41,4 +43,4 @@ This data source exports the following attributes in addition to the arguments a * `arns` - Set of ARN of the Links. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/oam_sink.html.markdown b/website/docs/cdktf/python/d/oam_sink.html.markdown index a68c91647ef2..acd25a58b799 100644 --- a/website/docs/cdktf/python/d/oam_sink.html.markdown +++ b/website/docs/cdktf/python/d/oam_sink.html.markdown @@ -35,8 +35,9 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -The following arguments are required: +This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `sink_identifier` - (Required) ARN of the sink. ## Attribute Reference @@ -49,4 +50,4 @@ This data source exports the following attributes in addition to the arguments a * `sink_id` - Random ID string that AWS generated as part of the sink ARN. * `tags` - Tags assigned to the sink. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/oam_sinks.html.markdown b/website/docs/cdktf/python/d/oam_sinks.html.markdown index dea17c0b0b98..7ea263169d0b 100644 --- a/website/docs/cdktf/python/d/oam_sinks.html.markdown +++ b/website/docs/cdktf/python/d/oam_sinks.html.markdown @@ -33,7 +33,9 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -This data source does not support any arguments. 
+This data source supports the following arguments: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). ## Attribute Reference @@ -41,4 +43,4 @@ This data source exports the following attributes in addition to the arguments a * `arns` - Set of ARN of the Sinks. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/odb_cloud_autonomous_vm_cluster.html.markdown b/website/docs/cdktf/python/d/odb_cloud_autonomous_vm_cluster.html.markdown new file mode 100644 index 000000000000..8cc3284ecfa4 --- /dev/null +++ b/website/docs/cdktf/python/d/odb_cloud_autonomous_vm_cluster.html.markdown @@ -0,0 +1,99 @@ +--- +subcategory: "Oracle Database@AWS" +layout: "AWS: aws_odb_cloud_autonomous_vm_cluster" +page_title: "AWS: aws_odb_cloud_autonomous_vm_cluster" +description: |- + Terraform data source for managing cloud autonomous vm cluster resource in AWS for Oracle Database@AWS. +--- + + + +# Data Source: aws_odb_cloud_autonomous_vm_cluster + +Terraform data source for managing cloud autonomous vm cluster resource in AWS for Oracle Database@AWS. + +You can find out more about Oracle Database@AWS from [User Guide](https://docs.aws.amazon.com/odb/latest/UserGuide/what-is-odb.html). + +## Example Usage + +### Basic Usage + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. 
+# +from imports.aws.data_aws_odb_cloud_autonomous_vm_cluster import DataAwsOdbCloudAutonomousVmCluster +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + DataAwsOdbCloudAutonomousVmCluster(self, "example", + id="example" + ) +``` + +## Argument Reference + +This data source supports the following arguments: + +* `id` - (Required) The unique identifier of the cloud autonomous vm cluster. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). + +## Attribute Reference + +This data source exports the following attributes in addition to the arguments above: + +* `arn` - The Amazon Resource Name (ARN) for the Exadata infrastructure. +* `cloud_exadata_infrastructure_id` - Cloud exadata infrastructure id associated with this cloud autonomous VM cluster. +* `autonomous_data_storage_percentage` - The percentage of data storage currently in use for Autonomous Databases in the Autonomous VM cluster. +* `autonomous_data_storage_size_in_tbs` - The data storage size allocated for Autonomous Databases in the Autonomous VM cluster, in TB. +* `available_autonomous_data_storage_size_in_tbs` - The available data storage space for Autonomous Databases in the Autonomous VM cluster, in TB. +* `available_container_databases` - The number of Autonomous CDBs that you can create with the currently available storage. +* `available_cpus` - The number of CPU cores available for allocation to Autonomous Databases. +* `compute_model` - The compute model of the Autonomous VM cluster: ECPU or OCPU. +* `cpu_core_count` - The total number of CPU cores in the Autonomous VM cluster. +* `cpu_core_count_per_node` - The number of CPU cores enabled per node in the Autonomous VM cluster. 
+* `cpu_percentage` - The percentage of total CPU cores currently in use in the Autonomous VM cluster. +* `created_at` - The date and time when the Autonomous VM cluster was created. +* `data_storage_size_in_gbs` - The total data storage allocated to the Autonomous VM cluster, in GB. +* `data_storage_size_in_tbs` - The total data storage allocated to the Autonomous VM cluster, in TB. +* `odb_node_storage_size_in_gbs` - The local node storage allocated to the Autonomous VM cluster, in gigabytes (GB). +* `db_servers` - The list of database servers associated with the Autonomous VM cluster. +* `description` - The user-provided description of the Autonomous VM cluster. +* `display_name` - The display name of the Autonomous VM cluster. +* `domain` - The domain name of the Autonomous VM cluster. +* `exadata_storage_in_tbs_lowest_scaled_value` - The minimum value to which you can scale down the Exadata storage, in TB. +* `hostname` - The hostname of the Autonomous VM cluster. +* `is_mtls_enabled_vm_cluster` - Indicates whether mutual TLS (mTLS) authentication is enabled for the Autonomous VM cluster. +* `license_model` - The Oracle license model that applies to the Autonomous VM cluster. Valid values are LICENSE_INCLUDED or BRING_YOUR_OWN_LICENSE. +* `max_acds_lowest_scaled_value` - The minimum value to which you can scale down the maximum number of Autonomous CDBs. +* `memory_per_oracle_compute_unit_in_gbs` - The amount of memory allocated per Oracle Compute Unit, in GB. +* `memory_size_in_gbs` - The total amount of memory allocated to the Autonomous VM cluster, in gigabytes (GB). +* `node_count` - The number of database server nodes in the Autonomous VM cluster. +* `non_provisionable_autonomous_container_databases` - The number of Autonomous CDBs that can't be provisioned because of resource constraints. +* `oci_resource_anchor_name` - The name of the OCI resource anchor associated with this Autonomous VM cluster. 
+* `oci_url` - The URL for accessing the OCI console page for this Autonomous VM cluster. +* `ocid` - The Oracle Cloud Identifier (OCID) of the Autonomous VM cluster. +* `odb_network_id` - The unique identifier of the ODB network associated with this Autonomous VM cluster. +* `percent_progress` - The progress of the current operation on the Autonomous VM cluster, as a percentage. +* `provisionable_autonomous_container_databases` - The number of Autonomous CDBs that can be provisioned in the Autonomous VM cluster. +* `provisioned_autonomous_container_databases` - The number of Autonomous CDBs currently provisioned in the Autonomous VM cluster. +* `provisioned_cpus` - The number of CPU cores currently provisioned in the Autonomous VM cluster. +* `reclaimable_cpus` - The number of CPU cores that can be reclaimed from terminated or scaled-down Autonomous Databases. +* `reserved_cpus` - The number of CPU cores reserved for system operations and redundancy. +* `scan_listener_port_non_tls` - The SCAN listener port for non-TLS (TCP) protocol. The default is 1521. +* `scan_listener_port_tls` - The SCAN listener port for TLS (TCP) protocol. The default is 2484. +* `shape` - The shape of the Exadata infrastructure for the Autonomous VM cluster. +* `status` - The status of the Autonomous VM cluster. +* `status_reason` - Additional information about the current status of the Autonomous VM cluster. +* `time_database_ssl_certificate_expires` - The expiration date and time of the database SSL certificate. +* `time_ords_certificate_expires` - The expiration date and time of the Oracle REST Data Services (ORDS) certificate. +* `time_zone` - The time zone of the Autonomous VM cluster. +* `total_container_databases` - The total number of Autonomous Container Databases that can be created with the allocated local storage. +* `tags` - A map of tags to assign to the exadata infrastructure. 
If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. +* `maintenance_window` - The maintenance window for the Autonomous VM cluster. + + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/odb_cloud_exadata_infrastructure.html.markdown b/website/docs/cdktf/python/d/odb_cloud_exadata_infrastructure.html.markdown new file mode 100644 index 000000000000..ac19df1a8b12 --- /dev/null +++ b/website/docs/cdktf/python/d/odb_cloud_exadata_infrastructure.html.markdown @@ -0,0 +1,90 @@ +--- +subcategory: "Oracle Database@AWS" +layout: "AWS: aws_odb_cloud_exadata_infrastructure" +page_title: "AWS: aws_odb_cloud_exadata_infrastructure" +description: |- + Terraform data source for managing exadata infrastructure resource in AWS for Oracle Database@AWS. +--- + + + +# Data Source: aws_odb_cloud_exadata_infrastructure + +Terraform data source for exadata infrastructure resource in AWS for Oracle Database@AWS. + +You can find out more about Oracle Database@AWS from [User Guide](https://docs.aws.amazon.com/odb/latest/UserGuide/what-is-odb.html). + +## Example Usage + +### Basic Usage + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. 
+# +from imports.aws.data_aws_odb_cloud_exadata_infrastructure import DataAwsOdbCloudExadataInfrastructure +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + DataAwsOdbCloudExadataInfrastructure(self, "example", + id="example" + ) +``` + +## Argument Reference + +The following arguments are required: + +* `id` - (Required) The unique identifier of the Exadata infrastructure. + +The following arguments are optional: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). + +## Attribute Reference + +This data source exports the following attributes in addition to the arguments above: + +* `activated_storage_count` - The number of storage servers requested for the Exadata infrastructure. +* `additional_storage_count` - The number of storage servers requested for the Exadata infrastructure. +* `availability_zone` - The name of the Availability Zone (AZ) where the Exadata infrastructure is located. +* `availability_zone_id` - The AZ ID of the AZ where the Exadata infrastructure is located. +* `arn` - The Amazon Resource Name (ARN) for the Exadata infrastructure. +* `id` - The unique identifier of the Exadata infrastructure. +* `compute_count` - The number of database servers for the Exadata infrastructure. +* `cpu_count` - The total number of CPU cores that are allocated to the Exadata infrastructure. +* `data_storage_size_in_tbs` - The size of the Exadata infrastructure's data disk group, in terabytes (TB). +* `db_node_storage_size_in_gbs` - The size of the storage available on each database node, in gigabytes (GB). +* `db_server_version` - The version of the Exadata infrastructure. +* `display_name` - The display name of the Exadata infrastructure. 
+* `last_maintenance_run_id` - The Oracle Cloud Identifier (OCID) of the last maintenance run for the Exadata infrastructure. +* `max_cpu_count` - The total number of CPU cores available on the Exadata infrastructure. +* `max_data_storage_in_tbs` - The total amount of data disk group storage, in terabytes (TB), that's available on the Exadata infrastructure. +* `max_db_node_storage_size_in_gbs` - The total amount of local node storage, in gigabytes (GB), that's available on the Exadata infrastructure. +* `max_memory_in_gbs` - The total amount of memory, in gigabytes (GB), that's available on the Exadata infrastructure. +* `memory_size_in_gbs` - The amount of memory, in gigabytes (GB), that's allocated on the Exadata infrastructure. +* `monthly_db_server_version` - The monthly software version of the database servers installed on the Exadata infrastructure. +* `monthly_storage_server_version` - The monthly software version of the storage servers installed on the Exadata infrastructure. +* `next_maintenance_run_id` - The OCID of the next maintenance run for the Exadata infrastructure. +* `oci_resource_anchor_name` - The name of the OCI resource anchor for the Exadata infrastructure. +* `oci_url` - The HTTPS link to the Exadata infrastructure in OCI. +* `ocid` - The OCID of the Exadata infrastructure in OCI. +* `percent_progress` - The amount of progress made on the current operation on the Exadata infrastructure expressed as a percentage. +* `shape` - The model name of the Exadata infrastructure. +* `status` - The status of the Exadata infrastructure. +* `status_reason` - Additional information about the status of the Exadata infrastructure. +* `storage_count` - The number of storage servers that are activated for the Exadata infrastructure. +* `storage_server_version` - The software version of the storage servers on the Exadata infrastructure. +* `total_storage_size_in_gbs` - The total amount of storage, in gigabytes (GB), on the Exadata infrastructure. 
+* `compute_model` - The OCI compute model used when you create or clone an instance: ECPU or OCPU. An ECPU is an abstracted measure of compute resources. ECPUs are based on the number of cores elastically allocated from a pool of compute and storage servers. An OCPU is a legacy physical measure of compute resources. OCPUs are based on the physical core of a processor with hyper-threading enabled. +* `created_at` - The time when the Exadata infrastructure was created. +* `database_server_type` - The database server model type of the Exadata infrastructure. For the list of valid model names, use the ListDbSystemShapes operation. +* `storage_server_type` - The storage server model type of the Exadata infrastructure. For the list of valid model names, use the ListDbSystemShapes operation. +* `maintenance_window` - The scheduling details of the maintenance window. Patching and system updates take place during the maintenance window. +* `tags` - (Optional) A map of tags to assign to the exadata infrastructure. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. + + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/odb_cloud_vm_cluster.html.markdown b/website/docs/cdktf/python/d/odb_cloud_vm_cluster.html.markdown new file mode 100644 index 000000000000..be8488b57169 --- /dev/null +++ b/website/docs/cdktf/python/d/odb_cloud_vm_cluster.html.markdown @@ -0,0 +1,92 @@ +--- +subcategory: "Oracle Database@AWS" +layout: "AWS: aws_odb_cloud_vm_cluster" +page_title: "AWS: aws_odb_cloud_vm_cluster" +description: |- + Terraform data source for managing cloud vm cluster resource in AWS for Oracle Database@AWS. +--- + + + +# Data Source: aws_odb_cloud_vm_cluster + +Terraform data source for Exadata Infrastructure resource in AWS for Oracle Database@AWS. 
+ +You can find out more about Oracle Database@AWS from [User Guide](https://docs.aws.amazon.com/odb/latest/UserGuide/what-is-odb.html). + +## Example Usage + +### Basic Usage + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.data_aws_odb_cloud_vm_cluster import DataAwsOdbCloudVmCluster +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + DataAwsOdbCloudVmCluster(self, "example", + id="example" + ) +``` + +## Argument Reference + +The following arguments are required: + +* `id` - (Required) The unique identifier of the cloud vm cluster. + +The following arguments are optional: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). + +## Attribute Reference + +This data source exports the following attributes in addition to the arguments above: + +* `arn` - The Amazon Resource Name (ARN) for the cloud vm cluster. +* `cloud_exadata_infrastructure_id` - The ID of the Cloud Exadata Infrastructure. +* `cluster_name` - The name of the Grid Infrastructure (GI) cluster. +* `cpu_core_count` - The number of CPU cores enabled on the VM cluster. +* `data_storage_size_in_tbs` - The size of the data disk group, in terabytes (TB), that's allocated for the VM cluster. +* `db_node_storage_size_in_gbs` - The amount of local node storage, in gigabytes (GB), that's allocated for the VM cluster. +* `db_servers` - The list of database servers for the VM cluster. 
+* `disk_redundancy` - The type of redundancy configured for the VM cluster. NORMAL is 2-way redundancy. HIGH is 3-way redundancy. +* `display_name` - The display name of the VM cluster. +* `domain` - The domain name of the VM cluster. +* `gi_version` - The software version of the Oracle Grid Infrastructure (GI) for the VM cluster. +* `hostname_prefix_computed` - The computed hostname prefix for the VM cluster. +* `is_local_backup_enabled` - Indicates whether database backups to local Exadata storage is enabled for the VM cluster. +* `is_sparse_disk_group_enabled` - Indicates whether the VM cluster is configured with a sparse disk group. +* `last_update_history_entry_id` - The Oracle Cloud ID (OCID) of the last maintenance update history entry. +* `license_model` - The Oracle license model applied to the VM cluster. +* `listener_port` - The port number configured for the listener on the VM cluster. +* `memory_size_in_gbs` - The amount of memory, in gigabytes (GB), that's allocated for the VM cluster. +* `node_count` - The number of nodes in the VM cluster. +* `ocid` - The OCID of the VM cluster. +* `oci_resource_anchor_name` - The name of the OCI Resource Anchor. +* `oci_url` - The HTTPS link to the VM cluster in OCI. +* `odb_network_id` - The ID of the ODB network. +* `percent_progress` - The amount of progress made on the current operation on the VM cluster, expressed as a percentage. +* `scan_dns_name` - The FQDN of the DNS record for the Single Client Access Name (SCAN) IP addresses that are associated with the VM cluster. +* `scan_dns_record_id` - The OCID of the DNS record for the SCAN IP addresses that are associated with the VM cluster. +* `scan_ip_ids` - The OCID of the SCAN IP addresses that are associated with the VM cluster. +* `shape` - The hardware model name of the Exadata infrastructure that's running the VM cluster. +* `ssh_public_keys` - The public key portion of one or more key pairs used for SSH access to the VM cluster. 
+* `status` - The status of the VM cluster. +* `status_reason` - Additional information about the status of the VM cluster. +* `storage_size_in_gbs` - The amount of local node storage, in gigabytes (GB), that's allocated to the VM cluster. +* `system_version` - The operating system version of the image chosen for the VM cluster. +* `timezone` - The time zone of the VM cluster. +* `vip_ids` - The virtual IP (VIP) addresses that are associated with the VM cluster. Oracle's Cluster Ready Services (CRS) creates and maintains one VIP address for each node in the VM cluster to enable failover. If one node fails, the VIP is reassigned to another active node in the cluster. +* `created_at` - The time when the VM cluster was created. +* `compute_model` - The OCI compute model used when you create or clone an instance: ECPU or OCPU. An ECPU is an abstracted measure of compute resources. ECPUs are based on the number of cores elastically allocated from a pool of compute and storage servers. An OCPU is a legacy physical measure of compute resources. OCPUs are based on the physical core of a processor with hyper-threading enabled. +* `data_collection_options` - The set of diagnostic collection options enabled for the VM cluster. +* `iorm_config_cache` - The ExadataIormConfig cache details for the VM cluster. + + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/odb_db_node.html.markdown b/website/docs/cdktf/python/d/odb_db_node.html.markdown new file mode 100644 index 000000000000..fa2a1143f663 --- /dev/null +++ b/website/docs/cdktf/python/d/odb_db_node.html.markdown @@ -0,0 +1,82 @@ +--- +subcategory: "Oracle Database@AWS" +layout: "AWS: aws_odb_db_node" +page_title: "AWS: aws_odb_db_node" +description: |- + Terraform data source for managing db node linked to cloud vm cluster of Oracle Database@AWS. +--- + + + +# Data Source: aws_odb_db_node + +Terraform data source for managing db nodes linked to cloud vm cluster of Oracle Database@AWS.
+ +You can find out more about Oracle Database@AWS from [User Guide](https://docs.aws.amazon.com/odb/latest/UserGuide/what-is-odb.html). + +## Example Usage + +### Basic Usage + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws. import DataAwsOdbDbNode +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + DataAwsOdbDbNode(self, "example", + cloud_vm_cluster_id="cloud_vm_cluster_id", + id="db_node_id" + ) +``` + +## Argument Reference + +The following arguments are required: + +* `cloud_vm_cluster_id` - (Required) The unique identifier of the cloud vm cluster. +* `id` - (Required) The unique identifier of db node associated with vm cluster. + +The following arguments are optional: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). + +## Attribute Reference + +This data source exports the following attributes in addition to the arguments above: + +* `cloud_vm_cluster_id` - The ID of the cloud VM cluster. +* `status` - The current status of the DB node. +* `status_reason` - Additional information about the status of the DB node. +* `additional_details` - Additional information about the planned maintenance. +* `backup_ip_id` - The Oracle Cloud ID (OCID) of the backup IP address that's associated with the DB node. +* `backup_vnic2_id` - The OCID of the second backup VNIC. +* `backup_vnic_id` - The OCID of the backup VNIC. +* `cpu_core_count` - The number of CPU cores enabled on the DB node. 
+* `db_storage_size_in_gbs` - The amount of local node storage, in gigabytes (GB), allocated on the DB node. +* `db_server_id` - The unique identifier of the DB server that is associated with the DB node. +* `db_system_id` - The OCID of the DB system. +* `fault_domain` - The name of the fault domain the instance is contained in. +* `host_ip_id` - The OCID of the host IP address that's associated with the DB node. +* `hostname` - The host name for the DB node. +* `ocid` - The OCID of the DB node. +* `oci_resource_anchor_name` - The name of the OCI resource anchor for the DB node. +* `maintenance_type` - The type of database node maintenance. Either VMDB_REBOOT_MIGRATION or EXADBXS_REBOOT_MIGRATION. +* `memory_size_in_gbs` - The allocated memory in GBs on the DB node. +* `software_storage_size_in_gbs` - The size (in GB) of the block storage volume allocation for the DB system. +* `created_at` - The date and time when the DB node was created. +* `time_maintenance_window_end` - The end date and time of the maintenance window. +* `time_maintenance_window_start` - The start date and time of the maintenance window. +* `total_cpu_core_count` - The total number of CPU cores reserved on the DB node. +* `vnic2_id` - The OCID of the second VNIC. +* `vnic_id` - The OCID of the VNIC. +* `private_ip_address` - The private IP address assigned to the DB node. +* `floating_ip_address` - The floating IP address assigned to the DB node. + + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/odb_db_nodes.html.markdown b/website/docs/cdktf/python/d/odb_db_nodes.html.markdown new file mode 100644 index 000000000000..77fb6e420bbd --- /dev/null +++ b/website/docs/cdktf/python/d/odb_db_nodes.html.markdown @@ -0,0 +1,83 @@ +--- +subcategory: "Oracle Database@AWS" +layout: "AWS: aws_odb_db_nodes" +page_title: "AWS: aws_odb_db_nodes" +description: |- + Terraform data source for managing db nodes linked to cloud vm cluster of Oracle Database@AWS. 
+--- + + + +# Data Source: aws_odb_db_nodes + +Terraform data source for manging db nodes linked to cloud vm cluster of Oracle Database@AWS. + +You can find out more about Oracle Database@AWS from [User Guide](https://docs.aws.amazon.com/odb/latest/UserGuide/what-is-odb.html). + +## Example Usage + +### Basic Usage + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws. import DataAwsOdbDbNodes +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + DataAwsOdbDbNodes(self, "example", + cloud_vm_cluster_id="example" + ) +``` + +## Argument Reference + +The following arguments are required: + +* `cloud_vm_cluster_id` - (Required) The unique identifier of the cloud vm cluster. + +The following arguments are optional: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). + +## Attribute Reference + +This data source exports the following attributes in addition to the arguments above: + +* `db_nodes` - The list of DB nodes along with their properties. + +### db_nodes + +* `additional_details` - Additional information about the planned maintenance. +* `backup_ip_id` - The Oracle Cloud ID (OCID) of the backup IP address that's associated with the DB node. +* `backup_vnic_2_id` - The OCID of the second backup virtual network interface card (VNIC) for the DB node. +* `backup_vnic_id` - The OCID of the backup VNIC for the DB node. +* `cpu_core_count` - The number of CPU cores enabled on the DB node. 
+* `created_at` - The date and time when the DB node was created. +* `db_node_arn` - The Amazon Resource Name (ARN) of the DB node. +* `db_node_id` - The unique identifier of the DB node. +* `db_node_storage_size_in_gbs` - The amount of local node storage, in gigabytes (GB), that's allocated on the DB node. +* `db_server_id` - The unique identifier of the database server that's associated with the DB node. +* `db_system_id` - The OCID of the DB system. +* `fault_domain` - The name of the fault domain where the DB node is located. +* `host_ip_id` - The OCID of the host IP address that's associated with the DB node. +* `hostname` - The host name for the DB node. +* `maintenance_type` - The type of maintenance the DB node is undergoing. +* `memory_size_in_gbs` - The amount of memory, in gigabytes (GB), that's allocated on the DB node. +* `oci_resource_anchor_name` - The name of the OCI resource anchor for the DB node. +* `ocid` - The OCID of the DB node. +* `software_storage_size_in_gb` - The size of the block storage volume, in gigabytes (GB), that's allocated for the DB system. This attribute applies only for virtual machine DB systems. +* `status` - The current status of the DB node. +* `status_reason` - Additional information about the status of the DB node. +* `time_maintenance_window_end` - The end date and time of the maintenance window. +* `time_maintenance_window_start` - The start date and time of the maintenance window. +* `total_cpu_core_count` - The total number of CPU cores reserved on the DB node. +* `vnic_2_id` - The OCID of the second VNIC. +* `vnic_id` - The OCID of the VNIC. 
+ + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/odb_db_server.html.markdown b/website/docs/cdktf/python/d/odb_db_server.html.markdown new file mode 100644 index 000000000000..57492995e3b3 --- /dev/null +++ b/website/docs/cdktf/python/d/odb_db_server.html.markdown @@ -0,0 +1,74 @@ +--- +subcategory: "Oracle Database@AWS" +layout: "AWS: aws_odb_db_server" +page_title: "AWS: aws_odb_db_server" +description: |- + Terraform data source for managing db server linked to exadata infrastructure of Oracle Database@AWS. +--- + + + +# Data Source: aws_odb_db_server + +Terraform data source for managing db server linked to exadata infrastructure of Oracle Database@AWS. + +You can find out more about Oracle Database@AWS from [User Guide](https://docs.aws.amazon.com/odb/latest/UserGuide/what-is-odb.html). + +## Example Usage + +### Basic Usage + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.data_aws_odb_db_server import DataAwsOdbDbServer +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + DataAwsOdbDbServer(self, "example", + cloud_exadata_infrastructure_id="exadata_infra_id", + id="db_server_id" + ) +``` + +## Argument Reference + +The following arguments are required: + +* `cloud_exadata_infrastructure_id` - (Required) The unique identifier of the cloud exadata infrastructure. +* `id` - (Required) The unique identifier of the db server associated with the exadata infrastructure. + +The following arguments are optional: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints).
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). + +## Attribute Reference + +This data source exports the following attributes in addition to the arguments above: + +* `autonomous_virtual_machine_ids` - The list of unique identifiers for the Autonomous VMs associated with this database server. +* `autonomous_vm_cluster_ids` - The OCID of the autonomous VM clusters that are associated with the database server. +* `compute_model` - The compute model of the database server. +* `status` - The status of the database server. +* `status_reason` - Additional information about the current status of the database server. +* `cpu_core_count` - The number of CPU cores enabled on the database server. +* `db_node_storage_size_in_gbs` - The allocated local node storage in GBs on the database server. +* `db_server_patching_details` - The scheduling details for the quarterly maintenance window. Patching and system updates take place during the maintenance window. +* `display_name` - The display name of the database server. +* `exadata_infrastructure_id` - The exadata infrastructure ID of the database server. +* `ocid` - The OCID of the database server to retrieve information about. +* `oci_resource_anchor_name` - The name of the OCI resource anchor. +* `max_cpu_count` - The total number of CPU cores available. +* `max_db_node_storage_in_gbs` - The total local node storage available in GBs. +* `max_memory_in_gbs` - The total memory available in GBs. +* `memory_size_in_gbs` - The allocated memory in GBs on the database server. +* `shape` - The shape of the database server. The shape determines the amount of CPU, storage, and memory resources available. +* `created_at` - The date and time when the database server was created. +* `vm_cluster_ids` - The OCID of the VM clusters that are associated with the database server. 
+ + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/odb_db_servers.html.markdown b/website/docs/cdktf/python/d/odb_db_servers.html.markdown new file mode 100644 index 000000000000..48f7963d30cd --- /dev/null +++ b/website/docs/cdktf/python/d/odb_db_servers.html.markdown @@ -0,0 +1,77 @@ +--- +subcategory: "Oracle Database@AWS" +layout: "AWS: aws_odb_db_servers" +page_title: "AWS: aws_odb_db_servers" +description: |- + Terraform data source for managing db servers linked to exadata infrastructure of Oracle Database@AWS. +--- + + + +# Data Source: aws_odb_db_servers + +Terraform data source for managing db servers linked to exadata infrastructure of Oracle Database@AWS. + +You can find out more about Oracle Database@AWS from [User Guide](https://docs.aws.amazon.com/odb/latest/UserGuide/what-is-odb.html). + +## Example Usage + +### Basic Usage + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.data_aws_odb_db_servers import DataAwsOdbDbServers +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + DataAwsOdbDbServers(self, "example", + cloud_exadata_infrastructure_id="exadata_infra_id" + ) +``` + +## Argument Reference + +The following arguments are required: + +* `cloud_exadata_infrastructure_id` - (Required) The unique identifier of the cloud exadata infrastructure. + +The following arguments are optional: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference).
+ +## Attribute Reference + +This data source exports the following attributes in addition to the arguments above: + +* `db_servers` - the list of DB servers along with their properties. + +### db_servers + +* `autonomous_virtual_machine_ids` - A list of unique identifiers for the Autonomous VMs. +* `autonomous_vm_cluster_ids` - A list of identifiers for the Autonomous VM clusters. +* `compute_model` - The OCI compute model used when you create or clone an instance: **ECPU** or **OCPU**. ECPUs are based on the number of cores elastically allocated from a pool of compute and storage servers, while OCPUs are based on the physical core of a processor with hyper-threading enabled. +* `cpu_core_count` - The number of CPU cores enabled on the database server. +* `created_at` - The date and time when the database server was created. +* `db_node_storage_size_in_gbs` - The amount of local node storage, in gigabytes (GB), that's allocated on the database server. +* `db_server_id` - The unique identifier of the database server. +* `db_server_patching_details` - The scheduling details for the quarterly maintenance window. Patching and system updates take place during the maintenance window. +* `display_name` - The user-friendly name of the database server. The name doesn't need to be unique. +* `exadata_infrastructure_id` - The ID of the Exadata infrastructure that hosts the database server. +* `max_cpu_count` - The total number of CPU cores available on the database server. +* `max_db_node_storage_in_gbs` - The total amount of local node storage, in gigabytes (GB), that's available on the database server. +* `max_memory_in_gbs` - The total amount of memory, in gigabytes (GB), that's available on the database server. +* `memory_size_in_gbs` - The amount of memory, in gigabytes (GB), that's allocated on the database server. +* `oci_resource_anchor_name` - The name of the OCI resource anchor for the database server. +* `ocid` - The OCID of the database server. 
+* `shape` - The hardware system model of the Exadata infrastructure that the database server is hosted on. The shape determines the amount of CPU, storage, and memory resources available. +* `status` - The current status of the database server. +* `status_reason` - Additional information about the status of the database server. +* `vm_cluster_ids` - The IDs of the VM clusters that are associated with the database server. + + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/odb_network.html.markdown b/website/docs/cdktf/python/d/odb_network.html.markdown new file mode 100644 index 000000000000..62be78e581a6 --- /dev/null +++ b/website/docs/cdktf/python/d/odb_network.html.markdown @@ -0,0 +1,71 @@ +--- +subcategory: "Oracle Database@AWS" +layout: "AWS: aws_odb_network" +page_title: "AWS: aws_odb_network" +description: |- + Terraform data source to retrieve odb network for Oracle Database@AWS. +--- + + + +# Data Source: aws_odb_network + +Terraform data source for to retrieve network resource in AWS for Oracle Database@AWS. + +## Example Usage + +### Basic Usage + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.data_aws_odb_network import DataAwsOdbNetwork +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + DataAwsOdbNetwork(self, "example", + id="example" + ) +``` + +## Argument Reference + +The following arguments are required: + +* `id` - (Required) Unique identifier of the odb network resource. + +The following arguments are optional: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). + +## Attribute Reference + +This resource exports the following attributes in addition to the arguments above: + +* `id` - Unique identifier of the odb network resource. +* `arn` - Amazon Resource Name (ARN) of the odb network resource. +* `display_name` - Display name for the network resource. +* `availability_zone_id` - The AZ ID of the AZ where the ODB network is located. +* `availability_zone` - The availability zone where the ODB network is located. +* `backup_subnet_cidr` - The CIDR range of the backup subnet for the ODB network. +* `client_subnet_cidr` - The CIDR notation for the network resource. +* `custom_domain_name` - The name of the custom domain that the network is located. +* `default_dns_prefix` - The default DNS prefix for the network resource. +* `oci_network_anchor_id` - The unique identifier of the OCI network anchor for the ODB network. +* `oci_network_anchor_url` - The URL of the OCI network anchor for the ODB network. +* `oci_resource_anchor_name` - The name of the OCI resource anchor for the ODB network. +* `oci_vcn_id` - The unique identifier Oracle Cloud ID (OCID) of the OCI VCN for the ODB network. +* `oci_vcn_url` - The URL of the OCI VCN for the ODB network. +* `percent_progress` - The amount of progress made on the current operation on the ODB network, expressed as a percentage. +* `peered_cidrs` - The list of CIDR ranges from the peered VPC that are allowed access to the ODB network. Please refer odb network peering documentation. +* `status` - The status of the network resource. +* `status_reason` - Additional information about the current status of the ODB network. +* `created_at` - The date and time when the ODB network was created. +* `managed_services` - The managed services configuration for the ODB network. 
+ + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/odb_network_peering_connection.html.markdown b/website/docs/cdktf/python/d/odb_network_peering_connection.html.markdown new file mode 100644 index 000000000000..f31c364d307a --- /dev/null +++ b/website/docs/cdktf/python/d/odb_network_peering_connection.html.markdown @@ -0,0 +1,63 @@ +--- +subcategory: "Oracle Database@AWS" +layout: "AWS: aws_odb_network_peering_connection" +page_title: "AWS: aws_odb_network_peering_connection" +description: |- + Terraform data source for managing oracle database network peering resource in AWS. +--- + + + +# Data Source: aws_odb_network_peering_connection + +Terraform data source for managing oracle database network peering resource in AWS. + +You can find out more about Oracle Database@AWS from [User Guide](https://docs.aws.amazon.com/odb/latest/UserGuide/what-is-odb.html). + +## Example Usage + +### Basic Usage + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.data_aws_odb_network_peering_connection import DataAwsOdbNetworkPeeringConnection +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + DataAwsOdbNetworkPeeringConnection(self, "example", + id="example" + ) +``` + +## Argument Reference + +The following arguments are required: + +* `id` - (Required) The unique identifier of the Exadata infrastructure. + +The following arguments are optional: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
+ Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). + +## Attribute Reference + +This data source exports the following attributes in addition to the arguments above: + +* `display_name` - Display name of the ODB network peering connection. +* `status` - Status of the ODB network peering connection. +* `status_reason` - The reason for the current status of the ODB network peering connection. +* `odb_network_arn` - ARN of the ODB network. +* `arn` - The Amazon Resource Name (ARN) of the ODB network peering connection. +* `peer_network_arn` - ARN of the peer network. +* `odb_peering_connection_type` - Type of the ODB peering connection. +* `created_at` - Created time of the ODB network peering connection. +* `percent_progress` - Progress of the ODB network peering connection. +* `tags` - Tags applied to the resource. + + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/opensearch_domain.html.markdown b/website/docs/cdktf/python/d/opensearch_domain.html.markdown index b4ecdcbfcc80..fe6db76c8448 --- a/website/docs/cdktf/python/d/opensearch_domain.html.markdown +++ b/website/docs/cdktf/python/d/opensearch_domain.html.markdown @@ -35,18 +35,19 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: -* `domain_name` – (Required) Name of the domain. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `domain_name` - (Required) Name of the domain. ## Attribute Reference This data source exports the following attributes in addition to the arguments above: -* `access_policies` – Policy document attached to the domain.
+* `access_policies` - Policy document attached to the domain. * `advanced_options` - Key-value string pairs to specify advanced configuration options. * `advanced_security_options` - Status of the OpenSearch domain's advanced security options. The block consists of the following attributes: * `enabled` - Whether advanced security is enabled. * `internal_user_database_enabled` - Whether the internal user database is enabled. -* `arn` – ARN of the domain. +* `arn` - ARN of the domain. * `auto_tune_options` - Configuration of the Auto-Tune options of the domain. * `desired_state` - Auto-Tune desired state for the domain. * `maintenance_schedule` - A list of the nested configurations for the Auto-Tune maintenance windows of the domain. @@ -83,26 +84,25 @@ This data source exports the following attributes in addition to the arguments a * `user_pool_id` - Cognito User pool used by the domain. * `identity_pool_id` - Cognito Identity pool used by the domain. * `role_arn` - IAM Role with the AmazonOpenSearchServiceCognitoAccess policy attached. -* `created` – Status of the creation of the domain. +* `created` - Status of the creation of the domain. * `dashboard_endpoint` - Domain-specific endpoint used to access the [Dashboard application](https://docs.aws.amazon.com/opensearch-service/latest/developerguide/dashboards.html). * `dashboard_endpoint_v2` - V2 domain-specific endpoint used to access the [Dashboard application](https://docs.aws.amazon.com/opensearch-service/latest/developerguide/dashboards.html) -* `deleted` – Status of the deletion of the domain. +* `deleted` - Status of the deletion of the domain. * `domain_endpoint_v2_hosted_zone_id` - Dual stack hosted zone ID for the domain. -* `domain_id` – Unique identifier for the domain. +* `domain_id` - Unique identifier for the domain. * `ebs_options` - EBS Options for the instances in the domain. * `ebs_enabled` - Whether EBS volumes are attached to data nodes in the domain. 
* `throughput` - The throughput (in MiB/s) of the EBS volumes attached to data nodes. * `volume_type` - Type of EBS volumes attached to data nodes. * `volume_size` - Size of EBS volumes attached to data nodes (in GB). * `iops` - Baseline input/output (I/O) performance of EBS volumes attached to data nodes. -* `engine_version` – OpenSearch version for the domain. +* `engine_version` - OpenSearch version for the domain. * `encryption_at_rest` - Domain encryption at rest related options. * `enabled` - Whether encryption at rest is enabled in the domain. * `kms_key_id` - KMS key id used to encrypt data at rest. -* `endpoint` – Domain-specific endpoint used to submit index, search, and data upload requests. +* `endpoint` - Domain-specific endpoint used to submit index, search, and data upload requests. * `endpoint_v2` - V2 domain-specific endpoint that works with both IPv4 and IPv6 addresses, used to submit index, search, and data upload requests. * `ip_address_type` - Type of IP addresses supported by the endpoint for the domain. -* `kibana_endpoint` - (**Deprecated**) Domain-specific endpoint for kibana without https scheme. Use the `dashboard_endpoint` attribute instead. * `log_publishing_options` - Domain log publishing related options. * `log_type` - Type of OpenSearch log being published. * `cloudwatch_log_group_arn` - CloudWatch Log Group where the logs are published. @@ -115,7 +115,7 @@ This data source exports the following attributes in addition to the arguments a * `window_start_time` - 10h window for updates * `hours` - Starting hour of the 10-hour window for updates * `minutes` - Starting minute of the 10-hour window for updates -* `processing` – Status of a configuration change in the domain. +* `processing` - Status of a configuration change in the domain. * `snapshot_options` – Domain snapshot related options. * `automated_snapshot_start_hour` - Hour during which the service takes an automated daily snapshot of the indices in the domain. 
* `software_update_options` - Software update options for the domain @@ -127,4 +127,4 @@ This data source exports the following attributes in addition to the arguments a * `subnet_ids` - Subnets used by the domain. * `vpc_id` - VPC used by the domain. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/opensearchserverless_access_policy.html.markdown b/website/docs/cdktf/python/d/opensearchserverless_access_policy.html.markdown index cfe670da820d..eaeb6aa37644 100644 --- a/website/docs/cdktf/python/d/opensearchserverless_access_policy.html.markdown +++ b/website/docs/cdktf/python/d/opensearchserverless_access_policy.html.markdown @@ -36,8 +36,9 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -The following arguments are required: +This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the policy. * `type` - (Required) Type of access policy. Must be `data`. @@ -49,4 +50,4 @@ This data source exports the following attributes in addition to the arguments a * `policy` - JSON policy document to use as the content for the new policy. * `policy_version` - Version of the policy. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/opensearchserverless_collection.html.markdown b/website/docs/cdktf/python/d/opensearchserverless_collection.html.markdown index 09c1a88403ef..eafe5d07ed6d 100644 --- a/website/docs/cdktf/python/d/opensearchserverless_collection.html.markdown +++ b/website/docs/cdktf/python/d/opensearchserverless_collection.html.markdown @@ -37,11 +37,12 @@ class MyConvertedCode(TerraformStack): The following arguments are optional: -~> Exactly one of `id` or `name` is required. - +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `id` - (Optional) ID of the collection. * `name` - (Optional) Name of the collection. +~> Exactly one of `id` or `name` is required. + ## Attribute Reference This data source exports the following attributes in addition to the arguments above: @@ -59,4 +60,4 @@ This data source exports the following attributes in addition to the arguments a * `tags` - A map of tags to assign to the collection. * `type` - Type of collection. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/opensearchserverless_lifecycle_policy.html.markdown b/website/docs/cdktf/python/d/opensearchserverless_lifecycle_policy.html.markdown index 54b62b51a155..f0e1161f1d12 100644 --- a/website/docs/cdktf/python/d/opensearchserverless_lifecycle_policy.html.markdown +++ b/website/docs/cdktf/python/d/opensearchserverless_lifecycle_policy.html.markdown @@ -36,8 +36,9 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -The following arguments are required: +This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the policy * `type` - (Required) Type of lifecycle policy. Must be `retention`. @@ -51,4 +52,4 @@ This data source exports the following attributes in addition to the arguments a * `policy` - JSON policy document to use as the content for the new policy. * `policy_version` - Version of the policy. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/opensearchserverless_security_config.html.markdown b/website/docs/cdktf/python/d/opensearchserverless_security_config.html.markdown index 40f0470e14a7..dc135d0821a7 100644 --- a/website/docs/cdktf/python/d/opensearchserverless_security_config.html.markdown +++ b/website/docs/cdktf/python/d/opensearchserverless_security_config.html.markdown @@ -35,8 +35,9 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -The following arguments are required: +This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `id` - (Required) The unique identifier of the security configuration. ## Attribute Reference @@ -59,4 +60,4 @@ SAML options for the security configuration. * `session_timeout` - Session timeout, in minutes. Minimum is 5 minutes and maximum is 720 minutes (12 hours). Default is 60 minutes. * `user_attribute` - User attribute for this SAML integration. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/opensearchserverless_security_policy.html.markdown b/website/docs/cdktf/python/d/opensearchserverless_security_policy.html.markdown index 61cf7b401398..8301048febbb 100644 --- a/website/docs/cdktf/python/d/opensearchserverless_security_policy.html.markdown +++ b/website/docs/cdktf/python/d/opensearchserverless_security_policy.html.markdown @@ -36,6 +36,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the policy * `type` - (Required) Type of security policy. One of `encryption` or `network`. @@ -49,4 +50,4 @@ This data source exports the following attributes in addition to the arguments a * `policy` - The JSON policy document without any whitespaces. * `policy_version` - Version of the policy. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/opensearchserverless_vpc_endpoint.html.markdown b/website/docs/cdktf/python/d/opensearchserverless_vpc_endpoint.html.markdown index 21ffcb6be0a7..3163ffa3c0f7 100644 --- a/website/docs/cdktf/python/d/opensearchserverless_vpc_endpoint.html.markdown +++ b/website/docs/cdktf/python/d/opensearchserverless_vpc_endpoint.html.markdown @@ -33,8 +33,9 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -The following arguments are required: +This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `vpc_endpoint_id` - (Required) The unique identifier of the endpoint. ## Attribute Reference @@ -47,4 +48,4 @@ This data source exports the following attributes in addition to the arguments a * `subnet_ids` - The IDs of the subnets from which you access OpenSearch Serverless. * `vpc_id` - The ID of the VPC from which you access OpenSearch Serverless. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/organizations_resource_tags.html.markdown b/website/docs/cdktf/python/d/organizations_resource_tags.html.markdown index eb44688fde7f..8ca74e84b813 100644 --- a/website/docs/cdktf/python/d/organizations_resource_tags.html.markdown +++ b/website/docs/cdktf/python/d/organizations_resource_tags.html.markdown @@ -41,10 +41,10 @@ This data source supports the following arguments: You can specify any of the following taggable resources. -* AWS account – specify the account ID number. -* Organizational unit – specify the OU ID that begins with `ou-` and looks similar to: `ou-1a2b-34uvwxyz` -* Root – specify the root ID that begins with `r-` and looks similar to: `r-1a2b` -* Policy – specify the policy ID that begins with `p-` and looks similar to: `p-12abcdefg3` +* AWS account - specify the account ID number. +* Organizational unit - specify the OU ID that begins with `ou-` and looks similar to: `ou-1a2b-34uvwxyz` +* Root - specify the root ID that begins with `r-` and looks similar to: `r-1a2b` +* Policy - specify the policy ID that begins with `p-` and looks similar to: `p-12abcdefg3` ## Attribute Reference @@ -52,4 +52,4 @@ This data source exports the following attributes in addition to the arguments a * `tags` - Map of key=value pairs for each tag set on the resource. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/outposts_asset.html.markdown b/website/docs/cdktf/python/d/outposts_asset.html.markdown index 3aeb4c1da7d3..4b82a492f37a 100644 --- a/website/docs/cdktf/python/d/outposts_asset.html.markdown +++ b/website/docs/cdktf/python/d/outposts_asset.html.markdown @@ -48,8 +48,9 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -The following arguments are required: +This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `arn` - (Required) Outpost ARN. * `asset_id` - (Required) ID of the asset. @@ -62,4 +63,4 @@ This data source exports the following attributes in addition to the arguments a * `rack_elevation` - Position of an asset in a rack measured in rack units. * `rack_id` - Rack ID of the asset. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/outposts_assets.html.markdown b/website/docs/cdktf/python/d/outposts_assets.html.markdown index 9a6238c21cf0..1afab98a69b8 100644 --- a/website/docs/cdktf/python/d/outposts_assets.html.markdown +++ b/website/docs/cdktf/python/d/outposts_assets.html.markdown @@ -77,6 +77,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `arn` - (Required) Outpost ARN. 
* `host_id_filter` - (Optional) Filters by list of Host IDs of a Dedicated Host. * `status_id_filter` - (Optional) Filters by list of state status. Valid values: "ACTIVE", "RETIRING". @@ -87,4 +88,4 @@ This data source exports the following attributes in addition to the arguments a * `asset_ids` - List of all the asset ids found. This data source will fail if none are found. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/outposts_outpost.html.markdown b/website/docs/cdktf/python/d/outposts_outpost.html.markdown index ec1dc3ef1fe3..aacf7d16dd4a 100644 --- a/website/docs/cdktf/python/d/outposts_outpost.html.markdown +++ b/website/docs/cdktf/python/d/outposts_outpost.html.markdown @@ -35,6 +35,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `id` - (Optional) Identifier of the Outpost. * `name` - (Optional) Name of the Outpost. * `arn` - (Optional) ARN. @@ -53,4 +54,4 @@ This data source exports the following attributes in addition to the arguments a * `supported_hardware_type` - The hardware type. * `tags` - The Outpost tags. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/outposts_outpost_instance_type.html.markdown b/website/docs/cdktf/python/d/outposts_outpost_instance_type.html.markdown index a1ec05fdc675..8388aa28201b 100644 --- a/website/docs/cdktf/python/d/outposts_outpost_instance_type.html.markdown +++ b/website/docs/cdktf/python/d/outposts_outpost_instance_type.html.markdown @@ -46,6 +46,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `instance_type` - (Optional) Desired instance type. Conflicts with `preferred_instance_types`. * `preferred_instance_types` - (Optional) Ordered list of preferred instance types. The first match in this list will be returned. If no preferred matches are found and the original search returned more than one result, an error is returned. Conflicts with `instance_type`. @@ -55,4 +56,4 @@ This data source exports the following attributes in addition to the arguments a * `id` - Outpost identifier. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/outposts_outpost_instance_types.html.markdown b/website/docs/cdktf/python/d/outposts_outpost_instance_types.html.markdown index 0c450229637b..6f8e3a33ce7f 100644 --- a/website/docs/cdktf/python/d/outposts_outpost_instance_types.html.markdown +++ b/website/docs/cdktf/python/d/outposts_outpost_instance_types.html.markdown @@ -33,8 +33,9 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -The following arguments are required: +This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `arn` - (Required) Outpost ARN. ## Attribute Reference @@ -43,4 +44,4 @@ This data source exports the following attributes in addition to the arguments a * `instance_types` - Set of instance types. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/outposts_outposts.html.markdown b/website/docs/cdktf/python/d/outposts_outposts.html.markdown index f4306c4771cf..88b24c66a795 100644 --- a/website/docs/cdktf/python/d/outposts_outposts.html.markdown +++ b/website/docs/cdktf/python/d/outposts_outposts.html.markdown @@ -35,6 +35,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `availability_zone` - (Optional) Availability Zone name. 
* `availability_zone_id` - (Optional) Availability Zone identifier. * `site_id` - (Optional) Site identifier. @@ -48,4 +49,4 @@ This data source exports the following attributes in addition to the arguments a * `id` - AWS Region. * `ids` - Set of identifiers. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/outposts_site.html.markdown b/website/docs/cdktf/python/d/outposts_site.html.markdown index b19459475cc7..4ebf5fce0ad5 100644 --- a/website/docs/cdktf/python/d/outposts_site.html.markdown +++ b/website/docs/cdktf/python/d/outposts_site.html.markdown @@ -35,6 +35,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `id` - (Optional) Identifier of the Site. * `name` - (Optional) Name of the Site. @@ -45,4 +46,4 @@ This data source exports the following attributes in addition to the arguments a * `account_id` - AWS Account identifier. * `description` - Description. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/outposts_sites.html.markdown b/website/docs/cdktf/python/d/outposts_sites.html.markdown index 4b3b8c50d352..cb3a682fe1ce 100644 --- a/website/docs/cdktf/python/d/outposts_sites.html.markdown +++ b/website/docs/cdktf/python/d/outposts_sites.html.markdown @@ -31,7 +31,9 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -This data source does not support any arguments. +This data source supports the following arguments: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). ## Attribute Reference @@ -40,4 +42,4 @@ This data source exports the following attributes in addition to the arguments a * `id` - AWS Region. * `ids` - Set of Outposts Site identifiers. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/polly_voices.html.markdown b/website/docs/cdktf/python/d/polly_voices.html.markdown index c78bd1694b57..e8fde77924ba 100644 --- a/website/docs/cdktf/python/d/polly_voices.html.markdown +++ b/website/docs/cdktf/python/d/polly_voices.html.markdown @@ -54,6 +54,7 @@ class MyConvertedCode(TerraformStack): The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `engine` - (Optional) Engine used by Amazon Polly when processing input text for speech synthesis. Valid values are `standard`, `neural`, and `long-form`. * `include_additional_language_codes` - (Optional) Whether to return any bilingual voices that use the specified language as an additional language. * `language_code` - (Optional) Language identification tag for filtering the list of voices returned. If not specified, all available voices are returned. @@ -77,4 +78,4 @@ See the [AWS Polly Voice documentation](https://docs.aws.amazon.com/polly/latest * `name` - Name of the voice. * `supported_engines` - Specifies which engines are supported by a given voice. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/prefix_list.html.markdown b/website/docs/cdktf/python/d/prefix_list.html.markdown index 9de2870d8933..7d0cb850f74a 100644 --- a/website/docs/cdktf/python/d/prefix_list.html.markdown +++ b/website/docs/cdktf/python/d/prefix_list.html.markdown @@ -91,6 +91,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `prefix_list_id` - (Optional) ID of the prefix list to select. * `name` - (Optional) Name of the prefix list to select. * `filter` - (Optional) Configuration block(s) for filtering. Detailed below. @@ -120,4 +121,4 @@ This data source exports the following attributes in addition to the arguments a - `read` - (Default `20m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/prometheus_default_scraper_configuration.html.markdown b/website/docs/cdktf/python/d/prometheus_default_scraper_configuration.html.markdown index 20242ab9f19b..c0a0c208c9bd 100644 --- a/website/docs/cdktf/python/d/prometheus_default_scraper_configuration.html.markdown +++ b/website/docs/cdktf/python/d/prometheus_default_scraper_configuration.html.markdown @@ -32,7 +32,9 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -This data source does not support any arguments. +This data source supports the following arguments: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). ## Attribute Reference @@ -40,4 +42,4 @@ This data source exports the following attributes in addition to the arguments a * `configuration` - The configuration file. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/prometheus_workspace.html.markdown b/website/docs/cdktf/python/d/prometheus_workspace.html.markdown index 527177c094fc..2ae80fde2222 100644 --- a/website/docs/cdktf/python/d/prometheus_workspace.html.markdown +++ b/website/docs/cdktf/python/d/prometheus_workspace.html.markdown @@ -35,8 +35,9 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -The following arguments are required: +This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `workspace_id` - (Required) Prometheus workspace ID. ## Attribute Reference @@ -51,4 +52,4 @@ This data source exports the following attributes in addition to the arguments a * `status` - Status of the Prometheus workspace. * `tags` - Tags assigned to the resource. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/prometheus_workspaces.html.markdown b/website/docs/cdktf/python/d/prometheus_workspaces.html.markdown index 3b281a496246..60074c22df31 100644 --- a/website/docs/cdktf/python/d/prometheus_workspaces.html.markdown +++ b/website/docs/cdktf/python/d/prometheus_workspaces.html.markdown @@ -55,6 +55,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `alias_prefix` - (Optional) Limits results to workspaces with aliases that begin with this value. ## Attribute Reference @@ -65,4 +66,4 @@ This data source exports the following attributes in addition to the arguments a * `arns` - List of ARNs of the matched Prometheus workspaces. * `workspace_ids` - List of workspace IDs of the matched Prometheus workspaces. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/qldb_ledger.html.markdown b/website/docs/cdktf/python/d/qldb_ledger.html.markdown index 305ccf1870ad..3a20c5c0ef61 100644 --- a/website/docs/cdktf/python/d/qldb_ledger.html.markdown +++ b/website/docs/cdktf/python/d/qldb_ledger.html.markdown @@ -35,6 +35,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Friendly name of the ledger to match. 
## Attribute Reference @@ -44,4 +45,4 @@ This data source exports the following attributes in addition to the arguments a See the [QLDB Ledger Resource](/docs/providers/aws/r/qldb_ledger.html) for details on the returned attributes - they are identical. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/quicksight_analysis.html.markdown b/website/docs/cdktf/python/d/quicksight_analysis.html.markdown index 61e4082dc9dc..d1e9d1c6fbad 100644 --- a/website/docs/cdktf/python/d/quicksight_analysis.html.markdown +++ b/website/docs/cdktf/python/d/quicksight_analysis.html.markdown @@ -38,7 +38,8 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: * `analysis_id` - (Required) Identifier for the analysis. -* `aws_account_id` - (Optional) AWS account ID. +* `aws_account_id` - (Optional) AWS account ID. Defaults to automatically determined account ID of the Terraform AWS provider. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). ## Attribute Reference @@ -47,4 +48,4 @@ This data source exports the following attributes in addition to the arguments a See the [Analysis Resource](/docs/providers/aws/r/quicksight_analysis.html) for details on the returned attributes - they are identical. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/quicksight_data_set.html.markdown b/website/docs/cdktf/python/d/quicksight_data_set.html.markdown index 949227ab5ae7..e876f35376df 100644 --- a/website/docs/cdktf/python/d/quicksight_data_set.html.markdown +++ b/website/docs/cdktf/python/d/quicksight_data_set.html.markdown @@ -37,8 +37,9 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `aws_account_id` - (Optional) AWS account ID. Defaults to automatically determined account ID of the Terraform AWS provider. * `data_set_id` - (Required) Identifier for the data set. -* `aws_account_id` - (Optional) AWS account ID. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). ## Attribute Reference @@ -47,4 +48,4 @@ This data source exports the following attributes in addition to the arguments a See the [Data Set Resource](/docs/providers/aws/r/quicksight_data_set.html) for details on the returned attributes - they are identical. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/quicksight_group.html.markdown b/website/docs/cdktf/python/d/quicksight_group.html.markdown index b7c945ee817c..d963834e80d8 100644 --- a/website/docs/cdktf/python/d/quicksight_group.html.markdown +++ b/website/docs/cdktf/python/d/quicksight_group.html.markdown @@ -43,8 +43,9 @@ The following arguments are required: The following arguments are optional: -* `aws_account_id` - (Optional) AWS account ID. +* `aws_account_id` - (Optional) AWS account ID. Defaults to automatically determined account ID of the Terraform AWS provider. * `namespace` - (Optional) QuickSight namespace. Defaults to `default`. 
+* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). ## Attribute Reference @@ -54,4 +55,4 @@ This data source exports the following attributes in addition to the arguments a * `description` - The group description. * `principal_id` - The principal ID of the group. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/quicksight_theme.html.markdown b/website/docs/cdktf/python/d/quicksight_theme.html.markdown index eec05ca078f4..595ad5b8d278 100644 --- a/website/docs/cdktf/python/d/quicksight_theme.html.markdown +++ b/website/docs/cdktf/python/d/quicksight_theme.html.markdown @@ -41,7 +41,8 @@ The following arguments are required: The following arguments are optional: -* `aws_account_id` - AWS account ID. +* `aws_account_id` - AWS account ID. Defaults to automatically determined account ID of the Terraform AWS provider. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). ## Attribute Reference @@ -131,4 +132,4 @@ This data source exports the following attributes in addition to the arguments a * `warning` - Color (hexadecimal) that applies to warning and informational messages. * `warning_foreground` - Color (hexadecimal) that applies to any text or other elements that appear over the warning color. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/quicksight_user.html.markdown b/website/docs/cdktf/python/d/quicksight_user.html.markdown index 81c34aceeedb..3100d77c6635 100644 --- a/website/docs/cdktf/python/d/quicksight_user.html.markdown +++ b/website/docs/cdktf/python/d/quicksight_user.html.markdown @@ -43,8 +43,9 @@ The following arguments are required: The following arguments are optional: -* `aws_account_id` - (Optional) AWS account ID. +* `aws_account_id` - (Optional) AWS account ID. Defaults to automatically determined account ID of the Terraform AWS provider. * `namespace` - (Optional) QuickSight namespace. Defaults to `default`. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). ## Attribute Reference @@ -52,6 +53,7 @@ This data source exports the following attributes in addition to the arguments a * `active` - The active status of user. When you create an Amazon QuickSight user that’s not an IAM user or an Active Directory user, that user is inactive until they sign in and provide a password. * `arn` - The Amazon Resource Name (ARN) for the user. +* `custom_permissions_name` - The custom permissions profile associated with this user. * `email` - The user's email address. * `identity_type` - The type of identity authentication used by the user. * `principal_id` - The principal ID of the user. @@ -60,4 +62,4 @@ This data source exports the following attributes in addition to the arguments a - `AUTHOR`: A user who can create data sources, datasets, analyzes, and dashboards. - `ADMIN`: A user who is an author, who can also manage Amazon QuickSight settings. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/ram_resource_share.html.markdown b/website/docs/cdktf/python/d/ram_resource_share.html.markdown index 03ac47da2710..623a66ad6b27 100644 --- a/website/docs/cdktf/python/d/ram_resource_share.html.markdown +++ b/website/docs/cdktf/python/d/ram_resource_share.html.markdown @@ -60,10 +60,11 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Optional) Name of the resource share to retrieve. * `resource_owner` (Required) Owner of the resource share. Valid values are `SELF` or `OTHER-ACCOUNTS`. * `resource_share_status` (Optional) Specifies that you want to retrieve details of only those resource shares that have this status. Valid values are `PENDING`, `ACTIVE`, `FAILED`, `DELETING`, and `DELETED`. -* `filter` - (Optional) Filter used to scope the list e.g., by tags. See [related docs] (https://docs.aws.amazon.com/ram/latest/APIReference/API_TagFilter.html). +* `filter` - (Optional) Filter used to scope the list of owned shares e.g., by tags. See [related docs](https://docs.aws.amazon.com/ram/latest/APIReference/API_TagFilter.html). * `name` - (Required) Name of the tag key to filter on. * `values` - (Required) Value of the tag key. @@ -78,4 +79,4 @@ This data source exports the following attributes in addition to the arguments a * `status` - Status of the resource share. * `tags` - Tags attached to the resource share. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/rds_certificate.html.markdown b/website/docs/cdktf/python/d/rds_certificate.html.markdown index e1cd22829f87..a1ed29d6c453 100644 --- a/website/docs/cdktf/python/d/rds_certificate.html.markdown +++ b/website/docs/cdktf/python/d/rds_certificate.html.markdown @@ -35,6 +35,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `id` - (Optional) Certificate identifier. For example, `rds-ca-2019`. * `default_for_new_launches` - (Optional) When enabled, returns the default certificate for new RDS instances. * `latest_valid_till` - (Optional) When enabled, returns the certificate with the latest `ValidTill`. @@ -51,4 +52,4 @@ This data source exports the following attributes in addition to the arguments a * `valid_from` - [RFC3339 format](https://tools.ietf.org/html/rfc3339#section-5.8) of certificate starting validity date. * `valid_till` - [RFC3339 format](https://tools.ietf.org/html/rfc3339#section-5.8) of certificate ending validity date. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/rds_cluster.html.markdown b/website/docs/cdktf/python/d/rds_cluster.html.markdown index e8df0047b506..ba6f6dbfaa82 100644 --- a/website/docs/cdktf/python/d/rds_cluster.html.markdown +++ b/website/docs/cdktf/python/d/rds_cluster.html.markdown @@ -35,6 +35,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `cluster_identifier` - (Required) Cluster identifier of the RDS cluster. ## Attribute Reference @@ -46,4 +47,4 @@ returned attributes - they are identical for all attributes, except the `tags_al * `tags` - A map of tags assigned to the resource. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/rds_cluster_parameter_group.html.markdown b/website/docs/cdktf/python/d/rds_cluster_parameter_group.html.markdown index 67ea1f378b57..b2bfe9cba53a 100644 --- a/website/docs/cdktf/python/d/rds_cluster_parameter_group.html.markdown +++ b/website/docs/cdktf/python/d/rds_cluster_parameter_group.html.markdown @@ -33,8 +33,9 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -The following arguments are required: +This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) DB cluster parameter group name. ## Attribute Reference @@ -45,4 +46,4 @@ This data source exports the following attributes in addition to the arguments a * `family` - Family of the cluster parameter group. * `description` - Description of the cluster parameter group. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/rds_clusters.html.markdown b/website/docs/cdktf/python/d/rds_clusters.html.markdown index 885ca2d5515f..10bf8f04b3f7 100644 --- a/website/docs/cdktf/python/d/rds_clusters.html.markdown +++ b/website/docs/cdktf/python/d/rds_clusters.html.markdown @@ -41,6 +41,7 @@ class MyConvertedCode(TerraformStack): The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `filter` - (Optional) Configuration block(s) for filtering. Detailed below. ### filter Configuration block @@ -57,4 +58,4 @@ This data source exports the following attributes in addition to the arguments a * `cluster_arns` - Set of cluster ARNs of the matched RDS clusters. * `cluster_identifiers` - Set of ARNs of cluster identifiers of the matched RDS clusters. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/rds_engine_version.html.markdown b/website/docs/cdktf/python/d/rds_engine_version.html.markdown index fd2b83015f1d..286546763e1a 100644 --- a/website/docs/cdktf/python/d/rds_engine_version.html.markdown +++ b/website/docs/cdktf/python/d/rds_engine_version.html.markdown @@ -68,6 +68,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `default_only` - (Optional) Whether the engine version must be an AWS-defined default version. 
Some engines have multiple default versions, such as for each major version. Using `default_only` may help avoid `multiple RDS engine versions` errors. See also `latest`. * `filter` - (Optional) One or more name/value pairs to use in filtering versions. There are several valid keys; for a full reference, check out [describe-db-engine-versions in the AWS CLI reference](https://awscli.amazonaws.com/v2/documentation/api/latest/reference/rds/describe-db-engine-versions.html). * `has_major_target` - (Optional) Whether the engine version must have one or more major upgrade targets. Not including `has_major_target` or setting it to `false` doesn't imply that there's no corresponding major upgrade target for the engine version. @@ -106,4 +107,4 @@ This data source exports the following attributes in addition to the arguments a * `version_actual` - Complete engine version. * `version_description` - Description of the engine version. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/rds_orderable_db_instance.html.markdown b/website/docs/cdktf/python/d/rds_orderable_db_instance.html.markdown index 7fe1d9900c43..4628c0f82c07 100644 --- a/website/docs/cdktf/python/d/rds_orderable_db_instance.html.markdown +++ b/website/docs/cdktf/python/d/rds_orderable_db_instance.html.markdown @@ -61,6 +61,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `availability_zone_group` - (Optional) Availability zone group. * `engine_latest_version` - (Optional) When set to `true`, the data source attempts to return the most recent version matching the other criteria you provide. 
You must use `engine_latest_version` with `preferred_instance_classes` and/or `preferred_engine_versions`. Using `engine_latest_version` will avoid `multiple RDS DB Instance Classes` errors. If you use `engine_latest_version` with `preferred_instance_classes`, the data source returns the latest version for the _first_ matching instance class (instance class priority). **Note:** The data source uses a best-effort approach at selecting the latest version but due to the complexity of version identifiers across engines, using `engine_latest_version` may _not_ return the latest version in every situation. * `engine_version` - (Optional) Version of the DB engine. If none is provided, the data source tries to use the AWS-defined default version that matches any other criteria. @@ -99,4 +100,4 @@ This data source exports the following attributes in addition to the arguments a * `multi_az_capable` - Whether a DB instance is Multi-AZ capable. * `outpost_capable` - Whether a DB instance supports RDS on Outposts. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/rds_reserved_instance_offering.html.markdown b/website/docs/cdktf/python/d/rds_reserved_instance_offering.html.markdown index 484da7e115b6..a556aedfe959 100644 --- a/website/docs/cdktf/python/d/rds_reserved_instance_offering.html.markdown +++ b/website/docs/cdktf/python/d/rds_reserved_instance_offering.html.markdown @@ -39,11 +39,12 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `db_instance_class` - (Required) DB instance class for the reserved DB instance. 
* `duration` - (Required) Duration of the reservation in years or seconds. Valid values are `1`, `3`, `31536000`, `94608000` * `multi_az` - (Required) Whether the reservation applies to Multi-AZ deployments. * `offering_type` - (Required) Offering type of this reserved DB instance. Valid values are `No Upfront`, `Partial Upfront`, `All Upfront`. -* `product_description` - (Required) Description of the reserved DB instance. +* `product_description` - (Required) Description of the reserved DB instance. Example values are `postgresql`, `aurora-postgresql`, `mysql`, `aurora-mysql`, `mariadb`. ## Attribute Reference @@ -54,4 +55,4 @@ This data source exports the following attributes in addition to the arguments a * `fixed_price` - Fixed price charged for this reserved DB instance. * `offering_id` - Unique identifier for the reservation. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/redshift_cluster.html.markdown b/website/docs/cdktf/python/d/redshift_cluster.html.markdown index 4c4841440f0a..a0ed5cba1119 100644 --- a/website/docs/cdktf/python/d/redshift_cluster.html.markdown +++ b/website/docs/cdktf/python/d/redshift_cluster.html.markdown @@ -56,6 +56,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
* `cluster_identifier` - (Required) Cluster identifier ## Attribute Reference @@ -108,4 +109,4 @@ Cluster nodes (for `cluster_nodes`) support the following attributes: * `private_ip_address` - Private IP address of a node within a cluster * `public_ip_address` - Public IP address of a node within a cluster - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/redshift_cluster_credentials.html.markdown b/website/docs/cdktf/python/d/redshift_cluster_credentials.html.markdown index 07ca23941af6..316715bef9fc 100644 --- a/website/docs/cdktf/python/d/redshift_cluster_credentials.html.markdown +++ b/website/docs/cdktf/python/d/redshift_cluster_credentials.html.markdown @@ -36,6 +36,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `auto_create` - (Optional) Create a database user with the name specified for the user named in `db_user` if one does not exist. * `cluster_identifier` - (Required) Unique identifier of the cluster that contains the database for which your are requesting credentials. * `db_name` - (Optional) Name of a database that DbUser is authorized to log on to. If `db_name` is not specified, `db_user` can log on to any existing database. @@ -50,4 +51,4 @@ This data source exports the following attributes in addition to the arguments a * `db_password` - Temporary password that authorizes the user name returned by `db_user` to log on to the database `db_name`. * `expiration` - Date and time the password in `db_password` expires. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/redshift_data_shares.html.markdown b/website/docs/cdktf/python/d/redshift_data_shares.html.markdown index 1cf5986e5dcc..602dac39a8c3 100644 --- a/website/docs/cdktf/python/d/redshift_data_shares.html.markdown +++ b/website/docs/cdktf/python/d/redshift_data_shares.html.markdown @@ -33,7 +33,9 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -This data source does not support any arguments. +This data source supports the following arguments: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). ## Attribute Reference @@ -48,4 +50,4 @@ This data source exports the following attributes in addition to the arguments a * `managed_by` - Identifier of a datashare to show its managing entity. * `producer_arn` - ARN (Amazon Resource Name) of the producer. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/redshift_orderable_cluster.html.markdown b/website/docs/cdktf/python/d/redshift_orderable_cluster.html.markdown index 60ce882a6573..1c133fd17cce 100644 --- a/website/docs/cdktf/python/d/redshift_orderable_cluster.html.markdown +++ b/website/docs/cdktf/python/d/redshift_orderable_cluster.html.markdown @@ -36,6 +36,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
* `cluster_type` - (Optional) Reshift Cluster typeE.g., `multi-node` or `single-node` * `cluster_version` - (Optional) Redshift Cluster versionE.g., `1.0` * `node_type` - (Optional) Redshift Cluster node typeE.g., `dc2.8xlarge` @@ -47,4 +48,4 @@ This data source exports the following attributes in addition to the arguments a * `availability_zones` - List of Availability Zone names where the Redshift Cluster is available. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/redshift_producer_data_shares.html.markdown b/website/docs/cdktf/python/d/redshift_producer_data_shares.html.markdown index b27959cc2b56..b384002e42bc 100644 --- a/website/docs/cdktf/python/d/redshift_producer_data_shares.html.markdown +++ b/website/docs/cdktf/python/d/redshift_producer_data_shares.html.markdown @@ -41,6 +41,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `status` - (Optional) Status of a datashare in the producer. Valid values are `ACTIVE`, `AUTHORIZED`, `PENDING_AUTHORIZATION`, `DEAUTHORIZED`, and `REJECTED`. Omit this argument to return all statuses. ## Attribute Reference @@ -56,4 +57,4 @@ This data source exports the following attributes in addition to the arguments a * `managed_by` - Identifier of a datashare to show its managing entity. * `producer_arn` - ARN (Amazon Resource Name) of the producer. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/redshift_subnet_group.html.markdown b/website/docs/cdktf/python/d/redshift_subnet_group.html.markdown index f1e6b883d56f..9f3245ed6053 100644 --- a/website/docs/cdktf/python/d/redshift_subnet_group.html.markdown +++ b/website/docs/cdktf/python/d/redshift_subnet_group.html.markdown @@ -35,6 +35,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the cluster subnet group for which information is requested. ## Attribute Reference @@ -47,4 +48,4 @@ This data source exports the following attributes in addition to the arguments a * `subnet_ids` - An array of VPC subnet IDs. * `tags` - Tags associated to the Subnet Group - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/redshiftserverless_credentials.html.markdown b/website/docs/cdktf/python/d/redshiftserverless_credentials.html.markdown index 41d0d342d44e..e5edecccb9c3 100644 --- a/website/docs/cdktf/python/d/redshiftserverless_credentials.html.markdown +++ b/website/docs/cdktf/python/d/redshiftserverless_credentials.html.markdown @@ -35,6 +35,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
* `workgroup_name` - (Required) The name of the workgroup associated with the database. * `db_name` - (Optional) The name of the database to get temporary authorization to log on to. * `duration_seconds` - (Optional) The number of seconds until the returned temporary password expires. The minimum is 900 seconds, and the maximum is 3600 seconds. @@ -47,4 +48,4 @@ This data source exports the following attributes in addition to the arguments a * `db_user` - A database user name that is authorized to log on to the database `db_name` using the password `db_password` . If the specified `db_user` exists in the database, the new user name has the same database privileges as the user named in `db_user` . By default, the user is added to PUBLIC. the user doesn't exist in the database. * `expiration` - Date and time the password in `db_password` expires. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/redshiftserverless_namespace.html.markdown b/website/docs/cdktf/python/d/redshiftserverless_namespace.html.markdown index caf96f668c48..4cbb14327b88 100644 --- a/website/docs/cdktf/python/d/redshiftserverless_namespace.html.markdown +++ b/website/docs/cdktf/python/d/redshiftserverless_namespace.html.markdown @@ -35,6 +35,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `namespace_name` - (Required) The name of the namespace. ## Attribute Reference @@ -50,4 +51,4 @@ This data source exports the following attributes in addition to the arguments a * `log_exports` - The types of logs the namespace can export. Available export types are `userlog`, `connectionlog`, and `useractivitylog`. 
* `namespace_id` - The Redshift Namespace ID. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/redshiftserverless_workgroup.html.markdown b/website/docs/cdktf/python/d/redshiftserverless_workgroup.html.markdown index 5449fabc09ce..91df488241ce 100644 --- a/website/docs/cdktf/python/d/redshiftserverless_workgroup.html.markdown +++ b/website/docs/cdktf/python/d/redshiftserverless_workgroup.html.markdown @@ -35,8 +35,9 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -The following arguments are required: +This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `workgroup_name` - (Required) The name of the workgroup associated with the database. ## Attribute Reference @@ -72,4 +73,4 @@ This data source exports the following attributes in addition to the arguments a * `private_ip_address` - The IPv4 address of the network interface within the subnet. * `subnet_id` - The unique identifier of the subnet. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/region.html.markdown b/website/docs/cdktf/python/d/region.html.markdown index 11a856e8a09c..e2015e3f70f5 100644 --- a/website/docs/cdktf/python/d/region.html.markdown +++ b/website/docs/cdktf/python/d/region.html.markdown @@ -3,24 +3,24 @@ subcategory: "Meta Data Sources" layout: "aws" page_title: "AWS: aws_region" description: |- - Provides details about a specific service region + Provides details about a specific AWS Region --- # Data Source: aws_region -`aws_region` provides details about a specific AWS region. +`aws_region` provides details about a specific AWS Region. 
-As well as validating a given region name this resource can be used to -discover the name of the region configured within the provider. The latter +As well as validating a given Region name this resource can be used to +discover the name of the Region configured within the provider. The latter can be useful in a child module which is inheriting an AWS provider configuration from its parent module. ## Example Usage The following example shows how the resource might be used to obtain -the name of the AWS region configured on the provider. +the name of the AWS Region configured on the provider. ```python # DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug @@ -41,8 +41,9 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: -* `name` - (Optional) Full name of the region to select. +* `region` - (Optional) Full name of the region to select (e.g. `us-east-1`), and the region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `endpoint` - (Optional) EC2 endpoint of the region to select. +* `name` - (Optional, **Deprecated**) Full name of the region to select. Use `region` instead. ## Attribute Reference @@ -50,4 +51,4 @@ This data source exports the following attributes in addition to the arguments a * `description` - Region's description in this format: "Location (Region name)". 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/resourceexplorer2_search.html.markdown b/website/docs/cdktf/python/d/resourceexplorer2_search.html.markdown index 7dcf927c4d5f..73c91d668177 100644 --- a/website/docs/cdktf/python/d/resourceexplorer2_search.html.markdown +++ b/website/docs/cdktf/python/d/resourceexplorer2_search.html.markdown @@ -41,6 +41,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `view_arn` - (Optional) Specifies the Amazon resource name (ARN) of the view to use for the query. If you don't specify a value for this parameter, then the operation automatically uses the default view for the AWS Region in which you called this operation. If the Region either doesn't have a default view or if you don't have permission to use the default view, then the operation fails with a `401 Unauthorized` exception. ## Attribute Reference @@ -72,4 +73,4 @@ This data source exports the following attributes in addition to the arguments a * `last_reported_at` - The date and time that the information about this resource property was last updated. * `name` - Name of this property of the resource. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/resourcegroupstaggingapi_resources.html.markdown b/website/docs/cdktf/python/d/resourcegroupstaggingapi_resources.html.markdown index 3b7bc7e9cfbb..02225782837c 100644 --- a/website/docs/cdktf/python/d/resourcegroupstaggingapi_resources.html.markdown +++ b/website/docs/cdktf/python/d/resourcegroupstaggingapi_resources.html.markdown @@ -77,6 +77,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `exclude_compliant_resources` - (Optional) Specifies whether to exclude resources that are compliant with the tag policy. You can use this parameter only if the `include_compliance_details` argument is also set to `true`. * `include_compliance_details` - (Optional) Specifies whether to include details regarding the compliance with the effective tag policy. * `tag_filter` - (Optional) Specifies a list of Tag Filters (keys and values) to restrict the output to only those resources that have the specified tag and, if included, the specified value. See [Tag Filter](#tag-filter) below. Conflicts with `resource_arn_list`. @@ -105,4 +106,4 @@ This data source exports the following attributes in addition to the arguments a * `resource_arn` - ARN of the resource. * `tags` - Map of tags assigned to the resource. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/route.html.markdown b/website/docs/cdktf/python/d/route.html.markdown index e8716ae8a7a7..058ee4938422 100644 --- a/website/docs/cdktf/python/d/route.html.markdown +++ b/website/docs/cdktf/python/d/route.html.markdown @@ -51,6 +51,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `route_table_id` - (Required) ID of the specific Route Table containing the Route entry. * `carrier_gateway_id` - (Optional) EC2 Carrier Gateway ID of the Route belonging to the Route Table. * `core_network_arn` - (Optional) Core network ARN of the Route belonging to the Route Table. @@ -78,4 +79,4 @@ This data source exports no additional attributes. - `read` - (Default `20m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/route53_resolver_endpoint.html.markdown b/website/docs/cdktf/python/d/route53_resolver_endpoint.html.markdown index 089385147531..e04d3251cea5 100644 --- a/website/docs/cdktf/python/d/route53_resolver_endpoint.html.markdown +++ b/website/docs/cdktf/python/d/route53_resolver_endpoint.html.markdown @@ -58,6 +58,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `resolver_endpoint_id` - (Optional) ID of the Route53 Resolver Endpoint. 
* `filter` - (Optional) One or more name/value pairs to use as filters. There are several valid keys, for a full reference, check out @@ -77,4 +78,4 @@ This data source exports the following attributes in addition to the arguments a [1]: https://docs.aws.amazon.com/Route53/latest/APIReference/API_route53resolver_Filter.html - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/route53_resolver_firewall_config.html.markdown b/website/docs/cdktf/python/d/route53_resolver_firewall_config.html.markdown index 72f4675d5036..629a3b9f28d1 100644 --- a/website/docs/cdktf/python/d/route53_resolver_firewall_config.html.markdown +++ b/website/docs/cdktf/python/d/route53_resolver_firewall_config.html.markdown @@ -39,6 +39,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `resource_id` - (Required) The ID of the VPC from Amazon VPC that the configuration is for. ## Attribute Reference @@ -49,4 +50,4 @@ This data source exports the following attributes in addition to the arguments a * `id` - The ID of the firewall configuration. * `owner_id` - The Amazon Web Services account ID of the owner of the VPC that this firewall configuration applies to. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/route53_resolver_firewall_domain_list.html.markdown b/website/docs/cdktf/python/d/route53_resolver_firewall_domain_list.html.markdown index b9fa352e6adc..1d233b534a67 100644 --- a/website/docs/cdktf/python/d/route53_resolver_firewall_domain_list.html.markdown +++ b/website/docs/cdktf/python/d/route53_resolver_firewall_domain_list.html.markdown @@ -39,6 +39,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `firewall_domain_list_id` - (Required) The ID of the domain list. ## Attribute Reference @@ -55,4 +56,4 @@ This data source exports the following attributes in addition to the arguments a * `status` - The status of the domain list. * `status_message` - Additional information about the status of the list, if available. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/route53_resolver_firewall_rule_group.html.markdown b/website/docs/cdktf/python/d/route53_resolver_firewall_rule_group.html.markdown index c5d332c1609f..7c63b8025747 100644 --- a/website/docs/cdktf/python/d/route53_resolver_firewall_rule_group.html.markdown +++ b/website/docs/cdktf/python/d/route53_resolver_firewall_rule_group.html.markdown @@ -39,6 +39,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `firewall_rule_group_id` - (Required) The ID of the rule group. ## Attribute Reference @@ -56,4 +57,4 @@ This data source exports the following attributes in addition to the arguments a * `status` - The status of the rule group. * `status_message` - Additional information about the status of the rule group, if available. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/route53_resolver_firewall_rule_group_association.html.markdown b/website/docs/cdktf/python/d/route53_resolver_firewall_rule_group_association.html.markdown index ff23e5c9dd4c..ce2aed2d1de5 100644 --- a/website/docs/cdktf/python/d/route53_resolver_firewall_rule_group_association.html.markdown +++ b/website/docs/cdktf/python/d/route53_resolver_firewall_rule_group_association.html.markdown @@ -39,6 +39,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `firewall_rule_group_association_id` - (Required) The identifier for the association. ## Attribute Reference @@ -58,4 +59,4 @@ This data source exports the following attributes in addition to the arguments a * `status_message` - Additional information about the status of the response, if available. * `vpc_id` - The unique identifier of the VPC that is associated with the rule group. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/route53_resolver_firewall_rules.html.markdown b/website/docs/cdktf/python/d/route53_resolver_firewall_rules.html.markdown index 80fb0ca053e4..963c404cb60d 100644 --- a/website/docs/cdktf/python/d/route53_resolver_firewall_rules.html.markdown +++ b/website/docs/cdktf/python/d/route53_resolver_firewall_rules.html.markdown @@ -37,6 +37,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `firewall_rule_group_id` - (Required) The unique identifier of the firewall rule group that you want to retrieve the rules for. * `action` - (Optional) The action that DNS Firewall should take on a DNS query when it matches one of the domains in the rule's domain list. * `priority` - (Optional) The setting that determines the processing order of the rules in a rule group. @@ -59,4 +60,4 @@ This data source exports the following attributes in addition to the arguments a * `modification_time` - The date and time that the rule was last modified, in Unix time format and Coordinated Universal Time (UTC). * `name` - The name of the rule. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/route53_resolver_query_log_config.html.markdown b/website/docs/cdktf/python/d/route53_resolver_query_log_config.html.markdown index f6fde4af86df..8198b48a1291 100644 --- a/website/docs/cdktf/python/d/route53_resolver_query_log_config.html.markdown +++ b/website/docs/cdktf/python/d/route53_resolver_query_log_config.html.markdown @@ -59,6 +59,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `resolver_query_log_config_id` - (Optional) ID of the Route53 Resolver Query Logging Configuration. * `filter` - (Optional) One or more name/value pairs to use as filters. There are several valid keys, for a full reference, check out @@ -78,4 +79,4 @@ This data source exports the following attributes in addition to the arguments a [1]: https://docs.aws.amazon.com/Route53/latest/APIReference/API_route53resolver_Filter.html - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/route53_resolver_rule.html.markdown b/website/docs/cdktf/python/d/route53_resolver_rule.html.markdown index d35f39c3e3cf..086806242b89 100644 --- a/website/docs/cdktf/python/d/route53_resolver_rule.html.markdown +++ b/website/docs/cdktf/python/d/route53_resolver_rule.html.markdown @@ -38,6 +38,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `domain_name` - (Optional) Domain name the desired resolver rule forwards DNS queries for. Conflicts with `resolver_rule_id`. * `name` - (Optional) Friendly name of the desired resolver rule. Conflicts with `resolver_rule_id`. * `resolver_endpoint_id` (Optional) ID of the outbound resolver endpoint of the desired resolver rule. Conflicts with `resolver_rule_id`. @@ -55,4 +56,4 @@ This data source exports the following attributes in addition to the arguments a Values are `NOT_SHARED`, `SHARED_BY_ME` or `SHARED_WITH_ME` * `tags` - Map of tags assigned to the resolver rule. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/route53_resolver_rules.html.markdown b/website/docs/cdktf/python/d/route53_resolver_rules.html.markdown index 345b93437f73..69dab524ee67 100644 --- a/website/docs/cdktf/python/d/route53_resolver_rules.html.markdown +++ b/website/docs/cdktf/python/d/route53_resolver_rules.html.markdown @@ -80,6 +80,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name_regex` - (Optional) Regex string to filter resolver rule names. The filtering is done locally, so could have a performance impact if the result is large. This argument should be used along with other arguments to limit the number of results returned. @@ -95,4 +96,4 @@ This data source exports the following attributes in addition to the arguments a * `id` - AWS Region. * `resolver_rule_ids` - IDs of the matched resolver rules. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/route53_traffic_policy_document.html.markdown b/website/docs/cdktf/python/d/route53_traffic_policy_document.html.markdown index 75ce14767a81..fb52551ff6b1 100644 --- a/website/docs/cdktf/python/d/route53_traffic_policy_document.html.markdown +++ b/website/docs/cdktf/python/d/route53_traffic_policy_document.html.markdown @@ -35,10 +35,10 @@ class MyConvertedCode(TerraformStack): endpoint=[DataAwsRoute53TrafficPolicyDocumentEndpoint( id="my_elb", type="elastic-load-balancer", - value="elb-111111.${" + current.name + "}.elb.amazonaws.com" + value="elb-111111.${" + current.region + "}.elb.amazonaws.com" ), DataAwsRoute53TrafficPolicyDocumentEndpoint( id="site_down_banner", - region=Token.as_string(current.name), + region=Token.as_string(current.region), type="s3-website", value="www.example.com" ) @@ -222,4 +222,4 @@ This data source exports the following attributes in addition to the arguments a * `json` - Standard JSON policy document rendered based on the arguments above. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/route53profiles_profiles.html.markdown b/website/docs/cdktf/python/d/route53profiles_profiles.html.markdown index 7fca436aae59..2793dc8c5fcd 100644 --- a/website/docs/cdktf/python/d/route53profiles_profiles.html.markdown +++ b/website/docs/cdktf/python/d/route53profiles_profiles.html.markdown @@ -33,7 +33,9 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -This data source does not support any arguments. +This data source supports the following arguments: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
## Attribute Reference @@ -45,4 +47,4 @@ This data source exports the following attributes in addition to the arguments a * `name` - Name of the Profile. * `share_status` - Share status of the Profile. Valid values [AWS docs](https://docs.aws.amazon.com/Route53/latest/APIReference/API_route53profiles_Profile.html) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/route_table.html.markdown b/website/docs/cdktf/python/d/route_table.html.markdown index 764d991e01cc..fe1e47aa9e73 100644 --- a/website/docs/cdktf/python/d/route_table.html.markdown +++ b/website/docs/cdktf/python/d/route_table.html.markdown @@ -48,6 +48,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `filter` - (Optional) Configuration block. Detailed below. * `gateway_id` - (Optional) ID of an Internet Gateway or Virtual Private Gateway which is connected to the Route Table (not exported if not passed as a parameter). * `route_table_id` - (Optional) ID of the specific Route Table to retrieve. 
@@ -113,4 +114,4 @@ Associations are also exported with the following attributes: - `read` - (Default `20m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/route_tables.html.markdown b/website/docs/cdktf/python/d/route_tables.html.markdown index dca13d379521..aaa65e009450 100644 --- a/website/docs/cdktf/python/d/route_tables.html.markdown +++ b/website/docs/cdktf/python/d/route_tables.html.markdown @@ -57,6 +57,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `filter` - (Optional) Custom filter block as described below. * `vpc_id` - (Optional) VPC ID that you want to filter from. * `tags` - (Optional) Map of tags, each pair of which must exactly match @@ -84,4 +85,4 @@ This data source exports the following attributes in addition to the arguments a - `read` - (Default `20m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/s3_access_point.html.markdown b/website/docs/cdktf/python/d/s3_access_point.html.markdown new file mode 100644 index 000000000000..d20f8b51fb85 --- /dev/null +++ b/website/docs/cdktf/python/d/s3_access_point.html.markdown @@ -0,0 +1,63 @@ +--- +subcategory: "S3 Control" +layout: "aws" +page_title: "AWS: aws_s3_access_point" +description: |- + Provides details about a specific S3 access point +--- + + + +# Data Source: aws_s3_access_point + +Provides details about a specific S3 access point. + +## Example Usage + +```python +# DO NOT EDIT. 
Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.data_aws_s3_access_point import DataAwsS3AccessPoint +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + DataAwsS3AccessPoint(self, "example", + name="example-access-point" + ) +``` + +## Argument Reference + +This data source supports the following arguments: + +* `account_id` - (Optional) AWS account ID for the account that owns the specified access point. +* `name` - (Required) Name of the access point. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). + +## Attribute Reference + +This data source exports the following attributes in addition to the arguments above: + +* `alias` - Access point alias. +* `arn` - Access point ARN. +* `bucket` - Name of the bucket associated with the access point. +* `bucket_account_id` - AWS account ID associated with the S3 bucket associated with the access point. +* `data_source_id` - Unique identifier for the data source of the access point. +* `data_source_type` - Type of the data source that the access point is attached to. +* `endpoints` - VPC endpoint for the access point. +* `network_origin` - Indicates whether the access point allows access from the public Internet. +* `public_access_block_configuration` - `PublicAccessBlock` configuration for the access point. + * `block_public_acls` - Whether Amazon S3 blocks public ACLs for buckets in this account. 
+ * `block_public_policy` - Whether Amazon S3 blocks public bucket policies for buckets in this account. + * `ignore_public_acls` - Whether Amazon S3 ignores public ACLs for buckets in this account. + * `restrict_public_buckets` - Whether Amazon S3 restricts public bucket policies for buckets in this account. +* `tags` - Tags assigned to the access point. +* `vpc_configuration` - VPC configuration for the access point. + * `vpc_id` - Access point will only allow connections from this VPC. + + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/s3_bucket.html.markdown b/website/docs/cdktf/python/d/s3_bucket.html.markdown index 951e4819f9be..283036a89b6d 100644 --- a/website/docs/cdktf/python/d/s3_bucket.html.markdown +++ b/website/docs/cdktf/python/d/s3_bucket.html.markdown @@ -86,6 +86,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `bucket` - (Required) Name of the bucket ## Attribute Reference @@ -95,10 +96,10 @@ This data source exports the following attributes in addition to the arguments a * `id` - Name of the bucket. * `arn` - ARN of the bucket. Will be of format `arn:aws:s3:::bucketname`. * `bucket_domain_name` - Bucket domain name. Will be of format `bucketname.s3.amazonaws.com`. +* `bucket_region` - AWS region this bucket resides in. * `bucket_regional_domain_name` - The bucket region-specific domain name. The bucket domain name including the region name. Please refer to the [S3 endpoints reference](https://docs.aws.amazon.com/general/latest/gr/s3.html#s3_region) for format. Note: AWS CloudFront allows specifying an S3 region-specific endpoint when creating an S3 origin. 
This will prevent redirect issues from CloudFront to the S3 Origin URL. For more information, see the [Virtual Hosted-Style Requests for Other Regions](https://docs.aws.amazon.com/AmazonS3/latest/userguide/VirtualHosting.html#deprecated-global-endpoint) section in the AWS S3 User Guide. * `hosted_zone_id` - The [Route 53 Hosted Zone ID](https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_website_region_endpoints) for this bucket's region. -* `region` - AWS region this bucket resides in. * `website_endpoint` - Website endpoint, if the bucket is configured with a website. If not, this will be an empty string. * `website_domain` - Domain of the website endpoint, if the bucket is configured with a website. If not, this will be an empty string. This is used to create Route 53 alias records. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/s3_bucket_object.html.markdown b/website/docs/cdktf/python/d/s3_bucket_object.html.markdown index 7325c779877d..fb1ff696827c 100644 --- a/website/docs/cdktf/python/d/s3_bucket_object.html.markdown +++ b/website/docs/cdktf/python/d/s3_bucket_object.html.markdown @@ -96,6 +96,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `bucket` - (Required) Name of the bucket to read the object from. 
Alternatively, an [S3 access point](https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) ARN can be specified * `key` - (Required) Full path to the object inside the bucket * `version_id` - (Optional) Specific version ID of the object returned (defaults to latest version) @@ -129,4 +130,4 @@ This data source exports the following attributes in addition to the arguments a -> **Note:** Terraform ignores all leading `/`s in the object's `key` and treats multiple `/`s in the rest of the object's `key` as a single `/`, so values of `/index.html` and `index.html` correspond to the same S3 object as do `first//second///third//` and `first/second/third/`. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/s3_bucket_objects.html.markdown b/website/docs/cdktf/python/d/s3_bucket_objects.html.markdown index d0c86c94504c..26ae864e9903 100644 --- a/website/docs/cdktf/python/d/s3_bucket_objects.html.markdown +++ b/website/docs/cdktf/python/d/s3_bucket_objects.html.markdown @@ -54,6 +54,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `bucket` - (Required) Lists object keys in this S3 bucket. Alternatively, an [S3 access point](https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) ARN can be specified * `prefix` - (Optional) Limits results to object keys with this prefix (Default: none) * `delimiter` - (Optional) Character used to group keys (Default: none) @@ -71,4 +72,4 @@ This data source exports the following attributes in addition to the arguments a * `id` - S3 Bucket. 
* `owners` - List of strings representing object owner IDs (see `fetch_owner` above) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/s3_bucket_policy.html.markdown b/website/docs/cdktf/python/d/s3_bucket_policy.html.markdown index 592ebc2ddc2a..ded8f0fc6282 100644 --- a/website/docs/cdktf/python/d/s3_bucket_policy.html.markdown +++ b/website/docs/cdktf/python/d/s3_bucket_policy.html.markdown @@ -40,6 +40,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `bucket` - (Required) Bucket name. ## Attribute Reference @@ -48,4 +49,4 @@ This data source exports the following attributes in addition to the arguments a * `policy` - IAM bucket policy. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/s3_directory_buckets.html.markdown b/website/docs/cdktf/python/d/s3_directory_buckets.html.markdown index d6740a7b30a8..868f850f853e 100644 --- a/website/docs/cdktf/python/d/s3_directory_buckets.html.markdown +++ b/website/docs/cdktf/python/d/s3_directory_buckets.html.markdown @@ -31,7 +31,9 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -This data source does not support any arguments. +This data source supports the following arguments: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
## Attribute Reference @@ -40,4 +42,4 @@ This data source exports the following attributes in addition to the arguments a * `arns` - Bucket ARNs. * `buckets` - Buckets names. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/s3_object.html.markdown b/website/docs/cdktf/python/d/s3_object.html.markdown index cdcc97768e86..12873c00b04d 100644 --- a/website/docs/cdktf/python/d/s3_object.html.markdown +++ b/website/docs/cdktf/python/d/s3_object.html.markdown @@ -95,6 +95,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `bucket` - (Required) Name of the bucket to read the object from. Alternatively, an [S3 access point](https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) ARN can be specified * `checksum_mode` - (Optional) To retrieve the object's checksum, this argument must be `ENABLED`. If you enable `checksum_mode` and the object is encrypted with KMS, you must have permission to use the `kms:Decrypt` action. Valid values: `ENABLED` * `key` - (Required) Full path to the object inside the bucket @@ -135,4 +136,4 @@ This data source exports the following attributes in addition to the arguments a -> **Note:** Terraform ignores all leading `/`s in the object's `key` and treats multiple `/`s in the rest of the object's `key` as a single `/`, so values of `/index.html` and `index.html` correspond to the same S3 object as do `first//second///third//` and `first/second/third/`. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/s3_objects.html.markdown b/website/docs/cdktf/python/d/s3_objects.html.markdown index f6cbcc45720e..968c3cf8e7c7 100644 --- a/website/docs/cdktf/python/d/s3_objects.html.markdown +++ b/website/docs/cdktf/python/d/s3_objects.html.markdown @@ -52,6 +52,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `bucket` - (Required) Lists object keys in this S3 bucket. Alternatively, an [S3 access point](https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) ARN can be specified * `prefix` - (Optional) Limits results to object keys with this prefix (Default: none) * `delimiter` - (Optional) Character used to group keys (Default: none) @@ -71,4 +72,4 @@ This data source exports the following attributes in addition to the arguments a * `owners` - List of strings representing object owner IDs (see `fetch_owner` above) * `request_charged` - If present, indicates that the requester was successfully charged for the request. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/s3control_multi_region_access_point.html.markdown b/website/docs/cdktf/python/d/s3control_multi_region_access_point.html.markdown index 28808cbf3448..ab9cb5039c66 100644 --- a/website/docs/cdktf/python/d/s3control_multi_region_access_point.html.markdown +++ b/website/docs/cdktf/python/d/s3control_multi_region_access_point.html.markdown @@ -35,6 +35,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `account_id` - (Optional) The AWS account ID of the S3 Multi-Region Access Point. Defaults to automatically determined account ID of the Terraform AWS provider. * `name` - (Required) The name of the Multi-Region Access Point. @@ -69,4 +70,4 @@ This data source exports the following attributes in addition to the arguments a * `bucket_account_id` - The AWS account ID that owns the bucket. * `region` - The name of the region. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/sagemaker_prebuilt_ecr_image.html.markdown b/website/docs/cdktf/python/d/sagemaker_prebuilt_ecr_image.html.markdown index b95abf490985..e4d784d2d3c9 100644 --- a/website/docs/cdktf/python/d/sagemaker_prebuilt_ecr_image.html.markdown +++ b/website/docs/cdktf/python/d/sagemaker_prebuilt_ecr_image.html.markdown @@ -43,7 +43,7 @@ This data source supports the following arguments: * `repository_name` - (Required) Name of the repository, which is generally the algorithm or library. 
Values include `autogluon-inference`, `autogluon-training`, `blazingtext`, `djl-inference`, `factorization-machines`, `forecasting-deepar`, `huggingface-pytorch-inference`, `huggingface-pytorch-inference-neuron`, `huggingface-pytorch-inference-neuronx`, `huggingface-pytorch-tgi-inference`, `huggingface-pytorch-training`, `huggingface-pytorch-training-neuronx`, `huggingface-pytorch-trcomp-training`, `huggingface-tensorflow-inference`, `huggingface-tensorflow-training`, `huggingface-tensorflow-trcomp-training`, `image-classification`, `image-classification-neo`, `ipinsights`, `kmeans`, `knn`, `lda`, `linear-learner`, `mxnet-inference`, `mxnet-inference-eia`, `mxnet-training`, `ntm`, `object-detection`, `object2vec`, `pca`, `pytorch-inference`, `pytorch-inference-eia`, `pytorch-inference-graviton`, `pytorch-inference-neuronx`, `pytorch-training`, `pytorch-training-neuronx`, `pytorch-trcomp-training`, `randomcutforest`, `sagemaker-base-python`, `sagemaker-chainer`, `sagemaker-clarify-processing`, `sagemaker-data-wrangler-container`, `sagemaker-debugger-rules`, `sagemaker-geospatial-v1-0`, `sagemaker-inference-mxnet`, `sagemaker-inference-pytorch`, `sagemaker-inference-tensorflow`, `sagemaker-model-monitor-analyzer`, `sagemaker-mxnet`, `sagemaker-mxnet-eia`, `sagemaker-mxnet-serving`, `sagemaker-mxnet-serving-eia`, `sagemaker-neo-mxnet`, `sagemaker-neo-pytorch`, `sagemaker-neo-tensorflow`, `sagemaker-pytorch`, `sagemaker-rl-coach-container`, `sagemaker-rl-mxnet`, `sagemaker-rl-ray-container`, `sagemaker-rl-tensorflow`, `sagemaker-rl-vw-container`, `sagemaker-scikit-learn`, `sagemaker-spark-processing`, `sagemaker-sparkml-serving`, `sagemaker-tensorflow`, `sagemaker-tensorflow-eia`, `sagemaker-tensorflow-scriptmode`, `sagemaker-tensorflow-serving`, `sagemaker-tensorflow-serving-eia`, `sagemaker-tritonserver`, `sagemaker-xgboost`, `semantic-segmentation`, `seq2seq`, `stabilityai-pytorch-inference`, `tei`, `tei-cpu`, `tensorflow-inference`, `tensorflow-inference-eia`, 
`tensorflow-inference-graviton`, `tensorflow-training`, and `xgboost-neo`. * `dns_suffix` - (Optional) DNS suffix to use in the registry path. If not specified, the AWS provider sets it to the DNS suffix for the current region. * `image_tag` - (Optional) Image tag for the Docker image. If not specified, the AWS provider sets the value to `1`, which for many repositories indicates the latest version. Some repositories, such as XGBoost, do not support `1` or `latest` and specific version must be used. -* `region` (Optional) - Region to use in the registry path. If not specified, the AWS provider sets it to the current region. +* `region` - (Optional) Region to use in the registry path. Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). ## Attribute Reference @@ -52,4 +52,4 @@ This data source exports the following attributes in addition to the arguments a * `registry_id` - Account ID containing the image. For example, `469771592824`. * `registry_path` - Docker image URL. For example, `341280168497.dkr.ecr.ca-central-1.amazonaws.com/sagemaker-sparkml-serving:2.4`. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/secretsmanager_random_password.html.markdown b/website/docs/cdktf/python/d/secretsmanager_random_password.html.markdown index 7a6c347e848f..09bf21ed7b34 100644 --- a/website/docs/cdktf/python/d/secretsmanager_random_password.html.markdown +++ b/website/docs/cdktf/python/d/secretsmanager_random_password.html.markdown @@ -36,6 +36,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `exclude_characters` - (Optional) String of the characters that you don't want in the password. * `exclude_lowercase` - (Optional) Specifies whether to exclude lowercase letters from the password. * `exclude_numbers` - (Optional) Specifies whether to exclude numbers from the password. @@ -51,4 +52,4 @@ This data source exports the following attributes in addition to the arguments a * `random_password` - Random password. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/secretsmanager_secret.html.markdown b/website/docs/cdktf/python/d/secretsmanager_secret.html.markdown index e03833423feb..3b8dd7cb2212 100644 --- a/website/docs/cdktf/python/d/secretsmanager_secret.html.markdown +++ b/website/docs/cdktf/python/d/secretsmanager_secret.html.markdown @@ -56,6 +56,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `arn` - (Optional) ARN of the secret to retrieve. * `name` - (Optional) Name of the secret to retrieve. @@ -72,4 +73,4 @@ This data source exports the following attributes in addition to the arguments a * `policy` - Resource-based policy document that's attached to the secret. * `tags` - Tags of the secret. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/secretsmanager_secret_rotation.html.markdown b/website/docs/cdktf/python/d/secretsmanager_secret_rotation.html.markdown index 15606519126e..924eda960bb8 100644 --- a/website/docs/cdktf/python/d/secretsmanager_secret_rotation.html.markdown +++ b/website/docs/cdktf/python/d/secretsmanager_secret_rotation.html.markdown @@ -37,14 +37,21 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `secret_id` - (Required) Specifies the secret containing the version that you want to retrieve. You can specify either the ARN or the friendly name of the secret. ## Attribute Reference This data source exports the following attributes in addition to the arguments above: -* `rotation_enabled` - ARN of the secret. -* `rotation_lambda_arn` - Decrypted part of the protected secret information that was originally provided as a string. -* `rotation_rules` - Decrypted part of the protected secret information that was originally provided as a binary. Base64 encoded. +* `rotation_enabled` - Specifies whether automatic rotation is enabled for this secret. +* `rotation_lambda_arn` - Amazon Resource Name (ARN) of the lambda function used for rotation. +* `rotation_rules` - Configuration block for rotation rules. See [`rotation_rules`](#rotation_rules) below. - \ No newline at end of file +### rotation_rules + +* `automatically_after_days` - Number of days between automatic scheduled rotations of the secret. +* `duration` - Length of the rotation window in hours. 
+* `schedule_expression` - A `cron()` or `rate()` expression that defines the schedule for rotating the secret. + + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/secretsmanager_secret_version.html.markdown b/website/docs/cdktf/python/d/secretsmanager_secret_version.html.markdown index 68bbf3e4a4dd..f17b0d8d83bb 100644 --- a/website/docs/cdktf/python/d/secretsmanager_secret_version.html.markdown +++ b/website/docs/cdktf/python/d/secretsmanager_secret_version.html.markdown @@ -76,6 +76,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `secret_id` - (Required) Specifies the secret containing the version that you want to retrieve. You can specify either the ARN or the friendly name of the secret. * `version_id` - (Optional) Specifies the unique identifier of the version of the secret that you want to retrieve. Overrides `version_stage`. * `version_stage` - (Optional) Specifies the secret version that you want to retrieve by the staging label attached to the version. Defaults to `AWSCURRENT`. @@ -91,4 +92,4 @@ This data source exports the following attributes in addition to the arguments a * `secret_binary` - Decrypted part of the protected secret information that was originally provided as a binary. * `version_id` - Unique identifier of this version of the secret. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/secretsmanager_secret_versions.html.markdown b/website/docs/cdktf/python/d/secretsmanager_secret_versions.html.markdown index ec0995da820c..c3996e2cd75a 100644 --- a/website/docs/cdktf/python/d/secretsmanager_secret_versions.html.markdown +++ b/website/docs/cdktf/python/d/secretsmanager_secret_versions.html.markdown @@ -76,6 +76,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `secret_id` - (Required) Specifies the secret containing the version that you want to retrieve. You can specify either the ARN or the friendly name of the secret. * `include_deprecated` - (Optional) If true, all deprecated secret versions are included in the response. If false, no deprecated secret versions are included in the response. If no value is specified, the default value is `false`. @@ -95,4 +96,4 @@ This data source exports the following attributes in addition to the arguments a * `version_id` - Unique version identifier of this version of the secret. * `version_stages` - List of staging labels attached to the version. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/secretsmanager_secrets.html.markdown b/website/docs/cdktf/python/d/secretsmanager_secrets.html.markdown index 1e2449c662f1..6e6c83a30cb2 100644 --- a/website/docs/cdktf/python/d/secretsmanager_secrets.html.markdown +++ b/website/docs/cdktf/python/d/secretsmanager_secrets.html.markdown @@ -39,6 +39,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `filter` - (Optional) Configuration block(s) for filtering. Detailed below. ## filter Configuration Block @@ -55,4 +56,4 @@ This data source exports the following attributes in addition to the arguments a * `arns` - Set of ARNs of the matched Secrets Manager secrets. * `names` - Set of names of the matched Secrets Manager secrets. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/security_group.html.markdown b/website/docs/cdktf/python/d/security_group.html.markdown index 9e4fb274096c..bb29d1f7e8cf 100644 --- a/website/docs/cdktf/python/d/security_group.html.markdown +++ b/website/docs/cdktf/python/d/security_group.html.markdown @@ -50,6 +50,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `filter` - (Optional) Custom filter block as described below. 
* `id` - (Optional) Id of the specific security group to retrieve. * `name` - (Optional) Name that the desired security group must have. @@ -85,4 +86,4 @@ The following fields are also exported: - `read` - (Default `20m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/security_groups.html.markdown b/website/docs/cdktf/python/d/security_groups.html.markdown index e732f1265722..d29e41c37837 100644 --- a/website/docs/cdktf/python/d/security_groups.html.markdown +++ b/website/docs/cdktf/python/d/security_groups.html.markdown @@ -62,6 +62,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `tags` - (Optional) Map of tags, each pair of which must exactly match for desired security groups. * `filter` - (Optional) One or more name/value pairs to use as filters. There are several valid keys, for a full reference, check out [describe-security-groups in the AWS CLI reference][1]. 
@@ -82,4 +83,4 @@ This data source exports the following attributes in addition to the arguments a - `read` - (Default `20m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/securityhub_standards_control_associations.html.markdown b/website/docs/cdktf/python/d/securityhub_standards_control_associations.html.markdown index 3da5c4337037..88480f910127 100644 --- a/website/docs/cdktf/python/d/securityhub_standards_control_associations.html.markdown +++ b/website/docs/cdktf/python/d/securityhub_standards_control_associations.html.markdown @@ -8,7 +8,7 @@ description: |- -# Resource: aws_securityhub_standards_control_associations +# Data Source: aws_securityhub_standards_control_associations Terraform data source for managing an AWS Security Hub Standards Control Associations. @@ -43,6 +43,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `security_control_id` - (Required) The identifier of the control (identified with `SecurityControlId`, `SecurityControlArn`, or a mix of both parameters). ## Attribute Reference @@ -64,4 +65,4 @@ See [`standards_control_associations`](#standards_control_associations-attribute * `updated_at` - Last time that a control's enablement status in a specified standard was updated. * `updated_reason` - Reason for updating a control's enablement status in a specified standard. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/serverlessapplicationrepository_application.html.markdown b/website/docs/cdktf/python/d/serverlessapplicationrepository_application.html.markdown index a6a709c17765..9c0b988df2a5 100644 --- a/website/docs/cdktf/python/d/serverlessapplicationrepository_application.html.markdown +++ b/website/docs/cdktf/python/d/serverlessapplicationrepository_application.html.markdown @@ -45,6 +45,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `application_id` - (Required) ARN of the application. * `semantic_version` - (Optional) Requested version of the application. By default, retrieves the latest version. @@ -58,4 +59,4 @@ This data source exports the following attributes in addition to the arguments a * `source_code_url` - URL pointing to the source code of the application version. * `template_url` - URL pointing to the Cloud Formation template for the application version. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/service.html.markdown b/website/docs/cdktf/python/d/service.html.markdown index 2216513eee29..75dd334b6881 100644 --- a/website/docs/cdktf/python/d/service.html.markdown +++ b/website/docs/cdktf/python/d/service.html.markdown @@ -31,7 +31,7 @@ class MyConvertedCode(TerraformStack): super().__init__(scope, name) current = DataAwsRegion(self, "current") DataAwsService(self, "test", - region=Token.as_string(current.name), + region=Token.as_string(current.region), service_id="ec2" ) ``` @@ -79,8 +79,8 @@ class MyConvertedCode(TerraformStack): The following arguments are optional: * `dns_name` - (Optional) DNS name of the service (_e.g.,_ `rds.us-east-1.amazonaws.com`). One of `dns_name`, `reverse_dns_name`, or `service_id` is required. -* `partition` - (Optional) Partition corresponding to the region. -* `region` - (Optional) Region of the service (_e.g.,_ `us-west-2`, `ap-northeast-1`). +* `partition` - (Optional) Partition corresponding to the Region. +* `region` - (Optional) Region of the service (_e.g.,_ `us-west-2`, `ap-northeast-1`). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `reverse_dns_name` - (Optional) Reverse DNS name of the service (_e.g.,_ `com.amazonaws.us-west-2.s3`). One of `dns_name`, `reverse_dns_name`, or `service_id` is required. * `reverse_dns_prefix` - (Optional) Prefix of the service (_e.g.,_ `com.amazonaws` in AWS Commercial, `cn.com.amazonaws` in AWS China). * `service_id` - (Optional) Service endpoint ID (_e.g.,_ `s3`, `rds`, `ec2`). One of `dns_name`, `reverse_dns_name`, or `service_id` is required. A service's endpoint ID can be found in the [_AWS General Reference_](https://docs.aws.amazon.com/general/latest/gr/aws-service-information.html). 
@@ -91,4 +91,4 @@ This data source exports the following attributes in addition to the arguments a * `supported` - Whether the service is supported in the region's partition. New services may not be listed immediately as supported. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/service_discovery_dns_namespace.html.markdown b/website/docs/cdktf/python/d/service_discovery_dns_namespace.html.markdown index 2a382092dcf3..2c5f83a9b566 100644 --- a/website/docs/cdktf/python/d/service_discovery_dns_namespace.html.markdown +++ b/website/docs/cdktf/python/d/service_discovery_dns_namespace.html.markdown @@ -36,6 +36,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the namespace. * `type` - (Required) Type of the namespace. Allowed values are `DNS_PUBLIC` or `DNS_PRIVATE`. @@ -49,4 +50,4 @@ This data source exports the following attributes in addition to the arguments a * `hosted_zone` - ID for the hosted zone that Amazon Route 53 creates when you create a namespace. * `tags` - Map of tags for the resource. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/service_discovery_http_namespace.html.markdown b/website/docs/cdktf/python/d/service_discovery_http_namespace.html.markdown index b7c060fb6bf1..35a61d91dd97 100644 --- a/website/docs/cdktf/python/d/service_discovery_http_namespace.html.markdown +++ b/website/docs/cdktf/python/d/service_discovery_http_namespace.html.markdown @@ -33,6 +33,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the http namespace. ## Attribute Reference @@ -45,4 +46,4 @@ This data source exports the following attributes in addition to the arguments a * `http_name` - Name of an HTTP namespace. * `tags` - Map of tags for the resource. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/service_discovery_service.html.markdown b/website/docs/cdktf/python/d/service_discovery_service.html.markdown index 4c52fbda0c3f..4e6fd80b1f27 100644 --- a/website/docs/cdktf/python/d/service_discovery_service.html.markdown +++ b/website/docs/cdktf/python/d/service_discovery_service.html.markdown @@ -36,6 +36,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the service. 
* `namespace_id` - (Required) ID of the namespace that the service belongs to. @@ -81,4 +82,4 @@ The `health_check_custom_config` configuration block supports the following argu * `failure_threshold` - The number of 30-second intervals that you want service discovery to wait before it changes the health status of a service instance. Maximum value of 10. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/service_principal.html.markdown b/website/docs/cdktf/python/d/service_principal.html.markdown index a58f709ff169..2a1120cfdc50 100644 --- a/website/docs/cdktf/python/d/service_principal.html.markdown +++ b/website/docs/cdktf/python/d/service_principal.html.markdown @@ -40,16 +40,15 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: * `service_name` - (Required) Name of the service you want to generate a Service Principal Name for. -* `region` - (Optional) Region you'd like the SPN for. By default, uses the current region. +* `region` - (Optional) Region you'd like the SPN for. Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). ## Attribute Reference This data source exports the following attributes in addition to the arguments above: -* `id` - Identifier of the current Service Principal (compound of service, region and suffix). (e.g. `logs.us-east-1.amazonaws.com`in AWS Commercial, `logs.cn-north-1.amazonaws.com.cn` in AWS China). +* `id` - Identifier of the current Service Principal (compound of service, Region and suffix). (e.g. `logs.us-east-1.amazonaws.com` in AWS Commercial, `logs.cn-north-1.amazonaws.com.cn` in AWS China). * `name` - Service Principal Name (e.g., `logs.amazonaws.com` in AWS Commercial, `logs.amazonaws.com.cn` in AWS China). * `service` - Service used for SPN generation (e.g. `logs`). 
* `suffix` - Suffix of the SPN (e.g., `amazonaws.com` in AWS Commercial, `amazonaws.com.cn` in AWS China). -*`region` - Region identifier of the generated SPN (e.g., `us-east-1` in AWS Commercial, `cn-north-1` in AWS China). - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/servicecatalog_constraint.html.markdown b/website/docs/cdktf/python/d/servicecatalog_constraint.html.markdown index a97efc80137f..f551681aa979 100644 --- a/website/docs/cdktf/python/d/servicecatalog_constraint.html.markdown +++ b/website/docs/cdktf/python/d/servicecatalog_constraint.html.markdown @@ -42,6 +42,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `accept_language` - (Optional) Language code. Valid values: `en` (English), `jp` (Japanese), `zh` (Chinese). Default value is `en`. ## Attribute Reference @@ -56,4 +57,4 @@ This data source exports the following attributes in addition to the arguments a * `status` - Constraint status. * `type` - Type of constraint. Valid values are `LAUNCH`, `NOTIFICATION`, `RESOURCE_UPDATE`, `STACKSET`, and `TEMPLATE`. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/servicecatalog_launch_paths.html.markdown b/website/docs/cdktf/python/d/servicecatalog_launch_paths.html.markdown index 69f5ae8da69f..221be7b66c4f 100644 --- a/website/docs/cdktf/python/d/servicecatalog_launch_paths.html.markdown +++ b/website/docs/cdktf/python/d/servicecatalog_launch_paths.html.markdown @@ -41,6 +41,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `accept_language` - (Optional) Language code. Valid values: `en` (English), `jp` (Japanese), `zh` (Chinese). Default value is `en`. ## Attribute Reference @@ -61,4 +62,4 @@ This data source exports the following attributes in addition to the arguments a * `description` - Description of the constraint. * `type` - Type of constraint. Valid values are `LAUNCH`, `NOTIFICATION`, `STACKSET`, and `TEMPLATE`. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/servicecatalog_portfolio.html.markdown b/website/docs/cdktf/python/d/servicecatalog_portfolio.html.markdown index 5ea5f2b82edb..26532ce07b29 100644 --- a/website/docs/cdktf/python/d/servicecatalog_portfolio.html.markdown +++ b/website/docs/cdktf/python/d/servicecatalog_portfolio.html.markdown @@ -39,6 +39,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `accept_language` - (Optional) Language code. Valid values: `en` (English), `jp` (Japanese), `zh` (Chinese). Default value is `en`. ## Attribute Reference @@ -52,4 +53,4 @@ This data source exports the following attributes in addition to the arguments a * `provider_name` - Name of the person or organization who owns the portfolio. * `tags` - Tags applied to the portfolio. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/servicecatalog_portfolio_constraints.html.markdown b/website/docs/cdktf/python/d/servicecatalog_portfolio_constraints.html.markdown index 5fda31628c04..306a44e5782a 100644 --- a/website/docs/cdktf/python/d/servicecatalog_portfolio_constraints.html.markdown +++ b/website/docs/cdktf/python/d/servicecatalog_portfolio_constraints.html.markdown @@ -41,6 +41,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `accept_language` - (Optional) Language code. Valid values: `en` (English), `jp` (Japanese), `zh` (Chinese). Default value is `en`. * `product_id` - (Optional) Product identifier. @@ -58,4 +59,4 @@ This data source exports the following attributes in addition to the arguments a * `product_id` - Identifier of the product the constraint applies to. A constraint applies to a specific instance of a product within a certain portfolio. * `type` - Type of constraint. Valid values are `LAUNCH`, `NOTIFICATION`, `STACKSET`, and `TEMPLATE`. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/servicecatalog_product.html.markdown b/website/docs/cdktf/python/d/servicecatalog_product.html.markdown index d85ed2a0203c..a2ccb367fb5f 100644 --- a/website/docs/cdktf/python/d/servicecatalog_product.html.markdown +++ b/website/docs/cdktf/python/d/servicecatalog_product.html.markdown @@ -43,6 +43,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `accept_language` - (Optional) Language code. Valid values are `en` (English), `jp` (Japanese), `zh` (Chinese). The default value is `en`. ## Attribute Reference @@ -63,4 +64,4 @@ This data source exports the following attributes in addition to the arguments a * `tags` - Tags applied to the product. * `type` - Type of product. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/servicecatalog_provisioning_artifacts.html.markdown b/website/docs/cdktf/python/d/servicecatalog_provisioning_artifacts.html.markdown index b788bc535377..62a43e4f4b90 100644 --- a/website/docs/cdktf/python/d/servicecatalog_provisioning_artifacts.html.markdown +++ b/website/docs/cdktf/python/d/servicecatalog_provisioning_artifacts.html.markdown @@ -41,6 +41,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
* `accept_language` - (Optional) Language code. Valid values: `en` (English), `jp` (Japanese), `zh` (Chinese). Default value is `en`. ## Attribute Reference @@ -59,4 +60,4 @@ This data source exports the following attributes in addition to the arguments a * `name` - The name of the provisioning artifact. * `type` - The type of provisioning artifact. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/servicecatalogappregistry_application.html.markdown b/website/docs/cdktf/python/d/servicecatalogappregistry_application.html.markdown index f0075a11a3e1..eb51aa079f49 100644 --- a/website/docs/cdktf/python/d/servicecatalogappregistry_application.html.markdown +++ b/website/docs/cdktf/python/d/servicecatalogappregistry_application.html.markdown @@ -35,8 +35,9 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -The following arguments are required: +This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `id` - (Required) Application identifier. ## Attribute Reference @@ -49,4 +50,4 @@ This data source exports the following attributes in addition to the arguments a * `name` - Name of the application. * `tags` - A map of tags assigned to the Application. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/servicecatalogappregistry_attribute_group.html.markdown b/website/docs/cdktf/python/d/servicecatalogappregistry_attribute_group.html.markdown index 745253c6adfe..2870e4168ded 100644 --- a/website/docs/cdktf/python/d/servicecatalogappregistry_attribute_group.html.markdown +++ b/website/docs/cdktf/python/d/servicecatalogappregistry_attribute_group.html.markdown @@ -24,7 +24,7 @@ from cdktf import TerraformStack # Provider bindings are generated by running `cdktf get`. # See https://cdk.tf/provider-generation for more details. # -from imports.aws. import DataAwsServicecatalogappregistryAttributeGroup +from imports.aws.data_aws_servicecatalogappregistry_attribute_group import DataAwsServicecatalogappregistryAttributeGroup class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) @@ -37,8 +37,8 @@ class MyConvertedCode(TerraformStack): The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). ~> Exactly one of `arn`, `id`, or `name` must be set. - * `arn` - (Optional) ARN of the Attribute Group to find. * `id` - (Optional) ID of the Attribute Group to find. * `name` - (Optional) Name of the Attribute Group to find. @@ -51,4 +51,4 @@ This data source exports the following attributes in addition to the arguments a * `description` - Description of the Attribute Group. * `tags` - A map of tags assigned to the Attribute Group. 
If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/servicecatalogappregistry_attribute_group_associations.html.markdown b/website/docs/cdktf/python/d/servicecatalogappregistry_attribute_group_associations.html.markdown index 8630a33191d7..f0623037f268 100644 --- a/website/docs/cdktf/python/d/servicecatalogappregistry_attribute_group_associations.html.markdown +++ b/website/docs/cdktf/python/d/servicecatalogappregistry_attribute_group_associations.html.markdown @@ -37,17 +37,15 @@ class MyConvertedCode(TerraformStack): The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). ~> Exactly one of `id`or `name` must be set. - * `id` - (Optional) ID of the application to which attribute groups are associated. * `name` - (Optional) Name of the application to which attribute groups are associated. -The following arguments are optional: - ## Attribute Reference This data source exports the following attributes in addition to the arguments above: * `attribute_group_ids` - Set of attribute group IDs this application is associated with. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/servicequotas_service.html.markdown b/website/docs/cdktf/python/d/servicequotas_service.html.markdown index 4ed83cb26b29..200bd6e9aa48 100644 --- a/website/docs/cdktf/python/d/servicequotas_service.html.markdown +++ b/website/docs/cdktf/python/d/servicequotas_service.html.markdown @@ -37,6 +37,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `service_name` - (Required) Service name to lookup within Service Quotas. Available values can be found with the [AWS CLI service-quotas list-services command](https://docs.aws.amazon.com/cli/latest/reference/service-quotas/list-services.html). ## Attribute Reference @@ -46,4 +47,4 @@ This data source exports the following attributes in addition to the arguments a * `id` - Code of the service. * `service_code` - Code of the service. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/servicequotas_service_quota.html.markdown b/website/docs/cdktf/python/d/servicequotas_service_quota.html.markdown index b3409e6ffff9..b64818efefc1 100644 --- a/website/docs/cdktf/python/d/servicequotas_service_quota.html.markdown +++ b/website/docs/cdktf/python/d/servicequotas_service_quota.html.markdown @@ -42,6 +42,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `service_code` - (Required) Service code for the quota. Available values can be found with the [`aws_servicequotas_service` data source](/docs/providers/aws/d/servicequotas_service.html) or [AWS CLI service-quotas list-services command](https://docs.aws.amazon.com/cli/latest/reference/service-quotas/list-services.html). * `quota_code` - (Optional) Quota code within the service. When configured, the data source directly looks up the service quota. Available values can be found with the [AWS CLI service-quotas list-service-quotas command](https://docs.aws.amazon.com/cli/latest/reference/service-quotas/list-service-quotas.html). One of `quota_code` or `quota_name` must be specified. * `quota_name` - (Optional) Quota name within the service. When configured, the data source searches through all service quotas to find the matching quota name. Available values can be found with the [AWS CLI service-quotas list-service-quotas command](https://docs.aws.amazon.com/cli/latest/reference/service-quotas/list-service-quotas.html). One of `quota_name` or `quota_code` must be specified. @@ -69,4 +70,4 @@ This data source exports the following attributes in addition to the arguments a * `metric_statistic_recommendation` - The metric statistic that AWS recommend you use when determining quota usage. * `value` - Current value of the service quota. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/servicequotas_templates.html.markdown b/website/docs/cdktf/python/d/servicequotas_templates.html.markdown index 37a6c91db733..98f134ce99e2 100644 --- a/website/docs/cdktf/python/d/servicequotas_templates.html.markdown +++ b/website/docs/cdktf/python/d/servicequotas_templates.html.markdown @@ -3,14 +3,14 @@ subcategory: "Service Quotas" layout: "aws" page_title: "AWS: aws_servicequotas_templates" description: |- - Terraform data source for managing an AWS Service Quotas Templates. + Terraform data source for managing AWS Service Quotas Templates. --- # Data Source: aws_servicequotas_templates -Terraform data source for managing an AWS Service Quotas Templates. +Terraform data source for managing AWS Service Quotas Templates. ## Example Usage @@ -29,15 +29,16 @@ class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) DataAwsServicequotasTemplates(self, "example", - region="us-east-1" + aws_region="us-east-1" ) ``` ## Argument Reference -The following arguments are required: +This data source supports the following arguments: -* `region` - (Required) AWS Region to which the quota increases apply. +* `aws_region` - (Optional) AWS Region to which the quota increases apply. +* `region` - (Optional, **Deprecated**) AWS Region to which the quota increases apply. Use `aws_region` instead. ## Attribute Reference @@ -51,9 +52,9 @@ This data source exports the following attributes in addition to the arguments a * `quota_name` - Quota name. * `quota_code` - Quota identifier. * `region` - AWS Region to which the template applies. -* `service_code` - (Required) Service identifier. +* `service_code` - Service identifier. * `service_name` - Service name. * `unit` - Unit of measurement. -* `value` - (Required) The new, increased value for the quota. +* `value` - The new, increased value for the quota. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/ses_active_receipt_rule_set.html.markdown b/website/docs/cdktf/python/d/ses_active_receipt_rule_set.html.markdown index 1d70845e28b6..3ff9964df6fb 100644 --- a/website/docs/cdktf/python/d/ses_active_receipt_rule_set.html.markdown +++ b/website/docs/cdktf/python/d/ses_active_receipt_rule_set.html.markdown @@ -31,7 +31,9 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -This data source does not support any arguments. +This data source supports the following arguments: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). ## Attribute Reference @@ -40,4 +42,4 @@ This data source exports the following attributes in addition to the arguments a * `arn` - SES receipt rule set ARN. * `rule_set_name` - Name of the rule set - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/ses_domain_identity.html.markdown b/website/docs/cdktf/python/d/ses_domain_identity.html.markdown index b2dd7038e8b6..a13312c48035 100644 --- a/website/docs/cdktf/python/d/ses_domain_identity.html.markdown +++ b/website/docs/cdktf/python/d/ses_domain_identity.html.markdown @@ -33,7 +33,9 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -This data source does not support any arguments. +This data source supports the following arguments: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
## Attribute Reference @@ -43,4 +45,4 @@ This data source exports the following attributes in addition to the arguments a * `domain` - Name of the domain * `verification_token` - Code which when added to the domain as a TXT record will signal to SES that the owner of the domain has authorized SES to act on their behalf. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/ses_email_identity.html.markdown b/website/docs/cdktf/python/d/ses_email_identity.html.markdown index 47880e1ac236..2129bde48116 100644 --- a/website/docs/cdktf/python/d/ses_email_identity.html.markdown +++ b/website/docs/cdktf/python/d/ses_email_identity.html.markdown @@ -33,7 +33,9 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -This data source does not support any arguments. +This data source supports the following arguments: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). ## Attribute Reference @@ -42,4 +44,4 @@ This data source exports the following attributes in addition to the arguments a * `arn` - The ARN of the email identity. * `email` - Email identity. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/sesv2_configuration_set.html.markdown b/website/docs/cdktf/python/d/sesv2_configuration_set.html.markdown index 26a10b7e3497..2aff77630e65 100644 --- a/website/docs/cdktf/python/d/sesv2_configuration_set.html.markdown +++ b/website/docs/cdktf/python/d/sesv2_configuration_set.html.markdown @@ -35,8 +35,9 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -The following arguments are required: +This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `configuration_set_name` - (Required) The name of the configuration set. ## Attribute Reference @@ -64,4 +65,4 @@ This data source exports the following attributes in addition to the arguments a * `guardian_options` - Specifies additional settings for your VDM configuration as applicable to the Guardian. * `optimized_shared_delivery` - Specifies the status of your VDM optimized shared delivery. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/sesv2_dedicated_ip_pool.html.markdown b/website/docs/cdktf/python/d/sesv2_dedicated_ip_pool.html.markdown index bb8bf384ee82..f763ea567932 100644 --- a/website/docs/cdktf/python/d/sesv2_dedicated_ip_pool.html.markdown +++ b/website/docs/cdktf/python/d/sesv2_dedicated_ip_pool.html.markdown @@ -35,8 +35,9 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -The following arguments are required: +This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `pool_name` - (Required) Name of the dedicated IP pool. ## Attribute Reference @@ -54,4 +55,4 @@ This data source exports the following attributes in addition to the arguments a * `warmup_percentage` - Indicates how complete the dedicated IP warm-up process is. When this value equals `1`, the address has completed the warm-up process and is ready for use. * `warmup_status` - The warm-up status of a dedicated IP address. Valid values: `IN_PROGRESS`, `DONE`. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/sesv2_email_identity.html.markdown b/website/docs/cdktf/python/d/sesv2_email_identity.html.markdown index f50eeb0a55bb..bc31bd256fea 100644 --- a/website/docs/cdktf/python/d/sesv2_email_identity.html.markdown +++ b/website/docs/cdktf/python/d/sesv2_email_identity.html.markdown @@ -35,8 +35,9 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -The following arguments are required: +This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `email_identity` - (Required) The name of the email identity. ## Attribute Reference @@ -53,6 +54,7 @@ This data source exports the following attributes in addition to the arguments a * `tokens` - If you used Easy DKIM to configure DKIM authentication for the domain, then this object contains a set of unique strings that you use to create a set of CNAME records that you add to the DNS configuration for your domain. 
When Amazon SES detects these records in the DNS configuration for your domain, the DKIM authentication process is complete. If you configured DKIM authentication for the domain by providing your own public-private key pair, then this object contains the selector for the public key. * `identity_type` - The email identity type. Valid values: `EMAIL_ADDRESS`, `DOMAIN`. * `tags` - Key-value mapping of resource tags. +* `verification_status` - The verification status of the identity. The status can be one of the following: `PENDING`, `SUCCESS`, `FAILED`, `TEMPORARY_FAILURE`, and `NOT_STARTED`. * `verified_for_sending_status` - Specifies whether or not the identity is verified. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/sesv2_email_identity_mail_from_attributes.html.markdown b/website/docs/cdktf/python/d/sesv2_email_identity_mail_from_attributes.html.markdown index fe5a8d977e2e..88b03c60c701 100644 --- a/website/docs/cdktf/python/d/sesv2_email_identity_mail_from_attributes.html.markdown +++ b/website/docs/cdktf/python/d/sesv2_email_identity_mail_from_attributes.html.markdown @@ -42,8 +42,9 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -The following arguments are required: +This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `email_identity` - (Required) The name of the email identity. ## Attribute Reference @@ -53,4 +54,4 @@ This data source exports the following attributes in addition to the arguments a * `behavior_on_mx_failure` - The action to take if the required MX record isn't found when you send an email. Valid values: `USE_DEFAULT_VALUE`, `REJECT_MESSAGE`. 
* `mail_from_domain` - The custom MAIL FROM domain that you want the verified identity to use. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/sfn_activity.html.markdown b/website/docs/cdktf/python/d/sfn_activity.html.markdown index 3da7edb60730..61642a34b7fb 100644 --- a/website/docs/cdktf/python/d/sfn_activity.html.markdown +++ b/website/docs/cdktf/python/d/sfn_activity.html.markdown @@ -35,6 +35,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Optional) Name that identifies the activity. * `arn` - (Optional) ARN that identifies the activity. @@ -45,4 +46,4 @@ This data source exports the following attributes in addition to the arguments a * `id` - ARN that identifies the activity. * `creation_date` - Date the activity was created. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/sfn_alias.html.markdown b/website/docs/cdktf/python/d/sfn_alias.html.markdown index 4e88faed6ee6..308c7f103247 100644 --- a/website/docs/cdktf/python/d/sfn_alias.html.markdown +++ b/website/docs/cdktf/python/d/sfn_alias.html.markdown @@ -36,8 +36,9 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -The following arguments are required: +This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
* `name` - (Required) Name of the State Machine alias. * `statemachine_arn` - (Required) ARN of the State Machine. @@ -50,4 +51,4 @@ This data source exports the following attributes in addition to the arguments a * `description` - Description of state machine alias. * `routing_configuration` - Routing Configuration of state machine alias - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/sfn_state_machine.html.markdown b/website/docs/cdktf/python/d/sfn_state_machine.html.markdown index d25072df4cb5..f48e44b1d205 100644 --- a/website/docs/cdktf/python/d/sfn_state_machine.html.markdown +++ b/website/docs/cdktf/python/d/sfn_state_machine.html.markdown @@ -37,6 +37,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Friendly name of the state machine to match. ## Attribute Reference @@ -51,4 +52,4 @@ This data source exports the following attributes in addition to the arguments a * `revision_id` - The revision identifier for the state machine. * `status` - Set to the current status of the state machine. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/sfn_state_machine_versions.html.markdown b/website/docs/cdktf/python/d/sfn_state_machine_versions.html.markdown index 8da67deb2cb3..074d97ea8bf9 100644 --- a/website/docs/cdktf/python/d/sfn_state_machine_versions.html.markdown +++ b/website/docs/cdktf/python/d/sfn_state_machine_versions.html.markdown @@ -35,8 +35,9 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -The following arguments are required: +This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `statemachine_arn` - (Required) ARN of the State Machine. ## Attribute Reference @@ -45,4 +46,4 @@ This data source exports the following attributes in addition to the arguments a * `statemachine_versions` - ARN List identifying the statemachine versions. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/signer_signing_job.html.markdown b/website/docs/cdktf/python/d/signer_signing_job.html.markdown index 880f1c8a4a77..3487dfd3c2cd 100644 --- a/website/docs/cdktf/python/d/signer_signing_job.html.markdown +++ b/website/docs/cdktf/python/d/signer_signing_job.html.markdown @@ -35,6 +35,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `job_id` - (Required) ID of the signing job on output. 
## Attribute Reference @@ -57,4 +58,4 @@ This data source exports the following attributes in addition to the arguments a * `status` - Status of the signing job. * `status_reason` - String value that contains the status reason. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/signer_signing_profile.html.markdown b/website/docs/cdktf/python/d/signer_signing_profile.html.markdown index 8d4ddae7df93..49a1299c792d 100644 --- a/website/docs/cdktf/python/d/signer_signing_profile.html.markdown +++ b/website/docs/cdktf/python/d/signer_signing_profile.html.markdown @@ -35,6 +35,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the target signing profile. ## Attribute Reference @@ -46,9 +47,12 @@ This data source exports the following attributes in addition to the arguments a * `platform_id` - ID of the platform that is used by the target signing profile. * `revocation_record` - Revocation information for a signing profile. * `signature_validity_period` - The validity period for a signing job. +* `signing_material` - AWS Certificate Manager certificate that will be used to sign code with the new signing profile. + * `certificate_arn` - ARN of the certificate used for signing. +* `signing_parameters` - Map of key-value pairs for signing. * `status` - Status of the target signing profile. * `tags` - List of tags associated with the signing profile. * `version` - Current version of the signing profile. * `version_arn` - Signing profile ARN, including the profile version. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/sns_topic.html.markdown b/website/docs/cdktf/python/d/sns_topic.html.markdown index 1201fc5b3cef..db46ee81716e 100644 --- a/website/docs/cdktf/python/d/sns_topic.html.markdown +++ b/website/docs/cdktf/python/d/sns_topic.html.markdown @@ -37,6 +37,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Friendly name of the topic to match. ## Attribute Reference @@ -47,4 +48,4 @@ This data source exports the following attributes in addition to the arguments a * `id` - ARN of the found topic, suitable for referencing in other resources that support SNS topics. * `tags` - Map of tags for the resource. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/spot_datafeed_subscription.html.markdown b/website/docs/cdktf/python/d/spot_datafeed_subscription.html.markdown index 17b96bbe7d99..5306c87f3590 100644 --- a/website/docs/cdktf/python/d/spot_datafeed_subscription.html.markdown +++ b/website/docs/cdktf/python/d/spot_datafeed_subscription.html.markdown @@ -33,7 +33,9 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -This data source does not support any arguments. +This data source supports the following arguments: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
## Attribute Reference @@ -42,4 +44,4 @@ This data source exports the following attributes in addition to the arguments a * `bucket` - The name of the Amazon S3 bucket where the spot instance data feed is located. * `prefix` - The prefix for the data feed files. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/sqs_queue.html.markdown b/website/docs/cdktf/python/d/sqs_queue.html.markdown index 12127153678b..ba9c0daeea8c 100644 --- a/website/docs/cdktf/python/d/sqs_queue.html.markdown +++ b/website/docs/cdktf/python/d/sqs_queue.html.markdown @@ -39,6 +39,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the queue to match. ## Attribute Reference @@ -49,4 +50,4 @@ This data source exports the following attributes in addition to the arguments a * `url` - URL of the queue. * `tags` - Map of tags for the resource. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/sqs_queues.html.markdown b/website/docs/cdktf/python/d/sqs_queues.html.markdown index 5928e33e308f..c05641d04057 100644 --- a/website/docs/cdktf/python/d/sqs_queues.html.markdown +++ b/website/docs/cdktf/python/d/sqs_queues.html.markdown @@ -37,6 +37,7 @@ class MyConvertedCode(TerraformStack): The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
* `queue_name_prefix` - (Optional) A string to use for filtering the list results. Only those queues whose name begins with the specified string are returned. Queue URLs and names are case-sensitive. ## Attribute Reference @@ -45,4 +46,4 @@ This data source exports the following attributes in addition to the arguments a * `queue_urls` - A list of queue URLs. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/ssm_document.html.markdown b/website/docs/cdktf/python/d/ssm_document.html.markdown index ca16b466f32a..5b1f42f97739 100644 --- a/website/docs/cdktf/python/d/ssm_document.html.markdown +++ b/website/docs/cdktf/python/d/ssm_document.html.markdown @@ -61,6 +61,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The name of the document. * `document_format` - The format of the document. Valid values: `JSON`, `TEXT`, `YAML`. * `document_version` - The document version. @@ -73,4 +74,4 @@ This data source exports the following attributes in addition to the arguments a * `content` - The content for the SSM document in JSON or YAML format. * `document_type` - The type of the document. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/ssm_instances.html.markdown b/website/docs/cdktf/python/d/ssm_instances.html.markdown index 6a0b482b396a..e22fee64c125 100644 --- a/website/docs/cdktf/python/d/ssm_instances.html.markdown +++ b/website/docs/cdktf/python/d/ssm_instances.html.markdown @@ -39,6 +39,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `filter` - (Optional) Configuration block(s) for filtering. Detailed below. ### filter Configuration Block @@ -54,4 +55,4 @@ This data source exports the following attributes in addition to the arguments a * `ids` - Set of instance IDs of the matched SSM managed instances. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/ssm_maintenance_windows.html.markdown b/website/docs/cdktf/python/d/ssm_maintenance_windows.html.markdown index 837c2b00571e..84208c0f40a7 100644 --- a/website/docs/cdktf/python/d/ssm_maintenance_windows.html.markdown +++ b/website/docs/cdktf/python/d/ssm_maintenance_windows.html.markdown @@ -39,6 +39,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `filter` - (Optional) Configuration block(s) for filtering. Detailed below. 
### filter Configuration Block @@ -54,4 +55,4 @@ This data source exports the following attributes in addition to the arguments a * `ids` - List of window IDs of the matched SSM maintenance windows. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/ssm_parameter.html.markdown b/website/docs/cdktf/python/d/ssm_parameter.html.markdown index 4c59ab68acb2..f0a30a6d88e7 100644 --- a/website/docs/cdktf/python/d/ssm_parameter.html.markdown +++ b/website/docs/cdktf/python/d/ssm_parameter.html.markdown @@ -14,6 +14,8 @@ Provides an SSM Parameter data source. ## Example Usage +### Default + ```python # DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug from constructs import Construct @@ -31,6 +33,25 @@ class MyConvertedCode(TerraformStack): ) ``` +### With version + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.data_aws_ssm_parameter import DataAwsSsmParameter +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + DataAwsSsmParameter(self, "foo", + name="foo:3" + ) +``` + ~> **Note:** The unencrypted value of a SecureString will be stored in the raw state as plain-text. [Read more about sensitive data in state](https://www.terraform.io/docs/state/sensitive-data.html). @@ -40,7 +61,8 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: -* `name` - (Required) Name of the parameter. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `name` - (Required) Name of the parameter. To query by parameter version use `name:version` (e.g., `foo:3`). * `with_decryption` - (Optional) Whether to return decrypted `SecureString` value. Defaults to `true`. ## Attribute Reference @@ -54,4 +76,4 @@ This data source exports the following attributes in addition to the arguments a * `insecure_value` - Value of the parameter. **Use caution:** This value is never marked as sensitive. * `version` - Version of the parameter. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/ssm_parameters_by_path.html.markdown b/website/docs/cdktf/python/d/ssm_parameters_by_path.html.markdown index cc81c270ec94..5b3d03c71f47 100644 --- a/website/docs/cdktf/python/d/ssm_parameters_by_path.html.markdown +++ b/website/docs/cdktf/python/d/ssm_parameters_by_path.html.markdown @@ -39,6 +39,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `path` - (Required) The hierarchy for the parameter. Hierarchies start with a forward slash (/). The hierarchy is the parameter name except the last part of the parameter. The last part of the parameter name can't be in the path. A parameter name hierarchy can have a maximum of 15 levels. **Note:** If the parameter name (e.g., `/my-app/my-param`) is specified, the data source will not retrieve any value as designed, unless there are other parameters that happen to use the former path in their hierarchy (e.g., `/my-app/my-param/my-actual-param`). 
* `with_decryption` - (Optional) Whether to retrieve all parameters in the hierarchy, particularly those of `SecureString` type, with their value decrypted. Defaults to `true`. * `recursive` - (Optional) Whether to retrieve all parameters within the hirerachy. Defaults to `false`. @@ -52,4 +53,4 @@ This data source exports the following attributes in addition to the arguments a * `types` - A list that contains the types (`String`, `StringList`, or `SecureString`) of retrieved parameters. * `values` - A list that contains the retrieved parameter values. **Note:** This value is always marked as sensitive in the Terraform plan output, regardless of whether any retrieved parameters are of `SecureString` type. Use the [`nonsensitive` function](https://developer.hashicorp.com/terraform/language/functions/nonsensitive) to override the behavior at your own risk and discretion, if you are certain that there are no sensitive values being retrieved. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/ssm_patch_baseline.html.markdown b/website/docs/cdktf/python/d/ssm_patch_baseline.html.markdown index 30772992f136..39a9a8626a4d 100644 --- a/website/docs/cdktf/python/d/ssm_patch_baseline.html.markdown +++ b/website/docs/cdktf/python/d/ssm_patch_baseline.html.markdown @@ -65,6 +65,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `default_baseline` - (Optional) Filters the results against the baselines default_baseline field. * `name_prefix` - (Optional) Filter results by the baseline name prefix. * `operating_system` - (Optional) Specified OS for the baseline. 
Valid values: `AMAZON_LINUX`, `AMAZON_LINUX_2`, `UBUNTU`, `REDHAT_ENTERPRISE_LINUX`, `SUSE`, `CENTOS`, `ORACLE_LINUX`, `DEBIAN`, `MACOS`, `RASPBIAN` and `ROCKY_LINUX`. @@ -84,6 +85,7 @@ This data source exports the following attributes in addition to the arguments a * `patch_filter` - Patch filter group that defines the criteria for the rule. * `key` - Key for the filter. * `values` - Value for the filter. +* `available_security_updates_compliance_status` - Indicates the compliance status of managed nodes for which security-related patches are available but were not approved. Supported for Windows Server managed nodes only. * `global_filter` - Set of global filters used to exclude patches from the baseline. * `key` - Key for the filter. * `values` - Value for the filter. @@ -98,4 +100,4 @@ This data source exports the following attributes in addition to the arguments a * `name` - Name specified to identify the patch source. * `products` - Specific operating system versions a patch repository applies to. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/ssm_patch_baselines.html.markdown b/website/docs/cdktf/python/d/ssm_patch_baselines.html.markdown index 88d4c2039461..458990a4dc73 100644 --- a/website/docs/cdktf/python/d/ssm_patch_baselines.html.markdown +++ b/website/docs/cdktf/python/d/ssm_patch_baselines.html.markdown @@ -24,7 +24,7 @@ from cdktf import TerraformStack # Provider bindings are generated by running `cdktf get`. # See https://cdk.tf/provider-generation for more details. # -from imports.aws. import DataAwsSsmPatchBaselines +from imports.aws.data_aws_ssm_patch_baselines import DataAwsSsmPatchBaselines class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) @@ -41,18 +41,18 @@ from cdktf import TerraformStack # Provider bindings are generated by running `cdktf get`. # See https://cdk.tf/provider-generation for more details. # -from imports.aws. 
import DataAwsSsmPatchBaselines +from imports.aws.data_aws_ssm_patch_baselines import DataAwsSsmPatchBaselines class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) DataAwsSsmPatchBaselines(self, "example", - filter=[{ - "key": "OWNER", - "values": ["AWS"] - }, { - "key": "OPERATING_SYSTEM", - "values": ["WINDOWS"] - } + filter=[DataAwsSsmPatchBaselinesFilter( + key="OWNER", + values=["AWS"] + ), DataAwsSsmPatchBaselinesFilter( + key="OPERATING_SYSTEM", + values=["WINDOWS"] + ) ] ) ``` @@ -61,6 +61,7 @@ class MyConvertedCode(TerraformStack): The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `filter` - (Optional) Key-value pairs used to filter the results. See [`filter`](#filter-argument-reference) below. * `default_baselines` - (Optional) Only return baseline identities where `default_baseline` is `true`. @@ -83,4 +84,4 @@ This data source exports the following attributes in addition to the arguments a * `default_baseline` - Indicates whether this is the default baseline. AWS Systems Manager supports creating multiple default patch baselines. For example, you can create a default patch baseline for each operating system. * `operating_system` - Operating system the patch baseline applies to. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/ssmcontacts_contact.html.markdown b/website/docs/cdktf/python/d/ssmcontacts_contact.html.markdown index bbfbf777b279..df224a72810a 100644 --- a/website/docs/cdktf/python/d/ssmcontacts_contact.html.markdown +++ b/website/docs/cdktf/python/d/ssmcontacts_contact.html.markdown @@ -35,8 +35,9 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -The following arguments are required: +This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `arn` - (Required) The Amazon Resource Name (ARN) of the contact or escalation plan. ## Attribute Reference @@ -48,4 +49,4 @@ This data source exports the following attributes in addition to the arguments a * `display_name` - Full friendly name of the contact or escalation plan. * `tags` - Map of tags to assign to the resource. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/ssmcontacts_contact_channel.html.markdown b/website/docs/cdktf/python/d/ssmcontacts_contact_channel.html.markdown index 51f87a898da3..9afbca7a535a 100644 --- a/website/docs/cdktf/python/d/ssmcontacts_contact_channel.html.markdown +++ b/website/docs/cdktf/python/d/ssmcontacts_contact_channel.html.markdown @@ -35,8 +35,9 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -The following arguments are required: +This data source supports the following arguments: +- `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). - `arn` - Amazon Resource Name (ARN) of the contact channel. ## Attribute Reference @@ -44,13 +45,9 @@ The following arguments are required: This data source exports the following attributes in addition to the arguments above: - `activation_status` - Whether the contact channel is activated. - - `contact_id` - Amazon Resource Name (ARN) of the AWS SSM Contact that the contact channel belongs to. - - `delivery_address` - Details used to engage the contact channel. - - `name` - Name of the contact channel. - - `type` - Type of the contact channel. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/ssmcontacts_plan.html.markdown b/website/docs/cdktf/python/d/ssmcontacts_plan.html.markdown index bc0a3a5b15cc..b25422075e18 100644 --- a/website/docs/cdktf/python/d/ssmcontacts_plan.html.markdown +++ b/website/docs/cdktf/python/d/ssmcontacts_plan.html.markdown @@ -35,8 +35,9 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -The following arguments are required: +This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `contact_id` - (Required) The Amazon Resource Name (ARN) of the contact or escalation plan. ## Attribute Reference @@ -45,4 +46,4 @@ This data source exports the following attributes in addition to the arguments a * `stage` - List of stages. A contact has an engagement plan with stages that contact specified contact channels. An escalation plan uses stages that contact specified contacts. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/ssmcontacts_rotation.html.markdown b/website/docs/cdktf/python/d/ssmcontacts_rotation.html.markdown index 9d60e09e917d..80a41ac6b94b 100644 --- a/website/docs/cdktf/python/d/ssmcontacts_rotation.html.markdown +++ b/website/docs/cdktf/python/d/ssmcontacts_rotation.html.markdown @@ -35,8 +35,9 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -The following arguments are required: +This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `arn` - (Required) The Amazon Resource Name (ARN) of the rotation. ## Attribute Reference @@ -50,4 +51,4 @@ This data source exports the following attributes in addition to the arguments a * `start_time` - The date and time, in RFC 3339 format, that the rotation goes into effect. * `tags` - A map of tags to assign to the resource. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/ssmincidents_replication_set.html.markdown b/website/docs/cdktf/python/d/ssmincidents_replication_set.html.markdown index a635d5ba591d..0f38aaf0af66 100644 --- a/website/docs/cdktf/python/d/ssmincidents_replication_set.html.markdown +++ b/website/docs/cdktf/python/d/ssmincidents_replication_set.html.markdown @@ -42,14 +42,16 @@ This data source does not support any arguments. This data source exports the following attributes in addition to the arguments above: * `arn` - The Amazon Resource Name (ARN) of the replication set. -* `tags` - All tags applied to the replication set. * `created_by` - The ARN of the user who created the replication set. 
* `deletion_protected` - If `true`, the last remaining Region in a replication set can’t be deleted. * `last_modified_by` - The ARN of the user who last modified the replication set. +* `region` - (**Deprecated**) The replication set's Regions. Use `regions` instead. +* `regions` - The replication set's Regions. * `status` - The overall status of a replication set. * Valid Values: `ACTIVE` | `CREATING` | `UPDATING` | `DELETING` | `FAILED` +* `tags` - All tags applied to the replication set. -The `region` configuration block exports the following attributes for each Region: +The `regions` configuration block exports the following attributes for each Region: * `name` - The name of the Region. * `kms_key_arn` - The ARN of the AWS Key Management Service (AWS KMS) encryption key. @@ -57,4 +59,4 @@ The `region` configuration block exports the following attributes for each Regio * Valid Values: `ACTIVE` | `CREATING` | `UPDATING` | `DELETING` | `FAILED` * `status_message` - More information about the status of a Region. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/ssmincidents_response_plan.html.markdown b/website/docs/cdktf/python/d/ssmincidents_response_plan.html.markdown index ab5892539959..d820071fb1ac 100644 --- a/website/docs/cdktf/python/d/ssmincidents_response_plan.html.markdown +++ b/website/docs/cdktf/python/d/ssmincidents_response_plan.html.markdown @@ -37,8 +37,9 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -The following arguments are required: +This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `arn` - (Required) The Amazon Resource Name (ARN) of the response plan. 
## Attribute Reference @@ -87,4 +88,4 @@ The `integration` configuration block exports the following attributes: * `service_id` - The ID of the PagerDuty service that the response plan associates with an incident when it launches. * `secret_id` - The ID of the AWS Secrets Manager secret that stores your PagerDuty key — either a General Access REST API Key or User Token REST API Key — and other user credentials. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/ssoadmin_application.html.markdown b/website/docs/cdktf/python/d/ssoadmin_application.html.markdown index cf36ce241c1c..250c900b39f4 100644 --- a/website/docs/cdktf/python/d/ssoadmin_application.html.markdown +++ b/website/docs/cdktf/python/d/ssoadmin_application.html.markdown @@ -35,8 +35,9 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -The following arguments are required: +This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `application_arn` - (Required) ARN of the application. ## Attribute Reference @@ -52,4 +53,4 @@ This data source exports the following attributes in addition to the arguments a * `portal_options` - Options for the portal associated with an application. See the `aws_ssoadmin_application` [resource documentation](../r/ssoadmin_application.html.markdown#portal_options-argument-reference). The attributes are the same. * `status` - Status of the application. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/ssoadmin_application_assignments.html.markdown b/website/docs/cdktf/python/d/ssoadmin_application_assignments.html.markdown index ace12553c08f..df8ad2061747 100644 --- a/website/docs/cdktf/python/d/ssoadmin_application_assignments.html.markdown +++ b/website/docs/cdktf/python/d/ssoadmin_application_assignments.html.markdown @@ -29,14 +29,15 @@ class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) DataAwsSsoadminApplicationAssignments(self, "example", - application_arn=Token.as_string(aws_ssoadmin_application_example.application_arn) + application_arn=Token.as_string(aws_ssoadmin_application_example.arn) ) ``` ## Argument Reference -The following arguments are required: +This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `application_arn` - (Required) ARN of the application. ## Attribute Reference @@ -51,4 +52,4 @@ This data source exports the following attributes in addition to the arguments a * `principal_id` - An identifier for an object in IAM Identity Center, such as a user or group. * `principal_type` - Entity type for which the assignment will be created. Valid values are `USER` or `GROUP`. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/ssoadmin_application_providers.html.markdown b/website/docs/cdktf/python/d/ssoadmin_application_providers.html.markdown index 57c1aca2d77f..6f914fa9d0a0 100644 --- a/website/docs/cdktf/python/d/ssoadmin_application_providers.html.markdown +++ b/website/docs/cdktf/python/d/ssoadmin_application_providers.html.markdown @@ -33,7 +33,9 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -This data source does not support any arguments. +This data source supports the following arguments: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). ## Attribute Reference @@ -54,4 +56,4 @@ This data source exports the following attributes in addition to the arguments a * `display_name` - Name of the application provider. * `icon_url` - URL that points to an icon that represents the application provider. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/ssoadmin_instances.html.markdown b/website/docs/cdktf/python/d/ssoadmin_instances.html.markdown index 5409c5093de4..ad43e1c84902 100644 --- a/website/docs/cdktf/python/d/ssoadmin_instances.html.markdown +++ b/website/docs/cdktf/python/d/ssoadmin_instances.html.markdown @@ -37,7 +37,9 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -This data source does not support any arguments. +This data source supports the following arguments: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). ## Attribute Reference @@ -47,4 +49,4 @@ This data source exports the following attributes in addition to the arguments a * `id` - AWS Region. * `identity_store_ids` - Set of identifiers of the identity stores connected to the SSO Instances. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/ssoadmin_permission_set.html.markdown b/website/docs/cdktf/python/d/ssoadmin_permission_set.html.markdown index 7c0cd7a76fbd..873c7c864144 100644 --- a/website/docs/cdktf/python/d/ssoadmin_permission_set.html.markdown +++ b/website/docs/cdktf/python/d/ssoadmin_permission_set.html.markdown @@ -45,12 +45,13 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: -~> **NOTE:** Either `arn` or `name` must be configured. - +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `arn` - (Optional) ARN of the permission set. * `instance_arn` - (Required) ARN of the SSO Instance associated with the permission set. * `name` - (Optional) Name of the SSO Permission Set. +~> **NOTE:** Either `arn` or `name` must be configured. + ## Attribute Reference This data source exports the following attributes in addition to the arguments above: @@ -61,4 +62,4 @@ This data source exports the following attributes in addition to the arguments a * `session_duration` - Length of time that the application user sessions are valid in the ISO-8601 standard. * `tags` - Key-value map of resource tags. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/ssoadmin_permission_sets.html.markdown b/website/docs/cdktf/python/d/ssoadmin_permission_sets.html.markdown index 050c69e1fd09..5c1ccbb817df 100644 --- a/website/docs/cdktf/python/d/ssoadmin_permission_sets.html.markdown +++ b/website/docs/cdktf/python/d/ssoadmin_permission_sets.html.markdown @@ -41,8 +41,9 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -The following arguments are required: +This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `instance_arn` - (Required) ARN of the SSO Instance associated with the permission set. ## Attribute Reference @@ -51,4 +52,4 @@ This data source exports the following attributes in addition to the arguments a * `arns` - Set of string contain the ARN of all Permission Sets. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/ssoadmin_principal_application_assignments.html.markdown b/website/docs/cdktf/python/d/ssoadmin_principal_application_assignments.html.markdown index dd54fcb0eecf..bff13faafc0e 100644 --- a/website/docs/cdktf/python/d/ssoadmin_principal_application_assignments.html.markdown +++ b/website/docs/cdktf/python/d/ssoadmin_principal_application_assignments.html.markdown @@ -37,8 +37,9 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -The following arguments are required: +This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `instance_arn` - (Required) ARN of the instance of IAM Identity Center. * `principal_id` - (Required) An identifier for an object in IAM Identity Center, such as a user or group. * `principal_type` - (Required) Entity type for which the assignment will be created. Valid values are `USER` or `GROUP`. @@ -55,4 +56,4 @@ This data source exports the following attributes in addition to the arguments a * `principal_id` - An identifier for an object in IAM Identity Center, such as a user or group. * `principal_type` - Entity type for which the assignment will be created. Valid values are `USER` or `GROUP`. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/storagegateway_local_disk.html.markdown b/website/docs/cdktf/python/d/storagegateway_local_disk.html.markdown index 61207a566a3f..8799db73e9de 100644 --- a/website/docs/cdktf/python/d/storagegateway_local_disk.html.markdown +++ b/website/docs/cdktf/python/d/storagegateway_local_disk.html.markdown @@ -36,6 +36,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `gateway_arn` - (Required) ARN of the gateway. * `disk_node` - (Optional) Device node of the local disk to retrieve. For example, `/dev/sdb`. * `disk_path` - (Optional) Device path of the local disk to retrieve. For example, `/dev/xvdb` or `/dev/nvme1n1`. 
@@ -47,4 +48,4 @@ This data source exports the following attributes in addition to the arguments a * `disk_id` - Disk identifierE.g., `pci-0000:03:00.0-scsi-0:0:0:0` * `id` - Disk identifierE.g., `pci-0000:03:00.0-scsi-0:0:0:0` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/subnet.html.markdown b/website/docs/cdktf/python/d/subnet.html.markdown index 05a16d7e1293..01693d7c3b30 100644 --- a/website/docs/cdktf/python/d/subnet.html.markdown +++ b/website/docs/cdktf/python/d/subnet.html.markdown @@ -78,6 +78,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `availability_zone` - (Optional) Availability zone where the subnet must reside. * `availability_zone_id` - (Optional) ID of the Availability Zone for the subnet. This argument is not supported in all regions or partitions. If necessary, use `availability_zone` instead. * `cidr_block` - (Optional) CIDR block of the desired subnet. 
@@ -124,4 +125,4 @@ This data source exports the following attributes in addition to the arguments a - `read` - (Default `20m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/subnets.html.markdown b/website/docs/cdktf/python/d/subnets.html.markdown index 1df3d9fd579b..a4e925b4fa2b 100644 --- a/website/docs/cdktf/python/d/subnets.html.markdown +++ b/website/docs/cdktf/python/d/subnets.html.markdown @@ -98,6 +98,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `filter` - (Optional) Custom filter block as described below. * `tags` - (Optional) Map of tags, each pair of which must exactly match a pair on the desired subnets. @@ -148,4 +149,4 @@ This data source exports the following attributes in addition to the arguments a - `read` - (Default `20m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/synthetics_runtime_version.html.markdown b/website/docs/cdktf/python/d/synthetics_runtime_version.html.markdown index f1e203f516bb..4eab77567a53 100644 --- a/website/docs/cdktf/python/d/synthetics_runtime_version.html.markdown +++ b/website/docs/cdktf/python/d/synthetics_runtime_version.html.markdown @@ -61,6 +61,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
* `latest` - (Optional) Whether the latest version of the runtime should be fetched. Conflicts with `version`. Valid values: `true`. * `version` - (Optional) Version of the runtime to be fetched (for example, `9.0`). Conflicts with `latest`. @@ -74,4 +75,4 @@ This data source exports the following attributes in addition to the arguments a * `release_date` - Date that the runtime version was released. * `version_name` - Name of the runtime version. For a list of valid runtime versions, see [Canary Runtime Versions](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch_Synthetics_Canaries_Library.html). - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/synthetics_runtime_versions.html.markdown b/website/docs/cdktf/python/d/synthetics_runtime_versions.html.markdown index 365d379450e9..850d8a5f3fc9 100644 --- a/website/docs/cdktf/python/d/synthetics_runtime_versions.html.markdown +++ b/website/docs/cdktf/python/d/synthetics_runtime_versions.html.markdown @@ -33,7 +33,9 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -This data source does not support any arguments. +This data source supports the following arguments: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). ## Attribute Reference @@ -50,4 +52,4 @@ This data source exports the following attributes in addition to the arguments a * `version_name` - Name of the runtime version. For a list of valid runtime versions, see [Canary Runtime Versions](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch_Synthetics_Canaries_Library.html). 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/timestreamwrite_database.html.markdown b/website/docs/cdktf/python/d/timestreamwrite_database.html.markdown index 629cba4a33ef..390063e0a69d 100644 --- a/website/docs/cdktf/python/d/timestreamwrite_database.html.markdown +++ b/website/docs/cdktf/python/d/timestreamwrite_database.html.markdown @@ -35,9 +35,10 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -The following arguments are required: +This data source supports the following arguments: -* `database_name` – (Required) The name of the Timestream database. Minimum length of 3. Maximum length of 256. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `database_name` - (Required) The name of the Timestream database. Minimum length of 3. Maximum length of 256. ## Attribute Reference @@ -45,9 +46,9 @@ This data source exports the following attributes in addition to the arguments a * `arn` - The ARN that uniquely identifies this database. * `created_time` - Creation time of database. -* `database_name` – (Required) The name of the Timestream database. Minimum length of 3. Maximum length of 256. +* `database_name` - (Required) The name of the Timestream database. Minimum length of 3. Maximum length of 256. * `kms_key_id` - The ARN of the KMS key used to encrypt the data stored in the database. * `last_updated_time` - Last time database was updated. * `table_count` - Total number of tables in the Timestream database. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/timestreamwrite_table.html.markdown b/website/docs/cdktf/python/d/timestreamwrite_table.html.markdown index 626686fb5bfb..f0b1250de256 100644 --- a/website/docs/cdktf/python/d/timestreamwrite_table.html.markdown +++ b/website/docs/cdktf/python/d/timestreamwrite_table.html.markdown @@ -36,8 +36,9 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -The following arguments are required: +This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `database_name` - (Required) Name of the Timestream database. * `name` - (Required) Name of the Timestream table. @@ -67,4 +68,4 @@ This data source exports the following attributes in addition to the arguments a * `name` - Name of the table. * `table_status` - Current state of table. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/transfer_connector.html.markdown b/website/docs/cdktf/python/d/transfer_connector.html.markdown index 6b0376102f93..2507c854ed93 100644 --- a/website/docs/cdktf/python/d/transfer_connector.html.markdown +++ b/website/docs/cdktf/python/d/transfer_connector.html.markdown @@ -35,6 +35,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
* `id` - (Required) Unique identifier for connector ## Attribute Reference @@ -64,4 +65,4 @@ This data source exports the following attributes in addition to the arguments a * `value` - Values associated with the tags key. * `url` - URL of the partner's AS2 or SFTP endpoint. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/transfer_server.html.markdown b/website/docs/cdktf/python/d/transfer_server.html.markdown index 04707628baf2..155cf0ee6519 100644 --- a/website/docs/cdktf/python/d/transfer_server.html.markdown +++ b/website/docs/cdktf/python/d/transfer_server.html.markdown @@ -36,6 +36,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `server_id` - (Required) ID for an SFTP server. ## Attribute Reference @@ -56,4 +57,4 @@ This data source exports the following attributes in addition to the arguments a * `tags` - Map of tags assigned to the resource. * `url` - URL of the service endpoint used to authenticate users with an `identity_provider_type` of `API_GATEWAY`. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/verifiedpermissions_policy_store.html.markdown b/website/docs/cdktf/python/d/verifiedpermissions_policy_store.html.markdown index 2469a1b01922..32fb1b61a972 100644 --- a/website/docs/cdktf/python/d/verifiedpermissions_policy_store.html.markdown +++ b/website/docs/cdktf/python/d/verifiedpermissions_policy_store.html.markdown @@ -35,8 +35,9 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -The following arguments are required: +This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `id` - (Required) The ID of the Policy Store. ## Attribute Reference @@ -45,8 +46,9 @@ This data source exports the following attributes in addition to the arguments a * `arn` - The ARN of the Policy Store. * `created_date` - The date the Policy Store was created. +* `deletion_protection` - Whether the policy store can be deleted. * `last_updated_date` - The date the Policy Store was last updated. * `tags` - Map of key-value pairs associated with the policy store. * `validation_settings` - Validation settings for the policy store. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/vpc.html.markdown b/website/docs/cdktf/python/d/vpc.html.markdown index 713dd7b915e4..4f23c9575e27 100644 --- a/website/docs/cdktf/python/d/vpc.html.markdown +++ b/website/docs/cdktf/python/d/vpc.html.markdown @@ -53,6 +53,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `cidr_block` - (Optional) Cidr block of the desired VPC. * `dhcp_options_id` - (Optional) DHCP options id of the desired VPC. * `default` - (Optional) Boolean constraint on whether the desired VPC is @@ -104,4 +105,4 @@ The following attribute is additionally exported: - `read` - (Default `20m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/vpc_dhcp_options.html.markdown b/website/docs/cdktf/python/d/vpc_dhcp_options.html.markdown index 7daa21496033..04753085eb12 100644 --- a/website/docs/cdktf/python/d/vpc_dhcp_options.html.markdown +++ b/website/docs/cdktf/python/d/vpc_dhcp_options.html.markdown @@ -63,6 +63,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `dhcp_options_id` - (Optional) EC2 DHCP Options ID. * `filter` - (Optional) List of custom filters as described below. 
@@ -97,4 +98,4 @@ This data source exports the following attributes in addition to the arguments a - `read` - (Default `20m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/vpc_endpoint.html.markdown b/website/docs/cdktf/python/d/vpc_endpoint.html.markdown index 0cb81ddb49af..32d18712494a 100644 --- a/website/docs/cdktf/python/d/vpc_endpoint.html.markdown +++ b/website/docs/cdktf/python/d/vpc_endpoint.html.markdown @@ -42,6 +42,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `filter` - (Optional) Custom filter block as described below. * `id` - (Optional) ID of the specific VPC Endpoint to retrieve. * `service_name` - (Optional) Service name of the specific VPC Endpoint to retrieve. For AWS services the service name is usually in the form `com.amazonaws..` (the SageMaker AI Notebook service is an exception to this rule, the service name is in the form `aws.sagemaker..notebook`). 
@@ -102,4 +103,4 @@ DNS options (for `dns_options`) support the following attributes: - `read` - (Default `20m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/vpc_endpoint_associations.html.markdown b/website/docs/cdktf/python/d/vpc_endpoint_associations.html.markdown index 5b240ffe0634..8cb64de94d18 100644 --- a/website/docs/cdktf/python/d/vpc_endpoint_associations.html.markdown +++ b/website/docs/cdktf/python/d/vpc_endpoint_associations.html.markdown @@ -35,8 +35,9 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -The following arguments are required: +This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `vpc_endpoint_id` - ID of the specific VPC Endpoint to retrieve. ## Attribute Reference @@ -72,4 +73,4 @@ DNS blocks (for `private_dns_entry`) support the following attributes: * `dns_name` - DNS name. * `hosted_zone_id` - ID of the private hosted zone. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/vpc_endpoint_service.html.markdown b/website/docs/cdktf/python/d/vpc_endpoint_service.html.markdown index b02fd678c7ff..0d2c2b1419bf 100644 --- a/website/docs/cdktf/python/d/vpc_endpoint_service.html.markdown +++ b/website/docs/cdktf/python/d/vpc_endpoint_service.html.markdown @@ -118,8 +118,9 @@ This data source exports the following attributes in addition to the arguments a * `owner` - AWS account ID of the service owner or `amazon`. * `private_dns_name` - Private DNS name for the service. * `private_dns_names` - Private DNS names assigned to the VPC endpoint service. -* `region` - Region of the endpoint service. 
+* `region` - (**Deprecated**) Region of the endpoint service. Use `service_region` instead. * `service_id` - ID of the endpoint service. +* `service_region` - Region of the endpoint service. * `supported_ip_address_types` - The supported IP address types. * `tags` - Map of tags assigned to the resource. * `vpc_endpoint_policy_supported` - Whether or not the service supports endpoint policies - `true` or `false`. @@ -130,4 +131,4 @@ This data source exports the following attributes in addition to the arguments a - `read` - (Default `20m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/vpc_ipam.html.markdown b/website/docs/cdktf/python/d/vpc_ipam.html.markdown index 4a5bb762a6b8..a2371aacb4bc 100644 --- a/website/docs/cdktf/python/d/vpc_ipam.html.markdown +++ b/website/docs/cdktf/python/d/vpc_ipam.html.markdown @@ -24,7 +24,7 @@ from cdktf import TerraformStack # Provider bindings are generated by running `cdktf get`. # See https://cdk.tf/provider-generation for more details. # -from imports.aws. import DataAwsVpcIpam +from imports.aws.data_aws_vpc_ipam import DataAwsVpcIpam class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) @@ -35,8 +35,9 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -The following arguments are required: +This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `id` - (Required) ID of the IPAM. ## Attribute Reference @@ -50,6 +51,7 @@ This data source exports the following attributes in addition to the arguments a * `enable_private_gua` - If private GUA is enabled. * `id` - ID of the IPAM resource. 
* `ipam_region` - Region that the IPAM exists in. +* `metered_account` - AWS account that is charged for active IP addresses managed in IPAM. * `operating_regions` - Regions that the IPAM is configured to operate in. * `owner_id` - ID of the account that owns this IPAM. * `private_default_scope_id` - ID of the default private scope. @@ -61,4 +63,4 @@ This data source exports the following attributes in addition to the arguments a * `tier` - IPAM Tier. * `tags` - Tags of the IPAM resource. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/vpc_ipam_pool.html.markdown b/website/docs/cdktf/python/d/vpc_ipam_pool.html.markdown index c35e5d025219..255272ca6082 100644 --- a/website/docs/cdktf/python/d/vpc_ipam_pool.html.markdown +++ b/website/docs/cdktf/python/d/vpc_ipam_pool.html.markdown @@ -57,6 +57,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `ipam_pool_id` - (Optional) ID of the IPAM pool you would like information on. * `filter` - (Optional) Custom filter block as described below. 
@@ -91,4 +92,4 @@ This data source exports the following attributes in addition to the arguments a - `read` - (Default `20m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/vpc_ipam_pool_cidrs.html.markdown b/website/docs/cdktf/python/d/vpc_ipam_pool_cidrs.html.markdown index 2035ce7cf6b6..39daaab5a5fe 100644 --- a/website/docs/cdktf/python/d/vpc_ipam_pool_cidrs.html.markdown +++ b/website/docs/cdktf/python/d/vpc_ipam_pool_cidrs.html.markdown @@ -91,6 +91,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `ipam_pool_id` - ID of the IPAM pool you would like the list of provisioned CIDRs. * `filter` - Custom filter block as described below. @@ -119,4 +120,4 @@ This data source exports the following attributes in addition to the arguments a - `read` - (Default `1m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/vpc_ipam_pools.html.markdown b/website/docs/cdktf/python/d/vpc_ipam_pools.html.markdown index 911a0214f953..47865414243e 100644 --- a/website/docs/cdktf/python/d/vpc_ipam_pools.html.markdown +++ b/website/docs/cdktf/python/d/vpc_ipam_pools.html.markdown @@ -46,6 +46,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
* `filter` - (Required) Custom filter block as described below. The arguments of this data source act as filters for querying the available IPAM Pools in the current region. @@ -81,4 +82,4 @@ The following attributes are available on each pool entry found. * `source_ipam_pool_id` - ID of the source IPAM pool. * `tags` - Map of tags to assigned to the resource. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/vpc_ipam_preview_next_cidr.html.markdown b/website/docs/cdktf/python/d/vpc_ipam_preview_next_cidr.html.markdown index 2fb804b36b2b..af41446f8337 100644 --- a/website/docs/cdktf/python/d/vpc_ipam_preview_next_cidr.html.markdown +++ b/website/docs/cdktf/python/d/vpc_ipam_preview_next_cidr.html.markdown @@ -52,6 +52,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `disallowed_cidrs` - (Optional) Exclude a particular CIDR range from being returned by the pool. * `ipam_pool_id` - (Required) ID of the pool to which you want to assign a CIDR. * `netmask_length` - (Optional) Netmask length of the CIDR you would like to preview from the IPAM pool. 
@@ -69,4 +70,4 @@ This data source exports the following attributes in addition to the arguments a - `read` - (Default `20m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/vpc_ipams.html.markdown b/website/docs/cdktf/python/d/vpc_ipams.html.markdown index 5aba47d42fda..f2b2f29e7fbe 100644 --- a/website/docs/cdktf/python/d/vpc_ipams.html.markdown +++ b/website/docs/cdktf/python/d/vpc_ipams.html.markdown @@ -83,6 +83,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `ipam_ids` - (Optional) IDs of the IPAM resources to query for. * `filter` - (Optional) Custom filter block as described below. @@ -122,4 +123,4 @@ This data source exports the following attributes in addition to the arguments a * `state_message` - State message of the IPAM. * `tier` - IPAM Tier. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/vpc_peering_connection.html.markdown b/website/docs/cdktf/python/d/vpc_peering_connection.html.markdown index e37936505df2..248fd2b885fd 100644 --- a/website/docs/cdktf/python/d/vpc_peering_connection.html.markdown +++ b/website/docs/cdktf/python/d/vpc_peering_connection.html.markdown @@ -52,11 +52,9 @@ This data source supports the following arguments: * `vpc_id` - (Optional) ID of the requester VPC of the specific VPC Peering Connection to retrieve. * `owner_id` - (Optional) AWS account ID of the owner of the requester VPC of the specific VPC Peering Connection to retrieve. * `cidr_block` - (Optional) Primary CIDR block of the requester VPC of the specific VPC Peering Connection to retrieve. 
-* `region` - (Optional) Region of the requester VPC of the specific VPC Peering Connection to retrieve. * `peer_vpc_id` - (Optional) ID of the accepter VPC of the specific VPC Peering Connection to retrieve. * `peer_owner_id` - (Optional) AWS account ID of the owner of the accepter VPC of the specific VPC Peering Connection to retrieve. * `peer_cidr_block` - (Optional) Primary CIDR block of the accepter VPC of the specific VPC Peering Connection to retrieve. -* `peer_region` - (Optional) Region of the accepter VPC of the specific VPC Peering Connection to retrieve. * `filter` - (Optional) Custom filter block as described below. * `tags` - (Optional) Map of tags, each pair of which must exactly match a pair on the desired VPC Peering Connection. @@ -70,7 +68,6 @@ More complex filters can be expressed using one or more `filter` sub-blocks, whi * `name` - (Required) Name of the field to filter by, as defined by [the underlying AWS API](http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeVpcPeeringConnections.html). - * `values` - (Required) Set of values that are accepted for the given field. A VPC Peering Connection will be selected if any one of the given values matches. @@ -84,8 +81,11 @@ This data source exports the following attributes in addition to the arguments a * `ipv6_cidr_block_set` - List of objects with IPv6 CIDR blocks of the requester VPC. * `peer_cidr_block_set` - List of objects with IPv4 CIDR blocks of the accepter VPC. * `peer_ipv6_cidr_block_set` - List of objects with IPv6 CIDR blocks of the accepter VPC. +* `peer_region` - Region of the accepter VPC. +* `region` - (**Deprecated**) Region of the requester VPC. Use `requester_region` instead. * `requester` - Configuration block that describes [VPC Peering Connection] (https://docs.aws.amazon.com/vpc/latest/peering/what-is-vpc-peering.html) options set for the requester VPC. +* `requester_region` - Region of the requester VPC. 
#### Accepter and Requester Attribute Reference @@ -102,4 +102,4 @@ private IP addresses when queried from instances in a peer VPC. - `read` - (Default `20m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/vpc_peering_connections.html.markdown b/website/docs/cdktf/python/d/vpc_peering_connections.html.markdown index e2805efce7df..3b48ef234dd4 100644 --- a/website/docs/cdktf/python/d/vpc_peering_connections.html.markdown +++ b/website/docs/cdktf/python/d/vpc_peering_connections.html.markdown @@ -53,6 +53,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `filter` - (Optional) Custom filter block as described below. * `tags` - (Optional) Mapping of tags, each pair of which must exactly match a pair on the desired VPC Peering Connection. @@ -81,4 +82,4 @@ This data source exports the following attributes in addition to the arguments a - `read` - (Default `20m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/vpc_security_group_rule.html.markdown b/website/docs/cdktf/python/d/vpc_security_group_rule.html.markdown index b4fb6b75d69e..4c5bac000653 100644 --- a/website/docs/cdktf/python/d/vpc_security_group_rule.html.markdown +++ b/website/docs/cdktf/python/d/vpc_security_group_rule.html.markdown @@ -35,6 +35,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `security_group_rule_id` - (Optional) ID of the security group rule to select. * `filter` - (Optional) Configuration block(s) for filtering. Detailed below. @@ -64,4 +65,4 @@ This data source exports the following attributes in addition to the arguments a * `tags` - A map of tags assigned to the resource. * `to_port` - (Optional) The end of port range for the TCP and UDP protocols, or an ICMP/ICMPv6 code. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/vpc_security_group_rules.html.markdown b/website/docs/cdktf/python/d/vpc_security_group_rules.html.markdown index bab194b0a8a5..701ceea0d04e 100644 --- a/website/docs/cdktf/python/d/vpc_security_group_rules.html.markdown +++ b/website/docs/cdktf/python/d/vpc_security_group_rules.html.markdown @@ -39,6 +39,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `filter` - (Optional) Custom filter block as described below. * `tags` - (Optional) Map of tags, each pair of which must exactly match a pair on the desired security group rule. @@ -59,4 +60,4 @@ This data source exports the following attributes in addition to the arguments a * `ids` - List of all the security group rule IDs found. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/vpclattice_auth_policy.html.markdown b/website/docs/cdktf/python/d/vpclattice_auth_policy.html.markdown index b3418f427c64..5a3d96a971fd 100644 --- a/website/docs/cdktf/python/d/vpclattice_auth_policy.html.markdown +++ b/website/docs/cdktf/python/d/vpclattice_auth_policy.html.markdown @@ -35,8 +35,9 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -The following arguments are required: +This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `resource_identifier` - (Required) The ID or Amazon Resource Name (ARN) of the service network or service for which the policy is created. ## Attribute Reference @@ -46,4 +47,4 @@ This data source exports the following attributes in addition to the arguments a * `policy` - The auth policy. The policy string in JSON must not contain newlines or blank lines. * `state` - The state of the auth policy. The auth policy is only active when the auth type is set to AWS_IAM. If you provide a policy, then authentication and authorization decisions are made based on this policy and the client's IAM policy. If the Auth type is NONE, then, any auth policy you provide will remain inactive. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/vpclattice_listener.html.markdown b/website/docs/cdktf/python/d/vpclattice_listener.html.markdown index 1ef4b8a1e16e..100cb95d798f 100644 --- a/website/docs/cdktf/python/d/vpclattice_listener.html.markdown +++ b/website/docs/cdktf/python/d/vpclattice_listener.html.markdown @@ -36,8 +36,9 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -The following arguments are required: +This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `service_identifier` - (Required) ID or Amazon Resource Name (ARN) of the service network * `listener_identifier` - (Required) ID or Amazon Resource Name (ARN) of the listener @@ -57,4 +58,4 @@ This data source exports the following attributes in addition to the arguments a * `service_id` - The ID of the service. * `tags` - List of tags associated with the listener. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/vpclattice_resource_policy.html.markdown b/website/docs/cdktf/python/d/vpclattice_resource_policy.html.markdown index ffce8540e4ab..a3ab9a00dc19 100644 --- a/website/docs/cdktf/python/d/vpclattice_resource_policy.html.markdown +++ b/website/docs/cdktf/python/d/vpclattice_resource_policy.html.markdown @@ -35,8 +35,9 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -The following arguments are required: +This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `resource_arn` - (Required) Resource ARN of the resource for which a policy is retrieved. ## Attribute Reference @@ -45,4 +46,4 @@ This data source exports the following attributes in addition to the arguments a * `policy` - JSON-encoded string representation of the applied resource policy. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/vpclattice_service.html.markdown b/website/docs/cdktf/python/d/vpclattice_service.html.markdown index e02ff5d55d58..cfe3dcfd96b6 100644 --- a/website/docs/cdktf/python/d/vpclattice_service.html.markdown +++ b/website/docs/cdktf/python/d/vpclattice_service.html.markdown @@ -37,6 +37,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Optional) Service name. * `service_identifier` - (Optional) ID or Amazon Resource Name (ARN) of the service. @@ -55,4 +56,4 @@ This data source exports the following attributes in addition to the arguments a * `status` - Status of the service. * `tags` - List of tags associated with the service. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/vpclattice_service_network.html.markdown b/website/docs/cdktf/python/d/vpclattice_service_network.html.markdown index 9e9550be163e..ff0d3fe3c51f 100644 --- a/website/docs/cdktf/python/d/vpclattice_service_network.html.markdown +++ b/website/docs/cdktf/python/d/vpclattice_service_network.html.markdown @@ -37,6 +37,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `service_network_identifier` - (Required) Identifier of the service network. ## Attribute Reference @@ -52,4 +53,4 @@ This data source exports the following attributes in addition to the arguments a * `number_of_associated_services` - Number of services associated with this service network. * `number_of_associated_vpcs` - Number of VPCs associated with this service network. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/vpcs.html.markdown b/website/docs/cdktf/python/d/vpcs.html.markdown index 013b8a607d6b..e0998c4fd45b 100644 --- a/website/docs/cdktf/python/d/vpcs.html.markdown +++ b/website/docs/cdktf/python/d/vpcs.html.markdown @@ -93,6 +93,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
* `tags` - (Optional) Map of tags, each pair of which must exactly match a pair on the desired vpcs. * `filter` - (Optional) Custom filter block as described below. @@ -119,4 +120,4 @@ This data source exports the following attributes in addition to the arguments a - `read` - (Default `20m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/vpn_gateway.html.markdown b/website/docs/cdktf/python/d/vpn_gateway.html.markdown index a42f2765e4e6..f730c407f212 100644 --- a/website/docs/cdktf/python/d/vpn_gateway.html.markdown +++ b/website/docs/cdktf/python/d/vpn_gateway.html.markdown @@ -43,6 +43,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `id` - (Optional) ID of the specific VPN Gateway to retrieve. * `state` - (Optional) State of the specific VPN Gateway to retrieve. * `availability_zone` - (Optional) Availability Zone of the specific VPN Gateway to retrieve. @@ -74,4 +75,4 @@ This data source exports no additional attributes. - `read` - (Default `20m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/wafregional_ipset.html.markdown b/website/docs/cdktf/python/d/wafregional_ipset.html.markdown index 95ccc6cdc22c..7eaab7a5ff7c 100644 --- a/website/docs/cdktf/python/d/wafregional_ipset.html.markdown +++ b/website/docs/cdktf/python/d/wafregional_ipset.html.markdown @@ -35,6 +35,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the WAF Regional IP set. ## Attribute Reference @@ -43,4 +44,4 @@ This data source exports the following attributes in addition to the arguments a * `id` - ID of the WAF Regional IP set. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/wafregional_rate_based_rule.html.markdown b/website/docs/cdktf/python/d/wafregional_rate_based_rule.html.markdown index 0f88bb952912..0568817b6700 100644 --- a/website/docs/cdktf/python/d/wafregional_rate_based_rule.html.markdown +++ b/website/docs/cdktf/python/d/wafregional_rate_based_rule.html.markdown @@ -35,6 +35,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the WAF Regional rate based rule. ## Attribute Reference @@ -43,4 +44,4 @@ This data source exports the following attributes in addition to the arguments a * `id` - ID of the WAF Regional rate based rule. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/wafregional_rule.html.markdown b/website/docs/cdktf/python/d/wafregional_rule.html.markdown index b86bc6990faf..96d6ca98ef87 100644 --- a/website/docs/cdktf/python/d/wafregional_rule.html.markdown +++ b/website/docs/cdktf/python/d/wafregional_rule.html.markdown @@ -35,6 +35,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the WAF Regional rule. ## Attribute Reference @@ -43,4 +44,4 @@ This data source exports the following attributes in addition to the arguments a * `id` - ID of the WAF Regional rule. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/wafregional_subscribed_rule_group.html.markdown b/website/docs/cdktf/python/d/wafregional_subscribed_rule_group.html.markdown index 6bf00fe37268..ac4f744f120e 100644 --- a/website/docs/cdktf/python/d/wafregional_subscribed_rule_group.html.markdown +++ b/website/docs/cdktf/python/d/wafregional_subscribed_rule_group.html.markdown @@ -54,6 +54,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Optional) Name of the WAF rule group. * `metric_name` - (Optional) Name of the WAF rule group. 
@@ -65,4 +66,4 @@ This data source exports the following attributes in addition to the arguments a * `id` - ID of the WAF rule group. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/wafregional_web_acl.html.markdown b/website/docs/cdktf/python/d/wafregional_web_acl.html.markdown index 55ec368847f5..3a744c5a287b 100644 --- a/website/docs/cdktf/python/d/wafregional_web_acl.html.markdown +++ b/website/docs/cdktf/python/d/wafregional_web_acl.html.markdown @@ -35,6 +35,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the WAF Regional Web ACL. ## Attribute Reference @@ -43,4 +44,4 @@ This data source exports the following attributes in addition to the arguments a * `id` - ID of the WAF Regional Web ACL. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/wafv2_ip_set.html.markdown b/website/docs/cdktf/python/d/wafv2_ip_set.html.markdown index 28157bb5262d..53dfede9d7ae 100644 --- a/website/docs/cdktf/python/d/wafv2_ip_set.html.markdown +++ b/website/docs/cdktf/python/d/wafv2_ip_set.html.markdown @@ -36,6 +36,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the WAFv2 IP Set. 
* `scope` - (Required) Specifies whether this is for an AWS CloudFront distribution or for a regional application. Valid values are `CLOUDFRONT` or `REGIONAL`. To work with CloudFront, you must also specify the region `us-east-1` (N. Virginia) on the AWS provider. @@ -49,4 +50,4 @@ This data source exports the following attributes in addition to the arguments a * `id` - Unique identifier for the set. * `ip_address_version` - IP address version of the set. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/wafv2_regex_pattern_set.html.markdown b/website/docs/cdktf/python/d/wafv2_regex_pattern_set.html.markdown index 9e4e2849e01d..503270ed08ae 100644 --- a/website/docs/cdktf/python/d/wafv2_regex_pattern_set.html.markdown +++ b/website/docs/cdktf/python/d/wafv2_regex_pattern_set.html.markdown @@ -36,6 +36,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the WAFv2 Regex Pattern Set. * `scope` - (Required) Specifies whether this is for an AWS CloudFront distribution or for a regional application. Valid values are `CLOUDFRONT` or `REGIONAL`. To work with CloudFront, you must also specify the region `us-east-1` (N. Virginia) on the AWS provider. @@ -54,4 +55,4 @@ Each `regular_expression` supports the following argument: * `regex_string` - (Required) String representing the regular expression, see the AWS WAF [documentation](https://docs.aws.amazon.com/waf/latest/developerguide/waf-regex-pattern-set-creating.html) for more information. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/wafv2_rule_group.html.markdown b/website/docs/cdktf/python/d/wafv2_rule_group.html.markdown index e745cf683305..87fd2cf82723 100644 --- a/website/docs/cdktf/python/d/wafv2_rule_group.html.markdown +++ b/website/docs/cdktf/python/d/wafv2_rule_group.html.markdown @@ -36,6 +36,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the WAFv2 Rule Group. * `scope` - (Required) Specifies whether this is for an AWS CloudFront distribution or for a regional application. Valid values are `CLOUDFRONT` or `REGIONAL`. To work with CloudFront, you must also specify the region `us-east-1` (N. Virginia) on the AWS provider. @@ -47,4 +48,4 @@ This data source exports the following attributes in addition to the arguments a * `description` - Description of the rule group that helps with identification. * `id` - Unique identifier of the rule group. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/wafv2_web_acl.html.markdown b/website/docs/cdktf/python/d/wafv2_web_acl.html.markdown index 7b3a17b56639..fc054c899e3c 100644 --- a/website/docs/cdktf/python/d/wafv2_web_acl.html.markdown +++ b/website/docs/cdktf/python/d/wafv2_web_acl.html.markdown @@ -14,6 +14,8 @@ Retrieves the summary of a WAFv2 Web ACL. ## Example Usage +### Lookup by name + ```python # DO NOT EDIT. 
Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug from constructs import Construct @@ -32,11 +34,37 @@ class MyConvertedCode(TerraformStack): ) ``` +### Lookup by associated resource + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.data_aws_wafv2_web_acl import DataAwsWafv2WebAcl +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + DataAwsWafv2WebAcl(self, "alb_example", + resource_arn="arn:aws:elasticloadbalancing:us-east-1:123456789012:loadbalancer/app/my-alb/xxxxx", + scope="REGIONAL" + ) + DataAwsWafv2WebAcl(self, "cloudfront_example", + resource_arn="arn:aws:cloudfront::123456789012:distribution/XXX", + scope="CLOUDFRONT" + ) +``` + ## Argument Reference This data source supports the following arguments: -* `name` - (Required) Name of the WAFv2 Web ACL. +* `name` - (Optional) Name of the WAFv2 Web ACL. Exactly one of `name` or `resource_arn` must be specified. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `resource_arn` - (Optional) ARN of the AWS resource associated with the Web ACL. This can be an ARN of an Application Load Balancer, Amazon API Gateway REST API, AWS AppSync GraphQL API, Amazon Cognito user pool, AWS App Runner service, AWS Verified Access instance, or AWS Amplify application. Exactly one of `name` or `resource_arn` must be specified. 
* `scope` - (Required) Specifies whether this is for an AWS CloudFront distribution or for a regional application. Valid values are `CLOUDFRONT` or `REGIONAL`. To work with CloudFront, you must also specify the region `us-east-1` (N. Virginia) on the AWS provider. ## Attribute Reference @@ -47,4 +75,4 @@ This data source exports the following attributes in addition to the arguments a * `description` - Description of the WebACL that helps with identification. * `id` - Unique identifier of the WebACL. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/workspaces_bundle.html.markdown b/website/docs/cdktf/python/d/workspaces_bundle.html.markdown index cb7ff5eea7b7..e3f5b4635c21 100644 --- a/website/docs/cdktf/python/d/workspaces_bundle.html.markdown +++ b/website/docs/cdktf/python/d/workspaces_bundle.html.markdown @@ -57,21 +57,22 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: -* `bundle_id` – (Optional) ID of the bundle. -* `owner` – (Optional) Owner of the bundles. You have to leave it blank for own bundles. You cannot combine this parameter with `bundle_id`. -* `name` – (Optional) Name of the bundle. You cannot combine this parameter with `bundle_id`. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `bundle_id` - (Optional) ID of the bundle. +* `owner` - (Optional) Owner of the bundles. You have to leave it blank for own bundles. You cannot combine this parameter with `bundle_id`. +* `name` - (Optional) Name of the bundle. You cannot combine this parameter with `bundle_id`. 
## Attribute Reference This data source exports the following attributes in addition to the arguments above: -* `description` – The description of the bundle. -* `bundle_id` – The ID of the bundle. -* `name` – The name of the bundle. -* `owner` – The owner of the bundle. -* `compute_type` – The compute type. See supported fields below. -* `root_storage` – The root volume. See supported fields below. -* `user_storage` – The user storage. See supported fields below. +* `description` - The description of the bundle. +* `bundle_id` - The ID of the bundle. +* `name` - The name of the bundle. +* `owner` - The owner of the bundle. +* `compute_type` - The compute type. See supported fields below. +* `root_storage` - The root volume. See supported fields below. +* `user_storage` - The user storage. See supported fields below. ### `compute_type` @@ -85,4 +86,4 @@ This data source exports the following attributes in addition to the arguments a * `capacity` - Size of the user storage. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/workspaces_directory.html.markdown b/website/docs/cdktf/python/d/workspaces_directory.html.markdown index 5448d2575dc1..e180979469f6 100644 --- a/website/docs/cdktf/python/d/workspaces_directory.html.markdown +++ b/website/docs/cdktf/python/d/workspaces_directory.html.markdown @@ -35,6 +35,7 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `directory_id` - (Required) Directory identifier for registration in WorkSpaces service. 
## Attribute Reference @@ -43,8 +44,8 @@ This data source exports the following attributes in addition to the arguments a * `id` - WorkSpaces directory identifier. * `active_directory_config` - Configuration for Active Directory integration when `workspace_type` is set to `POOLS`. - * `domain_name` – Fully qualified domain name of the AWS Directory Service directory. - * `service_account_secret_arn` – ARN of the Secrets Manager secret that contains the credentials for the service account. + * `domain_name` - Fully qualified domain name of the AWS Directory Service directory. + * `service_account_secret_arn` - ARN of the Secrets Manager secret that contains the credentials for the service account. * `alias` - Directory alias. * `customer_user_name` - User name for the service account. * `directory_name` - Name of the directory. @@ -53,33 +54,33 @@ This data source exports the following attributes in addition to the arguments a * `iam_role_id` - Identifier of the IAM role. This is the role that allows Amazon WorkSpaces to make calls to other services, such as Amazon EC2, on your behalf. * `ip_group_ids` - Identifiers of the IP access control groups associated with the directory. * `registration_code` - Registration code for the directory. This is the code that users enter in their Amazon WorkSpaces client application to connect to the directory. -* `self_service_permissions` – The permissions to enable or disable self-service capabilities. - * `change_compute_type` – Whether WorkSpaces directory users can change the compute type (bundle) for their workspace. - * `increase_volume_size` – Whether WorkSpaces directory users can increase the volume size of the drives on their workspace. - * `rebuild_workspace` – Whether WorkSpaces directory users can rebuild the operating system of a workspace to its original state. - * `restart_workspace` – Whether WorkSpaces directory users can restart their workspace. 
- * `switch_running_mode` – Whether WorkSpaces directory users can switch the running mode of their workspace. +* `self_service_permissions` - The permissions to enable or disable self-service capabilities. + * `change_compute_type` - Whether WorkSpaces directory users can change the compute type (bundle) for their workspace. + * `increase_volume_size` - Whether WorkSpaces directory users can increase the volume size of the drives on their workspace. + * `rebuild_workspace` - Whether WorkSpaces directory users can rebuild the operating system of a workspace to its original state. + * `restart_workspace` - Whether WorkSpaces directory users can restart their workspace. + * `switch_running_mode` - Whether WorkSpaces directory users can switch the running mode of their workspace. * `subnet_ids` - Identifiers of the subnets where the directory resides. -* `tags` – A map of tags assigned to the WorkSpaces directory. +* `tags` - A map of tags assigned to the WorkSpaces directory. * `user_identity_type` - The user identity type for the WorkSpaces directory. -* `workspace_access_properties` – Specifies which devices and operating systems users can use to access their WorkSpaces. - * `device_type_android` – (Optional) Indicates whether users can use Android devices to access their WorkSpaces. - * `device_type_chromeos` – (Optional) Indicates whether users can use Chromebooks to access their WorkSpaces. - * `device_type_ios` – (Optional) Indicates whether users can use iOS devices to access their WorkSpaces. - * `device_type_linux` – (Optional) Indicates whether users can use Linux clients to access their WorkSpaces. - * `device_type_osx` – (Optional) Indicates whether users can use macOS clients to access their WorkSpaces. - * `device_type_web` – (Optional) Indicates whether users can access their WorkSpaces through a web browser. - * `device_type_windows` – (Optional) Indicates whether users can use Windows clients to access their WorkSpaces. 
- * `device_type_zeroclient` – (Optional) Indicates whether users can use zero client devices to access their WorkSpaces. -* `workspace_creation_properties` – The default properties that are used for creating WorkSpaces. - * `custom_security_group_id` – The identifier of your custom security group. Should relate to the same VPC, where workspaces reside in. - * `default_ou` – The default organizational unit (OU) for your WorkSpace directories. - * `enable_internet_access` – Indicates whether internet access is enabled for your WorkSpaces. - * `enable_maintenance_mode` – Indicates whether maintenance mode is enabled for your WorkSpaces. For more information, see [WorkSpace Maintenance](https://docs.aws.amazon.com/workspaces/latest/adminguide/workspace-maintenance.html). - * `user_enabled_as_local_administrator` – Indicates whether users are local administrators of their WorkSpaces. +* `workspace_access_properties` - Specifies which devices and operating systems users can use to access their WorkSpaces. + * `device_type_android` - (Optional) Indicates whether users can use Android devices to access their WorkSpaces. + * `device_type_chromeos` - (Optional) Indicates whether users can use Chromebooks to access their WorkSpaces. + * `device_type_ios` - (Optional) Indicates whether users can use iOS devices to access their WorkSpaces. + * `device_type_linux` - (Optional) Indicates whether users can use Linux clients to access their WorkSpaces. + * `device_type_osx` - (Optional) Indicates whether users can use macOS clients to access their WorkSpaces. + * `device_type_web` - (Optional) Indicates whether users can access their WorkSpaces through a web browser. + * `device_type_windows` - (Optional) Indicates whether users can use Windows clients to access their WorkSpaces. + * `device_type_zeroclient` - (Optional) Indicates whether users can use zero client devices to access their WorkSpaces. 
+* `workspace_creation_properties` - The default properties that are used for creating WorkSpaces. + * `custom_security_group_id` - The identifier of your custom security group. Should relate to the same VPC, where workspaces reside in. + * `default_ou` - The default organizational unit (OU) for your WorkSpace directories. + * `enable_internet_access` - Indicates whether internet access is enabled for your WorkSpaces. + * `enable_maintenance_mode` - Indicates whether maintenance mode is enabled for your WorkSpaces. For more information, see [WorkSpace Maintenance](https://docs.aws.amazon.com/workspaces/latest/adminguide/workspace-maintenance.html). + * `user_enabled_as_local_administrator` - Indicates whether users are local administrators of their WorkSpaces. * `workspace_directory_description` - The description of the WorkSpaces directory when `workspace_type` is set to `POOLS`. * `workspace_directory_name` - The name of the WorkSpaces directory when `workspace_type` is set to `POOLS`. * `workspace_security_group_id` - The identifier of the security group that is assigned to new WorkSpaces. * `workspace_type` - The type of WorkSpaces directory. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/workspaces_image.html.markdown b/website/docs/cdktf/python/d/workspaces_image.html.markdown index 1dd64d61c8aa..7eddb5f3e5c9 100644 --- a/website/docs/cdktf/python/d/workspaces_image.html.markdown +++ b/website/docs/cdktf/python/d/workspaces_image.html.markdown @@ -35,16 +35,17 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: -* `image_id` – (Required) ID of the image. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
+* `image_id` - (Required) ID of the image. ## Attribute Reference This data source exports the following attributes in addition to the arguments above: -* `name` – The name of the image. -* `description` – The description of the image. -* `os` – The operating system that the image is running. -* `required_tenancy` – Specifies whether the image is running on dedicated hardware. When Bring Your Own License (BYOL) is enabled, this value is set to DEDICATED. For more information, see [Bring Your Own Windows Desktop Images](https://docs.aws.amazon.com/workspaces/latest/adminguide/byol-windows-images.html). -* `state` – The status of the image. +* `name` - The name of the image. +* `description` - The description of the image. +* `os` - The operating system that the image is running. +* `required_tenancy` - Specifies whether the image is running on dedicated hardware. When Bring Your Own License (BYOL) is enabled, this value is set to DEDICATED. For more information, see [Bring Your Own Windows Desktop Images](https://docs.aws.amazon.com/workspaces/latest/adminguide/byol-windows-images.html). +* `state` - The status of the image. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/workspaces_workspace.html.markdown b/website/docs/cdktf/python/d/workspaces_workspace.html.markdown index b024c6212a1a..f8d1e567bc88 100644 --- a/website/docs/cdktf/python/d/workspaces_workspace.html.markdown +++ b/website/docs/cdktf/python/d/workspaces_workspace.html.markdown @@ -8,7 +8,7 @@ description: |- -# Resource: aws_workspaces_workspace +# Data Source: aws_workspaces_workspace Use this data source to get information about a workspace in [AWS Workspaces](https://docs.aws.amazon.com/workspaces/latest/adminguide/amazon-workspaces.html) Service. 
@@ -57,23 +57,24 @@ class MyConvertedCode(TerraformStack): This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `bundle_id` - (Optional) ID of the bundle for the WorkSpace. * `directory_id` - (Optional) ID of the directory for the WorkSpace. You have to specify `user_name` along with `directory_id`. You cannot combine this parameter with `workspace_id`. * `root_volume_encryption_enabled` - (Optional) Indicates whether the data stored on the root volume is encrypted. * `tags` - (Optional) Tags for the WorkSpace. -* `user_name` – (Optional) User name of the user for the WorkSpace. This user name must exist in the directory for the WorkSpace. You cannot combine this parameter with `workspace_id`. -* `user_volume_encryption_enabled` – (Optional) Indicates whether the data stored on the user volume is encrypted. -* `volume_encryption_key` – (Optional) Symmetric AWS KMS customer master key (CMK) used to encrypt data stored on your WorkSpace. Amazon WorkSpaces does not support asymmetric CMKs. +* `user_name` - (Optional) User name of the user for the WorkSpace. This user name must exist in the directory for the WorkSpace. You cannot combine this parameter with `workspace_id`. +* `user_volume_encryption_enabled` - (Optional) Indicates whether the data stored on the user volume is encrypted. +* `volume_encryption_key` - (Optional) Symmetric AWS KMS customer master key (CMK) used to encrypt data stored on your WorkSpace. Amazon WorkSpaces does not support asymmetric CMKs. * `workspace_id` - (Optional) ID of the WorkSpace. You cannot combine this parameter with `directory_id`. -* `workspace_properties` – (Optional) WorkSpace properties. 
+* `workspace_properties` - (Optional) WorkSpace properties. `workspace_properties` supports the following: -* `compute_type_name` – (Optional) Compute type. For more information, see [Amazon WorkSpaces Bundles](http://aws.amazon.com/workspaces/details/#Amazon_WorkSpaces_Bundles). Valid values are `VALUE`, `STANDARD`, `PERFORMANCE`, `POWER`, `GRAPHICS`, `POWERPRO` and `GRAPHICSPRO`. -* `root_volume_size_gib` – (Optional) Size of the root volume. -* `running_mode` – (Optional) Running mode. For more information, see [Manage the WorkSpace Running Mode](https://docs.aws.amazon.com/workspaces/latest/adminguide/running-mode.html). Valid values are `AUTO_STOP` and `ALWAYS_ON`. -* `running_mode_auto_stop_timeout_in_minutes` – (Optional) Time after a user logs off when WorkSpaces are automatically stopped. Configured in 60-minute intervals. -* `user_volume_size_gib` – (Optional) Size of the user storage. +* `compute_type_name` - (Optional) Compute type. For more information, see [Amazon WorkSpaces Bundles](http://aws.amazon.com/workspaces/details/#Amazon_WorkSpaces_Bundles). Valid values are `VALUE`, `STANDARD`, `PERFORMANCE`, `POWER`, `GRAPHICS`, `POWERPRO` and `GRAPHICSPRO`. +* `root_volume_size_gib` - (Optional) Size of the root volume. +* `running_mode` - (Optional) Running mode. For more information, see [Manage the WorkSpace Running Mode](https://docs.aws.amazon.com/workspaces/latest/adminguide/running-mode.html). Valid values are `AUTO_STOP` and `ALWAYS_ON`. +* `running_mode_auto_stop_timeout_in_minutes` - (Optional) Time after a user logs off when WorkSpaces are automatically stopped. Configured in 60-minute intervals. +* `user_volume_size_gib` - (Optional) Size of the user storage. ## Attribute Reference @@ -84,4 +85,4 @@ This data source exports the following attributes in addition to the arguments a * `computer_name` - Name of the WorkSpace, as seen by the operating system. * `state` - Operational state of the WorkSpace. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/ephemeral-resources/cognito_identity_openid_token_for_developer_identity.markdown b/website/docs/cdktf/python/ephemeral-resources/cognito_identity_openid_token_for_developer_identity.markdown index aa72df322c11..52e232ba3760 100644 --- a/website/docs/cdktf/python/ephemeral-resources/cognito_identity_openid_token_for_developer_identity.markdown +++ b/website/docs/cdktf/python/ephemeral-resources/cognito_identity_openid_token_for_developer_identity.markdown @@ -7,13 +7,12 @@ description: |- --- - # Ephemeral: aws_cognito_identity_openid_token_for_developer_identity Terraform ephemeral resource for managing an AWS Cognito Identity Open ID Token for Developer Identity. -~> Ephemeral resources are a new feature and may evolve as we continue to explore their most effective uses. [Learn more](https://developer.hashicorp.com/terraform/language/v1.10.x/resources/ephemeral). +~> Ephemeral resources are a new feature and may evolve as we continue to explore their most effective uses. [Learn more](https://developer.hashicorp.com/terraform/language/resources/ephemeral). ## Example Usage @@ -38,12 +37,14 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -The following arguments are required: +This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `identity_pool_id` - (Required) An identity pool ID in the format REGION:GUID. The following arguments are optional: +* `region` – (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `identity_id` - (Optional) A unique identifier in the format REGION:GUID. * `logins` - (Optional) A set of optional name-value pairs that map provider names to provider tokens. Each name-value pair represents a user from a public provider or developer provider. If the user is from a developer provider, the name-value pair will follow the syntax `"developer_provider_name": "developer_user_identifier"`. The developer provider is the "domain" by which Cognito will refer to your users; you provided this domain while creating/updating the identity pool. The developer user identifier is an identifier from your backend that uniquely identifies a user. When you create an identity pool, you can specify the supported logins. @@ -58,4 +59,4 @@ This resource exports the following attributes in addition to the arguments abov * `token` - An OpenID token. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/ephemeral-resources/eks_cluster_auth.html.markdown b/website/docs/cdktf/python/ephemeral-resources/eks_cluster_auth.html.markdown index 6fcab61ba842..9fc5c7745563 100644 --- a/website/docs/cdktf/python/ephemeral-resources/eks_cluster_auth.html.markdown +++ b/website/docs/cdktf/python/ephemeral-resources/eks_cluster_auth.html.markdown @@ -12,7 +12,7 @@ description: |- Retrieve an authentication token to communicate with an EKS cluster. -~> **NOTE:** Ephemeral resources are a new feature and may evolve as we continue to explore their most effective uses. [Learn more](https://developer.hashicorp.com/terraform/language/v1.10.x/resources/ephemeral). +~> **NOTE:** Ephemeral resources are a new feature and may evolve as we continue to explore their most effective uses. [Learn more](https://developer.hashicorp.com/terraform/language/resources/ephemeral). 
## Example Usage @@ -54,6 +54,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the EKS cluster. ## Attribute Reference @@ -62,4 +63,4 @@ This resource exports the following attributes in addition to the arguments abov * `token` - Token to use to authenticate with the cluster. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/ephemeral-resources/kms_secrets.html.markdown b/website/docs/cdktf/python/ephemeral-resources/kms_secrets.html.markdown index 10762623698c..9a0d63e1cc8d 100644 --- a/website/docs/cdktf/python/ephemeral-resources/kms_secrets.html.markdown +++ b/website/docs/cdktf/python/ephemeral-resources/kms_secrets.html.markdown @@ -12,7 +12,7 @@ description: |- Decrypt multiple secrets from data encrypted with the AWS KMS service. -~> **NOTE:** Ephemeral resources are a new feature and may evolve as we continue to explore their most effective uses. [Learn more](https://developer.hashicorp.com/terraform/language/v1.10.x/resources/ephemeral). +~> **NOTE:** Ephemeral resources are a new feature and may evolve as we continue to explore their most effective uses. [Learn more](https://developer.hashicorp.com/terraform/language/resources/ephemeral). ## Example Usage @@ -50,6 +50,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `secret` - (Required) One or more encrypted payload definitions from the KMS service. See the Secret Definitions below. ### Secret Definitions @@ -72,4 +73,4 @@ This resource exports the following attributes in addition to the arguments abov * `plaintext` - Map containing each `secret` `name` as the key with its decrypted plaintext value - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/ephemeral-resources/lambda_invocation.html.markdown b/website/docs/cdktf/python/ephemeral-resources/lambda_invocation.html.markdown index 85ea3d8b3ba6..24629fcff433 100644 --- a/website/docs/cdktf/python/ephemeral-resources/lambda_invocation.html.markdown +++ b/website/docs/cdktf/python/ephemeral-resources/lambda_invocation.html.markdown @@ -3,34 +3,187 @@ subcategory: "Lambda" layout: "aws" page_title: "AWS: aws_lambda_invocation" description: |- - Invoke AWS Lambda Function + Invokes an AWS Lambda Function as an ephemeral resource. --- # Ephemeral: aws_lambda_invocation -Use this ephemeral resource to invoke a Lambda function. The lambda function is invoked with the [RequestResponse](https://docs.aws.amazon.com/lambda/latest/dg/API_Invoke.html#API_Invoke_RequestSyntax) invocation type. +Invokes an AWS Lambda Function as an ephemeral resource. Use this ephemeral resource to execute Lambda functions during Terraform operations without persisting results in state, ideal for generating sensitive data or performing lightweight operations. -~> **NOTE:** Ephemeral resources are a new feature and may evolve as we continue to explore their most effective uses. [Learn more](https://developer.hashicorp.com/terraform/language/v1.10.x/resources/ephemeral). 
+The Lambda function is invoked with [RequestResponse](https://docs.aws.amazon.com/lambda/latest/dg/API_Invoke.html#API_Invoke_RequestSyntax) invocation type. -~> **NOTE:** The `aws_lambda_invocation` ephemeral resource invokes the function during every `plan` and `apply` when the function is known. A common use case for this functionality is when invoking a lightweight function—where repeated invocations are acceptable—that produces sensitive information you do not want to store in the state. +~> **Note:** Ephemeral resources are a new feature and may evolve as we continue to explore their most effective uses. [Learn more](https://developer.hashicorp.com/terraform/language/resources/ephemeral). -~> **NOTE:** If you get a `KMSAccessDeniedException: Lambda was unable to decrypt the environment variables because KMS access was denied` error when invoking an [`aws_lambda_function`](/docs/providers/aws/r/lambda_function.html) with environment variables, the IAM role associated with the function may have been deleted and recreated _after_ the function was created. You can fix the problem two ways: 1) updating the function's role to another role and then updating it back again to the recreated role, or 2) by using Terraform to `taint` the function and `apply` your configuration again to recreate the function. (When you create a function, Lambda grants permissions on the KMS key to the function's IAM role. If the IAM role is recreated, the grant is no longer valid. Changing the function's role or recreating the function causes Lambda to update the grant.) +~> **Note:** The `aws_lambda_invocation` ephemeral resource invokes the function during every `plan` and `apply` when the function is known. A common use case for this functionality is when invoking a lightweight function—where repeated invocations are acceptable—that produces sensitive information you do not want to store in the state. 
+ +~> **Note:** If you get a `KMSAccessDeniedException: Lambda was unable to decrypt the environment variables because KMS access was denied` error when invoking a Lambda function with environment variables, the IAM role associated with the function may have been deleted and recreated after the function was created. You can fix the problem two ways: 1) updating the function's role to another role and then updating it back again to the recreated role, or 2) by using Terraform to `taint` the function and `apply` your configuration again to recreate the function. (When you create a function, Lambda grants permissions on the KMS key to the function's IAM role. If the IAM role is recreated, the grant is no longer valid. Changing the function's role or recreating the function causes Lambda to update the grant.) ## Example Usage -### Basic Example +### Generate Sensitive Configuration ```python # DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug from constructs import Construct -from cdktf import TerraformOutput, Fn, TerraformStack +from cdktf import VariableType, TerraformVariable, TerraformOutput, Fn, Token, TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.ssm_parameter import SsmParameter +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + # Terraform Variables are not always the best fit for getting inputs in the context of Terraform CDK. 
+ # You can read more about this at https://cdk.tf/variables + environment = TerraformVariable(self, "environment", + description="The environment name (e.g., dev, prod)", + type=VariableType.STRING + ) + TerraformOutput(self, "key_generated", + value="API key generated and stored in Parameter Store" + ) + SsmParameter(self, "api_key", + name="/app/${" + environment.value + "}/api-key", + tags={ + "Environment": environment.string_value, + "Generated": "ephemeral-lambda" + }, + type="SecureString", + value=Token.as_string( + Fn.lookup_nested( + Fn.jsondecode(aws_lambda_invocation.secret_generator.result), ["api_key"])) + ) +``` + +### Dynamic Resource Configuration + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import Fn, Token, TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.autoscaling_group import AutoscalingGroup +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + sizing = Fn.jsondecode(aws_lambda_invocation.resource_calculator.result) + AutoscalingGroup(self, "example", + desired_capacity=Token.as_number( + Fn.lookup_nested(sizing, ["desired_instances"])), + health_check_type="ELB", + launch_template=AutoscalingGroupLaunchTemplate( + id=Token.as_string(aws_launch_template_example.id), + version="$Latest" + ), + max_size=Token.as_number(Fn.lookup_nested(sizing, ["max_instances"])), + min_size=Token.as_number(Fn.lookup_nested(sizing, ["min_instances"])), + name="optimized-asg", + tag=[AutoscalingGroupTag( + key="OptimizedBy", + propagate_at_launch=True, + value="ephemeral-lambda" + ) + ], + target_group_arns=[Token.as_string(aws_lb_target_group_example.arn)], + vpc_zone_identifier=subnet_ids.list_value + ) +``` + +### Validation and Compliance Checks + +```python +# DO NOT EDIT. 
Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from cdktf import FileProvisioner +from constructs import Construct +from cdktf import VariableType, TerraformVariable, conditional, Token, TerraformCount, Fn, TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.null.resource import Resource +from imports.aws.instance import Instance +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + # Terraform Variables are not always the best fit for getting inputs in the context of Terraform CDK. + # You can read more about this at https://cdk.tf/variables + instance_type = TerraformVariable(self, "instance_type", + description="The EC2 instance type to use", + type=VariableType.STRING + ) + is_compliant = compliant + violations = validation_result_violations + # In most cases loops should be handled in the programming language context and + # not inside of the Terraform context. If you are looping over something external, e.g. a variable or a file input + # you should consider using a for loop. If you are looping over something only known to Terraform, e.g. a result of a data source + # you need to keep this like it is. + example_count = TerraformCount.of( + Token.as_number(conditional(is_compliant, 1, 0))) + Instance(self, "example", + ami=Token.as_string(data_aws_ami_example.id), + instance_type=instance_type.string_value, + root_block_device=InstanceRootBlockDevice( + encrypted=encrypt_storage.boolean_value + ), + tags={ + "ComplianceCheck": "passed", + "Environment": environment.string_value + }, + count=example_count + ) + # In most cases loops should be handled in the programming language context and + # not inside of the Terraform context. If you are looping over something external, e.g. a variable or a file input + # you should consider using a for loop. 
If you are looping over something only known to Terraform, e.g. a result of a data source + # you need to keep this like it is. + compliance_gate_count = TerraformCount.of( + Token.as_number(conditional(is_compliant, 0, 1))) + Resource(self, "compliance_gate", + count=compliance_gate_count, + provisioners=[FileProvisioner( + type="local-exec", + command="echo 'Compliance violations: " + + Token.as_string(Fn.join(", ", Token.as_list(violations))) + "' && exit 1" + ) + ] + ) +``` + +### External API Integration + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import Fn, Token, TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.ecs_service import EcsService class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) - TerraformOutput(self, "result_entry", - value=Fn.lookup_nested(Fn.jsondecode(example.result), ["\"key1\""]) + external_config = Fn.jsondecode(aws_lambda_invocation.external_config.result) + EcsService(self, "example", + cluster=Token.as_string(aws_ecs_cluster_example.id), + deployment_configuration=EcsServiceDeploymentConfiguration( + maximum_percent=Fn.lookup_nested(external_config, ["max_percent"]), + minimum_healthy_percent=Fn.lookup_nested(external_config, ["min_healthy_percent" + ]) + ), + desired_count=Token.as_number( + Fn.lookup_nested(external_config, ["replica_count"])), + name="web-app", + tags={ + "ConfigSource": "external-api", + "Environment": environment.string_value + }, + task_definition=Token.as_string(aws_ecs_task_definition_example.arn) ) ``` @@ -44,17 +197,91 @@ The following arguments are required: The following arguments are optional: * `client_context` - (Optional) Up to 3583 bytes of base64-encoded data about the invoking client to pass to the function in the context object. 
-* `log_type` - (Optional) Set to `Tail` to include the execution log in the response. Valid values are `None` and `Tail`. +* `log_type` - (Optional) Set to `Tail` to include the execution log in the response. Valid values: `None` and `Tail`. * `qualifier` - (Optional) Version or alias to invoke a published version of the function. Defaults to `$LATEST`. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). ## Attribute Reference -This resource exports the following attributes in addition to the arguments above: +This ephemeral resource exports the following attributes in addition to the arguments above: -* `executed_version` - Version of the function that executed. When you invoke a function with an alias, the version the alias resolved to. +* `executed_version` - Version of the function that executed. When you invoke a function with an alias, this shows the version the alias resolved to. * `function_error` - If present, indicates that an error occurred during function execution. Details about the error are included in `result`. * `log_result` - Last 4 KB of the execution log, which is base64-encoded. -* `result` - String result of the lambda function invocation. +* `result` - String result of the Lambda function invocation. * `status_code` - HTTP status code is in the 200 range for a successful request. - \ No newline at end of file +## Usage Notes + +### Handling Sensitive Data + +Since ephemeral resources are designed to not persist data in state, they are ideal for handling sensitive information: + +```python +# DO NOT EDIT. 
Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import Token, TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.secretsmanager_secret_version import SecretsmanagerSecretVersion +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + SecretsmanagerSecretVersion(self, "example", + secret_id=Token.as_string(aws_secretsmanager_secret_example.id), + secret_string=aws_lambda_invocation.credentials.result + ) +``` + +### Error Handling + +Always check for function errors in your configuration: + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import Op, Fn, Token, conditional, TerraformCount, TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.null.resource import Resource +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + has_error = Op.neq(aws_lambda_invocation.example.function_error, "null") + invocation_result = Fn.jsondecode(aws_lambda_invocation.example.result) + # In most cases loops should be handled in the programming language context and + # not inside of the Terraform context. If you are looping over something external, e.g. a variable or a file input + # you should consider using a for loop. If you are looping over something only known to Terraform, e.g. a result of a data source + # you need to keep this like it is. 
+ validation_count = TerraformCount.of( + Token.as_number( + conditional(has_error, + fail("Lambda function error: " + + Token.as_string( + Fn.lookup_nested(invocation_result, ["errorMessage"]))), 0))) + Resource(self, "validation", + count=validation_count + ) +``` + +### Logging + +Enable detailed logging for debugging: + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformOutput, Fn, TerraformStack +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + TerraformOutput(self, "execution_logs", + value=Fn.base64decode(aws_lambda_invocation.example.log_result) + ) +``` + + \ No newline at end of file diff --git a/website/docs/cdktf/python/ephemeral-resources/secretsmanager_random_password.html.markdown b/website/docs/cdktf/python/ephemeral-resources/secretsmanager_random_password.html.markdown index 18d394bacbc1..83fc2af4a6a3 100644 --- a/website/docs/cdktf/python/ephemeral-resources/secretsmanager_random_password.html.markdown +++ b/website/docs/cdktf/python/ephemeral-resources/secretsmanager_random_password.html.markdown @@ -29,6 +29,7 @@ class MyConvertedCode(TerraformStack): The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `exclude_characters` - (Optional) String of the characters that you don't want in the password. * `exclude_lowercase` - (Optional) Specifies whether to exclude lowercase letters from the password. * `exclude_numbers` - (Optional) Specifies whether to exclude numbers from the password. 
@@ -44,4 +45,4 @@ This resource exports the following attributes in addition to the arguments abov * `random_password` - Random password. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/ephemeral-resources/secretsmanager_secret_version.html.markdown b/website/docs/cdktf/python/ephemeral-resources/secretsmanager_secret_version.html.markdown index c86604ad451a..3f9a09a1eb65 100644 --- a/website/docs/cdktf/python/ephemeral-resources/secretsmanager_secret_version.html.markdown +++ b/website/docs/cdktf/python/ephemeral-resources/secretsmanager_secret_version.html.markdown @@ -12,7 +12,7 @@ description: |- Retrieve information about a Secrets Manager secret version, including its secret value. To retrieve secret metadata, see the [`aws_secretsmanager_secret` data source](/docs/providers/aws/d/secretsmanager_secret.html). -~> **NOTE:** Ephemeral resources are a new feature and may evolve as we continue to explore their most effective uses. [Learn more](https://developer.hashicorp.com/terraform/language/v1.10.x/resources/ephemeral). +~> **NOTE:** Ephemeral resources are a new feature and may evolve as we continue to explore their most effective uses. [Learn more](https://developer.hashicorp.com/terraform/language/resources/ephemeral). ## Example Usage @@ -60,6 +60,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `secret_id` - (Required) Specifies the secret containing the version that you want to retrieve. You can specify either the ARN or the friendly name of the secret. 
* `version_id` - (Optional) Specifies the unique identifier of the version of the secret that you want to retrieve. Overrides `version_stage`. * `version_stage` - (Optional) Specifies the secret version that you want to retrieve by the staging label attached to the version. Defaults to `AWSCURRENT`. @@ -75,4 +76,4 @@ This resource exports the following attributes in addition to the arguments abov * `secret_binary` - Decrypted part of the protected secret information that was originally provided as a binary. * `version_id` - Unique identifier of this version of the secret. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/ephemeral-resources/ssm_parameter.html.markdown b/website/docs/cdktf/python/ephemeral-resources/ssm_parameter.html.markdown index ddd72a2b032b..3985bf51d981 100644 --- a/website/docs/cdktf/python/ephemeral-resources/ssm_parameter.html.markdown +++ b/website/docs/cdktf/python/ephemeral-resources/ssm_parameter.html.markdown @@ -12,7 +12,7 @@ description: |- Retrieve information about an SSM parameter, including its value. -~> **NOTE:** Ephemeral resources are a new feature and may evolve as we continue to explore their most effective uses. [Learn more](https://developer.hashicorp.com/terraform/language/v1.10.x/resources/ephemeral). +~> **NOTE:** Ephemeral resources are a new feature and may evolve as we continue to explore their most effective uses. [Learn more](https://developer.hashicorp.com/terraform/language/resources/ephemeral). ## Example Usage @@ -33,6 +33,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
* `arn` - (Required) The Amazon Resource Name (ARN) of the parameter that you want to query * `with_decryption` - (Optional) Return decrypted values for a secure string parameter (Defaults to `true`). @@ -46,4 +47,4 @@ This resource exports the following attributes in addition to the arguments abov * `version` - The parameter version. * `with_decryption` - Indicates whether the secure string parameters were decrypted. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/guides/custom-service-endpoints.html.markdown b/website/docs/cdktf/python/guides/custom-service-endpoints.html.markdown index 305bfbc9f633..5014a5238bdc 100644 --- a/website/docs/cdktf/python/guides/custom-service-endpoints.html.markdown +++ b/website/docs/cdktf/python/guides/custom-service-endpoints.html.markdown @@ -126,6 +126,7 @@ class MyConvertedCode(TerraformStack): |App Runner|`apprunner`|`AWS_ENDPOINT_URL_APPRUNNER`|`apprunner`| |AppStream 2.0|`appstream`|`AWS_ENDPOINT_URL_APPSTREAM`|`appstream`| |AppSync|`appsync`|`AWS_ENDPOINT_URL_APPSYNC`|`appsync`| +|Application Recovery Controller Region Switch|`arcregionswitch`|`AWS_ENDPOINT_URL_ARC_REGION_SWITCH`|`arc_region_switch`| |Athena|`athena`|`AWS_ENDPOINT_URL_ATHENA`|`athena`| |Audit Manager|`auditmanager`|`AWS_ENDPOINT_URL_AUDITMANAGER`|`auditmanager`| |Auto Scaling|`autoscaling`|`AWS_ENDPOINT_URL_AUTO_SCALING`|`auto_scaling`| @@ -135,6 +136,7 @@ class MyConvertedCode(TerraformStack): |BCM Data Exports|`bcmdataexports`|`AWS_ENDPOINT_URL_BCM_DATA_EXPORTS`|`bcm_data_exports`| |Bedrock|`bedrock`|`AWS_ENDPOINT_URL_BEDROCK`|`bedrock`| |Bedrock Agents|`bedrockagent`|`AWS_ENDPOINT_URL_BEDROCK_AGENT`|`bedrock_agent`| +|Bedrock AgentCore|`bedrockagentcore`|`AWS_ENDPOINT_URL_BEDROCK_AGENTCORE_CONTROL`|`bedrock_agentcore_control`| |Billing|`billing`|`AWS_ENDPOINT_URL_BILLING`|`billing`| |Web Services Budgets|`budgets`|`AWS_ENDPOINT_URL_BUDGETS`|`budgets`| |CE (Cost Explorer)|`ce`(or 
`costexplorer`)|`AWS_ENDPOINT_URL_COST_EXPLORER`|`cost_explorer`| @@ -232,8 +234,6 @@ class MyConvertedCode(TerraformStack): |CloudWatch Internet Monitor|`internetmonitor`|`AWS_ENDPOINT_URL_INTERNETMONITOR`|`internetmonitor`| |Invoicing|`invoicing`|`AWS_ENDPOINT_URL_INVOICING`|`invoicing`| |IoT Core|`iot`|`AWS_ENDPOINT_URL_IOT`|`iot`| -|IoT Analytics|`iotanalytics`|`AWS_ENDPOINT_URL_IOTANALYTICS`|`iotanalytics`| -|IoT Events|`iotevents`|`AWS_ENDPOINT_URL_IOT_EVENTS`|`iot_events`| |IVS (Interactive Video)|`ivs`|`AWS_ENDPOINT_URL_IVS`|`ivs`| |IVS (Interactive Video) Chat|`ivschat`|`AWS_ENDPOINT_URL_IVSCHAT`|`ivschat`| |Managed Streaming for Kafka|`kafka`(or `msk`)|`AWS_ENDPOINT_URL_KAFKA`|`kafka`| @@ -276,9 +276,9 @@ class MyConvertedCode(TerraformStack): |User Notifications|`notifications`|`AWS_ENDPOINT_URL_NOTIFICATIONS`|`notifications`| |User Notifications Contacts|`notificationscontacts`|`AWS_ENDPOINT_URL_NOTIFICATIONSCONTACTS`|`notificationscontacts`| |CloudWatch Observability Access Manager|`oam`(or `cloudwatchobservabilityaccessmanager`)|`AWS_ENDPOINT_URL_OAM`|`oam`| +|Oracle Database@AWS|`odb`|`AWS_ENDPOINT_URL_ODB`|`odb`| |OpenSearch|`opensearch`(or `opensearchservice`)|`AWS_ENDPOINT_URL_OPENSEARCH`|`opensearch`| |OpenSearch Serverless|`opensearchserverless`|`AWS_ENDPOINT_URL_OPENSEARCHSERVERLESS`|`opensearchserverless`| -|OpsWorks|`opsworks`|`AWS_ENDPOINT_URL_OPSWORKS`|`opsworks`| |Organizations|`organizations`|`AWS_ENDPOINT_URL_ORGANIZATIONS`|`organizations`| |OpenSearch Ingestion|`osis`(or `opensearchingestion`)|`AWS_ENDPOINT_URL_OSIS`|`osis`| |Outposts|`outposts`|`AWS_ENDPOINT_URL_OUTPOSTS`|`outposts`| @@ -316,6 +316,7 @@ class MyConvertedCode(TerraformStack): |S3 Control|`s3control`|`AWS_ENDPOINT_URL_S3_CONTROL`|`s3_control`| |S3 on Outposts|`s3outposts`|`AWS_ENDPOINT_URL_S3OUTPOSTS`|`s3outposts`| |S3 Tables|`s3tables`|`AWS_ENDPOINT_URL_S3TABLES`|`s3tables`| +|S3 Vectors|`s3vectors`|`AWS_ENDPOINT_URL_S3VECTORS`|`s3vectors`| |SageMaker 
AI|`sagemaker`|`AWS_ENDPOINT_URL_SAGEMAKER`|`sagemaker`| |EventBridge Scheduler|`scheduler`|`AWS_ENDPOINT_URL_SCHEDULER`|`scheduler`| |EventBridge Schemas|`schemas`|`AWS_ENDPOINT_URL_SCHEMAS`|`schemas`| @@ -332,7 +333,6 @@ class MyConvertedCode(TerraformStack): |SFN (Step Functions)|`sfn`(or `stepfunctions`)|`AWS_ENDPOINT_URL_SFN`|`sfn`| |Shield|`shield`|`AWS_ENDPOINT_URL_SHIELD`|`shield`| |Signer|`signer`|`AWS_ENDPOINT_URL_SIGNER`|`signer`| -|SDB (SimpleDB)|`simpledb`(or `sdb`)|`AWS_ENDPOINT_URL_SIMPLEDB`|`simpledb`| |SNS (Simple Notification)|`sns`|`AWS_ENDPOINT_URL_SNS`|`sns`| |SQS (Simple Queue)|`sqs`|`AWS_ENDPOINT_URL_SQS`|`sqs`| |SSM (Systems Manager)|`ssm`|`AWS_ENDPOINT_URL_SSM`|`ssm`| @@ -358,7 +358,7 @@ class MyConvertedCode(TerraformStack): |WAF Classic Regional|`wafregional`|`AWS_ENDPOINT_URL_WAF_REGIONAL`|`waf_regional`| |WAF|`wafv2`|`AWS_ENDPOINT_URL_WAFV2`|`wafv2`| |Well-Architected Tool|`wellarchitected`|`AWS_ENDPOINT_URL_WELLARCHITECTED`|`wellarchitected`| -|WorkLink|`worklink`|`AWS_ENDPOINT_URL_WORKLINK`|`worklink`| +|WorkMail|`workmail`|`AWS_ENDPOINT_URL_WORKMAIL`|`workmail`| |WorkSpaces|`workspaces`|`AWS_ENDPOINT_URL_WORKSPACES`|`workspaces`| |WorkSpaces Web|`workspacesweb`|`AWS_ENDPOINT_URL_WORKSPACES_WEB`|`workspaces_web`| |X-Ray|`xray`|`AWS_ENDPOINT_URL_XRAY`|`xray`| @@ -461,4 +461,4 @@ class MyConvertedCode(TerraformStack): ) ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/guides/enhanced-region-support.html.markdown b/website/docs/cdktf/python/guides/enhanced-region-support.html.markdown new file mode 100644 index 000000000000..208c3b70e25a --- /dev/null +++ b/website/docs/cdktf/python/guides/enhanced-region-support.html.markdown @@ -0,0 +1,628 @@ +--- +subcategory: "" +layout: "aws" +page_title: "Terraform AWS Provider Enhanced Region Support" +description: |- + Enhanced Region support with the Terraform AWS Provider. 
+--- + + + +# Enhanced Region Support + +Version 6.0.0 of the Terraform AWS Provider adds `region` to most resources making it significantly easier to manage infrastructure across AWS Regions without requiring multiple provider configurations. + + + +- [What's new](#whats-new) +- [What's not changing](#whats-not-changing) +- [Can I use `region` in every resource?](#can-i-use-region-in-every-resource) +- [Why make this change](#why-make-this-change) +- [How `region` works](#how-region-works) +- [Migrating from multiple provider configurations](#migrating-from-multiple-provider-configurations) +- [Before and after examples using `region`](#before-and-after-examples-using-region) +- [Non–region-aware resources](#nonregion-aware-resources) + + + +## What's new + +As of v6.0.0, most existing resources, data sources, and ephemeral resources are now [Region-aware](#nonregion-aware-resources), meaning they support a new top-level `region`. This allows you to manage a resource in a Region different from the one specified in the provider configuration without requiring multiple provider blocks. See [How `region` works](#how-region-works) for details. + +For example, if your provider is configured for `us-east-1`, you can now manage a VPC in `us-west-2` without defining an additional provider block: + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. 
+# +from imports.aws.vpc import Vpc +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + Vpc(self, "peer", + cidr_block="10.1.0.0/16", + region="us-west-2" + ) +``` + +## What's _not_ changing + +_Pre-v6.0.0 configurations that use provider blocks per Region remain valid in v6.0.0 and are not deprecated._ + +You can still define the Region at the provider level using any of the existing methods—for example, through the AWS [config file](https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-files.html), [provider configuration](https://developer.hashicorp.com/terraform/language/providers/configuration), [environment variables](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#environment-variables), [shared configuration files](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#shared-configuration-and-credentials-files), or explicitly using the `provider`’s [`region`](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#region). + +## Can I use `region` in every resource? + +No. While most resources are now Region-aware, there are exceptions. These include a few resources that already had a `region` and resources that are inherently global. See [Non–region-aware resources](#nonregion-aware-resources). + +## Why make this change + +Before version 6.0.0, managing infrastructure across multiple Regions required a separate provider configuration for each Region. This approach led to complex and repetitive configurations, especially for large infrastructures—AWS currently operates in [36 Regions](https://aws.amazon.com/about-aws/global-infrastructure/), with more announced. Additionally, each provider configuration adds overhead in terms of memory and compute resources. + +See the [examples](#before-and-after-examples-using-region) below for a comparison of configurations before and after introducing `region`. 
+ +## How `region` works + +The new top-level `region` is [_Optional_ and _Computed_](https://developer.hashicorp.com/terraform/plugin/framework/handling-data/attributes/string#configurability), and defaults to the Region specified in the provider configuration. Its value is validated to ensure it belongs to the configured [partition](https://docs.aws.amazon.com/whitepapers/latest/aws-fault-isolation-boundaries/partitions.html). **Changing the value of `region` will force resource replacement.** + +To [import](https://developer.hashicorp.com/terraform/cli/import) a resource in a specific Region, append `@` to the [import ID](https://developer.hashicorp.com/terraform/language/import#import-id)—for example: + +```sh +terraform import aws_vpc.test_vpc vpc-a01106c2@eu-west-1 +``` + +## Migrating from multiple provider configurations + +To migrate from a separate provider configuration for each Region to a single provider configuration block and per-resource `region` values you must ensure that Terraform state is refreshed before editing resource configuration: + +1. Upgrade to v6.0.0 +2. Run a Terraform apply in [refresh-only mode](https://developer.hashicorp.com/terraform/cli/commands/plan#planning-modes) -- `terraform apply -refresh-only` +3. Modify the affected resource configurations, replacing the [`provider` meta-argument](https://developer.hashicorp.com/terraform/language/meta-arguments/resource-provider) with a `region` argument + +## Before and after examples using `region` + +### Cross-region VPC peering + +
+Before, Pre-v6.0.0 +

+ +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import Token, TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.data_aws_caller_identity import DataAwsCallerIdentity +from imports.aws.provider import AwsProvider +from imports.aws.vpc import Vpc +from imports.aws.vpc_peering_connection import VpcPeeringConnection +from imports.aws.vpc_peering_connection_accepter import VpcPeeringConnectionAccepterA +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + AwsProvider(self, "aws", + region="us-east-1" + ) + peer = AwsProvider(self, "aws_1", + alias="peer", + region="us-west-2" + ) + main = Vpc(self, "main", + cidr_block="10.0.0.0/16" + ) + aws_vpc_peer = Vpc(self, "peer", + cidr_block="10.1.0.0/16", + provider=peer + ) + data_aws_caller_identity_peer = DataAwsCallerIdentity(self, "peer_4", + provider=peer + ) + # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. + data_aws_caller_identity_peer.override_logical_id("peer") + aws_vpc_peering_connection_peer = VpcPeeringConnection(self, "peer_5", + auto_accept=False, + peer_owner_id=Token.as_string(data_aws_caller_identity_peer.account_id), + peer_region="us-west-2", + peer_vpc_id=Token.as_string(aws_vpc_peer.id), + vpc_id=main.id + ) + # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. 
+ aws_vpc_peering_connection_peer.override_logical_id("peer") + aws_vpc_peering_connection_accepter_peer = + VpcPeeringConnectionAccepterA(self, "peer_6", + auto_accept=True, + provider=peer, + vpc_peering_connection_id=Token.as_string(aws_vpc_peering_connection_peer.id) + ) + # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. + aws_vpc_peering_connection_accepter_peer.override_logical_id("peer") +``` + +

+
+ +
+After, v6.0.0+ +

+ +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import Token, TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.provider import AwsProvider +from imports.aws.vpc import Vpc +from imports.aws.vpc_peering_connection import VpcPeeringConnection +from imports.aws.vpc_peering_connection_accepter import VpcPeeringConnectionAccepterA +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + AwsProvider(self, "aws", + region="us-east-1" + ) + main = Vpc(self, "main", + cidr_block="10.0.0.0/16" + ) + peer = Vpc(self, "peer", + cidr_block="10.1.0.0/16", + region="us-west-2" + ) + aws_vpc_peering_connection_peer = VpcPeeringConnection(self, "peer_3", + auto_accept=False, + peer_region="us-west-2", + peer_vpc_id=peer.id, + vpc_id=main.id + ) + # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. + aws_vpc_peering_connection_peer.override_logical_id("peer") + aws_vpc_peering_connection_accepter_peer = + VpcPeeringConnectionAccepterA(self, "peer_4", + auto_accept=True, + region="us-west-2", + vpc_peering_connection_id=Token.as_string(aws_vpc_peering_connection_peer.id) + ) + # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. + aws_vpc_peering_connection_accepter_peer.override_logical_id("peer") +``` + +

+
+ +### KMS replica key + +
+Before, Pre-v6.0.0 +

+ +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import Token, TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.kms_key import KmsKey +from imports.aws.kms_replica_key import KmsReplicaKey +from imports.aws.provider import AwsProvider +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + primary = AwsProvider(self, "aws", + alias="primary", + region="us-east-1" + ) + AwsProvider(self, "aws_1", + region="us-west-2" + ) + aws_kms_key_primary = KmsKey(self, "primary", + deletion_window_in_days=30, + description="Multi-Region primary key", + multi_region=True, + provider=primary + ) + KmsReplicaKey(self, "replica", + deletion_window_in_days=7, + description="Multi-Region replica key", + primary_key_arn=Token.as_string(aws_kms_key_primary.arn) + ) +``` + +

+
+ +
+After, v6.0.0 +

+ +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.kms_key import KmsKey +from imports.aws.kms_replica_key import KmsReplicaKey +from imports.aws.provider import AwsProvider +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + AwsProvider(self, "aws", + region="us-west-2" + ) + primary = KmsKey(self, "primary", + deletion_window_in_days=30, + description="Multi-Region primary key", + multi_region=True, + region="us-east-1" + ) + KmsReplicaKey(self, "replica", + deletion_window_in_days=7, + description="Multi-Region replica key", + primary_key_arn=primary.arn + ) +``` + +

+
+ +### S3 bucket replication configuration + +
+Before, Pre-v6.0.0 +

+ +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import Token, TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.data_aws_iam_policy_document import DataAwsIamPolicyDocument +from imports.aws.iam_policy import IamPolicy +from imports.aws.iam_role import IamRole +from imports.aws.iam_role_policy_attachment import IamRolePolicyAttachment +from imports.aws.provider import AwsProvider +from imports.aws.s3_bucket import S3Bucket +from imports.aws.s3_bucket_acl import S3BucketAcl +from imports.aws.s3_bucket_replication_configuration import S3BucketReplicationConfigurationA +from imports.aws.s3_bucket_versioning import S3BucketVersioningA +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + AwsProvider(self, "aws", + region="eu-west-1" + ) + central = AwsProvider(self, "aws_1", + alias="central", + region="eu-central-1" + ) + destination = S3Bucket(self, "destination", + bucket="tf-test-bucket-destination-12345" + ) + source = S3Bucket(self, "source", + bucket="tf-test-bucket-source-12345", + provider=central + ) + S3BucketAcl(self, "source_bucket_acl", + acl="private", + bucket=source.id, + provider=central + ) + aws_s3_bucket_versioning_destination = S3BucketVersioningA(self, "destination_5", + bucket=destination.id, + versioning_configuration=S3BucketVersioningVersioningConfiguration( + status="Enabled" + ) + ) + # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. 
+ aws_s3_bucket_versioning_destination.override_logical_id("destination") + aws_s3_bucket_versioning_source = S3BucketVersioningA(self, "source_6", + bucket=source.id, + provider=central, + versioning_configuration=S3BucketVersioningVersioningConfiguration( + status="Enabled" + ) + ) + # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. + aws_s3_bucket_versioning_source.override_logical_id("source") + assume_role = DataAwsIamPolicyDocument(self, "assume_role", + statement=[DataAwsIamPolicyDocumentStatement( + actions=["sts:AssumeRole"], + effect="Allow", + principals=[DataAwsIamPolicyDocumentStatementPrincipals( + identifiers=["s3.amazonaws.com"], + type="Service" + ) + ] + ) + ] + ) + replication = DataAwsIamPolicyDocument(self, "replication", + statement=[DataAwsIamPolicyDocumentStatement( + actions=["s3:GetReplicationConfiguration", "s3:ListBucket"], + effect="Allow", + resources=[source.arn] + ), DataAwsIamPolicyDocumentStatement( + actions=["s3:GetObjectVersionForReplication", "s3:GetObjectVersionAcl", "s3:GetObjectVersionTagging" + ], + effect="Allow", + resources=["${" + source.arn + "}/*"] + ), DataAwsIamPolicyDocumentStatement( + actions=["s3:ReplicateObject", "s3:ReplicateDelete", "s3:ReplicateTags" + ], + effect="Allow", + resources=["${" + destination.arn + "}/*"] + ) + ] + ) + aws_iam_policy_replication = IamPolicy(self, "replication_9", + name="tf-iam-role-policy-replication-12345", + policy=Token.as_string(replication.json) + ) + # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. + aws_iam_policy_replication.override_logical_id("replication") + aws_iam_role_replication = IamRole(self, "replication_10", + assume_role_policy=Token.as_string(assume_role.json), + name="tf-iam-role-replication-12345" + ) + # This allows the Terraform resource name to match the original name. 
You can remove the call if you don't need them to match. + aws_iam_role_replication.override_logical_id("replication") + aws_iam_role_policy_attachment_replication = IamRolePolicyAttachment(self, "replication_11", + policy_arn=Token.as_string(aws_iam_policy_replication.arn), + role=Token.as_string(aws_iam_role_replication.name) + ) + # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. + aws_iam_role_policy_attachment_replication.override_logical_id("replication") + aws_s3_bucket_replication_configuration_replication = + S3BucketReplicationConfigurationA(self, "replication_12", + bucket=source.id, + depends_on=[aws_s3_bucket_versioning_source], + provider=central, + role=Token.as_string(aws_iam_role_replication.arn), + rule=[S3BucketReplicationConfigurationRule( + destination=S3BucketReplicationConfigurationRuleDestination( + bucket=destination.arn, + storage_class="STANDARD" + ), + filter=S3BucketReplicationConfigurationRuleFilter( + prefix="example" + ), + id="examplerule", + status="Enabled" + ) + ] + ) + # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. + aws_s3_bucket_replication_configuration_replication.override_logical_id("replication") +``` + +

+
+ +
+After, v6.0.0 +

+ +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import Token, TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.data_aws_iam_policy_document import DataAwsIamPolicyDocument +from imports.aws.iam_policy import IamPolicy +from imports.aws.iam_role import IamRole +from imports.aws.iam_role_policy_attachment import IamRolePolicyAttachment +from imports.aws.provider import AwsProvider +from imports.aws.s3_bucket import S3Bucket +from imports.aws.s3_bucket_acl import S3BucketAcl +from imports.aws.s3_bucket_replication_configuration import S3BucketReplicationConfigurationA +from imports.aws.s3_bucket_versioning import S3BucketVersioningA +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + AwsProvider(self, "aws", + region="eu-west-1" + ) + destination = S3Bucket(self, "destination", + bucket="tf-test-bucket-destination-12345" + ) + source = S3Bucket(self, "source", + bucket="tf-test-bucket-source-12345", + region="eu-central-1" + ) + S3BucketAcl(self, "source_bucket_acl", + acl="private", + bucket=source.id, + region="eu-central-1" + ) + aws_s3_bucket_versioning_destination = S3BucketVersioningA(self, "destination_4", + bucket=destination.id, + versioning_configuration=S3BucketVersioningVersioningConfiguration( + status="Enabled" + ) + ) + # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. 
+ aws_s3_bucket_versioning_destination.override_logical_id("destination") + aws_s3_bucket_versioning_source = S3BucketVersioningA(self, "source_5", + bucket=source.id, + region="eu-central-1", + versioning_configuration=S3BucketVersioningVersioningConfiguration( + status="Enabled" + ) + ) + # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. + aws_s3_bucket_versioning_source.override_logical_id("source") + assume_role = DataAwsIamPolicyDocument(self, "assume_role", + statement=[DataAwsIamPolicyDocumentStatement( + actions=["sts:AssumeRole"], + effect="Allow", + principals=[DataAwsIamPolicyDocumentStatementPrincipals( + identifiers=["s3.amazonaws.com"], + type="Service" + ) + ] + ) + ] + ) + replication = DataAwsIamPolicyDocument(self, "replication", + statement=[DataAwsIamPolicyDocumentStatement( + actions=["s3:GetReplicationConfiguration", "s3:ListBucket"], + effect="Allow", + resources=[source.arn] + ), DataAwsIamPolicyDocumentStatement( + actions=["s3:GetObjectVersionForReplication", "s3:GetObjectVersionAcl", "s3:GetObjectVersionTagging" + ], + effect="Allow", + resources=["${" + source.arn + "}/*"] + ), DataAwsIamPolicyDocumentStatement( + actions=["s3:ReplicateObject", "s3:ReplicateDelete", "s3:ReplicateTags" + ], + effect="Allow", + resources=["${" + destination.arn + "}/*"] + ) + ] + ) + aws_iam_policy_replication = IamPolicy(self, "replication_8", + name="tf-iam-role-policy-replication-12345", + policy=Token.as_string(replication.json) + ) + # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. + aws_iam_policy_replication.override_logical_id("replication") + aws_iam_role_replication = IamRole(self, "replication_9", + assume_role_policy=Token.as_string(assume_role.json), + name="tf-iam-role-replication-12345" + ) + # This allows the Terraform resource name to match the original name. 
You can remove the call if you don't need them to match. + aws_iam_role_replication.override_logical_id("replication") + aws_iam_role_policy_attachment_replication = IamRolePolicyAttachment(self, "replication_10", + policy_arn=Token.as_string(aws_iam_policy_replication.arn), + role=Token.as_string(aws_iam_role_replication.name) + ) + # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. + aws_iam_role_policy_attachment_replication.override_logical_id("replication") + aws_s3_bucket_replication_configuration_replication = + S3BucketReplicationConfigurationA(self, "replication_11", + bucket=source.id, + depends_on=[aws_s3_bucket_versioning_source], + region="eu-central-1", + role=Token.as_string(aws_iam_role_replication.arn), + rule=[S3BucketReplicationConfigurationRule( + destination=S3BucketReplicationConfigurationRuleDestination( + bucket=destination.arn, + storage_class="STANDARD" + ), + filter=S3BucketReplicationConfigurationRuleFilter( + prefix="example" + ), + id="examplerule", + status="Enabled" + ) + ] + ) + # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. + aws_s3_bucket_replication_configuration_replication.override_logical_id("replication") +``` + +

+
+ +## Non–region-aware resources + +This section lists resources that are not Region-aware—meaning `region` has not been added to them. + +Some resources, such as [IAM and STS](https://docs.aws.amazon.com/IAM/latest/UserGuide/programming.html#IAMEndpoints), are [global](https://docs.aws.amazon.com/whitepapers/latest/aws-fault-isolation-boundaries/global-services.html) and exist in all Regions within a partition. + +Other resources are not Region-aware because they already had a top-level `region`, are inherently global, or because adding `region` would not be appropriate for other reasons. + +### Resources deprecating `region` + +The following regional resources and data sources had a top-level `region` prior to version 6.0.0. It is now deprecated and will be replaced in a future version to support the new Region-aware behavior. + +* `aws_cloudformation_stack_set_instance` resource +* `aws_config_aggregate_authorization` resource +* `aws_dx_hosted_connection` resource +* `aws_region` data source +* `aws_s3_bucket` data source +* `aws_servicequotas_template` resource +* `aws_servicequotas_templates` data source +* `aws_ssmincidents_replication_set` resource and data source +* `aws_vpc_endpoint_service` data source +* `aws_vpc_peering_connection` data source + +### Global services + +All resources for the following services are considered _global_: + +* Account Management (`aws_account_*`) +* Billing (`aws_billing_*`) +* Billing and Cost Management Data Exports (`aws_bcmdataexports_*`) +* Budgets (`aws_budgets_*`) +* CloudFront (`aws_cloudfront_*` and `aws_cloudfrontkeyvaluestore_*`) +* Cost Explorer (`aws_ce_*`) +* Cost Optimization Hub (`aws_costoptimizationhub_*`) +* Cost and Usage Report (`aws_cur_*`) +* Global Accelerator (`aws_globalaccelerator_*`) +* IAM (`aws_iam_*`, `aws_rolesanywhere_*` and `aws_caller_identity`) +* Network Manager (`aws_networkmanager_*`) +* Organizations (`aws_organizations_*`) +* Price List (`aws_pricing_*`) +* Route 53 (`aws_route53_*` 
and `aws_route53domains_*`) +* Route 53 ARC (`aws_route53recoverycontrolconfig_*` and `aws_route53recoveryreadiness_*`) +* Shield Advanced (`aws_shield_*`) +* User Notifications (`aws_notifications_*`) +* User Notifications Contacts (`aws_notificationscontacts_*`) +* WAF Classic (`aws_waf_*`) + +### Global resources in regional services + +Some regional services have a subset of resources that are global: + +| Service | Type | Name | +|---|---|---| +| Backup | Resource | `aws_backup_global_settings` | +| Chime SDK Voice | Resource | `aws_chimesdkvoice_global_settings` | +| CloudTrail | Resource | `aws_cloudtrail_organization_delegated_admin_account` | +| Direct Connect | Resource | `aws_dx_gateway` | +| Direct Connect | Data Source | `aws_dx_gateway` | +| EC2 | Resource | `aws_ec2_image_block_public_access` | +| Firewall Manager | Resource | `aws_fms_admin_account` | +| IPAM | Resource | `aws_vpc_ipam_organization_admin_account` | +| QuickSight | Resource | `aws_quicksight_account_settings` | +| Resource Access Manager | Resource | `aws_ram_sharing_with_organization` | +| S3 | Data Source | `aws_canonical_user_id` | +| S3 | Resource | `aws_s3_account_public_access_block` | +| S3 | Data Source | `aws_s3_account_public_access_block` | +| Service Catalog | Resource | `aws_servicecatalog_organizations_access` | + +### Meta data sources + +The `aws_default_tags`, `aws_partition`, and `aws_regions` data sources are effectively global. + +`region` of the `aws_arn` data source stays as-is. 
+ +### Policy Document Data Sources + +Some data sources convert HCL into JSON policy documents and are effectively global: + +* `aws_cloudwatch_log_data_protection_policy_document` +* `aws_ecr_lifecycle_policy_document` + + \ No newline at end of file diff --git a/website/docs/cdktf/python/guides/version-6-upgrade.html.markdown b/website/docs/cdktf/python/guides/version-6-upgrade.html.markdown new file mode 100644 index 000000000000..8c6e0886cb40 --- /dev/null +++ b/website/docs/cdktf/python/guides/version-6-upgrade.html.markdown @@ -0,0 +1,747 @@ +--- +subcategory: "" +layout: "aws" +page_title: "Terraform AWS Provider Version 6 Upgrade Guide" +description: |- + Terraform AWS Provider Version 6 Upgrade Guide +--- + + + +# Terraform AWS Provider Version 6 Upgrade Guide + +Version 6.0.0 of the AWS provider for Terraform is a major release and includes changes that you need to consider when upgrading. This guide will help with that process and focuses only on changes from version 5.x to version 6.0.0. See the [Version 5 Upgrade Guide](/docs/providers/aws/guides/version-5-upgrade.html) for information on upgrading from 4.x to version 5.0.0. 
+ +Upgrade topics: + + + +- [Prerequisites to Upgrade to v6.0.0](#prerequisites-to-upgrade-to-v600) +- [Removed Provider Arguments](#removed-provider-arguments) +- [Enhanced Region Support](#enhanced-region-support) +- [Amazon Elastic Transcoder Deprecation](#amazon-elastic-transcoder-deprecation) +- [CloudWatch Evidently Deprecation](#cloudwatch-evidently-deprecation) +- [Nullable Boolean Validation Update](#nullable-boolean-validation-update) +- [OpsWorks Stacks Removal](#opsworks-stacks-removal) +- [S3 Global Endpoint Deprecation](#s3-global-endpoint-deprecation) +- [SimpleDB Support Removed](#simpledb-support-removed) +- [Worklink Support Removed](#worklink-support-removed) +- [Data Source `aws_ami`](#data-source-aws_ami) +- [Data Source `aws_batch_compute_environment`](#data-source-aws_batch_compute_environment) +- [Data Source `aws_ecs_task_definition`](#data-source-aws_ecs_task_definition) +- [Data Source `aws_ecs_task_execution`](#data-source-aws_ecs_task_execution) +- [Data Source `aws_elbv2_listener_rule`](#data-source-aws_elbv2_listener_rule) +- [Data Source `aws_globalaccelerator_accelerator`](#data-source-aws_globalaccelerator_accelerator) +- [Data Source `aws_identitystore_group`](#data-source-aws_identitystore_group) +- [Data Source `aws_identitystore_user`](#data-source-aws_identitystore_user) +- [Data Source `aws_kms_secret`](#data-source-aws_kms_secret) +- [Data Source `aws_launch_template`](#data-source-aws_launch_template) +- [Data Source `aws_opensearch_domain`](#data-source-aws_opensearch_domain) +- [Data Source `aws_opensearchserverless_security_config`](#data-source-aws_opensearchserverless_security_config) +- [Data Source `aws_quicksight_data_set`](#data-source-aws_quicksight_data_set) +- [Data Source `aws_region`](#data-source-aws_region) +- [Data Source `aws_s3_bucket`](#data-source-aws_s3_bucket) +- [Data Source `aws_service_discovery_service`](#data-source-aws_service_discovery_service) +- [Data Source 
`aws_servicequotas_templates`](#data-source-aws_servicequotas_templates) +- [Data Source `aws_ssmincidents_replication_set`](#data-source-aws_ssmincidents_replication_set) +- [Data Source `aws_vpc_endpoint_service`](#data-source-aws_vpc_endpoint_service) +- [Data Source `aws_vpc_peering_connection`](#data-source-aws_vpc_peering_connection) +- [Resource `aws_accessanalyzer_archive_rule`](#typenullablebool-validation-update) +- [Resource `aws_alb_target_group`](#typenullablebool-validation-update) +- [Resource `aws_api_gateway_account`](#resource-aws_api_gateway_account) +- [Resource `aws_api_gateway_deployment`](#resource-aws_api_gateway_deployment) +- [Resource `aws_appflow_connector_profile`](#resource-aws_appflow_connector_profile) +- [Resource `aws_appflow_flow`](#resource-aws_appflow_flow) +- [Resource `aws_batch_compute_environment`](#resource-aws_batch_compute_environment) +- [Resource `aws_batch_job_queue`](#resource-aws_batch_job_queue) +- [Resource `aws_bedrock_model_invocation_logging_configuration`](#resource-aws_bedrock_model_invocation_logging_configuration) +- [Resource `aws_cloudformation_stack_set_instance`](#resource-aws_cloudformation_stack_set_instance) +- [Resource `aws_cloudfront_key_value_store`](#resource-aws_cloudfront_key_value_store) +- [Resource `aws_cloudfront_response_headers_policy`](#resource-aws_cloudfront_response_headers_policy) +- [Resource `aws_cloudtrail_event_data_store`](#typenullablebool-validation-update) +- [Resource `aws_cognito_user_in_group`](#resource-aws_cognito_user_in_group) +- [Resource `aws_config_aggregate_authorization`](#resource-aws_config_aggregate_authorization) +- [Resource `aws_cur_report_definition`](#resource-aws_cur_report_definition) +- [Resource `aws_db_instance`](#resource-aws_db_instance) +- [Resource `aws_dms_endpoint`](#resource-aws_dms_endpoint) +- [Resource `aws_dx_gateway_association`](#resource-aws_dx_gateway_association) +- [Resource 
`aws_dx_hosted_connection`](#resource-aws_dx_hosted_connection) +- [Resource `aws_ec2_spot_instance_fleet`](#typenullablebool-validation-update) +- [Resource `aws_ecs_task_definition`](#resource-aws_ecs_task_definition) +- [Resource `aws_eip`](#resource-aws_eip) +- [Resource `aws_eks_addon`](#resource-aws_eks_addon) +- [Resource `aws_elasticache_cluster`](#typenullablebool-validation-update) +- [Resource `aws_elasticache_replication_group`](#resource-aws_elasticache_replication_group) +- [Resource `aws_elasticache_user`](#resource-aws_elasticache_user) +- [Resource `aws_elasticache_user_group`](#resource-aws_elasticache_user_group) +- [Resource `aws_evidently_feature`](#typenullablebool-validation-update) +- [Resource `aws_flow_log`](#resource-aws_flow_log) +- [Resource `aws_guardduty_detector`](#resource-aws_guardduty_detector) +- [Resource `aws_guardduty_organization_configuration`](#resource-aws_guardduty_organization_configuration) +- [Resource `aws_imagebuilder_container_recipe`](#typenullablebool-validation-update) +- [Resource `aws_imagebuilder_image_recipe`](#typenullablebool-validation-update) +- [Resource `aws_instance`](#resource-aws_instance) +- [Resource `aws_kinesis_analytics_application`](#resource-aws_kinesis_analytics_application) +- [Resource `aws_launch_template`](#resource-aws_launch_template) +- [Resource `aws_lb_listener`](#resource-aws_lb_listener) +- [Resource `aws_lb_target_group`](#typenullablebool-validation-update) +- [Resource `aws_media_store_container`](#resource-aws_media_store_container) +- [Resource `aws_media_store_container_policy`](#resource-aws_media_store_container_policy) +- [Resource `aws_mq_broker`](#typenullablebool-validation-update) +- [Resource `aws_networkmanager_core_network`](#resource-aws_networkmanager_core_network) +- [Resource `aws_opensearch_domain`](#resource-aws_opensearch_domain) +- [Resource `aws_opensearchserverless_security_config`](#resource-aws_opensearchserverless_security_config) +- [Resource 
`aws_paymentcryptography_key`](#resource-aws_paymentcryptography_key) +- [Resource `aws_redshift_cluster`](#resource-aws_redshift_cluster) +- [Resource `aws_redshift_service_account`](#resource-aws_redshift_service_account) +- [Resource `aws_rekognition_stream_processor`](#resource-aws_rekognition_stream_processor) +- [Resource `aws_resiliencehub_resiliency_policy`](#resource-aws_resiliencehub_resiliency_policy) +- [Resource `aws_s3_bucket`](#resource-aws_s3_bucket) +- [Resource `aws_sagemaker_image_version`](#resource-aws_sagemaker_image_version) +- [Resource `aws_sagemaker_notebook_instance`](#resource-aws_sagemaker_notebook_instance) +- [Resource `aws_servicequotas_template`](#resource-aws_servicequotas_template) +- [Resource `aws_spot_instance_request`](#resource-aws_spot_instance_request) +- [Resource `aws_ssm_association`](#resource-aws_ssm_association) +- [Resource `aws_ssmincidents_replication_set`](#resource-aws_ssmincidents_replication_set) +- [Resource `aws_verifiedpermissions_schema`](#resource-aws_verifiedpermissions_schema) +- [Resource `aws_wafv2_web_acl`](#resource-aws_wafv2_web_acl) + + + +## Prerequisites to Upgrade to v6.0.0 + +-> Before upgrading to version `6.0.0`, first upgrade to the latest available `5.x` version of the provider. Run [`terraform plan`](https://developer.hashicorp.com/terraform/cli/commands/plan) and confirm that: + +- Your plan completes without errors or unexpected changes. +- There are no deprecation warnings related to the changes described in this guide. + +If you use [version constraints](https://developer.hashicorp.com/terraform/language/providers/requirements#provider-versions) (recommended), update them to allow the `6.x` series and run [`terraform init -upgrade`](https://developer.hashicorp.com/terraform/cli/commands/init) to download the new version. + +### Example + +**Before:** + +```python +# DO NOT EDIT. 
Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.provider import AwsProvider +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + AwsProvider(self, "aws") +``` + +**After:** + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.provider import AwsProvider +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + AwsProvider(self, "aws") +``` + +## Removed Provider Arguments + +Remove the following from your provider configuration—they are no longer supported: + +- `endpoints.opsworks` – removed following AWS OpsWorks Stacks End of Life. +- `endpoints.simpledb` and `endpoints.sdb` – removed due to the removal of Amazon SimpleDB support. +- `endpoints.worklink` – removed due to the removal of Amazon Worklink support. + +## Enhanced Region Support + +Version 6.0.0 adds `region` to most resources making it significantly easier to manage infrastructure across AWS Regions without requiring multiple provider configurations. See [Enhanced Region Support](enhanced-region-support.html). + +## Amazon Elastic Transcoder Deprecation + +Amazon Elastic Transcoder will be [discontinued](https://aws.amazon.com/blogs/media/support-for-amazon-elastic-transcoder-ending-soon/) on **November 13, 2025**. 
+ +The following resources are deprecated and will be removed in a future major release: + +- `aws_elastictranscoder_pipeline` +- `aws_elastictranscoder_preset` + +Use [AWS Elemental MediaConvert](https://aws.amazon.com/blogs/media/migrating-workflows-from-amazon-elastic-transcoder-to-aws-elemental-mediaconvert/) instead. + +## CloudWatch Evidently Deprecation + +AWS will [end support](https://aws.amazon.com/blogs/mt/support-for-amazon-cloudwatch-evidently-ending-soon/) for CloudWatch Evidently on **October 17, 2025**. + +The following resources are deprecated and will be removed in a future major release: + +- `aws_evidently_feature` +- `aws_evidently_launch` +- `aws_evidently_project` +- `aws_evidently_segment` + +Migrate to [AWS AppConfig Feature Flags](https://aws.amazon.com/blogs/mt/using-aws-appconfig-feature-flags/). + +## Nullable Boolean Validation Update + +Update your configuration to _only_ use `""`, `true`, or `false` if you use the arguments below _and_ you are using `0` or `1` to represent boolean values: + +| Resource | Attribute(s) | +|-----------------------------------------|--------------------------------------------------------------------------| +| `aws_accessanalyzer_archive_rule` | `filter.exists` | +| `aws_alb_target_group` | `preserve_client_ip` | +| `aws_cloudtrail_event_data_store` | `suspend` | +| `aws_ec2_spot_instance_fleet` | `terminate_instances_on_delete` | +| `aws_elasticache_cluster` | `auto_minor_version_upgrade` | +| `aws_elasticache_replication_group` | `at_rest_encryption_enabled`, `auto_minor_version_upgrade` | +| `aws_evidently_feature` | `variations.value.bool_value` | +| `aws_imagebuilder_container_recipe` | `instance_configuration.block_device_mapping.ebs.delete_on_termination`, `instance_configuration.block_device_mapping.ebs.encrypted` | +| `aws_imagebuilder_image_recipe` | `block_device_mapping.ebs.delete_on_termination`, `block_device_mapping.ebs.encrypted` | +| `aws_launch_template` | 
`block_device_mappings.ebs.delete_on_termination`, `block_device_mappings.ebs.encrypted`, `ebs_optimized`, `network_interfaces.associate_carrier_ip_address`, `network_interfaces.associate_public_ip_address`, `network_interfaces.delete_on_termination`, `network_interfaces.primary_ipv6` | +| `aws_lb_target_group` | `preserve_client_ip` | +| `aws_mq_broker` | `logs.audit` | + +This is due to changes to `TypeNullableBool`. + +## OpsWorks Stacks Removal + +The AWS OpsWorks Stacks service has reached [End of Life](https://docs.aws.amazon.com/opsworks/latest/userguide/stacks-eol-faqs.html). The following resources have been removed: + +- `aws_opsworks_application` +- `aws_opsworks_custom_layer` +- `aws_opsworks_ecs_cluster_layer` +- `aws_opsworks_ganglia_layer` +- `aws_opsworks_haproxy_layer` +- `aws_opsworks_instance` +- `aws_opsworks_java_app_layer` +- `aws_opsworks_memcached_layer` +- `aws_opsworks_mysql_layer` +- `aws_opsworks_nodejs_app_layer` +- `aws_opsworks_permission` +- `aws_opsworks_php_app_layer` +- `aws_opsworks_rails_app_layer` +- `aws_opsworks_rds_db_instance` +- `aws_opsworks_stack` +- `aws_opsworks_static_web_layer` +- `aws_opsworks_user_profile` + +## SimpleDB Support Removed + +The `aws_simpledb_domain` resource has been removed, as the [AWS SDK for Go v2](https://docs.aws.amazon.com/sdk-for-go/v2/developer-guide/welcome.html) no longer supports Amazon SimpleDB. + +## Worklink Support Removed + +The following resources have been removed due to dropped support for Amazon Worklink in the [AWS SDK for Go v2](https://github.com/aws/aws-sdk-go-v2/pull/2814): + +- `aws_worklink_fleet` +- `aws_worklink_website_certificate_authority_association` + +## S3 Global Endpoint Deprecation + +Support for the global S3 endpoint is deprecated. This affects S3 resources in `us-east-1` (excluding directory buckets) when `s3_us_east_1_regional_endpoint` is set to `legacy`. + +`s3_us_east_1_regional_endpoint` will be removed in `v7.0.0`. 
+ +To prepare: + +- Remove `s3_us_east_1_regional_endpoint` from your provider configuration, **or** +- Set its value to `regional` and verify functionality. + +## Data Source `aws_ami` + +When using `most_recent = true`, your configuration **must now include** an `owner` or a `filter` that identifies the image by `image-id` or `owner-id`. + +- **Before (v5 and earlier):** + Terraform allowed this setup and showed only a warning. + +- **Now (v6+):** + Terraform will stop with an **error** to prevent unsafe or ambiguous AMI lookups. + +### How to fix it + +Do one of the following: + +- Add `owner`: + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformStack +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) +``` + +- Or add a `filter` block that includes either `image-id` or `owner-id`: + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformStack +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) +``` + +### Unsafe option (not recommended) + +To override this check, you can set: + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformStack +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) +``` + +However, this may lead to unreliable results and should be avoided unless absolutely necessary. + +## Data Source `aws_batch_compute_environment` + +`compute_environment_name` has been renamed to `name`. + +Update your configurations to replace any usage of `compute_environment_name` with `name` to use this version. 
+ +## Data Source `aws_ecs_task_definition` + +Remove `inference_accelerator`—it is no longer supported. Amazon Elastic Inference reached end of life in April 2024. + +## Data Source `aws_ecs_task_execution` + +Remove `inference_accelerator_overrides`—it is no longer supported. Amazon Elastic Inference reached end of life in April 2024. + +## Data Source `aws_elbv2_listener_rule` + +Treat the following as lists of nested blocks instead of single-nested blocks: + +- `action.authenticate_cognito` +- `action.authenticate_oidc` +- `action.fixed_response` +- `action.forward` +- `action.forward.stickiness` +- `action.redirect` +- `condition.host_header` +- `condition.http_header` +- `condition.http_request_method` +- `condition.path_pattern` +- `condition.query_string` +- `condition.source_ip` + +The data source configuration itself does not change. However, now, include an index when referencing them. For example, update `action[0].authenticate_cognito.scope` to `action[0].authenticate_cognito[0].scope`. + +## Data Source `aws_globalaccelerator_accelerator` + +`id` is now **computed only** and can no longer be set manually. +If your configuration explicitly attempts to set a value for `id`, you must remove it to avoid an error. + +## Data Source `aws_identitystore_group` + +Remove `filter`—it is no longer supported. To locate a group, update your configuration to use `alternate_identifier` instead. + +## Data Source `aws_identitystore_user` + +Remove `filter`—it is no longer supported. +To locate a user, update your configuration to use `alternate_identifier` instead. + +## Data Source `aws_kms_secret` + +The functionality for this data source was removed in **v2.0.0** and the data source will be removed in a future version. + +## Data Source `aws_launch_template` + +Remove the following—they are no longer supported: + +- `elastic_gpu_specifications`: Amazon Elastic Graphics reached end of life in January 2024. 
+- `elastic_inference_accelerator`: Amazon Elastic Inference reached end of life in April 2024. + +## Data Source `aws_opensearch_domain` + +Remove `kibana_endpoint`—it is no longer supported. AWS OpenSearch Service no longer uses Kibana endpoints. The service now uses **Dashboards**, accessible at the `/_dashboards/` path on the domain endpoint. +For more details, refer to the [AWS OpenSearch Dashboards documentation](https://docs.aws.amazon.com/opensearch-service/latest/developerguide/dashboards.html). + +## Data Source `aws_opensearchserverless_security_config` + +Treat `saml_options` as a list of nested blocks instead of a single-nested block. The data source configuration itself does not change. However, now, include an index when referencing it. For example, update `saml_options.session_timeout` to `saml_options[0].session_timeout`. + +## Data Source `aws_quicksight_data_set` + +Remove `tags_all`—it is no longer supported. + +## Data Source `aws_region` + +`name` has been deprecated. Use `region` instead. + +## Data Source `aws_s3_bucket` + +`bucket_region` has been added and should be used instead of `region`, which is now used for [Enhanced Region Support](enhanced-region-support.html). + +## Data Source `aws_service_discovery_service` + +Remove `tags_all`—it is no longer supported. + +## Data Source `aws_servicequotas_templates` + +`region` has been deprecated. Use `aws_region` instead. + +## Data Source `aws_ssmincidents_replication_set` + +`region` has been deprecated. Use `regions` instead. + +## Data Source `aws_vpc_endpoint_service` + +`region` has been deprecated. Use `service_region` instead. + +## Data Source `aws_vpc_peering_connection` + +`region` has been deprecated. Use `requester_region` instead. + +## Resource `aws_api_gateway_account` + +Remove `reset_on_delete`—it is no longer supported. The destroy operation will now always reset the API Gateway account settings by default. 
+ +If you want to retain the previous behavior (where the account settings were not changed upon destruction), use a `removed` block in your configuration. For more details, see the [removing resources documentation](https://developer.hashicorp.com/terraform/language/resources/syntax#removing-resources). + +## Resource `aws_api_gateway_deployment` + +* Use the `aws_api_gateway_stage` resource if your configuration uses any of the following, which have been removed from the `aws_api_gateway_deployment` resource: + - `stage_name` + - `stage_description` + - `canary_settings` +* Remove `invoke_url` and `execution_arn`—they are no longer supported. Use the `aws_api_gateway_stage` resource instead. + +### Migration Example + +**Before (v5 and earlier, using implicit stage):** + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import Token, TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.api_gateway_deployment import ApiGatewayDeployment +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + ApiGatewayDeployment(self, "example", + rest_api_id=Token.as_string(aws_api_gateway_rest_api_example.id), + stage_name="prod" + ) +``` + +**After (v6+, using explicit stage):** + +If your previous configuration relied on an implicitly created stage, you must now define and manage that stage explicitly using the `aws_api_gateway_stage` resource. To do this, create a corresponding resource and import the existing stage into your configuration. + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import Token, TerraformStack +# +# Provider bindings are generated by running `cdktf get`. 
+# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.api_gateway_deployment import ApiGatewayDeployment +from imports.aws.api_gateway_stage import ApiGatewayStage +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + example = ApiGatewayDeployment(self, "example", + rest_api_id=Token.as_string(aws_api_gateway_rest_api_example.id) + ) + ApiGatewayStage(self, "prod", + deployment_id=example.id, + rest_api_id=Token.as_string(aws_api_gateway_rest_api_example.id), + stage_name="prod" + ) +``` + +Import the existing stage, replacing `rest_api_id` and `stage_name` with your values: + +```sh +terraform import aws_api_gateway_stage.prod rest_api_id/stage_name +``` + +## Resource `aws_appflow_connector_profile` + +Importing an `aws_appflow_connector_profile` resource now uses the `name` of the Connector Profile. + +## Resource `aws_appflow_flow` + +Importing an `aws_appflow_flow` resource now uses the `name` of the Flow. + +## Resource `aws_batch_compute_environment` + +Replace any usage of `compute_environment_name` with `name` and `compute_environment_name_prefix` with `name_prefix` as they have been renamed. + +## Resource `aws_batch_job_queue` + +Remove `compute_environments`—it is no longer supported. +Use `compute_environment_order` configuration blocks instead. While you must update your configuration, Terraform will upgrade states with `compute_environments` to `compute_environment_order`. + +**Before (v5 and earlier):** + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. 
+# +from imports.aws.batch_job_queue import BatchJobQueue +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + BatchJobQueue(self, "example", + compute_environments=[aws_batch_compute_environment_example.arn], + name="patagonia", + priority=1, + state="ENABLED" + ) +``` + +**After (v6+):** + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import Token, TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.batch_job_queue import BatchJobQueue +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + BatchJobQueue(self, "example", + compute_environment_order=[BatchJobQueueComputeEnvironmentOrder( + compute_environment=Token.as_string(aws_batch_compute_environment_example.arn), + order=0 + ) + ], + name="patagonia", + priority=1, + state="ENABLED" + ) +``` + +## Resource `aws_bedrock_model_invocation_logging_configuration` + +Treat the following as lists of nested blocks instead of single-nested blocks: + +- `logging_config` +- `logging_config.cloudwatch_config` +- `logging_config.cloudwatch_config.large_data_delivery_s3_config` +- `logging_config.s3_config` + +The resource configuration itself does not change, but you must now include an index when referencing them. For example, update `logging_config.cloudwatch_config.log_group_name` to `logging_config[0].cloudwatch_config[0].log_group_name`. + +## Resource `aws_cloudformation_stack_set_instance` + +`region` has been deprecated. Use `stack_set_instance_region` instead. + +## Resource `aws_cloudfront_key_value_store` + +Use `name` to reference the resource name. `id` represents the ID value returned by the AWS API. 
+ +## Resource `aws_cloudfront_response_headers_policy` + +Do not set a value for `etag` as it is now computed only. + +## Resource `aws_cognito_user_in_group` + +For the `id`, use a comma-delimited string concatenating `user_pool_id`, `group_name`, and `username`. For example, in an import command, use comma-delimiting for the composite `id`. + +## Resource `aws_config_aggregate_authorization` + +`region` has been deprecated. Use `authorized_aws_region` instead. + +## Resource `aws_cur_report_definition` + +`s3_prefix` is now required. + +## Resource `aws_db_instance` + +Do not use `character_set_name` with `replicate_source_db`, `restore_to_point_in_time`, `s3_import`, or `snapshot_identifier`. The combination is no longer valid. + +## Resource `aws_dms_endpoint` + +`s3_settings` has been removed. Use the `aws_dms_s3_endpoint` resource rather than `s3_settings` of `aws_dms_endpoint`. + +## Resource `aws_dx_gateway_association` + +Remove `vpn_gateway_id`—it is no longer supported. Use `associated_gateway_id` instead. + +## Resource `aws_dx_hosted_connection` + +`region` has been deprecated. Use `connection_region` instead. + +## Resource `aws_ecs_task_definition` + +Remove `inference_accelerator`—it is no longer supported. Amazon Elastic Inference reached end of life in April 2024. + +## Resource `aws_eip` + +Remove `vpc`—it is no longer supported. Use `domain` instead. + +## Resource `aws_eks_addon` + +Remove `resolve_conflicts`—it is no longer supported. Use `resolve_conflicts_on_create` and `resolve_conflicts_on_update` instead. + +## Resource `aws_elasticache_replication_group` + +* `auth_token_update_strategy` no longer has a default value. If `auth_token` is set, `auth_token_update_strategy` must also be explicitly configured. +* The ability to provide an uppercase `engine` value is deprecated. In `v7.0.0`, plan-time validation of `engine` will require an entirely lowercase value to match the returned value from the AWS API without diff suppression.
+* See also [changes](#typenullablebool-validation-update) to `at_rest_encryption_enabled` and `auto_minor_version_upgrade`. + +## Resource `aws_elasticache_user` + +The ability to provide an uppercase `engine` value is deprecated. +In `v7.0.0`, plan-time validation of `engine` will require an entirely lowercase value to match the returned value from the AWS API without diff suppression. + +## Resource `aws_elasticache_user_group` + +The ability to provide an uppercase `engine` value is deprecated. +In `v7.0.0`, plan-time validation of `engine` will require an entirely lowercase value to match the returned value from the AWS API without diff suppression. + +## Resource `aws_flow_log` + +Remove `log_group_name`—it is no longer supported. Use `log_destination` instead. + +## Resource `aws_guardduty_detector` + +`datasources` is deprecated. +Use the `aws_guardduty_detector_feature` resource instead. + +## Resource `aws_guardduty_organization_configuration` + +* Remove `auto_enable`—it is no longer supported. +* `auto_enable_organization_members` is now required. +* `datasources` is deprecated. + +## Resource `aws_instance` + +* `user_data` no longer applies hashing and is now stored in clear text. **Do not include passwords or sensitive information** in `user_data`, as it will be visible in plaintext. Follow [AWS Best Practices](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html) to secure your instance metadata. If you need to provide base64-encoded user data, use `user_data_base64` instead. +* Remove `cpu_core_count` and `cpu_threads_per_core`—they are no longer supported. Instead, use the `cpu_options` configuration block with `core_count` and `threads_per_core`. + +## Resource `aws_kinesis_analytics_application` + +This resource is deprecated and will be removed in a future version. 
[Effective January 27, 2026](https://aws.amazon.com/blogs/big-data/migrate-from-amazon-kinesis-data-analytics-for-sql-to-amazon-managed-service-for-apache-flink-and-amazon-managed-service-for-apache-flink-studio/), AWS will [no longer support](https://docs.aws.amazon.com/kinesisanalytics/latest/dev/discontinuation.html) Amazon Kinesis Data Analytics for SQL. Use the `aws_kinesisanalyticsv2_application` resource instead to manage Amazon Kinesis Data Analytics for Apache Flink applications. AWS provides guidance for migrating from [Amazon Kinesis Data Analytics for SQL Applications to Amazon Managed Service for Apache Flink Studio](https://aws.amazon.com/blogs/big-data/migrate-from-amazon-kinesis-data-analytics-for-sql-applications-to-amazon-managed-service-for-apache-flink-studio/) including [examples](https://docs.aws.amazon.com/kinesisanalytics/latest/dev/migrating-to-kda-studio-overview.html). + +## Resource `aws_launch_template` + +* Remove `elastic_gpu_specifications`—it is no longer supported. Amazon Elastic Graphics reached end of life in January 2024. +* Remove `elastic_inference_accelerator`—it is no longer supported. Amazon Elastic Inference reached end of life in April 2024. +* See also [changes](#typenullablebool-validation-update) to `block_device_mappings.ebs.delete_on_termination`, `block_device_mappings.ebs.encrypted`, `ebs_optimized`, `network_interfaces.associate_carrier_ip_address`, `network_interfaces.associate_public_ip_address`, `network_interfaces.delete_on_termination`, and `network_interfaces.primary_ipv6`. + +## Resource `aws_lb_listener` + +* For `mutual_authentication`, `advertise_trust_store_ca_names`, `ignore_client_certificate_expiry`, and `trust_store_arn` can now only be set when `mode` is `verify`. +* `trust_store_arn` is required when `mode` is `verify`. + +## Resource `aws_media_store_container` + +This resource is deprecated and will be removed in a future version. 
AWS has [announced](https://aws.amazon.com/blogs/media/support-for-aws-elemental-mediastore-ending-soon/) the discontinuation of AWS Elemental MediaStore, effective November 13, 2025. Users should begin transitioning to alternative solutions as soon as possible. For simple live streaming workflows, AWS recommends migrating to Amazon S3. For advanced use cases that require features such as packaging, DRM, or cross-region redundancy, consider using AWS Elemental MediaPackage. + +## Resource `aws_media_store_container_policy` + +This resource is deprecated and will be removed in a future version. AWS has [announced](https://aws.amazon.com/blogs/media/support-for-aws-elemental-mediastore-ending-soon/) the discontinuation of AWS Elemental MediaStore, effective November 13, 2025. Users should begin transitioning to alternative solutions as soon as possible. For simple live streaming workflows, AWS recommends migrating to Amazon S3. For advanced use cases that require features such as packaging, DRM, or cross-region redundancy, consider using AWS Elemental MediaPackage. + +## Resource `aws_networkmanager_core_network` + +Remove `base_policy_region`—it is no longer supported. Use `base_policy_regions` instead. + +## Resource `aws_opensearch_domain` + +Remove `kibana_endpoint`—it is no longer supported. AWS OpenSearch Service does not use Kibana endpoints (i.e., `_plugin/kibana`). Instead, OpenSearch uses Dashboards, accessible at the path `/_dashboards/` on the domain endpoint. The terminology has shifted from “Kibana” to “Dashboards.” + +For more information, see the [AWS OpenSearch Dashboards documentation](https://docs.aws.amazon.com/opensearch-service/latest/developerguide/dashboards.html). + +## Resource `aws_opensearchserverless_security_config` + +Treat `saml_options` as a list of nested blocks instead of a single-nested block. The resource configuration itself does not change. However, now, include an index when referencing it. 
For example, update `saml_options.session_timeout` to `saml_options[0].session_timeout`. + +## Resource `aws_paymentcryptography_key` + +Treat `key_attributes` and `key_attributes.key_modes_of_use` as lists of nested blocks instead of single-nested blocks. The resource configuration itself does not change. However, you must now include an index when referencing them. For example, update `key_attributes.key_modes_of_use.decrypt` to `key_attributes[0].key_modes_of_use[0].decrypt`. + +## Resource `aws_redshift_cluster` + +* `encrypted` now defaults to `true`. +* `publicly_accessible` now defaults to `false`. +* Remove `snapshot_copy`—it is no longer supported. Use the `aws_redshift_snapshot_copy` resource instead. +* Remove `logging`—it is no longer supported. Use the `aws_redshift_logging` resource instead. +* `cluster_public_key`, `cluster_revision_number`, and `endpoint` are now read only and should not be set. + +## Resource `aws_redshift_service_account` + +The `aws_redshift_service_account` resource has been removed. AWS [recommends](https://docs.aws.amazon.com/redshift/latest/mgmt/db-auditing.html#db-auditing-bucket-permissions) that a [service principal name](https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements_principal.html#principal-services) should be used instead of an AWS account ID in any relevant IAM policy. + +## Resource `aws_rekognition_stream_processor` + +Treat `regions_of_interest.bounding_box` as a list of nested blocks instead of a single-nested block. The resource configuration itself does not change. However, you must now include an index when referencing it. For example, update `regions_of_interest[0].bounding_box.height` to `regions_of_interest[0].bounding_box[0].height`.
+ +## Resource `aws_resiliencehub_resiliency_policy` + +Treat the following as lists of nested blocks instead of single-nested blocks: + +- `policy` +- `policy.az` +- `policy.hardware` +- `policy.software` +- `policy.region` + +The resource configuration itself does not change. However, now, include an index when referencing them. For example, update `policy.az.rpo` to `policy[0].az[0].rpo`. + +## Resource `aws_s3_bucket` + +`bucket_region` has been added and should be used instead of `region`, which is now used for [Enhanced Region Support](enhanced-region-support.html). + +## Resource `aws_sagemaker_image_version` + +For the `id`, use a comma-delimited string concatenating `image_name` and `version`. For example, in an import command, use comma-delimiting for the composite `id`. +Use `image_name` to reference the image name. + +## Resource `aws_sagemaker_notebook_instance` + +Remove `accelerator_types`—it is no longer supported. Instead, use `instance_type` to use [Inferentia](https://docs.aws.amazon.com/sagemaker/latest/dg/neo-supported-cloud.html). + +## Resource `aws_servicequotas_template` + +`region` has been deprecated. Use `aws_region` instead. + +## Resource `aws_spot_instance_request` + +Remove `block_duration_minutes`—it is no longer supported. + +## Resource `aws_ssm_association` + +Remove `instance_id`—it is no longer supported. Use `targets` instead. + +## Resource `aws_ssmincidents_replication_set` + +`region` has been deprecated. Use `regions` instead. + +## Resource `aws_verifiedpermissions_schema` + +Treat `definition` as a list of nested blocks instead of a single-nested block. The resource configuration itself does not change. However, now, include an index when referencing it. For example, update `definition.value` to `definition[0].value`. 
+ +## Resource `aws_wafv2_web_acl` + +The default value for `rule.statement.managed_rule_group_statement.managed_rule_group_configs.aws_managed_rules_bot_control_rule_set.enable_machine_learning` is now `false`. +To retain the previous behavior where the argument was omitted, explicitly set the value to `true`. + + \ No newline at end of file diff --git a/website/docs/cdktf/python/index.html.markdown b/website/docs/cdktf/python/index.html.markdown index 560c6273ba8b..918e9e50b614 100644 --- a/website/docs/cdktf/python/index.html.markdown +++ b/website/docs/cdktf/python/index.html.markdown @@ -9,18 +9,13 @@ description: |- # AWS Provider -Use the Amazon Web Services (AWS) provider to interact with the -many resources supported by AWS. You must configure the provider -with the proper credentials before you can use it. +The Amazon Web Services (AWS) provider is Terraform’s most widely-used provider and the industry-standard way to manage AWS infrastructure as code. It is an indispensable part of how leading technology companies, global banks, government agencies, and some of the largest enterprises in the world build and operate in the cloud. Every day, it provisions and orchestrates billions of dollars of AWS infrastructure across thousands of organizations. -Use the navigation to the left to read about the available resources. There are currently 1514 resources and 608 data sources available in the provider. +With 1,543 resources and 615 data sources, the AWS provider spans the full breadth of AWS services—from foundational capabilities like compute, storage, networking, and identity management to advanced services for AI, analytics, and event-driven architectures, including Lambda, RDS, SageMaker, and Bedrock. Whether automating a single S3 bucket or orchestrating a multi-region, enterprise-scale environment, the provider delivers consistent, reliable workflows that scale with your needs. 
-To learn the basics of Terraform using this provider, follow the -hands-on [get started tutorials](https://developer.hashicorp.com/terraform/tutorials/aws-get-started/infrastructure-as-code?in=terraform/aws-get-started&utm_source=WEBSITE&utm_medium=WEB_IO&utm_offer=ARTICLE_PAGE&utm_content=DOCS). Interact with AWS services, -including Lambda, RDS, and IAM by following the [AWS services -tutorials](https://developer.hashicorp.com/terraform/tutorials/aws?utm_source=WEBSITE&utm_medium=WEB_IO&utm_offer=ARTICLE_PAGE&utm_content=DOCS). +Configure the provider with your AWS credentials, and you can immediately begin creating and managing infrastructure in a safe, repeatable way. Use the navigation on the left to explore the available resources, or start with our [Get Started tutorials](https://developer.hashicorp.com/terraform/tutorials/aws-get-started/infrastructure-as-code?in=terraform/aws-get-started&utm_source=WEBSITE&utm_medium=WEB_IO&utm_offer=ARTICLE_PAGE&utm_content=DOCS) to learn the fundamentals. For deeper guidance on specific AWS services, visit the [AWS services tutorials](https://developer.hashicorp.com/terraform/tutorials/aws?utm_source=WEBSITE&utm_medium=WEB_IO&utm_offer=ARTICLE_PAGE&utm_content=DOCS). -Some AWS services do not support IPv6. As a result, the provider may not be able to interact with AWS APIs using IPv6 addresses. +Note: Some AWS services do not yet support IPv6. In those cases, the provider may not be able to connect to AWS APIs over IPv6 addresses. ## Example Usage @@ -450,17 +445,19 @@ In addition to [generic `provider` arguments](https://www.terraform.io/docs/conf Can also be set with either the `AWS_REGION` or `AWS_DEFAULT_REGION` environment variables, or via a shared config file parameter `region` if `profile` is used. If credentials are retrieved from the EC2 Instance Metadata Service, the Region can also be retrieved from the metadata. 
+ Most Regional resources, data sources and ephemeral resources support an optional top-level `region` argument which can be used to override the provider configuration value. See the individual resource's documentation for details. * `retry_mode` - (Optional) Specifies how retries are attempted. Valid values are `standard` and `adaptive`. Can also be configured using the `AWS_RETRY_MODE` environment variable or the shared config file parameter `retry_mode`. * `s3_use_path_style` - (Optional) Whether to enable the request to use path-style addressing, i.e., `https://s3.amazonaws.com/BUCKET/KEY`. By default, the S3 client will use virtual hosted bucket addressing, `https://BUCKET.s3.amazonaws.com/KEY`, when possible. Specific to the Amazon S3 service. -* `s3_us_east_1_regional_endpoint` - (Optional) Specifies whether S3 API calls in the `us-east-1` Region use the legacy global endpoint or a regional endpoint. +* `s3_us_east_1_regional_endpoint` - (Optional, **Deprecated**) Specifies whether S3 API calls in the `us-east-1` Region use the legacy global endpoint or a regional endpoint. Valid values are `legacy` or `regional`. If omitted, the default behavior in the `us-east-1` Region is to use the global endpoint for general purpose buckets and the regional endpoint for directory buckets. Can also be configured using the `AWS_S3_US_EAST_1_REGIONAL_ENDPOINT` environment variable or the `s3_us_east_1_regional_endpoint` shared config file parameter. Specific to the Amazon S3 service. + This argument and the ability to use the global S3 endpoint are deprecated and will be removed in `v7.0.0`. * `secret_key` - (Optional) AWS secret key. Can also be set with the `AWS_SECRET_ACCESS_KEY` environment variable, or via a shared configuration and credentials files if `profile` is used. See also `access_key`. * `shared_config_files` - (Optional) List of paths to AWS shared config files. If not set, the default is `[~/.aws/config]`. 
A single value can also be set with the `AWS_CONFIG_FILE` environment variable. * `shared_credentials_files` - (Optional) List of paths to the shared credentials file. If not set and a profile is used, the default value is `[~/.aws/credentials]`. A single value can also be set with the `AWS_SHARED_CREDENTIALS_FILE` environment variable. @@ -900,4 +897,4 @@ Approaches differ per authentication providers: There used to be no better way to get account ID out of the API when using the federated account until `sts:GetCallerIdentity` was introduced. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/list-resources/batch_job_queue.html.markdown b/website/docs/cdktf/python/list-resources/batch_job_queue.html.markdown new file mode 100644 index 000000000000..74b15f606a20 --- /dev/null +++ b/website/docs/cdktf/python/list-resources/batch_job_queue.html.markdown @@ -0,0 +1,35 @@ +--- +subcategory: "Batch" +layout: "aws" +page_title: "AWS: aws_batch_job_queue" +description: |- + Lists Batch Job Queue resources. +--- + + + +# List Resource: aws_batch_job_queue + +~> **Note:** The `aws_batch_job_queue` List Resource is in beta. Its interface and behavior may change as the feature evolves, and breaking changes are possible. It is offered as a technical preview without compatibility guarantees until Terraform 1.14 is generally available. + +Lists Batch Job Queue resources. + +## Example Usage + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformStack +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) +``` + +## Argument Reference + +This list resource supports the following arguments: + +* `region` - (Optional) [Region](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints) to query. 
+ Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). + + \ No newline at end of file diff --git a/website/docs/cdktf/python/list-resources/cloudwatch_log_group.html.markdown b/website/docs/cdktf/python/list-resources/cloudwatch_log_group.html.markdown new file mode 100644 index 000000000000..baae960d3a96 --- /dev/null +++ b/website/docs/cdktf/python/list-resources/cloudwatch_log_group.html.markdown @@ -0,0 +1,35 @@ +--- +subcategory: "CloudWatch Logs" +layout: "aws" +page_title: "AWS: aws_cloudwatch_log_group" +description: |- + Lists CloudWatch Logs Log Group resources. +--- + + + +# List Resource: aws_cloudwatch_log_group + +~> **Note:** The `aws_cloudwatch_log_group` List Resource is in beta. Its interface and behavior may change as the feature evolves, and breaking changes are possible. It is offered as a technical preview without compatibility guarantees until Terraform 1.14 is generally available. + +Lists CloudWatch Logs Log Group resources. + +## Example Usage + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformStack +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) +``` + +## Argument Reference + +This list resource supports the following arguments: + +* `region` - (Optional) [Region](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints) to query. + Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
+ + \ No newline at end of file diff --git a/website/docs/cdktf/python/list-resources/iam_role.html.markdown b/website/docs/cdktf/python/list-resources/iam_role.html.markdown new file mode 100644 index 000000000000..2364a91001eb --- /dev/null +++ b/website/docs/cdktf/python/list-resources/iam_role.html.markdown @@ -0,0 +1,34 @@ +--- +subcategory: "IAM (Identity & Access Management)" +layout: "aws" +page_title: "AWS: aws_iam_role" +description: |- + Lists IAM Role resources. +--- + + + +# List Resource: aws_iam_role + +~> **Note:** The `aws_iam_role` List Resource is in beta. Its interface and behavior may change as the feature evolves, and breaking changes are possible. It is offered as a technical preview without compatibility guarantees until Terraform 1.14 is generally available. + +Lists IAM Role resources. + +Excludes Service-Linked Roles (see "AWS service-linked role" in [IAM Roles Terms and Concepts documentation](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles.html#id_roles_terms-and-concepts)). + +## Example Usage + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformStack +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) +``` + +## Argument Reference + +This list resource does not support any arguments. + + \ No newline at end of file diff --git a/website/docs/cdktf/python/list-resources/instance.html.markdown b/website/docs/cdktf/python/list-resources/instance.html.markdown new file mode 100644 index 000000000000..b69ef666c898 --- /dev/null +++ b/website/docs/cdktf/python/list-resources/instance.html.markdown @@ -0,0 +1,68 @@ +--- +subcategory: "EC2 (Elastic Compute Cloud)" +layout: "aws" +page_title: "AWS: aws_instance" +description: |- + Lists EC2 Instance resources. +--- + + + +# List Resource: aws_instance + +~> **Note:** The `aws_instance` List Resource is in beta. 
Its interface and behavior may change as the feature evolves, and breaking changes are possible. It is offered as a technical preview without compatibility guarantees until Terraform 1.14 is generally available. + +Lists EC2 Instance resources. + +By default, EC2 Instances managed by an Auto Scaling Group and EC2 Instances in either the `terminated` or `shutting-down` state are excluded. + +## Example Usage + +### Basic Usage + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformStack +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) +``` + +### Filter Usage + +This example will return instances in the `stopped` state. + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformStack +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) +``` + +## Argument Reference + +This list resource supports the following arguments: + +* `filter` - (Optional) One or more filters to apply to the search. + If multiple `filter` blocks are provided, they all must be true. + For a full reference of filter names, see [describe-instances in the AWS CLI reference][1]. + See [`filter` Block](#filter-block) below. +* `include_auto_scaled` - (Optional) Whether to include EC2 instances that are managed by an Auto Scaling Group. + Default value is `false`. +* `region` - (Optional) [Region](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints) to query. + Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). + +### `filter` Block + +The `filter` block supports the following arguments: + +* `name` - (Required) Name of the filter. 
+ For a full reference of filter names, see [describe-instances in the AWS CLI reference][1]. +* `values` - (Required) One or more values to match. + +[1]: http://docs.aws.amazon.com/cli/latest/reference/ec2/describe-instances.html + + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/accessanalyzer_analyzer.html.markdown b/website/docs/cdktf/python/r/accessanalyzer_analyzer.html.markdown index 41d9201b1176..b9e2db2794ac 100644 --- a/website/docs/cdktf/python/r/accessanalyzer_analyzer.html.markdown +++ b/website/docs/cdktf/python/r/accessanalyzer_analyzer.html.markdown @@ -60,7 +60,7 @@ class MyConvertedCode(TerraformStack): aws_accessanalyzer_analyzer_example.override_logical_id("example") ``` -### Organization Unused Access Analyzer with analysis rule +### Organization Unused Access Analyzer With Analysis Rule ```python # DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug @@ -78,20 +78,19 @@ class MyConvertedCode(TerraformStack): analyzer_name="example", configuration=AccessanalyzerAnalyzerConfiguration( unused_access=AccessanalyzerAnalyzerConfigurationUnusedAccess( - analysis_rule=[{ - "exclusion": [{ - "account_ids": ["123456789012", "234567890123"] - }, { - "resource_tags": [{ + analysis_rule=AccessanalyzerAnalyzerConfigurationUnusedAccessAnalysisRule( + exclusion=[AccessanalyzerAnalyzerConfigurationUnusedAccessAnalysisRuleExclusion( + account_ids=["123456789012", "234567890123"] + ), AccessanalyzerAnalyzerConfigurationUnusedAccessAnalysisRuleExclusion( + resource_tags=[{ "key1": "value1" }, { "key2": "value2" } ] - } + ) ] - } - ], + ), unused_access_age=180 ) ), @@ -99,6 +98,68 @@ class MyConvertedCode(TerraformStack): ) ``` +### Account Internal Access Analyzer by Resource Types + +```python +# DO NOT EDIT. 
Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.accessanalyzer_analyzer import AccessanalyzerAnalyzer +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + AccessanalyzerAnalyzer(self, "test", + analyzer_name="example", + configuration=AccessanalyzerAnalyzerConfiguration( + internal_access=AccessanalyzerAnalyzerConfigurationInternalAccess( + analysis_rule=AccessanalyzerAnalyzerConfigurationInternalAccessAnalysisRule( + inclusion=[AccessanalyzerAnalyzerConfigurationInternalAccessAnalysisRuleInclusion( + resource_types=["AWS::S3::Bucket", "AWS::RDS::DBSnapshot", "AWS::DynamoDB::Table" + ] + ) + ] + ) + ) + ), + type="ACCOUNT_INTERNAL_ACCESS" + ) +``` + +### Organization Internal Access Analyzer by Account ID and Resource ARN + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. 
+# +from imports.aws.accessanalyzer_analyzer import AccessanalyzerAnalyzer +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + AccessanalyzerAnalyzer(self, "test", + analyzer_name="example", + configuration=AccessanalyzerAnalyzerConfiguration( + internal_access=AccessanalyzerAnalyzerConfigurationInternalAccess( + analysis_rule=AccessanalyzerAnalyzerConfigurationInternalAccessAnalysisRule( + inclusion=[AccessanalyzerAnalyzerConfigurationInternalAccessAnalysisRuleInclusion( + account_ids=["123456789012"], + resource_arns=["arn:aws:s3:::my-example-bucket"] + ) + ] + ) + ) + ), + type="ORGANIZATION_INTERNAL_ACCESS" + ) +``` + ## Argument Reference The following arguments are required: @@ -107,34 +168,64 @@ The following arguments are required: The following arguments are optional: -* `configuration` - (Optional) A block that specifies the configuration of the analyzer. [Documented below](#configuration-argument-reference) +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `configuration` - (Optional) A block that specifies the configuration of the analyzer. See [`configuration` Block](#configuration-block) for details. * `tags` - (Optional) Key-value map of resource tags. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. -* `type` - (Optional) Type of Analyzer. Valid values are `ACCOUNT`, `ORGANIZATION`, `ACCOUNT_UNUSED_ACCESS `, `ORGANIZATION_UNUSED_ACCESS`. Defaults to `ACCOUNT`. 
+* `type` - (Optional) Type that represents the zone of trust or scope for the analyzer. Valid values are `ACCOUNT`, `ACCOUNT_INTERNAL_ACCESS`, `ACCOUNT_UNUSED_ACCESS`, `ORGANIZATION`, `ORGANIZATION_INTERNAL_ACCESS`, `ORGANIZATION_UNUSED_ACCESS`. Defaults to `ACCOUNT`. + +### `configuration` Block + +The `configuration` configuration block supports the following arguments: + +* `internal_access` - (Optional) Specifies the configuration of an internal access analyzer for an AWS organization or account. This configuration determines how the analyzer evaluates access within your AWS environment. See [`internal_access` Block](#internal_access-block) for details. +* `unused_access` - (Optional) Specifies the configuration of an unused access analyzer for an AWS organization or account. See [`unused_access` Block](#unused_access-block) for details. + +### `internal_access` Block + +The `internal_access` configuration block supports the following arguments: + +* `analysis_rule` - (Optional) Information about analysis rules for the internal access analyzer. These rules determine which resources and access patterns will be analyzed. See [`analysis_rule` Block for Internal Access Analyzer](#analysis_rule-block-for-internal-access-analyzer) for details. + +### `analysis_rule` Block for Internal Access Analyzer + +The `analysis_rule` configuration block for internal access analyzer supports the following arguments: + +* `inclusion` - (Optional) List of rules for the internal access analyzer containing criteria to include in analysis. Only resources that meet the rule criteria will generate findings. See [`inclusion` Block](#inclusion-block) for details. + +### `inclusion` Block + +The `inclusion` configuration block supports the following arguments: + +* `account_ids` - (Optional) List of AWS account IDs to apply to the internal access analysis rule criteria. Account IDs can only be applied to the analysis rule criteria for organization-level analyzers. 
+* `resource_arns` - (Optional) List of resource ARNs to apply to the internal access analysis rule criteria. The analyzer will only generate findings for resources that match these ARNs. +* `resource_types` - (Optional) List of resource types to apply to the internal access analysis rule criteria. The analyzer will only generate findings for resources of these types. Refer to [InternalAccessAnalysisRuleCriteria](https://docs.aws.amazon.com/access-analyzer/latest/APIReference/API_InternalAccessAnalysisRuleCriteria.html) in the AWS IAM Access Analyzer API Reference for valid values. + +### `unused_access` Block -### `configuration` Argument Reference +The `unused_access` configuration block supports the following arguments: -* `unused_access` - (Optional) A block that specifies the configuration of an unused access analyzer for an AWS organization or account. [Documented below](#unused_access-argument-reference) +* `unused_access_age` - (Optional) Specified access age in days for which to generate findings for unused access. +* `analysis_rule` - (Optional) Information about analysis rules for the analyzer. Analysis rules determine which entities will generate findings based on the criteria you define when you create the rule. See [`analysis_rule` Block for Unused Access Analyzer](#analysis_rule-block-for-unused-access-analyzer) for details. -### `unused_access` Argument Reference +### `analysis_rule` Block for Unused Access Analyzer -* `unused_access_age` - (Optional) The specified access age in days for which to generate findings for unused access. -* `analysis_rule` - (Optional) A block for analysis rules. [Documented below](#analysis_rule-argument-reference) +The `analysis_rule` configuration block for unused access analyzer supports the following arguments: -### `analysis_rule` Argument Reference +* `exclusion` - (Optional) List of rules for the analyzer containing criteria to exclude from analysis. 
Entities that meet the rule criteria will not generate findings. See [`exclusion` Block](#exclusion-block) for details. -* `exclusion` - (Optional) A block for the analyzer rules containing criteria to exclude from analysis. [Documented below](#exclusion-argument-reference) +### `exclusion` Block -#### `exclusion` Argument Reference +The `exclusion` configuration block supports the following arguments: -* `account_ids` - (Optional) A list of account IDs to exclude from the analysis. -* `resource_tags` - (Optional) A list of key-value pairs for resource tags to exclude from the analysis. +* `account_ids` - (Optional) List of AWS account IDs to apply to the analysis rule criteria. The accounts cannot include the organization analyzer owner account. Account IDs can only be applied to the analysis rule criteria for organization-level analyzers. +* `resource_tags` - (Optional) List of key-value pairs for resource tags to exclude from the analysis. ## Attribute Reference This resource exports the following attributes in addition to the arguments above: * `arn` - ARN of the Analyzer. -* `id` - Analyzer name. +* `id` - Name of the analyzer. * `tags_all` - Map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). 
## Import @@ -162,4 +253,4 @@ Using `terraform import`, import Access Analyzer Analyzers using the `analyzer_n % terraform import aws_accessanalyzer_analyzer.example example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/accessanalyzer_archive_rule.html.markdown b/website/docs/cdktf/python/r/accessanalyzer_archive_rule.html.markdown index 84ce8a26c0be..c21d436e6002 100644 --- a/website/docs/cdktf/python/r/accessanalyzer_archive_rule.html.markdown +++ b/website/docs/cdktf/python/r/accessanalyzer_archive_rule.html.markdown @@ -47,8 +47,9 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -The following arguments are required: +This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `analyzer_name` - (Required) Analyzer name. * `filter` - (Required) Filter criteria for the archive rule. See [Filter](#filter) for more details. * `rule_name` - (Required) Rule name. 
@@ -94,4 +95,4 @@ Using `terraform import`, import AccessAnalyzer ArchiveRule using the `analyzer_ % terraform import aws_accessanalyzer_archive_rule.example example-analyzer/example-rule ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/acm_certificate.html.markdown b/website/docs/cdktf/python/r/acm_certificate.html.markdown index e1aafb3a6aba..41144a01adca 100644 --- a/website/docs/cdktf/python/r/acm_certificate.html.markdown +++ b/website/docs/cdktf/python/r/acm_certificate.html.markdown @@ -187,6 +187,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * Creating an Amazon issued certificate * `domain_name` - (Required) Domain name for which the certificate should be issued * `subject_alternative_names` - (Optional) Set of domains that should be SANs in the issued certificate. To remove all elements of a previously configured list, set this value equal to an empty list (`[]`) or use the [`terraform taint` command](https://www.terraform.io/docs/commands/taint.html) to trigger recreation. @@ -214,6 +215,7 @@ This resource supports the following arguments: Supported nested arguments for the `options` configuration block: * `certificate_transparency_logging_preference` - (Optional) Whether certificate details should be added to a certificate transparency log. Valid values are `ENABLED` or `DISABLED`. See https://docs.aws.amazon.com/acm/latest/userguide/acm-concepts.html#concept-transparency for more details. +* `export` - (Optional) Whether the certificate can be exported. Valid values are `ENABLED` or `DISABLED` (default). 
**Note** Issuing an exportable certificate is subject to additional charges. See [AWS Certificate Manager pricing](https://aws.amazon.com/certificate-manager/pricing/) for more details. ## validation_option Configuration Block @@ -258,6 +260,27 @@ Renewal summary objects export the following attributes: ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_acm_certificate.example + identity = { + "arn" = "arn:aws:acm:eu-central-1:123456789012:certificate/7e7a28d2-163f-4b8f-b9cd-822f96c08d6a" + } +} + +resource "aws_acm_certificate" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) ARN of the certificate. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import certificates using their ARN. For example: ```python @@ -272,13 +295,13 @@ from imports.aws.acm_certificate import AcmCertificate class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) - AcmCertificate.generate_config_for_import(self, "cert", "arn:aws:acm:eu-central-1:123456789012:certificate/7e7a28d2-163f-4b8f-b9cd-822f96c08d6a") + AcmCertificate.generate_config_for_import(self, "example", "arn:aws:acm:eu-central-1:123456789012:certificate/7e7a28d2-163f-4b8f-b9cd-822f96c08d6a") ``` Using `terraform import`, import certificates using their ARN. 
For example: ```console -% terraform import aws_acm_certificate.cert arn:aws:acm:eu-central-1:123456789012:certificate/7e7a28d2-163f-4b8f-b9cd-822f96c08d6a +% terraform import aws_acm_certificate.example arn:aws:acm:eu-central-1:123456789012:certificate/7e7a28d2-163f-4b8f-b9cd-822f96c08d6a ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/acm_certificate_validation.html.markdown b/website/docs/cdktf/python/r/acm_certificate_validation.html.markdown index edad40e3e09e..2fa0141c7da2 100644 --- a/website/docs/cdktf/python/r/acm_certificate_validation.html.markdown +++ b/website/docs/cdktf/python/r/acm_certificate_validation.html.markdown @@ -187,6 +187,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `certificate_arn` - (Required) ARN of the certificate that is being validated. * `validation_record_fqdns` - (Optional) List of FQDNs that implement the validation. Only valid for DNS validation method ACM certificates. 
If this is set, the resource can implement additional sanity checks and has an explicit dependency on the resource that is implementing the validation @@ -202,4 +203,4 @@ This resource exports the following attributes in addition to the arguments abov - `create` - (Default `75m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/acmpca_certificate.html.markdown b/website/docs/cdktf/python/r/acmpca_certificate.html.markdown index d68ec0852772..7ce9c792450f 100644 --- a/website/docs/cdktf/python/r/acmpca_certificate.html.markdown +++ b/website/docs/cdktf/python/r/acmpca_certificate.html.markdown @@ -75,6 +75,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `certificate_authority_arn` - (Required) ARN of the certificate authority. * `certificate_signing_request` - (Required) Certificate Signing Request in PEM format. * `signing_algorithm` - (Required) Algorithm to use to sign certificate requests. Valid values: `SHA256WITHRSA`, `SHA256WITHECDSA`, `SHA384WITHRSA`, `SHA384WITHECDSA`, `SHA512WITHRSA`, `SHA512WITHECDSA`. @@ -98,6 +99,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. 
For example: + +```terraform +import { + to = aws_acmpca_certificate.example + identity = { + "arn" = "arn:aws:acm-pca:us-east-1:123456789012:certificate-authority/12345678-1234-1234-1234-123456789012/certificate/286535153982981100925020015808220737245" + } +} + +resource "aws_acmpca_certificate" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the ACM PCA certificate. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import ACM PCA Certificates using their ARN. For example: ```python @@ -121,4 +143,4 @@ Using `terraform import`, import ACM PCA Certificates using their ARN. For examp % terraform import aws_acmpca_certificate.cert arn:aws:acm-pca:eu-west-1:675225743824:certificate-authority/08319ede-83g9-1400-8f21-c7d12b2b6edb/certificate/a4e9c2aa4bcfab625g1b9136464cd3a ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/acmpca_certificate_authority.html.markdown b/website/docs/cdktf/python/r/acmpca_certificate_authority.html.markdown index e4cc97fef17a..8bb5a2a39915 100644 --- a/website/docs/cdktf/python/r/acmpca_certificate_authority.html.markdown +++ b/website/docs/cdktf/python/r/acmpca_certificate_authority.html.markdown @@ -135,6 +135,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `certificate_authority_configuration` - (Required) Nested argument containing algorithms and certificate subject information. Defined below. 
* `enabled` - (Optional) Whether the certificate authority is enabled or disabled. Defaults to `true`. Can only be disabled if the CA is in an `ACTIVE` state. * `revocation_configuration` - (Optional) Nested argument containing revocation configuration. Defined below. @@ -209,6 +210,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_acmpca_certificate_authority.example + identity = { + "arn" = "arn:aws:acm-pca:us-east-1:123456789012:certificate-authority/12345678-1234-1234-1234-123456789012" + } +} + +resource "aws_acmpca_certificate_authority" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the ACM PCA certificate authority. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import `aws_acmpca_certificate_authority` using the certificate authority ARN. 
For example: ```python @@ -232,4 +254,4 @@ Using `terraform import`, import `aws_acmpca_certificate_authority` using the ce % terraform import aws_acmpca_certificate_authority.example arn:aws:acm-pca:us-east-1:123456789012:certificate-authority/12345678-1234-1234-1234-123456789012 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/acmpca_certificate_authority_certificate.html.markdown b/website/docs/cdktf/python/r/acmpca_certificate_authority_certificate.html.markdown index 6cd18c242545..a7b117f75004 100644 --- a/website/docs/cdktf/python/r/acmpca_certificate_authority_certificate.html.markdown +++ b/website/docs/cdktf/python/r/acmpca_certificate_authority_certificate.html.markdown @@ -138,6 +138,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `certificate` - (Required) PEM-encoded certificate for the Certificate Authority. * `certificate_authority_arn` - (Required) ARN of the Certificate Authority. * `certificate_chain` - (Optional) PEM-encoded certificate chain that includes any intermediate certificates and chains up to root CA. Required for subordinate Certificate Authorities. Not allowed for root Certificate Authorities. @@ -146,4 +147,4 @@ This resource supports the following arguments: This resource exports no additional attributes. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/acmpca_permission.html.markdown b/website/docs/cdktf/python/r/acmpca_permission.html.markdown index fc6b92cbe36f..212f92295ad5 100644 --- a/website/docs/cdktf/python/r/acmpca_permission.html.markdown +++ b/website/docs/cdktf/python/r/acmpca_permission.html.markdown @@ -50,6 +50,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `certificate_authority_arn` - (Required) ARN of the CA that grants the permissions. * `actions` - (Required) Actions that the specified AWS service principal can use. These include `IssueCertificate`, `GetCertificate`, and `ListPermissions`. Note that in order for ACM to automatically rotate certificates issued by a PCA, it must be granted permission on all 3 actions, as per the example above. * `principal` - (Required) AWS service or identity that receives the permission. At this time, the only valid principal is `acm.amazonaws.com`. @@ -61,4 +62,4 @@ This resource exports the following attributes in addition to the arguments abov * `policy` - IAM policy that is associated with the permission. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/acmpca_policy.html.markdown b/website/docs/cdktf/python/r/acmpca_policy.html.markdown index 290e9f9195c3..fd017e845bbb 100644 --- a/website/docs/cdktf/python/r/acmpca_policy.html.markdown +++ b/website/docs/cdktf/python/r/acmpca_policy.html.markdown @@ -72,6 +72,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `resource_arn` - (Required) ARN of the private CA to associate with the policy. * `policy` - (Required) JSON-formatted IAM policy to attach to the specified private CA resource. @@ -81,6 +82,27 @@ This resource exports no additional attributes. ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_acmpca_policy.example + identity = { + "arn" = "arn:aws:acm-pca:us-east-1:123456789012:certificate-authority/12345678-1234-1234-1234-123456789012" + } +} + +resource "aws_acmpca_policy" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the ACM PCA certificate authority. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import `aws_acmpca_policy` using the `resource_arn` value. 
For example: ```python @@ -104,4 +126,4 @@ Using `terraform import`, import `aws_acmpca_policy` using the `resource_arn` va % terraform import aws_acmpca_policy.example arn:aws:acm-pca:us-east-1:123456789012:certificate-authority/12345678-1234-1234-1234-123456789012 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/ami.html.markdown b/website/docs/cdktf/python/r/ami.html.markdown index e4dfe32e236e..5255f343bde8 100644 --- a/website/docs/cdktf/python/r/ami.html.markdown +++ b/website/docs/cdktf/python/r/ami.html.markdown @@ -51,6 +51,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Region-unique name for the AMI. * `boot_mode` - (Optional) Boot mode of the AMI. For more information, see [Boot modes](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ami-boot.html) in the Amazon Elastic Compute Cloud User Guide. * `deprecation_time` - (Optional) Date and time to deprecate the AMI. If you specified a value for seconds, Amazon EC2 rounds the seconds to the nearest minute. Valid values: [RFC3339 time string](https://tools.ietf.org/html/rfc3339#section-5.8) (`YYYY-MM-DDTHH:MM:SSZ`) @@ -161,4 +162,4 @@ Using `terraform import`, import `aws_ami` using the ID of the AMI. 
For example: % terraform import aws_ami.example ami-12345678 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/ami_copy.html.markdown b/website/docs/cdktf/python/r/ami_copy.html.markdown index a3bb0dfbd954..752097ee895d 100644 --- a/website/docs/cdktf/python/r/ami_copy.html.markdown +++ b/website/docs/cdktf/python/r/ami_copy.html.markdown @@ -50,6 +50,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Region-unique name for the AMI. * `source_ami_id` - (Required) Id of the AMI to copy. This id must be valid in the region given by `source_ami_region`. @@ -82,4 +83,4 @@ configuration. * `update` - (Default `40m`) * `delete` - (Default `90m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/ami_from_instance.html.markdown b/website/docs/cdktf/python/r/ami_from_instance.html.markdown index d286288b11c7..57cce226b620 100644 --- a/website/docs/cdktf/python/r/ami_from_instance.html.markdown +++ b/website/docs/cdktf/python/r/ami_from_instance.html.markdown @@ -52,6 +52,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Region-unique name for the AMI. * `source_instance_id` - (Required) ID of the instance to use as the basis of the AMI. 
* `snapshot_without_reboot` - (Optional) Boolean that overrides the behavior of stopping @@ -79,4 +80,4 @@ This resource also exports a full set of attributes corresponding to the argumen [`aws_ami`](/docs/providers/aws/r/ami.html) resource, allowing the properties of the created AMI to be used elsewhere in the configuration. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/ami_launch_permission.html.markdown b/website/docs/cdktf/python/r/ami_launch_permission.html.markdown index ed11a8b4637b..273284cde179 100644 --- a/website/docs/cdktf/python/r/ami_launch_permission.html.markdown +++ b/website/docs/cdktf/python/r/ami_launch_permission.html.markdown @@ -80,6 +80,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `account_id` - (Optional) AWS account ID for the launch permission. * `group` - (Optional) Name of the group for the launch permission. Valid values: `"all"`. * `image_id` - (Required) ID of the AMI. @@ -117,4 +118,4 @@ Using `terraform import`, import AMI Launch Permissions using `[ACCOUNT-ID|GROUP % terraform import aws_ami_launch_permission.example 123456789012/ami-12345678 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/amplify_app.html.markdown b/website/docs/cdktf/python/r/amplify_app.html.markdown index 6f93c6489fbd..c5e88e074e44 100644 --- a/website/docs/cdktf/python/r/amplify_app.html.markdown +++ b/website/docs/cdktf/python/r/amplify_app.html.markdown @@ -181,10 +181,33 @@ class MyConvertedCode(TerraformStack): ) ``` +### Job Config + +```python +# DO NOT EDIT. 
Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.amplify_app import AmplifyApp +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + AmplifyApp(self, "example", + job_config=AmplifyAppJobConfig( + build_compute_type="STANDARD_8GB" + ), + name="example" + ) +``` + ## Argument Reference This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name for an Amplify app. * `access_token` - (Optional) Personal access token for a third-party source control system for an Amplify app. This token must have write access to the relevant repo to create a webhook and a read-only deploy key for the Amplify project. The token is not stored, so after applying this attribute can be removed and the setup token deleted. * `auto_branch_creation_config` - (Optional) Automated branch creation configuration for an Amplify app. See [`auto_branch_creation_config` Block](#auto_branch_creation_config-block) for details. @@ -202,6 +225,7 @@ This resource supports the following arguments: * `enable_branch_auto_deletion` - (Optional) Automatically disconnects a branch in the Amplify Console when you delete a branch from your Git repository. * `environment_variables` - (Optional) Environment variables map for an Amplify app. * `iam_service_role_arn` - (Optional) AWS Identity and Access Management (IAM) service role for an Amplify app. 
+* `job_config` - (Optional) Used to configure the [Amplify Application build instance compute type](https://docs.aws.amazon.com/amplify/latest/APIReference/API_JobConfig.html#amplify-Type-JobConfig-buildComputeType). See [`job_config` Block](#job_config-block) for details. * `oauth_token` - (Optional) OAuth token for a third-party source control system for an Amplify app. The OAuth token is used to create a webhook and a read-only deploy key. The OAuth token is not stored. * `platform` - (Optional) Platform or framework for an Amplify app. Valid values: `WEB`, `WEB_COMPUTE`. Default value: `WEB`. * `repository` - (Optional) Repository for an Amplify app. @@ -237,6 +261,12 @@ The `custom_rule` configuration block supports the following arguments: * `status` - (Optional) Status code for a URL rewrite or redirect rule. Valid values: `200`, `301`, `302`, `404`, `404-200`. * `target` - (Required) Target pattern for a URL rewrite or redirect rule. +### `job_config` Block + +The `job_config` configuration block supports the following arguments: + +* `build_compute_type` - (Optional) Size of the build instance. Valid values: `STANDARD_8GB`, `LARGE_16GB`, and `XLARGE_72GB`. Default: `STANDARD_8GB`. + ## Attribute Reference This resource exports the following attributes in addition to the arguments above: @@ -281,4 +311,4 @@ Using `terraform import`, import Amplify App using Amplify App ID (appId). For e App ID can be obtained from App ARN (e.g., `arn:aws:amplify:us-east-1:12345678:apps/d2ypk4k47z8u6`). 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/amplify_backend_environment.html.markdown b/website/docs/cdktf/python/r/amplify_backend_environment.html.markdown index ed02a722229c..0f6a0e9661f2 100644 --- a/website/docs/cdktf/python/r/amplify_backend_environment.html.markdown +++ b/website/docs/cdktf/python/r/amplify_backend_environment.html.markdown @@ -44,6 +44,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `app_id` - (Required) Unique ID for an Amplify app. * `environment_name` - (Required) Name for the backend environment. * `deployment_artifacts` - (Optional) Name of deployment artifacts. @@ -81,4 +82,4 @@ Using `terraform import`, import Amplify backend environment using `app_id` and % terraform import aws_amplify_backend_environment.example d2ypk4k47z8u6/example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/amplify_branch.html.markdown b/website/docs/cdktf/python/r/amplify_branch.html.markdown index ba288b0addf3..cbd51dcf7ed2 100644 --- a/website/docs/cdktf/python/r/amplify_branch.html.markdown +++ b/website/docs/cdktf/python/r/amplify_branch.html.markdown @@ -169,6 +169,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
* `app_id` - (Required) Unique ID for an Amplify app. * `branch_name` - (Required) Name for the branch. * `backend_environment_arn` - (Optional) ARN for a backend environment that is part of an Amplify app. @@ -180,6 +181,7 @@ This resource supports the following arguments: * `enable_notification` - (Optional) Enables notifications for the branch. * `enable_performance_mode` - (Optional) Enables performance mode for the branch. * `enable_pull_request_preview` - (Optional) Enables pull request previews for this branch. +* `enable_skew_protection` - (Optional) Enables skew protection for the branch. * `environment_variables` - (Optional) Environment variables for the branch. * `framework` - (Optional) Framework for the branch. * `pull_request_environment_name` - (Optional) Amplify environment name for the pull request. @@ -223,4 +225,4 @@ Using `terraform import`, import Amplify branch using `app_id` and `branch_name` % terraform import aws_amplify_branch.master d2ypk4k47z8u6/master ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/amplify_domain_association.html.markdown b/website/docs/cdktf/python/r/amplify_domain_association.html.markdown index b6689d821cdc..8bcc5454db16 100644 --- a/website/docs/cdktf/python/r/amplify_domain_association.html.markdown +++ b/website/docs/cdktf/python/r/amplify_domain_association.html.markdown @@ -61,6 +61,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `app_id` - (Required) Unique ID for an Amplify app. * `certificate_settings` - (Optional) The type of SSL/TLS certificate to use for your custom domain. 
If you don't specify a certificate type, Amplify uses the default certificate that it provisions and manages for you. * `domain_name` - (Required) Domain name for the domain association. @@ -117,4 +118,4 @@ Using `terraform import`, import Amplify domain association using `app_id` and ` % terraform import aws_amplify_domain_association.app d2ypk4k47z8u6/example.com ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/amplify_webhook.html.markdown b/website/docs/cdktf/python/r/amplify_webhook.html.markdown index 362c2b580d8f..474374f94fe4 100644 --- a/website/docs/cdktf/python/r/amplify_webhook.html.markdown +++ b/website/docs/cdktf/python/r/amplify_webhook.html.markdown @@ -48,6 +48,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `app_id` - (Required) Unique ID for an Amplify app. * `branch_name` - (Required) Name for a branch that is part of the Amplify app. * `description` - (Optional) Description for a webhook. @@ -84,4 +85,4 @@ Using `terraform import`, import Amplify webhook using a webhook ID. 
For example % terraform import aws_amplify_webhook.master a26b22a0-748b-4b57-b9a0-ae7e601fe4b1 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/api_gateway_account.html.markdown b/website/docs/cdktf/python/r/api_gateway_account.html.markdown index 5785c35dc886..71de427338f3 100644 --- a/website/docs/cdktf/python/r/api_gateway_account.html.markdown +++ b/website/docs/cdktf/python/r/api_gateway_account.html.markdown @@ -74,6 +74,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `cloudwatch_role_arn` - (Optional) ARN of an IAM role for CloudWatch (to allow logging & monitoring). See more [in AWS Docs](https://docs.aws.amazon.com/apigateway/latest/developerguide/how-to-stage-settings.html#how-to-stage-settings-console). Logging & monitoring can be enabled/disabled and otherwise tuned on the API Gateway Stage level. * `reset_on_delete` - (Optional) If `true`, destroying the resource will reset account settings to default, otherwise account settings are not modified. Defaults to `false`. @@ -94,7 +95,7 @@ This resource exports the following attributes in addition to the arguments abov ## Import -In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import API Gateway Accounts using the word `api-gateway-account`. For example: +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import API Gateway Accounts using the account ID. For example: ```python # DO NOT EDIT. 
Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug @@ -108,13 +109,13 @@ from imports.aws.api_gateway_account import ApiGatewayAccount class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) - ApiGatewayAccount.generate_config_for_import(self, "demo", "api-gateway-account") + ApiGatewayAccount.generate_config_for_import(self, "demo", "123456789012") ``` -Using `terraform import`, import API Gateway Accounts using the word `api-gateway-account`. For example: +Using `terraform import`, import API Gateway Accounts using the account ID. For example: ```console -% terraform import aws_api_gateway_account.demo api-gateway-account +% terraform import aws_api_gateway_account.demo 123456789012 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/api_gateway_api_key.html.markdown b/website/docs/cdktf/python/r/api_gateway_api_key.html.markdown index 12933789b5c8..ecce7306b26e 100644 --- a/website/docs/cdktf/python/r/api_gateway_api_key.html.markdown +++ b/website/docs/cdktf/python/r/api_gateway_api_key.html.markdown @@ -37,6 +37,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the API key. * `customer_id` - (Optional) An Amazon Web Services Marketplace customer identifier, when integrating with the Amazon Web Services SaaS Marketplace. * `description` - (Optional) API key description. Defaults to "Managed by Terraform". @@ -79,4 +80,4 @@ Using `terraform import`, import API Gateway Keys using the `id`. 
For example: % terraform import aws_api_gateway_api_key.example 8bklk8bl1k3sB38D9B3l0enyWT8c09B30lkq0blk ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/api_gateway_authorizer.html.markdown b/website/docs/cdktf/python/r/api_gateway_authorizer.html.markdown index dee0c9e90cec..a8316d12d9dc 100644 --- a/website/docs/cdktf/python/r/api_gateway_authorizer.html.markdown +++ b/website/docs/cdktf/python/r/api_gateway_authorizer.html.markdown @@ -104,6 +104,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `authorizer_uri` - (Optional, required for type `TOKEN`/`REQUEST`) Authorizer's Uniform Resource Identifier (URI). 
This must be a well-formed Lambda function URI in the form of `arn:aws:apigateway:{region}:lambda:path/{service_api}`, e.g., `arn:aws:apigateway:us-west-2:lambda:path/2015-03-31/functions/arn:aws:lambda:us-west-2:012345678912:function:my-function/invocations` * `name` - (Required) Name of the authorizer @@ -147,4 +148,4 @@ Using `terraform import`, import AWS API Gateway Authorizer using the `REST-API- % terraform import aws_api_gateway_authorizer.authorizer 12345abcde/example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/api_gateway_base_path_mapping.html.markdown b/website/docs/cdktf/python/r/api_gateway_base_path_mapping.html.markdown index 702a803c0cb1..d23dbe80ef41 100644 --- a/website/docs/cdktf/python/r/api_gateway_base_path_mapping.html.markdown +++ b/website/docs/cdktf/python/r/api_gateway_base_path_mapping.html.markdown @@ -62,6 +62,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `domain_name` - (Required) Already-registered domain name to connect the API to. * `api_id` - (Required) ID of the API to connect. * `stage_name` - (Optional) Name of a specific deployment stage to expose at the given path. If omitted, callers may select any stage by including its name as a path element after the base path. 
@@ -147,4 +148,4 @@ For a non-root `base_path` and a private custom domain name: % terraform import aws_api_gateway_base_path_mapping.example api.internal.example.com/base-path/abcde12345 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/api_gateway_client_certificate.html.markdown b/website/docs/cdktf/python/r/api_gateway_client_certificate.html.markdown index 2fbecf171850..1f8112b8f656 100644 --- a/website/docs/cdktf/python/r/api_gateway_client_certificate.html.markdown +++ b/website/docs/cdktf/python/r/api_gateway_client_certificate.html.markdown @@ -35,6 +35,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` - (Optional) Description of the client certificate. * `tags` - (Optional) Key-value map of resource tags. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. @@ -74,4 +75,4 @@ Using `terraform import`, import API Gateway Client Certificates using the id. 
F % terraform import aws_api_gateway_client_certificate.demo ab1cqe ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/api_gateway_deployment.html.markdown b/website/docs/cdktf/python/r/api_gateway_deployment.html.markdown index 6b241fc4cdc9..76ee4ba50028 100644 --- a/website/docs/cdktf/python/r/api_gateway_deployment.html.markdown +++ b/website/docs/cdktf/python/r/api_gateway_deployment.html.markdown @@ -17,8 +17,6 @@ To properly capture all REST API configuration in a deployment, this resource mu * For REST APIs that are configured via OpenAPI specification ([`aws_api_gateway_rest_api` resource](api_gateway_rest_api.html) `body` argument), no special dependency setup is needed beyond referencing the `id` attribute of that resource unless additional Terraform resources have further customized the REST API. * When the REST API configuration involves other Terraform resources ([`aws_api_gateway_integration` resource](api_gateway_integration.html), etc.), the dependency setup can be done with implicit resource references in the `triggers` argument or explicit resource references using the [resource `depends_on` meta-argument](https://www.terraform.io/docs/configuration/meta-arguments/depends_on.html). The `triggers` argument should be preferred over `depends_on`, since `depends_on` can only capture dependency ordering and will not cause the resource to recreate (redeploy the REST API) with upstream configuration changes. -!> **WARNING:** We recommend using the [`aws_api_gateway_stage` resource](api_gateway_stage.html) instead of managing an API Gateway Stage via the `stage_name` argument of this resource. When this resource is recreated (REST API redeployment) with the `stage_name` configured, the stage is deleted and recreated. 
This will cause a temporary service interruption, increase Terraform plan differences, and can require a second Terraform apply to recreate any downstream stage configuration such as associated `aws_api_method_settings` resources. - ~> **NOTE:** Enable the [resource `lifecycle` configuration block `create_before_destroy` argument](https://www.terraform.io/language/meta-arguments/lifecycle#create_before_destroy) in this resource configuration to properly order redeployments in Terraform. Without enabling `create_before_destroy`, API Gateway can return errors such as `BadRequestException: Active stages pointing to this deployment must be moved or deleted` on recreation. ## Example Usage @@ -145,35 +143,17 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: -* `canary_settings` - (Optional, **Deprecated** Use an explicit [`aws_api_gateway_stage` resource](api_gateway_stage.html) instead) Input configuration for the canary deployment when the deployment is a canary release deployment. - See [`canary_settings](#canary_settings-argument-reference) below. - Has no effect when `stage_name` is not set. -* `description` - (Optional) Description of the deployment +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `description` - (Optional) Description of the deployment. * `rest_api_id` - (Required) REST API identifier. -* `stage_description` - (Optional, **Deprecated** Use an explicit [`aws_api_gateway_stage` resource](api_gateway_stage.html) instead) Description to set on the stage managed by the `stage_name` argument. - Has no effect when `stage_name` is not set. 
-* `stage_name` - (Optional, **Deprecated** Use an explicit [`aws_api_gateway_stage` resource](api_gateway_stage.html) instead) Name of the stage to create with this deployment. - If the specified stage already exists, it will be updated to point to the new deployment. - We recommend using the [`aws_api_gateway_stage` resource](api_gateway_stage.html) instead to manage stages. * `triggers` - (Optional) Map of arbitrary keys and values that, when changed, will trigger a redeployment. To force a redeployment without changing these keys/values, use the [`-replace` option](https://developer.hashicorp.com/terraform/cli/commands/plan#replace-address) with `terraform plan` or `terraform apply`. -* `variables` - (Optional) Map to set on the stage managed by the `stage_name` argument. - -### `canary_settings` Argument Reference - -* `percent_traffic` - Percentage (0.0-100.0) of traffic routed to the canary deployment. -* `stage_variable_overrides` - Stage variable overrides used for the canary release deployment. They can override existing stage variables or add new stage variables for the canary release deployment. These stage variables are represented as a string-to-string map between stage variable names and their values. -* `use_stage_cache` - Boolean flag to indicate whether the canary release deployment uses the stage cache or not. +* `variables` - (Optional) Map to set on the related stage. 
## Attribute Reference This resource exports the following attributes in addition to the arguments above: * `id` - ID of the deployment -* `invoke_url` - **DEPRECATED: Use the `aws_api_gateway_stage` resource instead.** URL to invoke the API pointing to the stage, - e.g., `https://z4675bid1j.execute-api.eu-west-2.amazonaws.com/prod` -* `execution_arn` - **DEPRECATED: Use the `aws_api_gateway_stage` resource instead.** Execution ARN to be used in [`lambda_permission`](/docs/providers/aws/r/lambda_permission.html)'s `source_arn` - when allowing API Gateway to invoke a Lambda function, - e.g., `arn:aws:execute-api:eu-west-2:123456789012:z4675bid1j/prod` * `created_date` - Creation date of the deployment ## Import @@ -201,8 +181,8 @@ Using `terraform import`, import `aws_api_gateway_deployment` using `REST-API-ID % terraform import aws_api_gateway_deployment.example aabbccddee/1122334 ``` -The `stage_name`, `stage_description`, and `variables` arguments cannot be imported. Use the [`aws_api_gateway_stage` resource](api_gateway_stage.html) to import and manage stages. +The `variables` arguments cannot be imported. Use the [`aws_api_gateway_stage` resource](api_gateway_stage.html) to import and manage stages. The `triggers` argument cannot be imported. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/api_gateway_documentation_part.html.markdown b/website/docs/cdktf/python/r/api_gateway_documentation_part.html.markdown index 14178965b2fa..29c86579fbb9 100644 --- a/website/docs/cdktf/python/r/api_gateway_documentation_part.html.markdown +++ b/website/docs/cdktf/python/r/api_gateway_documentation_part.html.markdown @@ -48,6 +48,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `location` - (Required) Location of the targeted API entity of the to-be-created documentation part. See below. * `properties` - (Required) Content map of API-specific key-value pairs describing the targeted API entity. The map must be encoded as a JSON string, e.g., "{ \"description\": \"The API does ...\" }". Only Swagger-compliant key-value pairs can be exported and, hence, published. * `rest_api_id` - (Required) ID of the associated Rest API @@ -96,4 +97,4 @@ Using `terraform import`, import API Gateway documentation_parts using `REST-API % terraform import aws_api_gateway_documentation_part.example 5i4e1ko720/3oyy3t ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/api_gateway_documentation_version.html.markdown b/website/docs/cdktf/python/r/api_gateway_documentation_version.html.markdown index 789cee123588..7db439161f70 100644 --- a/website/docs/cdktf/python/r/api_gateway_documentation_version.html.markdown +++ b/website/docs/cdktf/python/r/api_gateway_documentation_version.html.markdown @@ -56,6 +56,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `version` - (Required) Version identifier of the API documentation snapshot. * `rest_api_id` - (Required) ID of the associated Rest API * `description` - (Optional) Description of the API documentation version. 
@@ -89,4 +90,4 @@ Using `terraform import`, import API Gateway documentation versions using `REST- % terraform import aws_api_gateway_documentation_version.example 5i4e1ko720/example-version ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/api_gateway_domain_name.html.markdown b/website/docs/cdktf/python/r/api_gateway_domain_name.html.markdown index 863d27df627f..19310237066c 100644 --- a/website/docs/cdktf/python/r/api_gateway_domain_name.html.markdown +++ b/website/docs/cdktf/python/r/api_gateway_domain_name.html.markdown @@ -198,6 +198,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `domain_name` - (Required) Fully-qualified domain name to register. * `endpoint_configuration` - (Optional) Configuration block defining API endpoint information including type. See below. * `mutual_tls_authentication` - (Optional) Mutual TLS authentication configuration for the domain name. See below. 
@@ -291,4 +292,4 @@ For a private custom domain name: % terraform import aws_api_gateway_domain_name.example dev.api.internal.example.com/abcde12345 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/api_gateway_domain_name_access_association.html.markdown b/website/docs/cdktf/python/r/api_gateway_domain_name_access_association.html.markdown index ca773272fbad..1bae38a38ef0 100644 --- a/website/docs/cdktf/python/r/api_gateway_domain_name_access_association.html.markdown +++ b/website/docs/cdktf/python/r/api_gateway_domain_name_access_association.html.markdown @@ -37,6 +37,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `access_association_source` - (Required) The identifier of the domain name access association source. For a `VPCE`, the value is the VPC endpoint ID. * `access_association_source_type` - (Required) The type of the domain name access association source. Valid values are `VPCE`. * `domain_name_arn` - (Required) The ARN of the domain name. @@ -52,6 +53,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. 
For example: + +```terraform +import { + to = aws_api_gateway_domain_name_access_association.example + identity = { + "arn" = "arn:aws:apigateway:us-east-1::/domainnames/example.com/accessassociation" + } +} + +resource "aws_api_gateway_domain_name_access_association" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the API Gateway domain name access association. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import API Gateway domain name acces associations using their `arn`. For example: ```python @@ -75,4 +97,4 @@ Using `terraform import`, import API Gateway domain name acces associations as u % terraform import aws_api_gateway_domain_name_access_association.example arn:aws:apigateway:us-west-2:123456789012:/domainnameaccessassociations/domainname/12qmzgp2.9m7ilski.test+hykg7a12e7/vpcesource/vpce-05de3f8f82740a748 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/api_gateway_gateway_response.html.markdown b/website/docs/cdktf/python/r/api_gateway_gateway_response.html.markdown index e030cdee8b6c..a6d661db9616 100644 --- a/website/docs/cdktf/python/r/api_gateway_gateway_response.html.markdown +++ b/website/docs/cdktf/python/r/api_gateway_gateway_response.html.markdown @@ -47,8 +47,9 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be managed. See the [AWS Documentation](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints) for supported values. Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `rest_api_id` - (Required) String identifier of the associated REST API. 
-* `response_type` - (Required) Response type of the associated GatewayResponse. +* `response_type` - (Required) Response type of the associated GatewayResponse. See the [AWS Documentation](https://docs.aws.amazon.com/apigateway/latest/developerguide/supported-gateway-response-types.html) for supported values. * `status_code` - (Optional) HTTP status code of the Gateway Response. * `response_templates` - (Optional) Map of templates used to transform the response body. * `response_parameters` - (Optional) Map of parameters (paths, query strings and headers) of the Gateway Response. @@ -82,4 +83,4 @@ Using `terraform import`, import `aws_api_gateway_gateway_response` using `REST- % terraform import aws_api_gateway_gateway_response.example 12345abcde/UNAUTHORIZED ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/api_gateway_integration.html.markdown b/website/docs/cdktf/python/r/api_gateway_integration.html.markdown index 4478e3b4a9a8..e2df214cd049 100644 --- a/website/docs/cdktf/python/r/api_gateway_integration.html.markdown +++ b/website/docs/cdktf/python/r/api_gateway_integration.html.markdown @@ -227,6 +227,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `rest_api_id` - (Required) ID of the associated REST API. * `resource_id` - (Required) API resource ID. 
* `http_method` - (Required) HTTP method (`GET`, `POST`, `PUT`, `DELETE`, `HEAD`, `OPTION`, `ANY`) @@ -288,4 +289,4 @@ Using `terraform import`, import `aws_api_gateway_integration` using `REST-API-I % terraform import aws_api_gateway_integration.example 12345abcde/67890fghij/GET ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/api_gateway_integration_response.html.markdown b/website/docs/cdktf/python/r/api_gateway_integration_response.html.markdown index 3394df3df211..6eb0558804b1 100644 --- a/website/docs/cdktf/python/r/api_gateway_integration_response.html.markdown +++ b/website/docs/cdktf/python/r/api_gateway_integration_response.html.markdown @@ -83,6 +83,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `content_handling` - (Optional) How to handle request payload content type conversions. Supported values are `CONVERT_TO_BINARY` and `CONVERT_TO_TEXT`. If this property is not defined, the response payload will be passed through from the integration response to the method response without modification. * `response_parameters` - (Optional) Map of response parameters that can be read from the backend response. For example: `response_parameters = { "method.response.header.X-Some-Header" = "integration.response.header.X-Some-Other-Header" }`. * `response_templates` - (Optional) Map of templates used to transform the integration response body. 
@@ -117,4 +118,4 @@ Using `terraform import`, import `aws_api_gateway_integration_response` using `R % terraform import aws_api_gateway_integration_response.example 12345abcde/67890fghij/GET/200 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/api_gateway_method.html.markdown b/website/docs/cdktf/python/r/api_gateway_method.html.markdown index 47ecc2fd3962..3c5b77ff211b 100644 --- a/website/docs/cdktf/python/r/api_gateway_method.html.markdown +++ b/website/docs/cdktf/python/r/api_gateway_method.html.markdown @@ -105,6 +105,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `rest_api_id` - (Required) ID of the associated REST API * `resource_id` - (Required) API resource ID * `http_method` - (Required) HTTP Method (`GET`, `POST`, `PUT`, `DELETE`, `HEAD`, `OPTIONS`, `ANY`) @@ -149,4 +150,4 @@ Using `terraform import`, import `aws_api_gateway_method` using `REST-API-ID/RES % terraform import aws_api_gateway_method.example 12345abcde/67890fghij/GET ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/api_gateway_method_response.html.markdown b/website/docs/cdktf/python/r/api_gateway_method_response.html.markdown index cc1efdd6b2d3..fa5977ec5725 100644 --- a/website/docs/cdktf/python/r/api_gateway_method_response.html.markdown +++ b/website/docs/cdktf/python/r/api_gateway_method_response.html.markdown @@ -137,6 +137,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be 
[managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `rest_api_id` - (Required) The string identifier of the associated REST API. * `resource_id` - (Required) The Resource identifier for the method resource. * `http_method` - (Required) The HTTP verb of the method resource (`GET`, `POST`, `PUT`, `DELETE`, `HEAD`, `OPTIONS`, `ANY`). @@ -144,7 +145,7 @@ This resource supports the following arguments: * `response_models` - (Optional) A map specifying the model resources used for the response's content type. Response models are represented as a key/value map, with a content type as the key and a Model name as the value. * `response_parameters` - (Optional) A map specifying required or optional response parameters that API Gateway can send back to the caller. A key defines a method response header name and the associated value is a boolean flag indicating whether the method response parameter is required. The method response header names must match the pattern of `method.response.header.{name}`, where `name` is a valid and unique header name. - The response parameter names defined here are available in the integration response to be mapped from an integration response header expressed in `integration.response.header.{name}`, a static value enclosed within a pair of single quotes (e.g., '`application/json'`), or a JSON expression from the back-end response payload in the form of `integration.response.body.{JSON-expression}`, where `JSON-expression` is a valid JSON expression without the `$` prefix.) 
+The response parameter names defined here are available in the integration response to be mapped from an integration response header expressed in `integration.response.header.{name}`, a static value enclosed within a pair of single quotes (e.g., '`application/json'`), or a JSON expression from the back-end response payload in the form of `integration.response.body.{JSON-expression}`, where `JSON-expression` is a valid JSON expression without the `$` prefix.) ## Attribute Reference @@ -175,4 +176,4 @@ Using `terraform import`, import `aws_api_gateway_method_response` using `REST-A % terraform import aws_api_gateway_method_response.example 12345abcde/67890fghij/GET/200 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/api_gateway_method_settings.html.markdown b/website/docs/cdktf/python/r/api_gateway_method_settings.html.markdown index 9b5b864681ac..374bc294e788 100644 --- a/website/docs/cdktf/python/r/api_gateway_method_settings.html.markdown +++ b/website/docs/cdktf/python/r/api_gateway_method_settings.html.markdown @@ -198,6 +198,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `rest_api_id` - (Required) ID of the REST API * `stage_name` - (Required) Name of the stage * `method_path` - (Required) Method path defined as `{resource_path}/{http_method}` for an individual method override, or `*/*` for overriding all methods in the stage. Ensure to trim any leading forward slashes in the path (e.g., `trimprefix(aws_api_gateway_resource.example.path, "/")`). 
@@ -245,4 +246,4 @@ Using `terraform import`, import `aws_api_gateway_method_settings` using `REST-A % terraform import aws_api_gateway_method_settings.example 12345abcde/example/test/GET ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/api_gateway_model.html.markdown b/website/docs/cdktf/python/r/api_gateway_model.html.markdown index 51c2aabb4544..3f5eb91d82b5 100644 --- a/website/docs/cdktf/python/r/api_gateway_model.html.markdown +++ b/website/docs/cdktf/python/r/api_gateway_model.html.markdown @@ -47,6 +47,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `rest_api_id` - (Required) ID of the associated REST API * `name` - (Required) Name of the model * `description` - (Optional) Description of the model @@ -84,4 +85,4 @@ Using `terraform import`, import `aws_api_gateway_model` using `REST-API-ID/NAME % terraform import aws_api_gateway_model.example 12345abcde/example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/api_gateway_request_validator.html.markdown b/website/docs/cdktf/python/r/api_gateway_request_validator.html.markdown index 2643757683fc..fd266ad584eb 100644 --- a/website/docs/cdktf/python/r/api_gateway_request_validator.html.markdown +++ b/website/docs/cdktf/python/r/api_gateway_request_validator.html.markdown @@ -38,6 +38,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the request validator * `rest_api_id` - (Required) ID of the associated Rest API * `validate_request_body` - (Optional) Boolean whether to validate request body. Defaults to `false`. @@ -74,4 +75,4 @@ Using `terraform import`, import `aws_api_gateway_request_validator` using `REST % terraform import aws_api_gateway_request_validator.example 12345abcde/67890fghij ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/api_gateway_resource.html.markdown b/website/docs/cdktf/python/r/api_gateway_resource.html.markdown index 3039424cbb2a..04a5cc865725 100644 --- a/website/docs/cdktf/python/r/api_gateway_resource.html.markdown +++ b/website/docs/cdktf/python/r/api_gateway_resource.html.markdown @@ -42,6 +42,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `rest_api_id` - (Required) ID of the associated REST API * `parent_id` - (Required) ID of the parent API resource * `path_part` - (Required) Last path segment of this API resource. 
@@ -78,4 +79,4 @@ Using `terraform import`, import `aws_api_gateway_resource` using `REST-API-ID/R % terraform import aws_api_gateway_resource.example 12345abcde/67890fghij ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/api_gateway_rest_api.html.markdown b/website/docs/cdktf/python/r/api_gateway_rest_api.html.markdown index a12e3b1cb058..0f50a3bb15fe 100644 --- a/website/docs/cdktf/python/r/api_gateway_rest_api.html.markdown +++ b/website/docs/cdktf/python/r/api_gateway_rest_api.html.markdown @@ -236,6 +236,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `api_key_source` - (Optional) Source of the API key for requests. Valid values are `HEADER` (default) and `AUTHORIZER`. If importing an OpenAPI specification via the `body` argument, this corresponds to the [`x-amazon-apigateway-api-key-source` extension](https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-swagger-extensions-api-key-source.html). If the argument value is provided and is different than the OpenAPI value, the argument value will override the OpenAPI value. * `binary_media_types` - (Optional) List of binary media types supported by the REST API. By default, the REST API supports only UTF-8-encoded text payloads. If importing an OpenAPI specification via the `body` argument, this corresponds to the [`x-amazon-apigateway-binary-media-types` extension](https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-swagger-extensions-binary-media-types.html). 
If the argument value is provided and is different than the OpenAPI value, the argument value will override the OpenAPI value. * `body` - (Optional) OpenAPI specification that defines the set of routes and integrations to create as part of the REST API. This configuration, and any updates to it, will replace all REST API configuration except values overridden in this resource configuration and other resource updates applied after this resource but before any `aws_api_gateway_deployment` creation. More information about REST API OpenAPI support can be found in the [API Gateway Developer Guide](https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-import-api.html). @@ -307,4 +308,4 @@ Using `terraform import`, import `aws_api_gateway_rest_api` using the REST API I ~> **NOTE:** Resource import does not currently support the `body` attribute. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/api_gateway_rest_api_policy.html.markdown b/website/docs/cdktf/python/r/api_gateway_rest_api_policy.html.markdown index 5a8f9b4348ee..b396bfd7a95c 100644 --- a/website/docs/cdktf/python/r/api_gateway_rest_api_policy.html.markdown +++ b/website/docs/cdktf/python/r/api_gateway_rest_api_policy.html.markdown @@ -68,6 +68,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `rest_api_id` - (Required) ID of the REST API. * `policy` - (Required) JSON formatted policy document that controls access to the API Gateway. 
For more information about building AWS IAM policy documents with Terraform, see the [AWS IAM Policy Document Guide](https://learn.hashicorp.com/terraform/aws/iam-policy) @@ -102,4 +103,4 @@ Using `terraform import`, import `aws_api_gateway_rest_api_policy` using the RES % terraform import aws_api_gateway_rest_api_policy.example 12345abcde ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/api_gateway_rest_api_put.markdown b/website/docs/cdktf/python/r/api_gateway_rest_api_put.markdown index 44110258bd50..1e39f6fa59e8 100644 --- a/website/docs/cdktf/python/r/api_gateway_rest_api_put.markdown +++ b/website/docs/cdktf/python/r/api_gateway_rest_api_put.markdown @@ -144,13 +144,14 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -The following arguments are required: +This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `body` - (Required) PUT request body containing external API definitions. Currently, only OpenAPI definition JSON/YAML files are supported. The maximum size of the API definition file is 6MB. * `rest_api_id` - (Required) Identifier of the associated REST API. The following arguments are optional: * `fail_on_warnings` - (Optional) Whether to rollback the API update when a warning is encountered. The default value is `false`.
* `parameters` - (Optional) Map of customizations for importing the specification in the `body` argument. For example, to exclude DocumentationParts from an imported API, use `ignore = "documentation"`. Additional documentation, including other parameters such as `basepath`, can be found in the [API Gateway Developer Guide](https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-import-api.html). * `triggers` - (Optional) Map of arbitrary keys and values that, when changed, will trigger a redeployment. To force a redeployment without changing these keys/values, use the [`-replace` option](https://developer.hashicorp.com/terraform/cli/commands/plan#replace-address) with `terraform plan` or `terraform apply`. @@ -190,4 +192,4 @@ Using `terraform import`, import API Gateway REST API Put using the `rest_api_id % terraform import aws_api_gateway_rest_api_put.example import-id-12345678 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/api_gateway_stage.html.markdown b/website/docs/cdktf/python/r/api_gateway_stage.html.markdown index 86c2164ac961..7805db66e523 100644 --- a/website/docs/cdktf/python/r/api_gateway_stage.html.markdown +++ b/website/docs/cdktf/python/r/api_gateway_stage.html.markdown @@ -120,6 +120,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
* `rest_api_id` - (Required) ID of the associated REST API * `stage_name` - (Required) Name of the stage * `deployment_id` - (Required) ID of the deployment that the stage points to @@ -129,8 +130,8 @@ This resource supports the following arguments: * `canary_settings` - (Optional) Configuration settings of a canary deployment. See [Canary Settings](#canary-settings) below. * `client_certificate_id` - (Optional) Identifier of a client certificate for the stage. * `description` - (Optional) Description of the stage. -* `documentation_version` - (Optional) Version of the associated API documentation -* `variables` - (Optional) Map that defines the stage variables +* `documentation_version` - (Optional) Version of the associated API documentation. +* `variables` - (Optional) Map that defines the stage variables. * `tags` - (Optional) Map of tags to assign to the resource. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. * `xray_tracing_enabled` - (Optional) Whether active tracing with X-ray is enabled. Defaults to `false`. 
@@ -186,4 +187,4 @@ Using `terraform import`, import `aws_api_gateway_stage` using `REST-API-ID/STAG % terraform import aws_api_gateway_stage.example 12345abcde/example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/api_gateway_usage_plan.html.markdown b/website/docs/cdktf/python/r/api_gateway_usage_plan.html.markdown index 01848f3fe834..4dd01ca707b8 100644 --- a/website/docs/cdktf/python/r/api_gateway_usage_plan.html.markdown +++ b/website/docs/cdktf/python/r/api_gateway_usage_plan.html.markdown @@ -95,6 +95,7 @@ resource "aws_api_gateway_usage_plan" "example" { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the usage plan. * `description` - (Optional) Description of a usage plan. * `api_stages` - (Optional) Associated [API stages](#api-stages-arguments) of the usage plan. @@ -165,4 +166,4 @@ Using `terraform import`, import AWS API Gateway Usage Plan using the `id`. For % terraform import aws_api_gateway_usage_plan.myusageplan ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/api_gateway_usage_plan_key.html.markdown b/website/docs/cdktf/python/r/api_gateway_usage_plan_key.html.markdown index 79a5b2358751..86c9d8dcb643 100644 --- a/website/docs/cdktf/python/r/api_gateway_usage_plan_key.html.markdown +++ b/website/docs/cdktf/python/r/api_gateway_usage_plan_key.html.markdown @@ -54,6 +54,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `key_id` - (Required) Identifier of the API key resource. * `key_type` - (Required) Type of the API key resource. Currently, the valid key type is API_KEY. * `usage_plan_id` - (Required) Id of the usage plan resource representing to associate the key to. @@ -94,4 +95,4 @@ Using `terraform import`, import AWS API Gateway Usage Plan Key using the `USAGE % terraform import aws_api_gateway_usage_plan_key.key 12345abcde/zzz ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/api_gateway_vpc_link.html.markdown b/website/docs/cdktf/python/r/api_gateway_vpc_link.html.markdown index 49a8838b13dd..8ce2b87097fc 100644 --- a/website/docs/cdktf/python/r/api_gateway_vpc_link.html.markdown +++ b/website/docs/cdktf/python/r/api_gateway_vpc_link.html.markdown @@ -52,6 +52,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name used to label and identify the VPC link. * `description` - (Optional) Description of the VPC link. * `target_arns` - (Required, ForceNew) List of network load balancer arns in the VPC targeted by the VPC link. Currently AWS only supports 1 target. @@ -89,4 +90,4 @@ Using `terraform import`, import API Gateway VPC Link using the `id`. 
For exampl % terraform import aws_api_gateway_vpc_link.example 12345abcde ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/apigatewayv2_api.html.markdown b/website/docs/cdktf/python/r/apigatewayv2_api.html.markdown index 1787c2273a6a..e895804d1ffe 100644 --- a/website/docs/cdktf/python/r/apigatewayv2_api.html.markdown +++ b/website/docs/cdktf/python/r/apigatewayv2_api.html.markdown @@ -61,6 +61,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the API. Must be less than or equal to 128 characters in length. * `protocol_type` - (Required) API protocol. Valid values: `HTTP`, `WEBSOCKET`. * `api_key_selection_expression` - (Optional) An [API key selection expression](https://docs.aws.amazon.com/apigateway/latest/developerguide/apigateway-websocket-api-selection-expressions.html#apigateway-websocket-api-apikey-selection-expressions). 
@@ -137,4 +138,4 @@ Using `terraform import`, import `aws_apigatewayv2_api` using the API identifier % terraform import aws_apigatewayv2_api.example aabbccddee ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/apigatewayv2_api_mapping.html.markdown b/website/docs/cdktf/python/r/apigatewayv2_api_mapping.html.markdown index 561fbb548c39..31fe7f9a39b6 100644 --- a/website/docs/cdktf/python/r/apigatewayv2_api_mapping.html.markdown +++ b/website/docs/cdktf/python/r/apigatewayv2_api_mapping.html.markdown @@ -40,6 +40,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `api_id` - (Required) API identifier. * `domain_name` - (Required) Domain name. Use the [`aws_apigatewayv2_domain_name`](/docs/providers/aws/r/apigatewayv2_domain_name.html) resource to configure a domain name. * `stage` - (Required) API stage. Use the [`aws_apigatewayv2_stage`](/docs/providers/aws/r/apigatewayv2_stage.html) resource to configure an API stage. 
@@ -76,4 +77,4 @@ Using `terraform import`, import `aws_apigatewayv2_api_mapping` using the API ma % terraform import aws_apigatewayv2_api_mapping.example 1122334/ws-api.example.com ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/apigatewayv2_authorizer.html.markdown b/website/docs/cdktf/python/r/apigatewayv2_authorizer.html.markdown index 9e548d11f18a..bc1994d41983 100644 --- a/website/docs/cdktf/python/r/apigatewayv2_authorizer.html.markdown +++ b/website/docs/cdktf/python/r/apigatewayv2_authorizer.html.markdown @@ -66,6 +66,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `api_id` - (Required) API identifier. * `authorizer_type` - (Required) Authorizer type. Valid values: `JWT`, `REQUEST`. Specify `REQUEST` for a Lambda function using incoming request parameters. 
@@ -131,4 +132,4 @@ Using `terraform import`, import `aws_apigatewayv2_authorizer` using the API ide % terraform import aws_apigatewayv2_authorizer.example aabbccddee/1122334 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/apigatewayv2_deployment.html.markdown b/website/docs/cdktf/python/r/apigatewayv2_deployment.html.markdown index 3844bab60dc1..85212da811a3 100644 --- a/website/docs/cdktf/python/r/apigatewayv2_deployment.html.markdown +++ b/website/docs/cdktf/python/r/apigatewayv2_deployment.html.markdown @@ -84,6 +84,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `api_id` - (Required) API identifier. * `description` - (Optional) Description for the deployment resource. Must be less than or equal to 1024 characters in length. * `triggers` - (Optional) Map of arbitrary keys and values that, when changed, will trigger a redeployment. To force a redeployment without changing these keys/values, use the [`terraform taint` command](https://www.terraform.io/docs/commands/taint.html). @@ -122,4 +123,4 @@ Using `terraform import`, import `aws_apigatewayv2_deployment` using the API ide The `triggers` argument cannot be imported. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/apigatewayv2_domain_name.html.markdown b/website/docs/cdktf/python/r/apigatewayv2_domain_name.html.markdown index 93a536a4ea1e..6df3169eda9a 100644 --- a/website/docs/cdktf/python/r/apigatewayv2_domain_name.html.markdown +++ b/website/docs/cdktf/python/r/apigatewayv2_domain_name.html.markdown @@ -87,6 +87,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `domain_name` - (Required) Domain name. Must be between 1 and 512 characters in length. * `domain_name_configuration` - (Required) Domain name configuration. See below. * `mutual_tls_authentication` - (Optional) Mutual TLS authentication configuration for the domain name. @@ -148,4 +149,4 @@ Using `terraform import`, import `aws_apigatewayv2_domain_name` using the domain % terraform import aws_apigatewayv2_domain_name.example ws-api.example.com ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/apigatewayv2_integration.html.markdown b/website/docs/cdktf/python/r/apigatewayv2_integration.html.markdown index 317368ff0303..15ff78ac3de9 100644 --- a/website/docs/cdktf/python/r/apigatewayv2_integration.html.markdown +++ b/website/docs/cdktf/python/r/apigatewayv2_integration.html.markdown @@ -147,6 +147,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `api_id` - (Required) API identifier. * `integration_type` - (Required) Integration type of an integration. Valid values: `AWS` (supported only for WebSocket APIs), `AWS_PROXY`, `HTTP` (supported only for WebSocket APIs), `HTTP_PROXY`, `MOCK` (supported only for WebSocket APIs). For an HTTP API private integration, use `HTTP_PROXY`. @@ -218,4 +219,4 @@ Using `terraform import`, import `aws_apigatewayv2_integration` using the API id -> **Note:** The API Gateway managed integration created as part of [_quick_create_](https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-basic-concept.html#apigateway-definition-quick-create) cannot be imported. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/apigatewayv2_integration_response.html.markdown b/website/docs/cdktf/python/r/apigatewayv2_integration_response.html.markdown index d146ac3ae21a..126d9102a379 100644 --- a/website/docs/cdktf/python/r/apigatewayv2_integration_response.html.markdown +++ b/website/docs/cdktf/python/r/apigatewayv2_integration_response.html.markdown @@ -40,6 +40,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `api_id` - (Required) API identifier. * `integration_id` - (Required) Identifier of the [`aws_apigatewayv2_integration`](/docs/providers/aws/r/apigatewayv2_integration.html). * `integration_response_key` - (Required) Integration response key. 
@@ -78,4 +79,4 @@ Using `terraform import`, import `aws_apigatewayv2_integration_response` using t % terraform import aws_apigatewayv2_integration_response.example aabbccddee/1122334/998877 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/apigatewayv2_model.html.markdown b/website/docs/cdktf/python/r/apigatewayv2_model.html.markdown index 2c26befea7e5..15faec4d81ed 100644 --- a/website/docs/cdktf/python/r/apigatewayv2_model.html.markdown +++ b/website/docs/cdktf/python/r/apigatewayv2_model.html.markdown @@ -50,6 +50,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `api_id` - (Required) API identifier. * `content_type` - (Required) The content-type for the model, for example, `application/json`. Must be between 1 and 256 characters in length. * `name` - (Required) Name of the model. Must be alphanumeric. Must be between 1 and 128 characters in length. 
@@ -87,4 +88,4 @@ Using `terraform import`, import `aws_apigatewayv2_model` using the API identifi % terraform import aws_apigatewayv2_model.example aabbccddee/1122334 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/apigatewayv2_route.html.markdown b/website/docs/cdktf/python/r/apigatewayv2_route.html.markdown index 0f75806b978b..2c2b40782082 100644 --- a/website/docs/cdktf/python/r/apigatewayv2_route.html.markdown +++ b/website/docs/cdktf/python/r/apigatewayv2_route.html.markdown @@ -84,6 +84,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `api_id` - (Required) API identifier. * `route_key` - (Required) Route key for the route. For HTTP APIs, the route key can be either `$default`, or a combination of an HTTP method and resource path, for example, `GET /pets`. * `api_key_required` - (Optional) Boolean whether an API key is required for the route. Defaults to `false`. Supported only for WebSocket APIs. @@ -138,4 +139,4 @@ Using `terraform import`, import `aws_apigatewayv2_route` using the API identifi -> **Note:** The API Gateway managed route created as part of [_quick_create_](https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-basic-concept.html#apigateway-definition-quick-create) cannot be imported. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/apigatewayv2_route_response.html.markdown b/website/docs/cdktf/python/r/apigatewayv2_route_response.html.markdown index 7c481acef797..d3067b0fdbb1 100644 --- a/website/docs/cdktf/python/r/apigatewayv2_route_response.html.markdown +++ b/website/docs/cdktf/python/r/apigatewayv2_route_response.html.markdown @@ -46,6 +46,7 @@ You can only define the $default route response for WebSocket APIs. You can use This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `api_id` - (Required) API identifier. * `route_id` - (Required) Identifier of the [`aws_apigatewayv2_route`](/docs/providers/aws/r/apigatewayv2_route.html). * `route_response_key` - (Required) Route response key. @@ -83,4 +84,4 @@ Using `terraform import`, import `aws_apigatewayv2_route_response` using the API % terraform import aws_apigatewayv2_route_response.example aabbccddee/1122334/998877 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/apigatewayv2_stage.html.markdown b/website/docs/cdktf/python/r/apigatewayv2_stage.html.markdown index 589c3f947e9d..c70fae460aff 100644 --- a/website/docs/cdktf/python/r/apigatewayv2_stage.html.markdown +++ b/website/docs/cdktf/python/r/apigatewayv2_stage.html.markdown @@ -44,6 +44,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `access_log_settings` - (Optional) Settings for logging access in this stage. Use the [`aws_api_gateway_account`](/docs/providers/aws/r/api_gateway_account.html) resource to configure [permissions for CloudWatch Logging](https://docs.aws.amazon.com/apigateway/latest/developerguide/set-up-logging.html#set-up-access-logging-permissions). * `auto_deploy` - (Optional) Whether updates to an API automatically trigger a new deployment. Defaults to `false`. Applicable for HTTP APIs. @@ -122,4 +123,4 @@ Using `terraform import`, import `aws_apigatewayv2_stage` using the API identifi -> **Note:** The API Gateway managed stage created as part of [_quick_create_](https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-basic-concept.html#apigateway-definition-quick-create) cannot be imported. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/apigatewayv2_vpc_link.html.markdown b/website/docs/cdktf/python/r/apigatewayv2_vpc_link.html.markdown index c314aae90920..d47c4809ebba 100644 --- a/website/docs/cdktf/python/r/apigatewayv2_vpc_link.html.markdown +++ b/website/docs/cdktf/python/r/apigatewayv2_vpc_link.html.markdown @@ -43,6 +43,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the VPC Link. Must be between 1 and 128 characters in length. * `security_group_ids` - (Required) Security group IDs for the VPC Link. * `subnet_ids` - (Required) Subnet IDs for the VPC Link. 
@@ -81,4 +82,4 @@ Using `terraform import`, import `aws_apigatewayv2_vpc_link` using the VPC Link % terraform import aws_apigatewayv2_vpc_link.example aabbccddee ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/app_cookie_stickiness_policy.html.markdown b/website/docs/cdktf/python/r/app_cookie_stickiness_policy.html.markdown index bfa97e5e4e03..e0b6782faa00 100644 --- a/website/docs/cdktf/python/r/app_cookie_stickiness_policy.html.markdown +++ b/website/docs/cdktf/python/r/app_cookie_stickiness_policy.html.markdown @@ -50,6 +50,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the stickiness policy. * `load_balancer` - (Required) Name of load balancer to which the policy should be attached. @@ -93,4 +94,4 @@ Using `terraform import`, import application cookie stickiness policies using th % terraform import aws_app_cookie_stickiness_policy.example my-elb:80:my-policy ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/appautoscaling_policy.html.markdown b/website/docs/cdktf/python/r/appautoscaling_policy.html.markdown index 4c86e5fb2f58..cd535df3162d 100644 --- a/website/docs/cdktf/python/r/appautoscaling_policy.html.markdown +++ b/website/docs/cdktf/python/r/appautoscaling_policy.html.markdown @@ -237,17 +237,133 @@ class MyConvertedCode(TerraformStack): ) ``` +### Predictive Scaling + +```python +# DO NOT EDIT. 
Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import Token, TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.appautoscaling_policy import AppautoscalingPolicy +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + AppautoscalingPolicy(self, "example", + name="example-policy", + policy_type="PredictiveScaling", + predictive_scaling_policy_configuration=AppautoscalingPolicyPredictiveScalingPolicyConfiguration( + metric_specification=[AppautoscalingPolicyPredictiveScalingPolicyConfigurationMetricSpecification( + predefined_metric_pair_specification=AppautoscalingPolicyPredictiveScalingPolicyConfigurationMetricSpecificationPredefinedMetricPairSpecification( + predefined_metric_type="ECSServiceMemoryUtilization" + ), + target_value=Token.as_string(40) + ) + ] + ), + resource_id=Token.as_string(aws_appautoscaling_target_example.resource_id), + scalable_dimension=Token.as_string(aws_appautoscaling_target_example.scalable_dimension), + service_namespace=Token.as_string(aws_appautoscaling_target_example.service_namespace) + ) +``` + ## Argument Reference This resource supports the following arguments: * `name` - (Required) Name of the policy. Must be between 1 and 255 characters in length. -* `policy_type` - (Optional) Policy type. Valid values are `StepScaling` and `TargetTrackingScaling`. Defaults to `StepScaling`. Certain services only support only one policy type. For more information see the [Target Tracking Scaling Policies](https://docs.aws.amazon.com/autoscaling/application/userguide/application-auto-scaling-target-tracking.html) and [Step Scaling Policies](https://docs.aws.amazon.com/autoscaling/application/userguide/application-auto-scaling-step-scaling-policies.html) documentation. +* `policy_type` - (Optional) Policy type. 
Valid values are `StepScaling`, `TargetTrackingScaling`, and `PredictiveScaling`. Defaults to `StepScaling`. Certain services support only one policy type. For more information see the [Target Tracking Scaling Policies](https://docs.aws.amazon.com/autoscaling/application/userguide/application-auto-scaling-target-tracking.html), [Step Scaling Policies](https://docs.aws.amazon.com/autoscaling/application/userguide/application-auto-scaling-step-scaling-policies.html), and [Predictive Scaling](https://docs.aws.amazon.com/autoscaling/application/userguide/application-auto-scaling-predictive-scaling.html) documentation. +* `predictive_scaling_policy_configuration` - (Optional) Predictive scaling policy configuration, requires `policy_type = "PredictiveScaling"`. See supported fields below. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `resource_id` - (Required) Resource type and unique identifier string for the resource associated with the scaling policy. Documentation can be found in the `ResourceId` parameter at: [AWS Application Auto Scaling API Reference](https://docs.aws.amazon.com/autoscaling/application/APIReference/API_RegisterScalableTarget.html) * `scalable_dimension` - (Required) Scalable dimension of the scalable target. Documentation can be found in the `ScalableDimension` parameter at: [AWS Application Auto Scaling API Reference](https://docs.aws.amazon.com/autoscaling/application/APIReference/API_RegisterScalableTarget.html) * `service_namespace` - (Required) AWS service namespace of the scalable target. 
Documentation can be found in the `ServiceNamespace` parameter at: [AWS Application Auto Scaling API Reference](https://docs.aws.amazon.com/autoscaling/application/APIReference/API_RegisterScalableTarget.html) * `step_scaling_policy_configuration` - (Optional) Step scaling policy configuration, requires `policy_type = "StepScaling"` (default). See supported fields below. -* `target_tracking_scaling_policy_configuration` - (Optional) Target tracking policy, requires `policy_type = "TargetTrackingScaling"`. See supported fields below. +* `target_tracking_scaling_policy_configuration` - (Optional) Target tracking policy configuration, requires `policy_type = "TargetTrackingScaling"`. See supported fields below. + +### predictive_scaling_policy_configuration + +The `predictive_scaling_policy_configuration` configuration block supports the following arguments: + +* `max_capacity_breach_behavior` - (Optional) The behavior that should be applied if the forecast capacity approaches or exceeds the maximum capacity. Valid values are `HonorMaxCapacity` and `IncreaseMaxCapacity`. +* `max_capacity_buffer` - (Optional) Size of the capacity buffer to use when the forecast capacity is close to or exceeds the maximum capacity. The value is specified as a percentage relative to the forecast capacity. Required if the `max_capacity_breach_behavior` argument is set to `IncreaseMaxCapacity`, and cannot be used otherwise. +* `metric_specification` - (Required) Metrics and target utilization to use for predictive scaling. See supported fields below. +* `mode` - (Optional) Predictive scaling mode. Valid values are `ForecastOnly` and `ForecastAndScale`. +* `scheduling_buffer_time` - (Optional) Amount of time, in seconds, that the start time can be advanced. 
+ +### predictive_scaling_policy_configuration metric_specification + +The `predictive_scaling_policy_configuration` `metric_specification` configuration block supports the following arguments: + +* `customized_capacity_metric_specification` - (Optional) Customized capacity metric specification. See supported fields below. +* `customized_load_metric_specification` - (Optional) Customized load metric specification. See supported fields below. +* `customized_scaling_metric_specification` - (Optional) Customized scaling metric specification. See supported fields below. +* `predefined_load_metric_specification` - (Optional) Predefined load metric specification. See supported fields below. +* `predefined_metric_pair_specification` - (Optional) Predefined metric pair specification that determines the appropriate scaling metric and load metric to use. See supported fields below. +* `predefined_scaling_metric_specification` - (Optional) Predefined scaling metric specification. See supported fields below. +* `target_value` - (Required) Target utilization. + +### predictive_scaling_policy_configuration metric_specification customized_capacity_metric_specification, customized_load_metric_specification and customized_scaling_metric_specification + +The `predictive_scaling_policy_configuration` `metric_specification` `customized_capacity_metric_specification`, `customized_load_metric_specification`, and `customized_scaling_metric_specification` configuration blocks support the following arguments: + +* `metric_data_query` - (Required) One or more metric data queries to provide data points for a metric specification. See supported fields below. 
+ +### predictive_scaling_policy_configuration metric_specification customized_capacity_metric_specification metric_data_query + +The `predictive_scaling_policy_configuration` `metric_specification` `customized_capacity_metric_specification` `metric_data_query` configuration block supports the following arguments: + +* `expression` - (Optional) Math expression to perform on the returned data, if this object is performing a math expression. +* `id` - (Required) Short name that identifies the object's results in the response. +* `label` - (Optional) Human-readable label for this metric or expression. +* `metric_stat` - (Optional) Information about the metric data to return. See supported fields below. +* `return_data` - (Optional) Whether to return the timestamps and raw data values of this metric. + +### predictive_scaling_policy_configuration metric_specification customized_capacity_metric_specification metric_data_query metric_stat + +The `predictive_scaling_policy_configuration` `metric_specification` `customized_capacity_metric_specification` `metric_data_query` `metric_stat` configuration block supports the following arguments: + +* `metric` - (Required) CloudWatch metric to return, including the metric name, namespace, and dimensions. See supported fields below. +* `stat` - (Required) Statistic to return. +* `unit` - (Optional) Unit to use for the returned data points. + +### predictive_scaling_policy_configuration metric_specification customized_capacity_metric_specification metric_data_query metric_stat metric + +The `predictive_scaling_policy_configuration` `metric_specification` `customized_capacity_metric_specification` `metric_data_query` `metric_stat` `metric` configuration block supports the following arguments: + +* `dimension` - (Optional) Dimensions of the metric. See supported fields below. +* `metric_name` - (Optional) Name of the metric. +* `namespace` - (Optional) Namespace of the metric. 
+ +### predictive_scaling_policy_configuration metric_specification customized_capacity_metric_specification metric_data_query metric_stat metric dimension + +The `predictive_scaling_policy_configuration` `metric_specification` `customized_capacity_metric_specification` `metric_data_query` `metric_stat` `metric` `dimension` configuration block supports the following arguments: + +* `name` - (Optional) Name of the dimension. +* `value` - (Optional) Value of the dimension. + +### predictive_scaling_policy_configuration metric_specification predefined_load_metric_specification + +The `predictive_scaling_policy_configuration` `metric_specification` `predefined_load_metric_specification` configuration block supports the following arguments: + +* `predefined_metric_type` - (Required) Metric type. +* `resource_label` - (Optional) Label that uniquely identifies a target group. + +### predictive_scaling_policy_configuration metric_specification predefined_metric_pair_specification + +The `predictive_scaling_policy_configuration` `metric_specification` `predefined_metric_pair_specification` configuration block supports the following arguments: + +* `predefined_metric_type` - (Required) Which metrics to use. There are two different types of metrics for each metric type: one is a load metric and one is a scaling metric. +* `resource_label` - (Optional) Label that uniquely identifies a specific target group from which to determine the total and average request count. + +### predictive_scaling_policy_configuration metric_specification predefined_scaling_metric_specification + +The `predictive_scaling_policy_configuration` `metric_specification` `predefined_scaling_metric_specification` configuration block supports the following arguments: + +* `predefined_metric_type` - (Required) Metric type. +* `resource_label` - (Optional) Label that uniquely identifies a specific target group from which to determine the average request count. 
### step_scaling_policy_configuration @@ -435,4 +551,4 @@ Using `terraform import`, import Application AutoScaling Policy using the `servi % terraform import aws_appautoscaling_policy.test-policy service-namespace/resource-id/scalable-dimension/policy-name ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/appautoscaling_scheduled_action.html.markdown b/website/docs/cdktf/python/r/appautoscaling_scheduled_action.html.markdown index 11a910626d4b..48b0e1aadbfe 100644 --- a/website/docs/cdktf/python/r/appautoscaling_scheduled_action.html.markdown +++ b/website/docs/cdktf/python/r/appautoscaling_scheduled_action.html.markdown @@ -94,6 +94,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the scheduled action. * `service_namespace` - (Required) Namespace of the AWS service. Documentation can be found in the `ServiceNamespace` parameter at: [AWS Application Auto Scaling API Reference](https://docs.aws.amazon.com/autoscaling/application/APIReference/API_PutScheduledAction.html) Example: ecs * `resource_id` - (Required) Identifier of the resource associated with the scheduled action. Documentation can be found in the `ResourceId` parameter at: [AWS Application Auto Scaling API Reference](https://docs.aws.amazon.com/autoscaling/application/APIReference/API_PutScheduledAction.html) @@ -115,4 +116,4 @@ This resource exports the following attributes in addition to the arguments abov * `arn` - ARN of the scheduled action. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/appautoscaling_target.html.markdown b/website/docs/cdktf/python/r/appautoscaling_target.html.markdown index e1e894c99011..52362d742999 100644 --- a/website/docs/cdktf/python/r/appautoscaling_target.html.markdown +++ b/website/docs/cdktf/python/r/appautoscaling_target.html.markdown @@ -141,6 +141,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `max_capacity` - (Required) Max capacity of the scalable target. * `min_capacity` - (Required) Min capacity of the scalable target. * `resource_id` - (Required) Resource type and unique identifier string for the resource associated with the scaling policy. 
Documentation can be found in the `ResourceId` parameter at: [AWS Application Auto Scaling API Reference](https://docs.aws.amazon.com/autoscaling/application/APIReference/API_RegisterScalableTarget.html#API_RegisterScalableTarget_RequestParameters) @@ -190,4 +191,4 @@ Using `terraform import`, import Application AutoScaling Target using the `servi % terraform import aws_appautoscaling_target.test-target service-namespace/resource-id/scalable-dimension ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/appconfig_application.html.markdown b/website/docs/cdktf/python/r/appconfig_application.html.markdown index 22d0d5542b5a..20a98bf3bb56 100644 --- a/website/docs/cdktf/python/r/appconfig_application.html.markdown +++ b/website/docs/cdktf/python/r/appconfig_application.html.markdown @@ -39,6 +39,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name for the application. Must be between 1 and 64 characters in length. * `description` - (Optional) Description of the application. Can be at most 1024 characters. * `tags` - (Optional) Map of tags to assign to the resource. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. 
@@ -76,4 +77,4 @@ Using `terraform import`, import AppConfig Applications using their application % terraform import aws_appconfig_application.example 71rxuzt ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/appconfig_configuration_profile.html.markdown b/website/docs/cdktf/python/r/appconfig_configuration_profile.html.markdown index b44a0bab9444..c2cf8f5c78ca 100644 --- a/website/docs/cdktf/python/r/appconfig_configuration_profile.html.markdown +++ b/website/docs/cdktf/python/r/appconfig_configuration_profile.html.markdown @@ -46,6 +46,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `application_id` - (Required, Forces new resource) Application ID. Must be between 4 and 7 characters in length. * `location_uri` - (Required, Forces new resource) URI to locate the configuration. You can specify the AWS AppConfig hosted configuration store, Systems Manager (SSM) document, an SSM Parameter Store parameter, or an Amazon S3 object. For the hosted configuration store, specify `hosted`. For an SSM document, specify either the document name in the format `ssm-document://` or the ARN. For a parameter, specify either the parameter name in the format `ssm-parameter://` or the ARN. For an Amazon S3 object, specify the URI in the following format: `s3:///`. * `name` - (Required) Name for the configuration profile. Must be between 1 and 128 characters in length. 
@@ -97,4 +98,4 @@ Using `terraform import`, import AppConfig Configuration Profiles using the conf % terraform import aws_appconfig_configuration_profile.example 71abcde:11xxxxx ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/appconfig_deployment.html.markdown b/website/docs/cdktf/python/r/appconfig_deployment.html.markdown index a01e283c2241..60268bb8a1f9 100644 --- a/website/docs/cdktf/python/r/appconfig_deployment.html.markdown +++ b/website/docs/cdktf/python/r/appconfig_deployment.html.markdown @@ -44,6 +44,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `application_id` - (Required, Forces new resource) Application ID. Must be between 4 and 7 characters in length. * `configuration_profile_id` - (Required, Forces new resource) Configuration profile ID. Must be between 4 and 7 characters in length. * `configuration_version` - (Required, Forces new resource) Configuration version to deploy. Can be at most 1024 characters. 
@@ -89,4 +90,4 @@ Using `terraform import`, import AppConfig Deployments using the application ID, % terraform import aws_appconfig_deployment.example 71abcde/11xxxxx/1 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/appconfig_deployment_strategy.html.markdown b/website/docs/cdktf/python/r/appconfig_deployment_strategy.html.markdown index 20221d8bdb99..6e7456fa0dbe 100644 --- a/website/docs/cdktf/python/r/appconfig_deployment_strategy.html.markdown +++ b/website/docs/cdktf/python/r/appconfig_deployment_strategy.html.markdown @@ -44,6 +44,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `deployment_duration_in_minutes` - (Required) Total amount of time for a deployment to last. Minimum value of 0, maximum value of 1440. * `growth_factor` - (Required) Percentage of targets to receive a deployed configuration during each interval. Minimum value of 1.0, maximum value of 100.0. * `name` - (Required, Forces new resource) Name for the deployment strategy. Must be between 1 and 64 characters in length. 
@@ -86,4 +87,4 @@ Using `terraform import`, import AppConfig Deployment Strategies using their dep % terraform import aws_appconfig_deployment_strategy.example 11xxxxx ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/appconfig_environment.html.markdown b/website/docs/cdktf/python/r/appconfig_environment.html.markdown index b7130b489adf..cf60d6a7ebb9 100644 --- a/website/docs/cdktf/python/r/appconfig_environment.html.markdown +++ b/website/docs/cdktf/python/r/appconfig_environment.html.markdown @@ -55,6 +55,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `application_id` - (Required, Forces new resource) AppConfig application ID. Must be between 4 and 7 characters in length. * `name` - (Required) Name for the environment. Must be between 1 and 64 characters in length. * `description` - (Optional) Description of the environment. Can be at most 1024 characters. 
@@ -104,4 +105,4 @@ Using `terraform import`, import AppConfig Environments using the environment ID % terraform import aws_appconfig_environment.example 71abcde:11xxxxx ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/appconfig_extension.html.markdown b/website/docs/cdktf/python/r/appconfig_extension.html.markdown index 94f3dce01f0e..ca700746a0d5 100644 --- a/website/docs/cdktf/python/r/appconfig_extension.html.markdown +++ b/website/docs/cdktf/python/r/appconfig_extension.html.markdown @@ -76,6 +76,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) A name for the extension. Each extension name in your account must be unique. Extension versions use the same name. * `description` - (Optional) Information about the extension. * `action_point` - (Required) The action points defined in the extension. [Detailed below](#action_point). @@ -139,4 +140,4 @@ Using `terraform import`, import AppConfig Extensions using their extension ID. 
% terraform import aws_appconfig_extension.example 71rxuzt ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/appconfig_extension_association.html.markdown b/website/docs/cdktf/python/r/appconfig_extension_association.html.markdown index 26e0696df111..302a513c25a7 100644 --- a/website/docs/cdktf/python/r/appconfig_extension_association.html.markdown +++ b/website/docs/cdktf/python/r/appconfig_extension_association.html.markdown @@ -90,6 +90,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `extension_arn` - (Required) The ARN of the extension defined in the association. * `resource_arn` - (Optional) The ARN of the application, configuration profile, or environment to associate with the extension. * `parameters` - (Optional) The parameter names and values defined for the association. @@ -127,4 +128,4 @@ Using `terraform import`, import AppConfig Extension Associations using their ex % terraform import aws_appconfig_extension_association.example 71rxuzt ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/appconfig_hosted_configuration_version.html.markdown b/website/docs/cdktf/python/r/appconfig_hosted_configuration_version.html.markdown index ca97fa3ef4d6..225ad378923c 100644 --- a/website/docs/cdktf/python/r/appconfig_hosted_configuration_version.html.markdown +++ b/website/docs/cdktf/python/r/appconfig_hosted_configuration_version.html.markdown @@ -103,10 +103,52 @@ class MyConvertedCode(TerraformStack): ) ``` +### Multi-variant Feature Flags + +```python +# DO NOT EDIT. 
Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import Token, Fn, TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.appconfig_hosted_configuration_version import AppconfigHostedConfigurationVersion +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + AppconfigHostedConfigurationVersion(self, "example", + application_id=Token.as_string(aws_appconfig_application_example.id), + configuration_profile_id=Token.as_string(aws_appconfig_configuration_profile_example.configuration_profile_id), + content=Token.as_string( + Fn.jsonencode({ + "flags": { + "loggingenabled": { + "name": "loggingEnabled" + } + }, + "values": { + "loggingenabled": { + "_variants": Fn.concat(["${[ for user_id in ${" + appcfg_enable_logging_user_ids.value + "} : { # Flat list of userIds\n enabled = true,\n name = \"usersWithLoggingEnabled_${user_id}\",\n rule = \"(or (eq $userId \\\"${user_id}\\\"))\"\n }]}", [{ + "enabled": False, + "name": "Default" + } + ] + ]) + } + }, + "version": "1" + })), + content_type="application/json", + description="Example Multi-variant Feature Flag Configuration Version" + ) +``` + ## Argument Reference This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `application_id` - (Required, Forces new resource) Application ID. * `configuration_profile_id` - (Required, Forces new resource) Configuration profile ID. * `content` - (Required, Forces new resource) Content of the configuration or the configuration data. 
@@ -146,4 +188,4 @@ Using `terraform import`, import AppConfig Hosted Configuration Versions using t % terraform import aws_appconfig_hosted_configuration_version.example 71abcde/11xxxxx/2 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/appfabric_app_authorization.html.markdown b/website/docs/cdktf/python/r/appfabric_app_authorization.html.markdown index dada575b20e5..97b5a95a20bf 100644 --- a/website/docs/cdktf/python/r/appfabric_app_authorization.html.markdown +++ b/website/docs/cdktf/python/r/appfabric_app_authorization.html.markdown @@ -51,6 +51,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `app` - (Required) The name of the application for valid values see https://docs.aws.amazon.com/appfabric/latest/api/API_CreateAppAuthorization.html. * `app_bundle_arn` - (Required) The Amazon Resource Name (ARN) of the app bundle to use for the request. * `auth_type` - (Required) The authorization type for the app authorization valid values are oauth2 and apiKey. 
@@ -93,4 +94,4 @@ This resource exports the following attributes in addition to the arguments abov * `update` - (Default `30m`) * `delete` - (Default `30m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/appfabric_app_authorization_connection.html.markdown b/website/docs/cdktf/python/r/appfabric_app_authorization_connection.html.markdown index 7b5968075ccb..5a8e03d0569b 100644 --- a/website/docs/cdktf/python/r/appfabric_app_authorization_connection.html.markdown +++ b/website/docs/cdktf/python/r/appfabric_app_authorization_connection.html.markdown @@ -38,6 +38,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `app_bundle_arn` - (Required) The Amazon Resource Name (ARN) of the app bundle to use for the request. * `app_authorization_arn` - (Required) The Amazon Resource Name (ARN) or Universal Unique Identifier (UUID) of the app authorization to use for the request. * `auth_request` - (Optional) Contains OAuth2 authorization information.This is required if the app authorization for the request is configured with an OAuth2 (oauth2) authorization type. 
@@ -60,4 +61,4 @@ This resource exports the following attributes in addition to the arguments abov * `create` - (Default `30m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/appfabric_app_bundle.html.markdown b/website/docs/cdktf/python/r/appfabric_app_bundle.html.markdown index 0cdeee527c28..ff7a1bc831e8 100644 --- a/website/docs/cdktf/python/r/appfabric_app_bundle.html.markdown +++ b/website/docs/cdktf/python/r/appfabric_app_bundle.html.markdown @@ -40,6 +40,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `customer_managed_key_arn` - (Optional) The Amazon Resource Name (ARN) of the AWS Key Management Service (AWS KMS) key to use to encrypt the application data. If this is not specified, an AWS owned key is used for encryption. * `tags` - (Optional) Map of tags to assign to the resource. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. @@ -52,6 +53,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. 
For example: + +```terraform +import { + to = aws_appfabric_app_bundle.example + identity = { + "arn" = "arn:aws:appfabric:us-east-1:123456789012:appbundle/a1b2c3d4-5678-90ab-cdef-EXAMPLE11111" + } +} + +resource "aws_appfabric_app_bundle" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the AppFabric app bundle. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import AppFabric AppBundle using the `arn`. For example: ```python @@ -75,4 +97,4 @@ Using `terraform import`, import AppFabric AppBundle using the `arn`. For exampl % terraform import aws_appfabric_app_bundle.example arn:aws:appfabric:[region]:[account]:appbundle/ee5587b4-5765-4288-a202-xxxxxxxxxx ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/appfabric_ingestion.html.markdown b/website/docs/cdktf/python/r/appfabric_ingestion.html.markdown index 3228b00fd384..a5013a89a723 100644 --- a/website/docs/cdktf/python/r/appfabric_ingestion.html.markdown +++ b/website/docs/cdktf/python/r/appfabric_ingestion.html.markdown @@ -43,6 +43,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `app` - (Required) Name of the application. Refer to the AWS Documentation for the [list of valid values](https://docs.aws.amazon.com/appfabric/latest/api/API_CreateIngestion.html#appfabric-CreateIngestion-request-app) * `app_bundle_arn` - (Required) Amazon Resource Name (ARN) of the app bundle to use for the request. 
@@ -82,4 +83,4 @@ Using `terraform import`, import AppFabric Ingestion using the `app_bundle_ident % terraform import aws_appfabric_ingestion.example arn:aws:appfabric:[region]:[account]:appbundle/a9b91477-8831-43c0-970c-xxxxxxxxxx,arn:aws:appfabric:[region]:[account]:appbundle/a9b91477-8831-43c0-970c-xxxxxxxxxx/ingestion/32251416-710b-4425-96ca-xxxxxxxxxx ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/appfabric_ingestion_destination.html.markdown b/website/docs/cdktf/python/r/appfabric_ingestion_destination.html.markdown index a16df580724f..161b7bc7a125 100644 --- a/website/docs/cdktf/python/r/appfabric_ingestion_destination.html.markdown +++ b/website/docs/cdktf/python/r/appfabric_ingestion_destination.html.markdown @@ -59,6 +59,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `app_bundle_arn` - (Required) The Amazon Resource Name (ARN) of the app bundle to use for the request. * `ingestion_arn` - (Required) The Amazon Resource Name (ARN) of the ingestion to use for the request. * `destination_configuration` - (Required) Contains information about the destination of ingested data. 
@@ -111,4 +112,4 @@ This resource exports the following attributes in addition to the arguments abov * `update` - (Default `5m`) * `delete` - (Default `5m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/appflow_connector_profile.html.markdown b/website/docs/cdktf/python/r/appflow_connector_profile.html.markdown index d414f0e8b965..ab1bbc3f118e 100644 --- a/website/docs/cdktf/python/r/appflow_connector_profile.html.markdown +++ b/website/docs/cdktf/python/r/appflow_connector_profile.html.markdown @@ -99,6 +99,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name ` (Required) - Name of the connector profile. The name is unique for each `ConnectorProfile` in your AWS account. * `connection_mode` (Required) - Indicates the connection mode and specifies whether it is public or private. Private flows use AWS PrivateLink to route data over AWS infrastructure without exposing it to the public internet. One of: `Public`, `Private`. * `connector_label` (Optional) - The label of the connector. The label is unique for each ConnectorRegistration in your AWS account. Only needed if calling for `CustomConnector` connector type. @@ -339,7 +340,33 @@ This resource exports the following attributes in addition to the arguments abov ## Import -In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import AppFlow Connector Profile using the connector profile `arn`. 
For example: +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_appflow_connector_profile.example + identity = { + name = "example_profile" + } +} + +resource "aws_appflow_connector_profile" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `name` (String) Name of the Appflow connector profile. + +#### Optional + +* `account_id` (String) AWS Account where this resource is managed. +* `region` (String) Region where this resource is managed. + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import AppFlow Connector Profile using the connector profile `name`. For example: ```python # DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug @@ -353,16 +380,16 @@ from imports.aws.appflow_connector_profile import AppflowConnectorProfile class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) - AppflowConnectorProfile.generate_config_for_import(self, "profile", "arn:aws:appflow:us-west-2:123456789012:connectorprofile/example-profile") + AppflowConnectorProfile.generate_config_for_import(self, "example", "example-profile") ``` -Using `terraform import`, import AppFlow Connector Profile using the connector profile `arn`. For example: +Using `terraform import`, import AppFlow Connector Profile using the connector profile `name`. 
For example: ```console -% terraform import aws_appflow_connector_profile.profile arn:aws:appflow:us-west-2:123456789012:connectorprofile/example-profile +% terraform import aws_appflow_connector_profile.example example-profile ``` [1]: https://docs.aws.amazon.com/appflow/1.0/APIReference/Welcome.html [2]: https://docs.aws.amazon.com/appflow/1.0/APIReference/API_CreateConnectorProfile.html - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/appflow_flow.html.markdown b/website/docs/cdktf/python/r/appflow_flow.html.markdown index a426dc14b145..e6c5297af867 100644 --- a/website/docs/cdktf/python/r/appflow_flow.html.markdown +++ b/website/docs/cdktf/python/r/appflow_flow.html.markdown @@ -136,6 +136,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the flow. * `destination_flow_config` - (Required) A [Destination Flow Config](#destination-flow-config) that controls how Amazon AppFlow places data in the destination connector. * `source_flow_config` - (Required) The [Source Flow Config](#source-flow-config) that controls how Amazon AppFlow retrieves data from the source connector. @@ -435,7 +436,33 @@ This resource exports the following attributes in addition to the arguments abov ## Import -In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import AppFlow flows using the `arn`. For example: +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. 
For example: + +```terraform +import { + to = aws_appflow_flow.example + identity = { + name = "example-flow" + } +} + +resource "aws_appflow_flow" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `name` (String) Name of the AppFlow flow. + +#### Optional + +* `account_id` (String) AWS Account where this resource is managed. +* `region` (String) Region where this resource is managed. + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import AppFlow flows using the `name`. For example: ```python # DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug @@ -449,13 +476,13 @@ from imports.aws.appflow_flow import AppflowFlow class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) - AppflowFlow.generate_config_for_import(self, "example", "arn:aws:appflow:us-west-2:123456789012:flow/example-flow") + AppflowFlow.generate_config_for_import(self, "example", "example-flow") ``` -Using `terraform import`, import AppFlow flows using the `arn`. For example: +Using `terraform import`, import AppFlow flows using the `name`. 
For example: ```console -% terraform import aws_appflow_flow.example arn:aws:appflow:us-west-2:123456789012:flow/example-flow +% terraform import aws_appflow_flow.example example-flow ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/appintegrations_data_integration.html.markdown b/website/docs/cdktf/python/r/appintegrations_data_integration.html.markdown index 37153fe18486..c9db9ae3de48 100644 --- a/website/docs/cdktf/python/r/appintegrations_data_integration.html.markdown +++ b/website/docs/cdktf/python/r/appintegrations_data_integration.html.markdown @@ -46,6 +46,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` - (Optional) Specifies the description of the Data Integration. * `kms_key` - (Required) Specifies the KMS key Amazon Resource Name (ARN) for the Data Integration. * `name` - (Required) Specifies the name of the Data Integration. 
@@ -92,4 +93,4 @@ Using `terraform import`, import Amazon AppIntegrations Data Integrations using % terraform import aws_appintegrations_data_integration.example 12345678-1234-1234-1234-123456789123 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/appintegrations_event_integration.html.markdown b/website/docs/cdktf/python/r/appintegrations_event_integration.html.markdown index 1f0eeb211788..d79f4fa78448 100644 --- a/website/docs/cdktf/python/r/appintegrations_event_integration.html.markdown +++ b/website/docs/cdktf/python/r/appintegrations_event_integration.html.markdown @@ -43,6 +43,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` - (Optional) Description of the Event Integration. * `eventbridge_bus` - (Required) EventBridge bus. * `event_filter` - (Required) Block that defines the configuration information for the event filter. The Event Filter block is documented below. 
@@ -86,4 +87,4 @@ Using `terraform import`, import Amazon AppIntegrations Event Integrations using % terraform import aws_appintegrations_event_integration.example example-name ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/applicationinsights_application.html.markdown b/website/docs/cdktf/python/r/applicationinsights_application.html.markdown index 714f4ca2a243..a64b186c46e5 100644 --- a/website/docs/cdktf/python/r/applicationinsights_application.html.markdown +++ b/website/docs/cdktf/python/r/applicationinsights_application.html.markdown @@ -57,6 +57,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `auto_config_enabled` - (Optional) Indicates whether Application Insights automatically configures unmonitored resources in the resource group. * `auto_create` - (Optional) Configures all of the resources in the resource group by applying the recommended configurations. * `cwe_monitor_enabled` - (Optional) Indicates whether Application Insights can listen to CloudWatch events for the application resources, such as instance terminated, failed deployment, and others. 
@@ -98,4 +99,4 @@ Using `terraform import`, import ApplicationInsights Applications using the `res % terraform import aws_applicationinsights_application.some some-application ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/appmesh_gateway_route.html.markdown b/website/docs/cdktf/python/r/appmesh_gateway_route.html.markdown index 3df90a88d863..1fc54fafa55c 100644 --- a/website/docs/cdktf/python/r/appmesh_gateway_route.html.markdown +++ b/website/docs/cdktf/python/r/appmesh_gateway_route.html.markdown @@ -54,6 +54,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name to use for the gateway route. Must be between 1 and 255 characters in length. * `mesh_name` - (Required) Name of the service mesh in which to create the gateway route. Must be between 1 and 255 characters in length. * `virtual_gateway_name` - (Required) Name of the [virtual gateway](/docs/providers/aws/r/appmesh_virtual_gateway.html) to associate the gateway route with. Must be between 1 and 255 characters in length. 
@@ -200,4 +201,4 @@ Using `terraform import`, import App Mesh gateway routes using `mesh_name` and ` [1]: /docs/providers/aws/index.html - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/appmesh_mesh.html.markdown b/website/docs/cdktf/python/r/appmesh_mesh.html.markdown index c2bfdac4699a..a1ef67ba016f 100644 --- a/website/docs/cdktf/python/r/appmesh_mesh.html.markdown +++ b/website/docs/cdktf/python/r/appmesh_mesh.html.markdown @@ -61,6 +61,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name to use for the service mesh. Must be between 1 and 255 characters in length. * `spec` - (Optional) Service mesh specification to apply. * `egress_filter`- (Optional) Egress filter rules for the service mesh. @@ -106,4 +107,4 @@ Using `terraform import`, import App Mesh service meshes using the `name`. For e % terraform import aws_appmesh_mesh.simple simpleapp ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/appmesh_route.html.markdown b/website/docs/cdktf/python/r/appmesh_route.html.markdown index 6c3af977db1b..710cdf6a7943 100644 --- a/website/docs/cdktf/python/r/appmesh_route.html.markdown +++ b/website/docs/cdktf/python/r/appmesh_route.html.markdown @@ -175,6 +175,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name to use for the route. Must be between 1 and 255 characters in length. * `mesh_name` - (Required) Name of the service mesh in which to create the route. Must be between 1 and 255 characters in length. * `mesh_owner` - (Optional) AWS account ID of the service mesh's owner. Defaults to the account ID the [AWS provider][1] is currently connected to. @@ -380,4 +381,4 @@ Using `terraform import`, import App Mesh virtual routes using `mesh_name` and ` [1]: /docs/providers/aws/index.html - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/appmesh_virtual_gateway.html.markdown b/website/docs/cdktf/python/r/appmesh_virtual_gateway.html.markdown index 523c1258dfd4..023f428af6b9 100644 --- a/website/docs/cdktf/python/r/appmesh_virtual_gateway.html.markdown +++ b/website/docs/cdktf/python/r/appmesh_virtual_gateway.html.markdown @@ -94,6 +94,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name to use for the virtual gateway. Must be between 1 and 255 characters in length. * `mesh_name` - (Required) Name of the service mesh in which to create the virtual gateway. Must be between 1 and 255 characters in length. * `mesh_owner` - (Optional) AWS account ID of the service mesh's owner. Defaults to the account ID the [AWS provider][1] is currently connected to. 
@@ -319,4 +320,4 @@ Using `terraform import`, import App Mesh virtual gateway using `mesh_name` toge [1]: /docs/providers/aws/index.html - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/appmesh_virtual_node.html.markdown b/website/docs/cdktf/python/r/appmesh_virtual_node.html.markdown index 89e9fdb74204..2747eaaa0bfb 100644 --- a/website/docs/cdktf/python/r/appmesh_virtual_node.html.markdown +++ b/website/docs/cdktf/python/r/appmesh_virtual_node.html.markdown @@ -212,6 +212,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name to use for the virtual node. Must be between 1 and 255 characters in length. * `mesh_name` - (Required) Name of the service mesh in which to create the virtual node. Must be between 1 and 255 characters in length. * `mesh_owner` - (Optional) AWS account ID of the service mesh's owner. Defaults to the account ID the [AWS provider][1] is currently connected to. 
@@ -522,4 +523,4 @@ Using `terraform import`, import App Mesh virtual nodes using `mesh_name` togeth [1]: /docs/providers/aws/index.html - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/appmesh_virtual_router.html.markdown b/website/docs/cdktf/python/r/appmesh_virtual_router.html.markdown index 2d0a212729a1..3b346999842d 100644 --- a/website/docs/cdktf/python/r/appmesh_virtual_router.html.markdown +++ b/website/docs/cdktf/python/r/appmesh_virtual_router.html.markdown @@ -55,6 +55,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name to use for the virtual router. Must be between 1 and 255 characters in length. * `mesh_name` - (Required) Name of the service mesh in which to create the virtual router. Must be between 1 and 255 characters in length. * `mesh_owner` - (Optional) AWS account ID of the service mesh's owner. Defaults to the account ID the [AWS provider][1] is currently connected to. 
@@ -113,4 +114,4 @@ Using `terraform import`, import App Mesh virtual routers using `mesh_name` toge [1]: /docs/providers/aws/index.html - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/appmesh_virtual_service.html.markdown b/website/docs/cdktf/python/r/appmesh_virtual_service.html.markdown index 4d36bd937642..95467864d74d 100644 --- a/website/docs/cdktf/python/r/appmesh_virtual_service.html.markdown +++ b/website/docs/cdktf/python/r/appmesh_virtual_service.html.markdown @@ -72,6 +72,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name to use for the virtual service. Must be between 1 and 255 characters in length. * `mesh_name` - (Required) Name of the service mesh in which to create the virtual service. Must be between 1 and 255 characters in length. * `mesh_owner` - (Optional) AWS account ID of the service mesh's owner. Defaults to the account ID the [AWS provider][1] is currently connected to. 
@@ -133,4 +134,4 @@ Using `terraform import`, import App Mesh virtual services using `mesh_name` tog [1]: /docs/providers/aws/index.html - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/apprunner_auto_scaling_configuration_version.html.markdown b/website/docs/cdktf/python/r/apprunner_auto_scaling_configuration_version.html.markdown index c5886b890179..d257ac77a3fc 100644 --- a/website/docs/cdktf/python/r/apprunner_auto_scaling_configuration_version.html.markdown +++ b/website/docs/cdktf/python/r/apprunner_auto_scaling_configuration_version.html.markdown @@ -41,6 +41,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `auto_scaling_configuration_name` - (Required, Forces new resource) Name of the auto scaling configuration. * `max_concurrency` - (Optional, Forces new resource) Maximal number of concurrent requests that you want an instance to process. When the number of concurrent requests goes over this limit, App Runner scales up your service. * `max_size` - (Optional, Forces new resource) Maximal number of instances that App Runner provisions for your service. @@ -59,6 +60,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. 
For example: + +```terraform +import { + to = aws_apprunner_auto_scaling_configuration_version.example + identity = { + "arn" = "arn:aws:apprunner:us-east-1:123456789012:autoscalingconfiguration/example-auto-scaling-config/1/a1b2c3d4567890ab" + } +} + +resource "aws_apprunner_auto_scaling_configuration_version" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the App Runner auto scaling configuration version. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import App Runner AutoScaling Configuration Versions using the `arn`. For example: ```python @@ -82,4 +104,4 @@ Using `terraform import`, import App Runner AutoScaling Configuration Versions u % terraform import aws_apprunner_auto_scaling_configuration_version.example "arn:aws:apprunner:us-east-1:1234567890:autoscalingconfiguration/example/1/69bdfe0115224b0db49398b7beb68e0f ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/apprunner_connection.html.markdown b/website/docs/cdktf/python/r/apprunner_connection.html.markdown index cbfe99f94c71..7ae25bdbe7cf 100644 --- a/website/docs/cdktf/python/r/apprunner_connection.html.markdown +++ b/website/docs/cdktf/python/r/apprunner_connection.html.markdown @@ -41,6 +41,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `connection_name` - (Required) Name of the connection. * `provider_type` - (Required) Source repository provider. Valid values: `GITHUB`. * `tags` - (Optional) Key-value map of resource tags. 
If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. @@ -78,4 +79,4 @@ Using `terraform import`, import App Runner Connections using the `connection_na % terraform import aws_apprunner_connection.example example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/apprunner_custom_domain_association.html.markdown b/website/docs/cdktf/python/r/apprunner_custom_domain_association.html.markdown index ad21b21b2a76..7c1e81aff74e 100644 --- a/website/docs/cdktf/python/r/apprunner_custom_domain_association.html.markdown +++ b/website/docs/cdktf/python/r/apprunner_custom_domain_association.html.markdown @@ -38,6 +38,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `domain_name` - (Required) Custom domain endpoint to association. Specify a base domain e.g., `example.com` or a subdomain e.g., `subdomain.example.com`. * `enable_www_subdomain` (Optional) Whether to associate the subdomain with the App Runner service in addition to the base domain. Defaults to `true`. * `service_arn` - (Required) ARN of the App Runner service. 
@@ -84,4 +85,4 @@ Using `terraform import`, import App Runner Custom Domain Associations using the % terraform import aws_apprunner_custom_domain_association.example example.com,arn:aws:apprunner:us-east-1:123456789012:service/example-app/8fe1e10304f84fd2b0df550fe98a71fa ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/apprunner_default_auto_scaling_configuration_version.html.markdown b/website/docs/cdktf/python/r/apprunner_default_auto_scaling_configuration_version.html.markdown index b8994c9386a6..c8a73e344abb 100644 --- a/website/docs/cdktf/python/r/apprunner_default_auto_scaling_configuration_version.html.markdown +++ b/website/docs/cdktf/python/r/apprunner_default_auto_scaling_configuration_version.html.markdown @@ -48,6 +48,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `auto_scaling_configuration_arn` - (Required) The ARN of the App Runner auto scaling configuration that you want to set as the default. 
## Attribute Reference @@ -79,4 +80,4 @@ Using `terraform import`, import App Runner default auto scaling configurations % terraform import aws_apprunner_default_auto_scaling_configuration_version.example us-west-2 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/apprunner_deployment.html.markdown b/website/docs/cdktf/python/r/apprunner_deployment.html.markdown index 46151db09b1e..b9187447e26d 100644 --- a/website/docs/cdktf/python/r/apprunner_deployment.html.markdown +++ b/website/docs/cdktf/python/r/apprunner_deployment.html.markdown @@ -35,6 +35,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `service_arn` - (Required) The Amazon Resource Name (ARN) of the App Runner service to start the deployment for. ## Attribute Reference @@ -45,4 +46,4 @@ This resource exports the following attributes in addition to the arguments abov * `operation_id` - The unique ID of the operation associated with deployment. * `status` - The current status of the App Runner service deployment. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/apprunner_observability_configuration.html.markdown b/website/docs/cdktf/python/r/apprunner_observability_configuration.html.markdown index 78be9a1292ac..1921c1b81314 100644 --- a/website/docs/cdktf/python/r/apprunner_observability_configuration.html.markdown +++ b/website/docs/cdktf/python/r/apprunner_observability_configuration.html.markdown @@ -41,6 +41,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `observability_configuration_name` - (Required, Forces new resource) Name of the observability configuration. * `trace_configuration` - (Optional) Configuration of the tracing feature within this observability configuration. If you don't specify it, App Runner doesn't enable tracing. See [Trace Configuration](#trace-configuration) below for more details. * `tags` - (Optional) Key-value map of resource tags. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. @@ -63,6 +64,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. 
For example: + +```terraform +import { + to = aws_apprunner_observability_configuration.example + identity = { + "arn" = "arn:aws:apprunner:us-east-1:123456789012:observabilityconfiguration/example-observability-config/1/a1b2c3d4567890ab" + } +} + +resource "aws_apprunner_observability_configuration" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the App Runner observability configuration. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import App Runner Observability Configuration using the `arn`. For example: ```python @@ -86,4 +108,4 @@ Using `terraform import`, import App Runner Observability Configuration using th % terraform import aws_apprunner_observability_configuration.example arn:aws:apprunner:us-east-1:1234567890:observabilityconfiguration/example/1/d75bc7ea55b71e724fe5c23452fe22a1 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/apprunner_service.html.markdown b/website/docs/cdktf/python/r/apprunner_service.html.markdown index 472cfd867ead..52681d06d7ab 100644 --- a/website/docs/cdktf/python/r/apprunner_service.html.markdown +++ b/website/docs/cdktf/python/r/apprunner_service.html.markdown @@ -149,6 +149,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `auto_scaling_configuration_arn` - ARN of an App Runner automatic scaling configuration resource that you want to associate with your service. If not provided, App Runner associates the latest revision of a default auto scaling configuration. 
* `encryption_configuration` - (Forces new resource) An optional custom encryption key that App Runner uses to encrypt the copy of your source repository that it maintains and your service logs. By default, App Runner uses an AWS managed CMK. See [Encryption Configuration](#encryption-configuration) below for more details. * `health_check_configuration` - Settings of the health check that AWS App Runner performs to monitor the health of your service. See [Health Check Configuration](#health-check-configuration) below for more details. @@ -300,6 +301,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_apprunner_service.example + identity = { + "arn" = "arn:aws:apprunner:us-east-1:123456789012:service/example-app-service/8fe1e10304f84fd2b0df550fe98a71fa" + } +} + +resource "aws_apprunner_service" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the App Runner service. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import App Runner Services using the `arn`. For example: ```python @@ -323,4 +345,4 @@ Using `terraform import`, import App Runner Services using the `arn`. 
For exampl % terraform import aws_apprunner_service.example arn:aws:apprunner:us-east-1:1234567890:service/example/0a03292a89764e5882c41d8f991c82fe ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/apprunner_vpc_connector.html.markdown b/website/docs/cdktf/python/r/apprunner_vpc_connector.html.markdown index e669d3a5d622..bf03d95e5afc 100644 --- a/website/docs/cdktf/python/r/apprunner_vpc_connector.html.markdown +++ b/website/docs/cdktf/python/r/apprunner_vpc_connector.html.markdown @@ -37,6 +37,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `vpc_connector_name` - (Required) Name for the VPC connector. * `subnets` (Required) List of IDs of subnets that App Runner should use when it associates your service with a custom Amazon VPC. Specify IDs of subnets of a single Amazon VPC. App Runner determines the Amazon VPC from the subnets you specify. * `security_groups` - List of IDs of security groups that App Runner should use for access to AWS resources under the specified subnets. If not specified, App Runner uses the default security group of the Amazon VPC. The default security group allows all outbound traffic. @@ -53,6 +54,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. 
For example: + +```terraform +import { + to = aws_apprunner_vpc_connector.example + identity = { + "arn" = "arn:aws:apprunner:us-east-1:123456789012:vpcconnector/example-vpc-connector/1/a1b2c3d4567890ab" + } +} + +resource "aws_apprunner_vpc_connector" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the App Runner VPC connector. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import App Runner vpc connector using the `arn`. For example: ```python @@ -76,4 +98,4 @@ Using `terraform import`, import App Runner vpc connector using the `arn`. For e % terraform import aws_apprunner_vpc_connector.example arn:aws:apprunner:us-east-1:1234567890:vpcconnector/example/1/0a03292a89764e5882c41d8f991c82fe ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/apprunner_vpc_ingress_connection.html.markdown b/website/docs/cdktf/python/r/apprunner_vpc_ingress_connection.html.markdown index 95e031e3b0be..c4afa62e38a3 100644 --- a/website/docs/cdktf/python/r/apprunner_vpc_ingress_connection.html.markdown +++ b/website/docs/cdktf/python/r/apprunner_vpc_ingress_connection.html.markdown @@ -43,6 +43,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) A name for the VPC Ingress Connection resource. It must be unique across all the active VPC Ingress Connections in your AWS account in the AWS Region. 
* `service_arn` - (Required) The Amazon Resource Name (ARN) for this App Runner service that is used to create the VPC Ingress Connection resource. * `ingress_vpc_configuration` - (Required) Specifications for the customer’s Amazon VPC and the related AWS PrivateLink VPC endpoint that are used to create the VPC Ingress Connection resource. See [Ingress VPC Configuration](#ingress-vpc-configuration) below for more details. @@ -66,6 +67,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_apprunner_vpc_ingress_connection.example + identity = { + "arn" = "arn:aws:apprunner:us-east-1:123456789012:vpcingressconnection/example-vpc-ingress-connection/a1b2c3d4567890ab" + } +} + +resource "aws_apprunner_vpc_ingress_connection" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the App Runner VPC ingress connection. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import App Runner VPC Ingress Connection using the `arn`. 
For example: ```python @@ -89,4 +111,4 @@ Using `terraform import`, import App Runner VPC Ingress Connection using the `ar % terraform import aws_apprunner_vpc_ingress_connection.example "arn:aws:apprunner:us-west-2:837424938642:vpcingressconnection/example/b379f86381d74825832c2e82080342fa" ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/appstream_directory_config.html.markdown b/website/docs/cdktf/python/r/appstream_directory_config.html.markdown index 8bfe4361b100..d1390c6a9a4b 100644 --- a/website/docs/cdktf/python/r/appstream_directory_config.html.markdown +++ b/website/docs/cdktf/python/r/appstream_directory_config.html.markdown @@ -38,8 +38,9 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -The following arguments are required: +This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `directory_name` - (Required) Fully qualified name of the directory. * `organizational_unit_distinguished_names` - (Required) Distinguished names of the organizational units for computer accounts. * `service_account_credentials` - (Required) Configuration block for the name of the directory and organizational unit (OU) to use to join the directory config to a Microsoft Active Directory domain. See [`service_account_credentials`](#service_account_credentials) below. @@ -81,4 +82,4 @@ Using `terraform import`, import `aws_appstream_directory_config` using the id. 
% terraform import aws_appstream_directory_config.example directoryNameExample ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/appstream_fleet.html.markdown b/website/docs/cdktf/python/r/appstream_fleet.html.markdown index b30779614815..c36a88ac8afe 100644 --- a/website/docs/cdktf/python/r/appstream_fleet.html.markdown +++ b/website/docs/cdktf/python/r/appstream_fleet.html.markdown @@ -58,6 +58,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` - (Optional) Description to display. * `disconnect_timeout_in_seconds` - (Optional) Amount of time that a streaming session remains active after users disconnect. * `display_name` - (Optional) Human-readable friendly name for the AppStream fleet. @@ -132,4 +133,4 @@ Using `terraform import`, import `aws_appstream_fleet` using the id. 
For example % terraform import aws_appstream_fleet.example fleetNameExample ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/appstream_fleet_stack_association.html.markdown b/website/docs/cdktf/python/r/appstream_fleet_stack_association.html.markdown index 8048447f6b8d..17a9da492622 100644 --- a/website/docs/cdktf/python/r/appstream_fleet_stack_association.html.markdown +++ b/website/docs/cdktf/python/r/appstream_fleet_stack_association.html.markdown @@ -52,8 +52,9 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -The following arguments are required: +This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `fleet_name` - (Required) Name of the fleet. * `stack_name` (Required) Name of the stack. @@ -88,4 +89,4 @@ Using `terraform import`, import AppStream Stack Fleet Association using the `fl % terraform import aws_appstream_fleet_stack_association.example fleetName/stackName ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/appstream_image_builder.html.markdown b/website/docs/cdktf/python/r/appstream_image_builder.html.markdown index 7517b032d4ba..a1fe794ffdda 100644 --- a/website/docs/cdktf/python/r/appstream_image_builder.html.markdown +++ b/website/docs/cdktf/python/r/appstream_image_builder.html.markdown @@ -51,6 +51,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `access_endpoint` - (Optional) Set of interface VPC endpoint (interface endpoint) objects. Maximum of 4. See below. * `appstream_agent_version` - (Optional) Version of the AppStream 2.0 agent to use for this image builder. * `description` - (Optional) Description to display. @@ -119,4 +120,4 @@ Using `terraform import`, import `aws_appstream_image_builder` using the `name`. % terraform import aws_appstream_image_builder.example imageBuilderExample ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/appstream_stack.html.markdown b/website/docs/cdktf/python/r/appstream_stack.html.markdown index 36771264aee2..903d53a98585 100644 --- a/website/docs/cdktf/python/r/appstream_stack.html.markdown +++ b/website/docs/cdktf/python/r/appstream_stack.html.markdown @@ -80,6 +80,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `access_endpoints` - (Optional) Set of configuration blocks defining the interface VPC endpoints. Users of the stack can connect to AppStream 2.0 only through the specified endpoints. See [`access_endpoints`](#access_endpoints) below. * `application_settings` - (Optional) Settings for application settings persistence. @@ -162,4 +163,4 @@ Using `terraform import`, import `aws_appstream_stack` using the id. 
For example % terraform import aws_appstream_stack.example stackID ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/appstream_user.html.markdown b/website/docs/cdktf/python/r/appstream_user.html.markdown index 8440a67ab875..a97af3c805a5 100644 --- a/website/docs/cdktf/python/r/appstream_user.html.markdown +++ b/website/docs/cdktf/python/r/appstream_user.html.markdown @@ -43,6 +43,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `enabled` - (Optional) Whether the user in the user pool is enabled. * `first_name` - (Optional) First name, or given name, of the user. * `last_name` - (Optional) Last name, or surname, of the user. @@ -82,4 +83,4 @@ Using `terraform import`, import `aws_appstream_user` using the `user_name` and % terraform import aws_appstream_user.example UserName/AuthenticationType ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/appstream_user_stack_association.html.markdown b/website/docs/cdktf/python/r/appstream_user_stack_association.html.markdown index e4b3005678af..017bd80f2faa 100644 --- a/website/docs/cdktf/python/r/appstream_user_stack_association.html.markdown +++ b/website/docs/cdktf/python/r/appstream_user_stack_association.html.markdown @@ -57,6 +57,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `send_email_notification` - (Optional) Whether a welcome email is sent to a user after the user is created in the user pool. ## Attribute Reference @@ -90,4 +91,4 @@ Using `terraform import`, import AppStream User Stack Association using the `use % terraform import aws_appstream_user_stack_association.example userName/auhtenticationType/stackName ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/appsync_api.html.markdown b/website/docs/cdktf/python/r/appsync_api.html.markdown new file mode 100644 index 000000000000..90b6e6b1fe20 --- /dev/null +++ b/website/docs/cdktf/python/r/appsync_api.html.markdown @@ -0,0 +1,254 @@ +--- +subcategory: "AppSync" +layout: "aws" +page_title: "AWS: aws_appsync_api" +description: |- + Manages an AWS AppSync Event API. +--- + + + +# Resource: aws_appsync_api + +Manages an [AWS AppSync Event API](https://docs.aws.amazon.com/appsync/latest/eventapi/event-api-concepts.html#API). Event APIs enable real-time subscriptions and event-driven communication in AppSync applications. + +## Example Usage + +### Basic Usage + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. 
+# +from imports.aws.appsync_api import AppsyncApi +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + AppsyncApi(self, "example", + event_config=[AppsyncApiEventConfig( + auth_provider=[AppsyncApiEventConfigAuthProvider( + auth_type="API_KEY" + ) + ], + connection_auth_mode=[AppsyncApiEventConfigConnectionAuthMode( + auth_type="API_KEY" + ) + ], + default_publish_auth_mode=[AppsyncApiEventConfigDefaultPublishAuthMode( + auth_type="API_KEY" + ) + ], + default_subscribe_auth_mode=[AppsyncApiEventConfigDefaultSubscribeAuthMode( + auth_type="API_KEY" + ) + ] + ) + ], + name="example-event-api" + ) +``` + +### With Cognito Authentication + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import Token, TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. 
+# +from imports.aws.appsync_api import AppsyncApi +from imports.aws.cognito_user_pool import CognitoUserPool +from imports.aws.data_aws_region import DataAwsRegion +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + example = CognitoUserPool(self, "example", + name="example-user-pool" + ) + current = DataAwsRegion(self, "current") + aws_appsync_api_example = AppsyncApi(self, "example_2", + event_config=[AppsyncApiEventConfig( + auth_provider=[AppsyncApiEventConfigAuthProvider( + auth_type="AMAZON_COGNITO_USER_POOLS", + cognito_config=[AppsyncApiEventConfigAuthProviderCognitoConfig( + aws_region=Token.as_string(current.name), + user_pool_id=example.id + ) + ] + ) + ], + connection_auth_mode=[AppsyncApiEventConfigConnectionAuthMode( + auth_type="AMAZON_COGNITO_USER_POOLS" + ) + ], + default_publish_auth_mode=[AppsyncApiEventConfigDefaultPublishAuthMode( + auth_type="AMAZON_COGNITO_USER_POOLS" + ) + ], + default_subscribe_auth_mode=[AppsyncApiEventConfigDefaultSubscribeAuthMode( + auth_type="AMAZON_COGNITO_USER_POOLS" + ) + ] + ) + ], + name="example-event-api" + ) + # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. + aws_appsync_api_example.override_logical_id("example") +``` + +### With Lambda Authorizer + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import Token, TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. 
+# +from imports.aws.appsync_api import AppsyncApi +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + AppsyncApi(self, "example", + event_config=[AppsyncApiEventConfig( + auth_provider=[AppsyncApiEventConfigAuthProvider( + auth_type="AWS_LAMBDA", + lambda_authorizer_config=[AppsyncApiEventConfigAuthProviderLambdaAuthorizerConfig( + authorizer_result_ttl_in_seconds=300, + authorizer_uri=Token.as_string(aws_lambda_function_example.arn) + ) + ] + ) + ], + connection_auth_mode=[AppsyncApiEventConfigConnectionAuthMode( + auth_type="AWS_LAMBDA" + ) + ], + default_publish_auth_mode=[AppsyncApiEventConfigDefaultPublishAuthMode( + auth_type="AWS_LAMBDA" + ) + ], + default_subscribe_auth_mode=[AppsyncApiEventConfigDefaultSubscribeAuthMode( + auth_type="AWS_LAMBDA" + ) + ] + ) + ], + name="example-event-api" + ) +``` + +## Argument Reference + +The following arguments are required: + +* `event_config` - (Required) Configuration for the Event API. See [Event Config](#event-config) below. +* `name` - (Required) Name of the Event API. + +The following arguments are optional: + +* `owner_contact` - (Optional) Contact information for the owner of the Event API. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `tags` - (Optional) Map of tags to assign to the resource. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. + +### Event Config + +The `event_config` block supports the following: + +* `auth_provider` - (Required) List of authentication providers. 
See [Auth Providers](#auth-providers) below. +* `connection_auth_mode` - (Required) List of authentication modes for connections. See [Auth Modes](#auth-modes) below. +* `default_publish_auth_mode` - (Required) List of default authentication modes for publishing. See [Auth Modes](#auth-modes) below. +* `default_subscribe_auth_mode` - (Required) List of default authentication modes for subscribing. See [Auth Modes](#auth-modes) below. +* `log_config` - (Optional) Logging configuration. See [Log Config](#log-config) below. + +### Auth Providers + +The `auth_provider` block supports the following: + +* `auth_type` - (Required) Type of authentication provider. Valid values: `API_KEY`, `AWS_IAM`, `AMAZON_COGNITO_USER_POOLS`, `OPENID_CONNECT`, `AWS_LAMBDA`. +* `cognito_config` - (Optional) Configuration for Cognito user pool authentication. Required when `auth_type` is `AMAZON_COGNITO_USER_POOLS`. See [Cognito Config](#cognito-config) below. +* `lambda_authorizer_config` - (Optional) Configuration for Lambda authorization. Required when `auth_type` is `AWS_LAMBDA`. See [Lambda Authorizer Config](#lambda-authorizer-config) below. +* `openid_connect_config` - (Optional) Configuration for OpenID Connect. Required when `auth_type` is `OPENID_CONNECT`. See [OpenID Connect Config](#openid-connect-config) below. + +### Cognito Config + +The `cognito_config` block supports the following: + +* `app_id_client_regex` - (Optional) Regular expression for matching the client ID. +* `aws_region` - (Required) AWS region where the user pool is located. +* `user_pool_id` - (Required) ID of the Cognito user pool. + +### Lambda Authorizer Config + +The `lambda_authorizer_config` block supports the following: + +* `authorizer_result_ttl_in_seconds` - (Optional) TTL in seconds for the authorization result cache. +* `authorizer_uri` - (Required) URI of the Lambda function for authorization. +* `identity_validation_expression` - (Optional) Regular expression for identity validation. 
+ +### OpenID Connect Config + +The `openid_connect_config` block supports the following: + +* `auth_ttl` - (Optional) TTL in seconds for the authentication token. +* `client_id` - (Optional) Client ID for the OpenID Connect provider. +* `iat_ttl` - (Optional) TTL in seconds for the issued at time. +* `issuer` - (Required) Issuer URL for the OpenID Connect provider. + +### Auth Modes + +The `connection_auth_mode`, `default_publish_auth_mode`, and `default_subscribe_auth_mode` blocks support the following: + +* `auth_type` - (Required) Type of authentication. Valid values: `API_KEY`, `AWS_IAM`, `AMAZON_COGNITO_USER_POOLS`, `OPENID_CONNECT`, `AWS_LAMBDA`. + +### Log Config + +The `log_config` block supports the following: + +* `cloudwatch_logs_role_arn` - (Required) ARN of the IAM role for CloudWatch logs. +* `log_level` - (Required) Log level. Valid values: `NONE`, `ERROR`, `ALL`, `INFO`, `DEBUG`. + +## Attribute Reference + +This resource exports the following attributes in addition to the arguments above: + +* `api_id` - ID of the Event API. +* `api_arn` - ARN of the Event API. +* `dns` - DNS configuration for the Event API. +* `tags_all` - Map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). +* `waf_web_acl_arn` - ARN of the associated WAF web ACL. + +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import AppSync Event API using the `api_id`. For example: + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. 
+# +from imports.aws.appsync_api import AppsyncApi +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + AppsyncApi.generate_config_for_import(self, "example", "example-api-id") +``` + +Using `terraform import`, import AppSync Event API using the `api_id`. For example: + +```console +% terraform import aws_appsync_api.example example-api-id +``` + + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/appsync_api_cache.html.markdown b/website/docs/cdktf/python/r/appsync_api_cache.html.markdown index 6e8a8859505c..dd84abb6457c 100644 --- a/website/docs/cdktf/python/r/appsync_api_cache.html.markdown +++ b/website/docs/cdktf/python/r/appsync_api_cache.html.markdown @@ -45,6 +45,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `api_id` - (Required) GraphQL API ID. * `api_caching_behavior` - (Required) Caching behavior. Valid values are `FULL_REQUEST_CACHING` and `PER_RESOLVER_CACHING`. * `type` - (Required) Cache instance type. Valid values are `SMALL`, `MEDIUM`, `LARGE`, `XLARGE`, `LARGE_2X`, `LARGE_4X`, `LARGE_8X`, `LARGE_12X`, `T2_SMALL`, `T2_MEDIUM`, `R4_LARGE`, `R4_XLARGE`, `R4_2XLARGE`, `R4_4XLARGE`, `R4_8XLARGE`. 
@@ -83,4 +84,4 @@ Using `terraform import`, import `aws_appsync_api_cache` using the AppSync API I % terraform import aws_appsync_api_cache.example xxxxx ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/appsync_api_key.html.markdown b/website/docs/cdktf/python/r/appsync_api_key.html.markdown index 688c31fc3282..cc1483ec584e 100644 --- a/website/docs/cdktf/python/r/appsync_api_key.html.markdown +++ b/website/docs/cdktf/python/r/appsync_api_key.html.markdown @@ -43,6 +43,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `api_id` - (Required) ID of the associated AppSync API * `description` - (Optional) API key description. Defaults to "Managed by Terraform". * `expires` - (Optional) RFC3339 string representation of the expiry date. Rounded down to nearest hour. By default, it is 7 days from the date of creation. @@ -79,4 +80,4 @@ Using `terraform import`, import `aws_appsync_api_key` using the AppSync API ID % terraform import aws_appsync_api_key.example xxxxx:yyyyy ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/appsync_channel_namespace.html.markdown b/website/docs/cdktf/python/r/appsync_channel_namespace.html.markdown new file mode 100644 index 000000000000..22d186ce7d64 --- /dev/null +++ b/website/docs/cdktf/python/r/appsync_channel_namespace.html.markdown @@ -0,0 +1,118 @@ +--- +subcategory: "AppSync" +layout: "aws" +page_title: "AWS: aws_appsync_channel_namespace" +description: |- + Manages an AWS AppSync Channel Namespace. 
+--- + + + +# Resource: aws_appsync_channel_namespace + +Manages an [AWS AppSync Channel Namespace](https://docs.aws.amazon.com/appsync/latest/eventapi/event-api-concepts.html#namespace). + +## Example Usage + +### Basic Usage + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import Token, TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.appsync_channel_namespace import AppsyncChannelNamespace +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + AppsyncChannelNamespace(self, "example", + api_id=Token.as_string(aws_appsync_api_example.api_id), + name="example-channel-namespace" + ) +``` + +## Argument Reference + +The following arguments are required: + +* `api_id` - (Required) Event API ID. +* `name` - (Required) Name of the channel namespace. + +The following arguments are optional: + +* `code_handlers` - (Optional) Event handler functions that run custom business logic to process published events and subscribe requests. +* `handler_configs` - (Optional) Configuration for the `on_publish` and `on_subscribe` handlers. See [Handler Configs](#handler-configs) below. +* `publish_auth_mode` - (Optional) Authorization modes to use for publishing messages on the channel namespace. This configuration overrides the default API authorization configuration. See [Auth Modes](#auth-modes) below. +* `subscribe_auth_mode` - (Optional) Authorization modes to use for subscribing to messages on the channel namespace. This configuration overrides the default API authorization configuration. See [Auth Modes](#auth-modes) below. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `tags` - (Optional) Map of tags to assign to the resource. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. + +### Auth Modes + +The `publish_auth_mode` and `subscribe_auth_mode` blocks support the following: + +* `auth_type` - (Required) Type of authentication. Valid values: `API_KEY`, `AWS_IAM`, `AMAZON_COGNITO_USER_POOLS`, `OPENID_CONNECT`, `AWS_LAMBDA`. + +### Handler Configs + +The `handler_configs` block supports the following: + +* `on_publish` - (Optional) Handler configuration. See [Handler Config](#handler-config) below. +* `on_subscribe` - (Optional) Handler configuration. See [Handler Config](#handler-config) below. + +### Handler Config + +The `on_publish` and `on_subscribe` blocks support the following: + +* `behavior` - (Required) Behavior for the handler. Valid values: `CODE`, `DIRECT`. +* `integration` - (Required) Integration data source configuration for the handler. See [Integration](#integration) below. + +### Integration + +The `integration` block supports the following: + +* `data_source_name` - (Required) Unique name of the data source that has been configured on the API. +* `lambda_config` - (Optional) Configuration for a Lambda data source. See [Lambda Config](#lambda-config) below. + +### Lambda Config + +The `lambda_config` block supports the following: + +* `invoke_type` - (Optional) Invocation type for a Lambda data source. Valid values: `REQUEST_RESPONSE`, `EVENT`. + +## Attribute Reference + +This resource exports the following attributes in addition to the arguments above: + +* `channel_namespace_arn` - ARN of the channel namespace. 
+* `tags_all` - Map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). + +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import AppSync Channel Namespace using the `api_id` and `name` separated by a comma (`,`). For example: + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.appsync_channel_namespace import AppsyncChannelNamespace +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + AppsyncChannelNamespace.generate_config_for_import(self, "example", "example-api-id,example-channel-namespace") +``` + +Using `terraform import`, import AppSync Channel Namespace using the `api_id` and `name` separated by a comma (`,`). For example: + +```console +% terraform import aws_appsync_channel_namespace.example example-api-id,example-channel-namespace +``` + + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/appsync_datasource.html.markdown b/website/docs/cdktf/python/r/appsync_datasource.html.markdown index 5d31f3f4fb8d..873ccdc4a4b0 100644 --- a/website/docs/cdktf/python/r/appsync_datasource.html.markdown +++ b/website/docs/cdktf/python/r/appsync_datasource.html.markdown @@ -100,6 +100,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `api_id` - (Required) API ID for the GraphQL API for the data source. * `name` - (Required) User-supplied name for the data source. * `type` - (Required) Type of the Data Source. Valid values: `AWS_LAMBDA`, `AMAZON_DYNAMODB`, `AMAZON_ELASTICSEARCH`, `HTTP`, `NONE`, `RELATIONAL_DATABASE`, `AMAZON_EVENTBRIDGE`, `AMAZON_OPENSEARCH_SERVICE`. @@ -226,4 +227,4 @@ Using `terraform import`, import `aws_appsync_datasource` using the `api_id`, a % terraform import aws_appsync_datasource.example abcdef123456-example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/appsync_domain_name.html.markdown b/website/docs/cdktf/python/r/appsync_domain_name.html.markdown index bb63baa6f08f..1c14a51a3102 100644 --- a/website/docs/cdktf/python/r/appsync_domain_name.html.markdown +++ b/website/docs/cdktf/python/r/appsync_domain_name.html.markdown @@ -36,6 +36,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `certificate_arn` - (Required) ARN of the certificate. This can be an Certificate Manager (ACM) certificate or an Identity and Access Management (IAM) server certificate. The certifiacte must reside in us-east-1. * `description` - (Optional) A description of the Domain Name. * `domain_name` - (Required) Domain name. 
@@ -73,4 +74,4 @@ Using `terraform import`, import `aws_appsync_domain_name` using the AppSync dom % terraform import aws_appsync_domain_name.example example.com ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/appsync_domain_name_api_association.html.markdown b/website/docs/cdktf/python/r/appsync_domain_name_api_association.html.markdown index 66cc1ced6072..c13158adf1f3 100644 --- a/website/docs/cdktf/python/r/appsync_domain_name_api_association.html.markdown +++ b/website/docs/cdktf/python/r/appsync_domain_name_api_association.html.markdown @@ -36,6 +36,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `api_id` - (Required) API ID. * `domain_name` - (Required) Appsync domain name. @@ -70,4 +71,4 @@ Using `terraform import`, import `aws_appsync_domain_name_api_association` using % terraform import aws_appsync_domain_name_api_association.example example.com ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/appsync_function.html.markdown b/website/docs/cdktf/python/r/appsync_function.html.markdown index 23743836fc9f..19fada978905 100644 --- a/website/docs/cdktf/python/r/appsync_function.html.markdown +++ b/website/docs/cdktf/python/r/appsync_function.html.markdown @@ -84,6 +84,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `api_id` - (Required) ID of the associated AppSync API. * `code` - (Optional) The function code that contains the request and response functions. When code is used, the runtime is required. The runtime value must be APPSYNC_JS. * `data_source` - (Required) Function data source name. @@ -150,4 +151,4 @@ Using `terraform import`, import `aws_appsync_function` using the AppSync API ID % terraform import aws_appsync_function.example xxxxx-yyyyy ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/appsync_graphql_api.html.markdown b/website/docs/cdktf/python/r/appsync_graphql_api.html.markdown index 7968fed08b85..89c56598db98 100644 --- a/website/docs/cdktf/python/r/appsync_graphql_api.html.markdown +++ b/website/docs/cdktf/python/r/appsync_graphql_api.html.markdown @@ -72,7 +72,7 @@ class MyConvertedCode(TerraformStack): authentication_type="AMAZON_COGNITO_USER_POOLS", name="example", user_pool_config=AppsyncGraphqlApiUserPoolConfig( - aws_region=Token.as_string(current.name), + aws_region=Token.as_string(current.region), default_action="DENY", user_pool_id=Token.as_string(aws_cognito_user_pool_example.id) ) @@ -316,13 +316,15 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -The following arguments are required: +This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `authentication_type` - (Required) Authentication type. 
Valid values: `API_KEY`, `AWS_IAM`, `AMAZON_COGNITO_USER_POOLS`, `OPENID_CONNECT`, `AWS_LAMBDA` * `name` - (Required) User-supplied name for the GraphQL API. The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `additional_authentication_provider` - (Optional) One or more additional authentication providers for the GraphQL API. See [`additional_authentication_provider` Block](#additional_authentication_provider-block) for details. * `api_type` - (Optional) API type. Valid values are `GRAPHQL` or `MERGED`. A `MERGED` type requires `merged_api_execution_role_arn` to be set. * `enhanced_metrics_config` - (Optional) Enables and controls the enhanced metrics feature. See [`enhanced_metrics_config` Block](#enhanced_metrics_config-block) for details. @@ -425,4 +427,4 @@ Using `terraform import`, import AppSync GraphQL API using the GraphQL API ID. F % terraform import aws_appsync_graphql_api.example 0123456789 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/appsync_resolver.html.markdown b/website/docs/cdktf/python/r/appsync_resolver.html.markdown index 54a9393f165e..f5f45d08bc7e 100644 --- a/website/docs/cdktf/python/r/appsync_resolver.html.markdown +++ b/website/docs/cdktf/python/r/appsync_resolver.html.markdown @@ -104,6 +104,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
* `api_id` - (Required) API ID for the GraphQL API. * `code` - (Optional) The function code that contains the request and response functions. When code is used, the runtime is required. The runtime value must be APPSYNC_JS. * `type` - (Required) Type name from the schema defined in the GraphQL API. @@ -173,4 +174,4 @@ Using `terraform import`, import `aws_appsync_resolver` using the `api_id`, a hy % terraform import aws_appsync_resolver.example abcdef123456-exampleType-exampleField ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/appsync_source_api_association.html.markdown b/website/docs/cdktf/python/r/appsync_source_api_association.html.markdown index 90c675815ad4..64b3e3e92f0d 100644 --- a/website/docs/cdktf/python/r/appsync_source_api_association.html.markdown +++ b/website/docs/cdktf/python/r/appsync_source_api_association.html.markdown @@ -3,13 +3,13 @@ subcategory: "AppSync" layout: "aws" page_title: "AWS: aws_appsync_source_api_association" description: |- - Terraform resource for managing an AWS AppSync Source Api Association. + Terraform resource for managing an AWS AppSync Source API Association. --- # Resource: aws_appsync_source_api_association -Terraform resource for managing an AWS AppSync Source Api Association. +Terraform resource for managing an AWS AppSync Source API Association. ## Example Usage @@ -38,6 +38,7 @@ class MyConvertedCode(TerraformStack): The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` - (Optional) Description of the source API being merged. * `merged_api_arn` - (Optional) ARN of the merged API. One of `merged_api_arn` or `merged_api_id` must be specified. 
* `merged_api_id` - (Optional) ID of the merged API. One of `merged_api_arn` or `merged_api_id` must be specified. @@ -54,9 +55,9 @@ The `source_api_association_config` configuration block supports the following a This resource exports the following attributes in addition to the arguments above: -* `arn` - ARN of the Source Api Association. -* `association_id` - ID of the Source Api Association. -* `id` - Combined ID of the Source Api Association and Merge Api. +* `arn` - ARN of the Source API Association. +* `association_id` - ID of the Source API Association. +* `id` - Combined ID of the Source API Association and Merge API. ## Timeouts @@ -68,7 +69,7 @@ This resource exports the following attributes in addition to the arguments abov ## Import -In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import AppSync Source Api Association using the `example_id_arg`. For example: +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import AppSync Source API Association using the `association_id` and `merged_api_id` separated by `,`. For example: ```python # DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug @@ -85,10 +86,10 @@ class MyConvertedCode(TerraformStack): AppsyncSourceApiAssociation.generate_config_for_import(self, "example", "gzos6bteufdunffzzifiowisoe,243685a0-9347-4a1a-89c1-9b57dea01e31") ``` -Using `terraform import`, import AppSync Source Api Association using the `gzos6bteufdunffzzifiowisoe,243685a0-9347-4a1a-89c1-9b57dea01e31`. For example: +Using `terraform import`, import AppSync Source API Association using the `association_id` and `merged_api_id` separated by `,`. 
For example: ```console % terraform import aws_appsync_source_api_association.example gzos6bteufdunffzzifiowisoe,243685a0-9347-4a1a-89c1-9b57dea01e31 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/appsync_type.html.markdown b/website/docs/cdktf/python/r/appsync_type.html.markdown index 0f4b01c03d9d..c641d8670399 100644 --- a/website/docs/cdktf/python/r/appsync_type.html.markdown +++ b/website/docs/cdktf/python/r/appsync_type.html.markdown @@ -44,6 +44,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `api_id` - (Required) GraphQL API ID. * `format` - (Required) The type format: `SDL` or `JSON`. * `definition` - (Required) The type definition. @@ -82,4 +83,4 @@ Using `terraform import`, import Appsync Types using the `id`. For example: % terraform import aws_appsync_type.example api-id:format:name ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/athena_capacity_reservation.html.markdown b/website/docs/cdktf/python/r/athena_capacity_reservation.html.markdown index 20c35effea5c..e7d553a31dd6 100644 --- a/website/docs/cdktf/python/r/athena_capacity_reservation.html.markdown +++ b/website/docs/cdktf/python/r/athena_capacity_reservation.html.markdown @@ -44,6 +44,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `tags` - (Optional) Map of tags assigned to the resource. If configured with a provider [`default_tags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. ## Attribute Reference @@ -88,4 +89,4 @@ Using `terraform import`, import Athena Capacity Reservation using the `name`. F % terraform import aws_athena_capacity_reservation.example example-reservation ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/athena_data_catalog.html.markdown b/website/docs/cdktf/python/r/athena_data_catalog.html.markdown index 9d9626c266da..e4a53b9346af 100644 --- a/website/docs/cdktf/python/r/athena_data_catalog.html.markdown +++ b/website/docs/cdktf/python/r/athena_data_catalog.html.markdown @@ -120,6 +120,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +- `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). - `name` - (Required) Name of the data catalog. The catalog name must be unique for the AWS account and can use a maximum of 128 alphanumeric, underscore, at sign, or hyphen characters. - `type` - (Required) Type of data catalog: `LAMBDA` for a federated catalog, `GLUE` for AWS Glue Catalog, or `HIVE` for an external hive metastore. - `parameters` - (Required) Key value pairs that specifies the Lambda function or functions to use for the data catalog. The mapping used depends on the catalog type. 
@@ -159,4 +160,4 @@ Using `terraform import`, import data catalogs using their `name`. For example: % terraform import aws_athena_data_catalog.example example-data-catalog ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/athena_database.html.markdown b/website/docs/cdktf/python/r/athena_database.html.markdown index 58a106b5631f..ba8302c0c379 100644 --- a/website/docs/cdktf/python/r/athena_database.html.markdown +++ b/website/docs/cdktf/python/r/athena_database.html.markdown @@ -42,6 +42,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `bucket` - (Required) Name of S3 bucket to save the results of the query execution. * `name` - (Required) Name of the database to create. * `acl_configuration` - (Optional) That an Amazon S3 canned ACL should be set to control ownership of stored query results. See [ACL Configuration](#acl-configuration) below. @@ -50,6 +51,7 @@ This resource supports the following arguments: * `expected_bucket_owner` - (Optional) AWS account ID that you expect to be the owner of the Amazon S3 bucket. * `force_destroy` - (Optional, Default: false) Boolean that indicates all tables should be deleted from the database so that the database can be destroyed without error. The tables are *not* recoverable. * `properties` - (Optional) Key-value map of custom metadata properties for the database definition. +* `workgroup` - (Optional) Name of the workgroup. 
### ACL Configuration @@ -117,4 +119,4 @@ class MyConvertedCode(TerraformStack): ) ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/athena_named_query.html.markdown b/website/docs/cdktf/python/r/athena_named_query.html.markdown index 73962b895978..b41ecf65d1df 100644 --- a/website/docs/cdktf/python/r/athena_named_query.html.markdown +++ b/website/docs/cdktf/python/r/athena_named_query.html.markdown @@ -68,6 +68,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Plain language name for the query. Maximum length of 128. * `workgroup` - (Optional) Workgroup to which the query belongs. Defaults to `primary` * `database` - (Required) Database to which the query belongs. @@ -105,4 +106,4 @@ Using `terraform import`, import Athena Named Query using the query ID. For exam % terraform import aws_athena_named_query.example 0123456789 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/athena_prepared_statement.html.markdown b/website/docs/cdktf/python/r/athena_prepared_statement.html.markdown index a7ce97b0cd13..6fb7e243618d 100644 --- a/website/docs/cdktf/python/r/athena_prepared_statement.html.markdown +++ b/website/docs/cdktf/python/r/athena_prepared_statement.html.markdown @@ -57,6 +57,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The name of the prepared statement. Maximum length of 256. * `workgroup` - (Required) The name of the workgroup to which the prepared statement belongs. * `query_statement` - (Required) The query string for the prepared statement. @@ -101,4 +102,4 @@ Using `terraform import`, import Athena Prepared Statement using the `WORKGROUP- % terraform import aws_athena_prepared_statement.example 12345abcde/example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/athena_workgroup.html.markdown b/website/docs/cdktf/python/r/athena_workgroup.html.markdown index 0a0cf4b2ac5f..9395a708f015 100644 --- a/website/docs/cdktf/python/r/athena_workgroup.html.markdown +++ b/website/docs/cdktf/python/r/athena_workgroup.html.markdown @@ -46,6 +46,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the workgroup. * `configuration` - (Optional) Configuration block with various settings for the workgroup. Documented below. * `description` - (Optional) Description of the workgroup. @@ -58,19 +59,25 @@ This resource supports the following arguments: * `bytes_scanned_cutoff_per_query` - (Optional) Integer for the upper data usage limit (cutoff) for the amount of bytes a single query in a workgroup is allowed to scan. Must be at least `10485760`. * `enforce_workgroup_configuration` - (Optional) Boolean whether the settings for the workgroup override client-side settings. 
For more information, see [Workgroup Settings Override Client-Side Settings](https://docs.aws.amazon.com/athena/latest/ug/workgroups-settings-override.html). Defaults to `true`. * `engine_version` - (Optional) Configuration block for the Athena Engine Versioning. For more information, see [Athena Engine Versioning](https://docs.aws.amazon.com/athena/latest/ug/engine-versions.html). See [Engine Version](#engine-version) below. -* `execution_role` - (Optional) Role used in a notebook session for accessing the user's resources. +* `execution_role` - (Optional) Role used to access user resources in notebook sessions and IAM Identity Center enabled workgroups. The property is required for IAM Identity Center enabled workgroups. +* `identity_center_configuration` - (Optional) Configuration block to set up an IAM Identity Center enabled workgroup. See [Identity Center Configuration](#identity-center-configuration) below. * `publish_cloudwatch_metrics_enabled` - (Optional) Boolean whether Amazon CloudWatch metrics are enabled for the workgroup. Defaults to `true`. -* `result_configuration` - (Optional) Configuration block with result settings. See [Result Configuration](#result-configuration) below. * `requester_pays_enabled` - (Optional) If set to true , allows members assigned to a workgroup to reference Amazon S3 Requester Pays buckets in queries. If set to false , workgroup members cannot query data from Requester Pays buckets, and queries that retrieve data from Requester Pays buckets cause an error. The default is false . For more information about Requester Pays buckets, see [Requester Pays Buckets](https://docs.aws.amazon.com/AmazonS3/latest/dev/RequesterPaysBuckets.html) in the Amazon Simple Storage Service Developer Guide. +* `result_configuration` - (Optional) Configuration block with result settings. See [Result Configuration](#result-configuration) below. #### Engine Version * `selected_engine_version` - (Optional) Requested engine version. Defaults to `AUTO`. 
+#### Identity Center Configuration + +* `enable_identity_center` - (Optional) Specifies whether the workgroup is IAM Identity Center supported. +* `identity_center_instance_arn` - (Optional) The IAM Identity Center instance ARN that the workgroup associates to. + #### Result Configuration -* `encryption_configuration` - (Optional) Configuration block with encryption settings. See [Encryption Configuration](#encryption-configuration) below. * `acl_configuration` - (Optional) That an Amazon S3 canned ACL should be set to control ownership of stored query results. See [ACL Configuration](#acl-configuration) below. +* `encryption_configuration` - (Optional) Configuration block with encryption settings. See [Encryption Configuration](#encryption-configuration) below. * `expected_bucket_owner` - (Optional) AWS account ID that you expect to be the owner of the Amazon S3 bucket. * `output_location` - (Optional) Location in Amazon S3 where your query results are stored, such as `s3://path/to/query/bucket/`. For more information, see [Queries and Query Result Files](https://docs.aws.amazon.com/athena/latest/ug/querying.html). @@ -119,4 +126,4 @@ Using `terraform import`, import Athena Workgroups using their name. For example % terraform import aws_athena_workgroup.example example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/auditmanager_account_registration.html.markdown b/website/docs/cdktf/python/r/auditmanager_account_registration.html.markdown index 30d29991c4f1..41f05dcceb30 100644 --- a/website/docs/cdktf/python/r/auditmanager_account_registration.html.markdown +++ b/website/docs/cdktf/python/r/auditmanager_account_registration.html.markdown @@ -54,6 +54,7 @@ class MyConvertedCode(TerraformStack): The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `delegated_admin_account` - (Optional) Identifier for the delegated administrator account. * `deregister_on_destroy` - (Optional) Flag to deregister AuditManager in the account upon destruction. Defaults to `false` (ie. AuditManager will remain active in the account, even if this resource is removed). * `kms_key` - (Optional) KMS key identifier. @@ -90,4 +91,4 @@ Using `terraform import`, import Audit Manager Account Registration resources us % terraform import aws_auditmanager_account_registration.example us-east-1 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/auditmanager_assessment.html.markdown b/website/docs/cdktf/python/r/auditmanager_assessment.html.markdown index 6af1dbfae50a..e98f0f0f8a17 100644 --- a/website/docs/cdktf/python/r/auditmanager_assessment.html.markdown +++ b/website/docs/cdktf/python/r/auditmanager_assessment.html.markdown @@ -67,6 +67,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` - (Optional) Description of the assessment. * `tags` - (Optional) A map of tags to assign to the assessment. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. 
@@ -127,4 +128,4 @@ Using `terraform import`, import Audit Manager Assessments using the assessment % terraform import aws_auditmanager_assessment.example abc123-de45 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/auditmanager_assessment_delegation.html.markdown b/website/docs/cdktf/python/r/auditmanager_assessment_delegation.html.markdown index 9f6b8f2c86a3..63066e43dc38 100644 --- a/website/docs/cdktf/python/r/auditmanager_assessment_delegation.html.markdown +++ b/website/docs/cdktf/python/r/auditmanager_assessment_delegation.html.markdown @@ -47,6 +47,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `comment` - (Optional) Comment describing the delegation request. ## Attribute Reference @@ -82,4 +83,4 @@ Using `terraform import`, import Audit Manager Assessment Delegation using the ` % terraform import aws_auditmanager_assessment_delegation.example abcdef-123456,arn:aws:iam::123456789012:role/example,example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/auditmanager_assessment_report.html.markdown b/website/docs/cdktf/python/r/auditmanager_assessment_report.html.markdown index ad6b14a3e1bc..66d85e6d4861 100644 --- a/website/docs/cdktf/python/r/auditmanager_assessment_report.html.markdown +++ b/website/docs/cdktf/python/r/auditmanager_assessment_report.html.markdown @@ -43,6 +43,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` - (Optional) Description of the assessment report. ## Attribute Reference @@ -78,4 +79,4 @@ Using `terraform import`, import Audit Manager Assessment Reports using the asse % terraform import aws_auditmanager_assessment_report.example abc123-de45 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/auditmanager_control.html.markdown b/website/docs/cdktf/python/r/auditmanager_control.html.markdown index cecbd1f08683..267d22fa117a 100644 --- a/website/docs/cdktf/python/r/auditmanager_control.html.markdown +++ b/website/docs/cdktf/python/r/auditmanager_control.html.markdown @@ -48,6 +48,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `action_plan_instructions` - (Optional) Recommended actions to carry out if the control isn't fulfilled. * `action_plan_title` - (Optional) Title of the action plan for remediating the control. * `description` - (Optional) Description of the control. @@ -64,6 +65,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `source_description` - (Optional) Description of the source. * `source_frequency` - (Optional) Frequency of evidence collection. 
Valid values are `DAILY`, `WEEKLY`, or `MONTHLY`. * `source_keyword` - (Optional) The keyword to search for in CloudTrail logs, Config rules, Security Hub checks, and Amazon Web Services API names. See [`source_keyword`](#source_keyword) below. @@ -110,4 +112,4 @@ Using `terraform import`, import an Audit Manager Control using the `id`. For ex % terraform import aws_auditmanager_control.example abc123-de45 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/auditmanager_framework.html.markdown b/website/docs/cdktf/python/r/auditmanager_framework.html.markdown index 5481b5638140..ac78dceebf42 100644 --- a/website/docs/cdktf/python/r/auditmanager_framework.html.markdown +++ b/website/docs/cdktf/python/r/auditmanager_framework.html.markdown @@ -52,6 +52,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `compliance_type` - (Optional) Compliance type that the new custom framework supports, such as `CIS` or `HIPAA`. * `description` - (Optional) Description of the framework. * `tags` - (Optional) A map of tags to assign to the framework. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. 
@@ -103,4 +104,4 @@ Using `terraform import`, import Audit Manager Framework using the framework `id % terraform import aws_auditmanager_framework.example abc123-de45 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/auditmanager_framework_share.html.markdown b/website/docs/cdktf/python/r/auditmanager_framework_share.html.markdown index c058ebd560a9..050a35052880 100644 --- a/website/docs/cdktf/python/r/auditmanager_framework_share.html.markdown +++ b/website/docs/cdktf/python/r/auditmanager_framework_share.html.markdown @@ -45,6 +45,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `comment` - (Optional) Comment from the sender about the share request. ## Attribute Reference @@ -79,4 +80,4 @@ Using `terraform import`, import Audit Manager Framework Share using the `id`. 
F % terraform import aws_auditmanager_framework_share.example abcdef-123456 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/auditmanager_organization_admin_account_registration.html.markdown b/website/docs/cdktf/python/r/auditmanager_organization_admin_account_registration.html.markdown index 225013e38ab6..2fc716614813 100644 --- a/website/docs/cdktf/python/r/auditmanager_organization_admin_account_registration.html.markdown +++ b/website/docs/cdktf/python/r/auditmanager_organization_admin_account_registration.html.markdown @@ -35,8 +35,9 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -The following arguments are required: +This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `admin_account_id` - (Required) Identifier for the organization administrator account. 
## Attribute Reference @@ -71,4 +72,4 @@ Using `terraform import`, import Audit Manager Organization Admin Account Regist % terraform import aws_auditmanager_organization_admin_account_registration.example 123456789012 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/autoscaling_attachment.html.markdown b/website/docs/cdktf/python/r/autoscaling_attachment.html.markdown index 6ef47e3ae26d..340ceeb43303 100644 --- a/website/docs/cdktf/python/r/autoscaling_attachment.html.markdown +++ b/website/docs/cdktf/python/r/autoscaling_attachment.html.markdown @@ -56,6 +56,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `autoscaling_group_name` - (Required) Name of ASG to associate with the ELB. * `elb` - (Optional) Name of the ELB. * `lb_target_group_arn` - (Optional) ARN of a load balancer target group. @@ -64,4 +65,4 @@ This resource supports the following arguments: This resource exports no additional attributes. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/autoscaling_group.html.markdown b/website/docs/cdktf/python/r/autoscaling_group.html.markdown index b25c164b6939..34a15dc3c1a8 100644 --- a/website/docs/cdktf/python/r/autoscaling_group.html.markdown +++ b/website/docs/cdktf/python/r/autoscaling_group.html.markdown @@ -495,6 +495,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +- `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). - `name` - (Optional) Name of the Auto Scaling Group. By default generated by Terraform. Conflicts with `name_prefix`. - `name_prefix` - (Optional) Creates a unique name beginning with the specified prefix. Conflicts with `name`. @@ -779,7 +780,7 @@ This configuration block supports the following: - `instance_warmup` - (Optional) Number of seconds until a newly launched instance is configured and ready to use. Default behavior is to use the Auto Scaling Group's health check grace period. - `max_healthy_percentage` - (Optional) Amount of capacity in the Auto Scaling group that can be in service and healthy, or pending, to support your workload when an instance refresh is in place, as a percentage of the desired capacity of the Auto Scaling group. Values must be between `100` and `200`, defaults to `100`. - `min_healthy_percentage` - (Optional) Amount of capacity in the Auto Scaling group that must remain healthy during an instance refresh to allow the operation to continue, as a percentage of the desired capacity of the Auto Scaling group. Defaults to `90`. - - `skip_matching` - (Optional) Replace instances that already have your desired configuration. Defaults to `false`. + - `skip_matching` - (Optional) Skip replacing instances that already have your desired configuration. Defaults to `false`. - `auto_rollback` - (Optional) Automatically rollback if instance refresh fails. Defaults to `false`. This option may only be set to `true` when specifying a `launch_template` or `mixed_instances_policy`. - `alarm_specification` - (Optional) Alarm Specification for Instance Refresh. - `alarms` - (Required) List of Cloudwatch alarms. If any of these alarms goes into ALARM state, Instance Refresh is failed. @@ -952,4 +953,4 @@ Using `terraform import`, import Auto Scaling Groups using the `name`. 
For examp % terraform import aws_autoscaling_group.web web-asg ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/autoscaling_group_tag.html.markdown b/website/docs/cdktf/python/r/autoscaling_group_tag.html.markdown index 2bbcdbfcce99..89f9cfcf977e 100644 --- a/website/docs/cdktf/python/r/autoscaling_group_tag.html.markdown +++ b/website/docs/cdktf/python/r/autoscaling_group_tag.html.markdown @@ -63,6 +63,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `autoscaling_group_name` - (Required) Name of the Autoscaling Group to apply the tag to. * `tag` - (Required) Tag to create. The `tag` block is documented below. @@ -103,4 +104,4 @@ Using `terraform import`, import `aws_autoscaling_group_tag` using the ASG name % terraform import aws_autoscaling_group_tag.example asg-example,k8s.io/cluster-autoscaler/node-template/label/eks.amazonaws.com/capacityType ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/autoscaling_lifecycle_hook.html.markdown b/website/docs/cdktf/python/r/autoscaling_lifecycle_hook.html.markdown index 003a45f03293..70b237c40060 100644 --- a/website/docs/cdktf/python/r/autoscaling_lifecycle_hook.html.markdown +++ b/website/docs/cdktf/python/r/autoscaling_lifecycle_hook.html.markdown @@ -74,13 +74,14 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the lifecycle hook. * `autoscaling_group_name` - (Required) Name of the Auto Scaling group to which you want to assign the lifecycle hook * `default_result` - (Optional) Defines the action the Auto Scaling group should take when the lifecycle hook timeout elapses or if an unexpected failure occurs. The value for this parameter can be either CONTINUE or ABANDON. The default value for this parameter is ABANDON. * `heartbeat_timeout` - (Optional) Defines the amount of time, in seconds, that can elapse before the lifecycle hook times out. When the lifecycle hook times out, Auto Scaling performs the action defined in the DefaultResult parameter * `lifecycle_transition` - (Required) Instance state to which you want to attach the lifecycle hook. For a list of lifecycle hook types, see [describe-lifecycle-hook-types](https://docs.aws.amazon.com/cli/latest/reference/autoscaling/describe-lifecycle-hook-types.html#examples) * `notification_metadata` - (Optional) Contains additional information that you want to include any time Auto Scaling sends a message to the notification target. -* `notification_target_arn` - (Optional) ARN of the notification target that Auto Scaling will use to notify you when an instance is in the transition state for the lifecycle hook. This ARN target can be either an SQS queue or an SNS topic. +* `notification_target_arn` - (Optional) ARN of the notification target that Auto Scaling will use to notify you when an instance is in the transition state for the lifecycle hook. This ARN target can be either an SQS queue, an SNS topic, or a Lambda function. * `role_arn` - (Optional) ARN of the IAM role that allows the Auto Scaling group to publish to the specified notification target. 
## Attribute Reference @@ -112,4 +113,4 @@ Using `terraform import`, import AutoScaling Lifecycle Hooks using the role auto % terraform import aws_autoscaling_lifecycle_hook.test-lifecycle-hook asg-name/lifecycle-hook-name ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/autoscaling_notification.html.markdown b/website/docs/cdktf/python/r/autoscaling_notification.html.markdown index eb4bc671b4e0..cdad80c75994 100644 --- a/website/docs/cdktf/python/r/autoscaling_notification.html.markdown +++ b/website/docs/cdktf/python/r/autoscaling_notification.html.markdown @@ -57,6 +57,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `group_names` - (Required) List of AutoScaling Group Names * `notifications` - (Required) List of Notification Types that trigger notifications. 
Acceptable values are documented [in the AWS documentation here][1] @@ -73,4 +74,4 @@ This resource exports the following attributes in addition to the arguments abov [1]: https://docs.aws.amazon.com/AutoScaling/latest/APIReference/API_NotificationConfiguration.html [2]: https://docs.aws.amazon.com/AutoScaling/latest/APIReference/API_DescribeNotificationConfigurations.html - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/autoscaling_policy.html.markdown b/website/docs/cdktf/python/r/autoscaling_policy.html.markdown index 0bfcb9d4b661..78d40d23b5de 100644 --- a/website/docs/cdktf/python/r/autoscaling_policy.html.markdown +++ b/website/docs/cdktf/python/r/autoscaling_policy.html.markdown @@ -228,6 +228,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the policy. * `autoscaling_group_name` - (Required) Name of the autoscaling group. * `adjustment_type` - (Optional) Whether the adjustment is an absolute number or a percentage of the current capacity. Valid values are `ChangeInCapacity`, `ExactCapacity`, and `PercentChangeInCapacity`. 
@@ -517,4 +518,4 @@ Using `terraform import`, import AutoScaling scaling policy using the role autos % terraform import aws_autoscaling_policy.test-policy asg-name/policy-name ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/autoscaling_schedule.html.markdown b/website/docs/cdktf/python/r/autoscaling_schedule.html.markdown index 048d6c7129be..a5f8b2852e33 100644 --- a/website/docs/cdktf/python/r/autoscaling_schedule.html.markdown +++ b/website/docs/cdktf/python/r/autoscaling_schedule.html.markdown @@ -59,6 +59,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `desired_capacity` - (Optional) The initial capacity of the Auto Scaling group after the scheduled action runs and the capacity it attempts to maintain. Set to `-1` if you don't want to change the desired capacity at the scheduled time. Defaults to `0`. * `end_time` - (Optional) The date and time for the recurring schedule to end, in UTC with the format `"YYYY-MM-DDThh:mm:ssZ"` (e.g. `"2021-06-01T00:00:00Z"`). * `max_size` - (Optional) The maximum size of the Auto Scaling group. Set to `-1` if you don't want to change the maximum size at the scheduled time. Defaults to `0`. 
@@ -100,4 +101,4 @@ Using `terraform import`, import AutoScaling ScheduledAction using the `auto-sca % terraform import aws_autoscaling_schedule.resource-name auto-scaling-group-name/scheduled-action-name ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/autoscaling_traffic_source_attachment.html.markdown b/website/docs/cdktf/python/r/autoscaling_traffic_source_attachment.html.markdown index ed85c276c916..e31dd23839f5 100644 --- a/website/docs/cdktf/python/r/autoscaling_traffic_source_attachment.html.markdown +++ b/website/docs/cdktf/python/r/autoscaling_traffic_source_attachment.html.markdown @@ -43,6 +43,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +- `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). - `autoscaling_group_name` - (Required) The name of the Auto Scaling group. - `traffic_source` - (Required) The unique identifiers of a traffic sources. @@ -59,4 +60,4 @@ This resource supports the following arguments: This resource exports no additional attributes. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/autoscalingplans_scaling_plan.html.markdown b/website/docs/cdktf/python/r/autoscalingplans_scaling_plan.html.markdown index ff112877c736..906630310825 100644 --- a/website/docs/cdktf/python/r/autoscalingplans_scaling_plan.html.markdown +++ b/website/docs/cdktf/python/r/autoscalingplans_scaling_plan.html.markdown @@ -155,6 +155,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the scaling plan. Names cannot contain vertical bars, colons, or forward slashes. * `application_source` - (Required) CloudFormation stack or set of tags. You can create one scaling plan per application source. * `scaling_instruction` - (Required) Scaling instructions. More details can be found in the [AWS Auto Scaling API Reference](https://docs.aws.amazon.com/autoscaling/plans/APIReference/API_ScalingInstruction.html). @@ -263,4 +264,4 @@ Using `terraform import`, import Auto Scaling scaling plans using the `name`. 
Fo % terraform import aws_autoscalingplans_scaling_plan.example MyScale1 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/backup_framework.html.markdown b/website/docs/cdktf/python/r/backup_framework.html.markdown index 627621a2a852..5867b1ab4d4b 100644 --- a/website/docs/cdktf/python/r/backup_framework.html.markdown +++ b/website/docs/cdktf/python/r/backup_framework.html.markdown @@ -98,6 +98,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `control` - (Required) One or more control blocks that make up the framework. Each control in the list has a name, input parameters, and scope. Detailed below. * `description` - (Optional) The description of the framework with a maximum of 1,024 characters * `name` - (Required) The unique name of the framework. The name must be between 1 and 256 characters, starting with a letter, and consisting of letters, numbers, and underscores. 
@@ -170,4 +171,4 @@ Using `terraform import`, import Backup Framework using the `id` which correspon % terraform import aws_backup_framework.test ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/backup_logically_air_gapped_vault.html.markdown b/website/docs/cdktf/python/r/backup_logically_air_gapped_vault.html.markdown index e609aa01524f..bb2ad8e11a2f 100644 --- a/website/docs/cdktf/python/r/backup_logically_air_gapped_vault.html.markdown +++ b/website/docs/cdktf/python/r/backup_logically_air_gapped_vault.html.markdown @@ -39,6 +39,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the Logically Air Gapped Backup Vault to create. * `max_retention_days` - (Required) Maximum retention period that the Logically Air Gapped Backup Vault retains recovery points. * `min_retention_days` - (Required) Minimum retention period that the Logically Air Gapped Backup Vault retains recovery points. 
@@ -84,4 +85,4 @@ Using `terraform import`, import Backup Logically Air Gapped Vault using the `id % terraform import aws_backup_logically_air_gapped_vault.example lag-example-vault ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/backup_plan.html.markdown b/website/docs/cdktf/python/r/backup_plan.html.markdown index 1a44c387d682..6dac5f89c1ce 100644 --- a/website/docs/cdktf/python/r/backup_plan.html.markdown +++ b/website/docs/cdktf/python/r/backup_plan.html.markdown @@ -51,6 +51,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The display name of a backup plan. * `rule` - (Required) A rule object that specifies a scheduled task that is used to back up a selection of resources. * `advanced_backup_setting` - (Optional) An object that specifies backup options for each resource type. @@ -127,4 +128,4 @@ Using `terraform import`, import Backup Plan using the `id`. For example: % terraform import aws_backup_plan.test ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/backup_region_settings.html.markdown b/website/docs/cdktf/python/r/backup_region_settings.html.markdown index 018be137f777..b2327eef1af0 100644 --- a/website/docs/cdktf/python/r/backup_region_settings.html.markdown +++ b/website/docs/cdktf/python/r/backup_region_settings.html.markdown @@ -59,6 +59,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `resource_type_opt_in_preference` - (Required) A map of service names to their opt-in preferences for the Region. See [AWS Documentation on which services support backup](https://docs.aws.amazon.com/aws-backup/latest/devguide/backup-feature-availability.html). * `resource_type_management_preference` - (Optional) A map of service names to their full management preferences for the Region. For more information, see the AWS Documentation on [what full management is](https://docs.aws.amazon.com/aws-backup/latest/devguide/whatisbackup.html#full-management) and [which services support full management](https://docs.aws.amazon.com/aws-backup/latest/devguide/backup-feature-availability.html#features-by-resource). @@ -93,4 +94,4 @@ Using `terraform import`, import Backup Region Settings using the `region`. For % terraform import aws_backup_region_settings.test us-west-2 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/backup_report_plan.html.markdown b/website/docs/cdktf/python/r/backup_report_plan.html.markdown index b8bac73a1d6a..3f1ca57aa89a 100644 --- a/website/docs/cdktf/python/r/backup_report_plan.html.markdown +++ b/website/docs/cdktf/python/r/backup_report_plan.html.markdown @@ -46,6 +46,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` - (Optional) The description of the report plan with a maximum of 1,024 characters * `name` - (Required) The unique name of the report plan. 
The name must be between 1 and 256 characters, starting with a letter, and consisting of letters, numbers, and underscores. * `report_delivery_channel` - (Required) An object that contains information about where and how to deliver your reports, specifically your Amazon S3 bucket name, S3 key prefix, and the formats of your reports. Detailed below. @@ -106,4 +107,4 @@ Using `terraform import`, import Backup Report Plan using the `id` which corresp % terraform import aws_backup_report_plan.test ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/backup_restore_testing_plan.html.markdown b/website/docs/cdktf/python/r/backup_restore_testing_plan.html.markdown index c29bbecbd079..04331c24e1e7 100644 --- a/website/docs/cdktf/python/r/backup_restore_testing_plan.html.markdown +++ b/website/docs/cdktf/python/r/backup_restore_testing_plan.html.markdown @@ -41,8 +41,9 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -The following arguments are required: +This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` (Required): The name of the restore testing plan. Must be between 1 and 50 characters long and contain only alphanumeric characters and underscores. * `schedule_expression` (Required): The schedule expression for the restore testing plan. * `schedule_expression_timezone` (Optional): The timezone for the schedule expression. If not provided, the state value will be used. @@ -89,4 +90,4 @@ Using `terraform import`, import Backup Restore Testing Plan using the `name`. 
F % terraform import aws_backup_restore_testing_plan.example my_testing_plan ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/backup_restore_testing_selection.html.markdown b/website/docs/cdktf/python/r/backup_restore_testing_selection.html.markdown index 9abce4c87786..2f8f12547e94 100644 --- a/website/docs/cdktf/python/r/backup_restore_testing_selection.html.markdown +++ b/website/docs/cdktf/python/r/backup_restore_testing_selection.html.markdown @@ -71,6 +71,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The name of the backup restore testing selection. * `restore_testing_plan_name` - (Required) The name of the restore testing plan. * `protected_resource_type` - (Required) The type of the protected resource. @@ -119,4 +120,4 @@ Using `terraform import`, import Backup Restore Testing Selection using `name:re % terraform import aws_backup_restore_testing_selection.example restore_testing_selection_12345678:restore_testing_plan_12345678 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/backup_selection.html.markdown b/website/docs/cdktf/python/r/backup_selection.html.markdown index 6dc3813ffa73..4a11e4796849 100644 --- a/website/docs/cdktf/python/r/backup_selection.html.markdown +++ b/website/docs/cdktf/python/r/backup_selection.html.markdown @@ -194,6 +194,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The display name of a resource selection document. * `plan_id` - (Required) The backup plan ID to be associated with the selection of resources. * `iam_role_arn` - (Required) The ARN of the IAM role that AWS Backup uses to authenticate when restoring and backing up the target resource. See the [AWS Backup Developer Guide](https://docs.aws.amazon.com/aws-backup/latest/devguide/access-control.html#managed-policies) for additional information about using AWS managed policies or creating custom policies attached to the IAM role. @@ -278,4 +279,4 @@ Using `terraform import`, import Backup selection using the role plan_id and id % terraform import aws_backup_selection.example plan-id|selection-id ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/backup_vault.html.markdown b/website/docs/cdktf/python/r/backup_vault.html.markdown index 096af9b16eca..962e81f822cb 100644 --- a/website/docs/cdktf/python/r/backup_vault.html.markdown +++ b/website/docs/cdktf/python/r/backup_vault.html.markdown @@ -36,6 +36,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `force_destroy` - (Optional, Default: `false`) A boolean that indicates that all recovery points stored in the vault are deleted so that the vault can be destroyed without error. * `kms_key_arn` - (Optional) The server-side encryption key that is used to protect your backups. * `name` - (Required) Name of the backup vault to create. 
@@ -81,4 +82,4 @@ Using `terraform import`, import Backup vault using the `name`. For example: % terraform import aws_backup_vault.test-vault TestVault ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/backup_vault_lock_configuration.html.markdown b/website/docs/cdktf/python/r/backup_vault_lock_configuration.html.markdown index 36f61f3fefbc..348ee3403267 100644 --- a/website/docs/cdktf/python/r/backup_vault_lock_configuration.html.markdown +++ b/website/docs/cdktf/python/r/backup_vault_lock_configuration.html.markdown @@ -38,6 +38,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `backup_vault_name` - (Required) Name of the backup vault to add a lock configuration for. * `changeable_for_days` - (Optional) The number of days before the lock date. If omitted creates a vault lock in `governance` mode, otherwise it will create a vault lock in `compliance` mode. * `max_retention_days` - (Optional) The maximum retention period that the vault retains its recovery points. 
@@ -75,4 +76,4 @@ Using `terraform import`, import Backup vault lock configuration using the `name % terraform import aws_backup_vault_lock_configuration.test TestVault ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/backup_vault_notifications.html.markdown b/website/docs/cdktf/python/r/backup_vault_notifications.html.markdown index b322f94b6420..2edb2995bbac 100644 --- a/website/docs/cdktf/python/r/backup_vault_notifications.html.markdown +++ b/website/docs/cdktf/python/r/backup_vault_notifications.html.markdown @@ -68,6 +68,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `backup_vault_name` - (Required) Name of the backup vault to add notifications for. * `sns_topic_arn` - (Required) The Amazon Resource Name (ARN) that specifies the topic for a backup vault’s events * `backup_vault_events` - (Required) An array of events that indicate the status of jobs to back up resources to the backup vault. @@ -104,4 +105,4 @@ Using `terraform import`, import Backup vault notifications using the `name`. 
Fo % terraform import aws_backup_vault_notifications.test TestVault ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/backup_vault_policy.html.markdown b/website/docs/cdktf/python/r/backup_vault_policy.html.markdown index 34e6c8aa403f..1f730b58e574 100644 --- a/website/docs/cdktf/python/r/backup_vault_policy.html.markdown +++ b/website/docs/cdktf/python/r/backup_vault_policy.html.markdown @@ -61,6 +61,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `backup_vault_name` - (Required) Name of the backup vault to add policy for. * `policy` - (Required) The backup vault access policy document in JSON format. @@ -96,4 +97,4 @@ Using `terraform import`, import Backup vault policy using the `name`. For examp % terraform import aws_backup_vault_policy.test TestVault ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/batch_compute_environment.html.markdown b/website/docs/cdktf/python/r/batch_compute_environment.html.markdown index 9892c05ad2c9..939937b5cb89 100644 --- a/website/docs/cdktf/python/r/batch_compute_environment.html.markdown +++ b/website/docs/cdktf/python/r/batch_compute_environment.html.markdown @@ -122,7 +122,6 @@ class MyConvertedCode(TerraformStack): # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. 
aws_iam_instance_profile_ecs_instance_role.override_logical_id("ecs_instance_role") aws_batch_compute_environment_sample = BatchComputeEnvironment(self, "sample_11", - name="sample", compute_resources=BatchComputeEnvironmentComputeResources( instance_role=Token.as_string(aws_iam_instance_profile_ecs_instance_role.arn), instance_type=["c4.large"], @@ -134,6 +133,7 @@ class MyConvertedCode(TerraformStack): type="EC2" ), depends_on=[aws_iam_role_policy_attachment_aws_batch_service_role], + name="sample", service_role=aws_batch_service_role.arn, type="MANAGED" ) @@ -156,7 +156,6 @@ class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) BatchComputeEnvironment(self, "sample", - name="sample", compute_resources=BatchComputeEnvironmentComputeResources( max_vcpus=16, security_group_ids=[Token.as_string(aws_security_group_sample.id)], @@ -164,6 +163,7 @@ class MyConvertedCode(TerraformStack): type="FARGATE" ), depends_on=[aws_batch_service_role], + name="sample", service_role=Token.as_string(aws_iam_role_aws_batch_service_role.arn), type="MANAGED" ) @@ -184,7 +184,6 @@ class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) BatchComputeEnvironment(self, "sample", - name="sample", compute_resources=BatchComputeEnvironmentComputeResources( allocation_strategy="BEST_FIT_PROGRESSIVE", instance_role=ecs_instance.arn, @@ -195,6 +194,7 @@ class MyConvertedCode(TerraformStack): subnets=[Token.as_string(aws_subnet_sample.id)], type="EC2" ), + name="sample", type="MANAGED", update_policy=BatchComputeEnvironmentUpdatePolicy( job_execution_timeout_minutes=30, @@ -207,6 +207,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Optional, Forces new resource) The name for your compute environment. Up to 128 letters (uppercase and lowercase), numbers, and underscores are allowed. If omitted, Terraform will assign a random, unique name. * `name_prefix` - (Optional, Forces new resource) Creates a unique compute environment name beginning with the specified prefix. Conflicts with `name`. * `compute_resources` - (Optional) Details of the compute resources managed by the compute environment. This parameter is required for managed compute environments. See details below. @@ -242,6 +243,7 @@ This resource supports the following arguments: `ec2_configuration` supports the following: * `image_id_override` - (Optional) The AMI ID used for instances launched in the compute environment that match the image type. This setting overrides the `image_id` argument in the [`compute_resources`](#compute_resources) block. +* `image_kubernetes_version` - (Optional) The Kubernetes version for the compute environment. If you don't specify a value, the latest version that AWS Batch supports is used. See [Supported Kubernetes versions](https://docs.aws.amazon.com/batch/latest/userguide/supported_kubernetes_version.html) for the list of Kubernetes versions supported by AWS Batch on Amazon EKS. * `image_type` - (Optional) The image type to match with the instance type to select an AMI. If the `image_id_override` parameter isn't specified, then a recent [Amazon ECS-optimized Amazon Linux 2 AMI](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-optimized_AMI.html#al2ami) (`ECS_AL2`) is used. 
### launch_template @@ -264,7 +266,7 @@ This resource supports the following arguments: `update_policy` supports the following: * `job_execution_timeout_minutes` - (Required) Specifies the job timeout (in minutes) when the compute environment infrastructure is updated. -* `terminate_jobs_on_update` - (Required) Specifies whether jobs are automatically terminated when the computer environment infrastructure is updated. +* `terminate_jobs_on_update` - (Required) Specifies whether jobs are automatically terminated when the compute environment infrastructure is updated. ## Attribute Reference @@ -278,6 +280,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_batch_compute_environment.example + identity = { + "arn" = "arn:aws:batch:us-east-1:123456789012:compute-environment/sample" + } +} + +resource "aws_batch_compute_environment" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the compute environment. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import AWS Batch compute using the `name`. For example: ```python @@ -305,4 +328,4 @@ Using `terraform import`, import AWS Batch compute using the `name`. 
For example [2]: http://docs.aws.amazon.com/batch/latest/userguide/compute_environments.html [3]: http://docs.aws.amazon.com/batch/latest/userguide/troubleshooting.html - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/batch_job_definition.html.markdown b/website/docs/cdktf/python/r/batch_job_definition.html.markdown index 4733ff8b8702..9dcefccab852 100644 --- a/website/docs/cdktf/python/r/batch_job_definition.html.markdown +++ b/website/docs/cdktf/python/r/batch_job_definition.html.markdown @@ -306,6 +306,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `container_properties` - (Optional) Valid [container properties](http://docs.aws.amazon.com/batch/latest/APIReference/API_RegisterJobDefinition.html) provided as a single valid JSON document. This parameter is only valid if the `type` parameter is `container`. * `deregister_on_new_revision` - (Optional) When updating a job definition a new revision is created. This parameter determines if the previous version is `deregistered` (`INACTIVE`) or left `ACTIVE`. Defaults to `true`. * `ecs_properties` - (Optional) Valid [ECS properties](http://docs.aws.amazon.com/batch/latest/APIReference/API_RegisterJobDefinition.html) provided as a single valid JSON document. This parameter is only valid if the `type` parameter is `container`. @@ -368,7 +369,7 @@ The following arguments are optional: #### eks_metadata -* `labels` - Key-value pairs used to identify, sort, and organize cube resources. +* `labels` - Key-value pairs used to identify, sort, and organize Kubernetes resources.
#### `eks_secret` @@ -402,6 +403,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_batch_job_definition.example + identity = { + "arn" = "arn:aws:batch:us-east-1:123456789012:job-definition/sample:1" + } +} + +resource "aws_batch_job_definition" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the job definition. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Batch Job Definition using the `arn`. For example: ```python @@ -425,4 +447,4 @@ Using `terraform import`, import Batch Job Definition using the `arn`. For examp % terraform import aws_batch_job_definition.test arn:aws:batch:us-east-1:123456789012:job-definition/sample ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/batch_job_queue.html.markdown b/website/docs/cdktf/python/r/batch_job_queue.html.markdown index b7169152989e..ef91900a2924 100644 --- a/website/docs/cdktf/python/r/batch_job_queue.html.markdown +++ b/website/docs/cdktf/python/r/batch_job_queue.html.markdown @@ -92,6 +92,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Specifies the name of the job queue. 
* `compute_environment_order` - (Optional) The set of compute environments mapped to a job queue and their order relative to each other. The job scheduler uses this parameter to determine which compute environment runs a specific job. Compute environments must be in the VALID state before you can associate them with a job queue. You can associate up to three compute environments with a job queue. * `job_state_time_limit_action` - (Optional) The set of job state time limit actions mapped to a job queue. Specifies an action that AWS Batch will take after the job has remained at the head of the queue in the specified state for longer than the specified time. @@ -130,6 +131,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_batch_job_queue.example + identity = { + "arn" = "arn:aws:batch:us-east-1:123456789012:job-queue/sample" + } +} + +resource "aws_batch_job_queue" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the job queue. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Batch Job Queue using the `arn`. For example: ```python @@ -153,4 +175,4 @@ Using `terraform import`, import Batch Job Queue using the `arn`. 
For example: % terraform import aws_batch_job_queue.test_queue arn:aws:batch:us-east-1:123456789012:job-queue/sample ``` - + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/batch_scheduling_policy.html.markdown b/website/docs/cdktf/python/r/batch_scheduling_policy.html.markdown index 45b8bb1811d6..ef3c355f8995 100644 --- a/website/docs/cdktf/python/r/batch_scheduling_policy.html.markdown +++ b/website/docs/cdktf/python/r/batch_scheduling_policy.html.markdown @@ -50,6 +50,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `fairshare_policy` - (Optional) A fairshare policy block specifies the `compute_reservation`, `share_delay_seconds`, and `share_distribution` of the scheduling policy. The `fairshare_policy` block is documented below. * `name` - (Required) Specifies the name of the scheduling policy. * `tags` - (Optional) Key-value map of resource tags. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. @@ -97,4 +98,4 @@ Using `terraform import`, import Batch Scheduling Policy using the `arn`. 
For ex % terraform import aws_batch_scheduling_policy.test_policy arn:aws:batch:us-east-1:123456789012:scheduling-policy/sample ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/bcmdataexports_export.html.markdown b/website/docs/cdktf/python/r/bcmdataexports_export.html.markdown index f34678505899..7756d7d6e399 100644 --- a/website/docs/cdktf/python/r/bcmdataexports_export.html.markdown +++ b/website/docs/cdktf/python/r/bcmdataexports_export.html.markdown @@ -25,15 +25,22 @@ from cdktf import Token, TerraformStack # See https://cdk.tf/provider-generation for more details. # from imports.aws.bcmdataexports_export import BcmdataexportsExport +from imports.aws.data_aws_caller_identity import DataAwsCallerIdentity +from imports.aws.data_aws_partition import DataAwsPartition class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) + current = DataAwsCallerIdentity(self, "current") + data_aws_partition_current = DataAwsPartition(self, "current_1") + # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. + data_aws_partition_current.override_logical_id("current") BcmdataexportsExport(self, "test", export=[BcmdataexportsExportExport( data_query=[BcmdataexportsExportExportDataQuery( query_statement="SELECT identity_line_item_id, identity_time_interval, line_item_product_code,line_item_unblended_cost FROM COST_AND_USAGE_REPORT", table_configurations={ "COST_AND_USAGE_REPORT": { + "BILLING_VIEW_ARN": "arn:${" + data_aws_partition_current.partition + "}:billing::${" + current.account_id + "}:billingview/primary", "INCLUDE_MANUAL_DISCOUNT_COMPATIBILITY": "FALSE", "INCLUDE_RESOURCES": "FALSE", "INCLUDE_SPLIT_COST_ALLOCATION_DATA": "FALSE", @@ -84,8 +91,8 @@ The following arguments are required: ### `data_query` Argument Reference -* `query_statement` - (Required) Query statement. 
-* `table_configurations` - (Optional) Table configuration. +* `query_statement` - (Required) Query statement. The SQL table name for CUR 2.0 is `COST_AND_USAGE_REPORT`. See the [AWS documentation](https://docs.aws.amazon.com/cur/latest/userguide/table-dictionary-cur2.html) for a list of available columns. +* `table_configurations` - (Optional) Table configuration. See the [AWS documentation](https://docs.aws.amazon.com/cur/latest/userguide/table-dictionary-cur2.html#cur2-table-configurations) for the available configurations. In addition to those listed in the documentation, `BILLING_VIEW_ARN` must also be included, as shown in the example above. ### `destination_configurations` Argument Reference @@ -113,7 +120,8 @@ The following arguments are required: This resource exports the following attributes in addition to the arguments above: -* `export_arn` - Amazon Resource Name (ARN) for this export. +* `arn` - Amazon Resource Name (ARN) for this export. +* `export[0].export_arn` - Amazon Resource Name (ARN) for this export. ## Timeouts @@ -124,6 +132,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_bcmdataexports_export.example + identity = { + "arn" = "arn:aws:bcm-data-exports:us-east-1:123456789012:export/example-export" + } +} + +resource "aws_bcmdataexports_export" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the BCM Data Exports export. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import BCM Data Exports Export using the export ARN. 
For example: ```python @@ -147,4 +176,4 @@ Using `terraform import`, import BCM Data Exports Export using the export ARN. F % terraform import aws_bcmdataexports_export.example arn:aws:bcm-data-exports:us-east-1:123456789012:export/CostUsageReport-9f1c75f3-f982-4d9a-b936-1e7ecab814b7 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/bedrock_custom_model.html.markdown b/website/docs/cdktf/python/r/bedrock_custom_model.html.markdown index fb3a8646d2e3..848ab32a11f8 100644 --- a/website/docs/cdktf/python/r/bedrock_custom_model.html.markdown +++ b/website/docs/cdktf/python/r/bedrock_custom_model.html.markdown @@ -71,6 +71,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `base_model_identifier` - (Required) The Amazon Resource Name (ARN) of the base model. * `custom_model_kms_key_id` - (Optional) The custom model is encrypted at rest using this key. Specify the key ARN. * `custom_model_name` - (Required) Name for the custom model. @@ -87,8 +88,8 @@ This resource supports the following arguments: * `validator` - (Required) Information about the validators. * `s3_uri` - (Required) The S3 URI where the validation data is stored. * `vpc_config` - (Optional) Configuration parameters for the private Virtual Private Cloud (VPC) that contains the resources you are using for this job. - * `security_group_ids` – (Required) VPC configuration security group IDs. - * `subnet_ids` – (Required) VPC configuration subnets. + * `security_group_ids` - (Required) VPC configuration security group IDs. + * `subnet_ids` - (Required) VPC configuration subnets. 
## Attribute Reference @@ -111,6 +112,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_bedrock_custom_model.example + identity = { + "arn" = "arn:aws:bedrock:us-west-2:123456789012:custom-model/amazon.titan-text-lite-v1:0:4k/example-model" + } +} + +resource "aws_bedrock_custom_model" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the Bedrock custom model. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Bedrock Custom Model using the `job_arn`. For example: ```python @@ -134,4 +156,4 @@ Using `terraform import`, import Bedrock custom model using the `job_arn`. 
For e % terraform import aws_bedrock_custom_model.example arn:aws:bedrock:us-west-2:123456789012:model-customization-job/amazon.titan-text-express-v1:0:8k/1y5n57gh5y2e ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/bedrock_guardrail.html.markdown b/website/docs/cdktf/python/r/bedrock_guardrail.html.markdown index 94a9613857b1..f00a808416a8 100644 --- a/website/docs/cdktf/python/r/bedrock_guardrail.html.markdown +++ b/website/docs/cdktf/python/r/bedrock_guardrail.html.markdown @@ -29,19 +29,30 @@ resource "aws_bedrock_guardrail" "example" { output_strength = "MEDIUM" type = "HATE" } + tier_config { + tier_name = "STANDARD" + } } sensitive_information_policy_config { pii_entities_config { - action = "BLOCK" - type = "NAME" + action = "BLOCK" + input_action = "BLOCK" + output_action = "ANONYMIZE" + input_enabled = true + output_enabled = true + type = "NAME" } regexes_config { - action = "BLOCK" - description = "example regex" - name = "regex_example" - pattern = "^\\d{3}-\\d{2}-\\d{4}$" + action = "BLOCK" + input_action = "BLOCK" + output_action = "BLOCK" + input_enabled = true + output_enabled = false + description = "example regex" + name = "regex_example" + pattern = "^\\d{3}-\\d{2}-\\d{4}$" } } @@ -52,6 +63,9 @@ resource "aws_bedrock_guardrail" "example" { type = "DENY" definition = "Investment advice refers to inquiries, guidance, or recommendations regarding the management or allocation of funds or assets with the goal of generating returns ." } + tier_config { + tier_name = "CLASSIC" + } } word_policy_config { @@ -75,6 +89,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
* `content_policy_config` - (Optional) Content policy config for a guardrail. See [Content Policy Config](#content-policy-config) for more information. * `contextual_grounding_policy_config` - (Optional) Contextual grounding policy config for a guardrail. See [Contextual Grounding Policy Config](#contextual-grounding-policy-config) for more information. * `description` (Optional) Description of the guardrail or its version. @@ -90,6 +105,7 @@ The `content_policy_config` configuration block supports the following arguments * `filters_config` - (Optional) Set of content filter configs in content policy. See [Filters Config](#content-filters-config) for more information. +* `tier_config` - (Optional) Configuration block for the content policy tier. See [Tier Config](#content-tier-config) for more information. #### Content Filters Config @@ -99,6 +115,12 @@ The `filters_config` configuration block supports the following arguments: * `input_strength` - (Optional) Strength for filters. * `output_strength` - (Optional) Strength for filters. * `type` - (Optional) Type of filter in content policy. +#### Content Tier Config + +The `tier_config` configuration block supports the following arguments: + +* `tier_name` - (Required) The name of the content policy tier. Valid values include STANDARD or CLASSIC. + ### Contextual Grounding Policy Config * `filters_config` (Required) List of contextual grounding filter configs. See [Contextual Grounding Filters Config](#contextual-grounding-filters-config) for more information. @@ -110,8 +132,17 @@ The `filters_config` configuration block supports the following arguments: * `threshold` - (Required) The threshold for this filter. * `type` - (Required) Type of contextual grounding filter. +### Cross Region Inference + +* `cross_region_config` (Optional) Configuration block to enable cross-region routing for bedrock guardrails. See [Cross Region Config](#cross-region-config) for more information.
Note see [available regions](https://docs.aws.amazon.com/bedrock/latest/userguide/guardrails-cross-region.html) here. + +#### Cross Region Config + +* `guardrail_profile_identifier` (Required) Guardrail profile ARN. + ### Topic Policy Config +* `tier_config` - (Optional) Configuration block for the topic policy tier. See [Tier Config](#topics-tier-config) for more information. * `topics_config` (Required) List of topic configs in topic policy. See [Topics Config](#topics-config) for more information. #### Topics Config @@ -121,6 +152,12 @@ The `filters_config` configuration block supports the following arguments: * `type` (Required) Type of topic in a policy. * `examples` (Optional) List of text examples. +#### Topics Tier Config + +The `tier_config` configuration block supports the following arguments: + +* `tier_name` - (Required) The name of the topic policy tier. Valid values include STANDARD or CLASSIC. + ### Sensitive Information Policy Config * `pii_entities_config` (Optional) List of entities. See [PII Entities Config](#pii-entities-config) for more information. @@ -128,13 +165,21 @@ The `filters_config` configuration block supports the following arguments: #### PII Entities Config -* `action` (Required) Options for sensitive information action. +* `action` (Required) Options for sensitive information action. Valid values: `BLOCK`, `ANONYMIZE`, `NONE`. +* `input_action` (Optional) Action to take when harmful content is detected in the input. Valid values: `BLOCK`, `ANONYMIZE`, `NONE`. +* `input_enabled` (Optional) Whether to enable guardrail evaluation on the input. When disabled, you aren't charged for the evaluation. +* `output_action` (Optional) Action to take when harmful content is detected in the output. Valid values: `BLOCK`, `ANONYMIZE`, `NONE`. +* `output_enabled` (Optional) Whether to enable guardrail evaluation on the output. When disabled, you aren't charged for the evaluation. * `type` (Required) The currently supported PII entities.
#### Regexes Config -* `action` (Required) Options for sensitive information action. +* `action` (Required) Options for sensitive information action. Valid values: `BLOCK`, `ANONYMIZE`, `NONE`. +* `input_action` (Optional) Action to take when harmful content is detected in the input. Valid values: `BLOCK`, `ANONYMIZE`, `NONE`. +* `input_enabled` (Optional) Whether to enable guardrail evaluation on the input. When disabled, you aren't charged for the evaluation. * `name` (Required) The regex name. +* `output_action` (Optional) Action to take when harmful content is detected in the output. Valid values: `BLOCK`, `ANONYMIZE`, `NONE`. +* `output_enabled` (Optional) Whether to enable guardrail evaluation on the output. When disabled, you aren't charged for the evaluation. * `pattern` (Required) The regex pattern. * `description` (Optional) The regex description. @@ -146,10 +191,18 @@ The `filters_config` configuration block supports the following arguments: #### Managed Word Lists Config * `type` (Required) Options for managed words. +* `input_action` (Optional) Action to take when harmful content is detected in the input. Valid values: `BLOCK`, `NONE`. +* `input_enabled` (Optional) Whether to enable guardrail evaluation on the input. When disabled, you aren't charged for the evaluation. +* `output_action` (Optional) Action to take when harmful content is detected in the output. Valid values: `BLOCK`, `NONE`. +* `output_enabled` (Optional) Whether to enable guardrail evaluation on the output. When disabled, you aren't charged for the evaluation. #### Words Config * `text` (Required) The custom word text. +* `input_action` (Optional) Action to take when harmful content is detected in the input. Valid values: `BLOCK`, `NONE`. +* `input_enabled` (Optional) Whether to enable guardrail evaluation on the input. When disabled, you aren't charged for the evaluation. +* `output_action` (Optional) Action to take when harmful content is detected in the output. 
Valid values: `BLOCK`, `NONE`. +* `output_enabled` (Optional) Whether to enable guardrail evaluation on the output. When disabled, you aren't charged for the evaluation. ## Attribute Reference @@ -194,4 +247,4 @@ Using `terraform import`, import Amazon Bedrock Guardrail using using a comma-de % terraform import aws_bedrock_guardrail.example guardrail-id-12345678,DRAFT ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/bedrock_guardrail_version.html.markdown b/website/docs/cdktf/python/r/bedrock_guardrail_version.html.markdown index 8c7b16dfc27b..9533cd0874dc 100644 --- a/website/docs/cdktf/python/r/bedrock_guardrail_version.html.markdown +++ b/website/docs/cdktf/python/r/bedrock_guardrail_version.html.markdown @@ -42,6 +42,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` - (Optional) Description of the Guardrail version. * `skip_destroy` - (Optional) Whether to retain the old version of a previously deployed Guardrail. 
Default is `false` @@ -83,4 +84,4 @@ Using `terraform import`, import Amazon Bedrock Guardrail Version using using a % terraform import aws_bedrock_guardrail_version.example arn:aws:bedrock:us-west-2:123456789012:guardrail-id-12345678,1 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/bedrock_inference_profile.html.markdown b/website/docs/cdktf/python/r/bedrock_inference_profile.html.markdown index 09a3af064472..f699f826b912 100644 --- a/website/docs/cdktf/python/r/bedrock_inference_profile.html.markdown +++ b/website/docs/cdktf/python/r/bedrock_inference_profile.html.markdown @@ -24,16 +24,16 @@ from cdktf import TerraformStack # Provider bindings are generated by running `cdktf get`. # See https://cdk.tf/provider-generation for more details. # -from imports.aws. import BedrockInferenceProfile +from imports.aws.bedrock_inference_profile import BedrockInferenceProfile from imports.aws.data_aws_caller_identity import DataAwsCallerIdentity class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) BedrockInferenceProfile(self, "example", description="Profile with tag for cost allocation tracking", - model_source=[{ - "copy_from": "arn:aws:bedrock:us-west-2::foundation-model/anthropic.claude-3-5-sonnet-20241022-v2:0" - } + model_source=[BedrockInferenceProfileModelSource( + copy_from="arn:aws:bedrock:us-west-2::foundation-model/anthropic.claude-3-5-sonnet-20241022-v2:0" + ) ], name="Claude Sonnet for Project 123", tags={ @@ -52,6 +52,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
* `description` - (Optional) The description of the inference profile. * `tags` - (Optional) Key-value mapping of resource tags for the inference profile. @@ -87,7 +88,7 @@ This resource exports the following attributes in addition to the arguments abov ## Import -In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Bedrock Inference Profile using the `example_id_arg`. For example: +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Bedrock Inference Profile using the `name`. For example: ```python # DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug @@ -97,17 +98,17 @@ from cdktf import TerraformStack # Provider bindings are generated by running `cdktf get`. # See https://cdk.tf/provider-generation for more details. # -from imports.aws. import BedrockInferenceProfile +from imports.aws.bedrock_inference_profile import BedrockInferenceProfile class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) BedrockInferenceProfile.generate_config_for_import(self, "example", "inference_profile-id-12345678") ``` -Using `terraform import`, import Bedrock Inference Profile using the `example_id_arg`. For example: +Using `terraform import`, import Bedrock Inference Profile using the `name`. 
For example: ```console % terraform import aws_bedrock_inference_profile.example inference_profile-id-12345678 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/bedrock_model_invocation_logging_configuration.html.markdown b/website/docs/cdktf/python/r/bedrock_model_invocation_logging_configuration.html.markdown index 927088d6b889..2adf1c701cfd 100644 --- a/website/docs/cdktf/python/r/bedrock_model_invocation_logging_configuration.html.markdown +++ b/website/docs/cdktf/python/r/bedrock_model_invocation_logging_configuration.html.markdown @@ -51,17 +51,17 @@ class MyConvertedCode(TerraformStack): aws_bedrock_model_invocation_logging_configuration_example = BedrockModelInvocationLoggingConfiguration(self, "example_3", depends_on=[aws_s3_bucket_policy_example], - logging_config=[{ - "embedding_data_delivery_enabled": True, - "image_data_delivery_enabled": True, - "s3_config": [{ - "bucket_name": example.id, - "key_prefix": "bedrock" - } + logging_config=[BedrockModelInvocationLoggingConfigurationLoggingConfig( + embedding_data_delivery_enabled=True, + image_data_delivery_enabled=True, + s3_config=[BedrockModelInvocationLoggingConfigurationLoggingConfigS3Config( + bucket_name=example.id, + key_prefix="bedrock" + ) ], - "text_data_delivery_enabled": True, - "video_data_delivery_enabled": True - } + text_data_delivery_enabled=True, + video_data_delivery_enabled=True + ) ] ) # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. @@ -70,42 +70,43 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -The following arguments are required: +This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `logging_config` - (Required) The logging configuration values to set. See [`logging_config` Block](#logging_config-block) for details. ### `logging_config` Block The `logging_config` configuration block supports the following arguments: -* `cloudwatch_config` – (Optional) CloudWatch logging configuration. See [`cloudwatch_config` Block](#cloudwatch_config-block) for details. -* `embedding_data_delivery_enabled` – (Optional) Set to include embeddings data in the log delivery. Defaults to `true`. -* `image_data_delivery_enabled` – (Optional) Set to include image data in the log delivery. Defaults to `true`. -* `s3_config` – (Optional) S3 configuration for storing log data. See [`s3_config` Block](#s3_config-block) for details. -* `text_data_delivery_enabled` – (Optional) Set to include text data in the log delivery. Defaults to `true`. -* `video_data_delivery_enabled` – (Optional) Set to include text data in the log delivery. Defaults to `true`. +* `cloudwatch_config` - (Optional) CloudWatch logging configuration. See [`cloudwatch_config` Block](#cloudwatch_config-block) for details. +* `embedding_data_delivery_enabled` - (Optional) Set to include embeddings data in the log delivery. Defaults to `true`. +* `image_data_delivery_enabled` - (Optional) Set to include image data in the log delivery. Defaults to `true`. +* `s3_config` - (Optional) S3 configuration for storing log data. See [`s3_config` Block](#s3_config-block) for details. +* `text_data_delivery_enabled` - (Optional) Set to include text data in the log delivery. Defaults to `true`. +* `video_data_delivery_enabled` - (Optional) Set to include video data in the log delivery. Defaults to `true`. 
### `cloudwatch_config` Block The `cloudwatch_config` configuration block supports the following arguments: -* `large_data_delivery_s3_config` – (Optional) S3 configuration for delivering a large amount of data. See [`large_data_delivery_s3_config` Block](#large_data_delivery_s3_config-block) for details. -* `log_group_name` – (Required) Log group name. -* `role_arn` – (Optional) The role ARN. +* `large_data_delivery_s3_config` - (Optional) S3 configuration for delivering a large amount of data. See [`large_data_delivery_s3_config` Block](#large_data_delivery_s3_config-block) for details. +* `log_group_name` - (Required) Log group name. +* `role_arn` - (Optional) The role ARN. ### `large_data_delivery_s3_config` Block The `large_data_delivery_s3_config` configuration block supports the following arguments: -* `bucket_name` – (Required) S3 bucket name. -* `key_prefix` – (Optional) S3 prefix. +* `bucket_name` - (Required) S3 bucket name. +* `key_prefix` - (Optional) S3 prefix. ### `s3_config` Block The `s3_config` configuration block supports the following arguments: -* `bucket_name` – (Required) S3 bucket name. -* `key_prefix` – (Optional) S3 prefix. +* `bucket_name` - (Required) S3 bucket name. +* `key_prefix` - (Optional) S3 prefix. 
## Attribute Reference @@ -138,4 +139,4 @@ Using `terraform import`, import Bedrock custom model using the `id` set to the % terraform import aws_bedrock_model_invocation_logging_configuration.my_config us-east-1 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/bedrock_provisioned_model_throughput.html.markdown b/website/docs/cdktf/python/r/bedrock_provisioned_model_throughput.html.markdown index 9cb6938cda59..71c288e397e0 100644 --- a/website/docs/cdktf/python/r/bedrock_provisioned_model_throughput.html.markdown +++ b/website/docs/cdktf/python/r/bedrock_provisioned_model_throughput.html.markdown @@ -38,6 +38,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `commitment_duration` - (Optional) Commitment duration requested for the Provisioned Throughput. For custom models, you can purchase on-demand Provisioned Throughput by omitting this argument. Valid values: `OneMonth`, `SixMonths`. * `model_arn` - (Required) ARN of the model to associate with this Provisioned Throughput. * `model_units` - (Required) Number of model units to allocate. A model unit delivers a specific throughput level for the specified model. @@ -59,6 +60,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. 
For example: + +```terraform +import { + to = aws_bedrock_provisioned_model_throughput.example + identity = { + "arn" = "arn:aws:bedrock:us-west-2:123456789012:provisioned-model/a1b2c3d4567890ab" + } +} + +resource "aws_bedrock_provisioned_model_throughput" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the Bedrock provisioned model throughput. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Provisioned Throughput using the `provisioned_model_arn`. For example: ```python @@ -82,4 +104,4 @@ Using `terraform import`, import Provisioned Throughput using the `provisioned_m % terraform import aws_bedrock_provisioned_model_throughput.example arn:aws:bedrock:us-west-2:123456789012:provisioned-model/1y5n57gh5y2e ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/bedrockagent_agent.html.markdown b/website/docs/cdktf/python/r/bedrockagent_agent.html.markdown index 8fe54812c9b9..98d367f325e8 100644 --- a/website/docs/cdktf/python/r/bedrockagent_agent.html.markdown +++ b/website/docs/cdktf/python/r/bedrockagent_agent.html.markdown @@ -43,7 +43,7 @@ class MyConvertedCode(TerraformStack): example_agent_permissions = DataAwsIamPolicyDocument(self, "example_agent_permissions", statement=[DataAwsIamPolicyDocumentStatement( actions=["bedrock:InvokeModel"], - resources=["arn:${" + data_aws_partition_current.partition + "}:bedrock:${" + data_aws_region_current.name + "}::foundation-model/anthropic.claude-v2" + resources=["arn:${" + data_aws_partition_current.partition + "}:bedrock:${" + data_aws_region_current.region + "}::foundation-model/anthropic.claude-v2" ] ) ] @@ -57,7 +57,7 @@ class MyConvertedCode(TerraformStack): variable="aws:SourceAccount" ), DataAwsIamPolicyDocumentStatementCondition( test="ArnLike", - values=["arn:${" + 
data_aws_partition_current.partition + "}:bedrock:${" + data_aws_region_current.name + "}:${" + current.account_id + "}:agent/*" + values=["arn:${" + data_aws_partition_current.partition + "}:bedrock:${" + data_aws_region_current.region + "}:${" + current.account_id + "}:agent/*" ], variable="AWS:SourceArn" ) @@ -100,6 +100,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `agent_collaboration` - (Optional) Agents collaboration role. Valid values: `SUPERVISOR`, `SUPERVISOR_ROUTER`, `DISABLED`. * `customer_encryption_key_arn` - (Optional) ARN of the AWS KMS key that encrypts the agent. * `description` - (Optional) Description of the agent. @@ -198,4 +199,4 @@ Using `terraform import`, import Agents for Amazon Bedrock Agent using the agent % terraform import aws_bedrockagent_agent.example GGRRAED6JP ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/bedrockagent_agent_action_group.html.markdown b/website/docs/cdktf/python/r/bedrockagent_agent_action_group.html.markdown index 91d0b3e5ab2e..338720f86cc2 100644 --- a/website/docs/cdktf/python/r/bedrockagent_agent_action_group.html.markdown +++ b/website/docs/cdktf/python/r/bedrockagent_agent_action_group.html.markdown @@ -167,6 +167,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
* `action_group_state` - (Optional) Whether the action group is available for the agent to invoke or not when sending an [InvokeAgent](https://docs.aws.amazon.com/bedrock/latest/APIReference/API_agent-runtime_InvokeAgent.html) request. Valid values: `ENABLED`, `DISABLED`. * `api_schema` - (Optional) Either details about the S3 object containing the OpenAPI schema for the action group or the JSON or YAML-formatted payload defining the schema. For more information, see [Action group OpenAPI schemas](https://docs.aws.amazon.com/bedrock/latest/userguide/agents-api-schema.html). See [`api_schema` Block](#api_schema-block) for details. * `description` - (Optional) Description of the action group. @@ -249,6 +250,7 @@ This resource exports the following attributes in addition to the arguments abov * `create` - (Default `30m`) * `update` - (Default `30m`) +* `delete` - (Default `30m`) ## Import @@ -275,4 +277,4 @@ Using `terraform import`, import Agents for Amazon Bedrock Agent Action Group th % terraform import aws_bedrockagent_agent_action_group.example MMAUDBZTH4,GGRRAED6JP,DRAFT ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/bedrockagent_agent_alias.html.markdown b/website/docs/cdktf/python/r/bedrockagent_agent_alias.html.markdown index 40212676f958..a51dd80ae47d 100644 --- a/website/docs/cdktf/python/r/bedrockagent_agent_alias.html.markdown +++ b/website/docs/cdktf/python/r/bedrockagent_agent_alias.html.markdown @@ -44,7 +44,7 @@ class MyConvertedCode(TerraformStack): example_agent_permissions = DataAwsIamPolicyDocument(self, "example_agent_permissions", statement=[DataAwsIamPolicyDocumentStatement( actions=["bedrock:InvokeModel"], - resources=["arn:${" + data_aws_partition_current.partition + "}:bedrock:${" + data_aws_region_current.name + "}::foundation-model/anthropic.claude-v2" + resources=["arn:${" + data_aws_partition_current.partition + "}:bedrock:${" + data_aws_region_current.region + 
"}::foundation-model/anthropic.claude-v2" ] ) ] @@ -58,7 +58,7 @@ class MyConvertedCode(TerraformStack): variable="aws:SourceAccount" ), DataAwsIamPolicyDocumentStatementCondition( test="ArnLike", - values=["arn:${" + data_aws_partition_current.partition + "}:bedrock:${" + data_aws_region_current.name + "}:${" + current.account_id + "}:agent/*" + values=["arn:${" + data_aws_partition_current.partition + "}:bedrock:${" + data_aws_region_current.region + "}:${" + current.account_id + "}:agent/*" ], variable="AWS:SourceArn" ) @@ -107,6 +107,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` - (Optional) Description of the alias. * `routing_configuration` - (Optional) Details about the routing configuration of the alias. See [`routing_configuration` Block](#routing_configuration-block) for details. * `tags` - (Optional) Map of tags assigned to the resource. If configured with a provider [`default_tags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. 
@@ -160,4 +161,4 @@ Using `terraform import`, import Agents for Amazon Bedrock Agent Alias using the % terraform import aws_bedrockagent_agent_alias.example 66IVY0GUTF,GGRRAED6JP ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/bedrockagent_agent_collaborator.html.markdown b/website/docs/cdktf/python/r/bedrockagent_agent_collaborator.html.markdown index 91f437e647f4..e549329675ef 100644 --- a/website/docs/cdktf/python/r/bedrockagent_agent_collaborator.html.markdown +++ b/website/docs/cdktf/python/r/bedrockagent_agent_collaborator.html.markdown @@ -45,11 +45,11 @@ class MyConvertedCode(TerraformStack): example_agent_permissions = DataAwsIamPolicyDocument(self, "example_agent_permissions", statement=[DataAwsIamPolicyDocumentStatement( actions=["bedrock:InvokeModel"], - resources=["arn:${" + data_aws_partition_current.partition + "}:bedrock:${" + data_aws_region_current.name + "}::foundation-model/anthropic.claude-3-5-sonnet-20241022-v2:0" + resources=["arn:${" + data_aws_partition_current.partition + "}:bedrock:${" + data_aws_region_current.region + "}::foundation-model/anthropic.claude-3-5-sonnet-20241022-v2:0" ] ), DataAwsIamPolicyDocumentStatement( actions=["bedrock:GetAgentAlias", "bedrock:InvokeAgent"], - resources=["arn:${" + current_agent.partition + "}:bedrock:${" + data_aws_region_current.name + "}:${" + current.account_id + "}:agent/*", "arn:${" + current_agent.partition + "}:bedrock:${" + data_aws_region_current.name + "}:${" + current.account_id + "}:agent-alias/*" + resources=["arn:${" + current_agent.partition + "}:bedrock:${" + data_aws_region_current.region + "}:${" + current.account_id + "}:agent/*", "arn:${" + current_agent.partition + "}:bedrock:${" + data_aws_region_current.region + "}:${" + current.account_id + "}:agent-alias/*" ] ) ] @@ -63,7 +63,7 @@ class MyConvertedCode(TerraformStack): variable="aws:SourceAccount" ), DataAwsIamPolicyDocumentStatementCondition( test="ArnLike", - 
values=["arn:${" + data_aws_partition_current.partition + "}:bedrock:${" + data_aws_region_current.name + "}:${" + current.account_id + "}:agent/*" + values=["arn:${" + data_aws_partition_current.partition + "}:bedrock:${" + data_aws_region_current.region + "}:${" + current.account_id + "}:agent/*" ], variable="AWS:SourceArn" ) @@ -130,10 +130,11 @@ The following arguments are required: * `agent_id` - (Required) ID if the agent to associate the collaborator. * `collaboration_instruction` - (Required) Instruction to give the collaborator. -* `collbaorator_name` - (Required) Name of this collaborator. +* `collaborator_name` - (Required) Name of this collaborator. The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `prepare_agent` (Optional) Whether to prepare the agent after creation or modification. Defaults to `true`. * `relay_conversation_history` - (Optional) Configure relaying the history to the collaborator. 
@@ -182,4 +183,4 @@ Using `terraform import`, import Bedrock Agents Agent Collaborator using a comma % terraform import aws_bedrockagent_agent_collaborator.example 9LSJO0BFI8,DRAFT,AG3TN4RQIY ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/bedrockagent_agent_knowledge_base_association.html.markdown b/website/docs/cdktf/python/r/bedrockagent_agent_knowledge_base_association.html.markdown index 1cf7d3bc6038..591872538860 100644 --- a/website/docs/cdktf/python/r/bedrockagent_agent_knowledge_base_association.html.markdown +++ b/website/docs/cdktf/python/r/bedrockagent_agent_knowledge_base_association.html.markdown @@ -46,6 +46,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `agent_version` - (Optional, Forces new resource) Version of the agent with which you want to associate the knowledge base. Valid values: `DRAFT`. 
## Attribute Reference @@ -60,6 +61,7 @@ This resource exports the following attributes in addition to the arguments abov * `create` - (Default `5m`) * `update` - (Default `5m`) +* `delete` - (Default `5m`) ## Import @@ -86,4 +88,4 @@ Using `terraform import`, import Agents for Amazon Bedrock Agent Knowledge Base % terraform import aws_bedrockagent_agent_knowledge_base_association.example GGRRAED6JP,DRAFT,EMDPPAYPZI ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/bedrockagent_data_source.html.markdown b/website/docs/cdktf/python/r/bedrockagent_data_source.html.markdown index 95f257852ecb..fe8af993826e 100644 --- a/website/docs/cdktf/python/r/bedrockagent_data_source.html.markdown +++ b/website/docs/cdktf/python/r/bedrockagent_data_source.html.markdown @@ -52,6 +52,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `data_deletion_policy` - (Optional) Data deletion policy for a data source. Valid values: `RETAIN`, `DELETE`. * `description` - (Optional) Description of the data source. * `server_side_encryption_configuration` - (Optional) Details about the configuration of the server-side encryption. See [`server_side_encryption_configuration` block](#server_side_encryption_configuration-block) for details. 
@@ -351,4 +352,4 @@ Using `terraform import`, import Agents for Amazon Bedrock Data Source using the [3]: https://docs.aws.amazon.com/bedrock/latest/APIReference/API_agent_SharePointDataSourceConfiguration.html [4]: https://docs.aws.amazon.com/bedrock/latest/APIReference/API_agent_WebDataSourceConfiguration.html - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/bedrockagent_flow.html.markdown b/website/docs/cdktf/python/r/bedrockagent_flow.html.markdown new file mode 100644 index 000000000000..c6cb452e6471 --- /dev/null +++ b/website/docs/cdktf/python/r/bedrockagent_flow.html.markdown @@ -0,0 +1,434 @@ +--- +subcategory: "Bedrock Agents" +layout: "aws" +page_title: "AWS: aws_bedrockagent_flow" +description: |- + Terraform resource for managing an AWS Bedrock Agents Flow. +--- + + + +# Resource: aws_bedrockagent_flow + +Terraform resource for managing an AWS Bedrock Agents Flow. + +### Basic Usage + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import Token, TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.bedrockagent_flow import BedrockagentFlow +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + BedrockagentFlow(self, "example", + execution_role_arn=Token.as_string(aws_iam_role_example.arn), + name="example-flow" + ) +``` + +## Example Usage + +The default definition: + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import Token, TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. 
+# +from imports.aws.bedrockagent_flow import BedrockagentFlow +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + BedrockagentFlow(self, "example", + definition=[BedrockagentFlowDefinition( + connection={ + "configuration": [{ + "data": [{ + "source_output": "document", + "target_input": "topic" + } + ] + } + ], + "name": "FlowInputNodeFlowInputNode0ToPrompt_1PromptsNode0", + "source": "FlowInputNode", + "target": "Prompt_1", + "type": "Data" + }, + node_attribute=[BedrockagentFlowDefinitionNode( + configuration=[BedrockagentFlowDefinitionNodeConfiguration( + input=[BedrockagentFlowDefinitionNodeConfigurationInput()] + ) + ], + name="FlowInputNode", + output=[BedrockagentFlowDefinitionNodeOutput( + name="document", + type="String" + ) + ], + type="Input" + ), BedrockagentFlowDefinitionNode( + configuration=[BedrockagentFlowDefinitionNodeConfiguration( + prompt=[BedrockagentFlowDefinitionNodeConfigurationPrompt( + source_configuration=[BedrockagentFlowDefinitionNodeConfigurationPromptSourceConfiguration( + inline=[BedrockagentFlowDefinitionNodeConfigurationPromptSourceConfigurationInline( + inference_configuration=[BedrockagentFlowDefinitionNodeConfigurationPromptSourceConfigurationInlineInferenceConfiguration( + text=[BedrockagentFlowDefinitionNodeConfigurationPromptSourceConfigurationInlineInferenceConfigurationText( + max_tokens=2048, + stop_sequences=["User:"], + temperature=0, + top_p=0.8999999761581421 + ) + ] + ) + ], + model_id="amazon.titan-text-express-v1", + template_configuration=[BedrockagentFlowDefinitionNodeConfigurationPromptSourceConfigurationInlineTemplateConfiguration( + text=[BedrockagentFlowDefinitionNodeConfigurationPromptSourceConfigurationInlineTemplateConfigurationText( + input_variable=[BedrockagentFlowDefinitionNodeConfigurationPromptSourceConfigurationInlineTemplateConfigurationTextInputVariable( + name="topic" + ) + ], + text="Write a paragraph about {{topic}}." 
+ ) + ] + ) + ], + template_type="TEXT" + ) + ] + ) + ] + ) + ] + ) + ], + input=[BedrockagentFlowDefinitionNodeInput( + expression="$.data", + name="topic", + type="String" + ) + ], + name="Prompt_1", + output=[BedrockagentFlowDefinitionNodeOutput( + name="modelCompletion", + type="String" + ) + ], + type="Prompt" + ), BedrockagentFlowDefinitionNode( + configuration=[BedrockagentFlowDefinitionNodeConfiguration( + output=[BedrockagentFlowDefinitionNodeConfigurationOutput()] + ) + ], + input=[BedrockagentFlowDefinitionNodeInput( + expression="$.data", + name="document", + type="String" + ) + ], + name="FlowOutputNode", + type="Output" + ) + ] + ) + ], + execution_role_arn=Token.as_string(aws_iam_role_example.arn), + name="example" + ) +``` + +## Argument Reference + +The following arguments are required: + +* `name` - (Required) A name for the flow. +* `execution_role_arn` - (Required) The Amazon Resource Name (ARN) of the service role with permissions to create and manage a flow. For more information, see [Create a service role for flows in Amazon Bedrock](https://docs.aws.amazon.com/bedrock/latest/userguide/flows-permissions.html) in the Amazon Bedrock User Guide. + +The following arguments are optional: + +* `description` - (Optional) A description for the flow. +* `customer_encryption_key_arn` - (Optional) The Amazon Resource Name (ARN) of the KMS key to encrypt the flow. +* `definition` - (Optional) A definition of the nodes and connections between nodes in the flow. See [Definition](#definition) for more information. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `tags` (Optional) Key-value map of resource tags. 
If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. + +### Definition + +* `connection` - (Optional) A list of connection definitions in the flow. See [Connection](#connection) for more information. +* `node` - (Optional) A list of node definitions in the flow. See [Node](#node) for more information. + +### Connection + +* `name` - (Required) A name for the connection that you can reference. +* `source` - (Required) The node that the connection starts at. +* `target` - (Required) The node that the connection ends at. +* `type` - (Required) Whether the source node that the connection begins from is a condition node `Conditional` or not `Data`. +* `configuration` - (Required) Configuration of the connection. See [Connection Configuration](#connection-configuration) for more information. + +### Connection Configuration + +* `data` - (Optional) The configuration of a connection originating from a node that isn’t a Condition node. See [Data Connection Configuration](#data-connection-configuration) for more information. +* `conditional` - (Optional) The configuration of a connection originating from a Condition node. See [Conditional Connection Configuration](#conditional-connection-configuration) for more information. + +#### Data Connection Configuration + +* `source_output` - (Required) The name of the output in the source node that the connection begins from. +* `target_input` - (Required) The name of the input in the target node that the connection ends at. + +#### Conditional Connection Configuration + +* `condition` - (Required) The condition that triggers this connection. 
For more information about how to write conditions, see the Condition node type in the [Node types](https://docs.aws.amazon.com/bedrock/latest/userguide/node-types.html) topic in the Amazon Bedrock User Guide. + +### Node + +* `name` - (Required) A name for the node. +* `type` - (Required) The type of node. This value must match the name of the key that you provide in the configuration. Valid values: `Agent`, `Collector`, `Condition`, `Input`, `Iterator`, `KnowledgeBase`, `LambdaFunction`, `Lex`, `Output`, `Prompt`, `Retrieval`, `Storage` +* `configuration` - (Required) Contains configurations for the node. See [Node Configuration](#node-configuration) for more information. +* `input` - (Optional) A list of objects containing information about an input into the node. See [Node Input](#node-input) for more information. +* `output` - (Optional) A list of objects containing information about an output from the node. See [Node Output](#node-output) for more information. + +### Node Input + +* `name` - (Required) A name for the input that you can reference. +* `type` - (Required) The data type of the input. If the input doesn’t match this type at runtime, a validation error will be thrown. +* `expression` - (Required) An expression that formats the input for the node. For an explanation of how to create expressions, see [Expressions in Prompt flows in Amazon Bedrock](https://docs.aws.amazon.com/bedrock/latest/userguide/flows-expressions.html). +* `category` - (Optional) How input data flows between iterations in a DoWhile loop. + +### Node Output + +* `name` - (Required) A name for the output that you can reference. +* `type` - (Required) The data type of the output. If the output doesn’t match this type at runtime, a validation error will be thrown. + +### Node Configuration + +* `agent` - (Optional) Contains configurations for an agent node in your flow. Invokes an alias of an agent and returns the response. 
See [Agent Node Configuration](#agent-node-configuration) for more information. +* `collector` - (Optional) Contains configurations for a collector node in your flow. Collects an iteration of inputs and consolidates them into an array of outputs. This object has no fields. +* `condition` - (Optional) Contains configurations for a Condition node in your flow. Defines conditions that lead to different branches of the flow. See [Condition Node Configuration](#condition-node-configuration) for more information. +* `inline_code` - (Optional) Contains configurations for an inline code node in your flow. See [Inline Code Node Configuration](#inline-code-node-configuration) for more information. +* `input` - (Optional) Contains configurations for an input flow node in your flow. The node `inputs` can’t be specified for this node. This object has no fields. +* `iterator` - (Optional) Contains configurations for an iterator node in your flow. Takes an input that is an array and iteratively sends each item of the array as an output to the following node. The size of the array is also returned in the output. The output flow node at the end of the flow iteration will return a response for each member of the array. To return only one response, you can include a collector node downstream from the iterator node. This object has no fields. +* `knowledge_base` - (Optional) Contains configurations for a knowledge base node in your flow. Queries a knowledge base and returns the retrieved results or generated response. See [Knowledge Base Node Configuration](#knowledge-base-node-configuration) for more information. +* `lambda_function` - (Optional) Contains configurations for a Lambda function node in your flow. Invokes a Lambda function. See [Lambda Function Node Configuration](#lambda-function-node-configuration) for more information. +* `lex` - (Optional) Contains configurations for a Lex node in your flow. 
Invokes an Amazon Lex bot to identify the intent of the input and return the intent as the output. See [Lex Node Configuration](#lex-node-configuration) for more information. +* `output` - (Optional) Contains configurations for an output flow node in your flow. The node `outputs` can’t be specified for this node. This object has no fields. +* `prompt` - (Optional) Contains configurations for a prompt node in your flow. Runs a prompt and generates the model response as the output. You can use a prompt from Prompt management or you can configure one in this node. See [Prompt Node Configuration](#prompt-node-configuration) for more information. +* `retrieval` - (Optional) Contains configurations for a Retrieval node in your flow. Retrieves data from an Amazon S3 location and returns it as the output. See [Retrieval Node Configuration](#retrieval-node-configuration) for more information. +* `storage` - (Optional) Contains configurations for a Storage node in your flow. Stores an input in an Amazon S3 location. See [Storage Node Configuration](#storage-node-configuration) for more information. + +### Agent Node Configuration + +* `agent_alias_arn` - (Required) The Amazon Resource Name (ARN) of the alias of the agent to invoke. + +### Condition Node Configuration + +* `condition` - (Optional) A list of conditions. See [Condition Config](#condition-config) for more information. + +#### Condition Config + +* `name` - (Required) A name for the condition that you can reference. +* `expression` - (Optional) Defines the condition. You must refer to at least one of the inputs in the condition. For more information, expand the Condition node section in [Node types in prompt flows](https://docs.aws.amazon.com/bedrock/latest/userguide/flows-how-it-works.html#flows-nodes). + +### Inline Code Node Configuration + +* `code` - (Required) The code that's executed in your inline code node. +* `language` - (Required) The programming language used by your inline code node. 
+ +### Knowledge Base Node Configuration + +* `knowledge_base_id` - (Required) The unique identifier of the knowledge base to query. +* `model_id` - (Optional) The unique identifier of the model or inference profile to use to generate a response from the query results. Omit this field if you want to return the retrieved results as an array. +* `guardrail_configuration` - (Optional) Contains configurations for a guardrail to apply during query and response generation for the knowledge base in this configuration. See [Guardrail Configuration](#guardrail-configuration) for more information. + +#### Guardrail Configuration + +* `guardrail_identifier` - (Required) The unique identifier of the guardrail. +* `guardrail_version` - (Required) The version of the guardrail. + +### Lambda Function Node Configuration + +* `lambda_arn` - (Required) The Amazon Resource Name (ARN) of the Lambda function to invoke. + +### Lex Node Configuration + +* `bot_alias_arn` - (Required) The Amazon Resource Name (ARN) of the Amazon Lex bot alias to invoke. +* `locale_id` - (Required) The Region to invoke the Amazon Lex bot in. + +### Prompt Node Configuration + +* `resource` - (Optional) Contains configurations for a prompt from Prompt management. See [Prompt Resource Configuration](#prompt-resource-configuration) for more information. +* `inline` - (Optional) Contains configurations for a prompt that is defined inline. See [Prompt Inline Configuration](#prompt-inline-configuration) for more information. + +#### Prompt Resource Configuration + +* `prompt_arn` - (Required) The Amazon Resource Name (ARN) of the prompt from Prompt management. + +#### Prompt Inline Configuration + +* `additional_model_request_fields` - (Optional) Additional fields to be included in the model request for the Prompt node. +* `inference_configuration` - (Optional) Contains inference configurations for the prompt. See [Prompt Inference Configuration](#prompt-inference-configuration) for more information. 
+* `model_id` - (Required) The unique identifier of the model or [inference profile](https://docs.aws.amazon.com/bedrock/latest/userguide/cross-region-inference.html) to run inference with. +* `template_type` - (Required) The type of prompt template. Valid values: `TEXT`, `CHAT`. +* `template_configuration` - (Required) Contains a prompt and variables in the prompt that can be replaced with values at runtime. See [Prompt Template Configuration](#prompt-template-configuration) for more information. + +#### Prompt Inference Configuration + +* `text` - (Optional) Contains inference configurations for a text prompt. See [Text Inference Configuration](#text-inference-configuration) for more information. + +#### Text Inference Configuration + +* `max_tokens` - (Optional) Maximum number of tokens to return in the response. +* `stop_sequences` - (Optional) List of strings that define sequences after which the model will stop generating. +* `temperature` - (Optional) Controls the randomness of the response. Choose a lower value for more predictable outputs and a higher value for more surprising outputs. +* `top_p` - (Optional) Percentage of most-likely candidates that the model considers for the next token. + +#### Prompt Template Configuration + +* `text` - (Optional) Contains configurations for the text in a message for a prompt. See [Text Template Configuration](#text-template-configuration) +* `chat` - (Optional) Contains configurations to use the prompt in a conversational format. See [Chat Template Configuration](#chat-template-configuration) for more information. + +#### Text Template Configuration + +* `text` - (Required) The message for the prompt. +* `input_variable` - (Optional) A list of variables in the prompt template. See [Input Variable](#input-variable) for more information. +* `cache_point` - (Optional) A cache checkpoint within a template configuration. See [Cache Point](#cache-point) for more information. 
+ +#### Chat Template Configuration + +* `input_variable` - (Optional) A list of variables in the prompt template. See [Input Variable](#input-variable) for more information. +* `message` - (Optional) A list of messages in the chat for the prompt. See [Message](#message) for more information. +* `system` - (Optional) A list of system prompts to provide context to the model or to describe how it should behave. See [System](#system) for more information. +* `tool_configuration` - (Optional) Configuration information for the tools that the model can use when generating a response. See [Tool Configuration](#tool-configuration) for more information. + +#### Message + +* `role` - (Required) The role that the message belongs to. +* `content` - (Required) Contains the content for the message you pass to, or receive from a model. See [Message Content](#message-content) for more information. + +#### Message Content + +* `cache_point` - (Optional) Creates a cache checkpoint within a message. See [Cache Point](#cache-point) for more information. +* `text` - (Optional) The text in the message. + +#### System + +* `cache_point` - (Optional) Creates a cache checkpoint within a system prompt. See [Cache Point](#cache-point) for more information. +* `text` - (Optional) The text in the system prompt. + +#### Tool Configuration + +* `tool_choice` - (Optional) Defines which tools the model should request when invoked. See [Tool Choice](#tool-choice) for more information. +* `tool` - (Optional) A list of tools to pass to a model. See [Tool](#tool) for more information. + +#### Tool Choice + +* `any` - (Optional) Defines tools, at least one of which must be requested by the model. No text is generated but the results of tool use are sent back to the model to help generate a response. This object has no fields. +* `auto` - (Optional) Defines tools. The model automatically decides whether to call a tool or to generate text instead. This object has no fields. 
+* `tool` - (Optional) Defines a specific tool that the model must request. No text is generated but the results of tool use are sent back to the model to help generate a response. See [Named Tool](#named-tool) for more information. + +#### Named Tool + +* `name` - (Required) The name of the tool. + +#### Tool + +* `cache_point` - (Optional) Creates a cache checkpoint within a tool designation. See [Cache Point](#cache-point) for more information. +* `tool_spec` - (Optional) The specification for the tool. See [Tool Specification](#tool-specification) for more information. + +#### Tool Specification + +* `name` - (Required) The name of the tool. +* `description` - (Optional) The description of the tool. +* `input_schema` - (Optional) The input schema of the tool. See [Tool Input Schema](#tool-input-schema) for more information. + +#### Tool Input Schema + +* `json` - (Optional) A JSON object defining the input schema for the tool. + +#### Input Variable + +* `name` - (Required) The name of the variable. + +#### Cache Point + +* `type` - (Required) Indicates that the CachePointBlock is of the default type. Valid values: `default`. + +### Retrieval Node Configuration + +* `service_configuration` - (Required) Contains configurations for the service to use for retrieving data to return as the output from the node. See [Retrieval Service Configuration](#retrieval-service-configuration) for more information. + +#### Retrieval Service Configuration + +* `s3` - (Optional) Contains configurations for the Amazon S3 location from which to retrieve data to return as the output from the node. See [Retrieval S3 Service Configuration](#retrieval-s3-service-configuration) for more information. + +#### Retrieval S3 Service Configuration + +* `bucket_name` - (Required) The name of the Amazon S3 bucket from which to retrieve data. + +### Storage Node Configuration + +* `service_configuration` - (Required) Contains configurations for a Storage node in your flow. 
Stores an input in an Amazon S3 location. See [Storage Service Configuration](#storage-service-configuration) for more information. + +#### Storage Service Configuration + +* `s3` - (Optional) Contains configurations for the service to use for storing the input into the node. See [Storage S3 Service Configuration](#storage-s3-service-configuration) for more information. + +#### Storage S3 Service Configuration + +* `bucket_name` - (Required) The name of the Amazon S3 bucket in which to store the input into the node. + +## Attribute Reference + +This resource exports the following attributes in addition to the arguments above: + +* `arn` - The Amazon Resource Name (ARN) of the flow. +* `id` - The unique identifier of the flow. +* `created_at` - The time at which the flow was created. +* `updated_at` - The time at which the flow was last updated. +* `version` - The version of the flow. +* `status` - The status of the flow. +* `tags_all` - A map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). + +## Timeouts + +[Configuration options](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts): + +* `create` - (Default `5m`) +* `update` - (Default `5m`) +* `delete` - (Default `5m`) + +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Bedrock Agents Flow using the `id`. For example: + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. 
+# +from imports.aws.bedrockagent_flow import BedrockagentFlow +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + BedrockagentFlow.generate_config_for_import(self, "example", "ABCDEFGHIJ") +``` + +Using `terraform import`, import Bedrock Agents Flow using the `id`. For example: + +```console +% terraform import aws_bedrockagent_flow.example ABCDEFGHIJ +``` + + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/bedrockagent_knowledge_base.html.markdown b/website/docs/cdktf/python/r/bedrockagent_knowledge_base.html.markdown index 4008122704c4..8ce28e694607 100644 --- a/website/docs/cdktf/python/r/bedrockagent_knowledge_base.html.markdown +++ b/website/docs/cdktf/python/r/bedrockagent_knowledge_base.html.markdown @@ -76,24 +76,24 @@ class MyConvertedCode(TerraformStack): type="VECTOR", vector_knowledge_base_configuration=[BedrockagentKnowledgeBaseKnowledgeBaseConfigurationVectorKnowledgeBaseConfiguration( embedding_model_arn="arn:aws:bedrock:us-west-2::foundation-model/amazon.titan-embed-text-v2:0", - embedding_model_configuration=[{ - "bedrock_embedding_model_configuration": [{ - "dimensions": 1024, - "embedding_data_type": "FLOAT32" - } + embedding_model_configuration=[BedrockagentKnowledgeBaseKnowledgeBaseConfigurationVectorKnowledgeBaseConfigurationEmbeddingModelConfiguration( + bedrock_embedding_model_configuration=[BedrockagentKnowledgeBaseKnowledgeBaseConfigurationVectorKnowledgeBaseConfigurationEmbeddingModelConfigurationBedrockEmbeddingModelConfiguration( + dimensions=1024, + embedding_data_type="FLOAT32" + ) ] - } + ) ], - supplemental_data_storage_configuration=[{ - "storage_location": [{ - "s3_location": [{ - "uri": "s3://my-bucket/chunk-processor/" - } + supplemental_data_storage_configuration=[BedrockagentKnowledgeBaseKnowledgeBaseConfigurationVectorKnowledgeBaseConfigurationSupplementalDataStorageConfiguration( + 
storage_location=[BedrockagentKnowledgeBaseKnowledgeBaseConfigurationVectorKnowledgeBaseConfigurationSupplementalDataStorageConfigurationStorageLocation( + s3_location=[BedrockagentKnowledgeBaseKnowledgeBaseConfigurationVectorKnowledgeBaseConfigurationSupplementalDataStorageConfigurationStorageLocationS3Location( + uri="s3://my-bucket/chunk-processor/" + ) ], - "type": "S3" - } + type="S3" + ) ] - } + ) ] ) ] @@ -130,6 +130,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` - (Optional) Description of the knowledge base. * `tags` - (Optional) Map of tags assigned to the resource. If configured with a provider [`default_tags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. @@ -281,4 +282,4 @@ Using `terraform import`, import Agents for Amazon Bedrock Knowledge Base using % terraform import aws_bedrockagent_knowledge_base.example EMDPPAYPZI ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/bedrockagent_prompt.html.markdown b/website/docs/cdktf/python/r/bedrockagent_prompt.html.markdown index 272b9fbc5c4c..655496e756db 100644 --- a/website/docs/cdktf/python/r/bedrockagent_prompt.html.markdown +++ b/website/docs/cdktf/python/r/bedrockagent_prompt.html.markdown @@ -88,6 +88,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` - (Optional) Description of the prompt. * `default_variant` - (Optional) Name of the default variant for your prompt. * `customer_encryption_key_arn` - (Optional) Amazon Resource Name (ARN) of the KMS key that you encrypted the prompt with. @@ -236,4 +237,4 @@ Using `terraform import`, import Bedrock Agents Prompt using the `id`. For examp % terraform import aws_bedrockagent_prompt.example 1A2BC3DEFG ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/budgets_budget.html.markdown b/website/docs/cdktf/python/r/budgets_budget.html.markdown index c76cbb94df9a..3307a89c93be 100644 --- a/website/docs/cdktf/python/r/budgets_budget.html.markdown +++ b/website/docs/cdktf/python/r/budgets_budget.html.markdown @@ -263,6 +263,7 @@ The following arguments are optional: * `account_id` - (Optional) The ID of the target account for budget. Will use current user's account_id by default if omitted. * `auto_adjust_data` - (Optional) Object containing [AutoAdjustData](#auto-adjust-data) which determines the budget amount for an auto-adjusting budget. +* `billing_view_arn` - (Optional) ARN of the billing view. * `cost_filter` - (Optional) A list of [CostFilter](#cost-filter) name/values pair to apply to budget. * `cost_types` - (Optional) Object containing [CostTypes](#cost-types) The types of cost included in a budget, such as tax and subscriptions. * `limit_amount` - (Optional) The amount of cost or usage being measured for a budget. @@ -370,4 +371,4 @@ Using `terraform import`, import budgets using `AccountID:BudgetName`. 
For examp % terraform import aws_budgets_budget.myBudget 123456789012:myBudget ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/ce_anomaly_monitor.html.markdown b/website/docs/cdktf/python/r/ce_anomaly_monitor.html.markdown index da61524c1f6a..fa24f39cdbe3 100644 --- a/website/docs/cdktf/python/r/ce_anomaly_monitor.html.markdown +++ b/website/docs/cdktf/python/r/ce_anomaly_monitor.html.markdown @@ -90,6 +90,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_ce_anomaly_monitor.example + identity = { + "arn" = "arn:aws:ce::123456789012:anomalymonitor/12345678-1234-1234-1234-123456789012" + } +} + +resource "aws_ce_anomaly_monitor" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the Cost Explorer anomaly monitor. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import `aws_ce_anomaly_monitor` using the `id`. For example: ```python @@ -113,4 +134,4 @@ Using `terraform import`, import `aws_ce_anomaly_monitor` using the `id`. 
For ex % terraform import aws_ce_anomaly_monitor.example costAnomalyMonitorARN ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/ce_anomaly_subscription.html.markdown b/website/docs/cdktf/python/r/ce_anomaly_subscription.html.markdown index 3f47e2bfb32b..32ef38541d45 100644 --- a/website/docs/cdktf/python/r/ce_anomaly_subscription.html.markdown +++ b/website/docs/cdktf/python/r/ce_anomaly_subscription.html.markdown @@ -258,6 +258,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_ce_anomaly_subscription.example + identity = { + "arn" = "arn:aws:ce::123456789012:anomalysubscription/12345678-1234-1234-1234-123456789012" + } +} + +resource "aws_ce_anomaly_subscription" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the Cost Explorer anomaly subscription. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import `aws_ce_anomaly_subscription` using the `id`. For example: ```python @@ -281,4 +302,4 @@ Using `terraform import`, import `aws_ce_anomaly_subscription` using the `id`. 
F % terraform import aws_ce_anomaly_subscription.example AnomalySubscriptionARN ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/ce_cost_category.html.markdown b/website/docs/cdktf/python/r/ce_cost_category.html.markdown index 44cd8890c1aa..a2973e98db8b 100644 --- a/website/docs/cdktf/python/r/ce_cost_category.html.markdown +++ b/website/docs/cdktf/python/r/ce_cost_category.html.markdown @@ -138,6 +138,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_ce_cost_category.example + identity = { + "arn" = "arn:aws:ce::123456789012:costcategory/12345678-1234-1234-1234-123456789012" + } +} + +resource "aws_ce_cost_category" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the Cost Explorer cost category. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import `aws_ce_cost_category` using the id. For example: ```python @@ -161,4 +182,4 @@ Using `terraform import`, import `aws_ce_cost_category` using the id. 
For exampl % terraform import aws_ce_cost_category.example costCategoryARN ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/chatbot_slack_channel_configuration.html.markdown b/website/docs/cdktf/python/r/chatbot_slack_channel_configuration.html.markdown index d1dc24b3d564..c517afd32b80 100644 --- a/website/docs/cdktf/python/r/chatbot_slack_channel_configuration.html.markdown +++ b/website/docs/cdktf/python/r/chatbot_slack_channel_configuration.html.markdown @@ -50,6 +50,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `guardrail_policy_arns` - (Optional) List of IAM policy ARNs that are applied as channel guardrails. The AWS managed `AdministratorAccess` policy is applied by default if this is not set. * `logging_level` - (Optional) Logging levels include `ERROR`, `INFO`, or `NONE`. * `sns_topic_arns` - (Optional) ARNs of the SNS topics that deliver notifications to AWS Chatbot. 
@@ -98,4 +99,4 @@ Using `terraform import`, import Chatbot Slack Channel Configuration using the ` % terraform import aws_chatbot_slack_channel_configuration.example arn:aws:chatbot::123456789012:chat-configuration/slack-channel/min-slaka-kanal ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/chatbot_teams_channel_configuration.html.markdown b/website/docs/cdktf/python/r/chatbot_teams_channel_configuration.html.markdown index d07a4803ff85..d77d66e4b772 100644 --- a/website/docs/cdktf/python/r/chatbot_teams_channel_configuration.html.markdown +++ b/website/docs/cdktf/python/r/chatbot_teams_channel_configuration.html.markdown @@ -54,6 +54,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `channel_name` - (Optional) Name of the Microsoft Teams channel. * `guardrail_policy_arns` - (Optional) List of IAM policy ARNs that are applied as channel guardrails. The AWS managed `AdministratorAccess` policy is applied by default if this is not set. * `logging_level` - (Optional) Logging levels include `ERROR`, `INFO`, or `NONE`. 
@@ -102,4 +103,4 @@ Using `terraform import`, import Chatbot Microsoft Teams Channel Configuration u % terraform import aws_chatbot_teams_channel_configuration.example 5f4f15d2-b958-522a-8333-124aa8bf0925 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/chime_voice_connector.html.markdown b/website/docs/cdktf/python/r/chime_voice_connector.html.markdown index 210110e47ec5..163f537f7fb3 100644 --- a/website/docs/cdktf/python/r/chime_voice_connector.html.markdown +++ b/website/docs/cdktf/python/r/chime_voice_connector.html.markdown @@ -42,6 +42,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `aws_region` - (Optional) The AWS Region in which the Amazon Chime Voice Connector is created. Default value: `us-east-1` * `tags` - (Optional) Key-value mapping of resource tags. If configured with a provider [`default_tags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. @@ -78,4 +79,4 @@ Using `terraform import`, import Configuration Recorder using the name. 
For exam % terraform import aws_chime_voice_connector.test example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/chime_voice_connector_group.html.markdown b/website/docs/cdktf/python/r/chime_voice_connector_group.html.markdown index 222b165bed4d..328ec145cde7 100644 --- a/website/docs/cdktf/python/r/chime_voice_connector_group.html.markdown +++ b/website/docs/cdktf/python/r/chime_voice_connector_group.html.markdown @@ -56,6 +56,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The name of the Amazon Chime Voice Connector group. * `connector` - (Optional) The Amazon Chime Voice Connectors to route inbound calls to. @@ -97,4 +98,4 @@ Using `terraform import`, import Configuration Recorder using the name. For exam % terraform import aws_chime_voice_connector_group.default example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/chime_voice_connector_logging.html.markdown b/website/docs/cdktf/python/r/chime_voice_connector_logging.html.markdown index 94d45b816e81..f1e89dd3efb1 100644 --- a/website/docs/cdktf/python/r/chime_voice_connector_logging.html.markdown +++ b/website/docs/cdktf/python/r/chime_voice_connector_logging.html.markdown @@ -44,6 +44,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `voice_connector_id` - (Required) The Amazon Chime Voice Connector ID. * `enable_sip_logs` - (Optional) When true, enables SIP message logs for sending to Amazon CloudWatch Logs. * `enable_media_metric_logs` - (Optional) When true, enables logging of detailed media metrics for Voice Connectors to Amazon CloudWatch logs. @@ -79,4 +80,4 @@ Using `terraform import`, import Chime Voice Connector Logging using the `voice_ % terraform import aws_chime_voice_connector_logging.default abcdef1ghij2klmno3pqr4 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/chime_voice_connector_origination.html.markdown b/website/docs/cdktf/python/r/chime_voice_connector_origination.html.markdown index 7004c17d0669..2d0fb4e5e977 100644 --- a/website/docs/cdktf/python/r/chime_voice_connector_origination.html.markdown +++ b/website/docs/cdktf/python/r/chime_voice_connector_origination.html.markdown @@ -58,6 +58,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `voice_connector_id` - (Required) The Amazon Chime Voice Connector ID. * `route` - (Required) Set of call distribution properties defined for your SIP hosts. See [route](#route) below for more details. Minimum of 1. Maximum of 20. * `disabled` - (Optional) When origination settings are disabled, inbound calls are not enabled for your Amazon Chime Voice Connector. 
@@ -103,4 +104,4 @@ Using `terraform import`, import Chime Voice Connector Origination using the `vo % terraform import aws_chime_voice_connector_origination.default abcdef1ghij2klmno3pqr4 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/chime_voice_connector_streaming.html.markdown b/website/docs/cdktf/python/r/chime_voice_connector_streaming.html.markdown index 533cfbbeb20c..fa8be5a6bc57 100644 --- a/website/docs/cdktf/python/r/chime_voice_connector_streaming.html.markdown +++ b/website/docs/cdktf/python/r/chime_voice_connector_streaming.html.markdown @@ -126,6 +126,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `voice_connector_id` - (Required) The Amazon Chime Voice Connector ID. * `data_retention` - (Required) The retention period, in hours, for the Amazon Kinesis data. * `disabled` - (Optional) When true, media streaming to Amazon Kinesis is turned off. 
Default: `false` @@ -168,4 +169,4 @@ Using `terraform import`, import Chime Voice Connector Streaming using the `voic % terraform import aws_chime_voice_connector_streaming.default abcdef1ghij2klmno3pqr4 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/chime_voice_connector_termination.html.markdown b/website/docs/cdktf/python/r/chime_voice_connector_termination.html.markdown index 817269f149f6..a3b47d00688e 100644 --- a/website/docs/cdktf/python/r/chime_voice_connector_termination.html.markdown +++ b/website/docs/cdktf/python/r/chime_voice_connector_termination.html.markdown @@ -47,6 +47,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `voice_connector_id` - (Required) The Amazon Chime Voice Connector ID. * `cidr_allow_list` - (Required) The IP addresses allowed to make calls, in CIDR format. * `calling_regions` - (Required) The countries to which calls are allowed, in ISO 3166-1 alpha-2 format. 
@@ -85,4 +86,4 @@ Using `terraform import`, import Chime Voice Connector Termination using the `vo % terraform import aws_chime_voice_connector_termination.default abcdef1ghij2klmno3pqr4 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/chime_voice_connector_termination_credentials.html.markdown b/website/docs/cdktf/python/r/chime_voice_connector_termination_credentials.html.markdown index bd081ac48335..64f6fdbb69f0 100644 --- a/website/docs/cdktf/python/r/chime_voice_connector_termination_credentials.html.markdown +++ b/website/docs/cdktf/python/r/chime_voice_connector_termination_credentials.html.markdown @@ -62,6 +62,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `voice_connector_id` - (Required) Amazon Chime Voice Connector ID. * `credentials` - (Required) List of termination SIP credentials. 
@@ -103,4 +104,4 @@ Using `terraform import`, import Chime Voice Connector Termination Credentials u % terraform import aws_chime_voice_connector_termination_credentials.default abcdef1ghij2klmno3pqr4 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/chimesdkmediapipelines_media_insights_pipeline_configuration.html.markdown b/website/docs/cdktf/python/r/chimesdkmediapipelines_media_insights_pipeline_configuration.html.markdown index 95a6c56b8dd0..905046bab590 100644 --- a/website/docs/cdktf/python/r/chimesdkmediapipelines_media_insights_pipeline_configuration.html.markdown +++ b/website/docs/cdktf/python/r/chimesdkmediapipelines_media_insights_pipeline_configuration.html.markdown @@ -319,6 +319,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Configuration name. * `resource_access_role_arn` - (Required) ARN of IAM Role used by service to invoke processors and sinks specified by configuration elements. * `elements` - (Required) Collection of processors and sinks to transform media and deliver data. @@ -406,6 +407,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. 
For example: + +```terraform +import { + to = aws_chimesdkmediapipelines_media_insights_pipeline_configuration.example + identity = { + "arn" = "arn:aws:chime:us-east-1:123456789012:media-insights-pipeline-configuration/example-config" + } +} + +resource "aws_chimesdkmediapipelines_media_insights_pipeline_configuration" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the Chime SDK media insights pipeline configuration. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Chime SDK Media Pipelines Media Insights Pipeline Configuration using the `id`. For example: ```python @@ -429,4 +451,4 @@ Using `terraform import`, import Chime SDK Media Pipelines Media Insights Pipeli % terraform import aws_chimesdkmediapipelines_media_insights_pipeline_configuration.example abcdef123456 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/chimesdkvoice_sip_media_application.html.markdown b/website/docs/cdktf/python/r/chimesdkvoice_sip_media_application.html.markdown index a5ed50234036..2ca12a13a4bf 100644 --- a/website/docs/cdktf/python/r/chimesdkvoice_sip_media_application.html.markdown +++ b/website/docs/cdktf/python/r/chimesdkvoice_sip_media_application.html.markdown @@ -47,6 +47,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `tags` - (Optional) Key-value mapping of resource tags. 
If configured with a provider [`default_tags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. ### `endpoints` @@ -88,4 +89,4 @@ Using `terraform import`, import a ChimeSDKVoice SIP Media Application using the % terraform import aws_chimesdkvoice_sip_media_application.example abcdef123456 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/chimesdkvoice_sip_rule.html.markdown b/website/docs/cdktf/python/r/chimesdkvoice_sip_rule.html.markdown index 99ab91026700..2e71f49a4735 100644 --- a/website/docs/cdktf/python/r/chimesdkvoice_sip_rule.html.markdown +++ b/website/docs/cdktf/python/r/chimesdkvoice_sip_rule.html.markdown @@ -51,6 +51,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `disabled` - (Optional) Enables or disables a rule. You must disable rules before you can delete them. ### `target_applications` @@ -92,4 +93,4 @@ Using `terraform import`, import a ChimeSDKVoice SIP Rule using the `id`. 
For ex % terraform import aws_chimesdkvoice_sip_rule.example abcdef123456 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/chimesdkvoice_voice_profile_domain.html.markdown b/website/docs/cdktf/python/r/chimesdkvoice_voice_profile_domain.html.markdown index e9a5612f6b13..d62ec981d2d2 100644 --- a/website/docs/cdktf/python/r/chimesdkvoice_voice_profile_domain.html.markdown +++ b/website/docs/cdktf/python/r/chimesdkvoice_voice_profile_domain.html.markdown @@ -58,6 +58,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` - (Optional) Description of Voice Profile Domain. ## Attribute Reference @@ -100,4 +101,4 @@ Using `terraform import`, import AWS Chime SDK Voice Profile Domain using the `i % terraform import aws_chimesdkvoice_voice_profile_domain.example abcdef123456 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/cleanrooms_collaboration.html.markdown b/website/docs/cdktf/python/r/cleanrooms_collaboration.html.markdown index bd7947283cc5..1042dc98bfa5 100644 --- a/website/docs/cdktf/python/r/cleanrooms_collaboration.html.markdown +++ b/website/docs/cdktf/python/r/cleanrooms_collaboration.html.markdown @@ -10,13 +10,11 @@ description: |- # Resource: aws_cleanrooms_collaboration -Provides a AWS Clean Rooms collaboration. All members included in the definition will be invited to -join the collaboration and can create memberships. +Provides an AWS Clean Rooms collaboration. +All members included in the definition will be invited to join the collaboration and can create memberships. 
## Example Usage -### Collaboration with tags - ```python # DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug from constructs import Construct @@ -30,6 +28,7 @@ class MyConvertedCode(TerraformStack): def __init__(self, scope, name, *, memberAbilities): super().__init__(scope, name) CleanroomsCollaboration(self, "test_collaboration", + analytics_engine="SPARK", creator_display_name="Creator ", creator_member_abilities=["CAN_QUERY", "CAN_RECEIVE_RESULTS"], data_encryption_metadata=CleanroomsCollaborationDataEncryptionMetadata( @@ -55,7 +54,7 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -This resource supports the following arguments: +The following arguments are required: * `name` - (Required) - The name of the collaboration. Collaboration names do not need to be unique. * `description` - (Required) - A description for a collaboration. @@ -63,6 +62,10 @@ This resource supports the following arguments: * `creator_display_name` - (Required - Forces new resource) - The name for the member record for the collaboration creator. * `query_log_status` - (Required - Forces new resource) - Determines if members of the collaboration can enable query logs within their own. emberships. Valid values [may be found here](https://docs.aws.amazon.com/clean-rooms/latest/apireference/API_CreateCollaboration.html#API-CreateCollaboration-request-queryLogStatus). + +The following arguments are optional: + +* `analytics_engine` - (Optional) Analytics engine used by the collaboration. Valid values are `CLEAN_ROOMS_SQL` (deprecated) and `SPARK`. * `data_encryption_metadata` - (Required - Forces new resource) - a collection of settings which determine how the [c3r client](https://docs.aws.amazon.com/clean-rooms/latest/userguide/crypto-computing.html) will encrypt data for use within this collaboration. 
* `data_encryption_metadata.allow_clear_text` - (Required - Forces new resource) - Indicates whether encrypted tables can contain cleartext data. This is a boolea field. @@ -76,17 +79,18 @@ or cryptographically processed (false). * `member.account_id` - (Required - Forces new resource) - The account id for the invited member. * `member.display_name` - (Required - Forces new resource) - The display name for the invited member. * `member.member_abilities` - (Required - Forces new resource) - The list of abilities for the invited member. Valid values [may be found here](https://docs.aws.amazon.com/clean-rooms/latest/apireference/API_CreateCollaboration.html#API-CreateCollaboration-request-creatorMemberAbilities). +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `tags` - (Optional) - Key value pairs which tag the collaboration. ## Attribute Reference This resource exports the following attributes in addition to the arguments above: -* `arn` - The arn of the collaboration. -* `id` - The id of the collaboration. -* `create_time` - The date and time the collaboration was created. +* `arn` - ARN of the collaboration. +* `id` - ID of the collaboration. +* `create_time` - Date and time the collaboration was created. * `member status` - For each member included in the collaboration an additional computed attribute of status is added. These values [may be found here](https://docs.aws.amazon.com/clean-rooms/latest/apireference/API_MemberSummary.html#API-Type-MemberSummary-status). -* `updated_time` - The date and time the collaboration was last updated. +* `updated_time` - Date and time the collaboration was last updated. 
## Timeouts @@ -121,4 +125,4 @@ Using `terraform import`, import `aws_cleanrooms_collaboration` using the `id`. % terraform import aws_cleanrooms_collaboration.collaboration 1234abcd-12ab-34cd-56ef-1234567890ab ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/cleanrooms_configured_table.html.markdown b/website/docs/cdktf/python/r/cleanrooms_configured_table.html.markdown index 1746ecd79183..80bf6a60a0cb 100644 --- a/website/docs/cdktf/python/r/cleanrooms_configured_table.html.markdown +++ b/website/docs/cdktf/python/r/cleanrooms_configured_table.html.markdown @@ -47,6 +47,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) - The name of the configured table. * `description` - (Optional) - A description for the configured table. * `analysis_method` - (Required) - The analysis method for the configured table. The only valid value is currently `DIRECT_QUERY`. @@ -75,6 +76,32 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_cleanrooms_configured_table.example + identity = { + id = "1234abcd-12ab-34cd-56ef-1234567890ab" + } +} + +resource "aws_cleanrooms_configured_table" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `id` - (String) ID of the cleanrooms configured table. 
+ +#### Optional + +* `account_id` (String) AWS Account where this resource is managed. +* `region` (String) Region where this resource is managed. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import `aws_cleanrooms_configured_table` using the `id`. For example: ```python @@ -98,4 +125,4 @@ Using `terraform import`, import `aws_cleanrooms_configured_table` using the `id % terraform import aws_cleanrooms_configured_table.table 1234abcd-12ab-34cd-56ef-1234567890ab ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/cleanrooms_membership.html.markdown b/website/docs/cdktf/python/r/cleanrooms_membership.html.markdown index f68a0630e038..00a4e474c34e 100644 --- a/website/docs/cdktf/python/r/cleanrooms_membership.html.markdown +++ b/website/docs/cdktf/python/r/cleanrooms_membership.html.markdown @@ -24,24 +24,24 @@ from cdktf import TerraformStack # Provider bindings are generated by running `cdktf get`. # See https://cdk.tf/provider-generation for more details. # -from imports.aws. 
import CleanroomsMembership +from imports.aws.cleanrooms_membership import CleanroomsMembership class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) CleanroomsMembership(self, "test_membership", collaboration_id="1234abcd-12ab-34cd-56ef-1234567890ab", - default_result_configuration=[{ - "output_configuration": [{ - "s3": [{ - "bucket": "test-bucket", - "key_prefix": "test-prefix", - "result_format": "PARQUET" - } + default_result_configuration=[CleanroomsMembershipDefaultResultConfiguration( + output_configuration=[CleanroomsMembershipDefaultResultConfigurationOutputConfiguration( + s3=[CleanroomsMembershipDefaultResultConfigurationOutputConfigurationS3( + bucket="test-bucket", + key_prefix="test-prefix", + result_format="PARQUET" + ) ] - } + ) ], - "role_arn": "arn:aws:iam::123456789012:role/role-name" - } + role_arn="arn:aws:iam::123456789012:role/role-name" + ) ], query_log_status="DISABLED", tags={ @@ -54,6 +54,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `collaboration_id` - (Required - Forces new resource) - The ID of the collaboration to which the member was invited. * `query_log_status` - (Required) - An indicator as to whether query logging has been enabled or disabled for the membership. * `default_result_configuration` - (Optional) - The default configuration for a query result. @@ -92,7 +93,7 @@ from cdktf import TerraformStack # Provider bindings are generated by running `cdktf get`. # See https://cdk.tf/provider-generation for more details. # -from imports.aws. 
import CleanroomsMembership +from imports.aws.cleanrooms_membership import CleanroomsMembership class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) @@ -105,4 +106,4 @@ Using `terraform import`, import `aws_cleanrooms_membership` using the `id`. For % terraform import aws_cleanrooms_membership.membership 1234abcd-12ab-34cd-56ef-1234567890ab ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/cloud9_environment_ec2.html.markdown b/website/docs/cdktf/python/r/cloud9_environment_ec2.html.markdown index 28cf6cf5cb4a..85d14bb36e13 100644 --- a/website/docs/cdktf/python/r/cloud9_environment_ec2.html.markdown +++ b/website/docs/cdktf/python/r/cloud9_environment_ec2.html.markdown @@ -108,6 +108,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The name of the environment. * `instance_type` - (Required) The type of instance to connect to the environment, e.g., `t2.micro`. * `image_id` - (Required) The identifier for the Amazon Machine Image (AMI) that's used to create the EC2 instance. Valid values are @@ -135,4 +136,4 @@ This resource exports the following attributes in addition to the arguments abov * `tags_all` - A map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). * `type` - The type of the environment (e.g., `ssh` or `ec2`). 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/cloud9_environment_membership.html.markdown b/website/docs/cdktf/python/r/cloud9_environment_membership.html.markdown index 282d43498dc5..94ccb891f12b 100644 --- a/website/docs/cdktf/python/r/cloud9_environment_membership.html.markdown +++ b/website/docs/cdktf/python/r/cloud9_environment_membership.html.markdown @@ -51,6 +51,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `environment_id` - (Required) The ID of the environment that contains the environment member you want to add. * `permissions` - (Required) The type of environment member permissions you want to associate with this environment member. Allowed values are `read-only` and `read-write` . * `user_arn` - (Required) The Amazon Resource Name (ARN) of the environment member you want to add. 
@@ -87,4 +88,4 @@ Using `terraform import`, import Cloud9 environment membership using the `enviro % terraform import aws_cloud9_environment_membership.test environment-id#user-arn ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/cloudcontrolapi_resource.html.markdown b/website/docs/cdktf/python/r/cloudcontrolapi_resource.html.markdown index 4caf3cd7b7d2..1ea9232fd5f2 100644 --- a/website/docs/cdktf/python/r/cloudcontrolapi_resource.html.markdown +++ b/website/docs/cdktf/python/r/cloudcontrolapi_resource.html.markdown @@ -49,6 +49,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `role_arn` - (Optional) Amazon Resource Name (ARN) of the IAM Role to assume for operations. * `schema` - (Optional) JSON string of the CloudFormation resource type schema which is used for plan time validation where possible. Automatically fetched if not provided. In large scale environments with multiple resources using the same `type_name`, it is recommended to fetch the schema once via the [`aws_cloudformation_type` data source](/docs/providers/aws/d/cloudformation_type.html) and use this argument to reduce `DescribeType` API operation throttling. This value is marked sensitive only to prevent large plan differences from showing. * `type_version_id` - (Optional) Identifier of the CloudFormation resource type version. @@ -59,4 +60,4 @@ This resource exports the following attributes in addition to the arguments abov * `properties` - JSON string matching the CloudFormation resource type schema with current configuration. 
Underlying attributes can be referenced via the [`jsondecode()` function](https://www.terraform.io/docs/language/functions/jsondecode.html), for example, `jsondecode(data.aws_cloudcontrolapi_resource.example.properties)["example"]`. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/cloudformation_stack.html.markdown b/website/docs/cdktf/python/r/cloudformation_stack.html.markdown index 8b3e963f8a57..cb3005cfdd59 100644 --- a/website/docs/cdktf/python/r/cloudformation_stack.html.markdown +++ b/website/docs/cdktf/python/r/cloudformation_stack.html.markdown @@ -63,6 +63,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Stack name. * `template_body` - (Optional) Structure containing the template body (max size: 51,200 bytes). * `template_url` - (Optional) Location of a file containing the template body (max size: 460,800 bytes). @@ -123,4 +124,4 @@ Using `terraform import`, import Cloudformation Stacks using the `name`. 
For exa % terraform import aws_cloudformation_stack.stack networking-stack ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/cloudformation_stack_instances.html.markdown b/website/docs/cdktf/python/r/cloudformation_stack_instances.html.markdown index 554b59303a56..ec7acd7ad7ba 100644 --- a/website/docs/cdktf/python/r/cloudformation_stack_instances.html.markdown +++ b/website/docs/cdktf/python/r/cloudformation_stack_instances.html.markdown @@ -129,6 +129,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `accounts` - (Optional) Accounts where you want to create stack instances in the specified `regions`. You can specify either `accounts` or `deployment_targets`, but not both. * `deployment_targets` - (Optional) AWS Organizations accounts for which to create stack instances in the `regions`. stack sets doesn't deploy stack instances to the organization management account, even if the organization management account is in your organization or in an OU in your organization. Drift detection is not possible for most of this argument. See [deployment_targets](#deployment_targets) below. * `parameter_overrides` - (Optional) Key-value map of input parameters to override from the stack set for these instances. This argument's drift detection is limited to the first account and region since each instance can have unique parameters. 
@@ -233,4 +234,4 @@ Using `terraform import`, Import CloudFormation stack instances that target OUs, % terraform import aws_cloudformation_stack_instances.example example,SELF,OU ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/cloudformation_stack_set.html.markdown b/website/docs/cdktf/python/r/cloudformation_stack_set.html.markdown index 9477da1748c8..e6975f8425ee 100644 --- a/website/docs/cdktf/python/r/cloudformation_stack_set.html.markdown +++ b/website/docs/cdktf/python/r/cloudformation_stack_set.html.markdown @@ -108,6 +108,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `administration_role_arn` - (Optional) Amazon Resource Number (ARN) of the IAM Role in the administrator account. This must be defined when using the `SELF_MANAGED` permission model. * `auto_deployment` - (Optional) Configuration block containing the auto-deployment model for your StackSet. This can only be defined when using the `SERVICE_MANAGED` permission model. * `enabled` - (Optional) Whether or not auto-deployment is enabled. 
@@ -200,4 +201,4 @@ Using `terraform import`, import CloudFormation StackSets when acting a delegate % terraform import aws_cloudformation_stack_set.example example,DELEGATED_ADMIN ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/cloudformation_stack_set_instance.html.markdown b/website/docs/cdktf/python/r/cloudformation_stack_set_instance.html.markdown index 160d38e30d18..3984a24159f9 100644 --- a/website/docs/cdktf/python/r/cloudformation_stack_set_instance.html.markdown +++ b/website/docs/cdktf/python/r/cloudformation_stack_set_instance.html.markdown @@ -34,7 +34,7 @@ class MyConvertedCode(TerraformStack): super().__init__(scope, name) CloudformationStackSetInstance(self, "example", account_id="123456789012", - region="us-east-1", + stack_set_instance_region="us-east-1", stack_set_name=Token.as_string(aws_cloudformation_stack_set_example.name) ) ``` @@ -114,7 +114,7 @@ class MyConvertedCode(TerraformStack): ])) ] ), - region="us-east-1", + stack_set_instance_region="us-east-1", stack_set_name=Token.as_string(aws_cloudformation_stack_set_example.name) ) ``` @@ -125,12 +125,13 @@ This resource supports the following arguments: * `stack_set_name` - (Required) Name of the StackSet. * `account_id` - (Optional) Target AWS Account ID to create a Stack based on the StackSet. Defaults to current account. +* `call_as` - (Optional) Specifies whether you are acting as an account administrator in the organization's management account or as a delegated administrator in a member account. Valid values: `SELF` (default), `DELEGATED_ADMIN`. * `deployment_targets` - (Optional) AWS Organizations accounts to which StackSets deploys. StackSets doesn't deploy stack instances to the organization management account, even if the organization management account is in your organization or in an OU in your organization. Drift detection is not possible for this argument. 
See [deployment_targets](#deployment_targets-argument-reference) below. +* `operation_preferences` - (Optional) Preferences for how AWS CloudFormation performs a stack set operation. * `parameter_overrides` - (Optional) Key-value map of input parameters to override from the StackSet for this Instance. -* `region` - (Optional) Target AWS Region to create a Stack based on the StackSet. Defaults to current region. +* `region` - (Optional, **Deprecated**) Target AWS Region to create a Stack based on the StackSet. Defaults to current region. Use `stack_set_instance_region` instead. * `retain_stack` - (Optional) During Terraform resource destroy, remove Instance from StackSet while keeping the Stack and its associated resources. Must be enabled in Terraform state _before_ destroy operation to take effect. You cannot reassociate a retained Stack or add an existing, saved Stack to a new StackSet. Defaults to `false`. -* `call_as` - (Optional) Specifies whether you are acting as an account administrator in the organization's management account or as a delegated administrator in a member account. Valid values: `SELF` (default), `DELEGATED_ADMIN`. -* `operation_preferences` - (Optional) Preferences for how AWS CloudFormation performs a stack set operation. +* `stack_set_instance_region` - (Optional) Target AWS Region to create a Stack based on the StackSet. Defaults to current region. 
### `deployment_targets` Argument Reference @@ -247,4 +248,4 @@ Using `terraform import`, import CloudFormation StackSet Instances when acting a % terraform import aws_cloudformation_stack_set_instance.example example,ou-sdas-123123123/ou-sdas-789789789,us-east-1,DELEGATED_ADMIN ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/cloudformation_type.html.markdown b/website/docs/cdktf/python/r/cloudformation_type.html.markdown index 7394ca47af01..58fe9fe91a71 100644 --- a/website/docs/cdktf/python/r/cloudformation_type.html.markdown +++ b/website/docs/cdktf/python/r/cloudformation_type.html.markdown @@ -47,6 +47,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `execution_role_arn` - (Optional) Amazon Resource Name (ARN) of the IAM Role for CloudFormation to assume when invoking the extension. If your extension calls AWS APIs in any of its handlers, you must create an IAM execution role that includes the necessary permissions to call those AWS APIs, and provision that execution role in your account. When CloudFormation needs to invoke the extension handler, CloudFormation assumes this execution role to create a temporary session token, which it then passes to the extension handler, thereby supplying your extension with the appropriate credentials. * `logging_config` - (Optional) Configuration block containing logging configuration. * `schema_handler_package` - (Required) URL to the S3 bucket containing the extension project package that contains the necessary files for the extension you want to register. Must begin with `s3://` or `https://`. 
For example, `s3://example-bucket/example-object`. @@ -102,4 +103,4 @@ Using `terraform import`, import `aws_cloudformation_type` using the type versio % terraform import aws_cloudformation_type.example arn:aws:cloudformation:us-east-1:123456789012:type/resource/ExampleCompany-ExampleService-ExampleType/1 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/cloudfront_continuous_deployment_policy.html.markdown b/website/docs/cdktf/python/r/cloudfront_continuous_deployment_policy.html.markdown index 5be056d435f2..eefc5d87a2e8 100644 --- a/website/docs/cdktf/python/r/cloudfront_continuous_deployment_policy.html.markdown +++ b/website/docs/cdktf/python/r/cloudfront_continuous_deployment_policy.html.markdown @@ -163,8 +163,8 @@ The following arguments are required: ### `session_stickiness_config` -* `idle_ttl` - (Required) The amount of time in seconds after which sessions will cease if no requests are received. Valid values are `300` – `3600` (5–60 minutes). The value must be less than or equal to `maximum_ttl`. -* `maximum_ttl` - (Required) The maximum amount of time in seconds to consider requests from the viewer as being part of the same session. Valid values are `300` – `3600` (5–60 minutes). The value must be greater than or equal to `idle_ttl`. +* `idle_ttl` - (Required) The amount of time in seconds after which sessions will cease if no requests are received. Valid values are `300` - `3600` (5–60 minutes). The value must be less than or equal to `maximum_ttl`. +* `maximum_ttl` - (Required) The maximum amount of time in seconds to consider requests from the viewer as being part of the same session. Valid values are `300` - `3600` (5–60 minutes). The value must be greater than or equal to `idle_ttl`. 
## Attribute Reference @@ -200,4 +200,4 @@ Using `terraform import`, import CloudFront Continuous Deployment Policy using t % terraform import aws_cloudfront_continuous_deployment_policy.example abcd-1234 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/cloudfront_distribution.html.markdown b/website/docs/cdktf/python/r/cloudfront_distribution.html.markdown index 5a4037df90e4..42cb8aaed3b3 100644 --- a/website/docs/cdktf/python/r/cloudfront_distribution.html.markdown +++ b/website/docs/cdktf/python/r/cloudfront_distribution.html.markdown @@ -25,30 +25,48 @@ The example below creates a CloudFront distribution with an S3 origin. ```python # DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug from constructs import Construct -from cdktf import TerraformStack +from cdktf import Token, TerraformIterator, TerraformStack # # Provider bindings are generated by running `cdktf get`. # See https://cdk.tf/provider-generation for more details. 
# from imports.aws.cloudfront_distribution import CloudfrontDistribution +from imports.aws.cloudfront_origin_access_control import CloudfrontOriginAccessControl +from imports.aws.data_aws_acm_certificate import DataAwsAcmCertificate +from imports.aws.data_aws_iam_policy_document import DataAwsIamPolicyDocument +from imports.aws.data_aws_route53_zone import DataAwsRoute53Zone +from imports.aws.route53_record import Route53Record from imports.aws.s3_bucket import S3Bucket -from imports.aws.s3_bucket_acl import S3BucketAcl +from imports.aws.s3_bucket_policy import S3BucketPolicy class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) + my_domain = "mydomain.com" s3_origin_id = "myS3Origin" + default_var = CloudfrontOriginAccessControl(self, "default", + name="default-oac", + origin_access_control_origin_type="s3", + signing_behavior="always", + signing_protocol="sigv4" + ) b = S3Bucket(self, "b", bucket="mybucket", tags={ "Name": "My bucket" } ) - S3BucketAcl(self, "b_acl", - acl="private", - bucket=b.id + data_aws_acm_certificate_my_domain = DataAwsAcmCertificate(self, "my_domain", + domain="*.${" + my_domain + "}", + region="us-east-1", + statuses=["ISSUED"] ) - CloudfrontDistribution(self, "s3_distribution", - aliases=["mysite.example.com", "yoursite.example.com"], + data_aws_route53_zone_my_domain = DataAwsRoute53Zone(self, "my_domain_3", + name=my_domain + ) + # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. 
+ data_aws_route53_zone_my_domain.override_logical_id("my_domain") + s3_distribution = CloudfrontDistribution(self, "s3_distribution", + aliases=["mysite.${" + my_domain + "}", "yoursite.${" + my_domain + "}"], comment="Some comment", default_cache_behavior=CloudfrontDistributionDefaultCacheBehavior( allowed_methods=["DELETE", "GET", "HEAD", "OPTIONS", "PATCH", "POST", "PUT" @@ -69,11 +87,6 @@ class MyConvertedCode(TerraformStack): default_root_object="index.html", enabled=True, is_ipv6_enabled=True, - logging_config=CloudfrontDistributionLoggingConfig( - bucket="mylogs.s3.amazonaws.com", - include_cookies=False, - prefix="myprefix" - ), ordered_cache_behavior=[CloudfrontDistributionOrderedCacheBehavior( allowed_methods=["GET", "HEAD", "OPTIONS"], cached_methods=["GET", "HEAD", "OPTIONS"], @@ -126,9 +139,53 @@ class MyConvertedCode(TerraformStack): "Environment": "production" }, viewer_certificate=CloudfrontDistributionViewerCertificate( - cloudfront_default_certificate=True + acm_certificate_arn=Token.as_string(data_aws_acm_certificate_my_domain.arn), + ssl_support_method="sni-only" ) ) + # In most cases loops should be handled in the programming language context and + # not inside of the Terraform context. If you are looping over something external, e.g. a variable or a file input + # you should consider using a for loop. If you are looping over something only known to Terraform, e.g. a result of a data source + # you need to keep this like it is. 
+ cloudfront_for_each_iterator = TerraformIterator.from_list( + Token.as_any(s3_distribution.aliases)) + Route53Record(self, "cloudfront", + alias=Route53RecordAlias( + evaluate_target_health=False, + name=s3_distribution.domain_name, + zone_id=s3_distribution.hosted_zone_id + ), + name=Token.as_string(cloudfront_for_each_iterator.value), + type="A", + zone_id=Token.as_string(data_aws_route53_zone_my_domain.zone_id), + for_each=cloudfront_for_each_iterator + ) + origin_bucket_policy = DataAwsIamPolicyDocument(self, "origin_bucket_policy", + statement=[DataAwsIamPolicyDocumentStatement( + actions=["s3:GetObject", "s3:PutObject"], + condition=[DataAwsIamPolicyDocumentStatementCondition( + test="StringEquals", + values=[s3_distribution.arn], + variable="AWS:SourceArn" + ) + ], + effect="Allow", + principals=[DataAwsIamPolicyDocumentStatementPrincipals( + identifiers=["cloudfront.amazonaws.com"], + type="Service" + ) + ], + resources=["${" + b.arn + "}/*"], + sid="AllowCloudFrontServicePrincipalReadWrite" + ) + ] + ) + aws_s3_bucket_policy_b = S3BucketPolicy(self, "b_7", + bucket=b.bucket, + policy=Token.as_string(origin_bucket_policy.json) + ) + # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. 
+ aws_s3_bucket_policy_b.override_logical_id("b") ``` ### With Failover Routing @@ -201,7 +258,7 @@ from cdktf import TerraformStack # from imports.aws.cloudfront_distribution import CloudfrontDistribution class MyConvertedCode(TerraformStack): - def __init__(self, scope, name, *, cachedMethods, viewerProtocolPolicy): + def __init__(self, scope, name): super().__init__(scope, name) s3_origin_id = "myS3Origin" CloudfrontDistribution(self, "s3_distribution", @@ -209,9 +266,9 @@ class MyConvertedCode(TerraformStack): default_cache_behavior=CloudfrontDistributionDefaultCacheBehavior( allowed_methods=["GET", "HEAD", "OPTIONS"], cache_policy_id="4135ea2d-6df8-44a3-9df3-4b5a84be39ad", + cached_methods=["GET", "HEAD"], target_origin_id=s3_origin_id, - cached_methods=cached_methods, - viewer_protocol_policy=viewer_protocol_policy + viewer_protocol_policy="allow-all" ), default_root_object="index.html", enabled=True, @@ -252,20 +309,11 @@ from imports.aws.cloudfront_distribution import CloudfrontDistribution from imports.aws.cloudwatch_log_delivery import CloudwatchLogDelivery from imports.aws.cloudwatch_log_delivery_destination import CloudwatchLogDeliveryDestination from imports.aws.cloudwatch_log_delivery_source import CloudwatchLogDeliverySource -from imports.aws.provider import AwsProvider from imports.aws.s3_bucket import S3Bucket class MyConvertedCode(TerraformStack): def __init__(self, scope, name, *, defaultCacheBehavior, enabled, origin, restrictions, viewerCertificate): super().__init__(scope, name) - AwsProvider(self, "aws", - region=region.string_value - ) - us_east1 = AwsProvider(self, "aws_1", - alias="us_east_1", - region="us-east-1" - ) example = CloudfrontDistribution(self, "example", - provider=us_east1, default_cache_behavior=default_cache_behavior, enabled=enabled, origin=origin, @@ -273,36 +321,36 @@ class MyConvertedCode(TerraformStack): viewer_certificate=viewer_certificate ) aws_cloudwatch_log_delivery_source_example = - 
CloudwatchLogDeliverySource(self, "example_3", + CloudwatchLogDeliverySource(self, "example_1", log_type="ACCESS_LOGS", name="example", - provider=us_east1, + region="us-east-1", resource_arn=example.arn ) # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. aws_cloudwatch_log_delivery_source_example.override_logical_id("example") - aws_s3_bucket_example = S3Bucket(self, "example_4", + aws_s3_bucket_example = S3Bucket(self, "example_2", bucket="testbucket", force_destroy=True ) # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. aws_s3_bucket_example.override_logical_id("example") aws_cloudwatch_log_delivery_destination_example = - CloudwatchLogDeliveryDestination(self, "example_5", + CloudwatchLogDeliveryDestination(self, "example_3", delivery_destination_configuration=[CloudwatchLogDeliveryDestinationDeliveryDestinationConfiguration( destination_resource_arn="${" + aws_s3_bucket_example.arn + "}/prefix" ) ], name="s3-destination", output_format="parquet", - provider=us_east1 + region="us-east-1" ) # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. 
aws_cloudwatch_log_delivery_destination_example.override_logical_id("example") - aws_cloudwatch_log_delivery_example = CloudwatchLogDelivery(self, "example_6", + aws_cloudwatch_log_delivery_example = CloudwatchLogDelivery(self, "example_4", delivery_destination_arn=Token.as_string(aws_cloudwatch_log_delivery_destination_example.arn), delivery_source_name=Token.as_string(aws_cloudwatch_log_delivery_source_example.name), - provider=us_east1, + region="us-east-1", s3_delivery_configuration=[CloudwatchLogDeliveryS3DeliveryConfiguration( suffix_path="/123456678910/{DistributionId}/{yyyy}/{MM}/{dd}/{HH}" ) @@ -312,11 +360,77 @@ class MyConvertedCode(TerraformStack): aws_cloudwatch_log_delivery_example.override_logical_id("example") ``` +### With V2 logging to Data Firehose + +The example below creates a CloudFront distribution with [standard logging V2 to Data Firehose](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/standard-logging.html#enable-access-logging-api). + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import Token, TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. 
+# +from imports.aws.cloudfront_distribution import CloudfrontDistribution +from imports.aws.cloudwatch_log_delivery import CloudwatchLogDelivery +from imports.aws.cloudwatch_log_delivery_destination import CloudwatchLogDeliveryDestination +from imports.aws.cloudwatch_log_delivery_source import CloudwatchLogDeliverySource +from imports.aws.kinesis_firehose_delivery_stream import KinesisFirehoseDeliveryStream +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name, *, defaultCacheBehavior, enabled, origin, restrictions, viewerCertificate, destination, name): + super().__init__(scope, name) + example = CloudfrontDistribution(self, "example", + default_cache_behavior=default_cache_behavior, + enabled=enabled, + origin=origin, + restrictions=restrictions, + viewer_certificate=viewer_certificate + ) + aws_cloudwatch_log_delivery_source_example = + CloudwatchLogDeliverySource(self, "example_1", + log_type="ACCESS_LOGS", + name="cloudfront-logs-source", + region="us-east-1", + resource_arn=example.arn + ) + # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. + aws_cloudwatch_log_delivery_source_example.override_logical_id("example") + cloudfront_logs = KinesisFirehoseDeliveryStream(self, "cloudfront_logs", + region="us-east-1", + tags={ + "LogDeliveryEnabled": "true" + }, + destination=destination, + name=name + ) + aws_cloudwatch_log_delivery_destination_example = + CloudwatchLogDeliveryDestination(self, "example_3", + delivery_destination_configuration=[CloudwatchLogDeliveryDestinationDeliveryDestinationConfiguration( + destination_resource_arn=cloudfront_logs.arn + ) + ], + name="firehose-destination", + output_format="json", + region="us-east-1" + ) + # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. 
+ aws_cloudwatch_log_delivery_destination_example.override_logical_id("example") + aws_cloudwatch_log_delivery_example = CloudwatchLogDelivery(self, "example_4", + delivery_destination_arn=Token.as_string(aws_cloudwatch_log_delivery_destination_example.arn), + delivery_source_name=Token.as_string(aws_cloudwatch_log_delivery_source_example.name), + region="us-east-1" + ) + # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. + aws_cloudwatch_log_delivery_example.override_logical_id("example") +``` + ## Argument Reference This resource supports the following arguments: * `aliases` (Optional) - Extra CNAMEs (alternate domain names), if any, for this distribution. +* `anycast_ip_list_id` (Optional) - ID of the Anycast static IP list that is associated with the distribution. * `comment` (Optional) - Any comments you want to include about the distribution. * `continuous_deployment_policy_id` (Optional) - Identifier of a continuous deployment policy. This argument should only be set on a production distribution. See the [`aws_cloudfront_continuous_deployment_policy` resource](./cloudfront_continuous_deployment_policy.html.markdown) for additional details. * `custom_error_response` (Optional) - One or more [custom error response](#custom-error-response-arguments) elements (multiples allowed). @@ -474,6 +588,8 @@ class MyConvertedCode(TerraformStack): #### Custom Error Response Arguments +~> **NOTE:** When specifying either `response_page_path` or `response_code`, **both** must be set. + * `error_caching_min_ttl` (Optional) - Minimum amount of time you want HTTP error codes to stay in CloudFront caches before CloudFront queries your origin to see whether the object has been updated. * `error_code` (Required) - 4xx or 5xx HTTP status code that you want to customize. * `response_code` (Optional) - HTTP status code that you want CloudFront to return with the custom error page to the viewer. 
@@ -502,13 +618,15 @@ argument should not be specified. * `origin_id` (Required) - Unique identifier for the origin. * `origin_path` (Optional) - Optional element that causes CloudFront to request your content from a directory in your Amazon S3 bucket or your custom origin. * `origin_shield` - (Optional) [CloudFront Origin Shield](#origin-shield-arguments) configuration information. Using Origin Shield can help reduce the load on your origin. For more information, see [Using Origin Shield](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/origin-shield.html) in the Amazon CloudFront Developer Guide. +* `response_completion_timeout` - (Optional) Time (in seconds) that a request from CloudFront to the origin can stay open and wait for a response. Must be integer greater than or equal to the value of `origin_read_timeout`. If omitted or explicitly set to `0`, no maximum value is enforced. * `s3_origin_config` - (Optional) [CloudFront S3 origin](#s3-origin-config-arguments) configuration information. If a custom origin is required, use `custom_origin_config` instead. -* `vpc_origin_config` - (Optional) The VPC origin configuration. +* `vpc_origin_config` - (Optional) The [VPC origin configuration](#vpc-origin-config-arguments). ##### Custom Origin Config Arguments * `http_port` (Required) - HTTP port the custom origin listens on. * `https_port` (Required) - HTTPS port the custom origin listens on. +* `ip_address_type` (Optional) - IP protocol CloudFront uses when connecting to your origin. Valid values: `ipv4`, `ipv6`, `dualstack`. * `origin_protocol_policy` (Required) - Origin protocol policy to apply to your origin. One of `http-only`, `https-only`, or `match-viewer`. * `origin_ssl_protocols` (Required) - List of SSL/TLS protocols that CloudFront can use when connecting to your origin over HTTPS. Valid values: `SSLv3`, `TLSv1`, `TLSv1.1`, `TLSv1.2`. 
For more information, see [Minimum Origin SSL Protocol](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/distribution-web-values-specify.html#DownloadDistValuesOriginSSLProtocols) in the Amazon CloudFront Developer Guide. * `origin_keepalive_timeout` - (Optional) The Custom KeepAlive timeout, in seconds. By default, AWS enforces an upper limit of `60`. But you can request an [increase](http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/RequestAndResponseBehaviorCustomOrigin.html#request-custom-request-timeout). Defaults to `5`. @@ -618,4 +736,4 @@ Using `terraform import`, import CloudFront Distributions using the `id`. For ex % terraform import aws_cloudfront_distribution.distribution E74FTE3EXAMPLE ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/cloudfront_function.html.markdown b/website/docs/cdktf/python/r/cloudfront_function.html.markdown index 9d5a3d127da1..7f66cfe10d2a 100644 --- a/website/docs/cdktf/python/r/cloudfront_function.html.markdown +++ b/website/docs/cdktf/python/r/cloudfront_function.html.markdown @@ -53,7 +53,7 @@ The following arguments are optional: * `comment` - (Optional) Comment. * `publish` - (Optional) Whether to publish creation/change as Live CloudFront Function Version. Defaults to `true`. -* `key_value_store_associations` - (Optional) List of `aws_cloudfront_key_value_store` ARNs to be associated to the function. AWS limits associations to on key value store per function. +* `key_value_store_associations` - (Optional) List of `aws_cloudfront_key_value_store` ARNs to be associated to the function. AWS limits associations to one key value store per function. ## Attribute Reference @@ -89,4 +89,4 @@ Using `terraform import`, import CloudFront Functions using the `name`. 
For exam % terraform import aws_cloudfront_function.test my_test_function ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/cloudfront_key_value_store.html.markdown b/website/docs/cdktf/python/r/cloudfront_key_value_store.html.markdown index e16030b4fd21..7332a3c42234 100644 --- a/website/docs/cdktf/python/r/cloudfront_key_value_store.html.markdown +++ b/website/docs/cdktf/python/r/cloudfront_key_value_store.html.markdown @@ -49,8 +49,8 @@ The following arguments are optional: This resource exports the following attributes in addition to the arguments above: * `arn` - Amazon Resource Name (ARN) identifying your CloudFront KeyValueStore. -* `id` - A unique identifier for the KeyValueStore. Same as `name`. * `etag` - ETag hash of the KeyValueStore. +* `id` - A unique identifier for the KeyValueStore. ## Timeouts @@ -60,6 +60,31 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_cloudfront_key_value_store.example + identity = { + name = "example_store" + } +} + +resource "aws_cloudfront_key_value_store" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `name` (String) Name of the CloudFront Key Value Store. + +#### Optional + +* `account_id` (String) AWS Account where this resource is managed. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import CloudFront Key Value Store using the `name`. For example: ```python @@ -83,4 +108,4 @@ Using `terraform import`, import CloudFront Key Value Store using the `name`. 
Fo % terraform import aws_cloudfront_key_value_store.example example_store ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/cloudfront_realtime_log_config.html.markdown b/website/docs/cdktf/python/r/cloudfront_realtime_log_config.html.markdown index aa01ae39fcad..92d0faa982bc 100644 --- a/website/docs/cdktf/python/r/cloudfront_realtime_log_config.html.markdown +++ b/website/docs/cdktf/python/r/cloudfront_realtime_log_config.html.markdown @@ -110,6 +110,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_cloudfront_realtime_log_config.example + identity = { + "arn" = "arn:aws:cloudfront::123456789012:realtime-log-config/ExampleNameForRealtimeLogConfig" + } +} + +resource "aws_cloudfront_realtime_log_config" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the CloudFront real-time log configuration. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import CloudFront real-time log configurations using the ARN. 
For example: ```python @@ -133,4 +154,4 @@ Using `terraform import`, import CloudFront real-time log configurations using t % terraform import aws_cloudfront_realtime_log_config.example arn:aws:cloudfront::111122223333:realtime-log-config/ExampleNameForRealtimeLogConfig ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/cloudfrontkeyvaluestore_key.html.markdown b/website/docs/cdktf/python/r/cloudfrontkeyvaluestore_key.html.markdown index 57ef50dfd362..8d54aed3ec15 100644 --- a/website/docs/cdktf/python/r/cloudfrontkeyvaluestore_key.html.markdown +++ b/website/docs/cdktf/python/r/cloudfrontkeyvaluestore_key.html.markdown @@ -61,7 +61,34 @@ This resource exports the following attributes in addition to the arguments abov ## Import -In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import CloudFront KeyValueStore Key using the `example_id_arg`. For example: +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_cloudfrontkeyvaluestore_key.example + identity = { + key_value_store_arn = "arn:aws:cloudfront::111111111111:key-value-store/8562g61f-caba-2845-9d99-b97diwae5d3c" + key = "someKey" + } +} + +resource "aws_cloudfrontkeyvaluestore_key" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `key_value_store_arn` (String) ARN of the CloudFront Key Value Store. +* `key` (String) Key name. + +#### Optional + +* `account_id` (String) AWS Account where this resource is managed. + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import CloudFront KeyValueStore Key using the `key_value_store_arn` and 'key' separated by `,`. For example: ```python # DO NOT EDIT. 
Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug @@ -78,10 +105,10 @@ class MyConvertedCode(TerraformStack): CloudfrontkeyvaluestoreKey.generate_config_for_import(self, "example", "arn:aws:cloudfront::111111111111:key-value-store/8562g61f-caba-2845-9d99-b97diwae5d3c,someKey") ``` -Using `terraform import`, import CloudFront KeyValueStore Key using the `id`. For example: +Using `terraform import`, import CloudFront KeyValueStore Key using the `key_value_store_arn` and 'key' separated by `,`. For example: ```console % terraform import aws_cloudfrontkeyvaluestore_key.example arn:aws:cloudfront::111111111111:key-value-store/8562g61f-caba-2845-9d99-b97diwae5d3c,someKey ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/cloudhsm_v2_cluster.html.markdown b/website/docs/cdktf/python/r/cloudhsm_v2_cluster.html.markdown index c177ee4c62cd..b5c0ee74e284 100644 --- a/website/docs/cdktf/python/r/cloudhsm_v2_cluster.html.markdown +++ b/website/docs/cdktf/python/r/cloudhsm_v2_cluster.html.markdown @@ -82,6 +82,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `source_backup_identifier` - (Optional) ID of Cloud HSM v2 cluster backup to be restored. * `hsm_type` - (Required) The type of HSM module in the cluster. Currently, `hsm1.medium` and `hsm2m.medium` are supported. * `subnet_ids` - (Required) The IDs of subnets in which cluster will operate. @@ -132,4 +133,4 @@ Using `terraform import`, import CloudHSM v2 Clusters using the cluster `id`. 
Fo % terraform import aws_cloudhsm_v2_cluster.test_cluster cluster-aeb282a201 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/cloudhsm_v2_hsm.html.markdown b/website/docs/cdktf/python/r/cloudhsm_v2_hsm.html.markdown index 065d832016af..814ad28088ec 100644 --- a/website/docs/cdktf/python/r/cloudhsm_v2_hsm.html.markdown +++ b/website/docs/cdktf/python/r/cloudhsm_v2_hsm.html.markdown @@ -42,13 +42,14 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: -~> **NOTE:** Either `subnet_id` or `availability_zone` must be specified. - +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `cluster_id` - (Required) The ID of Cloud HSM v2 cluster to which HSM will be added. * `subnet_id` - (Optional) The ID of subnet in which HSM module will be located. Conflicts with `availability_zone`. * `availability_zone` - (Optional) The IDs of AZ in which HSM module will be located. Conflicts with `subnet_id`. * `ip_address` - (Optional) The IP address of HSM module. Must be within the CIDR of selected subnet. +~> **NOTE:** Either `subnet_id` or `availability_zone` must be specified. + ## Attribute Reference This resource exports the following attributes in addition to the arguments above: @@ -86,4 +87,4 @@ Using `terraform import`, import HSM modules using their HSM ID. 
For example: % terraform import aws_cloudhsm_v2_hsm.bar hsm-quo8dahtaca ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/cloudsearch_domain.html.markdown b/website/docs/cdktf/python/r/cloudsearch_domain.html.markdown index 3ced77999f0e..72c4008af953 100644 --- a/website/docs/cdktf/python/r/cloudsearch_domain.html.markdown +++ b/website/docs/cdktf/python/r/cloudsearch_domain.html.markdown @@ -58,6 +58,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `endpoint_options` - (Optional) Domain endpoint options. Documented below. * `index_field` - (Optional) The index fields for documents added to the domain. Documented below. * `multi_az` - (Optional) Whether or not to maintain extra instances for the domain in a second Availability Zone to ensure high availability. @@ -136,4 +137,4 @@ Using `terraform import`, import CloudSearch Domains using the `name`. 
For examp % terraform import aws_cloudsearch_domain.example example-domain ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/cloudsearch_domain_service_access_policy.html.markdown b/website/docs/cdktf/python/r/cloudsearch_domain_service_access_policy.html.markdown index 2072abaa9197..249166fc65dd 100644 --- a/website/docs/cdktf/python/r/cloudsearch_domain_service_access_policy.html.markdown +++ b/website/docs/cdktf/python/r/cloudsearch_domain_service_access_policy.html.markdown @@ -67,6 +67,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `access_policy` - (Required) The access rules you want to configure. These rules replace any existing rules. See the [AWS documentation](https://docs.aws.amazon.com/cloudsearch/latest/developerguide/configuring-access.html) for details. * `domain_name` - (Required) The CloudSearch domain name the policy applies to. 
@@ -106,4 +107,4 @@ Using `terraform import`, import CloudSearch domain service access policies usin % terraform import aws_cloudsearch_domain_service_access_policy.example example-domain ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/cloudtrail.html.markdown b/website/docs/cdktf/python/r/cloudtrail.html.markdown index 542356f83c20..ec8fc77c6b01 100644 --- a/website/docs/cdktf/python/r/cloudtrail.html.markdown +++ b/website/docs/cdktf/python/r/cloudtrail.html.markdown @@ -57,7 +57,7 @@ class MyConvertedCode(TerraformStack): actions=["s3:GetBucketAcl"], condition=[DataAwsIamPolicyDocumentStatementCondition( test="StringEquals", - values=["arn:${" + data_aws_partition_current.partition + "}:cloudtrail:${" + data_aws_region_current.name + "}:${" + current.account_id + "}:trail/example" + values=["arn:${" + data_aws_partition_current.partition + "}:cloudtrail:${" + data_aws_region_current.region + "}:${" + current.account_id + "}:trail/example" ], variable="aws:SourceArn" ) @@ -78,7 +78,7 @@ class MyConvertedCode(TerraformStack): variable="s3:x-amz-acl" ), DataAwsIamPolicyDocumentStatementCondition( test="StringEquals", - values=["arn:${" + data_aws_partition_current.partition + "}:cloudtrail:${" + data_aws_region_current.name + "}:${" + current.account_id + "}:trail/example" + values=["arn:${" + data_aws_partition_current.partition + "}:cloudtrail:${" + data_aws_region_current.region + "}:${" + current.account_id + "}:trail/example" ], variable="aws:SourceArn" ) @@ -372,6 +372,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
* `advanced_event_selector` - (Optional) Specifies an advanced event selector for enabling data event logging. Fields documented below. Conflicts with `event_selector`. * `cloud_watch_logs_group_arn` - (Optional) Log group name using an ARN that represents the log group to which CloudTrail logs will be delivered. Note that CloudTrail requires the Log Stream wildcard. * `cloud_watch_logs_role_arn` - (Optional) Role for the CloudWatch Logs endpoint to assume to write to a user’s log group. @@ -453,4 +454,4 @@ Using `terraform import`, import Cloudtrails using the `arn`. For example: % terraform import aws_cloudtrail.sample arn:aws:cloudtrail:us-east-1:123456789012:trail/my-sample-trail ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/cloudtrail_event_data_store.html.markdown b/website/docs/cdktf/python/r/cloudtrail_event_data_store.html.markdown index 4669864dc89d..2cd74ffb5476 100644 --- a/website/docs/cdktf/python/r/cloudtrail_event_data_store.html.markdown +++ b/website/docs/cdktf/python/r/cloudtrail_event_data_store.html.markdown @@ -90,6 +90,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +- `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). - `name` - (Required) The name of the event data store. - `billing_mode` - (Optional) The billing mode for the event data store. The valid values are `EXTENDABLE_RETENTION_PRICING` and `FIXED_RETENTION_PRICING`. Defaults to `EXTENDABLE_RETENTION_PRICING`. - `suspend` - (Optional) Specifies whether to stop ingesting new events into the event data store. If set to `true`, ingestion is suspended while maintaining the ability to query existing events. 
If set to `false`, ingestion is active. @@ -130,6 +131,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_cloudtrail_event_data_store.example + identity = { + "arn" = "arn:aws:cloudtrail:us-east-1:123456789012:eventdatastore/example-event-data-store-id" + } +} + +resource "aws_cloudtrail_event_data_store" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the CloudTrail event data store. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import event data stores using their `arn`. For example: ```python @@ -153,4 +175,4 @@ Using `terraform import`, import event data stores using their `arn`. For exampl % terraform import aws_cloudtrail_event_data_store.example arn:aws:cloudtrail:us-east-1:123456789123:eventdatastore/22333815-4414-412c-b155-dd254033gfhf ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/cloudwatch_composite_alarm.html.markdown b/website/docs/cdktf/python/r/cloudwatch_composite_alarm.html.markdown index 840e123c94a2..1c9bb4499e7c 100644 --- a/website/docs/cdktf/python/r/cloudwatch_composite_alarm.html.markdown +++ b/website/docs/cdktf/python/r/cloudwatch_composite_alarm.html.markdown @@ -46,6 +46,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `actions_enabled` - (Optional, Forces new resource) Indicates whether actions should be executed during any changes to the alarm state of the composite alarm. Defaults to `true`. * `actions_suppressor` - (Optional) Actions will be suppressed if the suppressor alarm is in the ALARM state. * `alarm` - (Required) Can be an AlarmName or an Amazon Resource Name (ARN) from an existing alarm. @@ -92,4 +93,4 @@ Using `terraform import`, import a CloudWatch Composite Alarm using the `alarm_n % terraform import aws_cloudwatch_composite_alarm.test my-alarm ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/cloudwatch_contributor_insight_rule.html.markdown b/website/docs/cdktf/python/r/cloudwatch_contributor_insight_rule.html.markdown index c3c8839747c7..e671763ff207 100644 --- a/website/docs/cdktf/python/r/cloudwatch_contributor_insight_rule.html.markdown +++ b/website/docs/cdktf/python/r/cloudwatch_contributor_insight_rule.html.markdown @@ -44,6 +44,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `rule_state` - (Optional) State of the rule. Valid values are `ENABLED` and `DISABLED`. 
## Attribute Reference @@ -77,4 +78,4 @@ Using `terraform import`, import CloudWatch Contributor Insight Rule using the ` % terraform import aws_cloudwatch_contributor_insight_rule.example contributor_insight_rule-name ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/cloudwatch_contributor_managed_insight_rule.html.markdown b/website/docs/cdktf/python/r/cloudwatch_contributor_managed_insight_rule.html.markdown index 5c26f91dd8cd..b7274390cd41 100644 --- a/website/docs/cdktf/python/r/cloudwatch_contributor_managed_insight_rule.html.markdown +++ b/website/docs/cdktf/python/r/cloudwatch_contributor_managed_insight_rule.html.markdown @@ -44,6 +44,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `rule_state` - (Optional) State of the rule. Valid values are `ENABLED` and `DISABLED`. 
## Attribute Reference @@ -77,4 +78,4 @@ Using `terraform import`, import CloudWatch Contributor Managed Insight Rule usi % terraform import aws_cloudwatch_contributor_managed_insight_rule.example contributor_managed_insight_rule-id-12345678 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/cloudwatch_dashboard.html.markdown b/website/docs/cdktf/python/r/cloudwatch_dashboard.html.markdown index 9b47ece6a912..ab64d465f19d 100644 --- a/website/docs/cdktf/python/r/cloudwatch_dashboard.html.markdown +++ b/website/docs/cdktf/python/r/cloudwatch_dashboard.html.markdown @@ -63,6 +63,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `dashboard_name` - (Required) The name of the dashboard. * `dashboard_body` - (Required) The detailed information about the dashboard, including what widgets are included and their location on the dashboard. You can read more about the body structure in the [documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/CloudWatch-Dashboard-Body-Structure.html). 
@@ -97,4 +98,4 @@ Using `terraform import`, import CloudWatch dashboards using the `dashboard_name % terraform import aws_cloudwatch_dashboard.sample dashboard_name ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/cloudwatch_event_api_destination.html.markdown b/website/docs/cdktf/python/r/cloudwatch_event_api_destination.html.markdown index 54ef4723ba56..6a6932b69510 100644 --- a/website/docs/cdktf/python/r/cloudwatch_event_api_destination.html.markdown +++ b/website/docs/cdktf/python/r/cloudwatch_event_api_destination.html.markdown @@ -42,6 +42,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The name of the new API Destination. The name must be unique for your account. Maximum of 64 characters consisting of numbers, lower/upper case letters, .,-,_. * `description` - (Optional) The description of the new API Destination. Maximum of 512 characters. * `invocation_endpoint` - (Required) URL endpoint to invoke as a target. This could be a valid endpoint generated by a partner service. You can include "*" as path parameters wildcards to be set from the Target HttpParameters. @@ -80,4 +81,4 @@ Using `terraform import`, import EventBridge API Destinations using the `name`. 
% terraform import aws_cloudwatch_event_api_destination.test api-destination ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/cloudwatch_event_archive.html.markdown b/website/docs/cdktf/python/r/cloudwatch_event_archive.html.markdown index f7c14bc87978..fd8a5cd859ba 100644 --- a/website/docs/cdktf/python/r/cloudwatch_event_archive.html.markdown +++ b/website/docs/cdktf/python/r/cloudwatch_event_archive.html.markdown @@ -40,7 +40,7 @@ class MyConvertedCode(TerraformStack): aws_cloudwatch_event_archive_order.override_logical_id("order") ``` -## Example all optional arguments +## Example Usage Optional Arguments ```python # DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug @@ -72,21 +72,101 @@ class MyConvertedCode(TerraformStack): aws_cloudwatch_event_archive_order.override_logical_id("order") ``` +## Example Usage CMK Encryption + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import Fn, Token, TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.cloudwatch_event_archive import CloudwatchEventArchive +from imports.aws.cloudwatch_event_bus import CloudwatchEventBus +from imports.aws.data_aws_caller_identity import DataAwsCallerIdentity +from imports.aws.data_aws_partition import DataAwsPartition +from imports.aws.kms_key import KmsKey +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + example = CloudwatchEventBus(self, "example", + name="example" + ) + current = DataAwsCallerIdentity(self, "current") + data_aws_partition_current = DataAwsPartition(self, "current_2") + # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. 
+ data_aws_partition_current.override_logical_id("current") + aws_kms_key_example = KmsKey(self, "example_3", + deletion_window_in_days=7, + policy=Token.as_string( + Fn.jsonencode({ + "Id": "key-policy-example", + "Statement": [{ + "Action": "kms:*", + "Effect": "Allow", + "Principal": { + "AWS": "arn:${" + data_aws_partition_current.partition + "}:iam::${" + current.account_id + "}:root" + }, + "Resource": "*", + "Sid": "Enable IAM User Permissions" + }, { + "Action": ["kms:DescribeKey"], + "Effect": "Allow", + "Principal": { + "Service": "events.amazonaws.com" + }, + "Resource": "*", + "Sid": "Allow describing of the key" + }, { + "Action": ["kms:GenerateDataKey", "kms:Decrypt", "kms:ReEncrypt*"], + "Condition": { + "StringEquals": { + "kms:EncryptionContext:aws:events:event-bus:arn": example.arn + } + }, + "Effect": "Allow", + "Principal": { + "Service": "events.amazonaws.com" + }, + "Resource": "*", + "Sid": "Allow use of the key" + } + ], + "Version": "2012-10-17" + })), + tags={ + "EventBridgeApiDestinations": "true" + } + ) + # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. + aws_kms_key_example.override_logical_id("example") + aws_cloudwatch_event_archive_example = CloudwatchEventArchive(self, "example_4", + event_source_arn=example.arn, + kms_key_identifier=Token.as_string(aws_kms_key_example.id), + name="example" + ) + # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. + aws_cloudwatch_event_archive_example.override_logical_id("example") +``` + ## Argument Reference This resource supports the following arguments: -* `name` - (Required) The name of the new event archive. The archive name cannot exceed 48 characters. -* `event_source_arn` - (Required) Event bus source ARN from where these events should be archived. -* `description` - (Optional) The description of the new event archive.
-* `event_pattern` - (Optional) Instructs the new event archive to only capture events matched by this pattern. By default, it attempts to archive every event received in the `event_source_arn`. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `name` - (Required) Name of the archive. The archive name cannot exceed 48 characters. +* `event_source_arn` - (Required) ARN of the event bus associated with the archive. Only events from this event bus are sent to the archive. +* `description` - (Optional) Description for the archive. +* `event_pattern` - (Optional) Event pattern to use to filter events sent to the archive. By default, it attempts to archive every event received in the `event_source_arn`. +* `kms_key_identifier` - (Optional) Identifier of the AWS KMS customer managed key for EventBridge to use, if you choose to use a customer managed key to encrypt this archive. The identifier can be the key Amazon Resource Name (ARN), KeyId, key alias, or key alias ARN. * `retention_days` - (Optional) The maximum number of days to retain events in the new event archive. By default, it archives indefinitely. ## Attribute Reference This resource exports the following attributes in addition to the arguments above: -* `arn` - The Amazon Resource Name (ARN) of the event archive. +* `arn` - ARN of the archive. ## Import @@ -113,4 +193,4 @@ Using `terraform import`, import an EventBridge archive using the `name`. 
For ex % terraform import aws_cloudwatch_event_archive.imported_event_archive order-archive ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/cloudwatch_event_bus.html.markdown b/website/docs/cdktf/python/r/cloudwatch_event_bus.html.markdown index 70d053a5c29f..dd6d288a8f13 100644 --- a/website/docs/cdktf/python/r/cloudwatch_event_bus.html.markdown +++ b/website/docs/cdktf/python/r/cloudwatch_event_bus.html.markdown @@ -16,6 +16,8 @@ Provides an EventBridge event bus resource. ## Example Usage +### Basic Usages + ```python # DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug from constructs import Construct @@ -58,21 +60,249 @@ class MyConvertedCode(TerraformStack): aws_cloudwatch_event_bus_examplepartner.override_logical_id("examplepartner") ``` +### Logging to CloudWatch Logs, S3, and Data Firehose + +See [Configuring logs for Amazon EventBridge event buses](https://docs.aws.amazon.com/eventbridge/latest/userguide/eb-event-bus-logs.html) for more details. 
+ +#### Required Resources + +* EventBridge Event Bus with `log_config` configured +* Log destinations: + + * CloudWatch Logs log group + * S3 bucket + * Data Firehose delivery stream + +* Resource-based policy or tagging for the service-linked role: + + * CloudWatch Logs log group - `aws_cloudwatch_log_resource_policy` to allow `delivery.logs.amazonaws.com` to put logs into the log group + * S3 bucket - `aws_s3_bucket_policy` to allow `delivery.logs.amazonaws.com` to put logs into the bucket + * Data Firehose delivery stream - tagging the delivery stream with `LogDeliveryEnabled = "true"` to allow the service-linked role `AWSServiceRoleForLogDelivery` to deliver logs + +* CloudWatch Logs Delivery: + + * `aws_cloudwatch_log_delivery_source` for each log type (INFO, ERROR, TRACE) + * `aws_cloudwatch_log_delivery_destination` for the log destination (S3 bucket, CloudWatch Logs log group, or Data Firehose delivery stream) + * `aws_cloudwatch_log_delivery` to link each log type’s delivery source to the delivery destination + +#### Example Usage + +The following example demonstrates how to set up logging for an EventBridge event bus to all three destinations: CloudWatch Logs, S3, and Data Firehose. + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import Token, TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. 
+# +from imports.aws.cloudwatch_event_bus import CloudwatchEventBus +from imports.aws.cloudwatch_log_delivery import CloudwatchLogDelivery +from imports.aws.cloudwatch_log_delivery_destination import CloudwatchLogDeliveryDestination +from imports.aws.cloudwatch_log_delivery_source import CloudwatchLogDeliverySource +from imports.aws.cloudwatch_log_group import CloudwatchLogGroup +from imports.aws.cloudwatch_log_resource_policy import CloudwatchLogResourcePolicy +from imports.aws.data_aws_caller_identity import DataAwsCallerIdentity +from imports.aws.data_aws_iam_policy_document import DataAwsIamPolicyDocument +from imports.aws.kinesis_firehose_delivery_stream import KinesisFirehoseDeliveryStream +from imports.aws.s3_bucket import S3Bucket +from imports.aws.s3_bucket_policy import S3BucketPolicy +class MyConvertedCode(TerraformStack): + def __init__(self, scope, id, *, destination, name): + super().__init__(scope, id) + example = CloudwatchEventBus(self, "example", + log_config=CloudwatchEventBusLogConfig( + include_detail="FULL", + level="TRACE" + ), + name="example-event-bus" + ) + error_logs = CloudwatchLogDeliverySource(self, "error_logs", + log_type="ERROR_LOGS", + name="EventBusSource-${" + example.name + "}-ERROR_LOGS", + resource_arn=example.arn + ) + info_logs = CloudwatchLogDeliverySource(self, "info_logs", + log_type="INFO_LOGS", + name="EventBusSource-${" + example.name + "}-INFO_LOGS", + resource_arn=example.arn + ) + trace_logs = CloudwatchLogDeliverySource(self, "trace_logs", + log_type="TRACE_LOGS", + name="EventBusSource-${" + example.name + "}-TRACE_LOGS", + resource_arn=example.arn + ) + event_bus_logs = CloudwatchLogGroup(self, "event_bus_logs", + name="/aws/vendedlogs/events/event-bus/${" + example.name + "}" + ) + cloudfront_logs = KinesisFirehoseDeliveryStream(self, "cloudfront_logs", + tags={ + "LogDeliveryEnabled": "true" + }, + destination=destination, + name=name + ) + aws_s3_bucket_example = S3Bucket(self, "example_6", +
bucket="example-event-bus-logs" + ) + # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. + aws_s3_bucket_example.override_logical_id("example") + current = DataAwsCallerIdentity(self, "current") + bucket = DataAwsIamPolicyDocument(self, "bucket", + statement=[DataAwsIamPolicyDocumentStatement( + actions=["s3:PutObject"], + condition=[DataAwsIamPolicyDocumentStatementCondition( + test="StringEquals", + values=["bucket-owner-full-control"], + variable="s3:x-amz-acl" + ), DataAwsIamPolicyDocumentStatementCondition( + test="StringEquals", + values=[Token.as_string(current.account_id)], + variable="aws:SourceAccount" + ), DataAwsIamPolicyDocumentStatementCondition( + test="ArnLike", + values=[info_logs.arn, error_logs.arn, trace_logs.arn], + variable="aws:SourceArn" + ) + ], + effect="Allow", + principals=[DataAwsIamPolicyDocumentStatementPrincipals( + identifiers=["delivery.logs.amazonaws.com"], + type="Service" + ) + ], + resources=["${" + aws_s3_bucket_example.arn + "}/AWSLogs/${" + current.account_id + "}/EventBusLogs/*" + ] + ) + ] + ) + cwlogs = DataAwsIamPolicyDocument(self, "cwlogs", + statement=[DataAwsIamPolicyDocumentStatement( + actions=["logs:CreateLogStream", "logs:PutLogEvents"], + condition=[DataAwsIamPolicyDocumentStatementCondition( + test="StringEquals", + values=[Token.as_string(current.account_id)], + variable="aws:SourceAccount" + ), DataAwsIamPolicyDocumentStatementCondition( + test="ArnLike", + values=[info_logs.arn, error_logs.arn, trace_logs.arn], + variable="aws:SourceArn" + ) + ], + effect="Allow", + principals=[DataAwsIamPolicyDocumentStatementPrincipals( + identifiers=["delivery.logs.amazonaws.com"], + type="Service" + ) + ], + resources=["${" + event_bus_logs.arn + "}:log-stream:*"] + ) + ] + ) + aws_cloudwatch_log_delivery_destination_cwlogs = + CloudwatchLogDeliveryDestination(self, "cwlogs_10", + 
delivery_destination_configuration=[CloudwatchLogDeliveryDestinationDeliveryDestinationConfiguration( + destination_resource_arn=event_bus_logs.arn + ) + ], + name="EventsDeliveryDestination-${" + example.name + "}-CWLogs" + ) + # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. + aws_cloudwatch_log_delivery_destination_cwlogs.override_logical_id("cwlogs") + firehose = CloudwatchLogDeliveryDestination(self, "firehose", + delivery_destination_configuration=[CloudwatchLogDeliveryDestinationDeliveryDestinationConfiguration( + destination_resource_arn=cloudfront_logs.arn + ) + ], + name="EventsDeliveryDestination-${" + example.name + "}-Firehose" + ) + s3 = CloudwatchLogDeliveryDestination(self, "s3", + delivery_destination_configuration=[CloudwatchLogDeliveryDestinationDeliveryDestinationConfiguration( + destination_resource_arn=Token.as_string(aws_s3_bucket_example.arn) + ) + ], + name="EventsDeliveryDestination-${" + example.name + "}-S3" + ) + aws_cloudwatch_log_resource_policy_example = + CloudwatchLogResourcePolicy(self, "example_13", + policy_document=Token.as_string(cwlogs.json), + policy_name="AWSLogDeliveryWrite-${" + example.name + "}" + ) + # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. + aws_cloudwatch_log_resource_policy_example.override_logical_id("example") + aws_s3_bucket_policy_example = S3BucketPolicy(self, "example_14", + bucket=Token.as_string(aws_s3_bucket_example.bucket), + policy=Token.as_string(bucket.json) + ) + # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. 
+ aws_s3_bucket_policy_example.override_logical_id("example") + s3_info_logs = CloudwatchLogDelivery(self, "s3_info_logs", + delivery_destination_arn=s3.arn, + delivery_source_name=info_logs.name + ) + cwlogs_info_logs = CloudwatchLogDelivery(self, "cwlogs_info_logs", + delivery_destination_arn=Token.as_string(aws_cloudwatch_log_delivery_destination_cwlogs.arn), + delivery_source_name=info_logs.name, + depends_on=[s3_info_logs] + ) + firehose_info_logs = CloudwatchLogDelivery(self, "firehose_info_logs", + delivery_destination_arn=firehose.arn, + delivery_source_name=info_logs.name, + depends_on=[cwlogs_info_logs] + ) + s3_error_logs = CloudwatchLogDelivery(self, "s3_error_logs", + delivery_destination_arn=s3.arn, + delivery_source_name=error_logs.name, + depends_on=[s3_info_logs] + ) + s3_trace_logs = CloudwatchLogDelivery(self, "s3_trace_logs", + delivery_destination_arn=s3.arn, + delivery_source_name=trace_logs.name, + depends_on=[s3_error_logs] + ) + cwlogs_error_logs = CloudwatchLogDelivery(self, "cwlogs_error_logs", + delivery_destination_arn=Token.as_string(aws_cloudwatch_log_delivery_destination_cwlogs.arn), + delivery_source_name=error_logs.name, + depends_on=[s3_error_logs, cwlogs_info_logs] + ) + cwlogs_trace_logs = CloudwatchLogDelivery(self, "cwlogs_trace_logs", + delivery_destination_arn=Token.as_string(aws_cloudwatch_log_delivery_destination_cwlogs.arn), + delivery_source_name=trace_logs.name, + depends_on=[s3_trace_logs, cwlogs_error_logs] + ) + firehose_error_logs = CloudwatchLogDelivery(self, "firehose_error_logs", + delivery_destination_arn=firehose.arn, + delivery_source_name=error_logs.name, + depends_on=[cwlogs_error_logs, firehose_info_logs] + ) + CloudwatchLogDelivery(self, "firehose_trace_logs", + delivery_destination_arn=firehose.arn, + delivery_source_name=trace_logs.name, + depends_on=[cwlogs_trace_logs, firehose_error_logs] + ) +``` + ## Argument Reference This resource supports the following arguments: +* `region` - (Optional) Region 
 where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). The following arguments are required: * `name` - (Required) Name of the new event bus. The names of custom event buses can't contain the / character. To create a partner event bus, ensure that the `name` matches the `event_source_name`. The following arguments are optional: * `dead_letter_config` - (Optional) Configuration details of the Amazon SQS queue for EventBridge to use as a dead-letter queue (DLQ). This block supports the following arguments: * `arn` - (Optional) The ARN of the SQS queue specified as the target for the dead-letter queue. * `description` - (Optional) Event bus description. * `event_source_name` - (Optional) Partner event source that the new event bus will be matched with. Must match `name`. * `kms_key_identifier` - (Optional) Identifier of the AWS KMS customer managed key for EventBridge to use, if you choose to use a customer managed key to encrypt events on this event bus. The identifier can be the key Amazon Resource Name (ARN), KeyId, key alias, or key alias ARN. +* `log_config` - (Optional) Block for logging configuration settings for the event bus. + * `include_detail` - (Optional) Whether EventBridge includes detailed event information in the records it generates. Valid values are `NONE` and `FULL`. + * `level` - (Optional) Level of logging detail to include. Valid values are `OFF`, `ERROR`, `INFO`, and `TRACE`. * `tags` - (Optional) Map of tags assigned to the resource.
If configured with a provider [`default_tags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. ## Attribute Reference @@ -108,4 +338,4 @@ Using `terraform import`, import EventBridge event buses using the name of the e % terraform import aws_cloudwatch_event_bus.messenger chat-messages ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/cloudwatch_event_bus_policy.html.markdown b/website/docs/cdktf/python/r/cloudwatch_event_bus_policy.html.markdown index ef5516b5c2ba..5c2cb7cba3b6 100644 --- a/website/docs/cdktf/python/r/cloudwatch_event_bus_policy.html.markdown +++ b/website/docs/cdktf/python/r/cloudwatch_event_bus_policy.html.markdown @@ -161,6 +161,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `policy` - (Required) The text of the policy. For more information about building AWS IAM policy documents with Terraform, see the [AWS IAM Policy Document Guide](https://learn.hashicorp.com/terraform/aws/iam-policy). * `event_bus_name` - (Optional) The name of the event bus to set the permissions on. If you omit this, the permissions are set on the `default` event bus. 
@@ -196,4 +197,4 @@ Using `terraform import`, import an EventBridge policy using the `event_bus_name % terraform import aws_cloudwatch_event_bus_policy.DevAccountAccess example-event-bus ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/cloudwatch_event_connection.html.markdown b/website/docs/cdktf/python/r/cloudwatch_event_connection.html.markdown index c922d506b1e7..3910f34f7b49 100644 --- a/website/docs/cdktf/python/r/cloudwatch_event_connection.html.markdown +++ b/website/docs/cdktf/python/r/cloudwatch_event_connection.html.markdown @@ -247,6 +247,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The name for the connection. Maximum of 64 characters consisting of numbers, lower/upper case letters, .,-,_. * `description` - (Optional) Description for the connection. Maximum of 512 characters. * `authorization_type` - (Required) Type of authorization to use for the connection. One of `API_KEY`,`BASIC`,`OAUTH_CLIENT_CREDENTIALS`. 
@@ -337,4 +338,4 @@ Using `terraform import`, import EventBridge EventBridge connection using the `n % terraform import aws_cloudwatch_event_connection.test ngrok-connection ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/cloudwatch_event_endpoint.html.markdown b/website/docs/cdktf/python/r/cloudwatch_event_endpoint.html.markdown index 00c016add749..7fa434cdfb34 100644 --- a/website/docs/cdktf/python/r/cloudwatch_event_endpoint.html.markdown +++ b/website/docs/cdktf/python/r/cloudwatch_event_endpoint.html.markdown @@ -57,6 +57,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` - (Optional) A description of the global endpoint. * `event_bus` - (Required) The event buses to use. The names of the event buses must be identical in each Region. Exactly two event buses are required. Documented below. * `name` - (Required) The name of the global endpoint. @@ -121,4 +122,4 @@ Using `terraform import`, import EventBridge Global Endpoints using the `name`. 
% terraform import aws_cloudwatch_event_endpoint.imported_endpoint example-endpoint ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/cloudwatch_event_permission.html.markdown b/website/docs/cdktf/python/r/cloudwatch_event_permission.html.markdown index 783d98050941..a20da0fdbf3e 100644 --- a/website/docs/cdktf/python/r/cloudwatch_event_permission.html.markdown +++ b/website/docs/cdktf/python/r/cloudwatch_event_permission.html.markdown @@ -67,6 +67,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `principal` - (Required) The 12-digit AWS account ID that you are permitting to put events to your default event bus. Specify `*` to permit any account to put events to your default event bus, optionally limited by `condition`. * `statement_id` - (Required) An identifier string for the external account that you are granting permissions to. * `action` - (Optional) The action that you are enabling the other account to perform. Defaults to `events:PutEvents`. 
@@ -111,4 +112,4 @@ Using `terraform import`, import EventBridge permissions using the `event_bus_na % terraform import aws_cloudwatch_event_permission.DevAccountAccess example-event-bus/DevAccountAccess ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/cloudwatch_event_rule.html.markdown b/website/docs/cdktf/python/r/cloudwatch_event_rule.html.markdown index ed1a0797c9eb..68f7b6b0fe12 100644 --- a/website/docs/cdktf/python/r/cloudwatch_event_rule.html.markdown +++ b/website/docs/cdktf/python/r/cloudwatch_event_rule.html.markdown @@ -62,28 +62,21 @@ data "aws_iam_policy_document" "sns_topic_policy" { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Optional) The name of the rule. If omitted, Terraform will assign a random, unique name. Conflicts with `name_prefix`. * `name_prefix` - (Optional) Creates a unique name beginning with the specified prefix. Conflicts with `name`. **Note**: Due to the length of the generated suffix, must be 38 characters or less. * `schedule_expression` - (Optional) The scheduling expression. For example, `cron(0 20 * * ? *)` or `rate(5 minutes)`. At least one of `schedule_expression` or `event_pattern` is required. Can only be used on the default event bus. For more information, refer to the AWS documentation [Schedule Expressions for Rules](https://docs.aws.amazon.com/AmazonCloudWatch/latest/events/ScheduledEvents.html). -* `event_bus_name` - (Optional) The name or ARN of the event bus to associate with this rule. - If you omit this, the `default` event bus is used. +* `event_bus_name` - (Optional) The name or ARN of the event bus to associate with this rule. 
If you omit this, the `default` event bus is used. * `event_pattern` - (Optional) The event pattern described a JSON object. At least one of `schedule_expression` or `event_pattern` is required. See full documentation of [Events and Event Patterns in EventBridge](https://docs.aws.amazon.com/eventbridge/latest/userguide/eventbridge-and-event-patterns.html) for details. **Note**: The event pattern size is 2048 by default but it is adjustable up to 4096 characters by submitting a service quota increase request. See [Amazon EventBridge quotas](https://docs.aws.amazon.com/eventbridge/latest/userguide/eb-quota.html) for details. * `force_destroy` - (Optional) Used to delete managed rules created by AWS. Defaults to `false`. * `description` - (Optional) The description of the rule. * `role_arn` - (Optional) The Amazon Resource Name (ARN) associated with the role that is used for target invocation. -* `is_enabled` - (Optional, **Deprecated** Use `state` instead) Whether the rule should be enabled. - Defaults to `true`. - Conflicts with `state`. -* `state` - (Optional) State of the rule. - Valid values are `DISABLED`, `ENABLED`, and `ENABLED_WITH_ALL_CLOUDTRAIL_MANAGEMENT_EVENTS`. - When state is `ENABLED`, the rule is enabled for all events except those delivered by CloudTrail. - To also enable the rule for events delivered by CloudTrail, set `state` to `ENABLED_WITH_ALL_CLOUDTRAIL_MANAGEMENT_EVENTS`. - Defaults to `ENABLED`. - Conflicts with `is_enabled`. - - **NOTE:** The rule state `ENABLED_WITH_ALL_CLOUDTRAIL_MANAGEMENT_EVENTS` cannot be used in conjunction with the `schedule_expression` argument. +* `is_enabled` - (Optional, **Deprecated** Use `state` instead) Whether the rule should be enabled. Defaults to `true`. Conflicts with `state`. +* `state` - (Optional) State of the rule. Valid values are `DISABLED`, `ENABLED`, and `ENABLED_WITH_ALL_CLOUDTRAIL_MANAGEMENT_EVENTS`. When state is `ENABLED`, the rule is enabled for all events except those delivered by CloudTrail. 
To also enable the rule for events delivered by CloudTrail, set `state` to `ENABLED_WITH_ALL_CLOUDTRAIL_MANAGEMENT_EVENTS`. Defaults to `ENABLED`. Conflicts with `is_enabled`. * `tags` - (Optional) A map of tags to assign to the resource. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. +**NOTE:** The rule state `ENABLED_WITH_ALL_CLOUDTRAIL_MANAGEMENT_EVENTS` cannot be used in conjunction with the `schedule_expression` argument. + ## Attribute Reference This resource exports the following attributes in addition to the arguments above: @@ -94,6 +87,34 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_cloudwatch_event_rule.example + identity = { + name = "capture-console-sign-in" + event_bus_name = "example-event-bus" + } +} + +resource "aws_cloudwatch_event_rule" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `name` (String) Name of the EventBridge rule. + +#### Optional + +* `account_id` (String) AWS Account where this resource is managed. +* `event_bus_name` (String) Name of the event bus. +* `region` (String) Region where this resource is managed. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import EventBridge Rules using the `event_bus_name/rule_name` (if you omit `event_bus_name`, the `default` event bus will be used). 
For example: ```python @@ -108,13 +129,13 @@ from imports.aws.cloudwatch_event_rule import CloudwatchEventRule class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) - CloudwatchEventRule.generate_config_for_import(self, "console", "example-event-bus/capture-console-sign-in") + CloudwatchEventRule.generate_config_for_import(self, "example", "example-event-bus/capture-console-sign-in") ``` Using `terraform import`, import EventBridge Rules using the `event_bus_name/rule_name` (if you omit `event_bus_name`, the `default` event bus will be used). For example: ```console -% terraform import aws_cloudwatch_event_rule.console example-event-bus/capture-console-sign-in +% terraform import aws_cloudwatch_event_rule.example example-event-bus/capture-console-sign-in ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/cloudwatch_event_target.html.markdown b/website/docs/cdktf/python/r/cloudwatch_event_target.html.markdown index 0a2c083cb4f7..fb4234d9d0be 100644 --- a/website/docs/cdktf/python/r/cloudwatch_event_target.html.markdown +++ b/website/docs/cdktf/python/r/cloudwatch_event_target.html.markdown @@ -633,6 +633,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `appsync_target` - (Optional) Parameters used when you are using the rule to invoke an AppSync GraphQL API mutation. Documented below. A maximum of 1 are allowed. * `batch_target` - (Optional) Parameters used when you are using the rule to invoke an Amazon Batch Job. Documented below. A maximum of 1 are allowed. 
* `dead_letter_config` - (Optional) Parameters used when you are providing a dead letter config. Documented below. A maximum of 1 are allowed. @@ -764,6 +765,36 @@ This resource exports no additional attributes. ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_cloudwatch_event_target.example + identity = { + event_bus_name = "default" + rule = "rule-name" + target_id = "target-id" + } +} + +resource "aws_cloudwatch_event_target" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `event_bus_name` (String) Event bus name for the target. +* `rule` (String) Rule name for the target. +* `target_id` (String) Target ID. + +#### Optional + +* `account_id` (String) AWS Account where this resource is managed. +* `region` (String) Region where this resource is managed. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import EventBridge Targets using `event_bus_name/rule-name/target-id` (if you omit `event_bus_name`, the `default` event bus will be used). For example: ```python @@ -778,13 +809,13 @@ from imports.aws.cloudwatch_event_target import CloudwatchEventTarget class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) - CloudwatchEventTarget.generate_config_for_import(self, "testEventTarget", "rule-name/target-id") + CloudwatchEventTarget.generate_config_for_import(self, "example", "rule-name/target-id") ``` Using `terraform import`, import EventBridge Targets using `event_bus_name/rule-name/target-id` (if you omit `event_bus_name`, the `default` event bus will be used). 
For example: ```console -% terraform import aws_cloudwatch_event_target.test-event-target rule-name/target-id +% terraform import aws_cloudwatch_event_target.example rule-name/target-id ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/cloudwatch_log_account_policy.html.markdown b/website/docs/cdktf/python/r/cloudwatch_log_account_policy.html.markdown index b893cf5094de..d6b8b2cbd9e7 100644 --- a/website/docs/cdktf/python/r/cloudwatch_log_account_policy.html.markdown +++ b/website/docs/cdktf/python/r/cloudwatch_log_account_policy.html.markdown @@ -113,6 +113,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `policy_document` - (Required) Text of the account policy. Refer to the [AWS docs](https://docs.aws.amazon.com/cli/latest/reference/logs/put-account-policy.html) for more information. * `policy_type` - (Required) Type of account policy. One of `DATA_PROTECTION_POLICY`, `SUBSCRIPTION_FILTER_POLICY`, `FIELD_INDEX_POLICY` or `TRANSFORMER_POLICY`. You can have one account policy per type in an account. * `policy_name` - (Required) Name of the account policy. 
@@ -148,4 +149,4 @@ Using `terraform import`, import this resource using the `policy_name` and `poli % terraform import aws_cloudwatch_log_account_policy.example "my-account-policy:SUBSCRIPTION_FILTER_POLICY" ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/cloudwatch_log_anomaly_detector.html.markdown b/website/docs/cdktf/python/r/cloudwatch_log_anomaly_detector.html.markdown index 826949386671..5443285927b1 100644 --- a/website/docs/cdktf/python/r/cloudwatch_log_anomaly_detector.html.markdown +++ b/website/docs/cdktf/python/r/cloudwatch_log_anomaly_detector.html.markdown @@ -52,12 +52,13 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -The following arguments are required: +This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `log_group_arn_list` - (Required) Array containing the ARN of the log group that this anomaly detector will watch. You can specify only one log group ARN. The following arguments are optional: * `anomaly_visibility_time` - (Optional) Number of days to have visibility on an anomaly. After this time period has elapsed for an anomaly, it will be automatically baselined and the anomaly detector will treat new occurrences of a similar anomaly as normal.
Therefore, if you do not correct the cause of an anomaly during the time period specified in `anomaly_visibility_time`, it will be considered normal going forward and will not be detected as an anomaly. Valid Range: Minimum value of 7. Maximum value of 90. * `detector_name` - (Optional) Name for this anomaly detector. @@ -93,10 +95,10 @@ class MyConvertedCode(TerraformStack): CloudwatchLogAnomalyDetector.generate_config_for_import(self, "example", "log_anomaly_detector-arn-12345678") ``` -Using `terraform import`, import CloudWatch Log Anomaly Detector using the `example_id_arg`. For example: +Using `terraform import`, import CloudWatch Log Anomaly Detector using the `arn`. For example: ```console % terraform import aws_cloudwatch_log_anomaly_detector.example log_anomaly_detector-arn-12345678 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/cloudwatch_log_data_protection_policy.html.markdown b/website/docs/cdktf/python/r/cloudwatch_log_data_protection_policy.html.markdown index a71813fbe479..0242edfb768c 100644 --- a/website/docs/cdktf/python/r/cloudwatch_log_data_protection_policy.html.markdown +++ b/website/docs/cdktf/python/r/cloudwatch_log_data_protection_policy.html.markdown @@ -79,6 +79,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `log_group_name` - (Required) The name of the log group under which the log stream is to be created. * `policy_document` - (Required) Specifies the data protection policy in JSON. 
Read more at [Data protection policy syntax](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/mask-sensitive-log-data-start.html#mask-sensitive-log-data-policysyntax). @@ -111,4 +112,4 @@ Using `terraform import`, import this resource using the `log_group_name`. For e % terraform import aws_cloudwatch_log_data_protection_policy.example my-log-group ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/cloudwatch_log_delivery.html.markdown b/website/docs/cdktf/python/r/cloudwatch_log_delivery.html.markdown index e1e2d4739bd5..d7b2e22604d2 100644 --- a/website/docs/cdktf/python/r/cloudwatch_log_delivery.html.markdown +++ b/website/docs/cdktf/python/r/cloudwatch_log_delivery.html.markdown @@ -40,6 +40,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `delivery_destination_arn` - (Required) The ARN of the delivery destination to use for this delivery. * `delivery_source_name` - (Required) The name of the delivery source to use for this delivery. * `field_delimiter` - (Optional) The field delimiter to use between record fields when the final output format of a delivery is in `plain`, `w3c`, or `raw` format. @@ -82,4 +83,4 @@ Using `terraform import`, import CloudWatch Logs Delivery using the `id`. 
For ex % terraform import aws_cloudwatch_log_delivery.example jsoGVi4Zq8VlYp9n ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/cloudwatch_log_delivery_destination.html.markdown b/website/docs/cdktf/python/r/cloudwatch_log_delivery_destination.html.markdown index bccdb4f55517..6fcbff8b4430 100644 --- a/website/docs/cdktf/python/r/cloudwatch_log_delivery_destination.html.markdown +++ b/website/docs/cdktf/python/r/cloudwatch_log_delivery_destination.html.markdown @@ -41,6 +41,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `delivery_destination_configuration` - (Required) The AWS resource that will receive the logs. * `destination_resource_arn` - (Required) The ARN of the AWS destination that this delivery destination represents. * `name` - (Required) The name for this delivery destination. 
@@ -80,4 +81,4 @@ Using `terraform import`, import CloudWatch Logs Delivery Destination using the % terraform import aws_cloudwatch_log_delivery_destination.example example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/cloudwatch_log_delivery_destination_policy.html.markdown b/website/docs/cdktf/python/r/cloudwatch_log_delivery_destination_policy.html.markdown index 2885a38dff01..43012eb88396 100644 --- a/website/docs/cdktf/python/r/cloudwatch_log_delivery_destination_policy.html.markdown +++ b/website/docs/cdktf/python/r/cloudwatch_log_delivery_destination_policy.html.markdown @@ -38,6 +38,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `delivery_destination_name` - (Required) The name of the delivery destination to assign this policy to. * `delivery_destination_policy` - (Required) The contents of the policy. 
@@ -70,4 +71,4 @@ Using `terraform import`, import CloudWatch Logs Delivery Destination Policy usi % terraform import aws_cloudwatch_log_delivery_destination_policy.example example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/cloudwatch_log_delivery_source.html.markdown b/website/docs/cdktf/python/r/cloudwatch_log_delivery_source.html.markdown index 0309cbbf5bfc..8c952ecafa3d 100644 --- a/website/docs/cdktf/python/r/cloudwatch_log_delivery_source.html.markdown +++ b/website/docs/cdktf/python/r/cloudwatch_log_delivery_source.html.markdown @@ -39,6 +39,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `log_type` - (Required) The type of log that the source is sending. For Amazon Bedrock, the valid value is `APPLICATION_LOGS`. For Amazon CodeWhisperer, the valid value is `EVENT_LOGS`. For IAM Identity Center, the valid value is `ERROR_LOGS`. For Amazon WorkMail, the valid values are `ACCESS_CONTROL_LOGS`, `AUTHENTICATION_LOGS`, `WORKMAIL_AVAILABILITY_PROVIDER_LOGS`, and `WORKMAIL_MAILBOX_ACCESS_LOGS`. * `name` - (Required) The name for this delivery source. * `resource_arn` - (Required) The ARN of the AWS resource that is generating and sending logs. 
@@ -77,4 +78,4 @@ Using `terraform import`, import CloudWatch Logs Delivery Source using the `name % terraform import aws_cloudwatch_log_delivery_source.example example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/cloudwatch_log_destination.html.markdown b/website/docs/cdktf/python/r/cloudwatch_log_destination.html.markdown index 9e6bac69141b..2608877eb8d7 100644 --- a/website/docs/cdktf/python/r/cloudwatch_log_destination.html.markdown +++ b/website/docs/cdktf/python/r/cloudwatch_log_destination.html.markdown @@ -37,6 +37,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) A name for the log destination. * `role_arn` - (Required) The ARN of an IAM role that grants Amazon CloudWatch Logs permissions to put data into the target. * `target_arn` - (Required) The ARN of the target Amazon Kinesis stream resource for the destination. @@ -74,4 +75,4 @@ Using `terraform import`, import CloudWatch Logs destinations using the `name`. 
% terraform import aws_cloudwatch_log_destination.test_destination test_destination ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/cloudwatch_log_destination_policy.html.markdown b/website/docs/cdktf/python/r/cloudwatch_log_destination_policy.html.markdown index 52691bec1a88..41feb1f093f7 100644 --- a/website/docs/cdktf/python/r/cloudwatch_log_destination_policy.html.markdown +++ b/website/docs/cdktf/python/r/cloudwatch_log_destination_policy.html.markdown @@ -59,6 +59,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `destination_name` - (Required) A name for the subscription filter * `access_policy` - (Required) The policy document. This is a JSON formatted string. * `force_update` - (Optional) Specify true if you are updating an existing destination policy to grant permission to an organization ID instead of granting permission to individual AWS accounts. 
@@ -92,4 +93,4 @@ Using `terraform import`, import CloudWatch Logs destination policies using the % terraform import aws_cloudwatch_log_destination_policy.test_destination_policy test_destination ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/cloudwatch_log_group.html.markdown b/website/docs/cdktf/python/r/cloudwatch_log_group.html.markdown index 35f227db2908..6409e4f5f4ca 100644 --- a/website/docs/cdktf/python/r/cloudwatch_log_group.html.markdown +++ b/website/docs/cdktf/python/r/cloudwatch_log_group.html.markdown @@ -39,13 +39,14 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Optional, Forces new resource) The name of the log group. If omitted, Terraform will assign a random, unique name. * `name_prefix` - (Optional, Forces new resource) Creates a unique name beginning with the specified prefix. Conflicts with `name`. * `skip_destroy` - (Optional) Set to true if you do not wish the log group (and any logs it may contain) to be deleted at destroy time, and instead just remove the log group from the Terraform state. -* `log_group_class` - (Optional) Specified the log class of the log group. Possible values are: `STANDARD` or `INFREQUENT_ACCESS`. +* `log_group_class` - (Optional) Specifies the log class of the log group. Possible values are: `STANDARD`, `INFREQUENT_ACCESS`, or `DELIVERY`. * `retention_in_days` - (Optional) Specifies the number of days you want to retain log events in the specified log group. Possible values are: 1, 3, 5, 7, 14, 30, 60, 90, 120, 150, 180, 365, 400, 545, 731, 1096, 1827, 2192, 2557, 2922, 3288, 3653, and 0.
- If you select 0, the events in the log group are always retained and never expire. + If you select 0, the events in the log group are always retained and never expire. If `log_group_class` is set to `DELIVERY`, this argument is ignored and `retention_in_days` is forcibly set to 2. * `kms_key_id` - (Optional) The ARN of the KMS Key to use when encrypting log data. Please note, after the AWS KMS CMK is disassociated from the log group, AWS CloudWatch Logs stops encrypting newly ingested data for the log group. All previously ingested data remains encrypted, and AWS CloudWatch Logs requires permissions for the CMK whenever the encrypted data is requested. @@ -60,6 +61,32 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_cloudwatch_log_group.example + identity = { + name = "yada" + } +} + +resource "aws_cloudwatch_log_group" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `name` (String) Name of the CloudWatch log group. + +#### Optional + +* `account_id` (String) AWS Account where this resource is managed. +* `region` (String) Region where this resource is managed. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Cloudwatch Log Groups using the `name`. For example: ```python @@ -74,13 +101,13 @@ from imports.aws.cloudwatch_log_group import CloudwatchLogGroup class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) - CloudwatchLogGroup.generate_config_for_import(self, "testGroup", "yada") + CloudwatchLogGroup.generate_config_for_import(self, "example", "yada") ``` Using `terraform import`, import Cloudwatch Log Groups using the `name`. 
For example: ```console -% terraform import aws_cloudwatch_log_group.test_group yada +% terraform import aws_cloudwatch_log_group.example yada ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/cloudwatch_log_index_policy.html.markdown b/website/docs/cdktf/python/r/cloudwatch_log_index_policy.html.markdown index fc7bf9b8aaa7..cc0617667e26 100644 --- a/website/docs/cdktf/python/r/cloudwatch_log_index_policy.html.markdown +++ b/website/docs/cdktf/python/r/cloudwatch_log_index_policy.html.markdown @@ -19,13 +19,13 @@ Terraform resource for managing an AWS CloudWatch Logs Index Policy. ```python # DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug from constructs import Construct -from cdktf import Fn, TerraformStack +from cdktf import Fn, Token, TerraformStack # # Provider bindings are generated by running `cdktf get`. # See https://cdk.tf/provider-generation for more details. # -from imports.aws. import CloudwatchLogIndexPolicy from imports.aws.cloudwatch_log_group import CloudwatchLogGroup +from imports.aws.cloudwatch_log_index_policy import CloudwatchLogIndexPolicy class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) @@ -34,9 +34,10 @@ class MyConvertedCode(TerraformStack): ) aws_cloudwatch_log_index_policy_example = CloudwatchLogIndexPolicy(self, "example_1", log_group_name=example.name, - policy_document=Fn.jsonencode({ - "Fields": ["eventName"] - }) + policy_document=Token.as_string( + Fn.jsonencode({ + "Fields": ["eventName"] + })) ) # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. 
aws_cloudwatch_log_index_policy_example.override_logical_id("example") @@ -44,8 +45,9 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -The following arguments are required: +This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `log_group_name` - (Required) Log group name to set the policy for. * `policy_document` - (Required) JSON policy document. This is a JSON formatted string. @@ -65,7 +67,7 @@ from cdktf import TerraformStack # Provider bindings are generated by running `cdktf get`. # See https://cdk.tf/provider-generation for more details. # -from imports.aws. import CloudwatchLogIndexPolicy +from imports.aws.cloudwatch_log_index_policy import CloudwatchLogIndexPolicy class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) @@ -78,4 +80,4 @@ Using `terraform import`, import CloudWatch Logs Index Policy using the `log_gro % terraform import aws_cloudwatch_log_index_policy.example /aws/log/group/name ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/cloudwatch_log_metric_filter.html.markdown b/website/docs/cdktf/python/r/cloudwatch_log_metric_filter.html.markdown index 427003406301..9b2b08e548b1 100644 --- a/website/docs/cdktf/python/r/cloudwatch_log_metric_filter.html.markdown +++ b/website/docs/cdktf/python/r/cloudwatch_log_metric_filter.html.markdown @@ -46,11 +46,13 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) A name for the metric filter. * `pattern` - (Required) A valid [CloudWatch Logs filter pattern](https://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/FilterAndPatternSyntax.html) for extracting metric data out of ingested log events. * `log_group_name` - (Required) The name of the log group to associate the metric filter with. * `metric_transformation` - (Required) A block defining collection of information needed to define how metric data gets emitted. See below. +* `apply_on_transformed_logs` - (Optional) Whether the metric filter will be applied on the transformed version of the log events instead of the original ingested log events. Defaults to `false`. Valid only for log groups that have an active log transformer. The `metric_transformation` block supports the following arguments: @@ -92,4 +94,4 @@ Using `terraform import`, import CloudWatch Log Metric Filter using the `log_gro % terraform import aws_cloudwatch_log_metric_filter.test /aws/lambda/function:test ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/cloudwatch_log_resource_policy.html.markdown b/website/docs/cdktf/python/r/cloudwatch_log_resource_policy.html.markdown index c96be995cb4a..92fd61ab2acb 100644 --- a/website/docs/cdktf/python/r/cloudwatch_log_resource_policy.html.markdown +++ b/website/docs/cdktf/python/r/cloudwatch_log_resource_policy.html.markdown @@ -91,6 +91,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
* `policy_document` - (Required) Details of the resource policy, including the identity of the principal that is enabled to put logs to this account. This is formatted as a JSON string. Maximum length of 5120 characters. * `policy_name` - (Required) Name of the resource policy. @@ -125,4 +126,4 @@ Using `terraform import`, import CloudWatch log resource policies using the poli % terraform import aws_cloudwatch_log_resource_policy.MyPolicy MyPolicy ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/cloudwatch_log_stream.html.markdown b/website/docs/cdktf/python/r/cloudwatch_log_stream.html.markdown index c977190421e5..3d62e8bab1f6 100644 --- a/website/docs/cdktf/python/r/cloudwatch_log_stream.html.markdown +++ b/website/docs/cdktf/python/r/cloudwatch_log_stream.html.markdown @@ -40,6 +40,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The name of the log stream. Must not be longer than 512 characters and must not contain `:` * `log_group_name` - (Required) The name of the log group under which the log stream is to be created. 
@@ -74,4 +75,4 @@ Using `terraform import`, import Cloudwatch Log Stream using the stream's `log_g % terraform import aws_cloudwatch_log_stream.foo Yada:SampleLogStream1234 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/cloudwatch_log_subscription_filter.html.markdown b/website/docs/cdktf/python/r/cloudwatch_log_subscription_filter.html.markdown index ded1a6dd3cc2..ac18cecaedae 100644 --- a/website/docs/cdktf/python/r/cloudwatch_log_subscription_filter.html.markdown +++ b/website/docs/cdktf/python/r/cloudwatch_log_subscription_filter.html.markdown @@ -40,6 +40,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) A name for the subscription filter * `destination_arn` - (Required) The ARN of the destination to deliver matching log events to. Kinesis stream or Lambda function ARN. * `filter_pattern` - (Required) A valid CloudWatch Logs filter pattern for subscribing to a filtered stream of log events. Use empty string `""` to match everything. For more information, see the [Amazon CloudWatch Logs User Guide](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/FilterAndPatternSyntax.html). 
@@ -76,4 +77,4 @@ Using `terraform import`, import CloudWatch Logs subscription filter using the l % terraform import aws_cloudwatch_log_subscription_filter.test_lambdafunction_logfilter "/aws/lambda/example_lambda_name|test_lambdafunction_logfilter" ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/cloudwatch_metric_alarm.html.markdown b/website/docs/cdktf/python/r/cloudwatch_metric_alarm.html.markdown index 94b8da8e07a9..4852bc67e521 100644 --- a/website/docs/cdktf/python/r/cloudwatch_metric_alarm.html.markdown +++ b/website/docs/cdktf/python/r/cloudwatch_metric_alarm.html.markdown @@ -219,6 +219,7 @@ You must choose one or the other This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `alarm_name` - (Required) The descriptive name for the alarm. This name must be unique within the user's AWS account * `comparison_operator` - (Required) The arithmetic operation to use when comparing the specified Statistic and Threshold. The specified Statistic value is used as the first operand. Either of the following is supported: `GreaterThanOrEqualToThreshold`, `GreaterThanThreshold`, `LessThanThreshold`, `LessThanOrEqualToThreshold`. Additionally, the values `LessThanLowerOrGreaterThanUpperThreshold`, `LessThanLowerThreshold`, and `GreaterThanUpperThreshold` are used only for alarms based on anomaly detection models. * `evaluation_periods` - (Required) The number of periods over which data is compared to the specified threshold. 
@@ -294,6 +295,32 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_cloudwatch_metric_alarm.example + identity = { + alarm_name = "alarm-12345" + } +} + +resource "aws_cloudwatch_metric_alarm" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `alarm_name` (String) Name of the CloudWatch metric alarm. + +#### Optional + +* `account_id` (String) AWS Account where this resource is managed. +* `region` (String) Region where this resource is managed. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import CloudWatch Metric Alarm using the `alarm_name`. For example: ```python @@ -308,13 +335,13 @@ from imports.aws.cloudwatch_metric_alarm import CloudwatchMetricAlarm class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) - CloudwatchMetricAlarm.generate_config_for_import(self, "test", "alarm-12345") + CloudwatchMetricAlarm.generate_config_for_import(self, "example", "alarm-12345") ``` Using `terraform import`, import CloudWatch Metric Alarm using the `alarm_name`. 
For example: ```console -% terraform import aws_cloudwatch_metric_alarm.test alarm-12345 +% terraform import aws_cloudwatch_metric_alarm.example alarm-12345 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/cloudwatch_metric_stream.html.markdown b/website/docs/cdktf/python/r/cloudwatch_metric_stream.html.markdown index e2e79497c1af..eb0bce0a1df6 100644 --- a/website/docs/cdktf/python/r/cloudwatch_metric_stream.html.markdown +++ b/website/docs/cdktf/python/r/cloudwatch_metric_stream.html.markdown @@ -180,6 +180,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `exclude_filter` - (Optional) List of exclusive metric filters. If you specify this parameter, the stream sends metrics from all metric namespaces except for the namespaces and the conditional metric names that you specify here. If you don't specify metric names or provide empty metric names whole metric namespace is excluded. Conflicts with `include_filter`. * `include_filter` - (Optional) List of inclusive metric filters. If you specify this parameter, the stream sends only the conditional metric names from the metric namespaces that you specify here. If you don't specify metric names or provide empty metric names whole metric namespace is included. Conflicts with `exclude_filter`. * `name` - (Optional, Forces new resource) Friendly name of the metric stream. If omitted, Terraform will assign a random, unique name. Conflicts with `name_prefix`. @@ -245,4 +246,4 @@ Using `terraform import`, import CloudWatch metric streams using the `name`. 
For % terraform import aws_cloudwatch_metric_stream.sample sample-stream-name ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/cloudwatch_query_definition.html.markdown b/website/docs/cdktf/python/r/cloudwatch_query_definition.html.markdown index 161528007090..812a750c0c38 100644 --- a/website/docs/cdktf/python/r/cloudwatch_query_definition.html.markdown +++ b/website/docs/cdktf/python/r/cloudwatch_query_definition.html.markdown @@ -37,6 +37,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The name of the query. * `query_string` - (Required) The query to save. You can read more about CloudWatch Logs Query Syntax in the [documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/CWL_QuerySyntax.html). * `log_group_names` - (Optional) Specific log groups to use with the query. 
@@ -72,4 +73,4 @@ Using `terraform import`, import CloudWatch query definitions using the query de % terraform import aws_cloudwatch_query_definition.example arn:aws:logs:us-west-2:123456789012:query-definition:269951d7-6f75-496d-9d7b-6b7a5486bdbd ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/codeartifact_domain.html.markdown b/website/docs/cdktf/python/r/codeartifact_domain.html.markdown index c22e4533708c..f51fac41e5be 100644 --- a/website/docs/cdktf/python/r/codeartifact_domain.html.markdown +++ b/website/docs/cdktf/python/r/codeartifact_domain.html.markdown @@ -35,6 +35,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `domain` - (Required) The name of the domain to create. All domain names in an AWS Region that are in the same AWS account must be unique. The domain name is used as the prefix in DNS hostnames. Do not use sensitive information in a domain name because it is publicly discoverable. * `encryption_key` - (Optional) The encryption key for the domain. This is used to encrypt content stored in a domain. The KMS Key Amazon Resource Name (ARN). The default aws/codeartifact AWS KMS master key is used if this element is absent. * `tags` - (Optional) Key-value map of resource tags. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. 
@@ -54,6 +55,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_codeartifact_domain.example + identity = { + "arn" = "arn:aws:codeartifact:us-west-2:123456789012:domain/example" + } +} + +resource "aws_codeartifact_domain" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the CodeArtifact domain. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import CodeArtifact Domain using the CodeArtifact Domain arn. For example: ```python @@ -77,4 +99,4 @@ Using `terraform import`, import CodeArtifact Domain using the CodeArtifact Doma % terraform import aws_codeartifact_domain.example arn:aws:codeartifact:us-west-2:012345678912:domain/tf-acc-test-8593714120730241305 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/codeartifact_domain_permissions_policy.html.markdown b/website/docs/cdktf/python/r/codeartifact_domain_permissions_policy.html.markdown index 46dd2baf0dfe..fbe3ef2abf8d 100644 --- a/website/docs/cdktf/python/r/codeartifact_domain_permissions_policy.html.markdown +++ b/website/docs/cdktf/python/r/codeartifact_domain_permissions_policy.html.markdown @@ -64,6 +64,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
* `domain` - (Required) The name of the domain on which to set the resource policy. * `policy_document` - (Required) A JSON policy string to be set as the access control resource policy on the provided domain. * `domain_owner` - (Optional) The account number of the AWS account that owns the domain. @@ -78,6 +79,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_codeartifact_domain_permissions_policy.example + identity = { + "arn" = "arn:aws:codeartifact:us-west-2:123456789012:domain/example" + } +} + +resource "aws_codeartifact_domain_permissions_policy" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the CodeArtifact domain. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import CodeArtifact Domain Permissions Policies using the CodeArtifact Domain ARN. 
For example: ```python @@ -101,4 +123,4 @@ Using `terraform import`, import CodeArtifact Domain Permissions Policies using % terraform import aws_codeartifact_domain_permissions_policy.example arn:aws:codeartifact:us-west-2:012345678912:domain/tf-acc-test-1928056699409417367 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/codeartifact_repository.html.markdown b/website/docs/cdktf/python/r/codeartifact_repository.html.markdown index 91061224b3d3..51fd9fcd23f5 100644 --- a/website/docs/cdktf/python/r/codeartifact_repository.html.markdown +++ b/website/docs/cdktf/python/r/codeartifact_repository.html.markdown @@ -102,6 +102,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `domain` - (Required) The domain that contains the created repository. * `repository` - (Required) The name of the repository to create. * `domain_owner` - (Optional) The account number of the AWS account that owns the domain. @@ -129,6 +130,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. 
For example: + +```terraform +import { + to = aws_codeartifact_repository.example + identity = { + "arn" = "arn:aws:codeartifact:us-west-2:123456789012:repository/example-domain/example-repo" + } +} + +resource "aws_codeartifact_repository" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the CodeArtifact repository. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import CodeArtifact Repository using the CodeArtifact Repository ARN. For example: ```python @@ -152,4 +174,4 @@ Using `terraform import`, import CodeArtifact Repository using the CodeArtifact % terraform import aws_codeartifact_repository.example arn:aws:codeartifact:us-west-2:012345678912:repository/tf-acc-test-6968272603913957763/tf-acc-test-6968272603913957763 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/codeartifact_repository_permissions_policy.html.markdown b/website/docs/cdktf/python/r/codeartifact_repository_permissions_policy.html.markdown index 5e79518fefa8..d643e82bcf87 100644 --- a/website/docs/cdktf/python/r/codeartifact_repository_permissions_policy.html.markdown +++ b/website/docs/cdktf/python/r/codeartifact_repository_permissions_policy.html.markdown @@ -74,6 +74,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `repository` - (Required) The name of the repository to set the resource policy on. * `domain` - (Required) The name of the domain on which to set the resource policy. 
* `policy_document` - (Required) A JSON policy string to be set as the access control resource policy on the provided domain. @@ -89,6 +90,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_codeartifact_repository_permissions_policy.example + identity = { + "arn" = "arn:aws:codeartifact:us-west-2:123456789012:repository/example-domain/example-repo" + } +} + +resource "aws_codeartifact_repository_permissions_policy" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the CodeArtifact repository. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import CodeArtifact Repository Permissions Policies using the CodeArtifact Repository ARN. For example: ```python @@ -112,4 +134,4 @@ Using `terraform import`, import CodeArtifact Repository Permissions Policies us % terraform import aws_codeartifact_repository_permissions_policy.example arn:aws:codeartifact:us-west-2:012345678912:repository/tf-acc-test-6968272603913957763/tf-acc-test-6968272603913957763 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/codebuild_fleet.html.markdown b/website/docs/cdktf/python/r/codebuild_fleet.html.markdown index 82ec92053adb..5eb9d4bff75a 100644 --- a/website/docs/cdktf/python/r/codebuild_fleet.html.markdown +++ b/website/docs/cdktf/python/r/codebuild_fleet.html.markdown @@ -77,7 +77,8 @@ The following arguments are required: The following arguments are optional: -* `compute_configuration` - (Optional) The compute configuration of the compute fleet. 
This is only required if `compute_type` is set to `ATTRIBUTE_BASED_COMPUTE`. See [`compute_configuration`](#compute_configuration) below. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `compute_configuration` - (Optional) The compute configuration of the compute fleet. This is only required if `compute_type` is set to `ATTRIBUTE_BASED_COMPUTE` or `CUSTOM_INSTANCE_TYPE`. See [`compute_configuration`](#compute_configuration) below. * `fleet_service_role` - (Optional) The service role associated with the compute fleet. * `image_id` - (Optional) The Amazon Machine Image (AMI) of the compute fleet. * `overflow_behavior` - (Optional) Overflow behavior for compute fleet. Valid values: `ON_DEMAND`, `QUEUE`. @@ -88,9 +89,10 @@ The following arguments are optional: ### compute_configuration * `disk` - (Optional) Amount of disk space of the instance type included in the fleet. -* `machine_type` - (Optional) Machine type of the instance type included in the fleet. Valid values: `GENERAL`, `NVME`. -* `memory` - (Optional) Amount of memory of the instance type included in the fleet. -* `vcpu` - (Optional) Number of vCPUs of the instance type included in the fleet. +* `instance_type` - (Optional) EC2 instance type to be launched in the fleet. Specify only if `compute_type` is set to `CUSTOM_INSTANCE_TYPE`. See [Supported instance families](https://docs.aws.amazon.com/codebuild/latest/userguide/build-env-ref-compute-types.html#environment-reserved-capacity.instance-types). +* `machine_type` - (Optional) Machine type of the instance type included in the fleet. Valid values: `GENERAL`, `NVME`. Specify only if `compute_type` is set to `ATTRIBUTE_BASED_COMPUTE`. 
+* `memory` - (Optional) Amount of memory of the instance type included in the fleet. Specify only if `compute_type` is set to `ATTRIBUTE_BASED_COMPUTE`. +* `vcpu` - (Optional) Number of vCPUs of the instance type included in the fleet. Specify only if `compute_type` is set to `ATTRIBUTE_BASED_COMPUTE`. ### scaling_configuration @@ -124,6 +126,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_codebuild_fleet.example + identity = { + "arn" = "arn:aws:codebuild:us-west-2:123456789012:fleet/example-fleet" + } +} + +resource "aws_codebuild_fleet" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the CodeBuild fleet. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import CodeBuild Fleet using the `name` or the `arn`. For example: ```python @@ -147,4 +170,4 @@ Using `terraform import`, import CodeBuild Fleet using the `name`. For example: % terraform import aws_codebuild_fleet.name fleet-name ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/codebuild_project.html.markdown b/website/docs/cdktf/python/r/codebuild_project.html.markdown index 0f1669782355..a8a01f96d7e2 100644 --- a/website/docs/cdktf/python/r/codebuild_project.html.markdown +++ b/website/docs/cdktf/python/r/codebuild_project.html.markdown @@ -16,6 +16,8 @@ source (e.g., the "rebuild every time a code change is pushed" option in the Cod ## Example Usage +### Basic Usage + ```python # DO NOT EDIT. 
Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug from constructs import Construct @@ -224,6 +226,11 @@ class MyConvertedCode(TerraformStack): ) ``` +### Runner Project + +While no special configuration is required for `aws_codebuild_project` to create a project as a Runner Project, an `aws_codebuild_webhook` resource with an appropriate `filter_group` is required. +See the [`aws_codebuild_webhook` resource documentation example](/docs/providers/aws/r/codebuild_webhook.html#for-codebuild-runner-project) for more details. + ## Argument Reference The following arguments are required: @@ -237,6 +244,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `badge_enabled` - (Optional) Generates a publicly-accessible URL for the projects build badge. Available as `badge_url` attribute when enabled. * `build_batch_config` - (Optional) Defines the batch build options for the project. @@ -324,6 +332,7 @@ The following arguments are optional: `BUILD_GENERAL1_SMALL`, `BUILD_GENERAL1_MEDIUM`, `BUILD_GENERAL1_LARGE`, `BUILD_GENERAL1_XLARGE`, `BUILD_GENERAL1_2XLARGE`, `BUILD_LAMBDA_1GB`, `BUILD_LAMBDA_2GB`, `BUILD_LAMBDA_4GB`, `BUILD_LAMBDA_8GB`, `BUILD_LAMBDA_10GB`. For additional information, see the [CodeBuild User Guide](https://docs.aws.amazon.com/codebuild/latest/userguide/build-env-ref-compute-types.html). +* `docker_server` - (Optional) Configuration block. Detailed below. * `fleet` - (Optional) Configuration block. Detailed below. * `environment_variable` - (Optional) Configuration block. Detailed below. * `image_pull_credentials_type` - (Optional) Type of credentials AWS CodeBuild uses to pull images in your build. 
Valid @@ -342,6 +351,11 @@ The following arguments are optional: `LINUX_LAMBDA_CONTAINER`, `ARM_LAMBDA_CONTAINER`, `LINUX_EC2`, `ARM_EC2`, `WINDOWS_EC2`, `MAC_ARM`. For additional information, see the [CodeBuild User Guide](https://docs.aws.amazon.com/codebuild/latest/userguide/build-env-ref-compute-types.html). +#### environment: docker_server + +* `compute_type` - (Required) Compute type for the Docker server. Valid values: `BUILD_GENERAL1_SMALL`, `BUILD_GENERAL1_MEDIUM`, `BUILD_GENERAL1_LARGE`, `BUILD_GENERAL1_XLARGE`, and `BUILD_GENERAL1_2XLARGE`. +* `security_group_ids` - (Optional) List of security group IDs to assign to the Docker server. + #### environment: fleet * `fleet_arn` - (Optional) Compute fleet ARN for the build project. @@ -541,6 +555,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_codebuild_project.example + identity = { + "arn" = "arn:aws:codebuild:us-west-2:123456789012:project/project-name" + } +} + +resource "aws_codebuild_project" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the CodeBuild project. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import CodeBuild Project using the `name`. For example: @@ -565,4 +600,4 @@ Using `terraform import`, import CodeBuild Project using the `name`. 
For example % terraform import aws_codebuild_project.name project-name ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/codebuild_report_group.html.markdown b/website/docs/cdktf/python/r/codebuild_report_group.html.markdown index 524d9a8cc25a..56ea47acd464 100644 --- a/website/docs/cdktf/python/r/codebuild_report_group.html.markdown +++ b/website/docs/cdktf/python/r/codebuild_report_group.html.markdown @@ -79,6 +79,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The name of a Report Group. * `type` - (Required) The type of the Report Group. Valid value are `TEST` and `CODE_COVERAGE`. * `export_config` - (Required) Information about the destination where the raw data of this Report Group is exported. see [Export Config](#export-config) documented below. @@ -110,6 +111,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_codebuild_report_group.example + identity = { + "arn" = "arn:aws:codebuild:us-west-2:123456789012:report-group/report-group-name" + } +} + +resource "aws_codebuild_report_group" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the CodeBuild report group. 
+ In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import CodeBuild Report Group using the CodeBuild Report Group arn. For example: ```python @@ -133,4 +155,4 @@ Using `terraform import`, import CodeBuild Report Group using the CodeBuild Repo % terraform import aws_codebuild_report_group.example arn:aws:codebuild:us-west-2:123456789:report-group/report-group-name ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/codebuild_resource_policy.html.markdown b/website/docs/cdktf/python/r/codebuild_resource_policy.html.markdown index 2c16a7a93413..21fe0fe520f5 100644 --- a/website/docs/cdktf/python/r/codebuild_resource_policy.html.markdown +++ b/website/docs/cdktf/python/r/codebuild_resource_policy.html.markdown @@ -67,6 +67,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `resource_arn` - (Required) The ARN of the Project or ReportGroup resource you want to associate with a resource policy. * `policy` - (Required) A JSON-formatted resource policy. For more information, see [Sharing a Projec](https://docs.aws.amazon.com/codebuild/latest/userguide/project-sharing.html#project-sharing-share) and [Sharing a Report Group](https://docs.aws.amazon.com/codebuild/latest/userguide/report-groups-sharing.html#report-groups-sharing-share). @@ -78,6 +79,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. 
For example: + +```terraform +import { + to = aws_codebuild_resource_policy.example + identity = { + "arn" = "arn:aws:codebuild:us-west-2:123456789012:report-group/report-group-name" + } +} + +resource "aws_codebuild_resource_policy" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the CodeBuild resource. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import CodeBuild Resource Policy using the CodeBuild Resource Policy arn. For example: ```python @@ -101,4 +123,4 @@ Using `terraform import`, import CodeBuild Resource Policy using the CodeBuild R % terraform import aws_codebuild_resource_policy.example arn:aws:codebuild:us-west-2:123456789:report-group/report-group-name ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/codebuild_source_credential.html.markdown b/website/docs/cdktf/python/r/codebuild_source_credential.html.markdown index a74b800d5e29..672dccf7b4a9 100644 --- a/website/docs/cdktf/python/r/codebuild_source_credential.html.markdown +++ b/website/docs/cdktf/python/r/codebuild_source_credential.html.markdown @@ -85,6 +85,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `auth_type` - (Required) The type of authentication used to connect to a GitHub, GitHub Enterprise, or Bitbucket repository. Valid values are `BASIC_AUTH`, `PERSONAL_ACCESS_TOKEN`, `CODECONNECTIONS`, and `SECRETS_MANAGER`. An OAUTH connection is not supported by the API. 
@@ -104,6 +105,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_codebuild_source_credential.example + identity = { + "arn" = "arn:aws:codebuild:us-west-2:123456789012:token/github" + } +} + +resource "aws_codebuild_source_credential" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the CodeBuild source credential. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import CodeBuild Source Credential using the CodeBuild Source Credential arn. For example: @@ -128,4 +150,4 @@ Using `terraform import`, import CodeBuild Source Credential using the CodeBuild % terraform import aws_codebuild_source_credential.example arn:aws:codebuild:us-west-2:123456789:token:github ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/codebuild_webhook.html.markdown b/website/docs/cdktf/python/r/codebuild_webhook.html.markdown index 81a8e35d4066..70cd79507e15 100644 --- a/website/docs/cdktf/python/r/codebuild_webhook.html.markdown +++ b/website/docs/cdktf/python/r/codebuild_webhook.html.markdown @@ -92,33 +92,71 @@ class MyConvertedCode(TerraformStack): github_repository_webhook_example.override_logical_id("example") ``` +### For CodeBuild Runner Project + +To create a CodeBuild project as a Runner Project, the following `aws_codebuild_webhook` resource is required for the project. +See the [AWS Documentation](https://docs.aws.amazon.com/codebuild/latest/userguide/action-runner.html) for more information about CodeBuild Runner Projects. + +```python +# DO NOT EDIT.
Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import Token, TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.codebuild_webhook import CodebuildWebhook +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + CodebuildWebhook(self, "example", + build_type="BUILD", + filter_group=[CodebuildWebhookFilterGroup( + filter=[CodebuildWebhookFilterGroupFilter( + pattern="WORKFLOW_JOB_QUEUED", + type="EVENT" + ) + ] + ) + ], + project_name=Token.as_string(aws_codebuild_project_example.name) + ) +``` + ## Argument Reference This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `project_name` - (Required) The name of the build project. * `build_type` - (Optional) The type of build this webhook will trigger. Valid values for this parameter are: `BUILD`, `BUILD_BATCH`. * `manual_creation` - (Optional) If true, CodeBuild doesn't create a webhook in GitHub and instead returns `payload_url` and `secret` values for the webhook. The `payload_url` and `secret` values in the output can be used to manually create a webhook within GitHub. * `branch_filter` - (Optional) A regular expression used to determine which branches get built. Default is all branches are built. We recommend using `filter_group` over `branch_filter`. -* `filter_group` - (Optional) Information about the webhook's trigger. Filter group blocks are documented below. -* `scope_configuration` - (Optional) Scope configuration for global or organization webhooks. 
Scope configuration blocks are documented below. +* `filter_group` - (Optional) Information about the webhook's trigger. See [filter_group](#filter_group) for details. +* `scope_configuration` - (Optional) Scope configuration for global or organization webhooks. See [scope_configuration](#scope_configuration) for details. +* `pull_request_build_policy` - (Optional) Defines comment-based approval requirements for triggering builds on pull requests. See [pull_request_build_policy](#pull_request_build_policy) for details. -`filter_group` supports the following: +### filter_group -* `filter` - (Required) A webhook filter for the group. Filter blocks are documented below. +* `filter` - (Required) A webhook filter for the group. See [filter](#filter) for details. -`filter` supports the following: +### filter * `type` - (Required) The webhook filter group's type. Valid values for this parameter are: `EVENT`, `BASE_REF`, `HEAD_REF`, `ACTOR_ACCOUNT_ID`, `FILE_PATH`, `COMMIT_MESSAGE`, `WORKFLOW_NAME`, `TAG_NAME`, `RELEASE_NAME`. At least one filter group must specify `EVENT` as its type. * `pattern` - (Required) For a filter that uses `EVENT` type, a comma-separated string that specifies one event: `PUSH`, `PULL_REQUEST_CREATED`, `PULL_REQUEST_UPDATED`, `PULL_REQUEST_REOPENED`. `PULL_REQUEST_MERGED`, `WORKFLOW_JOB_QUEUED` works with GitHub & GitHub Enterprise only. For a filter that uses any of the other filter types, a regular expression. * `exclude_matched_pattern` - (Optional) If set to `true`, the specified filter does *not* trigger a build. Defaults to `false`. -`scope_configuration` supports the following: +### scope_configuration * `name` - (Required) The name of either the enterprise or organization. * `scope` - (Required) The type of scope for a GitHub webhook. Valid values for this parameter are: `GITHUB_ORGANIZATION`, `GITHUB_GLOBAL`. * `domain` - (Optional) The domain of the GitHub Enterprise organization. 
Required if your project's source type is GITHUB_ENTERPRISE. +### pull_request_build_policy + +* `requires_comment_approval` - (Required) Specifies when comment-based approval is required before triggering a build on pull requests. Valid values are: `DISABLED`, `ALL_PULL_REQUESTS`, and `FORK_PULL_REQUESTS`. +* `approver_roles` - (Optional) List of repository roles that have approval privileges for pull request builds when comment approval is required. This argument must be specified only when `requires_comment_approval` is not `DISABLED`. See the [AWS documentation](https://docs.aws.amazon.com/codebuild/latest/userguide/pull-request-build-policy.html#pull-request-build-policy.configuration) for valid values and defaults. + ## Attribute Reference This resource exports the following attributes in addition to the arguments above: @@ -155,4 +193,4 @@ Using `terraform import`, import CodeBuild Webhooks using the CodeBuild Project % terraform import aws_codebuild_webhook.example MyProjectName ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/codecatalyst_dev_environment.html.markdown b/website/docs/cdktf/python/r/codecatalyst_dev_environment.html.markdown index 9e671f0e9f44..804d6dd999de 100644 --- a/website/docs/cdktf/python/r/codecatalyst_dev_environment.html.markdown +++ b/website/docs/cdktf/python/r/codecatalyst_dev_environment.html.markdown @@ -58,6 +58,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `inactivity_timeout_minutes` - (Optional) The amount of time the Dev Environment will run without any activity detected before stopping, in minutes. Only whole integers are allowed. 
Dev Environments consume compute minutes when running. * `repositories` - (Optional) The source repository that contains the branch to clone into the Dev Environment. @@ -89,4 +90,4 @@ This resource exports the following attributes in addition to the arguments abov - `update` - (Default `10m`) - `delete` - (Default `10m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/codecatalyst_project.html.markdown b/website/docs/cdktf/python/r/codecatalyst_project.html.markdown index e0fd642e07b2..fb97ecc57b7a 100644 --- a/website/docs/cdktf/python/r/codecatalyst_project.html.markdown +++ b/website/docs/cdktf/python/r/codecatalyst_project.html.markdown @@ -44,6 +44,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` - (Optional) The description of the project. This description will be displayed to all users of the project. We recommend providing a brief description of the project and its intended purpose. ## Attribute Reference @@ -86,4 +87,4 @@ Using `terraform import`, import CodeCatalyst Project using the `id`. 
For exampl % terraform import aws_codecatalyst_project.example project-id-12345678 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/codecatalyst_source_repository.html.markdown b/website/docs/cdktf/python/r/codecatalyst_source_repository.html.markdown index 90f6443e2264..3e39b94b8e73 100644 --- a/website/docs/cdktf/python/r/codecatalyst_source_repository.html.markdown +++ b/website/docs/cdktf/python/r/codecatalyst_source_repository.html.markdown @@ -45,6 +45,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` - (Optional) The description of the project. This description will be displayed to all users of the project. We recommend providing a brief description of the project and its intended purpose. ## Attribute Reference @@ -86,4 +87,4 @@ Using `terraform import`, import CodeCatalyst Source Repository using the `id`. % terraform import aws_codecatalyst_source_repository.example example-repo ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/codecommit_approval_rule_template.html.markdown b/website/docs/cdktf/python/r/codecommit_approval_rule_template.html.markdown index 78572d8fbe94..d00c7ffa4da8 100644 --- a/website/docs/cdktf/python/r/codecommit_approval_rule_template.html.markdown +++ b/website/docs/cdktf/python/r/codecommit_approval_rule_template.html.markdown @@ -48,6 +48,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `content` - (Required) The content of the approval rule template. Maximum of 3000 characters. * `name` - (Required) The name for the approval rule template. Maximum of 100 characters. * `description` - (Optional) The description of the approval rule template. Maximum of 1000 characters. @@ -87,4 +88,4 @@ Using `terraform import`, import CodeCommit approval rule templates using the `n % terraform import aws_codecommit_approval_rule_template.imported ExistingApprovalRuleTemplateName ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/codecommit_approval_rule_template_association.html.markdown b/website/docs/cdktf/python/r/codecommit_approval_rule_template_association.html.markdown index d3b8da4db125..9c00e790116d 100644 --- a/website/docs/cdktf/python/r/codecommit_approval_rule_template_association.html.markdown +++ b/website/docs/cdktf/python/r/codecommit_approval_rule_template_association.html.markdown @@ -36,6 +36,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `approval_rule_template_name` - (Required) The name for the approval rule template. * `repository_name` - (Required) The name of the repository that you want to associate with the template. 
@@ -70,4 +71,4 @@ Using `terraform import`, import CodeCommit approval rule template associations % terraform import aws_codecommit_approval_rule_template_association.example approver-rule-for-example,MyExampleRepo ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/codecommit_repository.html.markdown b/website/docs/cdktf/python/r/codecommit_repository.html.markdown index 5f8d7471fe58..da8d0edf7d33 100644 --- a/website/docs/cdktf/python/r/codecommit_repository.html.markdown +++ b/website/docs/cdktf/python/r/codecommit_repository.html.markdown @@ -64,6 +64,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `repository_name` - (Required) The name for the repository. This needs to be less than 100 characters. * `description` - (Optional) The description of the repository. This needs to be less than 1000 characters * `default_branch` - (Optional) The default branch of the repository. The branch specified here needs to exist. @@ -105,4 +106,4 @@ Using `terraform import`, import CodeCommit repository using repository name. 
Fo % terraform import aws_codecommit_repository.imported ExistingRepo ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/codecommit_trigger.html.markdown b/website/docs/cdktf/python/r/codecommit_trigger.html.markdown index 6e4188ed374d..e0b23fbe054a 100644 --- a/website/docs/cdktf/python/r/codecommit_trigger.html.markdown +++ b/website/docs/cdktf/python/r/codecommit_trigger.html.markdown @@ -49,6 +49,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `repository_name` - (Required) The name for the repository. This needs to be less than 100 characters. * `trigger` - (Required) The name of the trigger. * `name` - (Required) The name of the trigger. @@ -63,4 +64,4 @@ This resource exports the following attributes in addition to the arguments abov * `configuration_id` - System-generated unique identifier. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/codeconnections_connection.html.markdown b/website/docs/cdktf/python/r/codeconnections_connection.html.markdown index f761c1bac3e6..da7251c3fb4c 100644 --- a/website/docs/cdktf/python/r/codeconnections_connection.html.markdown +++ b/website/docs/cdktf/python/r/codeconnections_connection.html.markdown @@ -40,6 +40,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The name of the connection to be created. The name must be unique in the calling AWS account. Changing `name` will create a new resource. * `provider_type` - (Optional) The name of the external provider where your third-party code repository is configured. Changing `provider_type` will create a new resource. Conflicts with `host_arn`. * `host_arn` - (Optional) The Amazon Resource Name (ARN) of the host associated with the connection. Conflicts with `provider_type` @@ -49,13 +50,34 @@ This resource supports the following arguments: This resource exports the following attributes in addition to the arguments above: -* `id` - The codeconnections connection ARN. * `arn` - The codeconnections connection ARN. * `connection_status` - The codeconnections connection status. Possible values are `PENDING`, `AVAILABLE` and `ERROR`. +* `id` - (**Deprecated**) The codeconnections connection ARN. * `tags_all` - A map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_codeconnections_connection.example + identity = { + "arn" = "arn:aws:codeconnections:us-west-2:123456789012:connection/example-connection-id" + } +} + +resource "aws_codeconnections_connection" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the CodeConnections connection. 
+ In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import CodeConnections connection using the ARN. For example: ```python @@ -79,4 +101,4 @@ Using `terraform import`, import CodeConnections connection using the ARN. For e % terraform import aws_codeconnections_connection.test-connection arn:aws:codeconnections:us-west-1:0123456789:connection/79d4d357-a2ee-41e4-b350-2fe39ae59448 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/codeconnections_host.html.markdown b/website/docs/cdktf/python/r/codeconnections_host.html.markdown index f412201d32c2..bdf3cb8a1183 100644 --- a/website/docs/cdktf/python/r/codeconnections_host.html.markdown +++ b/website/docs/cdktf/python/r/codeconnections_host.html.markdown @@ -41,6 +41,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The name of the host to be created. The name must be unique in the calling AWS account. * `provider_endpoint` - (Required) The endpoint of the infrastructure to be represented by the host after it is created. * `provider_type` - (Required) The name of the external provider where your third-party code repository is configured. @@ -57,12 +58,33 @@ A `vpc_configuration` block supports the following arguments: This resource exports the following attributes in addition to the arguments above: -* `id` - The CodeConnections Host ARN. * `arn` - The CodeConnections Host ARN. +* `id` - (**Deprecated**) The CodeConnections Host ARN. * `status` - The CodeConnections Host status. 
Possible values are `PENDING`, `AVAILABLE`, `VPC_CONFIG_DELETING`, `VPC_CONFIG_INITIALIZING`, and `VPC_CONFIG_FAILED_INITIALIZATION`. ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_codeconnections_host.example + identity = { + "arn" = "arn:aws:codeconnections:us-west-2:123456789012:host/example-host-id" + } +} + +resource "aws_codeconnections_host" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the CodeConnections host. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import CodeConnections Host using the ARN. For example: ```python @@ -86,4 +108,4 @@ Using `terraform import`, import CodeConnections Host using the ARN. For example % terraform import aws_codeconnections_host.example-host arn:aws:codeconnections:us-west-1:0123456789:host/79d4d357-a2ee-41e4-b350-2fe39ae59448 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/codedeploy_app.html.markdown b/website/docs/cdktf/python/r/codedeploy_app.html.markdown index c87845367884..2518b6f3b758 100644 --- a/website/docs/cdktf/python/r/codedeploy_app.html.markdown +++ b/website/docs/cdktf/python/r/codedeploy_app.html.markdown @@ -78,6 +78,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The name of the application. 
* `compute_platform` - (Optional) The compute platform can either be `ECS`, `Lambda`, or `Server`. Default is `Server`. * `tags` - (Optional) Key-value map of resource tags. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. @@ -119,4 +120,4 @@ Using `terraform import`, import CodeDeploy Applications using the `name`. For e % terraform import aws_codedeploy_app.example my-application ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/codedeploy_deployment_config.html.markdown b/website/docs/cdktf/python/r/codedeploy_deployment_config.html.markdown index 55e554c4ea9a..ba7186342f4d 100644 --- a/website/docs/cdktf/python/r/codedeploy_deployment_config.html.markdown +++ b/website/docs/cdktf/python/r/codedeploy_deployment_config.html.markdown @@ -114,6 +114,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `deployment_config_name` - (Required) The name of the deployment config. * `compute_platform` - (Optional) The compute platform can be `Server`, `Lambda`, or `ECS`. Default is `Server`. * `minimum_healthy_hosts` - (Optional) A minimum_healthy_hosts block. Required for `Server` compute platform. Minimum Healthy Hosts are documented below. 
@@ -188,4 +189,4 @@ Using `terraform import`, import CodeDeploy Deployment Configurations using the % terraform import aws_codedeploy_deployment_config.example my-deployment-config ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/codedeploy_deployment_group.html.markdown b/website/docs/cdktf/python/r/codedeploy_deployment_group.html.markdown index bceea40dfd5b..9f3c5009e11f 100644 --- a/website/docs/cdktf/python/r/codedeploy_deployment_group.html.markdown +++ b/website/docs/cdktf/python/r/codedeploy_deployment_group.html.markdown @@ -216,6 +216,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `app_name` - (Required) The name of the application. * `deployment_group_name` - (Required) The name of the deployment group. * `service_role_arn` - (Required) The service role ARN that allows deployments. 
@@ -415,4 +416,4 @@ Using `terraform import`, import CodeDeploy Deployment Groups using `app_name`, [1]: http://docs.aws.amazon.com/codedeploy/latest/userguide/monitoring-sns-event-notifications-create-trigger.html - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/codeguruprofiler_profiling_group.html.markdown b/website/docs/cdktf/python/r/codeguruprofiler_profiling_group.html.markdown index e03de65b03d6..b7e8decc3393 100644 --- a/website/docs/cdktf/python/r/codeguruprofiler_profiling_group.html.markdown +++ b/website/docs/cdktf/python/r/codeguruprofiler_profiling_group.html.markdown @@ -46,6 +46,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `compute_platform` - (Optional) Compute platform of the profiling group. * `tags` - (Optional) Map of tags assigned to the resource. If configured with a provider [`default_tags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. 
@@ -86,4 +87,4 @@ Using `terraform import`, import CodeGuru Profiler Profiling Group using the `id % terraform import aws_codeguruprofiler_profiling_group.example profiling_group-name-12345678 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/codegurureviewer_repository_association.html.markdown b/website/docs/cdktf/python/r/codegurureviewer_repository_association.html.markdown index 7d3209263039..85e591733eaf 100644 --- a/website/docs/cdktf/python/r/codegurureviewer_repository_association.html.markdown +++ b/website/docs/cdktf/python/r/codegurureviewer_repository_association.html.markdown @@ -62,6 +62,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `kms_key_details` - (Optional) An object describing the KMS key to asssociate. Block is documented below. ## repository @@ -118,4 +119,4 @@ This resource exports the following attributes in addition to the arguments abov * `update` - (Default `180m`) * `delete` - (Default `90m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/codepipeline.html.markdown b/website/docs/cdktf/python/r/codepipeline.html.markdown index c8a1a269802e..d1183d2ff503 100644 --- a/website/docs/cdktf/python/r/codepipeline.html.markdown +++ b/website/docs/cdktf/python/r/codepipeline.html.markdown @@ -161,18 +161,19 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The name of the pipeline. * `pipeline_type` - (Optional) Type of the pipeline. Possible values are: `V1` and `V2`. Default value is `V1`. * `role_arn` - (Required) A service role Amazon Resource Name (ARN) that grants AWS CodePipeline permission to make calls to AWS services on your behalf. * `artifact_store` (Required) One or more artifact_store blocks. Artifact stores are documented below. * `execution_mode` (Optional) The method that the pipeline will use to handle multiple executions. The default mode is `SUPERSEDED`. For value values, refer to the [AWS documentation](https://docs.aws.amazon.com/codepipeline/latest/APIReference/API_PipelineDeclaration.html#CodePipeline-Type-PipelineDeclaration-executionMode). - - **Note:** `QUEUED` or `PARALLEL` mode can only be used with V2 pipelines. * `stage` (Minimum of at least two `stage` blocks is required) A stage block. Stages are documented below. * `tags` - (Optional) A map of tags to assign to the resource. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. * `trigger` - (Optional) A trigger block. Valid only when `pipeline_type` is `V2`. Triggers are documented below. * `variable` - (Optional) A pipeline-level variable block. Valid only when `pipeline_type` is `V2`. Variable are documented below. +**Note:** `QUEUED` or `PARALLEL` mode can only be used with V2 pipelines. + ### `artifact_store` An `artifact_store` block supports the following arguments: @@ -367,4 +368,4 @@ Using `terraform import`, import CodePipelines using the `name`. 
For example: % terraform import aws_codepipeline.example example-pipeline ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/codepipeline_custom_action_type.html.markdown b/website/docs/cdktf/python/r/codepipeline_custom_action_type.html.markdown index 6f28ae56bf1a..9877dc45579b 100644 --- a/website/docs/cdktf/python/r/codepipeline_custom_action_type.html.markdown +++ b/website/docs/cdktf/python/r/codepipeline_custom_action_type.html.markdown @@ -45,6 +45,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `category` - (Required) The category of the custom action. Valid values: `Source`, `Build`, `Deploy`, `Test`, `Invoke`, `Approval` * `configuration_property` - (Optional) The configuration properties for the custom action. Max 10 items. @@ -119,4 +120,4 @@ Using `terraform import`, import CodeDeploy CustomActionType using the `id`. For % terraform import aws_codepipeline_custom_action_type.example Build:terraform:1 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/codepipeline_webhook.html.markdown b/website/docs/cdktf/python/r/codepipeline_webhook.html.markdown index 45d3cf2dbbfb..3b86459c6baa 100644 --- a/website/docs/cdktf/python/r/codepipeline_webhook.html.markdown +++ b/website/docs/cdktf/python/r/codepipeline_webhook.html.markdown @@ -112,6 +112,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The name of the webhook. * `authentication` - (Required) The type of authentication to use. One of `IP`, `GITHUB_HMAC`, or `UNAUTHENTICATED`. * `authentication_configuration` - (Optional) An `auth` block. Required for `IP` and `GITHUB_HMAC`. Auth blocks are documented below. @@ -141,6 +142,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_codepipeline_webhook.example + identity = { + "arn" = "arn:aws:codepipeline:us-west-2:123456789012:webhook:example-webhook" + } +} + +resource "aws_codepipeline_webhook" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the CodePipeline webhook. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import CodePipeline Webhooks using their ARN. For example: ```python @@ -164,4 +186,4 @@ Using `terraform import`, import CodePipeline Webhooks using their ARN. 
For exam % terraform import aws_codepipeline_webhook.example arn:aws:codepipeline:us-west-2:123456789012:webhook:example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/codestarconnections_connection.html.markdown b/website/docs/cdktf/python/r/codestarconnections_connection.html.markdown index 57a60e872d3e..b3b70ced7b30 100644 --- a/website/docs/cdktf/python/r/codestarconnections_connection.html.markdown +++ b/website/docs/cdktf/python/r/codestarconnections_connection.html.markdown @@ -88,6 +88,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The name of the connection to be created. The name must be unique in the calling AWS account. Changing `name` will create a new resource. * `provider_type` - (Optional) The name of the external provider where your third-party code repository is configured. Valid values are `Bitbucket`, `GitHub`, `GitHubEnterpriseServer`, `GitLab` or `GitLabSelfManaged`. Changing `provider_type` will create a new resource. Conflicts with `host_arn` * `host_arn` - (Optional) The Amazon Resource Name (ARN) of the host associated with the connection. Conflicts with `provider_type` @@ -104,6 +105,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. 
For example: + +```terraform +import { + to = aws_codestarconnections_connection.example + identity = { + "arn" = "arn:aws:codestar-connections:us-west-2:123456789012:connection/example-connection-id" + } +} + +resource "aws_codestarconnections_connection" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the CodeStar connection. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import CodeStar connections using the ARN. For example: ```python @@ -127,4 +149,4 @@ Using `terraform import`, import CodeStar connections using the ARN. For example % terraform import aws_codestarconnections_connection.test-connection arn:aws:codestar-connections:us-west-1:0123456789:connection/79d4d357-a2ee-41e4-b350-2fe39ae59448 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/codestarconnections_host.html.markdown b/website/docs/cdktf/python/r/codestarconnections_host.html.markdown index 09f740c51021..caf1f7b0f40d 100644 --- a/website/docs/cdktf/python/r/codestarconnections_host.html.markdown +++ b/website/docs/cdktf/python/r/codestarconnections_host.html.markdown @@ -39,6 +39,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The name of the host to be created. The name must be unique in the calling AWS account. * `provider_endpoint` - (Required) The endpoint of the infrastructure to be represented by the host after it is created. 
* `provider_type` - (Required) The name of the external provider where your third-party code repository is configured. @@ -61,6 +62,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_codestarconnections_host.example + identity = { + "arn" = "arn:aws:codestar-connections:us-west-2:123456789012:host/example-host-id" + } +} + +resource "aws_codestarconnections_host" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the CodeStar connections host. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import CodeStar Host using the ARN. For example: ```python @@ -84,4 +106,4 @@ Using `terraform import`, import CodeStar Host using the ARN. For example: % terraform import aws_codestarconnections_host.example-host arn:aws:codestar-connections:us-west-1:0123456789:host/79d4d357-a2ee-41e4-b350-2fe39ae59448 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/codestarnotifications_notification_rule.html.markdown b/website/docs/cdktf/python/r/codestarnotifications_notification_rule.html.markdown index bf65f4735075..bbd9a74030a2 100644 --- a/website/docs/cdktf/python/r/codestarnotifications_notification_rule.html.markdown +++ b/website/docs/cdktf/python/r/codestarnotifications_notification_rule.html.markdown @@ -68,6 +68,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `detail_type` - (Required) The level of detail to include in the notifications for this resource. Possible values are `BASIC` and `FULL`. * `event_type_ids` - (Required) A list of event types associated with this notification rule. For list of allowed events see [here](https://docs.aws.amazon.com/codestar-notifications/latest/userguide/concepts.html#concepts-api). @@ -92,6 +93,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_codestarnotifications_notification_rule.example + identity = { + "arn" = "arn:aws:codestar-notifications:us-west-2:123456789012:notificationrule/dc82df7a-9435-44d4-a696-78f67EXAMPLE" + } +} + +resource "aws_codestarnotifications_notification_rule" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the CodeStar notification rule. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import CodeStar notification rule using the ARN. For example: ```python @@ -115,4 +137,4 @@ Using `terraform import`, import CodeStar notification rule using the ARN. 
For e % terraform import aws_codestarnotifications_notification_rule.foo arn:aws:codestar-notifications:us-west-1:0123456789:notificationrule/2cdc68a3-8f7c-4893-b6a5-45b362bd4f2b ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/cognito_identity_pool.html.markdown b/website/docs/cdktf/python/r/cognito_identity_pool.html.markdown index 32503b283487..69887be2e57a 100644 --- a/website/docs/cdktf/python/r/cognito_identity_pool.html.markdown +++ b/website/docs/cdktf/python/r/cognito_identity_pool.html.markdown @@ -59,6 +59,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `identity_pool_name` (Required) - The Cognito Identity Pool name. * `allow_unauthenticated_identities` (Required) - Whether the identity pool supports unauthenticated logins or not. * `allow_classic_flow` (Optional) - Enables or disables the classic / basic authentication flow. Default is `false`. @@ -109,4 +110,4 @@ Using `terraform import`, import Cognito Identity Pool using its ID. 
For example % terraform import aws_cognito_identity_pool.mypool us-west-2:1a234567-8901-234b-5cde-f6789g01h2i3 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/cognito_identity_pool_provider_principal_tag.html.markdown b/website/docs/cdktf/python/r/cognito_identity_pool_provider_principal_tag.html.markdown index c0eac9f43e5b..d5afe67ec741 100644 --- a/website/docs/cdktf/python/r/cognito_identity_pool_provider_principal_tag.html.markdown +++ b/website/docs/cdktf/python/r/cognito_identity_pool_provider_principal_tag.html.markdown @@ -70,6 +70,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `identity_pool_id` (Required) - An identity pool ID. * `identity_provider_name` (Required) - The name of the identity provider. * `principal_tags`: (Optional: []) - String to string map of variables. 
@@ -104,4 +105,4 @@ Using `terraform import`, import Cognito Identity Pool Roles Attachment using th % terraform import aws_cognito_identity_pool_provider_principal_tag.example us-west-2_abc123:CorpAD ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/cognito_identity_pool_roles_attachment.html.markdown b/website/docs/cdktf/python/r/cognito_identity_pool_roles_attachment.html.markdown index b8fcfba9dc04..af41dc4a721d 100644 --- a/website/docs/cdktf/python/r/cognito_identity_pool_roles_attachment.html.markdown +++ b/website/docs/cdktf/python/r/cognito_identity_pool_roles_attachment.html.markdown @@ -109,6 +109,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `identity_pool_id` (Required) - An identity pool ID in the format `REGION_GUID`. * `role_mapping` (Optional) - A List of [Role Mapping](#role-mappings). * `roles` (Required) - The map of roles associated with this pool. For a given role, the key will be either "authenticated" or "unauthenticated" and the value will be the Role ARN. 
@@ -158,4 +159,4 @@ Using `terraform import`, import Cognito Identity Pool Roles Attachment using th % terraform import aws_cognito_identity_pool_roles_attachment.example us-west-2:b64805ad-cb56-40ba-9ffc-f5d8207e6d42 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/cognito_identity_provider.html.markdown b/website/docs/cdktf/python/r/cognito_identity_provider.html.markdown index c9fa696d76ce..857225fcef67 100644 --- a/website/docs/cdktf/python/r/cognito_identity_provider.html.markdown +++ b/website/docs/cdktf/python/r/cognito_identity_provider.html.markdown @@ -52,6 +52,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `user_pool_id` (Required) - The user pool id * `provider_name` (Required) - The provider name * `provider_type` (Required) - The provider type. 
[See AWS API for valid values](https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_CreateIdentityProvider.html#CognitoUserPools-CreateIdentityProvider-request-ProviderType) @@ -88,4 +89,4 @@ Using `terraform import`, import `aws_cognito_identity_provider` resources using % terraform import aws_cognito_identity_provider.example us-west-2_abc123:CorpAD ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/cognito_log_delivery_configuration.html.markdown b/website/docs/cdktf/python/r/cognito_log_delivery_configuration.html.markdown new file mode 100644 index 000000000000..d793b7008552 --- /dev/null +++ b/website/docs/cdktf/python/r/cognito_log_delivery_configuration.html.markdown @@ -0,0 +1,298 @@ +--- +subcategory: "Cognito IDP (Identity Provider)" +layout: "aws" +page_title: "AWS: aws_cognito_log_delivery_configuration" +description: |- + Manages an AWS Cognito IDP (Identity Provider) Log Delivery Configuration. +--- + + + +# Resource: aws_cognito_log_delivery_configuration + +Manages an AWS Cognito IDP (Identity Provider) Log Delivery Configuration. + +## Example Usage + +### Basic Usage with CloudWatch Logs + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import Token, TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. 
+# +from imports.aws.cloudwatch_log_group import CloudwatchLogGroup +from imports.aws.cognito_log_delivery_configuration import CognitoLogDeliveryConfiguration +from imports.aws.cognito_user_pool import CognitoUserPool +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + example = CloudwatchLogGroup(self, "example", + name="example" + ) + aws_cognito_user_pool_example = CognitoUserPool(self, "example_1", + name="example" + ) + # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. + aws_cognito_user_pool_example.override_logical_id("example") + aws_cognito_log_delivery_configuration_example = + CognitoLogDeliveryConfiguration(self, "example_2", + log_configurations=[CognitoLogDeliveryConfigurationLogConfigurations( + cloud_watch_logs_configuration=[CognitoLogDeliveryConfigurationLogConfigurationsCloudWatchLogsConfiguration( + log_group_arn=example.arn + ) + ], + event_source="userNotification", + log_level="ERROR" + ) + ], + user_pool_id=Token.as_string(aws_cognito_user_pool_example.id) + ) + # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. + aws_cognito_log_delivery_configuration_example.override_logical_id("example") +``` + +### Multiple Log Configurations with Different Destinations + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import Fn, Token, TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. 
+# +from imports.aws.cloudwatch_log_group import CloudwatchLogGroup +from imports.aws.cognito_log_delivery_configuration import CognitoLogDeliveryConfiguration +from imports.aws.cognito_user_pool import CognitoUserPool +from imports.aws.iam_role import IamRole +from imports.aws.iam_role_policy import IamRolePolicy +from imports.aws.kinesis_firehose_delivery_stream import KinesisFirehoseDeliveryStream +from imports.aws.s3_bucket import S3Bucket +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + example = CloudwatchLogGroup(self, "example", + name="example" + ) + aws_cognito_user_pool_example = CognitoUserPool(self, "example_1", + name="example" + ) + # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. + aws_cognito_user_pool_example.override_logical_id("example") + firehose = IamRole(self, "firehose", + assume_role_policy=Token.as_string( + Fn.jsonencode({ + "Statement": [{ + "Action": "sts:AssumeRole", + "Effect": "Allow", + "Principal": { + "Service": "firehose.amazonaws.com" + } + } + ], + "Version": "2012-10-17" + })), + name="firehose-role" + ) + aws_s3_bucket_example = S3Bucket(self, "example_3", + bucket="example-bucket", + force_destroy=True + ) + # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. 
+ aws_s3_bucket_example.override_logical_id("example") + aws_iam_role_policy_firehose = IamRolePolicy(self, "firehose_4", + name="firehose-policy", + policy=Token.as_string( + Fn.jsonencode({ + "Statement": [{ + "Action": ["s3:AbortMultipartUpload", "s3:GetBucketLocation", "s3:GetObject", "s3:ListBucket", "s3:ListBucketMultipartUploads", "s3:PutObject" + ], + "Effect": "Allow", + "Resource": [aws_s3_bucket_example.arn, "${" + aws_s3_bucket_example.arn + "}/*" + ] + } + ], + "Version": "2012-10-17" + })), + role=firehose.id + ) + # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. + aws_iam_role_policy_firehose.override_logical_id("firehose") + aws_kinesis_firehose_delivery_stream_example = + KinesisFirehoseDeliveryStream(self, "example_5", + destination="extended_s3", + extended_s3_configuration=KinesisFirehoseDeliveryStreamExtendedS3Configuration( + bucket_arn=Token.as_string(aws_s3_bucket_example.arn), + role_arn=firehose.arn + ), + name="example-stream" + ) + # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. 
+ aws_kinesis_firehose_delivery_stream_example.override_logical_id("example") + aws_cognito_log_delivery_configuration_example = + CognitoLogDeliveryConfiguration(self, "example_6", + log_configurations=[CognitoLogDeliveryConfigurationLogConfigurations( + cloud_watch_logs_configuration=[CognitoLogDeliveryConfigurationLogConfigurationsCloudWatchLogsConfiguration( + log_group_arn=example.arn + ) + ], + event_source="userNotification", + log_level="INFO" + ), CognitoLogDeliveryConfigurationLogConfigurations( + event_source="userAuthEvents", + firehose_configuration=[CognitoLogDeliveryConfigurationLogConfigurationsFirehoseConfiguration( + stream_arn=Token.as_string(aws_kinesis_firehose_delivery_stream_example.arn) + ) + ], + log_level="ERROR" + ) + ], + user_pool_id=Token.as_string(aws_cognito_user_pool_example.id) + ) + # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. + aws_cognito_log_delivery_configuration_example.override_logical_id("example") +``` + +### S3 Configuration + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import Token, TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.cognito_log_delivery_configuration import CognitoLogDeliveryConfiguration +from imports.aws.cognito_user_pool import CognitoUserPool +from imports.aws.s3_bucket import S3Bucket +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + example = CognitoUserPool(self, "example", + name="example" + ) + aws_s3_bucket_example = S3Bucket(self, "example_1", + bucket="example-bucket", + force_destroy=True + ) + # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. 
+ aws_s3_bucket_example.override_logical_id("example") + aws_cognito_log_delivery_configuration_example = + CognitoLogDeliveryConfiguration(self, "example_2", + log_configurations=[CognitoLogDeliveryConfigurationLogConfigurations( + event_source="userNotification", + log_level="ERROR", + s3_configuration=[CognitoLogDeliveryConfigurationLogConfigurationsS3Configuration( + bucket_arn=Token.as_string(aws_s3_bucket_example.arn) + ) + ] + ) + ], + user_pool_id=example.id + ) + # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. + aws_cognito_log_delivery_configuration_example.override_logical_id("example") +``` + +## Argument Reference + +The following arguments are required: + +* `user_pool_id` - (Required) The ID of the user pool for which to configure log delivery. + +The following arguments are optional: + +* `log_configurations` - (Optional) Configuration block for log delivery. At least one configuration block is required. See [Log Configurations](#log-configurations) below. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). + +### Log Configurations + +The `log_configurations` block supports the following: + +* `event_source` - (Required) The event source to configure logging for. Valid values are `userNotification` and `userAuthEvents`. +* `log_level` - (Required) The log level to set for the event source. Valid values are `ERROR` and `INFO`. +* `cloud_watch_logs_configuration` - (Optional) Configuration for CloudWatch Logs delivery. See [CloudWatch Logs Configuration](#cloudwatch-logs-configuration) below. +* `firehose_configuration` - (Optional) Configuration for Kinesis Data Firehose delivery. See [Firehose Configuration](#firehose-configuration) below. +* `s3_configuration` - (Optional) Configuration for S3 delivery. See [S3 Configuration](#s3-configuration) below.
+ +~> **Note:** At least one destination configuration (`cloud_watch_logs_configuration`, `firehose_configuration`, or `s3_configuration`) must be specified for each log configuration. + +#### CloudWatch Logs Configuration + +The `cloud_watch_logs_configuration` block supports the following: + +* `log_group_arn` - (Optional) The ARN of the CloudWatch Logs log group to which the logs should be delivered. + +#### Firehose Configuration + +The `firehose_configuration` block supports the following: + +* `stream_arn` - (Optional) The ARN of the Kinesis Data Firehose delivery stream to which the logs should be delivered. + +#### S3 Configuration + +The `s3_configuration` block supports the following: + +* `bucket_arn` - (Optional) The ARN of the S3 bucket to which the logs should be delivered. + +## Attribute Reference + +This resource exports the following attributes in addition to the arguments above: + +## Import + +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_cognito_log_delivery_configuration.example + identity = { + user_pool_id = "us-west-2_example123" + } +} + +resource "aws_cognito_log_delivery_configuration" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `user_pool_id` (String) ID of the Cognito User Pool. + +#### Optional + +* `account_id` (String) AWS Account where this resource is managed. +* `region` (String) Region where this resource is managed. + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Cognito IDP (Identity Provider) Log Delivery Configuration using the `user_pool_id`. For example: + +```python +# DO NOT EDIT. 
Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.cognito_log_delivery_configuration import CognitoLogDeliveryConfiguration +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + CognitoLogDeliveryConfiguration.generate_config_for_import(self, "example", "us-west-2_example123") +``` + +Using `terraform import`, import Cognito IDP (Identity Provider) Log Delivery Configuration using the `user_pool_id`. For example: + +```console +% terraform import aws_cognito_log_delivery_configuration.example us-west-2_example123 +``` + + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/cognito_managed_login_branding.html.markdown b/website/docs/cdktf/python/r/cognito_managed_login_branding.html.markdown new file mode 100644 index 000000000000..a2a73015a41c --- /dev/null +++ b/website/docs/cdktf/python/r/cognito_managed_login_branding.html.markdown @@ -0,0 +1,120 @@ +--- +subcategory: "Cognito IDP (Identity Provider)" +layout: "aws" +page_title: "AWS: aws_cognito_managed_login_branding" +description: |- + Manages branding settings for a user pool style and associates it with an app client. +--- + + + +# Resource: aws_cognito_managed_login_branding + +Manages branding settings for a user pool style and associates it with an app client. + +## Example Usage + +### Default Branding Style + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import Token, TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. 
+# +from imports.aws.cognito_managed_login_branding import CognitoManagedLoginBranding +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + CognitoManagedLoginBranding(self, "client", + client_id=example.id, + use_cognito_provided_values=True, + user_pool_id=Token.as_string(aws_cognito_user_pool_example.id) + ) +``` + +### Custom Branding Style + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import Fn, Token, TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.cognito_managed_login_branding import CognitoManagedLoginBranding +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + CognitoManagedLoginBranding(self, "client", + asset=[CognitoManagedLoginBrandingAsset( + bytes=Token.as_string(Fn.filebase64("login_branding_asset.svg")), + category="PAGE_HEADER_BACKGROUND", + color_mode="DARK", + extension="SVG" + ) + ], + client_id=example.id, + settings=Token.as_string(Fn.jsonencode({})), + user_pool_id=Token.as_string(aws_cognito_user_pool_example.id) + ) +``` + +## Argument Reference + +The following arguments are required: + +* `client_id` - (Required) App client that the branding style is for. +* `user_pool_id` - (Required) User pool the client belongs to. + +The following arguments are optional: + +* `asset` - (Optional) Image files to apply to roles like backgrounds, logos, and icons. See [details below](#asset). +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
+* `settings` - (Optional) JSON document with the settings to apply to the style. +* `use_cognito_provided_values` - (Optional) When `true`, applies the default branding style options. + +### asset + +* `bytes` - (Optional) Image file, in Base64-encoded binary. +* `category` - (Required) Category that the image corresponds to. See [AWS documentation](https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_AssetType.html#CognitoUserPools-Type-AssetType-Category) for valid values. +* `color_mode` - (Required) Display-mode target of the asset. Valid values: `LIGHT`, `DARK`, `DYNAMIC`. +* `extension` - (Required) File type of the image file. See [AWS documentation](https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_AssetType.html#CognitoUserPools-Type-AssetType-Extension) for valid values. +* `resource_id` - (Optional) Asset ID. + +## Attribute Reference + +This resource exports the following attributes in addition to the arguments above: + +* `managed_login_branding_id` - ID of the managed login branding style. +* `settings_all` - Settings including Amazon Cognito defaults. + +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Cognito branding settings using `user_pool_id` and `managed_login_branding_id` separated by `,`. For example: + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. 
+# +from imports.aws.cognito_managed_login_branding import CognitoManagedLoginBranding +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + CognitoManagedLoginBranding.generate_config_for_import(self, "example", "us-west-2_rSss9Zltr,06c6ae7b-1e66-46d2-87a9-1203ea3307bd") +``` + +Using `terraform import`, import Cognito branding settings using `user_pool_id` and `managed_login_branding_id` separated by `,`. For example: + +```console +% terraform import aws_cognito_managed_login_branding.example us-west-2_rSss9Zltr,06c6ae7b-1e66-46d2-87a9-1203ea3307bd +``` + + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/cognito_managed_user_pool_client.html.markdown b/website/docs/cdktf/python/r/cognito_managed_user_pool_client.html.markdown index 99e8187f026f..39bbd188b464 100644 --- a/website/docs/cdktf/python/r/cognito_managed_user_pool_client.html.markdown +++ b/website/docs/cdktf/python/r/cognito_managed_user_pool_client.html.markdown @@ -126,6 +126,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `access_token_validity` - (Optional) Time limit, between 5 minutes and 1 day, after which the access token is no longer valid and cannot be used. By default, the unit is hours. The unit can be overridden by a value in `token_validity_units.access_token`. * `allowed_oauth_flows_user_pool_client` - (Optional) Whether the client is allowed to use OAuth 2.0 features. `allowed_oauth_flows_user_pool_client` must be set to `true` before you can configure the following arguments: `callback_urls`, `logout_urls`, `allowed_oauth_scopes` and `allowed_oauth_flows`. 
* `allowed_oauth_flows` - (Optional) List of allowed OAuth flows, including `code`, `implicit`, and `client_credentials`. `allowed_oauth_flows_user_pool_client` must be set to `true` before you can configure this option. @@ -204,4 +205,4 @@ Using `terraform import`, import Cognito User Pool Clients using the `id` of the % terraform import aws_cognito_managed_user_pool_client.client us-west-2_abc123/3ho4ek12345678909nh3fmhpko ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/cognito_resource_server.html.markdown b/website/docs/cdktf/python/r/cognito_resource_server.html.markdown index c212cd7ff369..ee728b79fbcc 100644 --- a/website/docs/cdktf/python/r/cognito_resource_server.html.markdown +++ b/website/docs/cdktf/python/r/cognito_resource_server.html.markdown @@ -74,6 +74,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `identifier` - (Required) An identifier for the resource server. * `name` - (Required) A name for the resource server. * `user_pool_id` - (Required) User pool the client belongs to. 
@@ -115,4 +116,4 @@ Using `terraform import`, import `aws_cognito_resource_server` using their User % terraform import aws_cognito_resource_server.example "us-west-2_abc123|https://example.com" ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/cognito_risk_configuration.html.markdown b/website/docs/cdktf/python/r/cognito_risk_configuration.html.markdown index 8daec104447c..b43ad8236db3 100644 --- a/website/docs/cdktf/python/r/cognito_risk_configuration.html.markdown +++ b/website/docs/cdktf/python/r/cognito_risk_configuration.html.markdown @@ -38,6 +38,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `user_pool_id` - (Required) The user pool ID. * `client_id` - (Optional) The app client ID. When the client ID is not provided, the same risk configuration is applied to all the clients in the User Pool. * `account_takeover_risk_configuration` - (Optional) The account takeover risk configuration. See details below. 
@@ -151,4 +152,4 @@ Import using the user pool ID and Client ID separated by a `:`: % terraform import aws_cognito_risk_configuration.main example:example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/cognito_user.html.markdown b/website/docs/cdktf/python/r/cognito_user.html.markdown index bc0069066dd8..b49225a1ce31 100644 --- a/website/docs/cdktf/python/r/cognito_user.html.markdown +++ b/website/docs/cdktf/python/r/cognito_user.html.markdown @@ -96,6 +96,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `attributes` - (Optional) A map that contains user attributes and attribute values to be set for the user. * `client_metadata` - (Optional) A map of custom key-value pairs that you can provide as input for any custom workflows that user creation triggers. Amazon Cognito does not store the `client_metadata` value. This data is available only to Lambda triggers that are assigned to a user pool to support custom workflows. If your user pool configuration does not include triggers, the ClientMetadata parameter serves no purpose. For more information, see [Customizing User Pool Workflows with Lambda Triggers](https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-user-identity-pools-working-with-aws-lambda-triggers.html). * `desired_delivery_mediums` - (Optional) A list of mediums to the welcome message will be sent through. Allowed values are `EMAIL` and `SMS`. If it's provided, make sure you have also specified `email` attribute for the `EMAIL` medium and `phone_number` for the `SMS`. More than one value can be specified. 
Amazon Cognito does not store the `desired_delivery_mediums` value. Defaults to `["SMS"]`. @@ -141,4 +142,4 @@ Using `terraform import`, import Cognito User using the `user_pool_id`/`name` at % terraform import aws_cognito_user.user us-east-1_vG78M4goG/user ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/cognito_user_group.html.markdown b/website/docs/cdktf/python/r/cognito_user_group.html.markdown index 2c32128dbfd9..a03530fceddf 100644 --- a/website/docs/cdktf/python/r/cognito_user_group.html.markdown +++ b/website/docs/cdktf/python/r/cognito_user_group.html.markdown @@ -75,6 +75,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The name of the user group. * `user_pool_id` - (Required) The user pool ID. * `description` - (Optional) The description of the user group. 
@@ -110,4 +111,4 @@ Using `terraform import`, import Cognito User Groups using the `user_pool_id`/`n % terraform import aws_cognito_user_group.group us-east-1_vG78M4goG/user-group ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/cognito_user_in_group.html.markdown b/website/docs/cdktf/python/r/cognito_user_in_group.html.markdown index 422d0cad80ba..815b5f554706 100644 --- a/website/docs/cdktf/python/r/cognito_user_in_group.html.markdown +++ b/website/docs/cdktf/python/r/cognito_user_in_group.html.markdown @@ -62,8 +62,9 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -The following arguments are required: +This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `user_pool_id` - (Required) The user pool ID of the user and group. * `group_name` - (Required) The name of the group to which the user is to be added. * `username` - (Required) The username of the user to be added to the group. @@ -72,4 +73,29 @@ The following arguments are required: This resource exports no additional attributes. - \ No newline at end of file +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import a Cognito Group User using a comma-delimited string concatenating the `user_pool_id`, `group_name`, and `username` arguments. For example: + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. 
+# +from imports.aws.cognito_user_in_group import CognitoUserInGroup +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + CognitoUserInGroup.generate_config_for_import(self, "example", "us-east-1_vG78M4goG,example-group,example-user") +``` + +Using `terraform import`, import a Cognito Group User using a comma-delimited string concatenating the `user_pool_id`, `group_name`, and `username` arguments. For example: + +```console +% terraform import aws_cognito_user_in_group.example us-east-1_vG78M4goG,example-group,example-user +``` + + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/cognito_user_pool.html.markdown b/website/docs/cdktf/python/r/cognito_user_pool.html.markdown index 5668689a3dfe..147f2338e5d6 100644 --- a/website/docs/cdktf/python/r/cognito_user_pool.html.markdown +++ b/website/docs/cdktf/python/r/cognito_user_pool.html.markdown @@ -95,6 +95,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the user pool. * `account_recovery_setting` - (Optional) Configuration block to define which verified available method a user can use to recover their forgotten password. [Detailed below](#account_recovery_setting). * `admin_create_user_config` - (Optional) Configuration block for creating a new user profile. [Detailed below](#admin_create_user_config). @@ -103,18 +104,18 @@ This resource supports the following arguments: * `deletion_protection` - (Optional) When active, DeletionProtection prevents accidental deletion of your user pool. 
Before you can delete a user pool that you have protected against deletion, you must deactivate this feature. Valid values are `ACTIVE` and `INACTIVE`, Default value is `INACTIVE`. * `device_configuration` - (Optional) Configuration block for the user pool's device tracking. [Detailed below](#device_configuration). * `email_configuration` - (Optional) Configuration block for configuring email. [Detailed below](#email_configuration). -* `email_mfa_configuration` - (Optional) Configuration block for configuring email Multi-Factor Authentication (MFA); requires at least 2 `account_recovery_setting` entries; requires an `email_configuration` configuration block. [Detailed below](#email_mfa_configuration). +* `email_mfa_configuration` - (Optional) Configuration block for configuring email Multi-Factor Authentication (MFA); requires at least 2 `account_recovery_setting` entries; requires an `email_configuration` configuration block. Effective only when `mfa_configuration` is `ON` or `OPTIONAL`. [Detailed below](#email_mfa_configuration). * `email_verification_message` - (Optional) String representing the email verification message. Conflicts with `verification_message_template` configuration block `email_message` argument. * `email_verification_subject` - (Optional) String representing the email verification subject. Conflicts with `verification_message_template` configuration block `email_subject` argument. * `lambda_config` - (Optional) Configuration block for the AWS Lambda triggers associated with the user pool. [Detailed below](#lambda_config). -* `mfa_configuration` - (Optional) Multi-Factor Authentication (MFA) configuration for the User Pool. Defaults of `OFF`. 
Valid values are `OFF` (MFA Tokens are not required), `ON` (MFA is required for all users to sign in; requires at least one of `sms_configuration` or `software_token_mfa_configuration` to be configured), or `OPTIONAL` (MFA Will be required only for individual users who have MFA Enabled; requires at least one of `sms_configuration` or `software_token_mfa_configuration` to be configured). +* `mfa_configuration` - (Optional) Multi-Factor Authentication (MFA) configuration for the User Pool. Defaults to `OFF`. Valid values are `OFF` (MFA Tokens are not required), `ON` (MFA is required for all users to sign in; requires at least one of `email_mfa_configuration`, `sms_configuration` or `software_token_mfa_configuration` to be configured), or `OPTIONAL` (MFA will be required only for individual users who have MFA enabled; requires at least one of `email_mfa_configuration`, `sms_configuration` or `software_token_mfa_configuration` to be configured). +* `password_policy` - (Optional) Configuration block for information about the user pool password policy. [Detailed below](#password_policy). +* `schema` - (Optional) Configuration block for the schema attributes of a user pool. [Detailed below](#schema). Schema attributes from the [standard attribute set](https://docs.aws.amazon.com/cognito/latest/developerguide/user-pool-settings-attributes.html#cognito-user-pools-standard-attributes) only need to be specified if they are different from the default configuration. Attributes can be added, but not modified or removed. Maximum of 50 attributes. +* `sign_in_policy` - (Optional) Configuration block for information about the user pool sign in policy. [Detailed below](#sign_in_policy). +* `sms_authentication_message` - (Optional) String representing the SMS authentication message. The Message must contain the `{####}` placeholder, which will be replaced with the code. -* `sms_configuration` - (Optional) Configuration block for Short Message Service (SMS) settings. 
[Detailed below](#sms_configuration). These settings apply to SMS user verification and SMS Multi-Factor Authentication (MFA). Due to Cognito API restrictions, the SMS configuration cannot be removed without recreating the Cognito User Pool. For user data safety, this resource will ignore the removal of this configuration by disabling drift detection. To force resource recreation after this configuration has been applied, see the [`taint` command](https://www.terraform.io/docs/commands/taint.html). +* `sms_configuration` - (Optional) Configuration block for Short Message Service (SMS) settings. [Detailed below](#sms_configuration). These settings apply to SMS user verification and SMS Multi-Factor Authentication (MFA). SMS MFA is activated only when `mfa_configuration` is set to `ON` or `OPTIONAL` along with this block. Due to Cognito API restrictions, the SMS configuration cannot be removed without recreating the Cognito User Pool. For user data safety, this resource will ignore the removal of this configuration by disabling drift detection. To force resource recreation after this configuration has been applied, see the [`taint` command](https://www.terraform.io/docs/commands/taint.html). +* `sms_verification_message` - (Optional) String representing the SMS verification message. Conflicts with `verification_message_template` configuration block `sms_message` argument. -* `software_token_mfa_configuration` - (Optional) Configuration block for software token Mult-Factor Authentication (MFA) settings. [Detailed below](#software_token_mfa_configuration). +* `software_token_mfa_configuration` - (Optional) Configuration block for software token Multi-Factor Authentication (MFA) settings. Effective only when `mfa_configuration` is `ON` or `OPTIONAL`. [Detailed below](#software_token_mfa_configuration). +* `tags` - (Optional) Map of tags to assign to the User Pool. 
If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. * `user_attribute_update_settings` - (Optional) Configuration block for user attribute update settings. [Detailed below](#user_attribute_update_settings). * `user_pool_add_ons` - (Optional) Configuration block for user pool add-ons to enable user pool advanced security mode features. [Detailed below](#user_pool_add_ons). @@ -344,4 +345,4 @@ Using `terraform import`, import Cognito User Pools using the `id`. For example: % terraform import aws_cognito_user_pool.pool us-west-2_abc123 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/cognito_user_pool_client.html.markdown b/website/docs/cdktf/python/r/cognito_user_pool_client.html.markdown index 1fbe378444f3..9b53159ae062 100644 --- a/website/docs/cdktf/python/r/cognito_user_pool_client.html.markdown +++ b/website/docs/cdktf/python/r/cognito_user_pool_client.html.markdown @@ -198,10 +198,10 @@ class MyConvertedCode(TerraformStack): CognitoUserPoolClient(self, "userpool_client", explicit_auth_flows=["ADMIN_NO_SRP_AUTH"], name="client", - refresh_token_rotation=[{ - "feature": "ENABLED", - "retry_grace_period_seconds": 10 - } + refresh_token_rotation=[CognitoUserPoolClientRefreshTokenRotation( + feature="ENABLED", + retry_grace_period_seconds=10 + ) ], user_pool_id=pool.id ) @@ -216,6 +216,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
* `access_token_validity` - (Optional) Time limit, between 5 minutes and 1 day, after which the access token is no longer valid and cannot be used. By default, the unit is hours. The unit can be overridden by a value in `token_validity_units.access_token`. * `allowed_oauth_flows_user_pool_client` - (Optional) Whether the client is allowed to use OAuth 2.0 features. `allowed_oauth_flows_user_pool_client` must be set to `true` before you can configure the following arguments: `callback_urls`, `logout_urls`, `allowed_oauth_scopes` and `allowed_oauth_flows`. * `allowed_oauth_flows` - (Optional) List of allowed OAuth flows, including `code`, `implicit`, and `client_credentials`. `allowed_oauth_flows_user_pool_client` must be set to `true` before you can configure this option. @@ -293,4 +294,4 @@ Using `terraform import`, import Cognito User Pool Clients using the `id` of the % terraform import aws_cognito_user_pool_client.client us-west-2_abc123/3ho4ek12345678909nh3fmhpko ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/cognito_user_pool_domain.html.markdown b/website/docs/cdktf/python/r/cognito_user_pool_domain.html.markdown index cd4f89fab7a3..d0587c7044eb 100644 --- a/website/docs/cdktf/python/r/cognito_user_pool_domain.html.markdown +++ b/website/docs/cdktf/python/r/cognito_user_pool_domain.html.markdown @@ -84,6 +84,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `domain` - (Required) For custom domains, this is the fully-qualified domain name, such as auth.example.com. For Amazon Cognito prefix domains, this is the prefix alone, such as auth. 
* `user_pool_id` - (Required) The user pool ID. * `certificate_arn` - (Optional) The ARN of an ISSUED ACM certificate in us-east-1 for a custom domain. @@ -125,4 +126,4 @@ Using `terraform import`, import Cognito User Pool Domains using the `domain`. F % terraform import aws_cognito_user_pool_domain.main auth.example.org ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/cognito_user_pool_ui_customization.html.markdown b/website/docs/cdktf/python/r/cognito_user_pool_ui_customization.html.markdown index 12a5e2c13a7f..36cd35774120 100644 --- a/website/docs/cdktf/python/r/cognito_user_pool_ui_customization.html.markdown +++ b/website/docs/cdktf/python/r/cognito_user_pool_ui_customization.html.markdown @@ -98,6 +98,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `client_id` (Optional) The client ID for the client app. Defaults to `ALL`. If `ALL` is specified, the `css` and/or `image_file` settings will be used for every client that has no UI customization set previously. * `css` (Optional) - The CSS values in the UI customization, provided as a String. At least one of `css` or `image_file` is required. * `image_file` (Optional) - The uploaded logo image for the UI customization, provided as a base64-encoded String. Drift detection is not possible for this argument. At least one of `css` or `image_file` is required. 
@@ -137,4 +138,4 @@ Using `terraform import`, import Cognito User Pool UI Customizations using the ` % terraform import aws_cognito_user_pool_ui_customization.example us-west-2_ZCTarbt5C,12bu4fuk3mlgqa2rtrujgp6egq ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/comprehend_document_classifier.html.markdown b/website/docs/cdktf/python/r/comprehend_document_classifier.html.markdown index 54ace878b56c..e9e4d131f1de 100644 --- a/website/docs/cdktf/python/r/comprehend_document_classifier.html.markdown +++ b/website/docs/cdktf/python/r/comprehend_document_classifier.html.markdown @@ -41,7 +41,7 @@ class MyConvertedCode(TerraformStack): data_access_role_arn=Token.as_string(aws_iam_role_example.arn), depends_on=[aws_iam_role_policy_example], input_data_config=ComprehendDocumentClassifierInputDataConfig( - s3_uri="s3://${" + test.bucket + "}/${" + documents.id + "}" + s3_uri="s3://${" + test.bucket + "}/${" + documents.key + "}" ), language_code="en", name="example" @@ -63,6 +63,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `mode` - (Optional, Default: `MULTI_CLASS`) The document classification mode. One of `MULTI_CLASS` or `MULTI_LABEL`. `MULTI_CLASS` is also known as "Single Label" in the AWS Console. @@ -142,6 +143,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. 
For example: + +```terraform +import { + to = aws_comprehend_document_classifier.example + identity = { + "arn" = "arn:aws:comprehend:us-west-2:123456789012:document-classifier/example" + } +} + +resource "aws_comprehend_document_classifier" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the Comprehend document classifier. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Comprehend Document Classifier using the ARN. For example: ```python @@ -165,4 +187,4 @@ Using `terraform import`, import Comprehend Document Classifier using the ARN. F % terraform import aws_comprehend_document_classifier.example arn:aws:comprehend:us-west-2:123456789012:document_classifier/example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/comprehend_entity_recognizer.html.markdown b/website/docs/cdktf/python/r/comprehend_entity_recognizer.html.markdown index 371dc6774c6d..7810032483e0 100644 --- a/website/docs/cdktf/python/r/comprehend_entity_recognizer.html.markdown +++ b/website/docs/cdktf/python/r/comprehend_entity_recognizer.html.markdown @@ -42,10 +42,10 @@ class MyConvertedCode(TerraformStack): depends_on=[aws_iam_role_policy_example], input_data_config=ComprehendEntityRecognizerInputDataConfig( documents=ComprehendEntityRecognizerInputDataConfigDocuments( - s3_uri="s3://${" + aws_s3_bucket_documents.bucket + "}/${" + documents.id + "}" + s3_uri="s3://${" + aws_s3_bucket_documents.bucket + "}/${" + documents.key + "}" ), entity_list=ComprehendEntityRecognizerInputDataConfigEntityListStruct( - s3_uri="s3://${" + aws_s3_bucket_entities.bucket + "}/${" + entities.id + "}" + s3_uri="s3://${" + aws_s3_bucket_entities.bucket + "}/${" + entities.key + "}" ), entity_types=[ComprehendEntityRecognizerInputDataConfigEntityTypes( type="ENTITY_1" @@ -74,6 
+74,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `model_kms_key_id` - (Optional) The ID or ARN of a KMS Key used to encrypt trained Entity Recognizers. * `tags` - (Optional) A map of tags to assign to the resource. If configured with a provider [`default_tags` Configuration Block](/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. * `version_name` - (Optional) Name for the version of the Entity Recognizer. @@ -165,6 +166,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_comprehend_entity_recognizer.example + identity = { + "arn" = "arn:aws:comprehend:us-west-2:123456789012:entity-recognizer/example" + } +} + +resource "aws_comprehend_entity_recognizer" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the Comprehend entity recognizer. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Comprehend Entity Recognizer using the ARN. For example: ```python @@ -188,4 +210,4 @@ Using `terraform import`, import Comprehend Entity Recognizer using the ARN. 
For % terraform import aws_comprehend_entity_recognizer.example arn:aws:comprehend:us-west-2:123456789012:entity-recognizer/example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/computeoptimizer_enrollment_status.html.markdown b/website/docs/cdktf/python/r/computeoptimizer_enrollment_status.html.markdown index 71e90b080f2a..4550fb9b55dd 100644 --- a/website/docs/cdktf/python/r/computeoptimizer_enrollment_status.html.markdown +++ b/website/docs/cdktf/python/r/computeoptimizer_enrollment_status.html.markdown @@ -35,6 +35,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `include_member_accounts` - (Optional) Whether to enroll member accounts of the organization if the account is the management account of an organization. Default is `false`. * `status` - (Required) The enrollment status of the account. Valid values: `Active`, `Inactive`. @@ -76,4 +77,4 @@ Using `terraform import`, import enrollment status using the account ID. 
For exa % terraform import aws_computeoptimizer_enrollment_status.example 123456789012 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/computeoptimizer_recommendation_preferences.html.markdown b/website/docs/cdktf/python/r/computeoptimizer_recommendation_preferences.html.markdown index a6981901fe51..8463bb65c5f5 100644 --- a/website/docs/cdktf/python/r/computeoptimizer_recommendation_preferences.html.markdown +++ b/website/docs/cdktf/python/r/computeoptimizer_recommendation_preferences.html.markdown @@ -77,12 +77,13 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `enhanced_infrastructure_metrics` - (Optional) The status of the enhanced infrastructure metrics recommendation preference. Valid values: `Active`, `Inactive`. * `external_metrics_preference` - (Optional) The provider of the external metrics recommendation preference. See [External Metrics Preference](#external-metrics-preference) below. * `inferred_workload_types` - (Optional) The status of the inferred workload types recommendation preference. Valid values: `Active`, `Inactive`. * `look_back_period` - (Optional) The preference to control the number of days the utilization metrics of the AWS resource are analyzed. Valid values: `DAYS_14`, `DAYS_32`, `DAYS_93`. * `preferred_resource` - (Optional) The preference to control which resource type values are considered when generating rightsizing recommendations. See [Preferred Resources](#preferred-resources) below. -* `resource_type` - (Required) The target resource type of the recommendation preferences. 
Valid values: `Ec2Instance`, `AutoScalingGroup`, `RdsDBInstance`. +* `resource_type` - (Required) The target resource type of the recommendation preferences. Valid values: `Ec2Instance`, `AutoScalingGroup`, `RdsDBInstance`, `AuroraDBClusterStorage`. * `savings_estimation_mode` - (Optional) The status of the savings estimation mode preference. Valid values: `AfterDiscounts`, `BeforeDiscounts`. * `scope` - (Required) The scope of the recommendation preferences. See [Scope](#scope) below. * `utilization_preference` - (Optional) The preference to control the resource’s CPU utilization threshold, CPU utilization headroom, and memory utilization headroom. See [Utilization Preferences](#utilization-preferences) below. @@ -141,4 +142,4 @@ Using `terraform import`, import recommendation preferences using the resource t % terraform import aws_computeoptimizer_recommendation_preferences.example Ec2Instance,AccountId,123456789012 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/config_aggregate_authorization.html.markdown b/website/docs/cdktf/python/r/config_aggregate_authorization.html.markdown index b70ed07d609a..c31bb1a70f31 100644 --- a/website/docs/cdktf/python/r/config_aggregate_authorization.html.markdown +++ b/website/docs/cdktf/python/r/config_aggregate_authorization.html.markdown @@ -28,7 +28,7 @@ class MyConvertedCode(TerraformStack): super().__init__(scope, name) ConfigAggregateAuthorization(self, "example", account_id="123456789012", - region="eu-west-2" + authorized_aws_region="eu-west-2" ) ``` @@ -36,8 +36,9 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: -* `account_id` - (Required) Account ID -* `region` - (Required) Region +* `account_id` - (Required) Account ID. +* `authorized_aws_region` - (Optional) The region authorized to collect aggregated data. +* `region` - (Optional, **Deprecated**) The region authorized to collect aggregated data. 
Use `authorized_aws_region` instead. * `tags` - (Optional) A map of tags to assign to the resource. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. ## Attribute Reference @@ -49,7 +50,7 @@ This resource exports the following attributes in addition to the arguments abov ## Import -In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Config aggregate authorizations using `account_id:region`. For example: +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Config aggregate authorizations using `account_id:authorized_aws_region`. For example: ```python # DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug @@ -66,10 +67,10 @@ class MyConvertedCode(TerraformStack): ConfigAggregateAuthorization.generate_config_for_import(self, "example", "123456789012:us-east-1") ``` -Using `terraform import`, import Config aggregate authorizations using `account_id:region`. For example: +Using `terraform import`, import Config aggregate authorizations using `account_id:authorized_aws_region`. 
For example: ```console % terraform import aws_config_aggregate_authorization.example 123456789012:us-east-1 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/config_config_rule.html.markdown b/website/docs/cdktf/python/r/config_config_rule.html.markdown index 5f417e8afa15..b729f34a0b2e 100644 --- a/website/docs/cdktf/python/r/config_config_rule.html.markdown +++ b/website/docs/cdktf/python/r/config_config_rule.html.markdown @@ -165,6 +165,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The name of the rule * `description` - (Optional) Description of the rule * `evaluation_mode` - (Optional) The modes the Config rule can be evaluated in. See [Evaluation Mode](#evaluation-mode) for more details. @@ -246,4 +247,4 @@ Using `terraform import`, import Config Rule using the name. For example: % terraform import aws_config_config_rule.foo example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/config_configuration_aggregator.html.markdown b/website/docs/cdktf/python/r/config_configuration_aggregator.html.markdown index 07f4f9b92ec2..bdf2184cad9a 100644 --- a/website/docs/cdktf/python/r/config_configuration_aggregator.html.markdown +++ b/website/docs/cdktf/python/r/config_configuration_aggregator.html.markdown @@ -93,6 +93,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The name of the configuration aggregator. * `account_aggregation_source` - (Optional) The account(s) to aggregate config data from as documented below. * `organization_aggregation_source` - (Optional) The organization to aggregate config data from as documented below. @@ -150,4 +151,4 @@ Using `terraform import`, import Configuration Aggregators using the name. For e % terraform import aws_config_configuration_aggregator.example foo ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/config_configuration_recorder.html.markdown b/website/docs/cdktf/python/r/config_configuration_recorder.html.markdown index 0dc856c50f96..796023c15d60 100644 --- a/website/docs/cdktf/python/r/config_configuration_recorder.html.markdown +++ b/website/docs/cdktf/python/r/config_configuration_recorder.html.markdown @@ -122,6 +122,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Optional) The name of the recorder. Defaults to `default`. Changing it recreates the resource. * `role_arn` - (Required) Amazon Resource Name (ARN) of the IAM role. Used to make read or write requests to the delivery channel and to describe the AWS resources associated with the account. See [AWS Docs](http://docs.aws.amazon.com/config/latest/developerguide/iamrole-permissions.html) for more details. * `recording_group` - (Optional) Recording group - see below. 
@@ -185,4 +186,4 @@ Using `terraform import`, import Configuration Recorder using the name. For exam % terraform import aws_config_configuration_recorder.foo example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/config_configuration_recorder_status.html.markdown b/website/docs/cdktf/python/r/config_configuration_recorder_status.html.markdown index 7e606e82893f..c9a730613f5a 100644 --- a/website/docs/cdktf/python/r/config_configuration_recorder_status.html.markdown +++ b/website/docs/cdktf/python/r/config_configuration_recorder_status.html.markdown @@ -97,6 +97,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The name of the recorder * `is_enabled` - (Required) Whether the configuration recorder should be enabled or disabled. @@ -129,4 +130,4 @@ Using `terraform import`, import Configuration Recorder Status using the name of % terraform import aws_config_configuration_recorder_status.foo example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/config_conformance_pack.html.markdown b/website/docs/cdktf/python/r/config_conformance_pack.html.markdown index c940fe4269fd..3a4d4ac84725 100644 --- a/website/docs/cdktf/python/r/config_conformance_pack.html.markdown +++ b/website/docs/cdktf/python/r/config_conformance_pack.html.markdown @@ -86,6 +86,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required, Forces new resource) The name of the conformance pack. Must begin with a letter and contain from 1 to 256 alphanumeric characters and hyphens. * `delivery_s3_bucket` - (Optional) Amazon S3 bucket where AWS Config stores conformance pack templates. Maximum length of 63. * `delivery_s3_key_prefix` - (Optional) The prefix for the Amazon S3 bucket. Maximum length of 1024. @@ -133,4 +134,4 @@ Using `terraform import`, import Config Conformance Packs using the `name`. For % terraform import aws_config_conformance_pack.example example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/config_delivery_channel.html.markdown b/website/docs/cdktf/python/r/config_delivery_channel.html.markdown index 2c1a388a9a92..8cacd64a277d 100644 --- a/website/docs/cdktf/python/r/config_delivery_channel.html.markdown +++ b/website/docs/cdktf/python/r/config_delivery_channel.html.markdown @@ -85,6 +85,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Optional) The name of the delivery channel. Defaults to `default`. Changing it recreates the resource. * `s3_bucket_name` - (Required) The name of the S3 bucket used to store the configuration history. * `s3_key_prefix` - (Optional) The prefix for the specified S3 bucket. @@ -127,4 +128,4 @@ Using `terraform import`, import Delivery Channel using the name. 
For example: % terraform import aws_config_delivery_channel.foo example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/config_organization_conformance_pack.html.markdown b/website/docs/cdktf/python/r/config_organization_conformance_pack.html.markdown index ecd9e32c1d87..5cfe942597c3 100644 --- a/website/docs/cdktf/python/r/config_organization_conformance_pack.html.markdown +++ b/website/docs/cdktf/python/r/config_organization_conformance_pack.html.markdown @@ -97,6 +97,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required, Forces new resource) The name of the organization conformance pack. Must begin with a letter and contain from 1 to 128 alphanumeric characters and hyphens. * `delivery_s3_bucket` - (Optional) Amazon S3 bucket where AWS Config stores conformance pack templates. Delivery bucket must begin with `awsconfigconforms` prefix. Maximum length of 63. * `delivery_s3_key_prefix` - (Optional) The prefix for the Amazon S3 bucket. Maximum length of 1024. 
@@ -152,4 +153,4 @@ Using `terraform import`, import Config Organization Conformance Packs using the % terraform import aws_config_organization_conformance_pack.example example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/config_organization_custom_policy_rule.html.markdown b/website/docs/cdktf/python/r/config_organization_custom_policy_rule.html.markdown index 1f7177afe62d..1f1d0888f4c2 100644 --- a/website/docs/cdktf/python/r/config_organization_custom_policy_rule.html.markdown +++ b/website/docs/cdktf/python/r/config_organization_custom_policy_rule.html.markdown @@ -43,28 +43,29 @@ class MyConvertedCode(TerraformStack): The following arguments are required: -* `name` - (Required) name of the rule -* `policy_text` - (Required) policy definition containing the logic for your organization AWS Config Custom Policy rule -* `policy_runtime` - (Required) runtime system for your organization AWS Config Custom Policy rules -* `trigger_types` - (Required) List of notification types that trigger AWS Config to run an evaluation for the rule. Valid values: `ConfigurationItemChangeNotification`, `OversizedConfigurationItemChangeNotification` +* `name` - (Required) Name of the rule. +* `policy_text` - (Required) Policy definition containing the rule logic. +* `policy_runtime` - (Required) Runtime system for policy rules. +* `trigger_types` - (Required) List of notification types that trigger AWS Config to run an evaluation for the rule. Valid values: `ConfigurationItemChangeNotification`, `OversizedConfigurationItemChangeNotification`. 
The following arguments are optional: -* `description` - (Optional) Description of the rule -* `debug_log_delivery_accounts` - (Optional) List of AWS account identifiers to exclude from the rule -* `excluded_accounts` - (Optional) List of AWS account identifiers to exclude from the rule -* `input_parameters` - (Optional) A string in JSON format that is passed to the AWS Config Rule Lambda Function +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `description` - (Optional) Description of the rule. +* `debug_log_delivery_accounts` - (Optional) List of accounts that you can enable debug logging for. The list is null when debug logging is enabled for all accounts. +* `excluded_accounts` - (Optional) List of AWS account identifiers to exclude from the rule. +* `input_parameters` - (Optional) A string in JSON format that is passed to the AWS Config Rule Lambda Function. * `maximum_execution_frequency` - (Optional) Maximum frequency with which AWS Config runs evaluations for a rule, if the rule is triggered at a periodic frequency. Defaults to `TwentyFour_Hours` for periodic frequency triggered rules. Valid values: `One_Hour`, `Three_Hours`, `Six_Hours`, `Twelve_Hours`, or `TwentyFour_Hours`. -* `resource_id_scope` - (Optional) Identifier of the AWS resource to evaluate -* `resource_types_scope` - (Optional) List of types of AWS resources to evaluate -* `tag_key_scope` - (Optional, Required if `tag_value_scope` is configured) Tag key of AWS resources to evaluate -* `tag_value_scope` - (Optional) Tag value of AWS resources to evaluate +* `resource_id_scope` - (Optional) Identifier of the AWS resource to evaluate. +* `resource_types_scope` - (Optional) List of types of AWS resources to evaluate. 
+* `tag_key_scope` - (Optional, Required if `tag_value_scope` is configured) Tag key of AWS resources to evaluate. +* `tag_value_scope` - (Optional) Tag value of AWS resources to evaluate. ## Attribute Reference This resource exports the following attributes in addition to the arguments above: -* `arn` - Amazon Resource Name (ARN) of the rule +* `arn` - Amazon Resource Name (ARN) of the rule. ## Timeouts @@ -99,4 +100,4 @@ Using `terraform import`, import a Config Organization Custom Policy Rule using % terraform import aws_config_organization_custom_policy_rule.example example_rule_name ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/config_organization_custom_rule.html.markdown b/website/docs/cdktf/python/r/config_organization_custom_rule.html.markdown index adf79787532a..6463ef816829 100644 --- a/website/docs/cdktf/python/r/config_organization_custom_rule.html.markdown +++ b/website/docs/cdktf/python/r/config_organization_custom_rule.html.markdown @@ -59,6 +59,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `lambda_function_arn` - (Required) Amazon Resource Name (ARN) of the rule Lambda Function * `name` - (Required) The name of the rule * `trigger_types` - (Required) List of notification types that trigger AWS Config to run an evaluation for the rule. 
Valid values: `ConfigurationItemChangeNotification`, `OversizedConfigurationItemChangeNotification`, and `ScheduledNotification` @@ -110,4 +111,4 @@ Using `terraform import`, import Config Organization Custom Rules using the name % terraform import aws_config_organization_custom_rule.example example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/config_organization_managed_rule.html.markdown b/website/docs/cdktf/python/r/config_organization_managed_rule.html.markdown index e643d0be910a..1a3968021ffd 100644 --- a/website/docs/cdktf/python/r/config_organization_managed_rule.html.markdown +++ b/website/docs/cdktf/python/r/config_organization_managed_rule.html.markdown @@ -49,6 +49,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The name of the rule * `rule_identifier` - (Required) Identifier of an available AWS Config Managed Rule to call. 
For available values, see the [List of AWS Config Managed Rules](https://docs.aws.amazon.com/config/latest/developerguide/managed-rules-by-aws-config.html) documentation * `description` - (Optional) Description of the rule @@ -99,4 +100,4 @@ Using `terraform import`, import Config Organization Managed Rules using the nam % terraform import aws_config_organization_managed_rule.example example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/config_remediation_configuration.html.markdown b/website/docs/cdktf/python/r/config_remediation_configuration.html.markdown index 311500bd1410..0171c2664749 100644 --- a/website/docs/cdktf/python/r/config_remediation_configuration.html.markdown +++ b/website/docs/cdktf/python/r/config_remediation_configuration.html.markdown @@ -80,6 +80,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `automatic` - (Optional) Remediation is triggered automatically if `true`. * `execution_controls` - (Optional) Configuration block for execution controls. See below. * `maximum_automatic_attempts` - (Optional) Maximum number of failed attempts for auto-remediation. If you do not select a number, the default is 5. 
@@ -139,4 +140,4 @@ Using `terraform import`, import Remediation Configurations using the name confi % terraform import aws_config_remediation_configuration.this example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/config_retention_configuration.html.markdown b/website/docs/cdktf/python/r/config_retention_configuration.html.markdown index e5d2a7782ee1..d0fdd073cc00 100644 --- a/website/docs/cdktf/python/r/config_retention_configuration.html.markdown +++ b/website/docs/cdktf/python/r/config_retention_configuration.html.markdown @@ -36,6 +36,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `retention_period_in_days` - (Required) The number of days AWS Config stores historical information. 
## Attribute Reference @@ -69,4 +70,4 @@ Using `terraform import`, import the AWS Config retention configuration using th % terraform import aws_config_retention_configuration.example default ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/connect_bot_association.html.markdown b/website/docs/cdktf/python/r/connect_bot_association.html.markdown index 131bb278346b..557d80eab46c 100644 --- a/website/docs/cdktf/python/r/connect_bot_association.html.markdown +++ b/website/docs/cdktf/python/r/connect_bot_association.html.markdown @@ -96,7 +96,7 @@ class MyConvertedCode(TerraformStack): aws_connect_bot_association_example = ConnectBotAssociation(self, "example_3", instance_id=Token.as_string(aws_connect_instance_example.id), lex_bot=ConnectBotAssociationLexBot( - lex_region=Token.as_string(current.name), + lex_region=Token.as_string(current.region), name=Token.as_string(aws_lex_bot_example.name) ) ) @@ -108,6 +108,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `instance_id` - (Required) The identifier of the Amazon Connect instance. You can find the instanceId in the ARN of the instance. * `lex_bot` - (Required) Configuration information of an Amazon Lex (V1) bot. Detailed below. 
@@ -149,4 +150,4 @@ Using `terraform import`, import `aws_connect_bot_association` using the Amazon % terraform import aws_connect_bot_association.example aaaaaaaa-bbbb-cccc-dddd-111111111111:Example:us-west-2 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/connect_contact_flow.html.markdown b/website/docs/cdktf/python/r/connect_contact_flow.html.markdown index bfe5da4aaf71..fc0daf00c8b1 100644 --- a/website/docs/cdktf/python/r/connect_contact_flow.html.markdown +++ b/website/docs/cdktf/python/r/connect_contact_flow.html.markdown @@ -112,6 +112,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `content` - (Optional) Specifies the content of the Contact Flow, provided as a JSON string, written in Amazon Connect Contact Flow Language. If defined, the `filename` argument cannot be used. * `content_hash` - (Optional) Used to trigger updates. Must be set to a base64-encoded SHA256 hash of the Contact Flow source specified with `filename`. The usual way to set this is filebase64sha256("mycontact_flow.json") (Terraform 0.11.12 and later) or base64sha256(file("mycontact_flow.json")) (Terraform 0.11.11 and earlier), where "mycontact_flow.json" is the local filename of the Contact Flow source. * `description` - (Optional) Specifies the description of the Contact Flow. 
@@ -155,4 +156,4 @@ Using `terraform import`, import Amazon Connect Contact Flows using the `instanc % terraform import aws_connect_contact_flow.example f1288a1f-6193-445a-b47e-af739b2:c1d4e5f6-1b3c-1b3c-1b3c-c1d4e5f6c1d4e5 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/connect_contact_flow_module.html.markdown b/website/docs/cdktf/python/r/connect_contact_flow_module.html.markdown index cfb65720d5b2..a6c53335fd62 100644 --- a/website/docs/cdktf/python/r/connect_contact_flow_module.html.markdown +++ b/website/docs/cdktf/python/r/connect_contact_flow_module.html.markdown @@ -125,6 +125,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `content` - (Optional) Specifies the content of the Contact Flow Module, provided as a JSON string, written in Amazon Connect Contact Flow Language. If defined, the `filename` argument cannot be used. * `content_hash` - (Optional) Used to trigger updates. Must be set to a base64-encoded SHA256 hash of the Contact Flow Module source specified with `filename`. The usual way to set this is filebase64sha256("contact_flow_module.json") (Terraform 0.11.12 and later) or base64sha256(file("contact_flow_module.json")) (Terraform 0.11.11 and earlier), where "contact_flow_module.json" is the local filename of the Contact Flow Module source. * `description` - (Optional) Specifies the description of the Contact Flow Module. 
@@ -167,4 +168,4 @@ Using `terraform import`, import Amazon Connect Contact Flow Modules using the ` % terraform import aws_connect_contact_flow_module.example f1288a1f-6193-445a-b47e-af739b2:c1d4e5f6-1b3c-1b3c-1b3c-c1d4e5f6c1d4e5 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/connect_hours_of_operation.html.markdown b/website/docs/cdktf/python/r/connect_hours_of_operation.html.markdown index ed21fdd5ce98..8e7f47c70eda 100644 --- a/website/docs/cdktf/python/r/connect_hours_of_operation.html.markdown +++ b/website/docs/cdktf/python/r/connect_hours_of_operation.html.markdown @@ -64,6 +64,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `config` - (Required) One or more config blocks which define the configuration information for the hours of operation: day, start time, and end time . Config blocks are documented below. * `description` - (Optional) Specifies the description of the Hours of Operation. * `instance_id` - (Required) Specifies the identifier of the hosting Amazon Connect Instance. 
@@ -121,4 +122,4 @@ Using `terraform import`, import Amazon Connect Hours of Operations using the `i % terraform import aws_connect_hours_of_operation.example f1288a1f-6193-445a-b47e-af739b2:c1d4e5f6-1b3c-1b3c-1b3c-c1d4e5f6c1d4e5 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/connect_instance.html.markdown b/website/docs/cdktf/python/r/connect_instance.html.markdown index dba65825ffa5..21068a1e7fa8 100644 --- a/website/docs/cdktf/python/r/connect_instance.html.markdown +++ b/website/docs/cdktf/python/r/connect_instance.html.markdown @@ -89,6 +89,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `auto_resolve_best_voices_enabled` - (Optional) Specifies whether auto resolve best voices is enabled. Defaults to `true`. * `contact_flow_logs_enabled` - (Optional) Specifies whether contact flow logs are enabled. Defaults to `false`. * `contact_lens_enabled` - (Optional) Specifies whether contact lens is enabled. Defaults to `true`. @@ -122,6 +123,32 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_connect_instance.example + identity = { + id = "f1288a1f-6193-445a-b47e-af739b2" + } +} + +resource "aws_connect_instance" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `id` - (String) ID of the connect instance. 
+ +#### Optional + +- `account_id` (String) AWS Account where this resource is managed. +- `region` (String) Region where this resource is managed. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Connect instances using the `id`. For example: ```python @@ -145,4 +172,4 @@ Using `terraform import`, import Connect instances using the `id`. For example: % terraform import aws_connect_instance.example f1288a1f-6193-445a-b47e-af739b2 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/connect_instance_storage_config.html.markdown b/website/docs/cdktf/python/r/connect_instance_storage_config.html.markdown index 841daca5f92f..6e2a95124000 100644 --- a/website/docs/cdktf/python/r/connect_instance_storage_config.html.markdown +++ b/website/docs/cdktf/python/r/connect_instance_storage_config.html.markdown @@ -160,6 +160,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `instance_id` - (Required) Specifies the identifier of the hosting Amazon Connect Instance. * `resource_type` - (Required) A valid resource type. Valid Values: `AGENT_EVENTS` | `ATTACHMENTS` | `CALL_RECORDINGS` | `CHAT_TRANSCRIPTS` | `CONTACT_EVALUATIONS` | `CONTACT_TRACE_RECORDS` | `EMAIL_MESSAGES` | `MEDIA_STREAMS` | `REAL_TIME_CONTACT_ANALYSIS_CHAT_SEGMENTS` | `REAL_TIME_CONTACT_ANALYSIS_SEGMENTS` | `REAL_TIME_CONTACT_ANALYSIS_VOICE_SEGMENTS` | `SCHEDULED_REPORTS` | `SCREEN_RECORDINGS`. * `storage_config` - (Required) Specifies the storage configuration options for the Connect Instance. [Documented below](#storage_config). 
@@ -241,4 +242,4 @@ Using `terraform import`, import Amazon Connect Instance Storage Configs using t % terraform import aws_connect_instance_storage_config.example f1288a1f-6193-445a-b47e-af739b2:c1d4e5f6-1b3c-1b3c-1b3c-c1d4e5f6c1d4e5:CHAT_TRANSCRIPTS ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/connect_lambda_function_association.html.markdown b/website/docs/cdktf/python/r/connect_lambda_function_association.html.markdown index 7426c20f68d0..b615f0f2b1c4 100644 --- a/website/docs/cdktf/python/r/connect_lambda_function_association.html.markdown +++ b/website/docs/cdktf/python/r/connect_lambda_function_association.html.markdown @@ -37,6 +37,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `function_arn` - (Required) Amazon Resource Name (ARN) of the Lambda Function, omitting any version or alias qualifier. * `instance_id` - (Required) The identifier of the Amazon Connect instance. You can find the instanceId in the ARN of the instance. 
@@ -71,4 +72,4 @@ Using `terraform import`, import `aws_connect_lambda_function_association` using % terraform import aws_connect_lambda_function_association.example aaaaaaaa-bbbb-cccc-dddd-111111111111,arn:aws:lambda:us-west-2:123456789123:function:example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/connect_phone_number.html.markdown b/website/docs/cdktf/python/r/connect_phone_number.html.markdown index bd4dce84fcc7..29b440727303 100644 --- a/website/docs/cdktf/python/r/connect_phone_number.html.markdown +++ b/website/docs/cdktf/python/r/connect_phone_number.html.markdown @@ -87,6 +87,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `country_code` - (Required, Forces new resource) The ISO country code. For a list of Valid values, refer to [PhoneNumberCountryCode](https://docs.aws.amazon.com/connect/latest/APIReference/API_SearchAvailablePhoneNumbers.html#connect-SearchAvailablePhoneNumbers-request-PhoneNumberCountryCode). * `description` - (Optional, Forces new resource) The description of the phone number. * `prefix` - (Optional, Forces new resource) The prefix of the phone number that is used to filter available phone numbers. If provided, it must contain `+` as part of the country code. Do not specify this argument when importing the resource. @@ -121,6 +122,31 @@ The `status` configuration block supports the following attributes: ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. 
For example: + +```terraform +import { + to = aws_connect_phone_number.example + identity = { + id = "1234abcd-12ab-34cd-56ef-1234567890ab" + } +} +resource "aws_connect_phone_number" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `id` - (String) ID of the connect phone number. + +#### Optional + +- `account_id` (String) AWS Account where this resource is managed. +- `region` (String) Region where this resource is managed. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Amazon Connect Phone Numbers using its `id`. For example: ```python @@ -144,4 +170,4 @@ Using `terraform import`, import Amazon Connect Phone Numbers using its `id`. Fo % terraform import aws_connect_phone_number.example 12345678-abcd-1234-efgh-9876543210ab ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/connect_phone_number_contact_flow_association.html.markdown b/website/docs/cdktf/python/r/connect_phone_number_contact_flow_association.html.markdown new file mode 100644 index 000000000000..313a17e828dd --- /dev/null +++ b/website/docs/cdktf/python/r/connect_phone_number_contact_flow_association.html.markdown @@ -0,0 +1,74 @@ +--- +subcategory: "Connect" +layout: "aws" +page_title: "AWS: aws_connect_phone_number_contact_flow_association" +description: |- + Associates a flow with a phone number claimed to an Amazon Connect instance. +--- + + + +# Resource: aws_connect_phone_number_contact_flow_association + +Associates a flow with a phone number claimed to an Amazon Connect instance. + +## Example Usage + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. 
+# +from imports.aws.connect_phone_number_contact_flow_association import ConnectPhoneNumberContactFlowAssociation +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + ConnectPhoneNumberContactFlowAssociation(self, "example", + contact_flow_id=aws_connect_contact_flow_example.contact_flow_id, + instance_id=aws_connect_instance_example.id, + phone_number_id=aws_connect_phone_number_example.id + ) +``` + +## Argument Reference + +This resource supports the following arguments: + +* `contact_flow_id` - (Required) Contact flow ID. +* `instance_id` - (Required) Amazon Connect instance ID. +* `phone_number_id` - (Required) Phone number ID. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). + +## Attribute Reference + +This resource exports no additional attributes. + +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import `aws_connect_phone_number_contact_flow_association` using the `phone_number_id`, `instance_id` and `contact_flow_id` separated by a comma (`,`). For example: + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.connect_phone_number_contact_flow_association
import ConnectPhoneNumberContactFlowAssociation +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + ConnectPhoneNumberContactFlowAssociation.generate_config_for_import(self, "example", "36727a4c-4683-4e49-880c-3347c61110a4,fa6c1691-e2eb-4487-bdb9-1aaed6268ebd,c4acdc79-395e-4280-a294-9062f56b07bb") +``` + +Using `terraform import`, import `aws_connect_phone_number_contact_flow_association` using the `phone_number_id`, `instance_id` and `contact_flow_id` separated by a comma (`,`). For example: + +```console +% terraform import aws_connect_phone_number_contact_flow_association.example 36727a4c-4683-4e49-880c-3347c61110a4,fa6c1691-e2eb-4487-bdb9-1aaed6268ebd,c4acdc79-395e-4280-a294-9062f56b07bb +``` + + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/connect_queue.html.markdown b/website/docs/cdktf/python/r/connect_queue.html.markdown index d7722f766c6b..519fbdf8ed98 100644 --- a/website/docs/cdktf/python/r/connect_queue.html.markdown +++ b/website/docs/cdktf/python/r/connect_queue.html.markdown @@ -100,6 +100,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` - (Optional) Specifies the description of the Queue. * `hours_of_operation_id` - (Required) Specifies the identifier of the Hours of Operation. * `instance_id` - (Required) Specifies the identifier of the hosting Amazon Connect Instance. 
@@ -150,4 +151,4 @@ Using `terraform import`, import Amazon Connect Queues using the `instance_id` a % terraform import aws_connect_queue.example f1288a1f-6193-445a-b47e-af739b2:c1d4e5f6-1b3c-1b3c-1b3c-c1d4e5f6c1d4e5 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/connect_quick_connect.html.markdown b/website/docs/cdktf/python/r/connect_quick_connect.html.markdown index 81d8fc04ac90..63d56857ce26 100644 --- a/website/docs/cdktf/python/r/connect_quick_connect.html.markdown +++ b/website/docs/cdktf/python/r/connect_quick_connect.html.markdown @@ -48,6 +48,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` - (Optional) Specifies the description of the Quick Connect. * `instance_id` - (Required) Specifies the identifier of the hosting Amazon Connect Instance. * `name` - (Required) Specifies the name of the Quick Connect. 
@@ -109,4 +110,4 @@ Using `terraform import`, import Amazon Connect Quick Connects using the `instan % terraform import aws_connect_quick_connect.example f1288a1f-6193-445a-b47e-af739b2:c1d4e5f6-1b3c-1b3c-1b3c-c1d4e5f6c1d4e5 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/connect_routing_profile.html.markdown b/website/docs/cdktf/python/r/connect_routing_profile.html.markdown index 3fa6d18fc542..5e5374f6a449 100644 --- a/website/docs/cdktf/python/r/connect_routing_profile.html.markdown +++ b/website/docs/cdktf/python/r/connect_routing_profile.html.markdown @@ -54,6 +54,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `default_outbound_queue_id` - (Required) Specifies the default outbound queue for the Routing Profile. * `description` - (Required) Specifies the description of the Routing Profile. * `instance_id` - (Required) Specifies the identifier of the hosting Amazon Connect Instance. 
@@ -115,4 +116,4 @@ Using `terraform import`, import Amazon Connect Routing Profiles using the `inst % terraform import aws_connect_routing_profile.example f1288a1f-6193-445a-b47e-af739b2:c1d4e5f6-1b3c-1b3c-1b3c-c1d4e5f6c1d4e5 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/connect_security_profile.html.markdown b/website/docs/cdktf/python/r/connect_security_profile.html.markdown index fb1a64ea6a0d..51717216be50 100644 --- a/website/docs/cdktf/python/r/connect_security_profile.html.markdown +++ b/website/docs/cdktf/python/r/connect_security_profile.html.markdown @@ -42,6 +42,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` - (Optional) Specifies the description of the Security Profile. * `instance_id` - (Required) Specifies the identifier of the hosting Amazon Connect Instance. * `name` - (Required) Specifies the name of the Security Profile. 
@@ -84,4 +85,4 @@ Using `terraform import`, import Amazon Connect Security Profiles using the `ins % terraform import aws_connect_security_profile.example f1288a1f-6193-445a-b47e-af739b2:c1d4e5f6-1b3c-1b3c-1b3c-c1d4e5f6c1d4e5 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/connect_user.html.markdown b/website/docs/cdktf/python/r/connect_user.html.markdown index 0c5b949147d8..6efecdba2ca1 100644 --- a/website/docs/cdktf/python/r/connect_user.html.markdown +++ b/website/docs/cdktf/python/r/connect_user.html.markdown @@ -181,6 +181,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `directory_user_id` - (Optional) The identifier of the user account in the directory used for identity management. If Amazon Connect cannot access the directory, you can specify this identifier to authenticate users. If you include the identifier, we assume that Amazon Connect cannot access the directory. Otherwise, the identity information is used to authenticate users from your directory. This parameter is required if you are using an existing directory for identity management in Amazon Connect when Amazon Connect cannot access your directory to authenticate users. If you are using SAML for identity management and include this parameter, an error is returned. * `hierarchy_group_id` - (Optional) The identifier of the hierarchy group for the user. * `identity_info` - (Optional) A block that contains information about the identity of the user. Documented below. 
@@ -242,4 +243,4 @@ Using `terraform import`, import Amazon Connect Users using the `instance_id` an % terraform import aws_connect_user.example f1288a1f-6193-445a-b47e-af739b2:c1d4e5f6-1b3c-1b3c-1b3c-c1d4e5f6c1d4e5 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/connect_user_hierarchy_group.html.markdown b/website/docs/cdktf/python/r/connect_user_hierarchy_group.html.markdown index 1203b4521a9f..82b2d12ec11f 100644 --- a/website/docs/cdktf/python/r/connect_user_hierarchy_group.html.markdown +++ b/website/docs/cdktf/python/r/connect_user_hierarchy_group.html.markdown @@ -75,6 +75,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `instance_id` - (Required) Specifies the identifier of the hosting Amazon Connect Instance. * `name` - (Required) The name of the user hierarchy group. Must not be more than 100 characters. * `parent_group_id` - (Optional) The identifier for the parent hierarchy group. The user hierarchy is created at level one if the parent group ID is null. 
@@ -132,4 +133,4 @@ Using `terraform import`, import Amazon Connect User Hierarchy Groups using the % terraform import aws_connect_user_hierarchy_group.example f1288a1f-6193-445a-b47e-af739b2:c1d4e5f6-1b3c-1b3c-1b3c-c1d4e5f6c1d4e5 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/connect_user_hierarchy_structure.html.markdown b/website/docs/cdktf/python/r/connect_user_hierarchy_structure.html.markdown index 44a49d929b38..c32f2380fc1c 100644 --- a/website/docs/cdktf/python/r/connect_user_hierarchy_structure.html.markdown +++ b/website/docs/cdktf/python/r/connect_user_hierarchy_structure.html.markdown @@ -79,6 +79,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `hierarchy_structure` - (Required) A block that defines the hierarchy structure's levels. The `hierarchy_structure` block is documented below. * `instance_id` - (Required) Specifies the identifier of the hosting Amazon Connect Instance. 
@@ -131,4 +132,4 @@ Using `terraform import`, import Amazon Connect User Hierarchy Structures using % terraform import aws_connect_user_hierarchy_structure.example f1288a1f-6193-445a-b47e-af739b2 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/connect_vocabulary.html.markdown b/website/docs/cdktf/python/r/connect_vocabulary.html.markdown index e8b01e5ba4b3..341a2fb21db0 100644 --- a/website/docs/cdktf/python/r/connect_vocabulary.html.markdown +++ b/website/docs/cdktf/python/r/connect_vocabulary.html.markdown @@ -42,6 +42,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `content` - (Required) The content of the custom vocabulary in plain-text format with a table of values. Each row in the table represents a word or a phrase, described with Phrase, IPA, SoundsLike, and DisplayAs fields. Separate the fields with TAB characters. For more information, see [Create a custom vocabulary using a table](https://docs.aws.amazon.com/transcribe/latest/dg/custom-vocabulary.html#create-vocabulary-table). Minimum length of `1`. Maximum length of `60000`. * `instance_id` - (Required) Specifies the identifier of the hosting Amazon Connect Instance. * `language_code` - (Required) The language code of the vocabulary entries. For a list of languages and their corresponding language codes, see [What is Amazon Transcribe?](https://docs.aws.amazon.com/transcribe/latest/dg/transcribe-whatis.html). 
Valid Values are `ar-AE`, `de-CH`, `de-DE`, `en-AB`, `en-AU`, `en-GB`, `en-IE`, `en-IN`, `en-US`, `en-WL`, `es-ES`, `es-US`, `fr-CA`, `fr-FR`, `hi-IN`, `it-IT`, `ja-JP`, `ko-KR`, `pt-BR`, `pt-PT`, `zh-CN`. @@ -94,4 +95,4 @@ Using `terraform import`, import Amazon Connect Vocabularies using the `instance % terraform import aws_connect_vocabulary.example f1288a1f-6193-445a-b47e-af739b2:c1d4e5f6-1b3c-1b3c-1b3c-c1d4e5f6c1d4e5 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/controltower_baseline.html.markdown b/website/docs/cdktf/python/r/controltower_baseline.html.markdown new file mode 100644 index 000000000000..b1ea539ad75f --- /dev/null +++ b/website/docs/cdktf/python/r/controltower_baseline.html.markdown @@ -0,0 +1,103 @@ +--- +subcategory: "Control Tower" +layout: "aws" +page_title: "AWS: aws_controltower_baseline" +description: |- + Terraform resource for managing an AWS Control Tower Baseline. +--- + + + +# Resource: aws_controltower_baseline + +Terraform resource for managing an AWS Control Tower Baseline. + +## Example Usage + +### Basic Usage + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.controltower_baseline
import ControltowerBaseline +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + ControltowerBaseline(self, "example", + baseline_identifier="arn:aws:controltower:us-east-1::baseline/17BSJV3IGJ2QSGA2", + baseline_version="4.0", + parameters=[{ + "key": "IdentityCenterEnabledBaselineArn", + "value": "arn:aws:controltower:us-east-1:664418989480:enabledbaseline/XALULM96QHI525UOC" + } + ], + target_identifier=test.arn + ) +``` + +## Argument Reference + +The following arguments are required: + +* `baseline_identifier` - (Required) The ARN of the baseline to be enabled. +* `baseline_version` - (Required) The version of the baseline to be enabled. +* `target_identifier` - (Required) The ARN of the target on which the baseline will be enabled. Only OUs are supported as targets. + +The following arguments are optional: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `parameters` - (Optional) A list of key-value objects that specify enablement parameters, where key is a string and value is a document of any type. See [Parameter](#parameters) below for details. +* `tags` - (Optional) Tags to apply to the landing zone. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. + +### parameters + +* `key` - (Required) The key of the parameter. +* `value` - (Required) The value of the parameter. + +## Attribute Reference + +This resource exports the following attributes in addition to the arguments above: + +* `arn` - ARN of the Baseline. 
+* `operation_identifier` - The ID (in UUID format) of the asynchronous operation. +* `tags_all` - A map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). + +## Timeouts + +[Configuration options](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts): + +* `create` - (Default `30m`) +* `update` - (Default `30m`) +* `delete` - (Default `30m`) + +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Control Tower Baseline using the `arn`. For example: + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.controltower_baseline import ControltowerBaseline +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + ControltowerBaseline.generate_config_for_import(self, "example", "arn:aws:controltower:us-east-1:012345678912:enabledbaseline/XALULM96QHI525UOC") +``` + +Using `terraform import`, import Control Tower Baseline using the `arn`. 
For example: + +```console +% terraform import aws_controltower_baseline.example arn:aws:controltower:us-east-1:012345678912:enabledbaseline/XALULM96QHI525UOC +``` + + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/controltower_control.html.markdown b/website/docs/cdktf/python/r/controltower_control.html.markdown index a414ee618cd1..d220572f6c22 100644 --- a/website/docs/cdktf/python/r/controltower_control.html.markdown +++ b/website/docs/cdktf/python/r/controltower_control.html.markdown @@ -39,7 +39,7 @@ class MyConvertedCode(TerraformStack): data_aws_organizations_organizational_units_example.override_logical_id("example") current = DataAwsRegion(self, "current") aws_controltower_control_example = ControltowerControl(self, "example_3", - control_identifier="arn:aws:controltower:${" + current.name + "}::control/AWS-GR_EC2_VOLUME_INUSE_CHECK", + control_identifier="arn:aws:controltower:${" + current.region + "}::control/AWS-GR_EC2_VOLUME_INUSE_CHECK", parameters=[ControltowerControlParameters( key="AllowedRegions", value=Token.as_string(Fn.jsonencode(["us-east-1"])) @@ -61,6 +61,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `parameters` - (Optional) Parameter values which are specified to configure the control when you enable it. See [Parameters](#parameters) for more details. 
### Parameters @@ -100,4 +101,4 @@ Using `terraform import`, import Control Tower Controls using their `organizatio % terraform import aws_controltower_control.example arn:aws:organizations::123456789101:ou/o-qqaejywet/ou-qg5o-ufbhdtv3,arn:aws:controltower:us-east-1::control/WTDSMKDKDNLE ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/controltower_landing_zone.html.markdown b/website/docs/cdktf/python/r/controltower_landing_zone.html.markdown index e8375d53c360..2fed1786a19f 100644 --- a/website/docs/cdktf/python/r/controltower_landing_zone.html.markdown +++ b/website/docs/cdktf/python/r/controltower_landing_zone.html.markdown @@ -38,6 +38,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `manifest_json` - (Required) The manifest JSON file is a text file that describes your AWS resources. For examples, review [Launch your landing zone](https://docs.aws.amazon.com/controltower/latest/userguide/lz-api-launch). * `version` - (Required) The landing zone version. * `tags` - (Optional) Tags to apply to the landing zone. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. @@ -86,4 +87,4 @@ Using `terraform import`, import a Control Tower Landing Zone using the `id`. 
Fo % terraform import aws_controltower_landing_zone.example 1A2B3C4D5E6F7G8H ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/cur_report_definition.html.markdown b/website/docs/cdktf/python/r/cur_report_definition.html.markdown index 28e7ea6f54cf..3925496b87f2 100644 --- a/website/docs/cdktf/python/r/cur_report_definition.html.markdown +++ b/website/docs/cdktf/python/r/cur_report_definition.html.markdown @@ -35,6 +35,7 @@ class MyConvertedCode(TerraformStack): format="textORcsv", report_name="example-cur-report-definition", s3_bucket="example-bucket-name", + s3_prefix="example-cur-report", s3_region="us-east-1", time_unit="HOURLY" ) @@ -48,9 +49,9 @@ This resource supports the following arguments: * `time_unit` - (Required) The frequency on which report data are measured and displayed. Valid values are: `DAILY`, `HOURLY`, `MONTHLY`. * `format` - (Required) Format for report. Valid values are: `textORcsv`, `Parquet`. If `Parquet` is used, then Compression must also be `Parquet`. * `compression` - (Required) Compression format for report. Valid values are: `GZIP`, `ZIP`, `Parquet`. If `Parquet` is used, then format must also be `Parquet`. -* `additional_schema_elements` - (Required) A list of schema elements. Valid values are: `RESOURCES`, `SPLIT_COST_ALLOCATION_DATA`. +* `additional_schema_elements` - (Required) A list of schema elements. Valid values are: `RESOURCES`, `SPLIT_COST_ALLOCATION_DATA`, `MANUAL_DISCOUNT_COMPATIBILITY`. * `s3_bucket` - (Required) Name of the existing S3 bucket to hold generated reports. -* `s3_prefix` - (Optional) Report path prefix. Limited to 256 characters. +* `s3_prefix` - (Required) Report path prefix. Limited to 256 characters. May be empty (`""`) but the resource can then not be modified via the AWS Console. * `s3_region` - (Required) Region of the existing S3 bucket to hold generated reports. * `additional_artifacts` - (Required) A list of additional artifacts. 
Valid values are: `REDSHIFT`, `QUICKSIGHT`, `ATHENA`. When ATHENA exists within additional_artifacts, no other artifact type can be declared and report_versioning must be `OVERWRITE_REPORT`. * `refresh_closed_reports` - (Optional) Set to true to update your reports after they have been finalized if AWS detects charges related to previous months. @@ -89,4 +90,4 @@ Using `terraform import`, import Report Definitions using the `report_name`. For % terraform import aws_cur_report_definition.example_cur_report_definition example-cur-report-definition ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/customer_gateway.html.markdown b/website/docs/cdktf/python/r/customer_gateway.html.markdown index cfa2f1441894..9cd4e18dbabe 100644 --- a/website/docs/cdktf/python/r/customer_gateway.html.markdown +++ b/website/docs/cdktf/python/r/customer_gateway.html.markdown @@ -42,6 +42,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `bgp_asn` - (Optional, Forces new resource) The gateway's Border Gateway Protocol (BGP) Autonomous System Number (ASN). Valid values are from `1` to `2147483647`. Conflicts with `bgp_asn_extended`. * `bgp_asn_extended` - (Optional, Forces new resource) The gateway's Border Gateway Protocol (BGP) Autonomous System Number (ASN). Valid values are from `2147483648` to `4294967295` Conflicts with `bgp_asn`. * `certificate_arn` - (Optional) The Amazon Resource Name (ARN) for the customer gateway certificate. @@ -84,4 +85,4 @@ Using `terraform import`, import Customer Gateways using the `id`. 
For example: % terraform import aws_customer_gateway.main cgw-b4dc3961 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/customerprofiles_domain.html.markdown b/website/docs/cdktf/python/r/customerprofiles_domain.html.markdown index 65d98b95cdb9..4128c4d721a7 100644 --- a/website/docs/cdktf/python/r/customerprofiles_domain.html.markdown +++ b/website/docs/cdktf/python/r/customerprofiles_domain.html.markdown @@ -117,6 +117,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `dead_letter_queue_url` - The URL of the SQS dead letter queue, which is used for reporting errors associated with ingesting data from third party applications. * `default_encryption_key` - The default encryption key, which is an AWS managed key, is used when no specific type of encryption key is specified. It is used to encrypt all data before it is placed in permanent or semi-permanent storage. * `matching` - A block that specifies the process of matching duplicate profiles. [Documented below](#matching). 
@@ -242,4 +243,4 @@ Using `terraform import`, import Amazon Customer Profiles Domain using the resou % terraform import aws_customerprofiles_domain.example e6f777be-22d0-4b40-b307-5d2720ef16b2 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/customerprofiles_profile.html.markdown b/website/docs/cdktf/python/r/customerprofiles_profile.html.markdown index 85086003ccf2..83f25d3e5216 100644 --- a/website/docs/cdktf/python/r/customerprofiles_profile.html.markdown +++ b/website/docs/cdktf/python/r/customerprofiles_profile.html.markdown @@ -47,6 +47,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `account_number` - A unique account number that you have given to the customer. * `additional_information` - Any additional information relevant to the customer’s profile. * `address` - A block that specifies a generic address associated with the customer that is not mailing, shipping, or billing. [Documented below](#address). 
@@ -123,4 +124,4 @@ Using `terraform import`, import Amazon Customer Profiles Profile using the reso % terraform import aws_customerprofiles_profile.example domain-name/5f2f473dfbe841eb8d05cfc2a4c926df ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/dataexchange_data_set.html.markdown b/website/docs/cdktf/python/r/dataexchange_data_set.html.markdown index d78d3c0878b8..1ee941e16ba6 100644 --- a/website/docs/cdktf/python/r/dataexchange_data_set.html.markdown +++ b/website/docs/cdktf/python/r/dataexchange_data_set.html.markdown @@ -37,6 +37,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `asset_type` - (Required) The type of asset that is added to a data set. Valid values include `API_GATEWAY_API`, `LAKE_FORMATION_DATA_PERMISSION`, `REDSHIFT_DATA_SHARE`, `S3_DATA_ACCESS`, `S3_SNAPSHOT`. * `description` - (Required) A description for the data set. * `name` - (Required) The name of the data set. @@ -75,4 +76,4 @@ Using `terraform import`, import DataExchange DataSets using their `id`. 
For exa % terraform import aws_dataexchange_data_set.example 4fa784c7-ccb4-4dbf-ba4f-02198320daa1 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/dataexchange_event_action.html.markdown b/website/docs/cdktf/python/r/dataexchange_event_action.html.markdown index b5795037af97..9de612dd2709 100644 --- a/website/docs/cdktf/python/r/dataexchange_event_action.html.markdown +++ b/website/docs/cdktf/python/r/dataexchange_event_action.html.markdown @@ -57,6 +57,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `action` - (Required) Describes the action to take. Described in [`action` Configuration Block](#action-configuration-block) below. * `event` - (Required) Describes the event that triggers the `action`. @@ -130,4 +131,4 @@ Using `terraform import`, import Data Exchange Event Action using the id. For ex % terraform import aws_dataexchange_event_action.example example-event-action-id ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/dataexchange_revision.html.markdown b/website/docs/cdktf/python/r/dataexchange_revision.html.markdown index f18e1bdafab5..20867253b425 100644 --- a/website/docs/cdktf/python/r/dataexchange_revision.html.markdown +++ b/website/docs/cdktf/python/r/dataexchange_revision.html.markdown @@ -35,6 +35,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `data_set_id` - (Required) The dataset id. * `comment` - (Required) An optional comment about the revision. * `tags` - (Optional) A map of tags to assign to the resource. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. @@ -73,4 +74,4 @@ Using `terraform import`, import DataExchange Revisions using their `data-set-id % terraform import aws_dataexchange_revision.example 4fa784c7-ccb4-4dbf-ba4f-02198320daa1:4fa784c7-ccb4-4dbf-ba4f-02198320daa1 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/dataexchange_revision_assets.html.markdown b/website/docs/cdktf/python/r/dataexchange_revision_assets.html.markdown index ecbea09b9165..cc57a8d6ab36 100644 --- a/website/docs/cdktf/python/r/dataexchange_revision_assets.html.markdown +++ b/website/docs/cdktf/python/r/dataexchange_revision_assets.html.markdown @@ -57,6 +57,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `comment` - (Optional) A comment for the revision. Maximum length is 16,348 characters. * `finalize` - (Optional) Finalized a revision. Defaults to `false`. * `force_destoy` - (Optional) Force destroy the revision. Defaults to `false`. @@ -103,4 +104,4 @@ Configuration options: * `create` - (Default 30m) Time to create the revision. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/datapipeline_pipeline.html.markdown b/website/docs/cdktf/python/r/datapipeline_pipeline.html.markdown index 0bcf17d2973c..6126d5bc2f8b 100644 --- a/website/docs/cdktf/python/r/datapipeline_pipeline.html.markdown +++ b/website/docs/cdktf/python/r/datapipeline_pipeline.html.markdown @@ -35,6 +35,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The name of Pipeline. * `description` - (Optional) The description of Pipeline. * `tags` - (Optional) A map of tags to assign to the resource. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. 
@@ -71,4 +72,4 @@ Using `terraform import`, import `aws_datapipeline_pipeline` using the id (Pipel % terraform import aws_datapipeline_pipeline.default df-1234567890 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/datapipeline_pipeline_definition.html.markdown b/website/docs/cdktf/python/r/datapipeline_pipeline_definition.html.markdown index e4fb43d9f0e7..1ff5a0e0b94a 100644 --- a/website/docs/cdktf/python/r/datapipeline_pipeline_definition.html.markdown +++ b/website/docs/cdktf/python/r/datapipeline_pipeline_definition.html.markdown @@ -88,6 +88,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `parameter_object` - (Optional) Configuration block for the parameter objects used in the pipeline definition. See below * `parameter_value` - (Optional) Configuration block for the parameter values used in the pipeline definition. 
See below @@ -149,4 +150,4 @@ Using `terraform import`, import `aws_datapipeline_pipeline_definition` using th % terraform import aws_datapipeline_pipeline_definition.example df-1234567890 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/datasync_agent.html.markdown b/website/docs/cdktf/python/r/datasync_agent.html.markdown index 055cd4781c3d..b4ceaf0d2153 100644 --- a/website/docs/cdktf/python/r/datasync_agent.html.markdown +++ b/website/docs/cdktf/python/r/datasync_agent.html.markdown @@ -54,7 +54,7 @@ class MyConvertedCode(TerraformStack): current = DataAwsRegion(self, "current") example = VpcEndpoint(self, "example", security_group_ids=[Token.as_string(aws_security_group_example.id)], - service_name="com.amazonaws.${" + current.name + "}.datasync", + service_name="com.amazonaws.${" + current.region + "}.datasync", subnet_ids=[Token.as_string(aws_subnet_example.id)], vpc_endpoint_type="Interface", vpc_id=Token.as_string(aws_vpc_example.id) @@ -81,6 +81,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the DataSync Agent. * `activation_key` - (Optional) DataSync Agent activation key during resource creation. Conflicts with `ip_address`. If an `ip_address` is provided instead, Terraform will retrieve the `activation_key` as part of the resource creation. * `ip_address` - (Optional) DataSync Agent IP address to retrieve activation key during resource creation. Conflicts with `activation_key`. DataSync Agent must be accessible on port 80 from where Terraform is running. 
@@ -106,6 +107,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_datasync_agent.example + identity = { + "arn" = "arn:aws:datasync:us-west-2:123456789012:agent/agent-12345678901234567" + } +} + +resource "aws_datasync_agent" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the DataSync agent. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import `aws_datasync_agent` using the DataSync Agent Amazon Resource Name (ARN). For example: ```python @@ -129,4 +151,4 @@ Using `terraform import`, import `aws_datasync_agent` using the DataSync Agent A % terraform import aws_datasync_agent.example arn:aws:datasync:us-east-1:123456789012:agent/agent-12345678901234567 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/datasync_location_azure_blob.html.markdown b/website/docs/cdktf/python/r/datasync_location_azure_blob.html.markdown index 687341b299e3..628dfbfc5298 100644 --- a/website/docs/cdktf/python/r/datasync_location_azure_blob.html.markdown +++ b/website/docs/cdktf/python/r/datasync_location_azure_blob.html.markdown @@ -42,6 +42,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
* `access_tier` - (Optional) The access tier that you want your objects or files transferred into. Valid values: `HOT`, `COOL` and `ARCHIVE`. Default: `HOT`. * `agent_arns` - (Required) A list of DataSync Agent ARNs with which this location will be associated. * `authentication_type` - (Required) The authentication method DataSync uses to access your Azure Blob Storage. Valid values: `SAS`. @@ -64,6 +65,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_datasync_location_azure_blob.example + identity = { + "arn" = "arn:aws:datasync:us-west-2:123456789012:location/loc-12345678901234567" + } +} + +resource "aws_datasync_location_azure_blob" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the DataSync Azure Blob location. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import `aws_datasync_location_azure_blob` using the Amazon Resource Name (ARN). 
For example: ```python @@ -87,4 +109,4 @@ Using `terraform import`, import `aws_datasync_location_azure_blob` using the Am % terraform import aws_datasync_location_azure_blob.example arn:aws:datasync:us-east-1:123456789012:location/loc-12345678901234567 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/datasync_location_efs.html.markdown b/website/docs/cdktf/python/r/datasync_location_efs.html.markdown index 59bcbb614203..2202774be80c 100644 --- a/website/docs/cdktf/python/r/datasync_location_efs.html.markdown +++ b/website/docs/cdktf/python/r/datasync_location_efs.html.markdown @@ -41,6 +41,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `access_point_arn` - (Optional) Specifies the Amazon Resource Name (ARN) of the access point that DataSync uses to access the Amazon EFS file system. * `ec2_config` - (Required) Configuration block containing EC2 configurations for connecting to the EFS File System. * `efs_file_system_arn` - (Required) Amazon Resource Name (ARN) of EFS File System. @@ -66,6 +67,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. 
For example: + +```terraform +import { + to = aws_datasync_location_efs.example + identity = { + "arn" = "arn:aws:datasync:us-west-2:123456789012:location/loc-12345678901234567" + } +} + +resource "aws_datasync_location_efs" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the DataSync EFS location. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import `aws_datasync_location_efs` using the DataSync Task Amazon Resource Name (ARN). For example: ```python @@ -89,4 +111,4 @@ Using `terraform import`, import `aws_datasync_location_efs` using the DataSync % terraform import aws_datasync_location_efs.example arn:aws:datasync:us-east-1:123456789012:location/loc-12345678901234567 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/datasync_location_fsx_lustre_file_system.html.markdown b/website/docs/cdktf/python/r/datasync_location_fsx_lustre_file_system.html.markdown index 4519d5882515..94bd41cee6d6 100644 --- a/website/docs/cdktf/python/r/datasync_location_fsx_lustre_file_system.html.markdown +++ b/website/docs/cdktf/python/r/datasync_location_fsx_lustre_file_system.html.markdown @@ -36,6 +36,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `fsx_filesystem_arn` - (Required) The Amazon Resource Name (ARN) for the FSx for Lustre file system. * `security_group_arns` - (Optional) The Amazon Resource Names (ARNs) of the security groups that are to use to configure the FSx for Lustre file system. 
* `subdirectory` - (Optional) Subdirectory to perform actions as source or destination. @@ -76,4 +77,4 @@ Using `terraform import`, import `aws_datasync_location_fsx_lustre_file_system` % terraform import aws_datasync_location_fsx_lustre_file_system.example arn:aws:datasync:us-west-2:123456789012:location/loc-12345678901234567#arn:aws:fsx:us-west-2:476956259333:file-system/fs-08e04cd442c1bb94a ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/datasync_location_fsx_ontap_file_system.html.markdown b/website/docs/cdktf/python/r/datasync_location_fsx_ontap_file_system.html.markdown index 29ab4e0c5925..bc5c868ce336 100644 --- a/website/docs/cdktf/python/r/datasync_location_fsx_ontap_file_system.html.markdown +++ b/website/docs/cdktf/python/r/datasync_location_fsx_ontap_file_system.html.markdown @@ -54,6 +54,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `subdirectory` - (Optional) Path to the file share in the SVM where you'll copy your data. You can specify a junction path (also known as a mount point), qtree path (for NFS file shares), or share name (for SMB file shares) (e.g. `/vol1`, `/vol1/tree1`, `share1`). * `tags` - (Optional) Key-value pairs of resource tags to assign to the DataSync Location. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. 
@@ -114,4 +115,4 @@ Using `terraform import`, import `aws_datasync_location_fsx_ontap_file_system` u % terraform import aws_datasync_location_fsx_ontap_file_system.example arn:aws:datasync:us-west-2:123456789012:location/loc-12345678901234567#arn:aws:fsx:us-west-2:123456789012:storage-virtual-machine/svm-12345678abcdef123 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/datasync_location_fsx_openzfs_file_system.html.markdown b/website/docs/cdktf/python/r/datasync_location_fsx_openzfs_file_system.html.markdown index ef2e7d4a3c02..c4ce5816d212 100644 --- a/website/docs/cdktf/python/r/datasync_location_fsx_openzfs_file_system.html.markdown +++ b/website/docs/cdktf/python/r/datasync_location_fsx_openzfs_file_system.html.markdown @@ -43,6 +43,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `fsx_filesystem_arn` - (Required) The Amazon Resource Name (ARN) for the FSx for OpenZfs file system. * `protocol` - (Required) The type of protocol that DataSync uses to access your file system. See below. * `security_group_arns` - (Optional) The Amazon Resource Names (ARNs) of the security groups that are to use to configure the FSx for openzfs file system. 
@@ -96,4 +97,4 @@ Using `terraform import`, import `aws_datasync_location_fsx_openzfs_file_system` % terraform import aws_datasync_location_fsx_openzfs_file_system.example arn:aws:datasync:us-west-2:123456789012:location/loc-12345678901234567#arn:aws:fsx:us-west-2:123456789012:file-system/fs-08e04cd442c1bb94a ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/datasync_location_fsx_windows_file_system.html.markdown b/website/docs/cdktf/python/r/datasync_location_fsx_windows_file_system.html.markdown index 0a1fc9e16979..c7e07577c98d 100644 --- a/website/docs/cdktf/python/r/datasync_location_fsx_windows_file_system.html.markdown +++ b/website/docs/cdktf/python/r/datasync_location_fsx_windows_file_system.html.markdown @@ -38,6 +38,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `fsx_filesystem_arn` - (Required) The Amazon Resource Name (ARN) for the FSx for Windows file system. * `password` - (Required) The password of the user who has the permissions to access files and folders in the FSx for Windows file system. * `user` - (Required) The user who has the permissions to access files and folders in the FSx for Windows file system. 
@@ -81,4 +82,4 @@ Using `terraform import`, import `aws_datasync_location_fsx_windows_file_system` % terraform import aws_datasync_location_fsx_windows_file_system.example arn:aws:datasync:us-west-2:123456789012:location/loc-12345678901234567#arn:aws:fsx:us-west-2:476956259333:file-system/fs-08e04cd442c1bb94a ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/datasync_location_hdfs.html.markdown b/website/docs/cdktf/python/r/datasync_location_hdfs.html.markdown index 5f0c53e1cab3..7a445d9cf6b3 100644 --- a/website/docs/cdktf/python/r/datasync_location_hdfs.html.markdown +++ b/website/docs/cdktf/python/r/datasync_location_hdfs.html.markdown @@ -72,6 +72,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `agent_arns` - (Required) A list of DataSync Agent ARNs with which this location will be associated. * `authentication_type` - (Required) The type of authentication used to determine the identity of the user. Valid values are `SIMPLE` and `KERBEROS`. * `block_size` - (Optional) The size of data blocks to write into the HDFS cluster. The block size must be a multiple of 512 bytes. The default block size is 128 mebibytes (MiB). @@ -107,6 +108,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. 
For example: + +```terraform +import { + to = aws_datasync_location_hdfs.example + identity = { + "arn" = "arn:aws:datasync:us-west-2:123456789012:location/loc-12345678901234567" + } +} + +resource "aws_datasync_location_hdfs" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the DataSync HDFS location. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import `aws_datasync_location_hdfs` using the Amazon Resource Name (ARN). For example: ```python @@ -130,4 +152,4 @@ Using `terraform import`, import `aws_datasync_location_hdfs` using the Amazon R % terraform import aws_datasync_location_hdfs.example arn:aws:datasync:us-east-1:123456789012:location/loc-12345678901234567 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/datasync_location_nfs.html.markdown b/website/docs/cdktf/python/r/datasync_location_nfs.html.markdown index e0988a3c7e3c..6af32dda15dd 100644 --- a/website/docs/cdktf/python/r/datasync_location_nfs.html.markdown +++ b/website/docs/cdktf/python/r/datasync_location_nfs.html.markdown @@ -41,6 +41,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `mount_options` - (Optional) Configuration block containing mount options used by DataSync to access the NFS Server. * `on_prem_config` - (Required) Configuration block containing information for connecting to the NFS File System. * `server_hostname` - (Required) Specifies the IP address or DNS name of the NFS server. 
The DataSync Agent(s) use this to mount the NFS server. @@ -69,6 +70,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_datasync_location_nfs.example + identity = { + "arn" = "arn:aws:datasync:us-west-2:123456789012:location/loc-12345678901234567" + } +} + +resource "aws_datasync_location_nfs" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the DataSync NFS location. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import `aws_datasync_location_nfs` using the DataSync Task Amazon Resource Name (ARN). For example: ```python @@ -92,4 +114,4 @@ Using `terraform import`, import `aws_datasync_location_nfs` using the DataSync % terraform import aws_datasync_location_nfs.example arn:aws:datasync:us-east-1:123456789012:location/loc-12345678901234567 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/datasync_location_object_storage.html.markdown b/website/docs/cdktf/python/r/datasync_location_object_storage.html.markdown index 1ab82e9151ac..8ebda8fe8318 100644 --- a/website/docs/cdktf/python/r/datasync_location_object_storage.html.markdown +++ b/website/docs/cdktf/python/r/datasync_location_object_storage.html.markdown @@ -39,7 +39,8 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: -* `agent_arns` - (Required) A list of DataSync Agent ARNs with which this location will be associated. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `agent_arns` - (Optional) A list of DataSync Agent ARNs with which this location will be associated. For agentless cross-cloud transfers, this parameter does not need to be specified. * `access_key` - (Optional) The access key is used if credentials are required to access the self-managed object storage server. If your object storage requires a user name and password to authenticate, use `access_key` and `secret_key` to provide the user name and password, respectively. * `bucket_name` - (Required) The bucket on the self-managed object storage server that is used to read data from. * `secret_key` - (Optional) The secret key is used if credentials are required to access the self-managed object storage server. If your object storage requires a user name and password to authenticate, use `access_key` and `secret_key` to provide the user name and password, respectively. @@ -60,6 +61,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_datasync_location_object_storage.example + identity = { + "arn" = "arn:aws:datasync:us-west-2:123456789012:location/loc-12345678901234567" + } +} + +resource "aws_datasync_location_object_storage" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the DataSync object storage location. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import `aws_datasync_location_object_storage` using the Amazon Resource Name (ARN). 
For example: ```python @@ -83,4 +105,4 @@ Using `terraform import`, import `aws_datasync_location_object_storage` using th % terraform import aws_datasync_location_object_storage.example arn:aws:datasync:us-east-1:123456789012:location/loc-12345678901234567 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/datasync_location_s3.html.markdown b/website/docs/cdktf/python/r/datasync_location_s3.html.markdown index 6878734c163c..d61ae868c97b 100644 --- a/website/docs/cdktf/python/r/datasync_location_s3.html.markdown +++ b/website/docs/cdktf/python/r/datasync_location_s3.html.markdown @@ -66,6 +66,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `agent_arns` - (Optional) (Amazon S3 on Outposts only) Amazon Resource Name (ARN) of the DataSync agent on the Outpost. * `s3_bucket_arn` - (Required) Amazon Resource Name (ARN) of the S3 bucket, or the Amazon S3 access point if the S3 bucket is located on an AWS Outposts resource. * `s3_config` - (Required) Configuration block containing information for connecting to S3. @@ -89,6 +90,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. 
For example: + +```terraform +import { + to = aws_datasync_location_s3.example + identity = { + "arn" = "arn:aws:datasync:us-west-2:123456789012:location/loc-12345678901234567" + } +} + +resource "aws_datasync_location_s3" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the DataSync S3 location. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import `aws_datasync_location_s3` using the DataSync Task Amazon Resource Name (ARN). For example: ```python @@ -112,4 +134,4 @@ Using `terraform import`, import `aws_datasync_location_s3` using the DataSync T % terraform import aws_datasync_location_s3.example arn:aws:datasync:us-east-1:123456789012:location/loc-12345678901234567 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/datasync_location_smb.html.markdown b/website/docs/cdktf/python/r/datasync_location_smb.html.markdown index 1dac0cce2ae3..3baf2e2e7ab7 100644 --- a/website/docs/cdktf/python/r/datasync_location_smb.html.markdown +++ b/website/docs/cdktf/python/r/datasync_location_smb.html.markdown @@ -41,6 +41,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `agent_arns` - (Required) A list of DataSync Agent ARNs with which this location will be associated. * `domain` - (Optional) The name of the Windows domain the SMB server belongs to. * `mount_options` - (Optional) Configuration block containing mount options used by DataSync to access the SMB Server. Can be `AUTOMATIC`, `SMB2`, or `SMB3`. 
@@ -65,6 +66,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_datasync_location_smb.example + identity = { + "arn" = "arn:aws:datasync:us-west-2:123456789012:location/loc-12345678901234567" + } +} + +resource "aws_datasync_location_smb" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the DataSync SMB location. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import `aws_datasync_location_smb` using the Amazon Resource Name (ARN). For example: ```python @@ -88,4 +110,4 @@ Using `terraform import`, import `aws_datasync_location_smb` using the Amazon Re % terraform import aws_datasync_location_smb.example arn:aws:datasync:us-east-1:123456789012:location/loc-12345678901234567 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/datasync_task.html.markdown b/website/docs/cdktf/python/r/datasync_task.html.markdown index a5837f9473ae..d96c7277679f 100644 --- a/website/docs/cdktf/python/r/datasync_task.html.markdown +++ b/website/docs/cdktf/python/r/datasync_task.html.markdown @@ -121,6 +121,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `destination_location_arn` - (Required) Amazon Resource Name (ARN) of destination DataSync Location. 
* `source_location_arn` - (Required) Amazon Resource Name (ARN) of source DataSync Location. * `cloudwatch_log_group_arn` - (Optional) Amazon Resource Name (ARN) of the CloudWatch Log Group that is used to monitor and log events in the sync task. @@ -216,6 +217,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_datasync_task.example + identity = { + "arn" = "arn:aws:datasync:us-west-2:123456789012:task/task-12345678901234567" + } +} + +resource "aws_datasync_task" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the DataSync task. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import `aws_datasync_task` using the DataSync Task Amazon Resource Name (ARN). For example: ```python @@ -239,4 +261,4 @@ Using `terraform import`, import `aws_datasync_task` using the DataSync Task Ama % terraform import aws_datasync_task.example arn:aws:datasync:us-east-1:123456789012:task/task-12345678901234567 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/datazone_asset_type.html.markdown b/website/docs/cdktf/python/r/datazone_asset_type.html.markdown index ec6ee8f7a752..0e28a7642f5f 100644 --- a/website/docs/cdktf/python/r/datazone_asset_type.html.markdown +++ b/website/docs/cdktf/python/r/datazone_asset_type.html.markdown @@ -46,6 +46,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` - (Optional) The description of the custom asset type. * `forms_input` - (Optional) The metadata forms that are to be attached to the custom asset type. @@ -88,4 +89,4 @@ Using `terraform import`, import DataZone Asset Type using the `domain_identifie % terraform import aws_datazone_asset_type.example domain-id-12345678,example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/datazone_domain.html.markdown b/website/docs/cdktf/python/r/datazone_domain.html.markdown index 1d9eecbde6dc..c81ca6cea5ed 100644 --- a/website/docs/cdktf/python/r/datazone_domain.html.markdown +++ b/website/docs/cdktf/python/r/datazone_domain.html.markdown @@ -26,6 +26,7 @@ from cdktf import Fn, Token, TerraformStack # from imports.aws.datazone_domain import DatazoneDomain from imports.aws.iam_role import IamRole +from imports.aws.iam_role_policy import IamRolePolicy class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) @@ -48,28 +49,122 @@ class MyConvertedCode(TerraformStack): ], "Version": "2012-10-17" })), - inline_policy=[IamRoleInlinePolicy( - name="domain_execution_policy", - policy=Token.as_string( - Fn.jsonencode({ - "Statement": [{ - "Action": ["datazone:*", "ram:*", "sso:*", "kms:*"], - "Effect": "Allow", - "Resource": "*" - } - ], - "Version": "2012-10-17" - })) - ) - ], name="my_domain_execution_role" ) + aws_iam_role_policy_domain_execution_role = IamRolePolicy(self, "domain_execution_role_1", + policy=Token.as_string( + Fn.jsonencode({ + "Statement": [{ + "Action": ["datazone:*", "ram:*", "sso:*", "kms:*"], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + })), + role=domain_execution_role.name + ) + # This allows the Terraform resource name to match the original name. 
You can remove the call if you don't need them to match. + aws_iam_role_policy_domain_execution_role.override_logical_id("domain_execution_role") DatazoneDomain(self, "example", domain_execution_role=domain_execution_role.arn, name="example" ) ``` +### V2 Domain + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import Token, TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.data_aws_caller_identity import DataAwsCallerIdentity +from imports.aws.data_aws_iam_policy import DataAwsIamPolicy +from imports.aws.data_aws_iam_policy_document import DataAwsIamPolicyDocument +from imports.aws.datazone_domain import DatazoneDomain +from imports.aws.iam_role import IamRole +from imports.aws.iam_role_policy_attachment import IamRolePolicyAttachment +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + current = DataAwsCallerIdentity(self, "current") + domain_execution_role = DataAwsIamPolicy(self, "domain_execution_role", + name="SageMakerStudioDomainExecutionRolePolicy" + ) + domain_service_role = DataAwsIamPolicy(self, "domain_service_role", + name="SageMakerStudioDomainServiceRolePolicy" + ) + assume_role_domain_execution = DataAwsIamPolicyDocument(self, "assume_role_domain_execution", + statement=[DataAwsIamPolicyDocumentStatement( + actions=["sts:AssumeRole", "sts:TagSession", "sts:SetContext"], + condition=[DataAwsIamPolicyDocumentStatementCondition( + test="StringEquals", + values=[Token.as_string(current.account_id)], + variable="aws:SourceAccount" + ), DataAwsIamPolicyDocumentStatementCondition( + test="ForAllValues:StringLike", + values=["datazone*"], + variable="aws:TagKeys" + ) + ], + principals=[DataAwsIamPolicyDocumentStatementPrincipals( + identifiers=["datazone.amazonaws.com"], + type="Service" + ) + ] 
+ ) + ] + ) + assume_role_domain_service = DataAwsIamPolicyDocument(self, "assume_role_domain_service", + statement=[DataAwsIamPolicyDocumentStatement( + actions=["sts:AssumeRole"], + condition=[DataAwsIamPolicyDocumentStatementCondition( + test="StringEquals", + values=[Token.as_string(current.account_id)], + variable="aws:SourceAccount" + ) + ], + principals=[DataAwsIamPolicyDocumentStatementPrincipals( + identifiers=["datazone.amazonaws.com"], + type="Service" + ) + ] + ) + ] + ) + domain_execution = IamRole(self, "domain_execution", + assume_role_policy=Token.as_string(assume_role_domain_execution.json), + name="example-domain-execution-role" + ) + domain_service = IamRole(self, "domain_service", + assume_role_policy=Token.as_string(assume_role_domain_service.json), + name="example-domain-service-role" + ) + aws_iam_role_policy_attachment_domain_execution = + IamRolePolicyAttachment(self, "domain_execution_7", + policy_arn=Token.as_string(domain_execution_role.arn), + role=domain_execution.name + ) + # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. + aws_iam_role_policy_attachment_domain_execution.override_logical_id("domain_execution") + aws_iam_role_policy_attachment_domain_service = IamRolePolicyAttachment(self, "domain_service_8", + policy_arn=Token.as_string(domain_service_role.arn), + role=domain_service.name + ) + # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. 
+ aws_iam_role_policy_attachment_domain_service.override_logical_id("domain_service") + DatazoneDomain(self, "example", + domain_execution_role=domain_execution.arn, + domain_version="V2", + name="example-domain", + service_role=domain_service.arn + ) +``` + ## Argument Reference The following arguments are required: @@ -79,8 +174,11 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` - (Optional) Description of the Domain. +* `domain_version` - (Optional) Version of the Domain. Valid values are `V1` and `V2`. Defaults to `V1`. * `kms_key_identifier` - (Optional) ARN of the KMS key used to encrypt the Amazon DataZone domain, metadata and reporting data. +* `service_role` - (Optional) ARN of the service role used by DataZone. Required when `domain_version` is set to `V2`. * `single_sign_on` - (Optional) Single sign on options, used to [enable AWS IAM Identity Center](https://docs.aws.amazon.com/datazone/latest/userguide/enable-IAM-identity-center-for-datazone.html) for DataZone. * `skip_deletion_check` - (Optional) Whether to skip the deletion check for the Domain. @@ -125,4 +223,4 @@ Using `terraform import`, import DataZone Domain using the `domain_id`. 
For exam % terraform import aws_datazone_domain.example domain-id-12345678 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/datazone_environment.html.markdown b/website/docs/cdktf/python/r/datazone_environment.html.markdown index 6b84a2e864cd..818666bdd011 100644 --- a/website/docs/cdktf/python/r/datazone_environment.html.markdown +++ b/website/docs/cdktf/python/r/datazone_environment.html.markdown @@ -61,6 +61,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `account_identifier` - (Optional) The ID of the Amazon Web Services account where the environment exists * `account_region` - (Optional) The Amazon Web Services region where the environment exists. * `blueprint_identifier` - (Optional) The blueprint with which the environment is created. 
@@ -117,4 +118,4 @@ Using `terraform import`, import DataZone Environment using the `domain_idntifie % terraform import aws_datazone_environment.example dzd_d2i7tzk3tnjjf4,5vpywijpwryec0 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/datazone_environment_blueprint_configuration.html.markdown b/website/docs/cdktf/python/r/datazone_environment_blueprint_configuration.html.markdown index 69700053c4ef..eb926c58e94a 100644 --- a/website/docs/cdktf/python/r/datazone_environment_blueprint_configuration.html.markdown +++ b/website/docs/cdktf/python/r/datazone_environment_blueprint_configuration.html.markdown @@ -64,6 +64,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `manage_access_role_arn` - (Optional) ARN of the manage access role with which this blueprint is created. * `provisioning_role_arn` - (Optional) ARN of the provisioning role with which this blueprint is created. 
* `regional_parameters` - (Optional) Parameters for each region in which the blueprint is enabled @@ -97,4 +98,4 @@ Using `terraform import`, import DataZone Environment Blueprint Configuration us % terraform import aws_datazone_environment_blueprint_configuration.example domain-id-12345/environment-blueprint-id-54321 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/datazone_environment_profile.html.markdown b/website/docs/cdktf/python/r/datazone_environment_profile.html.markdown index d7481f67caba..e7dd6c874612 100644 --- a/website/docs/cdktf/python/r/datazone_environment_profile.html.markdown +++ b/website/docs/cdktf/python/r/datazone_environment_profile.html.markdown @@ -134,6 +134,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `aws_account_id` - (Required) - Id of the AWS account being used. * `aws_account_region` - (Required) - Desired region for environment profile. * `domain_identifier` - (Required) - Domain Identifier for environment profile. 
@@ -179,4 +180,4 @@ Using `terraform import`, import DataZone Environment Profile using a comma-deli % terraform import aws_datazone_environment_profile.example environment_profile-id-12345678,domain-id-12345678 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/datazone_form_type.html.markdown b/website/docs/cdktf/python/r/datazone_form_type.html.markdown index c95372861f68..e99ecde138c7 100644 --- a/website/docs/cdktf/python/r/datazone_form_type.html.markdown +++ b/website/docs/cdktf/python/r/datazone_form_type.html.markdown @@ -112,6 +112,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` - (Optional) Description of form type. Must have a length of between 1 and 2048 characters. * `status` - (Optional) Status of form type. Must be "ENABLED" or "DISABLED" If status is set to "ENABLED" terraform cannot delete the resource until it is manually changed in the AWS console. 
@@ -151,4 +152,4 @@ Using `terraform import`, import DataZone Form Type using a comma separated valu % terraform import aws_datazone_form_type.example domain_identifier,name,revision ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/datazone_glossary.html.markdown b/website/docs/cdktf/python/r/datazone_glossary.html.markdown index 5bca6c5042b1..29c0ac25dc95 100644 --- a/website/docs/cdktf/python/r/datazone_glossary.html.markdown +++ b/website/docs/cdktf/python/r/datazone_glossary.html.markdown @@ -125,6 +125,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` - (Optional) Description of the glossary. Must have a length between 0 and 4096. * `status` - (Optional) Status of business glossary. Valid values are DISABLED and ENABLED. @@ -136,7 +137,7 @@ This resource exports the following attributes in addition to the arguments abov ## Import -In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import DataZone Glossary using the `example_id_arg`. For example: +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import DataZone Glossary using a comma-delimited string combining the domain id, glossary id, and the id of the project it's under. For example: ```python # DO NOT EDIT. 
Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug @@ -159,4 +160,4 @@ Using `terraform import`, import DataZone Glossary using the import Datazone Glo % terraform import aws_datazone_glossary.example domain-id,glossary-id,owning-project-identifier ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/datazone_glossary_term.html.markdown b/website/docs/cdktf/python/r/datazone_glossary_term.html.markdown index e1b311240a0f..68c4edafcaf1 100644 --- a/website/docs/cdktf/python/r/datazone_glossary_term.html.markdown +++ b/website/docs/cdktf/python/r/datazone_glossary_term.html.markdown @@ -113,6 +113,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `long_description` - (Optional) Long description of entry. * `short_description` - (Optional) Short description of entry. * `status` - (Optional) If glossary term is ENABLED or DISABLED. 
@@ -159,4 +160,4 @@ Using `terraform import`, import DataZone Glossary Term using a comma-delimited % terraform import aws_datazone_glossary_term.example domain-id,glossary-term-id,glossary-id ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/datazone_project.html.markdown b/website/docs/cdktf/python/r/datazone_project.html.markdown index 652bfe19ad2a..feb13ba9dc09 100644 --- a/website/docs/cdktf/python/r/datazone_project.html.markdown +++ b/website/docs/cdktf/python/r/datazone_project.html.markdown @@ -64,6 +64,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `skip_deletion_check` - (Optional) Optional flag to delete all child entities within the project. * `description` - (Optional) Description of project. * `glossary_terms` - (Optional) List of glossary terms that can be used in the project. The list cannot be empty or include over 20 values. Each value must follow the regex of `[a-zA-Z0-9_-]{1,36}$`. 
@@ -115,4 +116,4 @@ Using `terraform import`, import DataZone Project using a colon-delimited string % terraform import aws_datazone_project.example domain-1234:project-1234 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/datazone_user_profile.html.markdown b/website/docs/cdktf/python/r/datazone_user_profile.html.markdown index cc146e0d8c82..98058b354c4a 100644 --- a/website/docs/cdktf/python/r/datazone_user_profile.html.markdown +++ b/website/docs/cdktf/python/r/datazone_user_profile.html.markdown @@ -44,6 +44,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `status` - (Optional) The user profile status. * `user_type` - (Optional) The user type. @@ -87,4 +88,4 @@ Using `terraform import`, import DataZone User Profile using the `user_identifie % terraform import aws_datazone_user_profile.example arn:aws:iam::123456789012:user/example,dzd_54nakfrg9k6suo,IAM ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/dax_cluster.html.markdown b/website/docs/cdktf/python/r/dax_cluster.html.markdown index 7e6896b16083..28a607bb5277 100644 --- a/website/docs/cdktf/python/r/dax_cluster.html.markdown +++ b/website/docs/cdktf/python/r/dax_cluster.html.markdown @@ -38,49 +38,37 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: -* `cluster_endpoint_encryption_type` – (Optional) The type of encryption the +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `cluster_endpoint_encryption_type` - (Optional) The type of encryption the cluster's endpoint should support. Valid values are: `NONE` and `TLS`. Default value is `NONE`. - -* `cluster_name` – (Required) Group identifier. DAX converts this name to +* `cluster_name` - (Required) Group identifier. DAX converts this name to lowercase - * `iam_role_arn` - (Required) A valid Amazon Resource Name (ARN) that identifies an IAM role. At runtime, DAX will assume this role and use the role's permissions to access DynamoDB on your behalf - -* `node_type` – (Required) The compute and memory capacity of the nodes. See +* `node_type` - (Required) The compute and memory capacity of the nodes. See [Nodes][1] for supported node types - -* `replication_factor` – (Required) The number of nodes in the DAX cluster. A +* `replication_factor` - (Required) The number of nodes in the DAX cluster. A replication factor of 1 will create a single-node cluster, without any read replicas - * `availability_zones` - (Optional) List of Availability Zones in which the nodes will be created - -* `description` – (Optional) Description for the cluster - -* `notification_topic_arn` – (Optional) An Amazon Resource Name (ARN) of an +* `description` - (Optional) Description for the cluster +* `notification_topic_arn` - (Optional) An Amazon Resource Name (ARN) of an SNS topic to send DAX notifications to. Example: `arn:aws:sns:us-east-1:012345678999:my_sns_topic` - -* `parameter_group_name` – (Optional) Name of the parameter group to associate +* `parameter_group_name` - (Optional) Name of the parameter group to associate with this DAX cluster - -* `maintenance_window` – (Optional) Specifies the weekly time range for when +* `maintenance_window` - (Optional) Specifies the weekly time range for when maintenance on the cluster is performed. 
The format is `ddd:hh24:mi-ddd:hh24:mi` (24H Clock UTC). The minimum maintenance window is a 60 minute period. Example: `sun:05:00-sun:09:00` - -* `security_group_ids` – (Optional) One or more VPC security groups associated +* `security_group_ids` - (Optional) One or more VPC security groups associated with the cluster - * `server_side_encryption` - (Optional) Encrypt at rest options - -* `subnet_group_name` – (Optional) Name of the subnet group to be used for the +* `subnet_group_name` - (Optional) Name of the subnet group to be used for the cluster - * `tags` - (Optional) A map of tags to assign to the resource. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. The `server_side_encryption` object supports the following: @@ -141,4 +129,4 @@ Using `terraform import`, import DAX Clusters using the `cluster_name`. For exam [1]: http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/DAX.concepts.cluster.html#DAX.concepts.nodes - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/dax_parameter_group.html.markdown b/website/docs/cdktf/python/r/dax_parameter_group.html.markdown index be1b23d56038..13c40fbef4b4 100644 --- a/website/docs/cdktf/python/r/dax_parameter_group.html.markdown +++ b/website/docs/cdktf/python/r/dax_parameter_group.html.markdown @@ -43,11 +43,10 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: -* `name` – (Required) The name of the parameter group. - +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
+* `name` - (Required) The name of the parameter group. * `description` - (Optional, ForceNew) A description of the parameter group. - -* `parameters` – (Optional) The parameters of the parameter group. +* `parameters` - (Optional) The parameters of the parameter group. ## parameters @@ -87,4 +86,4 @@ Using `terraform import`, import DAX Parameter Group using the `name`. For examp % terraform import aws_dax_parameter_group.example my_dax_pg ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/dax_subnet_group.html.markdown b/website/docs/cdktf/python/r/dax_subnet_group.html.markdown index c9269c3fa99d..6988f0bf6e59 100644 --- a/website/docs/cdktf/python/r/dax_subnet_group.html.markdown +++ b/website/docs/cdktf/python/r/dax_subnet_group.html.markdown @@ -36,16 +36,17 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: -* `name` – (Required) The name of the subnet group. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `name` - (Required) The name of the subnet group. * `description` - (Optional) A description of the subnet group. -* `subnet_ids` – (Required) A list of VPC subnet IDs for the subnet group. +* `subnet_ids` - (Required) A list of VPC subnet IDs for the subnet group. ## Attribute Reference This resource exports the following attributes in addition to the arguments above: * `id` - The name of the subnet group. -* `vpc_id` – VPC ID of the subnet group. +* `vpc_id` - VPC ID of the subnet group. ## Import @@ -72,4 +73,4 @@ Using `terraform import`, import DAX Subnet Group using the `name`. 
For example: % terraform import aws_dax_subnet_group.example my_dax_sg ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/db_cluster_snapshot.html.markdown b/website/docs/cdktf/python/r/db_cluster_snapshot.html.markdown index c9e9555c5d15..a484b4459b58 100644 --- a/website/docs/cdktf/python/r/db_cluster_snapshot.html.markdown +++ b/website/docs/cdktf/python/r/db_cluster_snapshot.html.markdown @@ -36,6 +36,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `db_cluster_identifier` - (Required) The DB Cluster Identifier from which to take the snapshot. * `db_cluster_snapshot_identifier` - (Required) The Identifier for the snapshot. * `shared_accounts` - (Optional) List of AWS Account IDs to share the snapshot with. Use `all` to make the snapshot public. @@ -91,4 +92,4 @@ Using `terraform import`, import `aws_db_cluster_snapshot` using the cluster sna % terraform import aws_db_cluster_snapshot.example my-cluster-snapshot ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/db_event_subscription.html.markdown b/website/docs/cdktf/python/r/db_event_subscription.html.markdown index f386f765d0ec..e1903de9b918 100644 --- a/website/docs/cdktf/python/r/db_event_subscription.html.markdown +++ b/website/docs/cdktf/python/r/db_event_subscription.html.markdown @@ -60,6 +60,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Optional) The name of the DB event subscription. By default generated by Terraform. * `name_prefix` - (Optional) The name of the DB event subscription. Conflicts with `name`. * `sns_topic` - (Required) The SNS topic to send events to. @@ -111,4 +112,4 @@ Using `terraform import`, import DB Event Subscriptions using the `name`. For ex % terraform import aws_db_event_subscription.default rds-event-sub ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/db_instance.html.markdown b/website/docs/cdktf/python/r/db_instance.html.markdown index 1ffdd2ca027e..a4e1a33d5509 100644 --- a/website/docs/cdktf/python/r/db_instance.html.markdown +++ b/website/docs/cdktf/python/r/db_instance.html.markdown @@ -29,7 +29,7 @@ See the AWS Docs on [RDS Instance Maintenance][instance-maintenance] for more in ~> **Note:** All arguments including the username and password will be stored in the raw state as plain-text. [Read more about sensitive data instate](https://www.terraform.io/docs/state/sensitive-data.html). --> **Note:** Write-Only argument `password_wo` is available to use in place of `password`. Write-Only arguments are supported in HashiCorp Terraform 1.11.0 and later. [Learn more](https://developer.hashicorp.com/terraform/language/v1.11.x/resources/ephemeral#write-only-arguments). +-> **Note:** Write-Only argument `password_wo` is available to use in place of `password`. Write-Only arguments are supported in HashiCorp Terraform 1.11.0 and later. [Learn more](https://developer.hashicorp.com/terraform/language/resources/ephemeral#write-only-arguments). > **Hands-on:** Try the [Manage AWS RDS Instances](https://learn.hashicorp.com/tutorials/terraform/aws-rds) tutorial on HashiCorp Learn. 
@@ -352,6 +352,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `allocated_storage` - (Required unless a `snapshot_identifier` or `replicate_source_db` is provided) The allocated storage in gibibytes. If `max_allocated_storage` is configured, this argument represents the initial storage allocation and differences from the configuration will be ignored automatically when Storage Autoscaling occurs. If `replicate_source_db` is set, the value is ignored during the creation of the instance. * `allow_major_version_upgrade` - (Optional) Indicates that major version upgrades are allowed. Changing this parameter does not result in an outage and @@ -381,7 +382,7 @@ Defaults to true. See [Oracle Character Sets Supported in Amazon RDS](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Appendix.OracleCharacterSets.html) or [Server-Level Collation for Microsoft SQL Server](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Appendix.SQLServer.CommonDBATasks.Collation.html) for more information. Cannot be set with `replicate_source_db`, `restore_to_point_in_time`, `s3_import`, or `snapshot_identifier`. -* `copy_tags_to_snapshot` – (Optional, boolean) Copy all Instance `tags` to snapshots. Default is `false`. +* `copy_tags_to_snapshot` - (Optional, boolean) Copy all Instance `tags` to snapshots. Default is `false`. * `custom_iam_instance_profile` - (Optional) The instance profile associated with the underlying Amazon EC2 instance of an RDS Custom DB instance. * `database_insights_mode` - (Optional) The mode of Database Insights that is enabled for the instance. Valid values: `standard`, `advanced` . 
* `db_name` - (Optional) The name of the database to create when the DB instance is created. If this parameter is not specified, no database is created in the DB instance. Note that this does not apply for Oracle or SQL Server engines. See the [AWS documentation](https://awscli.amazonaws.com/v2/documentation/api/latest/reference/rds/create-db-instance.html) for more details on what applies for those engines. If you are providing an Oracle db name, it needs to be in all upper case. Cannot be specified for a replica. @@ -656,4 +657,4 @@ Using `terraform import`, import DB Instances using the `identifier`. For exampl % terraform import aws_db_instance.default mydb-rds-instance ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/db_instance_automated_backups_replication.html.markdown b/website/docs/cdktf/python/r/db_instance_automated_backups_replication.html.markdown index 6e6a762551b9..b35ddf537c6a 100644 --- a/website/docs/cdktf/python/r/db_instance_automated_backups_replication.html.markdown +++ b/website/docs/cdktf/python/r/db_instance_automated_backups_replication.html.markdown @@ -113,6 +113,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `kms_key_id` - (Optional, Forces new resource) The AWS KMS key identifier for encryption of the replicated automated backups. The KMS key ID is the Amazon Resource Name (ARN) for the KMS encryption key in the destination AWS Region, for example, `arn:aws:kms:us-east-1:123456789012:key/AKIAIOSFODNN7EXAMPLE`. 
* `pre_signed_url` - (Optional, Forces new resource) A URL that contains a [Signature Version 4](https://docs.aws.amazon.com/general/latest/gr/signature-version-4.html) signed request for the [`StartDBInstanceAutomatedBackupsReplication`](https://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_StartDBInstanceAutomatedBackupsReplication.html) action to be called in the AWS Region of the source DB instance. * `retention_period` - (Optional, Forces new resource) The retention period for the replicated automated backups, defaults to `7`. @@ -156,4 +157,4 @@ Using `terraform import`, import RDS instance automated backups replication usin % terraform import aws_db_instance_automated_backups_replication.default arn:aws:rds:us-east-1:123456789012:auto-backup:ab-faaa2mgdj1vmp4xflr7yhsrmtbtob7ltrzzz2my ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/db_instance_role_association.html.markdown b/website/docs/cdktf/python/r/db_instance_role_association.html.markdown index b2443b2cea7f..3d7caf438efe 100644 --- a/website/docs/cdktf/python/r/db_instance_role_association.html.markdown +++ b/website/docs/cdktf/python/r/db_instance_role_association.html.markdown @@ -46,6 +46,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `db_instance_identifier` - (Required) DB Instance Identifier to associate with the IAM Role. * `feature_name` - (Required) Name of the feature for association. 
This can be found in the AWS documentation relevant to the integration or a full list is available in the `SupportedFeatureNames` list returned by [AWS CLI rds describe-db-engine-versions](https://docs.aws.amazon.com/cli/latest/reference/rds/describe-db-engine-versions.html). * `role_arn` - (Required) Amazon Resource Name (ARN) of the IAM Role to associate with the DB Instance. @@ -88,4 +89,4 @@ Using `terraform import`, import `aws_db_instance_role_association` using the DB % terraform import aws_db_instance_role_association.example my-db-instance,arn:aws:iam::123456789012:role/my-role ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/db_option_group.html.markdown b/website/docs/cdktf/python/r/db_option_group.html.markdown index 7e4da681bd47..cba219749d21 100644 --- a/website/docs/cdktf/python/r/db_option_group.html.markdown +++ b/website/docs/cdktf/python/r/db_option_group.html.markdown @@ -71,6 +71,7 @@ More information about this can be found [here](https://docs.aws.amazon.com/Amaz This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Optional, Forces new resource) Name of the option group. If omitted, Terraform will assign a random, unique name. Must be lowercase, to match as it is stored in AWS. * `name_prefix` - (Optional, Forces new resource) Creates a unique name beginning with the specified prefix. Conflicts with `name`. Must be lowercase, to match as it is stored in AWS. * `option_group_description` - (Optional) Description of the option group. Defaults to "Managed by Terraform". @@ -137,4 +138,4 @@ Using `terraform import`, import DB option groups using the `name`. 
For example: % terraform import aws_db_option_group.example mysql-option-group ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/db_parameter_group.html.markdown b/website/docs/cdktf/python/r/db_parameter_group.html.markdown index aa6fefca0f3c..b15de19f07d6 100644 --- a/website/docs/cdktf/python/r/db_parameter_group.html.markdown +++ b/website/docs/cdktf/python/r/db_parameter_group.html.markdown @@ -221,6 +221,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Optional, Forces new resource) The name of the DB parameter group. If omitted, Terraform will assign a random, unique name. * `name_prefix` - (Optional, Forces new resource) Creates a unique name beginning with the specified prefix. Conflicts with `name`. * `family` - (Required, Forces new resource) The family of the DB parameter group. @@ -272,4 +273,4 @@ Using `terraform import`, import DB Parameter groups using the `name`. For examp % terraform import aws_db_parameter_group.rds_pg rds-pg ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/db_proxy.html.markdown b/website/docs/cdktf/python/r/db_proxy.html.markdown index 99838e224138..bd34fb1b1bca 100644 --- a/website/docs/cdktf/python/r/db_proxy.html.markdown +++ b/website/docs/cdktf/python/r/db_proxy.html.markdown @@ -143,9 +143,11 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The identifier for the proxy. This name must be unique for all proxies owned by your AWS account in the specified AWS Region. An identifier must begin with a letter and must contain only ASCII letters, digits, and hyphens; it can't end with a hyphen or contain two consecutive hyphens. -* `auth` - (Required) Configuration block(s) with authorization mechanisms to connect to the associated instances or clusters. Described below. +* `auth` - (Optional) Configuration block(s) with authorization mechanisms to connect to the associated instances or clusters. Required when `default_auth_scheme` is `NONE` or unspecified. Described below. * `debug_logging` - (Optional) Whether the proxy includes detailed information about SQL statements in its logs. This information helps you to debug issues involving SQL behavior or the performance and scalability of the proxy connections. The debug information includes the text of SQL statements that you submit through the proxy. Thus, only enable this setting when needed for debugging, and only when you have security measures in place to safeguard any sensitive information that appears in the logs. +* `default_auth_scheme` - (Optional) Default authentication scheme that the proxy uses for client connections to the proxy and connections from the proxy to the underlying database. Valid values are `NONE` and `IAM_AUTH`. Defaults to `NONE`. * `engine_family` - (Required, Forces new resource) The kinds of databases that the proxy can connect to. This value determines which database network protocol the proxy recognizes when it interprets network traffic to and from the database. For Aurora MySQL, RDS for MariaDB, and RDS for MySQL databases, specify `MYSQL`. For Aurora PostgreSQL and RDS for PostgreSQL databases, specify `POSTGRESQL`. 
For RDS for Microsoft SQL Server, specify `SQLSERVER`. Valid values are `MYSQL`, `POSTGRESQL`, and `SQLSERVER`. * `idle_client_timeout` - (Optional) The number of seconds that a connection to the proxy can be inactive before the proxy disconnects it. You can set this value higher or lower than the connection timeout limit for the associated database. * `require_tls` - (Optional) A Boolean parameter that specifies whether Transport Layer Security (TLS) encryption is required for connections to the proxy. By enabling this setting, you can enforce encrypted TLS connections to the proxy. @@ -205,4 +207,4 @@ Using `terraform import`, import DB proxies using the `name`. For example: % terraform import aws_db_proxy.example example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/db_proxy_default_target_group.html.markdown b/website/docs/cdktf/python/r/db_proxy_default_target_group.html.markdown index 67a68bf8e014..2b85fd030c4a 100644 --- a/website/docs/cdktf/python/r/db_proxy_default_target_group.html.markdown +++ b/website/docs/cdktf/python/r/db_proxy_default_target_group.html.markdown @@ -68,6 +68,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `db_proxy_name` - (Required) Name of the RDS DB Proxy. * `connection_pool_config` - (Optional) The settings that determine the size and behavior of the connection pool for the target group. 
@@ -119,4 +120,4 @@ Using `terraform import`, import DB proxy default target groups using the `db_pr % terraform import aws_db_proxy_default_target_group.example example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/db_proxy_endpoint.html.markdown b/website/docs/cdktf/python/r/db_proxy_endpoint.html.markdown index 98c72343c39c..47926b4f6c35 100644 --- a/website/docs/cdktf/python/r/db_proxy_endpoint.html.markdown +++ b/website/docs/cdktf/python/r/db_proxy_endpoint.html.markdown @@ -38,6 +38,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `db_proxy_endpoint_name` - (Required) The identifier for the proxy endpoint. An identifier must begin with a letter and must contain only ASCII letters, digits, and hyphens; it can't end with a hyphen or contain two consecutive hyphens. * `db_proxy_name` - (Required) The name of the DB proxy associated with the DB proxy endpoint that you create. * `vpc_subnet_ids` - (Required) One or more VPC subnet IDs to associate with the new proxy. 
@@ -88,4 +89,4 @@ Using `terraform import`, import DB proxy endpoints using the `DB-PROXY-NAME/DB- % terraform import aws_db_proxy_endpoint.example example/example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/db_proxy_target.html.markdown b/website/docs/cdktf/python/r/db_proxy_target.html.markdown index 197e5c608d40..b93dbfe6b553 100644 --- a/website/docs/cdktf/python/r/db_proxy_target.html.markdown +++ b/website/docs/cdktf/python/r/db_proxy_target.html.markdown @@ -74,6 +74,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `db_proxy_name` - (Required, Forces new resource) The name of the DB proxy. * `target_group_name` - (Required, Forces new resource) The name of the target group. * `db_instance_identifier` - (Optional, Forces new resource) DB instance identifier. @@ -145,4 +146,4 @@ Provisioned Clusters: % terraform import aws_db_proxy_target.example example-proxy/default/TRACKED_CLUSTER/example-cluster ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/db_snapshot.html.markdown b/website/docs/cdktf/python/r/db_snapshot.html.markdown index 558f607da6a0..cde92eaf7916 100644 --- a/website/docs/cdktf/python/r/db_snapshot.html.markdown +++ b/website/docs/cdktf/python/r/db_snapshot.html.markdown @@ -49,6 +49,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `db_instance_identifier` - (Required) The DB Instance Identifier from which to take the snapshot. * `db_snapshot_identifier` - (Required) The Identifier for the snapshot. * `shared_accounts` - (Optional) List of AWS Account IDs to share the snapshot with. Use `all` to make the snapshot public. @@ -106,4 +107,4 @@ Using `terraform import`, import `aws_db_snapshot` using the snapshot identifier % terraform import aws_db_snapshot.example my-snapshot ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/db_snapshot_copy.html.markdown b/website/docs/cdktf/python/r/db_snapshot_copy.html.markdown index a349241cf8fc..a65109977928 100644 --- a/website/docs/cdktf/python/r/db_snapshot_copy.html.markdown +++ b/website/docs/cdktf/python/r/db_snapshot_copy.html.markdown @@ -58,6 +58,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `copy_tags` - (Optional) Whether to copy existing tags. Defaults to `false`. * `destination_region` - (Optional) The Destination region to place snapshot copy. * `kms_key_id` - (Optional) KMS key ID. 
@@ -122,4 +123,4 @@ Using `terraform import`, import `aws_db_snapshot_copy` using the snapshot ident % terraform import aws_db_snapshot_copy.example my-snapshot ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/db_subnet_group.html.markdown b/website/docs/cdktf/python/r/db_subnet_group.html.markdown index b274dd14b96e..f974742a2dcf 100644 --- a/website/docs/cdktf/python/r/db_subnet_group.html.markdown +++ b/website/docs/cdktf/python/r/db_subnet_group.html.markdown @@ -41,6 +41,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Optional, Forces new resource) The name of the DB subnet group. If omitted, Terraform will assign a random, unique name. * `name_prefix` - (Optional, Forces new resource) Creates a unique name beginning with the specified prefix. Conflicts with `name`. * `description` - (Optional) The description of the DB subnet group. Defaults to "Managed by Terraform". @@ -82,4 +83,4 @@ Using `terraform import`, import DB Subnet groups using the `name`. 
For example: % terraform import aws_db_subnet_group.default production-subnet-group ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/default_network_acl.html.markdown b/website/docs/cdktf/python/r/default_network_acl.html.markdown index bacde57de404..e214fb794556 100644 --- a/website/docs/cdktf/python/r/default_network_acl.html.markdown +++ b/website/docs/cdktf/python/r/default_network_acl.html.markdown @@ -167,6 +167,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `egress` - (Optional) Configuration block for an egress rule. Detailed below. * `ingress` - (Optional) Configuration block for an ingress rule. Detailed below. * `subnet_ids` - (Optional) List of Subnet IDs to apply the ACL to. See the notes above on Managing Subnets in the Default Network ACL @@ -186,6 +187,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `cidr_block` - (Optional) The CIDR block to match. This must be a valid network mask. * `icmp_code` - (Optional) The ICMP type code to be used. Default 0. * `icmp_type` - (Optional) The ICMP type to be used. Default 0. @@ -230,4 +232,4 @@ Using `terraform import`, import Default Network ACLs using the `id`. 
For exampl % terraform import aws_default_network_acl.sample acl-7aaabd18 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/default_route_table.html.markdown b/website/docs/cdktf/python/r/default_route_table.html.markdown index 57f5f268fc4b..6fdd540df225 100644 --- a/website/docs/cdktf/python/r/default_route_table.html.markdown +++ b/website/docs/cdktf/python/r/default_route_table.html.markdown @@ -79,6 +79,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `propagating_vgws` - (Optional) List of virtual gateways for propagation. * `route` - (Optional) Configuration block of routes. Detailed below. This argument is processed in [attribute-as-blocks mode](https://www.terraform.io/docs/configuration/attr-as-blocks.html). This means that omitting this argument is interpreted as ignoring any existing routes. To remove all managed routes an empty list should be specified. See the example above. * `tags` - (Optional) Map of tags to assign to the resource. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. @@ -151,4 +152,4 @@ Using `terraform import`, import Default VPC route tables using the `vpc_id`. 
Fo [tf-main-route-table-association]: /docs/providers/aws/r/main_route_table_association.html - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/default_security_group.html.markdown b/website/docs/cdktf/python/r/default_security_group.html.markdown index 12d83193f2f6..fd4023459f55 100644 --- a/website/docs/cdktf/python/r/default_security_group.html.markdown +++ b/website/docs/cdktf/python/r/default_security_group.html.markdown @@ -99,6 +99,7 @@ Removing this resource from your configuration will remove it from your statefil The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `egress` - (Optional, VPC only) Configuration block. Detailed below. * `ingress` - (Optional) Configuration block. Detailed below. * `tags` - (Optional) Map of tags to assign to the resource. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. @@ -158,4 +159,4 @@ Using `terraform import`, import Security Groups using the security group `id`. 
% terraform import aws_default_security_group.default_sg sg-903004f8 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/detective_graph.html.markdown b/website/docs/cdktf/python/r/detective_graph.html.markdown index 476cbc013e62..0cd77e255e34 100644 --- a/website/docs/cdktf/python/r/detective_graph.html.markdown +++ b/website/docs/cdktf/python/r/detective_graph.html.markdown @@ -37,6 +37,7 @@ class MyConvertedCode(TerraformStack): The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `tags` - (Optional) A map of tags to assign to the instance. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. ## Attribute Reference @@ -71,4 +72,4 @@ Using `terraform import`, import `aws_detective_graph` using the ARN. 
For exampl % terraform import aws_detective_graph.example arn:aws:detective:us-east-1:123456789101:graph:231684d34gh74g4bae1dbc7bd807d02d ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/detective_invitation_accepter.html.markdown b/website/docs/cdktf/python/r/detective_invitation_accepter.html.markdown index e1cce64b5f50..c28468557a25 100644 --- a/website/docs/cdktf/python/r/detective_invitation_accepter.html.markdown +++ b/website/docs/cdktf/python/r/detective_invitation_accepter.html.markdown @@ -48,6 +48,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `graph_arn` - (Required) ARN of the behavior graph that the member account is accepting the invitation for. ## Attribute Reference @@ -81,4 +82,4 @@ Using `terraform import`, import `aws_detective_invitation_accepter` using the g % terraform import aws_detective_invitation_accepter.example arn:aws:detective:us-east-1:123456789101:graph:231684d34gh74g4bae1dbc7bd807d02d ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/detective_member.html.markdown b/website/docs/cdktf/python/r/detective_member.html.markdown index 514ba38dae61..a5f34f29b4dc 100644 --- a/website/docs/cdktf/python/r/detective_member.html.markdown +++ b/website/docs/cdktf/python/r/detective_member.html.markdown @@ -43,6 +43,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `account_id` - (Required) AWS account ID for the account. * `email_address` - (Required) Email address for the account. * `graph_arn` - (Required) ARN of the behavior graph to invite the member accounts to contribute their data to. @@ -85,4 +86,4 @@ Using `terraform import`, import `aws_detective_member` using the ARN of the gra % terraform import aws_detective_member.example arn:aws:detective:us-east-1:123456789101:graph:231684d34gh74g4bae1dbc7bd807d02d/123456789012 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/detective_organization_admin_account.html.markdown b/website/docs/cdktf/python/r/detective_organization_admin_account.html.markdown index 307543f5f2e2..bf52d0ba6846 100644 --- a/website/docs/cdktf/python/r/detective_organization_admin_account.html.markdown +++ b/website/docs/cdktf/python/r/detective_organization_admin_account.html.markdown @@ -44,6 +44,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `account_id` - (Required) AWS account identifier to designate as a delegated administrator for Detective. 
## Attribute Reference @@ -71,4 +72,4 @@ Using `terraform import`, import `aws_detective_organization_admin_account` usin % terraform import aws_detective_organization_admin_account.example 123456789012 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/detective_organization_configuration.html.markdown b/website/docs/cdktf/python/r/detective_organization_configuration.html.markdown index ab313214c4c6..db649c49f53a 100644 --- a/website/docs/cdktf/python/r/detective_organization_configuration.html.markdown +++ b/website/docs/cdktf/python/r/detective_organization_configuration.html.markdown @@ -45,6 +45,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `auto_enable` - (Required) When this setting is enabled, all new accounts that are created in, or added to, the organization are added as a member accounts of the organization’s Detective delegated administrator and Detective is enabled in that AWS Region. * `graph_arn` - (Required) ARN of the behavior graph. 
@@ -79,4 +80,4 @@ Using `terraform import`, import `aws_detective_organization_admin_account` usin % terraform import aws_detective_organization_configuration.example arn:aws:detective:us-east-1:123456789012:graph:00b00fd5aecc0ab60a708659477e9617 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/devicefarm_device_pool.html.markdown b/website/docs/cdktf/python/r/devicefarm_device_pool.html.markdown index 7fd353b32730..d6f4a692ca9f 100644 --- a/website/docs/cdktf/python/r/devicefarm_device_pool.html.markdown +++ b/website/docs/cdktf/python/r/devicefarm_device_pool.html.markdown @@ -42,6 +42,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The name of the Device Pool * `project_arn` - (Required) The ARN of the project for the device pool. * `rule` - (Required) The device pool's rules. See [Rule](#rule). @@ -64,6 +65,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_devicefarm_device_pool.example + identity = { + "arn" = "arn:aws:devicefarm:us-west-2:123456789012:devicepool:4e7e7e7e-7e7e-7e7e-7e7e-7e7e7e7e7e7e/a1b2c3d4-5678-90ab-cdef-EXAMPLE11111" + } +} + +resource "aws_devicefarm_device_pool" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the Device Farm device pool. 
+ In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import DeviceFarm Device Pools using their ARN. For example: ```python @@ -87,4 +109,4 @@ Using `terraform import`, import DeviceFarm Device Pools using their ARN. For ex % terraform import aws_devicefarm_device_pool.example arn:aws:devicefarm:us-west-2:123456789012:devicepool:4fa784c7-ccb4-4dbf-ba4f-02198320daa1/4fa784c7-ccb4-4dbf-ba4f-02198320daa1 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/devicefarm_instance_profile.html.markdown b/website/docs/cdktf/python/r/devicefarm_instance_profile.html.markdown index 9d718474e407..96c6c6fd584b 100644 --- a/website/docs/cdktf/python/r/devicefarm_instance_profile.html.markdown +++ b/website/docs/cdktf/python/r/devicefarm_instance_profile.html.markdown @@ -37,6 +37,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` - (Optional) The description of the instance profile. * `exclude_app_packages_from_cleanup` - (Optional) An array of strings that specifies the list of app packages that should not be cleaned up from the device after a test run. * `name` - (Required) The name for the instance profile. @@ -53,6 +54,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. 
For example: + +```terraform +import { + to = aws_devicefarm_instance_profile.example + identity = { + "arn" = "arn:aws:devicefarm:us-west-2:123456789012:instanceprofile:4e7e7e7e-7e7e-7e7e-7e7e-7e7e7e7e7e7e" + } +} + +resource "aws_devicefarm_instance_profile" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the Device Farm instance profile. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import DeviceFarm Instance Profiles using their ARN. For example: ```python @@ -76,4 +98,4 @@ Using `terraform import`, import DeviceFarm Instance Profiles using their ARN. F % terraform import aws_devicefarm_instance_profile.example arn:aws:devicefarm:us-west-2:123456789012:instanceprofile:4fa784c7-ccb4-4dbf-ba4f-02198320daa1 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/devicefarm_network_profile.html.markdown b/website/docs/cdktf/python/r/devicefarm_network_profile.html.markdown index b456495d8ede..6eeb9f48a95e 100644 --- a/website/docs/cdktf/python/r/devicefarm_network_profile.html.markdown +++ b/website/docs/cdktf/python/r/devicefarm_network_profile.html.markdown @@ -44,6 +44,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` - (Optional) The description of the network profile. * `downlink_bandwidth_bits` - (Optional) The data throughput rate in bits per second, as an integer from `0` to `104857600`. Default value is `104857600`. 
* `downlink_delay_ms` - (Optional) Delay time for all packets to destination in milliseconds as an integer from `0` to `2000`. @@ -67,6 +68,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_devicefarm_network_profile.example + identity = { + "arn" = "arn:aws:devicefarm:us-west-2:123456789012:networkprofile:4e7e7e7e-7e7e-7e7e-7e7e-7e7e7e7e7e7e" + } +} + +resource "aws_devicefarm_network_profile" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the Device Farm network profile. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import DeviceFarm Network Profiles using their ARN. For example: ```python @@ -90,4 +112,4 @@ Using `terraform import`, import DeviceFarm Network Profiles using their ARN. Fo % terraform import aws_devicefarm_network_profile.example arn:aws:devicefarm:us-west-2:123456789012:networkprofile:4fa784c7-ccb4-4dbf-ba4f-02198320daa1 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/devicefarm_project.html.markdown b/website/docs/cdktf/python/r/devicefarm_project.html.markdown index 2fc9bb8a4609..51c1a7f1ea2d 100644 --- a/website/docs/cdktf/python/r/devicefarm_project.html.markdown +++ b/website/docs/cdktf/python/r/devicefarm_project.html.markdown @@ -40,6 +40,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The name of the project * `default_job_timeout_minutes` - (Optional) Sets the execution timeout value (in minutes) for a project. All test runs in this project use the specified execution timeout value unless overridden when scheduling a run. * `tags` - (Optional) A map of tags to assign to the resource. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. @@ -55,6 +56,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_devicefarm_project.example + identity = { + "arn" = "arn:aws:devicefarm:us-west-2:123456789012:project:4e7e7e7e-7e7e-7e7e-7e7e-7e7e7e7e7e7e" + } +} + +resource "aws_devicefarm_project" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the Device Farm project. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import DeviceFarm Projects using their ARN. For example: ```python @@ -78,4 +100,4 @@ Using `terraform import`, import DeviceFarm Projects using their ARN. 
For exampl % terraform import aws_devicefarm_project.example arn:aws:devicefarm:us-west-2:123456789012:project:4fa784c7-ccb4-4dbf-ba4f-02198320daa1 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/devicefarm_test_grid_project.html.markdown b/website/docs/cdktf/python/r/devicefarm_test_grid_project.html.markdown index 35a74043003a..af5277a7461e 100644 --- a/website/docs/cdktf/python/r/devicefarm_test_grid_project.html.markdown +++ b/website/docs/cdktf/python/r/devicefarm_test_grid_project.html.markdown @@ -43,6 +43,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The name of the Selenium testing project. * `description` - (Optional) Human-readable description of the project. * `vpc_config` - (Required) The VPC security groups and subnets that are attached to a project. See [VPC Config](#vpc-config) below. @@ -63,6 +64,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_devicefarm_test_grid_project.example + identity = { + "arn" = "arn:aws:devicefarm:us-west-2:123456789012:testgrid-project:4e7e7e7e-7e7e-7e7e-7e7e-7e7e7e7e7e7e" + } +} + +resource "aws_devicefarm_test_grid_project" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the Device Farm test grid project. 
+ In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import DeviceFarm Test Grid Projects using their ARN. For example: ```python @@ -86,4 +108,4 @@ Using `terraform import`, import DeviceFarm Test Grid Projects using their ARN. % terraform import aws_devicefarm_test_grid_project.example arn:aws:devicefarm:us-west-2:123456789012:testgrid-project:4fa784c7-ccb4-4dbf-ba4f-02198320daa1 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/devicefarm_upload.html.markdown b/website/docs/cdktf/python/r/devicefarm_upload.html.markdown index 3c6c03bbd53c..60a80feeb434 100644 --- a/website/docs/cdktf/python/r/devicefarm_upload.html.markdown +++ b/website/docs/cdktf/python/r/devicefarm_upload.html.markdown @@ -45,6 +45,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `content_type` - (Optional) The upload's content type (for example, application/octet-stream). * `name` - (Required) The upload's file name. The name should not contain any forward slashes (/). If you are uploading an iOS app, the file name must end with the .ipa extension. If you are uploading an Android app, the file name must end with the .apk extension. For all others, the file name must end with the .zip file extension. * `project_arn` - (Required) The ARN of the project for the upload. @@ -61,6 +62,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. 
For example: + +```terraform +import { + to = aws_devicefarm_upload.example + identity = { + "arn" = "arn:aws:devicefarm:us-west-2:123456789012:upload:4e7e7e7e-7e7e-7e7e-7e7e-7e7e7e7e7e7e/a1b2c3d4-5678-90ab-cdef-EXAMPLE11111" + } +} + +resource "aws_devicefarm_upload" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the Device Farm upload. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import DeviceFarm Uploads using their ARN. For example: ```python @@ -84,4 +106,4 @@ Using `terraform import`, import DeviceFarm Uploads using their ARN. For example % terraform import aws_devicefarm_upload.example arn:aws:devicefarm:us-west-2:123456789012:upload:4fa784c7-ccb4-4dbf-ba4f-02198320daa1 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/devopsguru_event_sources_config.html.markdown b/website/docs/cdktf/python/r/devopsguru_event_sources_config.html.markdown index 47d31bc6012f..e867097397de 100644 --- a/website/docs/cdktf/python/r/devopsguru_event_sources_config.html.markdown +++ b/website/docs/cdktf/python/r/devopsguru_event_sources_config.html.markdown @@ -44,8 +44,9 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -The following arguments are required: +This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `event_sources` - (Required) Configuration information about the integration of DevOps Guru as the Consumer via EventBridge with another AWS Service. See [`event_sources`](#event_sources-argument-reference) below. 
### `event_sources` Argument Reference @@ -64,7 +65,7 @@ This resource exports the following attributes in addition to the arguments abov ## Import -In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import DevOps Guru Event Sources Config using the `id`. For example: +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import DevOps Guru Event Sources Config using the region. For example: ```python # DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug @@ -81,10 +82,10 @@ class MyConvertedCode(TerraformStack): DevopsguruEventSourcesConfig.generate_config_for_import(self, "example", "us-east-1") ``` -Using `terraform import`, import DevOps Guru Event Sources Config using the `id`. For example: +Using `terraform import`, import DevOps Guru Event Sources Config using the region. For example: ```console % terraform import aws_devopsguru_event_sources_config.example us-east-1 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/devopsguru_notification_channel.html.markdown b/website/docs/cdktf/python/r/devopsguru_notification_channel.html.markdown index e76fada9bb02..d06488dc44e1 100644 --- a/website/docs/cdktf/python/r/devopsguru_notification_channel.html.markdown +++ b/website/docs/cdktf/python/r/devopsguru_notification_channel.html.markdown @@ -70,6 +70,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `filters` - (Optional) Filter configurations for the Amazon SNS notification topic. 
See the [`filters` argument reference](#filters-argument-reference) below. ### `sns` Argument Reference @@ -112,4 +113,4 @@ Using `terraform import`, import DevOps Guru Notification Channel using the `id` % terraform import aws_devopsguru_notification_channel.example id-12345678 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/devopsguru_resource_collection.html.markdown b/website/docs/cdktf/python/r/devopsguru_resource_collection.html.markdown index c49551bfcad7..4a1af5094b93 100644 --- a/website/docs/cdktf/python/r/devopsguru_resource_collection.html.markdown +++ b/website/docs/cdktf/python/r/devopsguru_resource_collection.html.markdown @@ -119,6 +119,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `cloudformation` - (Optional) A collection of AWS CloudFormation stacks. See [`cloudformation`](#cloudformation-argument-reference) below for additional details. * `tags` - (Optional) AWS tags used to filter the resources in the resource collection. See [`tags`](#tags-argument-reference) below for additional details. @@ -162,4 +163,4 @@ Using `terraform import`, import DevOps Guru Resource Collection using the `id`. 
% terraform import aws_devopsguru_resource_collection.example AWS_CLOUD_FORMATION ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/devopsguru_service_integration.html.markdown b/website/docs/cdktf/python/r/devopsguru_service_integration.html.markdown index 7462665ce369..49c733d7992e 100644 --- a/website/docs/cdktf/python/r/devopsguru_service_integration.html.markdown +++ b/website/docs/cdktf/python/r/devopsguru_service_integration.html.markdown @@ -84,8 +84,9 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -The following arguments are required: +This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `kms_server_side_encryption` - (Required) Information about whether DevOps Guru is configured to encrypt server-side data using KMS. See [`kms_server_side_encryption`](#kms_server_side_encryption-argument-reference) below. * `logs_anomaly_detection` - (Required) Information about whether DevOps Guru is configured to perform log anomaly detection on Amazon CloudWatch log groups. See [`logs_anomaly_detection`](#logs_anomaly_detection-argument-reference) below. * `ops_center` - (Required) Information about whether DevOps Guru is configured to create an OpsItem in AWS Systems Manager OpsCenter for each created insight. See [`ops_center`](#ops_center-argument-reference) below. @@ -112,7 +113,7 @@ This resource exports the following attributes in addition to the arguments abov ## Import -In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import DevOps Guru Service Integration using the `id`. 
For example: +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import DevOps Guru Service Integration using the region. For example: ```python # DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug @@ -129,10 +130,10 @@ class MyConvertedCode(TerraformStack): DevopsguruServiceIntegration.generate_config_for_import(self, "example", "us-east-1") ``` -Using `terraform import`, import DevOps Guru Service Integration using the `id`. For example: +Using `terraform import`, import DevOps Guru Service Integration using the region. For example: ```console % terraform import aws_devopsguru_service_integration.example us-east-1 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/directory_service_conditional_forwarder.html.markdown b/website/docs/cdktf/python/r/directory_service_conditional_forwarder.html.markdown index fb24ffc1e21b..c2c8896ace3c 100644 --- a/website/docs/cdktf/python/r/directory_service_conditional_forwarder.html.markdown +++ b/website/docs/cdktf/python/r/directory_service_conditional_forwarder.html.markdown @@ -37,6 +37,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `directory_id` - (Required) ID of directory. * `dns_ips` - (Required) A list of forwarder IP addresses. * `remote_domain_name` - (Required) The fully qualified domain name of the remote domain for which forwarders will be used. 
@@ -70,4 +71,4 @@ Using `terraform import`, import conditional forwarders using the directory id a % terraform import aws_directory_service_conditional_forwarder.example d-1234567890:example.com ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/directory_service_directory.html.markdown b/website/docs/cdktf/python/r/directory_service_directory.html.markdown index 26c1cac9bb6d..e53a23c48113 100644 --- a/website/docs/cdktf/python/r/directory_service_directory.html.markdown +++ b/website/docs/cdktf/python/r/directory_service_directory.html.markdown @@ -155,6 +155,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The fully qualified name for the directory, such as `corp.example.com` * `password` - (Required) The password for the directory administrator or connector user. * `size` - (Optional) (For `SimpleAD` and `ADConnector` types) The size of the directory (`Small` or `Large` are accepted values). `Large` by default. 
@@ -228,4 +229,4 @@ Using `terraform import`, import DirectoryService directories using the director % terraform import aws_directory_service_directory.sample d-926724cf57 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/directory_service_log_subscription.html.markdown b/website/docs/cdktf/python/r/directory_service_log_subscription.html.markdown index c275cc50a3d3..a803eff9f971 100644 --- a/website/docs/cdktf/python/r/directory_service_log_subscription.html.markdown +++ b/website/docs/cdktf/python/r/directory_service_log_subscription.html.markdown @@ -66,6 +66,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `directory_id` - (Required) ID of directory. * `log_group_name` - (Required) Name of the cloudwatch log group to which the logs should be published. The log group should be already created and the directory service principal should be provided with required permission to create stream and publish logs. Changing this value would delete the current subscription and create a new one. A directory can only have one log subscription at a time. 
@@ -98,4 +99,4 @@ Using `terraform import`, import Directory Service Log Subscriptions using the d % terraform import aws_directory_service_log_subscription.msad d-1234567890 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/directory_service_radius_settings.html.markdown b/website/docs/cdktf/python/r/directory_service_radius_settings.html.markdown index 7406207930ed..ddbec5fe9f1d 100644 --- a/website/docs/cdktf/python/r/directory_service_radius_settings.html.markdown +++ b/website/docs/cdktf/python/r/directory_service_radius_settings.html.markdown @@ -42,6 +42,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `authentication_protocol` - (Optional) The protocol specified for your RADIUS endpoints. Valid values: `PAP`, `CHAP`, `MS-CHAPv1`, `MS-CHAPv2`. * `directory_id` - (Required) The identifier of the directory for which you want to manager RADIUS settings. * `display_label` - (Required) Display label. @@ -90,4 +91,4 @@ Using `terraform import`, import RADIUS settings using the directory ID. 
For exa % terraform import aws_directory_service_radius_settings.example d-926724cf57 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/directory_service_region.html.markdown b/website/docs/cdktf/python/r/directory_service_region.html.markdown index ee89a46666bd..52ff7413472b 100644 --- a/website/docs/cdktf/python/r/directory_service_region.html.markdown +++ b/website/docs/cdktf/python/r/directory_service_region.html.markdown @@ -145,6 +145,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `desired_number_of_domain_controllers` - (Optional) The number of domain controllers desired in the replicated directory. Minimum value of `2`. * `directory_id` - (Required) The identifier of the directory to which you want to add Region replication. * `region_name` - (Required) The name of the Region where you want to add domain controllers for replication. 
@@ -195,4 +196,4 @@ Using `terraform import`, import Replicated Regions using directory ID,Region na % terraform import aws_directory_service_region.example d-9267651497,us-east-2 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/directory_service_shared_directory.html.markdown b/website/docs/cdktf/python/r/directory_service_shared_directory.html.markdown index 6489e179bde3..05be75594d67 100644 --- a/website/docs/cdktf/python/r/directory_service_shared_directory.html.markdown +++ b/website/docs/cdktf/python/r/directory_service_shared_directory.html.markdown @@ -58,6 +58,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `method` - (Optional) Method used when sharing a directory. Valid values are `ORGANIZATIONS` and `HANDSHAKE`. Default is `HANDSHAKE`. * `notes` - (Optional, Sensitive) Message sent by the directory owner to the directory consumer to help the directory consumer administrator determine whether to approve or reject the share invitation. 
@@ -104,4 +105,4 @@ Using `terraform import`, import Directory Service Shared Directories using the % terraform import aws_directory_service_shared_directory.example d-1234567890/d-9267633ece ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/directory_service_shared_directory_accepter.html.markdown b/website/docs/cdktf/python/r/directory_service_shared_directory_accepter.html.markdown index f71a04a2a84e..b0fde64e2f6d 100644 --- a/website/docs/cdktf/python/r/directory_service_shared_directory_accepter.html.markdown +++ b/website/docs/cdktf/python/r/directory_service_shared_directory_accepter.html.markdown @@ -47,8 +47,9 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -The following arguments are required: +This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `shared_directory_id` - (Required) Identifier of the directory that is stored in the directory consumer account that corresponds to the shared directory in the owner account. 
## Attribute Reference @@ -93,4 +94,4 @@ Using `terraform import`, import Directory Service Shared Directories using the % terraform import aws_directory_service_shared_directory_accepter.example d-9267633ece ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/directory_service_trust.html.markdown b/website/docs/cdktf/python/r/directory_service_trust.html.markdown index 366829645c20..e8aa29be120e 100644 --- a/website/docs/cdktf/python/r/directory_service_trust.html.markdown +++ b/website/docs/cdktf/python/r/directory_service_trust.html.markdown @@ -114,6 +114,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `conditional_forwarder_ip_addrs` - (Optional) Set of IPv4 addresses for the DNS server associated with the remote Directory. Can contain between 1 and 4 values. * `delete_associated_conditional_forwarder` - (Optional) Whether to delete the conditional forwarder when deleting the Trust relationship. 
@@ -169,4 +170,4 @@ Using `terraform import`, import the Trust relationship using the directory ID a % terraform import aws_directory_service_trust.example d-926724cf57/directory.example.com ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/dlm_lifecycle_policy.html.markdown b/website/docs/cdktf/python/r/dlm_lifecycle_policy.html.markdown index 894080355157..cca8feecf54b 100644 --- a/website/docs/cdktf/python/r/dlm_lifecycle_policy.html.markdown +++ b/website/docs/cdktf/python/r/dlm_lifecycle_policy.html.markdown @@ -96,6 +96,39 @@ class MyConvertedCode(TerraformStack): ) ``` +### Example Default Policy + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import Token, TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.dlm_lifecycle_policy import DlmLifecyclePolicy +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + DlmLifecyclePolicy(self, "example", + default_policy="VOLUME", + description="tf-acc-basic", + execution_role_arn=Token.as_string(aws_iam_role_example.arn), + policy_details=DlmLifecyclePolicyPolicyDetails( + create_interval=5, + exclusions=DlmLifecyclePolicyPolicyDetailsExclusions( + exclude_boot_volumes=False, + exclude_tags={ + "test": "exclude" + }, + exclude_volume_types=["gp2"] + ), + policy_language="SIMPLIFIED", + resource_type="VOLUME" + ) + ) +``` + ### Example Cross-Region Snapshot Copy Usage ```python @@ -231,12 +264,66 @@ class MyConvertedCode(TerraformStack): aws_iam_role_policy_attachment_example.override_logical_id("example") ``` +### Example Post/Pre Scripts + +```python +# DO NOT EDIT. 
Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import Token, TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.data_aws_iam_policy import DataAwsIamPolicy +from imports.aws.dlm_lifecycle_policy import DlmLifecyclePolicy +from imports.aws.iam_role_policy_attachment import IamRolePolicyAttachment +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + DlmLifecyclePolicy(self, "example", + description="tf-acc-basic", + execution_role_arn=Token.as_string(aws_iam_role_example.arn), + policy_details=DlmLifecyclePolicyPolicyDetails( + resource_types=["INSTANCE"], + schedule=[DlmLifecyclePolicyPolicyDetailsSchedule( + create_rule=DlmLifecyclePolicyPolicyDetailsScheduleCreateRule( + interval=12, + scripts=DlmLifecyclePolicyPolicyDetailsScheduleCreateRuleScripts( + execute_operation_on_script_failure=False, + execution_handler="AWS_VSS_BACKUP", + maximum_retry_count=2 + ) + ), + name="Windows VSS", + retain_rule=DlmLifecyclePolicyPolicyDetailsScheduleRetainRule( + count=10 + ) + ) + ], + target_tags={ + "tag1": "Windows" + } + ) + ) + aws_iam_role_policy_attachment_example = IamRolePolicyAttachment(self, "example_1", + policy_arn=Token.as_string(data_aws_iam_policy_example.arn), + role=test.id + ) + # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. + aws_iam_role_policy_attachment_example.override_logical_id("example") + DataAwsIamPolicy(self, "test", + name="AWSDataLifecycleManagerSSMFullAccess" + ) +``` + ## Argument Reference This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` - (Required) A description for the DLM lifecycle policy. * `execution_role_arn` - (Required) The ARN of an IAM role that is able to be assumed by the DLM service. +* `default_policy` - (Optional) Specify the type of default policy to create. Valid values are `VOLUME` or `INSTANCE`. * `policy_details` - (Required) See the [`policy_details` configuration](#policy-details-arguments) block. Max of 1. * `state` - (Optional) Whether the lifecycle policy should be enabled or disabled. `ENABLED` or `DISABLED` are valid values. Defaults to `ENABLED`. * `tags` - (Optional) Key-value map of resource tags. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. @@ -244,13 +331,20 @@ This resource supports the following arguments: #### Policy Details arguments * `action` - (Optional) The actions to be performed when the event-based policy is triggered. You can specify only one action per policy. This parameter is required for event-based policies only. If you are creating a snapshot or AMI policy, omit this parameter. See the [`action` configuration](#action-arguments) block. +* `copy_tags` - (Optional, Default policies only) Indicates whether the policy should copy tags from the source resource to the snapshot or AMI. Default value is `false`. +* `create_interval` - (Optional, Default policies only) How often the policy should run and create snapshots or AMIs. Valid values range from `1` to `7`. Default value is `1`. +* `exclusions` - (Optional, Default policies only) Specifies exclusion parameters for volumes or instances for which you do not want to create snapshots or AMIs.
See the [`exclusions` configuration](#exclusions-arguments) block. +* `extend_deletion` - (Optional, Default policies only) Snapshot or AMI retention behavior for the policy if the source volume or instance is deleted, or if the policy enters the error, disabled, or deleted state. Default value is `false`. +* `retain_interval` - (Optional, Default policies only) Specifies how long the policy should retain snapshots or AMIs before deleting them. Valid values range from `2` to `14`. Default value is `7`. * `event_source` - (Optional) The event that triggers the event-based policy. This parameter is required for event-based policies only. If you are creating a snapshot or AMI policy, omit this parameter. See the [`event_source` configuration](#event-source-arguments) block. +* `resource_type` - (Optional, Default policies only) Type of default policy to create. Valid values are `VOLUME` and `INSTANCE`. * `resource_types` - (Optional) A list of resource types that should be targeted by the lifecycle policy. Valid values are `VOLUME` and `INSTANCE`. -* `resource_locations` - (Optional) The location of the resources to backup. If the source resources are located in an AWS Region, specify `CLOUD`. If the source resources are located on an Outpost in your account, specify `OUTPOST`. If you specify `OUTPOST`, Amazon Data Lifecycle Manager backs up all resources of the specified type with matching target tags across all of the Outposts in your account. Valid values are `CLOUD` and `OUTPOST`. +* `resource_locations` - (Optional) The location of the resources to backup. If the source resources are located in an AWS Region, specify `CLOUD`. If the source resources are located on an Outpost in your account, specify `OUTPOST`. If the source resources are located in a Local Zone, specify `LOCAL_ZONE`. Valid values are `CLOUD`, `LOCAL_ZONE`, and `OUTPOST`. +* `policy_language` - (Optional) Type of policy to create. `SIMPLIFIED` To create a default policy.
`STANDARD` To create a custom policy. * `policy_type` - (Optional) The valid target resource types and actions a policy can manage. Specify `EBS_SNAPSHOT_MANAGEMENT` to create a lifecycle policy that manages the lifecycle of Amazon EBS snapshots. Specify `IMAGE_MANAGEMENT` to create a lifecycle policy that manages the lifecycle of EBS-backed AMIs. Specify `EVENT_BASED_POLICY` to create an event-based policy that performs specific actions when a defined event occurs in your AWS account. Default value is `EBS_SNAPSHOT_MANAGEMENT`. * `parameters` - (Optional) A set of optional parameters for snapshot and AMI lifecycle policies. See the [`parameters` configuration](#parameters-arguments) block. * `schedule` - (Optional) See the [`schedule` configuration](#schedule-arguments) block. -* `target_tags` (Optional) A map of tag keys and their values. Any resources that match the `resource_types` and are tagged with _any_ of these tags will be targeted. +* `target_tags` (Optional) A map of tag keys and their values. Any resources that match the `resource_types` and are tagged with _any_ of these tags will be targeted. Required when `policy_type` is `EBS_SNAPSHOT_MANAGEMENT` or `IMAGE_MANAGEMENT`. Must not be specified when `policy_type` is `EVENT_BASED_POLICY`. ~> Note: You cannot have overlapping lifecycle policies that share the same `target_tags`. Terraform is unable to detect this at plan time but it will fail during apply. @@ -281,6 +375,12 @@ This resource supports the following arguments: * `event_type` - (Required) The type of event. Currently, only `shareSnapshot` events are supported. * `snapshot_owner` - (Required) The IDs of the AWS accounts that can trigger policy by sharing snapshots with your account. The policy only runs if one of the specified AWS accounts shares a snapshot with your account. +#### Exclusions arguments + +* `exclude_boot_volumes` - (Optional) Indicates whether to exclude volumes that are attached to instances as the boot volume. 
To exclude boot volumes, specify `true`. +* `exclude_tags` - (Optional) Map specifies whether to exclude volumes that have specific tags. +* `exclude_volume_types` - (Optional) List specifies the volume types to exclude. + #### Parameters arguments * `exclude_boot_volume` - (Optional) Indicates whether to exclude the root volume from snapshots created using CreateSnapshots. The default is `false`. @@ -288,6 +388,7 @@ This resource supports the following arguments: #### Schedule arguments +* `archive_rule` - (Optional) Specifies a snapshot archiving rule for a schedule. See [`archive_rule`](#archive-rule-arguments) block. * `copy_tags` - (Optional) Copy all user-defined tags on a source volume to snapshots of the volume created by this policy. * `create_rule` - (Required) See the [`create_rule`](#create-rule-arguments) block. Max of 1 per schedule. * `cross_region_copy_rule` (Optional) - See the [`cross_region_copy_rule`](#cross-region-copy-rule-arguments) block. Max of 3 per schedule. @@ -299,12 +400,21 @@ This resource supports the following arguments: * `tags_to_add` - (Optional) A map of tag keys and their values. DLM lifecycle policies will already tag the snapshot with the tags on the volume. This configuration adds extra tags on top of these. * `variable_tags` - (Optional) A map of tag keys and variable values, where the values are determined when the policy is executed. Only `$(instance-id)` or `$(timestamp)` are valid values. Can only be used when `resource_types` is `INSTANCE`. +#### Archive Rule Arguments + +* `archive_retain_rule` - (Required) Information about the retention period for the snapshot archiving rule. See the [`archive_retain_rule`](#archive-retain-rule-arguments) block. + +#### Archive Retain Rule Arguments + +* `retention_archive_tier` - (Required) Information about retention period in the Amazon EBS Snapshots Archive. See the [`retention_archive_tier`](#retention-archive-tier-arguments) block. 
+ #### Create Rule arguments * `cron_expression` - (Optional) The schedule, as a Cron expression. The schedule interval must be between 1 hour and 1 year. Conflicts with `interval`, `interval_unit`, and `times`. * `interval` - (Optional) How often this lifecycle policy should be evaluated. `1`, `2`,`3`,`4`,`6`,`8`,`12` or `24` are valid values. Conflicts with `cron_expression`. If set, `interval_unit` and `times` must also be set. * `interval_unit` - (Optional) The unit for how often the lifecycle policy should be evaluated. `HOURS` is currently the only allowed value and also the default value. Conflicts with `cron_expression`. Must be set if `interval` is set. * `location` - (Optional) Specifies the destination for snapshots created by the policy. To create snapshots in the same Region as the source resource, specify `CLOUD`. To create snapshots on the same Outpost as the source resource, specify `OUTPOST_LOCAL`. If you omit this parameter, `CLOUD` is used by default. If the policy targets resources in an AWS Region, then you must create snapshots in the same Region as the source resource. If the policy targets resources on an Outpost, then you can create snapshots on the same Outpost as the source resource, or in the Region of that Outpost. Valid values are `CLOUD` and `OUTPOST_LOCAL`. +* `scripts` - (Optional) Specifies pre and/or post scripts for a snapshot lifecycle policy that targets instances. Valid only when `resource_type` is INSTANCE. See the [`scripts` configuration](#scripts-rule-arguments) block. * `times` - (Optional) A list of times in 24 hour clock format that sets when the lifecycle policy should be evaluated. Max of 1. Conflicts with `cron_expression`. Must be set if `interval` is set. #### Deprecate Rule arguments @@ -339,7 +449,8 @@ This resource supports the following arguments: * `deprecate_rule` - (Optional) The AMI deprecation rule for cross-Region AMI copies created by the rule. 
See the [`deprecate_rule`](#cross-region-copy-rule-deprecate-rule-arguments) block. * `encrypted` - (Required) To encrypt a copy of an unencrypted snapshot if encryption by default is not enabled, enable encryption using this parameter. Copies of encrypted snapshots are encrypted, even if this parameter is false or if encryption by default is not enabled. * `retain_rule` - (Required) The retention rule that indicates how long snapshot copies are to be retained in the destination Region. See the [`retain_rule`](#cross-region-copy-rule-retain-rule-arguments) block. Max of 1 per schedule. -* `target` - (Required) The target Region or the Amazon Resource Name (ARN) of the target Outpost for the snapshot copies. +* `target` - Use only for DLM policies of `policy_type=EBS_SNAPSHOT_MANAGEMENT`. The target Region or the Amazon Resource Name (ARN) of the target Outpost for the snapshot copies. +* `target_region` - Use only for DLM policies of `policy_type=IMAGE_MANAGEMENT`. The target Region or the Amazon Resource Name (ARN) of the target Outpost for the snapshot copies. #### Cross Region Copy Rule Deprecate Rule arguments @@ -351,6 +462,26 @@ This resource supports the following arguments: * `interval` - (Required) The amount of time to retain each snapshot. The maximum is 100 years. This is equivalent to 1200 months, 5200 weeks, or 36500 days. * `interval_unit` - (Required) The unit of time for time-based retention. Valid values: `DAYS`, `WEEKS`, `MONTHS`, or `YEARS`. +#### Scripts Rule arguments + +* `execute_operation_on_script_failure` - (Optional) Indicates whether Amazon Data Lifecycle Manager should default to crash-consistent snapshots if the pre script fails. The default is `true`. + +* `execution_handler` - (Required) The SSM document that includes the pre and/or post scripts to run. In case automating VSS backups, specify `AWS_VSS_BACKUP`. 
In case automating application-consistent snapshots for SAP HANA workloads, specify `AWSSystemsManagerSAP-CreateDLMSnapshotForSAPHANA`. If you are using a custom SSM document that you own, specify either the name or ARN of the SSM document. + +* `execution_handler_service` - (Optional) Indicates the service used to execute the pre and/or post scripts. If using custom SSM documents or automating application-consistent snapshots of SAP HANA workloads, specify `AWS_SYSTEMS_MANAGER`. In case automating VSS Backups, omit this parameter. The default is `AWS_SYSTEMS_MANAGER`. + +* `execution_timeout` - (Optional) Specifies a timeout period, in seconds, after which Amazon Data Lifecycle Manager fails the script run attempt if it has not completed. In case automating VSS Backups, omit this parameter. The default is `10`. + +* `maximum_retry_count` - (Optional) Specifies the number of times Amazon Data Lifecycle Manager should retry scripts that fail. Must be an integer between `0` and `3`. The default is `0`. + +* `stages` - (Optional) List to indicate which scripts Amazon Data Lifecycle Manager should run on target instances. Pre scripts run before Amazon Data Lifecycle Manager initiates snapshot creation. Post scripts run after Amazon Data Lifecycle Manager initiates snapshot creation. Valid values: `PRE` and `POST`. The default is `PRE` and `POST`. + +#### Retention Archive Tier Arguments + +* `count` - (Optional) The maximum number of snapshots to retain in the archive storage tier for each volume. Must be an integer between `1` and `1000`. Conflicts with `interval` and `interval_unit`. +* `interval` - (Optional) Specifies the period of time to retain snapshots in the archive tier. After this period expires, the snapshot is permanently deleted. Conflicts with `count`. If set, `interval_unit` must also be set. +* `interval_unit` - (Optional) The unit of time for time-based retention. Valid values are `DAYS`, `WEEKS`, `MONTHS`, `YEARS`. Conflicts with `count`.
Must be set if `interval` is set. + ## Attribute Reference This resource exports the following attributes in addition to the arguments above: @@ -384,4 +515,4 @@ Using `terraform import`, import DLM lifecycle policies using their policy ID. F % terraform import aws_dlm_lifecycle_policy.example policy-abcdef12345678901 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/dms_certificate.html.markdown b/website/docs/cdktf/python/r/dms_certificate.html.markdown index 57870b5896e9..ad83ed9fb06e 100644 --- a/website/docs/cdktf/python/r/dms_certificate.html.markdown +++ b/website/docs/cdktf/python/r/dms_certificate.html.markdown @@ -42,10 +42,8 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `certificate_id` - (Required) The certificate identifier. - - - Must contain from 1 to 255 alphanumeric characters and hyphens. - * `certificate_pem` - (Optional) The contents of the .pem X.509 certificate file for the certificate. Either `certificate_pem` or `certificate_wallet` must be set. * `certificate_wallet` - (Optional) The contents of the Oracle Wallet certificate for use with SSL, provided as a base64-encoded String. Either `certificate_pem` or `certificate_wallet` must be set. * `tags` - (Optional) A map of tags to assign to the resource. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. @@ -82,4 +80,4 @@ Using `terraform import`, import certificates using the `certificate_id`. 
For ex % terraform import aws_dms_certificate.test test-dms-certificate-tf ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/dms_endpoint.html.markdown b/website/docs/cdktf/python/r/dms_endpoint.html.markdown index e6a73e73beef..49558849386a 100644 --- a/website/docs/cdktf/python/r/dms_endpoint.html.markdown +++ b/website/docs/cdktf/python/r/dms_endpoint.html.markdown @@ -14,8 +14,6 @@ Provides a DMS (Data Migration Service) endpoint resource. DMS endpoints can be ~> **Note:** All arguments including the password will be stored in the raw state as plain-text. [Read more about sensitive data in state](https://www.terraform.io/docs/state/sensitive-data.html). -~> **Note:** The `s3_settings` argument is deprecated, may not be maintained, and will be removed in a future version. Use the [`aws_dms_s3_endpoint`](/docs/providers/aws/r/dms_s3_endpoint.html) resource instead. - ## Example Usage ```python @@ -51,15 +49,17 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -The following arguments are required: +This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `endpoint_id` - (Required) Database endpoint identifier. Identifiers must contain from 1 to 255 alphanumeric characters or hyphens, begin with a letter, contain only ASCII letters, digits, and hyphens, not end with a hyphen, and not contain two consecutive hyphens. * `endpoint_type` - (Required) Type of endpoint. Valid values are `source`, `target`. * `engine_name` - (Required) Type of engine for the endpoint. 
Valid values are `aurora`, `aurora-postgresql`, `aurora-serverless`, `aurora-postgresql-serverless`,`azuredb`, `azure-sql-managed-instance`, `babelfish`, `db2`, `db2-zos`, `docdb`, `dynamodb`, `elasticsearch`, `kafka`, `kinesis`, `mariadb`, `mongodb`, `mysql`, `opensearch`, `oracle`, `postgres`, `redshift`,`redshift-serverless`, `s3`, `sqlserver`, `neptune` ,`sybase`. Please note that some of engine names are available only for `target` endpoint type (e.g. `redshift`). -* `kms_key_arn` - (Required when `engine_name` is `mongodb`, cannot be set when `engine_name` is `s3`, optional otherwise) ARN for the KMS key that will be used to encrypt the connection parameters. If you do not specify a value for `kms_key_arn`, then AWS DMS will use your default encryption key. AWS KMS creates the default encryption key for your AWS account. Your AWS account has a different default encryption key for each AWS region. To encrypt an S3 target with a KMS Key, use the parameter `s3_settings.server_side_encryption_kms_key_id`. When `engine_name` is `redshift`, `kms_key_arn` is the KMS Key for the Redshift target and the parameter `redshift_settings.server_side_encryption_kms_key_id` encrypts the S3 intermediate storage. +* `kms_key_arn` - (Required when `engine_name` is `mongodb`, optional otherwise) ARN for the KMS key that will be used to encrypt the connection parameters. If you do not specify a value for `kms_key_arn`, then AWS DMS will use your default encryption key. AWS KMS creates the default encryption key for your AWS account. Your AWS account has a different default encryption key for each AWS region. When `engine_name` is `redshift`, `kms_key_arn` is the KMS Key for the Redshift target and the parameter `redshift_settings.server_side_encryption_kms_key_id` encrypts the S3 intermediate storage. 
The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `certificate_arn` - (Optional, Default: empty string) ARN for the certificate. * `database_name` - (Optional) Name of the endpoint database. * `elasticsearch_settings` - (Optional) Configuration block for OpenSearch settings. See below. @@ -67,12 +67,12 @@ The following arguments are optional: * `kafka_settings` - (Optional) Configuration block for Kafka settings. See below. * `kinesis_settings` - (Optional) Configuration block for Kinesis settings. See below. * `mongodb_settings` - (Optional) Configuration block for MongoDB settings. See below. +* `oracle_settings` - (Optional) Configuration block for Oracle settings. See below. * `password` - (Optional) Password to be used to login to the endpoint database. * `postgres_settings` - (Optional) Configuration block for Postgres settings. See below. * `pause_replication_tasks` - (Optional) Whether to pause associated running replication tasks, regardless if they are managed by Terraform, prior to modifying the endpoint. Only tasks paused by the resource will be restarted after the modification completes. Default is `false`. * `port` - (Optional) Port used by the endpoint database. * `redshift_settings` - (Optional) Configuration block for Redshift settings. See below. -* `s3_settings` - (Optional) (**Deprecated**, use the [`aws_dms_s3_endpoint`](/docs/providers/aws/r/dms_s3_endpoint.html) resource instead) Configuration block for S3 settings. See below. 
* `secrets_manager_access_role_arn` - (Optional) ARN of the IAM role that specifies AWS DMS as the trusted entity and has the required permissions to access the value in the Secrets Manager secret referred to by `secrets_manager_arn`. The role must allow the `iam:PassRole` action. ~> **Note:** You can specify one of two sets of values for these permissions. You can specify the values for this setting and `secrets_manager_arn`. Or you can specify clear-text values for `username`, `password` , `server_name`, and `port`. You can't specify both. @@ -144,11 +144,18 @@ The following arguments are optional: * `extract_doc_id` - (Optional) Document ID. Use this setting when `nesting_level` is set to `none`. Default is `false`. * `nesting_level` - (Optional) Specifies either document or table mode. Default is `none`. Valid values are `one` (table mode) and `none` (document mode). +### oracle_settings + +-> Additional information can be found in the [Using Oracle as a Source for AWS DMS documentation](https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Source.Oracle.html). + +* `authentication_method` - (Optional) Authentication mechanism to access the Oracle source endpoint. Default is `password`. Valid values are `password` and `kerberos`. + ### postgres_settings -> Additional information can be found in the [Using PostgreSQL as a Source for AWS DMS documentation](https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Source.PostgreSQL.html). * `after_connect_script` - (Optional) For use with change data capture (CDC) only, this attribute has AWS DMS bypass foreign keys and user triggers to reduce the time it takes to bulk load data. +* `authentication_method` - (Optional) Specifies the authentication method. Valid values: `password`, `iam`. * `babelfish_database_name` - (Optional) The Babelfish for Aurora PostgreSQL database name for the endpoint. 
* `capture_ddls` - (Optional) To capture DDL events, AWS DMS creates various artifacts in the PostgreSQL database when the task starts. * `database_mode` - (Optional) Specifies the default behavior of the replication's handling of PostgreSQL- compatible endpoints that require some additional configuration, such as Babelfish endpoints. @@ -163,6 +170,7 @@ The following arguments are optional: * `map_long_varchar_as` - Optional When true, DMS migrates LONG values as VARCHAR. * `max_file_size` - (Optional) Specifies the maximum size (in KB) of any .csv file used to transfer data to PostgreSQL. Default is `32,768 KB`. * `plugin_name` - (Optional) Specifies the plugin to use to create a replication slot. Valid values: `pglogical`, `test_decoding`. +* `service_access_role_arn` - (Optional) Specifies the IAM role to use to authenticate the connection. * `slot_name` - (Optional) Sets the name of a previously created logical replication slot for a CDC load of the PostgreSQL source instance. ### redis_settings @@ -187,51 +195,6 @@ The following arguments are optional: * `server_side_encryption_kms_key_id` - (Required when `encryption_mode` is `SSE_KMS`, must not be set otherwise) ARN or Id of KMS Key to use when `encryption_mode` is `SSE_KMS`. * `service_access_role_arn` - (Optional) Amazon Resource Name (ARN) of the IAM Role with permissions to read from or write to the S3 Bucket for intermediate storage. -### s3_settings - -~> **Deprecated:** This argument is deprecated, may not be maintained, and will be removed in a future version. Use the [`aws_dms_s3_endpoint`](/docs/providers/aws/r/dms_s3_endpoint.html) resource instead. - --> Additional information can be found in the [Using Amazon S3 as a Source for AWS Database Migration Service documentation](https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Source.S3.html) and [Using Amazon S3 as a Target for AWS Database Migration Service documentation](https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.S3.html). 
- -* `add_column_name` - (Optional) Whether to add column name information to the .csv output file. Default is `false`. -* `bucket_folder` - (Optional) S3 object prefix. -* `bucket_name` - (Optional) S3 bucket name. -* `canned_acl_for_objects` - (Optional) Predefined (canned) access control list for objects created in an S3 bucket. Valid values include `none`, `private`, `public-read`, `public-read-write`, `authenticated-read`, `aws-exec-read`, `bucket-owner-read`, and `bucket-owner-full-control`. Default is `none`. -* `cdc_inserts_and_updates` - (Optional) Whether to write insert and update operations to .csv or .parquet output files. Default is `false`. -* `cdc_inserts_only` - (Optional) Whether to write insert operations to .csv or .parquet output files. Default is `false`. -* `cdc_max_batch_interval` - (Optional) Maximum length of the interval, defined in seconds, after which to output a file to Amazon S3. Default is `60`. -* `cdc_min_file_size` - (Optional) Minimum file size condition as defined in kilobytes to output a file to Amazon S3. Default is `32000`. **NOTE:** Previously, this setting was measured in megabytes but now represents kilobytes. Update configurations accordingly. -* `cdc_path` - (Optional) Folder path of CDC files. For an S3 source, this setting is required if a task captures change data; otherwise, it's optional. If `cdc_path` is set, AWS DMS reads CDC files from this path and replicates the data changes to the target endpoint. Supported in AWS DMS versions 3.4.2 and later. -* `compression_type` - (Optional) Set to compress target files. Default is `NONE`. Valid values are `GZIP` and `NONE`. -* `csv_delimiter` - (Optional) Delimiter used to separate columns in the source files. Default is `,`. -* `csv_no_sup_value` - (Optional) String to use for all columns not included in the supplemental log. -* `csv_null_value` - (Optional) String to as null when writing to the target. 
-* `csv_row_delimiter` - (Optional) Delimiter used to separate rows in the source files. Default is `\n`. -* `data_format` - (Optional) Output format for the files that AWS DMS uses to create S3 objects. Valid values are `csv` and `parquet`. Default is `csv`. -* `data_page_size` - (Optional) Size of one data page in bytes. Default is `1048576` (1 MiB). -* `date_partition_delimiter` - (Optional) Date separating delimiter to use during folder partitioning. Valid values are `SLASH`, `UNDERSCORE`, `DASH`, and `NONE`. Default is `SLASH`. -* `date_partition_enabled` - (Optional) Partition S3 bucket folders based on transaction commit dates. Default is `false`. -* `date_partition_sequence` - (Optional) Date format to use during folder partitioning. Use this parameter when `date_partition_enabled` is set to true. Valid values are `YYYYMMDD`, `YYYYMMDDHH`, `YYYYMM`, `MMYYYYDD`, and `DDMMYYYY`. Default is `YYYYMMDD`. -* `dict_page_size_limit` - (Optional) Maximum size in bytes of an encoded dictionary page of a column. Default is `1048576` (1 MiB). -* `enable_statistics` - (Optional) Whether to enable statistics for Parquet pages and row groups. Default is `true`. -* `encoding_type` - (Optional) Type of encoding to use. Value values are `rle_dictionary`, `plain`, and `plain_dictionary`. Default is `rle_dictionary`. -* `encryption_mode` - (Optional) Server-side encryption mode that you want to encrypt your .csv or .parquet object files copied to S3. Valid values are `SSE_S3` and `SSE_KMS`. Default is `SSE_S3`. -* `external_table_definition` - (Optional) JSON document that describes how AWS DMS should interpret the data. -* `glue_catalog_generation` - (Optional) Whether to integrate AWS Glue Data Catalog with an Amazon S3 target. See [Using AWS Glue Data Catalog with an Amazon S3 target for AWS DMS](https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.S3.html#CHAP_Target.S3.GlueCatalog) for more information. Default is `false`. 
-* `ignore_header_rows` - (Optional) When this value is set to `1`, DMS ignores the first row header in a .csv file. Default is `0`. -* `include_op_for_full_load` - (Optional) Whether to enable a full load to write INSERT operations to the .csv output files only to indicate how the rows were added to the source database. Default is `false`. -* `max_file_size` - (Optional) Maximum size (in KB) of any .csv file to be created while migrating to an S3 target during full load. Valid values are from `1` to `1048576`. Default is `1048576` (1 GB). -* `parquet_timestamp_in_millisecond` - (Optional) - Specifies the precision of any TIMESTAMP column values written to an S3 object file in .parquet format. Default is `false`. -* `parquet_version` - (Optional) Version of the .parquet file format. Default is `parquet-1-0`. Valid values are `parquet-1-0` and `parquet-2-0`. -* `preserve_transactions` - (Optional) Whether DMS saves the transaction order for a CDC load on the S3 target specified by `cdc_path`. Default is `false`. -* `rfc_4180` - (Optional) For an S3 source, whether each leading double quotation mark has to be followed by an ending double quotation mark. Default is `true`. -* `row_group_length` - (Optional) Number of rows in a row group. Default is `10000`. -* `server_side_encryption_kms_key_id` - (Required when `encryption_mode` is `SSE_KMS`, must not be set otherwise) ARN or Id of KMS Key to use when `encryption_mode` is `SSE_KMS`. -* `service_access_role_arn` - (Optional) ARN of the IAM Role with permissions to read from or write to the S3 Bucket. -* `timestamp_column_name` - (Optional) Column to add with timestamp information to the endpoint data for an Amazon S3 target. -* `use_csv_no_sup_value` - (Optional) Whether to use `csv_no_sup_value` for columns not included in the supplemental log. 
-* `use_task_start_time_for_full_load_timestamp` - (Optional) When set to true, uses the task start time as the timestamp column value instead of the time data is written to target. For full load, when set to true, each row of the timestamp column contains the task start time. For CDC loads, each row of the timestamp column contains the transaction commit time. When set to false, the full load timestamp in the timestamp column increments with the time data arrives at the target. Default is `false`. - ## Attribute Reference This resource exports the following attributes in addition to the arguments above: @@ -271,4 +234,4 @@ Using `terraform import`, import endpoints using the `endpoint_id`. For example: % terraform import aws_dms_endpoint.test test-dms-endpoint-tf ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/dms_event_subscription.html.markdown b/website/docs/cdktf/python/r/dms_event_subscription.html.markdown index c7a59ddf0783..0c112a530fee 100644 --- a/website/docs/cdktf/python/r/dms_event_subscription.html.markdown +++ b/website/docs/cdktf/python/r/dms_event_subscription.html.markdown @@ -45,6 +45,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of event subscription. * `enabled` - (Optional, Default: true) Whether the event subscription should be enabled. * `event_categories` - (Optional) List of event categories to listen for, see `DescribeEventCategories` for a canonical list. @@ -93,4 +94,4 @@ Using `terraform import`, import event subscriptions using the `name`. 
For examp % terraform import aws_dms_event_subscription.test my-awesome-event-subscription ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/dms_replication_config.html.markdown b/website/docs/cdktf/python/r/dms_replication_config.html.markdown index 4cf27b6bf81e..79e7f150981a 100644 --- a/website/docs/cdktf/python/r/dms_replication_config.html.markdown +++ b/website/docs/cdktf/python/r/dms_replication_config.html.markdown @@ -49,6 +49,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `compute_config` - (Required) Configuration block for provisioning an DMS Serverless replication. * `start_replication` - (Optional) Whether to run or stop the serverless replication, default is false. * `replication_config_identifier` - (Required) Unique identifier that you want to use to create the config. @@ -96,6 +97,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_dms_replication_config.example + identity = { + "arn" = "arn:aws:dms:us-east-1:123456789012:replication-config:example-config" + } +} + +resource "aws_dms_replication_config" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the DMS replication configuration. 
+ In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import replication configs using the `arn`. For example: ```python @@ -119,4 +141,4 @@ Using `terraform import`, import a replication config using the `arn`. For examp % terraform import aws_dms_replication_config.example arn:aws:dms:us-east-1:123456789012:replication-config:UX6OL6MHMMJKFFOXE3H7LLJCMEKBDUG4ZV7DRSI ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/dms_replication_instance.html.markdown b/website/docs/cdktf/python/r/dms_replication_instance.html.markdown index c5b18d17bcef..24471c9ff68d 100644 --- a/website/docs/cdktf/python/r/dms_replication_instance.html.markdown +++ b/website/docs/cdktf/python/r/dms_replication_instance.html.markdown @@ -94,35 +94,34 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `allocated_storage` - (Optional, Default: 50, Min: 5, Max: 6144) The amount of storage (in gigabytes) to be initially allocated for the replication instance. * `allow_major_version_upgrade` - (Optional, Default: false) Indicates that major version upgrades are allowed. * `apply_immediately` - (Optional, Default: false) Indicates whether the changes should be applied immediately or during the next maintenance window. Only used when updating an existing resource. * `auto_minor_version_upgrade` - (Optional, Default: false) Indicates that minor engine upgrades will be applied automatically to the replication instance during the maintenance window. 
* `availability_zone` - (Optional) The EC2 Availability Zone that the replication instance will be created in. +* `dns_name_servers` - (Optional) A list of custom DNS name servers supported for the replication instance to access your on-premise source or target database. This list overrides the default name servers supported by the replication instance. You can specify a comma-separated list of internet addresses for up to four on-premise DNS name servers. * `engine_version` - (Optional) The engine version number of the replication instance. +* `kerberos_authentication_settings` - (Optional) Configuration block for settings required for Kerberos authentication. See below. * `kms_key_arn` - (Optional) The Amazon Resource Name (ARN) for the KMS key that will be used to encrypt the connection parameters. If you do not specify a value for `kms_key_arn`, then AWS DMS will use your default encryption key. AWS KMS creates the default encryption key for your AWS account. Your AWS account has a different default encryption key for each AWS region. * `multi_az` - (Optional) Specifies if the replication instance is a multi-az deployment. You cannot set the `availability_zone` parameter if the `multi_az` parameter is set to `true`. * `network_type` - (Optional) The type of IP address protocol used by a replication instance. Valid values: `IPV4`, `DUAL`. * `preferred_maintenance_window` - (Optional) The weekly time range during which system maintenance can occur, in Universal Coordinated Time (UTC). - - - Default: A 30-minute window selected at random from an 8-hour block of time per region, occurring on a random day of the week. - - Format: `ddd:hh24:mi-ddd:hh24:mi` - - Valid Days: `mon, tue, wed, thu, fri, sat, sun` - - Constraints: Minimum 30-minute window. - * `publicly_accessible` - (Optional, Default: false) Specifies the accessibility options for the replication instance. A value of true represents an instance with a public IP address. 
A value of false represents an instance with a private IP address. * `replication_instance_class` - (Required) The compute and memory capacity of the replication instance as specified by the replication instance class. See [AWS DMS User Guide](https://docs.aws.amazon.com/dms/latest/userguide/CHAP_ReplicationInstance.Types.html) for available instance sizes and advice on which one to choose. * `replication_instance_id` - (Required) The replication instance identifier. This parameter is stored as a lowercase string. - - - Must contain from 1 to 63 alphanumeric characters or hyphens. - - First character must be a letter. - - Cannot end with a hyphen - - Cannot contain two consecutive hyphens. - * `replication_subnet_group_id` - (Optional) A subnet group to associate with the replication instance. * `tags` - (Optional) A map of tags to assign to the resource. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. * `vpc_security_group_ids` - (Optional) A list of VPC security group IDs to be used with the replication instance. The VPC security groups must work with the VPC containing the replication instance. +## kerberos_authentication_settings + +-> Additional information can be found in the [Using Kerberos Authentication with AWS Database Migration Service documentation](https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Security.Kerberos.html). + +* `key_cache_secret_iam_arn` - (Required) ARN of the IAM role that grants AWS DMS access to the secret containing key cache file for the Kerberos authentication. +* `key_cache_secret_id` - (Required) Secret ID that stores the key cache file required for Kerberos authentication. +* `krb5_file_contents` - (Required) Contents of krb5 configuration file required for Kerberos authentication. 
+ ## Attribute Reference This resource exports the following attributes in addition to the arguments above: @@ -165,4 +164,4 @@ Using `terraform import`, import replication instances using the `replication_in % terraform import aws_dms_replication_instance.test test-dms-replication-instance-tf ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/dms_replication_subnet_group.html.markdown b/website/docs/cdktf/python/r/dms_replication_subnet_group.html.markdown index 860655e8f341..2861f07a9514 100644 --- a/website/docs/cdktf/python/r/dms_replication_subnet_group.html.markdown +++ b/website/docs/cdktf/python/r/dms_replication_subnet_group.html.markdown @@ -95,6 +95,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `replication_subnet_group_description` - (Required) Description for the subnet group. * `replication_subnet_group_id` - (Required) Name for the replication subnet group. This value is stored as a lowercase string. It must contain no more than 255 alphanumeric characters, periods, spaces, underscores, or hyphens and cannot be `default`. * `subnet_ids` - (Required) List of at least 2 EC2 subnet IDs for the subnet group. The subnets must cover at least 2 availability zones. 
@@ -140,4 +141,4 @@ Using `terraform import`, import replication subnet groups using the `replicatio % terraform import aws_dms_replication_subnet_group.test test-dms-replication-subnet-group-tf ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/dms_replication_task.html.markdown b/website/docs/cdktf/python/r/dms_replication_task.html.markdown index 6da4a7630543..3253351196e6 100644 --- a/website/docs/cdktf/python/r/dms_replication_task.html.markdown +++ b/website/docs/cdktf/python/r/dms_replication_task.html.markdown @@ -47,6 +47,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `cdc_start_position` - (Optional, Conflicts with `cdc_start_time`) Indicates when you want a change data capture (CDC) operation to start. The value can be a RFC3339 formatted date, a checkpoint, or a LSN/SCN format depending on the source engine. For more information see [Determining a CDC native start point](https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Task.CDC.html#CHAP_Task.CDC.StartPoint.Native). * `cdc_start_time` - (Optional, Conflicts with `cdc_start_position`) RFC3339 formatted date string or UNIX timestamp for the start of the Change Data Capture (CDC) operation. * `migration_type` - (Required) Migration type. Can be one of `full-load | cdc | full-load-and-cdc`. 
@@ -93,4 +94,4 @@ Using `terraform import`, import replication tasks using the `replication_task_i % terraform import aws_dms_replication_task.test test-dms-replication-task-tf ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/dms_s3_endpoint.html.markdown b/website/docs/cdktf/python/r/dms_s3_endpoint.html.markdown index c726d40f9055..eb2646e8950c 100644 --- a/website/docs/cdktf/python/r/dms_s3_endpoint.html.markdown +++ b/website/docs/cdktf/python/r/dms_s3_endpoint.html.markdown @@ -122,6 +122,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `add_column_name` - (Optional) Whether to add column name information to the .csv output file. Default is `false`. * `add_trailing_padding_character` - (Optional) Whether to add padding. Default is `false`. (Ignored for source endpoints.) * `bucket_folder` - (Optional) S3 object prefix. @@ -209,4 +210,4 @@ Using `terraform import`, import endpoints using the `endpoint_id`. For example: % terraform import aws_dms_s3_endpoint.example example-dms-endpoint-tf ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/docdb_cluster.html.markdown b/website/docs/cdktf/python/r/docdb_cluster.html.markdown index 2c3f02c7879f..2d60f32b977d 100644 --- a/website/docs/cdktf/python/r/docdb_cluster.html.markdown +++ b/website/docs/cdktf/python/r/docdb_cluster.html.markdown @@ -23,7 +23,7 @@ phase because a modification has not yet taken place. You can use the ~> **Note:** All arguments including the username and password will be stored in the raw state as plain-text. 
[Read more about sensitive data in state](https://www.terraform.io/docs/state/sensitive-data.html). --> **Note:** Write-Only argument `master_password_wo` is available to use in place of `master_password`. Write-Only arguments are supported in HashiCorp Terraform 1.11.0 and later. [Learn more](https://developer.hashicorp.com/terraform/language/v1.11.x/resources/ephemeral#write-only-arguments). +-> **Note:** Write-Only argument `master_password_wo` is available to use in place of `master_password`. Write-Only arguments are supported in HashiCorp Terraform 1.11.0 and later. [Learn more](https://developer.hashicorp.com/terraform/language/resources/ephemeral#write-only-arguments). ## Example Usage @@ -54,12 +54,14 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `allow_major_version_upgrade` - (Optional) A value that indicates whether major version upgrades are allowed. Constraints: You must allow major version upgrades when specifying a value for the EngineVersion parameter that is a different major version than the DB cluster's current version. * `apply_immediately` - (Optional) Specifies whether any cluster modifications are applied immediately, or during the next maintenance window. Default is `false`. -* `availability_zones` - (Optional) A list of EC2 Availability Zones that - instances in the DB cluster can be created in. +* `availability_zones` - (Optional) A list of EC2 Availability Zones that instances in the DB cluster can be created in. + DocumentDB automatically assigns 3 AZs if fewer than 3 AZs are configured, which will show as a difference requiring resource recreation on the next Terraform apply.
+ We recommend specifying 3 AZs or using [the `lifecycle` configuration block `ignore_changes` argument](https://www.terraform.io/docs/configuration/meta-arguments/lifecycle.html#ignore_changes) if necessary. * `backup_retention_period` - (Optional) The days to retain backups for. Default `1` * `cluster_identifier_prefix` - (Optional, Forces new resource) Creates a unique cluster identifier beginning with the specified prefix. Conflicts with `cluster_identifier`. * `cluster_identifier` - (Optional, Forces new resources) The cluster identifier. If omitted, Terraform will assign a random, unique identifier. @@ -87,6 +89,7 @@ This resource supports the following arguments: Default: A 30-minute window selected at random from an 8-hour block of time per regionE.g., 04:00-09:00 * `preferred_maintenance_window` - (Optional) The weekly time range during which system maintenance can occur, in (UTC) e.g., wed:04:00-wed:04:30 * `restore_to_point_in_time` - (Optional, Forces new resource) A configuration block for restoring a DB instance to an arbitrary point in time. Requires the `identifier` argument to be set with the name of the new DB instance to be created. See [Restore To Point In Time](#restore-to-point-in-time) below for details. +* `serverless_v2_scaling_configuration` - (Optional) Scaling configuration of an Amazon DocumentDB Serverless cluster. See [Serverless V2 Scaling Configuration](#serverless-v2-scaling-configuration) below for details. * `skip_final_snapshot` - (Optional) Determines whether a final DB snapshot is created before the DB cluster is deleted. If true is specified, no DB snapshot is created. If false is specified, a DB snapshot is created before the DB cluster is deleted, using the value from `final_snapshot_identifier`. Default is `false`. * `snapshot_identifier` - (Optional) Specifies whether or not to create this cluster from a snapshot. 
You can use either the name or ARN when specifying a DB cluster snapshot, or the ARN when specifying a DB snapshot. Automated snapshots **should not** be used for this attribute, unless from a different cluster. Automated snapshots are deleted as part of cluster destruction when the resource is replaced. * `storage_encrypted` - (Optional) Specifies whether the DB cluster is encrypted. The default is `false`. @@ -107,16 +110,24 @@ The `restore_to_point_in_time` block supports the following arguments: * `source_cluster_identifier` - (Required) The identifier of the source DB cluster from which to restore. Must match the identifier of an existing DB cluster. * `use_latest_restorable_time` - (Optional) A boolean value that indicates whether the DB cluster is restored from the latest backup time. Defaults to `false`. Cannot be specified with `restore_to_time`. +### Serverless V2 Scaling Configuration + +The `serverless_v2_scaling_configuration` block supports the following arguments. +Adding this block (i.e. switching to serverless) or removing it (i.e. switching from serverless) will trigger cluster replacement. + +* `max_capacity` - (Required) Maximum number of Amazon DocumentDB capacity units (DCUs) for an instance in an Amazon DocumentDB Serverless cluster. Valid values are multiples of 0.5 between 1 and 256. +* `min_capacity` - (Required) Minimum number of Amazon DocumentDB capacity units (DCUs) for an instance in an Amazon DocumentDB Serverless cluster. Valid values are multiples of 0.5 between 0.5 and 256. 
+ ## Attribute Reference This resource exports the following attributes in addition to the arguments above: * `arn` - Amazon Resource Name (ARN) of cluster -* `cluster_members` – List of DocumentDB Instances that are a part of this cluster +* `cluster_members` - List of DocumentDB Instances that are a part of this cluster * `cluster_resource_id` - The DocumentDB Cluster Resource ID * `endpoint` - The DNS address of the DocumentDB instance * `hosted_zone_id` - The Route53 Hosted Zone ID of the endpoint -* `id` - The DocumentDB Cluster Identifier +* `id` - (**Deprecated**) Amazon Resource Name (ARN) of cluster * `reader_endpoint` - A read-only endpoint for the DocumentDB cluster, automatically load-balanced across replicas * `tags_all` - A map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). @@ -154,4 +165,4 @@ Using `terraform import`, import DocumentDB Clusters using the `cluster_identifi % terraform import aws_docdb_cluster.docdb_cluster docdb-prod-cluster ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/docdb_cluster_instance.html.markdown b/website/docs/cdktf/python/r/docdb_cluster_instance.html.markdown index a3a43a9caf1b..67093474bc42 100644 --- a/website/docs/cdktf/python/r/docdb_cluster_instance.html.markdown +++ b/website/docs/cdktf/python/r/docdb_cluster_instance.html.markdown @@ -57,13 +57,14 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
* `apply_immediately` - (Optional) Specifies whether any database modifications are applied immediately, or during the next maintenance window. Default is`false`. * `auto_minor_version_upgrade` - (Optional) This parameter does not apply to Amazon DocumentDB. Amazon DocumentDB does not perform minor version upgrades regardless of the value set (see [docs](https://docs.aws.amazon.com/documentdb/latest/developerguide/API_DBInstance.html)). Default `true`. * `availability_zone` - (Optional, Computed) The EC2 Availability Zone that the DB instance is created in. See [docs](https://docs.aws.amazon.com/documentdb/latest/developerguide/API_CreateDBInstance.html) about the details. * `ca_cert_identifier` - (Optional) The identifier of the certificate authority (CA) certificate for the DB instance. * `cluster_identifier` - (Required) The identifier of the [`aws_docdb_cluster`](/docs/providers/aws/r/docdb_cluster.html) in which to launch this instance. -* `copy_tags_to_snapshot` – (Optional, boolean) Copy all DB instance `tags` to snapshots. Default is `false`. +* `copy_tags_to_snapshot` - (Optional, boolean) Copy all DB instance `tags` to snapshots. Default is `false`. * `enable_performance_insights` - (Optional) A value that indicates whether to enable Performance Insights for the DB Instance. Default `false`. See [docs] (https://docs.aws.amazon.com/documentdb/latest/developerguide/performance-insights.html) about the details. * `engine` - (Optional) The name of the database engine to be used for the DocumentDB instance. Defaults to `docdb`. Valid Values: `docdb`. * `identifier` - (Optional, Forces new resource) The identifier for the DocumentDB instance, if omitted, Terraform will assign a random, unique identifier. @@ -112,7 +113,7 @@ This resource exports the following attributes in addition to the arguments abov * `preferred_backup_window` - The daily time range during which automated backups are created if automated backups are enabled. 
* `storage_encrypted` - Specifies whether the DB cluster is encrypted. * `tags_all` - A map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). -* `writer` – Boolean indicating if this instance is writable. `False` indicates this instance is a read replica. +* `writer` - Boolean indicating if this instance is writable. `False` indicates this instance is a read replica. For more detailed documentation about each argument, refer to the [AWS official documentation](https://docs.aws.amazon.com/cli/latest/reference/docdb/create-db-instance.html). @@ -157,4 +158,4 @@ Using `terraform import`, import DocumentDB Cluster Instances using the `identif % terraform import aws_docdb_cluster_instance.prod_instance_1 aurora-cluster-instance-1 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/docdb_cluster_parameter_group.html.markdown b/website/docs/cdktf/python/r/docdb_cluster_parameter_group.html.markdown index 5fca22c63a12..771f7915c859 100644 --- a/website/docs/cdktf/python/r/docdb_cluster_parameter_group.html.markdown +++ b/website/docs/cdktf/python/r/docdb_cluster_parameter_group.html.markdown @@ -42,6 +42,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
* `name` - (Optional, Forces new resource) The name of the DocumentDB cluster parameter group. If omitted, Terraform will assign a random, unique name. * `name_prefix` - (Optional, Forces new resource) Creates a unique name beginning with the specified prefix. Conflicts with `name`. * `family` - (Required, Forces new resource) The family of the DocumentDB cluster parameter group. @@ -90,4 +91,4 @@ Using `terraform import`, import DocumentDB Cluster Parameter Groups using the ` % terraform import aws_docdb_cluster_parameter_group.cluster_pg production-pg-1 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/docdb_cluster_snapshot.html.markdown b/website/docs/cdktf/python/r/docdb_cluster_snapshot.html.markdown index 30da89821622..ab0cd83ff54c 100644 --- a/website/docs/cdktf/python/r/docdb_cluster_snapshot.html.markdown +++ b/website/docs/cdktf/python/r/docdb_cluster_snapshot.html.markdown @@ -36,6 +36,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `db_cluster_identifier` - (Required) The DocumentDB Cluster Identifier from which to take the snapshot. * `db_cluster_snapshot_identifier` - (Required) The Identifier for the snapshot. 
@@ -85,4 +86,4 @@ Using `terraform import`, import `aws_docdb_cluster_snapshot` using the cluster % terraform import aws_docdb_cluster_snapshot.example my-cluster-snapshot ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/docdb_event_subscription.html.markdown b/website/docs/cdktf/python/r/docdb_event_subscription.html.markdown index 51845f6afccd..8aeb6b0adb55 100644 --- a/website/docs/cdktf/python/r/docdb_event_subscription.html.markdown +++ b/website/docs/cdktf/python/r/docdb_event_subscription.html.markdown @@ -60,6 +60,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Optional) The name of the DocumentDB event subscription. By default generated by Terraform. * `name_prefix` - (Optional) The name of the DocumentDB event subscription. Conflicts with `name`. * `sns_topic` - (Required) The SNS topic to send events to. 
@@ -111,4 +112,4 @@ Using `terraform import`, import DocumentDB Event Subscriptions using the `name` % terraform import aws_docdb_event_subscription.example event-sub ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/docdb_global_cluster.html.markdown b/website/docs/cdktf/python/r/docdb_global_cluster.html.markdown index 8b276dcb33e1..18023da03c64 100644 --- a/website/docs/cdktf/python/r/docdb_global_cluster.html.markdown +++ b/website/docs/cdktf/python/r/docdb_global_cluster.html.markdown @@ -119,6 +119,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `global_cluster_identifier` - (Required, Forces new resources) The global cluster identifier. * `database_name` - (Optional, Forces new resources) Name for an automatically created database on cluster creation. * `deletion_protection` - (Optional) If the Global Cluster should have deletion protection enabled. The database can't be deleted when this value is set to `true`. The default is `false`. 
@@ -195,4 +196,4 @@ class MyConvertedCode(TerraformStack): ) ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/docdb_subnet_group.html.markdown b/website/docs/cdktf/python/r/docdb_subnet_group.html.markdown index dcc116e73d46..9f910c1af3c6 100644 --- a/website/docs/cdktf/python/r/docdb_subnet_group.html.markdown +++ b/website/docs/cdktf/python/r/docdb_subnet_group.html.markdown @@ -39,6 +39,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Optional, Forces new resource) The name of the docDB subnet group. If omitted, Terraform will assign a random, unique name. * `name_prefix` - (Optional, Forces new resource) Creates a unique name beginning with the specified prefix. Conflicts with `name`. * `description` - (Optional) The description of the docDB subnet group. Defaults to "Managed by Terraform". @@ -78,4 +79,4 @@ Using `terraform import`, import DocumentDB Subnet groups using the `name`. 
For % terraform import aws_docdb_subnet_group.default production-subnet-group ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/docdbelastic_cluster.html.markdown b/website/docs/cdktf/python/r/docdbelastic_cluster.html.markdown index aec0097fc971..e78ba7b35ea5 100644 --- a/website/docs/cdktf/python/r/docdbelastic_cluster.html.markdown +++ b/website/docs/cdktf/python/r/docdbelastic_cluster.html.markdown @@ -51,6 +51,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `backup_retention_period` - (Optional) The number of days for which automatic snapshots are retained. It should be in between 1 and 35. If not specified, the default value of 1 is set. * `kms_key_id` - (Optional) ARN of a KMS key that is used to encrypt the Elastic DocumentDB cluster. If not specified, the default encryption key that KMS creates for your account is used. * `preferred_backup_window` - (Optional) The daily time range during which automated backups are created if automated backups are enabled, as determined by the `backup_retention_period`. @@ -79,7 +80,28 @@ This resource exports the following attributes in addition to the arguments abov ## Import -In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import OpenSearchServerless Access Policy using the `name` and `type` arguments separated by a slash (`/`). For example: +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. 
For example: + +```terraform +import { + to = aws_docdbelastic_cluster.example + identity = { + "arn" = "arn:aws:docdb-elastic:us-east-1:000011112222:cluster/12345678-7abc-def0-1234-56789abcdef" + } +} + +resource "aws_docdbelastic_cluster" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the DocDB Elastic cluster. + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import DocDB Elastic Cluster using the `arn`. For example: ```python # DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug @@ -102,4 +124,4 @@ Using `terraform import`, import DocDB (DocumentDB) Elastic Cluster using the `a % terraform import aws_docdbelastic_cluster.example arn:aws:docdb-elastic:us-east-1:000011112222:cluster/12345678-7abc-def0-1234-56789abcdef ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/drs_replication_configuration_template.html.markdown b/website/docs/cdktf/python/r/drs_replication_configuration_template.html.markdown index aee9673b4244..8cb8c0b0011a 100644 --- a/website/docs/cdktf/python/r/drs_replication_configuration_template.html.markdown +++ b/website/docs/cdktf/python/r/drs_replication_configuration_template.html.markdown @@ -87,6 +87,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `auto_replicate_new_disks` - (Optional) Whether to allow the AWS replication agent to automatically replicate newly added disks. 
* `tags` - (Optional) Set of tags to be associated with the Replication Configuration Template resource. @@ -141,4 +142,4 @@ Using `terraform import`, import DRS Replication Configuration Template using th % terraform import aws_drs_replication_configuration_template.example templateid ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/dsql_cluster.html.markdown b/website/docs/cdktf/python/r/dsql_cluster.html.markdown index b055945ec35b..97a119c6a0b4 100644 --- a/website/docs/cdktf/python/r/dsql_cluster.html.markdown +++ b/website/docs/cdktf/python/r/dsql_cluster.html.markdown @@ -24,7 +24,7 @@ from cdktf import TerraformStack # Provider bindings are generated by running `cdktf get`. # See https://cdk.tf/provider-generation for more details. # -from imports.aws. import DsqlCluster +from imports.aws.dsql_cluster import DsqlCluster class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) @@ -40,10 +40,14 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: -* `deletion_protection_enabled` - (Required) Whether deletion protection is enabled in this cluster. +* `deletion_protection_enabled` - (Optional) Whether deletion protection is enabled in this cluster. + Default value is `false`. +* `force_destroy` - (Optional) Destroys cluster even if `deletion_protection_enabled` is set to `true`. + Default value is `false`. * `kms_encryption_key` - (Optional) The ARN of the AWS KMS key that encrypts data in the DSQL Cluster, or `"AWS_OWNED_KMS_KEY"`. * `multi_region_properties` - (Optional) Multi-region properties of the DSQL Cluster. * `witness_region` - (Required) Witness region for the multi-region clusters. Setting this makes this cluster a multi-region cluster. Changing it recreates the resource. 
+* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `tags` - (Optional) Set of tags to be associated with the AWS DSQL Cluster resource. ## Attribute Reference @@ -80,7 +84,7 @@ from cdktf import TerraformStack # Provider bindings are generated by running `cdktf get`. # See https://cdk.tf/provider-generation for more details. # -from imports.aws. import DsqlCluster +from imports.aws.dsql_cluster import DsqlCluster class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) @@ -93,4 +97,4 @@ Using `terraform import`, import DSQL Cluster using the `identifier`. For exampl % terraform import aws_dsql_cluster.example abcde1f234ghijklmnop5qr6st ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/dsql_cluster_peering.html.markdown b/website/docs/cdktf/python/r/dsql_cluster_peering.html.markdown index aee594d72660..5eac79ea8003 100644 --- a/website/docs/cdktf/python/r/dsql_cluster_peering.html.markdown +++ b/website/docs/cdktf/python/r/dsql_cluster_peering.html.markdown @@ -19,33 +19,35 @@ Terraform resource for managing an Amazon Aurora DSQL Cluster Peering. ```python # DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug from constructs import Construct -from cdktf import Fn, TerraformStack +from cdktf import Fn, Token, TerraformStack # # Provider bindings are generated by running `cdktf get`. # See https://cdk.tf/provider-generation for more details. # -from imports.aws. 
import DsqlCluster, DsqlClusterPeering +from imports.aws.dsql_cluster import DsqlCluster +from imports.aws.dsql_cluster_peering import DsqlClusterPeering class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) example1 = DsqlCluster(self, "example_1", - multi_region_properties=[{ - "witness_region": "us-west-2" - } + multi_region_properties=[DsqlClusterMultiRegionProperties( + witness_region="us-west-2" + ) ] ) example2 = DsqlCluster(self, "example_2", - multi_region_properties=[{ - "witness_region": "us-west-2" - } + multi_region_properties=[DsqlClusterMultiRegionProperties( + witness_region="us-west-2" + ) ], provider=alternate ) aws_dsql_cluster_peering_example1 = DsqlClusterPeering(self, "example_1_2", clusters=[example2.arn], identifier=example1.identifier, - witness_region=Fn.lookup_nested(example1.multi_region_properties, ["0", "witness_region" - ]) + witness_region=Token.as_string( + Fn.lookup_nested(example1.multi_region_properties, ["0", "witness_region" + ])) ) # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. aws_dsql_cluster_peering_example1.override_logical_id("example_1") @@ -53,8 +55,9 @@ class MyConvertedCode(TerraformStack): clusters=[example1.arn], identifier=example2.identifier, provider=alternate, - witness_region=Fn.lookup_nested(example2.multi_region_properties, ["0", "witness_region" - ]) + witness_region=Token.as_string( + Fn.lookup_nested(example2.multi_region_properties, ["0", "witness_region" + ])) ) # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. aws_dsql_cluster_peering_example2.override_logical_id("example_2") @@ -66,6 +69,7 @@ This resource supports the following arguments: * `clusters` - (Required) List of DSQL Cluster ARNs to be peered to this cluster. * `identifier` - (Required) DSQL Cluster Identifier. 
+* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `witness_region` - (Required) Witness region for a multi-region cluster. ## Attribute Reference @@ -90,7 +94,7 @@ from cdktf import TerraformStack # Provider bindings are generated by running `cdktf get`. # See https://cdk.tf/provider-generation for more details. # -from imports.aws. import DsqlClusterPeering +from imports.aws.dsql_cluster_peering import DsqlClusterPeering class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) @@ -103,4 +107,4 @@ Using `terraform import`, import DSQL Cluster Peering using the `identifier`. Fo % terraform import aws_dsql_cluster_peering.example cluster-id-12345678 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/dx_bgp_peer.html.markdown b/website/docs/cdktf/python/r/dx_bgp_peer.html.markdown index c249a3cab0bb..34a58496d818 100644 --- a/website/docs/cdktf/python/r/dx_bgp_peer.html.markdown +++ b/website/docs/cdktf/python/r/dx_bgp_peer.html.markdown @@ -37,6 +37,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `address_family` - (Required) The address family for the BGP peer. `ipv4 ` or `ipv6`. * `bgp_asn` - (Required) The autonomous system (AS) number for Border Gateway Protocol (BGP) configuration. 
* `virtual_interface_id` - (Required) The ID of the Direct Connect virtual interface on which to create the BGP peer. @@ -62,4 +63,4 @@ This resource exports the following attributes in addition to the arguments abov - `create` - (Default `10m`) - `delete` - (Default `10m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/dx_connection.html.markdown b/website/docs/cdktf/python/r/dx_connection.html.markdown index 4545ea2376d1..7d4abd5b65c6 100644 --- a/website/docs/cdktf/python/r/dx_connection.html.markdown +++ b/website/docs/cdktf/python/r/dx_connection.html.markdown @@ -86,6 +86,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `bandwidth` - (Required) The bandwidth of the connection. Valid values for dedicated connections: 1Gbps, 10Gbps, 100Gbps, and 400Gbps. Valid values for hosted connections: 50Mbps, 100Mbps, 200Mbps, 300Mbps, 400Mbps, 500Mbps, 1Gbps, 2Gbps, 5Gbps, 10Gbps, and 25Gbps. Case sensitive. Refer to the AWS Direct Connection supported bandwidths for [Dedicated Connections](https://docs.aws.amazon.com/directconnect/latest/UserGuide/dedicated_connection.html) and [Hosted Connections](https://docs.aws.amazon.com/directconnect/latest/UserGuide/hosted_connection.html). * `encryption_mode` - (Optional) The connection MAC Security (MACsec) encryption mode. MAC Security (MACsec) is only available on dedicated connections. Valid values are `no_encrypt`, `should_encrypt`, and `must_encrypt`. * `location` - (Required) The AWS Direct Connect location where the connection is located. 
See [DescribeLocations](https://docs.aws.amazon.com/directconnect/latest/APIReference/API_DescribeLocations.html) for the list of AWS Direct Connect locations. Use `locationCode`. @@ -139,4 +140,4 @@ Using `terraform import`, import Direct Connect connections using the connection % terraform import aws_dx_connection.test_connection dxcon-ffre0ec3 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/dx_connection_association.html.markdown b/website/docs/cdktf/python/r/dx_connection_association.html.markdown index df05e70f4c6d..e9cb9552e3c3 100644 --- a/website/docs/cdktf/python/r/dx_connection_association.html.markdown +++ b/website/docs/cdktf/python/r/dx_connection_association.html.markdown @@ -52,6 +52,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `connection_id` - (Required) The ID of the connection. * `lag_id` - (Required) The ID of the LAG with which to associate the connection. @@ -59,4 +60,4 @@ This resource supports the following arguments: This resource exports no additional attributes. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/dx_connection_confirmation.html.markdown b/website/docs/cdktf/python/r/dx_connection_confirmation.html.markdown index 6f5b5d6559bd..ed184a8379e7 100644 --- a/website/docs/cdktf/python/r/dx_connection_confirmation.html.markdown +++ b/website/docs/cdktf/python/r/dx_connection_confirmation.html.markdown @@ -35,6 +35,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `connection_id` - (Required) The ID of the hosted connection. ### Removing `aws_dx_connection_confirmation` from your configuration @@ -48,4 +49,4 @@ This resource exports the following attributes in addition to the arguments abov * `id` - The ID of the connection. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/dx_gateway.html.markdown b/website/docs/cdktf/python/r/dx_gateway.html.markdown index e94834028a3d..71e7d2017a32 100644 --- a/website/docs/cdktf/python/r/dx_gateway.html.markdown +++ b/website/docs/cdktf/python/r/dx_gateway.html.markdown @@ -56,6 +56,32 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. 
For example: + +```terraform +import { + to = aws_dx_gateway.example + identity = { + id = "abcd1234-dcba-5678-be23-cdef9876ab45" + } +} + +resource "aws_dx_gateway" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `id` (String) ID of the Direct Connect Gateway. + +#### Optional + +* `account_id` (String) AWS Account where this resource is managed. +* `region` (String) Region where this resource is managed. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Direct Connect Gateways using the gateway `id`. For example: ```python @@ -70,13 +96,13 @@ from imports.aws.dx_gateway import DxGateway class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) - DxGateway.generate_config_for_import(self, "test", "abcd1234-dcba-5678-be23-cdef9876ab45") + DxGateway.generate_config_for_import(self, "example", "abcd1234-dcba-5678-be23-cdef9876ab45") ``` Using `terraform import`, import Direct Connect Gateways using the gateway `id`. For example: ```console -% terraform import aws_dx_gateway.test abcd1234-dcba-5678-be23-cdef9876ab45 +% terraform import aws_dx_gateway.example abcd1234-dcba-5678-be23-cdef9876ab45 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/dx_gateway_association.html.markdown b/website/docs/cdktf/python/r/dx_gateway_association.html.markdown index df0c71c7522e..38d6ad68b8fc 100644 --- a/website/docs/cdktf/python/r/dx_gateway_association.html.markdown +++ b/website/docs/cdktf/python/r/dx_gateway_association.html.markdown @@ -135,6 +135,7 @@ A full example of how to create a VPN Gateway in one AWS account, create a Direc This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `dx_gateway_id` - (Required) The ID of the Direct Connect gateway. * `associated_gateway_id` - (Optional) The ID of the VGW or transit gateway with which to associate the Direct Connect gateway. Used for single account Direct Connect gateway associations. @@ -152,10 +153,10 @@ Used for cross-account Direct Connect gateway associations. This resource exports the following attributes in addition to the arguments above: -* `id` - The ID of the Direct Connect gateway association resource. * `associated_gateway_type` - The type of the associated gateway, `transitGateway` or `virtualPrivateGateway`. * `dx_gateway_association_id` - The ID of the Direct Connect gateway association. * `dx_gateway_owner_account_id` - The ID of the AWS account that owns the Direct Connect gateway. +* `transit_gateway_attachment_id` - The ID of the Transit Gateway Attachment when the type is `transitGateway`. ## Timeouts @@ -190,4 +191,4 @@ Using `terraform import`, import Direct Connect gateway associations using `dx_g % terraform import aws_dx_gateway_association.example 345508c3-7215-4aef-9832-07c125d5bd0f/vgw-98765432 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/dx_gateway_association_proposal.html.markdown b/website/docs/cdktf/python/r/dx_gateway_association_proposal.html.markdown index c2fab6988064..627f669142e0 100644 --- a/website/docs/cdktf/python/r/dx_gateway_association_proposal.html.markdown +++ b/website/docs/cdktf/python/r/dx_gateway_association_proposal.html.markdown @@ -39,6 +39,7 @@ A full example of how to create a VPN Gateway in one AWS account, create a Direc This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `associated_gateway_id` - (Required) The ID of the VGW or transit gateway with which to associate the Direct Connect gateway. * `dx_gateway_id` - (Required) Direct Connect Gateway identifier. * `dx_gateway_owner_account_id` - (Required) AWS Account identifier of the Direct Connect Gateway's owner. @@ -107,4 +108,4 @@ Using a proposal ID, Direct Connect Gateway ID and associated gateway ID separat The latter case is useful when a previous proposal has been accepted and deleted by AWS. The `aws_dx_gateway_association_proposal` resource will then represent a pseudo-proposal for the same Direct Connect Gateway and associated gateway. If no previous proposal is available, use a tool like [`uuidgen`](http://manpages.ubuntu.com/manpages/bionic/man1/uuidgen.1.html) to generate a new random pseudo-proposal ID. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/dx_hosted_connection.html.markdown b/website/docs/cdktf/python/r/dx_hosted_connection.html.markdown index 9a652be05cd4..347087c8df73 100644 --- a/website/docs/cdktf/python/r/dx_hosted_connection.html.markdown +++ b/website/docs/cdktf/python/r/dx_hosted_connection.html.markdown @@ -49,16 +49,17 @@ This resource supports the following arguments: This resource exports the following attributes in addition to the arguments above: -* `id` - The ID of the connection. -* `jumbo_frame_capable` - Boolean value representing if jumbo frames have been enabled for this connection. -* `has_logical_redundancy` - Indicates whether the connection supports a secondary BGP peer in the same address family (IPv4/IPv6). * `aws_device` - The Direct Connect endpoint on which the physical connection terminates. -* `state` - The state of the connection. 
Possible values include: ordering, requested, pending, available, down, deleting, deleted, rejected, unknown. See [AllocateHostedConnection](https://docs.aws.amazon.com/directconnect/latest/APIReference/API_AllocateHostedConnection.html) for a description of each connection state. +* `connection_region` - The AWS Region where the connection is located. +* `has_logical_redundancy` - Indicates whether the connection supports a secondary BGP peer in the same address family (IPv4/IPv6). +* `id` - The ID of the hosted connection. +* `jumbo_frame_capable` - Boolean value representing if jumbo frames have been enabled for this connection. * `lag_id` - The ID of the LAG. * `loa_issue_time` - The time of the most recent call to [DescribeLoa](https://docs.aws.amazon.com/directconnect/latest/APIReference/API_DescribeLoa.html) for this connection. * `location` - The location of the connection. * `partner_name` - The name of the AWS Direct Connect service provider associated with the connection. * `provider_name` - The name of the service provider associated with the connection. -* `region` - The AWS Region where the connection is located. +* `region` - (**Deprecated**) The AWS Region where the connection is located. Use `connection_region` instead. +* `state` - The state of the connection. Possible values include: ordering, requested, pending, available, down, deleting, deleted, rejected, unknown. See [AllocateHostedConnection](https://docs.aws.amazon.com/directconnect/latest/APIReference/API_AllocateHostedConnection.html) for a description of each connection state. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/dx_hosted_private_virtual_interface.html.markdown b/website/docs/cdktf/python/r/dx_hosted_private_virtual_interface.html.markdown index 6264585db080..7d41c9abe98d 100644 --- a/website/docs/cdktf/python/r/dx_hosted_private_virtual_interface.html.markdown +++ b/website/docs/cdktf/python/r/dx_hosted_private_virtual_interface.html.markdown @@ -41,6 +41,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `address_family` - (Required) The address family for the BGP peer. `ipv4 ` or `ipv6`. * `bgp_asn` - (Required) The autonomous system (AS) number for Border Gateway Protocol (BGP) configuration. * `connection_id` - (Required) The ID of the Direct Connect connection (or LAG) on which to create the virtual interface. 
@@ -94,4 +95,4 @@ Using `terraform import`, import Direct Connect hosted private virtual interface % terraform import aws_dx_hosted_private_virtual_interface.test dxvif-33cc44dd ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/dx_hosted_private_virtual_interface_accepter.html.markdown b/website/docs/cdktf/python/r/dx_hosted_private_virtual_interface_accepter.html.markdown index 86585c91e9fe..1ca2755af201 100644 --- a/website/docs/cdktf/python/r/dx_hosted_private_virtual_interface_accepter.html.markdown +++ b/website/docs/cdktf/python/r/dx_hosted_private_virtual_interface_accepter.html.markdown @@ -67,6 +67,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `virtual_interface_id` - (Required) The ID of the Direct Connect virtual interface to accept. * `dx_gateway_id` - (Optional) The ID of the Direct Connect gateway to which to connect the virtual interface. * `tags` - (Optional) A map of tags to assign to the resource. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. 
@@ -120,4 +121,4 @@ Using `terraform import`, import Direct Connect hosted private virtual interface % terraform import aws_dx_hosted_private_virtual_interface_accepter.test dxvif-33cc44dd ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/dx_hosted_public_virtual_interface.html.markdown b/website/docs/cdktf/python/r/dx_hosted_public_virtual_interface.html.markdown index 3a05d67da817..5b17d4b8a34b 100644 --- a/website/docs/cdktf/python/r/dx_hosted_public_virtual_interface.html.markdown +++ b/website/docs/cdktf/python/r/dx_hosted_public_virtual_interface.html.markdown @@ -44,6 +44,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `address_family` - (Required) The address family for the BGP peer. `ipv4 ` or `ipv6`. * `bgp_asn` - (Required) The autonomous system (AS) number for Border Gateway Protocol (BGP) configuration. * `connection_id` - (Required) The ID of the Direct Connect connection (or LAG) on which to create the virtual interface. 
@@ -95,4 +96,4 @@ Using `terraform import`, import Direct Connect hosted public virtual interfaces % terraform import aws_dx_hosted_public_virtual_interface.test dxvif-33cc44dd ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/dx_hosted_public_virtual_interface_accepter.html.markdown b/website/docs/cdktf/python/r/dx_hosted_public_virtual_interface_accepter.html.markdown index 0053f598bcd1..0c52f7ab5d7d 100644 --- a/website/docs/cdktf/python/r/dx_hosted_public_virtual_interface_accepter.html.markdown +++ b/website/docs/cdktf/python/r/dx_hosted_public_virtual_interface_accepter.html.markdown @@ -64,6 +64,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `virtual_interface_id` - (Required) The ID of the Direct Connect virtual interface to accept. * `tags` - (Optional) A map of tags to assign to the resource. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. 
@@ -115,4 +116,4 @@ Using `terraform import`, import Direct Connect hosted public virtual interfaces % terraform import aws_dx_hosted_public_virtual_interface_accepter.test dxvif-33cc44dd ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/dx_hosted_transit_virtual_interface.html.markdown b/website/docs/cdktf/python/r/dx_hosted_transit_virtual_interface.html.markdown index 44130df93f39..39f1bc3375e6 100644 --- a/website/docs/cdktf/python/r/dx_hosted_transit_virtual_interface.html.markdown +++ b/website/docs/cdktf/python/r/dx_hosted_transit_virtual_interface.html.markdown @@ -42,6 +42,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `address_family` - (Required) The address family for the BGP peer. `ipv4 ` or `ipv6`. * `bgp_asn` - (Required) The autonomous system (AS) number for Border Gateway Protocol (BGP) configuration. * `connection_id` - (Required) The ID of the Direct Connect connection (or LAG) on which to create the virtual interface. 
@@ -95,4 +96,4 @@ Using `terraform import`, import Direct Connect hosted transit virtual interface % terraform import aws_dx_hosted_transit_virtual_interface.test dxvif-33cc44dd ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/dx_hosted_transit_virtual_interface_accepter.html.markdown b/website/docs/cdktf/python/r/dx_hosted_transit_virtual_interface_accepter.html.markdown index 1e8c08166c4d..274de8a5aa25 100644 --- a/website/docs/cdktf/python/r/dx_hosted_transit_virtual_interface_accepter.html.markdown +++ b/website/docs/cdktf/python/r/dx_hosted_transit_virtual_interface_accepter.html.markdown @@ -71,6 +71,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `dx_gateway_id` - (Required) The ID of the [Direct Connect gateway](dx_gateway.html) to which to connect the virtual interface. * `virtual_interface_id` - (Required) The ID of the Direct Connect virtual interface to accept. * `tags` - (Optional) A map of tags to assign to the resource. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. 
@@ -115,4 +116,4 @@ Using `terraform import`, import Direct Connect hosted transit virtual interface % terraform import aws_dx_hosted_transit_virtual_interface_accepter.test dxvif-33cc44dd ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/dx_lag.html.markdown b/website/docs/cdktf/python/r/dx_lag.html.markdown index c1fa3d7eeb16..e574b6b4f3ce 100644 --- a/website/docs/cdktf/python/r/dx_lag.html.markdown +++ b/website/docs/cdktf/python/r/dx_lag.html.markdown @@ -40,6 +40,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The name of the LAG. * `connections_bandwidth` - (Required) The bandwidth of the individual dedicated connections bundled by the LAG. Valid values: 1Gbps, 10Gbps, 100Gbps, and 400Gbps. Case sensitive. Refer to the AWS Direct Connection supported bandwidths for [Dedicated Connections](https://docs.aws.amazon.com/directconnect/latest/UserGuide/dedicated_connection.html). * `location` - (Required) The AWS Direct Connect location in which the LAG should be allocated. See [DescribeLocations](https://docs.aws.amazon.com/directconnect/latest/APIReference/API_DescribeLocations.html) for the list of AWS Direct Connect locations. Use `locationCode`. @@ -84,4 +85,4 @@ Using `terraform import`, import Direct Connect LAGs using the LAG `id`. 
For exa % terraform import aws_dx_lag.test_lag dxlag-fgnsp5rq ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/dx_macsec_key_association.html.markdown b/website/docs/cdktf/python/r/dx_macsec_key_association.html.markdown index dedbaa3b099d..0aafe9d0c478 100644 --- a/website/docs/cdktf/python/r/dx_macsec_key_association.html.markdown +++ b/website/docs/cdktf/python/r/dx_macsec_key_association.html.markdown @@ -80,6 +80,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `cak` - (Optional) The MAC Security (MACsec) CAK to associate with the dedicated connection. The valid values are 64 hexadecimal characters (0-9, A-E). Required if using `ckn`. * `ckn` - (Optional) The MAC Security (MACsec) CKN to associate with the dedicated connection. The valid values are 64 hexadecimal characters (0-9, A-E). Required if using `cak`. * `connection_id` - (Required) The ID of the dedicated Direct Connect connection. The connection must be a dedicated connection in the `AVAILABLE` state. @@ -95,4 +96,4 @@ This resource exports the following attributes in addition to the arguments abov * `start_on` - The date in UTC format that the MAC Security (MACsec) secret key takes effect. * `state` - The state of the MAC Security (MACsec) secret key. The possible values are: associating, associated, disassociating, disassociated. See [MacSecKey](https://docs.aws.amazon.com/directconnect/latest/APIReference/API_MacSecKey.html#DX-Type-MacSecKey-state) for descriptions of each state. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/dx_private_virtual_interface.html.markdown b/website/docs/cdktf/python/r/dx_private_virtual_interface.html.markdown index 951eb0395af9..1746663a84bf 100644 --- a/website/docs/cdktf/python/r/dx_private_virtual_interface.html.markdown +++ b/website/docs/cdktf/python/r/dx_private_virtual_interface.html.markdown @@ -39,6 +39,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `address_family` - (Required) The address family for the BGP peer. `ipv4 ` or `ipv6`. * `bgp_asn` - (Required) The autonomous system (AS) number for Border Gateway Protocol (BGP) configuration. * `connection_id` - (Required) The ID of the Direct Connect connection (or LAG) on which to create the virtual interface. @@ -97,4 +98,4 @@ Using `terraform import`, import Direct Connect private virtual interfaces using % terraform import aws_dx_private_virtual_interface.test dxvif-33cc44dd ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/dx_public_virtual_interface.html.markdown b/website/docs/cdktf/python/r/dx_public_virtual_interface.html.markdown index f2d15dce029d..3bc84d4cd80a 100644 --- a/website/docs/cdktf/python/r/dx_public_virtual_interface.html.markdown +++ b/website/docs/cdktf/python/r/dx_public_virtual_interface.html.markdown @@ -42,6 +42,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `address_family` - (Required) The address family for the BGP peer. `ipv4 ` or `ipv6`. * `bgp_asn` - (Required) The autonomous system (AS) number for Border Gateway Protocol (BGP) configuration. * `connection_id` - (Required) The ID of the Direct Connect connection (or LAG) on which to create the virtual interface. @@ -94,4 +95,4 @@ Using `terraform import`, import Direct Connect public virtual interfaces using % terraform import aws_dx_public_virtual_interface.test dxvif-33cc44dd ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/dx_transit_virtual_interface.html.markdown b/website/docs/cdktf/python/r/dx_transit_virtual_interface.html.markdown index 30d7957983fd..4f0265a8add4 100644 --- a/website/docs/cdktf/python/r/dx_transit_virtual_interface.html.markdown +++ b/website/docs/cdktf/python/r/dx_transit_virtual_interface.html.markdown @@ -48,6 +48,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `address_family` - (Required) The address family for the BGP peer. `ipv4 ` or `ipv6`. * `bgp_asn` - (Required) The autonomous system (AS) number for Border Gateway Protocol (BGP) configuration. * `connection_id` - (Required) The ID of the Direct Connect connection (or LAG) on which to create the virtual interface. 
@@ -105,4 +106,4 @@ Using `terraform import`, import Direct Connect transit virtual interfaces using % terraform import aws_dx_transit_virtual_interface.test dxvif-33cc44dd ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/dynamodb_contributor_insights.html.markdown b/website/docs/cdktf/python/r/dynamodb_contributor_insights.html.markdown index 7ad5d918d81d..5f9572c34482 100644 --- a/website/docs/cdktf/python/r/dynamodb_contributor_insights.html.markdown +++ b/website/docs/cdktf/python/r/dynamodb_contributor_insights.html.markdown @@ -35,8 +35,10 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `table_name` - (Required) The name of the table to enable contributor insights * `index_name` - (Optional) The global secondary index name +* `mode` - (Optional) Specifies the [CloudWatch contributor insights mode](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/contributorinsights_HowItWorks.html#contributorinsights_HowItWorks.Modes). ## Attribute Reference @@ -67,4 +69,4 @@ Using `terraform import`, import `aws_dynamodb_contributor_insights` using the f % terraform import aws_dynamodb_contributor_insights.test name:ExampleTableName/index:ExampleIndexName/123456789012 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/dynamodb_global_table.html.markdown b/website/docs/cdktf/python/r/dynamodb_global_table.html.markdown index 7445aedad0e1..c7d764dbf37d 100644 --- a/website/docs/cdktf/python/r/dynamodb_global_table.html.markdown +++ b/website/docs/cdktf/python/r/dynamodb_global_table.html.markdown @@
-85,6 +85,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The name of the global table. Must match underlying DynamoDB Table names in all regions. * `replica` - (Required) Underlying DynamoDB Table. At least 1 replica must be defined. See below. @@ -126,4 +127,4 @@ Using `terraform import`, import DynamoDB Global Tables using the global table n % terraform import aws_dynamodb_global_table.MyTable MyTable ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/dynamodb_kinesis_streaming_destination.html.markdown b/website/docs/cdktf/python/r/dynamodb_kinesis_streaming_destination.html.markdown index 95a6107feed5..d6d0b7e44153 100644 --- a/website/docs/cdktf/python/r/dynamodb_kinesis_streaming_destination.html.markdown +++ b/website/docs/cdktf/python/r/dynamodb_kinesis_streaming_destination.html.markdown @@ -57,6 +57,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `approximate_creation_date_time_precision` - (Optional) Toggle for the precision of Kinesis data stream timestamp. Valid values: `MILLISECOND` and `MICROSECOND`. * `stream_arn` - (Required) The ARN for a Kinesis data stream. This must exist in the same account and region as the DynamoDB table. * `table_name` - (Required) The name of the DynamoDB table. 
There can only be one Kinesis streaming destination for a given DynamoDB table. @@ -92,4 +93,4 @@ Using `terraform import`, import DynamoDB Kinesis Streaming Destinations using t % terraform import aws_dynamodb_kinesis_streaming_destination.example example,arn:aws:kinesis:us-east-1:111122223333:exampleStreamName ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/dynamodb_resource_policy.html.markdown b/website/docs/cdktf/python/r/dynamodb_resource_policy.html.markdown index 0c3e75496f0f..5e7714b3c9da 100644 --- a/website/docs/cdktf/python/r/dynamodb_resource_policy.html.markdown +++ b/website/docs/cdktf/python/r/dynamodb_resource_policy.html.markdown @@ -44,6 +44,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `confirm_remove_self_resource_access` - (Optional) Set this parameter to true to confirm that you want to remove your permissions to change the policy of this resource in the future. ## Attribute Reference @@ -54,7 +55,28 @@ This resource exports the following attributes in addition to the arguments abov ## Import -In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import DynamoDB Resource Policy using the `example_id_arg`. For example: +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. 
For example: + +```terraform +import { + to = aws_dynamodb_resource_policy.example + identity = { + "arn" = "arn:aws:dynamodb:us-west-2:123456789012:table/example-table" + } +} + +resource "aws_dynamodb_resource_policy" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the DynamoDB table. + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import DynamoDB Resource Policy using the `resource_arn`. For example: ```python # DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug @@ -71,10 +93,10 @@ class MyConvertedCode(TerraformStack): DynamodbResourcePolicy.generate_config_for_import(self, "example", "arn:aws:dynamodb:us-east-1:1234567890:table/my-table") ``` -Using `terraform import`, import DynamoDB Resource Policy using the `example_id_arg`. For example: +Using `terraform import`, import DynamoDB Resource Policy using the `resource_arn`. For example: ```console % terraform import aws_dynamodb_resource_policy.example arn:aws:dynamodb:us-east-1:1234567890:table/my-table ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/dynamodb_table.html.markdown b/website/docs/cdktf/python/r/dynamodb_table.html.markdown index 8b35174c3e23..5ea37bfe64fa 100644 --- a/website/docs/cdktf/python/r/dynamodb_table.html.markdown +++ b/website/docs/cdktf/python/r/dynamodb_table.html.markdown @@ -120,6 +120,48 @@ class MyConvertedCode(TerraformStack): ) ``` +#### Global Tables with Multi-Region Strong Consistency + +A global table configured for Multi-Region strong consistency (MRSC) provides the ability to perform a strongly consistent read with multi-Region scope. 
Performing a strongly consistent read on an MRSC table ensures you're always reading the latest version of an item, irrespective of the Region in which you're performing the read. + +**Note** Please see detailed information, restrictions, caveats etc on the [AWS Support Page](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/multi-region-strong-consistency-gt.html). + +Consistency Mode (`consistency_mode`) is a new argument on the embedded `replica` that allows you to configure consistency mode for Global Tables. + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.dynamodb_table import DynamodbTable +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + DynamodbTable(self, "example", + attribute=[DynamodbTableAttribute( + name="TestTableHashKey", + type="S" + ) + ], + billing_mode="PAY_PER_REQUEST", + hash_key="TestTableHashKey", + name="example", + replica=[DynamodbTableReplica( + consistency_mode="STRONG", + region_name="us-east-2" + ), DynamodbTableReplica( + consistency_mode="STRONG", + region_name="us-west-2" + ) + ], + stream_enabled=True, + stream_view_type="NEW_AND_OLD_IMAGES" + ) +``` + ### Replica Tagging You can manage global table replicas' tags in various ways. This example shows using `replica.*.propagate_tags` for the first replica and the `aws_dynamodb_tag` resource for the other. 
@@ -186,7 +228,7 @@ class MyConvertedCode(TerraformStack): key="Architect", resource_arn=Token.as_string( Fn.replace(example.arn, - Token.as_string(current.name), + Token.as_string(current.region), Token.as_string(alternate.name))), value="Gigi" ) @@ -204,6 +246,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `billing_mode` - (Optional) Controls how you are charged for read and write throughput and how you manage capacity. The valid values are `PROVISIONED` and `PAY_PER_REQUEST`. Defaults to `PROVISIONED`. * `deletion_protection_enabled` - (Optional) Enables deletion protection for table. Defaults to `false`. * `import_table` - (Optional) Import Amazon S3 data into a new table. See below. @@ -226,6 +269,7 @@ The following arguments are optional: Default value is `STANDARD`. * `tags` - (Optional) A map of tags to populate on the created table. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. * `ttl` - (Optional) Configuration block for TTL. See below. +* `warm_throughput` - (Optional) Sets the number of warm read and write units for the specified table. See below. * `write_capacity` - (Optional) Number of write units for this table. If the `billing_mode` is `PROVISIONED`, this field is required. ### `attribute` @@ -262,10 +306,11 @@ The following arguments are optional: * `hash_key` - (Required) Name of the hash key in the index; must be defined as an attribute in the resource. * `name` - (Required) Name of the index. 
* `non_key_attributes` - (Optional) Only required with `INCLUDE` as a projection type; a list of attributes to project into the index. These do not need to be defined as attributes on the table. -* `on_demand_throughput` - (Optional) Sets the maximum number of read and write units for the specified on-demand table. See below. +* `on_demand_throughput` - (Optional) Sets the maximum number of read and write units for the specified on-demand index. See below. * `projection_type` - (Required) One of `ALL`, `INCLUDE` or `KEYS_ONLY` where `ALL` projects every attribute into the index, `KEYS_ONLY` projects into the index only the table and index hash_key and sort_key attributes , `INCLUDE` projects into the index all of the attributes that are defined in `non_key_attributes` in addition to the attributes that that`KEYS_ONLY` project. * `range_key` - (Optional) Name of the range key; must be defined * `read_capacity` - (Optional) Number of read units for this index. Must be set if billing_mode is set to PROVISIONED. +* `warm_throughput` - (Optional) Sets the number of warm read and write units for this index. See below. * `write_capacity` - (Optional) Number of write units for this index. Must be set if billing_mode is set to PROVISIONED. ### `local_secondary_index` @@ -292,6 +337,7 @@ The following arguments are optional: **Note:** This attribute will _not_ be populated with the ARN of _default_ keys. **Note:** Changing this value will recreate the replica. * `point_in_time_recovery` - (Optional) Whether to enable Point In Time Recovery for the replica. Default is `false`. +* `deletion_protection_enabled` - (Optional) Whether deletion protection is enabled (true) or disabled (false) on the replica. Default is `false`. * `propagate_tags` - (Optional) Whether to propagate the global table's tags to a replica. Default is `false`. Changes to tags only move in one direction: from global (source) to replica. 
@@ -299,6 +345,7 @@ The following arguments are optional: Tag changes on the global table are propagated to replicas. Changing from `true` to `false` on a subsequent `apply` leaves replica tags as-is and no longer manages them. * `region_name` - (Required) Region name of the replica. +* `consistency_mode` - (Optional) Whether this global table uses `STRONG` or `EVENTUAL` consistency mode. Default value is `EVENTUAL`. ### `server_side_encryption` @@ -312,6 +359,13 @@ The following arguments are optional: * `enabled` - (Optional) Whether TTL is enabled. Default value is `false`. +### `warm_throughput` + +~> **Note:** Explicitly configuring both `read_units_per_second` and `write_units_per_second` to the default/minimum values will cause Terraform to report differences. + +* `read_units_per_second` - (Optional) Number of read operations a table or index can instantaneously support. For the base table, decreasing this value will force a new resource. For a global secondary index, this value can be increased or decreased without recreation. Minimum value of `12000` (default). +* `write_units_per_second` - (Optional) Number of write operations a table or index can instantaneously support. For the base table, decreasing this value will force a new resource. For a global secondary index, this value can be increased or decreased without recreation. Minimum value of `4000` (default). + ## Attribute Reference This resource exports the following attributes in addition to the arguments above: @@ -360,4 +414,4 @@ Using `terraform import`, import DynamoDB tables using the `name`.
For example: % terraform import aws_dynamodb_table.basic-dynamodb-table GameScores ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/dynamodb_table_export.html.markdown b/website/docs/cdktf/python/r/dynamodb_table_export.html.markdown index 04722c469192..b6859b1f0814 100644 --- a/website/docs/cdktf/python/r/dynamodb_table_export.html.markdown +++ b/website/docs/cdktf/python/r/dynamodb_table_export.html.markdown @@ -118,6 +118,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `export_format` - (Optional, Forces new resource) Format for the exported data. Valid values are: `DYNAMODB_JSON`, `ION`. See the [AWS Documentation](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/S3DataExport.Output.html#S3DataExport.Output_Data) for more information on these export formats. Default is `DYNAMODB_JSON`. * `export_time` - (Optional, Forces new resource) Time in RFC3339 format from which to export table data. The table export will be a snapshot of the table's state at this point in time. Omitting this value will result in a snapshot from the current time. * `export_type` - (Optional, Forces new resource) Whether to execute as a full export or incremental export. Valid values are: `FULL_EXPORT`, `INCREMENTAL_EXPORT`. Defaults to `FULL_EXPORT`. If `INCREMENTAL_EXPORT` is provided, the `incremental_export_specification` argument must also be provided. 
@@ -156,6 +157,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_dynamodb_table_export.example + identity = { + "arn" = "arn:aws:dynamodb:us-west-2:123456789012:table/example-table/export/01234567890123-a1b2c3d4" + } +} + +resource "aws_dynamodb_table_export" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the DynamoDB table export. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import DynamoDB table exports using the `arn`. For example: ```python @@ -179,4 +201,4 @@ Using `terraform import`, import DynamoDB table exports using the `arn`. For exa % terraform import aws_dynamodb_table_export.example arn:aws:dynamodb:us-west-2:12345678911:table/my-table-1/export/01580735656614-2c2f422e ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/dynamodb_table_item.html.markdown b/website/docs/cdktf/python/r/dynamodb_table_item.html.markdown index 82cb4b9be9f5..184485fd7a78 100644 --- a/website/docs/cdktf/python/r/dynamodb_table_item.html.markdown +++ b/website/docs/cdktf/python/r/dynamodb_table_item.html.markdown @@ -54,6 +54,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
* `hash_key` - (Required) Hash key to use for lookups and identification of the item * `item` - (Required) JSON representation of a map of attribute name/value pairs, one for each attribute. Only the primary key attributes are required; you can optionally provide other attribute name-value pairs for the item. * `range_key` - (Optional) Range key to use for lookups and identification of the item. Required if there is range key defined in the table. @@ -69,4 +70,4 @@ This resource exports the following attributes in addition to the arguments abov You cannot import DynamoDB table items. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/dynamodb_table_replica.html.markdown b/website/docs/cdktf/python/r/dynamodb_table_replica.html.markdown index 547f8daa5268..a7d4b23fa8d3 100644 --- a/website/docs/cdktf/python/r/dynamodb_table_replica.html.markdown +++ b/website/docs/cdktf/python/r/dynamodb_table_replica.html.markdown @@ -79,6 +79,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `kms_key_arn` - (Optional, Forces new resource) ARN of the CMK that should be used for the AWS KMS encryption. This argument should only be used if the key is different from the default KMS-managed DynamoDB key, `alias/aws/dynamodb`. **Note:** This attribute will _not_ be populated with the ARN of _default_ keys. * `deletion_protection_enabled` - (Optional) Whether deletion protection is enabled (true) or disabled (false) on the table replica. * `point_in_time_recovery` - (Optional) Whether to enable Point In Time Recovery for the table replica. Default is `false`. 
@@ -130,4 +131,4 @@ Using `terraform import`, import DynamoDB table replicas using the `table-name:m % terraform import aws_dynamodb_table_replica.example TestTable:us-west-2 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/dynamodb_tag.html.markdown b/website/docs/cdktf/python/r/dynamodb_tag.html.markdown index ac4ba82ad88a..23b5951a20ba 100644 --- a/website/docs/cdktf/python/r/dynamodb_tag.html.markdown +++ b/website/docs/cdktf/python/r/dynamodb_tag.html.markdown @@ -56,7 +56,7 @@ class MyConvertedCode(TerraformStack): provider=replica, resource_arn=Token.as_string( Fn.replace(example.arn, - Token.as_string(current.name), + Token.as_string(current.region), Token.as_string(data_aws_region_replica.name))), value="testvalue" ) @@ -66,6 +66,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `resource_arn` - (Required) Amazon Resource Name (ARN) of the DynamoDB resource to tag. * `key` - (Required) Tag name. * `value` - (Required) Tag value. 
@@ -101,4 +102,4 @@ Using `terraform import`, import `aws_dynamodb_tag` using the DynamoDB resource % terraform import aws_dynamodb_tag.example arn:aws:dynamodb:us-east-1:123456789012:table/example,Name ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/ebs_default_kms_key.html.markdown b/website/docs/cdktf/python/r/ebs_default_kms_key.html.markdown index affc3bdfe5ea..617f753e61cf 100644 --- a/website/docs/cdktf/python/r/ebs_default_kms_key.html.markdown +++ b/website/docs/cdktf/python/r/ebs_default_kms_key.html.markdown @@ -42,6 +42,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `key_arn` - (Required, ForceNew) The ARN of the AWS Key Management Service (AWS KMS) customer master key (CMK) to use to encrypt the EBS volume. ## Attribute Reference @@ -73,4 +74,4 @@ Using `terraform import`, import the EBS default KMS CMK using the KMS key ARN. 
% terraform import aws_ebs_default_kms_key.example arn:aws:kms:us-east-1:123456789012:key/abcd-1234 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/ebs_encryption_by_default.html.markdown b/website/docs/cdktf/python/r/ebs_encryption_by_default.html.markdown index 2f93248479c4..078ba6643d01 100644 --- a/website/docs/cdktf/python/r/ebs_encryption_by_default.html.markdown +++ b/website/docs/cdktf/python/r/ebs_encryption_by_default.html.markdown @@ -37,6 +37,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `enabled` - (Optional) Whether or not default EBS encryption is enabled. Valid values are `true` or `false`. Defaults to `true`. ## Attribute Reference @@ -68,4 +69,4 @@ Using `terraform import`, import the default EBS encryption state. For example: % terraform import aws_ebs_encryption_by_default.example default ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/ebs_fast_snapshot_restore.html.markdown b/website/docs/cdktf/python/r/ebs_fast_snapshot_restore.html.markdown index 790e9096cd8a..4505918828cd 100644 --- a/website/docs/cdktf/python/r/ebs_fast_snapshot_restore.html.markdown +++ b/website/docs/cdktf/python/r/ebs_fast_snapshot_restore.html.markdown @@ -36,8 +36,9 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -The following arguments are required: +This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `availability_zone` - (Required) Availability zone in which to enable fast snapshot restores. * `snapshot_id` - (Required) ID of the snapshot. @@ -57,7 +58,7 @@ This resource exports the following attributes in addition to the arguments abov ## Import -In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import EC2 (Elastic Compute Cloud) EBS Fast Snapshot Restore using the `example_id_arg`. For example: +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import EC2 (Elastic Compute Cloud) EBS Fast Snapshot Restore using the `availability_zone` and `snapshot_id` separated by `,`. For example: ```python # DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug @@ -74,10 +75,10 @@ class MyConvertedCode(TerraformStack): EbsFastSnapshotRestore.generate_config_for_import(self, "example", "us-west-2a,snap-abcdef123456") ``` -Using `terraform import`, import EC2 (Elastic Compute Cloud) EBS Fast Snapshot Restore using the `id`. For example: +Using `terraform import`, import EC2 (Elastic Compute Cloud) EBS Fast Snapshot Restore using the `availability_zone` and `snapshot_id` separated by `,`. 
For example: ```console % terraform import aws_ebs_fast_snapshot_restore.example us-west-2a,snap-abcdef123456 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/ebs_snapshot.html.markdown b/website/docs/cdktf/python/r/ebs_snapshot.html.markdown index 360c7c6fd6a1..440460ac33d4 100644 --- a/website/docs/cdktf/python/r/ebs_snapshot.html.markdown +++ b/website/docs/cdktf/python/r/ebs_snapshot.html.markdown @@ -46,6 +46,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `volume_id` - (Required) The Volume ID of which to make a snapshot. * `description` - (Optional) A description of what the snapshot is. * `outpost_arn` - (Optional) The Amazon Resource Name (ARN) of the Outpost on which to create a local snapshot. @@ -100,4 +101,4 @@ Using `terraform import`, import EBS Snapshot using the `id`. For example: % terraform import aws_ebs_snapshot.id snap-049df61146c4d7901 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/ebs_snapshot_block_public_access.html.markdown b/website/docs/cdktf/python/r/ebs_snapshot_block_public_access.html.markdown index b3d812835d91..7185f5e318e9 100644 --- a/website/docs/cdktf/python/r/ebs_snapshot_block_public_access.html.markdown +++ b/website/docs/cdktf/python/r/ebs_snapshot_block_public_access.html.markdown @@ -37,6 +37,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `state` - (Required) The mode in which to enable "Block public access for snapshots" for the region. Allowed values are `block-all-sharing`, `block-new-sharing`, `unblocked`. ## Attribute Reference @@ -68,4 +69,4 @@ Using `terraform import`, import the state. For example: % terraform import aws_ebs_snapshot_block_public_access.example default ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/ebs_snapshot_copy.html.markdown b/website/docs/cdktf/python/r/ebs_snapshot_copy.html.markdown index 6e8fa7aa8042..2eee4b7ff36f 100644 --- a/website/docs/cdktf/python/r/ebs_snapshot_copy.html.markdown +++ b/website/docs/cdktf/python/r/ebs_snapshot_copy.html.markdown @@ -54,6 +54,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` - (Optional) A description of what the snapshot is. * `encrypted` - Whether the snapshot is encrypted. * `kms_key_id` - The ARN for the KMS encryption key. 
@@ -84,4 +85,4 @@ This resource exports the following attributes in addition to the arguments abov - `create` - (Default `10m`) - `delete` - (Default `10m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/ebs_snapshot_import.html.markdown b/website/docs/cdktf/python/r/ebs_snapshot_import.html.markdown index d7f2d379b401..881da936f3a2 100644 --- a/website/docs/cdktf/python/r/ebs_snapshot_import.html.markdown +++ b/website/docs/cdktf/python/r/ebs_snapshot_import.html.markdown @@ -45,6 +45,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `client_data` - (Optional) The client-specific data. Detailed below. * `description` - (Optional) The description string for the import snapshot task. * `disk_container` - (Required) Information about the disk container. Detailed below. @@ -94,4 +95,4 @@ This resource exports the following attributes in addition to the arguments abov * `data_encryption_key_id` - The data encryption key identifier for the snapshot. * `tags_all` - A map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/ebs_volume.html.markdown b/website/docs/cdktf/python/r/ebs_volume.html.markdown index 56602c9115af..3f667d6b3f93 100644 --- a/website/docs/cdktf/python/r/ebs_volume.html.markdown +++ b/website/docs/cdktf/python/r/ebs_volume.html.markdown @@ -39,6 +39,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `availability_zone` - (Required) Availability zone where the EBS volume will exist. * `encrypted` - (Optional) If true, the disk will be encrypted. * `final_snapshot` - (Optional) If true, snapshot will be created before volume deletion. Any tags on the volume will be migrated to the snapshot. By default set to false @@ -51,6 +52,7 @@ This resource supports the following arguments: * `tags` - (Optional) A map of tags to assign to the resource. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. * `throughput` - (Optional) Throughput that the volume supports, in MiB/s. Only valid for `type` of `gp3`. * `type` - (Optional) Type of EBS volume. Can be `standard`, `gp2`, `gp3`, `io1`, `io2`, `sc1` or `st1` (Default: `gp2`). +* `volume_initialization_rate` - (Optional) EBS provisioned rate for volume initialization, in MiB/s, at which to download the snapshot blocks from Amazon S3 to the volume. This argument can only be set if `snapshot_id` is specified. ~> **NOTE:** At least one of `size` or `snapshot_id` is required. 
@@ -98,4 +100,4 @@ Using `terraform import`, import EBS Volumes using the `id`. For example: % terraform import aws_ebs_volume.id vol-049df61146c4d7901 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/ec2_availability_zone_group.html.markdown b/website/docs/cdktf/python/r/ec2_availability_zone_group.html.markdown index 73bb5082acaf..631a68d4aef1 100644 --- a/website/docs/cdktf/python/r/ec2_availability_zone_group.html.markdown +++ b/website/docs/cdktf/python/r/ec2_availability_zone_group.html.markdown @@ -36,8 +36,9 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -The following arguments are required: +This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `group_name` - (Required) Name of the Availability Zone Group. * `opt_in_status` - (Required) Indicates whether to enable or disable Availability Zone Group. Valid values: `opted-in` or `not-opted-in`. 
@@ -72,4 +73,4 @@ Using `terraform import`, import EC2 Availability Zone Groups using the group na % terraform import aws_ec2_availability_zone_group.example us-west-2-lax-1 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/ec2_capacity_block_reservation.html.markdown b/website/docs/cdktf/python/r/ec2_capacity_block_reservation.html.markdown index 7b4e05a476d0..e582d956bf26 100644 --- a/website/docs/cdktf/python/r/ec2_capacity_block_reservation.html.markdown +++ b/website/docs/cdktf/python/r/ec2_capacity_block_reservation.html.markdown @@ -51,6 +51,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `capacity_block_offering_id` - (Required) The Capacity Block Reservation ID. * `instance_platform` - (Required) The type of operating system for which to reserve capacity. Valid options are `Linux/UNIX`, `Red Hat Enterprise Linux`, `SUSE Linux`, `Windows`, `Windows with SQL Server`, `Windows with SQL Server Enterprise`, `Windows with SQL Server Standard` or `Windows with SQL Server Web`. * `tags` - (Optional) A map of tags to assign to the resource. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. @@ -75,4 +76,4 @@ This resource exports the following attributes in addition to the arguments abov * `tenancy` - Indicates the tenancy of the Capacity Block Reservation. Specify either `default` or `dedicated`. 
* `tags_all` - A map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/ec2_capacity_reservation.html.markdown b/website/docs/cdktf/python/r/ec2_capacity_reservation.html.markdown index c582020b6afc..fee9493724cb 100644 --- a/website/docs/cdktf/python/r/ec2_capacity_reservation.html.markdown +++ b/website/docs/cdktf/python/r/ec2_capacity_reservation.html.markdown @@ -38,6 +38,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `availability_zone` - (Required) The Availability Zone in which to create the Capacity Reservation. * `ebs_optimized` - (Optional) Indicates whether the Capacity Reservation supports EBS-optimized instances. * `end_date` - (Optional) The date and time at which the Capacity Reservation expires. When a Capacity Reservation expires, the reserved capacity is released and you can no longer launch instances into it. Valid values: [RFC3339 time string](https://tools.ietf.org/html/rfc3339#section-5.8) (`YYYY-MM-DDTHH:MM:SSZ`) @@ -94,4 +95,4 @@ Using `terraform import`, import Capacity Reservations using the `id`. 
For examp % terraform import aws_ec2_capacity_reservation.web cr-0123456789abcdef0 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/ec2_carrier_gateway.html.markdown b/website/docs/cdktf/python/r/ec2_carrier_gateway.html.markdown index 2c1e548c4f45..05b8560794e4 100644 --- a/website/docs/cdktf/python/r/ec2_carrier_gateway.html.markdown +++ b/website/docs/cdktf/python/r/ec2_carrier_gateway.html.markdown @@ -38,6 +38,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `tags` - (Optional) A map of tags to assign to the resource. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. * `vpc_id` - (Required) The ID of the VPC to associate with the carrier gateway. 
@@ -75,4 +76,4 @@ Using `terraform import`, import `aws_ec2_carrier_gateway` using the carrier gat % terraform import aws_ec2_carrier_gateway.example cgw-12345 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/ec2_client_vpn_authorization_rule.html.markdown b/website/docs/cdktf/python/r/ec2_client_vpn_authorization_rule.html.markdown index 9544447222e1..d0e008ba01d5 100644 --- a/website/docs/cdktf/python/r/ec2_client_vpn_authorization_rule.html.markdown +++ b/website/docs/cdktf/python/r/ec2_client_vpn_authorization_rule.html.markdown @@ -38,6 +38,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `client_vpn_endpoint_id` - (Required) The ID of the Client VPN endpoint. * `target_network_cidr` - (Required) The IPv4 address range, in CIDR notation, of the network to which the authorization rule applies. * `access_group_id` - (Optional) The ID of the group to which the authorization rule grants access. One of `access_group_id` or `authorize_all_groups` must be set. 
@@ -107,4 +108,4 @@ Using the endpoint ID, target network CIDR, and group name: % terraform import aws_ec2_client_vpn_authorization_rule.example cvpn-endpoint-0ac3a1abbccddd666,10.1.0.0/24,team-a ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/ec2_client_vpn_endpoint.html.markdown b/website/docs/cdktf/python/r/ec2_client_vpn_endpoint.html.markdown index 6ec5dab51ef1..c1fcef138e1a 100644 --- a/website/docs/cdktf/python/r/ec2_client_vpn_endpoint.html.markdown +++ b/website/docs/cdktf/python/r/ec2_client_vpn_endpoint.html.markdown @@ -48,8 +48,9 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `authentication_options` - (Required) Information about the authentication method to be used to authenticate clients. -* `client_cidr_block` - (Required) The IPv4 address range, in CIDR notation, from which to assign client IP addresses. The address range cannot overlap with the local CIDR of the VPC in which the associated subnet is located, or the routes that you add manually. The address range cannot be changed after the Client VPN endpoint has been created. The CIDR block should be /22 or greater. +* `client_cidr_block` - (Optional) The IPv4 address range, in CIDR notation, from which to assign client IP addresses. The address range cannot overlap with the local CIDR of the VPC in which the associated subnet is located, or the routes that you add manually. The address range cannot be changed after the Client VPN endpoint has been created. The CIDR block should be /22 or greater. When `traffic_ip_address_type` is set to `ipv6`, `client_cidr_block` must not be specified; otherwise, it is required.
* `client_connect_options` - (Optional) The options for managing connection authorization for new client connections. * `client_login_banner_options` - (Optional) Options for enabling a customizable text banner that will be displayed on AWS provided clients when a VPN session is established. * `client_route_enforcement_options` - (Optional) Options for enforce administrator defined routes on devices connected through the VPN. @@ -57,12 +58,14 @@ This resource supports the following arguments: * `description` - (Optional) A brief description of the Client VPN endpoint. * `disconnect_on_session_timeout` - (Optional) Indicates whether the client VPN session is disconnected after the maximum `session_timeout_hours` is reached. If `true`, users are prompted to reconnect client VPN. If `false`, client VPN attempts to reconnect automatically. The default value is `false`. * `dns_servers` - (Optional) Information about the DNS servers to be used for DNS resolution. A Client VPN endpoint can have up to two DNS servers. If no DNS server is specified, the DNS address of the connecting device is used. +* `endpoint_ip_address_type` - (Optional) IP address type for the Client VPN endpoint. Valid values are `ipv4`, `ipv6`, or `dual-stack`. Defaults to `ipv4`. * `security_group_ids` - (Optional) The IDs of one or more security groups to apply to the target network. You must also specify the ID of the VPC that contains the security groups. * `self_service_portal` - (Optional) Specify whether to enable the self-service portal for the Client VPN endpoint. Values can be `enabled` or `disabled`. Default value is `disabled`. * `server_certificate_arn` - (Required) The ARN of the ACM server certificate. * `session_timeout_hours` - (Optional) The maximum session duration is a trigger by which end-users are required to re-authenticate prior to establishing a VPN session. 
Default value is `24` - Valid values: `8 | 10 | 12 | 24` * `split_tunnel` - (Optional) Indicates whether split-tunnel is enabled on VPN endpoint. Default value is `false`. * `tags` - (Optional) A mapping of tags to assign to the resource. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. +* `traffic_ip_address_type` - (Optional) IP address type for traffic within the Client VPN tunnel. Valid values are `ipv4`, `ipv6`, or `dual-stack`. Defaults to `ipv4`. When it is set to `ipv6`, `client_cidr_block` must not be specified. * `transport_protocol` - (Optional) The transport protocol to be used by the VPN session. Default value is `udp`. * `vpc_id` - (Optional) The ID of the VPC to associate with the Client VPN endpoint. If no security group IDs are specified in the request, the default security group for the VPC is applied. * `vpn_port` - (Optional) The port number for the Client VPN endpoint. Valid values are `443` and `1194`. Default value is `443`. 
@@ -134,4 +137,4 @@ Using `terraform import`, import AWS Client VPN endpoints using the `id` value f % terraform import aws_ec2_client_vpn_endpoint.example cvpn-endpoint-0ac3a1abbccddd666 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/ec2_client_vpn_network_association.html.markdown b/website/docs/cdktf/python/r/ec2_client_vpn_network_association.html.markdown index 7987a6692788..24b3a19cdbae 100644 --- a/website/docs/cdktf/python/r/ec2_client_vpn_network_association.html.markdown +++ b/website/docs/cdktf/python/r/ec2_client_vpn_network_association.html.markdown @@ -37,6 +37,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `client_vpn_endpoint_id` - (Required) The ID of the Client VPN endpoint. * `subnet_id` - (Required) The ID of the subnet to associate with the Client VPN endpoint. 
@@ -80,4 +81,4 @@ Using `terraform import`, import AWS Client VPN network associations using the e % terraform import aws_ec2_client_vpn_network_association.example cvpn-endpoint-0ac3a1abbccddd666,cvpn-assoc-0b8db902465d069ad ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/ec2_client_vpn_route.html.markdown b/website/docs/cdktf/python/r/ec2_client_vpn_route.html.markdown index 1b965762af4c..d1c1f827fe46 100644 --- a/website/docs/cdktf/python/r/ec2_client_vpn_route.html.markdown +++ b/website/docs/cdktf/python/r/ec2_client_vpn_route.html.markdown @@ -62,6 +62,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `client_vpn_endpoint_id` - (Required) The ID of the Client VPN endpoint. * `destination_cidr_block` - (Required) The IPv4 address range, in CIDR notation, of the route destination. * `description` - (Optional) A brief description of the route. 
@@ -107,4 +108,4 @@ Using `terraform import`, import AWS Client VPN routes using the endpoint ID, ta % terraform import aws_ec2_client_vpn_route.example cvpn-endpoint-1234567890abcdef,subnet-9876543210fedcba,10.1.0.0/24 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/ec2_default_credit_specification.html.markdown b/website/docs/cdktf/python/r/ec2_default_credit_specification.html.markdown index 3e49d20c0799..27f515bfa37b 100644 --- a/website/docs/cdktf/python/r/ec2_default_credit_specification.html.markdown +++ b/website/docs/cdktf/python/r/ec2_default_credit_specification.html.markdown @@ -23,7 +23,7 @@ from cdktf import TerraformStack # Provider bindings are generated by running `cdktf get`. # See https://cdk.tf/provider-generation for more details. # -from imports.aws. import Ec2DefaultCreditSpecification +from imports.aws.ec2_default_credit_specification import Ec2DefaultCreditSpecification class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) @@ -35,8 +35,9 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -The following arguments are required: +This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `cpu_credits` - (Required) Credit option for CPU usage of the instance family. Valid values: `standard`, `unlimited`. * `instance_family` - (Required) Instance family. Valid values are `t2`, `t3`, `t3a`, `t4g`. @@ -63,7 +64,7 @@ from cdktf import TerraformStack # Provider bindings are generated by running `cdktf get`. # See https://cdk.tf/provider-generation for more details. # -from imports.aws. 
import Ec2DefaultCreditSpecification +from imports.aws.ec2_default_credit_specification import Ec2DefaultCreditSpecification class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) @@ -75,4 +76,4 @@ Using `terraform import`, import EC2 (Elastic Compute Cloud) Default Credit Spec ```console % terraform import aws_ec2_default_credit_specification.example t2 - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/ec2_fleet.html.markdown b/website/docs/cdktf/python/r/ec2_fleet.html.markdown index c7b520edb2b2..7db81cb9f37e 100644 --- a/website/docs/cdktf/python/r/ec2_fleet.html.markdown +++ b/website/docs/cdktf/python/r/ec2_fleet.html.markdown @@ -45,6 +45,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `context` - (Optional) Reserved. * `excess_capacity_termination_policy` - (Optional) Whether running instances should be terminated if the total target capacity of the EC2 Fleet is decreased below the current size of the EC2. Valid values: `no-termination`, `termination`. Defaults to `termination`. Supported only for fleets of type `maintain`. * `launch_template_config` - (Required) Nested argument containing EC2 Launch Template configurations. Defined below. @@ -270,4 +271,4 @@ Using `terraform import`, import `aws_ec2_fleet` using the Fleet identifier. 
For % terraform import aws_ec2_fleet.example fleet-b9b55d27-c5fc-41ac-a6f3-48fcc91f080c ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/ec2_host.html.markdown b/website/docs/cdktf/python/r/ec2_host.html.markdown index 0a7b8c435200..06b5df23cbe4 100644 --- a/website/docs/cdktf/python/r/ec2_host.html.markdown +++ b/website/docs/cdktf/python/r/ec2_host.html.markdown @@ -38,6 +38,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `asset_id` - (Optional) The ID of the Outpost hardware asset on which to allocate the Dedicated Hosts. This parameter is supported only if you specify OutpostArn. If you are allocating the Dedicated Hosts in a Region, omit this parameter. * `auto_placement` - (Optional) Indicates whether the host accepts any untargeted instance launches that match its instance type configuration, or if it only accepts Host tenancy instance launches that specify its unique host ID. Valid values: `on`, `off`. Default: `on`. * `availability_zone` - (Required) The Availability Zone in which to allocate the Dedicated Host. @@ -89,4 +90,4 @@ Using `terraform import`, import hosts using the host `id`. 
For example: % terraform import aws_ec2_host.example h-0385a99d0e4b20cbb ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/ec2_instance_connect_endpoint.html.markdown b/website/docs/cdktf/python/r/ec2_instance_connect_endpoint.html.markdown index 72487bb18a03..588eee128048 100644 --- a/website/docs/cdktf/python/r/ec2_instance_connect_endpoint.html.markdown +++ b/website/docs/cdktf/python/r/ec2_instance_connect_endpoint.html.markdown @@ -35,6 +35,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `preserve_client_ip` - (Optional) Indicates whether your client's IP address is preserved as the source. Default: `true`. * `security_group_ids` - (Optional) One or more security groups to associate with the endpoint. If you don't specify a security group, the default security group for the VPC will be associated with the endpoint. * `subnet_id` - (Required) The ID of the subnet in which to create the EC2 Instance Connect Endpoint. @@ -85,4 +86,4 @@ Using `terraform import`, import EC2 Instance Connect Endpoints using the `id`. 
% terraform import aws_ec2_instance_connect_endpoint.example eice-012345678 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/ec2_instance_metadata_defaults.html.markdown b/website/docs/cdktf/python/r/ec2_instance_metadata_defaults.html.markdown index 22f1481a8c3c..5531486fc2d4 100644 --- a/website/docs/cdktf/python/r/ec2_instance_metadata_defaults.html.markdown +++ b/website/docs/cdktf/python/r/ec2_instance_metadata_defaults.html.markdown @@ -37,6 +37,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `http_endpoint` - (Optional) Whether the metadata service is available. Can be `"enabled"`, `"disabled"`, or `"no-preference"`. Default: `"no-preference"`. * `http_tokens` - (Optional) Whether the metadata service requires session tokens, also referred to as _Instance Metadata Service Version 2 (IMDSv2)_. Can be `"optional"`, `"required"`, or `"no-preference"`. Default: `"no-preference"`. * `http_put_response_hop_limit` - (Optional) The desired HTTP PUT response hop limit for instance metadata requests. The larger the number, the further instance metadata requests can travel. Can be an integer from `1` to `64`, or `-1` to indicate no preference. Default: `-1`. @@ -50,4 +51,4 @@ This data source exports no additional attributes. You cannot import this resource. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/ec2_instance_state.html.markdown b/website/docs/cdktf/python/r/ec2_instance_state.html.markdown index 36e690233001..8191d0803f4d 100644 --- a/website/docs/cdktf/python/r/ec2_instance_state.html.markdown +++ b/website/docs/cdktf/python/r/ec2_instance_state.html.markdown @@ -66,6 +66,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `force` - (Optional) Whether to request a forced stop when `state` is `stopped`. Otherwise (_i.e._, `state` is `running`), ignored. When an instance is forced to stop, it does not flush file system caches or file system metadata, and you must subsequently perform file system check and repair. Not recommended for Windows instances. Defaults to `false`. 
## Attribute Reference @@ -107,4 +108,4 @@ Using `terraform import`, import `aws_ec2_instance_state` using the `instance_id % terraform import aws_ec2_instance_state.test i-02cae6557dfcf2f96 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/ec2_local_gateway_route.html.markdown b/website/docs/cdktf/python/r/ec2_local_gateway_route.html.markdown index c590809734ae..5ff6870d2566 100644 --- a/website/docs/cdktf/python/r/ec2_local_gateway_route.html.markdown +++ b/website/docs/cdktf/python/r/ec2_local_gateway_route.html.markdown @@ -35,8 +35,9 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -The following arguments are required: +This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `destination_cidr_block` - (Required) IPv4 CIDR range used for destination matches. Routing decisions are based on the most specific match. * `local_gateway_route_table_id` - (Required) Identifier of EC2 Local Gateway Route Table. * `local_gateway_virtual_interface_group_id` - (Required) Identifier of EC2 Local Gateway Virtual Interface Group. 
@@ -72,4 +73,4 @@ Using `terraform import`, import `aws_ec2_local_gateway_route` using the EC2 Loc % terraform import aws_ec2_local_gateway_route.example lgw-rtb-12345678_172.16.0.0/16 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/ec2_local_gateway_route_table_vpc_association.html.markdown b/website/docs/cdktf/python/r/ec2_local_gateway_route_table_vpc_association.html.markdown index cce5af9d84d8..444c77e9c2fc 100644 --- a/website/docs/cdktf/python/r/ec2_local_gateway_route_table_vpc_association.html.markdown +++ b/website/docs/cdktf/python/r/ec2_local_gateway_route_table_vpc_association.html.markdown @@ -55,6 +55,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `tags` - (Optional) Key-value map of resource tags. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. 
## Attribute Reference @@ -89,4 +90,4 @@ Using `terraform import`, import `aws_ec2_local_gateway_route_table_vpc_associat % terraform import aws_ec2_local_gateway_route_table_vpc_association.example lgw-vpc-assoc-1234567890abcdef ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/ec2_managed_prefix_list.html.markdown b/website/docs/cdktf/python/r/ec2_managed_prefix_list.html.markdown index 85efe476ea8d..ae86279a3d5d 100644 --- a/website/docs/cdktf/python/r/ec2_managed_prefix_list.html.markdown +++ b/website/docs/cdktf/python/r/ec2_managed_prefix_list.html.markdown @@ -62,6 +62,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `address_family` - (Required, Forces new resource) Address family (`IPv4` or `IPv6`) of this prefix list. * `entry` - (Optional) Configuration block for prefix list entry. Detailed below. Different entries may have overlapping CIDR blocks, but a particular CIDR should not be duplicated. * `max_entries` - (Required) Maximum number of entries that this prefix list can contain. @@ -108,4 +109,4 @@ Using `terraform import`, import Prefix Lists using the `id`. 
For example: % terraform import aws_ec2_managed_prefix_list.default pl-0570a1d2d725c16be ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/ec2_managed_prefix_list_entry.html.markdown b/website/docs/cdktf/python/r/ec2_managed_prefix_list_entry.html.markdown index 90cf1e441870..b9ee322877e7 100644 --- a/website/docs/cdktf/python/r/ec2_managed_prefix_list_entry.html.markdown +++ b/website/docs/cdktf/python/r/ec2_managed_prefix_list_entry.html.markdown @@ -52,6 +52,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `cidr` - (Required) CIDR block of this entry. * `description` - (Optional) Description of this entry. Please note that due to API limitations, updating only the description of an entry will require recreating the entry. * `prefix_list_id` - (Required) The ID of the prefix list. 
@@ -87,4 +88,4 @@ Using `terraform import`, import prefix list entries using `prefix_list_id` and % terraform import aws_ec2_managed_prefix_list_entry.default pl-0570a1d2d725c16be,10.0.3.0/24 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/ec2_network_insights_analysis.html.markdown b/website/docs/cdktf/python/r/ec2_network_insights_analysis.html.markdown index 24be9c94b660..aa4defdfb8af 100644 --- a/website/docs/cdktf/python/r/ec2_network_insights_analysis.html.markdown +++ b/website/docs/cdktf/python/r/ec2_network_insights_analysis.html.markdown @@ -45,6 +45,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `filter_in_arns` - (Optional) A list of ARNs for resources the path must traverse. * `wait_for_completion` - (Optional) If enabled, the resource will wait for the Network Insights Analysis status to change to `succeeded` or `failed`. Setting this to `false` will skip the process. Default: `true`. * `tags` - (Optional) Map of tags to assign to the resource. If configured with a provider [`default_tags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. @@ -96,4 +97,4 @@ Using `terraform import`, import Network Insights Analyzes using the `id`. 
For e % terraform import aws_ec2_network_insights_analysis.test nia-0462085c957f11a55 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/ec2_network_insights_path.html.markdown b/website/docs/cdktf/python/r/ec2_network_insights_path.html.markdown index 38f55089d470..7144c5d4215f 100644 --- a/website/docs/cdktf/python/r/ec2_network_insights_path.html.markdown +++ b/website/docs/cdktf/python/r/ec2_network_insights_path.html.markdown @@ -42,6 +42,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `source_ip` - (Optional) IP address of the source resource. * `destination` - (Optional) ID or ARN of the resource which is the destination of the path. Can be an Instance, Internet Gateway, Network Interface, Transit Gateway, VPC Endpoint, VPC Peering Connection or VPN Gateway. If the resource is in another account, you must specify an ARN. Either the `destination` argument or the `destination_address` argument in the `filter_at_source` block must be specified. * `destination_ip` - (Optional) IP address of the destination resource. @@ -97,4 +98,4 @@ Using `terraform import`, import Network Insights Paths using the `id`. 
For exam % terraform import aws_ec2_network_insights_path.test nip-00edfba169923aefd ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/ec2_serial_console_access.html.markdown b/website/docs/cdktf/python/r/ec2_serial_console_access.html.markdown index a5145fc70f02..27010c0a93d0 100644 --- a/website/docs/cdktf/python/r/ec2_serial_console_access.html.markdown +++ b/website/docs/cdktf/python/r/ec2_serial_console_access.html.markdown @@ -37,6 +37,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `enabled` - (Optional) Whether or not serial console access is enabled. Valid values are `true` or `false`. Defaults to `true`. ## Attribute Reference @@ -68,4 +69,4 @@ Using `terraform import`, import serial console access state. For example: % terraform import aws_ec2_serial_console_access.example default ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/ec2_subnet_cidr_reservation.html.markdown b/website/docs/cdktf/python/r/ec2_subnet_cidr_reservation.html.markdown index 8abbc4dc1354..7bd429121576 100644 --- a/website/docs/cdktf/python/r/ec2_subnet_cidr_reservation.html.markdown +++ b/website/docs/cdktf/python/r/ec2_subnet_cidr_reservation.html.markdown @@ -37,6 +37,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `cidr_block` - (Required) The CIDR block for the reservation. * `reservation_type` - (Required) The type of reservation to create. Valid values: `explicit`, `prefix` * `subnet_id` - (Required) The ID of the subnet to create the reservation for. @@ -74,4 +75,4 @@ Using `terraform import`, import Existing CIDR reservations using `SUBNET_ID:RES % terraform import aws_ec2_subnet_cidr_reservation.example subnet-01llsxvsxabqiymcz:scr-4mnvz6wb7otksjcs9 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/ec2_tag.html.markdown b/website/docs/cdktf/python/r/ec2_tag.html.markdown index 6e98dde36f4c..a3d8feb9c32f 100644 --- a/website/docs/cdktf/python/r/ec2_tag.html.markdown +++ b/website/docs/cdktf/python/r/ec2_tag.html.markdown @@ -61,6 +61,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `resource_id` - (Required) The ID of the EC2 resource to manage the tag for. * `key` - (Required) The tag name. * `value` - (Required) The value of the tag. 
@@ -96,4 +97,4 @@ Using `terraform import`, import `aws_ec2_tag` using the EC2 resource identifier % terraform import aws_ec2_tag.example tgw-attach-1234567890abcdef,Name ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/ec2_traffic_mirror_filter.html.markdown b/website/docs/cdktf/python/r/ec2_traffic_mirror_filter.html.markdown index ddbc5424004d..af8803532ef9 100644 --- a/website/docs/cdktf/python/r/ec2_traffic_mirror_filter.html.markdown +++ b/website/docs/cdktf/python/r/ec2_traffic_mirror_filter.html.markdown @@ -39,6 +39,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` - (Optional, Forces new resource) A description of the filter. * `network_services` - (Optional) List of amazon network services that should be mirrored. Valid values: `amazon-dns`. * `tags` - (Optional) Key-value map of resource tags. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. @@ -76,4 +77,4 @@ Using `terraform import`, import traffic mirror filter using the `id`. 
For examp % terraform import aws_ec2_traffic_mirror_filter.foo tmf-0fbb93ddf38198f64 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/ec2_traffic_mirror_filter_rule.html.markdown b/website/docs/cdktf/python/r/ec2_traffic_mirror_filter_rule.html.markdown index 8916f34c4e35..0bdcf5e7a02f 100644 --- a/website/docs/cdktf/python/r/ec2_traffic_mirror_filter_rule.html.markdown +++ b/website/docs/cdktf/python/r/ec2_traffic_mirror_filter_rule.html.markdown @@ -67,6 +67,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` - (Optional) Description of the traffic mirror filter rule. * `traffic_mirror_filter_id` - (Required) ID of the traffic mirror filter to which this rule should be added * `destination_cidr_block` - (Required) Destination CIDR block to assign to the Traffic Mirror rule. 
@@ -115,4 +116,4 @@ Using `terraform import`, import traffic mirror rules using the `traffic_mirror_ % terraform import aws_ec2_traffic_mirror_filter_rule.rule tmf-0fbb93ddf38198f64:tmfr-05a458f06445d0aee ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/ec2_traffic_mirror_session.html.markdown b/website/docs/cdktf/python/r/ec2_traffic_mirror_session.html.markdown index f496f81b3ba4..f802fd5d4424 100644 --- a/website/docs/cdktf/python/r/ec2_traffic_mirror_session.html.markdown +++ b/website/docs/cdktf/python/r/ec2_traffic_mirror_session.html.markdown @@ -51,6 +51,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` - (Optional) A description of the traffic mirror session. * `network_interface_id` - (Required, Forces new) ID of the source network interface. Not all network interfaces are eligible as mirror sources. On EC2 instances only nitro based instances support mirroring. * `traffic_mirror_filter_id` - (Required) ID of the traffic mirror filter to be used @@ -94,4 +95,4 @@ Using `terraform import`, import traffic mirror sessions using the `id`. 
For exa % terraform import aws_ec2_traffic_mirror_session.session tms-0d8aa3ca35897b82e ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/ec2_traffic_mirror_target.html.markdown b/website/docs/cdktf/python/r/ec2_traffic_mirror_target.html.markdown index 8c803466f6c8..2f4736c2424a 100644 --- a/website/docs/cdktf/python/r/ec2_traffic_mirror_target.html.markdown +++ b/website/docs/cdktf/python/r/ec2_traffic_mirror_target.html.markdown @@ -47,6 +47,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` - (Optional, Forces new) A description of the traffic mirror session. * `network_interface_id` - (Optional, Forces new) The network interface ID that is associated with the target. * `network_load_balancer_arn` - (Optional, Forces new) The Amazon Resource Name (ARN) of the Network Load Balancer that is associated with the target. @@ -89,4 +90,4 @@ Using `terraform import`, import traffic mirror targets using the `id`. 
For exam % terraform import aws_ec2_traffic_mirror_target.target tmt-0c13a005422b86606 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/ec2_transit_gateway.html.markdown b/website/docs/cdktf/python/r/ec2_transit_gateway.html.markdown index 5eef0f2c4578..2853e8fd3bdd 100644 --- a/website/docs/cdktf/python/r/ec2_transit_gateway.html.markdown +++ b/website/docs/cdktf/python/r/ec2_transit_gateway.html.markdown @@ -35,6 +35,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `amazon_side_asn` - (Optional) Private Autonomous System Number (ASN) for the Amazon side of a BGP session. The range is `64512` to `65534` for 16-bit ASNs and `4200000000` to `4294967294` for 32-bit ASNs. Default value: `64512`. -> **NOTE:** Modifying `amazon_side_asn` on a Transit Gateway with active BGP sessions is [not allowed](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_ModifyTransitGatewayOptions.html). You must first delete all Transit Gateway attachments that have BGP configured prior to modifying `amazon_side_asn`. 
@@ -94,4 +95,4 @@ Using `terraform import`, import `aws_ec2_transit_gateway` using the EC2 Transit % terraform import aws_ec2_transit_gateway.example tgw-12345678 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/ec2_transit_gateway_connect.html.markdown b/website/docs/cdktf/python/r/ec2_transit_gateway_connect.html.markdown index 1377a015fd1b..1e9668fe7877 100644 --- a/website/docs/cdktf/python/r/ec2_transit_gateway_connect.html.markdown +++ b/website/docs/cdktf/python/r/ec2_transit_gateway_connect.html.markdown @@ -42,6 +42,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `protocol` - (Optional) The tunnel protocol. Valid values: `gre`. Default is `gre`. * `tags` - (Optional) Key-value tags for the EC2 Transit Gateway Connect. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. * `transit_gateway_default_route_table_association` - (Optional) Boolean whether the Connect should be associated with the EC2 Transit Gateway association default route table. This cannot be configured or perform drift detection with Resource Access Manager shared EC2 Transit Gateways. Default value: `true`. 
@@ -89,4 +90,4 @@ Using `terraform import`, import `aws_ec2_transit_gateway_connect` using the EC2 % terraform import aws_ec2_transit_gateway_connect.example tgw-attach-12345678 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/ec2_transit_gateway_connect_peer.html.markdown b/website/docs/cdktf/python/r/ec2_transit_gateway_connect_peer.html.markdown index e01d3f6d1753..bc14a1674d01 100644 --- a/website/docs/cdktf/python/r/ec2_transit_gateway_connect_peer.html.markdown +++ b/website/docs/cdktf/python/r/ec2_transit_gateway_connect_peer.html.markdown @@ -45,6 +45,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `bgp_asn` - (Optional) The BGP ASN number assigned customer device. If not provided, it will use the same BGP ASN as is associated with Transit Gateway. * `inside_cidr_blocks` - (Required) The CIDR block that will be used for addressing within the tunnel. It must contain exactly one IPv4 CIDR block and up to one IPv6 CIDR block. The IPv4 CIDR block must be /29 size and must be within 169.254.0.0/16 range, with exception of: 169.254.0.0/29, 169.254.1.0/29, 169.254.2.0/29, 169.254.3.0/29, 169.254.4.0/29, 169.254.5.0/29, 169.254.169.248/29. The IPv6 CIDR block must be /125 size and must be within fd00::/8. The first IP from each CIDR block is assigned for customer gateway, the second and third is for Transit Gateway (An example: from range 169.254.100.0/29, .1 is assigned to customer gateway and .2 and .3 are assigned to Transit Gateway) * `peer_address` - (Required) The IP addressed assigned to customer device, which will be used as tunnel endpoint. 
It can be IPv4 or IPv6 address, but must be the same address family as `transit_gateway_address` @@ -94,4 +95,4 @@ Using `terraform import`, import `aws_ec2_transit_gateway_connect_peer` using th % terraform import aws_ec2_transit_gateway_connect_peer.example tgw-connect-peer-12345678 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/ec2_transit_gateway_default_route_table_association.html.markdown b/website/docs/cdktf/python/r/ec2_transit_gateway_default_route_table_association.html.markdown index bdd9930bfd1e..c5617058776c 100644 --- a/website/docs/cdktf/python/r/ec2_transit_gateway_default_route_table_association.html.markdown +++ b/website/docs/cdktf/python/r/ec2_transit_gateway_default_route_table_association.html.markdown @@ -35,8 +35,9 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -The following arguments are required: +This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `transit_gateway_id` - (Required) ID of the Transit Gateway to change the default association route table on. * `transit_gateway_route_table_id` - (Required) ID of the Transit Gateway Route Table to be made the default association route table. @@ -52,4 +53,4 @@ This resource exports no additional attributes. 
* `update` - (Default `5m`) * `delete` - (Default `5m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/ec2_transit_gateway_default_route_table_propagation.html.markdown b/website/docs/cdktf/python/r/ec2_transit_gateway_default_route_table_propagation.html.markdown index 3b6b12ce17a6..2eb334f8788e 100644 --- a/website/docs/cdktf/python/r/ec2_transit_gateway_default_route_table_propagation.html.markdown +++ b/website/docs/cdktf/python/r/ec2_transit_gateway_default_route_table_propagation.html.markdown @@ -35,8 +35,9 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -The following arguments are required: +This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `transit_gateway_id` - (Required) ID of the Transit Gateway to change the default association route table on. * `transit_gateway_route_table_id` - (Required) ID of the Transit Gateway Route Table to be made the default association route table. @@ -52,4 +53,4 @@ This resource exports no additional attributes. 
* `update` - (Default `5m`) * `delete` - (Default `5m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/ec2_transit_gateway_multicast_domain.html.markdown b/website/docs/cdktf/python/r/ec2_transit_gateway_multicast_domain.html.markdown index 234308bc1df1..ccc047cb9e11 100644 --- a/website/docs/cdktf/python/r/ec2_transit_gateway_multicast_domain.html.markdown +++ b/website/docs/cdktf/python/r/ec2_transit_gateway_multicast_domain.html.markdown @@ -143,6 +143,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `transit_gateway_id` - (Required) EC2 Transit Gateway identifier. The EC2 Transit Gateway must have `multicast_support` enabled. * `auto_accept_shared_associations` - (Optional) Whether to automatically accept cross-account subnet associations that are associated with the EC2 Transit Gateway Multicast Domain. Valid values: `disable`, `enable`. Default value: `disable`. * `igmpv2_support` - (Optional) Whether to enable Internet Group Management Protocol (IGMP) version 2 for the EC2 Transit Gateway Multicast Domain. Valid values: `disable`, `enable`. Default value: `disable`. 
@@ -190,4 +191,4 @@ Using `terraform import`, import `aws_ec2_transit_gateway_multicast_domain` usin % terraform import aws_ec2_transit_gateway_multicast_domain.example tgw-mcast-domain-12345 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/ec2_transit_gateway_multicast_domain_association.html.markdown b/website/docs/cdktf/python/r/ec2_transit_gateway_multicast_domain_association.html.markdown index bce08a6b5552..f7477a2b6e7d 100644 --- a/website/docs/cdktf/python/r/ec2_transit_gateway_multicast_domain_association.html.markdown +++ b/website/docs/cdktf/python/r/ec2_transit_gateway_multicast_domain_association.html.markdown @@ -60,6 +60,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `subnet_id` - (Required) The ID of the subnet to associate with the transit gateway multicast domain. * `transit_gateway_attachment_id` - (Required) The ID of the transit gateway attachment. * `transit_gateway_multicast_domain_id` - (Required) The ID of the transit gateway multicast domain. 
@@ -77,4 +78,4 @@ This resource exports the following attributes in addition to the arguments abov - `create` - (Default `10m`) - `delete` - (Default `10m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/ec2_transit_gateway_multicast_group_member.html.markdown b/website/docs/cdktf/python/r/ec2_transit_gateway_multicast_group_member.html.markdown index 3a4f7cce3f09..394c9e46d076 100644 --- a/website/docs/cdktf/python/r/ec2_transit_gateway_multicast_group_member.html.markdown +++ b/website/docs/cdktf/python/r/ec2_transit_gateway_multicast_group_member.html.markdown @@ -38,6 +38,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `group_ip_address` - (Required) The IP address assigned to the transit gateway multicast group. * `network_interface_id` - (Required) The group members' network interface ID to register with the transit gateway multicast group. * `transit_gateway_multicast_domain_id` - (Required) The ID of the transit gateway multicast domain. @@ -48,4 +49,4 @@ This resource exports the following attributes in addition to the arguments abov * `id` - EC2 Transit Gateway Multicast Group Member identifier. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/ec2_transit_gateway_multicast_group_source.html.markdown b/website/docs/cdktf/python/r/ec2_transit_gateway_multicast_group_source.html.markdown index 824190e8ff1e..348e0082b073 100644 --- a/website/docs/cdktf/python/r/ec2_transit_gateway_multicast_group_source.html.markdown +++ b/website/docs/cdktf/python/r/ec2_transit_gateway_multicast_group_source.html.markdown @@ -38,6 +38,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `group_ip_address` - (Required) The IP address assigned to the transit gateway multicast group. * `network_interface_id` - (Required) The group members' network interface ID to register with the transit gateway multicast group. * `transit_gateway_multicast_domain_id` - (Required) The ID of the transit gateway multicast domain. @@ -48,4 +49,4 @@ This resource exports the following attributes in addition to the arguments abov * `id` - EC2 Transit Gateway Multicast Group Member identifier. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/ec2_transit_gateway_peering_attachment.html.markdown b/website/docs/cdktf/python/r/ec2_transit_gateway_peering_attachment.html.markdown index 700c324f43c1..22c64a7016fd 100644 --- a/website/docs/cdktf/python/r/ec2_transit_gateway_peering_attachment.html.markdown +++ b/website/docs/cdktf/python/r/ec2_transit_gateway_peering_attachment.html.markdown @@ -72,6 +72,7 @@ A full example of how to create a Transit Gateway in one AWS account, share it w This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `peer_account_id` - (Optional) Account ID of EC2 Transit Gateway to peer with. Defaults to the account ID the [AWS provider][1] is currently connected to. * `peer_region` - (Required) Region of EC2 Transit Gateway to peer with. * `peer_transit_gateway_id` - (Required) Identifier of EC2 Transit Gateway to peer with. 
@@ -120,4 +121,4 @@ Using `terraform import`, import `aws_ec2_transit_gateway_peering_attachment` us [1]: /docs/providers/aws/index.html - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/ec2_transit_gateway_peering_attachment_accepter.html.markdown b/website/docs/cdktf/python/r/ec2_transit_gateway_peering_attachment_accepter.html.markdown index bc6c9ddfe6be..8722b6ca5ad4 100644 --- a/website/docs/cdktf/python/r/ec2_transit_gateway_peering_attachment_accepter.html.markdown +++ b/website/docs/cdktf/python/r/ec2_transit_gateway_peering_attachment_accepter.html.markdown @@ -40,6 +40,7 @@ A full example of how to create a Transit Gateway in one AWS account, share it w This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `transit_gateway_attachment_id` - (Required) The ID of the EC2 Transit Gateway Peering Attachment to manage. * `tags` - (Optional) Key-value tags for the EC2 Transit Gateway Peering Attachment. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. 
@@ -78,4 +79,4 @@ Using `terraform import`, import `aws_ec2_transit_gateway_peering_attachment_acc % terraform import aws_ec2_transit_gateway_peering_attachment_accepter.example tgw-attach-12345678 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/ec2_transit_gateway_policy_table.html.markdown b/website/docs/cdktf/python/r/ec2_transit_gateway_policy_table.html.markdown index 9d4ef3d85f71..096924e782b7 100644 --- a/website/docs/cdktf/python/r/ec2_transit_gateway_policy_table.html.markdown +++ b/website/docs/cdktf/python/r/ec2_transit_gateway_policy_table.html.markdown @@ -38,6 +38,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `transit_gateway_id` - (Required) EC2 Transit Gateway identifier. * `tags` - (Optional) Key-value tags for the EC2 Transit Gateway Policy Table. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. 
@@ -75,4 +76,4 @@ Using `terraform import`, import `aws_ec2_transit_gateway_policy_table` using th % terraform import aws_ec2_transit_gateway_policy_table.example tgw-rtb-12345678 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/ec2_transit_gateway_policy_table_association.html.markdown b/website/docs/cdktf/python/r/ec2_transit_gateway_policy_table_association.html.markdown index bffe7caf4fc1..9d8d39d35b63 100644 --- a/website/docs/cdktf/python/r/ec2_transit_gateway_policy_table_association.html.markdown +++ b/website/docs/cdktf/python/r/ec2_transit_gateway_policy_table_association.html.markdown @@ -36,6 +36,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `transit_gateway_attachment_id` - (Required) Identifier of EC2 Transit Gateway Attachment. * `transit_gateway_policy_table_id` - (Required) Identifier of EC2 Transit Gateway Policy Table. 
@@ -72,4 +73,4 @@ Using `terraform import`, import `aws_ec2_transit_gateway_policy_table_associati % terraform import aws_ec2_transit_gateway_policy_table_association.example tgw-rtb-12345678_tgw-attach-87654321 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/ec2_transit_gateway_prefix_list_reference.html.markdown b/website/docs/cdktf/python/r/ec2_transit_gateway_prefix_list_reference.html.markdown index f497f1dbc0d3..94ce26d14cc8 100644 --- a/website/docs/cdktf/python/r/ec2_transit_gateway_prefix_list_reference.html.markdown +++ b/website/docs/cdktf/python/r/ec2_transit_gateway_prefix_list_reference.html.markdown @@ -65,6 +65,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `blackhole` - (Optional) Indicates whether to drop traffic that matches the Prefix List. Defaults to `false`. * `transit_gateway_attachment_id` - (Optional) Identifier of EC2 Transit Gateway Attachment. 
@@ -99,4 +100,4 @@ Using `terraform import`, import `aws_ec2_transit_gateway_prefix_list_reference` % terraform import aws_ec2_transit_gateway_prefix_list_reference.example tgw-rtb-12345678_pl-12345678 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/ec2_transit_gateway_route.html.markdown b/website/docs/cdktf/python/r/ec2_transit_gateway_route.html.markdown index 28135adfd164..df61058e9fca 100644 --- a/website/docs/cdktf/python/r/ec2_transit_gateway_route.html.markdown +++ b/website/docs/cdktf/python/r/ec2_transit_gateway_route.html.markdown @@ -60,6 +60,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `destination_cidr_block` - (Required) IPv4 or IPv6 RFC1924 CIDR used for destination matches. Routing decisions are based on the most specific match. * `transit_gateway_attachment_id` - (Optional) Identifier of EC2 Transit Gateway Attachment (required if `blackhole` is set to false). * `blackhole` - (Optional) Indicates whether to drop traffic that matches this route (default to `false`). 
@@ -96,4 +97,4 @@ Using `terraform import`, import `aws_ec2_transit_gateway_route` using the EC2 T % terraform import aws_ec2_transit_gateway_route.example tgw-rtb-12345678_0.0.0.0/0 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/ec2_transit_gateway_route_table.html.markdown b/website/docs/cdktf/python/r/ec2_transit_gateway_route_table.html.markdown index e2b5eb9da98a..49f43a79e2e6 100644 --- a/website/docs/cdktf/python/r/ec2_transit_gateway_route_table.html.markdown +++ b/website/docs/cdktf/python/r/ec2_transit_gateway_route_table.html.markdown @@ -35,6 +35,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `transit_gateway_id` - (Required) Identifier of EC2 Transit Gateway. * `tags` - (Optional) Key-value tags for the EC2 Transit Gateway Route Table. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. 
@@ -73,4 +74,4 @@ Using `terraform import`, import `aws_ec2_transit_gateway_route_table` using the % terraform import aws_ec2_transit_gateway_route_table.example tgw-rtb-12345678 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/ec2_transit_gateway_route_table_association.html.markdown b/website/docs/cdktf/python/r/ec2_transit_gateway_route_table_association.html.markdown index 34b68757f82e..b590b2679664 100644 --- a/website/docs/cdktf/python/r/ec2_transit_gateway_route_table_association.html.markdown +++ b/website/docs/cdktf/python/r/ec2_transit_gateway_route_table_association.html.markdown @@ -36,6 +36,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `transit_gateway_attachment_id` - (Required) Identifier of EC2 Transit Gateway Attachment. * `transit_gateway_route_table_id` - (Required) Identifier of EC2 Transit Gateway Route Table. * `replace_existing_association` - (Optional) Boolean whether the Gateway Attachment should remove any current Route Table association before associating with the specified Route Table. Default value: `false`. This argument is intended for use with EC2 Transit Gateways shared into the current account, otherwise the `transit_gateway_default_route_table_association` argument of the `aws_ec2_transit_gateway_vpc_attachment` resource should be used. 
@@ -73,4 +74,4 @@ Using `terraform import`, import `aws_ec2_transit_gateway_route_table_associatio % terraform import aws_ec2_transit_gateway_route_table_association.example tgw-rtb-12345678_tgw-attach-87654321 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/ec2_transit_gateway_route_table_propagation.html.markdown b/website/docs/cdktf/python/r/ec2_transit_gateway_route_table_propagation.html.markdown index 61ffd4dbc36b..888bddc5f2c4 100644 --- a/website/docs/cdktf/python/r/ec2_transit_gateway_route_table_propagation.html.markdown +++ b/website/docs/cdktf/python/r/ec2_transit_gateway_route_table_propagation.html.markdown @@ -36,6 +36,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `transit_gateway_attachment_id` - (Required) Identifier of EC2 Transit Gateway Attachment. * `transit_gateway_route_table_id` - (Required) Identifier of EC2 Transit Gateway Route Table. 
@@ -72,4 +73,4 @@ Using `terraform import`, import `aws_ec2_transit_gateway_route_table_propagatio % terraform import aws_ec2_transit_gateway_route_table_propagation.example tgw-rtb-12345678_tgw-attach-87654321 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/ec2_transit_gateway_vpc_attachment.html.markdown b/website/docs/cdktf/python/r/ec2_transit_gateway_vpc_attachment.html.markdown index 341aa7c453fa..8319d5634fee 100644 --- a/website/docs/cdktf/python/r/ec2_transit_gateway_vpc_attachment.html.markdown +++ b/website/docs/cdktf/python/r/ec2_transit_gateway_vpc_attachment.html.markdown @@ -39,6 +39,7 @@ A full example of how to create a Transit Gateway in one AWS account, share it w This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `subnet_ids` - (Required) Identifiers of EC2 Subnets. * `transit_gateway_id` - (Required) Identifier of EC2 Transit Gateway. * `vpc_id` - (Required) Identifier of EC2 VPC. 
@@ -84,4 +85,4 @@ Using `terraform import`, import `aws_ec2_transit_gateway_vpc_attachment` using % terraform import aws_ec2_transit_gateway_vpc_attachment.example tgw-attach-12345678 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/ec2_transit_gateway_vpc_attachment_accepter.html.markdown b/website/docs/cdktf/python/r/ec2_transit_gateway_vpc_attachment_accepter.html.markdown index 66f43fa1baca..38314444ec35 100644 --- a/website/docs/cdktf/python/r/ec2_transit_gateway_vpc_attachment_accepter.html.markdown +++ b/website/docs/cdktf/python/r/ec2_transit_gateway_vpc_attachment_accepter.html.markdown @@ -46,6 +46,7 @@ A full example of how to create a Transit Gateway in one AWS account, share it w This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `transit_gateway_attachment_id` - (Required) The ID of the EC2 Transit Gateway Attachment to manage. * `transit_gateway_default_route_table_association` - (Optional) Boolean whether the VPC Attachment should be associated with the EC2 Transit Gateway association default route table. Default value: `true`. * `transit_gateway_default_route_table_propagation` - (Optional) Boolean whether the VPC Attachment should propagate routes with the EC2 Transit Gateway propagation default route table. Default value: `true`. 
@@ -91,4 +92,4 @@ Using `terraform import`, import `aws_ec2_transit_gateway_vpc_attachment_accepte % terraform import aws_ec2_transit_gateway_vpc_attachment_accepter.example tgw-attach-12345678 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/ecr_account_setting.html.markdown b/website/docs/cdktf/python/r/ecr_account_setting.html.markdown index 616ca7a0c1ee..294d20d100c6 100644 --- a/website/docs/cdktf/python/r/ecr_account_setting.html.markdown +++ b/website/docs/cdktf/python/r/ecr_account_setting.html.markdown @@ -58,6 +58,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the account setting. One of: `BASIC_SCAN_TYPE_VERSION`, `REGISTRY_POLICY_SCOPE`. * `value` - (Required) Setting value that is specified. Valid values are: * If `name` is specified as `BASIC_SCAN_TYPE_VERSION`, one of: `AWS_NATIVE`, `CLAIR`. 
@@ -94,4 +95,4 @@ Using `terraform import`, import EMR Security Configurations using the account s % terraform import aws_ecr_account_setting.foo BASIC_SCAN_TYPE_VERSION ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/ecr_lifecycle_policy.html.markdown b/website/docs/cdktf/python/r/ecr_lifecycle_policy.html.markdown index 3452b85fa483..430641cee6a0 100644 --- a/website/docs/cdktf/python/r/ecr_lifecycle_policy.html.markdown +++ b/website/docs/cdktf/python/r/ecr_lifecycle_policy.html.markdown @@ -74,6 +74,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `repository` - (Required) Name of the repository to apply the policy. * `policy` - (Required) The policy document. This is a JSON formatted string. See more details about [Policy Parameters](http://docs.aws.amazon.com/AmazonECR/latest/userguide/LifecyclePolicies.html#lifecycle_policy_parameters) in the official AWS docs. Consider using the [`aws_ecr_lifecycle_policy_document` data_source](/docs/providers/aws/d/ecr_lifecycle_policy_document.html) to generate/manage the JSON document used for the `policy` argument. @@ -86,6 +87,32 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. 
For example: + +```terraform +import { + to = aws_ecr_lifecycle_policy.example + identity = { + repository = "tf-example" + } +} + +resource "aws_ecr_lifecycle_policy" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `repository` - (String) Name of the ECR repository. + +#### Optional + +* `account_id` (String) AWS Account where this resource is managed. +* `region` (String) Region where this resource is managed. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import ECR Lifecycle Policy using the name of the repository. For example: ```python @@ -109,4 +136,4 @@ Using `terraform import`, import ECR Lifecycle Policy using the name of the repo % terraform import aws_ecr_lifecycle_policy.example tf-example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/ecr_pull_through_cache_rule.html.markdown b/website/docs/cdktf/python/r/ecr_pull_through_cache_rule.html.markdown index 627d7d93d055..9e9c10d10b68 100644 --- a/website/docs/cdktf/python/r/ecr_pull_through_cache_rule.html.markdown +++ b/website/docs/cdktf/python/r/ecr_pull_through_cache_rule.html.markdown @@ -40,6 +40,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `credential_arn` - (Optional) ARN of the Secret which will be used to authenticate against the registry. * `custom_role_arn` - (Optional) The ARN of the IAM role associated with the pull through cache rule. Must be specified if the upstream registry is a cross-account ECR private registry. 
See [AWS Document - Setting up permissions for cross-account ECR to ECR PTC](https://docs.aws.amazon.com/AmazonECR/latest/userguide/pull-through-cache-private.html). * `ecr_repository_prefix` - (Required, Forces new resource) The repository name prefix to use when caching images from the source registry. Use `ROOT` as the prefix to apply a template to all repositories in your registry that don't have an associated pull through cache rule. @@ -77,4 +78,4 @@ Using `terraform import`, import a pull-through cache rule using the `ecr_reposi % terraform import aws_ecr_pull_through_cache_rule.example ecr-public ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/ecr_registry_policy.html.markdown b/website/docs/cdktf/python/r/ecr_registry_policy.html.markdown index 5d92f4a4d004..7e7700bb21fd 100644 --- a/website/docs/cdktf/python/r/ecr_registry_policy.html.markdown +++ b/website/docs/cdktf/python/r/ecr_registry_policy.html.markdown @@ -47,7 +47,7 @@ class MyConvertedCode(TerraformStack): "Principal": { "AWS": "arn:${" + data_aws_partition_current.partition + "}:iam::${" + current.account_id + "}:root" }, - "Resource": ["arn:${" + data_aws_partition_current.partition + "}:ecr:${" + data_aws_region_current.name + "}:${" + current.account_id + "}:repository/*" + "Resource": ["arn:${" + data_aws_partition_current.partition + "}:ecr:${" + data_aws_region_current.region + "}:${" + current.account_id + "}:repository/*" ], "Sid": "testpolicy" } @@ -61,6 +61,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `policy` - (Required) The policy document. This is a JSON formatted string. 
For more information about building IAM policy documents with Terraform, see the [AWS IAM Policy Document Guide](https://learn.hashicorp.com/terraform/aws/iam-policy) ## Attribute Reference @@ -94,4 +95,4 @@ Using `terraform import`, import ECR Registry Policy using the registry id. For % terraform import aws_ecr_registry_policy.example 123456789012 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/ecr_registry_scanning_configuration.html.markdown b/website/docs/cdktf/python/r/ecr_registry_scanning_configuration.html.markdown index 3bc6c7c73ff9..ea00950a3074 100644 --- a/website/docs/cdktf/python/r/ecr_registry_scanning_configuration.html.markdown +++ b/website/docs/cdktf/python/r/ecr_registry_scanning_configuration.html.markdown @@ -81,6 +81,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +- `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). - `scan_type` - (Required) the scanning type to set for the registry. Can be either `ENHANCED` or `BASIC`. - `rule` - (Optional) One or multiple blocks specifying scanning rules to determine which repository filters are used and at what frequency scanning will occur. See [below for schema](#rule). 
@@ -120,4 +121,4 @@ Using `terraform import`, import ECR Scanning Configurations using the `registry % terraform import aws_ecr_registry_scanning_configuration.example 123456789012 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/ecr_replication_configuration.html.markdown b/website/docs/cdktf/python/r/ecr_replication_configuration.html.markdown index 04765013aa98..be405892173b 100644 --- a/website/docs/cdktf/python/r/ecr_replication_configuration.html.markdown +++ b/website/docs/cdktf/python/r/ecr_replication_configuration.html.markdown @@ -129,6 +129,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `replication_configuration` - (Required) Replication configuration for a registry. See [Replication Configuration](#replication-configuration). ### Replication Configuration @@ -181,4 +182,4 @@ Using `terraform import`, import ECR Replication Configuration using the `regist % terraform import aws_ecr_replication_configuration.service 012345678912 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/ecr_repository.html.markdown b/website/docs/cdktf/python/r/ecr_repository.html.markdown index 76eab30f6ae5..a9b79b90f7af 100644 --- a/website/docs/cdktf/python/r/ecr_repository.html.markdown +++ b/website/docs/cdktf/python/r/ecr_repository.html.markdown @@ -35,15 +35,45 @@ class MyConvertedCode(TerraformStack): ) ``` +### With Image Tag Mutability Exclusion + +```python +# DO NOT EDIT. 
Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.ecr_repository import EcrRepository +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + EcrRepository(self, "example", + image_tag_mutability="IMMUTABLE_WITH_EXCLUSION", + image_tag_mutability_exclusion_filter=[EcrRepositoryImageTagMutabilityExclusionFilter( + filter="latest*", + filter_type="WILDCARD" + ), EcrRepositoryImageTagMutabilityExclusionFilter( + filter="dev-*", + filter_type="WILDCARD" + ) + ], + name="example-repo" + ) +``` + ## Argument Reference This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the repository. * `encryption_configuration` - (Optional) Encryption configuration for the repository. See [below for schema](#encryption_configuration). * `force_delete` - (Optional) If `true`, will delete the repository even if it contains images. Defaults to `false`. -* `image_tag_mutability` - (Optional) The tag mutability setting for the repository. Must be one of: `MUTABLE` or `IMMUTABLE`. Defaults to `MUTABLE`. +* `image_tag_mutability` - (Optional) The tag mutability setting for the repository. Must be one of: `MUTABLE`, `IMMUTABLE`, `IMMUTABLE_WITH_EXCLUSION`, or `MUTABLE_WITH_EXCLUSION`. Defaults to `MUTABLE`. +* `image_tag_mutability_exclusion_filter` - (Optional) Configuration block that defines filters to specify which image tags can override the default tag mutability setting. 
Only applicable when `image_tag_mutability` is set to `IMMUTABLE_WITH_EXCLUSION` or `MUTABLE_WITH_EXCLUSION`. See [below for schema](#image_tag_mutability_exclusion_filter). * `image_scanning_configuration` - (Optional) Configuration block that defines image scanning configuration for the repository. By default, image scanning must be manually triggered. See the [ECR User Guide](https://docs.aws.amazon.com/AmazonECR/latest/userguide/image-scanning.html) for more information about image scanning. * `scan_on_push` - (Required) Indicates whether images are scanned after being pushed to the repository (true) or not scanned (false). * `tags` - (Optional) A map of tags to assign to the resource. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. @@ -53,6 +83,11 @@ This resource supports the following arguments: * `encryption_type` - (Optional) The encryption type to use for the repository. Valid values are `AES256` or `KMS`. Defaults to `AES256`. * `kms_key` - (Optional) The ARN of the KMS key to use when `encryption_type` is `KMS`. If not specified, uses the default AWS managed key for ECR. +### image_tag_mutability_exclusion_filter + +* `filter` - (Required) The filter pattern to use for excluding image tags from the mutability setting. Must contain only letters, numbers, and special characters (._*-). Each filter can be up to 128 characters long and can contain a maximum of 2 wildcards (*). +* `filter_type` - (Required) The type of filter to use. Must be `WILDCARD`. 
+ ## Attribute Reference This resource exports the following attributes in addition to the arguments above: @@ -70,6 +105,32 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_ecr_repository.service + identity = { + name = "test-service" + } +} + +resource "aws_ecr_repository" "service" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `name` - (String) Name of the ECR repository. + +#### Optional + +* `account_id` (String) AWS Account where this resource is managed. +* `region` (String) Region where this resource is managed. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import ECR Repositories using the `name`. For example: ```python @@ -93,4 +154,4 @@ Using `terraform import`, import ECR Repositories using the `name`. For example: % terraform import aws_ecr_repository.service test-service ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/ecr_repository_creation_template.html.markdown b/website/docs/cdktf/python/r/ecr_repository_creation_template.html.markdown index ce186db14c03..802747a5209d 100644 --- a/website/docs/cdktf/python/r/ecr_repository_creation_template.html.markdown +++ b/website/docs/cdktf/python/r/ecr_repository_creation_template.html.markdown @@ -66,12 +66,14 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `prefix` - (Required, Forces new resource) The repository name prefix to match against. Use `ROOT` to match any prefix that doesn't explicitly match another template. * `applied_for` - (Required) Which features this template applies to. Must contain one or more of `PULL_THROUGH_CACHE` or `REPLICATION`. * `custom_role_arn` - (Optional) A custom IAM role to use for repository creation. Required if using repository tags or KMS encryption. * `description` - (Optional) The description for this template. * `encryption_configuration` - (Optional) Encryption configuration for any created repositories. See [below for schema](#encryption_configuration). * `image_tag_mutability` - (Optional) The tag mutability setting for any created repositories. Must be one of: `MUTABLE` or `IMMUTABLE`. Defaults to `MUTABLE`. +* `image_tag_mutability_exclusion_filter` - (Optional) Configuration block that defines filters to specify which image tags can override the default tag mutability setting. Only applicable when `image_tag_mutability` is set to `IMMUTABLE_WITH_EXCLUSION` or `MUTABLE_WITH_EXCLUSION`. See [below for schema](#image_tag_mutability_exclusion_filter). * `lifecycle_policy` - (Optional) The lifecycle policy document to apply to any created repositories. See more details about [Policy Parameters](http://docs.aws.amazon.com/AmazonECR/latest/userguide/LifecyclePolicies.html#lifecycle_policy_parameters) in the official AWS docs. Consider using the [`aws_ecr_lifecycle_policy_document` data_source](/docs/providers/aws/d/ecr_lifecycle_policy_document.html) to generate/manage the JSON document used for the `lifecycle_policy` argument. * `repository_policy` - (Optional) The registry policy document to apply to any created repositories. This is a JSON formatted string. 
For more information about building IAM policy documents with Terraform, see the [AWS IAM Policy Document Guide](https://learn.hashicorp.com/terraform/aws/iam-policy). * `resource_tags` - (Optional) A map of tags to assign to any created repositories. @@ -81,6 +83,11 @@ This resource supports the following arguments: * `encryption_type` - (Optional) The encryption type to use for any created repositories. Valid values are `AES256` or `KMS`. Defaults to `AES256`. * `kms_key` - (Optional) The ARN of the KMS key to use when `encryption_type` is `KMS`. If not specified, uses the default AWS managed key for ECR. +### image_tag_mutability_exclusion_filter + +* `filter` - (Required) The filter pattern to use for excluding image tags from the mutability setting. Must contain only letters, numbers, and special characters (._*-). Each filter can be up to 128 characters long and can contain a maximum of 2 wildcards (*). +* `filter_type` - (Required) The type of filter to use. Must be `WILDCARD`. + ## Attribute Reference This resource exports the following attributes in addition to the arguments above: @@ -112,4 +119,4 @@ Using `terraform import`, import the ECR Repository Creating Templates using the % terraform import aws_ecr_repository_creation_template.example example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/ecr_repository_policy.html.markdown b/website/docs/cdktf/python/r/ecr_repository_policy.html.markdown index ebd15bc02ac0..7d3f3371668e 100644 --- a/website/docs/cdktf/python/r/ecr_repository_policy.html.markdown +++ b/website/docs/cdktf/python/r/ecr_repository_policy.html.markdown @@ -61,6 +61,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `repository` - (Required) Name of the repository to apply the policy. * `policy` - (Required) The policy document. This is a JSON formatted string. For more information about building IAM policy documents with Terraform, see the [AWS IAM Policy Document Guide](https://learn.hashicorp.com/terraform/aws/iam-policy) @@ -73,6 +74,32 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_ecr_repository_policy.example + identity = { + repository = "example" + } +} + +resource "aws_ecr_repository_policy" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `repository` - (String) Name of the ECR repository. + +#### Optional + +* `account_id` (String) AWS Account where this resource is managed. +* `region` (String) Region where this resource is managed. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import ECR Repository Policy using the repository name. 
For example: ```python @@ -96,4 +123,4 @@ Using `terraform import`, import ECR Repository Policy using the repository name % terraform import aws_ecr_repository_policy.example example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/ecrpublic_repository.html.markdown b/website/docs/cdktf/python/r/ecrpublic_repository.html.markdown index 2aa5552eac0b..7b4d1ade3eef 100644 --- a/website/docs/cdktf/python/r/ecrpublic_repository.html.markdown +++ b/website/docs/cdktf/python/r/ecrpublic_repository.html.markdown @@ -54,6 +54,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `repository_name` - (Required) Name of the repository. * `catalog_data` - (Optional) Catalog data configuration for the repository. See [below for schema](#catalog_data). * `tags` - (Optional) Key-value mapping of resource tags. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. 
@@ -108,4 +109,4 @@ Using `terraform import`, import ECR Public Repositories using the `repository_n % terraform import aws_ecrpublic_repository.example example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/ecrpublic_repository_policy.html.markdown b/website/docs/cdktf/python/r/ecrpublic_repository_policy.html.markdown index 03ef58999381..f8353d20317a 100644 --- a/website/docs/cdktf/python/r/ecrpublic_repository_policy.html.markdown +++ b/website/docs/cdktf/python/r/ecrpublic_repository_policy.html.markdown @@ -63,6 +63,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `repository_name` - (Required) Name of the repository to apply the policy. * `policy` - (Required) The policy document. This is a JSON formatted string. 
For more information about building IAM policy documents with Terraform, see the [AWS IAM Policy Document Guide](https://learn.hashicorp.com/terraform/aws/iam-policy) @@ -97,4 +98,4 @@ Using `terraform import`, import ECR Public Repository Policy using the reposito % terraform import aws_ecrpublic_repository_policy.example example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/ecs_account_setting_default.html.markdown b/website/docs/cdktf/python/r/ecs_account_setting_default.html.markdown index d5e514d82d54..78eb921e0775 100644 --- a/website/docs/cdktf/python/r/ecs_account_setting_default.html.markdown +++ b/website/docs/cdktf/python/r/ecs_account_setting_default.html.markdown @@ -62,6 +62,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the account setting to set. * `value` - (Required) State of the setting. @@ -69,7 +70,6 @@ This resource supports the following arguments: This resource exports the following attributes in addition to the arguments above: -* `id` - ARN that identifies the account setting. * `prinicpal_arn` - ARN that identifies the account setting. ## Import @@ -97,4 +97,4 @@ Using `terraform import`, import ECS Account Setting defaults using the `name`. 
% terraform import aws_ecs_account_setting_default.example taskLongArnFormat ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/ecs_capacity_provider.html.markdown b/website/docs/cdktf/python/r/ecs_capacity_provider.html.markdown index 8aae409d1ce3..551bbc2bdf95 100644 --- a/website/docs/cdktf/python/r/ecs_capacity_provider.html.markdown +++ b/website/docs/cdktf/python/r/ecs_capacity_provider.html.markdown @@ -29,7 +29,7 @@ from imports.aws.ecs_capacity_provider import EcsCapacityProvider class MyConvertedCode(TerraformStack): def __init__(self, scope, name, *, maxSize, minSize): super().__init__(scope, name) - test = AutoscalingGroup(self, "test", + example = AutoscalingGroup(self, "example", tag=[AutoscalingGroupTag( key="AmazonECSManaged", propagate_at_launch=True, @@ -39,9 +39,9 @@ class MyConvertedCode(TerraformStack): max_size=max_size, min_size=min_size ) - aws_ecs_capacity_provider_test = EcsCapacityProvider(self, "test_1", + aws_ecs_capacity_provider_example = EcsCapacityProvider(self, "example_1", auto_scaling_group_provider=EcsCapacityProviderAutoScalingGroupProvider( - auto_scaling_group_arn=test.arn, + auto_scaling_group_arn=example.arn, managed_scaling=EcsCapacityProviderAutoScalingGroupProviderManagedScaling( maximum_scaling_step_size=1000, minimum_scaling_step_size=1, @@ -50,16 +50,17 @@ class MyConvertedCode(TerraformStack): ), managed_termination_protection="ENABLED" ), - name="test" + name="example" ) # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. - aws_ecs_capacity_provider_test.override_logical_id("test") + aws_ecs_capacity_provider_example.override_logical_id("example") ``` ## Argument Reference This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `auto_scaling_group_provider` - (Required) Configuration block for the provider for the ECS auto scaling group. Detailed below. * `name` - (Required) Name of the capacity provider. * `tags` - (Optional) Key-value map of resource tags. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. @@ -86,12 +87,32 @@ This resource supports the following arguments: This resource exports the following attributes in addition to the arguments above: * `arn` - ARN that identifies the capacity provider. -* `id` - ARN that identifies the capacity provider. * `tags_all` - Map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). ## Import -In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import ECS Capacity Providers using the `name`. For example: +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_ecs_capacity_provider.example + identity = { + "arn" = "arn:aws:ecs:us-west-2:123456789012:capacity-provider/example" + } +} + +resource "aws_ecs_capacity_provider" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the ECS capacity provider. 
+ +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import ECS Capacity Providers using the `arn`. For example: ```python # DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug @@ -105,13 +126,13 @@ from imports.aws.ecs_capacity_provider import EcsCapacityProvider class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) - EcsCapacityProvider.generate_config_for_import(self, "example", "example") + EcsCapacityProvider.generate_config_for_import(self, "example", "arn:aws:ecs:us-west-2:123456789012:capacity-provider/example") ``` -Using `terraform import`, import ECS Capacity Providers using the `name`. For example: +Using `terraform import`, import ECS Capacity Providers using the `arn`. For example: ```console -% terraform import aws_ecs_capacity_provider.example example +% terraform import aws_ecs_capacity_provider.example arn:aws:ecs:us-west-2:123456789012:capacity-provider/example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/ecs_cluster.html.markdown b/website/docs/cdktf/python/r/ecs_cluster.html.markdown index b10c3cb1be3c..05f924c8a92e 100644 --- a/website/docs/cdktf/python/r/ecs_cluster.html.markdown +++ b/website/docs/cdktf/python/r/ecs_cluster.html.markdown @@ -170,6 +170,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `configuration` - (Optional) Execute command configuration for the cluster. See [`configuration` Block](#configuration-block) for details. 
* `service_connect_defaults` - (Optional) Default Service Connect namespace. See [`service_connect_defaults` Block](#service_connect_defaults-block) for details. * `setting` - (Optional) Configuration block(s) with cluster settings. For example, this can be used to enable CloudWatch Container Insights for a cluster. See [`setting` Block](#setting-block) for details. @@ -225,7 +226,6 @@ The `setting` configuration block supports the following arguments: This resource exports the following attributes in addition to the arguments above: * `arn` - ARN that identifies the cluster. -* `id` - ARN that identifies the cluster. * `tags_all` - Map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). ## Import @@ -253,4 +253,4 @@ Using `terraform import`, import ECS clusters using the cluster name. For exampl % terraform import aws_ecs_cluster.stateless stateless-app ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/ecs_cluster_capacity_providers.html.markdown b/website/docs/cdktf/python/r/ecs_cluster_capacity_providers.html.markdown index 4a3fcbbc918c..b328449b35e4 100644 --- a/website/docs/cdktf/python/r/ecs_cluster_capacity_providers.html.markdown +++ b/website/docs/cdktf/python/r/ecs_cluster_capacity_providers.html.markdown @@ -51,6 +51,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `capacity_providers` - (Optional) Set of names of one or more capacity providers to associate with the cluster. 
Valid values also include `FARGATE` and `FARGATE_SPOT`. * `cluster_name` - (Required, Forces new resource) Name of the ECS cluster to manage capacity providers for. * `default_capacity_provider_strategy` - (Optional) Set of capacity provider strategies to use by default for the cluster. Detailed below. @@ -92,4 +93,4 @@ Using `terraform import`, import ECS cluster capacity providers using the `clust % terraform import aws_ecs_cluster_capacity_providers.example my-cluster ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/ecs_service.html.markdown b/website/docs/cdktf/python/r/ecs_service.html.markdown index 211e2b07fb17..7456223c7c8f 100644 --- a/website/docs/cdktf/python/r/ecs_service.html.markdown +++ b/website/docs/cdktf/python/r/ecs_service.html.markdown @@ -152,6 +152,31 @@ class MyConvertedCode(TerraformStack): ) ``` +### Blue/Green Deployment with SIGINT Rollback + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import Token, TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.ecs_service import EcsService +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + EcsService(self, "example", + cluster=Token.as_string(aws_ecs_cluster_example.id), + deployment_configuration=EcsServiceDeploymentConfiguration( + strategy="BLUE_GREEN" + ), + name="example", + sigint_rollback=True, + wait_for_steady_state=True + ) +``` + ### Redeploy Service On Every Apply The key used with `triggers` is arbitrary. @@ -185,11 +210,13 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `alarms` - (Optional) Information about the CloudWatch alarms. [See below](#alarms). -* `availability_zone_rebalancing` - (Optional) ECS automatically redistributes tasks within a service across Availability Zones (AZs) to mitigate the risk of impaired application availability due to underlying infrastructure failures and task lifecycle activities. The valid values are `ENABLED` and `DISABLED`. Defaults to `DISABLED`. -* `capacity_provider_strategy` - (Optional) Capacity provider strategies to use for the service. Can be one or more. These can be updated without destroying and recreating the service only if `force_new_deployment = true` and not changing from 0 `capacity_provider_strategy` blocks to greater than 0, or vice versa. [See below](#capacity_provider_strategy). Conflicts with `launch_type`. +* `availability_zone_rebalancing` - (Optional) ECS automatically redistributes tasks within a service across Availability Zones (AZs) to mitigate the risk of impaired application availability due to underlying infrastructure failures and task lifecycle activities. The valid values are `ENABLED` and `DISABLED`. When creating a new service, if no value is specified, it defaults to `ENABLED` if the service is compatible with AvailabilityZoneRebalancing. When updating an existing service, if no value is specified it defaults to the existing service's AvailabilityZoneRebalancing value. If the service never had an AvailabilityZoneRebalancing value set, Amazon ECS treats this as `DISABLED`. +* `capacity_provider_strategy` - (Optional) Capacity provider strategies to use for the service. Can be one or more. Updating this argument requires `force_new_deployment = true`. [See below](#capacity_provider_strategy). Conflicts with `launch_type`. * `cluster` - (Optional) ARN of an ECS cluster. 
* `deployment_circuit_breaker` - (Optional) Configuration block for deployment circuit breaker. [See below](#deployment_circuit_breaker). +* `deployment_configuration` - (Optional) Configuration block for deployment settings. [See below](#deployment_configuration). * `deployment_controller` - (Optional) Configuration block for deployment controller configuration. [See below](#deployment_controller). * `deployment_maximum_percent` - (Optional) Upper limit (as a percentage of the service's desiredCount) of the number of running tasks that can be running in a service during a deployment. Not valid when using the `DAEMON` scheduling strategy. * `deployment_minimum_healthy_percent` - (Optional) Lower limit (as a percentage of the service's desiredCount) of the number of running tasks that must remain running and healthy in a service during a deployment. @@ -210,6 +237,7 @@ The following arguments are optional: * `scheduling_strategy` - (Optional) Scheduling strategy to use for the service. The valid values are `REPLICA` and `DAEMON`. Defaults to `REPLICA`. Note that [*Tasks using the Fargate launch type or the `CODE_DEPLOY` or `EXTERNAL` deployment controller types don't support the `DAEMON` scheduling strategy*](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_CreateService.html). * `service_connect_configuration` - (Optional) ECS Service Connect configuration for this service to discover and connect to services, and be discovered by, and connected from, other services within a namespace. [See below](#service_connect_configuration). * `service_registries` - (Optional) Service discovery registries for the service. The maximum number of `service_registries` blocks is `1`. [See below](#service_registries). +* `sigint_rollback` - (Optional) Whether to enable graceful termination of deployments using SIGINT signals. When enabled, allows customers to safely cancel an in-progress deployment and automatically trigger a rollback to the previous stable state. 
Defaults to `false`. Only applicable when using `ECS` deployment controller and requires `wait_for_steady_state = true`. * `tags` - (Optional) Key-value map of resource tags. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. * `task_definition` - (Optional) Family and revision (`family:revision`) or full ARN of the task definition that you want to run in your service. Required unless using the `EXTERNAL` deployment controller. If a revision is not specified, the latest `ACTIVE` revision is used. * `triggers` - (Optional) Map of arbitrary keys and values that, when changed, will trigger an in-place update (redeployment). Useful with `plantimestamp()`. See example above. @@ -264,6 +292,23 @@ The `capacity_provider_strategy` configuration block supports the following: * `capacity_provider` - (Required) Short name of the capacity provider. * `weight` - (Required) Relative percentage of the total number of launched tasks that should use the specified capacity provider. +### deployment_configuration + +The `deployment_configuration` configuration block supports the following: + +* `strategy` - (Optional) Type of deployment strategy. Valid values: `ROLLING`, `BLUE_GREEN`. Default: `ROLLING`. +* `bake_time_in_minutes` - (Optional) Number of minutes to wait after a new deployment is fully provisioned before terminating the old deployment. Only used when `strategy` is set to `BLUE_GREEN`. +* `lifecycle_hook` - (Optional) Configuration block for lifecycle hooks that are invoked during deployments. [See below](#lifecycle_hook). + +### lifecycle_hook + +The `lifecycle_hook` configuration block supports the following: + +* `hook_target_arn` - (Required) ARN of the Lambda function to invoke for the lifecycle hook. 
+* `role_arn` - (Required) ARN of the IAM role that grants the service permission to invoke the Lambda function. +* `lifecycle_stages` - (Required) Stages during the deployment when the hook should be invoked. Valid values: `RECONCILE_SERVICE`, `PRE_SCALE_UP`, `POST_SCALE_UP`, `TEST_TRAFFIC_SHIFT`, `POST_TEST_TRAFFIC_SHIFT`, `PRODUCTION_TRAFFIC_SHIFT`, `POST_PRODUCTION_TRAFFIC_SHIFT`. +* `hook_details` - (Optional) Custom parameters that Amazon ECS will pass to the hook target invocations (such as a Lambda function). + ### deployment_circuit_breaker The `deployment_circuit_breaker` configuration block supports the following: @@ -285,9 +330,19 @@ The `deployment_controller` configuration block supports the following: * `target_group_arn` - (Required for ALB/NLB) ARN of the Load Balancer target group to associate with the service. * `container_name` - (Required) Name of the container to associate with the load balancer (as it appears in a container definition). * `container_port` - (Required) Port on the container to associate with the load balancer. +* `advanced_configuration` - (Optional) Configuration block for Blue/Green deployment settings. Required when using `BLUE_GREEN` deployment strategy. [See below](#advanced_configuration). -> **Version note:** Multiple `load_balancer` configuration block support was added in Terraform AWS Provider version 2.22.0. This allows configuration of [ECS service support for multiple target groups](https://aws.amazon.com/about-aws/whats-new/2019/07/amazon-ecs-services-now-support-multiple-load-balancer-target-groups/). +### advanced_configuration + +The `advanced_configuration` configuration block supports the following: + +* `alternate_target_group_arn` - (Required) ARN of the alternate target group to use for Blue/Green deployments. +* `production_listener_rule` - (Required) ARN of the listener rule that routes production traffic. +* `role_arn` - (Required) ARN of the IAM role that allows ECS to manage the target groups. 
+* `test_listener_rule` - (Optional) ARN of the listener rule that routes test traffic. + ### network_configuration `network_configuration` support the following: @@ -354,7 +409,7 @@ For more information, see [Task Networking](https://docs.aws.amazon.com/AmazonEC `service` supports the following: -* `client_alias` - (Optional) List of client aliases for this Service Connect service. You use these to assign names that can be used by client applications. The maximum number of client aliases that you can have in this list is 1. [See below](#client_alias). +* `client_alias` - (Optional) List of client aliases for this Service Connect service. You use these to assign names that can be used by client applications. For each service block where `enabled` is `true`, exactly one `client_alias` with one `port` should be specified. [See below](#client_alias). * `discovery_name` - (Optional) Name of the new AWS Cloud Map service that Amazon ECS creates for this Amazon ECS service. * `ingress_port_override` - (Optional) Port number for the Service Connect proxy to listen on. * `port_name` - (Required) Name of one of the `portMappings` from all the containers in the task definition of this Amazon ECS service. @@ -388,6 +443,26 @@ For more information, see [Task Networking](https://docs.aws.amazon.com/AmazonEC * `dns_name` - (Optional) Name that you use in the applications of client tasks to connect to this service. * `port` - (Required) Listening port number for the Service Connect proxy. This port is available inside of all of the tasks within the same namespace. +* `test_traffic_rules` - (Optional) Configuration block for test traffic routing rules. [See below](#test_traffic_rules). + +### test_traffic_rules + +The `test_traffic_rules` configuration block supports the following: + +* `header` - (Optional) Configuration block for header-based routing rules. [See below](#header).
+ +### header + +The `header` configuration block supports the following: + +* `name` - (Required) Name of the HTTP header to match. +* `value` - (Required) Configuration block for header value matching criteria. [See below](#value). + +### value + +The `value` configuration block supports the following: + +* `exact` - (Required) Exact string value to match in the header. ### tag_specifications @@ -401,7 +476,7 @@ For more information, see [Task Networking](https://docs.aws.amazon.com/AmazonEC This resource exports the following attributes in addition to the arguments above: -* `id` - ARN that identifies the service. +* `arn` - ARN that identifies the service. * `tags_all` - A map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). ## Timeouts @@ -437,4 +512,4 @@ Using `terraform import`, import ECS services using the `name` together with ecs % terraform import aws_ecs_service.imported cluster-name/service-name ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/ecs_tag.html.markdown b/website/docs/cdktf/python/r/ecs_tag.html.markdown index 660a77242f94..6afe82cc9f29 100644 --- a/website/docs/cdktf/python/r/ecs_tag.html.markdown +++ b/website/docs/cdktf/python/r/ecs_tag.html.markdown @@ -32,7 +32,7 @@ class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) example = BatchComputeEnvironment(self, "example", - compute_environment_name="example", + name="example", service_role=Token.as_string(aws_iam_role_example.arn), type="UNMANAGED" ) @@ -49,6 +49,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `resource_arn` - (Required) Amazon Resource Name (ARN) of the ECS resource to tag. * `key` - (Required) Tag name. * `value` - (Required) Tag value. @@ -84,4 +85,4 @@ Using `terraform import`, import `aws_ecs_tag` using the ECS resource identifier % terraform import aws_ecs_tag.example arn:aws:ecs:us-east-1:123456789012:cluster/example,Name ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/ecs_task_definition.html.markdown b/website/docs/cdktf/python/r/ecs_task_definition.html.markdown index 3c3b8dfbd148..7dd077fcb83d 100644 --- a/website/docs/cdktf/python/r/ecs_task_definition.html.markdown +++ b/website/docs/cdktf/python/r/ecs_task_definition.html.markdown @@ -198,7 +198,7 @@ resource "aws_secretsmanager_secret_version" "test" { } ``` -### Example Using `container_definitions` and `inference_accelerator` +### Example Using `container_definitions` ```python # DO NOT EDIT. 
Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug @@ -213,13 +213,8 @@ class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) EcsTaskDefinition(self, "test", - container_definitions="[\n {\n \"cpu\": 10,\n \"command\": [\"sleep\", \"10\"],\n \"entryPoint\": [\"/\"],\n \"environment\": [\n {\"name\": \"VARNAME\", \"value\": \"VARVAL\"}\n ],\n \"essential\": true,\n \"image\": \"jenkins\",\n \"memory\": 128,\n \"name\": \"jenkins\",\n \"portMappings\": [\n {\n \"containerPort\": 80,\n \"hostPort\": 8080\n }\n ],\n \"resourceRequirements\":[\n {\n \"type\":\"InferenceAccelerator\",\n \"value\":\"device_1\"\n }\n ]\n }\n]\n\n", - family="test", - inference_accelerator=[EcsTaskDefinitionInferenceAccelerator( - device_name="device_1", - device_type="eia1.medium" - ) - ] + container_definitions="[\n {\n \"cpu\": 10,\n \"command\": [\"sleep\", \"10\"],\n \"entryPoint\": [\"/\"],\n \"environment\": [\n {\"name\": \"VARNAME\", \"value\": \"VARVAL\"}\n ],\n \"essential\": true,\n \"image\": \"jenkins\",\n \"memory\": 128,\n \"name\": \"jenkins\",\n \"portMappings\": [\n {\n \"containerPort\": 80,\n \"hostPort\": 8080\n }\n ]\n }\n]\n\n", + family="test" ) ``` @@ -260,12 +255,10 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `cpu` - (Optional) Number of cpu units used by the task. If the `requires_compatibilities` is `FARGATE` this field is required. * `enable_fault_injection` - (Optional) Enables fault injection and allows for fault injection requests to be accepted from the task's containers. Default is `false`. 
- - **Note:** Fault injection only works with tasks using the `awsvpc` or `host` network modes. Fault injection isn't available on Windows. * `execution_role_arn` - (Optional) ARN of the task execution role that the Amazon ECS container agent and the Docker daemon can assume. -* `inference_accelerator` - (Optional) Configuration block(s) with Inference Accelerators settings. [Detailed below.](#inference_accelerator) * `ipc_mode` - (Optional) IPC resource namespace to be used for the containers in the task The valid values are `host`, `task`, and `none`. * `memory` - (Optional) Amount (in MiB) of memory used by the task. If the `requires_compatibilities` is `FARGATE` this field is required. * `network_mode` - (Optional) Docker networking mode to use for the containers in the task. Valid values are `none`, `bridge`, `awsvpc`, and `host`. @@ -283,6 +276,8 @@ The following arguments are optional: ~> **NOTE:** Proper escaping is required for JSON field values containing quotes (`"`) such as `environment` values. If directly setting the JSON, they should be escaped as `\"` in the JSON, e.g., `"value": "I \"love\" escaped quotes"`. If using a Terraform variable value, they should be escaped as `\\\"` in the variable, e.g., `value = "I \\\"love\\\" escaped quotes"` in the variable and `"value": "${var.myvariable}"` in the JSON. +~> **Note:** Fault injection only works with tasks using the `awsvpc` or `host` network modes. Fault injection isn't available on Windows. + ### volume * `docker_volume_configuration` - (Optional) Configuration block to configure a [docker volume](#docker_volume_configuration). Detailed below. @@ -351,11 +346,6 @@ For more information, see [Specifying an FSX Windows File Server volume in your * `size_in_gib` - (Required) The total amount, in GiB, of ephemeral storage to set for the task. The minimum supported value is `21` GiB and the maximum supported value is `200` GiB. 
-### inference_accelerator - -* `device_name` - (Required) Elastic Inference accelerator device name. The deviceName must also be referenced in a container definition as a ResourceRequirement. -* `device_type` - (Required) Elastic Inference accelerator type to use. - ## Attribute Reference This resource exports the following attributes in addition to the arguments above: @@ -390,4 +380,4 @@ Using `terraform import`, import ECS Task Definitions using their ARNs. For exam % terraform import aws_ecs_task_definition.example arn:aws:ecs:us-east-1:012345678910:task-definition/mytaskfamily:123 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/ecs_task_set.html.markdown b/website/docs/cdktf/python/r/ecs_task_set.html.markdown index 7ecd5c246cd9..8123407572cd 100644 --- a/website/docs/cdktf/python/r/ecs_task_set.html.markdown +++ b/website/docs/cdktf/python/r/ecs_task_set.html.markdown @@ -81,6 +81,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `capacity_provider_strategy` - (Optional) The capacity provider strategy to use for the service. Can be one or more. [Defined below](#capacity_provider_strategy). * `external_id` - (Optional) The external ID associated with the task set. * `force_delete` - (Optional) Whether to allow deleting the task set without waiting for scaling down to 0. You can force a task set to delete even if it's in the process of scaling a resource. Normally, Terraform drains all the tasks before deleting the task set. This bypasses that behavior and potentially leaves resources dangling. 
@@ -175,4 +176,4 @@ Using `terraform import`, import ECS Task Sets using the `task_set_id`, `service % terraform import aws_ecs_task_set.example ecs-svc/7177320696926227436,arn:aws:ecs:us-west-2:123456789101:service/example/example-1234567890,arn:aws:ecs:us-west-2:123456789101:cluster/example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/efs_access_point.html.markdown b/website/docs/cdktf/python/r/efs_access_point.html.markdown index 43e5fa3ea9e9..e5803faec35c 100644 --- a/website/docs/cdktf/python/r/efs_access_point.html.markdown +++ b/website/docs/cdktf/python/r/efs_access_point.html.markdown @@ -35,6 +35,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `file_system_id` - (Required) ID of the file system for which the access point is intended. * `posix_user` - (Optional) Operating system user and group applied to all file system requests made using the access point. [Detailed](#posix_user) below. * `root_directory`- (Optional) Directory on the Amazon EFS file system that the access point provides access to. [Detailed](#root_directory) below. @@ -95,4 +96,4 @@ Using `terraform import`, import the EFS access points using the `id`. 
For examp % terraform import aws_efs_access_point.test fsap-52a643fb ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/efs_backup_policy.html.markdown b/website/docs/cdktf/python/r/efs_backup_policy.html.markdown index 24e8c0a639f6..8ce041290a19 100644 --- a/website/docs/cdktf/python/r/efs_backup_policy.html.markdown +++ b/website/docs/cdktf/python/r/efs_backup_policy.html.markdown @@ -43,6 +43,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `file_system_id` - (Required) The ID of the EFS file system. * `backup_policy` - (Required) A backup_policy object (documented below). @@ -83,4 +84,4 @@ Using `terraform import`, import the EFS backup policies using the `id`. For exa % terraform import aws_efs_backup_policy.example fs-6fa144c6 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/efs_file_system.html.markdown b/website/docs/cdktf/python/r/efs_file_system.html.markdown index d6326b42e817..f283d94285a3 100644 --- a/website/docs/cdktf/python/r/efs_file_system.html.markdown +++ b/website/docs/cdktf/python/r/efs_file_system.html.markdown @@ -63,6 +63,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
* `availability_zone_name` - (Optional) the AWS Availability Zone in which to create the file system. Used to create a file system that uses One Zone storage classes. See [user guide](https://docs.aws.amazon.com/efs/latest/ug/availability-durability.html) for more information. * `creation_token` - (Optional) A unique name (a maximum of 64 characters are allowed) used as reference when creating the Elastic File System to ensure idempotent file @@ -140,4 +141,4 @@ Using `terraform import`, import the EFS file systems using the `id`. For exampl % terraform import aws_efs_file_system.foo fs-6fa144c6 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/efs_file_system_policy.html.markdown b/website/docs/cdktf/python/r/efs_file_system_policy.html.markdown index 6362b31ab69e..f890bc90b0d6 100644 --- a/website/docs/cdktf/python/r/efs_file_system_policy.html.markdown +++ b/website/docs/cdktf/python/r/efs_file_system_policy.html.markdown @@ -69,6 +69,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `bypass_policy_lockout_safety_check` - (Optional) A flag to indicate whether to bypass the `aws_efs_file_system_policy` lockout safety check. The policy lockout safety check determines whether the policy in the request will prevent the principal making the request will be locked out from making future `PutFileSystemPolicy` requests on the file system. Set `bypass_policy_lockout_safety_check` to `true` only when you intend to prevent the principal that is making the request from making a subsequent `PutFileSystemPolicy` request on the file system. The default value is `false`. 
## Attribute Reference @@ -102,4 +103,4 @@ Using `terraform import`, import the EFS file system policies using the `id`. Fo % terraform import aws_efs_file_system_policy.foo fs-6fa144c6 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/efs_mount_target.html.markdown b/website/docs/cdktf/python/r/efs_mount_target.html.markdown index 68a7c2b3a350..a67b2328ae3f 100644 --- a/website/docs/cdktf/python/r/efs_mount_target.html.markdown +++ b/website/docs/cdktf/python/r/efs_mount_target.html.markdown @@ -48,10 +48,13 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `file_system_id` - (Required) The ID of the file system for which the mount target is intended. * `subnet_id` - (Required) The ID of the subnet to add the mount target in. * `ip_address` - (Optional) The address (within the address range of the specified subnet) at which the file system may be mounted via the mount target. +* `ip_address_type` - (Optional) IP address type for the mount target. Valid values are `IPV4_ONLY` (only IPv4 addresses), `IPV6_ONLY` (only IPv6 addresses), and `DUAL_STACK` (dual-stack, both IPv4 and IPv6 addresses). Defaults to `IPV4_ONLY`. +* `ipv6_address` - (Optional) IPv6 address to use. Valid only when `ip_address_type` is set to `IPV6_ONLY` or `DUAL_STACK`. * `security_groups` - (Optional) A list of up to 5 VPC security group IDs (that must be for the same VPC as subnet specified) in effect for the mount target. @@ -104,4 +107,4 @@ Using `terraform import`, import the EFS mount targets using the `id`. 
For examp % terraform import aws_efs_mount_target.alpha fsmt-52a643fb ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/efs_replication_configuration.html.markdown b/website/docs/cdktf/python/r/efs_replication_configuration.html.markdown index f37dc65c81ce..9dc8b9368d4c 100644 --- a/website/docs/cdktf/python/r/efs_replication_configuration.html.markdown +++ b/website/docs/cdktf/python/r/efs_replication_configuration.html.markdown @@ -102,6 +102,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `destination` - (Required) A destination configuration block (documented below). * `source_file_system_id` - (Required) The ID of the file system that is to be replicated. @@ -157,4 +158,4 @@ Using `terraform import`, import EFS Replication Configurations using the file s % terraform import aws_efs_replication_configuration.example fs-id ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/egress_only_internet_gateway.html.markdown b/website/docs/cdktf/python/r/egress_only_internet_gateway.html.markdown index 528d8eed14c7..dbabde4fd8eb 100644 --- a/website/docs/cdktf/python/r/egress_only_internet_gateway.html.markdown +++ b/website/docs/cdktf/python/r/egress_only_internet_gateway.html.markdown @@ -48,6 +48,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `vpc_id` - (Required) The VPC ID to create in. * `tags` - (Optional) A map of tags to assign to the resource. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. @@ -83,4 +84,4 @@ Using `terraform import`, import Egress-only Internet gateways using the `id`. F % terraform import aws_egress_only_internet_gateway.example eigw-015e0e244e24dfe8a ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/eip.html.markdown b/website/docs/cdktf/python/r/eip.html.markdown index 9c346ded412a..fd65fd4d0807 100644 --- a/website/docs/cdktf/python/r/eip.html.markdown +++ b/website/docs/cdktf/python/r/eip.html.markdown @@ -158,6 +158,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `address` - (Optional) IP address from an EC2 BYOIP pool. This option is only available for VPC EIPs. * `associate_with_private_ip` - (Optional) User-specified primary or secondary private IP address to associate with the Elastic IP address. If no private IP address is specified, the Elastic IP address is associated with the primary private IP address. * `customer_owned_ipv4_pool` - (Optional) ID of a customer-owned address pool. 
For more on customer owned IP addressed check out [Customer-owned IP addresses guide](https://docs.aws.amazon.com/outposts/latest/userguide/outposts-networking-components.html#ip-addressing). @@ -169,13 +170,12 @@ This resource supports the following arguments: * `public_ipv4_pool` - (Optional) EC2 IPv4 address pool identifier or `amazon`. This option is only available for VPC EIPs. * `tags` - (Optional) Map of tags to assign to the resource. Tags can only be applied to EIPs in a VPC. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. -* `vpc` - (Optional **Deprecated**) Boolean if the EIP is in a VPC or not. Use `domain` instead. - Defaults to `true` unless the region supports EC2-Classic. -~> **NOTE:** You can specify either the `instance` ID or the `network_interface` ID, but not both. Including both will **not** return an error from the AWS API, but will have undefined behavior. See the relevant [AssociateAddress API Call][1] for more information. +~> **NOTE:** You can specify either the `instance` ID or the `network_interface` ID, but not both. +Including both will **not** return an error from the AWS API, but will have undefined behavior. +See the relevant [AssociateAddress API Call][1] for more information. -~> **NOTE:** Specifying both `public_ipv4_pool` and `address` won't cause an error but `address` will be used in the -case both options are defined as the api only requires one or the other. +~> **NOTE:** Specifying both `public_ipv4_pool` and `address` won't cause an error; however, only `address` will be used if both options are defined, as the API only requires one of the two. ## Attribute Reference @@ -230,4 +230,4 @@ Using `terraform import`, import EIPs in a VPC using their Allocation ID.
For ex [1]: https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_AssociateAddress.html - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/eip_association.html.markdown b/website/docs/cdktf/python/r/eip_association.html.markdown index 0c0d7065d79d..be2da022b985 100644 --- a/website/docs/cdktf/python/r/eip_association.html.markdown +++ b/website/docs/cdktf/python/r/eip_association.html.markdown @@ -53,6 +53,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `allocation_id` - (Optional, Forces new resource) ID of the associated Elastic IP. This argument is required despite being optional at the resource level due to legacy support for EC2-Classic networking. * `allow_reassociation` - (Optional, Forces new resource) Whether to allow an Elastic IP address to be re-associated. @@ -98,4 +99,4 @@ Using `terraform import`, import EIP Assocations using their association IDs. Fo % terraform import aws_eip_association.test eipassoc-ab12c345 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/eip_domain_name.html.markdown b/website/docs/cdktf/python/r/eip_domain_name.html.markdown index 1ee3bd505295..77deef86f457 100644 --- a/website/docs/cdktf/python/r/eip_domain_name.html.markdown +++ b/website/docs/cdktf/python/r/eip_domain_name.html.markdown @@ -51,6 +51,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `allocation_id` - (Required) The allocation ID. * `domain_name` - (Required) The domain name to modify for the IP address. @@ -68,4 +69,4 @@ This resource exports the following attributes in addition to the arguments abov - `update` - (Default `10m`) - `delete` - (Default `10m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/eks_access_entry.html.markdown b/website/docs/cdktf/python/r/eks_access_entry.html.markdown index e7829d076372..de7c21766e3b 100644 --- a/website/docs/cdktf/python/r/eks_access_entry.html.markdown +++ b/website/docs/cdktf/python/r/eks_access_entry.html.markdown @@ -38,12 +38,13 @@ class MyConvertedCode(TerraformStack): The following arguments are required: -* `cluster_name` – (Required) Name of the EKS Cluster. -* `principal_arn` – (Required) The IAM Principal ARN which requires Authentication access to the EKS cluster. +* `cluster_name` - (Required) Name of the EKS Cluster. +* `principal_arn` - (Required) The IAM Principal ARN which requires Authentication access to the EKS cluster. The following arguments are optional: -* `kubernetes_groups` – (Optional) List of string which can optionally specify the Kubernetes groups the user would belong to when creating an access entry. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `kubernetes_groups` - (Optional) List of string which can optionally specify the Kubernetes groups the user would belong to when creating an access entry. * `tags` - (Optional) Key-value map of resource tags. 
If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. * `type` - (Optional) Defaults to STANDARD which provides the standard workflow. EC2_LINUX, EC2_WINDOWS, FARGATE_LINUX types disallow users to input a username or groups, and prevent associations. * `user_name` - (Optional) Defaults to principal ARN if user is principal else defaults to assume-role/session-name is role is used. @@ -90,4 +91,4 @@ Using `terraform import`, import EKS access entry using the `cluster_name` and ` % terraform import aws_eks_access_entry.my_eks_access_entry my_cluster_name:my_principal_arn ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/eks_access_policy_association.html.markdown b/website/docs/cdktf/python/r/eks_access_policy_association.html.markdown index 60ff40395477..d2080e934a63 100644 --- a/website/docs/cdktf/python/r/eks_access_policy_association.html.markdown +++ b/website/docs/cdktf/python/r/eks_access_policy_association.html.markdown @@ -39,12 +39,13 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -The following arguments are required: +This resource supports the following arguments: -* `cluster_name` – (Required) Name of the EKS Cluster. -* `policy_arn` – (Required) The ARN of the access policy that you're associating. -* `principal_arn` – (Required) The IAM Principal ARN which requires Authentication access to the EKS cluster. -* `access_scope` – (Required) The configuration block to determine the scope of the access. See [`access_scope` Block](#access_scope-block) below. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `cluster_name` - (Required) Name of the EKS Cluster. +* `policy_arn` - (Required) The ARN of the access policy that you're associating. +* `principal_arn` - (Required) The IAM Principal ARN which requires Authentication access to the EKS cluster. +* `access_scope` - (Required) The configuration block to determine the scope of the access. See [`access_scope` Block](#access_scope-block) below. ### `access_scope` Block @@ -99,4 +100,4 @@ Using `terraform import`, import EKS access entry using the `cluster_name` `prin % terraform import aws_eks_access_policy_association.my_eks_access_entry my_cluster_name#my_principal_arn#my_policy_arn ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/eks_addon.html.markdown b/website/docs/cdktf/python/r/eks_addon.html.markdown index e8907003162f..76d822baebb8 100644 --- a/website/docs/cdktf/python/r/eks_addon.html.markdown +++ b/website/docs/cdktf/python/r/eks_addon.html.markdown @@ -62,13 +62,14 @@ Custom add-on configuration can be passed using `configuration_values` as a sing ~> **Note:** `configuration_values` is a single JSON string should match the valid JSON schema for each add-on with specific version. -To find the correct JSON schema for each add-on can be extracted using [describe-addon-configuration](https://docs.aws.amazon.com/cli/latest/reference/eks/describe-addon-configuration.html) call. -This below is an example for extracting the `configuration_values` schema for `coredns`. +You can use [describe-addon-configuration](https://docs.aws.amazon.com/cli/latest/reference/eks/describe-addon-configuration.html) to extract each add-on's JSON schema. +Here's an example command to extract the `configuration_values` schema for `coredns`. 
```bash - aws eks describe-addon-configuration \ - --addon-name coredns \ - --addon-version v1.10.1-eksbuild.1 +aws eks describe-addon-configuration \ + --addon-name coredns \ + --addon-version v1.10.1-eksbuild.1 \ + | jq -r .configurationSchema | jq . ``` Example to create a `coredns` managed addon with custom `configuration_values`. @@ -189,20 +190,21 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -The following arguments are required: +This resource supports the following arguments: -* `addon_name` – (Required) Name of the EKS add-on. The name must match one of +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `addon_name` - (Required) Name of the EKS add-on. The name must match one of the names returned by [describe-addon-versions](https://docs.aws.amazon.com/cli/latest/reference/eks/describe-addon-versions.html). -* `cluster_name` – (Required) Name of the EKS Cluster. +* `cluster_name` - (Required) Name of the EKS Cluster. The following arguments are optional: -* `addon_version` – (Optional) The version of the EKS add-on. The version must +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `addon_version` - (Optional) The version of the EKS add-on. The version must match one of the versions returned by [describe-addon-versions](https://docs.aws.amazon.com/cli/latest/reference/eks/describe-addon-versions.html). * `configuration_values` - (Optional) custom configuration values for addons with single JSON string. 
This JSON string value must match the JSON schema derived from [describe-addon-configuration](https://docs.aws.amazon.com/cli/latest/reference/eks/describe-addon-configuration.html). -* `resolve_conflicts_on_create` - (Optional) How to resolve field value conflicts when migrating a self-managed add-on to an Amazon EKS add-on. Valid values are `NONE` and `OVERWRITE`. For more details see the [CreateAddon](https://docs.aws.amazon.com/eks/latest/APIReference/API_CreateAddon.html) API Docs. -* `resolve_conflicts_on_update` - (Optional) How to resolve field value conflicts for an Amazon EKS add-on if you've changed a value from the Amazon EKS default value. Valid values are `NONE`, `OVERWRITE`, and `PRESERVE`. For more details see the [UpdateAddon](https://docs.aws.amazon.com/eks/latest/APIReference/API_UpdateAddon.html) API Docs. -* `resolve_conflicts` - (**Deprecated** use the `resolve_conflicts_on_create` and `resolve_conflicts_on_update` attributes instead) Define how to resolve parameter value conflicts when migrating an existing add-on to an Amazon EKS add-on or when applying version updates to the add-on. Valid values are `NONE`, `OVERWRITE` and `PRESERVE`. Note that `PRESERVE` is only valid on addon update, not for initial addon creation. If you need to set this to `PRESERVE`, use the `resolve_conflicts_on_create` and `resolve_conflicts_on_update` attributes instead. For more details check [UpdateAddon](https://docs.aws.amazon.com/eks/latest/APIReference/API_UpdateAddon.html) API Docs. +* `resolve_conflicts_on_create` - (Optional) How to resolve field value conflicts when migrating a self-managed add-on to an Amazon EKS add-on. Valid values are `NONE` and `OVERWRITE`. For more details see the [CreateAddon](https://docs.aws.amazon.com/eks/latest/APIReference/API_CreateAddon.html) API Documentation. 
+* `resolve_conflicts_on_update` - (Optional) How to resolve field value conflicts for an Amazon EKS add-on if you've changed a value from the Amazon EKS default value. Valid values are `NONE`, `OVERWRITE`, and `PRESERVE`. For more details see the [UpdateAddon](https://docs.aws.amazon.com/eks/latest/APIReference/API_UpdateAddon.html) API Documentation. * `tags` - (Optional) Key-value map of resource tags. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. * `pod_identity_association` - (Optional) Configuration block with EKS Pod Identity association settings. See [`pod_identity_association`](#pod-identity-association) below for details. * `preserve` - (Optional) Indicates if you want to preserve the created resources when deleting the EKS add-on. @@ -267,4 +269,4 @@ Using `terraform import`, import EKS add-on using the `cluster_name` and `addon_ % terraform import aws_eks_addon.my_eks_addon my_cluster_name:my_addon_name ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/eks_cluster.html.markdown b/website/docs/cdktf/python/r/eks_cluster.html.markdown index a409223810fd..1a2abe6bb275 100644 --- a/website/docs/cdktf/python/r/eks_cluster.html.markdown +++ b/website/docs/cdktf/python/r/eks_cluster.html.markdown @@ -301,7 +301,7 @@ class MyConvertedCode(TerraformStack): The following arguments are required: -* `name` – (Required) Name of the cluster. Must be between 1-100 characters in length. Must begin with an alphanumeric character, and must only contain alphanumeric characters, dashes and underscores (`^[0-9A-Za-z][A-Za-z0-9\-_]*$`). +* `name` - (Required) Name of the cluster. Must be between 1-100 characters in length. 
Must begin with an alphanumeric character, and must only contain alphanumeric characters, dashes and underscores (`^[0-9A-Za-z][A-Za-z0-9\-_]*$`). * `role_arn` - (Required) ARN of the IAM role that provides permissions for the Kubernetes control plane to make calls to AWS API operations on your behalf. Ensure the resource configuration includes explicit dependencies on the IAM Role permissions by adding [`depends_on`](https://www.terraform.io/docs/configuration/meta-arguments/depends_on.html) if using the [`aws_iam_role_policy` resource](/docs/providers/aws/r/iam_role_policy.html) or [`aws_iam_role_policy_attachment` resource](/docs/providers/aws/r/iam_role_policy_attachment.html), otherwise EKS cannot delete EKS managed EC2 infrastructure such as Security Groups on EKS Cluster deletion. * `vpc_config` - (Required) Configuration block for the VPC associated with your cluster. Amazon EKS VPC resources have specific requirements to work properly with Kubernetes. For more information, see [Cluster VPC Considerations](https://docs.aws.amazon.com/eks/latest/userguide/network_reqs.html) and [Cluster Security Group Considerations](https://docs.aws.amazon.com/eks/latest/userguide/sec-group-reqs.html) in the Amazon EKS User Guide. Detailed below. Also contains attributes detailed in the Attributes section. @@ -310,16 +310,18 @@ The following arguments are optional: * `access_config` - (Optional) Configuration block for the access config associated with your cluster, see [Amazon EKS Access Entries](https://docs.aws.amazon.com/eks/latest/userguide/access-entries.html). [Detailed](#access_config) below. * `bootstrap_self_managed_addons` - (Optional) Install default unmanaged add-ons, such as `aws-cni`, `kube-proxy`, and CoreDNS during cluster creation. If `false`, you must manually install desired add-ons. Changing this value will force a new cluster to be created. Defaults to `true`. 
* `compute_config` - (Optional) Configuration block with compute configuration for EKS Auto Mode. [Detailed](#compute_config) below. +* `deletion_protection` - (Optional) Whether to enable deletion protection for the cluster. When enabled, the cluster cannot be deleted unless deletion protection is first disabled. Default: `false`. * `enabled_cluster_log_types` - (Optional) List of the desired control plane logging to enable. For more information, see [Amazon EKS Control Plane Logging](https://docs.aws.amazon.com/eks/latest/userguide/control-plane-logs.html). * `encryption_config` - (Optional) Configuration block with encryption configuration for the cluster. [Detailed](#encryption_config) below. * `force_update_version` - (Optional) Force version update by overriding upgrade-blocking readiness checks when updating a cluster. * `kubernetes_network_config` - (Optional) Configuration block with kubernetes network configuration for the cluster. [Detailed](#kubernetes_network_config) below. If removed, Terraform will only perform drift detection if a configuration value is provided. * `outpost_config` - (Optional) Configuration block representing the configuration of your local Amazon EKS cluster on an AWS Outpost. This block isn't available for creating Amazon EKS clusters on the AWS cloud. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `remote_network_config` - (Optional) Configuration block with remote network configuration for EKS Hybrid Nodes. [Detailed](#remote_network_config) below. * `storage_config` - (Optional) Configuration block with storage configuration for EKS Auto Mode. [Detailed](#storage_config) below. * `tags` - (Optional) Key-value map of resource tags. 
If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. * `upgrade_policy` - (Optional) Configuration block for the support policy to use for the cluster. See [upgrade_policy](#upgrade_policy) for details. -* `version` – (Optional) Desired Kubernetes master version. If you do not specify a value, the latest available version at resource creation is used and no upgrades will occur except those automatically triggered by EKS. The value must be configured and increased to upgrade the version when desired. Downgrades are not supported by EKS. +* `version` - (Optional) Desired Kubernetes master version. If you do not specify a value, the latest available version at resource creation is used and no upgrades will occur except those automatically triggered by EKS. The value must be configured and increased to upgrade the version when desired. Downgrades are not supported by EKS. * `zonal_shift_config` - (Optional) Configuration block with zonal shift configuration for the cluster. [Detailed](#zonal_shift_config) below. ### access_config @@ -327,7 +329,7 @@ The following arguments are optional: The `access_config` configuration block supports the following arguments: * `authentication_mode` - (Optional) The authentication mode for the cluster. Valid values are `CONFIG_MAP`, `API` or `API_AND_CONFIG_MAP` -* `bootstrap_cluster_creator_admin_permissions` - (Optional) Whether or not to bootstrap the access config values to the cluster. Default is `false`. +* `bootstrap_cluster_creator_admin_permissions` - (Optional) Whether or not to bootstrap the access config values to the cluster. Default is `true`. 
### compute_config @@ -375,8 +377,8 @@ The `remote_pod_networks` configuration block supports the following arguments: * `endpoint_private_access` - (Optional) Whether the Amazon EKS private API server endpoint is enabled. Default is `false`. * `endpoint_public_access` - (Optional) Whether the Amazon EKS public API server endpoint is enabled. Default is `true`. * `public_access_cidrs` - (Optional) List of CIDR blocks. Indicates which CIDR blocks can access the Amazon EKS public API server endpoint when enabled. EKS defaults this to a list with `0.0.0.0/0`. Terraform will only perform drift detection of its value when present in a configuration. -* `security_group_ids` – (Optional) List of security group IDs for the cross-account elastic network interfaces that Amazon EKS creates to use to allow communication between your worker nodes and the Kubernetes control plane. -* `subnet_ids` – (Required) List of subnet IDs. Must be in at least two different availability zones. Amazon EKS creates cross-account elastic network interfaces in these subnets to allow communication between your worker nodes and the Kubernetes control plane. +* `security_group_ids` - (Optional) List of security group IDs for the cross-account elastic network interfaces that Amazon EKS creates to use to allow communication between your worker nodes and the Kubernetes control plane. +* `subnet_ids` - (Required) List of subnet IDs. Must be in at least two different availability zones. Amazon EKS creates cross-account elastic network interfaces in these subnets to allow communication between your worker nodes and the Kubernetes control plane. * `vpc_id` - (Computed) ID of the VPC associated with your cluster. ### kubernetes_network_config @@ -507,4 +509,4 @@ Using `terraform import`, import EKS Clusters using the `name`. 
For example: % terraform import aws_eks_cluster.my_cluster my_cluster ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/eks_fargate_profile.html.markdown b/website/docs/cdktf/python/r/eks_fargate_profile.html.markdown index de56e06e52e0..a68d572261a1 100644 --- a/website/docs/cdktf/python/r/eks_fargate_profile.html.markdown +++ b/website/docs/cdktf/python/r/eks_fargate_profile.html.markdown @@ -78,14 +78,15 @@ class MyConvertedCode(TerraformStack): The following arguments are required: -* `cluster_name` – (Required) Name of the EKS Cluster. -* `fargate_profile_name` – (Required) Name of the EKS Fargate Profile. -* `pod_execution_role_arn` – (Required) Amazon Resource Name (ARN) of the IAM Role that provides permissions for the EKS Fargate Profile. +* `cluster_name` - (Required) Name of the EKS Cluster. +* `fargate_profile_name` - (Required) Name of the EKS Fargate Profile. +* `pod_execution_role_arn` - (Required) Amazon Resource Name (ARN) of the IAM Role that provides permissions for the EKS Fargate Profile. * `selector` - (Required) Configuration block(s) for selecting Kubernetes Pods to execute with this EKS Fargate Profile. Detailed below. -* `subnet_ids` – (Required) Identifiers of private EC2 Subnets to associate with the EKS Fargate Profile. These subnets must have the following resource tag: `kubernetes.io/cluster/CLUSTER_NAME` (where `CLUSTER_NAME` is replaced with the name of the EKS Cluster). +* `subnet_ids` - (Required) Identifiers of private EC2 Subnets to associate with the EKS Fargate Profile. These subnets must have the following resource tag: `kubernetes.io/cluster/CLUSTER_NAME` (where `CLUSTER_NAME` is replaced with the name of the EKS Cluster). The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `tags` - (Optional) Key-value map of resource tags. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. ### selector Configuration Block @@ -96,6 +97,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `labels` - (Optional) Key-value map of Kubernetes labels for selection. ## Attribute Reference @@ -139,4 +141,4 @@ Using `terraform import`, import EKS Fargate Profiles using the `cluster_name` a % terraform import aws_eks_fargate_profile.my_fargate_profile my_cluster:my_fargate_profile ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/eks_identity_provider_config.html.markdown b/website/docs/cdktf/python/r/eks_identity_provider_config.html.markdown index 85cc1ea484a4..d39aa7acab17 100644 --- a/website/docs/cdktf/python/r/eks_identity_provider_config.html.markdown +++ b/website/docs/cdktf/python/r/eks_identity_provider_config.html.markdown @@ -40,16 +40,17 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: -* `cluster_name` – (Required) Name of the EKS Cluster. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `cluster_name` - (Required) Name of the EKS Cluster. * `oidc` - (Required) Nested attribute containing [OpenID Connect](https://openid.net/connect/) identity provider information for the cluster. Detailed below. * `tags` - (Optional) Key-value map of resource tags. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. ### oidc Configuration Block -* `client_id` – (Required) Client ID for the OpenID Connect identity provider. +* `client_id` - (Required) Client ID for the OpenID Connect identity provider. * `groups_claim` - (Optional) The JWT claim that the provider will use to return groups. * `groups_prefix` - (Optional) A prefix that is prepended to group claims e.g., `oidc:`. -* `identity_provider_config_name` – (Required) The name of the identity provider config. +* `identity_provider_config_name` - (Required) The name of the identity provider config. * `issuer_url` - (Required) Issuer URL for the OpenID Connect identity provider. * `required_claims` - (Optional) The key value pairs that describe required claims in the identity token. * `username_claim` - (Optional) The JWT claim that the provider will use as the username. 
@@ -96,4 +97,4 @@ Using `terraform import`, import EKS Identity Provider Configurations using the % terraform import aws_eks_identity_provider_config.my_identity_provider_config my_cluster:my_identity_provider_config ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/eks_node_group.html.markdown b/website/docs/cdktf/python/r/eks_node_group.html.markdown index 331ed134fccc..aba09c864eae 100644 --- a/website/docs/cdktf/python/r/eks_node_group.html.markdown +++ b/website/docs/cdktf/python/r/eks_node_group.html.markdown @@ -191,13 +191,14 @@ class MyConvertedCode(TerraformStack): The following arguments are required: -* `cluster_name` – (Required) Name of the EKS Cluster. -* `node_role_arn` – (Required) Amazon Resource Name (ARN) of the IAM Role that provides permissions for the EKS Node Group. +* `cluster_name` - (Required) Name of the EKS Cluster. +* `node_role_arn` - (Required) Amazon Resource Name (ARN) of the IAM Role that provides permissions for the EKS Node Group. * `scaling_config` - (Required) Configuration block with scaling settings. See [`scaling_config`](#scaling_config-configuration-block) below for details. -* `subnet_ids` – (Required) Identifiers of EC2 Subnets to associate with the EKS Node Group. +* `subnet_ids` - (Required) Identifiers of EC2 Subnets to associate with the EKS Node Group. The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `ami_type` - (Optional) Type of Amazon Machine Image (AMI) associated with the EKS Node Group. See the [AWS documentation](https://docs.aws.amazon.com/eks/latest/APIReference/API_Nodegroup.html#AmazonEKS-Type-Nodegroup-amiType) for valid values. 
Terraform will only perform drift detection if a configuration value is provided. * `capacity_type` - (Optional) Type of capacity associated with the EKS Node Group. Valid values: `ON_DEMAND`, `SPOT`. Terraform will only perform drift detection if a configuration value is provided. * `disk_size` - (Optional) Disk size in GiB for worker nodes. Defaults to `50` for Windows, `20` all other node groups. Terraform will only perform drift detection if a configuration value is provided. @@ -205,15 +206,15 @@ The following arguments are optional: * `instance_types` - (Optional) List of instance types associated with the EKS Node Group. Defaults to `["t3.medium"]`. Terraform will only perform drift detection if a configuration value is provided. * `labels` - (Optional) Key-value map of Kubernetes labels. Only labels that are applied with the EKS API are managed by this argument. Other Kubernetes labels applied to the EKS Node Group will not be managed. * `launch_template` - (Optional) Configuration block with Launch Template settings. See [`launch_template`](#launch_template-configuration-block) below for details. Conflicts with `remote_access`. -* `node_group_name` – (Optional) Name of the EKS Node Group. If omitted, Terraform will assign a random, unique name. Conflicts with `node_group_name_prefix`. The node group name can't be longer than 63 characters. It must start with a letter or digit, but can also include hyphens and underscores for the remaining characters. -* `node_group_name_prefix` – (Optional) Creates a unique name beginning with the specified prefix. Conflicts with `node_group_name`. +* `node_group_name` - (Optional) Name of the EKS Node Group. If omitted, Terraform will assign a random, unique name. Conflicts with `node_group_name_prefix`. The node group name can't be longer than 63 characters. It must start with a letter or digit, but can also include hyphens and underscores for the remaining characters. 
+* `node_group_name_prefix` - (Optional) Creates a unique name beginning with the specified prefix. Conflicts with `node_group_name`. * `node_repair_config` - (Optional) The node auto repair configuration for the node group. See [`node_repair_config`](#node_repair_config-configuration-block) below for details. -* `release_version` – (Optional) AMI version of the EKS Node Group. Defaults to latest version for Kubernetes version. +* `release_version` - (Optional) AMI version of the EKS Node Group. Defaults to latest version for Kubernetes version. * `remote_access` - (Optional) Configuration block with remote access settings. See [`remote_access`](#remote_access-configuration-block) below for details. Conflicts with `launch_template`. * `tags` - (Optional) Key-value map of resource tags. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. * `taint` - (Optional) The Kubernetes taints to be applied to the nodes in the node group. Maximum of 50 taints per node group. See [taint](#taint-configuration-block) below for details. * `update_config` - (Optional) Configuration block with update settings. See [`update_config`](#update_config-configuration-block) below for details. -* `version` – (Optional) Kubernetes version. Defaults to EKS Cluster Kubernetes version. Terraform will only perform drift detection if a configuration value is provided. +* `version` - (Optional) Kubernetes version. Defaults to EKS Cluster Kubernetes version. Terraform will only perform drift detection if a configuration value is provided. 
### launch_template Configuration Block @@ -297,4 +298,4 @@ Using `terraform import`, import EKS Node Groups using the `cluster_name` and `n % terraform import aws_eks_node_group.my_node_group my_cluster:my_node_group ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/eks_pod_identity_association.html.markdown b/website/docs/cdktf/python/r/eks_pod_identity_association.html.markdown index e2dd9fed63a3..887d9316c21d 100644 --- a/website/docs/cdktf/python/r/eks_pod_identity_association.html.markdown +++ b/website/docs/cdktf/python/r/eks_pod_identity_association.html.markdown @@ -80,7 +80,10 @@ The following arguments are required: The following arguments are optional: +* `disable_session_tags` - (Optional) Disable the tags that are automatically added to the role session by Amazon EKS. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `tags` - (Optional) Key-value map of resource tags. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. +* `target_role_arn` - (Optional) The Amazon Resource Name (ARN) of the IAM role to be chained to the IAM role specified as `role_arn`. ## Attribute Reference @@ -88,6 +91,7 @@ This resource exports the following attributes in addition to the arguments abov * `association_arn` - The Amazon Resource Name (ARN) of the association. * `association_id` - The ID of the association. +* `external_id` - The unique identifier for this association for a target IAM role.
You put this value in the trust policy of the target role, in a Condition to match the sts.ExternalId. * `tags_all` - A map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). ## Import @@ -115,4 +119,4 @@ Using `terraform import`, import EKS (Elastic Kubernetes) Pod Identity Associati % terraform import aws_eks_pod_identity_association.example example,a-12345678 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/elastic_beanstalk_application.html.markdown b/website/docs/cdktf/python/r/elastic_beanstalk_application.html.markdown index 0640b7b069c4..dcf46e5cefca 100644 --- a/website/docs/cdktf/python/r/elastic_beanstalk_application.html.markdown +++ b/website/docs/cdktf/python/r/elastic_beanstalk_application.html.markdown @@ -46,6 +46,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The name of the application, must be unique within your account * `description` - (Optional) Short description of the application * `tags` - (Optional) Key-value map of tags for the Elastic Beanstalk Application. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. 
@@ -89,4 +90,4 @@ Using `terraform import`, import Elastic Beanstalk Applications using the `name` % terraform import aws_elastic_beanstalk_application.tf_test tf-test-name ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/elastic_beanstalk_application_version.html.markdown b/website/docs/cdktf/python/r/elastic_beanstalk_application_version.html.markdown index 696d1052c129..882bbf68d195 100644 --- a/website/docs/cdktf/python/r/elastic_beanstalk_application_version.html.markdown +++ b/website/docs/cdktf/python/r/elastic_beanstalk_application_version.html.markdown @@ -60,7 +60,7 @@ class MyConvertedCode(TerraformStack): application="tf-test-name", bucket=Token.as_string(aws_s3_bucket_default.id), description="application version created by terraform", - key=Token.as_string(aws_s3_object_default.id), + key=Token.as_string(aws_s3_object_default.key), name="tf-test-version-label" ) # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. @@ -78,6 +78,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` - (Optional) Short description of the Application Version. * `force_delete` - (Optional) On delete, force an Application Version to be deleted when it may be in use by multiple Elastic Beanstalk Environments. * `process` - (Optional) Pre-processes and validates the environment manifest (env.yaml ) and configuration files (*.config files in the .ebextensions folder) in the source bundle. Validating configuration files can identify issues prior to deploying the application version to an environment. 
You must turn processing on for application versions that you create using AWS CodeBuild or AWS CodeCommit. For application versions built from a source bundle in Amazon S3, processing is optional. It validates Elastic Beanstalk configuration files. It doesn’t validate your application’s configuration files, like proxy server or Docker configuration. @@ -90,4 +91,4 @@ This resource exports the following attributes in addition to the arguments abov * `arn` - ARN assigned by AWS for this Elastic Beanstalk Application. * `tags_all` - Map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/elastic_beanstalk_configuration_template.html.markdown b/website/docs/cdktf/python/r/elastic_beanstalk_configuration_template.html.markdown index b8cb70baaa7d..6005a0943ca4 100644 --- a/website/docs/cdktf/python/r/elastic_beanstalk_configuration_template.html.markdown +++ b/website/docs/cdktf/python/r/elastic_beanstalk_configuration_template.html.markdown @@ -44,14 +44,15 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) A unique name for this Template. 
-* `application` – (Required) name of the application to associate with this configuration template +* `application` - (Required) name of the application to associate with this configuration template * `description` - (Optional) Short description of the Template -* `environment_id` – (Optional) The ID of the environment used with this configuration template -* `setting` – (Optional) Option settings to configure the new Environment. These +* `environment_id` - (Optional) The ID of the environment used with this configuration template +* `setting` - (Optional) Option settings to configure the new Environment. These override specific values that are set as defaults. The format is detailed below in [Option Settings](#option-settings) -* `solution_stack_name` – (Optional) A solution stack to base your Template +* `solution_stack_name` - (Optional) A solution stack to base your Template off of. Example stacks can be found in the [Amazon API documentation][1] ## Option Settings @@ -76,4 +77,4 @@ This resource exports the following attributes in addition to the arguments abov [1]: https://docs.aws.amazon.com/elasticbeanstalk/latest/dg/concepts.platforms.html - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/elastic_beanstalk_environment.html.markdown b/website/docs/cdktf/python/r/elastic_beanstalk_environment.html.markdown index bcc516fc0114..73ccb55bfabf 100644 --- a/website/docs/cdktf/python/r/elastic_beanstalk_environment.html.markdown +++ b/website/docs/cdktf/python/r/elastic_beanstalk_environment.html.markdown @@ -47,29 +47,30 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
* `name` - (Required) A unique name for this Environment. This name is used in the application URL -* `application` – (Required) Name of the application that contains the version +* `application` - (Required) Name of the application that contains the version to be deployed * `cname_prefix` - (Optional) Prefix to use for the fully qualified DNS name of the Environment. * `description` - (Optional) Short description of the Environment * `tier` - (Optional) Elastic Beanstalk Environment tier. Valid values are `Worker` or `WebServer`. If tier is left blank `WebServer` will be used. -* `setting` – (Optional) Option settings to configure the new Environment. These +* `setting` - (Optional) Option settings to configure the new Environment. These override specific values that are set as defaults. The format is detailed below in [Option Settings](#option-settings) -* `solution_stack_name` – (Optional) A solution stack to base your environment +* `solution_stack_name` - (Optional) A solution stack to base your environment off of. Example stacks can be found in the [Amazon API documentation][1] -* `template_name` – (Optional) The name of the Elastic Beanstalk Configuration +* `template_name` - (Optional) The name of the Elastic Beanstalk Configuration template to use in deployment -* `platform_arn` – (Optional) The [ARN][2] of the Elastic Beanstalk [Platform][3] +* `platform_arn` - (Optional) The [ARN][2] of the Elastic Beanstalk [Platform][3] to use in deployment * `wait_for_ready_timeout` - (Default `20m`) The maximum [duration](https://golang.org/pkg/time/#ParseDuration) that Terraform should wait for an Elastic Beanstalk Environment to be in a ready state before timing out. -* `poll_interval` – The time between polling the AWS API to +* `poll_interval` - The time between polling the AWS API to check if changes have been applied. Use this to adjust the rate of API calls for any `create` or `update` action. Minimum `10s`, maximum `180s`. 
Omit this to use the default behavior, which is an exponential backoff @@ -134,9 +135,9 @@ This resource exports the following attributes in addition to the arguments abov * `description` - Description of the Elastic Beanstalk Environment. * `tier` - The environment tier specified. * `tags_all` - A map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). -* `application` – The Elastic Beanstalk Application specified for this environment. -* `setting` – Settings specifically set for this Environment. -* `all_settings` – List of all option settings configured in this Environment. These +* `application` - The Elastic Beanstalk Application specified for this environment. +* `setting` - Settings specifically set for this Environment. +* `all_settings` - List of all option settings configured in this Environment. These are a combination of default settings and their overrides from `setting` in the configuration. * `cname` - Fully qualified DNS name for this Environment. @@ -177,4 +178,4 @@ Using `terraform import`, import Elastic Beanstalk Environments using the `id`. % terraform import aws_elastic_beanstalk_environment.prodenv e-rpqsewtp2j ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/elasticache_cluster.html.markdown b/website/docs/cdktf/python/r/elasticache_cluster.html.markdown index bf11a585d0f4..8364dea84fe9 100644 --- a/website/docs/cdktf/python/r/elasticache_cluster.html.markdown +++ b/website/docs/cdktf/python/r/elasticache_cluster.html.markdown @@ -201,26 +201,24 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -The following arguments are required: +This resource supports the following arguments: -* `cluster_id` – (Required) Group identifier. ElastiCache converts this name to lowercase. 
Changing this value will re-create the resource. -* `engine` – (Optional, Required if `replication_group_id` is not specified) Name of the cache engine to be used for this cache cluster. Valid values are `memcached`, `redis` and `valkey`. -* `node_type` – (Required unless `replication_group_id` is provided) The instance class used. +* `cluster_id` - (Required) Group identifier. ElastiCache converts this name to lowercase. Changing this value will re-create the resource. +* `engine` - (Optional, Required if `replication_group_id` is not specified) Name of the cache engine to be used for this cache cluster. Valid values are `memcached`, `redis` and `valkey`. +* `node_type` - (Required unless `replication_group_id` is provided) The instance class used. See AWS documentation for information on [supported node types for Valkey or Redis OSS](https://docs.aws.amazon.com/AmazonElastiCache/latest/dg/CacheNodes.SupportedTypes.html#CacheNodes.CurrentGen) and [guidance on selecting node types for Valkey or Redis OSS](https://docs.aws.amazon.com/AmazonElastiCache/latest/dg/CacheNodes.SelectSize.html#CacheNodes.SelectSize.redis). See AWS documentation for information on [supported node types for Memcached](https://docs.aws.amazon.com/AmazonElastiCache/latest/dg/CacheNodes.SupportedTypes.html#CacheNodes.CurrentGen-Memcached) and [guidance on selecting node types for Memcached](https://docs.aws.amazon.com/AmazonElastiCache/latest/dg/CacheNodes.SelectSize.html#CacheNodes.SelectSize.Mem). For Memcached, changing this value will re-create the resource. -* `num_cache_nodes` – (Required unless `replication_group_id` is provided) The initial number of cache nodes that the cache cluster will have. For Redis, this value must be 1. For Memcached, this value must be between 1 and 40. If this number is reduced on subsequent runs, the highest numbered nodes will be removed. 
-* `parameter_group_name` – (Required unless `replication_group_id` is provided) The name of the parameter group to associate with this cache cluster. - -The following arguments are optional: - +* `num_cache_nodes` - (Required unless `replication_group_id` is provided) The initial number of cache nodes that the cache cluster will have. For Redis, this value must be 1. For Memcached, this value must be between 1 and 40. If this number is reduced on subsequent runs, the highest numbered nodes will be removed. +* `parameter_group_name` - (Required unless `replication_group_id` is provided) The name of the parameter group to associate with this cache cluster. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `apply_immediately` - (Optional) Whether any database modifications are applied immediately, or during the next maintenance window. Default is `false`. See [Amazon ElastiCache Documentation for more information](https://docs.aws.amazon.com/AmazonElastiCache/latest/APIReference/API_ModifyCacheCluster.html). * `auto_minor_version_upgrade` - (Optional) Specifies whether minor version engine upgrades will be applied automatically to the underlying Cache Cluster instances during the maintenance window. Only supported for engine type `"redis"` and if the engine version is 6 or higher. Defaults to `true`. * `availability_zone` - (Optional) Availability Zone for the cache cluster. If you want to create cache nodes in multi-az, use `preferred_availability_zones` instead. Default: System chosen Availability Zone. Changing this value will re-create the resource. 
* `az_mode` - (Optional, Memcached only) Whether the nodes in this Memcached node group are created in a single Availability Zone or created across multiple Availability Zones in the cluster's region. Valid values for this parameter are `single-az` or `cross-az`, default is `single-az`. If you want to choose `cross-az`, `num_cache_nodes` must be greater than `1`. -* `engine_version` – (Optional) Version number of the cache engine to be used. +* `engine_version` - (Optional) Version number of the cache engine to be used. If not set, defaults to the latest version. See [Describe Cache Engine Versions](https://docs.aws.amazon.com/cli/latest/reference/elasticache/describe-cache-engine-versions.html) in the AWS Documentation for supported versions. When `engine` is `redis` and the version is 7 or higher, the major and minor version should be set, e.g., `7.2`. @@ -231,22 +229,22 @@ The following arguments are optional: * `final_snapshot_identifier` - (Optional, Redis only) Name of your final cluster snapshot. If omitted, no final snapshot will be made. * `ip_discovery` - (Optional) The IP version to advertise in the discovery protocol. Valid values are `ipv4` or `ipv6`. * `log_delivery_configuration` - (Optional, Redis only) Specifies the destination and format of Redis [SLOWLOG](https://redis.io/commands/slowlog) or Redis [Engine Log](https://docs.aws.amazon.com/AmazonElastiCache/latest/dg/Log_Delivery.html#Log_contents-engine-log). See the documentation on [Amazon ElastiCache](https://docs.aws.amazon.com/AmazonElastiCache/latest/dg/Log_Delivery.html). See [Log Delivery Configuration](#log-delivery-configuration) below for more details. -* `maintenance_window` – (Optional) Specifies the weekly time range for when maintenance +* `maintenance_window` - (Optional) Specifies the weekly time range for when maintenance on the cache cluster is performed. The format is `ddd:hh24:mi-ddd:hh24:mi` (24H Clock UTC). The minimum maintenance window is a 60 minute period. 
Example: `sun:05:00-sun:09:00`. * `network_type` - (Optional) The IP versions for cache cluster connections. IPv6 is supported with Redis engine `6.2` onword or Memcached version `1.6.6` for all [Nitro system](https://aws.amazon.com/ec2/nitro/) instances. Valid values are `ipv4`, `ipv6` or `dual_stack`. -* `notification_topic_arn` – (Optional) ARN of an SNS topic to send ElastiCache notifications to. Example: `arn:aws:sns:us-east-1:012345678999:my_sns_topic`. +* `notification_topic_arn` - (Optional) ARN of an SNS topic to send ElastiCache notifications to. Example: `arn:aws:sns:us-east-1:012345678999:my_sns_topic`. * `outpost_mode` - (Optional) Specify the outpost mode that will apply to the cache cluster creation. Valid values are `"single-outpost"` and `"cross-outpost"`, however AWS currently only supports `"single-outpost"` mode. -* `port` – (Optional) The port number on which each of the cache nodes will accept connections. For Memcached the default is 11211, and for Redis the default port is 6379. Cannot be provided with `replication_group_id`. Changing this value will re-create the resource. +* `port` - (Optional) The port number on which each of the cache nodes will accept connections. For Memcached the default is 11211, and for Redis the default port is 6379. Cannot be provided with `replication_group_id`. Changing this value will re-create the resource. * `preferred_availability_zones` - (Optional, Memcached only) List of the Availability Zones in which cache nodes are created. If you are creating your cluster in an Amazon VPC you can only locate nodes in Availability Zones that are associated with the subnets in the selected subnet group. The number of Availability Zones listed must equal the value of `num_cache_nodes`. If you want all the nodes in the same Availability Zone, use `availability_zone` instead, or repeat the Availability Zone multiple times in the list. Default: System chosen Availability Zones. 
Detecting drift of existing node availability zone is not currently supported. Updating this argument by itself to migrate existing node availability zones is not currently supported and will show a perpetual difference. * `preferred_outpost_arn` - (Optional, Required if `outpost_mode` is specified) The outpost ARN in which the cache cluster will be created. * `replication_group_id` - (Optional, Required if `engine` is not specified) ID of the replication group to which this cluster should belong. If this parameter is specified, the cluster is added to the specified replication group as a read replica; otherwise, the cluster is a standalone primary that is not part of any replication group. -* `security_group_ids` – (Optional, VPC only) One or more VPC security groups associated with the cache cluster. Cannot be provided with `replication_group_id.` -* `snapshot_arns` – (Optional, Redis only) Single-element string list containing an Amazon Resource Name (ARN) of a Redis RDB snapshot file stored in Amazon S3. The object name cannot contain any commas. Changing `snapshot_arns` forces a new resource. +* `security_group_ids` - (Optional, VPC only) One or more VPC security groups associated with the cache cluster. Cannot be provided with `replication_group_id.` +* `snapshot_arns` - (Optional, Redis only) Single-element string list containing an Amazon Resource Name (ARN) of a Redis RDB snapshot file stored in Amazon S3. The object name cannot contain any commas. Changing `snapshot_arns` forces a new resource. * `snapshot_name` - (Optional, Redis only) Name of a snapshot from which to restore data into the new node group. Changing `snapshot_name` forces a new resource. * `snapshot_retention_limit` - (Optional, Redis only) Number of days for which ElastiCache will retain automatic cache cluster snapshots before deleting them. For example, if you set SnapshotRetentionLimit to 5, then a snapshot that was taken today will be retained for 5 days before being deleted. 
If the value of SnapshotRetentionLimit is set to zero (0), backups are turned off. Please note that setting a `snapshot_retention_limit` is not supported on cache.t1.micro cache nodes * `snapshot_window` - (Optional, Redis only) Daily time range (in UTC) during which ElastiCache will begin taking a daily snapshot of your cache cluster. Example: 05:00-09:00 -* `subnet_group_name` – (Optional, VPC only) Name of the subnet group to be used for the cache cluster. Changing this value will re-create the resource. Cannot be provided with `replication_group_id.` +* `subnet_group_name` - (Optional, VPC only) Name of the subnet group to be used for the cache cluster. Changing this value will re-create the resource. Cannot be provided with `replication_group_id.` * `tags` - (Optional) Map of tags to assign to the resource. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. * `transit_encryption_enabled` - (Optional) Enable encryption in-transit. Supported with Memcached versions `1.6.12` and later, Valkey `7.2` and later, Redis OSS versions `3.2.6`, `4.0.10` and later, running in a VPC. See the [ElastiCache in-transit encryption documentation](https://docs.aws.amazon.com/AmazonElastiCache/latest/dg/in-transit-encryption.html#in-transit-encryption-constraints) for more details. @@ -303,4 +301,4 @@ Using `terraform import`, import ElastiCache Clusters using the `cluster_id`. 
Fo % terraform import aws_elasticache_cluster.my_cluster my_cluster ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/elasticache_global_replication_group.html.markdown b/website/docs/cdktf/python/r/elasticache_global_replication_group.html.markdown index c2ff8bcf76b9..ec23b0c390bd 100644 --- a/website/docs/cdktf/python/r/elasticache_global_replication_group.html.markdown +++ b/website/docs/cdktf/python/r/elasticache_global_replication_group.html.markdown @@ -58,8 +58,7 @@ The initial Redis version is determined by the version set on the primary replic However, once it is part of a Global Replication Group, the Global Replication Group manages the version of all member replication groups. -The member replication groups must have [`lifecycle.ignore_changes[engine_version]`](https://www.terraform.io/language/meta-arguments/lifecycle) set, -or Terraform will always return a diff. +The provider is configured to ignore changes to `engine`, `engine_version` and `parameter_group_name` inside `aws_elasticache_replication_group` resources if they belong to a global replication group. In this example, the primary replication group will be created with Redis 6.0, @@ -68,7 +67,6 @@ The secondary replication group will be created with Redis 6.2. ```python # DO NOT EDIT. 
Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug -from cdktf import TerraformResourceLifecycle, TerraformResourceLifecycle from constructs import Construct from cdktf import TerraformStack # @@ -84,9 +82,6 @@ class MyConvertedCode(TerraformStack): description="primary replication group", engine="redis", engine_version="6.0", - lifecycle=TerraformResourceLifecycle( - ignore_changes=[engine_version] - ), node_type="cache.m5.large", num_cache_clusters=1, replication_group_id="example-primary" @@ -99,9 +94,6 @@ class MyConvertedCode(TerraformStack): ElasticacheReplicationGroup(self, "secondary", description="secondary replication group", global_replication_group_id=example.global_replication_group_id, - lifecycle=TerraformResourceLifecycle( - ignore_changes=[engine_version] - ), num_cache_clusters=1, provider=other_region, replication_group_id="example-secondary" @@ -112,13 +104,19 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `automatic_failover_enabled` - (Optional) Specifies whether read-only replicas will be automatically promoted to read/write primary if the existing primary fails. When creating, by default the Global Replication Group inherits the automatic failover setting of the primary replication group. * `cache_node_type` - (Optional) The instance class used. See AWS documentation for information on [supported node types](https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/CacheNodes.SupportedTypes.html) and [guidance on selecting node types](https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/nodes-select-size.html). 
When creating, by default the Global Replication Group inherits the node type of the primary replication group. -* `engine_version` - (Optional) Redis version to use for the Global Replication Group. +* `engine` - (Optional) The name of the cache engine to be used for the clusters in this global replication group. + When creating, by default the Global Replication Group inherits the engine of the primary replication group. + If an engine is specified, the Global Replication Group and all member replication groups will be upgraded to this engine. + Valid values are `redis` or `valkey`. + Default is `redis` if `engine_version` is specified. +* `engine_version` - (Optional) Engine version to use for the Global Replication Group. When creating, by default the Global Replication Group inherits the version of the primary replication group. If a version is specified, the Global Replication Group and all member replication groups will be upgraded to this version. Cannot be downgraded without replacing the Global Replication Group and all member replication groups. @@ -126,12 +124,12 @@ This resource supports the following arguments: When the version is 6, the major and minor version can be set, e.g., `6.2`, or the minor version can be unspecified which will use the latest version at creation time, e.g., `6.x`. The actual engine version used is returned in the attribute `engine_version_actual`, see [Attribute Reference](#attribute-reference) below. -* `global_replication_group_id_suffix` – (Required) The suffix name of a Global Datastore. If `global_replication_group_id_suffix` is changed, creates a new resource. -* `primary_replication_group_id` – (Required) The ID of the primary cluster that accepts writes and will replicate updates to the secondary cluster. If `primary_replication_group_id` is changed, creates a new resource. -* `global_replication_group_description` – (Optional) A user-created description for the global replication group. 
+* `global_replication_group_id_suffix` - (Required) The suffix name of a Global Datastore. If `global_replication_group_id_suffix` is changed, creates a new resource. +* `primary_replication_group_id` - (Required) The ID of the primary cluster that accepts writes and will replicate updates to the secondary cluster. If `primary_replication_group_id` is changed, creates a new resource. +* `global_replication_group_description` - (Optional) A user-created description for the global replication group. * `num_node_groups` - (Optional) The number of node groups (shards) on the global replication group. * `parameter_group_name` - (Optional) An ElastiCache Parameter Group to use for the Global Replication Group. - Required when upgrading a major engine version, but will be ignored if left configured after the upgrade is complete. + Required when upgrading an engine or major engine version, but will be ignored if left configured after the upgrade is complete. Specifying without a major version upgrade will fail. Note that ElastiCache creates a copy of this parameter group for each member replication group. @@ -145,7 +143,6 @@ This resource exports the following attributes in addition to the arguments abov * `at_rest_encryption_enabled` - A flag that indicate whether the encryption at rest is enabled. * `auth_token_enabled` - A flag that indicate whether AuthToken (password) is enabled. * `cluster_enabled` - Indicates whether the Global Datastore is cluster enabled. -* `engine` - The name of the cache engine to be used for the clusters in this global replication group. * `global_replication_group_id` - The full ID of the global replication group. * `global_node_groups` - Set of node groups (shards) on the global replication group. 
Has the values: @@ -186,4 +183,4 @@ Using `terraform import`, import ElastiCache Global Replication Groups using the % terraform import aws_elasticache_global_replication_group.my_global_replication_group okuqm-global-replication-group-1 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/elasticache_parameter_group.html.markdown b/website/docs/cdktf/python/r/elasticache_parameter_group.html.markdown index 37bf0f1d3159..a6dfc6ca16eb 100644 --- a/website/docs/cdktf/python/r/elasticache_parameter_group.html.markdown +++ b/website/docs/cdktf/python/r/elasticache_parameter_group.html.markdown @@ -46,6 +46,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The name of the ElastiCache parameter group. * `family` - (Required) The family of the ElastiCache parameter group. * `description` - (Optional) The description of the ElastiCache parameter group. Defaults to "Managed by Terraform". @@ -90,4 +91,4 @@ Using `terraform import`, import ElastiCache Parameter Groups using the `name`. 
% terraform import aws_elasticache_parameter_group.default redis-params ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/elasticache_replication_group.html.markdown b/website/docs/cdktf/python/r/elasticache_replication_group.html.markdown index e32eff935ae0..982686a996c6 100644 --- a/website/docs/cdktf/python/r/elasticache_replication_group.html.markdown +++ b/website/docs/cdktf/python/r/elasticache_replication_group.html.markdown @@ -252,17 +252,18 @@ class MyConvertedCode(TerraformStack): The following arguments are required: -* `description` – (Required) User-created description for the replication group. Must not be empty. -* `replication_group_id` – (Required) Replication group identifier. This parameter is stored as a lowercase string. +* `description` - (Required) User-created description for the replication group. Must not be empty. +* `replication_group_id` - (Required) Replication group identifier. This parameter is stored as a lowercase string. The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `apply_immediately` - (Optional) Specifies whether any modifications are applied immediately, or during the next maintenance window. Default is `false`. * `at_rest_encryption_enabled` - (Optional) Whether to enable encryption at rest. When `engine` is `redis`, default is `false`. When `engine` is `valkey`, default is `true`. * `auth_token` - (Optional) Password used to access a password protected server. Can be specified only if `transit_encryption_enabled = true`. -* `auth_token_update_strategy` - (Optional) Strategy to use when updating the `auth_token`. Valid values are `SET`, `ROTATE`, and `DELETE`. 
Defaults to `ROTATE`. +* `auth_token_update_strategy` - (Optional) Strategy to use when updating the `auth_token`. Valid values are `SET`, `ROTATE`, and `DELETE`. Required if `auth_token` is set. * `auto_minor_version_upgrade` - (Optional) Specifies whether minor version engine upgrades will be applied automatically to the underlying Cache Cluster instances during the maintenance window. Only supported for engine types `"redis"` and `"valkey"` and if the engine version is 6 or higher. Defaults to `true`. @@ -283,7 +284,7 @@ The following arguments are optional: * `ip_discovery` - (Optional) The IP version to advertise in the discovery protocol. Valid values are `ipv4` or `ipv6`. * `kms_key_id` - (Optional) The ARN of the key that you wish to use if encrypting at rest. If not supplied, uses service managed encryption. Can be specified only if `at_rest_encryption_enabled = true`. * `log_delivery_configuration` - (Optional, Redis only) Specifies the destination and format of Redis OSS/Valkey [SLOWLOG](https://redis.io/commands/slowlog) or Redis OSS/Valkey [Engine Log](https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/Log_Delivery.html#Log_contents-engine-log). See the documentation on [Amazon ElastiCache](https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/Log_Delivery.html#Log_contents-engine-log). See [Log Delivery Configuration](#log-delivery-configuration) below for more details. -* `maintenance_window` – (Optional) Specifies the weekly time range for when maintenance on the cache cluster is performed. The format is `ddd:hh24:mi-ddd:hh24:mi` (24H Clock UTC). The minimum maintenance window is a 60 minute period. Example: `sun:05:00-sun:09:00` +* `maintenance_window` - (Optional) Specifies the weekly time range for when maintenance on the cache cluster is performed. The format is `ddd:hh24:mi-ddd:hh24:mi` (24H Clock UTC). The minimum maintenance window is a 60 minute period. 
Example: `sun:05:00-sun:09:00` * `multi_az_enabled` - (Optional) Specifies whether to enable Multi-AZ Support for the replication group. If `true`, `automatic_failover_enabled` must also be enabled. Defaults to `false`. @@ -292,7 +293,7 @@ The following arguments are optional: See AWS documentation for information on [supported node types](https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/CacheNodes.SupportedTypes.html) and [guidance on selecting node types](https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/nodes-select-size.html). Required unless `global_replication_group_id` is set. Cannot be set if `global_replication_group_id` is set. -* `notification_topic_arn` – (Optional) ARN of an SNS topic to send ElastiCache notifications to. Example: `arn:aws:sns:us-east-1:012345678999:my_sns_topic` +* `notification_topic_arn` - (Optional) ARN of an SNS topic to send ElastiCache notifications to. Example: `arn:aws:sns:us-east-1:012345678999:my_sns_topic` * `num_cache_clusters` - (Optional) Number of cache clusters (primary and replicas) this replication group will have. If `automatic_failover_enabled` or `multi_az_enabled` are `true`, must be at least 2. Updates will occur before other modifications. @@ -302,7 +303,7 @@ The following arguments are optional: Changing this number will trigger a resizing operation before other settings modifications. Conflicts with `num_cache_clusters`. * `parameter_group_name` - (Optional) Name of the parameter group to associate with this replication group. If this argument is omitted, the default cache parameter group for the specified engine is used. To enable "cluster mode", i.e., data sharding, use a parameter group that has the parameter `cluster-enabled` set to true. -* `port` – (Optional) Port number on which each of the cache nodes will accept connections. For Memcache the default is 11211, and for Redis the default port is 6379. 
+* `port` - (Optional) Port number on which each of the cache nodes will accept connections. For Memcache the default is 11211, and for Redis the default port is 6379. * `preferred_cache_cluster_azs` - (Optional) List of EC2 availability zones in which the replication group's cache clusters will be created. The order of the availability zones in the list is considered. The first item in the list will be the primary node. Ignored when updating. * `replicas_per_node_group` - (Optional) Number of replica nodes in each node group. Changing this number will trigger a resizing operation before other settings modifications. @@ -311,7 +312,7 @@ The following arguments are optional: Can only be set if `num_node_groups` is set. * `security_group_ids` - (Optional) IDs of one or more Amazon VPC security groups associated with this replication group. Use this parameter only when you are creating a replication group in an Amazon Virtual Private Cloud. * `security_group_names` - (Optional) Names of one or more Amazon VPC security groups associated with this replication group. Use this parameter only when you are creating a replication group in an Amazon Virtual Private Cloud. -* `snapshot_arns` – (Optional) List of ARNs that identify Redis RDB snapshot files stored in Amazon S3. The names object names cannot contain any commas. +* `snapshot_arns` - (Optional) List of ARNs that identify Redis RDB snapshot files stored in Amazon S3. The names object names cannot contain any commas. * `snapshot_name` - (Optional) Name of a snapshot from which to restore data into the new node group. Changing the `snapshot_name` forces a new resource. * `snapshot_retention_limit` - (Optional, Redis only) Number of days for which ElastiCache will retain automatic cache cluster snapshots before deleting them. For example, if you set SnapshotRetentionLimit to 5, then a snapshot that was taken today will be retained for 5 days before being deleted. 
If the value of `snapshot_retention_limit` is set to zero (0), backups are turned off. Please note that setting a `snapshot_retention_limit` is not supported on cache.t1.micro cache nodes * `snapshot_window` - (Optional, Redis only) Daily time range (in UTC) during which ElastiCache will begin taking a daily snapshot of your cache cluster. The minimum snapshot window is a 60 minute period. Example: `05:00-09:00` @@ -382,4 +383,4 @@ Using `terraform import`, import ElastiCache Replication Groups using the `repli % terraform import aws_elasticache_replication_group.my_replication_group replication-group-1 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/elasticache_reserved_cache_node.html.markdown b/website/docs/cdktf/python/r/elasticache_reserved_cache_node.html.markdown index 050fc2b8d1de..f7ee8f77666c 100644 --- a/website/docs/cdktf/python/r/elasticache_reserved_cache_node.html.markdown +++ b/website/docs/cdktf/python/r/elasticache_reserved_cache_node.html.markdown @@ -56,6 +56,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `cache_node_count` - (Optional) Number of cache node instances to reserve. Default value is `1`. * `id` - (Optional) Customer-specified identifier to track this reservation. @@ -68,7 +69,7 @@ This resource exports the following attributes in addition to the arguments abov * `arn` - ARN for the reserved cache node. * `duration` - Duration of the reservation as an RFC3339 duration. -* `fixed_price` – Fixed price charged for this reserved cache node. +* `fixed_price` - Fixed price charged for this reserved cache node. 
* `cache_node_type` - Node type for the reserved cache nodes. * `offering_type` - Offering type of this reserved cache node. * `product_description` - Engine type for the reserved cache node. @@ -111,4 +112,4 @@ Using `terraform import`, import ElastiCache Reserved Cache Node using the `id`. % terraform import aws_elasticache_reserved_cache_node.example CustomReservationID ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/elasticache_serverless_cache.html.markdown b/website/docs/cdktf/python/r/elasticache_serverless_cache.html.markdown index 3c99616ade4d..15a8848e5a25 100644 --- a/website/docs/cdktf/python/r/elasticache_serverless_cache.html.markdown +++ b/website/docs/cdktf/python/r/elasticache_serverless_cache.html.markdown @@ -133,21 +133,22 @@ class MyConvertedCode(TerraformStack): The following arguments are required: -* `engine` – (Required) Name of the cache engine to be used for this cache cluster. Valid values are `memcached`, `redis` or `valkey`. -* `name` – (Required) The Cluster name which serves as a unique identifier to the serverless cache +* `engine` - (Required) Name of the cache engine to be used for this cache cluster. Valid values are `memcached`, `redis` or `valkey`. +* `name` - (Required) The Cluster name which serves as a unique identifier to the serverless cache The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `cache_usage_limits` - (Optional) Sets the cache usage limits for storage and ElastiCache Processing Units for the cache. See [`cache_usage_limits` Block](#cache_usage_limits-block) for details. 
* `daily_snapshot_time` - (Optional) The daily time that snapshots will be created from the new serverless cache. Only supported for engine types `"redis"` or `"valkey"`. Defaults to `0`. * `description` - (Optional) User-provided description for the serverless cache. The default is NULL. * `kms_key_id` - (Optional) ARN of the customer managed key for encrypting the data at rest. If no KMS key is provided, a default service key is used. -* `major_engine_version` – (Optional) The version of the cache engine that will be used to create the serverless cache. +* `major_engine_version` - (Optional) The version of the cache engine that will be used to create the serverless cache. See [Describe Cache Engine Versions](https://docs.aws.amazon.com/cli/latest/reference/elasticache/describe-cache-engine-versions.html) in the AWS Documentation for supported versions. * `security_group_ids` - (Optional) A list of the one or more VPC security groups to be associated with the serverless cache. The security group will authorize traffic access for the VPC end-point (private-link). If no other information is given this will be the VPC’s Default Security Group that is associated with the cluster VPC end-point. * `snapshot_arns_to_restore` - (Optional, Redis only) The list of ARN(s) of the snapshot that the new serverless cache will be created from. Available for Redis only. * `snapshot_retention_limit` - (Optional, Redis only) The number of snapshots that will be retained for the serverless cache that is being created. As new snapshots beyond this limit are added, the oldest snapshots will be deleted on a rolling basis. Available for Redis only. -* `subnet_ids` – (Optional) A list of the identifiers of the subnets where the VPC endpoint for the serverless cache will be deployed. All the subnetIds must belong to the same VPC. +* `subnet_ids` - (Optional) A list of the identifiers of the subnets where the VPC endpoint for the serverless cache will be deployed. 
All the subnetIds must belong to the same VPC. * `tags` - (Optional) Map of tags to assign to the resource. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. * `user_group_id` - (Optional) The identifier of the UserGroup to be associated with the serverless cache. Available for Redis only. Default is NULL. @@ -232,4 +233,4 @@ Using `terraform import`, import ElastiCache Serverless Cache using the `name`. % terraform import aws_elasticache_serverless_cache.my_cluster my_cluster ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/elasticache_subnet_group.html.markdown b/website/docs/cdktf/python/r/elasticache_subnet_group.html.markdown index 899f9d02fedd..3a7587e33624 100644 --- a/website/docs/cdktf/python/r/elasticache_subnet_group.html.markdown +++ b/website/docs/cdktf/python/r/elasticache_subnet_group.html.markdown @@ -54,9 +54,10 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: -* `name` – (Required) Name for the cache subnet group. ElastiCache converts this name to lowercase. -* `description` – (Optional) Description for the cache subnet group. Defaults to "Managed by Terraform". -* `subnet_ids` – (Required) List of VPC Subnet IDs for the cache subnet group +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `name` - (Required) Name for the cache subnet group. ElastiCache converts this name to lowercase. +* `description` - (Optional) Description for the cache subnet group. Defaults to "Managed by Terraform". 
+* `subnet_ids` - (Required) List of VPC Subnet IDs for the cache subnet group * `tags` - (Optional) Key-value map of resource tags. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. ## Attribute Reference @@ -91,4 +92,4 @@ Using `terraform import`, import ElastiCache Subnet Groups using the `name`. For % terraform import aws_elasticache_subnet_group.bar tf-test-cache-subnet ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/elasticache_user.html.markdown b/website/docs/cdktf/python/r/elasticache_user.html.markdown index ec11e3e9b961..1b185c97f9e6 100644 --- a/website/docs/cdktf/python/r/elasticache_user.html.markdown +++ b/website/docs/cdktf/python/r/elasticache_user.html.markdown @@ -96,6 +96,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `authentication_mode` - (Optional) Denotes the user's authentication properties. Detailed below. * `no_password_required` - (Optional) Indicates a password is not required for this user. * `passwords` - (Optional) Passwords used for this user. You can create up to two passwords for each user. @@ -146,4 +147,4 @@ Using `terraform import`, import ElastiCache users using the `user_id`. 
For exam % terraform import aws_elasticache_user.my_user userId1 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/elasticache_user_group.html.markdown b/website/docs/cdktf/python/r/elasticache_user_group.html.markdown index 18dd9acffebd..e4b650946e3c 100644 --- a/website/docs/cdktf/python/r/elasticache_user_group.html.markdown +++ b/website/docs/cdktf/python/r/elasticache_user_group.html.markdown @@ -52,6 +52,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `user_ids` - (Optional) The list of user IDs that belong to the user group. * `tags` - (Optional) Key-value map of resource tags. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. 
@@ -88,4 +89,4 @@ Using `terraform import`, import ElastiCache user groups using the `user_group_i % terraform import aws_elasticache_user_group.my_user_group userGoupId1 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/elasticache_user_group_association.html.markdown b/website/docs/cdktf/python/r/elasticache_user_group_association.html.markdown index df1cb3120f34..337af7e9ed10 100644 --- a/website/docs/cdktf/python/r/elasticache_user_group_association.html.markdown +++ b/website/docs/cdktf/python/r/elasticache_user_group_association.html.markdown @@ -66,8 +66,9 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -The following arguments are required: +This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `user_group_id` - (Required) ID of the user group. * `user_id` - (Required) ID of the user to associated with the user group. @@ -107,4 +108,4 @@ Using `terraform import`, import ElastiCache user group associations using the ` % terraform import aws_elasticache_user_group_association.example userGoupId1,userId ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/elasticsearch_domain.html.markdown b/website/docs/cdktf/python/r/elasticsearch_domain.html.markdown index 1188356187d6..298ca519ad08 100644 --- a/website/docs/cdktf/python/r/elasticsearch_domain.html.markdown +++ b/website/docs/cdktf/python/r/elasticsearch_domain.html.markdown @@ -68,7 +68,7 @@ class MyConvertedCode(TerraformStack): # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. 
data_aws_region_current.override_logical_id("current") ElasticsearchDomain(self, "example", - access_policies="{\n \"Version\": \"2012-10-17\",\n \"Statement\": [\n {\n \"Action\": \"es:*\",\n \"Principal\": \"*\",\n \"Effect\": \"Allow\",\n \"Resource\": \"arn:aws:es:${" + data_aws_region_current.name + "}:${" + current.account_id + "}:domain/${" + domain.value + "}/*\",\n \"Condition\": {\n \"IpAddress\": {\"aws:SourceIp\": [\"66.193.100.22/32\"]}\n }\n }\n ]\n}\n\n", + access_policies="{\n \"Version\": \"2012-10-17\",\n \"Statement\": [\n {\n \"Action\": \"es:*\",\n \"Principal\": \"*\",\n \"Effect\": \"Allow\",\n \"Resource\": \"arn:aws:es:${" + data_aws_region_current.region + "}:${" + current.account_id + "}:domain/${" + domain.value + "}/*\",\n \"Condition\": {\n \"IpAddress\": {\"aws:SourceIp\": [\"66.193.100.22/32\"]}\n }\n }\n ]\n}\n\n", domain_name=domain.string_value ) ``` @@ -193,7 +193,7 @@ class MyConvertedCode(TerraformStack): # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. 
data_aws_subnets_selected.override_logical_id("selected") aws_elasticsearch_domain_es = ElasticsearchDomain(self, "es_8", - access_policies="{\n\t\"Version\": \"2012-10-17\",\n\t\"Statement\": [\n\t\t{\n\t\t\t\"Action\": \"es:*\",\n\t\t\t\"Principal\": \"*\",\n\t\t\t\"Effect\": \"Allow\",\n\t\t\t\"Resource\": \"arn:aws:es:${" + data_aws_region_current.name + "}:${" + current.account_id + "}:domain/${" + domain.value + "}/*\"\n\t\t}\n\t]\n}\n\n", + access_policies="{\n\t\"Version\": \"2012-10-17\",\n\t\"Statement\": [\n\t\t{\n\t\t\t\"Action\": \"es:*\",\n\t\t\t\"Principal\": \"*\",\n\t\t\t\"Effect\": \"Allow\",\n\t\t\t\"Resource\": \"arn:aws:es:${" + data_aws_region_current.region + "}:${" + current.account_id + "}:domain/${" + domain.value + "}/*\"\n\t\t}\n\t]\n}\n\n", advanced_options={ "rest.action.multi.allow_explicit_index": "true" }, @@ -227,6 +227,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `access_policies` - (Optional) IAM policy document specifying the access policies for the domain. * `advanced_options` - (Optional) Key-value string pairs to specify advanced configuration options. Note that the values for these configuration options must be strings (wrapped in quotes) or they may be wrong and cause a perpetual diff, causing Terraform to want to recreate your Elasticsearch domain on every apply. * `advanced_security_options` - (Optional) Configuration block for [fine-grained access control](https://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/fgac.html). Detailed below. @@ -359,7 +360,6 @@ This resource exports the following attributes in addition to the arguments abov * `arn` - ARN of the domain. 
* `domain_id` - Unique identifier for the domain. -* `domain_name` - Name of the Elasticsearch domain. * `endpoint` - Domain-specific endpoint used to submit index, search, and data upload requests. * `kibana_endpoint` - Domain-specific endpoint for kibana without https scheme. * `tags_all` - Map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). @@ -399,4 +399,4 @@ Using `terraform import`, import Elasticsearch domains using the `domain_name`. % terraform import aws_elasticsearch_domain.example domain_name ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/elasticsearch_domain_policy.html.markdown b/website/docs/cdktf/python/r/elasticsearch_domain_policy.html.markdown index b1b4d3a50c4f..3258c758a604 100644 --- a/website/docs/cdktf/python/r/elasticsearch_domain_policy.html.markdown +++ b/website/docs/cdktf/python/r/elasticsearch_domain_policy.html.markdown @@ -41,6 +41,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `domain_name` - (Required) Name of the domain. * `access_policies` - (Optional) IAM policy document specifying the access policies for the domain @@ -48,4 +49,4 @@ This resource supports the following arguments: This resource exports no additional attributes. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/elasticsearch_domain_saml_options.html.markdown b/website/docs/cdktf/python/r/elasticsearch_domain_saml_options.html.markdown index 271db340ac0b..efaa142fe5b8 100644 --- a/website/docs/cdktf/python/r/elasticsearch_domain_saml_options.html.markdown +++ b/website/docs/cdktf/python/r/elasticsearch_domain_saml_options.html.markdown @@ -65,6 +65,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `saml_options` - (Optional) The SAML authentication options for an AWS Elasticsearch Domain. ### saml_options @@ -113,4 +114,4 @@ Using `terraform import`, import Elasticsearch domains using the `domain_name`. % terraform import aws_elasticsearch_domain_saml_options.example domain_name ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/elasticsearch_vpc_endpoint.html.markdown b/website/docs/cdktf/python/r/elasticsearch_vpc_endpoint.html.markdown index d39ec6f540d3..9bea786bc8b2 100644 --- a/website/docs/cdktf/python/r/elasticsearch_vpc_endpoint.html.markdown +++ b/website/docs/cdktf/python/r/elasticsearch_vpc_endpoint.html.markdown @@ -44,6 +44,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
* `domain_arn` - (Required, Forces new resource) Specifies the Amazon Resource Name (ARN) of the domain to create the endpoint for * `vpc_options` - (Required) Options to specify the subnets and security groups for the endpoint. @@ -92,4 +93,4 @@ Using `terraform import`, import elasticsearch VPC endpoint connections using th % terraform import aws_elasticsearch_vpc_endpoint_connection.example endpoint-id ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/elastictranscoder_pipeline.html.markdown b/website/docs/cdktf/python/r/elastictranscoder_pipeline.html.markdown index 19bdbfe19031..4eac0f5ccc61 100644 --- a/website/docs/cdktf/python/r/elastictranscoder_pipeline.html.markdown +++ b/website/docs/cdktf/python/r/elastictranscoder_pipeline.html.markdown @@ -12,6 +12,8 @@ description: |- Provides an Elastic Transcoder pipeline resource. +~> **Warning:** This resource is deprecated. Use [AWS Elemental MediaConvert](https://aws.amazon.com/blogs/media/migrating-workflows-from-amazon-elastic-transcoder-to-aws-elemental-mediaconvert/) instead. AWS will [discontinue support for Amazon Elastic Transcoder](https://aws.amazon.com/blogs/media/support-for-amazon-elastic-transcoder-ending-soon/), effective November 13, 2025. + ## Example Usage ```python @@ -45,6 +47,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `aws_kms_key_arn` - (Optional) The AWS Key Management Service (AWS KMS) key that you want to use with this pipeline. 
* `content_config` - (Optional) The ContentConfig object specifies information about the Amazon S3 bucket in which you want Elastic Transcoder to save transcoded files and playlists. (documented below) * `content_config_permissions` - (Optional) The permissions for the `content_config` object. (documented below) @@ -136,4 +139,4 @@ Using `terraform import`, import Elastic Transcoder pipelines using the `id`. Fo % terraform import aws_elastictranscoder_pipeline.basic_pipeline 1407981661351-cttk8b ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/elastictranscoder_preset.html.markdown b/website/docs/cdktf/python/r/elastictranscoder_preset.html.markdown index 3d11d5080ff7..e24f488c32af 100644 --- a/website/docs/cdktf/python/r/elastictranscoder_preset.html.markdown +++ b/website/docs/cdktf/python/r/elastictranscoder_preset.html.markdown @@ -12,6 +12,8 @@ description: |- Provides an Elastic Transcoder preset resource. +~> **Warning:** This resource is deprecated. Use [AWS Elemental MediaConvert](https://aws.amazon.com/blogs/media/migrating-workflows-from-amazon-elastic-transcoder-to-aws-elemental-mediaconvert/) instead. AWS will [discontinue support for Amazon Elastic Transcoder](https://aws.amazon.com/blogs/media/support-for-amazon-elastic-transcoder-ending-soon/), effective November 13, 2025. + ## Example Usage ```python @@ -88,6 +90,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `audio` - (Optional, Forces new resource) Audio parameters object (documented below). 
* `audio_codec_options` - (Optional, Forces new resource) Codec options for the audio parameters (documented below) * `container` - (Required, Forces new resource) The container type for the output file. Valid values are `flac`, `flv`, `fmp4`, `gif`, `mp3`, `mp4`, `mpg`, `mxf`, `oga`, `ogg`, `ts`, and `webm`. @@ -198,4 +201,4 @@ Using `terraform import`, import Elastic Transcoder presets using the `id`. For % terraform import aws_elastictranscoder_preset.basic_preset 1407981661351-cttk8b ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/elb.html.markdown b/website/docs/cdktf/python/r/elb.html.markdown index b3a43e06adf5..2e9d2ccca71f 100644 --- a/website/docs/cdktf/python/r/elb.html.markdown +++ b/website/docs/cdktf/python/r/elb.html.markdown @@ -78,6 +78,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Optional) The name of the ELB. By default generated by Terraform. * `name_prefix` - (Optional, Forces new resource) Creates a unique name beginning with the specified prefix. Conflicts with `name`. @@ -180,4 +181,4 @@ Using `terraform import`, import ELBs using the `name`. 
For example: % terraform import aws_elb.bar elb-production-12345 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/elb_attachment.html.markdown b/website/docs/cdktf/python/r/elb_attachment.html.markdown index ab92965a7a0e..1295ed8a2c27 100644 --- a/website/docs/cdktf/python/r/elb_attachment.html.markdown +++ b/website/docs/cdktf/python/r/elb_attachment.html.markdown @@ -43,6 +43,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `elb` - (Required) The name of the ELB. * `instance` - (Required) Instance ID to place in the ELB pool. @@ -50,4 +51,4 @@ This resource supports the following arguments: This resource exports no additional attributes. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/emr_block_public_access_configuration.html.markdown b/website/docs/cdktf/python/r/emr_block_public_access_configuration.html.markdown index 6ee850aaa789..5f4e5c4d851c 100644 --- a/website/docs/cdktf/python/r/emr_block_public_access_configuration.html.markdown +++ b/website/docs/cdktf/python/r/emr_block_public_access_configuration.html.markdown @@ -119,6 +119,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
* `permitted_public_security_group_rule_range` - (Optional) Configuration block for defining permitted public security group rule port ranges. Can be defined multiple times per resource. Only valid if `block_public_security_group_rules` is set to `true`. ### `permitted_public_security_group_rule_range` @@ -157,4 +158,4 @@ Using `terraform import`, import the current EMR Block Public Access Configurati % terraform import aws_emr_block_public_access_configuration.example current ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/emr_cluster.html.markdown b/website/docs/cdktf/python/r/emr_cluster.html.markdown index 75a4d51b13f6..d469614d7ce5 100644 --- a/website/docs/cdktf/python/r/emr_cluster.html.markdown +++ b/website/docs/cdktf/python/r/emr_cluster.html.markdown @@ -463,6 +463,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `additional_info` - (Optional) JSON string for selecting additional features such as adding proxy information. Note: Currently there is no API to retrieve the value of this argument after EMR cluster creation from provider, therefore Terraform cannot detect drift from the actual EMR cluster if its value is changed outside Terraform. * `applications` - (Optional) A case-insensitive list of applications for Amazon EMR to install and configure when launching the cluster. For a list of applications available for each Amazon EMR release version, see the [Amazon EMR Release Guide](https://docs.aws.amazon.com/emr/latest/ReleaseGuide/emr-release-components.html). * `autoscaling_role` - (Optional) IAM role for automatic scaling policies. 
The IAM role provides permissions that the automatic scaling feature requires to launch and terminate EC2 instances in an instance group. @@ -505,6 +506,7 @@ class MyConvertedCode(TerraformStack): * `log_uri` - (Optional) S3 bucket to write the log files of the job flow. If a value is not provided, logs are not created. * `master_instance_fleet` - (Optional) Configuration block to use an [Instance Fleet](https://docs.aws.amazon.com/emr/latest/ManagementGuide/emr-instance-fleet.html) for the master node type. Cannot be specified if any `master_instance_group` configuration blocks are set. Detailed below. * `master_instance_group` - (Optional) Configuration block to use an [Instance Group](https://docs.aws.amazon.com/emr/latest/ManagementGuide/emr-instance-group-configuration.html#emr-plan-instance-groups) for the [master node type](https://docs.aws.amazon.com/emr/latest/ManagementGuide/emr-master-core-task-nodes.html#emr-plan-master). +* `os_release_label` - (Optional) Amazon Linux release for all nodes in a cluster launch RunJobFlow request. If not specified, Amazon EMR uses the latest validated Amazon Linux release for cluster launch. * `placement_group_config` - (Optional) The specified placement group configuration for an Amazon EMR cluster. * `scale_down_behavior` - (Optional) Way that individual Amazon EC2 instances terminate when an automatic scale-in activity occurs or an `instance group` is resized. * `security_configuration` - (Optional) Security configuration name to attach to the EMR cluster. Only valid for EMR clusters with `release_label` 4.8.0 or greater. @@ -515,6 +517,8 @@ class MyConvertedCode(TerraformStack): * `unhealthy_node_replacement` - (Optional) Whether whether Amazon EMR should gracefully replace core nodes that have degraded within the cluster. Default value is `false`. * `visible_to_all_users` - (Optional) Whether the job flow is visible to all IAM users of the AWS account associated with the job flow. Default value is `true`. 
+ **NOTE:** As per the [Amazon EMR API Reference](https://docs.aws.amazon.com/emr/latest/APIReference/API_RunJobFlow.html#EMR-RunJobFlow-request-VisibleToAllUsers), this argument is no longer supported. Do not set this argument, particularly to `false`, as it would lead to perpetual differences. + ### bootstrap_action * `args` - (Optional) List of command line arguments to pass to the bootstrap action script. @@ -681,7 +685,6 @@ This resource exports the following attributes in addition to the arguments abov * `release_label` - Release label for the Amazon EMR release. * `service_role` - IAM role that will be assumed by the Amazon EMR service to access AWS resources on your behalf. * `tags_all` - Map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). -* `visible_to_all_users` - Indicates whether the job flow is visible to all IAM users of the AWS account associated with the job flow. ## Import @@ -733,4 +736,4 @@ class MyConvertedCode(TerraformStack): ) ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/emr_instance_fleet.html.markdown b/website/docs/cdktf/python/r/emr_instance_fleet.html.markdown index dcca5929e92e..3ea55851e4c8 100644 --- a/website/docs/cdktf/python/r/emr_instance_fleet.html.markdown +++ b/website/docs/cdktf/python/r/emr_instance_fleet.html.markdown @@ -74,6 +74,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `cluster_id` - (Required) ID of the EMR Cluster to attach to. 
Changing this forces a new resource to be created. * `instance_type_configs` - (Optional) Configuration block for instance fleet * `launch_specifications` - (Optional) Configuration block for launch specification @@ -166,4 +167,4 @@ Using `terraform import`, import EMR Instance Fleet using the EMR Cluster identi % terraform import aws_emr_instance_fleet.example j-123456ABCDEF/if-15EK4O09RZLNR ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/emr_instance_group.html.markdown b/website/docs/cdktf/python/r/emr_instance_group.html.markdown index 96a573d6098e..7d578950372c 100644 --- a/website/docs/cdktf/python/r/emr_instance_group.html.markdown +++ b/website/docs/cdktf/python/r/emr_instance_group.html.markdown @@ -43,6 +43,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` (Required) Human friendly name given to the instance group. Changing this forces a new resource to be created. * `cluster_id` (Required) ID of the EMR Cluster to attach to. Changing this forces a new resource to be created. * `instance_type` (Required) The EC2 instance type for all instances in the instance group. Changing this forces a new resource to be created. 
@@ -112,4 +113,4 @@ Using `terraform import`, import EMR task instance group using their EMR Cluster % terraform import aws_emr_instance_group.task_group j-123456ABCDEF/ig-15EK4O09RZLNR ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/emr_managed_scaling_policy.html.markdown b/website/docs/cdktf/python/r/emr_managed_scaling_policy.html.markdown index 8b2545891408..02bf29909528 100644 --- a/website/docs/cdktf/python/r/emr_managed_scaling_policy.html.markdown +++ b/website/docs/cdktf/python/r/emr_managed_scaling_policy.html.markdown @@ -55,6 +55,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `cluster_id` - (Required) ID of the EMR cluster * `compute_limits` - (Required) Configuration block with compute limit settings. Described below. @@ -95,4 +96,4 @@ Using `terraform import`, import EMR Managed Scaling Policies using the EMR Clus % terraform import aws_emr_managed_scaling_policy.example j-123456ABCDEF ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/emr_security_configuration.html.markdown b/website/docs/cdktf/python/r/emr_security_configuration.html.markdown index 475e624c153f..ab040c7cf62f 100644 --- a/website/docs/cdktf/python/r/emr_security_configuration.html.markdown +++ b/website/docs/cdktf/python/r/emr_security_configuration.html.markdown @@ -36,6 +36,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Optional) The name of the EMR Security Configuration. By default generated by Terraform. * `name_prefix` - (Optional) Creates a unique name beginning with the specified prefix. Conflicts with `name`. @@ -75,4 +76,4 @@ Using `terraform import`, import EMR Security Configurations using the `name`. F % terraform import aws_emr_security_configuration.sc example-sc-name ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/emr_studio.html.markdown b/website/docs/cdktf/python/r/emr_studio.html.markdown index 2d6a87dae8f3..2ddd360de594 100644 --- a/website/docs/cdktf/python/r/emr_studio.html.markdown +++ b/website/docs/cdktf/python/r/emr_studio.html.markdown @@ -54,6 +54,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` - (Optional) A detailed description of the Amazon EMR Studio. * `encryption_key_arn` - (Optional) The AWS KMS key identifier (ARN) used to encrypt Amazon EMR Studio workspace and notebook files when backed up to Amazon S3. * `idp_auth_url` - (Optional) The authentication endpoint of your identity provider (IdP). Specify this value when you use IAM authentication and want to let federated users log in to a Studio with the Studio URL and credentials from your IdP. Amazon EMR Studio redirects users to this endpoint to enter credentials. @@ -93,4 +94,4 @@ Using `terraform import`, import EMR studios using the `id`. 
For example: % terraform import aws_emr_studio.studio es-123456ABCDEF ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/emr_studio_session_mapping.html.markdown b/website/docs/cdktf/python/r/emr_studio_session_mapping.html.markdown index fbe43e2a31c1..833a1061494c 100644 --- a/website/docs/cdktf/python/r/emr_studio_session_mapping.html.markdown +++ b/website/docs/cdktf/python/r/emr_studio_session_mapping.html.markdown @@ -38,6 +38,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `identity_id`- (Optional) The globally unique identifier (GUID) of the user or group from the Amazon Web Services SSO Identity Store. * `identity_name` - (Optional) The name of the user or group from the Amazon Web Services SSO Identity Store. * `identity_type` - (Required) Specifies whether the identity to map to the Amazon EMR Studio is a `USER` or a `GROUP`. 
@@ -75,4 +76,4 @@ Using `terraform import`, import EMR studio session mappings using `studio-id:id % terraform import aws_emr_studio_session_mapping.example es-xxxxx:USER:xxxxx-xxx-xxx ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/emrcontainers_job_template.html.markdown b/website/docs/cdktf/python/r/emrcontainers_job_template.html.markdown index fb8aa73d0017..ef9fb1bce497 100644 --- a/website/docs/cdktf/python/r/emrcontainers_job_template.html.markdown +++ b/website/docs/cdktf/python/r/emrcontainers_job_template.html.markdown @@ -46,9 +46,10 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `job_template_data` - (Required) The job template data which holds values of StartJobRun API request. * `kms_key_arn` - (Optional) The KMS key ARN used to encrypt the job template. -* `name` – (Required) The specified name of the job template. +* `name` - (Required) The specified name of the job template. * `tags` - (Optional) Key-value mapping of resource tags. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. ### job_template_data Arguments @@ -134,4 +135,4 @@ Using `terraform import`, import EKS job templates using the `id`. 
For example: % terraform import aws_emrcontainers_job_template.example a1b2c3d4e5f6g7h8i9j10k11l ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/emrcontainers_virtual_cluster.html.markdown b/website/docs/cdktf/python/r/emrcontainers_virtual_cluster.html.markdown index aa6ebcf64265..aabf9e9a2e51 100644 --- a/website/docs/cdktf/python/r/emrcontainers_virtual_cluster.html.markdown +++ b/website/docs/cdktf/python/r/emrcontainers_virtual_cluster.html.markdown @@ -46,8 +46,9 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `container_provider` - (Required) Configuration block for the container provider associated with your cluster. -* `name` – (Required) Name of the virtual cluster. +* `name` - (Required) Name of the virtual cluster. * `tags` - (Optional) Key-value mapping of resource tags. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. ### container_provider Arguments @@ -91,4 +92,4 @@ Using `terraform import`, import EKS Clusters using the `id`. 
For example: % terraform import aws_emrcontainers_virtual_cluster.example a1b2c3d4e5f6g7h8i9j10k11l ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/emrserverless_application.html.markdown b/website/docs/cdktf/python/r/emrserverless_application.html.markdown index 34bcd127e427..3521492f3b6a 100644 --- a/website/docs/cdktf/python/r/emrserverless_application.html.markdown +++ b/website/docs/cdktf/python/r/emrserverless_application.html.markdown @@ -96,17 +96,18 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: -* `architecture` – (Optional) The CPU architecture of an application. Valid values are `ARM64` or `X86_64`. Default value is `X86_64`. -* `auto_start_configuration` – (Optional) The configuration for an application to automatically start on job submission. -* `auto_stop_configuration` – (Optional) The configuration for an application to automatically stop after a certain amount of time being idle. -* `image_configuration` – (Optional) The image configuration applied to all worker types. -* `initial_capacity` – (Optional) The capacity to initialize when the application is created. -* `interactive_configuration` – (Optional) Enables the interactive use cases to use when running an application. -* `maximum_capacity` – (Optional) The maximum capacity to allocate when the application is created. This is cumulative across all workers at any given point in time, not just when an application is created. No new resources will be created once any one of the defined limits is hit. -* `name` – (Required) The name of the application. -* `network_configuration` – (Optional) The network configuration for customer VPC connectivity. -* `release_label` – (Required) The EMR release version associated with the application. -* `type` – (Required) The type of application you want to start, such as `spark` or `hive`. 
+* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `architecture` - (Optional) The CPU architecture of an application. Valid values are `ARM64` or `X86_64`. Default value is `X86_64`. +* `auto_start_configuration` - (Optional) The configuration for an application to automatically start on job submission. +* `auto_stop_configuration` - (Optional) The configuration for an application to automatically stop after a certain amount of time being idle. +* `image_configuration` - (Optional) The image configuration applied to all worker types. +* `initial_capacity` - (Optional) The capacity to initialize when the application is created. +* `interactive_configuration` - (Optional) Enables the interactive use cases to use when running an application. +* `maximum_capacity` - (Optional) The maximum capacity to allocate when the application is created. This is cumulative across all workers at any given point in time, not just when an application is created. No new resources will be created once any one of the defined limits is hit. +* `name` - (Required) The name of the application. +* `network_configuration` - (Optional) The network configuration for customer VPC connectivity. +* `release_label` - (Required) The EMR release version associated with the application. +* `type` - (Required) The type of application you want to start, such as `spark` or `hive`. * `tags` - (Optional) Key-value mapping of resource tags. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. 
### auto_start_configuration Arguments @@ -187,4 +188,4 @@ Using `terraform import`, import EMR Serverless applications using the `id`. For % terraform import aws_emrserverless_application.example id ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/evidently_feature.html.markdown b/website/docs/cdktf/python/r/evidently_feature.html.markdown index c6a5d3686545..2847e8b978f4 100644 --- a/website/docs/cdktf/python/r/evidently_feature.html.markdown +++ b/website/docs/cdktf/python/r/evidently_feature.html.markdown @@ -12,6 +12,8 @@ description: |- Provides a CloudWatch Evidently Feature resource. +~> **Warning:** This resource is deprecated. Use [AWS AppConfig feature flags](https://aws.amazon.com/blogs/mt/using-aws-appconfig-feature-flags/) instead. + ## Example Usage ### Basic @@ -148,6 +150,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `default_variation` - (Optional) The name of the variation to use as the default variation. The default variation is served to users who are not allocated to any ongoing launches or experiments of this feature. This variation must also be listed in the `variations` structure. If you omit `default_variation`, the first variation listed in the `variations` structure is used as the default variation. * `description` - (Optional) Specifies the description of the feature. * `entity_overrides` - (Optional) Specify users that should always be served a specific variation of a feature. Each user is specified by a key-value pair . For each key, specify a user by entering their user ID, account ID, or some other identifier.
For the value, specify the name of the variation that they are to be served. @@ -228,4 +231,4 @@ Using `terraform import`, import CloudWatch Evidently Feature using the feature % terraform import aws_evidently_feature.example exampleFeatureName:arn:aws:evidently:us-east-1:123456789012:project/example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/evidently_launch.html.markdown b/website/docs/cdktf/python/r/evidently_launch.html.markdown index bf6ca28fff84..94052bba9d97 100644 --- a/website/docs/cdktf/python/r/evidently_launch.html.markdown +++ b/website/docs/cdktf/python/r/evidently_launch.html.markdown @@ -12,6 +12,8 @@ description: |- Provides a CloudWatch Evidently Launch resource. +~> **Warning:** This resource is deprecated. Use [AWS AppConfig feature flags](https://aws.amazon.com/blogs/mt/using-aws-appconfig-feature-flags/) instead. + ## Example Usage ### Basic @@ -321,6 +323,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` - (Optional) Specifies the description of the launch. * `groups` - (Required) One or up to five blocks that contain the feature and variations that are to be used for the launch. [Detailed below](#groups). * `metric_monitors` - (Optional) One or up to three blocks that define the metrics that will be used to monitor the launch performance. [Detailed below](#metric_monitors). 
@@ -456,4 +459,4 @@ Import using the `name` of the launch and `arn` of the project separated by a `: % terraform import aws_evidently_launch.example exampleLaunchName:arn:aws:evidently:us-east-1:123456789012:project/exampleProjectName ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/evidently_project.html.markdown b/website/docs/cdktf/python/r/evidently_project.html.markdown index c454992d03bf..c55e9b81c179 100644 --- a/website/docs/cdktf/python/r/evidently_project.html.markdown +++ b/website/docs/cdktf/python/r/evidently_project.html.markdown @@ -12,6 +12,8 @@ description: |- Provides a CloudWatch Evidently Project resource. +~> **Warning:** This resource is deprecated. Use [AWS AppConfig feature flags](https://aws.amazon.com/blogs/mt/using-aws-appconfig-feature-flags/) instead. + ## Example Usage ### Basic @@ -98,6 +100,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `data_delivery` - (Optional) A block that contains information about where Evidently is to store evaluation events for longer term storage, if you choose to do so. If you choose not to store these events, Evidently deletes them after using them to produce metrics and other experiment results that you can view. See below. * `description` - (Optional) Specifies the description of the project. * `name` - (Required) A name for the project. @@ -168,4 +171,4 @@ Using `terraform import`, import CloudWatch Evidently Project using the `arn`. 
F % terraform import aws_evidently_project.example arn:aws:evidently:us-east-1:123456789012:segment/example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/evidently_segment.html.markdown b/website/docs/cdktf/python/r/evidently_segment.html.markdown index 80daa7a34867..9e9321c234b2 100644 --- a/website/docs/cdktf/python/r/evidently_segment.html.markdown +++ b/website/docs/cdktf/python/r/evidently_segment.html.markdown @@ -12,6 +12,8 @@ description: |- Provides a CloudWatch Evidently Segment resource. +~> **Warning:** This resource is deprecated. Use [AWS AppConfig feature flags](https://aws.amazon.com/blogs/mt/using-aws-appconfig-feature-flags/) instead. + ## Example Usage ### Basic @@ -85,6 +87,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` - (Optional, Forces new resource) Specifies the description of the segment. * `name` - (Required, Forces new resource) A name for the segment. * `pattern` - (Required, Forces new resource) The pattern to use for the segment. For more information about pattern syntax, see [Segment rule pattern syntax](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch-Evidently-segments.html#CloudWatch-Evidently-segments-syntax.html). @@ -127,4 +130,4 @@ Using `terraform import`, import CloudWatch Evidently Segment using the `arn`. 
F % terraform import aws_evidently_segment.example arn:aws:evidently:us-west-2:123456789012:segment/example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/finspace_kx_cluster.html.markdown b/website/docs/cdktf/python/r/finspace_kx_cluster.html.markdown index 6614c7098cf0..0e4c61f9b1e4 100644 --- a/website/docs/cdktf/python/r/finspace_kx_cluster.html.markdown +++ b/website/docs/cdktf/python/r/finspace_kx_cluster.html.markdown @@ -87,11 +87,12 @@ The following arguments are required: * RDB - Realtime Database. This type of database captures all the data from a ticker plant and stores it in memory until the end of day, after which it writes all of its data to a disk and reloads the HDB. This cluster type requires local storage for temporary storage of data during the savedown process. If you specify this field in your request, you must provide the `savedownStorageConfiguration` parameter. * GATEWAY - A gateway cluster allows you to access data across processes in kdb systems. It allows you to create your own routing logic using the initialization scripts and custom code. This type of cluster does not require a writable local storage. * GP - A general purpose cluster allows you to quickly iterate on code during development by granting greater access to system commands and enabling a fast reload of custom code. This cluster type can optionally mount databases including cache and savedown storage. For this cluster type, the node count is fixed at 1. It does not support autoscaling and supports only `SINGLE` AZ mode. - * Tickerplant – A tickerplant cluster allows you to subscribe to feed handlers based on IAM permissions. It can publish to RDBs, other Tickerplants, and real-time subscribers (RTS). Tickerplants can persist messages to log, which is readable by any RDB environment. It supports only single-node that is only one kdb process. 
+ * Tickerplant - A tickerplant cluster allows you to subscribe to feed handlers based on IAM permissions. It can publish to RDBs, other Tickerplants, and real-time subscribers (RTS). Tickerplants can persist messages to log, which is readable by any RDB environment. It supports only single-node that is only one kdb process. * `vpc_configuration` - (Required) Configuration details about the network where the Privatelink endpoint of the cluster resides. See [vpc_configuration](#vpc_configuration). The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `auto_scaling_configuration` - (Optional) Configuration based on which FinSpace will scale in or scale out nodes in your cluster. See [auto_scaling_configuration](#auto_scaling_configuration). * `availability_zone_id` - (Optional) The availability zone identifiers for the requested regions. Required when `az_mode` is set to SINGLE. * `cache_storage_configurations` - (Optional) Configurations for a read only cache storage associated with a cluster. This cache will be stored as an FSx Lustre that reads from the S3 store. See [cache_storage_configuration](#cache_storage_configuration). @@ -124,13 +125,13 @@ The capacity_configuration block supports the following arguments: * `node_type` - (Required) Determines the hardware of the host computer used for your cluster instance. Each node type offers different memory and storage capabilities. Choose a node type based on the requirements of the application or software that you plan to run on your instance. You can only specify one of the following values: - * kx.s.large – The node type with a configuration of 12 GiB memory and 2 vCPUs. 
- * kx.s.xlarge – The node type with a configuration of 27 GiB memory and 4 vCPUs. - * kx.s.2xlarge – The node type with a configuration of 54 GiB memory and 8 vCPUs. - * kx.s.4xlarge – The node type with a configuration of 108 GiB memory and 16 vCPUs. - * kx.s.8xlarge – The node type with a configuration of 216 GiB memory and 32 vCPUs. - * kx.s.16xlarge – The node type with a configuration of 432 GiB memory and 64 vCPUs. - * kx.s.32xlarge – The node type with a configuration of 864 GiB memory and 128 vCPUs. + * kx.s.large - The node type with a configuration of 12 GiB memory and 2 vCPUs. + * kx.s.xlarge - The node type with a configuration of 27 GiB memory and 4 vCPUs. + * kx.s.2xlarge - The node type with a configuration of 54 GiB memory and 8 vCPUs. + * kx.s.4xlarge - The node type with a configuration of 108 GiB memory and 16 vCPUs. + * kx.s.8xlarge - The node type with a configuration of 216 GiB memory and 32 vCPUs. + * kx.s.16xlarge - The node type with a configuration of 432 GiB memory and 64 vCPUs. + * kx.s.32xlarge - The node type with a configuration of 864 GiB memory and 128 vCPUs. * `node_count` - (Required) Number of instances running in a cluster. Must be at least 1 and at most 5. 
### cache_storage_configuration @@ -247,4 +248,4 @@ Using `terraform import`, import an AWS FinSpace Kx Cluster using the `id` (envi % terraform import aws_finspace_kx_cluster.example n3ceo7wqxoxcti5tujqwzs,my-tf-kx-cluster ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/finspace_kx_database.html.markdown b/website/docs/cdktf/python/r/finspace_kx_database.html.markdown index d87bbb81ded8..e122c8c75bf3 100644 --- a/website/docs/cdktf/python/r/finspace_kx_database.html.markdown +++ b/website/docs/cdktf/python/r/finspace_kx_database.html.markdown @@ -58,6 +58,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` - (Optional) Description of the KX database. * `tags` - (Optional) Key-value mapping of resource tags. If configured with a provider [`default_tags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. 
@@ -104,4 +105,4 @@ Using `terraform import`, import an AWS FinSpace Kx Database using the `id` (env % terraform import aws_finspace_kx_database.example n3ceo7wqxoxcti5tujqwzs,my-tf-kx-database ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/finspace_kx_dataview.html.markdown b/website/docs/cdktf/python/r/finspace_kx_dataview.html.markdown index d5076d957289..23aa73d24db9 100644 --- a/website/docs/cdktf/python/r/finspace_kx_dataview.html.markdown +++ b/website/docs/cdktf/python/r/finspace_kx_dataview.html.markdown @@ -63,6 +63,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `auto_update` - (Optional) The option to specify whether you want to apply all the future additions and corrections automatically to the dataview, when you ingest new changesets. The default value is false. * `availability_zone_id` - (Optional) The identifier of the availability zones. If attaching a volume, the volume must be in the same availability zone as the dataview that you are attaching to. * `changeset_id` - (Optional) A unique identifier of the changeset of the database that you want to use to ingest data. 
@@ -123,4 +124,4 @@ Using `terraform import`, import an AWS FinSpace Kx Cluster using the `id` (envi % terraform import aws_finspace_kx_dataview.example n3ceo7wqxoxcti5tujqwzs,my-tf-kx-database,my-tf-kx-dataview ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/finspace_kx_environment.html.markdown b/website/docs/cdktf/python/r/finspace_kx_environment.html.markdown index 8b2c36eae569..dc62c7f7fa0d 100644 --- a/website/docs/cdktf/python/r/finspace_kx_environment.html.markdown +++ b/website/docs/cdktf/python/r/finspace_kx_environment.html.markdown @@ -147,6 +147,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `custom_dns_configuration` - (Optional) List of DNS server name and server IP. This is used to set up Route-53 outbound resolvers. Defined below. * `description` - (Optional) Description for the KX environment. * `tags` - (Optional) Key-value mapping of resource tags. If configured with a provider [`default_tags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. @@ -240,4 +241,4 @@ Using `terraform import`, import an AWS FinSpace Kx Environment using the `id`. 
% terraform import aws_finspace_kx_environment.example n3ceo7wqxoxcti5tujqwzs ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/finspace_kx_scaling_group.html.markdown b/website/docs/cdktf/python/r/finspace_kx_scaling_group.html.markdown index 37831b0155a6..1852f1aaaa02 100644 --- a/website/docs/cdktf/python/r/finspace_kx_scaling_group.html.markdown +++ b/website/docs/cdktf/python/r/finspace_kx_scaling_group.html.markdown @@ -47,6 +47,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `tags` - (Optional) Key-value mapping of resource tags. If configured with a provider [`default_tags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. You can add up to 50 tags to a scaling group. ## Attribute Reference @@ -58,14 +59,14 @@ This resource exports the following attributes in addition to the arguments abov * `created_timestamp` - The timestamp at which the scaling group was created in FinSpace. The value is determined as epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000. * `last_modified_timestamp` - Last timestamp at which the scaling group was updated in FinSpace. Value determined as epoch time in seconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000. * `status` - The status of scaling group. - * `CREATING` – The scaling group creation is in progress. - * `CREATE_FAILED` – The scaling group creation has failed. 
- * `ACTIVE` – The scaling group is active. - * `UPDATING` – The scaling group is in the process of being updated. - * `UPDATE_FAILED` – The update action failed. - * `DELETING` – The scaling group is in the process of being deleted. - * `DELETE_FAILED` – The system failed to delete the scaling group. - * `DELETED` – The scaling group is successfully deleted. + * `CREATING` - The scaling group creation is in progress. + * `CREATE_FAILED` - The scaling group creation has failed. + * `ACTIVE` - The scaling group is active. + * `UPDATING` - The scaling group is in the process of being updated. + * `UPDATE_FAILED` - The update action failed. + * `DELETING` - The scaling group is in the process of being deleted. + * `DELETE_FAILED` - The system failed to delete the scaling group. + * `DELETED` - The scaling group is successfully deleted. * `status_reason` - The error message when a failed state occurs. * `tags_all` - Map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block). @@ -102,4 +103,4 @@ Using `terraform import`, import an AWS FinSpace Kx Scaling Group using the `id` % terraform import aws_finspace_kx_scaling_group.example n3ceo7wqxoxcti5tujqwzs,my-tf-kx-scalinggroup ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/finspace_kx_user.html.markdown b/website/docs/cdktf/python/r/finspace_kx_user.html.markdown index 0ef85fe2e382..f34179a3e607 100644 --- a/website/docs/cdktf/python/r/finspace_kx_user.html.markdown +++ b/website/docs/cdktf/python/r/finspace_kx_user.html.markdown @@ -78,6 +78,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `tags` - (Optional) Key-value mapping of resource tags. If configured with a provider [`default_tags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. ## Attribute Reference @@ -121,4 +122,4 @@ Using `terraform import`, import an AWS FinSpace Kx User using the `id` (environ % terraform import aws_finspace_kx_user.example n3ceo7wqxoxcti5tujqwzs,my-tf-kx-user ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/finspace_kx_volume.html.markdown b/website/docs/cdktf/python/r/finspace_kx_volume.html.markdown index 52c0666da605..35c10fb8a992 100644 --- a/website/docs/cdktf/python/r/finspace_kx_volume.html.markdown +++ b/website/docs/cdktf/python/r/finspace_kx_volume.html.markdown @@ -29,7 +29,7 @@ class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) FinspaceKxVolume(self, "example", - availability_zones=Token.as_list("use1-az2"), + availability_zones=["use1-az2"], az_mode="SINGLE", environment_id=Token.as_string(aws_finspace_kx_environment_example.id), name="my-tf-kx-volume", @@ -55,6 +55,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `nas1_configuration` - (Optional) Specifies the configuration for the Network attached storage (`NAS_1`) file system volume. This parameter is required when `volume_type` is `NAS_1`. 
See [`nas1_configuration` Argument Reference](#nas1_configuration-argument-reference) below. * `description` - (Optional) Description of the volume. * `tags` - (Optional) A list of key-value pairs to label the volume. You can add up to 50 tags to a volume @@ -73,15 +74,15 @@ This resource exports the following attributes in addition to the arguments abov * `arn` - Amazon Resource Name (ARN) identifier of the KX volume. * `created_timestamp` - The timestamp at which the volume was created in FinSpace. The value is determined as epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000. * `status` - The status of volume creation. - * `CREATING` – The volume creation is in progress. - * `CREATE_FAILED` – The volume creation has failed. - * `ACTIVE` – The volume is active. - * `UPDATING` – The volume is in the process of being updated. - * `UPDATE_FAILED` – The update action failed. - * `UPDATED` – The volume is successfully updated. - * `DELETING` – The volume is in the process of being deleted. - * `DELETE_FAILED` – The system failed to delete the volume. - * `DELETED` – The volume is successfully deleted. + * `CREATING` - The volume creation is in progress. + * `CREATE_FAILED` - The volume creation has failed. + * `ACTIVE` - The volume is active. + * `UPDATING` - The volume is in the process of being updated. + * `UPDATE_FAILED` - The update action failed. + * `UPDATED` - The volume is successfully updated. + * `DELETING` - The volume is in the process of being deleted. + * `DELETE_FAILED` - The system failed to delete the volume. + * `DELETED` - The volume is successfully deleted. * `status_reason` - The error message when a failed state occurs. * `last_modified_timestamp` - Last timestamp at which the volume was updated in FinSpace. Value determined as epoch time in seconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000. 
@@ -118,4 +119,4 @@ Using `terraform import`, import an AWS FinSpace Kx Volume using the `id` (envir % terraform import aws_finspace_kx_volume.example n3ceo7wqxoxcti5tujqwzs,my-tf-kx-volume ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/fis_experiment_template.html.markdown b/website/docs/cdktf/python/r/fis_experiment_template.html.markdown index 83bdd4585969..8bc22cbd236a 100644 --- a/website/docs/cdktf/python/r/fis_experiment_template.html.markdown +++ b/website/docs/cdktf/python/r/fis_experiment_template.html.markdown @@ -194,6 +194,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `experiment_options` - (Optional) The experiment options for the experiment template. See [experiment_options](#experiment_options) below for more details! * `tags` - (Optional) Key-value mapping of tags. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. * `target` - (Optional) Target of an action. See below. @@ -327,4 +328,4 @@ Using `terraform import`, import FIS Experiment Templates using the `id`. 
For ex % terraform import aws_fis_experiment_template.template EXT123AbCdEfGhIjK ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/flow_log.html.markdown b/website/docs/cdktf/python/r/flow_log.html.markdown index 3a34eb74a526..804cb270ae64 100644 --- a/website/docs/cdktf/python/r/flow_log.html.markdown +++ b/website/docs/cdktf/python/r/flow_log.html.markdown @@ -11,7 +11,7 @@ description: |- # Resource: aws_flow_log Provides a VPC/Subnet/ENI/Transit Gateway/Transit Gateway Attachment Flow Log to capture IP traffic for a specific network -interface, subnet, or VPC. Logs are sent to a CloudWatch Log Group, a S3 Bucket, or Amazon Kinesis Data Firehose +interface, subnet, or VPC. Logs are sent to a CloudWatch Log Group, a S3 Bucket, or Amazon Data Firehose ## Example Usage @@ -82,7 +82,7 @@ class MyConvertedCode(TerraformStack): aws_flow_log_example.override_logical_id("example") ``` -### Amazon Kinesis Data Firehose logging +### Amazon Data Firehose logging ```python # DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug @@ -228,26 +228,162 @@ class MyConvertedCode(TerraformStack): aws_flow_log_example.override_logical_id("example") ``` +### Cross-Account Amazon Data Firehose Logging + +The following example shows how to set up a flow log in one AWS account (source) that sends logs to an Amazon Data Firehose delivery stream in another AWS account (destination). +See the [AWS Documentation](https://docs.aws.amazon.com/vpc/latest/userguide/flow-logs-firehose.html). + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import Token, TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. 
+# +from imports.aws.data_aws_iam_policy_document import DataAwsIamPolicyDocument +from imports.aws.flow_log import FlowLog +from imports.aws.iam_role import IamRole +from imports.aws.iam_role_policy import IamRolePolicy +from imports.aws.kinesis_firehose_delivery_stream import KinesisFirehoseDeliveryStream +from imports.aws.provider import AwsProvider +from imports.aws.vpc import Vpc +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name, *, destination, stream_name): + super().__init__(scope, name) + AwsProvider(self, "aws", + profile="admin-src" + ) + destination_account = AwsProvider(self, "aws_1", + alias="destination_account", + profile="admin-dst" + ) + dst = KinesisFirehoseDeliveryStream(self, "dst", + provider=destination_account, + tags={ + "LogDeliveryEnabled": "true" + }, + destination=destination, + name=stream_name + ) + src = Vpc(self, "src") + dst_role_policy = DataAwsIamPolicyDocument(self, "dst_role_policy", + statement=[DataAwsIamPolicyDocumentStatement( + actions=["iam:CreateServiceLinkedRole", "firehose:TagDeliveryStream" + ], + effect="Allow", + resources=["*"] + ) + ] + ) + src_assume_role_policy = DataAwsIamPolicyDocument(self, "src_assume_role_policy", + statement=[DataAwsIamPolicyDocumentStatement( + actions=["sts:AssumeRole"], + effect="Allow", + principals=[DataAwsIamPolicyDocumentStatementPrincipals( + identifiers=["delivery.logs.amazonaws.com"], + type="Service" + ) + ] + ) + ] + ) + aws_iam_role_src = IamRole(self, "src_6", + assume_role_policy=Token.as_string(src_assume_role_policy.json), + name="tf-example-mySourceRole" + ) + # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match.
+ aws_iam_role_src.override_logical_id("src") + dst_assume_role_policy = DataAwsIamPolicyDocument(self, "dst_assume_role_policy", + statement=[DataAwsIamPolicyDocumentStatement( + actions=["sts:AssumeRole"], + effect="Allow", + principals=[DataAwsIamPolicyDocumentStatementPrincipals( + identifiers=[Token.as_string(aws_iam_role_src.arn)], + type="AWS" + ) + ] + ) + ] + ) + aws_iam_role_dst = IamRole(self, "dst_8", + assume_role_policy=Token.as_string(dst_assume_role_policy.json), + name="AWSLogDeliveryFirehoseCrossAccountRole", + provider=destination_account + ) + # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. + aws_iam_role_dst.override_logical_id("dst") + aws_iam_role_policy_dst = IamRolePolicy(self, "dst_9", + name="AWSLogDeliveryFirehoseCrossAccountRolePolicy", + policy=Token.as_string(dst_role_policy.json), + provider=destination_account, + role=Token.as_string(aws_iam_role_dst.name) + ) + # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. 
+ aws_iam_role_policy_dst.override_logical_id("dst") + src_role_policy = DataAwsIamPolicyDocument(self, "src_role_policy", + statement=[DataAwsIamPolicyDocumentStatement( + actions=["iam:PassRole"], + condition=[DataAwsIamPolicyDocumentStatementCondition( + test="StringEquals", + values=["delivery.logs.amazonaws.com"], + variable="iam:PassedToService" + ), DataAwsIamPolicyDocumentStatementCondition( + test="StringLike", + values=[src.arn], + variable="iam:AssociatedResourceARN" + ) + ], + effect="Allow", + resources=[Token.as_string(aws_iam_role_src.arn)] + ), DataAwsIamPolicyDocumentStatement( + actions=["logs:CreateLogDelivery", "logs:DeleteLogDelivery", "logs:ListLogDeliveries", "logs:GetLogDelivery" + ], + effect="Allow", + resources=["*"] + ), DataAwsIamPolicyDocumentStatement( + actions=["sts:AssumeRole"], + effect="Allow", + resources=[Token.as_string(aws_iam_role_dst.arn)] + ) + ] + ) + aws_flow_log_src = FlowLog(self, "src_11", + deliver_cross_account_role=Token.as_string(aws_iam_role_dst.arn), + iam_role_arn=Token.as_string(aws_iam_role_src.arn), + log_destination=dst.arn, + log_destination_type="kinesis-data-firehose", + traffic_type="ALL", + vpc_id=src.id + ) + # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. + aws_flow_log_src.override_logical_id("src") + IamRolePolicy(self, "src_policy", + name="tf-example-mySourceRolePolicy", + policy=Token.as_string(src_role_policy.json), + role=Token.as_string(aws_iam_role_src.name) + ) +``` + ## Argument Reference This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `traffic_type` - (Required) The type of traffic to capture. 
Valid values: `ACCEPT`,`REJECT`, `ALL`. -* `deliver_cross_account_role` - (Optional) ARN of the IAM role that allows Amazon EC2 to publish flow logs across accounts. -* `eni_id` - (Optional) Elastic Network Interface ID to attach to -* `iam_role_arn` - (Optional) The ARN for the IAM role that's used to post flow logs to a CloudWatch Logs log group -* `log_destination_type` - (Optional) The type of the logging destination. Valid values: `cloud-watch-logs`, `s3`, `kinesis-data-firehose`. Default: `cloud-watch-logs`. -* `log_destination` - (Optional) The ARN of the logging destination. Either `log_destination` or `log_group_name` must be set. -* `log_group_name` - (Optional) **Deprecated:** Use `log_destination` instead. The name of the CloudWatch log group. Either `log_group_name` or `log_destination` must be set. -* `subnet_id` - (Optional) Subnet ID to attach to -* `transit_gateway_id` - (Optional) Transit Gateway ID to attach to -* `transit_gateway_attachment_id` - (Optional) Transit Gateway Attachment ID to attach to -* `vpc_id` - (Optional) VPC ID to attach to +* `deliver_cross_account_role` - (Optional) ARN of the IAM role in the destination account used for cross-account delivery of flow logs. +* `eni_id` - (Optional) Elastic Network Interface ID to attach to. +* `iam_role_arn` - (Optional) ARN of the IAM role used to post flow logs. Corresponds to `DeliverLogsPermissionArn` in the [AWS API](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateFlowLogs.html). +* `log_destination_type` - (Optional) Logging destination type. Valid values: `cloud-watch-logs`, `s3`, `kinesis-data-firehose`. Default: `cloud-watch-logs`. +* `log_destination` - (Optional) ARN of the logging destination. +* `subnet_id` - (Optional) Subnet ID to attach to. +* `transit_gateway_id` - (Optional) Transit Gateway ID to attach to. +* `transit_gateway_attachment_id` - (Optional) Transit Gateway Attachment ID to attach to. +* `vpc_id` - (Optional) VPC ID to attach to. 
* `log_format` - (Optional) The fields to include in the flow log record. Accepted format example: `"$${interface-id} $${srcaddr} $${dstaddr} $${srcport} $${dstport}"`. -* `max_aggregation_interval` - (Optional) The maximum interval of time - during which a flow of packets is captured and aggregated into a flow - log record. Valid Values: `60` seconds (1 minute) or `600` seconds (10 - minutes). Default: `600`. When `transit_gateway_id` or `transit_gateway_attachment_id` is specified, `max_aggregation_interval` *must* be 60 seconds (1 minute). +* `max_aggregation_interval` - (Optional) The maximum interval of time during which a flow of packets is captured and aggregated into a flow log record. + Valid Values: `60` seconds (1 minute) or `600` seconds (10 minutes). Default: `600`. + When `transit_gateway_id` or `transit_gateway_attachment_id` is specified, `max_aggregation_interval` *must* be 60 seconds (1 minute). * `destination_options` - (Optional) Describes the destination options for a flow log. More details below. * `tags` - (Optional) Key-value map of resource tags. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. @@ -257,7 +393,7 @@ This resource supports the following arguments: Describes the destination options for a flow log. -* `file_format` - (Optional) The format for the flow log. Default value: `plain-text`. Valid values: `plain-text`, `parquet`. +* `file_format` - (Optional) File format for the flow log. Default value: `plain-text`. Valid values: `plain-text`, `parquet`. * `hive_compatible_partitions` - (Optional) Indicates whether to use Hive-compatible prefixes for flow logs stored in Amazon S3. Default value: `false`. * `per_hour_partition` - (Optional) Indicates whether to partition the flow log per hour. 
This reduces the cost and response time for queries. Default value: `false`. @@ -265,8 +401,8 @@ Describes the destination options for a flow log. This resource exports the following attributes in addition to the arguments above: -* `id` - The Flow Log ID -* `arn` - The ARN of the Flow Log. +* `id` - Flow Log ID. +* `arn` - ARN of the Flow Log. * `tags_all` - A map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). ## Import @@ -294,4 +430,4 @@ Using `terraform import`, import Flow Logs using the `id`. For example: % terraform import aws_flow_log.test_flow_log fl-1a2b3c4d ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/fms_policy.html.markdown b/website/docs/cdktf/python/r/fms_policy.html.markdown index aabe25680cc4..8449c72aec7e 100644 --- a/website/docs/cdktf/python/r/fms_policy.html.markdown +++ b/website/docs/cdktf/python/r/fms_policy.html.markdown @@ -68,6 +68,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required, Forces new resource) The friendly name of the AWS Firewall Manager Policy. * `delete_all_policy_resources` - (Optional) If true, the request will also perform a clean-up process. Defaults to `true`. 
More information can be found here [AWS Firewall Manager delete policy](https://docs.aws.amazon.com/fms/2018-01-01/APIReference/API_DeletePolicy.html) * `delete_unused_fm_managed_resources` - (Optional) If true, Firewall Manager will automatically remove protections from resources that leave the policy scope. Defaults to `false`. More information can be found here [AWS Firewall Manager policy contents](https://docs.aws.amazon.com/fms/2018-01-01/APIReference/API_Policy.html) @@ -76,6 +77,7 @@ This resource supports the following arguments: * `exclude_resource_tags` - (Required, Forces new resource) A boolean value, if true the tags that are specified in the `resource_tags` are not protected by this policy. If set to false and resource_tags are populated, resources that contain tags will be protected by this policy. * `include_map` - (Optional) A map of lists of accounts and OU's to include in the policy. See the [`include_map`](#include_map-configuration-block) block. * `remediation_enabled` - (Required) A boolean value, indicates if the policy should automatically applied to resources that already exist in the account. +* `resource_tag_logical_operator` - (Optional) Controls how multiple resource tags are combined: with AND, so that a resource must have all tags to be included or excluded, or OR, so that a resource must have at least one tag. The valid values are `AND` and `OR`. * `resource_tags` - (Optional) A map of resource tags, that if present will filter protections on resources based on the exclude_resource_tags. * `resource_type` - (Optional) A resource type to protect. Conflicts with `resource_type_list`. See the [FMS API Reference](https://docs.aws.amazon.com/fms/2018-01-01/APIReference/API_Policy.html#fms-Type-Policy-ResourceType) for more information about supported values. * `resource_type_list` - (Optional) A list of resource types to protect. Conflicts with `resource_type`. 
See the [FMS API Reference](https://docs.aws.amazon.com/fms/2018-01-01/APIReference/API_Policy.html#fms-Type-Policy-ResourceType) for more information about supported values. Lists with only one element are not supported, instead use `resource_type`. @@ -190,4 +192,4 @@ Using `terraform import`, import Firewall Manager policies using the policy ID. % terraform import aws_fms_policy.example 5be49585-a7e3-4c49-dde1-a179fe4a619a ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/fms_resource_set.html.markdown b/website/docs/cdktf/python/r/fms_resource_set.html.markdown index c053a616a7f8..40aad5a5d0fa 100644 --- a/website/docs/cdktf/python/r/fms_resource_set.html.markdown +++ b/website/docs/cdktf/python/r/fms_resource_set.html.markdown @@ -39,8 +39,9 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -The following arguments are required: +This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `resource_set` - (Required) Details about the resource set to be created or updated. See [`resource_set` Attribute Reference](#resource_set-attribute-reference) below. 
### `resource_set` Attribute Reference @@ -91,4 +92,4 @@ Using `terraform import`, import FMS (Firewall Manager) Resource Set using the ` % terraform import aws_fms_resource_set.example resource_set-id-12345678 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/fsx_backup.html.markdown b/website/docs/cdktf/python/r/fsx_backup.html.markdown index 5fb30951d635..2ff293fe6440 100644 --- a/website/docs/cdktf/python/r/fsx_backup.html.markdown +++ b/website/docs/cdktf/python/r/fsx_backup.html.markdown @@ -132,12 +132,13 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: -Note - Only file_system_id or volume_id can be specified. file_system_id is used for Lustre and Windows, volume_id is used for ONTAP. - +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `file_system_id` - (Optional) The ID of the file system to back up. Required if backing up Lustre or Windows file systems. * `tags` - (Optional) A map of tags to assign to the file system. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. If you have set `copy_tags_to_backups` to true, and you specify one or more tags, no existing file system tags are copied from the file system to the backup. * `volume_id` - (Optional) The ID of the volume to back up. Required if backing up a ONTAP Volume. +Note - Only one of `file_system_id` or `volume_id` can be specified. `file_system_id` is used for Lustre and Windows, `volume_id` is used for ONTAP.
+ ## Attribute Reference This resource exports the following attributes in addition to the arguments above: @@ -181,4 +182,4 @@ Using `terraform import`, import FSx Backups using the `id`. For example: % terraform import aws_fsx_backup.example fs-543ab12b1ca672f33 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/fsx_data_repository_association.html.markdown b/website/docs/cdktf/python/r/fsx_data_repository_association.html.markdown index b9b3d637b695..b409d79be6fb 100644 --- a/website/docs/cdktf/python/r/fsx_data_repository_association.html.markdown +++ b/website/docs/cdktf/python/r/fsx_data_repository_association.html.markdown @@ -70,6 +70,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `batch_import_meta_data_on_create` - (Optional) Set to true to run an import data repository task to import metadata from the data repository to the file system after the data repository association is created. Defaults to `false`. * `data_repository_path` - (Required) The path to the Amazon S3 data repository that will be linked to the file system. The path must be an S3 bucket s3://myBucket/myPrefix/. This path specifies where in the S3 data repository files will be imported from or exported to. The same S3 bucket cannot be linked more than once to the same file system. * `file_system_id` - (Required) The ID of the Amazon FSx file system to on which to create a data repository association. 
@@ -130,4 +131,4 @@ Using `terraform import`, import FSx Data Repository Associations using the `id` % terraform import aws_fsx_data_repository_association.example dra-0b1cfaeca11088b10 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/fsx_file_cache.html.markdown b/website/docs/cdktf/python/r/fsx_file_cache.html.markdown index d7631484f04f..3a14dfd0462e 100644 --- a/website/docs/cdktf/python/r/fsx_file_cache.html.markdown +++ b/website/docs/cdktf/python/r/fsx_file_cache.html.markdown @@ -67,6 +67,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `copy_tags_to_data_repository_associations` - A boolean flag indicating whether tags for the cache should be copied to data repository associations. This value defaults to false. * `data_repository_association` - See the [`data_repository_association` configuration](#data-repository-association-arguments) block. Max of 8. A list of up to 8 configurations for data repository associations (DRAs) to be created during the cache creation. The DRAs link the cache to either an Amazon S3 data repository or a Network File System (NFS) data repository that supports the NFSv3 protocol. The DRA configurations must meet the following requirements: 1) All configurations on the list must be of the same data repository type, either all S3 or all NFS. A cache can't link to different data repository types at the same time. 2) An NFS DRA must link to an NFS file system that supports the NFSv3 protocol. DRA automatic import and automatic export is not supported. 
@@ -151,4 +152,4 @@ Using `terraform import`, import Amazon File Cache cache using the resource `id` % terraform import aws_fsx_file_cache.example fc-8012925589 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/fsx_lustre_file_system.html.markdown b/website/docs/cdktf/python/r/fsx_lustre_file_system.html.markdown index fd7eb8368d2f..f60957a35d15 100644 --- a/website/docs/cdktf/python/r/fsx_lustre_file_system.html.markdown +++ b/website/docs/cdktf/python/r/fsx_lustre_file_system.html.markdown @@ -37,12 +37,13 @@ class MyConvertedCode(TerraformStack): ## Argument Reference The following arguments are required: * `subnet_ids` - (Required) A list of IDs for the subnets that the file system will be accessible from. File systems currently support only one subnet. The file server is also launched in that subnet's Availability Zone. The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `auto_import_policy` - (Optional) How Amazon FSx keeps your file and directory listings up to date as you add or modify objects in your linked S3 bucket. see [Auto Import Data Repo](https://docs.aws.amazon.com/fsx/latest/LustreGuide/autoimport-data-repo.html) for more details. Only supported on `PERSISTENT_1` deployment types.
* `automatic_backup_retention_days` - (Optional) The number of days to retain automatic backups. Setting this to 0 disables automatic backups. You can retain automatic backups for a maximum of 90 days. only valid for `PERSISTENT_1` and `PERSISTENT_2` deployment_type. * `backup_id` - (Optional) The ID of the source backup to create the filesystem from. @@ -174,4 +176,4 @@ class MyConvertedCode(TerraformStack): ) ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/fsx_ontap_file_system.html.markdown b/website/docs/cdktf/python/r/fsx_ontap_file_system.html.markdown index 4c6bc8259f77..0f06a9361e29 100644 --- a/website/docs/cdktf/python/r/fsx_ontap_file_system.html.markdown +++ b/website/docs/cdktf/python/r/fsx_ontap_file_system.html.markdown @@ -106,6 +106,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `storage_capacity` - (Required) The storage capacity (GiB) of the file system. Valid values between `1024` and `196608` for file systems with deployment_type `SINGLE_AZ_1` and `MULTI_AZ_1`. Valid values are between `1024` and `524288` for `MULTI_AZ_2`. Valid values between `1024` (`1024` per ha pair) and `1048576` for file systems with deployment_type `SINGLE_AZ_2`. For `SINGLE_AZ_2`, the `1048576` (1PB) maximum is only supported when using 2 or more ha_pairs, the maximum is `524288` (512TB) when using 1 ha_pair. * `subnet_ids` - (Required) A list of IDs for the subnets that the file system will be accessible from. Up to 2 subnets can be provided. * `preferred_subnet_id` - (Required) The ID for a subnet. 
A subnet is a range of IP addresses in your virtual private cloud (VPC). @@ -215,4 +216,4 @@ class MyConvertedCode(TerraformStack): ) ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/fsx_ontap_storage_virtual_machine.html.markdown b/website/docs/cdktf/python/r/fsx_ontap_storage_virtual_machine.html.markdown index 9e9bfa3cff71..60eb5eca080f 100644 --- a/website/docs/cdktf/python/r/fsx_ontap_storage_virtual_machine.html.markdown +++ b/website/docs/cdktf/python/r/fsx_ontap_storage_virtual_machine.html.markdown @@ -70,6 +70,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `active_directory_configuration` - (Optional) Configuration block that Amazon FSx uses to join the FSx ONTAP Storage Virtual Machine(SVM) to your Microsoft Active Directory (AD) directory. Detailed below. * `file_system_id` - (Required) The ID of the Amazon FSx ONTAP File System that this SVM will be created on. * `name` - (Required) The name of the SVM. You can use a maximum of 47 alphanumeric characters, plus the underscore (_) special character. 
@@ -176,4 +177,4 @@ class MyConvertedCode(TerraformStack): ) ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/fsx_ontap_volume.html.markdown b/website/docs/cdktf/python/r/fsx_ontap_volume.html.markdown index d6e1bcd4b8e8..e6860b43fadb 100644 --- a/website/docs/cdktf/python/r/fsx_ontap_volume.html.markdown +++ b/website/docs/cdktf/python/r/fsx_ontap_volume.html.markdown @@ -76,6 +76,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `aggregate_configuration` - (Optional) The Aggregate configuration only applies to `FLEXGROUP` volumes. See [`aggregate_configuration` Block] for details. * `bypass_snaplock_enterprise_retention` - (Optional) Setting this to `true` allows a SnapLock administrator to delete an FSx for ONTAP SnapLock Enterprise volume with unexpired write once, read many (WORM) files. This configuration must be applied separately before attempting to delete the resource to have the desired behavior. Defaults to `false`. * `copy_tags_to_backups` - (Optional) A boolean flag indicating whether tags for the volume should be copied to backups. This value defaults to `false`. @@ -200,4 +201,4 @@ Using `terraform import`, import FSx ONTAP volume using the `id`. 
For example: % terraform import aws_fsx_ontap_volume.example fsvol-12345678abcdef123 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/fsx_openzfs_file_system.html.markdown b/website/docs/cdktf/python/r/fsx_openzfs_file_system.html.markdown index 3c15e2b10d4c..7b684f18a710 100644 --- a/website/docs/cdktf/python/r/fsx_openzfs_file_system.html.markdown +++ b/website/docs/cdktf/python/r/fsx_openzfs_file_system.html.markdown @@ -46,6 +46,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `automatic_backup_retention_days` - (Optional) The number of days to retain automatic backups. Setting this to 0 disables automatic backups. You can retain automatic backups for a maximum of 90 days. * `backup_id` - (Optional) The ID of the source backup to create the filesystem from. * `copy_tags_to_backups` - (Optional) A boolean flag indicating whether tags for the file system should be copied to backups. The default value is false. @@ -62,6 +63,7 @@ The following arguments are optional: * `security_group_ids` - (Optional) A list of IDs for the security groups that apply to the specified network interfaces created for file system access. These security groups will apply to all network interfaces. * `skip_final_backup` - (Optional) When enabled, will skip the default final backup taken when the file system is deleted. This configuration must be applied separately before attempting to delete the resource to have the desired behavior. Defaults to `false`. * `storage_type` - (Optional) The filesystem storage type. Only `SSD` is supported. 
+* `user_and_group_quotas` - (Optional) - Specify how much storage users or groups can use on the filesystem. Maximum number of items defined by [FSx for OpenZFS Resource quota](https://docs.aws.amazon.com/fsx/latest/OpenZFSGuide/limits.html#limits-openzfs-resources-file-system). See [`user_and_group_quotas` Block](#user_and_group_quotas-block) Below. * `tags` - (Optional) A map of tags to assign to the file system. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. * `weekly_maintenance_start_time` - (Optional) The preferred start time (in `d:HH:MM` format) to perform weekly maintenance, in the UTC time zone. @@ -177,4 +179,4 @@ class MyConvertedCode(TerraformStack): ) ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/fsx_openzfs_snapshot.html.markdown b/website/docs/cdktf/python/r/fsx_openzfs_snapshot.html.markdown index 8fef768fe059..eb191f186808 100644 --- a/website/docs/cdktf/python/r/fsx_openzfs_snapshot.html.markdown +++ b/website/docs/cdktf/python/r/fsx_openzfs_snapshot.html.markdown @@ -84,6 +84,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The name of the Snapshot. You can use a maximum of 203 alphanumeric characters plus either _ or - or : or . for the name. * `tags` - (Optional) A map of tags to assign to the file system. 
If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. If you have set `copy_tags_to_backups` to true, and you specify one or more tags, no existing file system tags are copied from the file system to the backup. * `volume_id` - (Optional) The ID of the volume to snapshot. This can be the root volume or a child volume. @@ -129,4 +130,4 @@ Using `terraform import`, import FSx OpenZFS snapshot using the `id`. For exampl % terraform import aws_fsx_openzfs_snapshot.example fs-543ab12b1ca672f33 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/fsx_openzfs_volume.html.markdown b/website/docs/cdktf/python/r/fsx_openzfs_volume.html.markdown index 250b76b4815a..e5aca16e02dc 100644 --- a/website/docs/cdktf/python/r/fsx_openzfs_volume.html.markdown +++ b/website/docs/cdktf/python/r/fsx_openzfs_volume.html.markdown @@ -37,6 +37,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The name of the Volume. You can use a maximum of 203 alphanumeric characters, plus the underscore (_) special character. * `parent_volume_id` - (Required) The volume id of volume that will be the parent volume for the volume being created, this could be the root volume created from the `aws_fsx_openzfs_file_system` resource with the `root_volume_id` or the `id` property of another `aws_fsx_openzfs_volume`. 
* `copy_tags_to_snapshots` - (Optional) A boolean flag indicating whether tags for the file system should be copied to snapshots. The default value is false. @@ -48,7 +49,7 @@ This resource supports the following arguments: * `origin_snapshot` - (Optional) Specifies the configuration to use when creating the OpenZFS volume. See [`origin_snapshot` Block](#origin_snapshot-block) below for details. * `storage_capacity_quota_gib` - (Optional) The maximum amount of storage in gibibytes (GiB) that the volume can use from its parent. * `storage_capacity_reservation_gib` - (Optional) The amount of storage in gibibytes (GiB) to reserve from the parent volume. -* `user_and_group_quotas` - (Optional) - Specify how much storage users or groups can use on the volume. Maximum of 100 items. See [`user_and_group_quotas` Block](#user_and_group_quotas-block) Below. +* `user_and_group_quotas` - (Optional) - Specify how much storage users or groups can use on the volume. Maximum number of items defined by [FSx for OpenZFS Resource quota](https://docs.aws.amazon.com/fsx/latest/OpenZFSGuide/limits.html#limits-openzfs-resources-file-system). See [`user_and_group_quotas` Block](#user_and_group_quotas-block) Below. * `tags` - (Optional) A map of tags to assign to the file system. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. ### `nfs_exports` Block @@ -120,4 +121,4 @@ Using `terraform import`, import FSx Volumes using the `id`. 
For example: % terraform import aws_fsx_openzfs_volume.example fsvol-543ab12b1ca672f33 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/fsx_s3_access_point_attachment.html.markdown b/website/docs/cdktf/python/r/fsx_s3_access_point_attachment.html.markdown new file mode 100644 index 000000000000..679b6d63e710 --- /dev/null +++ b/website/docs/cdktf/python/r/fsx_s3_access_point_attachment.html.markdown @@ -0,0 +1,135 @@ +--- +subcategory: "FSx" +layout: "aws" +page_title: "AWS: aws_fsx_s3_access_point_attachment" +description: |- + Manages an Amazon FSx S3 Access Point attachment. +--- + + + +# Resource: aws_fsx_s3_access_point_attachment + +Manages an Amazon FSx S3 Access Point attachment. + +## Example Usage + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import Token, TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.fsx_s3_access_point_attachment import FsxS3AccessPointAttachment +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + FsxS3AccessPointAttachment(self, "example", + name="example-attachment", + openzfs_configuration=[FsxS3AccessPointAttachmentOpenzfsConfiguration( + file_system_identity=[FsxS3AccessPointAttachmentOpenzfsConfigurationFileSystemIdentity( + posix_user=[FsxS3AccessPointAttachmentOpenzfsConfigurationFileSystemIdentityPosixUser( + gid=1001, + uid=1001 + ) + ], + type="POSIX" + ) + ], + volume_id=Token.as_string(aws_fsx_openzfs_volume_example.id) + ) + ], + type="OPENZFS" + ) +``` + +## Argument Reference + +The following arguments are required: + +* `name` - (Required) Name of the S3 access point. 
+* `openzfs_configuration` - (Required) Configuration to use when creating and attaching an S3 access point to an FSx for OpenZFS volume. See [`openzfs_configuration` Block](#openzfs_configuration-block) for details. +* `type` - (Required) Type of S3 access point. Valid values: `OPENZFS`. + +The following arguments are optional: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `s3_access_point` - (Optional) S3 access point configuration. See [`s3_access_point` Block](#s3_access_point-block) for details. + +### `openzfs_configuration` Block + +The `openzfs_configuration` configuration block supports the following arguments: + +* `file_system_identity` - (Required) File system user identity to use for authorizing file read and write requests that are made using the S3 access point. See [`file_system_identity` Block](#file_system_identity-block) for details. +* `volume_id` - (Required) ID of the FSx for OpenZFS volume to which the S3 access point is attached. + +### `file_system_identity` Block + +The `file_system_identity` configuration block supports the following arguments: + +* `posix_user` - (Required) UID and GIDs of the file system POSIX user. See [`posix_user` Block](#posix_user-block) for details. +* `type` - (Required) FSx for OpenZFS user identity type. Valid values: `POSIX`. + +### `posix_user` Block + +The `posix_user` configuration block supports the following arguments: + +* `gid` - (Required) GID of the file system user. +* `secondary_gids` - (Optional) List of secondary GIDs for the file system user. +* `uid` - (Required) UID of the file system user.
+ +### `s3_access_point` Block + +The `s3_access_point` configuration block supports the following arguments: + +* `policy` - (Required) Access policy associated with the S3 access point configuration. +* `vpc_configuration` - (Optional) Amazon S3 restricts access to the S3 access point to requests made from the specified VPC. See [`vpc_configuration` Block](#vpc_configuration-block) for details. + +### `vpc_configuration` Block + +The `vpc_configuration` configuration block supports the following arguments: + +* `vpc_id` - (Required) VPC ID. + +## Attribute Reference + +This resource exports the following attributes in addition to the arguments above: + +* `s3_access_point_alias` - S3 access point's alias. +* `s3_access_point_arn` - S3 access point's ARN. + +## Timeouts + +[Configuration options](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts): + +* `create` - (Default `15m`) +* `delete` - (Default `15m`) + +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import FSx S3 Access Point attachments using the `name`. For example: + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.fsx_s3_access_point_attachment import FsxS3AccessPointAttachment +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + FsxS3AccessPointAttachment.generate_config_for_import(self, "example", "example-attachment") +``` + +Using `terraform import`, import FSx S3 Access Point attachments using the `name`. 
For example: + +```console +% terraform import aws_fsx_s3_access_point_attachment.example example-attachment +``` + + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/fsx_windows_file_system.html.markdown b/website/docs/cdktf/python/r/fsx_windows_file_system.html.markdown index 77a593bf09cd..b3a247c8a27a 100644 --- a/website/docs/cdktf/python/r/fsx_windows_file_system.html.markdown +++ b/website/docs/cdktf/python/r/fsx_windows_file_system.html.markdown @@ -80,6 +80,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `active_directory_id` - (Optional) The ID for an existing Microsoft Active Directory instance that the file system should join when it's created. Cannot be specified with `self_managed_active_directory`. * `aliases` - (Optional) An array DNS alias names that you want to associate with the Amazon FSx file system. For more information, see [Working with DNS Aliases](https://docs.aws.amazon.com/fsx/latest/WindowsGuide/managing-dns-aliases.html) * `audit_log_configuration` - (Optional) The configuration that Amazon FSx for Windows File Server uses to audit and log user accesses of files, folders, and file shares on the Amazon FSx for Windows File Server file system. See [`audit_log_configuration` Block](#audit_log_configuration-block) for details. 
@@ -198,4 +199,4 @@ class MyConvertedCode(TerraformStack): ) ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/gamelift_alias.html.markdown b/website/docs/cdktf/python/r/gamelift_alias.html.markdown index ef4677127174..0853b1b4a867 100644 --- a/website/docs/cdktf/python/r/gamelift_alias.html.markdown +++ b/website/docs/cdktf/python/r/gamelift_alias.html.markdown @@ -40,6 +40,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the alias. * `description` - (Optional) Description of the alias. * `routing_strategy` - (Required) Specifies the fleet and/or routing type to use for the alias. @@ -86,4 +87,4 @@ Using `terraform import`, import GameLift Aliases using the ID. For example: % terraform import aws_gamelift_alias.example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/gamelift_build.html.markdown b/website/docs/cdktf/python/r/gamelift_build.html.markdown index dbd019d5a1c7..b307aa14cfcf 100644 --- a/website/docs/cdktf/python/r/gamelift_build.html.markdown +++ b/website/docs/cdktf/python/r/gamelift_build.html.markdown @@ -41,6 +41,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
* `name` - (Required) Name of the build * `operating_system` - (Required) Operating system that the game server binaries are built to run on. Valid values: `WINDOWS_2012`, `AMAZON_LINUX`, `AMAZON_LINUX_2`, `WINDOWS_2016`, `AMAZON_LINUX_2023`. * `storage_location` - (Required) Information indicating where your game build files are stored. See below. @@ -89,4 +90,4 @@ Using `terraform import`, import GameLift Builds using the ID. For example: % terraform import aws_gamelift_build.example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/gamelift_fleet.html.markdown b/website/docs/cdktf/python/r/gamelift_fleet.html.markdown index ceae405a8db6..98493fad6f88 100644 --- a/website/docs/cdktf/python/r/gamelift_fleet.html.markdown +++ b/website/docs/cdktf/python/r/gamelift_fleet.html.markdown @@ -34,7 +34,8 @@ resource "aws_gamelift_fleet" "example" { This resource supports the following arguments: -* `build_id` - (Optional) ID of the GameLift Build to be deployed on the fleet. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `build_id` - (Optional) ID of the GameLift Build to be deployed on the fleet. Conflicts with `script_id`. * `certificate_configuration` - (Optional) Prompts GameLift to generate a TLS/SSL certificate for the fleet. See [certificate_configuration](#certificate_configuration). * `description` - (Optional) Human-readable description of the fleet. * `ec2_inbound_permission` - (Optional) Range of IP addresses and port settings that permit inbound traffic to access server processes running on the fleet. See below. 
@@ -46,7 +47,7 @@ This resource supports the following arguments: * `new_game_session_protection_policy` - (Optional) Game session protection policy to apply to all instances in this fleetE.g., `FullProtection`. Defaults to `NoProtection`. * `resource_creation_limit_policy` - (Optional) Policy that limits the number of game sessions an individual player can create over a span of time for this fleet. See below. * `runtime_configuration` - (Optional) Instructions for launching server processes on each instance in the fleet. See below. -* `script_id` - (Optional) ID of the GameLift Script to be deployed on the fleet. +* `script_id` - (Optional) ID of the GameLift Script to be deployed on the fleet. Conflicts with `build_id`. * `tags` - (Optional) Key-value map of resource tags. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. ### Nested Fields @@ -122,4 +123,4 @@ Using `terraform import`, import GameLift Fleets using the ID. For example: % terraform import aws_gamelift_fleet.example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/gamelift_game_server_group.html.markdown b/website/docs/cdktf/python/r/gamelift_game_server_group.html.markdown index 923a5a2df0c8..326792c8aff4 100644 --- a/website/docs/cdktf/python/r/gamelift_game_server_group.html.markdown +++ b/website/docs/cdktf/python/r/gamelift_game_server_group.html.markdown @@ -138,6 +138,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `balancing_strategy` - (Optional) Indicates how GameLift FleetIQ balances the use of Spot Instances and On-Demand Instances. Valid values: `SPOT_ONLY`, `SPOT_PREFERRED`, `ON_DEMAND_ONLY`. Defaults to `SPOT_PREFERRED`. * `game_server_group_name` - (Required) Name of the game server group. @@ -236,4 +237,4 @@ Using `terraform import`, import GameLift Game Server Group using the `name`. Fo % terraform import aws_gamelift_game_server_group.example example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/gamelift_game_session_queue.html.markdown b/website/docs/cdktf/python/r/gamelift_game_session_queue.html.markdown index b640115c75cc..adfa876b0d59 100644 --- a/website/docs/cdktf/python/r/gamelift_game_session_queue.html.markdown +++ b/website/docs/cdktf/python/r/gamelift_game_session_queue.html.markdown @@ -45,6 +45,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the session queue. * `timeout_in_seconds` - (Required) Maximum time a game session request can remain in the queue. * `custom_event_data` - (Optional) Information to be added to all events that are related to this game session queue. 
@@ -92,4 +93,4 @@ Using `terraform import`, import GameLift Game Session Queues using their `name` % terraform import aws_gamelift_game_session_queue.example example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/gamelift_script.html.markdown b/website/docs/cdktf/python/r/gamelift_script.html.markdown index 446bf4ea4d84..1101e3c33a84 100644 --- a/website/docs/cdktf/python/r/gamelift_script.html.markdown +++ b/website/docs/cdktf/python/r/gamelift_script.html.markdown @@ -40,6 +40,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the script * `storage_location` - (Optional) Information indicating where your game script files are stored. See below. * `version` - (Optional) Version that is associated with this script. @@ -88,4 +89,4 @@ Using `terraform import`, import GameLift Scripts using the ID. For example: % terraform import aws_gamelift_script.example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/glacier_vault.html.markdown b/website/docs/cdktf/python/r/glacier_vault.html.markdown index 487e87bd600e..01b798c167bc 100644 --- a/website/docs/cdktf/python/r/glacier_vault.html.markdown +++ b/website/docs/cdktf/python/r/glacier_vault.html.markdown @@ -67,6 +67,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The name of the Vault. Names can be between 1 and 255 characters long and the valid characters are a-z, A-Z, 0-9, '_' (underscore), '-' (hyphen), and '.' (period). * `access_policy` - (Optional) The policy document. This is a JSON formatted string. The heredoc syntax or `file` function is helpful here. Use the [Glacier Developer Guide](https://docs.aws.amazon.com/amazonglacier/latest/dev/vault-access-policy.html) for more information on Glacier Vault Policy @@ -111,4 +112,4 @@ Using `terraform import`, import Glacier Vaults using the `name`. For example: % terraform import aws_glacier_vault.archive my_archive ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/glacier_vault_lock.html.markdown b/website/docs/cdktf/python/r/glacier_vault_lock.html.markdown index dcbbdddb9fd6..7e93db636399 100644 --- a/website/docs/cdktf/python/r/glacier_vault_lock.html.markdown +++ b/website/docs/cdktf/python/r/glacier_vault_lock.html.markdown @@ -89,6 +89,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `complete_lock` - (Required) Boolean whether to permanently apply this Glacier Lock Policy. Once completed, this cannot be undone. If set to `false`, the Glacier Lock Policy remains in a testing mode for 24 hours. After that time, the Glacier Lock Policy is automatically removed by Glacier and the Terraform resource will show as needing recreation. 
Changing this from `false` to `true` will show as resource recreation, which is expected. Changing this from `true` to `false` is not possible unless the Glacier Vault is recreated at the same time. * `policy` - (Required) JSON string containing the IAM policy to apply as the Glacier Vault Lock policy. * `vault_name` - (Required) The name of the Glacier Vault. @@ -125,4 +126,4 @@ Using `terraform import`, import Glacier Vault Locks using the Glacier Vault nam % terraform import aws_glacier_vault_lock.example example-vault ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/globalaccelerator_accelerator.html.markdown b/website/docs/cdktf/python/r/globalaccelerator_accelerator.html.markdown index 54338a21b07b..d4d524d5809f 100644 --- a/website/docs/cdktf/python/r/globalaccelerator_accelerator.html.markdown +++ b/website/docs/cdktf/python/r/globalaccelerator_accelerator.html.markdown @@ -86,6 +86,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_globalaccelerator_accelerator.example + identity = { + "arn" = "arn:aws:globalaccelerator::123456789012:accelerator/1234abcd-abcd-1234-abcd-1234abcdefgh" + } +} + +resource "aws_globalaccelerator_accelerator" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the Global Accelerator accelerator. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Global Accelerator accelerators using the `arn`. 
For example: ```python @@ -109,4 +130,4 @@ Using `terraform import`, import Global Accelerator accelerators using the `arn` % terraform import aws_globalaccelerator_accelerator.example arn:aws:globalaccelerator::111111111111:accelerator/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/globalaccelerator_cross_account_attachment.html.markdown b/website/docs/cdktf/python/r/globalaccelerator_cross_account_attachment.html.markdown index 18222a027531..67da7703129b 100644 --- a/website/docs/cdktf/python/r/globalaccelerator_cross_account_attachment.html.markdown +++ b/website/docs/cdktf/python/r/globalaccelerator_cross_account_attachment.html.markdown @@ -93,7 +93,28 @@ This resource exports the following attributes in addition to the arguments abov ## Import -In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Global Accelerator Cross Account Attachment using the `example_id_arg`. For example: +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_globalaccelerator_cross_account_attachment.example + identity = { + "arn" = "arn:aws:globalaccelerator::123456789012:attachment/1234abcd-abcd-1234-abcd-1234abcdefgh" + } +} + +resource "aws_globalaccelerator_cross_account_attachment" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the Global Accelerator cross-account attachment. + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Global Accelerator Cross Account Attachment using the `arn`. For example: ```python # DO NOT EDIT. 
Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug @@ -110,10 +131,10 @@ class MyConvertedCode(TerraformStack): GlobalacceleratorCrossAccountAttachment.generate_config_for_import(self, "example", "arn:aws:globalaccelerator::012345678910:attachment/01234567-abcd-8910-efgh-123456789012") ``` -Using `terraform import`, import Global Accelerator Cross Account Attachment using the `example_id_arg`. For example: +Using `terraform import`, import Global Accelerator Cross Account Attachment using the `arn`. For example: ```console % terraform import aws_globalaccelerator_cross_account_attachment.example arn:aws:globalaccelerator::012345678910:attachment/01234567-abcd-8910-efgh-123456789012 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/globalaccelerator_custom_routing_accelerator.html.markdown b/website/docs/cdktf/python/r/globalaccelerator_custom_routing_accelerator.html.markdown index 1a7c33b3dfd6..0144c0c3ff3b 100644 --- a/website/docs/cdktf/python/r/globalaccelerator_custom_routing_accelerator.html.markdown +++ b/website/docs/cdktf/python/r/globalaccelerator_custom_routing_accelerator.html.markdown @@ -85,6 +85,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_globalaccelerator_custom_routing_accelerator.example + identity = { + "arn" = "arn:aws:globalaccelerator::123456789012:accelerator/1234abcd-abcd-1234-abcd-1234abcdefgh" + } +} + +resource "aws_globalaccelerator_custom_routing_accelerator" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the Global Accelerator custom routing accelerator. 
+ In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Global Accelerator custom routing accelerators using the `arn`. For example: ```python @@ -108,4 +129,4 @@ Using `terraform import`, import Global Accelerator custom routing accelerators % terraform import aws_globalaccelerator_custom_routing_accelerator.example arn:aws:globalaccelerator::111111111111:accelerator/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/globalaccelerator_custom_routing_endpoint_group.html.markdown b/website/docs/cdktf/python/r/globalaccelerator_custom_routing_endpoint_group.html.markdown index 6c2b8d59df9c..45f28decb183 100644 --- a/website/docs/cdktf/python/r/globalaccelerator_custom_routing_endpoint_group.html.markdown +++ b/website/docs/cdktf/python/r/globalaccelerator_custom_routing_endpoint_group.html.markdown @@ -77,6 +77,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_globalaccelerator_custom_routing_endpoint_group.example + identity = { + "arn" = "arn:aws:globalaccelerator::123456789012:accelerator/1234abcd-abcd-1234-abcd-1234abcdefgh/listener/0123vxyz/endpoint-group/098765zyxwvu" + } +} + +resource "aws_globalaccelerator_custom_routing_endpoint_group" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the Global Accelerator custom routing endpoint group. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Global Accelerator custom routing endpoint groups using the `id`. 
For example: ```python @@ -100,4 +121,4 @@ Using `terraform import`, import Global Accelerator custom routing endpoint grou % terraform import aws_globalaccelerator_custom_routing_endpoint_group.example arn:aws:globalaccelerator::111111111111:accelerator/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/listener/xxxxxxx/endpoint-group/xxxxxxxx ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/globalaccelerator_custom_routing_listener.html.markdown b/website/docs/cdktf/python/r/globalaccelerator_custom_routing_listener.html.markdown index 0f183ebc38da..aeae1e86f2fa 100644 --- a/website/docs/cdktf/python/r/globalaccelerator_custom_routing_listener.html.markdown +++ b/website/docs/cdktf/python/r/globalaccelerator_custom_routing_listener.html.markdown @@ -78,6 +78,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_globalaccelerator_custom_routing_listener.example + identity = { + "arn" = "arn:aws:globalaccelerator::123456789012:accelerator/1234abcd-abcd-1234-abcd-1234abcdefgh/listener/0123vxyz" + } +} + +resource "aws_globalaccelerator_custom_routing_listener" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the Global Accelerator custom routing listener. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Global Accelerator custom routing listeners using the `id`. 
For example: ```python @@ -101,4 +122,4 @@ Using `terraform import`, import Global Accelerator custom routing listeners usi % terraform import aws_globalaccelerator_custom_routing_listener.example arn:aws:globalaccelerator::111111111111:accelerator/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/listener/xxxxxxxx ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/globalaccelerator_endpoint_group.html.markdown b/website/docs/cdktf/python/r/globalaccelerator_endpoint_group.html.markdown index bd8bb55845d2..0bf2a36f8a50 100644 --- a/website/docs/cdktf/python/r/globalaccelerator_endpoint_group.html.markdown +++ b/website/docs/cdktf/python/r/globalaccelerator_endpoint_group.html.markdown @@ -82,6 +82,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_globalaccelerator_endpoint_group.example + identity = { + "arn" = "arn:aws:globalaccelerator::123456789012:accelerator/1234abcd-abcd-1234-abcd-1234abcdefgh/listener/0123vxyz/endpoint-group/098765zyxwvu" + } +} + +resource "aws_globalaccelerator_endpoint_group" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the Global Accelerator endpoint group. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Global Accelerator endpoint groups using the `id`. 
For example: ```python @@ -105,4 +126,4 @@ Using `terraform import`, import Global Accelerator endpoint groups using the `i % terraform import aws_globalaccelerator_endpoint_group.example arn:aws:globalaccelerator::111111111111:accelerator/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/listener/xxxxxxx/endpoint-group/xxxxxxxx ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/globalaccelerator_listener.html.markdown b/website/docs/cdktf/python/r/globalaccelerator_listener.html.markdown index 3a021cc8f257..44f552118fb2 100644 --- a/website/docs/cdktf/python/r/globalaccelerator_listener.html.markdown +++ b/website/docs/cdktf/python/r/globalaccelerator_listener.html.markdown @@ -82,6 +82,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_globalaccelerator_listener.example + identity = { + "arn" = "arn:aws:globalaccelerator::123456789012:accelerator/1234abcd-abcd-1234-abcd-1234abcdefgh/listener/0123vxyz" + } +} + +resource "aws_globalaccelerator_listener" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the Global Accelerator listener. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Global Accelerator listeners using the `id`. For example: ```python @@ -105,4 +126,4 @@ Using `terraform import`, import Global Accelerator listeners using the `id`. 
Fo % terraform import aws_globalaccelerator_listener.example arn:aws:globalaccelerator::111111111111:accelerator/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/listener/xxxxxxxx ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/glue_catalog_database.html.markdown b/website/docs/cdktf/python/r/glue_catalog_database.html.markdown index 056825dd66eb..989ce227972c 100644 --- a/website/docs/cdktf/python/r/glue_catalog_database.html.markdown +++ b/website/docs/cdktf/python/r/glue_catalog_database.html.markdown @@ -61,6 +61,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `catalog_id` - (Optional) ID of the Glue Catalog to create the database in. If omitted, this defaults to the AWS Account ID. * `create_table_default_permission` - (Optional) Creates a set of default permissions on the table for principals. See [`create_table_default_permission`](#create_table_default_permission) below. * `description` - (Optional) Description of the database. 
@@ -124,4 +125,4 @@ Using `terraform import`, import Glue Catalog Databases using the `catalog_id:na % terraform import aws_glue_catalog_database.database 123456789012:my_database ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/glue_catalog_table.html.markdown b/website/docs/cdktf/python/r/glue_catalog_table.html.markdown index 4cdeaa0f1726..a2e6559b29f2 100644 --- a/website/docs/cdktf/python/r/glue_catalog_table.html.markdown +++ b/website/docs/cdktf/python/r/glue_catalog_table.html.markdown @@ -100,6 +100,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `catalog_id` - (Optional) ID of the Glue Catalog and database to create the table in. If omitted, this defaults to the AWS Account ID plus the database name. * `description` - (Optional) Description of the table. * `owner` - (Optional) Owner of the table. @@ -142,6 +143,7 @@ To add an index to an existing table, see the [`glue_partition_index` resource]( * `comment` - (Optional) Free-form text comment. * `name` - (Required) Name of the Partition Key. +* `parameters` - (Optional) Map of key-value pairs. * `type` - (Optional) Datatype of data in the Partition Key. 
### storage_descriptor @@ -236,4 +238,4 @@ Using `terraform import`, import Glue Tables using the catalog ID (usually AWS a % terraform import aws_glue_catalog_table.MyTable 123456789012:MyDatabase:MyTable ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/glue_catalog_table_optimizer.html.markdown b/website/docs/cdktf/python/r/glue_catalog_table_optimizer.html.markdown index 3bfe1e1a2599..c0873fc599c1 100644 --- a/website/docs/cdktf/python/r/glue_catalog_table_optimizer.html.markdown +++ b/website/docs/cdktf/python/r/glue_catalog_table_optimizer.html.markdown @@ -59,14 +59,14 @@ class MyConvertedCode(TerraformStack): catalog_id="123456789012", configuration=[GlueCatalogTableOptimizerConfiguration( enabled=True, - retention_configuration=[{ - "iceberg_configuration": [{ - "clean_expired_files": True, - "number_of_snapshots_to_retain": 3, - "snapshot_retention_period_in_days": 7 - } + retention_configuration=[GlueCatalogTableOptimizerConfigurationRetentionConfiguration( + iceberg_configuration=[GlueCatalogTableOptimizerConfigurationRetentionConfigurationIcebergConfiguration( + clean_expired_files=True, + number_of_snapshots_to_retain=3, + snapshot_retention_period_in_days=7 + ) ] - } + ) ], role_arn="arn:aws:iam::123456789012:role/example-role" ) @@ -95,13 +95,13 @@ class MyConvertedCode(TerraformStack): catalog_id="123456789012", configuration=[GlueCatalogTableOptimizerConfiguration( enabled=True, - orphan_file_deletion_configuration=[{ - "iceberg_configuration": [{ - "location": "s3://example-bucket/example_table/", - "orphan_file_retention_period_in_days": 7 - } + orphan_file_deletion_configuration=[GlueCatalogTableOptimizerConfigurationOrphanFileDeletionConfiguration( + iceberg_configuration=[GlueCatalogTableOptimizerConfigurationOrphanFileDeletionConfigurationIcebergConfiguration( + location="s3://example-bucket/example_table/", + orphan_file_retention_period_in_days=7 + ) ] - } + ) ], 
role_arn="arn:aws:iam::123456789012:role/example-role" ) @@ -114,8 +114,9 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -The following arguments are required: +This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `catalog_id` - (Required) The Catalog ID of the table. * `configuration` - (Required) A configuration block that defines the table optimizer settings. See [Configuration](#configuration) for additional details. * `database_name` - (Required) The name of the database in the catalog in which the table resides. @@ -132,15 +133,17 @@ The following arguments are required: ### Orphan File Deletion Configuration * `iceberg_configuration` (Optional) - The configuration for an Iceberg orphan file deletion optimizer. - * `orphan_file_retention_period_in_days` (Optional) - The number of days that orphan files should be retained before file deletion. Defaults to `3`. * `location` (Optional) - Specifies a directory in which to look for files. You may choose a sub-directory rather than the top-level table location. Defaults to the table's location. - + * `orphan_file_retention_period_in_days` (Optional) - The number of days that orphan files should be retained before file deletion. Defaults to `3`. + * `run_rate_in_hours` (Optional) - Interval in hours between orphan file deletion job runs. Defaults to `24`. + ### Retention Configuration * `iceberg_configuration` (Optional) - The configuration for an Iceberg snapshot retention optimizer. - * `snapshot_retention_period_in_days` (Optional) - The number of days to retain the Iceberg snapshots. Defaults to `5`, or the corresponding Iceberg table configuration field if it exists.
- * `number_of_snapshots_to_retain` (Optional) - The number of Iceberg snapshots to retain within the retention period. Defaults to `1` or the corresponding Iceberg table configuration field if it exists. * `clean_expired_files` (Optional) - If set to `false`, snapshots are only deleted from table metadata, and the underlying data and metadata files are not deleted. Defaults to `false`. + * `number_of_snapshots_to_retain` (Optional) - The number of Iceberg snapshots to retain within the retention period. Defaults to `1` or the corresponding Iceberg table configuration field if it exists. + * `run_rate_in_hours` (Optional) - Interval in hours between retention job runs. Defaults to `24`. + * `snapshot_retention_period_in_days` (Optional) - The number of days to retain the Iceberg snapshots. Defaults to `5`, or the corresponding Iceberg table configuration field if it exists. ## Attribute Reference @@ -171,4 +174,4 @@ Using `terraform import`, import Glue Catalog Table Optimizer using the `catalog % terraform import aws_glue_catalog_table_optimizer.example 123456789012,example_database,example_table,compaction ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/glue_classifier.html.markdown b/website/docs/cdktf/python/r/glue_classifier.html.markdown index 28f95baa4061..d95dc8e7588f 100644 --- a/website/docs/cdktf/python/r/glue_classifier.html.markdown +++ b/website/docs/cdktf/python/r/glue_classifier.html.markdown @@ -115,11 +115,12 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `csv_classifier` - (Optional) A classifier for CSV content. Defined below. 
-* `grok_classifier` – (Optional) A classifier that uses grok patterns. Defined below. -* `json_classifier` – (Optional) A classifier for JSON content. Defined below. -* `name` – (Required) The name of the classifier. -* `xml_classifier` – (Optional) A classifier for XML content. Defined below. +* `grok_classifier` - (Optional) A classifier that uses grok patterns. Defined below. +* `json_classifier` - (Optional) A classifier for JSON content. Defined below. +* `name` - (Required) The name of the classifier. +* `xml_classifier` - (Optional) A classifier for XML content. Defined below. ### csv_classifier @@ -131,7 +132,7 @@ This resource supports the following arguments: * `disable_value_trimming` - (Optional) Specifies whether to trim column values. * `header` - (Optional) A list of strings representing column names. * `quote_symbol` - (Optional) A custom symbol to denote what combines content into a single column value. It must be different from the column delimiter. -* `serde` – (Optional) The SerDe for processing CSV. Valid values are `OpenCSVSerDe`, `LazySimpleSerDe`, `None`. +* `serde` - (Optional) The SerDe for processing CSV. Valid values are `OpenCSVSerDe`, `LazySimpleSerDe`, `None`. ### grok_classifier @@ -179,4 +180,4 @@ Using `terraform import`, import Glue Classifiers using their name. For example: % terraform import aws_glue_classifier.MyClassifier MyClassifier ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/glue_connection.html.markdown b/website/docs/cdktf/python/r/glue_connection.html.markdown index f28a70bba82e..f03a5431a70d 100644 --- a/website/docs/cdktf/python/r/glue_connection.html.markdown +++ b/website/docs/cdktf/python/r/glue_connection.html.markdown @@ -406,20 +406,22 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -The following arguments are required: +This resource supports the following arguments: -* `name` – (Required) Name of the connection. 
+* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `name` - (Required) Name of the connection. The following arguments are optional: -* `catalog_id` – (Optional) ID of the Data Catalog in which to create the connection. If none is supplied, the AWS account ID is used by default. -* `athena_properties` – (Optional) Map of key-value pairs used as connection properties specific to the Athena compute environment. -* `connection_properties` – (Optional) Map of key-value pairs used as parameters for this connection. For more information, see the [AWS Documentation](https://docs.aws.amazon.com/glue/latest/dg/connection-properties.html). +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `catalog_id` - (Optional) ID of the Data Catalog in which to create the connection. If none is supplied, the AWS account ID is used by default. +* `athena_properties` - (Optional) Map of key-value pairs used as connection properties specific to the Athena compute environment. +* `connection_properties` - (Optional) Map of key-value pairs used as parameters for this connection. For more information, see the [AWS Documentation](https://docs.aws.amazon.com/glue/latest/dg/connection-properties.html). **Note:** Some connection types require the `SparkProperties` property with a JSON document that contains the actual connection properties. For specific examples, refer to [Example Usage](#example-usage). -* `connection_type` – (Optional) Type of the connection. 
Valid values: `AZURECOSMOS`, `AZURESQL`, `BIGQUERY`, `CUSTOM`, `DYNAMODB`, `JDBC`, `KAFKA`, `MARKETPLACE`, `MONGODB`, `NETWORK`, `OPENSEARCH`, `SNOWFLAKE`. Defaults to `JDBC`. -* `description` – (Optional) Description of the connection. -* `match_criteria` – (Optional) List of criteria that can be used in selecting this connection. +* `connection_type` - (Optional) Type of the connection. Valid values: `AZURECOSMOS`, `AZURESQL`, `BIGQUERY`, `CUSTOM`, `DYNAMODB`, `JDBC`, `KAFKA`, `MARKETPLACE`, `MONGODB`, `NETWORK`, `OPENSEARCH`, `SNOWFLAKE`. Defaults to `JDBC`. +* `description` - (Optional) Description of the connection. +* `match_criteria` - (Optional) List of criteria that can be used in selecting this connection. * `physical_connection_requirements` - (Optional) Map of physical connection requirements, such as VPC and SecurityGroup. See [`physical_connection_requirements` Block](#physical_connection_requirements-block) for details. * `tags` - (Optional) Key-value map of resource tags. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. 
@@ -464,4 +466,4 @@ Using `terraform import`, import Glue Connections using the `CATALOG-ID` (AWS ac % terraform import aws_glue_connection.MyConnection 123456789012:MyConnection ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/glue_crawler.html.markdown b/website/docs/cdktf/python/r/glue_crawler.html.markdown index 61c70bbe2795..9db3c9162963 100644 --- a/website/docs/cdktf/python/r/glue_crawler.html.markdown +++ b/website/docs/cdktf/python/r/glue_crawler.html.markdown @@ -189,6 +189,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `database_name` (Required) Glue database where results are written. * `name` (Required) Name of the crawler. * `role` (Required) The IAM role friendly name (including path without leading slash), or ARN of an IAM role, used by the crawler to access other resources. @@ -326,4 +327,4 @@ Using `terraform import`, import Glue Crawlers using `name`. For example: % terraform import aws_glue_crawler.MyJob MyJob ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/glue_data_catalog_encryption_settings.html.markdown b/website/docs/cdktf/python/r/glue_data_catalog_encryption_settings.html.markdown index 8f098bb9b8f2..3388a6e7a4db 100644 --- a/website/docs/cdktf/python/r/glue_data_catalog_encryption_settings.html.markdown +++ b/website/docs/cdktf/python/r/glue_data_catalog_encryption_settings.html.markdown @@ -45,8 +45,9 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: -* `data_catalog_encryption_settings` – (Required) The security configuration to set. 
see [Data Catalog Encryption Settings](#data_catalog_encryption_settings). -* `catalog_id` – (Optional) The ID of the Data Catalog to set the security configuration for. If none is provided, the AWS account ID is used by default. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `data_catalog_encryption_settings` - (Required) The security configuration to set. see [Data Catalog Encryption Settings](#data_catalog_encryption_settings). +* `catalog_id` - (Optional) The ID of the Data Catalog to set the security configuration for. If none is provided, the AWS account ID is used by default. ### data_catalog_encryption_settings @@ -95,4 +96,4 @@ Using `terraform import`, import Glue Data Catalog Encryption Settings using `CA % terraform import aws_glue_data_catalog_encryption_settings.example 123456789012 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/glue_data_quality_ruleset.html.markdown b/website/docs/cdktf/python/r/glue_data_quality_ruleset.html.markdown index 72a28e6d7d81..61da1c84fce9 100644 --- a/website/docs/cdktf/python/r/glue_data_quality_ruleset.html.markdown +++ b/website/docs/cdktf/python/r/glue_data_quality_ruleset.html.markdown @@ -106,6 +106,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` - (Optional) Description of the data quality ruleset. 
* `name` - (Required, Forces new resource) Name of the data quality ruleset. * `ruleset` - (Optional) A Data Quality Definition Language (DQDL) ruleset. For more information, see the AWS Glue developer guide. @@ -153,4 +154,4 @@ Using `terraform import`, import Glue Data Quality Ruleset using the `name`. For % terraform import aws_glue_data_quality_ruleset.example exampleName ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/glue_dev_endpoint.html.markdown b/website/docs/cdktf/python/r/glue_dev_endpoint.html.markdown index 894270b5c86f..3af17439e8f8 100644 --- a/website/docs/cdktf/python/r/glue_dev_endpoint.html.markdown +++ b/website/docs/cdktf/python/r/glue_dev_endpoint.html.markdown @@ -64,6 +64,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `arguments` - (Optional) A map of arguments used to configure the endpoint. * `extra_jars_s3_path` - (Optional) Path to one or more Java Jars in an S3 bucket that should be loaded in this endpoint. * `extra_python_libs_s3_path` - (Optional) Path(s) to one or more Python libraries in an S3 bucket that should be loaded in this endpoint. Multiple values must be complete paths separated by a comma. @@ -121,4 +122,4 @@ Using `terraform import`, import a Glue Development Endpoint using the `name`. 
F % terraform import aws_glue_dev_endpoint.example foo ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/glue_job.html.markdown b/website/docs/cdktf/python/r/glue_job.html.markdown index fface3745d00..9d59519e862e 100644 --- a/website/docs/cdktf/python/r/glue_job.html.markdown +++ b/website/docs/cdktf/python/r/glue_job.html.markdown @@ -268,34 +268,29 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: -* `command` – (Required) The command of the job. Defined below. -* `connections` – (Optional) The list of connections used for this job. -* `default_arguments` – (Optional) The map of default arguments for this job. You can specify arguments here that your own job-execution script consumes, as well as arguments that AWS Glue itself consumes. For information about how to specify and consume your own Job arguments, see the [Calling AWS Glue APIs in Python](http://docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-python-calling.html) topic in the developer guide. For information about the key-value pairs that AWS Glue consumes to set up your job, see the [Special Parameters Used by AWS Glue](http://docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-python-glue-arguments.html) topic in the developer guide. -* `non_overridable_arguments` – (Optional) Non-overridable arguments for this job, specified as name-value pairs. -* `description` – (Optional) Description of the job. -* `execution_property` – (Optional) Execution property of the job. Defined below. +* `command` - (Required) The command of the job. Defined below. +* `connections` - (Optional) The list of connections used for this job. +* `default_arguments` - (Optional) The map of default arguments for this job. You can specify arguments here that your own job-execution script consumes, as well as arguments that AWS Glue itself consumes. 
For information about how to specify and consume your own Job arguments, see the [Calling AWS Glue APIs in Python](http://docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-python-calling.html) topic in the developer guide. For information about the key-value pairs that AWS Glue consumes to set up your job, see the [Special Parameters Used by AWS Glue](http://docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-python-glue-arguments.html) topic in the developer guide. +* `description` - (Optional) Description of the job. +* `execution_class` - (Optional) Indicates whether the job is run with a standard or flexible execution class. The standard execution class is ideal for time-sensitive workloads that require fast job startup and dedicated resources. Valid value: `FLEX`, `STANDARD`. +* `execution_property` - (Optional) Execution property of the job. Defined below. * `glue_version` - (Optional) The version of glue to use, for example "1.0". Ray jobs should set this to 4.0 or greater. For information about available versions, see the [AWS Glue Release Notes](https://docs.aws.amazon.com/glue/latest/dg/release-notes.html). +* `job_mode` - (Optional) Describes how a job was created. Valid values are `SCRIPT`, `NOTEBOOK` and `VISUAL`. * `job_run_queuing_enabled` - (Optional) Specifies whether job run queuing is enabled for the job runs for this job. A value of true means job run queuing is enabled for the job runs. If false or not populated, the job runs will not be considered for queueing. -* `execution_class` - (Optional) Indicates whether the job is run with a standard or flexible execution class. The standard execution class is ideal for time-sensitive workloads that require fast job startup and dedicated resources. Valid value: `FLEX`, `STANDARD`. -* `maintenance_window` – (Optional) Specifies the day of the week and hour for the maintenance window for streaming jobs. 
-* `max_capacity` – (Optional) The maximum number of AWS Glue data processing units (DPUs) that can be allocated when this job runs. `Required` when `pythonshell` is set, accept either `0.0625` or `1.0`. Use `number_of_workers` and `worker_type` arguments instead with `glue_version` `2.0` and above. -* `max_retries` – (Optional) The maximum number of times to retry this job if it fails. -* `name` – (Required) The name you assign to this job. It must be unique in your account. +* `maintenance_window` - (Optional) Specifies the day of the week and hour for the maintenance window for streaming jobs. +* `max_capacity` - (Optional) The maximum number of AWS Glue data processing units (DPUs) that can be allocated when this job runs. `Required` when `pythonshell` is set, accepting either `0.0625` or `1.0`. Use `number_of_workers` and `worker_type` arguments instead with `glue_version` `2.0` and above. +* `max_retries` - (Optional) The maximum number of times to retry this job if it fails. +* `name` - (Required) The name you assign to this job. It must be unique in your account. +* `non_overridable_arguments` - (Optional) Non-overridable arguments for this job, specified as name-value pairs. * `notification_property` - (Optional) Notification property of the job. Defined below. -* `role_arn` – (Required) The ARN of the IAM role associated with this job. +* `number_of_workers` - (Optional) The number of workers of a defined workerType that are allocated when a job runs. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `role_arn` - (Required) The ARN of the IAM role associated with this job. * `tags` - (Optional) Key-value map of resource tags.
If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. -* `timeout` – (Optional) The job timeout in minutes. The default is 2880 minutes (48 hours) for `glueetl` and `pythonshell` jobs, and null (unlimited) for `gluestreaming` jobs. +* `timeout` - (Optional) The job timeout in minutes. The default is 2880 minutes (48 hours) for `glueetl` and `pythonshell` jobs, and null (unlimited) for `gluestreaming` jobs. * `security_configuration` - (Optional) The name of the Security Configuration to be associated with the job. * `source_control_details` - (Optional) The details for a source control configuration for a job, allowing synchronization of job artifacts to or from a remote repository. Defined below. -* `worker_type` - (Optional) The type of predefined worker that is allocated when a job runs. Accepts a value of Standard, G.1X, G.2X, or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs. - * For the Standard worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker. - * For the G.1X worker type, each worker maps to 1 DPU (4 vCPU, 16 GB of memory, 64 GB disk), and provides 1 executor per worker. Recommended for memory-intensive jobs. - * For the G.2X worker type, each worker maps to 2 DPU (8 vCPU, 32 GB of memory, 128 GB disk), and provides 1 executor per worker. Recommended for memory-intensive jobs. - * For the G.4X worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of memory) with 256GB disk (approximately 235GB free), and provides 1 executor per worker. Recommended for memory-intensive jobs. Only available for Glue version 3.0. Available AWS Regions: US East (Ohio), US East (N. 
Virginia), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm). - * For the G.8X worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of memory) with 512GB disk (approximately 487GB free), and provides 1 executor per worker. Recommended for memory-intensive jobs. Only available for Glue version 3.0. Available AWS Regions: US East (Ohio), US East (N. Virginia), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm). - * For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPU, 4GB of memory, 64 GB disk), and provides 1 executor per worker. Recommended for low volume streaming jobs. Only available for Glue version 3.0. - * For the Z.2X worker type, each worker maps to 2 M-DPU (8vCPU, 64 GB of m emory, 128 GB disk), and provides up to 8 Ray workers based on the autoscaler. -* `number_of_workers` - (Optional) The number of workers of a defined workerType that are allocated when a job runs. +* `worker_type` - (Optional) The type of predefined worker that is allocated when a job runs. Valid values: `Standard`, `G.1X`, `G.2X`, `G.025X`, `G.4X`, `G.8X`, `G.12X`, `G.16X`, `R.1X`, `R.2X`, `R.4X`, `R.8X`, `Z.2X` (Ray jobs). See the [AWS documentation](https://docs.aws.amazon.com/glue/latest/dg/worker-types.html) for details. ### command Argument Reference @@ -356,4 +351,4 @@ Using `terraform import`, import Glue Jobs using `name`. 
For example: % terraform import aws_glue_job.MyJob MyJob ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/glue_ml_transform.html.markdown b/website/docs/cdktf/python/r/glue_ml_transform.html.markdown index a167af48a5dd..adcd07f976dc 100644 --- a/website/docs/cdktf/python/r/glue_ml_transform.html.markdown +++ b/website/docs/cdktf/python/r/glue_ml_transform.html.markdown @@ -120,16 +120,17 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: -* `name` – (Required) The name you assign to this ML Transform. It must be unique in your account. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `name` - (Required) The name you assign to this ML Transform. It must be unique in your account. * `input_record_tables` - (Required) A list of AWS Glue table definitions used by the transform. see [Input Record Tables](#input_record_tables). * `parameters` - (Required) The algorithmic parameters that are specific to the transform type used. Conditionally dependent on the transform type. see [Parameters](#parameters). -* `role_arn` – (Required) The ARN of the IAM role associated with this ML Transform. -* `description` – (Optional) Description of the ML Transform. +* `role_arn` - (Required) The ARN of the IAM role associated with this ML Transform. +* `description` - (Optional) Description of the ML Transform. * `glue_version` - (Optional) The version of glue to use, for example "1.0". For information about available versions, see the [AWS Glue Release Notes](https://docs.aws.amazon.com/glue/latest/dg/release-notes.html). 
-* `max_capacity` – (Optional) The number of AWS Glue data processing units (DPUs) that are allocated to task runs for this transform. You can allocate from `2` to `100` DPUs; the default is `10`. `max_capacity` is a mutually exclusive option with `number_of_workers` and `worker_type`. -* `max_retries` – (Optional) The maximum number of times to retry this ML Transform if it fails. +* `max_capacity` - (Optional) The number of AWS Glue data processing units (DPUs) that are allocated to task runs for this transform. You can allocate from `2` to `100` DPUs; the default is `10`. `max_capacity` is a mutually exclusive option with `number_of_workers` and `worker_type`. +* `max_retries` - (Optional) The maximum number of times to retry this ML Transform if it fails. * `tags` - (Optional) Key-value map of resource tags. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. -* `timeout` – (Optional) The ML Transform timeout in minutes. The default is 2880 minutes (48 hours). +* `timeout` - (Optional) The ML Transform timeout in minutes. The default is 2880 minutes (48 hours). * `worker_type` - (Optional) The type of predefined worker that is allocated when an ML Transform runs. Accepts a value of `Standard`, `G.1X`, or `G.2X`. Required with `number_of_workers`. * `number_of_workers` - (Optional) The number of workers of a defined `worker_type` that are allocated when an ML Transform runs. Required with `worker_type`. @@ -192,4 +193,4 @@ Using `terraform import`, import Glue ML Transforms using `id`. 
For example: % terraform import aws_glue_ml_transform.example tfm-c2cafbe83b1c575f49eaca9939220e2fcd58e2d5 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/glue_partition.html.markdown b/website/docs/cdktf/python/r/glue_partition.html.markdown index 39a4057e86ae..ca1a1d63b201 100644 --- a/website/docs/cdktf/python/r/glue_partition.html.markdown +++ b/website/docs/cdktf/python/r/glue_partition.html.markdown @@ -37,6 +37,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `database_name` - (Required) Name of the metadata database where the table metadata resides. For Hive compatibility, this must be all lowercase. * `partition_values` - (Required) The values that define the partition. * `catalog_id` - (Optional) ID of the Glue Catalog and database to create the table in. If omitted, this defaults to the AWS Account ID plus the database name. 
@@ -116,4 +117,4 @@ Using `terraform import`, import Glue Partitions using the catalog ID (usually A % terraform import aws_glue_partition.part 123456789012:MyDatabase:MyTable:val1#val2 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/glue_partition_index.html.markdown b/website/docs/cdktf/python/r/glue_partition_index.html.markdown index db0d4e65d3bc..270053419df6 100644 --- a/website/docs/cdktf/python/r/glue_partition_index.html.markdown +++ b/website/docs/cdktf/python/r/glue_partition_index.html.markdown @@ -110,6 +110,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `table_name` - (Required) Name of the table. For Hive compatibility, this must be entirely lowercase. * `database_name` - (Required) Name of the metadata database where the table metadata resides. For Hive compatibility, this must be all lowercase. * `partition_index` - (Required) Configuration block for a partition index. See [`partition_index`](#partition_index) below. 
@@ -158,4 +159,4 @@ Using `terraform import`, import Glue Partition Indexes using the catalog ID (us % terraform import aws_glue_partition_index.example 123456789012:MyDatabase:MyTable:index-name ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/glue_registry.html.markdown b/website/docs/cdktf/python/r/glue_registry.html.markdown index 3a480442c170..1b8349f693ad 100644 --- a/website/docs/cdktf/python/r/glue_registry.html.markdown +++ b/website/docs/cdktf/python/r/glue_registry.html.markdown @@ -35,8 +35,9 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: -* `registry_name` – (Required) The Name of the registry. -* `description` – (Optional) A description of the registry. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `registry_name` - (Required) The Name of the registry. +* `description` - (Optional) A description of the registry. * `tags` - (Optional) Key-value map of resource tags. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. ## Attribute Reference @@ -49,6 +50,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. 
For example: + +```terraform +import { + to = aws_glue_registry.example + identity = { + "arn" = "arn:aws:glue:us-west-2:123456789012:registry/example" + } +} + +resource "aws_glue_registry" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the Glue registry. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Glue Registries using `arn`. For example: ```python @@ -72,4 +94,4 @@ Using `terraform import`, import Glue Registries using `arn`. For example: % terraform import aws_glue_registry.example arn:aws:glue:us-west-2:123456789012:registry/example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/glue_resource_policy.html.markdown b/website/docs/cdktf/python/r/glue_resource_policy.html.markdown index 06293c5d2a54..69a05a33254c 100644 --- a/website/docs/cdktf/python/r/glue_resource_policy.html.markdown +++ b/website/docs/cdktf/python/r/glue_resource_policy.html.markdown @@ -45,7 +45,7 @@ class MyConvertedCode(TerraformStack): type="AWS" ) ], - resources=["arn:${" + data_aws_partition_current.partition + "}:glue:${" + data_aws_region_current.name + "}:${" + current.account_id + "}:*" + resources=["arn:${" + data_aws_partition_current.partition + "}:glue:${" + data_aws_region_current.region + "}:${" + current.account_id + "}:*" ] ) ] @@ -59,7 +59,8 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: -* `policy` – (Required) The policy to be applied to the aws glue data catalog. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
+* `policy` - (Required) The policy to be applied to the aws glue data catalog. * `enable_hybrid` - (Optional) Indicates that you are using both methods to grant cross-account. Valid values are `TRUE` and `FALSE`. Note the terraform will not perform drift detetction on this field as its not return on read. ## Attribute Reference @@ -91,4 +92,4 @@ Using `terraform import`, import Glue Resource Policy using the account ID. For % terraform import aws_glue_resource_policy.Test 12356789012 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/glue_schema.html.markdown b/website/docs/cdktf/python/r/glue_schema.html.markdown index 8186cd83138a..20c21775bdcb 100644 --- a/website/docs/cdktf/python/r/glue_schema.html.markdown +++ b/website/docs/cdktf/python/r/glue_schema.html.markdown @@ -39,12 +39,13 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: -* `schema_name` – (Required) The Name of the schema. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `schema_name` - (Required) The Name of the schema. * `registry_arn` - (Required) The ARN of the Glue Registry to create the schema in. * `data_format` - (Required) The data format of the schema definition. Valid values are `AVRO`, `JSON` and `PROTOBUF`. * `compatibility` - (Required) The compatibility mode of the schema. Values values are: `NONE`, `DISABLED`, `BACKWARD`, `BACKWARD_ALL`, `FORWARD`, `FORWARD_ALL`, `FULL`, and `FULL_ALL`. * `schema_definition` - (Required) The schema definition using the `data_format` setting for `schema_name`. -* `description` – (Optional) A description of the schema. +* `description` - (Optional) A description of the schema. 
* `tags` - (Optional) Key-value map of resource tags. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. ## Attribute Reference @@ -61,6 +62,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_glue_schema.example + identity = { + "arn" = "arn:aws:glue:us-west-2:123456789012:schema/example-registry/example-schema" + } +} + +resource "aws_glue_schema" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the Glue schema. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Glue Registries using `arn`. For example: ```python @@ -84,4 +106,4 @@ Using `terraform import`, import Glue Registries using `arn`. For example: % terraform import aws_glue_schema.example arn:aws:glue:us-west-2:123456789012:schema/example/example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/glue_security_configuration.html.markdown b/website/docs/cdktf/python/r/glue_security_configuration.html.markdown index 26bc3cad774b..3e5a374e2da1 100644 --- a/website/docs/cdktf/python/r/glue_security_configuration.html.markdown +++ b/website/docs/cdktf/python/r/glue_security_configuration.html.markdown @@ -47,8 +47,9 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: -* `encryption_configuration` – (Required) Configuration block containing encryption configuration. Detailed below. 
-* `name` – (Required) Name of the security configuration. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `encryption_configuration` - (Required) Configuration block containing encryption configuration. Detailed below. +* `name` - (Required) Name of the security configuration. ### encryption_configuration Argument Reference @@ -102,4 +103,4 @@ Using `terraform import`, import Glue Security Configurations using `name`. For % terraform import aws_glue_security_configuration.example example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/glue_trigger.html.markdown b/website/docs/cdktf/python/r/glue_trigger.html.markdown index dd90d88b2ebe..e18a594b5f01 100644 --- a/website/docs/cdktf/python/r/glue_trigger.html.markdown +++ b/website/docs/cdktf/python/r/glue_trigger.html.markdown @@ -164,15 +164,16 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: -* `actions` – (Required) List of actions initiated by this trigger when it fires. See [Actions](#actions) Below. -* `description` – (Optional) A description of the new trigger. -* `enabled` – (Optional) Start the trigger. Defaults to `true`. -* `name` – (Required) The name of the trigger. -* `predicate` – (Optional) A predicate to specify when the new trigger should fire. Required when trigger type is `CONDITIONAL`. See [Predicate](#predicate) Below. -* `schedule` – (Optional) A cron expression used to specify the schedule. 
[Time-Based Schedules for Jobs and Crawlers](https://docs.aws.amazon.com/glue/latest/dg/monitor-data-warehouse-schedule.html) +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `actions` - (Required) List of actions initiated by this trigger when it fires. See [Actions](#actions) Below. +* `description` - (Optional) A description of the new trigger. +* `enabled` - (Optional) Start the trigger. Defaults to `true`. +* `name` - (Required) The name of the trigger. +* `predicate` - (Optional) A predicate to specify when the new trigger should fire. Required when trigger type is `CONDITIONAL`. See [Predicate](#predicate) Below. +* `schedule` - (Optional) A cron expression used to specify the schedule. [Time-Based Schedules for Jobs and Crawlers](https://docs.aws.amazon.com/glue/latest/dg/monitor-data-warehouse-schedule.html) * `tags` - (Optional) Key-value map of resource tags. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. -* `start_on_creation` – (Optional) Set to true to start `SCHEDULED` and `CONDITIONAL` triggers when created. True is not supported for `ON_DEMAND` triggers. -* `type` – (Required) The type of trigger. Valid values are `CONDITIONAL`, `EVENT`, `ON_DEMAND`, and `SCHEDULED`. +* `start_on_creation` - (Optional) Set to true to start `SCHEDULED` and `CONDITIONAL` triggers when created. True is not supported for `ON_DEMAND` triggers. +* `type` - (Required) The type of trigger. Valid values are `CONDITIONAL`, `EVENT`, `ON_DEMAND`, and `SCHEDULED`. 
* `workflow_name` - (Optional) A workflow to which the trigger should be associated to. Every workflow graph (DAG) needs a starting trigger (`ON_DEMAND` or `SCHEDULED` type) and can contain multiple additional `CONDITIONAL` triggers. * `event_batching_condition` - (Optional) Batch condition that must be met (specified number of events received or batch time window expired) before EventBridge event trigger fires. See [Event Batching Condition](#event-batching-condition). @@ -249,4 +250,4 @@ Using `terraform import`, import Glue Triggers using `name`. For example: % terraform import aws_glue_trigger.MyTrigger MyTrigger ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/glue_user_defined_function.html.markdown b/website/docs/cdktf/python/r/glue_user_defined_function.html.markdown index a2956adf9b00..1a985d24e6a5 100644 --- a/website/docs/cdktf/python/r/glue_user_defined_function.html.markdown +++ b/website/docs/cdktf/python/r/glue_user_defined_function.html.markdown @@ -51,6 +51,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The name of the function. * `catalog_id` - (Optional) ID of the Glue Catalog to create the function in. If omitted, this defaults to the AWS Account ID. * `database_name` - (Required) The name of the Database to create the Function. 
@@ -97,4 +98,4 @@ Using `terraform import`, import Glue User Defined Functions using the `catalog_ % terraform import aws_glue_user_defined_function.func 123456789012:my_database:my_func ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/glue_workflow.html.markdown b/website/docs/cdktf/python/r/glue_workflow.html.markdown index 7ad2c693d718..50d6863eb4c3 100644 --- a/website/docs/cdktf/python/r/glue_workflow.html.markdown +++ b/website/docs/cdktf/python/r/glue_workflow.html.markdown @@ -63,9 +63,10 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: -* `name` – (Required) The name you assign to this workflow. -* `default_run_properties` – (Optional) A map of default run properties for this workflow. These properties are passed to all jobs associated to the workflow. -* `description` – (Optional) Description of the workflow. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `name` - (Required) The name you assign to this workflow. +* `default_run_properties` - (Optional) A map of default run properties for this workflow. These properties are passed to all jobs associated to the workflow. +* `description` - (Optional) Description of the workflow. * `max_concurrent_runs` - (Optional) Prevents exceeding the maximum number of concurrent runs of any of the component jobs. If you leave this parameter blank, there is no limit to the number of concurrent workflow runs. * `tags` - (Optional) Key-value map of resource tags. 
If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. @@ -102,4 +103,4 @@ Using `terraform import`, import Glue Workflows using `name`. For example: % terraform import aws_glue_workflow.MyWorkflow MyWorkflow ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/grafana_license_association.html.markdown b/website/docs/cdktf/python/r/grafana_license_association.html.markdown index 0264d5486de5..939c2900e78c 100644 --- a/website/docs/cdktf/python/r/grafana_license_association.html.markdown +++ b/website/docs/cdktf/python/r/grafana_license_association.html.markdown @@ -64,6 +64,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `grafana_token` - (Optional) A token from Grafana Labs that ties your AWS account with a Grafana Labs account. * `license_type` - (Required) The type of license for the workspace license association. Valid values are `ENTERPRISE` and `ENTERPRISE_FREE_TRIAL`. * `workspace_id` - (Required) The workspace id. 
@@ -100,4 +101,4 @@ Using `terraform import`, import Grafana workspace license association using the % terraform import aws_grafana_license_association.example g-2054c75a02 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/grafana_role_association.html.markdown b/website/docs/cdktf/python/r/grafana_role_association.html.markdown index 5ebdbd353378..e71f81a610b0 100644 --- a/website/docs/cdktf/python/r/grafana_role_association.html.markdown +++ b/website/docs/cdktf/python/r/grafana_role_association.html.markdown @@ -70,6 +70,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `group_ids` - (Optional) The AWS SSO group ids to be assigned the role given in `role`. * `user_ids` - (Optional) The AWS SSO user ids to be assigned the role given in `role`. @@ -77,4 +78,4 @@ The following arguments are optional: This resource exports no additional attributes. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/grafana_workspace.html.markdown b/website/docs/cdktf/python/r/grafana_workspace.html.markdown index d34da24e950a..12f354c35d8c 100644 --- a/website/docs/cdktf/python/r/grafana_workspace.html.markdown +++ b/website/docs/cdktf/python/r/grafana_workspace.html.markdown @@ -84,7 +84,7 @@ class MyConvertedCode(TerraformStack): ) ``` -The optional argument `configuration` is a JSON string that enables the unified `Grafana Alerting` (Grafana version 10 or newer) and `Plugins Management` (Grafana version 9 or newer) on the Grafana Workspaces. 
+The optional argument `configuration` is a JSON string that disables the unified `Grafana Alerting` (Grafana version 10 or newer) and enables `Plugin Management` (Grafana version 9 or newer) on the Grafana Workspaces. For more information about using Grafana alerting, and the effects of turning it on or off, see [Alerts in Grafana version 10](https://docs.aws.amazon.com/grafana/latest/userguide/v10-alerts.html). @@ -98,8 +98,9 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `configuration` - (Optional) The configuration string for the workspace that you create. For more information about the format and configuration options available, see [Working in your Grafana workspace](https://docs.aws.amazon.com/grafana/latest/userguide/AMG-configure-workspace.html). -* `data_sources` - (Optional) The data sources for the workspace. Valid values are `AMAZON_OPENSEARCH_SERVICE`, `ATHENA`, `CLOUDWATCH`, `PROMETHEUS`, `REDSHIFT`, `SITEWISE`, `TIMESTREAM`, `XRAY` +* `data_sources` - (Optional) The data sources for the workspace. Valid values are `AMAZON_OPENSEARCH_SERVICE`, `ATHENA`, `CLOUDWATCH`, `PROMETHEUS`, `REDSHIFT`, `SITEWISE`, `TIMESTREAM`, `TWINMAKER`, `XRAY` * `description` - (Optional) The workspace description. * `grafana_version` - (Optional) Specifies the version of Grafana to support in the new workspace. Supported values are `8.4`, `9.4` and `10.4`. If not specified, defaults to the latest version. * `name` - (Optional) The Grafana workspace name. @@ -156,4 +157,4 @@ Using `terraform import`, import Grafana Workspace using the workspace's `id`. 
F % terraform import aws_grafana_workspace.example g-2054c75a02 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/grafana_workspace_api_key.html.markdown b/website/docs/cdktf/python/r/grafana_workspace_api_key.html.markdown index 4a7c1ce2f131..80762ea33914 100644 --- a/website/docs/cdktf/python/r/grafana_workspace_api_key.html.markdown +++ b/website/docs/cdktf/python/r/grafana_workspace_api_key.html.markdown @@ -38,8 +38,9 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -The following arguments are required: +This resource supports the following arguments: +- `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). - `key_name` - (Required) Specifies the name of the API key. Key names must be unique to the workspace. - `key_role` - (Required) Specifies the permission level of the API key. Valid values are `VIEWER`, `EDITOR`, or `ADMIN`. - `seconds_to_live` - (Required) Specifies the time in seconds until the API key expires. Keys can be valid for up to 30 days. @@ -51,4 +52,4 @@ This resource exports the following attributes in addition to the arguments abov * `key` - The key token in JSON format. Use this value as a bearer token to authenticate HTTP requests to the workspace. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/grafana_workspace_saml_configuration.html.markdown b/website/docs/cdktf/python/r/grafana_workspace_saml_configuration.html.markdown index 062ee72ba783..13faeae17396 100644 --- a/website/docs/cdktf/python/r/grafana_workspace_saml_configuration.html.markdown +++ b/website/docs/cdktf/python/r/grafana_workspace_saml_configuration.html.markdown @@ -71,6 +71,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `admin_role_values` - (Optional) The admin role values. * `allowed_organizations` - (Optional) The allowed organizations. * `email_assertion` - (Optional) The email assertion. @@ -114,4 +115,4 @@ Using `terraform import`, import Grafana Workspace SAML configuration using the % terraform import aws_grafana_workspace_saml_configuration.example g-2054c75a02 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/grafana_workspace_service_account.html.markdown b/website/docs/cdktf/python/r/grafana_workspace_service_account.html.markdown index 685f24f35057..ba1da9d757ae 100644 --- a/website/docs/cdktf/python/r/grafana_workspace_service_account.html.markdown +++ b/website/docs/cdktf/python/r/grafana_workspace_service_account.html.markdown @@ -40,8 +40,9 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -The following arguments are required: +This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) A name for the service account. The name must be unique within the workspace, as it determines the ID associated with the service account. * `grafana_role` - (Required) The permission level to use for this service account. For more information about the roles and the permissions each has, see the [User roles](https://docs.aws.amazon.com/grafana/latest/userguide/Grafana-user-roles.html) documentation. * `workspace_id` - (Required) The Grafana workspace with which the service account is associated. @@ -77,4 +78,4 @@ Using `terraform import`, import Managed Grafana Workspace Service Account using % terraform import aws_grafana_workspace_service_account.example g-abc12345,1 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/grafana_workspace_service_account_token.html.markdown b/website/docs/cdktf/python/r/grafana_workspace_service_account_token.html.markdown index c0f1bbc0f0ff..187557ae2620 100644 --- a/website/docs/cdktf/python/r/grafana_workspace_service_account_token.html.markdown +++ b/website/docs/cdktf/python/r/grafana_workspace_service_account_token.html.markdown @@ -50,8 +50,9 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -The following arguments are required: +This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) A name for the token to create. The name must be unique within the workspace. * `seconds_to_live` - (Required) Sets how long the token will be valid, in seconds. 
You can set the time up to 30 days in the future. * `service_account_id` - (Required) The ID of the service account for which to create a token. @@ -66,4 +67,4 @@ This resource exports the following attributes in addition to the arguments abov * `expires_at` - Specifies when the service account token will expire. * `key` - The key for the service account token. Used when making calls to the Grafana HTTP APIs to authenticate and authorize the requests. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/guardduty_detector.html.markdown b/website/docs/cdktf/python/r/guardduty_detector.html.markdown index 6a56e1dac006..ccc608eba982 100644 --- a/website/docs/cdktf/python/r/guardduty_detector.html.markdown +++ b/website/docs/cdktf/python/r/guardduty_detector.html.markdown @@ -54,9 +54,10 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `enable` - (Optional) Enable monitoring and feedback reporting. Setting to `false` is equivalent to "suspending" GuardDuty. Defaults to `true`. * `finding_publishing_frequency` - (Optional) Specifies the frequency of notifications sent for subsequent finding occurrences. If the detector is a GuardDuty member account, the value is determined by the GuardDuty primary account and cannot be modified, otherwise defaults to `SIX_HOURS`. For standalone and GuardDuty primary accounts, it must be configured in Terraform to enable drift detection. Valid values for standalone and primary accounts: `FIFTEEN_MINUTES`, `ONE_HOUR`, `SIX_HOURS`. 
See [AWS Documentation](https://docs.aws.amazon.com/guardduty/latest/ug/guardduty_findings_cloudwatch.html#guardduty_findings_cloudwatch_notification_frequency) for more information. -* `datasources` - (Optional) Describes which data sources will be enabled for the detector. See [Data Sources](#data-sources) below for more details. [Deprecated](https://docs.aws.amazon.com/guardduty/latest/ug/guardduty-feature-object-api-changes-march2023.html) in favor of [`aws_guardduty_detector_feature` resources](guardduty_detector_feature.html). +* `datasources` - (Optional, **Deprecated** use `aws_guardduty_detector_feature` resources instead) Describes which data sources will be enabled for the detector. See [Data Sources](#data-sources) below for more details. [Deprecated](https://docs.aws.amazon.com/guardduty/latest/ug/guardduty-feature-object-api-changes-march2023.html) in favor of [`aws_guardduty_detector_feature` resources](guardduty_detector_feature.html). * `tags` - (Optional) Key-value map of resource tags. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. ### Data Sources @@ -150,4 +151,4 @@ Using `terraform import`, import GuardDuty detectors using the detector ID. For The ID of the detector can be retrieved via the [AWS CLI](https://awscli.amazonaws.com/v2/documentation/api/latest/reference/guardduty/list-detectors.html) using `aws guardduty list-detectors`. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/guardduty_detector_feature.html.markdown b/website/docs/cdktf/python/r/guardduty_detector_feature.html.markdown index b5ac1ca8a689..59a2fe082820 100644 --- a/website/docs/cdktf/python/r/guardduty_detector_feature.html.markdown +++ b/website/docs/cdktf/python/r/guardduty_detector_feature.html.markdown @@ -32,6 +32,38 @@ class MyConvertedCode(TerraformStack): example = GuarddutyDetector(self, "example", enable=True ) + GuarddutyDetectorFeature(self, "s3_protection", + detector_id=example.id, + name="S3_DATA_EVENTS", + status="ENABLED" + ) +``` + +## Extended Threat Detection for EKS + +To enable GuardDuty [Extended Threat Detection](https://docs.aws.amazon.com/guardduty/latest/ug/guardduty-extended-threat-detection.html) for EKS, you need at least one of these features enabled: [EKS Protection](https://docs.aws.amazon.com/guardduty/latest/ug/kubernetes-protection.html) or [Runtime Monitoring](https://docs.aws.amazon.com/guardduty/latest/ug/runtime-monitoring-configuration.html). For maximum detection coverage, enabling both is recommended to enhance detection capabilities. + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. 
+# +from imports.aws.guardduty_detector import GuarddutyDetector +from imports.aws.guardduty_detector_feature import GuarddutyDetectorFeature +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + example = GuarddutyDetector(self, "example", + enable=True + ) + GuarddutyDetectorFeature(self, "eks_protection", + detector_id=example.id, + name="EKS_AUDIT_LOGS", + status="ENABLED" + ) GuarddutyDetectorFeature(self, "eks_runtime_monitoring", additional_configuration=[GuarddutyDetectorFeatureAdditionalConfiguration( name="EKS_ADDON_MANAGEMENT", @@ -48,6 +80,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `detector_id` - (Required) Amazon GuardDuty detector ID. * `name` - (Required) The name of the detector feature. Valid values: `S3_DATA_EVENTS`, `EKS_AUDIT_LOGS`, `EBS_MALWARE_PROTECTION`, `RDS_LOGIN_EVENTS`, `EKS_RUNTIME_MONITORING`, `LAMBDA_NETWORK_LOGS`, `RUNTIME_MONITORING`. Only one of two features `EKS_RUNTIME_MONITORING` or `RUNTIME_MONITORING` can be added, adding both features will cause an error. Refer to the [AWS Documentation](https://docs.aws.amazon.com/guardduty/latest/APIReference/API_DetectorFeatureConfiguration.html) for the current list of supported values. * `status` - (Required) The status of the detector feature. Valid values: `ENABLED`, `DISABLED`. @@ -64,4 +97,4 @@ The `additional_configuration` block supports the following: This resource exports no additional attributes. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/guardduty_filter.html.markdown b/website/docs/cdktf/python/r/guardduty_filter.html.markdown index 54abea1b18d4..5e34c28fc64c 100644 --- a/website/docs/cdktf/python/r/guardduty_filter.html.markdown +++ b/website/docs/cdktf/python/r/guardduty_filter.html.markdown @@ -55,6 +55,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `detector_id` - (Required) ID of a GuardDuty detector, attached to your account. * `name` - (Required) The name of your filter. * `description` - (Optional) Description of the filter. @@ -80,7 +81,6 @@ The `criterion` block suports the following: This resource exports the following attributes in addition to the arguments above: * `arn` - The ARN of the GuardDuty filter. -* `id` - A compound field, consisting of the ID of the GuardDuty detector and the name of the filter. * `tags_all` - A map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). 
## Import @@ -108,4 +108,4 @@ Using `terraform import`, import GuardDuty filters using the detector ID and fil % terraform import aws_guardduty_filter.MyFilter 00b00fd5aecc0ab60a708659477e9617:MyFilter ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/guardduty_invite_accepter.html.markdown b/website/docs/cdktf/python/r/guardduty_invite_accepter.html.markdown index d325ba21e3ac..c111f8faba5d 100644 --- a/website/docs/cdktf/python/r/guardduty_invite_accepter.html.markdown +++ b/website/docs/cdktf/python/r/guardduty_invite_accepter.html.markdown @@ -64,14 +64,13 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `detector_id` - (Required) The detector ID of the member GuardDuty account. * `master_account_id` - (Required) AWS account ID for primary account. ## Attribute Reference -This resource exports the following attributes in addition to the arguments above: - -* `id` - GuardDuty member detector ID +This resource exports no additional attributes. 
## Timeouts @@ -104,4 +103,4 @@ Using `terraform import`, import `aws_guardduty_invite_accepter` using the membe % terraform import aws_guardduty_invite_accepter.member 00b00fd5aecc0ab60a708659477e9617 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/guardduty_ipset.html.markdown b/website/docs/cdktf/python/r/guardduty_ipset.html.markdown index 79cd626ee600..5f6764e97db9 100644 --- a/website/docs/cdktf/python/r/guardduty_ipset.html.markdown +++ b/website/docs/cdktf/python/r/guardduty_ipset.html.markdown @@ -57,6 +57,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `activate` - (Required) Specifies whether GuardDuty is to start using the uploaded IPSet. * `detector_id` - (Required) The detector ID of the GuardDuty. * `format` - (Required) The format of the file that contains the IPSet. Valid values: `TXT` | `STIX` | `OTX_CSV` | `ALIEN_VAULT` | `PROOF_POINT` | `FIRE_EYE` @@ -69,7 +70,6 @@ This resource supports the following arguments: This resource exports the following attributes in addition to the arguments above: * `arn` - Amazon Resource Name (ARN) of the GuardDuty IPSet. -* `id` - The ID of the GuardDuty IPSet. * `tags_all` - A map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). 
## Import @@ -97,4 +97,4 @@ Using `terraform import`, import GuardDuty IPSet using the primary GuardDuty det % terraform import aws_guardduty_ipset.MyIPSet 00b00fd5aecc0ab60a708659477e9617:123456789012 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/guardduty_malware_protection_plan.html.markdown b/website/docs/cdktf/python/r/guardduty_malware_protection_plan.html.markdown index c5548c9fbc20..c5b99833173d 100644 --- a/website/docs/cdktf/python/r/guardduty_malware_protection_plan.html.markdown +++ b/website/docs/cdktf/python/r/guardduty_malware_protection_plan.html.markdown @@ -53,6 +53,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `actions` - (Optional) Information about whether the tags will be added to the S3 object after scanning. See [`actions`](#actions-argument-reference) below. * `protected_resource` - (Required) Information about the protected resource that is associated with the created Malware Protection plan. Presently, S3Bucket is the only supported protected resource. See [`protected_resource`](#protected_resource-argument-reference) below. * `role` - (Required) ARN of IAM role that includes the permissions required to scan and add tags to the associated protected resource. 
@@ -109,4 +110,4 @@ Using `terraform import`, import GuardDuty malware protection plans using their % terraform import aws_guardduty_malware_protection_plan.example 1234567890abcdef0123 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/guardduty_member.html.markdown b/website/docs/cdktf/python/r/guardduty_member.html.markdown index a285073167b2..7b1a5d5abbf4 100644 --- a/website/docs/cdktf/python/r/guardduty_member.html.markdown +++ b/website/docs/cdktf/python/r/guardduty_member.html.markdown @@ -49,6 +49,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `account_id` - (Required) AWS account ID for member account. * `detector_id` - (Required) The detector ID of the GuardDuty account where you want to create member accounts. * `email` - (Required) Email address for member account. @@ -60,7 +61,6 @@ This resource supports the following arguments: This resource exports the following attributes in addition to the arguments above: -* `id` - The ID of the GuardDuty member * `relationship_status` - The status of the relationship between the member account and its primary account. More information can be found in [Amazon GuardDuty API Reference](https://docs.aws.amazon.com/guardduty/latest/ug/get-members.html). 
## Timeouts @@ -95,4 +95,4 @@ Using `terraform import`, import GuardDuty members using the primary GuardDuty d % terraform import aws_guardduty_member.MyMember 00b00fd5aecc0ab60a708659477e9617:123456789012 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/guardduty_member_detector_feature.html.markdown b/website/docs/cdktf/python/r/guardduty_member_detector_feature.html.markdown index 4ae01e57a234..53f5b837326f 100644 --- a/website/docs/cdktf/python/r/guardduty_member_detector_feature.html.markdown +++ b/website/docs/cdktf/python/r/guardduty_member_detector_feature.html.markdown @@ -24,8 +24,8 @@ from cdktf import TerraformStack # Provider bindings are generated by running `cdktf get`. # See https://cdk.tf/provider-generation for more details. # -from imports.aws. import GuarddutyMemberDetectorFeature from imports.aws.guardduty_detector import GuarddutyDetector +from imports.aws.guardduty_member_detector_feature import GuarddutyMemberDetectorFeature class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) @@ -34,16 +34,47 @@ class MyConvertedCode(TerraformStack): ) GuarddutyMemberDetectorFeature(self, "runtime_monitoring", account_id="123456789012", - additional_configuration=[{ - "name": "EKS_ADDON_MANAGEMENT", - "status": "ENABLED" - }, { - "name": "ECS_FARGATE_AGENT_MANAGEMENT", - "status": "ENABLED" - } + detector_id=example.id, + name="S3_DATA_EVENTS", + status="ENABLED" + ) +``` + +## Extended Threat Detection for EKS + +To enable GuardDuty [Extended Threat Detection](https://docs.aws.amazon.com/guardduty/latest/ug/guardduty-extended-threat-detection.html) for EKS, you need at least one of these features enabled: [EKS Protection](https://docs.aws.amazon.com/guardduty/latest/ug/kubernetes-protection.html) or [Runtime Monitoring](https://docs.aws.amazon.com/guardduty/latest/ug/runtime-monitoring-configuration.html). 
For maximum detection coverage, enabling both is recommended to enhance detection capabilities. + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.guardduty_detector import GuarddutyDetector +from imports.aws.guardduty_member_detector_feature import GuarddutyMemberDetectorFeature +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + example = GuarddutyDetector(self, "example", + enable=True + ) + GuarddutyMemberDetectorFeature(self, "eks_protection", + account_id="123456789012", + detector_id=example.id, + name="EKS_AUDIT_LOGS", + status="ENABLED" + ) + GuarddutyMemberDetectorFeature(self, "eks_runtime_monitoring", + account_id="123456789012", + additional_configuration=[GuarddutyMemberDetectorFeatureAdditionalConfiguration( + name="EKS_ADDON_MANAGEMENT", + status="ENABLED" + ) ], detector_id=example.id, - name="RUNTIME_MONITORING", + name="EKS_RUNTIME_MONITORING", status="ENABLED" ) ``` @@ -52,6 +83,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `detector_id` - (Required) Amazon GuardDuty detector ID. * `account_id` - (Required) Member account ID to be updated. * `name` - (Required) The name of the detector feature. Valid values: `S3_DATA_EVENTS`, `EKS_AUDIT_LOGS`, `EBS_MALWARE_PROTECTION`, `RDS_LOGIN_EVENTS`, `EKS_RUNTIME_MONITORING`,`RUNTIME_MONITORING`, `LAMBDA_NETWORK_LOGS`. 
@@ -69,4 +101,4 @@ The `additional_configuration` block supports the following: This resource exports no additional attributes. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/guardduty_organization_admin_account.html.markdown b/website/docs/cdktf/python/r/guardduty_organization_admin_account.html.markdown index a2755dfb58ac..cb12b9344990 100644 --- a/website/docs/cdktf/python/r/guardduty_organization_admin_account.html.markdown +++ b/website/docs/cdktf/python/r/guardduty_organization_admin_account.html.markdown @@ -48,13 +48,12 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `admin_account_id` - (Required) AWS account identifier to designate as a delegated administrator for GuardDuty. ## Attribute Reference -This resource exports the following attributes in addition to the arguments above: - -* `id` - AWS account identifier. +This resource exports no additional attributes. 
## Import @@ -81,4 +80,4 @@ Using `terraform import`, import GuardDuty Organization Admin Account using the % terraform import aws_guardduty_organization_admin_account.example 123456789012 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/guardduty_organization_configuration.html.markdown b/website/docs/cdktf/python/r/guardduty_organization_configuration.html.markdown index f07d5b997d2a..bb42e1cf2d8b 100644 --- a/website/docs/cdktf/python/r/guardduty_organization_configuration.html.markdown +++ b/website/docs/cdktf/python/r/guardduty_organization_configuration.html.markdown @@ -62,8 +62,9 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: -* `auto_enable` - (Optional) *Deprecated:* Use `auto_enable_organization_members` instead. When this setting is enabled, all new accounts that are created in, or added to, the organization are added as a member accounts of the organization’s GuardDuty delegated administrator and GuardDuty is enabled in that AWS Region. -* `auto_enable_organization_members` - (Optional) Indicates the auto-enablement configuration of GuardDuty for the member accounts in the organization. Valid values are `ALL`, `NEW`, `NONE`. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `auto_enable_organization_members` - (Required) Indicates the auto-enablement configuration of GuardDuty for the member accounts in the organization. + Valid values are `ALL`, `NEW`, `NONE`. * `detector_id` - (Required) The detector ID of the GuardDuty account. * `datasources` - (Optional) Configuration for the collected datasources. 
[Deprecated](https://docs.aws.amazon.com/guardduty/latest/ug/guardduty-feature-object-api-changes-march2023.html) in favor of [`aws_guardduty_organization_configuration_feature` resources](guardduty_organization_configuration_feature.html). @@ -118,9 +119,7 @@ The `ebs_volumes` block supports the following: ## Attribute Reference -This resource exports the following attributes in addition to the arguments above: - -* `id` - Identifier of the GuardDuty Detector. +This resource exports no additional attributes. ## Import @@ -147,4 +146,4 @@ Using `terraform import`, import GuardDuty Organization Configurations using the % terraform import aws_guardduty_organization_configuration.example 00b00fd5aecc0ab60a708659477e9617 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/guardduty_organization_configuration_feature.html.markdown b/website/docs/cdktf/python/r/guardduty_organization_configuration_feature.html.markdown index fce52115dff3..5c4f273f4144 100644 --- a/website/docs/cdktf/python/r/guardduty_organization_configuration_feature.html.markdown +++ b/website/docs/cdktf/python/r/guardduty_organization_configuration_feature.html.markdown @@ -12,7 +12,7 @@ description: |- Provides a resource to manage a single Amazon GuardDuty [organization configuration feature](https://docs.aws.amazon.com/guardduty/latest/ug/guardduty-features-activation-model.html#guardduty-features). -~> **NOTE:** Deleting this resource does not disable the organization configuration feature, the resource in simply removed from state instead. +~> **NOTE:** Deleting this resource does not disable the organization configuration feature, the resource is simply removed from state instead. 
## Example Usage @@ -48,6 +48,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `auto_enable` - (Required) The status of the feature that is configured for the member accounts within the organization. Valid values: `NEW`, `ALL`, `NONE`. * `detector_id` - (Required) The ID of the detector that configures the delegated administrator. * `name` - (Required) The name of the feature that will be configured for the organization. Valid values: `S3_DATA_EVENTS`, `EKS_AUDIT_LOGS`, `EBS_MALWARE_PROTECTION`, `RDS_LOGIN_EVENTS`, `EKS_RUNTIME_MONITORING`, `LAMBDA_NETWORK_LOGS`, `RUNTIME_MONITORING`. Only one of two features `EKS_RUNTIME_MONITORING` or `RUNTIME_MONITORING` can be added, adding both features will cause an error. Refer to the [AWS Documentation](https://docs.aws.amazon.com/guardduty/latest/APIReference/API_DetectorFeatureConfiguration.html) for the current list of supported values. @@ -64,4 +65,4 @@ The `additional_configuration` block supports the following: This resource exports no additional attributes. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/guardduty_publishing_destination.html.markdown b/website/docs/cdktf/python/r/guardduty_publishing_destination.html.markdown index 83985ae0d383..78d068638f19 100644 --- a/website/docs/cdktf/python/r/guardduty_publishing_destination.html.markdown +++ b/website/docs/cdktf/python/r/guardduty_publishing_destination.html.markdown @@ -82,7 +82,7 @@ class MyConvertedCode(TerraformStack): type="Service" ) ], - resources=["arn:aws:kms:${" + data_aws_region_current.name + "}:${" + current.account_id + "}:key/*" + resources=["arn:aws:kms:${" + data_aws_region_current.region + "}:${" + current.account_id + "}:key/*" ], sid="Allow GuardDuty to encrypt findings" ), DataAwsIamPolicyDocumentStatement( @@ -92,7 +92,7 @@ class MyConvertedCode(TerraformStack): type="AWS" ) ], - resources=["arn:aws:kms:${" + data_aws_region_current.name + "}:${" + current.account_id + "}:key/*" + resources=["arn:aws:kms:${" + data_aws_region_current.region + "}:${" + current.account_id + "}:key/*" ], sid="Allow all users to modify/delete key (test only)" ) @@ -117,6 +117,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `detector_id` - (Required) The detector ID of the GuardDuty. * `destination_arn` - (Required) The bucket arn and prefix under which the findings get exported. Bucket-ARN is required, the prefix is optional and will be `AWSLogs/[Account-ID]/GuardDuty/[Region]/` if not provided * `kms_key_arn` - (Required) The ARN of the KMS key used to encrypt GuardDuty findings. GuardDuty enforces this to be encrypted. 
@@ -126,9 +127,7 @@ This resource supports the following arguments: ## Attribute Reference -This resource exports the following attributes in addition to the arguments above: - -* `id` - The ID of the GuardDuty PublishingDestination and the detector ID. Format: `:` +This resource exports no additional attributes. ## Import @@ -155,4 +154,4 @@ Using `terraform import`, import GuardDuty PublishingDestination using the maste % terraform import aws_guardduty_publishing_destination.test a4b86f26fa42e7e7cf0d1c333ea77777:a4b86f27a0e464e4a7e0516d242f1234 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/guardduty_threatintelset.html.markdown b/website/docs/cdktf/python/r/guardduty_threatintelset.html.markdown index bfbd9c460fed..8836a9bbe7f9 100644 --- a/website/docs/cdktf/python/r/guardduty_threatintelset.html.markdown +++ b/website/docs/cdktf/python/r/guardduty_threatintelset.html.markdown @@ -61,6 +61,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `activate` - (Required) Specifies whether GuardDuty is to start using the uploaded ThreatIntelSet. * `detector_id` - (Required) The detector ID of the GuardDuty. * `format` - (Required) The format of the file that contains the ThreatIntelSet. Valid values: `TXT` | `STIX` | `OTX_CSV` | `ALIEN_VAULT` | `PROOF_POINT` | `FIRE_EYE` @@ -73,7 +74,6 @@ This resource supports the following arguments: This resource exports the following attributes in addition to the arguments above: * `arn` - Amazon Resource Name (ARN) of the GuardDuty ThreatIntelSet. -* `id` - The ID of the GuardDuty ThreatIntelSet and the detector ID. 
Format: `:` * `tags_all` - A map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). ## Import @@ -101,4 +101,4 @@ Using `terraform import`, import GuardDuty ThreatIntelSet using the primary Guar % terraform import aws_guardduty_threatintelset.MyThreatIntelSet 00b00fd5aecc0ab60a708659477e9617:123456789012 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/iam_group_membership.html.markdown b/website/docs/cdktf/python/r/iam_group_membership.html.markdown index 54da12abdf23..29a12709f385 100644 --- a/website/docs/cdktf/python/r/iam_group_membership.html.markdown +++ b/website/docs/cdktf/python/r/iam_group_membership.html.markdown @@ -57,7 +57,7 @@ This resource supports the following arguments: * `name` - (Required) The name to identify the Group Membership * `users` - (Required) A list of IAM User names to associate with the Group -* `group` – (Required) The IAM Group name to attach the list of `users` to +* `group` - (Required) The IAM Group name to attach the list of `users` to ## Attribute Reference @@ -65,10 +65,10 @@ This resource exports the following attributes in addition to the arguments abov * `name` - The name to identify the Group Membership * `users` - list of IAM User names -* `group` – IAM Group name +* `group` - IAM Group name [1]: /docs/providers/aws/r/iam_group.html [2]: /docs/providers/aws/r/iam_user.html [3]: /docs/providers/aws/r/iam_user_group_membership.html - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/iam_openid_connect_provider.html.markdown b/website/docs/cdktf/python/r/iam_openid_connect_provider.html.markdown index dc46ce4ed898..90313535a30d 100644 --- a/website/docs/cdktf/python/r/iam_openid_connect_provider.html.markdown +++ 
b/website/docs/cdktf/python/r/iam_openid_connect_provider.html.markdown @@ -75,6 +75,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_iam_openid_connect_provider.example + identity = { + "arn" = "arn:aws:iam::123456789012:oidc-provider/example.com" + } +} + +resource "aws_iam_openid_connect_provider" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the IAM OpenID Connect provider. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import IAM OpenID Connect Providers using the `arn`. For example: ```python @@ -98,4 +119,4 @@ Using `terraform import`, import IAM OpenID Connect Providers using the `arn`. F % terraform import aws_iam_openid_connect_provider.default arn:aws:iam::123456789012:oidc-provider/accounts.google.com ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/iam_policy.html.markdown b/website/docs/cdktf/python/r/iam_policy.html.markdown index 29d5f6425783..da0d83c46d8c 100644 --- a/website/docs/cdktf/python/r/iam_policy.html.markdown +++ b/website/docs/cdktf/python/r/iam_policy.html.markdown @@ -68,6 +68,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. 
For example: + +```terraform +import { + to = aws_iam_policy.example + identity = { + "arn" = "arn:aws:iam::123456789012:policy/UsersManageOwnCredentials" + } +} + +resource "aws_iam_policy" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the IAM policy. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import IAM Policies using the `arn`. For example: ```python @@ -91,4 +112,4 @@ Using `terraform import`, import IAM Policies using the `arn`. For example: % terraform import aws_iam_policy.administrator arn:aws:iam::123456789012:policy/UsersManageOwnCredentials ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/iam_role.html.markdown b/website/docs/cdktf/python/r/iam_role.html.markdown index 9c23dab5703a..455c6373f6d8 100644 --- a/website/docs/cdktf/python/r/iam_role.html.markdown +++ b/website/docs/cdktf/python/r/iam_role.html.markdown @@ -282,6 +282,31 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_iam_role.example + identity = { + name = "developer_name" + } +} + +resource "aws_iam_role" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `name` (String) Name of the IAM role. + +#### Optional + +* `account_id` (String) AWS Account where this resource is managed. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import IAM Roles using the `name`. 
For example: ```python @@ -296,13 +321,13 @@ from imports.aws.iam_role import IamRole class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) - IamRole.generate_config_for_import(self, "developer", "developer_name") + IamRole.generate_config_for_import(self, "example", "developer_name") ``` Using `terraform import`, import IAM Roles using the `name`. For example: ```console -% terraform import aws_iam_role.developer developer_name +% terraform import aws_iam_role.example developer_name ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/iam_role_policy.html.markdown b/website/docs/cdktf/python/r/iam_role_policy.html.markdown index dae25a71f0f6..f5b7f1cec54a 100644 --- a/website/docs/cdktf/python/r/iam_role_policy.html.markdown +++ b/website/docs/cdktf/python/r/iam_role_policy.html.markdown @@ -67,24 +67,48 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: -* `name` - (Optional) The name of the role policy. If omitted, Terraform will -assign a random, unique name. -* `name_prefix` - (Optional) Creates a unique name beginning with the specified - prefix. Conflicts with `name`. -* `policy` - (Required) The inline policy document. This is a JSON formatted string. For more information about building IAM policy documents with Terraform, see the [AWS IAM Policy Document Guide](https://learn.hashicorp.com/terraform/aws/iam-policy) +* `name` - (Optional) The name of the role policy. + If omitted, Terraform will assign a random, unique name. +* `name_prefix` - (Optional) Creates a unique name beginning with the specified prefix. + Conflicts with `name`. +* `policy` - (Required) The inline policy document. + This is a JSON formatted string. 
+ For more information about building IAM policy documents with Terraform, see the [AWS IAM Policy Document Guide](https://learn.hashicorp.com/terraform/aws/iam-policy) * `role` - (Required) The name of the IAM role to attach to the policy. ## Attribute Reference -This resource exports the following attributes in addition to the arguments above: - -* `id` - The role policy ID, in the form of `role_name:role_policy_name`. -* `name` - The name of the policy. -* `policy` - The policy document attached to the role. -* `role` - The name of the role associated with the policy. +This resource exports no additional attributes. ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_iam_role_policy.example + identity = { + role = "role_of_mypolicy_name" + name = "mypolicy_name" + } +} + +resource "aws_iam_role_policy" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `role` (String) Name of the IAM role. +* `name` (String) Name of the role policy. + +#### Optional + +* `account_id` (String) AWS Account where this resource is managed. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import IAM Role Policies using the `role_name:role_policy_name`. For example: ```python @@ -99,13 +123,13 @@ from imports.aws.iam_role_policy import IamRolePolicy class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) - IamRolePolicy.generate_config_for_import(self, "mypolicy", "role_of_mypolicy_name:mypolicy_name") + IamRolePolicy.generate_config_for_import(self, "example", "role_of_mypolicy_name:mypolicy_name") ``` Using `terraform import`, import IAM Role Policies using the `role_name:role_policy_name`. 
For example: ```console -% terraform import aws_iam_role_policy.mypolicy role_of_mypolicy_name:mypolicy_name +% terraform import aws_iam_role_policy.example role_of_mypolicy_name:mypolicy_name ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/iam_role_policy_attachment.html.markdown b/website/docs/cdktf/python/r/iam_role_policy_attachment.html.markdown index 1cec19c93311..9bf709ae9e6c 100644 --- a/website/docs/cdktf/python/r/iam_role_policy_attachment.html.markdown +++ b/website/docs/cdktf/python/r/iam_role_policy_attachment.html.markdown @@ -83,6 +83,33 @@ This resource exports no additional attributes. ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_iam_role_policy_attachment.example + identity = { + role = "test-role" + policy_arn = "arn:aws:iam::xxxxxxxxxxxx:policy/test-policy" + } +} + +resource "aws_iam_role_policy_attachment" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `role` (String) Name of the IAM role. +* `policy_arn` (String) ARN of the IAM policy. + +#### Optional + +* `account_id` (String) AWS Account where this resource is managed. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import IAM role policy attachments using the role name and policy arn separated by `/`. 
For example: ```python @@ -97,13 +124,13 @@ from imports.aws.iam_role_policy_attachment import IamRolePolicyAttachment class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) - IamRolePolicyAttachment.generate_config_for_import(self, "testAttach", "test-role/arn:aws:iam::xxxxxxxxxxxx:policy/test-policy") + IamRolePolicyAttachment.generate_config_for_import(self, "example", "test-role/arn:aws:iam::xxxxxxxxxxxx:policy/test-policy") ``` Using `terraform import`, import IAM role policy attachments using the role name and policy arn separated by `/`. For example: ```console -% terraform import aws_iam_role_policy_attachment.test-attach test-role/arn:aws:iam::xxxxxxxxxxxx:policy/test-policy +% terraform import aws_iam_role_policy_attachment.example test-role/arn:aws:iam::xxxxxxxxxxxx:policy/test-policy ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/iam_saml_provider.html.markdown b/website/docs/cdktf/python/r/iam_saml_provider.html.markdown index aeec0fb6ef77..e8d7575fa71a 100644 --- a/website/docs/cdktf/python/r/iam_saml_provider.html.markdown +++ b/website/docs/cdktf/python/r/iam_saml_provider.html.markdown @@ -50,6 +50,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_iam_saml_provider.example + identity = { + "arn" = "arn:aws:iam::123456789012:saml-provider/ExampleProvider" + } +} + +resource "aws_iam_saml_provider" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the IAM SAML provider. 
+ In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import IAM SAML Providers using the `arn`. For example: ```python @@ -73,4 +94,4 @@ Using `terraform import`, import IAM SAML Providers using the `arn`. For example % terraform import aws_iam_saml_provider.default arn:aws:iam::123456789012:saml-provider/SAMLADFS ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/iam_server_certificate.html.markdown b/website/docs/cdktf/python/r/iam_server_certificate.html.markdown index a71fd5ca8309..a67694d9dc31 100644 --- a/website/docs/cdktf/python/r/iam_server_certificate.html.markdown +++ b/website/docs/cdktf/python/r/iam_server_certificate.html.markdown @@ -118,9 +118,9 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: -* `certificate_body` – (Required, Forces new resource) The contents of the public key certificate in +* `certificate_body` - (Required, Forces new resource) The contents of the public key certificate in PEM-encoded format. -* `certificate_chain` – (Optional, Forces new resource) The contents of the certificate chain. +* `certificate_chain` - (Optional, Forces new resource) The contents of the certificate chain. This is typically a concatenation of the PEM-encoded public key certificates of the chain. * `name` - (Optional) The name of the Server Certificate. Do not include the path in this value. If omitted, Terraform will assign a random, unique name. @@ -130,7 +130,7 @@ This resource supports the following arguments: included, it defaults to a slash (/). If this certificate is for use with AWS CloudFront, the path must be in format `/cloudfront/your_path_here`. See [IAM Identifiers][1] for more details on IAM Paths. -* `private_key` – (Required, Forces new resource) The contents of the private key in PEM-encoded format. 
+* `private_key` - (Required, Forces new resource) The contents of the private key in PEM-encoded format. * `tags` - (Optional) Map of resource tags for the server certificate. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. ~> **NOTE:** AWS performs behind-the-scenes modifications to some certificate files if they do not adhere to a specific format. These modifications will result in terraform forever believing that it needs to update the resources since the local and AWS file contents will not match after theses modifications occur. In order to prevent this from happening you must ensure that all your PEM-encoded files use UNIX line-breaks and that `certificate_body` contains only one certificate. All other certificates should go in `certificate_chain`. It is common for some Certificate Authorities to issue certificate files that have DOS line-breaks and that are actually multiple certificates concatenated together in order to form a full certificate chain. @@ -180,4 +180,4 @@ Using `terraform import`, import IAM Server Certificates using the `name`. 
For e [2]: https://docs.aws.amazon.com/IAM/latest/UserGuide/ManagingServerCerts.html [lifecycle]: /docs/configuration/resources.html - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/iam_service_linked_role.html.markdown b/website/docs/cdktf/python/r/iam_service_linked_role.html.markdown index 6012176fbd4d..fb4b720d1a3d 100644 --- a/website/docs/cdktf/python/r/iam_service_linked_role.html.markdown +++ b/website/docs/cdktf/python/r/iam_service_linked_role.html.markdown @@ -54,6 +54,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_iam_service_linked_role.example + identity = { + "arn" = "arn:aws:iam::123456789012:role/aws-service-role/elasticbeanstalk.amazonaws.com/AWSServiceRoleForElasticBeanstalk" + } +} + +resource "aws_iam_service_linked_role" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the IAM service-linked role. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import IAM service-linked roles using role ARN. For example: ```python @@ -77,4 +98,4 @@ Using `terraform import`, import IAM service-linked roles using role ARN. 
For ex % terraform import aws_iam_service_linked_role.elasticbeanstalk arn:aws:iam::123456789012:role/aws-service-role/elasticbeanstalk.amazonaws.com/AWSServiceRoleForElasticBeanstalk ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/iam_signing_certificate.html.markdown b/website/docs/cdktf/python/r/iam_signing_certificate.html.markdown index 37ec406ecdc5..6ad1f0e99bcb 100644 --- a/website/docs/cdktf/python/r/iam_signing_certificate.html.markdown +++ b/website/docs/cdktf/python/r/iam_signing_certificate.html.markdown @@ -63,9 +63,9 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: -* `certificate_body` – (Required) The contents of the signing certificate in PEM-encoded format. -* `status` – (Optional) The status you want to assign to the certificate. `Active` means that the certificate can be used for programmatic calls to Amazon Web Services `Inactive` means that the certificate cannot be used. -* `user_name` – (Required) The name of the user the signing certificate is for. +* `certificate_body` - (Required) The contents of the signing certificate in PEM-encoded format. +* `status` - (Optional) The status you want to assign to the certificate. `Active` means that the certificate can be used for programmatic calls to Amazon Web Services `Inactive` means that the certificate cannot be used. +* `user_name` - (Required) The name of the user the signing certificate is for. ## Attribute Reference @@ -99,4 +99,4 @@ Using `terraform import`, import IAM Signing Certificates using the `id`. 
For ex % terraform import aws_iam_signing_certificate.certificate IDIDIDIDID:user-name ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/iam_virtual_mfa_device.html.markdown b/website/docs/cdktf/python/r/iam_virtual_mfa_device.html.markdown index 8e15ee0afe27..959d4bcf1596 100644 --- a/website/docs/cdktf/python/r/iam_virtual_mfa_device.html.markdown +++ b/website/docs/cdktf/python/r/iam_virtual_mfa_device.html.markdown @@ -45,7 +45,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: * `virtual_mfa_device_name` - (Required) The name of the virtual MFA device. Use with path to uniquely identify a virtual MFA device. -* `path` – (Optional) The path for the virtual MFA device. +* `path` - (Optional) The path for the virtual MFA device. * `tags` - (Optional) Map of resource tags for the virtual mfa device. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. ## Attribute Reference @@ -84,4 +84,4 @@ Using `terraform import`, import IAM Virtual MFA Devices using the `arn`. For ex % terraform import aws_iam_virtual_mfa_device.example arn:aws:iam::123456789012:mfa/example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/identitystore_group.html.markdown b/website/docs/cdktf/python/r/identitystore_group.html.markdown index 0a93db73f7d6..bdb4280edee8 100644 --- a/website/docs/cdktf/python/r/identitystore_group.html.markdown +++ b/website/docs/cdktf/python/r/identitystore_group.html.markdown @@ -44,6 +44,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `display_name` - (Optional) A string containing the name of the group. This value is commonly displayed when the group is referenced. * `description` - (Optional) A string containing the description of the group. @@ -92,4 +93,4 @@ Using `terraform import`, import an Identity Store Group using the combination ` % terraform import aws_identitystore_group.example d-9c6705e95c/b8a1c340-8031-7071-a2fb-7dc540320c30 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/identitystore_group_membership.html.markdown b/website/docs/cdktf/python/r/identitystore_group_membership.html.markdown index 6f3103c3bc02..06388b4e729b 100644 --- a/website/docs/cdktf/python/r/identitystore_group_membership.html.markdown +++ b/website/docs/cdktf/python/r/identitystore_group_membership.html.markdown @@ -65,6 +65,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `member_id` - (Required) The identifier for a user in the Identity Store. * `group_id` - (Required) The identifier for a group in the Identity Store. * `identity_store_id` - (Required) Identity Store ID associated with the Single Sign-On Instance. 
@@ -100,4 +101,4 @@ Using `terraform import`, import `aws_identitystore_group_membership` using the % terraform import aws_identitystore_group_membership.example d-0000000000/00000000-0000-0000-0000-000000000000 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/identitystore_user.html.markdown b/website/docs/cdktf/python/r/identitystore_user.html.markdown index 33dd3f8c2f5b..0d3ce8581912 100644 --- a/website/docs/cdktf/python/r/identitystore_user.html.markdown +++ b/website/docs/cdktf/python/r/identitystore_user.html.markdown @@ -60,6 +60,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `addresses` - (Optional) Details about the user's address. At most 1 address is allowed. Detailed below. * `emails` - (Optional) Details about the user's email. At most 1 email is allowed. Detailed below. * `locale` - (Optional) The user's geographical region or location. @@ -99,6 +100,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `formatted` - (Optional) The name that is typically displayed when the name is shown for display. * `honorific_prefix` - (Optional) The honorific prefix of the user. * `honorific_suffix` - (Optional) The honorific suffix of the user. 
@@ -144,4 +146,4 @@ Using `terraform import`, import an Identity Store User using the combination `i % terraform import aws_identitystore_user.example d-9c6705e95c/065212b4-9061-703b-5876-13a517ae2a7c ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/imagebuilder_component.html.markdown b/website/docs/cdktf/python/r/imagebuilder_component.html.markdown index 2b4968759b9d..0e804d732d1f 100644 --- a/website/docs/cdktf/python/r/imagebuilder_component.html.markdown +++ b/website/docs/cdktf/python/r/imagebuilder_component.html.markdown @@ -84,6 +84,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `change_description` - (Optional) Change description of the component. * `data` - (Optional) Inline YAML string with data of the component. Exactly one of `data` and `uri` can be specified. Terraform will only perform drift detection of its value when present in a configuration. * `description` - (Optional) Description of the component. @@ -133,4 +134,4 @@ Using `terraform import`, import `aws_imagebuilder_components` resources using t Certain resource arguments, such as `uri`, cannot be read via the API and imported into Terraform. Terraform will display a difference for these arguments the first run after import if declared in the Terraform configuration for an imported resource. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/imagebuilder_container_recipe.html.markdown b/website/docs/cdktf/python/r/imagebuilder_container_recipe.html.markdown index e52a1666f369..3acdcd8f0796 100644 --- a/website/docs/cdktf/python/r/imagebuilder_container_recipe.html.markdown +++ b/website/docs/cdktf/python/r/imagebuilder_container_recipe.html.markdown @@ -64,6 +64,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` - (Optional) The description of the container recipe. * `dockerfile_template_data` - (Optional) The Dockerfile template used to build the image as an inline data blob. * `dockerfile_template_uri` - (Optional) The Amazon S3 URI for the Dockerfile that will be used to build the container image. @@ -98,6 +99,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `block_device_mapping` - (Optional) Configuration block(s) with block device mappings for the container recipe. Detailed below. * `image` - (Optional) The AMI ID to use as the base image for a container build and test instance. If not specified, Image Builder will use the appropriate ECS-optimized AMI as a base image. 
@@ -105,6 +107,7 @@ The following arguments are optional: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `device_name` - (Optional) Name of the device. For example, `/dev/sda` or `/dev/xvdb`. * `ebs` - (Optional) Configuration block with Elastic Block Storage (EBS) block device mapping settings. Detailed below. * `no_device` - (Optional) Set to `true` to remove a mapping from the parent image. @@ -114,6 +117,7 @@ The following arguments are optional: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `delete_on_termination` - (Optional) Whether to delete the volume on termination. Defaults to unset, which is the value inherited from the parent image. * `encrypted` - (Optional) Whether to encrypt the volume. Defaults to unset, which is the value inherited from the parent image. * `iops` - (Optional) Number of Input/Output (I/O) operations per second to provision for an `io1` or `io2` volume. @@ -136,6 +140,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. 
For example: + +```terraform +import { + to = aws_imagebuilder_container_recipe.example + identity = { + "arn" = "arn:aws:imagebuilder:us-east-1:123456789012:container-recipe/example/1.0.0" + } +} + +resource "aws_imagebuilder_container_recipe" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the Image Builder container recipe. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import `aws_imagebuilder_container_recipe` resources using the Amazon Resource Name (ARN). For example: ```python @@ -159,4 +184,4 @@ Using `terraform import`, import `aws_imagebuilder_container_recipe` resources u % terraform import aws_imagebuilder_container_recipe.example arn:aws:imagebuilder:us-east-1:123456789012:container-recipe/example/1.0.0 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/imagebuilder_distribution_configuration.html.markdown b/website/docs/cdktf/python/r/imagebuilder_distribution_configuration.html.markdown index fd3e2ce09558..c110cf0c5b9f 100644 --- a/website/docs/cdktf/python/r/imagebuilder_distribution_configuration.html.markdown +++ b/website/docs/cdktf/python/r/imagebuilder_distribution_configuration.html.markdown @@ -57,6 +57,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` - (Optional) Description of the distribution configuration. * `tags` - (Optional) Key-value map of resource tags for the distribution configuration. 
If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. @@ -68,6 +69,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `ami_distribution_configuration` - (Optional) Configuration block with Amazon Machine Image (AMI) distribution settings. Detailed below. * `container_distribution_configuration` - (Optional) Configuration block with container distribution settings. Detailed below. * `fast_launch_configuration` - (Optional) Set of Windows faster-launching configurations to use for AMI distribution. Detailed below. @@ -80,6 +82,7 @@ The following arguments are optional: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `ami_tags` - (Optional) Key-value map of tags to apply to the distributed AMI. * `description` - (Optional) Description to apply to the distributed AMI. * `kms_key_id` - (Optional) Amazon Resource Name (ARN) of the Key Management Service (KMS) Key to encrypt the distributed AMI. @@ -91,6 +94,7 @@ The following arguments are optional: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `organization_arns` - (Optional) Set of AWS Organization ARNs to assign. * `organizational_unit_arns` - (Optional) Set of AWS Organizational Unit ARNs to assign. * `user_groups` - (Optional) Set of EC2 launch permission user groups to assign. Use `all` to distribute a public AMI. @@ -155,6 +159,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_imagebuilder_distribution_configuration.example + identity = { + "arn" = "arn:aws:imagebuilder:us-east-1:123456789012:distribution-configuration/example" + } +} + +resource "aws_imagebuilder_distribution_configuration" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the Image Builder distribution configuration. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import `aws_imagebuilder_distribution_configurations` resources using the Amazon Resource Name (ARN). 
For example: ```python @@ -178,4 +203,4 @@ Using `terraform import`, import `aws_imagebuilder_distribution_configurations` % terraform import aws_imagebuilder_distribution_configuration.example arn:aws:imagebuilder:us-east-1:123456789012:distribution-configuration/example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/imagebuilder_image.html.markdown b/website/docs/cdktf/python/r/imagebuilder_image.html.markdown index c3a83b32c3f4..9192a632f70d 100644 --- a/website/docs/cdktf/python/r/imagebuilder_image.html.markdown +++ b/website/docs/cdktf/python/r/imagebuilder_image.html.markdown @@ -41,6 +41,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `container_recipe_arn` - (Optional) - Amazon Resource Name (ARN) of the container recipe. * `distribution_configuration_arn` - (Optional) Amazon Resource Name (ARN) of the Image Builder Distribution Configuration. * `enhanced_image_metadata_enabled` - (Optional) Whether additional information about the image being created is collected. Defaults to `true`. @@ -55,6 +56,7 @@ The following arguments are optional: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `image_tests_enabled` - (Optional) Whether image tests are enabled. Defaults to `true`. * `timeout_minutes` - (Optional) Number of minutes before image tests time out. 
Valid values are between `60` and `1440`. Defaults to `720`. @@ -62,6 +64,7 @@ The following arguments are optional: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `image_scanning_enabled` - (Optional) Indicates whether Image Builder keeps a snapshot of the vulnerability scans that Amazon Inspector runs against the build instance when you create a new image. Defaults to `false`. * `ecr_configuration` - (Optional) Configuration block with ECR configuration. Detailed below. @@ -69,6 +72,7 @@ The following arguments are optional: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `repository_name` - (Optional) The name of the container repository that Amazon Inspector scans to identify findings for your container images. * `container_tags` - (Optional) Set of tags for Image Builder to apply to the output container image that that Amazon Inspector scans. @@ -80,6 +84,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `on_failure` - (Optional) The action to take if the workflow fails. Must be one of `CONTINUE` or `ABORT`. 
* `parallel_group` - (Optional) The parallel group in which to run a test Workflow. * `parameter` - (Optional) Configuration block for the workflow parameters. Detailed below. @@ -120,6 +125,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_imagebuilder_image.example + identity = { + "arn" = "arn:aws:imagebuilder:us-east-1:123456789012:image/example/1.0.0/1" + } +} + +resource "aws_imagebuilder_image" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the Image Builder image. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import `aws_imagebuilder_image` resources using the Amazon Resource Name (ARN). 
For example: ```python @@ -143,4 +169,4 @@ Using `terraform import`, import `aws_imagebuilder_image` resources using the Am % terraform import aws_imagebuilder_image.example arn:aws:imagebuilder:us-east-1:123456789012:image/example/1.0.0/1 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/imagebuilder_image_pipeline.html.markdown b/website/docs/cdktf/python/r/imagebuilder_image_pipeline.html.markdown index 67e5f392005e..b3984591d0ee 100644 --- a/website/docs/cdktf/python/r/imagebuilder_image_pipeline.html.markdown +++ b/website/docs/cdktf/python/r/imagebuilder_image_pipeline.html.markdown @@ -53,7 +53,7 @@ class MyConvertedCode(TerraformStack): ) ], name="example", - parent_image="arn:${" + current.partition + "}:imagebuilder:${" + data_aws_region_current.name + "}:aws:image/amazon-linux-2-x86/x.x.x", + parent_image="arn:${" + current.partition + "}:imagebuilder:${" + data_aws_region_current.region + "}:aws:image/amazon-linux-2-x86/x.x.x", version="1.0.0" ) aws_imagebuilder_image_pipeline_example = ImagebuilderImagePipeline(self, "example_1", @@ -80,6 +80,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `container_recipe_arn` - (Optional) Amazon Resource Name (ARN) of the container recipe. * `description` - (Optional) Description of the image pipeline. * `distribution_configuration_arn` - (Optional) Amazon Resource Name (ARN) of the Image Builder Distribution Configuration. 
@@ -97,6 +98,7 @@ The following arguments are optional: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `image_scanning_enabled` - (Optional) Whether image scans are enabled. Defaults to `false`. * `ecr_configuration` - (Optional) Configuration block with ECR configuration for image scanning. Detailed below. @@ -104,6 +106,7 @@ The following arguments are optional: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `container tags` - (Optional) list of tags to apply to scanned images * `repository_name` - (Optional) The name of the repository to scan @@ -111,6 +114,7 @@ The following arguments are optional: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `image_tests_enabled` - (Optional) Whether image tests are enabled. Defaults to `true`. * `timeout_minutes` - (Optional) Number of minutes before image tests time out. Valid values are between `60` and `1440`. Defaults to `720`. 
@@ -122,6 +126,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `pipeline_execution_start_condition` - (Optional) Condition when the pipeline should trigger a new image build. Valid values are `EXPRESSION_MATCH_AND_DEPENDENCY_UPDATES_AVAILABLE` and `EXPRESSION_MATCH_ONLY`. Defaults to `EXPRESSION_MATCH_AND_DEPENDENCY_UPDATES_AVAILABLE`. * `timezone` - (Optional) The timezone that applies to the scheduling expression. For example, "Etc/UTC", "America/Los_Angeles" in the [IANA timezone format](https://www.joda.org/joda-time/timezones.html). If not specified this defaults to UTC. @@ -134,6 +139,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `on_failure` - (Optional) The action to take if the workflow fails. Must be one of `CONTINUE` or `ABORT`. * `parallel_group` - (Optional) The parallel group in which to run a test Workflow. * `parameter` - (Optional) Configuration block for the workflow parameters. Detailed below. @@ -159,6 +165,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. 
For example: + +```terraform +import { + to = aws_imagebuilder_image_pipeline.example + identity = { + "arn" = "arn:aws:imagebuilder:us-east-1:123456789012:image-pipeline/example" + } +} + +resource "aws_imagebuilder_image_pipeline" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the Image Builder image pipeline. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import `aws_imagebuilder_image_pipeline` resources using the Amazon Resource Name (ARN). For example: ```python @@ -182,4 +209,4 @@ Using `terraform import`, import `aws_imagebuilder_image_pipeline` resources usi % terraform import aws_imagebuilder_image_pipeline.example arn:aws:imagebuilder:us-east-1:123456789012:image-pipeline/example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/imagebuilder_image_recipe.html.markdown b/website/docs/cdktf/python/r/imagebuilder_image_recipe.html.markdown index fefc3f20a324..beb1f80cd716 100644 --- a/website/docs/cdktf/python/r/imagebuilder_image_recipe.html.markdown +++ b/website/docs/cdktf/python/r/imagebuilder_image_recipe.html.markdown @@ -49,7 +49,7 @@ class MyConvertedCode(TerraformStack): ) ], name="example", - parent_image="arn:${" + current.partition + "}:imagebuilder:${" + data_aws_region_current.name + "}:aws:image/amazon-linux-2-x86/x.x.x", + parent_image="arn:${" + current.partition + "}:imagebuilder:${" + data_aws_region_current.region + "}:aws:image/amazon-linux-2-x86/x.x.x", version="1.0.0" ) ``` @@ -60,11 +60,12 @@ The following arguments are required: * `component` - (Required) Ordered configuration block(s) with components for the image recipe. Detailed below. * `name` - (Required) Name of the image recipe. -* `parent_image` - (Required) The image recipe uses this image as a base from which to build your customized image. 
The value can be the base image ARN or an AMI ID. +* `parent_image` - (Required) The image recipe uses this image as a base from which to build your customized image. The value can be the base image ARN, an AMI ID, or an SSM Parameter referencing the AMI. For an SSM Parameter, enter the prefix `ssm:`, followed by the parameter name or ARN. * `version` - (Required) The semantic version of the image recipe, which specifies the version in the following format, with numeric values in each position to indicate a specific version: major.minor.patch. For example: 1.0.0. The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `block_device_mapping` - (Optional) Configuration block(s) with block device mappings for the image recipe. Detailed below. * `description` - (Optional) Description of the image recipe. * `systems_manager_agent` - (Optional) Configuration block for the Systems Manager Agent installed by default by Image Builder. Detailed below. @@ -116,6 +117,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_imagebuilder_image_recipe.example + identity = { + "arn" = "arn:aws:imagebuilder:us-east-1:123456789012:image-recipe/example/1.0.0" + } +} + +resource "aws_imagebuilder_image_recipe" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the Image Builder image recipe. 
+ In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import `aws_imagebuilder_image_recipe` resources using the Amazon Resource Name (ARN). For example: ```python @@ -139,4 +161,4 @@ Using `terraform import`, import `aws_imagebuilder_image_recipe` resources using % terraform import aws_imagebuilder_image_recipe.example arn:aws:imagebuilder:us-east-1:123456789012:image-recipe/example/1.0.0 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/imagebuilder_infrastructure_configuration.html.markdown b/website/docs/cdktf/python/r/imagebuilder_infrastructure_configuration.html.markdown index 5f4eb731563b..4e596792b6d2 100644 --- a/website/docs/cdktf/python/r/imagebuilder_infrastructure_configuration.html.markdown +++ b/website/docs/cdktf/python/r/imagebuilder_infrastructure_configuration.html.markdown @@ -57,6 +57,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` - (Optional) Description for the configuration. * `instance_metadata_options` - (Optional) Configuration block with instance metadata options for the HTTP requests that pipeline builds use to launch EC2 build and test instances. Detailed below. * `instance_types` - (Optional) Set of EC2 Instance Types. @@ -74,6 +75,7 @@ The following arguments are optional: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `http_put_response_hop_limit` - The number of hops that an instance can traverse to reach its destonation. * `http_tokens` - Whether a signed token is required for instance metadata retrieval requests. Valid values: `required`, `optional`. @@ -91,12 +93,14 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `s3_key_prefix` - (Optional) Prefix to use for S3 logs. Defaults to `/`. ### placement The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `availability_zone` - (Optional) Availability Zone where your build and test instances will launch. * `host_id` - (Optional) ID of the Dedicated Host on which build and test instances run. Conflicts with `host_resource_group_arn`. * `host_resource_group_arn` - (Optional) ARN of the host resource group in which to launch build and test instances. Conflicts with `host_id`. @@ -114,6 +118,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. 
For example: + +```terraform +import { + to = aws_imagebuilder_infrastructure_configuration.example + identity = { + "arn" = "arn:aws:imagebuilder:us-east-1:123456789012:infrastructure-configuration/example" + } +} + +resource "aws_imagebuilder_infrastructure_configuration" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the Image Builder infrastructure configuration. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import `aws_imagebuilder_infrastructure_configuration` using the Amazon Resource Name (ARN). For example: ```python @@ -137,4 +162,4 @@ Using `terraform import`, import `aws_imagebuilder_infrastructure_configuration` % terraform import aws_imagebuilder_infrastructure_configuration.example arn:aws:imagebuilder:us-east-1:123456789012:infrastructure-configuration/example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/imagebuilder_lifecycle_policy.html.markdown b/website/docs/cdktf/python/r/imagebuilder_lifecycle_policy.html.markdown index 630162e1293b..b8cf63c77820 100644 --- a/website/docs/cdktf/python/r/imagebuilder_lifecycle_policy.html.markdown +++ b/website/docs/cdktf/python/r/imagebuilder_lifecycle_policy.html.markdown @@ -22,11 +22,11 @@ from cdktf import Fn, Token, TerraformStack # Provider bindings are generated by running `cdktf get`. # See https://cdk.tf/provider-generation for more details. # -from imports.aws. 
import ImagebuilderLifecyclePolicy from imports.aws.data_aws_partition import DataAwsPartition from imports.aws.data_aws_region import DataAwsRegion from imports.aws.iam_role import IamRole from imports.aws.iam_role_policy_attachment import IamRolePolicyAttachment +from imports.aws.imagebuilder_lifecycle_policy import ImagebuilderLifecyclePolicy class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) @@ -61,27 +61,26 @@ class MyConvertedCode(TerraformStack): description="Example description", execution_role=example.arn, name="name", - policy_detail=[{ - "action": [{ - "type": "DELETE" - } + policy_detail=[ImagebuilderLifecyclePolicyPolicyDetail( + action=[ImagebuilderLifecyclePolicyPolicyDetailAction( + type="DELETE" + ) ], - "filter": [{ - "retain_at_least": 10, - "type": "AGE", - "unit": "YEARS", - "value": 6 - } + filter=[ImagebuilderLifecyclePolicyPolicyDetailFilter( + retain_at_least=10, + type="AGE", + unit="YEARS", + value=6 + ) ] - } + ) ], - resource_selection=[{ - "tag_map": [{ + resource_selection=[ImagebuilderLifecyclePolicyResourceSelection( + tag_map={ "key1": "value1", "key2": "value2" } - ] - } + ) ], resource_type="AMI_IMAGE" ) @@ -101,6 +100,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` - (Optional) description for the lifecycle policy. * `tags` - (Optional) Key-value map of resource tags for the Image Builder Lifecycle Policy. 
If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. @@ -113,6 +113,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `exclusion_rules` - (Optional) Additional rules to specify resources that should be exempt from policy actions. ### action @@ -123,12 +124,14 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `include_resources` - (Optional) Specifies the resources that the lifecycle policy applies to. Detailed below. ### include_resources The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `amis` - (Optional) Specifies whether the lifecycle action should apply to distributed AMIs. * `containers` - (Optional) Specifies whether the lifecycle action should apply to distributed containers. * `snapshots` - (Optional) Specifies whether the lifecycle action should apply to snapshots associated with distributed AMIs. 
@@ -142,6 +145,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `retain_at_least` - (Optional) For age-based filters, this is the number of resources to keep on hand after the lifecycle DELETE action is applied. Impacted resources are only deleted if you have more than this number of resources. If you have fewer resources than this number, the impacted resource is not deleted. * `unit` - (Optional) Defines the unit of time that the lifecycle policy uses to determine impacted resources. This is required for age-based rules. Valid values: `DAYS`, `WEEKS`, `MONTHS` or `YEARS`. @@ -149,6 +153,7 @@ The following arguments are optional: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `amis` - (Optional) Lists configuration values that apply to AMIs that Image Builder should exclude from the lifecycle action. Detailed below. * `tag_map` - (Optional) Contains a list of tags that Image Builder uses to skip lifecycle actions for Image Builder image resources that have them. @@ -156,6 +161,7 @@ The following arguments are optional: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `is_public` - (Optional) Configures whether public AMIs are excluded from the lifecycle action. * `last_launched` - (Optional) Specifies configuration details for Image Builder to exclude the most recent resources from lifecycle actions. Detailed below. * `regions` - (Optional) Configures AWS Regions that are excluded from the lifecycle action. @@ -173,6 +179,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `recipe` - (Optional) A list of recipe that are used as selection criteria for the output images that the lifecycle policy applies to. Detailed below. * `tag_map` - (Optional) A list of tags that are used as selection criteria for the Image Builder image resources that the lifecycle policy applies to. @@ -194,6 +201,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_imagebuilder_lifecycle_policy.example + identity = { + "arn" = "arn:aws:imagebuilder:us-east-1:123456789012:lifecycle-policy/example" + } +} + +resource "aws_imagebuilder_lifecycle_policy" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the Image Builder lifecycle policy. 
+ In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import `aws_imagebuilder_lifecycle_policy` using the Amazon Resource Name (ARN). For example: ```python @@ -204,7 +232,7 @@ from cdktf import TerraformStack # Provider bindings are generated by running `cdktf get`. # See https://cdk.tf/provider-generation for more details. # -from imports.aws. import ImagebuilderLifecyclePolicy +from imports.aws.imagebuilder_lifecycle_policy import ImagebuilderLifecyclePolicy class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) @@ -217,4 +245,4 @@ Using `terraform import`, import `aws_imagebuilder_lifecycle_policy` using the A % terraform import aws_imagebuilder_lifecycle_policy.example arn:aws:imagebuilder:us-east-1:123456789012:lifecycle-policy/example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/imagebuilder_workflow.html.markdown b/website/docs/cdktf/python/r/imagebuilder_workflow.html.markdown index b8fd7a441db9..9f2fb0986a09 100644 --- a/website/docs/cdktf/python/r/imagebuilder_workflow.html.markdown +++ b/website/docs/cdktf/python/r/imagebuilder_workflow.html.markdown @@ -47,6 +47,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `change_description` - (Optional) Change description of the workflow. * `data` - (Optional) Inline YAML string with data of the workflow. Exactly one of `data` and `uri` can be specified. * `description` - (Optional) Description of the workflow. 
@@ -64,7 +65,28 @@ This resource exports the following attributes in addition to the arguments abov ## Import -In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import EC2 Image Builder Workflow using the `example_id_arg`. For example: +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_imagebuilder_workflow.example + identity = { + "arn" = "arn:aws:imagebuilder:us-east-1:123456789012:workflow/build/example/1.0.0" + } +} + +resource "aws_imagebuilder_workflow" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the Image Builder workflow. + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import EC2 Image Builder Workflow using the `arn`. For example: ```python # DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug @@ -81,7 +103,7 @@ class MyConvertedCode(TerraformStack): ImagebuilderWorkflow.generate_config_for_import(self, "example", "workflow-id-12345678") ``` -Using `terraform import`, import EC2 Image Builder Workflow using the `example_id_arg`. For example: +Using `terraform import`, import EC2 Image Builder Workflow using the `arn`. For example: ```console % terraform import aws_imagebuilder_workflow.example arn:aws:imagebuilder:us-east-1:aws:workflow/test/example/1.0.1/1 @@ -89,4 +111,4 @@ Using `terraform import`, import EC2 Image Builder Workflow using the `example_i Certain resource arguments, such as `uri`, cannot be read via the API and imported into Terraform. Terraform will display a difference for these arguments the first run after import if declared in the Terraform configuration for an imported resource. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/inspector2_delegated_admin_account.html.markdown b/website/docs/cdktf/python/r/inspector2_delegated_admin_account.html.markdown index 1627599b92b0..180813edf3dd 100644 --- a/website/docs/cdktf/python/r/inspector2_delegated_admin_account.html.markdown +++ b/website/docs/cdktf/python/r/inspector2_delegated_admin_account.html.markdown @@ -37,8 +37,9 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -The following arguments are required: +This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `account_id` - (Required) Account to enable as delegated admin account. ## Attribute Reference @@ -79,4 +80,4 @@ Using `terraform import`, import Inspector Delegated Admin Account using the `ac % terraform import aws_inspector2_delegated_admin_account.example 123456789012 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/inspector2_enabler.html.markdown b/website/docs/cdktf/python/r/inspector2_enabler.html.markdown index 26ac957ac720..5b686d092abd 100644 --- a/website/docs/cdktf/python/r/inspector2_enabler.html.markdown +++ b/website/docs/cdktf/python/r/inspector2_enabler.html.markdown @@ -60,12 +60,13 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -The following arguments are required: +This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `account_ids` - (Required) Set of account IDs. Can contain one of: the Organization's Administrator Account, or one or more Member Accounts. * `resource_types` - (Required) Type of resources to scan. - Valid values are `EC2`, `ECR`, `LAMBDA` and `LAMBDA_CODE`. + Valid values are `EC2`, `ECR`, `LAMBDA`, `LAMBDA_CODE` and `CODE_REPOSITORY`. At least one item is required. ## Attribute Reference @@ -80,4 +81,29 @@ This resource exports no additional attributes. * `update` - (Default `5m`) * `delete` - (Default `5m`) - \ No newline at end of file +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Inspector Enabler using `account_ids` and `resource_types` formatted as `[account_id1]:[account_id2]:...-[resource_type1]:[resource_type2]:...`, where `account_ids` are sorted in ascending order and `resource_types` are sorted in alphabetical order. For example: + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.inspector2_enabler import Inspector2Enabler +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + Inspector2Enabler.generate_config_for_import(self, "example", "123456789012:234567890123-EC2:ECR") +``` + +Using `terraform import`, import Inspector Enabler using `account_ids` and `resource_types` formatted as `[account_id1]:[account_id2]:...-[resource_type1]:[resource_type2]:...`, where `account_ids` are sorted in ascending order and `resource_types` are sorted in alphabetical order. 
For example: + +```console +% terraform import aws_inspector2_enabler.example 123456789012:234567890123-EC2:ECR +``` + + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/inspector2_filter.html.markdown b/website/docs/cdktf/python/r/inspector2_filter.html.markdown index c3ca351b859b..c5d2c9cd0f73 100644 --- a/website/docs/cdktf/python/r/inspector2_filter.html.markdown +++ b/website/docs/cdktf/python/r/inspector2_filter.html.markdown @@ -52,6 +52,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` - (Optional) Description * `reason` - (Optional) Reason for creating the filter * `tags` - (Optional) Map of tags assigned to the resource. If configured with a provider [`default_tags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. @@ -68,6 +69,8 @@ This resource exports the following attributes in addition to the arguments abov The `filter_criteria` configuration block supports the following attributes: * `aws_account_id` - (Optional) The AWS account ID in which the finding was generated. [Documented below](#string-filter). +* `code_repository_project_name` - (Optional) The project name in a code repository. [Documented below](#string-filter). +* `code_repository_provider_type` - (Optional) The repository provider type (such as GitHub, GitLab, etc.) [Documented below](#string-filter). * `code_vulnerability_detector_name` - (Optional) The ID of the component. [Documented below](#string-filter). * `code_vulnerability_detector_tags` - (Optional) The ID of the component. 
[Documented below](#string-filter). * `code_vulnerability_file_path` - (Optional) The ID of the component. [Documented below](#string-filter). @@ -77,6 +80,8 @@ The `filter_criteria` configuration block supports the following attributes: * `ec2_instance_subnet_id` - (Optional) The ID of the subnet. [Documented below](#string-filter). * `ec2_instance_vpc_id` - (Optional) The ID of the VPC. [Documented below](#string-filter). * `ecr_image_architecture` - (Optional) The architecture of the ECR image. [Documented below](#string-filter). +* `ecr_image_in_use_count` - (Optional) The number of the ECR images in use. [Documented below](#number-filter). +* `ecr_image_last_in_use_at` - (Optional) The date range when an ECR image was last used in an ECS cluster task or EKS cluster pod. [Documented below](#date-filter). * `ecr_image_hash` - (Optional) The SHA256 hash of the ECR image. [Documented below](#string-filter). * `ecr_image_pushed_at` - (Optional) The date range when the image was pushed. [Documented below](#date-filter). * `ecr_image_registry` - (Optional) The registry of the ECR image. [Documented below](#string-filter). @@ -178,10 +183,10 @@ class MyConvertedCode(TerraformStack): Inspector2Filter.generate_config_for_import(self, "example", "arn:aws:inspector2:us-east-1:111222333444:owner/111222333444/filter/abcdefgh12345678") ``` -Using `terraform import`, import Inspector Filter using the `example_id_arg`. For example: +Using `terraform import`, import Inspector Filter using the `arn`. 
For example: ```console % terraform import aws_inspector2_filter.example "arn:aws:inspector2:us-east-1:111222333444:owner/111222333444/filter/abcdefgh12345678" ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/inspector2_member_association.html.markdown b/website/docs/cdktf/python/r/inspector2_member_association.html.markdown index cff5f731aa84..56a9c9ea0b58 100644 --- a/website/docs/cdktf/python/r/inspector2_member_association.html.markdown +++ b/website/docs/cdktf/python/r/inspector2_member_association.html.markdown @@ -35,8 +35,9 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -The following arguments are required: +This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
* `account_id` - (Required) ID of the account to associate ## Attribute Reference @@ -72,4 +73,4 @@ Using `terraform import`, import Amazon Inspector Member Association using the ` % terraform import aws_inspector2_member_association.example 123456789012 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/inspector2_organization_configuration.html.markdown b/website/docs/cdktf/python/r/inspector2_organization_configuration.html.markdown index 229bc33c510f..a7e77091dcd7 100644 --- a/website/docs/cdktf/python/r/inspector2_organization_configuration.html.markdown +++ b/website/docs/cdktf/python/r/inspector2_organization_configuration.html.markdown @@ -34,6 +34,7 @@ class MyConvertedCode(TerraformStack): super().__init__(scope, name) Inspector2OrganizationConfiguration(self, "example", auto_enable=Inspector2OrganizationConfigurationAutoEnable( + code_repository=False, ec2=True, ecr=False, lambda_=True, @@ -44,14 +45,16 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -The following arguments are required: +This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `auto_enable` - (Required) Configuration block for auto enabling. See below. ### `auto_enable` * `ec2` - (Required) Whether Amazon EC2 scans are automatically enabled for new members of your Amazon Inspector organization. * `ecr` - (Required) Whether Amazon ECR scans are automatically enabled for new members of your Amazon Inspector organization. +* `code_repository` - (Optional) Whether code repository scans are automatically enabled for new members of your Amazon Inspector organization. 
* `lambda` - (Optional) Whether Lambda Function scans are automatically enabled for new members of your Amazon Inspector organization. * `lambda_code` - (Optional) Whether AWS Lambda code scans are automatically enabled for new members of your Amazon Inspector organization. **Note:** Lambda code scanning requires Lambda standard scanning to be activated. Consequently, if you are setting this argument to `true`, you must also set the `lambda` argument to `true`. See [Scanning AWS Lambda functions with Amazon Inspector](https://docs.aws.amazon.com/inspector/latest/user/scanning-lambda.html#lambda-code-scans) for more information. @@ -69,4 +72,4 @@ This resource exports the following attributes in addition to the arguments abov * `update` - (Default `5m`) * `delete` - (Default `5m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/inspector_assessment_target.html.markdown b/website/docs/cdktf/python/r/inspector_assessment_target.html.markdown index 4ab34fd8e3f8..774f0f8a79f8 100644 --- a/website/docs/cdktf/python/r/inspector_assessment_target.html.markdown +++ b/website/docs/cdktf/python/r/inspector_assessment_target.html.markdown @@ -43,6 +43,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The name of the assessment target. * `resource_group_arn` (Optional) Inspector Resource Group Amazon Resource Name (ARN) stating tags for instance matching. If not specified, all EC2 instances in the current AWS account and region are included in the assessment target. 
@@ -54,6 +55,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_inspector_assessment_target.example + identity = { + "arn" = "arn:aws:inspector:us-west-2:123456789012:target/0-12345678" + } +} + +resource "aws_inspector_assessment_target" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the Inspector assessment target. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Inspector Classic Assessment Targets using their Amazon Resource Name (ARN). For example: ```python @@ -77,4 +99,4 @@ Using `terraform import`, import Inspector Classic Assessment Targets using thei % terraform import aws_inspector_assessment_target.example arn:aws:inspector:us-east-1:123456789012:target/0-xxxxxxx ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/inspector_assessment_template.html.markdown b/website/docs/cdktf/python/r/inspector_assessment_template.html.markdown index 333d5a9144d0..75d5262e6ca8 100644 --- a/website/docs/cdktf/python/r/inspector_assessment_template.html.markdown +++ b/website/docs/cdktf/python/r/inspector_assessment_template.html.markdown @@ -44,6 +44,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
* `name` - (Required) The name of the assessment template. * `target_arn` - (Required) The assessment target ARN to attach the template to. * `duration` - (Required) The duration of the inspector run. @@ -67,6 +68,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_inspector_assessment_template.example + identity = { + "arn" = "arn:aws:inspector:us-west-2:123456789012:target/0-12345678/template/0-87654321" + } +} + +resource "aws_inspector_assessment_template" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the Inspector assessment template. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import `aws_inspector_assessment_template` using the template assessment ARN. 
For example: ```python @@ -90,4 +112,4 @@ Using `terraform import`, import `aws_inspector_assessment_template` using the t % terraform import aws_inspector_assessment_template.example arn:aws:inspector:us-west-2:123456789012:target/0-9IaAzhGR/template/0-WEcjR8CH ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/inspector_resource_group.html.markdown b/website/docs/cdktf/python/r/inspector_resource_group.html.markdown index 0c3aaeebe3a0..41dcf4c140de 100644 --- a/website/docs/cdktf/python/r/inspector_resource_group.html.markdown +++ b/website/docs/cdktf/python/r/inspector_resource_group.html.markdown @@ -38,6 +38,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `tags` - (Required) Key-value map of tags that are used to select the EC2 instances to be included in an [Amazon Inspector assessment target](/docs/providers/aws/r/inspector_assessment_target.html). ## Attribute Reference @@ -46,4 +47,4 @@ This resource exports the following attributes in addition to the arguments abov * `arn` - The resource group ARN. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/instance.html.markdown b/website/docs/cdktf/python/r/instance.html.markdown index 9cca4b555f67..5d8d699dbf64 100644 --- a/website/docs/cdktf/python/r/instance.html.markdown +++ b/website/docs/cdktf/python/r/instance.html.markdown @@ -43,7 +43,7 @@ class MyConvertedCode(TerraformStack): most_recent=True, owners=["099720109477"] ) - Instance(self, "web", + Instance(self, "example", ami=Token.as_string(ubuntu.id), instance_type="t3.micro", tags={ @@ -66,7 +66,7 @@ from imports.aws.instance import Instance class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) - Instance(self, "web", + Instance(self, "example", ami="resolve:ssm:/aws/service/ami-amazon-linux-latest/al2023-ami-kernel-default-x86_64", instance_type="t3.micro", tags={ @@ -90,7 +90,7 @@ from imports.aws.instance import Instance class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) - this_var = DataAwsAmi(self, "this", + example = DataAwsAmi(self, "example", filter=[DataAwsAmiFilter( name="architecture", values=["arm64"] @@ -102,8 +102,8 @@ class MyConvertedCode(TerraformStack): most_recent=True, owners=["amazon"] ) - aws_instance_this = Instance(self, "this_1", - ami=Token.as_string(this_var.id), + aws_instance_example = Instance(self, "example_1", + ami=Token.as_string(example.id), instance_market_options=InstanceInstanceMarketOptions( market_type="spot", spot_options=InstanceInstanceMarketOptionsSpotOptions( @@ -116,7 +116,7 @@ class MyConvertedCode(TerraformStack): } ) # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. 
- aws_instance_this.override_logical_id("this") + aws_instance_example.override_logical_id("example") ``` ### Network and credit specification example @@ -150,27 +150,25 @@ class MyConvertedCode(TerraformStack): }, vpc_id=my_vpc.id ) - foo = NetworkInterface(self, "foo", + example = NetworkInterface(self, "example", private_ips=["172.16.10.100"], subnet_id=my_subnet.id, tags={ "Name": "primary_network_interface" } ) - aws_instance_foo = Instance(self, "foo_3", + aws_instance_example = Instance(self, "example_3", ami="ami-005e54dee72cc1d00", credit_specification=InstanceCreditSpecification( cpu_credits="unlimited" ), instance_type="t2.micro", - network_interface=[InstanceNetworkInterface( - device_index=0, - network_interface_id=foo.id + primary_network_interface=InstancePrimaryNetworkInterface( + network_interface_id=example.id ) - ] ) # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. - aws_instance_foo.override_logical_id("foo") + aws_instance_example.override_logical_id("example") ``` ### CPU options example @@ -273,17 +271,12 @@ Do not use `volume_tags` if you plan to manage block device tags outside the `aw This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `ami` - (Optional) AMI to use for the instance. Required unless `launch_template` is specified and the Launch Template specifies an AMI. If an AMI is specified in the Launch Template, setting `ami` will override the AMI specified in the Launch Template. * `associate_public_ip_address` - (Optional) Whether to associate a public IP address with an instance in a VPC. * `availability_zone` - (Optional) AZ to start the instance in. 
- * `capacity_reservation_specification` - (Optional) Describes an instance's Capacity Reservation targeting option. See [Capacity Reservation Specification](#capacity-reservation-specification) below for more details. - --> **NOTE:** Changing `cpu_core_count` and/or `cpu_threads_per_core` will cause the resource to be destroyed and re-created. - -* `cpu_core_count` - (Optional, **Deprecated** use the `cpu_options` argument instead) Sets the number of CPU cores for an instance. This option is only supported on creation of instance type that support CPU Options [CPU Cores and Threads Per CPU Core Per Instance Type](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-optimize-cpu.html#cpu-options-supported-instances-values) - specifying this option for unsupported instance types will return an error from the EC2 API. * `cpu_options` - (Optional) The CPU options for the instance. See [CPU Options](#cpu-options) below for more details. -* `cpu_threads_per_core` - (Optional - has no effect unless `cpu_core_count` is also set, **Deprecated** use the `cpu_options` argument instead) If set to 1, hyperthreading is disabled on the launched instance. Defaults to 2 if not set. See [Optimizing CPU Options](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-optimize-cpu.html) for more information. * `credit_specification` - (Optional) Configuration block for customizing the credit specification of the instance. See [Credit Specification](#credit-specification) below for more details. Terraform will only perform drift detection of its value when present in a configuration. Removing this configuration on existing instances will only stop managing it. It will not change the configuration back to the default for the instance type. * `disable_api_stop` - (Optional) If true, enables [EC2 Instance Stop Protection](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Stop_Start.html#Using_StopProtection). 
* `disable_api_termination` - (Optional) If true, enables [EC2 Instance Termination Protection](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/terminating-instances.html#Using_ChangingDisableAPITermination). @@ -292,6 +285,7 @@ This resource supports the following arguments: * `enable_primary_ipv6` - (Optional) Whether to assign a primary IPv6 Global Unicast Address (GUA) to the instance when launched in a dual-stack or IPv6-only subnet. A primary IPv6 address ensures a consistent IPv6 address for the instance and is automatically assigned by AWS to the ENI. Once enabled, the first IPv6 GUA becomes the primary IPv6 address and cannot be disabled. The primary IPv6 address remains until the instance is terminated or the ENI is detached. Disabling `enable_primary_ipv6` after it has been enabled forces recreation of the instance. * `enclave_options` - (Optional) Enable Nitro Enclaves on launched instances. See [Enclave Options](#enclave-options) below for more details. * `ephemeral_block_device` - (Optional) One or more configuration blocks to customize Ephemeral (also known as "Instance Store") volumes on the instance. See [Block Devices](#ebs-ephemeral-and-root-block-devices) below for details. When accessing this as an attribute reference, it is a set of objects. +* `force_destroy` - (Optional) Destroys instance even if `disable_api_termination` or `disable_api_stop` is set to `true`. Defaults to `false`. Once this parameter is set to `true`, a successful `terraform apply` run before a destroy is required to update this value in the resource state. Without a successful `terraform apply` after this parameter is set, this flag will have no effect. If setting this field in the same operation that would require replacing the instance or destroying the instance, this flag will not work. Additionally when importing an instance, a successful `terraform apply` is required to set this value in state before it will take effect on a destroy operation. 
* `get_password_data` - (Optional) If true, wait for password data to become available and retrieve it. Useful for getting the administrator password for instances running Microsoft Windows. The password data is exported to the `password_data` attribute. See [GetPasswordData](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetPasswordData.html) for more information. * `hibernation` - (Optional) If true, the launched EC2 instance will support hibernation. * `host_id` - (Optional) ID of a dedicated host that the instance will be assigned to. Use when an instance is to be launched on a specific dedicated host. @@ -307,9 +301,11 @@ This resource supports the following arguments: * `maintenance_options` - (Optional) Maintenance and recovery options for the instance. See [Maintenance Options](#maintenance-options) below for more details. * `metadata_options` - (Optional) Customize the metadata options of the instance. See [Metadata Options](#metadata-options) below for more details. * `monitoring` - (Optional) If true, the launched EC2 instance will have detailed monitoring enabled. (Available since v0.6.0) -* `network_interface` - (Optional) Customize network interfaces to be attached at instance boot time. See [Network Interfaces](#network-interfaces) below for more details. -* `placement_group` - (Optional) Placement Group to start the instance in. +* `network_interface` - (Optional, **Deprecated** to specify the primary network interface, use `primary_network_interface`, to attach additional network interfaces, use `aws_network_interface_attachment` resources) Customize network interfaces to be attached at instance boot time. See [Network Interfaces](#network-interfaces) below for more details. +* `placement_group` - (Optional) Placement Group to start the instance in. Conflicts with `placement_group_id`. +* `placement_group_id` - (Optional) Placement Group ID to start the instance in. Conflicts with `placement_group`. 
* `placement_partition_number` - (Optional) Number of the partition the instance is in. Valid only if [the `aws_placement_group` resource's](placement_group.html) `strategy` argument is set to `"partition"`. +* `primary_network_interface` - (Optional) The primary network interface. See [Primary Network Interface](#primary-network-interface) below. * `private_dns_name_options` - (Optional) Options for the instance hostname. The default values are inherited from the subnet. See [Private DNS Name Options](#private-dns-name-options) below for more details. * `private_ip` - (Optional) Private IP address to associate with the instance in a VPC. * `root_block_device` - (Optional) Configuration block to customize details about the root block device of the instance. See [Block Devices](#ebs-ephemeral-and-root-block-devices) below for details. When accessing this as an attribute reference, it is a list containing one object. @@ -456,7 +452,11 @@ For more information, see the documentation on the [Instance Metadata Service](h ### Network Interfaces -Each of the `network_interface` blocks attach a network interface to an EC2 Instance during boot time. However, because the network interface is attached at boot-time, replacing/modifying the network interface **WILL** trigger a recreation of the EC2 Instance. If you should need at any point to detach/modify/re-attach a network interface to the instance, use the `aws_network_interface` or `aws_network_interface_attachment` resources instead. +`network_interface` is **deprecated**. +Use `primary_network_interface` to specify the primary network interface. +To attach additional network interfaces, use [`aws_network_interface_attachment`](docs/r/network_interface_attachment.html.markdown) resources. + +Each of the `network_interface` blocks attaches a network interface to an EC2 Instance during boot time.
However, because the network interface is attached at boot-time, replacing/modifying the network interface **WILL** trigger a recreation of the EC2 Instance. If you should need at any point to detach/modify/re-attach a network interface to the instance, use [`aws_network_interface_attachment`](docs/r/network_interface_attachment.html.markdown) resources instead. The `network_interface` configuration block _does_, however, allow users to supply their own network interface to be used as the default network interface on an EC2 Instance, attached at `eth0`. @@ -467,6 +467,16 @@ Each `network_interface` block supports the following: * `network_card_index` - (Optional) Integer index of the network card. Limited by instance type. The default index is `0`. * `network_interface_id` - (Required) ID of the network interface to attach. +### Primary Network Interface + +Represents the primary network interface on the EC2 Instance. +To manage additional network interfaces, use [`aws_network_interface_attachment`](docs/r/network_interface_attachment.html.markdown) resources. + +Each `primary_network_interface` block supports the following: + +* `delete_on_termination` - (Read-Only) Whether the network interface will be deleted when the instance terminates. +* `network_interface_id` - (Required) ID of the network interface to attach. + ### Private DNS Name Options The `private_dns_name_options` block supports the following: @@ -540,6 +550,32 @@ For `instance_market_options`, in addition to the arguments above, the following ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_instance.example + identity = { + id = "i-12345678" + } +} + +resource "aws_instance" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `id` - (String) ID of the instance. 
+ +#### Optional + +* `account_id` (String) AWS Account where this resource is managed. +* `region` (String) Region where this resource is managed. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import instances using the `id`. For example: ```python @@ -563,4 +599,4 @@ Using `terraform import`, import instances using the `id`. For example: % terraform import aws_instance.web i-12345678 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/internet_gateway.html.markdown b/website/docs/cdktf/python/r/internet_gateway.html.markdown index 33f68b07f931..d5716e09e6a4 100644 --- a/website/docs/cdktf/python/r/internet_gateway.html.markdown +++ b/website/docs/cdktf/python/r/internet_gateway.html.markdown @@ -38,6 +38,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `vpc_id` - (Optional) The VPC ID to create in. See the [aws_internet_gateway_attachment](internet_gateway_attachment.html) resource for an alternate way to attach an Internet Gateway to a VPC. * `tags` - (Optional) A map of tags to assign to the resource. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. @@ -106,4 +107,4 @@ Using `terraform import`, import Internet Gateways using the `id`. 
For example: % terraform import aws_internet_gateway.gw igw-c0a643a9 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/internet_gateway_attachment.html.markdown b/website/docs/cdktf/python/r/internet_gateway_attachment.html.markdown index 5931035085c9..0f5dd373862b 100644 --- a/website/docs/cdktf/python/r/internet_gateway_attachment.html.markdown +++ b/website/docs/cdktf/python/r/internet_gateway_attachment.html.markdown @@ -46,6 +46,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `internet_gateway_id` - (Required) The ID of the internet gateway. * `vpc_id` - (Required) The ID of the VPC. @@ -87,4 +88,4 @@ Using `terraform import`, import Internet Gateway Attachments using the `id`. Fo % terraform import aws_internet_gateway_attachment.example igw-c0a643a9:vpc-123456 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/internetmonitor_monitor.html.markdown b/website/docs/cdktf/python/r/internetmonitor_monitor.html.markdown index 7097cb39f035..21871117cfa8 100644 --- a/website/docs/cdktf/python/r/internetmonitor_monitor.html.markdown +++ b/website/docs/cdktf/python/r/internetmonitor_monitor.html.markdown @@ -39,6 +39,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
* `health_events_config` - (Optional) Health event thresholds. A health event threshold percentage, for performance and availability, determines when Internet Monitor creates a health event when there's an internet issue that affects your application end users. See [Health Events Config](#health-events-config) below. * `internet_measurements_log_delivery` - (Optional) Publish internet measurements for Internet Monitor to an Amazon S3 bucket in addition to CloudWatch Logs. * `max_city_networks_to_monitor` - (Optional) The maximum number of city-networks to monitor for your resources. A city-network is the location (city) where clients access your application resources from and the network or ASN, such as an internet service provider (ISP), that clients access the resources through. This limit helps control billing costs. @@ -87,4 +88,4 @@ Using `terraform import`, import Internet Monitor Monitors using the `monitor_na % terraform import aws_internetmonitor_monitor.some some-monitor ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/iot_authorizer.html.markdown b/website/docs/cdktf/python/r/iot_authorizer.html.markdown index 34bc60f9d6e1..f12c38630181 100644 --- a/website/docs/cdktf/python/r/iot_authorizer.html.markdown +++ b/website/docs/cdktf/python/r/iot_authorizer.html.markdown @@ -46,6 +46,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `authorizer_function_arn` - (Required) The ARN of the authorizer's Lambda function. * `enable_caching_for_http` - (Optional) Specifies whether the HTTP caching is enabled or not. Default: `false`. 
* `name` - (Required) The name of the authorizer. @@ -87,4 +88,4 @@ Using `terraform import`, import IOT Authorizers using the name. For example: % terraform import aws_iot_authorizer.example example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/iot_billing_group.html.markdown b/website/docs/cdktf/python/r/iot_billing_group.html.markdown index 58c827c40872..01558ca78624 100644 --- a/website/docs/cdktf/python/r/iot_billing_group.html.markdown +++ b/website/docs/cdktf/python/r/iot_billing_group.html.markdown @@ -42,6 +42,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The name of the Billing Group. * `properties` - (Optional) The Billing Group properties. Defined below. * `tags` - (Optional) Key-value mapping of resource tags @@ -83,4 +84,4 @@ Using `terraform import`, import IoT Billing Groups using the name. For example: % terraform import aws_iot_billing_group.example example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/iot_ca_certificate.html.markdown b/website/docs/cdktf/python/r/iot_ca_certificate.html.markdown index 26e834079849..0e07fd303b0f 100644 --- a/website/docs/cdktf/python/r/iot_ca_certificate.html.markdown +++ b/website/docs/cdktf/python/r/iot_ca_certificate.html.markdown @@ -85,6 +85,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `active` - (Required) Boolean flag to indicate if the certificate should be active for device authentication. * `allow_auto_registration` - (Required) Boolean flag to indicate if the certificate should be active for device regisration. * `ca_certificate_pem` - (Required) PEM encoded CA certificate. @@ -113,4 +114,4 @@ This resource exports the following attributes in addition to the arguments abov * `not_after` - The certificate is not valid after this date. * `not_before` - The certificate is not valid before this date. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/iot_certificate.html.markdown b/website/docs/cdktf/python/r/iot_certificate.html.markdown index 1c89af796051..5fc3b995a3f0 100644 --- a/website/docs/cdktf/python/r/iot_certificate.html.markdown +++ b/website/docs/cdktf/python/r/iot_certificate.html.markdown @@ -77,6 +77,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `active` - (Required) Boolean flag to indicate if the certificate should be active * `csr` - (Optional) The certificate signing request. Review [CreateCertificateFromCsr](https://docs.aws.amazon.com/iot/latest/apireference/API_CreateCertificateFromCsr.html) @@ -101,4 +102,4 @@ This resource exports the following attributes in addition to the arguments abov * `public_key` - When neither CSR nor certificate is provided, the public key. * `private_key` - When neither CSR nor certificate is provided, the private key. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/iot_domain_configuration.html.markdown b/website/docs/cdktf/python/r/iot_domain_configuration.html.markdown index 4853e83e6a67..4899230d5423 100644 --- a/website/docs/cdktf/python/r/iot_domain_configuration.html.markdown +++ b/website/docs/cdktf/python/r/iot_domain_configuration.html.markdown @@ -38,6 +38,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `application_protocol` - (Optional) An enumerated string that specifies the application-layer protocol. Valid values are `SECURE_MQTT`, `MQTT_WSS`, `HTTPS` or `DEFAULT`. * `authentication_type` - (Optional) An enumerated string that specifies the authentication type. Valid values are `CUSTOM_AUTH_X509`, `CUSTOM_AUTH`, `AWS_X509`, `AWS_SIGV4` or `DEFAULT`. * `authorizer_config` - (Optional) An object that specifies the authorization service for a domain. See the [`authorizer_config` Block](#authorizer_config-block) below for details. @@ -97,4 +98,4 @@ Using `terraform import`, import domain configurations using the name. 
For examp % terraform import aws_iot_domain_configuration.example example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/iot_event_configurations.html.markdown b/website/docs/cdktf/python/r/iot_event_configurations.html.markdown index 9b82a8924303..61032c874cbe 100644 --- a/website/docs/cdktf/python/r/iot_event_configurations.html.markdown +++ b/website/docs/cdktf/python/r/iot_event_configurations.html.markdown @@ -49,6 +49,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `event_configurations` - (Required) Map. The new event configuration values. You can use only these strings as keys: `THING_GROUP_HIERARCHY`, `THING_GROUP_MEMBERSHIP`, `THING_TYPE`, `THING_TYPE_ASSOCIATION`, `THING_GROUP`, `THING`, `POLICY`, `CA_CERTIFICATE`, `JOB_EXECUTION`, `CERTIFICATE`, `JOB`. Use boolean for values of mapping. ## Attribute Reference @@ -80,4 +81,4 @@ Using `terraform import`, import IoT Event Configurations using the AWS Region. 
% terraform import aws_iot_event_configurations.example us-west-2 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/iot_indexing_configuration.html.markdown b/website/docs/cdktf/python/r/iot_indexing_configuration.html.markdown index eedf625dd70c..54a08b6b7111 100644 --- a/website/docs/cdktf/python/r/iot_indexing_configuration.html.markdown +++ b/website/docs/cdktf/python/r/iot_indexing_configuration.html.markdown @@ -57,6 +57,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `thing_group_indexing_configuration` - (Optional) Thing group indexing configuration. See below. * `thing_indexing_configuration` - (Optional) Thing indexing configuration. See below. @@ -97,4 +98,4 @@ The `filter` configuration block supports the following: This resource exports no additional attributes. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/iot_logging_options.html.markdown b/website/docs/cdktf/python/r/iot_logging_options.html.markdown index 810ecef7d22f..3364caf35a5f 100644 --- a/website/docs/cdktf/python/r/iot_logging_options.html.markdown +++ b/website/docs/cdktf/python/r/iot_logging_options.html.markdown @@ -36,6 +36,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
* `default_log_level` - (Optional) The default logging level. Valid Values: `"DEBUG"`, `"INFO"`, `"ERROR"`, `"WARN"`, `"DISABLED"`. * `disable_all_logs` - (Optional) If `true` all logs are disabled. The default is `false`. * `role_arn` - (Required) The ARN of the role that allows IoT to write to Cloudwatch logs. @@ -44,4 +45,4 @@ This resource supports the following arguments: This resource exports no additional attributes. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/iot_policy.html.markdown b/website/docs/cdktf/python/r/iot_policy.html.markdown index 314f024cc80b..9bbe8cb38526 100644 --- a/website/docs/cdktf/python/r/iot_policy.html.markdown +++ b/website/docs/cdktf/python/r/iot_policy.html.markdown @@ -47,6 +47,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The name of the policy. * `policy` - (Required) The policy document. This is a JSON formatted string. Use the [IoT Developer Guide](http://docs.aws.amazon.com/iot/latest/developerguide/iot-policies.html) for more information on IoT Policies. For more information about building AWS IAM policy documents with Terraform, see the [AWS IAM Policy Document Guide](https://learn.hashicorp.com/terraform/aws/iam-policy). * `tags` - (Optional) Key-value mapping of resource tags. If configured with a provider [`default_tags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. @@ -93,4 +94,4 @@ Using `terraform import`, import IoT policies using the `name`. 
For example: % terraform import aws_iot_policy.pubsub PubSubToAnyTopic ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/iot_policy_attachment.html.markdown b/website/docs/cdktf/python/r/iot_policy_attachment.html.markdown index 86523f69abe4..615c0b0fe7ce 100644 --- a/website/docs/cdktf/python/r/iot_policy_attachment.html.markdown +++ b/website/docs/cdktf/python/r/iot_policy_attachment.html.markdown @@ -57,6 +57,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `policy` - (Required) The name of the policy to attach. * `target` - (Required) The identity to which the policy is attached. @@ -64,4 +65,4 @@ This resource supports the following arguments: This resource exports no additional attributes. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/iot_provisioning_template.html.markdown b/website/docs/cdktf/python/r/iot_provisioning_template.html.markdown index 344ba7acb976..b6b470c0497a 100644 --- a/website/docs/cdktf/python/r/iot_provisioning_template.html.markdown +++ b/website/docs/cdktf/python/r/iot_provisioning_template.html.markdown @@ -100,6 +100,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The name of the fleet provisioning template. 
* `description` - (Optional) The description of the fleet provisioning template. * `enabled` - (Optional) True to enable the fleet provisioning template, otherwise false. @@ -149,4 +150,4 @@ Using `terraform import`, import IoT fleet provisioning templates using the `nam % terraform import aws_iot_provisioning_template.fleet FleetProvisioningTemplate ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/iot_role_alias.html.markdown b/website/docs/cdktf/python/r/iot_role_alias.html.markdown index 61f2a31079fb..6b97f7a9e028 100644 --- a/website/docs/cdktf/python/r/iot_role_alias.html.markdown +++ b/website/docs/cdktf/python/r/iot_role_alias.html.markdown @@ -51,6 +51,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `alias` - (Required) The name of the role alias. * `role_arn` - (Required) The identity of the role to which the alias refers. * `credential_duration` - (Optional) The duration of the credential, in seconds. If you do not specify a value for this setting, the default maximum of one hour is applied. This setting can have a value from 900 seconds (15 minutes) to 43200 seconds (12 hours). @@ -88,4 +89,4 @@ Using `terraform import`, import IOT Role Alias using the alias. 
For example: % terraform import aws_iot_role_alias.example myalias ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/iot_thing.html.markdown b/website/docs/cdktf/python/r/iot_thing.html.markdown index 0c4b28e260b8..50034085db81 100644 --- a/website/docs/cdktf/python/r/iot_thing.html.markdown +++ b/website/docs/cdktf/python/r/iot_thing.html.markdown @@ -38,6 +38,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The name of the thing. * `attributes` - (Optional) Map of attributes of the thing. * `thing_type_name` - (Optional) The thing type name. @@ -75,4 +76,4 @@ Using `terraform import`, import IOT Things using the name. For example: % terraform import aws_iot_thing.example example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/iot_thing_group.html.markdown b/website/docs/cdktf/python/r/iot_thing_group.html.markdown index d048e000d528..ab2b2a01957b 100644 --- a/website/docs/cdktf/python/r/iot_thing_group.html.markdown +++ b/website/docs/cdktf/python/r/iot_thing_group.html.markdown @@ -51,6 +51,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The name of the Thing Group. 
* `parent_group_name` - (Optional) The name of the parent Thing Group. * `properties` - (Optional) The Thing Group properties. Defined below. @@ -98,4 +99,4 @@ Using `terraform import`, import IoT Things Groups using the name. For example: % terraform import aws_iot_thing_group.example example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/iot_thing_group_membership.html.markdown b/website/docs/cdktf/python/r/iot_thing_group_membership.html.markdown index 09265c2b5613..801209655000 100644 --- a/website/docs/cdktf/python/r/iot_thing_group_membership.html.markdown +++ b/website/docs/cdktf/python/r/iot_thing_group_membership.html.markdown @@ -37,6 +37,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `thing_name` - (Required) The name of the thing to add to a group. * `thing_group_name` - (Required) The name of the group to which you are adding a thing. * `override_dynamic_group` - (Optional) Override dynamic thing groups with static thing groups when 10-group limit is reached. If a thing belongs to 10 thing groups, and one or more of those groups are dynamic thing groups, adding a thing to a static group removes the thing from the last dynamic group. 
@@ -72,4 +73,4 @@ Using `terraform import`, import IoT Thing Group Membership using the thing grou % terraform import aws_iot_thing_group_membership.example thing_group_name/thing_name ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/iot_thing_principal_attachment.html.markdown b/website/docs/cdktf/python/r/iot_thing_principal_attachment.html.markdown index 4c014a0975f9..15dc6d73123d 100644 --- a/website/docs/cdktf/python/r/iot_thing_principal_attachment.html.markdown +++ b/website/docs/cdktf/python/r/iot_thing_principal_attachment.html.markdown @@ -45,11 +45,13 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `principal` - (Required) The AWS IoT Certificate ARN or Amazon Cognito Identity ID. * `thing` - (Required) The name of the thing. +* `thing_principal_type` - (Optional) The type of relationship to specify when attaching a principal to a thing. Valid values are `EXCLUSIVE_THING` (the thing will be the only one attached to the principal) or `NON_EXCLUSIVE_THING` (multiple things can be attached to the principal). Defaults to `NON_EXCLUSIVE_THING`. ## Attribute Reference This resource exports no additional attributes. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/iot_thing_type.html.markdown b/website/docs/cdktf/python/r/iot_thing_type.html.markdown index f179ebbc6ab0..233c44f7fbd7 100644 --- a/website/docs/cdktf/python/r/iot_thing_type.html.markdown +++ b/website/docs/cdktf/python/r/iot_thing_type.html.markdown @@ -35,6 +35,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required, Forces New Resource) The name of the thing type. * `deprecated` - (Optional, Defaults to false) Whether the thing type is deprecated. If true, no new things could be associated with this type. * `properties` - (Optional), Configuration block that can contain the following properties of the thing type: @@ -74,4 +75,4 @@ Using `terraform import`, import IOT Thing Types using the name. For example: % terraform import aws_iot_thing_type.example example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/iot_topic_rule.html.markdown b/website/docs/cdktf/python/r/iot_topic_rule.html.markdown index dbcdf7b5ba2f..445221db172c 100644 --- a/website/docs/cdktf/python/r/iot_topic_rule.html.markdown +++ b/website/docs/cdktf/python/r/iot_topic_rule.html.markdown @@ -93,6 +93,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The name of the rule. * `description` - (Optional) The description of the rule. * `enabled` - (Required) Specifies whether the rule is enabled. @@ -278,4 +279,4 @@ Using `terraform import`, import IoT Topic Rules using the `name`. For example: % terraform import aws_iot_topic_rule.rule ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/iot_topic_rule_destination.html.markdown b/website/docs/cdktf/python/r/iot_topic_rule_destination.html.markdown index 3d728ec019c1..cbefdf73a8b1 100644 --- a/website/docs/cdktf/python/r/iot_topic_rule_destination.html.markdown +++ b/website/docs/cdktf/python/r/iot_topic_rule_destination.html.markdown @@ -38,6 +38,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `enabled` - (Optional) Whether or not to enable the destination. Default: `true`. * `vpc_configuration` - (Required) Configuration of the virtual private cloud (VPC) connection. For more info, see the [AWS documentation](https://docs.aws.amazon.com/iot/latest/developerguide/vpc-rule-action.html). @@ -79,4 +80,4 @@ Using `terraform import`, import IoT topic rule destinations using the `arn`. 
Fo % terraform import aws_iot_topic_rule_destination.example arn:aws:iot:us-west-2:123456789012:ruledestination/vpc/2ce781c8-68a6-4c52-9c62-63fe489ecc60 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/ivs_channel.html.markdown b/website/docs/cdktf/python/r/ivs_channel.html.markdown index 110f99be7b0b..7c6697a75942 100644 --- a/website/docs/cdktf/python/r/ivs_channel.html.markdown +++ b/website/docs/cdktf/python/r/ivs_channel.html.markdown @@ -37,6 +37,7 @@ class MyConvertedCode(TerraformStack): The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `authorized` - (Optional) If `true`, channel is private (enabled for playback authorization). * `latency_mode` - (Optional) Channel latency mode. Valid values: `NORMAL`, `LOW`. * `name` - (Optional) Channel name. @@ -63,6 +64,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_ivs_channel.example + identity = { + "arn" = "arn:aws:ivs:us-west-2:123456789012:channel/abcdABCDefgh" + } +} + +resource "aws_ivs_channel" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the IVS channel. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import IVS (Interactive Video) Channel using the ARN. 
For example: ```python @@ -86,4 +108,4 @@ Using `terraform import`, import IVS (Interactive Video) Channel using the ARN. % terraform import aws_ivs_channel.example arn:aws:ivs:us-west-2:326937407773:channel/0Y1lcs4U7jk5 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/ivs_playback_key_pair.html.markdown b/website/docs/cdktf/python/r/ivs_playback_key_pair.html.markdown index 813f4c509e4f..e6f28c8e2412 100644 --- a/website/docs/cdktf/python/r/ivs_playback_key_pair.html.markdown +++ b/website/docs/cdktf/python/r/ivs_playback_key_pair.html.markdown @@ -41,6 +41,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Optional) Playback Key Pair name. * `tags` - (Optional) A map of tags to assign to the resource. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. @@ -61,6 +62,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. 
For example: + +```terraform +import { + to = aws_ivs_playback_key_pair.example + identity = { + "arn" = "arn:aws:ivs:us-west-2:123456789012:playback-key/abcdABCDefgh" + } +} + +resource "aws_ivs_playback_key_pair" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the IVS playback key pair. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import IVS (Interactive Video) Playback Key Pair using the ARN. For example: ```python @@ -84,4 +106,4 @@ Using `terraform import`, import IVS (Interactive Video) Playback Key Pair using % terraform import aws_ivs_playback_key_pair.example arn:aws:ivs:us-west-2:326937407773:playback-key/KDJRJNQhiQzA ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/ivs_recording_configuration.html.markdown b/website/docs/cdktf/python/r/ivs_recording_configuration.html.markdown index e8e773315e26..1d84abbc4752 100644 --- a/website/docs/cdktf/python/r/ivs_recording_configuration.html.markdown +++ b/website/docs/cdktf/python/r/ivs_recording_configuration.html.markdown @@ -48,6 +48,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Optional) Recording Configuration name. * `recording_reconnect_window_seconds` - (Optional) If a broadcast disconnects and then reconnects within the specified interval, the multiple streams will be considered a single broadcast and merged together. * `tags` - (Optional) A map of tags to assign to the resource. 
If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. @@ -72,6 +73,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_ivs_recording_configuration.example + identity = { + "arn" = "arn:aws:ivs:us-west-2:123456789012:recording-configuration/abcdABCDefgh" + } +} + +resource "aws_ivs_recording_configuration" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the IVS recording configuration. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import IVS (Interactive Video) Recording Configuration using the ARN. 
For example: ```python @@ -95,4 +117,4 @@ Using `terraform import`, import IVS (Interactive Video) Recording Configuration % terraform import aws_ivs_recording_configuration.example arn:aws:ivs:us-west-2:326937407773:recording-configuration/KAk1sHBl2L47 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/ivschat_logging_configuration.html.markdown b/website/docs/cdktf/python/r/ivschat_logging_configuration.html.markdown index 7227a8955145..c647dbbf1332 100644 --- a/website/docs/cdktf/python/r/ivschat_logging_configuration.html.markdown +++ b/website/docs/cdktf/python/r/ivschat_logging_configuration.html.markdown @@ -159,6 +159,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Optional) Logging Configuration name. * `tags` - (Optional) A map of tags to assign to the resource. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. @@ -181,6 +182,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. 
For example: + +```terraform +import { + to = aws_ivschat_logging_configuration.example + identity = { + "arn" = "arn:aws:ivschat:us-west-2:123456789012:logging-configuration/abcdABCDefgh" + } +} + +resource "aws_ivschat_logging_configuration" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the IVS Chat logging configuration. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import IVS (Interactive Video) Chat Logging Configuration using the ARN. For example: ```python @@ -204,4 +226,4 @@ Using `terraform import`, import IVS (Interactive Video) Chat Logging Configurat % terraform import aws_ivschat_logging_configuration.example arn:aws:ivschat:us-west-2:326937407773:logging-configuration/MMUQc8wcqZmC ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/ivschat_room.html.markdown b/website/docs/cdktf/python/r/ivschat_room.html.markdown index 6868e72efe39..4868cb161721 100644 --- a/website/docs/cdktf/python/r/ivschat_room.html.markdown +++ b/website/docs/cdktf/python/r/ivschat_room.html.markdown @@ -82,6 +82,7 @@ class MyConvertedCode(TerraformStack): The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `logging_configuration_identifiers` - (Optional) List of Logging Configuration ARNs to attach to the room. 
* `maximum_message_length` - (Optional) Maximum number of characters in a single @@ -116,6 +117,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_ivschat_room.example + identity = { + "arn" = "arn:aws:ivschat:us-west-2:123456789012:room/g1H2I3j4k5L6" + } +} + +resource "aws_ivschat_room" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the IVS Chat room. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import IVS (Interactive Video) Chat Room using the ARN. For example: ```python @@ -139,4 +161,4 @@ Using `terraform import`, import IVS (Interactive Video) Chat Room using the ARN % terraform import aws_ivschat_room.example arn:aws:ivschat:us-west-2:326937407773:room/GoXEXyB4VwHb ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/kendra_data_source.html.markdown b/website/docs/cdktf/python/r/kendra_data_source.html.markdown index 0f2f57f9dba5..6d836be64d8a 100644 --- a/website/docs/cdktf/python/r/kendra_data_source.html.markdown +++ b/website/docs/cdktf/python/r/kendra_data_source.html.markdown @@ -490,6 +490,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
* `configuration` - (Optional) A block with the configuration information to connect to your Data Source repository. You can't specify the `configuration` block when the `type` parameter is set to `CUSTOM`. [Detailed below](#configuration-block). * `custom_document_enrichment_configuration` - (Optional) A block with the configuration information for altering document metadata and content during the document ingestion process. For more information on how to create, modify and delete document metadata, or make other content alterations when you ingest documents into Amazon Kendra, see [Customizing document metadata during the ingestion process](https://docs.aws.amazon.com/kendra/latest/dg/custom-document-enrichment.html). [Detailed below](#custom_document_enrichment_configuration-block). * `description` - (Optional) A description for the Data Source connector. @@ -537,7 +538,7 @@ The `documents_metadata_configuration` configuration block supports the followin The `web_crawler_configuration` configuration block supports the following arguments: * `authentication_configuration` - (Optional) A block with the configuration information required to connect to websites using authentication. You can connect to websites using basic authentication of user name and password. You use a secret in AWS Secrets Manager to store your authentication credentials. You must provide the website host name and port number. For example, the host name of `https://a.example.com/page1.html` is `"a.example.com"` and the port is `443`, the standard port for HTTPS. [Detailed below](#authentication_configuration-block). -* `crawl_depth` - (Optional) Specifies the number of levels in a website that you want to crawl. The first level begins from the website seed or starting point URL. For example, if a website has 3 levels – index level (i.e. seed in this example), sections level, and subsections level – and you are only interested in crawling information up to the sections level (i.e. 
levels 0-1), you can set your depth to 1. The default crawl depth is set to `2`. Minimum value of `0`. Maximum value of `10`. +* `crawl_depth` - (Optional) Specifies the number of levels in a website that you want to crawl. The first level begins from the website seed or starting point URL. For example, if a website has 3 levels - index level (i.e. seed in this example), sections level, and subsections level - and you are only interested in crawling information up to the sections level (i.e. levels 0-1), you can set your depth to 1. The default crawl depth is set to `2`. Minimum value of `0`. Maximum value of `10`. * `max_content_size_per_page_in_mega_bytes` - (Optional) The maximum size (in MB) of a webpage or attachment to crawl. Files larger than this size (in MB) are skipped/not crawled. The default maximum size of a webpage or attachment is set to `50` MB. Minimum value of `1.0e-06`. Maximum value of `50`. * `max_links_per_page` - (Optional) The maximum number of URLs on a webpage to include when crawling a website. This number is per webpage. As a website’s webpages are crawled, any URLs the webpages link to are also crawled. URLs on a webpage are crawled in order of appearance. The default maximum links per page is `100`. Minimum value of `1`. Maximum value of `1000`. * `max_urls_per_minute_crawl_rate` - (Optional) The maximum number of URLs crawled per website host per minute. The default maximum number of URLs crawled per website host per minute is `300`. Minimum value of `1`. Maximum value of `300`. @@ -587,9 +588,9 @@ The `seed_url_configuration` configuration block supports the following argument * `seed_urls` - (Required) The list of seed or starting point URLs of the websites you want to crawl. The list can include a maximum of `100` seed URLs. Array Members: Minimum number of `0` items. Maximum number of `100` items. Length Constraints: Minimum length of `1`. Maximum length of `2048`. 
* `web_crawler_mode` - (Optional) The default mode is set to `HOST_ONLY`. You can choose one of the following modes: - * `HOST_ONLY` – crawl only the website host names. For example, if the seed URL is `"abc.example.com"`, then only URLs with host name `"abc.example.com"` are crawled. - * `SUBDOMAINS` – crawl the website host names with subdomains. For example, if the seed URL is `"abc.example.com"`, then `"a.abc.example.com"` and `"b.abc.example.com"` are also crawled. - * `EVERYTHING` – crawl the website host names with subdomains and other domains that the webpages link to. + * `HOST_ONLY` - crawl only the website host names. For example, if the seed URL is `"abc.example.com"`, then only URLs with host name `"abc.example.com"` are crawled. + * `SUBDOMAINS` - crawl the website host names with subdomains. For example, if the seed URL is `"abc.example.com"`, then `"a.abc.example.com"` and `"b.abc.example.com"` are also crawled. + * `EVERYTHING` - crawl the website host names with subdomains and other domains that the webpages link to. ### site_maps_configuration Block @@ -710,4 +711,4 @@ Using `terraform import`, import Kendra Data Source using the unique identifiers % terraform import aws_kendra_data_source.example 1045d08d-66ef-4882-b3ed-dfb7df183e90/b34dfdf7-1f2b-4704-9581-79e00296845f ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/kendra_experience.html.markdown b/website/docs/cdktf/python/r/kendra_experience.html.markdown index 857275051704..0013d041dcbb 100644 --- a/website/docs/cdktf/python/r/kendra_experience.html.markdown +++ b/website/docs/cdktf/python/r/kendra_experience.html.markdown @@ -55,6 +55,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` - (Optional, Forces new resource if removed) A description for your Amazon Kendra experience. * `configuration` - (Optional) Configuration information for your Amazon Kendra experience. Terraform will only perform drift detection of its value when present in a configuration. [Detailed below](#configuration). @@ -128,4 +129,4 @@ Using `terraform import`, import Kendra Experience using the unique identifiers % terraform import aws_kendra_experience.example 1045d08d-66ef-4882-b3ed-dfb7df183e90/b34dfdf7-1f2b-4704-9581-79e00296845f ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/kendra_faq.html.markdown b/website/docs/cdktf/python/r/kendra_faq.html.markdown index e482064fa4b5..e35d0957a4f1 100644 --- a/website/docs/cdktf/python/r/kendra_faq.html.markdown +++ b/website/docs/cdktf/python/r/kendra_faq.html.markdown @@ -110,6 +110,7 @@ The `s3_path` configuration block supports the following arguments: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` - (Optional, Forces new resource) The description for a FAQ. * `file_format` - (Optional, Forces new resource) The file format used by the input files for the FAQ. Valid Values are `CSV`, `CSV_WITH_HEADER`, `JSON`. * `language_code` - (Optional, Forces new resource) The code for a language. This shows a supported language for the FAQ document. English is supported by default. 
For more information on supported languages, including their codes, see [Adding documents in languages other than English](https://docs.aws.amazon.com/kendra/latest/dg/in-adding-languages.html). @@ -160,4 +161,4 @@ Using `terraform import`, import `aws_kendra_faq` using the unique identifiers o % terraform import aws_kendra_faq.example faq-123456780/idx-8012925589 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/kendra_index.html.markdown b/website/docs/cdktf/python/r/kendra_index.html.markdown index ba64cb21f670..75d2ae5e906f 100644 --- a/website/docs/cdktf/python/r/kendra_index.html.markdown +++ b/website/docs/cdktf/python/r/kendra_index.html.markdown @@ -613,6 +613,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `capacity_units` - (Optional) A block that sets the number of additional document storage and query capacity units that should be used by the index. [Detailed below](#capacity_units). * `description` - (Optional) The description of the Index. * `document_metadata_configuration_updates` - (Optional) One or more blocks that specify the configuration settings for any metadata applied to the documents in the index. Minimum number of 0 items. Maximum number of 500 items. If specified, you must define all elements, including those that are provided by default. These index fields are documented at [Amazon Kendra Index documentation](https://docs.aws.amazon.com/kendra/latest/dg/hiw-index.html). For an example resource that defines these default index fields, refer to the [default example above](#specifying-the-predefined-elements). 
For an example resource that appends additional index fields, refer to the [append example above](#appending-additional-elements). All arguments for each block must be specified. Note that blocks cannot be removed since index fields cannot be deleted. This argument is [detailed below](#document_metadata_configuration_updates). @@ -765,4 +766,4 @@ Using `terraform import`, import Amazon Kendra Indexes using its `id`. For examp % terraform import aws_kendra_index.example 12345678-1234-5678-9123-123456789123 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/kendra_query_suggestions_block_list.html.markdown b/website/docs/cdktf/python/r/kendra_query_suggestions_block_list.html.markdown index 2aae76aee6ab..7496323b2da8 100644 --- a/website/docs/cdktf/python/r/kendra_query_suggestions_block_list.html.markdown +++ b/website/docs/cdktf/python/r/kendra_query_suggestions_block_list.html.markdown @@ -58,6 +58,7 @@ The `source_s3_path` configuration block supports the following arguments: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` - (Optional) Description for a block list. * `tags` - (Optional) Key-value map of resource tags. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block), tags with matching keys will overwrite those defined at the provider-level. 
@@ -102,4 +103,4 @@ Using `terraform import`, import the `aws_kendra_query_suggestions_block_list` r % terraform import aws_kendra_query_suggestions_block_list.example blocklist-123456780/idx-8012925589 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/kendra_thesaurus.html.markdown b/website/docs/cdktf/python/r/kendra_thesaurus.html.markdown index 238ed3df7523..2a227ad87692 100644 --- a/website/docs/cdktf/python/r/kendra_thesaurus.html.markdown +++ b/website/docs/cdktf/python/r/kendra_thesaurus.html.markdown @@ -56,6 +56,7 @@ The `source_s3_path` configuration block supports the following arguments: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` - (Optional) The description for a thesaurus. * `tags` - (Optional) Key-value map of resource tags. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. 
@@ -101,4 +102,4 @@ Using `terraform import`, import `aws_kendra_thesaurus` using the unique identif % terraform import aws_kendra_thesaurus.example thesaurus-123456780/idx-8012925589 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/key_pair.html.markdown b/website/docs/cdktf/python/r/key_pair.html.markdown index 9bf43a805e9a..03b518abcd56 100644 --- a/website/docs/cdktf/python/r/key_pair.html.markdown +++ b/website/docs/cdktf/python/r/key_pair.html.markdown @@ -44,6 +44,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `key_name` - (Optional) The name for the key pair. If neither `key_name` nor `key_name_prefix` is provided, Terraform will create a unique key name using the prefix `terraform-`. * `key_name_prefix` - (Optional) Creates a unique name beginning with the specified prefix. Conflicts with `key_name`. If neither `key_name` nor `key_name_prefix` is provided, Terraform will create a unique key name using the prefix `terraform-`. * `public_key` - (Required) The public key material. @@ -88,4 +89,4 @@ Using `terraform import`, import Key Pairs using the `key_name`. For example: ~> **NOTE:** The AWS API does not include the public key in the response, so `terraform apply` will attempt to replace the key pair. There is currently no supported workaround for this limitation. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/keyspaces_keyspace.html.markdown b/website/docs/cdktf/python/r/keyspaces_keyspace.html.markdown index b12d2ddc4166..e67428116aef 100644 --- a/website/docs/cdktf/python/r/keyspaces_keyspace.html.markdown +++ b/website/docs/cdktf/python/r/keyspaces_keyspace.html.markdown @@ -37,6 +37,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required, Forces new resource) The name of the keyspace to be created. * `replication_specification` - (Optional) The replication specification of the keyspace. * `region_list` - (Optional) Replication regions. If `replication_strategy` is `MULTI_REGION`, `region_list` requires the current Region and at least one additional AWS Region where the keyspace is going to be replicated in. @@ -83,4 +84,4 @@ Using `terraform import`, import a keyspace using the `name`. For example: % terraform import aws_keyspaces_keyspace.example my_keyspace ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/keyspaces_table.html.markdown b/website/docs/cdktf/python/r/keyspaces_table.html.markdown index d56a4680c816..e53db91bf22f 100644 --- a/website/docs/cdktf/python/r/keyspaces_table.html.markdown +++ b/website/docs/cdktf/python/r/keyspaces_table.html.markdown @@ -54,6 +54,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `capacity_specification` - (Optional) Specifies the read/write throughput capacity mode for the table. * `client_side_timestamps` - (Optional) Enables client-side timestamps for the table. By default, the setting is disabled. * `comment` - (Optional) A description of the table. @@ -156,4 +157,4 @@ Using `terraform import`, import a table using the `keyspace_name` and `table_na % terraform import aws_keyspaces_table.example my_keyspace/my_table ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/kinesis_analytics_application.html.markdown b/website/docs/cdktf/python/r/kinesis_analytics_application.html.markdown index 9651856c6330..6c30de264571 100644 --- a/website/docs/cdktf/python/r/kinesis_analytics_application.html.markdown +++ b/website/docs/cdktf/python/r/kinesis_analytics_application.html.markdown @@ -15,6 +15,8 @@ allows processing and analyzing streaming data using standard SQL. For more details, see the [Amazon Kinesis Analytics Documentation][1]. +!> **WARNING:** _This resource is deprecated and will be removed in a future version._ [Effective January 27, 2026](https://aws.amazon.com/blogs/big-data/migrate-from-amazon-kinesis-data-analytics-for-sql-to-amazon-managed-service-for-apache-flink-and-amazon-managed-service-for-apache-flink-studio/), AWS will [no longer support](https://docs.aws.amazon.com/kinesisanalytics/latest/dev/discontinuation.html) Amazon Kinesis Data Analytics for SQL. Use the `aws_kinesisanalyticsv2_application` resource instead to manage Amazon Kinesis Data Analytics for Apache Flink applications. 
AWS provides guidance for migrating from [Amazon Kinesis Data Analytics for SQL Applications to Amazon Managed Service for Apache Flink Studio](https://aws.amazon.com/blogs/big-data/migrate-from-amazon-kinesis-data-analytics-for-sql-applications-to-amazon-managed-service-for-apache-flink-studio/) including [examples](https://docs.aws.amazon.com/kinesisanalytics/latest/dev/migrating-to-kda-studio-overview.html). + -> **Note:** To manage Amazon Kinesis Data Analytics for Apache Flink applications, use the [`aws_kinesisanalyticsv2_application`](/docs/providers/aws/r/kinesisanalyticsv2_application.html) resource. ## Example Usage @@ -164,6 +166,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the Kinesis Analytics Application. * `code` - (Optional) SQL Code to transform input data, and generate output. * `description` - (Optional) Description of the application. @@ -397,4 +400,4 @@ Using `terraform import`, import Kinesis Analytics Application using ARN. 
For ex % terraform import aws_kinesis_analytics_application.example arn:aws:kinesisanalytics:us-west-2:1234567890:application/example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/kinesis_firehose_delivery_stream.html.markdown b/website/docs/cdktf/python/r/kinesis_firehose_delivery_stream.html.markdown index 96c76b84de77..eda29fafc239 100644 --- a/website/docs/cdktf/python/r/kinesis_firehose_delivery_stream.html.markdown +++ b/website/docs/cdktf/python/r/kinesis_firehose_delivery_stream.html.markdown @@ -600,7 +600,7 @@ class MyConvertedCode(TerraformStack): iceberg_configuration=KinesisFirehoseDeliveryStreamIcebergConfiguration( buffering_interval=400, buffering_size=10, - catalog_arn="arn:${" + data_aws_partition_current.partition + "}:glue:${" + data_aws_region_current.name + "}:${" + current.account_id + "}:catalog", + catalog_arn="arn:${" + data_aws_partition_current.partition + "}:glue:${" + data_aws_region_current.region + "}:${" + current.account_id + "}:catalog", destination_table_configuration=[KinesisFirehoseDeliveryStreamIcebergConfigurationDestinationTableConfiguration( database_name=test.name, table_name=Token.as_string(aws_glue_catalog_table_test.name) @@ -751,14 +751,13 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) A name to identify the stream. This is unique to the AWS account and region the Stream is created in. When using for WAF logging, name must be prefixed with `aws-waf-logs-`. See [AWS Documentation](https://docs.aws.amazon.com/waf/latest/developerguide/waf-policies.html#waf-policies-logging-config) for more details. 
* `tags` - (Optional) A map of tags to assign to the resource. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. * `kinesis_source_configuration` - (Optional) The stream and role Amazon Resource Names (ARNs) for a Kinesis data stream used as the source for a delivery stream. See [`kinesis_source_configuration` block](#kinesis_source_configuration-block) below for details. * `msk_source_configuration` - (Optional) The configuration for the Amazon MSK cluster to be used as the source for a delivery stream. See [`msk_source_configuration` block](#msk_source_configuration-block) below for details. * `server_side_encryption` - (Optional) Encrypt at rest options. See [`server_side_encryption` block](#server_side_encryption-block) below for details. - - **NOTE:** Server-side encryption should not be enabled when a kinesis stream is configured as the source of the firehose delivery stream. -* `destination` – (Required) This is the destination to where the data is delivered. The only options are `s3` (Deprecated, use `extended_s3` instead), `extended_s3`, `redshift`, `elasticsearch`, `splunk`, `http_endpoint`, `opensearch`, `opensearchserverless` and `snowflake`. +* `destination` - (Required) This is the destination to where the data is delivered. The only options are `s3` (Deprecated, use `extended_s3` instead), `extended_s3`, `redshift`, `elasticsearch`, `splunk`, `http_endpoint`, `opensearch`, `opensearchserverless` and `snowflake`. * `elasticsearch_configuration` - (Optional) Configuration options when `destination` is `elasticsearch`. See [`elasticsearch_configuration` block](#elasticsearch_configuration-block) below for details. 
* `extended_s3_configuration` - (Optional, only Required when `destination` is `extended_s3`) Enhanced configuration options for the s3 destination. See [`extended_s3_configuration` block](#extended_s3_configuration-block) below for details. * `http_endpoint_configuration` - (Optional) Configuration options when `destination` is `http_endpoint`. Requires the user to also specify an `s3_configuration` block. See [`http_endpoint_configuration` block](#http_endpoint_configuration-block) below for details. @@ -769,6 +768,8 @@ This resource supports the following arguments: * `snowflake_configuration` - (Optional) Configuration options when `destination` is `snowflake`. See [`snowflake_configuration` block](#snowflake_configuration-block) below for details. * `splunk_configuration` - (Optional) Configuration options when `destination` is `splunk`. See [`splunk_configuration` block](#splunk_configuration-block) below for details. +**NOTE:** Server-side encryption should not be enabled when a kinesis stream is configured as the source of the firehose delivery stream. + ### `kinesis_source_configuration` block The `kinesis_source_configuration` configuration block supports the following arguments: @@ -1243,4 +1244,4 @@ Using `terraform import`, import Kinesis Firehose Delivery streams using the str Note: Import does not work for stream destination `s3`. Consider using `extended_s3` since `s3` destination is deprecated. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/kinesis_resource_policy.html.markdown b/website/docs/cdktf/python/r/kinesis_resource_policy.html.markdown index 0124300f3f95..d4f955e16be8 100644 --- a/website/docs/cdktf/python/r/kinesis_resource_policy.html.markdown +++ b/website/docs/cdktf/python/r/kinesis_resource_policy.html.markdown @@ -37,6 +37,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `policy` - (Required) The policy document. * `resource_arn` - (Required) The Amazon Resource Name (ARN) of the data stream or consumer. @@ -46,6 +47,27 @@ This resource exports no additional attributes. ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_kinesis_resource_policy.example + identity = { + "arn" = "arn:aws:kinesis:us-east-1:123456789012:stream/example-stream" + } +} + +resource "aws_kinesis_resource_policy" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the Kinesis stream. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Kinesis resource policies using the `resource_arn`. 
For example: ```python @@ -69,4 +91,4 @@ Using `terraform import`, import Kinesis resource policies using the `resource_a % terraform import aws_kinesis_resource_policy.example arn:aws:kinesis:us-west-2:123456789012:stream/example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/kinesis_stream.html.markdown b/website/docs/cdktf/python/r/kinesis_stream.html.markdown index bf09b95179ca..df69dd99206c 100644 --- a/website/docs/cdktf/python/r/kinesis_stream.html.markdown +++ b/website/docs/cdktf/python/r/kinesis_stream.html.markdown @@ -47,8 +47,9 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) A name to identify the stream. This is unique to the AWS account and region the Stream is created in. -* `shard_count` – (Optional) The number of shards that the stream will use. If the `stream_mode` is `PROVISIONED`, this field is required. +* `shard_count` - (Optional) The number of shards that the stream will use. If the `stream_mode` is `PROVISIONED`, this field is required. Amazon has guidelines for specifying the Stream size that should be referenced when creating a Kinesis stream. See [Amazon Kinesis Streams][2] for more. * `retention_period` - (Optional) Length of time data records are accessible after they are added to the stream. The maximum value of a stream's retention period is 8760 hours. Minimum value is 24. Default is 24. * `shard_level_metrics` - (Optional) A list of shard-level CloudWatch metrics which can be enabled for the stream. See [Monitoring with CloudWatch][3] for more. 
Note that the value ALL should not be used; instead you should provide an explicit list of metrics you wish to enable. @@ -109,4 +110,4 @@ Using `terraform import`, import Kinesis Streams using the `name`. For example: [2]: https://docs.aws.amazon.com/kinesis/latest/dev/amazon-kinesis-streams.html [3]: https://docs.aws.amazon.com/streams/latest/dev/monitoring-with-cloudwatch.html - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/kinesis_stream_consumer.html.markdown b/website/docs/cdktf/python/r/kinesis_stream_consumer.html.markdown index 7f2d5fa74c01..38649b038a13 100644 --- a/website/docs/cdktf/python/r/kinesis_stream_consumer.html.markdown +++ b/website/docs/cdktf/python/r/kinesis_stream_consumer.html.markdown @@ -47,8 +47,9 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required, Forces new resource) Name of the stream consumer. -* `stream_arn` – (Required, Forces new resource) Amazon Resource Name (ARN) of the data stream the consumer is registered with. +* `stream_arn` - (Required, Forces new resource) Amazon Resource Name (ARN) of the data stream the consumer is registered with. 
## Attribute Reference @@ -85,4 +86,4 @@ Using `terraform import`, import Kinesis Stream Consumers using the Amazon Resou [1]: https://docs.aws.amazon.com/streams/latest/dev/amazon-kinesis-consumers.html - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/kinesis_video_stream.html.markdown b/website/docs/cdktf/python/r/kinesis_video_stream.html.markdown index e8769c584beb..d81742b21541 100644 --- a/website/docs/cdktf/python/r/kinesis_video_stream.html.markdown +++ b/website/docs/cdktf/python/r/kinesis_video_stream.html.markdown @@ -43,9 +43,10 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) A name to identify the stream. This is unique to the AWS account and region the Stream is created in. -* `data_retention_in_hours` – (Optional) The number of hours that you want to retain the data in the stream. Kinesis Video Streams retains the data in a data store that is associated with the stream. The default value is `0`, indicating that the stream does not persist data. +* `data_retention_in_hours` - (Optional) The number of hours that you want to retain the data in the stream. Kinesis Video Streams retains the data in a data store that is associated with the stream. The default value is `0`, indicating that the stream does not persist data. * `device_name` - (Optional) The name of the device that is writing to the stream. **In the current implementation, Kinesis Video Streams does not use this name.** * `kms_key_id` - (Optional) The ID of the AWS Key Management Service (AWS KMS) key that you want Kinesis Video Streams to use to encrypt stream data. 
If no key ID is specified, the default, Kinesis Video-managed key (`aws/kinesisvideo`) is used. * `media_type` - (Optional) The media type of the stream. Consumers of the stream can use this information when processing the stream. For more information about media types, see [Media Types][2]. If you choose to specify the MediaType, see [Naming Requirements][3] for guidelines. @@ -98,4 +99,4 @@ Using `terraform import`, import Kinesis Streams using the `arn`. For example: [2]: http://www.iana.org/assignments/media-types/media-types.xhtml [3]: https://tools.ietf.org/html/rfc6838#section-4.2 - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/kinesisanalyticsv2_application.html.markdown b/website/docs/cdktf/python/r/kinesisanalyticsv2_application.html.markdown index f8f1b5bd8e83..1ed1105e9d2b 100644 --- a/website/docs/cdktf/python/r/kinesisanalyticsv2_application.html.markdown +++ b/website/docs/cdktf/python/r/kinesisanalyticsv2_application.html.markdown @@ -276,6 +276,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The name of the application. * `runtime_environment` - (Required) The runtime environment for the application. Valid values: `SQL-1_0`, `FLINK-1_6`, `FLINK-1_8`, `FLINK-1_11`, `FLINK-1_13`, `FLINK-1_15`, `FLINK-1_18`, `FLINK-1_19`. * `service_execution_role` - (Required) The ARN of the [IAM role](/docs/providers/aws/r/iam_role.html) used by the application to access Kinesis data streams, Kinesis Data Firehose delivery streams, Amazon S3 objects, and other external resources. 
@@ -538,4 +539,4 @@ Using `terraform import`, import `aws_kinesisanalyticsv2_application` using the % terraform import aws_kinesisanalyticsv2_application.example arn:aws:kinesisanalytics:us-west-2:123456789012:application/example-sql-application ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/kinesisanalyticsv2_application_snapshot.html.markdown b/website/docs/cdktf/python/r/kinesisanalyticsv2_application_snapshot.html.markdown index e773df0d84d4..32b52daabf83 100644 --- a/website/docs/cdktf/python/r/kinesisanalyticsv2_application_snapshot.html.markdown +++ b/website/docs/cdktf/python/r/kinesisanalyticsv2_application_snapshot.html.markdown @@ -37,6 +37,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `application_name` - (Required) The name of an existing [Kinesis Analytics v2 Application](/docs/providers/aws/r/kinesisanalyticsv2_application.html). Note that the application must be running for a snapshot to be created. * `snapshot_name` - (Required) The name of the application snapshot. 
@@ -80,4 +81,4 @@ Using `terraform import`, import `aws_kinesisanalyticsv2_application` using `app % terraform import aws_kinesisanalyticsv2_application_snapshot.example example-application/example-snapshot ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/kms_alias.html.markdown b/website/docs/cdktf/python/r/kms_alias.html.markdown index 72c7f863e8d3..43f82137c4f1 100644 --- a/website/docs/cdktf/python/r/kms_alias.html.markdown +++ b/website/docs/cdktf/python/r/kms_alias.html.markdown @@ -42,6 +42,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Optional) The display name of the alias. The name must start with the word "alias" followed by a forward slash (alias/) * `name_prefix` - (Optional) Creates an unique alias beginning with the specified prefix. The name must start with the word "alias" followed by a forward slash (alias/). Conflicts with `name`. @@ -56,6 +57,32 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_kms_alias.example + identity = { + name = "alias/my-key-alias" + } +} + +resource "aws_kms_alias" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `name` - (String) Name of the KMS key alias. + +#### Optional + +* `account_id` (String) AWS Account where this resource is managed. +* `region` (String) Region where this resource is managed. 
+ In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import KMS aliases using the `name`. For example: ```python @@ -79,4 +106,4 @@ Using `terraform import`, import KMS aliases using the `name`. For example: % terraform import aws_kms_alias.a alias/my-key-alias ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/kms_ciphertext.html.markdown b/website/docs/cdktf/python/r/kms_ciphertext.html.markdown index 627cff6d6029..aa9ba0522286 100644 --- a/website/docs/cdktf/python/r/kms_ciphertext.html.markdown +++ b/website/docs/cdktf/python/r/kms_ciphertext.html.markdown @@ -47,6 +47,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `plaintext` - (Required) Data to be encrypted. Note that this may show up in logs, and it will be stored in the state file. * `key_id` - (Required) Globally unique key ID for the customer master key. * `context` - (Optional) An optional mapping that makes up the encryption context. 
@@ -57,4 +58,4 @@ This resource exports the following attributes in addition to the arguments abov * `ciphertext_blob` - Base64 encoded ciphertext - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/kms_custom_key_store.html.markdown b/website/docs/cdktf/python/r/kms_custom_key_store.html.markdown index 501049d7a85a..a0f90e2dc224 100644 --- a/website/docs/cdktf/python/r/kms_custom_key_store.html.markdown +++ b/website/docs/cdktf/python/r/kms_custom_key_store.html.markdown @@ -99,6 +99,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `custom_key_store_type` - (Optional, ForceNew) Specifies the type of key store to create. Valid values are `AWS_CLOUDHSM` and `EXTERNAL_KEY_STORE`. If omitted, AWS will default the value to `AWS_CLOUDHSM`. If `custom_key_store_type` is `AWS_CLOUDHSM`, the following optional arguments must be set: @@ -159,4 +160,4 @@ Using `terraform import`, import KMS (Key Management) Custom Key Store using the % terraform import aws_kms_custom_key_store.example cks-5ebd4ef395a96288e ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/kms_external_key.html.markdown b/website/docs/cdktf/python/r/kms_external_key.html.markdown index edbe83a95236..f524a97fd055 100644 --- a/website/docs/cdktf/python/r/kms_external_key.html.markdown +++ b/website/docs/cdktf/python/r/kms_external_key.html.markdown @@ -43,8 +43,11 @@ This resource supports the following arguments: * `description` - (Optional) Description of the key. * `enabled` - (Optional) Specifies whether the key is enabled. Keys pending import can only be `false`. 
Imported keys default to `true` unless expired. * `key_material_base64` - (Optional) Base64 encoded 256-bit symmetric encryption key material to import. The CMK is permanently associated with this key material. The same key material can be reimported, but you cannot import different key material. +* `key_spec` - (Optional) Specifies whether the key contains a symmetric key or an asymmetric key pair and the encryption algorithms or signing algorithms that the key supports. Valid values: `SYMMETRIC_DEFAULT`, `RSA_2048`, `RSA_3072`, `RSA_4096`, `HMAC_224`, `HMAC_256`, `HMAC_384`, `HMAC_512`, `ECC_NIST_P256`, `ECC_NIST_P384`, `ECC_NIST_P521`, `ECC_SECG_P256K1`, `ML_DSA_44`, `ML_DSA_65`, `ML_DSA_87`, or `SM2` (China Regions only). Defaults to `SYMMETRIC_DEFAULT`. For help with choosing a key spec, see the [AWS KMS Developer Guide](https://docs.aws.amazon.com/kms/latest/developerguide/symm-asymm-choose.html). +* `key_usage` - (Optional) Specifies the intended use of the key. Valid values: `ENCRYPT_DECRYPT`, `SIGN_VERIFY`, or `GENERATE_VERIFY_MAC`. Defaults to `ENCRYPT_DECRYPT`. * `multi_region` - (Optional) Indicates whether the KMS key is a multi-Region (`true`) or regional (`false`) key. Defaults to `false`. * `policy` - (Optional) A key policy JSON document. If you do not provide a key policy, AWS KMS attaches a default key policy to the CMK. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `tags` - (Optional) A key-value map of tags to assign to the key. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. 
* `valid_to` - (Optional) Time at which the imported key material expires. When the key material expires, AWS KMS deletes the key material and the CMK becomes unusable. If not specified, key material does not expire. Valid values: [RFC3339 time string](https://tools.ietf.org/html/rfc3339#section-5.8) (`YYYY-MM-DDTHH:MM:SSZ`) @@ -56,7 +59,6 @@ This resource exports the following attributes in addition to the arguments abov * `expiration_model` - Whether the key material expires. Empty when pending key material import, otherwise `KEY_MATERIAL_EXPIRES` or `KEY_MATERIAL_DOES_NOT_EXPIRE`. * `id` - The unique identifier for the key. * `key_state` - The state of the CMK. -* `key_usage` - The cryptographic operations for which you can use the CMK. * `tags_all` - A map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). ## Import @@ -84,4 +86,4 @@ Using `terraform import`, import KMS External Keys using the `id`. For example: % terraform import aws_kms_external_key.a arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/kms_grant.html.markdown b/website/docs/cdktf/python/r/kms_grant.html.markdown index 8120069b4a2d..023c19a5031d 100644 --- a/website/docs/cdktf/python/r/kms_grant.html.markdown +++ b/website/docs/cdktf/python/r/kms_grant.html.markdown @@ -71,6 +71,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
* `name` - (Optional, Forces new resources) A friendly name for identifying the grant. * `key_id` - (Required, Forces new resources) The unique identifier for the customer master key (CMK) that the grant applies to. Specify the key ID or the Amazon Resource Name (ARN) of the CMK. To specify a CMK in a different AWS account, you must use the key ARN. * `grantee_principal` - (Required, Forces new resources) The principal that is given permission to perform the operations that the grant permits in ARN format. Note that due to eventual consistency issues around IAM principals, terraform's state may not always be refreshed to reflect what is true in AWS. @@ -118,4 +119,4 @@ Using `terraform import`, import KMS Grants using the Key ID and Grant ID separa % terraform import aws_kms_grant.test 1234abcd-12ab-34cd-56ef-1234567890ab:abcde1237f76e4ba7987489ac329fbfba6ad343d6f7075dbd1ef191f0120514 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/kms_key.html.markdown b/website/docs/cdktf/python/r/kms_key.html.markdown index 1b7e5291975c..313bf83d9be0 100644 --- a/website/docs/cdktf/python/r/kms_key.html.markdown +++ b/website/docs/cdktf/python/r/kms_key.html.markdown @@ -291,12 +291,13 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` - (Optional) The description of the key as viewed in AWS console. * `key_usage` - (Optional) Specifies the intended use of the key. Valid values: `ENCRYPT_DECRYPT`, `SIGN_VERIFY`, or `GENERATE_VERIFY_MAC`. Defaults to `ENCRYPT_DECRYPT`. 
* `custom_key_store_id` - (Optional) ID of the KMS [Custom Key Store](https://docs.aws.amazon.com/kms/latest/developerguide/create-cmk-keystore.html) where the key will be stored instead of KMS (eg CloudHSM). * `customer_master_key_spec` - (Optional) Specifies whether the key contains a symmetric key or an asymmetric key pair and the encryption algorithms or signing algorithms that the key supports. -Valid values: `SYMMETRIC_DEFAULT`, `RSA_2048`, `RSA_3072`, `RSA_4096`, `HMAC_256`, `ECC_NIST_P256`, `ECC_NIST_P384`, `ECC_NIST_P521`, or `ECC_SECG_P256K1`. Defaults to `SYMMETRIC_DEFAULT`. For help with choosing a key spec, see the [AWS KMS Developer Guide](https://docs.aws.amazon.com/kms/latest/developerguide/symm-asymm-choose.html). +Valid values: `SYMMETRIC_DEFAULT`, `RSA_2048`, `RSA_3072`, `RSA_4096`, `HMAC_224`, `HMAC_256`, `HMAC_384`, `HMAC_512`, `ECC_NIST_P256`, `ECC_NIST_P384`, `ECC_NIST_P521`, `ECC_SECG_P256K1`, `ML_DSA_44`, `ML_DSA_65`, `ML_DSA_87`, or `SM2` (China Regions only). Defaults to `SYMMETRIC_DEFAULT`. For help with choosing a key spec, see the [AWS KMS Developer Guide](https://docs.aws.amazon.com/kms/latest/developerguide/symm-asymm-choose.html). * `policy` - (Optional) A valid policy JSON document. Although this is a key policy, not an IAM policy, an [`aws_iam_policy_document`](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document), in the form that designates a principal, can be used. For more information about building policy documents with Terraform, see the [AWS IAM Policy Document Guide](https://learn.hashicorp.com/terraform/aws/iam-policy). ~> **NOTE:** Note: All KMS keys must have a key policy. If a key policy is not specified, AWS gives the KMS key a [default key policy](https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html#key-policy-default) that gives all principals in the owning account unlimited access to all KMS operations for the key. 
This default key policy effectively delegates all access control to IAM policies and KMS grants. @@ -333,6 +334,32 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_kms_key.example + identity = { + id = "1234abcd-12ab-34cd-56ef-1234567890ab" + } +} + +resource "aws_kms_key" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `id` - (String) ID of the KMS key. + +#### Optional + +* `account_id` (String) AWS Account where this resource is managed. +* `region` (String) Region where this resource is managed. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import KMS Keys using the `id`. For example: ```python @@ -356,4 +383,4 @@ Using `terraform import`, import KMS Keys using the `id`. For example: % terraform import aws_kms_key.a 1234abcd-12ab-34cd-56ef-1234567890ab ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/kms_key_policy.html.markdown b/website/docs/cdktf/python/r/kms_key_policy.html.markdown index 06fb6a455986..182c7034042e 100644 --- a/website/docs/cdktf/python/r/kms_key_policy.html.markdown +++ b/website/docs/cdktf/python/r/kms_key_policy.html.markdown @@ -56,6 +56,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `key_id` - (Required) The ID of the KMS Key to attach the policy. 
* `policy` - (Required) A valid policy JSON document. Although this is a key policy, not an IAM policy, an [`aws_iam_policy_document`](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document), in the form that designates a principal, can be used. For more information about building policy documents with Terraform, see the [AWS IAM Policy Document Guide](https://learn.hashicorp.com/terraform/aws/iam-policy). @@ -94,4 +95,4 @@ Using `terraform import`, import KMS Key Policies using the `key_id`. For exampl % terraform import aws_kms_key_policy.a 1234abcd-12ab-34cd-56ef-1234567890ab ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/kms_replica_external_key.html.markdown b/website/docs/cdktf/python/r/kms_replica_external_key.html.markdown index 2ad19e9c7f91..9c85dd1663e2 100644 --- a/website/docs/cdktf/python/r/kms_replica_external_key.html.markdown +++ b/website/docs/cdktf/python/r/kms_replica_external_key.html.markdown @@ -56,6 +56,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `bypass_policy_lockout_safety_check` - (Optional) A flag to indicate whether to bypass the key policy lockout safety check. Setting this value to true increases the risk that the KMS key becomes unmanageable. Do not set this value to true indiscriminately. For more information, refer to the scenario in the [Default Key Policy](https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html#key-policy-default-allow-root-enable-iam) section in the _AWS Key Management Service Developer Guide_. 
@@ -107,4 +108,4 @@ Using `terraform import`, import KMS multi-Region replica keys using the `id`. F % terraform import aws_kms_replica_external_key.example 1234abcd-12ab-34cd-56ef-1234567890ab ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/kms_replica_key.html.markdown b/website/docs/cdktf/python/r/kms_replica_key.html.markdown index 66d54564279c..489d8da84736 100644 --- a/website/docs/cdktf/python/r/kms_replica_key.html.markdown +++ b/website/docs/cdktf/python/r/kms_replica_key.html.markdown @@ -14,6 +14,8 @@ Manages a KMS multi-Region replica key. ## Example Usage +### Terraform AWS Provider v5 (and below) + ```python # DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug from constructs import Construct @@ -48,10 +50,43 @@ class MyConvertedCode(TerraformStack): ) ``` +### Terraform AWS Provider v6 (and above) + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. 
+# +from imports.aws.kms_key import KmsKey +from imports.aws.kms_replica_key import KmsReplicaKey +from imports.aws.provider import AwsProvider +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + AwsProvider(self, "aws", + region="us-west-2" + ) + primary = KmsKey(self, "primary", + deletion_window_in_days=30, + description="Multi-Region primary key", + multi_region=True, + region="us-east-1" + ) + KmsReplicaKey(self, "replica", + deletion_window_in_days=7, + description="Multi-Region replica key", + primary_key_arn=primary.arn + ) +``` + ## Argument Reference This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `bypass_policy_lockout_safety_check` - (Optional) A flag to indicate whether to bypass the key policy lockout safety check. Setting this value to true increases the risk that the KMS key becomes unmanageable. Do not set this value to true indiscriminately. For more information, refer to the scenario in the [Default Key Policy](https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html#key-policy-default-allow-root-enable-iam) section in the _AWS Key Management Service Developer Guide_. @@ -101,4 +136,4 @@ Using `terraform import`, import KMS multi-Region replica keys using the `id`. 
F % terraform import aws_kms_replica_key.example 1234abcd-12ab-34cd-56ef-1234567890ab ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/lakeformation_data_cells_filter.html.markdown b/website/docs/cdktf/python/r/lakeformation_data_cells_filter.html.markdown index 0980a1aceaab..ef9fc9253af4 100644 --- a/website/docs/cdktf/python/r/lakeformation_data_cells_filter.html.markdown +++ b/website/docs/cdktf/python/r/lakeformation_data_cells_filter.html.markdown @@ -45,8 +45,9 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -The following arguments are required: +This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `table_data` - (Required) Information about the data cells filter. See [Table Data](#table-data) below for details. ### Table Data @@ -83,7 +84,7 @@ This resource exports the following attributes in addition to the arguments abov ## Import -In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Lake Formation Data Cells Filter using the `example_id_arg`. For example: +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Lake Formation Data Cells Filter using the `database_name`, `name`, `table_catalog_id`, and `table_name` separated by `,`. For example: ```python # DO NOT EDIT. 
Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug @@ -100,10 +101,10 @@ class MyConvertedCode(TerraformStack): LakeformationDataCellsFilter.generate_config_for_import(self, "example", "database_name,name,table_catalog_id,table_name") ``` -Using `terraform import`, import Lake Formation Data Cells Filter using the `id`. For example: +Using `terraform import`, import Lake Formation Data Cells Filter using the `database_name`, `name`, `table_catalog_id`, and `table_name` separated by `,`. For example: ```console % terraform import aws_lakeformation_data_cells_filter.example database_name,name,table_catalog_id,table_name ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/lakeformation_data_lake_settings.html.markdown b/website/docs/cdktf/python/r/lakeformation_data_lake_settings.html.markdown index 936d660f155a..f273430becd2 100644 --- a/website/docs/cdktf/python/r/lakeformation_data_lake_settings.html.markdown +++ b/website/docs/cdktf/python/r/lakeformation_data_lake_settings.html.markdown @@ -125,17 +125,18 @@ class MyConvertedCode(TerraformStack): The following arguments are optional: -* `admins` – (Optional) Set of ARNs of AWS Lake Formation principals (IAM users or roles). +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `admins` - (Optional) Set of ARNs of AWS Lake Formation principals (IAM users or roles). * `allow_external_data_filtering` - (Optional) Whether to allow Amazon EMR clusters to access data managed by Lake Formation. * `allow_full_table_external_data_access` - (Optional) Whether to allow a third-party query engine to get data access credentials without session tags when a caller has full data access permissions. 
* `authorized_session_tag_value_list` - (Optional) Lake Formation relies on a privileged process secured by Amazon EMR or the third party integrator to tag the user's role while assuming it. -* `catalog_id` – (Optional) Identifier for the Data Catalog. By default, the account ID. +* `catalog_id` - (Optional) Identifier for the Data Catalog. By default, the account ID. * `create_database_default_permissions` - (Optional) Up to three configuration blocks of principal permissions for default create database permissions. Detailed below. * `create_table_default_permissions` - (Optional) Up to three configuration blocks of principal permissions for default create table permissions. Detailed below. * `external_data_filtering_allow_list` - (Optional) A list of the account IDs of Amazon Web Services accounts with Amazon EMR clusters that are to perform data filtering. * `parameters` - Key-value map of additional configuration. Valid values for the `CROSS_ACCOUNT_VERSION` key are `"1"`, `"2"`, `"3"`, or `"4"`. `SET_CONTEXT` is also returned with a value of `TRUE`. In a fresh account, prior to configuring, `CROSS_ACCOUNT_VERSION` is `"1"`. Destroying this resource sets the `CROSS_ACCOUNT_VERSION` to `"1"`. -* `read_only_admins` – (Optional) Set of ARNs of AWS Lake Formation principals (IAM users or roles) with only view access to the resources. -* `trusted_resource_owners` – (Optional) List of the resource-owning account IDs that the caller's account can use to share their user access details (user ARNs). +* `read_only_admins` - (Optional) Set of ARNs of AWS Lake Formation principals (IAM users or roles) with only view access to the resources. +* `trusted_resource_owners` - (Optional) List of the resource-owning account IDs that the caller's account can use to share their user access details (user ARNs). 
~> **NOTE:** Although optional, not including `admins`, `create_database_default_permissions`, `create_table_default_permissions`, `parameters`, and/or `trusted_resource_owners` results in the setting being cleared. @@ -143,6 +144,7 @@ The following arguments are optional: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `permissions` - (Optional) List of permissions that are granted to the principal. Valid values may include `ALL`, `SELECT`, `ALTER`, `DROP`, `DELETE`, `INSERT`, `DESCRIBE`, and `CREATE_TABLE`. For more details, see [Lake Formation Permissions Reference](https://docs.aws.amazon.com/lake-formation/latest/dg/lf-permissions-reference.html). * `principal` - (Optional) Principal who is granted permissions. To enforce metadata and underlying data access control only by IAM on new databases and tables set `principal` to `IAM_ALLOWED_PRINCIPALS` and `permissions` to `["ALL"]`. @@ -150,6 +152,7 @@ The following arguments are optional: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `permissions` - (Optional) List of permissions that are granted to the principal. Valid values may include `ALL`, `SELECT`, `ALTER`, `DROP`, `DELETE`, `INSERT`, and `DESCRIBE`. For more details, see [Lake Formation Permissions Reference](https://docs.aws.amazon.com/lake-formation/latest/dg/lf-permissions-reference.html). * `principal` - (Optional) Principal who is granted permissions. 
To enforce metadata and underlying data access control only by IAM on new databases and tables set `principal` to `IAM_ALLOWED_PRINCIPALS` and `permissions` to `["ALL"]`. @@ -157,4 +160,4 @@ The following arguments are optional: This resource exports no additional attributes. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/lakeformation_lf_tag.html.markdown b/website/docs/cdktf/python/r/lakeformation_lf_tag.html.markdown index 6240f78472aa..4cf207a0fd40 100644 --- a/website/docs/cdktf/python/r/lakeformation_lf_tag.html.markdown +++ b/website/docs/cdktf/python/r/lakeformation_lf_tag.html.markdown @@ -36,6 +36,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `catalog_id` - (Optional) ID of the Data Catalog to create the tag in. If omitted, this defaults to the AWS Account ID. * `key` - (Required) Key-name for the tag. * `values` - (Required) List of possible values an attribute can take. 
@@ -71,4 +72,4 @@ Using `terraform import`, import Lake Formation LF-Tags using the `catalog_id:ke % terraform import aws_lakeformation_lf_tag.example 123456789012:some_key ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/lakeformation_lf_tag_expression.html.markdown b/website/docs/cdktf/python/r/lakeformation_lf_tag_expression.html.markdown new file mode 100644 index 000000000000..e267fffd1852 --- /dev/null +++ b/website/docs/cdktf/python/r/lakeformation_lf_tag_expression.html.markdown @@ -0,0 +1,95 @@ +--- +subcategory: "Lake Formation" +layout: "aws" +page_title: "AWS: aws_lakeformation_lf_tag_expression" +description: |- + Terraform resource for managing an AWS Lake Formation LF Tag Expression. +--- + + +# Resource: aws_lakeformation_lf_tag_expression + +Terraform resource for managing an AWS Lake Formation LF Tag Expression. + +## Example Usage + +### Basic Usage + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.lakeformation_lf_tag_expression import LakeformationLfTagExpression +from imports.aws.lakeformation_lf_tag import LakeformationLfTag +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + example = LakeformationLfTag(self, "example", + key="example", + values=["value"] + ) + aws_lakeformation_lf_tag_expression_example =
LakeformationLfTagExpression(self, "example_1", + expression=[{ + "tag_key": example.key, + "tag_values": example.values + } + ], + name="example" + ) + # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. 
+ aws_lakeformation_lf_tag_expression_example.override_logical_id("example") +``` + +## Argument Reference + +The following arguments are required: + +* `name` - (Required) Name of the LF-Tag Expression. +* `expression` - (Required) A list of LF-Tag conditions (key-value pairs). See [expression](#expression) for more details. + +The following arguments are optional: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `catalog_id` - (Optional) ID of the Data Catalog. Defaults to the account ID if not specified. +* `description` - (Optional) Description of the LF-Tag Expression. + +### expression + +* `tag_key` - (Required) The key-name for the LF-Tag. +* `tag_values` - (Required) A list of possible values for the LF-Tag. + +## Attribute Reference + +This resource exports no additional attributes. + +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Lake Formation LF Tag Expression using the `name,catalog_id`. For example: + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.lakeformation_lf_tag_expression import LakeformationLfTagExpression +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + LakeformationLfTagExpression.generate_config_for_import(self, "example", "example-tag-expression,123456789012") +``` + +Using `terraform import`, import Lake Formation LF Tag Expression using the `name,catalog_id`. 
For example: + +```console +% terraform import aws_lakeformation_lf_tag_expression.example example-tag-expression,123456789012 +``` + + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/lakeformation_opt_in.html.markdown b/website/docs/cdktf/python/r/lakeformation_opt_in.html.markdown index d265636818dd..f013e6dc04f9 100644 --- a/website/docs/cdktf/python/r/lakeformation_opt_in.html.markdown +++ b/website/docs/cdktf/python/r/lakeformation_opt_in.html.markdown @@ -33,8 +33,9 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -The following arguments are required: +This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `principal` - (Required) Lake Formation principal. Supported principals are IAM users or IAM roles. See [Principal](#principal) for more details. * `resource_data` - (Required) Structure for the resource. See [Resource](#resource) for more details. @@ -118,4 +119,4 @@ This resource exports the following attributes in addition to the arguments abov * `update` - (Default `180m`) * `delete` - (Default `90m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/lakeformation_permissions.html.markdown b/website/docs/cdktf/python/r/lakeformation_permissions.html.markdown index 8ac2b724eb30..90e9dd374e08 100644 --- a/website/docs/cdktf/python/r/lakeformation_permissions.html.markdown +++ b/website/docs/cdktf/python/r/lakeformation_permissions.html.markdown @@ -120,6 +120,7 @@ The resulting permissions depend on whether the table had `IAMAllowedPrincipals` AllIAMPrincipals is a pseudo-entity group that acts like a Lake Formation principal. 
The group includes all IAMs in the account that is defined. +```terraform resource "aws_lakeformation_permissions" "example" { permissions = ["SELECT"] principal = "123456789012:IAMPrincipals" @@ -130,6 +131,7 @@ resource "aws_lakeformation_permissions" "example" { column_names = ["event"] } } +``` ## Using Lake Formation Permissions @@ -223,8 +225,8 @@ class MyConvertedCode(TerraformStack): The following arguments are required: -* `permissions` – (Required) List of permissions granted to the principal. Valid values may include `ALL`, `ALTER`, `ASSOCIATE`, `CREATE_DATABASE`, `CREATE_TABLE`, `DATA_LOCATION_ACCESS`, `DELETE`, `DESCRIBE`, `DROP`, `INSERT`, and `SELECT`. For details on each permission, see [Lake Formation Permissions Reference](https://docs.aws.amazon.com/lake-formation/latest/dg/lf-permissions-reference.html). -* `principal` – (Required) Principal to be granted the permissions on the resource. Supported principals include `IAM_ALLOWED_PRINCIPALS` (see [Default Behavior and `IAMAllowedPrincipals`](#default-behavior-and-iamallowedprincipals) above), IAM roles, users, groups, Federated Users, SAML groups and users, QuickSight groups, OUs, and organizations as well as AWS account IDs for cross-account permissions. For more information, see [Lake Formation Permissions Reference](https://docs.aws.amazon.com/lake-formation/latest/dg/lf-permissions-reference.html). +* `permissions` - (Required) List of permissions granted to the principal. Valid values may include `ALL`, `ALTER`, `ASSOCIATE`, `CREATE_DATABASE`, `CREATE_TABLE`, `DATA_LOCATION_ACCESS`, `DELETE`, `DESCRIBE`, `DROP`, `INSERT`, and `SELECT`. For details on each permission, see [Lake Formation Permissions Reference](https://docs.aws.amazon.com/lake-formation/latest/dg/lf-permissions-reference.html). +* `principal` - (Required) Principal to be granted the permissions on the resource. 
Supported principals include `IAM_ALLOWED_PRINCIPALS` (see [Default Behavior and `IAMAllowedPrincipals`](#default-behavior-and-iamallowedprincipals) above), IAM roles, users, groups, Federated Users, SAML groups and users, QuickSight groups, OUs, and organizations as well as AWS account IDs for cross-account permissions. For more information, see [Lake Formation Permissions Reference](https://docs.aws.amazon.com/lake-formation/latest/dg/lf-permissions-reference.html). ~> **NOTE:** We highly recommend that the `principal` _NOT_ be a Lake Formation administrator (granted using `aws_lakeformation_data_lake_settings`). The entity (e.g., IAM role) running Terraform will most likely need to be a Lake Formation administrator. As such, the entity will have implicit permissions and does not need permissions granted through this resource. @@ -241,7 +243,8 @@ One of the following is required: The following arguments are optional: -* `catalog_id` – (Optional) Identifier for the Data Catalog. By default, the account ID. The Data Catalog is the persistent metadata store. It contains database definitions, table definitions, and other control information to manage your Lake Formation environment. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `catalog_id` - (Optional) Identifier for the Data Catalog. By default, the account ID. The Data Catalog is the persistent metadata store. It contains database definitions, table definitions, and other control information to manage your Lake Formation environment. * `permissions_with_grant_option` - (Optional) Subset of `permissions` which the principal can pass. 
### data_cells_filter @@ -255,7 +258,7 @@ The following arguments are optional: The following argument is required: -* `arn` – (Required) Amazon Resource Name (ARN) that uniquely identifies the data location resource. +* `arn` - (Required) Amazon Resource Name (ARN) that uniquely identifies the data location resource. The following argument is optional: @@ -265,7 +268,7 @@ The following argument is optional: The following argument is required: -* `name` – (Required) Name of the database resource. Unique to the Data Catalog. +* `name` - (Required) Name of the database resource. Unique to the Data Catalog. The following argument is optional: @@ -275,7 +278,7 @@ The following argument is optional: The following arguments are required: -* `key` – (Required) The key-name for the tag. +* `key` - (Required) The key-name for the tag. * `values` - (Required) A list of possible values an attribute can take. The following argument is optional: @@ -286,7 +289,7 @@ The following argument is optional: The following arguments are required: -* `resource_type` – (Required) The resource type for which the tag policy applies. Valid values are `DATABASE` and `TABLE`. +* `resource_type` - (Required) The resource type for which the tag policy applies. Valid values are `DATABASE` and `TABLE`. * `expression` - (Required) A list of tag conditions that apply to the resource's tag policy. Configuration block for tag conditions that apply to the policy. See [`expression`](#expression) below. The following argument is optional: @@ -295,19 +298,20 @@ The following argument is optional: #### expression -* `key` – (Required) The key-name of an LF-Tag. +* `key` - (Required) The key-name of an LF-Tag. * `values` - (Required) A list of possible values of an LF-Tag. ### table The following argument is required: -* `database_name` – (Required) Name of the database for the table. Unique to a Data Catalog. +* `database_name` - (Required) Name of the database for the table. Unique to a Data Catalog. 
* `name` - (Required, at least one of `name` or `wildcard`) Name of the table. * `wildcard` - (Required, at least one of `name` or `wildcard`) Whether to use a wildcard representing every table under a database. Defaults to `false`. The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `catalog_id` - (Optional) Identifier for the Data Catalog. By default, it is the account ID of the caller. ### table_with_columns @@ -315,12 +319,13 @@ The following arguments are optional: The following arguments are required: * `column_names` - (Required, at least one of `column_names` or `wildcard`) Set of column names for the table. -* `database_name` – (Required) Name of the database for the table with columns resource. Unique to the Data Catalog. -* `name` – (Required) Name of the table resource. +* `database_name` - (Required) Name of the database for the table with columns resource. Unique to the Data Catalog. +* `name` - (Required) Name of the table resource. * `wildcard` - (Required, at least one of `column_names` or `wildcard`) Whether to use a column wildcard. If `excluded_column_names` is included, `wildcard` must be set to `true` to avoid Terraform reporting a difference. The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `catalog_id` - (Optional) Identifier for the Data Catalog. By default, it is the account ID of the caller. 
* `excluded_column_names` - (Optional) Set of column names for the table to exclude. If `excluded_column_names` is included, `wildcard` must be set to `true` to avoid Terraform reporting a difference. @@ -328,4 +333,4 @@ The following arguments are optional: This resource exports no additional attributes. - \ No newline at end of file + diff --git a/website/docs/cdktf/python/r/lakeformation_resource.html.markdown b/website/docs/cdktf/python/r/lakeformation_resource.html.markdown index b9d3f010b7be..8835cf424322 100644 --- a/website/docs/cdktf/python/r/lakeformation_resource.html.markdown +++ b/website/docs/cdktf/python/r/lakeformation_resource.html.markdown @@ -46,13 +46,16 @@ class MyConvertedCode(TerraformStack): The following arguments are required: -* `arn` – (Required) Amazon Resource Name (ARN) of the resource. +* `arn` - (Required) Amazon Resource Name (ARN) of the resource. The following arguments are optional: -* `role_arn` – (Optional) Role that has read/write access to the resource. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `role_arn` - (Optional) Role that has read/write access to the resource. * `use_service_linked_role` - (Optional) Designates an AWS Identity and Access Management (IAM) service-linked role by registering this role with the Data Catalog. * `hybrid_access_enabled` - (Optional) Flag to enable AWS LakeFormation hybrid access permission mode. +* `with_federation` - (Optional) Whether or not the resource is a federated resource. Set to true when registering AWS Glue connections for federated catalog functionality. 
+* `with_privileged_access` - (Optional) Boolean to grant the calling principal the permissions to perform all supported Lake Formation operations on the registered data location. ~> **NOTE:** AWS does not support registering an S3 location with an IAM role and subsequently updating the S3 location registration to a service-linked role. @@ -62,4 +65,4 @@ This resource exports the following attributes in addition to the arguments abov * `last_modified` - Date and time the resource was last modified in [RFC 3339 format](https://tools.ietf.org/html/rfc3339#section-5.8). - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/lakeformation_resource_lf_tag.html.markdown b/website/docs/cdktf/python/r/lakeformation_resource_lf_tag.html.markdown index 4051c10258d7..61e54efabbda 100644 --- a/website/docs/cdktf/python/r/lakeformation_resource_lf_tag.html.markdown +++ b/website/docs/cdktf/python/r/lakeformation_resource_lf_tag.html.markdown @@ -44,7 +44,7 @@ class MyConvertedCode(TerraformStack): The following arguments are required: -* `lf_tag` – (Required) Set of LF-tags to attach to the resource. See [LF Tag](#lf-tag) for more details. +* `lf_tag` - (Required) Set of LF-tags to attach to the resource. See [LF Tag](#lf-tag) for more details. Exactly one of the following is required: @@ -54,13 +54,14 @@ Exactly one of the following is required: The following arguments are optional: -* `catalog_id` – (Optional) Identifier for the Data Catalog. By default, the account ID. The Data Catalog is the persistent metadata store. It contains database definitions, table definitions, and other control information to manage your Lake Formation environment. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `catalog_id` - (Optional) Identifier for the Data Catalog. By default, the account ID. The Data Catalog is the persistent metadata store. It contains database definitions, table definitions, and other control information to manage your Lake Formation environment. ### LF Tag The following arguments are required: -* `key` – (Required) Key name for an existing LF-tag. +* `key` - (Required) Key name for an existing LF-tag. * `value` - (Required) Value from the possible values for the LF-tag. The following argument is optional: @@ -71,7 +72,7 @@ The following argument is optional: The following argument is required: -* `name` – (Required) Name of the database resource. Unique to the Data Catalog. +* `name` - (Required) Name of the database resource. Unique to the Data Catalog. The following argument is optional: @@ -81,12 +82,13 @@ The following argument is optional: The following argument is required: -* `database_name` – (Required) Name of the database for the table. Unique to a Data Catalog. +* `database_name` - (Required) Name of the database for the table. Unique to a Data Catalog. * `name` - (Required, at least one of `name` or `wildcard`) Name of the table. * `wildcard` - (Required, at least one of `name` or `wildcard`) Whether to use a wildcard representing every table under a database. Defaults to `false`. The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `catalog_id` - (Optional) Identifier for the Data Catalog. By default, it is the account ID of the caller. 
### Table With Columns @@ -94,11 +96,12 @@ The following arguments are optional: The following arguments are required: * `column_names` - (Required, at least one of `column_names` or `wildcard`) Set of column names for the table. -* `database_name` – (Required) Name of the database for the table with columns resource. Unique to the Data Catalog. -* `name` – (Required) Name of the table resource. +* `database_name` - (Required) Name of the database for the table with columns resource. Unique to the Data Catalog. +* `name` - (Required) Name of the table resource. The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `catalog_id` - (Optional) Identifier for the Data Catalog. By default, it is the account ID of the caller. * `column_wildcard` - (Optional) Option to add column wildcard. See [Column Wildcard](#column-wildcard) for more details. @@ -121,4 +124,4 @@ This resource exports no additional attributes. You cannot import this resource. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/lakeformation_resource_lf_tags.html.markdown b/website/docs/cdktf/python/r/lakeformation_resource_lf_tags.html.markdown index 2e3e239a3f8c..14118acdfcef 100644 --- a/website/docs/cdktf/python/r/lakeformation_resource_lf_tags.html.markdown +++ b/website/docs/cdktf/python/r/lakeformation_resource_lf_tags.html.markdown @@ -96,7 +96,7 @@ class MyConvertedCode(TerraformStack): The following arguments are required: -* `lf_tag` – (Required) Set of LF-tags to attach to the resource. See below. +* `lf_tag` - (Required) Set of LF-tags to attach to the resource. See below. 
Exactly one of the following is required: @@ -106,13 +106,14 @@ Exactly one of the following is required: The following arguments are optional: -* `catalog_id` – (Optional) Identifier for the Data Catalog. By default, the account ID. The Data Catalog is the persistent metadata store. It contains database definitions, table definitions, and other control information to manage your Lake Formation environment. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `catalog_id` - (Optional) Identifier for the Data Catalog. By default, the account ID. The Data Catalog is the persistent metadata store. It contains database definitions, table definitions, and other control information to manage your Lake Formation environment. ### lf_tag The following arguments are required: -* `key` – (Required) Key name for an existing LF-tag. +* `key` - (Required) Key name for an existing LF-tag. * `value` - (Required) Value from the possible values for the LF-tag. The following argument is optional: @@ -123,7 +124,7 @@ The following argument is optional: The following argument is required: -* `name` – (Required) Name of the database resource. Unique to the Data Catalog. +* `name` - (Required) Name of the database resource. Unique to the Data Catalog. The following argument is optional: @@ -133,12 +134,13 @@ The following argument is optional: The following argument is required: -* `database_name` – (Required) Name of the database for the table. Unique to a Data Catalog. +* `database_name` - (Required) Name of the database for the table. Unique to a Data Catalog. * `name` - (Required, at least one of `name` or `wildcard`) Name of the table. 
* `wildcard` - (Required, at least one of `name` or `wildcard`) Whether to use a wildcard representing every table under a database. Defaults to `false`. The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `catalog_id` - (Optional) Identifier for the Data Catalog. By default, it is the account ID of the caller. ### table_with_columns @@ -146,12 +148,13 @@ The following arguments are optional: The following arguments are required: * `column_names` - (Required, at least one of `column_names` or `wildcard`) Set of column names for the table. -* `database_name` – (Required) Name of the database for the table with columns resource. Unique to the Data Catalog. -* `name` – (Required) Name of the table resource. +* `database_name` - (Required) Name of the database for the table with columns resource. Unique to the Data Catalog. +* `name` - (Required) Name of the table resource. * `wildcard` - (Required, at least one of `column_names` or `wildcard`) Whether to use a column wildcard. If `excluded_column_names` is included, `wildcard` must be set to `true` to avoid Terraform reporting a difference. The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `catalog_id` - (Optional) Identifier for the Data Catalog. By default, it is the account ID of the caller. * `excluded_column_names` - (Optional) Set of column names for the table to exclude. 
If `excluded_column_names` is included, `wildcard` must be set to `true` to avoid Terraform reporting a difference. @@ -159,4 +162,4 @@ The following arguments are optional: This resource exports no additional attributes. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/lambda_alias.html.markdown b/website/docs/cdktf/python/r/lambda_alias.html.markdown index 48f9dca8ccb9..1bbbc5de260a 100644 --- a/website/docs/cdktf/python/r/lambda_alias.html.markdown +++ b/website/docs/cdktf/python/r/lambda_alias.html.markdown @@ -3,24 +3,25 @@ subcategory: "Lambda" layout: "aws" page_title: "AWS: aws_lambda_alias" description: |- - Creates a Lambda function alias. + Manages an AWS Lambda Alias. --- # Resource: aws_lambda_alias -Creates a Lambda function alias. Creates an alias that points to the specified Lambda function version. +Manages an AWS Lambda Alias. Use this resource to create an alias that points to a specific Lambda function version for traffic management and deployment strategies. -For information about Lambda and how to use it, see [What is AWS Lambda?][1] -For information about function aliases, see [CreateAlias][2] and [AliasRoutingConfiguration][3] in the API docs. +For information about Lambda and how to use it, see [What is AWS Lambda?](http://docs.aws.amazon.com/lambda/latest/dg/welcome.html). For information about function aliases, see [CreateAlias](http://docs.aws.amazon.com/lambda/latest/dg/API_CreateAlias.html) and [AliasRoutingConfiguration](https://docs.aws.amazon.com/lambda/latest/dg/API_AliasRoutingConfiguration.html) in the API docs. ## Example Usage +### Basic Alias + ```python # DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug from constructs import Construct -from cdktf import TerraformStack +from cdktf import Token, TerraformStack # # Provider bindings are generated by running `cdktf get`. # See https://cdk.tf/provider-generation for more details. 
@@ -29,43 +30,115 @@ from imports.aws.lambda_alias import LambdaAlias class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) - LambdaAlias(self, "test_lambda_alias", - description="a sample description", - function_name=lambda_function_test.arn, + LambdaAlias(self, "example", + description="Production environment alias", + function_name=Token.as_string(aws_lambda_function_example.arn), function_version="1", - name="my_alias", + name="production" + ) +``` + +### Alias with Traffic Splitting + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import Token, TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.lambda_alias import LambdaAlias +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + LambdaAlias(self, "example", + description="Staging environment with traffic splitting", + function_name=Token.as_string(aws_lambda_function_example.function_name), + function_version="2", + name="staging", + routing_config=LambdaAliasRoutingConfig( + additional_version_weights={ + "1": 0.1, + "3": 0.2 + } + ) + ) +``` + +### Blue-Green Deployment Alias + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import Token, TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. 
+# +from imports.aws.lambda_alias import LambdaAlias +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + LambdaAlias(self, "example", + description="Live traffic with gradual rollout to new version", + function_name=Token.as_string(aws_lambda_function_example.function_name), + function_version="5", + name="live", routing_config=LambdaAliasRoutingConfig( additional_version_weights={ - "2": 0.5 + "6": 0.05 } ) ) ``` +### Development Alias + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import Token, TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.lambda_alias import LambdaAlias +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + LambdaAlias(self, "example", + description="Development environment - always points to latest", + function_name=Token.as_string(aws_lambda_function_example.function_name), + function_version="$LATEST", + name="dev" + ) +``` + ## Argument Reference -This resource supports the following arguments: +The following arguments are required: -* `name` - (Required) Name for the alias you are creating. Pattern: `(?!^[0-9]+$)([a-zA-Z0-9-_]+)` -* `description` - (Optional) Description of the alias. -* `function_name` - (Required) Lambda Function name or ARN. +* `function_name` - (Required) Name or ARN of the Lambda function. * `function_version` - (Required) Lambda function version for which you are creating the alias. Pattern: `(\$LATEST|[0-9]+)`. -* `routing_config` - (Optional) The Lambda alias' route configuration settings. Fields documented below +* `name` - (Required) Name for the alias. Pattern: `(?!^[0-9]+$)([a-zA-Z0-9-_]+)`. 
-`routing_config` supports the following arguments: +The following arguments are optional: -* `additional_version_weights` - (Optional) A map that defines the proportion of events that should be sent to different versions of a lambda function. +* `description` - (Optional) Description of the alias. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `routing_config` - (Optional) Lambda alias' route configuration settings. [See below](#routing_config-configuration-block). + +### routing_config Configuration Block + +* `additional_version_weights` - (Optional) Map that defines the proportion of events that should be sent to different versions of a Lambda function. ## Attribute Reference This resource exports the following attributes in addition to the arguments above: -* `arn` - The Amazon Resource Name (ARN) identifying your Lambda function alias. -* `invoke_arn` - The ARN to be used for invoking Lambda Function from API Gateway - to be used in [`aws_api_gateway_integration`](/docs/providers/aws/r/api_gateway_integration.html)'s `uri` - -[1]: http://docs.aws.amazon.com/lambda/latest/dg/welcome.html -[2]: http://docs.aws.amazon.com/lambda/latest/dg/API_CreateAlias.html -[3]: https://docs.aws.amazon.com/lambda/latest/dg/API_AliasRoutingConfiguration.html +* `arn` - ARN identifying your Lambda function alias. +* `invoke_arn` - ARN to be used for invoking Lambda Function from API Gateway - to be used in [`aws_api_gateway_integration`](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/api_gateway_integration)'s `uri`. 
## Import @@ -83,13 +156,13 @@ from imports.aws.lambda_alias import LambdaAlias class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) - LambdaAlias.generate_config_for_import(self, "testLambdaAlias", "my_test_lambda_function/my_alias") + LambdaAlias.generate_config_for_import(self, "example", "example/production") ``` -Using `terraform import`, import Lambda Function Aliases using the `function_name/alias`. For example: +For backwards compatibility, the following legacy `terraform import` command is also supported: ```console -% terraform import aws_lambda_alias.test_lambda_alias my_test_lambda_function/my_alias +% terraform import aws_lambda_alias.example example/production ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/lambda_code_signing_config.html.markdown b/website/docs/cdktf/python/r/lambda_code_signing_config.html.markdown index 42c6bff03ef1..9bb9672679dd 100644 --- a/website/docs/cdktf/python/r/lambda_code_signing_config.html.markdown +++ b/website/docs/cdktf/python/r/lambda_code_signing_config.html.markdown @@ -3,19 +3,21 @@ subcategory: "Lambda" layout: "aws" page_title: "AWS: aws_lambda_code_signing_config" description: |- - Provides a Lambda Code Signing Config resource. + Manages an AWS Lambda Code Signing Config. --- # Resource: aws_lambda_code_signing_config -Provides a Lambda Code Signing Config resource. A code signing configuration defines a list of allowed signing profiles and defines the code-signing validation policy (action to be taken if deployment validation checks fail). +Manages an AWS Lambda Code Signing Config. Use this resource to define allowed signing profiles and code-signing validation policies for Lambda functions to ensure code integrity and authenticity. 
-For information about Lambda code signing configurations and how to use them, see [configuring code signing for Lambda functions][1] +For information about Lambda code signing configurations and how to use them, see [configuring code signing for Lambda functions](https://docs.aws.amazon.com/lambda/latest/dg/configuration-codesigning.html). ## Example Usage +### Basic Usage + ```python # DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug from constructs import Construct @@ -25,50 +27,143 @@ from cdktf import TerraformStack # See https://cdk.tf/provider-generation for more details. # from imports.aws.lambda_code_signing_config import LambdaCodeSigningConfig +from imports.aws.signer_signing_profile import SignerSigningProfile class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) - LambdaCodeSigningConfig(self, "new_csc", + dev = SignerSigningProfile(self, "dev", + name_prefix="dev_lambda_", + platform_id="AWSLambda-SHA384-ECDSA", + tags={ + "Environment": "development" + } + ) + prod = SignerSigningProfile(self, "prod", + name_prefix="prod_lambda_", + platform_id="AWSLambda-SHA384-ECDSA", + tags={ + "Environment": "production" + } + ) + LambdaCodeSigningConfig(self, "example", allowed_publishers=LambdaCodeSigningConfigAllowedPublishers( - signing_profile_version_arns=[example1.version_arn, example2.version_arn] + signing_profile_version_arns=[prod.version_arn, dev.version_arn] ), - description="My awesome code signing config.", + description="Code signing configuration for Lambda functions", + policies=LambdaCodeSigningConfigPolicies( + untrusted_artifact_on_deployment="Enforce" + ), + tags={ + "Environment": "production", + "Purpose": "code-signing" + } + ) +``` + +### Warning Only Configuration + +```python +# DO NOT EDIT. 
Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.lambda_code_signing_config import LambdaCodeSigningConfig +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + LambdaCodeSigningConfig(self, "example", + allowed_publishers=LambdaCodeSigningConfigAllowedPublishers( + signing_profile_version_arns=[dev.version_arn] + ), + description="Development code signing configuration", policies=LambdaCodeSigningConfigPolicies( untrusted_artifact_on_deployment="Warn" ), tags={ - "Name": "dynamodb" + "Environment": "development", + "Purpose": "code-signing" + } + ) +``` + +### Multiple Environment Configuration + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import Token, TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. 
+# +from imports.aws.lambda_code_signing_config import LambdaCodeSigningConfig +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + LambdaCodeSigningConfig(self, "dev", + allowed_publishers=LambdaCodeSigningConfigAllowedPublishers( + signing_profile_version_arns=[ + Token.as_string(aws_signer_signing_profile_dev.version_arn), test.version_arn + ] + ), + description="Development code signing configuration with warnings", + policies=LambdaCodeSigningConfigPolicies( + untrusted_artifact_on_deployment="Warn" + ), + tags={ + "Environment": "development", + "Security": "flexible" + } + ) + LambdaCodeSigningConfig(self, "prod", + allowed_publishers=LambdaCodeSigningConfigAllowedPublishers( + signing_profile_version_arns=[ + Token.as_string(aws_signer_signing_profile_prod.version_arn) + ] + ), + description="Production code signing configuration with strict enforcement", + policies=LambdaCodeSigningConfigPolicies( + untrusted_artifact_on_deployment="Enforce" + ), + tags={ + "Environment": "production", + "Security": "strict" } ) ``` ## Argument Reference -This resource supports the following arguments: +The following arguments are required: + +* `allowed_publishers` - (Required) Configuration block of allowed publishers as signing profiles for this code signing configuration. [See below](#allowed_publishers-configuration-block). + +The following arguments are optional: -* `allowed_publishers` (Required) A configuration block of allowed publishers as signing profiles for this code signing configuration. Detailed below. -* `policies` (Optional) A configuration block of code signing policies that define the actions to take if the validation checks fail. Detailed below. * `description` - (Optional) Descriptive name for this code signing configuration. +* `policies` - (Optional) Configuration block of code signing policies that define the actions to take if the validation checks fail. 
[See below](#policies-configuration-block). +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `tags` - (Optional) Map of tags to assign to the object. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. -The `allowed_publishers` block supports the following argument: +### allowed_publishers Configuration Block -* `signing_profile_version_arns` - (Required) The Amazon Resource Name (ARN) for each of the signing profiles. A signing profile defines a trusted user who can sign a code package. +* `signing_profile_version_arns` - (Required) Set of ARNs for each of the signing profiles. A signing profile defines a trusted user who can sign a code package. Maximum of 20 signing profiles. -The `policies` block supports the following argument: +### policies Configuration Block -* `untrusted_artifact_on_deployment` - (Required) Code signing configuration policy for deployment validation failure. If you set the policy to Enforce, Lambda blocks the deployment request if code-signing validation checks fail. If you set the policy to Warn, Lambda allows the deployment and creates a CloudWatch log. Valid values: `Warn`, `Enforce`. Default value: `Warn`. +* `untrusted_artifact_on_deployment` - (Required) Code signing configuration policy for deployment validation failure. If you set the policy to `Enforce`, Lambda blocks the deployment request if code-signing validation checks fail. If you set the policy to `Warn`, Lambda allows the deployment and creates a CloudWatch log. Valid values: `Warn`, `Enforce`. Default value: `Warn`. 
## Attribute Reference This resource exports the following attributes in addition to the arguments above: -* `arn` - The Amazon Resource Name (ARN) of the code signing configuration. +* `arn` - ARN of the code signing configuration. * `config_id` - Unique identifier for the code signing configuration. -* `last_modified` - The date and time that the code signing configuration was last modified. -* `tags_all` - A map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). - -[1]: https://docs.aws.amazon.com/lambda/latest/dg/configuration-codesigning.html +* `last_modified` - Date and time that the code signing configuration was last modified. +* `tags_all` - Map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). ## Import @@ -86,13 +181,13 @@ from imports.aws.lambda_code_signing_config import LambdaCodeSigningConfig class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) - LambdaCodeSigningConfig.generate_config_for_import(self, "importedCsc", "arn:aws:lambda:us-west-2:123456789012:code-signing-config:csc-0f6c334abcdea4d8b") + LambdaCodeSigningConfig.generate_config_for_import(self, "example", "arn:aws:lambda:us-west-2:123456789012:code-signing-config:csc-0f6c334abcdea4d8b") ``` -Using `terraform import`, import Code Signing Configs using their ARN. 
For example: +For backwards compatibility, the following legacy `terraform import` command is also supported: ```console -% terraform import aws_lambda_code_signing_config.imported_csc arn:aws:lambda:us-west-2:123456789012:code-signing-config:csc-0f6c334abcdea4d8b +% terraform import aws_lambda_code_signing_config.example arn:aws:lambda:us-west-2:123456789012:code-signing-config:csc-0f6c334abcdea4d8b ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/lambda_event_source_mapping.html.markdown b/website/docs/cdktf/python/r/lambda_event_source_mapping.html.markdown index ca2c7f80f9c2..c8ef7e0d37db 100644 --- a/website/docs/cdktf/python/r/lambda_event_source_mapping.html.markdown +++ b/website/docs/cdktf/python/r/lambda_event_source_mapping.html.markdown @@ -3,21 +3,20 @@ subcategory: "Lambda" layout: "aws" page_title: "AWS: aws_lambda_event_source_mapping" description: |- - Provides a Lambda event source mapping. This allows Lambda functions to get events from Kinesis, DynamoDB, SQS, Amazon MQ and Managed Streaming for Apache Kafka (MSK). + Manages an AWS Lambda Event Source Mapping. --- # Resource: aws_lambda_event_source_mapping -Provides a Lambda event source mapping. This allows Lambda functions to get events from Kinesis, DynamoDB, SQS, Amazon MQ and Managed Streaming for Apache Kafka (MSK). +Manages an AWS Lambda Event Source Mapping. Use this resource to connect Lambda functions to event sources like Kinesis, DynamoDB, SQS, Amazon MQ, and Managed Streaming for Apache Kafka (MSK). -For information about Lambda and how to use it, see [What is AWS Lambda?][1]. -For information about event source mappings, see [CreateEventSourceMapping][2] in the API docs. +For information about Lambda and how to use it, see [What is AWS Lambda?](http://docs.aws.amazon.com/lambda/latest/dg/welcome.html). 
For information about event source mappings, see [CreateEventSourceMapping](http://docs.aws.amazon.com/lambda/latest/dg/API_CreateEventSourceMapping.html) in the API docs. ## Example Usage -### DynamoDB +### DynamoDB Stream ```python # DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug @@ -36,12 +35,12 @@ class MyConvertedCode(TerraformStack): function_name=Token.as_string(aws_lambda_function_example.arn), starting_position="LATEST", tags={ - "Name": "dynamodb" + "Name": "dynamodb-stream-mapping" } ) ``` -### Kinesis +### Kinesis Stream ```python # DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug @@ -56,13 +55,21 @@ class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) LambdaEventSourceMapping(self, "example", + batch_size=100, + destination_config=LambdaEventSourceMappingDestinationConfig( + on_failure=LambdaEventSourceMappingDestinationConfigOnFailure( + destination_arn=dlq.arn + ) + ), event_source_arn=Token.as_string(aws_kinesis_stream_example.arn), function_name=Token.as_string(aws_lambda_function_example.arn), + maximum_batching_window_in_seconds=5, + parallelization_factor=2, starting_position="LATEST" ) ``` -### Managed Streaming for Apache Kafka (MSK) +### SQS Queue ```python # DO NOT EDIT. 
Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug @@ -77,19 +84,21 @@ class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) LambdaEventSourceMapping(self, "example", - event_source_arn=Token.as_string(aws_msk_cluster_example.arn), + batch_size=10, + event_source_arn=Token.as_string(aws_sqs_queue_example.arn), function_name=Token.as_string(aws_lambda_function_example.arn), - starting_position="TRIM_HORIZON", - topics=["Example"] + scaling_config=LambdaEventSourceMappingScalingConfig( + maximum_concurrency=100 + ) ) ``` -### Self Managed Apache Kafka +### SQS with Event Filtering ```python # DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug from constructs import Construct -from cdktf import Token, TerraformStack +from cdktf import Token, Fn, TerraformStack # # Provider bindings are generated by running `cdktf get`. # See https://cdk.tf/provider-generation for more details. @@ -99,33 +108,27 @@ class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) LambdaEventSourceMapping(self, "example", - function_name=Token.as_string(aws_lambda_function_example.arn), - provisioned_poller_config=LambdaEventSourceMappingProvisionedPollerConfig( - maximum_pollers=80, - minimum_pollers=10 - ), - self_managed_event_source=LambdaEventSourceMappingSelfManagedEventSource( - endpoints={ - "KAFKA_BOOTSTRAP_SERVERS": "kafka1.example.com:9092,kafka2.example.com:9092" - } + event_source_arn=Token.as_string(aws_sqs_queue_example.arn), + filter_criteria=LambdaEventSourceMappingFilterCriteria( + filter=[LambdaEventSourceMappingFilterCriteriaFilter( + pattern=Token.as_string( + Fn.jsonencode({ + "body": { + "Location": ["New York"], + "Temperature": [{ + "numeric": [">", 0, "<=", 100] + } + ] + } + })) + ) + ] ), - source_access_configuration=[LambdaEventSourceMappingSourceAccessConfiguration( - type="VPC_SUBNET", - 
uri="subnet:subnet-example1" - ), LambdaEventSourceMappingSourceAccessConfiguration( - type="VPC_SUBNET", - uri="subnet:subnet-example2" - ), LambdaEventSourceMappingSourceAccessConfiguration( - type="VPC_SECURITY_GROUP", - uri="security_group:sg-example" - ) - ], - starting_position="TRIM_HORIZON", - topics=["Example"] + function_name=Token.as_string(aws_lambda_function_example.arn) ) ``` -### SQS +### Amazon MSK ```python # DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug @@ -140,17 +143,23 @@ class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) LambdaEventSourceMapping(self, "example", - event_source_arn=sqs_queue_test.arn, - function_name=Token.as_string(aws_lambda_function_example.arn) + amazon_managed_kafka_event_source_config=LambdaEventSourceMappingAmazonManagedKafkaEventSourceConfig( + consumer_group_id="lambda-consumer-group" + ), + batch_size=100, + event_source_arn=Token.as_string(aws_msk_cluster_example.arn), + function_name=Token.as_string(aws_lambda_function_example.arn), + starting_position="TRIM_HORIZON", + topics=["orders", "inventory"] ) ``` -### SQS with event filter +### Self-Managed Apache Kafka ```python # DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug from constructs import Construct -from cdktf import Fn, Token, TerraformStack +from cdktf import Token, TerraformStack # # Provider bindings are generated by running `cdktf get`. # See https://cdk.tf/provider-generation for more details. 
@@ -160,23 +169,32 @@ class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) LambdaEventSourceMapping(self, "example", - event_source_arn=sqs_queue_test.arn, - filter_criteria=LambdaEventSourceMappingFilterCriteria( - filter=[LambdaEventSourceMappingFilterCriteriaFilter( - pattern=Token.as_string( - Fn.jsonencode({ - "body": { - "Location": ["New York"], - "Temperature": [{ - "numeric": [">", 0, "<=", 100] - } - ] - } - })) - ) - ] + function_name=Token.as_string(aws_lambda_function_example.arn), + provisioned_poller_config=LambdaEventSourceMappingProvisionedPollerConfig( + maximum_pollers=100, + minimum_pollers=10 ), - function_name=Token.as_string(aws_lambda_function_example.arn) + self_managed_event_source=LambdaEventSourceMappingSelfManagedEventSource( + endpoints={ + "KAFKA_BOOTSTRAP_SERVERS": "kafka1.example.com:9092,kafka2.example.com:9092" + } + ), + self_managed_kafka_event_source_config=LambdaEventSourceMappingSelfManagedKafkaEventSourceConfig( + consumer_group_id="lambda-consumer-group" + ), + source_access_configuration=[LambdaEventSourceMappingSourceAccessConfiguration( + type="VPC_SUBNET", + uri="subnet:${" + example1.id + "}" + ), LambdaEventSourceMappingSourceAccessConfiguration( + type="VPC_SUBNET", + uri="subnet:${" + example2.id + "}" + ), LambdaEventSourceMappingSourceAccessConfiguration( + type="VPC_SECURITY_GROUP", + uri="security_group:${" + aws_security_group_example.id + "}" + ) + ], + starting_position="TRIM_HORIZON", + topics=["orders"] ) ``` @@ -196,10 +214,9 @@ class MyConvertedCode(TerraformStack): super().__init__(scope, name) LambdaEventSourceMapping(self, "example", batch_size=10, - enabled=True, event_source_arn=Token.as_string(aws_mq_broker_example.arn), function_name=Token.as_string(aws_lambda_function_example.arn), - queues=["example"], + queues=["orders"], source_access_configuration=[LambdaEventSourceMappingSourceAccessConfiguration( type="BASIC_AUTH", 
uri=Token.as_string(aws_secretsmanager_secret_version_example.arn) @@ -224,13 +241,12 @@ class MyConvertedCode(TerraformStack): super().__init__(scope, name) LambdaEventSourceMapping(self, "example", batch_size=1, - enabled=True, event_source_arn=Token.as_string(aws_mq_broker_example.arn), function_name=Token.as_string(aws_lambda_function_example.arn), - queues=["example"], + queues=["orders"], source_access_configuration=[LambdaEventSourceMappingSourceAccessConfiguration( type="VIRTUAL_HOST", - uri="/example" + uri="/production" ), LambdaEventSourceMappingSourceAccessConfiguration( type="BASIC_AUTH", uri=Token.as_string(aws_secretsmanager_secret_version_example.arn) @@ -239,105 +255,137 @@ class MyConvertedCode(TerraformStack): ) ``` +### DocumentDB Change Stream + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import Token, TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. 
+# +from imports.aws.lambda_event_source_mapping import LambdaEventSourceMapping +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + LambdaEventSourceMapping(self, "example", + document_db_event_source_config=LambdaEventSourceMappingDocumentDbEventSourceConfig( + collection_name="transactions", + database_name="orders", + full_document="UpdateLookup" + ), + event_source_arn=Token.as_string(aws_docdb_cluster_example.arn), + function_name=Token.as_string(aws_lambda_function_example.arn), + source_access_configuration=[LambdaEventSourceMappingSourceAccessConfiguration( + type="BASIC_AUTH", + uri=Token.as_string(aws_secretsmanager_secret_version_example.arn) + ) + ], + starting_position="LATEST" + ) +``` + ## Argument Reference -This resource supports the following arguments: - -* `amazon_managed_kafka_event_source_config` - (Optional) Additional configuration block for Amazon Managed Kafka sources. Incompatible with "self_managed_event_source" and "self_managed_kafka_event_source_config". Detailed below. -* `batch_size` - (Optional) The largest number of records that Lambda will retrieve from your event source at the time of invocation. Defaults to `100` for DynamoDB, Kinesis, MQ and MSK, `10` for SQS. -* `bisect_batch_on_function_error`: - (Optional) If the function returns an error, split the batch in two and retry. Only available for stream sources (DynamoDB and Kinesis). Defaults to `false`. -* `destination_config`: - (Optional) An Amazon SQS queue, Amazon SNS topic or Amazon S3 bucket (only available for Kafka sources) destination for failed records. Only available for stream sources (DynamoDB and Kinesis) and Kafka sources (Amazon MSK and Self-managed Apache Kafka). Detailed below. -* `document_db_event_source_config`: - (Optional) Configuration settings for a DocumentDB event source. Detailed below. -* `enabled` - (Optional) Determines if the mapping is enabled. 
This parameter can be used to enable or disable the mapping, both during resource creation and for already created resources. Defaults to `true`. -* `event_source_arn` - (Optional) The event source ARN - this is required for Kinesis stream, DynamoDB stream, SQS queue, MQ broker, MSK cluster or DocumentDB change stream. It is incompatible with a Self Managed Kafka source. -* `filter_criteria` - (Optional) The criteria to use for [event filtering](https://docs.aws.amazon.com/lambda/latest/dg/invocation-eventfiltering.html) Kinesis stream, DynamoDB stream, SQS queue event sources. Detailed below. -* `function_name` - (Required) The name or the ARN of the Lambda function that will be subscribing to events. -* `function_response_types` - (Optional) A list of current response type enums applied to the event source mapping for [AWS Lambda checkpointing](https://docs.aws.amazon.com/lambda/latest/dg/with-ddb.html#services-ddb-batchfailurereporting). Only available for SQS and stream sources (DynamoDB and Kinesis). Valid values: `ReportBatchItemFailures`. -* `kms_key_arn` - (Optional) The ARN of the Key Management Service (KMS) customer managed key that Lambda uses to encrypt your function's filter criteria. -* `maximum_batching_window_in_seconds` - (Optional) The maximum amount of time to gather records before invoking the function, in seconds (between 0 and 300). Records will continue to buffer (or accumulate in the case of an SQS queue event source) until either `maximum_batching_window_in_seconds` expires or `batch_size` has been met. For streaming event sources, defaults to as soon as records are available in the stream. If the batch it reads from the stream/queue only has one record in it, Lambda only sends one record to the function. Only available for stream sources (DynamoDB and Kinesis) and SQS standard queues. -* `maximum_record_age_in_seconds`: - (Optional) The maximum age of a record that Lambda sends to a function for processing. 
Only available for stream sources (DynamoDB and Kinesis). Must be either -1 (forever, and the default value) or between 60 and 604800 (inclusive). -* `maximum_retry_attempts`: - (Optional) The maximum number of times to retry when the function returns an error. Only available for stream sources (DynamoDB and Kinesis). Minimum and default of -1 (forever), maximum of 10000. -* `metrics_config`: - (Optional) CloudWatch metrics configuration of the event source. Only available for stream sources (DynamoDB and Kinesis) and SQS queues. Detailed below. -* `parallelization_factor`: - (Optional) The number of batches to process from each shard concurrently. Only available for stream sources (DynamoDB and Kinesis). Minimum and default of 1, maximum of 10. -* `provisioned_poller_config`: - (Optional) Event poller configuration for the event source. Only valid for Amazon MSK or self-managed Apache Kafka sources. Detailed below. -* `queues` - (Optional) The name of the Amazon MQ broker destination queue to consume. Only available for MQ sources. The list must contain exactly one queue name. -* `scaling_config` - (Optional) Scaling configuration of the event source. Only available for SQS queues. Detailed below. -* `self_managed_event_source`: - (Optional) For Self Managed Kafka sources, the location of the self managed cluster. If set, configuration must also include `source_access_configuration`. Detailed below. -* `self_managed_kafka_event_source_config` - (Optional) Additional configuration block for Self Managed Kafka sources. Incompatible with "event_source_arn" and "amazon_managed_kafka_event_source_config". Detailed below. -* `source_access_configuration`: (Optional) For Self Managed Kafka sources, the access configuration for the source. If set, configuration must also include `self_managed_event_source`. Detailed below. -* `starting_position` - (Optional) The position in the stream where AWS Lambda should start reading. 
Must be one of `AT_TIMESTAMP` (Kinesis only), `LATEST` or `TRIM_HORIZON` if getting events from Kinesis, DynamoDB, MSK or Self Managed Apache Kafka. Must not be provided if getting events from SQS. More information about these positions can be found in the [AWS DynamoDB Streams API Reference](https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_streams_GetShardIterator.html) and [AWS Kinesis API Reference](https://docs.aws.amazon.com/kinesis/latest/APIReference/API_GetShardIterator.html#Kinesis-GetShardIterator-request-ShardIteratorType). -* `starting_position_timestamp` - (Optional) A timestamp in [RFC3339 format](https://tools.ietf.org/html/rfc3339#section-5.8) of the data record which to start reading when using `starting_position` set to `AT_TIMESTAMP`. If a record with this exact timestamp does not exist, the next later record is chosen. If the timestamp is older than the current trim horizon, the oldest available record is chosen. +The following arguments are required: + +* `function_name` - (Required) Name or ARN of the Lambda function that will be subscribing to events. + +The following arguments are optional: + +* `amazon_managed_kafka_event_source_config` - (Optional) Additional configuration block for Amazon Managed Kafka sources. Incompatible with `self_managed_event_source` and `self_managed_kafka_event_source_config`. [See below](#amazon_managed_kafka_event_source_config-configuration-block). +* `batch_size` - (Optional) Largest number of records that Lambda will retrieve from your event source at the time of invocation. Defaults to `100` for DynamoDB, Kinesis, MQ and MSK, `10` for SQS. +* `bisect_batch_on_function_error` - (Optional) Whether to split the batch in two and retry if the function returns an error. Only available for stream sources (DynamoDB and Kinesis). Defaults to `false`. 
+* `destination_config` - (Optional) Amazon SQS queue, Amazon SNS topic or Amazon S3 bucket (only available for Kafka sources) destination for failed records. Only available for stream sources (DynamoDB and Kinesis) and Kafka sources (Amazon MSK and Self-managed Apache Kafka). [See below](#destination_config-configuration-block). +* `document_db_event_source_config` - (Optional) Configuration settings for a DocumentDB event source. [See below](#document_db_event_source_config-configuration-block). +* `enabled` - (Optional) Whether the mapping is enabled. Defaults to `true`. +* `event_source_arn` - (Optional) Event source ARN - required for Kinesis stream, DynamoDB stream, SQS queue, MQ broker, MSK cluster or DocumentDB change stream. Incompatible with Self Managed Kafka source. +* `filter_criteria` - (Optional) Criteria to use for [event filtering](https://docs.aws.amazon.com/lambda/latest/dg/invocation-eventfiltering.html) Kinesis stream, DynamoDB stream, SQS queue event sources. [See below](#filter_criteria-configuration-block). +* `function_response_types` - (Optional) List of current response type enums applied to the event source mapping for [AWS Lambda checkpointing](https://docs.aws.amazon.com/lambda/latest/dg/with-ddb.html#services-ddb-batchfailurereporting). Only available for SQS and stream sources (DynamoDB and Kinesis). Valid values: `ReportBatchItemFailures`. +* `kms_key_arn` - (Optional) ARN of the Key Management Service (KMS) customer managed key that Lambda uses to encrypt your function's filter criteria. +* `maximum_batching_window_in_seconds` - (Optional) Maximum amount of time to gather records before invoking the function, in seconds (between 0 and 300). Records will continue to buffer until either `maximum_batching_window_in_seconds` expires or `batch_size` has been met. For streaming event sources, defaults to as soon as records are available in the stream. Only available for stream sources (DynamoDB and Kinesis) and SQS standard queues. 
+* `maximum_record_age_in_seconds` - (Optional) Maximum age of a record that Lambda sends to a function for processing. Only available for stream sources (DynamoDB and Kinesis). Must be either -1 (forever, and the default value) or between 60 and 604800 (inclusive). +* `maximum_retry_attempts` - (Optional) Maximum number of times to retry when the function returns an error. Only available for stream sources (DynamoDB and Kinesis). Minimum and default of -1 (forever), maximum of 10000. +* `metrics_config` - (Optional) CloudWatch metrics configuration of the event source. Only available for stream sources (DynamoDB and Kinesis) and SQS queues. [See below](#metrics_config-configuration-block). +* `parallelization_factor` - (Optional) Number of batches to process from each shard concurrently. Only available for stream sources (DynamoDB and Kinesis). Minimum and default of 1, maximum of 10. +* `provisioned_poller_config` - (Optional) Event poller configuration for the event source. Only valid for Amazon MSK or self-managed Apache Kafka sources. [See below](#provisioned_poller_config-configuration-block). +* `queues` - (Optional) Name of the Amazon MQ broker destination queue to consume. Only available for MQ sources. The list must contain exactly one queue name. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `scaling_config` - (Optional) Scaling configuration of the event source. Only available for SQS queues. [See below](#scaling_config-configuration-block). +* `self_managed_event_source` - (Optional) For Self Managed Kafka sources, the location of the self managed cluster. If set, configuration must also include `source_access_configuration`. [See below](#self_managed_event_source-configuration-block). 
+* `self_managed_kafka_event_source_config` - (Optional) Additional configuration block for Self Managed Kafka sources. Incompatible with `event_source_arn` and `amazon_managed_kafka_event_source_config`. [See below](#self_managed_kafka_event_source_config-configuration-block). +* `source_access_configuration` - (Optional) For Self Managed Kafka sources, the access configuration for the source. If set, configuration must also include `self_managed_event_source`. [See below](#source_access_configuration-configuration-block). +* `starting_position` - (Optional) Position in the stream where AWS Lambda should start reading. Must be one of `AT_TIMESTAMP` (Kinesis only), `LATEST` or `TRIM_HORIZON` if getting events from Kinesis, DynamoDB, MSK or Self Managed Apache Kafka. Must not be provided if getting events from SQS. More information about these positions can be found in the [AWS DynamoDB Streams API Reference](https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_streams_GetShardIterator.html) and [AWS Kinesis API Reference](https://docs.aws.amazon.com/kinesis/latest/APIReference/API_GetShardIterator.html#Kinesis-GetShardIterator-request-ShardIteratorType). +* `starting_position_timestamp` - (Optional) Timestamp in [RFC3339 format](https://tools.ietf.org/html/rfc3339#section-5.8) of the data record which to start reading when using `starting_position` set to `AT_TIMESTAMP`. If a record with this exact timestamp does not exist, the next later record is chosen. If the timestamp is older than the current trim horizon, the oldest available record is chosen. * `tags` - (Optional) Map of tags to assign to the object. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. -* `topics` - (Optional) The name of the Kafka topics. Only available for MSK sources. 
A single topic name must be specified. -* `tumbling_window_in_seconds` - (Optional) The duration in seconds of a processing window for [AWS Lambda streaming analytics](https://docs.aws.amazon.com/lambda/latest/dg/with-kinesis.html#services-kinesis-windows). The range is between 1 second up to 900 seconds. Only available for stream sources (DynamoDB and Kinesis). +* `topics` - (Optional) Name of the Kafka topics. Only available for MSK sources. A single topic name must be specified. +* `tumbling_window_in_seconds` - (Optional) Duration in seconds of a processing window for [AWS Lambda streaming analytics](https://docs.aws.amazon.com/lambda/latest/dg/with-kinesis.html#services-kinesis-windows). The range is between 1 second up to 900 seconds. Only available for stream sources (DynamoDB and Kinesis). ### amazon_managed_kafka_event_source_config Configuration Block -* `consumer_group_id` - (Optional) A Kafka consumer group ID between 1 and 200 characters for use when creating this event source mapping. If one is not specified, this value will be automatically generated. See [AmazonManagedKafkaEventSourceConfig Syntax](https://docs.aws.amazon.com/lambda/latest/dg/API_AmazonManagedKafkaEventSourceConfig.html). +* `consumer_group_id` - (Optional) Kafka consumer group ID between 1 and 200 characters for use when creating this event source mapping. If one is not specified, this value will be automatically generated. See [AmazonManagedKafkaEventSourceConfig Syntax](https://docs.aws.amazon.com/lambda/latest/dg/API_AmazonManagedKafkaEventSourceConfig.html). ### destination_config Configuration Block -* `on_failure` - (Optional) The destination configuration for failed invocations. Detailed below. +* `on_failure` - (Optional) Destination configuration for failed invocations. [See below](#destination_config-on_failure-configuration-block). 
#### destination_config on_failure Configuration Block -* `destination_arn` - (Required) The Amazon Resource Name (ARN) of the destination resource. +* `destination_arn` - (Required) ARN of the destination resource. ### document_db_event_source_config Configuration Block -* `collection_name` - (Optional) The name of the collection to consume within the database. If you do not specify a collection, Lambda consumes all collections. -* `database_name` - (Required) The name of the database to consume within the DocumentDB cluster. +* `collection_name` - (Optional) Name of the collection to consume within the database. If you do not specify a collection, Lambda consumes all collections. +* `database_name` - (Required) Name of the database to consume within the DocumentDB cluster. * `full_document` - (Optional) Determines what DocumentDB sends to your event stream during document update operations. If set to `UpdateLookup`, DocumentDB sends a delta describing the changes, along with a copy of the entire document. Otherwise, DocumentDB sends only a partial document that contains the changes. Valid values: `UpdateLookup`, `Default`. ### filter_criteria Configuration Block -* `filter` - (Optional) A set of up to 5 filter. If an event satisfies at least one, Lambda sends the event to the function or adds it to the next batch. Detailed below. +* `filter` - (Optional) Set of up to 5 filters. If an event satisfies at least one, Lambda sends the event to the function or adds it to the next batch. [See below](#filter_criteria-filter-configuration-block). #### filter_criteria filter Configuration Block -* `pattern` - (Optional) A filter pattern up to 4096 characters. See [Filter Rule Syntax](https://docs.aws.amazon.com/lambda/latest/dg/invocation-eventfiltering.html#filtering-syntax). +* `pattern` - (Optional) Filter pattern up to 4096 characters. See [Filter Rule Syntax](https://docs.aws.amazon.com/lambda/latest/dg/invocation-eventfiltering.html#filtering-syntax). 
### metrics_config Configuration Block -* `metrics` - (Required) A list containing the metrics to be produced by the event source mapping. Valid values: `EventCount`. +* `metrics` - (Required) List containing the metrics to be produced by the event source mapping. Valid values: `EventCount`. ### provisioned_poller_config Configuration Block -* `maximum_pollers` - (Optional) The maximum number of event pollers this event source can scale up to. The range is between 1 and 2000. -* `minimum_pollers` - (Optional) The minimum number of event pollers this event source can scale down to. The range is between 1 and 200. +* `maximum_pollers` - (Optional) Maximum number of event pollers this event source can scale up to. The range is between 1 and 2000. +* `minimum_pollers` - (Optional) Minimum number of event pollers this event source can scale down to. The range is between 1 and 200. ### scaling_config Configuration Block -* `maximum_concurrency` - (Optional) Limits the number of concurrent instances that the Amazon SQS event source can invoke. Must be greater than or equal to `2`. See [Configuring maximum concurrency for Amazon SQS event sources](https://docs.aws.amazon.com/lambda/latest/dg/with-sqs.html#events-sqs-max-concurrency). You need to raise a [Service Quota Ticket](https://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html) to increase the concurrency beyond 1000. +* `maximum_concurrency` - (Optional) Limits the number of concurrent instances that the Amazon SQS event source can invoke. Must be greater than or equal to 2. See [Configuring maximum concurrency for Amazon SQS event sources](https://docs.aws.amazon.com/lambda/latest/dg/with-sqs.html#events-sqs-max-concurrency). You need to raise a [Service Quota Ticket](https://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html) to increase the concurrency beyond 1000. ### self_managed_event_source Configuration Block -* `endpoints` - (Required) A map of endpoints for the self managed source. 
For Kafka self-managed sources, the key should be `KAFKA_BOOTSTRAP_SERVERS` and the value should be a string with a comma separated list of broker endpoints. +* `endpoints` - (Required) Map of endpoints for the self managed source. For Kafka self-managed sources, the key should be `KAFKA_BOOTSTRAP_SERVERS` and the value should be a string with a comma separated list of broker endpoints. ### self_managed_kafka_event_source_config Configuration Block -* `consumer_group_id` - (Optional) A Kafka consumer group ID between 1 and 200 characters for use when creating this event source mapping. If one is not specified, this value will be automatically generated. See [SelfManagedKafkaEventSourceConfig Syntax](https://docs.aws.amazon.com/lambda/latest/dg/API_SelfManagedKafkaEventSourceConfig.html). +* `consumer_group_id` - (Optional) Kafka consumer group ID between 1 and 200 characters for use when creating this event source mapping. If one is not specified, this value will be automatically generated. See [SelfManagedKafkaEventSourceConfig Syntax](https://docs.aws.amazon.com/lambda/latest/dg/API_SelfManagedKafkaEventSourceConfig.html). ### source_access_configuration Configuration Block -* `type` - (Required) The type of authentication protocol, VPC components, or virtual host for your event source. For valid values, refer to the [AWS documentation](https://docs.aws.amazon.com/lambda/latest/api/API_SourceAccessConfiguration.html). -* `uri` - (Required) The URI for this configuration. For type `VPC_SUBNET` the value should be `subnet:subnet_id` where `subnet_id` is the value you would find in an aws_subnet resource's id attribute. For type `VPC_SECURITY_GROUP` the value should be `security_group:security_group_id` where `security_group_id` is the value you would find in an aws_security_group resource's id attribute. +* `type` - (Required) Type of authentication protocol, VPC components, or virtual host for your event source. 
For valid values, refer to the [AWS documentation](https://docs.aws.amazon.com/lambda/latest/api/API_SourceAccessConfiguration.html). +* `uri` - (Required) URI for this configuration. For type `VPC_SUBNET` the value should be `subnet:subnet_id` where `subnet_id` is the value you would find in an aws_subnet resource's id attribute. For type `VPC_SECURITY_GROUP` the value should be `security_group:security_group_id` where `security_group_id` is the value you would find in an aws_security_group resource's id attribute. ## Attribute Reference This resource exports the following attributes in addition to the arguments above: -* `arn` - The event source mapping ARN. -* `function_arn` - The ARN of the Lambda function the event source mapping is sending events to. (Note: this is a computed value that differs from `function_name` above.) -* `last_modified` - The date this resource was last modified. -* `last_processing_result` - The result of the last AWS Lambda invocation of your Lambda function. -* `state` - The state of the event source mapping. -* `state_transition_reason` - The reason the event source mapping is in its current state. -* `tags_all` - A map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). -* `uuid` - The UUID of the created event source mapping. - -[1]: http://docs.aws.amazon.com/lambda/latest/dg/welcome.html -[2]: http://docs.aws.amazon.com/lambda/latest/dg/API_CreateEventSourceMapping.html +* `arn` - Event source mapping ARN. +* `function_arn` - ARN of the Lambda function the event source mapping is sending events to. (Note: this is a computed value that differs from `function_name` above.) +* `last_modified` - Date this resource was last modified. +* `last_processing_result` - Result of the last AWS Lambda invocation of your Lambda function. 
+* `state` - State of the event source mapping. +* `state_transition_reason` - Reason the event source mapping is in its current state. +* `tags_all` - Map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). +* `uuid` - UUID of the created event source mapping. ## Import @@ -355,13 +403,13 @@ from imports.aws.lambda_event_source_mapping import LambdaEventSourceMapping class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) - LambdaEventSourceMapping.generate_config_for_import(self, "eventSourceMapping", "12345kxodurf3443") + LambdaEventSourceMapping.generate_config_for_import(self, "example", "12345kxodurf3443") ``` Using `terraform import`, import Lambda event source mappings using the `UUID` (event source mapping identifier). For example: ```console -% terraform import aws_lambda_event_source_mapping.event_source_mapping 12345kxodurf3443 +% terraform import aws_lambda_event_source_mapping.example 12345kxodurf3443 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/lambda_function.html.markdown b/website/docs/cdktf/python/r/lambda_function.html.markdown index 753e39b2f7c1..36401e11f936 100644 --- a/website/docs/cdktf/python/r/lambda_function.html.markdown +++ b/website/docs/cdktf/python/r/lambda_function.html.markdown @@ -3,28 +3,26 @@ subcategory: "Lambda" layout: "aws" page_title: "AWS: aws_lambda_function" description: |- - Provides a Lambda Function resource. Lambda allows you to trigger execution of code in response to events in AWS, enabling serverless backend solutions. The Lambda Function itself includes source code and runtime configuration. + Manages an AWS Lambda Function. --- # Resource: aws_lambda_function -Provides a Lambda Function resource. 
Lambda allows you to trigger execution of code in response to events in AWS, enabling serverless backend solutions. The Lambda Function itself includes source code and runtime configuration. +Manages an AWS Lambda Function. Use this resource to create serverless functions that run code in response to events without provisioning or managing servers. -For information about Lambda and how to use it, see [What is AWS Lambda?][1] +For information about Lambda and how to use it, see [What is AWS Lambda?](https://docs.aws.amazon.com/lambda/latest/dg/welcome.html). For a detailed example of setting up Lambda and API Gateway, see [Serverless Applications with AWS Lambda and API Gateway](https://learn.hashicorp.com/terraform/aws/lambda-api-gateway). -For a detailed example of setting up Lambda and API Gateway, see [Serverless Applications with AWS Lambda and API Gateway.][11] +~> **Note:** Due to [AWS Lambda improved VPC networking changes that began deploying in September 2019](https://aws.amazon.com/blogs/compute/announcing-improved-vpc-networking-for-aws-lambda-functions/), EC2 subnets and security groups associated with Lambda Functions can take up to 45 minutes to successfully delete. Terraform AWS Provider version 2.31.0 and later automatically handles this increased timeout, however prior versions require setting the customizable deletion timeouts of those Terraform resources to 45 minutes (`delete = "45m"`). AWS and HashiCorp are working together to reduce the amount of time required for resource deletion and updates can be tracked in this [GitHub issue](https://github.com/hashicorp/terraform-provider-aws/issues/10329). -~> **NOTE:** Due to [AWS Lambda improved VPC networking changes that began deploying in September 2019](https://aws.amazon.com/blogs/compute/announcing-improved-vpc-networking-for-aws-lambda-functions/), EC2 subnets and security groups associated with Lambda Functions can take up to 45 minutes to successfully delete. 
Terraform AWS Provider version 2.31.0 and later automatically handles this increased timeout, however prior versions require setting the customizable deletion timeouts of those Terraform resources to 45 minutes (`delete = "45m"`). AWS and HashiCorp are working together to reduce the amount of time required for resource deletion and updates can be tracked in this [GitHub issue](https://github.com/hashicorp/terraform-provider-aws/issues/10329). +~> **Note:** If you get a `KMSAccessDeniedException: Lambda was unable to decrypt the environment variables because KMS access was denied` error when invoking an `aws_lambda_function` with environment variables, the IAM role associated with the function may have been deleted and recreated after the function was created. You can fix the problem two ways: 1) updating the function's role to another role and then updating it back again to the recreated role, or 2) by using Terraform to `taint` the function and `apply` your configuration again to recreate the function. (When you create a function, Lambda grants permissions on the KMS key to the function's IAM role. If the IAM role is recreated, the grant is no longer valid. Changing the function's role or recreating the function causes Lambda to update the grant.) -~> **NOTE:** If you get a `KMSAccessDeniedException: Lambda was unable to decrypt the environment variables because KMS access was denied` error when invoking an [`aws_lambda_function`](/docs/providers/aws/r/lambda_function.html) with environment variables, the IAM role associated with the function may have been deleted and recreated _after_ the function was created. You can fix the problem two ways: 1) updating the function's role to another role and then updating it back again to the recreated role, or 2) by using Terraform to `taint` the function and `apply` your configuration again to recreate the function. (When you create a function, Lambda grants permissions on the KMS key to the function's IAM role. 
If the IAM role is recreated, the grant is no longer valid. Changing the function's role or recreating the function causes Lambda to update the grant.) - --> To give an external source (like an EventBridge Rule, SNS, or S3) permission to access the Lambda function, use the [`aws_lambda_permission`](lambda_permission.html) resource. See [Lambda Permission Model][4] for more details. On the other hand, the `role` argument of this resource is the function's execution role for identity and access to AWS services and resources. +-> **Tip:** To give an external source (like an EventBridge Rule, SNS, or S3) permission to access the Lambda function, use the [`aws_lambda_permission`](lambda_permission.html) resource. See [Lambda Permission Model](https://docs.aws.amazon.com/lambda/latest/dg/intro-permission-model.html) for more details. On the other hand, the `role` argument of this resource is the function's execution role for identity and access to AWS services and resources. ## Example Usage -### Basic Example +### Basic Function with Node.js ```python # DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug @@ -43,9 +41,9 @@ class MyConvertedCode(TerraformStack): super().__init__(scope, name) # The following providers are missing schema information and might need manual adjustments to synthesize correctly: archive. # For a more precise conversion please use the --provider flag in convert. 
- lambda_ = DataArchiveFile(self, "lambda", - output_path="lambda_function_payload.zip", - source_file="lambda.js", + example = DataArchiveFile(self, "example", + output_path="${path.module}/lambda/function.zip", + source_file="${path.module}/lambda/index.js", type="zip" ) assume_role = DataAwsIamPolicyDocument(self, "assume_role", @@ -60,33 +58,71 @@ class MyConvertedCode(TerraformStack): ) ] ) - iam_for_lambda = IamRole(self, "iam_for_lambda", + aws_iam_role_example = IamRole(self, "example_2", assume_role_policy=Token.as_string(assume_role.json), - name="iam_for_lambda" + name="lambda_execution_role" ) - LambdaFunction(self, "test_lambda", + # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. + aws_iam_role_example.override_logical_id("example") + aws_lambda_function_example = LambdaFunction(self, "example_3", environment=LambdaFunctionEnvironment( variables={ - "foo": "bar" + "ENVIRONMENT": "production", + "LOG_LEVEL": "info" } ), - filename="lambda_function_payload.zip", - function_name="lambda_function_name", - handler="index.test", - role=iam_for_lambda.arn, - runtime="nodejs18.x", - source_code_hash=Token.as_string(lambda_.output_base64_sha256) + filename=Token.as_string(example.output_path), + function_name="example_lambda_function", + handler="index.handler", + role=Token.as_string(aws_iam_role_example.arn), + runtime="nodejs20.x", + source_code_hash=Token.as_string(example.output_base64_sha256), + tags={ + "Application": "example", + "Environment": "production" + } + ) + # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. + aws_lambda_function_example.override_logical_id("example") +``` + +### Container Image Function + +```python +# DO NOT EDIT. 
Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import Token, TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.lambda_function import LambdaFunction +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + LambdaFunction(self, "example", + architectures=["arm64"], + function_name="example_container_function", + image_config=LambdaFunctionImageConfig( + command=["app.handler"], + entry_point=["/lambda-entrypoint.sh"] + ), + image_uri="${" + aws_ecr_repository_example.repository_url + "}:latest", + memory_size=512, + package_type="Image", + role=Token.as_string(aws_iam_role_example.arn), + timeout=30 ) ``` -### Lambda Layers +### Function with Lambda Layers -~> **NOTE:** The `aws_lambda_layer_version` attribute values for `arn` and `layer_arn` were swapped in version 2.0.0 of the Terraform AWS Provider. For version 1.x, use `layer_arn` references. For version 2.x, use `arn` references. +~> **Note:** The `aws_lambda_layer_version` attribute values for `arn` and `layer_arn` were swapped in version 2.0.0 of the Terraform AWS Provider. For version 2.x, use `arn` references. ```python # DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug from constructs import Construct -from cdktf import TerraformStack +from cdktf import Token, TerraformStack # # Provider bindings are generated by running `cdktf get`. # See https://cdk.tf/provider-generation for more details. 
@@ -94,23 +130,31 @@ from cdktf import TerraformStack from imports.aws.lambda_function import LambdaFunction from imports.aws.lambda_layer_version import LambdaLayerVersion class MyConvertedCode(TerraformStack): - def __init__(self, scope, name, *, layerName, functionName, role): + def __init__(self, scope, name): super().__init__(scope, name) example = LambdaLayerVersion(self, "example", - layer_name=layer_name + compatible_architectures=["x86_64", "arm64"], + compatible_runtimes=["nodejs20.x", "python3.12"], + description="Common dependencies for Lambda functions", + filename="layer.zip", + layer_name="example_dependencies_layer" ) aws_lambda_function_example = LambdaFunction(self, "example_1", + filename="function.zip", + function_name="example_layered_function", + handler="index.handler", layers=[example.arn], - function_name=function_name, - role=role + role=Token.as_string(aws_iam_role_example.arn), + runtime="nodejs20.x", + tracing_config=LambdaFunctionTracingConfig( + mode="Active" + ) ) # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. aws_lambda_function_example.override_logical_id("example") ``` -### Lambda Ephemeral Storage - -Lambda Function Ephemeral Storage(`/tmp`) allows you to configure the storage upto `10` GB. The default value set to `512` MB. +### VPC Function with Enhanced Networking ```python # DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug @@ -120,48 +164,38 @@ from cdktf import Token, TerraformStack # Provider bindings are generated by running `cdktf get`. # See https://cdk.tf/provider-generation for more details. 
# -from imports.aws.data_aws_iam_policy_document import DataAwsIamPolicyDocument -from imports.aws.iam_role import IamRole from imports.aws.lambda_function import LambdaFunction class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) - assume_role = DataAwsIamPolicyDocument(self, "assume_role", - statement=[DataAwsIamPolicyDocumentStatement( - actions=["sts:AssumeRole"], - effect="Allow", - principals=[DataAwsIamPolicyDocumentStatementPrincipals( - identifiers=["lambda.amazonaws.com"], - type="Service" - ) - ] - ) - ] - ) - iam_for_lambda = IamRole(self, "iam_for_lambda", - assume_role_policy=Token.as_string(assume_role.json), - name="iam_for_lambda" - ) - LambdaFunction(self, "test_lambda", + LambdaFunction(self, "example", ephemeral_storage=LambdaFunctionEphemeralStorage( - size=10240 + size=5120 + ), + filename="function.zip", + function_name="example_vpc_function", + handler="app.handler", + memory_size=1024, + role=Token.as_string(aws_iam_role_example.arn), + runtime="python3.12", + snap_start=LambdaFunctionSnapStart( + apply_on="PublishedVersions" ), - filename="lambda_function_payload.zip", - function_name="lambda_function_name", - handler="index.test", - role=iam_for_lambda.arn, - runtime="nodejs18.x" + timeout=30, + vpc_config=LambdaFunctionVpcConfig( + ipv6_allowed_for_dual_stack=True, + security_group_ids=[example_lambda.id], + subnet_ids=[example_private1.id, example_private2.id] + ) ) ``` -### Lambda File Systems - -Lambda File Systems allow you to connect an Amazon Elastic File System (EFS) file system to a Lambda function to share data across function invocations, access existing data including large files, and save function state. +### Function with EFS Integration ```python # DO NOT EDIT. 
Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug from constructs import Construct -from cdktf import TerraformStack +from cdktf import VariableType, TerraformVariable, Fn, Token, TerraformCount, TerraformStack # # Provider bindings are generated by running `cdktf get`. # See https://cdk.tf/provider-generation for more details. @@ -171,20 +205,38 @@ from imports.aws.efs_file_system import EfsFileSystem from imports.aws.efs_mount_target import EfsMountTarget from imports.aws.lambda_function import LambdaFunction class MyConvertedCode(TerraformStack): - def __init__(self, scope, name, *, functionName, role): + def __init__(self, scope, name): super().__init__(scope, name) - efs_for_lambda = EfsFileSystem(self, "efs_for_lambda", + # Terraform Variables are not always the best fit for getting inputs in the context of Terraform CDK. + # You can read more about this at https://cdk.tf/variables + subnet_ids = TerraformVariable(self, "subnet_ids", + default=["subnet-12345678", "subnet-87654321"], + description="List of subnet IDs for EFS mount targets", + type=VariableType.list(VariableType.STRING) + ) + example = EfsFileSystem(self, "example", + encrypted=True, tags={ - "Name": "efs_for_lambda" + "Name": "lambda-efs" } ) - alpha = EfsMountTarget(self, "alpha", - file_system_id=efs_for_lambda.id, - security_groups=[sg_for_lambda.id], - subnet_id=subnet_for_lambda.id + # In most cases loops should be handled in the programming language context and + # not inside of the Terraform context. If you are looping over something external, e.g. a variable or a file input + # you should consider using a for loop. If you are looping over something only known to Terraform, e.g. a result of a data source + # you need to keep this like it is. 
+ example_count = TerraformCount.of( + Token.as_number(Fn.length_of(subnet_ids.value))) + aws_efs_mount_target_example = EfsMountTarget(self, "example_2", + file_system_id=example.id, + security_groups=[efs.id], + subnet_id=Token.as_string( + Fn.lookup_nested(subnet_ids.value, [example_count.index])), + count=example_count ) - access_point_for_lambda = EfsAccessPoint(self, "access_point_for_lambda", - file_system_id=efs_for_lambda.id, + # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. + aws_efs_mount_target_example.override_logical_id("example") + aws_efs_access_point_example = EfsAccessPoint(self, "example_3", + file_system_id=example.id, posix_user=EfsAccessPointPosixUser( gid=1000, uid=1000 @@ -193,93 +245,313 @@ class MyConvertedCode(TerraformStack): creation_info=EfsAccessPointRootDirectoryCreationInfo( owner_gid=1000, owner_uid=1000, - permissions="777" + permissions="755" ), path="/lambda" ) ) - LambdaFunction(self, "example", - depends_on=[alpha], + # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. + aws_efs_access_point_example.override_logical_id("example") + aws_lambda_function_example = LambdaFunction(self, "example_4", + depends_on=[aws_efs_mount_target_example], file_system_config=LambdaFunctionFileSystemConfig( - arn=access_point_for_lambda.arn, - local_mount_path="/mnt/efs" + arn=Token.as_string(aws_efs_access_point_example.arn), + local_mount_path="/mnt/data" ), + filename="function.zip", + function_name="example_efs_function", + handler="index.handler", + role=Token.as_string(aws_iam_role_example.arn), + runtime="nodejs20.x", vpc_config=LambdaFunctionVpcConfig( - security_group_ids=[sg_for_lambda.id], - subnet_ids=[subnet_for_lambda.id] + security_group_ids=[lambda_.id], + subnet_ids=subnet_ids.list_value + ) + ) + # This allows the Terraform resource name to match the original name. 
You can remove the call if you don't need them to match. + aws_lambda_function_example.override_logical_id("example") +``` + +### Function with Advanced Logging + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import Token, TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.cloudwatch_log_group import CloudwatchLogGroup +from imports.aws.lambda_function import LambdaFunction +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + example = CloudwatchLogGroup(self, "example", + name="/aws/lambda/example_function", + retention_in_days=14, + tags={ + "Application": "example", + "Environment": "production" + } + ) + aws_lambda_function_example = LambdaFunction(self, "example_1", + depends_on=[example], + filename="function.zip", + function_name="example_function", + handler="index.handler", + logging_config=LambdaFunctionLoggingConfig( + application_log_level="INFO", + log_format="JSON", + system_log_level="WARN" ), - function_name=function_name, - role=role + role=Token.as_string(aws_iam_role_example.arn), + runtime="nodejs20.x" ) + # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. + aws_lambda_function_example.override_logical_id("example") ``` -### Lambda retries +### Function with logging to S3 or Data Firehose + +#### Required Resources + +* An S3 bucket or Data Firehose delivery stream to store the logs. +* A CloudWatch Log Group with: -Lambda Functions allow you to configure error handling for asynchronous invocation. 
The settings that it supports are `Maximum age of event` and `Retry attempts` as stated in [Lambda documentation for Configuring error handling for asynchronous invocation](https://docs.aws.amazon.com/lambda/latest/dg/invocation-async.html#invocation-async-errors). To configure these settings, refer to the [aws_lambda_function_event_invoke_config resource](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/lambda_function_event_invoke_config). + * `log_group_class = "DELIVERY"` + * A subscription filter whose `destination_arn` points to the S3 bucket or the Data Firehose delivery stream. -## CloudWatch Logging and Permissions +* IAM roles: -For more information about CloudWatch Logs for Lambda, see the [Lambda User Guide](https://docs.aws.amazon.com/lambda/latest/dg/monitoring-functions-logs.html). + * Assumed by the `logs.amazonaws.com` service to deliver logs to the S3 bucket or Data Firehose delivery stream. + * Assumed by the `lambda.amazonaws.com` service to send logs to CloudWatch Logs + +* A Lambda function: + + * In the `logging_configuration`, specify the name of the Log Group created above using the `log_group` field + * No special configuration is required to use S3 or Firehose as the log destination + +For more details, see [Sending Lambda function logs to Amazon S3](https://docs.aws.amazon.com/lambda/latest/dg/logging-with-s3.html). + +#### Example: Exporting Lambda Logs to S3 Bucket ```python # DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug from constructs import Construct -from cdktf import TerraformVariable, Token, TerraformStack +from cdktf import Token, TerraformStack # # Provider bindings are generated by running `cdktf get`. # See https://cdk.tf/provider-generation for more details. 
# from imports.aws.cloudwatch_log_group import CloudwatchLogGroup +from imports.aws.cloudwatch_log_subscription_filter import CloudwatchLogSubscriptionFilter from imports.aws.data_aws_iam_policy_document import DataAwsIamPolicyDocument -from imports.aws.iam_policy import IamPolicy -from imports.aws.iam_role_policy_attachment import IamRolePolicyAttachment +from imports.aws.iam_role import IamRole +from imports.aws.iam_role_policy import IamRolePolicy from imports.aws.lambda_function import LambdaFunction +from imports.aws.s3_bucket import S3Bucket class MyConvertedCode(TerraformStack): - def __init__(self, scope, name, *, role): + def __init__(self, scope, name): super().__init__(scope, name) - # Terraform Variables are not always the best fit for getting inputs in the context of Terraform CDK. - # You can read more about this at https://cdk.tf/variables - lambda_function_name = TerraformVariable(self, "lambda_function_name", - default="lambda_function_name" + lambda_function_name = "lambda-log-export-example" + export_var = CloudwatchLogGroup(self, "export", + log_group_class="DELIVERY", + name="/aws/lambda/${" + lambda_function_name + "}" ) - example = CloudwatchLogGroup(self, "example", - name="/aws/lambda/${" + lambda_function_name.value + "}", - retention_in_days=14 + LambdaFunction(self, "log_export", + depends_on=[export_var], + filename="function.zip", + function_name=lambda_function_name, + handler="index.lambda_handler", + logging_config=LambdaFunctionLoggingConfig( + log_format="Text", + log_group=export_var.name + ), + role=example.arn, + runtime="python3.13" ) - lambda_logging = DataAwsIamPolicyDocument(self, "lambda_logging", + lambda_log_export = S3Bucket(self, "lambda_log_export", + bucket="${" + lambda_function_name + "}-bucket" + ) + data_aws_iam_policy_document_lambda_log_export = + DataAwsIamPolicyDocument(self, "lambda_log_export_3", statement=[DataAwsIamPolicyDocumentStatement( - actions=["logs:CreateLogGroup", "logs:CreateLogStream", 
"logs:PutLogEvents" - ], + actions=["s3:PutObject"], effect="Allow", - resources=["arn:aws:logs:*:*:*"] + resources=["${" + lambda_log_export.arn + "}/*"] ) ] ) - aws_iam_policy_lambda_logging = IamPolicy(self, "lambda_logging_3", - description="IAM policy for logging from a lambda", + # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. + data_aws_iam_policy_document_lambda_log_export.override_logical_id("lambda_log_export") + logs_assume_role = DataAwsIamPolicyDocument(self, "logs_assume_role", + statement=[DataAwsIamPolicyDocumentStatement( + actions=["sts:AssumeRole"], + effect="Allow", + principals=[DataAwsIamPolicyDocumentStatementPrincipals( + identifiers=["logs.amazonaws.com"], + type="Service" + ) + ] + ) + ] + ) + logs_log_export = IamRole(self, "logs_log_export", + assume_role_policy=Token.as_string(logs_assume_role.json), + name="${" + lambda_function_name + "}-lambda-log-export-role" + ) + aws_iam_role_policy_lambda_log_export = IamRolePolicy(self, "lambda_log_export_6", + policy=Token.as_string(data_aws_iam_policy_document_lambda_log_export.json), + role=logs_log_export.name + ) + # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. + aws_iam_role_policy_lambda_log_export.override_logical_id("lambda_log_export") + aws_cloudwatch_log_subscription_filter_lambda_log_export = + CloudwatchLogSubscriptionFilter(self, "lambda_log_export_7", + destination_arn=lambda_log_export.arn, + filter_pattern="", + log_group_name=export_var.name, + name="${" + lambda_function_name + "}-filter", + role_arn=logs_log_export.arn + ) + # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. + aws_cloudwatch_log_subscription_filter_lambda_log_export.override_logical_id("lambda_log_export") +``` + +### Function with Error Handling + +```python +# DO NOT EDIT. 
Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import Token, TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.lambda_function import LambdaFunction +from imports.aws.lambda_function_event_invoke_config import LambdaFunctionEventInvokeConfig +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + example = LambdaFunction(self, "example", + dead_letter_config=LambdaFunctionDeadLetterConfig( + target_arn=dlq.arn + ), + filename="function.zip", + function_name="example_function", + handler="index.handler", + role=Token.as_string(aws_iam_role_example.arn), + runtime="nodejs20.x" + ) + aws_lambda_function_event_invoke_config_example = + LambdaFunctionEventInvokeConfig(self, "example_1", + destination_config=LambdaFunctionEventInvokeConfigDestinationConfig( + on_failure=LambdaFunctionEventInvokeConfigDestinationConfigOnFailure( + destination=dlq.arn + ), + on_success=LambdaFunctionEventInvokeConfigDestinationConfigOnSuccess( + destination=success.arn + ) + ), + function_name=example.function_name, + maximum_event_age_in_seconds=60, + maximum_retry_attempts=2 + ) + # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. + aws_lambda_function_event_invoke_config_example.override_logical_id("example") +``` + +### CloudWatch Logging and Permissions + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import VariableType, TerraformVariable, Fn, Token, TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. 
+# +from imports.aws.cloudwatch_log_group import CloudwatchLogGroup +from imports.aws.iam_policy import IamPolicy +from imports.aws.iam_role import IamRole +from imports.aws.iam_role_policy_attachment import IamRolePolicyAttachment +from imports.aws.lambda_function import LambdaFunction +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + # Terraform Variables are not always the best fit for getting inputs in the context of Terraform CDK. + # You can read more about this at https://cdk.tf/variables + function_name = TerraformVariable(self, "function_name", + default="example_function", + description="Name of the Lambda function", + type=VariableType.STRING + ) + example = CloudwatchLogGroup(self, "example", + name="/aws/lambda/${" + function_name.value + "}", + retention_in_days=14, + tags={ + "Environment": "production", + "Function": function_name.string_value + } + ) + lambda_logging = IamPolicy(self, "lambda_logging", + description="IAM policy for logging from Lambda", name="lambda_logging", path="/", - policy=Token.as_string(lambda_logging.json) + policy=Token.as_string( + Fn.jsonencode({ + "Statement": [{ + "Action": ["logs:CreateLogGroup", "logs:CreateLogStream", "logs:PutLogEvents" + ], + "Effect": "Allow", + "Resource": ["arn:aws:logs:*:*:*"] + } + ], + "Version": "2012-10-17" + })) + ) + aws_iam_role_example = IamRole(self, "example_3", + assume_role_policy=Token.as_string( + Fn.jsonencode({ + "Statement": [{ + "Action": "sts:AssumeRole", + "Effect": "Allow", + "Principal": { + "Service": "lambda.amazonaws.com" + } + } + ], + "Version": "2012-10-17" + })), + name="lambda_execution_role" ) # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. 
- aws_iam_policy_lambda_logging.override_logical_id("lambda_logging") + aws_iam_role_example.override_logical_id("example") lambda_logs = IamRolePolicyAttachment(self, "lambda_logs", - policy_arn=Token.as_string(aws_iam_policy_lambda_logging.arn), - role=iam_for_lambda.name + policy_arn=lambda_logging.arn, + role=Token.as_string(aws_iam_role_example.name) ) - LambdaFunction(self, "test_lambda", + aws_lambda_function_example = LambdaFunction(self, "example_5", depends_on=[lambda_logs, example], - function_name=lambda_function_name.string_value, + filename="function.zip", + function_name=function_name.string_value, + handler="index.handler", logging_config=LambdaFunctionLoggingConfig( - log_format="Text" + application_log_level="INFO", + log_format="JSON", + system_log_level="WARN" ), - role=role + role=Token.as_string(aws_iam_role_example.arn), + runtime="nodejs20.x" ) + # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. + aws_lambda_function_example.override_logical_id("example") ``` ## Specifying the Deployment Package -AWS Lambda expects source code to be provided as a deployment package whose structure varies depending on which `runtime` is in use. See [Runtimes][6] for the valid values of `runtime`. The expected structure of the deployment package can be found in [the AWS Lambda documentation for each runtime][8]. +AWS Lambda expects source code to be provided as a deployment package whose structure varies depending on which `runtime` is in use. See [Runtimes](https://docs.aws.amazon.com/lambda/latest/dg/API_CreateFunction.html#SSS-CreateFunction-request-Runtime) for the valid values of `runtime`. The expected structure of the deployment package can be found in [the AWS Lambda documentation for each runtime](https://docs.aws.amazon.com/lambda/latest/dg/deployment-package-v2.html). 
Once you have created your deployment package you can specify it either directly as a local file (using the `filename` argument) or indirectly via Amazon S3 (using the `s3_bucket`, `s3_key` and `s3_object_version` arguments). When providing the deployment package via S3 it may be useful to use [the `aws_s3_object` resource](s3_object.html) to upload it. @@ -290,102 +562,87 @@ For larger deployment packages it is recommended by Amazon to upload via S3, sin The following arguments are required: * `function_name` - (Required) Unique name for your Lambda Function. -* `role` - (Required) Amazon Resource Name (ARN) of the function's execution role. The role provides the function's identity and access to AWS services and resources. +* `role` - (Required) ARN of the function's execution role. The role provides the function's identity and access to AWS services and resources. The following arguments are optional: -* `architectures` - (Optional) Instruction set architecture for your Lambda function. Valid values are `["x86_64"]` and `["arm64"]`. Default is `["x86_64"]`. Removing this attribute, function's architecture stay the same. -* `code_signing_config_arn` - (Optional) To enable code signing for this function, specify the ARN of a code-signing configuration. A code-signing configuration includes a set of signing profiles, which define the trusted publishers for this function. -* `dead_letter_config` - (Optional) Configuration block. Detailed below. +* `architectures` - (Optional) Instruction set architecture for your Lambda function. Valid values are `["x86_64"]` and `["arm64"]`. Default is `["x86_64"]`. Removing this attribute, function's architecture stays the same. +* `code_signing_config_arn` - (Optional) ARN of a code-signing configuration to enable code signing for this function. +* `dead_letter_config` - (Optional) Configuration block for dead letter queue. [See below](#dead_letter_config-configuration-block). 
* `description` - (Optional) Description of what your Lambda Function does. -* `environment` - (Optional) Configuration block. Detailed below. -* `ephemeral_storage` - (Optional) The amount of Ephemeral storage(`/tmp`) to allocate for the Lambda Function in MB. This parameter is used to expand the total amount of Ephemeral storage available, beyond the default amount of `512`MB. Detailed below. -* `file_system_config` - (Optional) Configuration block. Detailed below. -* `filename` - (Optional) Path to the function's deployment package within the local filesystem. Exactly one of `filename`, `image_uri`, or `s3_bucket` must be specified. -* `handler` - (Optional) Function [entrypoint][3] in your code. -* `image_config` - (Optional) Configuration block. Detailed below. -* `image_uri` - (Optional) ECR image URI containing the function's deployment package. Exactly one of `filename`, `image_uri`, or `s3_bucket` must be specified. -* `kms_key_arn` - (Optional) Amazon Resource Name (ARN) of the AWS Key Management Service (KMS) key that is used to encrypt environment variables. If this configuration is not provided when environment variables are in use, AWS Lambda uses a default service key. If this configuration is provided when environment variables are not in use, the AWS Lambda API does not save this configuration and Terraform will show a perpetual difference of adding the key. To fix the perpetual difference, remove this configuration. -* `layers` - (Optional) List of Lambda Layer Version ARNs (maximum of 5) to attach to your Lambda Function. See [Lambda Layers][10] -* `logging_config` - (Optional) Configuration block used to specify advanced logging settings. Detailed below. -* `memory_size` - (Optional) Amount of memory in MB your Lambda Function can use at runtime. Defaults to `128`. See [Limits][5] +* `environment` - (Optional) Configuration block for environment variables. [See below](#environment-configuration-block). 
+* `ephemeral_storage` - (Optional) Amount of ephemeral storage (`/tmp`) to allocate for the Lambda Function. [See below](#ephemeral_storage-configuration-block). +* `file_system_config` - (Optional) Configuration block for EFS file system. [See below](#file_system_config-configuration-block). +* `filename` - (Optional) Path to the function's deployment package within the local filesystem. Conflicts with `image_uri` and `s3_bucket`. One of `filename`, `image_uri`, or `s3_bucket` must be specified. +* `handler` - (Optional) Function entry point in your code. Required if `package_type` is `Zip`. +* `image_config` - (Optional) Container image configuration values. [See below](#image_config-configuration-block). +* `image_uri` - (Optional) ECR image URI containing the function's deployment package. Conflicts with `filename` and `s3_bucket`. One of `filename`, `image_uri`, or `s3_bucket` must be specified. +* `kms_key_arn` - (Optional) ARN of the AWS Key Management Service key used to encrypt environment variables. If not provided when environment variables are in use, AWS Lambda uses a default service key. If provided when environment variables are not in use, the AWS Lambda API does not save this configuration. +* `layers` - (Optional) List of Lambda Layer Version ARNs (maximum of 5) to attach to your Lambda Function. +* `logging_config` - (Optional) Configuration block for advanced logging settings. [See below](#logging_config-configuration-block). +* `memory_size` - (Optional) Amount of memory in MB your Lambda Function can use at runtime. Valid value between 128 MB to 10,240 MB (10 GB), in 1 MB increments. Defaults to 128. * `package_type` - (Optional) Lambda deployment package type. Valid values are `Zip` and `Image`. Defaults to `Zip`. * `publish` - (Optional) Whether to publish creation/change as new Lambda Function Version. Defaults to `false`. -* `reserved_concurrent_executions` - (Optional) Amount of reserved concurrent executions for this lambda function. 
A value of `0` disables lambda from being triggered and `-1` removes any concurrency limitations. Defaults to Unreserved Concurrency Limits `-1`. See [Managing Concurrency][9] -* `replace_security_groups_on_destroy` - (Optional) Whether to replace the security groups on the function's VPC configuration prior to destruction. -Removing these security group associations prior to function destruction can speed up security group deletion times of AWS's internal cleanup operations. -By default, the security groups will be replaced with the `default` security group in the function's configured VPC. -Set the `replacement_security_group_ids` attribute to use a custom list of security groups for replacement. -* `replacement_security_group_ids` - (Optional) List of security group IDs to assign to the function's VPC configuration prior to destruction. -`replace_security_groups_on_destroy` must be set to `true` to use this attribute. -* `runtime` - (Optional) Identifier of the function's runtime. See [Runtimes][6] for valid values. -* `s3_bucket` - (Optional) S3 bucket location containing the function's deployment package. This bucket must reside in the same AWS region where you are creating the Lambda function. Exactly one of `filename`, `image_uri`, or `s3_bucket` must be specified. When `s3_bucket` is set, `s3_key` is required. -* `s3_key` - (Optional) S3 key of an object containing the function's deployment package. When `s3_bucket` is set, `s3_key` is required. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `replace_security_groups_on_destroy` - (Optional) Whether to replace the security groups on the function's VPC configuration prior to destruction. Default is `false`. 
+* `replacement_security_group_ids` - (Optional) List of security group IDs to assign to the function's VPC configuration prior to destruction. Required if `replace_security_groups_on_destroy` is `true`. +* `reserved_concurrent_executions` - (Optional) Amount of reserved concurrent executions for this lambda function. A value of `0` disables lambda from being triggered and `-1` removes any concurrency limitations. Defaults to Unreserved Concurrency Limits `-1`. +* `runtime` - (Optional) Identifier of the function's runtime. Required if `package_type` is `Zip`. See [Runtimes](https://docs.aws.amazon.com/lambda/latest/dg/API_CreateFunction.html#SSS-CreateFunction-request-Runtime) for valid values. +* `s3_bucket` - (Optional) S3 bucket location containing the function's deployment package. Conflicts with `filename` and `image_uri`. One of `filename`, `image_uri`, or `s3_bucket` must be specified. +* `s3_key` - (Optional) S3 key of an object containing the function's deployment package. Required if `s3_bucket` is set. * `s3_object_version` - (Optional) Object version containing the function's deployment package. Conflicts with `filename` and `image_uri`. -* `skip_destroy` - (Optional) Set to true if you do not wish the function to be deleted at destroy time, and instead just remove the function from the Terraform state. -* `source_code_hash` - (Optional) Virtual attribute used to trigger replacement when source code changes. Must be set to a base64-encoded SHA256 hash of the package file specified with either `filename` or `s3_key`. The usual way to set this is `filebase64sha256("file.zip")` (Terraform 0.11.12 and later) or `base64sha256(file("file.zip"))` (Terraform 0.11.11 and earlier), where "file.zip" is the local filename of the lambda function source archive. -* `snap_start` - (Optional) Snap start settings block. Detailed below. -* `tags` - (Optional) Map of tags to assign to the object. 
If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. -* `timeout` - (Optional) Amount of time your Lambda Function has to run in seconds. Defaults to `3`. See [Limits][5]. -* `tracing_config` - (Optional) Configuration block. Detailed below. -* `vpc_config` - (Optional) Configuration block. Detailed below. - -### dead_letter_config - -Dead letter queue configuration that specifies the queue or topic where Lambda sends asynchronous events when they fail processing. For more information, see [Dead Letter Queues](https://docs.aws.amazon.com/lambda/latest/dg/invocation-async.html#dlq). - -* `target_arn` - (Required) ARN of an SNS topic or SQS queue to notify when an invocation fails. If this option is used, the function's IAM role must be granted suitable access to write to the target object, which means allowing either the `sns:Publish` or `sqs:SendMessage` action on this ARN, depending on which service is targeted. - -### environment - -* `variables` - (Optional) Map of environment variables that are accessible from the function code during execution. If provided at least one key must be present. +* `skip_destroy` - (Optional) Whether to retain the function at destroy time, only removing it from the Terraform state instead of deleting it. Default is `false`. +* `snap_start` - (Optional) Configuration block for snap start settings. [See below](#snap_start-configuration-block). +* `source_code_hash` - (Optional) Base64-encoded SHA256 hash of the package file. Used to trigger updates when source code changes. +* `source_kms_key_arn` - (Optional) ARN of the AWS Key Management Service key used to encrypt the function's `.zip` deployment package. Conflicts with `image_uri`. +* `tags` - (Optional) Key-value map of tags for the Lambda function. 
If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. +* `timeout` - (Optional) Amount of time your Lambda Function has to run in seconds. Defaults to 3. Valid between 1 and 900. +* `tracing_config` - (Optional) Configuration block for X-Ray tracing. [See below](#tracing_config-configuration-block). +* `vpc_config` - (Optional) Configuration block for VPC. [See below](#vpc_config-configuration-block). -### ephemeral_storage +### dead_letter_config Configuration Block -* `size` - (Required) The size of the Lambda function Ephemeral storage(`/tmp`) represented in MB. The minimum supported `ephemeral_storage` value defaults to `512`MB and the maximum supported value is `10240`MB. +* `target_arn` - (Required) ARN of an SNS topic or SQS queue to notify when an invocation fails. -### file_system_config +### environment Configuration Block -Connection settings for an EFS file system. Before creating or updating Lambda functions with `file_system_config`, EFS mount targets must be in available lifecycle state. Use `depends_on` to explicitly declare this dependency. See [Using Amazon EFS with Lambda][12]. +* `variables` - (Optional) Map of environment variables available to your Lambda function during execution. -* `arn` - (Required) Amazon Resource Name (ARN) of the Amazon EFS Access Point that provides access to the file system. -* `local_mount_path` - (Required) Path where the function can access the file system, starting with /mnt/. +### ephemeral_storage Configuration Block -### image_config +* `size` - (Required) Amount of ephemeral storage (`/tmp`) in MB. Valid between 512 MB and 10,240 MB (10 GB). -Container image configuration values that override the values in the container image Dockerfile. 
+### file_system_config Configuration Block -* `command` - (Optional) Parameters that you want to pass in with `entry_point`. -* `entry_point` - (Optional) Entry point to your application, which is typically the location of the runtime executable. -* `working_directory` - (Optional) Working directory. +* `arn` - (Required) ARN of the Amazon EFS Access Point. +* `local_mount_path` - (Required) Path where the function can access the file system. Must start with `/mnt/`. -### logging_config +### image_config Configuration Block -Advanced logging settings. See [Configuring advanced logging controls for your Lambda function][13]. +* `command` - (Optional) Parameters to pass to the container image. +* `entry_point` - (Optional) Entry point to your application. +* `working_directory` - (Optional) Working directory for the container image. -* `application_log_level` - (Optional) for JSON structured logs, choose the detail level of the logs your application sends to CloudWatch when using supported logging libraries. -* `log_format` - (Required) select between `Text` and structured `JSON` format for your function's logs. -* `log_group` - (Optional) the CloudWatch log group your function sends logs to. -* `system_log_level` - (optional) for JSON structured logs, choose the detail level of the Lambda platform event logs sent to CloudWatch, such as `WARN`, `DEBUG`, or `INFO`. +### logging_config Configuration Block -### snap_start +* `application_log_level` - (Optional) Detail level of application logs. Valid values: `TRACE`, `DEBUG`, `INFO`, `WARN`, `ERROR`, `FATAL`. +* `log_format` - (Required) Log format. Valid values: `Text`, `JSON`. +* `log_group` - (Optional) CloudWatch log group where logs are sent. +* `system_log_level` - (Optional) Detail level of Lambda platform logs. Valid values: `DEBUG`, `INFO`, `WARN`. -Snap start settings for low-latency startups. This feature is currently only supported for specific runtimes, see [Supported features and limitations][14]. 
-Remove this block to delete the associated settings (rather than setting `apply_on = "None"`). +### snap_start Configuration Block -* `apply_on` - (Required) Conditions where snap start is enabled. Valid values are `PublishedVersions`. +* `apply_on` - (Required) When to apply snap start optimization. Valid value: `PublishedVersions`. -### tracing_config +### tracing_config Configuration Block -* `mode` - (Required) Whether to sample and trace a subset of incoming requests with AWS X-Ray. Valid values are `PassThrough` and `Active`. If `PassThrough`, Lambda will only trace the request from an upstream service if it contains a tracing header with "sampled=1". If `Active`, Lambda will respect any tracing header it receives from an upstream service. If no tracing header is received, Lambda will call X-Ray for a tracing decision. +* `mode` - (Required) X-Ray tracing mode. Valid values: `Active`, `PassThrough`. -### vpc_config - -For network connectivity to AWS resources in a VPC, specify a list of security groups and subnets in the VPC. When you connect a function to a VPC, it can only access resources and the internet through that VPC. See [VPC Settings][7]. +### vpc_config Configuration Block ~> **NOTE:** If `subnet_ids`, `security_group_ids` and `ipv6_allowed_for_dual_stack` are empty then `vpc_config` is considered to be empty or unset. -* `ipv6_allowed_for_dual_stack` - (Optional) Allows outbound IPv6 traffic on VPC functions that are connected to dual-stack subnets. Default is `false`. +* `ipv6_allowed_for_dual_stack` - (Optional) Whether to allow outbound IPv6 traffic on VPC functions connected to dual-stack subnets. Default: `false`. * `security_group_ids` - (Required) List of security group IDs associated with the Lambda function. * `subnet_ids` - (Required) List of subnet IDs associated with the Lambda function. 
@@ -393,34 +650,20 @@ For network connectivity to AWS resources in a VPC, specify a list of security g This resource exports the following attributes in addition to the arguments above: -* `arn` - Amazon Resource Name (ARN) identifying your Lambda Function. +* `arn` - ARN identifying your Lambda Function. * `code_sha256` - Base64-encoded representation of raw SHA-256 sum of the zip file. -* `invoke_arn` - ARN to be used for invoking Lambda Function from API Gateway - to be used in [`aws_api_gateway_integration`](/docs/providers/aws/r/api_gateway_integration.html)'s `uri`. +* `invoke_arn` - ARN to be used for invoking Lambda Function from API Gateway - to be used in [`aws_api_gateway_integration`](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/api_gateway_integration)'s `uri`. * `last_modified` - Date this resource was last modified. * `qualified_arn` - ARN identifying your Lambda Function Version (if versioning is enabled via `publish = true`). -* `qualified_invoke_arn` - Qualified ARN (ARN with lambda version number) to be used for invoking Lambda Function from API Gateway - to be used in [`aws_api_gateway_integration`](/docs/providers/aws/r/api_gateway_integration.html)'s `uri`. +* `qualified_invoke_arn` - Qualified ARN (ARN with lambda version number) to be used for invoking Lambda Function from API Gateway - to be used in [`aws_api_gateway_integration`](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/api_gateway_integration)'s `uri`. * `signing_job_arn` - ARN of the signing job. * `signing_profile_version_arn` - ARN of the signing profile version. * `snap_start.optimization_status` - Optimization status of the snap start configuration. Valid values are `On` and `Off`. * `source_code_size` - Size in bytes of the function .zip file. 
-* `tags_all` - A map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). +* `tags_all` - Map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). * `version` - Latest published version of your Lambda Function. * `vpc_config.vpc_id` - ID of the VPC. -[1]: https://docs.aws.amazon.com/lambda/latest/dg/welcome.html -[3]: https://docs.aws.amazon.com/lambda/latest/dg/walkthrough-custom-events-create-test-function.html -[4]: https://docs.aws.amazon.com/lambda/latest/dg/intro-permission-model.html -[5]: https://docs.aws.amazon.com/lambda/latest/dg/limits.html -[6]: https://docs.aws.amazon.com/lambda/latest/dg/API_CreateFunction.html#SSS-CreateFunction-request-Runtime -[7]: https://docs.aws.amazon.com/lambda/latest/dg/configuration-vpc.html -[8]: https://docs.aws.amazon.com/lambda/latest/dg/deployment-package-v2.html -[9]: https://docs.aws.amazon.com/lambda/latest/dg/concurrent-executions.html -[10]: https://docs.aws.amazon.com/lambda/latest/dg/configuration-layers.html -[11]: https://learn.hashicorp.com/terraform/aws/lambda-api-gateway -[12]: https://docs.aws.amazon.com/lambda/latest/dg/services-efs.html -[13]: https://docs.aws.amazon.com/lambda/latest/dg/monitoring-cloudwatchlogs.html#monitoring-cloudwatchlogs-advanced -[14]: https://docs.aws.amazon.com/lambda/latest/dg/snapstart.html#snapstart-runtimes - ## Timeouts [Configuration options](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts): @@ -431,6 +674,32 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) 
can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_lambda_function.example + identity = { + function_name = "example" + } +} + +resource "aws_lambda_function" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `function_name` (String) Name of the Lambda function. + +#### Optional + +* `account_id` (String) AWS Account where this resource is managed. +* `region` (String) Region where this resource is managed. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Lambda Functions using the `function_name`. For example: ```python @@ -445,13 +714,13 @@ from imports.aws.lambda_function import LambdaFunction class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) - LambdaFunction.generate_config_for_import(self, "testLambda", "my_test_lambda_function") + LambdaFunction.generate_config_for_import(self, "example", "example") ``` Using `terraform import`, import Lambda Functions using the `function_name`. For example: ```console -% terraform import aws_lambda_function.test_lambda my_test_lambda_function +% terraform import aws_lambda_function.example example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/lambda_function_event_invoke_config.html.markdown b/website/docs/cdktf/python/r/lambda_function_event_invoke_config.html.markdown index df585ea7f62d..350db990c417 100644 --- a/website/docs/cdktf/python/r/lambda_function_event_invoke_config.html.markdown +++ b/website/docs/cdktf/python/r/lambda_function_event_invoke_config.html.markdown @@ -3,20 +3,22 @@ subcategory: "Lambda" layout: "aws" page_title: "AWS: aws_lambda_function_event_invoke_config" description: |- - Manages an asynchronous invocation configuration for a Lambda Function or Alias. + Manages an AWS Lambda Function Event Invoke Config. 
--- # Resource: aws_lambda_function_event_invoke_config -Manages an asynchronous invocation configuration for a Lambda Function or Alias. More information about asynchronous invocations and the configurable values can be found in the [Lambda Developer Guide](https://docs.aws.amazon.com/lambda/latest/dg/invocation-async.html). +Manages an AWS Lambda Function Event Invoke Config. Use this resource to configure error handling and destinations for asynchronous Lambda function invocations. + +More information about asynchronous invocations and the configurable values can be found in the [Lambda Developer Guide](https://docs.aws.amazon.com/lambda/latest/dg/invocation-async.html). ## Example Usage -### Destination Configuration +### Complete Error Handling and Destinations -~> **NOTE:** Ensure the Lambda Function IAM Role has necessary permissions for the destination, such as `sqs:SendMessage` or `sns:Publish`, otherwise the API will return a generic `InvalidParameterValueException: The destination ARN arn:PARTITION:SERVICE:REGION:ACCOUNT:RESOURCE is invalid.` error. +~> **Note:** Ensure the Lambda Function IAM Role has necessary permissions for the destination, such as `sqs:SendMessage` or `sns:Publish`, otherwise the API will return a generic `InvalidParameterValueException: The destination ARN arn:PARTITION:SERVICE:REGION:ACCOUNT:RESOURCE is invalid.` error. ```python # DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug @@ -27,23 +29,41 @@ from cdktf import Token, TerraformStack # See https://cdk.tf/provider-generation for more details. 
# from imports.aws.lambda_function_event_invoke_config import LambdaFunctionEventInvokeConfig +from imports.aws.sns_topic import SnsTopic +from imports.aws.sqs_queue import SqsQueue class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) + success = SnsTopic(self, "success", + name="lambda-success-notifications", + tags={ + "Environment": "production", + "Purpose": "lambda-success-notifications" + } + ) + dlq = SqsQueue(self, "dlq", + name="lambda-dlq", + tags={ + "Environment": "production", + "Purpose": "lambda-error-handling" + } + ) LambdaFunctionEventInvokeConfig(self, "example", destination_config=LambdaFunctionEventInvokeConfigDestinationConfig( on_failure=LambdaFunctionEventInvokeConfigDestinationConfigOnFailure( - destination=Token.as_string(aws_sqs_queue_example.arn) + destination=dlq.arn ), on_success=LambdaFunctionEventInvokeConfigDestinationConfigOnSuccess( - destination=Token.as_string(aws_sns_topic_example.arn) + destination=success.arn ) ), - function_name=Token.as_string(aws_lambda_alias_example.function_name) + function_name=Token.as_string(aws_lambda_function_example.function_name), + maximum_event_age_in_seconds=300, + maximum_retry_attempts=1 ) ``` -### Error Handling Configuration +### Error Handling Only ```python # DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug @@ -58,13 +78,50 @@ class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) LambdaFunctionEventInvokeConfig(self, "example", - function_name=Token.as_string(aws_lambda_alias_example.function_name), + function_name=Token.as_string(aws_lambda_function_example.function_name), maximum_event_age_in_seconds=60, maximum_retry_attempts=0 ) ``` -### Configuration for Alias Name +### Configuration for Lambda Alias + +```python +# DO NOT EDIT. 
Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import Token, TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.lambda_alias import LambdaAlias +from imports.aws.lambda_function_event_invoke_config import LambdaFunctionEventInvokeConfig +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + example = LambdaAlias(self, "example", + description="Production alias", + function_name=Token.as_string(aws_lambda_function_example.function_name), + function_version=Token.as_string(aws_lambda_function_example.version), + name="production" + ) + aws_lambda_function_event_invoke_config_example = + LambdaFunctionEventInvokeConfig(self, "example_1", + destination_config=LambdaFunctionEventInvokeConfigDestinationConfig( + on_failure=LambdaFunctionEventInvokeConfigDestinationConfigOnFailure( + destination=production_dlq.arn + ) + ), + function_name=Token.as_string(aws_lambda_function_example.function_name), + maximum_event_age_in_seconds=1800, + maximum_retry_attempts=2, + qualifier=example.name + ) + # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. + aws_lambda_function_event_invoke_config_example.override_logical_id("example") +``` + +### Configuration for Published Version ```python # DO NOT EDIT. 
Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug @@ -79,12 +136,22 @@ class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) LambdaFunctionEventInvokeConfig(self, "example", - function_name=Token.as_string(aws_lambda_alias_example.function_name), - qualifier=Token.as_string(aws_lambda_alias_example.name) + destination_config=LambdaFunctionEventInvokeConfigDestinationConfig( + on_failure=LambdaFunctionEventInvokeConfigDestinationConfigOnFailure( + destination=version_dlq.arn + ), + on_success=LambdaFunctionEventInvokeConfigDestinationConfigOnSuccess( + destination=version_success.arn + ) + ), + function_name=Token.as_string(aws_lambda_function_example.function_name), + maximum_event_age_in_seconds=21600, + maximum_retry_attempts=2, + qualifier=Token.as_string(aws_lambda_function_example.version) ) ``` -### Configuration for Function Latest Unpublished Version +### Configuration for Latest Version ```python # DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug @@ -99,12 +166,19 @@ class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) LambdaFunctionEventInvokeConfig(self, "example", + destination_config=LambdaFunctionEventInvokeConfigDestinationConfig( + on_failure=LambdaFunctionEventInvokeConfigDestinationConfigOnFailure( + destination=dev_dlq.arn + ) + ), function_name=Token.as_string(aws_lambda_function_example.function_name), + maximum_event_age_in_seconds=120, + maximum_retry_attempts=0, qualifier="$LATEST" ) ``` -### Configuration for Function Published Version +### Multiple Destination Types ```python # DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug @@ -114,13 +188,28 @@ from cdktf import Token, TerraformStack # Provider bindings are generated by running `cdktf get`. # See https://cdk.tf/provider-generation for more details. 
# +from imports.aws.cloudwatch_event_bus import CloudwatchEventBus from imports.aws.lambda_function_event_invoke_config import LambdaFunctionEventInvokeConfig +from imports.aws.s3_bucket import S3Bucket class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) + lambda_failures = CloudwatchEventBus(self, "lambda_failures", + name="lambda-failure-events" + ) + lambda_success_archive = S3Bucket(self, "lambda_success_archive", + bucket="lambda-success-archive-${" + bucket_suffix.hex + "}" + ) LambdaFunctionEventInvokeConfig(self, "example", - function_name=Token.as_string(aws_lambda_function_example.function_name), - qualifier=Token.as_string(aws_lambda_function_example.version) + destination_config=LambdaFunctionEventInvokeConfigDestinationConfig( + on_failure=LambdaFunctionEventInvokeConfigDestinationConfigOnFailure( + destination=lambda_failures.arn + ), + on_success=LambdaFunctionEventInvokeConfigDestinationConfigOnSuccess( + destination=lambda_success_archive.arn + ) + ), + function_name=Token.as_string(aws_lambda_function_example.function_name) ) ``` @@ -128,45 +217,40 @@ class MyConvertedCode(TerraformStack): The following arguments are required: -* `function_name` - (Required) Name or Amazon Resource Name (ARN) of the Lambda Function, omitting any version or alias qualifier. +* `function_name` - (Required) Name or ARN of the Lambda Function, omitting any version or alias qualifier. The following arguments are optional: -* `destination_config` - (Optional) Configuration block with destination configuration. See below for details. +* `destination_config` - (Optional) Configuration block with destination configuration. [See below](#destination_config-configuration-block). * `maximum_event_age_in_seconds` - (Optional) Maximum age of a request that Lambda sends to a function for processing in seconds. Valid values between 60 and 21600. 
* `maximum_retry_attempts` - (Optional) Maximum number of times to retry when the function returns an error. Valid values between 0 and 2. Defaults to 2. * `qualifier` - (Optional) Lambda Function published version, `$LATEST`, or Lambda Alias name. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). ### destination_config Configuration Block -~> **NOTE:** At least one of `on_failure` or `on_success` must be configured when using this configuration block, otherwise remove it completely to prevent perpetual differences in Terraform runs. - -The following arguments are optional: +~> **Note:** At least one of `on_failure` or `on_success` must be configured when using this configuration block, otherwise remove it completely to prevent perpetual differences in Terraform runs. -* `on_failure` - (Optional) Configuration block with destination configuration for failed asynchronous invocations. See below for details. -* `on_success` - (Optional) Configuration block with destination configuration for successful asynchronous invocations. See below for details. +* `on_failure` - (Optional) Configuration block with destination configuration for failed asynchronous invocations. [See below](#destination_config-on_failure-configuration-block). +* `on_success` - (Optional) Configuration block with destination configuration for successful asynchronous invocations. [See below](#destination_config-on_success-configuration-block). #### destination_config on_failure Configuration Block -The following arguments are required: - -* `destination` - (Required) Amazon Resource Name (ARN) of the destination resource. 
See the [Lambda Developer Guide](https://docs.aws.amazon.com/lambda/latest/dg/invocation-async.html#invocation-async-destinations) for acceptable resource types and associated IAM permissions. +* `destination` - (Required) ARN of the destination resource. See the [Lambda Developer Guide](https://docs.aws.amazon.com/lambda/latest/dg/invocation-async.html#invocation-async-destinations) for acceptable resource types and associated IAM permissions. #### destination_config on_success Configuration Block -The following arguments are required: - -* `destination` - (Required) Amazon Resource Name (ARN) of the destination resource. See the [Lambda Developer Guide](https://docs.aws.amazon.com/lambda/latest/dg/invocation-async.html#invocation-async-destinations) for acceptable resource types and associated IAM permissions. +* `destination` - (Required) ARN of the destination resource. See the [Lambda Developer Guide](https://docs.aws.amazon.com/lambda/latest/dg/invocation-async.html#invocation-async-destinations) for acceptable resource types and associated IAM permissions. ## Attribute Reference This resource exports the following attributes in addition to the arguments above: -* `id` - Fully qualified Lambda Function name or Amazon Resource Name (ARN) +* `id` - Fully qualified Lambda Function name or ARN. ## Import -In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Lambda Function Event Invoke Configs using the fully qualified Function name or Amazon Resource Name (ARN). For example: +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Lambda Function Event Invoke Configs using the fully qualified Function name or ARN. 
For example: ARN without qualifier (all versions and aliases): @@ -182,7 +266,7 @@ from imports.aws.lambda_function_event_invoke_config import LambdaFunctionEventI class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) - LambdaFunctionEventInvokeConfig.generate_config_for_import(self, "example", "arn:aws:us-east-1:123456789012:function:my_function") + LambdaFunctionEventInvokeConfig.generate_config_for_import(self, "example", "arn:aws:lambda:us-east-1:123456789012:function:example") ``` ARN with qualifier: @@ -199,7 +283,7 @@ from imports.aws.lambda_function_event_invoke_config import LambdaFunctionEventI class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) - LambdaFunctionEventInvokeConfig.generate_config_for_import(self, "example", "arn:aws:us-east-1:123456789012:function:my_function:production") + LambdaFunctionEventInvokeConfig.generate_config_for_import(self, "example", "arn:aws:lambda:us-east-1:123456789012:function:example:production") ``` Name without qualifier (all versions and aliases): @@ -216,7 +300,7 @@ from imports.aws.lambda_function_event_invoke_config import LambdaFunctionEventI class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) - LambdaFunctionEventInvokeConfig.generate_config_for_import(self, "example", "my_function") + LambdaFunctionEventInvokeConfig.generate_config_for_import(self, "example", "example") ``` Name with qualifier: @@ -233,33 +317,33 @@ from imports.aws.lambda_function_event_invoke_config import LambdaFunctionEventI class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) - LambdaFunctionEventInvokeConfig.generate_config_for_import(self, "example", "my_function:production") + LambdaFunctionEventInvokeConfig.generate_config_for_import(self, "example", "example:production") ``` -**Using `terraform import` to import** Lambda Function Event Invoke 
Configs using the fully qualified Function name or Amazon Resource Name (ARN). For example: +For backwards compatibility, the following legacy `terraform import` commands are also supported: -ARN without qualifier (all versions and aliases): +Using ARN without qualifier: ```console -% terraform import aws_lambda_function_event_invoke_config.example arn:aws:us-east-1:123456789012:function:my_function +% terraform import aws_lambda_function_event_invoke_config.example arn:aws:lambda:us-east-1:123456789012:function:example ``` -ARN with qualifier: +Using ARN with qualifier: ```console -% terraform import aws_lambda_function_event_invoke_config.example arn:aws:us-east-1:123456789012:function:my_function:production +% terraform import aws_lambda_function_event_invoke_config.example arn:aws:lambda:us-east-1:123456789012:function:example:production ``` Name without qualifier (all versions and aliases): ```console -% terraform import aws_lambda_function_event_invoke_config.example my_function +% terraform import aws_lambda_function_event_invoke_config.example example ``` Name with qualifier: ```console -% terraform import aws_lambda_function_event_invoke_config.example my_function:production +% terraform import aws_lambda_function_event_invoke_config.example example:production ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/lambda_function_recursion_config.html.markdown b/website/docs/cdktf/python/r/lambda_function_recursion_config.html.markdown index 25745a7b68f6..3b68084c67ea 100644 --- a/website/docs/cdktf/python/r/lambda_function_recursion_config.html.markdown +++ b/website/docs/cdktf/python/r/lambda_function_recursion_config.html.markdown @@ -3,19 +3,21 @@ subcategory: "Lambda" layout: "aws" page_title: "AWS: aws_lambda_function_recursion_config" description: |- - Terraform resource for managing an AWS Lambda Function Recursion Config. + Manages an AWS Lambda Function Recursion Config. 
--- # Resource: aws_lambda_function_recursion_config -Terraform resource for managing an AWS Lambda Function Recursion Config. +Manages an AWS Lambda Function Recursion Config. Use this resource to control how Lambda handles recursive function invocations to prevent infinite loops. -~> Destruction of this resource will return the `recursive_loop` configuration back to the default value of `Terminate`. +~> **Note:** Destruction of this resource will return the `recursive_loop` configuration back to the default value of `Terminate`. ## Example Usage +### Allow Recursive Invocations + ```python # DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug from constructs import Construct @@ -24,30 +26,77 @@ from cdktf import TerraformStack # Provider bindings are generated by running `cdktf get`. # See https://cdk.tf/provider-generation for more details. # +from imports.aws.lambda_function import LambdaFunction from imports.aws.lambda_function_recursion_config import LambdaFunctionRecursionConfig class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) - LambdaFunctionRecursionConfig(self, "example", - function_name="SomeFunction", + example = LambdaFunction(self, "example", + filename="function.zip", + function_name="recursive_processor", + handler="index.handler", + role=lambda_role.arn, + runtime="python3.12" + ) + aws_lambda_function_recursion_config_example = + LambdaFunctionRecursionConfig(self, "example_1", + function_name=example.function_name, recursive_loop="Allow" ) + # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. + aws_lambda_function_recursion_config_example.override_logical_id("example") +``` + +### Production Safety Configuration + +```python +# DO NOT EDIT. 
Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.lambda_function import LambdaFunction +from imports.aws.lambda_function_recursion_config import LambdaFunctionRecursionConfig +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + production_processor = LambdaFunction(self, "production_processor", + filename="processor.zip", + function_name="production-data-processor", + handler="app.handler", + role=lambda_role.arn, + runtime="nodejs20.x", + tags={ + "Environment": "production", + "Purpose": "data-processing" + } + ) + LambdaFunctionRecursionConfig(self, "example", + function_name=production_processor.function_name, + recursive_loop="Terminate" + ) ``` ## Argument Reference The following arguments are required: -* `function_name` - (Required) Lambda function name. +* `function_name` - (Required) Name of the Lambda function. * `recursive_loop` - (Required) Lambda function recursion configuration. Valid values are `Allow` or `Terminate`. +The following arguments are optional: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). + ## Attribute Reference This resource exports no additional attributes. ## Import -In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import AWS Lambda Function Recursion Config using the `function_name`. 
For example: +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Lambda Function Recursion Config using the `function_name`. For example: ```python # DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug @@ -61,13 +110,13 @@ from imports.aws.lambda_function_recursion_config import LambdaFunctionRecursion class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) - LambdaFunctionRecursionConfig.generate_config_for_import(self, "example", "SomeFunction") + LambdaFunctionRecursionConfig.generate_config_for_import(self, "example", "recursive_processor") ``` -Using `terraform import`, import AWS Lambda Function Recursion Config using the `function_name`. For example: +For backwards compatibility, the following legacy `terraform import` command is also supported: ```console -% terraform import aws_lambda_function_recursion_config.example SomeFunction +% terraform import aws_lambda_function_recursion_config.example recursive_processor ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/lambda_function_url.html.markdown b/website/docs/cdktf/python/r/lambda_function_url.html.markdown index 7cbc1b450f90..3a47d0c31526 100644 --- a/website/docs/cdktf/python/r/lambda_function_url.html.markdown +++ b/website/docs/cdktf/python/r/lambda_function_url.html.markdown @@ -2,24 +2,23 @@ subcategory: "Lambda" layout: "aws" page_title: "AWS: aws_lambda_function_url" -description: |- - Provides a Lambda function URL resource. +description: Manages a Lambda function URL. --- # Resource: aws_lambda_function_url -Provides a Lambda function URL resource. A function URL is a dedicated HTTP(S) endpoint for a Lambda function. - -See the [AWS Lambda documentation](https://docs.aws.amazon.com/lambda/latest/dg/lambda-urls.html) for more information. +Manages a Lambda function URL. 
Creates a dedicated HTTP(S) endpoint for a Lambda function to enable direct invocation via HTTP requests. ## Example Usage +### Basic Function URL with No Authentication + ```python # DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug from constructs import Construct -from cdktf import TerraformStack +from cdktf import Token, TerraformStack # # Provider bindings are generated by running `cdktf get`. # See https://cdk.tf/provider-generation for more details. @@ -28,53 +27,78 @@ from imports.aws.lambda_function_url import LambdaFunctionUrl class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) - LambdaFunctionUrl(self, "test_latest", + LambdaFunctionUrl(self, "example", authorization_type="NONE", - function_name=test.function_name + function_name=Token.as_string(aws_lambda_function_example.function_name) ) - LambdaFunctionUrl(self, "test_live", +``` + +### Function URL with IAM Authentication and CORS Configuration + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import Token, TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. 
+# +from imports.aws.lambda_function_url import LambdaFunctionUrl +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + LambdaFunctionUrl(self, "example", authorization_type="AWS_IAM", cors=LambdaFunctionUrlCors( allow_credentials=True, allow_headers=["date", "keep-alive"], - allow_methods=["*"], - allow_origins=["*"], + allow_methods=["GET", "POST"], + allow_origins=["https://example.com"], expose_headers=["keep-alive", "date"], max_age=86400 ), - function_name=test.function_name, + function_name=Token.as_string(aws_lambda_function_example.function_name), + invoke_mode="RESPONSE_STREAM", qualifier="my_alias" ) ``` ## Argument Reference -This resource supports the following arguments: +The following arguments are required: + +* `authorization_type` - (Required) Type of authentication that the function URL uses. Valid values are `AWS_IAM` and `NONE`. +* `function_name` - (Required) Name or ARN of the Lambda function. -* `authorization_type` - (Required) The type of authentication that the function URL uses. Set to `"AWS_IAM"` to restrict access to authenticated IAM users only. Set to `"NONE"` to bypass IAM authentication and create a public endpoint. See the [AWS documentation](https://docs.aws.amazon.com/lambda/latest/dg/urls-auth.html) for more details. -* `cors` - (Optional) The [cross-origin resource sharing (CORS)](https://developer.mozilla.org/en-US/docs/Web/HTTP/CORS) settings for the function URL. Documented below. -* `function_name` - (Required) The name (or ARN) of the Lambda function. -* `invoke_mode` - (Optional) Determines how the Lambda function responds to an invocation. Valid values are `BUFFERED` (default) and `RESPONSE_STREAM`. See more in [Configuring a Lambda function to stream responses](https://docs.aws.amazon.com/lambda/latest/dg/configuration-response-streaming.html). -* `qualifier` - (Optional) The alias name or `"$LATEST"`. 
+The following arguments are optional: -### cors +* `cors` - (Optional) Cross-origin resource sharing (CORS) settings for the function URL. [See below](#cors). +* `invoke_mode` - (Optional) How the Lambda function responds to an invocation. Valid values are `BUFFERED` (default) and `RESPONSE_STREAM`. +* `qualifier` - (Optional) Alias name or `$LATEST`. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). -This configuration block supports the following attributes: +### CORS -* `allow_credentials` - (Optional) Whether to allow cookies or other credentials in requests to the function URL. The default is `false`. -* `allow_headers` - (Optional) The HTTP headers that origins can include in requests to the function URL. For example: `["date", "keep-alive", "x-custom-header"]`. -* `allow_methods` - (Optional) The HTTP methods that are allowed when calling the function URL. For example: `["GET", "POST", "DELETE"]`, or the wildcard character (`["*"]`). -* `allow_origins` - (Optional) The origins that can access the function URL. You can list any number of specific origins (or the wildcard character (`"*"`)), separated by a comma. For example: `["https://www.example.com", "http://localhost:60905"]`. -* `expose_headers` - (Optional) The HTTP headers in your function response that you want to expose to origins that call the function URL. -* `max_age` - (Optional) The maximum amount of time, in seconds, that web browsers can cache results of a preflight request. By default, this is set to `0`, which means that the browser doesn't cache results. The maximum value is `86400`. +* `allow_credentials` - (Optional) Whether to allow cookies or other credentials in requests to the function URL. 
+* `allow_headers` - (Optional) HTTP headers that origins can include in requests to the function URL. +* `allow_methods` - (Optional) HTTP methods that are allowed when calling the function URL. +* `allow_origins` - (Optional) Origins that can access the function URL. +* `expose_headers` - (Optional) HTTP headers in your function response that you want to expose to origins that call the function URL. +* `max_age` - (Optional) Maximum amount of time, in seconds, that web browsers can cache results of a preflight request. Maximum value is `86400`. ## Attribute Reference This resource exports the following attributes in addition to the arguments above: -* `function_arn` - The Amazon Resource Name (ARN) of the function. -* `function_url` - The HTTP URL endpoint for the function in the format `https://.lambda-url..on.aws/`. -* `url_id` - A generated ID for the endpoint. +* `function_arn` - ARN of the Lambda function. +* `function_url` - HTTP URL endpoint for the function in the format `https://.lambda-url..on.aws/`. +* `url_id` - Generated ID for the endpoint. + +## Timeouts + +[Configuration options](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts): + +* `create` - (Default `10m`) ## Import @@ -92,13 +116,13 @@ from imports.aws.lambda_function_url import LambdaFunctionUrl class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) - LambdaFunctionUrl.generate_config_for_import(self, "testLambdaUrl", "my_test_lambda_function") + LambdaFunctionUrl.generate_config_for_import(self, "example", "example") ``` Using `terraform import`, import Lambda function URLs using the `function_name` or `function_name/qualifier`. 
For example: ```console -% terraform import aws_lambda_function_url.test_lambda_url my_test_lambda_function +% terraform import aws_lambda_function_url.example example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/lambda_invocation.html.markdown b/website/docs/cdktf/python/r/lambda_invocation.html.markdown index df1300b2d2cb..f7e6fd6b556f 100644 --- a/website/docs/cdktf/python/r/lambda_invocation.html.markdown +++ b/website/docs/cdktf/python/r/lambda_invocation.html.markdown @@ -3,22 +3,22 @@ subcategory: "Lambda" layout: "aws" page_title: "AWS: aws_lambda_invocation" description: |- - Invoke AWS Lambda Function + Manages an AWS Lambda Function invocation. --- # Resource: aws_lambda_invocation -Use this resource to invoke a lambda function. The lambda function is invoked with the [RequestResponse](https://docs.aws.amazon.com/lambda/latest/dg/API_Invoke.html#API_Invoke_RequestSyntax) invocation type. +Manages an AWS Lambda Function invocation. Use this resource to invoke a Lambda function with the [RequestResponse](https://docs.aws.amazon.com/lambda/latest/dg/API_Invoke.html#API_Invoke_RequestSyntax) invocation type. -~> **NOTE:** By default this resource _only_ invokes the function when the arguments call for a create or replace. In other words, after an initial invocation on _apply_, if the arguments do not change, a subsequent _apply_ does not invoke the function again. To dynamically invoke the function, see the `triggers` example below. To always invoke a function on each _apply_, see the [`aws_lambda_invocation`](/docs/providers/aws/d/lambda_invocation.html) data source. To invoke the lambda function when the terraform resource is updated and deleted, see the [CRUD Lifecycle Scope](#crud-lifecycle-scope) example below. +~> **Note:** By default this resource _only_ invokes the function when the arguments call for a create or replace. 
After an initial invocation on _apply_, if the arguments do not change, a subsequent _apply_ does not invoke the function again. To dynamically invoke the function, see the `triggers` example below. To always invoke a function on each _apply_, see the [`aws_lambda_invocation` data source](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/lambda_invocation). To invoke the Lambda function when the Terraform resource is updated and deleted, see the [CRUD Lifecycle Management](#crud-lifecycle-management) example below. -~> **NOTE:** If you get a `KMSAccessDeniedException: Lambda was unable to decrypt the environment variables because KMS access was denied` error when invoking an [`aws_lambda_function`](/docs/providers/aws/r/lambda_function.html) with environment variables, the IAM role associated with the function may have been deleted and recreated _after_ the function was created. You can fix the problem two ways: 1) updating the function's role to another role and then updating it back again to the recreated role, or 2) by using Terraform to `taint` the function and `apply` your configuration again to recreate the function. (When you create a function, Lambda grants permissions on the KMS key to the function's IAM role. If the IAM role is recreated, the grant is no longer valid. Changing the function's role or recreating the function causes Lambda to update the grant.) +~> **Note:** If you get a `KMSAccessDeniedException: Lambda was unable to decrypt the environment variables because KMS access was denied` error when invoking a Lambda function with environment variables, the IAM role associated with the function may have been deleted and recreated after the function was created. You can fix the problem two ways: 1) updating the function's role to another role and then updating it back again to the recreated role, or 2) by using Terraform to `taint` the function and `apply` your configuration again to recreate the function. 
(When you create a function, Lambda grants permissions on the KMS key to the function's IAM role. If the IAM role is recreated, the grant is no longer valid. Changing the function's role or recreating the function causes Lambda to update the grant.) ## Example Usage -### Basic Example +### Basic Invocation ```python # DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug @@ -28,29 +28,43 @@ from cdktf import Fn, Token, TerraformOutput, TerraformStack # Provider bindings are generated by running `cdktf get`. # See https://cdk.tf/provider-generation for more details. # +from imports.aws.lambda_function import LambdaFunction from imports.aws.lambda_invocation import LambdaInvocation class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) - example = LambdaInvocation(self, "example", - function_name=lambda_function_test.function_name, + example = LambdaFunction(self, "example", + filename="function.zip", + function_name="data_processor", + handler="index.handler", + role=lambda_role.arn, + runtime="python3.12" + ) + aws_lambda_invocation_example = LambdaInvocation(self, "example_1", + function_name=example.function_name, input=Token.as_string( Fn.jsonencode({ - "key1": "value1", - "key2": "value2" + "config": { + "debug": False, + "environment": "production" + }, + "operation": "initialize" })) ) - TerraformOutput(self, "result_entry", - value=Fn.lookup_nested(Fn.jsondecode(example.result), ["\"key1\""]) + # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. + aws_lambda_invocation_example.override_logical_id("example") + TerraformOutput(self, "initialization_result", + value=Fn.lookup_nested( + Fn.jsondecode(Token.as_string(aws_lambda_invocation_example.result)), ["\"status\""]) ) ``` -### Dynamic Invocation Example Using Triggers +### Dynamic Invocation with Triggers ```python # DO NOT EDIT. 
Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug from constructs import Construct -from cdktf import Fn, Token, TerraformStack +from cdktf import Token, Fn, TerraformStack # # Provider bindings are generated by running `cdktf get`. # See https://cdk.tf/provider-generation for more details. @@ -60,27 +74,32 @@ class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) LambdaInvocation(self, "example", - function_name=lambda_function_test.function_name, + function_name=Token.as_string(aws_lambda_function_example.function_name), input=Token.as_string( Fn.jsonencode({ - "key1": "value1", - "key2": "value2" + "batch_id": batch_id.result, + "environment": environment.value, + "operation": "process_data" })), triggers={ - "redeployment": Token.as_string( - Fn.sha1( + "config_hash": Token.as_string( + Fn.sha256( Token.as_string( - Fn.jsonencode([aws_lambda_function_example.environment])))) + Fn.jsonencode({ + "environment": environment.value, + "timestamp": Fn.timestamp() + })))), + "function_version": Token.as_string(aws_lambda_function_example.version) } ) ``` -### CRUD Lifecycle Scope +### CRUD Lifecycle Management ```python # DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug from constructs import Construct -from cdktf import Fn, Token, TerraformStack +from cdktf import Token, Fn, TerraformStack # # Provider bindings are generated by running `cdktf get`. # See https://cdk.tf/provider-generation for more details. 
@@ -90,29 +109,37 @@ class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) LambdaInvocation(self, "example", - function_name=lambda_function_test.function_name, + function_name=Token.as_string(aws_lambda_function_example.function_name), input=Token.as_string( Fn.jsonencode({ - "key1": "value1", - "key2": "value2" + "credentials": { + "password": db_password.value, + "username": db_username.value + }, + "database_url": aws_db_instance_example.endpoint, + "resource_name": "database_setup" })), lifecycle_scope="CRUD" ) ``` -~> **NOTE:** `lifecycle_scope = "CRUD"` will inject a key `tf` in the input event to pass lifecycle information! This allows the lambda function to handle different lifecycle transitions uniquely. If you need to use a key `tf` in your own input JSON, the default key name can be overridden with the `terraform_key` argument. +~> **Note:** `lifecycle_scope = "CRUD"` will inject a key `tf` in the input event to pass lifecycle information! This allows the Lambda function to handle different lifecycle transitions uniquely. If you need to use a key `tf` in your own input JSON, the default key name can be overridden with the `terraform_key` argument. -The key `tf` gets added with subkeys: +The lifecycle key gets added with subkeys: * `action` - Action Terraform performs on the resource. Values are `create`, `update`, or `delete`. * `prev_input` - Input JSON payload from the previous invocation. This can be used to handle update and delete events. 
-When the resource from the example above is created, the Lambda will get following JSON payload: +When the resource from the CRUD example above is created, the Lambda will receive the following JSON payload: ```json { - "key1": "value1", - "key2": "value2", + "resource_name": "database_setup", + "database_url": "mydb.cluster-xyz.us-west-2.rds.amazonaws.com:5432", + "credentials": { + "username": "admin", + "password": "secret123" + }, "tf": { "action": "create", "prev_input": null @@ -120,33 +147,49 @@ When the resource from the example above is created, the Lambda will get followi } ``` -If the input value of `key1` changes to "valueB", then the lambda will be invoked again with the following JSON payload: +If the `database_url` changes, the Lambda will be invoked again with: ```json { - "key1": "valueB", - "key2": "value2", + "resource_name": "database_setup", + "database_url": "mydb-new.cluster-abc.us-west-2.rds.amazonaws.com:5432", + "credentials": { + "username": "admin", + "password": "secret123" + }, "tf": { "action": "update", "prev_input": { - "key1": "value1", - "key2": "value2" + "resource_name": "database_setup", + "database_url": "mydb.cluster-xyz.us-west-2.rds.amazonaws.com:5432", + "credentials": { + "username": "admin", + "password": "secret123" + } } } } ``` -When the invocation resource is removed, the final invocation will have the following JSON payload: +When the invocation resource is removed, the final invocation will have: ```json { - "key1": "valueB", - "key2": "value2", + "resource_name": "database_setup", + "database_url": "mydb-new.cluster-abc.us-west-2.rds.amazonaws.com:5432", + "credentials": { + "username": "admin", + "password": "secret123" + }, "tf": { "action": "delete", "prev_input": { - "key1": "valueB", - "key2": "value2" + "resource_name": "database_setup", + "database_url": "mydb-new.cluster-abc.us-west-2.rds.amazonaws.com:5432", + "credentials": { + "username": "admin", + "password": "secret123" + } } } } @@ -156,20 +199,21 
@@ When the invocation resource is removed, the final invocation will have the foll The following arguments are required: -* `function_name` - (Required) Name of the lambda function. -* `input` - (Required) JSON payload to the lambda function. +* `function_name` - (Required) Name of the Lambda function. +* `input` - (Required) JSON payload to the Lambda function. The following arguments are optional: * `lifecycle_scope` - (Optional) Lifecycle scope of the resource to manage. Valid values are `CREATE_ONLY` and `CRUD`. Defaults to `CREATE_ONLY`. `CREATE_ONLY` will invoke the function only on creation or replacement. `CRUD` will invoke the function on each lifecycle event, and augment the input JSON payload with additional lifecycle information. -* `qualifier` - (Optional) Qualifier (i.e., version) of the lambda function. Defaults to `$LATEST`. -* `terraform_key` - (Optional) The JSON key used to store lifecycle information in the input JSON payload. Defaults to `tf`. This additional key is only included when `lifecycle_scope` is set to `CRUD`. -* `triggers` - (Optional) Map of arbitrary keys and values that, when changed, will trigger a re-invocation. To force a re-invocation without changing these keys/values, use the [`terraform taint` command](https://www.terraform.io/docs/commands/taint.html). +* `qualifier` - (Optional) Qualifier (i.e., version) of the Lambda function. Defaults to `$LATEST`. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `terraform_key` - (Optional) JSON key used to store lifecycle information in the input JSON payload. Defaults to `tf`. This additional key is only included when `lifecycle_scope` is set to `CRUD`. 
+* `triggers` - (Optional) Map of arbitrary keys and values that, when changed, will trigger a re-invocation. To force a re-invocation without changing these keys/values, use the [`terraform taint` command](https://developer.hashicorp.com/terraform/cli/commands/taint). ## Attribute Reference This resource exports the following attributes in addition to the arguments above: -* `result` - String result of the lambda function invocation. +* `result` - String result of the Lambda function invocation. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/lambda_layer_version.html.markdown b/website/docs/cdktf/python/r/lambda_layer_version.html.markdown index 8a7bd9a8f25b..827bee23a26b 100644 --- a/website/docs/cdktf/python/r/lambda_layer_version.html.markdown +++ b/website/docs/cdktf/python/r/lambda_layer_version.html.markdown @@ -3,21 +3,23 @@ subcategory: "Lambda" layout: "aws" page_title: "AWS: aws_lambda_layer_version" description: |- - Provides a Lambda Layer Version resource. Lambda Layers allow you to reuse shared bits of code across multiple lambda functions. + Manages an AWS Lambda Layer Version. --- # Resource: aws_lambda_layer_version -Provides a Lambda Layer Version resource. Lambda Layers allow you to reuse shared bits of code across multiple lambda functions. +Manages an AWS Lambda Layer Version. Use this resource to share code and dependencies across multiple Lambda functions. -For information about Lambda Layers and how to use them, see [AWS Lambda Layers][1]. +For information about Lambda Layers and how to use them, see [AWS Lambda Layers](https://docs.aws.amazon.com/lambda/latest/dg/configuration-layers.html). -~> **NOTE:** Setting `skip_destroy` to `true` means that the AWS Provider will _not_ destroy any layer version, even when running `terraform destroy`. Layer versions are thus intentional dangling resources that are _not_ managed by Terraform and may incur extra expense in your AWS account. 
+~> **Note:** Setting `skip_destroy` to `true` means that the AWS Provider will not destroy any layer version, even when running `terraform destroy`. Layer versions are thus intentional dangling resources that are not managed by Terraform and may incur extra expense in your AWS account. ## Example Usage +### Basic Layer + ```python # DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug from constructs import Construct @@ -30,21 +32,68 @@ from imports.aws.lambda_layer_version import LambdaLayerVersion class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) - LambdaLayerVersion(self, "lambda_layer", + LambdaLayerVersion(self, "example", compatible_runtimes=["nodejs20.x"], filename="lambda_layer_payload.zip", layer_name="lambda_layer_name" ) ``` +### Layer with S3 Source + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.lambda_layer_version import LambdaLayerVersion +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + LambdaLayerVersion(self, "example", + compatible_architectures=["x86_64", "arm64"], + compatible_runtimes=["nodejs20.x", "python3.12"], + layer_name="lambda_layer_name", + s3_bucket=lambda_layer_zip.bucket, + s3_key=lambda_layer_zip.key + ) +``` + +### Layer with Multiple Runtimes and Architectures + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import Fn, Token, TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. 
+# +from imports.aws.lambda_layer_version import LambdaLayerVersion +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + LambdaLayerVersion(self, "example", + compatible_architectures=["x86_64", "arm64"], + compatible_runtimes=["nodejs18.x", "nodejs20.x", "python3.11", "python3.12" + ], + description="Shared utilities for Lambda functions", + filename="lambda_layer_payload.zip", + layer_name="multi_runtime_layer", + license_info="MIT", + source_code_hash=Token.as_string( + Fn.filebase64sha256("lambda_layer_payload.zip")) + ) +``` + ## Specifying the Deployment Package -AWS Lambda Layers expect source code to be provided as a deployment package whose structure varies depending on which `compatible_runtimes` this layer specifies. -See [Runtimes][2] for the valid values of `compatible_runtimes`. +AWS Lambda Layers expect source code to be provided as a deployment package whose structure varies depending on which `compatible_runtimes` this layer specifies. See [Runtimes](https://docs.aws.amazon.com/lambda/latest/dg/API_PublishLayerVersion.html#SSS-PublishLayerVersion-request-CompatibleRuntimes) for the valid values of `compatible_runtimes`. -Once you have created your deployment package you can specify it either directly as a local file (using the `filename` argument) or -indirectly via Amazon S3 (using the `s3_bucket`, `s3_key` and `s3_object_version` arguments). When providing the deployment -package via S3 it may be useful to use [the `aws_s3_object` resource](s3_object.html) to upload it. +Once you have created your deployment package you can specify it either directly as a local file (using the `filename` argument) or indirectly via Amazon S3 (using the `s3_bucket`, `s3_key` and `s3_object_version` arguments). When providing the deployment package via S3 it may be useful to use [the `aws_s3_object` resource](s3_object.html) to upload it. 
For larger deployment packages it is recommended by Amazon to upload via S3, since the S3 API has better support for uploading large files efficiently. @@ -52,20 +101,21 @@ For larger deployment packages it is recommended by Amazon to upload via S3, sin The following arguments are required: -* `layer_name` - (Required) Unique name for your Lambda Layer +* `layer_name` - (Required) Unique name for your Lambda Layer. The following arguments are optional: -* `compatible_architectures` - (Optional) List of [Architectures][4] this layer is compatible with. Currently `x86_64` and `arm64` can be specified. -* `compatible_runtimes` - (Optional) List of [Runtimes][2] this layer is compatible with. Up to 15 runtimes can be specified. +* `compatible_architectures` - (Optional) List of [Architectures](https://docs.aws.amazon.com/lambda/latest/dg/API_PublishLayerVersion.html#SSS-PublishLayerVersion-request-CompatibleArchitectures) this layer is compatible with. Currently `x86_64` and `arm64` can be specified. +* `compatible_runtimes` - (Optional) List of [Runtimes](https://docs.aws.amazon.com/lambda/latest/dg/API_PublishLayerVersion.html#SSS-PublishLayerVersion-request-CompatibleRuntimes) this layer is compatible with. Up to 15 runtimes can be specified. * `description` - (Optional) Description of what your Lambda Layer does. -* `filename` (Optional) Path to the function's deployment package within the local filesystem. If defined, The `s3_`-prefixed options cannot be used. -* `license_info` - (Optional) License info for your Lambda Layer. See [License Info][3]. +* `filename` - (Optional) Path to the function's deployment package within the local filesystem. If defined, the `s3_`-prefixed options cannot be used. +* `license_info` - (Optional) License info for your Lambda Layer. See [License Info](https://docs.aws.amazon.com/lambda/latest/dg/API_PublishLayerVersion.html#SSS-PublishLayerVersion-request-LicenseInfo). 
+* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `s3_bucket` - (Optional) S3 bucket location containing the function's deployment package. Conflicts with `filename`. This bucket must reside in the same AWS region where you are creating the Lambda function. * `s3_key` - (Optional) S3 key of an object containing the function's deployment package. Conflicts with `filename`. * `s3_object_version` - (Optional) Object version containing the function's deployment package. Conflicts with `filename`. * `skip_destroy` - (Optional) Whether to retain the old version of a previously deployed Lambda Layer. Default is `false`. When this is not set to `true`, changing any of `compatible_architectures`, `compatible_runtimes`, `description`, `filename`, `layer_name`, `license_info`, `s3_bucket`, `s3_key`, `s3_object_version`, or `source_code_hash` forces deletion of the existing layer version and creation of a new layer version. -* `source_code_hash` - (Optional) Virtual attribute used to trigger replacement when source code changes. Must be set to a base64-encoded SHA256 hash of the package file specified with either `filename` or `s3_key`. The usual way to set this is `${filebase64sha256("file.zip")}` (Terraform 0.11.12 or later) or `${base64sha256(file("file.zip"))}` (Terraform 0.11.11 and earlier), where "file.zip" is the local filename of the lambda layer source archive. +* `source_code_hash` - (Optional) Virtual attribute used to trigger replacement when source code changes. Must be set to a base64-encoded SHA256 hash of the package file specified with either `filename` or `s3_key`. 
The usual way to set this is `filebase64sha256("file.zip")` (Terraform 0.11.12 or later) or `base64sha256(file("file.zip"))` (Terraform 0.11.11 and earlier), where "file.zip" is the local filename of the lambda layer source archive. ## Attribute Reference @@ -80,11 +130,6 @@ This resource exports the following attributes in addition to the arguments abov * `source_code_size` - Size in bytes of the function .zip file. * `version` - Lambda Layer version. -[1]: https://docs.aws.amazon.com/lambda/latest/dg/configuration-layers.html -[2]: https://docs.aws.amazon.com/lambda/latest/dg/API_PublishLayerVersion.html#SSS-PublishLayerVersion-request-CompatibleRuntimes -[3]: https://docs.aws.amazon.com/lambda/latest/dg/API_PublishLayerVersion.html#SSS-PublishLayerVersion-request-LicenseInfo -[4]: https://docs.aws.amazon.com/lambda/latest/dg/API_PublishLayerVersion.html#SSS-PublishLayerVersion-request-CompatibleArchitectures - ## Import In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Lambda Layers using `arn`. For example: @@ -101,15 +146,13 @@ from imports.aws.lambda_layer_version import LambdaLayerVersion class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) - LambdaLayerVersion.generate_config_for_import(self, "testLayer", "arn:aws:lambda:_REGION_:_ACCOUNT_ID_:layer:_LAYER_NAME_:_LAYER_VERSION_") + LambdaLayerVersion.generate_config_for_import(self, "example", "arn:aws:lambda:us-west-2:123456789012:layer:example:1") ``` Using `terraform import`, import Lambda Layers using `arn`. 
For example: ```console -% terraform import \ - aws_lambda_layer_version.test_layer \ - arn:aws:lambda:_REGION_:_ACCOUNT_ID_:layer:_LAYER_NAME_:_LAYER_VERSION_ +% terraform import aws_lambda_layer_version.example arn:aws:lambda:us-west-2:123456789012:layer:example:1 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/lambda_layer_version_permission.html.markdown b/website/docs/cdktf/python/r/lambda_layer_version_permission.html.markdown index 622e9af5b6d6..7868d9a80c51 100644 --- a/website/docs/cdktf/python/r/lambda_layer_version_permission.html.markdown +++ b/website/docs/cdktf/python/r/lambda_layer_version_permission.html.markdown @@ -3,25 +3,84 @@ subcategory: "Lambda" layout: "aws" page_title: "AWS: aws_lambda_layer_version_permission" description: |- - Provides a Lambda Layer Version Permission resource. + Manages an AWS Lambda Layer Version Permission. --- # Resource: aws_lambda_layer_version_permission -Provides a Lambda Layer Version Permission resource. It allows you to share you own Lambda Layers to another account by account ID, to all accounts in AWS organization or even to all AWS accounts. +Manages an AWS Lambda Layer Version Permission. Use this resource to share Lambda Layers with other AWS accounts, organizations, or make them publicly accessible. -For information about Lambda Layer Permissions and how to use them, see [Using Resource-based Policies for AWS Lambda][1] +For information about Lambda Layer Permissions and how to use them, see [Using Resource-based Policies for AWS Lambda](https://docs.aws.amazon.com/lambda/latest/dg/access-control-resource-based.html#permissions-resource-xaccountlayer). -~> **NOTE:** Setting `skip_destroy` to `true` means that the AWS Provider will _not_ destroy any layer version permission, even when running `terraform destroy`. 
Layer version permissions are thus intentional dangling resources that are _not_ managed by Terraform and may incur extra expense in your AWS account. +~> **Note:** Setting `skip_destroy` to `true` means that the AWS Provider will not destroy any layer version permission, even when running `terraform destroy`. Layer version permissions are thus intentional dangling resources that are not managed by Terraform and may incur extra expense in your AWS account. ## Example Usage +### Share Layer with Specific Account + ```python # DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug from constructs import Construct -from cdktf import TerraformStack +from cdktf import Token, TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.lambda_layer_version import LambdaLayerVersion +from imports.aws.lambda_layer_version_permission import LambdaLayerVersionPermission +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + example = LambdaLayerVersion(self, "example", + compatible_runtimes=["nodejs20.x", "python3.12"], + description="Common utilities for Lambda functions", + filename="layer.zip", + layer_name="shared_utilities" + ) + aws_lambda_layer_version_permission_example = + LambdaLayerVersionPermission(self, "example_1", + action="lambda:GetLayerVersion", + layer_name=example.layer_name, + principal="123456789012", + statement_id="dev-account-access", + version_number=Token.as_number(example.version) + ) + # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. + aws_lambda_layer_version_permission_example.override_logical_id("example") +``` + +### Share Layer with Organization + +```python +# DO NOT EDIT. 
Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import Token, TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.lambda_layer_version_permission import LambdaLayerVersionPermission +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + LambdaLayerVersionPermission(self, "example", + action="lambda:GetLayerVersion", + layer_name=Token.as_string(aws_lambda_layer_version_example.layer_name), + organization_id="o-1234567890", + principal="*", + statement_id="org-wide-access", + version_number=Token.as_number(aws_lambda_layer_version_example.version) + ) +``` + +### Share Layer Publicly + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import Token, TerraformStack # # Provider bindings are generated by running `cdktf get`. # See https://cdk.tf/provider-generation for more details. @@ -30,34 +89,75 @@ from imports.aws.lambda_layer_version_permission import LambdaLayerVersionPermis class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) - LambdaLayerVersionPermission(self, "lambda_layer_permission", + LambdaLayerVersionPermission(self, "example", action="lambda:GetLayerVersion", - layer_name="arn:aws:lambda:us-west-2:123456654321:layer:test_layer1", + layer_name=Token.as_string(aws_lambda_layer_version_example.layer_name), + principal="*", + statement_id="public-access", + version_number=Token.as_number(aws_lambda_layer_version_example.version) + ) +``` + +### Multiple Account Access + +```python +# DO NOT EDIT. 
Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import Token, TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.lambda_layer_version_permission import LambdaLayerVersionPermission +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + LambdaLayerVersionPermission(self, "dev_account", + action="lambda:GetLayerVersion", + layer_name=example.layer_name, principal="111111111111", statement_id="dev-account", - version_number=1 + version_number=Token.as_number(example.version) + ) + LambdaLayerVersionPermission(self, "prod_account", + action="lambda:GetLayerVersion", + layer_name=example.layer_name, + principal="333333333333", + statement_id="prod-account", + version_number=Token.as_number(example.version) + ) + LambdaLayerVersionPermission(self, "staging_account", + action="lambda:GetLayerVersion", + layer_name=example.layer_name, + principal="222222222222", + statement_id="staging-account", + version_number=Token.as_number(example.version) ) ``` ## Argument Reference -This resource supports the following arguments: +The following arguments are required: -* `action` - (Required) Action, which will be allowed. `lambda:GetLayerVersion` value is suggested by AWS documantation. -* `layer_name` (Required) The name or ARN of the Lambda Layer, which you want to grant access to. -* `organization_id` - (Optional) An identifier of AWS Organization, which should be able to use your Lambda Layer. `principal` should be equal to `*` if `organization_id` provided. -* `principal` - (Required) AWS account ID which should be able to use your Lambda Layer. `*` can be used here, if you want to share your Lambda Layer widely. 
-* `statement_id` - (Required) The name of Lambda Layer Permission, for example `dev-account` - human readable note about what is this permission for. -* `version_number` (Required) Version of Lambda Layer, which you want to grant access to. Note: permissions only apply to a single version of a layer. -* `skip_destroy` - (Optional) Whether to retain the old version of a previously deployed Lambda Layer. Default is `false`. When this is not set to `true`, changing any of `compatible_architectures`, `compatible_runtimes`, `description`, `filename`, `layer_name`, `license_info`, `s3_bucket`, `s3_key`, `s3_object_version`, or `source_code_hash` forces deletion of the existing layer version and creation of a new layer version. +* `action` - (Required) Action that will be allowed. `lambda:GetLayerVersion` is the standard value for layer access. +* `layer_name` - (Required) Name or ARN of the Lambda Layer. +* `principal` - (Required) AWS account ID that should be able to use your Lambda Layer. Use `*` to share with all AWS accounts. +* `statement_id` - (Required) Unique identifier for the permission statement. +* `version_number` - (Required) Version of Lambda Layer to grant access to. Note: permissions only apply to a single version of a layer. + +The following arguments are optional: + +* `organization_id` - (Optional) AWS Organization ID that should be able to use your Lambda Layer. `principal` should be set to `*` when `organization_id` is provided. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `skip_destroy` - (Optional) Whether to retain the permission when the resource is destroyed. Default is `false`. 
## Attribute Reference This resource exports the following attributes in addition to the arguments above: -* `id` - The `layer_name` and `version_number`, separated by a comma (`,`). -* `revision_id` - A unique identifier for the current revision of the policy. +* `id` - Layer name and version number, separated by a comma (`,`). * `policy` - Full Lambda Layer Permission policy. +* `revision_id` - Unique identifier for the current revision of the policy. ## Import @@ -75,15 +175,13 @@ from imports.aws.lambda_layer_version_permission import LambdaLayerVersionPermis class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) - LambdaLayerVersionPermission.generate_config_for_import(self, "example", "arn:aws:lambda:us-west-2:123456654321:layer:test_layer1,1") + LambdaLayerVersionPermission.generate_config_for_import(self, "example", "arn:aws:lambda:us-west-2:123456789012:layer:shared_utilities,1") ``` -Using `terraform import`, import Lambda Layer Permissions using `layer_name` and `version_number`, separated by a comma (`,`). 
For example: +For backwards compatibility, the following legacy `terraform import` command is also supported: ```console -% terraform import aws_lambda_layer_version_permission.example arn:aws:lambda:us-west-2:123456654321:layer:test_layer1,1 +% terraform import aws_lambda_layer_version_permission.example arn:aws:lambda:us-west-2:123456789012:layer:shared_utilities,1 ``` -[1]: https://docs.aws.amazon.com/lambda/latest/dg/access-control-resource-based.html#permissions-resource-xaccountlayer - - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/lambda_permission.html.markdown b/website/docs/cdktf/python/r/lambda_permission.html.markdown index fcc60a252f96..9de4213e1062 100644 --- a/website/docs/cdktf/python/r/lambda_permission.html.markdown +++ b/website/docs/cdktf/python/r/lambda_permission.html.markdown @@ -3,18 +3,18 @@ subcategory: "Lambda" layout: "aws" page_title: "AWS: aws_lambda_permission" description: |- - Creates a Lambda function permission. + Manages an AWS Lambda permission. --- # Resource: aws_lambda_permission -Gives an external source (like an EventBridge Rule, SNS, or S3) permission to access the Lambda function. +Manages an AWS Lambda permission. Use this resource to grant external sources (e.g., EventBridge Rules, SNS, or S3) permission to invoke Lambda functions. ## Example Usage -### Basic Usage +### Basic Usage with EventBridge ```python # DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug @@ -70,7 +70,7 @@ class MyConvertedCode(TerraformStack): ) ``` -### With SNS +### SNS Integration ```python # DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug @@ -130,7 +130,7 @@ class MyConvertedCode(TerraformStack): ) ``` -### With API Gateway REST API +### API Gateway REST API Integration ```python # DO NOT EDIT. 
Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug @@ -158,7 +158,7 @@ class MyConvertedCode(TerraformStack): ) ``` -### With CloudWatch Log Group +### CloudWatch Log Group Integration ```python # DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug @@ -225,7 +225,7 @@ class MyConvertedCode(TerraformStack): aws_cloudwatch_log_subscription_filter_logging.override_logical_id("logging") ``` -### With Cross-Account Invocation Policy +### Cross-Account Function URL Access ```python # DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug @@ -255,9 +255,7 @@ class MyConvertedCode(TerraformStack): aws_lambda_permission_url.override_logical_id("url") ``` -### With `replace_triggered_by` Lifecycle Configuration - -If omitting the `qualifier` argument (which forces re-creation each time a function version is published), a `lifecycle` block can be used to ensure permissions are re-applied on any change to the underlying function. +### Automatic Permission Updates with Function Changes ```python # DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug @@ -285,27 +283,23 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -This resource supports the following arguments: - -* `action` - (Required) The AWS Lambda action you want to allow in this statement. (e.g., `lambda:InvokeFunction`) -* `event_source_token` - (Optional) The Event Source Token to validate. Used with [Alexa Skills][1]. -* `function_name` - (Required) Name of the Lambda function whose resource policy you are updating -* `function_url_auth_type` - (Optional) Lambda Function URLs [authentication type][3]. Valid values are: `AWS_IAM` or `NONE`. Only supported for `lambda:InvokeFunctionUrl` action. 
-* `principal` - (Required) The principal who is getting this permission e.g., `s3.amazonaws.com`, an AWS account ID, or AWS IAM principal, or AWS service principal such as `events.amazonaws.com` or `sns.amazonaws.com`. -* `qualifier` - (Optional) Query parameter to specify function version or alias name. The permission will then apply to the specific qualified ARN e.g., `arn:aws:lambda:aws-region:acct-id:function:function-name:2` -* `source_account` - (Optional) This parameter is used when allowing cross-account access, or for S3 and SES. The AWS account ID (without a hyphen) of the source owner. -* `source_arn` - (Optional) When the principal is an AWS service, the ARN of the specific resource within that service to grant permission to. - Without this, any resource from `principal` will be granted permission – even if that resource is from another account. - For S3, this should be the ARN of the S3 Bucket. - For EventBridge events, this should be the ARN of the EventBridge Rule. - For API Gateway, this should be the ARN of the API, as described [here][2]. -* `statement_id` - (Optional) A unique statement identifier. By default generated by Terraform. -* `statement_id_prefix` - (Optional) A statement identifier prefix. Terraform will generate a unique suffix. Conflicts with `statement_id`. -* `principal_org_id` - (Optional) The identifier for your organization in AWS Organizations. Use this to grant permissions to all the AWS accounts under this organization. 
- -[1]: https://developer.amazon.com/docs/custom-skills/host-a-custom-skill-as-an-aws-lambda-function.html#use-aws-cli -[2]: https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-control-access-using-iam-policies-to-invoke-api.html -[3]: https://docs.aws.amazon.com/lambda/latest/dg/urls-auth.html +The following arguments are required: + +* `action` - (Required) Lambda action to allow in this statement (e.g., `lambda:InvokeFunction`) +* `function_name` - (Required) Name or ARN of the Lambda function +* `principal` - (Required) AWS service or account that invokes the function (e.g., `s3.amazonaws.com`, `sns.amazonaws.com`, AWS account ID, or AWS IAM principal) + +The following arguments are optional: + +* `event_source_token` - (Optional) Event Source Token for Alexa Skills +* `function_url_auth_type` - (Optional) Lambda Function URL authentication type. Valid values: `AWS_IAM` or `NONE`. Only valid with `lambda:InvokeFunctionUrl` action +* `principal_org_id` - (Optional) AWS Organizations ID to grant permission to all accounts under this organization +* `qualifier` - (Optional) Lambda function version or alias name +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference) +* `source_account` - (Optional) AWS account ID of the source owner for cross-account access, S3, or SES +* `source_arn` - (Optional) ARN of the source resource granting permission to invoke the Lambda function +* `statement_id` - (Optional) Statement identifier. Generated by Terraform if not provided +* `statement_id_prefix` - (Optional) Statement identifier prefix. Conflicts with `statement_id` ## Attribute Reference @@ -313,6 +307,35 @@ This resource exports no additional attributes. 
## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_lambda_permission.example + identity = { + function_name = "my_test_lambda_function" + statement_id = "AllowExecutionFromCloudWatch" + } +} + +resource "aws_lambda_permission" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `function_name` (String) Lambda function name. +* `statement_id` (String) Statement ID for the permission. + +#### Optional + +* `account_id` (String) AWS Account where this resource is managed. +* `qualifier` (String) Qualifier for the function version or alias. +* `region` (String) Region where this resource is managed. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Lambda permission statements using function_name/statement_id with an optional qualifier. For example: ```python @@ -327,9 +350,11 @@ from imports.aws.lambda_permission import LambdaPermission class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) - LambdaPermission.generate_config_for_import(self, "testLambdaPermission", "my_test_lambda_function/AllowExecutionFromCloudWatch") + LambdaPermission.generate_config_for_import(self, "example", "my_test_lambda_function/AllowExecutionFromCloudWatch") ``` +Using `qualifier`: + ```python # DO NOT EDIT. 
Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug from constructs import Construct @@ -342,17 +367,14 @@ from imports.aws.lambda_permission import LambdaPermission class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) - LambdaPermission.generate_config_for_import(self, "testLambdaPermission", "my_test_lambda_function:qualifier_name/AllowExecutionFromCloudWatch") + LambdaPermission.generate_config_for_import(self, "example", "my_test_lambda_function:qualifier_name/AllowExecutionFromCloudWatch") ``` -Using `terraform import`, import Lambda permission statements using function_name/statement_id with an optional qualifier. For example: - -```console -% terraform import aws_lambda_permission.test_lambda_permission my_test_lambda_function/AllowExecutionFromCloudWatch -``` +For backwards compatibility, the following legacy `terraform import` commands are also supported: ```console -% terraform import aws_lambda_permission.test_lambda_permission my_test_lambda_function:qualifier_name/AllowExecutionFromCloudWatch +% terraform import aws_lambda_permission.example my_test_lambda_function/AllowExecutionFromCloudWatch +% terraform import aws_lambda_permission.example my_test_lambda_function:qualifier_name/AllowExecutionFromCloudWatch ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/lambda_provisioned_concurrency_config.html.markdown b/website/docs/cdktf/python/r/lambda_provisioned_concurrency_config.html.markdown index 1eb748e29b96..beeba2796252 100644 --- a/website/docs/cdktf/python/r/lambda_provisioned_concurrency_config.html.markdown +++ b/website/docs/cdktf/python/r/lambda_provisioned_concurrency_config.html.markdown @@ -3,16 +3,16 @@ subcategory: "Lambda" layout: "aws" page_title: "AWS: aws_lambda_provisioned_concurrency_config" description: |- - Manages a Lambda Provisioned Concurrency Configuration + Manages an AWS Lambda Provisioned 
Concurrency Configuration. --- # Resource: aws_lambda_provisioned_concurrency_config -Manages a Lambda Provisioned Concurrency Configuration. +Manages an AWS Lambda Provisioned Concurrency Configuration. Use this resource to configure provisioned concurrency for Lambda functions. -~> **NOTE:** Setting `skip_destroy` to `true` means that the AWS Provider will _not_ destroy a provisioned concurrency configuration, even when running `terraform destroy`. The configuration is thus an intentional dangling resource that is _not_ managed by Terraform and may incur extra expense in your AWS account. +~> **Note:** Setting `skip_destroy` to `true` means that the AWS Provider will not destroy a provisioned concurrency configuration, even when running `terraform destroy`. The configuration is thus an intentional dangling resource that is not managed by Terraform and may incur extra expense in your AWS account. ## Example Usage @@ -63,12 +63,13 @@ class MyConvertedCode(TerraformStack): The following arguments are required: * `function_name` - (Required) Name or Amazon Resource Name (ARN) of the Lambda Function. -* `provisioned_concurrent_executions` - (Required) Amount of capacity to allocate. Must be greater than or equal to `1`. +* `provisioned_concurrent_executions` - (Required) Amount of capacity to allocate. Must be greater than or equal to 1. * `qualifier` - (Required) Lambda Function version or Lambda Alias name. The following arguments are optional: -* `skip_destroy` - (Optional) Whether to retain the provisoned concurrency configuration upon destruction. Defaults to `false`. If set to `true`, the resource in simply removed from state instead. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
+* `skip_destroy` - (Optional) Whether to retain the provisioned concurrency configuration upon destruction. Defaults to `false`. If set to `true`, the resource is simply removed from state instead. ## Attribute Reference @@ -99,13 +100,13 @@ from imports.aws.lambda_provisioned_concurrency_config import LambdaProvisionedC class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) - LambdaProvisionedConcurrencyConfig.generate_config_for_import(self, "example", "my_function,production") + LambdaProvisionedConcurrencyConfig.generate_config_for_import(self, "example", "example,production") ``` Using `terraform import`, import a Lambda Provisioned Concurrency Configuration using the `function_name` and `qualifier` separated by a comma (`,`). For example: ```console -% terraform import aws_lambda_provisioned_concurrency_config.example my_function,production +% terraform import aws_lambda_provisioned_concurrency_config.example example,production ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/lambda_runtime_management_config.html.markdown b/website/docs/cdktf/python/r/lambda_runtime_management_config.html.markdown index 445ffe7520e3..0724c40b7bad 100644 --- a/website/docs/cdktf/python/r/lambda_runtime_management_config.html.markdown +++ b/website/docs/cdktf/python/r/lambda_runtime_management_config.html.markdown @@ -3,18 +3,17 @@ subcategory: "Lambda" layout: "aws" page_title: "AWS: aws_lambda_runtime_management_config" description: |- - Terraform resource for managing an AWS Lambda Runtime Management Config. + Manages an AWS Lambda Runtime Management Config. --- # Resource: aws_lambda_runtime_management_config -Terraform resource for managing an AWS Lambda Runtime Management Config. +Manages an AWS Lambda Runtime Management Config. Use this resource to control how Lambda updates the runtime for your function. 
Refer to the [AWS Lambda documentation](https://docs.aws.amazon.com/lambda/latest/dg/lambda-runtimes.html) for supported runtimes. -~> Deletion of this resource returns the runtime update mode to `Auto` (the default behavior). -To leave the configured runtime management options in-place, use a [`removed` block](https://developer.hashicorp.com/terraform/language/resources/syntax#removing-resources) with the destroy lifecycle set to `false`. +~> **Note:** Deletion of this resource returns the runtime update mode to `Auto` (the default behavior). To leave the configured runtime management options in-place, use a [`removed` block](https://developer.hashicorp.com/terraform/language/resources/syntax#removing-resources) with the destroy lifecycle set to `false`. ## Example Usage @@ -23,7 +22,7 @@ To leave the configured runtime management options in-place, use a [`removed` bl ```python # DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug from constructs import Construct -from cdktf import TerraformStack +from cdktf import Token, TerraformStack # # Provider bindings are generated by running `cdktf get`. # See https://cdk.tf/provider-generation for more details. @@ -33,17 +32,17 @@ class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) LambdaRuntimeManagementConfig(self, "example", - function_name=test.function_name, + function_name=Token.as_string(aws_lambda_function_example.function_name), update_runtime_on="FunctionUpdate" ) ``` -### `Manual` Update +### Manual Update ```python # DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug from constructs import Construct -from cdktf import TerraformStack +from cdktf import Token, TerraformStack # # Provider bindings are generated by running `cdktf get`. # See https://cdk.tf/provider-generation for more details. 
@@ -53,13 +52,13 @@ class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) LambdaRuntimeManagementConfig(self, "example", - function_name=test.function_name, + function_name=Token.as_string(aws_lambda_function_example.function_name), runtime_version_arn="arn:aws:lambda:us-east-1::runtime:abcd1234", update_runtime_on="Manual" ) ``` -~> Once the runtime update mode is set to `Manual`, the `aws_lambda_function` `runtime` cannot be updated. To upgrade a runtime, the `update_runtime_on` argument must be set to `Auto` or `FunctionUpdate` prior to changing the function's `runtime` argument. +~> **Note:** Once the runtime update mode is set to `Manual`, the `aws_lambda_function` `runtime` cannot be updated. To upgrade a runtime, the `update_runtime_on` argument must be set to `Auto` or `FunctionUpdate` prior to changing the function's `runtime` argument. ## Argument Reference @@ -70,6 +69,7 @@ The following arguments are required: The following arguments are optional: * `qualifier` - (Optional) Version of the function. This can be `$LATEST` or a published version number. If omitted, this resource will manage the runtime configuration for `$LATEST`. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `runtime_version_arn` - (Optional) ARN of the runtime version. Only required when `update_runtime_on` is `Manual`. * `update_runtime_on` - (Optional) Runtime update mode. Valid values are `Auto`, `FunctionUpdate`, and `Manual`. When a function is created, the default mode is `Auto`. 
@@ -95,13 +95,13 @@ from imports.aws.lambda_runtime_management_config import LambdaRuntimeManagement class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) - LambdaRuntimeManagementConfig.generate_config_for_import(self, "example", "my-function,$LATEST") + LambdaRuntimeManagementConfig.generate_config_for_import(self, "example", "example,$LATEST") ``` Using `terraform import`, import Lambda Runtime Management Config using a comma-delimited string combining `function_name` and `qualifier`. For example: ```console -% terraform import aws_lambda_runtime_management_config.example my-function,$LATEST +% terraform import aws_lambda_runtime_management_config.example example,$LATEST ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/launch_configuration.html.markdown b/website/docs/cdktf/python/r/launch_configuration.html.markdown index a41f0ce1fe4c..1045c112123c 100644 --- a/website/docs/cdktf/python/r/launch_configuration.html.markdown +++ b/website/docs/cdktf/python/r/launch_configuration.html.markdown @@ -171,6 +171,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `associate_public_ip_address` - (Optional) Associate a public ip address with an instance in a VPC. * `ebs_block_device` - (Optional) Additional EBS block devices to attach to the instance. See [Block Devices](#block-devices) below for details. * `ebs_optimized` - (Optional) If true, the launched EC2 instance will be EBS-optimized. @@ -280,4 +281,4 @@ Using `terraform import`, import launch configurations using the `name`. 
For exa % terraform import aws_launch_configuration.as_conf terraform-lg-123456 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/launch_template.html.markdown b/website/docs/cdktf/python/r/launch_template.html.markdown index a03b4e91e437..73b859f78d7d 100644 --- a/website/docs/cdktf/python/r/launch_template.html.markdown +++ b/website/docs/cdktf/python/r/launch_template.html.markdown @@ -47,13 +47,6 @@ class MyConvertedCode(TerraformStack): disable_api_stop=True, disable_api_termination=True, ebs_optimized=Token.as_string(True), - elastic_gpu_specifications=[LaunchTemplateElasticGpuSpecifications( - type="test" - ) - ], - elastic_inference_accelerator=LaunchTemplateElasticInferenceAccelerator( - type="eia1.medium" - ), iam_instance_profile=LaunchTemplateIamInstanceProfile( name="test" ), @@ -103,6 +96,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `block_device_mappings` - (Optional) Specify volumes to attach to the instance besides the volumes specified by the AMI. See [Block Devices](#block-devices) below for details. * `capacity_reservation_specification` - (Optional) Targeting for EC2 capacity reservations. See [Capacity Reservation Specification](#capacity-reservation-specification) below for more details. @@ -115,9 +109,6 @@ This resource supports the following arguments: * `disable_api_termination` - (Optional) If `true`, enables [EC2 Instance Termination Protection](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_ChangingDisableAPITermination.html) * `ebs_optimized` - (Optional) If `true`, the launched EC2 instance will be EBS-optimized. 
-* `elastic_gpu_specifications` - (Optional) **DEPRECATED** The elastic GPU to attach to the instance. See [Elastic GPU](#elastic-gpu) - below for more details. -* `elastic_inference_accelerator` - (Optional) **DEPRECATED** Configuration block containing an Elastic Inference Accelerator to attach to the instance. See [Elastic Inference Accelerator](#elastic-inference-accelerator) below for more details. * `enclave_options` - (Optional) Enable Nitro Enclaves on launched instances. See [Enclave Options](#enclave-options) below for more details. * `hibernation_options` - (Optional) The hibernation options for the instance. See [Hibernation Options](#hibernation-options) below for more details. * `iam_instance_profile` - (Optional) The IAM Instance Profile to launch the instance with. See [Instance Profile](#instance-profile) @@ -188,7 +179,7 @@ The `ebs` block supports the following: The `capacity_reservation_specification` block supports the following: -* `capacity_reservation_preference` - Indicates the instance's Capacity Reservation preferences. Can be `open` or `none`. (Default `none`). +* `capacity_reservation_preference` - Indicates the instance's Capacity Reservation preferences. Can be `capacity-reservations-only`, `open` or `none`. If `capacity_reservation_id` or `capacity_reservation_resource_group_arn` is specified in `capacity_reservation_target` block, either omit `capacity_reservation_preference` or set it to `capacity-reservations-only`. * `capacity_reservation_target` - Used to target a specific Capacity Reservation: The `capacity_reservation_target` block supports the following: @@ -219,22 +210,6 @@ The `credit_specification` block supports the following: T3 instances are launched as `unlimited` by default. T2 instances are launched as `standard` by default. -### Elastic GPU - -Attach an elastic GPU the instance. 
- -The `elastic_gpu_specifications` block supports the following: - -* `type` - The [Elastic GPU Type](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/elastic-graphics.html#elastic-graphics-basics) - -### Elastic Inference Accelerator - -**DEPRECATED** Attach an Elastic Inference Accelerator to the instance. Additional information about Elastic Inference in EC2 can be found in the [EC2 User Guide](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/elastic-inference.html). - -The `elastic_inference_accelerator` configuration block supports the following: - -* `type` - (Required) Accelerator type. - ### Enclave Options The `enclave_options` block supports the following: @@ -479,7 +454,8 @@ The `placement` block supports the following: * `affinity` - (Optional) The affinity setting for an instance on a Dedicated Host. * `availability_zone` - (Optional) The Availability Zone for the instance. -* `group_name` - (Optional) The name of the placement group for the instance. +* `group_id` - (Optional) The ID of the placement group for the instance. Conflicts with `group_name`. +* `group_name` - (Optional) The name of the placement group for the instance. Conflicts with `group_id`. * `host_id` - (Optional) The ID of the Dedicated Host for the instance. * `host_resource_group_arn` - (Optional) The ARN of the Host Resource Group in which to launch instances. * `spread_domain` - (Optional) Reserved for future use. @@ -537,4 +513,4 @@ Using `terraform import`, import Launch Templates using the `id`. 
For example: % terraform import aws_launch_template.web lt-12345678 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/lb.html.markdown b/website/docs/cdktf/python/r/lb.html.markdown index 8ecf692e1e9d..79d9549c8645 100644 --- a/website/docs/cdktf/python/r/lb.html.markdown +++ b/website/docs/cdktf/python/r/lb.html.markdown @@ -134,6 +134,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `access_logs` - (Optional) Access Logs block. See below. * `connection_logs` - (Optional) Connection Logs block. See below. Only valid for Load Balancers of type `application`. * `client_keep_alive` - (Optional) Client keep alive value in seconds. The valid range is 60-604800 seconds. The default is 3600 seconds. @@ -159,6 +160,7 @@ This resource supports the following arguments: * `name_prefix` - (Optional) Creates a unique name beginning with the specified prefix. Conflicts with `name`. * `security_groups` - (Optional) List of security group IDs to assign to the LB. Only valid for Load Balancers of type `application` or `network`. For load balancers of type `network` security groups cannot be added if none are currently present, and cannot all be removed once added. If either of these conditions are met, this will force a recreation of the resource. * `preserve_host_header` - (Optional) Whether the Application Load Balancer should preserve the Host header in the HTTP request and send it to the target without any change. Defaults to `false`. +* `secondary_ips_auto_assigned_per_subnet` - (Optional) The number of secondary IP addresses to configure for your load balancer nodes. 
Only valid for Load Balancers of type `network`. The valid range is 0-7. When decreased, this will force a recreation of the resource. Default: `0`. * `subnet_mapping` - (Optional) Subnet mapping block. See below. For Load Balancers of type `network` subnet mappings can only be added. * `subnets` - (Optional) List of subnet IDs to attach to the LB. For Load Balancers of type `network` subnets can only be added (see [Availability Zones](https://docs.aws.amazon.com/elasticloadbalancing/latest/network/network-load-balancers.html#availability-zones)), deleting a subnet for load balancers of type `network` will force a recreation of the resource. * `tags` - (Optional) Map of tags to assign to the resource. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. @@ -199,10 +201,9 @@ This resource supports the following arguments: This resource exports the following attributes in addition to the arguments above: -* `arn` - ARN of the load balancer (matches `id`). +* `arn` - ARN of the load balancer. * `arn_suffix` - ARN suffix for use with CloudWatch Metrics. * `dns_name` - DNS name of the load balancer. -* `id` - ARN of the load balancer (matches `arn`). * `subnet_mapping.*.outpost_id` - ID of the Outpost containing the load balancer. * `tags_all` - Map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). * `zone_id` - Canonical hosted zone ID of the load balancer (to be used in a Route 53 Alias record). 
@@ -217,6 +218,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_lb.example + identity = { + "arn" = "arn:aws:elasticloadbalancing:us-west-2:123456789012:loadbalancer/app/my-load-balancer/50dc6c495c0c9188" + } +} + +resource "aws_lb" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the load balancer. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import LBs using their ARN. For example: ```python @@ -240,4 +262,4 @@ Using `terraform import`, import LBs using their ARN. For example: % terraform import aws_lb.bar arn:aws:elasticloadbalancing:us-west-2:123456789012:loadbalancer/app/my-load-balancer/50dc6c495c0c9188 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/lb_cookie_stickiness_policy.html.markdown b/website/docs/cdktf/python/r/lb_cookie_stickiness_policy.html.markdown index 99953e28fb27..e9007ddb74f5 100644 --- a/website/docs/cdktf/python/r/lb_cookie_stickiness_policy.html.markdown +++ b/website/docs/cdktf/python/r/lb_cookie_stickiness_policy.html.markdown @@ -50,6 +50,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The name of the stickiness policy. 
* `load_balancer` - (Required) The load balancer to which the policy should be attached. @@ -69,4 +70,4 @@ This resource exports the following attributes in addition to the arguments abov * `lb_port` - The load balancer port to which the policy is applied. * `cookie_expiration_period` - The time period after which the session cookie is considered stale, expressed in seconds. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/lb_listener.html.markdown b/website/docs/cdktf/python/r/lb_listener.html.markdown index 477f3d3c5611..7d09482feb13 100644 --- a/website/docs/cdktf/python/r/lb_listener.html.markdown +++ b/website/docs/cdktf/python/r/lb_listener.html.markdown @@ -52,6 +52,50 @@ class MyConvertedCode(TerraformStack): aws_lb_listener_front_end.override_logical_id("front_end") ``` +With weighted target groups: + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import Token, TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. 
+# +from imports.aws.lb import Lb +from imports.aws.lb_listener import LbListener +from imports.aws.lb_target_group import LbTargetGroup +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + front_end = Lb(self, "front_end") + front_end_blue = LbTargetGroup(self, "front_end_blue") + front_end_green = LbTargetGroup(self, "front_end_green") + aws_lb_listener_front_end = LbListener(self, "front_end_3", + certificate_arn="arn:aws:iam::187416307283:server-certificate/test_cert_rab3wuqwgja25ct3n4jdj2tzu4", + default_action=[LbListenerDefaultAction( + forward=LbListenerDefaultActionForward( + target_group=[LbListenerDefaultActionForwardTargetGroup( + arn=front_end_blue.arn, + weight=100 + ), LbListenerDefaultActionForwardTargetGroup( + arn=front_end_green.arn, + weight=0 + ) + ] + ), + type="forward" + ) + ], + load_balancer_arn=front_end.arn, + port=Token.as_number("443"), + protocol="HTTPS", + ssl_policy="ELBSecurityPolicy-2016-08" + ) + # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. + aws_lb_listener_front_end.override_logical_id("front_end") +``` + To a NLB: ```python @@ -343,6 +387,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `alpn_policy` - (Optional) Name of the Application-Layer Protocol Negotiation (ALPN) policy. Can be set if `protocol` is `TLS`. Valid values are `HTTP1Only`, `HTTP2Only`, `HTTP2Optional`, `HTTP2Preferred`, and `None`. * `certificate_arn` - (Optional) ARN of the default SSL server certificate. Exactly one certificate is required if the protocol is HTTPS. 
For adding additional SSL certificates, see the [`aws_lb_listener_certificate` resource](/docs/providers/aws/r/lb_listener_certificate.html). * `mutual_authentication` - (Optional) The mutual authentication configuration information. See below. @@ -381,6 +426,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `authenticate_cognito` - (Optional) Configuration block for using Amazon Cognito to authenticate users. Specify only when `type` is `authenticate-cognito`. See below. * `authenticate_oidc` - (Optional) Configuration block for an identity provider that is compliant with OpenID Connect (OIDC). Specify only when `type` is `authenticate-oidc`. See below. * `fixed_response` - (Optional) Information for creating an action that returns a custom HTTP response. Required if `type` is `fixed-response`. @@ -399,6 +445,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `authentication_request_extra_params` - (Optional) Query parameters to include in the redirect request to the authorization endpoint. Max: 10. See below. * `on_unauthenticated_request` - (Optional) Behavior if the user is not authenticated. Valid values are `deny`, `allow` and `authenticate`. * `scope` - (Optional) Set of user claims to be requested from the IdP. 
@@ -423,6 +470,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `authentication_request_extra_params` - (Optional) Query parameters to include in the redirect request to the authorization endpoint. Max: 10. * `on_unauthenticated_request` - (Optional) Behavior if the user is not authenticated. Valid values: `deny`, `allow` and `authenticate` * `scope` - (Optional) Set of user claims to be requested from the IdP. @@ -437,6 +485,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `message_body` - (Optional) Message body. * `status_code` - (Optional) HTTP response code. Valid values are `2XX`, `4XX`, or `5XX`. @@ -448,6 +497,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `stickiness` - (Optional) Configuration block for target group stickiness for the rule. See below. 
##### target_group @@ -458,6 +508,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `weight` - (Optional) Weight. The range is 0 to 999. ##### stickiness @@ -468,6 +519,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `enabled` - (Optional) Whether target group stickiness is enabled. Default is `false`. #### redirect @@ -480,6 +532,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `host` - (Optional) Hostname. This component is not percent-encoded. The hostname can contain `#{host}`. Defaults to `#{host}`. * `path` - (Optional) Absolute path, starting with the leading "/". This component is not percent-encoded. The path can contain #{host}, #{path}, and #{port}. Defaults to `/#{path}`. * `port` - (Optional) Port. Specify a value from `1` to `65535` or `#{port}`. Defaults to `#{port}`. @@ -488,23 +541,44 @@ The following arguments are optional: ### mutual_authentication -* `advertise_trust_store_ca_names` - (Optional) Valid values are `off` and `on`. 
-* `ignore_client_certificate_expiry` - (Optional) Whether client certificate expiry is ignored. Default is `false`. -* `mode` - (Required) Valid values are `off`, `verify` and `passthrough`. -* `trust_store_arn` - (Required) ARN of the elbv2 Trust Store. +* `advertise_trust_store_ca_names` - (Optional when `mode` is `verify`, invalid otherwise) Valid values are `off` and `on`. +* `ignore_client_certificate_expiry` - (Optional when `mode` is `verify`, invalid otherwise) Whether client certificate expiry is ignored. + Default is `false`. +* `mode` - (Required) Valid values are `off`, `passthrough`, and `verify`. +* `trust_store_arn` - (Required when `mode` is `verify`, invalid otherwise) ARN of the elbv2 Trust Store. ## Attribute Reference This resource exports the following attributes in addition to the arguments above: -* `arn` - ARN of the listener (matches `id`). -* `id` - ARN of the listener (matches `arn`). +* `arn` - ARN of the listener. * `tags_all` - A map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). ~> **Note:** When importing a listener with a forward-type default action, you must include both a top-level target group ARN and a `forward` block with a `target_group` and `arn` to avoid import differences. ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_lb_listener.example + identity = { + "arn" = "arn:aws:elasticloadbalancing:us-west-2:187416307283:listener/app/front-end-alb/8e4497da625e2d8a/9ab28ade35828f96" + } +} + +resource "aws_lb_listener" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the load balancer listener. 
+ In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import listeners using their ARN. For example: ```python @@ -528,4 +602,4 @@ Using `terraform import`, import listeners using their ARN. For example: % terraform import aws_lb_listener.front_end arn:aws:elasticloadbalancing:us-west-2:187416307283:listener/app/front-end-alb/8e4497da625e2d8a/9ab28ade35828f96 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/lb_listener_certificate.html.markdown b/website/docs/cdktf/python/r/lb_listener_certificate.html.markdown index 1c89e679c909..98d922c4c6c0 100644 --- a/website/docs/cdktf/python/r/lb_listener_certificate.html.markdown +++ b/website/docs/cdktf/python/r/lb_listener_certificate.html.markdown @@ -53,6 +53,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `listener_arn` - (Required, Forces New Resource) The ARN of the listener to which to attach the certificate. * `certificate_arn` - (Required, Forces New Resource) The ARN of the certificate to attach to the listener. 
@@ -87,4 +88,4 @@ Using `terraform import`, import Listener Certificates using the listener arn an % terraform import aws_lb_listener_certificate.example arn:aws:elasticloadbalancing:us-west-2:123456789012:listener/app/test/8e4497da625e2d8a/9ab28ade35828f96/67b3d2d36dd7c26b_arn:aws:iam::123456789012:server-certificate/tf-acc-test-6453083910015726063 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/lb_listener_rule.html.markdown b/website/docs/cdktf/python/r/lb_listener_rule.html.markdown index b03a7efc7750..40c0b99c02b9 100644 --- a/website/docs/cdktf/python/r/lb_listener_rule.html.markdown +++ b/website/docs/cdktf/python/r/lb_listener_rule.html.markdown @@ -195,6 +195,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `listener_arn` - (Required, Forces New Resource) The ARN of the listener to which to attach the rule. * `priority` - (Optional) The priority for the rule between `1` and `50000`. Leaving it unset will automatically set the rule with next available priority after currently existing highest rule. A listener can't have multiple rules with the same priority. * `action` - (Required) An Action block. Action blocks are documented below. @@ -327,6 +328,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. 
For example: + +```terraform +import { + to = aws_lb_listener_rule.example + identity = { + "arn" = "arn:aws:elasticloadbalancing:us-west-2:123456789012:listener-rule/app/my-load-balancer/50dc6c495c0c9188/f2f7dc8efc522ab2/9683b2d02a6cabee" + } +} + +resource "aws_lb_listener_rule" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the load balancer listener rule. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import rules using their ARN. For example: ```python @@ -350,4 +372,4 @@ Using `terraform import`, import rules using their ARN. For example: % terraform import aws_lb_listener_rule.front_end arn:aws:elasticloadbalancing:us-west-2:187416307283:listener-rule/app/test/8e4497da625e2d8a/9ab28ade35828f96/67b3d2d36dd7c26b ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/lb_ssl_negotiation_policy.html.markdown b/website/docs/cdktf/python/r/lb_ssl_negotiation_policy.html.markdown index 2ae0327cb70b..c00a5951328d 100644 --- a/website/docs/cdktf/python/r/lb_ssl_negotiation_policy.html.markdown +++ b/website/docs/cdktf/python/r/lb_ssl_negotiation_policy.html.markdown @@ -73,6 +73,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The name of the SSL negotiation policy. * `load_balancer` - (Required) The load balancer to which the policy should be attached. 
@@ -98,4 +99,4 @@ This resource exports the following attributes in addition to the arguments abov * `lb_port` - The load balancer port to which the policy is applied. * `attribute` - The SSL Negotiation policy attributes. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/lb_target_group.html.markdown b/website/docs/cdktf/python/r/lb_target_group.html.markdown index 573aa0e34926..5b17aee8f725 100644 --- a/website/docs/cdktf/python/r/lb_target_group.html.markdown +++ b/website/docs/cdktf/python/r/lb_target_group.html.markdown @@ -174,6 +174,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `connection_termination` - (Optional) Whether to terminate connections at the end of the deregistration timeout on Network Load Balancers. See [doc](https://docs.aws.amazon.com/elasticloadbalancing/latest/network/load-balancer-target-groups.html#deregistration-delay) for more information. Default is `false`. * `deregistration_delay` - (Optional) Amount time for Elastic Load Balancing to wait before changing the state of a deregistering target from draining to unused. The range is 0-3600 seconds. The default value is 300 seconds. * `health_check` - (Optional, Maximum of 1) Health Check configuration block. Detailed below. @@ -298,6 +299,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. 
For example: + +```terraform +import { + to = aws_lb_target_group.example + identity = { + "arn" = "arn:aws:elasticloadbalancing:us-west-2:123456789012:targetgroup/my-targets/73e2d6bc24d8a067" + } +} + +resource "aws_lb_target_group" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the target group. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Target Groups using their ARN. For example: ```python @@ -321,4 +343,4 @@ Using `terraform import`, import Target Groups using their ARN. For example: % terraform import aws_lb_target_group.app_front_end arn:aws:elasticloadbalancing:us-west-2:187416307283:targetgroup/app-front-end/20cfe21448b66314 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/lb_target_group_attachment.html.markdown b/website/docs/cdktf/python/r/lb_target_group_attachment.html.markdown index adcd0f7fc78a..2eba5778f52a 100644 --- a/website/docs/cdktf/python/r/lb_target_group_attachment.html.markdown +++ b/website/docs/cdktf/python/r/lb_target_group_attachment.html.markdown @@ -142,6 +142,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `availability_zone` - (Optional) The Availability Zone where the IP address of the target is to be registered. If the private IP address is outside of the VPC scope, this value must be set to `all`. * `port` - (Optional) The port on which targets receive traffic. 
@@ -155,4 +156,4 @@ This resource exports the following attributes in addition to the arguments abov You cannot import Target Group Attachments. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/lb_trust_store.html.markdown b/website/docs/cdktf/python/r/lb_trust_store.html.markdown index 9ba204d7b2ca..e85384b71ff9 100644 --- a/website/docs/cdktf/python/r/lb_trust_store.html.markdown +++ b/website/docs/cdktf/python/r/lb_trust_store.html.markdown @@ -52,10 +52,10 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `ca_certificates_bundle_s3_bucket` - (Required) S3 Bucket name holding the client certificate CA bundle. * `ca_certificates_bundle_s3_key` - (Required) S3 object key holding the client certificate CA bundle. * `ca_certificates_bundle_s3_object_version` - (Optional) Version Id of CA bundle S3 bucket object, if versioned, defaults to latest if omitted. - * `name_prefix` - (Optional, Forces new resource) Creates a unique name beginning with the specified prefix. Conflicts with `name`. Cannot be longer than 6 characters. * `name` - (Optional, Forces new resource) Name of the Trust Store. If omitted, Terraform will assign a random, unique name. This name must be unique per region per account, can have a maximum of 32 characters, must contain only alphanumeric characters or hyphens, and must not begin or end with a hyphen. * `tags` - (Optional) Map of tags to assign to the resource. 
If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. @@ -72,6 +72,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_lb_trust_store.example + identity = { + "arn" = "arn:aws:elasticloadbalancing:us-west-2:123456789012:truststore/my-trust-store/73e2d6bc24d8a067" + } +} + +resource "aws_lb_trust_store" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the trust store. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Trust Stores using their ARN. For example: ```python @@ -95,4 +116,4 @@ Using `terraform import`, import Target Groups using their ARN. For example: % terraform import aws_lb_trust_store.example arn:aws:elasticloadbalancing:us-west-2:187416307283:truststore/my-trust-store/20cfe21448b66314 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/lb_trust_store_revocation.html.markdown b/website/docs/cdktf/python/r/lb_trust_store_revocation.html.markdown index 1cc3bc33e19f..368118ec5c24 100644 --- a/website/docs/cdktf/python/r/lb_trust_store_revocation.html.markdown +++ b/website/docs/cdktf/python/r/lb_trust_store_revocation.html.markdown @@ -47,6 +47,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `trust_store_arn` - (Required) Trust Store ARN. * `revocations_s3_bucket` - (Required) S3 Bucket name holding the client certificate CA bundle. * `revocations_s3_key` - (Required) S3 object key holding the client certificate CA bundle. @@ -84,4 +85,4 @@ Using `terraform import`, import Trust Store Revocations using their ARN. For ex % terraform import aws_lb_trust_store_revocation.example arn:aws:elasticloadbalancing:us-west-2:187416307283:truststore/my-trust-store/20cfe21448b66314,6 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/lex_bot.html.markdown b/website/docs/cdktf/python/r/lex_bot.html.markdown index fa5a08d3678d..ad6abc384850 100644 --- a/website/docs/cdktf/python/r/lex_bot.html.markdown +++ b/website/docs/cdktf/python/r/lex_bot.html.markdown @@ -63,6 +63,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `abort_statement` - (Required) The message that Amazon Lex uses to abort a conversation. Attributes are documented under [statement](#statement). * `child_directed` - (Required) By specifying true, you confirm that your use of Amazon Lex is related to a website, program, or other application that is directed or targeted, in whole or in part, to children under age 13 and subject to COPPA. 
For more information see the [Amazon Lex FAQ](https://aws.amazon.com/lex/faqs#data-security) and the [Amazon Lex PutBot API Docs](https://docs.aws.amazon.com/lex/latest/dg/API_PutBot.html#lex-PutBot-request-childDirected). * `clarification_prompt` - (Required) The message that Amazon Lex uses when it doesn't understand the user's request. Attributes are documented under [prompt](#prompt). @@ -168,4 +169,4 @@ Using `terraform import`, import bots using their name. For example: % terraform import aws_lex_bot.order_flowers_bot OrderFlowers ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/lex_bot_alias.html.markdown b/website/docs/cdktf/python/r/lex_bot_alias.html.markdown index 95889e506f00..73aa0e3a2688 100644 --- a/website/docs/cdktf/python/r/lex_bot_alias.html.markdown +++ b/website/docs/cdktf/python/r/lex_bot_alias.html.markdown @@ -39,6 +39,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `bot_name` - (Required) The name of the bot. * `bot_version` - (Required) The version of the bot. * `conversation_logs` - (Optional) The settings that determine how Amazon Lex uses conversation logs for the alias. Attributes are documented under [conversation_logs](#conversation_logs). 
@@ -104,4 +105,4 @@ Using `terraform import`, import bot aliases using an ID with the format `bot_na % terraform import aws_lex_bot_alias.order_flowers_prod OrderFlowers:OrderFlowersProd ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/lex_intent.html.markdown b/website/docs/cdktf/python/r/lex_intent.html.markdown index 2940ee007145..81602e2810e3 100644 --- a/website/docs/cdktf/python/r/lex_intent.html.markdown +++ b/website/docs/cdktf/python/r/lex_intent.html.markdown @@ -108,6 +108,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `conclusion_statement` - (Optional) The statement that you want Amazon Lex to convey to the user after the intent is successfully fulfilled by the Lambda function. This element is relevant only if you provide a Lambda function in the `fulfillment_activity`. If you return the intent to the client @@ -273,4 +274,4 @@ Using `terraform import`, import intents using their name. 
For example: % terraform import aws_lex_intent.order_flowers_intent OrderFlowers ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/lex_slot_type.html.markdown b/website/docs/cdktf/python/r/lex_slot_type.html.markdown index f434c6485a7d..a46268452b9a 100644 --- a/website/docs/cdktf/python/r/lex_slot_type.html.markdown +++ b/website/docs/cdktf/python/r/lex_slot_type.html.markdown @@ -47,6 +47,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `enumeration_value` - (Required) A list of EnumerationValue objects that defines the values that the slot type can take. Each value can have a list of synonyms, which are additional values that help train the machine learning model about the values that it resolves for a slot. Attributes are @@ -115,4 +116,4 @@ Using `terraform import`, import slot types using their name. For example: % terraform import aws_lex_slot_type.flower_types FlowerTypes ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/lexv2models_bot.html.markdown b/website/docs/cdktf/python/r/lexv2models_bot.html.markdown index d2875bcdc579..df4a1ece2b07 100644 --- a/website/docs/cdktf/python/r/lexv2models_bot.html.markdown +++ b/website/docs/cdktf/python/r/lexv2models_bot.html.markdown @@ -77,6 +77,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `members` - List of bot members in a network to be created. See [`bot_members`](#bot-members). * `tags` - List of tags to add to the bot. You can only add tags when you create a bot. * `type` - Type of a bot to create. Possible values are `"Bot"` and `"BotNetwork"`. @@ -134,4 +135,4 @@ Using `terraform import`, import Lex V2 Models Bot using the `id`. For example: % terraform import aws_lexv2models_bot.example bot-id-12345678 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/lexv2models_bot_locale.html.markdown b/website/docs/cdktf/python/r/lexv2models_bot_locale.html.markdown index 06cc6b0f0c79..6d47b6fd0a24 100644 --- a/website/docs/cdktf/python/r/lexv2models_bot_locale.html.markdown +++ b/website/docs/cdktf/python/r/lexv2models_bot_locale.html.markdown @@ -74,6 +74,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` - Description of the bot locale. Use this to help identify the bot locale in lists. * `voice_settings` - Amazon Polly voice ID that Amazon Lex uses for voice interaction with the user. See [`voice_settings`](#voice-settings). @@ -122,4 +123,4 @@ Using `terraform import`, import Lex V2 Models Bot Locale using the `id`. 
For ex % terraform import aws_lexv2models_bot_locale.example en_US,abcd-12345678,1 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/lexv2models_bot_version.html.markdown b/website/docs/cdktf/python/r/lexv2models_bot_version.html.markdown index 8100b67a327f..48eb911d5b39 100644 --- a/website/docs/cdktf/python/r/lexv2models_bot_version.html.markdown +++ b/website/docs/cdktf/python/r/lexv2models_bot_version.html.markdown @@ -43,12 +43,13 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `bot_id` - (Required) Idientifier of the bot to create the version for. * `locale_specification` - (Required) Specifies the locales that Amazon Lex adds to this version. You can choose the draft version or any other previously published version for each locale. When you specify a source version, the locale data is copied from the source version to the new version. - - The attribute value is a map with one or more entries, each of which has a locale name as the key and an object with the following attribute as the value: - * `sourceBotVersion` - (Required) The version of a bot used for a bot locale. Valid values: `DRAFT`, a numeric version. * `description` - (Optional) A description of the version. Use the description to help identify the version in lists. +* `sourceBotVersion` - (Required) The version of a bot used for a bot locale. Valid values: `DRAFT`, a numeric version. 
+ +The `locale_specification` attribute value is a map with one or more entries, each of which has a locale name as the key and an object with the `sourceBotVersion` attribute (described above) as the value. ## Attribute Reference @@ -89,4 +90,4 @@ Using `terraform import`, import Lex V2 Models Bot Version using the `id`. For e % terraform import aws_lexv2models_bot_version.example id-12345678,1 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/lexv2models_intent.html.markdown b/website/docs/cdktf/python/r/lexv2models_intent.html.markdown index 4d1d52637d5f..43bdeb7d8d64 100644 --- a/website/docs/cdktf/python/r/lexv2models_intent.html.markdown +++ b/website/docs/cdktf/python/r/lexv2models_intent.html.markdown @@ -178,6 +178,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `closing_setting` - (Optional) Configuration block for the response that Amazon Lex sends to the user when the intent is closed. See [`closing_setting`](#closing_setting). * `confirmation_setting` - (Optional) Configuration block for prompts that Amazon Lex sends to the user to confirm the completion of an intent. If the user answers "no," the settings contain a statement that is sent to the user to end the intent. If you configure this block without `prompt_specification.*.prompt_attempts_specification`, AWS will provide default configurations for `Initial` and `Retry1` `prompt_attempts_specification`s. This will cause Terraform to report differences. Use the `confirmation_setting` configuration above in the [Basic Usage](#basic-usage) example to avoid differences resulting from AWS default configuration.
See [`confirmation_setting`](#confirmation_setting). * `description` - (Optional) Description of the intent. Use the description to help identify the intent in lists. @@ -582,4 +583,4 @@ Using `terraform import`, import Lex V2 Models Intent using the `intent_id:bot_i % terraform import aws_lexv2models_intent.example intent-42874:bot-11376:DRAFT:en_US ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/lexv2models_slot.html.markdown b/website/docs/cdktf/python/r/lexv2models_slot.html.markdown index 0520b2ed3389..5a56f411377f 100644 --- a/website/docs/cdktf/python/r/lexv2models_slot.html.markdown +++ b/website/docs/cdktf/python/r/lexv2models_slot.html.markdown @@ -159,6 +159,7 @@ See the [`value_elicitation_setting` argument reference](#value_elicitation_sett The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` - (Optional) Description of the slot. * `multiple_values_setting` - (Optional) Whether the slot returns multiple values in one response. See the [`multiple_values_setting` argument reference](#multiple_values_setting-argument-reference) below. @@ -305,4 +306,4 @@ Using `terraform import`, import Lex V2 Models Slot using the `id`. 
For example: % terraform import aws_lexv2models_slot.example bot-1234,1,intent-5678,en-US,slot-9012 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/lexv2models_slot_type.html.markdown b/website/docs/cdktf/python/r/lexv2models_slot_type.html.markdown index 8981c3b9829b..a20736ddb734 100644 --- a/website/docs/cdktf/python/r/lexv2models_slot_type.html.markdown +++ b/website/docs/cdktf/python/r/lexv2models_slot_type.html.markdown @@ -100,6 +100,7 @@ All of the bots, slot types, and slots used by the intent must have the same loc The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` - (Optional) Description of the slot type. * `composite_slot_type_setting` - (Optional) Specifications for a composite slot type. See [`composite_slot_type_setting` argument reference](#composite_slot_type_setting-argument-reference) below. 
@@ -215,4 +216,4 @@ Using `terraform import`, import Lex V2 Models Slot Type using a comma-del % terraform import aws_lexv2models_slot_type.example bot-1234,DRAFT,en_US,slot_type-id-12345678 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/licensemanager_association.html.markdown b/website/docs/cdktf/python/r/licensemanager_association.html.markdown index 8d6642bc9eb5..78a09326339f 100644 --- a/website/docs/cdktf/python/r/licensemanager_association.html.markdown +++ b/website/docs/cdktf/python/r/licensemanager_association.html.markdown @@ -64,6 +64,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `license_configuration_arn` - (Required) ARN of the license configuration. * `resource_arn` - (Required) ARN of the resource associated with the license configuration.
@@ -98,4 +99,4 @@ Using `terraform import`, import license configurations using `resource_arn,lice % terraform import aws_licensemanager_association.example arn:aws:ec2:eu-west-1:123456789012:image/ami-123456789abcdef01,arn:aws:license-manager:eu-west-1:123456789012:license-configuration:lic-0123456789abcdef0123456789abcdef ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/licensemanager_grant.html.markdown b/website/docs/cdktf/python/r/licensemanager_grant.html.markdown index 0ee9118a3c02..ea521d734b35 100644 --- a/website/docs/cdktf/python/r/licensemanager_grant.html.markdown +++ b/website/docs/cdktf/python/r/licensemanager_grant.html.markdown @@ -40,6 +40,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The Name of the grant. * `allowed_operations` - (Required) A list of the allowed operations for the grant. This is a subset of the allowed operations on the license. * `license_arn` - (Required) The ARN of the license to grant. @@ -81,4 +82,4 @@ Using `terraform import`, import `aws_licensemanager_grant` using the grant arn. 
% terraform import aws_licensemanager_grant.test arn:aws:license-manager::123456789011:grant:g-01d313393d9e443d8664cc054db1e089 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/licensemanager_grant_accepter.html.markdown b/website/docs/cdktf/python/r/licensemanager_grant_accepter.html.markdown index 2143d7c801cb..26c248fba940 100644 --- a/website/docs/cdktf/python/r/licensemanager_grant_accepter.html.markdown +++ b/website/docs/cdktf/python/r/licensemanager_grant_accepter.html.markdown @@ -35,6 +35,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `grant_arn` - (Required) The ARN of the grant to accept. ## Attribute Reference @@ -77,4 +78,4 @@ Using `terraform import`, import `aws_licensemanager_grant_accepter` using the g % terraform import aws_licensemanager_grant_accepter.test arn:aws:license-manager::123456789012:grant:g-1cf9fba4ba2f42dcab11c686c4b4d329 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/licensemanager_license_configuration.html.markdown b/website/docs/cdktf/python/r/licensemanager_license_configuration.html.markdown index d6f48e935537..88a3bbc3f653 100644 --- a/website/docs/cdktf/python/r/licensemanager_license_configuration.html.markdown +++ b/website/docs/cdktf/python/r/licensemanager_license_configuration.html.markdown @@ -45,6 +45,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the license configuration. * `description` - (Optional) Description of the license configuration. * `license_count` - (Optional) Number of licenses managed by the license configuration. @@ -99,4 +100,4 @@ Using `terraform import`, import license configurations using the `id`. For exam % terraform import aws_licensemanager_license_configuration.example arn:aws:license-manager:eu-west-1:123456789012:license-configuration:lic-0123456789abcdef0123456789abcdef ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/lightsail_bucket.html.markdown b/website/docs/cdktf/python/r/lightsail_bucket.html.markdown index 008c57dd849b..f25c0705ecd2 100644 --- a/website/docs/cdktf/python/r/lightsail_bucket.html.markdown +++ b/website/docs/cdktf/python/r/lightsail_bucket.html.markdown @@ -42,6 +42,7 @@ The following arguments are required: The following arguments are optional: * `force_delete` - (Optional) Whether to force delete non-empty buckets using `terraform destroy`. AWS by default will not delete a bucket which is not empty, to prevent losing bucket data and affecting other resources in Lightsail. If `force_delete` is set to `true` the bucket will be deleted even when not empty. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `tags` - (Optional) Map of tags to assign to the resource. To create a key-only tag, use an empty string as the value. If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level. 
## Attribute Reference @@ -52,7 +53,6 @@ This resource exports the following attributes in addition to the arguments abov * `availability_zone` - Availability Zone. Follows the format us-east-2a (case-sensitive). * `created_at` - Date and time when the bucket was created. * `id` - Name used for this bucket (matches `name`). -* `region` - AWS Region name. * `support_code` - Support code for the resource. Include this code in your email to support when you have questions about a resource in Lightsail. This code enables our support team to look up your Lightsail information more easily. * `tags_all` - Map of tags assigned to the resource, including those inherited from the provider `default_tags` configuration block. * `url` - URL of the bucket. @@ -82,4 +82,4 @@ Using `terraform import`, import `aws_lightsail_bucket` using the `name` attribu % terraform import aws_lightsail_bucket.example example-bucket ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/lightsail_bucket_access_key.html.markdown b/website/docs/cdktf/python/r/lightsail_bucket_access_key.html.markdown index df2382598d58..621896977839 100644 --- a/website/docs/cdktf/python/r/lightsail_bucket_access_key.html.markdown +++ b/website/docs/cdktf/python/r/lightsail_bucket_access_key.html.markdown @@ -43,6 +43,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: * `bucket_name` - (Required) Name of the bucket that the access key will belong to and grant access to. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
## Attribute Reference @@ -79,4 +80,4 @@ Using `terraform import`, import `aws_lightsail_bucket_access_key` using the `id % terraform import aws_lightsail_bucket_access_key.example example-bucket,AKIAIOSFODNN7EXAMPLE ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/lightsail_bucket_resource_access.html.markdown b/website/docs/cdktf/python/r/lightsail_bucket_resource_access.html.markdown index b39ee8e13b43..d08cd607d682 100644 --- a/website/docs/cdktf/python/r/lightsail_bucket_resource_access.html.markdown +++ b/website/docs/cdktf/python/r/lightsail_bucket_resource_access.html.markdown @@ -54,6 +54,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: * `bucket_name` - (Required) Name of the bucket to grant access to. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `resource_name` - (Required) Name of the resource to grant bucket access. ## Attribute Reference @@ -87,4 +88,4 @@ Using `terraform import`, import `aws_lightsail_bucket_resource_access` using th % terraform import aws_lightsail_bucket_resource_access.example example-bucket,example-instance ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/lightsail_certificate.html.markdown b/website/docs/cdktf/python/r/lightsail_certificate.html.markdown index adac4f2c59dc..860197df8f35 100644 --- a/website/docs/cdktf/python/r/lightsail_certificate.html.markdown +++ b/website/docs/cdktf/python/r/lightsail_certificate.html.markdown @@ -42,6 +42,7 @@ The following arguments are required: The following arguments are optional: * `domain_name` - (Optional) Domain name for which the certificate should be issued. 
+* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `subject_alternative_names` - (Optional) Set of domains that should be SANs in the issued certificate. `domain_name` attribute is automatically added as a Subject Alternative Name. * `tags` - (Optional) Map of tags to assign to the resource. To create a key-only tag, use an empty string as the value. If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level. @@ -84,4 +85,4 @@ Using `terraform import`, import `aws_lightsail_certificate` using the certifica % terraform import aws_lightsail_certificate.example example-certificate ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/lightsail_container_service.html.markdown b/website/docs/cdktf/python/r/lightsail_container_service.html.markdown index c5bf77ef2523..a5eb60f61cd9 100644 --- a/website/docs/cdktf/python/r/lightsail_container_service.html.markdown +++ b/website/docs/cdktf/python/r/lightsail_container_service.html.markdown @@ -137,6 +137,7 @@ The following arguments are optional: * `is_disabled` - (Optional) Whether to disable the container service. Defaults to `false`. * `private_registry_access` - (Optional) Configuration for the container service to access private container image repositories, such as Amazon Elastic Container Registry (Amazon ECR) private repositories. [See below](#private-registry-access). * `public_domain_names` - (Optional) Public domain names to use with the container service, such as example.com and www.example.com. You can specify up to four public domain names for a container service. 
The domain names that you specify are used when you create a deployment with a container configured as the public endpoint of your container service. If you don't specify public domain names, then you can use the default domain of the container service. [See below](#public-domain-names). +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `tags` - (Optional) Map of tags to assign to the resource. To create a key-only tag, use an empty string as the value. If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level. ### Private Registry Access @@ -212,4 +213,4 @@ Using `terraform import`, import Lightsail Container Service using the `name`. F % terraform import aws_lightsail_container_service.example container-service-1 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/lightsail_container_service_deployment_version.html.markdown b/website/docs/cdktf/python/r/lightsail_container_service_deployment_version.html.markdown index a3f7d6141788..0fcfcc4fb004 100644 --- a/website/docs/cdktf/python/r/lightsail_container_service_deployment_version.html.markdown +++ b/website/docs/cdktf/python/r/lightsail_container_service_deployment_version.html.markdown @@ -71,6 +71,7 @@ The following arguments are required: The following arguments are optional: * `public_endpoint` - (Optional) Configuration block that describes the settings of the public endpoint for the container service. [See below](#public_endpoint). +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). ### `container` @@ -141,4 +142,4 @@ Using `terraform import`, import Lightsail Container Service Deployment Version % terraform import aws_lightsail_container_service_deployment_version.example container-service-1/1 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/lightsail_database.html.markdown b/website/docs/cdktf/python/r/lightsail_database.html.markdown index 498afd49e24c..878babfdbfb1 100644 --- a/website/docs/cdktf/python/r/lightsail_database.html.markdown +++ b/website/docs/cdktf/python/r/lightsail_database.html.markdown @@ -173,6 +173,7 @@ The following arguments are optional: * `preferred_backup_window` - (Optional) Daily time range during which automated backups are created for your database if automated backups are enabled. Must be in the hh24:mi-hh24:mi format. Example: `16:00-16:30`. Specified in Coordinated Universal Time (UTC). * `preferred_maintenance_window` - (Optional) Weekly time range during which system maintenance can occur on your database. Must be in the ddd:hh24:mi-ddd:hh24:mi format. Specified in Coordinated Universal Time (UTC). Example: `Tue:17:00-Tue:17:30` * `publicly_accessible` - (Optional) Whether the database is accessible to resources outside of your Lightsail account. A value of true specifies a database that is available to resources outside of your Lightsail account. A value of false specifies a database that is available only to your Lightsail resources in the same region as your database. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
* `skip_final_snapshot` - (Optional) Whether a final database snapshot is created before your database is deleted. If true is specified, no database snapshot is created. If false is specified, a database snapshot is created before your database is deleted. You must specify the final relational database snapshot name parameter if the skip final snapshot parameter is false. * `tags` - (Optional) Map of tags to assign to the resource. To create a key-only tag, use an empty string as the value. If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level. @@ -267,4 +268,4 @@ Using `terraform import`, import Lightsail Databases using their name. For examp % terraform import aws_lightsail_database.example example-database ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/lightsail_disk.html.markdown b/website/docs/cdktf/python/r/lightsail_disk.html.markdown index 98c89be0ac60..ed2dd904170e 100644 --- a/website/docs/cdktf/python/r/lightsail_disk.html.markdown +++ b/website/docs/cdktf/python/r/lightsail_disk.html.markdown @@ -52,6 +52,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `tags` - (Optional) Map of tags to assign to the resource. To create a key-only tag, use an empty string as the value. If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level. ## Attribute Reference @@ -89,4 +90,4 @@ Using `terraform import`, import `aws_lightsail_disk` using the name attribute. 
% terraform import aws_lightsail_disk.example example-disk ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/lightsail_disk_attachment.html.markdown b/website/docs/cdktf/python/r/lightsail_disk_attachment.html.markdown index 16f64e3c1c2b..1341b25bec8f 100644 --- a/website/docs/cdktf/python/r/lightsail_disk_attachment.html.markdown +++ b/website/docs/cdktf/python/r/lightsail_disk_attachment.html.markdown @@ -67,6 +67,7 @@ This resource supports the following arguments: * `disk_name` - (Required) Name of the Lightsail disk. * `disk_path` - (Required) Disk path to expose to the instance. * `instance_name` - (Required) Name of the Lightsail instance to attach to. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). ## Attribute Reference @@ -99,4 +100,4 @@ Using `terraform import`, import `aws_lightsail_disk_attachment` using the id at % terraform import aws_lightsail_disk_attachment.example example-disk,example-instance ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/lightsail_distribution.html.markdown b/website/docs/cdktf/python/r/lightsail_distribution.html.markdown index bd3259dcef6b..d210573aa2cb 100644 --- a/website/docs/cdktf/python/r/lightsail_distribution.html.markdown +++ b/website/docs/cdktf/python/r/lightsail_distribution.html.markdown @@ -206,6 +206,7 @@ The following arguments are optional: * `certificate_name` - (Optional) Name of the SSL/TLS certificate attached to the distribution. * `ip_address_type` - (Optional) IP address type of the distribution. Valid values: `dualstack`, `ipv4`. Default: `dualstack`. * `is_enabled` - (Optional) Whether the distribution is enabled. Default: `true`. 
+* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `tags` - (Optional) Map of tags for the Lightsail Distribution. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. ### cache_behavior @@ -303,4 +304,4 @@ Using `terraform import`, import Lightsail Distribution using the `name`. For ex % terraform import aws_lightsail_distribution.example example-distribution ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/lightsail_domain.html.markdown b/website/docs/cdktf/python/r/lightsail_domain.html.markdown index 61854ac0f835..016e67ac4eed 100644 --- a/website/docs/cdktf/python/r/lightsail_domain.html.markdown +++ b/website/docs/cdktf/python/r/lightsail_domain.html.markdown @@ -41,6 +41,10 @@ The following arguments are required: * `domain_name` - (Required) Name of the Lightsail domain to manage. +The following arguments are optional: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). + ## Attribute Reference This resource exports the following attributes in addition to the arguments above: @@ -48,4 +52,4 @@ This resource exports the following attributes in addition to the arguments abov * `arn` - ARN of the Lightsail domain. * `id` - Name used for this domain. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/lightsail_domain_entry.html.markdown b/website/docs/cdktf/python/r/lightsail_domain_entry.html.markdown index 1fd814b1217b..2776a8bca1b3 100644 --- a/website/docs/cdktf/python/r/lightsail_domain_entry.html.markdown +++ b/website/docs/cdktf/python/r/lightsail_domain_entry.html.markdown @@ -54,6 +54,7 @@ The following arguments are required: The following arguments are optional: * `is_alias` - (Optional) Whether the entry should be an alias. Default: `false`. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). ## Attribute Reference @@ -86,4 +87,4 @@ Using `terraform import`, import Lightsail Domain Entry using the id attribute. % terraform import aws_lightsail_domain_entry.example www,example.com,A,127.0.0.1 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/lightsail_instance.html.markdown b/website/docs/cdktf/python/r/lightsail_instance.html.markdown index b60293009144..328a0d1abd6b 100644 --- a/website/docs/cdktf/python/r/lightsail_instance.html.markdown +++ b/website/docs/cdktf/python/r/lightsail_instance.html.markdown @@ -111,6 +111,7 @@ The following arguments are optional: * `add_on` - (Optional) Add-on configuration for the instance. [See below](#add_on). * `ip_address_type` - (Optional) IP address type of the Lightsail Instance. Valid values: `dualstack`, `ipv4`, `ipv6`. Default: `dualstack`. * `key_pair_name` - (Optional) Name of your key pair. Created in the Lightsail console (cannot use `aws_key_pair` at this time). 
+* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `tags` - (Optional) Map of tags to assign to the resource. To create a key-only tag, use an empty string as the value. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. * `user_data` - (Optional) Single lined launch script as a string to configure server with additional user data. @@ -163,4 +164,4 @@ Using `terraform import`, import Lightsail Instances using their name. For examp % terraform import aws_lightsail_instance.example 'example' ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/lightsail_instance_public_ports.html.markdown b/website/docs/cdktf/python/r/lightsail_instance_public_ports.html.markdown index 604e9ffd7da7..c092e9c58a57 100644 --- a/website/docs/cdktf/python/r/lightsail_instance_public_ports.html.markdown +++ b/website/docs/cdktf/python/r/lightsail_instance_public_ports.html.markdown @@ -69,22 +69,23 @@ class MyConvertedCode(TerraformStack): The following arguments are required: -* `instance_name` - (Required) Name of the Lightsail Instance. -* `port_info` - (Required) Configuration block with port information. AWS closes all currently open ports that are not included in the `port_info`. [See below](#port_info). +* `instance_name` - (Required) Name of the instance for which to open ports. +* `port_info` - (Required) Descriptor of the ports to open for the specified instance. AWS closes all currently open ports that are not included in this argument. 
See [`port_info` Block](#port_info-block) for details. -### port_info +The following arguments are optional: -The following arguments are required: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). -* `from_port` - (Required) First port in a range of open ports on an instance. -* `protocol` - (Required) IP protocol name. Valid values: `tcp`, `all`, `udp`, `icmp`. -* `to_port` - (Required) Last port in a range of open ports on an instance. +### `port_info` Block -The following arguments are optional: +The `port_info` configuration block supports the following arguments: +* `from_port` - (Required) First port in a range of open ports on an instance. See [PortInfo](https://docs.aws.amazon.com/lightsail/2016-11-28/api-reference/API_PortInfo.html) for details. +* `protocol` - (Required) IP protocol name. Valid values: `tcp`, `all`, `udp`, `icmp`, `icmpv6`. See [PortInfo](https://docs.aws.amazon.com/lightsail/2016-11-28/api-reference/API_PortInfo.html) for details. +* `to_port` - (Required) Last port in a range of open ports on an instance. See [PortInfo](https://docs.aws.amazon.com/lightsail/2016-11-28/api-reference/API_PortInfo.html) for details. * `cidr_list_aliases` - (Optional) Set of CIDR aliases that define access for a preconfigured range of IP addresses. -* `cidrs` - (Optional) Set of CIDR blocks. -* `ipv6_cidrs` - (Optional) Set of IPv6 CIDR blocks. +* `cidrs` - (Optional) Set of IPv4 addresses or ranges of IPv4 addresses (in CIDR notation) that are allowed to connect to an instance through the ports, and the protocol. +* `ipv6_cidrs` - (Optional) Set of IPv6 addresses or ranges of IPv6 addresses (in CIDR notation) that are allowed to connect to an instance through the ports, and the protocol. 
## Attribute Reference @@ -92,4 +93,4 @@ This resource exports the following attributes in addition to the arguments abov * `id` - ID of the resource. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/lightsail_key_pair.html.markdown b/website/docs/cdktf/python/r/lightsail_key_pair.html.markdown index fcf3439dca85..e8b497435700 100644 --- a/website/docs/cdktf/python/r/lightsail_key_pair.html.markdown +++ b/website/docs/cdktf/python/r/lightsail_key_pair.html.markdown @@ -83,6 +83,7 @@ The following arguments are optional: * `name_prefix` - (Optional) Creates a unique name beginning with the specified prefix. Conflicts with `name`. * `pgp_key` - (Optional) PGP key to encrypt the resulting private key material. Only used when creating a new key pair. * `public_key` - (Optional) Public key material. This public key will be imported into Lightsail. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `tags` - (Optional) Map of tags to assign to the resource. To create a key-only tag, use an empty string as the value. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. ~> **Note:** A PGP key is not required, however it is strongly encouraged. Without a PGP key, the private key material will be stored in state unencrypted. `pgp_key` is ignored if `public_key` is supplied. 
@@ -104,4 +105,4 @@ This resource exports the following attributes in addition to the arguments abov You cannot import Lightsail Key Pairs because the private and public key are only available on initial creation. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/lightsail_lb.html.markdown b/website/docs/cdktf/python/r/lightsail_lb.html.markdown index f8c2e2f22e3a..bc1f7de6b80d 100644 --- a/website/docs/cdktf/python/r/lightsail_lb.html.markdown +++ b/website/docs/cdktf/python/r/lightsail_lb.html.markdown @@ -49,6 +49,7 @@ The following arguments are optional: * `health_check_path` - (Optional) Health check path of the load balancer. Default value `/`. * `ip_address_type` - (Optional) IP address type of the load balancer. Valid values: `dualstack`, `ipv4`. Default value `dualstack`. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `tags` - (Optional) Map of tags to assign to the resource. To create a key-only tag, use an empty string as the value. If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level. ## Attribute Reference @@ -89,4 +90,4 @@ Using `terraform import`, import `aws_lightsail_lb` using the name attribute. 
Fo % terraform import aws_lightsail_lb.example example-load-balancer ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/lightsail_lb_attachment.html.markdown b/website/docs/cdktf/python/r/lightsail_lb_attachment.html.markdown index 490e66f68acf..0c8004338507 100644 --- a/website/docs/cdktf/python/r/lightsail_lb_attachment.html.markdown +++ b/website/docs/cdktf/python/r/lightsail_lb_attachment.html.markdown @@ -69,6 +69,10 @@ The following arguments are required: * `instance_name` - (Required) Name of the instance to attach to the load balancer. * `lb_name` - (Required) Name of the Lightsail load balancer. +The following arguments are optional: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). + ## Attribute Reference This resource exports the following attributes in addition to the arguments above: @@ -100,4 +104,4 @@ Using `terraform import`, import `aws_lightsail_lb_attachment` using the name at % terraform import aws_lightsail_lb_attachment.example example-load-balancer,example-instance ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/lightsail_lb_certificate.html.markdown b/website/docs/cdktf/python/r/lightsail_lb_certificate.html.markdown index ed5bf25db4ae..5b89394de721 100644 --- a/website/docs/cdktf/python/r/lightsail_lb_certificate.html.markdown +++ b/website/docs/cdktf/python/r/lightsail_lb_certificate.html.markdown @@ -56,6 +56,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `subject_alternative_names` - (Optional) Set of domains that should be SANs in the issued certificate. `domain_name` attribute is automatically added as a Subject Alternative Name. ## Attribute Reference @@ -93,4 +94,4 @@ Using `terraform import`, import `aws_lightsail_lb_certificate` using the id att % terraform import aws_lightsail_lb_certificate.example example-load-balancer,example-load-balancer-certificate ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/lightsail_lb_certificate_attachment.html.markdown b/website/docs/cdktf/python/r/lightsail_lb_certificate_attachment.html.markdown index bb75e898fe70..4c2d6e030ed4 100644 --- a/website/docs/cdktf/python/r/lightsail_lb_certificate_attachment.html.markdown +++ b/website/docs/cdktf/python/r/lightsail_lb_certificate_attachment.html.markdown @@ -61,6 +61,10 @@ The following arguments are required: * `certificate_name` - (Required) Name of your SSL/TLS certificate. * `lb_name` - (Required) Name of the load balancer to which you want to associate the SSL/TLS certificate. +The following arguments are optional: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
+ ## Attribute Reference This resource exports the following attributes in addition to the arguments above: @@ -92,4 +96,4 @@ Using `terraform import`, import `aws_lightsail_lb_certificate_attachment` using % terraform import aws_lightsail_lb_certificate_attachment.example example-load-balancer,example-certificate ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/lightsail_lb_https_redirection_policy.html.markdown b/website/docs/cdktf/python/r/lightsail_lb_https_redirection_policy.html.markdown index 784522065e80..c7bef610992c 100644 --- a/website/docs/cdktf/python/r/lightsail_lb_https_redirection_policy.html.markdown +++ b/website/docs/cdktf/python/r/lightsail_lb_https_redirection_policy.html.markdown @@ -69,6 +69,10 @@ The following arguments are required: * `enabled` - (Required) Whether to enable HTTP to HTTPS redirection. `true` to activate HTTP to HTTPS redirection or `false` to deactivate HTTP to HTTPS redirection. * `lb_name` - (Required) Name of the load balancer to which you want to enable HTTP to HTTPS redirection. +The following arguments are optional: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
+ ## Attribute Reference This resource exports the following attributes in addition to the arguments above: @@ -100,4 +104,4 @@ Using `terraform import`, import `aws_lightsail_lb_https_redirection_policy` usi % terraform import aws_lightsail_lb_https_redirection_policy.example example-load-balancer ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/lightsail_lb_stickiness_policy.html.markdown b/website/docs/cdktf/python/r/lightsail_lb_stickiness_policy.html.markdown index ccd419f7bf7b..cbc679e49083 100644 --- a/website/docs/cdktf/python/r/lightsail_lb_stickiness_policy.html.markdown +++ b/website/docs/cdktf/python/r/lightsail_lb_stickiness_policy.html.markdown @@ -55,6 +55,10 @@ The following arguments are required: * `enabled` - (Required) Whether to enable session stickiness for the load balancer. * `lb_name` - (Required) Name of the load balancer to which you want to enable session stickiness. +The following arguments are optional: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
+ ## Attribute Reference This resource exports the following attributes in addition to the arguments above: @@ -86,4 +90,4 @@ Using `terraform import`, import `aws_lightsail_lb_stickiness_policy` using the % terraform import aws_lightsail_lb_stickiness_policy.example example-load-balancer ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/lightsail_static_ip.html.markdown b/website/docs/cdktf/python/r/lightsail_static_ip.html.markdown index 872168f578ec..73e8d96057c8 100644 --- a/website/docs/cdktf/python/r/lightsail_static_ip.html.markdown +++ b/website/docs/cdktf/python/r/lightsail_static_ip.html.markdown @@ -41,6 +41,10 @@ The following arguments are required: * `name` - (Required) Name for the allocated static IP. +The following arguments are optional: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). + ## Attribute Reference This resource exports the following attributes in addition to the arguments above: @@ -74,4 +78,4 @@ Using `terraform import`, import `aws_lightsail_static_ip` using the name attrib % terraform import aws_lightsail_static_ip.example example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/lightsail_static_ip_attachment.html.markdown b/website/docs/cdktf/python/r/lightsail_static_ip_attachment.html.markdown index e0e6b4b19c30..ef6261004876 100644 --- a/website/docs/cdktf/python/r/lightsail_static_ip_attachment.html.markdown +++ b/website/docs/cdktf/python/r/lightsail_static_ip_attachment.html.markdown @@ -59,6 +59,10 @@ The following arguments are required: * `instance_name` - (Required) Name of the Lightsail instance to attach the IP to. 
* `static_ip_name` - (Required) Name of the allocated static IP. +The following arguments are optional: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). + ## Attribute Reference This resource exports the following attributes in addition to the arguments above: @@ -90,4 +94,4 @@ Using `terraform import`, import `aws_lightsail_static_ip_attachment` using the % terraform import aws_lightsail_static_ip_attachment.example example-static-ip ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/load_balancer_backend_server_policy.html.markdown b/website/docs/cdktf/python/r/load_balancer_backend_server_policy.html.markdown index a26752a00a90..7f32eb5316ac 100644 --- a/website/docs/cdktf/python/r/load_balancer_backend_server_policy.html.markdown +++ b/website/docs/cdktf/python/r/load_balancer_backend_server_policy.html.markdown @@ -74,6 +74,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `load_balancer_name` - (Required) The load balancer to attach the policy to. * `policy_names` - (Required) List of Policy Names to apply to the backend server. * `instance_port` - (Required) The instance port to apply the policy to. @@ -86,4 +87,4 @@ This resource exports the following attributes in addition to the arguments abov * `load_balancer_name` - The load balancer on which the policy is defined. 
* `instance_port` - The backend port the policies are applied to - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/load_balancer_listener_policy.html.markdown b/website/docs/cdktf/python/r/load_balancer_listener_policy.html.markdown index b0dae15f47fd..ddd4b3c1bcad 100644 --- a/website/docs/cdktf/python/r/load_balancer_listener_policy.html.markdown +++ b/website/docs/cdktf/python/r/load_balancer_listener_policy.html.markdown @@ -121,6 +121,7 @@ This example shows how to add a [Predefined Security Policy for ELBs](https://do This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `load_balancer_name` - (Required) The load balancer to attach the policy to. * `load_balancer_port` - (Required) The load balancer listener port to apply the policy to. * `policy_names` - (Required) List of Policy Names to apply to the backend server. @@ -134,4 +135,4 @@ This resource exports the following attributes in addition to the arguments abov * `load_balancer_name` - The load balancer on which the policy is defined. 
* `load_balancer_port` - The load balancer listener port the policies are applied to - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/load_balancer_policy.html.markdown b/website/docs/cdktf/python/r/load_balancer_policy.html.markdown index 86eae0fc813b..0f6c2d835dba 100644 --- a/website/docs/cdktf/python/r/load_balancer_policy.html.markdown +++ b/website/docs/cdktf/python/r/load_balancer_policy.html.markdown @@ -103,6 +103,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `load_balancer_name` - (Required) The load balancer on which the policy is defined. * `policy_name` - (Required) The name of the load balancer policy. * `policy_type_name` - (Required) The policy type. @@ -117,4 +118,4 @@ This resource exports the following attributes in addition to the arguments abov * `policy_type_name` - The policy type of the policy. * `load_balancer_name` - The load balancer on which the policy is defined. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/location_geofence_collection.html.markdown b/website/docs/cdktf/python/r/location_geofence_collection.html.markdown index 1f53b36b1b03..eadfba5f7f67 100644 --- a/website/docs/cdktf/python/r/location_geofence_collection.html.markdown +++ b/website/docs/cdktf/python/r/location_geofence_collection.html.markdown @@ -39,6 +39,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` - (Optional) The optional description for the geofence collection. * `kms_key_id` - (Optional) A key identifier for an AWS KMS customer managed key assigned to the Amazon Location resource. * `tags` - (Optional) Key-value tags for the geofence collection. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. @@ -84,4 +85,4 @@ Using `terraform import`, import Location Geofence Collection using the `collect % terraform import aws_location_geofence_collection.example example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/location_map.html.markdown b/website/docs/cdktf/python/r/location_map.html.markdown index 2c65e37b7b36..fd99d8636431 100644 --- a/website/docs/cdktf/python/r/location_map.html.markdown +++ b/website/docs/cdktf/python/r/location_map.html.markdown @@ -43,6 +43,7 @@ The following arguments are required: The following arguments are optional: * `description` - (Optional) An optional description for the map resource. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `tags` - (Optional) Key-value tags for the map. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level.
@@ -86,4 +87,4 @@ Using `terraform import`, import `aws_location_map` resources using the map name % terraform import aws_location_map.example example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/location_place_index.html.markdown b/website/docs/cdktf/python/r/location_place_index.html.markdown index 5e2f98089741..aec94e64ee85 100644 --- a/website/docs/cdktf/python/r/location_place_index.html.markdown +++ b/website/docs/cdktf/python/r/location_place_index.html.markdown @@ -41,6 +41,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `data_source_configuration` - (Optional) Configuration block with the data storage option chosen for requesting Places. Detailed below. * `description` - (Optional) The optional description for the place index resource. * `tags` - (Optional) Key-value tags for the place index. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level.
@@ -85,4 +86,4 @@ Using `terraform import`, import `aws_location_place_index` resources using the % terraform import aws_location_place_index.example example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/location_route_calculator.html.markdown b/website/docs/cdktf/python/r/location_route_calculator.html.markdown index 6d55917a862e..7bf6204be6f0 100644 --- a/website/docs/cdktf/python/r/location_route_calculator.html.markdown +++ b/website/docs/cdktf/python/r/location_route_calculator.html.markdown @@ -41,6 +41,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` - (Optional) The optional description for the route calculator resource. * `tags` - (Optional) Key-value tags for the route calculator. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level.
@@ -86,4 +87,4 @@ Using `terraform import`, import `aws_location_route_calculator` using the route % terraform import aws_location_route_calculator.example example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/location_tracker.html.markdown b/website/docs/cdktf/python/r/location_tracker.html.markdown index 6a56e5131b2f..2e9e22e9edae 100644 --- a/website/docs/cdktf/python/r/location_tracker.html.markdown +++ b/website/docs/cdktf/python/r/location_tracker.html.markdown @@ -39,6 +39,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` - (Optional) The optional description for the tracker resource. * `kms_key_id` - (Optional) A key identifier for an AWS KMS customer managed key assigned to the Amazon Location resource. * `position_filtering` - (Optional) The position filtering method of the tracker resource. Valid values: `TimeBased`, `DistanceBased`, `AccuracyBased`. Default: `TimeBased`. 
@@ -78,4 +79,4 @@ Using `terraform import`, import `aws_location_tracker` resources using the trac % terraform import aws_location_tracker.example example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/location_tracker_association.html.markdown b/website/docs/cdktf/python/r/location_tracker_association.html.markdown index 537707c041c9..bf7fc7642e6e 100644 --- a/website/docs/cdktf/python/r/location_tracker_association.html.markdown +++ b/website/docs/cdktf/python/r/location_tracker_association.html.markdown @@ -46,8 +46,9 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -The following arguments are required: +This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `consumer_arn` - (Required) The Amazon Resource Name (ARN) for the geofence collection to be associated to tracker resource. Used when you need to specify a resource across all AWS. * `tracker_name` - (Required) The name of the tracker resource to be associated with a geofence collection. 
@@ -87,4 +88,4 @@ Using `terraform import`, import Location Tracker Association using the `tracker % terraform import aws_location_tracker_association.example "tracker_name|consumer_arn" ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/m2_application.html.markdown b/website/docs/cdktf/python/r/m2_application.html.markdown index 059f3cc8f507..38cac3ba5acd 100644 --- a/website/docs/cdktf/python/r/m2_application.html.markdown +++ b/website/docs/cdktf/python/r/m2_application.html.markdown @@ -47,6 +47,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `definition` - (Optional) The application definition for this application. You can specify either inline JSON or an S3 bucket location. * `kms_key_id` - (Optional) KMS Key to use for the Application. * `role_arn` - (Optional) ARN of role for application to use to access AWS resources.
@@ -103,4 +104,4 @@ Using `terraform import`, import Mainframe Modernization Application using the ` % terraform import aws_m2_application.example 01234567890abcdef012345678 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/m2_deployment.html.markdown b/website/docs/cdktf/python/r/m2_deployment.html.markdown index 33164e22af2b..699aafd1674c 100644 --- a/website/docs/cdktf/python/r/m2_deployment.html.markdown +++ b/website/docs/cdktf/python/r/m2_deployment.html.markdown @@ -37,8 +37,9 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -The following arguments are required: +This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `environment_id` - (Required) Environment to deploy application to. * `application_id` - (Required) Application to deploy. * `application_version` - (Required) Version to application to deploy @@ -81,4 +82,4 @@ Using `terraform import`, import Mainframe Modernization Deployment using the `A % terraform import aws_m2_deployment.example APPLICATION-ID,DEPLOYMENT-ID ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/m2_environment.html.markdown b/website/docs/cdktf/python/r/m2_environment.html.markdown index c40a7ed0263f..d16c2b71eb83 100644 --- a/website/docs/cdktf/python/r/m2_environment.html.markdown +++ b/website/docs/cdktf/python/r/m2_environment.html.markdown @@ -135,6 +135,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints).
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `engine_version` - (Optional) The specific version of the engine for the Environment. * `force_update` - (Optional) Force update the environment even if applications are running. * `kms_key_id` - (Optional) ARN of the KMS key to use for the Environment. @@ -214,4 +215,4 @@ Using `terraform import`, import Mainframe Modernization Environment using the ` % terraform import aws_m2_environment.example 01234567890abcdef012345678 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/macie2_account.html.markdown b/website/docs/cdktf/python/r/macie2_account.html.markdown index 9574d1b3adf1..24dff81653f8 100644 --- a/website/docs/cdktf/python/r/macie2_account.html.markdown +++ b/website/docs/cdktf/python/r/macie2_account.html.markdown @@ -36,6 +36,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `finding_publishing_frequency` - (Optional) Specifies how often to publish updates to policy findings for the account. This includes publishing updates to AWS Security Hub and Amazon EventBridge (formerly called Amazon CloudWatch Events). Valid values are `FIFTEEN_MINUTES`, `ONE_HOUR` or `SIX_HOURS`. * `status` - (Optional) Specifies the status for the account. To enable Amazon Macie and start all Macie activities for the account, set this value to `ENABLED`. Valid values are `ENABLED` or `PAUSED`. @@ -73,4 +74,4 @@ Using `terraform import`, import `aws_macie2_account` using the id. 
For example: % terraform import aws_macie2_account.example abcd1 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/macie2_classification_export_configuration.html.markdown b/website/docs/cdktf/python/r/macie2_classification_export_configuration.html.markdown index b429705aece0..ad31f68a90fe 100644 --- a/website/docs/cdktf/python/r/macie2_classification_export_configuration.html.markdown +++ b/website/docs/cdktf/python/r/macie2_classification_export_configuration.html.markdown @@ -45,6 +45,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `s3_destination` - (Required) Configuration block for a S3 Destination. Defined below ### s3_destination Configuration Block @@ -65,7 +66,7 @@ This resource exports the following attributes in addition to the arguments abov ## Import -In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import `aws_macie2_classification_export_configuration` using the account ID and region. For example: +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import `aws_macie2_classification_export_configuration` using the region. For example: ```python # DO NOT EDIT. 
Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug @@ -79,13 +80,13 @@ from imports.aws.macie2_classification_export_configuration import Macie2Classif class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) - Macie2ClassificationExportConfiguration.generate_config_for_import(self, "example", "123456789012:us-west-2") + Macie2ClassificationExportConfiguration.generate_config_for_import(self, "example", "us-west-2") ``` -Using `terraform import`, import `aws_macie2_classification_export_configuration` using the account ID and region. For example: +Using `terraform import`, import `aws_macie2_classification_export_configuration` using the region. For example: ```console -% terraform import aws_macie2_classification_export_configuration.example 123456789012:us-west-2 +% terraform import aws_macie2_classification_export_configuration.example us-west-2 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/macie2_classification_job.html.markdown b/website/docs/cdktf/python/r/macie2_classification_job.html.markdown index 6dd86a8f28e3..c5db339ddf0f 100644 --- a/website/docs/cdktf/python/r/macie2_classification_job.html.markdown +++ b/website/docs/cdktf/python/r/macie2_classification_job.html.markdown @@ -48,6 +48,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `schedule_frequency` - (Optional) The recurrence pattern for running the job. To run the job only once, don't specify a value for this property and set the value for the `job_type` property to `ONE_TIME`. 
(documented below) * `custom_data_identifier_ids` - (Optional) The custom data identifiers to use for data analysis and classification. * `sampling_percentage` - (Optional) The sampling depth, as a percentage, to apply when processing objects. This value determines the percentage of eligible objects that the job analyzes. If this value is less than 100, Amazon Macie selects the objects to analyze at random, up to the specified percentage, and analyzes all the data in those objects. @@ -174,4 +175,4 @@ Using `terraform import`, import `aws_macie2_classification_job` using the id. F % terraform import aws_macie2_classification_job.example abcd1 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/macie2_custom_data_identifier.html.markdown b/website/docs/cdktf/python/r/macie2_custom_data_identifier.html.markdown index 3e04e8295bb4..2ea73d4d07d9 100644 --- a/website/docs/cdktf/python/r/macie2_custom_data_identifier.html.markdown +++ b/website/docs/cdktf/python/r/macie2_custom_data_identifier.html.markdown @@ -45,6 +45,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `regex` - (Optional) The regular expression (regex) that defines the pattern to match. The expression can contain as many as 512 characters. * `keywords` - (Optional) An array that lists specific character sequences (keywords), one of which must be within proximity (`maximum_match_distance`) of the regular expression to match. The array can contain as many as 50 keywords. Each keyword can contain 3 - 90 characters. Keywords aren't case sensitive. 
* `ignore_words` - (Optional) An array that lists specific character sequences (ignore words) to exclude from the results. If the text matched by the regular expression is the same as any string in this array, Amazon Macie ignores it. The array can contain as many as 10 ignore words. Each ignore word can contain 4 - 90 characters. Ignore words are case sensitive. @@ -89,4 +90,4 @@ Using `terraform import`, import `aws_macie2_custom_data_identifier` using the i % terraform import aws_macie2_custom_data_identifier.example abcd1 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/macie2_findings_filter.html.markdown b/website/docs/cdktf/python/r/macie2_findings_filter.html.markdown index a85810c05c84..6212a99ff759 100644 --- a/website/docs/cdktf/python/r/macie2_findings_filter.html.markdown +++ b/website/docs/cdktf/python/r/macie2_findings_filter.html.markdown @@ -34,7 +34,7 @@ class MyConvertedCode(TerraformStack): description="DESCRIPTION", finding_criteria=Macie2FindingsFilterFindingCriteria( criterion=[Macie2FindingsFilterFindingCriteriaCriterion( - eq=[Token.as_string(current.name)], + eq=[Token.as_string(current.region)], field="region" ) ] @@ -48,6 +48,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `finding_criteria` - (Required) The criteria to use to filter findings. * `name` - (Optional) A custom name for the filter. The name must contain at least 3 characters and can contain as many as 64 characters. If omitted, Terraform will assign a random, unique name. Conflicts with `name_prefix`. 
* `name_prefix` - (Optional) Creates a unique name beginning with the specified prefix. Conflicts with `name`. @@ -104,4 +105,4 @@ Using `terraform import`, import `aws_macie2_findings_filter` using the id. For % terraform import aws_macie2_findings_filter.example abcd1 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/macie2_invitation_accepter.html.markdown b/website/docs/cdktf/python/r/macie2_invitation_accepter.html.markdown index d61dea55adc7..c1dbab36635a 100644 --- a/website/docs/cdktf/python/r/macie2_invitation_accepter.html.markdown +++ b/website/docs/cdktf/python/r/macie2_invitation_accepter.html.markdown @@ -54,6 +54,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `administrator_account_id` - (Required) The AWS account ID for the account that sent the invitation. ## Attribute Reference @@ -88,4 +89,4 @@ Using `terraform import`, import `aws_macie2_invitation_accepter` using the admi % terraform import aws_macie2_invitation_accepter.example 123456789012 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/macie2_member.html.markdown b/website/docs/cdktf/python/r/macie2_member.html.markdown index a161b4e233cc..d507c849e076 100644 --- a/website/docs/cdktf/python/r/macie2_member.html.markdown +++ b/website/docs/cdktf/python/r/macie2_member.html.markdown @@ -44,9 +44,10 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints).
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `account_id` - (Required) The AWS account ID for the account. * `email` - (Required) The email address for the account. * `tags` - (Optional) A map of key-value pairs that specifies the tags to associate with the account in Amazon Macie. * `status` - (Optional) Specifies the status for the account. To enable Amazon Macie and start all Macie activities for the account, set this value to `ENABLED`. Valid values are `ENABLED` or `PAUSED`. * `invite` - (Optional) Send an invitation to a member * `invitation_message` - (Optional) A custom message to include in the invitation. Amazon Macie adds this message to the standard content that it sends for an invitation. @@ -90,4 +91,4 @@ Using `terraform import`, import `aws_macie2_member` using the account ID of the % terraform import aws_macie2_member.example 123456789012 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/macie2_organization_admin_account.html.markdown b/website/docs/cdktf/python/r/macie2_organization_admin_account.html.markdown index 0c330da5ab1f..4460e0cf2acd 100644 --- a/website/docs/cdktf/python/r/macie2_organization_admin_account.html.markdown +++ b/website/docs/cdktf/python/r/macie2_organization_admin_account.html.markdown @@ -41,6 +41,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `admin_account_id` - (Required) The AWS account ID for the account to designate as the delegated Amazon Macie administrator account for the organization.
## Attribute Reference @@ -74,4 +75,4 @@ Using `terraform import`, import `aws_macie2_organization_admin_account` using t % terraform import aws_macie2_organization_admin_account.example abcd1 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/macie2_organization_configuration.html.markdown b/website/docs/cdktf/python/r/macie2_organization_configuration.html.markdown index cfcd155b976a..1e663d19fc6d 100644 --- a/website/docs/cdktf/python/r/macie2_organization_configuration.html.markdown +++ b/website/docs/cdktf/python/r/macie2_organization_configuration.html.markdown @@ -35,10 +35,11 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `auto_enable` - (Required) Whether to enable Amazon Macie automatically for accounts that are added to the organization in AWS Organizations. ## Attribute Reference This resource exports no additional attributes. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/main_route_table_association.html.markdown b/website/docs/cdktf/python/r/main_route_table_association.html.markdown index b4f8e093b58f..216854163e59 100644 --- a/website/docs/cdktf/python/r/main_route_table_association.html.markdown +++ b/website/docs/cdktf/python/r/main_route_table_association.html.markdown @@ -39,6 +39,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `vpc_id` - (Required) The ID of the VPC whose main route table should be set * `route_table_id` - (Required) The ID of the Route Table to set as the new main route table for the target VPC @@ -71,4 +72,4 @@ the `main_route_table_association` delete to work properly. [tf-route-tables]: /docs/providers/aws/r/route_table.html [tf-default-route-table]: /docs/providers/aws/r/default_route_table.html - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/media_convert_queue.html.markdown b/website/docs/cdktf/python/r/media_convert_queue.html.markdown index 0d6154f0949f..ff6299eeb5d6 100644 --- a/website/docs/cdktf/python/r/media_convert_queue.html.markdown +++ b/website/docs/cdktf/python/r/media_convert_queue.html.markdown @@ -35,6 +35,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) A unique identifier describing the queue * `concurrent_jobs` - (Optional) The maximum number of jobs your queue can process concurrently. For on-demand queues, the value you enter is constrained by your service quotas for Maximum concurrent jobs, per on-demand queue and Maximum concurrent jobs, per account. For reserved queues, specify the number of jobs you can process concurrently in your reservation plan instead. * `description` - (Optional) A description of the queue @@ -84,4 +85,4 @@ Using `terraform import`, import Media Convert Queue using the queue name. 
For e % terraform import aws_media_convert_queue.test tf-test-queue ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/media_package_channel.html.markdown b/website/docs/cdktf/python/r/media_package_channel.html.markdown index 8a1be78053bc..f2f9e7a4e4eb 100644 --- a/website/docs/cdktf/python/r/media_package_channel.html.markdown +++ b/website/docs/cdktf/python/r/media_package_channel.html.markdown @@ -36,6 +36,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `channel_id` - (Required) A unique identifier describing the channel * `description` - (Optional) A description of the channel * `tags` - (Optional) A map of tags to assign to the resource. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. @@ -78,4 +79,4 @@ Using `terraform import`, import Media Package Channels using the channel ID. 
Fo % terraform import aws_media_package_channel.kittens kittens-channel ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/media_packagev2_channel_group.html.markdown b/website/docs/cdktf/python/r/media_packagev2_channel_group.html.markdown index 4f9f52198ebb..3e7ce43a5190 100644 --- a/website/docs/cdktf/python/r/media_packagev2_channel_group.html.markdown +++ b/website/docs/cdktf/python/r/media_packagev2_channel_group.html.markdown @@ -22,7 +22,7 @@ from cdktf import TerraformStack # Provider bindings are generated by running `cdktf get`. # See https://cdk.tf/provider-generation for more details. # -from imports.aws. import MediaPackagev2ChannelGroup +from imports.aws.media_packagev2_channel_group import MediaPackagev2ChannelGroup class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) @@ -36,6 +36,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) A unique identifier naming the channel group * `description` - (Optional) A description of the channel group * `tags` - (Optional) A map of tags to assign to the resource. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. @@ -61,7 +62,7 @@ from cdktf import TerraformStack # Provider bindings are generated by running `cdktf get`. # See https://cdk.tf/provider-generation for more details. # -from imports.aws. 
import MediaPackagev2ChannelGroup +from imports.aws.media_packagev2_channel_group import MediaPackagev2ChannelGroup class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) @@ -74,4 +75,4 @@ Using `terraform import`, import Elemental MediaPackage Version 2 Channel Group % terraform import aws_media_packagev2_channel_group.example example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/media_store_container.html.markdown b/website/docs/cdktf/python/r/media_store_container.html.markdown index 87b79dad2fc6..ab8f0affa3ed 100644 --- a/website/docs/cdktf/python/r/media_store_container.html.markdown +++ b/website/docs/cdktf/python/r/media_store_container.html.markdown @@ -12,6 +12,8 @@ description: |- Provides a MediaStore Container. +!> **WARNING:** _This resource is deprecated and will be removed in a future version._ AWS has [announced](https://aws.amazon.com/blogs/media/support-for-aws-elemental-mediastore-ending-soon/) the discontinuation of AWS Elemental MediaStore, effective **November 13, 2025**. Users should begin transitioning to alternative solutions as soon as possible. For **simple live streaming workflows**, AWS recommends migrating to **Amazon S3**. For **advanced use cases** that require features such as packaging, DRM, or cross-region redundancy, consider using **AWS Elemental MediaPackage**. + ## Example Usage ```python @@ -35,6 +37,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The name of the container. Must contain alphanumeric characters or underscores. 
* `tags` - (Optional) A map of tags to assign to the resource. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. @@ -71,4 +74,4 @@ Using `terraform import`, import MediaStore Container using the MediaStore Conta % terraform import aws_media_store_container.example example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/media_store_container_policy.html.markdown b/website/docs/cdktf/python/r/media_store_container_policy.html.markdown index d7172a2232de..d63c38d4fdc4 100644 --- a/website/docs/cdktf/python/r/media_store_container_policy.html.markdown +++ b/website/docs/cdktf/python/r/media_store_container_policy.html.markdown @@ -12,6 +12,8 @@ description: |- Provides a MediaStore Container Policy. +!> **WARNING:** _This resource is deprecated and will be removed in a future version._ AWS has [announced](https://aws.amazon.com/blogs/media/support-for-aws-elemental-mediastore-ending-soon/) the discontinuation of AWS Elemental MediaStore, effective **November 13, 2025**. Users should begin transitioning to alternative solutions as soon as possible. For **simple live streaming workflows**, AWS recommends migrating to **Amazon S3**. For **advanced use cases** that require features such as packaging, DRM, or cross-region redundancy, consider using **AWS Elemental MediaPackage**. + ~> **NOTE:** We suggest using [`jsonencode()`](https://developer.hashicorp.com/terraform/language/functions/jsonencode) or [`aws_iam_policy_document`](/docs/providers/aws/d/iam_policy_document.html) when assigning a value to `policy`. They seamlessly translate Terraform language into JSON, enabling you to maintain consistency within your configuration without the need for context switches. 
Also, you can sidestep potential complications arising from formatting discrepancies, whitespace inconsistencies, and other nuances inherent to JSON. ## Example Usage @@ -54,7 +56,7 @@ class MyConvertedCode(TerraformStack): type="AWS" ) ], - resources=["arn:aws:mediastore:${" + data_aws_region_current.name + "}:${" + current.account_id + "}:container/${" + example.name + "}/*" + resources=["arn:aws:mediastore:${" + data_aws_region_current.region + "}:${" + current.account_id + "}:container/${" + example.name + "}/*" ], sid="MediaStoreFullAccess" ) @@ -74,6 +76,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `container_name` - (Required) The name of the container. * `policy` - (Required) The contents of the policy. For more information about building AWS IAM policy documents with Terraform, see the [AWS IAM Policy Document Guide](https://learn.hashicorp.com/terraform/aws/iam-policy). 
@@ -106,4 +109,4 @@ Using `terraform import`, import MediaStore Container Policy using the MediaStor % terraform import aws_media_store_container_policy.example example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/medialive_channel.html.markdown b/website/docs/cdktf/python/r/medialive_channel.html.markdown index 442022b114ee..e4a87a8850a2 100644 --- a/website/docs/cdktf/python/r/medialive_channel.html.markdown +++ b/website/docs/cdktf/python/r/medialive_channel.html.markdown @@ -111,6 +111,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `cdi_input_specification` - (Optional) Specification of CDI inputs for this channel. See [CDI Input Specification](#cdi-input-specification) for more details. * `input_attachments` - (Optional) Input attachments for the channel. See [Input Attachments](#input-attachments) for more details. * `log_level` - (Optional) The log level to write to Cloudwatch logs. @@ -235,7 +236,7 @@ The following arguments are optional: ### SCTE 20 Source Settings -* `convert_608_to_708` – (Optional) If upconvert, 608 data is both passed through via the “608 compatibility bytes” fields of the 708 wrapper as well as translated into 708. 708 data present in the source content will be discarded. +* `convert_608_to_708` - (Optional) If upconvert, 608 data is both passed through via the “608 compatibility bytes” fields of the 708 wrapper as well as translated into 708. 708 data present in the source content will be discarded. * `source_608_channel_number` - (Optional) Specifies the 608/708 channel number within the video track from which to extract captions. 
Unused for passthrough. ### SCTE 27 Source Settings @@ -584,62 +585,62 @@ The following arguments are optional: * `embedded_plus_scte20_destination_settings` - (Optional) Embedded Plus SCTE20 Destination Settings. * `rtmp_caption_info_destination_settings` - (Optional) RTMP Caption Info Destination Settings. * `scte20_plus_embedded_destination_settings` - (Optional) SCTE20 Plus Embedded Destination Settings. -* `scte27_destination_settings` – (Optional) SCTE27 Destination Settings. -* `smpte_tt_destination_settings` – (Optional) SMPTE TT Destination Settings. -* `teletext_destination_settings` – (Optional) Teletext Destination Settings. -* `ttml_destination_settings` – (Optional) TTML Destination Settings. See [TTML Destination Settings](#ttml-destination-settings) for more details. +* `scte27_destination_settings` - (Optional) SCTE27 Destination Settings. +* `smpte_tt_destination_settings` - (Optional) SMPTE TT Destination Settings. +* `teletext_destination_settings` - (Optional) Teletext Destination Settings. +* `ttml_destination_settings` - (Optional) TTML Destination Settings. See [TTML Destination Settings](#ttml-destination-settings) for more details. * `webvtt_destination_settings` - (Optional) WebVTT Destination Settings. See [WebVTT Destination Settings](#webvtt-destination-settings) for more details. ### Burn In Destination Settings -* `alignment` – (Optional) If no explicit xPosition or yPosition is provided, setting alignment to centered will place the captions at the bottom center of the output. Similarly, setting a left alignment will align captions to the bottom left of the output. If x and y positions are given in conjunction with the alignment parameter, the font will be justified (either left or centered) relative to those coordinates. Selecting “smart” justification will left-justify live subtitles and center-justify pre-recorded subtitles. All burn-in and DVB-Sub font settings must match. 
-* `background_color` – (Optional) Specifies the color of the rectangle behind the captions. All burn-in and DVB-Sub font settings must match. -* `background_opacity` – (Optional) Specifies the opacity of the background rectangle. 255 is opaque; 0 is transparent. Leaving this parameter out is equivalent to setting it to 0 (transparent). All burn-in and DVB-Sub font settings must match. -* `font` – (Optional) External font file used for caption burn-in. File extension must be ‘ttf’ or ‘tte’. Although the user can select output fonts for many different types of input captions, embedded, STL and teletext sources use a strict grid system. Using external fonts with these caption sources could cause unexpected display of proportional fonts. All burn-in and DVB-Sub font settings must match. See [Font](#font) for more details. -* `font_color` – (Optional) Specifies the color of the burned-in captions. This option is not valid for source captions that are STL, 608/embedded or teletext. These source settings are already pre-defined by the caption stream. All burn-in and DVB-Sub font settings must match. -* `font_opacity` – (Optional) Specifies the opacity of the burned-in captions. 255 is opaque; 0 is transparent. All burn-in and DVB-Sub font settings must match. -* `font_resolution` – (Optional) Font resolution in DPI (dots per inch); default is 96 dpi. All burn-in and DVB-Sub font settings must match. -* `font_size` – (Optional) When set to ‘auto’ fontSize will scale depending on the size of the output. Giving a positive integer will specify the exact font size in points. All burn-in and DVB-Sub font settings must match. -* `outline_color` – (Optional) Specifies font outline color. This option is not valid for source captions that are either 608/embedded or teletext. These source settings are already pre-defined by the caption stream. All burn-in and DVB-Sub font settings must match. -* `outline_size` – (Optional) Specifies font outline size in pixels. 
This option is not valid for source captions that are either 608/embedded or teletext. These source settings are already pre-defined by the caption stream. All burn-in and DVB-Sub font settings must match. -* `shadow_color` – (Optional) Specifies the color of the shadow cast by the captions. All burn-in and DVB-Sub font settings must match. -* `shadow_opacity` – (Optional) Specifies the opacity of the shadow. 255 is opaque; 0 is transparent. Leaving this parameter out is equivalent to setting it to 0 (transparent). All burn-in and DVB-Sub font settings must match. -* `shadow_x_offset` – (Optional) Specifies the horizontal offset of the shadow relative to the captions in pixels. A value of -2 would result in a shadow offset 2 pixels to the left. All burn-in and DVB-Sub font settings must match. -* `shadow_y_offset` – (Optional) Specifies the vertical offset of the shadow relative to the captions in pixels. A value of -2 would result in a shadow offset 2 pixels above the text. All burn-in and DVB-Sub font settings must match. -* `teletext_grid_control` – (Optional) Controls whether a fixed grid size will be used to generate the output subtitles bitmap. Only applicable for Teletext inputs and DVB-Sub/Burn-in outputs. -* `x_position` – (Optional) Specifies the horizontal position of the caption relative to the left side of the output in pixels. A value of 10 would result in the captions starting 10 pixels from the left of the output. If no explicit xPosition is provided, the horizontal caption position will be determined by the alignment parameter. All burn-in and DVB-Sub font settings must match. -* `y_position` – (Optional) Specifies the vertical position of the caption relative to the top of the output in pixels. A value of 10 would result in the captions starting 10 pixels from the top of the output. If no explicit yPosition is provided, the caption will be positioned towards the bottom of the output. All burn-in and DVB-Sub font settings must match. 
+* `alignment` - (Optional) If no explicit xPosition or yPosition is provided, setting alignment to centered will place the captions at the bottom center of the output. Similarly, setting a left alignment will align captions to the bottom left of the output. If x and y positions are given in conjunction with the alignment parameter, the font will be justified (either left or centered) relative to those coordinates. Selecting “smart” justification will left-justify live subtitles and center-justify pre-recorded subtitles. All burn-in and DVB-Sub font settings must match. +* `background_color` - (Optional) Specifies the color of the rectangle behind the captions. All burn-in and DVB-Sub font settings must match. +* `background_opacity` - (Optional) Specifies the opacity of the background rectangle. 255 is opaque; 0 is transparent. Leaving this parameter out is equivalent to setting it to 0 (transparent). All burn-in and DVB-Sub font settings must match. +* `font` - (Optional) External font file used for caption burn-in. File extension must be ‘ttf’ or ‘tte’. Although the user can select output fonts for many different types of input captions, embedded, STL and teletext sources use a strict grid system. Using external fonts with these caption sources could cause unexpected display of proportional fonts. All burn-in and DVB-Sub font settings must match. See [Font](#font) for more details. +* `font_color` - (Optional) Specifies the color of the burned-in captions. This option is not valid for source captions that are STL, 608/embedded or teletext. These source settings are already pre-defined by the caption stream. All burn-in and DVB-Sub font settings must match. +* `font_opacity` - (Optional) Specifies the opacity of the burned-in captions. 255 is opaque; 0 is transparent. All burn-in and DVB-Sub font settings must match. +* `font_resolution` - (Optional) Font resolution in DPI (dots per inch); default is 96 dpi. All burn-in and DVB-Sub font settings must match. 
+* `font_size` - (Optional) When set to ‘auto’ fontSize will scale depending on the size of the output. Giving a positive integer will specify the exact font size in points. All burn-in and DVB-Sub font settings must match. +* `outline_color` - (Optional) Specifies font outline color. This option is not valid for source captions that are either 608/embedded or teletext. These source settings are already pre-defined by the caption stream. All burn-in and DVB-Sub font settings must match. +* `outline_size` - (Optional) Specifies font outline size in pixels. This option is not valid for source captions that are either 608/embedded or teletext. These source settings are already pre-defined by the caption stream. All burn-in and DVB-Sub font settings must match. +* `shadow_color` - (Optional) Specifies the color of the shadow cast by the captions. All burn-in and DVB-Sub font settings must match. +* `shadow_opacity` - (Optional) Specifies the opacity of the shadow. 255 is opaque; 0 is transparent. Leaving this parameter out is equivalent to setting it to 0 (transparent). All burn-in and DVB-Sub font settings must match. +* `shadow_x_offset` - (Optional) Specifies the horizontal offset of the shadow relative to the captions in pixels. A value of -2 would result in a shadow offset 2 pixels to the left. All burn-in and DVB-Sub font settings must match. +* `shadow_y_offset` - (Optional) Specifies the vertical offset of the shadow relative to the captions in pixels. A value of -2 would result in a shadow offset 2 pixels above the text. All burn-in and DVB-Sub font settings must match. +* `teletext_grid_control` - (Optional) Controls whether a fixed grid size will be used to generate the output subtitles bitmap. Only applicable for Teletext inputs and DVB-Sub/Burn-in outputs. +* `x_position` - (Optional) Specifies the horizontal position of the caption relative to the left side of the output in pixels. 
A value of 10 would result in the captions starting 10 pixels from the left of the output. If no explicit xPosition is provided, the horizontal caption position will be determined by the alignment parameter. All burn-in and DVB-Sub font settings must match. +* `y_position` - (Optional) Specifies the vertical position of the caption relative to the top of the output in pixels. A value of 10 would result in the captions starting 10 pixels from the top of the output. If no explicit yPosition is provided, the caption will be positioned towards the bottom of the output. All burn-in and DVB-Sub font settings must match. ### DVB Sub Destination Settings -* `alignment` – (Optional) If no explicit xPosition or yPosition is provided, setting alignment to centered will place the captions at the bottom center of the output. Similarly, setting a left alignment will align captions to the bottom left of the output. If x and y positions are given in conjunction with the alignment parameter, the font will be justified (either left or centered) relative to those coordinates. Selecting “smart” justification will left-justify live subtitles and center-justify pre-recorded subtitles. This option is not valid for source captions that are STL or 608/embedded. These source settings are already pre-defined by the caption stream. All burn-in and DVB-Sub font settings must match. -* `background_color` – (Optional) Specifies the color of the rectangle behind the captions. All burn-in and DVB-Sub font settings must match. -* `background_opacity` – (Optional) Specifies the opacity of the background rectangle. 255 is opaque; 0 is transparent. Leaving this parameter blank is equivalent to setting it to 0 (transparent). All burn-in and DVB-Sub font settings must match. -* `font` – (Optional) External font file used for caption burn-in. File extension must be ‘ttf’ or ‘tte’. 
Although the user can select output fonts for many different types of input captions, embedded, STL and teletext sources use a strict grid system. Using external fonts with these caption sources could cause unexpected display of proportional fonts. All burn-in and DVB-Sub font settings must match. See [Font](#font) for more details. -* `font_color` – (Optional) Specifies the color of the burned-in captions. This option is not valid for source captions that are STL, 608/embedded or teletext. These source settings are already pre-defined by the caption stream. All burn-in and DVB-Sub font settings must match. -* `font_opacity` – (Optional) Specifies the opacity of the burned-in captions. 255 is opaque; 0 is transparent. All burn-in and DVB-Sub font settings must match. -* `font_resolution` – (Optional) Font resolution in DPI (dots per inch); default is 96 dpi. All burn-in and DVB-Sub font settings must match. -* `font_size` – (Optional) When set to auto fontSize will scale depending on the size of the output. Giving a positive integer will specify the exact font size in points. All burn-in and DVB-Sub font settings must match. -* `outline_color` – (Optional) Specifies font outline color. This option is not valid for source captions that are either 608/embedded or teletext. These source settings are already pre-defined by the caption stream. All burn-in and DVB-Sub font settings must match. -* `outline_size` – (Optional) Specifies font outline size in pixels. This option is not valid for source captions that are either 608/embedded or teletext. These source settings are already pre-defined by the caption stream. All burn-in and DVB-Sub font settings must match. -* `shadow_color` – (Optional) Specifies the color of the shadow cast by the captions. All burn-in and DVB-Sub font settings must match. -* `shadow_opacity` – (Optional) Specifies the opacity of the shadow. 255 is opaque; 0 is transparent. 
Leaving this parameter blank is equivalent to setting it to 0 (transparent). All burn-in and DVB-Sub font settings must match. -* `shadow_x_offset` – (Optional) Specifies the horizontal offset of the shadow relative to the captions in pixels. A value of -2 would result in a shadow offset 2 pixels to the left. All burn-in and DVB-Sub font settings must match. -* `shadow_y_offset` – (Optional) Specifies the vertical offset of the shadow relative to the captions in pixels. A value of -2 would result in a shadow offset 2 pixels above the text. All burn-in and DVB-Sub font settings must match. -* `teletext_grid_control` – (Optional) Controls whether a fixed grid size will be used to generate the output subtitles bitmap. Only applicable for Teletext inputs and DVB-Sub/Burn-in outputs. -* `x_position` – (Optional) Specifies the horizontal position of the caption relative to the left side of the output in pixels. A value of 10 would result in the captions starting 10 pixels from the left of the output. If no explicit xPosition is provided, the horizontal caption position will be determined by the alignment parameter. This option is not valid for source captions that are STL, 608/embedded or teletext. These source settings are already pre-defined by the caption stream. All burn-in and DVB-Sub font settings must match. -* `y_position` – (Optional) Specifies the vertical position of the caption relative to the top of the output in pixels. A value of 10 would result in the captions starting 10 pixels from the top of the output. If no explicit yPosition is provided, the caption will be positioned towards the bottom of the output. This option is not valid for source captions that are STL, 608/embedded or teletext. These source settings are already pre-defined by the caption stream. All burn-in and DVB-Sub font settings must match. 
+* `alignment` - (Optional) If no explicit xPosition or yPosition is provided, setting alignment to centered will place the captions at the bottom center of the output. Similarly, setting a left alignment will align captions to the bottom left of the output. If x and y positions are given in conjunction with the alignment parameter, the font will be justified (either left or centered) relative to those coordinates. Selecting “smart” justification will left-justify live subtitles and center-justify pre-recorded subtitles. This option is not valid for source captions that are STL or 608/embedded. These source settings are already pre-defined by the caption stream. All burn-in and DVB-Sub font settings must match. +* `background_color` - (Optional) Specifies the color of the rectangle behind the captions. All burn-in and DVB-Sub font settings must match. +* `background_opacity` - (Optional) Specifies the opacity of the background rectangle. 255 is opaque; 0 is transparent. Leaving this parameter blank is equivalent to setting it to 0 (transparent). All burn-in and DVB-Sub font settings must match. +* `font` - (Optional) External font file used for caption burn-in. File extension must be ‘ttf’ or ‘tte’. Although the user can select output fonts for many different types of input captions, embedded, STL and teletext sources use a strict grid system. Using external fonts with these caption sources could cause unexpected display of proportional fonts. All burn-in and DVB-Sub font settings must match. See [Font](#font) for more details. +* `font_color` - (Optional) Specifies the color of the burned-in captions. This option is not valid for source captions that are STL, 608/embedded or teletext. These source settings are already pre-defined by the caption stream. All burn-in and DVB-Sub font settings must match. +* `font_opacity` - (Optional) Specifies the opacity of the burned-in captions. 255 is opaque; 0 is transparent. All burn-in and DVB-Sub font settings must match. 
+* `font_resolution` - (Optional) Font resolution in DPI (dots per inch); default is 96 dpi. All burn-in and DVB-Sub font settings must match. +* `font_size` - (Optional) When set to auto fontSize will scale depending on the size of the output. Giving a positive integer will specify the exact font size in points. All burn-in and DVB-Sub font settings must match. +* `outline_color` - (Optional) Specifies font outline color. This option is not valid for source captions that are either 608/embedded or teletext. These source settings are already pre-defined by the caption stream. All burn-in and DVB-Sub font settings must match. +* `outline_size` - (Optional) Specifies font outline size in pixels. This option is not valid for source captions that are either 608/embedded or teletext. These source settings are already pre-defined by the caption stream. All burn-in and DVB-Sub font settings must match. +* `shadow_color` - (Optional) Specifies the color of the shadow cast by the captions. All burn-in and DVB-Sub font settings must match. +* `shadow_opacity` - (Optional) Specifies the opacity of the shadow. 255 is opaque; 0 is transparent. Leaving this parameter blank is equivalent to setting it to 0 (transparent). All burn-in and DVB-Sub font settings must match. +* `shadow_x_offset` - (Optional) Specifies the horizontal offset of the shadow relative to the captions in pixels. A value of -2 would result in a shadow offset 2 pixels to the left. All burn-in and DVB-Sub font settings must match. +* `shadow_y_offset` - (Optional) Specifies the vertical offset of the shadow relative to the captions in pixels. A value of -2 would result in a shadow offset 2 pixels above the text. All burn-in and DVB-Sub font settings must match. +* `teletext_grid_control` - (Optional) Controls whether a fixed grid size will be used to generate the output subtitles bitmap. Only applicable for Teletext inputs and DVB-Sub/Burn-in outputs. 
+* `x_position` - (Optional) Specifies the horizontal position of the caption relative to the left side of the output in pixels. A value of 10 would result in the captions starting 10 pixels from the left of the output. If no explicit xPosition is provided, the horizontal caption position will be determined by the alignment parameter. This option is not valid for source captions that are STL, 608/embedded or teletext. These source settings are already pre-defined by the caption stream. All burn-in and DVB-Sub font settings must match. +* `y_position` - (Optional) Specifies the vertical position of the caption relative to the top of the output in pixels. A value of 10 would result in the captions starting 10 pixels from the top of the output. If no explicit yPosition is provided, the caption will be positioned towards the bottom of the output. This option is not valid for source captions that are STL, 608/embedded or teletext. These source settings are already pre-defined by the caption stream. All burn-in and DVB-Sub font settings must match. ### EBU TT D Destination Settings -* `copyright_holder` – (Optional) Complete this field if you want to include the name of the copyright holder in the copyright tag in the captions metadata. -* `fill_line_gap` – (Optional) Specifies how to handle the gap between the lines (in multi-line captions). - enabled: Fill with the captions background color (as specified in the input captions). - disabled: Leave the gap unfilled. -* `font_family` – (Optional) Specifies the font family to include in the font data attached to the EBU-TT captions. Valid only if styleControl is set to include. If you leave this field empty, the font family is set to “monospaced”. (If styleControl is set to exclude, the font family is always set to “monospaced”.) You specify only the font family. All other style information (color, bold, position and so on) is copied from the input captions. 
The size is always set to 100% to allow the downstream player to choose the size. - Enter a list of font families, as a comma-separated list of font names, in order of preference. The name can be a font family (such as “Arial”), or a generic font family (such as “serif”), or “default” (to let the downstream player choose the font). - Leave blank to set the family to “monospace”. -* `style_control` – (Optional) Specifies the style information (font color, font position, and so on) to include in the font data that is attached to the EBU-TT captions. - include: Take the style information (font color, font position, and so on) from the source captions and include that information in the font data attached to the EBU-TT captions. This option is valid only if the source captions are Embedded or Teletext. - exclude: In the font data attached to the EBU-TT captions, set the font family to “monospaced”. Do not include any other style information. +* `copyright_holder` - (Optional) Complete this field if you want to include the name of the copyright holder in the copyright tag in the captions metadata. +* `fill_line_gap` - (Optional) Specifies how to handle the gap between the lines (in multi-line captions). - enabled: Fill with the captions background color (as specified in the input captions). - disabled: Leave the gap unfilled. +* `font_family` - (Optional) Specifies the font family to include in the font data attached to the EBU-TT captions. Valid only if styleControl is set to include. If you leave this field empty, the font family is set to “monospaced”. (If styleControl is set to exclude, the font family is always set to “monospaced”.) You specify only the font family. All other style information (color, bold, position and so on) is copied from the input captions. The size is always set to 100% to allow the downstream player to choose the size. - Enter a list of font families, as a comma-separated list of font names, in order of preference. 
The name can be a font family (such as “Arial”), or a generic font family (such as “serif”), or “default” (to let the downstream player choose the font). - Leave blank to set the family to “monospace”. +* `style_control` - (Optional) Specifies the style information (font color, font position, and so on) to include in the font data that is attached to the EBU-TT captions. - include: Take the style information (font color, font position, and so on) from the source captions and include that information in the font data attached to the EBU-TT captions. This option is valid only if the source captions are Embedded or Teletext. - exclude: In the font data attached to the EBU-TT captions, set the font family to “monospaced”. Do not include any other style information. ### TTML Destination Settings -* `style_control` – (Optional) This field is not currently supported and will not affect the output styling. Leave the default value. +* `style_control` - (Optional) This field is not currently supported and will not affect the output styling. Leave the default value. ### WebVTT Destination Settings @@ -647,38 +648,38 @@ The following arguments are optional: ### Font -* `password_param` – (Optional) Key used to extract the password from EC2 Parameter store. -* `uri` – (Required) Path to a file accessible to the live stream. -* `username` – (Optional) Username to be used. +* `password_param` - (Optional) Key used to extract the password from EC2 Parameter store. +* `uri` - (Required) Path to a file accessible to the live stream. +* `username` - (Optional) Username to be used. ### Global Configuration -* `initial_audio_gain` – (Optional) Value to set the initial audio gain for the Live Event. -* `input_end_action` – (Optional) Indicates the action to take when the current input completes (e.g. end-of-file). When switchAndLoopInputs is configured the encoder will restart at the beginning of the first input. 
When “none” is configured the encoder will transcode either black, a solid color, or a user specified slate images per the “Input Loss Behavior” configuration until the next input switch occurs (which is controlled through the Channel Schedule API). +* `initial_audio_gain` - (Optional) Value to set the initial audio gain for the Live Event. +* `input_end_action` - (Optional) Indicates the action to take when the current input completes (e.g. end-of-file). When switchAndLoopInputs is configured the encoder will restart at the beginning of the first input. When “none” is configured the encoder will transcode either black, a solid color, or a user specified slate images per the “Input Loss Behavior” configuration until the next input switch occurs (which is controlled through the Channel Schedule API). * `input_loss_behavior` - (Optional) Settings for system actions when input is lost. See [Input Loss Behavior](#input-loss-behavior) for more details. -* `output_locking_mode` – (Optional) Indicates how MediaLive pipelines are synchronized. PIPELINE\_LOCKING - MediaLive will attempt to synchronize the output of each pipeline to the other. EPOCH\_LOCKING - MediaLive will attempt to synchronize the output of each pipeline to the Unix epoch. -* `output_timing_source` – (Optional) Indicates whether the rate of frames emitted by the Live encoder should be paced by its system clock (which optionally may be locked to another source via NTP) or should be locked to the clock of the source that is providing the input stream. -* `support_low_framerate_inputs` – (Optional) Adjusts video input buffer for streams with very low video framerates. This is commonly set to enabled for music channels with less than one video frame per second. +* `output_locking_mode` - (Optional) Indicates how MediaLive pipelines are synchronized. PIPELINE\_LOCKING - MediaLive will attempt to synchronize the output of each pipeline to the other. 
EPOCH\_LOCKING - MediaLive will attempt to synchronize the output of each pipeline to the Unix epoch. +* `output_timing_source` - (Optional) Indicates whether the rate of frames emitted by the Live encoder should be paced by its system clock (which optionally may be locked to another source via NTP) or should be locked to the clock of the source that is providing the input stream. +* `support_low_framerate_inputs` - (Optional) Adjusts video input buffer for streams with very low video framerates. This is commonly set to enabled for music channels with less than one video frame per second. ### Input Loss Behavior -* `password_param` – (Optional) Key used to extract the password from EC2 Parameter store. -* `uri` – (Required) Path to a file accessible to the live stream. -* `username` – (Optional) Username to be used. +* `password_param` - (Optional) Key used to extract the password from EC2 Parameter store. +* `uri` - (Required) Path to a file accessible to the live stream. +* `username` - (Optional) Username to be used. ### Motion Graphics Configuration -* `motion_graphics_insertion` – (Optional) Motion Graphics Insertion. +* `motion_graphics_insertion` - (Optional) Motion Graphics Insertion. * `motion_graphics_settings`– (Required) Motion Graphics Settings. See [Motion Graphics Settings](#motion-graphics-settings) for more details. ### Motion Graphics Settings -* `html_motion_graphics_settings` – (Optional) Html Motion Graphics Settings. +* `html_motion_graphics_settings` - (Optional) Html Motion Graphics Settings. ### Nielsen Configuration -* `distributor_id` – (Optional) Enter the Distributor ID assigned to your organization by Nielsen. -* `nielsen_pcm_to_id3_tagging` – (Optional) Enables Nielsen PCM to ID3 tagging. +* `distributor_id` - (Optional) Enter the Distributor ID assigned to your organization by Nielsen. +* `nielsen_pcm_to_id3_tagging` - (Optional) Enables Nielsen PCM to ID3 tagging. 
### Avail Blanking @@ -818,4 +819,4 @@ Using `terraform import`, import MediaLive Channel using the `channel_id`. For e % terraform import aws_medialive_channel.example 1234567 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/medialive_input.html.markdown b/website/docs/cdktf/python/r/medialive_input.html.markdown index 5077ef92e891..b5d71c8b0a0b 100644 --- a/website/docs/cdktf/python/r/medialive_input.html.markdown +++ b/website/docs/cdktf/python/r/medialive_input.html.markdown @@ -60,6 +60,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `destinations` - (Optional) Destination settings for PUSH type inputs. See [Destinations](#destinations) for more details. * `input_devices` - (Optional) Settings for the devices. See [Input Devices](#input-devices) for more details. * `media_connect_flows` - (Optional) A list of the MediaConnect Flows. See [Media Connect Flows](#media-connect-flows) for more details. @@ -134,4 +135,4 @@ Using `terraform import`, import MediaLive Input using the `id`. 
For example: % terraform import aws_medialive_input.example 12345678 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/medialive_input_security_group.html.markdown b/website/docs/cdktf/python/r/medialive_input_security_group.html.markdown index fc9f5341f2d2..9d798094707d 100644 --- a/website/docs/cdktf/python/r/medialive_input_security_group.html.markdown +++ b/website/docs/cdktf/python/r/medialive_input_security_group.html.markdown @@ -47,6 +47,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `tags` - (Optional) A map of tags to assign to the InputSecurityGroup. If configured with a provider [`default_tags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. ### Whitelist Rules @@ -94,4 +95,4 @@ Using `terraform import`, import MediaLive InputSecurityGroup using the `id`. Fo % terraform import aws_medialive_input_security_group.example 123456 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/medialive_multiplex.html.markdown b/website/docs/cdktf/python/r/medialive_multiplex.html.markdown index ba18a8fdf91b..2fb822077a4c 100644 --- a/website/docs/cdktf/python/r/medialive_multiplex.html.markdown +++ b/website/docs/cdktf/python/r/medialive_multiplex.html.markdown @@ -61,6 +61,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `start_multiplex` - (Optional) Whether to start the Multiplex. Defaults to `false`. * `tags` - (Optional) A map of tags to assign to the Multiplex. If configured with a provider [`default_tags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. @@ -110,4 +111,4 @@ Using `terraform import`, import MediaLive Multiplex using the `id`. For example % terraform import aws_medialive_multiplex.example 12345678 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/medialive_multiplex_program.html.markdown b/website/docs/cdktf/python/r/medialive_multiplex_program.html.markdown index 5bba5f1cf9a2..8f18afce69d3 100644 --- a/website/docs/cdktf/python/r/medialive_multiplex_program.html.markdown +++ b/website/docs/cdktf/python/r/medialive_multiplex_program.html.markdown @@ -77,6 +77,8 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). + ### Multiple Program Settings * `program_number` - (Required) Unique program number. 
@@ -138,4 +140,4 @@ Using `terraform import`, import MediaLive MultiplexProgram using the `id`, or a % terraform import aws_medialive_multiplex_program.example example_program/1234567 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/memorydb_acl.html.markdown b/website/docs/cdktf/python/r/memorydb_acl.html.markdown index 75ccda8d6837..4cc5e67eeb8a 100644 --- a/website/docs/cdktf/python/r/memorydb_acl.html.markdown +++ b/website/docs/cdktf/python/r/memorydb_acl.html.markdown @@ -38,6 +38,7 @@ class MyConvertedCode(TerraformStack): The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Optional, Forces new resource) Name of the ACL. If omitted, Terraform will assign a random, unique name. Conflicts with `name_prefix`. * `name_prefix` - (Optional, Forces new resource) Creates a unique name beginning with the specified prefix. Conflicts with `name`. * `user_names` - (Optional) Set of MemoryDB user names to be included in this ACL. @@ -77,4 +78,4 @@ Using `terraform import`, import an ACL using the `name`. 
For example: % terraform import aws_memorydb_acl.example my-acl ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/memorydb_cluster.html.markdown b/website/docs/cdktf/python/r/memorydb_cluster.html.markdown index d730d2a779db..67807372d999 100644 --- a/website/docs/cdktf/python/r/memorydb_cluster.html.markdown +++ b/website/docs/cdktf/python/r/memorydb_cluster.html.markdown @@ -50,6 +50,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `auto_minor_version_upgrade` - (Optional, Forces new resource) When set to `true`, the cluster will automatically receive minor engine version upgrades after launch. Defaults to `true`. * `data_tiering` - (Optional, Forces new resource) Enables data tiering. This option is not supported by all instance types. For more information, see [Data tiering](https://docs.aws.amazon.com/memorydb/latest/devguide/data-tiering.html). * `description` - (Optional) Description for the cluster. Defaults to `"Managed by Terraform"`. @@ -131,4 +132,4 @@ Using `terraform import`, import a cluster using the `name`. 
For example: % terraform import aws_memorydb_cluster.example my-cluster ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/memorydb_multi_region_cluster.html.markdown b/website/docs/cdktf/python/r/memorydb_multi_region_cluster.html.markdown index c08924f324be..80ff1132864a 100644 --- a/website/docs/cdktf/python/r/memorydb_multi_region_cluster.html.markdown +++ b/website/docs/cdktf/python/r/memorydb_multi_region_cluster.html.markdown @@ -57,6 +57,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` - (Optional) description for the multi-region cluster. * `engine` - (Optional) The name of the engine to be used for the multi-region cluster. Valid values are `redis` and `valkey`. * `engine_version` - (Optional) The version of the engine to be used for the multi-region cluster. Downgrades are not supported. 
@@ -106,4 +107,4 @@ Using `terraform import`, import a cluster using the `multi_region_cluster_name` % terraform import aws_memorydb_multi_region_cluster.example virxk-example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/memorydb_parameter_group.html.markdown b/website/docs/cdktf/python/r/memorydb_parameter_group.html.markdown index d0eb498fbaea..3144167fdf11 100644 --- a/website/docs/cdktf/python/r/memorydb_parameter_group.html.markdown +++ b/website/docs/cdktf/python/r/memorydb_parameter_group.html.markdown @@ -47,6 +47,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Optional, Forces new resource) Name of the parameter group. If omitted, Terraform will assign a random, unique name. Conflicts with `name_prefix`. * `name_prefix` - (Optional, Forces new resource) Creates a unique name beginning with the specified prefix. Conflicts with `name`. * `description` - (Optional, Forces new resource) Description for the parameter group. Defaults to `"Managed by Terraform"`. @@ -91,4 +92,4 @@ Using `terraform import`, import a parameter group using the `name`. 
For example % terraform import aws_memorydb_parameter_group.example my-parameter-group ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/memorydb_snapshot.html.markdown b/website/docs/cdktf/python/r/memorydb_snapshot.html.markdown index 8a355bf357e4..697a063e27b7 100644 --- a/website/docs/cdktf/python/r/memorydb_snapshot.html.markdown +++ b/website/docs/cdktf/python/r/memorydb_snapshot.html.markdown @@ -38,6 +38,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `cluster_name` - (Required, Forces new resource) Name of the MemoryDB cluster to take a snapshot of. * `name` - (Optional, Forces new resource) Name of the snapshot. If omitted, Terraform will assign a random, unique name. Conflicts with `name_prefix`. * `name_prefix` - (Optional, Forces new resource) Creates a unique name beginning with the specified prefix. Conflicts with `name`. @@ -100,4 +101,4 @@ Using `terraform import`, import a snapshot using the `name`. 
For example: % terraform import aws_memorydb_snapshot.example my-snapshot ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/memorydb_subnet_group.html.markdown b/website/docs/cdktf/python/r/memorydb_subnet_group.html.markdown index 16ac724c4baf..edcf26fb26b3 100644 --- a/website/docs/cdktf/python/r/memorydb_subnet_group.html.markdown +++ b/website/docs/cdktf/python/r/memorydb_subnet_group.html.markdown @@ -56,6 +56,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Optional, Forces new resource) Name of the subnet group. If omitted, Terraform will assign a random, unique name. Conflicts with `name_prefix`. * `name_prefix` - (Optional, Forces new resource) Creates a unique name beginning with the specified prefix. Conflicts with `name`. * `description` - (Optional) Description for the subnet group. Defaults to `"Managed by Terraform"`. @@ -95,4 +96,4 @@ Using `terraform import`, import a subnet group using its `name`. 
For example: % terraform import aws_memorydb_subnet_group.example my-subnet-group ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/memorydb_user.html.markdown b/website/docs/cdktf/python/r/memorydb_user.html.markdown index 9e25ee0e6780..b635a08fa0df 100644 --- a/website/docs/cdktf/python/r/memorydb_user.html.markdown +++ b/website/docs/cdktf/python/r/memorydb_user.html.markdown @@ -57,6 +57,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `tags` - (Optional) A map of tags to assign to the resource. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. ### authentication_mode Configuration Block @@ -102,4 +103,4 @@ Using `terraform import`, import a user using the `user_name`. For example: The `passwords` are not available for imported resources, as this information cannot be read back from the MemoryDB API. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/mq_broker.html.markdown b/website/docs/cdktf/python/r/mq_broker.html.markdown index e250d145429c..38d86cc0772b 100644 --- a/website/docs/cdktf/python/r/mq_broker.html.markdown +++ b/website/docs/cdktf/python/r/mq_broker.html.markdown @@ -3,22 +3,22 @@ subcategory: "MQ" layout: "aws" page_title: "AWS: aws_mq_broker" description: |- - Provides an MQ Broker Resource + Manages an AWS MQ broker --- # Resource: aws_mq_broker -Provides an Amazon MQ broker resource. 
This resources also manages users for the broker. +Manages an AWS MQ broker. Use this resource to create and manage message brokers for ActiveMQ and RabbitMQ engines. -> For more information on Amazon MQ, see [Amazon MQ documentation](https://docs.aws.amazon.com/amazon-mq/latest/developer-guide/welcome.html). -~> **NOTE:** Amazon MQ currently places limits on **RabbitMQ** brokers. For example, a RabbitMQ broker cannot have: instances with an associated IP address of an ENI attached to the broker, an associated LDAP server to authenticate and authorize broker connections, storage type `EFS`, or audit logging. Although this resource allows you to create RabbitMQ users, RabbitMQ users cannot have console access or groups. Also, Amazon MQ does not return information about RabbitMQ users so drift detection is not possible. +!> **Warning:** Amazon MQ currently places limits on **RabbitMQ** brokers. For example, a RabbitMQ broker cannot have: instances with an associated IP address of an ENI attached to the broker, an associated LDAP server to authenticate and authorize broker connections, storage type `EFS`, or audit logging. Although this resource allows you to create RabbitMQ users, RabbitMQ users cannot have console access or groups. Also, Amazon MQ does not return information about RabbitMQ users so drift detection is not possible. -~> **NOTE:** Changes to an MQ Broker can occur when you change a parameter, such as `configuration` or `user`, and are reflected in the next maintenance window. Because of this, Terraform may report a difference in its planning phase because a modification has not yet taken place. You can use the `apply_immediately` flag to instruct the service to apply the change immediately (see documentation below). Using `apply_immediately` can result in a brief downtime as the broker reboots. +!> **Warning:** All arguments including the username and password will be stored in the raw state as plain-text. 
[Read more about sensitive data in state](https://www.terraform.io/docs/state/sensitive-data.html). -~> **NOTE:** All arguments including the username and password will be stored in the raw state as plain-text. [Read more about sensitive data in state](https://www.terraform.io/docs/state/sensitive-data.html). +~> **Note:** Changes to an MQ Broker can occur when you change a parameter, such as `configuration` or `user`, and are reflected in the next maintenance window. Because of this, Terraform may report a difference in its planning phase because a modification has not yet taken place. You can use the `apply_immediately` flag to instruct the service to apply the change immediately (see documentation below). Using `apply_immediately` can result in a brief downtime as the broker reboots. ## Example Usage @@ -47,8 +47,8 @@ class MyConvertedCode(TerraformStack): host_instance_type="mq.t2.micro", security_groups=[Token.as_string(aws_security_group_test.id)], user=[MqBrokerUser( - password="MindTheGap", - username="ExampleUser" + password="", + username="example_user" ) ] ) @@ -56,8 +56,6 @@ class MyConvertedCode(TerraformStack): ### High-throughput Optimized Example -This example shows the use of EBS storage for high-throughput optimized performance. - ```python # DO NOT EDIT. 
Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug from constructs import Construct @@ -82,8 +80,8 @@ class MyConvertedCode(TerraformStack): security_groups=[Token.as_string(aws_security_group_test.id)], storage_type="ebs", user=[MqBrokerUser( - password="MindTheGap", - username="ExampleUser" + password="", + username="example_user" ) ] ) @@ -114,12 +112,12 @@ class MyConvertedCode(TerraformStack): host_instance_type="mq.m5.large", security_groups=[Token.as_string(aws_security_group_example.id)], user=[MqBrokerUser( - password="MindTheGap", - username="ExampleUser" + password="", + username="example_user" ), MqBrokerUser( - password="Example12345", + password="", replication_user=True, - username="ExampleReplicationUser" + username="example_replication_user" ) ] ) @@ -133,12 +131,12 @@ class MyConvertedCode(TerraformStack): provider=awsalternate, security_groups=[Token.as_string(aws_security_group_example_primary.id)], user=[MqBrokerUser( - password="MindTheGap", - username="ExampleUser" + password="", + username="example_user" ), MqBrokerUser( - password="Example12345", + password="", replication_user=True, - username="ExampleReplicationUser" + username="example_replication_user" ) ] ) @@ -152,26 +150,27 @@ The following arguments are required: * `broker_name` - (Required) Name of the broker. * `engine_type` - (Required) Type of broker engine. Valid values are `ActiveMQ` and `RabbitMQ`. -* `engine_version` - (Required) Version of the broker engine. See the [AmazonMQ Broker Engine docs](https://docs.aws.amazon.com/amazon-mq/latest/developer-guide/broker-engine.html) for supported versions. For example, `5.17.6`. +* `engine_version` - (Required) Version of the broker engine. * `host_instance_type` - (Required) Broker's instance type. For example, `mq.t3.micro`, `mq.m5.large`. * `user` - (Required) Configuration block for broker users. 
For `engine_type` of `RabbitMQ`, Amazon MQ does not return broker users preventing this resource from making user updates and drift detection. Detailed below. The following arguments are optional: -* `apply_immediately` - (Optional) Specifies whether any broker modifications are applied immediately, or during the next maintenance window. Default is `false`. +* `apply_immediately` - (Optional) Whether to apply broker modifications immediately. Default is `false`. * `authentication_strategy` - (Optional) Authentication strategy used to secure the broker. Valid values are `simple` and `ldap`. `ldap` is not supported for `engine_type` `RabbitMQ`. * `auto_minor_version_upgrade` - (Optional) Whether to automatically upgrade to new minor versions of brokers as Amazon MQ makes releases available. * `configuration` - (Optional) Configuration block for broker configuration. Applies to `engine_type` of `ActiveMQ` and `RabbitMQ` only. Detailed below. -* `data_replication_mode` - (Optional) Defines whether this broker is a part of a data replication pair. Valid values are `CRDR` and `NONE`. -* `data_replication_primary_broker_arn` - (Optional) The Amazon Resource Name (ARN) of the primary broker that is used to replicate data from in a data replication pair, and is applied to the replica broker. Must be set when `data_replication_mode` is `CRDR`. +* `data_replication_mode` - (Optional) Whether this broker is part of a data replication pair. Valid values are `CRDR` and `NONE`. +* `data_replication_primary_broker_arn` - (Optional) ARN of the primary broker used to replicate data in a data replication pair. Required when `data_replication_mode` is `CRDR`. * `deployment_mode` - (Optional) Deployment mode of the broker. Valid values are `SINGLE_INSTANCE`, `ACTIVE_STANDBY_MULTI_AZ`, and `CLUSTER_MULTI_AZ`. Default is `SINGLE_INSTANCE`. * `encryption_options` - (Optional) Configuration block containing encryption options. Detailed below. 
-* `ldap_server_metadata` - (Optional) Configuration block for the LDAP server used to authenticate and authorize connections to the broker. Not supported for `engine_type` `RabbitMQ`. Detailed below. (Currently, AWS may not process changes to LDAP server metadata.) -* `logs` - (Optional) Configuration block for the logging configuration of the broker. Detailed below. +* `ldap_server_metadata` - (Optional) Configuration block for the LDAP server used to authenticate and authorize connections. Not supported for `engine_type` `RabbitMQ`. Detailed below. +* `logs` - (Optional) Configuration block for the logging configuration. Detailed below. * `maintenance_window_start_time` - (Optional) Configuration block for the maintenance window start time. Detailed below. * `publicly_accessible` - (Optional) Whether to enable connections from applications outside of the VPC that hosts the broker's subnets. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `security_groups` - (Optional) List of security group IDs assigned to the broker. -* `storage_type` - (Optional) Storage type of the broker. For `engine_type` `ActiveMQ`, the valid values are `efs` and `ebs`, and the AWS-default is `efs`. For `engine_type` `RabbitMQ`, only `ebs` is supported. When using `ebs`, only the `mq.m5` broker instance type family is supported. +* `storage_type` - (Optional) Storage type of the broker. For `engine_type` `ActiveMQ`, valid values are `efs` and `ebs` (AWS-default is `efs`). For `engine_type` `RabbitMQ`, only `ebs` is supported. When using `ebs`, only the `mq.m5` broker instance type family is supported. * `subnet_ids` - (Optional) List of subnet IDs in which to launch the broker. A `SINGLE_INSTANCE` deployment requires one subnet. 
An `ACTIVE_STANDBY_MULTI_AZ` deployment requires multiple subnets. * `tags` - (Optional) Map of tags to assign to the broker. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. @@ -179,29 +178,29 @@ The following arguments are optional: The following arguments are optional: -* `id` - (Optional) The Configuration ID. +* `id` - (Optional) Configuration ID. * `revision` - (Optional) Revision of the Configuration. ### encryption_options The following arguments are optional: -* `kms_key_id` - (Optional) Amazon Resource Name (ARN) of Key Management Service (KMS) Customer Master Key (CMK) to use for encryption at rest. Requires setting `use_aws_owned_key` to `false`. To perform drift detection when AWS-managed CMKs or customer-managed CMKs are in use, this value must be configured. -* `use_aws_owned_key` - (Optional) Whether to enable an AWS-owned KMS CMK that is not in your account. Defaults to `true`. Setting to `false` without configuring `kms_key_id` will create an AWS-managed CMK aliased to `aws/mq` in your account. +* `kms_key_id` - (Optional) ARN of KMS CMK to use for encryption at rest. Requires setting `use_aws_owned_key` to `false`. To perform drift detection when AWS-managed CMKs or customer-managed CMKs are in use, this value must be configured. +* `use_aws_owned_key` - (Optional) Whether to enable an AWS-owned KMS CMK not in your account. Defaults to `true`. Setting to `false` without configuring `kms_key_id` creates an AWS-managed CMK aliased to `aws/mq` in your account. ### ldap_server_metadata The following arguments are optional: -* `hosts` - (Optional) List of a fully qualified domain name of the LDAP server and an optional failover server. -* `role_base` - (Optional) Fully qualified name of the directory to search for a user’s groups. 
-* `role_name` - (Optional) Specifies the LDAP attribute that identifies the group name attribute in the object returned from the group membership query. +* `hosts` - (Optional) List of fully qualified domain names of the LDAP server and optional failover server. +* `role_base` - (Optional) Fully qualified name of the directory to search for a user's groups. +* `role_name` - (Optional) LDAP attribute that identifies the group name attribute in the object returned from the group membership query. * `role_search_matching` - (Optional) Search criteria for groups. * `role_search_subtree` - (Optional) Whether the directory search scope is the entire sub-tree. * `service_account_password` - (Optional) Service account password. * `service_account_username` - (Optional) Service account username. * `user_base` - (Optional) Fully qualified name of the directory where you want to search for users. -* `user_role_name` - (Optional) Specifies the name of the LDAP attribute for the user group membership. +* `user_role_name` - (Optional) Name of the LDAP attribute for the user group membership. * `user_search_matching` - (Optional) Search criteria for users. * `user_search_subtree` - (Optional) Whether the directory search scope is the entire sub-tree. @@ -209,8 +208,8 @@ The following arguments are optional: The following arguments are optional: -* `audit` - (Optional) Enables audit logging. Auditing is only possible for `engine_type` of `ActiveMQ`. User management action made using JMX or the ActiveMQ Web Console is logged. Defaults to `false`. -* `general` - (Optional) Enables general logging via CloudWatch. Defaults to `false`. +* `audit` - (Optional) Whether to enable audit logging. Only possible for `engine_type` of `ActiveMQ`. Logs user management actions via JMX or ActiveMQ Web Console. Defaults to `false`. +* `general` - (Optional) Whether to enable general logging via CloudWatch. Defaults to `false`. 
### maintenance_window_start_time @@ -222,11 +221,16 @@ The following arguments are required: ### user +The following arguments are required: + +* `password` - (Required) Password of the user. Must be 12 to 250 characters long, contain at least 4 unique characters, and must not contain commas. +* `username` - (Required) Username of the user. + +The following arguments are optional: + * `console_access` - (Optional) Whether to enable access to the [ActiveMQ Web Console](http://activemq.apache.org/web-console.html) for the user. Applies to `engine_type` of `ActiveMQ` only. * `groups` - (Optional) List of groups (20 maximum) to which the ActiveMQ user belongs. Applies to `engine_type` of `ActiveMQ` only. -* `password` - (Required) Password of the user. It must be 12 to 250 characters long, at least 4 unique characters, and must not contain commas. -* `replication_user` - (Optional) Whether to set set replication user. Defaults to `false`. -* `username` - (Required) Username of the user. +* `replication_user` - (Optional) Whether to set replication user. Defaults to `false`. ~> **NOTE:** AWS currently does not support updating RabbitMQ users. Updates to users can only be in the RabbitMQ UI. @@ -237,7 +241,7 @@ This resource exports the following attributes in addition to the arguments abov * `arn` - ARN of the broker. * `id` - Unique ID that Amazon MQ generates for the broker. * `instances` - List of information about allocated brokers (both active & standby). - * `instances.0.console_url` - The URL of the [ActiveMQ Web Console](http://activemq.apache.org/web-console.html) or the [RabbitMQ Management UI](https://www.rabbitmq.com/management.html#external-monitoring) depending on `engine_type`. + * `instances.0.console_url` - URL of the [ActiveMQ Web Console](http://activemq.apache.org/web-console.html) or the [RabbitMQ Management UI](https://www.rabbitmq.com/management.html#external-monitoring) depending on `engine_type`. 
* `instances.0.ip_address` - IP Address of the broker. * `instances.0.endpoints` - Broker's wire-level protocol endpoints in the following order & format referenceable e.g., as `instances.0.endpoints.0` (SSL): * For `ActiveMQ`: @@ -248,8 +252,8 @@ This resource exports the following attributes in addition to the arguments abov * `wss://broker-id.mq.us-west-2.amazonaws.com:61619` * For `RabbitMQ`: * `amqps://broker-id.mq.us-west-2.amazonaws.com:5671` -* `pending_data_replication_mode` - (Optional) The data replication mode that will be applied after reboot. -* `tags_all` - A map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). +* `pending_data_replication_mode` - Data replication mode that will be applied after reboot. +* `tags_all` - Map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). ## Timeouts @@ -284,4 +288,4 @@ Using `terraform import`, import MQ Brokers using their broker id. For example: % terraform import aws_mq_broker.example a1b2c3d4-d5f6-7777-8888-9999aaaabbbbcccc ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/mq_configuration.html.markdown b/website/docs/cdktf/python/r/mq_configuration.html.markdown index 383efe4545d2..3242c06b335a 100644 --- a/website/docs/cdktf/python/r/mq_configuration.html.markdown +++ b/website/docs/cdktf/python/r/mq_configuration.html.markdown @@ -2,17 +2,14 @@ subcategory: "MQ" layout: "aws" page_title: "AWS: aws_mq_configuration" -description: |- - Provides an MQ configuration Resource +description: "Manages an Amazon MQ configuration" --- # Resource: aws_mq_configuration -Provides an MQ Configuration Resource. 
- -For more information on Amazon MQ, see [Amazon MQ documentation](https://docs.aws.amazon.com/amazon-mq/latest/developer-guide/welcome.html). +Manages an Amazon MQ configuration. Use this resource to create and manage broker configurations for ActiveMQ and RabbitMQ brokers. ## Example Usage @@ -66,16 +63,17 @@ class MyConvertedCode(TerraformStack): The following arguments are required: -* `data` - (Required) Broker configuration in XML format for `ActiveMQ` or [Cuttlefish](https://github.com/Kyorai/cuttlefish) format for `RabbitMQ`. See [official docs](https://docs.aws.amazon.com/amazon-mq/latest/developer-guide/amazon-mq-broker-configuration-parameters.html) for supported parameters and format of the XML. +* `data` - (Required) Broker configuration in XML format for ActiveMQ or Cuttlefish format for RabbitMQ. See [AWS documentation](https://docs.aws.amazon.com/amazon-mq/latest/developer-guide/amazon-mq-broker-configuration-parameters.html) for supported parameters and format of the XML. * `engine_type` - (Required) Type of broker engine. Valid values are `ActiveMQ` and `RabbitMQ`. * `engine_version` - (Required) Version of the broker engine. * `name` - (Required) Name of the configuration. The following arguments are optional: -* `authentication_strategy` - (Optional) Authentication strategy associated with the configuration. Valid values are `simple` and `ldap`. `ldap` is not supported for `engine_type` `RabbitMQ`. +* `authentication_strategy` - (Optional) Authentication strategy associated with the configuration. Valid values are `simple` and `ldap`. `ldap` is not supported for RabbitMQ engine type. * `description` - (Optional) Description of the configuration. -* `tags` - (Optional) Map of tags to assign to the resource. 
If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `tags` - (Optional) Key-value map of tags to assign to the resource. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. ## Attribute Reference @@ -84,7 +82,7 @@ This resource exports the following attributes in addition to the arguments abov * `arn` - ARN of the configuration. * `id` - Unique ID that Amazon MQ generates for the configuration. * `latest_revision` - Latest revision of the configuration. -* `tags_all` - A map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). +* `tags_all` - Map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). ## Import @@ -111,4 +109,4 @@ Using `terraform import`, import MQ Configurations using the configuration ID. 
F % terraform import aws_mq_configuration.example c-0187d1eb-88c8-475a-9b79-16ef5a10c94f ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/msk_cluster.html.markdown b/website/docs/cdktf/python/r/msk_cluster.html.markdown index 1a42d28b4153..ff4ac13840c6 100644 --- a/website/docs/cdktf/python/r/msk_cluster.html.markdown +++ b/website/docs/cdktf/python/r/msk_cluster.html.markdown @@ -205,16 +205,17 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: -* `broker_node_group_info` - (Required) Configuration block for the broker nodes of the Kafka cluster. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `broker_node_group_info` - (Required) Configuration block for the broker nodes of the Kafka cluster. See [broker_node_group_info Argument Reference](#broker_node_group_info-argument-reference) below. * `cluster_name` - (Required) Name of the MSK cluster. * `kafka_version` - (Required) Specify the desired Kafka software version. * `number_of_broker_nodes` - (Required) The desired total number of broker nodes in the kafka cluster. It must be a multiple of the number of specified client subnets. -* `client_authentication` - (Optional) Configuration block for specifying a client authentication. See below. -* `configuration_info` - (Optional) Configuration block for specifying a MSK Configuration to attach to Kafka brokers. See below. -* `encryption_info` - (Optional) Configuration block for specifying encryption. See below. +* `client_authentication` - (Optional) Configuration block for specifying a client authentication. See [client_authentication Argument Reference](#client_authentication-argument-reference) below. 
+* `configuration_info` - (Optional) Configuration block for specifying an MSK Configuration to attach to Kafka brokers. See [configuration_info Argument Reference](#configuration_info-argument-reference) below. +* `encryption_info` - (Optional) Configuration block for specifying encryption. See [encryption_info Argument Reference](#encryption_info-argument-reference) below. * `enhanced_monitoring` - (Optional) Specify the desired enhanced MSK CloudWatch monitoring level. See [Monitoring Amazon MSK with Amazon CloudWatch](https://docs.aws.amazon.com/msk/latest/developerguide/monitoring.html) -* `open_monitoring` - (Optional) Configuration block for JMX and Node monitoring for the MSK cluster. See below. -* `logging_info` - (Optional) Configuration block for streaming broker logs to Cloudwatch/S3/Kinesis Firehose. See below. +* `open_monitoring` - (Optional) Configuration block for JMX and Node monitoring for the MSK cluster. See [open_monitoring Argument Reference](#open_monitoring-argument-reference) below. +* `logging_info` - (Optional) Configuration block for streaming broker logs to Cloudwatch/S3/Kinesis Firehose. See [logging_info Argument Reference](#logging_info-argument-reference) below. * `storage_mode` - (Optional) Controls storage mode for supported storage tiers. Valid values are: `LOCAL` or `TIERED`. * `tags` - (Optional) A map of tags to assign to the resource. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. @@ -223,14 +224,14 @@ This resource supports the following arguments: * `client_subnets` - (Required) A list of subnets to connect to in client VPC ([documentation](https://docs.aws.amazon.com/msk/1.0/apireference/clusters.html#clusters-prop-brokernodegroupinfo-clientsubnets)). 
* `instance_type` - (Required) Specify the instance type to use for the kafka brokersE.g., kafka.m5.large. ([Pricing info](https://aws.amazon.com/msk/pricing/)) * `security_groups` - (Required) A list of the security groups to associate with the elastic network interfaces to control who can communicate with the cluster. -* `az_distribution` - (Optional) The distribution of broker nodes across availability zones ([documentation](https://docs.aws.amazon.com/msk/1.0/apireference/clusters.html#clusters-model-brokerazdistribution)). Currently the only valid value is `DEFAULT`. -* `connectivity_info` - (Optional) Information about the cluster access configuration. See below. For security reasons, you can't turn on public access while creating an MSK cluster. However, you can update an existing cluster to make it publicly accessible. You can also create a new cluster and then update it to make it publicly accessible ([documentation](https://docs.aws.amazon.com/msk/latest/developerguide/public-access.html)). -* `storage_info` - (Optional) A block that contains information about storage volumes attached to MSK broker nodes. See below. +* `az_distribution` - (Optional) The distribution of broker nodes across availability zones ([documentation](https://docs.aws.amazon.com/msk/1.0/apireference/clusters.html#clusters-model-brokerazdistribution)). Currently, the only valid value is `DEFAULT`. +* `connectivity_info` - (Optional) Information about the cluster access configuration. See [broker_node_group_info connectivity_info Argument Reference](#broker_node_group_info-connectivity_info-argument-reference) below. For security reasons, you can't turn on public access while creating an MSK cluster. However, you can update an existing cluster to make it publicly accessible. You can also create a new cluster and then update it to make it publicly accessible ([documentation](https://docs.aws.amazon.com/msk/latest/developerguide/public-access.html)). 
+* `storage_info` - (Optional) A block that contains information about storage volumes attached to MSK broker nodes. See [broker_node_group_info storage_info Argument Reference](#broker_node_group_info-storage_info-argument-reference) below. ### broker_node_group_info connectivity_info Argument Reference -* `public_access` - (Optional) Access control settings for brokers. See below. -* `vpc_connectivity` - (Optional) VPC connectivity access control for brokers. See below. +* `public_access` - (Optional) Access control settings for brokers. See [connectivity_info public_access Argument Reference](#connectivity_info-public_access-argument-reference) below. +* `vpc_connectivity` - (Optional) VPC connectivity access control for brokers. See [connectivity_info vpc_connectivity Argument Reference](#connectivity_info-vpc_connectivity-argument-reference) below. ### connectivity_info public_access Argument Reference @@ -238,11 +239,11 @@ This resource supports the following arguments: ### connectivity_info vpc_connectivity Argument Reference -* `client_authentication` - (Optional) Includes all client authentication information for VPC connectivity. See below. +* `client_authentication` - (Optional) Includes all client authentication information for VPC connectivity. See [vpc_connectivity client_authentication Argument Reference](#vpc_connectivity-client_authentication-argument-reference) below. ### vpc_connectivity client_authentication Argument Reference -* `sasl` - (Optional) SASL authentication type details for VPC connectivity. See below. +* `sasl` - (Optional) SASL authentication type details for VPC connectivity. See [vpc_connectivity client_authentication sasl Argument Reference](#vpc_connectivity-client_authentication-sasl-argument-reference) below. * `tls` - (Optional) Enables TLS authentication for VPC connectivity. 
### vpc_connectivity client_authentication sasl Argument Reference @@ -252,11 +253,11 @@ This resource supports the following arguments: ### broker_node_group_info storage_info Argument Reference -* `ebs_storage_info` - (Optional) A block that contains EBS volume information. See below. +* `ebs_storage_info` - (Optional) A block that contains EBS volume information. See [storage_info ebs_storage_info Argument Reference](#storage_info-ebs_storage_info-argument-reference) below. ### storage_info ebs_storage_info Argument Reference -* `provisioned_throughput` - (Optional) A block that contains EBS volume provisioned throughput information. To provision storage throughput, you must choose broker type kafka.m5.4xlarge or larger. See below. +* `provisioned_throughput` - (Optional) A block that contains EBS volume provisioned throughput information. To provision storage throughput, you must choose broker type kafka.m5.4xlarge or larger. See [ebs_storage_info provisioned_throughput Argument Reference](#ebs_storage_info-provisioned_throughput-argument-reference) below. * `volume_size` - (Optional) The size in GiB of the EBS volume for the data drive on each broker node. Minimum value of `1` and maximum value of `16384`. ### ebs_storage_info provisioned_throughput Argument Reference @@ -266,8 +267,8 @@ This resource supports the following arguments: ### client_authentication Argument Reference -* `sasl` - (Optional) Configuration block for specifying SASL client authentication. See below. -* `tls` - (Optional) Configuration block for specifying TLS client authentication. See below. +* `sasl` - (Optional) Configuration block for specifying SASL client authentication. See [client_authentication sasl Argument Reference](#client_authentication-sasl-argument-reference) below. +* `tls` - (Optional) Configuration block for specifying TLS client authentication. See [client_authentication tls Argument Reference](#client_authentication-tls-argument-reference) below. 
* `unauthenticated` - (Optional) Enables unauthenticated access. #### client_authentication sasl Argument Reference @@ -286,7 +287,7 @@ This resource supports the following arguments: ### encryption_info Argument Reference -* `encryption_in_transit` - (Optional) Configuration block to specify encryption in transit. See below. +* `encryption_in_transit` - (Optional) Configuration block to specify encryption in transit. See [encryption_info encryption_in_transit Argument Reference](#encryption_info-encryption_in_transit-argument-reference) below. * `encryption_at_rest_kms_key_arn` - (Optional) You may specify a KMS key short ID or ARN (it will always output an ARN) to use for encrypting your data at rest. If no key is specified, an AWS managed KMS ('aws/msk' managed service) key will be used for encrypting the data at rest. #### encryption_info encryption_in_transit Argument Reference @@ -296,12 +297,12 @@ This resource supports the following arguments: #### open_monitoring Argument Reference -* `prometheus` - (Required) Configuration block for Prometheus settings for open monitoring. See below. +* `prometheus` - (Required) Configuration block for Prometheus settings for open monitoring. See [open_monitoring prometheus Argument Reference](#open_monitoring-prometheus-argument-reference) below. #### open_monitoring prometheus Argument Reference -* `jmx_exporter` - (Optional) Configuration block for JMX Exporter. See below. -* `node_exporter` - (Optional) Configuration block for Node Exporter. See below. +* `jmx_exporter` - (Optional) Configuration block for JMX Exporter. See [open_monitoring prometheus jmx_exporter Argument Reference](#open_monitoring-prometheus-jmx_exporter-argument-reference) below. +* `node_exporter` - (Optional) Configuration block for Node Exporter. See [open_monitoring prometheus node_exporter Argument Reference](#open_monitoring-prometheus-node_exporter-argument-reference) below. 
#### open_monitoring prometheus jmx_exporter Argument Reference @@ -313,7 +314,13 @@ This resource supports the following arguments: #### logging_info Argument Reference -* `broker_logs` - (Required) Configuration block for Broker Logs settings for logging info. See below. +* `broker_logs` - (Required) Configuration block for Broker Logs settings for logging info. See [logging_info broker_logs Argument Reference](#logging_info-broker_logs-argument-reference) below. + +#### logging_info broker_logs Argument Reference + +* `cloudwatch_logs` - (Optional) Configuration block for Cloudwatch Logs settings. See [logging_info broker_logs cloudwatch_logs Argument Reference](#logging_info-broker_logs-cloudwatch_logs-argument-reference) below. +* `firehose` - (Optional) Configuration block for Kinesis Data Firehose settings. See [logging_info broker_logs firehose Argument Reference](#logging_info-broker_logs-firehose-argument-reference) below. +* `s3` - (Optional) Configuration block for S3 settings. See [logging_info broker_logs s3 Argument Reference](#logging_info-broker_logs-s3-argument-reference) below. #### logging_info broker_logs cloudwatch_logs Argument Reference @@ -387,4 +394,4 @@ Using `terraform import`, import MSK clusters using the cluster `arn`. 
For examp % terraform import aws_msk_cluster.example arn:aws:kafka:us-west-2:123456789012:cluster/example/279c0212-d057-4dba-9aa9-1c4e5a25bfc7-3 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/msk_cluster_policy.html.markdown b/website/docs/cdktf/python/r/msk_cluster_policy.html.markdown index 398acf88dad7..af9e94207243 100644 --- a/website/docs/cdktf/python/r/msk_cluster_policy.html.markdown +++ b/website/docs/cdktf/python/r/msk_cluster_policy.html.markdown @@ -55,8 +55,9 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -The following arguments are required: +This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `cluster_arn` - (Required) The Amazon Resource Name (ARN) that uniquely identifies the cluster. * `policy` - (Required) Resource policy for cluster. 
@@ -91,4 +92,4 @@ Using `terraform import`, import Managed Streaming for Kafka Cluster Policy usin % terraform import aws_msk_cluster_policy.example arn:aws:kafka:us-west-2:123456789012:cluster/example/279c0212-d057-4dba-9aa9-1c4e5a25bfc7-3 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/msk_configuration.html.markdown b/website/docs/cdktf/python/r/msk_configuration.html.markdown index 990ad3e2028f..40ab7f602d22 100644 --- a/website/docs/cdktf/python/r/msk_configuration.html.markdown +++ b/website/docs/cdktf/python/r/msk_configuration.html.markdown @@ -37,6 +37,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `server_properties` - (Required) Contents of the server.properties file. Supported properties are documented in the [MSK Developer Guide](https://docs.aws.amazon.com/msk/latest/developerguide/msk-configuration-properties.html). * `kafka_versions` - (Optional) List of Apache Kafka versions which can use this configuration. * `name` - (Required) Name of the configuration. @@ -74,4 +75,4 @@ Using `terraform import`, import MSK configurations using the configuration ARN. 
% terraform import aws_msk_configuration.example arn:aws:kafka:us-west-2:123456789012:configuration/example/279c0212-d057-4dba-9aa9-1c4e5a25bfc7-3 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/msk_replicator.html.markdown b/website/docs/cdktf/python/r/msk_replicator.html.markdown index 53f2844e6d27..d0a46236b34f 100644 --- a/website/docs/cdktf/python/r/msk_replicator.html.markdown +++ b/website/docs/cdktf/python/r/msk_replicator.html.markdown @@ -78,6 +78,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `replicator_name` - (Required) The name of the replicator. * `kafka_cluster` - (Required) A list of Kafka clusters which are targets of the replicator. * `service_execution_role_arn` - (Required) The ARN of the IAM role used by the replicator to access resources in the customer's account (e.g source and target clusters). @@ -172,4 +173,4 @@ Using `terraform import`, import MSK replicators using the replicator ARN. 
For e % terraform import aws_msk_replicator.example arn:aws:kafka:us-west-2:123456789012:configuration/example/279c0212-d057-4dba-9aa9-1c4e5a25bfc7-3 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/msk_scram_secret_association.html.markdown b/website/docs/cdktf/python/r/msk_scram_secret_association.html.markdown index 3b7a25670c9f..c1aaf71728cf 100644 --- a/website/docs/cdktf/python/r/msk_scram_secret_association.html.markdown +++ b/website/docs/cdktf/python/r/msk_scram_secret_association.html.markdown @@ -112,6 +112,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `cluster_arn` - (Required, Forces new resource) Amazon Resource Name (ARN) of the MSK cluster. * `secret_arn_list` - (Required) List of AWS Secrets Manager secret ARNs. @@ -146,4 +147,4 @@ Using `terraform import`, import MSK SCRAM Secret Associations using the `id`. 
F % terraform import aws_msk_scram_secret_association.example arn:aws:kafka:us-west-2:123456789012:cluster/example/279c0212-d057-4dba-9aa9-1c4e5a25bfc7-3 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/msk_serverless_cluster.html.markdown b/website/docs/cdktf/python/r/msk_serverless_cluster.html.markdown index 017cd7ee8bbb..918e6b0dbf80 100644 --- a/website/docs/cdktf/python/r/msk_serverless_cluster.html.markdown +++ b/website/docs/cdktf/python/r/msk_serverless_cluster.html.markdown @@ -50,6 +50,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `client_authentication` - (Required) Specifies client authentication information for the serverless cluster. See below. * `cluster_name` - (Required) The name of the serverless cluster. * `tags` - (Optional) A map of tags to assign to the resource. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. 
@@ -113,4 +114,4 @@ Using `terraform import`, import MSK serverless clusters using the cluster `arn` % terraform import aws_msk_serverless_cluster.example arn:aws:kafka:us-west-2:123456789012:cluster/example/279c0212-d057-4dba-9aa9-1c4e5a25bfc7-3 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/msk_single_scram_secret_association.html.markdown b/website/docs/cdktf/python/r/msk_single_scram_secret_association.html.markdown index 9dc8f078e6a3..3bc6e6ff7586 100644 --- a/website/docs/cdktf/python/r/msk_single_scram_secret_association.html.markdown +++ b/website/docs/cdktf/python/r/msk_single_scram_secret_association.html.markdown @@ -36,6 +36,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `cluster_arn` - (Required, Forces new resource) Amazon Resource Name (ARN) of the MSK cluster. * `secret_arn` - (Required, Forces new resource) AWS Secrets Manager secret ARN. 
@@ -68,4 +69,4 @@ Using `terraform import`, import an MSK SCRAM Secret Association using the `clus % terraform import aws_msk_single_scram_secret_association.example arn:aws:kafka:us-west-2:123456789012:cluster/example/279c0212-d057-4dba-9aa9-1c4e5a25bfc7-3,arn:aws:secretsmanager:us-east-1:123456789012:secret:example-123456 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/msk_vpc_connection.html.markdown b/website/docs/cdktf/python/r/msk_vpc_connection.html.markdown index 477a788dbe42..aac1ca8fc4ab 100644 --- a/website/docs/cdktf/python/r/msk_vpc_connection.html.markdown +++ b/website/docs/cdktf/python/r/msk_vpc_connection.html.markdown @@ -38,6 +38,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `authentication` - (Required) The authentication type for the client VPC connection. Specify one of these auth type strings: SASL_IAM, SASL_SCRAM, or TLS. * `client_subnets` - (Required) The list of subnets in the client VPC to connect to. * `security_groups` - (Required) The security groups to attach to the ENIs for the broker nodes. @@ -85,4 +86,4 @@ Using `terraform import`, import MSK configurations using the configuration ARN. 
% terraform import aws_msk_vpc_connection.example arn:aws:kafka:eu-west-2:123456789012:vpc-connection/123456789012/example/38173259-79cd-4ee8-87f3-682ea6023f48-2 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/mskconnect_connector.html.markdown b/website/docs/cdktf/python/r/mskconnect_connector.html.markdown index b72ffc1d5e48..faaa6fdfe4d2 100644 --- a/website/docs/cdktf/python/r/mskconnect_connector.html.markdown +++ b/website/docs/cdktf/python/r/mskconnect_connector.html.markdown @@ -91,6 +91,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` - (Optional) A summary description of the connector. * `log_delivery` - (Optional) Details about log delivery. See [`log_delivery` Block](#log_delivery-block) for details. * `tags` - (Optional) A map of tags to assign to the resource. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. 
@@ -261,4 +262,4 @@ Using `terraform import`, import MSK Connect Connector using the connector's `ar % terraform import aws_mskconnect_connector.example 'arn:aws:kafkaconnect:eu-central-1:123456789012:connector/example/264edee4-17a3-412e-bd76-6681cfc93805-3' ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/mskconnect_custom_plugin.html.markdown b/website/docs/cdktf/python/r/mskconnect_custom_plugin.html.markdown index dbb1f8eb0586..d79e1309f834 100644 --- a/website/docs/cdktf/python/r/mskconnect_custom_plugin.html.markdown +++ b/website/docs/cdktf/python/r/mskconnect_custom_plugin.html.markdown @@ -58,6 +58,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required, Forces new resource) The name of the custom plugin.. * `content_type` - (Required, Forces new resource) The type of the plugin file. Allowed values are `ZIP` and `JAR`. * `description` - (Optional, Forces new resource) A summary description of the custom plugin. 
@@ -119,4 +120,4 @@ Using `terraform import`, import MSK Connect Custom Plugin using the plugin's `a % terraform import aws_mskconnect_custom_plugin.example 'arn:aws:kafkaconnect:eu-central-1:123456789012:custom-plugin/debezium-example/abcdefgh-1234-5678-9abc-defghijklmno-4' ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/mskconnect_worker_configuration.html.markdown b/website/docs/cdktf/python/r/mskconnect_worker_configuration.html.markdown index 87e6a56955ff..9e29e2f581c1 100644 --- a/website/docs/cdktf/python/r/mskconnect_worker_configuration.html.markdown +++ b/website/docs/cdktf/python/r/mskconnect_worker_configuration.html.markdown @@ -43,6 +43,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` - (Optional, Forces new resource) A summary description of the worker configuration. * `tags` - (Optional) A map of tags to assign to the resource. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. 
@@ -85,4 +86,4 @@ Using `terraform import`, import MSK Connect Worker Configuration using the plug % terraform import aws_mskconnect_worker_configuration.example 'arn:aws:kafkaconnect:eu-central-1:123456789012:worker-configuration/example/8848493b-7fcc-478c-a646-4a52634e3378-4' ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/mwaa_environment.html.markdown b/website/docs/cdktf/python/r/mwaa_environment.html.markdown index ce7a09f71634..74332ce7d3ad 100644 --- a/website/docs/cdktf/python/r/mwaa_environment.html.markdown +++ b/website/docs/cdktf/python/r/mwaa_environment.html.markdown @@ -172,15 +172,17 @@ This resource supports the following arguments: * `network_configuration` - (Required) Specifies the network configuration for your Apache Airflow Environment. This includes two private subnets as well as security groups for the Airflow environment. Each subnet requires internet connection, otherwise the deployment will fail. See [`network_configuration` Block](#network_configuration-block) for details. * `plugins_s3_object_version` - (Optional) The plugins.zip file version you want to use. * `plugins_s3_path` - (Optional) The relative path to the plugins.zip file on your Amazon S3 storage bucket. For example, plugins.zip. If a relative path is provided in the request, then plugins_s3_object_version is required. For more information, see [Importing DAGs on Amazon MWAA](https://docs.aws.amazon.com/mwaa/latest/userguide/configuring-dag-import.html). +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `requirements_s3_object_version` - (Optional) The requirements.txt file version you want to use. 
* `requirements_s3_path` - (Optional) The relative path to the requirements.txt file on your Amazon S3 storage bucket. For example, requirements.txt. If a relative path is provided in the request, then requirements_s3_object_version is required. For more information, see [Importing DAGs on Amazon MWAA](https://docs.aws.amazon.com/mwaa/latest/userguide/configuring-dag-import.html). * `schedulers` - (Optional) The number of schedulers that you want to run in your environment. v2.0.2 and above accepts `2` - `5`, default `2`. v1.10.12 accepts `1`. * `source_bucket_arn` - (Required) The Amazon Resource Name (ARN) of your Amazon S3 storage bucket. For example, arn:aws:s3:::airflow-mybucketname. * `startup_script_s3_object_version` - (Optional) The version of the startup shell script you want to use. You must specify the version ID that Amazon S3 assigns to the file every time you update the script. * `startup_script_s3_path` - (Optional) The relative path to the script hosted in your bucket. The script runs as your environment starts before starting the Apache Airflow process. Use this script to install dependencies, modify configuration options, and set environment variables. See [Using a startup script](https://docs.aws.amazon.com/mwaa/latest/userguide/using-startup-script.html). Supported for environment versions 2.x and later. +* `tags` - (Optional) A map of resource tags to associate with the resource. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. * `webserver_access_mode` - (Optional) Specifies whether the webserver should be accessible over the internet or via your specified VPC. Possible options: `PRIVATE_ONLY` (default) and `PUBLIC_ONLY`. * `weekly_maintenance_window_start` - (Optional) Specifies the start date for the weekly maintenance window. 
-* `tags` - (Optional) A map of resource tags to associate with the resource. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. +* `worker_replacement_strategy` - (Optional) Worker replacement strategy. Valid values: `FORCED`, `GRACEFUL`. ### `logging_configuration` Block @@ -253,4 +255,4 @@ Using `terraform import`, import MWAA Environment using `Name`. For example: % terraform import aws_mwaa_environment.example MyAirflowEnvironment ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/nat_gateway.html.markdown b/website/docs/cdktf/python/r/nat_gateway.html.markdown index a4e0361a6729..6f2822d88229 100644 --- a/website/docs/cdktf/python/r/nat_gateway.html.markdown +++ b/website/docs/cdktf/python/r/nat_gateway.html.markdown @@ -12,6 +12,8 @@ description: |- Provides a resource to create a VPC NAT Gateway. +!> **WARNING:** You should not use the `aws_nat_gateway` resource that has `secondary_allocation_ids` in conjunction with an [`aws_nat_gateway_eip_association`](nat_gateway_eip_association.html) resource. Doing so may cause perpetual differences, and result in associations being overwritten. + ## Example Usage ### Public NAT @@ -108,10 +110,11 @@ This resource supports the following arguments: * `allocation_id` - (Optional) The Allocation ID of the Elastic IP address for the NAT Gateway. Required for `connectivity_type` of `public`. * `connectivity_type` - (Optional) Connectivity type for the NAT Gateway. Valid values are `private` and `public`. Defaults to `public`. * `private_ip` - (Optional) The private IPv4 address to assign to the NAT Gateway. If you don't provide an address, a private IPv4 address will be automatically assigned. 
+* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `subnet_id` - (Required) The Subnet ID of the subnet in which to place the NAT Gateway. -* `secondary_allocation_ids` - (Optional) A list of secondary allocation EIP IDs for this NAT Gateway. +* `secondary_allocation_ids` - (Optional) A list of secondary allocation EIP IDs for this NAT Gateway. To remove all secondary allocations an empty list should be specified. * `secondary_private_ip_address_count` - (Optional) [Private NAT Gateway only] The number of secondary private IPv4 addresses you want to assign to the NAT Gateway. -* `secondary_private_ip_addresses` - (Optional) A list of secondary private IPv4 addresses to assign to the NAT Gateway. +* `secondary_private_ip_addresses` - (Optional) A list of secondary private IPv4 addresses to assign to the NAT Gateway. To remove all secondary private addresses an empty list should be specified. * `tags` - (Optional) A map of tags to assign to the resource. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. ## Attribute Reference @@ -157,4 +160,4 @@ Using `terraform import`, import NAT Gateways using the `id`. 
For example: % terraform import aws_nat_gateway.private_gw nat-05dba92075d71c408 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/nat_gateway_eip_association.html.markdown b/website/docs/cdktf/python/r/nat_gateway_eip_association.html.markdown new file mode 100644 index 000000000000..07ee3b389720 --- /dev/null +++ b/website/docs/cdktf/python/r/nat_gateway_eip_association.html.markdown @@ -0,0 +1,85 @@ +--- +subcategory: "VPC (Virtual Private Cloud)" +layout: "aws" +page_title: "AWS: aws_nat_gateway_eip_association" +description: |- + Terraform resource for managing an AWS VPC NAT Gateway EIP Association. +--- + + +# Resource: aws_nat_gateway_eip_association + +Terraform resource for managing an AWS VPC NAT Gateway EIP Association. + +!> **WARNING:** You should not use the `aws_nat_gateway_eip_association` resource in conjunction with an [`aws_nat_gateway`](nat_gateway.html) resource that has `secondary_allocation_ids` configured. Doing so may cause perpetual differences, and result in associations being overwritten. + +## Example Usage + +### Basic Usage + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.nat_gateway_eip_association import NatGatewayEipAssociation +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + NatGatewayEipAssociation(self, "example", + allocation_id=aws_eip_example.id, + nat_gateway_id=aws_nat_gateway_example.id + ) +``` + +## Argument Reference + +The following arguments are required: + +* `allocation_id` - (Required) The ID of the Elastic IP Allocation to associate with the NAT Gateway. +* `nat_gateway_id` - (Required) The ID of the NAT Gateway to associate the Elastic IP Allocation to.
+ +The following arguments are optional: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). + +## Attribute Reference + +This resource exports no additional attributes. + +## Timeouts + +[Configuration options](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts): + +* `create` - (Default `10m`) +* `delete` - (Default `30m`) + +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import VPC NAT Gateway EIP Association using the `nat_gateway_id,allocation_id`. For example: + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.nat_gateway_eip_association import NatGatewayEipAssociation +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + NatGatewayEipAssociation.generate_config_for_import(self, "example", "nat-1234567890abcdef1,eipalloc-1234567890abcdef1") +``` + +Using `terraform import`, import VPC NAT Gateway EIP Association using the `nat_gateway_id,allocation_id`.
For example: + +```console +% terraform import aws_nat_gateway_eip_association.example nat-1234567890abcdef1,eipalloc-1234567890abcdef1 +``` + + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/neptune_cluster.html.markdown b/website/docs/cdktf/python/r/neptune_cluster.html.markdown index aa1638c8a7f4..e742020c8ed6 --- a/website/docs/cdktf/python/r/neptune_cluster.html.markdown +++ b/website/docs/cdktf/python/r/neptune_cluster.html.markdown @@ -59,6 +59,7 @@ This resource supports the following arguments: * `cluster_identifier` - (Optional, Forces new resources) Cluster identifier. If omitted, Terraform will assign a random, unique identifier. * `cluster_identifier_prefix` - (Optional, Forces new resource) Creates a unique cluster identifier beginning with the specified prefix. Conflicts with `cluster_identifier`. * `copy_tags_to_snapshot` - (Optional) If set to true, tags are copied to any snapshot of the DB cluster that is created. +* `deletion_protection` - (Optional) Value that indicates whether the DB cluster has deletion protection enabled. The database can't be deleted when deletion protection is enabled. By default, deletion protection is disabled. * `enable_cloudwatch_logs_exports` - (Optional) List of the log types this DB cluster is configured to export to Cloudwatch Logs. Currently only supports `audit` and `slowquery`. * `engine` - (Optional) Name of the database engine to be used for this Neptune cluster. Defaults to `neptune`. * `engine_version` - (Optional) Database engine version. @@ -67,21 +68,21 @@ This resource supports the following arguments: * `iam_roles` - (Optional) List of ARNs for the IAM roles to associate to the Neptune Cluster. * `iam_database_authentication_enabled` - (Optional) Whether or not mappings of AWS Identity and Access Management (IAM) accounts to database accounts is enabled. * `kms_key_arn` - (Optional) ARN for the KMS encryption key.
When specifying `kms_key_arn`, `storage_encrypted` needs to be set to true. -* `neptune_subnet_group_name` - (Optional) Neptune subnet group to associate with this Neptune instance. * `neptune_cluster_parameter_group_name` - (Optional) Cluster parameter group to associate with the cluster. * `neptune_instance_parameter_group_name` – (Optional) Name of DB parameter group to apply to all instances in the cluster. When upgrading, AWS does not return this value, so do not reference it in other arguments—either leave it unset, configure each instance directly, or ensure it matches the `engine_version`. -* `storage_type` - (Optional) Storage type associated with the cluster `standard/iopt1`. Default: `standard` +* `neptune_subnet_group_name` - (Optional) Neptune subnet group to associate with this Neptune instance. +* `port` - (Optional) Port on which the Neptune accepts connections. Default is `8182`. * `preferred_backup_window` - (Optional) Daily time range during which automated backups are created if automated backups are enabled using the BackupRetentionPeriod parameter. Time in UTC. Default: A 30-minute window selected at random from an 8-hour block of time per regionE.g., 04:00-09:00 * `preferred_maintenance_window` - (Optional) Weekly time range during which system maintenance can occur, in (UTC) e.g., wed:04:00-wed:04:30 -* `port` - (Optional) Port on which the Neptune accepts connections. Default is `8182`. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `replication_source_identifier` - (Optional) ARN of a source Neptune cluster or Neptune instance if this Neptune cluster is to be created as a Read Replica. 
+* `serverless_v2_scaling_configuration` - (Optional) If set, create the Neptune cluster as a serverless one. See [Serverless](#serverless) for example block attributes. * `skip_final_snapshot` - (Optional) Whether a final Neptune snapshot is created before the Neptune cluster is deleted. If true is specified, no Neptune snapshot is created. If false is specified, a Neptune snapshot is created before the Neptune cluster is deleted, using the value from `final_snapshot_identifier`. Default is `false`. * `snapshot_identifier` - (Optional) Whether or not to create this cluster from a snapshot. You can use either the name or ARN when specifying a Neptune cluster snapshot, or the ARN when specifying a Neptune snapshot. Automated snapshots **should not** be used for this attribute, unless from a different cluster. Automated snapshots are deleted as part of cluster destruction when the resource is replaced. * `storage_encrypted` - (Optional) Whether the Neptune cluster is encrypted. The default is `false` if not specified. +* `storage_type` - (Optional) Storage type associated with the cluster `standard/iopt1`. Default: `standard`. * `tags` - (Optional) Map of tags to assign to the Neptune cluster. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. * `vpc_security_group_ids` - (Optional) List of VPC security groups to associate with the Cluster -* `deletion_protection` - (Optional) Value that indicates whether the DB cluster has deletion protection enabled.The database can't be deleted when deletion protection is enabled. By default, deletion protection is disabled. -* `serverless_v2_scaling_configuration` - (Optional) If set, create the Neptune cluster as a serverless one. See [Serverless](#serverless) for example block attributes. 
### Serverless @@ -170,4 +171,4 @@ Using `terraform import`, import `aws_neptune_cluster` using the cluster identif % terraform import aws_neptune_cluster.example my-cluster ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/neptune_cluster_endpoint.html.markdown b/website/docs/cdktf/python/r/neptune_cluster_endpoint.html.markdown index 49f567244f75..5fd36cf376a7 100644 --- a/website/docs/cdktf/python/r/neptune_cluster_endpoint.html.markdown +++ b/website/docs/cdktf/python/r/neptune_cluster_endpoint.html.markdown @@ -37,6 +37,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `cluster_identifier` - (Required, Forces new resources) The DB cluster identifier of the DB cluster associated with the endpoint. * `cluster_endpoint_identifier` - (Required, Forces new resources) The identifier of the endpoint. * `endpoint_type` - (Required) The type of the endpoint. One of: `READER`, `WRITER`, `ANY`. 
@@ -78,4 +79,4 @@ Using `terraform import`, import `aws_neptune_cluster_endpoint` using the `clust % terraform import aws_neptune_cluster_endpoint.example my-cluster:my-endpoint ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/neptune_cluster_instance.html.markdown b/website/docs/cdktf/python/r/neptune_cluster_instance.html.markdown index e4ccc01c855b..aa89eff2e70f 100644 --- a/website/docs/cdktf/python/r/neptune_cluster_instance.html.markdown +++ b/website/docs/cdktf/python/r/neptune_cluster_instance.html.markdown @@ -59,6 +59,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `apply_immediately` - (Optional) Specifies whether any instance modifications are applied immediately, or during the next maintenance window. Default is`false`. * `auto_minor_version_upgrade` - (Optional) Indicates that minor engine upgrades will be applied automatically to the instance during the maintenance window. Default is `true`. @@ -93,7 +94,7 @@ This resource exports the following attributes in addition to the arguments abov * `storage_encrypted` - Specifies whether the neptune cluster is encrypted. * `storage_type` - Storage type associated with the cluster `standard/iopt1`. * `tags_all` - A map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). -* `writer` – Boolean indicating if this instance is writable. `False` indicates this instance is a read replica. +* `writer` - Boolean indicating if this instance is writable. 
`False` indicates this instance is a read replica. [1]: https://www.terraform.io/docs/configuration/meta-arguments/count.html @@ -130,4 +131,4 @@ Using `terraform import`, import `aws_neptune_cluster_instance` using the instan % terraform import aws_neptune_cluster_instance.example my-instance ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/neptune_cluster_parameter_group.html.markdown b/website/docs/cdktf/python/r/neptune_cluster_parameter_group.html.markdown index 59e30665486a..6b57aa0701aa 100644 --- a/website/docs/cdktf/python/r/neptune_cluster_parameter_group.html.markdown +++ b/website/docs/cdktf/python/r/neptune_cluster_parameter_group.html.markdown @@ -42,6 +42,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Optional, Forces new resource) The name of the neptune cluster parameter group. If omitted, Terraform will assign a random, unique name. * `name_prefix` - (Optional, Forces new resource) Creates a unique name beginning with the specified prefix. Conflicts with `name`. * `family` - (Required) The family of the neptune cluster parameter group. 
@@ -88,4 +89,4 @@ Using `terraform import`, import Neptune Cluster Parameter Groups using the `nam % terraform import aws_neptune_cluster_parameter_group.cluster_pg production-pg-1 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/neptune_cluster_snapshot.html.markdown b/website/docs/cdktf/python/r/neptune_cluster_snapshot.html.markdown index 43dad961eed4..b9a9e3332a65 100644 --- a/website/docs/cdktf/python/r/neptune_cluster_snapshot.html.markdown +++ b/website/docs/cdktf/python/r/neptune_cluster_snapshot.html.markdown @@ -36,6 +36,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `db_cluster_identifier` - (Required) The DB Cluster Identifier from which to take the snapshot. * `db_cluster_snapshot_identifier` - (Required) The Identifier for the snapshot. 
@@ -87,4 +88,4 @@ Using `terraform import`, import `aws_neptune_cluster_snapshot` using the cluste % terraform import aws_neptune_cluster_snapshot.example my-cluster-snapshot ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/neptune_event_subscription.html.markdown b/website/docs/cdktf/python/r/neptune_event_subscription.html.markdown index 0a7aef9cb0d5..f518f338d3fd 100644 --- a/website/docs/cdktf/python/r/neptune_event_subscription.html.markdown +++ b/website/docs/cdktf/python/r/neptune_event_subscription.html.markdown @@ -66,6 +66,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `enabled` - (Optional) A boolean flag to enable/disable the subscription. Defaults to true. * `event_categories` - (Optional) A list of event categories for a `source_type` that you want to subscribe to. Run `aws neptune describe-event-categories` to find all the event categories. * `name` - (Optional) The name of the Neptune event subscription. By default generated by Terraform. 
@@ -117,4 +118,4 @@ Using `terraform import`, import `aws_neptune_event_subscription` using the even % terraform import aws_neptune_event_subscription.example my-event-subscription ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/neptune_global_cluster.html.markdown b/website/docs/cdktf/python/r/neptune_global_cluster.html.markdown index e0017918b84b..59203fe52eac 100644 --- a/website/docs/cdktf/python/r/neptune_global_cluster.html.markdown +++ b/website/docs/cdktf/python/r/neptune_global_cluster.html.markdown @@ -124,6 +124,7 @@ This resource supports the following arguments: * `deletion_protection` - (Optional) If the Global Cluster should have deletion protection enabled. The database can't be deleted when this value is set to `true`. The default is `false`. * `engine` - (Optional, Forces new resources) Name of the database engine to be used for this DB cluster. Terraform will only perform drift detection if a configuration value is provided. Current Valid values: `neptune`. Conflicts with `source_db_cluster_identifier`. * `engine_version` - (Optional) Engine version of the global database. Upgrading the engine version will result in all cluster members being immediately updated and will. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `source_db_cluster_identifier` - (Optional) ARN to use as the primary DB Cluster of the Global Cluster on creation. Terraform cannot perform drift detection of this value. * `storage_encrypted` - (Optional, Forces new resources) Whether the DB cluster is encrypted. The default is `false` unless `source_db_cluster_identifier` is specified and encrypted. 
Terraform will only perform drift detection if a configuration value is provided. @@ -194,4 +195,4 @@ class MyConvertedCode(TerraformStack): ) ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/neptune_parameter_group.html.markdown b/website/docs/cdktf/python/r/neptune_parameter_group.html.markdown index 4a191f274183..f1315688f3eb 100644 --- a/website/docs/cdktf/python/r/neptune_parameter_group.html.markdown +++ b/website/docs/cdktf/python/r/neptune_parameter_group.html.markdown @@ -41,6 +41,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Optional, Forces new resource) The name of the Neptune parameter group. * `name_prefix` - (Optional, Forces new resource) Creates a unique name beginning with the specified prefix. Conflicts with `name`. * `family` - (Required) The family of the Neptune parameter group. @@ -87,4 +88,4 @@ Using `terraform import`, import Neptune Parameter Groups using the `name`. 
For % terraform import aws_neptune_parameter_group.some_pg some-pg ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/neptune_subnet_group.html.markdown b/website/docs/cdktf/python/r/neptune_subnet_group.html.markdown index 59f01fd13fe5..ad0644d0bcfb 100644 --- a/website/docs/cdktf/python/r/neptune_subnet_group.html.markdown +++ b/website/docs/cdktf/python/r/neptune_subnet_group.html.markdown @@ -39,6 +39,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Optional, Forces new resource) The name of the neptune subnet group. If omitted, Terraform will assign a random, unique name. * `name_prefix` - (Optional, Forces new resource) Creates a unique name beginning with the specified prefix. Conflicts with `name`. * `description` - (Optional) The description of the neptune subnet group. Defaults to "Managed by Terraform". @@ -78,4 +79,4 @@ Using `terraform import`, import Neptune Subnet groups using the `name`. For exa % terraform import aws_neptune_subnet_group.default production-subnet-group ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/neptunegraph_graph.html.markdown b/website/docs/cdktf/python/r/neptunegraph_graph.html.markdown index 7dd4e529b747..264197ac1506 100644 --- a/website/docs/cdktf/python/r/neptunegraph_graph.html.markdown +++ b/website/docs/cdktf/python/r/neptunegraph_graph.html.markdown @@ -10,7 +10,7 @@ description: |- # Resource: aws_neptunegraph_graph -The aws_neptunegraph_graph resource creates an Amazon Analytics Graph. 
+The `aws_neptunegraph_graph` resource creates an Amazon Analytics Graph. ## Example Usage @@ -56,19 +56,14 @@ The following arguments are required: The following arguments are optional: +- `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). - `deletion_protection` (Boolean, Default: `true`) Value that indicates whether the Graph has deletion protection enabled. The graph can't be deleted when deletion protection is enabled. - - `graph_name` (String, Forces new resource) Contains a user-supplied name for the Graph. If omitted, Terraform will assign a random, unique identifier. - - `public_connectivity` (Boolean, Default: `false`) Specifies whether the Graph can be reached over the internet. Access to all graphs requires IAM authentication. When the Graph is publicly reachable, its Domain Name System (DNS) endpoint resolves to the public IP address from the internet. When the Graph isn't publicly reachable, you need to create a PrivateGraphEndpoint in a given VPC to ensure the DNS name resolves to a private IP address that is reachable from the VPC. - - `replica_count` (Number, Default: `1`, Forces new resource) Specifies the number of replicas you want when finished. All replicas will be provisioned in different availability zones. Replica Count should always be less than or equal to 2. - - `kms_key_identifier` (String) The ARN for the KMS encryption key. By Default, Neptune Analytics will use an AWS provided key ("AWS_OWNED_KEY"). This parameter is used if you want to encrypt the graph using a KMS Customer Managed Key (CMK). 
- - `vector_search_configuration` (Block, Forces new resource) Vector Search Configuration (see below for nested schema of vector_search_configuration) - -- `tags` (Attributes Set) The tags associated with this graph. (see below for nested schema of tags) +- `tags` - (Optional) Key-value tags for the graph. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. ## Attribute Reference @@ -77,6 +72,7 @@ This resource exports the following attributes in addition to the arguments abov - `endpoint` (String) The connection endpoint for the graph. For example: `g-12a3bcdef4.us-east-1.neptune-graph.amazonaws.com` - `arn` (String) Graph resource ARN - `id` (String) The auto-generated id assigned by the service. +- `tags_all` - A map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). ## Timeouts @@ -124,4 +120,4 @@ Using `terraform import`, import `aws_neptunegraph_graph` using the graph identi % terraform import aws_neptunegraph_graph.example "graph_id" ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/network_acl.html.markdown b/website/docs/cdktf/python/r/network_acl.html.markdown index f271200174a5..0e4ff591b159 100644 --- a/website/docs/cdktf/python/r/network_acl.html.markdown +++ b/website/docs/cdktf/python/r/network_acl.html.markdown @@ -67,6 +67,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `vpc_id` - (Required) The ID of the associated VPC. * `subnet_ids` - (Optional) A list of Subnet IDs to apply the ACL to * `ingress` - (Optional) Specifies an ingress rule. Parameters defined below. @@ -129,4 +130,4 @@ Using `terraform import`, import Network ACLs using the `id`. For example: % terraform import aws_network_acl.main acl-7aaabd18 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/network_acl_association.html.markdown b/website/docs/cdktf/python/r/network_acl_association.html.markdown index 6c2509fe5046..6a3d6ed6023a 100644 --- a/website/docs/cdktf/python/r/network_acl_association.html.markdown +++ b/website/docs/cdktf/python/r/network_acl_association.html.markdown @@ -40,6 +40,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `network_acl_id` - (Required) The ID of the network ACL. * `subnet_id` - (Required) The ID of the associated Subnet. @@ -74,4 +75,4 @@ Using `terraform import`, import Network ACL associations using the `id`. 
For ex % terraform import aws_network_acl_association.main aclassoc-02baf37f20966b3e6 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/network_acl_rule.html.markdown b/website/docs/cdktf/python/r/network_acl_rule.html.markdown index 502ef58d53f1..02ff73371b96 100644 --- a/website/docs/cdktf/python/r/network_acl_rule.html.markdown +++ b/website/docs/cdktf/python/r/network_acl_rule.html.markdown @@ -56,6 +56,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `network_acl_id` - (Required) The ID of the network ACL. * `rule_number` - (Required) The rule number for the entry (for example, 100). ACL entries are processed in ascending order by rule number. * `egress` - (Optional, bool) Indicates whether this is an egress rule (rule is applied to traffic leaving the subnet). Default `false`. @@ -134,4 +135,4 @@ Using the procotol's decimal value: % terraform import aws_network_acl_rule.my_rule acl-7aaabd18:100:6:false ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/network_interface.html.markdown b/website/docs/cdktf/python/r/network_interface.html.markdown index ff179583962e..634677475b53 100644 --- a/website/docs/cdktf/python/r/network_interface.html.markdown +++ b/website/docs/cdktf/python/r/network_interface.html.markdown @@ -63,6 +63,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `attachment` - (Optional) Configuration block to define the attachment of the ENI. See [Attachment](#attachment) below for more details! * `description` - (Optional) Description for the network interface. * `enable_primary_ipv6` - (Optional) Enables assigning a primary IPv6 Global Unicast Address (GUA) to the network interface (ENI) in dual-stack or IPv6-only subnets. This ensures the instance attached to the ENI retains a consistent IPv6 address. Once enabled, the first IPv6 GUA becomes the primary IPv6 address and cannot be disabled. The primary IPv6 address remains assigned until the instance is terminated or the ENI is detached. Enabling and subsequent disabling forces recreation of the ENI. @@ -89,6 +90,7 @@ The `attachment` block supports the following: * `instance` - (Required) ID of the instance to attach to. * `device_index` - (Required) Integer to define the devices index. +* `network_card_index` - (Optional) Index of the network card. Specify a value greater than 0 when using multiple network cards, which are supported by [some instance types](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-eni.html#network-cards). The default is 0. ## Attribute Reference @@ -126,4 +128,4 @@ Using `terraform import`, import Network Interfaces using the `id`. 
For example: % terraform import aws_network_interface.test eni-e5aa89a3 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/network_interface_attachment.html.markdown b/website/docs/cdktf/python/r/network_interface_attachment.html.markdown index 31072f6de8fe..60b2870fe38d 100644 --- a/website/docs/cdktf/python/r/network_interface_attachment.html.markdown +++ b/website/docs/cdktf/python/r/network_interface_attachment.html.markdown @@ -37,9 +37,11 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `instance_id` - (Required) Instance ID to attach. * `network_interface_id` - (Required) ENI ID to attach. * `device_index` - (Required) Network interface index (int). +* `network_card_index` - (Optional) Index of the network card. Specify a value greater than 0 when using multiple network cards, which are supported by [some instance types](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-eni.html#network-cards). The default is 0. 
## Attribute Reference @@ -75,4 +77,4 @@ Using `terraform import`, import Elastic network interface (ENI) Attachments usi % terraform import aws_network_interface_attachment.secondary_nic eni-attach-0a33842b4ec347c4c ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/network_interface_permission.html.markdown b/website/docs/cdktf/python/r/network_interface_permission.html.markdown index b1dc7503ca39..d7a1c2a83674 100644 --- a/website/docs/cdktf/python/r/network_interface_permission.html.markdown +++ b/website/docs/cdktf/python/r/network_interface_permission.html.markdown @@ -48,8 +48,9 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -The following arguments are required: +This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `network_interface_id` - (Required) The ID of the network interface. * `aws_account_id` - (Required) The Amazon Web Services account ID. * `permission` - (Required) The type of permission to grant. Valid values are `INSTANCE-ATTACH` or `EIP-ASSOCIATE`. 
@@ -85,4 +86,4 @@ Using `terraform import`, import Network Interface Permissions using the `networ % terraform import aws_network_interface_permission.example eni-perm-056ad97ce2ac377ed ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/network_interface_sg_attachment.html.markdown b/website/docs/cdktf/python/r/network_interface_sg_attachment.html.markdown index db48f5f66830..8bc8aeb495f1 100644 --- a/website/docs/cdktf/python/r/network_interface_sg_attachment.html.markdown +++ b/website/docs/cdktf/python/r/network_interface_sg_attachment.html.markdown @@ -110,6 +110,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `security_group_id` - (Required) The ID of the security group. * `network_interface_id` - (Required) The ID of the network interface to attach to. @@ -150,4 +151,4 @@ Using `terraform import`, import Network Interface Security Group attachments us % terraform import aws_network_interface_sg_attachment.sg_attachment eni-1234567890abcdef0_sg-1234567890abcdef0 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/networkfirewall_firewall.html.markdown b/website/docs/cdktf/python/r/networkfirewall_firewall.html.markdown index 103ad7a18238..d89eb49ebc7c 100644 --- a/website/docs/cdktf/python/r/networkfirewall_firewall.html.markdown +++ b/website/docs/cdktf/python/r/networkfirewall_firewall.html.markdown @@ -48,10 +48,52 @@ class MyConvertedCode(TerraformStack): ) ``` +### Transit Gateway Attached Firewall + +```python +# DO NOT EDIT. 
Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import Fn, Token, TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.data_aws_availability_zones import DataAwsAvailabilityZones +from imports.aws.networkfirewall_firewall import NetworkfirewallFirewall +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + example = DataAwsAvailabilityZones(self, "example", + state="available" + ) + aws_networkfirewall_firewall_example = NetworkfirewallFirewall(self, "example_1", + availability_zone_mapping=[NetworkfirewallFirewallAvailabilityZoneMapping( + availability_zone_id=Token.as_string( + Fn.lookup_nested(example.zone_ids, ["0"])) + ), NetworkfirewallFirewallAvailabilityZoneMapping( + availability_zone_id=Token.as_string( + Fn.lookup_nested(example.zone_ids, ["1"])) + ) + ], + firewall_policy_arn=Token.as_string(aws_networkfirewall_firewall_policy_example.arn), + name="example", + transit_gateway_id=Token.as_string(aws_ec2_transit_gateway_example.id) + ) + # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. 
+ aws_networkfirewall_firewall_example.override_logical_id("example") +``` + +### Transit Gateway Attached Firewall (Cross Account) + +A full example of how to create a Transit Gateway in one AWS account, share it with a second AWS account, and create Network Firewall in the second account to the Transit Gateway via the `aws_networkfirewall_firewall` and [`aws_networkfirewall_network_firewall_transit_gateway_attachment_accepter`](/docs/providers/aws/r/networkfirewall_network_firewall_transit_gateway_attachment_accepter.html) resources can be found in [the `./examples/network-firewall-cross-account-transit-gateway` directory within the Github Repository](https://github.com/hashicorp/terraform-provider-aws/tree/main/examples/network-firewall-cross-account-transit-gateway) + ## Argument Reference This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `availability_zone_change_protection` - (Optional) A setting indicating whether the firewall is protected against changes to its Availability Zone configuration. When set to `true`, you must first disable this protection before adding or removing Availability Zones. +* `availability_zone_mapping` - (Optional) Required when creating a transit gateway-attached firewall. Set of configuration blocks describing the Availability Zones where you want to create firewall endpoints for a transit gateway-attached firewall. * `delete_protection` - (Optional) A flag indicating whether the firewall is protected against deletion. Use this setting to protect against accidentally deleting a firewall that is in use. Defaults to `false`. * `description` - (Optional) A friendly description of the firewall. 
* `enabled_analysis_types` - (Optional) Set of types for which to collect analysis metrics. See [Reporting on network traffic in Network Firewall](https://docs.aws.amazon.com/network-firewall/latest/developerguide/reporting.html) for details on how to use the data. Valid values: `TLS_SNI`, `HTTP_HOST`. Defaults to `[]`. @@ -60,9 +102,16 @@ This resource supports the following arguments: * `firewall_policy_change_protection` - (Optional) A flag indicating whether the firewall is protected against a change to the firewall policy association. Use this setting to protect against accidentally modifying the firewall policy for a firewall that is in use. Defaults to `false`. * `name` - (Required, Forces new resource) A friendly name of the firewall. * `subnet_change_protection` - (Optional) A flag indicating whether the firewall is protected against changes to the subnet associations. Use this setting to protect against accidentally modifying the subnet associations for a firewall that is in use. Defaults to `false`. -* `subnet_mapping` - (Required) Set of configuration blocks describing the public subnets. Each subnet must belong to a different Availability Zone in the VPC. AWS Network Firewall creates a firewall endpoint in each subnet. See [Subnet Mapping](#subnet-mapping) below for details. +* `subnet_mapping` - (Optional) Required when creating a VPC attached firewall. Set of configuration blocks describing the public subnets. Each subnet must belong to a different Availability Zone in the VPC. AWS Network Firewall creates a firewall endpoint in each subnet. See [Subnet Mapping](#subnet-mapping) below for details. * `tags` - (Optional) Map of resource tags to associate with the resource. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. 
-* `vpc_id` - (Required, Forces new resource) The unique identifier of the VPC where AWS Network Firewall should create the firewall. +* `transit_gateway_id` - (Optional, Forces new resource) Required when creating a transit gateway-attached firewall. The unique identifier of the transit gateway to attach to this firewall. You can provide either a transit gateway from your account or one that has been shared with you through AWS Resource Access Manager. +* `vpc_id` - (Optional, Forces new resource) Required when creating a VPC attached firewall. The unique identifier of the VPC where AWS Network Firewall should create the firewall. + +### Availability Zone Mapping + +The `availability_zone_mapping` block supports the following arguments: + +* `availability_zone_id` - (Required) The ID of the Availability Zone where the firewall endpoint is located. ### Encryption Configuration @@ -90,16 +139,19 @@ This resource exports the following attributes in addition to the arguments abov * `endpoint_id` - The identifier of the firewall endpoint that AWS Network Firewall has instantiated in the subnet. You use this to identify the firewall endpoint in the VPC route tables, when you redirect the VPC traffic through the endpoint. * `subnet_id` - The unique identifier of the subnet that you've specified to be used for a firewall endpoint. * `availability_zone` - The Availability Zone where the subnet is configured. + * `transit_gateway_attachment_sync_states` - Set of transit gateway attachment sync states configured for use by the firewall. + * `attachment_id` - The unique identifier of the transit gateway attachment. * `tags_all` - A map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). 
* `update_token` - A string token used when updating a firewall. ## Timeouts [Configuration options](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts): -- `create` - (Default `30m`) -- `update` - (Default `30m`) -- `delete` - (Default `30m`) +- `create` - (Default `60m`) +- `update` - (Default `60m`) +- `delete` - (Default `60m`) ## Import @@ -126,4 +178,4 @@ Using `terraform import`, import Network Firewall Firewalls using their `arn`. F % terraform import aws_networkfirewall_firewall.example arn:aws:network-firewall:us-west-1:123456789012:firewall/example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/networkfirewall_firewall_policy.html.markdown b/website/docs/cdktf/python/r/networkfirewall_firewall_policy.html.markdown index 8cabf3c60649..dd39fc5542dd 100644 --- a/website/docs/cdktf/python/r/networkfirewall_firewall_policy.html.markdown +++ b/website/docs/cdktf/python/r/networkfirewall_firewall_policy.html.markdown @@ -22,10 +22,20 @@ from cdktf import Token, TerraformStack # Provider bindings are generated by running `cdktf get`. # See https://cdk.tf/provider-generation for more details. # +from imports.aws.data_aws_caller_identity import DataAwsCallerIdentity +from imports.aws.data_aws_partition import DataAwsPartition +from imports.aws.data_aws_region import DataAwsRegion from imports.aws.networkfirewall_firewall_policy import NetworkfirewallFirewallPolicy class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) + current = DataAwsCallerIdentity(self, "current") + data_aws_partition_current = DataAwsPartition(self, "current_1") + # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. 
+ data_aws_partition_current.override_logical_id("current") + data_aws_region_current = DataAwsRegion(self, "current_2") + # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. + data_aws_region_current.override_logical_id("current") NetworkfirewallFirewallPolicy(self, "example", firewall_policy=NetworkfirewallFirewallPolicyFirewallPolicy( stateless_default_actions=["aws:pass"], @@ -35,7 +45,7 @@ class MyConvertedCode(TerraformStack): resource_arn=Token.as_string(aws_networkfirewall_rule_group_example.arn) ) ], - tls_inspection_configuration_arn="arn:aws:network-firewall:REGION:ACCT:tls-configuration/example" + tls_inspection_configuration_arn="arn:${" + data_aws_partition_current.partition + "}:network-firewall:${" + data_aws_region_current.region + "}:${" + current.account_id + "}:tls-configuration/example" ), name="example", tags={ @@ -100,7 +110,7 @@ from imports.aws.networkfirewall_firewall_policy import NetworkfirewallFirewallP class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) - NetworkfirewallFirewallPolicy(self, "test", + NetworkfirewallFirewallPolicy(self, "example", firewall_policy=NetworkfirewallFirewallPolicyFirewallPolicy( stateless_custom_action=[NetworkfirewallFirewallPolicyFirewallPolicyStatelessCustomAction( action_definition=NetworkfirewallFirewallPolicyFirewallPolicyStatelessCustomActionActionDefinition( @@ -121,18 +131,87 @@ class MyConvertedCode(TerraformStack): ) ``` +## Policy with Active Threat Defense in Action Order + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import Token, TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. 
+# +from imports.aws.data_aws_partition import DataAwsPartition +from imports.aws.data_aws_region import DataAwsRegion +from imports.aws.networkfirewall_firewall_policy import NetworkfirewallFirewallPolicy +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + current = DataAwsPartition(self, "current") + data_aws_region_current = DataAwsRegion(self, "current_1") + # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. + data_aws_region_current.override_logical_id("current") + NetworkfirewallFirewallPolicy(self, "example", + firewall_policy=NetworkfirewallFirewallPolicyFirewallPolicy( + stateful_rule_group_reference=[NetworkfirewallFirewallPolicyFirewallPolicyStatefulRuleGroupReference( + deep_threat_inspection=Token.as_string(True), + resource_arn="arn:${" + current.partition + "}:network-firewall:${" + data_aws_region_current.region + "}:aws-managed:stateful-rulegroup/AttackInfrastructureActionOrder" + ) + ], + stateless_default_actions=["aws:pass"], + stateless_fragment_default_actions=["aws:drop"] + ), + name="example" + ) +``` + +## Policy with Active Threat Defense in Strict Order + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import Token, TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. 
+# +from imports.aws.data_aws_partition import DataAwsPartition +from imports.aws.data_aws_region import DataAwsRegion +from imports.aws.networkfirewall_firewall_policy import NetworkfirewallFirewallPolicy +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + current = DataAwsPartition(self, "current") + data_aws_region_current = DataAwsRegion(self, "current_1") + # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. + data_aws_region_current.override_logical_id("current") + NetworkfirewallFirewallPolicy(self, "example", + firewall_policy=NetworkfirewallFirewallPolicyFirewallPolicy( + stateful_engine_options=NetworkfirewallFirewallPolicyFirewallPolicyStatefulEngineOptions( + rule_order="STRICT_ORDER" + ), + stateful_rule_group_reference=[NetworkfirewallFirewallPolicyFirewallPolicyStatefulRuleGroupReference( + deep_threat_inspection=Token.as_string(False), + priority=1, + resource_arn="arn:${" + current.partition + "}:network-firewall:${" + data_aws_region_current.region + "}:aws-managed:stateful-rulegroup/AttackInfrastructureStrictOrder" + ) + ], + stateless_default_actions=["aws:pass"], + stateless_fragment_default_actions=["aws:drop"] + ), + name="example" + ) +``` + ## Argument Reference This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` - (Optional) A friendly description of the firewall policy. - * `encryption_configuration` - (Optional) KMS encryption configuration settings. See [Encryption Configuration](#encryption-configuration) below for details. 
- * `firewall_policy` - (Required) A configuration block describing the rule groups and policy actions to use in the firewall policy. See [Firewall Policy](#firewall-policy) below for details. - * `name` - (Required, Forces new resource) A friendly name of the firewall policy. - * `tags` - (Optional) Map of resource tags to associate with the resource. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. ### Encryption Configuration @@ -202,6 +281,9 @@ The `flow_timeouts` block supports the following argument: The `stateful_rule_group_reference` block supports the following arguments: +* `deep_threat_inspection` - (Optional) Whether to enable deep threat inspection, which allows AWS to analyze service logs of network traffic processed by these rule groups to identify threat indicators across customers. AWS will use these threat indicators to improve the active threat defense managed rule groups and protect the security of AWS customers and services. This only applies to active threat defense managed rule groups. + + For details, refer to [AWS active threat defense for AWS Network Firewall](https://docs.aws.amazon.com/network-firewall/latest/developerguide/aws-managed-rule-groups-atd.html) in the AWS Network Firewall Developer Guide. * `priority` - (Optional) An integer setting that indicates the order in which to apply the stateful rule groups in a single policy. This argument must be specified if the policy has a `stateful_engine_options` block with a `rule_order` value of `STRICT_ORDER`. AWS Network Firewall applies each stateful rule group to a packet starting with the group that has the lowest priority setting. * `resource_arn` - (Required) The Amazon Resource Name (ARN) of the stateful rule group. 
@@ -283,4 +365,4 @@ Using `terraform import`, import Network Firewall Policies using their `arn`. Fo % terraform import aws_networkfirewall_firewall_policy.example arn:aws:network-firewall:us-west-1:123456789012:firewall-policy/example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/networkfirewall_firewall_transit_gateway_attachment_accepter.html.markdown b/website/docs/cdktf/python/r/networkfirewall_firewall_transit_gateway_attachment_accepter.html.markdown new file mode 100644 index 000000000000..255ad28791a4 --- /dev/null +++ b/website/docs/cdktf/python/r/networkfirewall_firewall_transit_gateway_attachment_accepter.html.markdown @@ -0,0 +1,88 @@ +--- +subcategory: "Network Firewall" +layout: "aws" +page_title: "AWS: aws_networkfirewall_firewall_transit_gateway_attachment_accepter" +description: |- + Manages an AWS Network Firewall Firewall Transit Gateway Attachment Accepter. +--- + + + +# Resource: aws_networkfirewall_firewall_transit_gateway_attachment_accepter + +Manages an AWS Network Firewall Firewall Transit Gateway Attachment Accepter. + +When a cross-account (requester's AWS account differs from the accepter's AWS account) requester creates a Network Firewall with Transit Gateway ID using `aws_networkfirewall_firewall`. Then an EC2 Transit Gateway VPC Attachment resource is automatically created in the accepter's account. +The accepter can use the `aws_networkfirewall_firewall_transit_gateway_attachment_accepter` resource to "adopt" its side of the connection into management. + +~> **NOTE:** If the `transit_gateway_id` argument in the `aws_networkfirewall_firewall` resource is used to attach a firewall to a transit gateway in a cross-account setup (where **Auto accept shared attachments** is disabled), the resource will be considered created when the transit gateway attachment is in the *Pending Acceptance* state and the firewall is in the *Provisioning* status. 
At this point, you can use the `aws_networkfirewall_firewall_transit_gateway_attachment_accepter` resource to finalize the network firewall deployment. Once the transit gateway attachment reaches the *Available* state, the firewall status changes to *Ready*. + +## Example Usage + +### Basic Usage + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import Fn, Token, TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.networkfirewall_firewall_transit_gateway_attachment_accepter import NetworkfirewallFirewallTransitGatewayAttachmentAccepter +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + NetworkfirewallFirewallTransitGatewayAttachmentAccepter(self, "example", + transit_gateway_attachment_id=Token.as_string( + Fn.lookup_nested(aws_networkfirewall_firewall_example.firewall_status, ["0", "transit_gateway_attachment_sync_state", "0", "attachment_id" + ])) + ) +``` + +A full example of how to create a Transit Gateway in one AWS account, share it with a second AWS account, and create Network Firewall in the second account to the Transit Gateway via the `aws_networkfirewall_firewall` and `aws_networkfirewall_firewall_transit_gateway_attachment_accepter` resources can be found in [the `./examples/network-firewall-cross-account-transit-gateway` directory within the Github Repository](https://github.com/hashicorp/terraform-provider-aws/tree/main/examples/network-firewall-cross-account-transit-gateway) + +## Argument Reference + +This resource supports the following arguments: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `transit_gateway_attachment_id` - (Required) The unique identifier of the transit gateway attachment to accept. This ID is returned in the response when creating a transit gateway-attached firewall. + +## Attribute Reference + +This resource exports no additional attributes. + +## Timeouts + +[Configuration options](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts): + +* `create` - (Default `60m`) +* `delete` - (Default `60m`) + +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Network Firewall Firewall Transit Gateway Attachment Accepter using the `transit_gateway_attachment_id`. For example: + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.networkfirewall_firewall_transit_gateway_attachment_accepter import NetworkfirewallFirewallTransitGatewayAttachmentAccepter +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + NetworkfirewallFirewallTransitGatewayAttachmentAccepter.generate_config_for_import(self, "example", "tgw-attach-0c3b7e9570eee089c") +``` + +Using `terraform import`, import Network Firewall Firewall Transit Gateway Attachment Accepter using the `transit_gateway_attachment_id`. 
For example: + +```console +% terraform import aws_networkfirewall_firewall_transit_gateway_attachment_accepter.example tgw-attach-0c3b7e9570eee089c +``` + + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/networkfirewall_logging_configuration.html.markdown b/website/docs/cdktf/python/r/networkfirewall_logging_configuration.html.markdown index dca822729081..6576c3c43fff 100644 --- a/website/docs/cdktf/python/r/networkfirewall_logging_configuration.html.markdown +++ b/website/docs/cdktf/python/r/networkfirewall_logging_configuration.html.markdown @@ -106,8 +106,8 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `firewall_arn` - (Required, Forces new resource) The Amazon Resource Name (ARN) of the Network Firewall firewall. - * `logging_configuration` - (Required) A configuration block describing how AWS Network Firewall performs logging for a firewall. See [Logging Configuration](#logging-configuration) below for details. 
### Logging Configuration @@ -160,4 +160,4 @@ Using `terraform import`, import Network Firewall Logging Configurations using t % terraform import aws_networkfirewall_logging_configuration.example arn:aws:network-firewall:us-west-1:123456789012:firewall/example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/networkfirewall_resource_policy.html.markdown b/website/docs/cdktf/python/r/networkfirewall_resource_policy.html.markdown index 6e44105ce45c..abae219039f6 100644 --- a/website/docs/cdktf/python/r/networkfirewall_resource_policy.html.markdown +++ b/website/docs/cdktf/python/r/networkfirewall_resource_policy.html.markdown @@ -84,8 +84,8 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `policy` - (Required) JSON formatted policy document that controls access to the Network Firewall resource. The policy must be provided **without whitespaces**. We recommend using [jsonencode](https://www.terraform.io/docs/configuration/functions/jsonencode.html) for formatting as seen in the examples above. For more details, including available policy statement Actions, see the [Policy](https://docs.aws.amazon.com/network-firewall/latest/APIReference/API_PutResourcePolicy.html#API_PutResourcePolicy_RequestSyntax) parameter in the AWS API documentation. - * `resource_arn` - (Required, Forces new resource) The Amazon Resource Name (ARN) of the rule group or firewall policy. 
## Attribute Reference @@ -119,4 +119,4 @@ Using `terraform import`, import Network Firewall Resource Policies using the `r % terraform import aws_networkfirewall_resource_policy.example aws_networkfirewall_rule_group.example arn:aws:network-firewall:us-west-1:123456789012:stateful-rulegroup/example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/networkfirewall_rule_group.html.markdown b/website/docs/cdktf/python/r/networkfirewall_rule_group.html.markdown index aa026c1ea0b4..f5e8117f8e47 100644 --- a/website/docs/cdktf/python/r/networkfirewall_rule_group.html.markdown +++ b/website/docs/cdktf/python/r/networkfirewall_rule_group.html.markdown @@ -399,20 +399,14 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `capacity` - (Required, Forces new resource) The maximum number of operating resources that this rule group can use. For a stateless rule group, the capacity required is the sum of the capacity requirements of the individual rules. For a stateful rule group, the minimum capacity required is the number of individual rules. - * `description` - (Optional) A friendly description of the rule group. - * `encryption_configuration` - (Optional) KMS encryption configuration settings. See [Encryption Configuration](#encryption-configuration) below for details. - * `name` - (Required, Forces new resource) A friendly name of the rule group. - * `rule_group` - (Optional) A configuration block that defines the rule group rules. Required unless `rules` is specified. See [Rule Group](#rule-group) below for details. 
- * `rules` - (Optional) The stateful rule group rules specifications in Suricata file format, with one rule per line. Use this to import your existing Suricata compatible rule groups. Required unless `rule_group` is specified. - * `tags` - (Optional) A map of key:value pairs to associate with the resource. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. - * `type` - (Required) Whether the rule group is stateless (containing stateless rules) or stateful (containing stateful rules). Valid values include: `STATEFUL` or `STATELESS`. ### Encryption Configuration @@ -620,7 +614,7 @@ The `dimension` block supports the following argument: The `destination` block supports the following argument: -* `address_definition` - (Required) An IP address or a block of IP addresses in CIDR notation. AWS Network Firewall supports all address ranges for IPv4. +* `address_definition` - (Required) An IP address or a block of IP addresses in CIDR notation. AWS Network Firewall supports all address ranges for IPv4 and IPv6. ### Destination Port @@ -634,7 +628,7 @@ The `destination_port` block supports the following arguments: The `source` block supports the following argument: -* `address_definition` - (Required) An IP address or a block of IP addresses in CIDR notation. AWS Network Firewall supports all address ranges for IPv4. +* `address_definition` - (Required) An IP address or a block of IP addresses in CIDR notation. AWS Network Firewall supports all address ranges for IPv4 and IPv6. ### Source Port @@ -691,4 +685,4 @@ Using `terraform import`, import Network Firewall Rule Groups using their `arn`. 
% terraform import aws_networkfirewall_rule_group.example arn:aws:network-firewall:us-west-1:123456789012:stateful-rulegroup/example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/networkfirewall_tls_inspection_configuration.html.markdown b/website/docs/cdktf/python/r/networkfirewall_tls_inspection_configuration.html.markdown index 3918975efbe8..c7b40805d765 100644 --- a/website/docs/cdktf/python/r/networkfirewall_tls_inspection_configuration.html.markdown +++ b/website/docs/cdktf/python/r/networkfirewall_tls_inspection_configuration.html.markdown @@ -336,6 +336,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` - (Optional) Description of the TLS inspection configuration. * `encryption_configuration` - (Optional) Encryption configuration block. Detailed below. @@ -447,6 +448,27 @@ The `certificates` block exports the following attributes: ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_networkfirewall_tls_inspection_configuration.example + identity = { + "arn" = "arn:aws:network-firewall:us-west-2:123456789012:tls-configuration/example" + } +} + +resource "aws_networkfirewall_tls_inspection_configuration" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the Network Firewall TLS inspection configuration. 
+ In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Network Firewall TLS Inspection Configuration using the `arn`. For example: ```python @@ -470,4 +492,4 @@ Using `terraform import`, import Network Firewall TLS Inspection Configuration u % terraform import aws_networkfirewall_tls_inspection_configuration.example arn:aws:network-firewall::::tls-configuration/example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/networkfirewall_vpc_endpoint_association.html.markdown b/website/docs/cdktf/python/r/networkfirewall_vpc_endpoint_association.html.markdown new file mode 100644 index 000000000000..e8b928fe5482 --- /dev/null +++ b/website/docs/cdktf/python/r/networkfirewall_vpc_endpoint_association.html.markdown @@ -0,0 +1,112 @@ +--- +subcategory: "Network Firewall" +layout: "aws" +page_title: "AWS: aws_networkfirewall_vpc_endpoint_association" +description: |- + Manages a firewall endpoint for an AWS Network Firewall firewall. +--- + + + +# Resource: aws_networkfirewall_vpc_endpoint_association + +Manages a firewall endpoint for an AWS Network Firewall firewall. + +Use `aws_networkfirewall_vpc_endpoint_association` to establish new firewall endpoints in any Availability Zone where the firewall is already being used. The first use of a firewall in an Availability Zone must be defined by `aws_networkfirewall_firewall` resource and `subnet_mapping` argument. + +## Example Usage + +### Basic Usage + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import Token, TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. 
+# +from imports.aws.networkfirewall_vpc_endpoint_association import NetworkfirewallVpcEndpointAssociation +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + NetworkfirewallVpcEndpointAssociation(self, "example", + firewall_arn=Token.as_string(aws_networkfirewall_firewall_example.arn), + subnet_mapping=[NetworkfirewallVpcEndpointAssociationSubnetMapping( + subnet_id=Token.as_string(aws_subnet_example.id) + ), NetworkfirewallVpcEndpointAssociationSubnetMapping( + subnet_id=example_two.id + ) + ], + tags={ + "Name": "example endpoint" + }, + vpc_id=Token.as_string(aws_vpc_example.id) + ) +``` + +## Argument Reference + +This resource supports the following arguments: + +* `description` - (Optional) A description of the VPC endpoint association. +* `firewall_arn` - (Required) The Amazon Resource Name (ARN) that identifies the firewall. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `subnet_mapping` - (Required) The ID for a subnet that's used in an association with a firewall. See [Subnet Mapping](#subnet-mapping) below for details. +* `tags` - (Optional) Map of resource tags to associate with the resource. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. +* `vpc_id` - (Required) The unique identifier of the VPC for the endpoint association. + +### Subnet Mapping + +The `subnet_mapping` block supports the following arguments: + +* `ip_address_type` - (Optional) The subnet's IP address type. Valid values: `"DUALSTACK"`, `"IPV4"`.
+* `subnet_id` - (Required) The unique identifier for the subnet. + +## Attribute Reference + +This resource exports the following attributes in addition to the arguments above: + +* `tags_all` - A map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). +* `vpc_endpoint_association_arn` - ARN of the VPC Endpoint Association. +* `vpc_endpoint_association_id` - The unique identifier of the VPC endpoint association. +* `vpc_endpoint_association_status` - Nested list of information about the current status of the VPC Endpoint Association. + * `association_sync_states` - Set of subnets configured for use by the VPC Endpoint Association. + * `attachment` - Nested list describing the attachment status of the firewall's VPC Endpoint Association with a single VPC subnet. + * `endpoint_id` - The identifier of the VPC endpoint that AWS Network Firewall has instantiated in the subnet. You use this to identify the firewall endpoint in the VPC route tables, when you redirect the VPC traffic through the endpoint. + * `subnet_id` - The unique identifier of the subnet that you've specified to be used for a VPC Endpoint Association endpoint. + * `availability_zone` - The Availability Zone where the subnet is configured. + +## Timeouts + +[Configuration options](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts): + +* `create` - (Default `30m`) +* `delete` - (Default `30m`) + +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Network Firewall VPC Endpoint Association using the `vpc_endpoint_association_arn`. For example: + +```python +# DO NOT EDIT. 
Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.networkfirewall_vpc_endpoint_association import NetworkfirewallVpcEndpointAssociation +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + NetworkfirewallVpcEndpointAssociation.generate_config_for_import(self, "example", "arn:aws:network-firewall:us-west-1:123456789012:vpc-endpoint-association/example") +``` + +Using `terraform import`, import Network Firewall VPC Endpoint Association using the `vpc_endpoint_association_arn`. For example: + +```console +% terraform import aws_networkfirewall_vpc_endpoint_association.example arn:aws:network-firewall:us-west-1:123456789012:vpc-endpoint-association/example +``` + + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/networkmanager_attachment_accepter.html.markdown b/website/docs/cdktf/python/r/networkmanager_attachment_accepter.html.markdown index 4a5cc7a808f9..dcce762e060c 100644 --- a/website/docs/cdktf/python/r/networkmanager_attachment_accepter.html.markdown +++ b/website/docs/cdktf/python/r/networkmanager_attachment_accepter.html.markdown @@ -3,23 +3,65 @@ subcategory: "Network Manager" layout: "aws" page_title: "AWS: aws_networkmanager_attachment_accepter" description: |- - Terraform resource for managing an AWS Network Manager Attachment Accepter. + Manages an AWS Network Manager Attachment Accepter. --- # Resource: aws_networkmanager_attachment_accepter -Terraform resource for managing an AWS Network Manager Attachment Accepter. +Manages an AWS Network Manager Attachment Accepter. + +Use this resource to accept cross-account attachments in AWS Network Manager. 
When an attachment is created in one account and needs to be accepted by another account that owns the core network, this resource handles the acceptance process. ## Example Usage -### Example with VPC attachment +### VPC Attachment + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import Token, TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.networkmanager_attachment_accepter import NetworkmanagerAttachmentAccepter +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + NetworkmanagerAttachmentAccepter(self, "example", + attachment_id=Token.as_string(aws_networkmanager_vpc_attachment_example.id), + attachment_type=Token.as_string(aws_networkmanager_vpc_attachment_example.attachment_type) + ) +``` + +### Site-to-Site VPN Attachment + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import Token, TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.networkmanager_attachment_accepter import NetworkmanagerAttachmentAccepter +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + NetworkmanagerAttachmentAccepter(self, "example", + attachment_id=Token.as_string(aws_networkmanager_site_to_site_vpn_attachment_example.id), + attachment_type=Token.as_string(aws_networkmanager_site_to_site_vpn_attachment_example.attachment_type) + ) +``` + +### Connect Attachment ```python # DO NOT EDIT. 
Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug from constructs import Construct -from cdktf import TerraformStack +from cdktf import Token, TerraformStack # # Provider bindings are generated by running `cdktf get`. # See https://cdk.tf/provider-generation for more details. @@ -28,18 +70,18 @@ from imports.aws.networkmanager_attachment_accepter import NetworkmanagerAttachm class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) - NetworkmanagerAttachmentAccepter(self, "test", - attachment_id=vpc.id, - attachment_type=vpc.attachment_type + NetworkmanagerAttachmentAccepter(self, "example", + attachment_id=Token.as_string(aws_networkmanager_connect_attachment_example.id), + attachment_type=Token.as_string(aws_networkmanager_connect_attachment_example.attachment_type) ) ``` -### Example with site-to-site VPN attachment +### Transit Gateway Route Table Attachment ```python # DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug from constructs import Construct -from cdktf import TerraformStack +from cdktf import Token, TerraformStack # # Provider bindings are generated by running `cdktf get`. # See https://cdk.tf/provider-generation for more details. @@ -48,9 +90,29 @@ from imports.aws.networkmanager_attachment_accepter import NetworkmanagerAttachm class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) - NetworkmanagerAttachmentAccepter(self, "test", - attachment_id=vpn.id, - attachment_type=vpn.attachment_type + NetworkmanagerAttachmentAccepter(self, "example", + attachment_id=Token.as_string(aws_networkmanager_transit_gateway_route_table_attachment_example.id), + attachment_type=Token.as_string(aws_networkmanager_transit_gateway_route_table_attachment_example.attachment_type) + ) +``` + +### Direct Connect Gateway Attachment + +```python +# DO NOT EDIT. 
Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import Token, TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.networkmanager_attachment_accepter import NetworkmanagerAttachmentAccepter +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + NetworkmanagerAttachmentAccepter(self, "example", + attachment_id=Token.as_string(aws_networkmanager_dx_gateway_attachment_example.id), + attachment_type=Token.as_string(aws_networkmanager_dx_gateway_attachment_example.attachment_type) ) ``` @@ -58,21 +120,27 @@ class MyConvertedCode(TerraformStack): The following arguments are required: -- `attachment_id` - (Required) The ID of the attachment. -- `attachment_type` - (Required) The type of attachment. Valid values can be found in the [AWS Documentation](https://docs.aws.amazon.com/networkmanager/latest/APIReference/API_ListAttachments.html#API_ListAttachments_RequestSyntax) +* `attachment_id` - (Required) ID of the attachment. +* `attachment_type` - (Required) Type of attachment. Valid values: `CONNECT`, `DIRECT_CONNECT_GATEWAY`, `SITE_TO_SITE_VPN`, `TRANSIT_GATEWAY_ROUTE_TABLE`, `VPC`. ## Attribute Reference This resource exports the following attributes in addition to the arguments above: -- `attachment_policy_rule_number` - The policy rule number associated with the attachment. -- `core_network_arn` - The ARN of a core network. -- `core_network_id` - The id of a core network. -- `edge_location` - The Region where the edge is located. This is returned for all attachment types except a Direct Connect gateway attachment, which instead returns `edge_locations`. -- `edge_locations` - The edge locations that the Direct Connect gateway is associated with. This is returned only for Direct Connect gateway attachments. 
All other attachment types return `edge_location` -- `owner_account_id` - The ID of the attachment account owner. -- `resource_arn` - The attachment resource ARN. -- `segment_name` - The name of the segment attachment. -- `state` - The state of the attachment. - - \ No newline at end of file +* `attachment_policy_rule_number` - Policy rule number associated with the attachment. +* `core_network_arn` - ARN of the core network. +* `core_network_id` - ID of the core network. +* `edge_location` - Region where the edge is located. This is returned for all attachment types except Direct Connect gateway attachments, which instead return `edge_locations`. +* `edge_locations` - Edge locations that the Direct Connect gateway is associated with. This is returned only for Direct Connect gateway attachments. All other attachment types return `edge_location`. +* `owner_account_id` - ID of the attachment account owner. +* `resource_arn` - Attachment resource ARN. +* `segment_name` - Name of the segment attachment. +* `state` - State of the attachment. + +## Timeouts + +[Configuration options](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts): + +* `create` - (Default `15m`) + + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/networkmanager_connect_attachment.html.markdown b/website/docs/cdktf/python/r/networkmanager_connect_attachment.html.markdown index 7887602fbad2..2ea6879c0fab 100644 --- a/website/docs/cdktf/python/r/networkmanager_connect_attachment.html.markdown +++ b/website/docs/cdktf/python/r/networkmanager_connect_attachment.html.markdown @@ -3,14 +3,16 @@ subcategory: "Network Manager" layout: "aws" page_title: "AWS: aws_networkmanager_connect_attachment" description: |- - Terraform resource for managing an AWS Network Manager ConnectAttachment. + Manages an AWS Network Manager Connect Attachment. 
--- # Resource: aws_networkmanager_connect_attachment -Terraform resource for managing an AWS Network Manager ConnectAttachment. +Manages an AWS Network Manager Connect Attachment. + +Use this resource to create a Connect attachment in AWS Network Manager. Connect attachments enable you to connect your on-premises networks to your core network through a VPC or Transit Gateway attachment. ## Example Usage @@ -78,7 +80,7 @@ class MyConvertedCode(TerraformStack): aws_networkmanager_connect_attachment_example = NetworkmanagerConnectAttachment(self, "example_2", core_network_id=Token.as_string(awscc_networkmanager_core_network_example.id), - depends_on=["aws_networkmanager_attachment_accepter.test"], + depends_on=[aws_networkmanager_attachment_accepter_example], edge_location=example.edge_location, options=NetworkmanagerConnectAttachmentOptions( protocol="GRE" @@ -97,35 +99,40 @@ class MyConvertedCode(TerraformStack): The following arguments are required: -- `core_network_id` - (Required) The ID of a core network where you want to create the attachment. -- `transport_attachment_id` - (Required) The ID of the attachment between the two connections. -- `edge_location` - (Required) The Region where the edge is located. -- `options` - (Required) Options block. See [options](#options) for more information. +* `core_network_id` - (Required) ID of a core network where you want to create the attachment. +* `edge_location` - (Required) Region where the edge is located. +* `options` - (Required) Options block. See [options](#options) for more information. +* `transport_attachment_id` - (Required) ID of the attachment between the two connections. The following arguments are optional: -- `tags` - (Optional) Key-value tags for the attachment. 
If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. +* `tags` - (Optional) Key-value tags for the attachment. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. ### options -* `protocol` - (Required) The protocol used for the attachment connection. Possible values are `GRE` and `NO_ENCAP`. +* `protocol` - (Optional) Protocol used for the attachment connection. Valid values: `GRE`, `NO_ENCAP`. ## Attribute Reference This resource exports the following attributes in addition to the arguments above: -- `arn` - The ARN of the attachment. -- `attachment_policy_rule_number` - The policy rule number associated with the attachment. -- `attachment_type` - The type of attachment. -- `core_network_arn` - The ARN of a core network. -- `core_network_id` - The ID of a core network -- `edge_location` - The Region where the edge is located. -- `id` - The ID of the attachment. -- `owner_account_id` - The ID of the attachment account owner. -- `resource_arn` - The attachment resource ARN. -- `segment_name` - The name of the segment attachment. -- `state` - The state of the attachment. -- `tags_all` - A map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). +* `arn` - ARN of the attachment. +* `attachment_id` - ID of the attachment. +* `attachment_policy_rule_number` - Policy rule number associated with the attachment. +* `attachment_type` - Type of attachment. +* `core_network_arn` - ARN of a core network. 
+* `owner_account_id` - ID of the attachment account owner. +* `resource_arn` - Attachment resource ARN. +* `segment_name` - Name of the segment attachment. +* `state` - State of the attachment. +* `tags_all` - Map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). + +## Timeouts + +[Configuration options](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts): + +* `create` - (Default `10m`) +* `delete` - (Default `10m`) ## Import @@ -152,4 +159,4 @@ Using `terraform import`, import `aws_networkmanager_connect_attachment` using t % terraform import aws_networkmanager_connect_attachment.example attachment-0f8fa60d2238d1bd8 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/networkmanager_connect_peer.html.markdown b/website/docs/cdktf/python/r/networkmanager_connect_peer.html.markdown index 922408acdc98..4afd42327941 100644 --- a/website/docs/cdktf/python/r/networkmanager_connect_peer.html.markdown +++ b/website/docs/cdktf/python/r/networkmanager_connect_peer.html.markdown @@ -3,14 +3,16 @@ subcategory: "Network Manager" layout: "aws" page_title: "AWS: aws_networkmanager_connect_peer" description: |- - Terraform resource for managing an AWS Network Manager Connect Peer. + Manages an AWS Network Manager Connect Peer. --- # Resource: aws_networkmanager_connect_peer -Terraform resource for managing an AWS Network Manager Connect Peer. +Manages an AWS Network Manager Connect Peer. + +Use this resource to create a Connect peer in AWS Network Manager. Connect peers establish BGP sessions with your on-premises networks through Connect attachments, enabling dynamic routing between your core network and external networks. 
## Example Usage @@ -90,7 +92,7 @@ class MyConvertedCode(TerraformStack): aws_networkmanager_connect_attachment_example = NetworkmanagerConnectAttachment(self, "example_2", core_network_id=Token.as_string(awscc_networkmanager_core_network_example.id), - depends_on=["aws_networkmanager_attachment_accepter.test"], + depends_on=[aws_networkmanager_attachment_accepter_example], edge_location=example.edge_location, options=NetworkmanagerConnectAttachmentOptions( protocol="GRE" @@ -99,21 +101,21 @@ class MyConvertedCode(TerraformStack): ) # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. aws_networkmanager_connect_attachment_example.override_logical_id("example") - aws_networkmanager_connect_peer_example = NetworkmanagerConnectPeer(self, "example_3", + example2 = NetworkmanagerAttachmentAccepter(self, "example2", + attachment_id=Token.as_string(aws_networkmanager_connect_attachment_example.id), + attachment_type=Token.as_string(aws_networkmanager_connect_attachment_example.attachment_type) + ) + aws_networkmanager_connect_peer_example = NetworkmanagerConnectPeer(self, "example_4", bgp_options=NetworkmanagerConnectPeerBgpOptions( peer_asn=65500 ), connect_attachment_id=Token.as_string(aws_networkmanager_connect_attachment_example.id), - depends_on=["aws_networkmanager_attachment_accepter.example2"], + depends_on=[example2], inside_cidr_blocks=["172.16.0.0/16"], peer_address="127.0.0.1" ) # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. 
aws_networkmanager_connect_peer_example.override_logical_id("example") - NetworkmanagerAttachmentAccepter(self, "example2", - attachment_id=Token.as_string(aws_networkmanager_connect_attachment_example.id), - attachment_type=Token.as_string(aws_networkmanager_connect_attachment_example.attachment_type) - ) ``` ### Usage with a Tunnel-less Connect attachment @@ -154,7 +156,7 @@ class MyConvertedCode(TerraformStack): ), connect_attachment_id=Token.as_string(aws_networkmanager_connect_attachment_example.id), peer_address="127.0.0.1", - subnet_arn=test2.arn + subnet_arn=example2.arn ) # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. aws_networkmanager_connect_peer_example.override_logical_id("example") @@ -164,28 +166,40 @@ class MyConvertedCode(TerraformStack): The following arguments are required: -- `connect_attachment_id` - (Required) The ID of the connection attachment. -- `peer_address` - (Required) The Connect peer address. +* `connect_attachment_id` - (Required) ID of the connection attachment. +* `peer_address` - (Required) Connect peer address. The following arguments are optional: -- `bgp_options` (Optional) The Connect peer BGP options. -- `core_network_address` (Optional) A Connect peer core network address. -- `inside_cidr_blocks` - (Optional) The inside IP addresses used for BGP peering. Required when the Connect attachment protocol is `GRE`. See [`aws_networkmanager_connect_attachment`](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/networkmanager_connect_attachment) for details. -- `subnet_arn` - (Optional) The subnet ARN for the Connect peer. Required when the Connect attachment protocol is `NO_ENCAP`. See [`aws_networkmanager_connect_attachment`](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/networkmanager_connect_attachment) for details. -- `tags` - (Optional) Key-value tags for the attachment. 
If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. +* `bgp_options` - (Optional) Connect peer BGP options. See [bgp_options](#bgp_options) for more information. +* `core_network_address` - (Optional) Connect peer core network address. +* `inside_cidr_blocks` - (Optional) Inside IP addresses used for BGP peering. Required when the Connect attachment protocol is `GRE`. See [`aws_networkmanager_connect_attachment`](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/networkmanager_connect_attachment) for details. +* `subnet_arn` - (Optional) Subnet ARN for the Connect peer. Required when the Connect attachment protocol is `NO_ENCAP`. See [`aws_networkmanager_connect_attachment`](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/networkmanager_connect_attachment) for details. +* `tags` - (Optional) Key-value tags for the attachment. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. + +### bgp_options + +* `peer_asn` - (Optional) Peer ASN. ## Attribute Reference This resource exports the following attributes in addition to the arguments above: -- `arn` - The ARN of the attachment. -- `configuration` - The configuration of the Connect peer. -- `core_network_id` - The ID of a core network. -- `edge_location` - The Region where the peer is located. -- `id` - The ID of the Connect peer. -- `state` - The state of the Connect peer. 
-- `tags_all` - A map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). +* `arn` - ARN of the Connect peer. +* `configuration` - Configuration of the Connect peer. +* `connect_peer_id` - ID of the Connect peer. +* `core_network_id` - ID of a core network. +* `created_at` - Timestamp when the Connect peer was created. +* `edge_location` - Region where the peer is located. +* `state` - State of the Connect peer. +* `tags_all` - Map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). + +## Timeouts + +[Configuration options](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts): + +* `create` - (Default `10m`) +* `delete` - (Default `15m`) ## Import @@ -212,4 +226,4 @@ Using `terraform import`, import `aws_networkmanager_connect_peer` using the con % terraform import aws_networkmanager_connect_peer.example connect-peer-061f3e96275db1acc ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/networkmanager_connection.html.markdown b/website/docs/cdktf/python/r/networkmanager_connection.html.markdown index 87d38d6390c6..78664516abcc 100644 --- a/website/docs/cdktf/python/r/networkmanager_connection.html.markdown +++ b/website/docs/cdktf/python/r/networkmanager_connection.html.markdown @@ -3,15 +3,16 @@ subcategory: "Network Manager" layout: "aws" page_title: "AWS: aws_networkmanager_connection" description: |- - Creates a connection between two devices. + Manages a Network Manager Connection. --- # Resource: aws_networkmanager_connection -Creates a connection between two devices. 
-The devices can be a physical or virtual appliance that connects to a third-party appliance in a VPC, or a physical appliance that connects to another physical appliance in an on-premises network. +Manages a Network Manager Connection. + +Use this resource to create a connection between two devices in your global network. ## Example Usage @@ -36,22 +37,33 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -This resource supports the following arguments: +The following arguments are required: + +* `connected_device_id` - (Required) ID of the second device in the connection. +* `device_id` - (Required) ID of the first device in the connection. +* `global_network_id` - (Required) ID of the global network. + +The following arguments are optional: -* `connected_device_id` - (Required) The ID of the second device in the connection. -* `connected_link_id` - (Optional) The ID of the link for the second device. -* `description` - (Optional) A description of the connection. -* `device_id` - (Required) The ID of the first device in the connection. -* `global_network_id` - (Required) The ID of the global network. -* `link_id` - (Optional) The ID of the link for the first device. +* `connected_link_id` - (Optional) ID of the link for the second device. +* `description` - (Optional) Description of the connection. +* `link_id` - (Optional) ID of the link for the first device. * `tags` - (Optional) Key-value tags for the connection. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. ## Attribute Reference This resource exports the following attributes in addition to the arguments above: -* `arn` - The Amazon Resource Name (ARN) of the connection. 
-* `tags_all` - A map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). +* `arn` - ARN of the connection. +* `tags_all` - Map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). + +## Timeouts + +[Configuration options](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts): + +* `create` - (Default `10m`) +* `delete` - (Default `10m`) +* `update` - (Default `10m`) ## Import @@ -78,4 +90,4 @@ Using `terraform import`, import `aws_networkmanager_connection` using the conne % terraform import aws_networkmanager_connection.example arn:aws:networkmanager::123456789012:device/global-network-0d47f6t230mz46dy4/connection-07f6fd08867abc123 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/networkmanager_core_network.html.markdown b/website/docs/cdktf/python/r/networkmanager_core_network.html.markdown index 34016bb4dbe6..7b5cbac21f02 100644 --- a/website/docs/cdktf/python/r/networkmanager_core_network.html.markdown +++ b/website/docs/cdktf/python/r/networkmanager_core_network.html.markdown @@ -3,14 +3,16 @@ subcategory: "Network Manager" layout: "aws" page_title: "AWS: aws_networkmanager_core_network" description: |- - Provides a core network resource. + Manages a Network Manager Core Network. --- # Resource: aws_networkmanager_core_network -Provides a core network resource. +Manages a Network Manager Core Network. + +Use this resource to create and manage a core network within a global network. 
## Example Usage @@ -430,13 +432,15 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -This resource supports the following arguments: +The following arguments are required: -* `description` - (Optional) Description of the Core Network. -* `base_policy_document` - (Optional, conflicts with `base_policy_region`, `base_policy_regions`) Sets the base policy document for the core network. Refer to the [Core network policies documentation](https://docs.aws.amazon.com/network-manager/latest/cloudwan/cloudwan-policy-change-sets.html) for more information. -* `base_policy_region` - (Optional, **Deprecated** use the `base_policy_regions` or `base_policy_document` argument instead) The base policy created by setting the `create_base_policy` argument to `true` requires a region to be set in the `edge-locations`, `location` key. If `base_policy_region` is not specified, the region used in the base policy defaults to the region specified in the `provider` block. -* `base_policy_regions` - (Optional, conflicts with `base_policy_region`, `base_policy_document`) A list of regions to add to the base policy. The base policy created by setting the `create_base_policy` argument to `true` requires one or more regions to be set in the `edge-locations`, `location` key. If `base_policy_regions` is not specified, the region used in the base policy defaults to the region specified in the `provider` block. -* `create_base_policy` - (Optional) Specifies whether to create a base policy when a core network is created or updated. A base policy is created and set to `LIVE` to allow attachments to the core network (e.g. VPC Attachments) before applying a policy document provided using the [`aws_networkmanager_core_network_policy_attachment` resource](/docs/providers/aws/r/networkmanager_core_network_policy_attachment.html). 
This base policy is needed if your core network does not have any `LIVE` policies and your policy document has static routes pointing to VPC attachments and you want to attach your VPCs to the core network before applying the desired policy document. Valid values are `true` or `false`. An example of this Terraform snippet can be found above [for VPC Attachment in a single region](#with-vpc-attachment-single-region) and [for VPC Attachment multi-region](#with-vpc-attachment-multi-region). An example base policy is shown below. This base policy is overridden with the policy that you specify in the [`aws_networkmanager_core_network_policy_attachment` resource](/docs/providers/aws/r/networkmanager_core_network_policy_attachment.html). +* `global_network_id` - (Required) ID of the global network that a core network will be a part of. + +The following arguments are optional: + +* `base_policy_document` - (Optional, conflicts with `base_policy_regions`) Sets the base policy document for the core network. Refer to the [Core network policies documentation](https://docs.aws.amazon.com/network-manager/latest/cloudwan/cloudwan-policy-change-sets.html) for more information. +* `base_policy_regions` - (Optional, conflicts with `base_policy_document`) List of regions to add to the base policy. The base policy created by setting the `create_base_policy` argument to `true` requires one or more regions to be set in the `edge-locations`, `location` key. If `base_policy_regions` is not specified, the region used in the base policy defaults to the region specified in the `provider` block. +* `create_base_policy` - (Optional) Whether to create a base policy when a core network is created or updated. A base policy is created and set to `LIVE` to allow attachments to the core network (e.g. 
VPC Attachments) before applying a policy document provided using the [`aws_networkmanager_core_network_policy_attachment` resource](/docs/providers/aws/r/networkmanager_core_network_policy_attachment.html). This base policy is needed if your core network does not have any `LIVE` policies and your policy document has static routes pointing to VPC attachments and you want to attach your VPCs to the core network before applying the desired policy document. Valid values are `true` or `false`. An example of this Terraform snippet can be found above [for VPC Attachment in a single region](#with-vpc-attachment-single-region) and [for VPC Attachment multi-region](#with-vpc-attachment-multi-region). An example base policy is shown below. This base policy is overridden with the policy that you specify in the [`aws_networkmanager_core_network_policy_attachment` resource](/docs/providers/aws/r/networkmanager_core_network_policy_attachment.html). ```json { @@ -463,28 +467,20 @@ This resource supports the following arguments: } ``` -* `global_network_id` - (Required) The ID of the global network that a core network will be a part of. +* `description` - (Optional) Description of the Core Network. * `tags` - (Optional) Key-value tags for the Core Network. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. -## Timeouts - -[Configuration options](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts): - -* `create` - (Default `30m`) -* `delete` - (Default `30m`) -* `update` - (Default `30m`) - ## Attribute Reference This resource exports the following attributes in addition to the arguments above: -* `arn` - Core Network Amazon Resource Name (ARN). +* `arn` - Core Network ARN. * `created_at` - Timestamp when a core network was created. 
* `edges` - One or more blocks detailing the edges within a core network. [Detailed below](#edges). * `id` - Core Network ID. * `segments` - One or more blocks detailing the segments within a core network. [Detailed below](#segments). * `state` - Current state of a core network. -* `tags_all` - A map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). +* `tags_all` - Map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). ### `edges` @@ -502,6 +498,14 @@ The `segments` configuration block supports the following arguments: * `name` - Name of a core network segment. * `shared_segments` - Shared segments of a core network. +## Timeouts + +[Configuration options](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts): + +* `create` - (Default `30m`) +* `delete` - (Default `30m`) +* `update` - (Default `30m`) + ## Import In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import `aws_networkmanager_core_network` using the core network ID. 
For example: @@ -527,4 +531,4 @@ Using `terraform import`, import `aws_networkmanager_core_network` using the cor % terraform import aws_networkmanager_core_network.example core-network-0d47f6t230mz46dy4 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/networkmanager_core_network_policy_attachment.html.markdown b/website/docs/cdktf/python/r/networkmanager_core_network_policy_attachment.html.markdown index 72a893464f1d..517e75a84374 100644 --- a/website/docs/cdktf/python/r/networkmanager_core_network_policy_attachment.html.markdown +++ b/website/docs/cdktf/python/r/networkmanager_core_network_policy_attachment.html.markdown @@ -3,14 +3,16 @@ subcategory: "Network Manager" layout: "aws" page_title: "AWS: aws_networkmanager_core_network_policy_attachment" description: |- - Provides a Core Network Policy Attachment resource. + Manages a Network Manager Core Network Policy Attachment. --- # Resource: aws_networkmanager_core_network_policy_attachment -Provides a Core Network Policy Attachment resource. This puts a Core Network Policy to an existing Core Network and executes the change set, which deploys changes globally based on the policy submitted (Sets the policy to `LIVE`). +Manages a Network Manager Core Network Policy Attachment. + +Use this resource to attach a Core Network Policy to an existing Core Network and execute the change set, which deploys changes globally based on the policy submitted (sets the policy to `LIVE`). ~> **NOTE:** Deleting this resource will not delete the current policy defined in this resource. Deleting this resource will also not revert the current `LIVE` policy to the previous version. @@ -396,23 +398,23 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -This resource supports the following arguments: +The following arguments are required: -* `core_network_id` - (Required) The ID of the core network that a policy will be attached to and made `LIVE`. 
+* `core_network_id` - (Required) ID of the core network that a policy will be attached to and made `LIVE`. * `policy_document` - (Required) Policy document for creating a core network. Note that updating this argument will result in the new policy document version being set as the `LATEST` and `LIVE` policy document. Refer to the [Core network policies documentation](https://docs.aws.amazon.com/network-manager/latest/cloudwan/cloudwan-policy-change-sets.html) for more information. -## Timeouts - -[Configuration options](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts): - -* `update` - (Default `30m`). If this is the first time attaching a policy to a core network then this timeout value is also used as the `create` timeout value. - ## Attribute Reference This resource exports the following attributes in addition to the arguments above: * `state` - Current state of a core network. +## Timeouts + +[Configuration options](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts): + +* `update` - (Default `30m`). If this is the first time attaching a policy to a core network then this timeout value is also used as the `create` timeout value. + ## Import In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import `aws_networkmanager_core_network_policy_attachment` using the core network ID. 
For example: @@ -438,4 +440,4 @@ Using `terraform import`, import `aws_networkmanager_core_network_policy_attachm % terraform import aws_networkmanager_core_network_policy_attachment.example core-network-0d47f6t230mz46dy4 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/networkmanager_customer_gateway_association.html.markdown b/website/docs/cdktf/python/r/networkmanager_customer_gateway_association.html.markdown index 2bd1ffb0d551..2bd836dcda96 100644 --- a/website/docs/cdktf/python/r/networkmanager_customer_gateway_association.html.markdown +++ b/website/docs/cdktf/python/r/networkmanager_customer_gateway_association.html.markdown @@ -3,15 +3,16 @@ subcategory: "Network Manager" layout: "aws" page_title: "AWS: aws_networkmanager_customer_gateway_association" description: |- - Associates a customer gateway with a device and optionally, with a link. + Manages a Network Manager Customer Gateway Association. --- # Resource: aws_networkmanager_customer_gateway_association -Associates a customer gateway with a device and optionally, with a link. -If you specify a link, it must be associated with the specified device. +Manages a Network Manager Customer Gateway Association. + +Use this resource to associate a customer gateway with a device and optionally, with a link. If you specify a link, it must be associated with the specified device. ## Example Usage @@ -88,17 +89,27 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -This resource supports the following arguments: +The following arguments are required: + +* `customer_gateway_arn` - (Required) ARN of the customer gateway. +* `device_id` - (Required) ID of the device. +* `global_network_id` - (Required) ID of the global network. + +The following arguments are optional: -* `customer_gateway_arn` - (Required) The Amazon Resource Name (ARN) of the customer gateway. -* `device_id` - (Required) The ID of the device. 
-* `global_network_id` - (Required) The ID of the global network. -* `link_id` - (Optional) The ID of the link. +* `link_id` - (Optional) ID of the link. ## Attribute Reference This resource exports no additional attributes. +## Timeouts + +[Configuration options](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts): + +* `create` - (Default `10m`) +* `delete` - (Default `10m`) + ## Import In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import `aws_networkmanager_customer_gateway_association` using the global network ID and customer gateway ARN. For example: @@ -124,4 +135,4 @@ Using `terraform import`, import `aws_networkmanager_customer_gateway_associatio % terraform import aws_networkmanager_customer_gateway_association.example global-network-0d47f6t230mz46dy4,arn:aws:ec2:us-west-2:123456789012:customer-gateway/cgw-123abc05e04123abc ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/networkmanager_device.html.markdown b/website/docs/cdktf/python/r/networkmanager_device.html.markdown index a77789f45c85..5bc14402a5d5 100644 --- a/website/docs/cdktf/python/r/networkmanager_device.html.markdown +++ b/website/docs/cdktf/python/r/networkmanager_device.html.markdown @@ -3,15 +3,16 @@ subcategory: "Network Manager" layout: "aws" page_title: "AWS: aws_networkmanager_device" description: |- - Creates a device in a global network. + Manages a Network Manager Device. --- # Resource: aws_networkmanager_device -Creates a device in a global network. If you specify both a site ID and a location, -the location of the site is used for visualization in the Network Manager console. +Manages a Network Manager Device. + +Use this resource to create a device in a global network. If you specify both a site ID and a location, the location of the site is used for visualization in the Network Manager console. 
## Example Usage @@ -35,36 +36,47 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -This resource supports the following arguments: +The following arguments are required: + +* `global_network_id` - (Required) ID of the global network. + +The following arguments are optional: -* `aws_location` - (Optional) The AWS location of the device. Documented below. -* `description` - (Optional) A description of the device. -* `global_network_id` - (Required) The ID of the global network. -* `location` - (Optional) The location of the device. Documented below. -* `model` - (Optional) The model of device. -* `serial_number` - (Optional) The serial number of the device. -* `site_id` - (Optional) The ID of the site. +* `aws_location` - (Optional) AWS location of the device. Documented below. +* `description` - (Optional) Description of the device. +* `location` - (Optional) Location of the device. Documented below. +* `model` - (Optional) Model of device. +* `serial_number` - (Optional) Serial number of the device. +* `site_id` - (Optional) ID of the site. * `tags` - (Optional) Key-value tags for the device. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. -* `type` - (Optional) The type of device. -* `vendor` - (Optional) The vendor of the device. +* `type` - (Optional) Type of device. +* `vendor` - (Optional) Vendor of the device. The `aws_location` object supports the following: -* `subnet_arn` - (Optional) The Amazon Resource Name (ARN) of the subnet that the device is located in. -* `zone` - (Optional) The Zone that the device is located in. Specify the ID of an Availability Zone, Local Zone, Wavelength Zone, or an Outpost. +* `subnet_arn` - (Optional) ARN of the subnet that the device is located in. +* `zone` - (Optional) Zone that the device is located in. 
Specify the ID of an Availability Zone, Local Zone, Wavelength Zone, or an Outpost. The `location` object supports the following: -* `address` - (Optional) The physical address. -* `latitude` - (Optional) The latitude. -* `longitude` - (Optional) The longitude. +* `address` - (Optional) Physical address. +* `latitude` - (Optional) Latitude. +* `longitude` - (Optional) Longitude. ## Attribute Reference This resource exports the following attributes in addition to the arguments above: -* `arn` - The Amazon Resource Name (ARN) of the device. -* `tags_all` - A map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). +* `arn` - ARN of the device. +* `tags_all` - Map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). 
+ +## Timeouts + +[Configuration options](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts): + +* `create` - (Default `10m`) +* `delete` - (Default `10m`) +* `update` - (Default `10m`) ## Import @@ -91,4 +103,4 @@ Using `terraform import`, import `aws_networkmanager_device` using the device AR % terraform import aws_networkmanager_device.example arn:aws:networkmanager::123456789012:device/global-network-0d47f6t230mz46dy4/device-07f6fd08867abc123 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/networkmanager_dx_gateway_attachment.html.markdown b/website/docs/cdktf/python/r/networkmanager_dx_gateway_attachment.html.markdown index 37a8b6e58df9..e8c80e24cdb2 100644 --- a/website/docs/cdktf/python/r/networkmanager_dx_gateway_attachment.html.markdown +++ b/website/docs/cdktf/python/r/networkmanager_dx_gateway_attachment.html.markdown @@ -3,13 +3,15 @@ subcategory: "Network Manager" layout: "aws" page_title: "AWS: aws_networkmanager_dx_gateway_attachment" description: |- - Terraform resource for managing an AWS Network Manager Direct Connect Gateway Attachment. + Manages a Network Manager Direct Connect Gateway Attachment. --- # Resource: aws_networkmanager_dx_gateway_attachment -Terraform resource for managing an AWS Network Manager Direct Connect (DX) Gateway Attachment. +Manages a Network Manager Direct Connect Gateway Attachment. + +Use this resource to create and manage a Direct Connect Gateway attachment to a Cloud WAN core network. 
## Example Usage @@ -30,7 +32,7 @@ class MyConvertedCode(TerraformStack): NetworkmanagerDxGatewayAttachment(self, "test", core_network_id=Token.as_string(aws_networkmanager_core_network_policy_attachment_test.core_network_id), direct_connect_gateway_arn="arn:aws:directconnect::${" + current.account_id + "}:dx-gateway/${" + aws_dx_gateway_test.id + "}", - edge_locations=[Token.as_string(data_aws_region_current.name)] + edge_locations=[Token.as_string(data_aws_region_current.region)] ) ``` @@ -50,14 +52,15 @@ The following arguments are optional: This resource exports the following attributes in addition to the arguments above: +* `arn` - ARN of the attachment. * `attachment_policy_rule_number` - Policy rule number associated with the attachment. * `attachment_type` - Type of attachment. * `core_network_arn` - ARN of the core network for the attachment. -* `id` - The ID of the attachment. +* `id` - ID of the attachment. * `owner_account_id` - ID of the attachment account owner. * `segment_name` - Name of the segment attachment. * `state` - State of the attachment. -* `tags_all` - A map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). +* `tags_all` - Map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). 
## Timeouts @@ -92,4 +95,4 @@ Using `terraform import`, import Network Manager DX Gateway Attachment using the % terraform import aws_networkmanager_dx_gateway_attachment.example attachment-1a2b3c4d5e6f7g ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/networkmanager_global_network.html.markdown b/website/docs/cdktf/python/r/networkmanager_global_network.html.markdown index d191b4252fb3..78e15a7b13c7 100644 --- a/website/docs/cdktf/python/r/networkmanager_global_network.html.markdown +++ b/website/docs/cdktf/python/r/networkmanager_global_network.html.markdown @@ -3,14 +3,16 @@ subcategory: "Network Manager" layout: "aws" page_title: "AWS: aws_networkmanager_global_network" description: |- - Provides a global network resource. + Manages a Network Manager Global Network. --- # Resource: aws_networkmanager_global_network -Provides a global network resource. +Manages a Network Manager Global Network. + +Use this resource to create and manage a global network, which is a single private network that acts as the high-level container for your network objects. ## Example Usage @@ -33,7 +35,7 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -This resource supports the following arguments: +The following arguments are optional: * `description` - (Optional) Description of the Global Network. * `tags` - (Optional) Key-value tags for the Global Network. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. 
@@ -42,8 +44,16 @@ This resource supports the following arguments: This resource exports the following attributes in addition to the arguments above: -* `arn` - Global Network Amazon Resource Name (ARN) -* `tags_all` - A map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). +* `arn` - Global Network ARN. +* `tags_all` - Map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). + +## Timeouts + +[Configuration options](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts): + +* `create` - (Default `10m`) +* `delete` - (Default `10m`) +* `update` - (Default `10m`) ## Import @@ -70,4 +80,4 @@ Using `terraform import`, import `aws_networkmanager_global_network` using the g % terraform import aws_networkmanager_global_network.example global-network-0d47f6t230mz46dy4 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/networkmanager_link.html.markdown b/website/docs/cdktf/python/r/networkmanager_link.html.markdown index 5b069484bd30..c7305cfa15ce 100644 --- a/website/docs/cdktf/python/r/networkmanager_link.html.markdown +++ b/website/docs/cdktf/python/r/networkmanager_link.html.markdown @@ -3,14 +3,14 @@ subcategory: "Network Manager" layout: "aws" page_title: "AWS: aws_networkmanager_link" description: |- - Creates a link for a site. + Manages a Network Manager link. --- # Resource: aws_networkmanager_link -Creates a link for a site. +Manages a Network Manager link. Use this resource to create a link for a site. 
## Example Usage @@ -39,17 +39,20 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -This resource supports the following arguments: +The following arguments are required: -* `bandwidth` - (Required) The upload speed and download speed in Mbps. Documented below. -* `description` - (Optional) A description of the link. -* `global_network_id` - (Required) The ID of the global network. -* `provider_name` - (Optional) The provider of the link. -* `site_id` - (Required) The ID of the site. +* `bandwidth` - (Required) Upload speed and download speed in Mbps. [See below](#bandwidth). +* `global_network_id` - (Required) ID of the global network. +* `site_id` - (Required) ID of the site. + +The following arguments are optional: + +* `description` - (Optional) Description of the link. +* `provider_name` - (Optional) Provider of the link. * `tags` - (Optional) Key-value tags for the link. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. -* `type` - (Optional) The type of the link. +* `type` - (Optional) Type of the link. -The `bandwidth` object supports the following: +### bandwidth * `download_speed` - (Optional) Download speed in Mbps. * `upload_speed` - (Optional) Upload speed in Mbps. @@ -58,8 +61,16 @@ The `bandwidth` object supports the following: This resource exports the following attributes in addition to the arguments above: -* `arn` - Link Amazon Resource Name (ARN). -* `tags_all` - A map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). +* `arn` - Link ARN. 
+* `tags_all` - Map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). + +## Timeouts + +[Configuration options](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts): + +* `create` - (Default `10m`) +* `delete` - (Default `10m`) +* `update` - (Default `10m`) ## Import @@ -86,4 +97,4 @@ Using `terraform import`, import `aws_networkmanager_link` using the link ARN. F % terraform import aws_networkmanager_link.example arn:aws:networkmanager::123456789012:link/global-network-0d47f6t230mz46dy4/link-444555aaabbb11223 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/networkmanager_link_association.html.markdown b/website/docs/cdktf/python/r/networkmanager_link_association.html.markdown index 8c87b5ecaeb1..e19036418809 100644 --- a/website/docs/cdktf/python/r/networkmanager_link_association.html.markdown +++ b/website/docs/cdktf/python/r/networkmanager_link_association.html.markdown @@ -3,16 +3,14 @@ subcategory: "Network Manager" layout: "aws" page_title: "AWS: aws_networkmanager_link_association" description: |- - Associates a link to a device. + Manages a Network Manager link association. --- # Resource: aws_networkmanager_link_association -Associates a link to a device. -A device can be associated to multiple links and a link can be associated to multiple devices. -The device and link must be in the same global network and the same site. +Manages a Network Manager link association. Associates a link to a device. A device can be associated to multiple links and a link can be associated to multiple devices. The device and link must be in the same global network and the same site. 
## Example Usage @@ -37,16 +35,23 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -This resource supports the following arguments: +The following arguments are required: -* `device_id` - (Required) The ID of the device. -* `global_network_id` - (Required) The ID of the global network. -* `link_id` - (Required) The ID of the link. +* `device_id` - (Required) ID of the device. +* `global_network_id` - (Required) ID of the global network. +* `link_id` - (Required) ID of the link. ## Attribute Reference This resource exports no additional attributes. +## Timeouts + +[Configuration options](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts): + +* `create` - (Default `10m`) +* `delete` - (Default `10m`) + ## Import In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import `aws_networkmanager_link_association` using the global network ID, link ID and device ID. For example: @@ -72,4 +77,4 @@ Using `terraform import`, import `aws_networkmanager_link_association` using the % terraform import aws_networkmanager_link_association.example global-network-0d47f6t230mz46dy4,link-444555aaabbb11223,device-07f6fd08867abc123 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/networkmanager_site.html.markdown b/website/docs/cdktf/python/r/networkmanager_site.html.markdown index dc17f22fc57b..527f4c3d2941 100644 --- a/website/docs/cdktf/python/r/networkmanager_site.html.markdown +++ b/website/docs/cdktf/python/r/networkmanager_site.html.markdown @@ -3,14 +3,14 @@ subcategory: "Network Manager" layout: "aws" page_title: "AWS: aws_networkmanager_site" description: |- - Creates a site in a global network. + Manages a Network Manager site. --- # Resource: aws_networkmanager_site -Creates a site in a global network. +Manages a Network Manager site. Use this resource to create a site in a global network. 
## Example Usage @@ -37,14 +37,17 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -This resource supports the following arguments: +The following arguments are required: + +* `global_network_id` - (Required) ID of the Global Network to create the site in. + +The following arguments are optional: -* `global_network_id` - (Required) The ID of the Global Network to create the site in. * `description` - (Optional) Description of the Site. -* `location` - (Optional) The site location as documented below. +* `location` - (Optional) Site location. [See below](#location). * `tags` - (Optional) Key-value tags for the Site. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. -The `location` object supports the following: +### location * `address` - (Optional) Address of the location. * `latitude` - (Optional) Latitude of the location. @@ -54,8 +57,16 @@ The `location` object supports the following: This resource exports the following attributes in addition to the arguments above: -* `arn` - Site Amazon Resource Name (ARN) -* `tags_all` - A map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). +* `arn` - Site ARN. +* `tags_all` - Map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). 
+ +## Timeouts + +[Configuration options](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts): + +* `create` - (Default `10m`) +* `delete` - (Default `10m`) +* `update` - (Default `10m`) ## Import @@ -82,4 +93,4 @@ Using `terraform import`, import `aws_networkmanager_site` using the site ARN. F % terraform import aws_networkmanager_site.example arn:aws:networkmanager::123456789012:site/global-network-0d47f6t230mz46dy4/site-444555aaabbb11223 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/networkmanager_site_to_site_vpn_attachment.html.markdown b/website/docs/cdktf/python/r/networkmanager_site_to_site_vpn_attachment.html.markdown index 2c33e704612d..529874781497 100644 --- a/website/docs/cdktf/python/r/networkmanager_site_to_site_vpn_attachment.html.markdown +++ b/website/docs/cdktf/python/r/networkmanager_site_to_site_vpn_attachment.html.markdown @@ -3,14 +3,14 @@ subcategory: "Network Manager" layout: "aws" page_title: "AWS: aws_networkmanager_site_to_site_vpn_attachment" description: |- - Terraform resource for managing an AWS Network Manager SiteToSiteAttachment. + Manages a Network Manager site-to-site VPN attachment. --- # Resource: aws_networkmanager_site_to_site_vpn_attachment -Terraform resource for managing an AWS Network Manager SiteToSiteAttachment. +Manages a Network Manager site-to-site VPN attachment. ## Example Usage @@ -99,7 +99,7 @@ class MyConvertedCode(TerraformStack): asn_ranges=["64512-64555"], edge_locations=[DataAwsNetworkmanagerCoreNetworkPolicyDocumentCoreNetworkConfigurationEdgeLocations( asn=Token.as_string(64512), - location=Token.as_string(current.name) + location=Token.as_string(current.region) ) ], vpn_ecmp_support=False @@ -152,29 +152,36 @@ class MyConvertedCode(TerraformStack): The following arguments are required: -- `core_network_id` - (Required) The ID of a core network for the VPN attachment. 
-- `vpn_connection_arn` - (Required) The ARN of the site-to-site VPN connection. +* `core_network_id` - (Required) ID of a core network for the VPN attachment. +* `vpn_connection_arn` - (Required) ARN of the site-to-site VPN connection. The following arguments are optional: -- `tags` - (Optional) Key-value tags for the attachment. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. +* `tags` - (Optional) Key-value tags for the attachment. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. ## Attribute Reference This resource exports the following attributes in addition to the arguments above: -- `arn` - The ARN of the attachment. -- `attachment_policy_rule_number` - The policy rule number associated with the attachment. -- `attachment_type` - The type of attachment. -- `core_network_arn` - The ARN of a core network. -- `core_network_id` - The ID of a core network -- `edge_location` - The Region where the edge is located. -- `id` - The ID of the attachment. -- `owner_account_id` - The ID of the attachment account owner. -- `resource_arn` - The attachment resource ARN. -- `segment_name` - The name of the segment attachment. -- `state` - The state of the attachment. -- `tags_all` - A map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). +* `arn` - ARN of the attachment. +* `attachment_policy_rule_number` - Policy rule number associated with the attachment. +* `attachment_type` - Type of attachment. 
+* `core_network_arn` - ARN of a core network. +* `edge_location` - Region where the edge is located. +* `id` - ID of the attachment. +* `owner_account_id` - ID of the attachment account owner. +* `resource_arn` - Attachment resource ARN. +* `segment_name` - Name of the segment attachment. +* `state` - State of the attachment. +* `tags_all` - Map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). + +## Timeouts + +[Configuration options](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts): + +* `create` - (Default `10m`) +* `delete` - (Default `10m`) +* `update` - (Default `10m`) ## Import @@ -201,4 +208,4 @@ Using `terraform import`, import `aws_networkmanager_site_to_site_vpn_attachment % terraform import aws_networkmanager_site_to_site_vpn_attachment.example attachment-0f8fa60d2238d1bd8 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/networkmanager_transit_gateway_connect_peer_association.html.markdown b/website/docs/cdktf/python/r/networkmanager_transit_gateway_connect_peer_association.html.markdown index 29794094d607..865aa7472d43 100644 --- a/website/docs/cdktf/python/r/networkmanager_transit_gateway_connect_peer_association.html.markdown +++ b/website/docs/cdktf/python/r/networkmanager_transit_gateway_connect_peer_association.html.markdown @@ -3,15 +3,14 @@ subcategory: "Network Manager" layout: "aws" page_title: "AWS: aws_networkmanager_transit_gateway_connect_peer_association" description: |- - Associates a transit gateway Connect peer with a device, and optionally, with a link. + Manages a Network Manager transit gateway Connect peer association. 
--- # Resource: aws_networkmanager_transit_gateway_connect_peer_association -Associates a transit gateway Connect peer with a device, and optionally, with a link. -If you specify a link, it must be associated with the specified device. +Manages a Network Manager transit gateway Connect peer association. Associates a transit gateway Connect peer with a device, and optionally, with a link. If you specify a link, it must be associated with the specified device. ## Example Usage @@ -36,20 +35,30 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -This resource supports the following arguments: +The following arguments are required: -* `device_id` - (Required) The ID of the device. -* `global_network_id` - (Required) The ID of the global network. -* `link_id` - (Optional) The ID of the link. -* `transit_gateway_connect_peer_arn` - (Required) The Amazon Resource Name (ARN) of the Connect peer. +* `device_id` - (Required) ID of the device. +* `global_network_id` - (Required) ID of the global network. +* `transit_gateway_connect_peer_arn` - (Required) ARN of the Connect peer. + +The following arguments are optional: + +* `link_id` - (Optional) ID of the link. ## Attribute Reference This resource exports no additional attributes. +## Timeouts + +[Configuration options](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts): + +* `create` - (Default `10m`) +* `delete` - (Default `10m`) + ## Import -In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import `aws_networkmanager_transit_gateway_connect_peer_association` using the global network ID and customer gateway ARN. For example: +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import `aws_networkmanager_transit_gateway_connect_peer_association` using the global network ID and Connect peer ARN. For example: ```python # DO NOT EDIT. 
Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug @@ -66,10 +75,10 @@ class MyConvertedCode(TerraformStack): NetworkmanagerTransitGatewayConnectPeerAssociation.generate_config_for_import(self, "example", "global-network-0d47f6t230mz46dy4,arn:aws:ec2:us-west-2:123456789012:transit-gateway-connect-peer/tgw-connect-peer-12345678") ``` -Using `terraform import`, import `aws_networkmanager_transit_gateway_connect_peer_association` using the global network ID and customer gateway ARN. For example: +Using `terraform import`, import `aws_networkmanager_transit_gateway_connect_peer_association` using the global network ID and Connect peer ARN. For example: ```console % terraform import aws_networkmanager_transit_gateway_connect_peer_association.example global-network-0d47f6t230mz46dy4,arn:aws:ec2:us-west-2:123456789012:transit-gateway-connect-peer/tgw-connect-peer-12345678 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/networkmanager_transit_gateway_peering.html.markdown b/website/docs/cdktf/python/r/networkmanager_transit_gateway_peering.html.markdown index 258020a4685b..3c00cef2c8f6 100644 --- a/website/docs/cdktf/python/r/networkmanager_transit_gateway_peering.html.markdown +++ b/website/docs/cdktf/python/r/networkmanager_transit_gateway_peering.html.markdown @@ -3,14 +3,14 @@ subcategory: "Network Manager" layout: "aws" page_title: "AWS: aws_networkmanager_transit_gateway_peering" description: |- - Creates a peering connection between an AWS Cloud WAN core network and an AWS Transit Gateway. + Manages a Network Manager transit gateway peering connection. --- # Resource: aws_networkmanager_transit_gateway_peering -Creates a peering connection between an AWS Cloud WAN core network and an AWS Transit Gateway. +Manages a Network Manager transit gateway peering connection. Creates a peering connection between an AWS Cloud WAN core network and an AWS Transit Gateway. 
## Example Usage @@ -28,31 +28,43 @@ class MyConvertedCode(TerraformStack): super().__init__(scope, name) NetworkmanagerTransitGatewayPeering(self, "example", core_network_id=Token.as_string(awscc_networkmanager_core_network_example.id), + depends_on=[aws_ec2_transit_gateway_policy_table_example, aws_networkmanager_core_network_policy_attachment_example + ], transit_gateway_arn=Token.as_string(aws_ec2_transit_gateway_example.arn) ) ``` ## Argument Reference -This resource supports the following arguments: +The following arguments are required: + +* `core_network_id` - (Required) ID of a core network. +* `transit_gateway_arn` - (Required) ARN of the transit gateway for the peering request. + +The following arguments are optional: -* `core_network_id` - (Required) The ID of a core network. * `tags` - (Optional) Key-value tags for the peering. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. -* `transit_gateway_arn` - (Required) The ARN of the transit gateway for the peering request. ## Attribute Reference This resource exports the following attributes in addition to the arguments above: -* `arn` - Peering Amazon Resource Name (ARN). -* `core_network_arn` - The ARN of the core network. -* `edge_location` - The edge location for the peer. +* `arn` - Peering ARN. +* `core_network_arn` - ARN of the core network. +* `edge_location` - Edge location for the peer. * `id` - Peering ID. -* `owner_account_id` - The ID of the account owner. -* `peering_type` - The type of peering. This will be `TRANSIT_GATEWAY`. -* `resource_arn` - The resource ARN of the peer. 
-* `tags_all` - A map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). -* `transit_gateway_peering_attachment_id` - The ID of the transit gateway peering attachment. +* `owner_account_id` - ID of the account owner. +* `peering_type` - Type of peering. This will be `TRANSIT_GATEWAY`. +* `resource_arn` - Resource ARN of the peer. +* `tags_all` - Map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). +* `transit_gateway_peering_attachment_id` - ID of the transit gateway peering attachment. + +## Timeouts + +[Configuration options](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts): + +* `create` - (Default `20m`) +* `delete` - (Default `20m`) ## Import @@ -79,4 +91,4 @@ Using `terraform import`, import `aws_networkmanager_transit_gateway_peering` us % terraform import aws_networkmanager_transit_gateway_peering.example peering-444555aaabbb11223 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/networkmanager_transit_gateway_registration.html.markdown b/website/docs/cdktf/python/r/networkmanager_transit_gateway_registration.html.markdown index 177fd941fec6..51845f63b648 100644 --- a/website/docs/cdktf/python/r/networkmanager_transit_gateway_registration.html.markdown +++ b/website/docs/cdktf/python/r/networkmanager_transit_gateway_registration.html.markdown @@ -3,16 +3,14 @@ subcategory: "Network Manager" layout: "aws" page_title: "AWS: aws_networkmanager_transit_gateway_registration" description: |- - Registers a transit gateway to a global network. + Manages a Network Manager transit gateway registration. 
--- # Resource: aws_networkmanager_transit_gateway_registration -Registers a transit gateway to a global network. The transit gateway can be in any AWS Region, -but it must be owned by the same AWS account that owns the global network. -You cannot register a transit gateway in more than one global network. +Manages a Network Manager transit gateway registration. Registers a transit gateway to a global network. The transit gateway can be in any AWS Region, but it must be owned by the same AWS account that owns the global network. You cannot register a transit gateway in more than one global network. ## Example Usage @@ -48,15 +46,22 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -This resource supports the following arguments: +The following arguments are required: -* `global_network_id` - (Required) The ID of the Global Network to register to. -* `transit_gateway_arn` - (Required) The ARN of the Transit Gateway to register. +* `global_network_id` - (Required) ID of the Global Network to register to. +* `transit_gateway_arn` - (Required) ARN of the Transit Gateway to register. ## Attribute Reference This resource exports no additional attributes. +## Timeouts + +[Configuration options](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts): + +* `create` - (Default `10m`) +* `delete` - (Default `10m`) + ## Import In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import `aws_networkmanager_transit_gateway_registration` using the global network ID and transit gateway ARN. 
For example: @@ -82,4 +87,4 @@ Using `terraform import`, import `aws_networkmanager_transit_gateway_registratio % terraform import aws_networkmanager_transit_gateway_registration.example global-network-0d47f6t230mz46dy4,arn:aws:ec2:us-west-2:123456789012:transit-gateway/tgw-123abc05e04123abc ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/networkmanager_transit_gateway_route_table_attachment.html.markdown b/website/docs/cdktf/python/r/networkmanager_transit_gateway_route_table_attachment.html.markdown index 8b5561409e6b..c807b9afa9d9 100644 --- a/website/docs/cdktf/python/r/networkmanager_transit_gateway_route_table_attachment.html.markdown +++ b/website/docs/cdktf/python/r/networkmanager_transit_gateway_route_table_attachment.html.markdown @@ -3,14 +3,14 @@ subcategory: "Network Manager" layout: "aws" page_title: "AWS: aws_networkmanager_transit_gateway_route_table_attachment" description: |- - Creates a transit gateway route table attachment. + Manages a Network Manager transit gateway route table attachment. --- # Resource: aws_networkmanager_transit_gateway_route_table_attachment -Creates a transit gateway route table attachment. +Manages a Network Manager transit gateway route table attachment. ## Example Usage @@ -34,28 +34,38 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -This resource supports the following arguments: +The following arguments are required: + +* `peering_id` - (Required) ID of the peer for the attachment. +* `transit_gateway_route_table_arn` - (Required) ARN of the transit gateway route table for the attachment. + +The following arguments are optional: -* `peering_id` - (Required) The ID of the peer for the attachment. * `tags` - (Optional) Key-value tags for the attachment. 
If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. -* `transit_gateway_route_table_arn` - (Required) The ARN of the transit gateway route table for the attachment. ## Attribute Reference This resource exports the following attributes in addition to the arguments above: -* `arn` - Attachment Amazon Resource Name (ARN). -* `attachment_policy_rule_number` - The policy rule number associated with the attachment. -* `attachment_type` - The type of attachment. -* `core_network_arn` - The ARN of the core network. -* `core_network_id` - The ID of the core network. -* `edge_location` - The edge location for the peer. -* `id` - The ID of the attachment. -* `owner_account_id` - The ID of the attachment account owner. -* `resource_arn` - The attachment resource ARN. -* `segment_name` - The name of the segment attachment. -* `state` - The state of the attachment. -* `tags_all` - A map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). +* `arn` - Attachment ARN. +* `attachment_policy_rule_number` - Policy rule number associated with the attachment. +* `attachment_type` - Type of attachment. +* `core_network_arn` - ARN of the core network. +* `core_network_id` - ID of the core network. +* `edge_location` - Edge location for the peer. +* `id` - ID of the attachment. +* `owner_account_id` - ID of the attachment account owner. +* `resource_arn` - Attachment resource ARN. +* `segment_name` - Name of the segment attachment. +* `state` - State of the attachment. 
+* `tags_all` - Map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). + +## Timeouts + +[Configuration options](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts): + +* `create` - (Default `10m`) +* `delete` - (Default `10m`) ## Import @@ -82,4 +92,4 @@ Using `terraform import`, import `aws_networkmanager_transit_gateway_route_table % terraform import aws_networkmanager_transit_gateway_route_table_attachment.example attachment-0f8fa60d2238d1bd8 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/networkmanager_vpc_attachment.html.markdown b/website/docs/cdktf/python/r/networkmanager_vpc_attachment.html.markdown index f5b7a105ac2f..fbabc261356c 100644 --- a/website/docs/cdktf/python/r/networkmanager_vpc_attachment.html.markdown +++ b/website/docs/cdktf/python/r/networkmanager_vpc_attachment.html.markdown @@ -3,14 +3,14 @@ subcategory: "Network Manager" layout: "aws" page_title: "AWS: aws_networkmanager_vpc_attachment" description: |- - Terraform resource for managing an AWS Network Manager VPC Attachment. + Manages a Network Manager VPC attachment. --- # Resource: aws_networkmanager_vpc_attachment -Terraform resource for managing an AWS Network Manager VPC Attachment. +Manages a Network Manager VPC attachment. ## Example Usage @@ -35,42 +35,76 @@ class MyConvertedCode(TerraformStack): ) ``` +### Usage with Options + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import Token, TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. 
+# +from imports.aws.networkmanager_vpc_attachment import NetworkmanagerVpcAttachment +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + NetworkmanagerVpcAttachment(self, "example", + core_network_id=Token.as_string(awscc_networkmanager_core_network_example.id), + options=NetworkmanagerVpcAttachmentOptions( + appliance_mode_support=False, + dns_support=True, + ipv6_support=False, + security_group_referencing_support=True + ), + subnet_arns=[Token.as_string(aws_subnet_example.arn)], + vpc_arn=Token.as_string(aws_vpc_example.arn) + ) +``` + ## Argument Reference The following arguments are required: -* `core_network_id` - (Required) The ID of a core network for the VPC attachment. -* `subnet_arns` - (Required) The subnet ARN of the VPC attachment. -* `vpc_arn` - (Required) The ARN of the VPC. +* `core_network_id` - (Required) ID of a core network for the VPC attachment. +* `subnet_arns` - (Required) Subnet ARNs of the VPC attachment. +* `vpc_arn` - (Required) ARN of the VPC. The following arguments are optional: -* `options` - (Optional) Options for the VPC attachment. +* `options` - (Optional) Options for the VPC attachment. [See below](#options). * `tags` - (Optional) Key-value tags for the attachment. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. ### options -* `appliance_mode_support` - (Optional) Indicates whether appliance mode is supported. - If enabled, traffic flow between a source and destination use the same Availability Zone for the VPC attachment for the lifetime of that flow. - If the VPC attachment is pending acceptance, changing this value will recreate the resource. -* `ipv6_support` - (Optional) Indicates whether IPv6 is supported. 
- If the VPC attachment is pending acceptance, changing this value will recreate the resource. +* `appliance_mode_support` - (Optional) Whether to enable appliance mode support. If enabled, traffic flow between a source and destination use the same Availability Zone for the VPC attachment for the lifetime of that flow. If the VPC attachment is pending acceptance, changing this value will recreate the resource. +* `dns_support` - (Optional) Whether to enable DNS support. If the VPC attachment is pending acceptance, changing this value will recreate the resource. +* `ipv6_support` - (Optional) Whether to enable IPv6 support. If the VPC attachment is pending acceptance, changing this value will recreate the resource. +* `security_group_referencing_support` - (Optional) Whether to enable security group referencing support for this VPC attachment. The default is `true`. However, at the core network policy-level the default is set to `false`. If the VPC attachment is pending acceptance, changing this value will recreate the resource. ## Attribute Reference This resource exports the following attributes in addition to the arguments above: -* `arn` - The ARN of the attachment. -* `attachment_policy_rule_number` - The policy rule number associated with the attachment. -* `attachment_type` - The type of attachment. -* `core_network_arn` - The ARN of a core network. -* `edge_location` - The Region where the edge is located. -* `id` - The ID of the attachment. -* `owner_account_id` - The ID of the attachment account owner. -* `resource_arn` - The attachment resource ARN. -* `segment_name` - The name of the segment attachment. -* `state` - The state of the attachment. -* `tags_all` - A map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). +* `arn` - ARN of the attachment. 
+* `attachment_policy_rule_number` - Policy rule number associated with the attachment. +* `attachment_type` - Type of attachment. +* `core_network_arn` - ARN of a core network. +* `edge_location` - Region where the edge is located. +* `id` - ID of the attachment. +* `owner_account_id` - ID of the attachment account owner. +* `resource_arn` - Attachment resource ARN. +* `segment_name` - Name of the segment attachment. +* `state` - State of the attachment. +* `tags_all` - Map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). + +## Timeouts + +[Configuration options](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts): + +* `create` - (Default `15m`) +* `delete` - (Default `10m`) +* `update` - (Default `10m`) ## Import @@ -97,4 +131,4 @@ Using `terraform import`, import `aws_networkmanager_vpc_attachment` using the a % terraform import aws_networkmanager_vpc_attachment.example attachment-0f8fa60d2238d1bd8 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/networkmonitor_monitor.html.markdown b/website/docs/cdktf/python/r/networkmonitor_monitor.html.markdown index 0cb4bf9569ba..328c719c05b1 100644 --- a/website/docs/cdktf/python/r/networkmonitor_monitor.html.markdown +++ b/website/docs/cdktf/python/r/networkmonitor_monitor.html.markdown @@ -42,6 +42,7 @@ The following arguments are required: The following arguments are optional: +- `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
- `aggregation_period` - (Optional) The time, in seconds, that metrics are aggregated and sent to Amazon CloudWatch. Valid values are either 30 or 60. - `tags` - (Optional) Key-value tags for the monitor. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. @@ -77,4 +78,4 @@ Using `terraform import`, import `aws_networkmonitor_monitor` using the monitor % terraform import aws_networkmonitor_monitor.example monitor-7786087912324693644 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/networkmonitor_probe.html.markdown b/website/docs/cdktf/python/r/networkmonitor_probe.html.markdown index e867ff3f580b..be661a4df70a 100644 --- a/website/docs/cdktf/python/r/networkmonitor_probe.html.markdown +++ b/website/docs/cdktf/python/r/networkmonitor_probe.html.markdown @@ -49,6 +49,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +- `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). - `destination` - (Required) The destination IP address. This must be either IPV4 or IPV6. - `destination_port` - (Optional) The port associated with the destination. This is required only if the protocol is TCP and must be a number between 1 and 65536. - `monitor_name` - (Required) The name of the monitor. 
@@ -90,4 +91,4 @@ Using `terraform import`, import `aws_networkmonitor_probe` using the monitor na % terraform import aws_networkmonitor_probe.example monitor-7786087912324693644,probe-3qm8p693i4fi1h8lqylzkbp42e ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/oam_link.html.markdown b/website/docs/cdktf/python/r/oam_link.html.markdown index 347d9af054ee..93d6b199e7ae 100644 --- a/website/docs/cdktf/python/r/oam_link.html.markdown +++ b/website/docs/cdktf/python/r/oam_link.html.markdown @@ -118,6 +118,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `link_configuration` - (Optional) Configuration for creating filters that specify that only some metric namespaces or log groups are to be shared from the source account to the monitoring account. See [`link_configuration` Block](#link_configuration-block) for details. * `tags` - (Optional) A map of tags to assign to the resource. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. 
@@ -183,4 +184,4 @@ Using `terraform import`, import CloudWatch Observability Access Manager Link us % terraform import aws_oam_link.example arn:aws:oam:us-west-2:123456789012:link/link-id ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/oam_sink.html.markdown b/website/docs/cdktf/python/r/oam_sink.html.markdown index 89f0a06302e0..79cc3cbf9e51 100644 --- a/website/docs/cdktf/python/r/oam_sink.html.markdown +++ b/website/docs/cdktf/python/r/oam_sink.html.markdown @@ -44,6 +44,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `tags` - (Optional) A map of tags to assign to the resource. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. 
## Attribute Reference @@ -87,4 +88,4 @@ Using `terraform import`, import CloudWatch Observability Access Manager Sink us % terraform import aws_oam_sink.example arn:aws:oam:us-west-2:123456789012:sink/sink-id ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/oam_sink_policy.html.markdown b/website/docs/cdktf/python/r/oam_sink_policy.html.markdown index 153777b7b564..9432b90255ef 100644 --- a/website/docs/cdktf/python/r/oam_sink_policy.html.markdown +++ b/website/docs/cdktf/python/r/oam_sink_policy.html.markdown @@ -60,8 +60,9 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -The following arguments are required: +This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `sink_identifier` - (Required) ARN of the sink to attach this policy to. * `policy` - (Required) JSON policy to use. If you are updating an existing policy, the entire existing policy is replaced by what you specify here. 
@@ -104,4 +105,4 @@ Using `terraform import`, import CloudWatch Observability Access Manager Sink Po % terraform import aws_oam_sink_policy.example arn:aws:oam:us-west-2:123456789012:sink/sink-id ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/odb_cloud_autonomous_vm_cluster.html.markdown b/website/docs/cdktf/python/r/odb_cloud_autonomous_vm_cluster.html.markdown new file mode 100644 index 000000000000..1bdb35a7f88d --- /dev/null +++ b/website/docs/cdktf/python/r/odb_cloud_autonomous_vm_cluster.html.markdown @@ -0,0 +1,201 @@ +--- +subcategory: "Oracle Database@AWS" +layout: "AWS: aws_odb_cloud_autonomous_vm_cluster" +page_title: "AWS: aws_odb_cloud_autonomous_vm_cluster" +description: |- + Terraform resource managing cloud autonomous vm cluster in AWS for Oracle Database@AWS. +--- + + + +# Resource: aws_odb_cloud_autonomous_vm_cluster + +Terraform resource managing cloud autonomous vm cluster in AWS for Oracle Database@AWS. + +You can find out more about Oracle Database@AWS from [User Guide](https://docs.aws.amazon.com/odb/latest/UserGuide/what-is-odb.html). + +## Example Usage + +### Basic Usage + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. 
+# +from imports.aws.odb_cloud_autonomous_vm_cluster import OdbCloudAutonomousVmCluster +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + OdbCloudAutonomousVmCluster(self, "avmc_with_all_params", + autonomous_data_storage_size_in_tbs=5, + cloud_exadata_infrastructure_id="", + cpu_core_count_per_node=40, + db_servers=["", ""], + description="my first avmc", + display_name="Ofake_my avmc", + license_model="LICENSE_INCLUDED", + maintenance_window=[OdbCloudAutonomousVmClusterMaintenanceWindow( + days_of_week=[OdbCloudAutonomousVmClusterMaintenanceWindowDaysOfWeek( + name="MONDAY" + ), OdbCloudAutonomousVmClusterMaintenanceWindowDaysOfWeek( + name="TUESDAY" + ) + ], + hours_of_day=[4, 16], + lead_time_in_weeks=3, + months=[OdbCloudAutonomousVmClusterMaintenanceWindowMonths( + name="FEBRUARY" + ), OdbCloudAutonomousVmClusterMaintenanceWindowMonths( + name="MAY" + ), OdbCloudAutonomousVmClusterMaintenanceWindowMonths( + name="AUGUST" + ), OdbCloudAutonomousVmClusterMaintenanceWindowMonths( + name="NOVEMBER" + ) + ], + preference="CUSTOM_PREFERENCE", + weeks_of_month=[2, 4] + ) + ], + memory_per_oracle_compute_unit_in_gbs=2, + odb_network_id="", + scan_listener_port_non_tls=1024, + scan_listener_port_tls=8561, + tags={ + "env": "dev" + }, + time_zone="UTC", + total_container_databases=1 + ) + OdbCloudAutonomousVmCluster(self, "avmc_with_minimum_parameters", + autonomous_data_storage_size_in_tbs=5, + cloud_exadata_infrastructure_id="", + cpu_core_count_per_node=40, + db_servers=[""], + display_name="Ofake-avmc-my_avmc", + license_model="LICENSE_INCLUDED", + maintenance_window=[OdbCloudAutonomousVmClusterMaintenanceWindow( + preference="NO_PREFERENCE" + ) + ], + memory_per_oracle_compute_unit_in_gbs=2, + odb_network_id="", + scan_listener_port_non_tls=1024, + scan_listener_port_tls=8561, + total_container_databases=1 + ) +``` + +## Argument Reference + +The following arguments are required: + +* 
`cloud_exadata_infrastructure_id` - (Required) Exadata infrastructure id. Changing this will force terraform to create new resource. +* `autonomous_data_storage_size_in_tbs` - (Required) The data storage size allocated for Autonomous Databases in the Autonomous VM cluster, in TB. Changing this will force terraform to create new resource. +* `cpu_core_count_per_node` - (Required) The number of CPU cores enabled per node in the Autonomous VM cluster. Changing this will force terraform to create new resource. +* `db_servers` - (Required) The database servers in the Autonomous VM cluster. Changing this will force terraform to create new resource. +* `display_name` - (Required) The display name of the Autonomous VM cluster. Changing this will force terraform to create new resource. +* `memory_per_oracle_compute_unit_in_gbs` - (Required) The amount of memory allocated per Oracle Compute Unit, in GB. Changing this will force terraform to create new resource. +* `odb_network_id` - (Required) The unique identifier of the ODB network associated with this Autonomous VM Cluster. Changing this will force terraform to create new resource. +* `scan_listener_port_non_tls` - (Required) The SCAN listener port for non-TLS (TCP) protocol. The default is 1521. Changing this will force terraform to create new resource. +* `scan_listener_port_tls` - (Required) The SCAN listener port for TLS (TCP) protocol. The default is 2484. Changing this will force terraform to create new resource. +* `total_container_databases` - (Required) The total number of Autonomous Container Databases that can be created with the allocated local storage. Changing this will force terraform to create new resource. +* `maintenance_window` - (Required) The maintenance window of the Autonomous VM cluster. Changing this will force terraform to create new resource. + +The following arguments are optional: + +* `description` - (Optional) The description of the Autonomous VM cluster. 
+* `is_mtls_enabled_vm_cluster` - (Optional) Indicates whether mutual TLS (mTLS) authentication is enabled for the Autonomous VM cluster. Changing this will force terraform to create new resource.
+* `license_model` - (Optional) The license model for the Autonomous VM cluster. Valid values are LICENSE_INCLUDED or BRING_YOUR_OWN_LICENSE. Changing this will force terraform to create new resource.
+* `time_zone` - (Optional) The time zone of the Autonomous VM cluster. Changing this will force terraform to create new resource.
+* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference).
+* `tags` - (Optional) A map of tags to assign to the Autonomous VM cluster. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level.
+
+### maintenance_window
+
+* `preference` - (Required) The preference for the maintenance window scheduling. Changing this will force terraform to create new resource.
+* `days_of_week` - (Optional) The days of the week when maintenance can be performed. Changing this will force terraform to create new resource.
+* `hours_of_day` - (Optional) The hours of the day when maintenance can be performed. Changing this will force terraform to create new resource.
+* `lead_time_in_weeks` - (Optional) The lead time in weeks before the maintenance window. Changing this will force terraform to create new resource.
+* `months` - (Optional) The months when maintenance can be performed. Changing this will force terraform to create new resource.
+* `weeks_of_month` - (Optional) The weeks of the month when maintenance can be performed.
Changing this will force terraform to create new resource. + +## Attribute Reference + +This resource exports the following attributes in addition to the arguments above: + +* `arn` - The Amazon Resource Name (ARN) for the Exadata infrastructure. +* `autonomous_data_storage_percentage` - The progress of the current operation on the Autonomous VM cluster, as a percentage. +* `available_autonomous_data_storage_size_in_tbs` - The available data storage space for Autonomous Databases in the Autonomous VM cluster, in TB. +* `available_container_databases` - The number of Autonomous CDBs that you can create with the currently available storage. +* `available_cpus` - The number of CPU cores available for allocation to Autonomous Databases. +* `compute_model` - The compute model of the Autonomous VM cluster: ECPU or OCPU. +* `cpu_core_count` - The total number of CPU cores in the Autonomous VM cluster. +* `cpu_percentage` - The percentage of total CPU cores currently in use in the Autonomous VM cluster. +* `created_at` - The date and time when the Autonomous VM cluster was created. +* `data_storage_size_in_gbs` - The total data storage allocated to the Autonomous VM cluster, in GB. +* `data_storage_size_in_tbs` - The total data storage allocated to the Autonomous VM cluster, in TB. +* `odb_node_storage_size_in_gbs` - The local node storage allocated to the Autonomous VM cluster, in gigabytes (GB). +* `domain` - The domain name of the Autonomous VM cluster. +* `exadata_storage_in_tbs_lowest_scaled_value` - The minimum value to which you can scale down the Exadata storage, in TB. +* `hostname` - The hostname of the Autonomous VM cluster. +* `license_model` - The license model for the Autonomous VM cluster. Valid values are LICENSE_INCLUDED or BRING_YOUR_OWN_LICENSE. +* `max_acds_lowest_scaled_value` - The minimum value to which you can scale down the maximum number of Autonomous CDBs. 
+* `memory_size_in_gbs` - The total amount of memory allocated to the Autonomous VM cluster, in gigabytes(GB). +* `node_count` - The number of database server nodes in the Autonomous VM cluster. +* `non_provisionable_autonomous_container_databases` - The number of Autonomous CDBs that can't be provisioned because of resource constraints. +* `oci_resource_anchor_name` - The name of the OCI resource anchor associated with this Autonomous VM cluster. +* `oci_url` - The URL for accessing the OCI console page for this Autonomous VM cluster. +* `ocid` - The Oracle Cloud Identifier (OCID) of the Autonomous VM cluster. +* `percent_progress` - The progress of the current operation on the Autonomous VM cluster, as a percentage. +* `provisionable_autonomous_container_databases` - The number of Autonomous CDBs that can be provisioned in the Autonomous VM cluster. +* `provisioned_autonomous_container_databases` - The number of Autonomous CDBs currently provisioned in the Autonomous VM cluster. +* `provisioned_cpus` - The number of CPUs provisioned in the Autonomous VM cluster. +* `reclaimable_cpus` - The number of CPU cores that can be reclaimed from terminated or scaled-down Autonomous Databases. +* `reserved_cpus` - The number of CPU cores reserved for system operations and redundancy. +* `shape` - The shape of the Exadata infrastructure for the Autonomous VM cluster. +* `status` - The status of the Autonomous VM cluster. Possible values include CREATING, AVAILABLE, UPDATING, DELETING, DELETED, FAILED. +* `status_reason` - Additional information about the current status of the Autonomous VM cluster. +* `time_zone` - The time zone of the Autonomous VM cluster. +* `time_ords_certificate_expires` - The expiration date and time of the ORDS certificate. +* `time_database_ssl_certificate_expires` - The expiration date and time of the database SSL certificate. +* `tags_all` - The combined set of user-defined and provider-defined tags. 
+
+## Timeouts
+
+[Configuration options](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts):
+
+* `create` - (Default `24h`)
+* `update` - (Default `24h`)
+* `delete` - (Default `24h`)
+
+## Import
+
+In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import cloud autonomous vm cluster using the `id`. For example:
+
+```python
+# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug
+from constructs import Construct
+from cdktf import TerraformStack
+#
+# Provider bindings are generated by running `cdktf get`.
+# See https://cdk.tf/provider-generation for more details.
+#
+from imports.aws.odb_cloud_autonomous_vm_cluster import OdbCloudAutonomousVmCluster
+class MyConvertedCode(TerraformStack):
+    def __init__(self, scope, name):
+        super().__init__(scope, name)
+        OdbCloudAutonomousVmCluster.generate_config_for_import(self, "example", "example")
+```
+
+Using `terraform import`, import cloud autonomous vm cluster using the `id`. For example:
+
+```console
+% terraform import aws_odb_cloud_autonomous_vm_cluster.example example
+```
+
+ \ No newline at end of file
diff --git a/website/docs/cdktf/python/r/odb_cloud_exadata_infrastructure.html.markdown b/website/docs/cdktf/python/r/odb_cloud_exadata_infrastructure.html.markdown
new file mode 100644
index 000000000000..978757df67ad
--- /dev/null
+++ b/website/docs/cdktf/python/r/odb_cloud_exadata_infrastructure.html.markdown
@@ -0,0 +1,172 @@
+---
+subcategory: "Oracle Database@AWS"
+layout: "aws"
+page_title: "AWS: aws_odb_cloud_exadata_infrastructure"
+description: |-
+  Terraform resource for managing exadata infrastructure resource for Oracle Database@AWS.
+---
+
+
+
+# Resource: aws_odb_cloud_exadata_infrastructure
+
+Terraform resource for managing exadata infrastructure resource in AWS for Oracle Database@AWS.
+
+## Example Usage
+
+### Basic Usage
+
+```python
+# DO NOT EDIT.
Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.odb_cloud_exadata_infrastructure import OdbCloudExadataInfrastructure +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + OdbCloudExadataInfrastructure(self, "example", + availability_zone_id="use1-az6", + compute_count=2, + customer_contacts_to_send_to_oci=[OdbCloudExadataInfrastructureCustomerContactsToSendToOci( + email="abc@example.com" + ), OdbCloudExadataInfrastructureCustomerContactsToSendToOci( + email="def@example.com" + ) + ], + database_server_type="X11M", + display_name="my-exa-infra", + maintenance_window=[OdbCloudExadataInfrastructureMaintenanceWindow( + custom_action_timeout_in_mins=16, + days_of_week=[OdbCloudExadataInfrastructureMaintenanceWindowDaysOfWeek( + name="MONDAY" + ), OdbCloudExadataInfrastructureMaintenanceWindowDaysOfWeek( + name="TUESDAY" + ) + ], + hours_of_day=[11, 16], + is_custom_action_timeout_enabled=True, + lead_time_in_weeks=3, + months=[OdbCloudExadataInfrastructureMaintenanceWindowMonths( + name="FEBRUARY" + ), OdbCloudExadataInfrastructureMaintenanceWindowMonths( + name="MAY" + ), OdbCloudExadataInfrastructureMaintenanceWindowMonths( + name="AUGUST" + ), OdbCloudExadataInfrastructureMaintenanceWindowMonths( + name="NOVEMBER" + ) + ], + patching_mode="ROLLING", + preference="CUSTOM_PREFERENCE", + weeks_of_month=[2, 4] + ) + ], + shape="Exadata.X11M", + storage_count=3, + storage_server_type="X11M-HC", + tags={ + "env": "dev" + } + ) +``` + +## Argument Reference + +The following arguments are required: + +* `display_name` - (Required) The user-friendly name for the Exadata infrastructure. Changing this will force terraform to create a new resource. 
+* `shape` - (Required) The model name of the Exadata infrastructure. Changing this will force terraform to create new resource.
+* `storage_count` - (Required) The number of storage servers that are activated for the Exadata infrastructure. Changing this will force terraform to create new resource.
+* `compute_count` - (Required) The number of compute instances allocated to the Exadata infrastructure. Changing this will force terraform to create new resource.
+* `availability_zone_id` - (Required) The AZ ID of the AZ where the Exadata infrastructure is located. Changing this will force terraform to create new resource.
+
+The following arguments are optional:
+
+* `customer_contacts_to_send_to_oci` - (Optional) The email addresses of contacts to receive notification from Oracle about maintenance updates for the Exadata infrastructure. Changing this will force terraform to create new resource.
+* `availability_zone` - (Optional) The name of the Availability Zone (AZ) where the Exadata infrastructure is located. Changing this will force terraform to create new resource.
+* `database_server_type` - (Optional) The database server model type of the Exadata infrastructure. For the list of valid model names, use the ListDbSystemShapes operation. This is a mandatory parameter for Exadata.X11M system shape. Changing this will force terraform to create new resource.
+* `storage_server_type` - (Optional) The storage server model type of the Exadata infrastructure. For the list of valid model names, use the ListDbSystemShapes operation. This is a mandatory parameter for Exadata.X11M system shape. Changing this will force terraform to create new resource.
+* `tags` - (Optional) A map of tags to assign to the exadata infrastructure.
If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level.
+* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference).
+
+### maintenance_window
+
+* `custom_action_timeout_in_mins` - (Required) The custom action timeout in minutes for the maintenance window.
+* `is_custom_action_timeout_enabled` - (Required) Indicates whether custom action timeout is enabled for the maintenance window.
+* `patching_mode` - (Required) The patching mode for the maintenance window.
+* `preference` - (Required) The preference for the maintenance window scheduling.
+* `days_of_week` - (Optional) The days of the week when maintenance can be performed.
+* `hours_of_day` - (Optional) The hours of the day when maintenance can be performed.
+* `lead_time_in_weeks` - (Optional) The lead time in weeks before the maintenance window.
+* `months` - (Optional) The months when maintenance can be performed.
+* `weeks_of_month` - (Optional) The weeks of the month when maintenance can be performed.
+
+## Attribute Reference
+
+This resource exports the following attributes in addition to the arguments above:
+
+* `id` - Unique identifier for the Exadata infrastructure.
+* `arn` - Amazon Resource Name (ARN) of the Exadata infrastructure.
+* `activated_storage_count` - The number of storage servers requested for the Exadata infrastructure.
+* `additional_storage_count` - The number of storage servers requested for the Exadata infrastructure.
+* `available_storage_size_in_gbs` - The amount of available storage, in gigabytes (GB), for the Exadata infrastructure.
+* `cpu_count` - The total number of CPU cores that are allocated to the Exadata infrastructure. +* `data_storage_size_in_tbs` - The size of the Exadata infrastructure's data disk group, in terabytes (TB). +* `db_node_storage_size_in_gbs` - The size of the Exadata infrastructure's local node storage, in gigabytes (GB). +* `db_server_version` - The software version of the database servers (dom0) in the Exadata infrastructure. +* `last_maintenance_run_id` - The Oracle Cloud Identifier (OCID) of the last maintenance run for the Exadata infrastructure. +* `max_cpu_count` - The total number of CPU cores available on the Exadata infrastructure. +* `max_data_storage_in_tbs` - The total amount of data disk group storage, in terabytes (TB), that's available on the Exadata infrastructure. +* `max_db_node_storage_size_in_gbs` - The total amount of local node storage, in gigabytes (GB), that's available on the Exadata infrastructure. +* `max_memory_in_gbs` - The total amount of memory in gigabytes (GB) available on the Exadata infrastructure. +* `monthly_db_server_version` - The monthly software version of the database servers in the Exadata infrastructure. +* `monthly_storage_server_version` - The monthly software version of the storage servers installed on the Exadata infrastructure. +* `next_maintenance_run_id` - The OCID of the next maintenance run for the Exadata infrastructure. +* `ocid` - The OCID of the Exadata infrastructure. +* `oci_resource_anchor_name` - The name of the OCI resource anchor for the Exadata infrastructure. +* `percent_progress` - The amount of progress made on the current operation on the Exadata infrastructure, expressed as a percentage. +* `status` - The current status of the Exadata infrastructure. +* `status_reason` - Additional information about the status of the Exadata infrastructure. +* `storage_server_version` - The software version of the storage servers on the Exadata infrastructure. 
+* `total_storage_size_in_gbs` - The total amount of storage, in gigabytes (GB), on the Exadata infrastructure.
+* `created_at` - The time when the Exadata infrastructure was created.
+* `compute_model` - The OCI compute model used when you create or clone an instance: ECPU or OCPU.
+
+## Timeouts
+
+[Configuration options](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts):
+
+* `create` - (Default `24h`)
+* `update` - (Default `24h`)
+* `delete` - (Default `24h`)
+
+## Import
+
+In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Exadata Infrastructure using the `id`. For example:
+
+```python
+# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug
+from constructs import Construct
+from cdktf import TerraformStack
+#
+# Provider bindings are generated by running `cdktf get`.
+# See https://cdk.tf/provider-generation for more details.
+#
+from imports.aws.odb_cloud_exadata_infrastructure import OdbCloudExadataInfrastructure
+class MyConvertedCode(TerraformStack):
+    def __init__(self, scope, name):
+        super().__init__(scope, name)
+        OdbCloudExadataInfrastructure.generate_config_for_import(self, "example", "example")
+```
+
+Using `terraform import`, import Exadata Infrastructure using the `id`.
For example:
+
+```console
+% terraform import aws_odb_cloud_exadata_infrastructure.example example
+```
+
+ \ No newline at end of file
diff --git a/website/docs/cdktf/python/r/odb_cloud_vm_cluster.html.markdown b/website/docs/cdktf/python/r/odb_cloud_vm_cluster.html.markdown
new file mode 100644
index 000000000000..5dd4f284526b
--- /dev/null
+++ b/website/docs/cdktf/python/r/odb_cloud_vm_cluster.html.markdown
@@ -0,0 +1,175 @@
+---
+subcategory: "Oracle Database@AWS"
+layout: "aws"
+page_title: "AWS: aws_odb_cloud_vm_cluster"
+description: |-
+  Terraform resource for managing cloud vm cluster resource in AWS for Oracle Database@AWS.
+---
+
+
+
+# Resource: aws_odb_cloud_vm_cluster
+
+Terraform resource for managing cloud vm cluster resource in AWS for Oracle Database@AWS.
+
+You can find out more about Oracle Database@AWS from [User Guide](https://docs.aws.amazon.com/odb/latest/UserGuide/what-is-odb.html).
+
+## Example Usage
+
+### Basic Usage
+
+```python
+# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug
+from constructs import Construct
+from cdktf import TerraformStack
+#
+# Provider bindings are generated by running `cdktf get`.
+# See https://cdk.tf/provider-generation for more details.
+# +from imports.aws.odb_cloud_vm_cluster import OdbCloudVmCluster +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + OdbCloudVmCluster(self, "with_all_parameters", + cloud_exadata_infrastructure_id="exa_gjrmtxl4qk", + cluster_name="julia-13", + cpu_core_count=6, + data_collection_options=[OdbCloudVmClusterDataCollectionOptions( + is_diagnostics_events_enabled=True, + is_health_monitoring_enabled=True, + is_incident_logs_enabled=True + ) + ], + data_storage_size_in_tbs=20, + db_node_storage_size_in_gbs=120, + db_servers=["my-dbserver-1", "my-db-server-2"], + display_name="my-vmc", + gi_version="23.0.0.0", + hostname_prefix="apollo12", + is_local_backup_enabled=True, + is_sparse_diskgroup_enabled=True, + license_model="LICENSE_INCLUDED", + memory_size_in_gbs=60, + odb_network_id="odbnet_3l9st3litg", + scan_listener_port_tcp=1521, + ssh_public_keys=["my-ssh-key"], + tags={ + "env": "dev" + }, + timezone="UTC" + ) + OdbCloudVmCluster(self, "with_minimum_parameter", + cloud_exadata_infrastructure_id="exa_gjrmtxl4qk", + cpu_core_count=6, + data_collection_options=[OdbCloudVmClusterDataCollectionOptions( + is_diagnostics_events_enabled=False, + is_health_monitoring_enabled=False, + is_incident_logs_enabled=False + ) + ], + data_storage_size_in_tbs=20, + db_node_storage_size_in_gbs=120, + db_servers=["db-server-1", "db-server-2"], + display_name="my-exa-infra", + gi_version="23.0.0.0", + hostname_prefix="apollo12", + is_local_backup_enabled=True, + is_sparse_diskgroup_enabled=True, + license_model="LICENSE_INCLUDED", + memory_size_in_gbs=60, + odb_network_id="odbnet_3l9st3litg", + ssh_public_keys=["public-ssh-key"] + ) +``` + +## Argument Reference + +The following arguments are required: + +* `cloud_exadata_infrastructure_id` - (Required) The unique identifier of the Exadata infrastructure for this VM cluster. Changing this will create a new resource. 
+* `cpu_core_count` - (Required) The number of CPU cores to enable on the VM cluster. Changing this will create a new resource. +* `db_servers` - (Required) The list of database servers for the VM cluster. Changing this will create a new resource. +* `display_name` - (Required) A user-friendly name for the VM cluster. Changing this will create a new resource. +* `gi_version` - (Required) A valid software version of Oracle Grid Infrastructure (GI). To get the list of valid values, use the ListGiVersions operation and specify the shape of the Exadata infrastructure. Example: 19.0.0.0 Changing this will create a new resource. +* `hostname_prefix` - (Required) The host name prefix for the VM cluster. Constraints: - Can't be "localhost" or "hostname". - Can't contain "-version". - The maximum length of the combined hostname and domain is 63 characters. - The hostname must be unique within the subnet. Changing this will create a new resource. +* `odb_network_id` - (Required) The unique identifier of the ODB network for the VM cluster. Changing this will create a new resource. +* `ssh_public_keys` - (Required) The public key portion of one or more key pairs used for SSH access to the VM cluster. Changing this will create a new resource. +* `data_collection_options` - (Required) The set of preferences for the various diagnostic collection options for the VM cluster. + +The following arguments are optional: + +* `cluster_name` - (Optional) The name of the Grid Infrastructure (GI) cluster. Changing this will create a new resource. +* `data_storage_size_in_tbs` - (Optional) The size of the data disk group, in terabytes (TBs), to allocate for the VM cluster. Changing this will create a new resource. +* `db_node_storage_size_in_gbs` - (Optional) The amount of local node storage, in gigabytes (GBs), to allocate for the VM cluster. Changing this will create a new resource. 
+* `is_local_backup_enabled` - (Optional) Specifies whether to enable database backups to local Exadata storage for the VM cluster. Changing this will create a new resource. +* `is_sparse_diskgroup_enabled` - (Optional) Specifies whether to create a sparse disk group for the VM cluster. Changing this will create a new resource. +* `license_model` - (Optional) The Oracle license model to apply to the VM cluster. Default: LICENSE_INCLUDED. Changing this will create a new resource. +* `memory_size_in_gbs` - (Optional) The amount of memory, in gigabytes (GBs), to allocate for the VM cluster. Changing this will create a new resource. +* `scan_listener_port_tcp` - (Optional) The port number for TCP connections to the single client access name (SCAN) listener. Valid values: 1024–8999, except 2484, 6100, 6200, 7060, 7070, 7085, and 7879. Default: 1521. Changing this will create a new resource. +* `timezone` - (Optional) The configured time zone of the VM cluster. Changing this will create a new resource. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `tags` - (Optional) A map of tags to assign to the cloud vm cluster. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. + +## Attribute Reference + +This resource exports the following attributes in addition to the arguments above: + +* `arn` - The Amazon Resource Name (ARN) for the cloud vm cluster. +* `disk_redundancy` - The type of redundancy for the VM cluster: NORMAL (2-way) or HIGH (3-way). +* `domain` - The domain name associated with the VM cluster. 
+* `hostname_prefix_computed` - The computed host name for the VM cluster. Constraints: - Can't be "localhost" or "hostname". - Can't contain "-version". - The maximum length of the combined hostname and domain is 63 characters. - The hostname must be unique within the subnet. +* `iorm_config_cache` - The Exadata IORM (I/O Resource Manager) configuration cache details for the VM cluster. +* `last_update_history_entry_id` - The OCID of the most recent maintenance update history entry. +* `listener_port` - The listener port number configured on the VM cluster. +* `node_count` - The total number of nodes in the VM cluster. +* `ocid` - The OCID (Oracle Cloud Identifier) of the VM cluster. +* `oci_resource_anchor_name` - The name of the OCI resource anchor associated with the VM cluster. +* `oci_url` - The HTTPS link to the VM cluster resource in OCI. +* `percent_progress` - The percentage of progress made on the current operation for the VM cluster. +* `scan_dns_name` - The fully qualified domain name (FQDN) for the SCAN IP addresses associated with the VM cluster. +* `scan_dns_record_id` - The OCID of the DNS record for the SCAN IPs linked to the VM cluster. +* `scan_ip_ids` - The list of OCIDs for SCAN IP addresses associated with the VM cluster. +* `shape` - The hardware model name of the Exadata infrastructure running the VM cluster. +* `status` - The current lifecycle status of the VM cluster. +* `status_reason` - Additional information regarding the current status of the VM cluster. +* `storage_size_in_gbs` - The local node storage allocated to the VM cluster, in gigabytes (GB). +* `system_version` - The operating system version of the image chosen for the VM cluster. +* `vip_ids` - The virtual IP (VIP) addresses assigned to the VM cluster. CRS assigns one VIP per node for failover support. +* `created_at` - The timestamp when the VM cluster was created. 
+* `compute_model` - The compute model used when the instance is created or cloned — either ECPU or OCPU. ECPU is a virtualized compute unit; OCPU is a physical processor core with hyper-threading. +* `tags_all` - The combined set of user-defined and provider-defined tags. + +## Timeouts + +[Configuration options](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts): + +* `create` - (Default `24h`) +* `update` - (Default `24h`) +* `delete` - (Default `24h`) + +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import cloud vm cluster using the `id`. For example: + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.odb_cloud_vm_cluster import OdbCloudVmCluster +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + OdbCloudVmCluster.generate_config_for_import(self, "example", "example") +``` + +Using `terraform import`, import cloud vm cluster using the `id`. For example: + +```console +% terraform import aws_odb_cloud_vm_cluster.example example +``` + + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/odb_network.html.markdown b/website/docs/cdktf/python/r/odb_network.html.markdown new file mode 100644 index 000000000000..349f407b5295 --- /dev/null +++ b/website/docs/cdktf/python/r/odb_network.html.markdown @@ -0,0 +1,116 @@ +--- +subcategory: "Oracle Database@AWS" +layout: "AWS: aws_odb_network" +page_title: "AWS: aws_odb_network" +description: |- + Terraform resource for managing odb network of an Oracle Database@AWS. 
+--- + + + +# Resource: aws_odb_network + +Terraform resource for managing odb Network resource in AWS for Oracle Database@AWS. + +## Example Usage + +### Basic Usage + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.odb_network import OdbNetwork +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + OdbNetwork(self, "example", + availability_zone_id="use1-az6", + backup_subnet_cidr="10.2.1.0/24", + client_subnet_cidr="10.2.0.0/24", + display_name="odb-my-net", + s3_access="DISABLED", + tags={ + "env": "dev" + }, + zero_etl_access="DISABLED" + ) +``` + +## Argument Reference + +The following arguments are required: + +* `display_name` - (Required) The user-friendly name for the odb network. Changing this will force terraform to create a new resource. +* `availability_zone_id` - (Required) The AZ ID of the AZ where the ODB network is located. Changing this will force terraform to create new resource. +* `client_subnet_cidr` - (Required) The CIDR notation for the network resource. Changing this will force terraform to create new resource. +* `backup_subnet_cidr` - (Required) The CIDR range of the backup subnet for the ODB network. Changing this will force terraform to create new resource. +* `s3_access` - (Required) Specifies the configuration for Amazon S3 access from the ODB network. +* `zero_etl_access` - (Required) Specifies the configuration for Zero-ETL access from the ODB network. + +The following arguments are optional: + +* `custom_domain_name` - (Optional) The name of the custom domain that the network is located. Custom_domain_name and default_dns_prefix both can't be given. Changing this will force terraform to create new resource. 
+* `availability_zone` - (Optional) The name of the Availability Zone (AZ) where the odb network is located. Changing this will force terraform to create new resource. Make sure availability_zone maps correctly with availability_zone_id. +* `s3_policy_document` - (Optional) Specifies the endpoint policy for Amazon S3 access from the ODB network. +* `default_dns_prefix` - (Optional) The default DNS prefix for the network resource. Changing this will force terraform to create new resource. +* `tags` - (Optional) A map of tags to assign to the odb network. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). + +## Attribute Reference + +This resource exports the following attributes in addition to the arguments above: + +* `id` - Unique identifier of the odb network resource. +* `arn` - Amazon Resource Name (ARN) of the odb network resource. +* `oci_dns_forwarding_configs` - The DNS resolver endpoint in OCI for forwarding DNS queries for the ociPrivateZone domain. +* `peered_cidrs` - The list of CIDR ranges from the peered VPC that are allowed access to the ODB network. Please refer odb network peering documentation. +* `oci_network_anchor_id` - The unique identifier of the OCI network anchor for the ODB network. +* `oci_network_anchor_url` - The URL of the OCI network anchor for the ODB network. +* `oci_resource_anchor_name` - The name of the OCI resource anchor for the ODB network. 
+* `oci_vcn_id` - The unique identifier Oracle Cloud ID (OCID) of the OCI VCN for the ODB network. +* `oci_vcn_url` - The URL of the OCI VCN for the ODB network. +* `percent_progress` - The amount of progress made on the current operation on the ODB network, expressed as a percentage. +* `managed_services` - The managed services configuration for the ODB network. +* `status` - The status of the network resource. +* `status_reason` - Additional information about the current status of the ODB network. +* `created_at` - The date and time when the ODB network was created. + +## Timeouts + +[Configuration options](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts): + +* `create` - (Default `24h`) +* `update` - (Default `24h`) +* `delete` - (Default `24h`) + +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Odb Network using the `id`. For example: + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.odb_network import OdbNetwork +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + OdbNetwork.generate_config_for_import(self, "example", "example") +``` + +Using `terraform import`, import Odb Network using the `id`. 
For example: + +```console +% terraform import aws_odb_network.example example +``` + + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/odb_network_peering_connection.html.markdown b/website/docs/cdktf/python/r/odb_network_peering_connection.html.markdown new file mode 100644 index 000000000000..ede72d98f7e2 --- /dev/null +++ b/website/docs/cdktf/python/r/odb_network_peering_connection.html.markdown @@ -0,0 +1,102 @@ +--- +subcategory: "Oracle Database@AWS" +layout: "AWS: aws_odb_network_peering_connection" +page_title: "AWS: aws_odb_network_peering_connection" +description: |- + Terraform resource for managing oracle database network peering resource in AWS. +--- + + + +# Resource: aws_odb_network_peering_connection + +Terraform resource for managing oracle database network peering resource in AWS. + +You can find out more about Oracle Database@AWS from [User Guide](https://docs.aws.amazon.com/odb/latest/UserGuide/what-is-odb.html). + +## Example Usage + +### Basic Usage + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.odb_network_peering_connection import OdbNetworkPeeringConnection +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + OdbNetworkPeeringConnection(self, "example", + display_name="example", + odb_network_id="my-odb-network-id", + peer_network_id="my-vpc-id", + tags={ + "env": "dev" + } + ) +``` + +## Argument Reference + +The following arguments are required: + +* `odb_network_id` - (Required) The unique identifier of the ODB network that initiates the peering connection. A sample ID is `odbpcx-abcdefgh12345678`. Changing this will force Terraform to create a new resource. 
+* `peer_network_id` - (Required) The unique identifier of the ODB peering connection. Changing this will force Terraform to create a new resource. +* `display_name` - (Required) Display name of the ODB network peering connection. Changing this will force Terraform to create a new resource. + +The following arguments are optional: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `tags` - (Optional) A map of tags to assign to the resource. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. + +## Attribute Reference + +This resource exports the following attributes in addition to the arguments above: + +* `status` - Status of the ODB network peering connection. +* `status_reason` - The reason for the current status of the ODB peering connection. +* `odb_network_arn` - ARN of the ODB network peering connection. +* `peer_network_arn` - ARN of the peer network peering connection. +* `odb_peering_connection_type` - Type of the ODB peering connection. +* `created_at` - Created time of the ODB network peering connection. +* `percent_progress` - Progress of the ODB network peering connection. +* `tags_all` - A map of tags assigned to the resource, including inherited tags. 
+ +## Timeouts + +[Configuration options](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts): + +* `create` - (Default `24h`) +* `update` - (Default `24h`) +* `delete` - (Default `24h`) + +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import odb network peering connection using the `id`. For example: + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.odb_network_peering_connection import OdbNetworkPeeringConnection
class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + OdbNetworkPeeringConnection.generate_config_for_import(self, "example", "example") +``` + +Using `terraform import`, import odb network peering connection using the `id`. For example: + +```console +% terraform import aws_odb_network_peering_connection.example example +``` + + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/opensearch_authorize_vpc_endpoint_access.html.markdown b/website/docs/cdktf/python/r/opensearch_authorize_vpc_endpoint_access.html.markdown index b023b46d6db4..b8cdb5fcd97e 100644 --- a/website/docs/cdktf/python/r/opensearch_authorize_vpc_endpoint_access.html.markdown +++ b/website/docs/cdktf/python/r/opensearch_authorize_vpc_endpoint_access.html.markdown @@ -38,8 +38,9 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -The following arguments are required: +This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `account` - (Required) AWS account ID to grant access to. * `domain_name` - (Required) Name of OpenSearch Service domain to provide access to. @@ -56,7 +57,7 @@ This resource exports the following attributes in addition to the arguments abov ## Import -In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import OpenSearch Authorize Vpc Endpoint Access using the `example_id_arg`. For example: +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import OpenSearch Authorize Vpc Endpoint Access using the `domain_name`. For example: ```python # DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug @@ -73,10 +74,10 @@ class MyConvertedCode(TerraformStack): OpensearchAuthorizeVpcEndpointAccess.generate_config_for_import(self, "example", "authorize_vpc_endpoint_access-id-12345678") ``` -Using `terraform import`, import OpenSearch Authorize Vpc Endpoint Access using the `example_id_arg`. For example: +Using `terraform import`, import OpenSearch Authorize Vpc Endpoint Access using the `domain_name`. 
For example: ```console % terraform import aws_opensearch_authorize_vpc_endpoint_access.example authorize_vpc_endpoint_access-id-12345678 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/opensearch_domain.html.markdown b/website/docs/cdktf/python/r/opensearch_domain.html.markdown index 62467c2bf4c5..2fa176a18ac4 100644 --- a/website/docs/cdktf/python/r/opensearch_domain.html.markdown +++ b/website/docs/cdktf/python/r/opensearch_domain.html.markdown @@ -100,7 +100,7 @@ class MyConvertedCode(TerraformStack): type="*" ) ], - resources=["arn:aws:es:${" + data_aws_region_current.name + "}:${" + current.account_id + "}:domain/${" + domain.value + "}/*" + resources=["arn:aws:es:${" + data_aws_region_current.region + "}:${" + current.account_id + "}:domain/${" + domain.value + "}/*" ] ) ] @@ -232,7 +232,7 @@ class MyConvertedCode(TerraformStack): type="*" ) ], - resources=["arn:aws:es:${" + data_aws_region_current.name + "}:${" + current.account_id + "}:domain/${" + domain.value + "}/*" + resources=["arn:aws:es:${" + data_aws_region_current.region + "}:${" + current.account_id + "}:domain/${" + domain.value + "}/*" ] ) ] @@ -384,6 +384,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `access_policies` - (Optional) IAM policy document specifying the access policies for the domain. * `advanced_options` - (Optional) Key-value string pairs to specify advanced configuration options. 
Note that the values for these configuration options must be strings (wrapped in quotes) or they may be wrong and cause a perpetual diff, causing Terraform to want to recreate your OpenSearch domain on every apply. * `advanced_security_options` - (Optional) Configuration block for [fine-grained access control](https://docs.aws.amazon.com/opensearch-service/latest/developerguide/fgac.html). Detailed below. @@ -560,7 +561,6 @@ This resource exports the following attributes in addition to the arguments abov * `endpoint_v2` - V2 domain endpoint that works with both IPv4 and IPv6 addresses, used to submit index, search, and data upload requests. * `dashboard_endpoint` - Domain-specific endpoint for Dashboard without https scheme. * `dashboard_endpoint_v2` - V2 domain endpoint for Dashboard that works with both IPv4 and IPv6 addresses, without https scheme. -* `kibana_endpoint` - (**Deprecated**) Domain-specific endpoint for kibana without https scheme. Use the `dashboard_endpoint` attribute instead. * `tags_all` - Map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). * `vpc_options.0.availability_zones` - If the domain was created inside a VPC, the names of the availability zones the configured `subnet_ids` were created inside. * `vpc_options.0.vpc_id` - If the domain was created inside a VPC, the ID of the VPC. @@ -598,4 +598,4 @@ Using `terraform import`, import OpenSearch domains using the `domain_name`. 
For % terraform import aws_opensearch_domain.example domain_name ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/opensearch_domain_policy.html.markdown b/website/docs/cdktf/python/r/opensearch_domain_policy.html.markdown index d04774b90f04..3c1a585ae676 100644 --- a/website/docs/cdktf/python/r/opensearch_domain_policy.html.markdown +++ b/website/docs/cdktf/python/r/opensearch_domain_policy.html.markdown @@ -63,6 +63,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `access_policies` - (Optional) IAM policy document specifying the access policies for the domain * `domain_name` - (Required) Name of the domain. @@ -77,4 +78,29 @@ This resource exports no additional attributes. * `update` - (Default `180m`) * `delete` - (Default `90m`) - \ No newline at end of file +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import OpenSearch Domain Policy using `domain_name` prefixed with `esd-policy-`. For example: + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. 
+# +from imports.aws.opensearch_domain_policy import OpensearchDomainPolicy +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + OpensearchDomainPolicy.generate_config_for_import(self, "example", "esd-policy-tf-test") +``` + +Using `terraform import`, import OpenSearch Domain Policy using `domain_name` prefixed with `esd-policy-`. For example: + +```console +% terraform import aws_opensearch_domain_policy.example esd-policy-tf-test +``` + + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/opensearch_domain_saml_options.html.markdown b/website/docs/cdktf/python/r/opensearch_domain_saml_options.html.markdown index 93214f58b809..e72084abb4c1 100644 --- a/website/docs/cdktf/python/r/opensearch_domain_saml_options.html.markdown +++ b/website/docs/cdktf/python/r/opensearch_domain_saml_options.html.markdown @@ -65,6 +65,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `saml_options` - (Optional) SAML authentication options for an AWS OpenSearch Domain. ### saml_options @@ -120,4 +121,4 @@ Using `terraform import`, import OpenSearch domains using the `domain_name`. 
For % terraform import aws_opensearch_domain_saml_options.example domain_name ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/opensearch_inbound_connection_accepter.html.markdown b/website/docs/cdktf/python/r/opensearch_inbound_connection_accepter.html.markdown index 1a707d150580..a962446bd529 100644 --- a/website/docs/cdktf/python/r/opensearch_inbound_connection_accepter.html.markdown +++ b/website/docs/cdktf/python/r/opensearch_inbound_connection_accepter.html.markdown @@ -40,12 +40,12 @@ class MyConvertedCode(TerraformStack): local_domain_info=OpensearchOutboundConnectionLocalDomainInfo( domain_name=local_domain.domain_name, owner_id=Token.as_string(current.account_id), - region=Token.as_string(data_aws_region_current.name) + region=Token.as_string(data_aws_region_current.region) ), remote_domain_info=OpensearchOutboundConnectionRemoteDomainInfo( domain_name=remote_domain.domain_name, owner_id=Token.as_string(current.account_id), - region=Token.as_string(data_aws_region_current.name) + region=Token.as_string(data_aws_region_current.region) ) ) aws_opensearch_inbound_connection_accepter_foo = @@ -60,6 +60,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `connection_id` - (Required, Forces new resource) Specifies the ID of the connection to accept. 
## Attribute Reference @@ -101,4 +102,4 @@ Using `terraform import`, import AWS Opensearch Inbound Connection Accepters usi % terraform import aws_opensearch_inbound_connection_accepter.foo connection-id ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/opensearch_outbound_connection.html.markdown b/website/docs/cdktf/python/r/opensearch_outbound_connection.html.markdown index 719b4814e384..ade86f7f69b1 100644 --- a/website/docs/cdktf/python/r/opensearch_outbound_connection.html.markdown +++ b/website/docs/cdktf/python/r/opensearch_outbound_connection.html.markdown @@ -40,12 +40,12 @@ class MyConvertedCode(TerraformStack): local_domain_info=OpensearchOutboundConnectionLocalDomainInfo( domain_name=local_domain.domain_name, owner_id=Token.as_string(current.account_id), - region=Token.as_string(data_aws_region_current.name) + region=Token.as_string(data_aws_region_current.region) ), remote_domain_info=OpensearchOutboundConnectionRemoteDomainInfo( domain_name=remote_domain.domain_name, owner_id=Token.as_string(current.account_id), - region=Token.as_string(data_aws_region_current.name) + region=Token.as_string(data_aws_region_current.region) ) ) ``` @@ -54,6 +54,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `connection_alias` - (Required, Forces new resource) Specifies the connection alias that will be used by the customer for this connection. * `connection_mode` - (Required, Forces new resource) Specifies the connection mode. Accepted values are `DIRECT` or `VPC_ENDPOINT`. * `accept_connection` - (Optional, Forces new resource) Accepts the connection. 
@@ -124,4 +125,4 @@ Using `terraform import`, import AWS Opensearch Outbound Connections using the O % terraform import aws_opensearch_outbound_connection.foo connection-id ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/opensearch_package.html.markdown b/website/docs/cdktf/python/r/opensearch_package.html.markdown index a6b15fc99394..273434052d9b 100644 --- a/website/docs/cdktf/python/r/opensearch_package.html.markdown +++ b/website/docs/cdktf/python/r/opensearch_package.html.markdown @@ -55,8 +55,10 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `engine_version` - (Optional, Forces new resources) Engine version that the package is compatible with. This argument is required and only valid when `package_type` is `ZIP-PLUGIN`. Format: `OpenSearch_X.Y` or `Elasticsearch_X.Y`, where `X` and `Y` are the major and minor version numbers, respectively. * `package_name` - (Required, Forces new resource) Unique name for the package. -* `package_type` - (Required, Forces new resource) The type of package. +* `package_type` - (Required, Forces new resource) The type of package. Valid values are `TXT-DICTIONARY`, `ZIP-PLUGIN`, `PACKAGE-LICENSE` and `PACKAGE-CONFIG`. * `package_source` - (Required, Forces new resource) Configuration block for the package source options. * `package_description` - (Optional, Forces new resource) Description of the package. @@ -97,4 +99,4 @@ Using `terraform import`, import AWS Opensearch Packages using the Package ID. 
F % terraform import aws_opensearch_package.example package-id ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/opensearch_package_association.html.markdown b/website/docs/cdktf/python/r/opensearch_package_association.html.markdown index 36de19e85cf8..dee936418ce0 100644 --- a/website/docs/cdktf/python/r/opensearch_package_association.html.markdown +++ b/website/docs/cdktf/python/r/opensearch_package_association.html.markdown @@ -58,6 +58,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `package_id` - (Required, Forces new resource) Internal ID of the package to associate with a domain. * `domain_name` - (Required, Forces new resource) Name of the domain to associate the package with. @@ -74,4 +75,4 @@ This resource exports the following attributes in addition to the arguments abov * `create` - (Default `10m`) * `delete` - (Default `10m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/opensearch_vpc_endpoint.html.markdown b/website/docs/cdktf/python/r/opensearch_vpc_endpoint.html.markdown index 4fd2cba9cab4..8cb629b18df4 100644 --- a/website/docs/cdktf/python/r/opensearch_vpc_endpoint.html.markdown +++ b/website/docs/cdktf/python/r/opensearch_vpc_endpoint.html.markdown @@ -44,6 +44,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `domain_arn` - (Required, Forces new resource) Specifies the Amazon Resource Name (ARN) of the domain to create the endpoint for * `vpc_options` - (Required) Options to specify the subnets and security groups for the endpoint. @@ -92,4 +93,4 @@ Using `terraform import`, import OpenSearch VPC endpoint connections using the ` % terraform import aws_opensearch_vpc_endpoint_connection.example endpoint-id ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/opensearchserverless_access_policy.html.markdown b/website/docs/cdktf/python/r/opensearchserverless_access_policy.html.markdown index 8d63b2599708..f76ffac6b45c 100644 --- a/website/docs/cdktf/python/r/opensearchserverless_access_policy.html.markdown +++ b/website/docs/cdktf/python/r/opensearchserverless_access_policy.html.markdown @@ -137,6 +137,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` - (Optional) Description of the policy. Typically used to store information about the permissions defined in the policy. 
## Attribute Reference @@ -170,4 +171,4 @@ Using `terraform import`, import OpenSearchServerless Access Policy using the `n % terraform import aws_opensearchserverless_access_policy.example example/data ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/opensearchserverless_collection.html.markdown b/website/docs/cdktf/python/r/opensearchserverless_collection.html.markdown index 2739d3bfca7d..c4de1ba9d9a2 100644 --- a/website/docs/cdktf/python/r/opensearchserverless_collection.html.markdown +++ b/website/docs/cdktf/python/r/opensearchserverless_collection.html.markdown @@ -63,6 +63,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` - (Optional) Description of the collection. * `standby_replicas` - (Optional) Indicates whether standby replicas should be used for a collection. One of `ENABLED` or `DISABLED`. Defaults to `ENABLED`. * `tags` - (Optional) A map of tags to assign to the collection. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. @@ -110,4 +111,4 @@ Using `terraform import`, import OpenSearchServerless Collection using the `id`. 
% terraform import aws_opensearchserverless_collection.example example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/opensearchserverless_lifecycle_policy.html.markdown b/website/docs/cdktf/python/r/opensearchserverless_lifecycle_policy.html.markdown index 03031d9b2a7b..6f9c7c7db752 100644 --- a/website/docs/cdktf/python/r/opensearchserverless_lifecycle_policy.html.markdown +++ b/website/docs/cdktf/python/r/opensearchserverless_lifecycle_policy.html.markdown @@ -57,6 +57,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` - (Optional) Description of the policy. ## Attribute Reference @@ -90,4 +91,4 @@ Using `terraform import`, import OpenSearch Serverless Lifecycle Policy using th % terraform import aws_opensearchserverless_lifecycle_policy.example example/retention ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/opensearchserverless_security_config.html.markdown b/website/docs/cdktf/python/r/opensearchserverless_security_config.html.markdown index 198ceecbd995..29af069fb485 100644 --- a/website/docs/cdktf/python/r/opensearchserverless_security_config.html.markdown +++ b/website/docs/cdktf/python/r/opensearchserverless_security_config.html.markdown @@ -30,9 +30,9 @@ class MyConvertedCode(TerraformStack): super().__init__(scope, name) OpensearchserverlessSecurityConfig(self, "example", name="example", - saml_options=[{ - "metadata": Token.as_string(Fn.file("${path.module}/idp-metadata.xml")) - } + saml_options=[OpensearchserverlessSecurityConfigSamlOptions( + 
metadata=Token.as_string(Fn.file("${path.module}/idp-metadata.xml")) + ) ], type="saml" ) @@ -48,6 +48,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` - (Optional) Description of the security configuration. ### saml_options @@ -88,4 +89,4 @@ Using `terraform import`, import OpenSearchServerless Access Policy using the `n % terraform import aws_opensearchserverless_security_config.example saml/123456789012/example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/opensearchserverless_security_policy.html.markdown b/website/docs/cdktf/python/r/opensearchserverless_security_policy.html.markdown index fa249a08e948..c7036ddfed73 100644 --- a/website/docs/cdktf/python/r/opensearchserverless_security_policy.html.markdown +++ b/website/docs/cdktf/python/r/opensearchserverless_security_policy.html.markdown @@ -234,6 +234,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` - (Optional) Description of the policy. Typically used to store information about the permissions defined in the policy. 
## Attribute Reference @@ -267,4 +268,4 @@ Using `terraform import`, import OpenSearchServerless Security Policy using the % terraform import aws_opensearchserverless_security_policy.example example/encryption ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/opensearchserverless_vpc_endpoint.html.markdown b/website/docs/cdktf/python/r/opensearchserverless_vpc_endpoint.html.markdown index ee381da9caa8..5998c1f069fb 100644 --- a/website/docs/cdktf/python/r/opensearchserverless_vpc_endpoint.html.markdown +++ b/website/docs/cdktf/python/r/opensearchserverless_vpc_endpoint.html.markdown @@ -45,6 +45,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `security_group_ids` - (Optional) One or more security groups that define the ports, protocols, and sources for inbound traffic that you are authorizing into your endpoint. Up to 5 security groups can be provided. 
## Attribute Reference @@ -86,4 +87,4 @@ Using `terraform import`, import OpenSearchServerless Vpc Endpointa using the `i % terraform import aws_opensearchserverless_vpc_endpoint.example vpce-8012925589 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/organizations_account.html.markdown b/website/docs/cdktf/python/r/organizations_account.html.markdown index 410a10aff0ec..972346da3f0e 100644 --- a/website/docs/cdktf/python/r/organizations_account.html.markdown +++ b/website/docs/cdktf/python/r/organizations_account.html.markdown @@ -72,6 +72,31 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_organizations_account.example + identity = { + id = "111111111111" + } +} + +resource "aws_organizations_account" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `id` (String) ID of the AWS Organizations account. + +#### Optional + +* `account_id` (String) AWS Account where this resource is managed. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import the AWS member account using the `account_id`. For example: ```python @@ -86,19 +111,19 @@ from imports.aws.organizations_account import OrganizationsAccount class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) - OrganizationsAccount.generate_config_for_import(self, "myAccount", "111111111111") + OrganizationsAccount.generate_config_for_import(self, "example", "111111111111") ``` Using `terraform import`, import the AWS member account using the `account_id`. 
For example: ```console -% terraform import aws_organizations_account.my_account 111111111111 +% terraform import aws_organizations_account.example 111111111111 ``` To import accounts that have set iam_user_access_to_billing, use the following: ```console -% terraform import aws_organizations_account.my_account 111111111111_ALLOW +% terraform import aws_organizations_account.example 111111111111_ALLOW ``` Certain resource arguments, like `role_name`, do not have an Organizations API method for reading the information after account creation. If the argument is set in the Terraform configuration on an imported resource, Terraform will always show a difference. To workaround this behavior, either omit the argument from the Terraform configuration or use [`ignore_changes`](https://www.terraform.io/docs/configuration/meta-arguments/lifecycle.html#ignore_changes) to hide the difference. For example: @@ -126,4 +151,4 @@ class MyConvertedCode(TerraformStack): ) ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/organizations_delegated_administrator.html.markdown b/website/docs/cdktf/python/r/organizations_delegated_administrator.html.markdown index 5cd26e8a29b1..dc47e748931a 100644 --- a/website/docs/cdktf/python/r/organizations_delegated_administrator.html.markdown +++ b/website/docs/cdktf/python/r/organizations_delegated_administrator.html.markdown @@ -54,6 +54,33 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. 
For example: + +```terraform +import { + to = aws_organizations_delegated_administrator.example + identity = { + service_principal = "config.amazonaws.com" + delegated_account_id = "123456789012" + } +} + +resource "aws_organizations_delegated_administrator" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `service_principal` (String) Service principal for the AWS service. +* `delegated_account_id` (String) Account ID to be designated as a delegated administrator. + +#### Optional + +* `account_id` (String) AWS Account where this resource is managed. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import `aws_organizations_delegated_administrator` using the account ID and its service principal. For example: ```python @@ -77,4 +104,4 @@ Using `terraform import`, import `aws_organizations_delegated_administrator` usi % terraform import aws_organizations_delegated_administrator.example 123456789012/config.amazonaws.com ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/organizations_organization.html.markdown b/website/docs/cdktf/python/r/organizations_organization.html.markdown index 6aa2a149bb95..2ae340b47cae 100644 --- a/website/docs/cdktf/python/r/organizations_organization.html.markdown +++ b/website/docs/cdktf/python/r/organizations_organization.html.markdown @@ -77,6 +77,31 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. 
For example: + +```terraform +import { + to = aws_organizations_organization.example + identity = { + id = "o-1234567" + } +} + +resource "aws_organizations_organization" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `id` (String) ID of the AWS Organizations organization. + +#### Optional + +* `account_id` (String) AWS Account where this resource is managed. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import the AWS organization using the `id`. For example: ```python @@ -91,13 +116,13 @@ from imports.aws.organizations_organization import OrganizationsOrganization class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) - OrganizationsOrganization.generate_config_for_import(self, "myOrg", "o-1234567") + OrganizationsOrganization.generate_config_for_import(self, "example", "o-1234567") ``` Using `terraform import`, import the AWS organization using the `id`. For example: ```console -% terraform import aws_organizations_organization.my_org o-1234567 +% terraform import aws_organizations_organization.example o-1234567 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/organizations_organizational_unit.html.markdown b/website/docs/cdktf/python/r/organizations_organizational_unit.html.markdown index 63ef7dd8781c..4dfe33a0bfae 100644 --- a/website/docs/cdktf/python/r/organizations_organizational_unit.html.markdown +++ b/website/docs/cdktf/python/r/organizations_organizational_unit.html.markdown @@ -56,6 +56,31 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. 
For example: + +```terraform +import { + to = aws_organizations_organizational_unit.example + identity = { + id = "ou-1234567" + } +} + +resource "aws_organizations_organizational_unit" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `id` (String) ID of the organizational unit. + +#### Optional + +* `account_id` (String) AWS Account where this resource is managed. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import AWS Organizations Organizational Units using the `id`. For example: ```python @@ -79,4 +104,4 @@ Using `terraform import`, import AWS Organizations Organizational Units using th % terraform import aws_organizations_organizational_unit.example ou-1234567 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/organizations_policy_attachment.html.markdown b/website/docs/cdktf/python/r/organizations_policy_attachment.html.markdown index d837d1d71cb1..90ea8de4d011 100644 --- a/website/docs/cdktf/python/r/organizations_policy_attachment.html.markdown +++ b/website/docs/cdktf/python/r/organizations_policy_attachment.html.markdown @@ -89,6 +89,33 @@ This resource exports no additional attributes. ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_organizations_policy_attachment.example + identity = { + policy_id = "p-12345678" + target_id = "123456789012" + } +} + +resource "aws_organizations_policy_attachment" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `policy_id` (String) Organizations policy ID. +* `target_id` (String) Organizations target ID (account, OU, or root). 
+ +#### Optional + +* `account_id` (String) AWS Account where this resource is managed. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import `aws_organizations_policy_attachment` using the target ID and policy ID. For example: With an account target: @@ -105,7 +132,7 @@ from imports.aws.organizations_policy_attachment import OrganizationsPolicyAttac class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) - OrganizationsPolicyAttachment.generate_config_for_import(self, "account", "123456789012:p-12345678") + OrganizationsPolicyAttachment.generate_config_for_import(self, "example", "123456789012:p-12345678") ``` Using `terraform import`, import `aws_organizations_policy_attachment` using the target ID and policy ID. For example: @@ -113,7 +140,7 @@ Using `terraform import`, import `aws_organizations_policy_attachment` using the With an account target: ```console -% terraform import aws_organizations_policy_attachment.account 123456789012:p-12345678 +% terraform import aws_organizations_policy_attachment.example 123456789012:p-12345678 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/osis_pipeline.html.markdown b/website/docs/cdktf/python/r/osis_pipeline.html.markdown index 72846bdeaaa3..73129822c045 100644 --- a/website/docs/cdktf/python/r/osis_pipeline.html.markdown +++ b/website/docs/cdktf/python/r/osis_pipeline.html.markdown @@ -49,7 +49,7 @@ class MyConvertedCode(TerraformStack): aws_osis_pipeline_example = OsisPipeline(self, "example_2", max_units=1, min_units=1, - pipeline_configuration_body="version: \"2\"\nexample-pipeline:\n source:\n http:\n path: \"/example\"\n sink:\n - s3:\n aws:\n sts_role_arn: \"${" + example.arn + "}\"\n region: \"${" + current.name + "}\"\n bucket: \"example\"\n threshold:\n event_collect_timeout: \"60s\"\n codec:\n ndjson:\n\n", + 
pipeline_configuration_body="version: \"2\"\nexample-pipeline:\n source:\n http:\n path: \"/example\"\n sink:\n - s3:\n aws:\n sts_role_arn: \"${" + example.arn + "}\"\n region: \"${" + current.region + "}\"\n bucket: \"example\"\n threshold:\n event_collect_timeout: \"60s\"\n codec:\n ndjson:\n\n", pipeline_name="example" ) # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. @@ -89,6 +89,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `buffer_options` - (Optional) Key-value pairs to configure persistent buffering for the pipeline. See [`buffer_options`](#buffer_options) below. * `encryption_at_rest_options` - (Optional) Key-value pairs to configure encryption for data that is written to a persistent buffer. See [`encryption_at_rest_options`](#encryption_at_rest_options) below. * `log_publishing_options` - (Optional) Key-value pairs to configure log publishing. See [`log_publishing_options`](#log_publishing_options) below. @@ -159,4 +160,4 @@ Using `terraform import`, import OpenSearch Ingestion Pipeline using the `id`. 
F % terraform import aws_osis_pipeline.example example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/paymentcryptography_key.html.markdown b/website/docs/cdktf/python/r/paymentcryptography_key.html.markdown index 106b8d9acbe2..8739cc0be060 100644 --- a/website/docs/cdktf/python/r/paymentcryptography_key.html.markdown +++ b/website/docs/cdktf/python/r/paymentcryptography_key.html.markdown @@ -29,18 +29,18 @@ class MyConvertedCode(TerraformStack): super().__init__(scope, name) PaymentcryptographyKey(self, "test", exportable=True, - key_attributes=[{ - "key_algorithm": "TDES_3KEY", - "key_class": "SYMMETRIC_KEY", - "key_modes_of_use": [{ - "decrypt": True, - "encrypt": True, - "unwrap": True, - "wrap": True - } + key_attributes=[PaymentcryptographyKeyKeyAttributes( + key_algorithm="TDES_3KEY", + key_class="SYMMETRIC_KEY", + key_modes_of_use=[PaymentcryptographyKeyKeyAttributesKeyModesOfUse( + decrypt=True, + encrypt=True, + unwrap=True, + wrap=True + ) ], - "key_usage": "TR31_P0_PIN_ENCRYPTION_KEY" - } + key_usage="TR31_P0_PIN_ENCRYPTION_KEY" + ) ] ) ``` @@ -54,6 +54,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `enabled` - (Optional) Whether to enable the key. * `key_check_value_algorithm` - (Optional) Algorithm that AWS Payment Cryptography uses to calculate the key check value (KCV). * `tags` - (Optional) Map of tags assigned to the WorkSpaces Connection Alias. 
If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. @@ -71,6 +72,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `decrypt` - (Optional) Whether an AWS Payment Cryptography key can be used to decrypt data. * `derive_key` - (Optional) Whether an AWS Payment Cryptography key can be used to derive new keys. * `encrypt` - (Optional) Whether an AWS Payment Cryptography key can be used to encrypt data. @@ -101,6 +103,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_paymentcryptography_key.example + identity = { + "arn" = "arn:aws:payment-cryptography:us-east-1:123456789012:key/1234abcd-12ab-34cd-56ef-1234567890ab" + } +} + +resource "aws_paymentcryptography_key" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the Payment Cryptography key. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Payment Cryptography Control Plane Key using the `arn:aws:payment-cryptography:us-east-1:123456789012:key/qtbojf64yshyvyzf`. 
For example: ```python @@ -124,4 +147,4 @@ Using `terraform import`, import Payment Cryptography Control Plane Key using th % terraform import aws_paymentcryptography_key.example arn:aws:payment-cryptography:us-east-1:123456789012:key/qtbojf64yshyvyzf ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/paymentcryptography_key_alias.html.markdown b/website/docs/cdktf/python/r/paymentcryptography_key_alias.html.markdown index 98f5b4cb8e3e..a4383ea8a40d 100644 --- a/website/docs/cdktf/python/r/paymentcryptography_key_alias.html.markdown +++ b/website/docs/cdktf/python/r/paymentcryptography_key_alias.html.markdown @@ -30,18 +30,18 @@ class MyConvertedCode(TerraformStack): super().__init__(scope, name) test = PaymentcryptographyKey(self, "test", exportable=True, - key_attributes=[{ - "key_algorithm": "TDES_3KEY", - "key_class": "SYMMETRIC_KEY", - "key_modes_of_use": [{ - "decrypt": True, - "encrypt": True, - "unwrap": True, - "wrap": True - } + key_attributes=[PaymentcryptographyKeyKeyAttributes( + key_algorithm="TDES_3KEY", + key_class="SYMMETRIC_KEY", + key_modes_of_use=[PaymentcryptographyKeyKeyAttributesKeyModesOfUse( + decrypt=True, + encrypt=True, + unwrap=True, + wrap=True + ) ], - "key_usage": "TR31_P0_PIN_ENCRYPTION_KEY" - } + key_usage="TR31_P0_PIN_ENCRYPTION_KEY" + ) ] ) aws_paymentcryptography_key_alias_test = PaymentcryptographyKeyAlias(self, "test_1", @@ -60,6 +60,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `key_arn` - (Optional) ARN of the key. 
## Attribute Reference @@ -91,4 +92,4 @@ Using `terraform import`, import Payment Cryptography Control Plane Key Alias us % terraform import aws_paymentcryptography_key_alias.example alias/4681482429376900170 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/pinpoint_adm_channel.html.markdown b/website/docs/cdktf/python/r/pinpoint_adm_channel.html.markdown index 8190d1489860..2eaed1930b75 100644 --- a/website/docs/cdktf/python/r/pinpoint_adm_channel.html.markdown +++ b/website/docs/cdktf/python/r/pinpoint_adm_channel.html.markdown @@ -43,6 +43,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `application_id` - (Required) The application ID. * `client_id` - (Required) Client ID (part of OAuth Credentials) obtained via Amazon Developer Account. * `client_secret` - (Required) Client Secret (part of OAuth Credentials) obtained via Amazon Developer Account. 
@@ -77,4 +78,4 @@ Using `terraform import`, import Pinpoint ADM Channel using the `application-id` % terraform import aws_pinpoint_adm_channel.channel application-id ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/pinpoint_apns_channel.html.markdown b/website/docs/cdktf/python/r/pinpoint_apns_channel.html.markdown index aa83950a652e..dc4ce9fd49fc 100644 --- a/website/docs/cdktf/python/r/pinpoint_apns_channel.html.markdown +++ b/website/docs/cdktf/python/r/pinpoint_apns_channel.html.markdown @@ -42,6 +42,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `application_id` - (Required) The application ID. * `enabled` - (Optional) Whether the channel is enabled or disabled. Defaults to `true`. * `default_authentication_method` - (Optional) The default authentication method used for APNs. 
@@ -92,4 +93,4 @@ Using `terraform import`, import Pinpoint APNs Channel using the `application-id % terraform import aws_pinpoint_apns_channel.apns application-id ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/pinpoint_apns_sandbox_channel.html.markdown b/website/docs/cdktf/python/r/pinpoint_apns_sandbox_channel.html.markdown index 9fe79ab19c2f..a3ee9b6c5500 100644 --- a/website/docs/cdktf/python/r/pinpoint_apns_sandbox_channel.html.markdown +++ b/website/docs/cdktf/python/r/pinpoint_apns_sandbox_channel.html.markdown @@ -42,6 +42,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `application_id` - (Required) The application ID. * `enabled` - (Optional) Whether the channel is enabled or disabled. Defaults to `true`. * `default_authentication_method` - (Optional) The default authentication method used for APNs Sandbox. 
@@ -92,4 +93,4 @@ Using `terraform import`, import Pinpoint APNs Sandbox Channel using the `applic % terraform import aws_pinpoint_apns_sandbox_channel.apns_sandbox application-id ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/pinpoint_apns_voip_channel.html.markdown b/website/docs/cdktf/python/r/pinpoint_apns_voip_channel.html.markdown index 6b0550a5465e..92cf41bfcdcf 100644 --- a/website/docs/cdktf/python/r/pinpoint_apns_voip_channel.html.markdown +++ b/website/docs/cdktf/python/r/pinpoint_apns_voip_channel.html.markdown @@ -42,6 +42,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `application_id` - (Required) The application ID. * `enabled` - (Optional) Whether the channel is enabled or disabled. Defaults to `true`. * `default_authentication_method` - (Optional) The default authentication method used for APNs. 
@@ -92,4 +93,4 @@ Using `terraform import`, import Pinpoint APNs VoIP Channel using the `applicati % terraform import aws_pinpoint_apns_voip_channel.apns_voip application-id ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/pinpoint_apns_voip_sandbox_channel.html.markdown b/website/docs/cdktf/python/r/pinpoint_apns_voip_sandbox_channel.html.markdown index 238d15274cfd..f2c3ccafc0b3 100644 --- a/website/docs/cdktf/python/r/pinpoint_apns_voip_sandbox_channel.html.markdown +++ b/website/docs/cdktf/python/r/pinpoint_apns_voip_sandbox_channel.html.markdown @@ -42,6 +42,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `application_id` - (Required) The application ID. * `enabled` - (Optional) Whether the channel is enabled or disabled. Defaults to `true`. * `default_authentication_method` - (Optional) The default authentication method used for APNs. 
@@ -92,4 +93,4 @@ Using `terraform import`, import Pinpoint APNs VoIP Sandbox Channel using the `a % terraform import aws_pinpoint_apns_voip_sandbox_channel.apns_voip_sandbox application-id ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/pinpoint_app.html.markdown b/website/docs/cdktf/python/r/pinpoint_app.html.markdown index b6dffcfc02ae..4216adfda428 100644 --- a/website/docs/cdktf/python/r/pinpoint_app.html.markdown +++ b/website/docs/cdktf/python/r/pinpoint_app.html.markdown @@ -42,6 +42,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Optional) The application name. By default generated by Terraform * `name_prefix` - (Optional) The name of the Pinpoint application. Conflicts with `name` * `campaign_hook` - (Optional) Specifies settings for invoking an AWS Lambda function that customizes a segment for a campaign @@ -100,4 +101,4 @@ Using `terraform import`, import Pinpoint App using the `application-id`. 
For ex % terraform import aws_pinpoint_app.name application-id ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/pinpoint_baidu_channel.html.markdown b/website/docs/cdktf/python/r/pinpoint_baidu_channel.html.markdown index 20f3dc7b4757..c5f60b3a496c 100644 --- a/website/docs/cdktf/python/r/pinpoint_baidu_channel.html.markdown +++ b/website/docs/cdktf/python/r/pinpoint_baidu_channel.html.markdown @@ -42,6 +42,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `application_id` - (Required) The application ID. * `enabled` - (Optional) Specifies whether to enable the channel. Defaults to `true`. * `api_key` - (Required) Platform credential API key from Baidu. @@ -76,4 +77,4 @@ Using `terraform import`, import Pinpoint Baidu Channel using the `application-i % terraform import aws_pinpoint_baidu_channel.channel application-id ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/pinpoint_email_channel.html.markdown b/website/docs/cdktf/python/r/pinpoint_email_channel.html.markdown index 9339a950267d..94aad11c656f 100644 --- a/website/docs/cdktf/python/r/pinpoint_email_channel.html.markdown +++ b/website/docs/cdktf/python/r/pinpoint_email_channel.html.markdown @@ -77,6 +77,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `application_id` - (Required) The application ID. * `enabled` - (Optional) Whether the channel is enabled or disabled. Defaults to `true`. * `configuration_set` - (Optional) The ARN of the Amazon SES configuration set that you want to apply to messages that you send through the channel. @@ -116,4 +117,4 @@ Using `terraform import`, import Pinpoint Email Channel using the `application-i % terraform import aws_pinpoint_email_channel.email application-id ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/pinpoint_email_template.markdown b/website/docs/cdktf/python/r/pinpoint_email_template.markdown index f93943f01a2f..0a8b74bc392e 100644 --- a/website/docs/cdktf/python/r/pinpoint_email_template.markdown +++ b/website/docs/cdktf/python/r/pinpoint_email_template.markdown @@ -43,8 +43,9 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -The following arguments are required: +This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `template_name` - (Required) name of the message template. A template name must start with an alphanumeric character and can contain a maximum of 128 characters. The characters can be alphanumeric characters, underscores (_), or hyphens (-). Template names are case sensitive. * `email_template` - (Required) Specifies the content and settings for a message template that can be used in messages that are sent through the email channel. 
See [Email Template](#email-template) @@ -95,4 +96,4 @@ Using `terraform import`, import Pinpoint Email Template using the `template_nam % terraform import aws_pinpoint_email_template.reset template_name ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/pinpoint_event_stream.html.markdown b/website/docs/cdktf/python/r/pinpoint_event_stream.html.markdown index 9e9bff797f9b..1d02d40a7ab5 100644 --- a/website/docs/cdktf/python/r/pinpoint_event_stream.html.markdown +++ b/website/docs/cdktf/python/r/pinpoint_event_stream.html.markdown @@ -77,6 +77,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `application_id` - (Required) The application ID. * `destination_stream_arn` - (Required) The Amazon Resource Name (ARN) of the Amazon Kinesis stream or Firehose delivery stream to which you want to publish events. * `role_arn` - (Required) The IAM role that authorizes Amazon Pinpoint to publish events to the stream in your account. 
@@ -110,4 +111,4 @@ Using `terraform import`, import Pinpoint Event Stream using the `application-id % terraform import aws_pinpoint_event_stream.stream application-id ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/pinpoint_gcm_channel.html.markdown b/website/docs/cdktf/python/r/pinpoint_gcm_channel.html.markdown index 52afba56e800..7ea6e9936593 100644 --- a/website/docs/cdktf/python/r/pinpoint_gcm_channel.html.markdown +++ b/website/docs/cdktf/python/r/pinpoint_gcm_channel.html.markdown @@ -42,6 +42,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `application_id` - (Required) The application ID. * `api_key` - (Required) Platform credential API key from Google. * `enabled` - (Optional) Whether the channel is enabled or disabled. Defaults to `true`. @@ -75,4 +76,4 @@ Using `terraform import`, import Pinpoint GCM Channel using the `application-id` % terraform import aws_pinpoint_gcm_channel.gcm application-id ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/pinpoint_sms_channel.html.markdown b/website/docs/cdktf/python/r/pinpoint_sms_channel.html.markdown index 9f846be149ef..0699feac387d 100644 --- a/website/docs/cdktf/python/r/pinpoint_sms_channel.html.markdown +++ b/website/docs/cdktf/python/r/pinpoint_sms_channel.html.markdown @@ -37,6 +37,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `application_id` - (Required) ID of the application. * `enabled` - (Optional) Whether the channel is enabled or disabled. By default, it is set to `true`. * `sender_id` - (Optional) Identifier of the sender for your messages. @@ -74,4 +75,4 @@ Using `terraform import`, import the Pinpoint SMS Channel using the `application % terraform import aws_pinpoint_sms_channel.sms application-id ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/pinpointsmsvoicev2_configuration_set.html.markdown b/website/docs/cdktf/python/r/pinpointsmsvoicev2_configuration_set.html.markdown index c30e15475cd4..49f1c37da50a 100644 --- a/website/docs/cdktf/python/r/pinpointsmsvoicev2_configuration_set.html.markdown +++ b/website/docs/cdktf/python/r/pinpointsmsvoicev2_configuration_set.html.markdown @@ -37,6 +37,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The name of the configuration set. * `default_sender_id` - (Optional) The default sender ID to use for this configuration set. * `default_message_type` - (Optional) The default message type. Must either be "TRANSACTIONAL" or "PROMOTIONAL" @@ -74,4 +75,4 @@ Using `terraform import`, import configuration sets using the `name`. 
For exampl % terraform import aws_pinpointsmsvoicev2_configuration_set.example example-configuration-set ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/pinpointsmsvoicev2_opt_out_list.html.markdown b/website/docs/cdktf/python/r/pinpointsmsvoicev2_opt_out_list.html.markdown index 321dd9d65118..b91136b6124e 100644 --- a/website/docs/cdktf/python/r/pinpointsmsvoicev2_opt_out_list.html.markdown +++ b/website/docs/cdktf/python/r/pinpointsmsvoicev2_opt_out_list.html.markdown @@ -35,6 +35,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The name of the opt-out list. * `tags` - (Optional) Key-value map of resource tags. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. @@ -70,4 +71,4 @@ Using `terraform import`, import opt-out lists using the `name`. 
For example: % terraform import aws_pinpointsmsvoicev2_opt_out_list.example example-opt-out-list ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/pinpointsmsvoicev2_phone_number.html.markdown b/website/docs/cdktf/python/r/pinpointsmsvoicev2_phone_number.html.markdown index 681b5994fbf4..e74a15a2f32a 100644 --- a/website/docs/cdktf/python/r/pinpointsmsvoicev2_phone_number.html.markdown +++ b/website/docs/cdktf/python/r/pinpointsmsvoicev2_phone_number.html.markdown @@ -38,6 +38,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `deletion_protection_enabled` - (Optional) By default this is set to `false`. When set to true the phone number can’t be deleted. * `iso_country_code` - (Required) The two-character code, in ISO 3166-1 alpha-2 format, for the country or region. * `message_type` - (Required) The type of message. Valid values are `TRANSACTIONAL` for messages that are critical or time-sensitive and `PROMOTIONAL` for messages that aren’t critical or time-sensitive. @@ -46,7 +47,7 @@ This resource supports the following arguments: * `opt_out_list_name` - (Optional) The name of the opt-out list to associate with the phone number. * `registration_id` - (Optional) Use this field to attach your phone number for an external registration process. * `self_managed_opt_outs_enabled` - (Optional) When set to `false` an end recipient sends a message that begins with HELP or STOP to one of your dedicated numbers, AWS End User Messaging SMS and Voice automatically replies with a customizable message and adds the end recipient to the opt-out list. 
When set to true you’re responsible for responding to HELP and STOP requests. You’re also responsible for tracking and honoring opt-out request. -* `two_way_channel_arn` - (Optional) The Amazon Resource Name (ARN) of the two way channel. +* `two_way_channel_arn` - (Optional) Configuration for two-way SMS. Specify an ARN to receive incoming SMS messages, or `connect.[region].amazonaws.com` (with `[region]` replaced by the AWS Region of the Amazon Connect instance) to set Amazon Connect as the inbound destination. * `two_way_channel_enabled` - (Optional) By default this is set to `false`. When set to `true` you can receive incoming text messages from your end recipients. * `two_way_channel_role` - (Optional) IAM Role ARN for a service to assume, to be able to post inbound SMS messages. @@ -85,4 +86,4 @@ Using `terraform import`, import phone numbers using the `id`. For example: % terraform import aws_pinpointsmsvoicev2_phone_number.example phone-abcdef0123456789abcdef0123456789 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/pipes_pipe.html.markdown b/website/docs/cdktf/python/r/pipes_pipe.html.markdown index 6154d2055528..edd03471e50b 100644 --- a/website/docs/cdktf/python/r/pipes_pipe.html.markdown +++ b/website/docs/cdktf/python/r/pipes_pipe.html.markdown @@ -250,6 +250,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` - (Optional) A description of the pipe. At most 512 characters. * `desired_state` - (Optional) The state the pipe should be in. One of: `RUNNING`, `STOPPED`. * `enrichment` - (Optional) Enrichment resource of the pipe (typically an ARN). 
Read more about enrichment in the [User Guide](https://docs.aws.amazon.com/eventbridge/latest/userguide/eb-pipes.html#pipes-enrichment). @@ -341,7 +342,7 @@ You can find out more about EventBridge Pipes Sources in the [User Guide](https: * `maximum_retry_attempts` - (Optional) Discard records after the specified number of retries. The default value is -1, which sets the maximum number of retries to infinite. When MaximumRetryAttempts is infinite, EventBridge retries failed records until the record expires in the event source. Maximum value of 10,000. * `on_partial_batch_item_failure` - (Optional) Define how to handle item process failures. AUTOMATIC_BISECT halves each batch and retry each half until all the records are processed or there is one failed message left in the batch. Valid values: AUTOMATIC_BISECT. * `parallelization_factor` - (Optional)The number of batches to process concurrently from each shard. The default value is 1. Maximum value of 10. -* `starting_position` - (Optional) The position in a stream from which to start reading. Valid values: TRIM_HORIZON, LATEST. +* `starting_position` - (Required) The position in a stream from which to start reading. Valid values: TRIM_HORIZON, LATEST. ##### source_parameters.dynamodb_stream_parameters.dead_letter_config Configuration Block @@ -659,4 +660,4 @@ Using `terraform import`, import pipes using the `name`. 
For example: % terraform import aws_pipes_pipe.example my-pipe ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/placement_group.html.markdown b/website/docs/cdktf/python/r/placement_group.html.markdown index c7667cf11714..341d077dfccc 100644 --- a/website/docs/cdktf/python/r/placement_group.html.markdown +++ b/website/docs/cdktf/python/r/placement_group.html.markdown @@ -37,6 +37,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The name of the placement group. * `partition_count` - (Optional) The number of partitions to create in the placement group. Can only be specified when the `strategy` is set to @@ -80,4 +81,4 @@ Using `terraform import`, import placement groups using the `name`. For example: % terraform import aws_placement_group.prod_pg production-placement-group ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/prometheus_alert_manager_definition.html.markdown b/website/docs/cdktf/python/r/prometheus_alert_manager_definition.html.markdown index 3d549dcb218a..c3664b58e966 100644 --- a/website/docs/cdktf/python/r/prometheus_alert_manager_definition.html.markdown +++ b/website/docs/cdktf/python/r/prometheus_alert_manager_definition.html.markdown @@ -41,6 +41,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `workspace_id` - (Required) ID of the prometheus workspace the alert manager definition should be linked to * `definition` - (Required) the alert manager definition that you want to be applied. See more [in AWS Docs](https://docs.aws.amazon.com/prometheus/latest/userguide/AMP-alert-manager.html). @@ -73,4 +74,4 @@ Using `terraform import`, import the prometheus alert manager definition using t % terraform import aws_prometheus_alert_manager_definition.demo ws-C6DCB907-F2D7-4D96-957B-66691F865D8B ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/prometheus_query_logging_configuration.html.markdown b/website/docs/cdktf/python/r/prometheus_query_logging_configuration.html.markdown new file mode 100644 index 000000000000..6dee0decf14c --- /dev/null +++ b/website/docs/cdktf/python/r/prometheus_query_logging_configuration.html.markdown @@ -0,0 +1,113 @@ +--- +subcategory: "AMP (Managed Prometheus)" +layout: "aws" +page_title: "AWS: aws_prometheus_query_logging_configuration" +description: |- + Manages an Amazon Managed Service for Prometheus (AMP) Query Logging Configuration. +--- + + + +# Resource: aws_prometheus_query_logging_configuration + +Manages an Amazon Managed Service for Prometheus (AMP) Query Logging Configuration. + +## Example Usage + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import Token, TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. 
+# +from imports.aws.cloudwatch_log_group import CloudwatchLogGroup +from imports.aws.prometheus_query_logging_configuration import PrometheusQueryLoggingConfiguration +from imports.aws.prometheus_workspace import PrometheusWorkspace +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + example = CloudwatchLogGroup(self, "example", + name="/aws/prometheus/query-logs/example" + ) + aws_prometheus_workspace_example = PrometheusWorkspace(self, "example_1", + alias="example" + ) + # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. + aws_prometheus_workspace_example.override_logical_id("example") + aws_prometheus_query_logging_configuration_example = + PrometheusQueryLoggingConfiguration(self, "example_2", + destination=[PrometheusQueryLoggingConfigurationDestination( + cloudwatch_logs=[PrometheusQueryLoggingConfigurationDestinationCloudwatchLogs( + log_group_arn="${" + example.arn + "}:*" + ) + ], + filters=[PrometheusQueryLoggingConfigurationDestinationFilters( + qsp_threshold=1000 + ) + ] + ) + ], + workspace_id=Token.as_string(aws_prometheus_workspace_example.id) + ) + # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. + aws_prometheus_query_logging_configuration_example.override_logical_id("example") +``` + +## Argument Reference + +This resource supports the following arguments: + +* `destination` - (Required) Configuration block for the logging destinations. See [`destination`](#destination). +* `workspace_id` - (Required) The ID of the AMP workspace for which to configure query logging. + +The following arguments are optional: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). + +### `destination` + +* `cloudwatch_logs` - (Required) Configuration block for CloudWatch Logs destination. See [`cloudwatch_logs`](#cloudwatch_logs). +* `filters` - (Required) A list of filter configurations that specify which logs should be sent to the destination. See [`filters`](#filters). + +#### `cloudwatch_logs` + +* `log_group_arn` - (Required) The ARN of the CloudWatch log group to which query logs will be sent. + +#### `filters` + +* `qsp_threshold` - (Required) The Query Samples Processed (QSP) threshold above which queries will be logged. Queries processing more samples than this threshold will be captured in logs. + +## Attribute Reference + +This resource exports no additional attributes. + +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import the Query Logging Configuration using the workspace ID. For example: + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. 
+# +from imports.aws.prometheus_query_logging_configuration import PrometheusQueryLoggingConfiguration +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + PrometheusQueryLoggingConfiguration.generate_config_for_import(self, "example", "ws-12345678-90ab-cdef-1234-567890abcdef") +``` + +## Timeouts + +[Configuration options](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts): + +- `create` - (Default `5m`) +- `update` - (Default `5m`) +- `delete` - (Default `5m`) + + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/prometheus_rule_group_namespace.html.markdown b/website/docs/cdktf/python/r/prometheus_rule_group_namespace.html.markdown index e8369598f0a8..df2ec79e03b0 100644 --- a/website/docs/cdktf/python/r/prometheus_rule_group_namespace.html.markdown +++ b/website/docs/cdktf/python/r/prometheus_rule_group_namespace.html.markdown @@ -42,6 +42,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `data` - (Required) the rule group namespace data that you want to be applied. See more [in AWS Docs](https://docs.aws.amazon.com/prometheus/latest/userguide/AMP-Ruler.html). * `name` - (Required) The name of the rule group namespace. * `tags` - (Optional) Map of tags assigned to the resource. If configured with a provider [`default_tags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. 
@@ -79,4 +80,4 @@ Using `terraform import`, import the prometheus rule group namespace using the a % terraform import aws_prometheus_rule_group_namespace.demo arn:aws:aps:us-west-2:123456789012:rulegroupsnamespace/IDstring/namespace_name ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/prometheus_scraper.html.markdown b/website/docs/cdktf/python/r/prometheus_scraper.html.markdown index f17e92fd2920..1b475a239037 100644 --- a/website/docs/cdktf/python/r/prometheus_scraper.html.markdown +++ b/website/docs/cdktf/python/r/prometheus_scraper.html.markdown @@ -209,14 +209,15 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -The following arguments are required: +This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `destination` - (Required) Configuration block for the managed scraper to send metrics to. See [`destination`](#destination). * `scrape_configuration` - (Required) The configuration file to use in the new scraper. For more information, see [Scraper configuration](https://docs.aws.amazon.com/prometheus/latest/userguide/AMP-collector-how-to.html#AMP-collector-configuration). * `source` - (Required) Configuration block to specify where the managed scraper will collect metrics from. See [`source`](#source). The following arguments are optional: 
* `alias` - (Optional) a name to associate with the managed scraper. This is for your use, and does not need to be unique. * `role_configuration` - (Optional) Configuration block to enable writing to an Amazon Managed Service for Prometheus workspace in a different account. See [`role_configuration`](#role_configuration) below. @@ -287,4 +289,4 @@ For example: % terraform import aws_prometheus_scraper.example s-0123abc-0000-0123-a000-000000000000 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/prometheus_workspace.html.markdown b/website/docs/cdktf/python/r/prometheus_workspace.html.markdown index 43529f9aa0c5..fa31c9703383 100644 --- a/website/docs/cdktf/python/r/prometheus_workspace.html.markdown +++ b/website/docs/cdktf/python/r/prometheus_workspace.html.markdown @@ -92,6 +92,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `alias` - (Optional) The alias of the prometheus workspace. See more [in AWS Docs](https://docs.aws.amazon.com/prometheus/latest/userguide/AMP-onboard-create-workspace.html). * `kms_key_arn` - (Optional) The ARN for the KMS encryption key. If this argument is not provided, then the AWS owned encryption key will be used to encrypt the data in the workspace. See more [in AWS Docs](https://docs.aws.amazon.com/prometheus/latest/userguide/encryption-at-rest-Amazon-Service-Prometheus.html) * `logging_configuration` - (Optional) Logging configuration for the workspace. See [Logging Configuration](#logging-configuration) below for details. @@ -137,4 +138,4 @@ Using `terraform import`, import AMP Workspaces using the identifier. 
For exampl % terraform import aws_prometheus_workspace.demo ws-C6DCB907-F2D7-4D96-957B-66691F865D8B ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/prometheus_workspace_configuration.html.markdown b/website/docs/cdktf/python/r/prometheus_workspace_configuration.html.markdown index 03b6bddcd64b..44dd8a0464bf 100644 --- a/website/docs/cdktf/python/r/prometheus_workspace_configuration.html.markdown +++ b/website/docs/cdktf/python/r/prometheus_workspace_configuration.html.markdown @@ -23,33 +23,31 @@ from cdktf import TerraformStack # Provider bindings are generated by running `cdktf get`. # See https://cdk.tf/provider-generation for more details. # -from imports.aws. import PrometheusWorkspaceConfiguration from imports.aws.prometheus_workspace import PrometheusWorkspace +from imports.aws.prometheus_workspace_configuration import PrometheusWorkspaceConfiguration class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) example = PrometheusWorkspace(self, "example") aws_prometheus_workspace_configuration_example = PrometheusWorkspaceConfiguration(self, "example_1", - limits_per_label_set=[{ - "label_set": [{ + limits_per_label_set=[PrometheusWorkspaceConfigurationLimitsPerLabelSet( + label_set={ "env": "dev" - } - ], - "limits": [{ - "max_series": 100000 - } + }, + limits=[PrometheusWorkspaceConfigurationLimitsPerLabelSetLimits( + max_series=100000 + ) ] - }, { - "label_set": [{ + ), PrometheusWorkspaceConfigurationLimitsPerLabelSet( + label_set={ "env": "prod" - } - ], - "limits": [{ - "max_series": 400000 - } + }, + limits=[PrometheusWorkspaceConfigurationLimitsPerLabelSetLimits( + max_series=400000 + ) ] - } + ) ], retention_period_in_days=60, workspace_id=example.id @@ -72,21 +70,21 @@ from cdktf import TerraformStack # Provider bindings are generated by running `cdktf get`. # See https://cdk.tf/provider-generation for more details. # -from imports.aws. 
import PrometheusWorkspaceConfiguration from imports.aws.prometheus_workspace import PrometheusWorkspace +from imports.aws.prometheus_workspace_configuration import PrometheusWorkspaceConfiguration class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) example = PrometheusWorkspace(self, "example") aws_prometheus_workspace_configuration_example = PrometheusWorkspaceConfiguration(self, "example_1", - limits_per_label_set=[{ - "label_set": [{}], - "limits": [{ - "max_series": 50000 - } + limits_per_label_set=[PrometheusWorkspaceConfigurationLimitsPerLabelSet( + label_set={}, + limits=[PrometheusWorkspaceConfigurationLimitsPerLabelSetLimits( + max_series=50000 + ) ] - } + ) ], workspace_id=example.id ) @@ -102,15 +100,15 @@ The following arguments are required: The following arguments are optional: -* `retention_period_in_days` - (Optional) Number of days to retain metric data in the workspace. * `limits_per_label_set` - (Optional) Configuration block for setting limits on metrics with specific label sets. Detailed below. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `retention_period_in_days` - (Optional) Number of days to retain metric data in the workspace. ### `limits_per_label_set` The `limits_per_label_set` configuration block supports the following arguments: * `label_set` - (Required) Map of label key-value pairs that identify the metrics to which the limits apply. An empty map represents the default bucket for metrics that don't match any other label set. - * `limits` - (Required) Configuration block for the limits to apply to the specified label set. Detailed below. 
#### `limits` @@ -142,7 +140,7 @@ from cdktf import TerraformStack # Provider bindings are generated by running `cdktf get`. # See https://cdk.tf/provider-generation for more details. # -from imports.aws. import PrometheusWorkspaceConfiguration +from imports.aws.prometheus_workspace_configuration import PrometheusWorkspaceConfiguration class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) @@ -155,4 +153,4 @@ Using `terraform import`, import AMP (Managed Prometheus) Workspace Configuratio % terraform import aws_prometheus_workspace_configuration.example ws-12345678-abcd-1234-abcd-123456789012 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/proxy_protocol_policy.html.markdown b/website/docs/cdktf/python/r/proxy_protocol_policy.html.markdown index 0154d645efbf..f83c8707c75d 100644 --- a/website/docs/cdktf/python/r/proxy_protocol_policy.html.markdown +++ b/website/docs/cdktf/python/r/proxy_protocol_policy.html.markdown @@ -53,6 +53,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `load_balancer` - (Required) The load balancer to which the policy should be attached. * `instance_ports` - (Required) List of instance ports to which the policy @@ -65,4 +66,4 @@ This resource exports the following attributes in addition to the arguments abov * `id` - The ID of the policy. * `load_balancer` - The load balancer to which the policy is attached. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/qbusiness_application.html.markdown b/website/docs/cdktf/python/r/qbusiness_application.html.markdown index 10f7ad931355..a4b871820907 100644 --- a/website/docs/cdktf/python/r/qbusiness_application.html.markdown +++ b/website/docs/cdktf/python/r/qbusiness_application.html.markdown @@ -49,6 +49,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` - (Optional) Description of the Amazon Q application. * `encryption_configuration` - (Optional) Information about encryption configuration. See [`encryption_configuration`](#encryption_configuration) below. @@ -102,4 +103,4 @@ Using `terraform import`, import a Q Business Application using the `id`. For ex % terraform import aws_qbusiness_application.example id-12345678 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/qldb_ledger.html.markdown b/website/docs/cdktf/python/r/qldb_ledger.html.markdown index 57cb44dca132..1c4741adc655 100644 --- a/website/docs/cdktf/python/r/qldb_ledger.html.markdown +++ b/website/docs/cdktf/python/r/qldb_ledger.html.markdown @@ -38,6 +38,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
* `deletion_protection` - (Optional) The deletion protection for the QLDB Ledger instance. By default it is `true`. To delete this resource via Terraform, this value must be configured to `false` and applied first before attempting deletion. * `kms_key` - (Optional) The key in AWS Key Management Service (AWS KMS) to use for encryption of data at rest in the ledger. For more information, see the [AWS documentation](https://docs.aws.amazon.com/qldb/latest/developerguide/encryption-at-rest.html). Valid values are `"AWS_OWNED_KMS_KEY"` to use an AWS KMS key that is owned and managed by AWS on your behalf, or the ARN of a valid symmetric customer managed KMS key. * `name` - (Optional) The friendly name for the QLDB Ledger instance. By default generated by Terraform. @@ -84,4 +85,4 @@ Using `terraform import`, import QLDB Ledgers using the `name`. For example: % terraform import aws_qldb_ledger.sample-ledger sample-ledger ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/qldb_stream.html.markdown b/website/docs/cdktf/python/r/qldb_stream.html.markdown index beac25f14243..2c1788cfebea 100644 --- a/website/docs/cdktf/python/r/qldb_stream.html.markdown +++ b/website/docs/cdktf/python/r/qldb_stream.html.markdown @@ -45,6 +45,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `exclusive_end_time` - (Optional) The exclusive date and time that specifies when the stream ends. If you don't define this parameter, the stream runs indefinitely until you cancel it. It must be in ISO 8601 date and time format and in Universal Coordinated Time (UTC). For example: `"2019-06-13T21:36:34Z"`. 
* `inclusive_start_time` - (Required) The inclusive start date and time from which to start streaming journal data. This parameter must be in ISO 8601 date and time format and in Universal Coordinated Time (UTC). For example: `"2019-06-13T21:36:34Z"`. This cannot be in the future and must be before `exclusive_end_time`. If you provide a value that is before the ledger's `CreationDateTime`, QLDB effectively defaults it to the ledger's `CreationDateTime`. * `kinesis_configuration` - (Required) The configuration settings of the Kinesis Data Streams destination for your stream request. Documented below. @@ -75,4 +76,4 @@ This resource exports the following attributes in addition to the arguments abov - `create` - (Default `8m`) - `delete` - (Default `5m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/quicksight_account_settings.html.markdown b/website/docs/cdktf/python/r/quicksight_account_settings.html.markdown index e55a31958322..96b64ff74f5c 100644 --- a/website/docs/cdktf/python/r/quicksight_account_settings.html.markdown +++ b/website/docs/cdktf/python/r/quicksight_account_settings.html.markdown @@ -24,7 +24,7 @@ from cdktf import TerraformStack # Provider bindings are generated by running `cdktf get`. # See https://cdk.tf/provider-generation for more details. # -from imports.aws. import QuicksightAccountSettings +from imports.aws.quicksight_account_settings import QuicksightAccountSettings from imports.aws.quicksight_account_subscription import QuicksightAccountSubscription class MyConvertedCode(TerraformStack): def __init__(self, scope, name): @@ -45,14 +45,13 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `aws_account_id` - (Optional, Forces new resource) AWS account ID. Defaults to automatically determined account ID of the Terraform AWS provider. * `default_namespace` - (Optional) The default namespace for this Amazon Web Services account. 
Currently, the default is `default`. * `termination_protection_enabled` - (Optional) A boolean value that determines whether or not an Amazon QuickSight account can be deleted. If `true`, it does not allow the account to be deleted and results in an error message if a user tries to make a DeleteAccountSubscription request. If `false`, it will allow the account to be deleted. ## Attribute Reference -This resource exports the following attributes in addition to the arguments above: - -* `aws_account_id` - The ID for the AWS account that contains the settings. +This resource exports no additional attributes. ## Import @@ -66,7 +65,7 @@ from cdktf import TerraformStack # Provider bindings are generated by running `cdktf get`. # See https://cdk.tf/provider-generation for more details. # -from imports.aws. import QuicksightAccountSettings +from imports.aws.quicksight_account_settings import QuicksightAccountSettings class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) @@ -79,4 +78,4 @@ Using `terraform import`, import QuickSight Account Settings using the AWS accou % terraform import aws_quicksight_account_settings.example "012345678901" ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/quicksight_account_subscription.html.markdown b/website/docs/cdktf/python/r/quicksight_account_subscription.html.markdown index 6c09d2d9b4c7..3dae325d6e27 100644 --- a/website/docs/cdktf/python/r/quicksight_account_subscription.html.markdown +++ b/website/docs/cdktf/python/r/quicksight_account_subscription.html.markdown @@ -50,7 +50,7 @@ The following arguments are optional: * `active_directory_name` - (Optional) Name of your Active Directory. This field is required if `ACTIVE_DIRECTORY` is the selected authentication method of the new Amazon QuickSight account. * `admin_group` - (Optional) Admin group associated with your Active Directory or IAM Identity Center account. 
This field is required if `ACTIVE_DIRECTORY` or `IAM_IDENTITY_CENTER` is the selected authentication method of the new Amazon QuickSight account. * `author_group` - (Optional) Author group associated with your Active Directory or IAM Identity Center account. -* `aws_account_id` - (Optional) AWS account ID hosting the QuickSight account. Default to provider account. +* `aws_account_id` - (Optional, Forces new resource) AWS account ID. Defaults to automatically determined account ID of the Terraform AWS provider. * `contact_number` - (Optional) A 10-digit phone number for the author of the Amazon QuickSight account to use for future communications. This field is required if `ENTERPPRISE_AND_Q` is the selected edition of the new Amazon QuickSight account. * `directory_id` - (Optional) Active Directory ID that is associated with your Amazon QuickSight account. * `email_address` - (Optional) Email address of the author of the Amazon QuickSight account to use for future communications. This field is required if `ENTERPPRISE_AND_Q` is the selected edition of the new Amazon QuickSight account. @@ -59,6 +59,7 @@ The following arguments are optional: * `last_name` - (Optional) Last name of the author of the Amazon QuickSight account to use for future communications. This field is required if `ENTERPPRISE_AND_Q` is the selected edition of the new Amazon QuickSight account. * `reader_group` - (Optional) Reader group associated with your Active Directory or IAM Identity Center account. * `realm` - (Optional) Realm of the Active Directory that is associated with your Amazon QuickSight account. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
## Attribute Reference @@ -75,6 +76,29 @@ This resource exports the following attributes in addition to the arguments abov ## Import -You cannot import this resource. +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import a QuickSight Account Subscription using `aws_account_id`. For example: - \ No newline at end of file +~> Due to the absence of required arguments in the [`DescribeAccountSettings`](https://docs.aws.amazon.com/quicksight/latest/APIReference/API_DescribeAccountSettings.html) API response, importing an existing account subscription will result in a planned replacement on the subsequent `apply` operation. Until the Describe API response is extended to include all configurable arguments, an [`ignore_changes` lifecycle argument](https://developer.hashicorp.com/terraform/language/meta-arguments/lifecycle#ignore_changes) can be used to suppress differences on arguments not read into state. + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.quicksight_account_subscription import QuicksightAccountSubscription +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + QuicksightAccountSubscription.generate_config_for_import(self, "example", "012345678901") +``` + +Using `terraform import`, import a QuickSight Account Subscription using `aws_account_id`.
For example: + +```console +% terraform import aws_quicksight_account_subscription.example "012345678901" +``` + + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/quicksight_analysis.html.markdown b/website/docs/cdktf/python/r/quicksight_analysis.html.markdown index acef620b66bb..0e88108c0247 100644 --- a/website/docs/cdktf/python/r/quicksight_analysis.html.markdown +++ b/website/docs/cdktf/python/r/quicksight_analysis.html.markdown @@ -123,11 +123,12 @@ The following arguments are required: The following arguments are optional: -* `aws_account_id` - (Optional, Forces new resource) AWS account ID. +* `aws_account_id` - (Optional, Forces new resource) AWS account ID. Defaults to automatically determined account ID of the Terraform AWS provider. * `definition` - (Optional) A detailed analysis definition. Only one of `definition` or `source_entity` should be configured. See [definition](#definition). * `parameters` - (Optional) The parameters for the creation of the analysis, which you want to use to override the default settings. An analysis can have any type of parameters, and some parameters might accept multiple values. See [parameters](#parameters). * `permissions` - (Optional) A set of resource permissions on the analysis. Maximum of 64 items. See [permissions](#permissions). * `recovery_window_in_days` - (Optional) A value that specifies the number of days that Amazon QuickSight waits before it deletes the analysis. Use `0` to force deletion without recovery. Minimum value of `7`. Maximum value of `30`. Default to `30`. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `source_entity` - (Optional) The entity that you are using as a source when you create the analysis (template). 
Only one of `definition` or `source_entity` should be configured. See [source_entity](#source_entity). * `tags` - (Optional) Key-value map of resource tags. If configured with a provider [`default_tags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. * `theme_arn` - (Optional) The Amazon Resource Name (ARN) of the theme that is being used for this analysis. The theme ARN must exist in the same AWS account where you create the analysis. @@ -212,4 +213,4 @@ Using `terraform import`, import a QuickSight Analysis using the AWS account ID % terraform import aws_quicksight_analysis.example 123456789012,example-id ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/quicksight_custom_permissions.html.markdown b/website/docs/cdktf/python/r/quicksight_custom_permissions.html.markdown new file mode 100644 index 000000000000..9ed22be4c9f8 --- /dev/null +++ b/website/docs/cdktf/python/r/quicksight_custom_permissions.html.markdown @@ -0,0 +1,97 @@ +--- +subcategory: "QuickSight" +layout: "aws" +page_title: "AWS: aws_quicksight_custom_permissions" +description: |- + Manages a QuickSight custom permissions profile. +--- + + + +# Resource: aws_quicksight_custom_permissions + +Manages a QuickSight custom permissions profile. + +## Example Usage + +resource "aws_quicksight_custom_permissions" "example" { + custom_permissions_name = "example-permissions" + + capabilities { + print_reports = "DENY" + share_dashboards = "DENY" + } +} + +## Argument Reference + +The following arguments are required: + +* `capabilities` - (Required) Actions to include in the custom permissions profile. See [capabilities](#capabilities). +* `custom_permissions_name` - (Required, Forces new resource) Custom permissions profile name. 
+ +The following arguments are optional: + +* `aws_account_id` - (Optional, Forces new resource) AWS account ID. Defaults to automatically determined account ID of the Terraform AWS provider. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `tags` - (Optional) Key-value map of resource tags. If configured with a provider [`default_tags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. + +### capabilities + +* `add_or_run_anomaly_detection_for_analyses` - (Optional) The ability to add or run anomaly detection. Valid values: `DENY`. +* `create_and_update_dashboard_email_reports` - (Optional) The ability to create and update email reports. Valid values: `DENY`. +* `create_and_update_datasets` - (Optional) The ability to create and update datasets. Valid values: `DENY`. +* `create_and_update_data_sources` - (Optional) The ability to create and update data sources. Valid values: `DENY`. +* `create_and_update_themes` - (Optional) The ability to export to create and update themes. Valid values: `DENY`. +* `create_and_update_threshold_alerts` - (Optional) The ability to create and update threshold alerts. Valid values: `DENY`. +* `create_shared_folders` - (Optional) The ability to create shared folders. Valid values: `DENY`. +* `create_spice_dataset` - (Optional) The ability to create a SPICE dataset. Valid values: `DENY`. +* `export_to_csv` - (Optional) The ability to export to CSV files from the UI. Valid values: `DENY`. +* `export_to_csv_in_scheduled_reports` - (Optional) The ability to export to CSV files in scheduled email reports. Valid values: `DENY`. 
+* `export_to_excel` - (Optional) The ability to export to Excel files from the UI. Valid values: `DENY`. +* `export_to_excel_in_scheduled_reports` - (Optional) The ability to export to Excel files in scheduled email reports. Valid values: `DENY`. +* `export_to_pdf` - (Optional) The ability to export to PDF files from the UI. Valid values: `DENY`. +* `export_to_pdf_in_scheduled_reports` - (Optional) The ability to export to PDF files in scheduled email reports. Valid values: `DENY`. +* `include_content_in_scheduled_reports_email` - (Optional) The ability to include content in scheduled email reports. Valid values: `DENY`. +* `print_reports` - (Optional) The ability to print reports. Valid values: `DENY`. +* `rename_shared_folders` - (Optional) The ability to rename shared folders. Valid values: `DENY`. +* `share_analyses` - (Optional) The ability to share analyses. Valid values: `DENY`. +* `share_dashboards` - (Optional) The ability to share dashboards. Valid values: `DENY`. +* `share_datasets` - (Optional) The ability to share datasets. Valid values: `DENY`. +* `share_data_sources` - (Optional) The ability to share data sources. Valid values: `DENY`. +* `subscribe_dashboard_email_reports` - (Optional) The ability to subscribe to email reports. Valid values: `DENY`. +* `view_account_spice_capacity` - (Optional) The ability to view account SPICE capacity. Valid values: `DENY`. + +## Attribute Reference + +This resource exports the following attributes in addition to the arguments above: + +* `arn` - ARN of the custom permissions profile. +* `tags_all` - A map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block). 
+ +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import a QuickSight custom permissions profile using the AWS account ID and custom permissions profile name separated by a comma (`,`). For example: + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.quicksight_custom_permissions import QuicksightCustomPermissions +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + QuicksightCustomPermissions.generate_config_for_import(self, "example", "123456789012,example-permissions") +``` + +Using `terraform import`, import a QuickSight custom permissions profile using the AWS account ID and custom permissions profile name separated by a comma (`,`). For example: + +```console +% terraform import aws_quicksight_custom_permissions.example 123456789012,example-permissions +``` + + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/quicksight_dashboard.html.markdown b/website/docs/cdktf/python/r/quicksight_dashboard.html.markdown index b006d30008d0..0ea64949e47a 100644 --- a/website/docs/cdktf/python/r/quicksight_dashboard.html.markdown +++ b/website/docs/cdktf/python/r/quicksight_dashboard.html.markdown @@ -126,11 +126,12 @@ The following arguments are required: The following arguments are optional: -* `aws_account_id` - (Optional, Forces new resource) AWS account ID. +* `aws_account_id` - (Optional, Forces new resource) AWS account ID. Defaults to automatically determined account ID of the Terraform AWS provider. * `dashboard_publish_options` - (Optional) Options for publishing the dashboard. See [dashboard_publish_options](#dashboard_publish_options). 
* `definition` - (Optional) A detailed dashboard definition. Only one of `definition` or `source_entity` should be configured. See [definition](#definition). * `parameters` - (Optional) The parameters for the creation of the dashboard, which you want to use to override the default settings. A dashboard can have any type of parameters, and some parameters might accept multiple values. See [parameters](#parameters). * `permissions` - (Optional) A set of resource permissions on the dashboard. Maximum of 64 items. See [permissions](#permissions). +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `source_entity` - (Optional) The entity that you are using as a source when you create the dashboard (template). Only one of `definition` or `source_entity` should be configured. See [source_entity](#source_entity). * `tags` - (Optional) Key-value map of resource tags. If configured with a provider [`default_tags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. * `theme_arn` - (Optional) The Amazon Resource Name (ARN) of the theme that is being used for this dashboard. The theme ARN must exist in the same AWS account where you create the dashboard. 
@@ -270,4 +271,4 @@ Using `terraform import`, import a QuickSight Dashboard using the AWS account ID % terraform import aws_quicksight_dashboard.example 123456789012,example-id ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/quicksight_data_set.html.markdown b/website/docs/cdktf/python/r/quicksight_data_set.html.markdown index dd387cfc248d..142119c2749f 100644 --- a/website/docs/cdktf/python/r/quicksight_data_set.html.markdown +++ b/website/docs/cdktf/python/r/quicksight_data_set.html.markdown @@ -231,7 +231,7 @@ The following arguments are required: The following arguments are optional: -* `aws_account_id` - (Optional, Forces new resource) AWS account ID. +* `aws_account_id` - (Optional, Forces new resource) AWS account ID. Defaults to automatically determined account ID of the Terraform AWS provider. * `column_groups` - (Optional) Groupings of columns that work together in certain Amazon QuickSight features. Currently, only geospatial hierarchy is supported. See [column_groups](#column_groups). * `column_level_permission_rules` - (Optional) A set of 1 or more definitions of a [ColumnLevelPermissionRule](https://docs.aws.amazon.com/quicksight/latest/APIReference/API_ColumnLevelPermissionRule.html). See [column_level_permission_rules](#column_level_permission_rules). * `data_set_usage_configuration` - (Optional) The usage configuration to apply to child datasets that reference this dataset as a source. See [data_set_usage_configuration](#data_set_usage_configuration). @@ -239,6 +239,7 @@ The following arguments are optional: * `logical_table_map` - (Optional) Configures the combination and transformation of the data from the physical tables. Maximum of 1 entry. See [logical_table_map](#logical_table_map). * `permissions` - (Optional) A set of resource permissions on the data source. Maximum of 64 items. See [permissions](#permissions). 
* `physical_table_map` - (Optional) Declares the physical tables that are available in the underlying data sources. See [physical_table_map](#physical_table_map). +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `row_level_permission_data_set` - (Optional) The row-level security configuration for the data that you want to create. See [row_level_permission_data_set](#row_level_permission_data_set). * `row_level_permission_tag_configuration` - (Optional) The configuration of tags on a dataset to set row-level security. Row-level security tags are currently supported for anonymous embedding only. See [row_level_permission_tag_configuration](#row_level_permission_tag_configuration). * `refresh_properties` - (Optional) The refresh properties for the data set. **NOTE**: Only valid when `import_mode` is set to `SPICE`. See [refresh_properties](#refresh_properties). @@ -455,8 +456,17 @@ This resource exports the following attributes in addition to the arguments abov * `arn` - Amazon Resource Name (ARN) of the data set. * `id` - A comma-delimited string joining AWS account ID and data set ID. +* `output_columns` - The final set of columns available for use in analyses and dashboards after all data preparation and transformation steps have been applied within the data set. See [`output_columns` Block](#output_columns-block) below. * `tags_all` - A map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block). +### `output_columns` Block + +The `output_columns` block has the following attributes. + +* `name` - The name of the column. +* `description` - The description of the column. 
+* `type` - The data type of the column. + ## Import In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import a QuickSight Data Set using the AWS account ID and data set ID separated by a comma (`,`). For example: @@ -482,4 +492,4 @@ Using `terraform import`, import a QuickSight Data Set using the AWS account ID % terraform import aws_quicksight_data_set.example 123456789012,example-id ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/quicksight_data_source.html.markdown b/website/docs/cdktf/python/r/quicksight_data_source.html.markdown index 55b88d99790c..1e26c37ce24a 100644 --- a/website/docs/cdktf/python/r/quicksight_data_source.html.markdown +++ b/website/docs/cdktf/python/r/quicksight_data_source.html.markdown @@ -100,7 +100,7 @@ class MyConvertedCode(TerraformStack): content=Token.as_string( Fn.jsonencode({ "file_locations": [{ - "URIPrefixes": ["https://${" + example.id + "}.s3-${" + data_aws_region_current.name + "}.${" + data_aws_partition_current.dns_suffix + "}" + "URIPrefixes": ["https://${" + example.id + "}.s3-${" + data_aws_region_current.region + "}.${" + data_aws_partition_current.dns_suffix + "}" ] } ], @@ -170,9 +170,10 @@ The following arguments are required: The following arguments are optional: -* `aws_account_id` - (Optional, Forces new resource) The ID for the AWS account that the data source is in. Currently, you use the ID for the AWS account that contains your Amazon QuickSight account. +* `aws_account_id` - (Optional, Forces new resource) AWS account ID. Defaults to automatically determined account ID of the Terraform AWS provider. * `credentials` - (Optional) The credentials Amazon QuickSight uses to connect to your underlying source. See [Credentials](#credentials-argument-reference) below for more details. * `permission` - (Optional) A set of resource permissions on the data source. Maximum of 64 items. 
See [Permission](#permission-argument-reference) below for more details. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `ssl_properties` - (Optional) Secure Socket Layer (SSL) properties that apply when Amazon QuickSight connects to your underlying source. See [SSL Properties](#ssl_properties-argument-reference) below for more details. * `tags` - (Optional) Key-value map of resource tags. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. * `vpc_connection_properties`- (Optional) Use this parameter only when you want Amazon QuickSight to use a VPC connection when connecting to your underlying source. See [VPC Connection Properties](#vpc_connection_properties-argument-reference) below for more details. @@ -378,4 +379,4 @@ Using `terraform import`, import a QuickSight data source using the AWS account % terraform import aws_quicksight_data_source.example 123456789123/my-data-source-id ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/quicksight_folder.html.markdown b/website/docs/cdktf/python/r/quicksight_folder.html.markdown index 7d1773d5c2bd..9c8ce641b7a3 100644 --- a/website/docs/cdktf/python/r/quicksight_folder.html.markdown +++ b/website/docs/cdktf/python/r/quicksight_folder.html.markdown @@ -94,10 +94,11 @@ The following arguments are required: The following arguments are optional: -* `aws_account_id` - (Optional, Forces new resource) AWS account ID. +* `aws_account_id` - (Optional, Forces new resource) AWS account ID. 
Defaults to automatically determined account ID of the Terraform AWS provider. * `folder_type` - (Optional) The type of folder. By default, it is `SHARED`. Valid values are: `SHARED`. * `parent_folder_arn` - (Optional) The Amazon Resource Name (ARN) for the parent folder. If not set, creates a root-level folder. * `permissions` - (Optional) A set of resource permissions on the folder. Maximum of 64 items. See [permissions](#permissions). +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `tags` - (Optional) Key-value map of resource tags. If configured with a provider [`default_tags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. ### permissions @@ -150,4 +151,4 @@ Using `terraform import`, import a QuickSight folder using the AWS account ID an % terraform import aws_quicksight_folder.example 123456789012,example-id ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/quicksight_folder_membership.html.markdown b/website/docs/cdktf/python/r/quicksight_folder_membership.html.markdown index e2e6899e5d58..dabf195d29b2 100644 --- a/website/docs/cdktf/python/r/quicksight_folder_membership.html.markdown +++ b/website/docs/cdktf/python/r/quicksight_folder_membership.html.markdown @@ -45,7 +45,8 @@ The following arguments are required: The following arguments are optional: -* `aws_account_id` - (Optional, Forces new resource) AWS account ID. +* `aws_account_id` - (Optional, Forces new resource) AWS account ID. Defaults to automatically determined account ID of the Terraform AWS provider. 
+* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). ## Attribute Reference @@ -78,4 +79,4 @@ Using `terraform import`, import QuickSight Folder Membership using the AWS acco % terraform import aws_quicksight_folder_membership.example 123456789012,example-folder,DATASET,example-dataset ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/quicksight_group.html.markdown b/website/docs/cdktf/python/r/quicksight_group.html.markdown index 99139cda655c..7944a054e65f 100644 --- a/website/docs/cdktf/python/r/quicksight_group.html.markdown +++ b/website/docs/cdktf/python/r/quicksight_group.html.markdown @@ -35,10 +35,11 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: -* `group_name` - (Required) A name for the group. -* `aws_account_id` - (Optional) The ID for the AWS account that the group is in. Currently, you use the ID for the AWS account that contains your Amazon QuickSight account. +* `aws_account_id` - (Optional, Forces new resource) AWS account ID. Defaults to automatically determined account ID of the Terraform AWS provider. * `description` - (Optional) A description for the group. +* `group_name` - (Required) A name for the group. * `namespace` - (Optional) The namespace. Currently, you should set this to `default`. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
## Attribute Reference @@ -71,4 +72,4 @@ Using `terraform import`, import QuickSight Group using the aws account id, name % terraform import aws_quicksight_group.example 123456789123/default/tf-example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/quicksight_group_membership.html.markdown b/website/docs/cdktf/python/r/quicksight_group_membership.html.markdown index 7dbf78f47a60..9eca7dc749e5 100644 --- a/website/docs/cdktf/python/r/quicksight_group_membership.html.markdown +++ b/website/docs/cdktf/python/r/quicksight_group_membership.html.markdown @@ -36,10 +36,11 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `aws_account_id` - (Optional, Forces new resource) AWS account ID. Defaults to automatically determined account ID of the Terraform AWS provider. * `group_name` - (Required) The name of the group in which the member will be added. * `member_name` - (Required) The name of the member to add to the group. -* `aws_account_id` - (Optional) The ID for the AWS account that the group is in. Currently, you use the ID for the AWS account that contains your Amazon QuickSight account. -* `namespace` - (Required) The namespace that you want the user to be a part of. Defaults to `default`. +* `namespace` - (Optional) The namespace that you want the user to be a part of. Defaults to `default`. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
## Attribute Reference @@ -70,4 +71,4 @@ Using `terraform import`, import QuickSight Group membership using the AWS accou % terraform import aws_quicksight_group_membership.example 123456789123/default/all-access-users/john_smith ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/quicksight_iam_policy_assignment.html.markdown b/website/docs/cdktf/python/r/quicksight_iam_policy_assignment.html.markdown index 64936554084f..14d1177aa30f 100644 --- a/website/docs/cdktf/python/r/quicksight_iam_policy_assignment.html.markdown +++ b/website/docs/cdktf/python/r/quicksight_iam_policy_assignment.html.markdown @@ -48,10 +48,11 @@ The following arguments are required: The following arguments are optional: -* `aws_account_id` - (Optional) AWS account ID. +* `aws_account_id` - (Optional, Forces new resource) AWS account ID. Defaults to automatically determined account ID of the Terraform AWS provider. * `identities` - (Optional) Amazon QuickSight users, groups, or both to assign the policy to. See [`identities` block](#identities-block). * `namespace` - (Optional) Namespace that contains the assignment. Defaults to `default`. * `policy_arn` - (Optional) ARN of the IAM policy to apply to the Amazon QuickSight users and groups specified in this assignment. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
### `identities` block @@ -90,4 +91,4 @@ Using `terraform import`, import QuickSight IAM Policy Assignment using the AWS % terraform import aws_quicksight_iam_policy_assignment.example 123456789012,default,example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/quicksight_ingestion.html.markdown b/website/docs/cdktf/python/r/quicksight_ingestion.html.markdown index 0a5d8bf9370b..659a03a9d5ca 100644 --- a/website/docs/cdktf/python/r/quicksight_ingestion.html.markdown +++ b/website/docs/cdktf/python/r/quicksight_ingestion.html.markdown @@ -45,7 +45,8 @@ The following arguments are required: The following arguments are optional: -* `aws_account_id` - (Optional) AWS account ID. +* `aws_account_id` - (Optional, Forces new resource) AWS account ID. Defaults to automatically determined account ID of the Terraform AWS provider. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). ## Attribute Reference @@ -80,4 +81,4 @@ Using `terraform import`, import QuickSight Ingestion using the AWS account ID, % terraform import aws_quicksight_ingestion.example 123456789012,example-dataset-id,example-ingestion-id ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/quicksight_ip_restriction.html.markdown b/website/docs/cdktf/python/r/quicksight_ip_restriction.html.markdown new file mode 100644 index 000000000000..07a3b9f82972 --- /dev/null +++ b/website/docs/cdktf/python/r/quicksight_ip_restriction.html.markdown @@ -0,0 +1,82 @@ +--- +subcategory: "QuickSight" +layout: "aws" +page_title: "AWS: aws_quicksight_ip_restriction" +description: |- + Manages the content and status of IP rules. 
+--- + + + +# Resource: aws_quicksight_ip_restriction + +Manages the content and status of IP rules. + +~> Deletion of this resource clears all IP restrictions from a QuickSight account. + +## Example Usage + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.quicksight_ip_restriction import QuicksightIpRestriction +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + QuicksightIpRestriction(self, "example", + enabled=True, + ip_restriction_rule_map={ + "108.56.166.202/32": "Allow self" + }, + vpc_id_restriction_rule_map={ + "${(aws_vpc.example.id)}": "Main VPC" + } + ) +``` + +## Argument Reference + +This resource supports the following arguments: + +* `aws_account_id` - (Optional, Forces new resource) AWS account ID. Defaults to automatically determined account ID of the Terraform AWS provider. +* `enabled` - (Required) Whether IP rules are turned on. +* `ip_restriction_rule_map` - (Optional) Map of allowed IPv4 CIDR ranges and descriptions. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `vpc_endpoint_id_restriction_rule_map` - (Optional) Map of allowed VPC endpoint IDs and descriptions. +* `vpc_id_restriction_rule_map` - (Optional) Map of VPC IDs and descriptions. Traffic from all VPC endpoints that are present in the specified VPC is allowed. + +## Attribute Reference + +This resource exports no additional attributes. 
+ +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import QuickSight IP restriction using the AWS account ID. For example: + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.quicksight_ip_restriction import QuicksightIpRestriction +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + QuicksightIpRestriction.generate_config_for_import(self, "example", "012345678901") +``` + +Using `terraform import`, import QuickSight IP restriction using the AWS account ID. For example: + +```console +% terraform import aws_quicksight_ip_restriction.example "012345678901" +``` + + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/quicksight_key_registration.html.markdown b/website/docs/cdktf/python/r/quicksight_key_registration.html.markdown new file mode 100644 index 000000000000..5b5adb8a0fb0 --- /dev/null +++ b/website/docs/cdktf/python/r/quicksight_key_registration.html.markdown @@ -0,0 +1,84 @@ +--- +subcategory: "QuickSight" +layout: "aws" +page_title: "AWS: aws_quicksight_key_registration" +description: |- + Registers customer managed keys in an Amazon QuickSight account. +--- + + + +# Resource: aws_quicksight_key_registration + +Registers customer managed keys in an Amazon QuickSight account. + +~> Deletion of this resource clears all CMK registrations from a QuickSight account. QuickSight then uses AWS owned keys to encrypt your resources. + +## Example Usage + +```python +# DO NOT EDIT. 
Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.quicksight_key_registration import QuicksightKeyRegistration +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + QuicksightKeyRegistration(self, "example", + key_registration=[QuicksightKeyRegistrationKeyRegistration( + key_arn=example1.arn + ), QuicksightKeyRegistrationKeyRegistration( + default_key=True, + key_arn=example2.arn + ) + ] + ) +``` + +## Argument Reference + +This resource supports the following arguments: + +* `aws_account_id` - (Optional, Forces new resource) AWS account ID. Defaults to automatically determined account ID of the Terraform AWS provider. +* `key_registration` - (Required) Registered keys. See [key_registration](#key_registration). +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). + +### key_registration + +* `default_key` - (Optional) Whether the key is set as the default key for encryption and decryption use. +* `key_arn` - (Required) ARN of the AWS KMS key that is registered for encryption and decryption use. + +## Attribute Reference + +This resource exports no additional attributes. + +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import QuickSight key registration using the AWS account ID. For example: + +```python +# DO NOT EDIT. 
Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.quicksight_key_registration import QuicksightKeyRegistration +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + QuicksightKeyRegistration.generate_config_for_import(self, "example", "012345678901") +``` + +Using `terraform import`, import QuickSight key registration using the AWS account ID. For example: + +```console +% terraform import aws_quicksight_key_registration.example "012345678901" +``` + + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/quicksight_namespace.html.markdown b/website/docs/cdktf/python/r/quicksight_namespace.html.markdown index 883e63e56cec..df951990c582 100644 --- a/website/docs/cdktf/python/r/quicksight_namespace.html.markdown +++ b/website/docs/cdktf/python/r/quicksight_namespace.html.markdown @@ -41,8 +41,9 @@ The following arguments are required: The following arguments are optional: -* `aws_account_id` - (Optional) AWS account ID. +* `aws_account_id` - (Optional, Forces new resource) AWS account ID. Defaults to automatically determined account ID of the Terraform AWS provider. * `identity_store` - (Optional) User identity directory type. Defaults to `QUICKSIGHT`, the only current valid value. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `tags` - (Optional) Key-value map of resource tags. 
If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. ## Attribute Reference @@ -87,4 +88,4 @@ Using `terraform import`, import QuickSight Namespace using the AWS account ID a % terraform import aws_quicksight_namespace.example 123456789012,example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/quicksight_refresh_schedule.html.markdown b/website/docs/cdktf/python/r/quicksight_refresh_schedule.html.markdown index ab1332da7285..6f15aca1a8ae 100644 --- a/website/docs/cdktf/python/r/quicksight_refresh_schedule.html.markdown +++ b/website/docs/cdktf/python/r/quicksight_refresh_schedule.html.markdown @@ -120,7 +120,8 @@ The following arguments are required: The following arguments are optional: -* `aws_account_id` - (Optional, Forces new resource) AWS account ID. +* `aws_account_id` - (Optional, Forces new resource) AWS account ID. Defaults to automatically determined account ID of the Terraform AWS provider. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
### schedule @@ -172,4 +173,4 @@ Using `terraform import`, import a QuickSight Refresh Schedule using the AWS acc % terraform import aws_quicksight_refresh_schedule.example 123456789012,dataset-id,schedule-id ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/quicksight_role_custom_permission.html.markdown b/website/docs/cdktf/python/r/quicksight_role_custom_permission.html.markdown new file mode 100644 index 000000000000..d390fd77d247 --- /dev/null +++ b/website/docs/cdktf/python/r/quicksight_role_custom_permission.html.markdown @@ -0,0 +1,77 @@ +--- +subcategory: "QuickSight" +layout: "aws" +page_title: "AWS: aws_quicksight_role_custom_permission" +description: |- + Manages the custom permissions that are associated with a role. +--- + + + +# Resource: aws_quicksight_role_custom_permission + +Manages the custom permissions that are associated with a role. + +## Example Usage + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import Token, TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.quicksight_role_custom_permission import QuicksightRoleCustomPermission +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + QuicksightRoleCustomPermission(self, "example", + custom_permissions_name=Token.as_string(aws_quicksight_custom_permissions_example.custom_permissions_name), + role="READER" + ) +``` + +## Argument Reference + +The following arguments are required: + +* `custom_permissions_name` - (Required, Forces new resource) Custom permissions profile name. +* `role` - (Required, Forces new resource) Role. Valid values are `ADMIN`, `AUTHOR`, `READER`, `ADMIN_PRO`, `AUTHOR_PRO`, and `READER_PRO`. 
+ +The following arguments are optional: + +* `aws_account_id` - (Optional, Forces new resource) AWS account ID. Defaults to automatically determined account ID of the Terraform AWS provider. +* `namespace` - (Optional, Forces new resource) Namespace containing the role. Defaults to `default`. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). + +## Attribute Reference + +This resource exports no additional attributes. + +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import QuickSight role custom permissions using a comma-delimited string combining the `aws_account_id`, `namespace` and `role`. For example: + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.quicksight_role_custom_permission import QuicksightRoleCustomPermission +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + QuicksightRoleCustomPermission.generate_config_for_import(self, "example", "012345678901,default,READER") +``` + +Using `terraform import`, import QuickSight role custom permissions using a comma-delimited string combining the `aws_account_id`, `namespace`, and `role`. 
For example: + +```console +% terraform import aws_quicksight_role_custom_permission.example 012345678901,default,READER +``` + + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/quicksight_role_membership.html.markdown b/website/docs/cdktf/python/r/quicksight_role_membership.html.markdown index b53b6a1dfd19..5b65d264d0f7 100644 --- a/website/docs/cdktf/python/r/quicksight_role_membership.html.markdown +++ b/website/docs/cdktf/python/r/quicksight_role_membership.html.markdown @@ -44,8 +44,9 @@ The following arguments are required: The following arguments are optional: -* `aws_account_id` - (Optional) AWS account ID. Defaults to the account of the caller identity if not configured. +* `aws_account_id` - (Optional, Forces new resource) AWS account ID. Defaults to automatically determined account ID of the Terraform AWS provider. * `namespace` - (Optional) Name of the namespace. Defaults to `default`. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
## Attribute Reference @@ -76,4 +77,4 @@ Using `terraform import`, import QuickSight Role Membership using a comma-delimi % terraform import aws_quicksight_role_membership.example 012345678901,default,READER,example-group ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/quicksight_template.html.markdown b/website/docs/cdktf/python/r/quicksight_template.html.markdown index 7dbce283bd3e..cc07d9577ae9 100644 --- a/website/docs/cdktf/python/r/quicksight_template.html.markdown +++ b/website/docs/cdktf/python/r/quicksight_template.html.markdown @@ -127,9 +127,10 @@ The following arguments are required: The following arguments are optional: -* `aws_account_id` - (Optional, Forces new resource) AWS account ID. +* `aws_account_id` - (Optional, Forces new resource) AWS account ID. Defaults to automatically determined account ID of the Terraform AWS provider. * `definition` - (Optional) A detailed template definition. Only one of `definition` or `source_entity` should be configured. See [definition](#definition). * `permissions` - (Optional) A set of resource permissions on the template. Maximum of 64 items. See [permissions](#permissions). +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `source_entity` - (Optional) The entity that you are using as a source when you create the template (analysis or template). Only one of `definition` or `source_entity` should be configured. See [source_entity](#source_entity). * `tags` - (Optional) Key-value map of resource tags. 
If configured with a provider [`default_tags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. @@ -213,4 +214,4 @@ Using `terraform import`, import a QuickSight Template using the AWS account ID % terraform import aws_quicksight_template.example 123456789012,example-id ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/quicksight_template_alias.html.markdown b/website/docs/cdktf/python/r/quicksight_template_alias.html.markdown index 4edcef96c85b..6da15ab2cb54 100644 --- a/website/docs/cdktf/python/r/quicksight_template_alias.html.markdown +++ b/website/docs/cdktf/python/r/quicksight_template_alias.html.markdown @@ -45,7 +45,8 @@ The following arguments are required: The following arguments are optional: -* `aws_account_id` - (Optional, Forces new resource) AWS account ID. +* `aws_account_id` - (Optional, Forces new resource) AWS account ID. Defaults to automatically determined account ID of the Terraform AWS provider. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
## Attribute Reference @@ -79,4 +80,4 @@ Using `terraform import`, import QuickSight Template Alias using the AWS account % terraform import aws_quicksight_template_alias.example 123456789012,example-id,example-alias ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/quicksight_theme.html.markdown b/website/docs/cdktf/python/r/quicksight_theme.html.markdown index 982bedd40454..8e7b214825a4 100644 --- a/website/docs/cdktf/python/r/quicksight_theme.html.markdown +++ b/website/docs/cdktf/python/r/quicksight_theme.html.markdown @@ -47,15 +47,16 @@ class MyConvertedCode(TerraformStack): The following arguments are required: -* `theme_id` - (Required, Forces new resource) Identifier of the theme. * `base_theme_id` - (Required) The ID of the theme that a custom theme will inherit from. All themes inherit from one of the starting themes defined by Amazon QuickSight. For a list of the starting themes, use ListThemes or choose Themes from within an analysis. -* `name` - (Required) Display name of the theme. * `configuration` - (Required) The theme configuration, which contains the theme display properties. See [configuration](#configuration). +* `name` - (Required) Display name of the theme. +* `theme_id` - (Required, Forces new resource) Identifier of the theme. The following arguments are optional: -* `aws_account_id` - (Optional, Forces new resource) AWS account ID. +* `aws_account_id` - (Optional, Forces new resource) AWS account ID. Defaults to automatically determined account ID of the Terraform AWS provider. * `permissions` - (Optional) A set of resource permissions on the theme. Maximum of 64 items. See [permissions](#permissions). +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `tags` - (Optional) Key-value map of resource tags. If configured with a provider [`default_tags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. * `version_description` - (Optional) A description of the current theme version being created/updated. @@ -175,4 +176,4 @@ Using `terraform import`, import a QuickSight Theme using the AWS account ID and % terraform import aws_quicksight_theme.example 123456789012,example-id ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/quicksight_user.html.markdown b/website/docs/cdktf/python/r/quicksight_user.html.markdown index 8a33fb414e16..26d3f61a1eb7 100644 --- a/website/docs/cdktf/python/r/quicksight_user.html.markdown +++ b/website/docs/cdktf/python/r/quicksight_user.html.markdown @@ -87,14 +87,15 @@ class MyConvertedCode(TerraformStack): The following arguments are required: * `email` - (Required) Email address of the user that you want to register. -* `identity_type` - (Required) Identity type that your Amazon QuickSight account uses to manage the identity of users. Valid values: `IAM`, `QUICKSIGHT`. -* `user_role` - (Required) Amazon QuickSight role for the user. Value values: `READER`, `AUTHOR`, `ADMIN`, `READER_PRO`, `AUTHOR_PRO`, `ADMIN_PRO`. +* `identity_type` - (Required) Identity type that your Amazon QuickSight account uses to manage the identity of users. Valid values: `IAM`, `QUICKSIGHT`, `IAM_IDENTITY_CENTER`. +* `user_role` - (Required) Amazon QuickSight role for the user. Valid values: `READER`, `AUTHOR`, `ADMIN`, `READER_PRO`, `AUTHOR_PRO`, `ADMIN_PRO`, `RESTRICTED_AUTHOR`, `RESTRICTED_READER`. 
The following arguments are optional: -* `aws_account_id` - (Optional) ID for the AWS account that the user is in. Use the ID for the AWS account that contains your Amazon QuickSight account. +* `aws_account_id` - (Optional, Forces new resource) AWS account ID. Defaults to automatically determined account ID of the Terraform AWS provider. * `iam_arn` - (Optional) ARN of the IAM user or role that you are registering with Amazon QuickSight. Required only for users with an identity type of `IAM`. * `namespace` - (Optional) The Amazon Quicksight namespace to create the user in. Defaults to `default`. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `session_name` - (Optional) Name of the IAM session to use when assuming roles that can embed QuickSight dashboards. Only valid for registering users using an assumed IAM role. Additionally, if registering multiple users using the same IAM role, each user needs to have a unique session name. * `user_name` - (Optional) Amazon QuickSight user name that you want to create for the user you are registering. Required only for users with an identity type of `QUICKSIGHT`. @@ -110,4 +111,4 @@ This resource exports the following attributes in addition to the arguments abov You cannot import this resource. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/quicksight_user_custom_permission.html.markdown b/website/docs/cdktf/python/r/quicksight_user_custom_permission.html.markdown new file mode 100644 index 000000000000..06d134ee8c3e --- /dev/null +++ b/website/docs/cdktf/python/r/quicksight_user_custom_permission.html.markdown @@ -0,0 +1,77 @@ +--- +subcategory: "QuickSight" +layout: "aws" +page_title: "AWS: aws_quicksight_user_custom_permission" +description: |- + Manages the custom permissions profile for a user. +--- + + + +# Resource: aws_quicksight_user_custom_permission + +Manages the custom permissions profile for a user. + +## Example Usage + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import Token, TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.quicksight_user_custom_permission import QuicksightUserCustomPermission +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + QuicksightUserCustomPermission(self, "example", + custom_permissions_name=Token.as_string(aws_quicksight_custom_permissions_example.custom_permissions_name), + user_name=Token.as_string(aws_quicksight_user_example.user_name) + ) +``` + +## Argument Reference + +The following arguments are required: + +* `custom_permissions_name` - (Required, Forces new resource) Custom permissions profile name. +* `user_name` - (Required, Forces new resource) Username of the user. + +The following arguments are optional: + +* `aws_account_id` - (Optional, Forces new resource) AWS account ID. Defaults to automatically determined account ID of the Terraform AWS provider. +* `namespace` - (Optional, Forces new resource) Namespace that the user belongs to. Defaults to `default`. 
+* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). + +## Attribute Reference + +This resource exports no additional attributes. + +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import QuickSight user custom permissions using a comma-delimited string combining the `aws_account_id`, `namespace` and `user_name`. For example: + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.quicksight_user_custom_permission import QuicksightUserCustomPermission +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + QuicksightUserCustomPermission.generate_config_for_import(self, "example", "012345678901,default,user1") +``` + +Using `terraform import`, import QuickSight user custom permissions using a comma-delimited string combining the `aws_account_id`, `namespace`, and `user_name`. 
For example: + +```console +% terraform import aws_quicksight_user_custom_permission.example 012345678901,default,user1 +``` + + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/quicksight_vpc_connection.html.markdown b/website/docs/cdktf/python/r/quicksight_vpc_connection.html.markdown index 1a18193f601d..735ecb4c90bb 100644 --- a/website/docs/cdktf/python/r/quicksight_vpc_connection.html.markdown +++ b/website/docs/cdktf/python/r/quicksight_vpc_connection.html.markdown @@ -79,8 +79,9 @@ The following arguments are required: The following arguments are optional: -* `aws_account_id` - (Optional) AWS account ID. +* `aws_account_id` - (Optional, Forces new resource) AWS account ID. Defaults to automatically determined account ID of the Terraform AWS provider. * `dns_resolvers` - (Optional) A list of IP addresses of DNS resolver endpoints for the VPC connection. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `tags` - (Optional) Key-value map of resource tags. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. 
## Attribute Reference @@ -125,4 +126,4 @@ Using `terraform import`, import QuickSight VPC connection using the AWS account % terraform import aws_quicksight_vpc_connection.example 123456789012,example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/ram_principal_association.html.markdown b/website/docs/cdktf/python/r/ram_principal_association.html.markdown index 906d8ce5557a..2c7161faa975 100644 --- a/website/docs/cdktf/python/r/ram_principal_association.html.markdown +++ b/website/docs/cdktf/python/r/ram_principal_association.html.markdown @@ -75,6 +75,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `principal` - (Required) The principal to associate with the resource share. Possible values are an AWS account ID, an AWS Organizations Organization ARN, or an AWS Organizations Organization Unit ARN. * `resource_share_arn` - (Required) The Amazon Resource Name (ARN) of the resource share. 
@@ -109,4 +110,4 @@ Using `terraform import`, import RAM Principal Associations using their Resource % terraform import aws_ram_principal_association.example arn:aws:ram:eu-west-1:123456789012:resource-share/73da1ab9-b94a-4ba3-8eb4-45917f7f4b12,123456789012 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/ram_resource_association.html.markdown b/website/docs/cdktf/python/r/ram_resource_association.html.markdown index b25aadd668f6..90073897d80f 100644 --- a/website/docs/cdktf/python/r/ram_resource_association.html.markdown +++ b/website/docs/cdktf/python/r/ram_resource_association.html.markdown @@ -38,6 +38,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `resource_arn` - (Required) Amazon Resource Name (ARN) of the resource to associate with the RAM Resource Share. * `resource_share_arn` - (Required) Amazon Resource Name (ARN) of the RAM Resource Share. 
@@ -72,4 +73,4 @@ Using `terraform import`, import RAM Resource Associations using their Resource % terraform import aws_ram_resource_association.example arn:aws:ram:eu-west-1:123456789012:resource-share/73da1ab9-b94a-4ba3-8eb4-45917f7f4b12,arn:aws:ec2:eu-west-1:123456789012:subnet/subnet-12345678 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/ram_resource_share.html.markdown b/website/docs/cdktf/python/r/ram_resource_share.html.markdown index 8ff3a86440d9..07a518e6ab26 100644 --- a/website/docs/cdktf/python/r/ram_resource_share.html.markdown +++ b/website/docs/cdktf/python/r/ram_resource_share.html.markdown @@ -39,6 +39,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The name of the resource share. * `allow_external_principals` - (Optional) Indicates whether principals outside your organization can be associated with a resource share. * `permission_arns` - (Optional) Specifies the Amazon Resource Names (ARNs) of the RAM permission to associate with the resource share. If you do not specify an ARN for the permission, RAM automatically attaches the default version of the permission for each resource type. You can associate only one permission with each resource type included in the resource share. 
@@ -77,4 +78,4 @@ Using `terraform import`, import resource shares using the `arn` of the resource % terraform import aws_ram_resource_share.example arn:aws:ram:eu-west-1:123456789012:resource-share/73da1ab9-b94a-4ba3-8eb4-45917f7f4b12 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/ram_resource_share_accepter.html.markdown b/website/docs/cdktf/python/r/ram_resource_share_accepter.html.markdown index 2283c8ab57e9..153f43572cc7 100644 --- a/website/docs/cdktf/python/r/ram_resource_share_accepter.html.markdown +++ b/website/docs/cdktf/python/r/ram_resource_share_accepter.html.markdown @@ -64,6 +64,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `share_arn` - (Required) The ARN of the resource share. 
## Attribute Reference @@ -103,4 +104,4 @@ Using `terraform import`, import resource share accepters using the resource sha % terraform import aws_ram_resource_share_accepter.example arn:aws:ram:us-east-1:123456789012:resource-share/c4b56393-e8d9-89d9-6dc9-883752de4767 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/rbin_rule.html.markdown b/website/docs/cdktf/python/r/rbin_rule.html.markdown index 191aab55bcaa..3f70a33447e2 100644 --- a/website/docs/cdktf/python/r/rbin_rule.html.markdown +++ b/website/docs/cdktf/python/r/rbin_rule.html.markdown @@ -29,7 +29,7 @@ class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) RbinRule(self, "example", - description="example_rule", + description="Example tag-level retention rule", resource_tags=[RbinRuleResourceTags( resource_tag_key="tag_key", resource_tag_value="tag_value" @@ -46,35 +46,69 @@ class MyConvertedCode(TerraformStack): ) ``` +### Region-Level Retention Rule + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. 
+# +from imports.aws.rbin_rule import RbinRule +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + RbinRule(self, "example", + description="Example region-level retention rule with exclusion tags", + exclude_resource_tags=[RbinRuleExcludeResourceTags( + resource_tag_key="tag_key", + resource_tag_value="tag_value" + ) + ], + resource_type="EC2_IMAGE", + retention_period=RbinRuleRetentionPeriod( + retention_period_unit="DAYS", + retention_period_value=10 + ), + tags={ + "test_tag_key": "test_tag_value" + } + ) +``` + ## Argument Reference The following arguments are required: -* `resource_type` - (Required) The resource type to be retained by the retention rule. Valid values are `EBS_SNAPSHOT` and `EC2_IMAGE`. +* `resource_type` - (Required) Resource type to be retained by the retention rule. Valid values are `EBS_SNAPSHOT` and `EC2_IMAGE`. * `retention_period` - (Required) Information about the retention period for which the retention rule is to retain resources. See [`retention_period`](#retention_period) below. The following arguments are optional: -* `description` - (Optional) The retention rule description. -* `resource_tags` - (Optional) Specifies the resource tags to use to identify resources that are to be retained by a tag-level retention rule. See [`resource_tags`](#resource_tags) below. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `description` - (Optional) Retention rule description. +* `exclude_resource_tags` - (Optional) Exclusion tags to use to identify resources that are to be excluded, or ignored, by a Region-level retention rule. See [`exclude_resource_tags`](#exclude_resource_tags) below. 
* `lock_configuration` - (Optional) Information about the retention rule lock configuration. See [`lock_configuration`](#lock_configuration) below. +* `resource_tags` - (Optional) Resource tags to use to identify resources that are to be retained by a tag-level retention rule. See [`resource_tags`](#resource_tags) below. ### retention_period The following arguments are required: -* `retention_period_unit` - (Required) The unit of time in which the retention period is measured. Currently, only DAYS is supported. -* `retention_period_value` - (Required) The period value for which the retention rule is to retain resources. The period is measured using the unit specified for RetentionPeriodUnit. +* `retention_period_unit` - (Required) Unit of time in which the retention period is measured. Currently, only DAYS is supported. +* `retention_period_value` - (Required) Period value for which the retention rule is to retain resources. The period is measured using the unit specified for RetentionPeriodUnit. -### resource_tags +### exclude_resource_tags The following argument is required: -* `resource_tag_key` - (Required) The tag key. +* `resource_tag_key` - (Required) Tag key. The following argument is optional: -* `resource_tag_value` - (Optional) The tag value. +* `resource_tag_value` - (Optional) Tag value. ### lock_configuration @@ -86,17 +120,27 @@ The following argument is required: The following arguments are required: -* `unlock_delay_unit` - (Required) The unit of time in which to measure the unlock delay. Currently, the unlock delay can be measure only in days. -* `unlock_delay_value` - (Required) The unlock delay period, measured in the unit specified for UnlockDelayUnit. +* `unlock_delay_unit` - (Required) Unit of time in which to measure the unlock delay. Currently, the unlock delay can be measured only in days. +* `unlock_delay_value` - (Required) Unlock delay period, measured in the unit specified for UnlockDelayUnit.
+ +### resource_tags + +The following argument is required: + +* `resource_tag_key` - (Required) Tag key. + +The following argument is optional: + +* `resource_tag_value` - (Optional) Tag value. ## Attribute Reference This resource exports the following attributes in addition to the arguments above: * `id` - (String) ID of the Rule. -* `lock_end_time` - (Timestamp) The date and time at which the unlock delay is set to expire. Only returned for retention rules that have been unlocked and that are still within the unlock delay period. -* `lock_state` - (Optional) The lock state of the retention rules to list. Only retention rules with the specified lock state are returned. Valid values are `locked`, `pending_unlock`, `unlocked`. -* `status` - (String) The state of the retention rule. Only retention rules that are in the `available` state retain resources. Valid values include `pending` and `available`. +* `lock_end_time` - (Timestamp) Date and time at which the unlock delay is set to expire. Only returned for retention rules that have been unlocked and that are still within the unlock delay period. +* `lock_state` - (Optional) Lock state of the retention rules to list. Only retention rules with the specified lock state are returned. Valid values are `locked`, `pending_unlock`, `unlocked`. +* `status` - (String) State of the retention rule. Only retention rules that are in the `available` state retain resources. Valid values include `pending` and `available`. ## Import @@ -123,4 +167,4 @@ Using `terraform import`, import RBin Rule using the `id`. 
For example: % terraform import aws_rbin_rule.example examplerule ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/rds_certificate.html.markdown b/website/docs/cdktf/python/r/rds_certificate.html.markdown index 7701765edca2..222a30510ae2 100644 --- a/website/docs/cdktf/python/r/rds_certificate.html.markdown +++ b/website/docs/cdktf/python/r/rds_certificate.html.markdown @@ -35,8 +35,9 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -The following arguments are required: +This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `certificate_identifier` - (Required) Certificate identifier. For example, `rds-ca-rsa4096-g1`. Refer to [AWS RDS (Relational Database) Certificate Identifier](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/UsingWithRDS.SSL.html#UsingWithRDS.SSL.CertificateIdentifier) for more information. 
## Attribute Reference @@ -68,4 +69,4 @@ Using `terraform import`, import the RDS certificate override using the `region` % terraform import aws_rds_certificate.example us-west-2 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/rds_cluster.html.markdown b/website/docs/cdktf/python/r/rds_cluster.html.markdown index 64f4bfb8f8f0..560215104279 100644 --- a/website/docs/cdktf/python/r/rds_cluster.html.markdown +++ b/website/docs/cdktf/python/r/rds_cluster.html.markdown @@ -27,7 +27,7 @@ Changes to an RDS Cluster can occur when you manually change a parameter, such a ~> **NOTE on RDS Clusters and RDS Cluster Role Associations:** Terraform provides both a standalone [RDS Cluster Role Association](rds_cluster_role_association.html) - (an association between an RDS Cluster and a single IAM Role) and an RDS Cluster resource with `iam_roles` attributes. Use one resource or the other to associate IAM Roles and RDS Clusters. Not doing so will cause a conflict of associations and will result in the association being overwritten. --> **Note:** Write-Only argument `master_password_wo` is available to use in place of `master_password`. Write-Only arguments are supported in HashiCorp Terraform 1.11.0 and later. [Learn more](https://developer.hashicorp.com/terraform/language/v1.11.x/resources/ephemeral#write-only-arguments). +-> **Note:** Write-Only argument `master_password_wo` is available to use in place of `master_password`. Write-Only arguments are supported in HashiCorp Terraform 1.11.0 and later. [Learn more](https://developer.hashicorp.com/terraform/language/resources/ephemeral#write-only-arguments). ## Example Usage @@ -292,6 +292,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `allocated_storage` - (Optional, Required for Multi-AZ DB cluster) The amount of storage in gibibytes (GiB) to allocate to each DB instance in the Multi-AZ DB cluster. * `allow_major_version_upgrade` - (Optional) Enable to allow major engine version upgrades when changing engine versions. Defaults to `false`. * `apply_immediately` - (Optional) Specifies whether any cluster modifications are applied immediately, or during the next maintenance window. Default is `false`. See [Amazon RDS Documentation for more information.](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Overview.DBInstance.Modifying.html) @@ -305,7 +306,7 @@ This resource supports the following arguments: * `cluster_identifier` - (Optional, Forces new resources) The cluster identifier. If omitted, Terraform will assign a random, unique identifier. * `cluster_identifier_prefix` - (Optional, Forces new resource) Creates a unique cluster identifier beginning with the specified prefix. Conflicts with `cluster_identifier`. * `cluster_scalability_type` - (Optional, Forces new resources) Specifies the scalability mode of the Aurora DB cluster. When set to `limitless`, the cluster operates as an Aurora Limitless Database. When set to `standard` (the default), the cluster uses normal DB instance creation. Valid values: `limitless`, `standard`. -* `copy_tags_to_snapshot` – (Optional, boolean) Copy all Cluster `tags` to snapshots. Default is `false`. +* `copy_tags_to_snapshot` - (Optional, boolean) Copy all Cluster `tags` to snapshots. Default is `false`. * `database_insights_mode` - (Optional) The mode of Database Insights to enable for the DB cluster. Valid values: `standard`, `advanced`. * `database_name` - (Optional) Name for an automatically created database on cluster creation. 
There are different naming restrictions per database engine: [RDS Naming Constraints][5] * `db_cluster_instance_class` - (Optional, Required for Multi-AZ DB cluster) The compute and memory capacity of each DB instance in the Multi-AZ DB cluster, for example `db.m6g.xlarge`. Not all DB instance classes are available in all AWS Regions, or for all database engines. For the full list of DB instance classes and availability for your engine, see [DB instance class](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.DBInstanceClass.html) in the Amazon RDS User Guide. @@ -323,7 +324,7 @@ This resource supports the following arguments: * `enable_global_write_forwarding` - (Optional) Whether cluster should forward writes to an associated global cluster. Applied to secondary clusters to enable them to forward writes to an [`aws_rds_global_cluster`](/docs/providers/aws/r/rds_global_cluster.html)'s primary cluster. See the [User Guide for Aurora](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-global-database-write-forwarding.html) for more information. * `enable_http_endpoint` - (Optional) Enable HTTP endpoint (data API). Only valid for some combinations of `engine_mode`, `engine` and `engine_version` and only available in some regions. See the [Region and version availability](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/data-api.html#data-api.regions) section of the documentation. This option also does not work with any of these options specified: `snapshot_identifier`, `replication_source_identifier`, `s3_import`. * `enable_local_write_forwarding` - (Optional) Whether read replicas can forward write operations to the writer DB instance in the DB cluster. By default, write operations aren't allowed on reader DB instances.. See the [User Guide for Aurora](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-mysql-write-forwarding.html) for more information. 
**NOTE:** Local write forwarding requires Aurora MySQL version 3.04 or higher. -* `enabled_cloudwatch_logs_exports` - (Optional) Set of log types to export to cloudwatch. If omitted, no logs will be exported. The following log types are supported: `audit`, `error`, `general`, `slowquery`, `iam-db-auth-error`, `postgresql` (PostgreSQL). +* `enabled_cloudwatch_logs_exports` - (Optional) Set of log types to export to cloudwatch. If omitted, no logs will be exported. The following log types are supported: `audit`, `error`, `general`, `iam-db-auth-error`, `instance`, `postgresql` (PostgreSQL), `slowquery`. * `engine_mode` - (Optional) Database engine mode. Valid values: `global` (only valid for Aurora MySQL 1.21 and earlier), `parallelquery`, `provisioned`, `serverless`. Defaults to: `provisioned`. Specify an empty value (`""`) for no engine mode. See the [RDS User Guide](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-serverless.html) for limitations when using `serverless`. * `engine_lifecycle_support` - (Optional) The life cycle type for this DB instance. This setting is valid for cluster types Aurora DB clusters and Multi-AZ DB clusters. Valid values are `open-source-rds-extended-support`, `open-source-rds-extended-support-disabled`. Default value is `open-source-rds-extended-support`. [Using Amazon RDS Extended Support]: https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/extended-support.html * `engine_version` - (Optional) Database engine version. Updating this argument results in an outage. See the [Aurora MySQL](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/AuroraMySQL.Updates.html) and [Aurora Postgres](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/AuroraPostgreSQL.Updates.html) documentation for your configured engine to determine this value, or by running `aws rds describe-db-engine-versions`. For example with Aurora MySQL 2, a potential value for this argument is `5.7.mysql_aurora.2.03.2`. 
The value can contain a partial version where supported by the API. The actual engine version used is returned in the attribute `engine_version_actual`, , see [Attribute Reference](#attribute-reference) below. @@ -521,7 +522,7 @@ This resource exports the following attributes in addition to the arguments abov * `id` - RDS Cluster Identifier * `cluster_identifier` - RDS Cluster Identifier * `cluster_resource_id` - RDS Cluster Resource ID -* `cluster_members` – List of RDS Instances that are a part of this cluster +* `cluster_members` - List of RDS Instances that are a part of this cluster * `availability_zones` - Availability zone of the instance * `backup_retention_period` - Backup retention period * `ca_certificate_identifier` - CA identifier of the CA certificate used for the DB instance's server certificate @@ -593,4 +594,4 @@ Using `terraform import`, import RDS Clusters using the `cluster_identifier`. Fo % terraform import aws_rds_cluster.aurora_cluster aurora-prod-cluster ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/rds_cluster_activity_stream.html.markdown b/website/docs/cdktf/python/r/rds_cluster_activity_stream.html.markdown index 54497df9c11c..8399ab48b5c5 100644 --- a/website/docs/cdktf/python/r/rds_cluster_activity_stream.html.markdown +++ b/website/docs/cdktf/python/r/rds_cluster_activity_stream.html.markdown @@ -73,6 +73,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `resource_arn` - (Required, Forces new resources) The Amazon Resource Name (ARN) of the DB cluster. 
* `mode` - (Required, Forces new resources) Specifies the mode of the database activity stream. Database events such as a change or access generate an activity stream event. The database session can handle these events either synchronously or asynchronously. One of: `sync`, `async`. * `kms_key_id` - (Required, Forces new resources) The AWS KMS key identifier for encrypting messages in the database activity stream. The AWS KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key. @@ -117,4 +118,4 @@ Using `terraform import`, import RDS Aurora Cluster Database Activity Streams us [2]: https://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_StartActivityStream.html [3]: https://docs.aws.amazon.com/cli/latest/reference/rds/start-activity-stream.html - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/rds_cluster_endpoint.html.markdown b/website/docs/cdktf/python/r/rds_cluster_endpoint.html.markdown index 948d56993e13..9b042216cf95 100644 --- a/website/docs/cdktf/python/r/rds_cluster_endpoint.html.markdown +++ b/website/docs/cdktf/python/r/rds_cluster_endpoint.html.markdown @@ -81,6 +81,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `cluster_identifier` - (Required, Forces new resources) The cluster identifier. * `cluster_endpoint_identifier` - (Required, Forces new resources) The identifier to use for the new endpoint. This parameter is stored as a lowercase string. * `custom_endpoint_type` - (Required) The type of the endpoint. One of: READER , ANY . 
@@ -127,4 +128,4 @@ Using `terraform import`, import RDS Clusters Endpoint using the `cluster_endpoi [1]: https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/Aurora.Overview.Endpoints.html#Aurora.Endpoints.Cluster - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/rds_cluster_instance.html.markdown b/website/docs/cdktf/python/r/rds_cluster_instance.html.markdown index 002bd0d0078c..4ac2a7d76c93 100644 --- a/website/docs/cdktf/python/r/rds_cluster_instance.html.markdown +++ b/website/docs/cdktf/python/r/rds_cluster_instance.html.markdown @@ -69,12 +69,13 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `apply_immediately` - (Optional) Specifies whether any database modifications are applied immediately, or during the next maintenance window. Default is`false`. * `auto_minor_version_upgrade` - (Optional) Indicates that minor engine upgrades will be applied automatically to the DB instance during the maintenance window. Default `true`. * `availability_zone` - (Optional, Computed, Forces new resource) EC2 Availability Zone that the DB instance is created in. See [docs](https://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_CreateDBInstance.html) about the details. * `ca_cert_identifier` - (Optional) Identifier of the CA certificate for the DB instance. * `cluster_identifier` - (Required, Forces new resource) Identifier of the [`aws_rds_cluster`](/docs/providers/aws/r/rds_cluster.html) in which to launch this instance. 
-* `copy_tags_to_snapshot` – (Optional, boolean) Indicates whether to copy all of the user-defined tags from the DB instance to snapshots of the DB instance. Default `false`. +* `copy_tags_to_snapshot` - (Optional, boolean) Indicates whether to copy all of the user-defined tags from the DB instance to snapshots of the DB instance. Default `false`. * `custom_iam_instance_profile` - (Optional) Instance profile associated with the underlying Amazon EC2 instance of an RDS Custom DB instance. * `db_parameter_group_name` - (Optional) Name of the DB parameter group to associate with this instance. * `db_subnet_group_name` - (Optional, Forces new resource) Specifies the DB subnet group to associate with this DB instance. The default behavior varies depending on whether `db_subnet_group_name` is specified. Please refer to official [AWS documentation](https://docs.aws.amazon.com/cli/latest/reference/rds/create-db-instance.html) to understand how `db_subnet_group_name` and `publicly_accessible` parameters affect DB instance behaviour. **NOTE:** This must match the `db_subnet_group_name` of the attached [`aws_rds_cluster`](/docs/providers/aws/r/rds_cluster.html). @@ -107,7 +108,7 @@ This resource exports the following attributes in addition to the arguments abov * `cluster_identifier` - RDS Cluster Identifier * `identifier` - Instance identifier * `id` - Instance identifier -* `writer` – Boolean indicating if this instance is writable. `False` indicates this instance is a read replica. +* `writer` - Boolean indicating if this instance is writable. `False` indicates this instance is a read replica. * `availability_zone` - Availability zone of the instance * `endpoint` - DNS address for this instance. May not be writable * `engine` - Database engine @@ -161,4 +162,4 @@ Using `terraform import`, import RDS Cluster Instances using the `identifier`. 
F % terraform import aws_rds_cluster_instance.prod_instance_1 aurora-cluster-instance-1 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/rds_cluster_parameter_group.html.markdown b/website/docs/cdktf/python/r/rds_cluster_parameter_group.html.markdown index 4d7c3bee161c..1d0d0f779bd6 100644 --- a/website/docs/cdktf/python/r/rds_cluster_parameter_group.html.markdown +++ b/website/docs/cdktf/python/r/rds_cluster_parameter_group.html.markdown @@ -48,6 +48,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Optional, Forces new resource) The name of the DB cluster parameter group. If omitted, Terraform will assign a random, unique name. * `name_prefix` - (Optional, Forces new resource) Creates a unique name beginning with the specified prefix. Conflicts with `name`. * `family` - (Required) The family of the DB cluster parameter group. @@ -96,4 +97,4 @@ Using `terraform import`, import RDS Cluster Parameter Groups using the `name`. 
% terraform import aws_rds_cluster_parameter_group.cluster_pg production-pg-1 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/rds_cluster_role_association.html.markdown b/website/docs/cdktf/python/r/rds_cluster_role_association.html.markdown index d5fede3e6a4b..b9c5edd4b37c 100644 --- a/website/docs/cdktf/python/r/rds_cluster_role_association.html.markdown +++ b/website/docs/cdktf/python/r/rds_cluster_role_association.html.markdown @@ -40,8 +40,9 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `db_cluster_identifier` - (Required) DB Cluster Identifier to associate with the IAM Role. -* `feature_name` - (Required) Name of the feature for association. This can be found in the AWS documentation relevant to the integration or a full list is available in the `SupportedFeatureNames` list returned by [AWS CLI rds describe-db-engine-versions](https://docs.aws.amazon.com/cli/latest/reference/rds/describe-db-engine-versions.html). +* `feature_name` - (Optional) Name of the feature for association. This can be found in the AWS documentation relevant to the integration or a full list is available in the `SupportedFeatureNames` list returned by [AWS CLI rds describe-db-engine-versions](https://docs.aws.amazon.com/cli/latest/reference/rds/describe-db-engine-versions.html). * `role_arn` - (Required) Amazon Resource Name (ARN) of the IAM Role to associate with the DB Cluster. 
## Attribute Reference @@ -82,4 +83,4 @@ Using `terraform import`, import `aws_rds_cluster_role_association` using the DB % terraform import aws_rds_cluster_role_association.example my-db-cluster,arn:aws:iam::123456789012:role/my-role ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/rds_cluster_snapshot_copy.html.markdown b/website/docs/cdktf/python/r/rds_cluster_snapshot_copy.html.markdown index 5ca53e2bd810..67bb3ec8d16c 100644 --- a/website/docs/cdktf/python/r/rds_cluster_snapshot_copy.html.markdown +++ b/website/docs/cdktf/python/r/rds_cluster_snapshot_copy.html.markdown @@ -17,14 +17,14 @@ Manages an RDS database cluster snapshot copy. For managing RDS database instanc ```python # DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug from constructs import Construct -from cdktf import TerraformStack +from cdktf import Token, TerraformStack # # Provider bindings are generated by running `cdktf get`. # See https://cdk.tf/provider-generation for more details. # -from imports.aws. import RdsClusterSnapshotCopy from imports.aws.db_cluster_snapshot import DbClusterSnapshot from imports.aws.rds_cluster import RdsCluster +from imports.aws.rds_cluster_snapshot_copy import RdsClusterSnapshotCopy class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) @@ -43,7 +43,7 @@ class MyConvertedCode(TerraformStack): # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. 
aws_db_cluster_snapshot_example.override_logical_id("example") aws_rds_cluster_snapshot_copy_example = RdsClusterSnapshotCopy(self, "example_2", - source_db_cluster_snapshot_identifier=aws_db_cluster_snapshot_example.db_cluster_snapshot_arn, + source_db_cluster_snapshot_identifier=Token.as_string(aws_db_cluster_snapshot_example.db_cluster_snapshot_arn), target_db_cluster_snapshot_identifier="example-copy" ) # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. @@ -59,6 +59,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `copy_tags` - (Optional) Whether to copy existing tags. Defaults to `false`. * `destination_region` - (Optional) The Destination region to place snapshot copy. * `kms_key_id` - (Optional) KMS key ID. @@ -103,7 +104,7 @@ from cdktf import TerraformStack # Provider bindings are generated by running `cdktf get`. # See https://cdk.tf/provider-generation for more details. # -from imports.aws. import RdsClusterSnapshotCopy +from imports.aws.rds_cluster_snapshot_copy import RdsClusterSnapshotCopy class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) @@ -116,4 +117,4 @@ Using `terraform import`, import `aws_rds_cluster_snapshot_copy` using the `id`. 
% terraform import aws_rds_cluster_snapshot_copy.example my-snapshot ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/rds_custom_db_engine_version.markdown b/website/docs/cdktf/python/r/rds_custom_db_engine_version.markdown index 9afc7ff88398..028367cd818d 100644 --- a/website/docs/cdktf/python/r/rds_custom_db_engine_version.markdown +++ b/website/docs/cdktf/python/r/rds_custom_db_engine_version.markdown @@ -136,6 +136,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `database_installation_files_s3_bucket_name` - (Required) The name of the Amazon S3 bucket that contains the database installation files. * `database_installation_files_s3_prefix` - (Required) The prefix for the Amazon S3 bucket that contains the database installation files. * `description` - (Optional) The description of the CEV. 
@@ -194,4 +195,4 @@ Using `terraform import`, import custom engine versions for Amazon RDS custom us % terraform import aws_rds_custom_db_engine_version.example custom-oracle-ee-cdb:19.cdb_cev1 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/rds_export_task.html.markdown b/website/docs/cdktf/python/r/rds_export_task.html.markdown index f330a762c638..8c36b5bd0c61 100644 --- a/website/docs/cdktf/python/r/rds_export_task.html.markdown +++ b/website/docs/cdktf/python/r/rds_export_task.html.markdown @@ -165,6 +165,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `export_only` - (Optional) Data to be exported from the snapshot. If this parameter is not provided, all the snapshot data is exported. Valid values are documented in the [AWS StartExportTask API documentation](https://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_StartExportTask.html#API_StartExportTask_RequestParameters). * `s3_prefix` - (Optional) Amazon S3 bucket prefix to use as the file name and path of the exported snapshot. 
@@ -207,4 +208,4 @@ Using `terraform import`, import a RDS (Relational Database) Export Task using t % terraform import aws_rds_export_task.example example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/rds_global_cluster.html.markdown b/website/docs/cdktf/python/r/rds_global_cluster.html.markdown index 18724e5525a6..c38788d3e725 100644 --- a/website/docs/cdktf/python/r/rds_global_cluster.html.markdown +++ b/website/docs/cdktf/python/r/rds_global_cluster.html.markdown @@ -250,19 +250,25 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -This resource supports the following arguments: +The following arguments are required: * `global_cluster_identifier` - (Required, Forces new resources) Global cluster identifier. + +The following arguments are optional: + * `database_name` - (Optional, Forces new resources) Name for an automatically created database on cluster creation. Terraform will only perform drift detection if a configuration value is provided. * `deletion_protection` - (Optional) If the Global Cluster should have deletion protection enabled. The database can't be deleted when this value is set to `true`. The default is `false`. * `engine` - (Optional, Forces new resources) Name of the database engine to be used for this DB cluster. Terraform will only perform drift detection if a configuration value is provided. Valid values: `aurora`, `aurora-mysql`, `aurora-postgresql`. Defaults to `aurora`. Conflicts with `source_db_cluster_identifier`. * `engine_lifecycle_support` - (Optional) The life cycle type for this DB instance. This setting applies only to Aurora PostgreSQL-based global databases. Valid values are `open-source-rds-extended-support`, `open-source-rds-extended-support-disabled`. Default value is `open-source-rds-extended-support`. 
[Using Amazon RDS Extended Support]: https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/extended-support.html * `engine_version` - (Optional) Engine version of the Aurora global database. The `engine`, `engine_version`, and `instance_class` (on the `aws_rds_cluster_instance`) must together support global databases. See [Using Amazon Aurora global databases](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-global-database.html) for more information. By upgrading the engine version, Terraform will upgrade cluster members. **NOTE:** To avoid an `inconsistent final plan` error while upgrading, use the `lifecycle` `ignore_changes` for `engine_version` meta argument on the associated `aws_rds_cluster` resource as shown above in [Upgrading Engine Versions](#upgrading-engine-versions) example. * `force_destroy` - (Optional) Enable to remove DB Cluster members from Global Cluster on destroy. Required with `source_db_cluster_identifier`. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `source_db_cluster_identifier` - (Optional) Amazon Resource Name (ARN) to use as the primary DB Cluster of the Global Cluster on creation. Terraform cannot perform drift detection of this value. **NOTE:** After initial creation, this argument can be removed and replaced with `engine` and `engine_version`. This allows upgrading the engine version of the Global Cluster. * `storage_encrypted` - (Optional, Forces new resources) Specifies whether the DB cluster is encrypted. The default is `false` unless `source_db_cluster_identifier` is specified and encrypted. Terraform will only perform drift detection if a configuration value is provided. * `tags` - (Optional) A map of tags to assign to the DB cluster. 
If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. +~> When both `source_db_cluster_identifier` and `engine`/`engine_version` are set, all engine related values will be ignored during creation. The global cluster will inherit the `engine` and `engine_version` values from the source cluster. After the first apply, any differences between the inherited and configured values will trigger an in-place update. + ## Attribute Reference This resource exports the following attributes in addition to the arguments above: @@ -334,4 +340,4 @@ class MyConvertedCode(TerraformStack): ) ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/rds_instance_state.html.markdown b/website/docs/cdktf/python/r/rds_instance_state.html.markdown index 1cb55266e45b..d43e47dfeace 100644 --- a/website/docs/cdktf/python/r/rds_instance_state.html.markdown +++ b/website/docs/cdktf/python/r/rds_instance_state.html.markdown @@ -38,16 +38,15 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -The following arguments are required: +This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `identifier` - (Required) DB Instance Identifier * `state` - (Required) Configured state of the DB Instance. Valid values are `available` and `stopped`. ## Attribute Reference -This resource exports the following attributes in addition to the arguments above: - -* `identifier` - DB Instance Identifier +This resource exports no additional attributes. 
## Timeouts @@ -58,7 +57,7 @@ This resource exports the following attributes in addition to the arguments abov ## Import -In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import RDS (Relational Database) RDS Instance State using the `example_id_arg`. For example: +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import RDS (Relational Database) RDS Instance State using the `identifier`. For example: ```python # DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug @@ -75,10 +74,10 @@ class MyConvertedCode(TerraformStack): RdsInstanceState.generate_config_for_import(self, "example", "db-L72FUFBZX2RRXT3HOJSIUQVOKE") ``` -Using `terraform import`, import RDS (Relational Database) RDS Instance State using the `example_id_arg`. For example: +Using `terraform import`, import RDS (Relational Database) RDS Instance State using the `identifier`. For example: ```console % terraform import aws_rds_instance_state.example rds_instance_state-id-12345678 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/rds_integration.html.markdown b/website/docs/cdktf/python/r/rds_integration.html.markdown index 2d3ad6da5285..4f2169f70f50 100644 --- a/website/docs/cdktf/python/r/rds_integration.html.markdown +++ b/website/docs/cdktf/python/r/rds_integration.html.markdown @@ -126,6 +126,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
* `additional_encryption_context` - (Optional, Forces new resources) Set of non-secret key–value pairs that contains additional contextual information about the data. For more information, see the [User Guide](https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context). You can only include this parameter if you specify the `kms_key_id` parameter. @@ -146,7 +147,7 @@ For more detailed documentation about each argument, refer to the [AWS official This resource exports the following attributes in addition to the arguments above: * `arn` - ARN of the Integration. -* `id` - ID of the Integration. +* `id` - (**Deprecated**, use `arn` instead) ARN of the Integration. * `tags_all` - A map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). ## Timeouts @@ -159,6 +160,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_rds_integration.example + identity = { + "arn" = "arn:aws:rds:us-east-1:123456789012:integration:12345678-1234-1234-1234-123456789012" + } +} + +resource "aws_rds_integration" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the RDS integration. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import RDS (Relational Database) Integration using the `arn`. 
For example: ```python @@ -182,4 +204,4 @@ Using `terraform import`, import RDS (Relational Database) Integration using the % terraform import aws_rds_integration.example arn:aws:rds:us-west-2:123456789012:integration:abcdefgh-0000-1111-2222-123456789012 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/rds_reserved_instance.html.markdown b/website/docs/cdktf/python/r/rds_reserved_instance.html.markdown index ba02ea32fe51..22509f18ef81 100644 --- a/website/docs/cdktf/python/r/rds_reserved_instance.html.markdown +++ b/website/docs/cdktf/python/r/rds_reserved_instance.html.markdown @@ -53,6 +53,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `instance_count` - (Optional) Number of instances to reserve. Default value is `1`. * `reservation_id` - (Optional) Customer-specified identifier to track this reservation. * `tags` - (Optional) Map of tags to assign to the DB reservation. If configured with a provider [`default_tags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. @@ -65,7 +66,7 @@ This resource exports the following attributes in addition to the arguments abov * `id` - Unique identifier for the reservation. same as `reservation_id`. * `currency_code` - Currency code for the reserved DB instance. * `duration` - Duration of the reservation in seconds. -* `fixed_price` – Fixed price charged for this reserved DB instance. +* `fixed_price` - Fixed price charged for this reserved DB instance. 
* `db_instance_class` - DB instance class for the reserved DB instance. * `lease_id` - Unique identifier for the lease associated with the reserved DB instance. Amazon Web Services Support might request the lease ID for an issue related to a reserved DB instance. * `multi_az` - Whether the reservation applies to Multi-AZ deployments. @@ -110,4 +111,4 @@ Using `terraform import`, import RDS DB Instance Reservations using the `instanc % terraform import aws_rds_reserved_instance.reservation_instance CustomReservationID ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/rds_shard_group.html.markdown b/website/docs/cdktf/python/r/rds_shard_group.html.markdown index 6e7c505fd05c..4236effb29a3 100644 --- a/website/docs/cdktf/python/r/rds_shard_group.html.markdown +++ b/website/docs/cdktf/python/r/rds_shard_group.html.markdown @@ -57,6 +57,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `compute_redundancy` - (Optional) Specifies whether to create standby DB shard groups for the DB shard group. Valid values are: * `0` - Creates a DB shard group without a standby DB shard group. This is the default value. * `1` - Creates a DB shard group with a standby DB shard group in a different Availability Zone (AZ). 
@@ -112,4 +113,4 @@ Using `terraform import`, import shard group using the `db_shard_group_identifie % terraform import aws_rds_shard_group.example example-shard-group ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/redshift_authentication_profile.html.markdown b/website/docs/cdktf/python/r/redshift_authentication_profile.html.markdown index 23b2e15ede5c..248b22f4e575 100644 --- a/website/docs/cdktf/python/r/redshift_authentication_profile.html.markdown +++ b/website/docs/cdktf/python/r/redshift_authentication_profile.html.markdown @@ -41,6 +41,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `authentication_profile_name` - (Required, Forces new resource) The name of the authentication profile. * `authentication_profile_content` - (Required) The content of the authentication profile in JSON format. The maximum length of the JSON string is determined by a quota for your account. @@ -75,4 +76,4 @@ Using `terraform import`, import Redshift Authentication by `authentication_prof % terraform import aws_redshift_authentication_profile.test example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/redshift_cluster.html.markdown b/website/docs/cdktf/python/r/redshift_cluster.html.markdown index 223f2eeec455..9ee6682e740b 100644 --- a/website/docs/cdktf/python/r/redshift_cluster.html.markdown +++ b/website/docs/cdktf/python/r/redshift_cluster.html.markdown @@ -17,7 +17,7 @@ Provides a Redshift Cluster Resource. 
~> **NOTE:** A Redshift cluster's default IAM role can be managed both by this resource's `default_iam_role_arn` argument and the [`aws_redshift_cluster_iam_roles`](redshift_cluster_iam_roles.html) resource's `default_iam_role_arn` argument. Do not configure different values for both arguments. Doing so will cause a conflict of default IAM roles. --> **Note:** Write-Only argument `master_password_wo` is available to use in place of `master_password`. Write-Only arguments are supported in HashiCorp Terraform 1.11.0 and later. [Learn more](https://developer.hashicorp.com/terraform/language/v1.11.x/resources/ephemeral#write-only-arguments). +-> **Note:** Write-Only argument `master_password_wo` is available to use in place of `master_password`. Write-Only arguments are supported in HashiCorp Terraform 1.11.0 and later. [Learn more](https://developer.hashicorp.com/terraform/language/resources/ephemeral#write-only-arguments). ## Example Usage @@ -73,6 +73,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `cluster_identifier` - (Required) The Cluster Identifier. Must be a lower case string. * `database_name` - (Optional) The name of the first database to be created when the cluster is created. If you do not provide a name, Amazon Redshift will create a default database called `dev`. @@ -116,8 +117,9 @@ This resource supports the following arguments: No longer supported by the AWS API. Always returns `auto`. * `number_of_nodes` - (Optional) The number of compute nodes in the cluster. This parameter is required when the ClusterType parameter is specified as multi-node. Default is 1. 
-* `publicly_accessible` - (Optional) If true, the cluster can be accessed from a public network. Default is `true`. +* `publicly_accessible` - (Optional) If true, the cluster can be accessed from a public network. Default is `false`. * `encrypted` - (Optional) If true , the data in the cluster is encrypted at rest. + Default is `true`. * `enhanced_vpc_routing` - (Optional) If true , enhanced VPC routing is enabled. * `kms_key_id` - (Optional) The ARN for the KMS encryption key. When specifying `kms_key_id`, `encrypted` needs to be set to true. * `elastic_ip` - (Optional) The Elastic IP (EIP) address for the cluster. @@ -128,36 +130,13 @@ This resource supports the following arguments: * `snapshot_cluster_identifier` - (Optional) The name of the cluster the source snapshot was created from. * `owner_account` - (Optional) The AWS customer account used to create or copy the snapshot. Required if you are restoring a snapshot you do not own, optional if you own the snapshot. * `iam_roles` - (Optional) A list of IAM Role ARNs to associate with the cluster. A Maximum of 10 can be associated to the cluster at any time. -* `logging` - (Optional, **Deprecated**) Logging, documented below. * `maintenance_track_name` - (Optional) The name of the maintenance track for the restored cluster. When you take a snapshot, the snapshot inherits the MaintenanceTrack value from the cluster. The snapshot might be on a different track than the cluster that was the source for the snapshot. For example, suppose that you take a snapshot of a cluster that is on the current track and then change the cluster to be on the trailing track. In this case, the snapshot and the source cluster are on different tracks. Default value is `current`. * `manual_snapshot_retention_period` - (Optional) The default number of days to retain a manual snapshot. If the value is -1, the snapshot is retained indefinitely. This setting doesn't change the retention period of existing snapshots. 
Valid values are between `-1` and `3653`. Default value is `-1`. -* `snapshot_copy` - (Optional, **Deprecated**) Configuration of automatic copy of snapshots from one region to another. Documented below. * `tags` - (Optional) A map of tags to assign to the resource. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. For more detailed documentation about each argument, refer to the [AWS official documentation](http://docs.aws.amazon.com/cli/latest/reference/redshift/index.html#cli-aws-redshift). -### Nested Blocks - -#### `logging` - -~> The `logging` argument is deprecated. Use the [`aws_redshift_logging`](./redshift_logging.html.markdown) resource instead. This argument will be removed in a future major version. - -* `enable` - (Required) Enables logging information such as queries and connection attempts, for the specified Amazon Redshift cluster. -* `bucket_name` - (Optional, required when `enable` is `true` and `log_destination_type` is `s3`) The name of an existing S3 bucket where the log files are to be stored. Must be in the same region as the cluster and the cluster must have read bucket and put object permissions. -For more information on the permissions required for the bucket, please read the AWS [documentation](http://docs.aws.amazon.com/redshift/latest/mgmt/db-auditing.html#db-auditing-enable-logging) -* `s3_key_prefix` - (Optional) The prefix applied to the log file names. -* `log_destination_type` - (Optional) The log destination type. An enum with possible values of `s3` and `cloudwatch`. -* `log_exports` - (Optional) The collection of exported log types. Log types include the connection log, user log and user activity log. Required when `log_destination_type` is `cloudwatch`. Valid log types are `connectionlog`, `userlog`, and `useractivitylog`. 
- -#### `snapshot_copy` - -~> The `snapshot_copy` argument is deprecated. Use the [`aws_redshift_snapshot_copy`](./redshift_snapshot_copy.html.markdown) resource instead. This argument will be removed in a future major version. - -* `destination_region` - (Required) The destination region that you want to copy snapshots to. -* `retention_period` - (Optional) The number of days to retain automated snapshots in the destination region after they are copied from the source region. Defaults to `7`. -* `grant_name` - (Optional) The name of the snapshot copy grant to use when snapshots of an AWS KMS-encrypted cluster are copied to the destination region. - ## Attribute Reference This resource exports the following attributes in addition to the arguments above: @@ -225,4 +204,4 @@ Using `terraform import`, import Redshift Clusters using the `cluster_identifier % terraform import aws_redshift_cluster.myprodcluster tf-redshift-cluster-12345 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/redshift_cluster_iam_roles.html.markdown b/website/docs/cdktf/python/r/redshift_cluster_iam_roles.html.markdown index 269c1b0b4d20..0dca3d4864fb 100644 --- a/website/docs/cdktf/python/r/redshift_cluster_iam_roles.html.markdown +++ b/website/docs/cdktf/python/r/redshift_cluster_iam_roles.html.markdown @@ -38,6 +38,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `cluster_identifier` - (Required) The name of the Redshift Cluster IAM Roles. * `iam_role_arns` - (Optional) A list of IAM Role ARNs to associate with the cluster. A Maximum of 10 can be associated to the cluster at any time. 
* `default_iam_role_arn` - (Optional) The Amazon Resource Name (ARN) for the IAM role that was set as default for the cluster when the cluster was created. @@ -73,4 +74,4 @@ Using `terraform import`, import Redshift Cluster IAM Roless using the `cluster_ % terraform import aws_redshift_cluster_iam_roles.examplegroup1 example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/redshift_cluster_snapshot.html.markdown b/website/docs/cdktf/python/r/redshift_cluster_snapshot.html.markdown index f99a2f11fb9e..974a7afce269 100644 --- a/website/docs/cdktf/python/r/redshift_cluster_snapshot.html.markdown +++ b/website/docs/cdktf/python/r/redshift_cluster_snapshot.html.markdown @@ -42,6 +42,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `cluster_identifier` - (Required, Forces new resource) The cluster identifier for which you want a snapshot. * `snapshot_identifier` - (Required, Forces new resource) A unique identifier for the snapshot that you are requesting. This identifier must be unique for all snapshots within the Amazon Web Services account. * `manual_snapshot_retention_period` - (Optional) The number of days that a manual snapshot is retained. If the value is `-1`, the manual snapshot is retained indefinitely. Valid values are -1 and between `1` and `3653`. 
@@ -82,4 +83,4 @@ Using `terraform import`, import Redshift Cluster Snapshots using `snapshot_iden % terraform import aws_redshift_cluster_snapshot.test example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/redshift_data_share_authorization.html.markdown b/website/docs/cdktf/python/r/redshift_data_share_authorization.html.markdown index d7d048b55958..c742621a29cb 100644 --- a/website/docs/cdktf/python/r/redshift_data_share_authorization.html.markdown +++ b/website/docs/cdktf/python/r/redshift_data_share_authorization.html.markdown @@ -42,6 +42,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `allow_writes` - (Optional) Whether to allow write operations for a datashare. 
## Attribute Reference @@ -77,4 +78,4 @@ Using `terraform import`, import Redshift Data Share Authorization using the `id % terraform import aws_redshift_data_share_authorization.example arn:aws:redshift:us-west-2:123456789012:datashare:3072dae5-022b-4d45-9cd3-01f010aae4b2/example_share,123456789012 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/redshift_data_share_consumer_association.html.markdown b/website/docs/cdktf/python/r/redshift_data_share_consumer_association.html.markdown index 37638683a100..92d61a467117 100644 --- a/website/docs/cdktf/python/r/redshift_data_share_consumer_association.html.markdown +++ b/website/docs/cdktf/python/r/redshift_data_share_consumer_association.html.markdown @@ -61,6 +61,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `allow_writes` - (Optional) Whether to allow write operations for a datashare. * `associate_entire_account` - (Optional) Whether the datashare is associated with the entire account. Conflicts with `consumer_arn` and `consumer_region`. * `consumer_arn` - (Optional) Amazon Resource Name (ARN) of the consumer that is associated with the datashare. Conflicts with `associate_entire_account` and `consumer_region`. 
@@ -99,4 +100,4 @@ Using `terraform import`, import Redshift Data Share Consumer Association using % terraform import aws_redshift_data_share_consumer_association.example arn:aws:redshift:us-west-2:123456789012:datashare:b3bfde75-73fd-408b-9086-d6fccfd6d588/example,,,us-west-2 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/redshift_endpoint_access.html.markdown b/website/docs/cdktf/python/r/redshift_endpoint_access.html.markdown index 096c76763f98..d3aaaa24a601 100644 --- a/website/docs/cdktf/python/r/redshift_endpoint_access.html.markdown +++ b/website/docs/cdktf/python/r/redshift_endpoint_access.html.markdown @@ -37,6 +37,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `cluster_identifier` - (Required) The cluster identifier of the cluster to access. * `endpoint_name` - (Required) The Redshift-managed VPC endpoint name. * `resource_owner` - (Optional) The Amazon Web Services account ID of the owner of the cluster. This is only required if the cluster is in another Amazon Web Services account. @@ -90,4 +91,4 @@ Using `terraform import`, import Redshift endpoint access using the `name`. 
For % terraform import aws_redshift_endpoint_access.example example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/redshift_endpoint_authorization.html.markdown b/website/docs/cdktf/python/r/redshift_endpoint_authorization.html.markdown index b4167f0ce9d6..2f3c52ef17e3 100644 --- a/website/docs/cdktf/python/r/redshift_endpoint_authorization.html.markdown +++ b/website/docs/cdktf/python/r/redshift_endpoint_authorization.html.markdown @@ -36,6 +36,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `account` - (Required) The Amazon Web Services account ID to grant access to. * `cluster_identifier` - (Required) The cluster identifier of the cluster to grant access to. * `force_delete` - (Optional) Indicates whether to force the revoke action. If true, the Redshift-managed VPC endpoints associated with the endpoint authorization are also deleted. Default value is `false`. @@ -76,4 +77,4 @@ Using `terraform import`, import Redshift endpoint authorization using the `id`. 
% terraform import aws_redshift_endpoint_authorization.example 01234567910:cluster-example-id ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/redshift_event_subscription.html.markdown b/website/docs/cdktf/python/r/redshift_event_subscription.html.markdown index 221b0e29a759..8449c648389a 100644 --- a/website/docs/cdktf/python/r/redshift_event_subscription.html.markdown +++ b/website/docs/cdktf/python/r/redshift_event_subscription.html.markdown @@ -58,6 +58,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The name of the Redshift event subscription. * `sns_topic_arn` - (Required) The ARN of the SNS topic to send events to. * `source_ids` - (Optional) A list of identifiers of the event sources for which events will be returned. If not specified, then all sources are included in the response. If specified, a `source_type` must also be specified. @@ -101,4 +102,4 @@ Using `terraform import`, import Redshift Event Subscriptions using the `name`. 
% terraform import aws_redshift_event_subscription.default redshift-event-sub ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/redshift_hsm_client_certificate.html.markdown b/website/docs/cdktf/python/r/redshift_hsm_client_certificate.html.markdown index 8c071c7ca097..99d6741ffa5a 100644 --- a/website/docs/cdktf/python/r/redshift_hsm_client_certificate.html.markdown +++ b/website/docs/cdktf/python/r/redshift_hsm_client_certificate.html.markdown @@ -35,6 +35,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `hsm_client_certificate_identifier` - (Required, Forces new resource) The identifier of the HSM client certificate. * `tags` - (Optional) A map of tags to assign to the resource. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. 
@@ -71,4 +72,4 @@ Using `terraform import`, import Redshift HSM Client Certificates using `hsm_cli % terraform import aws_redshift_hsm_client_certificate.test example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/redshift_hsm_configuration.html.markdown b/website/docs/cdktf/python/r/redshift_hsm_configuration.html.markdown index 324d54a21d41..5fc98cfadcd7 100644 --- a/website/docs/cdktf/python/r/redshift_hsm_configuration.html.markdown +++ b/website/docs/cdktf/python/r/redshift_hsm_configuration.html.markdown @@ -40,6 +40,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` - (Required, Forces new resource) A text description of the HSM configuration to be created. * `hsm_configuration_identifier` - (Required, Forces new resource) The identifier to be assigned to the new Amazon Redshift HSM configuration. * `hsm_ip_address` - (Required, Forces new resource) The IP address that the Amazon Redshift cluster must use to access the HSM. 
@@ -81,4 +82,4 @@ Using `terraform import`, import Redshift HSM Client Certificates using `hsm_con % terraform import aws_redshift_hsm_configuration.example example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/redshift_integration.html.markdown b/website/docs/cdktf/python/r/redshift_integration.html.markdown index a0f413381006..2ace33141d20 100644 --- a/website/docs/cdktf/python/r/redshift_integration.html.markdown +++ b/website/docs/cdktf/python/r/redshift_integration.html.markdown @@ -153,6 +153,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `additional_encryption_context` - (Optional, Forces new resources) Set of non-secret key–value pairs that contains additional contextual information about the data. For more information, see the [User Guide](https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context). You can only include this parameter if you specify the `kms_key_id` parameter. @@ -204,4 +205,4 @@ Using `terraform import`, import Redshift Integration using the `arn`. 
For examp % terraform import aws_redshift_integration.example arn:aws:redshift:us-west-2:123456789012:integration:abcdefgh-0000-1111-2222-123456789012 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/redshift_logging.html.markdown b/website/docs/cdktf/python/r/redshift_logging.html.markdown index ea5f0afdde7f..85d68792e164 100644 --- a/website/docs/cdktf/python/r/redshift_logging.html.markdown +++ b/website/docs/cdktf/python/r/redshift_logging.html.markdown @@ -64,6 +64,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `bucket_name` - (Optional) Name of an existing S3 bucket where the log files are to be stored. Required when `log_destination_type` is `s3`. Must be in the same region as the cluster and the cluster must have read bucket and put object permissions. For more information on the permissions required for the bucket, please read the AWS [documentation](http://docs.aws.amazon.com/redshift/latest/mgmt/db-auditing.html#db-auditing-enable-logging) * `log_destination_type` - (Optional) Log destination type. Valid values are `s3` and `cloudwatch`. * `log_exports` - (Optional) Collection of exported log types. Required when `log_destination_type` is `cloudwatch`. Valid values are `connectionlog`, `useractivitylog`, and `userlog`. @@ -73,7 +74,7 @@ The following arguments are optional: This resource exports the following attributes in addition to the arguments above: -* `id` - Identifier of the source cluster. +* `id` - (**Deprecated**, use `cluster_identifier` instead) Identifier of the source cluster. 
## Import @@ -100,4 +101,4 @@ Using `terraform import`, import Redshift Logging using the `id`. For example: % terraform import aws_redshift_logging.example cluster-id-12345678 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/redshift_parameter_group.html.markdown b/website/docs/cdktf/python/r/redshift_parameter_group.html.markdown index 8336e634cebd..c442e7094b5e 100644 --- a/website/docs/cdktf/python/r/redshift_parameter_group.html.markdown +++ b/website/docs/cdktf/python/r/redshift_parameter_group.html.markdown @@ -47,6 +47,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The name of the Redshift parameter group. * `family` - (Required) The family of the Redshift parameter group. * `description` - (Optional) The description of the Redshift parameter group. Defaults to "Managed by Terraform". @@ -93,4 +94,4 @@ Using `terraform import`, import Redshift Parameter Groups using the `name`. 
For % terraform import aws_redshift_parameter_group.paramgroup1 parameter-group-test-terraform ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/redshift_partner.html.markdown b/website/docs/cdktf/python/r/redshift_partner.html.markdown index 62cd986322d0..400fa7174440 100644 --- a/website/docs/cdktf/python/r/redshift_partner.html.markdown +++ b/website/docs/cdktf/python/r/redshift_partner.html.markdown @@ -38,6 +38,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `account_id` - (Required) The Amazon Web Services account ID that owns the cluster. * `cluster_identifier` - (Required) The cluster identifier of the cluster that receives data from the partner. * `database_name` - (Required) The name of the database that receives data from the partner. @@ -76,4 +77,4 @@ Using `terraform import`, import Redshift usage limits using the `id`. 
For examp % terraform import aws_redshift_partner.example 01234567910:cluster-example-id:example:example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/redshift_resource_policy.html.markdown b/website/docs/cdktf/python/r/redshift_resource_policy.html.markdown index 84bc2daacdae..649ad8faa906 100644 --- a/website/docs/cdktf/python/r/redshift_resource_policy.html.markdown +++ b/website/docs/cdktf/python/r/redshift_resource_policy.html.markdown @@ -49,6 +49,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `resource_arn` - (Required) The Amazon Resource Name (ARN) of the account to create or update a resource policy for. * `policy` - (Required) The content of the resource policy being updated. @@ -83,4 +84,4 @@ Using `terraform import`, import Redshift Resource Policies using the `resource_ % terraform import aws_redshift_resource_policy.example example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/redshift_scheduled_action.html.markdown b/website/docs/cdktf/python/r/redshift_scheduled_action.html.markdown index 376c8109f2d4..906f53c4d42d 100644 --- a/website/docs/cdktf/python/r/redshift_scheduled_action.html.markdown +++ b/website/docs/cdktf/python/r/redshift_scheduled_action.html.markdown @@ -116,6 +116,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The scheduled action name. * `description` - (Optional) The description of the scheduled action. * `enable` - (Optional) Whether to enable the scheduled action. Default is `true` . @@ -180,4 +181,4 @@ Using `terraform import`, import Redshift Scheduled Action using the `name`. For % terraform import aws_redshift_scheduled_action.example tf-redshift-scheduled-action ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/redshift_snapshot_copy.html.markdown b/website/docs/cdktf/python/r/redshift_snapshot_copy.html.markdown index 952f6bd0d010..70dc9ef979a3 100644 --- a/website/docs/cdktf/python/r/redshift_snapshot_copy.html.markdown +++ b/website/docs/cdktf/python/r/redshift_snapshot_copy.html.markdown @@ -42,6 +42,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `manual_snapshot_retention_period` - (Optional) Number of days to retain newly copied snapshots in the destination AWS Region after they are copied from the source AWS Region. If the value is `-1`, the manual snapshot is retained indefinitely. * `retention_period` - (Optional) Number of days to retain automated snapshots in the destination region after they are copied from the source region. * `snapshot_copy_grant_name` - (Optional) Name of the snapshot copy grant to use when snapshots of an AWS KMS-encrypted cluster are copied to the destination region. @@ -77,4 +78,4 @@ Using `terraform import`, import Redshift Snapshot Copy using the `id`. 
For exam % terraform import aws_redshift_snapshot_copy.example cluster-id-12345678 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/redshift_snapshot_copy_grant.html.markdown b/website/docs/cdktf/python/r/redshift_snapshot_copy_grant.html.markdown index 6b298bb18d61..d20f1a19a147 100644 --- a/website/docs/cdktf/python/r/redshift_snapshot_copy_grant.html.markdown +++ b/website/docs/cdktf/python/r/redshift_snapshot_copy_grant.html.markdown @@ -33,10 +33,11 @@ class MyConvertedCode(TerraformStack): snapshot_copy_grant_name="my-grant" ) aws_redshift_cluster_test = RedshiftCluster(self, "test_1", - snapshot_copy=RedshiftClusterSnapshotCopy( - destination_region="us-east-2", - grant_name=test.snapshot_copy_grant_name - ), + snapshot_copy=[{ + "destination_region": "us-east-2", + "grant_name": test.snapshot_copy_grant_name + } + ], cluster_identifier=cluster_identifier, node_type=node_type ) @@ -48,6 +49,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `snapshot_copy_grant_name` - (Required, Forces new resource) A friendly name for identifying the grant. * `kms_key_id` - (Optional, Forces new resource) The unique identifier for the customer master key (CMK) that the grant applies to. Specify the key ID or the Amazon Resource Name (ARN) of the CMK. To specify a CMK in a different AWS account, you must use the key ARN. If not specified, the default key is used. * `tags` - (Optional) A map of tags to assign to the resource. 
If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. @@ -84,4 +86,4 @@ Using `terraform import`, import Redshift Snapshot Copy Grants by name. For exam % terraform import aws_redshift_snapshot_copy_grant.test my-grant ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/redshift_snapshot_schedule.html.markdown b/website/docs/cdktf/python/r/redshift_snapshot_schedule.html.markdown index 35fd25ea9f5a..83064f8515be 100644 --- a/website/docs/cdktf/python/r/redshift_snapshot_schedule.html.markdown +++ b/website/docs/cdktf/python/r/redshift_snapshot_schedule.html.markdown @@ -34,6 +34,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `identifier` - (Optional, Forces new resource) The snapshot schedule identifier. If omitted, Terraform will assign a random, unique identifier. * `identifier_prefix` - (Optional, Forces new resource) Creates a unique identifier beginning with the specified prefix. Conflicts with `identifier`. 
@@ -74,4 +75,4 @@ Using `terraform import`, import Redshift Snapshot Schedule using the `identifie % terraform import aws_redshift_snapshot_schedule.default tf-redshift-snapshot-schedule ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/redshift_snapshot_schedule_association.html.markdown b/website/docs/cdktf/python/r/redshift_snapshot_schedule_association.html.markdown index 7f79ccd8b9db..e9b098843b7a 100644 --- a/website/docs/cdktf/python/r/redshift_snapshot_schedule_association.html.markdown +++ b/website/docs/cdktf/python/r/redshift_snapshot_schedule_association.html.markdown @@ -53,6 +53,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `cluster_identifier` - (Required, Forces new resource) The cluster identifier. * `schedule_identifier` - (Required, Forces new resource) The snapshot schedule identifier. 
@@ -85,4 +86,4 @@ Using `terraform import`, import Redshift Snapshot Schedule Association using th % terraform import aws_redshift_snapshot_schedule_association.default tf-redshift-cluster/tf-redshift-snapshot-schedule ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/redshift_subnet_group.html.markdown b/website/docs/cdktf/python/r/redshift_subnet_group.html.markdown index e7a42d979a7a..9263c35c6319 100644 --- a/website/docs/cdktf/python/r/redshift_subnet_group.html.markdown +++ b/website/docs/cdktf/python/r/redshift_subnet_group.html.markdown @@ -64,6 +64,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The name of the Redshift Subnet group. * `description` - (Optional) The description of the Redshift Subnet group. Defaults to "Managed by Terraform". * `subnet_ids` - (Required) An array of VPC subnet IDs. @@ -102,4 +103,4 @@ Using `terraform import`, import Redshift subnet groups using the `name`. 
For ex % terraform import aws_redshift_subnet_group.testgroup1 test-cluster-subnet-group ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/redshift_usage_limit.html.markdown b/website/docs/cdktf/python/r/redshift_usage_limit.html.markdown index 656abdd43e21..99867a7c1ac8 100644 --- a/website/docs/cdktf/python/r/redshift_usage_limit.html.markdown +++ b/website/docs/cdktf/python/r/redshift_usage_limit.html.markdown @@ -38,6 +38,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `amount` - (Required) The limit amount. If time-based, this amount is in minutes. If data-based, this amount is in terabytes (TB). The value must be a positive number. * `breach_action` - (Optional) The action that Amazon Redshift takes when the limit is reached. The default is `log`. Valid values are `log`, `emit-metric`, and `disable`. * `cluster_identifier` - (Required) The identifier of the cluster that you want to limit usage. @@ -79,4 +80,4 @@ Using `terraform import`, import Redshift usage limits using the `id`. 
For examp % terraform import aws_redshift_usage_limit.example example-id ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/redshiftdata_statement.html.markdown b/website/docs/cdktf/python/r/redshiftdata_statement.html.markdown index ca5c9c5700f9..f7dfa663a01d 100644 --- a/website/docs/cdktf/python/r/redshiftdata_statement.html.markdown +++ b/website/docs/cdktf/python/r/redshiftdata_statement.html.markdown @@ -66,6 +66,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `cluster_identifier` - (Optional) The cluster identifier. This parameter is required when connecting to a cluster and authenticating using either Secrets Manager or temporary credentials. * `db_user` - (Optional) The database user name. * `secret_arn` - (Optional) The name or ARN of the secret that enables access to the database. @@ -104,4 +105,4 @@ Using `terraform import`, import Redshift Data Statements using the `id`. 
For ex % terraform import aws_redshiftdata_statement.example example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/redshiftserverless_custom_domain_association.html.markdown b/website/docs/cdktf/python/r/redshiftserverless_custom_domain_association.html.markdown index f3a906c49215..3c2256bacce2 100644 --- a/website/docs/cdktf/python/r/redshiftserverless_custom_domain_association.html.markdown +++ b/website/docs/cdktf/python/r/redshiftserverless_custom_domain_association.html.markdown @@ -56,8 +56,9 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -The following arguments are required: +This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `workgroup_name` - (Required) Name of the workgroup. * `custom_domain_name` - (Required) Custom domain to associate with the workgroup. * `custom_domain_certificate_arn` - (Required) ARN of the certificate for the custom domain association. 
@@ -93,4 +94,4 @@ Using `terraform import`, import Redshift Serverless Custom Domain Association u % terraform import aws_redshiftserverless_custom_domain_association.example example-workgroup,example.com ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/redshiftserverless_endpoint_access.html.markdown b/website/docs/cdktf/python/r/redshiftserverless_endpoint_access.html.markdown index f22a76699701..071bf6069001 100644 --- a/website/docs/cdktf/python/r/redshiftserverless_endpoint_access.html.markdown +++ b/website/docs/cdktf/python/r/redshiftserverless_endpoint_access.html.markdown @@ -37,6 +37,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `endpoint_name` - (Required) The name of the endpoint. * `owner_account` - (Optional) The owner Amazon Web Services account for the Amazon Redshift Serverless workgroup. * `subnet_ids` - (Required) An array of VPC subnet IDs to associate with the endpoint. @@ -91,4 +92,4 @@ Using `terraform import`, import Redshift Serverless Endpoint Access using the ` % terraform import aws_redshiftserverless_endpoint_access.example example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/redshiftserverless_namespace.html.markdown b/website/docs/cdktf/python/r/redshiftserverless_namespace.html.markdown index 83f956cab14f..6be99cc98574 100644 --- a/website/docs/cdktf/python/r/redshiftserverless_namespace.html.markdown +++ b/website/docs/cdktf/python/r/redshiftserverless_namespace.html.markdown @@ -12,7 +12,7 @@ description: |- Creates a new Amazon Redshift Serverless Namespace. 
--> **Note:** Write-Only argument `admin_password_wo` is available to use in place of `admin_password`. Write-Only arguments are supported in HashiCorp Terraform 1.11.0 and later. [Learn more](https://developer.hashicorp.com/terraform/language/v1.11.x/resources/ephemeral#write-only-arguments). +-> **Note:** Write-Only argument `admin_password_wo` is available to use in place of `admin_password`. Write-Only arguments are supported in HashiCorp Terraform 1.11.0 and later. [Learn more](https://developer.hashicorp.com/terraform/language/resources/ephemeral#write-only-arguments). ## Example Usage @@ -37,6 +37,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `admin_password_secret_kms_key_id` - (Optional) ID of the KMS key used to encrypt the namespace's admin credentials secret. * `admin_user_password` - (Optional) The password of the administrator for the first database created in the namespace. Conflicts with `manage_admin_password` and `admin_user_password_wo`. 
@@ -89,4 +90,4 @@ Using `terraform import`, import Redshift Serverless Namespaces using the `names % terraform import aws_redshiftserverless_namespace.example example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/redshiftserverless_resource_policy.html.markdown b/website/docs/cdktf/python/r/redshiftserverless_resource_policy.html.markdown index 5f73a6baa3cd..3ecf0413e818 100644 --- a/website/docs/cdktf/python/r/redshiftserverless_resource_policy.html.markdown +++ b/website/docs/cdktf/python/r/redshiftserverless_resource_policy.html.markdown @@ -48,6 +48,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `resource_arn` - (Required) The Amazon Resource Name (ARN) of the account to create or update a resource policy for. * `policy` - (Required) The policy to create or update. For example, the following policy grants a user authorization to restore a snapshot. 
@@ -82,4 +83,4 @@ Using `terraform import`, import Redshift Serverless Resource Policies using the % terraform import aws_redshiftserverless_resource_policy.example example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/redshiftserverless_snapshot.html.markdown b/website/docs/cdktf/python/r/redshiftserverless_snapshot.html.markdown index c3b3cceebb3e..91665e59c00e 100644 --- a/website/docs/cdktf/python/r/redshiftserverless_snapshot.html.markdown +++ b/website/docs/cdktf/python/r/redshiftserverless_snapshot.html.markdown @@ -36,6 +36,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `namespace_name` - (Required) The namespace to create a snapshot for. * `snapshot_name` - (Required) The name of the snapshot. * `retention_period` - (Optional) How long to retain the created snapshot. Default value is `-1`. 
@@ -78,4 +79,4 @@ Using `terraform import`, import Redshift Serverless Snapshots using the `snapsh % terraform import aws_redshiftserverless_snapshot.example example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/redshiftserverless_usage_limit.html.markdown b/website/docs/cdktf/python/r/redshiftserverless_usage_limit.html.markdown index 79f890e4339e..d3b2bc9fbe77 100644 --- a/website/docs/cdktf/python/r/redshiftserverless_usage_limit.html.markdown +++ b/website/docs/cdktf/python/r/redshiftserverless_usage_limit.html.markdown @@ -45,6 +45,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `amount` - (Required) The limit amount. If time-based, this amount is in Redshift Processing Units (RPU) consumed per hour. If data-based, this amount is in terabytes (TB) of data transferred between Regions in cross-account sharing. The value must be a positive number. * `breach_action` - (Optional) The action that Amazon Redshift Serverless takes when the limit is reached. Valid values are `log`, `emit-metric`, and `deactivate`. The default is `log`. * `period` - (Optional) The time period that the amount applies to. A weekly period begins on Sunday. Valid values are `daily`, `weekly`, and `monthly`. The default is `monthly`. 
@@ -83,4 +84,4 @@ Using `terraform import`, import Redshift Serverless Usage Limits using the `id` % terraform import aws_redshiftserverless_usage_limit.example example-id ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/redshiftserverless_workgroup.html.markdown b/website/docs/cdktf/python/r/redshiftserverless_workgroup.html.markdown index ae01db334173..9f49abf1bee5 100644 --- a/website/docs/cdktf/python/r/redshiftserverless_workgroup.html.markdown +++ b/website/docs/cdktf/python/r/redshiftserverless_workgroup.html.markdown @@ -41,6 +41,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `base_capacity` - (Optional) The base data warehouse capacity of the workgroup in Redshift Processing Units (RPUs). * `price_performance_target` - (Optional) Price-performance scaling for the workgroup. See `Price Performance Target` below. * `config_parameter` - (Optional) An array of parameters to set for more control over a serverless database. See `Config Parameter` below. 
@@ -125,4 +126,4 @@ Using `terraform import`, import Redshift Serverless Workgroups using the `workg % terraform import aws_redshiftserverless_workgroup.example example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/rekognition_collection.html.markdown b/website/docs/cdktf/python/r/rekognition_collection.html.markdown index ec87ee60849a..4f69883b9775 100644 --- a/website/docs/cdktf/python/r/rekognition_collection.html.markdown +++ b/website/docs/cdktf/python/r/rekognition_collection.html.markdown @@ -42,6 +42,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `tags` - (Optional) Map of tags assigned to the resource. If configured with a provider [`default_tags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. ## Attribute Reference @@ -60,7 +61,7 @@ This resource exports the following attributes in addition to the arguments abov ## Import -In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Rekognition Collection using the `example_id_arg`. For example: +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Rekognition Collection using the `collection_id`. For example: ```python # DO NOT EDIT. 
Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug @@ -77,10 +78,10 @@ class MyConvertedCode(TerraformStack): RekognitionCollection.generate_config_for_import(self, "example", "collection-id-12345678") ``` -Using `terraform import`, import Rekognition Collection using the `example_id_arg`. For example: +Using `terraform import`, import Rekognition Collection using the `collection_id`. For example: ```console % terraform import aws_rekognition_collection.example collection-id-12345678 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/rekognition_project.html.markdown b/website/docs/cdktf/python/r/rekognition_project.html.markdown index df905a7cf54a..2b65556f292f 100644 --- a/website/docs/cdktf/python/r/rekognition_project.html.markdown +++ b/website/docs/cdktf/python/r/rekognition_project.html.markdown @@ -14,6 +14,8 @@ Terraform resource for managing an AWS Rekognition Project. ## Example Usage +### Content Moderation + ```python # DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug from constructs import Construct @@ -33,6 +35,26 @@ class MyConvertedCode(TerraformStack): ) ``` +### Custom Labels + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. 
+# +from imports.aws.rekognition_project import RekognitionProject +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + RekognitionProject(self, "example", + feature="CUSTOM_LABELS", + name="example-project" + ) +``` + ## Argument Reference The following arguments are required: @@ -41,7 +63,8 @@ The following arguments are required: The following arguments are optional: -* `auto_update` - (Optional) Specify if automatic retraining should occur. Valid values are `ENABLED` or `DISABLED`. Defaults to `DISABLED`. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `auto_update` - (Optional) Specify if automatic retraining should occur. Valid values are `ENABLED` or `DISABLED`. Must be set when `feature` is `CONTENT_MODERATION`, but do not set otherwise. * `feature` - (Optional) Specify the feature being customized. Valid values are `CONTENT_MODERATION` or `CUSTOM_LABELS`. Defaults to `CUSTOM_LABELS`. * `tags` - (Optional) Map of tags assigned to the resource. If configured with a provider [`default_tags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. @@ -61,7 +84,7 @@ This resource exports the following attributes in addition to the arguments abov ## Import -In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Rekognition Project using the `example_id_arg`. For example: +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Rekognition Project using the `name`. For example: ```python # DO NOT EDIT. 
Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug @@ -84,4 +107,4 @@ Using `terraform import`, import Rekognition Project using the `name`. For examp % terraform import aws_rekognition_project.example project-id-12345678 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/rekognition_stream_processor.html.markdown b/website/docs/cdktf/python/r/rekognition_stream_processor.html.markdown index a7054f922035..3863627aa762 100644 --- a/website/docs/cdktf/python/r/rekognition_stream_processor.html.markdown +++ b/website/docs/cdktf/python/r/rekognition_stream_processor.html.markdown @@ -257,6 +257,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `data_sharing_preference` - (Optional) See [`data_sharing_preference`](#data_sharing_preference). * `kms_key_id` - (Optional) Optional parameter for label detection stream processors. * `notification_channel` - (Optional) The Amazon Simple Notification Service topic to which Amazon Rekognition publishes the completion status. See [`notification_channel`](#notification_channel). @@ -371,4 +372,4 @@ Using `terraform import`, import Rekognition Stream Processor using the `name`. 
% terraform import aws_rekognition_stream_processor.example my-stream ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/resiliencehub_resiliency_policy.html.markdown b/website/docs/cdktf/python/r/resiliencehub_resiliency_policy.html.markdown index d32cef3f3fd7..beca9672fdc3 100644 --- a/website/docs/cdktf/python/r/resiliencehub_resiliency_policy.html.markdown +++ b/website/docs/cdktf/python/r/resiliencehub_resiliency_policy.html.markdown @@ -30,28 +30,28 @@ class MyConvertedCode(TerraformStack): data_location_constraint="AnyLocation", description="testexample", name="testexample", - policy=[{ - "az": [{ - "rpo": "24h", - "rto": "24h" - } + policy=[ResiliencehubResiliencyPolicyPolicy( + az=[ResiliencehubResiliencyPolicyPolicyAz( + rpo="24h", + rto="24h" + ) ], - "hardware": [{ - "rpo": "24h", - "rto": "24h" - } + hardware=[ResiliencehubResiliencyPolicyPolicyHardware( + rpo="24h", + rto="24h" + ) ], - "region": [{ - "rpo": "24h", - "rto": "24h" - } + region=[ResiliencehubResiliencyPolicyPolicyRegion( + rpo="24h", + rto="24h" + ) ], - "software_attribute": [{ - "rpo": "24h", - "rto": "24h" - } + software_attribute=[ResiliencehubResiliencyPolicyPolicySoftware( + rpo="24h", + rto="24h" + ) ] - } + ) ], tier="NonCritical" ) @@ -70,6 +70,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` (String) Description of Resiliency Policy. * `data_location_constraint` (String) Data Location Constraint of the Policy. Valid values are `AnyLocation`, `SameContinent`, and `SameCountry`. 
@@ -85,6 +86,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `region` - (Attributes) Specifies Region failure policy. [`policy.region`](#policyregion) ### `policy.az` @@ -164,4 +166,4 @@ Using `terraform import`, import Resilience Hub Resiliency Policy using the `arn % terraform import aws_resiliencehub_resiliency_policy.example arn:aws:resiliencehub:us-east-1:123456789012:resiliency-policy/8c1cfa29-d1dd-4421-aa68-c9f64cced4c2 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/resourceexplorer2_index.html.markdown b/website/docs/cdktf/python/r/resourceexplorer2_index.html.markdown index c1512149ae8e..4856bdfbf04e 100644 --- a/website/docs/cdktf/python/r/resourceexplorer2_index.html.markdown +++ b/website/docs/cdktf/python/r/resourceexplorer2_index.html.markdown @@ -35,6 +35,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `type` - (Required) The type of the index. Valid values: `AGGREGATOR`, `LOCAL`. To understand the difference between `LOCAL` and `AGGREGATOR`, see the [_AWS Resource Explorer User Guide_](https://docs.aws.amazon.com/resource-explorer/latest/userguide/manage-aggregator-region.html). * `tags` - (Optional) Key-value map of resource tags. 
If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. @@ -55,6 +56,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_resourceexplorer2_index.example + identity = { + "arn" = "arn:aws:resource-explorer-2:us-east-1:123456789012:index/example-index-id" + } +} + +resource "aws_resourceexplorer2_index" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the Resource Explorer index. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Resource Explorer indexes using the `arn`. For example: ```python @@ -78,4 +100,4 @@ Using `terraform import`, import Resource Explorer indexes using the `arn`. 
For % terraform import aws_resourceexplorer2_index.example arn:aws:resource-explorer-2:us-east-1:123456789012:index/6047ac4e-207e-4487-9bcf-cb53bb0ff5cc ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/resourceexplorer2_view.html.markdown b/website/docs/cdktf/python/r/resourceexplorer2_view.html.markdown index 4f5af2bf1d68..a435a4aa1be1 100644 --- a/website/docs/cdktf/python/r/resourceexplorer2_view.html.markdown +++ b/website/docs/cdktf/python/r/resourceexplorer2_view.html.markdown @@ -50,6 +50,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `default_view` - (Optional) Specifies whether the view is the [_default view_](https://docs.aws.amazon.com/resource-explorer/latest/userguide/manage-views-about.html#manage-views-about-default) for the AWS Region. Default: `false`. * `filters` - (Optional) Specifies which resources are included in the results of queries made using this view. See [Filters](#filters) below for more details. * `included_property` - (Optional) Optional fields to be included in search results from this view. See [Included Properties](#included-properties) below for more details. @@ -78,6 +79,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. 
For example: + +```terraform +import { + to = aws_resourceexplorer2_view.example + identity = { + "arn" = "arn:aws:resource-explorer-2:us-east-1:123456789012:view/example-view/a1b2c3d4-5678-90ab-cdef-EXAMPLE11111" + } +} + +resource "aws_resourceexplorer2_view" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the Resource Explorer view. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Resource Explorer views using the `arn`. For example: ```python @@ -101,4 +123,4 @@ Using `terraform import`, import Resource Explorer views using the `arn`. For ex % terraform import aws_resourceexplorer2_view.example arn:aws:resource-explorer-2:us-west-2:123456789012:view/exampleview/e0914f6c-6c27-4b47-b5d4-6b28381a2421 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/resourcegroups_group.html.markdown b/website/docs/cdktf/python/r/resourcegroups_group.html.markdown index c6777a484369..7eb1c38c362c 100644 --- a/website/docs/cdktf/python/r/resourcegroups_group.html.markdown +++ b/website/docs/cdktf/python/r/resourcegroups_group.html.markdown @@ -38,6 +38,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The resource group's name. A resource group name can have a maximum of 127 characters, including letters, numbers, hyphens, dots, and underscores. The name cannot start with `AWS` or `aws`. 
* `configuration` - (Optional) A configuration associates the resource group with an AWS service and specifies how the service can interact with the resources in the group. See below for details. * `description` - (Optional) A description of the resource group. @@ -91,4 +92,4 @@ Using `terraform import`, import resource groups using the `name`. For example: % terraform import aws_resourcegroups_group.foo resource-group-name ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/resourcegroups_resource.html.markdown b/website/docs/cdktf/python/r/resourcegroups_resource.html.markdown index baa6da89b3f4..ec67152fd639 100644 --- a/website/docs/cdktf/python/r/resourcegroups_resource.html.markdown +++ b/website/docs/cdktf/python/r/resourcegroups_resource.html.markdown @@ -51,8 +51,9 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -The following arguments are required: +This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `group_arn` - (Required) Name or ARN of the resource group to add resources to. * `resource_arn` - (Required) ARN of the resource to be added to the group. 
@@ -95,4 +96,4 @@ Using `terraform import`, import an AWS Resource Groups Resource using `group_ar % terraform import aws_resourcegroups_resource.example arn:aws:resource-groups:us-west-2:012345678901:group/example,arn:aws:lambda:us-west-2:012345678901:function:example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/route.html.markdown b/website/docs/cdktf/python/r/route.html.markdown index cd7681e9749a..6168fe02f824 100644 --- a/website/docs/cdktf/python/r/route.html.markdown +++ b/website/docs/cdktf/python/r/route.html.markdown @@ -73,6 +73,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `route_table_id` - (Required) The ID of the routing table. One of the following destination arguments must be supplied: @@ -118,6 +119,46 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_route.example + identity = { + route_table_id = "rtb-656C65616E6F72" + destination_cidr_block = "10.42.0.0/16" + + ### OR by IPv6 CIDR block + # destination_ipv6_cidr_block = "2001:db8::/56" + + ### OR by prefix list ID + # destination_prefix_list_id = "pl-0570a1d2d725c16be" + } +} + +resource "aws_route" "example" { + route_table_id = "rtb-656C65616E6F72" + destination_cidr_block = "10.42.0.0/16" + vpc_peering_connection_id = "pcx-45ff3dc1" +} +``` + +### Identity Schema + +#### Required + +* `route_table_id` - (String) ID of the route table. 
+ +#### Optional + +~> Exactly one of `destination_cidr_block`, `destination_ipv6_cidr_block`, or `destination_prefix_list_id` is required. + +* `account_id` (String) AWS Account where this resource is managed. +* `destination_cidr_block` - (String) Destination IPv4 CIDR block. +* `destination_ipv6_cidr_block` - (String) Destination IPv6 CIDR block. +* `destination_prefix_list_id` - (String) ID of the destination prefix list. +* `region` (String) Region where this resource is managed. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import individual routes using `ROUTETABLEID_DESTINATION`. Import [local routes](https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Route_Tables.html#RouteTables) using the VPC's IPv4 or IPv6 CIDR blocks. For example: Import a route in route table `rtb-656C65616E6F72` with an IPv4 destination CIDR of `10.42.0.0/16`: @@ -191,4 +232,4 @@ Import a route in route table `rtb-656C65616E6F72` with a managed prefix list de % terraform import aws_route.my_route rtb-656C65616E6F72_pl-0570a1d2d725c16be ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/route53_record.html.markdown b/website/docs/cdktf/python/r/route53_record.html.markdown index 94b456031bc4..94bc908f4995 100644 --- a/website/docs/cdktf/python/r/route53_record.html.markdown +++ b/website/docs/cdktf/python/r/route53_record.html.markdown @@ -312,6 +312,36 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. 
For example: + +```terraform +import { + to = aws_route53_record.example + identity = { + zone_id = "Z4KAPRWWNC7JR" + name = "dev.example.com" + type = "NS" + } +} + +resource "aws_route53_record" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `zone_id` (String) Hosted zone ID for the record. +* `name` (String) Name of the record. +* `type` (String) Record type. + +#### Optional + +* `account_id` (String) AWS Account where this resource is managed. +* `set_identifier` (String) Set identifier for the record. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Route53 Records using the ID of the record, record name, record type, and set identifier. For example: Using the ID of the record, which is the zone identifier, record name, and record type, separated by underscores (`_`): @@ -328,7 +358,7 @@ from imports.aws.route53_record import Route53Record class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) - Route53Record.generate_config_for_import(self, "myrecord", "Z4KAPRWWNC7JR_dev.example.com_NS") + Route53Record.generate_config_for_import(self, "example", "Z4KAPRWWNC7JR_dev.example.com_NS") ``` If the record also contains a set identifier, append it: @@ -345,7 +375,7 @@ from imports.aws.route53_record import Route53Record class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) - Route53Record.generate_config_for_import(self, "myrecord", "Z4KAPRWWNC7JR_dev.example.com_NS_dev") + Route53Record.generate_config_for_import(self, "example", "Z4KAPRWWNC7JR_dev.example.com_NS_dev") ``` If the record name is the empty string, it can be omitted: @@ -362,7 +392,7 @@ from imports.aws.route53_record import Route53Record class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) - 
Route53Record.generate_config_for_import(self, "myrecord", "Z4KAPRWWNC7JR__NS") + Route53Record.generate_config_for_import(self, "example", "Z4KAPRWWNC7JR__NS") ``` **Using `terraform import` to import** Route53 Records using the ID of the record, record name, record type, and set identifier. For example: @@ -370,13 +400,13 @@ class MyConvertedCode(TerraformStack): Using the ID of the record, which is the zone identifier, record name, and record type, separated by underscores (`_`): ```console -% terraform import aws_route53_record.myrecord Z4KAPRWWNC7JR_dev_NS +% terraform import aws_route53_record.example Z4KAPRWWNC7JR_dev_NS ``` If the record also contains a set identifier, append it: ```console -% terraform import aws_route53_record.myrecord Z4KAPRWWNC7JR_dev_NS_dev +% terraform import aws_route53_record.example Z4KAPRWWNC7JR_dev_NS_dev ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/route53_resolver_config.html.markdown b/website/docs/cdktf/python/r/route53_resolver_config.html.markdown index a5583b43f6fb..ceaec0cf67d1 100644 --- a/website/docs/cdktf/python/r/route53_resolver_config.html.markdown +++ b/website/docs/cdktf/python/r/route53_resolver_config.html.markdown @@ -44,6 +44,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `resource_id` - (Required) The ID of the VPC that the configuration is for. * `autodefined_reverse_flag` - (Required) Indicates whether or not the Resolver will create autodefined rules for reverse DNS lookups. Valid values: `ENABLE`, `DISABLE`. 
@@ -79,4 +80,4 @@ Using `terraform import`, import Route 53 Resolver configs using the Route 53 Re % terraform import aws_route53_resolver_config.example rslvr-rc-715aa20c73a23da7 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/route53_resolver_dnssec_config.html.markdown b/website/docs/cdktf/python/r/route53_resolver_dnssec_config.html.markdown index 538711ea2b5d..c62486399b9c 100644 --- a/website/docs/cdktf/python/r/route53_resolver_dnssec_config.html.markdown +++ b/website/docs/cdktf/python/r/route53_resolver_dnssec_config.html.markdown @@ -44,6 +44,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `resource_id` - (Required) The ID of the virtual private cloud (VPC) that you're updating the DNSSEC validation status for. ## Attribute Reference @@ -80,4 +81,4 @@ Using `terraform import`, import Route 53 Resolver DNSSEC configs using the Rou % terraform import aws_route53_resolver_dnssec_config.example rdsc-be1866ecc1683e95 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/route53_resolver_endpoint.html.markdown b/website/docs/cdktf/python/r/route53_resolver_endpoint.html.markdown index 915afaa68dbd..0d63497a34c6 100644 --- a/website/docs/cdktf/python/r/route53_resolver_endpoint.html.markdown +++ b/website/docs/cdktf/python/r/route53_resolver_endpoint.html.markdown @@ -49,9 +49,9 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `direction` - (Required) Direction of DNS queries to or from the Route 53 Resolver endpoint. -Valid values are `INBOUND` (resolver forwards DNS queries to the DNS service for a VPC from your network or another VPC) -or `OUTBOUND` (resolver forwards DNS queries from the DNS service for a VPC to your network or another VPC). +Valid values are `INBOUND` (resolver forwards DNS queries to the DNS service for a VPC from your network or another VPC), `OUTBOUND` (resolver forwards DNS queries from the DNS service for a VPC to your network or another VPC) or `INBOUND_DELEGATION` (resolver delegates queries to Route 53 private hosted zones from your network). * `ip_address` - (Required) Subnets and IP addresses in your VPC that you want DNS queries to pass through on the way from your VPCs to your network (for outbound endpoints) or on the way from your network to your VPCs (for inbound endpoints). Described below. * `name` - (Optional) Friendly name of the Route 53 Resolver endpoint. 
@@ -110,4 +110,4 @@ Using `terraform import`, import Route 53 Resolver endpoints using the Route 53 % terraform import aws_route53_resolver_endpoint.foo rslvr-in-abcdef01234567890 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/route53_resolver_firewall_config.html.markdown b/website/docs/cdktf/python/r/route53_resolver_firewall_config.html.markdown index b6c5f4c2da09..5522280e11f4 100644 --- a/website/docs/cdktf/python/r/route53_resolver_firewall_config.html.markdown +++ b/website/docs/cdktf/python/r/route53_resolver_firewall_config.html.markdown @@ -45,6 +45,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `resource_id` - (Required) The ID of the VPC that the configuration is for. * `firewall_fail_open` - (Required) Determines how Route 53 Resolver handles queries during failures, for example when all traffic that is sent to DNS Firewall fails to receive a reply. By default, fail open is disabled, which means the failure mode is closed. This approach favors security over availability. DNS Firewall blocks queries that it is unable to evaluate properly. If you enable this option, the failure mode is open. This approach favors availability over security. DNS Firewall allows queries to proceed if it is unable to properly evaluate them. Valid values: `ENABLED`, `DISABLED`. 
@@ -80,4 +81,4 @@ Using `terraform import`, import Route 53 Resolver DNS Firewall configs using th % terraform import aws_route53_resolver_firewall_config.example rdsc-be1866ecc1683e95 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/route53_resolver_firewall_domain_list.html.markdown b/website/docs/cdktf/python/r/route53_resolver_firewall_domain_list.html.markdown index 052144b381e1..37f63f0ebff5 100644 --- a/website/docs/cdktf/python/r/route53_resolver_firewall_domain_list.html.markdown +++ b/website/docs/cdktf/python/r/route53_resolver_firewall_domain_list.html.markdown @@ -35,6 +35,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) A name that lets you identify the domain list, to manage and use it. * `domains` - (Optional) A array of domains for the firewall domain list. * `tags` - (Optional) A map of tags to assign to the resource. f configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. 
@@ -72,4 +73,4 @@ Using `terraform import`, import Route 53 Resolver DNS Firewall domain lists us % terraform import aws_route53_resolver_firewall_domain_list.example rslvr-fdl-0123456789abcdef ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/route53_resolver_firewall_rule.html.markdown b/website/docs/cdktf/python/r/route53_resolver_firewall_rule.html.markdown index e4c32de9aa43..c732e862b709 100644 --- a/website/docs/cdktf/python/r/route53_resolver_firewall_rule.html.markdown +++ b/website/docs/cdktf/python/r/route53_resolver_firewall_rule.html.markdown @@ -60,6 +60,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) A name that lets you identify the rule, to manage and use it. * `action` - (Required) The action that DNS Firewall should take on a DNS query when it matches one of the domains in the rule's domain list. Valid values: `ALLOW`, `BLOCK`, `ALERT`. * `block_override_dns_type` - (Required if `block_response` is `OVERRIDE`) The DNS record's type. This determines the format of the record value that you provided in BlockOverrideDomain. Value values: `CNAME`. 
@@ -103,4 +104,4 @@ Using `terraform import`, import Route 53 Resolver DNS Firewall rules using the % terraform import aws_route53_resolver_firewall_rule.example rslvr-frg-0123456789abcdef:rslvr-fdl-0123456789abcdef ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/route53_resolver_firewall_rule_group.html.markdown b/website/docs/cdktf/python/r/route53_resolver_firewall_rule_group.html.markdown index 3edffd4506d1..2b9450c0d3dc 100644 --- a/website/docs/cdktf/python/r/route53_resolver_firewall_rule_group.html.markdown +++ b/website/docs/cdktf/python/r/route53_resolver_firewall_rule_group.html.markdown @@ -35,6 +35,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) A name that lets you identify the rule group, to manage and use it. * `tags` - (Optional) A map of tags to assign to the resource. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. 
@@ -73,4 +74,4 @@ Using `terraform import`, import Route 53 Resolver DNS Firewall rule groups usi % terraform import aws_route53_resolver_firewall_rule_group.example rslvr-frg-0123456789abcdef ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/route53_resolver_firewall_rule_group_association.html.markdown b/website/docs/cdktf/python/r/route53_resolver_firewall_rule_group_association.html.markdown index f13fd9bc34d7..8a492e67aa17 100644 --- a/website/docs/cdktf/python/r/route53_resolver_firewall_rule_group_association.html.markdown +++ b/website/docs/cdktf/python/r/route53_resolver_firewall_rule_group_association.html.markdown @@ -45,6 +45,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) A name that lets you identify the rule group association, to manage and use it. * `firewall_rule_group_id` - (Required) The unique identifier of the firewall rule group. * `mutation_protection` - (Optional) If enabled, this setting disallows modification or removal of the association, to help prevent against accidentally altering DNS firewall protections. Valid values: `ENABLED`, `DISABLED`. 
@@ -85,4 +86,4 @@ Using `terraform import`, import Route 53 Resolver DNS Firewall rule group assoc % terraform import aws_route53_resolver_firewall_rule_group_association.example rslvr-frgassoc-0123456789abcdef ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/route53_resolver_query_log_config.html.markdown b/website/docs/cdktf/python/r/route53_resolver_query_log_config.html.markdown index 1e82117b1d47..875bd201e81e 100644 --- a/website/docs/cdktf/python/r/route53_resolver_query_log_config.html.markdown +++ b/website/docs/cdktf/python/r/route53_resolver_query_log_config.html.markdown @@ -39,6 +39,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `destination_arn` - (Required) The ARN of the resource that you want Route 53 Resolver to send query logs. You can send query logs to an [S3 bucket](s3_bucket.html), a [CloudWatch Logs log group](cloudwatch_log_group.html), or a [Kinesis Data Firehose delivery stream](kinesis_firehose_delivery_stream.html). * `name` - (Required) The name of the Route 53 Resolver query logging configuration. 
@@ -81,4 +82,4 @@ Using `terraform import`, import Route 53 Resolver query logging configurations % terraform import aws_route53_resolver_query_log_config.example rqlc-92edc3b1838248bf ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/route53_resolver_query_log_config_association.html.markdown b/website/docs/cdktf/python/r/route53_resolver_query_log_config_association.html.markdown index e8d20d1d03d7..83c0cfff1a55 100644 --- a/website/docs/cdktf/python/r/route53_resolver_query_log_config_association.html.markdown +++ b/website/docs/cdktf/python/r/route53_resolver_query_log_config_association.html.markdown @@ -36,6 +36,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `resolver_query_log_config_id` - (Required) The ID of the [Route 53 Resolver query logging configuration](route53_resolver_query_log_config.html) that you want to associate a VPC with. * `resource_id` - (Required) The ID of a VPC that you want this query logging configuration to log queries for. 
@@ -70,4 +71,4 @@ Using `terraform import`, import Route 53 Resolver query logging configuration % terraform import aws_route53_resolver_query_log_config_association.example rqlca-b320624fef3c4d70 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/route53_resolver_rule.html.markdown b/website/docs/cdktf/python/r/route53_resolver_rule.html.markdown index fa392a671702..9f5808e53c69 100644 --- a/website/docs/cdktf/python/r/route53_resolver_rule.html.markdown +++ b/website/docs/cdktf/python/r/route53_resolver_rule.html.markdown @@ -96,6 +96,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `domain_name` - (Required) DNS queries for this domain name are forwarded to the IP addresses that are specified using `target_ip`. * `rule_type` - (Required) Rule type. Valid values are `FORWARD`, `SYSTEM` and `RECURSIVE`. * `name` - (Optional) Friendly name that lets you easily find a rule in the Resolver dashboard in the Route 53 console. @@ -125,6 +126,32 @@ Values are `NOT_SHARED`, `SHARED_BY_ME` or `SHARED_WITH_ME` ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_route53_resolver_rule.example + identity = { + id = "rslvr-rr-0123456789abcdef0" + } +} + +resource "aws_route53_resolver_rule" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `id` - (String) ID of the Route53 Resolver rule. 
+ +#### Optional + +* `account_id` (String) AWS Account where this resource is managed. +* `region` (String) Region where this resource is managed. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Route53 Resolver rules using the `id`. For example: ```python @@ -139,13 +166,13 @@ from imports.aws.route53_resolver_rule import Route53ResolverRule class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) - Route53ResolverRule.generate_config_for_import(self, "sys", "rslvr-rr-0123456789abcdef0") + Route53ResolverRule.generate_config_for_import(self, "example", "rslvr-rr-0123456789abcdef0") ``` Using `terraform import`, import Route53 Resolver rules using the `id`. For example: ```console -% terraform import aws_route53_resolver_rule.sys rslvr-rr-0123456789abcdef0 +% terraform import aws_route53_resolver_rule.example rslvr-rr-0123456789abcdef0 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/route53_resolver_rule_association.html.markdown b/website/docs/cdktf/python/r/route53_resolver_rule_association.html.markdown index 96881904f96c..3b84d4e27532 100644 --- a/website/docs/cdktf/python/r/route53_resolver_rule_association.html.markdown +++ b/website/docs/cdktf/python/r/route53_resolver_rule_association.html.markdown @@ -36,6 +36,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `resolver_rule_id` - (Required) The ID of the resolver rule that you want to associate with the VPC. 
* `vpc_id` - (Required) The ID of the VPC that you want to associate the resolver rule with. * `name` - (Optional) A name for the association that you're creating between a resolver rule and a VPC. @@ -48,6 +49,32 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_route53_resolver_rule_association.example + identity = { + id = "rslvr-rrassoc-97242eaf88example" + } +} + +resource "aws_route53_resolver_rule_association" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `id` - (String) ID of the Route53 Resolver rule association. + +#### Optional + +* `account_id` (String) AWS Account where this resource is managed. +* `region` (String) Region where this resource is managed. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Route53 Resolver rule associations using the `id`. For example: ```python @@ -71,4 +98,4 @@ Using `terraform import`, import Route53 Resolver rule associations using the `i % terraform import aws_route53_resolver_rule_association.example rslvr-rrassoc-97242eaf88example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/route53_zone.html.markdown b/website/docs/cdktf/python/r/route53_zone.html.markdown index 94948a30fa44..485c41776d21 100644 --- a/website/docs/cdktf/python/r/route53_zone.html.markdown +++ b/website/docs/cdktf/python/r/route53_zone.html.markdown @@ -85,13 +85,26 @@ from cdktf import TerraformStack # See https://cdk.tf/provider-generation for more details. 
# from imports.aws.route53_zone import Route53Zone +from imports.aws.vpc import Vpc class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) + primary = Vpc(self, "primary", + cidr_block="10.6.0.0/16", + enable_dns_hostnames=True, + enable_dns_support=True + ) + secondary = Vpc(self, "secondary", + cidr_block="10.7.0.0/16", + enable_dns_hostnames=True, + enable_dns_support=True + ) Route53Zone(self, "private", name="example.com", vpc=[Route53ZoneVpc( - vpc_id=example.id + vpc_id=primary.id + ), Route53ZoneVpc( + vpc_id=secondary.id ) ] ) @@ -157,4 +170,4 @@ Using `terraform import`, import Route53 Zones using the zone `id`. For example: % terraform import aws_route53_zone.myzone Z1D633PJN98FT9 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/route53profiles_association.html.markdown b/website/docs/cdktf/python/r/route53profiles_association.html.markdown index cfcc1757b28c..21c96be48f07 100644 --- a/website/docs/cdktf/python/r/route53profiles_association.html.markdown +++ b/website/docs/cdktf/python/r/route53profiles_association.html.markdown @@ -54,6 +54,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the Profile Association. Must match a regex of `(?!^[0-9]+$)([a-zA-Z0-9\\-_' ']+)`. * `profile_id` - (Required) ID of the profile associated with the VPC. * `resource_id` - (Required) Resource ID of the VPC the profile to be associated with. 
@@ -95,10 +96,10 @@ class MyConvertedCode(TerraformStack): Route53ProfilesAssociation.generate_config_for_import(self, "example", "rpa-id-12345678") ``` -Using `terraform import`, import Route 53 Profiles Association using the `example_id_arg`. For example: +Using `terraform import`, import Route 53 Profiles Association using the `id`. For example: ```console % terraform import aws_route53profiles_association.example rpa-id-12345678 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/route53profiles_profile.html.markdown b/website/docs/cdktf/python/r/route53profiles_profile.html.markdown index 109444782213..b4b594e80304 100644 --- a/website/docs/cdktf/python/r/route53profiles_profile.html.markdown +++ b/website/docs/cdktf/python/r/route53profiles_profile.html.markdown @@ -40,6 +40,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the Profile. * `tags` - (Optional) Map of tags to assign to the resource. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. @@ -66,7 +67,7 @@ This resource exports the following attributes in addition to the arguments abov ## Import -In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Route 53 Profiles Profile using the `example_id_arg`. 
For example: +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Route 53 Profiles Profile using the `id`. For example: ```python # DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug @@ -83,10 +84,10 @@ class MyConvertedCode(TerraformStack): Route53ProfilesProfile.generate_config_for_import(self, "example", "rp-12345678") ``` -Using `terraform import`, import Route 53 Profiles Profile using the `example`. For example: +Using `terraform import`, import Route 53 Profiles Profile using the `id`. For example: ```console % terraform import aws_route53profiles_profile.example rp-12345678 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/route53profiles_resource_association.html.markdown b/website/docs/cdktf/python/r/route53profiles_resource_association.html.markdown index 7ec2e257754c..c48027791bad 100644 --- a/website/docs/cdktf/python/r/route53profiles_resource_association.html.markdown +++ b/website/docs/cdktf/python/r/route53profiles_resource_association.html.markdown @@ -62,6 +62,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the Profile Resource Association. * `profile_id` - (Required) ID of the profile associated with the VPC. * `resource_arn` - (Required) Resource ID of the resource to be associated with the profile. 
@@ -104,10 +105,10 @@ class MyConvertedCode(TerraformStack): Route53ProfilesResourceAssociation.generate_config_for_import(self, "example", "rpa-id-12345678") ``` -Using `terraform import`, import Route 53 Profiles Resource Association using the `example_id_arg`. For example: +Using `terraform import`, import Route 53 Profiles Resource Association using the `id`. For example: ```console % terraform import aws_route53profiles_resource_association.example rpa-id-12345678 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/route53recoverycontrolconfig_cluster.html.markdown b/website/docs/cdktf/python/r/route53recoverycontrolconfig_cluster.html.markdown index 94a7ab20a5a7..b1995e95c35e 100644 --- a/website/docs/cdktf/python/r/route53recoverycontrolconfig_cluster.html.markdown +++ b/website/docs/cdktf/python/r/route53recoverycontrolconfig_cluster.html.markdown @@ -33,9 +33,10 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -The following arguments are required: +This resource supports the following arguments: * `name` - (Required) Unique name describing the cluster. +* `network_type` - (Optional) Network type of cluster. Valid values are `IPV4` and `DUALSTACK`. Defaults to `IPV4`. ## Attribute Reference @@ -75,4 +76,4 @@ Using `terraform import`, import Route53 Recovery Control Config cluster using t % terraform import aws_route53recoverycontrolconfig_cluster.mycluster arn:aws:route53-recovery-control::313517334327:cluster/f9ae13be-a11e-4ec7-8522-94a70468e6ea ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/route_table.html.markdown b/website/docs/cdktf/python/r/route_table.html.markdown index a1a4151a21a5..c922d84dfd52 100644 --- a/website/docs/cdktf/python/r/route_table.html.markdown +++ b/website/docs/cdktf/python/r/route_table.html.markdown @@ -169,6 +169,7 @@ The target could then be updated again back to `local`. 
This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `vpc_id` - (Required) The VPC ID. * `route` - (Optional) A list of route objects. Their keys are documented below. This argument is processed in [attribute-as-blocks mode](https://www.terraform.io/docs/configuration/attr-as-blocks.html). This means that omitting this argument is interpreted as ignoring any existing routes. To remove all managed routes an empty list should be specified. See the example above. @@ -222,6 +223,32 @@ attribute once the route resource is created. ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_route_table.example + identity = { + id = "rtb-4e616f6d69" + } +} + +resource "aws_route_table" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `id` - (String) ID of the routing table. + +#### Optional + +* `account_id` (String) AWS Account where this resource is managed. +* `region` (String) Region where this resource is managed. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Route Tables using the route table `id`. For example: ```python @@ -245,4 +272,4 @@ Using `terraform import`, import Route Tables using the route table `id`. 
For ex % terraform import aws_route_table.public_rt rtb-4e616f6d69 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/route_table_association.html.markdown b/website/docs/cdktf/python/r/route_table_association.html.markdown index 07d8c005653c..2ec8b26d8c3e 100644 --- a/website/docs/cdktf/python/r/route_table_association.html.markdown +++ b/website/docs/cdktf/python/r/route_table_association.html.markdown @@ -55,6 +55,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `subnet_id` - (Optional) The subnet ID to create an association. Conflicts with `gateway_id`. * `gateway_id` - (Optional) The gateway ID to create an association. Conflicts with `subnet_id`. * `route_table_id` - (Required) The ID of the routing table to associate with. @@ -129,4 +130,4 @@ With EC2 Internet Gateways: % terraform import aws_route_table_association.assoc igw-01b3a60780f8d034a/rtb-656c65616e6f72 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/rum_app_monitor.html.markdown b/website/docs/cdktf/python/r/rum_app_monitor.html.markdown index b75ea55ded6b..ad3285ab79b4 100644 --- a/website/docs/cdktf/python/r/rum_app_monitor.html.markdown +++ b/website/docs/cdktf/python/r/rum_app_monitor.html.markdown @@ -36,6 +36,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The name of the app monitor. * `app_monitor_configuration` - (Optional) configuration data for the app monitor. See [app_monitor_configuration](#app_monitor_configuration) below. * `cw_log_enabled` - (Optional) Data collected by RUM is kept by RUM for 30 days and then deleted. This parameter specifies whether RUM sends a copy of this telemetry data to Amazon CloudWatch Logs in your account. This enables you to keep the telemetry data for more than 30 days, but it does incur Amazon CloudWatch Logs charges. Default value is `false`. @@ -95,4 +96,4 @@ Using `terraform import`, import Cloudwatch RUM App Monitor using the `name`. Fo % terraform import aws_rum_app_monitor.example example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/rum_metrics_destination.html.markdown b/website/docs/cdktf/python/r/rum_metrics_destination.html.markdown index 21cd2b0e7828..a34c65ee3b1a 100644 --- a/website/docs/cdktf/python/r/rum_metrics_destination.html.markdown +++ b/website/docs/cdktf/python/r/rum_metrics_destination.html.markdown @@ -36,6 +36,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
If you specify `Evidently`, you must also specify the ARN of the CloudWatchEvidently experiment that is to be the destination and an IAM role that has permission to write to the experiment. * `destination_arn` - (Optional) Use this parameter only if Destination is Evidently. This parameter specifies the ARN of the Evidently experiment that will receive the extended metrics. @@ -72,4 +73,4 @@ Using `terraform import`, import Cloudwatch RUM Metrics Destination using the `i % terraform import aws_rum_metrics_destination.example example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/s3_access_point.html.markdown b/website/docs/cdktf/python/r/s3_access_point.html.markdown index 4da83865fa89..bb02d3c099f8 100644 --- a/website/docs/cdktf/python/r/s3_access_point.html.markdown +++ b/website/docs/cdktf/python/r/s3_access_point.html.markdown @@ -129,6 +129,8 @@ The following arguments are optional: * `bucket_account_id` - (Optional) AWS account ID associated with the S3 bucket associated with this access point. * `policy` - (Optional) Valid JSON document that specifies the policy that you want to apply to this access point. Removing `policy` from your configuration or setting `policy` to null or an empty string (i.e., `policy = ""`) _will not_ delete the policy since it could have been set by `aws_s3control_access_point_policy`. To remove the `policy`, set it to `"{}"` (an empty JSON document). * `public_access_block_configuration` - (Optional) Configuration block to manage the `PublicAccessBlock` configuration that you want to apply to this Amazon S3 bucket. You can enable the configuration options in any combination. Detailed below. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `tags` - (Optional) Map of tags to assign to the access point. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. * `vpc_configuration` - (Optional) Configuration block to restrict access to this access point to requests from the specified Virtual Private Cloud (VPC). Required for S3 on Outposts. Detailed below. ### public_access_block_configuration Configuration Block @@ -164,6 +166,7 @@ Note: S3 access points only support secure access by HTTPS. HTTP isn't supported * `has_public_access_policy` - Indicates whether this access point currently has a policy that allows public access. * `id` - For Access Point of an AWS Partition S3 Bucket, the AWS account ID and access point name separated by a colon (`:`). For S3 on Outposts Bucket, the ARN of the Access Point. * `network_origin` - Indicates whether this access point allows access from the public Internet. Values are `VPC` (the access point doesn't allow access from the public Internet) and `Internet` (the access point allows access from the public Internet, subject to the access point and bucket access policies). +* `tags_all` - Map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). 
## Import @@ -217,4 +220,4 @@ Import using the ARN for Access Points associated with an S3 on Outposts Bucket: % terraform import aws_s3_access_point.example arn:aws:s3-outposts:us-east-1:123456789012:outpost/op-1234567890123456/accesspoint/example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/s3_bucket.html.markdown b/website/docs/cdktf/python/r/s3_bucket.html.markdown index 1d1551ab6a84..4a9e22de71ed 100644 --- a/website/docs/cdktf/python/r/s3_bucket.html.markdown +++ b/website/docs/cdktf/python/r/s3_bucket.html.markdown @@ -45,6 +45,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `bucket` - (Optional, Forces new resource) Name of the bucket. If omitted, Terraform will assign a random, unique name. Must be lowercase and less than or equal to 63 characters in length. A full list of bucket naming rules [may be found here](https://docs.aws.amazon.com/AmazonS3/latest/userguide/bucketnamingrules.html). The name must not be in the format `[bucket_name]--[azid]--x-s3`. Use the [`aws_s3_directory_bucket`](s3_directory_bucket.html) resource to manage S3 Express buckets. * `bucket_prefix` - (Optional, Forces new resource) Creates a unique bucket name beginning with the specified prefix. Conflicts with `bucket`. Must be lowercase and less than or equal to 37 characters in length. A full list of bucket naming rules [may be found here](https://docs.aws.amazon.com/AmazonS3/latest/userguide/bucketnamingrules.html). 
* `force_destroy` - (Optional, Default:`false`) Boolean that indicates all objects (including any [locked objects](https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html)) should be deleted from the bucket *when the bucket is destroyed* so that the bucket can be destroyed without error. These objects are *not* recoverable. This only deletes objects when the bucket is destroyed, *not* when setting this parameter to `true`. Once this parameter is set to `true`, there must be a successful `terraform apply` run before a destroy is required to update this value in the resource state. Without a successful `terraform apply` after this parameter is set, this flag will have no effect. If setting this field in the same operation that would require replacing the bucket or destroying the bucket, this flag will not work. Additionally when importing a bucket, a successful `terraform apply` is required to set this value in state before it will take effect on a destroy operation. @@ -318,9 +319,9 @@ This resource exports the following attributes in addition to the arguments abov * `id` - Name of the bucket. * `arn` - ARN of the bucket. Will be of format `arn:aws:s3:::bucketname`. * `bucket_domain_name` - Bucket domain name. Will be of format `bucketname.s3.amazonaws.com`. +* `bucket_region` - AWS region this bucket resides in. * `bucket_regional_domain_name` - The bucket region-specific domain name. The bucket domain name including the region name. Please refer to the [S3 endpoints reference](https://docs.aws.amazon.com/general/latest/gr/s3.html#s3_region) for format. Note: AWS CloudFront allows specifying an S3 region-specific endpoint when creating an S3 origin. This will prevent redirect issues from CloudFront to the S3 Origin URL. For more information, see the [Virtual Hosted-Style Requests for Other Regions](https://docs.aws.amazon.com/AmazonS3/latest/userguide/VirtualHosting.html#deprecated-global-endpoint) section in the AWS S3 User Guide. 
* `hosted_zone_id` - [Route 53 Hosted Zone ID](https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_website_region_endpoints) for this bucket's region. -* `region` - AWS region this bucket resides in. * `tags_all` - Map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). * `website_endpoint` - (**Deprecated**) Website endpoint, if the bucket is configured with a website. If not, this will be an empty string. Use the resource [`aws_s3_bucket_website_configuration`](s3_bucket_website_configuration.html.markdown) instead. * `website_domain` - (**Deprecated**) Domain of the website endpoint, if the bucket is configured with a website. If not, this will be an empty string. This is used to create Route 53 alias records. Use the resource [`aws_s3_bucket_website_configuration`](s3_bucket_website_configuration.html.markdown) instead. @@ -336,6 +337,32 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_s3_bucket.example + identity = { + bucket = "bucket-name" + } +} + +resource "aws_s3_bucket" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `bucket` (String) Name of the S3 bucket. + +#### Optional + +* `account_id` (String) AWS Account where this resource is managed. +* `region` (String) Region where this resource is managed. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import S3 bucket using the `bucket`. 
For example: ```python @@ -350,13 +377,13 @@ from imports.aws.s3_bucket import S3Bucket class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) - S3Bucket.generate_config_for_import(self, "bucket", "bucket-name") + S3Bucket.generate_config_for_import(self, "example", "bucket-name") ``` Using `terraform import`, import S3 bucket using the `bucket`. For example: ```console -% terraform import aws_s3_bucket.bucket bucket-name +% terraform import aws_s3_bucket.example bucket-name ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/s3_bucket_accelerate_configuration.html.markdown b/website/docs/cdktf/python/r/s3_bucket_accelerate_configuration.html.markdown index 72337434564f..3c7eaea6cf81 100644 --- a/website/docs/cdktf/python/r/s3_bucket_accelerate_configuration.html.markdown +++ b/website/docs/cdktf/python/r/s3_bucket_accelerate_configuration.html.markdown @@ -42,6 +42,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `bucket` - (Required, Forces new resource) Name of the bucket. * `expected_bucket_owner` - (Optional, Forces new resource) Account ID of the expected bucket owner. * `status` - (Required) Transfer acceleration state of the bucket. Valid values: `Enabled`, `Suspended`. 
@@ -104,4 +105,4 @@ If the owner (account ID) of the source bucket differs from the account used to % terraform import aws_s3_bucket_accelerate_configuration.example bucket-name,123456789012 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/s3_bucket_acl.html.markdown b/website/docs/cdktf/python/r/s3_bucket_acl.html.markdown index f121968f4f6a..ef8176357d52 100644 --- a/website/docs/cdktf/python/r/s3_bucket_acl.html.markdown +++ b/website/docs/cdktf/python/r/s3_bucket_acl.html.markdown @@ -164,6 +164,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `acl` - (Optional, either `access_control_policy` or `acl` is required) Specifies the Canned ACL to apply to the bucket. Valid values: `private`, `public-read`, `public-read-write`, `aws-exec-read`, `authenticated-read`, `bucket-owner-read`, `bucket-owner-full-control`, `log-delivery-write`. Full details are available on the [AWS documentation](https://docs.aws.amazon.com/AmazonS3/latest/userguide/acl-overview.html#canned-acl). * `access_control_policy` - (Optional, either `access_control_policy` or `acl` is required) Configuration block that sets the ACL permissions for an object per grantee. [See below](#access_control_policy). * `bucket` - (Required, Forces new resource) Bucket to which to apply the ACL. @@ -207,6 +208,34 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. 
For example: + +```terraform +import { + to = aws_s3_bucket_acl.example + identity = { + bucket = "bucket-name" + } +} + +resource "aws_s3_bucket_acl" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `bucket` (String) S3 bucket name. + +#### Optional + +* `account_id` (String) AWS Account where this resource is managed. +* `acl` (String) Canned ACL to apply to the bucket. +* `expected_bucket_owner` (String) Account ID of the expected bucket owner. +* `region` (String) Region where this resource is managed. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import S3 bucket ACL using `bucket`, `expected_bucket_owner`, and/or `acl`, depending on your situation. For example: If the owner (account ID) of the source bucket is the _same_ account used to configure the Terraform AWS Provider, and the source bucket is **not configured** with a @@ -309,4 +338,4 @@ If the owner (account ID) of the source bucket _differs_ from the account used t [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/acl-overview.html#canned-acl - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/s3_bucket_analytics_configuration.html.markdown b/website/docs/cdktf/python/r/s3_bucket_analytics_configuration.html.markdown index 54040cdb06f4..771d06be68f5 100644 --- a/website/docs/cdktf/python/r/s3_bucket_analytics_configuration.html.markdown +++ b/website/docs/cdktf/python/r/s3_bucket_analytics_configuration.html.markdown @@ -87,6 +87,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `bucket` - (Required) Name of the bucket this analytics configuration is associated with. * `name` - (Required) Unique identifier of the analytics configuration for the bucket. * `filter` - (Optional) Object filtering that accepts a prefix, tags, or a logical AND of prefix and tags (documented below). @@ -146,4 +147,4 @@ Using `terraform import`, import S3 bucket analytics configurations using `bucke % terraform import aws_s3_bucket_analytics_configuration.my-bucket-entire-bucket my-bucket:EntireBucket ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/s3_bucket_cors_configuration.html.markdown b/website/docs/cdktf/python/r/s3_bucket_cors_configuration.html.markdown index df35013780de..083620ffaae3 100644 --- a/website/docs/cdktf/python/r/s3_bucket_cors_configuration.html.markdown +++ b/website/docs/cdktf/python/r/s3_bucket_cors_configuration.html.markdown @@ -56,6 +56,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `bucket` - (Required, Forces new resource) Name of the bucket. * `expected_bucket_owner` - (Optional, Forces new resource) Account ID of the expected bucket owner. * `cors_rule` - (Required) Set of origins and methods (cross-origin access that you want to allow). [See below](#cors_rule). You can configure up to 100 rules. 
@@ -79,6 +80,33 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_s3_bucket_cors_configuration.example + identity = { + bucket = "bucket-name" + } +} + +resource "aws_s3_bucket_cors_configuration" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `bucket` (String) S3 bucket name. + +#### Optional + +* `account_id` (String) AWS Account where this resource is managed. +* `expected_bucket_owner` (String) Account ID of the expected bucket owner. +* `region` (String) Region where this resource is managed. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import S3 bucket CORS configuration using the `bucket` or using the `bucket` and `expected_bucket_owner` separated by a comma (`,`). 
For example: If the owner (account ID) of the source bucket is the same account used to configure the Terraform AWS Provider, import using the `bucket`: @@ -129,4 +157,4 @@ If the owner (account ID) of the source bucket differs from the account used to % terraform import aws_s3_bucket_cors_configuration.example bucket-name,123456789012 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/s3_bucket_intelligent_tiering_configuration.html.markdown b/website/docs/cdktf/python/r/s3_bucket_intelligent_tiering_configuration.html.markdown index 21ab6e128a26..208a02db7697 100644 --- a/website/docs/cdktf/python/r/s3_bucket_intelligent_tiering_configuration.html.markdown +++ b/website/docs/cdktf/python/r/s3_bucket_intelligent_tiering_configuration.html.markdown @@ -89,6 +89,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `bucket` - (Required) Name of the bucket this intelligent tiering configuration is associated with. * `name` - (Required) Unique name used to identify the S3 Intelligent-Tiering configuration for the bucket. * `status` - (Optional) Specifies the status of the configuration. Valid values: `Enabled`, `Disabled`. 
@@ -134,4 +135,4 @@ Using `terraform import`, import S3 bucket intelligent tiering configurations us % terraform import aws_s3_bucket_intelligent_tiering_configuration.my-bucket-entire-bucket my-bucket:EntireBucket ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/s3_bucket_inventory.html.markdown b/website/docs/cdktf/python/r/s3_bucket_inventory.html.markdown index 80cc1ea9ffb7..5c5d1ee162c6 100644 --- a/website/docs/cdktf/python/r/s3_bucket_inventory.html.markdown +++ b/website/docs/cdktf/python/r/s3_bucket_inventory.html.markdown @@ -100,6 +100,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `bucket` - (Required) Name of the source bucket that inventory lists the objects for. * `name` - (Required) Unique identifier of the inventory configuration for the bucket. * `included_object_versions` - (Required) Object versions to include in the inventory list. Valid values: `All`, `Current`. 
@@ -167,4 +168,4 @@ Using `terraform import`, import S3 bucket inventory configurations using `bucke % terraform import aws_s3_bucket_inventory.my-bucket-entire-bucket my-bucket:EntireBucket ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/s3_bucket_lifecycle_configuration.html.markdown b/website/docs/cdktf/python/r/s3_bucket_lifecycle_configuration.html.markdown index eb73fbe544b3..5aa5180e4af3 100644 --- a/website/docs/cdktf/python/r/s3_bucket_lifecycle_configuration.html.markdown +++ b/website/docs/cdktf/python/r/s3_bucket_lifecycle_configuration.html.markdown @@ -436,6 +436,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `bucket` - (Required) Name of the source S3 bucket you want Amazon S3 to monitor. * `expected_bucket_owner` - (Optional) Account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error. * `rule` - (Required) List of configuration blocks describing the rules managing the lifecycle. 
@@ -594,4 +595,4 @@ If the owner (account ID) of the source bucket differs from the account used to % terraform import aws_s3_bucket_lifecycle_configuration.example bucket-name,123456789012 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/s3_bucket_logging.html.markdown b/website/docs/cdktf/python/r/s3_bucket_logging.html.markdown index 2b233a21506c..ee88d5becd91 100644 --- a/website/docs/cdktf/python/r/s3_bucket_logging.html.markdown +++ b/website/docs/cdktf/python/r/s3_bucket_logging.html.markdown @@ -20,6 +20,73 @@ to decide which method meets your requirements. ## Example Usage +### Grant permission by using bucket policy + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import Token, TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.data_aws_caller_identity import DataAwsCallerIdentity +from imports.aws.data_aws_iam_policy_document import DataAwsIamPolicyDocument +from imports.aws.s3_bucket import S3Bucket +from imports.aws.s3_bucket_logging import S3BucketLoggingA +from imports.aws.s3_bucket_policy import S3BucketPolicy +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + example = S3Bucket(self, "example", + bucket="example-bucket" + ) + logging = S3Bucket(self, "logging", + bucket="access-logging-bucket" + ) + aws_s3_bucket_logging_example = S3BucketLoggingA(self, "example_2", + bucket=example.bucket, + target_bucket=logging.bucket, + target_object_key_format=S3BucketLoggingTargetObjectKeyFormat( + partitioned_prefix=S3BucketLoggingTargetObjectKeyFormatPartitionedPrefix( + partition_date_source="EventTime" + ) + ), + target_prefix="log/" + ) + # This allows the Terraform resource name to match the original name. 
You can remove the call if you don't need them to match. + aws_s3_bucket_logging_example.override_logical_id("example") + current = DataAwsCallerIdentity(self, "current") + logging_bucket_policy = DataAwsIamPolicyDocument(self, "logging_bucket_policy", + statement=[DataAwsIamPolicyDocumentStatement( + actions=["s3:PutObject"], + condition=[DataAwsIamPolicyDocumentStatementCondition( + test="StringEquals", + values=[Token.as_string(current.account_id)], + variable="aws:SourceAccount" + ) + ], + principals=[DataAwsIamPolicyDocumentStatementPrincipals( + identifiers=["logging.s3.amazonaws.com"], + type="Service" + ) + ], + resources=["${" + logging.arn + "}/*"] + ) + ] + ) + aws_s3_bucket_policy_logging = S3BucketPolicy(self, "logging_5", + bucket=logging.bucket, + policy=Token.as_string(logging_bucket_policy.json) + ) + # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. + aws_s3_bucket_policy_logging.override_logical_id("logging") +``` + +### Grant permission by using bucket ACL + +The [AWS Documentation](https://docs.aws.amazon.com/AmazonS3/latest/userguide/enable-server-access-logging.html) does not recommend using the ACL. + ```python # DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug from constructs import Construct @@ -63,6 +130,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `bucket` - (Required, Forces new resource) Name of the bucket. * `expected_bucket_owner` - (Optional, Forces new resource) Account ID of the expected bucket owner. 
* `target_bucket` - (Required) Name of the bucket where you want Amazon S3 to store server access logs. @@ -90,8 +158,8 @@ The `grantee` configuration block supports the following arguments: The `target_object_key_format` configuration block supports the following arguments: -* `partitioned_prefix` - (Optional) Partitioned S3 key for log objects. [See below](#partitioned_prefix). -* `simple_prefix` - (Optional) Use the simple format for S3 keys for log objects. To use, set `simple_prefix {}`. +* `partitioned_prefix` - (Optional) Partitioned S3 key for log objects, in the form `[target_prefix][SourceAccountId]/[SourceRegion]/[SourceBucket]/[YYYY]/[MM]/[DD]/[YYYY]-[MM]-[DD]-[hh]-[mm]-[ss]-[UniqueString]`. Conflicts with `simple_prefix`. [See below](#partitioned_prefix). +* `simple_prefix` - (Optional) Use the simple format for S3 keys for log objects, in the form `[target_prefix][YYYY]-[MM]-[DD]-[hh]-[mm]-[ss]-[UniqueString]`. To use, set `simple_prefix {}`. Conflicts with `partitioned_prefix`. ### partitioned_prefix @@ -107,6 +175,33 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_s3_bucket_logging.example + identity = { + bucket = "bucket-name" + } +} + +resource "aws_s3_bucket_logging" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `bucket` (String) S3 bucket name. + +#### Optional + +* `account_id` (String) AWS Account where this resource is managed. +* `expected_bucket_owner` (String) Account ID of the expected bucket owner. +* `region` (String) Region where this resource is managed. 
+ In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import S3 bucket logging using the `bucket` or using the `bucket` and `expected_bucket_owner` separated by a comma (`,`). For example: If the owner (account ID) of the source bucket is the same account used to configure the Terraform AWS Provider, import using the `bucket`: @@ -157,4 +252,4 @@ If the owner (account ID) of the source bucket differs from the account used to % terraform import aws_s3_bucket_logging.example bucket-name,123456789012 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/s3_bucket_metadata_configuration.html.markdown b/website/docs/cdktf/python/r/s3_bucket_metadata_configuration.html.markdown new file mode 100644 index 000000000000..bb5fa4c1dc75 --- /dev/null +++ b/website/docs/cdktf/python/r/s3_bucket_metadata_configuration.html.markdown @@ -0,0 +1,168 @@ +--- +subcategory: "S3 (Simple Storage)" +layout: "aws" +page_title: "AWS: aws_s3_bucket_metadata_configuration" +description: |- + Manages Amazon S3 Metadata for a bucket. +--- + + + +# Resource: aws_s3_bucket_metadata_configuration + +Manages Amazon S3 Metadata for a bucket. + +## Example Usage + +### Basic Usage + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import Token, TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. 
+# +from imports.aws.s3_bucket_metadata_configuration import S3BucketMetadataConfiguration +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + S3BucketMetadataConfiguration(self, "example", + bucket=Token.as_string(aws_s3_bucket_example.bucket), + metadata_configuration=[S3BucketMetadataConfigurationMetadataConfiguration( + inventory_table_configuration=[S3BucketMetadataConfigurationMetadataConfigurationInventoryTableConfiguration( + configuration_state="ENABLED" + ) + ], + journal_table_configuration=[S3BucketMetadataConfigurationMetadataConfigurationJournalTableConfiguration( + record_expiration=[S3BucketMetadataConfigurationMetadataConfigurationJournalTableConfigurationRecordExpiration( + days=7, + expiration="ENABLED" + ) + ] + ) + ] + ) + ] + ) +``` + +## Argument Reference + +The following arguments are required: + +* `bucket` - (Required) General purpose bucket that you want to create the metadata configuration for. +* `metadata_configuration` - (Required) Metadata configuration. See [`metadata_configuration` Block](#metadata_configuration-block) for details. + +The following arguments are optional: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). + +### `metadata_configuration` Block + +The `metadata_configuration` configuration block supports the following arguments: + +* `inventory_table_configuration` - (Required) Inventory table configuration. See [`inventory_table_configuration` Block](#inventory_table_configuration-block) for details. +* `journal_table_configuration` - (Required) Journal table configuration. See [`journal_table_configuration` Block](#journal_table_configuration-block) for details. 
+ +### `inventory_table_configuration` Block + +The `inventory_table_configuration` configuration block supports the following arguments: + +* `configuration_state` - (Required) Configuration state of the inventory table, indicating whether the inventory table is enabled or disabled. Valid values: `ENABLED`, `DISABLED`. +* `encryption_configuration` - (Optional) Encryption configuration for the inventory table. See [`encryption_configuration` Block](#encryption_configuration-block) for details. + +### `journal_table_configuration` Block + +The `journal_table_configuration` configuration block supports the following arguments: + +* `encryption_configuration` - (Optional) Encryption configuration for the journal table. See [`encryption_configuration` Block](#encryption_configuration-block) for details. +* `record_expiration` - (Required) Journal table record expiration settings. See [`record_expiration` Block](#record_expiration-block) for details. + +### `encryption_configuration` Block + +The `encryption_configuration` configuration block supports the following arguments: + +* `kms_key_arn` - (Optional) KMS key ARN when `sse_algorithm` is `aws:kms`. +* `sse_algorithm` - (Required) Encryption type for the metadata table. Valid values: `aws:kms`, `AES256`. + +### `record_expiration` Block + +The `record_expiration` configuration block supports the following arguments: + +* `days` - (Optional) Number of days to retain journal table records. +* `expiration` - (Required) Whether journal table record expiration is enabled or disabled. Valid values: `ENABLED`, `DISABLED`. + +## Attribute Reference + +This resource exports the following attributes in addition to the arguments above: + +* `metadata_configuration.0.destination` - Destination information for the S3 Metadata configuration. + * `table_bucket_arn` - ARN of the table bucket where the metadata configuration is stored. + * `table_bucket_type` - Type of the table bucket where the metadata configuration is stored. 
+ * `table_namespace` - Namespace in the table bucket where the metadata tables for the metadata configuration are stored. +* `metadata_configuration.0.inventory_table_configuration.0.table_arn` - Inventory table ARN. +* `metadata_configuration.0.inventory_table_configuration.0.table_name` - Inventory table name. +* `metadata_configuration.0.journal_table_configuration.0.table_arn` - Journal table ARN. +* `metadata_configuration.0.journal_table_configuration.0.table_name` - Journal table name. + +## Timeouts + +[Configuration options](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts): + +* `create` - (Default `30m`) + +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import S3 bucket metadata configuration using the `bucket` or using the `bucket` and `expected_bucket_owner` separated by a comma (`,`). For example: + +If the owner (account ID) of the source bucket is the same account used to configure the Terraform AWS Provider, import using the `bucket`: + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.s3_bucket_metadata_configuration import S3BucketMetadataConfiguration +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + S3BucketMetadataConfiguration.generate_config_for_import(self, "example", "bucket-name") +``` + +If the owner (account ID) of the source bucket differs from the account used to configure the Terraform AWS Provider, import using the `bucket` and `expected_bucket_owner` separated by a comma (`,`): + +```python +# DO NOT EDIT. 
Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.s3_bucket_metadata_configuration import S3BucketMetadataConfiguration +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + S3BucketMetadataConfiguration.generate_config_for_import(self, "example", "bucket-name,123456789012") +``` + +**Using `terraform import` to import** S3 bucket metadata configuration using the `bucket` or using the `bucket` and `expected_bucket_owner` separated by a comma (`,`). For example: + +If the owner (account ID) of the source bucket is the same account used to configure the Terraform AWS Provider, import using the `bucket`: + +```console +% terraform import aws_s3_bucket_metadata_configuration.example bucket-name +``` + +If the owner (account ID) of the source bucket differs from the account used to configure the Terraform AWS Provider, import using the `bucket` and `expected_bucket_owner` separated by a comma (`,`): + +```console +% terraform import aws_s3_bucket_metadata_configuration.example bucket-name,123456789012 +``` + + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/s3_bucket_metric.html.markdown b/website/docs/cdktf/python/r/s3_bucket_metric.html.markdown index 58d63ba4c7ac..91c3ae4b855b 100644 --- a/website/docs/cdktf/python/r/s3_bucket_metric.html.markdown +++ b/website/docs/cdktf/python/r/s3_bucket_metric.html.markdown @@ -111,6 +111,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `bucket` - (Required) Name of the bucket to put metric configuration. * `name` - (Required) Unique identifier of the metrics configuration for the bucket. Must be less than or equal to 64 characters in length. * `filter` - (Optional) [Object filtering](http://docs.aws.amazon.com/AmazonS3/latest/dev/metrics-configurations.html#metrics-configurations-filter) that accepts a prefix, tags, or a logical AND of prefix and tags (documented below). @@ -152,4 +153,4 @@ Using `terraform import`, import S3 bucket metric configurations using `bucket:m % terraform import aws_s3_bucket_metric.my-bucket-entire-bucket my-bucket:EntireBucket ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/s3_bucket_notification.html.markdown b/website/docs/cdktf/python/r/s3_bucket_notification.html.markdown index a307f3e0ae66..53c69279320c 100644 --- a/website/docs/cdktf/python/r/s3_bucket_notification.html.markdown +++ b/website/docs/cdktf/python/r/s3_bucket_notification.html.markdown @@ -394,6 +394,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `eventbridge` - (Optional) Whether to enable Amazon EventBridge notifications. Defaults to `false`. * `lambda_function` - (Optional, Multiple) Used to configure notifications to a Lambda Function. See below. * `queue` - (Optional) Notification configuration to SQS Queue. See below. @@ -452,4 +453,4 @@ Using `terraform import`, import S3 bucket notification using the `bucket`. 
For % terraform import aws_s3_bucket_notification.bucket_notification bucket-name ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/s3_bucket_object.html.markdown b/website/docs/cdktf/python/r/s3_bucket_object.html.markdown index 38993b7d74ee..f84194ee53dd 100644 --- a/website/docs/cdktf/python/r/s3_bucket_object.html.markdown +++ b/website/docs/cdktf/python/r/s3_bucket_object.html.markdown @@ -198,6 +198,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `acl` - (Optional) [Canned ACL](https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl) to apply. Valid values are `private`, `public-read`, `public-read-write`, `aws-exec-read`, `authenticated-read`, `bucket-owner-read`, and `bucket-owner-full-control`. Defaults to `private`. * `bucket_key_enabled` - (Optional) Whether or not to use [Amazon S3 Bucket Keys](https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html) for SSE-KMS. * `cache_control` - (Optional) Caching behavior along the request/reply chain Read [w3c cache_control](http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9) for further details. @@ -238,6 +239,34 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. 
For example: + +```terraform +import { + to = aws_s3_bucket_object.example + identity = { + bucket = "some-bucket-name" + key = "some/key.txt" + } +} + +resource "aws_s3_bucket_object" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `bucket` (String) S3 bucket name. +* `key` (String) Object key. + +#### Optional + +* `account_id` (String) AWS Account where this resource is managed. +* `region` (String) Region where this resource is managed. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import objects using the `id` or S3 URL. For example: Import using the `id`, which is the bucket name and the key together: @@ -288,4 +317,4 @@ Import using S3 URL syntax: % terraform import aws_s3_bucket_object.example s3://some-bucket-name/some/key.txt ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/s3_bucket_object_lock_configuration.html.markdown b/website/docs/cdktf/python/r/s3_bucket_object_lock_configuration.html.markdown index 9eac40bb479c..ae4f5ef6edb5 100644 --- a/website/docs/cdktf/python/r/s3_bucket_object_lock_configuration.html.markdown +++ b/website/docs/cdktf/python/r/s3_bucket_object_lock_configuration.html.markdown @@ -63,6 +63,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `bucket` - (Required, Forces new resource) Name of the bucket. * `expected_bucket_owner` - (Optional, Forces new resource) Account ID of the expected bucket owner. 
* `object_lock_enabled` - (Optional, Forces new resource) Indicates whether this bucket has an Object Lock configuration enabled. Defaults to `Enabled`. Valid values: `Enabled`. @@ -138,4 +139,4 @@ If the owner (account ID) of the source bucket differs from the account used to % terraform import aws_s3_bucket_object_lock_configuration.example bucket-name,123456789012 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/s3_bucket_ownership_controls.html.markdown b/website/docs/cdktf/python/r/s3_bucket_ownership_controls.html.markdown index 743ae02fabbf..751d0c6c7a0f 100644 --- a/website/docs/cdktf/python/r/s3_bucket_ownership_controls.html.markdown +++ b/website/docs/cdktf/python/r/s3_bucket_ownership_controls.html.markdown @@ -44,15 +44,16 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -The following arguments are required: +This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `bucket` - (Required) Name of the bucket that you want to associate this access point with. * `rule` - (Required) Configuration block(s) with Ownership Controls rules. Detailed below. ### rule Configuration Block -The following arguments are required: +The `rule` configuration block supports the following arguments: * `object_ownership` - (Required) Object ownership.
Valid values: `BucketOwnerPreferred`, `ObjectWriter` or `BucketOwnerEnforced` * `BucketOwnerPreferred` - Objects uploaded to the bucket change ownership to the bucket owner if the objects are uploaded with the `bucket-owner-full-control` canned ACL. * `ObjectWriter` - Uploading account will own the object if the object is uploaded with the `bucket-owner-full-control` canned ACL. @@ -89,4 +91,4 @@ Using `terraform import`, import S3 Bucket Ownership Controls using S3 Bucket na % terraform import aws_s3_bucket_ownership_controls.example my-bucket ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/s3_bucket_policy.html.markdown b/website/docs/cdktf/python/r/s3_bucket_policy.html.markdown index a129865bd7bc..3904535cc0ba 100644 --- a/website/docs/cdktf/python/r/s3_bucket_policy.html.markdown +++ b/website/docs/cdktf/python/r/s3_bucket_policy.html.markdown @@ -55,10 +55,13 @@ class MyConvertedCode(TerraformStack): aws_s3_bucket_policy_allow_access_from_another_account.override_logical_id("allow_access_from_another_account") ``` +-> Only one `aws_s3_bucket_policy` resource should be defined per S3 bucket. Defining multiple `aws_s3_bucket_policy` resources with different Terraform names but the same `bucket` value may result in unexpected policy overwrites. Each resource uses the `PutBucketPolicy` API, which replaces the entire existing policy without error or warning. Because Terraform treats each resource independently, the policy applied last will silently override any previously applied policy. + ## Argument Reference This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
* `bucket` - (Required) Name of the bucket to which to apply the policy. * `policy` - (Required) Text of the policy. Although this is a bucket policy rather than an IAM policy, the [`aws_iam_policy_document`](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) data source may be used, so long as it specifies a principal. For more information about building AWS IAM policy documents with Terraform, see the [AWS IAM Policy Document Guide](https://learn.hashicorp.com/terraform/aws/iam-policy). Note: Bucket policies are limited to 20 KB in size. @@ -68,6 +71,32 @@ This resource exports no additional attributes. ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_s3_bucket_policy.example + identity = { + bucket = "my-tf-test-bucket" + } +} + +resource "aws_s3_bucket_policy" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `bucket` (String) Name of the S3 bucket. + +#### Optional + +* `account_id` (String) AWS Account where this resource is managed. +* `region` (String) Region where this resource is managed. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import S3 bucket policies using the bucket name. For example: ```python @@ -82,13 +111,13 @@ from imports.aws.s3_bucket_policy import S3BucketPolicy class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) - S3BucketPolicy.generate_config_for_import(self, "allowAccessFromAnotherAccount", "my-tf-test-bucket") + S3BucketPolicy.generate_config_for_import(self, "example", "my-tf-test-bucket") ``` Using `terraform import`, import S3 bucket policies using the bucket name. 
For example: ```console -% terraform import aws_s3_bucket_policy.allow_access_from_another_account my-tf-test-bucket +% terraform import aws_s3_bucket_policy.example my-tf-test-bucket ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/s3_bucket_public_access_block.html.markdown b/website/docs/cdktf/python/r/s3_bucket_public_access_block.html.markdown index a7b4a78b94a2..280a966bc99c 100644 --- a/website/docs/cdktf/python/r/s3_bucket_public_access_block.html.markdown +++ b/website/docs/cdktf/python/r/s3_bucket_public_access_block.html.markdown @@ -14,6 +14,8 @@ Manages S3 bucket-level Public Access Block configuration. For more information -> This resource cannot be used with S3 directory buckets. +~> Setting `skip_destroy` to `true` means that the AWS Provider will not destroy a public access block, even when running `terraform destroy`. The configuration is thus an intentional dangling resource that is not managed by Terraform and will remain in-place in your AWS account. + ## Example Usage ```python @@ -47,9 +49,10 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `bucket` - (Required) S3 Bucket to which this Public Access Block configuration should be applied. * `block_public_acls` - (Optional) Whether Amazon S3 should block public ACLs for this bucket. Defaults to `false`. Enabling this setting does not affect existing policies or ACLs. When set to `true` causes the following behavior: - * PUT Bucket acl and PUT Object acl calls will fail if the specified ACL allows public access. 
+ * PUT Bucket ACL and PUT Object ACL calls will fail if the specified ACL allows public access. * PUT Object calls will fail if the request includes an object ACL. * `block_public_policy` - (Optional) Whether Amazon S3 should block public bucket policies for this bucket. Defaults to `false`. Enabling this setting does not affect the existing bucket policy. When set to `true` causes Amazon S3 to: * Reject calls to PUT Bucket policy if the specified bucket policy allows public access. @@ -57,6 +60,7 @@ This resource supports the following arguments: * Ignore public ACLs on this bucket and any objects that it contains. * `restrict_public_buckets` - (Optional) Whether Amazon S3 should restrict public bucket policies for this bucket. Defaults to `false`. Enabling this setting does not affect the previously stored bucket policy, except that public and cross-account access within the public bucket policy, including non-public delegation to specific accounts, is blocked. When set to `true`: * Only the bucket owner and AWS Services can access this buckets if it has a public policy. +* `skip_destroy` - (Optional) Whether to retain the public access block upon destruction. If set to `true`, the resource is simply removed from state instead. This may be desirable in certain scenarios to prevent the removal of a public access block before deletion of the associated bucket. 
## Attribute Reference @@ -89,4 +93,4 @@ Using `terraform import`, import `aws_s3_bucket_public_access_block` using the b % terraform import aws_s3_bucket_public_access_block.example my-bucket ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/s3_bucket_replication_configuration.html.markdown b/website/docs/cdktf/python/r/s3_bucket_replication_configuration.html.markdown index e86b6a9acead..cbf9d26f49f1 100644 --- a/website/docs/cdktf/python/r/s3_bucket_replication_configuration.html.markdown +++ b/website/docs/cdktf/python/r/s3_bucket_replication_configuration.html.markdown @@ -20,6 +20,8 @@ Provides an independent configuration resource for S3 bucket [replication config ### Using replication configuration +#### Terraform AWS Provider v5 (and below) + ```python # DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug from constructs import Construct @@ -136,9 +138,134 @@ class MyConvertedCode(TerraformStack): storage_class="STANDARD" ), filter=S3BucketReplicationConfigurationRuleFilter( - prefix="foo" + prefix="example" ), - id="foobar", + id="examplerule", + status="Enabled" + ) + ] + ) + # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. + aws_s3_bucket_replication_configuration_replication.override_logical_id("replication") +``` + +#### Terraform AWS Provider v6 (and above) + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import Token, TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. 
+# +from imports.aws.data_aws_iam_policy_document import DataAwsIamPolicyDocument +from imports.aws.iam_policy import IamPolicy +from imports.aws.iam_role import IamRole +from imports.aws.iam_role_policy_attachment import IamRolePolicyAttachment +from imports.aws.provider import AwsProvider +from imports.aws.s3_bucket import S3Bucket +from imports.aws.s3_bucket_acl import S3BucketAcl +from imports.aws.s3_bucket_replication_configuration import S3BucketReplicationConfigurationA +from imports.aws.s3_bucket_versioning import S3BucketVersioningA +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + AwsProvider(self, "aws", + region="eu-west-1" + ) + destination = S3Bucket(self, "destination", + bucket="tf-test-bucket-destination-12345" + ) + source = S3Bucket(self, "source", + bucket="tf-test-bucket-source-12345", + region="eu-central-1" + ) + S3BucketAcl(self, "source_bucket_acl", + acl="private", + bucket=source.id, + region="eu-central-1" + ) + aws_s3_bucket_versioning_destination = S3BucketVersioningA(self, "destination_4", + bucket=destination.id, + versioning_configuration=S3BucketVersioningVersioningConfiguration( + status="Enabled" + ) + ) + # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. + aws_s3_bucket_versioning_destination.override_logical_id("destination") + aws_s3_bucket_versioning_source = S3BucketVersioningA(self, "source_5", + bucket=source.id, + region="eu-central-1", + versioning_configuration=S3BucketVersioningVersioningConfiguration( + status="Enabled" + ) + ) + # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. 
+ aws_s3_bucket_versioning_source.override_logical_id("source") + assume_role = DataAwsIamPolicyDocument(self, "assume_role", + statement=[DataAwsIamPolicyDocumentStatement( + actions=["sts:AssumeRole"], + effect="Allow", + principals=[DataAwsIamPolicyDocumentStatementPrincipals( + identifiers=["s3.amazonaws.com"], + type="Service" + ) + ] + ) + ] + ) + replication = DataAwsIamPolicyDocument(self, "replication", + statement=[DataAwsIamPolicyDocumentStatement( + actions=["s3:GetReplicationConfiguration", "s3:ListBucket"], + effect="Allow", + resources=[source.arn] + ), DataAwsIamPolicyDocumentStatement( + actions=["s3:GetObjectVersionForReplication", "s3:GetObjectVersionAcl", "s3:GetObjectVersionTagging" + ], + effect="Allow", + resources=["${" + source.arn + "}/*"] + ), DataAwsIamPolicyDocumentStatement( + actions=["s3:ReplicateObject", "s3:ReplicateDelete", "s3:ReplicateTags" + ], + effect="Allow", + resources=["${" + destination.arn + "}/*"] + ) + ] + ) + aws_iam_policy_replication = IamPolicy(self, "replication_8", + name="tf-iam-role-policy-replication-12345", + policy=Token.as_string(replication.json) + ) + # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. + aws_iam_policy_replication.override_logical_id("replication") + aws_iam_role_replication = IamRole(self, "replication_9", + assume_role_policy=Token.as_string(assume_role.json), + name="tf-iam-role-replication-12345" + ) + # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. + aws_iam_role_replication.override_logical_id("replication") + aws_iam_role_policy_attachment_replication = IamRolePolicyAttachment(self, "replication_10", + policy_arn=Token.as_string(aws_iam_policy_replication.arn), + role=Token.as_string(aws_iam_role_replication.name) + ) + # This allows the Terraform resource name to match the original name. 
You can remove the call if you don't need them to match. + aws_iam_role_policy_attachment_replication.override_logical_id("replication") + aws_s3_bucket_replication_configuration_replication = + S3BucketReplicationConfigurationA(self, "replication_11", + bucket=source.id, + depends_on=[aws_s3_bucket_versioning_source], + region="eu-central-1", + role=Token.as_string(aws_iam_role_replication.arn), + rule=[S3BucketReplicationConfigurationRule( + destination=S3BucketReplicationConfigurationRuleDestination( + bucket=destination.arn, + storage_class="STANDARD" + ), + filter=S3BucketReplicationConfigurationRuleFilter( + prefix="example" + ), + id="examplerule", status="Enabled" ) ] @@ -228,6 +355,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `bucket` - (Required) Name of the source S3 bucket you want Amazon S3 to monitor. * `role` - (Required) ARN of the IAM role for Amazon S3 to assume when replicating the objects. * `rule` - (Required) List of configuration blocks describing the rules managing the replication. [See below](#rule). 
@@ -460,4 +588,4 @@ Using `terraform import`, import S3 bucket replication configuration using the ` % terraform import aws_s3_bucket_replication_configuration.replication bucket-name ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/s3_bucket_request_payment_configuration.html.markdown b/website/docs/cdktf/python/r/s3_bucket_request_payment_configuration.html.markdown index 186506ecf8fc..1e89e5e1354d 100644 --- a/website/docs/cdktf/python/r/s3_bucket_request_payment_configuration.html.markdown +++ b/website/docs/cdktf/python/r/s3_bucket_request_payment_configuration.html.markdown @@ -40,6 +40,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `bucket` - (Required, Forces new resource) Name of the bucket. * `expected_bucket_owner` - (Optional, Forces new resource) Account ID of the expected bucket owner. * `payer` - (Required) Specifies who pays for the download and request fees. Valid values: `BucketOwner`, `Requester`. 
@@ -102,4 +103,4 @@ If the owner (account ID) of the source bucket differs from the account used to % terraform import aws_s3_bucket_request_payment_configuration.example bucket-name,123456789012 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/s3_bucket_server_side_encryption_configuration.html.markdown b/website/docs/cdktf/python/r/s3_bucket_server_side_encryption_configuration.html.markdown index 28be4ce9646c..f301875254fb 100644 --- a/website/docs/cdktf/python/r/s3_bucket_server_side_encryption_configuration.html.markdown +++ b/website/docs/cdktf/python/r/s3_bucket_server_side_encryption_configuration.html.markdown @@ -53,6 +53,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `bucket` - (Required, Forces new resource) ID (name) of the bucket. * `expected_bucket_owner` - (Optional, Forces new resource) Account ID of the expected bucket owner. * `rule` - (Required) Set of server-side encryption configuration rules. [See below](#rule). Currently, only a single rule is supported. @@ -79,6 +80,33 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. 
For example: + +```terraform +import { + to = aws_s3_bucket_server_side_encryption_configuration.example + identity = { + bucket = "bucket-name" + } +} + +resource "aws_s3_bucket_server_side_encryption_configuration" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `bucket` (String) S3 bucket name. + +#### Optional + +* `account_id` (String) AWS Account where this resource is managed. +* `expected_bucket_owner` (String) Account ID of the expected bucket owner. +* `region` (String) Region where this resource is managed. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import S3 bucket server-side encryption configuration using the `bucket` or using the `bucket` and `expected_bucket_owner` separated by a comma (`,`). For example: If the owner (account ID) of the source bucket is the same account used to configure the Terraform AWS Provider, import using the `bucket`: @@ -129,4 +157,4 @@ If the owner (account ID) of the source bucket differs from the account used to % terraform import aws_s3_bucket_server_side_encryption_configuration.example bucket-name,123456789012 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/s3_bucket_versioning.html.markdown b/website/docs/cdktf/python/r/s3_bucket_versioning.html.markdown index ca95d6853440..b00afbb5896f 100644 --- a/website/docs/cdktf/python/r/s3_bucket_versioning.html.markdown +++ b/website/docs/cdktf/python/r/s3_bucket_versioning.html.markdown @@ -134,6 +134,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `bucket` - (Required, Forces new resource) Name of the S3 bucket. * `versioning_configuration` - (Required) Configuration block for the versioning parameters. [See below](#versioning_configuration). * `expected_bucket_owner` - (Optional, Forces new resource) Account ID of the expected bucket owner. @@ -157,6 +158,33 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_s3_bucket_versioning.example + identity = { + bucket = "bucket-name" + } +} + +resource "aws_s3_bucket_versioning" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `bucket` (String) S3 bucket name. + +#### Optional + +* `account_id` (String) AWS Account where this resource is managed. +* `expected_bucket_owner` (String) Account ID of the expected bucket owner. +* `region` (String) Region where this resource is managed. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import S3 bucket versioning using the `bucket` or using the `bucket` and `expected_bucket_owner` separated by a comma (`,`). 
For example: If the owner (account ID) of the source bucket is the same account used to configure the Terraform AWS Provider, import using the `bucket`: @@ -207,4 +235,4 @@ If the owner (account ID) of the source bucket differs from the account used to % terraform import aws_s3_bucket_versioning.example bucket-name,123456789012 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/s3_bucket_website_configuration.html.markdown b/website/docs/cdktf/python/r/s3_bucket_website_configuration.html.markdown index e929cf5331a0..5be500c491f2 100644 --- a/website/docs/cdktf/python/r/s3_bucket_website_configuration.html.markdown +++ b/website/docs/cdktf/python/r/s3_bucket_website_configuration.html.markdown @@ -80,6 +80,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `bucket` - (Required, Forces new resource) Name of the bucket. * `error_document` - (Optional, Conflicts with `redirect_all_requests_to`) Name of the error document for the website. [See below](#error_document). * `expected_bucket_owner` - (Optional, Forces new resource) Account ID of the expected bucket owner. @@ -144,6 +145,33 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. 
For example: + +```terraform +import { + to = aws_s3_bucket_website_configuration.example + identity = { + bucket = "bucket-name" + } +} + +resource "aws_s3_bucket_website_configuration" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `bucket` (String) S3 bucket name. + +#### Optional + +* `account_id` (String) AWS Account where this resource is managed. +* `expected_bucket_owner` (String) Account ID of the expected bucket owner. +* `region` (String) Region where this resource is managed. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import S3 bucket website configuration using the `bucket` or using the `bucket` and `expected_bucket_owner` separated by a comma (`,`). For example: If the owner (account ID) of the source bucket is the same account used to configure the Terraform AWS Provider, import using the `bucket`: @@ -194,4 +222,4 @@ If the owner (account ID) of the source bucket differs from the account used to % terraform import aws_s3_bucket_website_configuration.example bucket-name,123456789012 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/s3_directory_bucket.html.markdown b/website/docs/cdktf/python/r/s3_directory_bucket.html.markdown index 2b07e4f7cf4b..3444465c144f 100644 --- a/website/docs/cdktf/python/r/s3_directory_bucket.html.markdown +++ b/website/docs/cdktf/python/r/s3_directory_bucket.html.markdown @@ -65,10 +65,12 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `bucket` - (Required) Name of the bucket. 
The name must be in the format `[bucket_name]--[azid]--x-s3`. Use the [`aws_s3_bucket`](s3_bucket.html) resource to manage general purpose buckets. * `data_redundancy` - (Optional) Data redundancy. Valid values: `SingleAvailabilityZone`, `SingleLocalZone`. The default value depends on the value of the `location.type` attribute. * `force_destroy` - (Optional, Default:`false`) Boolean that indicates all objects should be deleted from the bucket *when the bucket is destroyed* so that the bucket can be destroyed without error. These objects are *not* recoverable. This only deletes objects when the bucket is destroyed, *not* when setting this parameter to `true`. Once this parameter is set to `true`, there must be a successful `terraform apply` run before a destroy is required to update this value in the resource state. Without a successful `terraform apply` after this parameter is set, this flag will have no effect. If setting this field in the same operation that would require replacing the bucket or destroying the bucket, this flag will not work. Additionally when importing a bucket, a successful `terraform apply` is required to set this value in state before it will take effect on a destroy operation. * `location` - (Required) Bucket location. See [Location](#location) below for more details. +* `tags` - (Optional) Map of tags to assign to the bucket. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. * `type` - (Optional, Default:`Directory`) Bucket type. Valid values: `Directory`. ### Location @@ -84,6 +86,7 @@ This resource exports the following attributes in addition to the arguments abov * `id` - (**Deprecated**, use `bucket` instead) Name of the bucket. * `arn` - ARN of the bucket. 
+* `tags_all` - Map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). ## Import @@ -110,4 +113,4 @@ Using `terraform import`, import S3 bucket using `bucket`. For example: % terraform import aws_s3_directory_bucket.example example--usw2-az1--x-s3 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/s3_object.html.markdown b/website/docs/cdktf/python/r/s3_object.html.markdown index 942157bc0160..f6b59ea7ed35 100644 --- a/website/docs/cdktf/python/r/s3_object.html.markdown +++ b/website/docs/cdktf/python/r/s3_object.html.markdown @@ -250,10 +250,11 @@ The following arguments are optional: * `object_lock_mode` - (Optional) Object lock [retention mode](https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html#object-lock-retention-modes) that you want to apply to this object. Valid values are `GOVERNANCE` and `COMPLIANCE`. * `object_lock_retain_until_date` - (Optional) Date and time, in [RFC3339 format](https://tools.ietf.org/html/rfc3339#section-5.8), when this object's object lock will [expire](https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html#object-lock-retention-periods). * `override_provider` - (Optional) Override provider-level configuration options. See [Override Provider](#override-provider) below for more details. -* `server_side_encryption` - (Optional) Server-side encryption of the object in S3. Valid values are "`AES256`" and "`aws:kms`". +* `server_side_encryption` - (Optional) Server-side encryption of the object in S3. Valid values are `"AES256"`, `"aws:kms"`, `"aws:kms:dsse"`, and `"aws:fsx"`. * `source_hash` - (Optional) Triggers updates like `etag` but useful to address `etag` encryption limitations. Set using `filemd5("path/to/source")` (Terraform 0.11.12 or later). 
(The value is only stored in state and not saved by AWS.) * `source` - (Optional, conflicts with `content` and `content_base64`) Path to a file that will be read and uploaded as raw bytes for the object content. * `storage_class` - (Optional) [Storage Class](https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html#AmazonS3-PutObject-request-header-StorageClass) for the object. Defaults to "`STANDARD`". +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `tags` - (Optional) Map of tags to assign to the object. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. * `website_redirect` - (Optional) Target URL for [website redirect](http://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html). @@ -285,6 +286,34 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_s3_object.example + identity = { + bucket = "some-bucket-name" + key = "some/key.txt" + } +} + +resource "aws_s3_object" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `bucket` (String) S3 bucket name. +* `key` (String) Object key. + +#### Optional + +* `account_id` (String) AWS Account where this resource is managed. +* `region` (String) Region where this resource is managed. 
+ In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import objects using the `id` or S3 URL. For example: Import using the `id`, which is the bucket name and the key together: @@ -335,4 +364,4 @@ Import using S3 URL syntax: % terraform import aws_s3_object.example s3://some-bucket-name/some/key.txt ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/s3_object_copy.html.markdown b/website/docs/cdktf/python/r/s3_object_copy.html.markdown index 4b2ec862b6ee..5bce0ca6b81f 100644 --- a/website/docs/cdktf/python/r/s3_object_copy.html.markdown +++ b/website/docs/cdktf/python/r/s3_object_copy.html.markdown @@ -78,6 +78,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `acl` - (Optional) [Canned ACL](https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl) to apply. Valid values are `private`, `public-read`, `public-read-write`, `authenticated-read`, `aws-exec-read`, `bucket-owner-read`, and `bucket-owner-full-control`. Conflicts with `grant`. * `cache_control` - (Optional) Specifies caching behavior along the request/reply chain Read [w3c cache_control](http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9) for further details. * `checksum_algorithm` - (Optional) Indicates the algorithm used to create the checksum for the object. If a value is specified and the object is encrypted with KMS, you must have permission to use the `kms:Decrypt` action. Valid values: `CRC32`, `CRC32C`, `CRC64NVME` `SHA1`, `SHA256`. 
@@ -155,4 +156,4 @@ This resource exports the following attributes in addition to the arguments abov * `tags_all` - Map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). * `version_id` - Version ID of the newly created copy. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/s3control_access_grant.html.markdown b/website/docs/cdktf/python/r/s3control_access_grant.html.markdown index 0d1e2993af24..012d72cd971d 100644 --- a/website/docs/cdktf/python/r/s3control_access_grant.html.markdown +++ b/website/docs/cdktf/python/r/s3control_access_grant.html.markdown @@ -60,6 +60,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `access_grants_location_configuration` - (Optional) See [Location Configuration](#location-configuration) below for more details. * `access_grants_location_id` - (Required) The ID of the S3 Access Grants location to with the access grant is giving access. * `account_id` - (Optional) The AWS account ID for the S3 Access Grants location. Defaults to automatically determined account ID of the Terraform AWS provider. 
@@ -115,4 +116,4 @@ Using `terraform import`, import S3 Access Grants using the `account_id` and `ac % terraform import aws_s3control_access_grants_location.example 123456789012,04549c5e-2f3c-4a07-824d-2cafe720aa22 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/s3control_access_grants_instance.html.markdown b/website/docs/cdktf/python/r/s3control_access_grants_instance.html.markdown index 2e8844aadd43..bacb8b54cfa8 100644 --- a/website/docs/cdktf/python/r/s3control_access_grants_instance.html.markdown +++ b/website/docs/cdktf/python/r/s3control_access_grants_instance.html.markdown @@ -55,6 +55,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `account_id` - (Optional) The AWS account ID for the S3 Access Grants instance. Defaults to automatically determined account ID of the Terraform AWS provider. * `identity_center_arn` - (Optional) The ARN of the AWS IAM Identity Center instance associated with the S3 Access Grants instance. * `tags` - (Optional) Key-value map of resource tags. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. 
@@ -93,4 +94,4 @@ Using `terraform import`, import S3 Access Grants instances using the `account_i % terraform import aws_s3control_access_grants_instance.example 123456789012 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/s3control_access_grants_instance_resource_policy.html.markdown b/website/docs/cdktf/python/r/s3control_access_grants_instance_resource_policy.html.markdown index 001d449775ad..4eb30f834b74 100644 --- a/website/docs/cdktf/python/r/s3control_access_grants_instance_resource_policy.html.markdown +++ b/website/docs/cdktf/python/r/s3control_access_grants_instance_resource_policy.html.markdown @@ -41,6 +41,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `account_id` - (Optional) The AWS account ID for the S3 Access Grants instance. Defaults to automatically determined account ID of the Terraform AWS provider. * `policy` - (Optional) The policy document. 
@@ -73,4 +74,4 @@ Using `terraform import`, import S3 Access Grants instance resource policies usi % terraform import aws_s3control_access_grants_instance_resource_policy.example 123456789012 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/s3control_access_grants_location.html.markdown b/website/docs/cdktf/python/r/s3control_access_grants_location.html.markdown index b3fa8db202b5..acefa4659866 100644 --- a/website/docs/cdktf/python/r/s3control_access_grants_location.html.markdown +++ b/website/docs/cdktf/python/r/s3control_access_grants_location.html.markdown @@ -45,6 +45,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `account_id` - (Optional) The AWS account ID for the S3 Access Grants location. Defaults to automatically determined account ID of the Terraform AWS provider. * `iam_role_arn` - (Required) The ARN of the IAM role that S3 Access Grants should use when fulfilling runtime access requests to the location. 
@@ -84,4 +85,4 @@ Using `terraform import`, import S3 Access Grants locations using the `account_i % terraform import aws_s3control_access_grants_location.example 123456789012,default ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/s3control_access_point_policy.html.markdown b/website/docs/cdktf/python/r/s3control_access_point_policy.html.markdown index c0926b341279..a429da6afa09 100644 --- a/website/docs/cdktf/python/r/s3control_access_point_policy.html.markdown +++ b/website/docs/cdktf/python/r/s3control_access_point_policy.html.markdown @@ -73,6 +73,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `access_point_arn` - (Required) The ARN of the access point that you want to associate with the specified policy. * `policy` - (Required) The policy that you want to apply to the specified access point. 
@@ -108,4 +109,4 @@ Using `terraform import`, import Access Point policies using the `access_point_a % terraform import aws_s3control_access_point_policy.example arn:aws:s3:us-west-2:123456789012:accesspoint/example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/s3control_bucket.html.markdown b/website/docs/cdktf/python/r/s3control_bucket.html.markdown index e50983e2cb23..3ca4e5928da0 100644 --- a/website/docs/cdktf/python/r/s3control_bucket.html.markdown +++ b/website/docs/cdktf/python/r/s3control_bucket.html.markdown @@ -38,6 +38,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `bucket` - (Required) Name of the bucket. * `outpost_id` - (Required) Identifier of the Outpost to contain this bucket. * `tags` - (Optional) Key-value map of resource tags. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. @@ -54,6 +55,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. 
For example: + +```terraform +import { + to = aws_s3control_bucket.example + identity = { + "arn" = "arn:aws:s3-outposts:us-east-1:123456789012:outpost/op-12345678/bucket/example" + } +} + +resource "aws_s3control_bucket" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) ARN of the bucket. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import S3 Control Buckets using Amazon Resource Name (ARN). For example: ```python @@ -77,4 +99,4 @@ Using `terraform import`, import S3 Control Buckets using Amazon Resource Name ( % terraform import aws_s3control_bucket.example arn:aws:s3-outposts:us-east-1:123456789012:outpost/op-12345678/bucket/example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/s3control_bucket_lifecycle_configuration.html.markdown b/website/docs/cdktf/python/r/s3control_bucket_lifecycle_configuration.html.markdown index 12225a6f63d5..21f318ff368e 100644 --- a/website/docs/cdktf/python/r/s3control_bucket_lifecycle_configuration.html.markdown +++ b/website/docs/cdktf/python/r/s3control_bucket_lifecycle_configuration.html.markdown @@ -57,6 +57,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `bucket` - (Required) Amazon Resource Name (ARN) of the bucket. * `rule` - (Required) Configuration block(s) containing lifecycle rules for the bucket. * `abort_incomplete_multipart_upload` - (Optional) Configuration block containing settings for abort incomplete multipart upload. 
@@ -102,4 +103,4 @@ Using `terraform import`, import S3 Control Bucket Lifecycle Configurations usin % terraform import aws_s3control_bucket_lifecycle_configuration.example arn:aws:s3-outposts:us-east-1:123456789012:outpost/op-12345678/bucket/example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/s3control_bucket_policy.html.markdown b/website/docs/cdktf/python/r/s3control_bucket_policy.html.markdown index 84861ccebae2..8958901b27de 100644 --- a/website/docs/cdktf/python/r/s3control_bucket_policy.html.markdown +++ b/website/docs/cdktf/python/r/s3control_bucket_policy.html.markdown @@ -50,8 +50,9 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -The following arguments are required: +This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `bucket` - (Required) Amazon Resource Name (ARN) of the bucket. * `policy` - (Required) JSON string of the resource policy. For more information about building AWS IAM policy documents with Terraform, see the [AWS IAM Policy Document Guide](https://learn.hashicorp.com/terraform/aws/iam-policy). 
@@ -86,4 +87,4 @@ Using `terraform import`, import S3 Control Bucket Policies using the Amazon Res % terraform import aws_s3control_bucket_policy.example arn:aws:s3-outposts:us-east-1:123456789012:outpost/op-12345678/bucket/example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/s3control_directory_bucket_access_point_scope.html.markdown b/website/docs/cdktf/python/r/s3control_directory_bucket_access_point_scope.html.markdown index 2ab63bfcb5f8..d5157c6e8401 100644 --- a/website/docs/cdktf/python/r/s3control_directory_bucket_access_point_scope.html.markdown +++ b/website/docs/cdktf/python/r/s3control_directory_bucket_access_point_scope.html.markdown @@ -30,9 +30,9 @@ from cdktf import Fn, Token, TerraformStack # Provider bindings are generated by running `cdktf get`. # See https://cdk.tf/provider-generation for more details. # -from imports.aws. import S3ControlDirectoryBucketAccessPointScope from imports.aws.data_aws_availability_zones import DataAwsAvailabilityZones from imports.aws.s3_access_point import S3AccessPoint +from imports.aws.s3_control_directory_bucket_access_point_scope import S3ControlDirectoryBucketAccessPointScope from imports.aws.s3_directory_bucket import S3DirectoryBucket class MyConvertedCode(TerraformStack): def __init__(self, scope, name): @@ -70,8 +70,9 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: -* `name` - (Required) The name of the access point that you want to apply the scope to. * `account_id` - (Required) The AWS account ID that owns the specified access point. +* `name` - (Required) The name of the access point that you want to apply the scope to. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `scope` - (Optional). Scope is used to restrict access to specific prefixes, API operations, or a combination of both. To remove the `scope`, set it to `{permissions=[] prefixes=[]}`. The default scope is `{permissions=[] prefixes=[]}`. ### Scope Configuration block @@ -99,7 +100,7 @@ from cdktf import TerraformStack # Provider bindings are generated by running `cdktf get`. # See https://cdk.tf/provider-generation for more details. # -from imports.aws. import S3ControlDirectoryBucketAccessPointScope +from imports.aws.s3_control_directory_bucket_access_point_scope import S3ControlDirectoryBucketAccessPointScope class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) @@ -112,4 +113,4 @@ Using `terraform import`, import Access Point Scope using access point name and % terraform import aws_s3control_directory_bucket_access_point_scope.example example--zoneid--xa-s3,123456789012 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/s3control_multi_region_access_point.html.markdown b/website/docs/cdktf/python/r/s3control_multi_region_access_point.html.markdown index 4590d90188c9..8ad0f3c37ae1 100644 --- a/website/docs/cdktf/python/r/s3control_multi_region_access_point.html.markdown +++ b/website/docs/cdktf/python/r/s3control_multi_region_access_point.html.markdown @@ -65,6 +65,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
* `account_id` - (Optional) The AWS account ID for the owner of the buckets for which you want to create a Multi-Region Access Point. Defaults to automatically determined account ID of the Terraform AWS provider. * `details` - (Required) A configuration block containing details about the Multi-Region Access Point. See [Details Configuration Block](#details-configuration) below for more details @@ -142,4 +143,4 @@ Using `terraform import`, import Multi-Region Access Points using the `account_i % terraform import aws_s3control_multi_region_access_point.example 123456789012:example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/s3control_multi_region_access_point_policy.html.markdown b/website/docs/cdktf/python/r/s3control_multi_region_access_point_policy.html.markdown index 25a37781f3f5..06968b548fb3 100644 --- a/website/docs/cdktf/python/r/s3control_multi_region_access_point_policy.html.markdown +++ b/website/docs/cdktf/python/r/s3control_multi_region_access_point_policy.html.markdown @@ -76,6 +76,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `account_id` - (Optional) The AWS account ID for the owner of the Multi-Region Access Point. Defaults to automatically determined account ID of the Terraform AWS provider. * `details` - (Required) A configuration block containing details about the policy for the Multi-Region Access Point. 
See [Details Configuration Block](#details-configuration) below for more details @@ -128,4 +129,4 @@ Using `terraform import`, import Multi-Region Access Point Policies using the `a % terraform import aws_s3control_multi_region_access_point_policy.example 123456789012:example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/s3control_object_lambda_access_point.html.markdown b/website/docs/cdktf/python/r/s3control_object_lambda_access_point.html.markdown index 8414840a9dbe..a248165c14ec 100644 --- a/website/docs/cdktf/python/r/s3control_object_lambda_access_point.html.markdown +++ b/website/docs/cdktf/python/r/s3control_object_lambda_access_point.html.markdown @@ -62,6 +62,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `account_id` - (Optional) The AWS account ID for the owner of the bucket for which you want to create an Object Lambda Access Point. Defaults to automatically determined account ID of the Terraform AWS provider. * `configuration` - (Required) A configuration block containing details about the Object Lambda Access Point. See [Configuration](#configuration) below for more details. * `name` - (Required) The name for this Object Lambda Access Point. 
@@ -128,4 +129,4 @@ Using `terraform import`, import Object Lambda Access Points using the `account_ % terraform import aws_s3control_object_lambda_access_point.example 123456789012:example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/s3control_object_lambda_access_point_policy.html.markdown b/website/docs/cdktf/python/r/s3control_object_lambda_access_point_policy.html.markdown index 27cd0da09e16..860c2947cd66 100644 --- a/website/docs/cdktf/python/r/s3control_object_lambda_access_point_policy.html.markdown +++ b/website/docs/cdktf/python/r/s3control_object_lambda_access_point_policy.html.markdown @@ -81,6 +81,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `account_id` - (Optional) The AWS account ID for the account that owns the Object Lambda Access Point. Defaults to automatically determined account ID of the Terraform AWS provider. * `name` - (Required) The name of the Object Lambda Access Point. * `policy` - (Required) The Object Lambda Access Point resource policy document. 
@@ -117,4 +118,4 @@ Using `terraform import`, import Object Lambda Access Point policies using the ` % terraform import aws_s3control_object_lambda_access_point_policy.example 123456789012:example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/s3control_storage_lens_configuration.html.markdown b/website/docs/cdktf/python/r/s3control_storage_lens_configuration.html.markdown index f8c2a6ed8821..fd85ce90a4bb 100644 --- a/website/docs/cdktf/python/r/s3control_storage_lens_configuration.html.markdown +++ b/website/docs/cdktf/python/r/s3control_storage_lens_configuration.html.markdown @@ -68,6 +68,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `account_id` - (Optional) The AWS account ID for the S3 Storage Lens configuration. Defaults to automatically determined account ID of the Terraform AWS provider. * `config_id` - (Required) The ID of the S3 Storage Lens configuration. * `storage_lens_configuration` - (Required) The S3 Storage Lens configuration. See [Storage Lens Configuration](#storage-lens-configuration) below for more details. 
@@ -238,4 +239,4 @@ Using `terraform import`, import S3 Storage Lens configurations using the `accou % terraform import aws_s3control_storage_lens_configuration.example 123456789012:example-1 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/s3outposts_endpoint.html.markdown b/website/docs/cdktf/python/r/s3outposts_endpoint.html.markdown index c0b8a3de2fc7..4b6b7f758117 100644 --- a/website/docs/cdktf/python/r/s3outposts_endpoint.html.markdown +++ b/website/docs/cdktf/python/r/s3outposts_endpoint.html.markdown @@ -37,6 +37,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `outpost_id` - (Required) Identifier of the Outpost to contain this endpoint. * `security_group_id` - (Required) Identifier of the EC2 Security Group. * `subnet_id` - (Required) Identifier of the EC2 Subnet. 
@@ -79,4 +80,4 @@ Using `terraform import`, import S3 Outposts Endpoints using Amazon Resource Nam % terraform import aws_s3outposts_endpoint.example arn:aws:s3-outposts:us-east-1:123456789012:outpost/op-12345678/endpoint/0123456789abcdef,sg-12345678,subnet-12345678 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/s3tables_namespace.html.markdown b/website/docs/cdktf/python/r/s3tables_namespace.html.markdown index aaba98ae9b03..aae328c75347 100644 --- a/website/docs/cdktf/python/r/s3tables_namespace.html.markdown +++ b/website/docs/cdktf/python/r/s3tables_namespace.html.markdown @@ -42,8 +42,9 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -The following arguments are required: +This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `namespace` - (Required, Forces new resource) Name of the namespace. Must be between 1 and 255 characters in length. Can consist of lowercase letters, numbers, and underscores, and must begin and end with a lowercase letter or number. 
@@ -82,4 +83,4 @@ Using `terraform import`, import S3 Tables Namespace using the `table_bucket_arn % terraform import aws_s3tables_namespace.example 'arn:aws:s3tables:us-west-2:123456789012:bucket/example-bucket;example-namespace' ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/s3tables_table.html.markdown b/website/docs/cdktf/python/r/s3tables_table.html.markdown index e6523fb1dae4..10c499af59ba 100644 --- a/website/docs/cdktf/python/r/s3tables_table.html.markdown +++ b/website/docs/cdktf/python/r/s3tables_table.html.markdown @@ -49,6 +49,68 @@ class MyConvertedCode(TerraformStack): aws_s3_tables_table_example.override_logical_id("example") ``` +### With Metadata Schema + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import Token, TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.s3_tables_namespace import S3TablesNamespace +from imports.aws.s3_tables_table import S3TablesTable +from imports.aws.s3_tables_table_bucket import S3TablesTableBucket +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + example = S3TablesTableBucket(self, "example", + name="example-bucket" + ) + aws_s3_tables_namespace_example = S3TablesNamespace(self, "example_1", + namespace="example_namespace", + table_bucket_arn=example.arn + ) + # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. 
+ aws_s3_tables_namespace_example.override_logical_id("example") + aws_s3_tables_table_example = S3TablesTable(self, "example_2", + format="ICEBERG", + metadata=[{ + "iceberg": [{ + "schema": [{ + "field": [{ + "name": "id", + "required": True, + "type": "long" + }, { + "name": "name", + "required": True, + "type": "string" + }, { + "name": "created_at", + "required": False, + "type": "timestamp" + }, { + "name": "price", + "required": False, + "type": "decimal(10,2)" + } + ] + } + ] + } + ] + } + ], + name="example_table", + namespace=Token.as_string(aws_s3_tables_namespace_example.namespace), + table_bucket_arn=Token.as_string(aws_s3_tables_namespace_example.table_bucket_arn) + ) + # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. + aws_s3_tables_table_example.override_logical_id("example") +``` + ## Argument Reference The following arguments are required: @@ -66,10 +128,13 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `encryption_configuration` - (Optional) A single table bucket encryption configuration object. [See `encryption_configuration` below](#encryption_configuration). * `maintenance_configuration` - (Optional) A single table bucket maintenance configuration object. [See `maintenance_configuration` below](#maintenance_configuration). +* `metadata` - (Optional) Contains details about the table metadata. This configuration specifies the metadata format and schema for the table. Currently only supports Iceberg format. + [See `metadata` below](#metadata). 
### `encryption_configuration` @@ -121,6 +186,35 @@ The `iceberg_snapshot_management.settings` object supports the following argumen * `min_snapshots_to_keep` - (Required) Minimum number of snapshots to keep. Must be at least `1`. +### `metadata` + +The `metadata` configuration block supports the following argument: + +* `iceberg` - (Optional) Contains details about the metadata for an Iceberg table. This block defines the schema structure for the Apache Iceberg table format. + [See `iceberg` below](#iceberg). + +### `iceberg` + +The `iceberg` configuration block supports the following argument: + +* `schema` - (Required) Schema configuration for the Iceberg table. + [See `schema` below](#schema). + +### `schema` + +The `schema` configuration block supports the following argument: + +* `field` - (Required) List of schema fields for the Iceberg table. Each field defines a column in the table schema. + [See `field` below](#field). + +### `field` + +The `field` configuration block supports the following arguments: + +* `name` - (Required) The name of the field. +* `type` - (Required) The field type. S3 Tables supports all Apache Iceberg primitive types including: `boolean`, `int`, `long`, `float`, `double`, `decimal(precision,scale)`, `date`, `time`, `timestamp`, `timestamptz`, `string`, `uuid`, `fixed(length)`, `binary`. +* `required` - (Optional) A Boolean value that specifies whether values are required for each row in this field. Defaults to `false`. 
+ ## Attribute Reference This resource exports the following attributes in addition to the arguments above: @@ -162,4 +256,4 @@ Using `terraform import`, import S3 Tables Table using the `table_bucket_arn`, t % terraform import aws_s3tables_table.example 'arn:aws:s3tables:us-west-2:123456789012:bucket/example-bucket;example-namespace;example-table' ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/s3tables_table_bucket.html.markdown b/website/docs/cdktf/python/r/s3tables_table_bucket.html.markdown index 550aa4cacbca..f59b45a208dc 100644 --- a/website/docs/cdktf/python/r/s3tables_table_bucket.html.markdown +++ b/website/docs/cdktf/python/r/s3tables_table_bucket.html.markdown @@ -46,8 +46,10 @@ The following arguments are optional: * `encryption_configuration` - (Optional) A single table bucket encryption configuration object. [See `encryption_configuration` below](#encryption_configuration). +* `force_destroy` - (Optional, Default:`false`) Whether all tables and namespaces within the table bucket should be deleted *when the table bucket is destroyed* so that the table bucket can be destroyed without error. These tables and namespaces are *not* recoverable. This only deletes tables and namespaces when the table bucket is destroyed, *not* when setting this parameter to `true`. Once this parameter is set to `true`, there must be a successful `terraform apply` run before a destroy is required to update this value in the resource state. Without a successful `terraform apply` after this parameter is set, this flag will have no effect. If setting this field in the same operation that would require replacing the table bucket or destroying the table bucket, this flag will not work. Additionally when importing a table bucket, a successful `terraform apply` is required to set this value in state before it will take effect on a destroy operation. 
* `maintenance_configuration` - (Optional) A single table bucket maintenance configuration object. [See `maintenance_configuration` below](#maintenance_configuration). +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). ### `encryption_configuration` @@ -114,4 +116,4 @@ Using `terraform import`, import S3 Tables Table Bucket using the `arn`. For exa % terraform import aws_s3tables_table_bucket.example arn:aws:s3tables:us-west-2:123456789012:bucket/example-bucket ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/s3tables_table_bucket_policy.html.markdown b/website/docs/cdktf/python/r/s3tables_table_bucket_policy.html.markdown index 4435176493c7..5a00f4100da8 100644 --- a/website/docs/cdktf/python/r/s3tables_table_bucket_policy.html.markdown +++ b/website/docs/cdktf/python/r/s3tables_table_bucket_policy.html.markdown @@ -46,8 +46,9 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -The following arguments are required: +This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `resource_policy` - (Required) Amazon Web Services resource-based policy document in JSON format. * `table_bucket_arn` - (Required, Forces new resource) ARN referencing the Table Bucket that owns this policy. 
@@ -80,4 +81,4 @@ Using `terraform import`, import S3 Tables Table Bucket Policy using the `table_ % terraform import aws_s3tables_table_bucket_policy.example 'arn:aws:s3tables:us-west-2:123456789012:bucket/example-bucket;example-namespace' ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/s3tables_table_policy.html.markdown b/website/docs/cdktf/python/r/s3tables_table_policy.html.markdown index 5de88b7bdcc8..52a45b5a2bab 100644 --- a/website/docs/cdktf/python/r/s3tables_table_policy.html.markdown +++ b/website/docs/cdktf/python/r/s3tables_table_policy.html.markdown @@ -66,8 +66,9 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -The following arguments are required: +This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `resource_policy` - (Required) Amazon Web Services resource-based policy document in JSON format. * `name` - (Required, Forces new resource) Name of the table. Must be between 1 and 255 characters in length. 
@@ -106,4 +107,4 @@ Using `terraform import`, import S3 Tables Table Policy using the `table_bucket_ % terraform import aws_s3tables_table_policy.example 'arn:aws:s3tables:us-west-2:123456789012:bucket/example-bucket;example-namespace;example-table' ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/sagemaker_app.html.markdown b/website/docs/cdktf/python/r/sagemaker_app.html.markdown index e440054b66d5..b5113f753547 100644 --- a/website/docs/cdktf/python/r/sagemaker_app.html.markdown +++ b/website/docs/cdktf/python/r/sagemaker_app.html.markdown @@ -40,6 +40,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `app_name` - (Required) The name of the app. * `app_type` - (Required) The type of app. Valid values are `JupyterServer`, `KernelGateway`, `RStudioServerPro`, `RSessionGateway`, `TensorBoard`, `CodeEditor`, `JupyterLab`, `DetailedProfiler`, and `Canvas`. * `domain_id` - (Required) The domain ID. @@ -89,4 +90,4 @@ Using `terraform import`, import SageMaker AI Apps using the `id`. 
For example: % terraform import aws_sagemaker_app.example arn:aws:sagemaker:us-west-2:012345678912:app/domain-id/user-profile-name/app-type/app-name ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/sagemaker_app_image_config.html.markdown b/website/docs/cdktf/python/r/sagemaker_app_image_config.html.markdown index 84ac493154fe..41f9bac056bd 100644 --- a/website/docs/cdktf/python/r/sagemaker_app_image_config.html.markdown +++ b/website/docs/cdktf/python/r/sagemaker_app_image_config.html.markdown @@ -39,6 +39,26 @@ class MyConvertedCode(TerraformStack): ) ``` +### Using Code Editor with empty configuration + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.sagemaker_app_image_config import SagemakerAppImageConfig, SagemakerAppImageConfigCodeEditorAppImageConfig +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + SagemakerAppImageConfig(self, "test", + app_image_config_name="example", + code_editor_app_image_config=SagemakerAppImageConfigCodeEditorAppImageConfig() + ) +``` + ### Default File System Config ```python @@ -69,12 +89,15 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `app_image_config_name` - (Required) The name of the App Image Config. -* `code_editor_app_image_config` - (Optional) The CodeEditorAppImageConfig. You can only specify one image kernel in the AppImageConfig API.
This kernel is shown to users before the image starts. After the image runs, all kernels are visible in Code Editor. See [Code Editor App Image Config](#code-editor-app-image-config) details below. -* `jupyter_lab_image_config` - (Optional) The JupyterLabAppImageConfig. You can only specify one image kernel in the AppImageConfig API. This kernel is shown to users before the image starts. After the image runs, all kernels are visible in JupyterLab. See [Jupyter Lab Image Config](#jupyter-lab-image-config) details below. +* `code_editor_app_image_config` - (Optional) The CodeEditorAppImageConfig. See [Code Editor App Image Config](#code-editor-app-image-config) details below. +* `jupyter_lab_image_config` - (Optional) The JupyterLabAppImageConfig. See [Jupyter Lab Image Config](#jupyter-lab-image-config) details below. * `kernel_gateway_image_config` - (Optional) The configuration for the file system and kernels in a SageMaker AI image running as a KernelGateway app. See [Kernel Gateway Image Config](#kernel-gateway-image-config) details below. * `tags` - (Optional) A map of tags to assign to the resource. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. +~> **NOTE:** Exactly one of `code_editor_app_image_config`, `jupyter_lab_image_config`, or `kernel_gateway_image_config` must be configured. Empty blocks (e.g., `code_editor_app_image_config {}`) are valid configurations. + ### Code Editor App Image Config * `container_config` - (Optional) The configuration used to run the application image container. See [Container Config](#container-config) details below. 
@@ -142,4 +165,4 @@ Using `terraform import`, import SageMaker AI App Image Configs using the `name` % terraform import aws_sagemaker_app_image_config.example example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/sagemaker_code_repository.html.markdown b/website/docs/cdktf/python/r/sagemaker_code_repository.html.markdown index b74f4fc0ec75..367bb9f22646 100644 --- a/website/docs/cdktf/python/r/sagemaker_code_repository.html.markdown +++ b/website/docs/cdktf/python/r/sagemaker_code_repository.html.markdown @@ -82,6 +82,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `code_repository_name` - (Required) The name of the Code Repository (must be unique). * `git_config` - (Required) Specifies details about the repository. see [Git Config](#git-config) details below. * `tags` - (Optional) A map of tags to assign to the resource. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. 
@@ -125,4 +126,4 @@ Using `terraform import`, import SageMaker AI Code Repositories using the `name` % terraform import aws_sagemaker_code_repository.test_code_repository my-code-repo ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/sagemaker_data_quality_job_definition.html.markdown b/website/docs/cdktf/python/r/sagemaker_data_quality_job_definition.html.markdown index 7b0d8da11438..392ff5d772a8 100644 --- a/website/docs/cdktf/python/r/sagemaker_data_quality_job_definition.html.markdown +++ b/website/docs/cdktf/python/r/sagemaker_data_quality_job_definition.html.markdown @@ -60,6 +60,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `data_quality_app_specification` - (Required) Specifies the container that runs the monitoring job. Fields are documented below. * `data_quality_baseline_config` - (Optional) Configures the constraints and baselines for the monitoring job. Fields are documented below. * `data_quality_job_input` - (Required) A list of inputs for the monitoring job. Fields are documented below. @@ -198,4 +199,4 @@ Using `terraform import`, import data quality job definitions using the `name`. 
% terraform import aws_sagemaker_data_quality_job_definition.test_data_quality_job_definition data-quality-job-definition-foo ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/sagemaker_device.html.markdown b/website/docs/cdktf/python/r/sagemaker_device.html.markdown index aa9e7ae17f03..12f317ed6e2a 100644 --- a/website/docs/cdktf/python/r/sagemaker_device.html.markdown +++ b/website/docs/cdktf/python/r/sagemaker_device.html.markdown @@ -40,6 +40,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `device_fleet_name` - (Required) The name of the Device Fleet. * `device` - (Required) The device to register with SageMaker AI Edge Manager. See [Device](#device) details below. @@ -81,4 +82,4 @@ Using `terraform import`, import SageMaker AI Devices using the `device-fleet-na % terraform import aws_sagemaker_device.example my-fleet/my-device ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/sagemaker_device_fleet.html.markdown b/website/docs/cdktf/python/r/sagemaker_device_fleet.html.markdown index 4be5e25975a0..78fbb1d2c00e 100644 --- a/website/docs/cdktf/python/r/sagemaker_device_fleet.html.markdown +++ b/website/docs/cdktf/python/r/sagemaker_device_fleet.html.markdown @@ -41,6 +41,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `device_fleet_name` - (Required) The name of the Device Fleet (must be unique). * `role_arn` - (Required) The Amazon Resource Name (ARN) that has access to AWS Internet of Things (IoT). * `output_config` - (Required) Specifies details about the repository. see [Output Config](#output-config) details below. @@ -86,4 +87,4 @@ Using `terraform import`, import SageMaker AI Device Fleets using the `name`. Fo % terraform import aws_sagemaker_device_fleet.example my-fleet ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/sagemaker_domain.html.markdown b/website/docs/cdktf/python/r/sagemaker_domain.html.markdown index f4c1c54ade52..a7133b3a73f8 100644 --- a/website/docs/cdktf/python/r/sagemaker_domain.html.markdown +++ b/website/docs/cdktf/python/r/sagemaker_domain.html.markdown @@ -132,6 +132,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `app_network_access_type` - (Optional) Specifies the VPC used for non-EFS traffic. The default value is `PublicInternetOnly`. Valid values are `PublicInternetOnly` and `VpcOnly`. * `app_security_group_management` - (Optional) The entity that creates and manages the required security groups for inter-app communication in `VPCOnly` mode. Valid values are `Service` and `Customer`. * `domain_settings` - (Optional) The domain settings. See [`domain_settings` Block](#domain_settings-block) below. @@ -390,4 +391,4 @@ Using `terraform import`, import SageMaker AI Domains using the `id`. 
For exampl % terraform import aws_sagemaker_domain.test_domain d-8jgsjtilstu8 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/sagemaker_endpoint.html.markdown b/website/docs/cdktf/python/r/sagemaker_endpoint.html.markdown index 7629af2d71b5..ca4b34029d42 100644 --- a/website/docs/cdktf/python/r/sagemaker_endpoint.html.markdown +++ b/website/docs/cdktf/python/r/sagemaker_endpoint.html.markdown @@ -41,6 +41,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `endpoint_config_name` - (Required) The name of the endpoint configuration to use. * `deployment_config` - (Optional) The deployment configuration for an endpoint, which contains the desired deployment strategy and rollback configurations. See [Deployment Config](#deployment-config). * `name` - (Optional) The name of the endpoint. If omitted, Terraform will assign a random, unique name. @@ -133,4 +134,4 @@ Using `terraform import`, import endpoints using the `name`. 
For example: % terraform import aws_sagemaker_endpoint.test_endpoint my-endpoint ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/sagemaker_endpoint_configuration.html.markdown b/website/docs/cdktf/python/r/sagemaker_endpoint_configuration.html.markdown index f3a426c57b31..bc4ea07e722e 100644 --- a/website/docs/cdktf/python/r/sagemaker_endpoint_configuration.html.markdown +++ b/website/docs/cdktf/python/r/sagemaker_endpoint_configuration.html.markdown @@ -47,6 +47,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `production_variants` - (Required) An list of ProductionVariant objects, one for each model that you want to host at this endpoint. Fields are documented below. * `kms_key_arn` - (Optional) Amazon Resource Name (ARN) of a AWS Key Management Service key that Amazon SageMaker AI uses to encrypt data on the storage volume attached to the ML compute instance that hosts the endpoint. * `name` - (Optional) The name of the endpoint configuration. If omitted, Terraform will assign a random, unique name. Conflicts with `name_prefix`. @@ -171,4 +172,4 @@ Using `terraform import`, import endpoint configurations using the `name`. 
For e % terraform import aws_sagemaker_endpoint_configuration.test_endpoint_config endpoint-config-foo ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/sagemaker_feature_group.html.markdown b/website/docs/cdktf/python/r/sagemaker_feature_group.html.markdown index d271aff0bc94..31fdabce00ab 100644 --- a/website/docs/cdktf/python/r/sagemaker_feature_group.html.markdown +++ b/website/docs/cdktf/python/r/sagemaker_feature_group.html.markdown @@ -48,6 +48,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `feature_group_name` - (Required) The name of the Feature Group. The name must be unique within an AWS Region in an AWS account. * `record_identifier_feature_name` - (Required) The name of the Feature whose value uniquely identifies a Record defined in the Feature Store. Only the latest record per identifier value will be stored in the Online Store. * `event_time_feature_name` - (Required) The name of the feature that stores the EventTime of a Record in a Feature Group. @@ -131,4 +132,4 @@ Using `terraform import`, import Feature Groups using the `name`. 
For example: % terraform import aws_sagemaker_feature_group.test_feature_group feature_group-foo ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/sagemaker_flow_definition.html.markdown b/website/docs/cdktf/python/r/sagemaker_flow_definition.html.markdown index da2e3415b4f3..ba9a0c151d1a 100644 --- a/website/docs/cdktf/python/r/sagemaker_flow_definition.html.markdown +++ b/website/docs/cdktf/python/r/sagemaker_flow_definition.html.markdown @@ -73,7 +73,7 @@ class MyConvertedCode(TerraformStack): task_count=1, task_description="example", task_title="example", - workteam_arn="arn:aws:sagemaker:${" + current.name + "}:394669845002:workteam/public-crowd/default" + workteam_arn="arn:aws:sagemaker:${" + current.region + "}:394669845002:workteam/public-crowd/default" ), output_config=SagemakerFlowDefinitionOutputConfig( s3_output_path="s3://${" + aws_s3_bucket_example.bucket + "}/" @@ -125,6 +125,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `flow_definition_name` - (Required) The name of your flow definition. * `human_loop_config` - (Required) An object containing information about the tasks the human reviewers will perform. See [Human Loop Config](#human-loop-config) details below. * `role_arn` - (Required) The Amazon Resource Name (ARN) of the role needed to call other services on your behalf. 
@@ -205,4 +206,4 @@ Using `terraform import`, import SageMaker AI Flow Definitions using the `flow_d % terraform import aws_sagemaker_flow_definition.example example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/sagemaker_hub.html.markdown b/website/docs/cdktf/python/r/sagemaker_hub.html.markdown index 22f863e8c95b..18ec76eb27cf 100644 --- a/website/docs/cdktf/python/r/sagemaker_hub.html.markdown +++ b/website/docs/cdktf/python/r/sagemaker_hub.html.markdown @@ -38,6 +38,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `hub_name` - (Required) The name of the hub. * `hub_description` - (Required) A description of the hub. * `hub_display_name` - (Optional) The display name of the hub. @@ -82,4 +83,4 @@ Using `terraform import`, import SageMaker AI Hubs using the `name`. For example % terraform import aws_sagemaker_hub.test_hub my-code-repo ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/sagemaker_human_task_ui.html.markdown b/website/docs/cdktf/python/r/sagemaker_human_task_ui.html.markdown index 98f285ade838..65b79b3dc6f7 100644 --- a/website/docs/cdktf/python/r/sagemaker_human_task_ui.html.markdown +++ b/website/docs/cdktf/python/r/sagemaker_human_task_ui.html.markdown @@ -39,6 +39,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `human_task_ui_name` - (Required) The name of the Human Task UI. * `tags` - (Optional) A map of tags to assign to the resource. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. * `ui_template` - (Required) The Liquid template for the worker user interface. See [UI Template](#ui-template) below. @@ -86,4 +87,4 @@ Using `terraform import`, import SageMaker AI Human Task UIs using the `human_ta % terraform import aws_sagemaker_human_task_ui.example example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/sagemaker_image.html.markdown b/website/docs/cdktf/python/r/sagemaker_image.html.markdown index 82b5b189df3a..9cba4c043c18 100644 --- a/website/docs/cdktf/python/r/sagemaker_image.html.markdown +++ b/website/docs/cdktf/python/r/sagemaker_image.html.markdown @@ -38,6 +38,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `image_name` - (Required) The name of the image. Must be unique to your account. * `role_arn` - (Required) The Amazon Resource Name (ARN) of an IAM role that enables Amazon SageMaker AI to perform tasks on your behalf. * `display_name` - (Optional) The display name of the image. When the image is added to a domain (must be unique to the domain). 
@@ -77,4 +78,4 @@ Using `terraform import`, import SageMaker AI Code Images using the `name`. For % terraform import aws_sagemaker_image.test_image my-code-repo ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/sagemaker_image_version.html.markdown b/website/docs/cdktf/python/r/sagemaker_image_version.html.markdown index de3ba91f55d1..3251aadee3ff 100644 --- a/website/docs/cdktf/python/r/sagemaker_image_version.html.markdown +++ b/website/docs/cdktf/python/r/sagemaker_image_version.html.markdown @@ -16,6 +16,26 @@ Provides a SageMaker AI Image Version resource. ### Basic usage +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.sagemaker_image_version import SagemakerImageVersion +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + SagemakerImageVersion(self, "example", + base_image="012345678912.dkr.ecr.us-west-2.amazonaws.com/image:latest", + image_name=test.id + ) +``` + +### With Aliases + ```python # DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug from constructs import Construct @@ -29,6 +49,7 @@ class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) SagemakerImageVersion(self, "test", + aliases=["latest", "stable"], base_image="012345678912.dkr.ecr.us-west-2.amazonaws.com/image:latest", image_name=Token.as_string(aws_sagemaker_image_test.id) ) @@ -38,8 +59,10 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `image_name` - (Required) The name of the image. Must be unique to your account. * `base_image` - (Required) The registry path of the container image on which this image version is based. +* `aliases` - (Optional) A list of aliases for the image version. * `horovod` - (Optional) Indicates Horovod compatibility. * `job_type` - (Optional) Indicates SageMaker AI job type compatibility. Valid values are: `TRAINING`, `INFERENCE`, and `NOTEBOOK_KERNEL`. * `ml_framework` - (Optional) The machine learning framework vended in the image version. @@ -52,14 +75,13 @@ This resource supports the following arguments: This resource exports the following attributes in addition to the arguments above: -* `id` - The name of the Image. * `arn` - The Amazon Resource Name (ARN) assigned by AWS to this Image Version. * `version`- The version of the image. If not specified, the latest version is described. * `container_image` - The registry path of the container image that contains this image version. ## Import -In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import SageMaker AI Image Versions using the `name`. For example: +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import SageMaker AI Image Versions using a comma-delimited string concatenating `image_name` and `version`. For example: ```python # DO NOT EDIT. 
Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug @@ -73,13 +95,13 @@ from imports.aws.sagemaker_image_version import SagemakerImageVersion class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) - SagemakerImageVersion.generate_config_for_import(self, "testImage", "my-code-repo") + SagemakerImageVersion.generate_config_for_import(self, "example", "example-name,1") ``` -Using `terraform import`, import SageMaker AI Image Versions using the `name`. For example: +Using `terraform import`, import SageMaker AI Image Versions using a comma-delimited string concatenating `image_name` and `version`. For example: ```console -% terraform import aws_sagemaker_image_version.test_image my-code-repo +% terraform import aws_sagemaker_image_version.example example-name,1 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/sagemaker_mlflow_tracking_server.html.markdown b/website/docs/cdktf/python/r/sagemaker_mlflow_tracking_server.html.markdown index e34b44884daa..b970e5ba73f3 100644 --- a/website/docs/cdktf/python/r/sagemaker_mlflow_tracking_server.html.markdown +++ b/website/docs/cdktf/python/r/sagemaker_mlflow_tracking_server.html.markdown @@ -39,6 +39,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `artifact_store_uri` - (Required) The S3 URI for a general purpose bucket to use as the MLflow Tracking Server artifact store. * `role_arn` - (Required) The Amazon Resource Name (ARN) for an IAM role in your account that the MLflow Tracking Server uses to access the artifact store in Amazon S3. 
The role should have AmazonS3FullAccess permissions. For more information on IAM permissions for tracking server creation, see [Set up IAM permissions for MLflow](https://docs.aws.amazon.com/sagemaker/latest/dg/mlflow-create-tracking-server-iam.html). * `tracking_server_name` - (Required) A unique string identifying the tracking server name. This string is part of the tracking server ARN. @@ -82,4 +83,4 @@ Using `terraform import`, import SageMaker AI MLFlow Tracking Servers using the % terraform import aws_sagemaker_mlflow_tracking_server.example example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/sagemaker_model.html.markdown b/website/docs/cdktf/python/r/sagemaker_model.html.markdown index c8b9ec17068f..3c48ae9a8e1c 100644 --- a/website/docs/cdktf/python/r/sagemaker_model.html.markdown +++ b/website/docs/cdktf/python/r/sagemaker_model.html.markdown @@ -63,6 +63,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Optional) The name of the model (must be unique). If omitted, Terraform will assign a random, unique name. * `primary_container` - (Optional) The primary docker image containing inference code that is used when the model is deployed for predictions. If not specified, the `container` argument is required. Fields are documented below. * `execution_role_arn` - (Required) A role that SageMaker AI can assume to access model artifacts and docker images for deployment. @@ -151,4 +152,4 @@ Using `terraform import`, import models using the `name`. 
For example: % terraform import aws_sagemaker_model.test_model model-foo ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/sagemaker_model_package_group.html.markdown b/website/docs/cdktf/python/r/sagemaker_model_package_group.html.markdown index 84bfd746d567..f97d20e88fb4 100644 --- a/website/docs/cdktf/python/r/sagemaker_model_package_group.html.markdown +++ b/website/docs/cdktf/python/r/sagemaker_model_package_group.html.markdown @@ -37,6 +37,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `model_package_group_name` - (Required) The name of the model group. * `model_package_group_description` - (Optional) A description for the model group. * `tags` - (Optional) A map of tags to assign to the resource. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. 
@@ -74,4 +75,4 @@ Using `terraform import`, import SageMaker AI Model Package Groups using the `na % terraform import aws_sagemaker_model_package_group.test_model_package_group my-code-repo ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/sagemaker_model_package_group_policy.html.markdown b/website/docs/cdktf/python/r/sagemaker_model_package_group_policy.html.markdown index 5c84b94437c2..8619d8da0424 100644 --- a/website/docs/cdktf/python/r/sagemaker_model_package_group_policy.html.markdown +++ b/website/docs/cdktf/python/r/sagemaker_model_package_group_policy.html.markdown @@ -66,6 +66,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `model_package_group_name` - (Required) The name of the model package group. ## Attribute Reference @@ -99,4 +100,4 @@ Using `terraform import`, import SageMaker AI Model Package Groups using the `na % terraform import aws_sagemaker_model_package_group_policy.example example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/sagemaker_monitoring_schedule.html.markdown b/website/docs/cdktf/python/r/sagemaker_monitoring_schedule.html.markdown index a484274bfe75..3b57cd70258a 100644 --- a/website/docs/cdktf/python/r/sagemaker_monitoring_schedule.html.markdown +++ b/website/docs/cdktf/python/r/sagemaker_monitoring_schedule.html.markdown @@ -41,6 +41,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `monitoring_schedule_config` - (Required) The configuration object that specifies the monitoring schedule and defines the monitoring job. Fields are documented below. * `name` - (Optional) The name of the monitoring schedule. The name must be unique within an AWS Region within an AWS account. If omitted, Terraform will assign a random, unique name. * `tags` - (Optional) A mapping of tags to assign to the resource. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. @@ -88,4 +89,4 @@ Using `terraform import`, import monitoring schedules using the `name`. For exam % terraform import aws_sagemaker_monitoring_schedule.test_monitoring_schedule monitoring-schedule-foo ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/sagemaker_notebook_instance.html.markdown b/website/docs/cdktf/python/r/sagemaker_notebook_instance.html.markdown index 54dff2031a60..6f7fe35ddff1 100644 --- a/website/docs/cdktf/python/r/sagemaker_notebook_instance.html.markdown +++ b/website/docs/cdktf/python/r/sagemaker_notebook_instance.html.markdown @@ -74,6 +74,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The name of the notebook instance (must be unique). 
* `role_arn` - (Required) The ARN of the IAM role to be used by the notebook instance which allows SageMaker AI to call other services on your behalf. * `instance_type` - (Required) The name of ML compute instance type. @@ -81,7 +82,6 @@ This resource supports the following arguments: * `volume_size` - (Optional) The size, in GB, of the ML storage volume to attach to the notebook instance. The default value is 5 GB. * `subnet_id` - (Optional) The VPC subnet ID. * `security_groups` - (Optional) The associated security groups. -* `accelerator_types` - (Optional, Deprecated) A list of Elastic Inference (EI) instance types to associate with this notebook instance. See [Elastic Inference Accelerator](https://docs.aws.amazon.com/sagemaker/latest/dg/ei.html) for more details. Valid values: `ml.eia1.medium`, `ml.eia1.large`, `ml.eia1.xlarge`, `ml.eia2.medium`, `ml.eia2.large`, `ml.eia2.xlarge`. * `additional_code_repositories` - (Optional) An array of up to three Git repositories to associate with the notebook instance. These can be either the names of Git repositories stored as resources in your account, or the URL of Git repositories in [AWS CodeCommit](https://docs.aws.amazon.com/codecommit/latest/userguide/welcome.html) or in any other Git repository. These repositories are cloned at the same level as the default repository of your notebook instance. * `default_code_repository` - (Optional) The Git repository associated with the notebook instance as its default code repository. This can be either the name of a Git repository stored as a resource in your account, or the URL of a Git repository in [AWS CodeCommit](https://docs.aws.amazon.com/codecommit/latest/userguide/welcome.html) or in any other Git repository. 
@@ -131,4 +131,4 @@ Using `terraform import`, import SageMaker AI Notebook Instances using the `name % terraform import aws_sagemaker_notebook_instance.test_notebook_instance my-notebook-instance ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/sagemaker_notebook_instance_lifecycle_configuration.html.markdown b/website/docs/cdktf/python/r/sagemaker_notebook_instance_lifecycle_configuration.html.markdown index 092f2c95f87b..4a44b67ba8fa 100644 --- a/website/docs/cdktf/python/r/sagemaker_notebook_instance_lifecycle_configuration.html.markdown +++ b/website/docs/cdktf/python/r/sagemaker_notebook_instance_lifecycle_configuration.html.markdown @@ -39,6 +39,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Optional) The name of the lifecycle configuration (must be unique). If omitted, Terraform will assign a random, unique name. * `on_create` - (Optional) A shell script (base64-encoded) that runs only once when the SageMaker AI Notebook Instance is created. * `on_start` - (Optional) A shell script (base64-encoded) that runs every time the SageMaker AI Notebook Instance is started including the time it's created. @@ -76,4 +77,4 @@ Using `terraform import`, import models using the `name`. 
For example: % terraform import aws_sagemaker_notebook_instance_lifecycle_configuration.lc foo ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/sagemaker_pipeline.html.markdown b/website/docs/cdktf/python/r/sagemaker_pipeline.html.markdown index f807b2fe2578..b816f4c47a9f 100644 --- a/website/docs/cdktf/python/r/sagemaker_pipeline.html.markdown +++ b/website/docs/cdktf/python/r/sagemaker_pipeline.html.markdown @@ -51,6 +51,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `pipeline_name` - (Required) The name of the pipeline. * `pipeline_description` - (Optional) A description of the pipeline. * `pipeline_display_name` - (Required) The display name of the pipeline. @@ -103,4 +104,4 @@ Using `terraform import`, import pipelines using the `pipeline_name`. For exampl % terraform import aws_sagemaker_pipeline.test_pipeline pipeline ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/sagemaker_project.html.markdown b/website/docs/cdktf/python/r/sagemaker_project.html.markdown index 040b93e94965..50e5ce604d00 100644 --- a/website/docs/cdktf/python/r/sagemaker_project.html.markdown +++ b/website/docs/cdktf/python/r/sagemaker_project.html.markdown @@ -40,6 +40,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `project_name` - (Required) The name of the Project. * `project_description` - (Optional) A description for the project. * `service_catalog_provisioning_details` - (Required) The product ID and provisioning artifact ID to provision a service catalog. See [Service Catalog Provisioning Details](#service-catalog-provisioning-details) below. @@ -91,4 +92,4 @@ Using `terraform import`, import SageMaker AI Projects using the `project_name`. % terraform import aws_sagemaker_project.example example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/sagemaker_servicecatalog_portfolio_status.html.markdown b/website/docs/cdktf/python/r/sagemaker_servicecatalog_portfolio_status.html.markdown index 75b373547dd0..0338f3d30b37 100644 --- a/website/docs/cdktf/python/r/sagemaker_servicecatalog_portfolio_status.html.markdown +++ b/website/docs/cdktf/python/r/sagemaker_servicecatalog_portfolio_status.html.markdown @@ -37,6 +37,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `status` - (Required) Whether Service Catalog is enabled or disabled in SageMaker. Valid values are `Enabled` and `Disabled`. ## Attribute Reference @@ -70,4 +71,4 @@ Using `terraform import`, import models using the `id`. 
For example: % terraform import aws_sagemaker_servicecatalog_portfolio_status.example us-east-1 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/sagemaker_space.html.markdown b/website/docs/cdktf/python/r/sagemaker_space.html.markdown index c392c7a3c687..cf83546d5fa6 100644 --- a/website/docs/cdktf/python/r/sagemaker_space.html.markdown +++ b/website/docs/cdktf/python/r/sagemaker_space.html.markdown @@ -38,6 +38,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `domain_id` - (Required) The ID of the associated Domain. * `ownership_settings` - (Optional) A collection of ownership settings. Required if `space_sharing_settings` is set. See [`ownership_settings` Block](#ownership_settings-block) below. * `space_display_name` - (Optional) The name of the space that appears in the SageMaker AI Studio UI. @@ -196,4 +197,4 @@ Using `terraform import`, import SageMaker AI Spaces using the `id`. 
For example % terraform import aws_sagemaker_space.test_space arn:aws:sagemaker:us-west-2:123456789012:space/domain-id/space-name ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/sagemaker_studio_lifecycle_config.html.markdown b/website/docs/cdktf/python/r/sagemaker_studio_lifecycle_config.html.markdown index c9f45a8f84f2..d146ecb14550 100644 --- a/website/docs/cdktf/python/r/sagemaker_studio_lifecycle_config.html.markdown +++ b/website/docs/cdktf/python/r/sagemaker_studio_lifecycle_config.html.markdown @@ -40,6 +40,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +- `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). - `studio_lifecycle_config_name` - (Required) The name of the Studio Lifecycle Configuration to create. - `studio_lifecycle_config_app_type` - (Required) The App type that the Lifecycle Configuration is attached to. Valid values are `JupyterServer`, `JupyterLab`, `CodeEditor` and `KernelGateway`. - `studio_lifecycle_config_content` - (Required) The content of your Studio Lifecycle Configuration script. This content must be base64 encoded. 
@@ -78,4 +79,4 @@ Using `terraform import`, import SageMaker AI Studio Lifecycle Configs using the % terraform import aws_sagemaker_studio_lifecycle_config.example example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/sagemaker_user_profile.html.markdown b/website/docs/cdktf/python/r/sagemaker_user_profile.html.markdown index d689fd1d95d1..e199e7066b47 100644 --- a/website/docs/cdktf/python/r/sagemaker_user_profile.html.markdown +++ b/website/docs/cdktf/python/r/sagemaker_user_profile.html.markdown @@ -38,6 +38,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `domain_id` - (Required) The ID of the associated Domain. * `single_sign_on_user_identifier` - (Optional) A specifier for the type of value specified in `single_sign_on_user_value`. Currently, the only supported value is `UserName`. If the Domain's AuthMode is SSO, this field is required. If the Domain's AuthMode is not SSO, this field cannot be specified. * `single_sign_on_user_value` - (Required) The username of the associated AWS Single Sign-On User for this User Profile. If the Domain's AuthMode is SSO, this field is required, and must match a valid username of a user in your directory. If the Domain's AuthMode is not SSO, this field cannot be specified. @@ -236,13 +237,40 @@ This resource supports the following arguments: This resource exports the following attributes in addition to the arguments above: -* `id` - The user profile Amazon Resource Name (ARN). * `arn` - The user profile Amazon Resource Name (ARN). 
* `home_efs_file_system_uid` - The ID of the user's profile in the Amazon Elastic File System (EFS) volume. * `tags_all` - A map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_sagemaker_user_profile.example + identity = { + domain_id = "domain-id" + user_profile_name = "profile-name" + } +} + +resource "aws_sagemaker_user_profile" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `domain_id` (String) SageMaker domain ID. +* `user_profile_name` (String) Name of the user profile. + +#### Optional + +* `account_id` (String) AWS Account where this resource is managed. +* `region` (String) Region where this resource is managed. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import SageMaker AI User Profiles using the `arn`. For example: ```python @@ -257,13 +285,13 @@ from imports.aws.sagemaker_user_profile import SagemakerUserProfile class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) - SagemakerUserProfile.generate_config_for_import(self, "testUserProfile", "arn:aws:sagemaker:us-west-2:123456789012:user-profile/domain-id/profile-name") + SagemakerUserProfile.generate_config_for_import(self, "example", "arn:aws:sagemaker:us-west-2:123456789012:user-profile/domain-id/profile-name") ``` Using `terraform import`, import SageMaker AI User Profiles using the `arn`. 
For example: ```console -% terraform import aws_sagemaker_user_profile.test_user_profile arn:aws:sagemaker:us-west-2:123456789012:user-profile/domain-id/profile-name +% terraform import aws_sagemaker_user_profile.example arn:aws:sagemaker:us-west-2:123456789012:user-profile/domain-id/profile-name ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/sagemaker_workforce.html.markdown b/website/docs/cdktf/python/r/sagemaker_workforce.html.markdown index 7eb8ac6c3ae5..bfeb6a8228c5 100644 --- a/website/docs/cdktf/python/r/sagemaker_workforce.html.markdown +++ b/website/docs/cdktf/python/r/sagemaker_workforce.html.markdown @@ -91,6 +91,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `workforce_name` - (Required) The name of the Workforce (must be unique). * `cognito_config` - (Optional) Use this parameter to configure an Amazon Cognito private workforce. A single Cognito workforce is created using and corresponds to a single Amazon Cognito user pool. Conflicts with `oidc_config`. see [Cognito Config](#cognito-config) details below. * `oidc_config` - (Optional) Use this parameter to configure a private workforce using your own OIDC Identity Provider. Conflicts with `cognito_config`. see [OIDC Config](#oidc-config) details below. 
@@ -159,4 +160,4 @@ Using `terraform import`, import SageMaker AI Workforces using the `workforce_na % terraform import aws_sagemaker_workforce.example example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/sagemaker_workteam.html.markdown b/website/docs/cdktf/python/r/sagemaker_workteam.html.markdown index 9491c5b6748b..0b4a1f53f810 100644 --- a/website/docs/cdktf/python/r/sagemaker_workteam.html.markdown +++ b/website/docs/cdktf/python/r/sagemaker_workteam.html.markdown @@ -74,6 +74,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` - (Required) A description of the work team. * `workforce_name` - (Optional) The name of the workforce. * `workteam_name` - (Required) The name of the Workteam (must be unique). @@ -148,4 +149,4 @@ Using `terraform import`, import SageMaker AI Workteams using the `workteam_name % terraform import aws_sagemaker_workteam.example example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/scheduler_schedule.html.markdown b/website/docs/cdktf/python/r/scheduler_schedule.html.markdown index c392683f79ca..fca0d80c9ed7 100644 --- a/website/docs/cdktf/python/r/scheduler_schedule.html.markdown +++ b/website/docs/cdktf/python/r/scheduler_schedule.html.markdown @@ -92,12 +92,14 @@ The following arguments are required: The following arguments are optional: +* `action_after_completion` - (Optional) Action that applies to the schedule after completing invocation of the target. Valid values are `NONE` and `DELETE`. Defaults to `NONE`. 
* `description` - (Optional) Brief description of the schedule. * `end_date` - (Optional) The date, in UTC, before which the schedule can invoke its target. Depending on the schedule's recurrence expression, invocations might stop on, or before, the end date you specify. EventBridge Scheduler ignores the end date for one-time schedules. Example: `2030-01-01T01:00:00Z`. * `group_name` - (Optional, Forces new resource) Name of the schedule group to associate with this schedule. When omitted, the `default` schedule group is used. * `kms_key_arn` - (Optional) ARN for the customer managed KMS key that EventBridge Scheduler will use to encrypt and decrypt your data. * `name` - (Optional, Forces new resource) Name of the schedule. If omitted, Terraform will assign a random, unique name. Conflicts with `name_prefix`. * `name_prefix` - (Optional, Forces new resource) Creates a unique name beginning with the specified prefix. Conflicts with `name`. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `schedule_expression_timezone` - (Optional) Timezone in which the scheduling expression is evaluated. Defaults to `UTC`. Example: `Australia/Sydney`. * `start_date` - (Optional) The date, in UTC, after which the schedule can begin invoking its target. Depending on the schedule's recurrence expression, invocations might occur on, or after, the start date you specify. EventBridge Scheduler ignores the start date for one-time schedules. Example: `2030-01-01T01:00:00Z`. * `state` - (Optional) Specifies whether the schedule is enabled or disabled. One of: `ENABLED` (default), `DISABLED`. 
@@ -116,6 +118,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `dead_letter_config` - (Optional) Information about an Amazon SQS queue that EventBridge Scheduler uses as a dead-letter queue for your schedule. If specified, EventBridge Scheduler delivers failed events that could not be successfully delivered to a target to the queue. Detailed below. * `ecs_parameters` - (Optional) Templated target type for the Amazon ECS [`RunTask`](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_RunTask.html) API operation. Detailed below. * `eventbridge_parameters` - (Optional) Templated target type for the EventBridge [`PutEvents`](https://docs.aws.amazon.com/eventbridge/latest/APIReference/API_PutEvents.html) API operation. Detailed below. @@ -137,6 +140,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `capacity_provider_strategy` - (Optional) Up to `6` capacity provider strategies to use for the task. Detailed below. * `enable_ecs_managed_tags` - (Optional) Specifies whether to enable Amazon ECS managed tags for the task. For more information, see [Tagging Your Amazon ECS Resources](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-using-tags.html) in the Amazon ECS Developer Guide. 
* `enable_execute_command` - (Optional) Specifies whether to enable the execute command functionality for the containers in this task. @@ -232,4 +236,4 @@ Using `terraform import`, import schedules using the combination `group_name/nam % terraform import aws_scheduler_schedule.example my-schedule-group/my-schedule ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/scheduler_schedule_group.html.markdown b/website/docs/cdktf/python/r/scheduler_schedule_group.html.markdown index 8f410eae1c02..bce49b81e6b7 100644 --- a/website/docs/cdktf/python/r/scheduler_schedule_group.html.markdown +++ b/website/docs/cdktf/python/r/scheduler_schedule_group.html.markdown @@ -39,6 +39,7 @@ class MyConvertedCode(TerraformStack): The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Optional, Forces new resource) Name of the schedule group. If omitted, Terraform will assign a random, unique name. Conflicts with `name_prefix`. * `name_prefix` - (Optional, Forces new resource) Creates a unique name beginning with the specified prefix. Conflicts with `name`. * `tags` - (Optional) Key-value mapping of resource tags. If configured with a provider [`default_tags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. @@ -86,4 +87,4 @@ Using `terraform import`, import schedule groups using the `name`. 
For example: % terraform import aws_scheduler_schedule_group.example my-schedule-group ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/schemas_discoverer.html.markdown b/website/docs/cdktf/python/r/schemas_discoverer.html.markdown index f14fc070caac..4126161f27c6 100644 --- a/website/docs/cdktf/python/r/schemas_discoverer.html.markdown +++ b/website/docs/cdktf/python/r/schemas_discoverer.html.markdown @@ -42,6 +42,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `source_arn` - (Required) The ARN of the event bus to discover event schemas on. * `description` - (Optional) The description of the discoverer. Maximum of 256 characters. * `tags` - (Optional) A map of tags to assign to the resource. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. @@ -79,4 +80,4 @@ Using `terraform import`, import EventBridge discoverers using the `id`. 
For exa % terraform import aws_schemas_discoverer.test 123 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/schemas_registry.html.markdown b/website/docs/cdktf/python/r/schemas_registry.html.markdown index 85fa26279864..50d6f2706fe7 100644 --- a/website/docs/cdktf/python/r/schemas_registry.html.markdown +++ b/website/docs/cdktf/python/r/schemas_registry.html.markdown @@ -38,6 +38,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The name of the custom event schema registry. Maximum of 64 characters consisting of lower case letters, upper case letters, 0-9, ., -, _. * `description` - (Optional) The description of the discoverer. Maximum of 256 characters. * `tags` - (Optional) A map of tags to assign to the resource. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. @@ -74,4 +75,4 @@ Using `terraform import`, import EventBridge schema registries using the `name`. 
% terraform import aws_schemas_registry.test my_own_registry ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/schemas_registry_policy.html.markdown b/website/docs/cdktf/python/r/schemas_registry_policy.html.markdown index 5c454aac87f3..81fd11ddcfbe 100644 --- a/website/docs/cdktf/python/r/schemas_registry_policy.html.markdown +++ b/website/docs/cdktf/python/r/schemas_registry_policy.html.markdown @@ -54,8 +54,9 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -The following arguments are required: +This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `registry_name` - (Required) Name of EventBridge Schema Registry * `policy` - (Required) Resource Policy for EventBridge Schema Registry @@ -96,4 +97,4 @@ Using `terraform import`, import EventBridge Schema Registry Policy using the `r % terraform import aws_schemas_registry_policy.example example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/schemas_schema.html.markdown b/website/docs/cdktf/python/r/schemas_schema.html.markdown index c3e7f98359ae..41fb64832834 100644 --- a/website/docs/cdktf/python/r/schemas_schema.html.markdown +++ b/website/docs/cdktf/python/r/schemas_schema.html.markdown @@ -67,6 +67,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
* `name` - (Required) The name of the schema. Maximum of 385 characters consisting of lower case letters, upper case letters, ., -, _, @. * `content` - (Required) The schema specification. Must be a valid Open API 3.0 spec. * `registry_name` - (Required) The name of the registry in which this schema belongs. @@ -109,4 +110,4 @@ Using `terraform import`, import EventBridge schema using the `name` and `regist % terraform import aws_schemas_schema.test name/registry ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/secretsmanager_secret.html.markdown b/website/docs/cdktf/python/r/secretsmanager_secret.html.markdown index 008ed004bc7b..bd82a0076988 100644 --- a/website/docs/cdktf/python/r/secretsmanager_secret.html.markdown +++ b/website/docs/cdktf/python/r/secretsmanager_secret.html.markdown @@ -37,6 +37,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` - (Optional) Description of the secret. * `kms_key_id` - (Optional) ARN or Id of the AWS KMS key to be used to encrypt the secret values in the versions stored in this secret. If you need to reference a CMK in a different account, you can use only the key ARN. If you don't specify this value, then Secrets Manager defaults to using the AWS account's default KMS key (the one named `aws/secretsmanager`). If the default KMS key with that name doesn't yet exist, then AWS Secrets Manager creates it for you automatically the first time. * `name_prefix` - (Optional) Creates a unique name beginning with the specified prefix. Conflicts with `name`. 
@@ -69,6 +70,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_secretsmanager_secret.example + identity = { + "arn" = "arn:aws:secretsmanager:us-east-1:123456789012:secret:example-123456" + } +} + +resource "aws_secretsmanager_secret" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the Secrets Manager secret. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import `aws_secretsmanager_secret` using the secret Amazon Resource Name (ARN). For example: ```python @@ -92,4 +114,4 @@ Using `terraform import`, import `aws_secretsmanager_secret` using the secret Am % terraform import aws_secretsmanager_secret.example arn:aws:secretsmanager:us-east-1:123456789012:secret:example-123456 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/secretsmanager_secret_policy.html.markdown b/website/docs/cdktf/python/r/secretsmanager_secret_policy.html.markdown index 6853d8bb82a9..12e53bb8a2ac 100644 --- a/website/docs/cdktf/python/r/secretsmanager_secret_policy.html.markdown +++ b/website/docs/cdktf/python/r/secretsmanager_secret_policy.html.markdown @@ -66,6 +66,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
* `block_public_policy` - (Optional) Makes an optional API call to Zelkova to validate the Resource Policy to prevent broad access to your secret. ## Attribute Reference @@ -76,6 +77,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_secretsmanager_secret_policy.example + identity = { + "arn" = "arn:aws:secretsmanager:us-east-1:123456789012:secret:example-123456" + } +} + +resource "aws_secretsmanager_secret_policy" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the Secrets Manager secret. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import `aws_secretsmanager_secret_policy` using the secret Amazon Resource Name (ARN). 
For example: ```python @@ -99,4 +121,4 @@ Using `terraform import`, import `aws_secretsmanager_secret_policy` using the se % terraform import aws_secretsmanager_secret_policy.example arn:aws:secretsmanager:us-east-1:123456789012:secret:example-123456 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/secretsmanager_secret_rotation.html.markdown b/website/docs/cdktf/python/r/secretsmanager_secret_rotation.html.markdown index 8b8d36751454..8e53dae86c79 100644 --- a/website/docs/cdktf/python/r/secretsmanager_secret_rotation.html.markdown +++ b/website/docs/cdktf/python/r/secretsmanager_secret_rotation.html.markdown @@ -49,6 +49,7 @@ To enable automatic secret rotation, the Secrets Manager service requires usage This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `secret_id` - (Required) Specifies the secret to which you want to add a new version. You can specify either the Amazon Resource Name (ARN) or the friendly name of the secret. The secret must already exist. * `rotate_immediately` - (Optional) Specifies whether to rotate the secret immediately or wait until the next scheduled rotation window. The rotation schedule is defined in `rotation_rules`. For secrets that use a Lambda rotation function to rotate, if you don't immediately rotate the secret, Secrets Manager tests the rotation configuration by running the testSecret step (https://docs.aws.amazon.com/secretsmanager/latest/userguide/rotate-secrets_how.html) of the Lambda rotation function. The test creates an AWSPENDING version of the secret and then removes it. Defaults to `true`. 
* `rotation_lambda_arn` - (Optional) Specifies the ARN of the Lambda function that can rotate the secret. Must be supplied if the secret is not managed by AWS. @@ -70,6 +71,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_secretsmanager_secret_rotation.example + identity = { + "arn" = "arn:aws:secretsmanager:us-east-1:123456789012:secret:example-123456" + } +} + +resource "aws_secretsmanager_secret_rotation" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the Secrets Manager secret. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import `aws_secretsmanager_secret_rotation` using the secret Amazon Resource Name (ARN). 
For example: ```python @@ -93,4 +115,4 @@ Using `terraform import`, import `aws_secretsmanager_secret_rotation` using the % terraform import aws_secretsmanager_secret_rotation.example arn:aws:secretsmanager:us-east-1:123456789012:secret:example-123456 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/secretsmanager_secret_version.html.markdown b/website/docs/cdktf/python/r/secretsmanager_secret_version.html.markdown index 29e826076303..3496e4ea2a66 100644 --- a/website/docs/cdktf/python/r/secretsmanager_secret_version.html.markdown +++ b/website/docs/cdktf/python/r/secretsmanager_secret_version.html.markdown @@ -14,7 +14,7 @@ Provides a resource to manage AWS Secrets Manager secret version including its s ~> **NOTE:** If the `AWSCURRENT` staging label is present on this version during resource deletion, that label cannot be removed and will be skipped to prevent errors when fully deleting the secret. That label will leave this secret version active even after the resource is deleted from Terraform unless the secret itself is deleted. Move the `AWSCURRENT` staging label before or after deleting this resource from Terraform to fully trigger version deprecation if necessary. --> **Note:** Write-Only argument `secret_string_wo` is available to use in place of `secret_string`. Write-Only argumentss are supported in HashiCorp Terraform 1.11.0 and later. [Learn more](https://developer.hashicorp.com/terraform/language/v1.11.x/resources/ephemeral#write-only-arguments). +-> **Note:** Write-Only argument `secret_string_wo` is available to use in place of `secret_string`. Write-Only arguments are supported in HashiCorp Terraform 1.11.0 and later. [Learn more](https://developer.hashicorp.com/terraform/language/resources/ephemeral#write-only-arguments). 
## Example Usage @@ -93,6 +93,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `secret_id` - (Required) Specifies the secret to which you want to add a new version. You can specify either the Amazon Resource Name (ARN) or the friendly name of the secret. The secret must already exist. * `secret_string` - (Optional) Specifies text data that you want to encrypt and store in this version of the secret. This is required if `secret_binary` or `secret_string_wo` is not set. * `secret_string_wo` - (Optional) Specifies text data that you want to encrypt and store in this version of the secret. This is required if `secret_binary` or `secret_string` is not set. @@ -112,6 +113,34 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_secretsmanager_secret_version.example + identity = { + secret_id = "arn:aws:secretsmanager:us-east-1:123456789012:secret:example-123456" + version_id = "xxxxx-xxxxxxx-xxxxxxx-xxxxx" + } +} + +resource "aws_secretsmanager_secret_version" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `secret_id` - (String) ID of the secret. +* `version_id` - (String) ID of the secret version. + +#### Optional + +* `account_id` (String) AWS Account where this resource is managed. +* `region` (String) Region where this resource is managed. 
+ In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import `aws_secretsmanager_secret_version` using the secret ID and version ID. For example: ```python @@ -135,4 +164,4 @@ Using `terraform import`, import `aws_secretsmanager_secret_version` using the s % terraform import aws_secretsmanager_secret_version.example 'arn:aws:secretsmanager:us-east-1:123456789012:secret:example-123456|xxxxx-xxxxxxx-xxxxxxx-xxxxx' ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/security_group.html.markdown b/website/docs/cdktf/python/r/security_group.html.markdown index 1b7e21a58359..a7cd8dfdd592 100644 --- a/website/docs/cdktf/python/r/security_group.html.markdown +++ b/website/docs/cdktf/python/r/security_group.html.markdown @@ -118,10 +118,9 @@ from cdktf import TerraformStack from imports.aws.security_group import SecurityGroup from imports.aws.vpc_endpoint import VpcEndpoint class MyConvertedCode(TerraformStack): - def __init__(self, scope, name, *, serviceName, vpcId): + def __init__(self, scope, name, *, vpcId): super().__init__(scope, name) my_endpoint = VpcEndpoint(self, "my_endpoint", - service_name=service_name, vpc_id=vpc_id ) SecurityGroup(self, "example", @@ -312,6 +311,7 @@ resource "null_resource" "example" { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` - (Optional, Forces new resource) Security group description. Defaults to `Managed by Terraform`. Cannot be `""`. **NOTE**: This field maps to the AWS `GroupDescription` attribute, for which there is no Update API. 
If you'd like to classify your security groups in a way that can be updated, use `tags`. * `egress` - (Optional, VPC only) Configuration block for egress rules. Can be specified multiple times for each egress rule. Each egress block supports fields documented below. This argument is processed in [attribute-as-blocks mode](https://www.terraform.io/docs/configuration/attr-as-blocks.html). * `ingress` - (Optional) Configuration block for ingress rules. Can be specified multiple times for each ingress rule. Each ingress block supports fields documented below. This argument is processed in [attribute-as-blocks mode](https://www.terraform.io/docs/configuration/attr-as-blocks.html). @@ -381,6 +381,32 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_security_group.example + identity = { + id = "sg-903004f8" + } +} + +resource "aws_security_group" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `id` (String) ID of the security group. + +#### Optional + +* `account_id` (String) AWS Account where this resource is managed. +* `region` (String) Region where this resource is managed. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Security Groups using the security group `id`. 
For example: ```python @@ -395,13 +421,13 @@ from imports.aws.security_group import SecurityGroup class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) - SecurityGroup.generate_config_for_import(self, "elbSg", "sg-903004f8") + SecurityGroup.generate_config_for_import(self, "example", "sg-903004f8") ``` Using `terraform import`, import Security Groups using the security group `id`. For example: ```console -% terraform import aws_security_group.elb_sg sg-903004f8 +% terraform import aws_security_group.example sg-903004f8 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/security_group_rule.html.markdown b/website/docs/cdktf/python/r/security_group_rule.html.markdown index 87b1070979e0..20a22bbb20e9 100644 --- a/website/docs/cdktf/python/r/security_group_rule.html.markdown +++ b/website/docs/cdktf/python/r/security_group_rule.html.markdown @@ -66,10 +66,9 @@ from cdktf import TerraformStack from imports.aws.security_group_rule import SecurityGroupRule from imports.aws.vpc_endpoint import VpcEndpoint class MyConvertedCode(TerraformStack): - def __init__(self, scope, name, *, serviceName, vpcId): + def __init__(self, scope, name, *, vpcId): super().__init__(scope, name) my_endpoint = VpcEndpoint(self, "my_endpoint", - service_name=service_name, vpc_id=vpc_id ) SecurityGroupRule(self, "allow_all", @@ -101,7 +100,7 @@ class MyConvertedCode(TerraformStack): super().__init__(scope, name) current = DataAwsRegion(self, "current") s3 = DataAwsPrefixList(self, "s3", - name="com.amazonaws.${" + current.name + "}.s3" + name="com.amazonaws.${" + current.region + "}.s3" ) SecurityGroupRule(self, "s3_gateway_egress", description="S3 Gateway Egress", @@ -116,8 +115,9 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -The following arguments are required: +This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be 
[managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `from_port` - (Required) Start port (or ICMP type number if protocol is "icmp" or "icmpv6"). * `protocol` - (Required) Protocol. If not icmp, icmpv6, tcp, udp, or all use the [protocol number](https://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml) * `security_group_id` - (Required) Security group to apply this rule to. @@ -127,6 +127,7 @@ or `egress` (outbound). The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). ~> **Note** Although `cidr_blocks`, `ipv6_cidr_blocks`, `prefix_list_ids`, and `source_security_group_id` are all marked as optional, you _must_ provide one of them in order to configure the source of the traffic. * `cidr_blocks` - (Optional) List of CIDR blocks. Cannot be specified with `source_security_group_id` or `self`. 
@@ -320,4 +321,4 @@ Import a rule that has itself and an IPv6 CIDR block as sources: % terraform import aws_security_group_rule.rule_name sg-656c65616e6f72_ingress_tcp_80_80_self_2001:db8::/48 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/securityhub_account.html.markdown b/website/docs/cdktf/python/r/securityhub_account.html.markdown index 3b04d2b5dd51..976ed1006a11 100644 --- a/website/docs/cdktf/python/r/securityhub_account.html.markdown +++ b/website/docs/cdktf/python/r/securityhub_account.html.markdown @@ -35,6 +35,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `enable_default_standards` - (Optional) Whether to enable the security standards that Security Hub has designated as automatically enabled including: ` AWS Foundational Security Best Practices v1.0.0` and `CIS AWS Foundations Benchmark v1.2.0`. Defaults to `true`. * `control_finding_generator` - (Optional) Updates whether the calling account has consolidated control findings turned on. If the value for this field is set to `SECURITY_CONTROL`, Security Hub generates a single finding for a control check even when the check applies to multiple enabled standards. If the value for this field is set to `STANDARD_CONTROL`, Security Hub generates separate findings for a control check when the check applies to multiple enabled standards. For accounts that are part of an organization, this value can only be updated in the administrator account. * `auto_enable_controls` - (Optional) Whether to automatically enable new controls when they are added to standards that are enabled. 
By default, this is set to true, and new controls are enabled automatically. To not automatically enable new controls, set this to false. @@ -71,4 +72,4 @@ Using `terraform import`, import an existing Security Hub enabled account using % terraform import aws_securityhub_account.example 123456789012 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/securityhub_action_target.html.markdown b/website/docs/cdktf/python/r/securityhub_action_target.html.markdown index a40353f78bf9..b62b2d408bed 100644 --- a/website/docs/cdktf/python/r/securityhub_action_target.html.markdown +++ b/website/docs/cdktf/python/r/securityhub_action_target.html.markdown @@ -42,6 +42,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The description for the custom action target. * `identifier` - (Required) The ID for the custom action target. * `description` - (Required) The name of the custom action target. 
@@ -77,4 +78,4 @@ Using `terraform import`, import Security Hub custom action using the action tar % terraform import aws_securityhub_action_target.example arn:aws:securityhub:eu-west-1:312940875350:action/custom/a ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/securityhub_automation_rule.html.markdown b/website/docs/cdktf/python/r/securityhub_automation_rule.html.markdown index 57481b553f4c..1cbf9dd9b1f5 100644 --- a/website/docs/cdktf/python/r/securityhub_automation_rule.html.markdown +++ b/website/docs/cdktf/python/r/securityhub_automation_rule.html.markdown @@ -69,6 +69,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `actions` - (Required) A block that specifies one or more actions to update finding fields if a finding matches the conditions specified in `Criteria`. [Documented below](#actions). * `criteria` - (Required) A block that specifies a set of ASFF finding field attributes and corresponding expected values that Security Hub uses to filter findings. [Documented below](#criteria). * `description` - (Required) The description of the rule. @@ -217,6 +218,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. 
For example: + +```terraform +import { + to = aws_securityhub_automation_rule.example + identity = { + "arn" = "arn:aws:securityhub:us-east-1:123456789012:automation-rule/a1b2c3d4-5678-90ab-cdef-EXAMPLE11111" + } +} + +resource "aws_securityhub_automation_rule" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the Security Hub automation rule. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Security Hub Automation Rule using their ARN. For example: ```python @@ -240,4 +262,4 @@ Using `terraform import`, import Security Hub automation rule using their ARN. F % terraform import aws_securityhub_automation_rule.example arn:aws:securityhub:us-west-2:123456789012:automation-rule/473eddde-f5c4-4ae5-85c7-e922f271fffc ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/securityhub_configuration_policy.html.markdown b/website/docs/cdktf/python/r/securityhub_configuration_policy.html.markdown index 8969afa26673..3910ca9e9791 100644 --- a/website/docs/cdktf/python/r/securityhub_configuration_policy.html.markdown +++ b/website/docs/cdktf/python/r/securityhub_configuration_policy.html.markdown @@ -149,6 +149,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `configuration_policy` - (Required) Defines how Security Hub is configured. See [below](#configuration_policy). * `description` - (Optional) The description of the configuration policy. * `name` - (Required) The name of the configuration policy. 
@@ -224,4 +225,4 @@ Using `terraform import`, import an existing Security Hub enabled account using % terraform import aws_securityhub_configuration_policy.example "00000000-1111-2222-3333-444444444444" ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/securityhub_configuration_policy_association.markdown b/website/docs/cdktf/python/r/securityhub_configuration_policy_association.markdown index 938cd37bc3a1..bfe650b84dc1 100644 --- a/website/docs/cdktf/python/r/securityhub_configuration_policy_association.markdown +++ b/website/docs/cdktf/python/r/securityhub_configuration_policy_association.markdown @@ -79,6 +79,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `policy_id` - (Required) The universally unique identifier (UUID) of the configuration policy. * `target_id` - (Required, Forces new resource) The identifier of the target account, organizational unit, or the root to associate with the specified configuration. 
@@ -120,4 +121,4 @@ Using `terraform import`, import an existing Security Hub enabled account using % terraform import aws_securityhub_configuration_policy_association.example_account_association 123456789012 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/securityhub_finding_aggregator.html.markdown b/website/docs/cdktf/python/r/securityhub_finding_aggregator.html.markdown index 5bb2c7defb57..d29ae9bb0738 100644 --- a/website/docs/cdktf/python/r/securityhub_finding_aggregator.html.markdown +++ b/website/docs/cdktf/python/r/securityhub_finding_aggregator.html.markdown @@ -128,6 +128,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +- `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). - `linking_mode` - (Required) Indicates whether to aggregate findings from all of the available Regions or from a specified list. The options are `ALL_REGIONS`, `ALL_REGIONS_EXCEPT_SPECIFIED`, `SPECIFIED_REGIONS` or `NO_REGIONS`. When `ALL_REGIONS` or `ALL_REGIONS_EXCEPT_SPECIFIED` are used, Security Hub will automatically aggregate findings from new Regions as Security Hub supports them and you opt into them. 
- `specified_regions` - (Optional) List of regions to include or exclude (required if `linking_mode` is set to `ALL_REGIONS_EXCEPT_SPECIFIED` or `SPECIFIED_REGIONS`) @@ -162,4 +163,4 @@ Using `terraform import`, import an existing Security Hub finding aggregator usi % terraform import aws_securityhub_finding_aggregator.example arn:aws:securityhub:eu-west-1:123456789098:finding-aggregator/abcd1234-abcd-1234-1234-abcdef123456 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/securityhub_insight.html.markdown b/website/docs/cdktf/python/r/securityhub_insight.html.markdown index f768ddba2005..7279d81db5a2 100644 --- a/website/docs/cdktf/python/r/securityhub_insight.html.markdown +++ b/website/docs/cdktf/python/r/securityhub_insight.html.markdown @@ -180,8 +180,9 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -The following arguments are required: +This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `filters` - (Required) A configuration block including one or more (up to 10 distinct) attributes used to filter the findings included in the insight. The insight only includes findings that match criteria defined in the filters. See [filters](#filters) below for more details. * `group_by_attribute` - (Required) The attribute used to group the findings for the insight e.g., if an insight is grouped by `ResourceId`, then the insight produces a list of resource identifiers. * `name` - (Required) The name of the custom insight. @@ -372,4 +373,4 @@ Using `terraform import`, import Security Hub insights using the ARN. 
For exampl % terraform import aws_securityhub_insight.example arn:aws:securityhub:us-west-2:1234567890:insight/1234567890/custom/91299ed7-abd0-4e44-a858-d0b15e37141a ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/securityhub_invite_accepter.html.markdown b/website/docs/cdktf/python/r/securityhub_invite_accepter.html.markdown index e829add482a7..9a9842e3672d 100644 --- a/website/docs/cdktf/python/r/securityhub_invite_accepter.html.markdown +++ b/website/docs/cdktf/python/r/securityhub_invite_accepter.html.markdown @@ -54,6 +54,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `master_id` - (Required) The account ID of the master Security Hub account whose invitation you're accepting. ## Attribute Reference @@ -87,4 +88,4 @@ Using `terraform import`, import Security Hub invite acceptance using the accoun % terraform import aws_securityhub_invite_accepter.example 123456789012 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/securityhub_member.html.markdown b/website/docs/cdktf/python/r/securityhub_member.html.markdown index ad430dc1ab2a..48a71ed8b391 100644 --- a/website/docs/cdktf/python/r/securityhub_member.html.markdown +++ b/website/docs/cdktf/python/r/securityhub_member.html.markdown @@ -42,6 +42,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `account_id` - (Required) The ID of the member AWS account. * `email` - (Optional) The email of the member AWS account. * `invite` - (Optional) Boolean whether to invite the account to Security Hub as a member. Defaults to `false`. @@ -79,4 +80,4 @@ Using `terraform import`, import Security Hub members using their account ID. Fo % terraform import aws_securityhub_member.example 123456789012 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/securityhub_organization_admin_account.html.markdown b/website/docs/cdktf/python/r/securityhub_organization_admin_account.html.markdown index 3a7645e500d9..adf97b7bf6ee 100644 --- a/website/docs/cdktf/python/r/securityhub_organization_admin_account.html.markdown +++ b/website/docs/cdktf/python/r/securityhub_organization_admin_account.html.markdown @@ -55,6 +55,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `admin_account_id` - (Required) The AWS account identifier of the account to designate as the Security Hub administrator account. 
## Attribute Reference @@ -88,4 +89,4 @@ Using `terraform import`, import Security Hub Organization Admin Accounts using % terraform import aws_securityhub_organization_admin_account.example 123456789012 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/securityhub_organization_configuration.html.markdown b/website/docs/cdktf/python/r/securityhub_organization_configuration.html.markdown index 960bd2d39272..e629b2314654 100644 --- a/website/docs/cdktf/python/r/securityhub_organization_configuration.html.markdown +++ b/website/docs/cdktf/python/r/securityhub_organization_configuration.html.markdown @@ -101,6 +101,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `auto_enable` - (Required) Whether to automatically enable Security Hub for new accounts in the organization. * `auto_enable_standards` - (Optional) Whether to automatically enable Security Hub default standards for new member accounts in the organization. By default, this parameter is equal to `DEFAULT`, and new member accounts are automatically enabled with default Security Hub standards. To opt out of enabling default standards for new member accounts, set this parameter equal to `NONE`. * `organization_configuration` - (Optional) Provides information about the way an organization is configured in Security Hub. 
@@ -148,4 +149,4 @@ Using `terraform import`, import an existing Security Hub enabled account using % terraform import aws_securityhub_organization_configuration.example 123456789012 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/securityhub_product_subscription.html.markdown b/website/docs/cdktf/python/r/securityhub_product_subscription.html.markdown index 41a5aebc9b88..9064e155871e 100644 --- a/website/docs/cdktf/python/r/securityhub_product_subscription.html.markdown +++ b/website/docs/cdktf/python/r/securityhub_product_subscription.html.markdown @@ -33,7 +33,7 @@ class MyConvertedCode(TerraformStack): aws_securityhub_product_subscription_example = SecurityhubProductSubscription(self, "example_2", depends_on=[example], - product_arn="arn:aws:securityhub:${" + current.name + "}:733251395267:product/alertlogic/althreatmanagement" + product_arn="arn:aws:securityhub:${" + current.region + "}:733251395267:product/alertlogic/althreatmanagement" ) # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. aws_securityhub_product_subscription_example.override_logical_id("example") @@ -43,6 +43,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `product_arn` - (Required) The ARN of the product that generates findings that you want to import into Security Hub - see below. Amazon maintains a list of [Product integrations in AWS Security Hub](https://docs.aws.amazon.com/securityhub/latest/userguide/securityhub-findings-providers.html) that changes over time. 
Any of the products on the linked [Available AWS service integrations](https://docs.aws.amazon.com/securityhub/latest/userguide/securityhub-internal-providers.html) or [Available third-party partner product integrations](https://docs.aws.amazon.com/securityhub/latest/userguide/securityhub-partner-providers.html) can be configured using `aws_securityhub_product_subscription`. @@ -114,4 +115,4 @@ Using `terraform import`, import Security Hub product subscriptions using `produ % terraform import aws_securityhub_product_subscription.example arn:aws:securityhub:eu-west-1:733251395267:product/alertlogic/althreatmanagement,arn:aws:securityhub:eu-west-1:123456789012:product-subscription/alertlogic/althreatmanagement ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/securityhub_standards_control.html.markdown b/website/docs/cdktf/python/r/securityhub_standards_control.html.markdown index 1bef812fefdb..e859988b0f23 100644 --- a/website/docs/cdktf/python/r/securityhub_standards_control.html.markdown +++ b/website/docs/cdktf/python/r/securityhub_standards_control.html.markdown @@ -49,21 +49,22 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `standards_control_arn` - (Required) The standards control ARN. See the AWS documentation for how to list existing controls using [`get-enabled-standards`](https://awscli.amazonaws.com/v2/documentation/api/latest/reference/securityhub/get-enabled-standards.html) and [`describe-standards-controls`](https://awscli.amazonaws.com/v2/documentation/api/latest/reference/securityhub/describe-standards-controls.html). 
-* `control_status` – (Required) The control status could be `ENABLED` or `DISABLED`. You have to specify `disabled_reason` argument for `DISABLED` control status. -* `disabled_reason` – (Optional) A description of the reason why you are disabling a security standard control. If you specify this attribute, `control_status` will be set to `DISABLED` automatically. +* `control_status` - (Required) The control status could be `ENABLED` or `DISABLED`. You have to specify `disabled_reason` argument for `DISABLED` control status. +* `disabled_reason` - (Optional) A description of the reason why you are disabling a security standard control. If you specify this attribute, `control_status` will be set to `DISABLED` automatically. ## Attribute Reference This resource exports the following attributes in addition to the arguments above: * `id` - The standard control ARN. -* `control_id` – The identifier of the security standard control. -* `control_status_updated_at` – The date and time that the status of the security standard control was most recently updated. -* `description` – The standard control longer description. Provides information about what the control is checking for. -* `related_requirements` – The list of requirements that are related to this control. -* `remediation_url` – A link to remediation information for the control in the Security Hub user documentation. -* `severity_rating` – The severity of findings generated from this security standard control. -* `title` – The standard control title. +* `control_id` - The identifier of the security standard control. +* `control_status_updated_at` - The date and time that the status of the security standard control was most recently updated. +* `description` - The standard control longer description. Provides information about what the control is checking for. +* `related_requirements` - The list of requirements that are related to this control. 
+* `remediation_url` - A link to remediation information for the control in the Security Hub user documentation. +* `severity_rating` - The severity of findings generated from this security standard control. +* `title` - The standard control title. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/securityhub_standards_control_association.html.markdown b/website/docs/cdktf/python/r/securityhub_standards_control_association.html.markdown index e19e84f7c16c..0bc47490b7f0 100644 --- a/website/docs/cdktf/python/r/securityhub_standards_control_association.html.markdown +++ b/website/docs/cdktf/python/r/securityhub_standards_control_association.html.markdown @@ -101,10 +101,11 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `updated_reason` - (Optional) The reason for updating the control's enablement status in the standard. Required when `association_status` is `DISABLED`. ## Attribute Reference This resource exports no additional attributes. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/securityhub_standards_subscription.html.markdown b/website/docs/cdktf/python/r/securityhub_standards_subscription.html.markdown index 2bc0ba7121f7..0ed47a76ee2b 100644 --- a/website/docs/cdktf/python/r/securityhub_standards_subscription.html.markdown +++ b/website/docs/cdktf/python/r/securityhub_standards_subscription.html.markdown @@ -36,7 +36,7 @@ class MyConvertedCode(TerraformStack): current = DataAwsRegion(self, "current") SecurityhubStandardsSubscription(self, "pci_321", depends_on=[example], - standards_arn="arn:aws:securityhub:${" + current.name + "}::standards/pci-dss/v/3.2.1" + standards_arn="arn:aws:securityhub:${" + current.region + "}::standards/pci-dss/v/3.2.1" ) ``` @@ -44,6 +44,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `standards_arn` - (Required) The ARN of a standard - see below. Currently available standards (remember to replace `${var.partition}` and `${var.region}` as appropriate): @@ -56,7 +57,9 @@ Currently available standards (remember to replace `${var.partition}` and `${var | CIS AWS Foundations Benchmark v1.4.0 | `arn:${var.partition}:securityhub:${var.region}::standards/cis-aws-foundations-benchmark/v/1.4.0` | | CIS AWS Foundations Benchmark v3.0.0 | `arn:${var.partition}:securityhub:${var.region}::standards/cis-aws-foundations-benchmark/v/3.0.0` | | NIST SP 800-53 Rev. 5 | `arn:${var.partition}:securityhub:${var.region}::standards/nist-800-53/v/5.0.0` | -| PCI DSS | `arn:${var.partition}:securityhub:${var.region}::standards/pci-dss/v/3.2.1` | +| NIST SP 800-171 Rev. 
2 | `arn:${var.partition}:securityhub:${var.region}::standards/nist-800-171/v/2.0.0` | +| PCI DSS v3.2.1 | `arn:${var.partition}:securityhub:${var.region}::standards/pci-dss/v/3.2.1` | +| PCI DSS v4.0.1 | `arn:${var.partition}:securityhub:${var.region}::standards/pci-dss/v/4.0.1` | ## Attribute Reference @@ -134,4 +137,4 @@ Using `terraform import`, import Security Hub standards subscriptions using the % terraform import aws_securityhub_standards_subscription.nist_800_53_rev_5 arn:aws:securityhub:eu-west-1:123456789012:subscription/nist-800-53/v/5.0.0 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/securitylake_aws_log_source.html.markdown b/website/docs/cdktf/python/r/securitylake_aws_log_source.html.markdown index 2f8e6b03756b..a37559a65d23 100644 --- a/website/docs/cdktf/python/r/securitylake_aws_log_source.html.markdown +++ b/website/docs/cdktf/python/r/securitylake_aws_log_source.html.markdown @@ -47,6 +47,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `source` - (Required) Specify the natively-supported AWS service to add as a source in Security Lake. `source` supports the following: @@ -88,4 +89,4 @@ Using `terraform import`, import AWS log sources using the source name. 
For exam % terraform import aws_securitylake_aws_log_source.example ROUTE53 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/securitylake_custom_log_source.html.markdown b/website/docs/cdktf/python/r/securitylake_custom_log_source.html.markdown index d499260c9fb7..cf001115c1e5 100644 --- a/website/docs/cdktf/python/r/securitylake_custom_log_source.html.markdown +++ b/website/docs/cdktf/python/r/securitylake_custom_log_source.html.markdown @@ -54,6 +54,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `configuration` - (Required) The configuration for the third-party custom source. * `crawler_configuration` - (Required) The configuration for the Glue Crawler for the third-party custom source. * `role_arn` - (Required) The Amazon Resource Name (ARN) of the AWS Identity and Access Management (IAM) role to be used by the AWS Glue crawler. @@ -103,4 +104,4 @@ Using `terraform import`, import Custom log sources using the source name. 
For e % terraform import aws_securitylake_custom_log_source.example example-name ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/securitylake_data_lake.html.markdown b/website/docs/cdktf/python/r/securitylake_data_lake.html.markdown index da0d2881da43..74464e3aaa63 100644 --- a/website/docs/cdktf/python/r/securitylake_data_lake.html.markdown +++ b/website/docs/cdktf/python/r/securitylake_data_lake.html.markdown @@ -87,6 +87,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `meta_store_manager_role_arn` - (Required) The Amazon Resource Name (ARN) used to create and update the AWS Glue table. This table contains partitions generated by the ingestion and normalization of AWS log sources and custom sources. * `configuration` - (Required) Specify the Region or Regions that will contribute data to the rollup region. * `tags` - (Optional) Key-value map of resource tags. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. @@ -139,6 +140,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. 
For example: + +```terraform +import { + to = aws_securitylake_data_lake.example + identity = { + "arn" = "arn:aws:securitylake:us-east-1:123456789012:data-lake/default" + } +} + +resource "aws_securitylake_data_lake" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the Security Lake data lake. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Security Hub standards subscriptions using the standards subscription ARN. For example: ```python @@ -162,4 +184,4 @@ Using `terraform import`, import Security Hub standards subscriptions using the % terraform import aws_securitylake_data_lake.example arn:aws:securitylake:eu-west-1:123456789012:data-lake/default ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/securitylake_subscriber.html.markdown b/website/docs/cdktf/python/r/securitylake_subscriber.html.markdown index aad1e1180b23..ccedbb2ce801 100644 --- a/website/docs/cdktf/python/r/securitylake_subscriber.html.markdown +++ b/website/docs/cdktf/python/r/securitylake_subscriber.html.markdown @@ -16,6 +16,8 @@ Terraform resource for managing an AWS Security Lake Subscriber. ## Example Usage +### Basic Usage + ```python # DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug from constructs import Construct @@ -48,10 +50,51 @@ class MyConvertedCode(TerraformStack): ) ``` +### Multiple Log Sources + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. 
+# +from imports.aws.securitylake_subscriber import SecuritylakeSubscriber +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + SecuritylakeSubscriber(self, "example", + access_type="S3", + depends_on=[aws_securitylake_data_lake_example], + source=[SecuritylakeSubscriberSource( + aws_log_source_resource=[SecuritylakeSubscriberSourceAwsLogSourceResource( + source_name="SH_FINDINGS", + source_version="2.0" + ) + ] + ), SecuritylakeSubscriberSource( + aws_log_source_resource=[SecuritylakeSubscriberSourceAwsLogSourceResource( + source_name="ROUTE53", + source_version="2.0" + ) + ] + ) + ], + subscriber_identity=[SecuritylakeSubscriberSubscriberIdentity( + external_id="example", + principal="1234567890" + ) + ], + subscriber_name="example-name" + ) +``` + ## Argument Reference This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `access_type` - (Optional) The Amazon S3 or Lake Formation access type. * `source` - (Required) The supported AWS services from which logs and events are collected. Security Lake supports log and event collection for natively supported AWS services. See [`source` Blocks](#source-blocks) below. * `subscriber_identity` - (Required) The AWS identity used to access your data. See [`subscriber_identity` Block](#subscriber_identity-block) below. @@ -77,8 +120,8 @@ The `subscriber_identity` block supports the following arguments: The `aws_log_source_resource` block supports the following arguments: -* `source_name` - (Required) Provides data expiration details of Amazon Security Lake object. -* `source_version` - (Optional) Provides data storage transition details of Amazon Security Lake object. 
+* `source_name` - (Required) The name for an AWS source. This must be a Regionally unique value. Valid values: `ROUTE53`, `VPC_FLOW`, `SH_FINDINGS`, `CLOUD_TRAIL_MGMT`, `LAMBDA_EXECUTION`, `S3_DATA`, `EKS_AUDIT` and `WAF`. +* `source_version` - (Optional) The version for an AWS source. This must be a Regionally unique value. ### `custom_log_source_resource` Block @@ -156,4 +199,4 @@ Using `terraform import`, import Security Lake subscriber using the subscriber I % terraform import aws_securitylake_subscriber.example 9f3bfe79-d543-474d-a93c-f3846805d208 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/securitylake_subscriber_notification.html.markdown b/website/docs/cdktf/python/r/securitylake_subscriber_notification.html.markdown index 652d9fc5e866..6efdfa8d6f6d 100644 --- a/website/docs/cdktf/python/r/securitylake_subscriber_notification.html.markdown +++ b/website/docs/cdktf/python/r/securitylake_subscriber_notification.html.markdown @@ -68,6 +68,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `subscriber_id` - (Required) The subscriber ID for the notification subscription. * `configuration` - (Required) Specify the configuration using which you want to create the subscriber notification..
@@ -103,4 +104,4 @@ This resource exports the following attributes in addition to the arguments abov * `update` - (Default `180m`) * `delete` - (Default `90m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/serverlessapplicationrepository_cloudformation_stack.html.markdown b/website/docs/cdktf/python/r/serverlessapplicationrepository_cloudformation_stack.html.markdown index e711114a5114..46baa56c80fd 100644 --- a/website/docs/cdktf/python/r/serverlessapplicationrepository_cloudformation_stack.html.markdown +++ b/website/docs/cdktf/python/r/serverlessapplicationrepository_cloudformation_stack.html.markdown @@ -37,7 +37,7 @@ class MyConvertedCode(TerraformStack): capabilities=["CAPABILITY_IAM", "CAPABILITY_RESOURCE_POLICY"], name="postgres-rotator", parameters={ - "endpoint": "secretsmanager.${" + data_aws_region_current.name + "}.${" + current.dns_suffix + "}", + "endpoint": "secretsmanager.${" + data_aws_region_current.region + "}.${" + current.dns_suffix + "}", "function_name": "func-postgres-rotator" } ) @@ -47,6 +47,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The name of the stack to create. The resource deployed in AWS will be prefixed with `serverlessrepo-` * `application_id` - (Required) The ARN of the application from the Serverless Application Repository. * `capabilities` - (Required) A list of capabilities. 
Valid values are `CAPABILITY_IAM`, `CAPABILITY_NAMED_IAM`, `CAPABILITY_RESOURCE_POLICY`, or `CAPABILITY_AUTO_EXPAND` @@ -87,4 +88,4 @@ Using `terraform import`, import Serverless Application Repository Stack using t % terraform import aws_serverlessapplicationrepository_cloudformation_stack.example serverlessrepo-postgres-rotator ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/service_discovery_http_namespace.html.markdown b/website/docs/cdktf/python/r/service_discovery_http_namespace.html.markdown index bf281eac627e..72e2a363ae6d 100644 --- a/website/docs/cdktf/python/r/service_discovery_http_namespace.html.markdown +++ b/website/docs/cdktf/python/r/service_discovery_http_namespace.html.markdown @@ -34,6 +34,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The name of the http namespace. * `description` - (Optional) The description that you specify for the namespace when you create it. * `tags` - (Optional) A map of tags to assign to the namespace. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. 
@@ -72,4 +73,4 @@ Using `terraform import`, import Service Discovery HTTP Namespace using the name % terraform import aws_service_discovery_http_namespace.example ns-1234567890 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/service_discovery_instance.html.markdown b/website/docs/cdktf/python/r/service_discovery_instance.html.markdown index e5876f793817..d5ae2d1bb81f 100644 --- a/website/docs/cdktf/python/r/service_discovery_instance.html.markdown +++ b/website/docs/cdktf/python/r/service_discovery_instance.html.markdown @@ -110,6 +110,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `instance_id` - (Required, ForceNew) The ID of the service instance. * `service_id` - (Required, ForceNew) The ID of the service that you want to use to create the instance. * `attributes` - (Required) A map contains the attributes of the instance. Check the [doc](https://docs.aws.amazon.com/cloud-map/latest/api/API_RegisterInstance.html#API_RegisterInstance_RequestSyntax) for the supported attributes and syntax. 
@@ -145,4 +146,4 @@ Using `terraform import`, import Service Discovery Instance using the service ID % terraform import aws_service_discovery_instance.example 0123456789/i-0123 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/service_discovery_private_dns_namespace.html.markdown b/website/docs/cdktf/python/r/service_discovery_private_dns_namespace.html.markdown index 1b1fed9e6c06..95a21e94c641 100644 --- a/website/docs/cdktf/python/r/service_discovery_private_dns_namespace.html.markdown +++ b/website/docs/cdktf/python/r/service_discovery_private_dns_namespace.html.markdown @@ -44,6 +44,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The name of the namespace. * `vpc` - (Required) The ID of VPC that you want to associate the namespace with. * `description` - (Optional) The description that you specify for the namespace when you create it. 
@@ -83,4 +84,4 @@ Using `terraform import`, import Service Discovery Private DNS Namespace using t % terraform import aws_service_discovery_private_dns_namespace.example 0123456789:vpc-123345 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/service_discovery_public_dns_namespace.html.markdown b/website/docs/cdktf/python/r/service_discovery_public_dns_namespace.html.markdown index 2a0b2a728ab4..3bacd8cbe802 100644 --- a/website/docs/cdktf/python/r/service_discovery_public_dns_namespace.html.markdown +++ b/website/docs/cdktf/python/r/service_discovery_public_dns_namespace.html.markdown @@ -36,6 +36,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The name of the namespace. * `description` - (Optional) The description that you specify for the namespace when you create it. * `tags` - (Optional) A map of tags to assign to the namespace. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. 
@@ -74,4 +75,4 @@ Using `terraform import`, import Service Discovery Public DNS Namespace using th % terraform import aws_service_discovery_public_dns_namespace.example 0123456789 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/service_discovery_service.html.markdown b/website/docs/cdktf/python/r/service_discovery_service.html.markdown index 641806946b56..6c8b7d619c81 100644 --- a/website/docs/cdktf/python/r/service_discovery_service.html.markdown +++ b/website/docs/cdktf/python/r/service_discovery_service.html.markdown @@ -101,6 +101,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required, Forces new resource) The name of the service. * `description` - (Optional) The description of the service. * `dns_config` - (Optional) A complex type that contains information about the resource record sets that you want Amazon Route 53 to create when you register an instance. See [`dns_config` Block](#dns_config-block) for details. @@ -138,7 +139,7 @@ The `health_check_config` configuration block supports the following arguments: The `health_check_custom_config` configuration block supports the following arguments: -* `failure_threshold` - (Optional, Forces new resource) The number of 30-second intervals that you want service discovery to wait before it changes the health status of a service instance. Maximum value of 10. +* `failure_threshold` - (Optional, **Deprecated**, Forces new resource) The number of 30-second intervals that you want service discovery to wait before it changes the health status of a service instance. Value is always set to 1.
## Attribute Reference @@ -146,7 +147,6 @@ This resource exports the following attributes in addition to the arguments abov * `id` - The ID of the service. * `arn` - The ARN of the service. -* `tags_all` - A map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). ## Import @@ -173,4 +173,4 @@ Using `terraform import`, import Service Discovery Service using the service ID. % terraform import aws_service_discovery_service.example 0123456789 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/servicecatalog_budget_resource_association.html.markdown b/website/docs/cdktf/python/r/servicecatalog_budget_resource_association.html.markdown index 493afa3468c3..a3d8de890aea 100644 --- a/website/docs/cdktf/python/r/servicecatalog_budget_resource_association.html.markdown +++ b/website/docs/cdktf/python/r/servicecatalog_budget_resource_association.html.markdown @@ -38,8 +38,9 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -The following arguments are required: +This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `budget_name` - (Required) Budget name. * `resource_id` - (Required) Resource identifier. 
@@ -82,4 +83,4 @@ Using `terraform import`, import `aws_servicecatalog_budget_resource_association % terraform import aws_servicecatalog_budget_resource_association.example budget-pjtvyakdlyo3m:prod-dnigbtea24ste ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/servicecatalog_constraint.html.markdown b/website/docs/cdktf/python/r/servicecatalog_constraint.html.markdown index 4c252421e893..fe4ec5217d28 100644 --- a/website/docs/cdktf/python/r/servicecatalog_constraint.html.markdown +++ b/website/docs/cdktf/python/r/servicecatalog_constraint.html.markdown @@ -53,6 +53,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `accept_language` - (Optional) Language code. Valid values: `en` (English), `jp` (Japanese), `zh` (Chinese). Default value is `en`. * `description` - (Optional) Description of the constraint. 
@@ -131,4 +132,4 @@ Using `terraform import`, import `aws_servicecatalog_constraint` using the const % terraform import aws_servicecatalog_constraint.example cons-nmdkb6cgxfcrs ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/servicecatalog_portfolio.html.markdown b/website/docs/cdktf/python/r/servicecatalog_portfolio.html.markdown index 0fb1061c3459..46c0a41747e8 100644 --- a/website/docs/cdktf/python/r/servicecatalog_portfolio.html.markdown +++ b/website/docs/cdktf/python/r/servicecatalog_portfolio.html.markdown @@ -37,6 +37,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The name of the portfolio. * `description` - (Required) Description of the portfolio * `provider_name` - (Required) Name of the person or organization who owns the portfolio. 
@@ -83,4 +84,4 @@ Using `terraform import`, import Service Catalog Portfolios using the Service Ca % terraform import aws_servicecatalog_portfolio.testfolio port-12344321 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/servicecatalog_portfolio_share.html.markdown b/website/docs/cdktf/python/r/servicecatalog_portfolio_share.html.markdown index 24510b1a77a2..e41db9d14104 100644 --- a/website/docs/cdktf/python/r/servicecatalog_portfolio_share.html.markdown +++ b/website/docs/cdktf/python/r/servicecatalog_portfolio_share.html.markdown @@ -53,6 +53,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `accept_language` - (Optional) Language code. Valid values: `en` (English), `jp` (Japanese), `zh` (Chinese). Default value is `en`. * `share_principals` - (Optional) Enables or disables Principal sharing when creating the portfolio share. If this flag is not provided, principal sharing is disabled. * `share_tag_options` - (Optional) Whether to enable sharing of `aws_servicecatalog_tag_option` resources when creating the portfolio share. 
@@ -98,4 +99,4 @@ Using `terraform import`, import `aws_servicecatalog_portfolio_share` using the % terraform import aws_servicecatalog_portfolio_share.example port-12344321:ACCOUNT:123456789012 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/servicecatalog_principal_portfolio_association.html.markdown b/website/docs/cdktf/python/r/servicecatalog_principal_portfolio_association.html.markdown index d540299cc9ff..9a3a0e6c1df6 100644 --- a/website/docs/cdktf/python/r/servicecatalog_principal_portfolio_association.html.markdown +++ b/website/docs/cdktf/python/r/servicecatalog_principal_portfolio_association.html.markdown @@ -43,6 +43,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `accept_language` - (Optional) Language code. Valid values: `en` (English), `jp` (Japanese), `zh` (Chinese). Default value is `en`. * `principal_type` - (Optional) Principal type. Setting this argument empty (e.g., `principal_type = ""`) will result in an error. Valid values are `IAM` and `IAM_PATTERN`. Default is `IAM`. 
@@ -85,4 +86,4 @@ Using `terraform import`, import `aws_servicecatalog_principal_portfolio_associa % terraform import aws_servicecatalog_principal_portfolio_association.example en,arn:aws:iam::123456789012:user/Eleanor,port-68656c6c6f,IAM ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/servicecatalog_product.html.markdown b/website/docs/cdktf/python/r/servicecatalog_product.html.markdown index ef8a6e7e5e45..b7f34f5b6944 100644 --- a/website/docs/cdktf/python/r/servicecatalog_product.html.markdown +++ b/website/docs/cdktf/python/r/servicecatalog_product.html.markdown @@ -56,6 +56,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `accept_language` - (Optional) Language code. Valid values: `en` (English), `jp` (Japanese), `zh` (Chinese). Default value is `en`. * `description` - (Optional) Description of the product. * `distributor` - (Optional) Distributor (i.e., vendor) of the product. 
@@ -120,4 +121,4 @@ Using `terraform import`, import `aws_servicecatalog_product` using the product % terraform import aws_servicecatalog_product.example prod-dnigbtea24ste ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/servicecatalog_product_portfolio_association.html.markdown b/website/docs/cdktf/python/r/servicecatalog_product_portfolio_association.html.markdown index 1257d32f30f0..16b9b31e7196 100644 --- a/website/docs/cdktf/python/r/servicecatalog_product_portfolio_association.html.markdown +++ b/website/docs/cdktf/python/r/servicecatalog_product_portfolio_association.html.markdown @@ -43,6 +43,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `accept_language` - (Optional) Language code. Valid values: `en` (English), `jp` (Japanese), `zh` (Chinese). Default value is `en`. * `source_portfolio_id` - (Optional) Identifier of the source portfolio. 
@@ -83,4 +84,4 @@ Using `terraform import`, import `aws_servicecatalog_product_portfolio_associati % terraform import aws_servicecatalog_product_portfolio_association.example en:port-68656c6c6f:prod-dnigbtea24ste ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/servicecatalog_provisioned_product.html.markdown b/website/docs/cdktf/python/r/servicecatalog_provisioned_product.html.markdown index aeaa4c856556..691c89e4c6d1 100644 --- a/website/docs/cdktf/python/r/servicecatalog_provisioned_product.html.markdown +++ b/website/docs/cdktf/python/r/servicecatalog_provisioned_product.html.markdown @@ -59,6 +59,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `accept_language` - (Optional) Language code. Valid values: `en` (English), `jp` (Japanese), `zh` (Chinese). Default value is `en`. * `ignore_errors` - (Optional) _Only applies to deleting._ If set to `true`, AWS Service Catalog stops managing the specified provisioned product even if it cannot delete the underlying resources. The default value is `false`. * `notification_arns` - (Optional) Passed to CloudFormation. The SNS topic ARNs to which to publish stack-related events. 
@@ -157,4 +158,4 @@ Using `terraform import`, import `aws_servicecatalog_provisioned_product` using % terraform import aws_servicecatalog_provisioned_product.example pp-dnigbtea24ste ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/servicecatalog_provisioning_artifact.html.markdown b/website/docs/cdktf/python/r/servicecatalog_provisioning_artifact.html.markdown index cc1f1b1f798a..2f5000da3b49 100644 --- a/website/docs/cdktf/python/r/servicecatalog_provisioning_artifact.html.markdown +++ b/website/docs/cdktf/python/r/servicecatalog_provisioning_artifact.html.markdown @@ -52,6 +52,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `accept_language` - (Optional) Language code. Valid values: `en` (English), `jp` (Japanese), `zh` (Chinese). The default value is `en`. * `active` - (Optional) Whether the product version is active. Inactive provisioning artifacts are invisible to end users. End users cannot launch or update a provisioned product from an inactive provisioning artifact. Default is `true`. * `description` - (Optional) Description of the provisioning artifact (i.e., version), including how it differs from the previous provisioning artifact. 
@@ -103,4 +104,4 @@ Using `terraform import`, import `aws_servicecatalog_provisioning_artifact` usin % terraform import aws_servicecatalog_provisioning_artifact.example pa-ij2b6lusy6dec:prod-el3an0rma3 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/servicecatalog_service_action.html.markdown b/website/docs/cdktf/python/r/servicecatalog_service_action.html.markdown index fca69d5d1119..78ef4dddfa4e 100644 --- a/website/docs/cdktf/python/r/servicecatalog_service_action.html.markdown +++ b/website/docs/cdktf/python/r/servicecatalog_service_action.html.markdown @@ -47,6 +47,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `accept_language` - (Optional) Language code. Valid values are `en` (English), `jp` (Japanese), and `zh` (Chinese). Default is `en`. * `description` - (Optional) Self-service action description. 
@@ -100,4 +101,4 @@ Using `terraform import`, import `aws_servicecatalog_service_action` using the s % terraform import aws_servicecatalog_service_action.example act-f1w12eperfslh ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/servicecatalog_tag_option.html.markdown b/website/docs/cdktf/python/r/servicecatalog_tag_option.html.markdown index 74369525bfdb..cc5501d86757 100644 --- a/website/docs/cdktf/python/r/servicecatalog_tag_option.html.markdown +++ b/website/docs/cdktf/python/r/servicecatalog_tag_option.html.markdown @@ -43,6 +43,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `active` - (Optional) Whether tag option is active. Default is `true`. 
## Attribute Reference @@ -86,4 +87,4 @@ Using `terraform import`, import `aws_servicecatalog_tag_option` using the tag o % terraform import aws_servicecatalog_tag_option.example tag-pjtvagohlyo3m ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/servicecatalog_tag_option_resource_association.html.markdown b/website/docs/cdktf/python/r/servicecatalog_tag_option_resource_association.html.markdown index b761f36ef788..6626197828b6 100644 --- a/website/docs/cdktf/python/r/servicecatalog_tag_option_resource_association.html.markdown +++ b/website/docs/cdktf/python/r/servicecatalog_tag_option_resource_association.html.markdown @@ -38,8 +38,9 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -The following arguments are required: +This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `resource_id` - (Required) Resource identifier. * `tag_option_id` - (Required) Tag Option identifier. 
@@ -86,4 +87,4 @@ Using `terraform import`, import `aws_servicecatalog_tag_option_resource_associa % terraform import aws_servicecatalog_tag_option_resource_association.example tag-pjtvyakdlyo3m:prod-dnigbtea24ste ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/servicecatalogappregistry_application.html.markdown b/website/docs/cdktf/python/r/servicecatalogappregistry_application.html.markdown index 9be28dffe543..972ba57cda82 100644 --- a/website/docs/cdktf/python/r/servicecatalogappregistry_application.html.markdown +++ b/website/docs/cdktf/python/r/servicecatalogappregistry_application.html.markdown @@ -66,6 +66,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` - (Optional) Description of the application. * `tags` - (Optional) A map of tags assigned to the Application. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. 
@@ -103,4 +104,4 @@ Using `terraform import`, import AWS Service Catalog AppRegistry Application usi % terraform import aws_servicecatalogappregistry_application.example application-id-12345678 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/servicecatalogappregistry_attribute_group.html.markdown b/website/docs/cdktf/python/r/servicecatalogappregistry_attribute_group.html.markdown index 08da9b88717c..033ae4b3b347 100644 --- a/website/docs/cdktf/python/r/servicecatalogappregistry_attribute_group.html.markdown +++ b/website/docs/cdktf/python/r/servicecatalogappregistry_attribute_group.html.markdown @@ -18,20 +18,21 @@ Terraform resource for managing an AWS Service Catalog AppRegistry Attribute Gro ```python # DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug from constructs import Construct -from cdktf import Fn, TerraformStack +from cdktf import Fn, Token, TerraformStack # # Provider bindings are generated by running `cdktf get`. # See https://cdk.tf/provider-generation for more details. # -from imports.aws. import ServicecatalogappregistryAttributeGroup +from imports.aws.servicecatalogappregistry_attribute_group import ServicecatalogappregistryAttributeGroup class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) ServicecatalogappregistryAttributeGroup(self, "example", - attributes=Fn.jsonencode({ - "app": "exampleapp", - "group": "examplegroup" - }), + attributes=Token.as_string( + Fn.jsonencode({ + "app": "exampleapp", + "group": "examplegroup" + })), description="example description", name="example" ) @@ -46,6 +47,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` - (Optional) Description of the Attribute Group. * `tags` - (Optional) A map of tags assigned to the Attribute Group. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. @@ -69,7 +71,7 @@ from cdktf import TerraformStack # Provider bindings are generated by running `cdktf get`. # See https://cdk.tf/provider-generation for more details. # -from imports.aws. import ServicecatalogappregistryAttributeGroup +from imports.aws.servicecatalogappregistry_attribute_group import ServicecatalogappregistryAttributeGroup class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) @@ -82,4 +84,4 @@ Using `terraform import`, import Service Catalog AppRegistry Attribute Group usi % terraform import aws_servicecatalogappregistry_attribute_group.example 1234567890abcfedhijk09876s ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/servicecatalogappregistry_attribute_group_association.html.markdown b/website/docs/cdktf/python/r/servicecatalogappregistry_attribute_group_association.html.markdown index 178e95a8a910..0a06de408be1 100644 --- a/website/docs/cdktf/python/r/servicecatalogappregistry_attribute_group_association.html.markdown +++ b/website/docs/cdktf/python/r/servicecatalogappregistry_attribute_group_association.html.markdown @@ -55,8 +55,9 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -The following arguments are required: +This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be 
[managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `application_id` - (Required) ID of the application. * `attribute_group_id` - (Required) ID of the attribute group to associate with the application. @@ -89,4 +90,4 @@ Using `terraform import`, import Service Catalog AppRegistry Attribute Group Ass % terraform import aws_servicecatalogappregistry_attribute_group_association.example 12456778723424sdffsdfsdq34,12234t3564dsfsdf34asff4ww3 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/servicequotas_service_quota.html.markdown b/website/docs/cdktf/python/r/servicequotas_service_quota.html.markdown index 6a6ddbf8fc70..fdc7d6a0b022 100644 --- a/website/docs/cdktf/python/r/servicequotas_service_quota.html.markdown +++ b/website/docs/cdktf/python/r/servicequotas_service_quota.html.markdown @@ -39,6 +39,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `quota_code` - (Required) Code of the service quota to track. For example: `L-F678F1CE`. Available values can be found with the [AWS CLI service-quotas list-service-quotas command](https://docs.aws.amazon.com/cli/latest/reference/service-quotas/list-service-quotas.html). * `service_code` - (Required) Code of the service to track. For example: `vpc`. Available values can be found with the [AWS CLI service-quotas list-services command](https://docs.aws.amazon.com/cli/latest/reference/service-quotas/list-services.html). 
* `value` - (Required) Float specifying the desired value for the service quota. If the desired value is higher than the current value, a quota increase request is submitted. When a known request is submitted and pending, the value reflects the desired value of the pending request. @@ -92,4 +93,4 @@ Using `terraform import`, import `aws_servicequotas_service_quota` using the ser % terraform import aws_servicequotas_service_quota.example vpc/L-F678F1CE ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/servicequotas_template.html.markdown b/website/docs/cdktf/python/r/servicequotas_template.html.markdown index 7a4176386e5c..20dcdee8e361 100644 --- a/website/docs/cdktf/python/r/servicequotas_template.html.markdown +++ b/website/docs/cdktf/python/r/servicequotas_template.html.markdown @@ -6,6 +6,7 @@ description: |- Terraform resource for managing an AWS Service Quotas Template. --- + # Resource: aws_servicequotas_template @@ -30,8 +31,8 @@ class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) ServicequotasTemplate(self, "example", + aws_region="us-east-1", quota_code="L-2ACBD22F", - region="us-east-1", service_code="lambda", value=Token.as_number("80") ) @@ -39,9 +40,10 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -The following arguments are required: +This resource supports the following arguments: -* `region` - (Required) AWS Region to which the template applies. +* `aws_region` - (Optional) AWS Region to which the template applies. +* `region` - (Optional, **Deprecated**) AWS Region to which the template applies. Use `aws_region` instead. * `quota_code` - (Required) Quota identifier. To find the quota code for a specific quota, use the [aws_servicequotas_service_quota](../d/servicequotas_service_quota.html.markdown) data source. * `service_code` - (Required) Service identifier. 
To find the service code value for an AWS service, use the [aws_servicequotas_service](../d/servicequotas_service.html.markdown) data source. * `value` - (Required) The new, increased value for the quota. @@ -81,4 +83,4 @@ Using `terraform import`, import Service Quotas Template using the `id`. For exa % terraform import aws_servicequotas_template.example us-east-1,L-2ACBD22F,lambda ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/servicequotas_template_association.html.markdown b/website/docs/cdktf/python/r/servicequotas_template_association.html.markdown index 2e694773e280..f777fe96fe22 100644 --- a/website/docs/cdktf/python/r/servicequotas_template_association.html.markdown +++ b/website/docs/cdktf/python/r/servicequotas_template_association.html.markdown @@ -36,6 +36,7 @@ class MyConvertedCode(TerraformStack): The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `skip_destroy` - (Optional) Skip disassociating the quota increase template upon destruction. This will remove the resource from Terraform state, but leave the remote association in place. 
## Attribute Reference @@ -70,4 +71,4 @@ Using `terraform import`, import Service Quotas Template Association using the ` % terraform import aws_servicequotas_template_association.example 123456789012 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/ses_active_receipt_rule_set.html.markdown b/website/docs/cdktf/python/r/ses_active_receipt_rule_set.html.markdown index b8e3244cb3c9..8e3ffdd687ea 100644 --- a/website/docs/cdktf/python/r/ses_active_receipt_rule_set.html.markdown +++ b/website/docs/cdktf/python/r/ses_active_receipt_rule_set.html.markdown @@ -35,6 +35,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `rule_set_name` - (Required) The name of the rule set ## Attribute Reference @@ -69,4 +70,4 @@ Using `terraform import`, import active SES receipt rule sets using the rule set % terraform import aws_ses_active_receipt_rule_set.my_rule_set my_rule_set_name ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/ses_configuration_set.html.markdown b/website/docs/cdktf/python/r/ses_configuration_set.html.markdown index c8804593611d..0df8b7ec5ec9 100644 --- a/website/docs/cdktf/python/r/ses_configuration_set.html.markdown +++ b/website/docs/cdktf/python/r/ses_configuration_set.html.markdown @@ -85,6 +85,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `delivery_options` - (Optional) Whether messages that use the configuration set are required to use TLS. See below. * `reputation_metrics_enabled` - (Optional) Whether or not Amazon SES publishes reputation metrics for the configuration set, such as bounce and complaint rates, to Amazon CloudWatch. The default value is `false`. * `sending_enabled` - (Optional) Whether email sending is enabled or disabled for the configuration set. The default value is `true`. @@ -131,4 +132,4 @@ Using `terraform import`, import SES Configuration Sets using their `name`. For % terraform import aws_ses_configuration_set.test some-configuration-set-test ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/ses_domain_dkim.html.markdown b/website/docs/cdktf/python/r/ses_domain_dkim.html.markdown index a1e803ed6409..1da4f6b577b8 100644 --- a/website/docs/cdktf/python/r/ses_domain_dkim.html.markdown +++ b/website/docs/cdktf/python/r/ses_domain_dkim.html.markdown @@ -18,6 +18,7 @@ Domain ownership needs to be confirmed first using [ses_domain_identity Resource This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `domain` - (Required) Verified domain name to generate DKIM tokens for. ## Attribute Reference @@ -103,4 +104,4 @@ Using `terraform import`, import DKIM tokens using the `domain` attribute. 
For e % terraform import aws_ses_domain_dkim.example example.com ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/ses_domain_identity.html.markdown b/website/docs/cdktf/python/r/ses_domain_identity.html.markdown index c5163f6f7840..cfe415f0d1b1 100644 --- a/website/docs/cdktf/python/r/ses_domain_identity.html.markdown +++ b/website/docs/cdktf/python/r/ses_domain_identity.html.markdown @@ -64,6 +64,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `domain` - (Required) The domain name to assign to SES ## Attribute Reference @@ -98,4 +99,4 @@ Using `terraform import`, import SES domain identities using the domain name. Fo % terraform import aws_ses_domain_identity.example example.com ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/ses_domain_identity_verification.html.markdown b/website/docs/cdktf/python/r/ses_domain_identity_verification.html.markdown index 0d9f29e08733..26be043e4ced 100644 --- a/website/docs/cdktf/python/r/ses_domain_identity_verification.html.markdown +++ b/website/docs/cdktf/python/r/ses_domain_identity_verification.html.markdown @@ -54,6 +54,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
* `domain` - (Required) The domain name of the SES domain identity to verify. ## Attribute Reference @@ -69,4 +70,4 @@ This resource exports the following attributes in addition to the arguments abov - `create` - (Default `45m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/ses_domain_mail_from.html.markdown b/website/docs/cdktf/python/r/ses_domain_mail_from.html.markdown index d2ca1ed7e667..321973fad8a5 100644 --- a/website/docs/cdktf/python/r/ses_domain_mail_from.html.markdown +++ b/website/docs/cdktf/python/r/ses_domain_mail_from.html.markdown @@ -92,6 +92,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `behavior_on_mx_failure` - (Optional) The action that you want Amazon SES to take if it cannot successfully read the required MX record when you send an email. Defaults to `UseDefaultValue`. See the [SES API documentation](https://docs.aws.amazon.com/ses/latest/APIReference/API_SetIdentityMailFromDomain.html) for more information. ## Attribute Reference @@ -125,4 +126,4 @@ Using `terraform import`, import MAIL FROM domain using the `domain` attribute. 
% terraform import aws_ses_domain_mail_from.example example.com ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/ses_email_identity.html.markdown b/website/docs/cdktf/python/r/ses_email_identity.html.markdown index bebe35ac4f57..cb1563822f2a 100644 --- a/website/docs/cdktf/python/r/ses_email_identity.html.markdown +++ b/website/docs/cdktf/python/r/ses_email_identity.html.markdown @@ -16,6 +16,7 @@ Provides an SES email identity resource This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `email` - (Required) The email address to assign to SES. ## Attribute Reference @@ -68,4 +69,4 @@ Using `terraform import`, import SES email identities using the email address. F % terraform import aws_ses_email_identity.example email@example.com ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/ses_event_destination.html.markdown b/website/docs/cdktf/python/r/ses_event_destination.html.markdown index dd76cb940bed..6364efad1914 100644 --- a/website/docs/cdktf/python/r/ses_event_destination.html.markdown +++ b/website/docs/cdktf/python/r/ses_event_destination.html.markdown @@ -97,6 +97,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
* `name` - (Required) The name of the event destination * `configuration_set_name` - (Required) The name of the configuration set * `enabled` - (Optional) If true, the event destination will be enabled @@ -154,4 +155,4 @@ Using `terraform import`, import SES event destinations using `configuration_set % terraform import aws_ses_event_destination.sns some-configuration-set-test/event-destination-sns ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/ses_identity_notification_topic.html.markdown b/website/docs/cdktf/python/r/ses_identity_notification_topic.html.markdown index db9c3ed8e2a4..5f2af7639a1d 100644 --- a/website/docs/cdktf/python/r/ses_identity_notification_topic.html.markdown +++ b/website/docs/cdktf/python/r/ses_identity_notification_topic.html.markdown @@ -38,6 +38,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `topic_arn` - (Optional) The Amazon Resource Name (ARN) of the Amazon SNS topic. Can be set to `""` (an empty string) to disable publishing. * `notification_type` - (Required) The type of notifications that will be published to the specified Amazon SNS topic. Valid Values: `Bounce`, `Complaint` or `Delivery`. * `identity` - (Required) The identity for which the Amazon SNS topic will be set. You can specify an identity by using its name or by using its Amazon Resource Name (ARN). 
@@ -72,4 +73,4 @@ Using `terraform import`, import Identity Notification Topics using the ID of th % terraform import aws_ses_identity_notification_topic.test 'example.com|Bounce' ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/ses_identity_policy.html.markdown b/website/docs/cdktf/python/r/ses_identity_policy.html.markdown index 4406c20e2a1c..f99b15320d98 100644 --- a/website/docs/cdktf/python/r/ses_identity_policy.html.markdown +++ b/website/docs/cdktf/python/r/ses_identity_policy.html.markdown @@ -58,6 +58,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `identity` - (Required) Name or Amazon Resource Name (ARN) of the SES Identity. * `name` - (Required) Name of the policy. * `policy` - (Required) JSON string of the policy. For more information about building AWS IAM policy documents with Terraform, see the [AWS IAM Policy Document Guide](https://learn.hashicorp.com/terraform/aws/iam-policy). 
@@ -91,4 +92,4 @@ Using `terraform import`, import SES Identity Policies using the identity and po % terraform import aws_ses_identity_policy.example 'example.com|example' ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/ses_receipt_filter.html.markdown b/website/docs/cdktf/python/r/ses_receipt_filter.html.markdown index 6b2c1de0128b..8f5451851b5d 100644 --- a/website/docs/cdktf/python/r/ses_receipt_filter.html.markdown +++ b/website/docs/cdktf/python/r/ses_receipt_filter.html.markdown @@ -37,6 +37,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The name of the filter * `cidr` - (Required) The IP address or address range to filter, in CIDR notation * `policy` - (Required) Block or Allow @@ -73,4 +74,4 @@ Using `terraform import`, import SES Receipt Filter using their `name`. For exam % terraform import aws_ses_receipt_filter.test some-filter ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/ses_receipt_rule.html.markdown b/website/docs/cdktf/python/r/ses_receipt_rule.html.markdown index af708609c7c5..10b534c36a8f 100644 --- a/website/docs/cdktf/python/r/ses_receipt_rule.html.markdown +++ b/website/docs/cdktf/python/r/ses_receipt_rule.html.markdown @@ -50,6 +50,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The name of the rule * `rule_set_name` - (Required) The name of the rule set * `after` - (Optional) The name of the rule to place this rule after @@ -146,4 +147,4 @@ Using `terraform import`, import SES receipt rules using the ruleset name and ru % terraform import aws_ses_receipt_rule.my_rule my_rule_set:my_rule ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/ses_receipt_rule_set.html.markdown b/website/docs/cdktf/python/r/ses_receipt_rule_set.html.markdown index aaa4892f38f8..310df0811f35 100644 --- a/website/docs/cdktf/python/r/ses_receipt_rule_set.html.markdown +++ b/website/docs/cdktf/python/r/ses_receipt_rule_set.html.markdown @@ -35,6 +35,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `rule_set_name` - (Required) Name of the rule set. ## Attribute Reference @@ -69,4 +70,4 @@ Using `terraform import`, import SES receipt rule sets using the rule set name. 
% terraform import aws_ses_receipt_rule_set.my_rule_set my_rule_set_name ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/ses_template.html.markdown b/website/docs/cdktf/python/r/ses_template.html.markdown index 78e00c95bf50..943699436129 100644 --- a/website/docs/cdktf/python/r/ses_template.html.markdown +++ b/website/docs/cdktf/python/r/ses_template.html.markdown @@ -38,6 +38,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The name of the template. Cannot exceed 64 characters. You will refer to this name when you send email. * `html` - (Optional) The HTML body of the email. Must be less than 500KB in size, including both the text and HTML parts. * `subject` - (Optional) The subject line of the email. @@ -75,4 +76,4 @@ Using `terraform import`, import SES templates using the template name. 
For exam % terraform import aws_ses_template.MyTemplate MyTemplate ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/sesv2_account_suppression_attributes.html.markdown b/website/docs/cdktf/python/r/sesv2_account_suppression_attributes.html.markdown index c149df761333..44b8407c5b05 100644 --- a/website/docs/cdktf/python/r/sesv2_account_suppression_attributes.html.markdown +++ b/website/docs/cdktf/python/r/sesv2_account_suppression_attributes.html.markdown @@ -33,8 +33,9 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -The following arguments are required: +This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `suppressed_reasons` - (Required) A list that contains the reasons that email addresses will be automatically added to the suppression list for your account. Valid values: `COMPLAINT`, `BOUNCE`. 
## Attribute Reference @@ -66,4 +67,4 @@ Using `terraform import`, import account-level suppression attributes using the % terraform import aws_sesv2_account_suppression_attributes.example 123456789012 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/sesv2_account_vdm_attributes.html.markdown b/website/docs/cdktf/python/r/sesv2_account_vdm_attributes.html.markdown index f0fc5a082565..420e16203594 100644 --- a/website/docs/cdktf/python/r/sesv2_account_vdm_attributes.html.markdown +++ b/website/docs/cdktf/python/r/sesv2_account_vdm_attributes.html.markdown @@ -47,6 +47,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `dashboard_attributes` - (Optional) Specifies additional settings for your VDM configuration as applicable to the Dashboard. * `guardian_attributes` - (Optional) Specifies additional settings for your VDM configuration as applicable to the Guardian. 
@@ -87,4 +88,4 @@ Using `terraform import`, import SESv2 (Simple Email V2) Account VDM Attributes % terraform import aws_sesv2_account_vdm_attributes.example ses-account-vdm-attributes ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/sesv2_configuration_set.html.markdown b/website/docs/cdktf/python/r/sesv2_configuration_set.html.markdown index 592b1461c185..8c0dda154cf8 100644 --- a/website/docs/cdktf/python/r/sesv2_configuration_set.html.markdown +++ b/website/docs/cdktf/python/r/sesv2_configuration_set.html.markdown @@ -54,6 +54,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `configuration_set_name` - (Required) The name of the configuration set. * `delivery_options` - (Optional) An object that defines the dedicated IP pool that is used to send emails that you send using the configuration set. See [`delivery_options` Block](#delivery_options-block) for details. * `reputation_options` - (Optional) An object that defines whether or not Amazon SES collects reputation metrics for the emails that you send that use the configuration set. See [`reputation_options` Block](#reputation_options-block) for details. 
@@ -148,4 +149,4 @@ Using `terraform import`, import SESv2 (Simple Email V2) Configuration Set using % terraform import aws_sesv2_configuration_set.example example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/sesv2_configuration_set_event_destination.html.markdown b/website/docs/cdktf/python/r/sesv2_configuration_set_event_destination.html.markdown index f26010fb6eaa..591104d87ead 100644 --- a/website/docs/cdktf/python/r/sesv2_configuration_set_event_destination.html.markdown +++ b/website/docs/cdktf/python/r/sesv2_configuration_set_event_destination.html.markdown @@ -189,8 +189,9 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -The following arguments are required: +This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `configuration_set_name` - (Required) The name of the configuration set. * `event_destination` - (Required) An object that defines the event destination. See [`event_destination` Block](#event_destination-block) for details. * `event_destination_name` - (Required) A name that identifies the event destination within the configuration set. @@ -202,7 +203,7 @@ The `event_destination` configuration block supports the following arguments: * `matching_event_types` - (Required) - An array that specifies which events the Amazon SES API v2 should send to the destinations. Valid values: `SEND`, `REJECT`, `BOUNCE`, `COMPLAINT`, `DELIVERY`, `OPEN`, `CLICK`, `RENDERING_FAILURE`, `DELIVERY_DELAY`, `SUBSCRIPTION`. * `cloud_watch_destination` - (Optional) An object that defines an Amazon CloudWatch destination for email events.
See [`cloud_watch_destination` Block](#cloud_watch_destination-block) for details. * `enabled` - (Optional) When the event destination is enabled, the specified event types are sent to the destinations. Default: `false`. -* `event_bridge_configuration` - (Optional) An object that defines an Amazon EventBridge destination for email events. You can use Amazon EventBridge to send notifications when certain email events occur. See [`event_bridge_configuration` Block](#event_bridge_configuration-block) for details. +* `event_bridge_destination` - (Optional) An object that defines an Amazon EventBridge destination for email events. You can use Amazon EventBridge to send notifications when certain email events occur. See [`event_bridge_destination` Block](#event_bridge_destination-block) for details. * `kinesis_firehose_destination` - (Optional) An object that defines an Amazon Kinesis Data Firehose destination for email events. See [`kinesis_firehose_destination` Block](#kinesis_firehose_destination-block) for details. * `pinpoint_destination` - (Optional) An object that defines an Amazon Pinpoint project destination for email events. See [`pinpoint_destination` Block](#pinpoint_destination-block) for details. * `sns_destination` - (Optional) An object that defines an Amazon SNS destination for email events. See [`sns_destination` Block](#sns_destination-block) for details. @@ -221,9 +222,9 @@ The `dimension_configuration` configuration block supports the following argumen * `dimension_name` - (Required) The name of an Amazon CloudWatch dimension associated with an email sending metric. * `dimension_value_source` - (Required) The location where the Amazon SES API v2 finds the value of a dimension to publish to Amazon CloudWatch. Valid values: `MESSAGE_TAG`, `EMAIL_HEADER`, `LINK_TAG`. 
-### `event_bridge_configuration` Block +### `event_bridge_destination` Block -The `event_bridge_configuration` configuration block supports the following arguments: +The `event_bridge_destination` configuration block supports the following arguments: * `event_bus_arn` - (Required) The Amazon Resource Name (ARN) of the Amazon EventBridge bus to publish email events to. Only the default bus is supported. @@ -277,4 +278,4 @@ Using `terraform import`, import SESv2 (Simple Email V2) Configuration Set Event % terraform import aws_sesv2_configuration_set_event_destination.example example_configuration_set|example_event_destination ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/sesv2_contact_list.html.markdown b/website/docs/cdktf/python/r/sesv2_contact_list.html.markdown index 77f0498030b1..2651d862c563 100644 --- a/website/docs/cdktf/python/r/sesv2_contact_list.html.markdown +++ b/website/docs/cdktf/python/r/sesv2_contact_list.html.markdown @@ -68,6 +68,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` - (Optional) Description of what the contact list is about. * `tags` - (Optional) Key-value map of resource tags for the contact list. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. * `topic` - (Optional) Configuration block(s) with topic for the contact list. Detailed below. 
@@ -82,6 +83,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` - (Optional) Description of what the topic is about, which the contact will see. ## Attribute Reference @@ -117,4 +119,4 @@ Using `terraform import`, import SESv2 (Simple Email V2) Contact List using the % terraform import aws_sesv2_contact_list.example example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/sesv2_dedicated_ip_assignment.html.markdown b/website/docs/cdktf/python/r/sesv2_dedicated_ip_assignment.html.markdown index f5ec5ee6d592..9c30b318b950 100644 --- a/website/docs/cdktf/python/r/sesv2_dedicated_ip_assignment.html.markdown +++ b/website/docs/cdktf/python/r/sesv2_dedicated_ip_assignment.html.markdown @@ -38,8 +38,9 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -The following arguments are required: +This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `ip` - (Required) Dedicated IP address. * `destination_pool_name` - (Required) Name of the dedicated IP pool.
@@ -74,4 +75,4 @@ Using `terraform import`, import SESv2 (Simple Email V2) Dedicated IP Assignment % terraform import aws_sesv2_dedicated_ip_assignment.example "0.0.0.0,my-pool" ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/sesv2_dedicated_ip_pool.html.markdown b/website/docs/cdktf/python/r/sesv2_dedicated_ip_pool.html.markdown index b8de3a63d3ec..7dc2a35ec950 100644 --- a/website/docs/cdktf/python/r/sesv2_dedicated_ip_pool.html.markdown +++ b/website/docs/cdktf/python/r/sesv2_dedicated_ip_pool.html.markdown @@ -61,6 +61,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `scaling_mode` - (Optional) IP pool scaling mode. Valid values: `STANDARD`, `MANAGED`. If omitted, the AWS API will default to a standard pool. * `tags` - (Optional) A map of tags to assign to the pool. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. 
@@ -95,4 +96,4 @@ Using `terraform import`, import SESv2 (Simple Email V2) Dedicated IP Pool using % terraform import aws_sesv2_dedicated_ip_pool.example my-pool ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/sesv2_email_identity.html.markdown b/website/docs/cdktf/python/r/sesv2_email_identity.html.markdown index 7fc58e9a1246..29695ce1546f 100644 --- a/website/docs/cdktf/python/r/sesv2_email_identity.html.markdown +++ b/website/docs/cdktf/python/r/sesv2_email_identity.html.markdown @@ -111,6 +111,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `configuration_set_name` - (Optional) The configuration set to use by default when sending from this identity. Note that any configuration set defined in the email sending request takes precedence. * `dkim_signing_attributes` - (Optional) The configuration of the DKIM authentication settings for an email domain identity. * `tags` - (Optional) Key-value mapping of resource tags. If configured with a provider [`default_tags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. @@ -138,6 +139,7 @@ This resource exports the following attributes in addition to the arguments abov * `tokens` - If you used Easy DKIM to configure DKIM authentication for the domain, then this object contains a set of unique strings that you use to create a set of CNAME records that you add to the DNS configuration for your domain. 
When Amazon SES detects these records in the DNS configuration for your domain, the DKIM authentication process is complete. If you configured DKIM authentication for the domain by providing your own public-private key pair, then this object contains the selector for the public key. * `identity_type` - The email identity type. Valid values: `EMAIL_ADDRESS`, `DOMAIN`. * `tags_all` - Map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block). +* `verification_status` - The verification status of the identity. The status can be one of the following: `PENDING`, `SUCCESS`, `FAILED`, `TEMPORARY_FAILURE`, and `NOT_STARTED`. * `verified_for_sending_status` - Specifies whether or not the identity is verified. ## Import @@ -165,4 +167,4 @@ Using `terraform import`, import SESv2 (Simple Email V2) Email Identity using th % terraform import aws_sesv2_email_identity.example example.com ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/sesv2_email_identity_feedback_attributes.html.markdown b/website/docs/cdktf/python/r/sesv2_email_identity_feedback_attributes.html.markdown index 5ea984cba80f..c601f2ef891b 100644 --- a/website/docs/cdktf/python/r/sesv2_email_identity_feedback_attributes.html.markdown +++ b/website/docs/cdktf/python/r/sesv2_email_identity_feedback_attributes.html.markdown @@ -45,6 +45,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `email_identity` - (Required) The email identity. 
* `email_forwarding_enabled` - (Optional) Sets the feedback forwarding configuration for the identity. @@ -77,4 +78,4 @@ Using `terraform import`, import SESv2 (Simple Email V2) Email Identity Feedback % terraform import aws_sesv2_email_identity_feedback_attributes.example example.com ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/sesv2_email_identity_mail_from_attributes.html.markdown b/website/docs/cdktf/python/r/sesv2_email_identity_mail_from_attributes.html.markdown index db5cf1dc248f..432df306e370 100644 --- a/website/docs/cdktf/python/r/sesv2_email_identity_mail_from_attributes.html.markdown +++ b/website/docs/cdktf/python/r/sesv2_email_identity_mail_from_attributes.html.markdown @@ -46,6 +46,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `email_identity` - (Required) The verified email identity. * `behavior_on_mx_failure` - (Optional) The action to take if the required MX record isn't found when you send an email. Valid values: `USE_DEFAULT_VALUE`, `REJECT_MESSAGE`. * `mail_from_domain` - (Optional) The custom MAIL FROM domain that you want the verified identity to use. Required if `behavior_on_mx_failure` is `REJECT_MESSAGE`. 
@@ -79,4 +80,4 @@ Using `terraform import`, import SESv2 (Simple Email V2) Email Identity Mail Fro % terraform import aws_sesv2_email_identity_mail_from_attributes.example example.com ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/sesv2_email_identity_policy.html.markdown b/website/docs/cdktf/python/r/sesv2_email_identity_policy.html.markdown index fa17a09ad09e..f5efc0286042 100644 --- a/website/docs/cdktf/python/r/sesv2_email_identity_policy.html.markdown +++ b/website/docs/cdktf/python/r/sesv2_email_identity_policy.html.markdown @@ -42,8 +42,9 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -The following arguments are required: +This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `email_identity` - (Required) The email identity. * `policy_name` - (Required) - The name of the policy. * `policy` - (Required) - The text of the policy in JSON format. @@ -54,7 +55,7 @@ This resource exports no additional attributes. ## Import -In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import SESv2 (Simple Email V2) Email Identity Policy using the `id` (`email_identity|policy_name`). For example: +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import SESv2 (Simple Email V2) Email Identity Policy using the `email_identity` and `policy_name` separated by `|`. For example: ```python # DO NOT EDIT. 
Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug @@ -71,10 +72,10 @@ class MyConvertedCode(TerraformStack): Sesv2EmailIdentityPolicy.generate_config_for_import(self, "example", "example_email_identity|example_policy_name") ``` -Using `terraform import`, import SESv2 (Simple Email V2) Email Identity Policy using the `example_id_arg`. For example: +Using `terraform import`, import SESv2 (Simple Email V2) Email Identity Policy using the `email_identity` and `policy_name` separated by `|`. For example: ```console % terraform import aws_sesv2_email_identity_policy.example example_email_identity|example_policy_name ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/sfn_activity.html.markdown b/website/docs/cdktf/python/r/sfn_activity.html.markdown index 16b3dfea811e..342166622d42 100644 --- a/website/docs/cdktf/python/r/sfn_activity.html.markdown +++ b/website/docs/cdktf/python/r/sfn_activity.html.markdown @@ -63,6 +63,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `encryption_configuration` - (Optional) Defines what encryption configuration is used to encrypt data in the Activity. For more information see the section [Data at rest encryption](https://docs.aws.amazon.com/step-functions/latest/dg/encryption-at-rest.html) in the AWS Step Functions User Guide. * `name` - (Required) The name of the activity to create. * `tags` - (Optional) Key-value map of resource tags.
If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. @@ -77,13 +78,29 @@ This resource supports the following arguments: This resource exports the following attributes in addition to the arguments above: -* `id` - The Amazon Resource Name (ARN) that identifies the created activity. -* `name` - The name of the activity. -* `creation_date` - The date the activity was created. +* `id` - Amazon Resource Name (ARN) of the activity. +* `arn` - Amazon Resource Name (ARN) of the activity. +* `name` - Name of the activity. +* `creation_date` - Date the activity was created. * `tags_all` - A map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_sfn_activity.example + identity = { + "arn" = "arn:aws:states:eu-west-1:123456789098:activity:bar" + } +} + +resource "aws_sfn_activity" "example" { + ### Configuration omitted for brevity ### +} +``` + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import activities using the `arn`. 
For example: ```python @@ -98,13 +115,13 @@ from imports.aws.sfn_activity import SfnActivity class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) - SfnActivity.generate_config_for_import(self, "foo", "arn:aws:states:eu-west-1:123456789098:activity:bar") + SfnActivity.generate_config_for_import(self, "example", "arn:aws:states:eu-west-1:123456789098:activity:bar") ``` Using `terraform import`, import activities using the `arn`. For example: ```console -% terraform import aws_sfn_activity.foo arn:aws:states:eu-west-1:123456789098:activity:bar +% terraform import aws_sfn_activity.example arn:aws:states:eu-west-1:123456789098:activity:bar ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/sfn_alias.html.markdown b/website/docs/cdktf/python/r/sfn_alias.html.markdown index 2b6f80b5e0e6..08bd3407bc7d 100644 --- a/website/docs/cdktf/python/r/sfn_alias.html.markdown +++ b/website/docs/cdktf/python/r/sfn_alias.html.markdown @@ -53,6 +53,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name for the alias you are creating. * `description` - (Optional) Description of the alias. * `routing_configuration` - (Required) The StateMachine alias' route configuration settings. Fields documented below @@ -71,6 +72,21 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. 
For example: + +```terraform +import { + to = aws_sfn_alias.example + identity = { + "arn" = "arn:aws:states:us-east-1:123456789098:stateMachine:myStateMachine:foo" + } +} + +resource "aws_sfn_alias" "example" { + ### Configuration omitted for brevity ### +} +``` + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import SFN (Step Functions) Alias using the `arn`. For example: ```python @@ -94,4 +110,4 @@ Using `terraform import`, import SFN (Step Functions) Alias using the `arn`. For % terraform import aws_sfn_alias.foo arn:aws:states:us-east-1:123456789098:stateMachine:myStateMachine:foo ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/sfn_state_machine.html.markdown b/website/docs/cdktf/python/r/sfn_state_machine.html.markdown index cda5d2d48f01..7e6f56af421f 100644 --- a/website/docs/cdktf/python/r/sfn_state_machine.html.markdown +++ b/website/docs/cdktf/python/r/sfn_state_machine.html.markdown @@ -140,6 +140,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `definition` - (Required) The [Amazon States Language](https://docs.aws.amazon.com/step-functions/latest/dg/concepts-amazon-states-language.html) definition of the state machine. * `encryption_configuration` - (Optional) Defines what encryption configuration is used to encrypt data in the State Machine. For more information see the section [Data at rest encryption](https://docs.aws.amazon.com/step-functions/latest/dg/encryption-at-rest.html) in the AWS Step Functions User Guide. * `logging_configuration` - (Optional) Defines what execution history events are logged and where they are logged.
The `logging_configuration` parameter is valid when `type` is set to `STANDARD` or `EXPRESS`. Defaults to `OFF`. For more information see [Logging Express Workflows](https://docs.aws.amazon.com/step-functions/latest/dg/cw-logs.html), [Log Levels](https://docs.aws.amazon.com/step-functions/latest/dg/cloudwatch-log-level.html) and [Logging Configuration](https://docs.aws.amazon.com/step-functions/latest/apireference/API_CreateStateMachine.html) in the AWS Step Functions User Guide. @@ -188,6 +189,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_sfn_state_machine.example + identity = { + "arn" = "arn:aws:states:eu-west-1:123456789098:stateMachine:bar" + } +} + +resource "aws_sfn_state_machine" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) ARN of the state machine. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import State Machines using the `arn`. For example: ```python @@ -211,4 +233,4 @@ Using `terraform import`, import State Machines using the `arn`. 
For example: % terraform import aws_sfn_state_machine.foo arn:aws:states:eu-west-1:123456789098:stateMachine:bar ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/shield_drt_access_log_bucket_association.html.markdown b/website/docs/cdktf/python/r/shield_drt_access_log_bucket_association.html.markdown index f73cf79e58b3..e1854aabedc9 100644 --- a/website/docs/cdktf/python/r/shield_drt_access_log_bucket_association.html.markdown +++ b/website/docs/cdktf/python/r/shield_drt_access_log_bucket_association.html.markdown @@ -31,7 +31,7 @@ class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) test = ShieldDrtAccessRoleArnAssociation(self, "test", - role_arn="arn:aws:iam:${" + current.name + "}:${" + data_aws_caller_identity_current.account_id + "}:${" + shield_drt_access_role_name.value + "}" + role_arn="arn:aws:iam:${" + current.region + "}:${" + data_aws_caller_identity_current.account_id + "}:${" + shield_drt_access_role_name.value + "}" ) aws_shield_drt_access_log_bucket_association_test = ShieldDrtAccessLogBucketAssociation(self, "test_1", @@ -85,4 +85,4 @@ Using `terraform import`, import Shield DRT access log bucket associations using % terraform import aws_shield_drt_access_log_bucket_association.example example-bucket ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/shield_protection.html.markdown b/website/docs/cdktf/python/r/shield_protection.html.markdown index d2f38944ae22..9653fb29c763 100644 --- a/website/docs/cdktf/python/r/shield_protection.html.markdown +++ b/website/docs/cdktf/python/r/shield_protection.html.markdown @@ -43,7 +43,7 @@ class MyConvertedCode(TerraformStack): data_aws_region_current.override_logical_id("current") aws_shield_protection_example = ShieldProtection(self, "example_4", name="example", - resource_arn="arn:aws:ec2:${" + data_aws_region_current.name + "}:${" + 
current.account_id + "}:eip-allocation/${" + example.id + "}", + resource_arn="arn:aws:ec2:${" + data_aws_region_current.region + "}:${" + current.account_id + "}:eip-allocation/${" + example.id + "}", tags={ "Environment": "Dev" } @@ -93,4 +93,4 @@ Using `terraform import`, import Shield protection resources using specifying th % terraform import aws_shield_protection.example ff9592dc-22f3-4e88-afa1-7b29fde9669a ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/shield_protection_group.html.markdown b/website/docs/cdktf/python/r/shield_protection_group.html.markdown index 9cf2a057ba7a..6b1a4bae8223 100644 --- a/website/docs/cdktf/python/r/shield_protection_group.html.markdown +++ b/website/docs/cdktf/python/r/shield_protection_group.html.markdown @@ -64,14 +64,14 @@ class MyConvertedCode(TerraformStack): data_aws_region_current.override_logical_id("current") aws_shield_protection_example = ShieldProtection(self, "example_3", name="example", - resource_arn="arn:aws:ec2:${" + data_aws_region_current.name + "}:${" + current.account_id + "}:eip-allocation/${" + example.id + "}" + resource_arn="arn:aws:ec2:${" + data_aws_region_current.region + "}:${" + current.account_id + "}:eip-allocation/${" + example.id + "}" ) # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. 
aws_shield_protection_example.override_logical_id("example") aws_shield_protection_group_example = ShieldProtectionGroup(self, "example_4", aggregation="MEAN", depends_on=[aws_shield_protection_example], - members=["arn:aws:ec2:${" + data_aws_region_current.name + "}:${" + current.account_id + "}:eip-allocation/${" + example.id + "}" + members=["arn:aws:ec2:${" + data_aws_region_current.region + "}:${" + current.account_id + "}:eip-allocation/${" + example.id + "}" ], pattern="ARBITRARY", protection_group_id="example" @@ -145,4 +145,4 @@ Using `terraform import`, import Shield protection group resources using their p % terraform import aws_shield_protection_group.example example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/shield_protection_health_check_association.html.markdown b/website/docs/cdktf/python/r/shield_protection_health_check_association.html.markdown index 84fcd0be4b3b..703aa0adafd0 100644 --- a/website/docs/cdktf/python/r/shield_protection_health_check_association.html.markdown +++ b/website/docs/cdktf/python/r/shield_protection_health_check_association.html.markdown @@ -65,7 +65,7 @@ class MyConvertedCode(TerraformStack): data_aws_region_current.override_logical_id("current") aws_shield_protection_example = ShieldProtection(self, "example_5", name="example-protection", - resource_arn="arn:${" + data_aws_partition_current.partition + "}:ec2:${" + data_aws_region_current.name + "}:${" + current.account_id + "}:eip-allocation/${" + example.id + "}" + resource_arn="arn:${" + data_aws_partition_current.partition + "}:ec2:${" + data_aws_region_current.region + "}:${" + current.account_id + "}:eip-allocation/${" + example.id + "}" ) # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. 
aws_shield_protection_example.override_logical_id("example") @@ -116,4 +116,4 @@ Using `terraform import`, import Shield protection health check association reso % terraform import aws_shield_protection_health_check_association.example ff9592dc-22f3-4e88-afa1-7b29fde9669a+arn:aws:route53:::healthcheck/3742b175-edb9-46bc-9359-f53e3b794b1b ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/signer_signing_job.html.markdown b/website/docs/cdktf/python/r/signer_signing_job.html.markdown index e8dfafec32ba..84614407008a 100644 --- a/website/docs/cdktf/python/r/signer_signing_job.html.markdown +++ b/website/docs/cdktf/python/r/signer_signing_job.html.markdown @@ -53,6 +53,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `profile_name` - (Required) The name of the profile to initiate the signing operation. * `source` - (Required) The S3 bucket that contains the object to sign. See [Source](#source) below for details. * `destination` - (Required) The S3 bucket in which to save your signed object. See [Destination](#destination) below for details. @@ -129,4 +130,4 @@ Using `terraform import`, import Signer signing jobs using the `job_id`. 
For exa % terraform import aws_signer_signing_job.test_signer_signing_job 9ed7e5c3-b8d4-4da0-8459-44e0b068f7ee ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/signer_signing_profile.html.markdown b/website/docs/cdktf/python/r/signer_signing_profile.html.markdown index 3f74a0bc1c3b..28ade7df0776 100644 --- a/website/docs/cdktf/python/r/signer_signing_profile.html.markdown +++ b/website/docs/cdktf/python/r/signer_signing_profile.html.markdown @@ -47,11 +47,13 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `platform_id` - (Required, Forces new resource) The ID of the platform that is used by the target signing profile. * `name` - (Optional, Forces new resource) A unique signing profile name. By default generated by Terraform. Signing profile names are immutable and cannot be reused after canceled. * `name_prefix` - (Optional, Forces new resource) A signing profile name prefix. Terraform will generate a unique suffix. Conflicts with `name`. * `signature_validity_period` - (Optional, Forces new resource) The validity period for a signing job. See [`signature_validity_period` Block](#signature_validity_period-block) below for details. * `signing_material` - (Optional, Forces new resource) The AWS Certificate Manager certificate that will be used to sign code with the new signing profile. See [`signing_material` Block](#signing_material-block) below for details. +* `signing_parameters` - (Optional, Forces new resource) Map of key-value pairs for signing. These can include any information that you want to use during signing. 
* `tags` - (Optional) A list of tags associated with the signing profile. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. ### `signature_validity_period` Block @@ -113,4 +115,4 @@ Using `terraform import`, import Signer signing profiles using the `name`. For e % terraform import aws_signer_signing_profile.test_signer_signing_profile test_sp_DdW3Mk1foYL88fajut4mTVFGpuwfd4ACO6ANL0D1uIj7lrn8adK ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/signer_signing_profile_permission.html.markdown b/website/docs/cdktf/python/r/signer_signing_profile_permission.html.markdown index e7ddf5b3005c..cb17b931bcf8 100644 --- a/website/docs/cdktf/python/r/signer_signing_profile_permission.html.markdown +++ b/website/docs/cdktf/python/r/signer_signing_profile_permission.html.markdown @@ -63,6 +63,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `profile_name` - (Required) Name of the signing profile to add the cross-account permissions. * `action` - (Required) An AWS Signer action permitted as part of cross-account permissions. Valid values: `signer:StartSigningJob`, `signer:GetSigningProfile`, `signer:RevokeSignature`, or `signer:SignPayload`. * `principal` - (Required) The AWS principal to be granted a cross-account permission. 
@@ -99,4 +100,4 @@ Using `terraform import`, import Signer signing profile permission statements us % terraform import aws_signer_signing_profile_permission.test_signer_signing_profile_permission prod_profile_DdW3Mk1foYL88fajut4mTVFGpuwfd4ACO6ANL0D1uIj7lrn8adK/ProdAccountStartSigningJobStatementId ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/snapshot_create_volume_permission.html.markdown b/website/docs/cdktf/python/r/snapshot_create_volume_permission.html.markdown index d68ddd947c90..bc704ca23735 100644 --- a/website/docs/cdktf/python/r/snapshot_create_volume_permission.html.markdown +++ b/website/docs/cdktf/python/r/snapshot_create_volume_permission.html.markdown @@ -45,6 +45,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `snapshot_id` - (Required) A snapshot ID * `account_id` - (Required) An AWS Account ID to add create volume permissions. The AWS Account cannot be the snapshot's owner @@ -54,4 +55,4 @@ This resource exports the following attributes in addition to the arguments abov * `id` - A combination of "`snapshot_id`-`account_id`". 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/sns_platform_application.html.markdown b/website/docs/cdktf/python/r/sns_platform_application.html.markdown index feea6c43aedc..b19371260864 100644 --- a/website/docs/cdktf/python/r/sns_platform_application.html.markdown +++ b/website/docs/cdktf/python/r/sns_platform_application.html.markdown @@ -85,6 +85,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The friendly name for the SNS platform application * `platform` - (Required) The platform that the app is registered with. See [Platform][1] for supported platforms. * `platform_credential` - (Required) Application Platform credential. See [Credential][1] for type of credential required for platform. The value of this attribute when stored into the Terraform state is only a hash of the real value, so therefore it is not practical to use this as an attribute for other resources. @@ -137,4 +138,4 @@ Using `terraform import`, import SNS platform applications using the ARN. 
For ex % terraform import aws_sns_platform_application.gcm_application arn:aws:sns:us-west-2:123456789012:app/GCM/gcm_application ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/sns_sms_preferences.html.markdown b/website/docs/cdktf/python/r/sns_sms_preferences.html.markdown index 2b72ac88334c..2ea3d621a3be 100644 --- a/website/docs/cdktf/python/r/sns_sms_preferences.html.markdown +++ b/website/docs/cdktf/python/r/sns_sms_preferences.html.markdown @@ -33,6 +33,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `monthly_spend_limit` - (Optional) The maximum amount in USD that you are willing to spend each month to send SMS messages. * `delivery_status_iam_role_arn` - (Optional) The ARN of the IAM role that allows Amazon SNS to write logs about SMS deliveries in CloudWatch Logs. * `delivery_status_success_sampling_rate` - (Optional) The percentage of successful SMS deliveries for which Amazon SNS will write logs in CloudWatch Logs. The value must be between 0 and 100. @@ -48,4 +49,4 @@ This resource exports no additional attributes. You cannot import the SMS preferences. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/sns_topic.html.markdown b/website/docs/cdktf/python/r/sns_topic.html.markdown index 3fea5e9d130a..3ddcc16970ab 100644 --- a/website/docs/cdktf/python/r/sns_topic.html.markdown +++ b/website/docs/cdktf/python/r/sns_topic.html.markdown @@ -100,6 +100,7 @@ The `_success_feedback_role_arn` and `_failure_feedback_role This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Optional) The name of the topic. Topic names must be made up of only uppercase and lowercase ASCII letters, numbers, underscores, and hyphens, and must be between 1 and 256 characters long. For a FIFO (first-in-first-out) topic, the name must end with the `.fifo` suffix. If omitted, Terraform will assign a random, unique name. Conflicts with `name_prefix` * `name_prefix` - (Optional) Creates a unique name beginning with the specified prefix. Conflicts with `name` * `display_name` - (Optional) The display name for the topic @@ -141,6 +142,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_sns_topic.example + identity = { + "arn" = "arn:aws:sns:us-west-2:123456789012:my-topic" + } +} + +resource "aws_sns_topic" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the SNS topic. 
+ In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import SNS Topics using the topic `arn`. For example: ```python @@ -164,4 +186,4 @@ Using `terraform import`, import SNS Topics using the topic `arn`. For example: % terraform import aws_sns_topic.user_updates arn:aws:sns:us-west-2:123456789012:my-topic ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/sns_topic_data_protection_policy.html.markdown b/website/docs/cdktf/python/r/sns_topic_data_protection_policy.html.markdown index 68f7963c5245..ee53ad2ad2a1 100644 --- a/website/docs/cdktf/python/r/sns_topic_data_protection_policy.html.markdown +++ b/website/docs/cdktf/python/r/sns_topic_data_protection_policy.html.markdown @@ -59,6 +59,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `arn` - (Required) The ARN of the SNS topic * `policy` - (Required) The fully-formed AWS policy as JSON. For more information about building AWS IAM policy documents with Terraform, see the [AWS IAM Policy Document Guide](https://learn.hashicorp.com/terraform/aws/iam-policy). @@ -68,6 +69,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. 
For example: + +```terraform +import { + to = aws_sns_topic_data_protection_policy.example + identity = { + "arn" = "arn:aws:sns:us-west-2:123456789012:example" + } +} + +resource "aws_sns_topic_data_protection_policy" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the SNS topic. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import SNS Data Protection Topic Policy using the topic ARN. For example: ```python @@ -91,4 +113,4 @@ Using `terraform import`, import SNS Data Protection Topic Policy using the topi % terraform import aws_sns_topic_data_protection_policy.example arn:aws:sns:us-west-2:123456789012:example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/sns_topic_policy.html.markdown b/website/docs/cdktf/python/r/sns_topic_policy.html.markdown index 953cd34ff9c9..697f2702b182 100644 --- a/website/docs/cdktf/python/r/sns_topic_policy.html.markdown +++ b/website/docs/cdktf/python/r/sns_topic_policy.html.markdown @@ -65,6 +65,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `arn` - (Required) The ARN of the SNS topic * `policy` - (Required) The fully-formed AWS policy as JSON. For more information about building AWS IAM policy documents with Terraform, see the [AWS IAM Policy Document Guide](https://learn.hashicorp.com/terraform/aws/iam-policy). 
@@ -76,6 +77,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_sns_topic_policy.example + identity = { + "arn" = "arn:aws:sns:us-west-2:123456789012:my-topic" + } +} + +resource "aws_sns_topic_policy" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the SNS topic. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import SNS Topic Policy using the topic ARN. For example: ```python @@ -99,4 +121,4 @@ Using `terraform import`, import SNS Topic Policy using the topic ARN. For examp % terraform import aws_sns_topic_policy.user_updates arn:aws:sns:us-west-2:123456789012:my-topic ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/sns_topic_subscription.html.markdown b/website/docs/cdktf/python/r/sns_topic_subscription.html.markdown index 39b41665d485..7a884417f764 100644 --- a/website/docs/cdktf/python/r/sns_topic_subscription.html.markdown +++ b/website/docs/cdktf/python/r/sns_topic_subscription.html.markdown @@ -296,6 +296,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `confirmation_timeout_in_minutes` - (Optional) Integer indicating number of minutes to wait in retrying mode for fetching subscription arn before marking it as failure. 
Only applicable for http and https protocols. Default is `1`. * `delivery_policy` - (Optional) JSON String with the delivery policy (retries, backoff, etc.) that will be used in the subscription - this only applies to HTTP/S subscriptions. Refer to the [SNS docs](https://docs.aws.amazon.com/sns/latest/dg/DeliveryPolicies.html) for more details. * `endpoint_auto_confirms` - (Optional) Whether the endpoint is capable of [auto confirming subscription](http://docs.aws.amazon.com/sns/latest/dg/SendMessageToHttp.html#SendMessageToHttp.prepare) (e.g., PagerDuty). Default is `false`. @@ -337,6 +338,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_sns_topic_subscription.example + identity = { + "arn" = "arn:aws:sns:us-west-2:123456789012:my-topic:8a21d249-4329-4871-acc6-7be709c6ea7f" + } +} + +resource "aws_sns_topic_subscription" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the SNS topic subscription. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import SNS Topic Subscriptions using the subscription `arn`. 
For example: ```python @@ -360,4 +382,4 @@ Using `terraform import`, import SNS Topic Subscriptions using the subscription % terraform import aws_sns_topic_subscription.user_updates_sqs_target arn:aws:sns:us-west-2:123456789012:my-topic:8a21d249-4329-4871-acc6-7be709c6ea7f ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/spot_datafeed_subscription.html.markdown b/website/docs/cdktf/python/r/spot_datafeed_subscription.html.markdown index bde9be0ae688..f6648145087b 100644 --- a/website/docs/cdktf/python/r/spot_datafeed_subscription.html.markdown +++ b/website/docs/cdktf/python/r/spot_datafeed_subscription.html.markdown @@ -45,6 +45,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `bucket` - (Required) The Amazon S3 bucket in which to store the Spot instance data feed. * `prefix` - (Optional) Path of folder inside bucket to place spot pricing data. 
@@ -77,4 +78,4 @@ Using `terraform import`, import a Spot Datafeed Subscription using the word `sp % terraform import aws_spot_datafeed_subscription.mysubscription spot-datafeed-subscription ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/spot_fleet_request.html.markdown b/website/docs/cdktf/python/r/spot_fleet_request.html.markdown index 405e68d7424d..5e512ceebb73 100644 --- a/website/docs/cdktf/python/r/spot_fleet_request.html.markdown +++ b/website/docs/cdktf/python/r/spot_fleet_request.html.markdown @@ -252,6 +252,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `iam_fleet_role` - (Required) Grants the Spot fleet permission to terminate Spot instances on your behalf when you cancel its Spot fleet request using CancelSpotFleetRequests or when the Spot fleet request expires, if you set @@ -492,4 +493,4 @@ Using `terraform import`, import Spot Fleet Requests using `id`. For example: % terraform import aws_spot_fleet_request.fleet sfr-005e9ec8-5546-4c31-b317-31a62325411e ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/spot_instance_request.html.markdown b/website/docs/cdktf/python/r/spot_instance_request.html.markdown index 53996a510d8f..a77eff1ea670 100644 --- a/website/docs/cdktf/python/r/spot_instance_request.html.markdown +++ b/website/docs/cdktf/python/r/spot_instance_request.html.markdown @@ -27,8 +27,8 @@ price availability or by a user. 
~> **NOTE:** Because their behavior depends on the live status of the spot market, Spot Instance Requests have a unique lifecycle that makes them behave -differently than other Terraform resources. Most importantly: there is __no -guarantee__ that a Spot Instance exists to fulfill the request at any given +differently than other Terraform resources. Most importantly: there is **no +guarantee** that a Spot Instance exists to fulfill the request at any given point in time. See the [AWS Spot Instance documentation](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-spot-instances.html) for more information. @@ -64,6 +64,8 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). + Spot Instance Requests support all the same arguments as [`aws_instance`](instance.html), with the addition of: * `spot_price` - (Optional; Default: On-demand price) The maximum price to request on the spot market. @@ -74,9 +76,6 @@ Spot Instance Requests support all the same arguments as [`aws_instance`](instan the instance is terminated, the spot request will be closed. * `launch_group` - (Optional) A launch group is a group of spot instances that launch together and terminate together. If left empty instances are launched and terminated individually. -* `block_duration_minutes` - (Optional) The required duration for the Spot instances, in minutes. This value must be a multiple of 60 (60, 120, 180, 240, 300, or 360). - The duration period starts as soon as your Spot instance receives its instance ID. 
At the end of the duration period, Amazon EC2 marks the Spot instance for termination and provides a Spot instance termination notice, which gives the instance a two-minute warning before it terminates. - Note that you can't specify an Availability Zone group or a launch group if you specify a duration. * `instance_interruption_behavior` - (Optional) Indicates Spot instance behavior when it is interrupted. Valid values are `terminate`, `stop`, or `hibernate`. Default value is `terminate`. * `valid_until` - (Optional) The end date and time of the request, in UTC [RFC3339](https://tools.ietf.org/html/rfc3339#section-5.8) format(for example, YYYY-MM-DDTHH:MM:SSZ). At this point, no new Spot instance requests are placed or enabled to fulfill the request. The default end date is 7 days from the current date. * `valid_from` - (Optional) The start date and time of the request, in UTC [RFC3339](https://tools.ietf.org/html/rfc3339#section-5.8) format(for example, YYYY-MM-DDTHH:MM:SSZ). The default is to start fulfilling the request immediately. @@ -87,9 +86,9 @@ Spot Instance Requests support all the same arguments as [`aws_instance`](instan This resource exports the following attributes in addition to the arguments above: * `id` - The Spot Instance Request ID. +* `tags_all` - A map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). 
-These attributes are exported, but they are expected to change over time and so -should only be used for informational purposes, not for resource dependencies: +The following attributes are exported, but they are expected to change over time and so should only be used for informational purposes, not for resource dependencies: * `spot_bid_status` - The current [bid status](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-bid-status.html) @@ -106,7 +105,6 @@ should only be used for informational purposes, not for resource dependencies: used inside the Amazon EC2, and only available if you've enabled DNS hostnames for your VPC * `private_ip` - The private IP address assigned to the instance -* `tags_all` - A map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). ## Timeouts @@ -116,4 +114,4 @@ should only be used for informational purposes, not for resource dependencies: * `read` - (Default `15m`) * `delete` - (Default `20m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/sqs_queue.html.markdown b/website/docs/cdktf/python/r/sqs_queue.html.markdown index 3f8b77ac028b..b706661ae359 100644 --- a/website/docs/cdktf/python/r/sqs_queue.html.markdown +++ b/website/docs/cdktf/python/r/sqs_queue.html.markdown @@ -171,6 +171,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `content_based_deduplication` - (Optional) Enables content-based deduplication for FIFO queues. 
For more information, see the [related documentation](http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues.html#FIFO-queues-exactly-once-processing). * `deduplication_scope` - (Optional) Specifies whether message deduplication occurs at the message group or queue level. Valid values are `messageGroup` and `queue` (default). * `delay_seconds` - (Optional) Time in seconds that the delivery of all messages in the queue will be delayed. An integer from 0 to 900 (15 minutes). The default for this attribute is 0 seconds. @@ -178,7 +179,7 @@ This resource supports the following arguments: * `fifo_throughput_limit` - (Optional) Specifies whether the FIFO queue throughput quota applies to the entire queue or per message group. Valid values are `perQueue` (default) and `perMessageGroupId`. * `kms_data_key_reuse_period_seconds` - (Optional) Length of time, in seconds, for which Amazon SQS can reuse a data key to encrypt or decrypt messages before calling AWS KMS again. An integer representing seconds, between 60 seconds (1 minute) and 86,400 seconds (24 hours). The default is 300 (5 minutes). * `kms_master_key_id` - (Optional) ID of an AWS-managed customer master key (CMK) for Amazon SQS or a custom CMK. For more information, see [Key Terms](http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-server-side-encryption.html#sqs-sse-key-terms). -* `max_message_size` - (Optional) Limit of how many bytes a message can contain before Amazon SQS rejects it. An integer from 1024 bytes (1 KiB) up to 262144 bytes (256 KiB). The default for this attribute is 262144 (256 KiB). +* `max_message_size` - (Optional) Limit of how many bytes a message can contain before Amazon SQS rejects it. An integer from 1024 bytes (1 KiB) up to 1048576 bytes (1024 KiB). The default for this attribute is 262144 (256 KiB). * `message_retention_seconds` - (Optional) Number of seconds Amazon SQS retains a message. 
Integer representing seconds, from 60 (1 minute) to 1209600 (14 days). The default for this attribute is 345600 (4 days). * `name` - (Optional) Name of the queue. Queue names must be made up of only uppercase and lowercase ASCII letters, numbers, underscores, and hyphens, and must be between 1 and 80 characters long. For a FIFO (first-in-first-out) queue, the name must end with the `.fifo` suffix. If omitted, Terraform will assign a random, unique name. Conflicts with `name_prefix`. * `name_prefix` - (Optional) Creates a unique name beginning with the specified prefix. Conflicts with `name`. @@ -209,6 +210,32 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_sqs_queue.example + identity = { + url = "https://queue.amazonaws.com/80398EXAMPLE/MyQueue" + } +} + +resource "aws_sqs_queue" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `url` (String) URL of the SQS queue. + +#### Optional + +* `account_id` (String) AWS Account where this resource is managed. +* `region` (String) Region where this resource is managed. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import SQS Queues using the queue `url`. For example: ```python @@ -223,13 +250,13 @@ from imports.aws.sqs_queue import SqsQueue class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) - SqsQueue.generate_config_for_import(self, "publicQueue", "https://queue.amazonaws.com/80398EXAMPLE/MyQueue") + SqsQueue.generate_config_for_import(self, "example", "https://queue.amazonaws.com/80398EXAMPLE/MyQueue") ``` Using `terraform import`, import SQS Queues using the queue `url`. 
For example: ```console -% terraform import aws_sqs_queue.public_queue https://queue.amazonaws.com/80398EXAMPLE/MyQueue +% terraform import aws_sqs_queue.example https://queue.amazonaws.com/80398EXAMPLE/MyQueue ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/sqs_queue_policy.html.markdown b/website/docs/cdktf/python/r/sqs_queue_policy.html.markdown index 8b53bf32535d..4a902f72e2da 100644 --- a/website/docs/cdktf/python/r/sqs_queue_policy.html.markdown +++ b/website/docs/cdktf/python/r/sqs_queue_policy.html.markdown @@ -119,6 +119,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `policy` - (Required) JSON policy for the SQS queue. For more information about building AWS IAM policy documents with Terraform, see the [AWS IAM Policy Document Guide](https://learn.hashicorp.com/terraform/aws/iam-policy). Ensure that `Version = "2012-10-17"` is set in the policy or AWS may hang in creating the queue. * `queue_url` - (Required) URL of the SQS Queue to which to attach the policy. @@ -151,4 +152,4 @@ Using `terraform import`, import SQS Queue Policies using the queue URL. 
For exa % terraform import aws_sqs_queue_policy.test https://queue.amazonaws.com/123456789012/myqueue ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/sqs_queue_redrive_allow_policy.html.markdown b/website/docs/cdktf/python/r/sqs_queue_redrive_allow_policy.html.markdown index 4f349de0b79b..e82ceeb234a6 100644 --- a/website/docs/cdktf/python/r/sqs_queue_redrive_allow_policy.html.markdown +++ b/website/docs/cdktf/python/r/sqs_queue_redrive_allow_policy.html.markdown @@ -54,6 +54,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `queue_url` - (Required) The URL of the SQS Queue to which to attach the policy * `redrive_allow_policy` - (Required) The JSON redrive allow policy for the SQS queue. Learn more in the [Amazon SQS dead-letter queues documentation](https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-dead-letter-queues.html). 
@@ -86,4 +87,4 @@ Using `terraform import`, import SQS Queue Redrive Allow Policies using the queu % terraform import aws_sqs_queue_redrive_allow_policy.test https://queue.amazonaws.com/123456789012/myqueue ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/sqs_queue_redrive_policy.html.markdown b/website/docs/cdktf/python/r/sqs_queue_redrive_policy.html.markdown index d1360fa5d0bf..88eb9ad49dae 100644 --- a/website/docs/cdktf/python/r/sqs_queue_redrive_policy.html.markdown +++ b/website/docs/cdktf/python/r/sqs_queue_redrive_policy.html.markdown @@ -59,6 +59,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `queue_url` - (Required) The URL of the SQS Queue to which to attach the policy * `redrive_policy` - (Required) The JSON redrive policy for the SQS queue. Accepts two key/val pairs: `deadLetterTargetArn` and `maxReceiveCount`. Learn more in the [Amazon SQS dead-letter queues documentation](https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-dead-letter-queues.html). @@ -91,4 +92,4 @@ Using `terraform import`, import SQS Queue Redrive Policies using the queue URL. 
% terraform import aws_sqs_queue_redrive_policy.test https://queue.amazonaws.com/123456789012/myqueue ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/ssm_activation.html.markdown b/website/docs/cdktf/python/r/ssm_activation.html.markdown index 046ba4d223bf..d7bf261f5d31 100644 --- a/website/docs/cdktf/python/r/ssm_activation.html.markdown +++ b/website/docs/cdktf/python/r/ssm_activation.html.markdown @@ -62,6 +62,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Optional) The default name of the registered managed instance. * `description` - (Optional) The description of the resource that you want to register. * `expiration_date` - (Optional) UTC timestamp in [RFC3339 format](https://tools.ietf.org/html/rfc3339#section-5.8) by which this activation request should expire. The default value is 24 hours from resource creation time. Terraform will only perform drift detection of its value when present in a configuration. @@ -111,4 +112,4 @@ Using `terraform import`, import AWS SSM Activation using the `id`. For example: -> **Note:** The `activation_code` attribute cannot be imported. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/ssm_association.html.markdown b/website/docs/cdktf/python/r/ssm_association.html.markdown index cef1c8349430..aa86f01597d4 100644 --- a/website/docs/cdktf/python/r/ssm_association.html.markdown +++ b/website/docs/cdktf/python/r/ssm_association.html.markdown @@ -194,7 +194,7 @@ class MyConvertedCode(TerraformStack): Instance(self, "database_server", ami=Token.as_string(amazon_linux.id), iam_instance_profile=ec2_ssm_profile.name, - instance_type=instance_type.string_value, + instance_type="t3.micro", subnet_id=Token.as_string(default_var.id), tags={ "Environment": environment.string_value, @@ -209,7 +209,7 @@ class MyConvertedCode(TerraformStack): Instance(self, "web_server", ami=Token.as_string(amazon_linux.id), iam_instance_profile=ec2_ssm_profile.name, - instance_type=instance_type.string_value, + instance_type="t3.micro", subnet_id=Token.as_string(default_var.id), tags={ "Environment": environment.string_value, @@ -240,13 +240,13 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The name of the SSM document to apply. * `apply_only_at_cron_interval` - (Optional) By default, when you create a new or update associations, the system runs it immediately and then according to the schedule you specified. Enable this option if you do not want an association to run immediately after you create or update it. This parameter is not supported for rate expressions. Default: `false`. * `association_name` - (Optional) The descriptive name for the association. 
* `automation_target_parameter_name` - (Optional) Specify the target for the association. This target is required for associations that use an `Automation` document and target resources by using rate controls. This should be set to the SSM document `parameter` that will define how your automation will branch out. * `compliance_severity` - (Optional) The compliance severity for the association. Can be one of the following: `UNSPECIFIED`, `LOW`, `MEDIUM`, `HIGH` or `CRITICAL` * `document_version` - (Optional) The document version you want to associate with the target(s). Can be a specific version or the default version. -* `instance_id` - (Optional, **Deprecated**) The instance ID to apply an SSM document to. Use `targets` with key `InstanceIds` for document schema versions 2.0 and above. Use the `targets` attribute instead. * `max_concurrency` - (Optional) The maximum number of targets allowed to run the association at the same time. You can specify a number, for example 10, or a percentage of the target set, for example 10%. * `max_errors` - (Optional) The number of errors that are allowed before the system stops sending requests to run the association on additional targets. You can specify a number, for example 10, or a percentage of the target set, for example 10%. If you specify a threshold of 3, the stop command is sent when the fourth error is returned. If you specify a threshold of 10% for 50 associations, the stop command is sent when the sixth error is returned. * `output_location` - (Optional) An output location block. Output Location is documented below. @@ -274,13 +274,38 @@ This resource exports the following attributes in addition to the arguments abov * `arn` - The ARN of the SSM association * `association_id` - The ID of the SSM association. -* `instance_id` - The instance id that the SSM document was applied to. * `name` - The name of the SSM document to apply. * `parameters` - Additional parameters passed to the SSM document. 
* `tags_all` - A map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_ssm_association.example + identity = { + association_id = "10abcdef-0abc-1234-5678-90abcdef123456" + } +} + +resource "aws_ssm_association" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `association_id` - (String) ID of the SSM association. + +#### Optional + +* `account_id` (String) AWS Account where this resource is managed. +* `region` (String) Region where this resource is managed. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import SSM associations using the `association_id`. For example: ```python @@ -295,13 +320,13 @@ from imports.aws.ssm_association import SsmAssociation class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) - SsmAssociation.generate_config_for_import(self, "testAssociation", "10abcdef-0abc-1234-5678-90abcdef123456") + SsmAssociation.generate_config_for_import(self, "example", "10abcdef-0abc-1234-5678-90abcdef123456") ``` Using `terraform import`, import SSM associations using the `association_id`. 
For example: ```console -% terraform import aws_ssm_association.test-association 10abcdef-0abc-1234-5678-90abcdef123456 +% terraform import aws_ssm_association.example 10abcdef-0abc-1234-5678-90abcdef123456 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/ssm_default_patch_baseline.html.markdown b/website/docs/cdktf/python/r/ssm_default_patch_baseline.html.markdown index b96b0f517cd6..6c40316a986b 100644 --- a/website/docs/cdktf/python/r/ssm_default_patch_baseline.html.markdown +++ b/website/docs/cdktf/python/r/ssm_default_patch_baseline.html.markdown @@ -43,8 +43,9 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -The following arguments are required: +This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `baseline_id` - (Required) ID of the patch baseline. Can be an ID or an ARN. When specifying an AWS-provided patch baseline, must be the ARN. @@ -143,4 +144,4 @@ Using the operating system value: % terraform import aws_ssm_default_patch_baseline.example CENTOS ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/ssm_document.html.markdown b/website/docs/cdktf/python/r/ssm_document.html.markdown index 57ccfa08d940..db46045c9efb 100644 --- a/website/docs/cdktf/python/r/ssm_document.html.markdown +++ b/website/docs/cdktf/python/r/ssm_document.html.markdown @@ -64,6 +64,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The name of the document. * `attachments_source` - (Optional) One or more configuration blocks describing attachments sources to a version of a document. See [`attachments_source` block](#attachments_source-block) below for details. * `content` - (Required) The content for the SSM document in JSON or YAML format. The content of the document must not exceed 64KB. This quota also includes the content specified for input parameters at runtime. We recommend storing the contents for your new document in an external JSON or YAML file and referencing the file in a command. @@ -125,6 +126,32 @@ The `parameter` configuration block provides the following attributes: ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_ssm_document.example + identity = { + name = "example" + } +} + +resource "aws_ssm_document" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `name` - (String) Name of the SSM document. + +#### Optional + +* `account_id` (String) AWS Account where this resource is managed. +* `region` (String) Region where this resource is managed. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import SSM Documents using the name. 
For example: ```python @@ -178,4 +205,4 @@ class MyConvertedCode(TerraformStack): ) ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/ssm_maintenance_window.html.markdown b/website/docs/cdktf/python/r/ssm_maintenance_window.html.markdown index 6ab9249f5860..5244b20cbe2b 100644 --- a/website/docs/cdktf/python/r/ssm_maintenance_window.html.markdown +++ b/website/docs/cdktf/python/r/ssm_maintenance_window.html.markdown @@ -38,6 +38,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The name of the maintenance window. * `schedule` - (Required) The schedule of the Maintenance Window in the form of a [cron or rate expression](https://docs.aws.amazon.com/systems-manager/latest/userguide/reference-cron-and-rate-expressions.html). * `cutoff` - (Required) The number of hours before the end of the Maintenance Window that Systems Manager stops scheduling new tasks for execution. @@ -60,6 +61,32 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_ssm_maintenance_window.example + identity = { + id = "mw-0123456789" + } +} + +resource "aws_ssm_maintenance_window" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `id` - (String) ID of the maintenance window. + +#### Optional + +* `account_id` (String) AWS Account where this resource is managed. 
+* `region` (String) Region where this resource is managed. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import SSM Maintenance Windows using the maintenance window `id`. For example: ```python @@ -74,13 +101,13 @@ from imports.aws.ssm_maintenance_window import SsmMaintenanceWindow class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) - SsmMaintenanceWindow.generate_config_for_import(self, "importedWindow", "mw-0123456789") + SsmMaintenanceWindow.generate_config_for_import(self, "example", "mw-0123456789") ``` Using `terraform import`, import SSM Maintenance Windows using the maintenance window `id`. For example: ```console -% terraform import aws_ssm_maintenance_window.imported-window mw-0123456789 +% terraform import aws_ssm_maintenance_window.example mw-0123456789 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/ssm_maintenance_window_target.html.markdown b/website/docs/cdktf/python/r/ssm_maintenance_window_target.html.markdown index 140a4ac1abc9..ab7a7a2cc25c 100644 --- a/website/docs/cdktf/python/r/ssm_maintenance_window_target.html.markdown +++ b/website/docs/cdktf/python/r/ssm_maintenance_window_target.html.markdown @@ -86,6 +86,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `window_id` - (Required) The Id of the maintenance window to register the target with. * `name` - (Optional) The name of the maintenance window target. * `description` - (Optional) The description of the maintenance window target. 
@@ -102,6 +103,34 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_ssm_maintenance_window_target.example + identity = { + window_id = "mw-0c50858d01EXAMPLE" + id = "23639a0b-ddbc-4bca-9e72-78d96EXAMPLE" + } +} + +resource "aws_ssm_maintenance_window_target" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `window_id` - (String) ID of the maintenance window. +* `id` - (String) ID of the maintenance window target. + +#### Optional + +* `account_id` (String) AWS Account where this resource is managed. +* `region` (String) Region where this resource is managed. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import SSM Maintenance Window targets using `WINDOW_ID/WINDOW_TARGET_ID`. For example: ```python @@ -125,4 +154,4 @@ Using `terraform import`, import SSM Maintenance Window targets using `WINDOW_ID % terraform import aws_ssm_maintenance_window_target.example mw-0c50858d01EXAMPLE/23639a0b-ddbc-4bca-9e72-78d96EXAMPLE ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/ssm_maintenance_window_task.html.markdown b/website/docs/cdktf/python/r/ssm_maintenance_window_task.html.markdown index cefe0e2b5a9b..b2cfdbe3babc 100644 --- a/website/docs/cdktf/python/r/ssm_maintenance_window_task.html.markdown +++ b/website/docs/cdktf/python/r/ssm_maintenance_window_task.html.markdown @@ -175,6 +175,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `window_id` - (Required) The Id of the maintenance window to register the task with. * `max_concurrency` - (Optional) The maximum number of targets this task can be run for in parallel. * `max_errors` - (Optional) The maximum number of errors allowed before this task stops being scheduled. @@ -250,6 +251,34 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_ssm_maintenance_window_task.example + identity = { + window_id = "mw-0c50858d01EXAMPLE" + id = "4f7ca192-7e9a-40fe-9192-5cb15EXAMPLE" + } +} + +resource "aws_ssm_maintenance_window_task" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `window_id` - (String) ID of the maintenance window. +* `id` - (String) ID of the maintenance window task. + +#### Optional + +* `account_id` (String) AWS Account where this resource is managed. +* `region` (String) Region where this resource is managed. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import AWS Maintenance Window Task using the `window_id` and `window_task_id` separated by `/`. 
For example: ```python @@ -264,13 +293,13 @@ from imports.aws.ssm_maintenance_window_task import SsmMaintenanceWindowTask class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) - SsmMaintenanceWindowTask.generate_config_for_import(self, "task", "/") + SsmMaintenanceWindowTask.generate_config_for_import(self, "example", "/") ``` Using `terraform import`, import AWS Maintenance Window Task using the `window_id` and `window_task_id` separated by `/`. For example: ```console -% terraform import aws_ssm_maintenance_window_task.task / +% terraform import aws_ssm_maintenance_window_task.example / ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/ssm_parameter.html.markdown b/website/docs/cdktf/python/r/ssm_parameter.html.markdown index b8bb77ca8d3c..97f075a52d58 100644 --- a/website/docs/cdktf/python/r/ssm_parameter.html.markdown +++ b/website/docs/cdktf/python/r/ssm_parameter.html.markdown @@ -14,7 +14,7 @@ Provides an SSM Parameter resource. ~> **Note:** The `overwrite` argument makes it possible to overwrite an existing SSM Parameter created outside of Terraform. --> **Note:** Write-Only argument `value_wo` is available to use in place of `value`. Write-Only arguments are supported in HashiCorp Terraform 1.11.0 and later. [Learn more](https://developer.hashicorp.com/terraform/language/v1.11.x/resources/ephemeral#write-only-arguments). +-> **Note:** Write-Only argument `value_wo` is available to use in place of `value`. Write-Only arguments are supported in HashiCorp Terraform 1.11.0 and later. [Learn more](https://developer.hashicorp.com/terraform/language/resources/ephemeral#write-only-arguments). ## Example Usage @@ -89,6 +89,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `allowed_pattern` - (Optional) Regular expression used to validate the parameter value. * `data_type` - (Optional) Data type of the parameter. Valid values: `text`, `aws:ssm:integration` and `aws:ec2:image` for AMI format, see the [Native parameter support for Amazon Machine Image IDs](https://docs.aws.amazon.com/systems-manager/latest/userguide/parameter-store-ec2-aliases.html). * `description` - (Optional) Description of the parameter. @@ -114,6 +115,32 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_ssm_parameter.example + identity = { + name = "/my_path/my_paramname" + } +} + +resource "aws_ssm_parameter" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `name` - (String) Name of the parameter. + +#### Optional + +* `account_id` (String) AWS Account where this resource is managed. +* `region` (String) Region where this resource is managed. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import SSM Parameters using the parameter store `name`. For example: ```python @@ -128,13 +155,13 @@ from imports.aws.ssm_parameter import SsmParameter class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) - SsmParameter.generate_config_for_import(self, "myParam", "/my_path/my_paramname") + SsmParameter.generate_config_for_import(self, "example", "/my_path/my_paramname") ``` Using `terraform import`, import SSM Parameters using the parameter store `name`. 
For example: ```console -% terraform import aws_ssm_parameter.my_param /my_path/my_paramname +% terraform import aws_ssm_parameter.example /my_path/my_paramname ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/ssm_patch_baseline.html.markdown b/website/docs/cdktf/python/r/ssm_patch_baseline.html.markdown index d45bcb545ae3..e302d6f5c0b2 100644 --- a/website/docs/cdktf/python/r/ssm_patch_baseline.html.markdown +++ b/website/docs/cdktf/python/r/ssm_patch_baseline.html.markdown @@ -178,10 +178,12 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `approval_rule` - (Optional) Set of rules used to include patches in the baseline. Up to 10 approval rules can be specified. See [`approval_rule`](#approval_rule-block) below. * `approved_patches_compliance_level` - (Optional) Compliance level for approved patches. This means that if an approved patch is reported as missing, this is the severity of the compliance violation. Valid values are `CRITICAL`, `HIGH`, `MEDIUM`, `LOW`, `INFORMATIONAL`, `UNSPECIFIED`. The default value is `UNSPECIFIED`. * `approved_patches_enable_non_security` - (Optional) Whether the list of approved patches includes non-security updates that should be applied to the instances. Applies to Linux instances only. * `approved_patches` - (Optional) List of explicitly approved patches for the baseline. Cannot be specified with `approval_rule`. +* `available_security_updates_compliance_status` - (Optional) Indicates the compliance status of managed nodes for which security-related patches are available but were not approved. Supported for Windows Server managed nodes only. 
Valid values are `COMPLIANT`, `NON_COMPLIANT`. * `description` - (Optional) Description of the patch baseline. * `global_filter` - (Optional) Set of global filters used to exclude patches from the baseline. Up to 4 global filters can be specified using Key/Value pairs. Valid Keys are `PRODUCT`, `CLASSIFICATION`, `MSRC_SEVERITY`, and `PATCH_ID`. * `operating_system` - (Optional) Operating system the patch baseline applies to. Valid values are `ALMA_LINUX`, `AMAZON_LINUX`, `AMAZON_LINUX_2`, `AMAZON_LINUX_2022`, `AMAZON_LINUX_2023`, `CENTOS`, `DEBIAN`, `MACOS`, `ORACLE_LINUX`, `RASPBIAN`, `REDHAT_ENTERPRISE_LINUX`, `ROCKY_LINUX`, `SUSE`, `UBUNTU`, and `WINDOWS`. The default value is `WINDOWS`. @@ -219,6 +221,32 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_ssm_patch_baseline.example + identity = { + id = "pb-12345678" + } +} + +resource "aws_ssm_patch_baseline" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `id` - (String) ID of the patch baseline. + +#### Optional + +* `account_id` (String) AWS Account where this resource is managed. +* `region` (String) Region where this resource is managed. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import SSM Patch Baselines using their baseline ID. For example: ```python @@ -242,4 +270,4 @@ Using `terraform import`, import SSM Patch Baselines using their baseline ID. 
Fo % terraform import aws_ssm_patch_baseline.example pb-12345678 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/ssm_patch_group.html.markdown b/website/docs/cdktf/python/r/ssm_patch_group.html.markdown index a5cd4adf3cad..14cd119c0dcc 100644 --- a/website/docs/cdktf/python/r/ssm_patch_group.html.markdown +++ b/website/docs/cdktf/python/r/ssm_patch_group.html.markdown @@ -41,6 +41,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `baseline_id` - (Required) The ID of the patch baseline to register the patch group with. * `patch_group` - (Required) The name of the patch group that should be registered with the patch baseline. @@ -50,4 +51,4 @@ This resource exports the following attributes in addition to the arguments abov * `id` - The name of the patch group and ID of the patch baseline separated by a comma (`,`). - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/ssm_resource_data_sync.html.markdown b/website/docs/cdktf/python/r/ssm_resource_data_sync.html.markdown index 37679013c0c1..3d1b152ede9c 100644 --- a/website/docs/cdktf/python/r/ssm_resource_data_sync.html.markdown +++ b/website/docs/cdktf/python/r/ssm_resource_data_sync.html.markdown @@ -83,6 +83,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name for the configuration. * `s3_destination` - (Required) Amazon S3 configuration details for the sync. @@ -125,4 +126,4 @@ Using `terraform import`, import SSM resource data sync using the `name`. For ex % terraform import aws_ssm_resource_data_sync.example example-name ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/ssm_service_setting.html.markdown b/website/docs/cdktf/python/r/ssm_service_setting.html.markdown index 2efdf572a782..bc41a89934f7 100644 --- a/website/docs/cdktf/python/r/ssm_service_setting.html.markdown +++ b/website/docs/cdktf/python/r/ssm_service_setting.html.markdown @@ -36,7 +36,8 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: -* `setting_id` - (Required) ID of the service setting. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `setting_id` - (Required) ID of the service setting. Valid values are shown in the [AWS documentation](https://docs.aws.amazon.com/systems-manager/latest/APIReference/API_GetServiceSetting.html#API_GetServiceSetting_RequestSyntax). * `setting_value` - (Required) Value of the service setting. ## Attribute Reference @@ -71,4 +72,4 @@ Using `terraform import`, import AWS SSM Service Setting using the `setting_id`. 
% terraform import aws_ssm_service_setting.example arn:aws:ssm:us-east-1:123456789012:servicesetting/ssm/parameter-store/high-throughput-enabled ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/ssmcontacts_contact.html.markdown b/website/docs/cdktf/python/r/ssmcontacts_contact.html.markdown index ea4387a63ae0..df3b7551d629 100644 --- a/website/docs/cdktf/python/r/ssmcontacts_contact.html.markdown +++ b/website/docs/cdktf/python/r/ssmcontacts_contact.html.markdown @@ -72,8 +72,9 @@ The following arguments are required: The following arguments are optional: +- `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). - `display_name` - (Optional) Full friendly name of the contact or escalation plan. If set, must be between 1 and 255 characters, and may contain alphanumerics, underscores (`_`), hyphens (`-`), periods (`.`), and spaces. -- `tags` - (Optional) Map of tags to assign to the resource. +- `tags` - (Optional) Key-value tags for the contact. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. ## Attribute Reference @@ -107,4 +108,4 @@ Using `terraform import`, import SSM Contact using the `ARN`.
For example: % terraform import aws_ssmcontacts_contact.example {ARNValue} ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/ssmcontacts_contact_channel.html.markdown b/website/docs/cdktf/python/r/ssmcontacts_contact_channel.html.markdown index 8c587e304180..0f1b19cb7f2b 100644 --- a/website/docs/cdktf/python/r/ssmcontacts_contact_channel.html.markdown +++ b/website/docs/cdktf/python/r/ssmcontacts_contact_channel.html.markdown @@ -71,8 +71,9 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -The following arguments are required: +This resource supports the following arguments: +- `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). - `contact_id` - (Required) Amazon Resource Name (ARN) of the AWS SSM Contact that the contact channel belongs to. - `delivery_address` - (Required) Block that contains contact engagement details. See details below. - `name` - (Required) Name of the contact channel. Must be between 1 and 255 characters, and may contain alphanumerics, underscores (`_`), hyphens (`-`), periods (`.`), and spaces. @@ -91,7 +92,28 @@ This resource exports the following attributes in addition to the arguments abov ## Import -In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import SSM Contact Channel using the `ARN`. For example: +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. 
For example: + +```terraform +import { + to = aws_ssmcontacts_contact_channel.example + identity = { + "arn" = "arn:aws:ssm-contacts:us-west-2:123456789012:contact-channel/example" + } +} + +resource "aws_ssmcontacts_contact_channel" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the contact channel. + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import SSM Contact Channel using the `arn`. For example: ```python # DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug @@ -108,10 +130,10 @@ class MyConvertedCode(TerraformStack): SsmcontactsContactChannel.generate_config_for_import(self, "example", "arn:aws:ssm-contacts:us-west-2:123456789012:contact-channel/example") ``` -Using `terraform import`, import SSM Contact Channel using the `ARN`. For example: +Using `terraform import`, import SSM Contact Channel using the `arn`. For example: ```console % terraform import aws_ssmcontacts_contact_channel.example arn:aws:ssm-contacts:us-west-2:123456789012:contact-channel/example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/ssmcontacts_plan.html.markdown b/website/docs/cdktf/python/r/ssmcontacts_plan.html.markdown index 8c3992534c68..b00cba30f7b8 100644 --- a/website/docs/cdktf/python/r/ssmcontacts_plan.html.markdown +++ b/website/docs/cdktf/python/r/ssmcontacts_plan.html.markdown @@ -120,8 +120,9 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -The following arguments are required: +This resource supports the following arguments: +- `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). - `contact_id` - (Required) The Amazon Resource Name (ARN) of the contact or escalation plan. - `stage` - (Required) One or more configuration blocks for specifying a list of stages that the escalation plan or engagement plan uses to engage contacts and contact methods. See [Stage](#stage) below for more details. @@ -190,4 +191,4 @@ Using `terraform import`, import SSM Contact Plan using the Contact ARN. For exa % terraform import aws_ssmcontacts_plan.example {ARNValue} ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/ssmcontacts_rotation.html.markdown b/website/docs/cdktf/python/r/ssmcontacts_rotation.html.markdown index 72049238aa7f..bfc3ee781497 100644 --- a/website/docs/cdktf/python/r/ssmcontacts_rotation.html.markdown +++ b/website/docs/cdktf/python/r/ssmcontacts_rotation.html.markdown @@ -167,6 +167,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `start_time` - (Optional) The date and time, in RFC 3339 format, that the rotation goes into effect. * `tags` - (Optional) A map of tags to assign to the resource. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. 
@@ -218,6 +219,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_ssmcontacts_rotation.example + identity = { + "arn" = "arn:aws:ssm-contacts:us-east-1:123456789012:rotation/example-rotation" + } +} + +resource "aws_ssmcontacts_rotation" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the SSM Contacts rotation. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import SSMContacts Rotation using the `arn`. For example: ```python @@ -241,4 +263,4 @@ Using `terraform import`, import CodeGuru Profiler Profiling Group using the `ar % terraform import aws_ssmcontacts_rotation.example arn:aws:ssm-contacts:us-east-1:012345678910:rotation/example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/ssmincidents_replication_set.html.markdown b/website/docs/cdktf/python/r/ssmincidents_replication_set.html.markdown index 18079f9b56e0..f3d0c2344124 100644 --- a/website/docs/cdktf/python/r/ssmincidents_replication_set.html.markdown +++ b/website/docs/cdktf/python/r/ssmincidents_replication_set.html.markdown @@ -33,7 +33,7 @@ class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) SsmincidentsReplicationSet(self, "replicationSetName", - region=[SsmincidentsReplicationSetRegion( + regions=[SsmincidentsReplicationSetRegions( name="us-west-2" ) ], @@ -58,9 +58,9 @@ class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) SsmincidentsReplicationSet(self, "replicationSetName", - region=[SsmincidentsReplicationSetRegion( + 
regions=[SsmincidentsReplicationSetRegions( name="us-west-2" - ), SsmincidentsReplicationSetRegion( + ), SsmincidentsReplicationSetRegions( name="ap-southeast-2" ) ] @@ -82,7 +82,7 @@ class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) SsmincidentsReplicationSet(self, "replicationSetName", - region=[SsmincidentsReplicationSetRegion( + regions=[SsmincidentsReplicationSetRegions( name="us-west-2" ) ] @@ -108,7 +108,7 @@ class MyConvertedCode(TerraformStack): super().__init__(scope, name) example_key = KmsKey(self, "example_key") SsmincidentsReplicationSet(self, "replicationSetName", - region=[SsmincidentsReplicationSetRegion( + regions=[SsmincidentsReplicationSetRegions( kms_key_arn=example_key.arn, name="us-west-2" ) @@ -123,7 +123,8 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: -* `region` - (Required) The Regions that Incident Manager replicates your data to. You can have up to three Regions in your replication set. +* `region` - (Optional, **Deprecated**) The replication set's Regions. Use `regions` instead. +* `regions` - (Optional) The replication set's Regions. * `tags` - (Optional) Key-value map of resource tags. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. For information about the maximum allowed number of Regions and tag value constraints, see [CreateReplicationSet in the *AWS Systems Manager Incident Manager API Reference*](https://docs.aws.amazon.com/incident-manager/latest/APIReference/API_CreateReplicationSet.html). 
@@ -138,7 +139,7 @@ For information about the maximum allowed number of Regions and tag value constr ~> **NOTE:** If possible, create all the customer managed keys you need (using the `terraform apply` command) before you create the replication set, or create the keys and replication set in the same `terraform apply` command. Otherwise, to delete a replication set, you must run one `terraform apply` command to delete the replication set and another to delete the AWS KMS keys used by the replication set. Deleting the AWS KMS keys before deleting the replication set results in an error. In that case, you must manually reenable the deleted key using the AWS Management Console before you can delete the replication set. -The `region` configuration block supports the following arguments: +The `regions` configuration block supports the following arguments: * `name` - (Required) The name of the Region, such as `ap-southeast-2`. * `kms_key_arn` - (Optional) The Amazon Resource name (ARN) of the customer managed key. If omitted, AWS manages the AWS KMS keys for you, using an AWS owned key, as indicated by a default value of `DefaultKey`. @@ -157,7 +158,7 @@ This resource exports the following attributes in addition to the arguments abov * `status` - The overall status of a replication set. * Valid Values: `ACTIVE` | `CREATING` | `UPDATING` | `DELETING` | `FAILED` -In addition to the preceding arguments, the `region` configuration block exports the following attributes for each Region: +In addition to the preceding arguments, the `regions` configuration block exports the following attributes for each Region: * `status` - The current status of the Region. * Valid Values: `ACTIVE` | `CREATING` | `UPDATING` | `DELETING` | `FAILED` @@ -201,4 +202,4 @@ Using `terraform import`, import an Incident Manager replication. 
For example: % terraform import aws_ssmincidents_replication_set.replicationSetName import ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/ssmincidents_response_plan.html.markdown b/website/docs/cdktf/python/r/ssmincidents_response_plan.html.markdown index e0aeae054af8..7f8014199a5e 100644 --- a/website/docs/cdktf/python/r/ssmincidents_response_plan.html.markdown +++ b/website/docs/cdktf/python/r/ssmincidents_response_plan.html.markdown @@ -118,6 +118,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The name of the response plan. * `incident_template` - (Required) The `incident_template` configuration block is required and supports the following arguments: * `title` - (Required) The title of a generated incident. @@ -186,4 +187,4 @@ Using `terraform import`, import an Incident Manager response plan using the res % terraform import aws_ssmincidents_response_plan.responsePlanName ARNValue ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/ssmquicksetup_configuration_manager.html.markdown b/website/docs/cdktf/python/r/ssmquicksetup_configuration_manager.html.markdown index 157bd38cfadc..1e0dab28847a 100644 --- a/website/docs/cdktf/python/r/ssmquicksetup_configuration_manager.html.markdown +++ b/website/docs/cdktf/python/r/ssmquicksetup_configuration_manager.html.markdown @@ -53,14 +53,14 @@ class MyConvertedCode(TerraformStack): "ConfigurationOptionsScanValue": "cron(0 1 * * ? 
*)", "IsPolicyAttachAllowed": "false", "OutputLogEnableS3": "false", - "PatchBaselineRegion": Token.as_string(data_aws_region_current.name), + "PatchBaselineRegion": Token.as_string(data_aws_region_current.region), "PatchBaselineUseDefault": "default", "PatchPolicyName": "example", "RateControlConcurrency": "10%", "RateControlErrorThreshold": "2%", "SelectedPatchBaselines": selected_patch_baselines, "TargetAccounts": Token.as_string(current.account_id), - "TargetRegions": Token.as_string(data_aws_region_current.name), + "TargetRegions": Token.as_string(data_aws_region_current.region), "TargetType": "*" }, type="AWSQuickSetupType-PatchPolicy" @@ -81,6 +81,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` - (Optional) Description of the configuration manager. * `tags` - (Optional) Map of tags assigned to the resource. If configured with a provider [`default_tags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. 
@@ -139,4 +140,4 @@ Using `terraform import`, import SSM Quick Setup Configuration Manager using the % terraform import aws_ssmquicksetup_configuration_manager.example arn:aws:ssm-quicksetup:us-east-1:012345678901:configuration-manager/abcd-1234 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/ssoadmin_account_assignment.html.markdown b/website/docs/cdktf/python/r/ssoadmin_account_assignment.html.markdown index d4cc06175b4b..f93c2275e42c 100644 --- a/website/docs/cdktf/python/r/ssoadmin_account_assignment.html.markdown +++ b/website/docs/cdktf/python/r/ssoadmin_account_assignment.html.markdown @@ -126,6 +126,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `instance_arn` - (Required, Forces new resource) The Amazon Resource Name (ARN) of the SSO Instance. * `permission_set_arn` - (Required, Forces new resource) The Amazon Resource Name (ARN) of the Permission Set that the admin wants to grant the principal access to. * `principal_id` - (Required, Forces new resource) An identifier for an object in SSO, such as a user or group. PrincipalIds are GUIDs (For example, `f81d4fae-7dec-11d0-a765-00a0c91e6bf6`). 
@@ -171,4 +172,4 @@ Using `terraform import`, import SSO Account Assignments using the `principal_id % terraform import aws_ssoadmin_account_assignment.example f81d4fae-7dec-11d0-a765-00a0c91e6bf6,GROUP,1234567890,AWS_ACCOUNT,arn:aws:sso:::permissionSet/ssoins-0123456789abcdef/ps-0123456789abcdef,arn:aws:sso:::instance/ssoins-0123456789abcdef ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/ssoadmin_application.html.markdown b/website/docs/cdktf/python/r/ssoadmin_application.html.markdown index aeca43b1434b..4e3b28adcffd 100644 --- a/website/docs/cdktf/python/r/ssoadmin_application.html.markdown +++ b/website/docs/cdktf/python/r/ssoadmin_application.html.markdown @@ -88,6 +88,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `client_token` - (Optional) A unique, case-sensitive ID that you provide to ensure the idempotency of the request. AWS generates a random value when not provided. * `description` - (Optional) Description of the application. * `portal_options` - (Optional) Options for the portal associated with an application. See [`portal_options`](#portal_options-argument-reference) below. @@ -112,12 +113,34 @@ If `IDENTITY_CENTER` is set, IAM Identity Center uses SAML identity-provider ini This resource exports the following attributes in addition to the arguments above: * `application_account` - AWS account ID. -* `application_arn` - ARN of the application. -* `id` - ARN of the application. +* `application_arn` - (**Deprecated** Reference `arn` instead) ARN of the application. +* `arn` - ARN of the application. 
+* `id` - (**Deprecated** Reference `arn` instead) ARN of the application. * `tags_all` - Map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block). ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_ssoadmin_application.example + identity = { + "arn" = "arn:aws:sso::123456789012:application/ssoins-1234567890abcdef/apl-1234567890abcdef" + } +} + +resource "aws_ssoadmin_application" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the SSO application. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import SSO Admin Application using the `id`. For example: ```python @@ -141,4 +164,4 @@ Using `terraform import`, import SSO Admin Application using the `id`. 
For examp % terraform import aws_ssoadmin_application.example arn:aws:sso::123456789012:application/id-12345678 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/ssoadmin_application_access_scope.html.markdown b/website/docs/cdktf/python/r/ssoadmin_application_access_scope.html.markdown index c7b36e531e3b..dd178f911775 100644 --- a/website/docs/cdktf/python/r/ssoadmin_application_access_scope.html.markdown +++ b/website/docs/cdktf/python/r/ssoadmin_application_access_scope.html.markdown @@ -40,7 +40,7 @@ class MyConvertedCode(TerraformStack): aws_ssoadmin_application_example.override_logical_id("example") aws_ssoadmin_application_access_scope_example = SsoadminApplicationAccessScope(self, "example_2", - application_arn=Token.as_string(aws_ssoadmin_application_example.application_arn), + application_arn=Token.as_string(aws_ssoadmin_application_example.arn), authorized_targets=["arn:aws:sso::123456789012:application/ssoins-123456789012/apl-123456789012" ], scope="sso:account:access" @@ -58,6 +58,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `authorized_targets` - (Optional) Specifies an array list of ARNs that represent the authorized targets for this access scope. 
## Attribute Reference @@ -91,4 +92,4 @@ Using `terraform import`, import SSO Admin Application Access Scope using the `i % terraform import aws_ssoadmin_application_access_scope.example arn:aws:sso::123456789012:application/ssoins-123456789012/apl-123456789012,sso:account:access ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/ssoadmin_application_assignment.html.markdown b/website/docs/cdktf/python/r/ssoadmin_application_assignment.html.markdown index 528ee1479eca..63a86b99f903 100644 --- a/website/docs/cdktf/python/r/ssoadmin_application_assignment.html.markdown +++ b/website/docs/cdktf/python/r/ssoadmin_application_assignment.html.markdown @@ -28,7 +28,7 @@ class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) SsoadminApplicationAssignment(self, "example", - application_arn=Token.as_string(aws_ssoadmin_application_example.application_arn), + application_arn=Token.as_string(aws_ssoadmin_application_example.arn), principal_id=Token.as_string(aws_identitystore_user_example.user_id), principal_type="USER" ) @@ -49,7 +49,7 @@ class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) SsoadminApplicationAssignment(self, "example", - application_arn=Token.as_string(aws_ssoadmin_application_example.application_arn), + application_arn=Token.as_string(aws_ssoadmin_application_example.arn), principal_id=Token.as_string(aws_identitystore_group_example.group_id), principal_type="GROUP" ) @@ -57,8 +57,9 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -The following arguments are required: +This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `application_arn` - (Required) ARN of the application. * `principal_id` - (Required) An identifier for an object in IAM Identity Center, such as a user or group. * `principal_type` - (Required) Entity type for which the assignment will be created. Valid values are `USER` or `GROUP`. @@ -94,4 +95,4 @@ Using `terraform import`, import SSO Admin Application Assignment using the `id` % terraform import aws_ssoadmin_application_assignment.example arn:aws:sso::123456789012:application/id-12345678,abcd1234,USER ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/ssoadmin_application_assignment_configuration.html.markdown b/website/docs/cdktf/python/r/ssoadmin_application_assignment_configuration.html.markdown index eeb35f51d8f0..3e389bdb0015 100644 --- a/website/docs/cdktf/python/r/ssoadmin_application_assignment_configuration.html.markdown +++ b/website/docs/cdktf/python/r/ssoadmin_application_assignment_configuration.html.markdown @@ -33,15 +33,16 @@ class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) SsoadminApplicationAssignmentConfiguration(self, "example", - application_arn=Token.as_string(aws_ssoadmin_application_example.application_arn), + application_arn=Token.as_string(aws_ssoadmin_application_example.arn), assignment_required=True ) ``` ## Argument Reference -The following arguments are required: +This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `application_arn` - (Required) ARN of the application. 
* `assignment_required` - (Required) Indicates whether users must have an explicit assignment to access the application. If `false`, all users have access to the application. @@ -53,6 +54,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_ssoadmin_application_assignment_configuration.example + identity = { + "arn" = "arn:aws:sso::123456789012:application/ssoins-1234567890abcdef/apl-1234567890abcdef" + } +} + +resource "aws_ssoadmin_application_assignment_configuration" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the SSO application. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import SSO Admin Application Assignment Configuration using the `id`. 
For example: ```python @@ -76,4 +98,4 @@ Using `terraform import`, import SSO Admin Application Assignment Configuration % terraform import aws_ssoadmin_application_assignment_configuration.example arn:aws:sso::123456789012:application/id-12345678 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/ssoadmin_customer_managed_policy_attachment.html.markdown b/website/docs/cdktf/python/r/ssoadmin_customer_managed_policy_attachment.html.markdown index 2771400ad4c8..36d66c175889 100644 --- a/website/docs/cdktf/python/r/ssoadmin_customer_managed_policy_attachment.html.markdown +++ b/website/docs/cdktf/python/r/ssoadmin_customer_managed_policy_attachment.html.markdown @@ -73,6 +73,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `instance_arn` - (Required, Forces new resource) The Amazon Resource Name (ARN) of the SSO Instance under which the operation will be executed. * `permission_set_arn` - (Required, Forces new resource) The Amazon Resource Name (ARN) of the Permission Set. * `customer_managed_policy_reference` - (Required, Forces new resource) Specifies the name and path of a customer managed policy. See below. 
@@ -122,4 +123,4 @@ Using `terraform import`, import SSO Managed Policy Attachments using the `name` % terraform import aws_ssoadmin_customer_managed_policy_attachment.example TestPolicy,/,arn:aws:sso:::permissionSet/ssoins-2938j0x8920sbj72/ps-80383020jr9302rk,arn:aws:sso:::instance/ssoins-2938j0x8920sbj72 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/ssoadmin_instance_access_control_attributes.html.markdown b/website/docs/cdktf/python/r/ssoadmin_instance_access_control_attributes.html.markdown index 953dbe619120..89fe84309ab3 100644 --- a/website/docs/cdktf/python/r/ssoadmin_instance_access_control_attributes.html.markdown +++ b/website/docs/cdktf/python/r/ssoadmin_instance_access_control_attributes.html.markdown @@ -55,6 +55,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `instance_arn` - (Required, Forces new resource) The Amazon Resource Name (ARN) of the SSO Instance. * `attribute` - (Required) See [AccessControlAttribute](#accesscontrolattribute) for more details. 
@@ -98,4 +99,4 @@ Using `terraform import`, import SSO Account Assignments using the `instance_arn % terraform import aws_ssoadmin_instance_access_control_attributes.example arn:aws:sso:::instance/ssoins-0123456789abcdef ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/ssoadmin_managed_policy_attachment.html.markdown b/website/docs/cdktf/python/r/ssoadmin_managed_policy_attachment.html.markdown index b1294b521e56..d332cb026b62 100644 --- a/website/docs/cdktf/python/r/ssoadmin_managed_policy_attachment.html.markdown +++ b/website/docs/cdktf/python/r/ssoadmin_managed_policy_attachment.html.markdown @@ -114,6 +114,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `instance_arn` - (Required, Forces new resource) The Amazon Resource Name (ARN) of the SSO Instance under which the operation will be executed. * `managed_policy_arn` - (Required, Forces new resource) The IAM managed policy Amazon Resource Name (ARN) to be attached to the Permission Set. * `permission_set_arn` - (Required, Forces new resource) The Amazon Resource Name (ARN) of the Permission Set. 
@@ -157,4 +158,4 @@ Using `terraform import`, import SSO Managed Policy Attachments using the `manag % terraform import aws_ssoadmin_managed_policy_attachment.example arn:aws:iam::aws:policy/AlexaForBusinessDeviceSetup,arn:aws:sso:::permissionSet/ssoins-2938j0x8920sbj72/ps-80383020jr9302rk,arn:aws:sso:::instance/ssoins-2938j0x8920sbj72 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/ssoadmin_permission_set.html.markdown b/website/docs/cdktf/python/r/ssoadmin_permission_set.html.markdown index 8e03a04b3487..1831655d829c 100644 --- a/website/docs/cdktf/python/r/ssoadmin_permission_set.html.markdown +++ b/website/docs/cdktf/python/r/ssoadmin_permission_set.html.markdown @@ -46,6 +46,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` - (Optional) The description of the Permission Set. * `instance_arn` - (Required, Forces new resource) The Amazon Resource Name (ARN) of the SSO Instance under which the operation will be executed. * `name` - (Required, Forces new resource) The name of the Permission Set. 
@@ -93,4 +94,4 @@ Using `terraform import`, import SSO Permission Sets using the `arn` and `instan % terraform import aws_ssoadmin_permission_set.example arn:aws:sso:::permissionSet/ssoins-2938j0x8920sbj72/ps-80383020jr9302rk,arn:aws:sso:::instance/ssoins-2938j0x8920sbj72 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/ssoadmin_permission_set_inline_policy.html.markdown b/website/docs/cdktf/python/r/ssoadmin_permission_set_inline_policy.html.markdown index 3f15670a25db..565bff3c3619 100644 --- a/website/docs/cdktf/python/r/ssoadmin_permission_set_inline_policy.html.markdown +++ b/website/docs/cdktf/python/r/ssoadmin_permission_set_inline_policy.html.markdown @@ -69,6 +69,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `inline_policy` - (Required) The IAM inline policy to attach to a Permission Set. * `instance_arn` - (Required, Forces new resource) The Amazon Resource Name (ARN) of the SSO Instance under which the operation will be executed. * `permission_set_arn` - (Required, Forces new resource) The Amazon Resource Name (ARN) of the Permission Set. 
@@ -111,4 +112,4 @@ Using `terraform import`, import SSO Permission Set Inline Policies using the `p % terraform import aws_ssoadmin_permission_set_inline_policy.example arn:aws:sso:::permissionSet/ssoins-2938j0x8920sbj72/ps-80383020jr9302rk,arn:aws:sso:::instance/ssoins-2938j0x8920sbj72 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/ssoadmin_permissions_boundary_attachment.html.markdown b/website/docs/cdktf/python/r/ssoadmin_permissions_boundary_attachment.html.markdown index e6a62dd53a1c..d90143033467 100644 --- a/website/docs/cdktf/python/r/ssoadmin_permissions_boundary_attachment.html.markdown +++ b/website/docs/cdktf/python/r/ssoadmin_permissions_boundary_attachment.html.markdown @@ -98,8 +98,9 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -The following arguments are required: +This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `instance_arn` - (Required, Forces new resource) The Amazon Resource Name (ARN) of the SSO Instance under which the operation will be executed. * `permission_set_arn` - (Required, Forces new resource) The Amazon Resource Name (ARN) of the Permission Set. * `permissions_boundary` - (Required, Forces new resource) The permissions boundary policy. See below. 
@@ -156,4 +157,4 @@ Using `terraform import`, import SSO Admin Permissions Boundary Attachments usin % terraform import aws_ssoadmin_permissions_boundary_attachment.example arn:aws:sso:::permissionSet/ssoins-2938j0x8920sbj72/ps-80383020jr9302rk,arn:aws:sso:::instance/ssoins-2938j0x8920sbj72 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/ssoadmin_trusted_token_issuer.html.markdown b/website/docs/cdktf/python/r/ssoadmin_trusted_token_issuer.html.markdown index 2dfd66e46e69..213efe759f76 100644 --- a/website/docs/cdktf/python/r/ssoadmin_trusted_token_issuer.html.markdown +++ b/website/docs/cdktf/python/r/ssoadmin_trusted_token_issuer.html.markdown @@ -60,6 +60,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `client_token` - (Optional) A unique, case-sensitive ID that you provide to ensure the idempotency of the request. AWS generates a random value when not provided. * `tags` - (Optional) Key-value mapping of resource tags. If configured with a provider [`default_tags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. @@ -107,4 +108,4 @@ Using `terraform import`, import SSO Admin Trusted Token Issuer using the `id`. 
% terraform import aws_ssoadmin_trusted_token_issuer.example arn:aws:sso::123456789012:trustedTokenIssuer/ssoins-lu1ye3gew4mbc7ju/tti-2657c556-9707-11ee-b9d1-0242ac120002 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/storagegateway_cache.html.markdown b/website/docs/cdktf/python/r/storagegateway_cache.html.markdown index cfcd704c4f48..7d2cee0e5316 100644 --- a/website/docs/cdktf/python/r/storagegateway_cache.html.markdown +++ b/website/docs/cdktf/python/r/storagegateway_cache.html.markdown @@ -38,6 +38,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `disk_id` - (Required) Local disk identifier. For example, `pci-0000:03:00.0-scsi-0:0:0:0`. * `gateway_arn` - (Required) The Amazon Resource Name (ARN) of the gateway. 
@@ -72,4 +73,4 @@ Using `terraform import`, import `aws_storagegateway_cache` using the gateway Am % terraform import aws_storagegateway_cache.example arn:aws:storagegateway:us-east-1:123456789012:gateway/sgw-12345678:pci-0000:03:00.0-scsi-0:0:0:0 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/storagegateway_cached_iscsi_volume.html.markdown b/website/docs/cdktf/python/r/storagegateway_cached_iscsi_volume.html.markdown index 23237a0a3514..5a81fea8662b 100644 --- a/website/docs/cdktf/python/r/storagegateway_cached_iscsi_volume.html.markdown +++ b/website/docs/cdktf/python/r/storagegateway_cached_iscsi_volume.html.markdown @@ -94,6 +94,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `gateway_arn` - (Required) The Amazon Resource Name (ARN) of the gateway. * `network_interface_id` - (Required) The network interface of the gateway on which to expose the iSCSI target. Only IPv4 addresses are accepted. * `target_name` - (Required) The name of the iSCSI target used by initiators to connect to the target and as a suffix for the target ARN. The target name must be unique across all volumes of a gateway. 
@@ -143,4 +144,4 @@ Using `terraform import`, import `aws_storagegateway_cached_iscsi_volume` using % terraform import aws_storagegateway_cached_iscsi_volume.example arn:aws:storagegateway:us-east-1:123456789012:gateway/sgw-12345678/volume/vol-12345678 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/storagegateway_file_system_association.html.markdown b/website/docs/cdktf/python/r/storagegateway_file_system_association.html.markdown index cc0606a39a66..75d8bc8b9dd4 100644 --- a/website/docs/cdktf/python/r/storagegateway_file_system_association.html.markdown +++ b/website/docs/cdktf/python/r/storagegateway_file_system_association.html.markdown @@ -105,6 +105,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `gateway_arn` - (Required) The Amazon Resource Name (ARN) of the gateway. * `location_arn` - (Required) The Amazon Resource Name (ARN) of the Amazon FSx file system to associate with the FSx File Gateway. * `username` - (Required) The user name of the user credential that has permission to access the root share of the Amazon FSx file system. The user account must belong to the Amazon FSx delegated admin user group. 
@@ -160,4 +161,4 @@ Using `terraform import`, import `aws_storagegateway_file_system_association` us % terraform import aws_storagegateway_file_system_association.example arn:aws:storagegateway:us-east-1:123456789012:fs-association/fsa-0DA347732FDB40125 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/storagegateway_gateway.html.markdown b/website/docs/cdktf/python/r/storagegateway_gateway.html.markdown index 28f8cc497edf..a22dff633716 100644 --- a/website/docs/cdktf/python/r/storagegateway_gateway.html.markdown +++ b/website/docs/cdktf/python/r/storagegateway_gateway.html.markdown @@ -173,6 +173,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `gateway_name` - (Required) Name of the gateway. * `gateway_timezone` - (Required) Time zone for the gateway. The time zone is of the format "GMT", "GMT-hr:mm", or "GMT+hr:mm". For example, `GMT-4:00` indicates the time is 4 hours behind GMT. The time zone is used, for example, for scheduling snapshots and your gateway's maintenance schedule. * `activation_key` - (Optional) Gateway activation key during resource creation. Conflicts with `gateway_ip_address`. Additional information is available in the [Storage Gateway User Guide](https://docs.aws.amazon.com/storagegateway/latest/userguide/get-activation-key.html). 
@@ -290,4 +291,4 @@ class MyConvertedCode(TerraformStack): ) ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/storagegateway_nfs_file_share.html.markdown b/website/docs/cdktf/python/r/storagegateway_nfs_file_share.html.markdown index 2f3374e871a9..235b40963140 100644 --- a/website/docs/cdktf/python/r/storagegateway_nfs_file_share.html.markdown +++ b/website/docs/cdktf/python/r/storagegateway_nfs_file_share.html.markdown @@ -38,6 +38,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `client_list` - (Required) The list of clients that are allowed to access the file gateway. The list must contain either valid IP addresses or valid CIDR blocks. Set to `["0.0.0.0/0"]` to not limit access. Minimum 1 item. Maximum 100 items. * `gateway_arn` - (Required) Amazon Resource Name (ARN) of the file gateway. * `location_arn` - (Required) The ARN of the backed storage used for storing file data. 
@@ -117,4 +118,4 @@ Using `terraform import`, import `aws_storagegateway_nfs_file_share` using the N % terraform import aws_storagegateway_nfs_file_share.example arn:aws:storagegateway:us-east-1:123456789012:share/share-12345678 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/storagegateway_smb_file_share.html.markdown b/website/docs/cdktf/python/r/storagegateway_smb_file_share.html.markdown index bc05b5d90f64..d13537517e0a 100644 --- a/website/docs/cdktf/python/r/storagegateway_smb_file_share.html.markdown +++ b/website/docs/cdktf/python/r/storagegateway_smb_file_share.html.markdown @@ -66,6 +66,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `gateway_arn` - (Required) Amazon Resource Name (ARN) of the file gateway. * `location_arn` - (Required) The ARN of the backed storage used for storing file data. * `vpc_endpoint_dns_name` - (Optional) The DNS name of the VPC endpoint for S3 private link. @@ -83,8 +84,6 @@ This resource supports the following arguments: * `object_acl` - (Optional) Access Control List permission for S3 objects. Defaults to `private`. * `oplocks_enabled` - (Optional) Boolean to indicate Opportunistic lock (oplock) status. Defaults to `true`. * `cache_attributes` - (Optional) Refresh cache information. see [`cache_attributes` Block](#cache_attributes-block) for more details. - - **Note:** If you have previously included a `cache_attributes` block in your configuration, removing it will not reset the refresh cache value and the previous value will remain. You must explicitly set a new value to change it. 
* `read_only` - (Optional) Boolean to indicate write status of file share. File share does not accept writes if `true`. Defaults to `false`. * `requester_pays` - (Optional) Boolean who pays the cost of the request and the data download from the Amazon S3 bucket. Set this value to `true` if you want the requester to pay instead of the bucket owner. Defaults to `false`. * `smb_acl_enabled` - (Optional) Set this value to `true` to enable ACL (access control list) on the SMB fileshare. Set it to `false` to map file and directory permissions to the POSIX permissions. This setting applies only to `ActiveDirectory` authentication type. @@ -94,6 +93,8 @@ This resource supports the following arguments: * `notification_policy` - (Optional) The notification policy of the file share. For more information see the [AWS Documentation](https://docs.aws.amazon.com/storagegateway/latest/APIReference/API_CreateNFSFileShare.html#StorageGateway-CreateNFSFileShare-request-NotificationPolicy). Default value is `{}`. * `tags` - (Optional) Key-value map of resource tags. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. +**Note:** If you have previously included a `cache_attributes` block in your configuration, removing it will not reset the refresh cache value and the previous value will remain. You must explicitly set a new value to change it. 
+ ### `cache_attributes` Block The `cache_attributes` configuration block supports the following arguments: @@ -145,4 +146,4 @@ Using `terraform import`, import `aws_storagegateway_smb_file_share` using the S % terraform import aws_storagegateway_smb_file_share.example arn:aws:storagegateway:us-east-1:123456789012:share/share-12345678 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/storagegateway_stored_iscsi_volume.html.markdown b/website/docs/cdktf/python/r/storagegateway_stored_iscsi_volume.html.markdown index 60c5982852ae..1e023ab4f20f 100644 --- a/website/docs/cdktf/python/r/storagegateway_stored_iscsi_volume.html.markdown +++ b/website/docs/cdktf/python/r/storagegateway_stored_iscsi_volume.html.markdown @@ -67,6 +67,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `gateway_arn` - (Required) The Amazon Resource Name (ARN) of the gateway. * `network_interface_id` - (Required) The network interface of the gateway on which to expose the iSCSI target. Only IPv4 addresses are accepted. * `target_name` - (Required) The name of the iSCSI target used by initiators to connect to the target and as a suffix for the target ARN. The target name must be unique across all volumes of a gateway. 
@@ -120,4 +121,4 @@ Using `terraform import`, import `aws_storagegateway_stored_iscsi_volume` using % terraform import aws_storagegateway_stored_iscsi_volume.example arn:aws:storagegateway:us-east-1:123456789012:gateway/sgw-12345678/volume/vol-12345678 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/storagegateway_tape_pool.html.markdown b/website/docs/cdktf/python/r/storagegateway_tape_pool.html.markdown index 06476d8f7ac6..459b187b3eb4 100644 --- a/website/docs/cdktf/python/r/storagegateway_tape_pool.html.markdown +++ b/website/docs/cdktf/python/r/storagegateway_tape_pool.html.markdown @@ -36,6 +36,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `pool_name` - (Required) The name of the new custom tape pool. * `storage_class` - (Required) The storage class that is associated with the new custom pool. When you use your backup application to eject the tape, the tape is archived directly into the storage class that corresponds to the pool. Possible values are `DEEP_ARCHIVE` or `GLACIER`. * `retention_lock_type` - (Required) Tape retention lock can be configured in two modes. When configured in governance mode, AWS accounts with specific IAM permissions are authorized to remove the tape retention lock from archived virtual tapes. When configured in compliance mode, the tape retention lock cannot be removed by any user, including the root AWS account. Possible values are `COMPLIANCE`, `GOVERNANCE`, and `NONE`. Default value is `NONE`. 
@@ -74,4 +75,4 @@ Using `terraform import`, import `aws_storagegateway_tape_pool` using the volume % terraform import aws_storagegateway_tape_pool.example arn:aws:storagegateway:us-east-1:123456789012:tapepool/pool-12345678 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/storagegateway_upload_buffer.html.markdown b/website/docs/cdktf/python/r/storagegateway_upload_buffer.html.markdown index 9c381c4640d0..ceda66589ecb 100644 --- a/website/docs/cdktf/python/r/storagegateway_upload_buffer.html.markdown +++ b/website/docs/cdktf/python/r/storagegateway_upload_buffer.html.markdown @@ -72,6 +72,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `disk_id` - (Optional) Local disk identifier. For example, `pci-0000:03:00.0-scsi-0:0:0:0`. * `disk_path` - (Optional) Local disk path. For example, `/dev/nvme1n1`. * `gateway_arn` - (Required) The Amazon Resource Name (ARN) of the gateway. 
@@ -107,4 +108,4 @@ Using `terraform import`, import `aws_storagegateway_upload_buffer` using the ga % terraform import aws_storagegateway_upload_buffer.example arn:aws:storagegateway:us-east-1:123456789012:gateway/sgw-12345678:pci-0000:03:00.0-scsi-0:0:0:0 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/storagegateway_working_storage.html.markdown b/website/docs/cdktf/python/r/storagegateway_working_storage.html.markdown index 5a47e361ff48..09bfe6ceefdc 100644 --- a/website/docs/cdktf/python/r/storagegateway_working_storage.html.markdown +++ b/website/docs/cdktf/python/r/storagegateway_working_storage.html.markdown @@ -38,6 +38,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `disk_id` - (Required) Local disk identifier. For example, `pci-0000:03:00.0-scsi-0:0:0:0`. * `gateway_arn` - (Required) The Amazon Resource Name (ARN) of the gateway. 
@@ -72,4 +73,4 @@ Using `terraform import`, import `aws_storagegateway_working_storage` using the % terraform import aws_storagegateway_working_storage.example arn:aws:storagegateway:us-east-1:123456789012:gateway/sgw-12345678:pci-0000:03:00.0-scsi-0:0:0:0 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/subnet.html.markdown b/website/docs/cdktf/python/r/subnet.html.markdown index 8ae6e12007cd..cfe07f48eba9 100644 --- a/website/docs/cdktf/python/r/subnet.html.markdown +++ b/website/docs/cdktf/python/r/subnet.html.markdown @@ -71,6 +71,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `assign_ipv6_address_on_creation` - (Optional) Specify true to indicate that network interfaces created in the specified subnet should be assigned an IPv6 address. Default is `false` @@ -113,6 +114,32 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_subnet.example + identity = { + id = "subnet-9d4a7b6c" + } +} + +resource "aws_subnet" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `id` (String) ID of the subnet. + +#### Optional + +* `account_id` (String) AWS Account where this resource is managed. +* `region` (String) Region where this resource is managed. 
+ In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import subnets using the subnet `id`. For example: ```python @@ -127,13 +154,13 @@ from imports.aws.subnet import Subnet class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) - Subnet.generate_config_for_import(self, "publicSubnet", "subnet-9d4a7b6c") + Subnet.generate_config_for_import(self, "example", "subnet-9d4a7b6c") ``` Using `terraform import`, import subnets using the subnet `id`. For example: ```console -% terraform import aws_subnet.public_subnet subnet-9d4a7b6c +% terraform import aws_subnet.example subnet-9d4a7b6c ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/swf_domain.html.markdown b/website/docs/cdktf/python/r/swf_domain.html.markdown index ab0146afbf8f..d5ef460c4505 100644 --- a/website/docs/cdktf/python/r/swf_domain.html.markdown +++ b/website/docs/cdktf/python/r/swf_domain.html.markdown @@ -39,6 +39,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Optional, Forces new resource) The name of the domain. If omitted, Terraform will assign a random, unique name. * `name_prefix` - (Optional, Forces new resource) Creates a unique name beginning with the specified prefix. Conflicts with `name`. * `description` - (Optional, Forces new resource) The domain description. @@ -78,4 +79,4 @@ Using `terraform import`, import SWF Domains using the `name`. 
For example: % terraform import aws_swf_domain.foo test-domain ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/synthetics_canary.html.markdown b/website/docs/cdktf/python/r/synthetics_canary.html.markdown index ac60329110b6..532885951f57 100644 --- a/website/docs/cdktf/python/r/synthetics_canary.html.markdown +++ b/website/docs/cdktf/python/r/synthetics_canary.html.markdown @@ -48,23 +48,24 @@ The following arguments are required: * `artifact_s3_location` - (Required) Location in Amazon S3 where Synthetics stores artifacts from the test runs of this canary. * `execution_role_arn` - (Required) ARN of the IAM role to be used to run the canary. see [AWS Docs](https://docs.aws.amazon.com/AmazonSynthetics/latest/APIReference/API_CreateCanary.html#API_CreateCanary_RequestSyntax) for permissions needs for IAM Role. * `handler` - (Required) Entry point to use for the source code when running the canary. This value must end with the string `.handler` . -* `name` - (Required) Name for this canary. Has a maximum length of 21 characters. Valid characters are lowercase alphanumeric, hyphen, or underscore. +* `name` - (Required) Name for this canary. Has a maximum length of 255 characters. Valid characters are lowercase alphanumeric, hyphen, or underscore. * `runtime_version` - (Required) Runtime version to use for the canary. Versions change often so consult the [Amazon CloudWatch documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch_Synthetics_Canaries_Library.html) for the latest valid versions. Values include `syn-python-selenium-1.0`, `syn-nodejs-puppeteer-3.0`, `syn-nodejs-2.2`, `syn-nodejs-2.1`, `syn-nodejs-2.0`, and `syn-1.0`. -* `schedule` - (Required) Configuration block providing how often the canary is to run and when these test runs are to stop. Detailed below. 
+* `schedule` - (Required) Configuration block providing how often the canary is to run and when these test runs are to stop. Detailed [below](#schedule). The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `artifact_config` - (Optional) Configuration for canary artifacts, including the encryption-at-rest settings for artifacts that the canary uploads to Amazon S3. See [Artifact Config](#artifact_config). * `delete_lambda` - (Optional) Specifies whether to also delete the Lambda functions and layers used by this canary. The default is `false`. -* `vpc_config` - (Optional) Configuration block. Detailed below. * `failure_retention_period` - (Optional) Number of days to retain data about failed runs of this canary. If you omit this field, the default of 31 days is used. The valid range is 1 to 455 days. -* `run_config` - (Optional) Configuration block for individual canary runs. Detailed below. +* `run_config` - (Optional) Configuration block for individual canary runs. Detailed [below](#run_config). * `s3_bucket` - (Optional) Full bucket name which is used if your canary script is located in S3. The bucket must already exist. **Conflicts with `zip_file`.** * `s3_key` - (Optional) S3 key of your script. **Conflicts with `zip_file`.** * `s3_version` - (Optional) S3 version ID of your script. **Conflicts with `zip_file`.** * `start_canary` - (Optional) Whether to run or stop the canary. * `success_retention_period` - (Optional) Number of days to retain data about successful runs of this canary. If you omit this field, the default of 31 days is used. The valid range is 1 to 455 days. * `tags` - (Optional) Key-value map of resource tags.
If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. -* `artifact_config` - (Optional) configuration for canary artifacts, including the encryption-at-rest settings for artifacts that the canary uploads to Amazon S3. See [Artifact Config](#artifact_config). +* `vpc_config` - (Optional) Configuration block. Detailed [below](#vpc_config). * `zip_file` - (Optional) ZIP file that contains the script, if you input your canary script directly into the canary instead of referring to an S3 location. It can be up to 225KB. **Conflicts with `s3_bucket`, `s3_key`, and `s3_version`.** ### artifact_config @@ -80,6 +81,11 @@ The following arguments are optional: * `expression` - (Required) Rate expression or cron expression that defines how often the canary is to run. For rate expression, the syntax is `rate(number unit)`. _unit_ can be `minute`, `minutes`, or `hour`. For cron expression, the syntax is `cron(expression)`. For more information about the syntax for cron expressions, see [Scheduling canary runs using cron](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch_Synthetics_Canaries_cron.html). * `duration_in_seconds` - (Optional) Duration in seconds, for the canary to continue making regular runs according to the schedule in the Expression value. +* `retry_config` - (Optional) Configuration block for canary retries. Detailed [below](#retry_config). + +### retry_config + +* `max_retries` - (Required) Maximum number of retries. The value must be less than or equal to `2`. If `max_retries` is `2`, `run_config.timeout_in_seconds` should be less than 600 seconds. Defaults to `0`. ### run_config @@ -87,6 +93,7 @@ The following arguments are optional: * `memory_in_mb` - (Optional) Maximum amount of memory available to the canary while it is running, in MB. 
The value you specify must be a multiple of 64. * `active_tracing` - (Optional) Whether this canary is to use active AWS X-Ray tracing when it runs. You can enable active tracing only for canaries that use version syn-nodejs-2.0 or later for their canary runtime. * `environment_variables` - (Optional) Map of environment variables that are accessible from the canary during execution. Please see [AWS Docs](https://docs.aws.amazon.com/lambda/latest/dg/configuration-envvars.html#configuration-envvars-runtime) for variables reserved for Lambda. +* `ephemeral_storage` - (Optional) Amount of ephemeral storage (in MB) allocated for the canary run during execution. Defaults to 1024. ### vpc_config @@ -94,6 +101,7 @@ If this canary tests an endpoint in a VPC, this structure contains information a * `subnet_ids` - (Required) IDs of the subnets where this canary is to run. * `security_group_ids` - (Required) IDs of the security groups for this canary. +* `ipv6_allowed_for_dual_stack` - (Optional) If `true`, allow outbound IPv6 traffic on VPC canaries that are connected to dual-stack subnets. The default is `false`. ## Attribute Reference @@ -143,4 +151,4 @@ Using `terraform import`, import Synthetics Canaries using the `name`. For examp % terraform import aws_synthetics_canary.some some-canary ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/synthetics_group.html.markdown b/website/docs/cdktf/python/r/synthetics_group.html.markdown index e5fd6d118be0..5dec96d88c3a 100644 --- a/website/docs/cdktf/python/r/synthetics_group.html.markdown +++ b/website/docs/cdktf/python/r/synthetics_group.html.markdown @@ -41,6 +41,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `tags` - (Optional) Key-value mapping of resource tags. If configured with a provider [`default_tags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. ## Attribute Reference @@ -76,4 +77,4 @@ Using `terraform import`, import CloudWatch Synthetics Group using the `name`. F % terraform import aws_synthetics_group.example example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/synthetics_group_association.html.markdown b/website/docs/cdktf/python/r/synthetics_group_association.html.markdown index 410990dde4df..9198b637984c 100644 --- a/website/docs/cdktf/python/r/synthetics_group_association.html.markdown +++ b/website/docs/cdktf/python/r/synthetics_group_association.html.markdown @@ -36,8 +36,9 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -The following arguments are required: +This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `group_name` - (Required) Name of the group that the canary will be associated with. * `canary_arn` - (Required) ARN of the canary. 
@@ -73,4 +74,4 @@ Using `terraform import`, import CloudWatch Synthetics Group Association using t % terraform import aws_synthetics_group_association.example arn:aws:synthetics:us-west-2:123456789012:canary:tf-acc-test-abcd1234,examplename ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/timestreaminfluxdb_db_cluster.html.markdown b/website/docs/cdktf/python/r/timestreaminfluxdb_db_cluster.html.markdown new file mode 100644 index 000000000000..9abbd2045726 --- /dev/null +++ b/website/docs/cdktf/python/r/timestreaminfluxdb_db_cluster.html.markdown @@ -0,0 +1,337 @@ +--- +subcategory: "Timestream for InfluxDB" +layout: "aws" +page_title: "AWS: aws_timestreaminfluxdb_db_cluster" +description: |- + Terraform resource for managing an Amazon Timestream for InfluxDB read-replica cluster. +--- + + + +# Resource: aws_timestreaminfluxdb_db_cluster + +Terraform resource for managing an Amazon Timestream for InfluxDB read-replica cluster. + +~> **NOTE:** This resource requires a subscription to [Timestream for InfluxDB Read Replicas (Add-On) on the AWS Marketplace](https://aws.amazon.com/marketplace/pp/prodview-lftzfxtb5xlv4?applicationId=AWS-Marketplace-Console&ref_=beagle&sr=0-2). + +## Example Usage + +### Basic Usage + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import Token, TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. 
+# +from imports.aws.timestreaminfluxdb_db_cluster import TimestreaminfluxdbDbCluster +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + TimestreaminfluxdbDbCluster(self, "example", + allocated_storage=20, + bucket="example-bucket-name", + db_instance_type="db.influx.medium", + failover_mode="AUTOMATIC", + name="example-db-cluster", + organization="organization", + password="example-password", + port=8086, + username="admin", + vpc_security_group_ids=[Token.as_string(aws_security_group_example.id)], + vpc_subnet_ids=[example1.id, example2.id] + ) +``` + +### Usage with Prerequisite Resources + +All Timestream for InfluxDB clusters require a VPC, at least two subnets, and a security group. The following example shows how these prerequisite resources can be created and used with `aws_timestreaminfluxdb_db_cluster`. + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import Token, TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.security_group import SecurityGroup +from imports.aws.subnet import Subnet +from imports.aws.timestreaminfluxdb_db_cluster import TimestreaminfluxdbDbCluster +from imports.aws.vpc import Vpc +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + example = Vpc(self, "example", + cidr_block="10.0.0.0/16" + ) + aws_security_group_example = SecurityGroup(self, "example_1", + name="example", + vpc_id=example.id + ) + # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. 
+ aws_security_group_example.override_logical_id("example") + example1 = Subnet(self, "example_1_2", + cidr_block="10.0.1.0/24", + vpc_id=example.id + ) + # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. + example1.override_logical_id("example_1") + example2 = Subnet(self, "example_2", + cidr_block="10.0.2.0/24", + vpc_id=example.id + ) + aws_timestreaminfluxdb_db_cluster_example = + TimestreaminfluxdbDbCluster(self, "example_4", + allocated_storage=20, + bucket="example-bucket-name", + db_instance_type="db.influx.medium", + name="example-db-cluster", + organization="organization", + password="example-password", + username="admin", + vpc_security_group_ids=[Token.as_string(aws_security_group_example.id)], + vpc_subnet_ids=[example1.id, example2.id] + ) + # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. + aws_timestreaminfluxdb_db_cluster_example.override_logical_id("example") +``` + +### Usage with Public Internet Access Enabled + +The following configuration shows how to define the necessary resources and arguments to allow public internet access on your Timestream for InfluxDB read-replica cluster's primary endpoint (simply referred to as "endpoint") and read endpoint on port `8086`. After applying this configuration, the cluster's InfluxDB UI can be accessed by visiting your cluster's primary endpoint at port `8086`. + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import Token, Op, TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. 
+# +from imports.aws.internet_gateway import InternetGateway +from imports.aws.route import Route +from imports.aws.route_table_association import RouteTableAssociation +from imports.aws.security_group import SecurityGroup +from imports.aws.subnet import Subnet +from imports.aws.timestreaminfluxdb_db_cluster import TimestreaminfluxdbDbCluster +from imports.aws.vpc import Vpc +from imports.aws.vpc_security_group_ingress_rule import VpcSecurityGroupIngressRule +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + example = Vpc(self, "example", + cidr_block="10.0.0.0/16" + ) + aws_internet_gateway_example = InternetGateway(self, "example_1", + tags={ + "Name": "example" + }, + vpc_id=example.id + ) + # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. + aws_internet_gateway_example.override_logical_id("example") + Route(self, "test_route", + destination_cidr_block="0.0.0.0/0", + gateway_id=Token.as_string(aws_internet_gateway_example.id), + route_table_id=example.main_route_table_id + ) + RouteTableAssociation(self, "test_route_table_association", + route_table_id=example.main_route_table_id, + subnet_id=test_subnet.id + ) + aws_security_group_example = SecurityGroup(self, "example_4", + name="example", + vpc_id=example.id + ) + # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. + aws_security_group_example.override_logical_id("example") + example1 = Subnet(self, "example_1_5", + cidr_block="10.0.1.0/24", + vpc_id=example.id + ) + # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. 
+ example1.override_logical_id("example_1") + example2 = Subnet(self, "example_2", + cidr_block="10.0.2.0/24", + vpc_id=example.id + ) + aws_timestreaminfluxdb_db_cluster_example = + TimestreaminfluxdbDbCluster(self, "example_7", + allocated_storage=20, + bucket="example-bucket-name", + db_instance_type="db.influx.medium", + name="example-db-cluster", + organization="organization", + password="example-password", + publicly_accessible=True, + username="admin", + vpc_security_group_ids=[Token.as_string(aws_security_group_example.id)], + vpc_subnet_ids=[example1.id, example2.id] + ) + # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. + aws_timestreaminfluxdb_db_cluster_example.override_logical_id("example") + aws_vpc_security_group_ingress_rule_example = + VpcSecurityGroupIngressRule(self, "example_8", + ip_protocol=Token.as_string(Op.negate(1)), + referenced_security_group_id=Token.as_string(aws_security_group_example.id), + security_group_id=Token.as_string(aws_security_group_example.id) + ) + # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. + aws_vpc_security_group_ingress_rule_example.override_logical_id("example") +``` + +### Usage with S3 Log Delivery Enabled + +You can use an S3 bucket to store logs generated by your Timestream for InfluxDB cluster. The following example shows what resources and arguments are required to configure an S3 bucket for logging, including the IAM policy that needs to be set in order to allow Timestream for InfluxDB to place logs in your S3 bucket. The configuration of the required VPC, security group, and subnets have been left out of the example for brevity. + +```python +# DO NOT EDIT. 
Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import Token, TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.data_aws_iam_policy_document import DataAwsIamPolicyDocument +from imports.aws.s3_bucket import S3Bucket +from imports.aws.s3_bucket_policy import S3BucketPolicy +from imports.aws.timestreaminfluxdb_db_cluster import TimestreaminfluxdbDbCluster +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + example = S3Bucket(self, "example", + bucket="example-s3-bucket", + force_destroy=True + ) + aws_timestreaminfluxdb_db_cluster_example = + TimestreaminfluxdbDbCluster(self, "example_1", + allocated_storage=20, + bucket="example-bucket-name", + db_instance_type="db.influx.medium", + log_delivery_configuration=[TimestreaminfluxdbDbClusterLogDeliveryConfiguration( + s3_configuration=[TimestreaminfluxdbDbClusterLogDeliveryConfigurationS3Configuration( + bucket_name=example.bucket, + enabled=True + ) + ] + ) + ], + name="example-db-cluster", + organization="organization", + password="example-password", + username="admin", + vpc_security_group_ids=[Token.as_string(aws_security_group_example.id)], + vpc_subnet_ids=[example1.id, example2.id] + ) + # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. 
+ aws_timestreaminfluxdb_db_cluster_example.override_logical_id("example") + data_aws_iam_policy_document_example = DataAwsIamPolicyDocument(self, "example_2", + statement=[DataAwsIamPolicyDocumentStatement( + actions=["s3:PutObject"], + principals=[DataAwsIamPolicyDocumentStatementPrincipals( + identifiers=["timestream-influxdb.amazonaws.com"], + type="Service" + ) + ], + resources=["${" + example.arn + "}/*"] + ) + ] + ) + # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. + data_aws_iam_policy_document_example.override_logical_id("example") + aws_s3_bucket_policy_example = S3BucketPolicy(self, "example_3", + bucket=example.id, + policy=Token.as_string(data_aws_iam_policy_document_example.json) + ) + # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. + aws_s3_bucket_policy_example.override_logical_id("example") +``` + +## Argument Reference + +The following arguments are required: + +* `allocated_storage` - (Required) Amount of storage in GiB (gibibytes). The minimum value is `20`, the maximum value is `16384`. The argument `db_storage_type` places restrictions on this argument's minimum value. The following is a list of `db_storage_type` values and the corresponding minimum value for `allocated_storage`: `"InfluxIOIncludedT1"`: `20`, `"InfluxIOIncludedT2"` and `"InfluxIOIncludedT3"`: `400`. +* `bucket` - (Required) Name of the initial InfluxDB bucket. All InfluxDB data is stored in a bucket. A bucket combines the concept of a database and a retention period (the duration of time that each data point persists). A bucket belongs to an organization. Along with `organization`, `username`, and `password`, this argument will be stored in the secret referred to by the `influx_auth_parameters_secret_arn` attribute. +* `db_instance_type` - (Required) Timestream for InfluxDB DB instance type to run InfluxDB on.
Valid options are: `"db.influx.medium"`, `"db.influx.large"`, `"db.influx.xlarge"`, `"db.influx.2xlarge"`, `"db.influx.4xlarge"`, `"db.influx.8xlarge"`, `"db.influx.12xlarge"`, and `"db.influx.16xlarge"`. This argument is updatable. +* `name` - (Required) Name that uniquely identifies the DB cluster when interacting with the Amazon Timestream for InfluxDB API and CLI commands. This name will also be a prefix included in the endpoint. Cluster names must be unique per customer and per region. The argument must start with a letter, cannot contain consecutive hyphens (`-`) and cannot end with a hyphen. +* `password` - (Required) Password of the initial admin user created in InfluxDB. This password will allow you to access the InfluxDB UI to perform various administrative tasks and also use the InfluxDB CLI to create an operator token. Along with `bucket`, `username`, and `organization`, this argument will be stored in the secret referred to by the `influx_auth_parameters_secret_arn` attribute. +* `organization` - (Required) Name of the initial organization for the initial admin user in InfluxDB. An InfluxDB organization is a workspace for a group of users. Along with `bucket`, `username`, and `password`, this argument will be stored in the secret referred to by the `influx_auth_parameters_secret_arn` attribute. +* `username` - (Required) Username of the initial admin user created in InfluxDB. Must start with a letter and can't end with a hyphen or contain two consecutive hyphens. This username will allow you to access the InfluxDB UI to perform various administrative tasks and also use the InfluxDB CLI to create an operator token. Along with `bucket`, `organization`, and `password`, this argument will be stored in the secret referred to by the `influx_auth_parameters_secret_arn` attribute. +* `vpc_security_group_ids` - (Required) List of VPC security group IDs to associate with the cluster. 
+* `vpc_subnet_ids` - (Required) List of VPC subnet IDs to associate with the cluster. Provide at least two VPC subnet IDs in different availability zones when deploying with a Multi-AZ standby. + +The following arguments are optional: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `db_parameter_group_identifier` - (Optional) ID of the DB parameter group assigned to your cluster. This argument is updatable. If added to an existing Timestream for InfluxDB cluster or given a new value, will cause an in-place update to the cluster. However, if a cluster already has a value for `db_parameter_group_identifier`, removing `db_parameter_group_identifier` will cause the cluster to be destroyed and recreated. +* `db_storage_type` - (Default `"InfluxIOIncludedT1"`) Timestream for InfluxDB DB storage type to read and write InfluxDB data. You can choose between 3 different types of provisioned Influx IOPS included storage according to your workloads requirements: Influx IO Included 3000 IOPS, Influx IO Included 12000 IOPS, Influx IO Included 16000 IOPS. Valid options are: `"InfluxIOIncludedT1"`, `"InfluxIOIncludedT2"`, and `"InfluxIOIncludedT3"`. If you use `"InfluxIOIncludedT2"` or `"InfluxIOIncludedT3"`, the minimum value for `allocated_storage` is 400. +* `deployment_type` - (Default `"MULTI_NODE_READ_REPLICAS"`) Specifies the type of cluster to create. Valid options are: `"MULTI_NODE_READ_REPLICAS"`. +* `failover_mode` - (Default `"AUTOMATIC"`) Specifies the behavior of failure recovery when the primary node of the cluster fails. Valid options are: `"AUTOMATIC"` and `"NO_FAILOVER"`. +* `log_delivery_configuration` - (Optional) Configuration for sending InfluxDB engine logs to a specified S3 bucket.
This argument is updatable. +* `network_type` - (Optional) Specifies whether the network type of the Timestream for InfluxDB cluster is IPV4, which can communicate over IPv4 protocol only, or DUAL, which can communicate over both IPv4 and IPv6 protocols. +* `port` - (Default `8086`) The port on which the cluster accepts connections. Valid values: `1024`-`65535`. Cannot be `2375`-`2376`, `7788`-`7799`, `8090`, or `51678`-`51680`. This argument is updatable. +* `publicly_accessible` - (Default `false`) Configures the DB cluster with a public IP to facilitate access. Other resources, such as a VPC, a subnet, an internet gateway, and a route table with routes, are also required to enable public access, in addition to this argument. See "[Usage with Public Internet Access Enabled](#usage-with-public-internet-access-enabled)" for an example configuration with all required resources for public internet access. +* `tags` - (Optional) Map of tags assigned to the resource. If configured with a provider [`default_tags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. + +### Nested Fields + +#### `log_delivery_configuration` + +* `s3_configuration` - (Required) Configuration for S3 bucket log delivery. + +#### `s3_configuration` + +* `bucket_name` - (Required) Name of the S3 bucket to deliver logs to. +* `enabled` - (Required) Indicates whether log delivery to the S3 bucket is enabled. + +**Note**: The following arguments do updates in-place: `db_parameter_group_identifier`, `log_delivery_configuration`, `port`, `db_instance_type`, `failover_mode`, and `tags`. Changes to any other argument after a cluster has been deployed will cause destruction and re-creation of the cluster.
Additionally, when `db_parameter_group_identifier` is added to a cluster or modified, the cluster will be updated in-place but if `db_parameter_group_identifier` is removed from a cluster, the cluster will be destroyed and re-created. + +## Attribute Reference + +This resource exports the following attributes in addition to the arguments above: + +* `arn` - ARN of the Timestream for InfluxDB cluster. +* `endpoint` - Endpoint used to connect to InfluxDB. The default InfluxDB port is 8086. +* `id` - ID of the Timestream for InfluxDB cluster. +* `influx_auth_parameters_secret_arn` - ARN of the AWS Secrets Manager secret containing the initial InfluxDB authorization parameters. The secret value is a JSON formatted key-value pair holding InfluxDB authorization values: organization, bucket, username, and password. +* `reader_endpoint` - The endpoint used to connect to the Timestream for InfluxDB cluster for read-only operations. +* `tags_all` - Map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). + +## Timeouts + +[Configuration options](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts): + +* `create` - (Default `30m`) +* `update` - (Default `30m`) +* `delete` - (Default `30m`) + +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Timestream for InfluxDB cluster using its identifier. For example: + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. 
+# +from imports.aws.timestreaminfluxdb_db_cluster import TimestreaminfluxdbDbCluster +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + TimestreaminfluxdbDbCluster.generate_config_for_import(self, "example", "12345abcde") +``` + +Using `terraform import`, import Timestream for InfluxDB cluster using its identifier. For example: + +```console +% terraform import aws_timestreaminfluxdb_db_cluster.example 12345abcde +``` + + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/timestreaminfluxdb_db_instance.html.markdown b/website/docs/cdktf/python/r/timestreaminfluxdb_db_instance.html.markdown index 256ca914fde4..fa9ad680d46c 100644 --- a/website/docs/cdktf/python/r/timestreaminfluxdb_db_instance.html.markdown +++ b/website/docs/cdktf/python/r/timestreaminfluxdb_db_instance.html.markdown @@ -296,6 +296,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `db_parameter_group_identifier` - (Optional) ID of the DB parameter group assigned to your DB instance. This argument is updatable. If added to an existing Timestream for InfluxDB instance or given a new value, will cause an in-place update to the instance. However, if an instance already has a value for `db_parameter_group_identifier`, removing `db_parameter_group_identifier` will cause the instance to be destroyed and recreated. * `db_storage_type` - (Default `"InfluxIOIncludedT1"`) Timestream for InfluxDB DB storage type to read and write InfluxDB data. 
You can choose between 3 different types of provisioned Influx IOPS included storage according to your workloads requirements: Influx IO Included 3000 IOPS, Influx IO Included 12000 IOPS, Influx IO Included 16000 IOPS. Valid options are: `"InfluxIOIncludedT1"`, `"InfluxIOIncludedT2"`, and `"InfluxIOIncludedT3"`. If you use `"InfluxIOIncludedT2"` or `"InfluxIOIncludedT3"`, the minimum value for `allocated_storage` is 400. This argument is updatable. For a single instance, after this argument has been updated once, it can only be updated again after 6 hours have passed. +* `deployment_type` - (Default `"SINGLE_AZ"`) Specifies whether the DB instance will be deployed as a standalone instance or with a Multi-AZ standby for high availability. Valid options are: `"SINGLE_AZ"`, `"WITH_MULTIAZ_STANDBY"`. This argument is updatable. @@ -326,7 +327,7 @@ This resource exports the following attributes in addition to the arguments abov * `availability_zone` - Availability Zone in which the DB instance resides. * `endpoint` - Endpoint used to connect to InfluxDB. The default InfluxDB port is 8086. * `id` - ID of the Timestream for InfluxDB instance. -* `influx_auth_parameters_secret_arn` - ARN of the AWS Secrets Manager secret containing the initial InfluxDB authorization parameters. The secret value is a JSON formatted key-value pair holding InfluxDB authorization values: organization, bucket, username, and password. This secret will be read by the `aws_timestreaminfluxdb_db_instance` resource in order to support importing: deleting the secret or secret values can cause errors. +* `influx_auth_parameters_secret_arn` - ARN of the AWS Secrets Manager secret containing the initial InfluxDB authorization parameters. The secret value is a JSON formatted key-value pair holding InfluxDB authorization values: organization, bucket, username, and password.
* `secondary_availability_zone` - Availability Zone in which the standby instance is located when deploying with a MultiAZ standby instance. * `tags_all` - Map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). @@ -363,4 +364,4 @@ Using `terraform import`, import Timestream for InfluxDB Db Instance using its i % terraform import aws_timestreaminfluxdb_db_instance.example 12345abcde ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/timestreamquery_scheduled_query.html.markdown b/website/docs/cdktf/python/r/timestreamquery_scheduled_query.html.markdown index ee01e37e61a3..fa5a2a417e0e 100644 --- a/website/docs/cdktf/python/r/timestreamquery_scheduled_query.html.markdown +++ b/website/docs/cdktf/python/r/timestreamquery_scheduled_query.html.markdown @@ -23,74 +23,74 @@ If your infrastructure is already set up—including the source database and tab ```python # DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug from constructs import Construct -from cdktf import TerraformStack +from cdktf import Token, TerraformStack # # Provider bindings are generated by running `cdktf get`. # See https://cdk.tf/provider-generation for more details. # -from imports.aws. 
import TimestreamqueryScheduledQuery +from imports.aws.timestreamquery_scheduled_query import TimestreamqueryScheduledQuery class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) TimestreamqueryScheduledQuery(self, "example", - error_report_configuration=[{ - "s3_configuration": [{ - "bucket_name": aws_s3_bucket_example.bucket - } + error_report_configuration=[TimestreamqueryScheduledQueryErrorReportConfiguration( + s3_configuration=[TimestreamqueryScheduledQueryErrorReportConfigurationS3Configuration( + bucket_name=Token.as_string(aws_s3_bucket_example.bucket) + ) ] - } + ) ], - execution_role_arn=aws_iam_role_example.arn, - name=aws_timestreamwrite_table_example.table_name, - notification_configuration=[{ - "sns_configuration": [{ - "topic_arn": aws_sns_topic_example.arn - } + execution_role_arn=Token.as_string(aws_iam_role_example.arn), + name=Token.as_string(aws_timestreamwrite_table_example.table_name), + notification_configuration=[TimestreamqueryScheduledQueryNotificationConfiguration( + sns_configuration=[TimestreamqueryScheduledQueryNotificationConfigurationSnsConfiguration( + topic_arn=Token.as_string(aws_sns_topic_example.arn) + ) ] - } + ) ], query_string="SELECT region, az, hostname, BIN(time, 15s) AS binned_timestamp,\n\tROUND(AVG(cpu_utilization), 2) AS avg_cpu_utilization,\n\tROUND(APPROX_PERCENTILE(cpu_utilization, 0.9), 2) AS p90_cpu_utilization,\n\tROUND(APPROX_PERCENTILE(cpu_utilization, 0.95), 2) AS p95_cpu_utilization,\n\tROUND(APPROX_PERCENTILE(cpu_utilization, 0.99), 2) AS p99_cpu_utilization\nFROM exampledatabase.exampletable\nWHERE measure_name = 'metrics' AND time > ago(2h)\nGROUP BY region, hostname, az, BIN(time, 15s)\nORDER BY binned_timestamp ASC\nLIMIT 5\n\n", - schedule_configuration=[{ - "schedule_expression": "rate(1 hour)" - } + schedule_configuration=[TimestreamqueryScheduledQueryScheduleConfiguration( + schedule_expression="rate(1 hour)" + ) ], - target_configuration=[{ - 
"timestream_configuration": [{ - "database_name": results.database_name, - "dimension_mapping": [{ - "dimension_value_type": "VARCHAR", - "name": "az" - }, { - "dimension_value_type": "VARCHAR", - "name": "region" - }, { - "dimension_value_type": "VARCHAR", - "name": "hostname" - } + target_configuration=[TimestreamqueryScheduledQueryTargetConfiguration( + timestream_configuration=[TimestreamqueryScheduledQueryTargetConfigurationTimestreamConfiguration( + database_name=results.database_name, + dimension_mapping=[TimestreamqueryScheduledQueryTargetConfigurationTimestreamConfigurationDimensionMapping( + dimension_value_type="VARCHAR", + name="az" + ), TimestreamqueryScheduledQueryTargetConfigurationTimestreamConfigurationDimensionMapping( + dimension_value_type="VARCHAR", + name="region" + ), TimestreamqueryScheduledQueryTargetConfigurationTimestreamConfigurationDimensionMapping( + dimension_value_type="VARCHAR", + name="hostname" + ) ], - "multi_measure_mappings": [{ - "multi_measure_attribute_mapping": [{ - "measure_value_type": "DOUBLE", - "source_column": "avg_cpu_utilization" - }, { - "measure_value_type": "DOUBLE", - "source_column": "p90_cpu_utilization" - }, { - "measure_value_type": "DOUBLE", - "source_column": "p95_cpu_utilization" - }, { - "measure_value_type": "DOUBLE", - "source_column": "p99_cpu_utilization" - } + multi_measure_mappings=[TimestreamqueryScheduledQueryTargetConfigurationTimestreamConfigurationMultiMeasureMappings( + multi_measure_attribute_mapping=[TimestreamqueryScheduledQueryTargetConfigurationTimestreamConfigurationMultiMeasureMappingsMultiMeasureAttributeMapping( + measure_value_type="DOUBLE", + source_column="avg_cpu_utilization" + ), TimestreamqueryScheduledQueryTargetConfigurationTimestreamConfigurationMultiMeasureMappingsMultiMeasureAttributeMapping( + measure_value_type="DOUBLE", + source_column="p90_cpu_utilization" + ), 
TimestreamqueryScheduledQueryTargetConfigurationTimestreamConfigurationMultiMeasureMappingsMultiMeasureAttributeMapping( + measure_value_type="DOUBLE", + source_column="p95_cpu_utilization" + ), TimestreamqueryScheduledQueryTargetConfigurationTimestreamConfigurationMultiMeasureMappingsMultiMeasureAttributeMapping( + measure_value_type="DOUBLE", + source_column="p99_cpu_utilization" + ) ], - "target_multi_measure_name": "multi-metrics" - } + target_multi_measure_name="multi-metrics" + ) ], - "table_name": aws_timestreamwrite_table_results.table_name, - "time_column": "binned_timestamp" - } + table_name=Token.as_string(aws_timestreamwrite_table_results.table_name), + time_column="binned_timestamp" + ) ] - } + ) ] ) ``` @@ -252,74 +252,74 @@ This is done with Amazon Timestream Write [WriteRecords](https://docs.aws.amazon ```python # DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug from constructs import Construct -from cdktf import TerraformStack +from cdktf import Token, TerraformStack # # Provider bindings are generated by running `cdktf get`. # See https://cdk.tf/provider-generation for more details. # -from imports.aws. 
import TimestreamqueryScheduledQuery +from imports.aws.timestreamquery_scheduled_query import TimestreamqueryScheduledQuery class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) TimestreamqueryScheduledQuery(self, "example", - error_report_configuration=[{ - "s3_configuration": [{ - "bucket_name": aws_s3_bucket_example.bucket - } + error_report_configuration=[TimestreamqueryScheduledQueryErrorReportConfiguration( + s3_configuration=[TimestreamqueryScheduledQueryErrorReportConfigurationS3Configuration( + bucket_name=Token.as_string(aws_s3_bucket_example.bucket) + ) ] - } + ) ], - execution_role_arn=aws_iam_role_example.arn, - name=aws_timestreamwrite_table_example.table_name, - notification_configuration=[{ - "sns_configuration": [{ - "topic_arn": aws_sns_topic_example.arn - } + execution_role_arn=Token.as_string(aws_iam_role_example.arn), + name=Token.as_string(aws_timestreamwrite_table_example.table_name), + notification_configuration=[TimestreamqueryScheduledQueryNotificationConfiguration( + sns_configuration=[TimestreamqueryScheduledQueryNotificationConfigurationSnsConfiguration( + topic_arn=Token.as_string(aws_sns_topic_example.arn) + ) ] - } + ) ], query_string="SELECT region, az, hostname, BIN(time, 15s) AS binned_timestamp,\n\tROUND(AVG(cpu_utilization), 2) AS avg_cpu_utilization,\n\tROUND(APPROX_PERCENTILE(cpu_utilization, 0.9), 2) AS p90_cpu_utilization,\n\tROUND(APPROX_PERCENTILE(cpu_utilization, 0.95), 2) AS p95_cpu_utilization,\n\tROUND(APPROX_PERCENTILE(cpu_utilization, 0.99), 2) AS p99_cpu_utilization\nFROM exampledatabase.exampletable\nWHERE measure_name = 'metrics' AND time > ago(2h)\nGROUP BY region, hostname, az, BIN(time, 15s)\nORDER BY binned_timestamp ASC\nLIMIT 5\n\n", - schedule_configuration=[{ - "schedule_expression": "rate(1 hour)" - } + schedule_configuration=[TimestreamqueryScheduledQueryScheduleConfiguration( + schedule_expression="rate(1 hour)" + ) ], - target_configuration=[{ - 
"timestream_configuration": [{ - "database_name": results.database_name, - "dimension_mapping": [{ - "dimension_value_type": "VARCHAR", - "name": "az" - }, { - "dimension_value_type": "VARCHAR", - "name": "region" - }, { - "dimension_value_type": "VARCHAR", - "name": "hostname" - } + target_configuration=[TimestreamqueryScheduledQueryTargetConfiguration( + timestream_configuration=[TimestreamqueryScheduledQueryTargetConfigurationTimestreamConfiguration( + database_name=results.database_name, + dimension_mapping=[TimestreamqueryScheduledQueryTargetConfigurationTimestreamConfigurationDimensionMapping( + dimension_value_type="VARCHAR", + name="az" + ), TimestreamqueryScheduledQueryTargetConfigurationTimestreamConfigurationDimensionMapping( + dimension_value_type="VARCHAR", + name="region" + ), TimestreamqueryScheduledQueryTargetConfigurationTimestreamConfigurationDimensionMapping( + dimension_value_type="VARCHAR", + name="hostname" + ) ], - "multi_measure_mappings": [{ - "multi_measure_attribute_mapping": [{ - "measure_value_type": "DOUBLE", - "source_column": "avg_cpu_utilization" - }, { - "measure_value_type": "DOUBLE", - "source_column": "p90_cpu_utilization" - }, { - "measure_value_type": "DOUBLE", - "source_column": "p95_cpu_utilization" - }, { - "measure_value_type": "DOUBLE", - "source_column": "p99_cpu_utilization" - } + multi_measure_mappings=[TimestreamqueryScheduledQueryTargetConfigurationTimestreamConfigurationMultiMeasureMappings( + multi_measure_attribute_mapping=[TimestreamqueryScheduledQueryTargetConfigurationTimestreamConfigurationMultiMeasureMappingsMultiMeasureAttributeMapping( + measure_value_type="DOUBLE", + source_column="avg_cpu_utilization" + ), TimestreamqueryScheduledQueryTargetConfigurationTimestreamConfigurationMultiMeasureMappingsMultiMeasureAttributeMapping( + measure_value_type="DOUBLE", + source_column="p90_cpu_utilization" + ), 
TimestreamqueryScheduledQueryTargetConfigurationTimestreamConfigurationMultiMeasureMappingsMultiMeasureAttributeMapping( + measure_value_type="DOUBLE", + source_column="p95_cpu_utilization" + ), TimestreamqueryScheduledQueryTargetConfigurationTimestreamConfigurationMultiMeasureMappingsMultiMeasureAttributeMapping( + measure_value_type="DOUBLE", + source_column="p99_cpu_utilization" + ) ], - "target_multi_measure_name": "multi-metrics" - } + target_multi_measure_name="multi-metrics" + ) ], - "table_name": aws_timestreamwrite_table_results.table_name, - "time_column": "binned_timestamp" - } + table_name=Token.as_string(aws_timestreamwrite_table_results.table_name), + time_column="binned_timestamp" + ) ] - } + ) ] ) ``` @@ -338,6 +338,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `kms_key_id` - (Optional) Amazon KMS key used to encrypt the scheduled query resource, at-rest. If not specified, the scheduled query resource will be encrypted with a Timestream owned Amazon KMS key. To specify a KMS key, use the key ID, key ARN, alias name, or alias ARN. When using an alias name, prefix the name with "alias/". If `error_report_configuration` uses `SSE_KMS` as the encryption type, the same `kms_key_id` is used to encrypt the error report at rest. * `tags` - (Optional) Map of tags assigned to the resource. If configured with a provider [`default_tags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. @@ -490,7 +491,7 @@ from cdktf import TerraformStack # Provider bindings are generated by running `cdktf get`. 
# See https://cdk.tf/provider-generation for more details. # -from imports.aws. import TimestreamqueryScheduledQuery +from imports.aws.timestreamquery_scheduled_query import TimestreamqueryScheduledQuery class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) @@ -503,4 +504,4 @@ Using `terraform import`, import Timestream Query Scheduled Query using the `arn % terraform import aws_timestreamquery_scheduled_query.example arn:aws:timestream:us-west-2:012345678901:scheduled-query/tf-acc-test-7774188528604787105-e13659544fe66c8d ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/timestreamwrite_database.html.markdown b/website/docs/cdktf/python/r/timestreamwrite_database.html.markdown index a017643d28e0..b71f050bb27c 100644 --- a/website/docs/cdktf/python/r/timestreamwrite_database.html.markdown +++ b/website/docs/cdktf/python/r/timestreamwrite_database.html.markdown @@ -60,7 +60,8 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: -* `database_name` – (Required) The name of the Timestream database. Minimum length of 3. Maximum length of 64. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `database_name` - (Required) The name of the Timestream database. Minimum length of 3. Maximum length of 64. * `kms_key_id` - (Optional) The ARN (not Alias ARN) of the KMS key to be used to encrypt the data stored in the database. If the KMS key is not specified, the database will be encrypted with a Timestream managed KMS key located in your account. Refer to [AWS managed KMS keys](https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-managed-cmk) for more info. 
* `tags` - (Optional) Map of tags to assign to this resource. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. @@ -99,4 +100,4 @@ Using `terraform import`, import Timestream databases using the `database_name`. % terraform import aws_timestreamwrite_database.example example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/timestreamwrite_table.html.markdown b/website/docs/cdktf/python/r/timestreamwrite_table.html.markdown index f4a30b62e07f..efd3aae798c7 100644 --- a/website/docs/cdktf/python/r/timestreamwrite_table.html.markdown +++ b/website/docs/cdktf/python/r/timestreamwrite_table.html.markdown @@ -92,7 +92,8 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: -* `database_name` – (Required) The name of the Timestream database. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `database_name` - (Required) The name of the Timestream database. * `magnetic_store_write_properties` - (Optional) Contains properties to set on the table when enabling magnetic store writes. See [Magnetic Store Write Properties](#magnetic-store-write-properties) below for more details. * `retention_properties` - (Optional) The retention duration for the memory store and magnetic store. See [Retention Properties](#retention-properties) below for more details. If not provided, `magnetic_store_retention_period_in_days` default to 73000 and `memory_store_retention_period_in_hours` defaults to 6. * `schema` - (Optional) The schema of the table. 
See [Schema](#schema) below for more details. @@ -175,4 +176,4 @@ Using `terraform import`, import Timestream tables using the `table_name` and `d % terraform import aws_timestreamwrite_table.example ExampleTable:ExampleDatabase ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/transcribe_language_model.html.markdown b/website/docs/cdktf/python/r/transcribe_language_model.html.markdown index bfbf158b9a2e..e532b474c702 100644 --- a/website/docs/cdktf/python/r/transcribe_language_model.html.markdown +++ b/website/docs/cdktf/python/r/transcribe_language_model.html.markdown @@ -95,8 +95,9 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -The following arguments are required: +This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `base_model_name` - (Required) Name of reference base model. * `input_data_config` - (Required) The input data config for the LanguageModel. See [Input Data Config](#input-data-config) for more details. * `language_code` - (Required) The language code you selected for your language model. Refer to the [supported languages](https://docs.aws.amazon.com/transcribe/latest/dg/supported-languages.html) page for accepted codes. 
@@ -150,4 +151,4 @@ Using `terraform import`, import Transcribe LanguageModel using the `model_name` % terraform import aws_transcribe_language_model.example example-name ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/transcribe_medical_vocabulary.html.markdown b/website/docs/cdktf/python/r/transcribe_medical_vocabulary.html.markdown index ec0f1d94a726..e5cf34fe4516 100644 --- a/website/docs/cdktf/python/r/transcribe_medical_vocabulary.html.markdown +++ b/website/docs/cdktf/python/r/transcribe_medical_vocabulary.html.markdown @@ -64,6 +64,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `tags` - (Optional) A map of tags to assign to the MedicalVocabulary. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. 
## Attribute Reference @@ -107,4 +108,4 @@ Using `terraform import`, import Transcribe MedicalVocabulary using the `vocabul % terraform import aws_transcribe_medical_vocabulary.example example-name ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/transcribe_vocabulary.html.markdown b/website/docs/cdktf/python/r/transcribe_vocabulary.html.markdown index 7321d39d1da1..a94284b3a491 100644 --- a/website/docs/cdktf/python/r/transcribe_vocabulary.html.markdown +++ b/website/docs/cdktf/python/r/transcribe_vocabulary.html.markdown @@ -58,11 +58,11 @@ class MyConvertedCode(TerraformStack): The following arguments are required: * `language_code` - (Required) The language code you selected for your vocabulary. -* `vocabulary_file_uri` - (Required) The Amazon S3 location (URI) of the text file that contains your custom vocabulary. * `vocabulary_name` - (Required) The name of the Vocabulary. The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `phrases` - (Optional) - A list of terms to include in the vocabulary. Conflicts with `vocabulary_file_uri` * `vocabulary_file_uri` - (Optional) The Amazon S3 location (URI) of the text file that contains your custom vocabulary. Conflicts wth `phrases`. * `tags` - (Optional) A map of tags to assign to the Vocabulary. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. 
@@ -108,4 +108,4 @@ Using `terraform import`, import Transcribe Vocabulary using the `vocabulary_nam % terraform import aws_transcribe_vocabulary.example example-name ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/transcribe_vocabulary_filter.html.markdown b/website/docs/cdktf/python/r/transcribe_vocabulary_filter.html.markdown index bf6bf06b10bd..234d0a557e29 100644 --- a/website/docs/cdktf/python/r/transcribe_vocabulary_filter.html.markdown +++ b/website/docs/cdktf/python/r/transcribe_vocabulary_filter.html.markdown @@ -48,6 +48,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `vocabulary_filter_file_uri` - (Optional) The Amazon S3 location (URI) of the text file that contains your custom VocabularyFilter. Conflicts with `words` argument. * `tags` - (Optional) A map of tags to assign to the VocabularyFilter. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. * `words` - (Optional) - A list of terms to include in the vocabulary. Conflicts with `vocabulary_filter_file_uri` argument. 
@@ -85,4 +86,4 @@ Using `terraform import`, import Transcribe VocabularyFilter using the `vocabula % terraform import aws_transcribe_vocabulary_filter.example example-name ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/transfer_access.html.markdown b/website/docs/cdktf/python/r/transfer_access.html.markdown index f5694822d660..2b2d0cdfe09d 100644 --- a/website/docs/cdktf/python/r/transfer_access.html.markdown +++ b/website/docs/cdktf/python/r/transfer_access.html.markdown @@ -68,6 +68,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `external_id` - (Required) The SID of a group in the directory connected to the Transfer Server (e.g., `S-1-1-12-1234567890-123456789-1234567890-1234`) * `server_id` - (Required) The Server ID of the Transfer Server (e.g., `s-12345678`) * `home_directory` - (Optional) The landing directory (folder) for a user when they log in to the server using their SFTP client. It should begin with a `/`. The first item in the path is the name of the home bucket (accessible as `${Transfer:HomeBucket}` in the policy) and the rest is the home directory (accessible as `${Transfer:HomeDirectory}` in the policy). For example, `/example-bucket-1234/username` would set the home bucket to `example-bucket-1234` and the home directory to `username`. 
@@ -119,4 +120,4 @@ Using `terraform import`, import Transfer Accesses using the `server_id` and `ex % terraform import aws_transfer_access.example s-12345678/S-1-1-12-1234567890-123456789-1234567890-1234 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/transfer_agreement.html.markdown b/website/docs/cdktf/python/r/transfer_agreement.html.markdown index 93e2e01c2a69..d161a8100dce 100644 --- a/website/docs/cdktf/python/r/transfer_agreement.html.markdown +++ b/website/docs/cdktf/python/r/transfer_agreement.html.markdown @@ -42,6 +42,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `access_role` - (Required) The IAM Role which provides read and write access to the parent directory of the file location mentioned in the StartFileTransfer request. * `base_directory` - (Required) The landing directory for the files transferred by using the AS2 protocol. * `description` - (Optional) The Optional description of the transdfer. 
@@ -83,4 +84,4 @@ Using `terraform import`, import Transfer AS2 Agreement using the `server_id/agr % terraform import aws_transfer_agreement.example s-4221a88afd5f4362a/a-4221a88afd5f4362a ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/transfer_certificate.html.markdown b/website/docs/cdktf/python/r/transfer_certificate.html.markdown index 0f54594da399..6a36f51ef591 100644 --- a/website/docs/cdktf/python/r/transfer_certificate.html.markdown +++ b/website/docs/cdktf/python/r/transfer_certificate.html.markdown @@ -44,6 +44,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `certificate` - (Required) The valid certificate file required for the transfer. * `certificate_chain` - (Optional) The optional list of certificate that make up the chain for the certificate that is being imported. * `description` - (Optional) A short description that helps identify the certificate. 
@@ -85,4 +86,4 @@ Using `terraform import`, import Transfer AS2 Certificate using the `certificate % terraform import aws_transfer_certificate.example c-4221a88afd5f4362a ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/transfer_connector.html.markdown b/website/docs/cdktf/python/r/transfer_connector.html.markdown index aa58953fc05d..84a9a2e19788 100644 --- a/website/docs/cdktf/python/r/transfer_connector.html.markdown +++ b/website/docs/cdktf/python/r/transfer_connector.html.markdown @@ -72,6 +72,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `access_role` - (Required) The IAM Role which provides read and write access to the parent directory of the file location mentioned in the StartFileTransfer request. * `as2_config` - (Optional) Either SFTP or AS2 is configured.The parameters to configure for the connector object. Fields documented below. * `logging_role` - (Optional) The IAM Role which is required for allowing the connector to turn on CloudWatch logging for Amazon S3 events. 
@@ -128,4 +129,4 @@ Using `terraform import`, import Transfer AS2 Connector using the `connector_id` % terraform import aws_transfer_connector.example c-4221a88afd5f4362a ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/transfer_profile.html.markdown b/website/docs/cdktf/python/r/transfer_profile.html.markdown index 3c603586ef14..556c9f93151d 100644 --- a/website/docs/cdktf/python/r/transfer_profile.html.markdown +++ b/website/docs/cdktf/python/r/transfer_profile.html.markdown @@ -42,6 +42,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `as2_id` - (Required) The As2Id is the AS2 name as defined in the RFC 4130. For inbound ttransfers this is the AS2 From Header for the AS2 messages sent from the partner. For Outbound messages this is the AS2 To Header for the AS2 messages sent to the partner. his ID cannot include spaces. * `certificate_ids` - (Optional) The list of certificate Ids from the imported certificate operation. * `profile_type` - (Required) The profile type should be LOCAL or PARTNER. @@ -79,4 +80,4 @@ Using `terraform import`, import Transfer AS2 Profile using the `profile_id`. 
Fo % terraform import aws_transfer_profile.example p-4221a88afd5f4362a ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/transfer_server.html.markdown b/website/docs/cdktf/python/r/transfer_server.html.markdown index c50178e0780b..cec60f0a6998 100644 --- a/website/docs/cdktf/python/r/transfer_server.html.markdown +++ b/website/docs/cdktf/python/r/transfer_server.html.markdown @@ -201,6 +201,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `certificate` - (Optional) The Amazon Resource Name (ARN) of the AWS Certificate Manager (ACM) certificate. This is required when `protocols` is set to `FTPS` * `domain` - (Optional) The domain of the storage system that is used for file transfers. Valid values are: `S3` and `EFS`. The default value is `S3`. * `protocols` - (Optional) Specifies the file transfer protocol or protocols over which your file transfer protocol client can connect to your server's endpoint. This defaults to `SFTP` . The available protocols are: @@ -332,4 +333,4 @@ Using `terraform import`, import Transfer Servers using the server `id`. For exa Certain resource arguments, such as `host_key`, cannot be read via the API and imported into Terraform. Terraform will display a difference for these arguments the first run after import if declared in the Terraform configuration for an imported resource. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/transfer_ssh_key.html.markdown b/website/docs/cdktf/python/r/transfer_ssh_key.html.markdown index 66d38f2560e2..ce701a62e2e5 100644 --- a/website/docs/cdktf/python/r/transfer_ssh_key.html.markdown +++ b/website/docs/cdktf/python/r/transfer_ssh_key.html.markdown @@ -106,6 +106,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `server_id` - (Requirement) The Server ID of the Transfer Server (e.g., `s-12345678`) * `user_name` - (Requirement) The name of the user account that is assigned to one or more servers. * `body` - (Requirement) The public key portion of an SSH key pair. @@ -139,4 +140,4 @@ Using `terraform import`, import Transfer SSH Public Key using the `server_id` a % terraform import aws_transfer_ssh_key.bar s-12345678/test-username/key-12345 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/transfer_tag.html.markdown b/website/docs/cdktf/python/r/transfer_tag.html.markdown index e058213b0dae..a15e162eab08 100644 --- a/website/docs/cdktf/python/r/transfer_tag.html.markdown +++ b/website/docs/cdktf/python/r/transfer_tag.html.markdown @@ -50,6 +50,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
* `resource_arn` - (Required) Amazon Resource Name (ARN) of the Transfer Family resource to tag. * `key` - (Required) Tag name. * `value` - (Required) Tag value. @@ -85,4 +86,4 @@ Using `terraform import`, import `aws_transfer_tag` using the Transfer Family re % terraform import aws_transfer_tag.example arn:aws:transfer:us-east-1:123456789012:server/s-1234567890abcdef0,Name ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/transfer_user.html.markdown b/website/docs/cdktf/python/r/transfer_user.html.markdown index 90191629b2b5..1adf2a8ccd7d 100644 --- a/website/docs/cdktf/python/r/transfer_user.html.markdown +++ b/website/docs/cdktf/python/r/transfer_user.html.markdown @@ -93,6 +93,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `server_id` - (Required) The Server ID of the Transfer Server (e.g., `s-12345678`) * `user_name` - (Required) The name used for log in to your SFTP server. * `home_directory` - (Optional) The landing directory (folder) for a user when they log in to the server using their SFTP client. It should begin with a `/`. The first item in the path is the name of the home bucket (accessible as `${Transfer:HomeBucket}` in the policy) and the rest is the home directory (accessible as `${Transfer:HomeDirectory}` in the policy). For example, `/example-bucket-1234/username` would set the home bucket to `example-bucket-1234` and the home directory to `username`. 
@@ -163,4 +164,4 @@ Using `terraform import`, import Transfer Users using the `server_id` and `user_ % terraform import aws_transfer_user.bar s-12345678/test-username ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/transfer_workflow.html.markdown b/website/docs/cdktf/python/r/transfer_workflow.html.markdown index 391d21b5c70b..8c5ad4c35481 100644 --- a/website/docs/cdktf/python/r/transfer_workflow.html.markdown +++ b/website/docs/cdktf/python/r/transfer_workflow.html.markdown @@ -83,6 +83,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` - (Optional) A textual description for the workflow. * `on_exception_steps` - (Optional) Specifies the steps (actions) to take if errors are encountered during execution of the workflow. See Workflow Steps below. * `steps` - (Required) Specifies the details for the steps that are in the specified workflow. See Workflow Steps below. @@ -183,4 +184,4 @@ Using `terraform import`, import Transfer Workflows using the `worflow_id`. 
For % terraform import aws_transfer_workflow.example example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/verifiedaccess_endpoint.html.markdown b/website/docs/cdktf/python/r/verifiedaccess_endpoint.html.markdown index fbd6376c432e..2defd8d2a6b8 100644 --- a/website/docs/cdktf/python/r/verifiedaccess_endpoint.html.markdown +++ b/website/docs/cdktf/python/r/verifiedaccess_endpoint.html.markdown @@ -121,6 +121,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `application_domain` - (Optional) The DNS name for users to reach your application. This parameter is required if the endpoint type is `load-balancer` or `network-interface`. * `description` - (Optional) A description for the Verified Access endpoint. * `domain_certificate_arn` - (Optional) - The ARN of the public TLS/SSL certificate in AWS Certificate Manager to associate with the endpoint. The CN in the certificate must match the DNS name your end users will use to reach your application. This parameter is required if the endpoint type is `load-balancer` or `network-interface`. @@ -173,4 +174,4 @@ Using `terraform import`, import Verified Access Instances using the `id`. 
For % terraform import aws_verifiedaccess_endpoint.example vae-8012925589 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/verifiedaccess_group.html.markdown b/website/docs/cdktf/python/r/verifiedaccess_group.html.markdown index 50612ee19672..2b1ff404f87f 100644 --- a/website/docs/cdktf/python/r/verifiedaccess_group.html.markdown +++ b/website/docs/cdktf/python/r/verifiedaccess_group.html.markdown @@ -67,6 +67,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` - (Optional) Description of the verified access group. * `policy_document` - (Optional) The policy document that is associated with this resource. * `sse_configuration` - (Optional) Configuration block to use KMS keys for server-side encryption. @@ -93,4 +94,4 @@ This resource exports the following attributes in addition to the arguments abov * `update` - (Default `180m`) * `delete` - (Default `90m`) - + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/verifiedaccess_instance.html.markdown b/website/docs/cdktf/python/r/verifiedaccess_instance.html.markdown index ac6e701e1877..49617530261e 100644 --- a/website/docs/cdktf/python/r/verifiedaccess_instance.html.markdown +++ b/website/docs/cdktf/python/r/verifiedaccess_instance.html.markdown @@ -78,6 +78,7 @@ class MyConvertedCode(TerraformStack): The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` - (Optional) A description for the AWS Verified Access Instance. * `fips_enabled` - (Optional, Forces new resource) Enable or disable support for Federal Information Processing Standards (FIPS) on the AWS Verified Access Instance. * `cidr_endpoints_custom_subdomain` - (Optional) The custom subdomain for the CIDR endpoints. @@ -127,4 +128,4 @@ Using `terraform import`, import Verified Access Instances using the `id`. For % terraform import aws_verifiedaccess_instance.example vai-1234567890abcdef0 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/verifiedaccess_instance_logging_configuration.html.markdown b/website/docs/cdktf/python/r/verifiedaccess_instance_logging_configuration.html.markdown index a1c312a95a1a..1ccfba5527ff 100644 --- a/website/docs/cdktf/python/r/verifiedaccess_instance_logging_configuration.html.markdown +++ b/website/docs/cdktf/python/r/verifiedaccess_instance_logging_configuration.html.markdown @@ -171,6 +171,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `access_logs` - (Required) A block that specifies the configuration options for Verified Access instances. [Detailed below](#access_logs). * `verifiedaccess_instance_id` - (Required - Forces New resource) The ID of the Verified Access instance. 
@@ -236,4 +237,4 @@ Using `terraform import`, import Verified Access Logging Configuration using the % terraform import aws_verifiedaccess_instance_logging_configuration.example vai-1234567890abcdef0 ``` - + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/verifiedaccess_instance_trust_provider_attachment.html.markdown b/website/docs/cdktf/python/r/verifiedaccess_instance_trust_provider_attachment.html.markdown index 4b81e97ad698..e90fafee6f7f 100644 --- a/website/docs/cdktf/python/r/verifiedaccess_instance_trust_provider_attachment.html.markdown +++ b/website/docs/cdktf/python/r/verifiedaccess_instance_trust_provider_attachment.html.markdown @@ -51,8 +51,9 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -The following arguments are required: +This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `verifiedaccess_instance_id` - (Required) The ID of the Verified Access instance to attach the Trust Provider to. * `verifiedaccess_trust_provider_id` - (Required) The ID of the Verified Access trust provider. 
@@ -87,4 +88,4 @@ Using `terraform import`, import Verified Access Instance Trust Provider Attachm % terraform import aws_verifiedaccess_instance_trust_provider_attachment.example vai-1234567890abcdef0/vatp-8012925589 ``` - + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/verifiedaccess_trust_provider.html.markdown b/website/docs/cdktf/python/r/verifiedaccess_trust_provider.html.markdown index 8ddedf49877a..7a36701e5457 100644 --- a/website/docs/cdktf/python/r/verifiedaccess_trust_provider.html.markdown +++ b/website/docs/cdktf/python/r/verifiedaccess_trust_provider.html.markdown @@ -42,6 +42,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` - (Optional) A description for the AWS Verified Access trust provider. * `device_options` - (Optional) A block of options for device identity based trust providers. * `device_trust_provider_type` (Optional) The type of device-based trust provider. @@ -89,4 +90,4 @@ Using `terraform import`, import Transfer Workflows using the `id`. 
For example % terraform import aws_verifiedaccess_trust_provider.example vatp-8012925589 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/verifiedpermissions_identity_source.html.markdown b/website/docs/cdktf/python/r/verifiedpermissions_identity_source.html.markdown index 5f639c8075f7..868489eddb85 100644 --- a/website/docs/cdktf/python/r/verifiedpermissions_identity_source.html.markdown +++ b/website/docs/cdktf/python/r/verifiedpermissions_identity_source.html.markdown @@ -121,6 +121,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `policy_store_id` - (Required) Specifies the ID of the policy store in which you want to store this identity source. * `configuration`- (Required) Specifies the details required to communicate with the identity provider (IdP) associated with this identity source. See [Configuration](#configuration) below. * `principal_entity_type`- (Optional) Specifies the namespace and data type of the principals generated for identities authenticated by the new identity source. 
@@ -198,4 +199,4 @@ Using `terraform import`, import Verified Permissions Identity Source using the % terraform import aws_verifiedpermissions_identity_source.example policy-store-id-12345678:identity-source-id-12345678 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/verifiedpermissions_policy.html.markdown b/website/docs/cdktf/python/r/verifiedpermissions_policy.html.markdown index 818b771ee54a..aa097be166b5 100644 --- a/website/docs/cdktf/python/r/verifiedpermissions_policy.html.markdown +++ b/website/docs/cdktf/python/r/verifiedpermissions_policy.html.markdown @@ -42,8 +42,9 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -The following arguments are required: +This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `policy_store_id` - (Required) The Policy Store ID of the policy store. * `definition`- (Required) The definition of the policy. See [Definition](#definition) below. 
@@ -99,4 +100,4 @@ Using `terraform import`, import Verified Permissions Policy using the `policy_i % terraform import aws_verifiedpermissions_policy.example policy-id-12345678,policy-store-id-12345678 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/verifiedpermissions_policy_store.html.markdown b/website/docs/cdktf/python/r/verifiedpermissions_policy_store.html.markdown index a17d493610fd..a0796fe741fb 100644 --- a/website/docs/cdktf/python/r/verifiedpermissions_policy_store.html.markdown +++ b/website/docs/cdktf/python/r/verifiedpermissions_policy_store.html.markdown @@ -45,6 +45,8 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `deletion_protection` - (Optional) Specifies whether the policy store can be deleted. If enabled, the policy store can't be deleted. Valid Values: `ENABLED`, `DISABLED`. Default value: `DISABLED`. * `description` - (Optional) A description of the Policy Store. * `tags` - (Optional) Key-value mapping of resource tags. If configured with a provider [`default_tags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. 
@@ -81,4 +83,4 @@ Using `terraform import`, import Verified Permissions Policy Store using the `po % terraform import aws_verifiedpermissions_policy_store.example DxQg2j8xvXJQ1tQCYNWj9T ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/verifiedpermissions_policy_template.html.markdown b/website/docs/cdktf/python/r/verifiedpermissions_policy_template.html.markdown index 4f70103610dd..3cdb22347606 100644 --- a/website/docs/cdktf/python/r/verifiedpermissions_policy_template.html.markdown +++ b/website/docs/cdktf/python/r/verifiedpermissions_policy_template.html.markdown @@ -42,6 +42,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` - (Optional) Provides a description for the policy template. 
## Attribute Reference @@ -76,4 +77,4 @@ Using `terraform import`, import Verified Permissions Policy Store using the `po % terraform import aws_verifiedpermissions_policy_template.example policyStoreId:policyTemplateId ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/verifiedpermissions_schema.html.markdown b/website/docs/cdktf/python/r/verifiedpermissions_schema.html.markdown index 619ee0a779f5..1a18b7c7126c 100644 --- a/website/docs/cdktf/python/r/verifiedpermissions_schema.html.markdown +++ b/website/docs/cdktf/python/r/verifiedpermissions_schema.html.markdown @@ -29,15 +29,15 @@ class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) VerifiedpermissionsSchema(self, "example", - definition=[{ - "value": Token.as_string( + definition=[VerifiedpermissionsSchemaDefinition( + value=Token.as_string( Fn.jsonencode({ "Namespace": { "actions": {}, "entity_types": {} } })) - } + ) ], policy_store_id=Token.as_string(aws_verifiedpermissions_policy_store_example.policy_store_id) ) @@ -45,8 +45,9 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -The following arguments are required: +This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `policy_store_id` - (Required) The ID of the Policy Store. * `definition` - (Required) The definition of the schema. * `value` - (Required) A JSON string representation of the schema. 
@@ -82,4 +83,4 @@ Using `terraform import`, import Verified Permissions Policy Store Schema using % terraform import aws_verifiedpermissions_schema.example DxQg2j8xvXJQ1tQCYNWj9T ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/volume_attachment.html.markdown b/website/docs/cdktf/python/r/volume_attachment.html.markdown index e4415a7865a3..3b248d737c57 100644 --- a/website/docs/cdktf/python/r/volume_attachment.html.markdown +++ b/website/docs/cdktf/python/r/volume_attachment.html.markdown @@ -54,6 +54,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `device_name` - (Required) The device name to expose to the instance (for example, `/dev/sdh` or `xvdh`). See [Device Naming on Linux Instances][1] and [Device Naming on Windows Instances][2] for more information. 
* `instance_id` - (Required) ID of the Instance to attach to @@ -107,4 +108,4 @@ Using `terraform import`, import EBS Volume Attachments using `DEVICE_NAME:VOLUM [2]: https://docs.aws.amazon.com/AWSEC2/latest/WindowsGuide/device_naming.html#available-ec2-device-names [3]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-detaching-volume.html - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/vpc.html.markdown b/website/docs/cdktf/python/r/vpc.html.markdown index 9039000889bc..c32007558b57 100644 --- a/website/docs/cdktf/python/r/vpc.html.markdown +++ b/website/docs/cdktf/python/r/vpc.html.markdown @@ -77,14 +77,14 @@ class MyConvertedCode(TerraformStack): current = DataAwsRegion(self, "current") test = VpcIpam(self, "test", operating_regions=[VpcIpamOperatingRegions( - region_name=Token.as_string(current.name) + region_name=Token.as_string(current.region) ) ] ) aws_vpc_ipam_pool_test = VpcIpamPool(self, "test_2", address_family="ipv4", ipam_scope_id=test.private_default_scope_id, - locale=Token.as_string(current.name) + locale=Token.as_string(current.region) ) # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. aws_vpc_ipam_pool_test.override_logical_id("test") @@ -107,6 +107,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `cidr_block` - (Optional) The IPv4 CIDR block for the VPC. CIDR can be explicitly set or it can be derived from IPAM using `ipv4_netmask_length`. * `instance_tenancy` - (Optional) A tenancy option for instances launched into the VPC. 
Default is `default`, which ensures that EC2 instances launched in this VPC use the EC2 instance tenancy attribute specified when the EC2 instance is launched. The only other option is `dedicated`, which ensures that EC2 instances launched in this VPC are run on dedicated tenancy instances regardless of the tenancy attribute specified at launch. This has a dedicated per region fee of $2 per hour, plus an hourly per instance usage fee. * `ipv4_ipam_pool_id` - (Optional) The ID of an IPv4 IPAM pool you want to use for allocating this VPC's CIDR. IPAM is a VPC feature that you can use to automate your IP address management workflows including assigning, tracking, troubleshooting, and auditing IP addresses across AWS Regions and accounts. Using IPAM you can monitor IP address usage throughout your AWS Organization. @@ -168,4 +169,4 @@ Using `terraform import`, import VPCs using the VPC `id`. For example: % terraform import aws_vpc.test_vpc vpc-a01106c2 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/vpc_block_public_access_exclusion.html.markdown b/website/docs/cdktf/python/r/vpc_block_public_access_exclusion.html.markdown index 747143f85bc7..8da1547957c5 100644 --- a/website/docs/cdktf/python/r/vpc_block_public_access_exclusion.html.markdown +++ b/website/docs/cdktf/python/r/vpc_block_public_access_exclusion.html.markdown @@ -24,8 +24,8 @@ from cdktf import TerraformStack # Provider bindings are generated by running `cdktf get`. # See https://cdk.tf/provider-generation for more details. # -from imports.aws. import VpcBlockPublicAccessExclusion from imports.aws.vpc import Vpc +from imports.aws.vpc_block_public_access_exclusion import VpcBlockPublicAccessExclusion class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) @@ -46,14 +46,14 @@ class MyConvertedCode(TerraformStack): ```python # DO NOT EDIT. 
Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug from constructs import Construct -from cdktf import TerraformStack +from cdktf import Token, TerraformStack # # Provider bindings are generated by running `cdktf get`. # See https://cdk.tf/provider-generation for more details. # -from imports.aws. import VpcBlockPublicAccessExclusion from imports.aws.subnet import Subnet from imports.aws.vpc import Vpc +from imports.aws.vpc_block_public_access_exclusion import VpcBlockPublicAccessExclusion class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) @@ -69,7 +69,7 @@ class MyConvertedCode(TerraformStack): aws_vpc_block_public_access_exclusion_test = VpcBlockPublicAccessExclusion(self, "test_2", internet_gateway_exclusion_mode="allow-egress", - subnet_id=aws_subnet_test.id + subnet_id=Token.as_string(aws_subnet_test.id) ) # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. aws_vpc_block_public_access_exclusion_test.override_logical_id("test") @@ -83,6 +83,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `vpc_id` - (Optional) Id of the VPC to which this exclusion applies. Either this or the subnet_id needs to be provided. * `subnet_id` - (Optional) Id of the subnet to which this exclusion applies. Either this or the vpc_id needs to be provided. * `tags` - (Optional) A map of tags to assign to the exclusion. 
If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. @@ -115,7 +116,7 @@ from cdktf import TerraformStack # Provider bindings are generated by running `cdktf get`. # See https://cdk.tf/provider-generation for more details. # -from imports.aws. import VpcBlockPublicAccessExclusion +from imports.aws.vpc_block_public_access_exclusion import VpcBlockPublicAccessExclusion class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) @@ -128,4 +129,4 @@ Using `terraform import`, import EC2 (Elastic Compute Cloud) VPC Block Public Ac % terraform import aws_vpc_block_public_access_exclusion.example vpcbpa-exclude-1234abcd ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/vpc_block_public_access_options.html.markdown b/website/docs/cdktf/python/r/vpc_block_public_access_options.html.markdown index 6da3706e92a9..899ace4e153b 100644 --- a/website/docs/cdktf/python/r/vpc_block_public_access_options.html.markdown +++ b/website/docs/cdktf/python/r/vpc_block_public_access_options.html.markdown @@ -24,7 +24,7 @@ from cdktf import TerraformStack # Provider bindings are generated by running `cdktf get`. # See https://cdk.tf/provider-generation for more details. # -from imports.aws. 
import VpcBlockPublicAccessOptions +from imports.aws.vpc_block_public_access_options import VpcBlockPublicAccessOptions class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) @@ -35,8 +35,9 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -The following arguments are required: +This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `internet_gateway_block_mode` - (Required) Block mode. Needs to be one of `block-bidirectional`, `block-ingress`, `off`. If this resource is deleted, then this value will be set to `off` in the AWS account and region. ## Attribute Reference @@ -66,7 +67,7 @@ from cdktf import TerraformStack # Provider bindings are generated by running `cdktf get`. # See https://cdk.tf/provider-generation for more details. # -from imports.aws. 
import VpcBlockPublicAccessOptions +from imports.aws.vpc_block_public_access_options import VpcBlockPublicAccessOptions class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) @@ -79,4 +80,4 @@ Using `terraform import`, import VPC Block Public Access Options using the `aws_ % terraform import aws_vpc_block_public_access_options.example us-east-1 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/vpc_dhcp_options.html.markdown b/website/docs/cdktf/python/r/vpc_dhcp_options.html.markdown index 2ce8b3c111ac..3805e8449149 100644 --- a/website/docs/cdktf/python/r/vpc_dhcp_options.html.markdown +++ b/website/docs/cdktf/python/r/vpc_dhcp_options.html.markdown @@ -64,6 +64,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `domain_name` - (Optional) the suffix domain name to use by default when resolving non Fully Qualified Domain Names. In other words, this is what ends up being the `search` value in the `/etc/resolv.conf` file. * `domain_name_servers` - (Optional) List of name servers to configure in `/etc/resolv.conf`. If you want to use the default AWS nameservers you should set this to `AmazonProvidedDNS`. * `ipv6_address_preferred_lease_time` - (Optional) How frequently, in seconds, a running instance with an IPv6 assigned to it goes through DHCPv6 lease renewal. Acceptable values are between 140 and 2147483647 (approximately 68 years). If no value is entered, the default lease time is 140 seconds. If you use long-term addressing for EC2 instances, you can increase the lease time and avoid frequent lease renewal requests. 
Lease renewal typically occurs when half of the lease time has elapsed. @@ -117,4 +118,4 @@ Using `terraform import`, import VPC DHCP Options using the DHCP Options `id`. F % terraform import aws_vpc_dhcp_options.my_options dopt-d9070ebb ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/vpc_dhcp_options_association.html.markdown b/website/docs/cdktf/python/r/vpc_dhcp_options_association.html.markdown index c4892e12bbb5..db18115db32b 100644 --- a/website/docs/cdktf/python/r/vpc_dhcp_options_association.html.markdown +++ b/website/docs/cdktf/python/r/vpc_dhcp_options_association.html.markdown @@ -36,6 +36,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `vpc_id` - (Required) The ID of the VPC to which we would like to associate a DHCP Options Set. * `dhcp_options_id` - (Required) The ID of the DHCP Options Set to associate to the VPC. 
@@ -75,4 +76,4 @@ Using `terraform import`, import DHCP associations using the VPC ID associated w % terraform import aws_vpc_dhcp_options_association.imported vpc-0f001273ec18911b1 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/vpc_endpoint.html.markdown b/website/docs/cdktf/python/r/vpc_endpoint.html.markdown index d0c5e226e6f4..ccac71b5800c 100644 --- a/website/docs/cdktf/python/r/vpc_endpoint.html.markdown +++ b/website/docs/cdktf/python/r/vpc_endpoint.html.markdown @@ -243,6 +243,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `vpc_id` - (Required) The ID of the VPC in which the endpoint will be used. * `auto_accept` - (Optional) Accept the VPC endpoint (the VPC endpoint and service need to be in the same AWS account). * `policy` - (Optional) A policy to attach to the endpoint that controls access to the service. This is a JSON formatted string. Defaults to full access. All `Gateway` and some `Interface` endpoints support policies - see the [relevant AWS documentation](https://docs.aws.amazon.com/vpc/latest/userguide/vpc-endpoints-access.html) for more details. For more information about building AWS IAM policy documents with Terraform, see the [AWS IAM Policy Document Guide](https://learn.hashicorp.com/terraform/aws/iam-policy). @@ -271,7 +272,7 @@ If no security groups are specified, the VPC's [default security group](https:// * `ipv4` - (Optional) The IPv4 address to assign to the endpoint network interface in the subnet. You must provide an IPv4 address if the VPC endpoint supports IPv4. 
* `ipv6` - (Optional) The IPv6 address to assign to the endpoint network interface in the subnet. You must provide an IPv6 address if the VPC endpoint supports IPv6. -* `subnet` - (Optional) The ID of the subnet. Must have a corresponding subnet in the `subnet_ids` argument. +* `subnet_id` - (Optional) The ID of the subnet. Must have a corresponding subnet in the `subnet_ids` argument. ## Timeouts @@ -303,6 +304,32 @@ DNS blocks (for `dns_entry`) support the following attributes: ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_vpc_endpoint.example + identity = { + id = "vpce-3ecf2a57" + } +} + +resource "aws_vpc_endpoint" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `id` - (String) ID of the VPC endpoint. + +#### Optional + +* `account_id` (String) AWS Account where this resource is managed. +* `region` (String) Region where this resource is managed. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import VPC Endpoints using the VPC endpoint `id`. For example: ```python @@ -317,13 +344,13 @@ from imports.aws.vpc_endpoint import VpcEndpoint class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) - VpcEndpoint.generate_config_for_import(self, "endpoint1", "vpce-3ecf2a57") + VpcEndpoint.generate_config_for_import(self, "example", "vpce-3ecf2a57") ``` Using `terraform import`, import VPC Endpoints using the VPC endpoint `id`. 
For example: ```console -% terraform import aws_vpc_endpoint.endpoint1 vpce-3ecf2a57 +% terraform import aws_vpc_endpoint.example vpce-3ecf2a57 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/vpc_endpoint_connection_accepter.html.markdown b/website/docs/cdktf/python/r/vpc_endpoint_connection_accepter.html.markdown index 368feae950a2..5392b6f5b695 100644 --- a/website/docs/cdktf/python/r/vpc_endpoint_connection_accepter.html.markdown +++ b/website/docs/cdktf/python/r/vpc_endpoint_connection_accepter.html.markdown @@ -57,6 +57,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `vpc_endpoint_id` - (Required) AWS VPC Endpoint ID. * `vpc_endpoint_service_id` - (Required) AWS VPC Endpoint Service ID. 
@@ -92,4 +93,4 @@ Using `terraform import`, import VPC Endpoint Services using ID of the connectio % terraform import aws_vpc_endpoint_connection_accepter.foo vpce-svc-0f97a19d3fa8220bc_vpce-010601a6db371e263 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/vpc_endpoint_connection_notification.html.markdown b/website/docs/cdktf/python/r/vpc_endpoint_connection_notification.html.markdown index 3745cd9d5ec5..63e77f93b96b 100644 --- a/website/docs/cdktf/python/r/vpc_endpoint_connection_notification.html.markdown +++ b/website/docs/cdktf/python/r/vpc_endpoint_connection_notification.html.markdown @@ -67,6 +67,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `vpc_endpoint_service_id` - (Optional) The ID of the VPC Endpoint Service to receive notifications for. * `vpc_endpoint_id` - (Optional) The ID of the VPC Endpoint to receive notifications for. * `connection_notification_arn` - (Required) The ARN of the SNS topic for the notifications. 
@@ -107,4 +108,4 @@ Using `terraform import`, import VPC Endpoint connection notifications using the % terraform import aws_vpc_endpoint_connection_notification.foo vpce-nfn-09e6ed3b4efba2263 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/vpc_endpoint_policy.html.markdown b/website/docs/cdktf/python/r/vpc_endpoint_policy.html.markdown index c307dd5c70d3..d1993f6a4a8f 100644 --- a/website/docs/cdktf/python/r/vpc_endpoint_policy.html.markdown +++ b/website/docs/cdktf/python/r/vpc_endpoint_policy.html.markdown @@ -68,6 +68,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `vpc_endpoint_id` - (Required) The VPC Endpoint ID. * `policy` - (Optional) A policy to attach to the endpoint that controls access to the service. Defaults to full access. All `Gateway` and some `Interface` endpoints support policies - see the [relevant AWS documentation](https://docs.aws.amazon.com/vpc/latest/userguide/vpc-endpoints-access.html) for more details. For more information about building AWS IAM policy documents with Terraform, see the [AWS IAM Policy Document Guide](https://learn.hashicorp.com/terraform/aws/iam-policy). @@ -102,4 +103,4 @@ Using `terraform import`, import VPC Endpoint Policies using the `id`. 
For examp % terraform import aws_vpc_endpoint_policy.example vpce-3ecf2a57 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/vpc_endpoint_private_dns.html.markdown b/website/docs/cdktf/python/r/vpc_endpoint_private_dns.html.markdown index 0198a1c9ed5b..f8b176cee954 100644 --- a/website/docs/cdktf/python/r/vpc_endpoint_private_dns.html.markdown +++ b/website/docs/cdktf/python/r/vpc_endpoint_private_dns.html.markdown @@ -38,8 +38,9 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -The following arguments are required: +This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `private_dns_enabled` - (Required) Indicates whether a private hosted zone is associated with the VPC. Only applicable for `Interface` endpoints. * `vpc_endpoint_id` - (Required) VPC endpoint identifier. 
@@ -72,4 +73,4 @@ Using `terraform import`, import a VPC (Virtual Private Cloud) Endpoint Private % terraform import aws_vpc_endpoint_private_dns.example vpce-abcd-1234 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/vpc_endpoint_route_table_association.html.markdown b/website/docs/cdktf/python/r/vpc_endpoint_route_table_association.html.markdown index 954572009b36..478215e2744c 100644 --- a/website/docs/cdktf/python/r/vpc_endpoint_route_table_association.html.markdown +++ b/website/docs/cdktf/python/r/vpc_endpoint_route_table_association.html.markdown @@ -36,6 +36,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `route_table_id` - (Required) Identifier of the EC2 Route Table to be associated with the VPC Endpoint. * `vpc_endpoint_id` - (Required) Identifier of the VPC Endpoint with which the EC2 Route Table will be associated. 
@@ -70,4 +71,4 @@ Using `terraform import`, import VPC Endpoint Route Table Associations using `vp % terraform import aws_vpc_endpoint_route_table_association.example vpce-aaaaaaaa/rtb-bbbbbbbb ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/vpc_endpoint_security_group_association.html.markdown b/website/docs/cdktf/python/r/vpc_endpoint_security_group_association.html.markdown index b65d8f13b10d..037aaf76e593 100644 --- a/website/docs/cdktf/python/r/vpc_endpoint_security_group_association.html.markdown +++ b/website/docs/cdktf/python/r/vpc_endpoint_security_group_association.html.markdown @@ -44,6 +44,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `security_group_id` - (Required) The ID of the security group to be associated with the VPC endpoint. * `vpc_endpoint_id` - (Required) The ID of the VPC endpoint with which the security group will be associated. * `replace_default_association` - (Optional) Whether this association should replace the association with the VPC's default security group that is created when no security groups are specified during VPC endpoint creation. At most 1 association per-VPC endpoint should be configured with `replace_default_association = true`. `false` should be used when importing resources. 
@@ -79,4 +80,4 @@ Using `terraform import`, import VPC Endpoint Security Group Associations using % terraform import aws_vpc_endpoint_security_group_association.example vpce-aaaaaaaa/sg-bbbbbbbbbbbbbbbbb ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/vpc_endpoint_service.html.markdown b/website/docs/cdktf/python/r/vpc_endpoint_service.html.markdown index eef130bd24a8..6713e800ecd4 100644 --- a/website/docs/cdktf/python/r/vpc_endpoint_service.html.markdown +++ b/website/docs/cdktf/python/r/vpc_endpoint_service.html.markdown @@ -65,6 +65,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `acceptance_required` - (Required) Whether or not VPC endpoint connection requests to the service must be accepted by the service owner - `true` or `false`. * `allowed_principals` - (Optional) The ARNs of one or more principals allowed to discover the endpoint service. * `gateway_load_balancer_arns` - (Optional) Amazon Resource Names (ARNs) of one or more Gateway Load Balancers for the endpoint service. 
@@ -118,4 +119,4 @@ Using `terraform import`, import VPC Endpoint Services using the VPC endpoint se % terraform import aws_vpc_endpoint_service.foo vpce-svc-0f97a19d3fa8220bc ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/vpc_endpoint_service_allowed_principal.html.markdown b/website/docs/cdktf/python/r/vpc_endpoint_service_allowed_principal.html.markdown index 67ae977ebbef..1a15f4f2866e 100644 --- a/website/docs/cdktf/python/r/vpc_endpoint_service_allowed_principal.html.markdown +++ b/website/docs/cdktf/python/r/vpc_endpoint_service_allowed_principal.html.markdown @@ -46,6 +46,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `vpc_endpoint_service_id` - (Required) The ID of the VPC endpoint service to allow permission. * `principal_arn` - (Required) The ARN of the principal to allow permissions. @@ -55,4 +56,4 @@ This resource exports the following attributes in addition to the arguments abov * `id` - The ID of the association. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/vpc_endpoint_service_private_dns_verification.html.markdown b/website/docs/cdktf/python/r/vpc_endpoint_service_private_dns_verification.html.markdown index a63467298a59..08b20768a823 100644 --- a/website/docs/cdktf/python/r/vpc_endpoint_service_private_dns_verification.html.markdown +++ b/website/docs/cdktf/python/r/vpc_endpoint_service_private_dns_verification.html.markdown @@ -46,6 +46,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `wait_for_verification` - (Optional) Whether to wait until the endpoint service returns a `Verified` status for the configured private DNS name. ## Attribute Reference @@ -62,4 +63,4 @@ This resource exports no additional attributes. You cannot import this resource. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/vpc_endpoint_subnet_association.html.markdown b/website/docs/cdktf/python/r/vpc_endpoint_subnet_association.html.markdown index 4c527d37fdf0..a44d50b03e4f 100644 --- a/website/docs/cdktf/python/r/vpc_endpoint_subnet_association.html.markdown +++ b/website/docs/cdktf/python/r/vpc_endpoint_subnet_association.html.markdown @@ -44,6 +44,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
* `vpc_endpoint_id` - (Required) The ID of the VPC endpoint with which the subnet will be associated. * `subnet_id` - (Required) The ID of the subnet to be associated with the VPC endpoint. @@ -85,4 +86,4 @@ Using `terraform import`, import VPC Endpoint Subnet Associations using `vpc_end % terraform import aws_vpc_endpoint_subnet_association.example vpce-aaaaaaaa/subnet-bbbbbbbbbbbbbbbbb ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/vpc_ipam.html.markdown b/website/docs/cdktf/python/r/vpc_ipam.html.markdown index 419e7b09aa16..00fd402e2f96 100644 --- a/website/docs/cdktf/python/r/vpc_ipam.html.markdown +++ b/website/docs/cdktf/python/r/vpc_ipam.html.markdown @@ -33,7 +33,7 @@ class MyConvertedCode(TerraformStack): VpcIpam(self, "main", description="My IPAM", operating_regions=[VpcIpamOperatingRegions( - region_name=Token.as_string(current.name) + region_name=Token.as_string(current.region) ) ], tags={ @@ -65,7 +65,7 @@ class MyConvertedCode(TerraformStack): ) current = DataAwsRegion(self, "current") all_ipam_regions = Fn.distinct( - Token.as_any(Fn.concat([[current.name], ipam_regions.value]))) + Token.as_any(Fn.concat([[current.region], ipam_regions.value]))) # In most cases loops should be handled in the programming language context and # not inside of the Terraform context. If you are looping over something external, e.g. a variable or a file input # you should consider using a for loop. If you are looping over something only known to Terraform, e.g. a result of a data source @@ -84,9 +84,11 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
* `cascade` - (Optional) Enables you to quickly delete an IPAM, private scopes, pools in private scopes, and any allocations in the pools in private scopes. * `description` - (Optional) A description for the IPAM. * `enable_private_gua` - (Optional) Enable this option to use your own GUA ranges as private IPv6 addresses. Default: `false`. +* `metered_account` - (Optional) AWS account that is charged for active IP addresses managed in IPAM. Valid values are `ipam-owner` (default) and `resource-owner`. * `operating_regions` - (Required) Determines which locales can be chosen when you create pools. Locale is the Region where you want to make an IPAM pool available for allocations. You can only create pools with locales that match the operating Regions of the IPAM. You can only create VPCs from a pool whose locale matches the VPC's Region. You specify a region using the [region_name](#operating_regions) parameter. You **must** set your provider block region as an operating_region. * `tier` - (Optional) specifies the IPAM tier. Valid options include `free` and `advanced`. Default is `advanced`. * `tags` - (Optional) A map of tags to assign to the resource. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. @@ -134,4 +136,4 @@ Using `terraform import`, import IPAMs using the IPAM `id`. 
For example: % terraform import aws_vpc_ipam.example ipam-0178368ad2146a492 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/vpc_ipam_pool.html.markdown b/website/docs/cdktf/python/r/vpc_ipam_pool.html.markdown index fc158d2bb4cb..492b4db5511b 100644 --- a/website/docs/cdktf/python/r/vpc_ipam_pool.html.markdown +++ b/website/docs/cdktf/python/r/vpc_ipam_pool.html.markdown @@ -33,14 +33,14 @@ class MyConvertedCode(TerraformStack): current = DataAwsRegion(self, "current") example = VpcIpam(self, "example", operating_regions=[VpcIpamOperatingRegions( - region_name=Token.as_string(current.name) + region_name=Token.as_string(current.region) ) ] ) aws_vpc_ipam_pool_example = VpcIpamPool(self, "example_2", address_family="ipv4", ipam_scope_id=example.private_default_scope_id, - locale=Token.as_string(current.name) + locale=Token.as_string(current.region) ) # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. aws_vpc_ipam_pool_example.override_logical_id("example") @@ -66,7 +66,7 @@ class MyConvertedCode(TerraformStack): current = DataAwsRegion(self, "current") example = VpcIpam(self, "example", operating_regions=[VpcIpamOperatingRegions( - region_name=Token.as_string(current.name) + region_name=Token.as_string(current.region) ) ] ) @@ -81,7 +81,7 @@ class MyConvertedCode(TerraformStack): child = VpcIpamPool(self, "child", address_family="ipv4", ipam_scope_id=example.private_default_scope_id, - locale=Token.as_string(current.name), + locale=Token.as_string(current.region), source_ipam_pool_id=parent.id ) VpcIpamPoolCidr(self, "child_test", @@ -94,6 +94,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `address_family` - (Required) The IP protocol assigned to this pool. You must choose either IPv4 or IPv6 protocol for a pool. * `allocation_default_netmask_length` - (Optional) A default netmask length for allocations added to this pool. If, for example, the CIDR assigned to this pool is 10.0.0.0/8 and you enter 16 here, new allocations will default to 10.0.0.0/16 (unless you provide a different netmask value when you create the new allocation). * `allocation_max_netmask_length` - (Optional) The maximum netmask length that will be required for CIDR allocations in this pool. @@ -145,4 +146,4 @@ Using `terraform import`, import IPAMs using the IPAM pool `id`. For example: % terraform import aws_vpc_ipam_pool.example ipam-pool-0958f95207d978e1e ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/vpc_ipam_pool_cidr.html.markdown b/website/docs/cdktf/python/r/vpc_ipam_pool_cidr.html.markdown index 727d1c93a566..b26c90d34d4c 100644 --- a/website/docs/cdktf/python/r/vpc_ipam_pool_cidr.html.markdown +++ b/website/docs/cdktf/python/r/vpc_ipam_pool_cidr.html.markdown @@ -39,14 +39,14 @@ class MyConvertedCode(TerraformStack): current = DataAwsRegion(self, "current") example = VpcIpam(self, "example", operating_regions=[VpcIpamOperatingRegions( - region_name=Token.as_string(current.name) + region_name=Token.as_string(current.region) ) ] ) aws_vpc_ipam_pool_example = VpcIpamPool(self, "example_2", address_family="ipv4", ipam_scope_id=example.private_default_scope_id, - locale=Token.as_string(current.name) + locale=Token.as_string(current.region) ) # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. 
aws_vpc_ipam_pool_example.override_logical_id("example") @@ -78,7 +78,7 @@ class MyConvertedCode(TerraformStack): current = DataAwsRegion(self, "current") example = VpcIpam(self, "example", operating_regions=[VpcIpamOperatingRegions( - region_name=Token.as_string(current.name) + region_name=Token.as_string(current.region) ) ] ) @@ -103,6 +103,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `cidr` - (Optional) The CIDR you want to assign to the pool. Conflicts with `netmask_length`. * `cidr_authorization_context` - (Optional) A signed document that proves that you are authorized to bring the specified IP address range to Amazon using BYOIP. This is not stored in the state file. See [cidr_authorization_context](#cidr_authorization_context) for more information. * `ipam_pool_id` - (Required) The ID of the pool to which you want to assign a CIDR. @@ -149,4 +150,4 @@ Using `terraform import`, import IPAMs using the `_`. 
For ex % terraform import aws_vpc_ipam_pool_cidr.example 172.20.0.0/24_ipam-pool-0e634f5a1517cccdc ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/vpc_ipam_pool_cidr_allocation.html.markdown b/website/docs/cdktf/python/r/vpc_ipam_pool_cidr_allocation.html.markdown index f5d60b72f802..30adb5a40a43 100644 --- a/website/docs/cdktf/python/r/vpc_ipam_pool_cidr_allocation.html.markdown +++ b/website/docs/cdktf/python/r/vpc_ipam_pool_cidr_allocation.html.markdown @@ -35,14 +35,14 @@ class MyConvertedCode(TerraformStack): current = DataAwsRegion(self, "current") example = VpcIpam(self, "example", operating_regions=[VpcIpamOperatingRegions( - region_name=Token.as_string(current.name) + region_name=Token.as_string(current.region) ) ] ) aws_vpc_ipam_pool_example = VpcIpamPool(self, "example_2", address_family="ipv4", ipam_scope_id=example.private_default_scope_id, - locale=Token.as_string(current.name) + locale=Token.as_string(current.region) ) # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. aws_vpc_ipam_pool_example.override_logical_id("example") @@ -82,14 +82,14 @@ class MyConvertedCode(TerraformStack): current = DataAwsRegion(self, "current") example = VpcIpam(self, "example", operating_regions=[VpcIpamOperatingRegions( - region_name=Token.as_string(current.name) + region_name=Token.as_string(current.region) ) ] ) aws_vpc_ipam_pool_example = VpcIpamPool(self, "example_2", address_family="ipv4", ipam_scope_id=example.private_default_scope_id, - locale=Token.as_string(current.name) + locale=Token.as_string(current.region) ) # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. 
aws_vpc_ipam_pool_example.override_logical_id("example") @@ -113,6 +113,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `cidr` - (Optional, Forces new resource) The CIDR you want to assign to the pool. * `description` - (Optional, Forces new resource) The description for the allocation. * `disallowed_cidrs` - (Optional, Forces new resource) Exclude a particular CIDR range from being returned by the pool. @@ -153,4 +154,4 @@ Using `terraform import`, import IPAM allocations using the allocation `id` and % terraform import aws_vpc_ipam_pool_cidr_allocation.example ipam-pool-alloc-0dc6d196509c049ba8b549ff99f639736_ipam-pool-07cfb559e0921fcbe ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/vpc_ipam_preview_next_cidr.html.markdown b/website/docs/cdktf/python/r/vpc_ipam_preview_next_cidr.html.markdown index f17617595c98..df49970c71af 100644 --- a/website/docs/cdktf/python/r/vpc_ipam_preview_next_cidr.html.markdown +++ b/website/docs/cdktf/python/r/vpc_ipam_preview_next_cidr.html.markdown @@ -35,14 +35,14 @@ class MyConvertedCode(TerraformStack): current = DataAwsRegion(self, "current") example = VpcIpam(self, "example", operating_regions=[VpcIpamOperatingRegions( - region_name=Token.as_string(current.name) + region_name=Token.as_string(current.region) ) ] ) aws_vpc_ipam_pool_example = VpcIpamPool(self, "example_2", address_family="ipv4", ipam_scope_id=example.private_default_scope_id, - locale=Token.as_string(current.name) + locale=Token.as_string(current.region) ) # This allows the Terraform resource name to match the original name. 
You can remove the call if you don't need them to match. aws_vpc_ipam_pool_example.override_logical_id("example") @@ -66,6 +66,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `disallowed_cidrs` - (Optional) Exclude a particular CIDR range from being returned by the pool. * `ipam_pool_id` - (Required) The ID of the pool to which you want to assign a CIDR. * `netmask_length` - (Optional) The netmask length of the CIDR you would like to preview from the IPAM pool. @@ -77,4 +78,4 @@ This resource exports the following attributes in addition to the arguments abov * `cidr` - The previewed CIDR from the pool. * `id` - The ID of the preview. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/vpc_ipam_resource_discovery.html.markdown b/website/docs/cdktf/python/r/vpc_ipam_resource_discovery.html.markdown index a64ee5041907..53864b273937 100644 --- a/website/docs/cdktf/python/r/vpc_ipam_resource_discovery.html.markdown +++ b/website/docs/cdktf/python/r/vpc_ipam_resource_discovery.html.markdown @@ -33,7 +33,7 @@ class MyConvertedCode(TerraformStack): VpcIpamResourceDiscovery(self, "main", description="My IPAM Resource Discovery", operating_regions=[VpcIpamResourceDiscoveryOperatingRegions( - region_name=Token.as_string(current.name) + region_name=Token.as_string(current.region) ) ], tags={ @@ -46,6 +46,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` - (Optional) A description for the IPAM Resource Discovery. * `operating_regions` - (Required) Determines which regions the Resource Discovery will enable IPAM features for usage and monitoring. Locale is the Region where you want to make an IPAM pool available for allocations. You can only create pools with locales that match the operating Regions of the IPAM Resource Discovery. You can only create VPCs from a pool whose locale matches the VPC's Region. You specify a region using the [region_name](#operating_regions) parameter. **You must set your provider block region as an operating_region.** * `tags` - (Optional) A map of tags to assign to the resource. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. @@ -90,4 +91,4 @@ Using `terraform import`, import IPAMs using the IPAM resource discovery `id`. F % terraform import aws_vpc_ipam_resource_discovery.example ipam-res-disco-0178368ad2146a492 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/vpc_ipam_resource_discovery_association.html.markdown b/website/docs/cdktf/python/r/vpc_ipam_resource_discovery_association.html.markdown index 41504a91f2c4..60b5eff869ff 100644 --- a/website/docs/cdktf/python/r/vpc_ipam_resource_discovery_association.html.markdown +++ b/website/docs/cdktf/python/r/vpc_ipam_resource_discovery_association.html.markdown @@ -43,6 +43,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `ipam_id` - (Required) The ID of the IPAM to associate. * `ipam_resource_discovery_id` - (Required) The ID of the Resource Discovery to associate. * `tags` - (Optional) A map of tags to add to the IPAM resource discovery association resource. @@ -85,4 +86,4 @@ Using `terraform import`, import IPAMs using the IPAM resource discovery associa % terraform import aws_vpc_ipam_resource_discovery_association.example ipam-res-disco-assoc-0178368ad2146a492 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/vpc_ipam_scope.html.markdown b/website/docs/cdktf/python/r/vpc_ipam_scope.html.markdown index cee84178e91b..fba2a4a9b2fd 100644 --- a/website/docs/cdktf/python/r/vpc_ipam_scope.html.markdown +++ b/website/docs/cdktf/python/r/vpc_ipam_scope.html.markdown @@ -33,7 +33,7 @@ class MyConvertedCode(TerraformStack): current = DataAwsRegion(self, "current") example = VpcIpam(self, "example", operating_regions=[VpcIpamOperatingRegions( - region_name=Token.as_string(current.name) + region_name=Token.as_string(current.region) ) ] ) @@ -49,6 +49,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `ipam_id` - The ID of the IPAM for which you're creating this scope. * `description` - (Optional) A description for the scope you're creating. * `tags` - (Optional) Key-value mapping of resource tags. 
If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. @@ -89,4 +90,4 @@ Using `terraform import`, import IPAMs using the `scope_id`. For example: % terraform import aws_vpc_ipam_scope.example ipam-scope-0513c69f283d11dfb ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/vpc_ipv4_cidr_block_association.html.markdown b/website/docs/cdktf/python/r/vpc_ipv4_cidr_block_association.html.markdown index c90d90bc2720..f861ad79fe70 100644 --- a/website/docs/cdktf/python/r/vpc_ipv4_cidr_block_association.html.markdown +++ b/website/docs/cdktf/python/r/vpc_ipv4_cidr_block_association.html.markdown @@ -43,6 +43,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `cidr_block` - (Optional) The IPv4 CIDR block for the VPC. CIDR can be explicitly set or it can be derived from IPAM using `ipv4_netmask_length`. * `ipv4_ipam_pool_id` - (Optional) The ID of an IPv4 IPAM pool you want to use for allocating this VPC's CIDR. IPAM is a VPC feature that you can use to automate your IP address management workflows including assigning, tracking, troubleshooting, and auditing IP addresses across AWS Regions and accounts. Using IPAM you can monitor IP address usage throughout your AWS Organization. * `ipv4_netmask_length` - (Optional) The netmask length of the IPv4 CIDR you want to allocate to this VPC. Requires specifying a `ipv4_ipam_pool_id`. 
@@ -132,4 +133,4 @@ or % terraform import aws_vpc_ipv4_cidr_block_association.example vpc-cidr-assoc-021e8461d70ed08be,ipam-pool-0a07c432810393463,28 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/vpc_ipv6_cidr_block_association.html.markdown b/website/docs/cdktf/python/r/vpc_ipv6_cidr_block_association.html.markdown index 0b9897557e80..4f6f1439e363 100644 --- a/website/docs/cdktf/python/r/vpc_ipv6_cidr_block_association.html.markdown +++ b/website/docs/cdktf/python/r/vpc_ipv6_cidr_block_association.html.markdown @@ -44,6 +44,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `assign_generated_ipv6_cidr_block` - (Optional) Requests an Amazon-provided IPv6 CIDR block with a /56 prefix length for the VPC. You cannot specify the range of IPv6 addresses, or the size of the CIDR block. Default is `false`. Conflicts with `ipv6_ipam_pool_id`, `ipv6_pool`, `ipv6_cidr_block` and `ipv6_netmask_length`. * `ipv6_cidr_block` - (Optional) The IPv6 CIDR block for the VPC. CIDR can be explicitly set or it can be derived from IPAM using `ipv6_netmask_length`. This parameter is required if `ipv6_netmask_length` is not set and the IPAM pool does not have `allocation_default_netmask` set. Conflicts with `assign_generated_ipv6_cidr_block`. * `ipv6_ipam_pool_id` - (Optional) The ID of an IPv6 IPAM pool you want to use for allocating this VPC's CIDR. IPAM is a VPC feature that you can use to automate your IP address management workflows including assigning, tracking, troubleshooting, and auditing IP addresses across AWS Regions and accounts. 
Conflict with `assign_generated_ipv6_cidr_block` and `ipv6_pool`. @@ -137,4 +138,4 @@ or % terraform import aws_vpc_ipv6_cidr_block_association.example vpc-cidr-assoc-0754129087e149dcd,ipam-pool-0611d1d6bbc05ce60,56 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/vpc_network_performance_metric_subscription.html.markdown b/website/docs/cdktf/python/r/vpc_network_performance_metric_subscription.html.markdown index 6b670385da9e..b66797841e5f 100644 --- a/website/docs/cdktf/python/r/vpc_network_performance_metric_subscription.html.markdown +++ b/website/docs/cdktf/python/r/vpc_network_performance_metric_subscription.html.markdown @@ -36,6 +36,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `destination` - (Required) The target Region or Availability Zone that the metric subscription is enabled for. For example, `eu-west-1`. * `metric` - (Optional) The metric used for the enabled subscription. Valid values: `aggregate-latency`. Default: `aggregate-latency`. * `source` - (Required) The source Region or Availability Zone that the metric subscription is enabled for. For example, `us-east-1`. @@ -47,4 +48,4 @@ This resource exports the following attributes in addition to the arguments abov * `period` - The data aggregation time for the subscription. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/vpc_peering_connection.html.markdown b/website/docs/cdktf/python/r/vpc_peering_connection.html.markdown index ba7a8a81df62..9670276f6e31 100644 --- a/website/docs/cdktf/python/r/vpc_peering_connection.html.markdown +++ b/website/docs/cdktf/python/r/vpc_peering_connection.html.markdown @@ -150,6 +150,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `peer_owner_id` - (Optional) The AWS account ID of the target peer VPC. Defaults to the account ID the [AWS provider][1] is currently connected to, so must be managed if connecting cross-account. * `peer_vpc_id` - (Required) The ID of the target VPC with which you are creating the VPC Peering Connection. @@ -220,4 +221,4 @@ Using `terraform import`, import VPC Peering resources using the VPC peering `id [1]: /docs/providers/aws/index.html - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/vpc_peering_connection_accepter.html.markdown b/website/docs/cdktf/python/r/vpc_peering_connection_accepter.html.markdown index 5c6853b9fa66..3ec8d278d17e 100644 --- a/website/docs/cdktf/python/r/vpc_peering_connection_accepter.html.markdown +++ b/website/docs/cdktf/python/r/vpc_peering_connection_accepter.html.markdown @@ -21,6 +21,8 @@ connection into management. ## Example Usage +### Cross-Account Peering Or Cross-Region Peering Terraform AWS Provider v5 (and below) + ```python # DO NOT EDIT. 
Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug from constructs import Construct @@ -81,10 +83,62 @@ class MyConvertedCode(TerraformStack): aws_vpc_peering_connection_accepter_peer.override_logical_id("peer") ``` +### Cross-Region Peering (Same Account) Terraform AWS Provider v6 (and above) + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import Token, TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.provider import AwsProvider +from imports.aws.vpc import Vpc +from imports.aws.vpc_peering_connection import VpcPeeringConnection +from imports.aws.vpc_peering_connection_accepter import VpcPeeringConnectionAccepterA +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + AwsProvider(self, "aws", + region="us-east-1" + ) + main = Vpc(self, "main", + cidr_block="10.0.0.0/16" + ) + peer = Vpc(self, "peer", + cidr_block="10.1.0.0/16", + region="us-west-2" + ) + aws_vpc_peering_connection_peer = VpcPeeringConnection(self, "peer_3", + auto_accept=False, + peer_region="us-west-2", + peer_vpc_id=peer.id, + tags={ + "Side": "Requester" + }, + vpc_id=main.id + ) + # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. + aws_vpc_peering_connection_peer.override_logical_id("peer") + aws_vpc_peering_connection_accepter_peer = + VpcPeeringConnectionAccepterA(self, "peer_4", + auto_accept=True, + region="us-west-2", + tags={ + "Side": "Accepter" + }, + vpc_peering_connection_id=Token.as_string(aws_vpc_peering_connection_peer.id) + ) + # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. 
+ aws_vpc_peering_connection_accepter_peer.override_logical_id("peer") +``` + ## Argument Reference This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `vpc_peering_connection_id` - (Required) The VPC Peering Connection ID to manage. * `auto_accept` - (Optional) Whether or not to accept the peering request. Defaults to `false`. * `tags` - (Optional) A map of tags to assign to the resource. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. @@ -166,4 +220,4 @@ class MyConvertedCode(TerraformStack): ) ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/vpc_peering_connection_options.html.markdown b/website/docs/cdktf/python/r/vpc_peering_connection_options.html.markdown index 85d583808019..2d629399bbc0 100644 --- a/website/docs/cdktf/python/r/vpc_peering_connection_options.html.markdown +++ b/website/docs/cdktf/python/r/vpc_peering_connection_options.html.markdown @@ -147,6 +147,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `vpc_peering_connection_id` - (Required) The ID of the requester VPC peering connection. 
* `accepter` (Optional) - An optional configuration block that allows for [VPC Peering Connection](https://docs.aws.amazon.com/vpc/latest/peering/what-is-vpc-peering.html) options to be set for the VPC that accepts the peering connection (a maximum of one). * `requester` (Optional) - A optional configuration block that allows for [VPC Peering Connection](https://docs.aws.amazon.com/vpc/latest/peering/what-is-vpc-peering.html) options to be set for the VPC that requests the peering connection (a maximum of one). @@ -188,4 +189,4 @@ Using `terraform import`, import VPC Peering Connection Options using the VPC pe % terraform import aws_vpc_peering_connection_options.foo pcx-111aaa111 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/vpc_route_server.html.markdown b/website/docs/cdktf/python/r/vpc_route_server.html.markdown index 0cd4bd6b6987..69692b3a5317 100644 --- a/website/docs/cdktf/python/r/vpc_route_server.html.markdown +++ b/website/docs/cdktf/python/r/vpc_route_server.html.markdown @@ -23,7 +23,7 @@ from cdktf import TerraformStack # Provider bindings are generated by running `cdktf get`. # See https://cdk.tf/provider-generation for more details. # -from imports.aws. import VpcRouteServer +from imports.aws.vpc_route_server import VpcRouteServer class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) @@ -45,7 +45,7 @@ from cdktf import TerraformStack # Provider bindings are generated by running `cdktf get`. # See https://cdk.tf/provider-generation for more details. # -from imports.aws. import VpcRouteServer +from imports.aws.vpc_route_server import VpcRouteServer class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) @@ -70,6 +70,7 @@ The following arguments are optional: * `persist_routes` - (Optional) Indicates whether routes should be persisted after all BGP sessions are terminated. 
Valid values are `enable`, `disable`, `reset` * `persist_routes_duration` - (Optional) The number of minutes a route server will wait after BGP is re-established to unpersist the routes in the FIB and RIB. Value must be in the range of 1-5. Required if `persist_routes` is enabled. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `sns_notifications_enabled` - (Optional) Indicates whether SNS notifications should be enabled for route server events. Enabling SNS notifications persists BGP status changes to an SNS topic provisioned by AWS`. * `tags` - (Optional) A map of tags to assign to the resource. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. @@ -102,7 +103,7 @@ from cdktf import TerraformStack # Provider bindings are generated by running `cdktf get`. # See https://cdk.tf/provider-generation for more details. # -from imports.aws. 
import VpcRouteServer +from imports.aws.vpc_route_server import VpcRouteServer class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) @@ -115,4 +116,4 @@ Using `terraform import`, import VPC (Virtual Private Cloud) Route Server using % terraform import aws_vpc_route_server.example rs-12345678 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/vpc_route_server_endpoint.html.markdown b/website/docs/cdktf/python/r/vpc_route_server_endpoint.html.markdown index 2697cbbed9ec..126c3b9ffbb5 100644 --- a/website/docs/cdktf/python/r/vpc_route_server_endpoint.html.markdown +++ b/website/docs/cdktf/python/r/vpc_route_server_endpoint.html.markdown @@ -23,7 +23,7 @@ from cdktf import TerraformStack # Provider bindings are generated by running `cdktf get`. # See https://cdk.tf/provider-generation for more details. # -from imports.aws. import VpcRouteServerEndpoint +from imports.aws.vpc_route_server_endpoint import VpcRouteServerEndpoint class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) @@ -45,6 +45,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `tags` - (Optional) A map of tags to assign to the resource. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. ## Attribute Reference @@ -77,7 +78,7 @@ from cdktf import TerraformStack # Provider bindings are generated by running `cdktf get`. 
# See https://cdk.tf/provider-generation for more details. # -from imports.aws. import VpcRouteServerEndpoint +from imports.aws.vpc_route_server_endpoint import VpcRouteServerEndpoint class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) @@ -90,4 +91,4 @@ Using `terraform import`, import VPC (Virtual Private Cloud) Route Server Endpoi % terraform import aws_vpc_route_server_endpoint.example rse-12345678 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/vpc_route_server_peer.html.markdown b/website/docs/cdktf/python/r/vpc_route_server_peer.html.markdown index ae6f39a00f07..021104803280 100644 --- a/website/docs/cdktf/python/r/vpc_route_server_peer.html.markdown +++ b/website/docs/cdktf/python/r/vpc_route_server_peer.html.markdown @@ -23,14 +23,14 @@ from cdktf import TerraformStack # Provider bindings are generated by running `cdktf get`. # See https://cdk.tf/provider-generation for more details. # -from imports.aws. import VpcRouteServerPeer +from imports.aws.vpc_route_server_peer import VpcRouteServerPeer class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) VpcRouteServerPeer(self, "test", - bgp_options=[{ - "peer_asn": 65200 - } + bgp_options=[VpcRouteServerPeerBgpOptions( + peer_asn=65200 + ) ], peer_address="10.0.1.250", route_server_endpoint_id=example.route_server_endpoint_id, @@ -45,12 +45,16 @@ class MyConvertedCode(TerraformStack): ```python # DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug from constructs import Construct -from cdktf import TerraformStack +from cdktf import Token, TerraformStack # # Provider bindings are generated by running `cdktf get`. # See https://cdk.tf/provider-generation for more details. # -from imports.aws. 
import VpcRouteServer, VpcRouteServerAssociation, VpcRouteServerEndpoint, VpcRouteServerPeer, VpcRouteServerPropagation +from imports.aws.vpc_route_server_association import VpcRouteServerAssociation +from imports.aws.vpc_route_server import VpcRouteServer +from imports.aws.vpc_route_server_endpoint import VpcRouteServerEndpoint +from imports.aws.vpc_route_server_peer import VpcRouteServerPeer +from imports.aws.vpc_route_server_propagation import VpcRouteServerPropagation class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) @@ -69,7 +73,7 @@ class MyConvertedCode(TerraformStack): aws_vpc_route_server_endpoint_test = VpcRouteServerEndpoint(self, "test_2", depends_on=[aws_vpc_route_server_association_test], route_server_id=test.route_server_id, - subnet_id=aws_subnet_test.id, + subnet_id=Token.as_string(aws_subnet_test.id), tags={ "Name": "Test Endpoint" } @@ -77,13 +81,13 @@ class MyConvertedCode(TerraformStack): # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. 
aws_vpc_route_server_endpoint_test.override_logical_id("test") aws_vpc_route_server_peer_test = VpcRouteServerPeer(self, "test_3", - bgp_options=[{ - "peer_asn": 65000, - "peer_liveness_detection": "bgp-keepalive" - } + bgp_options=[VpcRouteServerPeerBgpOptions( + peer_asn=65000, + peer_liveness_detection="bgp-keepalive" + ) ], peer_address="10.0.1.250", - route_server_endpoint_id=aws_vpc_route_server_endpoint_test.route_server_endpoint_id, + route_server_endpoint_id=Token.as_string(aws_vpc_route_server_endpoint_test.route_server_endpoint_id), tags={ "Name": "Test Appliance" } @@ -93,7 +97,7 @@ class MyConvertedCode(TerraformStack): aws_vpc_route_server_propagation_test = VpcRouteServerPropagation(self, "test_4", depends_on=[aws_vpc_route_server_association_test], route_server_id=test.route_server_id, - route_table_id=aws_route_table_test.id + route_table_id=Token.as_string(aws_route_table_test.id) ) # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. aws_vpc_route_server_propagation_test.override_logical_id("test") @@ -103,19 +107,20 @@ class MyConvertedCode(TerraformStack): The following arguments are required: -* `route_server_endpoint_id` - (Required) The ID of the route server endpoint for which to create a peer. +* `bgp_options` - (Required) The BGP options for the peer, including ASN (Autonomous System Number) and BFD (Bidirectional Forwarding Detection) settings. Configuration block with BGP Options configuration Detailed below * `peer_address` - (Required) The IPv4 address of the peer device. -* `bgp_options` - The BGP options for the peer, including ASN (Autonomous System Number) and BFD (Bidrectional Forwarding Detection) settings. Configuration block with BGP Options configuration Detailed below +* `route_server_endpoint_id` - (Required) The ID of the route server endpoint for which to create a peer. 
+ +The following arguments are optional: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `tags` - (Optional) A map of tags to assign to the resource. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. ### bgp_options * `peer_asn` - (Required) The Border Gateway Protocol (BGP) Autonomous System Number (ASN) for the appliance. Valid values are from 1 to 4294967295. We recommend using a private ASN in the 64512–65534 (16-bit ASN) or 4200000000–4294967294 (32-bit ASN) range. * `peer_liveness_detection` (Optional) The requested liveness detection protocol for the BGP peer. Valid values are `bgp-keepalive` and `bfd`. Default value is `bgp-keepalive`. -The following arguments are optional: - -* `tags` - (Optional) A map of tags to assign to the resource. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. - ## Attribute Reference This resource exports the following attributes in addition to the arguments above: @@ -148,7 +153,7 @@ from cdktf import TerraformStack # Provider bindings are generated by running `cdktf get`. # See https://cdk.tf/provider-generation for more details. # -from imports.aws. 
import VpcRouteServerPeer +from imports.aws.vpc_route_server_peer import VpcRouteServerPeer class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) @@ -161,4 +166,4 @@ Using `terraform import`, import VPC (Virtual Private Cloud) Route Server using % terraform import aws_vpc_route_server_peer.example rsp-12345678 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/vpc_route_server_propagation.html.markdown b/website/docs/cdktf/python/r/vpc_route_server_propagation.html.markdown index a98adef3df6c..9bf657d71d4c 100644 --- a/website/docs/cdktf/python/r/vpc_route_server_propagation.html.markdown +++ b/website/docs/cdktf/python/r/vpc_route_server_propagation.html.markdown @@ -18,18 +18,18 @@ description: |- ```python # DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug from constructs import Construct -from cdktf import TerraformStack +from cdktf import Token, TerraformStack # # Provider bindings are generated by running `cdktf get`. # See https://cdk.tf/provider-generation for more details. # -from imports.aws. import VpcRouteServerPropagation +from imports.aws.vpc_route_server_propagation import VpcRouteServerPropagation class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) VpcRouteServerPropagation(self, "example", - route_server_id=aws_vpc_route_server_example.route_server_id, - route_table_id=aws_route_table_example.id + route_server_id=Token.as_string(aws_vpc_route_server_example.route_server_id), + route_table_id=Token.as_string(aws_route_table_example.id) ) ``` @@ -40,6 +40,10 @@ The following arguments are required: * `route_server_id` - (Required) The unique identifier for the route server to be associated. * `route_table_id` - (Required) The ID of the route table to which route server will propagate routes. 
+The following arguments are optional: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). + ## Attribute Reference This resource exports no additional attributes. @@ -63,7 +67,7 @@ from cdktf import TerraformStack # Provider bindings are generated by running `cdktf get`. # See https://cdk.tf/provider-generation for more details. # -from imports.aws. import VpcRouteServerPropagation +from imports.aws.vpc_route_server_propagation import VpcRouteServerPropagation class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) @@ -76,4 +80,4 @@ Using `terraform import`, to to import VPC (Virtual Private Cloud) Route Server % terraform import aws_vpc_route_server_propagation.example rs-12345678,rtb-656c65616e6f72 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/vpc_route_server_vpc_association.html.markdown b/website/docs/cdktf/python/r/vpc_route_server_vpc_association.html.markdown index ec7b50417f2c..350d82327841 100644 --- a/website/docs/cdktf/python/r/vpc_route_server_vpc_association.html.markdown +++ b/website/docs/cdktf/python/r/vpc_route_server_vpc_association.html.markdown @@ -18,18 +18,18 @@ description: |- ```python # DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug from constructs import Construct -from cdktf import TerraformStack +from cdktf import Token, TerraformStack # # Provider bindings are generated by running `cdktf get`. # See https://cdk.tf/provider-generation for more details. # -from imports.aws. 
import VpcRouteServerVpcAssociation +from imports.aws.vpc_route_server_vpc_association import VpcRouteServerVpcAssociation class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) VpcRouteServerVpcAssociation(self, "example", - route_server_id=aws_vpc_route_server_example.route_server_id, - vpc_id=aws_vpc_example.id + route_server_id=Token.as_string(aws_vpc_route_server_example.route_server_id), + vpc_id=Token.as_string(aws_vpc_example.id) ) ``` @@ -40,6 +40,10 @@ The following arguments are required: * `route_server_id` - (Required) The unique identifier for the route server to be associated. * `vpc_id` - (Required) The ID of the VPC to associate with the route server. +The following arguments are optional: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). + ## Attribute Reference This resource exports no additional attributes. @@ -63,7 +67,7 @@ from cdktf import TerraformStack # Provider bindings are generated by running `cdktf get`. # See https://cdk.tf/provider-generation for more details. # -from imports.aws. 
import VpcRouteServerVpcAssociation +from imports.aws.vpc_route_server_vpc_association import VpcRouteServerVpcAssociation class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) @@ -76,4 +80,4 @@ Using `terraform import`, to to import VPC (Virtual Private Cloud) Route Server % terraform import aws_vpc_route_server_vpc_association.example rs-12345678,vpc-0f001273ec18911b1 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/vpc_security_group_egress_rule.html.markdown b/website/docs/cdktf/python/r/vpc_security_group_egress_rule.html.markdown index aca0f83bff72..fcd90aaad839 100644 --- a/website/docs/cdktf/python/r/vpc_security_group_egress_rule.html.markdown +++ b/website/docs/cdktf/python/r/vpc_security_group_egress_rule.html.markdown @@ -45,6 +45,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `cidr_ipv4` - (Optional) The destination IPv4 CIDR range. * `cidr_ipv6` - (Optional) The destination IPv6 CIDR range. * `description` - (Optional) The security group rule description. @@ -68,6 +69,32 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. 
For example: + +```terraform +import { + to = aws_vpc_security_group_egress_rule.example + identity = { + id = "sgr-02108b27edd666983" + } +} + +resource "aws_vpc_security_group_egress_rule" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `id` - (String) ID of the security group rule. + +#### Optional + +* `account_id` (String) AWS Account where this resource is managed. +* `region` (String) Region where this resource is managed. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import security group egress rules using the `security_group_rule_id`. For example: ```python @@ -91,4 +118,4 @@ Using `terraform import`, import security group egress rules using the `security % terraform import aws_vpc_security_group_egress_rule.example sgr-02108b27edd666983 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/vpc_security_group_ingress_rule.html.markdown b/website/docs/cdktf/python/r/vpc_security_group_ingress_rule.html.markdown index 4a7a663a2995..b63e29824651 100644 --- a/website/docs/cdktf/python/r/vpc_security_group_ingress_rule.html.markdown +++ b/website/docs/cdktf/python/r/vpc_security_group_ingress_rule.html.markdown @@ -57,8 +57,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: -~> **Note** Although `cidr_ipv4`, `cidr_ipv6`, `prefix_list_id`, and `referenced_security_group_id` are all marked as optional, you *must* provide one of them in order to configure the destination of the traffic. The `from_port` and `to_port` arguments are required unless `ip_protocol` is set to `-1` or `icmpv6`. - +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `cidr_ipv4` - (Optional) The source IPv4 CIDR range. * `cidr_ipv6` - (Optional) The source IPv6 CIDR range. * `description` - (Optional) The security group rule description. @@ -70,6 +69,8 @@ This resource supports the following arguments: * `tags` - (Optional) A map of tags to assign to the resource. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. * `to_port` - (Optional) The end of port range for the TCP and UDP protocols, or an ICMP/ICMPv6 code. +~> **Note** Although `cidr_ipv4`, `cidr_ipv6`, `prefix_list_id`, and `referenced_security_group_id` are all marked as optional, you *must* provide one of them in order to configure the destination of the traffic. The `from_port` and `to_port` arguments are required unless `ip_protocol` is set to `-1` or `icmpv6`. + ## Attribute Reference This resource exports the following attributes in addition to the arguments above: @@ -80,6 +81,32 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_vpc_security_group_ingress_rule.example + identity = { + id = "sgr-02108b27edd666983" + } +} + +resource "aws_vpc_security_group_ingress_rule" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `id` - (String) ID of the security group rule. + +#### Optional + +* `account_id` (String) AWS Account where this resource is managed. 
+* `region` (String) Region where this resource is managed. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import security group ingress rules using the `security_group_rule_id`. For example: ```python @@ -103,4 +130,4 @@ Using `terraform import`, import security group ingress rules using the `securit % terraform import aws_vpc_security_group_ingress_rule.example sgr-02108b27edd666983 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/vpc_security_group_vpc_association.html.markdown b/website/docs/cdktf/python/r/vpc_security_group_vpc_association.html.markdown index a5c75655985a..63f3cbe0c733 100644 --- a/website/docs/cdktf/python/r/vpc_security_group_vpc_association.html.markdown +++ b/website/docs/cdktf/python/r/vpc_security_group_vpc_association.html.markdown @@ -34,8 +34,9 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -The following arguments are required: +This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `security_group_id` - (Required) The ID of the security group. * `vpc_id` - (Required) The ID of the VPC to make the association with. @@ -54,6 +55,34 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. 
For example: + +```terraform +import { + to = aws_vpc_security_group_vpc_association.example + identity = { + vpc_id = "vpc-67890" + security_group_id = "sg-12345" + } +} + +resource "aws_vpc_security_group_vpc_association" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `vpc_id` (String) VPC ID. +* `security_group_id` (String) Security Group ID. + +#### Optional + +* `account_id` (String) AWS Account where this resource is managed. +* `region` (String) Region where this resource is managed. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import a Security Group VPC Association using the `security_group_id` and `vpc_id` arguments, separated by a comma (`,`). For example: ```python @@ -77,4 +106,4 @@ Using `terraform import`, import a Security Group VPC Association using the `sec % terraform import aws_vpc_security_group_vpc_association.example sg-12345,vpc-67890 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/vpclattice_access_log_subscription.html.markdown b/website/docs/cdktf/python/r/vpclattice_access_log_subscription.html.markdown index a99470e22116..8753a17b2c6e 100644 --- a/website/docs/cdktf/python/r/vpclattice_access_log_subscription.html.markdown +++ b/website/docs/cdktf/python/r/vpclattice_access_log_subscription.html.markdown @@ -43,6 +43,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `service_network_log_type` - (Optional, Forces new resource) Type of log that monitors your Amazon VPC Lattice service networks. 
Valid values are: `SERVICE`, `RESOURCE`. Defaults to `SERVICE`. ## Attribute Reference @@ -78,4 +79,4 @@ Using `terraform import`, import VPC Lattice Access Log Subscription using the a % terraform import aws_vpclattice_access_log_subscription.example rft-8012925589 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/vpclattice_auth_policy.html.markdown b/website/docs/cdktf/python/r/vpclattice_auth_policy.html.markdown index ccbac1e12361..0b1748826615 100644 --- a/website/docs/cdktf/python/r/vpclattice_auth_policy.html.markdown +++ b/website/docs/cdktf/python/r/vpclattice_auth_policy.html.markdown @@ -59,8 +59,9 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -The following arguments are required: +This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `resource_identifier` - (Required) The ID or Amazon Resource Name (ARN) of the service network or service for which the policy is created. * `policy` - (Required) The auth policy. The policy string in JSON must not contain newlines or blank lines. @@ -105,4 +106,4 @@ Using `terraform import`, import VPC Lattice Auth Policy using the `id`. 
For exa % terraform import aws_vpclattice_auth_policy.example abcd-12345678 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/vpclattice_listener.html.markdown b/website/docs/cdktf/python/r/vpclattice_listener.html.markdown index 5739cf9191b9..072fa3161bea 100644 --- a/website/docs/cdktf/python/r/vpclattice_listener.html.markdown +++ b/website/docs/cdktf/python/r/vpclattice_listener.html.markdown @@ -157,6 +157,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `default_action` - (Required) Default action block for the default listener rule. Default action blocks are defined below. * `name` - (Required, Forces new resource) Name of the listener. A listener name must be unique within a service. Valid characters are a-z, 0-9, and hyphens (-). You can't use a hyphen as the first or last character, or immediately after another hyphen. * `port` - (Optional, Forces new resource) Listener port. You can specify a value from 1 to 65535. If `port` is not specified and `protocol` is HTTP, the value will default to 80. If `port` is not specified and `protocol` is HTTPS, the value will default to 443. 
@@ -229,4 +230,4 @@ Using `terraform import`, import VPC Lattice Listener using the `listener_id` of % terraform import aws_vpclattice_listener.example svc-1a2b3c4d/listener-987654321 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/vpclattice_listener_rule.html.markdown b/website/docs/cdktf/python/r/vpclattice_listener_rule.html.markdown index fa8eae422226..e986314199b8 100644 --- a/website/docs/cdktf/python/r/vpclattice_listener_rule.html.markdown +++ b/website/docs/cdktf/python/r/vpclattice_listener_rule.html.markdown @@ -116,6 +116,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `tags` - (Optional) Key-value mapping of resource tags. If configured with a provider [`default_tags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. ### `action` Block @@ -244,4 +245,4 @@ Using `terraform import`, import VPC Lattice Listener Rule using the `id`. 
For e % terraform import aws_vpclattice_listener_rule.example service123/listener456/rule789 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/vpclattice_resource_configuration.html.markdown b/website/docs/cdktf/python/r/vpclattice_resource_configuration.html.markdown index 523044d4650d..634ea482b4bf 100644 --- a/website/docs/cdktf/python/r/vpclattice_resource_configuration.html.markdown +++ b/website/docs/cdktf/python/r/vpclattice_resource_configuration.html.markdown @@ -116,6 +116,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `allow_association_to_shareable_service_network` (Optional) Allow or Deny the association of this resource to a shareable service network. * `protocol` - (Optional) Protocol for the Resource `TCP` is currently the only supported value. MUST be specified if `resource_configuration_group_id` is not. * `resource_configuration_group_id` (Optional) ID of Resource Configuration where `type` is `CHILD`. @@ -129,6 +130,7 @@ One of `dns_resource`, `ip_resource`, `arn_resource` must be specified. The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `arn_resource` - (Optional) Resource DNS Configuration. See [`arn_resource` Block](#arn_resource-block) for details. * `dns_resource` - (Optional) Resource DNS Configuration. See [`dns_resource` Block](#dns_resource-block) for details. 
* `ip_resource` - (Optional) Resource DNS Configuration. See [`ip_resource` Block](#ip_resource-block) for details. @@ -193,4 +195,4 @@ Using `terraform import`, import VPC Lattice Resource Configuration using the `i % terraform import aws_vpclattice_resource_configuration.example rcfg-1234567890abcdef1 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/vpclattice_resource_gateway.html.markdown b/website/docs/cdktf/python/r/vpclattice_resource_gateway.html.markdown index 796a2c9083d7..98c706566683 100644 --- a/website/docs/cdktf/python/r/vpclattice_resource_gateway.html.markdown +++ b/website/docs/cdktf/python/r/vpclattice_resource_gateway.html.markdown @@ -94,6 +94,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `ip_address_type` - (Optional) IP address type used by the resource gateway. Valid values are `IPV4`, `IPV6`, and `DUALSTACK`. The IP address type of a resource gateway must be compatible with the subnets of the resource gateway and the IP address type of the resource. * `security_group_ids` - (Optional) Security group IDs associated with the resource gateway. The security groups must be in the same VPC. * `tags` - (Optional) Key-value mapping of resource tags. If configured with a provider [`default_tags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. @@ -132,4 +133,4 @@ Using `terraform import`, import VPC Lattice Resource Gateway using the `id`. 
Fo % terraform import aws_vpclattice_resource_gateway.example rgw-0a1b2c3d4e5f ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/vpclattice_resource_policy.html.markdown b/website/docs/cdktf/python/r/vpclattice_resource_policy.html.markdown index 93e1f5fcb3ff..617819afa62f 100644 --- a/website/docs/cdktf/python/r/vpclattice_resource_policy.html.markdown +++ b/website/docs/cdktf/python/r/vpclattice_resource_policy.html.markdown @@ -62,8 +62,9 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -The following arguments are required: +This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `resource_arn` - (Required) The ID or Amazon Resource Name (ARN) of the service network or service for which the policy is created. * `policy` - (Required) An IAM policy. The policy string in JSON must not contain newlines or blank lines. @@ -96,4 +97,4 @@ Using `terraform import`, import VPC Lattice Resource Policy using the `resource % terraform import aws_vpclattice_resource_policy.example rft-8012925589 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/vpclattice_service.html.markdown b/website/docs/cdktf/python/r/vpclattice_service.html.markdown index 2d364d45a33f..0249a239e22b 100644 --- a/website/docs/cdktf/python/r/vpclattice_service.html.markdown +++ b/website/docs/cdktf/python/r/vpclattice_service.html.markdown @@ -43,6 +43,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `auth_type` - (Optional) Type of IAM policy. Either `NONE` or `AWS_IAM`. * `certificate_arn` - (Optional) Amazon Resource Name (ARN) of the certificate. * `custom_domain_name` - (Optional) Custom domain name of the service. @@ -90,4 +91,4 @@ Using `terraform import`, import VPC Lattice Service using the `id`. For example % terraform import aws_vpclattice_service.example svc-06728e2357ea55f8a ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/vpclattice_service_network.html.markdown b/website/docs/cdktf/python/r/vpclattice_service_network.html.markdown index c5ceeb1aa4fe..533891b2b94a 100644 --- a/website/docs/cdktf/python/r/vpclattice_service_network.html.markdown +++ b/website/docs/cdktf/python/r/vpclattice_service_network.html.markdown @@ -42,6 +42,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `auth_type` - (Optional) Type of IAM policy. Either `NONE` or `AWS_IAM`. * `tags` - (Optional) Key-value mapping of resource tags. If configured with a provider [`default_tags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. @@ -77,4 +78,4 @@ Using `terraform import`, import VPC Lattice Service Network using the `id`. 
For % terraform import aws_vpclattice_service_network.example sn-0158f91c1e3358dba ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/vpclattice_service_network_resource_association.html.markdown b/website/docs/cdktf/python/r/vpclattice_service_network_resource_association.html.markdown index 5fe1d6a4df1d..a944eeeb9f4a 100644 --- a/website/docs/cdktf/python/r/vpclattice_service_network_resource_association.html.markdown +++ b/website/docs/cdktf/python/r/vpclattice_service_network_resource_association.html.markdown @@ -18,18 +18,18 @@ Terraform resource for managing an AWS VPC Lattice Service Network Resource Asso ```python # DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug from constructs import Construct -from cdktf import TerraformStack +from cdktf import Token, TerraformStack # # Provider bindings are generated by running `cdktf get`. # See https://cdk.tf/provider-generation for more details. # -from imports.aws. import VpclatticeServiceNetworkResourceAssociation +from imports.aws.vpclattice_service_network_resource_association import VpclatticeServiceNetworkResourceAssociation class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) VpclatticeServiceNetworkResourceAssociation(self, "example", - resource_configuration_identifier=aws_vpclattice_resource_configuration_example.id, - service_network_identifier=aws_vpclattice_service_network_example.id, + resource_configuration_identifier=Token.as_string(aws_vpclattice_resource_configuration_example.id), + service_network_identifier=Token.as_string(aws_vpclattice_service_network_example.id), tags={ "Name": "Example" } @@ -45,6 +45,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `tags` - (Optional) Map of tags assigned to the resource. If configured with a provider [`default_tags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. ## Attribute Reference @@ -77,7 +78,7 @@ from cdktf import TerraformStack # Provider bindings are generated by running `cdktf get`. # See https://cdk.tf/provider-generation for more details. # -from imports.aws. import VpclatticeServiceNetworkResourceAssociation +from imports.aws.vpclattice_service_network_resource_association import VpclatticeServiceNetworkResourceAssociation class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) @@ -90,4 +91,4 @@ Using `terraform import`, import VPC Lattice Service Network Resource Associatio % terraform import aws_vpclattice_service_network_resource_association.example snra-1234567890abcef12 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/vpclattice_service_network_service_association.html.markdown b/website/docs/cdktf/python/r/vpclattice_service_network_service_association.html.markdown index 008c1525a640..b4bc515d8b52 100644 --- a/website/docs/cdktf/python/r/vpclattice_service_network_service_association.html.markdown +++ b/website/docs/cdktf/python/r/vpclattice_service_network_service_association.html.markdown @@ -36,12 +36,11 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -The following arguments are required: +This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `service_identifier` - (Required) The ID or Amazon Resource Identifier (ARN) of the service. * `service_network_identifier` - (Required) The ID or Amazon Resource Identifier (ARN) of the service network. You must use the ARN if the resources specified in the operation are in different accounts. -The following arguments are optional: - * `tags` - (Optional) Key-value mapping of resource tags. If configured with a provider [`default_tags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. ## Attribute Reference @@ -90,4 +89,4 @@ Using `terraform import`, import VPC Lattice Service Network Service Association % terraform import aws_vpclattice_service_network_service_association.example snsa-05e2474658a88f6ba ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/vpclattice_service_network_vpc_association.html.markdown b/website/docs/cdktf/python/r/vpclattice_service_network_vpc_association.html.markdown index dccafa45cc10..99ad35cdc8a3 100644 --- a/website/docs/cdktf/python/r/vpclattice_service_network_vpc_association.html.markdown +++ b/website/docs/cdktf/python/r/vpclattice_service_network_vpc_association.html.markdown @@ -37,12 +37,12 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -The following arguments are required: +This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `vpc_identifier` - (Required) The ID of the VPC. 
* `service_network_identifier` - (Required) The ID or Amazon Resource Identifier (ARN) of the service network. You must use the ARN if the resources specified in the operation are in different accounts. The following arguments are optional: - * `tags` - (Optional) Key-value mapping of resource tags. If configured with a provider [`default_tags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. * `security_group_ids` - (Optional) The IDs of the security groups. @@ -88,4 +88,4 @@ Using `terraform import`, import VPC Lattice Service Network VPC Association usi % terraform import aws_vpclattice_service_network_vpc_association.example snsa-05e2474658a88f6ba ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/vpclattice_target_group.html.markdown b/website/docs/cdktf/python/r/vpclattice_target_group.html.markdown index 1972234c7dd0..22e3a6ba39f4 100644 --- a/website/docs/cdktf/python/r/vpclattice_target_group.html.markdown +++ b/website/docs/cdktf/python/r/vpclattice_target_group.html.markdown @@ -139,6 +139,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `config` - (Optional) The target group configuration. * `tags` - (Optional) Key-value mapping of resource tags. If configured with a provider [`default_tags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. 
@@ -207,4 +208,4 @@ Using `terraform import`, import VPC Lattice Target Group using the `id`. For ex % terraform import aws_vpclattice_target_group.example tg-0c11d4dc16ed96bdb ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/vpclattice_target_group_attachment.html.markdown b/website/docs/cdktf/python/r/vpclattice_target_group_attachment.html.markdown index 6f553400ca22..d21b1d9e6151 100644 --- a/website/docs/cdktf/python/r/vpclattice_target_group_attachment.html.markdown +++ b/website/docs/cdktf/python/r/vpclattice_target_group_attachment.html.markdown @@ -41,6 +41,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +- `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). - `target_group_identifier` - (Required) The ID or Amazon Resource Name (ARN) of the target group. - `target` - (Required) The target. @@ -53,4 +54,4 @@ This resource supports the following arguments: This resource exports no additional attributes. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/vpn_connection.html.markdown b/website/docs/cdktf/python/r/vpn_connection.html.markdown index f7adda6e5a4d..819cf86c7108 100644 --- a/website/docs/cdktf/python/r/vpn_connection.html.markdown +++ b/website/docs/cdktf/python/r/vpn_connection.html.markdown @@ -163,6 +163,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `customer_gateway_id` - (Required) The ID of the customer gateway. * `type` - (Required) The type of VPN connection. The only type AWS supports at this time is "ipsec.1". * `transit_gateway_id` - (Optional) The ID of the EC2 Transit Gateway. @@ -303,4 +304,4 @@ Using `terraform import`, import VPN Connections using the VPN connection `id`. % terraform import aws_vpn_connection.testvpnconnection vpn-40f41529 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/vpn_connection_route.html.markdown b/website/docs/cdktf/python/r/vpn_connection_route.html.markdown index cccd82b3da02..c58abe9689c5 100644 --- a/website/docs/cdktf/python/r/vpn_connection_route.html.markdown +++ b/website/docs/cdktf/python/r/vpn_connection_route.html.markdown @@ -57,6 +57,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `destination_cidr_block` - (Required) The CIDR block associated with the local subnet of the customer network. * `vpn_connection_id` - (Required) The ID of the VPN connection. @@ -67,4 +68,4 @@ This resource exports the following attributes in addition to the arguments abov * `destination_cidr_block` - The CIDR block associated with the local subnet of the customer network. * `vpn_connection_id` - The ID of the VPN connection. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/vpn_gateway.html.markdown b/website/docs/cdktf/python/r/vpn_gateway.html.markdown index ab14d19b2fb6..9e73fd7beb34 100644 --- a/website/docs/cdktf/python/r/vpn_gateway.html.markdown +++ b/website/docs/cdktf/python/r/vpn_gateway.html.markdown @@ -38,6 +38,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `vpc_id` - (Optional) The VPC ID to create in. * `availability_zone` - (Optional) The Availability Zone for the virtual private gateway. * `tags` - (Optional) A map of tags to assign to the resource. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. @@ -76,4 +77,4 @@ Using `terraform import`, import VPN Gateways using the VPN gateway `id`. For ex % terraform import aws_vpn_gateway.testvpngateway vgw-9a4cacf3 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/vpn_gateway_attachment.html.markdown b/website/docs/cdktf/python/r/vpn_gateway_attachment.html.markdown index 3ca29c3378c4..866783848cfa 100644 --- a/website/docs/cdktf/python/r/vpn_gateway_attachment.html.markdown +++ b/website/docs/cdktf/python/r/vpn_gateway_attachment.html.markdown @@ -55,6 +55,7 @@ guides for more information. 
This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `vpc_id` - (Required) The ID of the VPC. * `vpn_gateway_id` - (Required) The ID of the Virtual Private Gateway. @@ -69,4 +70,4 @@ This resource exports the following attributes in addition to the arguments abov You cannot import this resource. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/vpn_gateway_route_propagation.html.markdown b/website/docs/cdktf/python/r/vpn_gateway_route_propagation.html.markdown index ff7442778308..1cb0a7db36c4 100644 --- a/website/docs/cdktf/python/r/vpn_gateway_route_propagation.html.markdown +++ b/website/docs/cdktf/python/r/vpn_gateway_route_propagation.html.markdown @@ -38,8 +38,9 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -The following arguments are required: +This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `vpn_gateway_id` - The id of the `aws_vpn_gateway` to propagate routes from. * `route_table_id` - The id of the `aws_route_table` to propagate routes into. @@ -54,4 +55,4 @@ This resource exports no additional attributes. 
- `create` - (Default `2m`) - `delete` - (Default `2m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/wafregional_byte_match_set.html.markdown b/website/docs/cdktf/python/r/wafregional_byte_match_set.html.markdown index bcf370ce49aa..4328115b7eb1 100644 --- a/website/docs/cdktf/python/r/wafregional_byte_match_set.html.markdown +++ b/website/docs/cdktf/python/r/wafregional_byte_match_set.html.markdown @@ -45,6 +45,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The name or description of the ByteMatchSet. * `byte_match_tuples` - (Optional)Settings for the ByteMatchSet, such as the bytes (typically a string that corresponds with ASCII characters) that you want AWS WAF to search for in web requests. ByteMatchTuple documented below. @@ -93,4 +94,4 @@ Using `terraform import`, import WAF Regional Byte Match Set using the id. 
For e % terraform import aws_wafregional_byte_match_set.byte_set a1b2c3d4-d5f6-7777-8888-9999aaaabbbbcccc ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/wafregional_geo_match_set.html.markdown b/website/docs/cdktf/python/r/wafregional_geo_match_set.html.markdown index 6b944821069b..7611dfc86ccf 100644 --- a/website/docs/cdktf/python/r/wafregional_geo_match_set.html.markdown +++ b/website/docs/cdktf/python/r/wafregional_geo_match_set.html.markdown @@ -43,6 +43,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The name or description of the Geo Match Set. * `geo_match_constraint` - (Optional) The Geo Match Constraint objects which contain the country that you want AWS WAF to search for. @@ -88,4 +89,4 @@ Using `terraform import`, import WAF Regional Geo Match Set using the id. For ex % terraform import aws_wafregional_geo_match_set.geo_match_set a1b2c3d4-d5f6-7777-8888-9999aaaabbbbcccc ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/wafregional_ipset.html.markdown b/website/docs/cdktf/python/r/wafregional_ipset.html.markdown index 6a3be69510d1..bba26b30c6b8 100644 --- a/website/docs/cdktf/python/r/wafregional_ipset.html.markdown +++ b/website/docs/cdktf/python/r/wafregional_ipset.html.markdown @@ -43,6 +43,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The name or description of the IPSet. * `ip_set_descriptor` - (Optional) One or more pairs specifying the IP address type (IPV4 or IPV6) and the IP address range (in CIDR notation) from which web requests originate. @@ -89,4 +90,4 @@ Using `terraform import`, import WAF Regional IPSets using their ID. For example % terraform import aws_wafregional_ipset.example a1b2c3d4-d5f6-7777-8888-9999aaaabbbbcccc ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/wafregional_rate_based_rule.html.markdown b/website/docs/cdktf/python/r/wafregional_rate_based_rule.html.markdown index 24e52629853e..fa1adaf56f3b 100644 --- a/website/docs/cdktf/python/r/wafregional_rate_based_rule.html.markdown +++ b/website/docs/cdktf/python/r/wafregional_rate_based_rule.html.markdown @@ -54,6 +54,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `metric_name` - (Required) The name or description for the Amazon CloudWatch metric of this rule. * `name` - (Required) The name or description of the rule. * `rate_key` - (Required) Valid value is IP. @@ -109,4 +110,4 @@ Using `terraform import`, import WAF Regional Rate Based Rule using the id. 
For % terraform import aws_wafregional_rate_based_rule.wafrule a1b2c3d4-d5f6-7777-8888-9999aaaabbbbcccc ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/wafregional_regex_match_set.html.markdown b/website/docs/cdktf/python/r/wafregional_regex_match_set.html.markdown index 50375775463b..6df1f73b39b0 100644 --- a/website/docs/cdktf/python/r/wafregional_regex_match_set.html.markdown +++ b/website/docs/cdktf/python/r/wafregional_regex_match_set.html.markdown @@ -51,6 +51,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The name or description of the Regex Match Set. * `regex_match_tuple` - (Required) The regular expression pattern that you want AWS WAF to search for in web requests, the location in requests that you want AWS WAF to search, and other settings. See below. @@ -105,4 +106,4 @@ Using `terraform import`, import WAF Regional Regex Match Set using the id. 
For % terraform import aws_wafregional_regex_match_set.example a1b2c3d4-d5f6-7777-8888-9999aaaabbbbcccc ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/wafregional_regex_pattern_set.html.markdown b/website/docs/cdktf/python/r/wafregional_regex_pattern_set.html.markdown index 90ee9ee1093f..0cbfab8fc04b 100644 --- a/website/docs/cdktf/python/r/wafregional_regex_pattern_set.html.markdown +++ b/website/docs/cdktf/python/r/wafregional_regex_pattern_set.html.markdown @@ -36,6 +36,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The name or description of the Regex Pattern Set. * `regex_pattern_strings` - (Optional) A list of regular expression (regex) patterns that you want AWS WAF to search for, such as `B[a@]dB[o0]t`. @@ -70,4 +71,4 @@ Using `terraform import`, import WAF Regional Regex Pattern Set using the id. Fo % terraform import aws_wafregional_regex_pattern_set.example a1b2c3d4-d5f6-7777-8888-9999aaaabbbbcccc ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/wafregional_rule.html.markdown b/website/docs/cdktf/python/r/wafregional_rule.html.markdown index b1caf3d68537..bc2bb2b7b084 100644 --- a/website/docs/cdktf/python/r/wafregional_rule.html.markdown +++ b/website/docs/cdktf/python/r/wafregional_rule.html.markdown @@ -51,6 +51,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The name or description of the rule. * `metric_name` - (Required) The name or description for the Amazon CloudWatch metric of this rule. * `predicate` - (Optional) The objects to include in a rule (documented below). @@ -103,4 +104,4 @@ Using `terraform import`, import WAF Regional Rule using the id. For example: % terraform import aws_wafregional_rule.wafrule a1b2c3d4-d5f6-7777-8888-9999aaaabbbbcccc ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/wafregional_rule_group.html.markdown b/website/docs/cdktf/python/r/wafregional_rule_group.html.markdown index b867e5cbe308..d5dae9215c7a 100644 --- a/website/docs/cdktf/python/r/wafregional_rule_group.html.markdown +++ b/website/docs/cdktf/python/r/wafregional_rule_group.html.markdown @@ -51,6 +51,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) A friendly name of the rule group * `metric_name` - (Required) A friendly name for the metrics from the rule group * `activated_rule` - (Optional) A list of activated rules, see below @@ -101,4 +102,4 @@ Using `terraform import`, import WAF Regional Rule Group using the id. 
For examp % terraform import aws_wafregional_rule_group.example a1b2c3d4-d5f6-7777-8888-9999aaaabbbbcccc ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/wafregional_size_constraint_set.html.markdown b/website/docs/cdktf/python/r/wafregional_size_constraint_set.html.markdown index edea36234e4f..9d6fdec3b7da 100644 --- a/website/docs/cdktf/python/r/wafregional_size_constraint_set.html.markdown +++ b/website/docs/cdktf/python/r/wafregional_size_constraint_set.html.markdown @@ -44,6 +44,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The name or description of the Size Constraint Set. * `size_constraints` - (Optional) Specifies the parts of web requests that you want to inspect the size of. @@ -108,4 +109,4 @@ Using `terraform import`, import WAF Size Constraint Set using the id. 
For examp % terraform import aws_wafregional_size_constraint_set.size_constraint_set a1b2c3d4-d5f6-7777-8888-9999aaaabbbbcccc ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/wafregional_sql_injection_match_set.html.markdown b/website/docs/cdktf/python/r/wafregional_sql_injection_match_set.html.markdown index 30ba50677344..b7e953427aba 100644 --- a/website/docs/cdktf/python/r/wafregional_sql_injection_match_set.html.markdown +++ b/website/docs/cdktf/python/r/wafregional_sql_injection_match_set.html.markdown @@ -42,6 +42,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The name or description of the SizeConstraintSet. * `sql_injection_match_tuple` - (Optional) The parts of web requests that you want AWS WAF to inspect for malicious SQL code and, if you want AWS WAF to inspect a header, the name of the header. 
@@ -96,4 +97,4 @@ Using `terraform import`, import WAF Regional Sql Injection Match Set using the % terraform import aws_wafregional_sql_injection_match_set.sql_injection_match_set a1b2c3d4-d5f6-7777-8888-9999aaaabbbbcccc ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/wafregional_web_acl.html.markdown b/website/docs/cdktf/python/r/wafregional_web_acl.html.markdown index 20add098263a..15ee679ea405 100644 --- a/website/docs/cdktf/python/r/wafregional_web_acl.html.markdown +++ b/website/docs/cdktf/python/r/wafregional_web_acl.html.markdown @@ -137,6 +137,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `default_action` - (Required) The action that you want AWS WAF Regional to take when a request doesn't match the criteria in any of the rules that are associated with the web ACL. * `metric_name` - (Required) The name or description for the Amazon CloudWatch metric of this web ACL. * `name` - (Required) The name or description of the web ACL. @@ -212,4 +213,4 @@ Using `terraform import`, import WAF Regional Web ACL using the id. 
For example: % terraform import aws_wafregional_web_acl.wafacl a1b2c3d4-d5f6-7777-8888-9999aaaabbbbcccc ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/wafregional_web_acl_association.html.markdown b/website/docs/cdktf/python/r/wafregional_web_acl_association.html.markdown index cc66ad809a35..6b8420162844 100644 --- a/website/docs/cdktf/python/r/wafregional_web_acl_association.html.markdown +++ b/website/docs/cdktf/python/r/wafregional_web_acl_association.html.markdown @@ -197,6 +197,7 @@ resource "aws_wafregional_web_acl_association" "association" { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `web_acl_id` - (Required) The ID of the WAF Regional WebACL to create an association. * `resource_arn` - (Required) ARN of the resource to associate with. For example, an Application Load Balancer or API Gateway Stage. 
@@ -237,4 +238,4 @@ Using `terraform import`, import WAF Regional Web ACL Association using their `w % terraform import aws_wafregional_web_acl_association.foo web_acl_id:resource_arn ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/wafregional_xss_match_set.html.markdown b/website/docs/cdktf/python/r/wafregional_xss_match_set.html.markdown index 765b8acd25fd..e334bb9115bb 100644 --- a/website/docs/cdktf/python/r/wafregional_xss_match_set.html.markdown +++ b/website/docs/cdktf/python/r/wafregional_xss_match_set.html.markdown @@ -47,6 +47,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The name of the set * `xss_match_tuple` - (Optional) The parts of web requests that you want to inspect for cross-site scripting attacks. @@ -93,4 +94,4 @@ Using `terraform import`, import AWS WAF Regional XSS Match using the `id`. For % terraform import aws_wafregional_xss_match_set.example 12345abcde ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/wafv2_api_key.html.markdown b/website/docs/cdktf/python/r/wafv2_api_key.html.markdown index 12f85718818d..3497c915d493 100644 --- a/website/docs/cdktf/python/r/wafv2_api_key.html.markdown +++ b/website/docs/cdktf/python/r/wafv2_api_key.html.markdown @@ -36,6 +36,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +- `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). - `scope` - (Required) Specifies whether this is for an AWS CloudFront distribution or for a regional application. Valid values are `CLOUDFRONT` or `REGIONAL`. Changing this forces a new resource to be created. **NOTE:** WAFv2 API Keys deployed for `CLOUDFRONT` must be created within the `us-east-1` region. - `token_domains` - (Required) The domains that you want to be able to use the API key with, for example `example.com`. You can specify up to 5 domains. Changing this forces a new resource to be created. @@ -70,4 +71,4 @@ Using `terraform import`, import WAFv2 API Key using `api_key,scope`. For exampl % terraform import aws_wafv2_api_key.example a1b2c3d4-5678-90ab-cdef-EXAMPLE11111,REGIONAL ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/wafv2_ip_set.html.markdown b/website/docs/cdktf/python/r/wafv2_ip_set.html.markdown index 9de8e6498e20..b5d998855101 100644 --- a/website/docs/cdktf/python/r/wafv2_ip_set.html.markdown +++ b/website/docs/cdktf/python/r/wafv2_ip_set.html.markdown @@ -43,6 +43,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Optional, Forces new resource) A friendly name of the IP set. If omitted, Terraform will assign a random, unique name. Conflicts with `name_prefix`. * `name_prefix` - (Optional) Creates a unique name beginning with the specified prefix. Conflicts with `name`. * `description` - (Optional) A friendly description of the IP set. 
@@ -84,4 +85,4 @@ Using `terraform import`, import WAFv2 IP Sets using `ID/name/scope`. For exampl % terraform import aws_wafv2_ip_set.example a1b2c3d4-d5f6-7777-8888-9999aaaabbbbcccc/example/REGIONAL ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/wafv2_regex_pattern_set.html.markdown b/website/docs/cdktf/python/r/wafv2_regex_pattern_set.html.markdown index 110467157f03..87757331a268 100644 --- a/website/docs/cdktf/python/r/wafv2_regex_pattern_set.html.markdown +++ b/website/docs/cdktf/python/r/wafv2_regex_pattern_set.html.markdown @@ -47,11 +47,12 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Optional, Forces new resource) A friendly name of the regular expression pattern set. If omitted, Terraform will assign a random, unique name. Conflicts with `name_prefix`. * `name_prefix` - (Optional) Creates a unique name beginning with the specified prefix. Conflicts with `name`. * `description` - (Optional) A friendly description of the regular expression pattern set. * `scope` - (Required) Specifies whether this is for an AWS CloudFront distribution or for a regional application. Valid values are `CLOUDFRONT` or `REGIONAL`. To work with CloudFront, you must also specify the region `us-east-1` (N. Virginia) on the AWS provider. -* `regular_expression` - (Optional) One or more blocks of regular expression patterns that you want AWS WAF to search for, such as `B[a@]dB[o0]t`. See [Regular Expression](#regular-expression) below for details. A maximum of 10 `regular_expression` blocks may be specified. 
+* `regular_expression` - (Optional) One or more blocks of regular expression patterns that you want AWS WAF to search for, such as `B[a@]dB[o0]t`. See [Regular Expression](#regular-expression) below for details. * `tags` - (Optional) An array of key:value pairs to associate with the resource. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. ### Regular Expression @@ -91,4 +92,4 @@ Using `terraform import`, import WAFv2 Regex Pattern Sets using `ID/name/scope`. % terraform import aws_wafv2_regex_pattern_set.example a1b2c3d4-d5f6-7777-8888-9999aaaabbbbcccc/example/REGIONAL ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/wafv2_rule_group.html.markdown b/website/docs/cdktf/python/r/wafv2_rule_group.html.markdown index f7e201a5596a..3f264b0ccf8a 100644 --- a/website/docs/cdktf/python/r/wafv2_rule_group.html.markdown +++ b/website/docs/cdktf/python/r/wafv2_rule_group.html.markdown @@ -284,16 +284,72 @@ class MyConvertedCode(TerraformStack): ) ``` +### Using rules_json + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import Fn, Token, TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. 
+# +from imports.aws.wafv2_rule_group import Wafv2RuleGroup +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + Wafv2RuleGroup(self, "example", + capacity=100, + name="example-rule-group", + rules_json=Token.as_string( + Fn.jsonencode([{ + "Action": { + "Count": {} + }, + "Name": "rule-1", + "Priority": 1, + "Statement": { + "ByteMatchStatement": { + "FieldToMatch": { + "UriPath": {} + }, + "PositionalConstraint": "CONTAINS", + "SearchString": "badbot", + "TextTransformations": [{ + "Priority": 1, + "Type": "NONE" + } + ] + } + }, + "VisibilityConfig": { + "CloudwatchMetricsEnabled": False, + "MetricName": "friendly-rule-metric-name", + "SampledRequestsEnabled": False + } + } + ])), + scope="REGIONAL", + visibility_config=Wafv2RuleGroupVisibilityConfig( + cloudwatch_metrics_enabled=False, + metric_name="friendly-metric-name", + sampled_requests_enabled=False + ) + ) +``` + ## Argument Reference This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `capacity` - (Required, Forces new resource) The web ACL capacity units (WCUs) required for this rule group. See [here](https://docs.aws.amazon.com/waf/latest/APIReference/API_CreateRuleGroup.html#API_CreateRuleGroup_RequestSyntax) for general information and [here](https://docs.aws.amazon.com/waf/latest/developerguide/waf-rule-statements-list.html) for capacity specific information. * `custom_response_body` - (Optional) Defines custom response bodies that can be referenced by `custom_response` actions. See [Custom Response Body](#custom-response-body) below for details. * `description` - (Optional) A friendly description of the rule group. 
* `name` - (Required, Forces new resource) A friendly name of the rule group. * `name_prefix` - (Optional) Creates a unique name beginning with the specified prefix. Conflicts with `name`. * `rule` - (Optional) The rule blocks used to identify the web requests that you want to `allow`, `block`, or `count`. See [Rules](#rules) below for details. +* `rules_json` - (Optional) Raw JSON string to allow more than three nested statements. Conflicts with `rule` attribute. This is for advanced use cases where more than 3 levels of nested statements are required. **There is no drift detection at this time**. If you use this attribute instead of `rule`, you will be foregoing drift detection. Additionally, importing an existing rule group into a configuration with `rules_json` set will result in a one time in-place update as the remote rule configuration is initially written to the `rule` attribute. See the AWS [documentation](https://docs.aws.amazon.com/waf/latest/APIReference/API_CreateRuleGroup.html) for the JSON structure. * `scope` - (Required, Forces new resource) Specifies whether this is for an AWS CloudFront distribution or for a regional application. Valid values are `CLOUDFRONT` or `REGIONAL`. To work with CloudFront, you must also specify the region `us-east-1` (N. Virginia) on the AWS provider. * `tags` - (Optional) An array of key:value pairs to associate with the resource. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. * `visibility_config` - (Required) Defines and enables Amazon CloudWatch metrics and web request sample collection. See [Visibility Configuration](#visibility-configuration) below for details. 
@@ -725,8 +781,8 @@ The `custom_key` block supports the following arguments: * `http_method` - (Optional) Use the request's HTTP method as an aggregate key. See [RateLimit `http_method`](#ratelimit-http_method-block) below for details. * `header` - (Optional) Use the value of a header in the request as an aggregate key. See [RateLimit `header`](#ratelimit-header-block) below for details. * `ip` - (Optional) Use the request's originating IP address as an aggregate key. See [`RateLimit ip`](#ratelimit-ip-block) below for details. -* `ja3_fingerprint` - (Optional) Use the JA3 fingerprint in the request as an aggregate key. See [`RateLimit ip`](#ratelimit-ja3_fingerprint-block) below for details. -* `ja4_fingerprint` - (Optional) Use the JA3 fingerprint in the request as an aggregate key. See [`RateLimit ip`](#ratelimit-ja4_fingerprint-block) below for details. +* `ja3_fingerprint` - (Optional) Use the JA3 fingerprint in the request as an aggregate key. See [`RateLimit ja3_fingerprint`](#ratelimit-ja3_fingerprint-block) below for details. +* `ja4_fingerprint` - (Optional) Use the JA4 fingerprint in the request as an aggregate key. See [`RateLimit ja4_fingerprint`](#ratelimit-ja4_fingerprint-block) below for details. * `label_namespace` - (Optional) Use the specified label namespace as an aggregate key. See [RateLimit `label_namespace`](#ratelimit-label_namespace-block) below for details. * `query_argument` - (Optional) Use the specified query argument as an aggregate key. See [RateLimit `query_argument`](#ratelimit-query_argument-block) below for details. * `query_string` - (Optional) Use the request's query string as an aggregate key. See [RateLimit `query_string`](#ratelimit-query_string-block) below for details. @@ -842,4 +898,4 @@ Using `terraform import`, import WAFv2 Rule Group using `ID/name/scope`. 
For exa % terraform import aws_wafv2_rule_group.example a1b2c3d4-d5f6-7777-8888-9999aaaabbbbcccc/example/REGIONAL ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/wafv2_web_acl.html.markdown b/website/docs/cdktf/python/r/wafv2_web_acl.html.markdown index db805753b597..903071edcf43 100644 --- a/website/docs/cdktf/python/r/wafv2_web_acl.html.markdown +++ b/website/docs/cdktf/python/r/wafv2_web_acl.html.markdown @@ -14,6 +14,8 @@ Creates a WAFv2 Web ACL resource. ~> **Note** In `field_to_match` blocks, _e.g._, in `byte_match_statement`, the `body` block includes an optional argument `oversize_handling`. AWS indicates this argument will be required starting February 2023. To avoid configurations breaking when that change happens, treat the `oversize_handling` argument as **required** as soon as possible. +!> **Warning:** If you use the `aws_wafv2_web_acl_rule_group_association` resource to associate rule groups with this Web ACL, you must add `lifecycle { ignore_changes = [rule] }` to this resource to prevent configuration drift. The association resource modifies the Web ACL's rules outside of this resource's direct management. + ## Example Usage This resource is based on `aws_wafv2_rule_group`, check the documentation of the `aws_wafv2_rule_group` resource to see examples of the various available statements. @@ -461,6 +463,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `association_config` - (Optional) Specifies custom configurations for the associations between the web ACL and protected resources. See [`association_config`](#association_config-block) below for details. 
* `captcha_config` - (Optional) Specifies how AWS WAF should handle CAPTCHA evaluations on the ACL level (used by [AWS Bot Control](https://docs.aws.amazon.com/waf/latest/developerguide/aws-managed-rule-groups-bot.html)). See [`captcha_config`](#captcha_config-block) below for details. * `challenge_config` - (Optional) Specifies how AWS WAF should handle Challenge evaluations on the ACL level (used by [AWS Bot Control](https://docs.aws.amazon.com/waf/latest/developerguide/aws-managed-rule-groups-bot.html)). See [`challenge_config`](#challenge_config-block) below for details. @@ -822,6 +825,7 @@ The `managed_rule_group_configs` block support the following arguments: * `aws_managed_rules_bot_control_rule_set` - (Optional) Additional configuration for using the Bot Control managed rule group. Use this to specify the inspection level that you want to use. See [`aws_managed_rules_bot_control_rule_set`](#aws_managed_rules_bot_control_rule_set-block) for more details * `aws_managed_rules_acfp_rule_set` - (Optional) Additional configuration for using the Account Creation Fraud Prevention managed rule group. Use this to specify information such as the registration page of your application and the type of content to accept or reject from the client. +* `aws_managed_rules_anti_ddos_rule_set` - (Optional) Configuration for using the anti-DDoS managed rule group. See [`aws_managed_rules_anti_ddos_rule_set`](#aws_managed_rules_anti_ddos_rule_set-block) for more details. * `aws_managed_rules_atp_rule_set` - (Optional) Additional configuration for using the Account Takeover Protection managed rule group. Use this to specify information such as the sign-in page of your application and the type of content to accept or reject from the client. * `login_path` - (Optional, **Deprecated**) The path of the login endpoint for your application. * `password_field` - (Optional, **Deprecated**) Details about your login page password field. 
See [`password_field`](#password_field-block) for more details. @@ -838,9 +842,31 @@ The `managed_rule_group_configs` block support the following arguments: * `creation_path` - (Required) The path of the account creation endpoint for your application. This is the page on your website that accepts the completed registration form for a new user. This page must accept POST requests. * `enable_regex_in_path` - (Optional) Whether or not to allow the use of regular expressions in the login page path. * `registration_page_path` - (Required) The path of the account registration endpoint for your application. This is the page on your website that presents the registration form to new users. This page must accept GET text/html requests. -* `request_inspection` - (Optional) The criteria for inspecting login requests, used by the ATP rule group to validate credentials usage. See [`request_inspection`](#request_inspection-block) for more details. +* `request_inspection` - (Optional) The criteria for inspecting login requests, used by the ATP rule group to validate credentials usage. See [`request_inspection`](#request_inspection-block-acfp) for more details. * `response_inspection` - (Optional) The criteria for inspecting responses to login requests, used by the ATP rule group to track login failure rates. Note that Response Inspection is available only on web ACLs that protect CloudFront distributions. See [`response_inspection`](#response_inspection-block) for more details. +### `request_inspection` Block (ACFP) + +* `addressFields` (Optional) The names of the fields in the request payload that contain your customer's primary physical address. See [`addressFields`](#address_fields-block) for more details. +* `emailField` (Optional) The name of the field in the request payload that contains your customer's email. See [`emailField`](#email_field-block) for more details. +* `passwordField` (Optional) Details about your login page password field. 
See [`passwordField`](#password_field-block) for more details. +* `payloadType` (Required) The payload type for your login endpoint, either JSON or form encoded. +* `phoneNumberFields` (Optional) The names of the fields in the request payload that contain your customer's primary phone number. See [`phoneNumberFields`](#phone_number_fields-block) for more details. +* `usernameField` (Optional) Details about your login page username field. See [`usernameField`](#username_field-block) for more details. + +### `aws_managed_rules_anti_ddos_rule_set` Block + +* `client_side_action_config` - (Required) Configuration for the request handling that's applied by the managed rule group rules `ChallengeAllDuringEvent` and `ChallengeDDoSRequests` during a distributed denial of service (DDoS) attack. See [`client_side_action_config`](#client_side_action_config-block) for more details. +* `sensitivity_to_block` - (Optional) Sensitivity that the rule group rule DDoSRequests uses when matching against the DDoS suspicion labeling on a request. Valid values are `LOW` (Default), `MEDIUM`, and `HIGH`. + +### `client_side_action_config` Block + +* `challenge` - (Required) Configuration for the use of the `AWSManagedRulesAntiDDoSRuleSet` rules `ChallengeAllDuringEvent` and `ChallengeDDoSRequests`. + * `exempt_uri_regular_expression` - (Optional) Block for the list of the regular expressions to match against the web request URI, used to identify requests that can't handle a silent browser challenge. + * `regex_string` - (Optional) Regular expression string. + * `sensitivity` - (Optional) Sensitivity that the rule group rule ChallengeDDoSRequests uses when matching against the DDoS suspicion labeling on a request. Valid values are `LOW`, `MEDIUM` and `HIGH` (Default). + * `usage_of_action` - (Required) Configuration whether to use the `AWSManagedRulesAntiDDoSRuleSet` rules `ChallengeAllDuringEvent` and `ChallengeDDoSRequests` in the rule group evaluation. 
Valid values are `ENABLED` and `DISABLED`. + ### `aws_managed_rules_atp_rule_set` Block * `enable_regex_in_path` - (Optional) Whether or not to allow the use of regular expressions in the login page path. @@ -850,11 +876,8 @@ The `managed_rule_group_configs` block support the following arguments: ### `request_inspection` Block -* `address_fields` (Optional) The names of the fields in the request payload that contain your customer's primary physical address. See [`address_fields`](#address_fields-block) for more details. -* `email_field` (Optional) The name of the field in the request payload that contains your customer's email. See [`email_field`](#email_field-block) for more details. * `password_field` (Optional) Details about your login page password field. See [`password_field`](#password_field-block) for more details. * `payload_type` (Required) The payload type for your login endpoint, either JSON or form encoded. -* `phone_number_fields` (Optional) The names of the fields in the request payload that contain your customer's primary phone number. See [`phone_number_fields`](#phone_number_fields-block) for more details. * `username_field` (Optional) Details about your login page username field. See [`username_field`](#username_field-block) for more details. ### `address_fields` Block @@ -1109,6 +1132,7 @@ Aggregate the request counts using one or more web request components as the agg The `custom_key` block supports the following arguments: +* `asn` - (Optional) Use an Autonomous System Number (ASN) derived from the request's originating or forwarded IP address as an aggregate key. See [RateLimit `asn`](#ratelimit-asn-block) below for details. * `cookie` - (Optional) Use the value of a cookie in the request as an aggregate key. See [RateLimit `cookie`](#ratelimit-cookie-block) below for details. * `forwarded_ip` - (Optional) Use the first IP address in an HTTP header as an aggregate key. See [`forwarded_ip`](#ratelimit-forwarded_ip-block) below for details. 
* `http_method` - (Optional) Use the request's HTTP method as an aggregate key. See [RateLimit `http_method`](#ratelimit-http_method-block) below for details. @@ -1121,6 +1145,12 @@ The `custom_key` block supports the following arguments: * `query_string` - (Optional) Use the request's query string as an aggregate key. See [RateLimit `query_string`](#ratelimit-query_string-block) below for details. * `uri_path` - (Optional) Use the request's URI path as an aggregate key. See [RateLimit `uri_path`](#ratelimit-uri_path-block) below for details. +### RateLimit `asn` Block + +Use an Autonomous System Number (ASN) derived from the request's originating or forwarded IP address as an aggregate key. Each distinct ASN contributes to the aggregation instance. + +The `asn` block is configured as an empty block `{}`. + ### RateLimit `cookie` Block Use the value of a cookie in the request as an aggregate key. Each distinct value in the cookie contributes to the aggregation instance. If you use a single cookie as your custom key, then each value fully defines an aggregation instance. @@ -1241,4 +1271,4 @@ Using `terraform import`, import WAFv2 Web ACLs using `ID/Name/Scope`. For examp % terraform import aws_wafv2_web_acl.example a1b2c3d4-d5f6-7777-8888-9999aaaabbbbcccc/example/REGIONAL ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/wafv2_web_acl_association.html.markdown b/website/docs/cdktf/python/r/wafv2_web_acl_association.html.markdown index 8362b1dd6239..765bf0ad7e13 100644 --- a/website/docs/cdktf/python/r/wafv2_web_acl_association.html.markdown +++ b/website/docs/cdktf/python/r/wafv2_web_acl_association.html.markdown @@ -104,6 +104,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `resource_arn` - (Required) The Amazon Resource Name (ARN) of the resource to associate with the web ACL. This must be an ARN of an Application Load Balancer, an Amazon API Gateway stage (REST only, HTTP is unsupported), an Amazon Cognito User Pool, an Amazon AppSync GraphQL API, an Amazon App Runner service, or an Amazon Verified Access instance. * `web_acl_arn` - (Required) The Amazon Resource Name (ARN) of the Web ACL that you want to associate with the resource. @@ -142,4 +143,4 @@ Using `terraform import`, import WAFv2 Web ACL Association using `WEB_ACL_ARN,RE % terraform import aws_wafv2_web_acl_association.example arn:aws:wafv2:...7ce849ea,arn:aws:apigateway:...ages/name ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/wafv2_web_acl_logging_configuration.html.markdown b/website/docs/cdktf/python/r/wafv2_web_acl_logging_configuration.html.markdown index 30711ccc84e3..ab229c870354 100644 --- a/website/docs/cdktf/python/r/wafv2_web_acl_logging_configuration.html.markdown +++ b/website/docs/cdktf/python/r/wafv2_web_acl_logging_configuration.html.markdown @@ -131,7 +131,7 @@ class MyConvertedCode(TerraformStack): actions=["logs:CreateLogStream", "logs:PutLogEvents"], condition=[DataAwsIamPolicyDocumentStatementCondition( test="ArnLike", - values=["arn:aws:logs:${" + data_aws_region_current.name + "}:${" + current.account_id + "}:*" + values=["arn:aws:logs:${" + data_aws_region_current.region + "}:${" + current.account_id + "}:*" ], variable="aws:SourceArn" ), DataAwsIamPolicyDocumentStatementCondition( @@ -166,6 +166,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `log_destination_configs` - (Required) Configuration block that allows you to associate Amazon Kinesis Data Firehose, Cloudwatch Log log group, or S3 bucket Amazon Resource Names (ARNs) with the web ACL. **Note:** data firehose, log group, or bucket name **must** be prefixed with `aws-waf-logs-`, e.g. `aws-waf-logs-example-firehose`, `aws-waf-logs-example-log-group`, or `aws-waf-logs-example-bucket`. * `logging_filter` - (Optional) Configuration block that specifies which web requests are kept in the logs and which are dropped. It allows filtering based on the rule action and the web request labels applied by matching rules during web ACL evaluation. For more details, refer to the [Logging Filter](#logging-filter) section below. * `redacted_fields` - (Optional) Configuration for parts of the request that you want to keep out of the logs. Up to 100 `redacted_fields` blocks are supported. See [Redacted Fields](#redacted-fields) below for more details. @@ -257,4 +258,4 @@ Using `terraform import`, import WAFv2 Web ACL Logging Configurations using the % terraform import aws_wafv2_web_acl_logging_configuration.example arn:aws:wafv2:us-west-2:123456789012:regional/webacl/test-logs/a1b2c3d4-5678-90ab-cdef ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/wafv2_web_acl_rule_group_association.html.markdown b/website/docs/cdktf/python/r/wafv2_web_acl_rule_group_association.html.markdown new file mode 100644 index 000000000000..676c41ab3e94 --- /dev/null +++ b/website/docs/cdktf/python/r/wafv2_web_acl_rule_group_association.html.markdown @@ -0,0 +1,598 @@ +--- +subcategory: "WAF" +layout: "aws" +page_title: "AWS: aws_wafv2_web_acl_rule_group_association" +description: |- + Associates a WAFv2 Rule Group with a Web ACL by adding a rule that references the Rule Group. 
+--- + + + +# Resource: aws_wafv2_web_acl_rule_group_association + +Associates a WAFv2 Rule Group (custom or managed) with a Web ACL by adding a rule that references the Rule Group. Use this resource to apply the rules defined in a Rule Group to a Web ACL without duplicating rule definitions. + +This resource supports both: + +- **Custom Rule Groups**: User-created rule groups that you manage within your AWS account +- **Managed Rule Groups**: Pre-configured rule groups provided by AWS or third-party vendors + +!> **Warning:** Verify the rule names in your `rule_action_override`s carefully. With managed rule groups, WAF silently ignores any override that uses an invalid rule name. With customer-owned rule groups, invalid rule names in your overrides will cause web ACL updates to fail. An invalid rule name is any name that doesn't exactly match the case-sensitive name of an existing rule in the rule group. + +!> **Warning:** Using this resource will cause the associated Web ACL resource to show configuration drift in the `rule` argument unless you add `lifecycle { ignore_changes = [rule] }` to the Web ACL resource configuration. This is because this resource modifies the Web ACL's rules outside of the Web ACL resource's direct management. + +~> **Note:** This resource creates a rule within the Web ACL that references the entire Rule Group. The rule group's individual rules are evaluated as a unit when requests are processed by the Web ACL. + +## Example Usage + +### Custom Rule Group - Basic Usage + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from cdktf import TerraformResourceLifecycle +from constructs import Construct +from cdktf import Token, TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. 
+# +from imports.aws.wafv2_rule_group import Wafv2RuleGroup +from imports.aws.wafv2_web_acl import Wafv2WebAcl +from imports.aws.wafv2_web_acl_rule_group_association import Wafv2WebAclRuleGroupAssociation +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + example = Wafv2RuleGroup(self, "example", + capacity=10, + name="example-rule-group", + rule=[Wafv2RuleGroupRule( + action=Wafv2RuleGroupRuleAction( + block=Wafv2RuleGroupRuleActionBlock() + ), + name="block-suspicious-requests", + priority=1, + statement={ + "geo_match_statement": { + "country_codes": ["CN", "RU"] + } + }, + visibility_config=Wafv2RuleGroupRuleVisibilityConfig( + cloudwatch_metrics_enabled=True, + metric_name="block-suspicious-requests", + sampled_requests_enabled=True + ) + ) + ], + scope="REGIONAL", + visibility_config=Wafv2RuleGroupVisibilityConfig( + cloudwatch_metrics_enabled=True, + metric_name="example-rule-group", + sampled_requests_enabled=True + ) + ) + aws_wafv2_web_acl_example = Wafv2WebAcl(self, "example_1", + default_action=Wafv2WebAclDefaultAction( + allow=Wafv2WebAclDefaultActionAllow() + ), + lifecycle=TerraformResourceLifecycle( + ignore_changes=[rule] + ), + name="example-web-acl", + scope="REGIONAL", + visibility_config=Wafv2WebAclVisibilityConfig( + cloudwatch_metrics_enabled=True, + metric_name="example-web-acl", + sampled_requests_enabled=True + ) + ) + # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. 
+ aws_wafv2_web_acl_example.override_logical_id("example") + aws_wafv2_web_acl_rule_group_association_example = + Wafv2WebAclRuleGroupAssociation(self, "example_2", + priority=100, + rule_group_reference=[Wafv2WebAclRuleGroupAssociationRuleGroupReference( + arn=example.arn + ) + ], + rule_name="example-rule-group-rule", + web_acl_arn=Token.as_string(aws_wafv2_web_acl_example.arn) + ) + # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. + aws_wafv2_web_acl_rule_group_association_example.override_logical_id("example") +``` + +### Managed Rule Group - Basic Usage + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from cdktf import TerraformResourceLifecycle +from constructs import Construct +from cdktf import TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.wafv2_web_acl import Wafv2WebAcl +from imports.aws.wafv2_web_acl_rule_group_association import Wafv2WebAclRuleGroupAssociation +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + example = Wafv2WebAcl(self, "example", + default_action=Wafv2WebAclDefaultAction( + allow=Wafv2WebAclDefaultActionAllow() + ), + lifecycle=TerraformResourceLifecycle( + ignore_changes=[rule] + ), + name="example-web-acl", + scope="REGIONAL", + visibility_config=Wafv2WebAclVisibilityConfig( + cloudwatch_metrics_enabled=True, + metric_name="example-web-acl", + sampled_requests_enabled=True + ) + ) + Wafv2WebAclRuleGroupAssociation(self, "managed_example", + managed_rule_group=[Wafv2WebAclRuleGroupAssociationManagedRuleGroup( + name="AWSManagedRulesCommonRuleSet", + vendor_name="AWS" + ) + ], + priority=50, + rule_name="aws-common-rule-set", + web_acl_arn=example.arn + ) +``` + +### Managed Rule Group - With Version + +```python +# DO NOT EDIT. 
Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.wafv2_web_acl_rule_group_association import Wafv2WebAclRuleGroupAssociation +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + Wafv2WebAclRuleGroupAssociation(self, "managed_versioned", + managed_rule_group=[Wafv2WebAclRuleGroupAssociationManagedRuleGroup( + name="AWSManagedRulesCommonRuleSet", + vendor_name="AWS", + version="Version_1.0" + ) + ], + priority=60, + rule_name="aws-common-rule-set-versioned", + web_acl_arn=example.arn + ) +``` + +### Managed Rule Group - With Rule Action Overrides + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. 
+# +from imports.aws.wafv2_web_acl_rule_group_association import Wafv2WebAclRuleGroupAssociation +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + Wafv2WebAclRuleGroupAssociation(self, "managed_with_overrides", + managed_rule_group=[Wafv2WebAclRuleGroupAssociationManagedRuleGroup( + name="AWSManagedRulesCommonRuleSet", + rule_action_override=[Wafv2WebAclRuleGroupAssociationManagedRuleGroupRuleActionOverride( + action_to_use=[Wafv2WebAclRuleGroupAssociationManagedRuleGroupRuleActionOverrideActionToUse( + count=[Wafv2WebAclRuleGroupAssociationManagedRuleGroupRuleActionOverrideActionToUseCount( + custom_request_handling=[Wafv2WebAclRuleGroupAssociationManagedRuleGroupRuleActionOverrideActionToUseCountCustomRequestHandling( + insert_header=[Wafv2WebAclRuleGroupAssociationManagedRuleGroupRuleActionOverrideActionToUseCountCustomRequestHandlingInsertHeader( + name="X-RFI-Override", + value="counted" + ) + ] + ) + ] + ) + ] + ) + ], + name="GenericRFI_BODY" + ), Wafv2WebAclRuleGroupAssociationManagedRuleGroupRuleActionOverride( + action_to_use=[Wafv2WebAclRuleGroupAssociationManagedRuleGroupRuleActionOverrideActionToUse( + captcha=[Wafv2WebAclRuleGroupAssociationManagedRuleGroupRuleActionOverrideActionToUseCaptcha()] + ) + ], + name="SizeRestrictions_BODY" + ) + ], + vendor_name="AWS" + ) + ], + priority=70, + rule_name="aws-common-rule-set-with-overrides", + web_acl_arn=example.arn + ) +``` + +### Custom Rule Group - With Override Action + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import Token, TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. 
+# +from imports.aws.wafv2_web_acl_rule_group_association import Wafv2WebAclRuleGroupAssociation +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + Wafv2WebAclRuleGroupAssociation(self, "example", + override_action="count", + priority=100, + rule_group_reference=[Wafv2WebAclRuleGroupAssociationRuleGroupReference( + arn=Token.as_string(aws_wafv2_rule_group_example.arn) + ) + ], + rule_name="example-rule-group-rule", + web_acl_arn=Token.as_string(aws_wafv2_web_acl_example.arn) + ) +``` + +### Custom Rule Group - With Rule Action Overrides + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from cdktf import TerraformResourceLifecycle +from constructs import Construct +from cdktf import Token, TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.wafv2_rule_group import Wafv2RuleGroup +from imports.aws.wafv2_web_acl import Wafv2WebAcl +from imports.aws.wafv2_web_acl_rule_group_association import Wafv2WebAclRuleGroupAssociation +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + example = Wafv2RuleGroup(self, "example", + capacity=10, + name="example-rule-group", + rule=[Wafv2RuleGroupRule( + action=Wafv2RuleGroupRuleAction( + block=Wafv2RuleGroupRuleActionBlock() + ), + name="geo-block-rule", + priority=1, + statement={ + "geo_match_statement": { + "country_codes": ["CN", "RU"] + } + }, + visibility_config=Wafv2RuleGroupRuleVisibilityConfig( + cloudwatch_metrics_enabled=True, + metric_name="geo-block-rule", + sampled_requests_enabled=True + ) + ), Wafv2RuleGroupRule( + action=Wafv2RuleGroupRuleAction( + block=Wafv2RuleGroupRuleActionBlock() + ), + name="rate-limit-rule", + priority=2, + statement={ + "rate_based_statement": { + "aggregate_key_type": "IP", + "limit": 1000 + } + }, + 
visibility_config=Wafv2RuleGroupRuleVisibilityConfig( + cloudwatch_metrics_enabled=True, + metric_name="rate-limit-rule", + sampled_requests_enabled=True + ) + ) + ], + scope="REGIONAL", + visibility_config=Wafv2RuleGroupVisibilityConfig( + cloudwatch_metrics_enabled=True, + metric_name="example-rule-group", + sampled_requests_enabled=True + ) + ) + aws_wafv2_web_acl_example = Wafv2WebAcl(self, "example_1", + default_action=Wafv2WebAclDefaultAction( + allow=Wafv2WebAclDefaultActionAllow() + ), + lifecycle=TerraformResourceLifecycle( + ignore_changes=[rule] + ), + name="example-web-acl", + scope="REGIONAL", + visibility_config=Wafv2WebAclVisibilityConfig( + cloudwatch_metrics_enabled=True, + metric_name="example-web-acl", + sampled_requests_enabled=True + ) + ) + # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. + aws_wafv2_web_acl_example.override_logical_id("example") + aws_wafv2_web_acl_rule_group_association_example = + Wafv2WebAclRuleGroupAssociation(self, "example_2", + priority=100, + rule_group_reference=[Wafv2WebAclRuleGroupAssociationRuleGroupReference( + arn=example.arn, + rule_action_override=[Wafv2WebAclRuleGroupAssociationRuleGroupReferenceRuleActionOverride( + action_to_use=[Wafv2WebAclRuleGroupAssociationRuleGroupReferenceRuleActionOverrideActionToUse( + count=[Wafv2WebAclRuleGroupAssociationRuleGroupReferenceRuleActionOverrideActionToUseCount( + custom_request_handling=[Wafv2WebAclRuleGroupAssociationRuleGroupReferenceRuleActionOverrideActionToUseCountCustomRequestHandling( + insert_header=[Wafv2WebAclRuleGroupAssociationRuleGroupReferenceRuleActionOverrideActionToUseCountCustomRequestHandlingInsertHeader( + name="X-Geo-Block-Override", + value="counted" + ) + ] + ) + ] + ) + ] + ) + ], + name="geo-block-rule" + ), Wafv2WebAclRuleGroupAssociationRuleGroupReferenceRuleActionOverride( + 
action_to_use=[Wafv2WebAclRuleGroupAssociationRuleGroupReferenceRuleActionOverrideActionToUse( + captcha=[Wafv2WebAclRuleGroupAssociationRuleGroupReferenceRuleActionOverrideActionToUseCaptcha( + custom_request_handling=[Wafv2WebAclRuleGroupAssociationRuleGroupReferenceRuleActionOverrideActionToUseCaptchaCustomRequestHandling( + insert_header=[Wafv2WebAclRuleGroupAssociationRuleGroupReferenceRuleActionOverrideActionToUseCaptchaCustomRequestHandlingInsertHeader( + name="X-Rate-Limit-Override", + value="captcha-required" + ) + ] + ) + ] + ) + ] + ) + ], + name="rate-limit-rule" + ) + ] + ) + ], + rule_name="example-rule-group-rule", + web_acl_arn=Token.as_string(aws_wafv2_web_acl_example.arn) + ) + # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. + aws_wafv2_web_acl_rule_group_association_example.override_logical_id("example") +``` + +### Custom Rule Group - CloudFront Web ACL + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from cdktf import TerraformResourceLifecycle +from constructs import Construct +from cdktf import Token, TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. 
+# +from imports.aws.wafv2_rule_group import Wafv2RuleGroup +from imports.aws.wafv2_web_acl import Wafv2WebAcl +from imports.aws.wafv2_web_acl_rule_group_association import Wafv2WebAclRuleGroupAssociation +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + cloudfront_example = Wafv2RuleGroup(self, "cloudfront_example", + capacity=10, + name="cloudfront-rule-group", + rule=[Wafv2RuleGroupRule( + action=Wafv2RuleGroupRuleAction( + block=Wafv2RuleGroupRuleActionBlock() + ), + name="rate-limit", + priority=1, + statement={ + "rate_based_statement": { + "aggregate_key_type": "IP", + "limit": 2000 + } + }, + visibility_config=Wafv2RuleGroupRuleVisibilityConfig( + cloudwatch_metrics_enabled=True, + metric_name="rate-limit", + sampled_requests_enabled=True + ) + ) + ], + scope="CLOUDFRONT", + visibility_config=Wafv2RuleGroupVisibilityConfig( + cloudwatch_metrics_enabled=True, + metric_name="cloudfront-rule-group", + sampled_requests_enabled=True + ) + ) + aws_wafv2_web_acl_cloudfront_example = Wafv2WebAcl(self, "cloudfront_example_1", + default_action=Wafv2WebAclDefaultAction( + allow=Wafv2WebAclDefaultActionAllow() + ), + lifecycle=TerraformResourceLifecycle( + ignore_changes=[rule] + ), + name="cloudfront-web-acl", + scope="CLOUDFRONT", + visibility_config=Wafv2WebAclVisibilityConfig( + cloudwatch_metrics_enabled=True, + metric_name="cloudfront-web-acl", + sampled_requests_enabled=True + ) + ) + # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. 
+ aws_wafv2_web_acl_cloudfront_example.override_logical_id("cloudfront_example") + aws_wafv2_web_acl_rule_group_association_cloudfront_example = + Wafv2WebAclRuleGroupAssociation(self, "cloudfront_example_2", + priority=50, + rule_group_reference=[Wafv2WebAclRuleGroupAssociationRuleGroupReference( + arn=cloudfront_example.arn + ) + ], + rule_name="cloudfront-rule-group-rule", + web_acl_arn=Token.as_string(aws_wafv2_web_acl_cloudfront_example.arn) + ) + # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. + aws_wafv2_web_acl_rule_group_association_cloudfront_example.override_logical_id("cloudfront_example") +``` + +## Argument Reference + +The following arguments are required: + +* `rule_name` - (Required) Name of the rule to create in the Web ACL that references the rule group. Must be between 1 and 128 characters. +* `priority` - (Required) Priority of the rule within the Web ACL. Rules are evaluated in order of priority, with lower numbers evaluated first. +* `web_acl_arn` - (Required) ARN of the Web ACL to associate the Rule Group with. + +The following arguments are optional: + +* `managed_rule_group` - (Optional) Managed Rule Group configuration. One of `rule_group_reference` or `managed_rule_group` is required. Conflicts with `rule_group_reference`. [See below](#managed_rule_group). +* `override_action` - (Optional) Override action for the rule group. Valid values are `none` and `count`. Defaults to `none`. When set to `count`, the actions defined in the rule group rules are overridden to count matches instead of blocking or allowing requests. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
+* `rule_group_reference` - (Optional) Custom Rule Group reference configuration. One of `rule_group_reference` or `managed_rule_group` is required. Conflicts with `managed_rule_group`. [See below](#rule_group_reference). + +### rule_group_reference + +* `arn` - (Required) ARN of the Rule Group to associate with the Web ACL. +* `rule_action_override` - (Optional) Override actions for specific rules within the rule group. [See below](#rule_action_override). + +### managed_rule_group + +* `name` - (Required) Name of the managed rule group. +* `vendor_name` - (Required) Name of the managed rule group vendor. For AWS managed rule groups, this is `AWS`. +* `version` - (Optional) Version of the managed rule group. If not specified, the default version is used. +* `rule_action_override` - (Optional) Override actions for specific rules within the rule group. [See below](#rule_action_override). + +### rule_action_override + +* `name` - (Required) Name of the rule to override within the rule group. Verify the name carefully. With managed rule groups, WAF silently ignores any override that uses an invalid rule name. With customer-owned rule groups, invalid rule names in your overrides will cause web ACL updates to fail. An invalid rule name is any name that doesn't exactly match the case-sensitive name of an existing rule in the rule group. +* `action_to_use` - (Required) Action to use instead of the rule's original action. [See below](#action_to_use). + +### action_to_use + +Exactly one of the following action blocks must be specified: + +* `allow` - (Optional) Allow the request. [See below](#allow). +* `block` - (Optional) Block the request. [See below](#block). +* `captcha` - (Optional) Require CAPTCHA verification. [See below](#captcha). +* `challenge` - (Optional) Require challenge verification. [See below](#challenge). +* `count` - (Optional) Count the request without taking action. [See below](#count). 
+ +### allow + +* `custom_request_handling` - (Optional) Custom handling for allowed requests. [See below](#custom_request_handling). + +### block + +* `custom_response` - (Optional) Custom response for blocked requests. [See below](#custom_response). + +### captcha + +* `custom_request_handling` - (Optional) Custom handling for CAPTCHA requests. [See below](#custom_request_handling). + +### challenge + +* `custom_request_handling` - (Optional) Custom handling for challenge requests. [See below](#custom_request_handling). + +### count + +* `custom_request_handling` - (Optional) Custom handling for counted requests. [See below](#custom_request_handling). + +### custom_request_handling + +* `insert_header` - (Required) Headers to insert into the request. [See below](#insert_header). + +### custom_response + +* `custom_response_body_key` - (Optional) Key of a custom response body to use. +* `response_code` - (Required) HTTP response code to return (200-599). +* `response_header` - (Optional) Headers to include in the response. [See below](#response_header). + +### insert_header + +* `name` - (Required) Name of the header to insert. +* `value` - (Required) Value of the header to insert. + +### response_header + +* `name` - (Required) Name of the response header. +* `value` - (Required) Value of the response header. + +## Attribute Reference + +This resource exports the following attributes in addition to the arguments above: + +None. + +## Timeouts + +[Configuration options](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts): + +* `create` - (Default `30m`) +* `update` - (Default `30m`) +* `delete` - (Default `30m`) + +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import WAFv2 web ACL custom rule group associations using `WebACLARN,RuleGroupARN,RuleName`. For example: + +```python +# DO NOT EDIT. 
Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.wafv2_web_acl_rule_group_association import Wafv2WebAclRuleGroupAssociation +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + Wafv2WebAclRuleGroupAssociation.generate_config_for_import(self, "example", "arn:aws:wafv2:us-east-1:123456789012:regional/webacl/example-web-acl/12345678-1234-1234-1234-123456789012,arn:aws:wafv2:us-east-1:123456789012:regional/rulegroup/example-rule-group/87654321-4321-4321-4321-210987654321,example-rule-group-rule") +``` + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import WAFv2 web ACL managed rule group associations using `WebACLARN,VendorName:RuleGroupName[:Version],RuleName`. For example: + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.wafv2_web_acl_rule_group_association import Wafv2WebAclRuleGroupAssociation +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + Wafv2WebAclRuleGroupAssociation.generate_config_for_import(self, "managedExample", "arn:aws:wafv2:us-east-1:123456789012:regional/webacl/example-web-acl/12345678-1234-1234-1234-123456789012,AWS:AWSManagedRulesCommonRuleSet,aws-common-rule-set") +``` + +Using `terraform import`, import WAFv2 web ACL custom rule group associations using `WebACLARN,RuleGroupARN,RuleName`. 
For example: + +```console +% terraform import aws_wafv2_web_acl_rule_group_association.example "arn:aws:wafv2:us-east-1:123456789012:regional/webacl/example-web-acl/12345678-1234-1234-1234-123456789012,arn:aws:wafv2:us-east-1:123456789012:regional/rulegroup/example-rule-group/87654321-4321-4321-4321-210987654321,example-rule-group-rule" +``` + +Using `terraform import`, import WAFv2 web ACL managed rule group associations using `WebACLARN,VendorName:RuleGroupName[:Version],RuleName`. For example: + +```console +% terraform import aws_wafv2_web_acl_rule_group_association.managed_example "arn:aws:wafv2:us-east-1:123456789012:regional/webacl/example-web-acl/12345678-1234-1234-1234-123456789012,AWS:AWSManagedRulesCommonRuleSet,aws-common-rule-set" +``` + + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/workspaces_connection_alias.html.markdown b/website/docs/cdktf/python/r/workspaces_connection_alias.html.markdown index 18ed17822942..e9365629c041 100644 --- a/website/docs/cdktf/python/r/workspaces_connection_alias.html.markdown +++ b/website/docs/cdktf/python/r/workspaces_connection_alias.html.markdown @@ -35,10 +35,11 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -The following arguments are required: +This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `connection_string` - (Required) The connection string specified for the connection alias. The connection string must be in the form of a fully qualified domain name (FQDN), such as www.example.com. -* `tags` – (Optional) A map of tags assigned to the WorkSpaces Connection Alias. 
If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. +* `tags` - (Optional) A map of tags assigned to the WorkSpaces Connection Alias. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. ## Attribute Reference @@ -82,4 +83,4 @@ Using `terraform import`, import WorkSpaces Connection Alias using the connectio % terraform import aws_workspaces_connection_alias.example rft-8012925589 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/workspaces_directory.html.markdown b/website/docs/cdktf/python/r/workspaces_directory.html.markdown index 4477254294a9..b03fc6485b54 100644 --- a/website/docs/cdktf/python/r/workspaces_directory.html.markdown +++ b/website/docs/cdktf/python/r/workspaces_directory.html.markdown @@ -148,14 +148,13 @@ from cdktf import Token, TerraformStack # from imports.aws.workspaces_directory import WorkspacesDirectory class MyConvertedCode(TerraformStack): - def __init__(self, scope, name, *, directoryId): + def __init__(self, scope, name): super().__init__(scope, name) WorkspacesDirectory(self, "example", - active_directory_config=[{ - "domain_name": "example.internal", - "service_account_secret_arn": aws_secretsmanager_secret_example.arn - } - ], + active_directory_config=WorkspacesDirectoryActiveDirectoryConfig( + domain_name="example.internal", + service_account_secret_arn=Token.as_string(aws_secretsmanager_secret_example.arn) + ), saml_properties=WorkspacesDirectorySamlProperties( relay_state_parameter_name="RelayState", status="ENABLED", @@ -180,8 +179,7 @@ class MyConvertedCode(TerraformStack): ), 
workspace_directory_description="WorkSpaces Pools directory", workspace_directory_name="Pool directory", - workspace_type="POOLS", - directory_id=directory_id + workspace_type="POOLS" ) ``` @@ -215,17 +213,18 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `directory_id` - (Optional) The directory identifier for registration in WorkSpaces service. * `subnet_ids` - (Optional) The identifiers of the subnets where the directory resides. -* `ip_group_ids` – (Optional) The identifiers of the IP access control groups associated with the directory. -* `tags` – (Optional) A map of tags assigned to the WorkSpaces directory. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. +* `ip_group_ids` - (Optional) The identifiers of the IP access control groups associated with the directory. +* `tags` - (Optional) A map of tags assigned to the WorkSpaces directory. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. * `certificate_based_auth_properties` - (Optional) Configuration of certificate-based authentication (CBA) integration. Requires SAML authentication to be enabled. Defined below. -* `saml_properties` – (Optional) Configuration of SAML authentication integration. Defined below. 
-* `self_service_permissions` – (Optional) Permissions to enable or disable self-service capabilities when `workspace_type` is set to `PERSONAL`.. Defined below.
-* `workspace_access_properties` – (Optional) Specifies which devices and operating systems users can use to access their WorkSpaces. Defined below.
-* `workspace_creation_properties` – (Optional) Default properties that are used for creating WorkSpaces. Defined below.
+* `saml_properties` - (Optional) Configuration of SAML authentication integration. Defined below.
+* `self_service_permissions` - (Optional) Permissions to enable or disable self-service capabilities when `workspace_type` is set to `PERSONAL`. Defined below.
+* `workspace_access_properties` - (Optional) Specifies which devices and operating systems users can use to access their WorkSpaces. Defined below.
+* `workspace_creation_properties` - (Optional) Default properties that are used for creating WorkSpaces. Defined below.
 * `workspace_type` - (Optional) Specifies the type of WorkSpaces directory. Valid values are `PERSONAL` and `POOLS`. Default is `PERSONAL`.
-* `active_directory_config` – (Optional) Configuration for Active Directory integration when `workspace_type` is set to `POOLS`. Defined below.
+* `active_directory_config` - (Optional) Configuration for Active Directory integration when `workspace_type` is set to `POOLS`. Defined below.
 * `workspace_directory_name` - (Required for `POOLS`) The name of the WorkSpaces directory when `workspace_type` is set to `POOLS`.
 * `workspace_directory_description` - (Required for `POOLS`) The description of the WorkSpaces directory when `workspace_type` is set to `POOLS`.
 * `user_identity_type` - (Required for `POOLS`) Specifies the user identity type for the WorkSpaces directory. Valid values are `CUSTOMER_MANAGED`, `AWS_DIRECTORY_SERVICE`, `AWS_IAM_IDENTITY_CENTER`.
@@ -245,39 +244,39 @@ This resource supports the following arguments: ### self_service_permissions -* `change_compute_type` – (Optional) Whether WorkSpaces directory users can change the compute type (bundle) for their workspace. Default `false`. -* `increase_volume_size` – (Optional) Whether WorkSpaces directory users can increase the volume size of the drives on their workspace. Default `false`. -* `rebuild_workspace` – (Optional) Whether WorkSpaces directory users can rebuild the operating system of a workspace to its original state. Default `false`. -* `restart_workspace` – (Optional) Whether WorkSpaces directory users can restart their workspace. Default `true`. -* `switch_running_mode` – (Optional) Whether WorkSpaces directory users can switch the running mode of their workspace. Default `false`. +* `change_compute_type` - (Optional) Whether WorkSpaces directory users can change the compute type (bundle) for their workspace. Default `false`. +* `increase_volume_size` - (Optional) Whether WorkSpaces directory users can increase the volume size of the drives on their workspace. Default `false`. +* `rebuild_workspace` - (Optional) Whether WorkSpaces directory users can rebuild the operating system of a workspace to its original state. Default `false`. +* `restart_workspace` - (Optional) Whether WorkSpaces directory users can restart their workspace. Default `true`. +* `switch_running_mode` - (Optional) Whether WorkSpaces directory users can switch the running mode of their workspace. Default `false`. ### workspace_access_properties -* `device_type_android` – (Optional) Indicates whether users can use Android devices to access their WorkSpaces. -* `device_type_chromeos` – (Optional) Indicates whether users can use Chromebooks to access their WorkSpaces. -* `device_type_ios` – (Optional) Indicates whether users can use iOS devices to access their WorkSpaces. 
-* `device_type_linux` – (Optional) Indicates whether users can use Linux clients to access their WorkSpaces. -* `device_type_osx` – (Optional) Indicates whether users can use macOS clients to access their WorkSpaces. -* `device_type_web` – (Optional) Indicates whether users can access their WorkSpaces through a web browser. -* `device_type_windows` – (Optional) Indicates whether users can use Windows clients to access their WorkSpaces. -* `device_type_zeroclient` – (Optional) Indicates whether users can use zero client devices to access their WorkSpaces. +* `device_type_android` - (Optional) Indicates whether users can use Android devices to access their WorkSpaces. +* `device_type_chromeos` - (Optional) Indicates whether users can use Chromebooks to access their WorkSpaces. +* `device_type_ios` - (Optional) Indicates whether users can use iOS devices to access their WorkSpaces. +* `device_type_linux` - (Optional) Indicates whether users can use Linux clients to access their WorkSpaces. +* `device_type_osx` - (Optional) Indicates whether users can use macOS clients to access their WorkSpaces. +* `device_type_web` - (Optional) Indicates whether users can access their WorkSpaces through a web browser. +* `device_type_windows` - (Optional) Indicates whether users can use Windows clients to access their WorkSpaces. +* `device_type_zeroclient` - (Optional) Indicates whether users can use zero client devices to access their WorkSpaces. ### workspace_creation_properties -> **Note:** Once you specified `custom_security_group_id` or `default_ou`, there is no way to delete these attributes. If you cleanup them from the configuration, they still be present in state. -* `custom_security_group_id` – (Optional) The identifier of your custom security group. Should relate to the same VPC, where workspaces reside in. -* `default_ou` – (Optional) The default organizational unit (OU) for your WorkSpace directories. Should conform `"OU=,DC=,...,DC="` pattern. 
-* `enable_internet_access` – (Optional) Indicates whether internet access is enabled for your WorkSpaces. -* `enable_maintenance_mode` – (Optional) Indicates whether maintenance mode is enabled for your WorkSpaces. Valid only if `workspace_type` is set to `PERSONAL`. -* `user_enabled_as_local_administrator` – (Optional) Indicates whether users are local administrators of their WorkSpaces. Valid only if `workspace_type` is set to `PERSONAL`. +* `custom_security_group_id` - (Optional) The identifier of your custom security group. Should relate to the same VPC, where workspaces reside in. +* `default_ou` - (Optional) The default organizational unit (OU) for your WorkSpace directories. Should conform `"OU=,DC=,...,DC="` pattern. +* `enable_internet_access` - (Optional) Indicates whether internet access is enabled for your WorkSpaces. +* `enable_maintenance_mode` - (Optional) Indicates whether maintenance mode is enabled for your WorkSpaces. Valid only if `workspace_type` is set to `PERSONAL`. +* `user_enabled_as_local_administrator` - (Optional) Indicates whether users are local administrators of their WorkSpaces. Valid only if `workspace_type` is set to `PERSONAL`. ### active_directory_config -> **Note:** `active_directory_config` is only valid if `workspaces_type` is set to `POOLS`. -* `domain_name` – Fully qualified domain name of the AWS Directory Service directory. -* `service_account_secret_arn` – ARN of the Secrets Manager secret that contains the credentials for the service account. For more information, see [Service Account Details](https://docs.aws.amazon.com/workspaces/latest/adminguide/pools-service-account-details.html). +* `domain_name` - Fully qualified domain name of the AWS Directory Service directory. +* `service_account_secret_arn` - ARN of the Secrets Manager secret that contains the credentials for the service account. 
For more information, see [Service Account Details](https://docs.aws.amazon.com/workspaces/latest/adminguide/pools-service-account-details.html). ## Attribute Reference @@ -320,4 +319,4 @@ Using `terraform import`, import Workspaces directory using the directory ID. Fo % terraform import aws_workspaces_directory.main d-4444444444 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/workspaces_ip_group.html.markdown b/website/docs/cdktf/python/r/workspaces_ip_group.html.markdown index 83d2df265424..22331f8ec7cb 100644 --- a/website/docs/cdktf/python/r/workspaces_ip_group.html.markdown +++ b/website/docs/cdktf/python/r/workspaces_ip_group.html.markdown @@ -47,10 +47,11 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The name of the IP group. * `description` - (Optional) The description of the IP group. * `rules` - (Optional) One or more pairs specifying the IP group rule (in CIDR format) from which web requests originate. -* `tags` – (Optional) A map of tags assigned to the WorkSpaces directory. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. +* `tags` - (Optional) A map of tags assigned to the WorkSpaces directory. 
If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. ## Nested Blocks @@ -93,4 +94,4 @@ Using `terraform import`, import WorkSpaces IP groups using their GroupID. For e % terraform import aws_workspaces_ip_group.example wsipg-488lrtl3k ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/workspaces_workspace.html.markdown b/website/docs/cdktf/python/r/workspaces_workspace.html.markdown index 3327a618ff55..dbd2f9259908 100644 --- a/website/docs/cdktf/python/r/workspaces_workspace.html.markdown +++ b/website/docs/cdktf/python/r/workspaces_workspace.html.markdown @@ -60,22 +60,23 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `directory_id` - (Required) The ID of the directory for the WorkSpace. * `bundle_id` - (Required) The ID of the bundle for the WorkSpace. -* `user_name` – (Required) The user name of the user for the WorkSpace. This user name must exist in the directory for the WorkSpace. +* `user_name` - (Required) The user name of the user for the WorkSpace. This user name must exist in the directory for the WorkSpace. * `root_volume_encryption_enabled` - (Optional) Indicates whether the data stored on the root volume is encrypted. -* `user_volume_encryption_enabled` – (Optional) Indicates whether the data stored on the user volume is encrypted. 
-* `volume_encryption_key` – (Optional) The ARN of a symmetric AWS KMS customer master key (CMK) used to encrypt data stored on your WorkSpace. Amazon WorkSpaces does not support asymmetric CMKs. +* `user_volume_encryption_enabled` - (Optional) Indicates whether the data stored on the user volume is encrypted. +* `volume_encryption_key` - (Optional) The ARN of a symmetric AWS KMS customer master key (CMK) used to encrypt data stored on your WorkSpace. Amazon WorkSpaces does not support asymmetric CMKs. * `tags` - (Optional) The tags for the WorkSpace. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. -* `workspace_properties` – (Optional) The WorkSpace properties. +* `workspace_properties` - (Optional) The WorkSpace properties. `workspace_properties` supports the following: -* `compute_type_name` – (Optional) The compute type. For more information, see [Amazon WorkSpaces Bundles](http://aws.amazon.com/workspaces/details/#Amazon_WorkSpaces_Bundles). Valid values are `VALUE`, `STANDARD`, `PERFORMANCE`, `POWER`, `GRAPHICS`, `POWERPRO`, `GRAPHICSPRO`, `GRAPHICS_G4DN`, and `GRAPHICSPRO_G4DN`. -* `root_volume_size_gib` – (Optional) The size of the root volume. -* `running_mode` – (Optional) The running mode. For more information, see [Manage the WorkSpace Running Mode](https://docs.aws.amazon.com/workspaces/latest/adminguide/running-mode.html). Valid values are `AUTO_STOP` and `ALWAYS_ON`. -* `running_mode_auto_stop_timeout_in_minutes` – (Optional) The time after a user logs off when WorkSpaces are automatically stopped. Configured in 60-minute intervals. -* `user_volume_size_gib` – (Optional) The size of the user storage. +* `compute_type_name` - (Optional) The compute type. 
For more information, see [Amazon WorkSpaces Bundles](http://aws.amazon.com/workspaces/details/#Amazon_WorkSpaces_Bundles). Valid values are `VALUE`, `STANDARD`, `PERFORMANCE`, `POWER`, `GRAPHICS`, `POWERPRO`, `GRAPHICSPRO`, `GRAPHICS_G4DN`, and `GRAPHICSPRO_G4DN`. +* `root_volume_size_gib` - (Optional) The size of the root volume. +* `running_mode` - (Optional) The running mode. For more information, see [Manage the WorkSpace Running Mode](https://docs.aws.amazon.com/workspaces/latest/adminguide/running-mode.html). Valid values are `AUTO_STOP` and `ALWAYS_ON`. +* `running_mode_auto_stop_timeout_in_minutes` - (Optional) The time after a user logs off when WorkSpaces are automatically stopped. Configured in 60-minute intervals. +* `user_volume_size_gib` - (Optional) The size of the user storage. ## Attribute Reference @@ -120,4 +121,4 @@ Using `terraform import`, import Workspaces using their ID. For example: % terraform import aws_workspaces_workspace.example ws-9z9zmbkhv ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/workspacesweb_browser_settings.html.markdown b/website/docs/cdktf/python/r/workspacesweb_browser_settings.html.markdown index fcf50197dfc2..f03696db3da6 100644 --- a/website/docs/cdktf/python/r/workspacesweb_browser_settings.html.markdown +++ b/website/docs/cdktf/python/r/workspacesweb_browser_settings.html.markdown @@ -89,6 +89,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `additional_encryption_context` - (Optional) Additional encryption context for the browser settings. * `customer_managed_key` - (Optional) ARN of the customer managed KMS key. 
* `tags` - (Optional) Map of tags assigned to the resource. If configured with a provider [`default_tags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. @@ -126,4 +127,4 @@ Using `terraform import`, import WorkSpaces Web Browser Settings using the `brow % terraform import aws_workspacesweb_browser_settings.example arn:aws:workspacesweb:us-west-2:123456789012:browsersettings/abcdef12345 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/workspacesweb_browser_settings_association.html.markdown b/website/docs/cdktf/python/r/workspacesweb_browser_settings_association.html.markdown new file mode 100644 index 000000000000..fcc48ff95770 --- /dev/null +++ b/website/docs/cdktf/python/r/workspacesweb_browser_settings_association.html.markdown @@ -0,0 +1,97 @@ +--- +subcategory: "WorkSpaces Web" +layout: "aws" +page_title: "AWS: aws_workspacesweb_browser_settings_association" +description: |- + Terraform resource for managing an AWS WorkSpaces Web Browser Settings Association. +--- + + + +# Resource: aws_workspacesweb_browser_settings_association + +Terraform resource for managing an AWS WorkSpaces Web Browser Settings Association. + +## Example Usage + +### Basic Usage + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import Fn, Token, TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. 
+# +from imports.aws.workspacesweb_browser_settings import WorkspaceswebBrowserSettings +from imports.aws.workspacesweb_browser_settings_association import WorkspaceswebBrowserSettingsAssociation +from imports.aws.workspacesweb_portal import WorkspaceswebPortal +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + example = WorkspaceswebBrowserSettings(self, "example", + browser_policy=Token.as_string( + Fn.jsonencode({ + "chrome_policies": { + "DefaultDownloadDirectory": { + "value": "/home/as2-streaming-user/MyFiles/TemporaryFiles1" + } + } + })) + ) + aws_workspacesweb_portal_example = WorkspaceswebPortal(self, "example_1", + display_name="example" + ) + # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. + aws_workspacesweb_portal_example.override_logical_id("example") + aws_workspacesweb_browser_settings_association_example = + WorkspaceswebBrowserSettingsAssociation(self, "example_2", + browser_settings_arn=example.browser_settings_arn, + portal_arn=Token.as_string(aws_workspacesweb_portal_example.portal_arn) + ) + # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. + aws_workspacesweb_browser_settings_association_example.override_logical_id("example") +``` + +## Argument Reference + +The following arguments are required: + +* `browser_settings_arn` - (Required) ARN of the browser settings to associate with the portal. Forces replacement if changed. +* `portal_arn` - (Required) ARN of the portal to associate with the browser settings. Forces replacement if changed. + +The following arguments are optional: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). + +## Attribute Reference + +This resource exports no additional attributes. + +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import WorkSpaces Web Browser Settings Association using the `browser_settings_arn,portal_arn`. For example: + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.workspacesweb_browser_settings_association import WorkspaceswebBrowserSettingsAssociation +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + WorkspaceswebBrowserSettingsAssociation.generate_config_for_import(self, "example", "arn:aws:workspaces-web:us-west-2:123456789012:browserSettings/browser_settings-id-12345678,arn:aws:workspaces-web:us-west-2:123456789012:portal/portal-id-12345678") +``` + +Using `terraform import`, import WorkSpaces Web Browser Settings Association using the `browser_settings_arn,portal_arn`. 
For example: + +```console +% terraform import aws_workspacesweb_browser_settings_association.example arn:aws:workspaces-web:us-west-2:123456789012:browserSettings/browser_settings-id-12345678,arn:aws:workspaces-web:us-west-2:123456789012:portal/portal-id-12345678 +``` + + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/workspacesweb_data_protection_settings.html.markdown b/website/docs/cdktf/python/r/workspacesweb_data_protection_settings.html.markdown index f19a86c2da67..fdec51e388ce 100644 --- a/website/docs/cdktf/python/r/workspacesweb_data_protection_settings.html.markdown +++ b/website/docs/cdktf/python/r/workspacesweb_data_protection_settings.html.markdown @@ -24,7 +24,7 @@ from cdktf import TerraformStack # Provider bindings are generated by running `cdktf get`. # See https://cdk.tf/provider-generation for more details. # -from imports.aws. import WorkspaceswebDataProtectionSettings +from imports.aws.workspacesweb_data_protection_settings import WorkspaceswebDataProtectionSettings class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) @@ -43,27 +43,27 @@ from cdktf import TerraformStack # Provider bindings are generated by running `cdktf get`. # See https://cdk.tf/provider-generation for more details. # -from imports.aws. 
import WorkspaceswebDataProtectionSettings +from imports.aws.workspacesweb_data_protection_settings import WorkspaceswebDataProtectionSettings class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) WorkspaceswebDataProtectionSettings(self, "example", description="Example data protection settings", display_name="example", - inline_redaction_configuration=[{ - "global_confidence_level": 2, - "global_enforced_urls": ["https://example.com"], - "inline_redaction_pattern": [{ - "built_in_pattern_id": "ssn", - "confidence_level": 3, - "redaction_place_holder": [{ - "redaction_place_holder_text": "REDACTED", - "redaction_place_holder_type": "CustomText" - } + inline_redaction_configuration=[WorkspaceswebDataProtectionSettingsInlineRedactionConfiguration( + global_confidence_level=2, + global_enforced_urls=["https://example.com"], + inline_redaction_pattern=[WorkspaceswebDataProtectionSettingsInlineRedactionConfigurationInlineRedactionPattern( + built_in_pattern_id="ssn", + confidence_level=3, + redaction_place_holder=[WorkspaceswebDataProtectionSettingsInlineRedactionConfigurationInlineRedactionPatternRedactionPlaceHolder( + redaction_place_holder_text="REDACTED", + redaction_place_holder_type="CustomText" + ) ] - } + ) ] - } + ) ] ) ``` @@ -133,6 +133,7 @@ The following arguments are optional: * `customer_managed_key` - (Optional) ARN of the customer managed KMS key. * `description` - (Optional) The description of the data protection settings. * `inline_redaction_configuration` - (Optional) The inline redaction configuration of the data protection settings. Detailed below. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
* `tags` - (Optional) Map of tags assigned to the resource. If configured with a provider [`default_tags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. ### inline_redaction_configuration @@ -183,7 +184,7 @@ from cdktf import TerraformStack # Provider bindings are generated by running `cdktf get`. # See https://cdk.tf/provider-generation for more details. # -from imports.aws. import WorkspaceswebDataProtectionSettings +from imports.aws.workspacesweb_data_protection_settings import WorkspaceswebDataProtectionSettings class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) @@ -196,4 +197,4 @@ Using `terraform import`, import WorkSpaces Web Data Protection Settings using t % terraform import aws_workspacesweb_data_protection_settings.example arn:aws:workspaces-web:us-west-2:123456789012:dataprotectionsettings/abcdef12345 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/workspacesweb_data_protection_settings_association.html.markdown b/website/docs/cdktf/python/r/workspacesweb_data_protection_settings_association.html.markdown new file mode 100644 index 000000000000..ca3893e98f2d --- /dev/null +++ b/website/docs/cdktf/python/r/workspacesweb_data_protection_settings_association.html.markdown @@ -0,0 +1,84 @@ +--- +subcategory: "WorkSpaces Web" +layout: "aws" +page_title: "AWS: aws_workspacesweb_data_protection_settings_association" +description: |- + Terraform resource for managing an AWS WorkSpaces Web Data Protection Settings Association. +--- + + + +# Resource: aws_workspacesweb_data_protection_settings_association + +Terraform resource for managing an AWS WorkSpaces Web Data Protection Settings Association. + +## Example Usage + +### Basic Usage + +```python +# DO NOT EDIT. 
Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import Token, TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.workspacesweb_data_protection_settings import WorkspaceswebDataProtectionSettings +from imports.aws.workspacesweb_data_protection_settings_association import WorkspaceswebDataProtectionSettingsAssociation +from imports.aws.workspacesweb_portal import WorkspaceswebPortal +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + example = WorkspaceswebDataProtectionSettings(self, "example", + display_name="example" + ) + aws_workspacesweb_portal_example = WorkspaceswebPortal(self, "example_1", + display_name="example" + ) + # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. + aws_workspacesweb_portal_example.override_logical_id("example") + aws_workspacesweb_data_protection_settings_association_example = + WorkspaceswebDataProtectionSettingsAssociation(self, "example_2", + data_protection_settings_arn=example.data_protection_settings_arn, + portal_arn=Token.as_string(aws_workspacesweb_portal_example.portal_arn) + ) + # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. + aws_workspacesweb_data_protection_settings_association_example.override_logical_id("example") +``` + +## Argument Reference + +The following arguments are required: + +* `data_protection_settings_arn` - (Required) ARN of the data protection settings to associate with the portal. Forces replacement if changed. +* `portal_arn` - (Required) ARN of the portal to associate with the data protection settings. Forces replacement if changed. 
+ +The following arguments are optional: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). + +## Attribute Reference + +This resource exports no additional attributes. + +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import WorkSpaces Web Data Protection Settings Association using the `data_protection_settings_arn,portal_arn`. For example: + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.workspacesweb_data_protection_settings_association import WorkspaceswebDataProtectionSettingsAssociation +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + WorkspaceswebDataProtectionSettingsAssociation.generate_config_for_import(self, "example", "arn:aws:workspaces-web:us-west-2:123456789012:dataProtectionSettings/data_protection_settings-id-12345678,arn:aws:workspaces-web:us-west-2:123456789012:portal/portal-id-12345678") +``` + + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/workspacesweb_identity_provider.html.markdown b/website/docs/cdktf/python/r/workspacesweb_identity_provider.html.markdown new file mode 100644 index 000000000000..519b0331da90 --- /dev/null +++ b/website/docs/cdktf/python/r/workspacesweb_identity_provider.html.markdown @@ -0,0 +1,160 @@ +--- +subcategory: "WorkSpaces Web" +layout: "aws" +page_title: "AWS: aws_workspacesweb_identity_provider" +description: |- + Terraform resource for 
managing an AWS WorkSpaces Web Identity Provider. +--- + + + +# Resource: aws_workspacesweb_identity_provider + +Terraform resource for managing an AWS WorkSpaces Web Identity Provider. + +## Example Usage + +### Basic Usage with SAML + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.workspacesweb_identity_provider import WorkspaceswebIdentityProvider +from imports.aws.workspacesweb_portal import WorkspaceswebPortal +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + example = WorkspaceswebPortal(self, "example", + display_name="example" + ) + aws_workspacesweb_identity_provider_example = + WorkspaceswebIdentityProvider(self, "example_1", + identity_provider_details={ + "MetadataURL": "https://example.com/metadata" + }, + identity_provider_name="example-saml", + identity_provider_type="SAML", + portal_arn=example.portal_arn + ) + # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. + aws_workspacesweb_identity_provider_example.override_logical_id("example") +``` + +### OIDC Identity Provider + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. 
+# +from imports.aws.workspacesweb_identity_provider import WorkspaceswebIdentityProvider +from imports.aws.workspacesweb_portal import WorkspaceswebPortal +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + test = WorkspaceswebPortal(self, "test", + display_name="test" + ) + aws_workspacesweb_identity_provider_test = + WorkspaceswebIdentityProvider(self, "test_1", + identity_provider_details={ + "attributes_request_method": "POST", + "authorize_scopes": "openid, email", + "client_id": "test-client-id", + "client_secret": "test-client-secret", + "oidc_issuer": "https://accounts.google.com" + }, + identity_provider_name="test-updated", + identity_provider_type="OIDC", + portal_arn=test.portal_arn + ) + # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. + aws_workspacesweb_identity_provider_test.override_logical_id("test") +``` + +## Argument Reference + +The following arguments are required: + +* `identity_provider_details` - (Required) Identity provider details. 
The following list describes the provider detail keys for each identity provider type: + * For Google and Login with Amazon: + * `client_id` + * `client_secret` + * `authorize_scopes` + * For Facebook: + * `client_id` + * `client_secret` + * `authorize_scopes` + * `api_version` + * For Sign in with Apple: + * `client_id` + * `team_id` + * `key_id` + * `private_key` + * `authorize_scopes` + * For OIDC providers: + * `client_id` + * `client_secret` + * `attributes_request_method` + * `oidc_issuer` + * `authorize_scopes` + * `authorize_url` if not available from discovery URL specified by `oidc_issuer` key + * `token_url` if not available from discovery URL specified by `oidc_issuer` key + * `attributes_url` if not available from discovery URL specified by `oidc_issuer` key + * `jwks_uri` if not available from discovery URL specified by `oidc_issuer` key + * For SAML providers: + * `MetadataFile` OR `MetadataURL` + * `IDPSignout` (boolean) optional + * `IDPInit` (boolean) optional + * `RequestSigningAlgorithm` (string) optional - Only accepts rsa-sha256 + * `EncryptedResponses` (boolean) optional +* `identity_provider_name` - (Required) Identity provider name. +* `identity_provider_type` - (Required) Identity provider type. Valid values: `SAML`, `Facebook`, `Google`, `LoginWithAmazon`, `SignInWithApple`, `OIDC`. +* `portal_arn` - (Required) ARN of the web portal. Forces replacement if changed. + +The following arguments are optional: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `tags` - (Optional) Map of tags to assign to the resource. 
If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. + +## Attribute Reference + +This resource exports the following attributes in addition to the arguments above: + +* `identity_provider_arn` - ARN of the identity provider. +* `tags_all` - Map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). + +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import WorkSpaces Web Identity Provider using the `identity_provider_arn`. For example: + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.workspacesweb_identity_provider import WorkspaceswebIdentityProvider +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + WorkspaceswebIdentityProvider.generate_config_for_import(self, "example", "arn:aws:workspaces-web:us-west-2:123456789012:identityprovider/abcdef12345678/12345678-1234-1234-1234-123456789012") +``` + +Using `terraform import`, import WorkSpaces Web Identity Provider using the `identity_provider_arn`. 
For example: + +```console +% terraform import aws_workspacesweb_identity_provider.example arn:aws:workspaces-web:us-west-2:123456789012:identityprovider/abcdef12345678/12345678-1234-1234-1234-123456789012 +``` + + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/workspacesweb_ip_access_settings.html.markdown b/website/docs/cdktf/python/r/workspacesweb_ip_access_settings.html.markdown index 15c6395509ce..4355d524f77c 100644 --- a/website/docs/cdktf/python/r/workspacesweb_ip_access_settings.html.markdown +++ b/website/docs/cdktf/python/r/workspacesweb_ip_access_settings.html.markdown @@ -24,15 +24,15 @@ from cdktf import TerraformStack # Provider bindings are generated by running `cdktf get`. # See https://cdk.tf/provider-generation for more details. # -from imports.aws. import WorkspaceswebIpAccessSettings +from imports.aws.workspacesweb_ip_access_settings import WorkspaceswebIpAccessSettings class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) WorkspaceswebIpAccessSettings(self, "example", display_name="example", - ip_rule=[{ - "ip_range": "10.0.0.0/16" - } + ip_rule=[WorkspaceswebIpAccessSettingsIpRule( + ip_range="10.0.0.0/16" + ) ] ) ``` @@ -47,20 +47,20 @@ from cdktf import TerraformStack # Provider bindings are generated by running `cdktf get`. # See https://cdk.tf/provider-generation for more details. # -from imports.aws. 
import WorkspaceswebIpAccessSettings +from imports.aws.workspacesweb_ip_access_settings import WorkspaceswebIpAccessSettings class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) WorkspaceswebIpAccessSettings(self, "example", description="Example IP access settings", display_name="example", - ip_rule=[{ - "description": "Main office", - "ip_range": "10.0.0.0/16" - }, { - "description": "Branch office", - "ip_range": "192.168.0.0/24" - } + ip_rule=[WorkspaceswebIpAccessSettingsIpRule( + description="Main office", + ip_range="10.0.0.0/16" + ), WorkspaceswebIpAccessSettingsIpRule( + description="Branch office", + ip_range="192.168.0.0/24" + ) ] ) ``` @@ -75,8 +75,8 @@ from cdktf import TerraformStack # Provider bindings are generated by running `cdktf get`. # See https://cdk.tf/provider-generation for more details. # -from imports.aws. import WorkspaceswebIpAccessSettings from imports.aws.kms_key import KmsKey +from imports.aws.workspacesweb_ip_access_settings import WorkspaceswebIpAccessSettings class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) @@ -86,20 +86,19 @@ class MyConvertedCode(TerraformStack): ) aws_workspacesweb_ip_access_settings_example = WorkspaceswebIpAccessSettings(self, "example_1", - additional_encryption_context=[{ + additional_encryption_context={ "Environment": "Production" - } - ], + }, customer_managed_key=example.arn, description="Example IP access settings", display_name="example", - ip_rule=[{ - "description": "Main office", - "ip_range": "10.0.0.0/16" - }, { - "description": "Branch office", - "ip_range": "192.168.0.0/24" - } + ip_rule=[WorkspaceswebIpAccessSettingsIpRule( + description="Main office", + ip_range="10.0.0.0/16" + ), WorkspaceswebIpAccessSettingsIpRule( + description="Branch office", + ip_range="192.168.0.0/24" + ) ], tags={ "Name": "example-ip-access-settings" @@ -121,6 +120,7 @@ The following arguments are optional: * 
`additional_encryption_context` - (Optional) Additional encryption context for the IP access settings. * `customer_managed_key` - (Optional) ARN of the customer managed KMS key. * `description` - (Optional) The description of the IP access settings. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `tags` - (Optional) Map of tags assigned to the resource. If configured with a provider [`default_tags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. ### IP Rules @@ -148,7 +148,7 @@ from cdktf import TerraformStack # Provider bindings are generated by running `cdktf get`. # See https://cdk.tf/provider-generation for more details. # -from imports.aws. 
import WorkspaceswebIpAccessSettings +from imports.aws.workspacesweb_ip_access_settings import WorkspaceswebIpAccessSettings class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) @@ -161,4 +161,4 @@ Using `terraform import`, import WorkSpaces Web IP Access Settings using the `ip % terraform import aws_workspacesweb_ip_access_settings.example arn:aws:workspaces-web:us-west-2:123456789012:ipAccessSettings/abcdef12345 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/workspacesweb_ip_access_settings_association.html.markdown b/website/docs/cdktf/python/r/workspacesweb_ip_access_settings_association.html.markdown new file mode 100644 index 000000000000..aa5f06335720 --- /dev/null +++ b/website/docs/cdktf/python/r/workspacesweb_ip_access_settings_association.html.markdown @@ -0,0 +1,88 @@ +--- +subcategory: "WorkSpaces Web" +layout: "aws" +page_title: "AWS: aws_workspacesweb_ip_access_settings_association" +description: |- + Terraform resource for managing an AWS WorkSpaces Web IP Access Settings Association. +--- + + + +# Resource: aws_workspacesweb_ip_access_settings_association + +Terraform resource for managing an AWS WorkSpaces Web IP Access Settings Association. + +## Example Usage + +### Basic Usage + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import Token, TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. 
+# +from imports.aws.workspacesweb_ip_access_settings import WorkspaceswebIpAccessSettings +from imports.aws.workspacesweb_ip_access_settings_association import WorkspaceswebIpAccessSettingsAssociation +from imports.aws.workspacesweb_portal import WorkspaceswebPortal +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + example = WorkspaceswebIpAccessSettings(self, "example", + display_name="example", + ip_rule=[WorkspaceswebIpAccessSettingsIpRule( + ip_range="10.0.0.0/16" + ) + ] + ) + aws_workspacesweb_portal_example = WorkspaceswebPortal(self, "example_1", + display_name="example" + ) + # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. + aws_workspacesweb_portal_example.override_logical_id("example") + aws_workspacesweb_ip_access_settings_association_example = + WorkspaceswebIpAccessSettingsAssociation(self, "example_2", + ip_access_settings_arn=example.ip_access_settings_arn, + portal_arn=Token.as_string(aws_workspacesweb_portal_example.portal_arn) + ) + # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. + aws_workspacesweb_ip_access_settings_association_example.override_logical_id("example") +``` + +## Argument Reference + +The following arguments are required: + +* `ip_access_settings_arn` - (Required) ARN of the IP access settings to associate with the portal. Forces replacement if changed. +* `portal_arn` - (Required) ARN of the portal to associate with the IP access settings. Forces replacement if changed. + +The following arguments are optional: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
+ +## Attribute Reference + +This resource exports no additional attributes. + +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import WorkSpaces Web IP Access Settings Association using the `ip_access_settings_arn,portal_arn`. For example: + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.workspacesweb_ip_access_settings_association import WorkspaceswebIpAccessSettingsAssociation +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + WorkspaceswebIpAccessSettingsAssociation.generate_config_for_import(self, "example", "arn:aws:workspaces-web:us-west-2:123456789012:ipAccessSettings/ip_access_settings-id-12345678,arn:aws:workspaces-web:us-west-2:123456789012:portal/portal-id-12345678") +``` + + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/workspacesweb_network_settings.html.markdown b/website/docs/cdktf/python/r/workspacesweb_network_settings.html.markdown index c0147949db49..1089cb51b041 100644 --- a/website/docs/cdktf/python/r/workspacesweb_network_settings.html.markdown +++ b/website/docs/cdktf/python/r/workspacesweb_network_settings.html.markdown @@ -85,6 +85,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `tags` - (Optional) Map of tags assigned to the resource. 
If configured with a provider [`default_tags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. ## Attribute Reference @@ -120,4 +121,4 @@ Using `terraform import`, import WorkSpaces Web Network Settings using the `netw % terraform import aws_workspacesweb_network_settings.example arn:aws:workspacesweb:us-west-2:123456789012:networksettings/abcdef12345 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/workspacesweb_network_settings_association.html.markdown b/website/docs/cdktf/python/r/workspacesweb_network_settings_association.html.markdown new file mode 100644 index 000000000000..4404a406a105 --- /dev/null +++ b/website/docs/cdktf/python/r/workspacesweb_network_settings_association.html.markdown @@ -0,0 +1,147 @@ +--- +subcategory: "WorkSpaces Web" +layout: "aws" +page_title: "AWS: aws_workspacesweb_network_settings_association" +description: |- + Terraform resource for managing an AWS WorkSpaces Web Network Settings Association. +--- + + + +# Resource: aws_workspacesweb_network_settings_association + +Terraform resource for managing an AWS WorkSpaces Web Network Settings Association. + +## Example Usage + +### Basic Usage + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import Token, TerraformCount, Fn, TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. 
+# +from imports.aws.data_aws_availability_zones import DataAwsAvailabilityZones +from imports.aws.security_group import SecurityGroup +from imports.aws.subnet import Subnet +from imports.aws.vpc import Vpc +from imports.aws.workspacesweb_network_settings import WorkspaceswebNetworkSettings +from imports.aws.workspacesweb_network_settings_association import WorkspaceswebNetworkSettingsAssociation +from imports.aws.workspacesweb_portal import WorkspaceswebPortal +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + example = Vpc(self, "example", + cidr_block="10.0.0.0/16", + tags={ + "Name": "example" + } + ) + aws_workspacesweb_portal_example = WorkspaceswebPortal(self, "example_1", + display_name="example" + ) + # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. + aws_workspacesweb_portal_example.override_logical_id("example") + available = DataAwsAvailabilityZones(self, "available", + filter=[DataAwsAvailabilityZonesFilter( + name="opt-in-status", + values=["opt-in-not-required"] + ) + ], + state="available" + ) + # In most cases loops should be handled in the programming language context and + # not inside of the Terraform context. If you are looping over something external, e.g. a variable or a file input + # you should consider using a for loop. If you are looping over something only known to Terraform, e.g. a result of a data source + # you need to keep this like it is. + example_count = TerraformCount.of(Token.as_number("2")) + aws_security_group_example = SecurityGroup(self, "example_3", + name="example-${" + example_count.index + "}", + tags={ + "Name": "example" + }, + vpc_id=example.id, + count=example_count + ) + # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. 
+ aws_security_group_example.override_logical_id("example") + # In most cases loops should be handled in the programming language context and + # not inside of the Terraform context. If you are looping over something external, e.g. a variable or a file input + # you should consider using a for loop. If you are looping over something only known to Terraform, e.g. a result of a data source + # you need to keep this like it is. + aws_subnet_example_count = TerraformCount.of(Token.as_number("2")) + aws_subnet_example = Subnet(self, "example_4", + availability_zone=Token.as_string( + Fn.lookup_nested(available.names, [aws_subnet_example_count.index])), + cidr_block=Token.as_string( + Fn.cidrsubnet(example.cidr_block, 8, + Token.as_number(aws_subnet_example_count.index))), + tags={ + "Name": "example" + }, + vpc_id=example.id, + count=aws_subnet_example_count + ) + # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. + aws_subnet_example.override_logical_id("example") + aws_workspacesweb_network_settings_example = + WorkspaceswebNetworkSettings(self, "example_5", + security_group_ids=[ + Token.as_string(Fn.lookup_nested(aws_security_group_example, ["0", "id"])), + Token.as_string(Fn.lookup_nested(aws_security_group_example, ["1", "id"])) + ], + subnet_ids=[ + Token.as_string(Fn.lookup_nested(aws_subnet_example, ["0", "id"])), + Token.as_string(Fn.lookup_nested(aws_subnet_example, ["1", "id"])) + ], + vpc_id=example.id + ) + # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. 
+ aws_workspacesweb_network_settings_example.override_logical_id("example") + aws_workspacesweb_network_settings_association_example = + WorkspaceswebNetworkSettingsAssociation(self, "example_6", + network_settings_arn=Token.as_string(aws_workspacesweb_network_settings_example.network_settings_arn), + portal_arn=Token.as_string(aws_workspacesweb_portal_example.portal_arn) + ) + # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. + aws_workspacesweb_network_settings_association_example.override_logical_id("example") +``` + +## Argument Reference + +The following arguments are required: + +* `network_settings_arn` - (Required) ARN of the network settings to associate with the portal. Forces replacement if changed. +* `portal_arn` - (Required) ARN of the portal to associate with the network settings. Forces replacement if changed. + +The following arguments are optional: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). + +## Attribute Reference + +This resource exports no additional attributes. + +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import WorkSpaces Web Network Settings Association using the `network_settings_arn,portal_arn`. For example: + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. 
+# +from imports.aws.workspacesweb_network_settings_association import WorkspaceswebNetworkSettingsAssociation +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + WorkspaceswebNetworkSettingsAssociation.generate_config_for_import(self, "example", "arn:aws:workspaces-web:us-west-2:123456789012:networkSettings/network_settings-id-12345678,arn:aws:workspaces-web:us-west-2:123456789012:portal/portal-id-12345678") +``` + + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/workspacesweb_portal.html.markdown b/website/docs/cdktf/python/r/workspacesweb_portal.html.markdown new file mode 100644 index 000000000000..a2f54169625a --- /dev/null +++ b/website/docs/cdktf/python/r/workspacesweb_portal.html.markdown @@ -0,0 +1,146 @@ +--- +subcategory: "WorkSpaces Web" +layout: "aws" +page_title: "AWS: aws_workspacesweb_portal" +description: |- + Terraform resource for managing an AWS WorkSpaces Web Portal. +--- + + + +# Resource: aws_workspacesweb_portal + +Terraform resource for managing an AWS WorkSpaces Web Portal. + +## Example Usage + +### Basic Usage + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.workspacesweb_portal import WorkspaceswebPortal +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + WorkspaceswebPortal(self, "example", + display_name="example-portal", + instance_type="standard.regular" + ) +``` + +### Complete Usage + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformStack +# +# Provider bindings are generated by running `cdktf get`. 
+# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.kms_key import KmsKey +from imports.aws.workspacesweb_portal import WorkspaceswebPortal +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + example = KmsKey(self, "example", + deletion_window_in_days=7, + description="KMS key for WorkSpaces Web Portal" + ) + aws_workspacesweb_portal_example = WorkspaceswebPortal(self, "example_1", + additional_encryption_context={ + "Environment": "Production" + }, + authentication_type="IAM_Identity_Center", + customer_managed_key=example.arn, + display_name="example-portal", + instance_type="standard.large", + max_concurrent_sessions=10, + tags={ + "Name": "example-portal" + }, + timeouts=[{ + "create": "10m", + "delete": "10m", + "update": "10m" + } + ] + ) + # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. + aws_workspacesweb_portal_example.override_logical_id("example") +``` + +## Argument Reference + +The following arguments are optional: + +* `additional_encryption_context` - (Optional) Additional encryption context for the customer managed key. Forces replacement if changed. +* `authentication_type` - (Optional) Authentication type for the portal. Valid values: `Standard`, `IAM_Identity_Center`. +* `browser_settings_arn` - (Optional) ARN of the browser settings to use for the portal. +* `customer_managed_key` - (Optional) ARN of the customer managed key. Forces replacement if changed. +* `display_name` - (Optional) Display name of the portal. +* `instance_type` - (Optional) Instance type for the portal. Valid values: `standard.regular`, `standard.large`. +* `max_concurrent_sessions` - (Optional) Maximum number of concurrent sessions for the portal. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `tags` - (Optional) Key-value map of resource tags. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. + +## Attribute Reference + +This resource exports the following attributes in addition to the arguments above: + +* `browser_type` - Browser type of the portal. +* `creation_date` - Creation date of the portal. +* `data_protection_settings_arn` - ARN of the data protection settings associated with the portal. +* `ip_access_settings_arn` - ARN of the IP access settings associated with the portal. +* `network_settings_arn` - ARN of the network settings associated with the portal. +* `portal_arn` - ARN of the portal. +* `portal_endpoint` - Endpoint URL of the portal. +* `portal_status` - Status of the portal. +* `renderer_type` - Renderer type of the portal. +* `session_logger_arn` - ARN of the session logger associated with the portal. +* `status_reason` - Reason for the current status of the portal. +* `tags_all` - Map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). +* `trust_store_arn` - ARN of the trust store associated with the portal. +* `user_access_logging_settings_arn` - ARN of the user access logging settings associated with the portal. +* `user_settings_arn` - ARN of the user settings associated with the portal. 
+ +## Timeouts + +[Configuration options](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts): + +* `create` - (Default `5m`) +* `update` - (Default `5m`) +* `delete` - (Default `5m`) + +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import WorkSpaces Web Portal using the `portal_arn`. For example: + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.workspacesweb_portal import WorkspaceswebPortal +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + WorkspaceswebPortal.generate_config_for_import(self, "example", "arn:aws:workspaces-web:us-west-2:123456789012:portal/abcdef12345678") +``` + +Using `terraform import`, import WorkSpaces Web Portal using the `portal_arn`. For example: + +```console +% terraform import aws_workspacesweb_portal.example arn:aws:workspaces-web:us-west-2:123456789012:portal/abcdef12345678 +``` + + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/workspacesweb_session_logger.html.markdown b/website/docs/cdktf/python/r/workspacesweb_session_logger.html.markdown new file mode 100644 index 000000000000..753347c6b90f --- /dev/null +++ b/website/docs/cdktf/python/r/workspacesweb_session_logger.html.markdown @@ -0,0 +1,257 @@ +--- +subcategory: "WorkSpaces Web" +layout: "aws" +page_title: "AWS: aws_workspacesweb_session_logger" +description: |- + Terraform resource for managing an AWS WorkSpaces Web Session Logger. +--- + + + +# Resource: aws_workspacesweb_session_logger + +Terraform resource for managing an AWS WorkSpaces Web Session Logger. 
+ +## Example Usage + +### Basic Usage + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import Token, TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.data_aws_iam_policy_document import DataAwsIamPolicyDocument +from imports.aws.s3_bucket import S3Bucket +from imports.aws.s3_bucket_policy import S3BucketPolicy +from imports.aws.workspacesweb_session_logger import WorkspaceswebSessionLogger +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + example = S3Bucket(self, "example", + bucket="example-session-logs" + ) + data_aws_iam_policy_document_example = DataAwsIamPolicyDocument(self, "example_1", + statement=[DataAwsIamPolicyDocumentStatement( + actions=["s3:PutObject"], + effect="Allow", + principals=[DataAwsIamPolicyDocumentStatementPrincipals( + identifiers=["workspaces-web.amazonaws.com"], + type="Service" + ) + ], + resources=["${" + example.arn + "}/*"] + ) + ] + ) + # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. + data_aws_iam_policy_document_example.override_logical_id("example") + aws_s3_bucket_policy_example = S3BucketPolicy(self, "example_2", + bucket=example.id, + policy=Token.as_string(data_aws_iam_policy_document_example.json) + ) + # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. 
+ aws_s3_bucket_policy_example.override_logical_id("example") + aws_workspacesweb_session_logger_example = WorkspaceswebSessionLogger(self, "example_3", + depends_on=[aws_s3_bucket_policy_example], + display_name="example-session-logger", + event_filter=[WorkspaceswebSessionLoggerEventFilter( + all=[WorkspaceswebSessionLoggerEventFilterAll()] + ) + ], + log_configuration=[WorkspaceswebSessionLoggerLogConfiguration( + s3=[WorkspaceswebSessionLoggerLogConfigurationS3( + bucket=example.id, + folder_structure="Flat", + log_file_format="Json" + ) + ] + ) + ] + ) + # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. + aws_workspacesweb_session_logger_example.override_logical_id("example") +``` + +### Complete Configuration with KMS Encryption + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import Token, TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. 
+# +from imports.aws.data_aws_caller_identity import DataAwsCallerIdentity +from imports.aws.data_aws_iam_policy_document import DataAwsIamPolicyDocument +from imports.aws.data_aws_partition import DataAwsPartition +from imports.aws.kms_key import KmsKey +from imports.aws.s3_bucket import S3Bucket +from imports.aws.s3_bucket_policy import S3BucketPolicy +from imports.aws.workspacesweb_session_logger import WorkspaceswebSessionLogger +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + example = S3Bucket(self, "example", + bucket="example-session-logs", + force_destroy=True + ) + current = DataAwsCallerIdentity(self, "current") + data_aws_iam_policy_document_example = DataAwsIamPolicyDocument(self, "example_2", + statement=[DataAwsIamPolicyDocumentStatement( + actions=["s3:PutObject"], + effect="Allow", + principals=[DataAwsIamPolicyDocumentStatementPrincipals( + identifiers=["workspaces-web.amazonaws.com"], + type="Service" + ) + ], + resources=[example.arn, "${" + example.arn + "}/*"] + ) + ] + ) + # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. + data_aws_iam_policy_document_example.override_logical_id("example") + data_aws_partition_current = DataAwsPartition(self, "current_3") + # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. + data_aws_partition_current.override_logical_id("current") + aws_s3_bucket_policy_example = S3BucketPolicy(self, "example_4", + bucket=example.id, + policy=Token.as_string(data_aws_iam_policy_document_example.json) + ) + # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. 
+ aws_s3_bucket_policy_example.override_logical_id("example") + kms_key_policy = DataAwsIamPolicyDocument(self, "kms_key_policy", + statement=[DataAwsIamPolicyDocumentStatement( + actions=["kms:*"], + principals=[DataAwsIamPolicyDocumentStatementPrincipals( + identifiers=["arn:${" + data_aws_partition_current.partition + "}:iam::${" + current.account_id + "}:root" + ], + type="AWS" + ) + ], + resources=["*"] + ), DataAwsIamPolicyDocumentStatement( + actions=["kms:Encrypt", "kms:GenerateDataKey*", "kms:ReEncrypt*", "kms:Decrypt" + ], + principals=[DataAwsIamPolicyDocumentStatementPrincipals( + identifiers=["workspaces-web.amazonaws.com"], + type="Service" + ) + ], + resources=["*"] + ) + ] + ) + aws_kms_key_example = KmsKey(self, "example_6", + description="KMS key for WorkSpaces Web Session Logger", + policy=Token.as_string(kms_key_policy.json) + ) + # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. + aws_kms_key_example.override_logical_id("example") + aws_workspacesweb_session_logger_example = WorkspaceswebSessionLogger(self, "example_7", + additional_encryption_context={ + "Application": "WorkSpacesWeb", + "Environment": "Production" + }, + customer_managed_key=Token.as_string(aws_kms_key_example.arn), + depends_on=[aws_s3_bucket_policy_example, aws_kms_key_example], + display_name="example-session-logger", + event_filter=[WorkspaceswebSessionLoggerEventFilter( + include=["SessionStart", "SessionEnd"] + ) + ], + log_configuration=[WorkspaceswebSessionLoggerLogConfiguration( + s3=[WorkspaceswebSessionLoggerLogConfigurationS3( + bucket=example.id, + bucket_owner=Token.as_string(current.account_id), + folder_structure="NestedByDate", + key_prefix="workspaces-web-logs/", + log_file_format="JsonLines" + ) + ] + ) + ], + tags={ + "Environment": "Production", + "Name": "example-session-logger" + } + ) + # This allows the Terraform resource name to match the original name. 
You can remove the call if you don't need them to match. + aws_workspacesweb_session_logger_example.override_logical_id("example") +``` + +## Argument Reference + +The following arguments are required: + +* `event_filter` - (Required) Event filter that determines which events are logged. See [Event Filter](#event-filter) below. +* `log_configuration` - (Required) Configuration block for specifying where logs are delivered. See [Log Configuration](#log-configuration) below. + +The following arguments are optional: + +* `additional_encryption_context` - (Optional) Map of additional encryption context key-value pairs. +* `customer_managed_key` - (Optional) ARN of the customer managed KMS key used to encrypt sensitive information. +* `display_name` - (Optional) Human-readable display name for the session logger resource. Forces replacement if changed. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `tags` - (Optional) Map of tags to assign to the resource. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. + +### Log Configuration + +* `s3` - (Required) Configuration block for S3 log delivery. See [S3 Configuration](#s3-configuration) below. + +### Event Filter + +Exactly one of the following must be specified: + +* `all` - (Optional) Block that specifies to monitor all events. Set to `{}` to monitor all events. +* `include` - (Optional) List of specific events to monitor. Valid values include session events like `SessionStart`, `SessionEnd`, etc. 
+ +### S3 Configuration + +* `bucket` - (Required) S3 bucket name where logs are delivered. +* `folder_structure` - (Required) Folder structure that defines the organizational structure for log files in S3. Valid values: `Flat`, `NestedByDate`. +* `log_file_format` - (Required) Format of the log file written to S3. Valid values: `Json`, `JsonLines`. +* `bucket_owner` - (Optional) Expected bucket owner of the target S3 bucket. +* `key_prefix` - (Optional) S3 path prefix that determines where log files are stored. + +## Attribute Reference + +This resource exports the following attributes in addition to the arguments above: + +* `associated_portal_arns` - List of ARNs of the web portals associated with the session logger. +* `session_logger_arn` - ARN of the session logger. +* `tags_all` - Map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). + +~> **Note:** The `additional_encryption_context` and `customer_managed_key` attributes are computed when not specified and will be populated with values from the AWS API response. + +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import WorkSpaces Web Session Logger using the `session_logger_arn`. For example: + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. 
+# +from imports.aws.workspacesweb_session_logger import WorkspaceswebSessionLogger +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + WorkspaceswebSessionLogger.generate_config_for_import(self, "example", "arn:aws:workspaces-web:us-west-2:123456789012:sessionLogger/session_logger-id-12345678") +``` + +Using `terraform import`, import WorkSpaces Web Session Logger using the `session_logger_arn`. For example: + +```console +% terraform import aws_workspacesweb_session_logger.example arn:aws:workspaces-web:us-west-2:123456789012:sessionLogger/session_logger-id-12345678 +``` + + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/workspacesweb_session_logger_association.html.markdown b/website/docs/cdktf/python/r/workspacesweb_session_logger_association.html.markdown new file mode 100644 index 000000000000..0d2b6467f87f --- /dev/null +++ b/website/docs/cdktf/python/r/workspacesweb_session_logger_association.html.markdown @@ -0,0 +1,134 @@ +--- +subcategory: "WorkSpaces Web" +layout: "aws" +page_title: "AWS: aws_workspacesweb_session_logger_association" +description: |- + Terraform resource for managing an AWS WorkSpaces Web Session Logger Association. +--- + + + +# Resource: aws_workspacesweb_session_logger_association + +Terraform resource for managing an AWS WorkSpaces Web Session Logger Association. + +## Example Usage + +### Basic Usage + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import Token, TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. 
+# +from imports.aws.data_aws_iam_policy_document import DataAwsIamPolicyDocument +from imports.aws.s3_bucket import S3Bucket +from imports.aws.s3_bucket_policy import S3BucketPolicy +from imports.aws.workspacesweb_portal import WorkspaceswebPortal +from imports.aws.workspacesweb_session_logger import WorkspaceswebSessionLogger +from imports.aws.workspacesweb_session_logger_association import WorkspaceswebSessionLoggerAssociation +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + example = S3Bucket(self, "example", + bucket="example-session-logs", + force_destroy=True + ) + aws_workspacesweb_portal_example = WorkspaceswebPortal(self, "example_1", + display_name="example" + ) + # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. + aws_workspacesweb_portal_example.override_logical_id("example") + data_aws_iam_policy_document_example = DataAwsIamPolicyDocument(self, "example_2", + statement=[DataAwsIamPolicyDocumentStatement( + actions=["s3:PutObject"], + effect="Allow", + principals=[DataAwsIamPolicyDocumentStatementPrincipals( + identifiers=["workspaces-web.amazonaws.com"], + type="Service" + ) + ], + resources=["${" + example.arn + "}/*"] + ) + ] + ) + # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. + data_aws_iam_policy_document_example.override_logical_id("example") + aws_s3_bucket_policy_example = S3BucketPolicy(self, "example_3", + bucket=example.id, + policy=Token.as_string(data_aws_iam_policy_document_example.json) + ) + # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. 
+ aws_s3_bucket_policy_example.override_logical_id("example") + aws_workspacesweb_session_logger_example = WorkspaceswebSessionLogger(self, "example_4", + depends_on=[aws_s3_bucket_policy_example], + display_name="example", + event_filter=[WorkspaceswebSessionLoggerEventFilter( + all=[WorkspaceswebSessionLoggerEventFilterAll()] + ) + ], + log_configuration=[WorkspaceswebSessionLoggerLogConfiguration( + s3=[WorkspaceswebSessionLoggerLogConfigurationS3( + bucket=example.id, + folder_structure="Flat", + log_file_format="Json" + ) + ] + ) + ] + ) + # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. + aws_workspacesweb_session_logger_example.override_logical_id("example") + aws_workspacesweb_session_logger_association_example = + WorkspaceswebSessionLoggerAssociation(self, "example_5", + portal_arn=Token.as_string(aws_workspacesweb_portal_example.portal_arn), + session_logger_arn=Token.as_string(aws_workspacesweb_session_logger_example.session_logger_arn) + ) + # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. + aws_workspacesweb_session_logger_association_example.override_logical_id("example") +``` + +## Argument Reference + +The following arguments are required: + +* `portal_arn` - (Required) ARN of the web portal. +* `session_logger_arn` - (Required) ARN of the session logger. + +The following arguments are optional: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). + +## Attribute Reference + +This resource exports no additional attributes. 
+ +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import WorkSpaces Web Session Logger Association using the `session_logger_arn,portal_arn`. For example: + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.workspacesweb_session_logger_association import WorkspaceswebSessionLoggerAssociation +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + WorkspaceswebSessionLoggerAssociation.generate_config_for_import(self, "example", "arn:aws:workspaces-web:us-west-2:123456789012:sessionLogger/session_logger-id-12345678,arn:aws:workspaces-web:us-west-2:123456789012:portal/portal-id-12345678") +``` + +Using `terraform import`, import WorkSpaces Web Session Logger Association using the `session_logger_arn,portal_arn`. For example: + +```console +% terraform import aws_workspacesweb_session_logger_association.example arn:aws:workspaces-web:us-west-2:123456789012:sessionLogger/session_logger-id-12345678,arn:aws:workspaces-web:us-west-2:123456789012:portal/portal-id-12345678 +``` + + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/workspacesweb_trust_store.html.markdown b/website/docs/cdktf/python/r/workspacesweb_trust_store.html.markdown new file mode 100644 index 000000000000..657699ad107f --- /dev/null +++ b/website/docs/cdktf/python/r/workspacesweb_trust_store.html.markdown @@ -0,0 +1,119 @@ +--- +subcategory: "WorkSpaces Web" +layout: "aws" +page_title: "AWS: aws_workspacesweb_trust_store" +description: |- + Terraform resource for managing an AWS WorkSpaces Web Trust Store. 
+--- + + + +# Resource: aws_workspacesweb_trust_store + +Terraform resource for managing an AWS WorkSpaces Web Trust Store. + +## Example Usage + +### Basic Usage + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import Fn, Token, TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.workspacesweb_trust_store import WorkspaceswebTrustStore +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + WorkspaceswebTrustStore(self, "example", + certificate=[WorkspaceswebTrustStoreCertificate( + body=Token.as_string(Fn.file("certificate.pem")) + ) + ] + ) +``` + +### Multiple Certificates + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import Fn, Token, TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.workspacesweb_trust_store import WorkspaceswebTrustStore +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + WorkspaceswebTrustStore(self, "example", + certificate=[WorkspaceswebTrustStoreCertificate( + body=Token.as_string(Fn.file("certificate1.pem")) + ), WorkspaceswebTrustStoreCertificate( + body=Token.as_string(Fn.file("certificate2.pem")) + ) + ], + tags={ + "Name": "example-trust-store" + } + ) +``` + +## Argument Reference + +The following arguments are optional: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `certificate` - (Optional) Set of certificates to include in the trust store. See [Certificate](#certificate) below. +* `tags` - (Optional) Map of tags to assign to the resource. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. + +### Certificate + +* `body` - (Required) Certificate body in PEM format. + +## Attribute Reference + +This resource exports the following attributes in addition to the arguments above: + +* `associated_portal_arns` - List of ARNs of the web portals associated with the trust store. +* `trust_store_arn` - ARN of the trust store. +* `tags_all` - Map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). + +The `certificate` block exports the following additional attributes: + +* `issuer` - Certificate issuer. +* `not_valid_after` - Date and time when the certificate expires in RFC3339 format. +* `not_valid_before` - Date and time when the certificate becomes valid in RFC3339 format. +* `subject` - Certificate subject. +* `thumbprint` - Certificate thumbprint. + +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import WorkSpaces Web Trust Store using the `trust_store_arn`. For example: + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformStack +# +# Provider bindings are generated by running `cdktf get`. 
+# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.workspacesweb_trust_store import WorkspaceswebTrustStore +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + WorkspaceswebTrustStore.generate_config_for_import(self, "example", "arn:aws:workspaces-web:us-west-2:123456789012:trustStore/trust_store-id-12345678") +``` + +Using `terraform import`, import WorkSpaces Web Trust Store using the `trust_store_arn`. For example: + +```console +% terraform import aws_workspacesweb_trust_store.example arn:aws:workspaces-web:us-west-2:123456789012:trustStore/trust_store-id-12345678 +``` + + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/workspacesweb_trust_store_association.html.markdown b/website/docs/cdktf/python/r/workspacesweb_trust_store_association.html.markdown new file mode 100644 index 000000000000..202aaea43b42 --- /dev/null +++ b/website/docs/cdktf/python/r/workspacesweb_trust_store_association.html.markdown @@ -0,0 +1,92 @@ +--- +subcategory: "WorkSpaces Web" +layout: "aws" +page_title: "AWS: aws_workspacesweb_trust_store_association" +description: |- + Terraform resource for managing an AWS WorkSpaces Web Trust Store Association. +--- + + + +# Resource: aws_workspacesweb_trust_store_association + +Terraform resource for managing an AWS WorkSpaces Web Trust Store Association. + +## Example Usage + +### Basic Usage + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import Fn, Token, TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. 
+# +from imports.aws.workspacesweb_portal import WorkspaceswebPortal +from imports.aws.workspacesweb_trust_store import WorkspaceswebTrustStore +from imports.aws.workspacesweb_trust_store_association import WorkspaceswebTrustStoreAssociation +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + example = WorkspaceswebPortal(self, "example", + display_name="example" + ) + aws_workspacesweb_trust_store_example = WorkspaceswebTrustStore(self, "example_1", + certificate_list=[ + Fn.base64encode(Token.as_string(Fn.file("certificate.pem"))) + ] + ) + # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. + aws_workspacesweb_trust_store_example.override_logical_id("example") + aws_workspacesweb_trust_store_association_example = + WorkspaceswebTrustStoreAssociation(self, "example_2", + portal_arn=example.portal_arn, + trust_store_arn=Token.as_string(aws_workspacesweb_trust_store_example.trust_store_arn) + ) + # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. + aws_workspacesweb_trust_store_association_example.override_logical_id("example") +``` + +## Argument Reference + +The following arguments are required: + +* `trust_store_arn` - (Required) ARN of the trust store to associate with the portal. Forces replacement if changed. +* `portal_arn` - (Required) ARN of the portal to associate with the trust store. Forces replacement if changed. + +The following arguments are optional: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). + +## Attribute Reference + +This resource exports no additional attributes. 
+ +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import WorkSpaces Web Trust Store Association using the `trust_store_arn,portal_arn`. For example: + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.workspacesweb_trust_store_association import WorkspaceswebTrustStoreAssociation +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + WorkspaceswebTrustStoreAssociation.generate_config_for_import(self, "example", "arn:aws:workspaces-web:us-west-2:123456789012:trustStore/trust_store-id-12345678,arn:aws:workspaces-web:us-west-2:123456789012:portal/portal-id-12345678") +``` + +Using `terraform import`, import WorkSpaces Web Trust Store Association using the `trust_store_arn,portal_arn`. For example: + +```console +% terraform import aws_workspacesweb_trust_store_association.example arn:aws:workspaces-web:us-west-2:123456789012:trustStore/trust_store-id-12345678,arn:aws:workspaces-web:us-west-2:123456789012:portal/portal-id-12345678 +``` + + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/workspacesweb_user_access_logging_settings.html.markdown b/website/docs/cdktf/python/r/workspacesweb_user_access_logging_settings.html.markdown index faefa9b5fa91..8b85630b5579 100644 --- a/website/docs/cdktf/python/r/workspacesweb_user_access_logging_settings.html.markdown +++ b/website/docs/cdktf/python/r/workspacesweb_user_access_logging_settings.html.markdown @@ -24,8 +24,8 @@ from cdktf import TerraformStack # Provider bindings are generated by running `cdktf get`. # See https://cdk.tf/provider-generation for more details. # -from imports.aws. 
import WorkspaceswebUserAccessLoggingSettings from imports.aws.kinesis_stream import KinesisStream +from imports.aws.workspacesweb_user_access_logging_settings import WorkspaceswebUserAccessLoggingSettings class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) @@ -51,8 +51,8 @@ from cdktf import TerraformStack # Provider bindings are generated by running `cdktf get`. # See https://cdk.tf/provider-generation for more details. # -from imports.aws. import WorkspaceswebUserAccessLoggingSettings from imports.aws.kinesis_stream import KinesisStream +from imports.aws.workspacesweb_user_access_logging_settings import WorkspaceswebUserAccessLoggingSettings class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) @@ -80,6 +80,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `tags` - (Optional) Map of tags assigned to the resource. If configured with a provider [`default_tags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. ## Attribute Reference @@ -102,7 +103,7 @@ from cdktf import TerraformStack # Provider bindings are generated by running `cdktf get`. # See https://cdk.tf/provider-generation for more details. # -from imports.aws. 
import WorkspaceswebUserAccessLoggingSettings +from imports.aws.workspacesweb_user_access_logging_settings import WorkspaceswebUserAccessLoggingSettings class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) @@ -115,4 +116,4 @@ Using `terraform import`, import WorkSpaces Web User Access Logging Settings usi % terraform import aws_workspacesweb_user_access_logging_settings.example arn:aws:workspaces-web:us-west-2:123456789012:userAccessLoggingSettings/abcdef12345 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/workspacesweb_user_access_logging_settings_association.html.markdown b/website/docs/cdktf/python/r/workspacesweb_user_access_logging_settings_association.html.markdown new file mode 100644 index 000000000000..162c1c7c1d82 --- /dev/null +++ b/website/docs/cdktf/python/r/workspacesweb_user_access_logging_settings_association.html.markdown @@ -0,0 +1,92 @@ +--- +subcategory: "WorkSpaces Web" +layout: "aws" +page_title: "AWS: aws_workspacesweb_user_access_logging_settings_association" +description: |- + Terraform resource for managing an AWS WorkSpaces Web User Access Logging Settings Association. +--- + + + +# Resource: aws_workspacesweb_user_access_logging_settings_association + +Terraform resource for managing an AWS WorkSpaces Web User Access Logging Settings Association. + +## Example Usage + +### Basic Usage + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import Token, TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. 
+# +from imports.aws.kinesis_stream import KinesisStream +from imports.aws.workspacesweb_portal import WorkspaceswebPortal +from imports.aws.workspacesweb_user_access_logging_settings import WorkspaceswebUserAccessLoggingSettings +from imports.aws.workspacesweb_user_access_logging_settings_association import WorkspaceswebUserAccessLoggingSettingsAssociation +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + example = KinesisStream(self, "example", + name="amazon-workspaces-web-example", + shard_count=1 + ) + aws_workspacesweb_portal_example = WorkspaceswebPortal(self, "example_1", + display_name="example" + ) + # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. + aws_workspacesweb_portal_example.override_logical_id("example") + aws_workspacesweb_user_access_logging_settings_example = + WorkspaceswebUserAccessLoggingSettings(self, "example_2", + kinesis_stream_arn=example.arn + ) + # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. + aws_workspacesweb_user_access_logging_settings_example.override_logical_id("example") + aws_workspacesweb_user_access_logging_settings_association_example = + WorkspaceswebUserAccessLoggingSettingsAssociation(self, "example_3", + portal_arn=Token.as_string(aws_workspacesweb_portal_example.portal_arn), + user_access_logging_settings_arn=Token.as_string(aws_workspacesweb_user_access_logging_settings_example.user_access_logging_settings_arn) + ) + # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. 
+ aws_workspacesweb_user_access_logging_settings_association_example.override_logical_id("example") +``` + +## Argument Reference + +The following arguments are required: + +* `user_access_logging_settings_arn` - (Required) ARN of the user access logging settings to associate with the portal. Forces replacement if changed. +* `portal_arn` - (Required) ARN of the portal to associate with the user access logging settings. Forces replacement if changed. + +The following arguments are optional: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). + +## Attribute Reference + +This resource exports no additional attributes. + +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import WorkSpaces Web User Access Logging Settings Association using the `user_access_logging_settings_arn,portal_arn`. For example: + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. 
+# +from imports.aws.workspacesweb_user_access_logging_settings_association import WorkspaceswebUserAccessLoggingSettingsAssociation +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + WorkspaceswebUserAccessLoggingSettingsAssociation.generate_config_for_import(self, "example", "arn:aws:workspaces-web:us-west-2:123456789012:userAccessLoggingSettings/user_access_logging_settings-id-12345678,arn:aws:workspaces-web:us-west-2:123456789012:portal/portal-id-12345678") +``` + + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/workspacesweb_user_settings.html.markdown b/website/docs/cdktf/python/r/workspacesweb_user_settings.html.markdown index 455c5b1c77b2..5a8d23b2feb3 100644 --- a/website/docs/cdktf/python/r/workspacesweb_user_settings.html.markdown +++ b/website/docs/cdktf/python/r/workspacesweb_user_settings.html.markdown @@ -137,6 +137,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `additional_encryption_context` - (Optional) Additional encryption context for the user settings. * `associated_portal_arns` - (Optional) List of web portal ARNs to associate with the user settings. * `cookie_synchronization_configuration` - (Optional) Configuration that specifies which cookies should be synchronized from the end user's local browser to the remote browser. Detailed below. 
@@ -197,4 +198,4 @@ Using `terraform import`, import WorkSpaces Web User Settings using the `user_se % terraform import aws_workspacesweb_user_settings.example arn:aws:workspacesweb:us-west-2:123456789012:usersettings/abcdef12345 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/workspacesweb_user_settings_association.html.markdown b/website/docs/cdktf/python/r/workspacesweb_user_settings_association.html.markdown new file mode 100644 index 000000000000..45d78838da1e --- /dev/null +++ b/website/docs/cdktf/python/r/workspacesweb_user_settings_association.html.markdown @@ -0,0 +1,88 @@ +--- +subcategory: "WorkSpaces Web" +layout: "aws" +page_title: "AWS: aws_workspacesweb_user_settings_association" +description: |- + Terraform resource for managing an AWS WorkSpaces Web User Settings Association. +--- + + + +# Resource: aws_workspacesweb_user_settings_association + +Terraform resource for managing an AWS WorkSpaces Web User Settings Association. + +## Example Usage + +### Basic Usage + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import Token, TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. 
+# +from imports.aws.workspacesweb_portal import WorkspaceswebPortal +from imports.aws.workspacesweb_user_settings import WorkspaceswebUserSettings +from imports.aws.workspacesweb_user_settings_association import WorkspaceswebUserSettingsAssociation +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + example = WorkspaceswebPortal(self, "example", + display_name="example" + ) + aws_workspacesweb_user_settings_example = WorkspaceswebUserSettings(self, "example_1", + copy_allowed="Enabled", + download_allowed="Enabled", + paste_allowed="Enabled", + print_allowed="Enabled", + upload_allowed="Enabled" + ) + # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. + aws_workspacesweb_user_settings_example.override_logical_id("example") + aws_workspacesweb_user_settings_association_example = + WorkspaceswebUserSettingsAssociation(self, "example_2", + portal_arn=example.portal_arn, + user_settings_arn=Token.as_string(aws_workspacesweb_user_settings_example.user_settings_arn) + ) + # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. + aws_workspacesweb_user_settings_association_example.override_logical_id("example") +``` + +## Argument Reference + +The following arguments are required: + +* `user_settings_arn` - (Required) ARN of the user settings to associate with the portal. Forces replacement if changed. +* `portal_arn` - (Required) ARN of the portal to associate with the user settings. Forces replacement if changed. + +The following arguments are optional: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
+ +## Attribute Reference + +This resource exports no additional attributes. + +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import WorkSpaces Web User Settings Association using the `user_settings_arn,portal_arn`. For example: + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.workspacesweb_user_settings_association import WorkspaceswebUserSettingsAssociation +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + WorkspaceswebUserSettingsAssociation.generate_config_for_import(self, "example", "arn:aws:workspaces-web:us-west-2:123456789012:userSettings/user_settings-id-12345678,arn:aws:workspaces-web:us-west-2:123456789012:portal/portal-id-12345678") +``` + + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/xray_encryption_config.html.markdown b/website/docs/cdktf/python/r/xray_encryption_config.html.markdown index 7e12414d4e29..722c12969f0b 100644 --- a/website/docs/cdktf/python/r/xray_encryption_config.html.markdown +++ b/website/docs/cdktf/python/r/xray_encryption_config.html.markdown @@ -84,6 +84,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `type` - (Required) The type of encryption. Set to `KMS` to use your own key for encryption. Set to `NONE` for default encryption. 
* `key_id` - (Optional) An AWS KMS customer master key (CMK) ARN. @@ -118,4 +119,4 @@ Using `terraform import`, import XRay Encryption Config using the region name. F % terraform import aws_xray_encryption_config.example us-west-2 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/xray_group.html.markdown b/website/docs/cdktf/python/r/xray_group.html.markdown index 74ab436ec7b6..3f80d752542e 100644 --- a/website/docs/cdktf/python/r/xray_group.html.markdown +++ b/website/docs/cdktf/python/r/xray_group.html.markdown @@ -40,6 +40,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `group_name` - (Required) The name of the group. * `filter_expression` - (Required) The filter expression defining criteria by which to group traces. more info can be found in official [docs](https://docs.aws.amazon.com/xray/latest/devguide/xray-console-filters.html). * `insights_configuration` - (Optional) Configuration options for enabling insights. @@ -62,6 +63,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_xray_group.example + identity = { + "arn" = "arn:aws:xray:us-west-2:123456789012:group/example-group/AFAEAFE" + } +} + +resource "aws_xray_group" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the X-Ray group. 
+ In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import XRay Groups using the ARN. For example: ```python @@ -85,4 +107,4 @@ Using `terraform import`, import XRay Groups using the ARN. For example: % terraform import aws_xray_group.example arn:aws:xray:us-west-2:1234567890:group/example-group/TNGX7SW5U6QY36T4ZMOUA3HVLBYCZTWDIOOXY3CJAXTHSS3YCWUA ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/xray_resource_policy.html.markdown b/website/docs/cdktf/python/r/xray_resource_policy.html.markdown index be92d1fe49d3..01550e785eb2 100644 --- a/website/docs/cdktf/python/r/xray_resource_policy.html.markdown +++ b/website/docs/cdktf/python/r/xray_resource_policy.html.markdown @@ -44,6 +44,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `policy_revision_id` - (Optional) Specifies a specific policy revision, to ensure an atomic create operation. By default the resource policy is created if it does not exist, or updated with an incremented revision id. The revision id is unique to each policy in the account. If the policy revision id does not match the latest revision id, the operation will fail with an InvalidPolicyRevisionIdException exception. You can also provide a PolicyRevisionId of 0. In this case, the operation will fail with an InvalidPolicyRevisionIdException exception if a resource policy with the same name already exists. * `bypass_policy_lockout_check` - (Optional) Flag to indicate whether to bypass the resource policy lockout safety check. 
Setting this value to true increases the risk that the policy becomes unmanageable. Do not set this value to true indiscriminately. Use this parameter only when you include a policy in the request and you intend to prevent the principal that is making the request from making a subsequent PutResourcePolicy request. The default value is `false`. @@ -79,4 +80,4 @@ Using `terraform import`, import X-Ray Resource Policy using the `policy_name`. % terraform import aws_xray_resource_policy.example resource_policy-name ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/xray_sampling_rule.html.markdown b/website/docs/cdktf/python/r/xray_sampling_rule.html.markdown index 6f534b7cfe52..3a3a82f3d0b2 100644 --- a/website/docs/cdktf/python/r/xray_sampling_rule.html.markdown +++ b/website/docs/cdktf/python/r/xray_sampling_rule.html.markdown @@ -48,6 +48,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `rule_name` - (Required) The name of the sampling rule. * `resource_arn` - (Required) Matches the ARN of the AWS resource on which the service runs. * `priority` - (Required) The priority of the sampling rule. @@ -95,4 +96,4 @@ Using `terraform import`, import XRay Sampling Rules using the name. 
For example % terraform import aws_xray_sampling_rule.example example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/actions/cloudfront_create_invalidation.html.markdown b/website/docs/cdktf/typescript/actions/cloudfront_create_invalidation.html.markdown new file mode 100644 index 000000000000..f30ee7058b0f --- /dev/null +++ b/website/docs/cdktf/typescript/actions/cloudfront_create_invalidation.html.markdown @@ -0,0 +1,148 @@ +--- +subcategory: "CloudFront" +layout: "aws" +page_title: "AWS: aws_cloudfront_create_invalidation" +description: |- + Invalidates CloudFront distribution cache for specified paths. +--- + + + +# Action: aws_cloudfront_create_invalidation + +~> **Note:** `aws_cloudfront_create_invalidation` is in beta. Its interface and behavior may change as the feature evolves, and breaking changes are possible. It is offered as a technical preview without compatibility guarantees until Terraform 1.14 is generally available. + +Invalidates CloudFront distribution cache for specified paths. This action creates an invalidation request and waits for it to complete. + +For information about CloudFront cache invalidation, see the [Amazon CloudFront Developer Guide](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/Invalidation.html). For specific information about creating invalidation requests, see the [CreateInvalidation](https://docs.aws.amazon.com/cloudfront/latest/APIReference/API_CreateInvalidation.html) page in the Amazon CloudFront API Reference. + +~> **Note:** CloudFront invalidation requests can take several minutes to complete. This action will wait for the invalidation to finish before continuing. You can only have a limited number of invalidation requests in progress at any given time. + +## Example Usage + +### Basic Usage + +```typescript +// DO NOT EDIT. 
Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { DataResource, TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. + */ +import { CloudfrontDistribution } from "./.gen/providers/aws/cloudfront-distribution"; +interface MyConfig { + defaultCacheBehavior: any; + enabled: any; + origin: any; + restrictions: any; + viewerCertificate: any; +} +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string, config: MyConfig) { + super(scope, name); + new CloudfrontDistribution(this, "example", { + defaultCacheBehavior: config.defaultCacheBehavior, + enabled: config.enabled, + origin: config.origin, + restrictions: config.restrictions, + viewerCertificate: config.viewerCertificate, + }); + const terraformDataExample = new DataResource(this, "example_1", { + input: "trigger-invalidation", + lifecycle: { + actionTrigger: [ + { + actions: [awsCloudfrontCreateInvalidation.example], + events: [beforeCreate, beforeUpdate], + }, + ], + }, + }); + /*This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match.*/ + terraformDataExample.overrideLogicalId("example"); + } +} + +``` + +### Invalidate Specific Paths + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + } +} + +``` + +### With Custom Caller Reference + +```typescript +// DO NOT EDIT. 
Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + } +} + +``` + +### CI/CD Pipeline Integration + +Use this action in your deployment pipeline to invalidate cache after updating static assets: + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { DataResource, TerraformStack } from "cdktf"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + new DataResource(this, "deploy_complete", { + dependsOn: [assets], + input: deploymentId, + lifecycle: { + actionTrigger: [ + { + actions: [awsCloudfrontCreateInvalidation.postDeploy], + events: [beforeCreate, beforeUpdate], + }, + ], + }, + }); + } +} + +``` + +### Environment-Specific Invalidation + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + } +} + +``` + +## Argument Reference + +This action supports the following arguments: + +* `distributionId` - (Required) ID of the CloudFront distribution to invalidate cache for. Must be a valid CloudFront distribution ID (e.g., E1GHKQ2EXAMPLE). +* `paths` - (Required) List of file paths or patterns to invalidate. Use `/*` to invalidate all files. Supports specific files (`/index.html`), directory wildcards (`/images/*`), or all files (`/*`). Maximum of 3000 paths per invalidation request. Note: The first 1,000 invalidation paths per month are free, additional paths are charged per path. 
+* `callerReference` - (Optional) Unique identifier for the invalidation request. If not provided, one will be generated automatically. Maximum length of 128 characters. +* `timeout` - (Optional) Timeout in seconds to wait for the invalidation to complete. Defaults to 900 seconds (15 minutes). Must be between 60 and 3600 seconds. Invalidation requests typically take 5-15 minutes to process. + + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/actions/codebuild_start_build.html.markdown b/website/docs/cdktf/typescript/actions/codebuild_start_build.html.markdown new file mode 100644 index 000000000000..72b75d7ff23c --- /dev/null +++ b/website/docs/cdktf/typescript/actions/codebuild_start_build.html.markdown @@ -0,0 +1,100 @@ +--- +subcategory: "CodeBuild" +layout: "aws" +page_title: "AWS: aws_codebuild_start_build" +description: |- + Starts a CodeBuild project build. +--- + + + +# Action: aws_codebuild_start_build + +~> **Note:** `aws_codebuild_start_build` is in beta. Its interface and behavior may change as the feature evolves, and breaking changes are possible. It is offered as a technical preview without compatibility guarantees until Terraform 1.14 is generally available. + +Starts a CodeBuild project build. This action will initiate a build and wait for it to complete, providing progress updates during execution. + +For information about AWS CodeBuild, see the [AWS CodeBuild User Guide](https://docs.aws.amazon.com/codebuild/latest/userguide/). For specific information about starting builds, see the [StartBuild](https://docs.aws.amazon.com/codebuild/latest/APIReference/API_StartBuild.html) page in the AWS CodeBuild API Reference. + +## Example Usage + +### Basic Usage + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { Token, DataResource, TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. 
+ * See https://cdk.tf/provider-generation for more details. + */ +import { CodebuildProject } from "./.gen/providers/aws/codebuild-project"; +class MyConvertedCode extends TerraformStack { +  constructor(scope: Construct, name: string) { +    super(scope, name); +    new CodebuildProject(this, "example", { +      artifacts: { +        type: "NO_ARTIFACTS", +      }, +      environment: { +        computeType: "BUILD_GENERAL1_SMALL", +        image: "aws/codebuild/amazonlinux2-x86_64-standard:3.0", +        type: "LINUX_CONTAINER", +      }, +      name: "example-project", +      serviceRole: Token.asString(awsIamRoleExample.arn), +      source: { +        buildspec: +          "version: 0.2\nphases:\n  build:\n    commands:\n      - echo 'Hello World'\n", +        type: "NO_SOURCE", +      }, +    }); +    new DataResource(this, "build_trigger", { +      input: "trigger-build", +      lifecycle: { +        actionTrigger: [ +          { +            actions: [awsCodebuildStartBuild.example], +            events: [afterCreate], +          }, +        ], +      }, +    }); +  } +} + +``` + +### Build with Environment Variables + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +class MyConvertedCode extends TerraformStack { +  constructor(scope: Construct, name: string) { +    super(scope, name); +  } +} + +``` + +## Argument Reference + +The following arguments are required: + +* `projectName` - (Required) Name of the CodeBuild project to build. + +The following arguments are optional: + +* `sourceVersion` - (Optional) Version of the build input to be built. For GitHub, this can be a commit SHA, branch name, or tag name. +* `timeout` - (Optional) Timeout in seconds for the build operation. Defaults to 1800 seconds (30 minutes). +* `environmentVariablesOverride` - (Optional) Environment variables to override for this build. See [Environment Variables Override](#environment-variables-override) below. + +### Environment Variables Override + +* `name` - (Required) Environment variable name. 
+* `value` - (Required) Environment variable value. +* `type` - (Optional) Environment variable type. Valid values are `PLAINTEXT`, `PARAMETER_STORE`, or `SECRETS_MANAGER`. Defaults to `PLAINTEXT`. + + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/actions/ec2_stop_instance.html.markdown b/website/docs/cdktf/typescript/actions/ec2_stop_instance.html.markdown new file mode 100644 index 000000000000..bc7174d6ea7f --- /dev/null +++ b/website/docs/cdktf/typescript/actions/ec2_stop_instance.html.markdown @@ -0,0 +1,109 @@ +--- +subcategory: "EC2 (Elastic Compute Cloud)" +layout: "aws" +page_title: "AWS: aws_ec2_stop_instance" +description: |- + Stops an EC2 instance. +--- + + + +# Action: aws_ec2_stop_instance + +~> **Note:** `aws_ec2_stop_instance` is in alpha. Its interface and behavior may change as the feature evolves, and breaking changes are possible. It is offered as a technical preview without compatibility guarantees until Terraform 1.14 is generally available. + +Stops an EC2 instance. This action will gracefully stop the instance and wait for it to reach the stopped state. + +For information about Amazon EC2, see the [Amazon EC2 User Guide](https://docs.aws.amazon.com/ec2/latest/userguide/). For specific information about stopping instances, see the [StopInstances](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_StopInstances.html) page in the Amazon EC2 API Reference. + +~> **Note:** This action directly stops EC2 instances which will interrupt running workloads. Ensure proper coordination with your applications before using this action. + +## Example Usage + +### Basic Usage + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { Token, TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. 
+ */ +import { Instance } from "./.gen/providers/aws/instance"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + new Instance(this, "example", { + ami: Token.asString(amazonLinux.id), + instanceType: "t3.micro", + tags: { + Name: "example-instance", + }, + }); + } +} + +``` + +### Force Stop + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + } +} + +``` + +### Maintenance Window + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { Token, DataResource, TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. + */ +import { Instance } from "./.gen/providers/aws/instance"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + const webServer = new Instance(this, "web_server", { + ami: Token.asString(amazonLinux.id), + instanceType: "t3.micro", + tags: { + Name: "web-server", + }, + }); + new DataResource(this, "maintenance_trigger", { + dependsOn: [webServer], + input: maintenanceWindow.value, + lifecycle: { + actionTrigger: [ + { + actions: [awsEc2StopInstance.maintenance], + events: [beforeCreate, beforeUpdate], + }, + ], + }, + }); + } +} + +``` + +## Argument Reference + +This action supports the following arguments: + +* `instanceId` - (Required) ID of the EC2 instance to stop. Must be a valid EC2 instance ID (e.g., i-1234567890abcdef0). +* `force` - (Optional) Forces the instance to stop. 
The instance does not have an opportunity to flush file system caches or file system metadata. If you use this option, you must perform file system check and repair procedures. This option is not recommended for Windows instances. Default: `false`. +* `timeout` - (Optional) Timeout in seconds to wait for the instance to stop. Must be between 30 and 3600 seconds. Default: `600`. + + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/actions/lambda_invoke.html.markdown b/website/docs/cdktf/typescript/actions/lambda_invoke.html.markdown new file mode 100644 index 000000000000..764a60115ab9 --- /dev/null +++ b/website/docs/cdktf/typescript/actions/lambda_invoke.html.markdown @@ -0,0 +1,200 @@ +--- +subcategory: "Lambda" +layout: "aws" +page_title: "AWS: aws_lambda_invoke" +description: |- + Invokes an AWS Lambda function with the specified payload. +--- + + + +# Action: aws_lambda_invoke + +~> **Note:** `aws_lambda_invoke` is in beta. Its interface and behavior may change as the feature evolves, and breaking changes are possible. It is offered as a technical preview without compatibility guarantees until Terraform 1.14 is generally available. + +Invokes an AWS Lambda function with the specified payload. This action allows for imperative invocation of Lambda functions with full control over invocation parameters. + +For information about AWS Lambda functions, see the [AWS Lambda Developer Guide](https://docs.aws.amazon.com/lambda/latest/dg/). For specific information about invoking Lambda functions, see the [Invoke](https://docs.aws.amazon.com/lambda/latest/api/API_Invoke.html) page in the AWS Lambda API Reference. + +~> **Note:** Synchronous invocations will wait for the function to complete execution, while asynchronous invocations return immediately after the request is _accepted_. + +## Example Usage + +### Basic Usage + +```typescript +// DO NOT EDIT. 
Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { DataResource, TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. + */ +import { LambdaFunction } from "./.gen/providers/aws/lambda-function"; +interface MyConfig { + functionName: any; + role: any; +} +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string, config: MyConfig) { + super(scope, name); + new LambdaFunction(this, "example", { + functionName: config.functionName, + role: config.role, + }); + const terraformDataExample = new DataResource(this, "example_1", { + input: "trigger-lambda", + lifecycle: { + actionTrigger: [ + { + actions: [awsLambdaInvoke.example], + events: [beforeCreate, beforeUpdate], + }, + ], + }, + }); + /*This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match.*/ + terraformDataExample.overrideLogicalId("example"); + } +} + +``` + +### Invoke with Function Version + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + } +} + +``` + +### Asynchronous Invocation + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + } +} + +``` + +### Dry Run Validation + +```typescript +// DO NOT EDIT. 
Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + } +} + +``` + +### With Log Capture + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + } +} + +``` + +### Mobile Application Context + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + } +} + +``` + +### CI/CD Pipeline Integration + +Use this action in your deployment pipeline to trigger post-deployment functions: + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { DataResource, TerraformStack } from "cdktf"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + new DataResource(this, "deploy_complete", { + dependsOn: [api], + input: deploymentId, + lifecycle: { + actionTrigger: [ + { + actions: [awsLambdaInvoke.warmup], + events: [beforeCreate, beforeUpdate], + }, + ], + }, + }); + } +} + +``` + +### Environment-Specific Processing + +```typescript +// DO NOT EDIT. 
Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + } +} + +``` + +### Complex Payload with Dynamic Content + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + } +} + +``` + +## Argument Reference + +This action supports the following arguments: + +* `clientContext` - (Optional) Up to 3,583 bytes of base64-encoded data about the invoking client to pass to the function in the context object. This is only used for mobile applications and should contain information about the client application and device. +* `functionName` - (Required) Name, ARN, or partial ARN of the Lambda function to invoke. You can specify a function name (e.g., `my-function`), a qualified function name (e.g., `my-function:PROD`), or a partial ARN (e.g., `123456789012:function:my-function`). +* `invocationType` - (Optional) Invocation type. Valid values are `RequestResponse` (default) for synchronous invocation that waits for the function to complete and returns the response, `Event` for asynchronous invocation that returns immediately after the request is accepted, and `DryRun` to validate parameters and verify permissions without actually executing the function. +* `logType` - (Optional) Set to `Tail` to include the execution log in the response. Only applies to synchronous invocations (`RequestResponse` invocation type). Defaults to `None`. When set to `Tail`, the last 4 KB of the execution log is included in the response. +* `payload` - (Required) JSON payload to send to the Lambda function. 
This should be a valid JSON string that represents the event data for your function. The payload size limit is 6 MB for synchronous invocations and 256 KB for asynchronous invocations. +* `qualifier` - (Optional) Version or alias of the Lambda function to invoke. If not specified, the `$LATEST` version will be invoked. Can be a version number (e.g., `1`) or an alias (e.g., `PROD`). + + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/actions/ses_send_email.html.markdown b/website/docs/cdktf/typescript/actions/ses_send_email.html.markdown new file mode 100644 index 000000000000..a376af8e9872 --- /dev/null +++ b/website/docs/cdktf/typescript/actions/ses_send_email.html.markdown @@ -0,0 +1,168 @@ +--- +subcategory: "SES (Simple Email)" +layout: "aws" +page_title: "AWS: aws_ses_send_email" +description: |- + Sends an email using Amazon SES. +--- + + + +# Action: aws_ses_send_email + +~> **Note:** `aws_ses_send_email` is in beta. Its interface and behavior may change as the feature evolves, and breaking changes are possible. It is offered as a technical preview without compatibility guarantees until Terraform 1.14 is generally available. + +Sends an email using Amazon SES. This action allows for imperative email sending with full control over recipients, content, and formatting. + +For information about Amazon SES, see the [Amazon SES Developer Guide](https://docs.aws.amazon.com/ses/latest/dg/). For specific information about sending emails, see the [SendEmail](https://docs.aws.amazon.com/ses/latest/APIReference/API_SendEmail.html) page in the Amazon SES API Reference. + +~> **Note:** All email addresses used must be verified in Amazon SES or belong to a verified domain. Due to the difficulty in testing, your help is important in discovering and reporting issues. + +## Example Usage + +### Basic Usage + +```typescript +// DO NOT EDIT. 
Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { DataResource, TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. + */ +import { SesEmailIdentity } from "./.gen/providers/aws/ses-email-identity"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + new SesEmailIdentity(this, "example", { + email: "sender@example.com", + }); + const terraformDataExample = new DataResource(this, "example_1", { + input: "send-notification", + lifecycle: { + actionTrigger: [ + { + actions: [awsSesSendEmail.example], + events: [beforeCreate, beforeUpdate], + }, + ], + }, + }); + /*This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match.*/ + terraformDataExample.overrideLogicalId("example"); + } +} + +``` + +### HTML Email with Multiple Recipients + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + } +} + +``` + +### Deployment Notification + +```typescript +// DO NOT EDIT. 
Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { DataResource, TerraformStack } from "cdktf"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + new DataResource(this, "deployment", { + dependsOn: [app], + input: deploymentId.value, + lifecycle: { + actionTrigger: [ + { + actions: [awsSesSendEmail.deployNotification], + events: [afterCreate], + }, + ], + }, + }); + } +} + +``` + +### Alert Email with Dynamic Content + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + } +} + +``` + +### Multi-format Email + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + } +} + +``` + +### Conditional Email Sending + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + } +} + +``` + +### Batch Processing Notification + +```typescript +// DO NOT EDIT. 
Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + } +} + +``` + +## Argument Reference + +This action supports the following arguments: + +* `bcc_addresses` - (Optional) List of email addresses for the BCC: field of the message. Recipients in this list will receive the email but their addresses will not be visible to other recipients. +* `cc_addresses` - (Optional) List of email addresses for the CC: field of the message. Recipients in this list will receive the email and their addresses will be visible to all recipients. +* `htmlBody` - (Optional) Message body in HTML format. Either `textBody` or `htmlBody` (or both) must be specified. HTML content allows for rich formatting including links, images, and styling. +* `reply_to_addresses` - (Optional) List of reply-to email addresses for the message. If the recipient replies to the message, each reply-to address will receive the reply. If not specified, replies will go to the source address. +* `return_path` - (Optional) Email address that bounces and complaints will be forwarded to when feedback forwarding is enabled. This is useful for handling delivery failures and spam complaints. +* `source` - (Required) Email address that is sending the email. This address must be either individually verified with Amazon SES, or from a domain that has been verified with Amazon SES. +* `subject` - (Required) Subject of the message: A short summary of the content, which will appear in the recipient's inbox. +* `textBody` - (Optional) Message body in text format. Either `textBody` or `htmlBody` (or both) must be specified. Text format ensures compatibility with all email clients. +* `to_addresses` - (Optional) List of email addresses for the To: field of the message. 
These are the primary recipients of the email. + + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/actions/sfn_start_execution.html.markdown b/website/docs/cdktf/typescript/actions/sfn_start_execution.html.markdown new file mode 100644 index 000000000000..4838ae31374c --- /dev/null +++ b/website/docs/cdktf/typescript/actions/sfn_start_execution.html.markdown @@ -0,0 +1,218 @@ +--- +subcategory: "SFN (Step Functions)" +layout: "aws" +page_title: "AWS: aws_sfn_start_execution" +description: |- + Starts a Step Functions state machine execution with the specified input data. +--- + + + +# Action: aws_sfn_start_execution + +~> **Note:** `aws_sfn_start_execution` is in beta. Its interface and behavior may change as the feature evolves, and breaking changes are possible. It is offered as a technical preview without compatibility guarantees until Terraform 1.14 is generally available. + +Starts a Step Functions state machine execution with the specified input data. This action allows for imperative execution of state machines with full control over execution parameters. + +For information about AWS Step Functions, see the [AWS Step Functions Developer Guide](https://docs.aws.amazon.com/step-functions/latest/dg/). For specific information about starting executions, see the [StartExecution](https://docs.aws.amazon.com/step-functions/latest/apireference/API_StartExecution.html) page in the AWS Step Functions API Reference. + +~> **Note:** For `STANDARD` workflows, executions with the same name and input are idempotent. For `EXPRESS` workflows, each execution is unique regardless of name and input. + +## Example Usage + +### Basic Usage + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { Fn, Token, DataResource, TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. 
+ * See https://cdk.tf/provider-generation for more details. + */ +import { SfnStateMachine } from "./.gen/providers/aws/sfn-state-machine"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + new SfnStateMachine(this, "example", { + definition: Token.asString( + Fn.jsonencode({ + Comment: "A simple minimal example", + StartAt: "Hello", + States: { + Hello: { + End: true, + Result: "Hello World!", + Type: "Pass", + }, + }, + }) + ), + name: "example-state-machine", + roleArn: sfn.arn, + }); + const terraformDataExample = new DataResource(this, "example_1", { + input: "trigger-execution", + lifecycle: { + actionTrigger: [ + { + actions: [awsSfnStartExecution.example], + events: [beforeCreate, beforeUpdate], + }, + ], + }, + }); + /*This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match.*/ + terraformDataExample.overrideLogicalId("example"); + } +} + +``` + +### Named Execution + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + } +} + +``` + +### Execution with Version + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + } +} + +``` + +### Execution with Alias + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. 
+ * See https://cdk.tf/provider-generation for more details. + */ +import { SfnAlias } from "./.gen/providers/aws/sfn-alias"; +interface MyConfig { + stateMachineVersionArn: any; + weight: any; +} +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string, config: MyConfig) { + super(scope, name); + new SfnAlias(this, "prod", { + name: "PROD", + routingConfiguration: [ + { + state_machine_version_weight: [ + { + state_machine_version_arn: example.arn, + weight: 100, + }, + ], + stateMachineVersionArn: config.stateMachineVersionArn, + weight: config.weight, + }, + ], + state_machine_arn: example.arn, + }); + } +} + +``` + +### X-Ray Tracing + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + } +} + +``` + +### CI/CD Pipeline Integration + +Use this action in your deployment pipeline to trigger post-deployment workflows: + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { DataResource, TerraformStack } from "cdktf"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + new DataResource(this, "deploy_complete", { + dependsOn: [processors], + input: deploymentId, + lifecycle: { + actionTrigger: [ + { + actions: [awsSfnStartExecution.postDeploy], + events: [beforeCreate, beforeUpdate], + }, + ], + }, + }); + } +} + +``` + +### Environment-Specific Processing + +```typescript +// DO NOT EDIT. 
Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + } +} + +``` + +### Complex Workflow Orchestration + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + } +} + +``` + +## Argument Reference + +This action supports the following arguments: + +* `input` - (Optional) JSON input data for the execution. Must be valid JSON. Defaults to `{}` if not specified. The input size limit is 256 KB. +* `name` - (Optional) Name of the execution. Must be unique within the account/region/state machine for 90 days. If not provided, Step Functions automatically generates a UUID. Names must not contain whitespace, brackets, wildcards, or special characters. +* `state_machine_arn` - (Required) ARN of the state machine to execute. Can be an unqualified ARN, version-qualified ARN (e.g., `arn:aws:states:region:account:stateMachine:name:version`), or alias-qualified ARN (e.g., `arn:aws:states:region:account:stateMachine:name:alias`). +* `trace_header` - (Optional) AWS X-Ray trace header for distributed tracing. Used to correlate execution traces across services. 
+ + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/actions/sns_publish.html.markdown b/website/docs/cdktf/typescript/actions/sns_publish.html.markdown new file mode 100644 index 000000000000..e0913e13456b --- /dev/null +++ b/website/docs/cdktf/typescript/actions/sns_publish.html.markdown @@ -0,0 +1,142 @@ +--- +subcategory: "SNS (Simple Notification)" +layout: "aws" +page_title: "AWS: aws_sns_publish" +description: |- + Publishes a message to an Amazon SNS topic. +--- + + + +# Action: aws_sns_publish + +~> **Note:** `aws_sns_publish` is in beta. Its interface and behavior may change as the feature evolves, and breaking changes are possible. It is offered as a technical preview without compatibility guarantees until Terraform 1.14 is generally available. + +Publishes a message to an Amazon SNS topic. This action allows for imperative message publishing with full control over message attributes and structure. + +For information about Amazon SNS, see the [Amazon SNS Developer Guide](https://docs.aws.amazon.com/sns/latest/dg/). For specific information about publishing messages, see the [Publish](https://docs.aws.amazon.com/sns/latest/api/API_Publish.html) page in the Amazon SNS API Reference. + +## Example Usage + +### Basic Usage + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { DataResource, TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. 
+ */ +import { SnsTopic } from "./.gen/providers/aws/sns-topic"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + new SnsTopic(this, "example", { + name: "example-topic", + }); + const terraformDataExample = new DataResource(this, "example_1", { + input: "trigger-message", + lifecycle: { + actionTrigger: [ + { + actions: [awsSnsPublish.example], + events: [beforeCreate, beforeUpdate], + }, + ], + }, + }); + /*This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match.*/ + terraformDataExample.overrideLogicalId("example"); + } +} + +``` + +### Message with Subject + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + } +} + +``` + +### JSON Message Structure + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + } +} + +``` + +### Message with Attributes + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + } +} + +``` + +### Deployment Notification + +```typescript +// DO NOT EDIT. 
Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { DataResource, TerraformStack } from "cdktf"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + new DataResource(this, "deploy_trigger", { + dependsOn: [app, main], + input: deploymentId.value, + lifecycle: { + actionTrigger: [ + { + actions: [awsSnsPublish.deployComplete], + events: [beforeCreate, beforeUpdate], + }, + ], + }, + }); + } +} + +``` + +## Argument Reference + +This action supports the following arguments: + +* `message` - (Required) Message to publish. For JSON message structure, this should be a JSON object with protocol-specific messages. Maximum size is 256 KB. +* `message_attributes` - (Optional) Message attributes to include with the message. Each attribute consists of a name, data type, and value. Up to 10 attributes are allowed. [See below.](#message-attributes) +* `message_structure` - (Optional) Set to `json` if you want to send different messages for each protocol. If not specified, the message will be sent as-is to all protocols. +* `subject` - (Optional) Optional subject for the message. Only used for email and email-json protocols. Maximum length is 100 characters. +* `topicArn` - (Required) ARN of the SNS topic to publish the message to. + +### Message Attributes + +The `message_attributes` block supports: + +* `dataType` - (Required) Data type of the message attribute. Valid values are `String`, `Number`, and `Binary`. +* `mapBlockKey` - (Required) Name of the message attribute (used as map key). Must be unique within the message. +* `stringValue` - (Required) Value of the message attribute. 
+ + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/acm_certificate.html.markdown b/website/docs/cdktf/typescript/d/acm_certificate.html.markdown index 6077559594d4..792567b8989c 100644 --- a/website/docs/cdktf/typescript/d/acm_certificate.html.markdown +++ b/website/docs/cdktf/typescript/d/acm_certificate.html.markdown @@ -49,6 +49,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `domain` - (Optional) Domain of the certificate to look up. If set and no certificate is found with this name, an error will be returned. * `keyTypes` - (Optional) List of key algorithms to filter certificates. By default, ACM does not return all certificate types when searching. See the [ACM API Reference](https://docs.aws.amazon.com/acm/latest/APIReference/API_CertificateDetail.html#ACM-Type-CertificateDetail-KeyAlgorithm) for supported key algorithms. * `statuses` - (Optional) List of statuses on which to filter the returned list. Valid values are `PENDING_VALIDATION`, `ISSUED`, @@ -69,4 +70,4 @@ This data source exports the following attributes in addition to the arguments a * `certificateChain` - Certificates forming the requested ACM-issued certificate's chain of trust. The chain consists of the certificate of the issuing CA and the intermediate certificates of any other subordinate CAs. * `tags` - Mapping of tags for the resource. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/acmpca_certificate.html.markdown b/website/docs/cdktf/typescript/d/acmpca_certificate.html.markdown index 1d086995614b..6d2b1a02a0a7 100644 --- a/website/docs/cdktf/typescript/d/acmpca_certificate.html.markdown +++ b/website/docs/cdktf/typescript/d/acmpca_certificate.html.markdown @@ -40,6 +40,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `arn` - (Required) ARN of the certificate issued by the private certificate authority. * `certificateAuthorityArn` - (Required) ARN of the certificate authority. @@ -50,4 +51,4 @@ This data source exports the following attributes in addition to the arguments a * `certificate` - PEM-encoded certificate value. * `certificateChain` - PEM-encoded certificate chain that includes any intermediate certificates and chains up to root CA. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/acmpca_certificate_authority.html.markdown b/website/docs/cdktf/typescript/d/acmpca_certificate_authority.html.markdown index e4187d31c672..ed58e33f5c2d 100644 --- a/website/docs/cdktf/typescript/d/acmpca_certificate_authority.html.markdown +++ b/website/docs/cdktf/typescript/d/acmpca_certificate_authority.html.markdown @@ -38,6 +38,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `arn` - (Required) ARN of the certificate authority. ## Attribute Reference @@ -65,4 +66,4 @@ This data source exports the following attributes in addition to the arguments a * `tags` - Key-value map of user-defined tags that are attached to the certificate authority. * `type` - Type of the certificate authority. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/ami.html.markdown b/website/docs/cdktf/typescript/d/ami.html.markdown index 9b8768b8ee33..6436b261b289 100644 --- a/website/docs/cdktf/typescript/d/ami.html.markdown +++ b/website/docs/cdktf/typescript/d/ami.html.markdown @@ -56,6 +56,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `owners` - (Optional) List of AMI owners to limit search. Valid values: an AWS account ID, `self` (the current account), or an AWS owner alias (e.g., `amazon`, `aws-marketplace`, `microsoft`). * `mostRecent` - (Optional) If more than one result is returned, use the most recent AMI. @@ -65,6 +66,10 @@ recent AMI. * `filter` - (Optional) One or more name/value pairs to filter off of. There are several valid keys, for a full reference, check out [describe-images in the AWS CLI reference][1]. +* `allowUnsafeFilter` - (Optional) If true, allow unsafe filter values. With unsafe +filters and `mostRecent` set to `true`, a third party may introduce a new image which +will be returned by this data source. 
Consider filtering by owner or image ID rather +than setting this argument. * `nameRegex` - (Optional) Regex string to apply to the AMI list returned by AWS. This allows more advanced filtering not supported from the AWS API. This filtering is done locally on what AWS returns, and could have a performance @@ -152,4 +157,4 @@ interpolation. [1]: http://docs.aws.amazon.com/cli/latest/reference/ec2/describe-images.html - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/ami_ids.html.markdown b/website/docs/cdktf/typescript/d/ami_ids.html.markdown index 0024ac10169d..e4bab331aeee 100644 --- a/website/docs/cdktf/typescript/d/ami_ids.html.markdown +++ b/website/docs/cdktf/typescript/d/ami_ids.html.markdown @@ -44,6 +44,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `owners` - (Required) List of AMI owners to limit search. At least 1 value must be specified. Valid values: an AWS account ID, `self` (the current account), or an AWS owner alias (e.g., `amazon`, `aws-marketplace`, `microsoft`). * `executableUsers` - (Optional) Limit search to users with *explicit* launch permission on the image. Valid items are the numeric account ID or `self`. 
@@ -74,4 +75,4 @@ This data source exports the following attributes in addition to the arguments a - `read` - (Default `20m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/api_gateway_api_key.html.markdown b/website/docs/cdktf/typescript/d/api_gateway_api_key.html.markdown index 33e68b249e4f..fb8846986268 100644 --- a/website/docs/cdktf/typescript/d/api_gateway_api_key.html.markdown +++ b/website/docs/cdktf/typescript/d/api_gateway_api_key.html.markdown @@ -39,6 +39,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `id` - (Required) ID of the API Key to look up. ## Attribute Reference @@ -55,4 +56,4 @@ This data source exports the following attributes in addition to the arguments a * `enabled` - Whether the API Key is enabled. * `tags` - Map of tags for the resource. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/api_gateway_api_keys.html.markdown b/website/docs/cdktf/typescript/d/api_gateway_api_keys.html.markdown index 61c28c07ced2..e704357de2c2 100644 --- a/website/docs/cdktf/typescript/d/api_gateway_api_keys.html.markdown +++ b/website/docs/cdktf/typescript/d/api_gateway_api_keys.html.markdown @@ -36,6 +36,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `customerId` - (Optional) Amazon Web Services Marketplace customer identifier, when integrating with the Amazon Web Services SaaS Marketplace. * `includeValues` - (Optional) Set this value to `true` if you wish the result contains the key value. Defaults to `false`. @@ -58,4 +59,4 @@ This data source exports the following attributes in addition to the arguments a * `enabled` - Whether the API Key is enabled. * `tags` - Map of tags for the resource. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/api_gateway_authorizer.html.markdown b/website/docs/cdktf/typescript/d/api_gateway_authorizer.html.markdown index a69aa9bcffe2..17e3a60956e4 100644 --- a/website/docs/cdktf/typescript/d/api_gateway_authorizer.html.markdown +++ b/website/docs/cdktf/typescript/d/api_gateway_authorizer.html.markdown @@ -41,6 +41,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `authorizerId` - (Required) Authorizer identifier. * `restApiId` - (Required) ID of the associated REST API. @@ -58,4 +59,4 @@ This data source exports the following attributes in addition to the arguments a * `providerArns` - List of the Amazon Cognito user pool ARNs. * `type` - Type of the authorizer. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/api_gateway_authorizers.html.markdown b/website/docs/cdktf/typescript/d/api_gateway_authorizers.html.markdown index 73942906e41b..bfd840cb1eca 100644 --- a/website/docs/cdktf/typescript/d/api_gateway_authorizers.html.markdown +++ b/website/docs/cdktf/typescript/d/api_gateway_authorizers.html.markdown @@ -38,6 +38,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `restApiId` - (Required) ID of the associated REST API. ## Attribute Reference @@ -46,4 +47,4 @@ This data source exports the following attributes in addition to the arguments a * `ids` - List of Authorizer identifiers. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/api_gateway_domain_name.html.markdown b/website/docs/cdktf/typescript/d/api_gateway_domain_name.html.markdown index 055f463c70b1..803bd8251ebd 100644 --- a/website/docs/cdktf/typescript/d/api_gateway_domain_name.html.markdown +++ b/website/docs/cdktf/typescript/d/api_gateway_domain_name.html.markdown @@ -38,6 +38,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `domainName` - (Required) Fully-qualified domain name to look up. If no domain name is found, an error will be returned. 
* `domainNameId` - (Optional) The identifier for the domain name resource. Supported only for private custom domain names. @@ -62,4 +63,4 @@ This data source exports the following attributes in addition to the arguments a * `securityPolicy` - Security policy for the domain name. * `tags` - Key-value map of tags for the resource. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/api_gateway_export.html.markdown b/website/docs/cdktf/typescript/d/api_gateway_export.html.markdown index 86466fc1c671..794025770b1f 100644 --- a/website/docs/cdktf/typescript/d/api_gateway_export.html.markdown +++ b/website/docs/cdktf/typescript/d/api_gateway_export.html.markdown @@ -38,6 +38,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `exportType` - (Required) Type of export. Acceptable values are `oas30` for OpenAPI 3.0.x and `swagger` for Swagger/OpenAPI 2.0. * `restApiId` - (Required) Identifier of the associated REST API. * `stageName` - (Required) Name of the Stage that will be exported. @@ -53,4 +54,4 @@ This data source exports the following attributes in addition to the arguments a * `contentType` - Content-type header value in the HTTP response. * `contentDisposition` - Content-disposition header value in the HTTP response. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/api_gateway_resource.html.markdown b/website/docs/cdktf/typescript/d/api_gateway_resource.html.markdown index 1ba8016d25a3..fbc3f25657fb 100644 --- a/website/docs/cdktf/typescript/d/api_gateway_resource.html.markdown +++ b/website/docs/cdktf/typescript/d/api_gateway_resource.html.markdown @@ -44,6 +44,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `restApiId` - (Required) REST API id that owns the resource. If no REST API is found, an error will be returned. * `path` - (Required) Full path of the resource. If no path is found, an error will be returned. @@ -55,4 +56,4 @@ This data source exports the following attributes in addition to the arguments a * `parentId` - Set to the ID of the parent Resource. * `pathPart` - Set to the path relative to the parent Resource. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/api_gateway_rest_api.html.markdown b/website/docs/cdktf/typescript/d/api_gateway_rest_api.html.markdown index c55477c41378..9933f91973e8 100644 --- a/website/docs/cdktf/typescript/d/api_gateway_rest_api.html.markdown +++ b/website/docs/cdktf/typescript/d/api_gateway_rest_api.html.markdown @@ -41,6 +41,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the REST API to look up. If no REST API is found with this name, an error will be returned. If multiple REST APIs are found with this name, an error will be returned. ## Attribute Reference @@ -62,4 +63,4 @@ This data source exports the following attributes in addition to the arguments a * `rootResourceId` - Set to the ID of the API Gateway Resource on the found REST API where the route matches '/'. * `tags` - Key-value map of resource tags. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/api_gateway_sdk.html.markdown b/website/docs/cdktf/typescript/d/api_gateway_sdk.html.markdown index dc47571c70a8..44aa173a07b7 100644 --- a/website/docs/cdktf/typescript/d/api_gateway_sdk.html.markdown +++ b/website/docs/cdktf/typescript/d/api_gateway_sdk.html.markdown @@ -44,6 +44,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `restApiId` - (Required) Identifier of the associated REST API. * `stageName` - (Required) Name of the Stage that will be exported. * `sdkType` - (Required) Language for the generated SDK. Currently `java`, `javascript`, `android`, `objectivec` (for iOS), `swift` (for iOS), and `ruby` are supported. @@ -58,4 +59,4 @@ This data source exports the following attributes in addition to the arguments a * `contentType` - Content-type header value in the HTTP response. * `contentDisposition` - Content-disposition header value in the HTTP response. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/api_gateway_vpc_link.html.markdown b/website/docs/cdktf/typescript/d/api_gateway_vpc_link.html.markdown index ebb840c57a4f..b903cfb72c40 100644 --- a/website/docs/cdktf/typescript/d/api_gateway_vpc_link.html.markdown +++ b/website/docs/cdktf/typescript/d/api_gateway_vpc_link.html.markdown @@ -41,6 +41,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the API Gateway VPC Link to look up. If no API Gateway VPC Link is found with this name, an error will be returned. If multiple API Gateway VPC Links are found with this name, an error will be returned. @@ -55,4 +56,4 @@ This data source exports the following attributes in addition to the arguments a * `targetArns` - List of network load balancer arns in the VPC targeted by the VPC link. Currently AWS only supports 1 target. * `tags` - Key-value map of resource tags - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/apigatewayv2_api.html.markdown b/website/docs/cdktf/typescript/d/apigatewayv2_api.html.markdown index 5da45a5ac4e0..c219461c9110 100644 --- a/website/docs/cdktf/typescript/d/apigatewayv2_api.html.markdown +++ b/website/docs/cdktf/typescript/d/apigatewayv2_api.html.markdown @@ -38,6 +38,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `apiId` - (Required) API identifier. ## Attribute Reference @@ -70,4 +71,4 @@ The `corsConfiguration` object supports the following: * `exposeHeaders` - Set of exposed HTTP headers. * `maxAge` - Number of seconds that the browser should cache preflight request results. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/apigatewayv2_apis.html.markdown b/website/docs/cdktf/typescript/d/apigatewayv2_apis.html.markdown index 92a585fc4960..842b54a99ef5 100644 --- a/website/docs/cdktf/typescript/d/apigatewayv2_apis.html.markdown +++ b/website/docs/cdktf/typescript/d/apigatewayv2_apis.html.markdown @@ -38,6 +38,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Optional) API name. * `protocolType` - (Optional) API protocol. * `tags` - (Optional) Map of tags, each pair of which must exactly match @@ -49,4 +50,4 @@ This data source exports the following attributes in addition to the arguments a * `ids` - Set of API identifiers. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/apigatewayv2_export.html.markdown b/website/docs/cdktf/typescript/d/apigatewayv2_export.html.markdown index 19db33cf5c43..cdd7a61350e7 100644 --- a/website/docs/cdktf/typescript/d/apigatewayv2_export.html.markdown +++ b/website/docs/cdktf/typescript/d/apigatewayv2_export.html.markdown @@ -40,6 +40,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `apiId` - (Required) API identifier. * `specification` - (Required) Version of the API specification to use. `OAS30`, for OpenAPI 3.0, is the only supported value. * `outputType` - (Required) Output type of the exported definition file. Valid values are `JSON` and `YAML`. @@ -54,4 +55,4 @@ This data source exports the following attributes in addition to the arguments a * `id` - API identifier. * `body` - ID of the API. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/apigatewayv2_vpc_link.html.markdown b/website/docs/cdktf/typescript/d/apigatewayv2_vpc_link.html.markdown index 1d8e7ad0b775..9a9ba7f79cfa 100644 --- a/website/docs/cdktf/typescript/d/apigatewayv2_vpc_link.html.markdown +++ b/website/docs/cdktf/typescript/d/apigatewayv2_vpc_link.html.markdown @@ -38,8 +38,9 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -The following arguments are required: +This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `vpcLinkId` - (Required) VPC Link ID ## Attribute Reference @@ -53,4 +54,4 @@ This data source exports the following attributes in addition to the arguments a * `subnetIds` - List of subnets attached to the VPC Link. * `tags` - VPC Link Tags. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/appconfig_application.html.markdown b/website/docs/cdktf/typescript/d/appconfig_application.html.markdown new file mode 100644 index 000000000000..506beb8616de --- /dev/null +++ b/website/docs/cdktf/typescript/d/appconfig_application.html.markdown @@ -0,0 +1,54 @@ +--- +subcategory: "AppConfig" +layout: "aws" +page_title: "AWS: aws_appconfig_application" +description: |- + Retrieves an AWS AppConfig Application by name. +--- + + + +# Data Source: aws_appconfig_application + +Provides details about an AWS AppConfig Application. + +## Example Usage + +### Basic Usage + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. + */ +import { DataAwsAppconfigApplication } from "./.gen/providers/aws/"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + new DataAwsAppconfigApplication(this, "example", { + name: "my-appconfig-application", + }); + } +} + +``` + +## Argument Reference + +This data source supports the following arguments: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `id` - (Optional) ID of the Application. Either `id` or `name` must be specified. +* `name` - (Optional) AWS AppConfig Application name. Either `name` or `id` must be specified. + +## Attribute Reference + +This data source exports the following attributes in addition to the arguments above: + +* `arn` - ARN of the Application. +* `description` - Description of the Application. + + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/appconfig_configuration_profile.html.markdown b/website/docs/cdktf/typescript/d/appconfig_configuration_profile.html.markdown index 5e82b714a038..28c175d88890 100644 --- a/website/docs/cdktf/typescript/d/appconfig_configuration_profile.html.markdown +++ b/website/docs/cdktf/typescript/d/appconfig_configuration_profile.html.markdown @@ -39,8 +39,9 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -The following arguments are required: +This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `applicationId` - (Required) ID of the AppConfig application to which this configuration profile belongs. * `configurationProfileId` - (Required) ID of the Configuration Profile. @@ -59,4 +60,4 @@ This data source exports the following attributes in addition to the arguments a * `content` - Either the JSON Schema content or the ARN of an AWS Lambda function. * `type` - Type of validator. Valid values: JSON_SCHEMA and LAMBDA. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/appconfig_configuration_profiles.html.markdown b/website/docs/cdktf/typescript/d/appconfig_configuration_profiles.html.markdown index 962bf0a3dc10..f595e23d2ab7 100644 --- a/website/docs/cdktf/typescript/d/appconfig_configuration_profiles.html.markdown +++ b/website/docs/cdktf/typescript/d/appconfig_configuration_profiles.html.markdown @@ -55,8 +55,9 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -The following arguments are required: +This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `applicationId` - (Required) ID of the AppConfig Application. ## Attribute Reference @@ -65,4 +66,4 @@ This data source exports the following attributes in addition to the arguments a * `configurationProfileIds` - Set of Configuration Profile IDs associated with the AppConfig Application. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/appconfig_environment.html.markdown b/website/docs/cdktf/typescript/d/appconfig_environment.html.markdown index d30c4ec45cf9..a919c8f61923 100644 --- a/website/docs/cdktf/typescript/d/appconfig_environment.html.markdown +++ b/website/docs/cdktf/typescript/d/appconfig_environment.html.markdown @@ -39,8 +39,9 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -The following arguments are required: +This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `applicationId` - (Required) ID of the AppConfig Application to which this Environment belongs. * `environmentId` - (Required) ID of the AppConfig Environment. @@ -58,4 +59,4 @@ This data source exports the following attributes in addition to the arguments a or `ROLLED_BACK`. * `tags` - Map of tags for the resource. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/appconfig_environments.html.markdown b/website/docs/cdktf/typescript/d/appconfig_environments.html.markdown index 789eb889fbe5..463ddeec31e4 100644 --- a/website/docs/cdktf/typescript/d/appconfig_environments.html.markdown +++ b/website/docs/cdktf/typescript/d/appconfig_environments.html.markdown @@ -39,8 +39,9 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -The following arguments are required: +This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `applicationId` - (Required) ID of the AppConfig Application. ## Attribute Reference @@ -49,4 +50,4 @@ This data source exports the following attributes in addition to the arguments a * `environmentIds` - Set of Environment IDs associated with this AppConfig Application. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/appintegrations_event_integration.html.markdown b/website/docs/cdktf/typescript/d/appintegrations_event_integration.html.markdown index 08665fb8ebd9..482673fbf548 100644 --- a/website/docs/cdktf/typescript/d/appintegrations_event_integration.html.markdown +++ b/website/docs/cdktf/typescript/d/appintegrations_event_integration.html.markdown @@ -38,6 +38,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The AppIntegrations Event Integration name. ## Attribute Reference @@ -57,4 +58,4 @@ This data source exports the following attributes in addition to the arguments a * `source` - The source of the events. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/appmesh_gateway_route.html.markdown b/website/docs/cdktf/typescript/d/appmesh_gateway_route.html.markdown index 41c050321b35..9a8e0fac84d0 100644 --- a/website/docs/cdktf/typescript/d/appmesh_gateway_route.html.markdown +++ b/website/docs/cdktf/typescript/d/appmesh_gateway_route.html.markdown @@ -40,6 +40,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the gateway route. 
* `meshName` - (Required) Name of the service mesh in which the virtual gateway exists. * `virtualGatewayName` - (Required) Name of the virtual gateway in which the route exists. @@ -56,4 +57,4 @@ This data source exports the following attributes in addition to the arguments a * `spec` - Gateway route specification. See the [`aws_appmesh_gateway_route`](/docs/providers/aws/r/appmesh_gateway_route.html#spec) resource for details. * `tags` - Map of tags. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/appmesh_mesh.html.markdown b/website/docs/cdktf/typescript/d/appmesh_mesh.html.markdown index 73dab2c633a7..29b0ad67cfcd 100644 --- a/website/docs/cdktf/typescript/d/appmesh_mesh.html.markdown +++ b/website/docs/cdktf/typescript/d/appmesh_mesh.html.markdown @@ -61,6 +61,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the service mesh. * `meshOwner` - (Optional) AWS account ID of the service mesh's owner. @@ -75,4 +76,4 @@ This data source exports the following attributes in addition to the arguments a * `spec` - Service mesh specification. See the [`aws_appmesh_mesh`](/docs/providers/aws/r/appmesh_mesh.html#spec) resource for details. * `tags` - Map of tags. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/appmesh_route.html.markdown b/website/docs/cdktf/typescript/d/appmesh_route.html.markdown index 2c32c5883b1b..77a47b96653a 100644 --- a/website/docs/cdktf/typescript/d/appmesh_route.html.markdown +++ b/website/docs/cdktf/typescript/d/appmesh_route.html.markdown @@ -40,6 +40,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the route. * `meshName` - (Required) Name of the service mesh in which the virtual router exists. * `virtualRouterName` - (Required) Name of the virtual router in which the route exists. @@ -56,4 +57,4 @@ This data source exports the following attributes in addition to the arguments a * `spec` - Route specification. See the [`aws_appmesh_route`](/docs/providers/aws/r/appmesh_route.html#spec) resource for details. * `tags` - Map of tags. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/appmesh_virtual_gateway.html.markdown b/website/docs/cdktf/typescript/d/appmesh_virtual_gateway.html.markdown index e3d5fdb4fcc7..694acf558326 100644 --- a/website/docs/cdktf/typescript/d/appmesh_virtual_gateway.html.markdown +++ b/website/docs/cdktf/typescript/d/appmesh_virtual_gateway.html.markdown @@ -65,6 +65,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the virtual gateway. * `meshName` - (Required) Name of the service mesh in which the virtual gateway exists. * `meshOwner` - (Optional) AWS account ID of the service mesh's owner. @@ -80,4 +81,4 @@ This data source exports the following attributes in addition to the arguments a * `spec` - Virtual gateway specification. See the [`aws_appmesh_virtual_gateway`](/docs/providers/aws/r/appmesh_virtual_gateway.html#spec) resource for details. * `tags` - Map of tags. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/appmesh_virtual_node.html.markdown b/website/docs/cdktf/typescript/d/appmesh_virtual_node.html.markdown index 850697cd8ea3..624e4f718096 100644 --- a/website/docs/cdktf/typescript/d/appmesh_virtual_node.html.markdown +++ b/website/docs/cdktf/typescript/d/appmesh_virtual_node.html.markdown @@ -39,6 +39,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the virtual node. * `meshName` - (Required) Name of the service mesh in which the virtual node exists. * `meshOwner` - (Optional) AWS account ID of the service mesh's owner. @@ -54,4 +55,4 @@ This data source exports the following attributes in addition to the arguments a * `spec` - Virtual node specification. See the [`aws_appmesh_virtual_node`](/docs/providers/aws/r/appmesh_virtual_node.html#spec) resource for details. * `tags` - Map of tags. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/appmesh_virtual_router.html.markdown b/website/docs/cdktf/typescript/d/appmesh_virtual_router.html.markdown index 478a8c7c5e12..b277d82aab4f 100644 --- a/website/docs/cdktf/typescript/d/appmesh_virtual_router.html.markdown +++ b/website/docs/cdktf/typescript/d/appmesh_virtual_router.html.markdown @@ -39,6 +39,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the virtual router. * `meshName` - (Required) Name of the mesh in which the virtual router exists @@ -53,4 +54,4 @@ This data source exports the following attributes in addition to the arguments a * `spec` - Virtual routers specification. See the [`aws_appmesh_virtual_router`](/docs/providers/aws/r/appmesh_virtual_router.html#spec) resource for details. * `tags` - Map of tags. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/appmesh_virtual_service.html.markdown b/website/docs/cdktf/typescript/d/appmesh_virtual_service.html.markdown index b1432c799819..7de99722a4a1 100644 --- a/website/docs/cdktf/typescript/d/appmesh_virtual_service.html.markdown +++ b/website/docs/cdktf/typescript/d/appmesh_virtual_service.html.markdown @@ -63,6 +63,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the virtual service. * `meshName` - (Required) Name of the service mesh in which the virtual service exists. * `meshOwner` - (Optional) AWS account ID of the service mesh's owner. @@ -78,4 +79,4 @@ This data source exports the following attributes in addition to the arguments a * `spec` - Virtual service specification. See the [`aws_appmesh_virtual_service`](/docs/providers/aws/r/appmesh_virtual_service.html#spec) resource for details. * `tags` - Map of tags. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/apprunner_hosted_zone_id.html.markdown b/website/docs/cdktf/typescript/d/apprunner_hosted_zone_id.html.markdown index 2f3589da711d..6e7ccf7a99d6 100644 --- a/website/docs/cdktf/typescript/d/apprunner_hosted_zone_id.html.markdown +++ b/website/docs/cdktf/typescript/d/apprunner_hosted_zone_id.html.markdown @@ -48,13 +48,12 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: -* `region` - (Optional) Name of the region whose AWS App Runner service HostedZoneId is desired. - Defaults to the region from the AWS provider configuration. +* `region` - (Optional) Name of the Region whose AWS App Runner service HostedZoneId is desired. Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). ## Attribute Reference This data source exports the following attributes in addition to the arguments above: -* `id` - ID of the AWS App Runner service HostedZoneId in the selected region. +* `id` - ID of the AWS App Runner service HostedZoneId in the selected Region. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/appstream_image.html.markdown b/website/docs/cdktf/typescript/d/appstream_image.html.markdown index 748e7ec34043..42fcea268e06 100644 --- a/website/docs/cdktf/typescript/d/appstream_image.html.markdown +++ b/website/docs/cdktf/typescript/d/appstream_image.html.markdown @@ -40,6 +40,7 @@ class MyConvertedCode extends TerraformStack { The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - Name of the image being searched for. Cannot be used with name_regex or arn. * `nameRegex` - Regular expression name of the image being searched for. Cannot be used with arn or name. * `arn` - Arn of the image being searched for. Cannot be used with name_regex or name. @@ -59,7 +60,7 @@ This data source exports the following attributes in addition to the arguments a * `icon_s3_location` - A list named icon_s3_location that contains the following: * `s3Bucket` - S3 bucket of the S3 object. * `s3Key` - S3 key of the S3 object. - * `iconUrl` - URL of the application icon. This URL may be time-limited. + * `icon_url` - URL of the application icon. This URL may be time-limited. * `instance_families` - List of the instance families of the application. * `launch_parameters` - Arguments that are passed to the application at it's launch. * `launchPath` - Path to the application's excecutable in the instance. @@ -88,4 +89,4 @@ This data source exports the following attributes in addition to the arguments a * `state` - Current state of image. Image starts in PENDING state which changes to AVAILABLE if creation passes and FAILED if it fails. 
Values will be from: PENDING | AVAILABLE | FAILED | COPYING | DELETING | CREATING | IMPORTING. * `visibility` - Visibility type enum indicating whether the image is PUBLIC, PRIVATE, or SHARED. Valid values include: PUBLIC | PRIVATE | SHARED. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/arn.html.markdown b/website/docs/cdktf/typescript/d/arn.html.markdown index c5ac7789ebd1..d77ee3b76dbc 100644 --- a/website/docs/cdktf/typescript/d/arn.html.markdown +++ b/website/docs/cdktf/typescript/d/arn.html.markdown @@ -45,15 +45,11 @@ This data source supports the following arguments: This data source exports the following attributes in addition to the arguments above: * `partition` - Partition that the resource is in. - * `service` - The [service namespace](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#genref-aws-service-namespaces) that identifies the AWS product. - * `region` - Region the resource resides in. -Note that the ARNs for some resources do not require a region, so this component might be omitted. - +Note that the ARNs for some resources do not include a Region, so this component might be omitted. * `account` - The [ID](https://docs.aws.amazon.com/general/latest/gr/acct-identifiers.html) of the AWS account that owns the resource, without the hyphens. - * `resource` - Content of this part of the ARN varies by service. It often includes an indicator of the type of resource—for example, an IAM user or Amazon RDS database —followed by a slash (/) or a colon (:), followed by the resource name itself. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/athena_named_query.html.markdown b/website/docs/cdktf/typescript/d/athena_named_query.html.markdown index 79262e3252bd..186265e7dbe7 100644 --- a/website/docs/cdktf/typescript/d/athena_named_query.html.markdown +++ b/website/docs/cdktf/typescript/d/athena_named_query.html.markdown @@ -38,6 +38,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The plain language name for the query. Maximum length of 128. * `workgroup` - (Optional) The workgroup to which the query belongs. Defaults to `primary`. @@ -50,4 +51,4 @@ This data source exports the following attributes in addition to the arguments a * `id` - The unique ID of the query. * `query` - Text of the query itself. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/auditmanager_control.html.markdown b/website/docs/cdktf/typescript/d/auditmanager_control.html.markdown index 176305ffa2d4..3e75d7f9238e 100644 --- a/website/docs/cdktf/typescript/d/auditmanager_control.html.markdown +++ b/website/docs/cdktf/typescript/d/auditmanager_control.html.markdown @@ -96,6 +96,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
* `name` - (Required) Name of the control. * `type` - (Required) Type of control. Valid values are `Custom` and `Standard`. @@ -105,4 +106,4 @@ This data source exports the following attributes in addition to the arguments a See the [`aws_auditmanager_control` resource](/docs/providers/aws/r/auditmanager_control.html) for details on the returned attributes - they are identical. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/auditmanager_framework.html.markdown b/website/docs/cdktf/typescript/d/auditmanager_framework.html.markdown index 7f35d6d762f5..f2ebb330ebd3 100644 --- a/website/docs/cdktf/typescript/d/auditmanager_framework.html.markdown +++ b/website/docs/cdktf/typescript/d/auditmanager_framework.html.markdown @@ -41,6 +41,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the framework. * `type` - (Required) Type of framework. Valid values are `Custom` and `Standard`. @@ -50,4 +51,4 @@ This data source exports the following attributes in addition to the arguments a See the [`aws_auditmanager_framework` resource](/docs/providers/aws/r/auditmanager_framework.html) for details on the returned attributes - they are identical. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/autoscaling_group.html.markdown b/website/docs/cdktf/typescript/d/autoscaling_group.html.markdown index cc7ceff18573..d6cec26be213 100644 --- a/website/docs/cdktf/typescript/d/autoscaling_group.html.markdown +++ b/website/docs/cdktf/typescript/d/autoscaling_group.html.markdown @@ -38,6 +38,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - Specify the exact name of the desired autoscaling group. ## Attribute Reference @@ -152,4 +153,4 @@ This data source exports the following attributes in addition to the arguments a ~> **NOTE:** Some values are not always set and may not be available for interpolation. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/autoscaling_groups.html.markdown b/website/docs/cdktf/typescript/d/autoscaling_groups.html.markdown index dc8ab9eaa409..14d7c64546d0 100644 --- a/website/docs/cdktf/typescript/d/autoscaling_groups.html.markdown +++ b/website/docs/cdktf/typescript/d/autoscaling_groups.html.markdown @@ -59,6 +59,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
* `names` - (Optional) List of autoscaling group names * `filter` - (Optional) Filter used to scope the list e.g., by tags. See [related docs](http://docs.aws.amazon.com/AutoScaling/latest/APIReference/API_Filter.html). * `name` - (Required) Name of the DescribeAutoScalingGroup filter. The recommended values are: `tag-key`, `tag-value`, and `tag:` @@ -72,4 +73,4 @@ This data source exports the following attributes in addition to the arguments a * `id` - AWS Region. * `names` - List of the Autoscaling Groups in the current region. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/availability_zone.html.markdown b/website/docs/cdktf/typescript/d/availability_zone.html.markdown index 80633fea9d17..d9a0c5a6b305 100644 --- a/website/docs/cdktf/typescript/d/availability_zone.html.markdown +++ b/website/docs/cdktf/typescript/d/availability_zone.html.markdown @@ -11,10 +11,10 @@ description: |- # Data Source: aws_availability_zone `aws_availability_zone` provides details about a specific availability zone (AZ) -in the current region. +in the current Region. This can be used both to validate an availability zone given in a variable -and to split the AZ name into its component parts of an AWS region and an +and to split the AZ name into its component parts of an AWS Region and an AZ identifier letter. The latter may be useful e.g., for implementing a consistent subnet numbering scheme across several regions by mapping both the region and the subnet letter to network numbers. @@ -101,6 +101,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
* `allAvailabilityZones` - (Optional) Set to `true` to include all Availability Zones and Local Zones regardless of your opt in status. * `filter` - (Optional) Configuration block(s) for filtering. Detailed below. * `name` - (Optional) Full name of the availability zone to select. @@ -122,7 +123,8 @@ The `filter` configuration block supports the following arguments: This data source exports the following attributes in addition to the arguments above: -* `groupName` - For Availability Zones, this is the same value as the Region name. For Local Zones, the name of the associated group, for example `us-west-2-lax-1`. +* `groupLongName` - The long name of the Availability Zone group, Local Zone group, or Wavelength Zone group. +* `groupName` - The name of the zone group. For example: `us-east-1-zg-1`, `us-west-2-lax-1`, or `us-east-1-wl1-bos-wlz-1`. * `nameSuffix` - Part of the AZ name that appears after the region name, uniquely identifying the AZ within its region. For Availability Zones this is usually a single letter, for example `a` for the `us-west-2a` zone. For Local and Wavelength Zones this is a longer string, for example `wl1-sfo-wlz-1` for the `us-west-2-wl1-sfo-wlz-1` zone. @@ -130,7 +132,6 @@ For Local and Wavelength Zones this is a longer string, for example `wl1-sfo-wlz * `optInStatus` - For Availability Zones, this always has the value of `opt-in-not-required`. For Local Zones, this is the opt in status. The possible values are `opted-in` and `not-opted-in`. * `parentZoneId` - ID of the zone that handles some of the Local Zone or Wavelength Zone control plane operations, such as API calls. * `parentZoneName` - Name of the zone that handles some of the Local Zone or Wavelength Zone control plane operations, such as API calls. -* `region` - Region where the selected availability zone resides. This is always the region selected on the provider, since this data source searches only within that region. * `zoneType` - Type of zone. 
Values are `availability-zone`, `local-zone`, and `wavelength-zone`. ## Timeouts @@ -139,4 +140,4 @@ For Local and Wavelength Zones this is a longer string, for example `wl1-sfo-wlz - `read` - (Default `20m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/availability_zones.html.markdown b/website/docs/cdktf/typescript/d/availability_zones.html.markdown index e0e026241dcf..edfc3d710266 100644 --- a/website/docs/cdktf/typescript/d/availability_zones.html.markdown +++ b/website/docs/cdktf/typescript/d/availability_zones.html.markdown @@ -117,6 +117,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `allAvailabilityZones` - (Optional) Set to `true` to include all Availability Zones and Local Zones regardless of your opt in status. * `filter` - (Optional) Configuration block(s) for filtering. Detailed below. * `excludeNames` - (Optional) List of Availability Zone names to exclude. @@ -150,4 +151,4 @@ Note that the indexes of Availability Zone names and IDs correspond. 
- `read` - (Default `20m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/backup_framework.html.markdown b/website/docs/cdktf/typescript/d/backup_framework.html.markdown index 1557c876849f..bb965735d387 100644 --- a/website/docs/cdktf/typescript/d/backup_framework.html.markdown +++ b/website/docs/cdktf/typescript/d/backup_framework.html.markdown @@ -38,6 +38,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Backup framework name. ## Attribute Reference @@ -76,4 +77,4 @@ This data source exports the following attributes in addition to the arguments a * `complianceResourceTypes` - Describes whether the control scope includes one or more types of resources, such as EFS or RDS. * `tags` - Tag key-value pair applied to those AWS resources that you want to trigger an evaluation for a rule. A maximum of one key-value pair can be provided. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/backup_plan.html.markdown b/website/docs/cdktf/typescript/d/backup_plan.html.markdown index 1d7f613682ea..c41f00013f0c 100644 --- a/website/docs/cdktf/typescript/d/backup_plan.html.markdown +++ b/website/docs/cdktf/typescript/d/backup_plan.html.markdown @@ -38,6 +38,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `planId` - (Required) Backup plan ID. ## Attribute Reference @@ -50,4 +51,4 @@ This data source exports the following attributes in addition to the arguments a * `tags` - Metadata that you can assign to help organize the plans you create. * `version` - Unique, randomly generated, Unicode, UTF-8 encoded string that serves as the version ID of the backup plan. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/backup_report_plan.html.markdown b/website/docs/cdktf/typescript/d/backup_report_plan.html.markdown index 412b3a17fc3a..17be0f324660 100644 --- a/website/docs/cdktf/typescript/d/backup_report_plan.html.markdown +++ b/website/docs/cdktf/typescript/d/backup_report_plan.html.markdown @@ -38,6 +38,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Backup report plan name. ## Attribute Reference @@ -72,4 +73,4 @@ This data source exports the following attributes in addition to the arguments a * `regions` - (Optional) Specifies the list of regions a report covers. * `reportTemplate` - Identifies the report template for the report. Reports are built using a report template. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/backup_selection.html.markdown b/website/docs/cdktf/typescript/d/backup_selection.html.markdown index fe22866b6a6e..05a4b0ad301e 100644 --- a/website/docs/cdktf/typescript/d/backup_selection.html.markdown +++ b/website/docs/cdktf/typescript/d/backup_selection.html.markdown @@ -39,6 +39,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `planId` - (Required) Backup plan ID associated with the selection of resources. * `selectionId` - (Required) Backup selection ID. @@ -50,4 +51,4 @@ This data source exports the following attributes in addition to the arguments a * `iamRoleArn` - ARN of the IAM role that AWS Backup uses to authenticate when restoring and backing up the target resource. See the [AWS Backup Developer Guide](https://docs.aws.amazon.com/aws-backup/latest/devguide/access-control.html#managed-policies) for additional information about using AWS managed policies or creating custom policies attached to the IAM role. * `resources` - An array of strings that either contain Amazon Resource Names (ARNs) or match patterns of resources to assign to a backup plan.. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/backup_vault.html.markdown b/website/docs/cdktf/typescript/d/backup_vault.html.markdown index 86fbcdc8f673..7646bb45b260 100644 --- a/website/docs/cdktf/typescript/d/backup_vault.html.markdown +++ b/website/docs/cdktf/typescript/d/backup_vault.html.markdown @@ -38,6 +38,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the backup vault. ## Attribute Reference @@ -49,4 +50,4 @@ This data source exports the following attributes in addition to the arguments a * `recoveryPoints` - Number of recovery points that are stored in a backup vault. * `tags` - Metadata that you can assign to help organize the resources that you create. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/batch_compute_environment.html.markdown b/website/docs/cdktf/typescript/d/batch_compute_environment.html.markdown index 5fa24dbf8236..b0a3859feef1 100644 --- a/website/docs/cdktf/typescript/d/batch_compute_environment.html.markdown +++ b/website/docs/cdktf/typescript/d/batch_compute_environment.html.markdown @@ -28,7 +28,7 @@ class MyConvertedCode extends TerraformStack { constructor(scope: Construct, name: string) { super(scope, name); new DataAwsBatchComputeEnvironment(this, "batch-mongo", { - computeEnvironmentName: "batch-mongo-production", + name: "batch-mongo-production", }); } } @@ -39,7 +39,8 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: -* `computeEnvironmentName` - (Required) Name of the Batch Compute Environment +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `name` - (Required) Name of the Batch Compute Environment ## Attribute Reference @@ -55,4 +56,4 @@ This data source exports the following attributes in addition to the arguments a * `updatePolicy` - Specifies the infrastructure update policy for the compute environment. 
* `tags` - Key-value map of resource tags - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/batch_job_definition.html.markdown b/website/docs/cdktf/typescript/d/batch_job_definition.html.markdown index 5a530a2f51cf..a3ae94230c34 100644 --- a/website/docs/cdktf/typescript/d/batch_job_definition.html.markdown +++ b/website/docs/cdktf/typescript/d/batch_job_definition.html.markdown @@ -63,6 +63,7 @@ class MyConvertedCode extends TerraformStack { The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `arn` - ARN of the Job Definition. Do not begin the description with "An", "The", "Defines", "Indicates", or "Specifies," as these are verbose. In other words, "Indicates the amount of storage," can be rewritten as "Amount of storage," without losing any information. * `revision` - The revision of the job definition. * `name` - The name of the job definition to register. It can be up to 128 letters long. It can contain uppercase and lowercase letters, numbers, hyphens (-), and underscores (_). @@ -305,4 +306,4 @@ This data source exports the following attributes in addition to the arguments a * `attemptDurationSeconds` - The job timeout time (in seconds) that's measured from the job attempt's startedAt timestamp. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/batch_job_queue.html.markdown b/website/docs/cdktf/typescript/d/batch_job_queue.html.markdown index 5585cccaf706..82cc454ef8a9 100644 --- a/website/docs/cdktf/typescript/d/batch_job_queue.html.markdown +++ b/website/docs/cdktf/typescript/d/batch_job_queue.html.markdown @@ -39,6 +39,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the job queue. ## Attribute Reference @@ -64,4 +65,4 @@ This data source exports the following attributes in addition to the arguments a * `job_state_time_limit_action.#.reason` - The reason to log for the action being taken. * `job_state_time_limit_action.#.state` - The state of the job needed to trigger the action. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/batch_scheduling_policy.html.markdown b/website/docs/cdktf/typescript/d/batch_scheduling_policy.html.markdown index 67265bc0547b..51547598fad5 100644 --- a/website/docs/cdktf/typescript/d/batch_scheduling_policy.html.markdown +++ b/website/docs/cdktf/typescript/d/batch_scheduling_policy.html.markdown @@ -38,6 +38,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
* `arn` - (Required) ARN of the scheduling policy. ## Attribute Reference @@ -59,4 +60,4 @@ A `shareDistribution` block supports the following arguments: * `shareIdentifier` - Fair share identifier or fair share identifier prefix. For more information, see [ShareAttributes](https://docs.aws.amazon.com/batch/latest/APIReference/API_ShareAttributes.html). * `weightFactor` - Weight factor for the fair share identifier. For more information, see [ShareAttributes](https://docs.aws.amazon.com/batch/latest/APIReference/API_ShareAttributes.html). - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/bedrock_custom_model.html.markdown b/website/docs/cdktf/typescript/d/bedrock_custom_model.html.markdown index d85139ac6f23..e39b32654fb1 100644 --- a/website/docs/cdktf/typescript/d/bedrock_custom_model.html.markdown +++ b/website/docs/cdktf/typescript/d/bedrock_custom_model.html.markdown @@ -39,7 +39,8 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: -* `modelId` – (Required) Name or ARN of the custom model. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `modelId` - (Required) Name or ARN of the custom model. ## Attribute Reference @@ -66,5 +67,5 @@ This data source exports the following attributes in addition to the arguments a * `s3Uri` - The S3 URI where the validation data is stored.. * `validationMetrics` - The loss metric for each validator that you provided. * `validation_loss` - The validation loss associated with the validator. 
- - \ No newline at end of file + + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/bedrock_custom_models.html.markdown b/website/docs/cdktf/typescript/d/bedrock_custom_models.html.markdown index 735bbff74151..3826e1e9bd4a 100644 --- a/website/docs/cdktf/typescript/d/bedrock_custom_models.html.markdown +++ b/website/docs/cdktf/typescript/d/bedrock_custom_models.html.markdown @@ -34,7 +34,9 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -This data source does not support any arguments. +This data source supports the following arguments: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). ## Attribute Reference @@ -45,4 +47,4 @@ This data source exports the following attributes in addition to the arguments a * `modelArn` - The ARN of the custom model. * `modelName` - The name of the custom model. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/bedrock_foundation_model.html.markdown b/website/docs/cdktf/typescript/d/bedrock_foundation_model.html.markdown index 0a3a6a4321ad..ddd1afd4406d 100644 --- a/website/docs/cdktf/typescript/d/bedrock_foundation_model.html.markdown +++ b/website/docs/cdktf/typescript/d/bedrock_foundation_model.html.markdown @@ -50,7 +50,8 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: -* `modelId` – (Required) Model identifier. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
+* `modelId` - (Required) Model identifier. ## Attribute Reference @@ -65,4 +66,4 @@ This data source exports the following attributes in addition to the arguments a * `providerName` - Model provider name. * `responseStreamingSupported` - Indicates whether the model supports streaming. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/bedrock_foundation_models.html.markdown b/website/docs/cdktf/typescript/d/bedrock_foundation_models.html.markdown index 42bcbad4575b..3df4a2152228 100644 --- a/website/docs/cdktf/typescript/d/bedrock_foundation_models.html.markdown +++ b/website/docs/cdktf/typescript/d/bedrock_foundation_models.html.markdown @@ -60,6 +60,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `byCustomizationType` - (Optional) Customization type to filter on. Valid values are `FINE_TUNING`. * `byInferenceType` - (Optional) Inference type to filter on. Valid values are `ON_DEMAND` and `PROVISIONED`. * `byOutputModality` - (Optional) Output modality to filter on. Valid values are `TEXT`, `IMAGE`, and `EMBEDDING`. @@ -84,4 +85,4 @@ This data source exports the following attributes in addition to the arguments a * `providerName` - Model provider name. * `responseStreamingSupported` - Indicates whether the model supports streaming. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/bedrock_inference_profile.html.markdown b/website/docs/cdktf/typescript/d/bedrock_inference_profile.html.markdown index e9bfc236fcb7..c2c483c7c162 100644 --- a/website/docs/cdktf/typescript/d/bedrock_inference_profile.html.markdown +++ b/website/docs/cdktf/typescript/d/bedrock_inference_profile.html.markdown @@ -50,7 +50,8 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: -- `inferenceProfileId` – (Required) Inference Profile identifier. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `inferenceProfileId` - (Required) Inference Profile identifier. ## Attribute Reference @@ -69,4 +70,4 @@ This data source exports the following attributes in addition to the arguments a - `modelArn` - The Amazon Resource Name (ARN) of the model. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/bedrock_inference_profiles.html.markdown b/website/docs/cdktf/typescript/d/bedrock_inference_profiles.html.markdown index 0fce6d6be27d..f14f28c76338 100644 --- a/website/docs/cdktf/typescript/d/bedrock_inference_profiles.html.markdown +++ b/website/docs/cdktf/typescript/d/bedrock_inference_profiles.html.markdown @@ -10,7 +10,7 @@ description: |- # Data Source: aws_bedrock_inference_profiles -Terraform data source for managing AWS Bedrock AWS Bedrock Inference Profiles. +Terraform data source for managing AWS Bedrock Inference Profiles. ## Example Usage @@ -34,9 +34,34 @@ class MyConvertedCode extends TerraformStack { ``` +### Filter by Type + +```typescript +// DO NOT EDIT. 
Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. + */ +import { DataAwsBedrockInferenceProfiles } from "./.gen/providers/aws/data-aws-bedrock-inference-profiles"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + new DataAwsBedrockInferenceProfiles(this, "test", { + type: "APPLICATION", + }); + } +} + +``` + ## Argument Reference -This data source does not support any arguments. +This data source supports the following arguments: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `type` - (Optional) Filters for inference profiles that match the type you specify. Valid values are: `SYSTEM_DEFINED`, `APPLICATION`. ## Attribute Reference @@ -46,18 +71,18 @@ This data source exports the following attributes in addition to the arguments a ### `inferenceProfileSummaries` -- `createdAt` - The time at which the inference profile was created. -- `description` - The description of the inference profile. -- `inferenceProfileArn` - The Amazon Resource Name (ARN) of the inference profile. -- `inferenceProfileId` - The unique identifier of the inference profile. -- `inferenceProfileName` - The name of the inference profile. -- `models` - A list of information about each model in the inference profile. See [`models`](#models). -- `status` - The status of the inference profile. `ACTIVE` means that the inference profile is available to use. -- `type` - The type of the inference profile. 
`SYSTEM_DEFINED` means that the inference profile is defined by Amazon Bedrock. -- `updatedAt` - The time at which the inference profile was last updated. +- `createdAt` - Time at which the inference profile was created. +- `description` - Description of the inference profile. +- `inferenceProfileArn` - Amazon Resource Name (ARN) of the inference profile. +- `inferenceProfileId` - Unique identifier of the inference profile. +- `inferenceProfileName` - Name of the inference profile. +- `models` - List of information about each model in the inference profile. See [`models` Block](#models). +- `status` - Status of the inference profile. `ACTIVE` means that the inference profile is available to use. +- `type` - Type of the inference profile. `SYSTEM_DEFINED` means that the inference profile is defined by Amazon Bedrock. `APPLICATION` means the inference profile was created by a user. +- `updatedAt` - Time at which the inference profile was last updated. ### `models` -- `modelArn` - The Amazon Resource Name (ARN) of the model. +- `modelArn` - Amazon Resource Name (ARN) of the model. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/bedrockagent_agent_versions.html.markdown b/website/docs/cdktf/typescript/d/bedrockagent_agent_versions.html.markdown index 6f30b2ecff0d..4db0ef2a46cd 100644 --- a/website/docs/cdktf/typescript/d/bedrockagent_agent_versions.html.markdown +++ b/website/docs/cdktf/typescript/d/bedrockagent_agent_versions.html.markdown @@ -40,6 +40,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `agentId` - (Required) Unique identifier of the agent. 
## Attribute Reference @@ -63,4 +64,4 @@ This data source exports the following attributes in addition to the arguments a * `guardrailIdentifier` - Unique identifier of the guardrail. * `guardrailVersion` - Version of the guardrail. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/billing_views.html.markdown b/website/docs/cdktf/typescript/d/billing_views.html.markdown new file mode 100644 index 000000000000..18bec840e474 --- /dev/null +++ b/website/docs/cdktf/typescript/d/billing_views.html.markdown @@ -0,0 +1,87 @@ +--- +subcategory: "Billing" +layout: "aws" +page_title: "AWS: aws_billing_views" +description: |- + Retrieve a list of AWS Billing Views. +--- + + + +# Data Source: aws_billing_views + +Provides a list of AWS Billing Views. + +## Example Usage + +### Basic Usage + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformOutput, Fn, TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. + */ +import { DataAwsBillingViews } from "./.gen/providers/aws/data-aws-billing-views"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + const example = new DataAwsBillingViews(this, "example", { + billingViewTypes: ["PRIMARY"], + }); + new TerraformOutput(this, "primary_view_arn_by_types", { + value: Fn.lookupNested(example.billingView, ["0", "arn"]), + }); + } +} + +``` + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformOutput, Fn, TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details.
+ */ +import { DataAwsBillingViews } from "./.gen/providers/aws/data-aws-billing-views"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + const example = new DataAwsBillingViews(this, "example", {}); + new TerraformOutput(this, "primary_view_arn_by_name", { + value: Fn.lookupNested( + "${[ for view in ${" + + example.billingView + + '} : view.arn if view.name == "Primary View"]}', + ["0"] + ), + }); + new TerraformOutput(this, "view_arns", { + value: "${[ for view in ${" + example.billingView + "} : view.arn]}", + }); + } +} + +``` + +## Argument Reference + +The following arguments are optional: + +* `billingViewTypes` - (Optional) List of billing view types to retrieve. Valid values are `PRIMARY`, `BILLING_GROUP`, `CUSTOM`. + +## Attribute Reference + +This data source exports the following attributes in addition to the arguments above: + +* `billingView` - List of billing view objects with the following attributes: + * `arn` - ARN of the billing view. + * `description` - Description of the billing view. + * `name` - Name of the billing view. + * `ownerAccountId` - Account ID of the billing view owner. + + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/budgets_budget.html.markdown b/website/docs/cdktf/typescript/d/budgets_budget.html.markdown index feb06d4495c6..191c7823ec24 100644 --- a/website/docs/cdktf/typescript/d/budgets_budget.html.markdown +++ b/website/docs/cdktf/typescript/d/budgets_budget.html.markdown @@ -52,6 +52,7 @@ The following arguments are optional: This data source exports the following attributes in addition to the arguments above: * `autoAdjustData` - Object containing [AutoAdjustData] which determines the budget amount for an auto-adjusting budget. +* `billingViewArn` - ARN of the billing view. * `budgetExceeded` - Boolean indicating whether this budget has been exceeded. 
* `budgetLimit` - The total amount of cost, usage, RI utilization, RI coverage, Savings Plans utilization, or Savings Plans coverage that you want to track with your budget. Contains object [Spend](#spend). * `budgetType` - Whether this budget tracks monetary cost or usage. @@ -150,4 +151,4 @@ Valid keys for `plannedLimit` parameter. * `amount` - The cost or usage amount that's associated with a budget forecast, actual spend, or budget threshold. Length Constraints: Minimum length of `1`. Maximum length of `2147483647`. * `unit` - The unit of measurement that's used for the budget forecast, actual spend, or budget threshold, such as USD or GBP. Length Constraints: Minimum length of `1`. Maximum length of `2147483647`. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/ce_cost_category.html.markdown b/website/docs/cdktf/typescript/d/ce_cost_category.html.markdown index 6660fa355ec8..477070f793e5 100644 --- a/website/docs/cdktf/typescript/d/ce_cost_category.html.markdown +++ b/website/docs/cdktf/typescript/d/ce_cost_category.html.markdown @@ -8,7 +8,7 @@ description: |- -# Resource: aws_ce_cost_category +# Data Source: aws_ce_cost_category Provides details about a specific CostExplorer Cost Category. @@ -105,4 +105,4 @@ This data source exports the following attributes in addition to the arguments a * `type` - Parameter type. * `values` - Parameter values. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/chatbot_slack_workspace.html.markdown b/website/docs/cdktf/typescript/d/chatbot_slack_workspace.html.markdown index 25a5d5d51663..fb19aacb5bf7 100644 --- a/website/docs/cdktf/typescript/d/chatbot_slack_workspace.html.markdown +++ b/website/docs/cdktf/typescript/d/chatbot_slack_workspace.html.markdown @@ -38,8 +38,9 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -The following arguments are required: +This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `slackTeamName` - (Required) Slack workspace name configured with AWS Chatbot. ## Attribute Reference @@ -48,4 +49,4 @@ This data source exports the following attributes in addition to the arguments a * `slackTeamId` - ID of the Slack Workspace assigned by AWS Chatbot. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/cloudcontrolapi_resource.html.markdown b/website/docs/cdktf/typescript/d/cloudcontrolapi_resource.html.markdown index 3598f2be19a4..6bc1849d645d 100644 --- a/website/docs/cdktf/typescript/d/cloudcontrolapi_resource.html.markdown +++ b/website/docs/cdktf/typescript/d/cloudcontrolapi_resource.html.markdown @@ -44,6 +44,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
* `roleArn` - (Optional) ARN of the IAM Role to assume for operations. * `typeVersionId` - (Optional) Identifier of the CloudFormation resource type version. @@ -53,4 +54,4 @@ This data source exports the following attributes in addition to the arguments a * `properties` - JSON string matching the CloudFormation resource type schema with current configuration. Underlying attributes can be referenced via the [`jsondecode()` function](https://www.terraform.io/docs/language/functions/jsondecode.html), for example, `jsondecode(data.aws_cloudcontrolapi_resource.example.properties)["example"]`. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/cloudformation_export.html.markdown b/website/docs/cdktf/typescript/d/cloudformation_export.html.markdown index 368022e7e089..502aa9565de1 100644 --- a/website/docs/cdktf/typescript/d/cloudformation_export.html.markdown +++ b/website/docs/cdktf/typescript/d/cloudformation_export.html.markdown @@ -47,6 +47,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the export as it appears in the console or from [list-exports](http://docs.aws.amazon.com/cli/latest/reference/cloudformation/list-exports.html) ## Attribute Reference @@ -56,4 +57,4 @@ This data source exports the following attributes in addition to the arguments a * `value` - Value from Cloudformation export identified by the export name found from [list-exports](http://docs.aws.amazon.com/cli/latest/reference/cloudformation/list-exports.html) * `exportingStackId` - ARN of stack that contains the exported output name and value. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/cloudformation_stack.html.markdown b/website/docs/cdktf/typescript/d/cloudformation_stack.html.markdown index 681e2234629d..f79a9e906615 100644 --- a/website/docs/cdktf/typescript/d/cloudformation_stack.html.markdown +++ b/website/docs/cdktf/typescript/d/cloudformation_stack.html.markdown @@ -50,6 +50,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the stack ## Attribute Reference @@ -67,4 +68,4 @@ This data source exports the following attributes in addition to the arguments a * `iamRoleArn` - ARN of the IAM role used to create the stack. * `timeoutInMinutes` - Amount of time that can pass before the stack status becomes `CREATE_FAILED` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/cloudformation_type.html.markdown b/website/docs/cdktf/typescript/d/cloudformation_type.html.markdown index 57414ae27328..917b253e4f7d 100644 --- a/website/docs/cdktf/typescript/d/cloudformation_type.html.markdown +++ b/website/docs/cdktf/typescript/d/cloudformation_type.html.markdown @@ -39,6 +39,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
* `arn` - (Optional) ARN of the CloudFormation Type. For example, `arn:aws:cloudformation:us-west-2::type/resource/AWS-EC2-VPC`. * `type` - (Optional) CloudFormation Registry Type. For example, `RESOURCE`. * `typeName` - (Optional) CloudFormation Type name. For example, `AWS::EC2::VPC`. @@ -62,4 +63,4 @@ This data source exports the following attributes in addition to the arguments a * `sourceUrl` - URL of the source code for the CloudFormation Type. * `visibility` - Scope of the CloudFormation Type. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/cloudfront_distribution.html.markdown b/website/docs/cdktf/typescript/d/cloudfront_distribution.html.markdown index 107e91d454b9..59ad389cdffb 100644 --- a/website/docs/cdktf/typescript/d/cloudfront_distribution.html.markdown +++ b/website/docs/cdktf/typescript/d/cloudfront_distribution.html.markdown @@ -48,6 +48,8 @@ This data source exports the following attributes in addition to the arguments a * `aliases` - List that contains information about CNAMEs (alternate domain names), if any, for this distribution. +* `anycastIpListId` - ID of the Anycast static IP list that is associated with the distribution, if any. + * `arn` - ARN (Amazon Resource Name) for the distribution. For example: arn:aws:cloudfront::123456789012:distribution/EDFDVBD632BHDS5, where 123456789012 is your AWS account ID. * `status` - Current status of the distribution. `Deployed` if the @@ -70,4 +72,4 @@ This data source exports the following attributes in addition to the arguments a alias for the zone ID `Z2FDTNDATAQYW2`. * `webAclId` AWS WAF web ACL associated with this distribution. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/cloudfront_log_delivery_canonical_user_id.html.markdown b/website/docs/cdktf/typescript/d/cloudfront_log_delivery_canonical_user_id.html.markdown index 0f91b4fb5d82..1a8e373a5640 100644 --- a/website/docs/cdktf/typescript/d/cloudfront_log_delivery_canonical_user_id.html.markdown +++ b/website/docs/cdktf/typescript/d/cloudfront_log_delivery_canonical_user_id.html.markdown @@ -84,12 +84,12 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: -* `region` - (Optional) Region you'd like the zone for. By default, fetches the current region. +* `region` - (Optional) Name of the Region whose canonical user ID is desired. Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). ## Attribute Reference This data source exports the following attributes in addition to the arguments above: -* `id` - Canonical user ID for the AWS `awslogsdelivery` account in the region. +* `id` - Canonical user ID for the AWS `awslogsdelivery` account in the Region. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/cloudhsm_v2_cluster.html.markdown b/website/docs/cdktf/typescript/d/cloudhsm_v2_cluster.html.markdown index cfa7bc864015..6f4bba8dd702 100644 --- a/website/docs/cdktf/typescript/d/cloudhsm_v2_cluster.html.markdown +++ b/website/docs/cdktf/typescript/d/cloudhsm_v2_cluster.html.markdown @@ -38,6 +38,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `clusterId` - (Required) ID of Cloud HSM v2 cluster. * `clusterState` - (Optional) State of the cluster to be found. @@ -56,4 +57,4 @@ This data source exports the following attributes in addition to the arguments a * `cluster_certificates.0.manufacturer_hardware_certificate` - The HSM hardware certificate issued (signed) by the hardware manufacturer. The number of available cluster certificates may vary depending on state of the cluster. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/cloudtrail_service_account.html.markdown b/website/docs/cdktf/typescript/d/cloudtrail_service_account.html.markdown index edd6a924b973..a16d60b240f2 100644 --- a/website/docs/cdktf/typescript/d/cloudtrail_service_account.html.markdown +++ b/website/docs/cdktf/typescript/d/cloudtrail_service_account.html.markdown @@ -13,7 +13,7 @@ description: |- Use this data source to get the Account ID of the [AWS CloudTrail Service Account](http://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-supported-regions.html) in a given region for the purpose of allowing CloudTrail to store trail data in S3. -~> **Note:** AWS documentation [states that](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/create-s3-bucket-policy-for-cloudtrail.html#troubleshooting-s3-bucket-policy) a [service principal name](https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements_principal.html#principal-services) should be used instead of an AWS account ID in any relevant IAM policy. +~> **Warning:** This data source is deprecated. 
The AWS documentation [states that](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/create-s3-bucket-policy-for-cloudtrail.html#troubleshooting-s3-bucket-policy) a [service principal name](https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements_principal.html#principal-services) should be used instead of an AWS account ID in any relevant IAM policy. ## Example Usage @@ -90,14 +90,13 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: -* `region` - (Optional) Name of the region whose AWS CloudTrail account ID is desired. -Defaults to the region from the AWS provider configuration. +* `region` - (Optional) Name of the Region whose AWS CloudTrail account ID is desired. Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). ## Attribute Reference This data source exports the following attributes in addition to the arguments above: -* `id` - ID of the AWS CloudTrail service account in the selected region. -* `arn` - ARN of the AWS CloudTrail service account in the selected region. +* `id` - ID of the AWS CloudTrail service account in the selected Region. +* `arn` - ARN of the AWS CloudTrail service account in the selected Region. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/cloudwatch_contributor_managed_insight_rules.html.markdown b/website/docs/cdktf/typescript/d/cloudwatch_contributor_managed_insight_rules.html.markdown index f7579d200cb8..a51f41f689de 100644 --- a/website/docs/cdktf/typescript/d/cloudwatch_contributor_managed_insight_rules.html.markdown +++ b/website/docs/cdktf/typescript/d/cloudwatch_contributor_managed_insight_rules.html.markdown @@ -41,6 +41,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `resourceArn` - (Required) ARN of an Amazon Web Services resource that has managed Contributor Insights rules. ## Attribute Reference @@ -60,4 +61,4 @@ This data source exports the following attributes in addition to the arguments a * `ruleName` - Name of the Contributor Insights rule that contains data for the specified Amazon Web Services resource. * `state` - Indicates whether the rule is enabled or disabled. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/cloudwatch_event_bus.html.markdown b/website/docs/cdktf/typescript/d/cloudwatch_event_bus.html.markdown index 0ab3622eb34f..8d57edbf8ac0 100644 --- a/website/docs/cdktf/typescript/d/cloudwatch_event_bus.html.markdown +++ b/website/docs/cdktf/typescript/d/cloudwatch_event_bus.html.markdown @@ -40,6 +40,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the event bus. ## Attribute Reference @@ -52,5 +53,8 @@ This data source exports the following attributes in addition to the arguments a * `description` - Event bus description. * `id` - Name of the event bus. * `kmsKeyIdentifier` - Identifier of the AWS KMS customer managed key for EventBridge to use to encrypt events on this event bus, if one has been specified. +* `logConfig` - Block for logging configuration settings for the event bus. + * `includeDetail` - Whether EventBridge includes detailed event information in the records it generates. + * `level` - Level of logging detail to include. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/cloudwatch_event_buses.html.markdown b/website/docs/cdktf/typescript/d/cloudwatch_event_buses.html.markdown index 10c04300b000..657da8f12b50 100644 --- a/website/docs/cdktf/typescript/d/cloudwatch_event_buses.html.markdown +++ b/website/docs/cdktf/typescript/d/cloudwatch_event_buses.html.markdown @@ -40,6 +40,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `namePrefix` - (Optional) Specifying this limits the results to only those event buses with names that start with the specified prefix. ## Attribute Reference @@ -57,4 +58,4 @@ This data source exports the following attributes in addition to the arguments a * `name` - The name of the event bus. 
* `policy` - The permissions policy of the event bus, describing which other AWS accounts can write events to this event bus. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/cloudwatch_event_connection.html.markdown b/website/docs/cdktf/typescript/d/cloudwatch_event_connection.html.markdown index 6e952667e5e4..014b48b8e90d 100644 --- a/website/docs/cdktf/typescript/d/cloudwatch_event_connection.html.markdown +++ b/website/docs/cdktf/typescript/d/cloudwatch_event_connection.html.markdown @@ -40,6 +40,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - Name of the connection. ## Attribute Reference @@ -51,4 +52,4 @@ This data source exports the following attributes in addition to the arguments a * `kmsKeyIdentifier` - (Optional) Identifier of the AWS KMS customer managed key for EventBridge to use to encrypt the connection, if one has been specified. * `secretArn` - ARN of the secret created from the authorization parameters specified for the connection. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/cloudwatch_event_source.html.markdown b/website/docs/cdktf/typescript/d/cloudwatch_event_source.html.markdown index 4309cf9a7d34..29b9c2062aa9 100644 --- a/website/docs/cdktf/typescript/d/cloudwatch_event_source.html.markdown +++ b/website/docs/cdktf/typescript/d/cloudwatch_event_source.html.markdown @@ -40,6 +40,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `namePrefix` - (Optional) Specifying this limits the results to only those partner event sources with names that start with the specified prefix ## Attribute Reference @@ -51,4 +52,4 @@ This data source exports the following attributes in addition to the arguments a * `name` - Name of the event source * `state` - State of the event source (`ACTIVE` or `PENDING`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/cloudwatch_log_group.html.markdown b/website/docs/cdktf/typescript/d/cloudwatch_log_group.html.markdown index 10d3756e9597..5c10fdd97413 100644 --- a/website/docs/cdktf/typescript/d/cloudwatch_log_group.html.markdown +++ b/website/docs/cdktf/typescript/d/cloudwatch_log_group.html.markdown @@ -38,6 +38,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the Cloudwatch log group ## Attribute Reference @@ -51,4 +52,4 @@ This data source exports the following attributes in addition to the arguments a * `retentionInDays` - Number of days log events retained in the specified log group. * `tags` - Map of tags to assign to the resource. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/cloudwatch_log_groups.html.markdown b/website/docs/cdktf/typescript/d/cloudwatch_log_groups.html.markdown index e6d08641f307..ab878c76d0e8 100644 --- a/website/docs/cdktf/typescript/d/cloudwatch_log_groups.html.markdown +++ b/website/docs/cdktf/typescript/d/cloudwatch_log_groups.html.markdown @@ -38,6 +38,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
* `logGroupNamePrefix` - (Optional) Group prefix of the Cloudwatch log groups to list ## Attribute Reference @@ -47,4 +48,4 @@ This data source exports the following attributes in addition to the arguments a * `arns` - Set of ARNs of the Cloudwatch log groups * `logGroupNames` - Set of names of the Cloudwatch log groups - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/codeartifact_authorization_token.html.markdown b/website/docs/cdktf/typescript/d/codeartifact_authorization_token.html.markdown index e253ce0ab604..35f242aec1d3 100644 --- a/website/docs/cdktf/typescript/d/codeartifact_authorization_token.html.markdown +++ b/website/docs/cdktf/typescript/d/codeartifact_authorization_token.html.markdown @@ -38,6 +38,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `domain` - (Required) Name of the domain that is in scope for the generated authorization token. * `domainOwner` - (Optional) Account number of the AWS account that owns the domain. * `durationSeconds` - (Optional) Time, in seconds, that the generated authorization token is valid. Valid values are `0` and between `900` and `43200`. @@ -49,4 +50,4 @@ This data source exports the following attributes in addition to the arguments a * `authorizationToken` - Temporary authorization token. * `expiration` - Time in UTC RFC3339 format when the authorization token expires. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/codeartifact_repository_endpoint.html.markdown b/website/docs/cdktf/typescript/d/codeartifact_repository_endpoint.html.markdown index ac7d17c10d26..97e7693e7944 100644 --- a/website/docs/cdktf/typescript/d/codeartifact_repository_endpoint.html.markdown +++ b/website/docs/cdktf/typescript/d/codeartifact_repository_endpoint.html.markdown @@ -40,6 +40,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `domain` - (Required) Name of the domain that contains the repository. * `repository` - (Required) Name of the repository. * `format` - (Required) Which endpoint of a repository to return. A repository has one endpoint for each package format: `npm`, `pypi`, `maven`, and `nuget`. @@ -51,4 +52,4 @@ This data source exports the following attributes in addition to the arguments a * `repositoryEndpoint` - URL of the returned endpoint. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/codebuild_fleet.html.markdown b/website/docs/cdktf/typescript/d/codebuild_fleet.html.markdown index 9df3c6125a17..349fc8d82fcc 100644 --- a/website/docs/cdktf/typescript/d/codebuild_fleet.html.markdown +++ b/website/docs/cdktf/typescript/d/codebuild_fleet.html.markdown @@ -82,8 +82,9 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -The following arguments are required: +This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Fleet name. ## Attribute Reference @@ -92,9 +93,10 @@ This data source exports the following attributes in addition to the arguments a * `arn` - ARN of the Fleet. * `baseCapacity` - Number of machines allocated to the fleet. -* `compute_configuration` - Compute configuration of the compute fleet. +* `computeConfiguration` - Compute configuration of the compute fleet. * `disk` - Amount of disk space of the instance type included in the fleet. - * `machine_type` - Machine type of the instance type included in the fleet. + * `instanceType` - EC2 instance type in the fleet. + * `machineType` - Machine type of the instance type included in the fleet. * `memory` - Amount of memory of the instance type included in the fleet. * `vcpu` - Number of vCPUs of the instance type included in the fleet. * `computeType` - Compute resources the compute fleet uses. @@ -122,4 +124,4 @@ This data source exports the following attributes in addition to the arguments a * `subnets` - A list of one or more subnet IDs in your Amazon VPC. * `vpcId` - The ID of the Amazon VPC. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/codecatalyst_dev_environment.html.markdown b/website/docs/cdktf/typescript/d/codecatalyst_dev_environment.html.markdown index da687dfcbfb6..c650180f712f 100644 --- a/website/docs/cdktf/typescript/d/codecatalyst_dev_environment.html.markdown +++ b/website/docs/cdktf/typescript/d/codecatalyst_dev_environment.html.markdown @@ -39,8 +39,9 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -The following arguments are required: +This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `envId` - - (Required) The system-generated unique ID of the Dev Environment for which you want to view information. To retrieve a list of Dev Environment IDs, use [ListDevEnvironments](https://docs.aws.amazon.com/codecatalyst/latest/APIReference/API_ListDevEnvironments.html). * `projectName` - (Required) The name of the project in the space. * `spaceName` - (Required) The name of the space. @@ -60,4 +61,4 @@ This data source exports the following attributes in addition to the arguments a * `status` - The current status of the Dev Environment. From: PENDING | RUNNING | STARTING | STOPPING | STOPPED | FAILED | DELETING | DELETED. * `statusReason` - The reason for the status. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/codecommit_approval_rule_template.html.markdown b/website/docs/cdktf/typescript/d/codecommit_approval_rule_template.html.markdown index b1f9586ededa..b24f4affbdf9 100644 --- a/website/docs/cdktf/typescript/d/codecommit_approval_rule_template.html.markdown +++ b/website/docs/cdktf/typescript/d/codecommit_approval_rule_template.html.markdown @@ -38,6 +38,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name for the approval rule template. This needs to be less than 100 characters. 
## Attribute Reference @@ -52,4 +53,4 @@ This data source exports the following attributes in addition to the arguments a * `lastModifiedUser` - ARN of the user who made the most recent changes to the approval rule template. * `ruleContentSha256` - SHA-256 hash signature for the content of the approval rule template. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/codecommit_repository.html.markdown b/website/docs/cdktf/typescript/d/codecommit_repository.html.markdown index fb68c5832e89..214fd757de00 100644 --- a/website/docs/cdktf/typescript/d/codecommit_repository.html.markdown +++ b/website/docs/cdktf/typescript/d/codecommit_repository.html.markdown @@ -38,6 +38,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `repositoryName` - (Required) Name for the repository. This needs to be less than 100 characters. ## Attribute Reference @@ -50,4 +51,4 @@ This data source exports the following attributes in addition to the arguments a * `cloneUrlHttp` - URL to use for cloning the repository over HTTPS. * `cloneUrlSsh` - URL to use for cloning the repository over SSH. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/codeguruprofiler_profiling_group.html.markdown b/website/docs/cdktf/typescript/d/codeguruprofiler_profiling_group.html.markdown index 5620caacf2bd..ec6d9d448c7f 100644 --- a/website/docs/cdktf/typescript/d/codeguruprofiler_profiling_group.html.markdown +++ b/website/docs/cdktf/typescript/d/codeguruprofiler_profiling_group.html.markdown @@ -38,8 +38,9 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -The following arguments are required: +This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The name of the profiling group. ## Attribute Reference @@ -54,4 +55,4 @@ This data source exports the following attributes in addition to the arguments a * `tags` - Mapping of Key-Value tags for the resource. * `updatedAt` - Timestamp when Profiling Group was updated. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/codestarconnections_connection.html.markdown b/website/docs/cdktf/typescript/d/codestarconnections_connection.html.markdown index 4243e9cdb946..a2de9d70d3fb 100644 --- a/website/docs/cdktf/typescript/d/codestarconnections_connection.html.markdown +++ b/website/docs/cdktf/typescript/d/codestarconnections_connection.html.markdown @@ -62,6 +62,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `arn` - (Optional) CodeStar Connection ARN. * `name` - (Optional) CodeStar Connection name. @@ -78,4 +79,4 @@ This data source exports the following attributes in addition to the arguments a * `providerType` - Name of the external provider where your third-party code repository is configured. Possible values are `Bitbucket`, `GitHub` and `GitLab`. For connections to GitHub Enterprise Server or GitLab Self-Managed instances, you must create an [aws_codestarconnections_host](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/codestarconnections_host) resource and use `hostArn` instead. * `tags` - Map of key-value resource tags to associate with the resource. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/cognito_identity_pool.html.markdown b/website/docs/cdktf/typescript/d/cognito_identity_pool.html.markdown index e2376f38ad58..f6a333f61d5c 100644 --- a/website/docs/cdktf/typescript/d/cognito_identity_pool.html.markdown +++ b/website/docs/cdktf/typescript/d/cognito_identity_pool.html.markdown @@ -40,6 +40,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `identityPoolName` - (Required) The Cognito Identity Pool name. ## Attribute Reference @@ -57,4 +58,4 @@ This data source exports the following attributes in addition to the arguments a * `supportedLoginProviders` - Key-Value pairs mapping provider names to provider app IDs. 
* `tags` - A map of tags to assigned to the Identity Pool. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/cognito_user_group.html.markdown b/website/docs/cdktf/typescript/d/cognito_user_group.html.markdown index dd04abce69bd..fa007d668f49 100644 --- a/website/docs/cdktf/typescript/d/cognito_user_group.html.markdown +++ b/website/docs/cdktf/typescript/d/cognito_user_group.html.markdown @@ -41,6 +41,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the user group. * `userPoolId` - (Required) User pool the client belongs to. @@ -53,4 +54,4 @@ This data source exports the following attributes in addition to the arguments a * `precedence` - Precedence of the user group. * `roleArn` - ARN of the IAM role to be associated with the user group. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/cognito_user_groups.html.markdown b/website/docs/cdktf/typescript/d/cognito_user_groups.html.markdown index 9661e71dd229..cdfcd86c7671 100644 --- a/website/docs/cdktf/typescript/d/cognito_user_groups.html.markdown +++ b/website/docs/cdktf/typescript/d/cognito_user_groups.html.markdown @@ -40,6 +40,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
* `userPoolId` - (Required) User pool the client belongs to. ## Attribute Reference @@ -56,4 +57,4 @@ This data source exports the following attributes in addition to the arguments a * `precedence` - Precedence of the user group. * `roleArn` - ARN of the IAM role to be associated with the user group. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/cognito_user_pool.html.markdown b/website/docs/cdktf/typescript/d/cognito_user_pool.html.markdown index c0f465c4a7bf..91718f1e55a8 100644 --- a/website/docs/cdktf/typescript/d/cognito_user_pool.html.markdown +++ b/website/docs/cdktf/typescript/d/cognito_user_pool.html.markdown @@ -40,6 +40,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `userPoolId` - (Required) The cognito pool ID ## Attribute Reference @@ -137,11 +138,11 @@ This data source exports the following attributes in addition to the arguments a ### user pool add-ons -* `advanced_security_additional_flows` - A block of the threat protection configuration options for additional authentication types in your user pool, including custom authentication. [Detailed below](#advanced-security-additional-flows). +* `advancedSecurityAdditionalFlows` - A block of the threat protection configuration options for additional authentication types in your user pool, including custom authentication. [Detailed below](#advanced-security-additional-flows). * `advancedSecurityMode` - Mode for advanced security. ### advanced security additional flows -* `custom_auth_mode` - Mode of threat protection operation in custom authentication. 
+* `customAuthMode` - Mode of threat protection operation in custom authentication. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/cognito_user_pool_client.html.markdown b/website/docs/cdktf/typescript/d/cognito_user_pool_client.html.markdown index c3f1f8bfaabd..47585437f309 100644 --- a/website/docs/cdktf/typescript/d/cognito_user_pool_client.html.markdown +++ b/website/docs/cdktf/typescript/d/cognito_user_pool_client.html.markdown @@ -39,6 +39,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `clientId` - (Required) Client Id of the user pool. * `userPoolId` - (Required) User pool the client belongs to. @@ -61,7 +62,7 @@ This data source exports the following attributes in addition to the arguments a * `logoutUrls` - (Optional) List of allowed logout URLs for the identity providers. * `preventUserExistenceErrors` - (Optional) Choose which errors and responses are returned by Cognito APIs during authentication, account confirmation, and password recovery when the user does not exist in the user pool. When set to `ENABLED` and the user does not exist, authentication returns an error indicating either the username or password was incorrect, and account confirmation and password recovery return a response indicating a code was sent to a simulated destination. When set to `LEGACY`, those APIs will return a `UserNotFoundException` exception if the user does not exist in the user pool. * `readAttributes` - (Optional) List of user pool attributes the application client can read from. 
-* `refresh_token_rotation` - (Optional) A block that specifies the configuration of refresh token rotation. [Detailed below](#refresh_token_rotation). +* `refreshTokenRotation` - (Optional) A block that specifies the configuration of refresh token rotation. [Detailed below](#refresh_token_rotation). * `refreshTokenValidity` - (Optional) Time limit in days refresh tokens are valid for. * `supportedIdentityProviders` - (Optional) List of provider names for the identity providers that are supported on this client. Uses the `providerName` attribute of `aws_cognito_identity_provider` resource(s), or the equivalent string(s). * `tokenValidityUnits` - (Optional) Configuration block for units in which the validity times are represented in. [Detailed below](#token_validity_units). @@ -80,7 +81,7 @@ Either `applicationArn` or `applicationId` is required. ### refresh_token_rotation * `feature` - (Required) The state of refresh token rotation for the current app client. Valid values are `ENABLED` or `DISABLED`. -* `retry_grace_period_seconds` - (Optional) A period of time in seconds that the user has to use the old refresh token before it is invalidated. Valid values are between `0` and `60`. +* `retryGracePeriodSeconds` - (Optional) A period of time in seconds that the user has to use the old refresh token before it is invalidated. Valid values are between `0` and `60`. ### token_validity_units @@ -90,4 +91,4 @@ Valid values for the following arguments are: `seconds`, `minutes`, `hours` or ` * `idToken` - (Optional) Time unit in for the value in `idTokenValidity`, defaults to `hours`. * `refreshToken` - (Optional) Time unit in for the value in `refreshTokenValidity`, defaults to `days`. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/cognito_user_pool_clients.html.markdown b/website/docs/cdktf/typescript/d/cognito_user_pool_clients.html.markdown index f18cdb4e081f..815273aa428a 100644 --- a/website/docs/cdktf/typescript/d/cognito_user_pool_clients.html.markdown +++ b/website/docs/cdktf/typescript/d/cognito_user_pool_clients.html.markdown @@ -38,6 +38,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `userPoolId` - (Required) Cognito user pool ID. ## Attribute Reference @@ -47,4 +48,4 @@ This data source exports the following attributes in addition to the arguments a * `clientIds` - List of Cognito user pool client IDs. * `clientNames` - List of Cognito user pool client names. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/cognito_user_pool_signing_certificate.html.markdown b/website/docs/cdktf/typescript/d/cognito_user_pool_signing_certificate.html.markdown index 5ca148e3827b..861317adf028 100644 --- a/website/docs/cdktf/typescript/d/cognito_user_pool_signing_certificate.html.markdown +++ b/website/docs/cdktf/typescript/d/cognito_user_pool_signing_certificate.html.markdown @@ -38,6 +38,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
* `userPoolId` - (Required) Cognito user pool ID. ## Attribute Reference @@ -46,4 +47,4 @@ This data source exports the following attributes in addition to the arguments a * `certificate` - Certificate string - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/cognito_user_pools.html.markdown b/website/docs/cdktf/typescript/d/cognito_user_pools.html.markdown index f96917e5469b..61a7e5d8fb60 100644 --- a/website/docs/cdktf/typescript/d/cognito_user_pools.html.markdown +++ b/website/docs/cdktf/typescript/d/cognito_user_pools.html.markdown @@ -55,6 +55,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the cognito user pools. Name is not a unique attribute for cognito user pool, so multiple pools might be returned with given name. If the pool name is expected to be unique, you can reference the pool id via ```tolist(data.aws_cognito_user_pools.selected.ids)[0]``` ## Attribute Reference @@ -64,4 +65,4 @@ This data source exports the following attributes in addition to the arguments a * `ids` - Set of cognito user pool ids. * `arns` - Set of cognito user pool Amazon Resource Names (ARNs). 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/connect_bot_association.html.markdown b/website/docs/cdktf/typescript/d/connect_bot_association.html.markdown index 99ad36f90a8c..467a5e8416b3 100644 --- a/website/docs/cdktf/typescript/d/connect_bot_association.html.markdown +++ b/website/docs/cdktf/typescript/d/connect_bot_association.html.markdown @@ -43,6 +43,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `instanceId` - (Required) Identifier of the Amazon Connect instance. You can find the instanceId in the ARN of the instance. * `lexBot` - (Required) Configuration information of an Amazon Lex (V1) bot. Detailed below. @@ -57,4 +58,4 @@ The `lexBot` configuration block supports the following: This data source exports no additional attributes. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/connect_contact_flow.html.markdown b/website/docs/cdktf/typescript/d/connect_contact_flow.html.markdown index 6b0c54221406..e7a3a4f0bd21 100644 --- a/website/docs/cdktf/typescript/d/connect_contact_flow.html.markdown +++ b/website/docs/cdktf/typescript/d/connect_contact_flow.html.markdown @@ -64,6 +64,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
* `contactFlowId` - (Optional) Returns information on a specific Contact Flow by contact flow id * `instanceId` - (Required) Reference to the hosting Amazon Connect Instance * `name` - (Optional) Returns information on a specific Contact Flow by name @@ -80,4 +81,4 @@ This data source exports the following attributes in addition to the arguments a * `tags` - Tags to assign to the Contact Flow. * `type` - Type of Contact Flow. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/connect_contact_flow_module.html.markdown b/website/docs/cdktf/typescript/d/connect_contact_flow_module.html.markdown index 5223e72ab959..2e957c74b498 100644 --- a/website/docs/cdktf/typescript/d/connect_contact_flow_module.html.markdown +++ b/website/docs/cdktf/typescript/d/connect_contact_flow_module.html.markdown @@ -64,6 +64,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `contactFlowModuleId` - (Optional) Returns information on a specific Contact Flow Module by contact flow module id * `instanceId` - (Required) Reference to the hosting Amazon Connect Instance * `name` - (Optional) Returns information on a specific Contact Flow Module by name @@ -81,4 +82,4 @@ This data source exports the following attributes in addition to the arguments a * `state` - Type of Contact Flow Module Module. Values are either `ACTIVE` or `ARCHIVED`. * `status` - Status of the Contact Flow Module Module. Values are either `PUBLISHED` or `SAVED`. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/connect_hours_of_operation.html.markdown b/website/docs/cdktf/typescript/d/connect_hours_of_operation.html.markdown index bc099ba80a5a..f924a7e26d27 100644 --- a/website/docs/cdktf/typescript/d/connect_hours_of_operation.html.markdown +++ b/website/docs/cdktf/typescript/d/connect_hours_of_operation.html.markdown @@ -64,6 +64,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `hoursOfOperationId` - (Optional) Returns information on a specific Hours of Operation by hours of operation id * `instanceId` - (Required) Reference to the hosting Amazon Connect Instance * `name` - (Optional) Returns information on a specific Hours of Operation by name @@ -99,4 +100,4 @@ A `startTime` block supports the following arguments: * `hours` - Hour of opening. * `minutes` - Minute of opening. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/connect_instance.html.markdown b/website/docs/cdktf/typescript/d/connect_instance.html.markdown index 0e3d5462d191..0a806be3555d 100644 --- a/website/docs/cdktf/typescript/d/connect_instance.html.markdown +++ b/website/docs/cdktf/typescript/d/connect_instance.html.markdown @@ -62,6 +62,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `instanceId` - (Optional) Returns information on a specific connect instance by id * `instanceAlias` - (Optional) Returns information on a specific connect instance by alias @@ -86,4 +87,4 @@ This data source exports the following attributes in addition to the arguments a * `serviceRole` - Service role of the instance. * `tags` - A map of tags to assigned to the instance. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/connect_instance_storage_config.html.markdown b/website/docs/cdktf/typescript/d/connect_instance_storage_config.html.markdown index 955461b96e2d..f79eef26e2ce 100644 --- a/website/docs/cdktf/typescript/d/connect_instance_storage_config.html.markdown +++ b/website/docs/cdktf/typescript/d/connect_instance_storage_config.html.markdown @@ -41,6 +41,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `associationId` - (Required) The existing association identifier that uniquely identifies the resource type and storage config for the given instance ID. * `instanceId` - (Required) Reference to the hosting Amazon Connect Instance * `resourceType` - (Required) A valid resource type. Valid Values: `AGENT_EVENTS` | `ATTACHMENTS` | `CALL_RECORDINGS` | `CHAT_TRANSCRIPTS` | `CONTACT_EVALUATIONS` | `CONTACT_TRACE_RECORDS` | `MEDIA_STREAMS` | `REAL_TIME_CONTACT_ANALYSIS_SEGMENTS` | `SCHEDULED_REPORTS` | `SCREEN_RECORDINGS`. 
@@ -97,4 +98,4 @@ The `encryptionConfig` configuration block supports the following arguments: * `encryptionType` - The type of encryption. Valid Values: `KMS`. * `keyId` - The full ARN of the encryption key. Be sure to provide the full ARN of the encryption key, not just the ID. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/connect_lambda_function_association.html.markdown b/website/docs/cdktf/typescript/d/connect_lambda_function_association.html.markdown index 6d84e1d89cbe..ad5d7ee5ef1e 100644 --- a/website/docs/cdktf/typescript/d/connect_lambda_function_association.html.markdown +++ b/website/docs/cdktf/typescript/d/connect_lambda_function_association.html.markdown @@ -39,6 +39,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `functionArn` - (Required) ARN of the Lambda Function, omitting any version or alias qualifier. * `instanceId` - (Required) Identifier of the Amazon Connect instance. You can find the instanceId in the ARN of the instance. @@ -48,4 +49,4 @@ This data source exports the following attributes in addition to the arguments a * `id` - AWS Region. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/connect_prompt.html.markdown b/website/docs/cdktf/typescript/d/connect_prompt.html.markdown index cd4b88b53702..fbe88043d9a9 100644 --- a/website/docs/cdktf/typescript/d/connect_prompt.html.markdown +++ b/website/docs/cdktf/typescript/d/connect_prompt.html.markdown @@ -41,6 +41,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `instanceId` - (Required) Reference to the hosting Amazon Connect Instance * `name` - (Required) Returns information on a specific Prompt by name @@ -51,4 +52,4 @@ This data source exports the following attributes in addition to the arguments a * `arn` - ARN of the Prompt. * `promptId` - Identifier for the prompt. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/connect_queue.html.markdown b/website/docs/cdktf/typescript/d/connect_queue.html.markdown index ccc505981d59..592b812d7bdc 100644 --- a/website/docs/cdktf/typescript/d/connect_queue.html.markdown +++ b/website/docs/cdktf/typescript/d/connect_queue.html.markdown @@ -64,6 +64,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
* `queueId` - (Optional) Returns information on a specific Queue by Queue id * `instanceId` - (Required) Reference to the hosting Amazon Connect Instance * `name` - (Optional) Returns information on a specific Queue by name @@ -90,4 +91,4 @@ A `outboundCallerConfig` block supports the following arguments: * `outboundCallerIdNumberId` - Specifies the caller ID number. * `outboundFlowId` - Outbound whisper flow to be used during an outbound call. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/connect_quick_connect.html.markdown b/website/docs/cdktf/typescript/d/connect_quick_connect.html.markdown index 32bd487ea8ab..cee116cab2fd 100644 --- a/website/docs/cdktf/typescript/d/connect_quick_connect.html.markdown +++ b/website/docs/cdktf/typescript/d/connect_quick_connect.html.markdown @@ -64,6 +64,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `quickConnectId` - (Optional) Returns information on a specific Quick Connect by Quick Connect id * `instanceId` - (Required) Reference to the hosting Amazon Connect Instance * `name` - (Optional) Returns information on a specific Quick Connect by name @@ -102,4 +103,4 @@ A `userConfig` block contains the following arguments: * `contactFlowId` - Identifier of the contact flow. * `userId` - Identifier for the user. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/connect_routing_profile.html.markdown b/website/docs/cdktf/typescript/d/connect_routing_profile.html.markdown index 29c8a5ba3d26..bb7e1a9e5c9c 100644 --- a/website/docs/cdktf/typescript/d/connect_routing_profile.html.markdown +++ b/website/docs/cdktf/typescript/d/connect_routing_profile.html.markdown @@ -64,6 +64,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `instanceId` - Reference to the hosting Amazon Connect Instance * `name` - (Optional) Returns information on a specific Routing Profile by name * `routingProfileId` - (Optional) Returns information on a specific Routing Profile by Routing Profile id @@ -96,4 +97,4 @@ A `queueConfigs` block supports the following attributes: * `queueId` - Identifier for the queue. * `queueName` - Name for the queue. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/connect_security_profile.html.markdown b/website/docs/cdktf/typescript/d/connect_security_profile.html.markdown index 8fe60267e595..1ecc4a2b18a5 100644 --- a/website/docs/cdktf/typescript/d/connect_security_profile.html.markdown +++ b/website/docs/cdktf/typescript/d/connect_security_profile.html.markdown @@ -64,6 +64,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `securityProfileId` - (Optional) Returns information on a specific Security Profile by Security Profile id * `instanceId` - (Required) Reference to the hosting Amazon Connect Instance * `name` - (Optional) Returns information on a specific Security Profile by name @@ -81,4 +82,4 @@ This data source exports the following attributes in addition to the arguments a * `permissions` - List of permissions assigned to the security profile. * `tags` - Map of tags to assign to the Security Profile. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/connect_user.html.markdown b/website/docs/cdktf/typescript/d/connect_user.html.markdown index 9939ed7d08d6..613d4c9d0b2a 100644 --- a/website/docs/cdktf/typescript/d/connect_user.html.markdown +++ b/website/docs/cdktf/typescript/d/connect_user.html.markdown @@ -64,6 +64,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `instanceId` - (Required) Reference to the hosting Amazon Connect Instance * `name` - (Optional) Returns information on a specific User by name * `userId` - (Optional) Returns information on a specific User by User id @@ -103,4 +104,4 @@ A `phoneConfig` block supports the following attributes: * `deskPhoneNumber` - The phone number for the user's desk phone. * `phoneType` - The phone type. Valid values are `DESK_PHONE` and `SOFT_PHONE`. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/connect_user_hierarchy_group.html.markdown b/website/docs/cdktf/typescript/d/connect_user_hierarchy_group.html.markdown index 1a1d39ba208f..bdd441bc9e92 100644 --- a/website/docs/cdktf/typescript/d/connect_user_hierarchy_group.html.markdown +++ b/website/docs/cdktf/typescript/d/connect_user_hierarchy_group.html.markdown @@ -64,6 +64,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `hierarchyGroupId` - (Optional) Returns information on a specific hierarchy group by hierarchy group id * `instanceId` - (Required) Reference to the hosting Amazon Connect Instance * `name` - (Optional) Returns information on a specific hierarchy group by name @@ -94,4 +95,4 @@ A level block supports the following attributes: * `id` - The identifier of the hierarchy group. * `name` - Name of the hierarchy group. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/connect_user_hierarchy_structure.html.markdown b/website/docs/cdktf/typescript/d/connect_user_hierarchy_structure.html.markdown index 28bc6d27b58f..30581100ccc5 100644 --- a/website/docs/cdktf/typescript/d/connect_user_hierarchy_structure.html.markdown +++ b/website/docs/cdktf/typescript/d/connect_user_hierarchy_structure.html.markdown @@ -38,6 +38,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `instanceId` - (Required) Reference to the hosting Amazon Connect Instance ## Attribute Reference @@ -60,4 +61,4 @@ Each level block supports the following attributes: * `id` - The identifier of the hierarchy level. * `name` - Name of the user hierarchy level. Must not be more than 50 characters. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/connect_vocabulary.html.markdown b/website/docs/cdktf/typescript/d/connect_vocabulary.html.markdown index 6ec1d9971112..97fa3557a47a 100644 --- a/website/docs/cdktf/typescript/d/connect_vocabulary.html.markdown +++ b/website/docs/cdktf/typescript/d/connect_vocabulary.html.markdown @@ -64,6 +64,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `instanceId` - (Required) Reference to the hosting Amazon Connect Instance * `name` - (Optional) Returns information on a specific Vocabulary by name * `vocabularyId` - (Optional) Returns information on a specific Vocabulary by Vocabulary id @@ -85,4 +86,4 @@ separated by a colon (`:`). * `tags` - A map of tags to assign to the Vocabulary. * `vocabularyId` - The identifier of the custom vocabulary. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/controltower_controls.html.markdown b/website/docs/cdktf/typescript/d/controltower_controls.html.markdown index bd0e74f8118c..0db6c39efe3d 100644 --- a/website/docs/cdktf/typescript/d/controltower_controls.html.markdown +++ b/website/docs/cdktf/typescript/d/controltower_controls.html.markdown @@ -58,8 +58,9 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -The following arguments are required: +This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `targetIdentifier` - (Required) The ARN of the organizational unit. ## Attribute Reference @@ -68,4 +69,4 @@ This data source exports the following attributes in addition to the arguments a * `enabledControls` - List of all the ARNs for the controls applied to the `targetIdentifier`. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/customer_gateway.html.markdown b/website/docs/cdktf/typescript/d/customer_gateway.html.markdown index c9b0549d80e1..ded13d1b8237 100644 --- a/website/docs/cdktf/typescript/d/customer_gateway.html.markdown +++ b/website/docs/cdktf/typescript/d/customer_gateway.html.markdown @@ -55,6 +55,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
* `id` - (Optional) ID of the gateway. * `filter` - (Optional) One or more [name-value pairs][dcg-filters] to filter by. @@ -79,4 +80,4 @@ This data source exports the following attributes in addition to the arguments a - `read` - (Default `20m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/datapipeline_pipeline.html.markdown b/website/docs/cdktf/typescript/d/datapipeline_pipeline.html.markdown index 945f385d76a2..667f2f4c500c 100644 --- a/website/docs/cdktf/typescript/d/datapipeline_pipeline.html.markdown +++ b/website/docs/cdktf/typescript/d/datapipeline_pipeline.html.markdown @@ -38,6 +38,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `pipelineId` - (Required) ID of the pipeline. ## Attribute Reference @@ -48,4 +49,4 @@ This data source exports the following attributes in addition to the arguments a * `description` - Description of Pipeline. * `tags` - Map of tags assigned to the resource. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/datapipeline_pipeline_definition.html.markdown b/website/docs/cdktf/typescript/d/datapipeline_pipeline_definition.html.markdown index e88255b9772b..4aa941e73074 100644 --- a/website/docs/cdktf/typescript/d/datapipeline_pipeline_definition.html.markdown +++ b/website/docs/cdktf/typescript/d/datapipeline_pipeline_definition.html.markdown @@ -38,6 +38,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `pipelineId` - (Required) ID of the pipeline. ## Attribute Reference @@ -75,4 +76,4 @@ This data source exports the following attributes in addition to the arguments a * `refValue` - Field value, expressed as the identifier of another object * `stringValue` - Field value, expressed as a String. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/datazone_domain.html.markdown b/website/docs/cdktf/typescript/d/datazone_domain.html.markdown index 6155143e07bc..22752736d93d 100644 --- a/website/docs/cdktf/typescript/d/datazone_domain.html.markdown +++ b/website/docs/cdktf/typescript/d/datazone_domain.html.markdown @@ -40,6 +40,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Optional) Name of the Domain. 
One of `name` or `id` is required. * `id` - (Optional) ID of the Domain. One of `name` or `id` is required @@ -56,4 +57,4 @@ This data source exports the following attributes in addition to the arguments a * `portalUrl` - URL of the Domain. * `status` - Status of the Domain. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/datazone_environment_blueprint.html.markdown b/website/docs/cdktf/typescript/d/datazone_environment_blueprint.html.markdown index b8f3bc25c873..581a0e90ebc5 100644 --- a/website/docs/cdktf/typescript/d/datazone_environment_blueprint.html.markdown +++ b/website/docs/cdktf/typescript/d/datazone_environment_blueprint.html.markdown @@ -48,8 +48,9 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -The following arguments are required: +This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `domainId` - (Required) ID of the domain. * `name` - (Required) Name of the blueprint. * `managed` (Required) Whether the blueprint is managed by Amazon DataZone. 
@@ -62,4 +63,4 @@ This data source exports the following attributes in addition to the arguments a * `description` - Description of the blueprint * `blueprintProvider` - Provider of the blueprint - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/db_cluster_snapshot.html.markdown b/website/docs/cdktf/typescript/d/db_cluster_snapshot.html.markdown index 680c38053ff5..996e8c5985c6 100644 --- a/website/docs/cdktf/typescript/d/db_cluster_snapshot.html.markdown +++ b/website/docs/cdktf/typescript/d/db_cluster_snapshot.html.markdown @@ -73,6 +73,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `mostRecent` - (Optional) If more than one result is returned, use the most recent Snapshot. * `dbClusterIdentifier` - (Optional) Returns the list of snapshots created by the specific db_cluster * `dbClusterSnapshotIdentifier` - (Optional) Returns information on a specific snapshot_id. @@ -102,10 +103,10 @@ This data source exports the following attributes in addition to the arguments a * `licenseModel` - License model information for the restored DB cluster. * `port` - Port that the DB cluster was listening on at the time of the snapshot. * `snapshotCreateTime` - Time when the snapshot was taken, in Universal Coordinated Time (UTC). -* `source_db_cluster_snapshot_identifier` - DB Cluster Snapshot ARN that the DB Cluster Snapshot was copied from. It only has value in case of cross customer or cross region copy. +* `sourceDbClusterSnapshotIdentifier` - DB Cluster Snapshot ARN that the DB Cluster Snapshot was copied from. 
It only has value in case of cross customer or cross region copy. * `status` - Status of this DB Cluster Snapshot. * `storageEncrypted` - Whether the DB cluster snapshot is encrypted. * `vpcId` - VPC ID associated with the DB cluster snapshot. * `tags` - Map of tags for the resource. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/db_event_categories.html.markdown b/website/docs/cdktf/typescript/d/db_event_categories.html.markdown index 6aff4dcb625d..f57bf78bf385 100644 --- a/website/docs/cdktf/typescript/d/db_event_categories.html.markdown +++ b/website/docs/cdktf/typescript/d/db_event_categories.html.markdown @@ -68,6 +68,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `sourceType` - (Optional) Type of source that will be generating the events. Valid options are db-instance, db-security-group, db-parameter-group, db-snapshot, db-cluster or db-cluster-snapshot. ## Attribute Reference @@ -77,4 +78,4 @@ This data source exports the following attributes in addition to the arguments a * `eventCategories` - List of the event categories. * `id` - Region of the event categories. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/db_instance.html.markdown b/website/docs/cdktf/typescript/d/db_instance.html.markdown index cb60f9b966a7..961e7d1659c5 100644 --- a/website/docs/cdktf/typescript/d/db_instance.html.markdown +++ b/website/docs/cdktf/typescript/d/db_instance.html.markdown @@ -38,6 +38,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `dbInstanceIdentifier` - (Optional) Name of the RDS instance. * `tags` - (Optional) Map of tags, each pair of which must exactly match a pair on the desired instance. @@ -97,4 +98,4 @@ The `masterUserSecret` configuration block supports the following attributes: * `secretArn` - The Amazon Resource Name (ARN) of the secret. * `secret_status` - The status of the secret. Valid Values: `creating` | `active` | `rotating` | `impaired`. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/db_instances.html.markdown b/website/docs/cdktf/typescript/d/db_instances.html.markdown index ab96d605982c..d255cbbac4c3 100644 --- a/website/docs/cdktf/typescript/d/db_instances.html.markdown +++ b/website/docs/cdktf/typescript/d/db_instances.html.markdown @@ -69,6 +69,7 @@ class MyConvertedCode extends TerraformStack { The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
* `filter` - (Optional) Configuration block(s) used to filter instances with AWS supported attributes, such as `engine`, `db-cluster-id` or `db-instance-id` for example. Detailed below. * `tags` - (Optional) Map of tags, each pair of which must exactly match a pair on the desired instances. @@ -86,4 +87,4 @@ This data source exports the following attributes in addition to the arguments a * `instanceArns` - ARNs of the matched RDS instances. * `instanceIdentifiers` - Identifiers of the matched RDS instances. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/db_parameter_group.html.markdown b/website/docs/cdktf/typescript/d/db_parameter_group.html.markdown new file mode 100644 index 000000000000..03e78ff4e96e --- /dev/null +++ b/website/docs/cdktf/typescript/d/db_parameter_group.html.markdown @@ -0,0 +1,52 @@ +--- +subcategory: "RDS (Relational Database)" +layout: "aws" +page_title: "AWS: aws_db_parameter_group" +description: |- + Information about a database parameter group. +--- + + + +# Data Source: aws_db_parameter_group + +Information about a database parameter group. + +## Example Usage + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. 
+ */ +import { DataAwsDbParameterGroup } from "./.gen/providers/aws/data-aws-db-parameter-group"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + new DataAwsDbParameterGroup(this, "test", { + name: "default.postgres15", + }); + } +} + +``` + +## Argument Reference + +This data source supports the following arguments: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `name` - (Required) DB parameter group name. + +## Attribute Reference + +This data source exports the following attributes in addition to the arguments above: + +* `arn` - ARN of the parameter group. +* `family` - Family of the parameter group. +* `description` - Description of the parameter group. + + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/db_proxy.html.markdown b/website/docs/cdktf/typescript/d/db_proxy.html.markdown index 94b8daf84f31..8c1d2d3e8804 100644 --- a/website/docs/cdktf/typescript/d/db_proxy.html.markdown +++ b/website/docs/cdktf/typescript/d/db_proxy.html.markdown @@ -38,6 +38,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the DB proxy. ## Attribute Reference @@ -47,6 +48,7 @@ This data source exports the following attributes in addition to the arguments a * `arn` - ARN of the DB Proxy. 
* `auth` - Configuration(s) with authorization mechanisms to connect to the associated instance or cluster. * `debugLogging` - Whether the proxy includes detailed information about SQL statements in its logs. +* `defaultAuthScheme` - Default authentication scheme that the proxy uses for client connections to the proxy and connections from the proxy to the underlying database. * `endpoint` - Endpoint that you can use to connect to the DB proxy. * `engineFamily` - Kinds of databases that the proxy can connect to. * `idleClientTimeout` - Number of seconds a connection to the proxy can have no activity before the proxy drops the client connection. @@ -56,4 +58,4 @@ This data source exports the following attributes in addition to the arguments a * `vpcSecurityGroupIds` - Provides a list of VPC security groups that the proxy belongs to. * `vpcSubnetIds` - EC2 subnet IDs for the proxy. 
* `dbInstanceIdentifier` - (Optional) Returns the list of snapshots created by the specific db_instance @@ -106,4 +107,4 @@ This data source exports the following attributes in addition to the arguments a * `snapshotCreateTime` - Provides the time when the snapshot was taken, in Universal Coordinated Time (UTC). Changes for the copy when the snapshot is copied. * `originalSnapshotCreateTime` - Provides the time when the snapshot was taken, in Universal Coordinated Time (UTC). Doesn't change when the snapshot is copied. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/db_subnet_group.html.markdown b/website/docs/cdktf/typescript/d/db_subnet_group.html.markdown index ce422409e42a..1018d0e28c95 100644 --- a/website/docs/cdktf/typescript/d/db_subnet_group.html.markdown +++ b/website/docs/cdktf/typescript/d/db_subnet_group.html.markdown @@ -38,6 +38,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the RDS database subnet group. ## Attribute Reference @@ -51,4 +52,4 @@ This data source exports the following attributes in addition to the arguments a * `supportedNetworkTypes` - The network type of the DB subnet group. * `vpcId` - Provides the VPC ID of the DB subnet group. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/devopsguru_notification_channel.html.markdown b/website/docs/cdktf/typescript/d/devopsguru_notification_channel.html.markdown index f59d7c66dcbf..99ec401d0844 100644 --- a/website/docs/cdktf/typescript/d/devopsguru_notification_channel.html.markdown +++ b/website/docs/cdktf/typescript/d/devopsguru_notification_channel.html.markdown @@ -38,8 +38,9 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -The following arguments are required: +This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `id` - (Required) Unique identifier for the notification channel. ## Attribute Reference @@ -58,4 +59,4 @@ This data source exports the following attributes in addition to the arguments a * `messageTypes` - Events to receive notifications for. * `severities` - Severity levels to receive notifications for. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/devopsguru_resource_collection.html.markdown b/website/docs/cdktf/typescript/d/devopsguru_resource_collection.html.markdown index 61b59f928553..685517b4b7c6 100644 --- a/website/docs/cdktf/typescript/d/devopsguru_resource_collection.html.markdown +++ b/website/docs/cdktf/typescript/d/devopsguru_resource_collection.html.markdown @@ -38,8 +38,9 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -The following arguments are required: +This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `type` - (Required) Type of AWS resource collection to create. Valid values are `AWS_CLOUD_FORMATION`, `AWS_SERVICE`, and `AWS_TAGS`. ## Attribute Reference @@ -59,4 +60,4 @@ This data source exports the following attributes in addition to the arguments a * `appBoundaryKey` - An AWS tag key that is used to identify the AWS resources that DevOps Guru analyzes. * `tagValues` - Array of tag values. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/directory_service_directory.html.markdown b/website/docs/cdktf/typescript/d/directory_service_directory.html.markdown index 0c829c1f5dd6..5deb4c2f6316 100644 --- a/website/docs/cdktf/typescript/d/directory_service_directory.html.markdown +++ b/website/docs/cdktf/typescript/d/directory_service_directory.html.markdown @@ -38,6 +38,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `directoryId` - (Required) ID of the directory. ## Attribute Reference @@ -56,7 +57,7 @@ This data source exports the following attributes in addition to the arguments a * `accessUrl` - Access URL for the directory/connector, such as http://alias.awsapps.com. * `dnsIpAddresses` - List of IP addresses of the DNS servers for the directory/connector. * `securityGroupId` - ID of the security group created by the directory/connector. -* `tags` – A map of tags assigned to the directory/connector. +* `tags` - A map of tags assigned to the directory/connector. 
`vpcSettings` (for `SimpleAD` and `MicrosoftAD`) is also exported with the following attributes: @@ -81,4 +82,4 @@ This data source exports the following attributes in addition to the arguments a * `radiusTimeout` - Amount of time, in seconds, to wait for the RADIUS server to respond. * `useSameUsername` - Not currently used. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/dms_certificate.html.markdown b/website/docs/cdktf/typescript/d/dms_certificate.html.markdown index 4a176984db8b..81529eb724c5 100644 --- a/website/docs/cdktf/typescript/d/dms_certificate.html.markdown +++ b/website/docs/cdktf/typescript/d/dms_certificate.html.markdown @@ -38,8 +38,9 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -The following arguments are required: +This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `certificateId` - (Required) A customer-assigned name for the certificate. Identifiers must begin with a letter and must contain only ASCII letters, digits, and hyphens. They can't end with a hyphen or contain two consecutive hyphens. ## Attribute Reference @@ -56,4 +57,4 @@ This data source exports the following attributes in addition to the arguments a * `validFromDate` - The beginning date that the certificate is valid. * `validToDate` - The final date that the certificate is valid. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/dms_endpoint.html.markdown b/website/docs/cdktf/typescript/d/dms_endpoint.html.markdown index e17b80f3ffae..e8422d5a1371 100644 --- a/website/docs/cdktf/typescript/d/dms_endpoint.html.markdown +++ b/website/docs/cdktf/typescript/d/dms_endpoint.html.markdown @@ -40,6 +40,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `endpointId` - (Required) Database endpoint identifier. Identifiers must contain from 1 to 255 alphanumeric characters or hyphens, begin with a letter, contain only ASCII letters, digits, and hyphens, not end with a hyphen, and not contain two consecutive hyphens. ## Attribute Reference @@ -48,4 +49,4 @@ This data source exports the following attributes in addition to the arguments a See the [`aws_dms_endpoint` resource](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/dms_endpoint) for details on the returned attributes - they are identical. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/dms_replication_instance.html.markdown b/website/docs/cdktf/typescript/d/dms_replication_instance.html.markdown index cf56b9da98e4..1b5ebc42f5d5 100644 --- a/website/docs/cdktf/typescript/d/dms_replication_instance.html.markdown +++ b/website/docs/cdktf/typescript/d/dms_replication_instance.html.markdown @@ -38,8 +38,9 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -The following arguments are required: +This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `replicationInstanceId` - (Required) The replication instance identifier. ## Attribute Reference @@ -62,4 +63,4 @@ This data source exports the following attributes in addition to the arguments a * `replicationSubnetGroupId` - A subnet group to associate with the replication instance. * `vpcSecurityGroupIds` - A set of VPC security group IDs that are used with the replication instance. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/dms_replication_subnet_group.html.markdown b/website/docs/cdktf/typescript/d/dms_replication_subnet_group.html.markdown index 2317c544940b..c1c4b6854b44 100644 --- a/website/docs/cdktf/typescript/d/dms_replication_subnet_group.html.markdown +++ b/website/docs/cdktf/typescript/d/dms_replication_subnet_group.html.markdown @@ -40,8 +40,9 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -The following arguments are required: +This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `replicationSubnetGroupId` - (Required) Name for the replication subnet group. This value is stored as a lowercase string. It must contain no more than 255 alphanumeric characters, periods, spaces, underscores, or hyphens and cannot be `default`. ## Attribute Reference @@ -52,4 +53,4 @@ This data source exports the following attributes in addition to the arguments a * `subnetIds` - List of at least 2 EC2 subnet IDs for the subnet group. The subnets must cover at least 2 availability zones. * `vpcId` - The ID of the VPC the subnet group is in. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/dms_replication_task.html.markdown b/website/docs/cdktf/typescript/d/dms_replication_task.html.markdown index 4899af613a60..0aad240517f8 100644 --- a/website/docs/cdktf/typescript/d/dms_replication_task.html.markdown +++ b/website/docs/cdktf/typescript/d/dms_replication_task.html.markdown @@ -40,15 +40,11 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -The following arguments are required: +This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `replicationTaskId` - (Required) The replication task identifier. - - Must contain from 1 to 255 alphanumeric characters or hyphens. - - First character must be a letter. - - Cannot end with a hyphen. - - Cannot contain two consecutive hyphens. - ## Attribute Reference This data source exports the following attributes in addition to the arguments above: @@ -65,4 +61,4 @@ This data source exports the following attributes in addition to the arguments a * `targetEndpointArn` - The Amazon Resource Name (ARN) string that uniquely identifies the target endpoint. * `replicationTaskArn` - The Amazon Resource Name (ARN) for the replication task. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/docdb_engine_version.html.markdown b/website/docs/cdktf/typescript/d/docdb_engine_version.html.markdown index 5726e7912b87..10843df81cd8 100644 --- a/website/docs/cdktf/typescript/d/docdb_engine_version.html.markdown +++ b/website/docs/cdktf/typescript/d/docdb_engine_version.html.markdown @@ -38,6 +38,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `engine` - (Optional) DB engine. (Default: `docdb`) * `parameterGroupFamily` - (Optional) Name of a specific DB parameter group family. An example parameter group family is `docdb3.6`. * `preferredVersions` - (Optional) Ordered list of preferred engine versions. The first match in this list will be returned. If no preferred matches are found and the original search returned more than one result, an error is returned. If both the `version` and `preferredVersions` arguments are not configured, the data source will return the default version for the engine. @@ -53,4 +54,4 @@ This data source exports the following attributes in addition to the arguments a * `validUpgradeTargets` - A set of engine versions that this database engine version can be upgraded to. * `versionDescription` - Description of the database engine version. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/docdb_orderable_db_instance.html.markdown b/website/docs/cdktf/typescript/d/docdb_orderable_db_instance.html.markdown index b6b32ed926f5..35612fb12cc9 100644 --- a/website/docs/cdktf/typescript/d/docdb_orderable_db_instance.html.markdown +++ b/website/docs/cdktf/typescript/d/docdb_orderable_db_instance.html.markdown @@ -41,6 +41,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `engine` - (Optional) DB engine. Default: `docdb` * `engineVersion` - (Optional) Version of the DB engine. * `instanceClass` - (Optional) DB instance class. Examples of classes are `db.r5.12xlarge`, `db.r5.24xlarge`, `db.r5.2xlarge`, `db.r5.4xlarge`, `db.r5.large`, `db.r5.xlarge`, and `db.t3.medium`. (Conflicts with `preferredInstanceClasses`.) @@ -54,4 +55,4 @@ This data source exports the following attributes in addition to the arguments a * `availabilityZones` - Availability zones where the instance is available. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/dx_connection.html.markdown b/website/docs/cdktf/typescript/d/dx_connection.html.markdown index 1256563f1fd5..e6b3db8bab60 100644 --- a/website/docs/cdktf/typescript/d/dx_connection.html.markdown +++ b/website/docs/cdktf/typescript/d/dx_connection.html.markdown @@ -38,6 +38,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the connection to retrieve. ## Attribute Reference @@ -56,4 +57,4 @@ This data source exports the following attributes in addition to the arguments a * `tags` - Map of tags for the resource. * `vlanId` - The VLAN ID. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/dx_location.html.markdown b/website/docs/cdktf/typescript/d/dx_location.html.markdown index 1df9a278acb1..d99746f29d42 100644 --- a/website/docs/cdktf/typescript/d/dx_location.html.markdown +++ b/website/docs/cdktf/typescript/d/dx_location.html.markdown @@ -41,6 +41,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `locationCode` - (Required) Code for the location to retrieve. ## Attribute Reference @@ -52,4 +53,4 @@ This data source exports the following attributes in addition to the arguments a * `availableProviders` - Names of the service providers for the location. * `locationName` - Name of the location. This includes the name of the colocation partner and the physical site of the building. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/dx_locations.html.markdown b/website/docs/cdktf/typescript/d/dx_locations.html.markdown index b5f429051739..f4ca25128717 100644 --- a/website/docs/cdktf/typescript/d/dx_locations.html.markdown +++ b/website/docs/cdktf/typescript/d/dx_locations.html.markdown @@ -37,7 +37,9 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -This data source does not support any arguments. +This data source supports the following arguments: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). ## Attribute Reference @@ -45,4 +47,4 @@ This data source exports the following attributes in addition to the arguments a * `locationCodes` - Code for the locations. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/dx_router_configuration.html.markdown b/website/docs/cdktf/typescript/d/dx_router_configuration.html.markdown index e8362f4963e2..696e1cbd9ac4 100644 --- a/website/docs/cdktf/typescript/d/dx_router_configuration.html.markdown +++ b/website/docs/cdktf/typescript/d/dx_router_configuration.html.markdown @@ -39,8 +39,9 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -This resource supports the following arguments: +This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
* `virtualInterfaceId` - (Required) ID of the Direct Connect Virtual Interface * `routerTypeIdentifier` - (Required) ID of the Router Type. For example: `CiscoSystemsInc-2900SeriesRouters-IOS124` @@ -77,4 +78,4 @@ A `router` block supports the following attributes: * `xslt_template_name` - Router XSLT Template Name * `xslt_template_name_for_mac` - Router XSLT Template Name for MacSec - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/dynamodb_table.html.markdown b/website/docs/cdktf/typescript/d/dynamodb_table.html.markdown index 3d6f8916c3f6..292417872ccc 100644 --- a/website/docs/cdktf/typescript/d/dynamodb_table.html.markdown +++ b/website/docs/cdktf/typescript/d/dynamodb_table.html.markdown @@ -38,6 +38,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the DynamoDB table. ## Attribute Reference @@ -47,4 +48,4 @@ This data source exports the following attributes in addition to the arguments a See the [DynamoDB Table Resource](/docs/providers/aws/r/dynamodb_table.html) for details on the returned attributes - they are identical. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/dynamodb_table_item.html.markdown b/website/docs/cdktf/typescript/d/dynamodb_table_item.html.markdown index e47c2e9596fc..79d9421c9c84 100644 --- a/website/docs/cdktf/typescript/d/dynamodb_table_item.html.markdown +++ b/website/docs/cdktf/typescript/d/dynamodb_table_item.html.markdown @@ -52,6 +52,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `expression_attribute_name` - (Optional) - One or more substitution tokens for attribute names in an expression. Use the `#` character in an expression to dereference an attribute name. * `projectionExpression` - (Optional) A string that identifies one or more attributes to retrieve from the table. These attributes can include scalars, sets, or elements of a JSON document. The attributes in the expression must be separated by commas. If no attribute names are specified, then all attributes are returned. If any of the requested attributes are not found, they do not appear in the result. @@ -62,4 +63,4 @@ This data source exports the following attributes in addition to the arguments a * `item` - JSON representation of a map of attribute names to [AttributeValue](https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_AttributeValue.html) objects, as specified by ProjectionExpression. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/dynamodb_tables.html.markdown b/website/docs/cdktf/typescript/d/dynamodb_tables.html.markdown index 4328a777faf1..8083e55eca12 100644 --- a/website/docs/cdktf/typescript/d/dynamodb_tables.html.markdown +++ b/website/docs/cdktf/typescript/d/dynamodb_tables.html.markdown @@ -24,7 +24,7 @@ import { TerraformOutput, TerraformStack } from "cdktf"; * Provider bindings are generated by running `cdktf get`. * See https://cdk.tf/provider-generation for more details. */ -import { DataAwsDynamodbTables } from "./.gen/providers/aws/"; +import { DataAwsDynamodbTables } from "./.gen/providers/aws/data-aws-dynamodb-tables"; class MyConvertedCode extends TerraformStack { constructor(scope: Construct, name: string) { super(scope, name); @@ -39,7 +39,9 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -This data source does not support any arguments. +This data source supports the following arguments: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). ## Attribute Reference @@ -47,4 +49,4 @@ This data source exports the following attributes in addition to the arguments a * `names` - A list of all the DynamoDB table names found. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/ebs_default_kms_key.html.markdown b/website/docs/cdktf/typescript/d/ebs_default_kms_key.html.markdown index 59376d5b4da0..e9f2c4acd054 100644 --- a/website/docs/cdktf/typescript/d/ebs_default_kms_key.html.markdown +++ b/website/docs/cdktf/typescript/d/ebs_default_kms_key.html.markdown @@ -40,7 +40,9 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -This data source does not support any arguments. +This data source supports the following arguments: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). ## Attribute Reference @@ -55,4 +57,4 @@ This data source exports the following attributes in addition to the arguments a - `read` - (Default `20m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/ebs_encryption_by_default.html.markdown b/website/docs/cdktf/typescript/d/ebs_encryption_by_default.html.markdown index be85ec346b46..fbd26b6e8562 100644 --- a/website/docs/cdktf/typescript/d/ebs_encryption_by_default.html.markdown +++ b/website/docs/cdktf/typescript/d/ebs_encryption_by_default.html.markdown @@ -34,7 +34,9 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -This data source does not support any arguments. +This data source supports the following arguments: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
## Attribute Reference @@ -49,4 +51,4 @@ This data source exports the following attributes in addition to the arguments a - `read` - (Default `20m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/ebs_snapshot.html.markdown b/website/docs/cdktf/typescript/d/ebs_snapshot.html.markdown index 2bd9fd0a2e0d..75a78653a9e3 100644 --- a/website/docs/cdktf/typescript/d/ebs_snapshot.html.markdown +++ b/website/docs/cdktf/typescript/d/ebs_snapshot.html.markdown @@ -49,17 +49,12 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `mostRecent` - (Optional) If more than one result is returned, use the most recent snapshot. - * `owners` - (Optional) Returns the snapshots owned by the specified owner id. Multiple owners can be specified. - * `snapshotIds` - (Optional) Returns information on a specific snapshot_id. - * `restorableByUserIds` - (Optional) One or more AWS accounts IDs that can create volumes from the snapshot. - -* `filter` - (Optional) One or more name/value pairs to filter off of. There are -several valid keys, for a full reference, check out -[describe-snapshots in the AWS CLI reference][1]. +* `filter` - (Optional) One or more name/value pairs to filter off of. There are several valid keys, for a full reference, check out [describe-snapshots in the AWS CLI reference][1]. 
## Attribute Reference @@ -90,4 +85,4 @@ This data source exports the following attributes in addition to the arguments a [1]: http://docs.aws.amazon.com/cli/latest/reference/ec2/describe-snapshots.html - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/ebs_snapshot_ids.html.markdown b/website/docs/cdktf/typescript/d/ebs_snapshot_ids.html.markdown index 809fe7045942..7ce1da3c8f23 100644 --- a/website/docs/cdktf/typescript/d/ebs_snapshot_ids.html.markdown +++ b/website/docs/cdktf/typescript/d/ebs_snapshot_ids.html.markdown @@ -49,13 +49,10 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `owners` - (Optional) Returns the snapshots owned by the specified owner id. Multiple owners can be specified. - * `restorableByUserIds` - (Optional) One or more AWS accounts IDs that can create volumes from the snapshot. - -* `filter` - (Optional) One or more name/value pairs to filter off of. There are -several valid keys, for a full reference, check out -[describe-volumes in the AWS CLI reference][1]. +* `filter` - (Optional) One or more name/value pairs to filter off of. There are several valid keys, for a full reference, check out [describe-volumes in the AWS CLI reference][1]. 
## Attribute Reference @@ -72,4 +69,4 @@ This data source exports the following attributes in addition to the arguments a [1]: http://docs.aws.amazon.com/cli/latest/reference/ec2/describe-snapshots.html - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/ebs_volume.html.markdown b/website/docs/cdktf/typescript/d/ebs_volume.html.markdown index cdce1dbf8d4f..1af981dd7185 100644 --- a/website/docs/cdktf/typescript/d/ebs_volume.html.markdown +++ b/website/docs/cdktf/typescript/d/ebs_volume.html.markdown @@ -49,6 +49,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `filter` - (Optional) One or more name/value pairs to filter off of. There are several valid keys, for a full reference, check out [describe-volumes in the AWS CLI reference][1]. @@ -74,6 +75,7 @@ This data source exports the following attributes in addition to the arguments a * `throughput` - Throughput that the volume supports, in MiB/s. * `volumeId` - Volume ID (e.g., vol-59fcb34e). * `volumeType` - Type of EBS volume. +* `volumeInitializationRate` - EBS provisioned rate for volume initialization, in MiB/s, at which to download the snapshot blocks from Amazon S3 to the volume. 
## Timeouts @@ -83,4 +85,4 @@ This data source exports the following attributes in addition to the arguments a [1]: http://docs.aws.amazon.com/cli/latest/reference/ec2/describe-volumes.html - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/ebs_volumes.html.markdown b/website/docs/cdktf/typescript/d/ebs_volumes.html.markdown index f5de1082c1f7..d2d074f55e2e 100644 --- a/website/docs/cdktf/typescript/d/ebs_volumes.html.markdown +++ b/website/docs/cdktf/typescript/d/ebs_volumes.html.markdown @@ -74,6 +74,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `filter` - (Optional) Custom filter block as described below. * `tags` - (Optional) Map of tags, each pair of which must exactly match a pair on the desired volumes. 
@@ -127,4 +128,4 @@ This data source exports the following attributes in addition to the arguments a - `read` - (Default `20m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/ec2_capacity_block_offering.html.markdown b/website/docs/cdktf/typescript/d/ec2_capacity_block_offering.html.markdown index 3cbd69a840b0..46017c7da809 100644 --- a/website/docs/cdktf/typescript/d/ec2_capacity_block_offering.html.markdown +++ b/website/docs/cdktf/typescript/d/ec2_capacity_block_offering.html.markdown @@ -40,8 +40,9 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -This resource supports the following arguments: +This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `capacityDurationHours` - (Required) The amount of time of the Capacity Block reservation in hours. * `endDateRange` - (Optional) The date and time at which the Capacity Block Reservation expires. When a Capacity Reservation expires, the reserved capacity is released and you can no longer launch instances into it. Valid values: [RFC3339 time string](https://tools.ietf.org/html/rfc3339#section-5.8) (`YYYY-MM-DDTHH:MM:SSZ`) * `instanceCount` - (Required) The number of instances for which to reserve capacity. @@ -58,4 +59,4 @@ This resource exports the following attributes in addition to the arguments abov * `upfrontFee` - The total price to be paid up front. * `tenancy` - Indicates the tenancy of the Capacity Reservation. Specify either `default` or `dedicated`. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/ec2_client_vpn_endpoint.html.markdown b/website/docs/cdktf/typescript/d/ec2_client_vpn_endpoint.html.markdown index 0d8989a23396..a7c961fd8a06 100644 --- a/website/docs/cdktf/typescript/d/ec2_client_vpn_endpoint.html.markdown +++ b/website/docs/cdktf/typescript/d/ec2_client_vpn_endpoint.html.markdown @@ -67,6 +67,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `clientVpnEndpointId` - (Optional) ID of the Client VPN endpoint. * `filter` - (Optional) One or more configuration blocks containing name-values filters. Detailed below. * `tags` - (Optional) Map of tags, each pair of which must exactly match a pair on the desired endpoint. @@ -89,17 +90,19 @@ This data source exports the following attributes in addition to the arguments a * `clientCidrBlock` - IPv4 address range, in CIDR notation, from which client IP addresses are assigned. * `clientConnectOptions` - The options for managing connection authorization for new client connections. * `clientLoginBannerOptions` - Options for enabling a customizable text banner that will be displayed on AWS provided clients when a VPN session is established. -* `client_route_enforcement_options` - Options for enforce administrator defined routes on devices connected through the VPN. +* `clientRouteEnforcementOptions` - Options for enforcing administrator-defined routes on devices connected through the VPN. * `connectionLogOptions` - Information about the client connection logging options for the Client VPN endpoint. * `description` - Brief description of the endpoint.
* `dnsName` - DNS name to be used by clients when connecting to the Client VPN endpoint. * `dnsServers` - Information about the DNS servers to be used for DNS resolution. +* `endpointIpAddressType` - IP address type for the Client VPN endpoint. * `securityGroupIds` - IDs of the security groups for the target network associated with the Client VPN endpoint. * `selfServicePortal` - Whether the self-service portal for the Client VPN endpoint is enabled. * `selfServicePortalUrl` - The URL of the self-service portal. * `serverCertificateArn` - The ARN of the server certificate. * `sessionTimeoutHours` - The maximum VPN session duration time in hours. * `splitTunnel` - Whether split-tunnel is enabled in the AWS Client VPN endpoint. +* `trafficIpAddressType` - IP address type for traffic within the Client VPN tunnel. * `transportProtocol` - Transport protocol used by the Client VPN endpoint. * `vpcId` - ID of the VPC associated with the Client VPN endpoint. * `vpnPort` - Port number for the Client VPN endpoint. @@ -110,4 +113,4 @@ This data source exports the following attributes in addition to the arguments a - `read` - (Default `20m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/ec2_coip_pool.html.markdown b/website/docs/cdktf/typescript/d/ec2_coip_pool.html.markdown index 2b55f3f31986..ab2b885d5296 100644 --- a/website/docs/cdktf/typescript/d/ec2_coip_pool.html.markdown +++ b/website/docs/cdktf/typescript/d/ec2_coip_pool.html.markdown @@ -47,6 +47,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
* `localGatewayRouteTableId` - (Optional) Local Gateway Route Table Id assigned to desired COIP Pool * `poolId` - (Optional) ID of the specific COIP Pool to retrieve. * `tags` - (Optional) Mapping of tags, each pair of which must exactly match @@ -78,4 +79,4 @@ This data source exports the following attributes in addition to the arguments a - `read` - (Default `20m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/ec2_coip_pools.html.markdown b/website/docs/cdktf/typescript/d/ec2_coip_pools.html.markdown index cf307a905be1..9137915aa5b8 100644 --- a/website/docs/cdktf/typescript/d/ec2_coip_pools.html.markdown +++ b/website/docs/cdktf/typescript/d/ec2_coip_pools.html.markdown @@ -43,6 +43,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `tags` - (Optional) Mapping of tags, each pair of which must exactly match a pair on the desired aws_ec2_coip_pools. * `filter` - (Optional) Custom filter block as described below. 
@@ -68,4 +69,4 @@ This data source exports the following attributes in addition to the arguments a - `read` - (Default `20m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/ec2_host.html.markdown b/website/docs/cdktf/typescript/d/ec2_host.html.markdown index 00df634a09b6..294c6a64a471 100644 --- a/website/docs/cdktf/typescript/d/ec2_host.html.markdown +++ b/website/docs/cdktf/typescript/d/ec2_host.html.markdown @@ -72,6 +72,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `filter` - (Optional) Configuration block. Detailed below. * `hostId` - (Optional) ID of the Dedicated Host. @@ -111,4 +112,4 @@ This data source exports the following attributes in addition to the arguments a - `read` - (Default `20m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/ec2_instance_type.html.markdown b/website/docs/cdktf/typescript/d/ec2_instance_type.html.markdown index 43a134347998..a7e02ef4b8ca 100644 --- a/website/docs/cdktf/typescript/d/ec2_instance_type.html.markdown +++ b/website/docs/cdktf/typescript/d/ec2_instance_type.html.markdown @@ -39,6 +39,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
* `instanceType` - (Required) Instance ## Attribute Reference @@ -145,4 +146,4 @@ This data source exports the following attributes in addition to the arguments a - `read` - (Default `20m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/ec2_instance_type_offering.html.markdown b/website/docs/cdktf/typescript/d/ec2_instance_type_offering.html.markdown index bb912dab8989..9388727c1c3d 100644 --- a/website/docs/cdktf/typescript/d/ec2_instance_type_offering.html.markdown +++ b/website/docs/cdktf/typescript/d/ec2_instance_type_offering.html.markdown @@ -44,6 +44,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `filter` - (Optional) One or more configuration blocks containing name-values filters. See the [EC2 API Reference](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeInstanceTypeOfferings.html) for supported filters. Detailed below. * `locationType` - (Optional) Location type. Defaults to `region`. Valid values: `availability-zone`, `availability-zone-id`, and `region`. * `preferredInstanceTypes` - (Optional) Ordered list of preferred EC2 Instance Types. The first match in this list will be returned. If no preferred matches are found and the original search returned more than one result, an error is returned. @@ -59,6 +60,7 @@ This data source exports the following attributes in addition to the arguments a * `id` - EC2 Instance Type. * `instanceType` - EC2 Instance Type. +* `location` - Identifier for the location. 
## Timeouts @@ -66,4 +68,4 @@ This data source exports the following attributes in addition to the arguments a - `read` - (Default `20m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/ec2_instance_type_offerings.html.markdown b/website/docs/cdktf/typescript/d/ec2_instance_type_offerings.html.markdown index 155e67a640bc..dad01857ac34 100644 --- a/website/docs/cdktf/typescript/d/ec2_instance_type_offerings.html.markdown +++ b/website/docs/cdktf/typescript/d/ec2_instance_type_offerings.html.markdown @@ -48,6 +48,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `filter` - (Optional) One or more configuration blocks containing name-values filters. See the [EC2 API Reference](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeInstanceTypeOfferings.html) for supported filters. Detailed below. * `locationType` - (Optional) Location type. Defaults to `region`. Valid values: `availability-zone`, `availability-zone-id`, and `region`. 
@@ -73,4 +74,4 @@ Note that the indexes of Instance Type Offering instance types, locations and lo - `read` - (Default `20m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/ec2_instance_types.html.markdown b/website/docs/cdktf/typescript/d/ec2_instance_types.html.markdown index 365a4a4ecda3..1e7c12f87ab8 100644 --- a/website/docs/cdktf/typescript/d/ec2_instance_types.html.markdown +++ b/website/docs/cdktf/typescript/d/ec2_instance_types.html.markdown @@ -55,6 +55,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `filter` - (Optional) One or more configuration blocks containing name-values filters. See the [EC2 API Reference](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeInstanceTypes.html) for supported filters. Detailed below. ### filter Argument Reference @@ -75,4 +76,4 @@ This data source exports the following attributes in addition to the arguments a - `read` - (Default `20m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/ec2_local_gateway.html.markdown b/website/docs/cdktf/typescript/d/ec2_local_gateway.html.markdown index 1eaa40980bc7..95b39eea6ff1 100644 --- a/website/docs/cdktf/typescript/d/ec2_local_gateway.html.markdown +++ b/website/docs/cdktf/typescript/d/ec2_local_gateway.html.markdown @@ -43,6 +43,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `filter` - (Optional) Custom filter block as described below. * `id` - (Optional) Id of the specific Local Gateway to retrieve. * `state` - (Optional) Current state of the desired Local Gateway. @@ -82,4 +83,4 @@ The following attributes are additionally exported: - `read` - (Default `20m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/ec2_local_gateway_route_table.html.markdown b/website/docs/cdktf/typescript/d/ec2_local_gateway_route_table.html.markdown index 816f761a4ed7..6bc655f3e611 100644 --- a/website/docs/cdktf/typescript/d/ec2_local_gateway_route_table.html.markdown +++ b/website/docs/cdktf/typescript/d/ec2_local_gateway_route_table.html.markdown @@ -50,6 +50,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `localGatewayRouteTableId` - (Optional) Local Gateway Route Table Id assigned to desired local gateway route table * `localGatewayId` - (Optional) ID of the specific local gateway route table to retrieve. * `outpostArn` - (Optional) ARN of the Outpost the local gateway route table is associated with. @@ -80,4 +81,4 @@ This data source exports no additional attributes. 
- `read` - (Default `20m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/ec2_local_gateway_route_tables.html.markdown b/website/docs/cdktf/typescript/d/ec2_local_gateway_route_tables.html.markdown index aae6aede433b..af87db2962c1 100644 --- a/website/docs/cdktf/typescript/d/ec2_local_gateway_route_tables.html.markdown +++ b/website/docs/cdktf/typescript/d/ec2_local_gateway_route_tables.html.markdown @@ -43,6 +43,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `tags` - (Optional) Mapping of tags, each pair of which must exactly match a pair on the desired local gateway route table. * `filter` - (Optional) Custom filter block as described below. @@ -69,4 +70,4 @@ This data source exports the following attributes in addition to the arguments a - `read` - (Default `20m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/ec2_local_gateway_virtual_interface.html.markdown b/website/docs/cdktf/typescript/d/ec2_local_gateway_virtual_interface.html.markdown index c6eae28a0aee..4a9e0d1ff641 100644 --- a/website/docs/cdktf/typescript/d/ec2_local_gateway_virtual_interface.html.markdown +++ b/website/docs/cdktf/typescript/d/ec2_local_gateway_virtual_interface.html.markdown @@ -48,6 +48,7 @@ class MyConvertedCode extends TerraformStack { The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `filter` - (Optional) One or more configuration blocks containing name-values filters. See the [EC2 API Reference](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeLocalGatewayVirtualInterfaces.html) for supported filters. Detailed below. * `id` - (Optional) Identifier of EC2 Local Gateway Virtual Interface. * `tags` - (Optional) Key-value map of resource tags, each pair of which must exactly match a pair on the desired local gateway route table. @@ -76,4 +77,4 @@ This data source exports the following attributes in addition to the arguments a - `read` - (Default `20m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/ec2_local_gateway_virtual_interface_group.html.markdown b/website/docs/cdktf/typescript/d/ec2_local_gateway_virtual_interface_group.html.markdown index 040c6727cd13..5da8180e4b68 100644 --- a/website/docs/cdktf/typescript/d/ec2_local_gateway_virtual_interface_group.html.markdown +++ b/website/docs/cdktf/typescript/d/ec2_local_gateway_virtual_interface_group.html.markdown @@ -38,6 +38,7 @@ class MyConvertedCode extends TerraformStack { The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `filter` - (Optional) One or more configuration blocks containing name-values filters. See the [EC2 API Reference](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeLocalGatewayVirtualInterfaceGroups.html) for supported filters. Detailed below. * `id` - (Optional) Identifier of EC2 Local Gateway Virtual Interface Group. 
* `localGatewayId` - (Optional) Identifier of EC2 Local Gateway. @@ -62,4 +63,4 @@ This data source exports the following attributes in addition to the arguments a - `read` - (Default `20m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/ec2_local_gateway_virtual_interface_groups.html.markdown b/website/docs/cdktf/typescript/d/ec2_local_gateway_virtual_interface_groups.html.markdown index cc8c1c8a6e16..4141d2a7397d 100644 --- a/website/docs/cdktf/typescript/d/ec2_local_gateway_virtual_interface_groups.html.markdown +++ b/website/docs/cdktf/typescript/d/ec2_local_gateway_virtual_interface_groups.html.markdown @@ -36,6 +36,7 @@ class MyConvertedCode extends TerraformStack { The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `filter` - (Optional) One or more configuration blocks containing name-values filters. See the [EC2 API Reference](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeLocalGatewayVirtualInterfaceGroups.html) for supported filters. Detailed below. * `tags` - (Optional) Key-value map of resource tags, each pair of which must exactly match a pair on the desired local gateway route table. 
@@ -60,4 +61,4 @@ This data source exports the following attributes in addition to the arguments a - `read` - (Default `20m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/ec2_local_gateways.html.markdown b/website/docs/cdktf/typescript/d/ec2_local_gateways.html.markdown index 16611097f6cc..c6edae3b245c 100644 --- a/website/docs/cdktf/typescript/d/ec2_local_gateways.html.markdown +++ b/website/docs/cdktf/typescript/d/ec2_local_gateways.html.markdown @@ -47,6 +47,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `tags` - (Optional) Mapping of tags, each pair of which must exactly match a pair on the desired local_gateways. * `filter` - (Optional) Custom filter block as described below. 
@@ -73,4 +74,4 @@ This data source exports the following attributes in addition to the arguments a - `read` - (Default `20m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/ec2_managed_prefix_list.html.markdown b/website/docs/cdktf/typescript/d/ec2_managed_prefix_list.html.markdown index 26b58d7f3a29..8d2028d5880a 100644 --- a/website/docs/cdktf/typescript/d/ec2_managed_prefix_list.html.markdown +++ b/website/docs/cdktf/typescript/d/ec2_managed_prefix_list.html.markdown @@ -32,7 +32,7 @@ class MyConvertedCode extends TerraformStack { super(scope, name); const current = new DataAwsRegion(this, "current", {}); new DataAwsEc2ManagedPrefixList(this, "example", { - name: "com.amazonaws.${" + current.name + "}.dynamodb", + name: "com.amazonaws.${" + current.region + "}.dynamodb", }); } } @@ -70,6 +70,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `id` - (Optional) ID of the prefix list to select. * `name` - (Optional) Name of the prefix list to select. * `filter` - (Optional) Configuration block(s) for filtering. Detailed below. 
@@ -104,4 +105,4 @@ This data source exports the following attributes in addition to the arguments a - `read` - (Default `20m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/ec2_managed_prefix_lists.html.markdown b/website/docs/cdktf/typescript/d/ec2_managed_prefix_lists.html.markdown index c8ac476b9e46..a496ae923e05 100644 --- a/website/docs/cdktf/typescript/d/ec2_managed_prefix_lists.html.markdown +++ b/website/docs/cdktf/typescript/d/ec2_managed_prefix_lists.html.markdown @@ -62,6 +62,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `filter` - (Optional) Custom filter block as described below. * `tags` - (Optional) Map of tags, each pair of which must exactly match a pair on the desired . @@ -87,4 +88,4 @@ This data source exports the following attributes in addition to the arguments a - `read` - (Default `20m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/ec2_network_insights_analysis.html.markdown b/website/docs/cdktf/typescript/d/ec2_network_insights_analysis.html.markdown index d8346ff23d7e..a4111fea70c8 100644 --- a/website/docs/cdktf/typescript/d/ec2_network_insights_analysis.html.markdown +++ b/website/docs/cdktf/typescript/d/ec2_network_insights_analysis.html.markdown @@ -40,6 +40,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `networkInsightsAnalysisId` - (Optional) ID of the Network Insights Analysis to select. * `filter` - (Optional) Configuration block(s) for filtering. Detailed below. @@ -71,4 +72,4 @@ This data source exports the following attributes in addition to the arguments a * `statusMessage` - Message to provide more context when the `status` is `failed`. * `warningMessage` - Warning message. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/ec2_network_insights_path.html.markdown b/website/docs/cdktf/typescript/d/ec2_network_insights_path.html.markdown index fe204149f5d9..a72913a7a7cb 100644 --- a/website/docs/cdktf/typescript/d/ec2_network_insights_path.html.markdown +++ b/website/docs/cdktf/typescript/d/ec2_network_insights_path.html.markdown @@ -40,6 +40,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `networkInsightsPathId` - (Optional) ID of the Network Insights Path to select. * `filter` - (Optional) Configuration block(s) for filtering. Detailed below. @@ -71,4 +72,4 @@ This data source exports the following attributes in addition to the arguments a * `sourceIp` - IP address of the AWS resource that is the source of the path. * `tags` - Map of tags assigned to the resource. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/ec2_public_ipv4_pool.html.markdown b/website/docs/cdktf/typescript/d/ec2_public_ipv4_pool.html.markdown index 37272d3630ad..ab9d5041dadf 100644 --- a/website/docs/cdktf/typescript/d/ec2_public_ipv4_pool.html.markdown +++ b/website/docs/cdktf/typescript/d/ec2_public_ipv4_pool.html.markdown @@ -38,8 +38,9 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -The following arguments are required: +This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `poolId` - (Required) AWS resource IDs of a public IPv4 pool (as a string) for which this data source will fetch detailed information. ## Attribute Reference @@ -57,4 +58,4 @@ This data source exports the following attributes in addition to the arguments a * `totalAddressCount` - Total number of addresses in the pool. * `totalAvailableAddressCount` - Total number of available addresses in the pool. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/ec2_public_ipv4_pools.html.markdown b/website/docs/cdktf/typescript/d/ec2_public_ipv4_pools.html.markdown index dd116be1dd16..5acc0f8b050f 100644 --- a/website/docs/cdktf/typescript/d/ec2_public_ipv4_pools.html.markdown +++ b/website/docs/cdktf/typescript/d/ec2_public_ipv4_pools.html.markdown @@ -65,6 +65,7 @@ class MyConvertedCode extends TerraformStack { The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `filter` - (Optional) Custom filter block as described below. * `tags` - (Optional) Map of tags, each pair of which must exactly match a pair on the desired pools. @@ -80,4 +81,4 @@ This data source exports the following attributes in addition to the arguments a * `poolIds` - List of all the pool IDs found. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/ec2_serial_console_access.html.markdown b/website/docs/cdktf/typescript/d/ec2_serial_console_access.html.markdown index 5fbcd9b2749d..01e476becc89 100644 --- a/website/docs/cdktf/typescript/d/ec2_serial_console_access.html.markdown +++ b/website/docs/cdktf/typescript/d/ec2_serial_console_access.html.markdown @@ -34,7 +34,9 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -This data source does not support any arguments. +This data source supports the following arguments: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
## Attribute Reference @@ -49,4 +51,4 @@ This data source exports the following attributes in addition to the arguments a - `read` - (Default `20m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/ec2_spot_price.html.markdown b/website/docs/cdktf/typescript/d/ec2_spot_price.html.markdown index fbcbaff9c257..0a1cd13a0b66 100644 --- a/website/docs/cdktf/typescript/d/ec2_spot_price.html.markdown +++ b/website/docs/cdktf/typescript/d/ec2_spot_price.html.markdown @@ -45,6 +45,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `instanceType` - (Optional) Type of instance for which to query Spot Price information. * `availabilityZone` - (Optional) Availability zone in which to query Spot price information. * `filter` - (Optional) One or more configuration blocks containing name-values filters. See the [EC2 API Reference](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeSpotPriceHistory.html) for supported filters. Detailed below. 
@@ -68,4 +69,4 @@ This data source exports the following attributes in addition to the arguments a - `read` - (Default `20m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/ec2_transit_gateway.html.markdown b/website/docs/cdktf/typescript/d/ec2_transit_gateway.html.markdown index d9e809e9a1f5..3ac36bdc0d5a 100644 --- a/website/docs/cdktf/typescript/d/ec2_transit_gateway.html.markdown +++ b/website/docs/cdktf/typescript/d/ec2_transit_gateway.html.markdown @@ -67,6 +67,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `filter` - (Optional) One or more configuration blocks containing name-values filters. Detailed below. * `id` - (Optional) Identifier of the EC2 Transit Gateway. @@ -102,4 +103,4 @@ This data source exports the following attributes in addition to the arguments a - `read` - (Default `20m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/ec2_transit_gateway_attachment.html.markdown b/website/docs/cdktf/typescript/d/ec2_transit_gateway_attachment.html.markdown index fab9d4abe37d..870179d1da29 100644 --- a/website/docs/cdktf/typescript/d/ec2_transit_gateway_attachment.html.markdown +++ b/website/docs/cdktf/typescript/d/ec2_transit_gateway_attachment.html.markdown @@ -47,6 +47,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `filter` - (Optional) One or more configuration blocks containing name-values filters. Detailed below. * `transitGatewayAttachmentId` - (Optional) ID of the attachment. @@ -70,4 +71,4 @@ This data source exports the following attributes in addition to the arguments a * `transitGatewayId` - ID of the transit gateway. * `transitGatewayOwnerId` - The ID of the AWS account that owns the transit gateway. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/ec2_transit_gateway_attachments.html.markdown b/website/docs/cdktf/typescript/d/ec2_transit_gateway_attachments.html.markdown index 249c9e6041f8..ec7ff825dbb1 100644 --- a/website/docs/cdktf/typescript/d/ec2_transit_gateway_attachments.html.markdown +++ b/website/docs/cdktf/typescript/d/ec2_transit_gateway_attachments.html.markdown @@ -63,6 +63,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `filter` - (Optional) One or more configuration blocks containing name-values filters. Detailed below. 
### filter Argument Reference @@ -85,4 +86,4 @@ This data source exports the following attributes in addition to the arguments a - `read` - (Default `20m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/ec2_transit_gateway_connect.html.markdown b/website/docs/cdktf/typescript/d/ec2_transit_gateway_connect.html.markdown index 24c3e638e629..5b4da5d2b782 100644 --- a/website/docs/cdktf/typescript/d/ec2_transit_gateway_connect.html.markdown +++ b/website/docs/cdktf/typescript/d/ec2_transit_gateway_connect.html.markdown @@ -67,6 +67,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `filter` - (Optional) One or more configuration blocks containing name-values filters. Detailed below. * `transitGatewayConnectId` - (Optional) Identifier of the EC2 Transit Gateway Connect. 
@@ -90,4 +91,4 @@ This data source exports the following attributes in addition to the arguments a - `read` - (Default `20m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/ec2_transit_gateway_connect_peer.html.markdown b/website/docs/cdktf/typescript/d/ec2_transit_gateway_connect_peer.html.markdown index 7ef018d40c06..161b10f015b3 100644 --- a/website/docs/cdktf/typescript/d/ec2_transit_gateway_connect_peer.html.markdown +++ b/website/docs/cdktf/typescript/d/ec2_transit_gateway_connect_peer.html.markdown @@ -67,6 +67,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `filter` - (Optional) One or more configuration blocks containing name-values filters. Detailed below. * `transitGatewayConnectPeerId` - (Optional) Identifier of the EC2 Transit Gateway Connect Peer. 
@@ -95,4 +96,4 @@ This data source exports the following attributes in addition to the arguments a - `read` - (Default `20m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/ec2_transit_gateway_dx_gateway_attachment.html.markdown b/website/docs/cdktf/typescript/d/ec2_transit_gateway_dx_gateway_attachment.html.markdown index e44ed9f44e16..d0796216b226 100644 --- a/website/docs/cdktf/typescript/d/ec2_transit_gateway_dx_gateway_attachment.html.markdown +++ b/website/docs/cdktf/typescript/d/ec2_transit_gateway_dx_gateway_attachment.html.markdown @@ -41,6 +41,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `transitGatewayId` - (Optional) Identifier of the EC2 Transit Gateway. * `dxGatewayId` - (Optional) Identifier of the Direct Connect Gateway. * `filter` - (Optional) Configuration block(s) for filtering. Detailed below. 
@@ -67,4 +68,4 @@ This data source exports the following attributes in addition to the arguments a - `read` - (Default `20m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/ec2_transit_gateway_multicast_domain.html.markdown b/website/docs/cdktf/typescript/d/ec2_transit_gateway_multicast_domain.html.markdown index c6a2439e343e..657ba81ddb42 100644 --- a/website/docs/cdktf/typescript/d/ec2_transit_gateway_multicast_domain.html.markdown +++ b/website/docs/cdktf/typescript/d/ec2_transit_gateway_multicast_domain.html.markdown @@ -67,6 +67,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `filter` - (Optional) One or more configuration blocks containing name-values filters. Detailed below. * `transitGatewayMulticastDomainId` - (Optional) Identifier of the EC2 Transit Gateway Multicast Domain. 
@@ -107,4 +108,4 @@ This data source exports the following attributes in addition to the arguments a - `read` - (Default `20m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/ec2_transit_gateway_peering_attachment.html.markdown b/website/docs/cdktf/typescript/d/ec2_transit_gateway_peering_attachment.html.markdown index c3428b59be12..f8e5947a68a4 100644 --- a/website/docs/cdktf/typescript/d/ec2_transit_gateway_peering_attachment.html.markdown +++ b/website/docs/cdktf/typescript/d/ec2_transit_gateway_peering_attachment.html.markdown @@ -67,6 +67,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `filter` - (Optional) One or more configuration blocks containing name-values filters. Detailed below. * `id` - (Optional) Identifier of the EC2 Transit Gateway Peering Attachment. 
* `tags` - (Optional) Mapping of tags, each pair of which must exactly match @@ -96,4 +97,4 @@ This data source exports the following attributes in addition to the arguments a - `read` - (Default `20m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/ec2_transit_gateway_peering_attachments.html.markdown b/website/docs/cdktf/typescript/d/ec2_transit_gateway_peering_attachments.html.markdown index f1fa4464889e..109aa9f13cd7 100644 --- a/website/docs/cdktf/typescript/d/ec2_transit_gateway_peering_attachments.html.markdown +++ b/website/docs/cdktf/typescript/d/ec2_transit_gateway_peering_attachments.html.markdown @@ -81,6 +81,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `filter` - (Optional) One or more configuration blocks containing name-values filters. Detailed below. 
### filter Argument Reference @@ -103,4 +104,4 @@ This data source exports the following attributes in addition to the arguments a - `read` - (Default `20m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/ec2_transit_gateway_route_table.html.markdown b/website/docs/cdktf/typescript/d/ec2_transit_gateway_route_table.html.markdown index 33d769670361..a90a94191e0f 100644 --- a/website/docs/cdktf/typescript/d/ec2_transit_gateway_route_table.html.markdown +++ b/website/docs/cdktf/typescript/d/ec2_transit_gateway_route_table.html.markdown @@ -71,6 +71,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `filter` - (Optional) One or more configuration blocks containing name-values filters. Detailed below. * `id` - (Optional) Identifier of the EC2 Transit Gateway Route Table. 
@@ -96,4 +97,4 @@ This data source exports the following attributes in addition to the arguments a - `read` - (Default `20m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/ec2_transit_gateway_route_table_associations.html.markdown b/website/docs/cdktf/typescript/d/ec2_transit_gateway_route_table_associations.html.markdown index d6f0c879d4fa..e7e3ef51a065 100644 --- a/website/docs/cdktf/typescript/d/ec2_transit_gateway_route_table_associations.html.markdown +++ b/website/docs/cdktf/typescript/d/ec2_transit_gateway_route_table_associations.html.markdown @@ -46,6 +46,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `filter` - (Optional) Custom filter block as described below. More complex filters can be expressed using one or more `filter` sub-blocks, @@ -64,4 +65,4 @@ This data source exports the following attributes in addition to the arguments a * `id` - AWS Region. * `ids` - Set of Transit Gateway Route Table Association identifiers. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/ec2_transit_gateway_route_table_propagations.html.markdown b/website/docs/cdktf/typescript/d/ec2_transit_gateway_route_table_propagations.html.markdown index 4970fcbc779d..cd1645be7ef2 100644 --- a/website/docs/cdktf/typescript/d/ec2_transit_gateway_route_table_propagations.html.markdown +++ b/website/docs/cdktf/typescript/d/ec2_transit_gateway_route_table_propagations.html.markdown @@ -42,6 +42,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `transitGatewayRouteTableId` - (Required) Identifier of EC2 Transit Gateway Route Table. * `filter` - (Optional) Custom filter block as described below. @@ -61,4 +62,4 @@ This data source exports the following attributes in addition to the arguments a * `id` - AWS Region. * `ids` - Set of Transit Gateway Route Table Association identifiers. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/ec2_transit_gateway_route_table_routes.html.markdown b/website/docs/cdktf/typescript/d/ec2_transit_gateway_route_table_routes.html.markdown index 528d9197bad2..d812bbfcc03a 100644 --- a/website/docs/cdktf/typescript/d/ec2_transit_gateway_route_table_routes.html.markdown +++ b/website/docs/cdktf/typescript/d/ec2_transit_gateway_route_table_routes.html.markdown @@ -357,6 +357,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `filter` - (Required) Custom filter block as described below. * `transitGatewayRouteTableId` - (Required) Identifier of EC2 Transit Gateway Route Table. @@ -382,4 +383,4 @@ This data source exports the following attributes in addition to the arguments a * `transit_gateway_route_table_announcement_id` - The id of the transit gateway route table announcement, most of the time it is an empty string. * `type` - The type of the route, can be `propagated` or `static`. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/ec2_transit_gateway_route_tables.html.markdown b/website/docs/cdktf/typescript/d/ec2_transit_gateway_route_tables.html.markdown index d1b4b7bc1081..10ba6ec95764 100644 --- a/website/docs/cdktf/typescript/d/ec2_transit_gateway_route_tables.html.markdown +++ b/website/docs/cdktf/typescript/d/ec2_transit_gateway_route_tables.html.markdown @@ -47,8 +47,8 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `filter` - (Optional) Custom filter block as described below. - * `tags` - (Optional) Mapping of tags, each pair of which must exactly match a pair on the desired transit gateway route table. @@ -57,7 +57,6 @@ which take the following arguments: * `name` - (Required) Name of the field to filter by, as defined by [the underlying AWS API](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeTransitGatewayRouteTables.html). 
- * `values` - (Required) Set of values that are accepted for the given field. A Transit Gateway Route Table will be selected if any one of the given values matches. @@ -74,4 +73,4 @@ This data source exports the following attributes in addition to the arguments a - `read` - (Default `20m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/ec2_transit_gateway_vpc_attachment.html.markdown b/website/docs/cdktf/typescript/d/ec2_transit_gateway_vpc_attachment.html.markdown index f815d9615b96..a16c7f7ba805 100644 --- a/website/docs/cdktf/typescript/d/ec2_transit_gateway_vpc_attachment.html.markdown +++ b/website/docs/cdktf/typescript/d/ec2_transit_gateway_vpc_attachment.html.markdown @@ -67,6 +67,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `filter` - (Optional) One or more configuration blocks containing name-values filters. Detailed below. * `id` - (Optional) Identifier of the EC2 Transit Gateway VPC Attachment. 
@@ -97,4 +98,4 @@ This data source exports the following attributes in addition to the arguments a - `read` - (Default `20m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/ec2_transit_gateway_vpc_attachments.html.markdown b/website/docs/cdktf/typescript/d/ec2_transit_gateway_vpc_attachments.html.markdown index b159dfc0d111..034c2356e61e 100644 --- a/website/docs/cdktf/typescript/d/ec2_transit_gateway_vpc_attachments.html.markdown +++ b/website/docs/cdktf/typescript/d/ec2_transit_gateway_vpc_attachments.html.markdown @@ -61,6 +61,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `filter` - (Optional) One or more configuration blocks containing name-values filters. Detailed below. ### filter Argument Reference @@ -83,4 +84,4 @@ This data source exports the following attributes in addition to the arguments a - `read` - (Default `20m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/ec2_transit_gateway_vpn_attachment.html.markdown b/website/docs/cdktf/typescript/d/ec2_transit_gateway_vpn_attachment.html.markdown index d736732e4d40..5dcc33d942d3 100644 --- a/website/docs/cdktf/typescript/d/ec2_transit_gateway_vpn_attachment.html.markdown +++ b/website/docs/cdktf/typescript/d/ec2_transit_gateway_vpn_attachment.html.markdown @@ -70,6 +70,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `transitGatewayId` - (Optional) Identifier of the EC2 Transit Gateway. * `vpnConnectionId` - (Optional) Identifier of the EC2 VPN Connection. * `filter` - (Optional) Configuration block(s) for filtering. Detailed below. @@ -95,4 +96,4 @@ This data source exports the following attributes in addition to the arguments a - `read` - (Default `20m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/ecr_authorization_token.html.markdown b/website/docs/cdktf/typescript/d/ecr_authorization_token.html.markdown index 4e1abb896c7b..30b546027506 100644 --- a/website/docs/cdktf/typescript/d/ecr_authorization_token.html.markdown +++ b/website/docs/cdktf/typescript/d/ecr_authorization_token.html.markdown @@ -36,6 +36,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `registryId` - (Optional) AWS account ID of the ECR Repository. If not specified the default account is assumed. ## Attribute Reference @@ -49,4 +50,4 @@ This data source exports the following attributes in addition to the arguments a * `proxyEndpoint` - Registry URL to use in the docker login command. * `userName` - User name decoded from the authorization token. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/ecr_image.html.markdown b/website/docs/cdktf/typescript/d/ecr_image.html.markdown index f04232d798c4..3214bf8a4482 100644 --- a/website/docs/cdktf/typescript/d/ecr_image.html.markdown +++ b/website/docs/cdktf/typescript/d/ecr_image.html.markdown @@ -39,6 +39,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `registryId` - (Optional) ID of the Registry where the repository resides. * `repositoryName` - (Required) Name of the ECR Repository. * `imageDigest` - (Optional) Sha256 digest of the image manifest. At least one of `imageDigest`, `imageTag`, or `mostRecent` must be specified. @@ -55,4 +56,4 @@ This data source exports the following attributes in addition to the arguments a * `imageTags` - List of tags associated with this image. * `imageUri` - The URI for the specific image version specified by `imageTag` or `imageDigest`. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/ecr_images.html.markdown b/website/docs/cdktf/typescript/d/ecr_images.html.markdown new file mode 100644 index 000000000000..04e6841e80c8 --- /dev/null +++ b/website/docs/cdktf/typescript/d/ecr_images.html.markdown @@ -0,0 +1,65 @@ +--- +subcategory: "ECR (Elastic Container Registry)" +layout: "aws" +page_title: "AWS: aws_ecr_images" +description: |- + Provides a list of images for a specified ECR Repository +--- + + + +# Data Source: aws_ecr_images + +The ECR Images data source allows the list of images in a specified repository to be retrieved. 
+ +## Example Usage + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformOutput, TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. + */ +import { DataAwsEcrImages } from "./.gen/providers/aws/data-aws-ecr-images"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + const example = new DataAwsEcrImages(this, "example", { + repositoryName: "my-repository", + }); + new TerraformOutput(this, "image_digests", { + value: + "${[ for img in ${" + + example.imageIds + + "} : img.image_digest if img.image_digest != null]}", + }); + new TerraformOutput(this, "image_tags", { + value: + "${[ for img in ${" + + example.imageIds + + "} : img.image_tag if img.image_tag != null]}", + }); + } +} + +``` + +## Argument Reference + +This data source supports the following arguments: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `registryId` - (Optional) ID of the Registry where the repository resides. +* `repositoryName` - (Required) Name of the ECR Repository. + +## Attribute Reference + +This data source exports the following attributes in addition to the arguments above: + +* `imageIds` - List of image objects containing image digest and tags. Each object has the following attributes: + * `imageDigest` - The sha256 digest of the image manifest. + * `imageTag` - The tag associated with the image. 
+ + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/ecr_pull_through_cache_rule.html.markdown b/website/docs/cdktf/typescript/d/ecr_pull_through_cache_rule.html.markdown index e718f9e541cf..3959136f1dc4 100644 --- a/website/docs/cdktf/typescript/d/ecr_pull_through_cache_rule.html.markdown +++ b/website/docs/cdktf/typescript/d/ecr_pull_through_cache_rule.html.markdown @@ -38,7 +38,8 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: -- `ecrRepositoryPrefix` - (Required) The repository name prefix to use when caching images from the source registry. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `ecrRepositoryPrefix` - (Required) The repository name prefix to use when caching images from the source registry. ## Attribute Reference @@ -51,4 +52,4 @@ This data source exports the following attributes in addition to the arguments a - `upstreamRegistryUrl` - The registry URL of the upstream registry to use as the source. - `upstreamRepositoryPrefix` - The upstream repository prefix associated with the pull through cache rule. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/ecr_repositories.html.markdown b/website/docs/cdktf/typescript/d/ecr_repositories.html.markdown index a0a0f04ad9ee..1fba28e55f56 100644 --- a/website/docs/cdktf/typescript/d/ecr_repositories.html.markdown +++ b/website/docs/cdktf/typescript/d/ecr_repositories.html.markdown @@ -36,7 +36,9 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -This data source does not support any arguments. 
+This data source supports the following arguments: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). ## Attribute Reference @@ -45,4 +47,4 @@ This data source exports the following attributes in addition to the arguments a * `id` - AWS Region. * `names` - A list if AWS Elastic Container Registries for the region. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/ecr_repository.html.markdown b/website/docs/cdktf/typescript/d/ecr_repository.html.markdown index 6273c622bd44..0142aaf4fb45 100644 --- a/website/docs/cdktf/typescript/d/ecr_repository.html.markdown +++ b/website/docs/cdktf/typescript/d/ecr_repository.html.markdown @@ -38,6 +38,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the ECR Repository. * `registryId` - (Optional) Registry ID where the repository was created. @@ -49,6 +50,7 @@ This data source exports the following attributes in addition to the arguments a * `encryptionConfiguration` - Encryption configuration for the repository. See [Encryption Configuration](#encryption-configuration) below. * `imageScanningConfiguration` - Configuration block that defines image scanning configuration for the repository. See [Image Scanning Configuration](#image-scanning-configuration) below. * `imageTagMutability` - The tag mutability setting for the repository. 
+* `imageTagMutabilityExclusionFilter` - Block that defines filters to specify which image tags can override the default tag mutability setting. * `mostRecentImageTags` - List of image tags associated with the most recently pushed image in the repository. * `repositoryUrl` - URL of the repository (in the form `aws_account_id.dkr.ecr.region.amazonaws.com/repositoryName`). * `tags` - Map of tags assigned to the resource. @@ -58,8 +60,13 @@ This data source exports the following attributes in addition to the arguments a * `encryptionType` - Encryption type to use for the repository, either `AES256` or `KMS`. * `kmsKey` - If `encryptionType` is `KMS`, the ARN of the KMS key used. +### Image Tag Mutability Exclusion Filter + +* `filter` - The filter pattern to use for excluding image tags from the mutability setting. +* `filterType` - The type of filter to use. + ### Image Scanning Configuration * `scanOnPush` - Whether images are scanned after being pushed to the repository. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/ecr_repository_creation_template.html.markdown b/website/docs/cdktf/typescript/d/ecr_repository_creation_template.html.markdown index 9ac5dee157c8..85c04bdf4e8f 100644 --- a/website/docs/cdktf/typescript/d/ecr_repository_creation_template.html.markdown +++ b/website/docs/cdktf/typescript/d/ecr_repository_creation_template.html.markdown @@ -38,6 +38,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `prefix` - (Required) The repository name prefix that the template matches against. 
## Attribute Reference @@ -49,6 +50,7 @@ This data source exports the following attributes in addition to the arguments a * `description` - The description for this template. * `encryptionConfiguration` - Encryption configuration for any created repositories. See [Encryption Configuration](#encryption-configuration) below. * `imageTagMutability` - The tag mutability setting for any created repositories. +* `imageTagMutabilityExclusionFilter` - Block that defines filters to specify which image tags can override the default tag mutability setting. * `lifecyclePolicy` - The lifecycle policy document to apply to any created repositories. * `registryId` - The registry ID the repository creation template applies to. * `repositoryPolicy` - The registry policy document to apply to any created repositories. @@ -59,4 +61,9 @@ This data source exports the following attributes in addition to the arguments a * `encryptionType` - Encryption type to use for any created repositories, either `AES256` or `KMS`. * `kmsKey` - If `encryptionType` is `KMS`, the ARN of the KMS key used. - \ No newline at end of file +### Image Tag Mutability Exclusion Filter + +* `filter` - The filter pattern to use for excluding image tags from the mutability setting. +* `filterType` - The type of filter to use. + + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/ecrpublic_authorization_token.html.markdown b/website/docs/cdktf/typescript/d/ecrpublic_authorization_token.html.markdown index 963adcd2ead4..f2e741f3471e 100644 --- a/website/docs/cdktf/typescript/d/ecrpublic_authorization_token.html.markdown +++ b/website/docs/cdktf/typescript/d/ecrpublic_authorization_token.html.markdown @@ -36,7 +36,9 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -This data source does not support any arguments. 
+This data source supports the following arguments: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). ## Attribute Reference @@ -48,4 +50,4 @@ This data source exports the following attributes in addition to the arguments a * `password` - Password decoded from the authorization token. * `userName` - User name decoded from the authorization token. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/ecs_cluster.html.markdown b/website/docs/cdktf/typescript/d/ecs_cluster.html.markdown index d8ae68422957..ac87180b25a2 100644 --- a/website/docs/cdktf/typescript/d/ecs_cluster.html.markdown +++ b/website/docs/cdktf/typescript/d/ecs_cluster.html.markdown @@ -39,6 +39,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
* `clusterName` - (Required) Name of the ECS Cluster ## Attribute Reference @@ -54,4 +55,4 @@ This data source exports the following attributes in addition to the arguments a * `setting` - Settings associated with the ECS Cluster * `tags` - Key-value map of resource tags - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/ecs_clusters.html.markdown b/website/docs/cdktf/typescript/d/ecs_clusters.html.markdown index b92a60a8bccb..b90e28324b53 100644 --- a/website/docs/cdktf/typescript/d/ecs_clusters.html.markdown +++ b/website/docs/cdktf/typescript/d/ecs_clusters.html.markdown @@ -36,7 +36,9 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -This data source does not support any arguments. +This data source supports the following arguments: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). ## Attribute Reference @@ -44,4 +46,4 @@ This data source exports the following attributes in addition to the arguments a * `clusterArns` - List of ECS cluster ARNs associated with the account. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/ecs_container_definition.html.markdown b/website/docs/cdktf/typescript/d/ecs_container_definition.html.markdown index bc0e2c2450e2..0e173a68d848 100644 --- a/website/docs/cdktf/typescript/d/ecs_container_definition.html.markdown +++ b/website/docs/cdktf/typescript/d/ecs_container_definition.html.markdown @@ -40,6 +40,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `taskDefinition` - (Required) ARN of the task definition which contains the container * `containerName` - (Required) Name of the container definition @@ -56,4 +57,4 @@ This data source exports the following attributes in addition to the arguments a * `disableNetworking` - Indicator if networking is disabled * `dockerLabels` - Set docker labels - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/ecs_service.html.markdown b/website/docs/cdktf/typescript/d/ecs_service.html.markdown index c067b82826e0..9634f745331f 100644 --- a/website/docs/cdktf/typescript/d/ecs_service.html.markdown +++ b/website/docs/cdktf/typescript/d/ecs_service.html.markdown @@ -40,6 +40,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `serviceName` - (Required) Name of the ECS Service * `clusterArn` - (Required) ARN of the ECS Cluster @@ -50,8 +51,28 @@ This data source exports the following attributes in addition to the arguments a * `arn` - ARN of the ECS Service * `desiredCount` - Number of tasks for the ECS Service * `launchType` - Launch type for the ECS Service +* `loadBalancer` - Load balancers for the ECS Service. See [`loadBalancer` Block](#load_balancer-block) for details. * `schedulingStrategy` - Scheduling strategy for the ECS Service * `taskDefinition` - Family for the latest ACTIVE revision or full ARN of the task definition. * `tags` - Resource tags. 
- \ No newline at end of file +### `loadBalancer` Block + +The `loadBalancer` block exports the following attributes: + +* `advancedConfiguration` - Settings for Blue/Green deployment. See [`advancedConfiguration` Block](#advanced_configuration-block) for details. +* `containerName` - Name of the container to associate with the load balancer. +* `containerPort` - Port on the container to associate with the load balancer. +* `elbName` - Name of the load balancer. +* `targetGroupArn` - ARN of the target group to associate with the load balancer. + +### `advancedConfiguration` Block + +The `advancedConfiguration` block exports the following attributes: + +* `alternateTargetGroupArn` - ARN of the alternate target group to use for Blue/Green deployments. +* `productionListenerRule` - ARN of the listener rule that routes production traffic. +* `roleArn` - ARN of the IAM role that allows ECS to manage the target groups. +* `testListenerRule` - ARN of the listener rule that routes test traffic. + + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/ecs_task_definition.html.markdown b/website/docs/cdktf/typescript/d/ecs_task_definition.html.markdown index e767db0a8cae..de7aa0d88d15 100644 --- a/website/docs/cdktf/typescript/d/ecs_task_definition.html.markdown +++ b/website/docs/cdktf/typescript/d/ecs_task_definition.html.markdown @@ -64,6 +64,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `taskDefinition` - (Required) Family for the latest ACTIVE revision, family and revision (family:revision) for a specific revision in the family, the ARN of the task definition to access to. 
## Attribute Reference @@ -74,12 +75,11 @@ This data source exports the following attributes in addition to the arguments a * `arnWithoutRevision` - ARN of the Task Definition with the trailing `revision` removed. This may be useful for situations where the latest task definition is always desired. If a revision isn't specified, the latest ACTIVE revision is used. See the [AWS documentation](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_StartTask.html#ECS-StartTask-request-taskDefinition) for details. * `containerDefinitions` - A list of valid [container definitions](http://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_ContainerDefinition.html) provided as a single valid JSON document. Please note that you should only provide values that are part of the container definition document. For a detailed description of what parameters are available, see the [Task Definition Parameters](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definition_parameters.html) section from the official [Developer Guide](https://docs.aws.amazon.com/AmazonECS/latest/developerguide). * `cpu` - Number of cpu units used by the task. If the `requiresCompatibilities` is `FARGATE` this field is required. -* `enable_fault_injection` - Enables fault injection and allows for fault injection requests to be accepted from the task's containers. Default is `false`. +* `enableFaultInjection` - Enables fault injection and allows for fault injection requests to be accepted from the task's containers. Default is `false`. * `ephemeralStorage` - The amount of ephemeral storage to allocate for the task. This parameter is used to expand the total amount of ephemeral storage available, beyond the default amount, for tasks hosted on AWS Fargate. See [Ephemeral Storage](#ephemeral_storage). * `executionRoleArn` - ARN of the task execution role that the Amazon ECS container agent and the Docker daemon can assume. * `family` - A unique name for your task definition. 
The following arguments are optional: -* `inferenceAccelerator` - Configuration block(s) with Inference Accelerators settings. [Detailed below.](#inference_accelerator) * `ipcMode` - IPC resource namespace to be used for the containers in the task The valid values are `host`, `task`, and `none`. * `memory` - Amount (in MiB) of memory used by the task. If the `requiresCompatibilities` is `FARGATE` this field is required. * `networkMode` - Docker networking mode to use for the containers in the task. Valid values are `none`, `bridge`, `awsvpc`, and `host`. @@ -97,11 +97,6 @@ The following arguments are optional: * `sizeInGib` - The total amount, in GiB, of ephemeral storage to set for the task. The minimum supported value is `21` GiB and the maximum supported value is `200` GiB. -### inference_accelerator - -* `deviceName` - Elastic Inference accelerator device name. The deviceName must also be referenced in a container definition as a ResourceRequirement. -* `deviceType` - Elastic Inference accelerator type to use. - ### placement_constraints * `expression` - Cluster Query Language expression to apply to the constraint. For more information, see [Cluster Query Language in the Amazon EC2 Container Service Developer Guide](http://docs.aws.amazon.com/AmazonECS/latest/developerguide/cluster-query-language.html). @@ -166,4 +161,4 @@ For more information, see [Specifying an FSX Windows File Server volume in your * `credentialsParameter` - The authorization credential option to use. The authorization credential options can be provided using either the Amazon Resource Name (ARN) of an AWS Secrets Manager secret or AWS Systems Manager Parameter Store parameter. The ARNs refer to the stored credentials. * `domain` - A fully qualified domain name hosted by an AWS Directory Service Managed Microsoft AD (Active Directory) or self-hosted AD on Amazon EC2. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/ecs_task_execution.html.markdown b/website/docs/cdktf/typescript/d/ecs_task_execution.html.markdown index 921b60567621..5c6a96ae6b6a 100644 --- a/website/docs/cdktf/typescript/d/ecs_task_execution.html.markdown +++ b/website/docs/cdktf/typescript/d/ecs_task_execution.html.markdown @@ -55,6 +55,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `capacityProviderStrategy` - (Optional) Set of capacity provider strategies to use for the cluster. See below. * `clientToken` - (Optional) An identifier that you provide to ensure the idempotency of the request. It must be unique and is case sensitive. Up to 64 characters are allowed. The valid characters are characters in the range of 33-126, inclusive. For more information, see [Ensuring idempotency](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/ECS_Idempotency.html). * `desiredCount` - (Optional) Number of instantiations of the specified task to place on your cluster. You can specify up to 10 tasks for each call. @@ -91,7 +92,6 @@ For more information, see the [Task Networking](https://docs.aws.amazon.com/Amaz * `containerOverrides` - (Optional) One or more container overrides that are sent to a task. See below. * `cpu` - (Optional) The CPU override for the task. * `executionRoleArn` - (Optional) Amazon Resource Name (ARN) of the task execution role override for the task. -* `inferenceAcceleratorOverrides` - (Optional) **DEPRECATED** Elastic Inference accelerator override for the task. See below. * `memory` - (Optional) The memory override for the task. 
* `taskRoleArn` - (Optional) Amazon Resource Name (ARN) of the role that containers in this task can assume. @@ -112,13 +112,8 @@ For more information, see the [Task Networking](https://docs.aws.amazon.com/Amaz ### resource_requirements -* `type` - (Required) The type of resource to assign to a container. Valid values are `GPU` or `InferenceAccelerator`. -* `value` - (Required) The value for the specified resource type. If the `GPU` type is used, the value is the number of physical GPUs the Amazon ECS container agent reserves for the container. The number of GPUs that's reserved for all containers in a task can't exceed the number of available GPUs on the container instance that the task is launched on. If the `InferenceAccelerator` type is used, the value matches the `deviceName` for an InferenceAccelerator specified in a task definition. - -### inference_accelerator_overrides - -* `deviceName` - (Optional) The Elastic Inference accelerator device name to override for the task. This parameter must match a deviceName specified in the task definition. -* `deviceType` - (Optional) The Elastic Inference accelerator type to use. +* `type` - (Required) The type of resource to assign to a container. The only valid value is `GPU`. +* `value` - (Required) The value for the specified resource type. If the `GPU` type is used, the value is the number of physical GPUs the Amazon ECS container agent reserves for the container. The number of GPUs that's reserved for all containers in a task can't exceed the number of available GPUs on the container instance that the task is launched on. ### placement_constraints @@ -139,4 +134,4 @@ This data source exports the following attributes in addition to the arguments a * `taskArns` - A list of the provisioned task ARNs. * `id` - The unique identifier, which is a comma-delimited string joining the `cluster` and `taskDefinition` attributes.
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/efs_access_point.html.markdown b/website/docs/cdktf/typescript/d/efs_access_point.html.markdown index 9ba0fa7f7092..9f71d9e1ec50 100644 --- a/website/docs/cdktf/typescript/d/efs_access_point.html.markdown +++ b/website/docs/cdktf/typescript/d/efs_access_point.html.markdown @@ -38,6 +38,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `accessPointId` - (Required) ID that identifies the file system. ## Attribute Reference @@ -60,4 +61,4 @@ This data source exports the following attributes in addition to the arguments a * `path` - Path exposed as the root directory * `tags` - Key-value mapping of resource tags. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/efs_access_points.html.markdown b/website/docs/cdktf/typescript/d/efs_access_points.html.markdown index e91f42814bad..161de4ec196f 100644 --- a/website/docs/cdktf/typescript/d/efs_access_points.html.markdown +++ b/website/docs/cdktf/typescript/d/efs_access_points.html.markdown @@ -38,6 +38,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `fileSystemId` - (Required) EFS File System identifier. 
## Attribute Reference @@ -48,4 +49,4 @@ This data source exports the following attributes in addition to the arguments a * `id` - EFS File System identifier. * `ids` - Set of identifiers. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/efs_file_system.html.markdown b/website/docs/cdktf/typescript/d/efs_file_system.html.markdown index c368f628a72b..5dc7e13a2338 100644 --- a/website/docs/cdktf/typescript/d/efs_file_system.html.markdown +++ b/website/docs/cdktf/typescript/d/efs_file_system.html.markdown @@ -49,6 +49,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `fileSystemId` - (Optional) ID that identifies the file system (e.g., fs-ccfc0d65). * `creationToken` - (Optional) Restricts the list to the file system with this creation token. * `tags` - (Optional) Restricts the list to the file system with these tags. @@ -71,4 +72,4 @@ This data source exports the following attributes in addition to the arguments a * `throughputMode` - Throughput mode for the file system. * `sizeInBytes` - Current byte count used by the file system. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/efs_mount_target.html.markdown b/website/docs/cdktf/typescript/d/efs_mount_target.html.markdown index 58942e20b336..dad7da845176 100644 --- a/website/docs/cdktf/typescript/d/efs_mount_target.html.markdown +++ b/website/docs/cdktf/typescript/d/efs_mount_target.html.markdown @@ -44,6 +44,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `accessPointId` - (Optional) ID or ARN of the access point whose mount target that you want to find. It must be included if a `fileSystemId` and `mountTargetId` are not included. * `fileSystemId` - (Optional) ID or ARN of the file system whose mount target that you want to find. It must be included if an `accessPointId` and `mountTargetId` are not included. * `mountTargetId` - (Optional) ID or ARN of the mount target that you want to find. It must be included in your request if an `accessPointId` and `fileSystemId` are not included. @@ -55,6 +56,8 @@ This data source exports the following attributes in addition to the arguments a * `fileSystemArn` - Amazon Resource Name of the file system for which the mount target is intended. * `subnetId` - ID of the mount target's subnet. * `ipAddress` - Address at which the file system may be mounted via the mount target. +* `ipAddressType` - IP address type for the mount target. +* `ipv6Address` - IPv6 address at which the file system may be mounted via the mount target. * `securityGroups` - List of VPC security group IDs attached to the mount target. * `dnsName` - DNS name for the EFS file system. 
* `mountTargetDnsName` - The DNS name for the given subnet/AZ per [documented convention](http://docs.aws.amazon.com/efs/latest/ug/mounting-fs-mount-cmd-dns-name.html). @@ -63,4 +66,4 @@ This data source exports the following attributes in addition to the arguments a * `availabilityZoneId` - The unique and consistent identifier of the Availability Zone (AZ) that the mount target resides in. * `ownerId` - AWS account ID that owns the resource. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/eip.html.markdown b/website/docs/cdktf/typescript/d/eip.html.markdown index 27c0f66e5af2..a869dacbdb3a 100644 --- a/website/docs/cdktf/typescript/d/eip.html.markdown +++ b/website/docs/cdktf/typescript/d/eip.html.markdown @@ -113,6 +113,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `filter` - (Optional) One or more name/value pairs to use as filters. There are several valid keys, for a full reference, check out the [EC2 API Reference](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeAddresses.html). * `id` - (Optional) Allocation ID of the specific VPC EIP to retrieve. If a classic EIP is required, do NOT set `id`, only set `publicIp` * `publicIp` - (Optional) Public IP of the specific EIP to retrieve. 
@@ -152,4 +153,4 @@ This data source exports the following attributes in addition to the arguments a - `read` - (Default `20m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/eips.html.markdown b/website/docs/cdktf/typescript/d/eips.html.markdown index c8087eebaf55..4d8ca41a1e02 100644 --- a/website/docs/cdktf/typescript/d/eips.html.markdown +++ b/website/docs/cdktf/typescript/d/eips.html.markdown @@ -48,6 +48,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `filter` - (Optional) Custom filter block as described below. * `tags` - (Optional) Map of tags, each pair of which must exactly match a pair on the desired Elastic IPs. @@ -71,4 +72,4 @@ This data source exports the following attributes in addition to the arguments a - `read` - (Default `20m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/eks_access_entry.html.markdown b/website/docs/cdktf/typescript/d/eks_access_entry.html.markdown index 7c371389f9a2..6b44fa0d9e2c 100644 --- a/website/docs/cdktf/typescript/d/eks_access_entry.html.markdown +++ b/website/docs/cdktf/typescript/d/eks_access_entry.html.markdown @@ -42,8 +42,9 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: -* `clusterName` – (Required) Name of the EKS Cluster. -* `principalArn` – (Required) The IAM Principal ARN which requires Authentication access to the EKS cluster. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `clusterName` - (Required) Name of the EKS Cluster. +* `principalArn` - (Required) The IAM Principal ARN which requires Authentication access to the EKS cluster. ## Attribute Reference @@ -51,10 +52,10 @@ This data source exports the following attributes in addition to the arguments a * `accessEntryArn` - Amazon Resource Name (ARN) of the Access Entry. * `createdAt` - Date and time in [RFC3339 format](https://tools.ietf.org/html/rfc3339#section-5.8) that the EKS add-on was created. -* `kubernetesGroups` – List of string which can optionally specify the Kubernetes groups the user would belong to when creating an access entry. +* `kubernetesGroups` - List of string which can optionally specify the Kubernetes groups the user would belong to when creating an access entry. * `modifiedAt` - Date and time in [RFC3339 format](https://tools.ietf.org/html/rfc3339#section-5.8) that the EKS add-on was updated. * `userName` - Defaults to principal ARN if user is principal else defaults to assume-role/session-name is role is used. * `type` - Defaults to STANDARD which provides the standard workflow. EC2_LINUX, EC2_WINDOWS, FARGATE_LINUX types disallow users to input a username or groups, and prevent associations. * `tagsAll` - (Optional) Key-value map of resource tags, including those inherited from the provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/eks_addon.html.markdown b/website/docs/cdktf/typescript/d/eks_addon.html.markdown index e3a9ff77ecad..8f3752e212dd 100644 --- a/website/docs/cdktf/typescript/d/eks_addon.html.markdown +++ b/website/docs/cdktf/typescript/d/eks_addon.html.markdown @@ -42,9 +42,10 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: -* `addonName` – (Required) Name of the EKS add-on. The name must match one of +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `addonName` - (Required) Name of the EKS add-on. The name must match one of the names returned by [list-addon](https://docs.aws.amazon.com/cli/latest/reference/eks/list-addons.html). -* `clusterName` – (Required) Name of the EKS Cluster. +* `clusterName` - (Required) Name of the EKS Cluster. ## Attribute Reference @@ -62,4 +63,4 @@ This data source exports the following attributes in addition to the arguments a * `createdAt` - Date and time in [RFC3339 format](https://tools.ietf.org/html/rfc3339#section-5.8) that the EKS add-on was created. * `modifiedAt` - Date and time in [RFC3339 format](https://tools.ietf.org/html/rfc3339#section-5.8) that the EKS add-on was updated. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/eks_addon_version.html.markdown b/website/docs/cdktf/typescript/d/eks_addon_version.html.markdown index c2698731f741..528f9207509a 100644 --- a/website/docs/cdktf/typescript/d/eks_addon_version.html.markdown +++ b/website/docs/cdktf/typescript/d/eks_addon_version.html.markdown @@ -60,9 +60,10 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: -* `addonName` – (Required) Name of the EKS add-on. The name must match one of +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `addonName` - (Required) Name of the EKS add-on. The name must match one of the names returned by [list-addon](https://docs.aws.amazon.com/cli/latest/reference/eks/list-addons.html). -* `kubernetesVersion` – (Required) Version of the EKS Cluster. Must be between 1-100 characters in length. Must begin with an alphanumeric character, and must only contain alphanumeric characters, dashes and underscores (`^[0-9A-Za-z][A-Za-z0-9\-_]+$`). +* `kubernetesVersion` - (Required) Version of the EKS Cluster. Must be between 1-100 characters in length. Must begin with an alphanumeric character, and must only contain alphanumeric characters, dashes and underscores (`^[0-9A-Za-z][A-Za-z0-9\-_]+$`). * `mostRecent` - (Optional) Determines if the most recent or default version of the addon should be returned. ## Attribute Reference @@ -72,4 +73,4 @@ This data source exports the following attributes in addition to the arguments a * `id` - Name of the add-on * `version` - Version of the EKS add-on. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/eks_cluster.html.markdown b/website/docs/cdktf/typescript/d/eks_cluster.html.markdown index 7217a0cb2b1a..b5c0f2b680a1 100644 --- a/website/docs/cdktf/typescript/d/eks_cluster.html.markdown +++ b/website/docs/cdktf/typescript/d/eks_cluster.html.markdown @@ -44,6 +44,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the cluster. ## Attribute Reference @@ -63,6 +64,7 @@ This data source exports the following attributes in addition to the arguments a * `data` - The base64 encoded certificate data required to communicate with your cluster. Add this to the `certificate-authority-data` section of the `kubeconfig` file for your cluster. * `clusterId` - The ID of your local Amazon EKS cluster on the AWS Outpost. This attribute isn't available for an AWS EKS cluster on AWS cloud. * `createdAt` - Unix epoch time stamp in seconds for when the cluster was created. +* `deletionProtection` - Whether deletion protection for the cluster is enabled. * `enabledClusterLogTypes` - The enabled control plane logs. * `endpoint` - Endpoint for your Kubernetes API server. * `identity` - Nested attribute containing identity provider information for your cluster. Only available on Kubernetes version 1.13 and 1.14 clusters created or upgraded on or after September 3, 2019. For an example using this information to enable IAM Roles for Service Accounts, see the [`aws_eks_cluster` resource documentation](/docs/providers/aws/r/eks_cluster.html). 
@@ -99,10 +101,10 @@ This data source exports the following attributes in addition to the arguments a * `endpointPrivateAccess` - Indicates whether or not the Amazon EKS private API server endpoint is enabled. * `endpointPublicAccess` - Indicates whether or not the Amazon EKS public API server endpoint is enabled. * `publicAccessCidrs` - List of CIDR blocks. Indicates which CIDR blocks can access the Amazon EKS public API server endpoint. - * `securityGroupIds` – List of security group IDs - * `subnetIds` – List of subnet IDs - * `vpcId` – The VPC associated with your cluster. + * `securityGroupIds` - List of security group IDs + * `subnetIds` - List of subnet IDs + * `vpcId` - The VPC associated with your cluster. * `zonalShiftConfig` - Contains Zonal Shift Configuration. * `enabled` - Whether zonal shift is enabled. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/eks_cluster_auth.html.markdown b/website/docs/cdktf/typescript/d/eks_cluster_auth.html.markdown index 4345d61836be..3d43c4f33536 100644 --- a/website/docs/cdktf/typescript/d/eks_cluster_auth.html.markdown +++ b/website/docs/cdktf/typescript/d/eks_cluster_auth.html.markdown @@ -67,6 +67,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the cluster ## Attribute Reference @@ -76,4 +77,4 @@ This data source exports the following attributes in addition to the arguments a * `id` - Name of the cluster. * `token` - Token to use to authenticate with the cluster. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/eks_cluster_versions.html.markdown b/website/docs/cdktf/typescript/d/eks_cluster_versions.html.markdown index f10d990f031e..c6e8f043daa4 100644 --- a/website/docs/cdktf/typescript/d/eks_cluster_versions.html.markdown +++ b/website/docs/cdktf/typescript/d/eks_cluster_versions.html.markdown @@ -19,7 +19,7 @@ Terraform data source for managing AWS EKS (Elastic Kubernetes) Cluster Versions ```typescript // DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug import { Construct } from "constructs"; -import { TerraformStack } from "cdktf"; +import { TerraformOutput, TerraformStack } from "cdktf"; /* * Provider bindings are generated by running `cdktf get`. * See https://cdk.tf/provider-generation for more details. @@ -28,7 +28,22 @@ import { DataAwsEksClusterVersions } from "./.gen/providers/aws/data-aws-eks-clu class MyConvertedCode extends TerraformStack { constructor(scope: Construct, name: string) { super(scope, name); - new DataAwsEksClusterVersions(this, "example", {}); + const example = new DataAwsEksClusterVersions(this, "example", {}); + new TerraformOutput(this, "eks_cluster_version_filtered", { + value: + "${[ for version in ${" + + example.clusterVersions + + '} : version if version.cluster_version == "1.33"]}', + }); + new TerraformOutput(this, "eks_cluster_version_list", { + value: + "${[ for version in ${" + + example.clusterVersions + + "} : version.cluster_version]}", + }); + new TerraformOutput(this, "eks_cluster_versions", { + value: example.clusterVersions, + }); } } @@ -82,9 +97,9 @@ class MyConvertedCode extends TerraformStack { The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `clusterType` - (Optional) Type of clusters to filter by. Currently, the only valid value is `eks`. -* `clusterVersions` - (Optional) A list of Kubernetes versions that you can use to check if EKS supports it. * `defaultOnly` - (Optional) Whether to show only the default versions of Kubernetes supported by EKS. * `includeAll` - (Optional) Whether to include all kubernetes versions in the response. * `versionStatus` - (Optional) Status of the EKS cluster versions to list. @@ -94,14 +109,15 @@ Valid values are `STANDARD_SUPPORT` or `UNSUPPORTED` or `EXTENDED_SUPPORT`. This data source exports the following attributes in addition to the arguments above: -* `clusterType` - Type of cluster that the version belongs to. -* `clusterVersion` - Kubernetes version supported by EKS. -* `default_platform_version` - Default eks platform version for the cluster version. -* `defaultVersion` - Default Kubernetes version for the cluster version. -* `end_of_extended_support_date` - End of extended support date for the cluster version. -* `end_of_standard_support_date` - End of standard support date for the cluster version. -* `kubernetes_patch_version` - Kubernetes patch version for the cluster version. -* `releaseDate` - Release date of the cluster version. -* `versionStatus` - Status of the EKS cluster version. - - \ No newline at end of file +* `clusterVersions` - A list of Kubernetes version information. + * `clusterType` - Type of cluster that the version belongs to. + * `clusterVersion` - Kubernetes version supported by EKS. + * `default_platform_version` - Default eks platform version for the cluster version. + * `defaultVersion` - Default Kubernetes version for the cluster version. + * `end_of_extended_support_date` - End of extended support date for the cluster version. 
+ * `end_of_standard_support_date` - End of standard support date for the cluster version. + * `kubernetes_patch_version` - Kubernetes patch version for the cluster version. + * `releaseDate` - Release date of the cluster version. + * `versionStatus` - Status of the EKS cluster version. + + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/eks_clusters.html.markdown b/website/docs/cdktf/typescript/d/eks_clusters.html.markdown index 513a0319e694..c4951cd4706d 100644 --- a/website/docs/cdktf/typescript/d/eks_clusters.html.markdown +++ b/website/docs/cdktf/typescript/d/eks_clusters.html.markdown @@ -48,7 +48,9 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -This data source does not support any arguments. +This data source supports the following arguments: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). ## Attribute Reference @@ -57,4 +59,4 @@ This data source exports the following attributes in addition to the arguments a * `id` - AWS Region. * `names` - Set of EKS clusters names - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/eks_node_group.html.markdown b/website/docs/cdktf/typescript/d/eks_node_group.html.markdown index 64219372eeb3..063074d9242e 100644 --- a/website/docs/cdktf/typescript/d/eks_node_group.html.markdown +++ b/website/docs/cdktf/typescript/d/eks_node_group.html.markdown @@ -39,6 +39,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `clusterName` - (Required) Name of the cluster. * `nodeGroupName` - (Required) Name of the node group. @@ -57,8 +58,8 @@ This data source exports the following attributes in addition to the arguments a * `id` - The ID of the launch template. * `name` - The name of the launch template. * `version` - The version number of the launch template. -* `nodeRoleArn` – ARN of the IAM Role that provides permissions for the EKS Node Group. -* `releaseVersion` – AMI version of the EKS Node Group. +* `nodeRoleArn` - ARN of the IAM Role that provides permissions for the EKS Node Group. +* `releaseVersion` - AMI version of the EKS Node Group. * `remoteAccess` - Configuration block with remote access settings. * `ec2SshKey` - EC2 Key Pair name that provides access for SSH communication with the worker nodes in the EKS Node Group. * `sourceSecurityGroupIds` - Set of EC2 Security Group IDs to allow SSH access (port 22) from on the worker nodes. @@ -71,12 +72,12 @@ This data source exports the following attributes in addition to the arguments a * `maxSize` - Maximum number of worker nodes. * `minSize` - Minimum number of worker nodes. * `status` - Status of the EKS Node Group. -* `subnetIds` – Identifiers of EC2 Subnets to associate with the EKS Node Group. +* `subnetIds` - Identifiers of EC2 Subnets to associate with the EKS Node Group. * `taints` - List of objects containing information about taints applied to the nodes in the EKS Node Group. * `key` - The key of the taint. * `value` - The value of the taint. * `effect` - The effect of the taint. * `tags` - Key-value map of resource tags. -* `version` – Kubernetes version. +* `version` - Kubernetes version. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/eks_node_groups.html.markdown b/website/docs/cdktf/typescript/d/eks_node_groups.html.markdown index 505c6892ff8d..51cb5e68caa5 100644 --- a/website/docs/cdktf/typescript/d/eks_node_groups.html.markdown +++ b/website/docs/cdktf/typescript/d/eks_node_groups.html.markdown @@ -57,6 +57,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `clusterName` - (Required) Name of the cluster. ## Attribute Reference @@ -66,4 +67,4 @@ This data source exports the following attributes in addition to the arguments a * `id` - Cluster name. * `names` - Set of all node group names in an EKS Cluster. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/elastic_beanstalk_application.html.markdown b/website/docs/cdktf/typescript/d/elastic_beanstalk_application.html.markdown index 168ed97487c6..5bff28b9c613 100644 --- a/website/docs/cdktf/typescript/d/elastic_beanstalk_application.html.markdown +++ b/website/docs/cdktf/typescript/d/elastic_beanstalk_application.html.markdown @@ -44,6 +44,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
* `name` - (Required) Name of the application ## Attribute Reference @@ -61,4 +62,4 @@ Application version lifecycle (`appversionLifecycle`) supports the nested attrib * `maxAgeInDays` - Number of days to retain an application version. * `deleteSourceFromS3` - Specifies whether delete a version's source bundle from S3 when the application version is deleted. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/elastic_beanstalk_hosted_zone.html.markdown b/website/docs/cdktf/typescript/d/elastic_beanstalk_hosted_zone.html.markdown index c8c2cef32c07..a4a68cc7cb50 100644 --- a/website/docs/cdktf/typescript/d/elastic_beanstalk_hosted_zone.html.markdown +++ b/website/docs/cdktf/typescript/d/elastic_beanstalk_hosted_zone.html.markdown @@ -36,7 +36,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: -* `region` - (Optional) Region you'd like the zone for. By default, fetches the current region. +* `region` - (Optional) Name of the Region whose hosted zone is desired. Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). ## Attribute Reference @@ -44,6 +44,4 @@ This data source exports the following attributes in addition to the arguments a * `id` - ID of the hosted zone. -* `region` - Region of the hosted zone. 
- - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/elastic_beanstalk_solution_stack.html.markdown b/website/docs/cdktf/typescript/d/elastic_beanstalk_solution_stack.html.markdown index 6025e3c8802f..79c740ae85e7 100644 --- a/website/docs/cdktf/typescript/d/elastic_beanstalk_solution_stack.html.markdown +++ b/website/docs/cdktf/typescript/d/elastic_beanstalk_solution_stack.html.markdown @@ -39,6 +39,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `mostRecent` - (Optional) If more than one result is returned, use the most recent solution stack. * `nameRegex` - Regex string to apply to the solution stack list returned @@ -57,4 +58,4 @@ This data source exports the following attributes in addition to the arguments a [beanstalk-platforms]: http://docs.aws.amazon.com/elasticbeanstalk/latest/dg/concepts.platforms.html "AWS Elastic Beanstalk Supported Platforms documentation" - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/elasticache_cluster.html.markdown b/website/docs/cdktf/typescript/d/elasticache_cluster.html.markdown index 04e3573be8ff..4d43c0791589 100644 --- a/website/docs/cdktf/typescript/d/elasticache_cluster.html.markdown +++ b/website/docs/cdktf/typescript/d/elasticache_cluster.html.markdown @@ -38,33 +38,34 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: -* `clusterId` – (Required) Group identifier. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `clusterId` - (Required) Group identifier. ## Attribute Reference This data source exports the following attributes in addition to the arguments above: -* `nodeType` – The cluster node type. -* `numCacheNodes` – The number of cache nodes that the cache cluster has. -* `engine` – Name of the cache engine. -* `engineVersion` – Version number of the cache engine. +* `nodeType` - The cluster node type. +* `numCacheNodes` - The number of cache nodes that the cache cluster has. +* `engine` - Name of the cache engine. +* `engineVersion` - Version number of the cache engine. * `ipDiscovery` - The IP version advertised in the discovery protocol. * `networkType` - The IP versions for cache cluster connections. -* `subnetGroupName` – Name of the subnet group associated to the cache cluster. -* `securityGroupIds` – List VPC security groups associated with the cache cluster. -* `parameterGroupName` – Name of the parameter group associated with this cache cluster. +* `subnetGroupName` - Name of the subnet group associated to the cache cluster. +* `securityGroupIds` - List VPC security groups associated with the cache cluster. +* `parameterGroupName` - Name of the parameter group associated with this cache cluster. * `replicationGroupId` - The replication group to which this cache cluster belongs. * `logDeliveryConfiguration` - Redis [SLOWLOG](https://redis.io/commands/slowlog) or Redis [Engine Log](https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/Log_Delivery.html#Log_contents-engine-log) delivery settings. -* `maintenanceWindow` – Specifies the weekly time range for when maintenance +* `maintenanceWindow` - Specifies the weekly time range for when maintenance on the cache cluster is performed. 
* `snapshotWindow` - Daily time range (in UTC) during which ElastiCache will begin taking a daily snapshot of the cache cluster. * `snapshotRetentionLimit` - The number of days for which ElastiCache will retain automatic cache cluster snapshots before deleting them. * `availabilityZone` - Availability Zone for the cache cluster. -* `notificationTopicArn` – An ARN of an +* `notificationTopicArn` - An ARN of an SNS topic that ElastiCache notifications get sent to. -* `port` – The port number on which each of the cache nodes will +* `port` - The port number on which each of the cache nodes will accept connections. * `configurationEndpoint` - (Memcached only) Configuration endpoint to allow host discovery. * `clusterAddress` - (Memcached only) DNS name of the cache cluster without the port appended. @@ -73,4 +74,4 @@ accept connections. Referenceable e.g., as `${data.aws_elasticache_cluster.bar.cache_nodes.0.address}` * `tags` - Tags assigned to the resource - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/elasticache_replication_group.html.markdown b/website/docs/cdktf/typescript/d/elasticache_replication_group.html.markdown index 18c391fa8fac..10091cdd4ba4 100644 --- a/website/docs/cdktf/typescript/d/elasticache_replication_group.html.markdown +++ b/website/docs/cdktf/typescript/d/elasticache_replication_group.html.markdown @@ -38,7 +38,8 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: -* `replicationGroupId` – (Required) Identifier for the replication group. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `replicationGroupId` - (Required) Identifier for the replication group. 
## Attribute Reference @@ -49,8 +50,8 @@ This data source exports the following attributes in addition to the arguments a * `authTokenEnabled` - Whether an AuthToken (password) is enabled. * `automaticFailoverEnabled` - A flag whether a read-only replica will be automatically promoted to read/write primary if the existing primary fails. * `clusterMode` - Whether cluster mode is enabled or disabled. -* `nodeType` – The cluster node type. -* `numCacheClusters` – The number of cache clusters that the replication group has. +* `nodeType` - The cluster node type. +* `numCacheClusters` - The number of cache clusters that the replication group has. * `numNodeGroups` - Number of node groups (shards) for the replication group. * `memberClusters` - Identifiers of all the nodes that are part of this replication group. * `multiAzEnabled` - Whether Multi-AZ Support is enabled for the replication group. @@ -58,9 +59,9 @@ This data source exports the following attributes in addition to the arguments a * `logDeliveryConfiguration` - Redis [SLOWLOG](https://redis.io/commands/slowlog) or Redis [Engine Log](https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/Log_Delivery.html#Log_contents-engine-log) delivery settings. * `snapshotWindow` - Daily time range (in UTC) during which ElastiCache begins taking a daily snapshot of your node group (shard). * `snapshotRetentionLimit` - The number of days for which ElastiCache retains automatic cache cluster snapshots before deleting them. -* `port` – The port number on which the configuration endpoint will accept connections. +* `port` - The port number on which the configuration endpoint will accept connections. * `configurationEndpointAddress` - The configuration endpoint address to allow host discovery. * `primaryEndpointAddress` - The endpoint of the primary node in this node group (shard). * `readerEndpointAddress` - The endpoint of the reader node in this node group (shard). 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/elasticache_reserved_cache_node_offering.html.markdown b/website/docs/cdktf/typescript/d/elasticache_reserved_cache_node_offering.html.markdown index 9f8987b66a0d..996379d35e38 100644 --- a/website/docs/cdktf/typescript/d/elasticache_reserved_cache_node_offering.html.markdown +++ b/website/docs/cdktf/typescript/d/elasticache_reserved_cache_node_offering.html.markdown @@ -41,6 +41,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `cacheNodeType` - (Required) Node type for the reserved cache node. See AWS documentation for information on [supported node types for Redis](https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/CacheNodes.SupportedTypes.html) and [guidance on selecting node types for Redis](https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/nodes-select-size.html). See AWS documentation for information on [supported node types for Memcached](https://docs.aws.amazon.com/AmazonElastiCache/latest/mem-ug/CacheNodes.SupportedTypes.html) and [guidance on selecting node types for Memcached](https://docs.aws.amazon.com/AmazonElastiCache/latest/mem-ug/nodes-select-size.html). @@ -61,4 +62,4 @@ This data source exports the following attributes in addition to the arguments a * `fixedPrice` - Fixed price charged for this reserved cache node. * `offeringId` - Unique identifier for the reservation. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/elasticache_serverless_cache.html.markdown b/website/docs/cdktf/typescript/d/elasticache_serverless_cache.html.markdown index 67b13574bdb5..b5efb09dc9aa 100644 --- a/website/docs/cdktf/typescript/d/elasticache_serverless_cache.html.markdown +++ b/website/docs/cdktf/typescript/d/elasticache_serverless_cache.html.markdown @@ -38,7 +38,8 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: -* `name` – (Required) Identifier for the serverless cache. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `name` - (Required) Identifier for the serverless cache. ## Attribute Reference @@ -50,7 +51,7 @@ This data source exports the following attributes in addition to the arguments a * `dailySnapshotTime` - The daily time that snapshots will be created from the new serverless cache. Only available for engine types `"redis"` and `"valkey"`. * `description` - Description of the serverless cache. * `endpoint` - Represents the information required for client programs to connect to the cache. See [`endpoint` Block](#endpoint-block) for details. -* `engine` – Name of the cache engine. +* `engine` - Name of the cache engine. * `fullEngineVersion` - The name and version number of the engine the serverless cache is compatible with. * `kmsKeyId` - ARN of the customer managed key for encrypting the data at rest. * `majorEngineVersion` - The version number of the engine the serverless cache is compatible with. 
@@ -58,7 +59,7 @@ This data source exports the following attributes in addition to the arguments a * `securityGroupIds` - A list of the one or more VPC security groups associated with the serverless cache. * `snapshotRetentionLimit` - The number of snapshots that will be retained for the serverless cache. Available for Redis only. * `status` - The current status of the serverless cache. -* `subnetIds` – A list of the identifiers of the subnets where the VPC endpoint for the serverless cache are deployed. +* `subnetIds` - A list of the identifiers of the subnets where the VPC endpoint for the serverless cache are deployed. * `userGroupId` - The identifier of the UserGroup associated with the serverless cache. Available for Redis only. ### `cacheUsageLimits` Block @@ -97,4 +98,4 @@ The `readerEndpoint` block exports the following attributes: * `address` - The DNS hostname of the cache node. * `port` - The port number that the cache engine is listening on. Set as integer. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/elasticache_subnet_group.html.markdown b/website/docs/cdktf/typescript/d/elasticache_subnet_group.html.markdown index 0ab460d16fe5..cf5d3d4f76b4 100644 --- a/website/docs/cdktf/typescript/d/elasticache_subnet_group.html.markdown +++ b/website/docs/cdktf/typescript/d/elasticache_subnet_group.html.markdown @@ -8,7 +8,7 @@ description: |- -# Resource: aws_elasticache_subnet_group +# Data Source: aws_elasticache_subnet_group Provides information about a ElastiCache Subnet Group. @@ -36,8 +36,9 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -The following arguments are required: +This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the subnet group. ## Attribute Reference @@ -51,4 +52,4 @@ This data source exports the following attributes in addition to the arguments a * `tags` - Map of tags assigned to the subnet group. * `vpcId` - The Amazon Virtual Private Cloud identifier (VPC ID) of the cache subnet group. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/elasticache_user.html.markdown b/website/docs/cdktf/typescript/d/elasticache_user.html.markdown index 6536279ad89a..dd71ada136f3 100644 --- a/website/docs/cdktf/typescript/d/elasticache_user.html.markdown +++ b/website/docs/cdktf/typescript/d/elasticache_user.html.markdown @@ -38,7 +38,8 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: -* `userId` – (Required) Identifier for the user. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `userId` - (Required) Identifier for the user. ## Attribute Reference @@ -48,4 +49,4 @@ This data source exports the following attributes in addition to the arguments a * `userName` - User name of the user. * `accessString` - String for what access a user possesses within the associated ElastiCache replication groups or clusters. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/elasticsearch_domain.html.markdown b/website/docs/cdktf/typescript/d/elasticsearch_domain.html.markdown index 907b0f4b7d08..09afdac02cd1 100644 --- a/website/docs/cdktf/typescript/d/elasticsearch_domain.html.markdown +++ b/website/docs/cdktf/typescript/d/elasticsearch_domain.html.markdown @@ -38,18 +38,19 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: -* `domainName` – (Required) Name of the domain. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `domainName` - (Required) Name of the domain. ## Attribute Reference This data source exports the following attributes in addition to the arguments above: -* `accessPolicies` – The policy document attached to the domain. +* `accessPolicies` - The policy document attached to the domain. * `advancedOptions` - Key-value string pairs to specify advanced configuration options. * `advancedSecurityOptions` - Status of the Elasticsearch domain's advanced security options. The block consists of the following attributes: * `enabled` - Whether advanced security is enabled. * `internalUserDatabaseEnabled` - Whether the internal user database is enabled. -* `arn` – The ARN of the domain. +* `arn` - The ARN of the domain. * `autoTuneOptions` - Configuration of the Auto-Tune options of the domain. * `desiredState` - The Auto-Tune desired state for the domain. * `maintenanceSchedule` - A list of the nested configurations for the Auto-Tune maintenance windows of the domain. @@ -78,20 +79,20 @@ This data source exports the following attributes in addition to the arguments a * `userPoolId` - The Cognito User pool used by the domain. 
* `identityPoolId` - The Cognito Identity pool used by the domain. * `roleArn` - The IAM Role with the AmazonESCognitoAccess policy attached. -* `created` – Status of the creation of the domain. -* `deleted` – Status of the deletion of the domain. -* `domainId` – Unique identifier for the domain. +* `created` - Status of the creation of the domain. +* `deleted` - Status of the deletion of the domain. +* `domainId` - Unique identifier for the domain. * `ebsOptions` - EBS Options for the instances in the domain. * `ebsEnabled` - Whether EBS volumes are attached to data nodes in the domain. * `throughput` - The throughput (in MiB/s) of the EBS volumes attached to data nodes. * `volumeType` - The type of EBS volumes attached to data nodes. * `volumeSize` - The size of EBS volumes attached to data nodes (in GB). * `iops` - The baseline input/output (I/O) performance of EBS volumes attached to data nodes. -* `elasticsearchVersion` – Elasticsearch version for the domain. +* `elasticsearchVersion` - Elasticsearch version for the domain. * `encryptionAtRest` - Domain encryption at rest related options. * `enabled` - Whether encryption at rest is enabled in the domain. * `kmsKeyId` - The KMS key id used to encrypt data at rest. -* `endpoint` – Domain-specific endpoint used to submit index, search, and data upload requests. +* `endpoint` - Domain-specific endpoint used to submit index, search, and data upload requests. * `kibanaEndpoint` - Domain-specific endpoint used to access the Kibana application. * `logPublishingOptions` - Domain log publishing related options. * `logType` - The type of Elasticsearch log being published. @@ -99,7 +100,7 @@ This data source exports the following attributes in addition to the arguments a * `enabled` - Whether log publishing is enabled. * `nodeToNodeEncryption` - Domain in transit encryption related options. * `enabled` - Whether node to node encryption is enabled. -* `processing` – Status of a configuration change in the domain. 
+* `processing` - Status of a configuration change in the domain. * `snapshotOptions` – Domain snapshot related options. * `automatedSnapshotStartHour` - Hour during which the service takes an automated daily snapshot of the indices in the domain. * `tags` - Tags assigned to the domain. @@ -109,4 +110,4 @@ This data source exports the following attributes in addition to the arguments a * `subnetIds` - The subnets used by the domain. * `vpcId` - The VPC used by the domain. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/elb.html.markdown b/website/docs/cdktf/typescript/d/elb.html.markdown index dcd680f15ddc..fbe982d9c7e9 100644 --- a/website/docs/cdktf/typescript/d/elb.html.markdown +++ b/website/docs/cdktf/typescript/d/elb.html.markdown @@ -50,6 +50,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Unique name of the load balancer. ## Attribute Reference @@ -59,4 +60,4 @@ This data source exports the following attributes in addition to the arguments a See the [ELB Resource](/docs/providers/aws/r/elb.html) for details on the returned attributes - they are identical. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/elb_hosted_zone_id.html.markdown b/website/docs/cdktf/typescript/d/elb_hosted_zone_id.html.markdown index 153e0ab6e25b..9cca5f0d41de 100644 --- a/website/docs/cdktf/typescript/d/elb_hosted_zone_id.html.markdown +++ b/website/docs/cdktf/typescript/d/elb_hosted_zone_id.html.markdown @@ -48,13 +48,12 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: -* `region` - (Optional) Name of the region whose AWS ELB HostedZoneId is desired. - Defaults to the region from the AWS provider configuration. +* `region` - (Optional) Name of the Region whose AWS ELB HostedZoneId is desired. Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). ## Attribute Reference This data source exports the following attributes in addition to the arguments above: -* `id` - ID of the AWS ELB HostedZoneId in the selected region. +* `id` - ID of the AWS ELB HostedZoneId in the selected Region. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/elb_service_account.html.markdown b/website/docs/cdktf/typescript/d/elb_service_account.html.markdown index 80d85ea3bc7e..0abed53bb106 100644 --- a/website/docs/cdktf/typescript/d/elb_service_account.html.markdown +++ b/website/docs/cdktf/typescript/d/elb_service_account.html.markdown @@ -96,14 +96,13 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: -* `region` - (Optional) Name of the region whose AWS ELB account ID is desired. - Defaults to the region from the AWS provider configuration. +* `region` - (Optional) Name of the Region whose AWS ELB account ID is desired. 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). ## Attribute Reference This data source exports the following attributes in addition to the arguments above: -* `id` - ID of the AWS ELB service account in the selected region. -* `arn` - ARN of the AWS ELB service account in the selected region. +* `id` - ID of the AWS ELB service account in the selected Region. +* `arn` - ARN of the AWS ELB service account in the selected Region. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/emr_release_labels.html.markdown b/website/docs/cdktf/typescript/d/emr_release_labels.html.markdown index 7b8f355ee8f9..b29d19966df8 100644 --- a/website/docs/cdktf/typescript/d/emr_release_labels.html.markdown +++ b/website/docs/cdktf/typescript/d/emr_release_labels.html.markdown @@ -41,7 +41,8 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: -* `filters` – (Optional) Filters the results of the request. Prefix specifies the prefix of release labels to return. Application specifies the application (with/without version) of release labels to return. See [Filters](#filters). +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `filters` - (Optional) Filters the results of the request. Prefix specifies the prefix of release labels to return. Application specifies the application (with/without version) of release labels to return. See [Filters](#filters). ### Filters @@ -54,4 +55,4 @@ This data source exports the following attributes in addition to the arguments a * `releaseLabels` - Returned release labels. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/emr_supported_instance_types.html.markdown b/website/docs/cdktf/typescript/d/emr_supported_instance_types.html.markdown index 348e521e674d..68f3be34ec2b 100644 --- a/website/docs/cdktf/typescript/d/emr_supported_instance_types.html.markdown +++ b/website/docs/cdktf/typescript/d/emr_supported_instance_types.html.markdown @@ -93,8 +93,9 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -The following arguments are required: +This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `releaseLabel` - (Required) Amazon EMR release label. For more information about Amazon EMR releases and their included application versions and features, see the [Amazon EMR Release Guide](https://docs.aws.amazon.com/emr/latest/ReleaseGuide/emr-release-components.html). ## Attribute Reference @@ -106,15 +107,15 @@ This data source exports the following attributes in addition to the arguments a ### `supportedInstanceTypes` Attribute Reference * `architecture` - CPU architecture. -* `ebsOptimizedAvailable` - Indicates whether the instance type supports Amazon EBS optimization. -* `ebsOptimizedByDefault` - Indicates whether the instance type uses Amazon EBS optimization by default. -* `ebsStorageOnly` - Indicates whether the instance type only supports Amazon EBS. -* `instanceFamilyId` - The Amazon EC2 family and generation for the instance type. -* `is64BitsOnly` - Indicates whether the instance type only supports 64-bit architecture. -* `memoryGb` - Memory that is available to Amazon EMR from the instance type. -* `numberOfDisks` - Number of disks for the instance type. 
-* `storageGb` - Storage capacity of the instance type. +* `ebsOptimizedAvailable` - Indicates whether the instance type supports Amazon EBS optimization. +* `ebsOptimizedByDefault` - Indicates whether the instance type uses Amazon EBS optimization by default. +* `ebsStorageOnly` - Indicates whether the instance type only supports Amazon EBS. +* `instanceFamilyId` - The Amazon EC2 family and generation for the instance type. +* `is64BitsOnly` - Indicates whether the instance type only supports 64-bit architecture. +* `memoryGb` - Memory that is available to Amazon EMR from the instance type. +* `numberOfDisks` - Number of disks for the instance type. +* `storageGb` - Storage capacity of the instance type. * `type` - Amazon EC2 instance type. For example, `m5.xlarge`. * `vcpu` - The number of vCPUs available for the instance type. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/emrcontainers_virtual_cluster.html.markdown b/website/docs/cdktf/typescript/d/emrcontainers_virtual_cluster.html.markdown index 219caa278c57..1a495eeef1ae 100644 --- a/website/docs/cdktf/typescript/d/emrcontainers_virtual_cluster.html.markdown +++ b/website/docs/cdktf/typescript/d/emrcontainers_virtual_cluster.html.markdown @@ -44,6 +44,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `virtualClusterId` - (Required) ID of the cluster. ## Attribute Reference @@ -63,4 +64,4 @@ This data source exports the following attributes in addition to the arguments a * `state` - Status of the EKS cluster. One of `RUNNING`, `TERMINATING`, `TERMINATED`, `ARRESTED`. 
* `tags` - Key-value mapping of resource tags. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/fis_experiment_templates.html.markdown b/website/docs/cdktf/typescript/d/fis_experiment_templates.html.markdown index 7fe5f945a532..39221f3d81e8 100644 --- a/website/docs/cdktf/typescript/d/fis_experiment_templates.html.markdown +++ b/website/docs/cdktf/typescript/d/fis_experiment_templates.html.markdown @@ -89,6 +89,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `tags` - (Optional) Map of tags, each pair of which must exactly match a pair on the desired experiment templates. @@ -98,4 +99,4 @@ This data source exports the following attributes in addition to the arguments a * `ids` - List of all the experiment template ids found. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/fsx_ontap_file_system.html.markdown b/website/docs/cdktf/typescript/d/fsx_ontap_file_system.html.markdown index d57b5e9ab607..88bd05952eb7 100644 --- a/website/docs/cdktf/typescript/d/fsx_ontap_file_system.html.markdown +++ b/website/docs/cdktf/typescript/d/fsx_ontap_file_system.html.markdown @@ -40,6 +40,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
* `id` - (Required) Identifier of the file system (e.g. `fs-12345678`). ## Attribute Reference @@ -87,4 +88,4 @@ This data source exports the following attributes in addition to the arguments a * `DNSName` - The file system's DNS name. You can mount your file system using its DNS name. * `IpAddresses` - IP addresses of the file system endpoint. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/fsx_ontap_storage_virtual_machine.html.markdown b/website/docs/cdktf/typescript/d/fsx_ontap_storage_virtual_machine.html.markdown index 819c40567af9..01251938b94e 100644 --- a/website/docs/cdktf/typescript/d/fsx_ontap_storage_virtual_machine.html.markdown +++ b/website/docs/cdktf/typescript/d/fsx_ontap_storage_virtual_machine.html.markdown @@ -67,6 +67,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `filter` - (Optional) Configuration block. Detailed below. * `id` - (Optional) Identifier of the storage virtual machine (e.g. `svm-12345678`). @@ -128,4 +129,4 @@ The following arguments are supported for `activeDirectoryConfiguration` configu * `DNSName` - The file system's DNS name. You can mount your file system using its DNS name. * `IpAddresses` - The SVM endpoint's IP addresses. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/fsx_ontap_storage_virtual_machines.html.markdown b/website/docs/cdktf/typescript/d/fsx_ontap_storage_virtual_machines.html.markdown index 738ed1e5931e..990a14eb8a41 100644 --- a/website/docs/cdktf/typescript/d/fsx_ontap_storage_virtual_machines.html.markdown +++ b/website/docs/cdktf/typescript/d/fsx_ontap_storage_virtual_machines.html.markdown @@ -45,6 +45,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `filter` - (Optional) Configuration block. Detailed below. ### filter @@ -62,4 +63,4 @@ This data source exports the following attributes in addition to the arguments a * `ids` - List of all SVM IDs found. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/fsx_openzfs_snapshot.html.markdown b/website/docs/cdktf/typescript/d/fsx_openzfs_snapshot.html.markdown index 2d4be00d5442..b9f9c95f5d1c 100644 --- a/website/docs/cdktf/typescript/d/fsx_openzfs_snapshot.html.markdown +++ b/website/docs/cdktf/typescript/d/fsx_openzfs_snapshot.html.markdown @@ -46,10 +46,9 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `mostRecent` - (Optional) If more than one result is returned, use the most recent snapshot. 
- * `snapshotIds` - (Optional) Returns information on a specific snapshot_id. - * `filter` - (Optional) One or more name/value pairs to filter off of. The supported names are file-system-id or volume-id. @@ -65,4 +64,4 @@ This data source exports the following attributes in addition to the arguments a * `tags` - List of Tag values, with a maximum of 50 elements. * `volumeId` - ID of the volume that the snapshot is of. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/fsx_windows_file_system.html.markdown b/website/docs/cdktf/typescript/d/fsx_windows_file_system.html.markdown index 9f061150b215..6c0be774fc5a 100644 --- a/website/docs/cdktf/typescript/d/fsx_windows_file_system.html.markdown +++ b/website/docs/cdktf/typescript/d/fsx_windows_file_system.html.markdown @@ -38,8 +38,9 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -The following arguments are required: +This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `id` - (Required) Identifier of the file system (e.g. `fs-12345678`). ## Attribute Reference @@ -69,4 +70,4 @@ This data source exports the following attributes in addition to the arguments a * `vpcId` - The ID of the primary virtual private cloud (VPC) for the file system. * `weeklyMaintenanceStartTime` - The preferred start time (in `d:HH:MM` format) to perform weekly maintenance, in the UTC time zone. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/glue_catalog_table.html.markdown b/website/docs/cdktf/typescript/d/glue_catalog_table.html.markdown index 434d84e8dbcb..d9796b8fd2c1 100644 --- a/website/docs/cdktf/typescript/d/glue_catalog_table.html.markdown +++ b/website/docs/cdktf/typescript/d/glue_catalog_table.html.markdown @@ -39,6 +39,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the table. * `databaseName` - (Required) Name of the metadata database where the table metadata resides. * `catalogId` - (Optional) ID of the Glue Catalog and database where the table metadata resides. If omitted, this defaults to the current AWS Account ID. @@ -72,6 +73,7 @@ This data source exports the following attributes in addition to the arguments a * `comment` - Free-form text comment. * `name` - Name of the Partition Key. +* `parameters` - Map of key-value pairs. * `type` - Datatype of data in the Partition Key. ### storage_descriptor @@ -134,4 +136,4 @@ This data source exports the following attributes in addition to the arguments a * `name` - Name of the target table. * `region` - Region of the target table. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/glue_connection.html.markdown b/website/docs/cdktf/typescript/d/glue_connection.html.markdown index 0824c1331c21..53ae13dfa30d 100644 --- a/website/docs/cdktf/typescript/d/glue_connection.html.markdown +++ b/website/docs/cdktf/typescript/d/glue_connection.html.markdown @@ -38,6 +38,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `id` - (Required) Concatenation of the catalog ID and connection name. For example, if your account ID is `123456789123` and the connection name is `conn` then the ID is `123456789123:conn`. @@ -50,10 +51,10 @@ This data source exports the following attributes in addition to the arguments a * `athenaProperties` - A map of connection properties specific to the Athena compute environment. * `connectionProperties` - A map of connection properties. * `connectionType` - Type of Glue Connection. -* `description` – Description of the connection. -* `matchCriteria` – A list of criteria that can be used in selecting this connection. +* `description` - Description of the connection. +* `matchCriteria` - A list of criteria that can be used in selecting this connection. * `name` - Name of the Glue Connection. * `physicalConnectionRequirements` - A map of physical connection requirements, such as VPC and SecurityGroup. 
* `tags` - Tags assigned to the resource - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/glue_data_catalog_encryption_settings.html.markdown b/website/docs/cdktf/typescript/d/glue_data_catalog_encryption_settings.html.markdown index 6f1d1ce1da7d..e58d841cf1dd 100644 --- a/website/docs/cdktf/typescript/d/glue_data_catalog_encryption_settings.html.markdown +++ b/website/docs/cdktf/typescript/d/glue_data_catalog_encryption_settings.html.markdown @@ -42,14 +42,15 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `catalogId` - (Required) ID of the Data Catalog. This is typically the AWS account ID. ## Attribute Reference This data source exports the following attributes in addition to the arguments above: -* `dataCatalogEncryptionSettings` – The security configuration to set. see [Data Catalog Encryption Settings](#data_catalog_encryption_settings). -* `id` – The ID of the Data Catalog to set the security configuration for. +* `dataCatalogEncryptionSettings` - The security configuration to set. see [Data Catalog Encryption Settings](#data_catalog_encryption_settings). +* `id` - The ID of the Data Catalog to set the security configuration for. ### data_catalog_encryption_settings @@ -67,4 +68,4 @@ This data source exports the following attributes in addition to the arguments a * `catalogEncryptionServiceRole` - The ARN of the AWS IAM role used for accessing encrypted Data Catalog data. * `sseAwsKmsKeyId` - ARN of the AWS KMS key to use for encryption at rest. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/glue_registry.html.markdown b/website/docs/cdktf/typescript/d/glue_registry.html.markdown index 0dda40d0e84f..3cdb116a2df7 100644 --- a/website/docs/cdktf/typescript/d/glue_registry.html.markdown +++ b/website/docs/cdktf/typescript/d/glue_registry.html.markdown @@ -38,8 +38,9 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -The following arguments are required: +This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the Glue Registry. ## Attribute Reference @@ -49,4 +50,4 @@ This data source exports the following attributes in addition to the arguments a * `arn` - Amazon Resource Name (ARN) of Glue Registry. * `description` - A description of the registry. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/glue_script.html.markdown b/website/docs/cdktf/typescript/d/glue_script.html.markdown index fdc74e11dc1c..71532ff9a422 100644 --- a/website/docs/cdktf/typescript/d/glue_script.html.markdown +++ b/website/docs/cdktf/typescript/d/glue_script.html.markdown @@ -222,6 +222,7 @@ output "scala_code" { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `dagEdge` - (Required) List of the edges in the DAG. Defined below. 
* `dagNode` - (Required) List of the nodes in the DAG. Defined below. * `language` - (Optional) Programming language of the resulting code from the DAG. Defaults to `PYTHON`. Valid values are `PYTHON` and `SCALA`. @@ -253,4 +254,4 @@ This data source exports the following attributes in addition to the arguments a * `pythonScript` - Python script generated from the DAG when the `language` argument is set to `PYTHON`. * `scalaCode` - Scala code generated from the DAG when the `language` argument is set to `SCALA`. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/grafana_workspace.html.markdown b/website/docs/cdktf/typescript/d/grafana_workspace.html.markdown index db93249bcf49..395cb81c45ed 100644 --- a/website/docs/cdktf/typescript/d/grafana_workspace.html.markdown +++ b/website/docs/cdktf/typescript/d/grafana_workspace.html.markdown @@ -38,8 +38,9 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -The following arguments are required: +This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `workspaceId` - (Required) Grafana workspace ID. ## Attribute Reference @@ -65,4 +66,4 @@ This data source exports the following attributes in addition to the arguments a * `status` - Status of the Grafana workspace. 
* `tags` - Tags assigned to the resource - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/guardduty_detector.html.markdown b/website/docs/cdktf/typescript/d/guardduty_detector.html.markdown index a76c78f7c305..c57f5fd35f0f 100644 --- a/website/docs/cdktf/typescript/d/guardduty_detector.html.markdown +++ b/website/docs/cdktf/typescript/d/guardduty_detector.html.markdown @@ -36,6 +36,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `id` - (Optional) ID of the detector. ## Attribute Reference @@ -54,4 +55,4 @@ This data source exports the following attributes in addition to the arguments a * `status` - Current status of the detector. * `tags` - Map of tags for the resource. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/guardduty_finding_ids.html.markdown b/website/docs/cdktf/typescript/d/guardduty_finding_ids.html.markdown index 3abee28cf999..0c023e8b9463 100644 --- a/website/docs/cdktf/typescript/d/guardduty_finding_ids.html.markdown +++ b/website/docs/cdktf/typescript/d/guardduty_finding_ids.html.markdown @@ -38,8 +38,9 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -The following arguments are required: +This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
* `detectorId` - (Required) ID of the GuardDuty detector. ## Attribute Reference @@ -49,4 +50,4 @@ This data source exports the following attributes in addition to the arguments a * `hasFindings` - Indicates whether findings are present for the specified detector. * `findingIds` - A list of finding IDs for the specified detector. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/iam_principal_policy_simulation.html.markdown b/website/docs/cdktf/typescript/d/iam_principal_policy_simulation.html.markdown index ea55e392ad42..100286901ef4 100644 --- a/website/docs/cdktf/typescript/d/iam_principal_policy_simulation.html.markdown +++ b/website/docs/cdktf/typescript/d/iam_principal_policy_simulation.html.markdown @@ -73,14 +73,14 @@ import { TerraformStack } from "cdktf"; * Provider bindings are generated by running `cdktf get`. * See https://cdk.tf/provider-generation for more details. */ -import { S3BucketObject } from "./.gen/providers/aws/s3-bucket-object"; +import { S3Object } from "./.gen/providers/aws/s3-object"; interface MyConfig { key: any; } class MyConvertedCode extends TerraformStack { constructor(scope: Construct, name: string, config: MyConfig) { super(scope, name); - new S3BucketObject(this, "example", { + new S3Object(this, "example", { bucket: "my-test-bucket", dependsOn: [s3ObjectAccess], key: config.key, @@ -265,4 +265,4 @@ This data source exports the following attributes in addition to the arguments a * `missing_context_keys` - A set of context keys (or condition keys) that were needed by some of the policies contributing to this result but not specified using a `context` block in the configuration. Missing or incorrect context keys will typically cause a simulated request to be disallowed. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/iam_server_certificate.html.markdown b/website/docs/cdktf/typescript/d/iam_server_certificate.html.markdown index 1ba732234933..ea2af6f4ba4e 100644 --- a/website/docs/cdktf/typescript/d/iam_server_certificate.html.markdown +++ b/website/docs/cdktf/typescript/d/iam_server_certificate.html.markdown @@ -69,34 +69,4 @@ This data source exports the following attributes in addition to the arguments a * `certificateBody` is the public key certificate (PEM-encoded). This is useful when [configuring back-end instance authentication](http://docs.aws.amazon.com/elasticloadbalancing/latest/classic/elb-create-https-ssl-load-balancer.html) policy for load balancer * `certificateChain` is the public key certificate chain (PEM-encoded) if exists, empty otherwise -## Import - -In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import an IAM server certificate using `name`. For example: - -```typescript -// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug -import { Construct } from "constructs"; -import { TerraformStack } from "cdktf"; -/* - * Provider bindings are generated by running `cdktf get`. - * See https://cdk.tf/provider-generation for more details. - */ -import { IamServerCertificate } from "./.gen/providers/aws/iam-server-certificate"; -class MyConvertedCode extends TerraformStack { - constructor(scope: Construct, name: string) { - super(scope, name); - IamServerCertificate.generateConfigForImport(this, "example", "example"); - } -} - -``` - -Using `terraform import`, import an IAM server certificate using `name`. For example: - -```console -% terraform import aws_iam_server_certificate.example example -``` - -Import will read in the certificate body, certificate chain (if it exists), ID, name, path, and ARN. 
It will not retrieve the private key which is not available through the AWS API. - - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/identitystore_group.html.markdown b/website/docs/cdktf/typescript/d/identitystore_group.html.markdown index c840bfd52ca1..112ae2ba81e5 100644 --- a/website/docs/cdktf/typescript/d/identitystore_group.html.markdown +++ b/website/docs/cdktf/typescript/d/identitystore_group.html.markdown @@ -61,8 +61,8 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `alternateIdentifier` (Optional) A unique identifier for the group that is not the primary identifier. Conflicts with `groupId` and `filter`. Detailed below. -* `filter` - (Optional, **Deprecated** use the `alternateIdentifier` attribute instead) Configuration block for filtering by a unique attribute of the group. Detailed below. * `groupId` - (Optional) The identifier for a group in the Identity Store. -> Exactly one of the above arguments must be provided. Passing both `filter` and `groupId` is allowed for backwards compatibility. @@ -83,15 +83,6 @@ The `externalId` configuration block supports the following arguments: * `id` - (Required) The identifier issued to this resource by an external identity provider. * `issuer` - (Required) The issuer for an external identifier. -### `filter` Configuration Block - -~> The `filter` configuration block has been deprecated. Use `alternateIdentifier` instead. - -The following arguments are supported by the `filter` configuration block: - -* `attributePath` - (Required) Attribute path that is used to specify which attribute name to search. 
Currently, `DisplayName` is the only valid attribute path. -* `attributeValue` - (Required) Value for an attribute. - ### `uniqueAttribute` Configuration Block The `uniqueAttribute` configuration block supports the following arguments: @@ -110,4 +101,4 @@ This data source exports the following attributes in addition to the arguments a * `id` - The identifier issued to this resource by an external identity provider. * `issuer` - The issuer for an external identifier. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/identitystore_group_memberships.html.markdown b/website/docs/cdktf/typescript/d/identitystore_group_memberships.html.markdown index d42068a93803..1374291c28bb 100644 --- a/website/docs/cdktf/typescript/d/identitystore_group_memberships.html.markdown +++ b/website/docs/cdktf/typescript/d/identitystore_group_memberships.html.markdown @@ -66,6 +66,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `groupId` - (Required) The identifier for a group in the Identity Store. * `identityStoreId` - (Required) Identity Store ID associated with the Single Sign-On Instance. @@ -86,4 +87,4 @@ This data source exports the following attributes in addition to the arguments a * `userId` - User identifier of the group member. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/identitystore_groups.html.markdown b/website/docs/cdktf/typescript/d/identitystore_groups.html.markdown index 82953786b0d1..68a4550c9112 100644 --- a/website/docs/cdktf/typescript/d/identitystore_groups.html.markdown +++ b/website/docs/cdktf/typescript/d/identitystore_groups.html.markdown @@ -48,8 +48,9 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -The following arguments are required: +This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `identityStoreId` - (Required) Identity Store ID associated with the Single Sign-On (SSO) Instance. ## Attribute Reference @@ -64,4 +65,4 @@ This data source exports the following attributes in addition to the arguments a * `id` - Identifier issued to this resource by an external identity provider. * `issuer` - Issuer for an external identifier. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/identitystore_user.html.markdown b/website/docs/cdktf/typescript/d/identitystore_user.html.markdown index 163b81606ef2..7e023a1630fc 100644 --- a/website/docs/cdktf/typescript/d/identitystore_user.html.markdown +++ b/website/docs/cdktf/typescript/d/identitystore_user.html.markdown @@ -61,8 +61,8 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `alternateIdentifier` (Optional) A unique identifier for a user or group that is not the primary identifier. Conflicts with `userId` and `filter`. Detailed below. -* `filter` - (Optional, **Deprecated** use the `alternateIdentifier` attribute instead) Configuration block for filtering by a unique attribute of the user. Detailed below. * `userId` - (Optional) The identifier for a user in the Identity Store. -> Exactly one of the above arguments must be provided. Passing both `filter` and `userId` is allowed for backwards compatibility. @@ -83,15 +83,6 @@ The `externalId` configuration block supports the following arguments: * `id` - (Required) The identifier issued to this resource by an external identity provider. * `issuer` - (Required) The issuer for an external identifier. -### `filter` Configuration Block - -~> The `filter` configuration block has been deprecated. Use `alternateIdentifier` instead. - -The following arguments are supported by the `filter` configuration block: - -* `attributePath` - (Required) Attribute path that is used to specify which attribute name to search. Currently, `UserName` is the only valid attribute path. -* `attributeValue` - (Required) Value for an attribute. - ### `uniqueAttribute` Configuration Block The `uniqueAttribute` configuration block supports the following arguments: @@ -141,4 +132,4 @@ This data source exports the following attributes in addition to the arguments a * `userName` - User's user name value. * `userType` - The user type. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/identitystore_users.html.markdown b/website/docs/cdktf/typescript/d/identitystore_users.html.markdown index 8f549c23de03..7eb63e525e10 100644 --- a/website/docs/cdktf/typescript/d/identitystore_users.html.markdown +++ b/website/docs/cdktf/typescript/d/identitystore_users.html.markdown @@ -50,6 +50,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `identityStoreId` - (Required) Identity Store ID associated with the Single Sign-On Instance. ## Attribute Reference @@ -95,4 +96,4 @@ This data source exports the following attributes in addition to the arguments a * `userName` - User's user name value. * `userType` - User type. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/imagebuilder_component.html.markdown b/website/docs/cdktf/typescript/d/imagebuilder_component.html.markdown index a1ae88bf3a8e..ae67886beb97 100644 --- a/website/docs/cdktf/typescript/d/imagebuilder_component.html.markdown +++ b/website/docs/cdktf/typescript/d/imagebuilder_component.html.markdown @@ -38,6 +38,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `arn` - (Required) ARN of the component. 
## Attribute Reference @@ -58,4 +59,4 @@ This data source exports the following attributes in addition to the arguments a * `type` - Type of the component. * `version` - Version of the component. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/imagebuilder_components.html.markdown b/website/docs/cdktf/typescript/d/imagebuilder_components.html.markdown index 33f781a1f99b..d8e127249539 100644 --- a/website/docs/cdktf/typescript/d/imagebuilder_components.html.markdown +++ b/website/docs/cdktf/typescript/d/imagebuilder_components.html.markdown @@ -44,6 +44,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `owner` - (Optional) Owner of the image recipes. Valid values are `Self`, `Shared`, `Amazon` and `ThirdParty`. Defaults to `Self`. * `filter` - (Optional) Configuration block(s) for filtering. Detailed below. @@ -61,4 +62,4 @@ This data source exports the following attributes in addition to the arguments a * `arns` - Set of ARNs of the matched Image Builder Components. * `names` - Set of names of the matched Image Builder Components. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/imagebuilder_container_recipe.html.markdown b/website/docs/cdktf/typescript/d/imagebuilder_container_recipe.html.markdown index 6bec5520c725..926a3128e16a 100644 --- a/website/docs/cdktf/typescript/d/imagebuilder_container_recipe.html.markdown +++ b/website/docs/cdktf/typescript/d/imagebuilder_container_recipe.html.markdown @@ -38,6 +38,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `arn` - (Required) ARN of the container recipe. ## Attribute Reference @@ -81,4 +82,4 @@ This data source exports the following attributes in addition to the arguments a * `version` - Version of the container recipe. * `workingDirectory` - Working directory used during build and test workflows. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/imagebuilder_container_recipes.html.markdown b/website/docs/cdktf/typescript/d/imagebuilder_container_recipes.html.markdown index 103e08a6d963..f55706df3c8f 100644 --- a/website/docs/cdktf/typescript/d/imagebuilder_container_recipes.html.markdown +++ b/website/docs/cdktf/typescript/d/imagebuilder_container_recipes.html.markdown @@ -44,6 +44,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `owner` - (Optional) Owner of the container recipes. Valid values are `Self`, `Shared`, `Amazon` and `ThirdParty`. Defaults to `Self`. * `filter` - (Optional) Configuration block(s) for filtering. Detailed below. @@ -61,4 +62,4 @@ This data source exports the following attributes in addition to the arguments a * `arns` - Set of ARNs of the matched Image Builder Container Recipes. * `names` - Set of names of the matched Image Builder Container Recipes. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/imagebuilder_distribution_configuration.html.markdown b/website/docs/cdktf/typescript/d/imagebuilder_distribution_configuration.html.markdown index b8fd6eb1a1c4..98eccdc78b17 100644 --- a/website/docs/cdktf/typescript/d/imagebuilder_distribution_configuration.html.markdown +++ b/website/docs/cdktf/typescript/d/imagebuilder_distribution_configuration.html.markdown @@ -38,6 +38,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `arn` - (Required) ARN of the distribution configuration. ## Attribute Reference @@ -92,4 +93,4 @@ This data source exports the following attributes in addition to the arguments a * `name` - Name of the distribution configuration. * `tags` - Key-value map of resource tags for the distribution configuration. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/imagebuilder_distribution_configurations.html.markdown b/website/docs/cdktf/typescript/d/imagebuilder_distribution_configurations.html.markdown index aeacf1afb5ac..686237d552fa 100644 --- a/website/docs/cdktf/typescript/d/imagebuilder_distribution_configurations.html.markdown +++ b/website/docs/cdktf/typescript/d/imagebuilder_distribution_configurations.html.markdown @@ -43,6 +43,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `filter` - (Optional) Configuration block(s) for filtering. Detailed below. ## filter Configuration Block @@ -59,4 +60,4 @@ This data source exports the following attributes in addition to the arguments a * `arns` - Set of ARNs of the matched Image Builder Distribution Configurations. * `names` - Set of names of the matched Image Builder Distribution Configurations. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/imagebuilder_image.html.markdown b/website/docs/cdktf/typescript/d/imagebuilder_image.html.markdown index 4b015d6f48c4..9ef1c7a999d6 100644 --- a/website/docs/cdktf/typescript/d/imagebuilder_image.html.markdown +++ b/website/docs/cdktf/typescript/d/imagebuilder_image.html.markdown @@ -40,6 +40,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `arn` - (Required) ARN of the image. The suffix can either be specified with wildcards (`x.x.x`) to fetch the latest build version or a full build version (e.g., `2020.11.26/1`) to fetch an exact version. ## Attribute Reference @@ -77,4 +78,4 @@ This data source exports the following attributes in addition to the arguments a * `tags` - Key-value map of resource tags for the image. * `version` - Version of the image. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/imagebuilder_image_pipeline.html.markdown b/website/docs/cdktf/typescript/d/imagebuilder_image_pipeline.html.markdown index 67212ec12308..b75cf8861d6f 100644 --- a/website/docs/cdktf/typescript/d/imagebuilder_image_pipeline.html.markdown +++ b/website/docs/cdktf/typescript/d/imagebuilder_image_pipeline.html.markdown @@ -38,6 +38,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `arn` - (Required) ARN of the image pipeline. ## Attribute Reference @@ -70,4 +71,4 @@ This data source exports the following attributes in addition to the arguments a * `status` - Status of the image pipeline. * `tags` - Key-value map of resource tags for the image pipeline. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/imagebuilder_image_pipelines.html.markdown b/website/docs/cdktf/typescript/d/imagebuilder_image_pipelines.html.markdown index c132284508c0..a4b69414c4bf 100644 --- a/website/docs/cdktf/typescript/d/imagebuilder_image_pipelines.html.markdown +++ b/website/docs/cdktf/typescript/d/imagebuilder_image_pipelines.html.markdown @@ -43,6 +43,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `filter` - (Optional) Configuration block(s) for filtering. Detailed below. ### filter Configuration Block @@ -59,4 +60,4 @@ This data source exports the following attributes in addition to the arguments a * `arns` - Set of ARNs of the matched Image Builder Image Pipelines. * `names` - Set of names of the matched Image Builder Image Pipelines. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/imagebuilder_image_recipe.html.markdown b/website/docs/cdktf/typescript/d/imagebuilder_image_recipe.html.markdown index d5b06d925c6e..6a5080cda7b5 100644 --- a/website/docs/cdktf/typescript/d/imagebuilder_image_recipe.html.markdown +++ b/website/docs/cdktf/typescript/d/imagebuilder_image_recipe.html.markdown @@ -36,8 +36,9 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -The following arguments are required: +This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `arn` - (Required) ARN of the image recipe. ## Attribute Reference @@ -73,4 +74,4 @@ This data source exports the following attributes in addition to the arguments a * `version` - Version of the image recipe. * `workingDirectory` - Working directory used during build and test workflows. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/imagebuilder_image_recipes.html.markdown b/website/docs/cdktf/typescript/d/imagebuilder_image_recipes.html.markdown index 4a589b6ea59c..9ed831443872 100644 --- a/website/docs/cdktf/typescript/d/imagebuilder_image_recipes.html.markdown +++ b/website/docs/cdktf/typescript/d/imagebuilder_image_recipes.html.markdown @@ -44,6 +44,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `owner` - (Optional) Owner of the image recipes. Valid values are `Self`, `Shared`, `Amazon` and `ThirdParty`. Defaults to `Self`. * `filter` - (Optional) Configuration block(s) for filtering. Detailed below. @@ -61,4 +62,4 @@ This data source exports the following attributes in addition to the arguments a * `arns` - Set of ARNs of the matched Image Builder Image Recipes. * `names` - Set of names of the matched Image Builder Image Recipes. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/imagebuilder_infrastructure_configuration.html.markdown b/website/docs/cdktf/typescript/d/imagebuilder_infrastructure_configuration.html.markdown index 05e43b949865..a5e1c798c649 100644 --- a/website/docs/cdktf/typescript/d/imagebuilder_infrastructure_configuration.html.markdown +++ b/website/docs/cdktf/typescript/d/imagebuilder_infrastructure_configuration.html.markdown @@ -36,8 +36,9 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -The following arguments are required: +This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `arn` - (Required) ARN of the infrastructure configuration. ## Attribute Reference @@ -70,4 +71,4 @@ This data source exports the following attributes in addition to the arguments a * `tags` - Key-value map of resource tags for the infrastructure configuration. * `terminateInstanceOnFailure` - Whether instances are terminated on failure. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/imagebuilder_infrastructure_configurations.html.markdown b/website/docs/cdktf/typescript/d/imagebuilder_infrastructure_configurations.html.markdown index 6648173b677f..033f3505e0d7 100644 --- a/website/docs/cdktf/typescript/d/imagebuilder_infrastructure_configurations.html.markdown +++ b/website/docs/cdktf/typescript/d/imagebuilder_infrastructure_configurations.html.markdown @@ -43,6 +43,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `filter` - (Optional) Configuration block(s) for filtering. Detailed below. ## filter Configuration Block @@ -59,4 +60,4 @@ This data source exports the following attributes in addition to the arguments a * `arns` - Set of ARNs of the matched Image Builder Infrastructure Configurations. * `names` - Set of names of the matched Image Builder Infrastructure Configurations. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/inspector_rules_packages.html.markdown b/website/docs/cdktf/typescript/d/inspector_rules_packages.html.markdown index 73f9a322b111..992588dab104 100644 --- a/website/docs/cdktf/typescript/d/inspector_rules_packages.html.markdown +++ b/website/docs/cdktf/typescript/d/inspector_rules_packages.html.markdown @@ -57,7 +57,9 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -This data source does not support any arguments. 
+This data source supports the following arguments: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). ## Attribute Reference @@ -66,4 +68,4 @@ This data source exports the following attributes in addition to the arguments a * `id` - AWS Region. * `arns` - List of the Amazon Inspector Classic Rules Packages arns available in the AWS region. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/instance.html.markdown b/website/docs/cdktf/typescript/d/instance.html.markdown index 51b76d9fb54f..a70db09bed8c 100644 --- a/website/docs/cdktf/typescript/d/instance.html.markdown +++ b/website/docs/cdktf/typescript/d/instance.html.markdown @@ -48,12 +48,14 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `instanceId` - (Optional) Specify the exact Instance ID with which to populate the data source. * `instanceTags` - (Optional) Map of tags, each pair of which must exactly match a pair on the desired Instance. -* `filter` - (Optional) One or more name/value pairs to use as filters. There are -several valid keys, for a full reference, check out -[describe-instances in the AWS CLI reference][1]. +* `filter` - (Optional) One or more filters to apply to the search. + If multiple `filter` blocks are provided, they all must be true. + For a full reference of filter names, see [describe-instances in the AWS CLI reference][1]. 
+ See [`filter` Block](#filter-block) below. * `getPasswordData` - (Optional) If true, wait for password data to become available and retrieve it. Useful for getting the administrator password for instances running Microsoft Windows. The password data is exported to the `passwordData` attribute. See [GetPasswordData](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetPasswordData.html) for more information. * `getUserData` - (Optional) Retrieve Base64 encoded User Data contents into the `userDataBase64` attribute. A SHA-1 hash of the User Data contents will always be present in the `userData` attribute. Defaults to `false`. @@ -63,6 +65,14 @@ several valid keys, for a full reference, check out Terraform will fail. Ensure that your search is specific enough to return a single Instance ID only. +### `filter` Block + +The `filter` block supports the following arguments: + +* `name` - (Required) Name of the filter. + For a full reference of filter names, see [describe-instances in the AWS CLI reference][1]. +* `values` - (Required) One or more values to match. + ## Attribute Reference `id` is set to the ID of the found Instance. In addition, the following attributes @@ -116,6 +126,7 @@ interpolation. * `outpostArn` - ARN of the Outpost. * `passwordData` - Base-64 encoded encrypted password data for the instance. Useful for getting the administrator password for instances running Microsoft Windows. This attribute is only exported if `getPasswordData` is true. See [GetPasswordData](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetPasswordData.html) for more information. * `placementGroup` - Placement group of the Instance. +* `placementGroupId` - Placement group ID of the Instance. * `placementPartitionNumber` - Number of the partition the instance is in. * `privateDns` - Private DNS name assigned to the Instance. Can only be used inside the Amazon EC2, and only available if you've enabled DNS hostnames for your VPC. 
* `privateDnsNameOptions` - Options for the instance hostname. @@ -152,4 +163,4 @@ interpolation. [1]: http://docs.aws.amazon.com/cli/latest/reference/ec2/describe-instances.html - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/instances.html.markdown b/website/docs/cdktf/typescript/d/instances.html.markdown index f6984c420b35..4450cadbc675 100644 --- a/website/docs/cdktf/typescript/d/instances.html.markdown +++ b/website/docs/cdktf/typescript/d/instances.html.markdown @@ -70,12 +70,22 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `instanceTags` - (Optional) Map of tags, each pair of which must exactly match a pair on desired instances. * `instanceStateNames` - (Optional) List of instance states that should be applicable to the desired instances. The permitted values are: `pending, running, shutting-down, stopped, stopping, terminated`. The default value is `running`. -* `filter` - (Optional) One or more name/value pairs to use as filters. There are -several valid keys, for a full reference, check out -[describe-instances in the AWS CLI reference][1]. +* `filter` - (Optional) One or more filters to apply to the search. + If multiple `filter` blocks are provided, they all must be true. + For a full reference of filter names, see [describe-instances in the AWS CLI reference][1]. + See [`filter` Block](#filter-block) below. + +### `filter` Block + +The `filter` block supports the following arguments: + +* `name` - (Required) Name of the filter. + For a full reference of filter names, see [describe-instances in the AWS CLI reference][1]. 
+* `values` - (Required) One or more values to match. ## Attribute Reference @@ -95,4 +105,4 @@ This data source exports the following attributes in addition to the arguments a [1]: http://docs.aws.amazon.com/cli/latest/reference/ec2/describe-instances.html - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/internet_gateway.html.markdown b/website/docs/cdktf/typescript/d/internet_gateway.html.markdown index 08dea1f49789..c98b52be0483 100644 --- a/website/docs/cdktf/typescript/d/internet_gateway.html.markdown +++ b/website/docs/cdktf/typescript/d/internet_gateway.html.markdown @@ -46,6 +46,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `internetGatewayId` - (Optional) ID of the specific Internet Gateway to retrieve. * `tags` - (Optional) Map of tags, each pair of which must exactly match a pair on the desired Internet Gateway. @@ -83,4 +84,4 @@ Each attachment supports the following: - `read` - (Default `20m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/iot_endpoint.html.markdown b/website/docs/cdktf/typescript/d/iot_endpoint.html.markdown index 6d507bfaac87..e23f5e72240a 100644 --- a/website/docs/cdktf/typescript/d/iot_endpoint.html.markdown +++ b/website/docs/cdktf/typescript/d/iot_endpoint.html.markdown @@ -62,6 +62,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `endpointType` - (Optional) Endpoint type. Valid values: `iot:CredentialProvider`, `iot:Data`, `iot:Data-ATS`, `iot:Jobs`. ## Attribute Reference @@ -75,4 +76,4 @@ This data source exports the following attributes in addition to the arguments a * `iot:Data-ATS`: `IDENTIFIER-ats.iot.REGION.amazonaws.com` * `iot:Jobs`: `IDENTIFIER.jobs.iot.REGION.amazonaws.com` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/iot_registration_code.html.markdown b/website/docs/cdktf/typescript/d/iot_registration_code.html.markdown index 51cf55b929a2..84bb850cbf15 100644 --- a/website/docs/cdktf/typescript/d/iot_registration_code.html.markdown +++ b/website/docs/cdktf/typescript/d/iot_registration_code.html.markdown @@ -52,7 +52,9 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -This data source does not support any arguments. +This data source supports the following arguments: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). ## Attribute Reference @@ -60,4 +62,4 @@ This data source exports the following attributes in addition to the arguments a * `registrationCode` - The CA certificate registration code. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/ivs_stream_key.html.markdown b/website/docs/cdktf/typescript/d/ivs_stream_key.html.markdown index 363a33503256..fe4d128c2a88 100644 --- a/website/docs/cdktf/typescript/d/ivs_stream_key.html.markdown +++ b/website/docs/cdktf/typescript/d/ivs_stream_key.html.markdown @@ -38,8 +38,9 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -The following arguments are required: +This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `channelArn` - (Required) ARN of the Channel. ## Attribute Reference @@ -50,4 +51,4 @@ This data source exports the following attributes in addition to the arguments a * `tags` - Map of tags assigned to the resource. * `value` - Stream Key value. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/kendra_experience.html.markdown b/website/docs/cdktf/typescript/d/kendra_experience.html.markdown index 954aee699be7..2d25c8dad9f4 100644 --- a/website/docs/cdktf/typescript/d/kendra_experience.html.markdown +++ b/website/docs/cdktf/typescript/d/kendra_experience.html.markdown @@ -39,6 +39,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `experienceId` - (Required) Identifier of the Experience. 
* `indexId` - (Required) Identifier of the index that contains the Experience. @@ -78,4 +79,4 @@ The `endpoints` block supports the following attributes: * `endpoint` - Endpoint of your Amazon Kendra Experience. * `endpointType` - Type of endpoint for your Amazon Kendra Experience. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/kendra_faq.html.markdown b/website/docs/cdktf/typescript/d/kendra_faq.html.markdown index a2e999e9a6fb..9f7b375b4487 100644 --- a/website/docs/cdktf/typescript/d/kendra_faq.html.markdown +++ b/website/docs/cdktf/typescript/d/kendra_faq.html.markdown @@ -39,6 +39,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `faqId` - (Required) Identifier of the FAQ. * `indexId` - (Required) Identifier of the index that contains the FAQ. @@ -65,4 +66,4 @@ The `s3Path` configuration block supports the following attributes: * `bucket` - Name of the S3 bucket that contains the file. * `key` - Name of the file. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/kendra_index.html.markdown b/website/docs/cdktf/typescript/d/kendra_index.html.markdown index cb64fd76baf0..f7cb9d083669 100644 --- a/website/docs/cdktf/typescript/d/kendra_index.html.markdown +++ b/website/docs/cdktf/typescript/d/kendra_index.html.markdown @@ -38,6 +38,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `id` - (Required) Returns information on a specific Index by id. ## Attribute Reference @@ -132,4 +133,4 @@ A `jwtTokenTypeConfiguration` block supports the following attributes: * `url` - Signing key URL. * `userNameAttributeField` - The user name attribute field. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/kendra_query_suggestions_block_list.html.markdown b/website/docs/cdktf/typescript/d/kendra_query_suggestions_block_list.html.markdown index fb3fa64bc98a..05005484b760 100644 --- a/website/docs/cdktf/typescript/d/kendra_query_suggestions_block_list.html.markdown +++ b/website/docs/cdktf/typescript/d/kendra_query_suggestions_block_list.html.markdown @@ -39,6 +39,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `indexId` - (Required) Identifier of the index that contains the block list. * `querySuggestionsBlockListId` - (Required) Identifier of the block list. @@ -65,4 +66,4 @@ The `sourceS3Path` configuration block supports the following attributes: * `bucket` - Name of the S3 bucket that contains the file. * `key` - Name of the file. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/kendra_thesaurus.html.markdown b/website/docs/cdktf/typescript/d/kendra_thesaurus.html.markdown index 5c9288c79b30..11411c4043ce 100644 --- a/website/docs/cdktf/typescript/d/kendra_thesaurus.html.markdown +++ b/website/docs/cdktf/typescript/d/kendra_thesaurus.html.markdown @@ -39,6 +39,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `indexId` - (Required) Identifier of the index that contains the Thesaurus. * `thesaurusId` - (Required) Identifier of the Thesaurus. @@ -66,4 +67,4 @@ The `sourceS3Path` configuration block supports the following attributes: * `bucket` - Name of the S3 bucket that contains the file. * `key` - Name of the file. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/key_pair.html.markdown b/website/docs/cdktf/typescript/d/key_pair.html.markdown index 12d0a84a98a5..764789bacdc2 100644 --- a/website/docs/cdktf/typescript/d/key_pair.html.markdown +++ b/website/docs/cdktf/typescript/d/key_pair.html.markdown @@ -56,6 +56,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `keyPairId` - (Optional) Key Pair ID. * `keyName` - (Optional) Key Pair name. 
* `includePublicKey` - (Optional) Whether to include the public key material in the response. @@ -90,4 +91,4 @@ This data source exports the following attributes in addition to the arguments a - `read` - (Default `20m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/kinesis_firehose_delivery_stream.html.markdown b/website/docs/cdktf/typescript/d/kinesis_firehose_delivery_stream.html.markdown index eb5d7b6bc5c6..2fc685c3691c 100644 --- a/website/docs/cdktf/typescript/d/kinesis_firehose_delivery_stream.html.markdown +++ b/website/docs/cdktf/typescript/d/kinesis_firehose_delivery_stream.html.markdown @@ -40,6 +40,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the Kinesis Firehose Delivery Stream. ## Attribute Reference @@ -51,4 +52,4 @@ This data source exports the following attributes in addition to the arguments a [1]: https://aws.amazon.com/documentation/firehose/ - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/kinesis_stream.html.markdown b/website/docs/cdktf/typescript/d/kinesis_stream.html.markdown index afa0a63794c5..b0ce3029fc73 100644 --- a/website/docs/cdktf/typescript/d/kinesis_stream.html.markdown +++ b/website/docs/cdktf/typescript/d/kinesis_stream.html.markdown @@ -41,6 +41,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the Kinesis Stream. ## Attribute Reference @@ -70,4 +71,4 @@ This data source exports the following attributes in addition to the arguments a [3]: https://docs.aws.amazon.com/streams/latest/dev/monitoring-with-cloudwatch.html [4]: https://docs.aws.amazon.com/streams/latest/dev/how-do-i-size-a-stream.html - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/kinesis_stream_consumer.html.markdown b/website/docs/cdktf/typescript/d/kinesis_stream_consumer.html.markdown index 9200d1fcaac7..d0a2a007b45a 100644 --- a/website/docs/cdktf/typescript/d/kinesis_stream_consumer.html.markdown +++ b/website/docs/cdktf/typescript/d/kinesis_stream_consumer.html.markdown @@ -41,6 +41,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `arn` - (Optional) ARN of the stream consumer. * `name` - (Optional) Name of the stream consumer. * `streamArn` - (Required) ARN of the data stream the consumer is registered with. 
@@ -55,4 +56,4 @@ This data source exports the following attributes in addition to the arguments a [1]: https://docs.aws.amazon.com/streams/latest/dev/amazon-kinesis-consumers.html - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/kms_alias.html.markdown b/website/docs/cdktf/typescript/d/kms_alias.html.markdown index 04b9173aa156..ca8a18640263 100644 --- a/website/docs/cdktf/typescript/d/kms_alias.html.markdown +++ b/website/docs/cdktf/typescript/d/kms_alias.html.markdown @@ -40,6 +40,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Display name of the alias. The name must start with the word "alias" followed by a forward slash (alias/) ## Attribute Reference @@ -53,4 +54,4 @@ This data source exports the following attributes in addition to the arguments a * `name` - Name of the alias * `namePrefix` - Prefix of the alias - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/kms_ciphertext.html.markdown b/website/docs/cdktf/typescript/d/kms_ciphertext.html.markdown index 6d464884c3d0..db22ffaef3eb 100644 --- a/website/docs/cdktf/typescript/d/kms_ciphertext.html.markdown +++ b/website/docs/cdktf/typescript/d/kms_ciphertext.html.markdown @@ -51,6 +51,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `plaintext` - (Required) Data to be encrypted. Note that this may show up in logs, and it will be stored in the state file. * `keyId` - (Required) Globally unique key ID for the customer master key. * `context` - (Optional) An optional mapping that makes up the encryption context. @@ -62,4 +63,4 @@ This data source exports the following attributes in addition to the arguments a * `id` - Globally unique key ID for the customer master key. * `ciphertextBlob` - Base64 encoded ciphertext - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/kms_custom_key_store.html.markdown b/website/docs/cdktf/typescript/d/kms_custom_key_store.html.markdown index 10d336cd9d1a..3ee24c3c5d7b 100644 --- a/website/docs/cdktf/typescript/d/kms_custom_key_store.html.markdown +++ b/website/docs/cdktf/typescript/d/kms_custom_key_store.html.markdown @@ -40,6 +40,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `customKeyStoreId` - (Optional) The ID for the custom key store. * `customKeyStoreName` - (Optional) The user-specified friendly name for the custom key store. @@ -53,4 +54,4 @@ This data source exports the following attributes in addition to the arguments a * `creationDate` - The date and time when the custom key store was created. * `trustAnchorCertificate` - The trust anchor certificate of the associated CloudHSM cluster. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/kms_key.html.markdown b/website/docs/cdktf/typescript/d/kms_key.html.markdown index 3e0ef4a27189..64bb0aa4d3d0 100644 --- a/website/docs/cdktf/typescript/d/kms_key.html.markdown +++ b/website/docs/cdktf/typescript/d/kms_key.html.markdown @@ -51,6 +51,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `keyId` - (Required) Key identifier which can be one of the following format: * Key ID. E.g: `1234abcd-12ab-34cd-56ef-1234567890ab` * Key ARN. E.g.: `arn:aws:kms:us-east-1:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab` @@ -68,7 +69,7 @@ This data source exports the following attributes in addition to the arguments a * `cloudHsmClusterId`: The cluster ID of the AWS CloudHSM cluster that contains the key material for the KMS key. * `creationDate`: The date and time when the key was created * `customKeyStoreId`: A unique identifier for the custom key store that contains the KMS key. -* `customerMasterKeySpec`: Specifies whether the key contains a symmetric key or an asymmetric key pair and the encryption algorithms or signing algorithms that the key supports +* `customerMasterKeySpec`: See `keySpec`. * `deletionDate`: The date and time after which AWS KMS deletes the key. This value is present only when `keyState` is `PendingDeletion`, otherwise this value is 0 * `description`: The description of the key. * `enabled`: Specifies whether the key is enabled. 
When `keyState` is `Enabled` this value is true, otherwise it is false @@ -95,4 +96,4 @@ The `primary_key` and `replica_keys` objects support the following: * `arn`: The key ARN of a primary or replica key of a multi-Region key. * `region`: The AWS Region of a primary or replica key in a multi-Region key. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/kms_public_key.html.markdown b/website/docs/cdktf/typescript/d/kms_public_key.html.markdown index 23d67989f8c9..ae8b2a13e032 100644 --- a/website/docs/cdktf/typescript/d/kms_public_key.html.markdown +++ b/website/docs/cdktf/typescript/d/kms_public_key.html.markdown @@ -48,6 +48,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `keyId` - (Required) Key identifier which can be one of the following format: * Key ID. E.g - `1234abcd-12ab-34cd-56ef-1234567890ab` * Key ARN. E.g. - `arn:aws:kms:us-east-1:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab` @@ -68,4 +69,4 @@ This data source exports the following attributes in addition to the arguments a * `publicKeyPem` - Exported public key. The value is Privacy Enhanced Mail (PEM) encoded. * `signingAlgorithms` - Signing algorithms that AWS KMS supports for this key. Only set when the `keyUsage` of the public key is `SIGN_VERIFY`. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/kms_secret.html.markdown b/website/docs/cdktf/typescript/d/kms_secret.html.markdown index 9edd17eb485e..3a43919f211e 100644 --- a/website/docs/cdktf/typescript/d/kms_secret.html.markdown +++ b/website/docs/cdktf/typescript/d/kms_secret.html.markdown @@ -10,6 +10,6 @@ description: |- # Data Source: aws_kms_secret -!> **WARNING:** This data source was removed in version 2.0.0 of the Terraform AWS Provider. You can migrate existing configurations to the [`aws_kms_secrets` data source](/docs/providers/aws/d/kms_secrets.html) following instructions available in the [Version 2 Upgrade Guide](../guides/version-2-upgrade.html#data-source-aws_kms_secret). +!> **WARNING:** This data source's functionality was removed in version 2.0.0 of the Terraform AWS Provider. You can migrate existing configurations to the [`aws_kms_secrets` data source](/docs/providers/aws/d/kms_secrets.html) following instructions available in the [Version 2 Upgrade Guide](../guides/version-2-upgrade.html#data-source-aws_kms_secret). This data source will be removed in a future version. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/lakeformation_data_lake_settings.html.markdown b/website/docs/cdktf/typescript/d/lakeformation_data_lake_settings.html.markdown index 76fe890a0ecf..babf53e7bf7a 100644 --- a/website/docs/cdktf/typescript/d/lakeformation_data_lake_settings.html.markdown +++ b/website/docs/cdktf/typescript/d/lakeformation_data_lake_settings.html.markdown @@ -38,13 +38,14 @@ class MyConvertedCode extends TerraformStack { The following arguments are optional: -* `catalogId` – (Optional) Identifier for the Data Catalog. By default, the account ID. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `catalogId` - (Optional) Identifier for the Data Catalog. By default, the account ID. ## Attribute Reference This data source exports the following attributes in addition to the arguments above: -* `admins` – List of ARNs of AWS Lake Formation principals (IAM users or roles). +* `admins` - List of ARNs of AWS Lake Formation principals (IAM users or roles). * `allowExternalDataFiltering` - Whether to allow Amazon EMR clusters to access data managed by Lake Formation. * `allowFullTableExternalDataAccess` - Whether to allow a third-party query engine to get data access credentials without session tags when a caller has full data access permissions. * `authorizedSessionTagValueList` - Lake Formation relies on a privileged process secured by Amazon EMR or the third party integrator to tag the user's role while assuming it. @@ -52,8 +53,8 @@ This data source exports the following attributes in addition to the arguments a * `createTableDefaultPermissions` - Up to three configuration blocks of principal permissions for default create table permissions. Detailed below. * `externalDataFilteringAllowList` - A list of the account IDs of Amazon Web Services accounts with Amazon EMR clusters that are to perform data filtering. * `parameters` - Key-value map of additional configuration. `CROSS_ACCOUNT_VERSION` will be set to values `"1"`, `"2"`, `"3"`, or `"4"`. `SET_CONTEXT` will also be returned with a value of `TRUE`. In a fresh account, prior to configuring, `CROSS_ACCOUNT_VERSION` is `"1"`. -* `readOnlyAdmins` – List of ARNs of AWS Lake Formation principals (IAM users or roles) with only view access to the resources. -* `trustedResourceOwners` – List of the resource-owning account IDs that the caller's account can use to share their user access details (user ARNs). 
+* `readOnlyAdmins` - List of ARNs of AWS Lake Formation principals (IAM users or roles) with only view access to the resources. +* `trustedResourceOwners` - List of the resource-owning account IDs that the caller's account can use to share their user access details (user ARNs). ### create_database_default_permissions @@ -65,4 +66,4 @@ This data source exports the following attributes in addition to the arguments a * `permissions` - List of permissions granted to the principal. * `principal` - Principal who is granted permissions. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/lakeformation_permissions.html.markdown b/website/docs/cdktf/typescript/d/lakeformation_permissions.html.markdown index d2a9bf3602f2..ebffc1260a5d 100644 --- a/website/docs/cdktf/typescript/d/lakeformation_permissions.html.markdown +++ b/website/docs/cdktf/typescript/d/lakeformation_permissions.html.markdown @@ -106,7 +106,8 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: -* `principal` – (Required) Principal to be granted the permissions on the resource. Supported principals are IAM users or IAM roles. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `principal` - (Required) Principal to be granted the permissions on the resource. Supported principals are IAM users or IAM roles. One of the following is required: @@ -121,7 +122,8 @@ One of the following is required: The following arguments are optional: -* `catalogId` – (Optional) Identifier for the Data Catalog. By default, the account ID. The Data Catalog is the persistent metadata store. 
It contains database definitions, table definitions, and other control information to manage your Lake Formation environment. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `catalogId` - (Optional) Identifier for the Data Catalog. By default, the account ID. The Data Catalog is the persistent metadata store. It contains database definitions, table definitions, and other control information to manage your Lake Formation environment. ### data_cells_filter @@ -134,7 +136,7 @@ The following arguments are optional: The following argument is required: -* `arn` – (Required) ARN that uniquely identifies the data location resource. +* `arn` - (Required) ARN that uniquely identifies the data location resource. The following argument is optional: @@ -144,7 +146,7 @@ The following argument is optional: The following argument is required: -* `name` – (Required) Name of the database resource. Unique to the Data Catalog. +* `name` - (Required) Name of the database resource. Unique to the Data Catalog. The following argument is optional: @@ -154,7 +156,7 @@ The following argument is optional: The following arguments are required: -* `key` – (Required) Key-name for the tag. +* `key` - (Required) Key-name for the tag. * `values` - (Required) List of possible values an attribute can take. The following argument is optional: @@ -165,7 +167,7 @@ The following argument is optional: The following arguments are required: -* `resourceType` – (Required) Resource type for which the tag policy applies. Valid values are `DATABASE` and `TABLE`. +* `resourceType` - (Required) Resource type for which the tag policy applies. Valid values are `DATABASE` and `TABLE`. 
* `expression` - (Required) List of tag conditions that apply to the resource's tag policy. Configuration block for tag conditions that apply to the policy. See [`expression`](#expression) below. The following argument is optional: @@ -174,17 +176,18 @@ The following argument is optional: #### expression -* `key` – (Required) Key-name of an LF-Tag. +* `key` - (Required) Key-name of an LF-Tag. * `values` - (Required) List of possible values of an LF-Tag. ### table The following argument is required: -* `databaseName` – (Required) Name of the database for the table. Unique to a Data Catalog. +* `databaseName` - (Required) Name of the database for the table. Unique to a Data Catalog. The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `catalogId` - (Optional) Identifier for the Data Catalog. By default, it is the account ID of the caller. * `name` - (Optional) Name of the table. At least one of `name` or `wildcard` is required. * `wildcard` - (Optional) Whether to use a wildcard representing every table under a database. At least one of `name` or `wildcard` is required. Defaults to `false`. @@ -193,11 +196,12 @@ The following arguments are optional: The following arguments are required: -* `databaseName` – (Required) Name of the database for the table with columns resource. Unique to the Data Catalog. -* `name` – (Required) Name of the table resource. +* `databaseName` - (Required) Name of the database for the table with columns resource. Unique to the Data Catalog. +* `name` - (Required) Name of the table resource. 
The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `catalogId` - (Optional) Identifier for the Data Catalog. By default, it is the account ID of the caller. * `columnNames` - (Optional) Set of column names for the table. At least one of `columnNames` or `excludedColumnNames` is required. * `excludedColumnNames` - (Optional) Set of column names for the table to exclude. At least one of `columnNames` or `excludedColumnNames` is required. @@ -206,7 +210,7 @@ The following arguments are optional: This data source exports the following attributes in addition to the arguments above: -* `permissions` – List of permissions granted to the principal. For details on permissions, see [Lake Formation Permissions Reference](https://docs.aws.amazon.com/lake-formation/latest/dg/lf-permissions-reference.html). +* `permissions` - List of permissions granted to the principal. For details on permissions, see [Lake Formation Permissions Reference](https://docs.aws.amazon.com/lake-formation/latest/dg/lf-permissions-reference.html). * `permissionsWithGrantOption` - Subset of `permissions` which the principal can pass. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/lakeformation_resource.html.markdown b/website/docs/cdktf/typescript/d/lakeformation_resource.html.markdown index 1780ccf2190b..bfc53deadb98 100644 --- a/website/docs/cdktf/typescript/d/lakeformation_resource.html.markdown +++ b/website/docs/cdktf/typescript/d/lakeformation_resource.html.markdown @@ -38,13 +38,17 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: -* `arn` – (Required) ARN of the resource, an S3 path. 
+* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `arn` - (Required) ARN of the resource, an S3 path. ## Attribute Reference This data source exports the following attributes in addition to the arguments above: +* `hybridAccessEnabled` - Flag to enable AWS LakeFormation hybrid access permission mode. * `lastModified` - Date and time the resource was last modified in [RFC 3339 format](https://tools.ietf.org/html/rfc3339#section-5.8). -* `roleArn` – Role that the resource was registered with. +* `roleArn` - Role that the resource was registered with. +* `withFederation` - Whether the resource is a federated resource. +* `withPrivilegedAccess` - Boolean to grant the calling principal the permissions to perform all supported Lake Formation operations on the registered data location. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/lambda_alias.html.markdown b/website/docs/cdktf/typescript/d/lambda_alias.html.markdown index 75809b76d49d..771a08d9e079 100644 --- a/website/docs/cdktf/typescript/d/lambda_alias.html.markdown +++ b/website/docs/cdktf/typescript/d/lambda_alias.html.markdown @@ -3,21 +3,90 @@ subcategory: "Lambda" layout: "aws" page_title: "AWS: aws_lambda_alias" description: |- - Provides a Lambda Alias data source. + Provides details about an AWS Lambda Alias. --- # Data Source: aws_lambda_alias -Provides information about a Lambda Alias. +Provides details about an AWS Lambda Alias. Use this data source to retrieve information about an existing Lambda function alias for traffic management, deployment strategies, or API integrations. ## Example Usage +### Basic Usage + +```typescript +// DO NOT EDIT. 
Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformOutput, TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. + */ +import { DataAwsLambdaAlias } from "./.gen/providers/aws/data-aws-lambda-alias"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + const example = new DataAwsLambdaAlias(this, "example", { + functionName: "my-lambda-function", + name: "production", + }); + new TerraformOutput(this, "alias_arn", { + value: example.arn, + }); + } +} + +``` + +### API Gateway Integration + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { Token, TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. 
+ */ +import { ApiGatewayIntegration } from "./.gen/providers/aws/api-gateway-integration"; +import { DataAwsLambdaAlias } from "./.gen/providers/aws/data-aws-lambda-alias"; +import { LambdaPermission } from "./.gen/providers/aws/lambda-permission"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + const apiHandler = new DataAwsLambdaAlias(this, "api_handler", { + functionName: "api-handler", + name: "live", + }); + new ApiGatewayIntegration(this, "example", { + httpMethod: Token.asString(awsApiGatewayMethodExample.httpMethod), + integrationHttpMethod: "POST", + resourceId: Token.asString(awsApiGatewayResourceExample.id), + restApiId: Token.asString(awsApiGatewayRestApiExample.id), + type: "AWS_PROXY", + uri: Token.asString(apiHandler.invokeArn), + }); + new LambdaPermission(this, "api_gateway", { + action: "lambda:InvokeFunction", + functionName: Token.asString(apiHandler.functionName), + principal: "apigateway.amazonaws.com", + qualifier: Token.asString(apiHandler.name), + sourceArn: "${" + awsApiGatewayRestApiExample.executionArn + "}/*/*", + statementId: "AllowExecutionFromAPIGateway", + }); + } +} + +``` + +### Deployment Version Tracking + ```typescript // DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug import { Construct } from "constructs"; -import { TerraformStack } from "cdktf"; +import { Op, TerraformOutput, TerraformStack } from "cdktf"; /* * Provider bindings are generated by running `cdktf get`. * See https://cdk.tf/provider-generation for more details. 
@@ -26,29 +95,85 @@ import { DataAwsLambdaAlias } from "./.gen/providers/aws/data-aws-lambda-alias"; class MyConvertedCode extends TerraformStack { constructor(scope: Construct, name: string) { super(scope, name); - new DataAwsLambdaAlias(this, "production", { - functionName: "my-lambda-func", + const production = new DataAwsLambdaAlias(this, "production", { + functionName: "payment-processor", name: "production", }); + const staging = new DataAwsLambdaAlias(this, "staging", { + functionName: "payment-processor", + name: "staging", + }); + const versionDrift = Op.neq( + production.functionVersion, + staging.functionVersion + ); + new TerraformOutput(this, "deployment_status", { + value: [ + { + production_version: production.functionVersion, + ready_for_promotion: Op.not(versionDrift), + staging_version: staging.functionVersion, + version_drift: versionDrift, + }, + ], + }); } } ``` +### EventBridge Rule Target + +```terraform +data "aws_lambda_alias" "event_processor" { + function_name = "event-processor" + name = "stable" +} + +resource "aws_cloudwatch_event_rule" "example" { + name = "capture-events" + description = "Capture events for processing" + + event_pattern = jsonencode({ + source = ["myapp.orders"] + detail-type = ["Order Placed"] + }) +} + +resource "aws_cloudwatch_event_target" "lambda" { + rule = aws_cloudwatch_event_rule.example.name + target_id = "SendToLambda" + arn = data.aws_lambda_alias.event_processor.arn +} + +resource "aws_lambda_permission" "allow_eventbridge" { + statement_id = "AllowExecutionFromEventBridge" + action = "lambda:InvokeFunction" + function_name = data.aws_lambda_alias.event_processor.function_name + principal = "events.amazonaws.com" + qualifier = data.aws_lambda_alias.event_processor.name + source_arn = aws_cloudwatch_event_rule.example.arn +} +``` + ## Argument Reference -This data source supports the following arguments: +The following arguments are required: * `functionName` - (Required) Name of the aliased Lambda 
function. * `name` - (Required) Name of the Lambda alias. +The following arguments are optional: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). + ## Attribute Reference This data source exports the following attributes in addition to the arguments above: * `arn` - ARN identifying the Lambda function alias. -* `description` - Description of alias. +* `description` - Description of the alias. * `functionVersion` - Lambda function version which the alias uses. -* `invokeArn` - ARN to be used for invoking Lambda Function from API Gateway - to be used in aws_api_gateway_integration's `uri`. +* `invokeArn` - ARN to be used for invoking Lambda Function from API Gateway - to be used in [`aws_api_gateway_integration`](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/api_gateway_integration)'s `uri`. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/lambda_code_signing_config.html.markdown b/website/docs/cdktf/typescript/d/lambda_code_signing_config.html.markdown index 97f0671ca2ad..0758d405ab86 100644 --- a/website/docs/cdktf/typescript/d/lambda_code_signing_config.html.markdown +++ b/website/docs/cdktf/typescript/d/lambda_code_signing_config.html.markdown @@ -3,38 +3,193 @@ subcategory: "Lambda" layout: "aws" page_title: "AWS: aws_lambda_code_signing_config" description: |- - Provides a Lambda Code Signing Config data source. + Provides details about an AWS Lambda Code Signing Config. --- # Data Source: aws_lambda_code_signing_config -Provides information about a Lambda Code Signing Config. 
A code signing configuration defines a list of allowed signing profiles and defines the code-signing validation policy (action to be taken if deployment validation checks fail). +Provides details about an AWS Lambda Code Signing Config. Use this data source to retrieve information about an existing code signing configuration for Lambda functions to ensure code integrity and authenticity. -For information about Lambda code signing configurations and how to use them, see [configuring code signing for Lambda functions][1] +For information about Lambda code signing configurations and how to use them, see [configuring code signing for Lambda functions](https://docs.aws.amazon.com/lambda/latest/dg/configuration-codesigning.html). ## Example Usage +### Basic Usage + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformOutput, Fn, TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. + */ +import { DataAwsLambdaCodeSigningConfig } from "./.gen/providers/aws/data-aws-lambda-code-signing-config"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + const example = new DataAwsLambdaCodeSigningConfig(this, "example", { + arn: "arn:aws:lambda:us-west-2:123456789012:code-signing-config:csc-0f6c334abcdea4d8b", + }); + new TerraformOutput(this, "config_details", { + value: [ + { + config_id: example.configId, + description: example.description, + policy: Fn.lookupNested(example.policies, [ + "0", + "untrusted_artifact_on_deployment", + ]), + }, + ], + }); + } +} + +``` + +### Use in Lambda Function + ```typescript // DO NOT EDIT. 
Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug import { Construct } from "constructs"; -import { TerraformStack } from "cdktf"; +import { Token, TerraformStack } from "cdktf"; /* * Provider bindings are generated by running `cdktf get`. * See https://cdk.tf/provider-generation for more details. */ import { DataAwsLambdaCodeSigningConfig } from "./.gen/providers/aws/data-aws-lambda-code-signing-config"; +import { LambdaFunction } from "./.gen/providers/aws/lambda-function"; class MyConvertedCode extends TerraformStack { constructor(scope: Construct, name: string) { super(scope, name); - new DataAwsLambdaCodeSigningConfig(this, "existing_csc", { - arn: - "arn:aws:lambda:${" + - awsRegion.value + - "}:${" + - awsAccount.value + - "}:code-signing-config:csc-0f6c334abcdea4d8b", + const securityConfig = new DataAwsLambdaCodeSigningConfig( + this, + "security_config", + { + arn: codeSigningConfigArn.stringValue, + } + ); + new LambdaFunction(this, "example", { + codeSigningConfigArn: Token.asString(securityConfig.arn), + filename: "function.zip", + functionName: "secure-function", + handler: "index.handler", + role: lambdaRole.arn, + runtime: "nodejs20.x", + tags: { + Environment: "production", + Security: "code-signed", + }, + }); + } +} + +``` + +### Validate Signing Profiles + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { + Fn, + TerraformOutput, + conditional, + Token, + TerraformCount, + TerraformStack, +} from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. 
+ */ +import { DataAwsLambdaCodeSigningConfig } from "./.gen/providers/aws/data-aws-lambda-code-signing-config"; +import { LambdaFunction } from "./.gen/providers/aws/lambda-function"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + const requiredProfile = + "arn:aws:signer:us-west-2:123456789012:/signing-profiles/MyProfile"; + const example = new DataAwsLambdaCodeSigningConfig(this, "example", { + arn: codeSigningConfigArn.stringValue, + }); + const allowedProfiles = Fn.lookupNested(example.allowedPublishers, [ + "0", + "signing_profile_version_arns", + ]); + const profileAllowed = Fn.contains(allowedProfiles, requiredProfile); + new TerraformOutput(this, "deployment_status", { + value: [ + { + function_created: profileAllowed, + message: conditional( + profileAllowed, + "Function deployed with valid signing profile", + "Deployment blocked - signing profile not allowed" + ), + profile_allowed: profileAllowed, + }, + ], + }); + /*In most cases loops should be handled in the programming language context and + not inside of the Terraform context. If you are looping over something external, e.g. a variable or a file input + you should consider using a for loop. If you are looping over something only known to Terraform, e.g. a result of a data source + you need to keep this like it is.*/ + const conditionalCount = TerraformCount.of( + Token.asNumber(conditional(profileAllowed, 1, 0)) + ); + new LambdaFunction(this, "conditional", { + codeSigningConfigArn: Token.asString(example.arn), + filename: "function.zip", + functionName: "conditional-function", + handler: "index.handler", + role: lambdaRole.arn, + runtime: "python3.12", + count: conditionalCount, + }); + } +} + +``` + +### Multi-Environment Configuration + +```typescript +// DO NOT EDIT. 
Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { Fn, Op, TerraformOutput, TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. + */ +import { DataAwsLambdaCodeSigningConfig } from "./.gen/providers/aws/data-aws-lambda-code-signing-config"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + const dev = new DataAwsLambdaCodeSigningConfig(this, "dev", { + arn: "arn:aws:lambda:us-west-2:123456789012:code-signing-config:csc-dev-456", + }); + const prod = new DataAwsLambdaCodeSigningConfig(this, "prod", { + arn: "arn:aws:lambda:us-west-2:123456789012:code-signing-config:csc-prod-123", + }); + const devPolicy = Fn.lookupNested(dev.policies, [ + "0", + "untrusted_artifact_on_deployment", + ]); + const prodPolicy = Fn.lookupNested(prod.policies, [ + "0", + "untrusted_artifact_on_deployment", + ]); + const configComparison = { + dev_enforcement: devPolicy, + policies_match: Op.eq(prodPolicy, devPolicy), + prod_enforcement: prodPolicy, + }; + new TerraformOutput(this, "environment_comparison", { + value: configComparison, }); } } @@ -43,28 +198,30 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -This data source supports the following arguments: +The following arguments are required: * `arn` - (Required) ARN of the code signing configuration. +The following arguments are optional: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
+ ## Attribute Reference This data source exports the following attributes in addition to the arguments above: -* `allowedPublishers` - List of allowed publishers as signing profiles for this code signing configuration. +* `allowedPublishers` - List of allowed publishers as signing profiles for this code signing configuration. [See below](#allowed_publishers-attribute-reference). * `configId` - Unique identifier for the code signing configuration. * `description` - Code signing configuration description. * `lastModified` - Date and time that the code signing configuration was last modified. -* `policies` - List of code signing policies that control the validation failure action for signature mismatch or expiry. - -`allowedPublishers` is exported with the following attribute: +* `policies` - List of code signing policies that control the validation failure action for signature mismatch or expiry. [See below](#policies-attribute-reference). -* `signingProfileVersionArns` - The ARN for each of the signing profiles. A signing profile defines a trusted user who can sign a code package. +### allowed_publishers Attribute Reference -`policies` is exported with the following attribute: +* `signingProfileVersionArns` - Set of ARNs for each of the signing profiles. A signing profile defines a trusted user who can sign a code package. -* `untrustedArtifactOnDeployment` - Code signing configuration policy for deployment validation failure. +### policies Attribute Reference -[1]: https://docs.aws.amazon.com/lambda/latest/dg/configuration-codesigning.html +* `untrustedArtifactOnDeployment` - Code signing configuration policy for deployment validation failure. Valid values: `Warn`, `Enforce`. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/lambda_function.html.markdown b/website/docs/cdktf/typescript/d/lambda_function.html.markdown index ce3979aa0692..71b9b0058a74 100644 --- a/website/docs/cdktf/typescript/d/lambda_function.html.markdown +++ b/website/docs/cdktf/typescript/d/lambda_function.html.markdown @@ -3,21 +3,25 @@ subcategory: "Lambda" layout: "aws" page_title: "AWS: aws_lambda_function" description: |- - Provides a Lambda Function data source. + Provides details about an AWS Lambda Function. --- # Data Source: aws_lambda_function -Provides information about a Lambda Function. +Provides details about an AWS Lambda Function. Use this data source to obtain information about an existing Lambda function for use in other resources or as a reference for function configurations. + +~> **Note:** This data source returns information about the latest version or alias specified by the `qualifier`. If no `qualifier` is provided, it returns information about the most recent published version, or `$LATEST` if no published version exists. ## Example Usage +### Basic Usage + ```typescript // DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug import { Construct } from "constructs"; -import { VariableType, TerraformVariable, TerraformStack } from "cdktf"; +import { TerraformOutput, TerraformStack } from "cdktf"; /* * Provider bindings are generated by running `cdktf get`. * See https://cdk.tf/provider-generation for more details. @@ -26,13 +30,131 @@ import { DataAwsLambdaFunction } from "./.gen/providers/aws/data-aws-lambda-func class MyConvertedCode extends TerraformStack { constructor(scope: Construct, name: string) { super(scope, name); - /*Terraform Variables are not always the best fit for getting inputs in the context of Terraform CDK. 
- You can read more about this at https://cdk.tf/variables*/ - const functionName = new TerraformVariable(this, "function_name", { - type: VariableType.STRING, + const example = new DataAwsLambdaFunction(this, "example", { + functionName: "my-lambda-function", + }); + new TerraformOutput(this, "function_arn", { + value: example.arn, }); - new DataAwsLambdaFunction(this, "existing", { - functionName: functionName.stringValue, + } +} + +``` + +### Using Function Alias + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { Token, TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. + */ +import { ApiGatewayIntegration } from "./.gen/providers/aws/api-gateway-integration"; +import { DataAwsLambdaFunction } from "./.gen/providers/aws/data-aws-lambda-function"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + const example = new DataAwsLambdaFunction(this, "example", { + functionName: "api-handler", + qualifier: "production", + }); + const awsApiGatewayIntegrationExample = new ApiGatewayIntegration( + this, + "example_1", + { + httpMethod: Token.asString(awsApiGatewayMethodExample.httpMethod), + integrationHttpMethod: "POST", + resourceId: Token.asString(awsApiGatewayResourceExample.id), + restApiId: Token.asString(awsApiGatewayRestApiExample.id), + type: "AWS_PROXY", + uri: Token.asString(example.invokeArn), + } + ); + /*This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match.*/ + awsApiGatewayIntegrationExample.overrideLogicalId("example"); + } +} + +``` + +### Function Configuration Reference + +```typescript +// DO NOT EDIT. 
Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { Token, Fn, TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. + */ +import { DataAwsLambdaFunction } from "./.gen/providers/aws/data-aws-lambda-function"; +import { LambdaFunction } from "./.gen/providers/aws/lambda-function"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + const reference = new DataAwsLambdaFunction(this, "reference", { + functionName: "existing-function", + }); + new LambdaFunction(this, "example", { + architectures: Token.asList(reference.architectures), + environment: { + variables: Token.asStringMap( + Fn.lookupNested(reference.environment, ["0", "variables"]) + ), + }, + filename: "new-function.zip", + functionName: "new-function", + handler: Token.asString(reference.handler), + memorySize: Token.asNumber(reference.memorySize), + role: Token.asString(reference.role), + runtime: Token.asString(reference.runtime), + timeout: Token.asNumber(reference.timeout), + vpcConfig: { + securityGroupIds: Token.asList( + Fn.lookupNested(reference.vpcConfig, ["0", "security_group_ids"]) + ), + subnetIds: Token.asList( + Fn.lookupNested(reference.vpcConfig, ["0", "subnet_ids"]) + ), + }, + }); + } +} + +``` + +### Function Version Management + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformOutput, Op, TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. 
+ */ +import { DataAwsLambdaFunction } from "./.gen/providers/aws/data-aws-lambda-function"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + const latest = new DataAwsLambdaFunction(this, "latest", { + functionName: "my-function", + qualifier: "$LATEST", + }); + const version = new DataAwsLambdaFunction(this, "version", { + functionName: "my-function", + qualifier: "3", + }); + new TerraformOutput(this, "version_comparison", { + value: [ + { + code_difference: Op.neq(version.codeSha256, latest.codeSha256), + latest_version: latest.version, + specific_version: version.version, + }, + ], }); } } @@ -41,10 +163,14 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -This data source supports the following arguments: +The following arguments are required: + +* `functionName` - (Required) Name of the Lambda function. -* `functionName` - (Required) Name of the lambda function. -* `qualifier` - (Optional) Alias name or version number of the lambda functionE.g., `$LATEST`, `my-alias`, or `1`. When not included: the data source resolves to the most recent published version; if no published version exists: it resolves to the most recent unpublished version. +The following arguments are optional: + +* `qualifier` - (Optional) Alias name or version number of the Lambda function. E.g., `$LATEST`, `my-alias`, or `1`. When not included: the data source resolves to the most recent published version; if no published version exists: it resolves to the most recent unpublished version. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
## Attribute Reference @@ -54,31 +180,67 @@ This data source exports the following attributes in addition to the arguments a * `arn` - Unqualified (no `:QUALIFIER` or `:VERSION` suffix) ARN identifying your Lambda Function. See also `qualifiedArn`. * `codeSha256` - Base64-encoded representation of raw SHA-256 sum of the zip file. * `codeSigningConfigArn` - ARN for a Code Signing Configuration. -* `deadLetterConfig` - Configure the function's *dead letter queue*. +* `deadLetterConfig` - Configuration for the function's dead letter queue. [See below](#dead_letter_config-attribute-reference). * `description` - Description of what your Lambda Function does. -* `environment` - Lambda environment's configuration settings. -* `ephemeralStorage` - Amount of Ephemeral storage(`/tmp`) allocated for the Lambda Function. -* `fileSystemConfig` - Connection settings for an Amazon EFS file system. +* `environment` - Lambda environment's configuration settings. [See below](#environment-attribute-reference). +* `ephemeralStorage` - Amount of ephemeral storage (`/tmp`) allocated for the Lambda Function. [See below](#ephemeral_storage-attribute-reference). +* `fileSystemConfig` - Connection settings for an Amazon EFS file system. [See below](#file_system_config-attribute-reference). * `handler` - Function entrypoint in your code. * `imageUri` - URI of the container image. -* `invokeArn` - ARN to be used for invoking Lambda Function from API Gateway. **NOTE:** Starting with `v4.51.0` of the provider, this will *not* include the qualifier. +* `invokeArn` - ARN to be used for invoking Lambda Function from API Gateway. **Note:** Starting with `v4.51.0` of the provider, this will not include the qualifier. * `kmsKeyArn` - ARN for the KMS encryption key. * `lastModified` - Date this resource was last modified. * `layers` - List of Lambda Layer ARNs attached to your Lambda Function. -* `loggingConfig` - Advanced logging settings. +* `loggingConfig` - Advanced logging settings. 
[See below](#logging_config-attribute-reference). * `memorySize` - Amount of memory in MB your Lambda Function can use at runtime. * `qualifiedArn` - Qualified (`:QUALIFIER` or `:VERSION` suffix) ARN identifying your Lambda Function. See also `arn`. * `qualifiedInvokeArn` - Qualified (`:QUALIFIER` or `:VERSION` suffix) ARN to be used for invoking Lambda Function from API Gateway. See also `invokeArn`. -* `reservedConcurrentExecutions` - The amount of reserved concurrent executions for this lambda function or `-1` if unreserved. +* `reservedConcurrentExecutions` - Amount of reserved concurrent executions for this Lambda function or `-1` if unreserved. * `role` - IAM role attached to the Lambda Function. * `runtime` - Runtime environment for the Lambda function. * `signingJobArn` - ARN of a signing job. -* `signingProfileVersionArn` - The ARN for a signing profile version. +* `signingProfileVersionArn` - ARN for a signing profile version. * `sourceCodeHash` - (**Deprecated** use `codeSha256` instead) Base64-encoded representation of raw SHA-256 sum of the zip file. * `sourceCodeSize` - Size in bytes of the function .zip file. +* `sourceKmsKeyArn` - ARN of the AWS Key Management Service key used to encrypt the function's `.zip` deployment package. +* `tags` - Map of tags assigned to the Lambda Function. * `timeout` - Function execution time at which Lambda should terminate the function. -* `tracingConfig` - Tracing settings of the function. -* `version` - The version of the Lambda function returned. If `qualifier` is not set, this will resolve to the most recent published version. If no published version of the function exists, `version` will resolve to `$LATEST`. -* `vpcConfig` - VPC configuration associated with your Lambda function. +* `tracingConfig` - Tracing settings of the function. [See below](#tracing_config-attribute-reference). +* `version` - Version of the Lambda function returned. 
If `qualifier` is not set, this will resolve to the most recent published version. If no published version of the function exists, `version` will resolve to `$LATEST`. +* `vpcConfig` - VPC configuration associated with your Lambda function. [See below](#vpc_config-attribute-reference). + +### dead_letter_config + +* `targetArn` - ARN of an SNS topic or SQS queue to notify when an invocation fails. + +### environment + +* `variables` - Map of environment variables that are accessible from the function code during execution. + +### ephemeral_storage + +* `size` - Size of the Lambda function ephemeral storage (`/tmp`) in MB. + +### file_system_config + +* `arn` - ARN of the Amazon EFS Access Point that provides access to the file system. +* `localMountPath` - Path where the function can access the file system, starting with `/mnt/`. + +### logging_config + +* `applicationLogLevel` - Detail level of the logs your application sends to CloudWatch when using supported logging libraries. +* `logFormat` - Format for your function's logs. Valid values: `Text`, `JSON`. +* `logGroup` - CloudWatch log group your function sends logs to. +* `systemLogLevel` - Detail level of the Lambda platform event logs sent to CloudWatch. + +### tracing_config + +* `mode` - Tracing mode. Valid values: `Active`, `PassThrough`. + +### vpc_config + +* `securityGroupIds` - List of security group IDs associated with the Lambda function. +* `subnetIds` - List of subnet IDs associated with the Lambda function. +* `vpcId` - ID of the VPC. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/lambda_function_url.html.markdown b/website/docs/cdktf/typescript/d/lambda_function_url.html.markdown index 8841a6ef06d4..6d566fec5a36 100644 --- a/website/docs/cdktf/typescript/d/lambda_function_url.html.markdown +++ b/website/docs/cdktf/typescript/d/lambda_function_url.html.markdown @@ -3,36 +3,106 @@ subcategory: "Lambda" layout: "aws" page_title: "AWS: aws_lambda_function_url" description: |- - Provides a Lambda function URL data source. + Provides details about an AWS Lambda Function URL. --- # Data Source: aws_lambda_function_url -Provides information about a Lambda function URL. +Provides details about an AWS Lambda Function URL. Use this data source to retrieve information about an existing function URL configuration. ## Example Usage +### Basic Usage + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformOutput, TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. + */ +import { DataAwsLambdaFunctionUrl } from "./.gen/providers/aws/data-aws-lambda-function-url"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + const example = new DataAwsLambdaFunctionUrl(this, "example", { + functionName: "my_lambda_function", + }); + new TerraformOutput(this, "function_url", { + value: example.functionUrl, + }); + } +} + +``` + +### With Qualifier + ```typescript // DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug import { Construct } from "constructs"; -import { VariableType, TerraformVariable, TerraformStack } from "cdktf"; +import { Token, Fn, TerraformStack } from "cdktf"; /* * Provider bindings are generated by running `cdktf get`. 
* See https://cdk.tf/provider-generation for more details. */ import { DataAwsLambdaFunctionUrl } from "./.gen/providers/aws/data-aws-lambda-function-url"; +import { Route53Record } from "./.gen/providers/aws/route53-record"; class MyConvertedCode extends TerraformStack { constructor(scope: Construct, name: string) { super(scope, name); - /*Terraform Variables are not always the best fit for getting inputs in the context of Terraform CDK. - You can read more about this at https://cdk.tf/variables*/ - const functionName = new TerraformVariable(this, "function_name", { - type: VariableType.STRING, + const example = new DataAwsLambdaFunctionUrl(this, "example", { + functionName: Token.asString(awsLambdaFunctionExample.functionName), + qualifier: "production", }); - new DataAwsLambdaFunctionUrl(this, "existing", { - functionName: functionName.stringValue, + new Route53Record(this, "lambda_alias", { + name: "api.example.com", + records: [ + Token.asString( + Fn.replace(Token.asString(example.functionUrl), "https://", "") + ), + ], + ttl: 300, + type: "CNAME", + zoneId: Token.asString(awsRoute53ZoneExample.zoneId), + }); + } +} + +``` + +### Retrieve CORS Configuration + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { Fn, Op, conditional, TerraformOutput, TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. 
+ */ +import { DataAwsLambdaFunctionUrl } from "./.gen/providers/aws/data-aws-lambda-function-url"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + const example = new DataAwsLambdaFunctionUrl(this, "example", { + functionName: "api_function", + }); + const corsConfig = conditional( + Op.gt(Fn.lengthOf(example.cors), 0), + Fn.lookupNested(example.cors, ["0"]), + "null" + ); + const allowedOrigins = conditional( + Op.neq(corsConfig, "null"), + Fn.lookupNested(corsConfig, ["allow_origins"]), + [] + ); + new TerraformOutput(this, "cors_allowed_origins", { + value: allowedOrigins, }); } } @@ -41,17 +111,21 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -This data source supports the following arguments: +The following arguments are required: + +* `functionName` - (Required) Name or ARN of the Lambda function. + +The following arguments are optional: -* `functionName` - (Required) The name (or ARN) of the Lambda function. -* `qualifier` - (Optional) Alias name or `"$LATEST"`. +* `qualifier` - (Optional) Alias name or `$LATEST`. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). ## Attribute Reference This data source exports the following attributes in addition to the arguments above: * `authorizationType` - Type of authentication that the function URL uses. -* `cors` - The [cross-origin resource sharing (CORS)](https://developer.mozilla.org/en-US/docs/Web/HTTP/CORS) settings for the function URL. See the [`aws_lambda_function_url` resource](/docs/providers/aws/r/lambda_function_url.html) documentation for more details. +* `cors` - Cross-origin resource sharing (CORS) settings for the function URL. 
[See below](#cors-attribute-reference). * `creationTime` - When the function URL was created, in [ISO-8601 format](https://www.w3.org/TR/NOTE-datetime). * `functionArn` - ARN of the function. * `functionUrl` - HTTP URL endpoint for the function in the format `https://.lambda-url..on.aws/`. @@ -59,4 +133,13 @@ This data source exports the following attributes in addition to the arguments a * `lastModifiedTime` - When the function URL configuration was last updated, in [ISO-8601 format](https://www.w3.org/TR/NOTE-datetime). * `urlId` - Generated ID for the endpoint. - \ No newline at end of file +### cors Attribute Reference + +* `allowCredentials` - Whether credentials are included in the CORS request. +* `allowHeaders` - List of headers that are specified in the Access-Control-Request-Headers header. +* `allowMethods` - List of HTTP methods that are allowed when calling the function URL. +* `allowOrigins` - List of origins that are allowed to make requests to the function URL. +* `exposeHeaders` - List of headers in the response that you want to expose to the origin that called the function URL. +* `maxAge` - Maximum amount of time, in seconds, that web browsers can cache results of a preflight request. + + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/lambda_functions.html.markdown b/website/docs/cdktf/typescript/d/lambda_functions.html.markdown index 8dc595ed0085..b3e89a7e6781 100644 --- a/website/docs/cdktf/typescript/d/lambda_functions.html.markdown +++ b/website/docs/cdktf/typescript/d/lambda_functions.html.markdown @@ -3,21 +3,23 @@ subcategory: "Lambda" layout: "aws" page_title: "AWS: aws_lambda_functions" description: |- - Terraform data resource to get a list of Lambda Functions. + Provides a list of AWS Lambda Functions. --- # Data Source: aws_lambda_functions -Terraform data resource to get a list of Lambda Functions. +Provides a list of AWS Lambda Functions in the current region. 
Use this data source to discover existing Lambda functions for inventory, monitoring, or bulk operations. ## Example Usage +### List All Functions + ```typescript // DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug import { Construct } from "constructs"; -import { TerraformStack } from "cdktf"; +import { TerraformOutput, Fn, TerraformStack } from "cdktf"; /* * Provider bindings are generated by running `cdktf get`. * See https://cdk.tf/provider-generation for more details. @@ -26,7 +28,146 @@ import { DataAwsLambdaFunctions } from "./.gen/providers/aws/data-aws-lambda-fun class MyConvertedCode extends TerraformStack { constructor(scope: Construct, name: string) { super(scope, name); - new DataAwsLambdaFunctions(this, "all", {}); + const all = new DataAwsLambdaFunctions(this, "all", {}); + new TerraformOutput(this, "all_function_names", { + value: all.functionNames, + }); + new TerraformOutput(this, "function_count", { + value: Fn.lengthOf(all.functionNames), + }); + } +} + +``` + +### Use Function List for Bulk Operations + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { Fn, Token, TerraformCount, TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. + */ +import { CloudwatchMetricAlarm } from "./.gen/providers/aws/cloudwatch-metric-alarm"; +import { DataAwsLambdaFunctions } from "./.gen/providers/aws/data-aws-lambda-functions"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + const all = new DataAwsLambdaFunctions(this, "all", {}); + /*In most cases loops should be handled in the programming language context and + not inside of the Terraform context. If you are looping over something external, e.g. 
a variable or a file input + you should consider using a for loop. If you are looping over something only known to Terraform, e.g. a result of a data source + you need to keep this like it is.*/ + const lambdaErrorsCount = TerraformCount.of( + Token.asNumber(Fn.lengthOf(all.functionNames)) + ); + new CloudwatchMetricAlarm(this, "lambda_errors", { + alarmDescription: "This metric monitors lambda errors", + alarmName: + Token.asString( + Fn.lookupNested(all.functionNames, [lambdaErrorsCount.index]) + ) + "-errors", + comparisonOperator: "GreaterThanThreshold", + dimensions: { + FunctionName: Token.asString( + Fn.lookupNested(all.functionNames, [lambdaErrorsCount.index]) + ), + }, + evaluationPeriods: Token.asNumber("2"), + metricName: "Errors", + namespace: "AWS/Lambda", + period: Token.asNumber("300"), + statistic: "Sum", + tags: { + Environment: "monitoring", + Purpose: "lambda-error-tracking", + }, + threshold: Token.asNumber("5"), + count: lambdaErrorsCount, + }); + } +} + +``` + +### Filter Functions by Name Pattern + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformOutput, TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. 
+ */ +import { DataAwsLambdaFunctions } from "./.gen/providers/aws/data-aws-lambda-functions"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + const all = new DataAwsLambdaFunctions(this, "all", {}); + const apiFunctions = + "${[ for name in ${" + + all.functionNames + + '} : name if can(regex("^api-", name))]}'; + const workerFunctions = + "${[ for name in ${" + + all.functionNames + + '} : name if can(regex("^worker-", name))]}'; + new TerraformOutput(this, "api_functions", { + value: apiFunctions, + }); + new TerraformOutput(this, "worker_functions", { + value: workerFunctions, + }); + } +} + +``` + +### Create Function Inventory + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { + TerraformOutput, + Fn, + Token, + TerraformCount, + TerraformStack, +} from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. 
+ */ +import { DataAwsLambdaFunction } from "./.gen/providers/aws/data-aws-lambda-function"; +import { DataAwsLambdaFunctions } from "./.gen/providers/aws/data-aws-lambda-functions"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + const all = new DataAwsLambdaFunctions(this, "all", {}); + const functionInventory = + "${[ for i, name in ${" + + all.functionNames + + "} : {\n name = name\n arn = data.aws_lambda_functions.all.function_arns[i]\n runtime = data.aws_lambda_function.details[i].runtime\n memory_size = data.aws_lambda_function.details[i].memory_size\n timeout = data.aws_lambda_function.details[i].timeout\n handler = data.aws_lambda_function.details[i].handler\n }]}"; + new TerraformOutput(this, "function_inventory", { + value: functionInventory, + }); + /*In most cases loops should be handled in the programming language context and + not inside of the Terraform context. If you are looping over something external, e.g. a variable or a file input + you should consider using a for loop. If you are looping over something only known to Terraform, e.g. a result of a data source + you need to keep this like it is.*/ + const detailsCount = TerraformCount.of( + Token.asNumber(Fn.lengthOf(all.functionNames)) + ); + new DataAwsLambdaFunction(this, "details", { + functionName: Token.asString( + Fn.lookupNested(all.functionNames, [detailsCount.index]) + ), + count: detailsCount, + }); } } @@ -34,13 +175,15 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -This data source does not support any arguments. +The following arguments are optional: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
## Attribute Reference This data source exports the following attributes in addition to the arguments above: -* `functionNames` - A list of Lambda Function names. -* `functionArns` - A list of Lambda Function ARNs. +* `functionArns` - List of Lambda Function ARNs. +* `functionNames` - List of Lambda Function names. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/lambda_invocation.html.markdown b/website/docs/cdktf/typescript/d/lambda_invocation.html.markdown index e27d1fd552e8..15a229ecf2d3 100644 --- a/website/docs/cdktf/typescript/d/lambda_invocation.html.markdown +++ b/website/docs/cdktf/typescript/d/lambda_invocation.html.markdown @@ -3,43 +3,160 @@ subcategory: "Lambda" layout: "aws" page_title: "AWS: aws_lambda_invocation" description: |- - Invoke AWS Lambda Function as data source + Invokes an AWS Lambda Function and returns its results. --- # Data Source: aws_lambda_invocation -Use this data source to invoke custom lambda functions as data source. -The lambda function is invoked with [RequestResponse](https://docs.aws.amazon.com/lambda/latest/dg/API_Invoke.html#API_Invoke_RequestSyntax) -invocation type. +Invokes an AWS Lambda Function and returns its results. Use this data source to execute Lambda functions during Terraform operations and use their results in other resources or outputs. -~> **NOTE:** The `aws_lambda_invocation` data source invokes the function during the first `apply` and every subsequent `plan` when the function is known. +The Lambda function is invoked with [RequestResponse](https://docs.aws.amazon.com/lambda/latest/dg/API_Invoke.html#API_Invoke_RequestSyntax) invocation type. 
-~> **NOTE:** If you get a `KMSAccessDeniedException: Lambda was unable to decrypt the environment variables because KMS access was denied` error when invoking an [`aws_lambda_function`](/docs/providers/aws/r/lambda_function.html) with environment variables, the IAM role associated with the function may have been deleted and recreated _after_ the function was created. You can fix the problem two ways: 1) updating the function's role to another role and then updating it back again to the recreated role, or 2) by using Terraform to `taint` the function and `apply` your configuration again to recreate the function. (When you create a function, Lambda grants permissions on the KMS key to the function's IAM role. If the IAM role is recreated, the grant is no longer valid. Changing the function's role or recreating the function causes Lambda to update the grant.) +~> **Note:** The `aws_lambda_invocation` data source invokes the function during the first `apply` and every subsequent `plan` when the function is known. + +~> **Note:** If you get a `KMSAccessDeniedException: Lambda was unable to decrypt the environment variables because KMS access was denied` error when invoking a Lambda function with environment variables, the IAM role associated with the function may have been deleted and recreated after the function was created. You can fix the problem two ways: 1) updating the function's role to another role and then updating it back again to the recreated role, or 2) by using Terraform to `taint` the function and `apply` your configuration again to recreate the function. (When you create a function, Lambda grants permissions on the KMS key to the function's IAM role. If the IAM role is recreated, the grant is no longer valid. Changing the function's role or recreating the function causes Lambda to update the grant.) ## Example Usage +### Basic Invocation + +```typescript +// DO NOT EDIT. 
Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { Token, Fn, TerraformOutput, TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. + */ +import { DataAwsLambdaInvocation } from "./.gen/providers/aws/data-aws-lambda-invocation"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + const example = new DataAwsLambdaInvocation(this, "example", { + functionName: Token.asString(awsLambdaFunctionExample.functionName), + input: Token.asString( + Fn.jsonencode({ + id: "123456", + operation: "getStatus", + }) + ), + }); + new TerraformOutput(this, "result", { + value: Fn.jsondecode(Token.asString(example.result)), + }); + } +} + +``` + +### Dynamic Resource Configuration + ```typescript // DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug import { Construct } from "constructs"; -import { TerraformOutput, Fn, Token, TerraformStack } from "cdktf"; +import { Fn, Token, TerraformStack } from "cdktf"; /* * Provider bindings are generated by running `cdktf get`. * See https://cdk.tf/provider-generation for more details. 
*/ import { DataAwsLambdaInvocation } from "./.gen/providers/aws/data-aws-lambda-invocation"; +import { ElasticacheCluster } from "./.gen/providers/aws/elasticache-cluster"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + const resourceConfig = new DataAwsLambdaInvocation( + this, + "resource_config", + { + functionName: "resource-config-generator", + input: Token.asString( + Fn.jsonencode({ + environment: environment.value, + region: current.region, + service: "api", + }) + ), + qualifier: "production", + } + ); + const config = Fn.jsondecode(Token.asString(resourceConfig.result)); + new ElasticacheCluster(this, "example", { + clusterId: Token.asString( + Fn.lookupNested(config, ["cache", "cluster_id"]) + ), + engine: Token.asString(Fn.lookupNested(config, ["cache", "engine"])), + nodeType: Token.asString(Fn.lookupNested(config, ["cache", "node_type"])), + numCacheNodes: Token.asNumber( + Fn.lookupNested(config, ["cache", "nodes"]) + ), + parameterGroupName: Token.asString( + Fn.lookupNested(config, ["cache", "parameter_group"]) + ), + tags: Token.asStringMap(Fn.lookupNested(config, ["tags"])), + }); + } +} + +``` + +### Error Handling + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { + Token, + Fn, + Op, + conditional, + TerraformCount, + TerraformStack, +} from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. 
+ */ +import { Resource } from "./.gen/providers/null/resource"; +import { DataAwsLambdaInvocation } from "./.gen/providers/aws/data-aws-lambda-invocation"; class MyConvertedCode extends TerraformStack { constructor(scope: Construct, name: string) { super(scope, name); const example = new DataAwsLambdaInvocation(this, "example", { - functionName: lambdaFunctionTest.functionName, - input: '{\n "key1": "value1",\n "key2": "value2"\n}\n\n', + functionName: Token.asString(awsLambdaFunctionExample.functionName), + input: Token.asString( + Fn.jsonencode({ + action: "validate", + payload: configuration.value, + }) + ), }); - new TerraformOutput(this, "result_entry", { - value: Fn.lookupNested(Fn.jsondecode(Token.asString(example.result)), [ - '"key1"', - ]), + const result = Fn.jsondecode(Token.asString(example.result)); + const hasErrors = Fn.try([ + Op.neq(Fn.lookupNested(result, ["errors"]), "null"), + false, + ]); + const errorMessages = conditional( + hasErrors, + Fn.join(", ", Token.asList(Fn.lookupNested(result, ["errors"]))), + "null" + ); + /*In most cases loops should be handled in the programming language context and + not inside of the Terraform context. If you are looping over something external, e.g. a variable or a file input + you should consider using a for loop. If you are looping over something only known to Terraform, e.g. a result of a data source + you need to keep this like it is.*/ + const validationCheckCount = TerraformCount.of( + Token.asNumber( + conditional( + hasErrors, + fail("Configuration validation failed: ${" + errorMessages + "}"), + 0 + ) + ) + ); + new Resource(this, "validation_check", { + count: validationCheckCount, }); } } @@ -48,17 +165,20 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -This data source supports the following arguments: +The following arguments are required: + +* `functionName` - (Required) Name of the Lambda function. 
+* `input` - (Required) String in JSON format that is passed as payload to the Lambda function. + +The following arguments are optional: -* `functionName` - (Required) Name of the lambda function. -* `input` - (Required) String in JSON format that is passed as payload to the lambda function. -* `qualifier` - (Optional) Qualifier (a.k.a version) of the lambda function. Defaults - to `$LATEST`. +* `qualifier` - (Optional) Qualifier (a.k.a version) of the Lambda function. Defaults to `$LATEST`. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). ## Attribute Reference This data source exports the following attributes in addition to the arguments above: -* `result` - String result of the lambda function invocation. +* `result` - String result of the Lambda function invocation. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/lambda_layer_version.html.markdown b/website/docs/cdktf/typescript/d/lambda_layer_version.html.markdown index 1daeb93679fd..4aa402fd793b 100644 --- a/website/docs/cdktf/typescript/d/lambda_layer_version.html.markdown +++ b/website/docs/cdktf/typescript/d/lambda_layer_version.html.markdown @@ -3,36 +3,143 @@ subcategory: "Lambda" layout: "aws" page_title: "AWS: aws_lambda_layer_version" description: |- - Provides a Lambda Layer Version data source. + Provides details about an AWS Lambda Layer Version. --- # Data Source: aws_lambda_layer_version -Provides information about a Lambda Layer Version. +Provides details about an AWS Lambda Layer Version. Use this data source to retrieve information about a specific layer version or find the latest version compatible with your runtime and architecture requirements. 
## Example Usage +### Get Latest Layer Version + ```typescript // DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug import { Construct } from "constructs"; -import { VariableType, TerraformVariable, TerraformStack } from "cdktf"; +import { Token, TerraformStack } from "cdktf"; /* * Provider bindings are generated by running `cdktf get`. * See https://cdk.tf/provider-generation for more details. */ import { DataAwsLambdaLayerVersion } from "./.gen/providers/aws/data-aws-lambda-layer-version"; +import { LambdaFunction } from "./.gen/providers/aws/lambda-function"; class MyConvertedCode extends TerraformStack { constructor(scope: Construct, name: string) { super(scope, name); - /*Terraform Variables are not always the best fit for getting inputs in the context of Terraform CDK. - You can read more about this at https://cdk.tf/variables*/ - const layerName = new TerraformVariable(this, "layer_name", { - type: VariableType.STRING, + const example = new DataAwsLambdaLayerVersion(this, "example", { + layerName: "my-shared-utilities", }); - new DataAwsLambdaLayerVersion(this, "existing", { - layerName: layerName.stringValue, + const awsLambdaFunctionExample = new LambdaFunction(this, "example_1", { + filename: "function.zip", + functionName: "example_function", + handler: "index.handler", + layers: [Token.asString(example.arn)], + role: lambdaRole.arn, + runtime: "nodejs20.x", + }); + /*This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match.*/ + awsLambdaFunctionExample.overrideLogicalId("example"); + } +} + +``` + +### Get Specific Layer Version + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformOutput, TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. 
+ * See https://cdk.tf/provider-generation for more details. + */ +import { DataAwsLambdaLayerVersion } from "./.gen/providers/aws/data-aws-lambda-layer-version"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + const example = new DataAwsLambdaLayerVersion(this, "example", { + layerName: "production-utilities", + version: 5, + }); + new TerraformOutput(this, "layer_info", { + value: [ + { + arn: example.arn, + description: example.description, + version: example.version, + }, + ], + }); + } +} + +``` + +### Get Latest Compatible Layer Version + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { Token, TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. + */ +import { DataAwsLambdaLayerVersion } from "./.gen/providers/aws/data-aws-lambda-layer-version"; +import { LambdaFunction } from "./.gen/providers/aws/lambda-function"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + const armLayer = new DataAwsLambdaLayerVersion(this, "arm_layer", { + compatibleArchitecture: "arm64", + layerName: "optimized-libraries", + }); + const pythonLayer = new DataAwsLambdaLayerVersion(this, "python_layer", { + compatibleRuntime: "python3.12", + layerName: "python-dependencies", + }); + new LambdaFunction(this, "example", { + architectures: ["arm64"], + filename: "function.zip", + functionName: "multi_layer_function", + handler: "app.handler", + layers: [Token.asString(pythonLayer.arn), Token.asString(armLayer.arn)], + role: lambdaRole.arn, + runtime: "python3.12", + }); + } +} + +``` + +### Compare Layer Versions + +```typescript +// DO NOT EDIT. 
Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { Op, TerraformOutput, conditional, TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. + */ +import { DataAwsLambdaLayerVersion } from "./.gen/providers/aws/data-aws-lambda-layer-version"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + const latest = new DataAwsLambdaLayerVersion(this, "latest", { + layerName: "shared-layer", + }); + const stable = new DataAwsLambdaLayerVersion(this, "stable", { + layerName: "shared-layer", + version: 3, + }); + const useLatestLayer = Op.gt(latest.version, 5); + new TerraformOutput(this, "selected_layer_version", { + value: conditional(useLatestLayer, latest.version, stable.version), }); } } @@ -41,32 +148,33 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -This data source supports the following arguments: +The following arguments are required: + +* `layerName` - (Required) Name of the Lambda layer. -* `layerName` - (Required) Name of the lambda layer. +The following arguments are optional: + +* `compatibleArchitecture` - (Optional) Specific architecture the layer version must support. Conflicts with `version`. If specified, the latest available layer version supporting the provided architecture will be used. +* `compatibleRuntime` - (Optional) Specific runtime the layer version must support. Conflicts with `version`. If specified, the latest available layer version supporting the provided runtime will be used. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
* `version` - (Optional) Specific layer version. Conflicts with `compatibleRuntime` and `compatibleArchitecture`. If omitted, the latest available layer version will be used. -* `compatibleRuntime` (Optional) Specific runtime the layer version must support. Conflicts with `version`. If specified, the latest available layer version supporting the provided runtime will be used. -* `compatibleArchitecture` (Optional) Specific architecture the layer version could support. Conflicts with `version`. If specified, the latest available layer version supporting the provided architecture will be used. ## Attribute Reference This data source exports the following attributes in addition to the arguments above: +* `arn` - ARN of the Lambda Layer with version. * `codeSha256` - Base64-encoded representation of raw SHA-256 sum of the zip file. +* `compatibleArchitectures` - List of [Architectures](https://docs.aws.amazon.com/lambda/latest/dg/API_GetLayerVersion.html#SSS-GetLayerVersion-response-CompatibleArchitectures) the specific Lambda Layer version is compatible with. +* `compatibleRuntimes` - List of [Runtimes](https://docs.aws.amazon.com/lambda/latest/dg/API_GetLayerVersion.html#SSS-GetLayerVersion-response-CompatibleRuntimes) the specific Lambda Layer version is compatible with. +* `createdDate` - Date this resource was created. * `description` - Description of the specific Lambda Layer version. -* `licenseInfo` - License info associated with the specific Lambda Layer version. -* `compatibleRuntimes` - List of [Runtimes][1] the specific Lambda Layer version is compatible with. -* `compatibleArchitectures` - A list of [Architectures][2] the specific Lambda Layer version is compatible with. -* `arn` - ARN of the Lambda Layer with version. * `layerArn` - ARN of the Lambda Layer without version. -* `createdDate` - Date this resource was created. +* `licenseInfo` - License info associated with the specific Lambda Layer version. * `signingJobArn` - ARN of a signing job. 
-* `signingProfileVersionArn` - The ARN for a signing profile version. +* `signingProfileVersionArn` - ARN for a signing profile version. * `sourceCodeHash` - (**Deprecated** use `codeSha256` instead) Base64-encoded representation of raw SHA-256 sum of the zip file. * `sourceCodeSize` - Size in bytes of the function .zip file. -* `version` - This Lambda Layer version. - -[1]: https://docs.aws.amazon.com/lambda/latest/dg/API_GetLayerVersion.html#SSS-GetLayerVersion-response-CompatibleRuntimes -[2]: https://docs.aws.amazon.com/lambda/latest/dg/API_GetLayerVersion.html#SSS-GetLayerVersion-response-CompatibleArchitectures +* `version` - Lambda Layer version. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/launch_configuration.html.markdown b/website/docs/cdktf/typescript/d/launch_configuration.html.markdown index ca20c4968ae4..d0f3c2cf719d 100644 --- a/website/docs/cdktf/typescript/d/launch_configuration.html.markdown +++ b/website/docs/cdktf/typescript/d/launch_configuration.html.markdown @@ -38,6 +38,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the launch configuration. ## Attribute Reference @@ -92,4 +93,4 @@ This data source exports the following attributes in addition to the arguments a * `deviceName` - Name of the device. * `virtualName` - Virtual Name of the device. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/launch_template.html.markdown b/website/docs/cdktf/typescript/d/launch_template.html.markdown index 94b4abfe5bd1..09a89e029d12 100644 --- a/website/docs/cdktf/typescript/d/launch_template.html.markdown +++ b/website/docs/cdktf/typescript/d/launch_template.html.markdown @@ -65,6 +65,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `filter` - (Optional) Configuration block(s) for filtering. Detailed below. * `id` - (Optional) ID of the specific launch template to retrieve. * `name` - (Optional) Name of the launch template. @@ -91,4 +92,4 @@ This resource also exports a full set of attributes corresponding to the argumen - `read` - (Default `20m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/lb.html.markdown b/website/docs/cdktf/typescript/d/lb.html.markdown index 69cf4d7ea1bb..d0f9b3e12230 100644 --- a/website/docs/cdktf/typescript/d/lb.html.markdown +++ b/website/docs/cdktf/typescript/d/lb.html.markdown @@ -55,6 +55,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `arn` - (Optional) Full ARN of the load balancer. * `name` - (Optional) Unique name of the load balancer. 
* `tags` - (Optional) Mapping of tags, each pair of which must exactly match a pair on the desired load balancer. @@ -74,4 +75,4 @@ returned attributes - they are identical. - `read` - (Default `20m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/lb_hosted_zone_id.html.markdown b/website/docs/cdktf/typescript/d/lb_hosted_zone_id.html.markdown index 5e3c244c0fec..3efa9c9e3559 100644 --- a/website/docs/cdktf/typescript/d/lb_hosted_zone_id.html.markdown +++ b/website/docs/cdktf/typescript/d/lb_hosted_zone_id.html.markdown @@ -47,14 +47,13 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: -* `region` - (Optional) Name of the region whose AWS ELB HostedZoneId is desired. - Defaults to the region from the AWS provider configuration. +* `region` - (Optional) Name of the Region whose AWS ELB HostedZoneId is desired. Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `loadBalancerType` - (Optional) Type of load balancer to create. Possible values are `application` or `network`. The default value is `application`. ## Attribute Reference This data source exports the following attributes in addition to the arguments above: -* `id` - ID of the AWS ELB HostedZoneId in the selected region. +* `id` - ID of the AWS ELB HostedZoneId in the selected Region. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/lb_listener.html.markdown b/website/docs/cdktf/typescript/d/lb_listener.html.markdown index ab207ed4bee2..025f75d6126b 100644 --- a/website/docs/cdktf/typescript/d/lb_listener.html.markdown +++ b/website/docs/cdktf/typescript/d/lb_listener.html.markdown @@ -55,6 +55,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `arn` - (Optional) ARN of the listener. Required if `loadBalancerArn` and `port` is not set. * `loadBalancerArn` - (Optional) ARN of the load balancer. Required if `arn` is not set. * `port` - (Optional) Port of the listener. Required if `arn` is not set. @@ -71,4 +72,4 @@ See the [LB Listener Resource](/docs/providers/aws/r/lb_listener.html) for detai - `read` - (Default `20m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/lb_listener_rule.html.markdown b/website/docs/cdktf/typescript/d/lb_listener_rule.html.markdown index 7d05d4d7e5d3..2a6292c79587 100644 --- a/website/docs/cdktf/typescript/d/lb_listener_rule.html.markdown +++ b/website/docs/cdktf/typescript/d/lb_listener_rule.html.markdown @@ -24,7 +24,7 @@ import { VariableType, TerraformVariable, TerraformStack } from "cdktf"; * Provider bindings are generated by running `cdktf get`. * See https://cdk.tf/provider-generation for more details. 
*/ -import { DataAwsLbListenerRule } from "./.gen/providers/aws/"; +import { DataAwsLbListenerRule } from "./.gen/providers/aws/data-aws-lb-listener-rule"; class MyConvertedCode extends TerraformStack { constructor(scope: Construct, name: string) { super(scope, name); @@ -34,7 +34,7 @@ class MyConvertedCode extends TerraformStack { type: VariableType.STRING, }); new DataAwsLbListenerRule(this, "example", { - arn: lbRuleArn.value, + arn: lbRuleArn.stringValue, }); } } @@ -51,7 +51,7 @@ import { VariableType, TerraformVariable, TerraformStack } from "cdktf"; * Provider bindings are generated by running `cdktf get`. * See https://cdk.tf/provider-generation for more details. */ -import { DataAwsLbListenerRule } from "./.gen/providers/aws/"; +import { DataAwsLbListenerRule } from "./.gen/providers/aws/data-aws-lb-listener-rule"; class MyConvertedCode extends TerraformStack { constructor(scope: Construct, name: string) { super(scope, name); @@ -64,8 +64,8 @@ class MyConvertedCode extends TerraformStack { type: VariableType.NUMBER, }); new DataAwsLbListenerRule(this, "example", { - listener_arn: lbListenerArn.value, - priority: lbRulePriority.value, + listenerArn: lbListenerArn.stringValue, + priority: lbRulePriority.numberValue, }); } } @@ -76,6 +76,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `arn` - (Optional) ARN of the Listener Rule. Either `arn` or `listenerArn` must be set. * `listenerArn` - (Optional) ARN of the associated Listener. 
@@ -191,4 +192,4 @@ This data source exports the following attributes in addition to the arguments a * `values` - Set of `key`-`value` pairs indicating the query string parameters to match. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/lb_target_group.html.markdown b/website/docs/cdktf/typescript/d/lb_target_group.html.markdown index 5dadbd1a89c5..25eb24ac2ad5 100644 --- a/website/docs/cdktf/typescript/d/lb_target_group.html.markdown +++ b/website/docs/cdktf/typescript/d/lb_target_group.html.markdown @@ -55,6 +55,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `arn` - (Optional) Full ARN of the target group. * `name` - (Optional) Unique name of the target group. * `tags` - (Optional) Mapping of tags, each pair of which must exactly match a pair on the desired target group. @@ -74,4 +75,4 @@ on the returned attributes - they are identical. - `read` - (Default `20m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/lb_trust_store.html.markdown b/website/docs/cdktf/typescript/d/lb_trust_store.html.markdown index 5b7bef22feaa..c92aa8a603c6 100644 --- a/website/docs/cdktf/typescript/d/lb_trust_store.html.markdown +++ b/website/docs/cdktf/typescript/d/lb_trust_store.html.markdown @@ -55,6 +55,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `arn` - (Optional) Full ARN of the trust store. * `name` - (Optional) Unique name of the trust store. @@ -73,4 +74,4 @@ on the returned attributes - they are identical. - `read` - (Default `20m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/lbs.html.markdown b/website/docs/cdktf/typescript/d/lbs.html.markdown index 81e7d37800b4..60d21a228d33 100644 --- a/website/docs/cdktf/typescript/d/lbs.html.markdown +++ b/website/docs/cdktf/typescript/d/lbs.html.markdown @@ -43,6 +43,7 @@ class MyConvertedCode extends TerraformStack { The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `tags` - (Optional) Map of tags, each pair of which must exactly match a pair on the desired Load Balancers. @@ -52,4 +53,4 @@ This data source exports the following attributes in addition to the arguments a * `arns` - Set of Load Balancer ARNs. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/lex_bot.html.markdown b/website/docs/cdktf/typescript/d/lex_bot.html.markdown index 92222bffc443..4ed0139c75b0 100644 --- a/website/docs/cdktf/typescript/d/lex_bot.html.markdown +++ b/website/docs/cdktf/typescript/d/lex_bot.html.markdown @@ -39,6 +39,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the bot. The name is case sensitive. * `version` - (Optional) Version or alias of the bot. @@ -63,4 +64,4 @@ This data source exports the following attributes in addition to the arguments a * `version` - Version of the bot. For a new bot, the version is always `$LATEST`. * `voiceId` - Amazon Polly voice ID that the Amazon Lex Bot uses for voice interactions with the user. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/lex_bot_alias.html.markdown b/website/docs/cdktf/typescript/d/lex_bot_alias.html.markdown index d0d1ca4cb158..d6225837b703 100644 --- a/website/docs/cdktf/typescript/d/lex_bot_alias.html.markdown +++ b/website/docs/cdktf/typescript/d/lex_bot_alias.html.markdown @@ -39,6 +39,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `botName` - (Required) Name of the bot. * `name` - (Required) Name of the bot alias. The name is case sensitive. @@ -55,4 +56,4 @@ This data source exports the following attributes in addition to the arguments a * `lastUpdatedDate` - Date that the bot alias was updated. When you create a resource, the creation date and the last updated date are the same. * `name` - Name of the alias. The name is not case sensitive. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/lex_intent.html.markdown b/website/docs/cdktf/typescript/d/lex_intent.html.markdown index 117d09ed3f9c..44b0a3dce047 100644 --- a/website/docs/cdktf/typescript/d/lex_intent.html.markdown +++ b/website/docs/cdktf/typescript/d/lex_intent.html.markdown @@ -39,6 +39,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the intent. The name is case sensitive. * `version` - (Optional) Version of the intent. @@ -59,4 +60,4 @@ intent on. To find the signature for an intent, see in the Alexa Skills Kit. * `version` - Version of the bot. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/lex_slot_type.html.markdown b/website/docs/cdktf/typescript/d/lex_slot_type.html.markdown index 1cd09aa7faf3..b455220c2ef3 100644 --- a/website/docs/cdktf/typescript/d/lex_slot_type.html.markdown +++ b/website/docs/cdktf/typescript/d/lex_slot_type.html.markdown @@ -39,6 +39,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the slot type. The name is case sensitive. * `version` - (Optional) Version of the slot type. @@ -61,4 +62,4 @@ value is similar to the slot value. 
`TOP_RESOLUTION` returns the first value in if there is a resolution list for the slot, otherwise null is returned. * `version` - Version of the slot type. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/licensemanager_grants.html.markdown b/website/docs/cdktf/typescript/d/licensemanager_grants.html.markdown index 65f82c43a93f..817ed7033534 100644 --- a/website/docs/cdktf/typescript/d/licensemanager_grants.html.markdown +++ b/website/docs/cdktf/typescript/d/licensemanager_grants.html.markdown @@ -47,6 +47,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `filter` - (Optional) Custom filter block as described below. ### `filter` @@ -90,4 +91,4 @@ This data source exports the following attributes in addition to the arguments a * `arns` - List of all the license grant ARNs found. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/licensemanager_received_license.html.markdown b/website/docs/cdktf/typescript/d/licensemanager_received_license.html.markdown index de4efe853a95..e70b0498b0be 100644 --- a/website/docs/cdktf/typescript/d/licensemanager_received_license.html.markdown +++ b/website/docs/cdktf/typescript/d/licensemanager_received_license.html.markdown @@ -41,6 +41,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `licenseArn` - (Required) The ARN of the received license you want data for. ## Attribute Reference @@ -124,4 +125,4 @@ A list with a single map. * `begin` - Start of the validity time range. * `end` - End of the validity time range. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/licensemanager_received_licenses.html.markdown b/website/docs/cdktf/typescript/d/licensemanager_received_licenses.html.markdown index 7166fef64ada..c3f6d1db1d8c 100644 --- a/website/docs/cdktf/typescript/d/licensemanager_received_licenses.html.markdown +++ b/website/docs/cdktf/typescript/d/licensemanager_received_licenses.html.markdown @@ -45,6 +45,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `filter` - (Optional) Custom filter block as described below. ### `filter` @@ -88,4 +89,4 @@ This data source exports the following attributes in addition to the arguments a * `arns` - List of all the license ARNs found. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/location_geofence_collection.html.markdown b/website/docs/cdktf/typescript/d/location_geofence_collection.html.markdown index 4b8d1653bd0b..0af877c90ced 100644 --- a/website/docs/cdktf/typescript/d/location_geofence_collection.html.markdown +++ b/website/docs/cdktf/typescript/d/location_geofence_collection.html.markdown @@ -38,8 +38,9 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -The following arguments are required: +This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `collectionName` - (Required) Name of the geofence collection. ## Attribute Reference @@ -53,4 +54,4 @@ This data source exports the following attributes in addition to the arguments a * `tags` - Key-value map of resource tags for the geofence collection. * `updateTime` - Timestamp for when the geofence collection resource was last updated in ISO 8601 format. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/location_map.html.markdown b/website/docs/cdktf/typescript/d/location_map.html.markdown index 8e2f61c001a2..cea5715bec59 100644 --- a/website/docs/cdktf/typescript/d/location_map.html.markdown +++ b/website/docs/cdktf/typescript/d/location_map.html.markdown @@ -38,6 +38,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `mapName` - (Required) Name of the map resource. ## Attribute Reference @@ -52,4 +53,4 @@ This data source exports the following attributes in addition to the arguments a * `tags` - Key-value map of resource tags for the map. * `updateTime` - Timestamp for when the map resource was last updated in ISO 8601 format. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/location_place_index.html.markdown b/website/docs/cdktf/typescript/d/location_place_index.html.markdown index 2198421097e8..a01ba4c13a6a 100644 --- a/website/docs/cdktf/typescript/d/location_place_index.html.markdown +++ b/website/docs/cdktf/typescript/d/location_place_index.html.markdown @@ -38,6 +38,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `indexName` - (Required) Name of the place index resource. ## Attribute Reference @@ -52,4 +53,4 @@ This data source exports the following attributes in addition to the arguments a * `tags` - Key-value map of resource tags for the place index. * `updateTime` - Timestamp for when the place index resource was last updated in ISO 8601 format. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/location_route_calculator.html.markdown b/website/docs/cdktf/typescript/d/location_route_calculator.html.markdown index 42cc65c2da40..84543aebc50f 100644 --- a/website/docs/cdktf/typescript/d/location_route_calculator.html.markdown +++ b/website/docs/cdktf/typescript/d/location_route_calculator.html.markdown @@ -38,6 +38,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `calculatorName` - (Required) Name of the route calculator resource. ## Attribute Reference @@ -51,4 +52,4 @@ This data source exports the following attributes in addition to the arguments a * `tags` - Key-value map of resource tags for the route calculator. * `updateTime` - Timestamp for when the route calculator resource was last updated in ISO 8601 format. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/location_tracker.html.markdown b/website/docs/cdktf/typescript/d/location_tracker.html.markdown index d870f5adc544..afe4908e1e18 100644 --- a/website/docs/cdktf/typescript/d/location_tracker.html.markdown +++ b/website/docs/cdktf/typescript/d/location_tracker.html.markdown @@ -38,6 +38,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
* `trackerName` - (Required) Name of the tracker resource. ## Attribute Reference @@ -52,4 +53,4 @@ This data source exports the following attributes in addition to the arguments a * `trackerArn` - ARN for the tracker resource. Used when you need to specify a resource across all AWS. * `updateTime` - Timestamp for when the tracker resource was last updated in ISO 8601 format. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/location_tracker_association.html.markdown b/website/docs/cdktf/typescript/d/location_tracker_association.html.markdown index d66adeb3b0bd..320e937c6f69 100644 --- a/website/docs/cdktf/typescript/d/location_tracker_association.html.markdown +++ b/website/docs/cdktf/typescript/d/location_tracker_association.html.markdown @@ -40,8 +40,9 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -The following arguments are required: +This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `consumerArn` - (Required) ARN of the geofence collection associated to tracker resource. * `trackerName` - (Required) Name of the tracker resource associated with a geofence collection. @@ -49,4 +50,4 @@ The following arguments are required: This data source exports no additional attributes. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/location_tracker_associations.html.markdown b/website/docs/cdktf/typescript/d/location_tracker_associations.html.markdown index 5d1f89cf24f3..a08dc69fd19d 100644 --- a/website/docs/cdktf/typescript/d/location_tracker_associations.html.markdown +++ b/website/docs/cdktf/typescript/d/location_tracker_associations.html.markdown @@ -38,8 +38,9 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -The following arguments are required: +This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `trackerName` - (Required) Name of the tracker resource associated with a geofence collection. ## Attribute Reference @@ -48,4 +49,4 @@ This data source exports the following attributes in addition to the arguments a * `consumerArns` - List of geofence collection ARNs associated to the tracker resource. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/media_convert_queue.html.markdown b/website/docs/cdktf/typescript/d/media_convert_queue.html.markdown index d9932473ced5..9703eb9a9d3f 100644 --- a/website/docs/cdktf/typescript/d/media_convert_queue.html.markdown +++ b/website/docs/cdktf/typescript/d/media_convert_queue.html.markdown @@ -8,7 +8,7 @@ description: |- -# Resource: aws_media_convert_queue +# Data Source: aws_media_convert_queue Retrieve information about a AWS Elemental MediaConvert Queue. 
@@ -38,6 +38,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `id` - (Required) Unique identifier of the queue. The same as `name`. ## Attribute Reference @@ -49,4 +50,4 @@ This data source exports the following attributes in addition to the arguments a * `status` - The status of the queue. * `tags` - A map of tags assigned to the resource, including those inherited from the provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/medialive_input.html.markdown b/website/docs/cdktf/typescript/d/medialive_input.html.markdown index 66c223a67bd8..95a80438da60 100644 --- a/website/docs/cdktf/typescript/d/medialive_input.html.markdown +++ b/website/docs/cdktf/typescript/d/medialive_input.html.markdown @@ -38,8 +38,9 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -The following arguments are required: +This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `id` - (Required) The ID of the Input. ## Attribute Reference @@ -62,4 +63,4 @@ This data source exports the following attributes in addition to the arguments a * `tags` - A map of tags assigned to the Input. * `type` - The type of the input. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/memorydb_acl.html.markdown b/website/docs/cdktf/typescript/d/memorydb_acl.html.markdown index 0b6a0b7e0c0f..894da5e4616c 100644 --- a/website/docs/cdktf/typescript/d/memorydb_acl.html.markdown +++ b/website/docs/cdktf/typescript/d/memorydb_acl.html.markdown @@ -8,7 +8,7 @@ description: |- -# Resource: aws_memorydb_acl +# Data Source: aws_memorydb_acl Provides information about a MemoryDB ACL. @@ -36,8 +36,9 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -The following arguments are required: +This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the ACL. ## Attribute Reference @@ -50,4 +51,4 @@ This data source exports the following attributes in addition to the arguments a * `tags` - Map of tags assigned to the ACL. * `userNames` - Set of MemoryDB user names included in this ACL. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/memorydb_cluster.html.markdown b/website/docs/cdktf/typescript/d/memorydb_cluster.html.markdown index a33559895163..ee84d827d7ca 100644 --- a/website/docs/cdktf/typescript/d/memorydb_cluster.html.markdown +++ b/website/docs/cdktf/typescript/d/memorydb_cluster.html.markdown @@ -8,7 +8,7 @@ description: |- -# Resource: aws_memorydb_cluster +# Data Source: aws_memorydb_cluster Provides information about a MemoryDB Cluster. 
@@ -36,8 +36,9 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -The following arguments are required: +This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the cluster. ## Attribute Reference @@ -83,4 +84,4 @@ This data source exports the following attributes in addition to the arguments a * `tlsEnabled` - When true, in-transit encryption is enabled for the cluster. * `tags` - Map of tags assigned to the cluster. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/memorydb_parameter_group.html.markdown b/website/docs/cdktf/typescript/d/memorydb_parameter_group.html.markdown index 243fa9ca6b83..6b44e40fccd0 100644 --- a/website/docs/cdktf/typescript/d/memorydb_parameter_group.html.markdown +++ b/website/docs/cdktf/typescript/d/memorydb_parameter_group.html.markdown @@ -8,7 +8,7 @@ description: |- -# Resource: aws_memorydb_parameter_group +# Data Source: aws_memorydb_parameter_group Provides information about a MemoryDB Parameter Group. @@ -36,8 +36,9 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -The following arguments are required: +This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the parameter group. 
## Attribute Reference @@ -53,4 +54,4 @@ This data source exports the following attributes in addition to the arguments a * `value` - Value of the parameter. * `tags` - Map of tags assigned to the parameter group. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/memorydb_snapshot.html.markdown b/website/docs/cdktf/typescript/d/memorydb_snapshot.html.markdown index d49a15542593..97d011928b51 100644 --- a/website/docs/cdktf/typescript/d/memorydb_snapshot.html.markdown +++ b/website/docs/cdktf/typescript/d/memorydb_snapshot.html.markdown @@ -8,7 +8,7 @@ description: |- -# Resource: aws_memorydb_snapshot +# Data Source: aws_memorydb_snapshot Provides information about a MemoryDB Snapshot. @@ -36,8 +36,9 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -The following arguments are required: +This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the snapshot. ## Attribute Reference @@ -66,4 +67,4 @@ This data source exports the following attributes in addition to the arguments a * `source` - Whether the snapshot is from an automatic backup (`automated`) or was created manually (`manual`). * `tags` - Map of tags assigned to the snapshot. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/memorydb_subnet_group.html.markdown b/website/docs/cdktf/typescript/d/memorydb_subnet_group.html.markdown index bbee78c67042..e619254b31ff 100644 --- a/website/docs/cdktf/typescript/d/memorydb_subnet_group.html.markdown +++ b/website/docs/cdktf/typescript/d/memorydb_subnet_group.html.markdown @@ -8,7 +8,7 @@ description: |- -# Resource: aws_memorydb_subnet_group +# Data Source: aws_memorydb_subnet_group Provides information about a MemoryDB Subnet Group. @@ -36,8 +36,9 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -The following arguments are required: +This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the subnet group. ## Attribute Reference @@ -51,4 +52,4 @@ This data source exports the following attributes in addition to the arguments a * `vpcId` - VPC in which the subnet group exists. * `tags` - Map of tags assigned to the subnet group. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/memorydb_user.html.markdown b/website/docs/cdktf/typescript/d/memorydb_user.html.markdown index 57a0d5ee756e..d274f8e7e5ae 100644 --- a/website/docs/cdktf/typescript/d/memorydb_user.html.markdown +++ b/website/docs/cdktf/typescript/d/memorydb_user.html.markdown @@ -8,7 +8,7 @@ description: |- -# Resource: aws_memorydb_user +# Data Source: aws_memorydb_user Provides information about a MemoryDB User. 
@@ -36,8 +36,9 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -The following arguments are required: +This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `userName` - (Required) Name of the user. ## Attribute Reference @@ -53,4 +54,4 @@ This data source exports the following attributes in addition to the arguments a * `minimumEngineVersion` - Minimum engine version supported for the user. * `tags` - Map of tags assigned to the user. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/mq_broker.html.markdown b/website/docs/cdktf/typescript/d/mq_broker.html.markdown index dd7619a93fae..fd9167b928fa 100644 --- a/website/docs/cdktf/typescript/d/mq_broker.html.markdown +++ b/website/docs/cdktf/typescript/d/mq_broker.html.markdown @@ -3,21 +3,21 @@ subcategory: "MQ" layout: "aws" page_title: "AWS: aws_mq_broker" description: |- - Provides a MQ Broker data source. + Provides details about an existing Amazon MQ broker. --- # Data Source: aws_mq_broker -Provides information about a MQ Broker. +Provides details about an existing Amazon MQ broker. Use this data source to retrieve configuration and metadata for an Amazon MQ broker by ID or name. ## Example Usage ```typescript // DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug import { Construct } from "constructs"; -import { VariableType, TerraformVariable, TerraformStack } from "cdktf"; +import { TerraformStack } from "cdktf"; /* * Provider bindings are generated by running `cdktf get`. * See https://cdk.tf/provider-generation for more details. 
@@ -26,21 +26,8 @@ import { DataAwsMqBroker } from "./.gen/providers/aws/data-aws-mq-broker"; class MyConvertedCode extends TerraformStack { constructor(scope: Construct, name: string) { super(scope, name); - /*Terraform Variables are not always the best fit for getting inputs in the context of Terraform CDK. - You can read more about this at https://cdk.tf/variables*/ - const brokerId = new TerraformVariable(this, "broker_id", { - default: "", - type: VariableType.STRING, - }); - const brokerName = new TerraformVariable(this, "broker_name", { - default: "", - type: VariableType.STRING, - }); - new DataAwsMqBroker(this, "by_id", { - brokerId: brokerId.stringValue, - }); - new DataAwsMqBroker(this, "by_name", { - brokerName: brokerName.stringValue, + new DataAwsMqBroker(this, "example", { + brokerId: "b-1234a5b6-78cd-901e-2fgh-3i45j6k178l9", }); } } @@ -49,16 +36,83 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -This data source supports the following arguments: +The following arguments are optional: -* `brokerId` - (Optional) Unique id of the mq broker. -* `brokerName` - (Optional) Unique name of the mq broker. +* `brokerId` - (Optional) Unique ID of the MQ broker. +* `brokerName` - (Optional) Unique name of the MQ broker. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +~> **Note:** Either `brokerId` or `brokerName` must be specified. ## Attribute Reference This data source exports the following attributes in addition to the arguments above: -See the [`aws_mq_broker` resource](/docs/providers/aws/r/mq_broker.html) for details on the returned attributes. -They are identical except for user password, which is not returned when describing broker. +* `arn` - ARN of the broker. 
+* `authenticationStrategy` - Authentication strategy used to secure the broker. +* `autoMinorVersionUpgrade` - Whether to automatically upgrade to new minor versions of brokers as Amazon MQ makes releases available. +* `configuration` - Configuration block for broker configuration. See [Configuration](#configuration) below. +* `deploymentMode` - Deployment mode of the broker. +* `encryptionOptions` - Configuration block containing encryption options. See [Encryption Options](#encryption-options) below. +* `engineType` - Type of broker engine. +* `engineVersion` - Version of the broker engine. +* `hostInstanceType` - Broker's instance type. +* `instances` - List of information about allocated brokers (both active & standby). See [Instances](#instances) below. +* `ldapServerMetadata` - Configuration block for the LDAP server used to authenticate and authorize connections to the broker. See [LDAP Server Metadata](#ldap-server-metadata) below. +* `logs` - Configuration block for the logging configuration of the broker. See [Logs](#logs) below. +* `maintenanceWindowStartTime` - Configuration block for the maintenance window start time. See [Maintenance Window Start Time](#maintenance-window-start-time) below. +* `publiclyAccessible` - Whether to enable connections from applications outside of the VPC that hosts the broker's subnets. +* `securityGroups` - List of security group IDs assigned to the broker. +* `storageType` - Storage type of the broker. +* `subnetIds` - List of subnet IDs in which to launch the broker. +* `tags` - Map of tags assigned to the broker. +* `user` - Configuration block for broker users. See [User](#user) below. + +### Configuration + +* `id` - Configuration ID. +* `revision` - Revision of the Configuration. + +### Encryption Options + +* `kmsKeyId` - Amazon Resource Name (ARN) of Key Management Service (KMS) Customer Master Key (CMK) to use for encryption at rest. 
+* `useAwsOwnedKey` - Whether to enable an AWS-owned KMS CMK that is not in your account. + +### Instances + +* `console_url` - URL of the ActiveMQ Web Console or the RabbitMQ Management UI depending on `engineType`. +* `endpoints` - Broker's wire-level protocol endpoints. +* `ipAddress` - IP Address of the broker. + +### LDAP Server Metadata + +* `hosts` - List of a fully qualified domain name of the LDAP server and an optional failover server. +* `roleBase` - Fully qualified name of the directory to search for a user's groups. +* `roleName` - LDAP attribute that identifies the group name attribute in the object returned from the group membership query. +* `roleSearchMatching` - Search criteria for groups. +* `roleSearchSubtree` - Whether the directory search scope is the entire sub-tree. +* `serviceAccountPassword` - Service account password. +* `serviceAccountUsername` - Service account username. +* `userBase` - Fully qualified name of the directory where you want to search for users. +* `userRoleName` - Name of the LDAP attribute for the user group membership. +* `userSearchMatching` - Search criteria for users. +* `userSearchSubtree` - Whether the directory search scope is the entire sub-tree. + +### Logs + +* `audit` - Whether audit logging is enabled. +* `general` - Whether general logging is enabled. + +### Maintenance Window Start Time + +* `dayOfWeek` - Day of the week. +* `timeOfDay` - Time, in 24-hour format. +* `timeZone` - Time zone in either the Country/City format or the UTC offset format. + +### User + +* `consoleAccess` - Whether to enable access to the ActiveMQ Web Console for the user. +* `groups` - List of groups to which the ActiveMQ user belongs. +* `replicationUser` - Whether to set replication user. +* `username` - Username of the user. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/mq_broker_engine_types.html.markdown b/website/docs/cdktf/typescript/d/mq_broker_engine_types.html.markdown index 5f906d14f89f..53188fba0d25 100644 --- a/website/docs/cdktf/typescript/d/mq_broker_engine_types.html.markdown +++ b/website/docs/cdktf/typescript/d/mq_broker_engine_types.html.markdown @@ -3,19 +3,17 @@ subcategory: "MQ" layout: "aws" page_title: "AWS: aws_mq_broker_engine_types" description: |- - Retrieve information about available broker engines. + Provides details about available MQ broker engine types. --- # Data Source: aws_mq_broker_engine_types -Retrieve information about available broker engines. +Provides details about available MQ broker engine types. Use this data source to retrieve supported engine types and their versions for Amazon MQ brokers. ## Example Usage -### Basic Usage - ```typescript // DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug import { Construct } from "constructs"; @@ -40,17 +38,22 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: -* `engineType` - (Optional) The MQ engine type to return version details for. +* `engineType` - (Optional) MQ engine type to return version details for. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). ## Attribute Reference This data source exports the following attributes in addition to the arguments above: -* `brokerEngineTypes` - A list of available engine types and versions. See [Engine Types](#engine-types). +* `brokerEngineTypes` - List of available engine types and versions. See [Engine Types](#engine-types). 
+ +### Engine Types + +* `engineType` - Broker's engine type. +* `engine_versions` - List of engine versions. See [Engine Versions](#engine-versions). -### engine-types +### Engine Versions -* `engineType` - The broker's engine type. -* `engine_versions` - The list of engine versions. +* `name` - Name of the engine version. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/mq_broker_instance_type_offerings.html.markdown b/website/docs/cdktf/typescript/d/mq_broker_instance_type_offerings.html.markdown index 34a2989ba538..d00ce8693165 100644 --- a/website/docs/cdktf/typescript/d/mq_broker_instance_type_offerings.html.markdown +++ b/website/docs/cdktf/typescript/d/mq_broker_instance_type_offerings.html.markdown @@ -3,14 +3,14 @@ subcategory: "MQ" layout: "aws" page_title: "AWS: aws_mq_broker_instance_type_offerings" description: |- - Provides a MQ Broker Instance Offerings data source. + Provides details about available MQ broker instance type offerings. --- # Data Source: aws_mq_broker_instance_type_offerings -Provides information about a MQ Broker Instance Offerings. +Provides details about available MQ broker instance type offerings. Use this data source to discover supported instance types, storage types, and deployment modes for Amazon MQ brokers. 
## Example Usage @@ -26,21 +26,21 @@ import { DataAwsMqBrokerInstanceTypeOfferings } from "./.gen/providers/aws/data- class MyConvertedCode extends TerraformStack { constructor(scope: Construct, name: string) { super(scope, name); - new DataAwsMqBrokerInstanceTypeOfferings(this, "all", { + new DataAwsMqBrokerInstanceTypeOfferings(this, "activemq", { engineType: "ACTIVEMQ", - hostInstanceType: "mq.m5.large", + }); + new DataAwsMqBrokerInstanceTypeOfferings(this, "all", {}); + new DataAwsMqBrokerInstanceTypeOfferings(this, "ebs", { storageType: "EBS", }); - new DataAwsMqBrokerInstanceTypeOfferings(this, "empty", {}); - new DataAwsMqBrokerInstanceTypeOfferings(this, "engine", { + new DataAwsMqBrokerInstanceTypeOfferings(this, "filtered", { engineType: "ACTIVEMQ", - }); - new DataAwsMqBrokerInstanceTypeOfferings(this, "instance", { hostInstanceType: "mq.m5.large", - }); - new DataAwsMqBrokerInstanceTypeOfferings(this, "storage", { storageType: "EBS", }); + new DataAwsMqBrokerInstanceTypeOfferings(this, "m5", { + hostInstanceType: "mq.m5.large", + }); } } @@ -48,29 +48,30 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -This data source supports the following arguments: +The following arguments are optional: * `engineType` - (Optional) Filter response by engine type. * `hostInstanceType` - (Optional) Filter response by host instance type. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `storageType` - (Optional) Filter response by storage type. ## Attribute Reference This data source exports the following attributes in addition to the arguments above: -* `brokerInstanceOptions` - Option for host instance type. See Broker Instance Options below. 
+* `brokerInstanceOptions` - List of broker instance options. See [Broker Instance Options](#broker-instance-options) below. ### Broker Instance Options -* `availabilityZones` - List of available AZs. See Availability Zones. below +* `availabilityZones` - List of available Availability Zones. See [Availability Zones](#availability-zones) below. * `engineType` - Broker's engine type. * `hostInstanceType` - Broker's instance type. * `storageType` - Broker's storage type. -* `supported_deployment_modes` - The list of supported deployment modes. -* `supported_engine_versions` - The list of supported engine versions. +* `supported_deployment_modes` - List of supported deployment modes. +* `supported_engine_versions` - List of supported engine versions. ### Availability Zones * `name` - Name of the Availability Zone. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/msk_bootstrap_brokers.html.markdown b/website/docs/cdktf/typescript/d/msk_bootstrap_brokers.html.markdown index f3a00a978aa3..d9fb9af0d35b 100644 --- a/website/docs/cdktf/typescript/d/msk_bootstrap_brokers.html.markdown +++ b/website/docs/cdktf/typescript/d/msk_bootstrap_brokers.html.markdown @@ -38,6 +38,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `clusterArn` - (Required) ARN of the cluster the nodes belong to. ## Attribute Reference @@ -55,4 +56,4 @@ This data source exports the following attributes in addition to the arguments a * `bootstrapBrokersVpcConnectivitySaslScram` - A string containing one or more DNS names (or IP addresses) and SASL SCRAM port pairs for VPC connectivity. 
* `bootstrapBrokersVpcConnectivityTls` - A string containing one or more DNS names (or IP addresses) and TLS port pairs for VPC connectivity. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/msk_broker_nodes.html.markdown b/website/docs/cdktf/typescript/d/msk_broker_nodes.html.markdown index aad3a12dc0d3..d46449dbab84 100644 --- a/website/docs/cdktf/typescript/d/msk_broker_nodes.html.markdown +++ b/website/docs/cdktf/typescript/d/msk_broker_nodes.html.markdown @@ -38,6 +38,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `clusterArn` - (Required) ARN of the cluster the nodes belong to. ## Attribute Reference @@ -55,4 +56,4 @@ This data source exports the following attributes in addition to the arguments a * `endpoints` - Set of endpoints for accessing the broker. This does not include ports * `node_arn` - ARN of the node - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/msk_cluster.html.markdown b/website/docs/cdktf/typescript/d/msk_cluster.html.markdown index 34dcb01c93d3..b5b8d0173d6c 100644 --- a/website/docs/cdktf/typescript/d/msk_cluster.html.markdown +++ b/website/docs/cdktf/typescript/d/msk_cluster.html.markdown @@ -40,6 +40,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `clusterName` - (Required) Name of the cluster. ## Attribute Reference @@ -62,4 +63,4 @@ This data source exports the following attributes in addition to the arguments a * `zookeeperConnectString` - A comma separated list of one or more hostname:port pairs to use to connect to the Apache Zookeeper cluster. The returned values are sorted alphbetically. The AWS API may not return all endpoints, so this value is not guaranteed to be stable across applies. * `zookeeperConnectStringTls` - A comma separated list of one or more hostname:port pairs to use to connect to the Apache Zookeeper cluster via TLS. The returned values are sorted alphabetically. The AWS API may not return all endpoints, so this value is not guaranteed to be stable across applies. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/msk_configuration.html.markdown b/website/docs/cdktf/typescript/d/msk_configuration.html.markdown index 5e908b57e605..183518d7f7ee 100644 --- a/website/docs/cdktf/typescript/d/msk_configuration.html.markdown +++ b/website/docs/cdktf/typescript/d/msk_configuration.html.markdown @@ -38,6 +38,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the configuration. ## Attribute Reference @@ -50,4 +51,4 @@ This data source exports the following attributes in addition to the arguments a * `kafkaVersions` - List of Apache Kafka versions which can use this configuration. 
* `serverProperties` - Contents of the server.properties file. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/msk_kafka_version.html.markdown b/website/docs/cdktf/typescript/d/msk_kafka_version.html.markdown index 3cf231250754..4a2e7e57e96f 100644 --- a/website/docs/cdktf/typescript/d/msk_kafka_version.html.markdown +++ b/website/docs/cdktf/typescript/d/msk_kafka_version.html.markdown @@ -41,6 +41,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `preferredVersions` - (Optional) Ordered list of preferred Kafka versions. The first match in this list will be returned. Either `preferredVersions` or `version` must be set. * `version` - (Optional) Version of MSK Kafka. For example 2.4.1.1 or "2.2.1" etc. Either `preferredVersions` or `version` must be set. @@ -50,4 +51,4 @@ This data source exports the following attributes in addition to the arguments a * `status` - Status of the MSK Kafka version eg. `ACTIVE` or `DEPRECATED`. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/msk_vpc_connection.html.markdown b/website/docs/cdktf/typescript/d/msk_vpc_connection.html.markdown index 02e41d2795ab..322d5ac77d97 100644 --- a/website/docs/cdktf/typescript/d/msk_vpc_connection.html.markdown +++ b/website/docs/cdktf/typescript/d/msk_vpc_connection.html.markdown @@ -37,6 +37,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `arn` - (Required) ARN of the VPC Connection. ## Attribute Reference @@ -50,4 +51,4 @@ This data source exports the following attributes in addition to the arguments a * `targetClusterArn` - The Amazon Resource Name (ARN) of the cluster. * `vpcId` - The VPC ID of the remote client. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/mskconnect_connector.html.markdown b/website/docs/cdktf/typescript/d/mskconnect_connector.html.markdown index 9a9dbf0a9a3b..fc2a3805edca 100644 --- a/website/docs/cdktf/typescript/d/mskconnect_connector.html.markdown +++ b/website/docs/cdktf/typescript/d/mskconnect_connector.html.markdown @@ -38,6 +38,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the connector. 
## Attribute Reference @@ -49,4 +50,4 @@ This data source exports the following attributes in addition to the arguments a * `tags` - A map of tags assigned to the resource. * `version` - Current version of the connector. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/mskconnect_custom_plugin.html.markdown b/website/docs/cdktf/typescript/d/mskconnect_custom_plugin.html.markdown index 11fd019080a9..56fefeb446d1 100644 --- a/website/docs/cdktf/typescript/d/mskconnect_custom_plugin.html.markdown +++ b/website/docs/cdktf/typescript/d/mskconnect_custom_plugin.html.markdown @@ -38,6 +38,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the custom plugin. ## Attribute Reference @@ -50,4 +51,4 @@ This data source exports the following attributes in addition to the arguments a * `state` - the state of the custom plugin. * `tags` - A map of tags assigned to the resource. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/mskconnect_worker_configuration.html.markdown b/website/docs/cdktf/typescript/d/mskconnect_worker_configuration.html.markdown index 505ad3f49e74..095b8bebd906 100644 --- a/website/docs/cdktf/typescript/d/mskconnect_worker_configuration.html.markdown +++ b/website/docs/cdktf/typescript/d/mskconnect_worker_configuration.html.markdown @@ -38,6 +38,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the worker configuration. ## Attribute Reference @@ -50,4 +51,4 @@ This data source exports the following attributes in addition to the arguments a * `propertiesFileContent` - contents of connect-distributed.properties file. * `tags` - A map of tags assigned to the resource. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/nat_gateway.html.markdown b/website/docs/cdktf/typescript/d/nat_gateway.html.markdown index e88c167013b1..3c42c5ea60f1 100644 --- a/website/docs/cdktf/typescript/d/nat_gateway.html.markdown +++ b/website/docs/cdktf/typescript/d/nat_gateway.html.markdown @@ -63,6 +63,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
* `id` - (Optional) ID of the specific NAT Gateway to retrieve. * `subnetId` - (Optional) ID of subnet that the NAT Gateway resides in. * `vpcId` - (Optional) ID of the VPC that the NAT Gateway resides in. @@ -104,4 +105,4 @@ This data source exports the following attributes in addition to the arguments a - `read` - (Default `20m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/nat_gateways.html.markdown b/website/docs/cdktf/typescript/d/nat_gateways.html.markdown index 9027fd836fe1..28caac7f343e 100644 --- a/website/docs/cdktf/typescript/d/nat_gateways.html.markdown +++ b/website/docs/cdktf/typescript/d/nat_gateways.html.markdown @@ -58,6 +58,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `filter` - (Optional) Custom filter block as described below. * `vpcId` - (Optional) VPC ID that you want to filter from. * `tags` - (Optional) Map of tags, each pair of which must exactly match @@ -85,4 +86,4 @@ This data source exports the following attributes in addition to the arguments a - `read` - (Default `20m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/neptune_engine_version.html.markdown b/website/docs/cdktf/typescript/d/neptune_engine_version.html.markdown index 87686825b9c5..96d12236cff3 100644 --- a/website/docs/cdktf/typescript/d/neptune_engine_version.html.markdown +++ b/website/docs/cdktf/typescript/d/neptune_engine_version.html.markdown @@ -49,6 +49,7 @@ This data source supports the following arguments: * `preferredMajorTargets` - (Optional) Ordered list of preferred major engine versions. 
* `preferredUpgradeTargets` - (Optional) Ordered list of preferred upgrade engine versions. * `preferredVersions` - (Optional) Ordered list of preferred engine versions. The first match in this list will be returned. If no preferred matches are found and the original search returned more than one result, an error is returned. If both the `version` and `preferredVersions` arguments are not configured, the data source will return the default version for the engine. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `version` - (Optional) Version of the DB engine. For example, `1.0.1.0`, `1.0.2.2`, and `1.0.3.0`. If both the `version` and `preferredVersions` arguments are not configured, the data source will return the default version for the engine. ## Attribute Reference @@ -69,4 +70,4 @@ This data source exports the following attributes in addition to the arguments a * `versionActual` - Actual engine version returned by the API. * `versionDescription` - Description of the database engine version. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/neptune_orderable_db_instance.html.markdown b/website/docs/cdktf/typescript/d/neptune_orderable_db_instance.html.markdown index 2c209737e56f..cf42478e1b87 100644 --- a/website/docs/cdktf/typescript/d/neptune_orderable_db_instance.html.markdown +++ b/website/docs/cdktf/typescript/d/neptune_orderable_db_instance.html.markdown @@ -39,6 +39,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `engine` - (Optional) DB engine. (Default: `neptune`) * `engineVersion` - (Optional) Version of the DB engine. For example, `1.0.1.0`, `1.0.1.2`, `1.0.2.2`, and `1.0.3.0`. * `instanceClass` - (Optional) DB instance class. Examples of classes are `db.r5.large`, `db.r5.xlarge`, `db.r4.large`, `db.r5.4xlarge`, `db.r5.12xlarge`, `db.r4.xlarge`, and `db.t3.medium`. @@ -66,4 +67,4 @@ This data source exports the following attributes in addition to the arguments a * `supportsPerformanceInsights` - Whether a DB instance supports Performance Insights. * `supportsStorageEncryption` - Whether a DB instance supports encrypted storage. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/network_acls.html.markdown b/website/docs/cdktf/typescript/d/network_acls.html.markdown index 877b2607df2e..3cfa48f18142 100644 --- a/website/docs/cdktf/typescript/d/network_acls.html.markdown +++ b/website/docs/cdktf/typescript/d/network_acls.html.markdown @@ -98,6 +98,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `vpcId` - (Optional) VPC ID that you want to filter from. * `tags` - (Optional) Map of tags, each pair of which must exactly match a pair on the desired network ACLs. 
@@ -125,4 +126,4 @@ This data source exports the following attributes in addition to the arguments a - `read` - (Default `20m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/network_interface.html.markdown b/website/docs/cdktf/typescript/d/network_interface.html.markdown index fcf9282ae431..29bc6155f51d 100644 --- a/website/docs/cdktf/typescript/d/network_interface.html.markdown +++ b/website/docs/cdktf/typescript/d/network_interface.html.markdown @@ -38,15 +38,17 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: -* `id` – (Optional) Identifier for the network interface. -* `filter` – (Optional) One or more name/value pairs to filter off of. There are several valid keys, for a full reference, check out [describe-network-interfaces](https://docs.aws.amazon.com/cli/latest/reference/ec2/describe-network-interfaces.html) in the AWS CLI reference. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `id` - (Optional) Identifier for the network interface. +* `filter` - (Optional) One or more name/value pairs to filter off of. There are several valid keys, for a full reference, check out [describe-network-interfaces](https://docs.aws.amazon.com/cli/latest/reference/ec2/describe-network-interfaces.html) in the AWS CLI reference. ## Attribute Reference This data source exports the following attributes in addition to the arguments above: * `arn` - ARN of the network interface. -* `association` - Association information for an Elastic IP address (IPv4) associated with the network interface. See supported fields below. 
+* `association` - Association information for an Elastic IP address (IPv4) associated with the network interface. See [association](#association) below. +* `attachment` - Attachment of the ENI. See [attachment](#attachment) below. * `availabilityZone` - Availability Zone. * `description` - Description of the network interface. * `interfaceType` - Type of interface. @@ -73,10 +75,18 @@ This data source exports the following attributes in addition to the arguments a * `public_dns_name` - Public DNS name. * `publicIp` - Address of the Elastic IP address bound to the network interface. +### `attachment` + +* `attachmentId` - ID of the network interface attachment. +* `deviceIndex` - Device index of the network interface attachment on the instance. +* `instanceId` - ID of the instance. +* `instanceOwnerId` - AWS account ID of the owner of the instance. +* `networkCardIndex` - Index of the network card. + ## Timeouts [Configuration options](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts): - `read` - (Default `20m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/network_interfaces.html.markdown b/website/docs/cdktf/typescript/d/network_interfaces.html.markdown index f2e299c08c4a..710f68318f54 100644 --- a/website/docs/cdktf/typescript/d/network_interfaces.html.markdown +++ b/website/docs/cdktf/typescript/d/network_interfaces.html.markdown @@ -101,6 +101,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `tags` - (Optional) Map of tags, each pair of which must exactly match a pair on the desired network interfaces. 
* `filter` - (Optional) Custom filter block as described below. @@ -126,4 +127,4 @@ This data source exports the following attributes in addition to the arguments a - `read` - (Default `20m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/networkfirewall_firewall.html.markdown b/website/docs/cdktf/typescript/d/networkfirewall_firewall.html.markdown index 8bd2f0153b18..1fbe04ce7c81 100644 --- a/website/docs/cdktf/typescript/d/networkfirewall_firewall.html.markdown +++ b/website/docs/cdktf/typescript/d/networkfirewall_firewall.html.markdown @@ -85,6 +85,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `arn` - ARN of the firewall. * `name` - Descriptive name of the firewall. @@ -95,6 +96,9 @@ One or more of these arguments is required. This data source exports the following attributes in addition to the arguments above: * `arn` - ARN of the firewall. +* `availabilityZoneChangeProtection` - Indicates whether the firewall is protected against changes to its Availability Zone configuration. +* `availabilityZoneMapping` - Set of Availability Zones where the firewall endpoints are created for a transit gateway-attached firewall. + * `availabilityZoneId` - The ID of the Availability Zone where the firewall endpoint is located. * `deleteProtection` - A flag indicating whether the firewall is protected against deletion. * `description` - Description of the firewall. * `enabledAnalysisTypes` - Set of types for which to collect analysis metrics. 
@@ -107,6 +111,8 @@ This data source exports the following attributes in addition to the arguments a * `sync_states` - Set of subnets configured for use by the firewall. * `attachment` - Nested list describing the attachment status of the firewall's association with a single VPC subnet. * `endpointId` - The identifier of the firewall endpoint that AWS Network Firewall has instantiated in the subnet. You use this to identify the firewall endpoint in the VPC route tables, when you redirect the VPC traffic through the endpoint. + * `status` - The current status of the firewall endpoint instantiation in the subnet. + * `statusMessage` - AWS Network Firewall populates this with the reason for the error or failure and how to resolve it. A FAILED status indicates a non-recoverable state, and an ERROR status indicates an issue that you can fix. * `subnetId` - The unique identifier of the subnet that you've specified to be used for a firewall endpoint. * `availabilityZone` - The Availability Zone where the subnet is configured. * `capacity_usage_summary` - Aggregated count of all resources used by reference sets in a firewall. @@ -116,6 +122,10 @@ This data source exports the following attributes in addition to the arguments a * `resolved_cidr_count` - Total number of CIDR blocks used by the IP set references in a firewall. * `utilized_cidr_count` - Number of CIDR blocks used by the IP set references in a firewall. * `configuration_sync_state_summary` - Summary of sync states for all availability zones in which the firewall is configured. + * `transit_gateway_attachment_sync_states` - Set of transit gateway attachments configured for use by the firewall. + * `attachmentId` - The unique identifier of the transit gateway attachment. + * `statusMessage` - A message providing additional information about the current status. + * `transit_gateway_attachment_status` - The current status of the transit gateway attachment. * `id` - ARN that identifies the firewall. * `name` - Descriptive name of the firewall.
* `subnetChangeProtection` - A flag indicating whether the firewall is protected against changes to the subnet associations. @@ -123,6 +133,8 @@ This data source exports the following attributes in addition to the arguments a * `subnetId` - The unique identifier for the subnet. * `tags` - Map of resource tags to associate with the resource. If configured with a provider [`defaultTags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. * `updateToken` - String token used when updating a firewall. +* `transitGatewayId` - The unique identifier of the transit gateway associated with this firewall. +* `transitGatewayOwnerAccountId` - The AWS account ID that owns the transit gateway. * `vpcId` - Unique identifier of the VPC where AWS Network Firewall should create the firewall. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/networkfirewall_firewall_policy.html.markdown b/website/docs/cdktf/typescript/d/networkfirewall_firewall_policy.html.markdown index 4a50f0da34e8..5067b496f139 100644 --- a/website/docs/cdktf/typescript/d/networkfirewall_firewall_policy.html.markdown +++ b/website/docs/cdktf/typescript/d/networkfirewall_firewall_policy.html.markdown @@ -89,6 +89,7 @@ AWS Network Firewall does not allow multiple firewall policies with the same nam This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `arn` - ARN of the firewall policy. * `name` - Descriptive name of the firewall policy. 
@@ -106,4 +107,4 @@ This data source exports the following attributes in addition to the arguments a [1]: https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/ram_resource_share [2]: https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/networkfirewall_firewall_policy - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/networkfirewall_resource_policy.html.markdown b/website/docs/cdktf/typescript/d/networkfirewall_resource_policy.html.markdown index be097d81d4af..88c5ef7eb99a 100644 --- a/website/docs/cdktf/typescript/d/networkfirewall_resource_policy.html.markdown +++ b/website/docs/cdktf/typescript/d/networkfirewall_resource_policy.html.markdown @@ -38,6 +38,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `resourceArn` - (Required) The Amazon Resource Name (ARN) that identifies the resource policy. 
## Attribute Reference @@ -49,4 +50,4 @@ This data source exports the following attributes in addition to the arguments a [1]: https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/networkfirewall_resource_policy - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/networkmanager_connection.html.markdown b/website/docs/cdktf/typescript/d/networkmanager_connection.html.markdown index 4626888e65d0..9ff17841cfdc 100644 --- a/website/docs/cdktf/typescript/d/networkmanager_connection.html.markdown +++ b/website/docs/cdktf/typescript/d/networkmanager_connection.html.markdown @@ -3,14 +3,14 @@ subcategory: "Network Manager" layout: "aws" page_title: "AWS: aws_networkmanager_connection" description: |- - Retrieve information about a connection. + Provides details about an existing Network Manager connection. --- # Data Source: aws_networkmanager_connection -Retrieve information about a connection. +Provides details about an existing Network Manager connection. ## Example Usage @@ -54,4 +54,4 @@ This data source exports the following attributes in addition to the arguments a * `linkId` - ID of the link for the first device. * `tags` - Key-value tags for the connection. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/networkmanager_connections.html.markdown b/website/docs/cdktf/typescript/d/networkmanager_connections.html.markdown index 07f0fa7a750a..57ec6f23f9c3 100644 --- a/website/docs/cdktf/typescript/d/networkmanager_connections.html.markdown +++ b/website/docs/cdktf/typescript/d/networkmanager_connections.html.markdown @@ -3,14 +3,14 @@ subcategory: "Network Manager" layout: "aws" page_title: "AWS: aws_networkmanager_connections" description: |- - Retrieve information about connections. + Provides details about existing Network Manager connections. --- # Data Source: aws_networkmanager_connections -Retrieve information about connections. 
+Provides details about existing Network Manager connections. ## Example Usage @@ -51,4 +51,4 @@ This data source exports the following attributes in addition to the arguments a * `ids` - IDs of the connections. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/networkmanager_core_network_policy_document.html.markdown b/website/docs/cdktf/typescript/d/networkmanager_core_network_policy_document.html.markdown index 0f557d7e255f..0c6afdf6a325 100644 --- a/website/docs/cdktf/typescript/d/networkmanager_core_network_policy_document.html.markdown +++ b/website/docs/cdktf/typescript/d/networkmanager_core_network_policy_document.html.markdown @@ -3,7 +3,7 @@ subcategory: "Network Manager" layout: "aws" page_title: "AWS: aws_networkmanager_core_network_policy_document" description: |- - Generates an Core Network policy document in JSON format + Generates a Core Network policy document in JSON format --- @@ -236,6 +236,8 @@ The following arguments are available: * `insideCidrBlocks` (Optional) - The Classless Inter-Domain Routing (CIDR) block range used to create tunnels for AWS Transit Gateway Connect. The format is standard AWS CIDR range (for example, `10.0.1.0/24`). You can optionally define the inside CIDR in the Core Network Edges section per Region. The minimum is a `/24` for IPv4 or `/64` for IPv6. You can provide multiple `/24` subnets or a larger CIDR range. If you define a larger CIDR range, new Core Network Edges will be automatically assigned `/24` and `/64` subnets from the larger CIDR. an Inside CIDR block is required for attaching Connect attachments to a Core Network Edge. * `vpnEcmpSupport` (Optional) - Indicates whether the core network forwards traffic over multiple equal-cost routes using VPN. The value can be either `true` or `false`. The default is `true`. * `edgeLocations` (Required) - A block value of AWS Region locations where you're creating Core Network Edges. Detailed below. 
+* `dnsSupport` (Optional) - Indicates whether DNS resolution is enabled for the core network. The value can be either `true` or `false`. When set to `true`, DNS resolution is enabled for VPCs attached to the core network, allowing resources in different VPCs to resolve each other's domain names. The default is `true`. +* `securityGroupReferencingSupport` (Optional) - Indicates whether security group referencing is enabled for the core network. The value can be either `true` or `false`. When set to `true`, security groups in one VPC can reference security groups in another VPC attached to the core network, enabling more flexible security configurations across your network. The default is `false`. ### `edgeLocations` @@ -294,4 +296,4 @@ This data source exports the following attributes in addition to the arguments a * `json` - Standard JSON policy document rendered based on the arguments above. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/networkmanager_device.html.markdown b/website/docs/cdktf/typescript/d/networkmanager_device.html.markdown index d157faf584ff..cdb95dd4b80b 100644 --- a/website/docs/cdktf/typescript/d/networkmanager_device.html.markdown +++ b/website/docs/cdktf/typescript/d/networkmanager_device.html.markdown @@ -3,14 +3,14 @@ subcategory: "Network Manager" layout: "aws" page_title: "AWS: aws_networkmanager_device" description: |- - Retrieve information about a device. + Provides details about an existing Network Manager device. --- # Data Source: aws_networkmanager_device -Retrieve information about a device. +Provides details about an existing Network Manager device. ## Example Usage @@ -23,16 +23,12 @@ import { TerraformStack } from "cdktf"; * See https://cdk.tf/provider-generation for more details. 
*/ import { DataAwsNetworkmanagerDevice } from "./.gen/providers/aws/data-aws-networkmanager-device"; -interface MyConfig { - globalNetworkId: any; -} class MyConvertedCode extends TerraformStack { - constructor(scope: Construct, name: string, config: MyConfig) { + constructor(scope: Construct, name: string) { super(scope, name); new DataAwsNetworkmanagerDevice(this, "example", { deviceId: deviceId.stringValue, - global_network_id_id: globalNetworkId.value, - globalNetworkId: config.globalNetworkId, + globalNetworkId: globalNetworkId.stringValue, }); } } @@ -72,4 +68,4 @@ The `location` object supports the following: * `latitude` - Latitude. * `longitude` - Longitude. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/networkmanager_devices.html.markdown b/website/docs/cdktf/typescript/d/networkmanager_devices.html.markdown index 0010398db32d..8ce2c5cf6595 100644 --- a/website/docs/cdktf/typescript/d/networkmanager_devices.html.markdown +++ b/website/docs/cdktf/typescript/d/networkmanager_devices.html.markdown @@ -3,14 +3,14 @@ subcategory: "Network Manager" layout: "aws" page_title: "AWS: aws_networkmanager_devices" description: |- - Retrieve information about devices. + Provides details about existing Network Manager devices. --- # Data Source: aws_networkmanager_devices -Retrieve information about devices. +Provides details about existing Network Manager devices. ## Example Usage @@ -51,4 +51,4 @@ This data source exports the following attributes in addition to the arguments a * `ids` - IDs of the devices. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/networkmanager_global_network.html.markdown b/website/docs/cdktf/typescript/d/networkmanager_global_network.html.markdown index d081abab2120..77dfb9e26d59 100644 --- a/website/docs/cdktf/typescript/d/networkmanager_global_network.html.markdown +++ b/website/docs/cdktf/typescript/d/networkmanager_global_network.html.markdown @@ -3,14 +3,14 @@ subcategory: "Network Manager" layout: "aws" page_title: "AWS: aws_networkmanager_global_network" description: |- - Retrieve information about a global network. + Provides details about an existing Network Manager global network. --- # Data Source: aws_networkmanager_global_network -Retrieve information about a global network. +Provides details about an existing Network Manager global network. ## Example Usage @@ -48,4 +48,4 @@ This data source exports the following attributes in addition to the arguments a * `description` - Description of the global network. * `tags` - Map of resource tags. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/networkmanager_global_networks.html.markdown b/website/docs/cdktf/typescript/d/networkmanager_global_networks.html.markdown index 2e3f65d7c131..639bae9c87ef 100644 --- a/website/docs/cdktf/typescript/d/networkmanager_global_networks.html.markdown +++ b/website/docs/cdktf/typescript/d/networkmanager_global_networks.html.markdown @@ -3,14 +3,14 @@ subcategory: "Network Manager" layout: "aws" page_title: "AWS: aws_networkmanager_global_networks" description: |- - Retrieve information about global networks. + Provides details about existing Network Manager global networks. --- # Data Source: aws_networkmanager_global_networks -Retrieve information about global networks. +Provides details about existing Network Manager global networks. 
## Example Usage @@ -48,4 +48,4 @@ This data source exports the following attributes in addition to the arguments a * `ids` - IDs of the global networks. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/networkmanager_link.html.markdown b/website/docs/cdktf/typescript/d/networkmanager_link.html.markdown index b010d3c5e2bc..8ab16e354670 100644 --- a/website/docs/cdktf/typescript/d/networkmanager_link.html.markdown +++ b/website/docs/cdktf/typescript/d/networkmanager_link.html.markdown @@ -3,14 +3,14 @@ subcategory: "Network Manager" layout: "aws" page_title: "AWS: aws_networkmanager_link" description: |- - Retrieve information about a link. + Provides details about an existing Network Manager link. --- # Data Source: aws_networkmanager_link -Retrieve information about a link. +Provides details about an existing Network Manager link. ## Example Usage @@ -59,4 +59,4 @@ The `bandwidth` object supports the following: * `downloadSpeed` - Download speed in Mbps. * `uploadSpeed` - Upload speed in Mbps. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/networkmanager_links.html.markdown b/website/docs/cdktf/typescript/d/networkmanager_links.html.markdown index 48ff5bca3852..b4c3e02cfcaa 100644 --- a/website/docs/cdktf/typescript/d/networkmanager_links.html.markdown +++ b/website/docs/cdktf/typescript/d/networkmanager_links.html.markdown @@ -3,14 +3,14 @@ subcategory: "Network Manager" layout: "aws" page_title: "AWS: aws_networkmanager_links" description: |- - Retrieve information about links. + Provides details about existing Network Manager links. --- # Data Source: aws_networkmanager_links -Retrieve information about link. +Provides details about existing Network Manager links. ## Example Usage @@ -53,4 +53,4 @@ This data source exports the following attributes in addition to the arguments a * `ids` - IDs of the links. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/networkmanager_site.html.markdown b/website/docs/cdktf/typescript/d/networkmanager_site.html.markdown index 2f27dc3e4ad7..1121eb55f0aa 100644 --- a/website/docs/cdktf/typescript/d/networkmanager_site.html.markdown +++ b/website/docs/cdktf/typescript/d/networkmanager_site.html.markdown @@ -3,14 +3,14 @@ subcategory: "Network Manager" layout: "aws" page_title: "AWS: aws_networkmanager_site" description: |- - Retrieve information about a site. + Provides details about an existing Network Manager site. --- # Data Source: aws_networkmanager_site -Retrieve information about a site. +Provides details about an existing Network Manager site. ## Example Usage @@ -57,4 +57,4 @@ The `location` object supports the following: * `latitude` - Latitude of the location. * `longitude` - Longitude of the location. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/networkmanager_sites.html.markdown b/website/docs/cdktf/typescript/d/networkmanager_sites.html.markdown index 61d2cfe82e86..77a8d666c560 100644 --- a/website/docs/cdktf/typescript/d/networkmanager_sites.html.markdown +++ b/website/docs/cdktf/typescript/d/networkmanager_sites.html.markdown @@ -3,14 +3,14 @@ subcategory: "Network Manager" layout: "aws" page_title: "AWS: aws_networkmanager_sites" description: |- - Retrieve information about sites. + Provides details about existing Network Manager sites. --- # Data Source: aws_networkmanager_sites -Retrieve information about sites. +Provides details about existing Network Manager sites. ## Example Usage @@ -50,4 +50,4 @@ This data source exports the following attributes in addition to the arguments a * `ids` - IDs of the sites. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/oam_link.html.markdown b/website/docs/cdktf/typescript/d/oam_link.html.markdown index 9862f688d8b1..a51011a8ab31 100644 --- a/website/docs/cdktf/typescript/d/oam_link.html.markdown +++ b/website/docs/cdktf/typescript/d/oam_link.html.markdown @@ -39,8 +39,9 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -The following arguments are required: +This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `linkIdentifier` - (Required) ARN of the link. ## Attribute Reference @@ -75,4 +76,4 @@ The `metricConfiguration` configuration block supports the following arguments: * `filter` - Filter string that specifies which metrics are to be shared with the monitoring account. See [MetricConfiguration](https://docs.aws.amazon.com/OAM/latest/APIReference/API_MetricConfiguration.html) for details. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/oam_links.html.markdown b/website/docs/cdktf/typescript/d/oam_links.html.markdown index e457d4975e05..85f97167543d 100644 --- a/website/docs/cdktf/typescript/d/oam_links.html.markdown +++ b/website/docs/cdktf/typescript/d/oam_links.html.markdown @@ -36,7 +36,9 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -This data source does not support any arguments. +This data source supports the following arguments: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). ## Attribute Reference @@ -44,4 +46,4 @@ This data source exports the following attributes in addition to the arguments a * `arns` - Set of ARN of the Links. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/oam_sink.html.markdown b/website/docs/cdktf/typescript/d/oam_sink.html.markdown index fee859c980c0..3987c586892a 100644 --- a/website/docs/cdktf/typescript/d/oam_sink.html.markdown +++ b/website/docs/cdktf/typescript/d/oam_sink.html.markdown @@ -39,8 +39,9 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -The following arguments are required: +This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `sinkIdentifier` - (Required) ARN of the sink. ## Attribute Reference @@ -53,4 +54,4 @@ This data source exports the following attributes in addition to the arguments a * `sinkId` - Random ID string that AWS generated as part of the sink ARN. * `tags` - Tags assigned to the sink. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/oam_sinks.html.markdown b/website/docs/cdktf/typescript/d/oam_sinks.html.markdown index dae1238d4330..ea29d7347728 100644 --- a/website/docs/cdktf/typescript/d/oam_sinks.html.markdown +++ b/website/docs/cdktf/typescript/d/oam_sinks.html.markdown @@ -36,7 +36,9 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -This data source does not support any arguments. 
+This data source supports the following arguments: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). ## Attribute Reference @@ -44,4 +46,4 @@ This data source exports the following attributes in addition to the arguments a * `arns` - Set of ARN of the Sinks. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/odb_cloud_autonomous_vm_cluster.html.markdown b/website/docs/cdktf/typescript/d/odb_cloud_autonomous_vm_cluster.html.markdown new file mode 100644 index 000000000000..a9d194550248 --- /dev/null +++ b/website/docs/cdktf/typescript/d/odb_cloud_autonomous_vm_cluster.html.markdown @@ -0,0 +1,102 @@ +--- +subcategory: "Oracle Database@AWS" +layout: "AWS: aws_odb_cloud_autonomous_vm_cluster" +page_title: "AWS: aws_odb_cloud_autonomous_vm_cluster" +description: |- + Terraform data source for managing cloud autonomous vm cluster resource in AWS for Oracle Database@AWS. +--- + + + +# Data Source: aws_odb_cloud_autonomous_vm_cluster + +Terraform data source for managing cloud autonomous vm cluster resource in AWS for Oracle Database@AWS. + +You can find out more about Oracle Database@AWS from [User Guide](https://docs.aws.amazon.com/odb/latest/UserGuide/what-is-odb.html). + +## Example Usage + +### Basic Usage + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. 
+ */ +import { DataAwsOdbCloudAutonomousVmCluster } from "./.gen/providers/aws/data-aws-odb-cloud-autonomous-vm-cluster"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + new DataAwsOdbCloudAutonomousVmCluster(this, "example", { + id: "example", + }); + } +} + +``` + +## Argument Reference + +The following arguments are optional: + +* `id` - (Required) The unique identifier of the cloud autonomous vm cluster. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). + +## Attribute Reference + +This data source exports the following attributes in addition to the arguments above: + +* `arn` - The Amazon Resource Name (ARN) for the Exadata infrastructure. +* `cloudExadataInfrastructureId` - Cloud exadata infrastructure id associated with this cloud autonomous VM cluster. +* `autonomousDataStoragePercentage` - The percentage of data storage currently in use for Autonomous Databases in the Autonomous VM cluster. +* `autonomousDataStorageSizeInTbs` - The data storage size allocated for Autonomous Databases in the Autonomous VM cluster, in TB. +* `availableAutonomousDataStorageSizeInTbs` - The available data storage space for Autonomous Databases in the Autonomous VM cluster, in TB. +* `availableContainerDatabases` - The number of Autonomous CDBs that you can create with the currently available storage. +* `availableCpus` - The number of CPU cores available for allocation to Autonomous Databases. +* `computeModel` - The compute model of the Autonomous VM cluster: ECPU or OCPU. +* `cpuCoreCount` - The total number of CPU cores in the Autonomous VM cluster. +* `cpuCoreCountPerNode` - The number of CPU cores enabled per node in the Autonomous VM cluster. 
+* `cpuPercentage` - The percentage of total CPU cores currently in use in the Autonomous VM cluster. +* `createdAt` - The date and time when the Autonomous VM cluster was created. +* `dataStorageSizeInGbs` - The total data storage allocated to the Autonomous VM cluster, in GB. +* `dataStorageSizeInTbs` - The total data storage allocated to the Autonomous VM cluster, in TB. +* `odbNodeStorageSizeInGbs` - The local node storage allocated to the Autonomous VM cluster, in gigabytes (GB). +* `dbServers` - The list of database servers associated with the Autonomous VM cluster. +* `description` - The user-provided description of the Autonomous VM cluster. +* `displayName` - The display name of the Autonomous VM cluster. +* `domain` - The domain name of the Autonomous VM cluster. +* `exadataStorageInTbsLowestScaledValue` - The minimum value to which you can scale down the Exadata storage, in TB. +* `hostname` - The hostname of the Autonomous VM cluster. +* `isMtlsEnabledVmCluster` - Indicates whether mutual TLS (mTLS) authentication is enabled for the Autonomous VM cluster. +* `licenseModel` - The Oracle license model that applies to the Autonomous VM cluster. Valid values are LICENSE_INCLUDED or BRING_YOUR_OWN_LICENSE. +* `maxAcdsLowestScaledValue` - The minimum value to which you can scale down the maximum number of Autonomous CDBs. +* `memoryPerOracleComputeUnitInGbs` - The amount of memory allocated per Oracle Compute Unit, in GB. +* `memorySizeInGbs` - The total amount of memory allocated to the Autonomous VM cluster, in gigabytes (GB). +* `nodeCount` - The number of database server nodes in the Autonomous VM cluster. +* `nonProvisionableAutonomousContainerDatabases` - The number of Autonomous CDBs that can't be provisioned because of resource constraints. +* `ociResourceAnchorName` - The name of the OCI resource anchor associated with this Autonomous VM cluster. +* `ociUrl` - The URL for accessing the OCI console page for this Autonomous VM cluster. 
+* `ocid` - The Oracle Cloud Identifier (OCID) of the Autonomous VM cluster. +* `odbNetworkId` - The unique identifier of the ODB network associated with this Autonomous VM cluster. +* `percentProgress` - The progress of the current operation on the Autonomous VM cluster, as a percentage. +* `provisionableAutonomousContainerDatabases` - The number of Autonomous CDBs that can be provisioned in the Autonomous VM cluster. +* `provisionedAutonomousContainerDatabases` - The number of Autonomous CDBs currently provisioned in the Autonomous VM cluster. +* `provisionedCpus` - The number of CPU cores currently provisioned in the Autonomous VM cluster. +* `reclaimableCpus` - The number of CPU cores that can be reclaimed from terminated or scaled-down Autonomous Databases. +* `reservedCpus` - The number of CPU cores reserved for system operations and redundancy. +* `scanListenerPortNonTls` - The SCAN listener port for non-TLS (TCP) protocol. The default is 1521. +* `scanListenerPortTls` - The SCAN listener port for TLS (TCP) protocol. The default is 2484. +* `shape` - The shape of the Exadata infrastructure for the Autonomous VM cluster. +* `status` - The status of the Autonomous VM cluster. +* `statusReason` - Additional information about the current status of the Autonomous VM cluster. +* `timeDatabaseSslCertificateExpires` - The expiration date and time of the database SSL certificate. +* `timeOrdsCertificateExpires` - The expiration date and time of the Oracle REST Data Services (ORDS) certificate. +* `timeZone` - The time zone of the Autonomous VM cluster. +* `totalContainerDatabases` - The total number of Autonomous Container Databases that can be created with the allocated local storage. +* `tags` - A map of tags to assign to the exadata infrastructure. 
If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. +* `maintenanceWindow` - The maintenance window for the Autonomous VM cluster. + + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/odb_cloud_exadata_infrastructure.html.markdown b/website/docs/cdktf/typescript/d/odb_cloud_exadata_infrastructure.html.markdown new file mode 100644 index 000000000000..ded963c6ee44 --- /dev/null +++ b/website/docs/cdktf/typescript/d/odb_cloud_exadata_infrastructure.html.markdown @@ -0,0 +1,93 @@ +--- +subcategory: "Oracle Database@AWS" +layout: "AWS: aws_odb_cloud_exadata_infrastructure" +page_title: "AWS: aws_odb_cloud_exadata_infrastructure" +description: |- + Terraform data source for managing exadata infrastructure resource in AWS for Oracle Database@AWS. +--- + + + +# Data Source: aws_odb_cloud_exadata_infrastructure + +Terraform data source for exadata infrastructure resource in AWS for Oracle Database@AWS. + +You can find out more about Oracle Database@AWS from [User Guide](https://docs.aws.amazon.com/odb/latest/UserGuide/what-is-odb.html). + +## Example Usage + +### Basic Usage + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. 
+ */ +import { DataAwsOdbCloudExadataInfrastructure } from "./.gen/providers/aws/data-aws-odb-cloud-exadata-infrastructure"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + new DataAwsOdbCloudExadataInfrastructure(this, "example", { + id: "example", + }); + } +} + +``` + +## Argument Reference + +The following arguments are required: + +* `id` - (Required) The unique identifier of the Exadata infrastructure. + +The following arguments are optional: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). + +## Attribute Reference + +This data source exports the following attributes in addition to the arguments above: + +* `activatedStorageCount` - The number of storage servers requested for the Exadata infrastructure. +* `additionalStorageCount` - The number of storage servers requested for the Exadata infrastructure. +* `availabilityZone` - The name of the Availability Zone (AZ) where the Exadata infrastructure is located. +* `availabilityZoneId` - The AZ ID of the AZ where the Exadata infrastructure is located. +* `arn` - The Amazon Resource Name (ARN) for the Exadata infrastructure. +* `id` - The unique identifier of the Exadata infrastructure. +* `computeCount` - The number of database servers for the Exadata infrastructure. +* `cpuCount` - The total number of CPU cores that are allocated to the Exadata infrastructure. +* `dataStorageSizeInTbs` - The size of the Exadata infrastructure's data disk group, in terabytes (TB). +* `dbNodeStorageSizeInGbs` - The size of the storage available on each database node, in gigabytes (GB). +* `dbServerVersion` - The version of the Exadata infrastructure. 
+* `displayName` - The display name of the Exadata infrastructure. +* `lastMaintenanceRunId` - The Oracle Cloud Identifier (OCID) of the last maintenance run for the Exadata infrastructure. +* `maxCpuCount` - The total number of CPU cores available on the Exadata infrastructure. +* `maxDataStorageInTbs` - The total amount of data disk group storage, in terabytes (TB), that's available on the Exadata infrastructure. +* `maxDbNodeStorageSizeInGbs` - The total amount of local node storage, in gigabytes (GB), that's available on the Exadata infrastructure. +* `maxMemoryInGbs` - The total amount of memory, in gigabytes (GB), that's available on the Exadata infrastructure. +* `memorySizeInGbs` - The amount of memory, in gigabytes (GB), that's allocated on the Exadata infrastructure. +* `monthlyDbServerVersion` - The monthly software version of the database servers installed on the Exadata infrastructure. +* `monthlyStorageServerVersion` - The monthly software version of the storage servers installed on the Exadata infrastructure. +* `nextMaintenanceRunId` - The OCID of the next maintenance run for the Exadata infrastructure. +* `ociResourceAnchorName` - The name of the OCI resource anchor for the Exadata infrastructure. +* `ociUrl` - The HTTPS link to the Exadata infrastructure in OCI. +* `ocid` - The OCID of the Exadata infrastructure in OCI. +* `percentProgress` - The amount of progress made on the current operation on the Exadata infrastructure expressed as a percentage. +* `shape` - The model name of the Exadata infrastructure. +* `status` - The status of the Exadata infrastructure. +* `statusReason` - Additional information about the status of the Exadata infrastructure. +* `storageCount` - The number of storage servers that are activated for the Exadata infrastructure. +* `storageServerVersion` - The software version of the storage servers on the Exadata infrastructure. 
+* `totalStorageSizeInGbs` - The total amount of storage, in gigabytes (GB), on the Exadata infrastructure. +* `computeModel` - The OCI compute model used when you create or clone an instance: ECPU or OCPU. An ECPU is an abstracted measure of compute resources. ECPUs are based on the number of cores elastically allocated from a pool of compute and storage servers. An OCPU is a legacy physical measure of compute resources. OCPUs are based on the physical core of a processor with hyper-threading enabled. +* `createdAt` - The time when the Exadata infrastructure was created. +* `databaseServerType` - The database server model type of the Exadata infrastructure. For the list of valid model names, use the ListDbSystemShapes operation. +* `storageServerType` - The storage server model type of the Exadata infrastructure. For the list of valid model names, use the ListDbSystemShapes operation. +* `maintenanceWindow` - The scheduling details of the maintenance window. Patching and system updates take place during the maintenance window. +* `tags` - (Optional) A map of tags to assign to the exadata infrastructure. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. + + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/odb_cloud_vm_cluster.html.markdown b/website/docs/cdktf/typescript/d/odb_cloud_vm_cluster.html.markdown new file mode 100644 index 000000000000..d1b766ecfd79 --- /dev/null +++ b/website/docs/cdktf/typescript/d/odb_cloud_vm_cluster.html.markdown @@ -0,0 +1,95 @@ +--- +subcategory: "Oracle Database@AWS" +layout: "AWS: aws_odb_cloud_vm_cluster" +page_title: "AWS: aws_odb_cloud_vm_cluster" +description: |- + Terraform data source for managing cloud vm cluster resource in AWS for Oracle Database@AWS. 
+--- + + + +# Data Source: aws_odb_cloud_vm_cluster + +Terraform data source for cloud vm cluster resource in AWS for Oracle Database@AWS. + +You can find out more about Oracle Database@AWS from [User Guide](https://docs.aws.amazon.com/odb/latest/UserGuide/what-is-odb.html). + +## Example Usage + +### Basic Usage + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. + */ +import { DataAwsOdbCloudVmCluster } from "./.gen/providers/aws/"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + new DataAwsOdbCloudVmCluster(this, "example", { + id: "example-id", + }); + } +} + +``` + +## Argument Reference + +The following arguments are required: + +* `id` - (Required) The unique identifier of the cloud vm cluster. + +The following arguments are optional: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). + +## Attribute Reference + +This data source exports the following attributes in addition to the arguments above: + +* `arn` - The Amazon Resource Name (ARN) for the cloud vm cluster. +* `cloudExadataInfrastructureId` - The ID of the Cloud Exadata Infrastructure. +* `clusterName` - The name of the Grid Infrastructure (GI) cluster. +* `cpuCoreCount` - The number of CPU cores enabled on the VM cluster. +* `dataStorageSizeInTbs` - The size of the data disk group, in terabytes (TB), that's allocated for the VM cluster. 
+* `dbNodeStorageSizeInGbs` - The amount of local node storage, in gigabytes (GB), that's allocated for the VM cluster. +* `dbServers` - The list of database servers for the VM cluster. +* `diskRedundancy` - The type of redundancy configured for the VM cluster. NORMAL is 2-way redundancy. HIGH is 3-way redundancy. +* `displayName` - The display name of the VM cluster. +* `domain` - The domain name of the VM cluster. +* `giVersion` - The software version of the Oracle Grid Infrastructure (GI) for the VM cluster. +* `hostnamePrefixComputed` - The computed hostname prefix for the VM cluster. +* `isLocalBackupEnabled` - Indicates whether database backups to local Exadata storage is enabled for the VM cluster. +* `isSparseDiskGroupEnabled` - Indicates whether the VM cluster is configured with a sparse disk group. +* `lastUpdateHistoryEntryId` - The Oracle Cloud ID (OCID) of the last maintenance update history entry. +* `licenseModel` - The Oracle license model applied to the VM cluster. +* `listenerPort` - The port number configured for the listener on the VM cluster. +* `memorySizeInGbs` - The amount of memory, in gigabytes (GB), that's allocated for the VM cluster. +* `nodeCount` - The number of nodes in the VM cluster. +* `ocid` - The OCID of the VM cluster. +* `ociResourceAnchorName` - The name of the OCI Resource Anchor. +* `ociUrl` - The HTTPS link to the VM cluster in OCI. +* `odbNetworkId` - The ID of the ODB network. +* `percentProgress` - The amount of progress made on the current operation on the VM cluster, expressed as a percentage. +* `scanDnsName` - The FQDN of the DNS record for the Single Client Access Name (SCAN) IP addresses that are associated with the VM cluster. +* `scanDnsRecordId` - The OCID of the DNS record for the SCAN IP addresses that are associated with the VM cluster. +* `scanIpIds` - The OCID of the SCAN IP addresses that are associated with the VM cluster. 
+* `shape` - The hardware model name of the Exadata infrastructure that's running the VM cluster. +* `sshPublicKeys` - The public key portion of one or more key pairs used for SSH access to the VM cluster. +* `status` - The status of the VM cluster. +* `statusReason` - Additional information about the status of the VM cluster. +* `storageSizeInGbs` - The amount of local node storage, in gigabytes (GB), that's allocated to the VM cluster. +* `systemVersion` - The operating system version of the image chosen for the VM cluster. +* `timezone` - The time zone of the VM cluster. +* `vipIds` - The virtual IP (VIP) addresses that are associated with the VM cluster. Oracle's Cluster Ready Services (CRS) creates and maintains one VIP address for each node in the VM cluster to enable failover. If one node fails, the VIP is reassigned to another active node in the cluster. +* `createdAt` - The time when the VM cluster was created. +* `computeModel` - The OCI model compute model used when you create or clone an instance: ECPU or OCPU. An ECPU is an abstracted measure of compute resources. ECPUs are based on the number of cores elastically allocated from a pool of compute and storage servers. An OCPU is a legacy physical measure of compute resources. OCPUs are based on the physical core of a processor with hyper-threading enabled. +* `dataCollectionOptions` - The set of diagnostic collection options enabled for the VM cluster. +* `iormConfigCache` - The ExadataIormConfig cache details for the VM cluster. 
+ + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/odb_db_node.html.markdown b/website/docs/cdktf/typescript/d/odb_db_node.html.markdown new file mode 100644 index 000000000000..7cc896a32c10 --- /dev/null +++ b/website/docs/cdktf/typescript/d/odb_db_node.html.markdown @@ -0,0 +1,85 @@ +--- +subcategory: "Oracle Database@AWS" +layout: "AWS: aws_odb_db_node" +page_title: "AWS: aws_odb_db_node" +description: |- + Terraform data source for managing db node linked to cloud vm cluster of Oracle Database@AWS. +--- + + + +# Data Source: aws_odb_db_node + +Terraform data source for manging db nodes linked to cloud vm cluster of Oracle Database@AWS. + +You can find out more about Oracle Database@AWS from [User Guide](https://docs.aws.amazon.com/odb/latest/UserGuide/what-is-odb.html). + +## Example Usage + +### Basic Usage + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. + */ +import { DataAwsOdbDbNode } from "./.gen/providers/aws/"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + new DataAwsOdbDbNode(this, "example", { + cloud_vm_cluster_id: "cloud_vm_cluster_id", + id: "db_node_id", + }); + } +} + +``` + +## Argument Reference + +The following arguments are required: + +* `cloud_vm_cluster_id` - (Required) The unique identifier of the cloud vm cluster. +* `id` - (Required) The unique identifier of db node associated with vm cluster. + +The following arguments are optional: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). + +## Attribute Reference + +This data source exports the following attributes in addition to the arguments above: + +* `cloud_vm_cluster_id` - The ID of the cloud VM cluster. +* `status` - The current status of the DB node. +* `statusReason` - Additional information about the status of the DB node. +* `additional_details` - Additional information about the planned maintenance. +* `backup_ip_id` - The Oracle Cloud ID (OCID) of the backup IP address that's associated with the DB node. +* `backup_vnic2_id` - The OCID of the second backup VNIC. +* `backup_vnic_id` - The OCID of the backup VNIC. +* `cpuCoreCount` - The number of CPU cores enabled on the DB node. +* `db_storage_size_in_gbs` - The amount of local node storage, in gigabytes (GB), allocated on the DB node. +* `db_server_id` - The unique identifier of the DB server that is associated with the DB node. +* `dbSystemId` - The OCID of the DB system. +* `fault_domain` - The name of the fault domain the instance is contained in. +* `host_ip_id` - The OCID of the host IP address that's associated with the DB node. +* `hostname` - The host name for the DB node. +* `ocid` - The OCID of the DB node. +* `ociResourceAnchorName` - The name of the OCI resource anchor for the DB node. +* `maintenance_type` - The type of database node maintenance. Either VMDB_REBOOT_MIGRATION or EXADBXS_REBOOT_MIGRATION. +* `memorySizeInGbs` - The allocated memory in GBs on the DB node. +* `software_storage_size_in_gbs` - The size (in GB) of the block storage volume allocation for the DB system. +* `createdAt` - The date and time when the DB node was created. +* `time_maintenance_window_end` - The end date and time of the maintenance window. +* `time_maintenance_window_start` - The start date and time of the maintenance window. 
+* `total_cpu_core_count` - The total number of CPU cores reserved on the DB node. +* `vnic2_id` - The OCID of the second VNIC. +* `vnic_id` - The OCID of the VNIC. +* `privateIpAddress` - The private IP address assigned to the DB node. +* `floating_ip_address` - The floating IP address assigned to the DB node. + + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/odb_db_nodes.html.markdown b/website/docs/cdktf/typescript/d/odb_db_nodes.html.markdown new file mode 100644 index 000000000000..d2069b20754a --- /dev/null +++ b/website/docs/cdktf/typescript/d/odb_db_nodes.html.markdown @@ -0,0 +1,86 @@ +--- +subcategory: "Oracle Database@AWS" +layout: "AWS: aws_odb_db_nodes" +page_title: "AWS: aws_odb_db_nodes" +description: |- + Terraform data source for managing db nodes linked to cloud vm cluster of Oracle Database@AWS. +--- + + + +# Data Source: aws_odb_db_nodes + +Terraform data source for manging db nodes linked to cloud vm cluster of Oracle Database@AWS. + +You can find out more about Oracle Database@AWS from [User Guide](https://docs.aws.amazon.com/odb/latest/UserGuide/what-is-odb.html). + +## Example Usage + +### Basic Usage + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. + */ +import { DataAwsOdbDbNodes } from "./.gen/providers/aws/"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + new DataAwsOdbDbNodes(this, "example", { + cloud_vm_cluster_id: "example", + }); + } +} + +``` + +## Argument Reference + +The following arguments are required: + +* `cloud_vm_cluster_id` - (Required) The unique identifier of the cloud vm cluster. 
+ +The following arguments are optional: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). + +## Attribute Reference + +This data source exports the following attributes in addition to the arguments above: + +* `db_nodes` - The list of DB nodes along with their properties. + +### db_nodes + +* `additional_details` - Additional information about the planned maintenance. +* `backup_ip_id` - The Oracle Cloud ID (OCID) of the backup IP address that's associated with the DB node. +* `backup_vnic_2_id` - The OCID of the second backup virtual network interface card (VNIC) for the DB node. +* `backup_vnic_id` - The OCID of the backup VNIC for the DB node. +* `cpuCoreCount` - The number of CPU cores enabled on the DB node. +* `createdAt` - The date and time when the DB node was created. +* `db_node_arn` - The Amazon Resource Name (ARN) of the DB node. +* `db_node_id` - The unique identifier of the DB node. +* `dbNodeStorageSizeInGbs` - The amount of local node storage, in gigabytes (GB), that's allocated on the DB node. +* `db_server_id` - The unique identifier of the database server that's associated with the DB node. +* `dbSystemId` - The OCID of the DB system. +* `fault_domain` - The name of the fault domain where the DB node is located. +* `host_ip_id` - The OCID of the host IP address that's associated with the DB node. +* `hostname` - The host name for the DB node. +* `maintenance_type` - The type of maintenance the DB node is undergoing. +* `memorySizeInGbs` - The amount of memory, in gigabytes (GB), that's allocated on the DB node. +* `ociResourceAnchorName` - The name of the OCI resource anchor for the DB node. +* `ocid` - The OCID of the DB node. 
+* `software_storage_size_in_gb` - The size of the block storage volume, in gigabytes (GB), that's allocated for the DB system. This attribute applies only for virtual machine DB systems. +* `status` - The current status of the DB node. +* `statusReason` - Additional information about the status of the DB node. +* `time_maintenance_window_end` - The end date and time of the maintenance window. +* `time_maintenance_window_start` - The start date and time of the maintenance window. +* `total_cpu_core_count` - The total number of CPU cores reserved on the DB node. +* `vnic_2_id` - The OCID of the second VNIC. +* `vnic_id` - The OCID of the VNIC. + + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/odb_db_server.html.markdown b/website/docs/cdktf/typescript/d/odb_db_server.html.markdown new file mode 100644 index 000000000000..74a7f0ab543c --- /dev/null +++ b/website/docs/cdktf/typescript/d/odb_db_server.html.markdown @@ -0,0 +1,77 @@ +--- +subcategory: "Oracle Database@AWS" +layout: "AWS: aws_odb_db_server" +page_title: "AWS: aws_odb_db_server" +description: |- + Terraform data source for managing db server linked to exadata infrastructure of Oracle Database@AWS. +--- + + + +# Data Source: aws_odb_db_server + +Terraform data source for manging db server linked to exadata infrastructure of Oracle Database@AWS. + +You can find out more about Oracle Database@AWS from [User Guide](https://docs.aws.amazon.com/odb/latest/UserGuide/what-is-odb.html). + +## Example Usage + +### Basic Usage + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. 
+ */ +import { DataAwsOdbDbServer } from "./.gen/providers/aws/"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + new DataAwsOdbDbServer(this, "example", { + cloud_exadata_infrastructure_id: "exadata_infra_id", + id: "db_server_id", + }); + } +} + +``` + +## Argument Reference + +The following arguments are required: + +* `cloudExadataInfrastructureId` - (Required) The unique identifier of the cloud exadata infrastructure. +* `id` - (Required) The unique identifier of the db server associated with the exadata infrastructure. + +The following arguments are optional: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). + +## Attribute Reference + +This data source exports the following attributes in addition to the arguments above: + +* `autonomous_virtual_machine_ids` - The list of unique identifiers for the Autonomous VMs associated with this database server. +* `autonomous_vm_cluster_ids` - The OCID of the autonomous VM clusters that are associated with the database server. +* `computeModel` - The compute model of the database server. +* `status` - The status of the database server. +* `statusReason` - Additional information about the current status of the database server. +* `cpuCoreCount` - The number of CPU cores enabled on the database server. +* `dbNodeStorageSizeInGbs` - The allocated local node storage in GBs on the database server. +* `db_server_patching_details` - The scheduling details for the quarterly maintenance window. Patching and system updates take place during the maintenance window. +* `displayName` - The display name of the database server. +* `exadata_infrastructure_id` - The exadata infrastructure ID of the database server. 
+* `ocid` - The OCID of the database server to retrieve information about. +* `ociResourceAnchorName` - The name of the OCI resource anchor. +* `maxCpuCount` - The total number of CPU cores available. +* `max_db_node_storage_in_gbs` - The total local node storage available in GBs. +* `maxMemoryInGbs` - The total memory available in GBs. +* `memorySizeInGbs` - The allocated memory in GBs on the database server. +* `shape` - The shape of the database server. The shape determines the amount of CPU, storage, and memory resources available. +* `createdAt` - The date and time when the database server was created. +* `vm_cluster_ids` - The OCID of the VM clusters that are associated with the database server. + + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/odb_db_servers.html.markdown b/website/docs/cdktf/typescript/d/odb_db_servers.html.markdown new file mode 100644 index 000000000000..1dc76bdcdf51 --- /dev/null +++ b/website/docs/cdktf/typescript/d/odb_db_servers.html.markdown @@ -0,0 +1,80 @@ +--- +subcategory: "Oracle Database@AWS" +layout: "AWS: aws_odb_db_servers" +page_title: "AWS: aws_odb_db_servers" +description: |- + Terraform data source for managing db servers linked to exadata infrastructure of Oracle Database@AWS. +--- + + + +# Data Source: aws_odb_db_servers + +Terraform data source for manging db servers linked to exadata infrastructure of Oracle Database@AWS. + +You can find out more about Oracle Database@AWS from [User Guide](https://docs.aws.amazon.com/odb/latest/UserGuide/what-is-odb.html). + +## Example Usage + +### Basic Usage + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. 
+ */ +import { DataAwsOdbDbServers } from "./.gen/providers/aws/"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + new DataAwsOdbDbServers(this, "example", { + cloud_exadata_infrastructure_id: "exadata_infra_id", + }); + } +} + +``` + +## Argument Reference + +The following arguments are required: + +* `cloudExadataInfrastructureId` - (Required) The unique identifier of the cloud exadata infrastructure. + +The following arguments are optional: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). + +## Attribute Reference + +This data source exports the following attributes in addition to the arguments above: + +* `dbServers` - The list of DB servers along with their properties. + +### db_servers + +* `autonomous_virtual_machine_ids` - A list of unique identifiers for the Autonomous VMs. +* `autonomous_vm_cluster_ids` - A list of identifiers for the Autonomous VM clusters. +* `computeModel` - The OCI compute model used when you create or clone an instance: **ECPU** or **OCPU**. ECPUs are based on the number of cores elastically allocated from a pool of compute and storage servers, while OCPUs are based on the physical core of a processor with hyper-threading enabled. +* `cpuCoreCount` - The number of CPU cores enabled on the database server. +* `createdAt` - The date and time when the database server was created. +* `dbNodeStorageSizeInGbs` - The amount of local node storage, in gigabytes (GB), that's allocated on the database server. +* `db_server_id` - The unique identifier of the database server. +* `db_server_patching_details` - The scheduling details for the quarterly maintenance window. 
Patching and system updates take place during the maintenance window. +* `displayName` - The user-friendly name of the database server. The name doesn't need to be unique. +* `exadata_infrastructure_id` - The ID of the Exadata infrastructure that hosts the database server. +* `maxCpuCount` - The total number of CPU cores available on the database server. +* `max_db_node_storage_in_gbs` - The total amount of local node storage, in gigabytes (GB), that's available on the database server. +* `maxMemoryInGbs` - The total amount of memory, in gigabytes (GB), that's available on the database server. +* `memorySizeInGbs` - The amount of memory, in gigabytes (GB), that's allocated on the database server. +* `ociResourceAnchorName` - The name of the OCI resource anchor for the database server. +* `ocid` - The OCID of the database server. +* `shape` - The hardware system model of the Exadata infrastructure that the database server is hosted on. The shape determines the amount of CPU, storage, and memory resources available. +* `status` - The current status of the database server. +* `statusReason` - Additional information about the status of the database server. +* `vm_cluster_ids` - The IDs of the VM clusters that are associated with the database server. + + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/odb_network.html.markdown b/website/docs/cdktf/typescript/d/odb_network.html.markdown new file mode 100644 index 000000000000..a841ec3366b1 --- /dev/null +++ b/website/docs/cdktf/typescript/d/odb_network.html.markdown @@ -0,0 +1,74 @@ +--- +subcategory: "Oracle Database@AWS" +layout: "AWS: aws_odb_network" +page_title: "AWS: aws_odb_network" +description: |- + Terraform data source to retrieve odb network for Oracle Database@AWS. +--- + + + +# Data Source: aws_odb_network + +Terraform data source to retrieve network resource in AWS for Oracle Database@AWS. + +## Example Usage + +### Basic Usage + +```typescript +// DO NOT EDIT. 
Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. + */ +import { DataAwsOdbNetwork } from "./.gen/providers/aws/data-aws-odb-network"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + new DataAwsOdbNetwork(this, "example", { + id: "example", + }); + } +} + +``` + +## Argument Reference + +The following arguments are required: + +* `id` - (Required) Unique identifier of the odb network resource. + +The following arguments are optional: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). + +## Attribute Reference + +This resource exports the following attributes in addition to the arguments above: + +* `id` - Unique identifier of the odb network resource. +* `arn` - Amazon Resource Name (ARN) of the odb network resource. +* `displayName` - Display name for the network resource. +* `availabilityZoneId` - The AZ ID of the AZ where the ODB network is located. +* `availabilityZone` - The availability zone where the ODB network is located. +* `backupSubnetCidr` - The CIDR range of the backup subnet for the ODB network. +* `clientSubnetCidr` - The CIDR notation for the network resource. +* `customDomainName` - The name of the custom domain that the network is located. +* `defaultDnsPrefix` - The default DNS prefix for the network resource. +* `ociNetworkAnchorId` - The unique identifier of the OCI network anchor for the ODB network. +* `ociNetworkAnchorUrl` - The URL of the OCI network anchor for the ODB network. 
+* `ociResourceAnchorName` - The name of the OCI resource anchor for the ODB network. +* `ociVcnId` - The unique identifier Oracle Cloud ID (OCID) of the OCI VCN for the ODB network. +* `ociVcnUrl` - The URL of the OCI VCN for the ODB network. +* `percentProgress` - The amount of progress made on the current operation on the ODB network, expressed as a percentage. +* `peeredCidrs` - The list of CIDR ranges from the peered VPC that are allowed access to the ODB network. Please refer odb network peering documentation. +* `status` - The status of the network resource. +* `statusReason` - Additional information about the current status of the ODB network. +* `createdAt` - The date and time when the ODB network was created. +* `managedServices` - The managed services configuration for the ODB network. + + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/odb_network_peering_connection.html.markdown b/website/docs/cdktf/typescript/d/odb_network_peering_connection.html.markdown new file mode 100644 index 000000000000..e76b6bdd3ae9 --- /dev/null +++ b/website/docs/cdktf/typescript/d/odb_network_peering_connection.html.markdown @@ -0,0 +1,66 @@ +--- +subcategory: "Oracle Database@AWS" +layout: "AWS: aws_odb_network_peering_connection" +page_title: "AWS: aws_odb_network_peering_connection" +description: |- + Terraform data source for managing oracle database network peering resource in AWS. +--- + + + +# Data Source: aws_odb_network_peering_connection + +Terraform data source for managing oracle database network peering resource in AWS. + +You can find out more about Oracle Database@AWS from [User Guide](https://docs.aws.amazon.com/odb/latest/UserGuide/what-is-odb.html). + +## Example Usage + +### Basic Usage + +```typescript +// DO NOT EDIT. 
+ Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. + */ +import { DataAwsOdbNetworkPeeringConnection } from "./.gen/providers/aws/data-aws-odb-network-peering-connection"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + new DataAwsOdbNetworkPeeringConnection(this, "example", { + id: "example", + }); + } +} + +``` + +## Argument Reference + +The following arguments are required: + +* `id` - (Required) The unique identifier of the ODB network peering connection. + +The following arguments are optional: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). + +## Attribute Reference + +This data source exports the following attributes in addition to the arguments above: + +* `displayName` - Display name of the ODB network peering connection. +* `status` - Status of the ODB network peering connection. +* `statusReason` - Additional information about the status of the ODB network peering connection. +* `odbNetworkArn` - ARN of the ODB network. +* `arn` - The Amazon Resource Name (ARN) of the ODB network peering connection. +* `peerNetworkArn` - ARN of the peer network. +* `odbPeeringConnectionType` - Type of the ODB peering connection. +* `createdAt` - Created time of the ODB network peering connection. +* `percentProgress` - Progress of the ODB network peering connection. +* `tags` - Tags applied to the resource. 
+ + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/opensearch_domain.html.markdown b/website/docs/cdktf/typescript/d/opensearch_domain.html.markdown index 0f41ffe6ffcb..b6fcb84c9001 100644 --- a/website/docs/cdktf/typescript/d/opensearch_domain.html.markdown +++ b/website/docs/cdktf/typescript/d/opensearch_domain.html.markdown @@ -38,18 +38,19 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: -* `domainName` – (Required) Name of the domain. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `domainName` - (Required) Name of the domain. ## Attribute Reference This data source exports the following attributes in addition to the arguments above: -* `accessPolicies` – Policy document attached to the domain. +* `accessPolicies` - Policy document attached to the domain. * `advancedOptions` - Key-value string pairs to specify advanced configuration options. * `advancedSecurityOptions` - Status of the OpenSearch domain's advanced security options. The block consists of the following attributes: * `enabled` - Whether advanced security is enabled. * `internalUserDatabaseEnabled` - Whether the internal user database is enabled. -* `arn` – ARN of the domain. +* `arn` - ARN of the domain. * `autoTuneOptions` - Configuration of the Auto-Tune options of the domain. * `desiredState` - Auto-Tune desired state for the domain. * `maintenanceSchedule` - A list of the nested configurations for the Auto-Tune maintenance windows of the domain. @@ -86,26 +87,25 @@ This data source exports the following attributes in addition to the arguments a * `userPoolId` - Cognito User pool used by the domain. * `identityPoolId` - Cognito Identity pool used by the domain. 
* `roleArn` - IAM Role with the AmazonOpenSearchServiceCognitoAccess policy attached. -* `created` – Status of the creation of the domain. +* `created` - Status of the creation of the domain. * `dashboardEndpoint` - Domain-specific endpoint used to access the [Dashboard application](https://docs.aws.amazon.com/opensearch-service/latest/developerguide/dashboards.html). * `dashboardEndpointV2` - V2 domain-specific endpoint used to access the [Dashboard application](https://docs.aws.amazon.com/opensearch-service/latest/developerguide/dashboards.html) -* `deleted` – Status of the deletion of the domain. +* `deleted` - Status of the deletion of the domain. * `domainEndpointV2HostedZoneId` - Dual stack hosted zone ID for the domain. -* `domainId` – Unique identifier for the domain. +* `domainId` - Unique identifier for the domain. * `ebsOptions` - EBS Options for the instances in the domain. * `ebsEnabled` - Whether EBS volumes are attached to data nodes in the domain. * `throughput` - The throughput (in MiB/s) of the EBS volumes attached to data nodes. * `volumeType` - Type of EBS volumes attached to data nodes. * `volumeSize` - Size of EBS volumes attached to data nodes (in GB). * `iops` - Baseline input/output (I/O) performance of EBS volumes attached to data nodes. -* `engineVersion` – OpenSearch version for the domain. +* `engineVersion` - OpenSearch version for the domain. * `encryptionAtRest` - Domain encryption at rest related options. * `enabled` - Whether encryption at rest is enabled in the domain. * `kmsKeyId` - KMS key id used to encrypt data at rest. -* `endpoint` – Domain-specific endpoint used to submit index, search, and data upload requests. +* `endpoint` - Domain-specific endpoint used to submit index, search, and data upload requests. * `endpointV2` - V2 domain-specific endpoint that works with both IPv4 and IPv6 addresses, used to submit index, search, and data upload requests. 
* `ipAddressType` - Type of IP addresses supported by the endpoint for the domain. -* `kibanaEndpoint` - (**Deprecated**) Domain-specific endpoint for kibana without https scheme. Use the `dashboardEndpoint` attribute instead. * `logPublishingOptions` - Domain log publishing related options. * `logType` - Type of OpenSearch log being published. * `cloudwatchLogGroupArn` - CloudWatch Log Group where the logs are published. @@ -118,7 +118,7 @@ This data source exports the following attributes in addition to the arguments a * `windowStartTime` - 10h window for updates * `hours` - Starting hour of the 10-hour window for updates * `minutes` - Starting minute of the 10-hour window for updates -* `processing` – Status of a configuration change in the domain. +* `processing` - Status of a configuration change in the domain. * `snapshotOptions` – Domain snapshot related options. * `automatedSnapshotStartHour` - Hour during which the service takes an automated daily snapshot of the indices in the domain. * `softwareUpdateOptions` - Software update options for the domain @@ -130,4 +130,4 @@ This data source exports the following attributes in addition to the arguments a * `subnetIds` - Subnets used by the domain. * `vpcId` - VPC used by the domain. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/opensearchserverless_access_policy.html.markdown b/website/docs/cdktf/typescript/d/opensearchserverless_access_policy.html.markdown index e6698520a81c..7a4404e080a3 100644 --- a/website/docs/cdktf/typescript/d/opensearchserverless_access_policy.html.markdown +++ b/website/docs/cdktf/typescript/d/opensearchserverless_access_policy.html.markdown @@ -39,8 +39,9 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -The following arguments are required: +This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the policy. * `type` - (Required) Type of access policy. Must be `data`. @@ -52,4 +53,4 @@ This data source exports the following attributes in addition to the arguments a * `policy` - JSON policy document to use as the content for the new policy. * `policyVersion` - Version of the policy. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/opensearchserverless_collection.html.markdown b/website/docs/cdktf/typescript/d/opensearchserverless_collection.html.markdown index 84940dec6776..3c3d1dedd542 100644 --- a/website/docs/cdktf/typescript/d/opensearchserverless_collection.html.markdown +++ b/website/docs/cdktf/typescript/d/opensearchserverless_collection.html.markdown @@ -40,11 +40,12 @@ class MyConvertedCode extends TerraformStack { The following arguments are optional: -~> Exactly one of `id` or `name` is required. - +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `id` - (Optional) ID of the collection. * `name` - (Optional) Name of the collection. +~> Exactly one of `id` or `name` is required. + ## Attribute Reference This data source exports the following attributes in addition to the arguments above: @@ -62,4 +63,4 @@ This data source exports the following attributes in addition to the arguments a * `tags` - A map of tags to assign to the collection. * `type` - Type of collection. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/opensearchserverless_lifecycle_policy.html.markdown b/website/docs/cdktf/typescript/d/opensearchserverless_lifecycle_policy.html.markdown index 471169c17f3a..c715ad743d2b 100644 --- a/website/docs/cdktf/typescript/d/opensearchserverless_lifecycle_policy.html.markdown +++ b/website/docs/cdktf/typescript/d/opensearchserverless_lifecycle_policy.html.markdown @@ -39,8 +39,9 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -The following arguments are required: +This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the policy * `type` - (Required) Type of lifecycle policy. Must be `retention`. @@ -54,4 +55,4 @@ This data source exports the following attributes in addition to the arguments a * `policy` - JSON policy document to use as the content for the new policy. * `policyVersion` - Version of the policy. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/opensearchserverless_security_config.html.markdown b/website/docs/cdktf/typescript/d/opensearchserverless_security_config.html.markdown index e600b93b85e2..7aec09afa82e 100644 --- a/website/docs/cdktf/typescript/d/opensearchserverless_security_config.html.markdown +++ b/website/docs/cdktf/typescript/d/opensearchserverless_security_config.html.markdown @@ -38,8 +38,9 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -The following arguments are required: +This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `id` - (Required) The unique identifier of the security configuration. ## Attribute Reference @@ -62,4 +63,4 @@ SAML options for the security configuration. * `sessionTimeout` - Session timeout, in minutes. Minimum is 5 minutes and maximum is 720 minutes (12 hours). Default is 60 minutes. * `userAttribute` - User attribute for this SAML integration. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/opensearchserverless_security_policy.html.markdown b/website/docs/cdktf/typescript/d/opensearchserverless_security_policy.html.markdown index 4679fdcea2d0..c5b16acfc939 100644 --- a/website/docs/cdktf/typescript/d/opensearchserverless_security_policy.html.markdown +++ b/website/docs/cdktf/typescript/d/opensearchserverless_security_policy.html.markdown @@ -39,6 +39,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the policy * `type` - (Required) Type of security policy. One of `encryption` or `network`. @@ -52,4 +53,4 @@ This data source exports the following attributes in addition to the arguments a * `policy` - The JSON policy document without any whitespaces. * `policyVersion` - Version of the policy. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/opensearchserverless_vpc_endpoint.html.markdown b/website/docs/cdktf/typescript/d/opensearchserverless_vpc_endpoint.html.markdown index 3b5b6a51454f..3f1496e15a05 100644 --- a/website/docs/cdktf/typescript/d/opensearchserverless_vpc_endpoint.html.markdown +++ b/website/docs/cdktf/typescript/d/opensearchserverless_vpc_endpoint.html.markdown @@ -36,8 +36,9 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -The following arguments are required: +This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `vpcEndpointId` - (Required) The unique identifier of the endpoint. ## Attribute Reference @@ -50,4 +51,4 @@ This data source exports the following attributes in addition to the arguments a * `subnetIds` - The IDs of the subnets from which you access OpenSearch Serverless. * `vpcId` - The ID of the VPC from which you access OpenSearch Serverless. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/organizations_resource_tags.html.markdown b/website/docs/cdktf/typescript/d/organizations_resource_tags.html.markdown index 7f45d48bb128..825cb418a288 100644 --- a/website/docs/cdktf/typescript/d/organizations_resource_tags.html.markdown +++ b/website/docs/cdktf/typescript/d/organizations_resource_tags.html.markdown @@ -44,10 +44,10 @@ This data source supports the following arguments: You can specify any of the following taggable resources. -* AWS account – specify the account ID number. -* Organizational unit – specify the OU ID that begins with `ou-` and looks similar to: `ou-1a2b-34uvwxyz` -* Root – specify the root ID that begins with `r-` and looks similar to: `r-1a2b` -* Policy – specify the policy ID that begins with `p-` and looks similar to: `p-12abcdefg3` +* AWS account - specify the account ID number. +* Organizational unit - specify the OU ID that begins with `ou-` and looks similar to: `ou-1a2b-34uvwxyz` +* Root - specify the root ID that begins with `r-` and looks similar to: `r-1a2b` +* Policy - specify the policy ID that begins with `p-` and looks similar to: `p-12abcdefg3` ## Attribute Reference @@ -55,4 +55,4 @@ This data source exports the following attributes in addition to the arguments a * `tags` - Map of key=value pairs for each tag set on the resource. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/outposts_asset.html.markdown b/website/docs/cdktf/typescript/d/outposts_asset.html.markdown index cfd9f8e6fa61..b59e98718bae 100644 --- a/website/docs/cdktf/typescript/d/outposts_asset.html.markdown +++ b/website/docs/cdktf/typescript/d/outposts_asset.html.markdown @@ -57,8 +57,9 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -The following arguments are required: +This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `arn` - (Required) Outpost ARN. * `assetId` - (Required) ID of the asset. @@ -71,4 +72,4 @@ This data source exports the following attributes in addition to the arguments a * `rackElevation` - Position of an asset in a rack measured in rack units. * `rackId` - Rack ID of the asset. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/outposts_assets.html.markdown b/website/docs/cdktf/typescript/d/outposts_assets.html.markdown index 1ba8782eeb1f..8410abc56c63 100644 --- a/website/docs/cdktf/typescript/d/outposts_assets.html.markdown +++ b/website/docs/cdktf/typescript/d/outposts_assets.html.markdown @@ -86,6 +86,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `arn` - (Required) Outpost ARN. 
* `hostIdFilter` - (Optional) Filters by list of Host IDs of a Dedicated Host. * `statusIdFilter` - (Optional) Filters by list of state status. Valid values: "ACTIVE", "RETIRING". @@ -96,4 +97,4 @@ This data source exports the following attributes in addition to the arguments a * `assetIds` - List of all the asset ids found. This data source will fail if none are found. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/outposts_outpost.html.markdown b/website/docs/cdktf/typescript/d/outposts_outpost.html.markdown index 8b69cb01e6ea..1f4e66d2f2fb 100644 --- a/website/docs/cdktf/typescript/d/outposts_outpost.html.markdown +++ b/website/docs/cdktf/typescript/d/outposts_outpost.html.markdown @@ -38,6 +38,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `id` - (Optional) Identifier of the Outpost. * `name` - (Optional) Name of the Outpost. * `arn` - (Optional) ARN. @@ -56,4 +57,4 @@ This data source exports the following attributes in addition to the arguments a * `supportedHardwareType` - The hardware type. * `tags` - The Outpost tags. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/outposts_outpost_instance_type.html.markdown b/website/docs/cdktf/typescript/d/outposts_outpost_instance_type.html.markdown index fd7c40570364..df5b3f075cef 100644 --- a/website/docs/cdktf/typescript/d/outposts_outpost_instance_type.html.markdown +++ b/website/docs/cdktf/typescript/d/outposts_outpost_instance_type.html.markdown @@ -49,6 +49,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `instanceType` - (Optional) Desired instance type. Conflicts with `preferredInstanceTypes`. * `preferredInstanceTypes` - (Optional) Ordered list of preferred instance types. The first match in this list will be returned. If no preferred matches are found and the original search returned more than one result, an error is returned. Conflicts with `instanceType`. @@ -58,4 +59,4 @@ This data source exports the following attributes in addition to the arguments a * `id` - Outpost identifier. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/outposts_outpost_instance_types.html.markdown b/website/docs/cdktf/typescript/d/outposts_outpost_instance_types.html.markdown index 365d99b542a3..bd45cff50e76 100644 --- a/website/docs/cdktf/typescript/d/outposts_outpost_instance_types.html.markdown +++ b/website/docs/cdktf/typescript/d/outposts_outpost_instance_types.html.markdown @@ -36,8 +36,9 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -The following arguments are required: +This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `arn` - (Required) Outpost ARN. ## Attribute Reference @@ -46,4 +47,4 @@ This data source exports the following attributes in addition to the arguments a * `instanceTypes` - Set of instance types. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/outposts_outposts.html.markdown b/website/docs/cdktf/typescript/d/outposts_outposts.html.markdown index e50517cf4261..2f9509338da5 100644 --- a/website/docs/cdktf/typescript/d/outposts_outposts.html.markdown +++ b/website/docs/cdktf/typescript/d/outposts_outposts.html.markdown @@ -38,6 +38,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `availabilityZone` - (Optional) Availability Zone name. 
* `availabilityZoneId` - (Optional) Availability Zone identifier. * `siteId` - (Optional) Site identifier. @@ -51,4 +52,4 @@ This data source exports the following attributes in addition to the arguments a * `id` - AWS Region. * `ids` - Set of identifiers. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/outposts_site.html.markdown b/website/docs/cdktf/typescript/d/outposts_site.html.markdown index ec285f011d32..ad020f0f955b 100644 --- a/website/docs/cdktf/typescript/d/outposts_site.html.markdown +++ b/website/docs/cdktf/typescript/d/outposts_site.html.markdown @@ -38,6 +38,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `id` - (Optional) Identifier of the Site. * `name` - (Optional) Name of the Site. @@ -48,4 +49,4 @@ This data source exports the following attributes in addition to the arguments a * `accountId` - AWS Account identifier. * `description` - Description. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/outposts_sites.html.markdown b/website/docs/cdktf/typescript/d/outposts_sites.html.markdown index 3adb72ae6d08..3a2cd667644a 100644 --- a/website/docs/cdktf/typescript/d/outposts_sites.html.markdown +++ b/website/docs/cdktf/typescript/d/outposts_sites.html.markdown @@ -34,7 +34,9 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -This data source does not support any arguments. 
+This data source supports the following arguments: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). ## Attribute Reference @@ -43,4 +45,4 @@ This data source exports the following attributes in addition to the arguments a * `id` - AWS Region. * `ids` - Set of Outposts Site identifiers. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/polly_voices.html.markdown b/website/docs/cdktf/typescript/d/polly_voices.html.markdown index 02bf2fb3361b..ab0f7ccc6ef3 100644 --- a/website/docs/cdktf/typescript/d/polly_voices.html.markdown +++ b/website/docs/cdktf/typescript/d/polly_voices.html.markdown @@ -60,6 +60,7 @@ class MyConvertedCode extends TerraformStack { The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `engine` - (Optional) Engine used by Amazon Polly when processing input text for speech synthesis. Valid values are `standard`, `neural`, and `long-form`. * `includeAdditionalLanguageCodes` - (Optional) Whether to return any bilingual voices that use the specified language as an additional language. * `languageCode` - (Optional) Language identification tag for filtering the list of voices returned. If not specified, all available voices are returned. @@ -83,4 +84,4 @@ See the [AWS Polly Voice documentation](https://docs.aws.amazon.com/polly/latest * `name` - Name of the voice. * `supportedEngines` - Specifies which engines are supported by a given voice. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/prefix_list.html.markdown b/website/docs/cdktf/typescript/d/prefix_list.html.markdown index 04d0fb540e27..b8c2ffffd8fb 100644 --- a/website/docs/cdktf/typescript/d/prefix_list.html.markdown +++ b/website/docs/cdktf/typescript/d/prefix_list.html.markdown @@ -107,6 +107,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `prefixListId` - (Optional) ID of the prefix list to select. * `name` - (Optional) Name of the prefix list to select. * `filter` - (Optional) Configuration block(s) for filtering. Detailed below. @@ -136,4 +137,4 @@ This data source exports the following attributes in addition to the arguments a - `read` - (Default `20m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/prometheus_default_scraper_configuration.html.markdown b/website/docs/cdktf/typescript/d/prometheus_default_scraper_configuration.html.markdown index 9c742b377911..c104c71b27ff 100644 --- a/website/docs/cdktf/typescript/d/prometheus_default_scraper_configuration.html.markdown +++ b/website/docs/cdktf/typescript/d/prometheus_default_scraper_configuration.html.markdown @@ -35,7 +35,9 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -This data source does not support any arguments. +This data source supports the following arguments: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). ## Attribute Reference @@ -43,4 +45,4 @@ This data source exports the following attributes in addition to the arguments a * `configuration` - The configuration file. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/prometheus_workspace.html.markdown b/website/docs/cdktf/typescript/d/prometheus_workspace.html.markdown index 54bdf45b0879..831a51ffbf63 100644 --- a/website/docs/cdktf/typescript/d/prometheus_workspace.html.markdown +++ b/website/docs/cdktf/typescript/d/prometheus_workspace.html.markdown @@ -38,8 +38,9 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -The following arguments are required: +This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `workspaceId` - (Required) Prometheus workspace ID. ## Attribute Reference @@ -54,4 +55,4 @@ This data source exports the following attributes in addition to the arguments a * `status` - Status of the Prometheus workspace. * `tags` - Tags assigned to the resource. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/prometheus_workspaces.html.markdown b/website/docs/cdktf/typescript/d/prometheus_workspaces.html.markdown index 911fae473c51..738496c14f3f 100644 --- a/website/docs/cdktf/typescript/d/prometheus_workspaces.html.markdown +++ b/website/docs/cdktf/typescript/d/prometheus_workspaces.html.markdown @@ -61,6 +61,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `aliasPrefix` - (Optional) Limits results to workspaces with aliases that begin with this value. ## Attribute Reference @@ -71,4 +72,4 @@ This data source exports the following attributes in addition to the arguments a * `arns` - List of ARNs of the matched Prometheus workspaces. * `workspaceIds` - List of workspace IDs of the matched Prometheus workspaces. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/qldb_ledger.html.markdown b/website/docs/cdktf/typescript/d/qldb_ledger.html.markdown index 88e7bc9ba91c..e0ed3064b093 100644 --- a/website/docs/cdktf/typescript/d/qldb_ledger.html.markdown +++ b/website/docs/cdktf/typescript/d/qldb_ledger.html.markdown @@ -38,6 +38,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
* `name` - (Required) Friendly name of the ledger to match. ## Attribute Reference @@ -47,4 +48,4 @@ This data source exports the following attributes in addition to the arguments a See the [QLDB Ledger Resource](/docs/providers/aws/r/qldb_ledger.html) for details on the returned attributes - they are identical. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/quicksight_analysis.html.markdown b/website/docs/cdktf/typescript/d/quicksight_analysis.html.markdown index 2215024030c3..9ad535a35a9b 100644 --- a/website/docs/cdktf/typescript/d/quicksight_analysis.html.markdown +++ b/website/docs/cdktf/typescript/d/quicksight_analysis.html.markdown @@ -41,7 +41,8 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: * `analysisId` - (Required) Identifier for the analysis. -* `awsAccountId` - (Optional) AWS account ID. +* `awsAccountId` - (Optional) AWS account ID. Defaults to automatically determined account ID of the Terraform AWS provider. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). ## Attribute Reference @@ -50,4 +51,4 @@ This data source exports the following attributes in addition to the arguments a See the [Analysis Resource](/docs/providers/aws/r/quicksight_analysis.html) for details on the returned attributes - they are identical. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/quicksight_data_set.html.markdown b/website/docs/cdktf/typescript/d/quicksight_data_set.html.markdown index 04b62150b855..9d747dfff658 100644 --- a/website/docs/cdktf/typescript/d/quicksight_data_set.html.markdown +++ b/website/docs/cdktf/typescript/d/quicksight_data_set.html.markdown @@ -40,8 +40,9 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `awsAccountId` - (Optional) AWS account ID. Defaults to automatically determined account ID of the Terraform AWS provider. * `dataSetId` - (Required) Identifier for the data set. -* `awsAccountId` - (Optional) AWS account ID. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). ## Attribute Reference @@ -50,4 +51,4 @@ This data source exports the following attributes in addition to the arguments a See the [Data Set Resource](/docs/providers/aws/r/quicksight_data_set.html) for details on the returned attributes - they are identical. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/quicksight_group.html.markdown b/website/docs/cdktf/typescript/d/quicksight_group.html.markdown index 5a2e0c3612c6..0b1ec60aa7d6 100644 --- a/website/docs/cdktf/typescript/d/quicksight_group.html.markdown +++ b/website/docs/cdktf/typescript/d/quicksight_group.html.markdown @@ -46,8 +46,9 @@ The following arguments are required: The following arguments are optional: -* `awsAccountId` - (Optional) AWS account ID. +* `awsAccountId` - (Optional) AWS account ID. Defaults to automatically determined account ID of the Terraform AWS provider. * `namespace` - (Optional) QuickSight namespace. 
Defaults to `default`. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). ## Attribute Reference @@ -57,4 +58,4 @@ This data source exports the following attributes in addition to the arguments a * `description` - The group description. * `principalId` - The principal ID of the group. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/quicksight_theme.html.markdown b/website/docs/cdktf/typescript/d/quicksight_theme.html.markdown index a2a27be9fb00..ddb1d4f36688 100644 --- a/website/docs/cdktf/typescript/d/quicksight_theme.html.markdown +++ b/website/docs/cdktf/typescript/d/quicksight_theme.html.markdown @@ -44,7 +44,8 @@ The following arguments are required: The following arguments are optional: -* `awsAccountId` - AWS account ID. +* `awsAccountId` - AWS account ID. Defaults to automatically determined account ID of the Terraform AWS provider. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). ## Attribute Reference @@ -134,4 +135,4 @@ This data source exports the following attributes in addition to the arguments a * `warning` - Color (hexadecimal) that applies to warning and informational messages. * `warningForeground` - Color (hexadecimal) that applies to any text or other elements that appear over the warning color. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/quicksight_user.html.markdown b/website/docs/cdktf/typescript/d/quicksight_user.html.markdown index a11a4612ca13..e1339f352f15 100644 --- a/website/docs/cdktf/typescript/d/quicksight_user.html.markdown +++ b/website/docs/cdktf/typescript/d/quicksight_user.html.markdown @@ -46,8 +46,9 @@ The following arguments are required: The following arguments are optional: -* `awsAccountId` - (Optional) AWS account ID. +* `awsAccountId` - (Optional) AWS account ID. Defaults to automatically determined account ID of the Terraform AWS provider. * `namespace` - (Optional) QuickSight namespace. Defaults to `default`. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). ## Attribute Reference @@ -55,6 +56,7 @@ This data source exports the following attributes in addition to the arguments a * `active` - The active status of user. When you create an Amazon QuickSight user that’s not an IAM user or an Active Directory user, that user is inactive until they sign in and provide a password. * `arn` - The Amazon Resource Name (ARN) for the user. +* `customPermissionsName` - The custom permissions profile associated with this user. * `email` - The user's email address. * `identityType` - The type of identity authentication used by the user. * `principalId` - The principal ID of the user. @@ -63,4 +65,4 @@ This data source exports the following attributes in addition to the arguments a - `AUTHOR`: A user who can create data sources, datasets, analyzes, and dashboards. - `ADMIN`: A user who is an author, who can also manage Amazon QuickSight settings. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/ram_resource_share.html.markdown b/website/docs/cdktf/typescript/d/ram_resource_share.html.markdown index 91bef7f5e8c5..654c92df3adc 100644 --- a/website/docs/cdktf/typescript/d/ram_resource_share.html.markdown +++ b/website/docs/cdktf/typescript/d/ram_resource_share.html.markdown @@ -67,10 +67,11 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Optional) Name of the resource share to retrieve. * `resourceOwner` (Required) Owner of the resource share. Valid values are `SELF` or `OTHER-ACCOUNTS`. * `resourceShareStatus` (Optional) Specifies that you want to retrieve details of only those resource shares that have this status. Valid values are `PENDING`, `ACTIVE`, `FAILED`, `DELETING`, and `DELETED`. -* `filter` - (Optional) Filter used to scope the list e.g., by tags. See [related docs] (https://docs.aws.amazon.com/ram/latest/APIReference/API_TagFilter.html). +* `filter` - (Optional) Filter used to scope the list of owned shares e.g., by tags. See [related docs](https://docs.aws.amazon.com/ram/latest/APIReference/API_TagFilter.html). * `name` - (Required) Name of the tag key to filter on. * `values` - (Required) Value of the tag key. @@ -85,4 +86,4 @@ This data source exports the following attributes in addition to the arguments a * `status` - Status of the resource share. * `tags` - Tags attached to the resource share. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/rds_certificate.html.markdown b/website/docs/cdktf/typescript/d/rds_certificate.html.markdown index b0a7cd810284..aa3003eff50a 100644 --- a/website/docs/cdktf/typescript/d/rds_certificate.html.markdown +++ b/website/docs/cdktf/typescript/d/rds_certificate.html.markdown @@ -38,8 +38,9 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `id` - (Optional) Certificate identifier. For example, `rds-ca-2019`. -* `default_for_new_launches` - (Optional) When enabled, returns the default certificate for new RDS instances. +* `defaultForNewLaunches` - (Optional) When enabled, returns the default certificate for new RDS instances. * `latestValidTill` - (Optional) When enabled, returns the certificate with the latest `ValidTill`. ## Attribute Reference @@ -54,4 +55,4 @@ This data source exports the following attributes in addition to the arguments a * `validFrom` - [RFC3339 format](https://tools.ietf.org/html/rfc3339#section-5.8) of certificate starting validity date. * `validTill` - [RFC3339 format](https://tools.ietf.org/html/rfc3339#section-5.8) of certificate ending validity date. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/rds_cluster.html.markdown b/website/docs/cdktf/typescript/d/rds_cluster.html.markdown index b75bb9368f60..c0e8f358b1bd 100644 --- a/website/docs/cdktf/typescript/d/rds_cluster.html.markdown +++ b/website/docs/cdktf/typescript/d/rds_cluster.html.markdown @@ -38,6 +38,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `clusterIdentifier` - (Required) Cluster identifier of the RDS cluster. ## Attribute Reference @@ -49,4 +50,4 @@ returned attributes - they are identical for all attributes, except the `tagsAll * `tags` - A map of tags assigned to the resource. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/rds_cluster_parameter_group.html.markdown b/website/docs/cdktf/typescript/d/rds_cluster_parameter_group.html.markdown index eb279846ebc3..1de260f8d71e 100644 --- a/website/docs/cdktf/typescript/d/rds_cluster_parameter_group.html.markdown +++ b/website/docs/cdktf/typescript/d/rds_cluster_parameter_group.html.markdown @@ -36,8 +36,9 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -The following arguments are required: +This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) DB cluster parameter group name. 
## Attribute Reference @@ -48,4 +49,4 @@ This data source exports the following attributes in addition to the arguments a * `family` - Family of the cluster parameter group. * `description` - Description of the cluster parameter group. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/rds_clusters.html.markdown b/website/docs/cdktf/typescript/d/rds_clusters.html.markdown index f276e6a00856..b717a29759f1 100644 --- a/website/docs/cdktf/typescript/d/rds_clusters.html.markdown +++ b/website/docs/cdktf/typescript/d/rds_clusters.html.markdown @@ -45,6 +45,7 @@ class MyConvertedCode extends TerraformStack { The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `filter` - (Optional) Configuration block(s) for filtering. Detailed below. ### filter Configuration block @@ -61,4 +62,4 @@ This data source exports the following attributes in addition to the arguments a * `clusterArns` - Set of cluster ARNs of the matched RDS clusters. * `clusterIdentifiers` - Set of ARNs of cluster identifiers of the matched RDS clusters. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/rds_engine_version.html.markdown b/website/docs/cdktf/typescript/d/rds_engine_version.html.markdown index 99a1e76f8264..5500ca465ae4 100644 --- a/website/docs/cdktf/typescript/d/rds_engine_version.html.markdown +++ b/website/docs/cdktf/typescript/d/rds_engine_version.html.markdown @@ -75,6 +75,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `defaultOnly` - (Optional) Whether the engine version must be an AWS-defined default version. Some engines have multiple default versions, such as for each major version. Using `defaultOnly` may help avoid `multiple RDS engine versions` errors. See also `latest`. * `filter` - (Optional) One or more name/value pairs to use in filtering versions. There are several valid keys; for a full reference, check out [describe-db-engine-versions in the AWS CLI reference](https://awscli.amazonaws.com/v2/documentation/api/latest/reference/rds/describe-db-engine-versions.html). * `hasMajorTarget` - (Optional) Whether the engine version must have one or more major upgrade targets. Not including `hasMajorTarget` or setting it to `false` doesn't imply that there's no corresponding major upgrade target for the engine version. @@ -99,11 +100,11 @@ This data source exports the following attributes in addition to the arguments a * `supportedFeatureNames` - Set of features supported by the engine version. * `supportedModes` - Set of supported engine version modes. * `supportedTimezones` - Set of the time zones supported by the engine version. -* `supports_certificate_rotation_without_restart` - Whether the certificates can be rotated without restarting the Aurora instance. +* `supportsCertificateRotationWithoutRestart` - Whether the certificates can be rotated without restarting the Aurora instance. * `supportsGlobalDatabases` - Whether you can use Aurora global databases with the engine version. -* `supports_integrations` - Whether the engine version supports integrations with other AWS services. +* `supportsIntegrations` - Whether the engine version supports integrations with other AWS services. 
* `supportsLogExportsToCloudwatch` - Whether the engine version supports exporting the log types specified by `exportableLogTypes` to CloudWatch Logs. -* `supports_local_write_forwarding` - Whether the engine version supports local write forwarding or not. +* `supportsLocalWriteForwarding` - Whether the engine version supports local write forwarding or not. * `supportsLimitlessDatabase` - Whether the engine version supports Aurora Limitless Database. * `supportsParallelQuery` - Whether you can use Aurora parallel query with the engine version. * `supportsReadReplica` - Whether the engine version supports read replicas. @@ -113,4 +114,4 @@ This data source exports the following attributes in addition to the arguments a * `versionActual` - Complete engine version. * `versionDescription` - Description of the engine version. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/rds_orderable_db_instance.html.markdown b/website/docs/cdktf/typescript/d/rds_orderable_db_instance.html.markdown index eafd6fe07bc9..2a473c878ed1 100644 --- a/website/docs/cdktf/typescript/d/rds_orderable_db_instance.html.markdown +++ b/website/docs/cdktf/typescript/d/rds_orderable_db_instance.html.markdown @@ -67,6 +67,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `availabilityZoneGroup` - (Optional) Availability zone group. * `engineLatestVersion` - (Optional) When set to `true`, the data source attempts to return the most recent version matching the other criteria you provide. You must use `engineLatestVersion` with `preferredInstanceClasses` and/or `preferredEngineVersions`. 
Using `engineLatestVersion` will avoid `multiple RDS DB Instance Classes` errors. If you use `engineLatestVersion` with `preferredInstanceClasses`, the data source returns the latest version for the _first_ matching instance class (instance class priority). **Note:** The data source uses a best-effort approach at selecting the latest version but due to the complexity of version identifiers across engines, using `engineLatestVersion` may _not_ return the latest version in every situation. * `engineVersion` - (Optional) Version of the DB engine. If none is provided, the data source tries to use the AWS-defined default version that matches any other criteria. @@ -105,4 +106,4 @@ This data source exports the following attributes in addition to the arguments a * `multiAzCapable` - Whether a DB instance is Multi-AZ capable. * `outpostCapable` - Whether a DB instance supports RDS on Outposts. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/rds_reserved_instance_offering.html.markdown b/website/docs/cdktf/typescript/d/rds_reserved_instance_offering.html.markdown index 22ecfbe2db97..3b9265f4275c 100644 --- a/website/docs/cdktf/typescript/d/rds_reserved_instance_offering.html.markdown +++ b/website/docs/cdktf/typescript/d/rds_reserved_instance_offering.html.markdown @@ -42,11 +42,12 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `dbInstanceClass` - (Required) DB instance class for the reserved DB instance. * `duration` - (Required) Duration of the reservation in years or seconds. 
Valid values are `1`, `3`, `31536000`, `94608000` * `multiAz` - (Required) Whether the reservation applies to Multi-AZ deployments. * `offeringType` - (Required) Offering type of this reserved DB instance. Valid values are `No Upfront`, `Partial Upfront`, `All Upfront`. -* `productDescription` - (Required) Description of the reserved DB instance. +* `productDescription` - (Required) Description of the reserved DB instance. Example values are `postgresql`, `aurora-postgresql`, `mysql`, `aurora-mysql`, `mariadb`. ## Attribute Reference @@ -57,4 +58,4 @@ This data source exports the following attributes in addition to the arguments a * `fixedPrice` - Fixed price charged for this reserved DB instance. * `offeringId` - Unique identifier for the reservation. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/redshift_cluster.html.markdown b/website/docs/cdktf/typescript/d/redshift_cluster.html.markdown index 4832fe3a7865..970157af9c91 100644 --- a/website/docs/cdktf/typescript/d/redshift_cluster.html.markdown +++ b/website/docs/cdktf/typescript/d/redshift_cluster.html.markdown @@ -64,6 +64,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
* `clusterIdentifier` - (Required) Cluster identifier ## Attribute Reference @@ -116,4 +117,4 @@ Cluster nodes (for `clusterNodes`) support the following attributes: * `privateIpAddress` - Private IP address of a node within a cluster * `publicIpAddress` - Public IP address of a node within a cluster - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/redshift_cluster_credentials.html.markdown b/website/docs/cdktf/typescript/d/redshift_cluster_credentials.html.markdown index 7a86635b4ff1..2549a9531109 100644 --- a/website/docs/cdktf/typescript/d/redshift_cluster_credentials.html.markdown +++ b/website/docs/cdktf/typescript/d/redshift_cluster_credentials.html.markdown @@ -41,6 +41,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `autoCreate` - (Optional) Create a database user with the name specified for the user named in `dbUser` if one does not exist. * `clusterIdentifier` - (Required) Unique identifier of the cluster that contains the database for which your are requesting credentials. * `dbName` - (Optional) Name of a database that DbUser is authorized to log on to. If `dbName` is not specified, `dbUser` can log on to any existing database. @@ -55,4 +56,4 @@ This data source exports the following attributes in addition to the arguments a * `dbPassword` - Temporary password that authorizes the user name returned by `dbUser` to log on to the database `dbName`. * `expiration` - Date and time the password in `dbPassword` expires. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/redshift_data_shares.html.markdown b/website/docs/cdktf/typescript/d/redshift_data_shares.html.markdown index 5d4043fe33ca..8c6e41787c48 100644 --- a/website/docs/cdktf/typescript/d/redshift_data_shares.html.markdown +++ b/website/docs/cdktf/typescript/d/redshift_data_shares.html.markdown @@ -36,7 +36,9 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -This data source does not support any arguments. +This data source supports the following arguments: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). ## Attribute Reference @@ -51,4 +53,4 @@ This data source exports the following attributes in addition to the arguments a * `managedBy` - Identifier of a datashare to show its managing entity. * `producerArn` - ARN (Amazon Resource Name) of the producer. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/redshift_orderable_cluster.html.markdown b/website/docs/cdktf/typescript/d/redshift_orderable_cluster.html.markdown index 75d17f87c219..0669192ed20c 100644 --- a/website/docs/cdktf/typescript/d/redshift_orderable_cluster.html.markdown +++ b/website/docs/cdktf/typescript/d/redshift_orderable_cluster.html.markdown @@ -39,6 +39,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
* `clusterType` - (Optional) Reshift Cluster typeE.g., `multi-node` or `single-node` * `clusterVersion` - (Optional) Redshift Cluster versionE.g., `1.0` * `nodeType` - (Optional) Redshift Cluster node typeE.g., `dc2.8xlarge` @@ -50,4 +51,4 @@ This data source exports the following attributes in addition to the arguments a * `availabilityZones` - List of Availability Zone names where the Redshift Cluster is available. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/redshift_producer_data_shares.html.markdown b/website/docs/cdktf/typescript/d/redshift_producer_data_shares.html.markdown index 33b6fd97b1ec..d62d4d3d004a 100644 --- a/website/docs/cdktf/typescript/d/redshift_producer_data_shares.html.markdown +++ b/website/docs/cdktf/typescript/d/redshift_producer_data_shares.html.markdown @@ -44,6 +44,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `status` - (Optional) Status of a datashare in the producer. Valid values are `ACTIVE`, `AUTHORIZED`, `PENDING_AUTHORIZATION`, `DEAUTHORIZED`, and `REJECTED`. Omit this argument to return all statuses. ## Attribute Reference @@ -59,4 +60,4 @@ This data source exports the following attributes in addition to the arguments a * `managedBy` - Identifier of a datashare to show its managing entity. * `producerArn` - ARN (Amazon Resource Name) of the producer. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/redshift_subnet_group.html.markdown b/website/docs/cdktf/typescript/d/redshift_subnet_group.html.markdown index 66ad35a229e7..c9135a1bfba1 100644 --- a/website/docs/cdktf/typescript/d/redshift_subnet_group.html.markdown +++ b/website/docs/cdktf/typescript/d/redshift_subnet_group.html.markdown @@ -38,6 +38,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the cluster subnet group for which information is requested. ## Attribute Reference @@ -50,4 +51,4 @@ This data source exports the following attributes in addition to the arguments a * `subnetIds` - An array of VPC subnet IDs. * `tags` - Tags associated to the Subnet Group - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/redshiftserverless_credentials.html.markdown b/website/docs/cdktf/typescript/d/redshiftserverless_credentials.html.markdown index 96342e92b1b3..09a86cdee0b4 100644 --- a/website/docs/cdktf/typescript/d/redshiftserverless_credentials.html.markdown +++ b/website/docs/cdktf/typescript/d/redshiftserverless_credentials.html.markdown @@ -40,6 +40,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
* `workgroupName` - (Required) The name of the workgroup associated with the database. * `dbName` - (Optional) The name of the database to get temporary authorization to log on to. * `durationSeconds` - (Optional) The number of seconds until the returned temporary password expires. The minimum is 900 seconds, and the maximum is 3600 seconds. @@ -52,4 +53,4 @@ This data source exports the following attributes in addition to the arguments a * `dbUser` - A database user name that is authorized to log on to the database `dbName` using the password `dbPassword` . If the specified `dbUser` exists in the database, the new user name has the same database privileges as the user named in `dbUser` . By default, the user is added to PUBLIC. the user doesn't exist in the database. * `expiration` - Date and time the password in `dbPassword` expires. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/redshiftserverless_namespace.html.markdown b/website/docs/cdktf/typescript/d/redshiftserverless_namespace.html.markdown index 5f9a97fdaf78..7a56665b434d 100644 --- a/website/docs/cdktf/typescript/d/redshiftserverless_namespace.html.markdown +++ b/website/docs/cdktf/typescript/d/redshiftserverless_namespace.html.markdown @@ -38,6 +38,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `namespaceName` - (Required) The name of the namespace. ## Attribute Reference @@ -53,4 +54,4 @@ This data source exports the following attributes in addition to the arguments a * `logExports` - The types of logs the namespace can export. 
Available export types are `userlog`, `connectionlog`, and `useractivitylog`. * `namespaceId` - The Redshift Namespace ID. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/redshiftserverless_workgroup.html.markdown b/website/docs/cdktf/typescript/d/redshiftserverless_workgroup.html.markdown index 439f9c3253b4..fb1857f1ed34 100644 --- a/website/docs/cdktf/typescript/d/redshiftserverless_workgroup.html.markdown +++ b/website/docs/cdktf/typescript/d/redshiftserverless_workgroup.html.markdown @@ -40,8 +40,9 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -The following arguments are required: +This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `workgroupName` - (Required) The name of the workgroup associated with the database. ## Attribute Reference @@ -55,7 +56,7 @@ This data source exports the following attributes in addition to the arguments a * `publiclyAccessible` - A value that specifies whether the workgroup can be accessed from a public network. * `securityGroupIds` - An array of security group IDs to associate with the workgroup. * `subnetIds` - An array of VPC subnet IDs to associate with the workgroup. When set, must contain at least three subnets spanning three Availability Zones. A minimum number of IP addresses is required and scales with the Base Capacity. For more information, see the following [AWS document](https://docs.aws.amazon.com/redshift/latest/mgmt/serverless-known-issues.html). -* `track_name` - The name of the track for the workgroup. +* `trackName` - The name of the track for the workgroup. * `workgroupId` - The Redshift Workgroup ID. 
### Endpoint @@ -77,4 +78,4 @@ This data source exports the following attributes in addition to the arguments a * `privateIpAddress` - The IPv4 address of the network interface within the subnet. * `subnetId` - The unique identifier of the subnet. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/region.html.markdown b/website/docs/cdktf/typescript/d/region.html.markdown index ba6372c44c29..827b06006f01 100644 --- a/website/docs/cdktf/typescript/d/region.html.markdown +++ b/website/docs/cdktf/typescript/d/region.html.markdown @@ -3,24 +3,24 @@ subcategory: "Meta Data Sources" layout: "aws" page_title: "AWS: aws_region" description: |- - Provides details about a specific service region + Provides details about a specific AWS Region --- # Data Source: aws_region -`awsRegion` provides details about a specific AWS region. +`awsRegion` provides details about a specific AWS Region. -As well as validating a given region name this resource can be used to -discover the name of the region configured within the provider. The latter +As well as validating a given Region name this resource can be used to +discover the name of the Region configured within the provider. The latter can be useful in a child module which is inheriting an AWS provider configuration from its parent module. ## Example Usage The following example shows how the resource might be used to obtain -the name of the AWS region configured on the provider. +the name of the AWS Region configured on the provider. ```typescript // DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug @@ -44,8 +44,9 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: -* `name` - (Optional) Full name of the region to select. +* `region` - (Optional) Full name of the region to select (e.g. 
`us-east-1`), and the region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `endpoint` - (Optional) EC2 endpoint of the region to select. +* `name` - (Optional, **Deprecated**) Full name of the region to select. Use `region` instead. ## Attribute Reference @@ -53,4 +54,4 @@ This data source exports the following attributes in addition to the arguments a * `description` - Region's description in this format: "Location (Region name)". - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/resourceexplorer2_search.html.markdown b/website/docs/cdktf/typescript/d/resourceexplorer2_search.html.markdown index 514f3bcdef45..ee7cc6657718 100644 --- a/website/docs/cdktf/typescript/d/resourceexplorer2_search.html.markdown +++ b/website/docs/cdktf/typescript/d/resourceexplorer2_search.html.markdown @@ -44,6 +44,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `viewArn` - (Optional) Specifies the Amazon resource name (ARN) of the view to use for the query. If you don't specify a value for this parameter, then the operation automatically uses the default view for the AWS Region in which you called this operation. If the Region either doesn't have a default view or if you don't have permission to use the default view, then the operation fails with a `401 Unauthorized` exception. 
## Attribute Reference @@ -75,4 +76,4 @@ This data source exports the following attributes in addition to the arguments a * `last_reported_at` - The date and time that the information about this resource property was last updated. * `name` - Name of this property of the resource. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/resourcegroupstaggingapi_resources.html.markdown b/website/docs/cdktf/typescript/d/resourcegroupstaggingapi_resources.html.markdown index 089ffc71535e..170a6a92488c 100644 --- a/website/docs/cdktf/typescript/d/resourcegroupstaggingapi_resources.html.markdown +++ b/website/docs/cdktf/typescript/d/resourcegroupstaggingapi_resources.html.markdown @@ -87,6 +87,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `excludeCompliantResources` - (Optional) Specifies whether to exclude resources that are compliant with the tag policy. You can use this parameter only if the `includeComplianceDetails` argument is also set to `true`. * `includeComplianceDetails` - (Optional) Specifies whether to include details regarding the compliance with the effective tag policy. * `tagFilter` - (Optional) Specifies a list of Tag Filters (keys and values) to restrict the output to only those resources that have the specified tag and, if included, the specified value. See [Tag Filter](#tag-filter) below. Conflicts with `resourceArnList`. @@ -115,4 +116,4 @@ This data source exports the following attributes in addition to the arguments a * `resourceArn` - ARN of the resource. * `tags` - Map of tags assigned to the resource. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/route.html.markdown b/website/docs/cdktf/typescript/d/route.html.markdown index 65d66737e2bf..e1c7212e91c0 100644 --- a/website/docs/cdktf/typescript/d/route.html.markdown +++ b/website/docs/cdktf/typescript/d/route.html.markdown @@ -54,6 +54,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `routeTableId` - (Required) ID of the specific Route Table containing the Route entry. * `carrierGatewayId` - (Optional) EC2 Carrier Gateway ID of the Route belonging to the Route Table. * `coreNetworkArn` - (Optional) Core network ARN of the Route belonging to the Route Table. @@ -81,4 +82,4 @@ This data source exports no additional attributes. - `read` - (Default `20m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/route53_resolver_endpoint.html.markdown b/website/docs/cdktf/typescript/d/route53_resolver_endpoint.html.markdown index bea51b59c538..04de2d49fdda 100644 --- a/website/docs/cdktf/typescript/d/route53_resolver_endpoint.html.markdown +++ b/website/docs/cdktf/typescript/d/route53_resolver_endpoint.html.markdown @@ -65,6 +65,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
* `resolverEndpointId` - (Optional) ID of the Route53 Resolver Endpoint. * `filter` - (Optional) One or more name/value pairs to use as filters. There are several valid keys, for a full reference, check out @@ -84,4 +85,4 @@ This data source exports the following attributes in addition to the arguments a [1]: https://docs.aws.amazon.com/Route53/latest/APIReference/API_route53resolver_Filter.html - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/route53_resolver_firewall_config.html.markdown b/website/docs/cdktf/typescript/d/route53_resolver_firewall_config.html.markdown index 25cfaeee2858..5d5c33ba0cb5 100644 --- a/website/docs/cdktf/typescript/d/route53_resolver_firewall_config.html.markdown +++ b/website/docs/cdktf/typescript/d/route53_resolver_firewall_config.html.markdown @@ -42,6 +42,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `resourceId` - (Required) The ID of the VPC from Amazon VPC that the configuration is for. ## Attribute Reference @@ -52,4 +53,4 @@ This data source exports the following attributes in addition to the arguments a * `id` - The ID of the firewall configuration. * `ownerId` - The Amazon Web Services account ID of the owner of the VPC that this firewall configuration applies to. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/route53_resolver_firewall_domain_list.html.markdown b/website/docs/cdktf/typescript/d/route53_resolver_firewall_domain_list.html.markdown index a483662c7706..e307f3c465f9 100644 --- a/website/docs/cdktf/typescript/d/route53_resolver_firewall_domain_list.html.markdown +++ b/website/docs/cdktf/typescript/d/route53_resolver_firewall_domain_list.html.markdown @@ -42,6 +42,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `firewallDomainListId` - (Required) The ID of the domain list. ## Attribute Reference @@ -58,4 +59,4 @@ This data source exports the following attributes in addition to the arguments a * `status` - The status of the domain list. * `statusMessage` - Additional information about the status of the list, if available. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/route53_resolver_firewall_rule_group.html.markdown b/website/docs/cdktf/typescript/d/route53_resolver_firewall_rule_group.html.markdown index 336099f3a82c..2130f537b102 100644 --- a/website/docs/cdktf/typescript/d/route53_resolver_firewall_rule_group.html.markdown +++ b/website/docs/cdktf/typescript/d/route53_resolver_firewall_rule_group.html.markdown @@ -42,6 +42,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `firewallRuleGroupId` - (Required) The ID of the rule group. ## Attribute Reference @@ -59,4 +60,4 @@ This data source exports the following attributes in addition to the arguments a * `status` - The status of the rule group. * `statusMessage` - Additional information about the status of the rule group, if available. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/route53_resolver_firewall_rule_group_association.html.markdown b/website/docs/cdktf/typescript/d/route53_resolver_firewall_rule_group_association.html.markdown index 49a21e73c08b..f913acfce132 100644 --- a/website/docs/cdktf/typescript/d/route53_resolver_firewall_rule_group_association.html.markdown +++ b/website/docs/cdktf/typescript/d/route53_resolver_firewall_rule_group_association.html.markdown @@ -42,6 +42,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `firewallRuleGroupAssociationId` - (Required) The identifier for the association. ## Attribute Reference @@ -61,4 +62,4 @@ This data source exports the following attributes in addition to the arguments a * `statusMessage` - Additional information about the status of the response, if available. * `vpcId` - The unique identifier of the VPC that is associated with the rule group. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/route53_resolver_firewall_rules.html.markdown b/website/docs/cdktf/typescript/d/route53_resolver_firewall_rules.html.markdown index 03d9d3894cf3..b5d9462e292d 100644 --- a/website/docs/cdktf/typescript/d/route53_resolver_firewall_rules.html.markdown +++ b/website/docs/cdktf/typescript/d/route53_resolver_firewall_rules.html.markdown @@ -42,6 +42,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `firewallRuleGroupId` - (Required) The unique identifier of the firewall rule group that you want to retrieve the rules for. * `action` - (Optional) The action that DNS Firewall should take on a DNS query when it matches one of the domains in the rule's domain list. * `priority` - (Optional) The setting that determines the processing order of the rules in a rule group. @@ -64,4 +65,4 @@ This data source exports the following attributes in addition to the arguments a * `modificationTime` - The date and time that the rule was last modified, in Unix time format and Coordinated Universal Time (UTC). * `name` - The name of the rule. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/route53_resolver_query_log_config.html.markdown b/website/docs/cdktf/typescript/d/route53_resolver_query_log_config.html.markdown index c43c18b20b36..f33713f257aa 100644 --- a/website/docs/cdktf/typescript/d/route53_resolver_query_log_config.html.markdown +++ b/website/docs/cdktf/typescript/d/route53_resolver_query_log_config.html.markdown @@ -67,6 +67,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `resolverQueryLogConfigId` - (Optional) ID of the Route53 Resolver Query Logging Configuration. * `filter` - (Optional) One or more name/value pairs to use as filters. There are several valid keys, for a full reference, check out @@ -86,4 +87,4 @@ This data source exports the following attributes in addition to the arguments a [1]: https://docs.aws.amazon.com/Route53/latest/APIReference/API_route53resolver_Filter.html - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/route53_resolver_rule.html.markdown b/website/docs/cdktf/typescript/d/route53_resolver_rule.html.markdown index bcaac3873dfd..3af8af3dd23c 100644 --- a/website/docs/cdktf/typescript/d/route53_resolver_rule.html.markdown +++ b/website/docs/cdktf/typescript/d/route53_resolver_rule.html.markdown @@ -41,6 +41,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `domainName` - (Optional) Domain name the desired resolver rule forwards DNS queries for. Conflicts with `resolverRuleId`. * `name` - (Optional) Friendly name of the desired resolver rule. Conflicts with `resolverRuleId`. * `resolverEndpointId` (Optional) ID of the outbound resolver endpoint of the desired resolver rule. Conflicts with `resolverRuleId`. @@ -58,4 +59,4 @@ This data source exports the following attributes in addition to the arguments a Values are `NOT_SHARED`, `SHARED_BY_ME` or `SHARED_WITH_ME` * `tags` - Map of tags assigned to the resolver rule. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/route53_resolver_rules.html.markdown b/website/docs/cdktf/typescript/d/route53_resolver_rules.html.markdown index f7be786f67c0..e61cca7bb75e 100644 --- a/website/docs/cdktf/typescript/d/route53_resolver_rules.html.markdown +++ b/website/docs/cdktf/typescript/d/route53_resolver_rules.html.markdown @@ -89,6 +89,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `nameRegex` - (Optional) Regex string to filter resolver rule names. The filtering is done locally, so could have a performance impact if the result is large. This argument should be used along with other arguments to limit the number of results returned. @@ -104,4 +105,4 @@ This data source exports the following attributes in addition to the arguments a * `id` - AWS Region. * `resolverRuleIds` - IDs of the matched resolver rules. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/route53_traffic_policy_document.html.markdown b/website/docs/cdktf/typescript/d/route53_traffic_policy_document.html.markdown index f0eabb202089..f3680edf0da6 100644 --- a/website/docs/cdktf/typescript/d/route53_traffic_policy_document.html.markdown +++ b/website/docs/cdktf/typescript/d/route53_traffic_policy_document.html.markdown @@ -36,11 +36,11 @@ class MyConvertedCode extends TerraformStack { { id: "my_elb", type: "elastic-load-balancer", - value: "elb-111111.${" + current.name + "}.elb.amazonaws.com", + value: "elb-111111.${" + current.region + "}.elb.amazonaws.com", }, { id: "site_down_banner", - region: Token.asString(current.name), + region: Token.asString(current.region), type: "s3-website", value: "www.example.com", }, @@ -248,4 +248,4 @@ This data source exports the following attributes in addition to the arguments a * `json` - Standard JSON policy document rendered based on the arguments above. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/route53profiles_profiles.html.markdown b/website/docs/cdktf/typescript/d/route53profiles_profiles.html.markdown index aff1446c6d74..6721b0421daf 100644 --- a/website/docs/cdktf/typescript/d/route53profiles_profiles.html.markdown +++ b/website/docs/cdktf/typescript/d/route53profiles_profiles.html.markdown @@ -36,7 +36,9 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -This data source does not support any arguments. +This data source supports the following arguments: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
## Attribute Reference @@ -48,4 +50,4 @@ This data source exports the following attributes in addition to the arguments a * `name` - Name of the Profile. * `shareStatus` - Share status of the Profile. Valid values [AWS docs](https://docs.aws.amazon.com/Route53/latest/APIReference/API_route53profiles_Profile.html) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/route_table.html.markdown b/website/docs/cdktf/typescript/d/route_table.html.markdown index 039a9f4968f0..90ab925f9e6f 100644 --- a/website/docs/cdktf/typescript/d/route_table.html.markdown +++ b/website/docs/cdktf/typescript/d/route_table.html.markdown @@ -51,6 +51,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `filter` - (Optional) Configuration block. Detailed below. * `gatewayId` - (Optional) ID of an Internet Gateway or Virtual Private Gateway which is connected to the Route Table (not exported if not passed as a parameter). * `routeTableId` - (Optional) ID of the specific Route Table to retrieve. 
@@ -116,4 +117,4 @@ Associations are also exported with the following attributes: - `read` - (Default `20m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/route_tables.html.markdown b/website/docs/cdktf/typescript/d/route_tables.html.markdown index d27be277b929..7eea94337770 100644 --- a/website/docs/cdktf/typescript/d/route_tables.html.markdown +++ b/website/docs/cdktf/typescript/d/route_tables.html.markdown @@ -62,6 +62,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `filter` - (Optional) Custom filter block as described below. * `vpcId` - (Optional) VPC ID that you want to filter from. * `tags` - (Optional) Map of tags, each pair of which must exactly match @@ -89,4 +90,4 @@ This data source exports the following attributes in addition to the arguments a - `read` - (Default `20m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/s3_access_point.html.markdown b/website/docs/cdktf/typescript/d/s3_access_point.html.markdown new file mode 100644 index 000000000000..73e4220023f1 --- /dev/null +++ b/website/docs/cdktf/typescript/d/s3_access_point.html.markdown @@ -0,0 +1,66 @@ +--- +subcategory: "S3 Control" +layout: "aws" +page_title: "AWS: aws_s3_access_point" +description: |- + Provides details about a specific S3 access point +--- + + + +# Data Source: aws_s3_access_point + +Provides details about a specific S3 access point. + +## Example Usage + +```typescript +// DO NOT EDIT. 
Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. + */ +import { DataAwsS3AccessPoint } from "./.gen/providers/aws/data-aws-s3-access-point"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + new DataAwsS3AccessPoint(this, "example", { + name: "example-access-point", + }); + } +} + +``` + +## Argument Reference + +This data source supports the following arguments: + +* `accountId` - (Optional) AWS account ID for the account that owns the specified access point. +* `name` - (Required) Name of the access point. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). + +## Attribute Reference + +This data source exports the following attributes in addition to the arguments above: + +* `alias` - Access point alias. +* `arn` - Access point ARN. +* `bucket` - Name of the bucket associated with the access point. +* `bucketAccountId` - AWS account ID associated with the S3 bucket associated with the access point. +* `dataSourceId` - Unique identifier for the data source of the access point. +* `dataSourceType` - Type of the data source that the access point is attached to. +* `endpoints` - VPC endpoint for the access point. +* `networkOrigin` - Indicates whether the access point allows access from the public Internet. +* `publicAccessBlockConfiguration` - `PublicAccessBlock` configuration for the access point. + * `blockPublicAcls` - Whether Amazon S3 blocks public ACLs for buckets in this account. 
+ * `blockPublicPolicy` - Whether Amazon S3 blocks public bucket policies for buckets in this account. + * `ignorePublicAcls` - Whether Amazon S3 ignores public ACLs for buckets in this account. + * `restrictPublicBuckets` - Whether Amazon S3 restricts public bucket policies for buckets in this account. +* `tags` - Tags assigned to the access point. +* `vpcConfiguration` - VPC configuration for the access point. + * `vpcId` - Access point will only allow connections from this VPC. + + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/s3_bucket.html.markdown b/website/docs/cdktf/typescript/d/s3_bucket.html.markdown index 07c822cb014b..e7c3e4bfea5a 100644 --- a/website/docs/cdktf/typescript/d/s3_bucket.html.markdown +++ b/website/docs/cdktf/typescript/d/s3_bucket.html.markdown @@ -102,6 +102,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `bucket` - (Required) Name of the bucket ## Attribute Reference @@ -111,10 +112,10 @@ This data source exports the following attributes in addition to the arguments a * `id` - Name of the bucket. * `arn` - ARN of the bucket. Will be of format `arn:aws:s3:::bucketname`. * `bucketDomainName` - Bucket domain name. Will be of format `bucketname.s3.amazonaws.com`. +* `bucketRegion` - AWS region this bucket resides in. * `bucketRegionalDomainName` - The bucket region-specific domain name. The bucket domain name including the region name. Please refer to the [S3 endpoints reference](https://docs.aws.amazon.com/general/latest/gr/s3.html#s3_region) for format. Note: AWS CloudFront allows specifying an S3 region-specific endpoint when creating an S3 origin. 
This will prevent redirect issues from CloudFront to the S3 Origin URL. For more information, see the [Virtual Hosted-Style Requests for Other Regions](https://docs.aws.amazon.com/AmazonS3/latest/userguide/VirtualHosting.html#deprecated-global-endpoint) section in the AWS S3 User Guide. * `hostedZoneId` - The [Route 53 Hosted Zone ID](https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_website_region_endpoints) for this bucket's region. -* `region` - AWS region this bucket resides in. * `websiteEndpoint` - Website endpoint, if the bucket is configured with a website. If not, this will be an empty string. * `websiteDomain` - Domain of the website endpoint, if the bucket is configured with a website. If not, this will be an empty string. This is used to create Route 53 alias records. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/s3_bucket_object.html.markdown b/website/docs/cdktf/typescript/d/s3_bucket_object.html.markdown index 13886f5fb6ea..260f07f96c4a 100644 --- a/website/docs/cdktf/typescript/d/s3_bucket_object.html.markdown +++ b/website/docs/cdktf/typescript/d/s3_bucket_object.html.markdown @@ -106,6 +106,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `bucket` - (Required) Name of the bucket to read the object from. 
Alternatively, an [S3 access point](https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) ARN can be specified * `key` - (Required) Full path to the object inside the bucket * `versionId` - (Optional) Specific version ID of the object returned (defaults to latest version) @@ -139,4 +140,4 @@ This data source exports the following attributes in addition to the arguments a -> **Note:** Terraform ignores all leading `/`s in the object's `key` and treats multiple `/`s in the rest of the object's `key` as a single `/`, so values of `/index.html` and `index.html` correspond to the same S3 object as do `first//second///third//` and `first/second/third/`. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/s3_bucket_objects.html.markdown b/website/docs/cdktf/typescript/d/s3_bucket_objects.html.markdown index d441a01c5d95..f14245de7ae9 100644 --- a/website/docs/cdktf/typescript/d/s3_bucket_objects.html.markdown +++ b/website/docs/cdktf/typescript/d/s3_bucket_objects.html.markdown @@ -59,6 +59,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `bucket` - (Required) Lists object keys in this S3 bucket. Alternatively, an [S3 access point](https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) ARN can be specified * `prefix` - (Optional) Limits results to object keys with this prefix (Default: none) * `delimiter` - (Optional) Character used to group keys (Default: none) @@ -76,4 +77,4 @@ This data source exports the following attributes in addition to the arguments a * `id` - S3 Bucket. 
* `owners` - List of strings representing object owner IDs (see `fetchOwner` above) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/s3_bucket_policy.html.markdown b/website/docs/cdktf/typescript/d/s3_bucket_policy.html.markdown index ed177d4e512b..bfc354e8f49e 100644 --- a/website/docs/cdktf/typescript/d/s3_bucket_policy.html.markdown +++ b/website/docs/cdktf/typescript/d/s3_bucket_policy.html.markdown @@ -43,6 +43,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `bucket` - (Required) Bucket name. ## Attribute Reference @@ -51,4 +52,4 @@ This data source exports the following attributes in addition to the arguments a * `policy` - IAM bucket policy. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/s3_directory_buckets.html.markdown b/website/docs/cdktf/typescript/d/s3_directory_buckets.html.markdown index 7015e73ba2bf..3ff88e5359a8 100644 --- a/website/docs/cdktf/typescript/d/s3_directory_buckets.html.markdown +++ b/website/docs/cdktf/typescript/d/s3_directory_buckets.html.markdown @@ -34,7 +34,9 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -This data source does not support any arguments. +This data source supports the following arguments: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
## Attribute Reference @@ -43,4 +45,4 @@ This data source exports the following attributes in addition to the arguments a * `arns` - Bucket ARNs. * `buckets` - Buckets names. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/s3_object.html.markdown b/website/docs/cdktf/typescript/d/s3_object.html.markdown index 9afac57722aa..7bd00f658d39 100644 --- a/website/docs/cdktf/typescript/d/s3_object.html.markdown +++ b/website/docs/cdktf/typescript/d/s3_object.html.markdown @@ -101,6 +101,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `bucket` - (Required) Name of the bucket to read the object from. Alternatively, an [S3 access point](https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) ARN can be specified * `checksumMode` - (Optional) To retrieve the object's checksum, this argument must be `ENABLED`. If you enable `checksumMode` and the object is encrypted with KMS, you must have permission to use the `kms:Decrypt` action. Valid values: `ENABLED` * `key` - (Required) Full path to the object inside the bucket @@ -141,4 +142,4 @@ This data source exports the following attributes in addition to the arguments a -> **Note:** Terraform ignores all leading `/`s in the object's `key` and treats multiple `/`s in the rest of the object's `key` as a single `/`, so values of `/index.html` and `index.html` correspond to the same S3 object as do `first//second///third//` and `first/second/third/`. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/s3_objects.html.markdown b/website/docs/cdktf/typescript/d/s3_objects.html.markdown index 740ecdf8da2d..b629beb21e31 100644 --- a/website/docs/cdktf/typescript/d/s3_objects.html.markdown +++ b/website/docs/cdktf/typescript/d/s3_objects.html.markdown @@ -57,6 +57,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `bucket` - (Required) Lists object keys in this S3 bucket. Alternatively, an [S3 access point](https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) ARN can be specified * `prefix` - (Optional) Limits results to object keys with this prefix (Default: none) * `delimiter` - (Optional) Character used to group keys (Default: none) @@ -76,4 +77,4 @@ This data source exports the following attributes in addition to the arguments a * `owners` - List of strings representing object owner IDs (see `fetchOwner` above) * `requestCharged` - If present, indicates that the requester was successfully charged for the request. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/s3control_multi_region_access_point.html.markdown b/website/docs/cdktf/typescript/d/s3control_multi_region_access_point.html.markdown index e89bf3a5c87b..63b0b84a8a69 100644 --- a/website/docs/cdktf/typescript/d/s3control_multi_region_access_point.html.markdown +++ b/website/docs/cdktf/typescript/d/s3control_multi_region_access_point.html.markdown @@ -38,6 +38,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `accountId` - (Optional) The AWS account ID of the S3 Multi-Region Access Point. Defaults to automatically determined account ID of the Terraform AWS provider. * `name` - (Required) The name of the Multi-Region Access Point. @@ -72,4 +73,4 @@ This data source exports the following attributes in addition to the arguments a * `bucketAccountId` - The AWS account ID that owns the bucket. * `region` - The name of the region. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/sagemaker_prebuilt_ecr_image.html.markdown b/website/docs/cdktf/typescript/d/sagemaker_prebuilt_ecr_image.html.markdown index 5f0621e9605c..543ea4dbd907 100644 --- a/website/docs/cdktf/typescript/d/sagemaker_prebuilt_ecr_image.html.markdown +++ b/website/docs/cdktf/typescript/d/sagemaker_prebuilt_ecr_image.html.markdown @@ -46,7 +46,7 @@ This data source supports the following arguments: * `repositoryName` - (Required) Name of the repository, which is generally the algorithm or library. 
Values include `autogluon-inference`, `autogluon-training`, `blazingtext`, `djl-inference`, `factorization-machines`, `forecasting-deepar`, `huggingface-pytorch-inference`, `huggingface-pytorch-inference-neuron`, `huggingface-pytorch-inference-neuronx`, `huggingface-pytorch-tgi-inference`, `huggingface-pytorch-training`, `huggingface-pytorch-training-neuronx`, `huggingface-pytorch-trcomp-training`, `huggingface-tensorflow-inference`, `huggingface-tensorflow-training`, `huggingface-tensorflow-trcomp-training`, `image-classification`, `image-classification-neo`, `ipinsights`, `kmeans`, `knn`, `lda`, `linear-learner`, `mxnet-inference`, `mxnet-inference-eia`, `mxnet-training`, `ntm`, `object-detection`, `object2vec`, `pca`, `pytorch-inference`, `pytorch-inference-eia`, `pytorch-inference-graviton`, `pytorch-inference-neuronx`, `pytorch-training`, `pytorch-training-neuronx`, `pytorch-trcomp-training`, `randomcutforest`, `sagemaker-base-python`, `sagemaker-chainer`, `sagemaker-clarify-processing`, `sagemaker-data-wrangler-container`, `sagemaker-debugger-rules`, `sagemaker-geospatial-v1-0`, `sagemaker-inference-mxnet`, `sagemaker-inference-pytorch`, `sagemaker-inference-tensorflow`, `sagemaker-model-monitor-analyzer`, `sagemaker-mxnet`, `sagemaker-mxnet-eia`, `sagemaker-mxnet-serving`, `sagemaker-mxnet-serving-eia`, `sagemaker-neo-mxnet`, `sagemaker-neo-pytorch`, `sagemaker-neo-tensorflow`, `sagemaker-pytorch`, `sagemaker-rl-coach-container`, `sagemaker-rl-mxnet`, `sagemaker-rl-ray-container`, `sagemaker-rl-tensorflow`, `sagemaker-rl-vw-container`, `sagemaker-scikit-learn`, `sagemaker-spark-processing`, `sagemaker-sparkml-serving`, `sagemaker-tensorflow`, `sagemaker-tensorflow-eia`, `sagemaker-tensorflow-scriptmode`, `sagemaker-tensorflow-serving`, `sagemaker-tensorflow-serving-eia`, `sagemaker-tritonserver`, `sagemaker-xgboost`, `semantic-segmentation`, `seq2seq`, `stabilityai-pytorch-inference`, `tei`, `tei-cpu`, `tensorflow-inference`, `tensorflow-inference-eia`, 
`tensorflow-inference-graviton`, `tensorflow-training`, and `xgboost-neo`. * `dnsSuffix` - (Optional) DNS suffix to use in the registry path. If not specified, the AWS provider sets it to the DNS suffix for the current region. * `imageTag` - (Optional) Image tag for the Docker image. If not specified, the AWS provider sets the value to `1`, which for many repositories indicates the latest version. Some repositories, such as XGBoost, do not support `1` or `latest` and specific version must be used. -* `region` (Optional) - Region to use in the registry path. If not specified, the AWS provider sets it to the current region. +* `region` - (Optional) Region to use in the registry path. Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). ## Attribute Reference @@ -55,4 +55,4 @@ This data source exports the following attributes in addition to the arguments a * `registryId` - Account ID containing the image. For example, `469771592824`. * `registryPath` - Docker image URL. For example, `341280168497.dkr.ecr.ca-central-1.amazonaws.com/sagemaker-sparkml-serving:2.4`. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/secretsmanager_random_password.html.markdown b/website/docs/cdktf/typescript/d/secretsmanager_random_password.html.markdown index 91973e88215d..eceb07dfe788 100644 --- a/website/docs/cdktf/typescript/d/secretsmanager_random_password.html.markdown +++ b/website/docs/cdktf/typescript/d/secretsmanager_random_password.html.markdown @@ -39,6 +39,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `excludeCharacters` - (Optional) String of the characters that you don't want in the password. * `excludeLowercase` - (Optional) Specifies whether to exclude lowercase letters from the password. * `excludeNumbers` - (Optional) Specifies whether to exclude numbers from the password. @@ -54,4 +55,4 @@ This data source exports the following attributes in addition to the arguments a * `randomPassword` - Random password. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/secretsmanager_secret.html.markdown b/website/docs/cdktf/typescript/d/secretsmanager_secret.html.markdown index 947152475f6b..57c9dc08172f 100644 --- a/website/docs/cdktf/typescript/d/secretsmanager_secret.html.markdown +++ b/website/docs/cdktf/typescript/d/secretsmanager_secret.html.markdown @@ -62,6 +62,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `arn` - (Optional) ARN of the secret to retrieve. * `name` - (Optional) Name of the secret to retrieve. @@ -78,4 +79,4 @@ This data source exports the following attributes in addition to the arguments a * `policy` - Resource-based policy document that's attached to the secret. * `tags` - Tags of the secret. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/secretsmanager_secret_rotation.html.markdown b/website/docs/cdktf/typescript/d/secretsmanager_secret_rotation.html.markdown index b33a528404b9..ea26dd941e72 100644 --- a/website/docs/cdktf/typescript/d/secretsmanager_secret_rotation.html.markdown +++ b/website/docs/cdktf/typescript/d/secretsmanager_secret_rotation.html.markdown @@ -40,14 +40,21 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `secretId` - (Required) Specifies the secret containing the version that you want to retrieve. You can specify either the ARN or the friendly name of the secret. ## Attribute Reference This data source exports the following attributes in addition to the arguments above: -* `rotationEnabled` - ARN of the secret. -* `rotationLambdaArn` - Decrypted part of the protected secret information that was originally provided as a string. -* `rotationRules` - Decrypted part of the protected secret information that was originally provided as a binary. Base64 encoded. +* `rotationEnabled` - Specifies whether automatic rotation is enabled for this secret. +* `rotationLambdaArn` - Amazon Resource Name (ARN) of the lambda function used for rotation. +* `rotationRules` - Configuration block for rotation rules. See [`rotationRules`](#rotation_rules) below. - \ No newline at end of file +### rotation_rules + +* `automaticallyAfterDays` - Number of days between automatic scheduled rotations of the secret. +* `duration` - Length of the rotation window in hours. 
+* `scheduleExpression` - A `cron()` or `rate()` expression that defines the schedule for rotating the secret. + + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/secretsmanager_secret_version.html.markdown b/website/docs/cdktf/typescript/d/secretsmanager_secret_version.html.markdown index 06df95ae1d38..562a940cc53d 100644 --- a/website/docs/cdktf/typescript/d/secretsmanager_secret_version.html.markdown +++ b/website/docs/cdktf/typescript/d/secretsmanager_secret_version.html.markdown @@ -87,6 +87,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `secretId` - (Required) Specifies the secret containing the version that you want to retrieve. You can specify either the ARN or the friendly name of the secret. * `versionId` - (Optional) Specifies the unique identifier of the version of the secret that you want to retrieve. Overrides `versionStage`. * `versionStage` - (Optional) Specifies the secret version that you want to retrieve by the staging label attached to the version. Defaults to `AWSCURRENT`. @@ -102,4 +103,4 @@ This data source exports the following attributes in addition to the arguments a * `secretBinary` - Decrypted part of the protected secret information that was originally provided as a binary. * `versionId` - Unique identifier of this version of the secret. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/secretsmanager_secret_versions.html.markdown b/website/docs/cdktf/typescript/d/secretsmanager_secret_versions.html.markdown index 1b062d3690df..7d7a83204470 100644 --- a/website/docs/cdktf/typescript/d/secretsmanager_secret_versions.html.markdown +++ b/website/docs/cdktf/typescript/d/secretsmanager_secret_versions.html.markdown @@ -87,6 +87,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `secretId` - (Required) Specifies the secret containing the version that you want to retrieve. You can specify either the ARN or the friendly name of the secret. * `includeDeprecated` - (Optional) If true, all deprecated secret versions are included in the response. If false, no deprecated secret versions are included in the response. If no value is specified, the default value is `false`. @@ -106,4 +107,4 @@ This data source exports the following attributes in addition to the arguments a * `versionId` - Unique version identifier of this version of the secret. * `versionStages` - List of staging labels attached to the version. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/secretsmanager_secrets.html.markdown b/website/docs/cdktf/typescript/d/secretsmanager_secrets.html.markdown index a29ec8979b32..1b221d1cf8ed 100644 --- a/website/docs/cdktf/typescript/d/secretsmanager_secrets.html.markdown +++ b/website/docs/cdktf/typescript/d/secretsmanager_secrets.html.markdown @@ -43,6 +43,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `filter` - (Optional) Configuration block(s) for filtering. Detailed below. ## filter Configuration Block @@ -59,4 +60,4 @@ This data source exports the following attributes in addition to the arguments a * `arns` - Set of ARNs of the matched Secrets Manager secrets. * `names` - Set of names of the matched Secrets Manager secrets. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/security_group.html.markdown b/website/docs/cdktf/typescript/d/security_group.html.markdown index ef00ded0eafa..4261b22d2b16 100644 --- a/website/docs/cdktf/typescript/d/security_group.html.markdown +++ b/website/docs/cdktf/typescript/d/security_group.html.markdown @@ -57,6 +57,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
* `filter` - (Optional) Custom filter block as described below. * `id` - (Optional) Id of the specific security group to retrieve. * `name` - (Optional) Name that the desired security group must have. @@ -92,4 +93,4 @@ The following fields are also exported: - `read` - (Default `20m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/security_groups.html.markdown b/website/docs/cdktf/typescript/d/security_groups.html.markdown index 149964abf65d..0e879821f522 100644 --- a/website/docs/cdktf/typescript/d/security_groups.html.markdown +++ b/website/docs/cdktf/typescript/d/security_groups.html.markdown @@ -70,6 +70,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `tags` - (Optional) Map of tags, each pair of which must exactly match for desired security groups. * `filter` - (Optional) One or more name/value pairs to use as filters. There are several valid keys, for a full reference, check out [describe-security-groups in the AWS CLI reference][1]. 
@@ -90,4 +91,4 @@ This data source exports the following attributes in addition to the arguments a - `read` - (Default `20m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/securityhub_standards_control_associations.html.markdown b/website/docs/cdktf/typescript/d/securityhub_standards_control_associations.html.markdown index 12dc3b369d0c..66325958b372 100644 --- a/website/docs/cdktf/typescript/d/securityhub_standards_control_associations.html.markdown +++ b/website/docs/cdktf/typescript/d/securityhub_standards_control_associations.html.markdown @@ -8,7 +8,7 @@ description: |- -# Resource: aws_securityhub_standards_control_associations +# Data Source: aws_securityhub_standards_control_associations Terraform data source for managing an AWS Security Hub Standards Control Associations. @@ -48,6 +48,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `securityControlId` - (Required) The identifier of the control (identified with `SecurityControlId`, `SecurityControlArn`, or a mix of both parameters). ## Attribute Reference @@ -69,4 +70,4 @@ See [`standardsControlAssociations`](#standards_control_associations-attribute-r * `updatedAt` - Last time that a control's enablement status in a specified standard was updated. * `updatedReason` - Reason for updating a control's enablement status in a specified standard. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/serverlessapplicationrepository_application.html.markdown b/website/docs/cdktf/typescript/d/serverlessapplicationrepository_application.html.markdown index 7c6c03380fe9..549b9617de9e 100644 --- a/website/docs/cdktf/typescript/d/serverlessapplicationrepository_application.html.markdown +++ b/website/docs/cdktf/typescript/d/serverlessapplicationrepository_application.html.markdown @@ -59,6 +59,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `applicationId` - (Required) ARN of the application. * `semanticVersion` - (Optional) Requested version of the application. By default, retrieves the latest version. @@ -72,4 +73,4 @@ This data source exports the following attributes in addition to the arguments a * `sourceCodeUrl` - URL pointing to the source code of the application version. * `templateUrl` - URL pointing to the Cloud Formation template for the application version. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/service.html.markdown b/website/docs/cdktf/typescript/d/service.html.markdown index 6bc27a93537a..a63202fcc3f2 100644 --- a/website/docs/cdktf/typescript/d/service.html.markdown +++ b/website/docs/cdktf/typescript/d/service.html.markdown @@ -31,7 +31,7 @@ class MyConvertedCode extends TerraformStack { super(scope, name); const current = new DataAwsRegion(this, "current", {}); new DataAwsService(this, "test", { - region: Token.asString(current.name), + region: Token.asString(current.region), serviceId: "ec2", }); } @@ -88,8 +88,8 @@ class MyConvertedCode extends TerraformStack { The following arguments are optional: * `dnsName` - (Optional) DNS name of the service (_e.g.,_ `rds.us-east-1.amazonaws.com`). One of `dnsName`, `reverseDnsName`, or `serviceId` is required. -* `partition` - (Optional) Partition corresponding to the region. -* `region` - (Optional) Region of the service (_e.g.,_ `us-west-2`, `ap-northeast-1`). +* `partition` - (Optional) Partition corresponding to the Region. +* `region` - (Optional) Region of the service (_e.g.,_ `us-west-2`, `ap-northeast-1`). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `reverseDnsName` - (Optional) Reverse DNS name of the service (_e.g.,_ `com.amazonaws.us-west-2.s3`). One of `dnsName`, `reverseDnsName`, or `serviceId` is required. * `reverseDnsPrefix` - (Optional) Prefix of the service (_e.g.,_ `com.amazonaws` in AWS Commercial, `cn.com.amazonaws` in AWS China). * `serviceId` - (Optional) Service endpoint ID (_e.g.,_ `s3`, `rds`, `ec2`). One of `dnsName`, `reverseDnsName`, or `serviceId` is required. A service's endpoint ID can be found in the [_AWS General Reference_](https://docs.aws.amazon.com/general/latest/gr/aws-service-information.html). 
@@ -100,4 +100,4 @@ This data source exports the following attributes in addition to the arguments a * `supported` - Whether the service is supported in the region's partition. New services may not be listed immediately as supported. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/service_discovery_dns_namespace.html.markdown b/website/docs/cdktf/typescript/d/service_discovery_dns_namespace.html.markdown index a4f88100d2a9..84ae9709c191 100644 --- a/website/docs/cdktf/typescript/d/service_discovery_dns_namespace.html.markdown +++ b/website/docs/cdktf/typescript/d/service_discovery_dns_namespace.html.markdown @@ -39,6 +39,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the namespace. * `type` - (Required) Type of the namespace. Allowed values are `DNS_PUBLIC` or `DNS_PRIVATE`. @@ -52,4 +53,4 @@ This data source exports the following attributes in addition to the arguments a * `hostedZone` - ID for the hosted zone that Amazon Route 53 creates when you create a namespace. * `tags` - Map of tags for the resource. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/service_discovery_http_namespace.html.markdown b/website/docs/cdktf/typescript/d/service_discovery_http_namespace.html.markdown index 46541c0ef092..874c263367fc 100644 --- a/website/docs/cdktf/typescript/d/service_discovery_http_namespace.html.markdown +++ b/website/docs/cdktf/typescript/d/service_discovery_http_namespace.html.markdown @@ -36,6 +36,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the http namespace. ## Attribute Reference @@ -48,4 +49,4 @@ This data source exports the following attributes in addition to the arguments a * `httpName` - Name of an HTTP namespace. * `tags` - Map of tags for the resource. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/service_discovery_service.html.markdown b/website/docs/cdktf/typescript/d/service_discovery_service.html.markdown index 1f701bdaeeee..c3de706abb7a 100644 --- a/website/docs/cdktf/typescript/d/service_discovery_service.html.markdown +++ b/website/docs/cdktf/typescript/d/service_discovery_service.html.markdown @@ -39,6 +39,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the service. 
* `namespaceId` - (Required) ID of the namespace that the service belongs to. @@ -84,4 +85,4 @@ The `healthCheckCustomConfig` configuration block supports the following argumen * `failureThreshold` - The number of 30-second intervals that you want service discovery to wait before it changes the health status of a service instance. Maximum value of 10. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/service_principal.html.markdown b/website/docs/cdktf/typescript/d/service_principal.html.markdown index d7e2ae980388..04cc995b0f3a 100644 --- a/website/docs/cdktf/typescript/d/service_principal.html.markdown +++ b/website/docs/cdktf/typescript/d/service_principal.html.markdown @@ -43,16 +43,15 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: * `serviceName` - (Required) Name of the service you want to generate a Service Principal Name for. -* `region` - (Optional) Region you'd like the SPN for. By default, uses the current region. +* `region` - (Optional) Region you'd like the SPN for. Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). ## Attribute Reference This data source exports the following attributes in addition to the arguments above: -* `id` - Identifier of the current Service Principal (compound of service, region and suffix). (e.g. `logs.us-east-1.amazonaws.com`in AWS Commercial, `logs.cn-north-1.amazonaws.com.cn` in AWS China). +* `id` - Identifier of the current Service Principal (compound of service, Region and suffix). (e.g. `logs.us-east-1.amazonaws.com` in AWS Commercial, `logs.cn-north-1.amazonaws.com.cn` in AWS China). * `name` - Service Principal Name (e.g., `logs.amazonaws.com` in AWS Commercial, `logs.amazonaws.com.cn` in AWS China). * `service` - Service used for SPN generation (e.g. `logs`). 
* `suffix` - Suffix of the SPN (e.g., `amazonaws.com` in AWS Commercial, `amazonaws.com.cn` in AWS China). -*`region` - Region identifier of the generated SPN (e.g., `us-east-1` in AWS Commercial, `cn-north-1` in AWS China). - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/servicecatalog_constraint.html.markdown b/website/docs/cdktf/typescript/d/servicecatalog_constraint.html.markdown index a774f69706de..11065c16c4df 100644 --- a/website/docs/cdktf/typescript/d/servicecatalog_constraint.html.markdown +++ b/website/docs/cdktf/typescript/d/servicecatalog_constraint.html.markdown @@ -45,6 +45,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `acceptLanguage` - (Optional) Language code. Valid values: `en` (English), `jp` (Japanese), `zh` (Chinese). Default value is `en`. ## Attribute Reference @@ -59,4 +60,4 @@ This data source exports the following attributes in addition to the arguments a * `status` - Constraint status. * `type` - Type of constraint. Valid values are `LAUNCH`, `NOTIFICATION`, `RESOURCE_UPDATE`, `STACKSET`, and `TEMPLATE`. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/servicecatalog_launch_paths.html.markdown b/website/docs/cdktf/typescript/d/servicecatalog_launch_paths.html.markdown index 277ef6d6e8d5..85836d110985 100644 --- a/website/docs/cdktf/typescript/d/servicecatalog_launch_paths.html.markdown +++ b/website/docs/cdktf/typescript/d/servicecatalog_launch_paths.html.markdown @@ -44,6 +44,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `acceptLanguage` - (Optional) Language code. Valid values: `en` (English), `jp` (Japanese), `zh` (Chinese). Default value is `en`. ## Attribute Reference @@ -64,4 +65,4 @@ This data source exports the following attributes in addition to the arguments a * `description` - Description of the constraint. * `type` - Type of constraint. Valid values are `LAUNCH`, `NOTIFICATION`, `STACKSET`, and `TEMPLATE`. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/servicecatalog_portfolio.html.markdown b/website/docs/cdktf/typescript/d/servicecatalog_portfolio.html.markdown index 9d7ba0c3bb45..2b4351f9552b 100644 --- a/website/docs/cdktf/typescript/d/servicecatalog_portfolio.html.markdown +++ b/website/docs/cdktf/typescript/d/servicecatalog_portfolio.html.markdown @@ -42,6 +42,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `acceptLanguage` - (Optional) Language code. Valid values: `en` (English), `jp` (Japanese), `zh` (Chinese). Default value is `en`. ## Attribute Reference @@ -55,4 +56,4 @@ This data source exports the following attributes in addition to the arguments a * `providerName` - Name of the person or organization who owns the portfolio. * `tags` - Tags applied to the portfolio. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/servicecatalog_portfolio_constraints.html.markdown b/website/docs/cdktf/typescript/d/servicecatalog_portfolio_constraints.html.markdown index d1a484fcad9e..80ce356c7b70 100644 --- a/website/docs/cdktf/typescript/d/servicecatalog_portfolio_constraints.html.markdown +++ b/website/docs/cdktf/typescript/d/servicecatalog_portfolio_constraints.html.markdown @@ -44,6 +44,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `acceptLanguage` - (Optional) Language code. Valid values: `en` (English), `jp` (Japanese), `zh` (Chinese). Default value is `en`. * `productId` - (Optional) Product identifier. @@ -61,4 +62,4 @@ This data source exports the following attributes in addition to the arguments a * `productId` - Identifier of the product the constraint applies to. A constraint applies to a specific instance of a product within a certain portfolio. * `type` - Type of constraint. Valid values are `LAUNCH`, `NOTIFICATION`, `STACKSET`, and `TEMPLATE`. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/servicecatalog_product.html.markdown b/website/docs/cdktf/typescript/d/servicecatalog_product.html.markdown index 9935e6f35b67..f16a022cf741 100644 --- a/website/docs/cdktf/typescript/d/servicecatalog_product.html.markdown +++ b/website/docs/cdktf/typescript/d/servicecatalog_product.html.markdown @@ -46,6 +46,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `acceptLanguage` - (Optional) Language code. Valid values are `en` (English), `jp` (Japanese), `zh` (Chinese). The default value is `en`. ## Attribute Reference @@ -66,4 +67,4 @@ This data source exports the following attributes in addition to the arguments a * `tags` - Tags applied to the product. * `type` - Type of product. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/servicecatalog_provisioning_artifacts.html.markdown b/website/docs/cdktf/typescript/d/servicecatalog_provisioning_artifacts.html.markdown index f16c6f666688..7f1a3221bcb0 100644 --- a/website/docs/cdktf/typescript/d/servicecatalog_provisioning_artifacts.html.markdown +++ b/website/docs/cdktf/typescript/d/servicecatalog_provisioning_artifacts.html.markdown @@ -44,6 +44,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
* `acceptLanguage` - (Optional) Language code. Valid values: `en` (English), `jp` (Japanese), `zh` (Chinese). Default value is `en`. ## Attribute Reference @@ -62,4 +63,4 @@ This data source exports the following attributes in addition to the arguments a * `name` - The name of the provisioning artifact. * `type` - The type of provisioning artifact. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/servicecatalogappregistry_application.html.markdown b/website/docs/cdktf/typescript/d/servicecatalogappregistry_application.html.markdown index c46ea5f722a4..6d3276788ec6 100644 --- a/website/docs/cdktf/typescript/d/servicecatalogappregistry_application.html.markdown +++ b/website/docs/cdktf/typescript/d/servicecatalogappregistry_application.html.markdown @@ -38,8 +38,9 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -The following arguments are required: +This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `id` - (Required) Application identifier. ## Attribute Reference @@ -52,4 +53,4 @@ This data source exports the following attributes in addition to the arguments a * `name` - Name of the application. * `tags` - A map of tags assigned to the Application. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/servicecatalogappregistry_attribute_group.html.markdown b/website/docs/cdktf/typescript/d/servicecatalogappregistry_attribute_group.html.markdown index e172bea9faa3..fcc527342833 100644 --- a/website/docs/cdktf/typescript/d/servicecatalogappregistry_attribute_group.html.markdown +++ b/website/docs/cdktf/typescript/d/servicecatalogappregistry_attribute_group.html.markdown @@ -24,7 +24,7 @@ import { TerraformStack } from "cdktf"; * Provider bindings are generated by running `cdktf get`. * See https://cdk.tf/provider-generation for more details. */ -import { DataAwsServicecatalogappregistryAttributeGroup } from "./.gen/providers/aws/"; +import { DataAwsServicecatalogappregistryAttributeGroup } from "./.gen/providers/aws/data-aws-servicecatalogappregistry-attribute-group"; class MyConvertedCode extends TerraformStack { constructor(scope: Construct, name: string) { super(scope, name); @@ -40,8 +40,8 @@ class MyConvertedCode extends TerraformStack { The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). ~> Exactly one of `arn`, `id`, or `name` must be set. - * `arn` - (Optional) ARN of the Attribute Group to find. * `id` - (Optional) ID of the Attribute Group to find. * `name` - (Optional) Name of the Attribute Group to find. @@ -54,4 +54,4 @@ This data source exports the following attributes in addition to the arguments a * `description` - Description of the Attribute Group. * `tags` - A map of tags assigned to the Attribute Group. 
If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/servicecatalogappregistry_attribute_group_associations.html.markdown b/website/docs/cdktf/typescript/d/servicecatalogappregistry_attribute_group_associations.html.markdown index b0d375b99c62..10de9a3e5aff 100644 --- a/website/docs/cdktf/typescript/d/servicecatalogappregistry_attribute_group_associations.html.markdown +++ b/website/docs/cdktf/typescript/d/servicecatalogappregistry_attribute_group_associations.html.markdown @@ -44,17 +44,15 @@ class MyConvertedCode extends TerraformStack { The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). ~> Exactly one of `id`or `name` must be set. - * `id` - (Optional) ID of the application to which attribute groups are associated. * `name` - (Optional) Name of the application to which attribute groups are associated. -The following arguments are optional: - ## Attribute Reference This data source exports the following attributes in addition to the arguments above: * `attributeGroupIds` - Set of attribute group IDs this application is associated with. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/servicequotas_service.html.markdown b/website/docs/cdktf/typescript/d/servicequotas_service.html.markdown index cee21b40def0..b011ed356145 100644 --- a/website/docs/cdktf/typescript/d/servicequotas_service.html.markdown +++ b/website/docs/cdktf/typescript/d/servicequotas_service.html.markdown @@ -40,6 +40,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `serviceName` - (Required) Service name to lookup within Service Quotas. Available values can be found with the [AWS CLI service-quotas list-services command](https://docs.aws.amazon.com/cli/latest/reference/service-quotas/list-services.html). ## Attribute Reference @@ -49,4 +50,4 @@ This data source exports the following attributes in addition to the arguments a * `id` - Code of the service. * `serviceCode` - Code of the service. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/servicequotas_service_quota.html.markdown b/website/docs/cdktf/typescript/d/servicequotas_service_quota.html.markdown index 6983a6f5434a..c7bff1ecf0a7 100644 --- a/website/docs/cdktf/typescript/d/servicequotas_service_quota.html.markdown +++ b/website/docs/cdktf/typescript/d/servicequotas_service_quota.html.markdown @@ -45,6 +45,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `serviceCode` - (Required) Service code for the quota. Available values can be found with the [`aws_servicequotas_service` data source](/docs/providers/aws/d/servicequotas_service.html) or [AWS CLI service-quotas list-services command](https://docs.aws.amazon.com/cli/latest/reference/service-quotas/list-services.html). * `quotaCode` - (Optional) Quota code within the service. When configured, the data source directly looks up the service quota. Available values can be found with the [AWS CLI service-quotas list-service-quotas command](https://docs.aws.amazon.com/cli/latest/reference/service-quotas/list-service-quotas.html). One of `quotaCode` or `quotaName` must be specified. * `quotaName` - (Optional) Quota name within the service. When configured, the data source searches through all service quotas to find the matching quota name. Available values can be found with the [AWS CLI service-quotas list-service-quotas command](https://docs.aws.amazon.com/cli/latest/reference/service-quotas/list-service-quotas.html). One of `quotaName` or `quotaCode` must be specified. @@ -72,4 +73,4 @@ This data source exports the following attributes in addition to the arguments a * `metric_statistic_recommendation` - The metric statistic that AWS recommend you use when determining quota usage. * `value` - Current value of the service quota. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/servicequotas_templates.html.markdown b/website/docs/cdktf/typescript/d/servicequotas_templates.html.markdown index 68a122b6b68e..68c1099a4986 100644 --- a/website/docs/cdktf/typescript/d/servicequotas_templates.html.markdown +++ b/website/docs/cdktf/typescript/d/servicequotas_templates.html.markdown @@ -3,14 +3,14 @@ subcategory: "Service Quotas" layout: "aws" page_title: "AWS: aws_servicequotas_templates" description: |- - Terraform data source for managing an AWS Service Quotas Templates. + Terraform data source for managing AWS Service Quotas Templates. --- # Data Source: aws_servicequotas_templates -Terraform data source for managing an AWS Service Quotas Templates. +Terraform data source for managing AWS Service Quotas Templates. ## Example Usage @@ -29,7 +29,7 @@ class MyConvertedCode extends TerraformStack { constructor(scope: Construct, name: string) { super(scope, name); new DataAwsServicequotasTemplates(this, "example", { - region: "us-east-1", + awsRegion: "us-east-1", }); } } @@ -38,9 +38,10 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -The following arguments are required: +This data source supports the following arguments: -* `region` - (Required) AWS Region to which the quota increases apply. +* `awsRegion` - (Optional) AWS Region to which the quota increases apply. +* `region` - (Optional, **Deprecated**) AWS Region to which the quota increases apply. Use `awsRegion` instead. ## Attribute Reference @@ -54,9 +55,9 @@ This data source exports the following attributes in addition to the arguments a * `quotaName` - Quota name. * `quotaCode` - Quota identifier. * `region` - AWS Region to which the template applies. -* `serviceCode` - (Required) Service identifier. +* `serviceCode` - Service identifier. * `serviceName` - Service name. * `unit` - Unit of measurement. 
-* `value` - (Required) The new, increased value for the quota. +* `value` - The new, increased value for the quota. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/ses_active_receipt_rule_set.html.markdown b/website/docs/cdktf/typescript/d/ses_active_receipt_rule_set.html.markdown index e0dc6f4db4e7..600f12cbef37 100644 --- a/website/docs/cdktf/typescript/d/ses_active_receipt_rule_set.html.markdown +++ b/website/docs/cdktf/typescript/d/ses_active_receipt_rule_set.html.markdown @@ -34,7 +34,9 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -This data source does not support any arguments. +This data source supports the following arguments: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). ## Attribute Reference @@ -43,4 +45,4 @@ This data source exports the following attributes in addition to the arguments a * `arn` - SES receipt rule set ARN. * `ruleSetName` - Name of the rule set - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/ses_domain_identity.html.markdown b/website/docs/cdktf/typescript/d/ses_domain_identity.html.markdown index 22de846a930a..0dfcac51c131 100644 --- a/website/docs/cdktf/typescript/d/ses_domain_identity.html.markdown +++ b/website/docs/cdktf/typescript/d/ses_domain_identity.html.markdown @@ -36,7 +36,9 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -This data source does not support any arguments. +This data source supports the following arguments: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). ## Attribute Reference @@ -46,4 +48,4 @@ This data source exports the following attributes in addition to the arguments a * `domain` - Name of the domain * `verificationToken` - Code which when added to the domain as a TXT record will signal to SES that the owner of the domain has authorized SES to act on their behalf. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/ses_email_identity.html.markdown b/website/docs/cdktf/typescript/d/ses_email_identity.html.markdown index 1b00dbb5ce3f..502da3c0099d 100644 --- a/website/docs/cdktf/typescript/d/ses_email_identity.html.markdown +++ b/website/docs/cdktf/typescript/d/ses_email_identity.html.markdown @@ -36,7 +36,9 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -This data source does not support any arguments. +This data source supports the following arguments: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). ## Attribute Reference @@ -45,4 +47,4 @@ This data source exports the following attributes in addition to the arguments a * `arn` - The ARN of the email identity. * `email` - Email identity. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/sesv2_configuration_set.html.markdown b/website/docs/cdktf/typescript/d/sesv2_configuration_set.html.markdown index c699597f8d7a..e0238f100580 100644 --- a/website/docs/cdktf/typescript/d/sesv2_configuration_set.html.markdown +++ b/website/docs/cdktf/typescript/d/sesv2_configuration_set.html.markdown @@ -38,8 +38,9 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -The following arguments are required: +This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `configurationSetName` - (Required) The name of the configuration set. ## Attribute Reference @@ -67,4 +68,4 @@ This data source exports the following attributes in addition to the arguments a * `guardianOptions` - Specifies additional settings for your VDM configuration as applicable to the Guardian. * `optimizedSharedDelivery` - Specifies the status of your VDM optimized shared delivery. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/sesv2_dedicated_ip_pool.html.markdown b/website/docs/cdktf/typescript/d/sesv2_dedicated_ip_pool.html.markdown index 9b9141f3b497..dfcaaabbe1aa 100644 --- a/website/docs/cdktf/typescript/d/sesv2_dedicated_ip_pool.html.markdown +++ b/website/docs/cdktf/typescript/d/sesv2_dedicated_ip_pool.html.markdown @@ -38,8 +38,9 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -The following arguments are required: +This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `poolName` - (Required) Name of the dedicated IP pool. ## Attribute Reference @@ -57,4 +58,4 @@ This data source exports the following attributes in addition to the arguments a * `warmup_percentage` - Indicates how complete the dedicated IP warm-up process is. When this value equals `1`, the address has completed the warm-up process and is ready for use. * `warmup_status` - The warm-up status of a dedicated IP address. Valid values: `IN_PROGRESS`, `DONE`. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/sesv2_email_identity.html.markdown b/website/docs/cdktf/typescript/d/sesv2_email_identity.html.markdown index 83c6e865a321..9c541dd97915 100644 --- a/website/docs/cdktf/typescript/d/sesv2_email_identity.html.markdown +++ b/website/docs/cdktf/typescript/d/sesv2_email_identity.html.markdown @@ -38,8 +38,9 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -The following arguments are required: +This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `emailIdentity` - (Required) The name of the email identity. ## Attribute Reference @@ -56,6 +57,7 @@ This data source exports the following attributes in addition to the arguments a * `tokens` - If you used Easy DKIM to configure DKIM authentication for the domain, then this object contains a set of unique strings that you use to create a set of CNAME records that you add to the DNS configuration for your domain. When Amazon SES detects these records in the DNS configuration for your domain, the DKIM authentication process is complete. If you configured DKIM authentication for the domain by providing your own public-private key pair, then this object contains the selector for the public key. * `identityType` - The email identity type. Valid values: `EMAIL_ADDRESS`, `DOMAIN`. * `tags` - Key-value mapping of resource tags. +* `verificationStatus` - The verification status of the identity. The status can be one of the following: `PENDING`, `SUCCESS`, `FAILED`, `TEMPORARY_FAILURE`, and `NOT_STARTED`. * `verifiedForSendingStatus` - Specifies whether or not the identity is verified. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/sesv2_email_identity_mail_from_attributes.html.markdown b/website/docs/cdktf/typescript/d/sesv2_email_identity_mail_from_attributes.html.markdown index 5af89e241adb..0f96083e28fe 100644 --- a/website/docs/cdktf/typescript/d/sesv2_email_identity_mail_from_attributes.html.markdown +++ b/website/docs/cdktf/typescript/d/sesv2_email_identity_mail_from_attributes.html.markdown @@ -47,8 +47,9 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -The following arguments are required: +This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `emailIdentity` - (Required) The name of the email identity. ## Attribute Reference @@ -58,4 +59,4 @@ This data source exports the following attributes in addition to the arguments a * `behaviorOnMxFailure` - The action to take if the required MX record isn't found when you send an email. Valid values: `USE_DEFAULT_VALUE`, `REJECT_MESSAGE`. * `mailFromDomain` - The custom MAIL FROM domain that you want the verified identity to use. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/sfn_activity.html.markdown b/website/docs/cdktf/typescript/d/sfn_activity.html.markdown index d025bf73dfb9..c5656dbb0ed2 100644 --- a/website/docs/cdktf/typescript/d/sfn_activity.html.markdown +++ b/website/docs/cdktf/typescript/d/sfn_activity.html.markdown @@ -38,6 +38,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Optional) Name that identifies the activity. * `arn` - (Optional) ARN that identifies the activity. @@ -48,4 +49,4 @@ This data source exports the following attributes in addition to the arguments a * `id` - ARN that identifies the activity. * `creationDate` - Date the activity was created. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/sfn_alias.html.markdown b/website/docs/cdktf/typescript/d/sfn_alias.html.markdown index 86cbffa58e9f..b5f04cf3cbbd 100644 --- a/website/docs/cdktf/typescript/d/sfn_alias.html.markdown +++ b/website/docs/cdktf/typescript/d/sfn_alias.html.markdown @@ -39,8 +39,9 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -The following arguments are required: +This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the State Machine alias. 
* `statemachineArn` - (Required) ARN of the State Machine. @@ -53,4 +54,4 @@ This data source exports the following attributes in addition to the arguments a * `description` - Description of state machine alias. * `routingConfiguration` - Routing Configuration of state machine alias - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/sfn_state_machine.html.markdown b/website/docs/cdktf/typescript/d/sfn_state_machine.html.markdown index 485d6077cc9f..8e1f2c214c22 100644 --- a/website/docs/cdktf/typescript/d/sfn_state_machine.html.markdown +++ b/website/docs/cdktf/typescript/d/sfn_state_machine.html.markdown @@ -40,6 +40,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Friendly name of the state machine to match. ## Attribute Reference @@ -54,4 +55,4 @@ This data source exports the following attributes in addition to the arguments a * `revisionId` - The revision identifier for the state machine. * `status` - Set to the current status of the state machine. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/sfn_state_machine_versions.html.markdown b/website/docs/cdktf/typescript/d/sfn_state_machine_versions.html.markdown index efb7974bb9bb..21bed36f5906 100644 --- a/website/docs/cdktf/typescript/d/sfn_state_machine_versions.html.markdown +++ b/website/docs/cdktf/typescript/d/sfn_state_machine_versions.html.markdown @@ -38,8 +38,9 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -The following arguments are required: +This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `statemachineArn` - (Required) ARN of the State Machine. ## Attribute Reference @@ -48,4 +49,4 @@ This data source exports the following attributes in addition to the arguments a * `statemachineVersions` - ARN List identifying the statemachine versions. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/signer_signing_job.html.markdown b/website/docs/cdktf/typescript/d/signer_signing_job.html.markdown index fda77f152177..a0a7381dcf31 100644 --- a/website/docs/cdktf/typescript/d/signer_signing_job.html.markdown +++ b/website/docs/cdktf/typescript/d/signer_signing_job.html.markdown @@ -38,6 +38,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
* `jobId` - (Required) ID of the signing job on output. ## Attribute Reference @@ -60,4 +61,4 @@ This data source exports the following attributes in addition to the arguments a * `status` - Status of the signing job. * `statusReason` - String value that contains the status reason. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/signer_signing_profile.html.markdown b/website/docs/cdktf/typescript/d/signer_signing_profile.html.markdown index 6b8ba0a1a8da..c844e0f86f91 100644 --- a/website/docs/cdktf/typescript/d/signer_signing_profile.html.markdown +++ b/website/docs/cdktf/typescript/d/signer_signing_profile.html.markdown @@ -38,6 +38,7 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the target signing profile. ## Attribute Reference @@ -49,9 +50,12 @@ This data source exports the following attributes in addition to the arguments a * `platformId` - ID of the platform that is used by the target signing profile. * `revocationRecord` - Revocation information for a signing profile. * `signatureValidityPeriod` - The validity period for a signing job. +* `signingMaterial` - AWS Certificate Manager certificate that will be used to sign code with the new signing profile. + * `certificateArn` - ARN of the certificate used for signing. +* `signingParameters` - Map of key-value pairs for signing. * `status` - Status of the target signing profile. * `tags` - List of tags associated with the signing profile. * `version` - Current version of the signing profile. * `versionArn` - Signing profile ARN, including the profile version. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/sns_topic.html.markdown b/website/docs/cdktf/typescript/d/sns_topic.html.markdown index 5cfe135a915b..6edce2cc23f7 100644 --- a/website/docs/cdktf/typescript/d/sns_topic.html.markdown +++ b/website/docs/cdktf/typescript/d/sns_topic.html.markdown @@ -40,6 +40,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Friendly name of the topic to match. ## Attribute Reference @@ -50,4 +51,4 @@ This data source exports the following attributes in addition to the arguments a * `id` - ARN of the found topic, suitable for referencing in other resources that support SNS topics. * `tags` - Map of tags for the resource. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/spot_datafeed_subscription.html.markdown b/website/docs/cdktf/typescript/d/spot_datafeed_subscription.html.markdown index aa2598ac7859..a2931894d1ba 100644 --- a/website/docs/cdktf/typescript/d/spot_datafeed_subscription.html.markdown +++ b/website/docs/cdktf/typescript/d/spot_datafeed_subscription.html.markdown @@ -36,7 +36,9 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -This data source does not support any arguments. +This data source supports the following arguments: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). ## Attribute Reference @@ -45,4 +47,4 @@ This data source exports the following attributes in addition to the arguments a * `bucket` - The name of the Amazon S3 bucket where the spot instance data feed is located. * `prefix` - The prefix for the data feed files. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/sqs_queue.html.markdown b/website/docs/cdktf/typescript/d/sqs_queue.html.markdown index e39c73d1e2ee..fb0383ed28f2 100644 --- a/website/docs/cdktf/typescript/d/sqs_queue.html.markdown +++ b/website/docs/cdktf/typescript/d/sqs_queue.html.markdown @@ -42,6 +42,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the queue to match. ## Attribute Reference @@ -52,4 +53,4 @@ This data source exports the following attributes in addition to the arguments a * `url` - URL of the queue. * `tags` - Map of tags for the resource. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/sqs_queues.html.markdown b/website/docs/cdktf/typescript/d/sqs_queues.html.markdown index 93dea812b21d..d59566613cb7 100644 --- a/website/docs/cdktf/typescript/d/sqs_queues.html.markdown +++ b/website/docs/cdktf/typescript/d/sqs_queues.html.markdown @@ -40,6 +40,7 @@ class MyConvertedCode extends TerraformStack { The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `queueNamePrefix` - (Optional) A string to use for filtering the list results. Only those queues whose name begins with the specified string are returned. Queue URLs and names are case-sensitive. ## Attribute Reference @@ -48,4 +49,4 @@ This data source exports the following attributes in addition to the arguments a * `queueUrls` - A list of queue URLs. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/ssm_document.html.markdown b/website/docs/cdktf/typescript/d/ssm_document.html.markdown index 663cc9eb8e27..8f1ae18b9f8b 100644 --- a/website/docs/cdktf/typescript/d/ssm_document.html.markdown +++ b/website/docs/cdktf/typescript/d/ssm_document.html.markdown @@ -67,6 +67,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The name of the document. * `documentFormat` - The format of the document. 
Valid values: `JSON`, `TEXT`, `YAML`. * `documentVersion` - The document version. @@ -79,4 +80,4 @@ This data source exports the following attributes in addition to the arguments a * `content` - The content for the SSM document in JSON or YAML format. * `documentType` - The type of the document. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/ssm_instances.html.markdown b/website/docs/cdktf/typescript/d/ssm_instances.html.markdown index e530a80b4a7e..c654f420c468 100644 --- a/website/docs/cdktf/typescript/d/ssm_instances.html.markdown +++ b/website/docs/cdktf/typescript/d/ssm_instances.html.markdown @@ -43,6 +43,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `filter` - (Optional) Configuration block(s) for filtering. Detailed below. ### filter Configuration Block @@ -58,4 +59,4 @@ This data source exports the following attributes in addition to the arguments a * `ids` - Set of instance IDs of the matched SSM managed instances. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/ssm_maintenance_windows.html.markdown b/website/docs/cdktf/typescript/d/ssm_maintenance_windows.html.markdown index 733089e5e5a2..fbffef0caeb1 100644 --- a/website/docs/cdktf/typescript/d/ssm_maintenance_windows.html.markdown +++ b/website/docs/cdktf/typescript/d/ssm_maintenance_windows.html.markdown @@ -43,6 +43,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `filter` - (Optional) Configuration block(s) for filtering. Detailed below. ### filter Configuration Block @@ -58,4 +59,4 @@ This data source exports the following attributes in addition to the arguments a * `ids` - List of window IDs of the matched SSM maintenance windows. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/ssm_parameter.html.markdown b/website/docs/cdktf/typescript/d/ssm_parameter.html.markdown index d91007d40168..bfbc0f5baf51 100644 --- a/website/docs/cdktf/typescript/d/ssm_parameter.html.markdown +++ b/website/docs/cdktf/typescript/d/ssm_parameter.html.markdown @@ -14,6 +14,8 @@ Provides an SSM Parameter data source. ## Example Usage +### Default + ```typescript // DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug import { Construct } from "constructs"; @@ -34,6 +36,28 @@ class MyConvertedCode extends TerraformStack { ``` +### With version + +```typescript +// DO NOT EDIT. 
Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. + */ +import { DataAwsSsmParameter } from "./.gen/providers/aws/data-aws-ssm-parameter"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + new DataAwsSsmParameter(this, "foo", { + name: "foo:3", + }); + } +} + +``` + ~> **Note:** The unencrypted value of a SecureString will be stored in the raw state as plain-text. [Read more about sensitive data in state](https://www.terraform.io/docs/state/sensitive-data.html). @@ -43,7 +67,8 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: -* `name` - (Required) Name of the parameter. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `name` - (Required) Name of the parameter. To query by parameter version use `name:version` (e.g., `foo:3`). * `withDecryption` - (Optional) Whether to return decrypted `SecureString` value. Defaults to `true`. ## Attribute Reference @@ -57,4 +82,4 @@ This data source exports the following attributes in addition to the arguments a * `insecureValue` - Value of the parameter. **Use caution:** This value is never marked as sensitive. * `version` - Version of the parameter. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/ssm_parameters_by_path.html.markdown b/website/docs/cdktf/typescript/d/ssm_parameters_by_path.html.markdown index 1a2557920726..7255614d70bc 100644 --- a/website/docs/cdktf/typescript/d/ssm_parameters_by_path.html.markdown +++ b/website/docs/cdktf/typescript/d/ssm_parameters_by_path.html.markdown @@ -42,6 +42,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `path` - (Required) The hierarchy for the parameter. Hierarchies start with a forward slash (/). The hierarchy is the parameter name except the last part of the parameter. The last part of the parameter name can't be in the path. A parameter name hierarchy can have a maximum of 15 levels. **Note:** If the parameter name (e.g., `/my-app/my-param`) is specified, the data source will not retrieve any value as designed, unless there are other parameters that happen to use the former path in their hierarchy (e.g., `/my-app/my-param/my-actual-param`). * `withDecryption` - (Optional) Whether to retrieve all parameters in the hierarchy, particularly those of `SecureString` type, with their value decrypted. Defaults to `true`. * `recursive` - (Optional) Whether to retrieve all parameters within the hirerachy. Defaults to `false`. @@ -55,4 +56,4 @@ This data source exports the following attributes in addition to the arguments a * `types` - A list that contains the types (`String`, `StringList`, or `SecureString`) of retrieved parameters. * `values` - A list that contains the retrieved parameter values. 
**Note:** This value is always marked as sensitive in the Terraform plan output, regardless of whether any retrieved parameters are of `SecureString` type. Use the [`nonsensitive` function](https://developer.hashicorp.com/terraform/language/functions/nonsensitive) to override the behavior at your own risk and discretion, if you are certain that there are no sensitive values being retrieved. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/ssm_patch_baseline.html.markdown b/website/docs/cdktf/typescript/d/ssm_patch_baseline.html.markdown index 3755fb7022a2..3e8fb7c70127 100644 --- a/website/docs/cdktf/typescript/d/ssm_patch_baseline.html.markdown +++ b/website/docs/cdktf/typescript/d/ssm_patch_baseline.html.markdown @@ -71,6 +71,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `defaultBaseline` - (Optional) Filters the results against the baselines default_baseline field. * `namePrefix` - (Optional) Filter results by the baseline name prefix. * `operatingSystem` - (Optional) Specified OS for the baseline. Valid values: `AMAZON_LINUX`, `AMAZON_LINUX_2`, `UBUNTU`, `REDHAT_ENTERPRISE_LINUX`, `SUSE`, `CENTOS`, `ORACLE_LINUX`, `DEBIAN`, `MACOS`, `RASPBIAN` and `ROCKY_LINUX`. @@ -90,6 +91,7 @@ This data source exports the following attributes in addition to the arguments a * `patchFilter` - Patch filter group that defines the criteria for the rule. * `key` - Key for the filter. * `values` - Value for the filter. +* `availableSecurityUpdatesComplianceStatus` - Indicates the compliance status of managed nodes for which security-related patches are available but were not approved. 
Supported for Windows Server managed nodes only. * `globalFilter` - Set of global filters used to exclude patches from the baseline. * `key` - Key for the filter. * `values` - Value for the filter. @@ -104,4 +106,4 @@ This data source exports the following attributes in addition to the arguments a * `name` - Name specified to identify the patch source. * `products` - Specific operating system versions a patch repository applies to. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/ssm_patch_baselines.html.markdown b/website/docs/cdktf/typescript/d/ssm_patch_baselines.html.markdown index d311aafed477..a0465d0e515a 100644 --- a/website/docs/cdktf/typescript/d/ssm_patch_baselines.html.markdown +++ b/website/docs/cdktf/typescript/d/ssm_patch_baselines.html.markdown @@ -24,7 +24,7 @@ import { TerraformStack } from "cdktf"; * Provider bindings are generated by running `cdktf get`. * See https://cdk.tf/provider-generation for more details. */ -import { DataAwsSsmPatchBaselines } from "./.gen/providers/aws/"; +import { DataAwsSsmPatchBaselines } from "./.gen/providers/aws/data-aws-ssm-patch-baselines"; class MyConvertedCode extends TerraformStack { constructor(scope: Construct, name: string) { super(scope, name); @@ -44,7 +44,7 @@ import { TerraformStack } from "cdktf"; * Provider bindings are generated by running `cdktf get`. * See https://cdk.tf/provider-generation for more details. */ -import { DataAwsSsmPatchBaselines } from "./.gen/providers/aws/"; +import { DataAwsSsmPatchBaselines } from "./.gen/providers/aws/data-aws-ssm-patch-baselines"; class MyConvertedCode extends TerraformStack { constructor(scope: Construct, name: string) { super(scope, name); @@ -69,8 +69,9 @@ class MyConvertedCode extends TerraformStack { The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `filter` - (Optional) Key-value pairs used to filter the results. See [`filter`](#filter-argument-reference) below. -* `default_baselines` - (Optional) Only return baseline identities where `defaultBaseline` is `true`. +* `defaultBaselines` - (Optional) Only return baseline identities where `defaultBaseline` is `true`. ### `filter` Argument Reference @@ -81,9 +82,9 @@ The following arguments are optional: This data source exports the following attributes in addition to the arguments above: -* `baseline_identities` - List of baseline identities. See [`baseline_identities`](#baseline_identities-attribute-reference) below. +* `baselineIdentities` - List of baseline identities. See [`baselineIdentities`](#baseline_identities-attribute-reference) below. -### `baseline_identities` Attribute Reference +### `baselineIdentities` Attribute Reference * `baseline_description` - Description of the patch baseline. * `baselineId` - ID of the patch baseline. @@ -91,4 +92,4 @@ This data source exports the following attributes in addition to the arguments a * `defaultBaseline` - Indicates whether this is the default baseline. AWS Systems Manager supports creating multiple default patch baselines. For example, you can create a default patch baseline for each operating system. * `operatingSystem` - Operating system the patch baseline applies to. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/ssmcontacts_contact.html.markdown b/website/docs/cdktf/typescript/d/ssmcontacts_contact.html.markdown index 3fd7f1d17ca8..1507558c6ca4 100644 --- a/website/docs/cdktf/typescript/d/ssmcontacts_contact.html.markdown +++ b/website/docs/cdktf/typescript/d/ssmcontacts_contact.html.markdown @@ -38,8 +38,9 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -The following arguments are required: +This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `arn` - (Required) The Amazon Resource Name (ARN) of the contact or escalation plan. ## Attribute Reference @@ -51,4 +52,4 @@ This data source exports the following attributes in addition to the arguments a * `displayName` - Full friendly name of the contact or escalation plan. * `tags` - Map of tags to assign to the resource. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/ssmcontacts_contact_channel.html.markdown b/website/docs/cdktf/typescript/d/ssmcontacts_contact_channel.html.markdown index c811d2d34df0..30ef00d3b55d 100644 --- a/website/docs/cdktf/typescript/d/ssmcontacts_contact_channel.html.markdown +++ b/website/docs/cdktf/typescript/d/ssmcontacts_contact_channel.html.markdown @@ -38,8 +38,9 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -The following arguments are required: +This data source supports the following arguments: +- `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). - `arn` - Amazon Resource Name (ARN) of the contact channel. ## Attribute Reference @@ -47,13 +48,9 @@ The following arguments are required: This data source exports the following attributes in addition to the arguments above: - `activationStatus` - Whether the contact channel is activated. - - `contactId` - Amazon Resource Name (ARN) of the AWS SSM Contact that the contact channel belongs to. - - `deliveryAddress` - Details used to engage the contact channel. - - `name` - Name of the contact channel. - - `type` - Type of the contact channel. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/ssmcontacts_plan.html.markdown b/website/docs/cdktf/typescript/d/ssmcontacts_plan.html.markdown index 2aae4f5db13c..d270c7201cfb 100644 --- a/website/docs/cdktf/typescript/d/ssmcontacts_plan.html.markdown +++ b/website/docs/cdktf/typescript/d/ssmcontacts_plan.html.markdown @@ -39,8 +39,9 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -The following arguments are required: +This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `contactId` - (Required) The Amazon Resource Name (ARN) of the contact or escalation plan. ## Attribute Reference @@ -49,4 +50,4 @@ This data source exports the following attributes in addition to the arguments a * `stage` - List of stages. A contact has an engagement plan with stages that contact specified contact channels. An escalation plan uses stages that contact specified contacts. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/ssmcontacts_rotation.html.markdown b/website/docs/cdktf/typescript/d/ssmcontacts_rotation.html.markdown index b9f66930be4e..b57559e5a1e2 100644 --- a/website/docs/cdktf/typescript/d/ssmcontacts_rotation.html.markdown +++ b/website/docs/cdktf/typescript/d/ssmcontacts_rotation.html.markdown @@ -38,8 +38,9 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -The following arguments are required: +This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `arn` - (Required) The Amazon Resource Name (ARN) of the rotation. ## Attribute Reference @@ -53,4 +54,4 @@ This data source exports the following attributes in addition to the arguments a * `startTime` - The date and time, in RFC 3339 format, that the rotation goes into effect. * `tags` - A map of tags to assign to the resource. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/ssmincidents_replication_set.html.markdown b/website/docs/cdktf/typescript/d/ssmincidents_replication_set.html.markdown index 07d681b4a294..277c7bd00150 100644 --- a/website/docs/cdktf/typescript/d/ssmincidents_replication_set.html.markdown +++ b/website/docs/cdktf/typescript/d/ssmincidents_replication_set.html.markdown @@ -45,14 +45,16 @@ This data source does not support any arguments. This data source exports the following attributes in addition to the arguments above: * `arn` - The Amazon Resource Name (ARN) of the replication set. -* `tags` - All tags applied to the replication set. * `createdBy` - The ARN of the user who created the replication set. 
* `deletionProtected` - If `true`, the last remaining Region in a replication set can’t be deleted. * `lastModifiedBy` - The ARN of the user who last modified the replication set. +* `region` - (**Deprecated**) The replication set's Regions. Use `regions` instead. +* `regions` - The replication set's Regions. * `status` - The overall status of a replication set. * Valid Values: `ACTIVE` | `CREATING` | `UPDATING` | `DELETING` | `FAILED` +* `tags` - All tags applied to the replication set. -The `region` configuration block exports the following attributes for each Region: +The `regions` configuration block exports the following attributes for each Region: * `name` - The name of the Region. * `kmsKeyArn` - The ARN of the AWS Key Management Service (AWS KMS) encryption key. @@ -60,4 +62,4 @@ The `region` configuration block exports the following attributes for each Regio * Valid Values: `ACTIVE` | `CREATING` | `UPDATING` | `DELETING` | `FAILED` * `statusMessage` - More information about the status of a Region. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/ssmincidents_response_plan.html.markdown b/website/docs/cdktf/typescript/d/ssmincidents_response_plan.html.markdown index 8b149f69a133..db4fa4f0bb03 100644 --- a/website/docs/cdktf/typescript/d/ssmincidents_response_plan.html.markdown +++ b/website/docs/cdktf/typescript/d/ssmincidents_response_plan.html.markdown @@ -44,8 +44,9 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -The following arguments are required: +This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `arn` - (Required) The Amazon Resource Name (ARN) of the response plan. 
## Attribute Reference @@ -94,4 +95,4 @@ The `integration` configuration block exports the following attributes: * `serviceId` - The ID of the PagerDuty service that the response plan associates with an incident when it launches. * `secretId` - The ID of the AWS Secrets Manager secret that stores your PagerDuty key — either a General Access REST API Key or User Token REST API Key — and other user credentials. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/ssoadmin_application.html.markdown b/website/docs/cdktf/typescript/d/ssoadmin_application.html.markdown index 72caad15782b..2f3bf6a98638 100644 --- a/website/docs/cdktf/typescript/d/ssoadmin_application.html.markdown +++ b/website/docs/cdktf/typescript/d/ssoadmin_application.html.markdown @@ -39,8 +39,9 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -The following arguments are required: +This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `applicationArn` - (Required) ARN of the application. ## Attribute Reference @@ -56,4 +57,4 @@ This data source exports the following attributes in addition to the arguments a * `portalOptions` - Options for the portal associated with an application. See the `aws_ssoadmin_application` [resource documentation](../r/ssoadmin_application.html.markdown#portal_options-argument-reference). The attributes are the same. * `status` - Status of the application. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/ssoadmin_application_assignments.html.markdown b/website/docs/cdktf/typescript/d/ssoadmin_application_assignments.html.markdown index 4d60bf27bad2..b114d4dca311 100644 --- a/website/docs/cdktf/typescript/d/ssoadmin_application_assignments.html.markdown +++ b/website/docs/cdktf/typescript/d/ssoadmin_application_assignments.html.markdown @@ -29,9 +29,7 @@ class MyConvertedCode extends TerraformStack { constructor(scope: Construct, name: string) { super(scope, name); new DataAwsSsoadminApplicationAssignments(this, "example", { - applicationArn: Token.asString( - awsSsoadminApplicationExample.applicationArn - ), + applicationArn: Token.asString(awsSsoadminApplicationExample.arn), }); } } @@ -40,8 +38,9 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -The following arguments are required: +This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `applicationArn` - (Required) ARN of the application. ## Attribute Reference @@ -56,4 +55,4 @@ This data source exports the following attributes in addition to the arguments a * `principalId` - An identifier for an object in IAM Identity Center, such as a user or group. * `principalType` - Entity type for which the assignment will be created. Valid values are `USER` or `GROUP`. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/ssoadmin_application_providers.html.markdown index ce66c404be95..d516075c36a9 100644 --- a/website/docs/cdktf/typescript/d/ssoadmin_application_providers.html.markdown +++ b/website/docs/cdktf/typescript/d/ssoadmin_application_providers.html.markdown @@ -36,7 +36,9 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -This data source does not support any arguments. +This data source supports the following arguments: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). ## Attribute Reference @@ -48,13 +50,13 @@ This data source exports the following attributes in addition to the arguments a ### `applicationProviders` Attribute Reference * `applicationProviderArn` - ARN of the application provider. -* `displayData` - An object describing how IAM Identity Center represents the application provider in the portal. See [`displayData`](#display_data-attribute-reference) below. -* `federationProtocol` - Protocol that the application provider uses to perform federation. Valid values are `SAML` and `OAUTH`. +* `displayData` - An object describing how IAM Identity Center represents the application provider in the portal. See [`displayData`](#display_data-attribute-reference) below. +* `federationProtocol` - Protocol that the application provider uses to perform federation. Valid values are `SAML` and `OAUTH`. -### `displayData` Attribute Reference +### `displayData` Attribute Reference * `description` - Description of the application provider. * `displayName` - Name of the application provider. 
-* `iconUrl` - URL that points to an icon that represents the application provider. +* `iconUrl` - URL that points to an icon that represents the application provider. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/ssoadmin_instances.html.markdown index e46d1ee9c26b..07b2bce1c2b1 100644 --- a/website/docs/cdktf/typescript/d/ssoadmin_instances.html.markdown +++ b/website/docs/cdktf/typescript/d/ssoadmin_instances.html.markdown @@ -40,7 +40,9 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -This data source does not support any arguments. +This data source supports the following arguments: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). ## Attribute Reference @@ -50,4 +52,4 @@ This data source exports the following attributes in addition to the arguments a * `id` - AWS Region. * `identityStoreIds` - Set of identifiers of the identity stores connected to the SSO Instances. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/ssoadmin_permission_set.html.markdown index 0c1ff1d32579..d30b9ab11eff 100644 --- a/website/docs/cdktf/typescript/d/ssoadmin_permission_set.html.markdown +++ b/website/docs/cdktf/typescript/d/ssoadmin_permission_set.html.markdown @@ -49,12 +49,13 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: -~> **NOTE:** Either `arn` or `name` must be configured. 
- +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `arn` - (Optional) ARN of the permission set. * `instanceArn` - (Required) ARN of the SSO Instance associated with the permission set. * `name` - (Optional) Name of the SSO Permission Set. +~> **NOTE:** Either `arn` or `name` must be configured. + ## Attribute Reference This data source exports the following attributes in addition to the arguments above: @@ -65,4 +66,4 @@ This data source exports the following attributes in addition to the arguments a * `sessionDuration` - Length of time that the application user sessions are valid in the ISO-8601 standard. * `tags` - Key-value map of resource tags. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/ssoadmin_permission_sets.html.markdown b/website/docs/cdktf/typescript/d/ssoadmin_permission_sets.html.markdown index 3ec70fc910be..c218250c6bac 100644 --- a/website/docs/cdktf/typescript/d/ssoadmin_permission_sets.html.markdown +++ b/website/docs/cdktf/typescript/d/ssoadmin_permission_sets.html.markdown @@ -45,8 +45,9 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -The following arguments are required: +This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `instanceArn` - (Required) ARN of the SSO Instance associated with the permission set. 
## Attribute Reference @@ -55,4 +56,4 @@ This data source exports the following attributes in addition to the arguments a * `arns` - Set of string contain the ARN of all Permission Sets. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/ssoadmin_principal_application_assignments.html.markdown b/website/docs/cdktf/typescript/d/ssoadmin_principal_application_assignments.html.markdown index 93498bbc8837..3a4051f75e38 100644 --- a/website/docs/cdktf/typescript/d/ssoadmin_principal_application_assignments.html.markdown +++ b/website/docs/cdktf/typescript/d/ssoadmin_principal_application_assignments.html.markdown @@ -40,8 +40,9 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -The following arguments are required: +This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `instanceArn` - (Required) ARN of the instance of IAM Identity Center. * `principalId` - (Required) An identifier for an object in IAM Identity Center, such as a user or group. * `principalType` - (Required) Entity type for which the assignment will be created. Valid values are `USER` or `GROUP`. @@ -58,4 +59,4 @@ This data source exports the following attributes in addition to the arguments a * `principalId` - An identifier for an object in IAM Identity Center, such as a user or group. * `principalType` - Entity type for which the assignment will be created. Valid values are `USER` or `GROUP`. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/storagegateway_local_disk.html.markdown b/website/docs/cdktf/typescript/d/storagegateway_local_disk.html.markdown index d78b7a4d3720..318ef8ff289b 100644 --- a/website/docs/cdktf/typescript/d/storagegateway_local_disk.html.markdown +++ b/website/docs/cdktf/typescript/d/storagegateway_local_disk.html.markdown @@ -39,6 +39,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `gatewayArn` - (Required) ARN of the gateway. * `diskNode` - (Optional) Device node of the local disk to retrieve. For example, `/dev/sdb`. * `diskPath` - (Optional) Device path of the local disk to retrieve. For example, `/dev/xvdb` or `/dev/nvme1n1`. @@ -50,4 +51,4 @@ This data source exports the following attributes in addition to the arguments a * `diskId` - Disk identifierE.g., `pci-0000:03:00.0-scsi-0:0:0:0` * `id` - Disk identifierE.g., `pci-0000:03:00.0-scsi-0:0:0:0` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/subnet.html.markdown b/website/docs/cdktf/typescript/d/subnet.html.markdown index fbbe64fb7816..d036563255fb 100644 --- a/website/docs/cdktf/typescript/d/subnet.html.markdown +++ b/website/docs/cdktf/typescript/d/subnet.html.markdown @@ -86,6 +86,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `availabilityZone` - (Optional) Availability zone where the subnet must reside. * `availabilityZoneId` - (Optional) ID of the Availability Zone for the subnet. This argument is not supported in all regions or partitions. If necessary, use `availabilityZone` instead. * `cidrBlock` - (Optional) CIDR block of the desired subnet. @@ -132,4 +133,4 @@ This data source exports the following attributes in addition to the arguments a - `read` - (Default `20m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/subnets.html.markdown b/website/docs/cdktf/typescript/d/subnets.html.markdown index 1189bdca0a84..97f5b628f1fa 100644 --- a/website/docs/cdktf/typescript/d/subnets.html.markdown +++ b/website/docs/cdktf/typescript/d/subnets.html.markdown @@ -115,6 +115,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `filter` - (Optional) Custom filter block as described below. * `tags` - (Optional) Map of tags, each pair of which must exactly match a pair on the desired subnets. 
@@ -169,4 +170,4 @@ This data source exports the following attributes in addition to the arguments a - `read` - (Default `20m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/synthetics_runtime_version.html.markdown b/website/docs/cdktf/typescript/d/synthetics_runtime_version.html.markdown index ed34d9144143..3db26fc09e4f 100644 --- a/website/docs/cdktf/typescript/d/synthetics_runtime_version.html.markdown +++ b/website/docs/cdktf/typescript/d/synthetics_runtime_version.html.markdown @@ -67,6 +67,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `latest` - (Optional) Whether the latest version of the runtime should be fetched. Conflicts with `version`. Valid values: `true`. * `version` - (Optional) Version of the runtime to be fetched (for example, `9.0`). Conflicts with `latest`. @@ -80,4 +81,4 @@ This data source exports the following attributes in addition to the arguments a * `releaseDate` - Date that the runtime version was released. * `versionName` - Name of the runtime version. For a list of valid runtime versions, see [Canary Runtime Versions](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch_Synthetics_Canaries_Library.html). 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/synthetics_runtime_versions.html.markdown b/website/docs/cdktf/typescript/d/synthetics_runtime_versions.html.markdown index 827357fb46fa..c9b316780c2b 100644 --- a/website/docs/cdktf/typescript/d/synthetics_runtime_versions.html.markdown +++ b/website/docs/cdktf/typescript/d/synthetics_runtime_versions.html.markdown @@ -36,7 +36,9 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -This data source does not support any arguments. +This data source supports the following arguments: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). ## Attribute Reference @@ -53,4 +55,4 @@ This data source exports the following attributes in addition to the arguments a * `versionName` - Name of the runtime version. For a list of valid runtime versions, see [Canary Runtime Versions](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch_Synthetics_Canaries_Library.html). - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/timestreamwrite_database.html.markdown b/website/docs/cdktf/typescript/d/timestreamwrite_database.html.markdown index e3a7450f5cda..09947f9e2e8d 100644 --- a/website/docs/cdktf/typescript/d/timestreamwrite_database.html.markdown +++ b/website/docs/cdktf/typescript/d/timestreamwrite_database.html.markdown @@ -38,9 +38,10 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -The following arguments are required: +This data source supports the following arguments: -* `databaseName` – (Required) The name of the Timestream database. Minimum length of 3. Maximum length of 256. 
+* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `databaseName` - (Required) The name of the Timestream database. Minimum length of 3. Maximum length of 256. ## Attribute Reference @@ -48,9 +49,9 @@ This data source exports the following attributes in addition to the arguments a * `arn` - The ARN that uniquely identifies this database. * `createdTime` - Creation time of database. -* `databaseName` – (Required) The name of the Timestream database. Minimum length of 3. Maximum length of 256. +* `databaseName` - (Required) The name of the Timestream database. Minimum length of 3. Maximum length of 256. * `kmsKeyId` - The ARN of the KMS key used to encrypt the data stored in the database. * `lastUpdatedTime` - Last time database was updated. * `tableCount` - Total number of tables in the Timestream database. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/timestreamwrite_table.html.markdown b/website/docs/cdktf/typescript/d/timestreamwrite_table.html.markdown index 5cf9cf963a1b..a610384cd39c 100644 --- a/website/docs/cdktf/typescript/d/timestreamwrite_table.html.markdown +++ b/website/docs/cdktf/typescript/d/timestreamwrite_table.html.markdown @@ -39,8 +39,9 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -The following arguments are required: +This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
* `databaseName` - (Required) Name of the Timestream database. * `name` - (Required) Name of the Timestream table. @@ -70,4 +71,4 @@ This data source exports the following attributes in addition to the arguments a * `name` - Name of the table. * `tableStatus` - Current state of table. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/transfer_connector.html.markdown b/website/docs/cdktf/typescript/d/transfer_connector.html.markdown index 35f79ef98f7d..54a7a70d2e49 100644 --- a/website/docs/cdktf/typescript/d/transfer_connector.html.markdown +++ b/website/docs/cdktf/typescript/d/transfer_connector.html.markdown @@ -38,6 +38,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `id` - (Required) Unique identifier for connector ## Attribute Reference @@ -67,4 +68,4 @@ This data source exports the following attributes in addition to the arguments a * `value` - Values associated with the tags key. * `url` - URL of the partner's AS2 or SFTP endpoint. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/transfer_server.html.markdown b/website/docs/cdktf/typescript/d/transfer_server.html.markdown index 985a088e3fa0..73576bbc0c6b 100644 --- a/website/docs/cdktf/typescript/d/transfer_server.html.markdown +++ b/website/docs/cdktf/typescript/d/transfer_server.html.markdown @@ -39,6 +39,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `serverId` - (Required) ID for an SFTP server. ## Attribute Reference @@ -59,4 +60,4 @@ This data source exports the following attributes in addition to the arguments a * `tags` - Map of tags assigned to the resource. * `url` - URL of the service endpoint used to authenticate users with an `identityProviderType` of `API_GATEWAY`. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/verifiedpermissions_policy_store.html.markdown b/website/docs/cdktf/typescript/d/verifiedpermissions_policy_store.html.markdown index b66ddf62bd14..e4af2bb74c23 100644 --- a/website/docs/cdktf/typescript/d/verifiedpermissions_policy_store.html.markdown +++ b/website/docs/cdktf/typescript/d/verifiedpermissions_policy_store.html.markdown @@ -38,8 +38,9 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -The following arguments are required: +This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `id` - (Required) The ID of the Policy Store. ## Attribute Reference @@ -48,8 +49,9 @@ This data source exports the following attributes in addition to the arguments a * `arn` - The ARN of the Policy Store. * `createdDate` - The date the Policy Store was created. +* `deletionProtection` - Whether the policy store can be deleted. * `lastUpdatedDate` - The date the Policy Store was last updated. * `tags` - Map of key-value pairs associated with the policy store. * `validationSettings` - Validation settings for the policy store. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/vpc.html.markdown b/website/docs/cdktf/typescript/d/vpc.html.markdown index ff75ac6347a0..58eb773babc1 100644 --- a/website/docs/cdktf/typescript/d/vpc.html.markdown +++ b/website/docs/cdktf/typescript/d/vpc.html.markdown @@ -57,6 +57,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `cidrBlock` - (Optional) Cidr block of the desired VPC. * `dhcpOptionsId` - (Optional) DHCP options id of the desired VPC. 
* `default` - (Optional) Boolean constraint on whether the desired VPC is @@ -108,4 +109,4 @@ The following attribute is additionally exported: - `read` - (Default `20m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/vpc_dhcp_options.html.markdown b/website/docs/cdktf/typescript/d/vpc_dhcp_options.html.markdown index 599736d8dff1..a07e26b19c83 100644 --- a/website/docs/cdktf/typescript/d/vpc_dhcp_options.html.markdown +++ b/website/docs/cdktf/typescript/d/vpc_dhcp_options.html.markdown @@ -71,6 +71,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `dhcpOptionsId` - (Optional) EC2 DHCP Options ID. * `filter` - (Optional) List of custom filters as described below. @@ -105,4 +106,4 @@ This data source exports the following attributes in addition to the arguments a - `read` - (Default `20m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/vpc_endpoint.html.markdown b/website/docs/cdktf/typescript/d/vpc_endpoint.html.markdown index 3d47138790d5..ec1c6ea2c787 100644 --- a/website/docs/cdktf/typescript/d/vpc_endpoint.html.markdown +++ b/website/docs/cdktf/typescript/d/vpc_endpoint.html.markdown @@ -45,6 +45,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
* `filter` - (Optional) Custom filter block as described below. * `id` - (Optional) ID of the specific VPC Endpoint to retrieve. * `serviceName` - (Optional) Service name of the specific VPC Endpoint to retrieve. For AWS services the service name is usually in the form `com.amazonaws..` (the SageMaker AI Notebook service is an exception to this rule, the service name is in the form `aws.sagemaker..notebook`). @@ -105,4 +106,4 @@ DNS options (for `dnsOptions`) support the following attributes: - `read` - (Default `20m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/vpc_endpoint_associations.html.markdown b/website/docs/cdktf/typescript/d/vpc_endpoint_associations.html.markdown index e1f71f17a5c0..d10733c2811e 100644 --- a/website/docs/cdktf/typescript/d/vpc_endpoint_associations.html.markdown +++ b/website/docs/cdktf/typescript/d/vpc_endpoint_associations.html.markdown @@ -38,8 +38,9 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -The following arguments are required: +This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `vpcEndpointId` - ID of the specific VPC Endpoint to retrieve. ## Attribute Reference @@ -75,4 +76,4 @@ DNS blocks (for `private_dns_entry`) support the following attributes: * `dnsName` - DNS name. * `hostedZoneId` - ID of the private hosted zone. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/vpc_endpoint_service.html.markdown b/website/docs/cdktf/typescript/d/vpc_endpoint_service.html.markdown index 0aa923c64052..2bdf9b8adc82 100644 --- a/website/docs/cdktf/typescript/d/vpc_endpoint_service.html.markdown +++ b/website/docs/cdktf/typescript/d/vpc_endpoint_service.html.markdown @@ -128,8 +128,9 @@ This data source exports the following attributes in addition to the arguments a * `owner` - AWS account ID of the service owner or `amazon`. * `privateDnsName` - Private DNS name for the service. * `privateDnsNames` - Private DNS names assigned to the VPC endpoint service. -* `region` - Region of the endpoint service. +* `region` - (**Deprecated**) Region of the endpoint service. Use `serviceRegion` instead. * `serviceId` - ID of the endpoint service. +* `serviceRegion` - Region of the endpoint service. * `supportedIpAddressTypes` - The supported IP address types. * `tags` - Map of tags assigned to the resource. * `vpcEndpointPolicySupported` - Whether or not the service supports endpoint policies - `true` or `false`. @@ -140,4 +141,4 @@ This data source exports the following attributes in addition to the arguments a - `read` - (Default `20m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/vpc_ipam.html.markdown b/website/docs/cdktf/typescript/d/vpc_ipam.html.markdown index 66b08630a2eb..4d1d46a8f1ab 100644 --- a/website/docs/cdktf/typescript/d/vpc_ipam.html.markdown +++ b/website/docs/cdktf/typescript/d/vpc_ipam.html.markdown @@ -24,7 +24,7 @@ import { TerraformStack } from "cdktf"; * Provider bindings are generated by running `cdktf get`. * See https://cdk.tf/provider-generation for more details. 
*/ -import { DataAwsVpcIpam } from "./.gen/providers/aws/"; +import { DataAwsVpcIpam } from "./.gen/providers/aws/data-aws-vpc-ipam"; class MyConvertedCode extends TerraformStack { constructor(scope: Construct, name: string) { super(scope, name); @@ -38,8 +38,9 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -The following arguments are required: +This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `id` - (Required) ID of the IPAM. ## Attribute Reference @@ -53,15 +54,16 @@ This data source exports the following attributes in addition to the arguments a * `enablePrivateGua` - If private GUA is enabled. * `id` - ID of the IPAM resource. * `ipamRegion` - Region that the IPAM exists in. +* `meteredAccount` - AWS account that is charged for active IP addresses managed in IPAM. * `operatingRegions` - Regions that the IPAM is configured to operate in. * `ownerId` - ID of the account that owns this IPAM. * `privateDefaultScopeId` - ID of the default private scope. * `publicDefaultScopeId` - ID of the default public scope. -* `resource_discovery_association_count` - Number of resource discovery associations. +* `resourceDiscoveryAssociationCount` - Number of resource discovery associations. * `scopeCount` - Number of scopes on this IPAM. * `state` - Current state of the IPAM. -* `state_message` - State message of the IPAM. +* `stateMessage` - State message of the IPAM. * `tier` - IPAM Tier. * `tags` - Tags of the IPAM resource. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/vpc_ipam_pool.html.markdown b/website/docs/cdktf/typescript/d/vpc_ipam_pool.html.markdown index 7ca9aaf4648a..371c8684b670 100644 --- a/website/docs/cdktf/typescript/d/vpc_ipam_pool.html.markdown +++ b/website/docs/cdktf/typescript/d/vpc_ipam_pool.html.markdown @@ -62,6 +62,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `ipamPoolId` - (Optional) ID of the IPAM pool you would like information on. * `filter` - (Optional) Custom filter block as described below. @@ -96,4 +97,4 @@ This data source exports the following attributes in addition to the arguments a - `read` - (Default `20m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/vpc_ipam_pool_cidrs.html.markdown b/website/docs/cdktf/typescript/d/vpc_ipam_pool_cidrs.html.markdown index e4ba31786811..54312987eeac 100644 --- a/website/docs/cdktf/typescript/d/vpc_ipam_pool_cidrs.html.markdown +++ b/website/docs/cdktf/typescript/d/vpc_ipam_pool_cidrs.html.markdown @@ -104,6 +104,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `ipamPoolId` - ID of the IPAM pool you would like the list of provisioned CIDRs. 
* `filter` - Custom filter block as described below. @@ -132,4 +133,4 @@ This data source exports the following attributes in addition to the arguments a - `read` - (Default `1m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/vpc_ipam_pools.html.markdown b/website/docs/cdktf/typescript/d/vpc_ipam_pools.html.markdown index 020bcaceea07..9d0638596e26 100644 --- a/website/docs/cdktf/typescript/d/vpc_ipam_pools.html.markdown +++ b/website/docs/cdktf/typescript/d/vpc_ipam_pools.html.markdown @@ -51,6 +51,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `filter` - (Required) Custom filter block as described below. The arguments of this data source act as filters for querying the available IPAM Pools in the current region. @@ -86,4 +87,4 @@ The following attributes are available on each pool entry found. * `sourceIpamPoolId` - ID of the source IPAM pool. * `tags` - Map of tags assigned to the resource. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/vpc_ipam_preview_next_cidr.html.markdown b/website/docs/cdktf/typescript/d/vpc_ipam_preview_next_cidr.html.markdown index 4a225f3f5b20..edf3709b4ac3 100644 --- a/website/docs/cdktf/typescript/d/vpc_ipam_preview_next_cidr.html.markdown +++ b/website/docs/cdktf/typescript/d/vpc_ipam_preview_next_cidr.html.markdown @@ -58,6 +58,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `disallowedCidrs` - (Optional) Exclude a particular CIDR range from being returned by the pool. * `ipamPoolId` - (Required) ID of the pool to which you want to assign a CIDR. * `netmaskLength` - (Optional) Netmask length of the CIDR you would like to preview from the IPAM pool. @@ -75,4 +76,4 @@ This data source exports the following attributes in addition to the arguments a - `read` - (Default `20m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/vpc_ipams.html.markdown b/website/docs/cdktf/typescript/d/vpc_ipams.html.markdown index d39baa59d5a9..76164032867b 100644 --- a/website/docs/cdktf/typescript/d/vpc_ipams.html.markdown +++ b/website/docs/cdktf/typescript/d/vpc_ipams.html.markdown @@ -94,6 +94,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `ipamIds` - (Optional) IDs of the IPAM resources to query for. * `filter` - (Optional) Custom filter block as described below. @@ -133,4 +134,4 @@ This data source exports the following attributes in addition to the arguments a * `stateMessage` - State message of the IPAM. * `tier` - IPAM Tier. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/vpc_peering_connection.html.markdown b/website/docs/cdktf/typescript/d/vpc_peering_connection.html.markdown index cae67bb028c4..5529c13250a3 100644 --- a/website/docs/cdktf/typescript/d/vpc_peering_connection.html.markdown +++ b/website/docs/cdktf/typescript/d/vpc_peering_connection.html.markdown @@ -55,11 +55,9 @@ This data source supports the following arguments: * `vpcId` - (Optional) ID of the requester VPC of the specific VPC Peering Connection to retrieve. * `ownerId` - (Optional) AWS account ID of the owner of the requester VPC of the specific VPC Peering Connection to retrieve. * `cidrBlock` - (Optional) Primary CIDR block of the requester VPC of the specific VPC Peering Connection to retrieve. -* `region` - (Optional) Region of the requester VPC of the specific VPC Peering Connection to retrieve. * `peerVpcId` - (Optional) ID of the accepter VPC of the specific VPC Peering Connection to retrieve. * `peerOwnerId` - (Optional) AWS account ID of the owner of the accepter VPC of the specific VPC Peering Connection to retrieve. * `peerCidrBlock` - (Optional) Primary CIDR block of the accepter VPC of the specific VPC Peering Connection to retrieve. -* `peerRegion` - (Optional) Region of the accepter VPC of the specific VPC Peering Connection to retrieve. * `filter` - (Optional) Custom filter block as described below. 
* `tags` - (Optional) Map of tags, each pair of which must exactly match a pair on the desired VPC Peering Connection. @@ -73,7 +71,6 @@ More complex filters can be expressed using one or more `filter` sub-blocks, whi * `name` - (Required) Name of the field to filter by, as defined by [the underlying AWS API](http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeVpcPeeringConnections.html). - * `values` - (Required) Set of values that are accepted for the given field. A VPC Peering Connection will be selected if any one of the given values matches. @@ -87,8 +84,11 @@ This data source exports the following attributes in addition to the arguments a * `ipv6CidrBlockSet` - List of objects with IPv6 CIDR blocks of the requester VPC. * `peerCidrBlockSet` - List of objects with IPv4 CIDR blocks of the accepter VPC. * `peerIpv6CidrBlockSet` - List of objects with IPv6 CIDR blocks of the accepter VPC. +* `peerRegion` - Region of the accepter VPC. +* `region` - (**Deprecated**) Region of the requester VPC. Use `requesterRegion` instead. * `requester` - Configuration block that describes [VPC Peering Connection](https://docs.aws.amazon.com/vpc/latest/peering/what-is-vpc-peering.html) options set for the requester VPC. +* `requesterRegion` - Region of the requester VPC. @@ -105,4 +105,4 @@ private IP addresses when queried from instances in a peer VPC. 
- `read` - (Default `20m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/vpc_peering_connections.html.markdown b/website/docs/cdktf/typescript/d/vpc_peering_connections.html.markdown index 9a2d2f38225c..17529b3797cf 100644 --- a/website/docs/cdktf/typescript/d/vpc_peering_connections.html.markdown +++ b/website/docs/cdktf/typescript/d/vpc_peering_connections.html.markdown @@ -57,6 +57,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `filter` - (Optional) Custom filter block as described below. * `tags` - (Optional) Mapping of tags, each pair of which must exactly match a pair on the desired VPC Peering Connection. @@ -85,4 +86,4 @@ This data source exports the following attributes in addition to the arguments a - `read` - (Default `20m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/vpc_security_group_rule.html.markdown b/website/docs/cdktf/typescript/d/vpc_security_group_rule.html.markdown index b608537ae6ca..b2e99bc13e35 100644 --- a/website/docs/cdktf/typescript/d/vpc_security_group_rule.html.markdown +++ b/website/docs/cdktf/typescript/d/vpc_security_group_rule.html.markdown @@ -38,6 +38,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
* `securityGroupRuleId` - (Optional) ID of the security group rule to select. * `filter` - (Optional) Configuration block(s) for filtering. Detailed below. @@ -67,4 +68,4 @@ This data source exports the following attributes in addition to the arguments a * `tags` - A map of tags assigned to the resource. * `toPort` - (Optional) The end of port range for the TCP and UDP protocols, or an ICMP/ICMPv6 code. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/vpc_security_group_rules.html.markdown b/website/docs/cdktf/typescript/d/vpc_security_group_rules.html.markdown index 77fc8aa1df57..732fd42cb933 100644 --- a/website/docs/cdktf/typescript/d/vpc_security_group_rules.html.markdown +++ b/website/docs/cdktf/typescript/d/vpc_security_group_rules.html.markdown @@ -43,6 +43,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `filter` - (Optional) Custom filter block as described below. * `tags` - (Optional) Map of tags, each pair of which must exactly match a pair on the desired security group rule. @@ -63,4 +64,4 @@ This data source exports the following attributes in addition to the arguments a * `ids` - List of all the security group rule IDs found. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/vpclattice_auth_policy.html.markdown b/website/docs/cdktf/typescript/d/vpclattice_auth_policy.html.markdown index 2c091b925523..db5b086ad782 100644 --- a/website/docs/cdktf/typescript/d/vpclattice_auth_policy.html.markdown +++ b/website/docs/cdktf/typescript/d/vpclattice_auth_policy.html.markdown @@ -40,8 +40,9 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -The following arguments are required: +This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `resourceIdentifier` - (Required) The ID or Amazon Resource Name (ARN) of the service network or service for which the policy is created. ## Attribute Reference @@ -51,4 +52,4 @@ This data source exports the following attributes in addition to the arguments a * `policy` - The auth policy. The policy string in JSON must not contain newlines or blank lines. * `state` - The state of the auth policy. The auth policy is only active when the auth type is set to AWS_IAM. If you provide a policy, then authentication and authorization decisions are made based on this policy and the client's IAM policy. If the Auth type is NONE, then, any auth policy you provide will remain inactive. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/vpclattice_listener.html.markdown b/website/docs/cdktf/typescript/d/vpclattice_listener.html.markdown index 5df9676a2384..425741b19214 100644 --- a/website/docs/cdktf/typescript/d/vpclattice_listener.html.markdown +++ b/website/docs/cdktf/typescript/d/vpclattice_listener.html.markdown @@ -43,8 +43,9 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -The following arguments are required: +This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `serviceIdentifier` - (Required) ID or Amazon Resource Name (ARN) of the service network * `listenerIdentifier` - (Required) ID or Amazon Resource Name (ARN) of the listener @@ -64,4 +65,4 @@ This data source exports the following attributes in addition to the arguments a * `serviceId` - The ID of the service. * `tags` - List of tags associated with the listener. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/vpclattice_resource_policy.html.markdown b/website/docs/cdktf/typescript/d/vpclattice_resource_policy.html.markdown index b2223c6f99c4..12922287c3c0 100644 --- a/website/docs/cdktf/typescript/d/vpclattice_resource_policy.html.markdown +++ b/website/docs/cdktf/typescript/d/vpclattice_resource_policy.html.markdown @@ -38,8 +38,9 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -The following arguments are required: +This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `resourceArn` - (Required) Resource ARN of the resource for which a policy is retrieved. ## Attribute Reference @@ -48,4 +49,4 @@ This data source exports the following attributes in addition to the arguments a * `policy` - JSON-encoded string representation of the applied resource policy. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/vpclattice_service.html.markdown b/website/docs/cdktf/typescript/d/vpclattice_service.html.markdown index 0d65fd0d2ef2..7ebf507d8736 100644 --- a/website/docs/cdktf/typescript/d/vpclattice_service.html.markdown +++ b/website/docs/cdktf/typescript/d/vpclattice_service.html.markdown @@ -40,6 +40,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Optional) Service name. * `serviceIdentifier` - (Optional) ID or Amazon Resource Name (ARN) of the service. @@ -58,4 +59,4 @@ This data source exports the following attributes in addition to the arguments a * `status` - Status of the service. * `tags` - List of tags associated with the service. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/vpclattice_service_network.html.markdown b/website/docs/cdktf/typescript/d/vpclattice_service_network.html.markdown index 2edccf484af3..c20056ee410c 100644 --- a/website/docs/cdktf/typescript/d/vpclattice_service_network.html.markdown +++ b/website/docs/cdktf/typescript/d/vpclattice_service_network.html.markdown @@ -40,6 +40,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `serviceNetworkIdentifier` - (Required) Identifier of the service network. ## Attribute Reference @@ -55,4 +56,4 @@ This data source exports the following attributes in addition to the arguments a * `numberOfAssociatedServices` - Number of services associated with this service network. * `numberOfAssociatedVpcs` - Number of VPCs associated with this service network. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/vpcs.html.markdown b/website/docs/cdktf/typescript/d/vpcs.html.markdown index aa288ac3409d..faf29206ec66 100644 --- a/website/docs/cdktf/typescript/d/vpcs.html.markdown +++ b/website/docs/cdktf/typescript/d/vpcs.html.markdown @@ -109,6 +109,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
* `tags` - (Optional) Map of tags, each pair of which must exactly match a pair on the desired vpcs. * `filter` - (Optional) Custom filter block as described below. @@ -135,4 +136,4 @@ This data source exports the following attributes in addition to the arguments a - `read` - (Default `20m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/vpn_gateway.html.markdown b/website/docs/cdktf/typescript/d/vpn_gateway.html.markdown index 8c116618c9ff..a95d0da7828b 100644 --- a/website/docs/cdktf/typescript/d/vpn_gateway.html.markdown +++ b/website/docs/cdktf/typescript/d/vpn_gateway.html.markdown @@ -47,6 +47,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `id` - (Optional) ID of the specific VPN Gateway to retrieve. * `state` - (Optional) State of the specific VPN Gateway to retrieve. * `availabilityZone` - (Optional) Availability Zone of the specific VPN Gateway to retrieve. @@ -78,4 +79,4 @@ This data source exports no additional attributes. 
- `read` - (Default `20m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/wafregional_ipset.html.markdown b/website/docs/cdktf/typescript/d/wafregional_ipset.html.markdown index 9eaca372f49a..a89d463c42ef 100644 --- a/website/docs/cdktf/typescript/d/wafregional_ipset.html.markdown +++ b/website/docs/cdktf/typescript/d/wafregional_ipset.html.markdown @@ -38,6 +38,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the WAF Regional IP set. ## Attribute Reference @@ -46,4 +47,4 @@ This data source exports the following attributes in addition to the arguments a * `id` - ID of the WAF Regional IP set. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/wafregional_rate_based_rule.html.markdown b/website/docs/cdktf/typescript/d/wafregional_rate_based_rule.html.markdown index ba08d047d158..0b3a5f37ce88 100644 --- a/website/docs/cdktf/typescript/d/wafregional_rate_based_rule.html.markdown +++ b/website/docs/cdktf/typescript/d/wafregional_rate_based_rule.html.markdown @@ -38,6 +38,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the WAF Regional rate based rule. 
## Attribute Reference @@ -46,4 +47,4 @@ This data source exports the following attributes in addition to the arguments a * `id` - ID of the WAF Regional rate based rule. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/wafregional_rule.html.markdown b/website/docs/cdktf/typescript/d/wafregional_rule.html.markdown index de6d06d142a2..f414393ed132 100644 --- a/website/docs/cdktf/typescript/d/wafregional_rule.html.markdown +++ b/website/docs/cdktf/typescript/d/wafregional_rule.html.markdown @@ -38,6 +38,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the WAF Regional rule. ## Attribute Reference @@ -46,4 +47,4 @@ This data source exports the following attributes in addition to the arguments a * `id` - ID of the WAF Regional rule. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/wafregional_subscribed_rule_group.html.markdown b/website/docs/cdktf/typescript/d/wafregional_subscribed_rule_group.html.markdown index d1f547ffdf61..5cb5162af542 100644 --- a/website/docs/cdktf/typescript/d/wafregional_subscribed_rule_group.html.markdown +++ b/website/docs/cdktf/typescript/d/wafregional_subscribed_rule_group.html.markdown @@ -68,6 +68,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Optional) Name of the WAF rule group. * `metricName` - (Optional) Name of the WAF rule group. @@ -79,4 +80,4 @@ This data source exports the following attributes in addition to the arguments a * `id` - ID of the WAF rule group. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/wafregional_web_acl.html.markdown b/website/docs/cdktf/typescript/d/wafregional_web_acl.html.markdown index e0aa03a3f149..fe37278aa978 100644 --- a/website/docs/cdktf/typescript/d/wafregional_web_acl.html.markdown +++ b/website/docs/cdktf/typescript/d/wafregional_web_acl.html.markdown @@ -38,6 +38,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the WAF Regional Web ACL. ## Attribute Reference @@ -46,4 +47,4 @@ This data source exports the following attributes in addition to the arguments a * `id` - ID of the WAF Regional Web ACL. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/wafv2_ip_set.html.markdown b/website/docs/cdktf/typescript/d/wafv2_ip_set.html.markdown index 38d13b11fe97..edad23f8ed1a 100644 --- a/website/docs/cdktf/typescript/d/wafv2_ip_set.html.markdown +++ b/website/docs/cdktf/typescript/d/wafv2_ip_set.html.markdown @@ -39,6 +39,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the WAFv2 IP Set. * `scope` - (Required) Specifies whether this is for an AWS CloudFront distribution or for a regional application. Valid values are `CLOUDFRONT` or `REGIONAL`. To work with CloudFront, you must also specify the region `us-east-1` (N. Virginia) on the AWS provider. @@ -52,4 +53,4 @@ This data source exports the following attributes in addition to the arguments a * `id` - Unique identifier for the set. * `ipAddressVersion` - IP address version of the set. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/wafv2_regex_pattern_set.html.markdown b/website/docs/cdktf/typescript/d/wafv2_regex_pattern_set.html.markdown index 1fefb4080c3d..48e9eb9a8b47 100644 --- a/website/docs/cdktf/typescript/d/wafv2_regex_pattern_set.html.markdown +++ b/website/docs/cdktf/typescript/d/wafv2_regex_pattern_set.html.markdown @@ -39,6 +39,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the WAFv2 Regex Pattern Set. * `scope` - (Required) Specifies whether this is for an AWS CloudFront distribution or for a regional application. Valid values are `CLOUDFRONT` or `REGIONAL`. To work with CloudFront, you must also specify the region `us-east-1` (N. Virginia) on the AWS provider. @@ -57,4 +58,4 @@ Each `regularExpression` supports the following argument: * `regexString` - (Required) String representing the regular expression, see the AWS WAF [documentation](https://docs.aws.amazon.com/waf/latest/developerguide/waf-regex-pattern-set-creating.html) for more information. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/wafv2_rule_group.html.markdown b/website/docs/cdktf/typescript/d/wafv2_rule_group.html.markdown index f694d076cf3c..d31029770294 100644 --- a/website/docs/cdktf/typescript/d/wafv2_rule_group.html.markdown +++ b/website/docs/cdktf/typescript/d/wafv2_rule_group.html.markdown @@ -39,6 +39,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the WAFv2 Rule Group. * `scope` - (Required) Specifies whether this is for an AWS CloudFront distribution or for a regional application. Valid values are `CLOUDFRONT` or `REGIONAL`. To work with CloudFront, you must also specify the region `us-east-1` (N. Virginia) on the AWS provider. 
@@ -50,4 +51,4 @@ This data source exports the following attributes in addition to the arguments a * `description` - Description of the rule group that helps with identification. * `id` - Unique identifier of the rule group. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/wafv2_web_acl.html.markdown b/website/docs/cdktf/typescript/d/wafv2_web_acl.html.markdown index d1244df6d87c..703146782b4d 100644 --- a/website/docs/cdktf/typescript/d/wafv2_web_acl.html.markdown +++ b/website/docs/cdktf/typescript/d/wafv2_web_acl.html.markdown @@ -14,6 +14,8 @@ Retrieves the summary of a WAFv2 Web ACL. ## Example Usage +### Lookup by name + ```typescript // DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug import { Construct } from "constructs"; @@ -35,11 +37,41 @@ class MyConvertedCode extends TerraformStack { ``` +### Lookup by associated resource + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. + */ +import { DataAwsWafv2WebAcl } from "./.gen/providers/aws/data-aws-wafv2-web-acl"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + new DataAwsWafv2WebAcl(this, "alb_example", { + resourceArn: + "arn:aws:elasticloadbalancing:us-east-1:123456789012:loadbalancer/app/my-alb/xxxxx", + scope: "REGIONAL", + }); + new DataAwsWafv2WebAcl(this, "cloudfront_example", { + resourceArn: "arn:aws:cloudfront::123456789012:distribution/XXX", + scope: "CLOUDFRONT", + }); + } +} + +``` + ## Argument Reference This data source supports the following arguments: -* `name` - (Required) Name of the WAFv2 Web ACL. +* `name` - (Optional) Name of the WAFv2 Web ACL. 
Exactly one of `name` or `resourceArn` must be specified. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `resourceArn` - (Optional) ARN of the AWS resource associated with the Web ACL. This can be an ARN of an Application Load Balancer, Amazon API Gateway REST API, AWS AppSync GraphQL API, Amazon Cognito user pool, AWS App Runner service, AWS Verified Access instance, or AWS Amplify application. Exactly one of `name` or `resourceArn` must be specified. * `scope` - (Required) Specifies whether this is for an AWS CloudFront distribution or for a regional application. Valid values are `CLOUDFRONT` or `REGIONAL`. To work with CloudFront, you must also specify the region `us-east-1` (N. Virginia) on the AWS provider. ## Attribute Reference @@ -50,4 +82,4 @@ This data source exports the following attributes in addition to the arguments a * `description` - Description of the WebACL that helps with identification. * `id` - Unique identifier of the WebACL. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/workspaces_bundle.html.markdown b/website/docs/cdktf/typescript/d/workspaces_bundle.html.markdown index 221d4f2da269..c96ffbd39615 100644 --- a/website/docs/cdktf/typescript/d/workspaces_bundle.html.markdown +++ b/website/docs/cdktf/typescript/d/workspaces_bundle.html.markdown @@ -63,21 +63,22 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: -* `bundleId` – (Optional) ID of the bundle. -* `owner` – (Optional) Owner of the bundles. You have to leave it blank for own bundles. You cannot combine this parameter with `bundleId`. -* `name` – (Optional) Name of the bundle. 
You cannot combine this parameter with `bundleId`. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `bundleId` - (Optional) ID of the bundle. +* `owner` - (Optional) Owner of the bundles. You have to leave it blank for own bundles. You cannot combine this parameter with `bundleId`. +* `name` - (Optional) Name of the bundle. You cannot combine this parameter with `bundleId`. ## Attribute Reference This data source exports the following attributes in addition to the arguments above: -* `description` – The description of the bundle. -* `bundleId` – The ID of the bundle. -* `name` – The name of the bundle. -* `owner` – The owner of the bundle. -* `computeType` – The compute type. See supported fields below. -* `rootStorage` – The root volume. See supported fields below. -* `userStorage` – The user storage. See supported fields below. +* `description` - The description of the bundle. +* `bundleId` - The ID of the bundle. +* `name` - The name of the bundle. +* `owner` - The owner of the bundle. +* `computeType` - The compute type. See supported fields below. +* `rootStorage` - The root volume. See supported fields below. +* `userStorage` - The user storage. See supported fields below. ### `computeType` @@ -91,4 +92,4 @@ This data source exports the following attributes in addition to the arguments a * `capacity` - Size of the user storage. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/workspaces_directory.html.markdown b/website/docs/cdktf/typescript/d/workspaces_directory.html.markdown index f1f0bf2446c5..7b5a0423a077 100644 --- a/website/docs/cdktf/typescript/d/workspaces_directory.html.markdown +++ b/website/docs/cdktf/typescript/d/workspaces_directory.html.markdown @@ -38,6 +38,7 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `directoryId` - (Required) Directory identifier for registration in WorkSpaces service. ## Attribute Reference @@ -45,9 +46,9 @@ This data source supports the following arguments: This data source exports the following attributes in addition to the arguments above: * `id` - WorkSpaces directory identifier. -* `active_directory_config` - Configuration for Active Directory integration when `workspace_type` is set to `POOLS`. - * `domainName` – Fully qualified domain name of the AWS Directory Service directory. - * `service_account_secret_arn` – ARN of the Secrets Manager secret that contains the credentials for the service account. +* `activeDirectoryConfig` - Configuration for Active Directory integration when `workspaceType` is set to `POOLS`. + * `domainName` - Fully qualified domain name of the AWS Directory Service directory. + * `serviceAccountSecretArn` - ARN of the Secrets Manager secret that contains the credentials for the service account. * `alias` - Directory alias. * `customerUserName` - User name for the service account. * `directoryName` - Name of the directory. 
@@ -56,33 +57,33 @@ This data source exports the following attributes in addition to the arguments a * `iamRoleId` - Identifier of the IAM role. This is the role that allows Amazon WorkSpaces to make calls to other services, such as Amazon EC2, on your behalf. * `ipGroupIds` - Identifiers of the IP access control groups associated with the directory. * `registrationCode` - Registration code for the directory. This is the code that users enter in their Amazon WorkSpaces client application to connect to the directory. -* `selfServicePermissions` – The permissions to enable or disable self-service capabilities. - * `changeComputeType` – Whether WorkSpaces directory users can change the compute type (bundle) for their workspace. - * `increaseVolumeSize` – Whether WorkSpaces directory users can increase the volume size of the drives on their workspace. - * `rebuildWorkspace` – Whether WorkSpaces directory users can rebuild the operating system of a workspace to its original state. - * `restartWorkspace` – Whether WorkSpaces directory users can restart their workspace. - * `switchRunningMode` – Whether WorkSpaces directory users can switch the running mode of their workspace. +* `selfServicePermissions` - The permissions to enable or disable self-service capabilities. + * `changeComputeType` - Whether WorkSpaces directory users can change the compute type (bundle) for their workspace. + * `increaseVolumeSize` - Whether WorkSpaces directory users can increase the volume size of the drives on their workspace. + * `rebuildWorkspace` - Whether WorkSpaces directory users can rebuild the operating system of a workspace to its original state. + * `restartWorkspace` - Whether WorkSpaces directory users can restart their workspace. + * `switchRunningMode` - Whether WorkSpaces directory users can switch the running mode of their workspace. * `subnetIds` - Identifiers of the subnets where the directory resides. -* `tags` – A map of tags assigned to the WorkSpaces directory. 
-* `user_identity_type` - The user identity type for the WorkSpaces directory. -* `workspaceAccessProperties` – Specifies which devices and operating systems users can use to access their WorkSpaces. - * `deviceTypeAndroid` – (Optional) Indicates whether users can use Android devices to access their WorkSpaces. - * `deviceTypeChromeos` – (Optional) Indicates whether users can use Chromebooks to access their WorkSpaces. - * `deviceTypeIos` – (Optional) Indicates whether users can use iOS devices to access their WorkSpaces. - * `deviceTypeLinux` – (Optional) Indicates whether users can use Linux clients to access their WorkSpaces. - * `deviceTypeOsx` – (Optional) Indicates whether users can use macOS clients to access their WorkSpaces. - * `deviceTypeWeb` – (Optional) Indicates whether users can access their WorkSpaces through a web browser. - * `deviceTypeWindows` – (Optional) Indicates whether users can use Windows clients to access their WorkSpaces. - * `deviceTypeZeroclient` – (Optional) Indicates whether users can use zero client devices to access their WorkSpaces. -* `workspaceCreationProperties` – The default properties that are used for creating WorkSpaces. - * `customSecurityGroupId` – The identifier of your custom security group. Should relate to the same VPC, where workspaces reside in. - * `defaultOu` – The default organizational unit (OU) for your WorkSpace directories. - * `enableInternetAccess` – Indicates whether internet access is enabled for your WorkSpaces. - * `enableMaintenanceMode` – Indicates whether maintenance mode is enabled for your WorkSpaces. For more information, see [WorkSpace Maintenance](https://docs.aws.amazon.com/workspaces/latest/adminguide/workspace-maintenance.html). - * `userEnabledAsLocalAdministrator` – Indicates whether users are local administrators of their WorkSpaces. -* `workspace_directory_description` - The description of the WorkSpaces directory when `workspace_type` is set to `POOLS`. 
-* `workspace_directory_name` - The name of the WorkSpaces directory when `workspace_type` is set to `POOLS`. +* `tags` - A map of tags assigned to the WorkSpaces directory. +* `userIdentityType` - The user identity type for the WorkSpaces directory. +* `workspaceAccessProperties` - Specifies which devices and operating systems users can use to access their WorkSpaces. + * `deviceTypeAndroid` - (Optional) Indicates whether users can use Android devices to access their WorkSpaces. + * `deviceTypeChromeos` - (Optional) Indicates whether users can use Chromebooks to access their WorkSpaces. + * `deviceTypeIos` - (Optional) Indicates whether users can use iOS devices to access their WorkSpaces. + * `deviceTypeLinux` - (Optional) Indicates whether users can use Linux clients to access their WorkSpaces. + * `deviceTypeOsx` - (Optional) Indicates whether users can use macOS clients to access their WorkSpaces. + * `deviceTypeWeb` - (Optional) Indicates whether users can access their WorkSpaces through a web browser. + * `deviceTypeWindows` - (Optional) Indicates whether users can use Windows clients to access their WorkSpaces. + * `deviceTypeZeroclient` - (Optional) Indicates whether users can use zero client devices to access their WorkSpaces. +* `workspaceCreationProperties` - The default properties that are used for creating WorkSpaces. + * `customSecurityGroupId` - The identifier of your custom security group. Should relate to the same VPC, where workspaces reside in. + * `defaultOu` - The default organizational unit (OU) for your WorkSpace directories. + * `enableInternetAccess` - Indicates whether internet access is enabled for your WorkSpaces. + * `enableMaintenanceMode` - Indicates whether maintenance mode is enabled for your WorkSpaces. For more information, see [WorkSpace Maintenance](https://docs.aws.amazon.com/workspaces/latest/adminguide/workspace-maintenance.html). 
+ * `userEnabledAsLocalAdministrator` - Indicates whether users are local administrators of their WorkSpaces. +* `workspaceDirectoryDescription` - The description of the WorkSpaces directory when `workspaceType` is set to `POOLS`. +* `workspaceDirectoryName` - The name of the WorkSpaces directory when `workspaceType` is set to `POOLS`. * `workspaceSecurityGroupId` - The identifier of the security group that is assigned to new WorkSpaces. -* `workspace_type` - The type of WorkSpaces directory. +* `workspaceType` - The type of WorkSpaces directory. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/workspaces_image.html.markdown b/website/docs/cdktf/typescript/d/workspaces_image.html.markdown index c5434430ce3d..8e2025440d32 100644 --- a/website/docs/cdktf/typescript/d/workspaces_image.html.markdown +++ b/website/docs/cdktf/typescript/d/workspaces_image.html.markdown @@ -38,16 +38,17 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: -* `imageId` – (Required) ID of the image. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `imageId` - (Required) ID of the image. ## Attribute Reference This data source exports the following attributes in addition to the arguments above: -* `name` – The name of the image. -* `description` – The description of the image. -* `os` – The operating system that the image is running. -* `requiredTenancy` – Specifies whether the image is running on dedicated hardware. When Bring Your Own License (BYOL) is enabled, this value is set to DEDICATED. 
For more information, see [Bring Your Own Windows Desktop Images](https://docs.aws.amazon.com/workspaces/latest/adminguide/byol-windows-images.html). -* `state` – The status of the image. +* `name` - The name of the image. +* `description` - The description of the image. +* `os` - The operating system that the image is running. +* `requiredTenancy` - Specifies whether the image is running on dedicated hardware. When Bring Your Own License (BYOL) is enabled, this value is set to DEDICATED. For more information, see [Bring Your Own Windows Desktop Images](https://docs.aws.amazon.com/workspaces/latest/adminguide/byol-windows-images.html). +* `state` - The status of the image. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/workspaces_workspace.html.markdown b/website/docs/cdktf/typescript/d/workspaces_workspace.html.markdown index 8b30f3f4e9b0..594340528e86 100644 --- a/website/docs/cdktf/typescript/d/workspaces_workspace.html.markdown +++ b/website/docs/cdktf/typescript/d/workspaces_workspace.html.markdown @@ -8,7 +8,7 @@ description: |- -# Resource: aws_workspaces_workspace +# Data Source: aws_workspaces_workspace Use this data source to get information about a workspace in [AWS Workspaces](https://docs.aws.amazon.com/workspaces/latest/adminguide/amazon-workspaces.html) Service. @@ -63,23 +63,24 @@ class MyConvertedCode extends TerraformStack { This data source supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `bundleId` - (Optional) ID of the bundle for the WorkSpace. * `directoryId` - (Optional) ID of the directory for the WorkSpace. You have to specify `userName` along with `directoryId`. 
You cannot combine this parameter with `workspaceId`. * `rootVolumeEncryptionEnabled` - (Optional) Indicates whether the data stored on the root volume is encrypted. * `tags` - (Optional) Tags for the WorkSpace. -* `userName` – (Optional) User name of the user for the WorkSpace. This user name must exist in the directory for the WorkSpace. You cannot combine this parameter with `workspaceId`. -* `userVolumeEncryptionEnabled` – (Optional) Indicates whether the data stored on the user volume is encrypted. -* `volumeEncryptionKey` – (Optional) Symmetric AWS KMS customer master key (CMK) used to encrypt data stored on your WorkSpace. Amazon WorkSpaces does not support asymmetric CMKs. +* `userName` - (Optional) User name of the user for the WorkSpace. This user name must exist in the directory for the WorkSpace. You cannot combine this parameter with `workspaceId`. +* `userVolumeEncryptionEnabled` - (Optional) Indicates whether the data stored on the user volume is encrypted. +* `volumeEncryptionKey` - (Optional) Symmetric AWS KMS customer master key (CMK) used to encrypt data stored on your WorkSpace. Amazon WorkSpaces does not support asymmetric CMKs. * `workspaceId` - (Optional) ID of the WorkSpace. You cannot combine this parameter with `directoryId`. -* `workspaceProperties` – (Optional) WorkSpace properties. +* `workspaceProperties` - (Optional) WorkSpace properties. `workspaceProperties` supports the following: -* `computeTypeName` – (Optional) Compute type. For more information, see [Amazon WorkSpaces Bundles](http://aws.amazon.com/workspaces/details/#Amazon_WorkSpaces_Bundles). Valid values are `VALUE`, `STANDARD`, `PERFORMANCE`, `POWER`, `GRAPHICS`, `POWERPRO` and `GRAPHICSPRO`. -* `rootVolumeSizeGib` – (Optional) Size of the root volume. -* `runningMode` – (Optional) Running mode. For more information, see [Manage the WorkSpace Running Mode](https://docs.aws.amazon.com/workspaces/latest/adminguide/running-mode.html). 
Valid values are `AUTO_STOP` and `ALWAYS_ON`. -* `runningModeAutoStopTimeoutInMinutes` – (Optional) Time after a user logs off when WorkSpaces are automatically stopped. Configured in 60-minute intervals. -* `userVolumeSizeGib` – (Optional) Size of the user storage. +* `computeTypeName` - (Optional) Compute type. For more information, see [Amazon WorkSpaces Bundles](http://aws.amazon.com/workspaces/details/#Amazon_WorkSpaces_Bundles). Valid values are `VALUE`, `STANDARD`, `PERFORMANCE`, `POWER`, `GRAPHICS`, `POWERPRO` and `GRAPHICSPRO`. +* `rootVolumeSizeGib` - (Optional) Size of the root volume. +* `runningMode` - (Optional) Running mode. For more information, see [Manage the WorkSpace Running Mode](https://docs.aws.amazon.com/workspaces/latest/adminguide/running-mode.html). Valid values are `AUTO_STOP` and `ALWAYS_ON`. +* `runningModeAutoStopTimeoutInMinutes` - (Optional) Time after a user logs off when WorkSpaces are automatically stopped. Configured in 60-minute intervals. +* `userVolumeSizeGib` - (Optional) Size of the user storage. ## Attribute Reference @@ -90,4 +91,4 @@ This data source exports the following attributes in addition to the arguments a * `computerName` - Name of the WorkSpace, as seen by the operating system. * `state` - Operational state of the WorkSpace. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/ephemeral-resources/cognito_identity_openid_token_for_developer_identity.markdown b/website/docs/cdktf/typescript/ephemeral-resources/cognito_identity_openid_token_for_developer_identity.markdown index 9b1bb36ae4c7..7e4d73b5e7c6 100644 --- a/website/docs/cdktf/typescript/ephemeral-resources/cognito_identity_openid_token_for_developer_identity.markdown +++ b/website/docs/cdktf/typescript/ephemeral-resources/cognito_identity_openid_token_for_developer_identity.markdown @@ -7,13 +7,12 @@ description: |- --- - # Ephemeral: aws_cognito_identity_openid_token_for_developer_identity Terraform ephemeral resource for managing an AWS Cognito Identity Open ID Token for Developer Identity. -~> Ephemeral resources are a new feature and may evolve as we continue to explore their most effective uses. [Learn more](https://developer.hashicorp.com/terraform/language/v1.10.x/resources/ephemeral). +~> Ephemeral resources are a new feature and may evolve as we continue to explore their most effective uses. [Learn more](https://developer.hashicorp.com/terraform/language/resources/ephemeral). ## Example Usage @@ -41,12 +40,12 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -The following arguments are required: +This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `identityPoolId` - (Required) An identity pool ID in the format REGION:GUID. The following arguments are optional:
* `identityId` - (Optional) A unique identifier in the format REGION:GUID. * `logins` - (Optional) A set of optional name-value pairs that map provider names to provider tokens. Each name-value pair represents a user from a public provider or developer provider. If the user is from a developer provider, the name-value pair will follow the syntax `"developer_provider_name": "developer_user_identifier"`. The developer provider is the "domain" by which Cognito will refer to your users; you provided this domain while creating/updating the identity pool. The developer user identifier is an identifier from your backend that uniquely identifies a user. When you create an identity pool, you can specify the supported logins. @@ -61,4 +60,4 @@ This resource exports the following attributes in addition to the arguments abov * `token` - An OpenID token. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/ephemeral-resources/eks_cluster_auth.html.markdown b/website/docs/cdktf/typescript/ephemeral-resources/eks_cluster_auth.html.markdown index adc88c4481bf..54c43daad7c7 100644 --- a/website/docs/cdktf/typescript/ephemeral-resources/eks_cluster_auth.html.markdown +++ b/website/docs/cdktf/typescript/ephemeral-resources/eks_cluster_auth.html.markdown @@ -12,7 +12,7 @@ description: |- Retrieve an authentication token to communicate with an EKS cluster. -~> **NOTE:** Ephemeral resources are a new feature and may evolve as we continue to explore their most effective uses. [Learn more](https://developer.hashicorp.com/terraform/language/v1.10.x/resources/ephemeral). +~> **NOTE:** Ephemeral resources are a new feature and may evolve as we continue to explore their most effective uses. [Learn more](https://developer.hashicorp.com/terraform/language/resources/ephemeral).
## Example Usage @@ -62,6 +62,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the EKS cluster. ## Attribute Reference @@ -70,4 +71,4 @@ This resource exports the following attributes in addition to the arguments abov * `token` - Token to use to authenticate with the cluster. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/ephemeral-resources/kms_secrets.html.markdown b/website/docs/cdktf/typescript/ephemeral-resources/kms_secrets.html.markdown index a306c2118967..ada936dbeac1 100644 --- a/website/docs/cdktf/typescript/ephemeral-resources/kms_secrets.html.markdown +++ b/website/docs/cdktf/typescript/ephemeral-resources/kms_secrets.html.markdown @@ -12,7 +12,7 @@ description: |- Decrypt multiple secrets from data encrypted with the AWS KMS service. -~> **NOTE:** Ephemeral resources are a new feature and may evolve as we continue to explore their most effective uses. [Learn more](https://developer.hashicorp.com/terraform/language/v1.10.x/resources/ephemeral). +~> **NOTE:** Ephemeral resources are a new feature and may evolve as we continue to explore their most effective uses. [Learn more](https://developer.hashicorp.com/terraform/language/resources/ephemeral). ## Example Usage @@ -60,6 +60,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `secret` - (Required) One or more encrypted payload definitions from the KMS service. See the Secret Definitions below. ### Secret Definitions @@ -82,4 +83,4 @@ This resource exports the following attributes in addition to the arguments abov * `plaintext` - Map containing each `secret` `name` as the key with its decrypted plaintext value - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/ephemeral-resources/lambda_invocation.html.markdown b/website/docs/cdktf/typescript/ephemeral-resources/lambda_invocation.html.markdown index a0a5b46801d6..6b7c8e8e0da7 100644 --- a/website/docs/cdktf/typescript/ephemeral-resources/lambda_invocation.html.markdown +++ b/website/docs/cdktf/typescript/ephemeral-resources/lambda_invocation.html.markdown @@ -3,34 +3,224 @@ subcategory: "Lambda" layout: "aws" page_title: "AWS: aws_lambda_invocation" description: |- - Invoke AWS Lambda Function + Invokes an AWS Lambda Function as an ephemeral resource. --- # Ephemeral: aws_lambda_invocation -Use this ephemeral resource to invoke a Lambda function. The lambda function is invoked with the [RequestResponse](https://docs.aws.amazon.com/lambda/latest/dg/API_Invoke.html#API_Invoke_RequestSyntax) invocation type. +Invokes an AWS Lambda Function as an ephemeral resource. Use this ephemeral resource to execute Lambda functions during Terraform operations without persisting results in state, ideal for generating sensitive data or performing lightweight operations. -~> **NOTE:** Ephemeral resources are a new feature and may evolve as we continue to explore their most effective uses. [Learn more](https://developer.hashicorp.com/terraform/language/v1.10.x/resources/ephemeral). 
+The Lambda function is invoked with [RequestResponse](https://docs.aws.amazon.com/lambda/latest/dg/API_Invoke.html#API_Invoke_RequestSyntax) invocation type. -~> **NOTE:** The `aws_lambda_invocation` ephemeral resource invokes the function during every `plan` and `apply` when the function is known. A common use case for this functionality is when invoking a lightweight function—where repeated invocations are acceptable—that produces sensitive information you do not want to store in the state. +~> **Note:** Ephemeral resources are a new feature and may evolve as we continue to explore their most effective uses. [Learn more](https://developer.hashicorp.com/terraform/language/resources/ephemeral). -~> **NOTE:** If you get a `KMSAccessDeniedException: Lambda was unable to decrypt the environment variables because KMS access was denied` error when invoking an [`aws_lambda_function`](/docs/providers/aws/r/lambda_function.html) with environment variables, the IAM role associated with the function may have been deleted and recreated _after_ the function was created. You can fix the problem two ways: 1) updating the function's role to another role and then updating it back again to the recreated role, or 2) by using Terraform to `taint` the function and `apply` your configuration again to recreate the function. (When you create a function, Lambda grants permissions on the KMS key to the function's IAM role. If the IAM role is recreated, the grant is no longer valid. Changing the function's role or recreating the function causes Lambda to update the grant.) +~> **Note:** The `aws_lambda_invocation` ephemeral resource invokes the function during every `plan` and `apply` when the function is known. A common use case for this functionality is when invoking a lightweight function—where repeated invocations are acceptable—that produces sensitive information you do not want to store in the state. 
+ +~> **Note:** If you get a `KMSAccessDeniedException: Lambda was unable to decrypt the environment variables because KMS access was denied` error when invoking a Lambda function with environment variables, the IAM role associated with the function may have been deleted and recreated after the function was created. You can fix the problem two ways: 1) updating the function's role to another role and then updating it back again to the recreated role, or 2) by using Terraform to `taint` the function and `apply` your configuration again to recreate the function. (When you create a function, Lambda grants permissions on the KMS key to the function's IAM role. If the IAM role is recreated, the grant is no longer valid. Changing the function's role or recreating the function causes Lambda to update the grant.) ## Example Usage -### Basic Example +### Generate Sensitive Configuration ```typescript // DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug import { Construct } from "constructs"; -import { TerraformOutput, Fn, TerraformStack } from "cdktf"; +import { + VariableType, + TerraformVariable, + TerraformOutput, + Fn, + Token, + TerraformStack, +} from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. + */ +import { SsmParameter } from "./.gen/providers/aws/ssm-parameter"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + /*Terraform Variables are not always the best fit for getting inputs in the context of Terraform CDK. 
+ You can read more about this at https://cdk.tf/variables*/ + const environment = new TerraformVariable(this, "environment", { + description: "The environment name (e.g., dev, prod)", + type: VariableType.STRING, + }); + new TerraformOutput(this, "key_generated", { + value: "API key generated and stored in Parameter Store", + }); + new SsmParameter(this, "api_key", { + name: "/app/${" + environment.value + "}/api-key", + tags: { + Environment: environment.stringValue, + Generated: "ephemeral-lambda", + }, + type: "SecureString", + value: Token.asString( + Fn.lookupNested( + Fn.jsondecode(awsLambdaInvocation.secretGenerator.result), + ["api_key"] + ) + ), + }); + } +} + +``` + +### Dynamic Resource Configuration + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { Fn, Token, TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. 
+ */ +import { AutoscalingGroup } from "./.gen/providers/aws/autoscaling-group"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + const sizing = Fn.jsondecode(awsLambdaInvocation.resourceCalculator.result); + new AutoscalingGroup(this, "example", { + desiredCapacity: Token.asNumber( + Fn.lookupNested(sizing, ["desired_instances"]) + ), + healthCheckType: "ELB", + launchTemplate: { + id: Token.asString(awsLaunchTemplateExample.id), + version: "$Latest", + }, + maxSize: Token.asNumber(Fn.lookupNested(sizing, ["max_instances"])), + minSize: Token.asNumber(Fn.lookupNested(sizing, ["min_instances"])), + name: "optimized-asg", + tag: [ + { + key: "OptimizedBy", + propagateAtLaunch: true, + value: "ephemeral-lambda", + }, + ], + targetGroupArns: [Token.asString(awsLbTargetGroupExample.arn)], + vpcZoneIdentifier: subnetIds.listValue, + }); + } +} + +``` + +### Validation and Compliance Checks + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { + VariableType, + TerraformVariable, + conditional, + Token, + TerraformCount, + Fn, + TerraformStack, +} from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. + */ +import { Resource } from "./.gen/providers/null/resource"; +import { Instance } from "./.gen/providers/aws/instance"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + /*Terraform Variables are not always the best fit for getting inputs in the context of Terraform CDK. 
+ You can read more about this at https://cdk.tf/variables*/ + const instanceType = new TerraformVariable(this, "instance_type", { + description: "The EC2 instance type to use", + type: VariableType.STRING, + }); + const isCompliant = compliant; + const violations = validationResultViolations; + /*In most cases loops should be handled in the programming language context and + not inside of the Terraform context. If you are looping over something external, e.g. a variable or a file input + you should consider using a for loop. If you are looping over something only known to Terraform, e.g. a result of a data source + you need to keep this like it is.*/ + const exampleCount = TerraformCount.of( + Token.asNumber(conditional(isCompliant, 1, 0)) + ); + new Instance(this, "example", { + ami: Token.asString(dataAwsAmiExample.id), + instanceType: instanceType.stringValue, + rootBlockDevice: { + encrypted: encryptStorage.booleanValue, + }, + tags: { + ComplianceCheck: "passed", + Environment: environment.stringValue, + }, + count: exampleCount, + }); + /*In most cases loops should be handled in the programming language context and + not inside of the Terraform context. If you are looping over something external, e.g. a variable or a file input + you should consider using a for loop. If you are looping over something only known to Terraform, e.g. a result of a data source + you need to keep this like it is.*/ + const complianceGateCount = TerraformCount.of( + Token.asNumber(conditional(isCompliant, 0, 1)) + ); + new Resource(this, "compliance_gate", { + count: complianceGateCount, + provisioners: [ + { + type: "local-exec", + command: + "echo 'Compliance violations: " + + Token.asString(Fn.join(", ", Token.asList(violations))) + + "' && exit 1", + }, + ], + }); + } +} + +``` + +### External API Integration + +```typescript +// DO NOT EDIT. 
Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { Fn, Token, TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. + */ +import { EcsService } from "./.gen/providers/aws/ecs-service"; class MyConvertedCode extends TerraformStack { constructor(scope: Construct, name: string) { super(scope, name); - new TerraformOutput(this, "result_entry", { - value: Fn.lookupNested(Fn.jsondecode(example.result), ['"key1"']), + const externalConfig = Fn.jsondecode( + awsLambdaInvocation.externalConfig.result + ); + new EcsService(this, "example", { + cluster: Token.asString(awsEcsClusterExample.id), + deploymentConfiguration: { + maximum_percent: Fn.lookupNested(externalConfig, ["max_percent"]), + minimum_healthy_percent: Fn.lookupNested(externalConfig, [ + "min_healthy_percent", + ]), + }, + desiredCount: Token.asNumber( + Fn.lookupNested(externalConfig, ["replica_count"]) + ), + name: "web-app", + tags: { + ConfigSource: "external-api", + Environment: environment.stringValue, + }, + taskDefinition: Token.asString(awsEcsTaskDefinitionExample.arn), }); } } @@ -47,17 +237,115 @@ The following arguments are required: The following arguments are optional: * `clientContext` - (Optional) Up to 3583 bytes of base64-encoded data about the invoking client to pass to the function in the context object. -* `logType` - (Optional) Set to `Tail` to include the execution log in the response. Valid values are `None` and `Tail`. +* `logType` - (Optional) Set to `Tail` to include the execution log in the response. Valid values: `None` and `Tail`. * `qualifier` - (Optional) Version or alias to invoke a published version of the function. Defaults to `$LATEST`. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). ## Attribute Reference -This resource exports the following attributes in addition to the arguments above: +This ephemeral resource exports the following attributes in addition to the arguments above: -* `executed_version` - Version of the function that executed. When you invoke a function with an alias, the version the alias resolved to. +* `executed_version` - Version of the function that executed. When you invoke a function with an alias, this shows the version the alias resolved to. * `function_error` - If present, indicates that an error occurred during function execution. Details about the error are included in `result`. * `log_result` - Last 4 KB of the execution log, which is base64-encoded. -* `result` - String result of the lambda function invocation. +* `result` - String result of the Lambda function invocation. * `statusCode` - HTTP status code is in the 200 range for a successful request. - \ No newline at end of file +## Usage Notes + +### Handling Sensitive Data + +Since ephemeral resources are designed to not persist data in state, they are ideal for handling sensitive information: + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { Token, TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. 
+ */ +import { SecretsmanagerSecretVersion } from "./.gen/providers/aws/secretsmanager-secret-version"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + new SecretsmanagerSecretVersion(this, "example", { + secretId: Token.asString(awsSecretsmanagerSecretExample.id), + secretString: awsLambdaInvocation.credentials.result, + }); + } +} + +``` + +### Error Handling + +Always check for function errors in your configuration: + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { + Op, + Fn, + Token, + conditional, + TerraformCount, + TerraformStack, +} from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. + */ +import { Resource } from "./.gen/providers/null/resource"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + const hasError = Op.neq(awsLambdaInvocation.example.functionError, "null"); + const invocationResult = Fn.jsondecode(awsLambdaInvocation.example.result); + /*In most cases loops should be handled in the programming language context and + not inside of the Terraform context. If you are looping over something external, e.g. a variable or a file input + you should consider using a for loop. If you are looping over something only known to Terraform, e.g. a result of a data source + you need to keep this like it is.*/ + const validationCount = TerraformCount.of( + Token.asNumber( + conditional( + hasError, + fail( + "Lambda function error: " + + Token.asString( + Fn.lookupNested(invocationResult, ["errorMessage"]) + ) + ), + 0 + ) + ) + ); + new Resource(this, "validation", { + count: validationCount, + }); + } +} + +``` + +### Logging + +Enable detailed logging for debugging: + +```typescript +// DO NOT EDIT. 
Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformOutput, Fn, TerraformStack } from "cdktf"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + new TerraformOutput(this, "execution_logs", { + value: Fn.base64decode(awsLambdaInvocation.example.logResult), + }); + } +} + +``` + + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/ephemeral-resources/secretsmanager_random_password.html.markdown b/website/docs/cdktf/typescript/ephemeral-resources/secretsmanager_random_password.html.markdown index 69e3cb625a0d..d86710019b41 100644 --- a/website/docs/cdktf/typescript/ephemeral-resources/secretsmanager_random_password.html.markdown +++ b/website/docs/cdktf/typescript/ephemeral-resources/secretsmanager_random_password.html.markdown @@ -32,6 +32,7 @@ class MyConvertedCode extends TerraformStack { The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `excludeCharacters` - (Optional) String of the characters that you don't want in the password. * `excludeLowercase` - (Optional) Specifies whether to exclude lowercase letters from the password. * `excludeNumbers` - (Optional) Specifies whether to exclude numbers from the password. @@ -47,4 +48,4 @@ This resource exports the following attributes in addition to the arguments abov * `randomPassword` - Random password. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/ephemeral-resources/secretsmanager_secret_version.html.markdown b/website/docs/cdktf/typescript/ephemeral-resources/secretsmanager_secret_version.html.markdown index 9f722d1d5093..f82e6a6d0337 100644 --- a/website/docs/cdktf/typescript/ephemeral-resources/secretsmanager_secret_version.html.markdown +++ b/website/docs/cdktf/typescript/ephemeral-resources/secretsmanager_secret_version.html.markdown @@ -12,7 +12,7 @@ description: |- Retrieve information about a Secrets Manager secret version, including its secret value. To retrieve secret metadata, see the [`aws_secretsmanager_secret` data source](/docs/providers/aws/d/secretsmanager_secret.html). -~> **NOTE:** Ephemeral resources are a new feature and may evolve as we continue to explore their most effective uses. [Learn more](https://developer.hashicorp.com/terraform/language/v1.10.x/resources/ephemeral). +~> **NOTE:** Ephemeral resources are a new feature and may evolve as we continue to explore their most effective uses. [Learn more](https://developer.hashicorp.com/terraform/language/resources/ephemeral). ## Example Usage @@ -72,6 +72,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `secretId` - (Required) Specifies the secret containing the version that you want to retrieve. You can specify either the ARN or the friendly name of the secret. * `versionId` - (Optional) Specifies the unique identifier of the version of the secret that you want to retrieve. Overrides `versionStage`. 
* `versionStage` - (Optional) Specifies the secret version that you want to retrieve by the staging label attached to the version. Defaults to `AWSCURRENT`. @@ -87,4 +88,4 @@ This resource exports the following attributes in addition to the arguments abov * `secretBinary` - Decrypted part of the protected secret information that was originally provided as a binary. * `versionId` - Unique identifier of this version of the secret. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/ephemeral-resources/ssm_parameter.html.markdown b/website/docs/cdktf/typescript/ephemeral-resources/ssm_parameter.html.markdown index 0cebc3c87be5..26f3ef4cd198 100644 --- a/website/docs/cdktf/typescript/ephemeral-resources/ssm_parameter.html.markdown +++ b/website/docs/cdktf/typescript/ephemeral-resources/ssm_parameter.html.markdown @@ -12,7 +12,7 @@ description: |- Retrieve information about an SSM parameter, including its value. -~> **NOTE:** Ephemeral resources are a new feature and may evolve as we continue to explore their most effective uses. [Learn more](https://developer.hashicorp.com/terraform/language/v1.10.x/resources/ephemeral). +~> **NOTE:** Ephemeral resources are a new feature and may evolve as we continue to explore their most effective uses. [Learn more](https://developer.hashicorp.com/terraform/language/resources/ephemeral). ## Example Usage @@ -36,6 +36,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
* `arn` - (Required) The Amazon Resource Name (ARN) of the parameter that you want to query * `withDecryption` - (Optional) Return decrypted values for a secure string parameter (Defaults to `true`). @@ -49,4 +50,4 @@ This resource exports the following attributes in addition to the arguments abov * `version` - The parameter version. * `withDecryption` - Indicates whether the secure string parameters were decrypted. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/guides/custom-service-endpoints.html.markdown b/website/docs/cdktf/typescript/guides/custom-service-endpoints.html.markdown index c6c0045f5259..6971f89e5118 100644 --- a/website/docs/cdktf/typescript/guides/custom-service-endpoints.html.markdown +++ b/website/docs/cdktf/typescript/guides/custom-service-endpoints.html.markdown @@ -134,6 +134,7 @@ class MyConvertedCode extends TerraformStack { |App Runner|`apprunner`|`AWS_ENDPOINT_URL_APPRUNNER`|`apprunner`| |AppStream 2.0|`appstream`|`AWS_ENDPOINT_URL_APPSTREAM`|`appstream`| |AppSync|`appsync`|`AWS_ENDPOINT_URL_APPSYNC`|`appsync`| +|Application Resilience Controller Region Switch|`arcregionswitch`|`AWS_ENDPOINT_URL_ARC_REGION_SWITCH`|`arc_region_switch`| |Athena|`athena`|`AWS_ENDPOINT_URL_ATHENA`|`athena`| |Audit Manager|`auditmanager`|`AWS_ENDPOINT_URL_AUDITMANAGER`|`auditmanager`| |Auto Scaling|`autoscaling`|`AWS_ENDPOINT_URL_AUTO_SCALING`|`auto_scaling`| @@ -143,6 +144,7 @@ class MyConvertedCode extends TerraformStack { |BCM Data Exports|`bcmdataexports`|`AWS_ENDPOINT_URL_BCM_DATA_EXPORTS`|`bcm_data_exports`| |Bedrock|`bedrock`|`AWS_ENDPOINT_URL_BEDROCK`|`bedrock`| |Bedrock Agents|`bedrockagent`|`AWS_ENDPOINT_URL_BEDROCK_AGENT`|`bedrock_agent`| +|Bedrock AgentCore|`bedrockagentcore`|`AWS_ENDPOINT_URL_BEDROCK_AGENTCORE_CONTROL`|`bedrock_agentcore_control`| |Billing|`billing`|`AWS_ENDPOINT_URL_BILLING`|`billing`| |Web Services Budgets|`budgets`|`AWS_ENDPOINT_URL_BUDGETS`|`budgets`| |CE (Cost 
Explorer)|`ce`(or `costexplorer`)|`AWS_ENDPOINT_URL_COST_EXPLORER`|`cost_explorer`| @@ -240,8 +242,6 @@ class MyConvertedCode extends TerraformStack { |CloudWatch Internet Monitor|`internetmonitor`|`AWS_ENDPOINT_URL_INTERNETMONITOR`|`internetmonitor`| |Invoicing|`invoicing`|`AWS_ENDPOINT_URL_INVOICING`|`invoicing`| |IoT Core|`iot`|`AWS_ENDPOINT_URL_IOT`|`iot`| -|IoT Analytics|`iotanalytics`|`AWS_ENDPOINT_URL_IOTANALYTICS`|`iotanalytics`| -|IoT Events|`iotevents`|`AWS_ENDPOINT_URL_IOT_EVENTS`|`iotEvents`| |IVS (Interactive Video)|`ivs`|`AWS_ENDPOINT_URL_IVS`|`ivs`| |IVS (Interactive Video) Chat|`ivschat`|`AWS_ENDPOINT_URL_IVSCHAT`|`ivschat`| |Managed Streaming for Kafka|`kafka`(or `msk`)|`AWS_ENDPOINT_URL_KAFKA`|`kafka`| @@ -284,9 +284,9 @@ class MyConvertedCode extends TerraformStack { |User Notifications|`notifications`|`AWS_ENDPOINT_URL_NOTIFICATIONS`|`notifications`| |User Notifications Contacts|`notificationscontacts`|`AWS_ENDPOINT_URL_NOTIFICATIONSCONTACTS`|`notificationscontacts`| |CloudWatch Observability Access Manager|`oam`(or `cloudwatchobservabilityaccessmanager`)|`AWS_ENDPOINT_URL_OAM`|`oam`| +|Oracle Database@AWS|`odb`|`AWS_ENDPOINT_URL_ODB`|`odb`| |OpenSearch|`opensearch`(or `opensearchservice`)|`AWS_ENDPOINT_URL_OPENSEARCH`|`opensearch`| |OpenSearch Serverless|`opensearchserverless`|`AWS_ENDPOINT_URL_OPENSEARCHSERVERLESS`|`opensearchserverless`| -|OpsWorks|`opsworks`|`AWS_ENDPOINT_URL_OPSWORKS`|`opsworks`| |Organizations|`organizations`|`AWS_ENDPOINT_URL_ORGANIZATIONS`|`organizations`| |OpenSearch Ingestion|`osis`(or `opensearchingestion`)|`AWS_ENDPOINT_URL_OSIS`|`osis`| |Outposts|`outposts`|`AWS_ENDPOINT_URL_OUTPOSTS`|`outposts`| @@ -324,6 +324,7 @@ class MyConvertedCode extends TerraformStack { |S3 Control|`s3Control`|`AWS_ENDPOINT_URL_S3_CONTROL`|`s3_control`| |S3 on Outposts|`s3Outposts`|`AWS_ENDPOINT_URL_S3OUTPOSTS`|`s3Outposts`| |S3 Tables|`s3Tables`|`AWS_ENDPOINT_URL_S3TABLES`|`s3Tables`| +|S3 
Vectors|`s3Vectors`|`AWS_ENDPOINT_URL_S3VECTORS`|`s3Vectors`| |SageMaker AI|`sagemaker`|`AWS_ENDPOINT_URL_SAGEMAKER`|`sagemaker`| |EventBridge Scheduler|`scheduler`|`AWS_ENDPOINT_URL_SCHEDULER`|`scheduler`| |EventBridge Schemas|`schemas`|`AWS_ENDPOINT_URL_SCHEMAS`|`schemas`| @@ -340,7 +341,6 @@ class MyConvertedCode extends TerraformStack { |SFN (Step Functions)|`sfn`(or `stepfunctions`)|`AWS_ENDPOINT_URL_SFN`|`sfn`| |Shield|`shield`|`AWS_ENDPOINT_URL_SHIELD`|`shield`| |Signer|`signer`|`AWS_ENDPOINT_URL_SIGNER`|`signer`| -|SDB (SimpleDB)|`simpledb`(or `sdb`)|`AWS_ENDPOINT_URL_SIMPLEDB`|`simpledb`| |SNS (Simple Notification)|`sns`|`AWS_ENDPOINT_URL_SNS`|`sns`| |SQS (Simple Queue)|`sqs`|`AWS_ENDPOINT_URL_SQS`|`sqs`| |SSM (Systems Manager)|`ssm`|`AWS_ENDPOINT_URL_SSM`|`ssm`| @@ -366,7 +366,7 @@ class MyConvertedCode extends TerraformStack { |WAF Classic Regional|`wafregional`|`AWS_ENDPOINT_URL_WAF_REGIONAL`|`waf_regional`| |WAF|`wafv2`|`AWS_ENDPOINT_URL_WAFV2`|`wafv2`| |Well-Architected Tool|`wellarchitected`|`AWS_ENDPOINT_URL_WELLARCHITECTED`|`wellarchitected`| -|WorkLink|`worklink`|`AWS_ENDPOINT_URL_WORKLINK`|`worklink`| +|WorkMail|`workmail`|`AWS_ENDPOINT_URL_WORKMAIL`|`workmail`| |WorkSpaces|`workspaces`|`AWS_ENDPOINT_URL_WORKSPACES`|`workspaces`| |WorkSpaces Web|`workspacesweb`|`AWS_ENDPOINT_URL_WORKSPACES_WEB`|`workspaces_web`| |X-Ray|`xray`|`AWS_ENDPOINT_URL_XRAY`|`xray`| @@ -477,4 +477,4 @@ class MyConvertedCode extends TerraformStack { ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/guides/enhanced-region-support.html.markdown b/website/docs/cdktf/typescript/guides/enhanced-region-support.html.markdown new file mode 100644 index 000000000000..5349fcc4befe --- /dev/null +++ b/website/docs/cdktf/typescript/guides/enhanced-region-support.html.markdown @@ -0,0 +1,713 @@ +--- +subcategory: "" +layout: "aws" +page_title: "Terraform AWS Provider Enhanced Region Support" +description: |- + Enhanced Region 
support with the Terraform AWS Provider. +--- + + + +# Enhanced Region Support + +Version 6.0.0 of the Terraform AWS Provider adds `region` to most resources making it significantly easier to manage infrastructure across AWS Regions without requiring multiple provider configurations. + + + +- [What's new](#whats-new) +- [What's not changing](#whats-not-changing) +- [Can I use `region` in every resource?](#can-i-use-region-in-every-resource) +- [Why make this change](#why-make-this-change) +- [How `region` works](#how-region-works) +- [Migrating from multiple provider configurations](#migrating-from-multiple-provider-configurations) +- [Before and after examples using `region`](#before-and-after-examples-using-region) +- [Non–region-aware resources](#nonregion-aware-resources) + + + +## What's new + +As of v6.0.0, most existing resources, data sources, and ephemeral resources are now [Region-aware](#nonregion-aware-resources), meaning they support a new top-level `region`. This allows you to manage a resource in a Region different from the one specified in the provider configuration without requiring multiple provider blocks. See [How `region` works](#how-region-works) for details. + +For example, if your provider is configured for `us-east-1`, you can now manage a VPC in `us-west-2` without defining an additional provider block: + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. 
+ */ +import { Vpc } from "./.gen/providers/aws/vpc"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + new Vpc(this, "peer", { + cidrBlock: "10.1.0.0/16", + region: "us-west-2", + }); + } +} + +``` + +## What's _not_ changing + +_Pre-v6.0.0 configurations that use provider blocks per Region remain valid in v6.0.0 and are not deprecated._ + +You can still define the Region at the provider level using any of the existing methods—for example, through the AWS [config file](https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-files.html), [provider configuration](https://developer.hashicorp.com/terraform/language/providers/configuration), [environment variables](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#environment-variables), [shared configuration files](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#shared-configuration-and-credentials-files), or explicitly using the `provider`’s [`region`](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#region). + +## Can I use `region` in every resource? + +No. While most resources are now Region-aware, there are exceptions. These include a few resources that already had a `region` and resources that are inherently global. See [Non–region-aware resources](#nonregion-aware-resources). + +## Why make this change + +Before version 6.0.0, managing infrastructure across multiple Regions required a separate provider configuration for each Region. This approach led to complex and repetitive configurations, especially for large infrastructures—AWS currently operates in [36 Regions](https://aws.amazon.com/about-aws/global-infrastructure/), with more announced. Additionally, each provider configuration adds overhead in terms of memory and compute resources. + +See the [examples](#before-and-after-examples-using-region) below for a comparison of configurations before and after introducing `region`. 
+ +## How `region` works + +The new top-level `region` is [_Optional_ and _Computed_](https://developer.hashicorp.com/terraform/plugin/framework/handling-data/attributes/string#configurability), and defaults to the Region specified in the provider configuration. Its value is validated to ensure it belongs to the configured [partition](https://docs.aws.amazon.com/whitepapers/latest/aws-fault-isolation-boundaries/partitions.html). **Changing the value of `region` will force resource replacement.** + +To [import](https://developer.hashicorp.com/terraform/cli/import) a resource in a specific Region, append `@` to the [import ID](https://developer.hashicorp.com/terraform/language/import#import-id)—for example: + +```sh +terraform import aws_vpc.test_vpc vpc-a01106c2@eu-west-1 +``` + +## Migrating from multiple provider configurations + +To migrate from a separate provider configuration for each Region to a single provider configuration block and per-resource `region` values you must ensure that Terraform state is refreshed before editing resource configuration: + +1. Upgrade to v6.0.0 +2. Run a Terraform apply in [refresh-only mode](https://developer.hashicorp.com/terraform/cli/commands/plan#planning-modes) -- `terraform apply -refresh-only` +3. Modify the affected resource configurations, replacing the [`provider` meta-argument](https://developer.hashicorp.com/terraform/language/meta-arguments/resource-provider) with a `region` argument + +## Before and after examples using `region` + +### Cross-region VPC peering + +
+Before, Pre-v6.0.0 +

+ +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { Token, TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. + */ +import { DataAwsCallerIdentity } from "./.gen/providers/aws/data-aws-caller-identity"; +import { AwsProvider } from "./.gen/providers/aws/provider"; +import { Vpc } from "./.gen/providers/aws/vpc"; +import { VpcPeeringConnection } from "./.gen/providers/aws/vpc-peering-connection"; +import { VpcPeeringConnectionAccepterA } from "./.gen/providers/aws/vpc-peering-connection-accepter"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + new AwsProvider(this, "aws", { + region: "us-east-1", + }); + const peer = new AwsProvider(this, "aws_1", { + alias: "peer", + region: "us-west-2", + }); + const main = new Vpc(this, "main", { + cidrBlock: "10.0.0.0/16", + }); + const awsVpcPeer = new Vpc(this, "peer", { + cidrBlock: "10.1.0.0/16", + provider: peer, + }); + const dataAwsCallerIdentityPeer = new DataAwsCallerIdentity( + this, + "peer_4", + { + provider: peer, + } + ); + /*This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match.*/ + dataAwsCallerIdentityPeer.overrideLogicalId("peer"); + const awsVpcPeeringConnectionPeer = new VpcPeeringConnection( + this, + "peer_5", + { + autoAccept: false, + peerOwnerId: Token.asString(dataAwsCallerIdentityPeer.accountId), + peerRegion: "us-west-2", + peerVpcId: Token.asString(awsVpcPeer.id), + vpcId: main.id, + } + ); + /*This allows the Terraform resource name to match the original name. 
You can remove the call if you don't need them to match.*/ + awsVpcPeeringConnectionPeer.overrideLogicalId("peer"); + const awsVpcPeeringConnectionAccepterPeer = + new VpcPeeringConnectionAccepterA(this, "peer_6", { + autoAccept: true, + provider: peer, + vpcPeeringConnectionId: Token.asString(awsVpcPeeringConnectionPeer.id), + }); + /*This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match.*/ + awsVpcPeeringConnectionAccepterPeer.overrideLogicalId("peer"); + } +} + +``` + +

+
+ +
+After, v6.0.0+ +

+ +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { Token, TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. + */ +import { AwsProvider } from "./.gen/providers/aws/provider"; +import { Vpc } from "./.gen/providers/aws/vpc"; +import { VpcPeeringConnection } from "./.gen/providers/aws/vpc-peering-connection"; +import { VpcPeeringConnectionAccepterA } from "./.gen/providers/aws/vpc-peering-connection-accepter"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + new AwsProvider(this, "aws", { + region: "us-east-1", + }); + const main = new Vpc(this, "main", { + cidrBlock: "10.0.0.0/16", + }); + const peer = new Vpc(this, "peer", { + cidrBlock: "10.1.0.0/16", + region: "us-west-2", + }); + const awsVpcPeeringConnectionPeer = new VpcPeeringConnection( + this, + "peer_3", + { + autoAccept: false, + peerRegion: "us-west-2", + peerVpcId: peer.id, + vpcId: main.id, + } + ); + /*This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match.*/ + awsVpcPeeringConnectionPeer.overrideLogicalId("peer"); + const awsVpcPeeringConnectionAccepterPeer = + new VpcPeeringConnectionAccepterA(this, "peer_4", { + autoAccept: true, + region: "us-west-2", + vpcPeeringConnectionId: Token.asString(awsVpcPeeringConnectionPeer.id), + }); + /*This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match.*/ + awsVpcPeeringConnectionAccepterPeer.overrideLogicalId("peer"); + } +} + +``` + +

+
+ +### KMS replica key + +
+Before, Pre-v6.0.0 +

+ +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { Token, TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. + */ +import { KmsKey } from "./.gen/providers/aws/kms-key"; +import { KmsReplicaKey } from "./.gen/providers/aws/kms-replica-key"; +import { AwsProvider } from "./.gen/providers/aws/provider"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + const primary = new AwsProvider(this, "aws", { + alias: "primary", + region: "us-east-1", + }); + new AwsProvider(this, "aws_1", { + region: "us-west-2", + }); + const awsKmsKeyPrimary = new KmsKey(this, "primary", { + deletionWindowInDays: 30, + description: "Multi-Region primary key", + multiRegion: true, + provider: primary, + }); + new KmsReplicaKey(this, "replica", { + deletionWindowInDays: 7, + description: "Multi-Region replica key", + primaryKeyArn: Token.asString(awsKmsKeyPrimary.arn), + }); + } +} + +``` + +

+
+ +
+After, v6.0.0 +

+ +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. + */ +import { KmsKey } from "./.gen/providers/aws/kms-key"; +import { KmsReplicaKey } from "./.gen/providers/aws/kms-replica-key"; +import { AwsProvider } from "./.gen/providers/aws/provider"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + new AwsProvider(this, "aws", { + region: "us-west-2", + }); + const primary = new KmsKey(this, "primary", { + deletionWindowInDays: 30, + description: "Multi-Region primary key", + multiRegion: true, + region: "us-east-1", + }); + new KmsReplicaKey(this, "replica", { + deletionWindowInDays: 7, + description: "Multi-Region replica key", + primaryKeyArn: primary.arn, + }); + } +} + +``` + +

+
+ +### S3 bucket replication configuration + +
+Before, Pre-v6.0.0 +

+ +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { Token, TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. + */ +import { DataAwsIamPolicyDocument } from "./.gen/providers/aws/data-aws-iam-policy-document"; +import { IamPolicy } from "./.gen/providers/aws/iam-policy"; +import { IamRole } from "./.gen/providers/aws/iam-role"; +import { IamRolePolicyAttachment } from "./.gen/providers/aws/iam-role-policy-attachment"; +import { AwsProvider } from "./.gen/providers/aws/provider"; +import { S3Bucket } from "./.gen/providers/aws/s3-bucket"; +import { S3BucketAcl } from "./.gen/providers/aws/s3-bucket-acl"; +import { S3BucketReplicationConfigurationA } from "./.gen/providers/aws/s3-bucket-replication-configuration"; +import { S3BucketVersioningA } from "./.gen/providers/aws/s3-bucket-versioning"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + new AwsProvider(this, "aws", { + region: "eu-west-1", + }); + const central = new AwsProvider(this, "aws_1", { + alias: "central", + region: "eu-central-1", + }); + const destination = new S3Bucket(this, "destination", { + bucket: "tf-test-bucket-destination-12345", + }); + const source = new S3Bucket(this, "source", { + bucket: "tf-test-bucket-source-12345", + provider: central, + }); + new S3BucketAcl(this, "source_bucket_acl", { + acl: "private", + bucket: source.id, + provider: central, + }); + const awsS3BucketVersioningDestination = new S3BucketVersioningA( + this, + "destination_5", + { + bucket: destination.id, + versioningConfiguration: { + status: "Enabled", + }, + } + ); + /*This allows the Terraform resource name to match the original name. 
You can remove the call if you don't need them to match.*/ + awsS3BucketVersioningDestination.overrideLogicalId("destination"); + const awsS3BucketVersioningSource = new S3BucketVersioningA( + this, + "source_6", + { + bucket: source.id, + provider: central, + versioningConfiguration: { + status: "Enabled", + }, + } + ); + /*This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match.*/ + awsS3BucketVersioningSource.overrideLogicalId("source"); + const assumeRole = new DataAwsIamPolicyDocument(this, "assume_role", { + statement: [ + { + actions: ["sts:AssumeRole"], + effect: "Allow", + principals: [ + { + identifiers: ["s3.amazonaws.com"], + type: "Service", + }, + ], + }, + ], + }); + const replication = new DataAwsIamPolicyDocument(this, "replication", { + statement: [ + { + actions: ["s3:GetReplicationConfiguration", "s3:ListBucket"], + effect: "Allow", + resources: [source.arn], + }, + { + actions: [ + "s3:GetObjectVersionForReplication", + "s3:GetObjectVersionAcl", + "s3:GetObjectVersionTagging", + ], + effect: "Allow", + resources: ["${" + source.arn + "}/*"], + }, + { + actions: [ + "s3:ReplicateObject", + "s3:ReplicateDelete", + "s3:ReplicateTags", + ], + effect: "Allow", + resources: ["${" + destination.arn + "}/*"], + }, + ], + }); + const awsIamPolicyReplication = new IamPolicy(this, "replication_9", { + name: "tf-iam-role-policy-replication-12345", + policy: Token.asString(replication.json), + }); + /*This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match.*/ + awsIamPolicyReplication.overrideLogicalId("replication"); + const awsIamRoleReplication = new IamRole(this, "replication_10", { + assumeRolePolicy: Token.asString(assumeRole.json), + name: "tf-iam-role-replication-12345", + }); + /*This allows the Terraform resource name to match the original name. 
You can remove the call if you don't need them to match.*/ + awsIamRoleReplication.overrideLogicalId("replication"); + const awsIamRolePolicyAttachmentReplication = new IamRolePolicyAttachment( + this, + "replication_11", + { + policyArn: Token.asString(awsIamPolicyReplication.arn), + role: Token.asString(awsIamRoleReplication.name), + } + ); + /*This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match.*/ + awsIamRolePolicyAttachmentReplication.overrideLogicalId("replication"); + const awsS3BucketReplicationConfigurationReplication = + new S3BucketReplicationConfigurationA(this, "replication_12", { + bucket: source.id, + dependsOn: [awsS3BucketVersioningSource], + provider: central, + role: Token.asString(awsIamRoleReplication.arn), + rule: [ + { + destination: { + bucket: destination.arn, + storageClass: "STANDARD", + }, + filter: { + prefix: "example", + }, + id: "examplerule", + status: "Enabled", + }, + ], + }); + /*This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match.*/ + awsS3BucketReplicationConfigurationReplication.overrideLogicalId( + "replication" + ); + } +} + +``` + +

+
+ +
+After, v6.0.0 +

+ +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { Token, TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. + */ +import { DataAwsIamPolicyDocument } from "./.gen/providers/aws/data-aws-iam-policy-document"; +import { IamPolicy } from "./.gen/providers/aws/iam-policy"; +import { IamRole } from "./.gen/providers/aws/iam-role"; +import { IamRolePolicyAttachment } from "./.gen/providers/aws/iam-role-policy-attachment"; +import { AwsProvider } from "./.gen/providers/aws/provider"; +import { S3Bucket } from "./.gen/providers/aws/s3-bucket"; +import { S3BucketAcl } from "./.gen/providers/aws/s3-bucket-acl"; +import { S3BucketReplicationConfigurationA } from "./.gen/providers/aws/s3-bucket-replication-configuration"; +import { S3BucketVersioningA } from "./.gen/providers/aws/s3-bucket-versioning"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + new AwsProvider(this, "aws", { + region: "eu-west-1", + }); + const destination = new S3Bucket(this, "destination", { + bucket: "tf-test-bucket-destination-12345", + }); + const source = new S3Bucket(this, "source", { + bucket: "tf-test-bucket-source-12345", + region: "eu-central-1", + }); + new S3BucketAcl(this, "source_bucket_acl", { + acl: "private", + bucket: source.id, + region: "eu-central-1", + }); + const awsS3BucketVersioningDestination = new S3BucketVersioningA( + this, + "destination_4", + { + bucket: destination.id, + versioningConfiguration: { + status: "Enabled", + }, + } + ); + /*This allows the Terraform resource name to match the original name. 
You can remove the call if you don't need them to match.*/ + awsS3BucketVersioningDestination.overrideLogicalId("destination"); + const awsS3BucketVersioningSource = new S3BucketVersioningA( + this, + "source_5", + { + bucket: source.id, + region: "eu-central-1", + versioningConfiguration: { + status: "Enabled", + }, + } + ); + /*This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match.*/ + awsS3BucketVersioningSource.overrideLogicalId("source"); + const assumeRole = new DataAwsIamPolicyDocument(this, "assume_role", { + statement: [ + { + actions: ["sts:AssumeRole"], + effect: "Allow", + principals: [ + { + identifiers: ["s3.amazonaws.com"], + type: "Service", + }, + ], + }, + ], + }); + const replication = new DataAwsIamPolicyDocument(this, "replication", { + statement: [ + { + actions: ["s3:GetReplicationConfiguration", "s3:ListBucket"], + effect: "Allow", + resources: [source.arn], + }, + { + actions: [ + "s3:GetObjectVersionForReplication", + "s3:GetObjectVersionAcl", + "s3:GetObjectVersionTagging", + ], + effect: "Allow", + resources: ["${" + source.arn + "}/*"], + }, + { + actions: [ + "s3:ReplicateObject", + "s3:ReplicateDelete", + "s3:ReplicateTags", + ], + effect: "Allow", + resources: ["${" + destination.arn + "}/*"], + }, + ], + }); + const awsIamPolicyReplication = new IamPolicy(this, "replication_8", { + name: "tf-iam-role-policy-replication-12345", + policy: Token.asString(replication.json), + }); + /*This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match.*/ + awsIamPolicyReplication.overrideLogicalId("replication"); + const awsIamRoleReplication = new IamRole(this, "replication_9", { + assumeRolePolicy: Token.asString(assumeRole.json), + name: "tf-iam-role-replication-12345", + }); + /*This allows the Terraform resource name to match the original name. 
You can remove the call if you don't need them to match.*/ + awsIamRoleReplication.overrideLogicalId("replication"); + const awsIamRolePolicyAttachmentReplication = new IamRolePolicyAttachment( + this, + "replication_10", + { + policyArn: Token.asString(awsIamPolicyReplication.arn), + role: Token.asString(awsIamRoleReplication.name), + } + ); + /*This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match.*/ + awsIamRolePolicyAttachmentReplication.overrideLogicalId("replication"); + const awsS3BucketReplicationConfigurationReplication = + new S3BucketReplicationConfigurationA(this, "replication_11", { + bucket: source.id, + dependsOn: [awsS3BucketVersioningSource], + region: "eu-central-1", + role: Token.asString(awsIamRoleReplication.arn), + rule: [ + { + destination: { + bucket: destination.arn, + storageClass: "STANDARD", + }, + filter: { + prefix: "example", + }, + id: "examplerule", + status: "Enabled", + }, + ], + }); + /*This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match.*/ + awsS3BucketReplicationConfigurationReplication.overrideLogicalId( + "replication" + ); + } +} + +``` + +

+
+ +## Non–region-aware resources + +This section lists resources that are not Region-aware—meaning `region` has not been added to them. + +Some resources, such as [IAM and STS](https://docs.aws.amazon.com/IAM/latest/UserGuide/programming.html#IAMEndpoints), are [global](https://docs.aws.amazon.com/whitepapers/latest/aws-fault-isolation-boundaries/global-services.html) and exist in all Regions within a partition. + +Other resources are not Region-aware because they already had a top-level `region`, are inherently global, or because adding `region` would not be appropriate for other reasons. + +### Resources deprecating `region` + +The following regional resources and data sources had a top-level `region` prior to version 6.0.0. It is now deprecated and will be replaced in a future version to support the new Region-aware behavior. + +* `aws_cloudformation_stack_set_instance` resource +* `aws_config_aggregate_authorization` resource +* `aws_dx_hosted_connection` resource +* `awsRegion` data source +* `aws_s3_bucket` data source +* `aws_servicequotas_template` resource +* `aws_servicequotas_templates` data source +* `aws_ssmincidents_replication_set` resource and data source +* `aws_vpc_endpoint_service` data source +* `aws_vpc_peering_connection` data source + +### Global services + +All resources for the following services are considered _global_: + +* Account Management (`aws_account_*`) +* Billing (`aws_billing_*`) +* Billing and Cost Management Data Exports (`aws_bcmdataexports_*`) +* Budgets (`aws_budgets_*`) +* CloudFront (`aws_cloudfront_*` and `aws_cloudfrontkeyvaluestore_*`) +* Cost Explorer (`aws_ce_*`) +* Cost Optimization Hub (`aws_costoptimizationhub_*`) +* Cost and Usage Report (`aws_cur_*`) +* Global Accelerator (`aws_globalaccelerator_*`) +* IAM (`aws_iam_*`, `aws_rolesanywhere_*` and `aws_caller_identity`) +* Network Manager (`aws_networkmanager_*`) +* Organizations (`aws_organizations_*`) +* Price List (`aws_pricing_*`) +* Route 53 (`aws_route53_*` 
and `aws_route53domains_*`) +* Route 53 ARC (`aws_route53recoverycontrolconfig_*` and `aws_route53recoveryreadiness_*`) +* Shield Advanced (`aws_shield_*`) +* User Notifications (`aws_notifications_*`) +* User Notifications Contacts (`aws_notificationscontacts_*`) +* WAF Classic (`aws_waf_*`) + +### Global resources in regional services + +Some regional services have a subset of resources that are global: + +| Service | Type | Name | +|---|---|---| +| Backup | Resource | `aws_backup_global_settings` | +| Chime SDK Voice | Resource | `aws_chimesdkvoice_global_settings` | +| CloudTrail | Resource | `aws_cloudtrail_organization_delegated_admin_account` | +| Direct Connect | Resource | `aws_dx_gateway` | +| Direct Connect | Data Source | `aws_dx_gateway` | +| EC2 | Resource | `aws_ec2_image_block_public_access` | +| Firewall Manager | Resource | `aws_fms_admin_account` | +| IPAM | Resource | `aws_vpc_ipam_organization_admin_account` | +| QuickSight | Resource | `aws_quicksight_account_settings` | +| Resource Access Manager | Resource | `aws_ram_sharing_with_organization` | +| S3 | Data Source | `aws_canonical_user_id` | +| S3 | Resource | `aws_s3_account_public_access_block` | +| S3 | Data Source | `aws_s3_account_public_access_block` | +| Service Catalog | Resource | `aws_servicecatalog_organizations_access` | + +### Meta data sources + +The `aws_default_tags`, `aws_partition`, and `aws_regions` data sources are effectively global. + +`region` of the `aws_arn` data source stays as-is. 
+ +### Policy Document Data Sources + +Some data sources convert HCL into JSON policy documents and are effectively global: + +* `aws_cloudwatch_log_data_protection_policy_document` +* `aws_ecr_lifecycle_policy_document` + + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/guides/version-6-upgrade.html.markdown b/website/docs/cdktf/typescript/guides/version-6-upgrade.html.markdown new file mode 100644 index 000000000000..d9fa38f2cc11 --- /dev/null +++ b/website/docs/cdktf/typescript/guides/version-6-upgrade.html.markdown @@ -0,0 +1,777 @@ +--- +subcategory: "" +layout: "aws" +page_title: "Terraform AWS Provider Version 6 Upgrade Guide" +description: |- + Terraform AWS Provider Version 6 Upgrade Guide +--- + + + +# Terraform AWS Provider Version 6 Upgrade Guide + +Version 6.0.0 of the AWS provider for Terraform is a major release and includes changes that you need to consider when upgrading. This guide will help with that process and focuses only on changes from version 5.x to version 6.0.0. See the [Version 5 Upgrade Guide](/docs/providers/aws/guides/version-5-upgrade.html) for information on upgrading from 4.x to version 5.0.0. 
+ +Upgrade topics: + + + +- [Prerequisites to Upgrade to v6.0.0](#prerequisites-to-upgrade-to-v600) +- [Removed Provider Arguments](#removed-provider-arguments) +- [Enhanced Region Support](#enhanced-region-support) +- [Amazon Elastic Transcoder Deprecation](#amazon-elastic-transcoder-deprecation) +- [CloudWatch Evidently Deprecation](#cloudwatch-evidently-deprecation) +- [Nullable Boolean Validation Update](#nullable-boolean-validation-update) +- [OpsWorks Stacks Removal](#opsworks-stacks-removal) +- [S3 Global Endpoint Deprecation](#s3-global-endpoint-deprecation) +- [SimpleDB Support Removed](#simpledb-support-removed) +- [Worklink Support Removed](#worklink-support-removed) +- [Data Source `aws_ami`](#data-source-aws_ami) +- [Data Source `aws_batch_compute_environment`](#data-source-aws_batch_compute_environment) +- [Data Source `aws_ecs_task_definition`](#data-source-aws_ecs_task_definition) +- [Data Source `aws_ecs_task_execution`](#data-source-aws_ecs_task_execution) +- [Data Source `aws_elbv2_listener_rule`](#data-source-aws_elbv2_listener_rule) +- [Data Source `aws_globalaccelerator_accelerator`](#data-source-aws_globalaccelerator_accelerator) +- [Data Source `aws_identitystore_group`](#data-source-aws_identitystore_group) +- [Data Source `aws_identitystore_user`](#data-source-aws_identitystore_user) +- [Data Source `aws_kms_secret`](#data-source-aws_kms_secret) +- [Data Source `aws_launch_template`](#data-source-aws_launch_template) +- [Data Source `aws_opensearch_domain`](#data-source-aws_opensearch_domain) +- [Data Source `aws_opensearchserverless_security_config`](#data-source-aws_opensearchserverless_security_config) +- [Data Source `aws_quicksight_data_set`](#data-source-aws_quicksight_data_set) +- [Data Source `awsRegion`](#data-source-aws_region) +- [Data Source `aws_s3_bucket`](#data-source-aws_s3_bucket) +- [Data Source `aws_service_discovery_service`](#data-source-aws_service_discovery_service) +- [Data Source 
`aws_servicequotas_templates`](#data-source-aws_servicequotas_templates) +- [Data Source `aws_ssmincidents_replication_set`](#data-source-aws_ssmincidents_replication_set) +- [Data Source `aws_vpc_endpoint_service`](#data-source-aws_vpc_endpoint_service) +- [Data Source `aws_vpc_peering_connection`](#data-source-aws_vpc_peering_connection) +- [Resource `aws_accessanalyzer_archive_rule`](#typenullablebool-validation-update) +- [Resource `aws_alb_target_group`](#typenullablebool-validation-update) +- [Resource `aws_api_gateway_account`](#resource-aws_api_gateway_account) +- [Resource `aws_api_gateway_deployment`](#resource-aws_api_gateway_deployment) +- [Resource `aws_appflow_connector_profile`](#resource-aws_appflow_connector_profile) +- [Resource `aws_appflow_flow`](#resource-aws_appflow_flow) +- [Resource `aws_batch_compute_environment`](#resource-aws_batch_compute_environment) +- [Resource `aws_batch_job_queue`](#resource-aws_batch_job_queue) +- [Resource `aws_bedrock_model_invocation_logging_configuration`](#resource-aws_bedrock_model_invocation_logging_configuration) +- [Resource `aws_cloudformation_stack_set_instance`](#resource-aws_cloudformation_stack_set_instance) +- [Resource `aws_cloudfront_key_value_store`](#resource-aws_cloudfront_key_value_store) +- [Resource `aws_cloudfront_response_headers_policy`](#resource-aws_cloudfront_response_headers_policy) +- [Resource `aws_cloudtrail_event_data_store`](#typenullablebool-validation-update) +- [Resource `aws_cognito_user_in_group`](#resource-aws_cognito_user_in_group) +- [Resource `aws_config_aggregate_authorization`](#resource-aws_config_aggregate_authorization) +- [Resource `aws_cur_report_definition`](#resource-aws_cur_report_definition) +- [Resource `aws_db_instance`](#resource-aws_db_instance) +- [Resource `aws_dms_endpoint`](#resource-aws_dms_endpoint) +- [Resource `aws_dx_gateway_association`](#resource-aws_dx_gateway_association) +- [Resource 
`aws_dx_hosted_connection`](#resource-aws_dx_hosted_connection) +- [Resource `aws_ec2_spot_instance_fleet`](#typenullablebool-validation-update) +- [Resource `aws_ecs_task_definition`](#resource-aws_ecs_task_definition) +- [Resource `aws_eip`](#resource-aws_eip) +- [Resource `aws_eks_addon`](#resource-aws_eks_addon) +- [Resource `aws_elasticache_cluster`](#typenullablebool-validation-update) +- [Resource `aws_elasticache_replication_group`](#resource-aws_elasticache_replication_group) +- [Resource `aws_elasticache_user`](#resource-aws_elasticache_user) +- [Resource `aws_elasticache_user_group`](#resource-aws_elasticache_user_group) +- [Resource `aws_evidently_feature`](#typenullablebool-validation-update) +- [Resource `aws_flow_log`](#resource-aws_flow_log) +- [Resource `aws_guardduty_detector`](#resource-aws_guardduty_detector) +- [Resource `aws_guardduty_organization_configuration`](#resource-aws_guardduty_organization_configuration) +- [Resource `aws_imagebuilder_container_recipe`](#typenullablebool-validation-update) +- [Resource `aws_imagebuilder_image_recipe`](#typenullablebool-validation-update) +- [Resource `aws_instance`](#resource-aws_instance) +- [Resource `aws_kinesis_analytics_application`](#resource-aws_kinesis_analytics_application) +- [Resource `aws_launch_template`](#resource-aws_launch_template) +- [Resource `aws_lb_listener`](#resource-aws_lb_listener) +- [Resource `aws_lb_target_group`](#typenullablebool-validation-update) +- [Resource `aws_media_store_container`](#resource-aws_media_store_container) +- [Resource `aws_media_store_container_policy`](#resource-aws_media_store_container_policy) +- [Resource `aws_mq_broker`](#typenullablebool-validation-update) +- [Resource `aws_networkmanager_core_network`](#resource-aws_networkmanager_core_network) +- [Resource `aws_opensearch_domain`](#resource-aws_opensearch_domain) +- [Resource `aws_opensearchserverless_security_config`](#resource-aws_opensearchserverless_security_config) +- [Resource 
`aws_paymentcryptography_key`](#resource-aws_paymentcryptography_key) +- [Resource `aws_redshift_cluster`](#resource-aws_redshift_cluster) +- [Resource `aws_redshift_service_account`](#resource-aws_redshift_service_account) +- [Resource `aws_rekognition_stream_processor`](#resource-aws_rekognition_stream_processor) +- [Resource `aws_resiliencehub_resiliency_policy`](#resource-aws_resiliencehub_resiliency_policy) +- [Resource `aws_s3_bucket`](#resource-aws_s3_bucket) +- [Resource `aws_sagemaker_image_version`](#resource-aws_sagemaker_image_version) +- [Resource `aws_sagemaker_notebook_instance`](#resource-aws_sagemaker_notebook_instance) +- [Resource `aws_servicequotas_template`](#resource-aws_servicequotas_template) +- [Resource `aws_spot_instance_request`](#resource-aws_spot_instance_request) +- [Resource `aws_ssm_association`](#resource-aws_ssm_association) +- [Resource `aws_ssmincidents_replication_set`](#resource-aws_ssmincidents_replication_set) +- [Resource `aws_verifiedpermissions_schema`](#resource-aws_verifiedpermissions_schema) +- [Resource `aws_wafv2_web_acl`](#resource-aws_wafv2_web_acl) + + + +## Prerequisites to Upgrade to v6.0.0 + +-> Before upgrading to version `6.0.0`, first upgrade to the latest available `5.x` version of the provider. Run [`terraform plan`](https://developer.hashicorp.com/terraform/cli/commands/plan) and confirm that: + +- Your plan completes without errors or unexpected changes. +- There are no deprecation warnings related to the changes described in this guide. + +If you use [version constraints](https://developer.hashicorp.com/terraform/language/providers/requirements#provider-versions) (recommended), update them to allow the `6.x` series and run [`terraform init -upgrade`](https://developer.hashicorp.com/terraform/cli/commands/init) to download the new version. + +### Example + +**Before:** + +```typescript +// DO NOT EDIT. 
Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. + */ +import { AwsProvider } from "./.gen/providers/aws/provider"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + new AwsProvider(this, "aws", {}); + } +} + +``` + +**After:** + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. + */ +import { AwsProvider } from "./.gen/providers/aws/provider"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + new AwsProvider(this, "aws", {}); + } +} + +``` + +## Removed Provider Arguments + +Remove the following from your provider configuration—they are no longer supported: + +- `endpoints.opsworks` – removed following AWS OpsWorks Stacks End of Life. +- `endpoints.simpledb` and `endpoints.sdb` – removed due to the removal of Amazon SimpleDB support. +- `endpoints.worklink` – removed due to the removal of Amazon Worklink support. + +## Enhanced Region Support + +Version 6.0.0 adds `region` to most resources making it significantly easier to manage infrastructure across AWS Regions without requiring multiple provider configurations. See [Enhanced Region Support](enhanced-region-support.html). + +## Amazon Elastic Transcoder Deprecation + +Amazon Elastic Transcoder will be [discontinued](https://aws.amazon.com/blogs/media/support-for-amazon-elastic-transcoder-ending-soon/) on **November 13, 2025**. 
+ +The following resources are deprecated and will be removed in a future major release: + +- `aws_elastictranscoder_pipeline` +- `aws_elastictranscoder_preset` + +Use [AWS Elemental MediaConvert](https://aws.amazon.com/blogs/media/migrating-workflows-from-amazon-elastic-transcoder-to-aws-elemental-mediaconvert/) instead. + +## CloudWatch Evidently Deprecation + +AWS will [end support](https://aws.amazon.com/blogs/mt/support-for-amazon-cloudwatch-evidently-ending-soon/) for CloudWatch Evidently on **October 17, 2025**. + +The following resources are deprecated and will be removed in a future major release: + +- `aws_evidently_feature` +- `aws_evidently_launch` +- `aws_evidently_project` +- `aws_evidently_segment` + +Migrate to [AWS AppConfig Feature Flags](https://aws.amazon.com/blogs/mt/using-aws-appconfig-feature-flags/). + +## Nullable Boolean Validation Update + +Update your configuration to _only_ use `""`, `true`, or `false` if you use the arguments below _and_ you are using `0` or `1` to represent boolean values: + +| Resource | Attribute(s) | +|-----------------------------------------|--------------------------------------------------------------------------| +| `aws_accessanalyzer_archive_rule` | `filter.exists` | +| `aws_alb_target_group` | `preserveClientIp` | +| `aws_cloudtrail_event_data_store` | `suspend` | +| `aws_ec2_spot_instance_fleet` | `terminateInstancesOnDelete` | +| `aws_elasticache_cluster` | `autoMinorVersionUpgrade` | +| `aws_elasticache_replication_group` | `atRestEncryptionEnabled`, `autoMinorVersionUpgrade` | +| `aws_evidently_feature` | `variations.value.bool_value` | +| `aws_imagebuilder_container_recipe` | `instance_configuration.block_device_mapping.ebs.delete_on_termination`, `instance_configuration.block_device_mapping.ebs.encrypted` | +| `aws_imagebuilder_image_recipe` | `block_device_mapping.ebs.delete_on_termination`, `block_device_mapping.ebs.encrypted` | +| `aws_launch_template` | 
`block_device_mappings.ebs.delete_on_termination`, `block_device_mappings.ebs.encrypted`, `ebsOptimized`, `network_interfaces.associate_carrier_ip_address`, `network_interfaces.associate_public_ip_address`, `network_interfaces.delete_on_termination`, `network_interfaces.primary_ipv6` | +| `aws_lb_target_group` | `preserveClientIp` | +| `aws_mq_broker` | `logs.audit` | + +This is due to changes to `TypeNullableBool`. + +## OpsWorks Stacks Removal + +The AWS OpsWorks Stacks service has reached [End of Life](https://docs.aws.amazon.com/opsworks/latest/userguide/stacks-eol-faqs.html). The following resources have been removed: + +- `aws_opsworks_application` +- `aws_opsworks_custom_layer` +- `aws_opsworks_ecs_cluster_layer` +- `aws_opsworks_ganglia_layer` +- `aws_opsworks_haproxy_layer` +- `aws_opsworks_instance` +- `aws_opsworks_java_app_layer` +- `aws_opsworks_memcached_layer` +- `aws_opsworks_mysql_layer` +- `aws_opsworks_nodejs_app_layer` +- `aws_opsworks_permission` +- `aws_opsworks_php_app_layer` +- `aws_opsworks_rails_app_layer` +- `aws_opsworks_rds_db_instance` +- `aws_opsworks_stack` +- `aws_opsworks_static_web_layer` +- `aws_opsworks_user_profile` + +## SimpleDB Support Removed + +The `aws_simpledb_domain` resource has been removed, as the [AWS SDK for Go v2](https://docs.aws.amazon.com/sdk-for-go/v2/developer-guide/welcome.html) no longer supports Amazon SimpleDB. + +## Worklink Support Removed + +The following resources have been removed due to dropped support for Amazon Worklink in the [AWS SDK for Go v2](https://github.com/aws/aws-sdk-go-v2/pull/2814): + +- `aws_worklink_fleet` +- `aws_worklink_website_certificate_authority_association` + +## S3 Global Endpoint Deprecation + +Support for the global S3 endpoint is deprecated. This affects S3 resources in `us-east-1` (excluding directory buckets) when `s3UsEast1RegionalEndpoint` is set to `legacy`. + +`s3UsEast1RegionalEndpoint` will be removed in `v7.0.0`. 
+ +To prepare: + +- Remove `s3UsEast1RegionalEndpoint` from your provider configuration, **or** +- Set its value to `regional` and verify functionality. + +## Data Source `aws_ami` + +When using `most_recent = true`, your configuration **must now include** an `owner` or a `filter` that identifies the image by `image-id` or `owner-id`. + +- **Before (v5 and earlier):** + Terraform allowed this setup and showed only a warning. + +- **Now (v6+):** + Terraform will stop with an **error** to prevent unsafe or ambiguous AMI lookups. + +### How to fix it + +Do one of the following: + +- Add `owner`: + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + } +} + +``` + +- Or add a `filter` block that includes either `image-id` or `owner-id`: + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + } +} + +``` + +### Unsafe option (not recommended) + +To override this check, you can set: + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + } +} + +``` + +However, this may lead to unreliable results and should be avoided unless absolutely necessary. + +## Data Source `aws_batch_compute_environment` + +`compute_environment_name` has been renamed to `name`. 
+ +Update your configurations to replace any usage of `compute_environment_name` with `name` to use this version. + +## Data Source `aws_ecs_task_definition` + +Remove `inference_accelerator`—it is no longer supported. Amazon Elastic Inference reached end of life in April 2024. + +## Data Source `aws_ecs_task_execution` + +Remove `inference_accelerator_overrides`—it is no longer supported. Amazon Elastic Inference reached end of life in April 2024. + +## Data Source `aws_elbv2_listener_rule` + +Treat the following as lists of nested blocks instead of single-nested blocks: + +- `action.authenticate_cognito` +- `action.authenticate_oidc` +- `action.fixed_response` +- `action.forward` +- `action.forward.stickiness` +- `action.redirect` +- `condition.host_header` +- `condition.http_header` +- `condition.http_request_method` +- `condition.path_pattern` +- `condition.query_string` +- `condition.source_ip` + +The data source configuration itself does not change. However, now, include an index when referencing them. For example, update `action[0].authenticate_cognito.scope` to `action[0].authenticate_cognito[0].scope`. + +## Data Source `aws_globalaccelerator_accelerator` + +`id` is now **computed only** and can no longer be set manually. +If your configuration explicitly attempts to set a value for `id`, you must remove it to avoid an error. + +## Data Source `aws_identitystore_group` + +Remove `filter`—it is no longer supported. To locate a group, update your configuration to use `alternateIdentifier` instead. + +## Data Source `aws_identitystore_user` + +Remove `filter`—it is no longer supported. +To locate a user, update your configuration to use `alternateIdentifier` instead. + +## Data Source `aws_kms_secret` + +The functionality for this data source was removed in **v2.0.0** and the data source will be removed in a future version. 
+ +## Data Source `aws_launch_template` + +Remove the following—they are no longer supported: + +- `elastic_gpu_specifications`: Amazon Elastic Graphics reached end of life in January 2024. +- `elastic_inference_accelerator`: Amazon Elastic Inference reached end of life in April 2024. + +## Data Source `aws_opensearch_domain` + +Remove `kibanaEndpoint`—it is no longer supported. AWS OpenSearch Service no longer uses Kibana endpoints. The service now uses **Dashboards**, accessible at the `/_dashboards/` path on the domain endpoint. +For more details, refer to the [AWS OpenSearch Dashboards documentation](https://docs.aws.amazon.com/opensearch-service/latest/developerguide/dashboards.html). + +## Data Source `aws_opensearchserverless_security_config` + +Treat `samlOptions` as a list of nested blocks instead of a single-nested block. The data source configuration itself does not change. However, now, include an index when referencing it. For example, update `saml_options.session_timeout` to `saml_options[0].session_timeout`. + +## Data Source `aws_quicksight_data_set` + +Remove `tagsAll`—it is no longer supported. + +## Data Source `aws_region` + +`name` has been deprecated. Use `region` instead. + +## Data Source `aws_s3_bucket` + +`bucketRegion` has been added and should be used instead of `region`, which is now used for [Enhanced Region Support](enhanced-region-support.html). + +## Data Source `aws_service_discovery_service` + +Remove `tagsAll`—it is no longer supported. + +## Data Source `aws_servicequotas_templates` + +`region` has been deprecated. Use `awsRegion` instead. + +## Data Source `aws_ssmincidents_replication_set` + +`region` has been deprecated. Use `regions` instead. + +## Data Source `aws_vpc_endpoint_service` + +`region` has been deprecated. Use `serviceRegion` instead. + +## Data Source `aws_vpc_peering_connection` + +`region` has been deprecated. Use `requesterRegion` instead. 
+ +## Resource `aws_api_gateway_account` + +Remove `reset_on_delete`—it is no longer supported. The destroy operation will now always reset the API Gateway account settings by default. + +If you want to retain the previous behavior (where the account settings were not changed upon destruction), use a `removed` block in your configuration. For more details, see the [removing resources documentation](https://developer.hashicorp.com/terraform/language/resources/syntax#removing-resources). + +## Resource `aws_api_gateway_deployment` + +* Use the `aws_api_gateway_stage` resource if your configuration uses any of the following, which have been removed from the `aws_api_gateway_deployment` resource: + - `stageName` + - `stage_description` + - `canarySettings` +* Remove `invokeUrl` and `executionArn`—they are no longer supported. Use the `aws_api_gateway_stage` resource instead. + +### Migration Example + +**Before (v5 and earlier, using implicit stage):** + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { Token, TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. + */ +import { ApiGatewayDeployment } from "./.gen/providers/aws/api-gateway-deployment"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + new ApiGatewayDeployment(this, "example", { + restApiId: Token.asString(awsApiGatewayRestApiExample.id), + stage_name: "prod", + }); + } +} + +``` + +**After (v6+, using explicit stage):** + +If your previous configuration relied on an implicitly created stage, you must now define and manage that stage explicitly using the `aws_api_gateway_stage` resource. To do this, create a corresponding resource and import the existing stage into your configuration. + +```typescript +// DO NOT EDIT. 
Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { Token, TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. + */ +import { ApiGatewayDeployment } from "./.gen/providers/aws/api-gateway-deployment"; +import { ApiGatewayStage } from "./.gen/providers/aws/api-gateway-stage"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + const example = new ApiGatewayDeployment(this, "example", { + restApiId: Token.asString(awsApiGatewayRestApiExample.id), + }); + new ApiGatewayStage(this, "prod", { + deploymentId: example.id, + restApiId: Token.asString(awsApiGatewayRestApiExample.id), + stageName: "prod", + }); + } +} + +``` + +Import the existing stage, replacing `restApiId` and `stageName` with your values: + +```sh +terraform import aws_api_gateway_stage.prod rest_api_id/stage_name +``` + +## Resource `aws_appflow_connector_profile` + +Importing an `aws_appflow_connector_profile` resource now uses the `name` of the Connector Profile. + +## Resource `aws_appflow_flow` + +Importing an `aws_appflow_flow` resource now uses the `name` of the Flow. + +## Resource `aws_batch_compute_environment` + +Replace any usage of `compute_environment_name` with `name` and `compute_environment_name_prefix` with `namePrefix` as they have been renamed. + +## Resource `aws_batch_job_queue` + +Remove `compute_environments`—it is no longer supported. +Use `computeEnvironmentOrder` configuration blocks instead. While you must update your configuration, Terraform will upgrade states with `compute_environments` to `computeEnvironmentOrder`. + +**Before (v5 and earlier):** + +```typescript +// DO NOT EDIT. 
Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. + */ +import { BatchJobQueue } from "./.gen/providers/aws/batch-job-queue"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + new BatchJobQueue(this, "example", { + compute_environments: [awsBatchComputeEnvironmentExample.arn], + name: "patagonia", + priority: 1, + state: "ENABLED", + }); + } +} + +``` + +**After (v6+):** + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { Token, TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. + */ +import { BatchJobQueue } from "./.gen/providers/aws/batch-job-queue"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + new BatchJobQueue(this, "example", { + computeEnvironmentOrder: [ + { + computeEnvironment: Token.asString( + awsBatchComputeEnvironmentExample.arn + ), + order: 0, + }, + ], + name: "patagonia", + priority: 1, + state: "ENABLED", + }); + } +} + +``` + +## Resource `aws_bedrock_model_invocation_logging_configuration` + +Treat the following as lists of nested blocks instead of single-nested blocks: + +- `loggingConfig` +- `logging_config.cloudwatch_config` +- `logging_config.cloudwatch_config.large_data_delivery_s3_config` +- `logging_config.s3_config` + +The resource configuration itself does not change, but you must now include an index when referencing them. For example, update `logging_config.cloudwatch_config.log_group_name` to `logging_config[0].cloudwatch_config[0].log_group_name`. 
+ +## Resource `aws_cloudformation_stack_set_instance` + +`region` has been deprecated. Use `stackSetInstanceRegion` instead. + +## Resource `aws_cloudfront_key_value_store` + +Use `name` to reference the resource name. `id` represents the ID value returned by the AWS API. + +## Resource `aws_cloudfront_response_headers_policy` + +Do not set a value for `etag` as it is now computed only. + +## Resource `aws_cognito_user_in_group` + +For the `id`, use a comma-delimited string concatenating `userPoolId`, `groupName`, and `username`. For example, in an import command, use comma-delimiting for the composite `id`. + +## Resource `aws_config_aggregate_authorization` + +`region` has been deprecated. Use `authorizedAwsRegion` instead. + +## Resource `aws_cur_report_definition` + +`s3Prefix` is now required. + +## Resource `aws_db_instance` + +Do not use `characterSetName` with `replicateSourceDb`, `restoreToPointInTime`, `s3Import`, or `snapshotIdentifier`. The combination is no longer valid. + +## Resource `aws_dms_endpoint` + +`s3Settings` has been removed. Use the `aws_dms_s3_endpoint` resource rather than `s3Settings` of `aws_dms_endpoint`. + +## Resource `aws_dx_gateway_association` + +Remove `vpnGatewayId`—it is no longer supported. Use `associatedGatewayId` instead. + +## Resource `aws_dx_hosted_connection` + +`region` has been deprecated. Use `connectionRegion` instead. + +## Resource `aws_ecs_task_definition` + +Remove `inference_accelerator`—it is no longer supported. Amazon Elastic Inference reached end of life in April 2024. + +## Resource `aws_eip` + +Remove `vpc`—it is no longer supported. Use `domain` instead. + +## Resource `aws_eks_addon` + +Remove `resolve_conflicts`—it is no longer supported. Use `resolveConflictsOnCreate` and `resolveConflictsOnUpdate` instead. + +## Resource `aws_elasticache_replication_group` + +* `authTokenUpdateStrategy` no longer has a default value. If `authToken` is set, it must also be explicitly configured. 
+* The ability to provide an uppercase `engine` value is deprecated. In `v7.0.0`, plan-time validation of `engine` will require an entirely lowercase value to match the returned value from the AWS API without diff suppression. +* See also [changes](#typenullablebool-validation-update) to `atRestEncryptionEnabled` and `autoMinorVersionUpgrade`. + +## Resource `aws_elasticache_user` + +The ability to provide an uppercase `engine` value is deprecated. +In `v7.0.0`, plan-time validation of `engine` will require an entirely lowercase value to match the returned value from the AWS API without diff suppression. + +## Resource `aws_elasticache_user_group` + +The ability to provide an uppercase `engine` value is deprecated. +In `v7.0.0`, plan-time validation of `engine` will require an entirely lowercase value to match the returned value from the AWS API without diff suppression. + +## Resource `aws_flow_log` + +Remove `logGroupName`—it is no longer supported. Use `logDestination` instead. + +## Resource `aws_guardduty_detector` + +`datasources` is deprecated. +Use the `aws_guardduty_detector_feature` resource instead. + +## Resource `aws_guardduty_organization_configuration` + +* Remove `autoEnable`—it is no longer supported. +* `autoEnableOrganizationMembers` is now required. +* `datasources` is deprecated. + +## Resource `aws_instance` + +* `userData` no longer applies hashing and is now stored in clear text. **Do not include passwords or sensitive information** in `userData`, as it will be visible in plaintext. Follow [AWS Best Practices](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html) to secure your instance metadata. If you need to provide base64-encoded user data, use `userDataBase64` instead. +* Remove `cpu_core_count` and `cpu_threads_per_core`—they are no longer supported. Instead, use the `cpuOptions` configuration block with `coreCount` and `threadsPerCore`. 
+ +## Resource `aws_kinesis_analytics_application` + +This resource is deprecated and will be removed in a future version. [Effective January 27, 2026](https://aws.amazon.com/blogs/big-data/migrate-from-amazon-kinesis-data-analytics-for-sql-to-amazon-managed-service-for-apache-flink-and-amazon-managed-service-for-apache-flink-studio/), AWS will [no longer support](https://docs.aws.amazon.com/kinesisanalytics/latest/dev/discontinuation.html) Amazon Kinesis Data Analytics for SQL. Use the `aws_kinesisanalyticsv2_application` resource instead to manage Amazon Kinesis Data Analytics for Apache Flink applications. AWS provides guidance for migrating from [Amazon Kinesis Data Analytics for SQL Applications to Amazon Managed Service for Apache Flink Studio](https://aws.amazon.com/blogs/big-data/migrate-from-amazon-kinesis-data-analytics-for-sql-applications-to-amazon-managed-service-for-apache-flink-studio/) including [examples](https://docs.aws.amazon.com/kinesisanalytics/latest/dev/migrating-to-kda-studio-overview.html). + +## Resource `aws_launch_template` + +* Remove `elastic_gpu_specifications`—it is no longer supported. Amazon Elastic Graphics reached end of life in January 2024. +* Remove `elastic_inference_accelerator`—it is no longer supported. Amazon Elastic Inference reached end of life in April 2024. +* See also [changes](#typenullablebool-validation-update) to `block_device_mappings.ebs.delete_on_termination`, `block_device_mappings.ebs.encrypted`, `ebsOptimized`, `network_interfaces.associate_carrier_ip_address`, `network_interfaces.associate_public_ip_address`, `network_interfaces.delete_on_termination`, and `network_interfaces.primary_ipv6`. + +## Resource `aws_lb_listener` + +* For `mutualAuthentication`, `advertiseTrustStoreCaNames`, `ignoreClientCertificateExpiry`, and `trustStoreArn` can now only be set when `mode` is `verify`. +* `trustStoreArn` is required when `mode` is `verify`. 
+ +## Resource `aws_media_store_container` + +This resource is deprecated and will be removed in a future version. AWS has [announced](https://aws.amazon.com/blogs/media/support-for-aws-elemental-mediastore-ending-soon/) the discontinuation of AWS Elemental MediaStore, effective November 13, 2025. Users should begin transitioning to alternative solutions as soon as possible. For simple live streaming workflows, AWS recommends migrating to Amazon S3. For advanced use cases that require features such as packaging, DRM, or cross-region redundancy, consider using AWS Elemental MediaPackage. + +## Resource `aws_media_store_container_policy` + +This resource is deprecated and will be removed in a future version. AWS has [announced](https://aws.amazon.com/blogs/media/support-for-aws-elemental-mediastore-ending-soon/) the discontinuation of AWS Elemental MediaStore, effective November 13, 2025. Users should begin transitioning to alternative solutions as soon as possible. For simple live streaming workflows, AWS recommends migrating to Amazon S3. For advanced use cases that require features such as packaging, DRM, or cross-region redundancy, consider using AWS Elemental MediaPackage. + +## Resource `aws_networkmanager_core_network` + +Remove `base_policy_region`—it is no longer supported. Use `basePolicyRegions` instead. + +## Resource `aws_opensearch_domain` + +Remove `kibanaEndpoint`—it is no longer supported. AWS OpenSearch Service does not use Kibana endpoints (i.e., `_plugin/kibana`). Instead, OpenSearch uses Dashboards, accessible at the path `/_dashboards/` on the domain endpoint. The terminology has shifted from “Kibana” to “Dashboards.” + +For more information, see the [AWS OpenSearch Dashboards documentation](https://docs.aws.amazon.com/opensearch-service/latest/developerguide/dashboards.html). + +## Resource `aws_opensearchserverless_security_config` + +Treat `samlOptions` as a list of nested blocks instead of a single-nested block. 
The resource configuration itself does not change. However, now, include an index when referencing it. For example, update `saml_options.session_timeout` to `saml_options[0].session_timeout`. + +## Resource `aws_paymentcryptography_key` + +Treat the `keyAttributes` and `key_attributes.key_modes_of_use` as lists of nested blocks instead of single-nested blocks. The resource configuration itself does not change. However, now, include an index when referencing them. For example, update `key_attributes.key_modes_of_use.decrypt` to `key_attributes[0].key_modes_of_use[0].decrypt`. + +## Resource `aws_redshift_cluster` + +* `encrypted` now defaults to `true`. +* `publiclyAccessible` now defaults to `false`. +* Remove `snapshot_copy`—it is no longer supported. Use the `aws_redshift_snapshot_copy` resource instead. +* Remove `logging`—it is no longer supported. Use the `aws_redshift_logging` resource instead. +* `clusterPublicKey`, `clusterRevisionNumber`, and `endpoint` are now read only and should not be set. + +## Resource `aws_redshift_service_account` + +The `aws_redshift_service_account` resource has been removed. AWS [recommends](https://docs.aws.amazon.com/redshift/latest/mgmt/db-auditing.html#db-auditing-bucket-permissions) that a [service principal name](https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements_principal.html#principal-services) should be used instead of an AWS account ID in any relevant IAM policy. + +## Resource `aws_rekognition_stream_processor` + +Treat `regions_of_interest.bounding_box` as a list of nested blocks instead of a single-nested block. The resource configuration itself does not change. However, now, include an index when referencing it. For example, update `regions_of_interest[0].bounding_box.height` to `regions_of_interest[0].bounding_box[0].height`. 
+ +## Resource `aws_resiliencehub_resiliency_policy` + +Treat the following as lists of nested blocks instead of single-nested blocks: + +- `policy` +- `policy.az` +- `policy.hardware` +- `policy.software` +- `policy.region` + +The resource configuration itself does not change. However, now, include an index when referencing them. For example, update `policy.az.rpo` to `policy[0].az[0].rpo`. + +## Resource `aws_s3_bucket` + +`bucketRegion` has been added and should be used instead of `region`, which is now used for [Enhanced Region Support](enhanced-region-support.html). + +## Resource `aws_sagemaker_image_version` + +For the `id`, use a comma-delimited string concatenating `imageName` and `version`. For example, in an import command, use comma-delimiting for the composite `id`. +Use `imageName` to reference the image name. + +## Resource `aws_sagemaker_notebook_instance` + +Remove `acceleratorTypes`—it is no longer supported. Instead, use `instanceType` to use [Inferentia](https://docs.aws.amazon.com/sagemaker/latest/dg/neo-supported-cloud.html). + +## Resource `aws_servicequotas_template` + +`region` has been deprecated. Use `awsRegion` instead. + +## Resource `aws_spot_instance_request` + +Remove `blockDurationMinutes`—it is no longer supported. + +## Resource `aws_ssm_association` + +Remove `instanceId`—it is no longer supported. Use `targets` instead. + +## Resource `aws_ssmincidents_replication_set` + +`region` has been deprecated. Use `regions` instead. + +## Resource `aws_verifiedpermissions_schema` + +Treat `definition` as a list of nested blocks instead of a single-nested block. The resource configuration itself does not change. However, now, include an index when referencing it. For example, update `definition.value` to `definition[0].value`. + +## Resource `aws_wafv2_web_acl` + +The default value for `rule.statement.managed_rule_group_statement.managed_rule_group_configs.aws_managed_rules_bot_control_rule_set.enable_machine_learning` is now `false`. 
+To retain the previous behavior where the argument was omitted, explicitly set the value to `true`. + + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/index.html.markdown b/website/docs/cdktf/typescript/index.html.markdown index 43166ac2d987..49ab0b6f048f 100644 --- a/website/docs/cdktf/typescript/index.html.markdown +++ b/website/docs/cdktf/typescript/index.html.markdown @@ -9,18 +9,13 @@ description: |- # AWS Provider -Use the Amazon Web Services (AWS) provider to interact with the -many resources supported by AWS. You must configure the provider -with the proper credentials before you can use it. +The Amazon Web Services (AWS) provider is Terraform’s most widely-used provider and the industry-standard way to manage AWS infrastructure as code. It is an indispensable part of how leading technology companies, global banks, government agencies, and some of the largest enterprises in the world build and operate in the cloud. Every day, it provisions and orchestrates billions of dollars of AWS infrastructure across thousands of organizations. -Use the navigation to the left to read about the available resources. There are currently 1514 resources and 608 data sources available in the provider. +With 1,543 resources and 615 data sources, the AWS provider spans the full breadth of AWS services—from foundational capabilities like compute, storage, networking, and identity management to advanced services for AI, analytics, and event-driven architectures, including Lambda, RDS, SageMaker, and Bedrock. Whether automating a single S3 bucket or orchestrating a multi-region, enterprise-scale environment, the provider delivers consistent, reliable workflows that scale with your needs. 
-To learn the basics of Terraform using this provider, follow the -hands-on [get started tutorials](https://developer.hashicorp.com/terraform/tutorials/aws-get-started/infrastructure-as-code?in=terraform/aws-get-started&utm_source=WEBSITE&utm_medium=WEB_IO&utm_offer=ARTICLE_PAGE&utm_content=DOCS). Interact with AWS services, -including Lambda, RDS, and IAM by following the [AWS services -tutorials](https://developer.hashicorp.com/terraform/tutorials/aws?utm_source=WEBSITE&utm_medium=WEB_IO&utm_offer=ARTICLE_PAGE&utm_content=DOCS). +Configure the provider with your AWS credentials, and you can immediately begin creating and managing infrastructure in a safe, repeatable way. Use the navigation on the left to explore the available resources, or start with our [Get Started tutorials](https://developer.hashicorp.com/terraform/tutorials/aws-get-started/infrastructure-as-code?in=terraform/aws-get-started&utm_source=WEBSITE&utm_medium=WEB_IO&utm_offer=ARTICLE_PAGE&utm_content=DOCS) to learn the fundamentals. For deeper guidance on specific AWS services, visit the [AWS services tutorials](https://developer.hashicorp.com/terraform/tutorials/aws?utm_source=WEBSITE&utm_medium=WEB_IO&utm_offer=ARTICLE_PAGE&utm_content=DOCS). -Some AWS services do not support IPv6. As a result, the provider may not be able to interact with AWS APIs using IPv6 addresses. +Note: Some AWS services do not yet support IPv6. In those cases, the provider may not be able to connect to AWS APIs over IPv6 addresses. ## Example Usage @@ -481,17 +476,19 @@ In addition to [generic `provider` arguments](https://www.terraform.io/docs/conf Can also be set with either the `AWS_REGION` or `AWS_DEFAULT_REGION` environment variables, or via a shared config file parameter `region` if `profile` is used. If credentials are retrieved from the EC2 Instance Metadata Service, the Region can also be retrieved from the metadata. 
+ Most Regional resources, data sources and ephemeral resources support an optional top-level `region` argument which can be used to override the provider configuration value. See the individual resource's documentation for details. * `retryMode` - (Optional) Specifies how retries are attempted. Valid values are `standard` and `adaptive`. Can also be configured using the `AWS_RETRY_MODE` environment variable or the shared config file parameter `retryMode`. * `s3UsePathStyle` - (Optional) Whether to enable the request to use path-style addressing, i.e., `https://s3.amazonaws.com/BUCKET/KEY`. By default, the S3 client will use virtual hosted bucket addressing, `https://BUCKET.s3.amazonaws.com/KEY`, when possible. Specific to the Amazon S3 service. -* `s3UsEast1RegionalEndpoint` - (Optional) Specifies whether S3 API calls in the `us-east-1` Region use the legacy global endpoint or a regional endpoint. +* `s3UsEast1RegionalEndpoint` - (Optional, **Deprecated**) Specifies whether S3 API calls in the `us-east-1` Region use the legacy global endpoint or a regional endpoint. Valid values are `legacy` or `regional`. If omitted, the default behavior in the `us-east-1` Region is to use the global endpoint for general purpose buckets and the regional endpoint for directory buckets. Can also be configured using the `AWS_S3_US_EAST_1_REGIONAL_ENDPOINT` environment variable or the `s3UsEast1RegionalEndpoint` shared config file parameter. Specific to the Amazon S3 service. + This argument and the ability to use the global S3 endpoint are deprecated and will be removed in `v7.0.0`. * `secretKey` - (Optional) AWS secret key. Can also be set with the `AWS_SECRET_ACCESS_KEY` environment variable, or via a shared configuration and credentials files if `profile` is used. See also `accessKey`. * `sharedConfigFiles` - (Optional) List of paths to AWS shared config files. If not set, the default is `[~/.aws/config]`. 
A single value can also be set with the `AWS_CONFIG_FILE` environment variable. * `sharedCredentialsFiles` - (Optional) List of paths to the shared credentials file. If not set and a profile is used, the default value is `[~/.aws/credentials]`. A single value can also be set with the `AWS_SHARED_CREDENTIALS_FILE` environment variable. @@ -951,4 +948,4 @@ Approaches differ per authentication providers: There used to be no better way to get account ID out of the API when using the federated account until `sts:GetCallerIdentity` was introduced. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/list-resources/batch_job_queue.html.markdown b/website/docs/cdktf/typescript/list-resources/batch_job_queue.html.markdown new file mode 100644 index 000000000000..5de6c15aa4db --- /dev/null +++ b/website/docs/cdktf/typescript/list-resources/batch_job_queue.html.markdown @@ -0,0 +1,38 @@ +--- +subcategory: "Batch" +layout: "aws" +page_title: "AWS: aws_batch_job_queue" +description: |- + Lists Batch Job Queue resources. +--- + + + +# List Resource: aws_batch_job_queue + +~> **Note:** The `aws_batch_job_queue` List Resource is in beta. Its interface and behavior may change as the feature evolves, and breaking changes are possible. It is offered as a technical preview without compatibility guarantees until Terraform 1.14 is generally available. + +Lists Batch Job Queue resources. + +## Example Usage + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + } +} + +``` + +## Argument Reference + +This list resource supports the following arguments: + +* `region` - (Optional) [Region](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints) to query. 
+ Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). + + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/list-resources/cloudwatch_log_group.html.markdown b/website/docs/cdktf/typescript/list-resources/cloudwatch_log_group.html.markdown new file mode 100644 index 000000000000..a246eee62fa7 --- /dev/null +++ b/website/docs/cdktf/typescript/list-resources/cloudwatch_log_group.html.markdown @@ -0,0 +1,38 @@ +--- +subcategory: "CloudWatch Logs" +layout: "aws" +page_title: "AWS: aws_cloudwatch_log_group" +description: |- + Lists CloudWatch Logs Log Group resources. +--- + + + +# List Resource: aws_cloudwatch_log_group + +~> **Note:** The `aws_cloudwatch_log_group` List Resource is in beta. Its interface and behavior may change as the feature evolves, and breaking changes are possible. It is offered as a technical preview without compatibility guarantees until Terraform 1.14 is generally available. + +Lists CloudWatch Logs Log Group resources. + +## Example Usage + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + } +} + +``` + +## Argument Reference + +This list resource supports the following arguments: + +* `region` - (Optional) [Region](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints) to query. + Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
+ + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/list-resources/iam_role.html.markdown b/website/docs/cdktf/typescript/list-resources/iam_role.html.markdown new file mode 100644 index 000000000000..20066f2fb0f3 --- /dev/null +++ b/website/docs/cdktf/typescript/list-resources/iam_role.html.markdown @@ -0,0 +1,37 @@ +--- +subcategory: "IAM (Identity & Access Management)" +layout: "aws" +page_title: "AWS: aws_iam_role" +description: |- + Lists IAM Role resources. +--- + + + +# List Resource: aws_iam_role + +~> **Note:** The `aws_iam_role` List Resource is in beta. Its interface and behavior may change as the feature evolves, and breaking changes are possible. It is offered as a technical preview without compatibility guarantees until Terraform 1.14 is generally available. + +Lists IAM Role resources. + +Excludes Service-Linked Roles (see "AWS service-linked role" in [IAM Roles Terms and Concepts documentation](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles.html#id_roles_terms-and-concepts)). + +## Example Usage + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + } +} + +``` + +## Argument Reference + +This list resource does not support any arguments. + + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/list-resources/instance.html.markdown b/website/docs/cdktf/typescript/list-resources/instance.html.markdown new file mode 100644 index 000000000000..0c3c7cb19aa6 --- /dev/null +++ b/website/docs/cdktf/typescript/list-resources/instance.html.markdown @@ -0,0 +1,74 @@ +--- +subcategory: "EC2 (Elastic Compute Cloud)" +layout: "aws" +page_title: "AWS: aws_instance" +description: |- + Lists EC2 Instance resources. 
+--- + + + +# List Resource: aws_instance + +~> **Note:** The `aws_instance` List Resource is in beta. Its interface and behavior may change as the feature evolves, and breaking changes are possible. It is offered as a technical preview without compatibility guarantees until Terraform 1.14 is generally available. + +Lists EC2 Instance resources. + +By default, EC2 Instances managed by an Auto Scaling Group and EC2 Instances in either the `terminated` or `shutting-down` state are excluded. + +## Example Usage + +### Basic Usage + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + } +} + +``` + +### Filter Usage + +This example will return instances in the `stopped` state. + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + } +} + +``` + +## Argument Reference + +This list resource supports the following arguments: + +* `filter` - (Optional) One or more filters to apply to the search. + If multiple `filter` blocks are provided, they all must be true. + For a full reference of filter names, see [describe-instances in the AWS CLI reference][1]. + See [`filter` Block](#filter-block) below. +* `includeAutoScaled` - (Optional) Whether to include EC2 instances that are managed by an Auto Scaling Group. + Default value is `false`. +* `region` - (Optional) [Region](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints) to query. 
+ Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). + +### `filter` Block + +The `filter` block supports the following arguments: + +* `name` - (Required) Name of the filter. + For a full reference of filter names, see [describe-instances in the AWS CLI reference][1]. +* `values` - (Required) One or more values to match. + +[1]: http://docs.aws.amazon.com/cli/latest/reference/ec2/describe-instances.html + + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/accessanalyzer_analyzer.html.markdown b/website/docs/cdktf/typescript/r/accessanalyzer_analyzer.html.markdown index 4b82a7242cb9..ca84cca9c054 100644 --- a/website/docs/cdktf/typescript/r/accessanalyzer_analyzer.html.markdown +++ b/website/docs/cdktf/typescript/r/accessanalyzer_analyzer.html.markdown @@ -70,7 +70,7 @@ class MyConvertedCode extends TerraformStack { ``` -### Organization Unused Access Analyzer with analysis rule +### Organization Unused Access Analyzer With Analysis Rule ```typescript // DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug @@ -88,25 +88,23 @@ class MyConvertedCode extends TerraformStack { analyzerName: "example", configuration: { unusedAccess: { - analysis_rule: [ - { - exclusion: [ - { - account_ids: ["123456789012", "234567890123"], - }, - { - resource_tags: [ - { - key1: "value1", - }, - { - key2: "value2", - }, - ], - }, - ], - }, - ], + analysisRule: { + exclusion: [ + { + accountIds: ["123456789012", "234567890123"], + }, + { + resourceTags: [ + { + key1: "value1", + }, + { + key2: "value2", + }, + ], + }, + ], + }, unusedAccessAge: 180, }, }, @@ -117,6 +115,79 @@ class MyConvertedCode extends TerraformStack { ``` +### Account Internal Access Analyzer by Resource Types + +```typescript +// DO NOT EDIT. 
Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. + */ +import { AccessanalyzerAnalyzer } from "./.gen/providers/aws/accessanalyzer-analyzer"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + new AccessanalyzerAnalyzer(this, "test", { + analyzerName: "example", + configuration: { + internalAccess: { + analysisRule: { + inclusion: [ + { + resourceTypes: [ + "AWS::S3::Bucket", + "AWS::RDS::DBSnapshot", + "AWS::DynamoDB::Table", + ], + }, + ], + }, + }, + }, + type: "ORGANIZATION_INTERNAL_ACCESS", + }); + } +} + +``` + +### Organization Internal Access Analyzer by Account ID and Resource ARN + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. + */ +import { AccessanalyzerAnalyzer } from "./.gen/providers/aws/accessanalyzer-analyzer"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + new AccessanalyzerAnalyzer(this, "test", { + analyzerName: "example", + configuration: { + internalAccess: { + analysisRule: { + inclusion: [ + { + accountIds: ["123456789012"], + resourceArns: ["arn:aws:s3:::my-example-bucket"], + }, + ], + }, + }, + }, + type: "ORGANIZATION_INTERNAL_ACCESS", + }); + } +} + +``` + ## Argument Reference The following arguments are required: @@ -125,34 +196,64 @@ The following arguments are required: The following arguments are optional: -* `configuration` - (Optional) A block that specifies the configuration of the analyzer. 
[Documented below](#configuration-argument-reference) +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `configuration` - (Optional) A block that specifies the configuration of the analyzer. See [`configuration` Block](#configuration-block) for details. * `tags` - (Optional) Key-value map of resource tags. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. -* `type` - (Optional) Type of Analyzer. Valid values are `ACCOUNT`, `ORGANIZATION`, `ACCOUNT_UNUSED_ACCESS `, `ORGANIZATION_UNUSED_ACCESS`. Defaults to `ACCOUNT`. +* `type` - (Optional) Type that represents the zone of trust or scope for the analyzer. Valid values are `ACCOUNT`, `ACCOUNT_INTERNAL_ACCESS`, `ACCOUNT_UNUSED_ACCESS`, `ORGANIZATION`, `ORGANIZATION_INTERNAL_ACCESS`, `ORGANIZATION_UNUSED_ACCESS`. Defaults to `ACCOUNT`. + +### `configuration` Block + +The `configuration` configuration block supports the following arguments: + +* `internalAccess` - (Optional) Specifies the configuration of an internal access analyzer for an AWS organization or account. This configuration determines how the analyzer evaluates access within your AWS environment. See [`internalAccess` Block](#internal_access-block) for details. +* `unusedAccess` - (Optional) Specifies the configuration of an unused access analyzer for an AWS organization or account. See [`unusedAccess` Block](#unused_access-block) for details. 
+ +### `internalAccess` Block + +The `internalAccess` configuration block supports the following arguments: + +* `analysisRule` - (Optional) Information about analysis rules for the internal access analyzer. These rules determine which resources and access patterns will be analyzed. See [`analysisRule` Block for Internal Access Analyzer](#analysis_rule-block-for-internal-access-analyzer) for details. + +### `analysisRule` Block for Internal Access Analyzer + +The `analysisRule` configuration block for internal access analyzer supports the following arguments: + +* `inclusion` - (Optional) List of rules for the internal access analyzer containing criteria to include in analysis. Only resources that meet the rule criteria will generate findings. See [`inclusion` Block](#inclusion-block) for details. + +### `inclusion` Block + +The `inclusion` configuration block supports the following arguments: + +* `accountIds` - (Optional) List of AWS account IDs to apply to the internal access analysis rule criteria. Account IDs can only be applied to the analysis rule criteria for organization-level analyzers. +* `resourceArns` - (Optional) List of resource ARNs to apply to the internal access analysis rule criteria. The analyzer will only generate findings for resources that match these ARNs. +* `resourceTypes` - (Optional) List of resource types to apply to the internal access analysis rule criteria. The analyzer will only generate findings for resources of these types. Refer to [InternalAccessAnalysisRuleCriteria](https://docs.aws.amazon.com/access-analyzer/latest/APIReference/API_InternalAccessAnalysisRuleCriteria.html) in the AWS IAM Access Analyzer API Reference for valid values. + +### `unusedAccess` Block -### `configuration` Argument Reference +The `unusedAccess` configuration block supports the following arguments: -* `unusedAccess` - (Optional) A block that specifies the configuration of an unused access analyzer for an AWS organization or account. 
[Documented below](#unused_access-argument-reference) +* `unusedAccessAge` - (Optional) Specified access age in days for which to generate findings for unused access. +* `analysisRule` - (Optional) Information about analysis rules for the analyzer. Analysis rules determine which entities will generate findings based on the criteria you define when you create the rule. See [`analysisRule` Block for Unused Access Analyzer](#analysis_rule-block-for-unused-access-analyzer) for details. -### `unusedAccess` Argument Reference +### `analysisRule` Block for Unused Access Analyzer -* `unusedAccessAge` - (Optional) The specified access age in days for which to generate findings for unused access. -* `analysis_rule` - (Optional) A block for analysis rules. [Documented below](#analysis_rule-argument-reference) +The `analysisRule` configuration block for unused access analyzer supports the following arguments: -### `analysis_rule` Argument Reference +* `exclusion` - (Optional) List of rules for the analyzer containing criteria to exclude from analysis. Entities that meet the rule criteria will not generate findings. See [`exclusion` Block](#exclusion-block) for details. -* `exclusion` - (Optional) A block for the analyzer rules containing criteria to exclude from analysis. [Documented below](#exclusion-argument-reference) +### `exclusion` Block -#### `exclusion` Argument Reference +The `exclusion` configuration block supports the following arguments: -* `accountIds` - (Optional) A list of account IDs to exclude from the analysis. -* `resourceTags` - (Optional) A list of key-value pairs for resource tags to exclude from the analysis. +* `accountIds` - (Optional) List of AWS account IDs to apply to the analysis rule criteria. The accounts cannot include the organization analyzer owner account. Account IDs can only be applied to the analysis rule criteria for organization-level analyzers. 
+* `resourceTags` - (Optional) List of key-value pairs for resource tags to exclude from the analysis. ## Attribute Reference This resource exports the following attributes in addition to the arguments above: * `arn` - ARN of the Analyzer. -* `id` - Analyzer name. +* `id` - Name of the analyzer. * `tagsAll` - Map of tags assigned to the resource, including those inherited from the provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). ## Import @@ -183,4 +284,4 @@ Using `terraform import`, import Access Analyzer Analyzers using the `analyzerNa % terraform import aws_accessanalyzer_analyzer.example example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/accessanalyzer_archive_rule.html.markdown b/website/docs/cdktf/typescript/r/accessanalyzer_archive_rule.html.markdown index 4ee0eb1810f8..aeb2e171645c 100644 --- a/website/docs/cdktf/typescript/r/accessanalyzer_archive_rule.html.markdown +++ b/website/docs/cdktf/typescript/r/accessanalyzer_archive_rule.html.markdown @@ -53,8 +53,9 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -The following arguments are required: +This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `analyzerName` - (Required) Analyzer name. * `filter` - (Required) Filter criteria for the archive rule. See [Filter](#filter) for more details. * `ruleName` - (Required) Rule name. 
@@ -107,4 +108,4 @@ Using `terraform import`, import AccessAnalyzer ArchiveRule using the `analyzer_ % terraform import aws_accessanalyzer_archive_rule.example example-analyzer/example-rule ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/acm_certificate.html.markdown b/website/docs/cdktf/typescript/r/acm_certificate.html.markdown index 174ac8ef427e..b00091e5d7fa 100644 --- a/website/docs/cdktf/typescript/r/acm_certificate.html.markdown +++ b/website/docs/cdktf/typescript/r/acm_certificate.html.markdown @@ -208,6 +208,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * Creating an Amazon issued certificate * `domainName` - (Required) Domain name for which the certificate should be issued * `subjectAlternativeNames` - (Optional) Set of domains that should be SANs in the issued certificate. To remove all elements of a previously configured list, set this value equal to an empty list (`[]`) or use the [`terraform taint` command](https://www.terraform.io/docs/commands/taint.html) to trigger recreation. @@ -235,6 +236,7 @@ This resource supports the following arguments: Supported nested arguments for the `options` configuration block: * `certificateTransparencyLoggingPreference` - (Optional) Whether certificate details should be added to a certificate transparency log. Valid values are `ENABLED` or `DISABLED`. See https://docs.aws.amazon.com/acm/latest/userguide/acm-concepts.html#concept-transparency for more details. +* `export` - (Optional) Whether the certificate can be exported. Valid values are `ENABLED` or `DISABLED` (default). 
**Note** Issuing an exportable certificate is subject to additional charges. See [AWS Certificate Manager pricing](https://aws.amazon.com/certificate-manager/pricing/) for more details. ## validation_option Configuration Block @@ -279,6 +281,27 @@ Renewal summary objects export the following attributes: ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_acm_certificate.example + identity = { + "arn" = "arn:aws:acm:eu-central-1:123456789012:certificate/7e7a28d2-163f-4b8f-b9cd-822f96c08d6a" + } +} + +resource "aws_acm_certificate" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) ARN of the certificate. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import certificates using their ARN. For example: ```typescript @@ -295,7 +318,7 @@ class MyConvertedCode extends TerraformStack { super(scope, name); AcmCertificate.generateConfigForImport( this, - "cert", + "example", "arn:aws:acm:eu-central-1:123456789012:certificate/7e7a28d2-163f-4b8f-b9cd-822f96c08d6a" ); } @@ -306,7 +329,7 @@ class MyConvertedCode extends TerraformStack { Using `terraform import`, import certificates using their ARN. 
For example: ```console -% terraform import aws_acm_certificate.cert arn:aws:acm:eu-central-1:123456789012:certificate/7e7a28d2-163f-4b8f-b9cd-822f96c08d6a +% terraform import aws_acm_certificate.example arn:aws:acm:eu-central-1:123456789012:certificate/7e7a28d2-163f-4b8f-b9cd-822f96c08d6a ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/acm_certificate_validation.html.markdown b/website/docs/cdktf/typescript/r/acm_certificate_validation.html.markdown index 82b01cf19683..c80fde5e0a2a 100644 --- a/website/docs/cdktf/typescript/r/acm_certificate_validation.html.markdown +++ b/website/docs/cdktf/typescript/r/acm_certificate_validation.html.markdown @@ -249,6 +249,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `certificateArn` - (Required) ARN of the certificate that is being validated. * `validationRecordFqdns` - (Optional) List of FQDNs that implement the validation. Only valid for DNS validation method ACM certificates. 
If this is set, the resource can implement additional sanity checks and has an explicit dependency on the resource that is implementing the validation @@ -264,4 +265,4 @@ This resource exports the following attributes in addition to the arguments abov - `create` - (Default `75m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/acmpca_certificate.html.markdown b/website/docs/cdktf/typescript/r/acmpca_certificate.html.markdown index ab61f31855ba..390d021f8992 100644 --- a/website/docs/cdktf/typescript/r/acmpca_certificate.html.markdown +++ b/website/docs/cdktf/typescript/r/acmpca_certificate.html.markdown @@ -83,6 +83,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `certificateAuthorityArn` - (Required) ARN of the certificate authority. * `certificateSigningRequest` - (Required) Certificate Signing Request in PEM format. * `signingAlgorithm` - (Required) Algorithm to use to sign certificate requests. Valid values: `SHA256WITHRSA`, `SHA256WITHECDSA`, `SHA384WITHRSA`, `SHA384WITHECDSA`, `SHA512WITHRSA`, `SHA512WITHECDSA`. @@ -106,6 +107,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. 
For example: + +```terraform +import { + to = aws_acmpca_certificate.example + identity = { + "arn" = "arn:aws:acm-pca:us-east-1:123456789012:certificate-authority/12345678-1234-1234-1234-123456789012/certificate/286535153982981100925020015808220737245" + } +} + +resource "aws_acmpca_certificate" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the ACM PCA certificate. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import ACM PCA Certificates using their ARN. For example: ```typescript @@ -136,4 +158,4 @@ Using `terraform import`, import ACM PCA Certificates using their ARN. For examp % terraform import aws_acmpca_certificate.cert arn:aws:acm-pca:eu-west-1:675225743824:certificate-authority/08319ede-83g9-1400-8f21-c7d12b2b6edb/certificate/a4e9c2aa4bcfab625g1b9136464cd3a ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/acmpca_certificate_authority.html.markdown b/website/docs/cdktf/typescript/r/acmpca_certificate_authority.html.markdown index 9cf1168d56f3..3130b33f3394 100644 --- a/website/docs/cdktf/typescript/r/acmpca_certificate_authority.html.markdown +++ b/website/docs/cdktf/typescript/r/acmpca_certificate_authority.html.markdown @@ -158,6 +158,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `certificateAuthorityConfiguration` - (Required) Nested argument containing algorithms and certificate subject information. Defined below. 
* `enabled` - (Optional) Whether the certificate authority is enabled or disabled. Defaults to `true`. Can only be disabled if the CA is in an `ACTIVE` state. * `revocationConfiguration` - (Optional) Nested argument containing revocation configuration. Defined below. @@ -232,6 +233,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_acmpca_certificate_authority.example + identity = { + "arn" = "arn:aws:acm-pca:us-east-1:123456789012:certificate-authority/12345678-1234-1234-1234-123456789012" + } +} + +resource "aws_acmpca_certificate_authority" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the ACM PCA certificate authority. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import `aws_acmpca_certificate_authority` using the certificate authority ARN. 
For example: ```typescript @@ -262,4 +284,4 @@ Using `terraform import`, import `aws_acmpca_certificate_authority` using the ce % terraform import aws_acmpca_certificate_authority.example arn:aws:acm-pca:us-east-1:123456789012:certificate-authority/12345678-1234-1234-1234-123456789012 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/acmpca_certificate_authority_certificate.html.markdown b/website/docs/cdktf/typescript/r/acmpca_certificate_authority_certificate.html.markdown index c27d6c373c91..8110f5909103 100644 --- a/website/docs/cdktf/typescript/r/acmpca_certificate_authority_certificate.html.markdown +++ b/website/docs/cdktf/typescript/r/acmpca_certificate_authority_certificate.html.markdown @@ -184,6 +184,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `certificate` - (Required) PEM-encoded certificate for the Certificate Authority. * `certificateAuthorityArn` - (Required) ARN of the Certificate Authority. * `certificateChain` - (Optional) PEM-encoded certificate chain that includes any intermediate certificates and chains up to root CA. Required for subordinate Certificate Authorities. Not allowed for root Certificate Authorities. @@ -192,4 +193,4 @@ This resource supports the following arguments: This resource exports no additional attributes. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/acmpca_permission.html.markdown b/website/docs/cdktf/typescript/r/acmpca_permission.html.markdown index efa760c39184..b515f3b62957 100644 --- a/website/docs/cdktf/typescript/r/acmpca_permission.html.markdown +++ b/website/docs/cdktf/typescript/r/acmpca_permission.html.markdown @@ -53,6 +53,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `certificateAuthorityArn` - (Required) ARN of the CA that grants the permissions. * `actions` - (Required) Actions that the specified AWS service principal can use. These include `IssueCertificate`, `GetCertificate`, and `ListPermissions`. Note that in order for ACM to automatically rotate certificates issued by a PCA, it must be granted permission on all 3 actions, as per the example above. * `principal` - (Required) AWS service or identity that receives the permission. At this time, the only valid principal is `acm.amazonaws.com`. @@ -64,4 +65,4 @@ This resource exports the following attributes in addition to the arguments abov * `policy` - IAM policy that is associated with the permission. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/acmpca_policy.html.markdown b/website/docs/cdktf/typescript/r/acmpca_policy.html.markdown index f869b5cd1228..f174a2a551b7 100644 --- a/website/docs/cdktf/typescript/r/acmpca_policy.html.markdown +++ b/website/docs/cdktf/typescript/r/acmpca_policy.html.markdown @@ -85,6 +85,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `resourceArn` - (Required) ARN of the private CA to associate with the policy. * `policy` - (Required) JSON-formatted IAM policy to attach to the specified private CA resource. @@ -94,6 +95,27 @@ This resource exports no additional attributes. ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_acmpca_policy.example + identity = { + "arn" = "arn:aws:acm-pca:us-east-1:123456789012:certificate-authority/12345678-1234-1234-1234-123456789012" + } +} + +resource "aws_acmpca_policy" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the ACM PCA certificate authority. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import `aws_acmpca_policy` using the `resourceArn` value. 
For example: ```typescript @@ -124,4 +146,4 @@ Using `terraform import`, import `aws_acmpca_policy` using the `resourceArn` val % terraform import aws_acmpca_policy.example arn:aws:acm-pca:us-east-1:123456789012:certificate-authority/12345678-1234-1234-1234-123456789012 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/ami.html.markdown b/website/docs/cdktf/typescript/r/ami.html.markdown index 5fc9882ff962..517caa10c554 100644 --- a/website/docs/cdktf/typescript/r/ami.html.markdown +++ b/website/docs/cdktf/typescript/r/ami.html.markdown @@ -55,6 +55,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Region-unique name for the AMI. * `bootMode` - (Optional) Boot mode of the AMI. For more information, see [Boot modes](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ami-boot.html) in the Amazon Elastic Compute Cloud User Guide. * `deprecationTime` - (Optional) Date and time to deprecate the AMI. If you specified a value for seconds, Amazon EC2 rounds the seconds to the nearest minute. Valid values: [RFC3339 time string](https://tools.ietf.org/html/rfc3339#section-5.8) (`YYYY-MM-DDTHH:MM:SSZ`) @@ -168,4 +169,4 @@ Using `terraform import`, import `aws_ami` using the ID of the AMI. 
For example: % terraform import aws_ami.example ami-12345678 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/ami_copy.html.markdown b/website/docs/cdktf/typescript/r/ami_copy.html.markdown index 520ee1d86a4a..1df81e1c9701 100644 --- a/website/docs/cdktf/typescript/r/ami_copy.html.markdown +++ b/website/docs/cdktf/typescript/r/ami_copy.html.markdown @@ -53,6 +53,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Region-unique name for the AMI. * `sourceAmiId` - (Required) Id of the AMI to copy. This id must be valid in the region given by `sourceAmiRegion`. @@ -85,4 +86,4 @@ configuration. * `update` - (Default `40m`) * `delete` - (Default `90m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/ami_from_instance.html.markdown b/website/docs/cdktf/typescript/r/ami_from_instance.html.markdown index 6601a7044cdd..b4a94669d4ce 100644 --- a/website/docs/cdktf/typescript/r/ami_from_instance.html.markdown +++ b/website/docs/cdktf/typescript/r/ami_from_instance.html.markdown @@ -55,6 +55,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Region-unique name for the AMI. 
* `sourceInstanceId` - (Required) ID of the instance to use as the basis of the AMI. * `snapshotWithoutReboot` - (Optional) Boolean that overrides the behavior of stopping @@ -82,4 +83,4 @@ This resource also exports a full set of attributes corresponding to the argumen [`aws_ami`](/docs/providers/aws/r/ami.html) resource, allowing the properties of the created AMI to be used elsewhere in the configuration. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/ami_launch_permission.html.markdown b/website/docs/cdktf/typescript/r/ami_launch_permission.html.markdown index f7abc5503da8..8c5fc6695d8f 100644 --- a/website/docs/cdktf/typescript/r/ami_launch_permission.html.markdown +++ b/website/docs/cdktf/typescript/r/ami_launch_permission.html.markdown @@ -89,6 +89,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `accountId` - (Optional) AWS account ID for the launch permission. * `group` - (Optional) Name of the group for the launch permission. Valid values: `"all"`. * `imageId` - (Required) ID of the AMI. 
@@ -133,4 +134,4 @@ Using `terraform import`, import AMI Launch Permissions using `[ACCOUNT-ID|GROUP % terraform import aws_ami_launch_permission.example 123456789012/ami-12345678 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/amplify_app.html.markdown b/website/docs/cdktf/typescript/r/amplify_app.html.markdown index 2e6cb51bc6d6..dd1345219f89 100644 --- a/website/docs/cdktf/typescript/r/amplify_app.html.markdown +++ b/website/docs/cdktf/typescript/r/amplify_app.html.markdown @@ -203,10 +203,36 @@ class MyConvertedCode extends TerraformStack { ``` +### Job Config + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. + */ +import { AmplifyApp } from "./.gen/providers/aws/amplify-app"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + new AmplifyApp(this, "example", { + jobConfig: { + buildComputeType: "STANDARD_8GB", + }, + name: "example", + }); + } +} + +``` + ## Argument Reference This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name for an Amplify app. * `accessToken` - (Optional) Personal access token for a third-party source control system for an Amplify app. This token must have write access to the relevant repo to create a webhook and a read-only deploy key for the Amplify project. 
The token is not stored, so after applying this attribute can be removed and the setup token deleted. * `autoBranchCreationConfig` - (Optional) Automated branch creation configuration for an Amplify app. See [`autoBranchCreationConfig` Block](#auto_branch_creation_config-block) for details. @@ -214,7 +240,7 @@ This resource supports the following arguments: * `basicAuthCredentials` - (Optional) Credentials for basic authorization for an Amplify app. * `buildSpec` - (Optional) The [build specification](https://docs.aws.amazon.com/amplify/latest/userguide/build-settings.html) (build spec) for an Amplify app. * `cacheConfig` - (Optional) Cache configuration for the Amplify app. See [`cacheConfig` Block](#cache_config-block) for details. -* `compute_role_arn` - (Optional) AWS Identity and Access Management (IAM) SSR compute role for an Amplify app. +* `computeRoleArn` - (Optional) AWS Identity and Access Management (IAM) SSR compute role for an Amplify app. * `customHeaders` - (Optional) The [custom HTTP headers](https://docs.aws.amazon.com/amplify/latest/userguide/custom-headers.html) for an Amplify app. * `customRule` - (Optional) Custom rewrite and redirect rules for an Amplify app. See [`customRule` Block](#custom_rule-block) for details. * `description` - (Optional) Description for an Amplify app. @@ -224,6 +250,7 @@ This resource supports the following arguments: * `enableBranchAutoDeletion` - (Optional) Automatically disconnects a branch in the Amplify Console when you delete a branch from your Git repository. * `environmentVariables` - (Optional) Environment variables map for an Amplify app. * `iamServiceRoleArn` - (Optional) AWS Identity and Access Management (IAM) service role for an Amplify app. +* `jobConfig` - (Optional) Used to configure the [Amplify Application build instance compute type](https://docs.aws.amazon.com/amplify/latest/APIReference/API_JobConfig.html#amplify-Type-JobConfig-buildComputeType). 
See [`jobConfig` Block](#job_config-block) for details. * `oauthToken` - (Optional) OAuth token for a third-party source control system for an Amplify app. The OAuth token is used to create a webhook and a read-only deploy key. The OAuth token is not stored. * `platform` - (Optional) Platform or framework for an Amplify app. Valid values: `WEB`, `WEB_COMPUTE`. Default value: `WEB`. * `repository` - (Optional) Repository for an Amplify app. @@ -259,6 +286,12 @@ The `customRule` configuration block supports the following arguments: * `status` - (Optional) Status code for a URL rewrite or redirect rule. Valid values: `200`, `301`, `302`, `404`, `404-200`. * `target` - (Required) Target pattern for a URL rewrite or redirect rule. +### `jobConfig` Block + +The `jobConfig` configuration block supports the following arguments: + +* `buildComputeType` - (Optional) Size of the build instance. Valid values: `STANDARD_8GB`, `LARGE_16GB`, and `XLARGE_72GB`. Default: `STANDARD_8GB`. + ## Attribute Reference This resource exports the following attributes in addition to the arguments above: @@ -306,4 +339,4 @@ Using `terraform import`, import Amplify App using Amplify App ID (appId). For e App ID can be obtained from App ARN (e.g., `arn:aws:amplify:us-east-1:12345678:apps/d2ypk4k47z8u6`). - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/amplify_backend_environment.html.markdown b/website/docs/cdktf/typescript/r/amplify_backend_environment.html.markdown index fab4eaebd13a..559109d6058d 100644 --- a/website/docs/cdktf/typescript/r/amplify_backend_environment.html.markdown +++ b/website/docs/cdktf/typescript/r/amplify_backend_environment.html.markdown @@ -51,6 +51,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `appId` - (Required) Unique ID for an Amplify app. * `environmentName` - (Required) Name for the backend environment. * `deploymentArtifacts` - (Optional) Name of deployment artifacts. @@ -95,4 +96,4 @@ Using `terraform import`, import Amplify backend environment using `appId` and ` % terraform import aws_amplify_backend_environment.example d2ypk4k47z8u6/example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/amplify_branch.html.markdown b/website/docs/cdktf/typescript/r/amplify_branch.html.markdown index ea8f3e8e44a3..1cca1f775a17 100644 --- a/website/docs/cdktf/typescript/r/amplify_branch.html.markdown +++ b/website/docs/cdktf/typescript/r/amplify_branch.html.markdown @@ -213,6 +213,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `appId` - (Required) Unique ID for an Amplify app. * `branchName` - (Required) Name for the branch. * `backendEnvironmentArn` - (Optional) ARN for a backend environment that is part of an Amplify app. @@ -224,6 +225,7 @@ This resource supports the following arguments: * `enableNotification` - (Optional) Enables notifications for the branch. * `enablePerformanceMode` - (Optional) Enables performance mode for the branch. * `enablePullRequestPreview` - (Optional) Enables pull request previews for this branch. +* `enableSkewProtection` - (Optional) Enables skew protection for the branch. * `environmentVariables` - (Optional) Environment variables for the branch. 
* `framework` - (Optional) Framework for the branch. * `pullRequestEnvironmentName` - (Optional) Amplify environment name for the pull request. @@ -274,4 +276,4 @@ Using `terraform import`, import Amplify branch using `appId` and `branchName`. % terraform import aws_amplify_branch.master d2ypk4k47z8u6/master ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/amplify_domain_association.html.markdown b/website/docs/cdktf/typescript/r/amplify_domain_association.html.markdown index 504c5b08bda7..635dcb196a33 100644 --- a/website/docs/cdktf/typescript/r/amplify_domain_association.html.markdown +++ b/website/docs/cdktf/typescript/r/amplify_domain_association.html.markdown @@ -71,6 +71,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `appId` - (Required) Unique ID for an Amplify app. * `certificateSettings` - (Optional) The type of SSL/TLS certificate to use for your custom domain. If you don't specify a certificate type, Amplify uses the default certificate that it provisions and manages for you. * `domainName` - (Required) Domain name for the domain association. 
@@ -134,4 +135,4 @@ Using `terraform import`, import Amplify domain association using `appId` and `d % terraform import aws_amplify_domain_association.app d2ypk4k47z8u6/example.com ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/amplify_webhook.html.markdown b/website/docs/cdktf/typescript/r/amplify_webhook.html.markdown index 4122bd8fa578..dec10e5d8da2 100644 --- a/website/docs/cdktf/typescript/r/amplify_webhook.html.markdown +++ b/website/docs/cdktf/typescript/r/amplify_webhook.html.markdown @@ -51,6 +51,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `appId` - (Required) Unique ID for an Amplify app. * `branchName` - (Required) Name for a branch that is part of the Amplify app. * `description` - (Optional) Description for a webhook. @@ -94,4 +95,4 @@ Using `terraform import`, import Amplify webhook using a webhook ID. 
For example % terraform import aws_amplify_webhook.master a26b22a0-748b-4b57-b9a0-ae7e601fe4b1 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/api_gateway_account.html.markdown b/website/docs/cdktf/typescript/r/api_gateway_account.html.markdown index fed1fb0538f7..1804c9859ca7 100644 --- a/website/docs/cdktf/typescript/r/api_gateway_account.html.markdown +++ b/website/docs/cdktf/typescript/r/api_gateway_account.html.markdown @@ -87,6 +87,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `cloudwatchRoleArn` - (Optional) ARN of an IAM role for CloudWatch (to allow logging & monitoring). See more [in AWS Docs](https://docs.aws.amazon.com/apigateway/latest/developerguide/how-to-stage-settings.html#how-to-stage-settings-console). Logging & monitoring can be enabled/disabled and otherwise tuned on the API Gateway Stage level. * `reset_on_delete` - (Optional) If `true`, destroying the resource will reset account settings to default, otherwise account settings are not modified. Defaults to `false`. @@ -107,7 +108,7 @@ This resource exports the following attributes in addition to the arguments abov ## Import -In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import API Gateway Accounts using the word `api-gateway-account`. For example: +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import API Gateway Accounts using the account ID. For example: ```typescript // DO NOT EDIT. 
Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug @@ -121,20 +122,16 @@ import { ApiGatewayAccount } from "./.gen/providers/aws/api-gateway-account"; class MyConvertedCode extends TerraformStack { constructor(scope: Construct, name: string) { super(scope, name); - ApiGatewayAccount.generateConfigForImport( - this, - "demo", - "api-gateway-account" - ); + ApiGatewayAccount.generateConfigForImport(this, "demo", "123456789012"); } } ``` -Using `terraform import`, import API Gateway Accounts using the word `api-gateway-account`. For example: +Using `terraform import`, import API Gateway Accounts using the account ID. For example: ```console -% terraform import aws_api_gateway_account.demo api-gateway-account +% terraform import aws_api_gateway_account.demo 123456789012 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/api_gateway_api_key.html.markdown b/website/docs/cdktf/typescript/r/api_gateway_api_key.html.markdown index 959cf6fd9c4e..e0f0f5152eb7 100644 --- a/website/docs/cdktf/typescript/r/api_gateway_api_key.html.markdown +++ b/website/docs/cdktf/typescript/r/api_gateway_api_key.html.markdown @@ -40,6 +40,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the API key. * `customerId` - (Optional) An Amazon Web Services Marketplace customer identifier, when integrating with the Amazon Web Services SaaS Marketplace. * `description` - (Optional) API key description. Defaults to "Managed by Terraform". @@ -89,4 +90,4 @@ Using `terraform import`, import API Gateway Keys using the `id`. 
For example: % terraform import aws_api_gateway_api_key.example 8bklk8bl1k3sB38D9B3l0enyWT8c09B30lkq0blk ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/api_gateway_authorizer.html.markdown b/website/docs/cdktf/typescript/r/api_gateway_authorizer.html.markdown index 03d82d8796ee..ec01785284fd 100644 --- a/website/docs/cdktf/typescript/r/api_gateway_authorizer.html.markdown +++ b/website/docs/cdktf/typescript/r/api_gateway_authorizer.html.markdown @@ -133,6 +133,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `authorizerUri` - (Optional, required for type `TOKEN`/`REQUEST`) Authorizer's Uniform Resource Identifier (URI). 
This must be a well-formed Lambda function URI in the form of `arn:aws:apigateway:{region}:lambda:path/{service_api}`, e.g., `arn:aws:apigateway:us-west-2:lambda:path/2015-03-31/functions/arn:aws:lambda:us-west-2:012345678912:function:my-function/invocations` * `name` - (Required) Name of the authorizer @@ -183,4 +184,4 @@ Using `terraform import`, import AWS API Gateway Authorizer using the `REST-API- % terraform import aws_api_gateway_authorizer.authorizer 12345abcde/example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/api_gateway_base_path_mapping.html.markdown b/website/docs/cdktf/typescript/r/api_gateway_base_path_mapping.html.markdown index dcc53cde2e5a..c6b31d2466a4 100644 --- a/website/docs/cdktf/typescript/r/api_gateway_base_path_mapping.html.markdown +++ b/website/docs/cdktf/typescript/r/api_gateway_base_path_mapping.html.markdown @@ -72,11 +72,12 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `domainName` - (Required) Already-registered domain name to connect the API to. * `apiId` - (Required) ID of the API to connect. * `stageName` - (Optional) Name of a specific deployment stage to expose at the given path. If omitted, callers may select any stage by including its name as a path element after the base path. * `basePath` - (Optional) Path segment that must be prepended to the path when accessing the API via this mapping. If omitted, the API is exposed at the root of the given domain. -* `domain_name_id` - (Optional) The identifier for the domain name resource. Supported only for private custom domain names. 
+* `domainNameId` - (Optional) The identifier for the domain name resource. Supported only for private custom domain names. ## Attribute Reference @@ -178,4 +179,4 @@ For a non-root `basePath` and a private custom domain name: % terraform import aws_api_gateway_base_path_mapping.example api.internal.example.com/base-path/abcde12345 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/api_gateway_client_certificate.html.markdown b/website/docs/cdktf/typescript/r/api_gateway_client_certificate.html.markdown index 8409089fc438..3df0b722579d 100644 --- a/website/docs/cdktf/typescript/r/api_gateway_client_certificate.html.markdown +++ b/website/docs/cdktf/typescript/r/api_gateway_client_certificate.html.markdown @@ -38,6 +38,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` - (Optional) Description of the client certificate. * `tags` - (Optional) Key-value map of resource tags. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. @@ -80,4 +81,4 @@ Using `terraform import`, import API Gateway Client Certificates using the id. 
F % terraform import aws_api_gateway_client_certificate.demo ab1cqe ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/api_gateway_deployment.html.markdown b/website/docs/cdktf/typescript/r/api_gateway_deployment.html.markdown index 7d724da88c5d..9ede545ed192 100644 --- a/website/docs/cdktf/typescript/r/api_gateway_deployment.html.markdown +++ b/website/docs/cdktf/typescript/r/api_gateway_deployment.html.markdown @@ -17,8 +17,6 @@ To properly capture all REST API configuration in a deployment, this resource mu * For REST APIs that are configured via OpenAPI specification ([`aws_api_gateway_rest_api` resource](api_gateway_rest_api.html) `body` argument), no special dependency setup is needed beyond referencing the `id` attribute of that resource unless additional Terraform resources have further customized the REST API. * When the REST API configuration involves other Terraform resources ([`aws_api_gateway_integration` resource](api_gateway_integration.html), etc.), the dependency setup can be done with implicit resource references in the `triggers` argument or explicit resource references using the [resource `dependsOn` meta-argument](https://www.terraform.io/docs/configuration/meta-arguments/depends_on.html). The `triggers` argument should be preferred over `dependsOn`, since `dependsOn` can only capture dependency ordering and will not cause the resource to recreate (redeploy the REST API) with upstream configuration changes. -!> **WARNING:** We recommend using the [`aws_api_gateway_stage` resource](api_gateway_stage.html) instead of managing an API Gateway Stage via the `stageName` argument of this resource. When this resource is recreated (REST API redeployment) with the `stageName` configured, the stage is deleted and recreated. 
This will cause a temporary service interruption, increase Terraform plan differences, and can require a second Terraform apply to recreate any downstream stage configuration such as associated `aws_api_method_settings` resources. - ~> **NOTE:** Enable the [resource `lifecycle` configuration block `create_before_destroy` argument](https://www.terraform.io/language/meta-arguments/lifecycle#create_before_destroy) in this resource configuration to properly order redeployments in Terraform. Without enabling `create_before_destroy`, API Gateway can return errors such as `BadRequestException: Active stages pointing to this deployment must be moved or deleted` on recreation. ## Example Usage @@ -165,35 +163,17 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: -* `canarySettings` - (Optional, **Deprecated** Use an explicit [`aws_api_gateway_stage` resource](api_gateway_stage.html) instead) Input configuration for the canary deployment when the deployment is a canary release deployment. - See [`canary_settings](#canary_settings-argument-reference) below. - Has no effect when `stage_name` is not set. -* `description` - (Optional) Description of the deployment +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `description` - (Optional) Description of the deployment. * `restApiId` - (Required) REST API identifier. -* `stageDescription` - (Optional, **Deprecated** Use an explicit [`aws_api_gateway_stage` resource](api_gateway_stage.html) instead) Description to set on the stage managed by the `stageName` argument. - Has no effect when `stageName` is not set. 
-* `stageName` - (Optional, **Deprecated** Use an explicit [`aws_api_gateway_stage` resource](api_gateway_stage.html) instead) Name of the stage to create with this deployment. - If the specified stage already exists, it will be updated to point to the new deployment. - We recommend using the [`aws_api_gateway_stage` resource](api_gateway_stage.html) instead to manage stages. * `triggers` - (Optional) Map of arbitrary keys and values that, when changed, will trigger a redeployment. To force a redeployment without changing these keys/values, use the [`-replace` option](https://developer.hashicorp.com/terraform/cli/commands/plan#replace-address) with `terraform plan` or `terraform apply`. -* `variables` - (Optional) Map to set on the stage managed by the `stageName` argument. - -### `canarySettings` Argument Reference - -* `percentTraffic` - Percentage (0.0-100.0) of traffic routed to the canary deployment. -* `stageVariableOverrides` - Stage variable overrides used for the canary release deployment. They can override existing stage variables or add new stage variables for the canary release deployment. These stage variables are represented as a string-to-string map between stage variable names and their values. -* `useStageCache` - Boolean flag to indicate whether the canary release deployment uses the stage cache or not. +* `variables` - (Optional) Map to set on the related stage. 
## Attribute Reference This resource exports the following attributes in addition to the arguments above: * `id` - ID of the deployment -* `invokeUrl` - **DEPRECATED: Use the `aws_api_gateway_stage` resource instead.** URL to invoke the API pointing to the stage, - e.g., `https://z4675bid1j.execute-api.eu-west-2.amazonaws.com/prod` -* `executionArn` - **DEPRECATED: Use the `aws_api_gateway_stage` resource instead.** Execution ARN to be used in [`lambda_permission`](/docs/providers/aws/r/lambda_permission.html)'s `sourceArn` - when allowing API Gateway to invoke a Lambda function, - e.g., `arn:aws:execute-api:eu-west-2:123456789012:z4675bid1j/prod` * `createdDate` - Creation date of the deployment ## Import @@ -228,8 +208,8 @@ Using `terraform import`, import `aws_api_gateway_deployment` using `REST-API-ID % terraform import aws_api_gateway_deployment.example aabbccddee/1122334 ``` -The `stageName`, `stageDescription`, and `variables` arguments cannot be imported. Use the [`aws_api_gateway_stage` resource](api_gateway_stage.html) to import and manage stages. +The `variables` argument cannot be imported. Use the [`aws_api_gateway_stage` resource](api_gateway_stage.html) to import and manage stages. The `triggers` argument cannot be imported. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/api_gateway_documentation_part.html.markdown b/website/docs/cdktf/typescript/r/api_gateway_documentation_part.html.markdown index a5004ecbdeac..ed4bf1ba885d 100644 --- a/website/docs/cdktf/typescript/r/api_gateway_documentation_part.html.markdown +++ b/website/docs/cdktf/typescript/r/api_gateway_documentation_part.html.markdown @@ -51,6 +51,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `location` - (Required) Location of the targeted API entity of the to-be-created documentation part. See below. * `properties` - (Required) Content map of API-specific key-value pairs describing the targeted API entity. The map must be encoded as a JSON string, e.g., "{ \"description\": \"The API does ...\" }". Only Swagger-compliant key-value pairs can be exported and, hence, published. * `restApiId` - (Required) ID of the associated Rest API @@ -106,4 +107,4 @@ Using `terraform import`, import API Gateway documentation_parts using `REST-API % terraform import aws_api_gateway_documentation_part.example 5i4e1ko720/3oyy3t ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/api_gateway_documentation_version.html.markdown b/website/docs/cdktf/typescript/r/api_gateway_documentation_version.html.markdown index 7c9391afc068..19c741d43669 100644 --- a/website/docs/cdktf/typescript/r/api_gateway_documentation_version.html.markdown +++ b/website/docs/cdktf/typescript/r/api_gateway_documentation_version.html.markdown @@ -59,6 +59,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `version` - (Required) Version identifier of the API documentation snapshot. * `restApiId` - (Required) ID of the associated Rest API * `description` - (Optional) Description of the API documentation version. 
@@ -99,4 +100,4 @@ Using `terraform import`, import API Gateway documentation versions using `REST- % terraform import aws_api_gateway_documentation_version.example 5i4e1ko720/example-version ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/api_gateway_domain_name.html.markdown b/website/docs/cdktf/typescript/r/api_gateway_domain_name.html.markdown index 7ca49996c643..5496a3555532 100644 --- a/website/docs/cdktf/typescript/r/api_gateway_domain_name.html.markdown +++ b/website/docs/cdktf/typescript/r/api_gateway_domain_name.html.markdown @@ -220,6 +220,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `domainName` - (Required) Fully-qualified domain name to register. * `endpointConfiguration` - (Optional) Configuration block defining API endpoint information including type. See below. * `mutualTlsAuthentication` - (Optional) Mutual TLS authentication configuration for the domain name. See below. 
@@ -327,4 +328,4 @@ For a private custom domain name: % terraform import aws_api_gateway_domain_name.example dev.api.internal.example.com/abcde12345 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/api_gateway_domain_name_access_association.html.markdown b/website/docs/cdktf/typescript/r/api_gateway_domain_name_access_association.html.markdown index 3df2e6f821c4..3125e74ab7f4 100644 --- a/website/docs/cdktf/typescript/r/api_gateway_domain_name_access_association.html.markdown +++ b/website/docs/cdktf/typescript/r/api_gateway_domain_name_access_association.html.markdown @@ -40,6 +40,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `accessAssociationSource` - (Required) The identifier of the domain name access association source. For a `VPCE`, the value is the VPC endpoint ID. * `accessAssociationSourceType` - (Required) The type of the domain name access association source. Valid values are `VPCE`. * `domainNameArn` - (Required) The ARN of the domain name. @@ -55,6 +56,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. 
For example: + +```terraform +import { + to = aws_api_gateway_domain_name_access_association.example + identity = { + "arn" = "arn:aws:apigateway:us-east-1::/domainnames/example.com/accessassociation" + } +} + +resource "aws_api_gateway_domain_name_access_association" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the API Gateway domain name access association. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import API Gateway domain name acces associations using their `arn`. For example: ```typescript @@ -85,4 +107,4 @@ Using `terraform import`, import API Gateway domain name acces associations as u % terraform import aws_api_gateway_domain_name_access_association.example arn:aws:apigateway:us-west-2:123456789012:/domainnameaccessassociations/domainname/12qmzgp2.9m7ilski.test+hykg7a12e7/vpcesource/vpce-05de3f8f82740a748 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/api_gateway_gateway_response.html.markdown b/website/docs/cdktf/typescript/r/api_gateway_gateway_response.html.markdown index 242c7a787d09..8f1e3e377bd3 100644 --- a/website/docs/cdktf/typescript/r/api_gateway_gateway_response.html.markdown +++ b/website/docs/cdktf/typescript/r/api_gateway_gateway_response.html.markdown @@ -50,8 +50,9 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be managed. See the [AWS Documentation](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints) for supported values. Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `restApiId` - (Required) String identifier of the associated REST API. 
-* `responseType` - (Required) Response type of the associated GatewayResponse. +* `responseType` - (Required) Response type of the associated GatewayResponse. See the [AWS Documentation](https://docs.aws.amazon.com/apigateway/latest/developerguide/supported-gateway-response-types.html) for supported values. * `statusCode` - (Optional) HTTP status code of the Gateway Response. * `responseTemplates` - (Optional) Map of templates used to transform the response body. * `responseParameters` - (Optional) Map of parameters (paths, query strings and headers) of the Gateway Response. @@ -92,4 +93,4 @@ Using `terraform import`, import `aws_api_gateway_gateway_response` using `REST- % terraform import aws_api_gateway_gateway_response.example 12345abcde/UNAUTHORIZED ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/api_gateway_integration.html.markdown b/website/docs/cdktf/typescript/r/api_gateway_integration.html.markdown index 0c326d1c623b..3f8ffd81808a 100644 --- a/website/docs/cdktf/typescript/r/api_gateway_integration.html.markdown +++ b/website/docs/cdktf/typescript/r/api_gateway_integration.html.markdown @@ -253,6 +253,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `restApiId` - (Required) ID of the associated REST API. * `resourceId` - (Required) API resource ID. 
* `httpMethod` - (Required) HTTP method (`GET`, `POST`, `PUT`, `DELETE`, `HEAD`, `OPTION`, `ANY`) @@ -321,4 +322,4 @@ Using `terraform import`, import `aws_api_gateway_integration` using `REST-API-I % terraform import aws_api_gateway_integration.example 12345abcde/67890fghij/GET ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/api_gateway_integration_response.html.markdown b/website/docs/cdktf/typescript/r/api_gateway_integration_response.html.markdown index 9315e6a16ff0..b5b9233a7153 100644 --- a/website/docs/cdktf/typescript/r/api_gateway_integration_response.html.markdown +++ b/website/docs/cdktf/typescript/r/api_gateway_integration_response.html.markdown @@ -87,6 +87,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `contentHandling` - (Optional) How to handle request payload content type conversions. Supported values are `CONVERT_TO_BINARY` and `CONVERT_TO_TEXT`. If this property is not defined, the response payload will be passed through from the integration response to the method response without modification. * `responseParameters` - (Optional) Map of response parameters that can be read from the backend response. For example: `response_parameters = { "method.response.header.X-Some-Header" = "integration.response.header.X-Some-Other-Header" }`. * `responseTemplates` - (Optional) Map of templates used to transform the integration response body. 
@@ -128,4 +129,4 @@ Using `terraform import`, import `aws_api_gateway_integration_response` using `R % terraform import aws_api_gateway_integration_response.example 12345abcde/67890fghij/GET/200 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/api_gateway_method.html.markdown b/website/docs/cdktf/typescript/r/api_gateway_method.html.markdown index 0d36826f6dcd..14ab5001d4c2 100644 --- a/website/docs/cdktf/typescript/r/api_gateway_method.html.markdown +++ b/website/docs/cdktf/typescript/r/api_gateway_method.html.markdown @@ -123,6 +123,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
* `restApiId` - (Required) ID of the associated REST API * `resourceId` - (Required) API resource ID * `httpMethod` - (Required) HTTP Method (`GET`, `POST`, `PUT`, `DELETE`, `HEAD`, `OPTIONS`, `ANY`) @@ -174,4 +175,4 @@ Using `terraform import`, import `aws_api_gateway_method` using `REST-API-ID/RES % terraform import aws_api_gateway_method.example 12345abcde/67890fghij/GET ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/api_gateway_method_response.html.markdown b/website/docs/cdktf/typescript/r/api_gateway_method_response.html.markdown index 9e30f22d432c..e7e9bbf2b4eb 100644 --- a/website/docs/cdktf/typescript/r/api_gateway_method_response.html.markdown +++ b/website/docs/cdktf/typescript/r/api_gateway_method_response.html.markdown @@ -144,6 +144,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `restApiId` - (Required) The string identifier of the associated REST API. * `resourceId` - (Required) The Resource identifier for the method resource. * `httpMethod` - (Required) The HTTP verb of the method resource (`GET`, `POST`, `PUT`, `DELETE`, `HEAD`, `OPTIONS`, `ANY`). @@ -151,7 +152,7 @@ This resource supports the following arguments: * `responseModels` - (Optional) A map specifying the model resources used for the response's content type. Response models are represented as a key/value map, with a content type as the key and a Model name as the value. * `responseParameters` - (Optional) A map specifying required or optional response parameters that API Gateway can send back to the caller. 
A key defines a method response header name and the associated value is a boolean flag indicating whether the method response parameter is required. The method response header names must match the pattern of `method.response.header.{name}`, where `name` is a valid and unique header name. - The response parameter names defined here are available in the integration response to be mapped from an integration response header expressed in `integration.response.header.{name}`, a static value enclosed within a pair of single quotes (e.g., '`application/json'`), or a JSON expression from the back-end response payload in the form of `integration.response.body.{JSON-expression}`, where `JSON-expression` is a valid JSON expression without the `$` prefix.) +The response parameter names defined here are available in the integration response to be mapped from an integration response header expressed in `integration.response.header.{name}`, a static value enclosed within a pair of single quotes (e.g., `'application/json'`), or a JSON expression from the back-end response payload in the form of `integration.response.body.{JSON-expression}`, where `JSON-expression` is a valid JSON expression without the `$` prefix.
## Attribute Reference @@ -189,4 +190,4 @@ Using `terraform import`, import `aws_api_gateway_method_response` using `REST-A % terraform import aws_api_gateway_method_response.example 12345abcde/67890fghij/GET/200 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/api_gateway_method_settings.html.markdown b/website/docs/cdktf/typescript/r/api_gateway_method_settings.html.markdown index 01055a8271fc..ea772e68b5da 100644 --- a/website/docs/cdktf/typescript/r/api_gateway_method_settings.html.markdown +++ b/website/docs/cdktf/typescript/r/api_gateway_method_settings.html.markdown @@ -210,6 +210,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `restApiId` - (Required) ID of the REST API * `stageName` - (Required) Name of the stage * `methodPath` - (Required) Method path defined as `{resource_path}/{http_method}` for an individual method override, or `*/*` for overriding all methods in the stage. Ensure to trim any leading forward slashes in the path (e.g., `trimprefix(aws_api_gateway_resource.example.path, "/")`). 
@@ -264,4 +265,4 @@ Using `terraform import`, import `aws_api_gateway_method_settings` using `REST-A % terraform import aws_api_gateway_method_settings.example 12345abcde/example/test/GET ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/api_gateway_model.html.markdown b/website/docs/cdktf/typescript/r/api_gateway_model.html.markdown index 293e3ce39b9b..ff669364d209 100644 --- a/website/docs/cdktf/typescript/r/api_gateway_model.html.markdown +++ b/website/docs/cdktf/typescript/r/api_gateway_model.html.markdown @@ -51,6 +51,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `restApiId` - (Required) ID of the associated REST API * `name` - (Required) Name of the model * `description` - (Optional) Description of the model @@ -95,4 +96,4 @@ Using `terraform import`, import `aws_api_gateway_model` using `REST-API-ID/NAME % terraform import aws_api_gateway_model.example 12345abcde/example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/api_gateway_request_validator.html.markdown b/website/docs/cdktf/typescript/r/api_gateway_request_validator.html.markdown index 6d52d7860140..2604e89aab82 100644 --- a/website/docs/cdktf/typescript/r/api_gateway_request_validator.html.markdown +++ b/website/docs/cdktf/typescript/r/api_gateway_request_validator.html.markdown @@ -41,6 +41,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be 
[managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the request validator * `restApiId` - (Required) ID of the associated Rest API * `validateRequestBody` - (Optional) Boolean whether to validate request body. Defaults to `false`. @@ -84,4 +85,4 @@ Using `terraform import`, import `aws_api_gateway_request_validator` using `REST % terraform import aws_api_gateway_request_validator.example 12345abcde/67890fghij ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/api_gateway_resource.html.markdown b/website/docs/cdktf/typescript/r/api_gateway_resource.html.markdown index 08ed01e695b1..9887259cf6d4 100644 --- a/website/docs/cdktf/typescript/r/api_gateway_resource.html.markdown +++ b/website/docs/cdktf/typescript/r/api_gateway_resource.html.markdown @@ -45,6 +45,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `restApiId` - (Required) ID of the associated REST API * `parentId` - (Required) ID of the parent API resource * `pathPart` - (Required) Last path segment of this API resource. 
@@ -88,4 +89,4 @@ Using `terraform import`, import `aws_api_gateway_resource` using `REST-API-ID/R % terraform import aws_api_gateway_resource.example 12345abcde/67890fghij ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/api_gateway_rest_api.html.markdown b/website/docs/cdktf/typescript/r/api_gateway_rest_api.html.markdown index ce737ad63113..b3217b5802be 100644 --- a/website/docs/cdktf/typescript/r/api_gateway_rest_api.html.markdown +++ b/website/docs/cdktf/typescript/r/api_gateway_rest_api.html.markdown @@ -256,6 +256,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `apiKeySource` - (Optional) Source of the API key for requests. Valid values are `HEADER` (default) and `AUTHORIZER`. If importing an OpenAPI specification via the `body` argument, this corresponds to the [`x-amazon-apigateway-api-key-source` extension](https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-swagger-extensions-api-key-source.html). If the argument value is provided and is different than the OpenAPI value, the argument value will override the OpenAPI value. * `binaryMediaTypes` - (Optional) List of binary media types supported by the REST API. By default, the REST API supports only UTF-8-encoded text payloads. If importing an OpenAPI specification via the `body` argument, this corresponds to the [`x-amazon-apigateway-binary-media-types` extension](https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-swagger-extensions-binary-media-types.html). 
If the argument value is provided and is different than the OpenAPI value, the argument value will override the OpenAPI value. * `body` - (Optional) OpenAPI specification that defines the set of routes and integrations to create as part of the REST API. This configuration, and any updates to it, will replace all REST API configuration except values overridden in this resource configuration and other resource updates applied after this resource but before any `aws_api_gateway_deployment` creation. More information about REST API OpenAPI support can be found in the [API Gateway Developer Guide](https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-import-api.html). @@ -330,4 +331,4 @@ Using `terraform import`, import `aws_api_gateway_rest_api` using the REST API I ~> **NOTE:** Resource import does not currently support the `body` attribute. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/api_gateway_rest_api_policy.html.markdown b/website/docs/cdktf/typescript/r/api_gateway_rest_api_policy.html.markdown index 469caf3152c5..c54c92ad19ab 100644 --- a/website/docs/cdktf/typescript/r/api_gateway_rest_api_policy.html.markdown +++ b/website/docs/cdktf/typescript/r/api_gateway_rest_api_policy.html.markdown @@ -82,6 +82,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `restApiId` - (Required) ID of the REST API. * `policy` - (Required) JSON formatted policy document that controls access to the API Gateway. 
For more information about building AWS IAM policy documents with Terraform, see the [AWS IAM Policy Document Guide](https://learn.hashicorp.com/terraform/aws/iam-policy) @@ -123,4 +124,4 @@ Using `terraform import`, import `aws_api_gateway_rest_api_policy` using the RES % terraform import aws_api_gateway_rest_api_policy.example 12345abcde ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/api_gateway_rest_api_put.markdown b/website/docs/cdktf/typescript/r/api_gateway_rest_api_put.markdown index 370da252e846..c3557bb73711 100644 --- a/website/docs/cdktf/typescript/r/api_gateway_rest_api_put.markdown +++ b/website/docs/cdktf/typescript/r/api_gateway_rest_api_put.markdown @@ -166,13 +166,14 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -The following arguments are required: +This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `body` - (Required) PUT request body containing external API definitions. Currently, only OpenAPI definition JSON/YAML files are supported. The maximum size of the API definition file is 6MB. * `restApiId` - (Required) Identifier of the associated REST API. The following arguments are optional: * `failOnWarnings` - (Optional) Whether to rollback the API update when a warning is encountered. The default value is `false`.
* `parameters` - (Optional) Map of customizations for importing the specification in the `body` argument. For example, to exclude DocumentationParts from an imported API, use `ignore = "documentation"`. Additional documentation, including other parameters such as `basepath`, can be found in the [API Gateway Developer Guide](https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-import-api.html). * `triggers` - (Optional) Map of arbitrary keys and values that, when changed, will trigger a redeployment. To force a redeployment without changing these keys/values, use the [`-replace` option](https://developer.hashicorp.com/terraform/cli/commands/plan#replace-address) with `terraform plan` or `terraform apply`. @@ -219,4 +221,4 @@ Using `terraform import`, import API Gateway REST API Put using the `restApiId`. % terraform import aws_api_gateway_rest_api_put.example import-id-12345678 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/api_gateway_stage.html.markdown b/website/docs/cdktf/typescript/r/api_gateway_stage.html.markdown index 68a963b7ad95..2e4975030ce7 100644 --- a/website/docs/cdktf/typescript/r/api_gateway_stage.html.markdown +++ b/website/docs/cdktf/typescript/r/api_gateway_stage.html.markdown @@ -137,6 +137,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
* `restApiId` - (Required) ID of the associated REST API * `stageName` - (Required) Name of the stage * `deploymentId` - (Required) ID of the deployment that the stage points to @@ -146,8 +147,8 @@ This resource supports the following arguments: * `canarySettings` - (Optional) Configuration settings of a canary deployment. See [Canary Settings](#canary-settings) below. * `clientCertificateId` - (Optional) Identifier of a client certificate for the stage. * `description` - (Optional) Description of the stage. -* `documentationVersion` - (Optional) Version of the associated API documentation -* `variables` - (Optional) Map that defines the stage variables +* `documentationVersion` - (Optional) Version of the associated API documentation. +* `variables` - (Optional) Map that defines the stage variables. * `tags` - (Optional) Map of tags to assign to the resource. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. * `xrayTracingEnabled` - (Optional) Whether active tracing with X-ray is enabled. Defaults to `false`. 
@@ -210,4 +211,4 @@ Using `terraform import`, import `aws_api_gateway_stage` using `REST-API-ID/STAG % terraform import aws_api_gateway_stage.example 12345abcde/example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/api_gateway_usage_plan.html.markdown b/website/docs/cdktf/typescript/r/api_gateway_usage_plan.html.markdown index 1012183b0f8f..4a9be0837092 100644 --- a/website/docs/cdktf/typescript/r/api_gateway_usage_plan.html.markdown +++ b/website/docs/cdktf/typescript/r/api_gateway_usage_plan.html.markdown @@ -95,6 +95,7 @@ resource "aws_api_gateway_usage_plan" "example" { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the usage plan. * `description` - (Optional) Description of a usage plan. * `apiStages` - (Optional) Associated [API stages](#api-stages-arguments) of the usage plan. @@ -172,4 +173,4 @@ Using `terraform import`, import AWS API Gateway Usage Plan using the `id`. 
For % terraform import aws_api_gateway_usage_plan.myusageplan ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/api_gateway_usage_plan_key.html.markdown b/website/docs/cdktf/typescript/r/api_gateway_usage_plan_key.html.markdown index c6c4918876eb..bf4ab92ae51b 100644 --- a/website/docs/cdktf/typescript/r/api_gateway_usage_plan_key.html.markdown +++ b/website/docs/cdktf/typescript/r/api_gateway_usage_plan_key.html.markdown @@ -58,6 +58,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `keyId` - (Required) Identifier of the API key resource. * `keyType` - (Required) Type of the API key resource. Currently, the valid key type is API_KEY. * `usagePlanId` - (Required) Id of the usage plan resource representing to associate the key to. @@ -105,4 +106,4 @@ Using `terraform import`, import AWS API Gateway Usage Plan Key using the `USAGE % terraform import aws_api_gateway_usage_plan_key.key 12345abcde/zzz ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/api_gateway_vpc_link.html.markdown b/website/docs/cdktf/typescript/r/api_gateway_vpc_link.html.markdown index ca6122b83132..b8b0e5467dff 100644 --- a/website/docs/cdktf/typescript/r/api_gateway_vpc_link.html.markdown +++ b/website/docs/cdktf/typescript/r/api_gateway_vpc_link.html.markdown @@ -60,6 +60,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name used to label and identify the VPC link. * `description` - (Optional) Description of the VPC link. * `targetArns` - (Required, ForceNew) List of network load balancer arns in the VPC targeted by the VPC link. Currently AWS only supports 1 target. @@ -100,4 +101,4 @@ Using `terraform import`, import API Gateway VPC Link using the `id`. For exampl % terraform import aws_api_gateway_vpc_link.example 12345abcde ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/apigatewayv2_api.html.markdown b/website/docs/cdktf/typescript/r/apigatewayv2_api.html.markdown index b1c103c75f98..5eb3314fbcfb 100644 --- a/website/docs/cdktf/typescript/r/apigatewayv2_api.html.markdown +++ b/website/docs/cdktf/typescript/r/apigatewayv2_api.html.markdown @@ -67,6 +67,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the API. Must be less than or equal to 128 characters in length. * `protocolType` - (Required) API protocol. Valid values: `HTTP`, `WEBSOCKET`. * `apiKeySelectionExpression` - (Optional) An [API key selection expression](https://docs.aws.amazon.com/apigateway/latest/developerguide/apigateway-websocket-api-selection-expressions.html#apigateway-websocket-api-apikey-selection-expressions). 
@@ -146,4 +147,4 @@ Using `terraform import`, import `aws_apigatewayv2_api` using the API identifier % terraform import aws_apigatewayv2_api.example aabbccddee ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/apigatewayv2_api_mapping.html.markdown b/website/docs/cdktf/typescript/r/apigatewayv2_api_mapping.html.markdown index 2a757e880c75..0e22188546ba 100644 --- a/website/docs/cdktf/typescript/r/apigatewayv2_api_mapping.html.markdown +++ b/website/docs/cdktf/typescript/r/apigatewayv2_api_mapping.html.markdown @@ -43,6 +43,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `apiId` - (Required) API identifier. * `domainName` - (Required) Domain name. Use the [`aws_apigatewayv2_domain_name`](/docs/providers/aws/r/apigatewayv2_domain_name.html) resource to configure a domain name. * `stage` - (Required) API stage. Use the [`aws_apigatewayv2_stage`](/docs/providers/aws/r/apigatewayv2_stage.html) resource to configure an API stage. 
@@ -86,4 +87,4 @@ Using `terraform import`, import `aws_apigatewayv2_api_mapping` using the API ma % terraform import aws_apigatewayv2_api_mapping.example 1122334/ws-api.example.com ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/apigatewayv2_authorizer.html.markdown b/website/docs/cdktf/typescript/r/apigatewayv2_authorizer.html.markdown index ec803599d912..57207a8885ba 100644 --- a/website/docs/cdktf/typescript/r/apigatewayv2_authorizer.html.markdown +++ b/website/docs/cdktf/typescript/r/apigatewayv2_authorizer.html.markdown @@ -72,6 +72,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `apiId` - (Required) API identifier. * `authorizerType` - (Required) Authorizer type. Valid values: `JWT`, `REQUEST`. Specify `REQUEST` for a Lambda function using incoming request parameters. 
@@ -144,4 +145,4 @@ Using `terraform import`, import `aws_apigatewayv2_authorizer` using the API ide % terraform import aws_apigatewayv2_authorizer.example aabbccddee/1122334 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/apigatewayv2_deployment.html.markdown b/website/docs/cdktf/typescript/r/apigatewayv2_deployment.html.markdown index 82e99261c6d3..95ff1dad5e9e 100644 --- a/website/docs/cdktf/typescript/r/apigatewayv2_deployment.html.markdown +++ b/website/docs/cdktf/typescript/r/apigatewayv2_deployment.html.markdown @@ -94,6 +94,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `apiId` - (Required) API identifier. * `description` - (Optional) Description for the deployment resource. Must be less than or equal to 1024 characters in length. * `triggers` - (Optional) Map of arbitrary keys and values that, when changed, will trigger a redeployment. To force a redeployment without changing these keys/values, use the [`terraform taint` command](https://www.terraform.io/docs/commands/taint.html). @@ -139,4 +140,4 @@ Using `terraform import`, import `aws_apigatewayv2_deployment` using the API ide The `triggers` argument cannot be imported. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/apigatewayv2_domain_name.html.markdown b/website/docs/cdktf/typescript/r/apigatewayv2_domain_name.html.markdown index 598db8ab418d..d8b6749dfae1 100644 --- a/website/docs/cdktf/typescript/r/apigatewayv2_domain_name.html.markdown +++ b/website/docs/cdktf/typescript/r/apigatewayv2_domain_name.html.markdown @@ -99,6 +99,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `domainName` - (Required) Domain name. Must be between 1 and 512 characters in length. * `domainNameConfiguration` - (Required) Domain name configuration. See below. * `mutualTlsAuthentication` - (Optional) Mutual TLS authentication configuration for the domain name. @@ -167,4 +168,4 @@ Using `terraform import`, import `aws_apigatewayv2_domain_name` using the domain % terraform import aws_apigatewayv2_domain_name.example ws-api.example.com ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/apigatewayv2_integration.html.markdown b/website/docs/cdktf/typescript/r/apigatewayv2_integration.html.markdown index 34a2feb65641..e085f8df9d07 100644 --- a/website/docs/cdktf/typescript/r/apigatewayv2_integration.html.markdown +++ b/website/docs/cdktf/typescript/r/apigatewayv2_integration.html.markdown @@ -166,6 +166,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `apiId` - (Required) API identifier. * `integrationType` - (Required) Integration type of an integration. Valid values: `AWS` (supported only for WebSocket APIs), `AWS_PROXY`, `HTTP` (supported only for WebSocket APIs), `HTTP_PROXY`, `MOCK` (supported only for WebSocket APIs). For an HTTP API private integration, use `HTTP_PROXY`. @@ -244,4 +245,4 @@ Using `terraform import`, import `aws_apigatewayv2_integration` using the API id -> **Note:** The API Gateway managed integration created as part of [_quick_create_](https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-basic-concept.html#apigateway-definition-quick-create) cannot be imported. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/apigatewayv2_integration_response.html.markdown b/website/docs/cdktf/typescript/r/apigatewayv2_integration_response.html.markdown index 25314c310fee..ae0a102362f8 100644 --- a/website/docs/cdktf/typescript/r/apigatewayv2_integration_response.html.markdown +++ b/website/docs/cdktf/typescript/r/apigatewayv2_integration_response.html.markdown @@ -43,6 +43,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `apiId` - (Required) API identifier. * `integrationId` - (Required) Identifier of the [`aws_apigatewayv2_integration`](/docs/providers/aws/r/apigatewayv2_integration.html). * `integrationResponseKey` - (Required) Integration response key. 
@@ -88,4 +89,4 @@ Using `terraform import`, import `aws_apigatewayv2_integration_response` using t % terraform import aws_apigatewayv2_integration_response.example aabbccddee/1122334/998877 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/apigatewayv2_model.html.markdown b/website/docs/cdktf/typescript/r/apigatewayv2_model.html.markdown index d4bd351fd425..39efb812fff2 100644 --- a/website/docs/cdktf/typescript/r/apigatewayv2_model.html.markdown +++ b/website/docs/cdktf/typescript/r/apigatewayv2_model.html.markdown @@ -54,6 +54,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `apiId` - (Required) API identifier. * `contentType` - (Required) The content-type for the model, for example, `application/json`. Must be between 1 and 256 characters in length. * `name` - (Required) Name of the model. Must be alphanumeric. Must be between 1 and 128 characters in length. 
@@ -98,4 +99,4 @@ Using `terraform import`, import `aws_apigatewayv2_model` using the API identifi % terraform import aws_apigatewayv2_model.example aabbccddee/1122334 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/apigatewayv2_route.html.markdown b/website/docs/cdktf/typescript/r/apigatewayv2_route.html.markdown index 12bed89420a8..033dc9f17efa 100644 --- a/website/docs/cdktf/typescript/r/apigatewayv2_route.html.markdown +++ b/website/docs/cdktf/typescript/r/apigatewayv2_route.html.markdown @@ -102,6 +102,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `apiId` - (Required) API identifier. * `routeKey` - (Required) Route key for the route. For HTTP APIs, the route key can be either `$default`, or a combination of an HTTP method and resource path, for example, `GET /pets`. * `apiKeyRequired` - (Optional) Boolean whether an API key is required for the route. Defaults to `false`. Supported only for WebSocket APIs. @@ -163,4 +164,4 @@ Using `terraform import`, import `aws_apigatewayv2_route` using the API identifi -> **Note:** The API Gateway managed route created as part of [_quick_create_](https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-basic-concept.html#apigateway-definition-quick-create) cannot be imported. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/apigatewayv2_route_response.html.markdown b/website/docs/cdktf/typescript/r/apigatewayv2_route_response.html.markdown index 3d91dac29358..459daa7c8471 100644 --- a/website/docs/cdktf/typescript/r/apigatewayv2_route_response.html.markdown +++ b/website/docs/cdktf/typescript/r/apigatewayv2_route_response.html.markdown @@ -49,6 +49,7 @@ You can only define the $default route response for WebSocket APIs. You can use This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `apiId` - (Required) API identifier. * `routeId` - (Required) Identifier of the [`aws_apigatewayv2_route`](/docs/providers/aws/r/apigatewayv2_route.html). * `routeResponseKey` - (Required) Route response key. @@ -93,4 +94,4 @@ Using `terraform import`, import `aws_apigatewayv2_route_response` using the API % terraform import aws_apigatewayv2_route_response.example aabbccddee/1122334/998877 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/apigatewayv2_stage.html.markdown b/website/docs/cdktf/typescript/r/apigatewayv2_stage.html.markdown index a0a53f8d17e0..1b9121ab7cac 100644 --- a/website/docs/cdktf/typescript/r/apigatewayv2_stage.html.markdown +++ b/website/docs/cdktf/typescript/r/apigatewayv2_stage.html.markdown @@ -47,6 +47,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `accessLogSettings` - (Optional) Settings for logging access in this stage. Use the [`aws_api_gateway_account`](/docs/providers/aws/r/api_gateway_account.html) resource to configure [permissions for CloudWatch Logging](https://docs.aws.amazon.com/apigateway/latest/developerguide/set-up-logging.html#set-up-access-logging-permissions). * `autoDeploy` - (Optional) Whether updates to an API automatically trigger a new deployment. Defaults to `false`. Applicable for HTTP APIs. @@ -132,4 +133,4 @@ Using `terraform import`, import `aws_apigatewayv2_stage` using the API identifi -> **Note:** The API Gateway managed stage created as part of [_quick_create_](https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-basic-concept.html#apigateway-definition-quick-create) cannot be imported. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/apigatewayv2_vpc_link.html.markdown b/website/docs/cdktf/typescript/r/apigatewayv2_vpc_link.html.markdown index afe56177391b..0d1e41b0ef63 100644 --- a/website/docs/cdktf/typescript/r/apigatewayv2_vpc_link.html.markdown +++ b/website/docs/cdktf/typescript/r/apigatewayv2_vpc_link.html.markdown @@ -46,6 +46,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the VPC Link. Must be between 1 and 128 characters in length. * `securityGroupIds` - (Required) Security group IDs for the VPC Link. * `subnetIds` - (Required) Subnet IDs for the VPC Link. 
@@ -87,4 +88,4 @@ Using `terraform import`, import `aws_apigatewayv2_vpc_link` using the VPC Link % terraform import aws_apigatewayv2_vpc_link.example aabbccddee ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/app_cookie_stickiness_policy.html.markdown b/website/docs/cdktf/typescript/r/app_cookie_stickiness_policy.html.markdown index c840c9bbc5c6..413315a28ee9 100644 --- a/website/docs/cdktf/typescript/r/app_cookie_stickiness_policy.html.markdown +++ b/website/docs/cdktf/typescript/r/app_cookie_stickiness_policy.html.markdown @@ -54,6 +54,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the stickiness policy. * `loadBalancer` - (Required) Name of load balancer to which the policy should be attached. @@ -104,4 +105,4 @@ Using `terraform import`, import application cookie stickiness policies using th % terraform import aws_app_cookie_stickiness_policy.example my-elb:80:my-policy ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/appautoscaling_policy.html.markdown b/website/docs/cdktf/typescript/r/appautoscaling_policy.html.markdown index 045040e64bf7..c98abc36b103 100644 --- a/website/docs/cdktf/typescript/r/appautoscaling_policy.html.markdown +++ b/website/docs/cdktf/typescript/r/appautoscaling_policy.html.markdown @@ -271,17 +271,141 @@ class MyConvertedCode extends TerraformStack { ``` +### Predictive Scaling + +```typescript +// DO NOT EDIT. 
Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { Token, TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. + */ +import { AppautoscalingPolicy } from "./.gen/providers/aws/appautoscaling-policy"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + new AppautoscalingPolicy(this, "example", { + name: "example-policy", + policyType: "PredictiveScaling", + predictiveScalingPolicyConfiguration: { + metricSpecification: [ + { + predefinedMetricPairSpecification: { + predefinedMetricType: "ECSServiceMemoryUtilization", + }, + targetValue: Token.asString(40), + }, + ], + }, + resourceId: Token.asString(awsAppautoscalingTargetExample.resourceId), + scalableDimension: Token.asString( + awsAppautoscalingTargetExample.scalableDimension + ), + serviceNamespace: Token.asString( + awsAppautoscalingTargetExample.serviceNamespace + ), + }); + } +} + +``` + ## Argument Reference This resource supports the following arguments: * `name` - (Required) Name of the policy. Must be between 1 and 255 characters in length. -* `policyType` - (Optional) Policy type. Valid values are `StepScaling` and `TargetTrackingScaling`. Defaults to `StepScaling`. Certain services only support only one policy type. For more information see the [Target Tracking Scaling Policies](https://docs.aws.amazon.com/autoscaling/application/userguide/application-auto-scaling-target-tracking.html) and [Step Scaling Policies](https://docs.aws.amazon.com/autoscaling/application/userguide/application-auto-scaling-step-scaling-policies.html) documentation. +* `policyType` - (Optional) Policy type. Valid values are `StepScaling`, `TargetTrackingScaling`, and `PredictiveScaling`. Defaults to `StepScaling`. Certain services support only one policy type. 
For more information see the [Target Tracking Scaling Policies](https://docs.aws.amazon.com/autoscaling/application/userguide/application-auto-scaling-target-tracking.html), [Step Scaling Policies](https://docs.aws.amazon.com/autoscaling/application/userguide/application-auto-scaling-step-scaling-policies.html), and [Predictive Scaling](https://docs.aws.amazon.com/autoscaling/application/userguide/application-auto-scaling-predictive-scaling.html) documentation. +* `predictiveScalingPolicyConfiguration` - (Optional) Predictive scaling policy configuration, requires `policy_type = "PredictiveScaling"`. See supported fields below. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `resourceId` - (Required) Resource type and unique identifier string for the resource associated with the scaling policy. Documentation can be found in the `ResourceId` parameter at: [AWS Application Auto Scaling API Reference](https://docs.aws.amazon.com/autoscaling/application/APIReference/API_RegisterScalableTarget.html) * `scalableDimension` - (Required) Scalable dimension of the scalable target. Documentation can be found in the `ScalableDimension` parameter at: [AWS Application Auto Scaling API Reference](https://docs.aws.amazon.com/autoscaling/application/APIReference/API_RegisterScalableTarget.html) * `serviceNamespace` - (Required) AWS service namespace of the scalable target. Documentation can be found in the `ServiceNamespace` parameter at: [AWS Application Auto Scaling API Reference](https://docs.aws.amazon.com/autoscaling/application/APIReference/API_RegisterScalableTarget.html) * `stepScalingPolicyConfiguration` - (Optional) Step scaling policy configuration, requires `policy_type = "StepScaling"` (default). 
See supported fields below. -* `targetTrackingScalingPolicyConfiguration` - (Optional) Target tracking policy, requires `policy_type = "TargetTrackingScaling"`. See supported fields below. +* `targetTrackingScalingPolicyConfiguration` - (Optional) Target tracking policy configuration, requires `policy_type = "TargetTrackingScaling"`. See supported fields below. + +### predictive_scaling_policy_configuration + +The `predictiveScalingPolicyConfiguration` configuration block supports the following arguments: + +* `maxCapacityBreachBehavior` - (Optional) The behavior that should be applied if the forecast capacity approaches or exceeds the maximum capacity. Valid values are `HonorMaxCapacity` and `IncreaseMaxCapacity`. +* `maxCapacityBuffer` - (Optional) Size of the capacity buffer to use when the forecast capacity is close to or exceeds the maximum capacity. The value is specified as a percentage relative to the forecast capacity. Required if the `maxCapacityBreachBehavior` argument is set to `IncreaseMaxCapacity`, and cannot be used otherwise. +* `metricSpecification` - (Required) Metrics and target utilization to use for predictive scaling. See supported fields below. +* `mode` - (Optional) Predictive scaling mode. Valid values are `ForecastOnly` and `ForecastAndScale`. +* `schedulingBufferTime` - (Optional) Amount of time, in seconds, that the start time can be advanced. + +### predictive_scaling_policy_configuration metric_specification + +The `predictiveScalingPolicyConfiguration` `metricSpecification` configuration block supports the following arguments: + +* `customizedCapacityMetricSpecification` - (Optional) Customized capacity metric specification. See supported fields below. +* `customizedLoadMetricSpecification` - (Optional) Customized load metric specification. See supported fields below. +* `customizedScalingMetricSpecification` - (Optional) Customized scaling metric specification. See supported fields below. 
+* `predefinedLoadMetricSpecification` - (Optional) Predefined load metric specification. See supported fields below. +* `predefinedMetricPairSpecification` - (Optional) Predefined metric pair specification that determines the appropriate scaling metric and load metric to use. See supported fields below. +* `predefinedScalingMetricSpecification` - (Optional) Predefined scaling metric specification. See supported fields below. +* `targetValue` - (Required) Target utilization. + +### predictive_scaling_policy_configuration metric_specification customized_capacity_metric_specification, customized_load_metric_specification and customized_scaling_metric_specification + +The `predictiveScalingPolicyConfiguration` `metricSpecification` `customizedCapacityMetricSpecification`, `customizedLoadMetricSpecification`, and `customizedScalingMetricSpecification` configuration blocks support the following arguments: + +* `metricDataQuery` - (Required) One or more metric data queries to provide data points for a metric specification. See supported fields below. + +### predictive_scaling_policy_configuration metric_specification customized_capacity_metric_specification metric_data_query + +The `predictiveScalingPolicyConfiguration` `metricSpecification` `customizedCapacityMetricSpecification` `metricDataQuery` configuration block supports the following arguments: + +* `expression` - (Optional) Math expression to perform on the returned data, if this object is performing a math expression. +* `id` - (Required) Short name that identifies the object's results in the response. +* `label` - (Optional) Human-readable label for this metric or expression. +* `metricStat` - (Optional) Information about the metric data to return. See supported fields below. +* `returnData` - (Optional) Whether to return the timestamps and raw data values of this metric. 
+ +### predictive_scaling_policy_configuration metric_specification customized_capacity_metric_specification metric_data_query metric_stat + +The `predictiveScalingPolicyConfiguration` `metricSpecification` `customizedCapacityMetricSpecification` `metricDataQuery` `metricStat` configuration block supports the following arguments: + +* `metric` - (Required) CloudWatch metric to return, including the metric name, namespace, and dimensions. See supported fields below. +* `stat` - (Required) Statistic to return. +* `unit` - (Optional) Unit to use for the returned data points. + +### predictive_scaling_policy_configuration metric_specification customized_capacity_metric_specification metric_data_query metric_stat metric + +The `predictiveScalingPolicyConfiguration` `metricSpecification` `customizedCapacityMetricSpecification` `metricDataQuery` `metricStat` `metric` configuration block supports the following arguments: + +* `dimension` - (Optional) Dimensions of the metric. See supported fields below. +* `metricName` - (Optional) Name of the metric. +* `namespace` - (Optional) Namespace of the metric. + +### predictive_scaling_policy_configuration metric_specification customized_capacity_metric_specification metric_data_query metric_stat metric dimension + +The `predictiveScalingPolicyConfiguration` `metricSpecification` `customizedCapacityMetricSpecification` `metricDataQuery` `metricStat` `metric` `dimension` configuration block supports the following arguments: + +* `name` - (Optional) Name of the dimension. +* `value` - (Optional) Value of the dimension. + +### predictive_scaling_policy_configuration metric_specification predefined_load_metric_specification + +The `predictiveScalingPolicyConfiguration` `metricSpecification` `predefinedLoadMetricSpecification` configuration block supports the following arguments: + +* `predefinedMetricType` - (Required) Metric type. +* `resourceLabel` - (Optional) Label that uniquely identifies a target group. 
+ +### predictive_scaling_policy_configuration metric_specification predefined_metric_pair_specification + +The `predictiveScalingPolicyConfiguration` `metricSpecification` `predefinedMetricPairSpecification` configuration block supports the following arguments: + +* `predefinedMetricType` - (Required) Which metrics to use. There are two different types of metrics for each metric type: one is a load metric and one is a scaling metric. +* `resourceLabel` - (Optional) Label that uniquely identifies a specific target group from which to determine the total and average request count. + +### predictive_scaling_policy_configuration metric_specification predefined_scaling_metric_specification + +The `predictiveScalingPolicyConfiguration` `metricSpecification` `predefinedScalingMetricSpecification` configuration block supports the following arguments: + +* `predefinedMetricType` - (Required) Metric type. +* `resourceLabel` - (Optional) Label that uniquely identifies a specific target group from which to determine the average request count. 
### step_scaling_policy_configuration @@ -497,4 +621,4 @@ Using `terraform import`, import Application AutoScaling Policy using the `servi % terraform import aws_appautoscaling_policy.test-policy service-namespace/resource-id/scalable-dimension/policy-name ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/appautoscaling_scheduled_action.html.markdown b/website/docs/cdktf/typescript/r/appautoscaling_scheduled_action.html.markdown index eae157c5bd56..40e929993181 100644 --- a/website/docs/cdktf/typescript/r/appautoscaling_scheduled_action.html.markdown +++ b/website/docs/cdktf/typescript/r/appautoscaling_scheduled_action.html.markdown @@ -100,6 +100,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the scheduled action. * `serviceNamespace` - (Required) Namespace of the AWS service. Documentation can be found in the `ServiceNamespace` parameter at: [AWS Application Auto Scaling API Reference](https://docs.aws.amazon.com/autoscaling/application/APIReference/API_PutScheduledAction.html) Example: ecs * `resourceId` - (Required) Identifier of the resource associated with the scheduled action. Documentation can be found in the `ResourceId` parameter at: [AWS Application Auto Scaling API Reference](https://docs.aws.amazon.com/autoscaling/application/APIReference/API_PutScheduledAction.html) @@ -121,4 +122,4 @@ This resource exports the following attributes in addition to the arguments abov * `arn` - ARN of the scheduled action. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/appautoscaling_target.html.markdown b/website/docs/cdktf/typescript/r/appautoscaling_target.html.markdown index 15f03455bd59..8f97e1665bc8 100644 --- a/website/docs/cdktf/typescript/r/appautoscaling_target.html.markdown +++ b/website/docs/cdktf/typescript/r/appautoscaling_target.html.markdown @@ -158,6 +158,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `maxCapacity` - (Required) Max capacity of the scalable target. * `minCapacity` - (Required) Min capacity of the scalable target. * `resourceId` - (Required) Resource type and unique identifier string for the resource associated with the scaling policy. 
Documentation can be found in the `ResourceId` parameter at: [AWS Application Auto Scaling API Reference](https://docs.aws.amazon.com/autoscaling/application/APIReference/API_RegisterScalableTarget.html#API_RegisterScalableTarget_RequestParameters) @@ -214,4 +215,4 @@ Using `terraform import`, import Application AutoScaling Target using the `servi % terraform import aws_appautoscaling_target.test-target service-namespace/resource-id/scalable-dimension ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/appconfig_application.html.markdown b/website/docs/cdktf/typescript/r/appconfig_application.html.markdown index b73d13c85f86..d78754414402 100644 --- a/website/docs/cdktf/typescript/r/appconfig_application.html.markdown +++ b/website/docs/cdktf/typescript/r/appconfig_application.html.markdown @@ -42,6 +42,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name for the application. Must be between 1 and 64 characters in length. * `description` - (Optional) Description of the application. Can be at most 1024 characters. * `tags` - (Optional) Map of tags to assign to the resource. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. 
@@ -82,4 +83,4 @@ Using `terraform import`, import AppConfig Applications using their application % terraform import aws_appconfig_application.example 71rxuzt ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/appconfig_configuration_profile.html.markdown b/website/docs/cdktf/typescript/r/appconfig_configuration_profile.html.markdown index 998e61f5a66a..6753fdb0ada3 100644 --- a/website/docs/cdktf/typescript/r/appconfig_configuration_profile.html.markdown +++ b/website/docs/cdktf/typescript/r/appconfig_configuration_profile.html.markdown @@ -50,6 +50,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `applicationId` - (Required, Forces new resource) Application ID. Must be between 4 and 7 characters in length. * `locationUri` - (Required, Forces new resource) URI to locate the configuration. You can specify the AWS AppConfig hosted configuration store, Systems Manager (SSM) document, an SSM Parameter Store parameter, or an Amazon S3 object. For the hosted configuration store, specify `hosted`. For an SSM document, specify either the document name in the format `ssm-document://` or the ARN. For a parameter, specify either the parameter name in the format `ssm-parameter://` or the ARN. For an Amazon S3 object, specify the URI in the following format: `s3:///`. * `name` - (Required) Name for the configuration profile. Must be between 1 and 128 characters in length. 
@@ -108,4 +109,4 @@ Using `terraform import`, import AppConfig Configuration Profiles using the conf % terraform import aws_appconfig_configuration_profile.example 71abcde:11xxxxx ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/appconfig_deployment.html.markdown b/website/docs/cdktf/typescript/r/appconfig_deployment.html.markdown index a8de25d6e0e8..562d5cf0168b 100644 --- a/website/docs/cdktf/typescript/r/appconfig_deployment.html.markdown +++ b/website/docs/cdktf/typescript/r/appconfig_deployment.html.markdown @@ -55,6 +55,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `applicationId` - (Required, Forces new resource) Application ID. Must be between 4 and 7 characters in length. * `configurationProfileId` - (Required, Forces new resource) Configuration profile ID. Must be between 4 and 7 characters in length. * `configurationVersion` - (Required, Forces new resource) Configuration version to deploy. Can be at most 1024 characters. 
@@ -107,4 +108,4 @@ Using `terraform import`, import AppConfig Deployments using the application ID, % terraform import aws_appconfig_deployment.example 71abcde/11xxxxx/1 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/appconfig_deployment_strategy.html.markdown b/website/docs/cdktf/typescript/r/appconfig_deployment_strategy.html.markdown index ff8471d13049..7d3812b7d7f4 100644 --- a/website/docs/cdktf/typescript/r/appconfig_deployment_strategy.html.markdown +++ b/website/docs/cdktf/typescript/r/appconfig_deployment_strategy.html.markdown @@ -47,6 +47,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `deploymentDurationInMinutes` - (Required) Total amount of time for a deployment to last. Minimum value of 0, maximum value of 1440. * `growthFactor` - (Required) Percentage of targets to receive a deployed configuration during each interval. Minimum value of 1.0, maximum value of 100.0. * `name` - (Required, Forces new resource) Name for the deployment strategy. Must be between 1 and 64 characters in length. 
@@ -96,4 +97,4 @@ Using `terraform import`, import AppConfig Deployment Strategies using their dep % terraform import aws_appconfig_deployment_strategy.example 11xxxxx ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/appconfig_environment.html.markdown b/website/docs/cdktf/typescript/r/appconfig_environment.html.markdown index 2b247a12496a..98a488fb0aff 100644 --- a/website/docs/cdktf/typescript/r/appconfig_environment.html.markdown +++ b/website/docs/cdktf/typescript/r/appconfig_environment.html.markdown @@ -63,6 +63,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `applicationId` - (Required, Forces new resource) AppConfig application ID. Must be between 4 and 7 characters in length. * `name` - (Required) Name for the environment. Must be between 1 and 64 characters in length. * `description` - (Optional) Description of the environment. Can be at most 1024 characters. 
@@ -119,4 +120,4 @@ Using `terraform import`, import AppConfig Environments using the environment ID % terraform import aws_appconfig_environment.example 71abcde:11xxxxx ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/appconfig_extension.html.markdown b/website/docs/cdktf/typescript/r/appconfig_extension.html.markdown index b7eb08400765..b398f918fb8f 100644 --- a/website/docs/cdktf/typescript/r/appconfig_extension.html.markdown +++ b/website/docs/cdktf/typescript/r/appconfig_extension.html.markdown @@ -87,6 +87,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) A name for the extension. Each extension name in your account must be unique. Extension versions use the same name. * `description` - (Optional) Information about the extension. * `actionPoint` - (Required) The action points defined in the extension. [Detailed below](#action_point). @@ -153,4 +154,4 @@ Using `terraform import`, import AppConfig Extensions using their extension ID. 
% terraform import aws_appconfig_extension.example 71rxuzt ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/appconfig_extension_association.html.markdown b/website/docs/cdktf/typescript/r/appconfig_extension_association.html.markdown index 14ed6d603250..99755935753b 100644 --- a/website/docs/cdktf/typescript/r/appconfig_extension_association.html.markdown +++ b/website/docs/cdktf/typescript/r/appconfig_extension_association.html.markdown @@ -101,6 +101,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `extensionArn` - (Required) The ARN of the extension defined in the association. * `resourceArn` - (Optional) The ARN of the application, configuration profile, or environment to associate with the extension. * `parameters` - (Optional) The parameter names and values defined for the association. 
@@ -145,4 +146,4 @@ Using `terraform import`, import AppConfig Extension Associations using their ex % terraform import aws_appconfig_extension_association.example 71rxuzt ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/appconfig_hosted_configuration_version.html.markdown b/website/docs/cdktf/typescript/r/appconfig_hosted_configuration_version.html.markdown index 5290a4eb7e0c..d2f5ba4d9e50 100644 --- a/website/docs/cdktf/typescript/r/appconfig_hosted_configuration_version.html.markdown +++ b/website/docs/cdktf/typescript/r/appconfig_hosted_configuration_version.html.markdown @@ -115,10 +115,63 @@ class MyConvertedCode extends TerraformStack { ``` +### Multi-variant Feature Flags + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { Token, Fn, TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. 
+ */ +import { AppconfigHostedConfigurationVersion } from "./.gen/providers/aws/appconfig-hosted-configuration-version"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + new AppconfigHostedConfigurationVersion(this, "example", { + applicationId: Token.asString(awsAppconfigApplicationExample.id), + configurationProfileId: Token.asString( + awsAppconfigConfigurationProfileExample.configurationProfileId + ), + content: Token.asString( + Fn.jsonencode({ + flags: { + loggingenabled: { + name: "loggingEnabled", + }, + }, + values: { + loggingenabled: { + _variants: Fn.concat([ + "${[ for user_id in ${" + + appcfgEnableLoggingUserIds.value + + '} : { # Flat list of userIds\n enabled = true,\n name = "usersWithLoggingEnabled_${user_id}",\n rule = "(or (eq $userId \\"${user_id}\\"))"\n }]}', + [ + { + enabled: false, + name: "Default", + }, + ], + ]), + }, + }, + version: "1", + }) + ), + contentType: "application/json", + description: "Example Multi-variant Feature Flag Configuration Version", + }); + } +} + +``` + ## Argument Reference This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `applicationId` - (Required, Forces new resource) Application ID. * `configurationProfileId` - (Required, Forces new resource) Configuration profile ID. * `content` - (Required, Forces new resource) Content of the configuration or the configuration data. 
@@ -165,4 +218,4 @@ Using `terraform import`, import AppConfig Hosted Configuration Versions using t % terraform import aws_appconfig_hosted_configuration_version.example 71abcde/11xxxxx/2 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/appfabric_app_authorization.html.markdown b/website/docs/cdktf/typescript/r/appfabric_app_authorization.html.markdown index 2862d2a65f0d..94c888130aff 100644 --- a/website/docs/cdktf/typescript/r/appfabric_app_authorization.html.markdown +++ b/website/docs/cdktf/typescript/r/appfabric_app_authorization.html.markdown @@ -57,6 +57,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `app` - (Required) The name of the application for valid values see https://docs.aws.amazon.com/appfabric/latest/api/API_CreateAppAuthorization.html. * `appBundleArn` - (Required) The Amazon Resource Name (ARN) of the app bundle to use for the request. * `authType` - (Required) The authorization type for the app authorization valid values are oauth2 and apiKey. 
@@ -99,4 +100,4 @@ This resource exports the following attributes in addition to the arguments abov * `update` - (Default `30m`) * `delete` - (Default `30m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/appfabric_app_authorization_connection.html.markdown b/website/docs/cdktf/typescript/r/appfabric_app_authorization_connection.html.markdown index 19e74c480332..5fa1cd1a4721 100644 --- a/website/docs/cdktf/typescript/r/appfabric_app_authorization_connection.html.markdown +++ b/website/docs/cdktf/typescript/r/appfabric_app_authorization_connection.html.markdown @@ -41,6 +41,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `appBundleArn` - (Required) The Amazon Resource Name (ARN) of the app bundle to use for the request. * `appAuthorizationArn` - (Required) The Amazon Resource Name (ARN) or Universal Unique Identifier (UUID) of the app authorization to use for the request. * `authRequest` - (Optional) Contains OAuth2 authorization information.This is required if the app authorization for the request is configured with an OAuth2 (oauth2) authorization type. 
@@ -63,4 +64,4 @@ This resource exports the following attributes in addition to the arguments abov * `create` - (Default `30m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/appfabric_app_bundle.html.markdown b/website/docs/cdktf/typescript/r/appfabric_app_bundle.html.markdown index 880062c90537..17fa61022f0a 100644 --- a/website/docs/cdktf/typescript/r/appfabric_app_bundle.html.markdown +++ b/website/docs/cdktf/typescript/r/appfabric_app_bundle.html.markdown @@ -43,6 +43,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `customerManagedKeyArn` - (Optional) The Amazon Resource Name (ARN) of the AWS Key Management Service (AWS KMS) key to use to encrypt the application data. If this is not specified, an AWS owned key is used for encryption. * `tags` - (Optional) Map of tags to assign to the resource. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. @@ -55,6 +56,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. 
For example: + +```terraform +import { + to = aws_appfabric_app_bundle.example + identity = { + "arn" = "arn:aws:appfabric:us-east-1:123456789012:appbundle/a1b2c3d4-5678-90ab-cdef-EXAMPLE11111" + } +} + +resource "aws_appfabric_app_bundle" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the AppFabric app bundle. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import AppFabric AppBundle using the `arn`. For example: ```typescript @@ -85,4 +107,4 @@ Using `terraform import`, import AppFabric AppBundle using the `arn`. For exampl % terraform import aws_appfabric_app_bundle.example arn:aws:appfabric:[region]:[account]:appbundle/ee5587b4-5765-4288-a202-xxxxxxxxxx ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/appfabric_ingestion.html.markdown b/website/docs/cdktf/typescript/r/appfabric_ingestion.html.markdown index eb67760a3750..d37f66a0cf2f 100644 --- a/website/docs/cdktf/typescript/r/appfabric_ingestion.html.markdown +++ b/website/docs/cdktf/typescript/r/appfabric_ingestion.html.markdown @@ -46,6 +46,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `app` - (Required) Name of the application. Refer to the AWS Documentation for the [list of valid values](https://docs.aws.amazon.com/appfabric/latest/api/API_CreateIngestion.html#appfabric-CreateIngestion-request-app) * `appBundleArn` - (Required) Amazon Resource Name (ARN) of the app bundle to use for the request. 
@@ -92,4 +93,4 @@ Using `terraform import`, import AppFabric Ingestion using the `app_bundle_ident % terraform import aws_appfabric_ingestion.example arn:aws:appfabric:[region]:[account]:appbundle/a9b91477-8831-43c0-970c-xxxxxxxxxx,arn:aws:appfabric:[region]:[account]:appbundle/a9b91477-8831-43c0-970c-xxxxxxxxxx/ingestion/32251416-710b-4425-96ca-xxxxxxxxxx ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/appfabric_ingestion_destination.html.markdown b/website/docs/cdktf/typescript/r/appfabric_ingestion_destination.html.markdown index c4832ebb30b0..8ae6a2e43ed0 100644 --- a/website/docs/cdktf/typescript/r/appfabric_ingestion_destination.html.markdown +++ b/website/docs/cdktf/typescript/r/appfabric_ingestion_destination.html.markdown @@ -68,6 +68,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `appBundleArn` - (Required) The Amazon Resource Name (ARN) of the app bundle to use for the request. * `ingestionArn` - (Required) The Amazon Resource Name (ARN) of the ingestion to use for the request. * `destinationConfiguration` - (Required) Contains information about the destination of ingested data. 
@@ -120,4 +121,4 @@ This resource exports the following attributes in addition to the arguments abov * `update` - (Default `5m`) * `delete` - (Default `5m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/appflow_connector_profile.html.markdown b/website/docs/cdktf/typescript/r/appflow_connector_profile.html.markdown index 1177a911d290..1d15e34c33ba 100644 --- a/website/docs/cdktf/typescript/r/appflow_connector_profile.html.markdown +++ b/website/docs/cdktf/typescript/r/appflow_connector_profile.html.markdown @@ -117,6 +117,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name ` (Required) - Name of the connector profile. The name is unique for each `ConnectorProfile` in your AWS account. * `connectionMode` (Required) - Indicates the connection mode and specifies whether it is public or private. Private flows use AWS PrivateLink to route data over AWS infrastructure without exposing it to the public internet. One of: `Public`, `Private`. * `connectorLabel` (Optional) - The label of the connector. The label is unique for each ConnectorRegistration in your AWS account. Only needed if calling for `CustomConnector` connector type. @@ -357,7 +358,33 @@ This resource exports the following attributes in addition to the arguments abov ## Import -In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import AppFlow Connector Profile using the connector profile `arn`. 
For example: +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_appflow_connector_profile.example + identity = { + name = "example_profile" + } +} + +resource "aws_appflow_connector_profile" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `name` (String) Name of the Appflow connector profile. + +#### Optional + +* `accountId` (String) AWS Account where this resource is managed. +* `region` (String) Region where this resource is managed. + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import AppFlow Connector Profile using the connector profile `name`. For example: ```typescript // DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug @@ -373,21 +400,21 @@ class MyConvertedCode extends TerraformStack { super(scope, name); AppflowConnectorProfile.generateConfigForImport( this, - "profile", - "arn:aws:appflow:us-west-2:123456789012:connectorprofile/example-profile" + "example", + "example-profile" ); } } ``` -Using `terraform import`, import AppFlow Connector Profile using the connector profile `arn`. For example: +Using `terraform import`, import AppFlow Connector Profile using the connector profile `name`. 
For example: ```console -% terraform import aws_appflow_connector_profile.profile arn:aws:appflow:us-west-2:123456789012:connectorprofile/example-profile +% terraform import aws_appflow_connector_profile.example example-profile ``` [1]: https://docs.aws.amazon.com/appflow/1.0/APIReference/Welcome.html [2]: https://docs.aws.amazon.com/appflow/1.0/APIReference/API_CreateConnectorProfile.html - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/appflow_flow.html.markdown b/website/docs/cdktf/typescript/r/appflow_flow.html.markdown index f7f2e591cf2f..7626e8aa578b 100644 --- a/website/docs/cdktf/typescript/r/appflow_flow.html.markdown +++ b/website/docs/cdktf/typescript/r/appflow_flow.html.markdown @@ -174,6 +174,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the flow. * `destinationFlowConfig` - (Required) A [Destination Flow Config](#destination-flow-config) that controls how Amazon AppFlow places data in the destination connector. * `sourceFlowConfig` - (Required) The [Source Flow Config](#source-flow-config) that controls how Amazon AppFlow retrieves data from the source connector. @@ -484,7 +485,33 @@ This resource exports the following attributes in addition to the arguments abov ## Import -In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import AppFlow flows using the `arn`. For example: +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. 
For example: + +```terraform +import { + to = aws_appflow_flow.example + identity = { + name = "example-flow" + } +} + +resource "aws_appflow_flow" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `name` (String) Name of the AppFlow flow. + +#### Optional + +* `accountId` (String) AWS Account where this resource is managed. +* `region` (String) Region where this resource is managed. + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import AppFlow flows using the `name`. For example: ```typescript // DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug @@ -498,20 +525,16 @@ import { AppflowFlow } from "./.gen/providers/aws/appflow-flow"; class MyConvertedCode extends TerraformStack { constructor(scope: Construct, name: string) { super(scope, name); - AppflowFlow.generateConfigForImport( - this, - "example", - "arn:aws:appflow:us-west-2:123456789012:flow/example-flow" - ); + AppflowFlow.generateConfigForImport(this, "example", "example-flow"); } } ``` -Using `terraform import`, import AppFlow flows using the `arn`. For example: +Using `terraform import`, import AppFlow flows using the `name`. 
For example: ```console -% terraform import aws_appflow_flow.example arn:aws:appflow:us-west-2:123456789012:flow/example-flow +% terraform import aws_appflow_flow.example example-flow ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/appintegrations_data_integration.html.markdown b/website/docs/cdktf/typescript/r/appintegrations_data_integration.html.markdown index b0e081d6b413..904e4976ce40 100644 --- a/website/docs/cdktf/typescript/r/appintegrations_data_integration.html.markdown +++ b/website/docs/cdktf/typescript/r/appintegrations_data_integration.html.markdown @@ -49,6 +49,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` - (Optional) Specifies the description of the Data Integration. * `kmsKey` - (Required) Specifies the KMS key Amazon Resource Name (ARN) for the Data Integration. * `name` - (Required) Specifies the name of the Data Integration. 
@@ -102,4 +103,4 @@ Using `terraform import`, import Amazon AppIntegrations Data Integrations using % terraform import aws_appintegrations_data_integration.example 12345678-1234-1234-1234-123456789123 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/appintegrations_event_integration.html.markdown b/website/docs/cdktf/typescript/r/appintegrations_event_integration.html.markdown index 01e8fa3a7452..df4599be1da5 100644 --- a/website/docs/cdktf/typescript/r/appintegrations_event_integration.html.markdown +++ b/website/docs/cdktf/typescript/r/appintegrations_event_integration.html.markdown @@ -46,6 +46,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` - (Optional) Description of the Event Integration. * `eventbridgeBus` - (Required) EventBridge bus. * `eventFilter` - (Required) Block that defines the configuration information for the event filter. The Event Filter block is documented below. 
@@ -96,4 +97,4 @@ Using `terraform import`, import Amazon AppIntegrations Event Integrations using % terraform import aws_appintegrations_event_integration.example example-name ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/applicationinsights_application.html.markdown b/website/docs/cdktf/typescript/r/applicationinsights_application.html.markdown index ad6de82f1bc7..f0eeb33d006e 100644 --- a/website/docs/cdktf/typescript/r/applicationinsights_application.html.markdown +++ b/website/docs/cdktf/typescript/r/applicationinsights_application.html.markdown @@ -62,6 +62,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `autoConfigEnabled` - (Optional) Indicates whether Application Insights automatically configures unmonitored resources in the resource group. * `autoCreate` - (Optional) Configures all of the resources in the resource group by applying the recommended configurations. * `cweMonitorEnabled` - (Optional) Indicates whether Application Insights can listen to CloudWatch events for the application resources, such as instance terminated, failed deployment, and others. 
@@ -110,4 +111,4 @@ Using `terraform import`, import ApplicationInsights Applications using the `res % terraform import aws_applicationinsights_application.some some-application ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/appmesh_gateway_route.html.markdown b/website/docs/cdktf/typescript/r/appmesh_gateway_route.html.markdown index b19408884217..519c42ffe94b 100644 --- a/website/docs/cdktf/typescript/r/appmesh_gateway_route.html.markdown +++ b/website/docs/cdktf/typescript/r/appmesh_gateway_route.html.markdown @@ -59,6 +59,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name to use for the gateway route. Must be between 1 and 255 characters in length. * `meshName` - (Required) Name of the service mesh in which to create the gateway route. Must be between 1 and 255 characters in length. * `virtualGatewayName` - (Required) Name of the [virtual gateway](/docs/providers/aws/r/appmesh_virtual_gateway.html) to associate the gateway route with. Must be between 1 and 255 characters in length. 
@@ -212,4 +213,4 @@ Using `terraform import`, import App Mesh gateway routes using `meshName` and `v [1]: /docs/providers/aws/index.html - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/appmesh_mesh.html.markdown b/website/docs/cdktf/typescript/r/appmesh_mesh.html.markdown index f602473c698c..81e607202ab3 100644 --- a/website/docs/cdktf/typescript/r/appmesh_mesh.html.markdown +++ b/website/docs/cdktf/typescript/r/appmesh_mesh.html.markdown @@ -67,6 +67,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name to use for the service mesh. Must be between 1 and 255 characters in length. * `spec` - (Optional) Service mesh specification to apply. * `egressFilter`- (Optional) Egress filter rules for the service mesh. @@ -115,4 +116,4 @@ Using `terraform import`, import App Mesh service meshes using the `name`. For e % terraform import aws_appmesh_mesh.simple simpleapp ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/appmesh_route.html.markdown b/website/docs/cdktf/typescript/r/appmesh_route.html.markdown index 31fa2b2fa3b6..8fbc122a4d46 100644 --- a/website/docs/cdktf/typescript/r/appmesh_route.html.markdown +++ b/website/docs/cdktf/typescript/r/appmesh_route.html.markdown @@ -193,6 +193,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name to use for the route. Must be between 1 and 255 characters in length. * `meshName` - (Required) Name of the service mesh in which to create the route. Must be between 1 and 255 characters in length. * `meshOwner` - (Optional) AWS account ID of the service mesh's owner. Defaults to the account ID the [AWS provider][1] is currently connected to. @@ -405,4 +406,4 @@ Using `terraform import`, import App Mesh virtual routes using `meshName` and `v [1]: /docs/providers/aws/index.html - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/appmesh_virtual_gateway.html.markdown b/website/docs/cdktf/typescript/r/appmesh_virtual_gateway.html.markdown index ccf30e71c23c..94b2f038e158 100644 --- a/website/docs/cdktf/typescript/r/appmesh_virtual_gateway.html.markdown +++ b/website/docs/cdktf/typescript/r/appmesh_virtual_gateway.html.markdown @@ -102,6 +102,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name to use for the virtual gateway. Must be between 1 and 255 characters in length. * `meshName` - (Required) Name of the service mesh in which to create the virtual gateway. Must be between 1 and 255 characters in length. * `meshOwner` - (Optional) AWS account ID of the service mesh's owner. Defaults to the account ID the [AWS provider][1] is currently connected to. 
@@ -330,4 +331,4 @@ Using `terraform import`, import App Mesh virtual gateway using `meshName` toget [1]: /docs/providers/aws/index.html - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/appmesh_virtual_node.html.markdown b/website/docs/cdktf/typescript/r/appmesh_virtual_node.html.markdown index 674f76942fc2..cc778d4802a2 100644 --- a/website/docs/cdktf/typescript/r/appmesh_virtual_node.html.markdown +++ b/website/docs/cdktf/typescript/r/appmesh_virtual_node.html.markdown @@ -232,6 +232,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name to use for the virtual node. Must be between 1 and 255 characters in length. * `meshName` - (Required) Name of the service mesh in which to create the virtual node. Must be between 1 and 255 characters in length. * `meshOwner` - (Optional) AWS account ID of the service mesh's owner. Defaults to the account ID the [AWS provider][1] is currently connected to. 
@@ -549,4 +550,4 @@ Using `terraform import`, import App Mesh virtual nodes using `meshName` togethe [1]: /docs/providers/aws/index.html - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/appmesh_virtual_router.html.markdown b/website/docs/cdktf/typescript/r/appmesh_virtual_router.html.markdown index 2b913c59901f..a91a706d673f 100644 --- a/website/docs/cdktf/typescript/r/appmesh_virtual_router.html.markdown +++ b/website/docs/cdktf/typescript/r/appmesh_virtual_router.html.markdown @@ -59,6 +59,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name to use for the virtual router. Must be between 1 and 255 characters in length. * `meshName` - (Required) Name of the service mesh in which to create the virtual router. Must be between 1 and 255 characters in length. * `meshOwner` - (Optional) AWS account ID of the service mesh's owner. Defaults to the account ID the [AWS provider][1] is currently connected to. 
@@ -124,4 +125,4 @@ Using `terraform import`, import App Mesh virtual routers using `meshName` toget [1]: /docs/providers/aws/index.html - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/appmesh_virtual_service.html.markdown b/website/docs/cdktf/typescript/r/appmesh_virtual_service.html.markdown index 2d115a755172..825ebe193ac1 100644 --- a/website/docs/cdktf/typescript/r/appmesh_virtual_service.html.markdown +++ b/website/docs/cdktf/typescript/r/appmesh_virtual_service.html.markdown @@ -78,6 +78,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name to use for the virtual service. Must be between 1 and 255 characters in length. * `meshName` - (Required) Name of the service mesh in which to create the virtual service. Must be between 1 and 255 characters in length. * `meshOwner` - (Optional) AWS account ID of the service mesh's owner. Defaults to the account ID the [AWS provider][1] is currently connected to. 
@@ -146,4 +147,4 @@ Using `terraform import`, import App Mesh virtual services using `meshName` toge [1]: /docs/providers/aws/index.html - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/apprunner_auto_scaling_configuration_version.html.markdown b/website/docs/cdktf/typescript/r/apprunner_auto_scaling_configuration_version.html.markdown index 590bb769f868..7aa1566eb3c3 100644 --- a/website/docs/cdktf/typescript/r/apprunner_auto_scaling_configuration_version.html.markdown +++ b/website/docs/cdktf/typescript/r/apprunner_auto_scaling_configuration_version.html.markdown @@ -44,6 +44,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `autoScalingConfigurationName` - (Required, Forces new resource) Name of the auto scaling configuration. * `maxConcurrency` - (Optional, Forces new resource) Maximal number of concurrent requests that you want an instance to process. When the number of concurrent requests goes over this limit, App Runner scales up your service. * `maxSize` - (Optional, Forces new resource) Maximal number of instances that App Runner provisions for your service. @@ -62,6 +63,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. 
For example: + +```terraform +import { + to = aws_apprunner_auto_scaling_configuration_version.example + identity = { + "arn" = "arn:aws:apprunner:us-east-1:123456789012:autoscalingconfiguration/example-auto-scaling-config/1/a1b2c3d4567890ab" + } +} + +resource "aws_apprunner_auto_scaling_configuration_version" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the App Runner auto scaling configuration version. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import App Runner AutoScaling Configuration Versions using the `arn`. For example: ```typescript @@ -92,4 +114,4 @@ Using `terraform import`, import App Runner AutoScaling Configuration Versions u % terraform import aws_apprunner_auto_scaling_configuration_version.example "arn:aws:apprunner:us-east-1:1234567890:autoscalingconfiguration/example/1/69bdfe0115224b0db49398b7beb68e0f" ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/apprunner_connection.html.markdown b/website/docs/cdktf/typescript/r/apprunner_connection.html.markdown index a8e8fa80fdac..ad85f9bb8186 100644 --- a/website/docs/cdktf/typescript/r/apprunner_connection.html.markdown +++ b/website/docs/cdktf/typescript/r/apprunner_connection.html.markdown @@ -44,6 +44,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `connectionName` - (Required) Name of the connection. * `providerType` - (Required) Source repository provider. Valid values: `GITHUB`.
* `tags` - (Optional) Key-value map of resource tags. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. @@ -84,4 +85,4 @@ Using `terraform import`, import App Runner Connections using the `connectionNam % terraform import aws_apprunner_connection.example example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/apprunner_custom_domain_association.html.markdown b/website/docs/cdktf/typescript/r/apprunner_custom_domain_association.html.markdown index 9c98c7565b52..72ac2736247f 100644 --- a/website/docs/cdktf/typescript/r/apprunner_custom_domain_association.html.markdown +++ b/website/docs/cdktf/typescript/r/apprunner_custom_domain_association.html.markdown @@ -41,6 +41,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `domainName` - (Required) Custom domain endpoint to association. Specify a base domain e.g., `example.com` or a subdomain e.g., `subdomain.example.com`. * `enableWwwSubdomain` (Optional) Whether to associate the subdomain with the App Runner service in addition to the base domain. Defaults to `true`. * `serviceArn` - (Required) ARN of the App Runner service. 
@@ -94,4 +95,4 @@ Using `terraform import`, import App Runner Custom Domain Associations using the % terraform import aws_apprunner_custom_domain_association.example example.com,arn:aws:apprunner:us-east-1:123456789012:service/example-app/8fe1e10304f84fd2b0df550fe98a71fa ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/apprunner_default_auto_scaling_configuration_version.html.markdown b/website/docs/cdktf/typescript/r/apprunner_default_auto_scaling_configuration_version.html.markdown index 9a3537a256f3..a0a20c6308dc 100644 --- a/website/docs/cdktf/typescript/r/apprunner_default_auto_scaling_configuration_version.html.markdown +++ b/website/docs/cdktf/typescript/r/apprunner_default_auto_scaling_configuration_version.html.markdown @@ -57,6 +57,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `autoScalingConfigurationArn` - (Required) The ARN of the App Runner auto scaling configuration that you want to set as the default. 
## Attribute Reference @@ -95,4 +96,4 @@ Using `terraform import`, import App Runner default auto scaling configurations % terraform import aws_apprunner_default_auto_scaling_configuration_version.example us-west-2 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/apprunner_deployment.html.markdown b/website/docs/cdktf/typescript/r/apprunner_deployment.html.markdown index c30cef4e4fff..f46207eb5378 100644 --- a/website/docs/cdktf/typescript/r/apprunner_deployment.html.markdown +++ b/website/docs/cdktf/typescript/r/apprunner_deployment.html.markdown @@ -38,6 +38,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `serviceArn` - (Required) The Amazon Resource Name (ARN) of the App Runner service to start the deployment for. ## Attribute Reference @@ -48,4 +49,4 @@ This resource exports the following attributes in addition to the arguments abov * `operationId` - The unique ID of the operation associated with deployment. * `status` - The current status of the App Runner service deployment. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/apprunner_observability_configuration.html.markdown b/website/docs/cdktf/typescript/r/apprunner_observability_configuration.html.markdown index 7fd9923aadfa..0348ab6b33d4 100644 --- a/website/docs/cdktf/typescript/r/apprunner_observability_configuration.html.markdown +++ b/website/docs/cdktf/typescript/r/apprunner_observability_configuration.html.markdown @@ -44,6 +44,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `observabilityConfigurationName` - (Required, Forces new resource) Name of the observability configuration. * `traceConfiguration` - (Optional) Configuration of the tracing feature within this observability configuration. If you don't specify it, App Runner doesn't enable tracing. See [Trace Configuration](#trace-configuration) below for more details. * `tags` - (Optional) Key-value map of resource tags. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. @@ -66,6 +67,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. 
For example: + +```terraform +import { + to = aws_apprunner_observability_configuration.example + identity = { + "arn" = "arn:aws:apprunner:us-east-1:123456789012:observabilityconfiguration/example-observability-config/1/a1b2c3d4567890ab" + } +} + +resource "aws_apprunner_observability_configuration" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the App Runner observability configuration. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import App Runner Observability Configuration using the `arn`. For example: ```typescript @@ -96,4 +118,4 @@ Using `terraform import`, import App Runner Observability Configuration using th % terraform import aws_apprunner_observability_configuration.example arn:aws:apprunner:us-east-1:1234567890:observabilityconfiguration/example/1/d75bc7ea55b71e724fe5c23452fe22a1 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/apprunner_service.html.markdown b/website/docs/cdktf/typescript/r/apprunner_service.html.markdown index 49791dfe823e..bf4b76b1a274 100644 --- a/website/docs/cdktf/typescript/r/apprunner_service.html.markdown +++ b/website/docs/cdktf/typescript/r/apprunner_service.html.markdown @@ -160,6 +160,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `autoScalingConfigurationArn` - ARN of an App Runner automatic scaling configuration resource that you want to associate with your service. 
If not provided, App Runner associates the latest revision of a default auto scaling configuration. * `encryptionConfiguration` - (Forces new resource) An optional custom encryption key that App Runner uses to encrypt the copy of your source repository that it maintains and your service logs. By default, App Runner uses an AWS managed CMK. See [Encryption Configuration](#encryption-configuration) below for more details. * `healthCheckConfiguration` - Settings of the health check that AWS App Runner performs to monitor the health of your service. See [Health Check Configuration](#health-check-configuration) below for more details. @@ -311,6 +312,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_apprunner_service.example + identity = { + "arn" = "arn:aws:apprunner:us-east-1:123456789012:service/example-app-service/8fe1e10304f84fd2b0df550fe98a71fa" + } +} + +resource "aws_apprunner_service" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the App Runner service. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import App Runner Services using the `arn`. For example: ```typescript @@ -341,4 +363,4 @@ Using `terraform import`, import App Runner Services using the `arn`. 
For exampl % terraform import aws_apprunner_service.example arn:aws:apprunner:us-east-1:1234567890:service/example/0a03292a89764e5882c41d8f991c82fe ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/apprunner_vpc_connector.html.markdown b/website/docs/cdktf/typescript/r/apprunner_vpc_connector.html.markdown index 674b034b260c..5ca25e5d43e3 100644 --- a/website/docs/cdktf/typescript/r/apprunner_vpc_connector.html.markdown +++ b/website/docs/cdktf/typescript/r/apprunner_vpc_connector.html.markdown @@ -40,6 +40,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `vpcConnectorName` - (Required) Name for the VPC connector. * `subnets` (Required) List of IDs of subnets that App Runner should use when it associates your service with a custom Amazon VPC. Specify IDs of subnets of a single Amazon VPC. App Runner determines the Amazon VPC from the subnets you specify. * `securityGroups` - List of IDs of security groups that App Runner should use for access to AWS resources under the specified subnets. If not specified, App Runner uses the default security group of the Amazon VPC. The default security group allows all outbound traffic. @@ -56,6 +57,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. 
For example: + +```terraform +import { + to = aws_apprunner_vpc_connector.example + identity = { + "arn" = "arn:aws:apprunner:us-east-1:123456789012:vpcconnector/example-vpc-connector/1/a1b2c3d4567890ab" + } +} + +resource "aws_apprunner_vpc_connector" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the App Runner VPC connector. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import App Runner vpc connector using the `arn`. For example: ```typescript @@ -86,4 +108,4 @@ Using `terraform import`, import App Runner vpc connector using the `arn`. For e % terraform import aws_apprunner_vpc_connector.example arn:aws:apprunner:us-east-1:1234567890:vpcconnector/example/1/0a03292a89764e5882c41d8f991c82fe ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/apprunner_vpc_ingress_connection.html.markdown b/website/docs/cdktf/typescript/r/apprunner_vpc_ingress_connection.html.markdown index 1f85299251f9..f9d105266e53 100644 --- a/website/docs/cdktf/typescript/r/apprunner_vpc_ingress_connection.html.markdown +++ b/website/docs/cdktf/typescript/r/apprunner_vpc_ingress_connection.html.markdown @@ -46,6 +46,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) A name for the VPC Ingress Connection resource. It must be unique across all the active VPC Ingress Connections in your AWS account in the AWS Region. 
* `serviceArn` - (Required) The Amazon Resource Name (ARN) for this App Runner service that is used to create the VPC Ingress Connection resource. * `ingressVpcConfiguration` - (Required) Specifications for the customer’s Amazon VPC and the related AWS PrivateLink VPC endpoint that are used to create the VPC Ingress Connection resource. See [Ingress VPC Configuration](#ingress-vpc-configuration) below for more details. @@ -69,6 +70,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_apprunner_vpc_ingress_connection.example + identity = { + "arn" = "arn:aws:apprunner:us-east-1:123456789012:vpcingressconnection/example-vpc-ingress-connection/a1b2c3d4567890ab" + } +} + +resource "aws_apprunner_vpc_ingress_connection" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the App Runner VPC ingress connection. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import App Runner VPC Ingress Connection using the `arn`. 
For example: ```typescript @@ -99,4 +121,4 @@ Using `terraform import`, import App Runner VPC Ingress Connection using the `ar % terraform import aws_apprunner_vpc_ingress_connection.example "arn:aws:apprunner:us-west-2:837424938642:vpcingressconnection/example/b379f86381d74825832c2e82080342fa" ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/appstream_directory_config.html.markdown b/website/docs/cdktf/typescript/r/appstream_directory_config.html.markdown index 829578e5191e..45d0182d1a6e 100644 --- a/website/docs/cdktf/typescript/r/appstream_directory_config.html.markdown +++ b/website/docs/cdktf/typescript/r/appstream_directory_config.html.markdown @@ -41,8 +41,9 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -The following arguments are required: +This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `directoryName` - (Required) Fully qualified name of the directory. * `organizationalUnitDistinguishedNames` - (Required) Distinguished names of the organizational units for computer accounts. * `serviceAccountCredentials` - (Required) Configuration block for the name of the directory and organizational unit (OU) to use to join the directory config to a Microsoft Active Directory domain. See [`serviceAccountCredentials`](#service_account_credentials) below. @@ -91,4 +92,4 @@ Using `terraform import`, import `aws_appstream_directory_config` using the id. 
% terraform import aws_appstream_directory_config.example directoryNameExample ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/appstream_fleet.html.markdown b/website/docs/cdktf/typescript/r/appstream_fleet.html.markdown index 8a2c2ed87a9e..25e007573bda 100644 --- a/website/docs/cdktf/typescript/r/appstream_fleet.html.markdown +++ b/website/docs/cdktf/typescript/r/appstream_fleet.html.markdown @@ -61,6 +61,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` - (Optional) Description to display. * `disconnectTimeoutInSeconds` - (Optional) Amount of time that a streaming session remains active after users disconnect. * `displayName` - (Optional) Human-readable friendly name for the AppStream fleet. @@ -138,4 +139,4 @@ Using `terraform import`, import `aws_appstream_fleet` using the id. 
For example % terraform import aws_appstream_fleet.example fleetNameExample ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/appstream_fleet_stack_association.html.markdown b/website/docs/cdktf/typescript/r/appstream_fleet_stack_association.html.markdown index f74cc4a9f289..7a8a1e5b9091 100644 --- a/website/docs/cdktf/typescript/r/appstream_fleet_stack_association.html.markdown +++ b/website/docs/cdktf/typescript/r/appstream_fleet_stack_association.html.markdown @@ -55,8 +55,9 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -The following arguments are required: +This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `fleetName` - (Required) Name of the fleet. * `stackName` (Required) Name of the stack. @@ -98,4 +99,4 @@ Using `terraform import`, import AppStream Stack Fleet Association using the `fl % terraform import aws_appstream_fleet_stack_association.example fleetName/stackName ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/appstream_image_builder.html.markdown b/website/docs/cdktf/typescript/r/appstream_image_builder.html.markdown index 5898c401b4b1..f985ae3e01b6 100644 --- a/website/docs/cdktf/typescript/r/appstream_image_builder.html.markdown +++ b/website/docs/cdktf/typescript/r/appstream_image_builder.html.markdown @@ -54,6 +54,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `accessEndpoint` - (Optional) Set of interface VPC endpoint (interface endpoint) objects. Maximum of 4. See below. * `appstreamAgentVersion` - (Optional) Version of the AppStream 2.0 agent to use for this image builder. * `description` - (Optional) Description to display. @@ -129,4 +130,4 @@ Using `terraform import`, import `aws_appstream_image_builder` using the `name`. % terraform import aws_appstream_image_builder.example imageBuilderExample ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/appstream_stack.html.markdown b/website/docs/cdktf/typescript/r/appstream_stack.html.markdown index 3ca74541f4d6..37d2180c5dd5 100644 --- a/website/docs/cdktf/typescript/r/appstream_stack.html.markdown +++ b/website/docs/cdktf/typescript/r/appstream_stack.html.markdown @@ -92,6 +92,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `accessEndpoints` - (Optional) Set of configuration blocks defining the interface VPC endpoints. Users of the stack can connect to AppStream 2.0 only through the specified endpoints. See [`accessEndpoints`](#access_endpoints) below. * `applicationSettings` - (Optional) Settings for application settings persistence. @@ -177,4 +178,4 @@ Using `terraform import`, import `aws_appstream_stack` using the id. 
For example % terraform import aws_appstream_stack.example stackID ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/appstream_user.html.markdown b/website/docs/cdktf/typescript/r/appstream_user.html.markdown index 78cfe5a5d518..c1daf83fffa9 100644 --- a/website/docs/cdktf/typescript/r/appstream_user.html.markdown +++ b/website/docs/cdktf/typescript/r/appstream_user.html.markdown @@ -46,6 +46,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `enabled` - (Optional) Whether the user in the user pool is enabled. * `firstName` - (Optional) First name, or given name, of the user. * `lastName` - (Optional) Last name, or surname, of the user. @@ -92,4 +93,4 @@ Using `terraform import`, import `aws_appstream_user` using the `userName` and ` % terraform import aws_appstream_user.example UserName/AuthenticationType ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/appstream_user_stack_association.html.markdown b/website/docs/cdktf/typescript/r/appstream_user_stack_association.html.markdown index 2424c099223b..fecaf4f42bce 100644 --- a/website/docs/cdktf/typescript/r/appstream_user_stack_association.html.markdown +++ b/website/docs/cdktf/typescript/r/appstream_user_stack_association.html.markdown @@ -62,6 +62,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `sendEmailNotification` - (Optional) Whether a welcome email is sent to a user after the user is created in the user pool. ## Attribute Reference @@ -102,4 +103,4 @@ Using `terraform import`, import AppStream User Stack Association using the `use % terraform import aws_appstream_user_stack_association.example userName/authenticationType/stackName ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/appsync_api.html.markdown b/website/docs/cdktf/typescript/r/appsync_api.html.markdown new file mode 100644 index 000000000000..7cc61e5d48ae --- /dev/null +++ b/website/docs/cdktf/typescript/r/appsync_api.html.markdown @@ -0,0 +1,283 @@ +--- +subcategory: "AppSync" +layout: "aws" +page_title: "AWS: aws_appsync_api" +description: |- + Manages an AWS AppSync Event API. +--- + + + +# Resource: aws_appsync_api + +Manages an [AWS AppSync Event API](https://docs.aws.amazon.com/appsync/latest/eventapi/event-api-concepts.html#API). Event APIs enable real-time subscriptions and event-driven communication in AppSync applications. + +## Example Usage + +### Basic Usage + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details.
+ */ +import { AppsyncApi } from "./.gen/providers/aws/appsync-api"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + new AppsyncApi(this, "example", { + eventConfig: [ + { + authProvider: [ + { + authType: "API_KEY", + }, + ], + connectionAuthMode: [ + { + authType: "API_KEY", + }, + ], + defaultPublishAuthMode: [ + { + authType: "API_KEY", + }, + ], + defaultSubscribeAuthMode: [ + { + authType: "API_KEY", + }, + ], + }, + ], + name: "example-event-api", + }); + } +} + +``` + +### With Cognito Authentication + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { Token, TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. + */ +import { AppsyncApi } from "./.gen/providers/aws/appsync-api"; +import { CognitoUserPool } from "./.gen/providers/aws/cognito-user-pool"; +import { DataAwsRegion } from "./.gen/providers/aws/data-aws-region"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + const example = new CognitoUserPool(this, "example", { + name: "example-user-pool", + }); + const current = new DataAwsRegion(this, "current", {}); + const awsAppsyncApiExample = new AppsyncApi(this, "example_2", { + eventConfig: [ + { + authProvider: [ + { + authType: "AMAZON_COGNITO_USER_POOLS", + cognitoConfig: [ + { + awsRegion: Token.asString(current.name), + userPoolId: example.id, + }, + ], + }, + ], + connectionAuthMode: [ + { + authType: "AMAZON_COGNITO_USER_POOLS", + }, + ], + defaultPublishAuthMode: [ + { + authType: "AMAZON_COGNITO_USER_POOLS", + }, + ], + defaultSubscribeAuthMode: [ + { + authType: "AMAZON_COGNITO_USER_POOLS", + }, + ], + }, + ], + name: "example-event-api", + }); + /*This allows the Terraform resource name to 
match the original name. You can remove the call if you don't need them to match.*/ + awsAppsyncApiExample.overrideLogicalId("example"); + } +} + +``` + +### With Lambda Authorizer + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { Token, TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. + */ +import { AppsyncApi } from "./.gen/providers/aws/appsync-api"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + new AppsyncApi(this, "example", { + eventConfig: [ + { + authProvider: [ + { + authType: "AWS_LAMBDA", + lambdaAuthorizerConfig: [ + { + authorizerResultTtlInSeconds: 300, + authorizerUri: Token.asString(awsLambdaFunctionExample.arn), + }, + ], + }, + ], + connectionAuthMode: [ + { + authType: "AWS_LAMBDA", + }, + ], + defaultPublishAuthMode: [ + { + authType: "AWS_LAMBDA", + }, + ], + defaultSubscribeAuthMode: [ + { + authType: "AWS_LAMBDA", + }, + ], + }, + ], + name: "example-event-api", + }); + } +} + +``` + +## Argument Reference + +The following arguments are required: + +* `eventConfig` - (Required) Configuration for the Event API. See [Event Config](#event-config) below. +* `name` - (Required) Name of the Event API. + +The following arguments are optional: + +* `ownerContact` - (Optional) Contact information for the owner of the Event API. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `tags` - (Optional) Map of tags to assign to the resource. 
If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. + +### Event Config + +The `eventConfig` block supports the following: + +* `authProvider` - (Required) List of authentication providers. See [Auth Providers](#auth-providers) below. +* `connectionAuthMode` - (Required) List of authentication modes for connections. See [Auth Modes](#auth-modes) below. +* `defaultPublishAuthMode` - (Required) List of default authentication modes for publishing. See [Auth Modes](#auth-modes) below. +* `defaultSubscribeAuthMode` - (Required) List of default authentication modes for subscribing. See [Auth Modes](#auth-modes) below. +* `logConfig` - (Optional) Logging configuration. See [Log Config](#log-config) below. + +### Auth Providers + +The `authProvider` block supports the following: + +* `authType` - (Required) Type of authentication provider. Valid values: `API_KEY`, `AWS_IAM`, `AMAZON_COGNITO_USER_POOLS`, `OPENID_CONNECT`, `AWS_LAMBDA`. +* `cognitoConfig` - (Optional) Configuration for Cognito user pool authentication. Required when `authType` is `AMAZON_COGNITO_USER_POOLS`. See [Cognito Config](#cognito-config) below. +* `lambdaAuthorizerConfig` - (Optional) Configuration for Lambda authorization. Required when `authType` is `AWS_LAMBDA`. See [Lambda Authorizer Config](#lambda-authorizer-config) below. +* `openidConnectConfig` - (Optional) Configuration for OpenID Connect. Required when `authType` is `OPENID_CONNECT`. See [OpenID Connect Config](#openid-connect-config) below. + +### Cognito Config + +The `cognitoConfig` block supports the following: + +* `appIdClientRegex` - (Optional) Regular expression for matching the client ID. +* `awsRegion` - (Required) AWS region where the user pool is located. +* `userPoolId` - (Required) ID of the Cognito user pool. 
+ +### Lambda Authorizer Config + +The `lambdaAuthorizerConfig` block supports the following: + +* `authorizerResultTtlInSeconds` - (Optional) TTL in seconds for the authorization result cache. +* `authorizerUri` - (Required) URI of the Lambda function for authorization. +* `identityValidationExpression` - (Optional) Regular expression for identity validation. + +### OpenID Connect Config + +The `openidConnectConfig` block supports the following: + +* `authTtl` - (Optional) TTL in seconds for the authentication token. +* `clientId` - (Optional) Client ID for the OpenID Connect provider. +* `iatTtl` - (Optional) TTL in seconds for the issued at time. +* `issuer` - (Required) Issuer URL for the OpenID Connect provider. + +### Auth Modes + +The `connectionAuthMode`, `defaultPublishAuthMode`, and `defaultSubscribeAuthMode` blocks support the following: + +* `authType` - (Required) Type of authentication. Valid values: `API_KEY`, `AWS_IAM`, `AMAZON_COGNITO_USER_POOLS`, `OPENID_CONNECT`, `AWS_LAMBDA`. + +### Log Config + +The `logConfig` block supports the following: + +* `cloudwatchLogsRoleArn` - (Required) ARN of the IAM role for CloudWatch logs. +* `logLevel` - (Required) Log level. Valid values: `NONE`, `ERROR`, `ALL`, `INFO`, `DEBUG`. + +## Attribute Reference + +This resource exports the following attributes in addition to the arguments above: + +* `apiId` - ID of the Event API. +* `apiArn` - ARN of the Event API. +* `dns` - DNS configuration for the Event API. +* `tagsAll` - Map of tags assigned to the resource, including those inherited from the provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). +* `wafWebAclArn` - ARN of the associated WAF web ACL. + +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import AppSync Event API using the `apiId`. For example: + +```typescript +// DO NOT EDIT. 
Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. + */ +import { AppsyncApi } from "./.gen/providers/aws/appsync-api"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + AppsyncApi.generateConfigForImport(this, "example", "example-api-id"); + } +} + +``` + +Using `terraform import`, import AppSync Event API using the `apiId`. For example: + +```console +% terraform import aws_appsync_api.example example-api-id +``` + + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/appsync_api_cache.html.markdown b/website/docs/cdktf/typescript/r/appsync_api_cache.html.markdown index a42345c8c4ff..eb0f1661ea60 100644 --- a/website/docs/cdktf/typescript/r/appsync_api_cache.html.markdown +++ b/website/docs/cdktf/typescript/r/appsync_api_cache.html.markdown @@ -48,6 +48,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `apiId` - (Required) GraphQL API ID. * `apiCachingBehavior` - (Required) Caching behavior. Valid values are `FULL_REQUEST_CACHING` and `PER_RESOLVER_CACHING`. * `type` - (Required) Cache instance type. Valid values are `SMALL`, `MEDIUM`, `LARGE`, `XLARGE`, `LARGE_2X`, `LARGE_4X`, `LARGE_8X`, `LARGE_12X`, `T2_SMALL`, `T2_MEDIUM`, `R4_LARGE`, `R4_XLARGE`, `R4_2XLARGE`, `R4_4XLARGE`, `R4_8XLARGE`. 
@@ -89,4 +90,4 @@ Using `terraform import`, import `aws_appsync_api_cache` using the AppSync API I % terraform import aws_appsync_api_cache.example xxxxx ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/appsync_api_key.html.markdown b/website/docs/cdktf/typescript/r/appsync_api_key.html.markdown index d5e1747364ae..34f2ce74b146 100644 --- a/website/docs/cdktf/typescript/r/appsync_api_key.html.markdown +++ b/website/docs/cdktf/typescript/r/appsync_api_key.html.markdown @@ -46,6 +46,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `apiId` - (Required) ID of the associated AppSync API * `description` - (Optional) API key description. Defaults to "Managed by Terraform". * `expires` - (Optional) RFC3339 string representation of the expiry date. Rounded down to nearest hour. By default, it is 7 days from the date of creation. @@ -85,4 +86,4 @@ Using `terraform import`, import `aws_appsync_api_key` using the AppSync API ID % terraform import aws_appsync_api_key.example xxxxx:yyyyy ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/appsync_channel_namespace.html.markdown b/website/docs/cdktf/typescript/r/appsync_channel_namespace.html.markdown new file mode 100644 index 000000000000..d316b02aaa99 --- /dev/null +++ b/website/docs/cdktf/typescript/r/appsync_channel_namespace.html.markdown @@ -0,0 +1,128 @@ +--- +subcategory: "AppSync" +layout: "aws" +page_title: "AWS: aws_appsync_channel_namespace" +description: |- + Manages an AWS AppSync Channel Namespace. 
+--- + + + +# Resource: aws_appsync_channel_namespace + +Manages an [AWS AppSync Channel Namespace](https://docs.aws.amazon.com/appsync/latest/eventapi/event-api-concepts.html#namespace). + +## Example Usage + +### Basic Usage + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { Token, TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. + */ +import { AppsyncChannelNamespace } from "./.gen/providers/aws/appsync-channel-namespace"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + new AppsyncChannelNamespace(this, "example", { + apiId: Token.asString(awsAppsyncApiExample.apiId), + name: "example-channel-namespace", + }); + } +} + +``` + +## Argument Reference + +The following arguments are required: + +* `apiId` - (Required) Event API ID. +* `name` - (Required) Name of the channel namespace. + +The following arguments are optional: + +* `codeHandlers` - (Optional) Event handler functions that run custom business logic to process published events and subscribe requests. +* `handlerConfigs` - (Optional) Configuration for the `onPublish` and `onSubscribe` handlers. See [Handler Configs](#handler-configs) below. +* `publishAuthMode` - (Optional) Authorization modes to use for publishing messages on the channel namespace. This configuration overrides the default API authorization configuration. See [Auth Modes](#auth-modes) below. +* `subscribeAuthMode` - (Optional) Authorization modes to use for subscribing to messages on the channel namespace. This configuration overrides the default API authorization configuration. See [Auth Modes](#auth-modes) below. 
+* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `tags` - (Optional) Map of tags to assign to the resource. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. + +### Auth Modes + +The `publishAuthMode` and `subscribeAuthMode` blocks support the following: + +* `authType` - (Required) Type of authentication. Valid values: `API_KEY`, `AWS_IAM`, `AMAZON_COGNITO_USER_POOLS`, `OPENID_CONNECT`, `AWS_LAMBDA`. + +### Handler Configs + +The `handlerConfigs` block supports the following: + +* `onPublish` - (Optional) Handler configuration. See [Handler Config](#handler-config) below. +* `onSubscribe` - (Optional) Handler configuration. See [Handler Config](#handler-config) below. + +### Handler Config + +The `onPublish` and `onSubscribe` blocks support the following: + +* `behavior` - (Required) Behavior for the handler. Valid values: `CODE`, `DIRECT`. +* `integration` - (Required) Integration data source configuration for the handler. See [Integration](#integration) below. + +### Integration + +The `integration` block supports the following: + +* `dataSourceName` - (Required) Unique name of the data source that has been configured on the API. +* `lambdaConfig` - (Optional) Configuration for a Lambda data source. See [Lambda Config](#lambda-config) below. + +### Lambda Config + +The `lambdaConfig` block supports the following: + +* `invokeType` - (Optional) Invocation type for a Lambda data source. Valid values: `REQUEST_RESPONSE`, `EVENT`.
+ +## Attribute Reference + +This resource exports the following attributes in addition to the arguments above: + +* `channelNamespaceArn` - ARN of the channel namespace. +* `tagsAll` - Map of tags assigned to the resource, including those inherited from the provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). + +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import AppSync Channel Namespace using the `apiId` and `name` separated by a comma (`,`). For example: + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. + */ +import { AppsyncChannelNamespace } from "./.gen/providers/aws/appsync-channel-namespace"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + AppsyncChannelNamespace.generateConfigForImport( + this, + "example", + "example-api-id,example-channel-namespace" + ); + } +} + +``` + +Using `terraform import`, import AppSync Channel Namespace using the `apiId` and `name` separated by a comma (`,`). 
For example: + +```console +% terraform import aws_appsync_channel_namespace.example example-api-id,example-channel-namespace +``` + + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/appsync_datasource.html.markdown b/website/docs/cdktf/typescript/r/appsync_datasource.html.markdown index 9a85754d1a68..92b396c67144 100644 --- a/website/docs/cdktf/typescript/r/appsync_datasource.html.markdown +++ b/website/docs/cdktf/typescript/r/appsync_datasource.html.markdown @@ -115,6 +115,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `apiId` - (Required) API ID for the GraphQL API for the data source. * `name` - (Required) User-supplied name for the data source. * `type` - (Required) Type of the Data Source. Valid values: `AWS_LAMBDA`, `AMAZON_DYNAMODB`, `AMAZON_ELASTICSEARCH`, `HTTP`, `NONE`, `RELATIONAL_DATABASE`, `AMAZON_EVENTBRIDGE`, `AMAZON_OPENSEARCH_SERVICE`. 
@@ -248,4 +249,4 @@ Using `terraform import`, import `aws_appsync_datasource` using the `apiId`, a h % terraform import aws_appsync_datasource.example abcdef123456-example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/appsync_domain_name.html.markdown b/website/docs/cdktf/typescript/r/appsync_domain_name.html.markdown index 8a7559da667e..56f6e309bd13 100644 --- a/website/docs/cdktf/typescript/r/appsync_domain_name.html.markdown +++ b/website/docs/cdktf/typescript/r/appsync_domain_name.html.markdown @@ -39,6 +39,7 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `certificateArn` - (Required) ARN of the certificate. This can be a Certificate Manager (ACM) certificate or an Identity and Access Management (IAM) server certificate. The certificate must reside in us-east-1. * `description` - (Optional) A description of the Domain Name. * `domainName` - (Required) Domain name.
@@ -79,4 +80,4 @@ Using `terraform import`, import `aws_appsync_domain_name` using the AppSync dom % terraform import aws_appsync_domain_name.example example.com ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/appsync_domain_name_api_association.html.markdown b/website/docs/cdktf/typescript/r/appsync_domain_name_api_association.html.markdown index 147722d542f6..618d2b1993ce 100644 --- a/website/docs/cdktf/typescript/r/appsync_domain_name_api_association.html.markdown +++ b/website/docs/cdktf/typescript/r/appsync_domain_name_api_association.html.markdown @@ -39,6 +39,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `apiId` - (Required) API ID. * `domainName` - (Required) Appsync domain name. @@ -80,4 +81,4 @@ Using `terraform import`, import `aws_appsync_domain_name_api_association` using % terraform import aws_appsync_domain_name_api_association.example example.com ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/appsync_function.html.markdown b/website/docs/cdktf/typescript/r/appsync_function.html.markdown index 5365ca42c2bb..21ad64988bf2 100644 --- a/website/docs/cdktf/typescript/r/appsync_function.html.markdown +++ b/website/docs/cdktf/typescript/r/appsync_function.html.markdown @@ -97,6 +97,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `apiId` - (Required) ID of the associated AppSync API. * `code` - (Optional) The function code that contains the request and response functions. When code is used, the runtime is required. The runtime value must be APPSYNC_JS. * `dataSource` - (Required) Function data source name. @@ -166,4 +167,4 @@ Using `terraform import`, import `aws_appsync_function` using the AppSync API ID % terraform import aws_appsync_function.example xxxxx-yyyyy ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/appsync_graphql_api.html.markdown b/website/docs/cdktf/typescript/r/appsync_graphql_api.html.markdown index 415571813fb5..1c9f330ef150 100644 --- a/website/docs/cdktf/typescript/r/appsync_graphql_api.html.markdown +++ b/website/docs/cdktf/typescript/r/appsync_graphql_api.html.markdown @@ -78,7 +78,7 @@ class MyConvertedCode extends TerraformStack { authenticationType: "AMAZON_COGNITO_USER_POOLS", name: "example", userPoolConfig: { - awsRegion: Token.asString(current.name), + awsRegion: Token.asString(current.region), defaultAction: "DENY", userPoolId: Token.asString(awsCognitoUserPoolExample.id), }, @@ -368,13 +368,14 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -The following arguments are required: +This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `authenticationType` - (Required) Authentication type.
Valid values: `API_KEY`, `AWS_IAM`, `AMAZON_COGNITO_USER_POOLS`, `OPENID_CONNECT`, `AWS_LAMBDA` * `name` - (Required) User-supplied name for the GraphQL API. The following arguments are optional: * `additionalAuthenticationProvider` - (Optional) One or more additional authentication providers for the GraphQL API. See [`additionalAuthenticationProvider` Block](#additional_authentication_provider-block) for details. * `apiType` - (Optional) API type. Valid values are `GRAPHQL` or `MERGED`. A `MERGED` type requires `mergedApiExecutionRoleArn` to be set. * `enhancedMetricsConfig` - (Optional) Enables and controls the enhanced metrics feature. See [`enhancedMetricsConfig` Block](#enhanced_metrics_config-block) for details. @@ -480,4 +481,4 @@ Using `terraform import`, import AppSync GraphQL API using the GraphQL API ID. F % terraform import aws_appsync_graphql_api.example 0123456789 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/appsync_resolver.html.markdown b/website/docs/cdktf/typescript/r/appsync_resolver.html.markdown index 6cb4758ed1a6..315cc508c382 100644 --- a/website/docs/cdktf/typescript/r/appsync_resolver.html.markdown +++ b/website/docs/cdktf/typescript/r/appsync_resolver.html.markdown @@ -113,6 +113,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference).
* `apiId` - (Required) API ID for the GraphQL API. * `code` - (Optional) The function code that contains the request and response functions. When code is used, the runtime is required. The runtime value must be APPSYNC_JS. * `type` - (Required) Type name from the schema defined in the GraphQL API. @@ -189,4 +190,4 @@ Using `terraform import`, import `aws_appsync_resolver` using the `apiId`, a hyp % terraform import aws_appsync_resolver.example abcdef123456-exampleType-exampleField ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/appsync_source_api_association.html.markdown b/website/docs/cdktf/typescript/r/appsync_source_api_association.html.markdown index 4163fb8d757b..7f93065888c8 100644 --- a/website/docs/cdktf/typescript/r/appsync_source_api_association.html.markdown +++ b/website/docs/cdktf/typescript/r/appsync_source_api_association.html.markdown @@ -3,13 +3,13 @@ subcategory: "AppSync" layout: "aws" page_title: "AWS: aws_appsync_source_api_association" description: |- - Terraform resource for managing an AWS AppSync Source Api Association. + Terraform resource for managing an AWS AppSync Source API Association. --- # Resource: aws_appsync_source_api_association -Terraform resource for managing an AWS AppSync Source Api Association. +Terraform resource for managing an AWS AppSync Source API Association. ## Example Usage @@ -41,6 +41,7 @@ class MyConvertedCode extends TerraformStack { The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` - (Optional) Description of the source API being merged. * `mergedApiArn` - (Optional) ARN of the merged API. One of `mergedApiArn` or `mergedApiId` must be specified. 
* `mergedApiId` - (Optional) ID of the merged API. One of `mergedApiArn` or `mergedApiId` must be specified. @@ -57,9 +58,9 @@ The `sourceApiAssociationConfig` configuration block supports the following argu This resource exports the following attributes in addition to the arguments above: -* `arn` - ARN of the Source Api Association. -* `associationId` - ID of the Source Api Association. -* `id` - Combined ID of the Source Api Association and Merge Api. +* `arn` - ARN of the Source API Association. +* `associationId` - ID of the Source API Association. +* `id` - Combined ID of the Source API Association and Merge API. ## Timeouts @@ -71,7 +72,7 @@ This resource exports the following attributes in addition to the arguments abov ## Import -In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import AppSync Source Api Association using the `example_id_arg`. For example: +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import AppSync Source API Association using the `associationId` and `mergedApiId` separated by `,`. For example: ```typescript // DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug @@ -95,10 +96,10 @@ class MyConvertedCode extends TerraformStack { ``` -Using `terraform import`, import AppSync Source Api Association using the `gzos6bteufdunffzzifiowisoe,243685a0-9347-4a1a-89c1-9b57dea01e31`. For example: +Using `terraform import`, import AppSync Source API Association using the `associationId` and `mergedApiId` separated by `,`. 
For example: ```console % terraform import aws_appsync_source_api_association.example gzos6bteufdunffzzifiowisoe,243685a0-9347-4a1a-89c1-9b57dea01e31 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/appsync_type.html.markdown b/website/docs/cdktf/typescript/r/appsync_type.html.markdown index 2e300a20ea47..b5c66064e5d6 100644 --- a/website/docs/cdktf/typescript/r/appsync_type.html.markdown +++ b/website/docs/cdktf/typescript/r/appsync_type.html.markdown @@ -48,6 +48,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `apiId` - (Required) GraphQL API ID. * `format` - (Required) The type format: `SDL` or `JSON`. * `definition` - (Required) The type definition. @@ -89,4 +90,4 @@ Using `terraform import`, import Appsync Types using the `id`. For example: % terraform import aws_appsync_type.example api-id:format:name ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/athena_capacity_reservation.html.markdown b/website/docs/cdktf/typescript/r/athena_capacity_reservation.html.markdown index b68548cb2d0a..5684905eb146 100644 --- a/website/docs/cdktf/typescript/r/athena_capacity_reservation.html.markdown +++ b/website/docs/cdktf/typescript/r/athena_capacity_reservation.html.markdown @@ -47,6 +47,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `tags` - (Optional) Map of tags assigned to the resource. If configured with a provider [`defaultTags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. ## Attribute Reference @@ -98,4 +99,4 @@ Using `terraform import`, import Athena Capacity Reservation using the `name`. F % terraform import aws_athena_capacity_reservation.example example-reservation ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/athena_data_catalog.html.markdown b/website/docs/cdktf/typescript/r/athena_data_catalog.html.markdown index 9551ebe89e90..138b1559234e 100644 --- a/website/docs/cdktf/typescript/r/athena_data_catalog.html.markdown +++ b/website/docs/cdktf/typescript/r/athena_data_catalog.html.markdown @@ -136,6 +136,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +- `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). - `name` - (Required) Name of the data catalog. The catalog name must be unique for the AWS account and can use a maximum of 128 alphanumeric, underscore, at sign, or hyphen characters. - `type` - (Required) Type of data catalog: `LAMBDA` for a federated catalog, `GLUE` for AWS Glue Catalog, or `HIVE` for an external hive metastore. - `parameters` - (Required) Key value pairs that specifies the Lambda function or functions to use for the data catalog. The mapping used depends on the catalog type. 
@@ -182,4 +183,4 @@ Using `terraform import`, import data catalogs using their `name`. For example: % terraform import aws_athena_data_catalog.example example-data-catalog ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/athena_database.html.markdown b/website/docs/cdktf/typescript/r/athena_database.html.markdown index e2f40bd65962..f6fca9701af7 100644 --- a/website/docs/cdktf/typescript/r/athena_database.html.markdown +++ b/website/docs/cdktf/typescript/r/athena_database.html.markdown @@ -45,6 +45,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `bucket` - (Required) Name of S3 bucket to save the results of the query execution. * `name` - (Required) Name of the database to create. * `aclConfiguration` - (Optional) That an Amazon S3 canned ACL should be set to control ownership of stored query results. See [ACL Configuration](#acl-configuration) below. @@ -53,6 +54,7 @@ This resource supports the following arguments: * `expectedBucketOwner` - (Optional) AWS account ID that you expect to be the owner of the Amazon S3 bucket. * `forceDestroy` - (Optional, Default: false) Boolean that indicates all tables should be deleted from the database so that the database can be destroyed without error. The tables are *not* recoverable. * `properties` - (Optional) Key-value map of custom metadata properties for the database definition. +* `workgroup` - (Optional) Name of the workgroup. 
### ACL Configuration @@ -125,4 +127,4 @@ class MyConvertedCode extends TerraformStack { ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/athena_named_query.html.markdown b/website/docs/cdktf/typescript/r/athena_named_query.html.markdown index 166083969fcd..db39624b9322 100644 --- a/website/docs/cdktf/typescript/r/athena_named_query.html.markdown +++ b/website/docs/cdktf/typescript/r/athena_named_query.html.markdown @@ -71,6 +71,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Plain language name for the query. Maximum length of 128. * `workgroup` - (Optional) Workgroup to which the query belongs. Defaults to `primary` * `database` - (Required) Database to which the query belongs. @@ -111,4 +112,4 @@ Using `terraform import`, import Athena Named Query using the query ID. For exam % terraform import aws_athena_named_query.example 0123456789 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/athena_prepared_statement.html.markdown b/website/docs/cdktf/typescript/r/athena_prepared_statement.html.markdown index a036b441d025..60da214743c9 100644 --- a/website/docs/cdktf/typescript/r/athena_prepared_statement.html.markdown +++ b/website/docs/cdktf/typescript/r/athena_prepared_statement.html.markdown @@ -65,6 +65,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The name of the prepared statement. Maximum length of 256. * `workgroup` - (Required) The name of the workgroup to which the prepared statement belongs. * `queryStatement` - (Required) The query string for the prepared statement. @@ -116,4 +117,4 @@ Using `terraform import`, import Athena Prepared Statement using the `WORKGROUP- % terraform import aws_athena_prepared_statement.example 12345abcde/example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/athena_workgroup.html.markdown b/website/docs/cdktf/typescript/r/athena_workgroup.html.markdown index 674ed5ebfd7a..ff18399bab0e 100644 --- a/website/docs/cdktf/typescript/r/athena_workgroup.html.markdown +++ b/website/docs/cdktf/typescript/r/athena_workgroup.html.markdown @@ -49,6 +49,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the workgroup. * `configuration` - (Optional) Configuration block with various settings for the workgroup. Documented below. * `description` - (Optional) Description of the workgroup. @@ -61,19 +62,25 @@ This resource supports the following arguments: * `bytesScannedCutoffPerQuery` - (Optional) Integer for the upper data usage limit (cutoff) for the amount of bytes a single query in a workgroup is allowed to scan. Must be at least `10485760`. * `enforceWorkgroupConfiguration` - (Optional) Boolean whether the settings for the workgroup override client-side settings. 
For more information, see [Workgroup Settings Override Client-Side Settings](https://docs.aws.amazon.com/athena/latest/ug/workgroups-settings-override.html). Defaults to `true`. * `engineVersion` - (Optional) Configuration block for the Athena Engine Versioning. For more information, see [Athena Engine Versioning](https://docs.aws.amazon.com/athena/latest/ug/engine-versions.html). See [Engine Version](#engine-version) below. -* `executionRole` - (Optional) Role used in a notebook session for accessing the user's resources. +* `executionRole` - (Optional) Role used to access user resources in notebook sessions and IAM Identity Center enabled workgroups. The property is required for IAM Identity Center enabled workgroups. +* `identityCenterConfiguration` - (Optional) Configuration block to set up an IAM Identity Center enabled workgroup. See [Identity Center Configuration](#identity-center-configuration) below. * `publishCloudwatchMetricsEnabled` - (Optional) Boolean whether Amazon CloudWatch metrics are enabled for the workgroup. Defaults to `true`. -* `resultConfiguration` - (Optional) Configuration block with result settings. See [Result Configuration](#result-configuration) below. * `requesterPaysEnabled` - (Optional) If set to true , allows members assigned to a workgroup to reference Amazon S3 Requester Pays buckets in queries. If set to false , workgroup members cannot query data from Requester Pays buckets, and queries that retrieve data from Requester Pays buckets cause an error. The default is false . For more information about Requester Pays buckets, see [Requester Pays Buckets](https://docs.aws.amazon.com/AmazonS3/latest/dev/RequesterPaysBuckets.html) in the Amazon Simple Storage Service Developer Guide. +* `resultConfiguration` - (Optional) Configuration block with result settings. See [Result Configuration](#result-configuration) below. #### Engine Version * `selectedEngineVersion` - (Optional) Requested engine version. Defaults to `AUTO`. 
+#### Identity Center Configuration + +* `enableIdentityCenter` - (Optional) Specifies whether the workgroup is IAM Identity Center supported. +* `identityCenterInstanceArn` - (Optional) The IAM Identity Center instance ARN that the workgroup associates to. + #### Result Configuration -* `encryptionConfiguration` - (Optional) Configuration block with encryption settings. See [Encryption Configuration](#encryption-configuration) below. * `aclConfiguration` - (Optional) That an Amazon S3 canned ACL should be set to control ownership of stored query results. See [ACL Configuration](#acl-configuration) below. +* `encryptionConfiguration` - (Optional) Configuration block with encryption settings. See [Encryption Configuration](#encryption-configuration) below. * `expectedBucketOwner` - (Optional) AWS account ID that you expect to be the owner of the Amazon S3 bucket. * `outputLocation` - (Optional) Location in Amazon S3 where your query results are stored, such as `s3://path/to/query/bucket/`. For more information, see [Queries and Query Result Files](https://docs.aws.amazon.com/athena/latest/ug/querying.html). @@ -125,4 +132,4 @@ Using `terraform import`, import Athena Workgroups using their name. For example % terraform import aws_athena_workgroup.example example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/auditmanager_account_registration.html.markdown b/website/docs/cdktf/typescript/r/auditmanager_account_registration.html.markdown index be17773e071f..d8be0ad0608a 100644 --- a/website/docs/cdktf/typescript/r/auditmanager_account_registration.html.markdown +++ b/website/docs/cdktf/typescript/r/auditmanager_account_registration.html.markdown @@ -60,6 +60,7 @@ class MyConvertedCode extends TerraformStack { The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `delegatedAdminAccount` - (Optional) Identifier for the delegated administrator account. * `deregisterOnDestroy` - (Optional) Flag to deregister AuditManager in the account upon destruction. Defaults to `false` (ie. AuditManager will remain active in the account, even if this resource is removed). * `kmsKey` - (Optional) KMS key identifier. @@ -103,4 +104,4 @@ Using `terraform import`, import Audit Manager Account Registration resources us % terraform import aws_auditmanager_account_registration.example us-east-1 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/auditmanager_assessment.html.markdown b/website/docs/cdktf/typescript/r/auditmanager_assessment.html.markdown index 9db807be01f0..e8182ea940dc 100644 --- a/website/docs/cdktf/typescript/r/auditmanager_assessment.html.markdown +++ b/website/docs/cdktf/typescript/r/auditmanager_assessment.html.markdown @@ -75,6 +75,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` - (Optional) Description of the assessment. * `tags` - (Optional) A map of tags to assign to the assessment. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. 
@@ -142,4 +143,4 @@ Using `terraform import`, import Audit Manager Assessments using the assessment % terraform import aws_auditmanager_assessment.example abc123-de45 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/auditmanager_assessment_delegation.html.markdown b/website/docs/cdktf/typescript/r/auditmanager_assessment_delegation.html.markdown index 85091d601247..b9ccf57badf4 100644 --- a/website/docs/cdktf/typescript/r/auditmanager_assessment_delegation.html.markdown +++ b/website/docs/cdktf/typescript/r/auditmanager_assessment_delegation.html.markdown @@ -50,6 +50,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `comment` - (Optional) Comment describing the delegation request. 
## Attribute Reference @@ -92,4 +93,4 @@ Using `terraform import`, import Audit Manager Assessment Delegation using the ` % terraform import aws_auditmanager_assessment_delegation.example abcdef-123456,arn:aws:iam::123456789012:role/example,example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/auditmanager_assessment_report.html.markdown b/website/docs/cdktf/typescript/r/auditmanager_assessment_report.html.markdown index aaf8e05ab2cd..93b43bab681a 100644 --- a/website/docs/cdktf/typescript/r/auditmanager_assessment_report.html.markdown +++ b/website/docs/cdktf/typescript/r/auditmanager_assessment_report.html.markdown @@ -46,6 +46,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` - (Optional) Description of the assessment report. ## Attribute Reference @@ -88,4 +89,4 @@ Using `terraform import`, import Audit Manager Assessment Reports using the asse % terraform import aws_auditmanager_assessment_report.example abc123-de45 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/auditmanager_control.html.markdown b/website/docs/cdktf/typescript/r/auditmanager_control.html.markdown index 20e69ca95898..fe0fdd21f18e 100644 --- a/website/docs/cdktf/typescript/r/auditmanager_control.html.markdown +++ b/website/docs/cdktf/typescript/r/auditmanager_control.html.markdown @@ -52,6 +52,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `actionPlanInstructions` - (Optional) Recommended actions to carry out if the control isn't fulfilled. * `actionPlanTitle` - (Optional) Title of the action plan for remediating the control. * `description` - (Optional) Description of the control. @@ -68,6 +69,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `sourceDescription` - (Optional) Description of the source. * `sourceFrequency` - (Optional) Frequency of evidence collection. Valid values are `DAILY`, `WEEKLY`, or `MONTHLY`. * `sourceKeyword` - (Optional) The keyword to search for in CloudTrail logs, Config rules, Security Hub checks, and Amazon Web Services API names. See [`sourceKeyword`](#source_keyword) below. @@ -77,8 +79,8 @@ The following arguments are optional: The following arguments are required: -* `keywordInputType` - (Required) Input method for the keyword. Valid values are `INPUT_TEXT`, `SELECT_FROM_LIST`, or `UPLOAD_FILE`. -* `keywordValue` - (Required) The value of the keyword that's used when mapping a control data source. For example, this can be a CloudTrail event name, a rule name for Config, a Security Hub control, or the name of an Amazon Web Services API call. See the [Audit Manager supported control data sources documentation](https://docs.aws.amazon.com/audit-manager/latest/userguide/control-data-sources.html) for more information. +* `keywordInputType` - (Required) Input method for the keyword. Valid values are `INPUT_TEXT`, `SELECT_FROM_LIST`, or `UPLOAD_FILE`.
+* `keywordValue` - (Required) The value of the keyword that's used when mapping a control data source. For example, this can be a CloudTrail event name, a rule name for Config, a Security Hub control, or the name of an Amazon Web Services API call. See the [Audit Manager supported control data sources documentation](https://docs.aws.amazon.com/audit-manager/latest/userguide/control-data-sources.html) for more information. ## Attribute Reference @@ -117,4 +119,4 @@ Using `terraform import`, import an Audit Manager Control using the `id`. For ex % terraform import aws_auditmanager_control.example abc123-de45 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/auditmanager_framework.html.markdown b/website/docs/cdktf/typescript/r/auditmanager_framework.html.markdown index 0b9a98702208..264c2a94c93d 100644 --- a/website/docs/cdktf/typescript/r/auditmanager_framework.html.markdown +++ b/website/docs/cdktf/typescript/r/auditmanager_framework.html.markdown @@ -58,6 +58,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `complianceType` - (Optional) Compliance type that the new custom framework supports, such as `CIS` or `HIPAA`. * `description` - (Optional) Description of the framework. * `tags` - (Optional) A map of tags to assign to the framework. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level.
@@ -116,4 +117,4 @@ Using `terraform import`, import Audit Manager Framework using the framework `id % terraform import aws_auditmanager_framework.example abc123-de45 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/auditmanager_framework_share.html.markdown b/website/docs/cdktf/typescript/r/auditmanager_framework_share.html.markdown index 2584a950e5d0..3910cc1313fb 100644 --- a/website/docs/cdktf/typescript/r/auditmanager_framework_share.html.markdown +++ b/website/docs/cdktf/typescript/r/auditmanager_framework_share.html.markdown @@ -48,6 +48,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `comment` - (Optional) Comment from the sender about the share request. ## Attribute Reference @@ -89,4 +90,4 @@ Using `terraform import`, import Audit Manager Framework Share using the `id`. 
F % terraform import aws_auditmanager_framework_share.example abcdef-123456 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/auditmanager_organization_admin_account_registration.html.markdown b/website/docs/cdktf/typescript/r/auditmanager_organization_admin_account_registration.html.markdown index c1da5cbc0183..58ec75740f22 100644 --- a/website/docs/cdktf/typescript/r/auditmanager_organization_admin_account_registration.html.markdown +++ b/website/docs/cdktf/typescript/r/auditmanager_organization_admin_account_registration.html.markdown @@ -38,8 +38,9 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -The following arguments are required: +This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `adminAccountId` - (Required) Identifier for the organization administrator account. 
## Attribute Reference @@ -81,4 +82,4 @@ Using `terraform import`, import Audit Manager Organization Admin Account Regist % terraform import aws_auditmanager_organization_admin_account_registration.example 123456789012 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/autoscaling_attachment.html.markdown b/website/docs/cdktf/typescript/r/autoscaling_attachment.html.markdown index 637fea1c6000..bd2b4e8ad329 100644 --- a/website/docs/cdktf/typescript/r/autoscaling_attachment.html.markdown +++ b/website/docs/cdktf/typescript/r/autoscaling_attachment.html.markdown @@ -62,6 +62,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `autoscalingGroupName` - (Required) Name of ASG to associate with the ELB. * `elb` - (Optional) Name of the ELB. * `lbTargetGroupArn` - (Optional) ARN of a load balancer target group. @@ -70,4 +71,4 @@ This resource supports the following arguments: This resource exports no additional attributes. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/autoscaling_group.html.markdown b/website/docs/cdktf/typescript/r/autoscaling_group.html.markdown index 2b3b86edbde4..2cb15a2c89f9 100644 --- a/website/docs/cdktf/typescript/r/autoscaling_group.html.markdown +++ b/website/docs/cdktf/typescript/r/autoscaling_group.html.markdown @@ -550,6 +550,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +- `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). - `name` - (Optional) Name of the Auto Scaling Group. By default generated by Terraform. Conflicts with `namePrefix`. - `namePrefix` - (Optional) Creates a unique name beginning with the specified prefix. Conflicts with `name`. @@ -639,8 +640,8 @@ This resource supports the following arguments: This configuration block supports the following: -- `capacity_reservation_ids` - (Optional) List of On-Demand Capacity Reservation Ids. Conflicts with `capacity_reservation_resource_group_arns`. -- `capacity_reservation_resource_group_arns` - (Optional) List of On-Demand Capacity Reservation Resource Group Arns. Conflicts with `capacity_reservation_ids`. +- `capacityReservationIds` - (Optional) List of On-Demand Capacity Reservation Ids. Conflicts with `capacityReservationResourceGroupArns`. +- `capacityReservationResourceGroupArns` - (Optional) List of On-Demand Capacity Reservation Resource Group Arns. Conflicts with `capacityReservationIds`. ### launch_template @@ -834,7 +835,7 @@ This configuration block supports the following: - `instanceWarmup` - (Optional) Number of seconds until a newly launched instance is configured and ready to use. Default behavior is to use the Auto Scaling Group's health check grace period. - `maxHealthyPercentage` - (Optional) Amount of capacity in the Auto Scaling group that can be in service and healthy, or pending, to support your workload when an instance refresh is in place, as a percentage of the desired capacity of the Auto Scaling group. Values must be between `100` and `200`, defaults to `100`. - `minHealthyPercentage` - (Optional) Amount of capacity in the Auto Scaling group that must remain healthy during an instance refresh to allow the operation to continue, as a percentage of the desired capacity of the Auto Scaling group. Defaults to `90`. 
- - `skipMatching` - (Optional) Replace instances that already have your desired configuration. Defaults to `false`. + - `skipMatching` - (Optional) Skip replacing instances that already have your desired configuration. Defaults to `false`. - `autoRollback` - (Optional) Automatically rollback if instance refresh fails. Defaults to `false`. This option may only be set to `true` when specifying a `launchTemplate` or `mixedInstancesPolicy`. - `alarmSpecification` - (Optional) Alarm Specification for Instance Refresh. - `alarms` - (Required) List of Cloudwatch alarms. If any of these alarms goes into ALARM state, Instance Refresh is failed. @@ -1010,4 +1011,4 @@ Using `terraform import`, import Auto Scaling Groups using the `name`. For examp % terraform import aws_autoscaling_group.web web-asg ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/autoscaling_group_tag.html.markdown b/website/docs/cdktf/typescript/r/autoscaling_group_tag.html.markdown index 05edda8902ac..2f845ed8e229 100644 --- a/website/docs/cdktf/typescript/r/autoscaling_group_tag.html.markdown +++ b/website/docs/cdktf/typescript/r/autoscaling_group_tag.html.markdown @@ -80,6 +80,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `autoscalingGroupName` - (Required) Name of the Autoscaling Group to apply the tag to. * `tag` - (Required) Tag to create. The `tag` block is documented below. 
@@ -127,4 +128,4 @@ Using `terraform import`, import `aws_autoscaling_group_tag` using the ASG name % terraform import aws_autoscaling_group_tag.example asg-example,k8s.io/cluster-autoscaler/node-template/label/eks.amazonaws.com/capacityType ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/autoscaling_lifecycle_hook.html.markdown b/website/docs/cdktf/typescript/r/autoscaling_lifecycle_hook.html.markdown index 4f7987ddb510..b94880d00c0b 100644 --- a/website/docs/cdktf/typescript/r/autoscaling_lifecycle_hook.html.markdown +++ b/website/docs/cdktf/typescript/r/autoscaling_lifecycle_hook.html.markdown @@ -87,13 +87,14 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the lifecycle hook. * `autoscalingGroupName` - (Required) Name of the Auto Scaling group to which you want to assign the lifecycle hook * `defaultResult` - (Optional) Defines the action the Auto Scaling group should take when the lifecycle hook timeout elapses or if an unexpected failure occurs. The value for this parameter can be either CONTINUE or ABANDON. The default value for this parameter is ABANDON. * `heartbeatTimeout` - (Optional) Defines the amount of time, in seconds, that can elapse before the lifecycle hook times out. When the lifecycle hook times out, Auto Scaling performs the action defined in the DefaultResult parameter * `lifecycleTransition` - (Required) Instance state to which you want to attach the lifecycle hook. 
For a list of lifecycle hook types, see [describe-lifecycle-hook-types](https://docs.aws.amazon.com/cli/latest/reference/autoscaling/describe-lifecycle-hook-types.html#examples) * `notificationMetadata` - (Optional) Contains additional information that you want to include any time Auto Scaling sends a message to the notification target. -* `notificationTargetArn` - (Optional) ARN of the notification target that Auto Scaling will use to notify you when an instance is in the transition state for the lifecycle hook. This ARN target can be either an SQS queue or an SNS topic. +* `notificationTargetArn` - (Optional) ARN of the notification target that Auto Scaling will use to notify you when an instance is in the transition state for the lifecycle hook. This ARN target can be either an SQS queue, an SNS topic, or a Lambda function. * `roleArn` - (Optional) ARN of the IAM role that allows the Auto Scaling group to publish to the specified notification target. ## Attribute Reference @@ -132,4 +133,4 @@ Using `terraform import`, import AutoScaling Lifecycle Hooks using the role auto % terraform import aws_autoscaling_lifecycle_hook.test-lifecycle-hook asg-name/lifecycle-hook-name ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/autoscaling_notification.html.markdown b/website/docs/cdktf/typescript/r/autoscaling_notification.html.markdown index 08f24557a096..727eed6c4f54 100644 --- a/website/docs/cdktf/typescript/r/autoscaling_notification.html.markdown +++ b/website/docs/cdktf/typescript/r/autoscaling_notification.html.markdown @@ -70,6 +70,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `groupNames` - (Required) List of AutoScaling Group Names * `notifications` - (Required) List of Notification Types that trigger notifications. Acceptable values are documented [in the AWS documentation here][1] @@ -86,4 +87,4 @@ This resource exports the following attributes in addition to the arguments abov [1]: https://docs.aws.amazon.com/AutoScaling/latest/APIReference/API_NotificationConfiguration.html [2]: https://docs.aws.amazon.com/AutoScaling/latest/APIReference/API_DescribeNotificationConfigurations.html - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/autoscaling_policy.html.markdown b/website/docs/cdktf/typescript/r/autoscaling_policy.html.markdown index 823b74002ca5..2393eafbd1d2 100644 --- a/website/docs/cdktf/typescript/r/autoscaling_policy.html.markdown +++ b/website/docs/cdktf/typescript/r/autoscaling_policy.html.markdown @@ -259,6 +259,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the policy. * `autoscalingGroupName` - (Required) Name of the autoscaling group. * `adjustmentType` - (Optional) Whether the adjustment is an absolute number or a percentage of the current capacity. Valid values are `ChangeInCapacity`, `ExactCapacity`, and `PercentChangeInCapacity`. 
@@ -571,4 +572,4 @@ Using `terraform import`, import AutoScaling scaling policy using the role autos % terraform import aws_autoscaling_policy.test-policy asg-name/policy-name ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/autoscaling_schedule.html.markdown b/website/docs/cdktf/typescript/r/autoscaling_schedule.html.markdown index de291339df18..7ee9f4445a50 100644 --- a/website/docs/cdktf/typescript/r/autoscaling_schedule.html.markdown +++ b/website/docs/cdktf/typescript/r/autoscaling_schedule.html.markdown @@ -66,6 +66,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `desiredCapacity` - (Optional) The initial capacity of the Auto Scaling group after the scheduled action runs and the capacity it attempts to maintain. Set to `-1` if you don't want to change the desired capacity at the scheduled time. Defaults to `0`. * `endTime` - (Optional) The date and time for the recurring schedule to end, in UTC with the format `"YYYY-MM-DDThh:mm:ssZ"` (e.g. `"2021-06-01T00:00:00Z"`). * `maxSize` - (Optional) The maximum size of the Auto Scaling group. Set to `-1` if you don't want to change the maximum size at the scheduled time. Defaults to `0`. 
@@ -114,4 +115,4 @@ Using `terraform import`, import AutoScaling ScheduledAction using the `auto-sca % terraform import aws_autoscaling_schedule.resource-name auto-scaling-group-name/scheduled-action-name ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/autoscaling_traffic_source_attachment.html.markdown b/website/docs/cdktf/typescript/r/autoscaling_traffic_source_attachment.html.markdown index ce00729df0b2..0aea5b3349f9 100644 --- a/website/docs/cdktf/typescript/r/autoscaling_traffic_source_attachment.html.markdown +++ b/website/docs/cdktf/typescript/r/autoscaling_traffic_source_attachment.html.markdown @@ -46,6 +46,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +- `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). - `autoscalingGroupName` - (Required) The name of the Auto Scaling group. - `trafficSource` - (Required) The unique identifiers of a traffic sources. @@ -62,4 +63,4 @@ This resource supports the following arguments: This resource exports no additional attributes. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/autoscalingplans_scaling_plan.html.markdown b/website/docs/cdktf/typescript/r/autoscalingplans_scaling_plan.html.markdown index 4e5440762e05..080cce26f998 100644 --- a/website/docs/cdktf/typescript/r/autoscalingplans_scaling_plan.html.markdown +++ b/website/docs/cdktf/typescript/r/autoscalingplans_scaling_plan.html.markdown @@ -172,6 +172,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the scaling plan. Names cannot contain vertical bars, colons, or forward slashes. * `applicationSource` - (Required) CloudFormation stack or set of tags. You can create one scaling plan per application source. * `scalingInstruction` - (Required) Scaling instructions. More details can be found in the [AWS Auto Scaling API Reference](https://docs.aws.amazon.com/autoscaling/plans/APIReference/API_ScalingInstruction.html). @@ -287,4 +288,4 @@ Using `terraform import`, import Auto Scaling scaling plans using the `name`. 
Fo % terraform import aws_autoscalingplans_scaling_plan.example MyScale1 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/backup_framework.html.markdown b/website/docs/cdktf/typescript/r/backup_framework.html.markdown index b52e724bc3a9..96722ed7f18c 100644 --- a/website/docs/cdktf/typescript/r/backup_framework.html.markdown +++ b/website/docs/cdktf/typescript/r/backup_framework.html.markdown @@ -116,6 +116,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `control` - (Required) One or more control blocks that make up the framework. Each control in the list has a name, input parameters, and scope. Detailed below. * `description` - (Optional) The description of the framework with a maximum of 1,024 characters * `name` - (Required) The unique name of the framework. The name must be between 1 and 256 characters, starting with a letter, and consisting of letters, numbers, and underscores. 
@@ -191,4 +192,4 @@ Using `terraform import`, import Backup Framework using the `id` which correspon % terraform import aws_backup_framework.test ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/backup_logically_air_gapped_vault.html.markdown b/website/docs/cdktf/typescript/r/backup_logically_air_gapped_vault.html.markdown index 5fee36e3c1b6..bba65a45f6ce 100644 --- a/website/docs/cdktf/typescript/r/backup_logically_air_gapped_vault.html.markdown +++ b/website/docs/cdktf/typescript/r/backup_logically_air_gapped_vault.html.markdown @@ -42,6 +42,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the Logically Air Gapped Backup Vault to create. * `maxRetentionDays` - (Required) Maximum retention period that the Logically Air Gapped Backup Vault retains recovery points. * `minRetentionDays` - (Required) Minimum retention period that the Logically Air Gapped Backup Vault retains recovery points. 
@@ -94,4 +95,4 @@ Using `terraform import`, import Backup Logically Air Gapped Vault using the `id % terraform import aws_backup_logically_air_gapped_vault.example lag-example-vault ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/backup_plan.html.markdown b/website/docs/cdktf/typescript/r/backup_plan.html.markdown index 0b9f44efd98b..33d456346e61 100644 --- a/website/docs/cdktf/typescript/r/backup_plan.html.markdown +++ b/website/docs/cdktf/typescript/r/backup_plan.html.markdown @@ -56,6 +56,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The display name of a backup plan. * `rule` - (Required) A rule object that specifies a scheduled task that is used to back up a selection of resources. * `advancedBackupSetting` - (Optional) An object that specifies backup options for each resource type. @@ -135,4 +136,4 @@ Using `terraform import`, import Backup Plan using the `id`. 
For example: % terraform import aws_backup_plan.test ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/backup_region_settings.html.markdown b/website/docs/cdktf/typescript/r/backup_region_settings.html.markdown index 15555d856c0e..cb300ae47b9e 100644 --- a/website/docs/cdktf/typescript/r/backup_region_settings.html.markdown +++ b/website/docs/cdktf/typescript/r/backup_region_settings.html.markdown @@ -62,6 +62,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `resourceTypeOptInPreference` - (Required) A map of service names to their opt-in preferences for the Region. See [AWS Documentation on which services support backup](https://docs.aws.amazon.com/aws-backup/latest/devguide/backup-feature-availability.html). * `resourceTypeManagementPreference` - (Optional) A map of service names to their full management preferences for the Region. For more information, see the AWS Documentation on [what full management is](https://docs.aws.amazon.com/aws-backup/latest/devguide/whatisbackup.html#full-management) and [which services support full management](https://docs.aws.amazon.com/aws-backup/latest/devguide/backup-feature-availability.html#features-by-resource). @@ -99,4 +100,4 @@ Using `terraform import`, import Backup Region Settings using the `region`. 
For % terraform import aws_backup_region_settings.test us-west-2 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/backup_report_plan.html.markdown b/website/docs/cdktf/typescript/r/backup_report_plan.html.markdown index 4c028c62f2ce..5d4e85c0de83 100644 --- a/website/docs/cdktf/typescript/r/backup_report_plan.html.markdown +++ b/website/docs/cdktf/typescript/r/backup_report_plan.html.markdown @@ -49,6 +49,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` - (Optional) The description of the report plan with a maximum of 1,024 characters * `name` - (Required) The unique name of the report plan. The name must be between 1 and 256 characters, starting with a letter, and consisting of letters, numbers, and underscores. * `reportDeliveryChannel` - (Required) An object that contains information about where and how to deliver your reports, specifically your Amazon S3 bucket name, S3 key prefix, and the formats of your reports. Detailed below. 
@@ -112,4 +113,4 @@ Using `terraform import`, import Backup Report Plan using the `id` which corresp % terraform import aws_backup_report_plan.test ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/backup_restore_testing_plan.html.markdown b/website/docs/cdktf/typescript/r/backup_restore_testing_plan.html.markdown index c4168f4e9331..cdece63102e8 100644 --- a/website/docs/cdktf/typescript/r/backup_restore_testing_plan.html.markdown +++ b/website/docs/cdktf/typescript/r/backup_restore_testing_plan.html.markdown @@ -45,8 +45,9 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -The following arguments are required: +This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` (Required): The name of the restore testing plan. Must be between 1 and 50 characters long and contain only alphanumeric characters and underscores. * `scheduleExpression` (Required): The schedule expression for the restore testing plan. * `scheduleExpressionTimezone` (Optional): The timezone for the schedule expression. If not provided, the state value will be used. @@ -100,4 +101,4 @@ Using `terraform import`, import Backup Restore Testing Plan using the `name`. 
F % terraform import aws_backup_restore_testing_plan.example my_testing_plan ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/backup_restore_testing_selection.html.markdown b/website/docs/cdktf/typescript/r/backup_restore_testing_selection.html.markdown index 92100ac4daf9..cff882510fc2 100644 --- a/website/docs/cdktf/typescript/r/backup_restore_testing_selection.html.markdown +++ b/website/docs/cdktf/typescript/r/backup_restore_testing_selection.html.markdown @@ -83,6 +83,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The name of the backup restore testing selection. * `restoreTestingPlanName` - (Required) The name of the restore testing plan. * `protectedResourceType` - (Required) The type of the protected resource. 
@@ -138,4 +139,4 @@ Using `terraform import`, import Backup Restore Testing Selection using `name:re % terraform import aws_backup_restore_testing_selection.example restore_testing_selection_12345678:restore_testing_plan_12345678 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/backup_selection.html.markdown b/website/docs/cdktf/typescript/r/backup_selection.html.markdown index c5608a3f5970..48fc1d0f461b 100644 --- a/website/docs/cdktf/typescript/r/backup_selection.html.markdown +++ b/website/docs/cdktf/typescript/r/backup_selection.html.markdown @@ -226,6 +226,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The display name of a resource selection document. * `planId` - (Required) The backup plan ID to be associated with the selection of resources. * `iamRoleArn` - (Required) The ARN of the IAM role that AWS Backup uses to authenticate when restoring and backing up the target resource. See the [AWS Backup Developer Guide](https://docs.aws.amazon.com/aws-backup/latest/devguide/access-control.html#managed-policies) for additional information about using AWS managed policies or creating custom policies attached to the IAM role. 
@@ -317,4 +318,4 @@ Using `terraform import`, import Backup selection using the role plan_id and id % terraform import aws_backup_selection.example plan-id|selection-id ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/backup_vault.html.markdown b/website/docs/cdktf/typescript/r/backup_vault.html.markdown index e49488d4fb7a..3c5bd76c193f 100644 --- a/website/docs/cdktf/typescript/r/backup_vault.html.markdown +++ b/website/docs/cdktf/typescript/r/backup_vault.html.markdown @@ -39,6 +39,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `forceDestroy` - (Optional, Default: `false`) A boolean that indicates that all recovery points stored in the vault are deleted so that the vault can be destroyed without error. * `kmsKeyArn` - (Optional) The server-side encryption key that is used to protect your backups. * `name` - (Required) Name of the backup vault to create. @@ -87,4 +88,4 @@ Using `terraform import`, import Backup vault using the `name`. 
For example: % terraform import aws_backup_vault.test-vault TestVault ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/backup_vault_lock_configuration.html.markdown b/website/docs/cdktf/typescript/r/backup_vault_lock_configuration.html.markdown index 2e8e15e72c1c..aaf6c48e293d 100644 --- a/website/docs/cdktf/typescript/r/backup_vault_lock_configuration.html.markdown +++ b/website/docs/cdktf/typescript/r/backup_vault_lock_configuration.html.markdown @@ -41,6 +41,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `backupVaultName` - (Required) Name of the backup vault to add a lock configuration for. * `changeableForDays` - (Optional) The number of days before the lock date. If omitted creates a vault lock in `governance` mode, otherwise it will create a vault lock in `compliance` mode. * `maxRetentionDays` - (Optional) The maximum retention period that the vault retains its recovery points. 
@@ -85,4 +86,4 @@ Using `terraform import`, import Backup vault lock configuration using the `name % terraform import aws_backup_vault_lock_configuration.test TestVault ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/backup_vault_notifications.html.markdown b/website/docs/cdktf/typescript/r/backup_vault_notifications.html.markdown index 64f92244708f..96b062b4cb16 100644 --- a/website/docs/cdktf/typescript/r/backup_vault_notifications.html.markdown +++ b/website/docs/cdktf/typescript/r/backup_vault_notifications.html.markdown @@ -81,6 +81,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `backupVaultName` - (Required) Name of the backup vault to add notifications for. * `snsTopicArn` - (Required) The Amazon Resource Name (ARN) that specifies the topic for a backup vault’s events * `backupVaultEvents` - (Required) An array of events that indicate the status of jobs to back up resources to the backup vault. @@ -120,4 +121,4 @@ Using `terraform import`, import Backup vault notifications using the `name`. 
Fo % terraform import aws_backup_vault_notifications.test TestVault ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/backup_vault_policy.html.markdown b/website/docs/cdktf/typescript/r/backup_vault_policy.html.markdown index 295b449639b5..bb42472faabd 100644 --- a/website/docs/cdktf/typescript/r/backup_vault_policy.html.markdown +++ b/website/docs/cdktf/typescript/r/backup_vault_policy.html.markdown @@ -82,6 +82,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `backupVaultName` - (Required) Name of the backup vault to add policy for. * `policy` - (Required) The backup vault access policy document in JSON format. @@ -120,4 +121,4 @@ Using `terraform import`, import Backup vault policy using the `name`. 
For examp % terraform import aws_backup_vault_policy.test TestVault ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/batch_compute_environment.html.markdown b/website/docs/cdktf/typescript/r/batch_compute_environment.html.markdown index efca7e78127e..415d4f2967de 100644 --- a/website/docs/cdktf/typescript/r/batch_compute_environment.html.markdown +++ b/website/docs/cdktf/typescript/r/batch_compute_environment.html.markdown @@ -147,7 +147,6 @@ class MyConvertedCode extends TerraformStack { this, "sample_11", { - computeEnvironmentName: "sample", computeResources: { instanceRole: Token.asString( awsIamInstanceProfileEcsInstanceRole.arn @@ -161,6 +160,7 @@ class MyConvertedCode extends TerraformStack { type: "EC2", }, dependsOn: [awsIamRolePolicyAttachmentAwsBatchServiceRole], + name: "sample", serviceRole: awsBatchServiceRole.arn, type: "MANAGED", } @@ -187,7 +187,6 @@ class MyConvertedCode extends TerraformStack { constructor(scope: Construct, name: string) { super(scope, name); new BatchComputeEnvironment(this, "sample", { - computeEnvironmentName: "sample", computeResources: { maxVcpus: 16, securityGroupIds: [Token.asString(awsSecurityGroupSample.id)], @@ -195,6 +194,7 @@ class MyConvertedCode extends TerraformStack { type: "FARGATE", }, dependsOn: [awsBatchServiceRole], + name: "sample", serviceRole: Token.asString(awsIamRoleAwsBatchServiceRole.arn), type: "MANAGED", }); @@ -218,7 +218,6 @@ class MyConvertedCode extends TerraformStack { constructor(scope: Construct, name: string) { super(scope, name); new BatchComputeEnvironment(this, "sample", { - computeEnvironmentName: "sample", computeResources: { allocationStrategy: "BEST_FIT_PROGRESSIVE", instanceRole: ecsInstance.arn, @@ -229,6 +228,7 @@ class MyConvertedCode extends TerraformStack { subnets: [Token.asString(awsSubnetSample.id)], type: "EC2", }, + name: "sample", type: "MANAGED", updatePolicy: { jobExecutionTimeoutMinutes: 30, @@ -244,8 +244,9 @@ 
class MyConvertedCode extends TerraformStack { This resource supports the following arguments: -* `computeEnvironmentName` - (Optional, Forces new resource) The name for your compute environment. Up to 128 letters (uppercase and lowercase), numbers, and underscores are allowed. If omitted, Terraform will assign a random, unique name. -* `computeEnvironmentNamePrefix` - (Optional, Forces new resource) Creates a unique compute environment name beginning with the specified prefix. Conflicts with `computeEnvironmentName`. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `name` - (Optional, Forces new resource) The name for your compute environment. Up to 128 letters (uppercase and lowercase), numbers, and underscores are allowed. If omitted, Terraform will assign a random, unique name. +* `namePrefix` - (Optional, Forces new resource) Creates a unique compute environment name beginning with the specified prefix. Conflicts with `name`. * `computeResources` - (Optional) Details of the compute resources managed by the compute environment. This parameter is required for managed compute environments. See details below. * `eksConfiguration` - (Optional) Details for the Amazon EKS cluster that supports the compute environment. See details below. * `serviceRole` - (Optional) The full Amazon Resource Name (ARN) of the IAM role that allows AWS Batch to make calls to other AWS services on your behalf. @@ -279,6 +280,7 @@ This resource supports the following arguments: `ec2Configuration` supports the following: * `imageIdOverride` - (Optional) The AMI ID used for instances launched in the compute environment that match the image type. 
This setting overrides the `imageId` argument in the [`computeResources`](#compute_resources) block. +* `imageKubernetesVersion` - (Optional) The Kubernetes version for the compute environment. If you don't specify a value, the latest version that AWS Batch supports is used. See [Supported Kubernetes versions](https://docs.aws.amazon.com/batch/latest/userguide/supported_kubernetes_version.html) for the list of Kubernetes versions supported by AWS Batch on Amazon EKS. * `imageType` - (Optional) The image type to match with the instance type to select an AMI. If the `imageIdOverride` parameter isn't specified, then a recent [Amazon ECS-optimized Amazon Linux 2 AMI](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-optimized_AMI.html#al2ami) (`ECS_AL2`) is used. ### launch_template @@ -301,7 +303,7 @@ This resource supports the following arguments: `updatePolicy` supports the following: * `jobExecutionTimeoutMinutes` - (Required) Specifies the job timeout (in minutes) when the compute environment infrastructure is updated. -* `terminateJobsOnUpdate` - (Required) Specifies whether jobs are automatically terminated when the computer environment infrastructure is updated. +* `terminateJobsOnUpdate` - (Required) Specifies whether jobs are automatically terminated when the compute environment infrastructure is updated. ## Attribute Reference @@ -315,7 +317,28 @@ This resource exports the following attributes in addition to the arguments abov ## Import -In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import AWS Batch compute using the `computeEnvironmentName`. For example: +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. 
For example: + +```terraform +import { + to = aws_batch_compute_environment.example + identity = { + "arn" = "arn:aws:batch:us-east-1:123456789012:compute-environment/sample" + } +} + +resource "aws_batch_compute_environment" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the compute environment. + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import AWS Batch compute using the `name`. For example: ```typescript // DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug @@ -335,7 +358,7 @@ class MyConvertedCode extends TerraformStack { ``` -Using `terraform import`, import AWS Batch compute using the `computeEnvironmentName`. For example: +Using `terraform import`, import AWS Batch compute using the `name`. For example: ```console % terraform import aws_batch_compute_environment.sample sample @@ -345,4 +368,4 @@ Using `terraform import`, import AWS Batch compute using the `computeEnvironment [2]: http://docs.aws.amazon.com/batch/latest/userguide/compute_environments.html [3]: http://docs.aws.amazon.com/batch/latest/userguide/troubleshooting.html - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/batch_job_definition.html.markdown b/website/docs/cdktf/typescript/r/batch_job_definition.html.markdown index 37cf7a306424..86f3258f9865 100644 --- a/website/docs/cdktf/typescript/r/batch_job_definition.html.markdown +++ b/website/docs/cdktf/typescript/r/batch_job_definition.html.markdown @@ -353,6 +353,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `containerProperties` - (Optional) Valid [container properties](http://docs.aws.amazon.com/batch/latest/APIReference/API_RegisterJobDefinition.html) provided as a single valid JSON document. This parameter is only valid if the `type` parameter is `container`. * `deregisterOnNewRevision` - (Optional) When updating a job definition a new revision is created. This parameter determines if the previous version is `deregistered` (`INACTIVE`) or left `ACTIVE`. Defaults to `true`. * `ecsProperties` - (Optional) Valid [ECS properties](http://docs.aws.amazon.com/batch/latest/APIReference/API_RegisterJobDefinition.html) provided as a single valid JSON document. This parameter is only valid if the `type` parameter is `container`. @@ -415,7 +416,7 @@ The following arguments are optional: #### eks_metadata -* `labels` - Key-value pairs used to identify, sort, and organize cube resources. +* `labels` - Key-value pairs used to identify, sort, and organize kubernetes resources. #### `eks_secret` @@ -449,6 +450,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_batch_job_definition.example + identity = { + "arn" = "arn:aws:batch:us-east-1:123456789012:job-definition/sample:1" + } +} + +resource "aws_batch_job_definition" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the job definition. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Batch Job Definition using the `arn`. 
For example: ```typescript @@ -479,4 +501,4 @@ Using `terraform import`, import Batch Job Definition using the `arn`. For examp % terraform import aws_batch_job_definition.test arn:aws:batch:us-east-1:123456789012:job-definition/sample ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/batch_job_queue.html.markdown b/website/docs/cdktf/typescript/r/batch_job_queue.html.markdown index 5cc228e531fb..cebaceacabdb 100644 --- a/website/docs/cdktf/typescript/r/batch_job_queue.html.markdown +++ b/website/docs/cdktf/typescript/r/batch_job_queue.html.markdown @@ -103,8 +103,8 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Specifies the name of the job queue. -* `computeEnvironments` - (Deprecated) (Optional) This parameter is deprecated, please use `computeEnvironmentOrder` instead. List of compute environment ARNs mapped to a job queue. The position of the compute environments in the list will dictate the order. When importing a AWS Batch Job Queue, the parameter `computeEnvironments` will always be used over `computeEnvironmentOrder`. Please adjust your HCL accordingly. * `computeEnvironmentOrder` - (Optional) The set of compute environments mapped to a job queue and their order relative to each other. The job scheduler uses this parameter to determine which compute environment runs a specific job. Compute environments must be in the VALID state before you can associate them with a job queue. You can associate up to three compute environments with a job queue. 
* `jobStateTimeLimitAction` - (Optional) The set of job state time limit actions mapped to a job queue. Specifies an action that AWS Batch will take after the job has remained at the head of the queue in the specified state for longer than the specified time. * `priority` - (Required) The priority of the job queue. Job queues with a higher priority @@ -142,6 +142,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_batch_job_queue.example + identity = { + "arn" = "arn:aws:batch:us-east-1:123456789012:job-queue/sample" + } +} + +resource "aws_batch_job_queue" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the job queue. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Batch Job Queue using the `arn`. For example: ```typescript @@ -172,4 +193,4 @@ Using `terraform import`, import Batch Job Queue using the `arn`. 
For example: % terraform import aws_batch_job_queue.test_queue arn:aws:batch:us-east-1:123456789012:job-queue/sample ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/batch_scheduling_policy.html.markdown b/website/docs/cdktf/typescript/r/batch_scheduling_policy.html.markdown index 8c421a7c80d1..db2d3c684d34 100644 --- a/website/docs/cdktf/typescript/r/batch_scheduling_policy.html.markdown +++ b/website/docs/cdktf/typescript/r/batch_scheduling_policy.html.markdown @@ -55,6 +55,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `fairshare_policy` - (Optional) A fairshare policy block specifies the `computeReservation`, `share_delay_seconds`, and `shareDistribution` of the scheduling policy. The `fairshare_policy` block is documented below. * `name` - (Required) Specifies the name of the scheduling policy. * `tags` - (Optional) Key-value map of resource tags. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. @@ -109,4 +110,4 @@ Using `terraform import`, import Batch Scheduling Policy using the `arn`. 
For ex % terraform import aws_batch_scheduling_policy.test_policy arn:aws:batch:us-east-1:123456789012:scheduling-policy/sample ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/bcmdataexports_export.html.markdown b/website/docs/cdktf/typescript/r/bcmdataexports_export.html.markdown index 7e0bb4bb473e..0aa1e4791bc0 100644 --- a/website/docs/cdktf/typescript/r/bcmdataexports_export.html.markdown +++ b/website/docs/cdktf/typescript/r/bcmdataexports_export.html.markdown @@ -25,9 +25,15 @@ import { Token, TerraformStack } from "cdktf"; * See https://cdk.tf/provider-generation for more details. */ import { BcmdataexportsExport } from "./.gen/providers/aws/bcmdataexports-export"; +import { DataAwsCallerIdentity } from "./.gen/providers/aws/data-aws-caller-identity"; +import { DataAwsPartition } from "./.gen/providers/aws/data-aws-partition"; class MyConvertedCode extends TerraformStack { constructor(scope: Construct, name: string) { super(scope, name); + const current = new DataAwsCallerIdentity(this, "current", {}); + const dataAwsPartitionCurrent = new DataAwsPartition(this, "current_1", {}); + /*This allows the Terraform resource name to match the original name. 
You can remove the call if you don't need them to match.*/ + dataAwsPartitionCurrent.overrideLogicalId("current"); new BcmdataexportsExport(this, "test", { export: [ { @@ -37,6 +43,12 @@ class MyConvertedCode extends TerraformStack { "SELECT identity_line_item_id, identity_time_interval, line_item_product_code,line_item_unblended_cost FROM COST_AND_USAGE_REPORT", tableConfigurations: { COST_AND_USAGE_REPORT: { + BILLING_VIEW_ARN: + "arn:${" + + dataAwsPartitionCurrent.partition + + "}:billing::${" + + current.accountId + + "}:billingview/primary", INCLUDE_MANUAL_DISCOUNT_COMPATIBILITY: "FALSE", INCLUDE_RESOURCES: "FALSE", INCLUDE_SPLIT_COST_ALLOCATION_DATA: "FALSE", @@ -94,8 +106,8 @@ The following arguments are required: ### `dataQuery` Argument Reference -* `queryStatement` - (Required) Query statement. -* `tableConfigurations` - (Optional) Table configuration. +* `queryStatement` - (Required) Query statement. The SQL table name for CUR 2.0 is `COST_AND_USAGE_REPORT`. See the [AWS documentation](https://docs.aws.amazon.com/cur/latest/userguide/table-dictionary-cur2.html) for a list of available columns. +* `tableConfigurations` - (Optional) Table configuration. See the [AWS documentation](https://docs.aws.amazon.com/cur/latest/userguide/table-dictionary-cur2.html#cur2-table-configurations) for the available configurations. In addition to those listed in the documentation, `BILLING_VIEW_ARN` must also be included, as shown in the example above. ### `destinationConfigurations` Argument Reference @@ -123,7 +135,8 @@ The following arguments are required: This resource exports the following attributes in addition to the arguments above: -* `exportArn` - Amazon Resource Name (ARN) for this export. +* `arn` - Amazon Resource Name (ARN) for this export. +* `export[0].export_arn` - Amazon Resource Name (ARN) for this export. 
## Timeouts @@ -134,6 +147,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_bcmdataexports_export.example + identity = { + "arn" = "arn:aws:bcm-data-exports:us-east-1:123456789012:export/example-export" + } +} + +resource "aws_bcmdataexports_export" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the BCM Data Exports export. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import BCM Data Exports Export using the export ARN. For example: ```typescript @@ -164,4 +198,4 @@ Using `terraform import`, import BCM Data Exports Export using the export ARN. F % terraform import aws_bcmdataexports_export.example arn:aws:bcm-data-exports:us-east-1:123456789012:export/CostUsageReport-9f1c75f3-f982-4d9a-b936-1e7ecab814b7 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/bedrock_custom_model.html.markdown b/website/docs/cdktf/typescript/r/bedrock_custom_model.html.markdown index a318225f9a0b..d592223034b6 100644 --- a/website/docs/cdktf/typescript/r/bedrock_custom_model.html.markdown +++ b/website/docs/cdktf/typescript/r/bedrock_custom_model.html.markdown @@ -80,6 +80,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
* `baseModelIdentifier` - (Required) The Amazon Resource Name (ARN) of the base model. * `customModelKmsKeyId` - (Optional) The custom model is encrypted at rest using this key. Specify the key ARN. * `customModelName` - (Required) Name for the custom model. @@ -96,8 +97,8 @@ This resource supports the following arguments: * `validator` - (Required) Information about the validators. * `s3Uri` - (Required) The S3 URI where the validation data is stored. * `vpcConfig` - (Optional) Configuration parameters for the private Virtual Private Cloud (VPC) that contains the resources you are using for this job. - * `securityGroupIds` – (Required) VPC configuration security group IDs. - * `subnetIds` – (Required) VPC configuration subnets. + * `securityGroupIds` - (Required) VPC configuration security group IDs. + * `subnetIds` - (Required) VPC configuration subnets. ## Attribute Reference @@ -120,6 +121,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_bedrock_custom_model.example + identity = { + "arn" = "arn:aws:bedrock:us-west-2:123456789012:custom-model/amazon.titan-text-lite-v1:0:4k/example-model" + } +} + +resource "aws_bedrock_custom_model" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the Bedrock custom model. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Bedrock Custom Model using the `jobArn`. For example: ```typescript @@ -150,4 +172,4 @@ Using `terraform import`, import Bedrock custom model using the `jobArn`. 
For ex % terraform import aws_bedrock_custom_model.example arn:aws:bedrock:us-west-2:123456789012:model-customization-job/amazon.titan-text-express-v1:0:8k/1y5n57gh5y2e ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/bedrock_guardrail.html.markdown b/website/docs/cdktf/typescript/r/bedrock_guardrail.html.markdown index dee81102db7b..33d7416a764b 100644 --- a/website/docs/cdktf/typescript/r/bedrock_guardrail.html.markdown +++ b/website/docs/cdktf/typescript/r/bedrock_guardrail.html.markdown @@ -29,19 +29,30 @@ resource "aws_bedrock_guardrail" "example" { output_strength = "MEDIUM" type = "HATE" } + tier_config { + tier_name = "STANDARD" + } } sensitive_information_policy_config { pii_entities_config { - action = "BLOCK" - type = "NAME" + action = "BLOCK" + input_action = "BLOCK" + output_action = "ANONYMIZE" + input_enabled = true + output_enabled = true + type = "NAME" } regexes_config { - action = "BLOCK" - description = "example regex" - name = "regex_example" - pattern = "^\\d{3}-\\d{2}-\\d{4}$" + action = "BLOCK" + input_action = "BLOCK" + output_action = "BLOCK" + input_enabled = true + output_enabled = false + description = "example regex" + name = "regex_example" + pattern = "^\\d{3}-\\d{2}-\\d{4}$" } } @@ -52,6 +63,9 @@ resource "aws_bedrock_guardrail" "example" { type = "DENY" definition = "Investment advice refers to inquiries, guidance, or recommendations regarding the management or allocation of funds or assets with the goal of generating returns ." } + tier_config { + tier_name = "CLASSIC" + } } word_policy_config { @@ -75,6 +89,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `contentPolicyConfig` - (Optional) Content policy config for a guardrail. See [Content Policy Config](#content-policy-config) for more information. * `contextualGroundingPolicyConfig` - (Optional) Contextual grounding policy config for a guardrail. See [Contextual Grounding Policy Config](#contextual-grounding-policy-config) for more information. * `description` (Optional) Description of the guardrail or its version. @@ -90,6 +105,7 @@ The `contentPolicyConfig` configuration block supports the following arguments: * `filtersConfig` - (Optional) Set of content filter configs in content policy. See [Filters Config](#content-filters-config) for more information. +* `tierConfig` - (Optional) Configuration block for the content policy tier. See [Tier Config](#content-tier-config) for more information. #### Content Filters Config @@ -99,6 +115,12 @@ The `filtersConfig` configuration block supports the following arguments: * `outputStrength` - (Optional) Strength for filters. * `type` - (Optional) Type of filter in content policy. +#### Content Tier Config + +The `tierConfig` configuration block supports the following arguments: + +* `tier_name` - (Required) The name of the content policy tier. Valid values include STANDARD or CLASSIC. + ### Contextual Grounding Policy Config * `filtersConfig` (Required) List of contextual grounding filter configs. See [Contextual Grounding Filters Config](#contextual-grounding-filters-config) for more information. @@ -110,8 +132,17 @@ The `filtersConfig` configuration block supports the following arguments: * `threshold` - (Required) The threshold for this filter. * `type` - (Required) Type of contextual grounding filter. +### Cross Region Inference + +* `crossRegionConfig` (Optional) Configuration block to enable cross-region routing for bedrock guardrails. 
See [Cross Region Config](#cross-region-config) for more information. Note: see [available regions](https://docs.aws.amazon.com/bedrock/latest/userguide/guardrails-cross-region.html) here. + +#### Cross Region Config + +* `guardrailProfileIdentifier` (Required) Guardrail profile ARN. + ### Topic Policy Config +* `tierConfig` - (Optional) Configuration block for the topic policy tier. See [Tier Config](#topics-tier-config) for more information. * `topicsConfig` (Required) List of topic configs in topic policy. See [Topics Config](#topics-config) for more information. #### Topics Config @@ -121,6 +152,12 @@ The `filtersConfig` configuration block supports the following arguments: * `type` (Required) Type of topic in a policy. * `examples` (Optional) List of text examples. +#### Topics Tier Config + +The `tierConfig` configuration block supports the following arguments: + +* `tier_name` - (Required) The name of the topic policy tier. Valid values include STANDARD or CLASSIC. + ### Sensitive Information Policy Config * `piiEntitiesConfig` (Optional) List of entities. See [PII Entities Config](#pii-entities-config) for more information. @@ -128,13 +165,21 @@ The `filtersConfig` configuration block supports the following arguments: #### PII Entities Config -* `action` (Required) Options for sensitive information action. +* `action` (Required) Options for sensitive information action. Valid values: `BLOCK`, `ANONYMIZE`, `NONE`. +* `inputAction` (Optional) Action to take when harmful content is detected in the input. Valid values: `BLOCK`, `ANONYMIZE`, `NONE`. +* `inputEnabled` (Optional) Whether to enable guardrail evaluation on the input. When disabled, you aren't charged for the evaluation. +* `outputAction` (Optional) Action to take when harmful content is detected in the output. Valid values: `BLOCK`, `ANONYMIZE`, `NONE`. +* `outputEnabled` (Optional) Whether to enable guardrail evaluation on the output. When disabled, you aren't charged for the evaluation. 
* `type` (Required) The currently supported PII entities. #### Regexes Config -* `action` (Required) Options for sensitive information action. +* `action` (Required) Options for sensitive information action. Valid values: `BLOCK`, `ANONYMIZE`, `NONE`. +* `inputAction` (Optional) Action to take when harmful content is detected in the input. Valid values: `BLOCK`, `ANONYMIZE`, `NONE`. +* `inputEnabled` (Optional) Whether to enable guardrail evaluation on the input. When disabled, you aren't charged for the evaluation. * `name` (Required) The regex name. +* `outputAction` (Optional) Action to take when harmful content is detected in the output. Valid values: `BLOCK`, `ANONYMIZE`, `NONE`. +* `outputEnabled` (Optional) Whether to enable guardrail evaluation on the output. When disabled, you aren't charged for the evaluation. * `pattern` (Required) The regex pattern. * `description` (Optional) The regex description. @@ -146,10 +191,18 @@ The `filtersConfig` configuration block supports the following arguments: #### Managed Word Lists Config * `type` (Required) Options for managed words. +* `inputAction` (Optional) Action to take when harmful content is detected in the input. Valid values: `BLOCK`, `NONE`. +* `inputEnabled` (Optional) Whether to enable guardrail evaluation on the input. When disabled, you aren't charged for the evaluation. +* `outputAction` (Optional) Action to take when harmful content is detected in the output. Valid values: `BLOCK`, `NONE`. +* `outputEnabled` (Optional) Whether to enable guardrail evaluation on the output. When disabled, you aren't charged for the evaluation. #### Words Config * `text` (Required) The custom word text. +* `inputAction` (Optional) Action to take when harmful content is detected in the input. Valid values: `BLOCK`, `NONE`. +* `inputEnabled` (Optional) Whether to enable guardrail evaluation on the input. When disabled, you aren't charged for the evaluation. 
+* `outputAction` (Optional) Action to take when harmful content is detected in the output. Valid values: `BLOCK`, `NONE`. +* `outputEnabled` (Optional) Whether to enable guardrail evaluation on the output. When disabled, you aren't charged for the evaluation. ## Attribute Reference @@ -201,4 +254,4 @@ Using `terraform import`, import Amazon Bedrock Guardrail using using a comma-de % terraform import aws_bedrock_guardrail.example guardrail-id-12345678,DRAFT ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/bedrock_guardrail_version.html.markdown b/website/docs/cdktf/typescript/r/bedrock_guardrail_version.html.markdown index 8a6d5d2bfedc..c7ff1fddf1c9 100644 --- a/website/docs/cdktf/typescript/r/bedrock_guardrail_version.html.markdown +++ b/website/docs/cdktf/typescript/r/bedrock_guardrail_version.html.markdown @@ -45,6 +45,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` - (Optional) Description of the Guardrail version. * `skipDestroy` - (Optional) Whether to retain the old version of a previously deployed Guardrail. 
Default is `false` @@ -93,4 +94,4 @@ Using `terraform import`, import Amazon Bedrock Guardrail Version using using a % terraform import aws_bedrock_guardrail_version.example arn:aws:bedrock:us-west-2:123456789012:guardrail-id-12345678,1 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/bedrock_inference_profile.html.markdown b/website/docs/cdktf/typescript/r/bedrock_inference_profile.html.markdown index 73e5a4adccdd..6afdc4b4b5dc 100644 --- a/website/docs/cdktf/typescript/r/bedrock_inference_profile.html.markdown +++ b/website/docs/cdktf/typescript/r/bedrock_inference_profile.html.markdown @@ -24,16 +24,16 @@ import { TerraformStack } from "cdktf"; * Provider bindings are generated by running `cdktf get`. * See https://cdk.tf/provider-generation for more details. */ -import { BedrockInferenceProfile } from "./.gen/providers/aws/"; +import { BedrockInferenceProfile } from "./.gen/providers/aws/bedrock-inference-profile"; import { DataAwsCallerIdentity } from "./.gen/providers/aws/data-aws-caller-identity"; class MyConvertedCode extends TerraformStack { constructor(scope: Construct, name: string) { super(scope, name); new BedrockInferenceProfile(this, "example", { description: "Profile with tag for cost allocation tracking", - model_source: [ + modelSource: [ { - copy_from: + copyFrom: "arn:aws:bedrock:us-west-2::foundation-model/anthropic.claude-3-5-sonnet-20241022-v2:0", }, ], @@ -53,16 +53,17 @@ class MyConvertedCode extends TerraformStack { The following arguments are required: * `name` - (Required) The name of the inference profile. -* `model_source` - (Required) The source of the model this inference profile will track metrics and cost for. See [`model_source`](#model_source). +* `modelSource` - (Required) The source of the model this inference profile will track metrics and cost for. See [`modelSource`](#model_source). 
The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` - (Optional) The description of the inference profile. * `tags` - (Optional) Key-value mapping of resource tags for the inference profile. -### `model_source` +### `modelSource` -- `copy_from` - The Amazon Resource Name (ARN) of the model. +- `copyFrom` - The Amazon Resource Name (ARN) of the model. ## Attribute Reference @@ -92,7 +93,7 @@ This resource exports the following attributes in addition to the arguments abov ## Import -In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Bedrock Inference Profile using the `example_id_arg`. For example: +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Bedrock Inference Profile using the `name`. For example: ```typescript // DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug @@ -102,7 +103,7 @@ import { TerraformStack } from "cdktf"; * Provider bindings are generated by running `cdktf get`. * See https://cdk.tf/provider-generation for more details. */ -import { BedrockInferenceProfile } from "./.gen/providers/aws/"; +import { BedrockInferenceProfile } from "./.gen/providers/aws/bedrock-inference-profile"; class MyConvertedCode extends TerraformStack { constructor(scope: Construct, name: string) { super(scope, name); @@ -116,10 +117,10 @@ class MyConvertedCode extends TerraformStack { ``` -Using `terraform import`, import Bedrock Inference Profile using the `example_id_arg`. For example: +Using `terraform import`, import Bedrock Inference Profile using the `name`. 
For example: ```console % terraform import aws_bedrock_inference_profile.example inference_profile-id-12345678 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/bedrock_model_invocation_logging_configuration.html.markdown b/website/docs/cdktf/typescript/r/bedrock_model_invocation_logging_configuration.html.markdown index c5364e584f14..305cc5c6eef4 100644 --- a/website/docs/cdktf/typescript/r/bedrock_model_invocation_logging_configuration.html.markdown +++ b/website/docs/cdktf/typescript/r/bedrock_model_invocation_logging_configuration.html.markdown @@ -83,42 +83,43 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -The following arguments are required: +This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `loggingConfig` - (Required) The logging configuration values to set. See [`loggingConfig` Block](#logging_config-block) for details. ### `loggingConfig` Block The `loggingConfig` configuration block supports the following arguments: -* `cloudwatchConfig` – (Optional) CloudWatch logging configuration. See [`cloudwatchConfig` Block](#cloudwatch_config-block) for details. -* `embeddingDataDeliveryEnabled` – (Optional) Set to include embeddings data in the log delivery. Defaults to `true`. -* `imageDataDeliveryEnabled` – (Optional) Set to include image data in the log delivery. Defaults to `true`. -* `s3Config` – (Optional) S3 configuration for storing log data. See [`s3Config` Block](#s3_config-block) for details. -* `textDataDeliveryEnabled` – (Optional) Set to include text data in the log delivery. Defaults to `true`. 
-* `videoDataDeliveryEnabled` – (Optional) Set to include text data in the log delivery. Defaults to `true`. +* `cloudwatchConfig` - (Optional) CloudWatch logging configuration. See [`cloudwatchConfig` Block](#cloudwatch_config-block) for details. +* `embeddingDataDeliveryEnabled` - (Optional) Set to include embeddings data in the log delivery. Defaults to `true`. +* `imageDataDeliveryEnabled` - (Optional) Set to include image data in the log delivery. Defaults to `true`. +* `s3Config` - (Optional) S3 configuration for storing log data. See [`s3Config` Block](#s3_config-block) for details. +* `textDataDeliveryEnabled` - (Optional) Set to include text data in the log delivery. Defaults to `true`. +* `videoDataDeliveryEnabled` - (Optional) Set to include video data in the log delivery. Defaults to `true`. ### `cloudwatchConfig` Block The `cloudwatchConfig` configuration block supports the following arguments: -* `largeDataDeliveryS3Config` – (Optional) S3 configuration for delivering a large amount of data. See [`largeDataDeliveryS3Config` Block](#large_data_delivery_s3_config-block) for details. -* `logGroupName` – (Required) Log group name. -* `roleArn` – (Optional) The role ARN. +* `largeDataDeliveryS3Config` - (Optional) S3 configuration for delivering a large amount of data. See [`largeDataDeliveryS3Config` Block](#large_data_delivery_s3_config-block) for details. +* `logGroupName` - (Required) Log group name. +* `roleArn` - (Optional) The role ARN. ### `largeDataDeliveryS3Config` Block The `largeDataDeliveryS3Config` configuration block supports the following arguments: -* `bucketName` – (Required) S3 bucket name. -* `keyPrefix` – (Optional) S3 prefix. +* `bucketName` - (Required) S3 bucket name. +* `keyPrefix` - (Optional) S3 prefix. ### `s3Config` Block The `s3Config` configuration block supports the following arguments: -* `bucketName` – (Required) S3 bucket name. 
+* `keyPrefix` - (Optional) S3 prefix. ## Attribute Reference @@ -158,4 +159,4 @@ Using `terraform import`, import Bedrock custom model using the `id` set to the % terraform import aws_bedrock_model_invocation_logging_configuration.my_config us-east-1 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/bedrock_provisioned_model_throughput.html.markdown b/website/docs/cdktf/typescript/r/bedrock_provisioned_model_throughput.html.markdown index 21add6c9da09..33dbf3d57959 100644 --- a/website/docs/cdktf/typescript/r/bedrock_provisioned_model_throughput.html.markdown +++ b/website/docs/cdktf/typescript/r/bedrock_provisioned_model_throughput.html.markdown @@ -42,6 +42,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `commitmentDuration` - (Optional) Commitment duration requested for the Provisioned Throughput. For custom models, you can purchase on-demand Provisioned Throughput by omitting this argument. Valid values: `OneMonth`, `SixMonths`. * `modelArn` - (Required) ARN of the model to associate with this Provisioned Throughput. * `modelUnits` - (Required) Number of model units to allocate. A model unit delivers a specific throughput level for the specified model. @@ -63,6 +64,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. 
For example: + +```terraform +import { + to = aws_bedrock_provisioned_model_throughput.example + identity = { + "arn" = "arn:aws:bedrock:us-west-2:123456789012:provisioned-model/a1b2c3d4567890ab" + } +} + +resource "aws_bedrock_provisioned_model_throughput" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the Bedrock provisioned model throughput. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Provisioned Throughput using the `provisionedModelArn`. For example: ```typescript @@ -93,4 +115,4 @@ Using `terraform import`, import Provisioned Throughput using the `provisionedMo % terraform import aws_bedrock_provisioned_model_throughput.example arn:aws:bedrock:us-west-2:123456789012:provisioned-model/1y5n57gh5y2e ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/bedrockagent_agent.html.markdown b/website/docs/cdktf/typescript/r/bedrockagent_agent.html.markdown index 46a89abe9bc4..ae8a22f63927 100644 --- a/website/docs/cdktf/typescript/r/bedrockagent_agent.html.markdown +++ b/website/docs/cdktf/typescript/r/bedrockagent_agent.html.markdown @@ -51,7 +51,7 @@ class MyConvertedCode extends TerraformStack { "arn:${" + dataAwsPartitionCurrent.partition + "}:bedrock:${" + - dataAwsRegionCurrent.name + + dataAwsRegionCurrent.region + "}::foundation-model/anthropic.claude-v2", ], }, @@ -77,7 +77,7 @@ class MyConvertedCode extends TerraformStack { "arn:${" + dataAwsPartitionCurrent.partition + "}:bedrock:${" + - dataAwsRegionCurrent.name + + dataAwsRegionCurrent.region + "}:${" + current.accountId + "}:agent/*", @@ -132,6 +132,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be 
[managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `agentCollaboration` - (Optional) Agents collaboration role. Valid values: `SUPERVISOR`, `SUPERVISOR_ROUTER`, `DISABLED`. * `customerEncryptionKeyArn` - (Optional) ARN of the AWS KMS key that encrypts the agent. * `description` - (Optional) Description of the agent. @@ -233,4 +234,4 @@ Using `terraform import`, import Agents for Amazon Bedrock Agent using the agent % terraform import aws_bedrockagent_agent.example GGRRAED6JP ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/bedrockagent_agent_action_group.html.markdown b/website/docs/cdktf/typescript/r/bedrockagent_agent_action_group.html.markdown index 8f3496f1a4b7..9e7417a84393 100644 --- a/website/docs/cdktf/typescript/r/bedrockagent_agent_action_group.html.markdown +++ b/website/docs/cdktf/typescript/r/bedrockagent_agent_action_group.html.markdown @@ -195,6 +195,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `actionGroupState` - (Optional) Whether the action group is available for the agent to invoke or not when sending an [InvokeAgent](https://docs.aws.amazon.com/bedrock/latest/APIReference/API_agent-runtime_InvokeAgent.html) request. Valid values: `ENABLED`, `DISABLED`. * `apiSchema` - (Optional) Either details about the S3 object containing the OpenAPI schema for the action group or the JSON or YAML-formatted payload defining the schema. 
For more information, see [Action group OpenAPI schemas](https://docs.aws.amazon.com/bedrock/latest/userguide/agents-api-schema.html). See [`apiSchema` Block](#api_schema-block) for details. * `description` - (Optional) Description of the action group. @@ -277,6 +278,7 @@ This resource exports the following attributes in addition to the arguments abov * `create` - (Default `30m`) * `update` - (Default `30m`) +* `delete` - (Default `30m`) ## Import @@ -310,4 +312,4 @@ Using `terraform import`, import Agents for Amazon Bedrock Agent Action Group th % terraform import aws_bedrockagent_agent_action_group.example MMAUDBZTH4,GGRRAED6JP,DRAFT ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/bedrockagent_agent_alias.html.markdown b/website/docs/cdktf/typescript/r/bedrockagent_agent_alias.html.markdown index 1627c20fa4f2..af45357b3fc4 100644 --- a/website/docs/cdktf/typescript/r/bedrockagent_agent_alias.html.markdown +++ b/website/docs/cdktf/typescript/r/bedrockagent_agent_alias.html.markdown @@ -52,7 +52,7 @@ class MyConvertedCode extends TerraformStack { "arn:${" + dataAwsPartitionCurrent.partition + "}:bedrock:${" + - dataAwsRegionCurrent.name + + dataAwsRegionCurrent.region + "}::foundation-model/anthropic.claude-v2", ], }, @@ -78,7 +78,7 @@ class MyConvertedCode extends TerraformStack { "arn:${" + dataAwsPartitionCurrent.partition + "}:bedrock:${" + - dataAwsRegionCurrent.name + + dataAwsRegionCurrent.region + "}:${" + current.accountId + "}:agent/*", @@ -143,6 +143,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` - (Optional) Description of the alias. 
* `routingConfiguration` - (Optional) Details about the routing configuration of the alias. See [`routingConfiguration` Block](#routing_configuration-block) for details. * `tags` - (Optional) Map of tags assigned to the resource. If configured with a provider [`defaultTags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. @@ -203,4 +204,4 @@ Using `terraform import`, import Agents for Amazon Bedrock Agent Alias using the % terraform import aws_bedrockagent_agent_alias.example 66IVY0GUTF,GGRRAED6JP ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/bedrockagent_agent_collaborator.html.markdown b/website/docs/cdktf/typescript/r/bedrockagent_agent_collaborator.html.markdown index 5ecf53d760e4..788274fe2122 100644 --- a/website/docs/cdktf/typescript/r/bedrockagent_agent_collaborator.html.markdown +++ b/website/docs/cdktf/typescript/r/bedrockagent_agent_collaborator.html.markdown @@ -53,7 +53,7 @@ class MyConvertedCode extends TerraformStack { "arn:${" + dataAwsPartitionCurrent.partition + "}:bedrock:${" + - dataAwsRegionCurrent.name + + dataAwsRegionCurrent.region + "}::foundation-model/anthropic.claude-3-5-sonnet-20241022-v2:0", ], }, @@ -63,14 +63,14 @@ class MyConvertedCode extends TerraformStack { "arn:${" + currentAgent.partition + "}:bedrock:${" + - dataAwsRegionCurrent.name + + dataAwsRegionCurrent.region + "}:${" + current.accountId + "}:agent/*", "arn:${" + currentAgent.partition + "}:bedrock:${" + - dataAwsRegionCurrent.name + + dataAwsRegionCurrent.region + "}:${" + current.accountId + "}:agent-alias/*", @@ -98,7 +98,7 @@ class MyConvertedCode extends TerraformStack { "arn:${" + dataAwsPartitionCurrent.partition + "}:bedrock:${" + - dataAwsRegionCurrent.name + + dataAwsRegionCurrent.region + "}:${" + current.accountId + "}:agent/*", @@ -188,10 +188,11 @@ The following arguments are 
required: * `agentId` - (Required) ID if the agent to associate the collaborator. * `collaborationInstruction` - (Required) Instruction to give the collaborator. -* `collbaorator_name` - (Required) Name of this collaborator. +* `collaboratorName` - (Required) Name of this collaborator. The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `prepareAgent` (Optional) Whether to prepare the agent after creation or modification. Defaults to `true`. * `relayConversationHistory` - (Optional) Configure relaying the history to the collaborator. @@ -247,4 +248,4 @@ Using `terraform import`, import Bedrock Agents Agent Collaborator using a comma % terraform import aws_bedrockagent_agent_collaborator.example 9LSJO0BFI8,DRAFT,AG3TN4RQIY ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/bedrockagent_agent_knowledge_base_association.html.markdown b/website/docs/cdktf/typescript/r/bedrockagent_agent_knowledge_base_association.html.markdown index ee1e81193bd8..323c56c121fc 100644 --- a/website/docs/cdktf/typescript/r/bedrockagent_agent_knowledge_base_association.html.markdown +++ b/website/docs/cdktf/typescript/r/bedrockagent_agent_knowledge_base_association.html.markdown @@ -49,6 +49,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
* `agentVersion` - (Optional, Forces new resource) Version of the agent with which you want to associate the knowledge base. Valid values: `DRAFT`. ## Attribute Reference @@ -63,6 +64,7 @@ This resource exports the following attributes in addition to the arguments abov * `create` - (Default `5m`) * `update` - (Default `5m`) +* `delete` - (Default `5m`) ## Import @@ -96,4 +98,4 @@ Using `terraform import`, import Agents for Amazon Bedrock Agent Knowledge Base % terraform import aws_bedrockagent_agent_knowledge_base_association.example GGRRAED6JP,DRAFT,EMDPPAYPZI ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/bedrockagent_data_source.html.markdown b/website/docs/cdktf/typescript/r/bedrockagent_data_source.html.markdown index 0761e705171e..85ac3c921f61 100644 --- a/website/docs/cdktf/typescript/r/bedrockagent_data_source.html.markdown +++ b/website/docs/cdktf/typescript/r/bedrockagent_data_source.html.markdown @@ -57,6 +57,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `dataDeletionPolicy` - (Optional) Data deletion policy for a data source. Valid values: `RETAIN`, `DELETE`. * `description` - (Optional) Description of the data source. * `serverSideEncryptionConfiguration` - (Optional) Details about the configuration of the server-side encryption. See [`serverSideEncryptionConfiguration` block](#server_side_encryption_configuration-block) for details. 
@@ -363,4 +364,4 @@ Using `terraform import`, import Agents for Amazon Bedrock Data Source using the [3]: https://docs.aws.amazon.com/bedrock/latest/APIReference/API_agent_SharePointDataSourceConfiguration.html [4]: https://docs.aws.amazon.com/bedrock/latest/APIReference/API_agent_WebDataSourceConfiguration.html - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/bedrockagent_flow.html.markdown b/website/docs/cdktf/typescript/r/bedrockagent_flow.html.markdown new file mode 100644 index 000000000000..c1be5da333e3 --- /dev/null +++ b/website/docs/cdktf/typescript/r/bedrockagent_flow.html.markdown @@ -0,0 +1,464 @@ +--- +subcategory: "Bedrock Agents" +layout: "aws" +page_title: "AWS: aws_bedrockagent_flow" +description: |- + Terraform resource for managing an AWS Bedrock Agents Flow. +--- + + + +# Resource: aws_bedrockagent_flow + +Terraform resource for managing an AWS Bedrock Agents Flow. + +### Basic Usage + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { Token, TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. + */ +import { BedrockagentFlow } from "./.gen/providers/aws/bedrockagent-flow"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + new BedrockagentFlow(this, "example", { + executionRoleArn: Token.asString(awsIamRoleExample.arn), + name: "example-flow", + }); + } +} + +``` + +## Example Usage + +The default definition: + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { Token, TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. 
+ * See https://cdk.tf/provider-generation for more details. + */ +import { BedrockagentFlow } from "./.gen/providers/aws/bedrockagent-flow"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + new BedrockagentFlow(this, "example", { + definition: [ + { + connection: { + configuration: [ + { + data: [ + { + sourceOutput: "document", + targetInput: "topic", + }, + ], + }, + ], + name: "FlowInputNodeFlowInputNode0ToPrompt_1PromptsNode0", + source: "FlowInputNode", + target: "Prompt_1", + type: "Data", + }, + nodeAttribute: [ + { + configuration: [ + { + input: [{}], + }, + ], + name: "FlowInputNode", + output: [ + { + name: "document", + type: "String", + }, + ], + type: "Input", + }, + { + configuration: [ + { + prompt: [ + { + sourceConfiguration: [ + { + inline: [ + { + inferenceConfiguration: [ + { + text: [ + { + maxTokens: 2048, + stopSequences: ["User:"], + temperature: 0, + topP: 0.8999999761581421, + }, + ], + }, + ], + modelId: "amazon.titan-text-express-v1", + templateConfiguration: [ + { + text: [ + { + inputVariable: [ + { + name: "topic", + }, + ], + text: "Write a paragraph about {{topic}}.", + }, + ], + }, + ], + templateType: "TEXT", + }, + ], + }, + ], + }, + ], + }, + ], + input: [ + { + expression: "$.data", + name: "topic", + type: "String", + }, + ], + name: "Prompt_1", + output: [ + { + name: "modelCompletion", + type: "String", + }, + ], + type: "Prompt", + }, + { + configuration: [ + { + output: [{}], + }, + ], + input: [ + { + expression: "$.data", + name: "document", + type: "String", + }, + ], + name: "FlowOutputNode", + type: "Output", + }, + ], + }, + ], + executionRoleArn: Token.asString(awsIamRoleExample.arn), + name: "example", + }); + } +} + +``` + +## Argument Reference + +The following arguments are required: + +* `name` - (Required) A name for the flow. 
+* `executionRoleArn` - (Required) The Amazon Resource Name (ARN) of the service role with permissions to create and manage a flow. For more information, see [Create a service role for flows in Amazon Bedrock](https://docs.aws.amazon.com/bedrock/latest/userguide/flows-permissions.html) in the Amazon Bedrock User Guide. + +The following arguments are optional: + +* `description` - (Optional) A description for the flow. +* `customerEncryptionKeyArn` - (Optional) The Amazon Resource Name (ARN) of the KMS key to encrypt the flow. +* `definition` - (Optional) A definition of the nodes and connections between nodes in the flow. See [Definition](#definition) for more information. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `tags` - (Optional) Key-value map of resource tags. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. + +### Definition + +* `connection` - (Optional) A list of connection definitions in the flow. See [Connection](#connection) for more information. +* `node` - (Optional) A list of node definitions in the flow. See [Node](#node) for more information. + +### Connection + +* `name` - (Required) A name for the connection that you can reference. +* `source` - (Required) The node that the connection starts at. +* `target` - (Required) The node that the connection ends at. +* `type` - (Required) Whether the source node that the connection begins from is a condition node `Conditional` or not `Data`. +* `configuration` - (Required) Configuration of the connection. 
See [Connection Configuration](#connection-configuration) for more information. + +### Connection Configuration + +* `data` - (Optional) The configuration of a connection originating from a node that isn’t a Condition node. See [Data Connection Configuration](#data-connection-configuration) for more information. +* `conditional` - (Optional) The configuration of a connection originating from a Condition node. See [Conditional Connection Configuration](#conditional-connection-configuration) for more information. + +#### Data Connection Configuration + +* `sourceOutput` - (Required) The name of the output in the source node that the connection begins from. +* `targetInput` - (Required) The name of the input in the target node that the connection ends at. + +#### Conditional Connection Configuration + +* `condition` - (Required) The condition that triggers this connection. For more information about how to write conditions, see the Condition node type in the [Node types](https://docs.aws.amazon.com/bedrock/latest/userguide/node-types.html) topic in the Amazon Bedrock User Guide. + +### Node + +* `name` - (Required) A name for the node. +* `type` - (Required) The type of node. This value must match the name of the key that you provide in the configuration. Valid values: `Agent`, `Collector`, `Condition`, `Input`, `Iterator`, `KnowledgeBase`, `LambdaFunction`, `Lex`, `Output`, `Prompt`, `Retrieval`, `Storage` +* `configuration` - (Required) Contains configurations for the node. See [Node Configuration](#node-configuration) for more information. +* `input` - (Optional) A list of objects containing information about an input into the node. See [Node Input](#node-input) for more information. +* `output` - (Optional) A list of objects containing information about an output from the node. See [Node Output](#node-output) for more information. + +### Node Input + +* `name` - (Required) A name for the input that you can reference. 
+* `type` - (Required) The data type of the input. If the input doesn’t match this type at runtime, a validation error will be thrown. +* `expression` - (Required) An expression that formats the input for the node. For an explanation of how to create expressions, see [Expressions in Prompt flows in Amazon Bedrock](https://docs.aws.amazon.com/bedrock/latest/userguide/flows-expressions.html). +* `category` - (Optional) How input data flows between iterations in a DoWhile loop. + +### Node Output + +* `name` - (Required) A name for the output that you can reference. +* `type` - (Required) The data type of the output. If the output doesn’t match this type at runtime, a validation error will be thrown. + +### Node Configuration + +* `agent` - (Optional) Contains configurations for an agent node in your flow. Invokes an alias of an agent and returns the response. See [Agent Node Configuration](#agent-node-configuration) for more information. +* `collector` - (Optional) Contains configurations for a collector node in your flow. Collects an iteration of inputs and consolidates them into an array of outputs. This object has no fields. +* `condition` - (Optional) Contains configurations for a Condition node in your flow. Defines conditions that lead to different branches of the flow. See [Condition Node Configuration](#condition-node-configuration) for more information. +* `inlineCode` - (Optional) Contains configurations for an inline code node in your flow. See [Inline Code Node Configuration](#inline-code-node-configuration) for more information. +* `input` - (Optional) Contains configurations for an input flow node in your flow. The node `inputs` can’t be specified for this node. This object has no fields. +* `iterator` - (Optional) Contains configurations for an iterator node in your flow. Takes an input that is an array and iteratively sends each item of the array as an output to the following node. The size of the array is also returned in the output. 
The output flow node at the end of the flow iteration will return a response for each member of the array. To return only one response, you can include a collector node downstream from the iterator node. This object has no fields. +* `knowledgeBase` - (Optional) Contains configurations for a knowledge base node in your flow. Queries a knowledge base and returns the retrieved results or generated response. See [Knowledge Base Node Configuration](#knowledge-base-node-configuration) for more information. +* `lambdaFunction` - (Optional) Contains configurations for a Lambda function node in your flow. Invokes a Lambda function. See [Lambda Function Node Configuration](#lambda-function-node-configuration) for more information. +* `lex` - (Optional) Contains configurations for a Lex node in your flow. Invokes an Amazon Lex bot to identify the intent of the input and return the intent as the output. See [Lex Node Configuration](#lex-node-configuration) for more information. +* `output` - (Optional) Contains configurations for an output flow node in your flow. The node `outputs` can’t be specified for this node. This object has no fields. +* `prompt` - (Optional) Contains configurations for a prompt node in your flow. Runs a prompt and generates the model response as the output. You can use a prompt from Prompt management or you can configure one in this node. See [Prompt Node Configuration](#prompt-node-configuration) for more information. +* `retrieval` - (Optional) Contains configurations for a Retrieval node in your flow. Retrieves data from an Amazon S3 location and returns it as the output. See [Retrieval Node Configuration](#retrieval-node-configuration) for more information. +* `storage` - (Optional) Contains configurations for a Storage node in your flow. Stores an input in an Amazon S3 location. See [Storage Node Configuration](#storage-node-configuration) for more information. 
+ +### Agent Node Configuration + +* `agentAliasArn` - (Required) The Amazon Resource Name (ARN) of the alias of the agent to invoke. + +### Condition Node Configuration + +* `condition` - (Optional) A list of conditions. See [Condition Config](#condition-config) for more information. + +#### Condition Config + +* `name` - (Required) A name for the condition that you can reference. +* `expression` - (Optional) Defines the condition. You must refer to at least one of the inputs in the condition. For more information, expand the Condition node section in [Node types in prompt flows](https://docs.aws.amazon.com/bedrock/latest/userguide/flows-how-it-works.html#flows-nodes). + +### Inline Code Node Configuration + +* `code` - (Required) The code that's executed in your inline code node. +* `language` - (Required) The programming language used by your inline code node. + +### Knowledge Base Node Configuration + +* `knowledgeBaseId` - (Required) The unique identifier of the knowledge base to query. +* `modelId` - (Optional) The unique identifier of the model or inference profile to use to generate a response from the query results. Omit this field if you want to return the retrieved results as an array. +* `guardrailConfiguration` - (Required) Contains configurations for a guardrail to apply during query and response generation for the knowledge base in this configuration. See [Guardrail Configuration](#guardrail-configuration) for more information. + +#### Guardrail Configuration + +* `guardrailIdentifier` - (Required) The unique identifier of the guardrail. +* `guardrailVersion` - (Required) The version of the guardrail. + +### Lambda Function Node Configuration + +* `lambdaArn` - (Required) The Amazon Resource Name (ARN) of the Lambda function to invoke. + +### Lex Node Configuration + +* `botAliasArn` - (Required) The Amazon Resource Name (ARN) of the Amazon Lex bot alias to invoke. 
+* `localeId` - (Required) The Region to invoke the Amazon Lex bot in. + +### Prompt Node Configuration + +* `resource` - (Optional) Contains configurations for a prompt from Prompt management. See [Prompt Resource Configuration](#prompt-resource-configuration) for more information. +* `inline` - (Optional) Contains configurations for a prompt that is defined inline. See [Prompt Inline Configuration](#prompt-inline-configuration) for more information. + +#### Prompt Resource Configuration + +* `promptArn` - (Required) The Amazon Resource Name (ARN) of the prompt from Prompt management. + +#### Prompt Inline Configuration + +* `additionalModelRequestFields` - (Optional) Additional fields to be included in the model request for the Prompt node. +* `inferenceConfiguration` - (Optional) Contains inference configurations for the prompt. See [Prompt Inference Configuration](#prompt-inference-configuration) for more information. +* `modelId` - (Required) The unique identifier of the model or [inference profile](https://docs.aws.amazon.com/bedrock/latest/userguide/cross-region-inference.html) to run inference with. +* `templateType` - (Required) The type of prompt template. Valid values: `TEXT`, `CHAT`. +* `templateConfiguration` - (Required) Contains a prompt and variables in the prompt that can be replaced with values at runtime. See [Prompt Template Configuration](#prompt-template-configuration) for more information. + +#### Prompt Inference Configuration + +* `text` - (Optional) Contains inference configurations for a text prompt. See [Text Inference Configuration](#text-inference-configuration) for more information. + +#### Text Inference Configuration + +* `maxTokens` - (Optional) Maximum number of tokens to return in the response. +* `stopSequences` - (Optional) List of strings that define sequences after which the model will stop generating. +* `temperature` - (Optional) Controls the randomness of the response. 
Choose a lower value for more predictable outputs and a higher value for more surprising outputs. +* `topP` - (Optional) Percentage of most-likely candidates that the model considers for the next token. + +#### Prompt Template Configuration + +* `text` - (Optional) Contains configurations for the text in a message for a prompt. See [Text Template Configuration](#text-template-configuration) for more information. +* `chat` - (Optional) Contains configurations to use the prompt in a conversational format. See [Chat Template Configuration](#chat-template-configuration) for more information. + +#### Text Template Configuration + +* `text` - (Required) The message for the prompt. +* `inputVariable` - (Optional) A list of variables in the prompt template. See [Input Variable](#input-variable) for more information. +* `cachePoint` - (Optional) A cache checkpoint within a template configuration. See [Cache Point](#cache-point) for more information. + +#### Chat Template Configuration + +* `inputVariable` - (Optional) A list of variables in the prompt template. See [Input Variable](#input-variable) for more information. +* `message` - (Optional) A list of messages in the chat for the prompt. See [Message](#message) for more information. +* `system` - (Optional) A list of system prompts to provide context to the model or to describe how it should behave. See [System](#system) for more information. +* `toolConfiguration` - (Optional) Configuration information for the tools that the model can use when generating a response. See [Tool Configuration](#tool-configuration) for more information. + +#### Message + +* `role` - (Required) The role that the message belongs to. +* `content` - (Required) Contains the content for the message you pass to, or receive from a model. See [Message Content](#message-content) for more information. + +#### Message Content + +* `cachePoint` - (Optional) Creates a cache checkpoint within a message. See [Cache Point](#cache-point) for more information. 
+* `text` - (Optional) The text in the message. + +#### System + +* `cachePoint` - (Optional) Creates a cache checkpoint within a tool designation. See [Cache Point](#cache-point) for more information. +* `text` - (Optional) The text in the system prompt. + +#### Tool Configuration + +* `toolChoice` - (Optional) Defines which tools the model should request when invoked. See [Tool Choice](#tool-choice) for more information. +* `tool` - (Optional) A list of tools to pass to a model. See [Tool](#tool) for more information. + +#### Tool Choice + +* `any` - (Optional) Defines tools, at least one of which must be requested by the model. No text is generated but the results of tool use are sent back to the model to help generate a response. This object has no fields. +* `auto` - (Optional) Defines tools. The model automatically decides whether to call a tool or to generate text instead. This object has no fields. +* `tool` - (Optional) Defines a specific tool that the model must request. No text is generated but the results of tool use are sent back to the model to help generate a response. See [Named Tool](#named-tool) for more information. + +#### Named Tool + +* `name` - (Required) The name of the tool. + +#### Tool + +* `cachePoint` - (Optional) Creates a cache checkpoint within a tool designation. See [Cache Point](#cache-point) for more information. +* `toolSpec` - (Optional) The specification for the tool. See [Tool Specification](#tool-specification) for more information. + +#### Tool Specification + +* `name` - (Required) The name of the tool. +* `description` - (Optional) The description of the tool. +* `inputSchema` - (Optional) The input schema of the tool. See [Tool Input Schema](#tool-input-schema) for more information. + +#### Tool Input Schema + +* `json` - (Optional) A JSON object defining the input schema for the tool. + +#### Input Variable + +* `name` - (Required) The name of the variable. 
+ +#### Cache Point + +* `type` - (Required) Indicates that the CachePointBlock is of the default type. Valid values: `default`. + +### Retrieval Node Configuration + +* `serviceConfiguration` - (Required) Contains configurations for the service to use for retrieving data to return as the output from the node. See [Retrieval Service Configuration](#retrieval-service-configuration) for more information. + +#### Retrieval Service Configuration + +* `s3` - (Optional) Contains configurations for the Amazon S3 location from which to retrieve data to return as the output from the node. See [Retrieval S3 Service Configuration](#retrieval-s3-service-configuration) for more information. + +#### Retrieval S3 Service Configuration + +* `bucketName` - (Required) The name of the Amazon S3 bucket from which to retrieve data. + +### Storage Node Configuration + +* `serviceConfiguration` - (Required) Contains configurations for a Storage node in your flow. Stores an input in an Amazon S3 location. See [Storage Service Configuration](#storage-service-configuration) for more information. + +#### Storage Service Configuration + +* `s3` - (Optional) Contains configurations for the service to use for storing the input into the node. See [Storage S3 Service Configuration](#storage-s3-service-configuration) for more information. + +#### Storage S3 Service Configuration + +* `bucketName` - (Required) The name of the Amazon S3 bucket in which to store the input into the node. + +## Attribute Reference + +This resource exports the following attributes in addition to the arguments above: + +* `arn` - The Amazon Resource Name (ARN) of the flow. +* `id` - The unique identifier of the flow. +* `createdAt` - The time at which the flow was created. +* `updatedAt` - The time at which the flow was last updated. +* `version` - The version of the flow. +* `status` - The status of the flow. 
+* `tagsAll` - A map of tags assigned to the resource, including those inherited from the provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). + +## Timeouts + +[Configuration options](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts): + +* `create` - (Default `5m`) +* `update` - (Default `5m`) +* `delete` - (Default `5m`) + +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Bedrock Agents Flow using the `id`. For example: + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. + */ +import { BedrockagentFlow } from "./.gen/providers/aws/bedrockagent-flow"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + BedrockagentFlow.generateConfigForImport(this, "example", "ABCDEFGHIJ"); + } +} + +``` + +Using `terraform import`, import Bedrock Agents Flow using the `id`. 
For example: + +```console +% terraform import aws_bedrockagent_flow.example ABCDEFGHIJ +``` + + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/bedrockagent_knowledge_base.html.markdown b/website/docs/cdktf/typescript/r/bedrockagent_knowledge_base.html.markdown index 2449c332162a..6b687c69aae4 100644 --- a/website/docs/cdktf/typescript/r/bedrockagent_knowledge_base.html.markdown +++ b/website/docs/cdktf/typescript/r/bedrockagent_knowledge_base.html.markdown @@ -89,21 +89,21 @@ class MyConvertedCode extends TerraformStack { { embeddingModelArn: "arn:aws:bedrock:us-west-2::foundation-model/amazon.titan-embed-text-v2:0", - embedding_model_configuration: [ + embeddingModelConfiguration: [ { - bedrock_embedding_model_configuration: [ + bedrockEmbeddingModelConfiguration: [ { dimensions: 1024, - embedding_data_type: "FLOAT32", + embeddingDataType: "FLOAT32", }, ], }, ], - supplemental_data_storage_configuration: [ + supplementalDataStorageConfiguration: [ { - storage_location: [ + storageLocation: [ { - s3_location: [ + s3Location: [ { uri: "s3://my-bucket/chunk-processor/", }, @@ -155,6 +155,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` - (Optional) Description of the knowledge base. * `tags` - (Optional) Map of tags assigned to the resource. If configured with a provider [`defaultTags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. 
@@ -170,25 +171,25 @@ The `knowledgeBaseConfiguration` configuration block supports the following argu The `vectorKnowledgeBaseConfiguration` configuration block supports the following arguments: * `embeddingModelArn` - (Required) ARN of the model used to create vector embeddings for the knowledge base. -* `embedding_model_configuration` - (Optional) The embeddings model configuration details for the vector model used in Knowledge Base. See [`embedding_model_configuration` block](#embedding_model_configuration-block) for details. -* `supplemental_data_storage_configuration` - (Optional) supplemental_data_storage_configuration. See [`supplemental_data_storage_configuration` block](#supplemental_data_storage_configuration-block) for details. +* `embeddingModelConfiguration` - (Optional) The embeddings model configuration details for the vector model used in Knowledge Base. See [`embeddingModelConfiguration` block](#embedding_model_configuration-block) for details. +* `supplementalDataStorageConfiguration` - (Optional) supplemental_data_storage_configuration. See [`supplementalDataStorageConfiguration` block](#supplemental_data_storage_configuration-block) for details. -### `embedding_model_configuration` block +### `embeddingModelConfiguration` block -The `embedding_model_configuration` configuration block supports the following arguments: +The `embeddingModelConfiguration` configuration block supports the following arguments: -* `bedrock_embedding_model_configuration` - (Optional) The vector configuration details on the Bedrock embeddings model. See [`bedrock_embedding_model_configuration` block](#bedrock_embedding_model_configuration-block) for details. +* `bedrockEmbeddingModelConfiguration` - (Optional) The vector configuration details on the Bedrock embeddings model. See [`bedrockEmbeddingModelConfiguration` block](#bedrock_embedding_model_configuration-block) for details. 
-### `bedrock_embedding_model_configuration` block +### `bedrockEmbeddingModelConfiguration` block -The `bedrock_embedding_model_configuration` configuration block supports the following arguments: +The `bedrockEmbeddingModelConfiguration` configuration block supports the following arguments: * `dimensions` - (Optional) Dimension details for the vector configuration used on the Bedrock embeddings model. -* `embedding_data_type` - (Optional) Data type for the vectors when using a model to convert text into vector embeddings. The model must support the specified data type for vector embeddings. Valid values are `FLOAT32` and `BINARY`. +* `embeddingDataType` - (Optional) Data type for the vectors when using a model to convert text into vector embeddings. The model must support the specified data type for vector embeddings. Valid values are `FLOAT32` and `BINARY`. -### `supplemental_data_storage_configuration` block +### `supplementalDataStorageConfiguration` block -The `supplemental_data_storage_configuration` configuration block supports the following arguments: +The `supplementalDataStorageConfiguration` configuration block supports the following arguments: * `storageLocation` - (Required) A storage location specification for images extracted from multimodal documents in your data source. See [`storageLocation` block](#storage_location-block) for details. 
@@ -313,4 +314,4 @@ Using `terraform import`, import Agents for Amazon Bedrock Knowledge Base using % terraform import aws_bedrockagent_knowledge_base.example EMDPPAYPZI ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/bedrockagent_prompt.html.markdown b/website/docs/cdktf/typescript/r/bedrockagent_prompt.html.markdown index 77337370d6c0..42aa10e136d2 100644 --- a/website/docs/cdktf/typescript/r/bedrockagent_prompt.html.markdown +++ b/website/docs/cdktf/typescript/r/bedrockagent_prompt.html.markdown @@ -101,6 +101,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` - (Optional) Description of the prompt. * `defaultVariant` - (Optional) Name of the default variant for your prompt. * `customerEncryptionKeyArn` - (Optional) Amazon Resource Name (ARN) of the KMS key that you encrypted the prompt with. @@ -252,4 +253,4 @@ Using `terraform import`, import Bedrock Agents Prompt using the `id`. For examp % terraform import aws_bedrockagent_prompt.example 1A2BC3DEFG ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/budgets_budget.html.markdown b/website/docs/cdktf/typescript/r/budgets_budget.html.markdown index 8406515c50e2..d944df7a8a8e 100644 --- a/website/docs/cdktf/typescript/r/budgets_budget.html.markdown +++ b/website/docs/cdktf/typescript/r/budgets_budget.html.markdown @@ -320,6 +320,7 @@ The following arguments are optional: * `accountId` - (Optional) The ID of the target account for budget. Will use current user's account_id by default if omitted. 
* `autoAdjustData` - (Optional) Object containing [AutoAdjustData](#auto-adjust-data) which determines the budget amount for an auto-adjusting budget. +* `billingViewArn` - (Optional) ARN of the billing view. * `costFilter` - (Optional) A list of [CostFilter](#cost-filter) name/values pair to apply to budget. * `costTypes` - (Optional) Object containing [CostTypes](#cost-types) The types of cost included in a budget, such as tax and subscriptions. * `limitAmount` - (Optional) The amount of cost or usage being measured for a budget. @@ -434,4 +435,4 @@ Using `terraform import`, import budgets using `AccountID:BudgetName`. For examp % terraform import aws_budgets_budget.myBudget 123456789012:myBudget ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/ce_anomaly_monitor.html.markdown b/website/docs/cdktf/typescript/r/ce_anomaly_monitor.html.markdown index 32db41da3acd..b20a4f0a17b5 100644 --- a/website/docs/cdktf/typescript/r/ce_anomaly_monitor.html.markdown +++ b/website/docs/cdktf/typescript/r/ce_anomaly_monitor.html.markdown @@ -97,6 +97,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_ce_anomaly_monitor.example + identity = { + "arn" = "arn:aws:ce::123456789012:anomalymonitor/12345678-1234-1234-1234-123456789012" + } +} + +resource "aws_ce_anomaly_monitor" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the Cost Explorer anomaly monitor. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import `aws_ce_anomaly_monitor` using the `id`. 
For example: ```typescript @@ -127,4 +148,4 @@ Using `terraform import`, import `aws_ce_anomaly_monitor` using the `id`. For ex % terraform import aws_ce_anomaly_monitor.example costAnomalyMonitorARN ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/ce_anomaly_subscription.html.markdown b/website/docs/cdktf/typescript/r/ce_anomaly_subscription.html.markdown index fd161a07e0b5..bb40636f19d0 100644 --- a/website/docs/cdktf/typescript/r/ce_anomaly_subscription.html.markdown +++ b/website/docs/cdktf/typescript/r/ce_anomaly_subscription.html.markdown @@ -298,6 +298,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_ce_anomaly_subscription.example + identity = { + "arn" = "arn:aws:ce::123456789012:anomalysubscription/12345678-1234-1234-1234-123456789012" + } +} + +resource "aws_ce_anomaly_subscription" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the Cost Explorer anomaly subscription. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import `aws_ce_anomaly_subscription` using the `id`. For example: ```typescript @@ -328,4 +349,4 @@ Using `terraform import`, import `aws_ce_anomaly_subscription` using the `id`. 
F % terraform import aws_ce_anomaly_subscription.example AnomalySubscriptionARN ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/ce_cost_category.html.markdown b/website/docs/cdktf/typescript/r/ce_cost_category.html.markdown index bac364dea690..dd1a73a8906d 100644 --- a/website/docs/cdktf/typescript/r/ce_cost_category.html.markdown +++ b/website/docs/cdktf/typescript/r/ce_cost_category.html.markdown @@ -144,6 +144,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_ce_cost_category.example + identity = { + "arn" = "arn:aws:ce::123456789012:costcategory/12345678-1234-1234-1234-123456789012" + } +} + +resource "aws_ce_cost_category" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the Cost Explorer cost category. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import `aws_ce_cost_category` using the id. For example: ```typescript @@ -170,4 +191,4 @@ Using `terraform import`, import `aws_ce_cost_category` using the id. 
For exampl % terraform import aws_ce_cost_category.example costCategoryARN ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/chatbot_slack_channel_configuration.html.markdown b/website/docs/cdktf/typescript/r/chatbot_slack_channel_configuration.html.markdown index 00c57f50bcfd..1b25b532ace3 100644 --- a/website/docs/cdktf/typescript/r/chatbot_slack_channel_configuration.html.markdown +++ b/website/docs/cdktf/typescript/r/chatbot_slack_channel_configuration.html.markdown @@ -53,6 +53,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `guardrailPolicyArns` - (Optional) List of IAM policy ARNs that are applied as channel guardrails. The AWS managed `AdministratorAccess` policy is applied by default if this is not set. * `loggingLevel` - (Optional) Logging levels include `ERROR`, `INFO`, or `NONE`. * `snsTopicArns` - (Optional) ARNs of the SNS topics that deliver notifications to AWS Chatbot. 
@@ -108,4 +109,4 @@ Using `terraform import`, import Chatbot Slack Channel Configuration using the ` % terraform import aws_chatbot_slack_channel_configuration.example arn:aws:chatbot::123456789012:chat-configuration/slack-channel/min-slaka-kanal ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/chatbot_teams_channel_configuration.html.markdown b/website/docs/cdktf/typescript/r/chatbot_teams_channel_configuration.html.markdown index a711c47851d9..c8432d20e366 100644 --- a/website/docs/cdktf/typescript/r/chatbot_teams_channel_configuration.html.markdown +++ b/website/docs/cdktf/typescript/r/chatbot_teams_channel_configuration.html.markdown @@ -57,6 +57,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `channelName` - (Optional) Name of the Microsoft Teams channel. * `guardrailPolicyArns` - (Optional) List of IAM policy ARNs that are applied as channel guardrails. The AWS managed `AdministratorAccess` policy is applied by default if this is not set. * `loggingLevel` - (Optional) Logging levels include `ERROR`, `INFO`, or `NONE`. 
@@ -112,4 +113,4 @@ Using `terraform import`, import Chatbot Microsoft Teams Channel Configuration u % terraform import aws_chatbot_teams_channel_configuration.example 5f4f15d2-b958-522a-8333-124aa8bf0925 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/chime_voice_connector.html.markdown b/website/docs/cdktf/typescript/r/chime_voice_connector.html.markdown index fcabaa2f3c19..25c0bac4b8fb 100644 --- a/website/docs/cdktf/typescript/r/chime_voice_connector.html.markdown +++ b/website/docs/cdktf/typescript/r/chime_voice_connector.html.markdown @@ -45,6 +45,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `awsRegion` - (Optional) The AWS Region in which the Amazon Chime Voice Connector is created. Default value: `us-east-1` * `tags` - (Optional) Key-value mapping of resource tags. If configured with a provider [`defaultTags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. @@ -84,4 +85,4 @@ Using `terraform import`, import Configuration Recorder using the name. 
For exam % terraform import aws_chime_voice_connector.test example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/chime_voice_connector_group.html.markdown b/website/docs/cdktf/typescript/r/chime_voice_connector_group.html.markdown index b9031ec22ca6..52e76107bfb8 100644 --- a/website/docs/cdktf/typescript/r/chime_voice_connector_group.html.markdown +++ b/website/docs/cdktf/typescript/r/chime_voice_connector_group.html.markdown @@ -61,6 +61,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The name of the Amazon Chime Voice Connector group. * `connector` - (Optional) The Amazon Chime Voice Connectors to route inbound calls to. @@ -109,4 +110,4 @@ Using `terraform import`, import Configuration Recorder using the name. For exam % terraform import aws_chime_voice_connector_group.default example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/chime_voice_connector_logging.html.markdown b/website/docs/cdktf/typescript/r/chime_voice_connector_logging.html.markdown index 893abdf2ec6c..b2a22f2240ee 100644 --- a/website/docs/cdktf/typescript/r/chime_voice_connector_logging.html.markdown +++ b/website/docs/cdktf/typescript/r/chime_voice_connector_logging.html.markdown @@ -51,6 +51,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `voiceConnectorId` - (Required) The Amazon Chime Voice Connector ID. * `enableSipLogs` - (Optional) When true, enables SIP message logs for sending to Amazon CloudWatch Logs. * `enableMediaMetricLogs` - (Optional) When true, enables logging of detailed media metrics for Voice Connectors to Amazon CloudWatch logs. @@ -93,4 +94,4 @@ Using `terraform import`, import Chime Voice Connector Logging using the `voiceC % terraform import aws_chime_voice_connector_logging.default abcdef1ghij2klmno3pqr4 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/chime_voice_connector_origination.html.markdown b/website/docs/cdktf/typescript/r/chime_voice_connector_origination.html.markdown index 1ccc168cdac1..f23caf440ae9 100644 --- a/website/docs/cdktf/typescript/r/chime_voice_connector_origination.html.markdown +++ b/website/docs/cdktf/typescript/r/chime_voice_connector_origination.html.markdown @@ -63,6 +63,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `voiceConnectorId` - (Required) The Amazon Chime Voice Connector ID. * `route` - (Required) Set of call distribution properties defined for your SIP hosts. See [route](#route) below for more details. Minimum of 1. Maximum of 20. * `disabled` - (Optional) When origination settings are disabled, inbound calls are not enabled for your Amazon Chime Voice Connector. 
@@ -115,4 +116,4 @@ Using `terraform import`, import Chime Voice Connector Origination using the `vo % terraform import aws_chime_voice_connector_origination.default abcdef1ghij2klmno3pqr4 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/chime_voice_connector_streaming.html.markdown b/website/docs/cdktf/typescript/r/chime_voice_connector_streaming.html.markdown index ca4dabfbf0cc..3d01fc9d489f 100644 --- a/website/docs/cdktf/typescript/r/chime_voice_connector_streaming.html.markdown +++ b/website/docs/cdktf/typescript/r/chime_voice_connector_streaming.html.markdown @@ -144,6 +144,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `voiceConnectorId` - (Required) The Amazon Chime Voice Connector ID. * `dataRetention` - (Required) The retention period, in hours, for the Amazon Kinesis data. * `disabled` - (Optional) When true, media streaming to Amazon Kinesis is turned off. 
Default: `false` @@ -193,4 +194,4 @@ Using `terraform import`, import Chime Voice Connector Streaming using the `voic % terraform import aws_chime_voice_connector_streaming.default abcdef1ghij2klmno3pqr4 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/chime_voice_connector_termination.html.markdown b/website/docs/cdktf/typescript/r/chime_voice_connector_termination.html.markdown index 007227c03370..af778402b9c9 100644 --- a/website/docs/cdktf/typescript/r/chime_voice_connector_termination.html.markdown +++ b/website/docs/cdktf/typescript/r/chime_voice_connector_termination.html.markdown @@ -50,6 +50,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `voiceConnectorId` - (Required) The Amazon Chime Voice Connector ID. * `cidrAllowList` - (Required) The IP addresses allowed to make calls, in CIDR format. * `callingRegions` - (Required) The countries to which calls are allowed, in ISO 3166-1 alpha-2 format. 
@@ -95,4 +96,4 @@ Using `terraform import`, import Chime Voice Connector Termination using the `vo % terraform import aws_chime_voice_connector_termination.default abcdef1ghij2klmno3pqr4 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/chime_voice_connector_termination_credentials.html.markdown b/website/docs/cdktf/typescript/r/chime_voice_connector_termination_credentials.html.markdown index f002a94cc78d..53637923bac0 100644 --- a/website/docs/cdktf/typescript/r/chime_voice_connector_termination_credentials.html.markdown +++ b/website/docs/cdktf/typescript/r/chime_voice_connector_termination_credentials.html.markdown @@ -68,6 +68,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `voiceConnectorId` - (Required) Amazon Chime Voice Connector ID. * `credentials` - (Required) List of termination SIP credentials. 
@@ -116,4 +117,4 @@ Using `terraform import`, import Chime Voice Connector Termination Credentials u % terraform import aws_chime_voice_connector_termination_credentials.default abcdef1ghij2klmno3pqr4 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/chimesdkmediapipelines_media_insights_pipeline_configuration.html.markdown b/website/docs/cdktf/typescript/r/chimesdkmediapipelines_media_insights_pipeline_configuration.html.markdown index c92c2fc9d1cb..f45cd88d5dc4 100644 --- a/website/docs/cdktf/typescript/r/chimesdkmediapipelines_media_insights_pipeline_configuration.html.markdown +++ b/website/docs/cdktf/typescript/r/chimesdkmediapipelines_media_insights_pipeline_configuration.html.markdown @@ -391,6 +391,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Configuration name. * `resourceAccessRoleArn` - (Required) ARN of IAM Role used by service to invoke processors and sinks specified by configuration elements. * `elements` - (Required) Collection of processors and sinks to transform media and deliver data. @@ -478,6 +479,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. 
For example: + +```terraform +import { + to = aws_chimesdkmediapipelines_media_insights_pipeline_configuration.example + identity = { + "arn" = "arn:aws:chime:us-east-1:123456789012:media-insights-pipeline-configuration/example-config" + } +} + +resource "aws_chimesdkmediapipelines_media_insights_pipeline_configuration" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the Chime SDK media insights pipeline configuration. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Chime SDK Media Pipelines Media Insights Pipeline Configuration using the `id`. For example: ```typescript @@ -508,4 +530,4 @@ Using `terraform import`, import Chime SDK Media Pipelines Media Insights Pipeli % terraform import aws_chimesdkmediapipelines_media_insights_pipeline_configuration.example abcdef123456 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/chimesdkvoice_sip_media_application.html.markdown b/website/docs/cdktf/typescript/r/chimesdkvoice_sip_media_application.html.markdown index d534c8cdd411..8640822c55b4 100644 --- a/website/docs/cdktf/typescript/r/chimesdkvoice_sip_media_application.html.markdown +++ b/website/docs/cdktf/typescript/r/chimesdkvoice_sip_media_application.html.markdown @@ -50,6 +50,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `tags` - (Optional) Key-value mapping of resource tags. 
If configured with a provider [`defaultTags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. ### `endpoints` @@ -98,4 +99,4 @@ Using `terraform import`, import a ChimeSDKVoice SIP Media Application using the % terraform import aws_chimesdkvoice_sip_media_application.example abcdef123456 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/chimesdkvoice_sip_rule.html.markdown b/website/docs/cdktf/typescript/r/chimesdkvoice_sip_rule.html.markdown index b9d878d79c4e..ff8eab4c0644 100644 --- a/website/docs/cdktf/typescript/r/chimesdkvoice_sip_rule.html.markdown +++ b/website/docs/cdktf/typescript/r/chimesdkvoice_sip_rule.html.markdown @@ -55,6 +55,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `disabled` - (Optional) Enables or disables a rule. You must disable rules before you can delete them. ### `targetApplications` @@ -103,4 +104,4 @@ Using `terraform import`, import a ChimeSDKVoice SIP Rule using the `id`. 
For ex % terraform import aws_chimesdkvoice_sip_rule.example abcdef123456 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/chimesdkvoice_voice_profile_domain.html.markdown b/website/docs/cdktf/typescript/r/chimesdkvoice_voice_profile_domain.html.markdown index fbbeea953f27..7e739193eb3f 100644 --- a/website/docs/cdktf/typescript/r/chimesdkvoice_voice_profile_domain.html.markdown +++ b/website/docs/cdktf/typescript/r/chimesdkvoice_voice_profile_domain.html.markdown @@ -61,6 +61,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` - (Optional) Description of Voice Profile Domain. ## Attribute Reference @@ -110,4 +111,4 @@ Using `terraform import`, import AWS Chime SDK Voice Profile Domain using the `i % terraform import aws_chimesdkvoice_voice_profile_domain.example abcdef123456 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/cleanrooms_collaboration.html.markdown b/website/docs/cdktf/typescript/r/cleanrooms_collaboration.html.markdown index 14a5352d265c..e2e0a5eca833 100644 --- a/website/docs/cdktf/typescript/r/cleanrooms_collaboration.html.markdown +++ b/website/docs/cdktf/typescript/r/cleanrooms_collaboration.html.markdown @@ -10,13 +10,11 @@ description: |- # Resource: aws_cleanrooms_collaboration -Provides a AWS Clean Rooms collaboration. All members included in the definition will be invited to -join the collaboration and can create memberships. +Provides an AWS Clean Rooms collaboration. +All members included in the definition will be invited to join the collaboration and can create memberships. 
## Example Usage -### Collaboration with tags - ```typescript // DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug import { Construct } from "constructs"; @@ -33,6 +31,7 @@ class MyConvertedCode extends TerraformStack { constructor(scope: Construct, name: string, config: MyConfig) { super(scope, name); new CleanroomsCollaboration(this, "test_collaboration", { + analyticsEngine: "SPARK", creatorDisplayName: "Creator ", creatorMemberAbilities: ["CAN_QUERY", "CAN_RECEIVE_RESULTS"], dataEncryptionMetadata: { @@ -62,7 +61,7 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -This resource supports the following arguments: +The following arguments are required: * `name` - (Required) - The name of the collaboration. Collaboration names do not need to be unique. * `description` - (Required) - A description for a collaboration. @@ -70,6 +69,10 @@ This resource supports the following arguments: * `creatorDisplayName` - (Required - Forces new resource) - The name for the member record for the collaboration creator. * `queryLogStatus` - (Required - Forces new resource) - Determines if members of the collaboration can enable query logs within their own. emberships. Valid values [may be found here](https://docs.aws.amazon.com/clean-rooms/latest/apireference/API_CreateCollaboration.html#API-CreateCollaboration-request-queryLogStatus). + +The following arguments are optional: + +* `analyticsEngine` - (Optional) Analytics engine used by the collaboration. Valid values are `CLEAN_ROOMS_SQL` (deprecated) and `SPARK`. * `dataEncryptionMetadata` - (Required - Forces new resource) - a collection of settings which determine how the [c3r client](https://docs.aws.amazon.com/clean-rooms/latest/userguide/crypto-computing.html) will encrypt data for use within this collaboration. * `data_encryption_metadata.allow_clear_text` - (Required - Forces new resource) - Indicates whether encrypted tables can contain cleartext data. 
This is a boolea field. @@ -83,17 +86,18 @@ or cryptographically processed (false). * `member.account_id` - (Required - Forces new resource) - The account id for the invited member. * `member.display_name` - (Required - Forces new resource) - The display name for the invited member. * `member.member_abilities` - (Required - Forces new resource) - The list of abilities for the invited member. Valid values [may be found here](https://docs.aws.amazon.com/clean-rooms/latest/apireference/API_CreateCollaboration.html#API-CreateCollaboration-request-creatorMemberAbilities). +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `tags` - (Optional) - Key value pairs which tag the collaboration. ## Attribute Reference This resource exports the following attributes in addition to the arguments above: -* `arn` - The arn of the collaboration. -* `id` - The id of the collaboration. -* `createTime` - The date and time the collaboration was created. +* `arn` - ARN of the collaboration. +* `id` - ID of the collaboration. +* `createTime` - Date and time the collaboration was created. * `member status` - For each member included in the collaboration an additional computed attribute of status is added. These values [may be found here](https://docs.aws.amazon.com/clean-rooms/latest/apireference/API_MemberSummary.html#API-Type-MemberSummary-status). -* `updatedTime` - The date and time the collaboration was last updated. +* `updatedTime` - Date and time the collaboration was last updated. ## Timeouts @@ -135,4 +139,4 @@ Using `terraform import`, import `aws_cleanrooms_collaboration` using the `id`. 
% terraform import aws_cleanrooms_collaboration.collaboration 1234abcd-12ab-34cd-56ef-1234567890ab ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/cleanrooms_configured_table.html.markdown b/website/docs/cdktf/typescript/r/cleanrooms_configured_table.html.markdown index 5e861e9003db..14f2c4687555 100644 --- a/website/docs/cdktf/typescript/r/cleanrooms_configured_table.html.markdown +++ b/website/docs/cdktf/typescript/r/cleanrooms_configured_table.html.markdown @@ -50,6 +50,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) - The name of the configured table. * `description` - (Optional) - A description for the configured table. * `analysisMethod` - (Required) - The analysis method for the configured table. The only valid value is currently `DIRECT_QUERY`. @@ -78,6 +79,32 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_cleanrooms_configured_table.example + identity = { + id = "1234abcd-12ab-34cd-56ef-1234567890ab" + } +} + +resource "aws_cleanrooms_configured_table" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `id` - (String) ID of the cleanrooms configured table. + +#### Optional + +* `accountId` (String) AWS Account where this resource is managed. +* `region` (String) Region where this resource is managed. 
+ In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import `aws_cleanrooms_configured_table` using the `id`. For example: ```typescript @@ -108,4 +135,4 @@ Using `terraform import`, import `aws_cleanrooms_configured_table` using the `id % terraform import aws_cleanrooms_configured_table.table 1234abcd-12ab-34cd-56ef-1234567890ab ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/cleanrooms_membership.html.markdown b/website/docs/cdktf/typescript/r/cleanrooms_membership.html.markdown index db5ee069d245..40e92be84900 100644 --- a/website/docs/cdktf/typescript/r/cleanrooms_membership.html.markdown +++ b/website/docs/cdktf/typescript/r/cleanrooms_membership.html.markdown @@ -24,29 +24,29 @@ import { TerraformStack } from "cdktf"; * Provider bindings are generated by running `cdktf get`. * See https://cdk.tf/provider-generation for more details. */ -import { CleanroomsMembership } from "./.gen/providers/aws/"; +import { CleanroomsMembership } from "./.gen/providers/aws/cleanrooms-membership"; class MyConvertedCode extends TerraformStack { constructor(scope: Construct, name: string) { super(scope, name); new CleanroomsMembership(this, "test_membership", { - collaboration_id: "1234abcd-12ab-34cd-56ef-1234567890ab", - default_result_configuration: [ + collaborationId: "1234abcd-12ab-34cd-56ef-1234567890ab", + defaultResultConfiguration: [ { - output_configuration: [ + outputConfiguration: [ { s3: [ { bucket: "test-bucket", - key_prefix: "test-prefix", - result_format: "PARQUET", + keyPrefix: "test-prefix", + resultFormat: "PARQUET", }, ], }, ], - role_arn: "arn:aws:iam::123456789012:role/role-name", + roleArn: "arn:aws:iam::123456789012:role/role-name", }, ], - query_log_status: "DISABLED", + queryLogStatus: "DISABLED", tags: { Project: "Terraform", }, @@ -60,9 +60,10 @@ class MyConvertedCode extends TerraformStack { This resource supports the 
following arguments: -* `collaboration_id` - (Required - Forces new resource) - The ID of the collaboration to which the member was invited. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `collaborationId` - (Required - Forces new resource) - The ID of the collaboration to which the member was invited. * `queryLogStatus` - (Required) - An indicator as to whether query logging has been enabled or disabled for the membership. -* `default_result_configuration` - (Optional) - The default configuration for a query result. +* `defaultResultConfiguration` - (Optional) - The default configuration for a query result. - `roleArn` - (Optional) - The ARN of the IAM role which will be used to create the membership. - `output_configuration.s3.bucket` - (Required) - The name of the S3 bucket where the query results will be stored. - `output_configuration.s3.result_format` - (Required) - The format of the query results. Valid values are `PARQUET` and `CSV`. @@ -74,11 +75,11 @@ This resource supports the following arguments: This resource exports the following attributes in addition to the arguments above: * `arn` - The ARN of the membership. -* `collaboration_arn` - The ARN of the joined collaboration. -* `collaboration_creator_account_id` - The account ID of the collaboration's creator. -* `collaboration_creator_display_name` - The display name of the collaboration's creator. -* `collaboration_id` - The ID of the joined collaboration. -* `collaboration_name` - The name of the joined collaboration. +* `collaborationArn` - The ARN of the joined collaboration. +* `collaborationCreatorAccountId` - The account ID of the collaboration's creator. +* `collaborationCreatorDisplayName` - The display name of the collaboration's creator. 
+* `collaborationId` - The ID of the joined collaboration. +* `collaborationName` - The name of the joined collaboration. * `createTime` - The date and time the membership was created. * `id` - The ID of the membership. * `memberAbilities` - The list of abilities for the invited member. @@ -98,7 +99,7 @@ import { TerraformStack } from "cdktf"; * Provider bindings are generated by running `cdktf get`. * See https://cdk.tf/provider-generation for more details. */ -import { CleanroomsMembership } from "./.gen/providers/aws/"; +import { CleanroomsMembership } from "./.gen/providers/aws/cleanrooms-membership"; class MyConvertedCode extends TerraformStack { constructor(scope: Construct, name: string) { super(scope, name); @@ -118,4 +119,4 @@ Using `terraform import`, import `aws_cleanrooms_membership` using the `id`. For % terraform import aws_cleanrooms_membership.membership 1234abcd-12ab-34cd-56ef-1234567890ab ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/cloud9_environment_ec2.html.markdown b/website/docs/cdktf/typescript/r/cloud9_environment_ec2.html.markdown index 7f89cb91ebea..4790a1c571a8 100644 --- a/website/docs/cdktf/typescript/r/cloud9_environment_ec2.html.markdown +++ b/website/docs/cdktf/typescript/r/cloud9_environment_ec2.html.markdown @@ -132,6 +132,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The name of the environment. * `instanceType` - (Required) The type of instance to connect to the environment, e.g., `t2.micro`. 
* `imageId` - (Required) The identifier for the Amazon Machine Image (AMI) that's used to create the EC2 instance. Valid values are @@ -159,4 +160,4 @@ This resource exports the following attributes in addition to the arguments abov * `tagsAll` - A map of tags assigned to the resource, including those inherited from the provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). * `type` - The type of the environment (e.g., `ssh` or `ec2`). - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/cloud9_environment_membership.html.markdown b/website/docs/cdktf/typescript/r/cloud9_environment_membership.html.markdown index e3b8148440bd..70110046db47 100644 --- a/website/docs/cdktf/typescript/r/cloud9_environment_membership.html.markdown +++ b/website/docs/cdktf/typescript/r/cloud9_environment_membership.html.markdown @@ -61,6 +61,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `environmentId` - (Required) The ID of the environment that contains the environment member you want to add. * `permissions` - (Required) The type of environment member permissions you want to associate with this environment member. Allowed values are `read-only` and `read-write` . * `userArn` - (Required) The Amazon Resource Name (ARN) of the environment member you want to add. 
@@ -104,4 +105,4 @@ Using `terraform import`, import Cloud9 environment membership using the `enviro % terraform import aws_cloud9_environment_membership.test environment-id#user-arn ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/cloudcontrolapi_resource.html.markdown b/website/docs/cdktf/typescript/r/cloudcontrolapi_resource.html.markdown index 7d3d9ca22dc1..3c634632a198 100644 --- a/website/docs/cdktf/typescript/r/cloudcontrolapi_resource.html.markdown +++ b/website/docs/cdktf/typescript/r/cloudcontrolapi_resource.html.markdown @@ -54,6 +54,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `roleArn` - (Optional) Amazon Resource Name (ARN) of the IAM Role to assume for operations. * `schema` - (Optional) JSON string of the CloudFormation resource type schema which is used for plan time validation where possible. Automatically fetched if not provided. In large scale environments with multiple resources using the same `typeName`, it is recommended to fetch the schema once via the [`aws_cloudformation_type` data source](/docs/providers/aws/d/cloudformation_type.html) and use this argument to reduce `DescribeType` API operation throttling. This value is marked sensitive only to prevent large plan differences from showing. * `typeVersionId` - (Optional) Identifier of the CloudFormation resource type version. @@ -64,4 +65,4 @@ This resource exports the following attributes in addition to the arguments abov * `properties` - JSON string matching the CloudFormation resource type schema with current configuration. 
Underlying attributes can be referenced via the [`jsondecode()` function](https://www.terraform.io/docs/language/functions/jsondecode.html), for example, `jsondecode(data.aws_cloudcontrolapi_resource.example.properties)["example"]`. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/cloudformation_stack.html.markdown b/website/docs/cdktf/typescript/r/cloudformation_stack.html.markdown index fb1f62e3dc10..31b4fb8726fa 100644 --- a/website/docs/cdktf/typescript/r/cloudformation_stack.html.markdown +++ b/website/docs/cdktf/typescript/r/cloudformation_stack.html.markdown @@ -69,6 +69,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Stack name. * `templateBody` - (Optional) Structure containing the template body (max size: 51,200 bytes). * `templateUrl` - (Optional) Location of a file containing the template body (max size: 460,800 bytes). @@ -136,4 +137,4 @@ Using `terraform import`, import Cloudformation Stacks using the `name`. 
For exa % terraform import aws_cloudformation_stack.stack networking-stack ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/cloudformation_stack_instances.html.markdown b/website/docs/cdktf/typescript/r/cloudformation_stack_instances.html.markdown index a0d036e8f120..9a83b53aae8d 100644 --- a/website/docs/cdktf/typescript/r/cloudformation_stack_instances.html.markdown +++ b/website/docs/cdktf/typescript/r/cloudformation_stack_instances.html.markdown @@ -167,6 +167,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `accounts` - (Optional) Accounts where you want to create stack instances in the specified `regions`. You can specify either `accounts` or `deploymentTargets`, but not both. * `deploymentTargets` - (Optional) AWS Organizations accounts for which to create stack instances in the `regions`. stack sets doesn't deploy stack instances to the organization management account, even if the organization management account is in your organization or in an OU in your organization. Drift detection is not possible for most of this argument. See [deployment_targets](#deployment_targets) below. * `parameterOverrides` - (Optional) Key-value map of input parameters to override from the stack set for these instances. This argument's drift detection is limited to the first account and region since each instance can have unique parameters. 
@@ -285,4 +286,4 @@ Using `terraform import`, Import CloudFormation stack instances that target OUs, % terraform import aws_cloudformation_stack_instances.example example,SELF,OU ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/cloudformation_stack_set.html.markdown b/website/docs/cdktf/typescript/r/cloudformation_stack_set.html.markdown index 5b39f6eac976..918ddb535481 100644 --- a/website/docs/cdktf/typescript/r/cloudformation_stack_set.html.markdown +++ b/website/docs/cdktf/typescript/r/cloudformation_stack_set.html.markdown @@ -140,6 +140,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `administrationRoleArn` - (Optional) Amazon Resource Number (ARN) of the IAM Role in the administrator account. This must be defined when using the `SELF_MANAGED` permission model. * `autoDeployment` - (Optional) Configuration block containing the auto-deployment model for your StackSet. This can only be defined when using the `SERVICE_MANAGED` permission model. * `enabled` - (Optional) Whether or not auto-deployment is enabled. 
@@ -242,4 +243,4 @@ Using `terraform import`, import CloudFormation StackSets when acting a delegate % terraform import aws_cloudformation_stack_set.example example,DELEGATED_ADMIN ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/cloudformation_stack_set_instance.html.markdown b/website/docs/cdktf/typescript/r/cloudformation_stack_set_instance.html.markdown index a2ed9743a501..7b46d71ddf0d 100644 --- a/website/docs/cdktf/typescript/r/cloudformation_stack_set_instance.html.markdown +++ b/website/docs/cdktf/typescript/r/cloudformation_stack_set_instance.html.markdown @@ -34,7 +34,7 @@ class MyConvertedCode extends TerraformStack { super(scope, name); new CloudformationStackSetInstance(this, "example", { accountId: "123456789012", - region: "us-east-1", + stackSetInstanceRegion: "us-east-1", stackSetName: Token.asString(awsCloudformationStackSetExample.name), }); } @@ -149,7 +149,7 @@ class MyConvertedCode extends TerraformStack { ), ], }, - region: "us-east-1", + stackSetInstanceRegion: "us-east-1", stackSetName: Token.asString(awsCloudformationStackSetExample.name), }); } @@ -163,12 +163,13 @@ This resource supports the following arguments: * `stackSetName` - (Required) Name of the StackSet. * `accountId` - (Optional) Target AWS Account ID to create a Stack based on the StackSet. Defaults to current account. +* `callAs` - (Optional) Specifies whether you are acting as an account administrator in the organization's management account or as a delegated administrator in a member account. Valid values: `SELF` (default), `DELEGATED_ADMIN`. * `deploymentTargets` - (Optional) AWS Organizations accounts to which StackSets deploys. StackSets doesn't deploy stack instances to the organization management account, even if the organization management account is in your organization or in an OU in your organization. Drift detection is not possible for this argument. 
See [deployment_targets](#deployment_targets-argument-reference) below. +* `operationPreferences` - (Optional) Preferences for how AWS CloudFormation performs a stack set operation. +* `parameterOverrides` - (Optional) Key-value map of input parameters to override from the StackSet for this Instance. -* `region` - (Optional) Target AWS Region to create a Stack based on the StackSet. Defaults to current region. +* `region` - (Optional, **Deprecated**) Target AWS Region to create a Stack based on the StackSet. Defaults to current region. Use `stackSetInstanceRegion` instead. +* `retainStack` - (Optional) During Terraform resource destroy, remove Instance from StackSet while keeping the Stack and its associated resources. Must be enabled in Terraform state _before_ destroy operation to take effect. You cannot reassociate a retained Stack or add an existing, saved Stack to a new StackSet. Defaults to `false`. -* `callAs` - (Optional) Specifies whether you are acting as an account administrator in the organization's management account or as a delegated administrator in a member account. Valid values: `SELF` (default), `DELEGATED_ADMIN`. -* `operationPreferences` - (Optional) Preferences for how AWS CloudFormation performs a stack set operation. +* `stackSetInstanceRegion` - (Optional) Target AWS Region to create a Stack based on the StackSet. Defaults to current region. 
### `deploymentTargets` Argument Reference @@ -306,4 +307,4 @@ Using `terraform import`, import CloudFormation StackSet Instances when acting a % terraform import aws_cloudformation_stack_set_instance.example example,ou-sdas-123123123/ou-sdas-789789789,us-east-1,DELEGATED_ADMIN ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/cloudformation_type.html.markdown b/website/docs/cdktf/typescript/r/cloudformation_type.html.markdown index f3a6f566ee31..c94f588ab0fa 100644 --- a/website/docs/cdktf/typescript/r/cloudformation_type.html.markdown +++ b/website/docs/cdktf/typescript/r/cloudformation_type.html.markdown @@ -54,6 +54,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `executionRoleArn` - (Optional) Amazon Resource Name (ARN) of the IAM Role for CloudFormation to assume when invoking the extension. If your extension calls AWS APIs in any of its handlers, you must create an IAM execution role that includes the necessary permissions to call those AWS APIs, and provision that execution role in your account. When CloudFormation needs to invoke the extension handler, CloudFormation assumes this execution role to create a temporary session token, which it then passes to the extension handler, thereby supplying your extension with the appropriate credentials. * `loggingConfig` - (Optional) Configuration block containing logging configuration. * `schemaHandlerPackage` - (Required) URL to the S3 bucket containing the extension project package that contains the necessary files for the extension you want to register. Must begin with `s3://` or `https://`. 
For example, `s3://example-bucket/example-object`. @@ -116,4 +117,4 @@ Using `terraform import`, import `aws_cloudformation_type` using the type versio % terraform import aws_cloudformation_type.example arn:aws:cloudformation:us-east-1:123456789012:type/resource/ExampleCompany-ExampleService-ExampleType/1 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/cloudfront_continuous_deployment_policy.html.markdown b/website/docs/cdktf/typescript/r/cloudfront_continuous_deployment_policy.html.markdown index 15e074312790..8dce3726d914 100644 --- a/website/docs/cdktf/typescript/r/cloudfront_continuous_deployment_policy.html.markdown +++ b/website/docs/cdktf/typescript/r/cloudfront_continuous_deployment_policy.html.markdown @@ -192,8 +192,8 @@ The following arguments are required: ### `sessionStickinessConfig` -* `idleTtl` - (Required) The amount of time in seconds after which sessions will cease if no requests are received. Valid values are `300` – `3600` (5–60 minutes). The value must be less than or equal to `maximumTtl`. -* `maximumTtl` - (Required) The maximum amount of time in seconds to consider requests from the viewer as being part of the same session. Valid values are `300` – `3600` (5–60 minutes). The value must be greater than or equal to `idleTtl`. +* `idleTtl` - (Required) The amount of time in seconds after which sessions will cease if no requests are received. Valid values are `300` - `3600` (5–60 minutes). The value must be less than or equal to `maximumTtl`. +* `maximumTtl` - (Required) The maximum amount of time in seconds to consider requests from the viewer as being part of the same session. Valid values are `300` - `3600` (5–60 minutes). The value must be greater than or equal to `idleTtl`. 
## Attribute Reference @@ -236,4 +236,4 @@ Using `terraform import`, import CloudFront Continuous Deployment Policy using t % terraform import aws_cloudfront_continuous_deployment_policy.example abcd-1234 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/cloudfront_distribution.html.markdown b/website/docs/cdktf/typescript/r/cloudfront_distribution.html.markdown index 8982d9192a8d..200fcc9a5e65 100644 --- a/website/docs/cdktf/typescript/r/cloudfront_distribution.html.markdown +++ b/website/docs/cdktf/typescript/r/cloudfront_distribution.html.markdown @@ -25,30 +25,56 @@ The example below creates a CloudFront distribution with an S3 origin. ```typescript // DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug import { Construct } from "constructs"; -import { TerraformStack } from "cdktf"; +import { Token, TerraformIterator, TerraformStack } from "cdktf"; /* * Provider bindings are generated by running `cdktf get`. * See https://cdk.tf/provider-generation for more details. 
*/ import { CloudfrontDistribution } from "./.gen/providers/aws/cloudfront-distribution"; +import { CloudfrontOriginAccessControl } from "./.gen/providers/aws/cloudfront-origin-access-control"; +import { DataAwsAcmCertificate } from "./.gen/providers/aws/data-aws-acm-certificate"; +import { DataAwsIamPolicyDocument } from "./.gen/providers/aws/data-aws-iam-policy-document"; +import { DataAwsRoute53Zone } from "./.gen/providers/aws/data-aws-route53-zone"; +import { Route53Record } from "./.gen/providers/aws/route53-record"; import { S3Bucket } from "./.gen/providers/aws/s3-bucket"; -import { S3BucketAcl } from "./.gen/providers/aws/s3-bucket-acl"; +import { S3BucketPolicy } from "./.gen/providers/aws/s3-bucket-policy"; class MyConvertedCode extends TerraformStack { constructor(scope: Construct, name: string) { super(scope, name); + const myDomain = "mydomain.com"; const s3OriginId = "myS3Origin"; + const defaultVar = new CloudfrontOriginAccessControl(this, "default", { + name: "default-oac", + originAccessControlOriginType: "s3", + signingBehavior: "always", + signingProtocol: "sigv4", + }); const b = new S3Bucket(this, "b", { bucket: "mybucket", tags: { Name: "My bucket", }, }); - new S3BucketAcl(this, "b_acl", { - acl: "private", - bucket: b.id, - }); - new CloudfrontDistribution(this, "s3_distribution", { - aliases: ["mysite.example.com", "yoursite.example.com"], + const dataAwsAcmCertificateMyDomain = new DataAwsAcmCertificate( + this, + "my_domain", + { + domain: "*.${" + myDomain + "}", + region: "us-east-1", + statuses: ["ISSUED"], + } + ); + const dataAwsRoute53ZoneMyDomain = new DataAwsRoute53Zone( + this, + "my_domain_3", + { + name: myDomain, + } + ); + /*This allows the Terraform resource name to match the original name. 
You can remove the call if you don't need them to match.*/ + dataAwsRoute53ZoneMyDomain.overrideLogicalId("my_domain"); + const s3Distribution = new CloudfrontDistribution(this, "s3_distribution", { + aliases: ["mysite.${" + myDomain + "}", "yoursite.${" + myDomain + "}"], comment: "Some comment", defaultCacheBehavior: { allowedMethods: [ @@ -76,11 +102,6 @@ class MyConvertedCode extends TerraformStack { defaultRootObject: "index.html", enabled: true, isIpv6Enabled: true, - loggingConfig: { - bucket: "mylogs.s3.amazonaws.com", - includeCookies: false, - prefix: "myprefix", - }, orderedCacheBehavior: [ { allowedMethods: ["GET", "HEAD", "OPTIONS"], @@ -136,9 +157,61 @@ class MyConvertedCode extends TerraformStack { Environment: "production", }, viewerCertificate: { - cloudfrontDefaultCertificate: true, + acmCertificateArn: Token.asString(dataAwsAcmCertificateMyDomain.arn), + sslSupportMethod: "sni-only", + }, + }); + /*In most cases loops should be handled in the programming language context and + not inside of the Terraform context. If you are looping over something external, e.g. a variable or a file input + you should consider using a for loop. If you are looping over something only known to Terraform, e.g. 
a result of a data source + you need to keep this like it is.*/ + const cloudfrontForEachIterator = TerraformIterator.fromList( + Token.asAny(s3Distribution.aliases) + ); + new Route53Record(this, "cloudfront", { + alias: { + evaluateTargetHealth: false, + name: s3Distribution.domainName, + zoneId: s3Distribution.hostedZoneId, }, + name: Token.asString(cloudfrontForEachIterator.value), + type: "A", + zoneId: Token.asString(dataAwsRoute53ZoneMyDomain.zoneId), + forEach: cloudfrontForEachIterator, }); + const originBucketPolicy = new DataAwsIamPolicyDocument( + this, + "origin_bucket_policy", + { + statement: [ + { + actions: ["s3:GetObject", "s3:PutObject"], + condition: [ + { + test: "StringEquals", + values: [s3Distribution.arn], + variable: "AWS:SourceArn", + }, + ], + effect: "Allow", + principals: [ + { + identifiers: ["cloudfront.amazonaws.com"], + type: "Service", + }, + ], + resources: ["${" + b.arn + "}/*"], + sid: "AllowCloudFrontServicePrincipalReadWrite", + }, + ], + } + ); + const awsS3BucketPolicyB = new S3BucketPolicy(this, "b_7", { + bucket: b.bucket, + policy: Token.asString(originBucketPolicy.json), + }); + /*This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match.*/ + awsS3BucketPolicyB.overrideLogicalId("b"); } } @@ -229,12 +302,8 @@ import { TerraformStack } from "cdktf"; * See https://cdk.tf/provider-generation for more details. 
*/ import { CloudfrontDistribution } from "./.gen/providers/aws/cloudfront-distribution"; -interface MyConfig { - cachedMethods: any; - viewerProtocolPolicy: any; -} class MyConvertedCode extends TerraformStack { - constructor(scope: Construct, name: string, config: MyConfig) { + constructor(scope: Construct, name: string) { super(scope, name); const s3OriginId = "myS3Origin"; new CloudfrontDistribution(this, "s3_distribution", { @@ -242,9 +311,9 @@ class MyConvertedCode extends TerraformStack { defaultCacheBehavior: { allowedMethods: ["GET", "HEAD", "OPTIONS"], cachePolicyId: "4135ea2d-6df8-44a3-9df3-4b5a84be39ad", + cachedMethods: ["GET", "HEAD"], targetOriginId: s3OriginId, - cachedMethods: config.cachedMethods, - viewerProtocolPolicy: config.viewerProtocolPolicy, + viewerProtocolPolicy: "allow-all", }, defaultRootObject: "index.html", enabled: true, @@ -289,7 +358,6 @@ import { CloudfrontDistribution } from "./.gen/providers/aws/cloudfront-distribu import { CloudwatchLogDelivery } from "./.gen/providers/aws/cloudwatch-log-delivery"; import { CloudwatchLogDeliveryDestination } from "./.gen/providers/aws/cloudwatch-log-delivery-destination"; import { CloudwatchLogDeliverySource } from "./.gen/providers/aws/cloudwatch-log-delivery-source"; -import { AwsProvider } from "./.gen/providers/aws/provider"; import { S3Bucket } from "./.gen/providers/aws/s3-bucket"; interface MyConfig { defaultCacheBehavior: any; @@ -301,15 +369,7 @@ interface MyConfig { class MyConvertedCode extends TerraformStack { constructor(scope: Construct, name: string, config: MyConfig) { super(scope, name); - new AwsProvider(this, "aws", { - region: region.stringValue, - }); - const usEast1 = new AwsProvider(this, "aws_1", { - alias: "us_east_1", - region: "us-east-1", - }); const example = new CloudfrontDistribution(this, "example", { - provider: usEast1, defaultCacheBehavior: config.defaultCacheBehavior, enabled: config.enabled, origin: config.origin, @@ -317,22 +377,22 @@ class MyConvertedCode 
extends TerraformStack { viewerCertificate: config.viewerCertificate, }); const awsCloudwatchLogDeliverySourceExample = - new CloudwatchLogDeliverySource(this, "example_3", { + new CloudwatchLogDeliverySource(this, "example_1", { logType: "ACCESS_LOGS", name: "example", - provider: usEast1, + region: "us-east-1", resourceArn: example.arn, }); /*This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match.*/ awsCloudwatchLogDeliverySourceExample.overrideLogicalId("example"); - const awsS3BucketExample = new S3Bucket(this, "example_4", { + const awsS3BucketExample = new S3Bucket(this, "example_2", { bucket: "testbucket", forceDestroy: true, }); /*This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match.*/ awsS3BucketExample.overrideLogicalId("example"); const awsCloudwatchLogDeliveryDestinationExample = - new CloudwatchLogDeliveryDestination(this, "example_5", { + new CloudwatchLogDeliveryDestination(this, "example_3", { deliveryDestinationConfiguration: [ { destinationResourceArn: "${" + awsS3BucketExample.arn + "}/prefix", @@ -340,13 +400,13 @@ class MyConvertedCode extends TerraformStack { ], name: "s3-destination", outputFormat: "parquet", - provider: usEast1, + region: "us-east-1", }); /*This allows the Terraform resource name to match the original name. 
You can remove the call if you don't need them to match.*/ awsCloudwatchLogDeliveryDestinationExample.overrideLogicalId("example"); const awsCloudwatchLogDeliveryExample = new CloudwatchLogDelivery( this, - "example_6", + "example_4", { deliveryDestinationArn: Token.asString( awsCloudwatchLogDeliveryDestinationExample.arn @@ -354,7 +414,7 @@ class MyConvertedCode extends TerraformStack { deliverySourceName: Token.asString( awsCloudwatchLogDeliverySourceExample.name ), - provider: usEast1, + region: "us-east-1", s3DeliveryConfiguration: [ { suffixPath: "/123456678910/{DistributionId}/{yyyy}/{MM}/{dd}/{HH}", @@ -369,11 +429,102 @@ class MyConvertedCode extends TerraformStack { ``` +### With V2 logging to Data Firehose + +The example below creates a CloudFront distribution with [standard logging V2 to Data Firehose](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/standard-logging.html#enable-access-logging-api). + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { Token, TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. 
+ */ +import { CloudfrontDistribution } from "./.gen/providers/aws/cloudfront-distribution"; +import { CloudwatchLogDelivery } from "./.gen/providers/aws/cloudwatch-log-delivery"; +import { CloudwatchLogDeliveryDestination } from "./.gen/providers/aws/cloudwatch-log-delivery-destination"; +import { CloudwatchLogDeliverySource } from "./.gen/providers/aws/cloudwatch-log-delivery-source"; +import { KinesisFirehoseDeliveryStream } from "./.gen/providers/aws/kinesis-firehose-delivery-stream"; +interface MyConfig { + defaultCacheBehavior: any; + enabled: any; + origin: any; + restrictions: any; + viewerCertificate: any; + destination: any; + name: any; +} +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string, config: MyConfig) { + super(scope, name); + const example = new CloudfrontDistribution(this, "example", { + defaultCacheBehavior: config.defaultCacheBehavior, + enabled: config.enabled, + origin: config.origin, + restrictions: config.restrictions, + viewerCertificate: config.viewerCertificate, + }); + const awsCloudwatchLogDeliverySourceExample = + new CloudwatchLogDeliverySource(this, "example_1", { + logType: "ACCESS_LOGS", + name: "cloudfront-logs-source", + region: "us-east-1", + resourceArn: example.arn, + }); + /*This allows the Terraform resource name to match the original name. 
You can remove the call if you don't need them to match.*/ + awsCloudwatchLogDeliverySourceExample.overrideLogicalId("example"); + const cloudfrontLogs = new KinesisFirehoseDeliveryStream( + this, + "cloudfront_logs", + { + region: "us-east-1", + tags: { + LogDeliveryEnabled: "true", + }, + destination: config.destination, + name: config.name, + } + ); + const awsCloudwatchLogDeliveryDestinationExample = + new CloudwatchLogDeliveryDestination(this, "example_3", { + deliveryDestinationConfiguration: [ + { + destinationResourceArn: cloudfrontLogs.arn, + }, + ], + name: "firehose-destination", + outputFormat: "json", + region: "us-east-1", + }); + /*This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match.*/ + awsCloudwatchLogDeliveryDestinationExample.overrideLogicalId("example"); + const awsCloudwatchLogDeliveryExample = new CloudwatchLogDelivery( + this, + "example_4", + { + deliveryDestinationArn: Token.asString( + awsCloudwatchLogDeliveryDestinationExample.arn + ), + deliverySourceName: Token.asString( + awsCloudwatchLogDeliverySourceExample.name + ), + region: "us-east-1", + } + ); + /*This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match.*/ + awsCloudwatchLogDeliveryExample.overrideLogicalId("example"); + } +} + +``` + ## Argument Reference This resource supports the following arguments: * `aliases` (Optional) - Extra CNAMEs (alternate domain names), if any, for this distribution. +* `anycastIpListId` (Optional) - ID of the Anycast static IP list that is associated with the distribution. * `comment` (Optional) - Any comments you want to include about the distribution. * `continuousDeploymentPolicyId` (Optional) - Identifier of a continuous deployment policy. This argument should only be set on a production distribution. 
See the [`aws_cloudfront_continuous_deployment_policy` resource](./cloudfront_continuous_deployment_policy.html.markdown) for additional details. * `customErrorResponse` (Optional) - One or more [custom error response](#custom-error-response-arguments) elements (multiples allowed). @@ -565,6 +716,8 @@ class MyConvertedCode extends TerraformStack { #### Custom Error Response Arguments +~> **NOTE:** When specifying either `responsePagePath` or `responseCode`, **both** must be set. + * `errorCachingMinTtl` (Optional) - Minimum amount of time you want HTTP error codes to stay in CloudFront caches before CloudFront queries your origin to see whether the object has been updated. * `errorCode` (Required) - 4xx or 5xx HTTP status code that you want to customize. * `responseCode` (Optional) - HTTP status code that you want CloudFront to return with the custom error page to the viewer. @@ -593,13 +746,15 @@ argument should not be specified. * `originId` (Required) - Unique identifier for the origin. * `originPath` (Optional) - Optional element that causes CloudFront to request your content from a directory in your Amazon S3 bucket or your custom origin. * `originShield` - (Optional) [CloudFront Origin Shield](#origin-shield-arguments) configuration information. Using Origin Shield can help reduce the load on your origin. For more information, see [Using Origin Shield](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/origin-shield.html) in the Amazon CloudFront Developer Guide. +* `responseCompletionTimeout` - (Optional) Time (in seconds) that a request from CloudFront to the origin can stay open and wait for a response. Must be an integer greater than or equal to the value of `originReadTimeout`. If omitted or explicitly set to `0`, no maximum value is enforced. * `s3OriginConfig` - (Optional) [CloudFront S3 origin](#s3-origin-config-arguments) configuration information. If a custom origin is required, use `customOriginConfig` instead. 
-* `vpcOriginConfig` - (Optional) The VPC origin configuration. +* `vpcOriginConfig` - (Optional) The [VPC origin configuration](#vpc-origin-config-arguments). ##### Custom Origin Config Arguments * `httpPort` (Required) - HTTP port the custom origin listens on. * `httpsPort` (Required) - HTTPS port the custom origin listens on. +* `ipAddressType` (Optional) - IP protocol CloudFront uses when connecting to your origin. Valid values: `ipv4`, `ipv6`, `dualstack`. * `originProtocolPolicy` (Required) - Origin protocol policy to apply to your origin. One of `http-only`, `https-only`, or `match-viewer`. * `originSslProtocols` (Required) - List of SSL/TLS protocols that CloudFront can use when connecting to your origin over HTTPS. Valid values: `SSLv3`, `TLSv1`, `TLSv1.1`, `TLSv1.2`. For more information, see [Minimum Origin SSL Protocol](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/distribution-web-values-specify.html#DownloadDistValuesOriginSSLProtocols) in the Amazon CloudFront Developer Guide. * `originKeepaliveTimeout` - (Optional) The Custom KeepAlive timeout, in seconds. By default, AWS enforces an upper limit of `60`. But you can request an [increase](http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/RequestAndResponseBehaviorCustomOrigin.html#request-custom-request-timeout). Defaults to `5`. @@ -716,4 +871,4 @@ Using `terraform import`, import CloudFront Distributions using the `id`. 
For ex % terraform import aws_cloudfront_distribution.distribution E74FTE3EXAMPLE ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/cloudfront_function.html.markdown b/website/docs/cdktf/typescript/r/cloudfront_function.html.markdown index 979e1a52db00..e96ce8a680bb 100644 --- a/website/docs/cdktf/typescript/r/cloudfront_function.html.markdown +++ b/website/docs/cdktf/typescript/r/cloudfront_function.html.markdown @@ -56,7 +56,7 @@ The following arguments are optional: * `comment` - (Optional) Comment. * `publish` - (Optional) Whether to publish creation/change as Live CloudFront Function Version. Defaults to `true`. -* `keyValueStoreAssociations` - (Optional) List of `aws_cloudfront_key_value_store` ARNs to be associated to the function. AWS limits associations to on key value store per function. +* `keyValueStoreAssociations` - (Optional) List of `aws_cloudfront_key_value_store` ARNs to be associated to the function. AWS limits associations to one key value store per function. ## Attribute Reference @@ -99,4 +99,4 @@ Using `terraform import`, import CloudFront Functions using the `name`. For exam % terraform import aws_cloudfront_function.test my_test_function ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/cloudfront_key_value_store.html.markdown b/website/docs/cdktf/typescript/r/cloudfront_key_value_store.html.markdown index 1d48e1c75517..23d85924ae97 100644 --- a/website/docs/cdktf/typescript/r/cloudfront_key_value_store.html.markdown +++ b/website/docs/cdktf/typescript/r/cloudfront_key_value_store.html.markdown @@ -52,8 +52,8 @@ The following arguments are optional: This resource exports the following attributes in addition to the arguments above: * `arn` - Amazon Resource Name (ARN) identifying your CloudFront KeyValueStore. -* `id` - A unique identifier for the KeyValueStore. Same as `name`. * `etag` - ETag hash of the KeyValueStore. 
+* `id` - A unique identifier for the KeyValueStore. ## Timeouts @@ -63,6 +63,31 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_cloudfront_key_value_store.example + identity = { + name = "example_store" + } +} + +resource "aws_cloudfront_key_value_store" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `name` (String) Name of the CloudFront Key Value Store. + +#### Optional + +* `accountId` (String) AWS Account where this resource is managed. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import CloudFront Key Value Store using the `name`. For example: ```typescript @@ -93,4 +118,4 @@ Using `terraform import`, import CloudFront Key Value Store using the `name`. Fo % terraform import aws_cloudfront_key_value_store.example example_store ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/cloudfront_realtime_log_config.html.markdown b/website/docs/cdktf/typescript/r/cloudfront_realtime_log_config.html.markdown index 606bd1bc16e9..2c1211ef7ffd 100644 --- a/website/docs/cdktf/typescript/r/cloudfront_realtime_log_config.html.markdown +++ b/website/docs/cdktf/typescript/r/cloudfront_realtime_log_config.html.markdown @@ -120,6 +120,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. 
For example: + +```terraform +import { + to = aws_cloudfront_realtime_log_config.example + identity = { + "arn" = "arn:aws:cloudfront::123456789012:realtime-log-config/ExampleNameForRealtimeLogConfig" + } +} + +resource "aws_cloudfront_realtime_log_config" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the CloudFront real-time log configuration. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import CloudFront real-time log configurations using the ARN. For example: ```typescript @@ -150,4 +171,4 @@ Using `terraform import`, import CloudFront real-time log configurations using t % terraform import aws_cloudfront_realtime_log_config.example arn:aws:cloudfront::111122223333:realtime-log-config/ExampleNameForRealtimeLogConfig ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/cloudfrontkeyvaluestore_key.html.markdown b/website/docs/cdktf/typescript/r/cloudfrontkeyvaluestore_key.html.markdown index eb30a9dc2bbb..73f8fc60d165 100644 --- a/website/docs/cdktf/typescript/r/cloudfrontkeyvaluestore_key.html.markdown +++ b/website/docs/cdktf/typescript/r/cloudfrontkeyvaluestore_key.html.markdown @@ -68,7 +68,34 @@ This resource exports the following attributes in addition to the arguments abov ## Import -In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import CloudFront KeyValueStore Key using the `example_id_arg`. For example: +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. 
For example: + +```terraform +import { + to = aws_cloudfrontkeyvaluestore_key.example + identity = { + key_value_store_arn = "arn:aws:cloudfront::111111111111:key-value-store/8562g61f-caba-2845-9d99-b97diwae5d3c" + key = "someKey" + } +} + +resource "aws_cloudfrontkeyvaluestore_key" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `keyValueStoreArn` (String) ARN of the CloudFront Key Value Store. +* `key` (String) Key name. + +#### Optional + +* `accountId` (String) AWS Account where this resource is managed. + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import CloudFront KeyValueStore Key using the `keyValueStoreArn` and 'key' separated by `,`. For example: ```typescript // DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug @@ -92,10 +119,10 @@ class MyConvertedCode extends TerraformStack { ``` -Using `terraform import`, import CloudFront KeyValueStore Key using the `id`. For example: +Using `terraform import`, import CloudFront KeyValueStore Key using the `keyValueStoreArn` and 'key' separated by `,`. 
For example: ```console % terraform import aws_cloudfrontkeyvaluestore_key.example arn:aws:cloudfront::111111111111:key-value-store/8562g61f-caba-2845-9d99-b97diwae5d3c,someKey ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/cloudhsm_v2_cluster.html.markdown b/website/docs/cdktf/typescript/r/cloudhsm_v2_cluster.html.markdown index 0e790ef9defb..194c360baa54 100644 --- a/website/docs/cdktf/typescript/r/cloudhsm_v2_cluster.html.markdown +++ b/website/docs/cdktf/typescript/r/cloudhsm_v2_cluster.html.markdown @@ -89,6 +89,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `sourceBackupIdentifier` - (Optional) ID of Cloud HSM v2 cluster backup to be restored. * `hsmType` - (Required) The type of HSM module in the cluster. Currently, `hsm1.medium` and `hsm2m.medium` are supported. * `subnetIds` - (Required) The IDs of subnets in which cluster will operate. @@ -146,4 +147,4 @@ Using `terraform import`, import CloudHSM v2 Clusters using the cluster `id`. 
Fo % terraform import aws_cloudhsm_v2_cluster.test_cluster cluster-aeb282a201 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/cloudhsm_v2_hsm.html.markdown b/website/docs/cdktf/typescript/r/cloudhsm_v2_hsm.html.markdown index 3e4a2307e77d..9ae1a03fb919 100644 --- a/website/docs/cdktf/typescript/r/cloudhsm_v2_hsm.html.markdown +++ b/website/docs/cdktf/typescript/r/cloudhsm_v2_hsm.html.markdown @@ -45,13 +45,14 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: -~> **NOTE:** Either `subnetId` or `availabilityZone` must be specified. - +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `clusterId` - (Required) The ID of Cloud HSM v2 cluster to which HSM will be added. * `subnetId` - (Optional) The ID of subnet in which HSM module will be located. Conflicts with `availabilityZone`. * `availabilityZone` - (Optional) The IDs of AZ in which HSM module will be located. Conflicts with `subnetId`. * `ipAddress` - (Optional) The IP address of HSM module. Must be within the CIDR of selected subnet. +~> **NOTE:** Either `subnetId` or `availabilityZone` must be specified. + ## Attribute Reference This resource exports the following attributes in addition to the arguments above: @@ -92,4 +93,4 @@ Using `terraform import`, import HSM modules using their HSM ID. 
For example: % terraform import aws_cloudhsm_v2_hsm.bar hsm-quo8dahtaca ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/cloudsearch_domain.html.markdown b/website/docs/cdktf/typescript/r/cloudsearch_domain.html.markdown index 9603f7e0bcc0..18d7c029c65c 100644 --- a/website/docs/cdktf/typescript/r/cloudsearch_domain.html.markdown +++ b/website/docs/cdktf/typescript/r/cloudsearch_domain.html.markdown @@ -63,6 +63,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `endpointOptions` - (Optional) Domain endpoint options. Documented below. * `indexField` - (Optional) The index fields for documents added to the domain. Documented below. * `multiAz` - (Optional) Whether or not to maintain extra instances for the domain in a second Availability Zone to ensure high availability. @@ -148,4 +149,4 @@ Using `terraform import`, import CloudSearch Domains using the `name`. 
For examp % terraform import aws_cloudsearch_domain.example example-domain ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/cloudsearch_domain_service_access_policy.html.markdown b/website/docs/cdktf/typescript/r/cloudsearch_domain_service_access_policy.html.markdown index e69cd8347138..1525df0cac99 100644 --- a/website/docs/cdktf/typescript/r/cloudsearch_domain_service_access_policy.html.markdown +++ b/website/docs/cdktf/typescript/r/cloudsearch_domain_service_access_policy.html.markdown @@ -77,6 +77,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `accessPolicy` - (Required) The access rules you want to configure. These rules replace any existing rules. See the [AWS documentation](https://docs.aws.amazon.com/cloudsearch/latest/developerguide/configuring-access.html) for details. * `domainName` - (Required) The CloudSearch domain name the policy applies to. 
@@ -123,4 +124,4 @@ Using `terraform import`, import CloudSearch domain service access policies usin % terraform import aws_cloudsearch_domain_service_access_policy.example example-domain ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/cloudtrail.html.markdown b/website/docs/cdktf/typescript/r/cloudtrail.html.markdown index f90e6afa1997..c224bad7a73c 100644 --- a/website/docs/cdktf/typescript/r/cloudtrail.html.markdown +++ b/website/docs/cdktf/typescript/r/cloudtrail.html.markdown @@ -66,7 +66,7 @@ class MyConvertedCode extends TerraformStack { "arn:${" + dataAwsPartitionCurrent.partition + "}:cloudtrail:${" + - dataAwsRegionCurrent.name + + dataAwsRegionCurrent.region + "}:${" + current.accountId + "}:trail/example", @@ -98,7 +98,7 @@ class MyConvertedCode extends TerraformStack { "arn:${" + dataAwsPartitionCurrent.partition + "}:cloudtrail:${" + - dataAwsRegionCurrent.name + + dataAwsRegionCurrent.region + "}:${" + current.accountId + "}:trail/example", @@ -483,6 +483,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `advancedEventSelector` - (Optional) Specifies an advanced event selector for enabling data event logging. Fields documented below. Conflicts with `eventSelector`. * `cloudWatchLogsGroupArn` - (Optional) Log group name using an ARN that represents the log group to which CloudTrail logs will be delivered. Note that CloudTrail requires the Log Stream wildcard. * `cloudWatchLogsRoleArn` - (Optional) Role for the CloudWatch Logs endpoint to assume to write to a user’s log group. @@ -571,4 +572,4 @@ Using `terraform import`, import Cloudtrails using the `arn`. 
For example: % terraform import aws_cloudtrail.sample arn:aws:cloudtrail:us-east-1:123456789012:trail/my-sample-trail ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/cloudtrail_event_data_store.html.markdown b/website/docs/cdktf/typescript/r/cloudtrail_event_data_store.html.markdown index 897995d9ea0c..98a572cf104e 100644 --- a/website/docs/cdktf/typescript/r/cloudtrail_event_data_store.html.markdown +++ b/website/docs/cdktf/typescript/r/cloudtrail_event_data_store.html.markdown @@ -104,6 +104,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +- `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). - `name` - (Required) The name of the event data store. - `billingMode` - (Optional) The billing mode for the event data store. The valid values are `EXTENDABLE_RETENTION_PRICING` and `FIXED_RETENTION_PRICING`. Defaults to `EXTENDABLE_RETENTION_PRICING`. - `suspend` - (Optional) Specifies whether to stop ingesting new events into the event data store. If set to `true`, ingestion is suspended while maintaining the ability to query existing events. If set to `false`, ingestion is active. @@ -144,6 +145,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. 
For example: + +```terraform +import { + to = aws_cloudtrail_event_data_store.example + identity = { + "arn" = "arn:aws:cloudtrail:us-east-1:123456789012:eventdatastore/example-event-data-store-id" + } +} + +resource "aws_cloudtrail_event_data_store" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the CloudTrail event data store. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import event data stores using their `arn`. For example: ```typescript @@ -174,4 +196,4 @@ Using `terraform import`, import event data stores using their `arn`. For exampl % terraform import aws_cloudtrail_event_data_store.example arn:aws:cloudtrail:us-east-1:123456789123:eventdatastore/22333815-4414-412c-b155-dd254033gfhf ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/cloudwatch_composite_alarm.html.markdown b/website/docs/cdktf/typescript/r/cloudwatch_composite_alarm.html.markdown index 74ee3ba72ebb..f6083697a622 100644 --- a/website/docs/cdktf/typescript/r/cloudwatch_composite_alarm.html.markdown +++ b/website/docs/cdktf/typescript/r/cloudwatch_composite_alarm.html.markdown @@ -54,6 +54,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `actionsEnabled` - (Optional, Forces new resource) Indicates whether actions should be executed during any changes to the alarm state of the composite alarm. Defaults to `true`. 
* `actionsSuppressor` - (Optional) Actions will be suppressed if the suppressor alarm is in the ALARM state. * `alarm` - (Required) Can be an AlarmName or an Amazon Resource Name (ARN) from an existing alarm. @@ -103,4 +104,4 @@ Using `terraform import`, import a CloudWatch Composite Alarm using the `alarmNa % terraform import aws_cloudwatch_composite_alarm.test my-alarm ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/cloudwatch_contributor_insight_rule.html.markdown b/website/docs/cdktf/typescript/r/cloudwatch_contributor_insight_rule.html.markdown index daf57063e0f6..411ff096a188 100644 --- a/website/docs/cdktf/typescript/r/cloudwatch_contributor_insight_rule.html.markdown +++ b/website/docs/cdktf/typescript/r/cloudwatch_contributor_insight_rule.html.markdown @@ -48,6 +48,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `ruleState` - (Optional) State of the rule. Valid values are `ENABLED` and `DISABLED`. 
## Attribute Reference @@ -88,4 +89,4 @@ Using `terraform import`, import CloudWatch Contributor Insight Rule using the ` % terraform import aws_cloudwatch_contributor_insight_rule.example contributor_insight_rule-name ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/cloudwatch_contributor_managed_insight_rule.html.markdown b/website/docs/cdktf/typescript/r/cloudwatch_contributor_managed_insight_rule.html.markdown index b56363cb4e00..6fc544740ecb 100644 --- a/website/docs/cdktf/typescript/r/cloudwatch_contributor_managed_insight_rule.html.markdown +++ b/website/docs/cdktf/typescript/r/cloudwatch_contributor_managed_insight_rule.html.markdown @@ -47,6 +47,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `ruleState` - (Optional) State of the rule. Valid values are `ENABLED` and `DISABLED`. 
## Attribute Reference @@ -87,4 +88,4 @@ Using `terraform import`, import CloudWatch Contributor Managed Insight Rule usi % terraform import aws_cloudwatch_contributor_managed_insight_rule.example contributor_managed_insight_rule-id-12345678 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/cloudwatch_dashboard.html.markdown b/website/docs/cdktf/typescript/r/cloudwatch_dashboard.html.markdown index ddb01e636e8f..7466b02711ad 100644 --- a/website/docs/cdktf/typescript/r/cloudwatch_dashboard.html.markdown +++ b/website/docs/cdktf/typescript/r/cloudwatch_dashboard.html.markdown @@ -70,6 +70,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `dashboardName` - (Required) The name of the dashboard. * `dashboardBody` - (Required) The detailed information about the dashboard, including what widgets are included and their location on the dashboard. You can read more about the body structure in the [documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/CloudWatch-Dashboard-Body-Structure.html). 
@@ -111,4 +112,4 @@ Using `terraform import`, import CloudWatch dashboards using the `dashboardName` % terraform import aws_cloudwatch_dashboard.sample dashboard_name ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/cloudwatch_event_api_destination.html.markdown b/website/docs/cdktf/typescript/r/cloudwatch_event_api_destination.html.markdown index 005c3434cb5c..2d7e592c00e1 100644 --- a/website/docs/cdktf/typescript/r/cloudwatch_event_api_destination.html.markdown +++ b/website/docs/cdktf/typescript/r/cloudwatch_event_api_destination.html.markdown @@ -45,6 +45,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The name of the new API Destination. The name must be unique for your account. Maximum of 64 characters consisting of numbers, lower/upper case letters, .,-,_. * `description` - (Optional) The description of the new API Destination. Maximum of 512 characters. * `invocationEndpoint` - (Required) URL endpoint to invoke as a target. This could be a valid endpoint generated by a partner service. You can include "*" as path parameters wildcards to be set from the Target HttpParameters. @@ -90,4 +91,4 @@ Using `terraform import`, import EventBridge API Destinations using the `name`. 
% terraform import aws_cloudwatch_event_api_destination.test api-destination ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/cloudwatch_event_archive.html.markdown b/website/docs/cdktf/typescript/r/cloudwatch_event_archive.html.markdown index dcd340094f28..c0ec018115b8 100644 --- a/website/docs/cdktf/typescript/r/cloudwatch_event_archive.html.markdown +++ b/website/docs/cdktf/typescript/r/cloudwatch_event_archive.html.markdown @@ -47,7 +47,7 @@ class MyConvertedCode extends TerraformStack { ``` -## Example all optional arguments +## Example Usage Optional Arguments ```typescript // DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug @@ -87,21 +87,117 @@ class MyConvertedCode extends TerraformStack { ``` +## Example Usage CMK Encryption + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { Fn, Token, TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. + */ +import { CloudwatchEventArchive } from "./.gen/providers/aws/cloudwatch-event-archive"; +import { CloudwatchEventBus } from "./.gen/providers/aws/cloudwatch-event-bus"; +import { DataAwsCallerIdentity } from "./.gen/providers/aws/data-aws-caller-identity"; +import { DataAwsPartition } from "./.gen/providers/aws/data-aws-partition"; +import { KmsKey } from "./.gen/providers/aws/kms-key"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + const example = new CloudwatchEventBus(this, "example", { + name: "example", + }); + const current = new DataAwsCallerIdentity(this, "current", {}); + const dataAwsPartitionCurrent = new DataAwsPartition(this, "current_2", {}); + /*This allows the Terraform resource name to match the original name. 
You can remove the call if you don't need them to match.*/ + dataAwsPartitionCurrent.overrideLogicalId("current"); + const awsKmsKeyExample = new KmsKey(this, "example_3", { + deletionWindowInDays: 7, + policy: Token.asString( + Fn.jsonencode({ + Id: "key-policy-example", + Statement: [ + { + Action: "kms:*", + Effect: "Allow", + Principal: { + AWS: + "arn:${" + + dataAwsPartitionCurrent.partition + + "}:iam::${" + + current.accountId + + "}:root", + }, + Resource: "*", + Sid: "Enable IAM User Permissions", + }, + { + Action: ["kms:DescribeKey"], + Effect: "Allow", + Principal: { + Service: "events.amazonaws.com", + }, + Resource: "*", + Sid: "Allow describing of the key", + }, + { + Action: ["kms:GenerateDataKey", "kms:Decrypt", "kms:ReEncrypt*"], + Condition: { + StringEquals: { + "kms:EncryptionContext:aws:events:event-bus:arn": example.arn, + }, + }, + Effect: "Allow", + Principal: { + Service: "events.amazonaws.com", + }, + Resource: "*", + Sid: "Allow use of the key", + }, + ], + Version: "2012-10-17", + }) + ), + tags: { + EventBridgeApiDestinations: "true", + }, + }); + /*This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match.*/ + awsKmsKeyExample.overrideLogicalId("example"); + const awsCloudwatchEventArchiveExample = new CloudwatchEventArchive( + this, + "example_4", + { + eventSourceArn: example.arn, + kmsKeyIdentifier: Token.asString(awsKmsKeyExample.id), + name: "example", + } + ); + /*This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match.*/ + awsCloudwatchEventArchiveExample.overrideLogicalId("example"); + } +} + +``` + ## Argument Reference This resource supports the following arguments: -* `name` - (Required) The name of the new event archive. The archive name cannot exceed 48 characters. -* `eventSourceArn` - (Required) Event bus source ARN from where these events should be archived. 
-* `description` - (Optional) The description of the new event archive. -* `eventPattern` - (Optional) Instructs the new event archive to only capture events matched by this pattern. By default, it attempts to archive every event received in the `eventSourceArn`. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `name` - (Required) Name of the archive. The archive name cannot exceed 48 characters. +* `eventSourceArn` - (Required) ARN of the event bus associated with the archive. Only events from this event bus are sent to the archive. +* `description` - (Optional) Description for the archive. +* `eventPattern` - (Optional) Event pattern to use to filter events sent to the archive. By default, it attempts to archive every event received in the `eventSourceArn`. +* `kmsKeyIdentifier` - (Optional) Identifier of the AWS KMS customer managed key for EventBridge to use, if you choose to use a customer managed key to encrypt this archive. The identifier can be the key Amazon Resource Name (ARN), KeyId, key alias, or key alias ARN. * `retentionDays` - (Optional) The maximum number of days to retain events in the new event archive. By default, it archives indefinitely. ## Attribute Reference This resource exports the following attributes in addition to the arguments above: -* `arn` - The Amazon Resource Name (ARN) of the event archive. +* `arn` - ARN of the archive. ## Import @@ -135,4 +231,4 @@ Using `terraform import`, import an EventBridge archive using the `name`. 
For ex % terraform import aws_cloudwatch_event_archive.imported_event_archive order-archive ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/cloudwatch_event_bus.html.markdown b/website/docs/cdktf/typescript/r/cloudwatch_event_bus.html.markdown index e7ed13c592b4..a6bdb5f9b11b 100644 --- a/website/docs/cdktf/typescript/r/cloudwatch_event_bus.html.markdown +++ b/website/docs/cdktf/typescript/r/cloudwatch_event_bus.html.markdown @@ -16,6 +16,8 @@ Provides an EventBridge event bus resource. ## Example Usage +### Basic Usages + ```typescript // DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug import { Construct } from "constructs"; @@ -72,21 +74,299 @@ class MyConvertedCode extends TerraformStack { ``` +### Logging to CloudWatch Logs, S3, and Data Firehose + +See [Configuring logs for Amazon EventBridge event buses](https://docs.aws.amazon.com/eventbridge/latest/userguide/eb-event-bus-logs.html) for more details. 
+ +#### Required Resources + +* EventBridge Event Bus with `logConfig` configured +* Log destinations: + + * CloudWatch Logs log group + * S3 bucket + * Data Firehose delivery stream + +* Resource-based policy or tagging for the service-linked role: + + * CloudWatch Logs log group - `aws_cloudwatch_log_resource_policy` to allow `delivery.logs.amazonaws.com` to put logs into the log group + * S3 bucket - `aws_s3_bucket_policy` to allow `delivery.logs.amazonaws.com` to put logs into the bucket + * Data Firehose delivery stream - tagging the delivery stream with `LogDeliveryEnabled = "true"` to allow the service-linked role `AWSServiceRoleForLogDelivery` to deliver logs + +* CloudWatch Logs Delivery: + + * `aws_cloudwatch_log_delivery_source` for each log type (INFO, ERROR, TRACE) + * `aws_cloudwatch_log_delivery_destination` for the log destination (S3 bucket, CloudWatch Logs log group, or Data Firehose delivery stream) + * `aws_cloudwatch_log_delivery` to link each log type’s delivery source to the delivery destination + +#### Example Usage + +The following example demonstrates how to set up logging for an EventBridge event bus to all three destinations: CloudWatch Logs, S3, and Data Firehose. + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { Token, TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. 
+ */ +import { CloudwatchEventBus } from "./.gen/providers/aws/cloudwatch-event-bus"; +import { CloudwatchLogDelivery } from "./.gen/providers/aws/cloudwatch-log-delivery"; +import { CloudwatchLogDeliveryDestination } from "./.gen/providers/aws/cloudwatch-log-delivery-destination"; +import { CloudwatchLogDeliverySource } from "./.gen/providers/aws/cloudwatch-log-delivery-source"; +import { CloudwatchLogGroup } from "./.gen/providers/aws/cloudwatch-log-group"; +import { CloudwatchLogResourcePolicy } from "./.gen/providers/aws/cloudwatch-log-resource-policy"; +import { DataAwsCallerIdentity } from "./.gen/providers/aws/data-aws-caller-identity"; +import { DataAwsIamPolicyDocument } from "./.gen/providers/aws/data-aws-iam-policy-document"; +import { KinesisFirehoseDeliveryStream } from "./.gen/providers/aws/kinesis-firehose-delivery-stream"; +import { S3Bucket } from "./.gen/providers/aws/s3-bucket"; +import { S3BucketPolicy } from "./.gen/providers/aws/s3-bucket-policy"; +interface MyConfig { + destination: any; + name: any; +} +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string, config: MyConfig) { + super(scope, name); + const example = new CloudwatchEventBus(this, "example", { + logConfig: { + includeDetail: "FULL", + level: "TRACE", + }, + name: "example-event-bus", + }); + const errorLogs = new CloudwatchLogDeliverySource(this, "error_logs", { + logType: "ERROR_LOGS", + name: "EventBusSource-${" + example.name + "}-ERROR_LOGS", + resourceArn: example.arn, + }); + const infoLogs = new CloudwatchLogDeliverySource(this, "info_logs", { + logType: "INFO_LOGS", + name: "EventBusSource-${" + example.name + "}-INFO_LOGS", + resourceArn: example.arn, + }); + const traceLogs = new CloudwatchLogDeliverySource(this, "trace_logs", { + logType: "TRACE_LOGS", + name: "EventBusSource-${" + example.name + "}-TRACE_LOGS", + resourceArn: example.arn, + }); + const eventBusLogs = new CloudwatchLogGroup(this, "event_bus_logs", { + name: 
"/aws/vendedlogs/events/event-bus/${" + example.name + "}", + }); + const cloudfrontLogs = new KinesisFirehoseDeliveryStream( + this, + "cloudfront_logs", + { + tags: { + LogDeliveryEnabled: "true", + }, + destination: config.destination, + name: config.name, + } + ); + const awsS3BucketExample = new S3Bucket(this, "example_6", { + bucket: "example-event-bus-logs", + }); + /*This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match.*/ + awsS3BucketExample.overrideLogicalId("example"); + const current = new DataAwsCallerIdentity(this, "current", {}); + const bucket = new DataAwsIamPolicyDocument(this, "bucket", { + statement: [ + { + actions: ["s3:PutObject"], + condition: [ + { + test: "StringEquals", + values: ["bucket-owner-full-control"], + variable: "s3:x-amz-acl", + }, + { + test: "StringEquals", + values: [Token.asString(current.accountId)], + variable: "aws:SourceAccount", + }, + { + test: "ArnLike", + values: [infoLogs.arn, errorLogs.arn, traceLogs.arn], + variable: "aws:SourceArn", + }, + ], + effect: "Allow", + principals: [ + { + identifiers: ["delivery.logs.amazonaws.com"], + type: "Service", + }, + ], + resources: [ + "${" + + awsS3BucketExample.arn + + "}/AWSLogs/${" + + current.accountId + + "}/EventBusLogs/*", + ], + }, + ], + }); + const cwlogs = new DataAwsIamPolicyDocument(this, "cwlogs", { + statement: [ + { + actions: ["logs:CreateLogStream", "logs:PutLogEvents"], + condition: [ + { + test: "StringEquals", + values: [Token.asString(current.accountId)], + variable: "aws:SourceAccount", + }, + { + test: "ArnLike", + values: [infoLogs.arn, errorLogs.arn, traceLogs.arn], + variable: "aws:SourceArn", + }, + ], + effect: "Allow", + principals: [ + { + identifiers: ["delivery.logs.amazonaws.com"], + type: "Service", + }, + ], + resources: ["${" + eventBusLogs.arn + "}:log-stream:*"], + }, + ], + }); + const awsCloudwatchLogDeliveryDestinationCwlogs = + new 
CloudwatchLogDeliveryDestination(this, "cwlogs_10", { + deliveryDestinationConfiguration: [ + { + destinationResourceArn: eventBusLogs.arn, + }, + ], + name: "EventsDeliveryDestination-${" + example.name + "}-CWLogs", + }); + /*This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match.*/ + awsCloudwatchLogDeliveryDestinationCwlogs.overrideLogicalId("cwlogs"); + const firehose = new CloudwatchLogDeliveryDestination(this, "firehose", { + deliveryDestinationConfiguration: [ + { + destinationResourceArn: cloudfrontLogs.arn, + }, + ], + name: "EventsDeliveryDestination-${" + example.name + "}-Firehose", + }); + const s3 = new CloudwatchLogDeliveryDestination(this, "s3", { + deliveryDestinationConfiguration: [ + { + destinationResourceArn: Token.asString(awsS3BucketExample.arn), + }, + ], + name: "EventsDeliveryDestination-${" + example.name + "}-S3", + }); + const awsCloudwatchLogResourcePolicyExample = + new CloudwatchLogResourcePolicy(this, "example_13", { + policyDocument: Token.asString(cwlogs.json), + policyName: "AWSLogDeliveryWrite-${" + example.name + "}", + }); + /*This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match.*/ + awsCloudwatchLogResourcePolicyExample.overrideLogicalId("example"); + const awsS3BucketPolicyExample = new S3BucketPolicy(this, "example_14", { + bucket: Token.asString(awsS3BucketExample.bucket), + policy: Token.asString(bucket.json), + }); + /*This allows the Terraform resource name to match the original name. 
You can remove the call if you don't need them to match.*/ + awsS3BucketPolicyExample.overrideLogicalId("example"); + const s3InfoLogs = new CloudwatchLogDelivery(this, "s3_info_logs", { + deliveryDestinationArn: s3.arn, + deliverySourceName: infoLogs.name, + }); + const cwlogsInfoLogs = new CloudwatchLogDelivery(this, "cwlogs_info_logs", { + deliveryDestinationArn: Token.asString( + awsCloudwatchLogDeliveryDestinationCwlogs.arn + ), + deliverySourceName: infoLogs.name, + dependsOn: [s3InfoLogs], + }); + const firehoseInfoLogs = new CloudwatchLogDelivery( + this, + "firehose_info_logs", + { + deliveryDestinationArn: firehose.arn, + deliverySourceName: infoLogs.name, + dependsOn: [cwlogsInfoLogs], + } + ); + const s3ErrorLogs = new CloudwatchLogDelivery(this, "s3_error_logs", { + deliveryDestinationArn: s3.arn, + deliverySourceName: errorLogs.name, + dependsOn: [s3InfoLogs], + }); + const s3TraceLogs = new CloudwatchLogDelivery(this, "s3_trace_logs", { + deliveryDestinationArn: s3.arn, + deliverySourceName: traceLogs.name, + dependsOn: [s3ErrorLogs], + }); + const cwlogsErrorLogs = new CloudwatchLogDelivery( + this, + "cwlogs_error_logs", + { + deliveryDestinationArn: Token.asString( + awsCloudwatchLogDeliveryDestinationCwlogs.arn + ), + deliverySourceName: errorLogs.name, + dependsOn: [s3ErrorLogs, cwlogsInfoLogs], + } + ); + const cwlogsTraceLogs = new CloudwatchLogDelivery( + this, + "cwlogs_trace_logs", + { + deliveryDestinationArn: Token.asString( + awsCloudwatchLogDeliveryDestinationCwlogs.arn + ), + deliverySourceName: traceLogs.name, + dependsOn: [s3TraceLogs, cwlogsErrorLogs], + } + ); + const firehoseErrorLogs = new CloudwatchLogDelivery( + this, + "firehose_error_logs", + { + deliveryDestinationArn: firehose.arn, + deliverySourceName: errorLogs.name, + dependsOn: [cwlogsErrorLogs, firehoseInfoLogs], + } + ); + new CloudwatchLogDelivery(this, "firehose_trace_logs", { + deliveryDestinationArn: firehose.arn, + deliverySourceName: traceLogs.name, + dependsOn: 
[cwlogsTraceLogs, firehoseErrorLogs], + }); + } +} + +``` + ## Argument Reference This resource supports the following arguments: The following arguments are required: * `name` - (Required) Name of the new event bus. The names of custom event buses can't contain the / character. To create a partner event bus, ensure that the `name` matches the `eventSourceName`. The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `deadLetterConfig` - (Optional) Configuration details of the Amazon SQS queue for EventBridge to use as a dead-letter queue (DLQ). This block supports the following arguments: * `arn` - (Optional) The ARN of the SQS queue specified as the target for the dead-letter queue. * `description` - (Optional) Event bus description. * `eventSourceName` - (Optional) Partner event source that the new event bus will be matched with. Must match `name`. * `kmsKeyIdentifier` - (Optional) Identifier of the AWS KMS customer managed key for EventBridge to use, if you choose to use a customer managed key to encrypt events on this event bus. The identifier can be the key Amazon Resource Name (ARN), KeyId, key alias, or key alias ARN. +* `logConfig` - (Optional) Block for logging configuration settings for the event bus. + * `includeDetail` - (Optional) Whether EventBridge includes detailed event information in the records it generates. Valid values are `NONE` and `FULL`. 
+ * `level` - (Optional) Level of logging detail to include. Valid values are `OFF`, `ERROR`, `INFO`, and `TRACE`. * `tags` - (Optional) Map of tags assigned to the resource. If configured with a provider [`defaultTags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. ## Attribute Reference @@ -129,4 +409,4 @@ Using `terraform import`, import EventBridge event buses using the name of the e % terraform import aws_cloudwatch_event_bus.messenger chat-messages ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/cloudwatch_event_bus_policy.html.markdown b/website/docs/cdktf/typescript/r/cloudwatch_event_bus_policy.html.markdown index 2e49a1ed9ad0..9947790d8c03 100644 --- a/website/docs/cdktf/typescript/r/cloudwatch_event_bus_policy.html.markdown +++ b/website/docs/cdktf/typescript/r/cloudwatch_event_bus_policy.html.markdown @@ -206,6 +206,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `policy` - (Required) The text of the policy. For more information about building AWS IAM policy documents with Terraform, see the [AWS IAM Policy Document Guide](https://learn.hashicorp.com/terraform/aws/iam-policy). * `eventBusName` - (Optional) The name of the event bus to set the permissions on. If you omit this, the permissions are set on the `default` event bus. @@ -248,4 +249,4 @@ Using `terraform import`, import an EventBridge policy using the `eventBusName`. 
% terraform import aws_cloudwatch_event_bus_policy.DevAccountAccess example-event-bus ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/cloudwatch_event_connection.html.markdown b/website/docs/cdktf/typescript/r/cloudwatch_event_connection.html.markdown index 7f9b5575bf1f..c41c8d57609f 100644 --- a/website/docs/cdktf/typescript/r/cloudwatch_event_connection.html.markdown +++ b/website/docs/cdktf/typescript/r/cloudwatch_event_connection.html.markdown @@ -215,7 +215,7 @@ class MyConvertedCode extends TerraformStack { }, authorizationType: "BASIC", description: "A connection description", - kms_key_identifier: example.id, + kmsKeyIdentifier: example.id, name: "ngrok-connection", }); const current = new DataAwsCallerIdentity(this, "current", {}); @@ -283,6 +283,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The name for the connection. Maximum of 64 characters consisting of numbers, lower/upper case letters, .,-,_. * `description` - (Optional) Description for the connection. Maximum of 512 characters. * `authorizationType` - (Required) Type of authorization to use for the connection. One of `API_KEY`,`BASIC`,`OAUTH_CLIENT_CREDENTIALS`. 
@@ -380,4 +381,4 @@ Using `terraform import`, import EventBridge EventBridge connection using the `n % terraform import aws_cloudwatch_event_connection.test ngrok-connection ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/cloudwatch_event_endpoint.html.markdown b/website/docs/cdktf/typescript/r/cloudwatch_event_endpoint.html.markdown index 7cd5bd5fe69b..c4017f3754b3 100644 --- a/website/docs/cdktf/typescript/r/cloudwatch_event_endpoint.html.markdown +++ b/website/docs/cdktf/typescript/r/cloudwatch_event_endpoint.html.markdown @@ -62,6 +62,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` - (Optional) A description of the global endpoint. * `eventBus` - (Required) The event buses to use. The names of the event buses must be identical in each Region. Exactly two event buses are required. Documented below. * `name` - (Required) The name of the global endpoint. @@ -133,4 +134,4 @@ Using `terraform import`, import EventBridge Global Endpoints using the `name`. 
% terraform import aws_cloudwatch_event_endpoint.imported_endpoint example-endpoint ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/cloudwatch_event_permission.html.markdown b/website/docs/cdktf/typescript/r/cloudwatch_event_permission.html.markdown index 5f85155245cd..ff84e93bba08 100644 --- a/website/docs/cdktf/typescript/r/cloudwatch_event_permission.html.markdown +++ b/website/docs/cdktf/typescript/r/cloudwatch_event_permission.html.markdown @@ -73,6 +73,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `principal` - (Required) The 12-digit AWS account ID that you are permitting to put events to your default event bus. Specify `*` to permit any account to put events to your default event bus, optionally limited by `condition`. * `statementId` - (Required) An identifier string for the external account that you are granting permissions to. * `action` - (Optional) The action that you are enabling the other account to perform. Defaults to `events:PutEvents`. 
@@ -124,4 +125,4 @@ Using `terraform import`, import EventBridge permissions using the `event_bus_na % terraform import aws_cloudwatch_event_permission.DevAccountAccess example-event-bus/DevAccountAccess ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/cloudwatch_event_rule.html.markdown b/website/docs/cdktf/typescript/r/cloudwatch_event_rule.html.markdown index 7c674d9c1aea..82cfb6164972 100644 --- a/website/docs/cdktf/typescript/r/cloudwatch_event_rule.html.markdown +++ b/website/docs/cdktf/typescript/r/cloudwatch_event_rule.html.markdown @@ -62,28 +62,21 @@ data "aws_iam_policy_document" "sns_topic_policy" { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Optional) The name of the rule. If omitted, Terraform will assign a random, unique name. Conflicts with `namePrefix`. * `namePrefix` - (Optional) Creates a unique name beginning with the specified prefix. Conflicts with `name`. **Note**: Due to the length of the generated suffix, must be 38 characters or less. * `scheduleExpression` - (Optional) The scheduling expression. For example, `cron(0 20 * * ? *)` or `rate(5 minutes)`. At least one of `scheduleExpression` or `eventPattern` is required. Can only be used on the default event bus. For more information, refer to the AWS documentation [Schedule Expressions for Rules](https://docs.aws.amazon.com/AmazonCloudWatch/latest/events/ScheduledEvents.html). -* `eventBusName` - (Optional) The name or ARN of the event bus to associate with this rule. - If you omit this, the `default` event bus is used. +* `eventBusName` - (Optional) The name or ARN of the event bus to associate with this rule. 
If you omit this, the `default` event bus is used. * `eventPattern` - (Optional) The event pattern described a JSON object. At least one of `scheduleExpression` or `eventPattern` is required. See full documentation of [Events and Event Patterns in EventBridge](https://docs.aws.amazon.com/eventbridge/latest/userguide/eventbridge-and-event-patterns.html) for details. **Note**: The event pattern size is 2048 by default but it is adjustable up to 4096 characters by submitting a service quota increase request. See [Amazon EventBridge quotas](https://docs.aws.amazon.com/eventbridge/latest/userguide/eb-quota.html) for details. * `forceDestroy` - (Optional) Used to delete managed rules created by AWS. Defaults to `false`. * `description` - (Optional) The description of the rule. * `roleArn` - (Optional) The Amazon Resource Name (ARN) associated with the role that is used for target invocation. -* `isEnabled` - (Optional, **Deprecated** Use `state` instead) Whether the rule should be enabled. - Defaults to `true`. - Conflicts with `state`. -* `state` - (Optional) State of the rule. - Valid values are `DISABLED`, `ENABLED`, and `ENABLED_WITH_ALL_CLOUDTRAIL_MANAGEMENT_EVENTS`. - When state is `ENABLED`, the rule is enabled for all events except those delivered by CloudTrail. - To also enable the rule for events delivered by CloudTrail, set `state` to `ENABLED_WITH_ALL_CLOUDTRAIL_MANAGEMENT_EVENTS`. - Defaults to `ENABLED`. - Conflicts with `isEnabled`. - - **NOTE:** The rule state `ENABLED_WITH_ALL_CLOUDTRAIL_MANAGEMENT_EVENTS` cannot be used in conjunction with the `scheduleExpression` argument. +* `isEnabled` - (Optional, **Deprecated** Use `state` instead) Whether the rule should be enabled. Defaults to `true`. Conflicts with `state`. +* `state` - (Optional) State of the rule. Valid values are `DISABLED`, `ENABLED`, and `ENABLED_WITH_ALL_CLOUDTRAIL_MANAGEMENT_EVENTS`. When state is `ENABLED`, the rule is enabled for all events except those delivered by CloudTrail. 
To also enable the rule for events delivered by CloudTrail, set `state` to `ENABLED_WITH_ALL_CLOUDTRAIL_MANAGEMENT_EVENTS`. Defaults to `ENABLED`. Conflicts with `isEnabled`. * `tags` - (Optional) A map of tags to assign to the resource. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. +**NOTE:** The rule state `ENABLED_WITH_ALL_CLOUDTRAIL_MANAGEMENT_EVENTS` cannot be used in conjunction with the `scheduleExpression` argument. + ## Attribute Reference This resource exports the following attributes in addition to the arguments above: @@ -94,6 +87,34 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_cloudwatch_event_rule.example + identity = { + name = "capture-console-sign-in" + event_bus_name = "example-event-bus" + } +} + +resource "aws_cloudwatch_event_rule" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `name` (String) Name of the EventBridge rule. + +#### Optional + +* `accountId` (String) AWS Account where this resource is managed. +* `eventBusName` (String) Name of the event bus. +* `region` (String) Region where this resource is managed. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import EventBridge Rules using the `event_bus_name/rule_name` (if you omit `eventBusName`, the `default` event bus will be used). 
For example: ```typescript @@ -110,7 +131,7 @@ class MyConvertedCode extends TerraformStack { super(scope, name); CloudwatchEventRule.generateConfigForImport( this, - "console", + "example", "example-event-bus/capture-console-sign-in" ); } @@ -121,7 +142,7 @@ class MyConvertedCode extends TerraformStack { Using `terraform import`, import EventBridge Rules using the `event_bus_name/rule_name` (if you omit `eventBusName`, the `default` event bus will be used). For example: ```console -% terraform import aws_cloudwatch_event_rule.console example-event-bus/capture-console-sign-in +% terraform import aws_cloudwatch_event_rule.example example-event-bus/capture-console-sign-in ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/cloudwatch_event_target.html.markdown b/website/docs/cdktf/typescript/r/cloudwatch_event_target.html.markdown index 1bd95ebc54f4..52297f9a3246 100644 --- a/website/docs/cdktf/typescript/r/cloudwatch_event_target.html.markdown +++ b/website/docs/cdktf/typescript/r/cloudwatch_event_target.html.markdown @@ -794,6 +794,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `appsyncTarget` - (Optional) Parameters used when you are using the rule to invoke an AppSync GraphQL API mutation. Documented below. A maximum of 1 are allowed. * `batchTarget` - (Optional) Parameters used when you are using the rule to invoke an Amazon Batch Job. Documented below. A maximum of 1 are allowed. * `deadLetterConfig` - (Optional) Parameters used when you are providing a dead letter config. Documented below. A maximum of 1 are allowed. 
@@ -925,6 +926,36 @@ This resource exports no additional attributes. ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_cloudwatch_event_target.example + identity = { + event_bus_name = "default" + rule = "rule-name" + target_id = "target-id" + } +} + +resource "aws_cloudwatch_event_target" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `eventBusName` (String) Event bus name for the target. +* `rule` (String) Rule name for the target. +* `targetId` (String) Target ID. + +#### Optional + +* `accountId` (String) AWS Account where this resource is managed. +* `region` (String) Region where this resource is managed. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import EventBridge Targets using `event_bus_name/rule-name/target-id` (if you omit `eventBusName`, the `default` event bus will be used). For example: ```typescript @@ -941,7 +972,7 @@ class MyConvertedCode extends TerraformStack { super(scope, name); CloudwatchEventTarget.generateConfigForImport( this, - "testEventTarget", + "example", "rule-name/target-id" ); } @@ -952,7 +983,7 @@ class MyConvertedCode extends TerraformStack { Using `terraform import`, import EventBridge Targets using `event_bus_name/rule-name/target-id` (if you omit `eventBusName`, the `default` event bus will be used). 
For example: ```console -% terraform import aws_cloudwatch_event_target.test-event-target rule-name/target-id +% terraform import aws_cloudwatch_event_target.example rule-name/target-id ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/cloudwatch_log_account_policy.html.markdown b/website/docs/cdktf/typescript/r/cloudwatch_log_account_policy.html.markdown index 4cb6e4511a29..3e759cc85db0 100644 --- a/website/docs/cdktf/typescript/r/cloudwatch_log_account_policy.html.markdown +++ b/website/docs/cdktf/typescript/r/cloudwatch_log_account_policy.html.markdown @@ -129,6 +129,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `policyDocument` - (Required) Text of the account policy. Refer to the [AWS docs](https://docs.aws.amazon.com/cli/latest/reference/logs/put-account-policy.html) for more information. * `policyType` - (Required) Type of account policy. One of `DATA_PROTECTION_POLICY`, `SUBSCRIPTION_FILTER_POLICY`, `FIELD_INDEX_POLICY` or `TRANSFORMER_POLICY`. You can have one account policy per type in an account. * `policyName` - (Required) Name of the account policy. 
@@ -171,4 +172,4 @@ Using `terraform import`, import this resource using the `policyName` and `polic % terraform import aws_cloudwatch_log_account_policy.example "my-account-policy:SUBSCRIPTION_FILTER_POLICY" ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/cloudwatch_log_anomaly_detector.html.markdown b/website/docs/cdktf/typescript/r/cloudwatch_log_anomaly_detector.html.markdown index fd7e92bd2ac2..89f3466589b9 100644 --- a/website/docs/cdktf/typescript/r/cloudwatch_log_anomaly_detector.html.markdown +++ b/website/docs/cdktf/typescript/r/cloudwatch_log_anomaly_detector.html.markdown @@ -55,12 +55,14 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -The following arguments are required: +This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `logGroupArnList` - (Required) Array containing the ARN of the log group that this anomaly detector will watch. You can specify only one log group ARN. The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `anomalyVisibilityTime` - (Optional) Number of days to have visibility on an anomaly. After this time period has elapsed for an anomaly, it will be automatically baselined and the anomaly detector will treat new occurrences of a similar anomaly as normal. 
Therefore, if you do not correct the cause of an anomaly during the time period specified in `anomalyVisibilityTime`, it will be considered normal going forward and will not be detected as an anomaly. Valid Range: Minimum value of 7. Maximum value of 90. * `detectorName` - (Optional) Name for this anomaly detector. @@ -103,10 +105,10 @@ class MyConvertedCode extends TerraformStack { ``` -Using `terraform import`, import CloudWatch Log Anomaly Detector using the `example_id_arg`. For example: +Using `terraform import`, import CloudWatch Log Anomaly Detector using the `arn`. For example: ```console % terraform import aws_cloudwatch_log_anomaly_detector.example log_anomaly_detector-arn-12345678 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/cloudwatch_log_data_protection_policy.html.markdown b/website/docs/cdktf/typescript/r/cloudwatch_log_data_protection_policy.html.markdown index e48aad619f96..0c09df4534f2 100644 --- a/website/docs/cdktf/typescript/r/cloudwatch_log_data_protection_policy.html.markdown +++ b/website/docs/cdktf/typescript/r/cloudwatch_log_data_protection_policy.html.markdown @@ -87,6 +87,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `logGroupName` - (Required) The name of the log group under which the log stream is to be created. * `policyDocument` - (Required) Specifies the data protection policy in JSON. Read more at [Data protection policy syntax](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/mask-sensitive-log-data-start.html#mask-sensitive-log-data-policysyntax). 
@@ -126,4 +127,4 @@ Using `terraform import`, import this resource using the `logGroupName`. For exa % terraform import aws_cloudwatch_log_data_protection_policy.example my-log-group ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/cloudwatch_log_delivery.html.markdown b/website/docs/cdktf/typescript/r/cloudwatch_log_delivery.html.markdown index 8f81946a131d..d1d0be9d9491 100644 --- a/website/docs/cdktf/typescript/r/cloudwatch_log_delivery.html.markdown +++ b/website/docs/cdktf/typescript/r/cloudwatch_log_delivery.html.markdown @@ -47,6 +47,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `deliveryDestinationArn` - (Required) The ARN of the delivery destination to use for this delivery. * `deliverySourceName` - (Required) The name of the delivery source to use for this delivery. * `fieldDelimiter` - (Optional) The field delimiter to use between record fields when the final output format of a delivery is in `plain`, `w3c`, or `raw` format. @@ -96,4 +97,4 @@ Using `terraform import`, import CloudWatch Logs Delivery using the `id`. 
For ex % terraform import aws_cloudwatch_log_delivery.example jsoGVi4Zq8VlYp9n ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/cloudwatch_log_delivery_destination.html.markdown b/website/docs/cdktf/typescript/r/cloudwatch_log_delivery_destination.html.markdown index fe53303e5033..9519a728530c 100644 --- a/website/docs/cdktf/typescript/r/cloudwatch_log_delivery_destination.html.markdown +++ b/website/docs/cdktf/typescript/r/cloudwatch_log_delivery_destination.html.markdown @@ -47,6 +47,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `deliveryDestinationConfiguration` - (Required) The AWS resource that will receive the logs. * `destinationResourceArn` - (Required) The ARN of the AWS destination that this delivery destination represents. * `name` - (Required) The name for this delivery destination. 
@@ -93,4 +94,4 @@ Using `terraform import`, import CloudWatch Logs Delivery Destination using the % terraform import aws_cloudwatch_log_delivery_destination.example example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/cloudwatch_log_delivery_destination_policy.html.markdown b/website/docs/cdktf/typescript/r/cloudwatch_log_delivery_destination_policy.html.markdown index 05d9fae1672c..c2c69ca2d8f1 100644 --- a/website/docs/cdktf/typescript/r/cloudwatch_log_delivery_destination_policy.html.markdown +++ b/website/docs/cdktf/typescript/r/cloudwatch_log_delivery_destination_policy.html.markdown @@ -45,6 +45,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `deliveryDestinationName` - (Required) The name of the delivery destination to assign this policy to. * `deliveryDestinationPolicy` - (Required) The contents of the policy. 
@@ -84,4 +85,4 @@ Using `terraform import`, import CloudWatch Logs Delivery Destination Policy usi % terraform import aws_cloudwatch_log_delivery_destination_policy.example example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/cloudwatch_log_delivery_source.html.markdown b/website/docs/cdktf/typescript/r/cloudwatch_log_delivery_source.html.markdown index 8df0ae732fa0..7f024332af7a 100644 --- a/website/docs/cdktf/typescript/r/cloudwatch_log_delivery_source.html.markdown +++ b/website/docs/cdktf/typescript/r/cloudwatch_log_delivery_source.html.markdown @@ -42,6 +42,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `logType` - (Required) The type of log that the source is sending. For Amazon Bedrock, the valid value is `APPLICATION_LOGS`. For Amazon CodeWhisperer, the valid value is `EVENT_LOGS`. For IAM Identity Center, the valid value is `ERROR_LOGS`. For Amazon WorkMail, the valid values are `ACCESS_CONTROL_LOGS`, `AUTHENTICATION_LOGS`, `WORKMAIL_AVAILABILITY_PROVIDER_LOGS`, and `WORKMAIL_MAILBOX_ACCESS_LOGS`. * `name` - (Required) The name for this delivery source. * `resourceArn` - (Required) The ARN of the AWS resource that is generating and sending logs. 
@@ -87,4 +88,4 @@ Using `terraform import`, import CloudWatch Logs Delivery Source using the `name % terraform import aws_cloudwatch_log_delivery_source.example example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/cloudwatch_log_destination.html.markdown b/website/docs/cdktf/typescript/r/cloudwatch_log_destination.html.markdown index aa3dc2401dcb..98678fafaddc 100644 --- a/website/docs/cdktf/typescript/r/cloudwatch_log_destination.html.markdown +++ b/website/docs/cdktf/typescript/r/cloudwatch_log_destination.html.markdown @@ -40,6 +40,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) A name for the log destination. * `roleArn` - (Required) The ARN of an IAM role that grants Amazon CloudWatch Logs permissions to put data into the target. * `targetArn` - (Required) The ARN of the target Amazon Kinesis stream resource for the destination. @@ -84,4 +85,4 @@ Using `terraform import`, import CloudWatch Logs destinations using the `name`. 
% terraform import aws_cloudwatch_log_destination.test_destination test_destination ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/cloudwatch_log_destination_policy.html.markdown b/website/docs/cdktf/typescript/r/cloudwatch_log_destination_policy.html.markdown index 5da179cf1047..efbe13225f7e 100644 --- a/website/docs/cdktf/typescript/r/cloudwatch_log_destination_policy.html.markdown +++ b/website/docs/cdktf/typescript/r/cloudwatch_log_destination_policy.html.markdown @@ -74,6 +74,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `destinationName` - (Required) A name for the subscription filter * `accessPolicy` - (Required) The policy document. This is a JSON formatted string. * `forceUpdate` - (Optional) Specify true if you are updating an existing destination policy to grant permission to an organization ID instead of granting permission to individual AWS accounts. 
@@ -114,4 +115,4 @@ Using `terraform import`, import CloudWatch Logs destination policies using the % terraform import aws_cloudwatch_log_destination_policy.test_destination_policy test_destination ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/cloudwatch_log_group.html.markdown b/website/docs/cdktf/typescript/r/cloudwatch_log_group.html.markdown index d59bd9179012..5e21640d7f9d 100644 --- a/website/docs/cdktf/typescript/r/cloudwatch_log_group.html.markdown +++ b/website/docs/cdktf/typescript/r/cloudwatch_log_group.html.markdown @@ -42,13 +42,14 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Optional, Forces new resource) The name of the log group. If omitted, Terraform will assign a random, unique name. * `namePrefix` - (Optional, Forces new resource) Creates a unique name beginning with the specified prefix. Conflicts with `name`. * `skipDestroy` - (Optional) Set to true if you do not wish the log group (and any logs it may contain) to be deleted at destroy time, and instead just remove the log group from the Terraform state. -* `logGroupClass` - (Optional) Specified the log class of the log group. Possible values are: `STANDARD` or `INFREQUENT_ACCESS`. +* `logGroupClass` - (Optional) Specifies the log class of the log group. Possible values are: `STANDARD`, `INFREQUENT_ACCESS`, or `DELIVERY`. * `retentionInDays` - (Optional) Specifies the number of days you want to retain log events in the specified log group. Possible values are: 1, 3, 5, 7, 14, 30, 60, 90, 120, 150, 180, 365, 400, 545, 731, 1096, 1827, 2192, 2557, 2922, 3288, 3653, and 0.
- If you select 0, the events in the log group are always retained and never expire. + If you select 0, the events in the log group are always retained and never expire. If `logGroupClass` is set to `DELIVERY`, this argument is ignored and `retentionInDays` is forcibly set to 2. * `kmsKeyId` - (Optional) The ARN of the KMS Key to use when encrypting log data. Please note, after the AWS KMS CMK is disassociated from the log group, AWS CloudWatch Logs stops encrypting newly ingested data for the log group. All previously ingested data remains encrypted, and AWS CloudWatch Logs requires permissions for the CMK whenever the encrypted data is requested. @@ -63,6 +64,32 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_cloudwatch_log_group.example + identity = { + name = "yada" + } +} + +resource "aws_cloudwatch_log_group" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `name` (String) Name of the CloudWatch log group. + +#### Optional + +* `accountId` (String) AWS Account where this resource is managed. +* `region` (String) Region where this resource is managed. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Cloudwatch Log Groups using the `name`. 
For example: ```typescript @@ -77,7 +104,7 @@ import { CloudwatchLogGroup } from "./.gen/providers/aws/cloudwatch-log-group"; class MyConvertedCode extends TerraformStack { constructor(scope: Construct, name: string) { super(scope, name); - CloudwatchLogGroup.generateConfigForImport(this, "testGroup", "yada"); + CloudwatchLogGroup.generateConfigForImport(this, "example", "yada"); } } @@ -86,7 +113,7 @@ class MyConvertedCode extends TerraformStack { Using `terraform import`, import Cloudwatch Log Groups using the `name`. For example: ```console -% terraform import aws_cloudwatch_log_group.test_group yada +% terraform import aws_cloudwatch_log_group.example yada ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/cloudwatch_log_index_policy.html.markdown b/website/docs/cdktf/typescript/r/cloudwatch_log_index_policy.html.markdown index f7162cbe79e5..2c91ae85f230 100644 --- a/website/docs/cdktf/typescript/r/cloudwatch_log_index_policy.html.markdown +++ b/website/docs/cdktf/typescript/r/cloudwatch_log_index_policy.html.markdown @@ -19,13 +19,13 @@ Terraform resource for managing an AWS CloudWatch Logs Index Policy. ```typescript // DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug import { Construct } from "constructs"; -import { Fn, TerraformStack } from "cdktf"; +import { Fn, Token, TerraformStack } from "cdktf"; /* * Provider bindings are generated by running `cdktf get`. * See https://cdk.tf/provider-generation for more details. 
*/ -import { CloudwatchLogIndexPolicy } from "./.gen/providers/aws/"; import { CloudwatchLogGroup } from "./.gen/providers/aws/cloudwatch-log-group"; +import { CloudwatchLogIndexPolicy } from "./.gen/providers/aws/cloudwatch-log-index-policy"; class MyConvertedCode extends TerraformStack { constructor(scope: Construct, name: string) { super(scope, name); @@ -36,10 +36,12 @@ class MyConvertedCode extends TerraformStack { this, "example_1", { - log_group_name: example.name, - policy_document: Fn.jsonencode({ - Fields: ["eventName"], - }), + logGroupName: example.name, + policyDocument: Token.asString( + Fn.jsonencode({ + Fields: ["eventName"], + }) + ), } ); /*This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match.*/ @@ -51,8 +53,9 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -The following arguments are required: +This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `logGroupName` - (Required) Log group name to set the policy for. * `policyDocument` - (Required) JSON policy document. This is a JSON formatted string. @@ -72,7 +75,7 @@ import { TerraformStack } from "cdktf"; * Provider bindings are generated by running `cdktf get`. * See https://cdk.tf/provider-generation for more details. 
*/ -import { CloudwatchLogIndexPolicy } from "./.gen/providers/aws/"; +import { CloudwatchLogIndexPolicy } from "./.gen/providers/aws/cloudwatch-log-index-policy"; class MyConvertedCode extends TerraformStack { constructor(scope: Construct, name: string) { super(scope, name); @@ -92,4 +95,4 @@ Using `terraform import`, import CloudWatch Logs Index Policy using the `logGrou % terraform import aws_cloudwatch_log_index_policy.example /aws/log/group/name ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/cloudwatch_log_metric_filter.html.markdown b/website/docs/cdktf/typescript/r/cloudwatch_log_metric_filter.html.markdown index a2b15f92ef7e..91048bcbb8fb 100644 --- a/website/docs/cdktf/typescript/r/cloudwatch_log_metric_filter.html.markdown +++ b/website/docs/cdktf/typescript/r/cloudwatch_log_metric_filter.html.markdown @@ -49,11 +49,13 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) A name for the metric filter. * `pattern` - (Required) A valid [CloudWatch Logs filter pattern](https://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/FilterAndPatternSyntax.html) for extracting metric data out of ingested log events. * `logGroupName` - (Required) The name of the log group to associate the metric filter with. * `metricTransformation` - (Required) A block defining collection of information needed to define how metric data gets emitted. See below. +* `applyOnTransformedLogs` - (Optional) Whether the metric filter will be applied on the transformed version of the log events instead of the original ingested log events. Defaults to `false`. 
Valid only for log groups that have an active log transformer. The `metricTransformation` block supports the following arguments: @@ -102,4 +104,4 @@ Using `terraform import`, import CloudWatch Log Metric Filter using the `log_gro % terraform import aws_cloudwatch_log_metric_filter.test /aws/lambda/function:test ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/cloudwatch_log_resource_policy.html.markdown b/website/docs/cdktf/typescript/r/cloudwatch_log_resource_policy.html.markdown index a0bc0e6e078f..390a3d72a175 100644 --- a/website/docs/cdktf/typescript/r/cloudwatch_log_resource_policy.html.markdown +++ b/website/docs/cdktf/typescript/r/cloudwatch_log_resource_policy.html.markdown @@ -120,6 +120,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `policyDocument` - (Required) Details of the resource policy, including the identity of the principal that is enabled to put logs to this account. This is formatted as a JSON string. Maximum length of 5120 characters. * `policyName` - (Required) Name of the resource policy. 
@@ -161,4 +162,4 @@ Using `terraform import`, import CloudWatch log resource policies using the poli % terraform import aws_cloudwatch_log_resource_policy.MyPolicy MyPolicy ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/cloudwatch_log_stream.html.markdown b/website/docs/cdktf/typescript/r/cloudwatch_log_stream.html.markdown index ce800d37e65a..9610c4c83c5e 100644 --- a/website/docs/cdktf/typescript/r/cloudwatch_log_stream.html.markdown +++ b/website/docs/cdktf/typescript/r/cloudwatch_log_stream.html.markdown @@ -43,6 +43,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The name of the log stream. Must not be longer than 512 characters and must not contain `:` * `logGroupName` - (Required) The name of the log group under which the log stream is to be created. 
@@ -84,4 +85,4 @@ Using `terraform import`, import Cloudwatch Log Stream using the stream's `logGr % terraform import aws_cloudwatch_log_stream.foo Yada:SampleLogStream1234 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/cloudwatch_log_subscription_filter.html.markdown b/website/docs/cdktf/typescript/r/cloudwatch_log_subscription_filter.html.markdown index 8d25a7774f7a..b5b55266ac23 100644 --- a/website/docs/cdktf/typescript/r/cloudwatch_log_subscription_filter.html.markdown +++ b/website/docs/cdktf/typescript/r/cloudwatch_log_subscription_filter.html.markdown @@ -43,6 +43,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) A name for the subscription filter * `destinationArn` - (Required) The ARN of the destination to deliver matching log events to. Kinesis stream or Lambda function ARN. * `filterPattern` - (Required) A valid CloudWatch Logs filter pattern for subscribing to a filtered stream of log events. Use empty string `""` to match everything. For more information, see the [Amazon CloudWatch Logs User Guide](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/FilterAndPatternSyntax.html). 
@@ -86,4 +87,4 @@ Using `terraform import`, import CloudWatch Logs subscription filter using the l % terraform import aws_cloudwatch_log_subscription_filter.test_lambdafunction_logfilter "/aws/lambda/example_lambda_name|test_lambdafunction_logfilter" ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/cloudwatch_metric_alarm.html.markdown b/website/docs/cdktf/typescript/r/cloudwatch_metric_alarm.html.markdown index b91ed26ebbee..f52645ebd32e 100644 --- a/website/docs/cdktf/typescript/r/cloudwatch_metric_alarm.html.markdown +++ b/website/docs/cdktf/typescript/r/cloudwatch_metric_alarm.html.markdown @@ -243,6 +243,7 @@ You must choose one or the other This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `alarmName` - (Required) The descriptive name for the alarm. This name must be unique within the user's AWS account * `comparisonOperator` - (Required) The arithmetic operation to use when comparing the specified Statistic and Threshold. The specified Statistic value is used as the first operand. Either of the following is supported: `GreaterThanOrEqualToThreshold`, `GreaterThanThreshold`, `LessThanThreshold`, `LessThanOrEqualToThreshold`. Additionally, the values `LessThanLowerOrGreaterThanUpperThreshold`, `LessThanLowerThreshold`, and `GreaterThanUpperThreshold` are used only for alarms based on anomaly detection models. * `evaluationPeriods` - (Required) The number of periods over which data is compared to the specified threshold. 
@@ -318,6 +319,32 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_cloudwatch_metric_alarm.example + identity = { + alarm_name = "alarm-12345" + } +} + +resource "aws_cloudwatch_metric_alarm" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `alarmName` (String) Name of the CloudWatch metric alarm. + +#### Optional + +* `accountId` (String) AWS Account where this resource is managed. +* `region` (String) Region where this resource is managed. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import CloudWatch Metric Alarm using the `alarmName`. For example: ```typescript @@ -332,7 +359,11 @@ import { CloudwatchMetricAlarm } from "./.gen/providers/aws/cloudwatch-metric-al class MyConvertedCode extends TerraformStack { constructor(scope: Construct, name: string) { super(scope, name); - CloudwatchMetricAlarm.generateConfigForImport(this, "test", "alarm-12345"); + CloudwatchMetricAlarm.generateConfigForImport( + this, + "example", + "alarm-12345" + ); } } @@ -341,7 +372,7 @@ class MyConvertedCode extends TerraformStack { Using `terraform import`, import CloudWatch Metric Alarm using the `alarmName`. 
For example: ```console -% terraform import aws_cloudwatch_metric_alarm.test alarm-12345 +% terraform import aws_cloudwatch_metric_alarm.example alarm-12345 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/cloudwatch_metric_stream.html.markdown b/website/docs/cdktf/typescript/r/cloudwatch_metric_stream.html.markdown index 889bd826ec54..ab7348f6582b 100644 --- a/website/docs/cdktf/typescript/r/cloudwatch_metric_stream.html.markdown +++ b/website/docs/cdktf/typescript/r/cloudwatch_metric_stream.html.markdown @@ -230,6 +230,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `excludeFilter` - (Optional) List of exclusive metric filters. If you specify this parameter, the stream sends metrics from all metric namespaces except for the namespaces and the conditional metric names that you specify here. If you don't specify metric names or provide empty metric names whole metric namespace is excluded. Conflicts with `includeFilter`. * `includeFilter` - (Optional) List of inclusive metric filters. If you specify this parameter, the stream sends only the conditional metric names from the metric namespaces that you specify here. If you don't specify metric names or provide empty metric names whole metric namespace is included. Conflicts with `excludeFilter`. * `name` - (Optional, Forces new resource) Friendly name of the metric stream. If omitted, Terraform will assign a random, unique name. Conflicts with `namePrefix`. @@ -302,4 +303,4 @@ Using `terraform import`, import CloudWatch metric streams using the `name`. 
For % terraform import aws_cloudwatch_metric_stream.sample sample-stream-name ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/cloudwatch_query_definition.html.markdown b/website/docs/cdktf/typescript/r/cloudwatch_query_definition.html.markdown index a013c5b2b4d8..41b79d502e33 100644 --- a/website/docs/cdktf/typescript/r/cloudwatch_query_definition.html.markdown +++ b/website/docs/cdktf/typescript/r/cloudwatch_query_definition.html.markdown @@ -41,6 +41,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The name of the query. * `queryString` - (Required) The query to save. You can read more about CloudWatch Logs Query Syntax in the [documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/CWL_QuerySyntax.html). * `logGroupNames` - (Optional) Specific log groups to use with the query. 
@@ -83,4 +84,4 @@ Using `terraform import`, import CloudWatch query definitions using the query de % terraform import aws_cloudwatch_query_definition.example arn:aws:logs:us-west-2:123456789012:query-definition:269951d7-6f75-496d-9d7b-6b7a5486bdbd ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/codeartifact_domain.html.markdown b/website/docs/cdktf/typescript/r/codeartifact_domain.html.markdown index 1bc213a81b44..aadd22b2ec0d 100644 --- a/website/docs/cdktf/typescript/r/codeartifact_domain.html.markdown +++ b/website/docs/cdktf/typescript/r/codeartifact_domain.html.markdown @@ -38,6 +38,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `domain` - (Required) The name of the domain to create. All domain names in an AWS Region that are in the same AWS account must be unique. The domain name is used as the prefix in DNS hostnames. Do not use sensitive information in a domain name because it is publicly discoverable. * `encryptionKey` - (Optional) The encryption key for the domain. This is used to encrypt content stored in a domain. The KMS Key Amazon Resource Name (ARN). The default aws/codeartifact AWS KMS master key is used if this element is absent. * `tags` - (Optional) Key-value map of resource tags. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. 
@@ -57,6 +58,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_codeartifact_domain.example + identity = { + "arn" = "arn:aws:codeartifact:us-west-2:123456789012:domain/example" + } +} + +resource "aws_codeartifact_domain" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the CodeArtifact domain. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import CodeArtifact Domain using the CodeArtifact Domain arn. For example: ```typescript @@ -87,4 +109,4 @@ Using `terraform import`, import CodeArtifact Domain using the CodeArtifact Doma % terraform import aws_codeartifact_domain.example arn:aws:codeartifact:us-west-2:012345678912:domain/tf-acc-test-8593714120730241305 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/codeartifact_domain_permissions_policy.html.markdown b/website/docs/cdktf/typescript/r/codeartifact_domain_permissions_policy.html.markdown index 461fece75d6a..7baf28a841bb 100644 --- a/website/docs/cdktf/typescript/r/codeartifact_domain_permissions_policy.html.markdown +++ b/website/docs/cdktf/typescript/r/codeartifact_domain_permissions_policy.html.markdown @@ -73,6 +73,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
* `domain` - (Required) The name of the domain on which to set the resource policy. * `policyDocument` - (Required) A JSON policy string to be set as the access control resource policy on the provided domain. * `domainOwner` - (Optional) The account number of the AWS account that owns the domain. @@ -87,6 +88,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_codeartifact_domain_permissions_policy.example + identity = { + "arn" = "arn:aws:codeartifact:us-west-2:123456789012:domain/example" + } +} + +resource "aws_codeartifact_domain_permissions_policy" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the CodeArtifact domain. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import CodeArtifact Domain Permissions Policies using the CodeArtifact Domain ARN. 
For example: ```typescript @@ -117,4 +139,4 @@ Using `terraform import`, import CodeArtifact Domain Permissions Policies using % terraform import aws_codeartifact_domain_permissions_policy.example arn:aws:codeartifact:us-west-2:012345678912:domain/tf-acc-test-1928056699409417367 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/codeartifact_repository.html.markdown b/website/docs/cdktf/typescript/r/codeartifact_repository.html.markdown index 45528112b9e3..8d8ad9f1f4eb 100644 --- a/website/docs/cdktf/typescript/r/codeartifact_repository.html.markdown +++ b/website/docs/cdktf/typescript/r/codeartifact_repository.html.markdown @@ -116,6 +116,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `domain` - (Required) The domain that contains the created repository. * `repository` - (Required) The name of the repository to create. * `domainOwner` - (Optional) The account number of the AWS account that owns the domain. @@ -143,6 +144,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. 
For example: + +```terraform +import { + to = aws_codeartifact_repository.example + identity = { + "arn" = "arn:aws:codeartifact:us-west-2:123456789012:repository/example-domain/example-repo" + } +} + +resource "aws_codeartifact_repository" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the CodeArtifact repository. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import CodeArtifact Repository using the CodeArtifact Repository ARN. For example: ```typescript @@ -173,4 +195,4 @@ Using `terraform import`, import CodeArtifact Repository using the CodeArtifact % terraform import aws_codeartifact_repository.example arn:aws:codeartifact:us-west-2:012345678912:repository/tf-acc-test-6968272603913957763/tf-acc-test-6968272603913957763 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/codeartifact_repository_permissions_policy.html.markdown b/website/docs/cdktf/typescript/r/codeartifact_repository_permissions_policy.html.markdown index 25f5a65acf7b..6f5413e85be4 100644 --- a/website/docs/cdktf/typescript/r/codeartifact_repository_permissions_policy.html.markdown +++ b/website/docs/cdktf/typescript/r/codeartifact_repository_permissions_policy.html.markdown @@ -93,6 +93,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `repository` - (Required) The name of the repository to set the resource policy on. * `domain` - (Required) The name of the domain on which to set the resource policy. 
* `policyDocument` - (Required) A JSON policy string to be set as the access control resource policy on the provided domain. @@ -108,6 +109,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_codeartifact_repository_permissions_policy.example + identity = { + "arn" = "arn:aws:codeartifact:us-west-2:123456789012:repository/example-domain/example-repo" + } +} + +resource "aws_codeartifact_repository_permissions_policy" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the CodeArtifact repository. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import CodeArtifact Repository Permissions Policies using the CodeArtifact Repository ARN. For example: ```typescript @@ -138,4 +160,4 @@ Using `terraform import`, import CodeArtifact Repository Permissions Policies us % terraform import aws_codeartifact_repository_permissions_policy.example arn:aws:codeartifact:us-west-2:012345678912:repository/tf-acc-test-6968272603913957763/tf-acc-test-6968272603913957763 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/codebuild_fleet.html.markdown b/website/docs/cdktf/typescript/r/codebuild_fleet.html.markdown index ca9c9af8ccdd..c1ed97597233 100644 --- a/website/docs/cdktf/typescript/r/codebuild_fleet.html.markdown +++ b/website/docs/cdktf/typescript/r/codebuild_fleet.html.markdown @@ -89,7 +89,8 @@ The following arguments are required: The following arguments are optional: -* `compute_configuration` - (Optional) The compute configuration of the compute fleet. 
This is only required if `computeType` is set to `ATTRIBUTE_BASED_COMPUTE`. See [`compute_configuration`](#compute_configuration) below. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `computeConfiguration` - (Optional) The compute configuration of the compute fleet. This is only required if `computeType` is set to `ATTRIBUTE_BASED_COMPUTE` or `CUSTOM_INSTANCE_TYPE`. See [`computeConfiguration`](#compute_configuration) below. * `fleetServiceRole` - (Optional) The service role associated with the compute fleet. * `imageId` - (Optional) The Amazon Machine Image (AMI) of the compute fleet. * `overflowBehavior` - (Optional) Overflow behavior for compute fleet. Valid values: `ON_DEMAND`, `QUEUE`. @@ -100,9 +101,10 @@ The following arguments are optional: ### compute_configuration * `disk` - (Optional) Amount of disk space of the instance type included in the fleet. -* `machine_type` - (Optional) Machine type of the instance type included in the fleet. Valid values: `GENERAL`, `NVME`. -* `memory` - (Optional) Amount of memory of the instance type included in the fleet. -* `vcpu` - (Optional) Number of vCPUs of the instance type included in the fleet. +* `instanceType` - (Optional) EC2 instance type to be launched in the fleet. Specify only if `computeType` is set to `CUSTOM_INSTANCE_TYPE`. See [Supported instance families](https://docs.aws.amazon.com/codebuild/latest/userguide/build-env-ref-compute-types.html#environment-reserved-capacity.instance-types). +* `machineType` - (Optional) Machine type of the instance type included in the fleet. Valid values: `GENERAL`, `NVME`. Specify only if `computeType` is set to `ATTRIBUTE_BASED_COMPUTE`. 
+* `memory` - (Optional) Amount of memory of the instance type included in the fleet. Specify only if `computeType` is set to `ATTRIBUTE_BASED_COMPUTE`. +* `vcpu` - (Optional) Number of vCPUs of the instance type included in the fleet. Specify only if `computeType` is set to `ATTRIBUTE_BASED_COMPUTE`. ### scaling_configuration @@ -136,6 +138,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_codebuild_fleet.example + identity = { + "arn" = "arn:aws:codebuild:us-west-2:123456789012:fleet/example-fleet" + } +} + +resource "aws_codebuild_fleet" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the CodeBuild fleet. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import CodeBuild Fleet using the `name` or the `arn`. For example: ```typescript @@ -162,4 +185,4 @@ Using `terraform import`, import CodeBuild Fleet using the `name`. For example: % terraform import aws_codebuild_fleet.name fleet-name ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/codebuild_project.html.markdown b/website/docs/cdktf/typescript/r/codebuild_project.html.markdown index 43ff29de4971..4159cd0ca590 100644 --- a/website/docs/cdktf/typescript/r/codebuild_project.html.markdown +++ b/website/docs/cdktf/typescript/r/codebuild_project.html.markdown @@ -16,6 +16,8 @@ source (e.g., the "rebuild every time a code change is pushed" option in the Cod ## Example Usage +### Basic Usage + ```typescript // DO NOT EDIT. 
Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug import { Construct } from "constructs"; @@ -258,6 +260,11 @@ class MyConvertedCode extends TerraformStack { ``` +### Runner Project + +While no special configuration is required for `aws_codebuild_project` to create a project as a Runner Project, an `aws_codebuild_webhook` resource with an appropriate `filterGroup` is required. +See the [`aws_codebuild_webhook` resource documentation example](/docs/providers/aws/r/codebuild_webhook.html#for-codebuild-runner-project) for more details. + ## Argument Reference The following arguments are required: @@ -271,6 +278,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `badgeEnabled` - (Optional) Generates a publicly-accessible URL for the projects build badge. Available as `badgeUrl` attribute when enabled. * `buildBatchConfig` - (Optional) Defines the batch build options for the project. @@ -358,6 +366,7 @@ The following arguments are optional: `BUILD_GENERAL1_SMALL`, `BUILD_GENERAL1_MEDIUM`, `BUILD_GENERAL1_LARGE`, `BUILD_GENERAL1_XLARGE`, `BUILD_GENERAL1_2XLARGE`, `BUILD_LAMBDA_1GB`, `BUILD_LAMBDA_2GB`, `BUILD_LAMBDA_4GB`, `BUILD_LAMBDA_8GB`, `BUILD_LAMBDA_10GB`. For additional information, see the [CodeBuild User Guide](https://docs.aws.amazon.com/codebuild/latest/userguide/build-env-ref-compute-types.html). +* `dockerServer` - (Optional) Configuration block. Detailed below. * `fleet` - (Optional) Configuration block. Detailed below. * `environmentVariable` - (Optional) Configuration block. Detailed below. * `imagePullCredentialsType` - (Optional) Type of credentials AWS CodeBuild uses to pull images in your build. 
Valid @@ -376,6 +385,11 @@ The following arguments are optional: `LINUX_LAMBDA_CONTAINER`, `ARM_LAMBDA_CONTAINER`, `LINUX_EC2`, `ARM_EC2`, `WINDOWS_EC2`, `MAC_ARM`. For additional information, see the [CodeBuild User Guide](https://docs.aws.amazon.com/codebuild/latest/userguide/build-env-ref-compute-types.html). +#### environment: docker_server + +* `computeType` - (Required) Compute type for the Docker server. Valid values: `BUILD_GENERAL1_SMALL`, `BUILD_GENERAL1_MEDIUM`, `BUILD_GENERAL1_LARGE`, `BUILD_GENERAL1_XLARGE`, and `BUILD_GENERAL1_2XLARGE`. +* `securityGroupIds` - (Optional) List of security group IDs to assign to the Docker server. + #### environment: fleet * `fleetArn` - (Optional) Compute fleet ARN for the build project. @@ -575,6 +589,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_codebuild_project.example + identity = { + "arn" = "arn:aws:codebuild:us-west-2:123456789012:project/project-name" + } +} + +resource "aws_codebuild_project" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the CodeBuild project. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import CodeBuild Project using the `name`. For example: @@ -602,4 +637,4 @@ Using `terraform import`, import CodeBuild Project using the `name`. 
For example % terraform import aws_codebuild_project.name project-name ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/codebuild_report_group.html.markdown b/website/docs/cdktf/typescript/r/codebuild_report_group.html.markdown index 405a38b21957..5a6ef7bf296c 100644 --- a/website/docs/cdktf/typescript/r/codebuild_report_group.html.markdown +++ b/website/docs/cdktf/typescript/r/codebuild_report_group.html.markdown @@ -92,6 +92,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The name of a Report Group. * `type` - (Required) The type of the Report Group. Valid value are `TEST` and `CODE_COVERAGE`. * `exportConfig` - (Required) Information about the destination where the raw data of this Report Group is exported. see [Export Config](#export-config) documented below. @@ -123,6 +124,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_codebuild_report_group.example + identity = { + "arn" = "arn:aws:codebuild:us-west-2:123456789012:report-group/report-group-name" + } +} + +resource "aws_codebuild_report_group" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the CodeBuild report group. 
+ In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import CodeBuild Report Group using the CodeBuild Report Group arn. For example: ```typescript @@ -153,4 +175,4 @@ Using `terraform import`, import CodeBuild Report Group using the CodeBuild Repo % terraform import aws_codebuild_report_group.example arn:aws:codebuild:us-west-2:123456789:report-group/report-group-name ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/codebuild_resource_policy.html.markdown b/website/docs/cdktf/typescript/r/codebuild_resource_policy.html.markdown index 37954a1c8cb2..0c291806936e 100644 --- a/website/docs/cdktf/typescript/r/codebuild_resource_policy.html.markdown +++ b/website/docs/cdktf/typescript/r/codebuild_resource_policy.html.markdown @@ -85,6 +85,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `resourceArn` - (Required) The ARN of the Project or ReportGroup resource you want to associate with a resource policy. * `policy` - (Required) A JSON-formatted resource policy. For more information, see [Sharing a Projec](https://docs.aws.amazon.com/codebuild/latest/userguide/project-sharing.html#project-sharing-share) and [Sharing a Report Group](https://docs.aws.amazon.com/codebuild/latest/userguide/report-groups-sharing.html#report-groups-sharing-share). 
@@ -96,6 +97,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_codebuild_resource_policy.example + identity = { + "arn" = "arn:aws:codebuild:us-west-2:123456789012:report-group/report-group-name" + } +} + +resource "aws_codebuild_resource_policy" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the CodeBuild resource. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import CodeBuild Resource Policy using the CodeBuild Resource Policy arn. For example: ```typescript @@ -126,4 +148,4 @@ Using `terraform import`, import CodeBuild Resource Policy using the CodeBuild R % terraform import aws_codebuild_resource_policy.example arn:aws:codebuild:us-west-2:123456789:report-group/report-group-name ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/codebuild_source_credential.html.markdown b/website/docs/cdktf/typescript/r/codebuild_source_credential.html.markdown index 12279a90a310..0484c450e15a 100644 --- a/website/docs/cdktf/typescript/r/codebuild_source_credential.html.markdown +++ b/website/docs/cdktf/typescript/r/codebuild_source_credential.html.markdown @@ -95,6 +95,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
* `authType` - (Required) The type of authentication used to connect to a GitHub, GitHub Enterprise, or Bitbucket repository. Valid values are `BASIC_AUTH`, `PERSONAL_ACCESS_TOKEN`, `CODECONNECTIONS`, and `SECRETS_MANAGER`. An OAUTH connection is not supported by the API. @@ -114,6 +115,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_codebuild_source_credential.example + identity = { + "arn" = "arn:aws:codebuild:us-west-2:123456789012:token/github" + } +} + +resource "aws_codebuild_source_credential" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the CodeBuild source credential. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import CodeBuild Source Credential using the CodeBuild Source Credential arn. For example: @@ -145,4 +167,4 @@ Using `terraform import`, import CodeBuild Source Credential using the CodeBuild % terraform import aws_codebuild_source_credential.example arn:aws:codebuild:us-west-2:123456789:token:github ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/codebuild_webhook.html.markdown b/website/docs/cdktf/typescript/r/codebuild_webhook.html.markdown index e32dabcf047d..b3ca9e2ae72f 100644 --- a/website/docs/cdktf/typescript/r/codebuild_webhook.html.markdown +++ b/website/docs/cdktf/typescript/r/codebuild_webhook.html.markdown @@ -106,33 +106,76 @@ class MyConvertedCode extends TerraformStack { ``` +### For CodeBuild Runner Project + +To create a CodeBuild project as a Runner Project, the following `aws_codebuild_webhook` resource is required for the project. 
+See the [AWS Documentation](https://docs.aws.amazon.com/codebuild/latest/userguide/action-runner.html) for more information about CodeBuild Runner Projects. + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { Token, TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. + */ +import { CodebuildWebhook } from "./.gen/providers/aws/codebuild-webhook"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + new CodebuildWebhook(this, "example", { + buildType: "BUILD", + filterGroup: [ + { + filter: [ + { + pattern: "WORKFLOW_JOB_QUEUED", + type: "EVENT", + }, + ], + }, + ], + projectName: Token.asString(awsCodebuildProjectExample.name), + }); + } +} + +``` + +## Argument Reference + +This resource supports the following arguments: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `projectName` - (Required) The name of the build project. * `buildType` - (Optional) The type of build this webhook will trigger. Valid values for this parameter are: `BUILD`, `BUILD_BATCH`. * `manualCreation` - (Optional) If true, CodeBuild doesn't create a webhook in GitHub and instead returns `payloadUrl` and `secret` values for the webhook. The `payloadUrl` and `secret` values in the output can be used to manually create a webhook within GitHub. * `branchFilter` - (Optional) A regular expression used to determine which branches get built. Default is all branches are built. We recommend using `filterGroup` over `branchFilter`.
-* `filterGroup` - (Optional) Information about the webhook's trigger. Filter group blocks are documented below. -* `scopeConfiguration` - (Optional) Scope configuration for global or organization webhooks. Scope configuration blocks are documented below. +* `filterGroup` - (Optional) Information about the webhook's trigger. See [filter_group](#filter_group) for details. +* `scopeConfiguration` - (Optional) Scope configuration for global or organization webhooks. See [scope_configuration](#scope_configuration) for details. +* `pullRequestBuildPolicy` - (Optional) Defines comment-based approval requirements for triggering builds on pull requests. See [pull_request_build_policy](#pull_request_build_policy) for details. -`filterGroup` supports the following: +### filter_group -* `filter` - (Required) A webhook filter for the group. Filter blocks are documented below. +* `filter` - (Required) A webhook filter for the group. See [filter](#filter) for details. -`filter` supports the following: +### filter * `type` - (Required) The webhook filter group's type. Valid values for this parameter are: `EVENT`, `BASE_REF`, `HEAD_REF`, `ACTOR_ACCOUNT_ID`, `FILE_PATH`, `COMMIT_MESSAGE`, `WORKFLOW_NAME`, `TAG_NAME`, `RELEASE_NAME`. At least one filter group must specify `EVENT` as its type. * `pattern` - (Required) For a filter that uses `EVENT` type, a comma-separated string that specifies one event: `PUSH`, `PULL_REQUEST_CREATED`, `PULL_REQUEST_UPDATED`, `PULL_REQUEST_REOPENED`. `PULL_REQUEST_MERGED`, `WORKFLOW_JOB_QUEUED` works with GitHub & GitHub Enterprise only. For a filter that uses any of the other filter types, a regular expression. * `excludeMatchedPattern` - (Optional) If set to `true`, the specified filter does *not* trigger a build. Defaults to `false`. -`scopeConfiguration` supports the following: +### scope_configuration * `name` - (Required) The name of either the enterprise or organization. * `scope` - (Required) The type of scope for a GitHub webhook. 
Valid values for this parameter are: `GITHUB_ORGANIZATION`, `GITHUB_GLOBAL`. * `domain` - (Optional) The domain of the GitHub Enterprise organization. Required if your project's source type is GITHUB_ENTERPRISE. +### pull_request_build_policy + +* `requiresCommentApproval` - (Required) Specifies when comment-based approval is required before triggering a build on pull requests. Valid values are: `DISABLED`, `ALL_PULL_REQUESTS`, and `FORK_PULL_REQUESTS`. +* `approverRoles` - (Optional) List of repository roles that have approval privileges for pull request builds when comment approval is required. This argument must be specified only when `requiresCommentApproval` is not `DISABLED`. See the [AWS documentation](https://docs.aws.amazon.com/codebuild/latest/userguide/pull-request-build-policy.html#pull-request-build-policy.configuration) for valid values and defaults. + ## Attribute Reference This resource exports the following attributes in addition to the arguments above: @@ -172,4 +215,4 @@ Using `terraform import`, import CodeBuild Webhooks using the CodeBuild Project % terraform import aws_codebuild_webhook.example MyProjectName ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/codecatalyst_dev_environment.html.markdown b/website/docs/cdktf/typescript/r/codecatalyst_dev_environment.html.markdown index ce7dcaffecdb..ffa64904617e 100644 --- a/website/docs/cdktf/typescript/r/codecatalyst_dev_environment.html.markdown +++ b/website/docs/cdktf/typescript/r/codecatalyst_dev_environment.html.markdown @@ -62,6 +62,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
* `inactivityTimeoutMinutes` - (Optional) The amount of time the Dev Environment will run without any activity detected before stopping, in minutes. Only whole integers are allowed. Dev Environments consume compute minutes when running. * `repositories` - (Optional) The source repository that contains the branch to clone into the Dev Environment. @@ -93,4 +94,4 @@ This resource exports the following attributes in addition to the arguments abov - `update` - (Default `10m`) - `delete` - (Default `10m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/codecatalyst_project.html.markdown b/website/docs/cdktf/typescript/r/codecatalyst_project.html.markdown index f8cecc82e958..7cd7e6fbbfc8 100644 --- a/website/docs/cdktf/typescript/r/codecatalyst_project.html.markdown +++ b/website/docs/cdktf/typescript/r/codecatalyst_project.html.markdown @@ -47,6 +47,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` - (Optional) The description of the project. This description will be displayed to all users of the project. We recommend providing a brief description of the project and its intended purpose. ## Attribute Reference @@ -96,4 +97,4 @@ Using `terraform import`, import CodeCatalyst Project using the `id`. 
For exampl % terraform import aws_codecatalyst_project.example project-id-12345678 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/codecatalyst_source_repository.html.markdown b/website/docs/cdktf/typescript/r/codecatalyst_source_repository.html.markdown index 0749dc04606d..078747358cf7 100644 --- a/website/docs/cdktf/typescript/r/codecatalyst_source_repository.html.markdown +++ b/website/docs/cdktf/typescript/r/codecatalyst_source_repository.html.markdown @@ -48,6 +48,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` - (Optional) The description of the project. This description will be displayed to all users of the project. We recommend providing a brief description of the project and its intended purpose. ## Attribute Reference @@ -96,4 +97,4 @@ Using `terraform import`, import CodeCatalyst Source Repository using the `id`. 
% terraform import aws_codecatalyst_source_repository.example example-repo ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/codecommit_approval_rule_template.html.markdown b/website/docs/cdktf/typescript/r/codecommit_approval_rule_template.html.markdown index 3f1c30c69e70..6bc577752e85 100644 --- a/website/docs/cdktf/typescript/r/codecommit_approval_rule_template.html.markdown +++ b/website/docs/cdktf/typescript/r/codecommit_approval_rule_template.html.markdown @@ -54,6 +54,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `content` - (Required) The content of the approval rule template. Maximum of 3000 characters. * `name` - (Required) The name for the approval rule template. Maximum of 100 characters. * `description` - (Optional) The description of the approval rule template. Maximum of 1000 characters. 
@@ -100,4 +101,4 @@ Using `terraform import`, import CodeCommit approval rule templates using the `n % terraform import aws_codecommit_approval_rule_template.imported ExistingApprovalRuleTemplateName ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/codecommit_approval_rule_template_association.html.markdown b/website/docs/cdktf/typescript/r/codecommit_approval_rule_template_association.html.markdown index 08b12419cda0..7d070cb2269c 100644 --- a/website/docs/cdktf/typescript/r/codecommit_approval_rule_template_association.html.markdown +++ b/website/docs/cdktf/typescript/r/codecommit_approval_rule_template_association.html.markdown @@ -43,6 +43,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `approvalRuleTemplateName` - (Required) The name for the approval rule template. * `repositoryName` - (Required) The name of the repository that you want to associate with the template. 
@@ -84,4 +85,4 @@ Using `terraform import`, import CodeCommit approval rule template associations % terraform import aws_codecommit_approval_rule_template_association.example approver-rule-for-example,MyExampleRepo ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/codecommit_repository.html.markdown b/website/docs/cdktf/typescript/r/codecommit_repository.html.markdown index c503ab0a04bd..751037451727 100644 --- a/website/docs/cdktf/typescript/r/codecommit_repository.html.markdown +++ b/website/docs/cdktf/typescript/r/codecommit_repository.html.markdown @@ -74,6 +74,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `repositoryName` - (Required) The name for the repository. This needs to be less than 100 characters. * `description` - (Optional) The description of the repository. This needs to be less than 1000 characters * `defaultBranch` - (Optional) The default branch of the repository. The branch specified here needs to exist. @@ -122,4 +123,4 @@ Using `terraform import`, import CodeCommit repository using repository name. 
Fo % terraform import aws_codecommit_repository.imported ExistingRepo ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/codecommit_trigger.html.markdown b/website/docs/cdktf/typescript/r/codecommit_trigger.html.markdown index de735411092a..e61f6a5a4bea 100644 --- a/website/docs/cdktf/typescript/r/codecommit_trigger.html.markdown +++ b/website/docs/cdktf/typescript/r/codecommit_trigger.html.markdown @@ -53,6 +53,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `repositoryName` - (Required) The name for the repository. This needs to be less than 100 characters. * `trigger` - (Required) The name of the trigger. * `name` - (Required) The name of the trigger. @@ -67,4 +68,4 @@ This resource exports the following attributes in addition to the arguments abov * `configurationId` - System-generated unique identifier. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/codeconnections_connection.html.markdown b/website/docs/cdktf/typescript/r/codeconnections_connection.html.markdown index 45738d831b89..e791cb6fdafb 100644 --- a/website/docs/cdktf/typescript/r/codeconnections_connection.html.markdown +++ b/website/docs/cdktf/typescript/r/codeconnections_connection.html.markdown @@ -43,6 +43,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The name of the connection to be created. The name must be unique in the calling AWS account. Changing `name` will create a new resource. * `providerType` - (Optional) The name of the external provider where your third-party code repository is configured. Changing `providerType` will create a new resource. Conflicts with `hostArn`. * `hostArn` - (Optional) The Amazon Resource Name (ARN) of the host associated with the connection. Conflicts with `providerType` @@ -52,13 +53,34 @@ This resource supports the following arguments: This resource exports the following attributes in addition to the arguments above: -* `id` - The codeconnections connection ARN. * `arn` - The codeconnections connection ARN. * `connectionStatus` - The codeconnections connection status. Possible values are `PENDING`, `AVAILABLE` and `ERROR`. +* `id` - (**Deprecated**) The codeconnections connection ARN. * `tagsAll` - A map of tags assigned to the resource, including those inherited from the provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_codeconnections_connection.example + identity = { + "arn" = "arn:aws:codeconnections:us-west-2:123456789012:connection/example-connection-id" + } +} + +resource "aws_codeconnections_connection" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the CodeConnections connection. 
+ In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import CodeConnections connection using the ARN. For example: ```typescript @@ -89,4 +111,4 @@ Using `terraform import`, import CodeConnections connection using the ARN. For e % terraform import aws_codeconnections_connection.test-connection arn:aws:codeconnections:us-west-1:0123456789:connection/79d4d357-a2ee-41e4-b350-2fe39ae59448 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/codeconnections_host.html.markdown b/website/docs/cdktf/typescript/r/codeconnections_host.html.markdown index 49eee71f27d6..baf75819ef83 100644 --- a/website/docs/cdktf/typescript/r/codeconnections_host.html.markdown +++ b/website/docs/cdktf/typescript/r/codeconnections_host.html.markdown @@ -44,6 +44,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The name of the host to be created. The name must be unique in the calling AWS account. * `providerEndpoint` - (Required) The endpoint of the infrastructure to be represented by the host after it is created. * `providerType` - (Required) The name of the external provider where your third-party code repository is configured. @@ -60,12 +61,33 @@ A `vpcConfiguration` block supports the following arguments: This resource exports the following attributes in addition to the arguments above: -* `id` - The CodeConnections Host ARN. * `arn` - The CodeConnections Host ARN. +* `id` - (**Deprecated**) The CodeConnections Host ARN. * `status` - The CodeConnections Host status. 
Possible values are `PENDING`, `AVAILABLE`, `VPC_CONFIG_DELETING`, `VPC_CONFIG_INITIALIZING`, and `VPC_CONFIG_FAILED_INITIALIZATION`. ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_codeconnections_host.example + identity = { + "arn" = "arn:aws:codeconnections:us-west-2:123456789012:host/example-host-id" + } +} + +resource "aws_codeconnections_host" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the CodeConnections host. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import CodeConnections Host using the ARN. For example: ```typescript @@ -96,4 +118,4 @@ Using `terraform import`, import CodeConnections Host using the ARN. For example % terraform import aws_codeconnections_host.example-host arn:aws:codeconnections:us-west-1:0123456789:host/79d4d357-a2ee-41e4-b350-2fe39ae59448 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/codedeploy_app.html.markdown b/website/docs/cdktf/typescript/r/codedeploy_app.html.markdown index 612c516c4ef0..907c50252f5a 100644 --- a/website/docs/cdktf/typescript/r/codedeploy_app.html.markdown +++ b/website/docs/cdktf/typescript/r/codedeploy_app.html.markdown @@ -87,6 +87,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The name of the application. 
* `computePlatform` - (Optional) The compute platform can either be `ECS`, `Lambda`, or `Server`. Default is `Server`. * `tags` - (Optional) Key-value map of resource tags. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. @@ -131,4 +132,4 @@ Using `terraform import`, import CodeDeploy Applications using the `name`. For e % terraform import aws_codedeploy_app.example my-application ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/codedeploy_deployment_config.html.markdown b/website/docs/cdktf/typescript/r/codedeploy_deployment_config.html.markdown index 9d7d71e2fb63..e8198a61d190 100644 --- a/website/docs/cdktf/typescript/r/codedeploy_deployment_config.html.markdown +++ b/website/docs/cdktf/typescript/r/codedeploy_deployment_config.html.markdown @@ -130,6 +130,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `deploymentConfigName` - (Required) The name of the deployment config. * `computePlatform` - (Optional) The compute platform can be `Server`, `Lambda`, or `ECS`. Default is `Server`. * `minimumHealthyHosts` - (Optional) A minimum_healthy_hosts block. Required for `Server` compute platform. Minimum Healthy Hosts are documented below. 
@@ -211,4 +212,4 @@ Using `terraform import`, import CodeDeploy Deployment Configurations using the % terraform import aws_codedeploy_deployment_config.example my-deployment-config ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/codedeploy_deployment_group.html.markdown b/website/docs/cdktf/typescript/r/codedeploy_deployment_group.html.markdown index ca5e1dd0c161..52b370bc315e 100644 --- a/website/docs/cdktf/typescript/r/codedeploy_deployment_group.html.markdown +++ b/website/docs/cdktf/typescript/r/codedeploy_deployment_group.html.markdown @@ -246,6 +246,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `appName` - (Required) The name of the application. * `deploymentGroupName` - (Required) The name of the deployment group. * `serviceRoleArn` - (Required) The service role ARN that allows deployments. 
@@ -452,4 +453,4 @@ Using `terraform import`, import CodeDeploy Deployment Groups using `appName`, a [1]: http://docs.aws.amazon.com/codedeploy/latest/userguide/monitoring-sns-event-notifications-create-trigger.html - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/codeguruprofiler_profiling_group.html.markdown b/website/docs/cdktf/typescript/r/codeguruprofiler_profiling_group.html.markdown index e723465f4a35..a22c0cc6910f 100644 --- a/website/docs/cdktf/typescript/r/codeguruprofiler_profiling_group.html.markdown +++ b/website/docs/cdktf/typescript/r/codeguruprofiler_profiling_group.html.markdown @@ -50,6 +50,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `computePlatform` - (Optional) Compute platform of the profiling group. * `tags` - (Optional) Map of tags assigned to the resource. If configured with a provider [`defaultTags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. 
@@ -97,4 +98,4 @@ Using `terraform import`, import CodeGuru Profiler Profiling Group using the `id % terraform import aws_codeguruprofiler_profiling_group.example profiling_group-name-12345678 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/codegurureviewer_repository_association.html.markdown b/website/docs/cdktf/typescript/r/codegurureviewer_repository_association.html.markdown index 531ba86acdbf..c07b03c9afbc 100644 --- a/website/docs/cdktf/typescript/r/codegurureviewer_repository_association.html.markdown +++ b/website/docs/cdktf/typescript/r/codegurureviewer_repository_association.html.markdown @@ -66,6 +66,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `kmsKeyDetails` - (Optional) An object describing the KMS key to asssociate. Block is documented below. ## repository @@ -122,4 +123,4 @@ This resource exports the following attributes in addition to the arguments abov * `update` - (Default `180m`) * `delete` - (Default `90m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/codepipeline.html.markdown b/website/docs/cdktf/typescript/r/codepipeline.html.markdown index 1edf6a9194f3..fa3f65109d19 100644 --- a/website/docs/cdktf/typescript/r/codepipeline.html.markdown +++ b/website/docs/cdktf/typescript/r/codepipeline.html.markdown @@ -191,18 +191,19 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The name of the pipeline. * `pipelineType` - (Optional) Type of the pipeline. Possible values are: `V1` and `V2`. Default value is `V1`. * `roleArn` - (Required) A service role Amazon Resource Name (ARN) that grants AWS CodePipeline permission to make calls to AWS services on your behalf. * `artifactStore` (Required) One or more artifact_store blocks. Artifact stores are documented below. * `executionMode` (Optional) The method that the pipeline will use to handle multiple executions. The default mode is `SUPERSEDED`. For value values, refer to the [AWS documentation](https://docs.aws.amazon.com/codepipeline/latest/APIReference/API_PipelineDeclaration.html#CodePipeline-Type-PipelineDeclaration-executionMode). - - **Note:** `QUEUED` or `PARALLEL` mode can only be used with V2 pipelines. * `stage` (Minimum of at least two `stage` blocks is required) A stage block. Stages are documented below. * `tags` - (Optional) A map of tags to assign to the resource. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. * `trigger` - (Optional) A trigger block. Valid only when `pipelineType` is `V2`. Triggers are documented below. * `variable` - (Optional) A pipeline-level variable block. Valid only when `pipelineType` is `V2`. Variable are documented below. +**Note:** `QUEUED` or `PARALLEL` mode can only be used with V2 pipelines. + ### `artifactStore` An `artifactStore` block supports the following arguments: @@ -400,4 +401,4 @@ Using `terraform import`, import CodePipelines using the `name`. 
For example: % terraform import aws_codepipeline.example example-pipeline ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/codepipeline_custom_action_type.html.markdown b/website/docs/cdktf/typescript/r/codepipeline_custom_action_type.html.markdown index 63d74b519255..62889c689b5b 100644 --- a/website/docs/cdktf/typescript/r/codepipeline_custom_action_type.html.markdown +++ b/website/docs/cdktf/typescript/r/codepipeline_custom_action_type.html.markdown @@ -48,6 +48,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `category` - (Required) The category of the custom action. Valid values: `Source`, `Build`, `Deploy`, `Test`, `Invoke`, `Approval` * `configurationProperty` - (Optional) The configuration properties for the custom action. Max 10 items. @@ -129,4 +130,4 @@ Using `terraform import`, import CodeDeploy CustomActionType using the `id`. 
For % terraform import aws_codepipeline_custom_action_type.example Build:terraform:1 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/codepipeline_webhook.html.markdown b/website/docs/cdktf/typescript/r/codepipeline_webhook.html.markdown index 34669979de25..501b09aa8547 100644 --- a/website/docs/cdktf/typescript/r/codepipeline_webhook.html.markdown +++ b/website/docs/cdktf/typescript/r/codepipeline_webhook.html.markdown @@ -122,6 +122,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The name of the webhook. * `authentication` - (Required) The type of authentication to use. One of `IP`, `GITHUB_HMAC`, or `UNAUTHENTICATED`. * `authenticationConfiguration` - (Optional) An `auth` block. Required for `IP` and `GITHUB_HMAC`. Auth blocks are documented below. @@ -151,6 +152,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_codepipeline_webhook.example + identity = { + "arn" = "arn:aws:codepipeline:us-west-2:123456789012:webhook:example-webhook" + } +} + +resource "aws_codepipeline_webhook" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the CodePipeline webhook. 
+ In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import CodePipeline Webhooks using their ARN. For example: ```typescript @@ -181,4 +203,4 @@ Using `terraform import`, import CodePipeline Webhooks using their ARN. For exam % terraform import aws_codepipeline_webhook.example arn:aws:codepipeline:us-west-2:123456789012:webhook:example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/codestarconnections_connection.html.markdown b/website/docs/cdktf/typescript/r/codestarconnections_connection.html.markdown index 562f4616dc41..62885e508c10 100644 --- a/website/docs/cdktf/typescript/r/codestarconnections_connection.html.markdown +++ b/website/docs/cdktf/typescript/r/codestarconnections_connection.html.markdown @@ -112,6 +112,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The name of the connection to be created. The name must be unique in the calling AWS account. Changing `name` will create a new resource. * `providerType` - (Optional) The name of the external provider where your third-party code repository is configured. Valid values are `Bitbucket`, `GitHub`, `GitHubEnterpriseServer`, `GitLab` or `GitLabSelfManaged`. Changing `providerType` will create a new resource. Conflicts with `hostArn` * `hostArn` - (Optional) The Amazon Resource Name (ARN) of the host associated with the connection. 
Conflicts with `providerType` @@ -128,6 +129,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_codestarconnections_connection.example + identity = { + "arn" = "arn:aws:codestar-connections:us-west-2:123456789012:connection/example-connection-id" + } +} + +resource "aws_codestarconnections_connection" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the CodeStar connection. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import CodeStar connections using the ARN. For example: ```typescript @@ -158,4 +180,4 @@ Using `terraform import`, import CodeStar connections using the ARN. For example % terraform import aws_codestarconnections_connection.test-connection arn:aws:codestar-connections:us-west-1:0123456789:connection/79d4d357-a2ee-41e4-b350-2fe39ae59448 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/codestarconnections_host.html.markdown b/website/docs/cdktf/typescript/r/codestarconnections_host.html.markdown index 3cb7f2e2c8db..73f019a718b8 100644 --- a/website/docs/cdktf/typescript/r/codestarconnections_host.html.markdown +++ b/website/docs/cdktf/typescript/r/codestarconnections_host.html.markdown @@ -42,6 +42,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The name of the host to be created. The name must be unique in the calling AWS account. * `providerEndpoint` - (Required) The endpoint of the infrastructure to be represented by the host after it is created. * `providerType` - (Required) The name of the external provider where your third-party code repository is configured. @@ -64,6 +65,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_codestarconnections_host.example + identity = { + "arn" = "arn:aws:codestar-connections:us-west-2:123456789012:host/example-host-id" + } +} + +resource "aws_codestarconnections_host" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the CodeStar connections host. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import CodeStar Host using the ARN. For example: ```typescript @@ -94,4 +116,4 @@ Using `terraform import`, import CodeStar Host using the ARN. 
For example: % terraform import aws_codestarconnections_host.example-host arn:aws:codestar-connections:us-west-1:0123456789:host/79d4d357-a2ee-41e4-b350-2fe39ae59448 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/codestarnotifications_notification_rule.html.markdown b/website/docs/cdktf/typescript/r/codestarnotifications_notification_rule.html.markdown index b120f69d8f4f..8e47e077499d 100644 --- a/website/docs/cdktf/typescript/r/codestarnotifications_notification_rule.html.markdown +++ b/website/docs/cdktf/typescript/r/codestarnotifications_notification_rule.html.markdown @@ -74,6 +74,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `detailType` - (Required) The level of detail to include in the notifications for this resource. Possible values are `BASIC` and `FULL`. * `eventTypeIds` - (Required) A list of event types associated with this notification rule. For list of allowed events see [here](https://docs.aws.amazon.com/codestar-notifications/latest/userguide/concepts.html#concepts-api). @@ -98,6 +99,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. 
For example: + +```terraform +import { + to = aws_codestarnotifications_notification_rule.example + identity = { + "arn" = "arn:aws:codestar-notifications:us-west-2:123456789012:notificationrule/dc82df7a-9435-44d4-a696-78f67EXAMPLE" + } +} + +resource "aws_codestarnotifications_notification_rule" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the CodeStar notification rule. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import CodeStar notification rule using the ARN. For example: ```typescript @@ -128,4 +150,4 @@ Using `terraform import`, import CodeStar notification rule using the ARN. For e % terraform import aws_codestarnotifications_notification_rule.foo arn:aws:codestar-notifications:us-west-1:0123456789:notificationrule/2cdc68a3-8f7c-4893-b6a5-45b362bd4f2b ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/cognito_identity_pool.html.markdown b/website/docs/cdktf/typescript/r/cognito_identity_pool.html.markdown index 15ee69e7eec6..968a85ac5aa9 100644 --- a/website/docs/cdktf/typescript/r/cognito_identity_pool.html.markdown +++ b/website/docs/cdktf/typescript/r/cognito_identity_pool.html.markdown @@ -67,6 +67,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `identityPoolName` (Required) - The Cognito Identity Pool name. * `allowUnauthenticatedIdentities` (Required) - Whether the identity pool supports unauthenticated logins or not. 
* `allowClassicFlow` (Optional) - Enables or disables the classic / basic authentication flow. Default is `false`. @@ -124,4 +125,4 @@ Using `terraform import`, import Cognito Identity Pool using its ID. For example % terraform import aws_cognito_identity_pool.mypool us-west-2:1a234567-8901-234b-5cde-f6789g01h2i3 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/cognito_identity_pool_provider_principal_tag.html.markdown b/website/docs/cdktf/typescript/r/cognito_identity_pool_provider_principal_tag.html.markdown index fcacb186a25d..62e7c06c10f8 100644 --- a/website/docs/cdktf/typescript/r/cognito_identity_pool_provider_principal_tag.html.markdown +++ b/website/docs/cdktf/typescript/r/cognito_identity_pool_provider_principal_tag.html.markdown @@ -85,6 +85,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `identityPoolId` (Required) - An identity pool ID. * `identityProviderName` (Required) - The name of the identity provider. * `principalTags`: (Optional: []) - String to string map of variables. 
@@ -126,4 +127,4 @@ Using `terraform import`, import Cognito Identity Pool Roles Attachment using th % terraform import aws_cognito_identity_pool_provider_principal_tag.example us-west-2_abc123:CorpAD ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/cognito_identity_pool_roles_attachment.html.markdown b/website/docs/cdktf/typescript/r/cognito_identity_pool_roles_attachment.html.markdown index 289875313b77..5f72a5af4334 100644 --- a/website/docs/cdktf/typescript/r/cognito_identity_pool_roles_attachment.html.markdown +++ b/website/docs/cdktf/typescript/r/cognito_identity_pool_roles_attachment.html.markdown @@ -130,6 +130,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `identityPoolId` (Required) - An identity pool ID in the format `REGION_GUID`. * `roleMapping` (Optional) - A List of [Role Mapping](#role-mappings). * `roles` (Required) - The map of roles associated with this pool. For a given role, the key will be either "authenticated" or "unauthenticated" and the value will be the Role ARN. 
@@ -186,4 +187,4 @@ Using `terraform import`, import Cognito Identity Pool Roles Attachment using th % terraform import aws_cognito_identity_pool_roles_attachment.example us-west-2:b64805ad-cb56-40ba-9ffc-f5d8207e6d42 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/cognito_identity_provider.html.markdown b/website/docs/cdktf/typescript/r/cognito_identity_provider.html.markdown index d7387cc885e7..58ba7a3e3b2a 100644 --- a/website/docs/cdktf/typescript/r/cognito_identity_provider.html.markdown +++ b/website/docs/cdktf/typescript/r/cognito_identity_provider.html.markdown @@ -55,6 +55,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `userPoolId` (Required) - The user pool id * `providerName` (Required) - The provider name * `providerType` (Required) - The provider type. 
[See AWS API for valid values](https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_CreateIdentityProvider.html#CognitoUserPools-CreateIdentityProvider-request-ProviderType) @@ -98,4 +99,4 @@ Using `terraform import`, import `aws_cognito_identity_provider` resources using % terraform import aws_cognito_identity_provider.example us-west-2_abc123:CorpAD ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/cognito_log_delivery_configuration.html.markdown b/website/docs/cdktf/typescript/r/cognito_log_delivery_configuration.html.markdown new file mode 100644 index 000000000000..af621c2083cb --- /dev/null +++ b/website/docs/cdktf/typescript/r/cognito_log_delivery_configuration.html.markdown @@ -0,0 +1,336 @@ +--- +subcategory: "Cognito IDP (Identity Provider)" +layout: "aws" +page_title: "AWS: aws_cognito_log_delivery_configuration" +description: |- + Manages an AWS Cognito IDP (Identity Provider) Log Delivery Configuration. +--- + + + +# Resource: aws_cognito_log_delivery_configuration + +Manages an AWS Cognito IDP (Identity Provider) Log Delivery Configuration. + +## Example Usage + +### Basic Usage with CloudWatch Logs + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { Token, TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. 
+ */ +import { CloudwatchLogGroup } from "./.gen/providers/aws/cloudwatch-log-group"; +import { CognitoLogDeliveryConfiguration } from "./.gen/providers/aws/cognito-log-delivery-configuration"; +import { CognitoUserPool } from "./.gen/providers/aws/cognito-user-pool"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + const example = new CloudwatchLogGroup(this, "example", { + name: "example", + }); + const awsCognitoUserPoolExample = new CognitoUserPool(this, "example_1", { + name: "example", + }); + /*This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match.*/ + awsCognitoUserPoolExample.overrideLogicalId("example"); + const awsCognitoLogDeliveryConfigurationExample = + new CognitoLogDeliveryConfiguration(this, "example_2", { + logConfigurations: [ + { + cloudWatchLogsConfiguration: [ + { + logGroupArn: example.arn, + }, + ], + eventSource: "userNotification", + logLevel: "ERROR", + }, + ], + userPoolId: Token.asString(awsCognitoUserPoolExample.id), + }); + /*This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match.*/ + awsCognitoLogDeliveryConfigurationExample.overrideLogicalId("example"); + } +} + +``` + +### Multiple Log Configurations with Different Destinations + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { Fn, Token, TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. 
+ */ +import { CloudwatchLogGroup } from "./.gen/providers/aws/cloudwatch-log-group"; +import { CognitoLogDeliveryConfiguration } from "./.gen/providers/aws/cognito-log-delivery-configuration"; +import { CognitoUserPool } from "./.gen/providers/aws/cognito-user-pool"; +import { IamRole } from "./.gen/providers/aws/iam-role"; +import { IamRolePolicy } from "./.gen/providers/aws/iam-role-policy"; +import { KinesisFirehoseDeliveryStream } from "./.gen/providers/aws/kinesis-firehose-delivery-stream"; +import { S3Bucket } from "./.gen/providers/aws/s3-bucket"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + const example = new CloudwatchLogGroup(this, "example", { + name: "example", + }); + const awsCognitoUserPoolExample = new CognitoUserPool(this, "example_1", { + name: "example", + }); + /*This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match.*/ + awsCognitoUserPoolExample.overrideLogicalId("example"); + const firehose = new IamRole(this, "firehose", { + assumeRolePolicy: Token.asString( + Fn.jsonencode({ + Statement: [ + { + Action: "sts:AssumeRole", + Effect: "Allow", + Principal: { + Service: "firehose.amazonaws.com", + }, + }, + ], + Version: "2012-10-17", + }) + ), + name: "firehose-role", + }); + const awsS3BucketExample = new S3Bucket(this, "example_3", { + bucket: "example-bucket", + forceDestroy: true, + }); + /*This allows the Terraform resource name to match the original name. 
You can remove the call if you don't need them to match.*/ + awsS3BucketExample.overrideLogicalId("example"); + const awsIamRolePolicyFirehose = new IamRolePolicy(this, "firehose_4", { + name: "firehose-policy", + policy: Token.asString( + Fn.jsonencode({ + Statement: [ + { + Action: [ + "s3:AbortMultipartUpload", + "s3:GetBucketLocation", + "s3:GetObject", + "s3:ListBucket", + "s3:ListBucketMultipartUploads", + "s3:PutObject", + ], + Effect: "Allow", + Resource: [ + awsS3BucketExample.arn, + "${" + awsS3BucketExample.arn + "}/*", + ], + }, + ], + Version: "2012-10-17", + }) + ), + role: firehose.id, + }); + /*This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match.*/ + awsIamRolePolicyFirehose.overrideLogicalId("firehose"); + const awsKinesisFirehoseDeliveryStreamExample = + new KinesisFirehoseDeliveryStream(this, "example_5", { + destination: "extended_s3", + extendedS3Configuration: { + bucketArn: Token.asString(awsS3BucketExample.arn), + roleArn: firehose.arn, + }, + name: "example-stream", + }); + /*This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match.*/ + awsKinesisFirehoseDeliveryStreamExample.overrideLogicalId("example"); + const awsCognitoLogDeliveryConfigurationExample = + new CognitoLogDeliveryConfiguration(this, "example_6", { + logConfigurations: [ + { + cloudWatchLogsConfiguration: [ + { + logGroupArn: example.arn, + }, + ], + eventSource: "userNotification", + logLevel: "INFO", + }, + { + eventSource: "userAuthEvents", + firehoseConfiguration: [ + { + streamArn: Token.asString( + awsKinesisFirehoseDeliveryStreamExample.arn + ), + }, + ], + logLevel: "ERROR", + }, + ], + userPoolId: Token.asString(awsCognitoUserPoolExample.id), + }); + /*This allows the Terraform resource name to match the original name. 
You can remove the call if you don't need them to match.*/ + awsCognitoLogDeliveryConfigurationExample.overrideLogicalId("example"); + } +} + +``` + +### S3 Configuration + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { Token, TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. + */ +import { CognitoLogDeliveryConfiguration } from "./.gen/providers/aws/cognito-log-delivery-configuration"; +import { CognitoUserPool } from "./.gen/providers/aws/cognito-user-pool"; +import { S3Bucket } from "./.gen/providers/aws/s3-bucket"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + const example = new CognitoUserPool(this, "example", { + name: "example", + }); + const awsS3BucketExample = new S3Bucket(this, "example_1", { + bucket: "example-bucket", + forceDestroy: true, + }); + /*This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match.*/ + awsS3BucketExample.overrideLogicalId("example"); + const awsCognitoLogDeliveryConfigurationExample = + new CognitoLogDeliveryConfiguration(this, "example_2", { + logConfigurations: [ + { + eventSource: "userNotification", + logLevel: "ERROR", + s3Configuration: [ + { + bucketArn: Token.asString(awsS3BucketExample.arn), + }, + ], + }, + ], + userPoolId: example.id, + }); + /*This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match.*/ + awsCognitoLogDeliveryConfigurationExample.overrideLogicalId("example"); + } +} + +``` + +## Argument Reference + +The following arguments are required: + +* `userPoolId` - (Required) The ID of the user pool for which to configure log delivery. 
+ +The following arguments are optional: + +* `logConfigurations` - (Optional) Configuration block for log delivery. At least one configuration block is required. See [Log Configurations](#log-configurations) below. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). + +### Log Configurations + +The `logConfigurations` block supports the following: + +* `eventSource` - (Required) The event source to configure logging for. Valid values are `userNotification` and `userAuthEvents`. +* `logLevel` - (Required) The log level to set for the event source. Valid values are `ERROR` and `INFO`. +* `cloudWatchLogsConfiguration` - (Optional) Configuration for CloudWatch Logs delivery. See [CloudWatch Logs Configuration](#cloudwatch-logs-configuration) below. +* `firehoseConfiguration` - (Optional) Configuration for Kinesis Data Firehose delivery. See [Firehose Configuration](#firehose-configuration) below. +* `s3Configuration` - (Optional) Configuration for S3 delivery. See [S3 Configuration](#s3-configuration) below. + +~> **Note:** At least one destination configuration (`cloudWatchLogsConfiguration`, `firehoseConfiguration`, or `s3Configuration`) must be specified for each log configuration. + +#### CloudWatch Logs Configuration + +The `cloudWatchLogsConfiguration` block supports the following: + +* `logGroupArn` - (Optional) The ARN of the CloudWatch Logs log group to which the logs should be delivered. + +#### Firehose Configuration + +The `firehoseConfiguration` block supports the following: + +* `streamArn` - (Optional) The ARN of the Kinesis Data Firehose delivery stream to which the logs should be delivered. + +#### S3 Configuration + +The `s3Configuration` block supports the following: + +* `bucketArn` - (Optional) The ARN of the S3 bucket to which the logs should be delivered.
+ +## Attribute Reference + +This resource exports no additional attributes. + +## Import + +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_cognito_log_delivery_configuration.example + identity = { + user_pool_id = "us-west-2_example123" + } +} + +resource "aws_cognito_log_delivery_configuration" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `userPoolId` (String) ID of the Cognito User Pool. + +#### Optional + +* `accountId` (String) AWS Account where this resource is managed. +* `region` (String) Region where this resource is managed. + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Cognito IDP (Identity Provider) Log Delivery Configuration using the `userPoolId`. For example: + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. + */ +import { CognitoLogDeliveryConfiguration } from "./.gen/providers/aws/cognito-log-delivery-configuration"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + CognitoLogDeliveryConfiguration.generateConfigForImport( + this, + "example", + "us-west-2_example123" + ); + } +} + +``` + +Using `terraform import`, import Cognito IDP (Identity Provider) Log Delivery Configuration using the `userPoolId`. 
For example: + +```console +% terraform import aws_cognito_log_delivery_configuration.example us-west-2_example123 +``` + + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/cognito_managed_login_branding.html.markdown b/website/docs/cdktf/typescript/r/cognito_managed_login_branding.html.markdown new file mode 100644 index 000000000000..e0dfb376a521 --- /dev/null +++ b/website/docs/cdktf/typescript/r/cognito_managed_login_branding.html.markdown @@ -0,0 +1,134 @@ +--- +subcategory: "Cognito IDP (Identity Provider)" +layout: "aws" +page_title: "AWS: aws_cognito_managed_login_branding" +description: |- + Manages branding settings for a user pool style and associates it with an app client. +--- + + + +# Resource: aws_cognito_managed_login_branding + +Manages branding settings for a user pool style and associates it with an app client. + +## Example Usage + +### Default Branding Style + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { Token, TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. + */ +import { CognitoManagedLoginBranding } from "./.gen/providers/aws/cognito-managed-login-branding"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + new CognitoManagedLoginBranding(this, "client", { + clientId: example.id, + useCognitoProvidedValues: true, + userPoolId: Token.asString(awsCognitoUserPoolExample.id), + }); + } +} + +``` + +### Custom Branding Style + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { Fn, Token, TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. 
+ * See https://cdk.tf/provider-generation for more details. + */ +import { CognitoManagedLoginBranding } from "./.gen/providers/aws/cognito-managed-login-branding"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + new CognitoManagedLoginBranding(this, "client", { + asset: [ + { + bytes: Token.asString(Fn.filebase64("login_branding_asset.svg")), + category: "PAGE_HEADER_BACKGROUND", + colorMode: "DARK", + extension: "SVG", + }, + ], + clientId: example.id, + settings: Token.asString(Fn.jsonencode({})), + userPoolId: Token.asString(awsCognitoUserPoolExample.id), + }); + } +} + +``` + +## Argument Reference + +The following arguments are required: + +* `clientId` - (Required) App client that the branding style is for. +* `userPoolId` - (Required) User pool the client belongs to. + +The following arguments are optional: + +* `asset` - (Optional) Image files to apply to roles like backgrounds, logos, and icons. See [details below](#asset). +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `settings` - (Optional) JSON document with the settings to apply to the style. +* `useCognitoProvidedValues` - (Optional) When `true`, applies the default branding style options. + +### asset + +* `bytes` - (Optional) Image file, in Base64-encoded binary. +* `category` - (Required) Category that the image corresponds to. See [AWS documentation](https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_AssetType.html#CognitoUserPools-Type-AssetType-Category) for valid values. +* `colorMode` - (Required) Display-mode target of the asset. Valid values: `LIGHT`, `DARK`, `DYNAMIC`. +* `extension` - (Required) File type of the image file. 
See [AWS documentation](https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_AssetType.html#CognitoUserPools-Type-AssetType-Extension) for valid values. +* `resourceId` - (Optional) Asset ID. + +## Attribute Reference + +This resource exports the following attributes in addition to the arguments above: + +* `managedLoginBrandingId` - ID of the managed login branding style. +* `settingsAll` - Settings including Amazon Cognito defaults. + +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Cognito branding settings using `userPoolId` and `managedLoginBrandingId` separated by `,`. For example: + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. + */ +import { CognitoManagedLoginBranding } from "./.gen/providers/aws/cognito-managed-login-branding"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + CognitoManagedLoginBranding.generateConfigForImport( + this, + "example", + "us-west-2_rSss9Zltr,06c6ae7b-1e66-46d2-87a9-1203ea3307bd" + ); + } +} + +``` + +Using `terraform import`, import Cognito branding settings using `userPoolId` and `managedLoginBrandingId` separated by `,`. 
For example: + +```console +% terraform import aws_cognito_managed_login_branding.example us-west-2_rSss9Zltr,06c6ae7b-1e66-46d2-87a9-1203ea3307bd +``` + + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/cognito_managed_user_pool_client.html.markdown b/website/docs/cdktf/typescript/r/cognito_managed_user_pool_client.html.markdown index 103e1e52a407..a46a590f17f8 100644 --- a/website/docs/cdktf/typescript/r/cognito_managed_user_pool_client.html.markdown +++ b/website/docs/cdktf/typescript/r/cognito_managed_user_pool_client.html.markdown @@ -126,6 +126,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `accessTokenValidity` - (Optional) Time limit, between 5 minutes and 1 day, after which the access token is no longer valid and cannot be used. By default, the unit is hours. The unit can be overridden by a value in `token_validity_units.access_token`. * `allowedOauthFlowsUserPoolClient` - (Optional) Whether the client is allowed to use OAuth 2.0 features. `allowedOauthFlowsUserPoolClient` must be set to `true` before you can configure the following arguments: `callbackUrls`, `logoutUrls`, `allowedOauthScopes` and `allowedOauthFlows`. * `allowedOauthFlows` - (Optional) List of allowed OAuth flows, including `code`, `implicit`, and `client_credentials`. `allowedOauthFlowsUserPoolClient` must be set to `true` before you can configure this option. @@ -142,7 +143,7 @@ The following arguments are optional: * `logoutUrls` - (Optional) List of allowed logout URLs for the identity providers. `allowedOauthFlowsUserPoolClient` must be set to `true` before you can configure this option. 
* `preventUserExistenceErrors` - (Optional) Setting determines the errors and responses returned by Cognito APIs when a user does not exist in the user pool during authentication, account confirmation, and password recovery. * `readAttributes` - (Optional) List of user pool attributes that the application client can read from. -* `refresh_token_rotation` - (Optional) A block that specifies the configuration of refresh token rotation. [Detailed below](#refresh_token_rotation). +* `refreshTokenRotation` - (Optional) A block that specifies the configuration of refresh token rotation. [Detailed below](#refresh_token_rotation). * `refreshTokenValidity` - (Optional) Time limit, between 60 minutes and 10 years, after which the refresh token is no longer valid and cannot be used. By default, the unit is days. The unit can be overridden by a value in `token_validity_units.refresh_token`. * `supportedIdentityProviders` - (Optional) List of provider names for the identity providers that are supported on this client. It uses the `providerName` attribute of the `aws_cognito_identity_provider` resource(s), or the equivalent string(s). * `tokenValidityUnits` - (Optional) Configuration block for representing the validity times in units. See details below. [Detailed below](#token_validity_units). @@ -161,7 +162,7 @@ Either `applicationArn` or `applicationId` is required for this configuration bl ### refresh_token_rotation * `feature` - (Required) The state of refresh token rotation for the current app client. Valid values are `ENABLED` or `DISABLED`. -* `retry_grace_period_seconds` - (Optional) A period of time in seconds that the user has to use the old refresh token before it is invalidated. Valid values are between `0` and `60`. +* `retryGracePeriodSeconds` - (Optional) A period of time in seconds that the user has to use the old refresh token before it is invalidated. Valid values are between `0` and `60`. 
### token_validity_units @@ -211,4 +212,4 @@ Using `terraform import`, import Cognito User Pool Clients using the `id` of the % terraform import aws_cognito_managed_user_pool_client.client us-west-2_abc123/3ho4ek12345678909nh3fmhpko ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/cognito_resource_server.html.markdown b/website/docs/cdktf/typescript/r/cognito_resource_server.html.markdown index a53df80b20f8..3b7933bd8a94 100644 --- a/website/docs/cdktf/typescript/r/cognito_resource_server.html.markdown +++ b/website/docs/cdktf/typescript/r/cognito_resource_server.html.markdown @@ -81,6 +81,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `identifier` - (Required) An identifier for the resource server. * `name` - (Required) A name for the resource server. * `userPoolId` - (Required) User pool the client belongs to. 
@@ -129,4 +130,4 @@ Using `terraform import`, import `aws_cognito_resource_server` using their User % terraform import aws_cognito_resource_server.example "us-west-2_abc123|https://example.com" ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/cognito_risk_configuration.html.markdown b/website/docs/cdktf/typescript/r/cognito_risk_configuration.html.markdown index 480f242598d6..7f8e9ecbcd84 100644 --- a/website/docs/cdktf/typescript/r/cognito_risk_configuration.html.markdown +++ b/website/docs/cdktf/typescript/r/cognito_risk_configuration.html.markdown @@ -41,6 +41,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `userPoolId` - (Required) The user pool ID. * `clientId` - (Optional) The app client ID. When the client ID is not provided, the same risk configuration is applied to all the clients in the User Pool. * `accountTakeoverRiskConfiguration` - (Optional) The account takeover risk configuration. See details below. 
@@ -164,4 +165,4 @@ Import using the user pool ID and Client ID separated by a `:`: % terraform import aws_cognito_risk_configuration.main example:example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/cognito_user.html.markdown b/website/docs/cdktf/typescript/r/cognito_user.html.markdown index 5d6f22eafb1e..c0fed651dc91 100644 --- a/website/docs/cdktf/typescript/r/cognito_user.html.markdown +++ b/website/docs/cdktf/typescript/r/cognito_user.html.markdown @@ -104,6 +104,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `attributes` - (Optional) A map that contains user attributes and attribute values to be set for the user. * `clientMetadata` - (Optional) A map of custom key-value pairs that you can provide as input for any custom workflows that user creation triggers. Amazon Cognito does not store the `clientMetadata` value. This data is available only to Lambda triggers that are assigned to a user pool to support custom workflows. If your user pool configuration does not include triggers, the ClientMetadata parameter serves no purpose. For more information, see [Customizing User Pool Workflows with Lambda Triggers](https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-user-identity-pools-working-with-aws-lambda-triggers.html). * `desiredDeliveryMediums` - (Optional) A list of mediums to the welcome message will be sent through. Allowed values are `EMAIL` and `SMS`. If it's provided, make sure you have also specified `email` attribute for the `EMAIL` medium and `phoneNumber` for the `SMS`. More than one value can be specified. 
Amazon Cognito does not store the `desiredDeliveryMediums` value. Defaults to `["SMS"]`. @@ -156,4 +157,4 @@ Using `terraform import`, import Cognito User using the `userPoolId`/`name` attr % terraform import aws_cognito_user.user us-east-1_vG78M4goG/user ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/cognito_user_group.html.markdown b/website/docs/cdktf/typescript/r/cognito_user_group.html.markdown index f77cad137a13..f088401757ee 100644 --- a/website/docs/cdktf/typescript/r/cognito_user_group.html.markdown +++ b/website/docs/cdktf/typescript/r/cognito_user_group.html.markdown @@ -82,6 +82,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The name of the user group. * `userPoolId` - (Required) The user pool ID. * `description` - (Optional) The description of the user group. 
@@ -124,4 +125,4 @@ Using `terraform import`, import Cognito User Groups using the `userPoolId`/`nam % terraform import aws_cognito_user_group.group us-east-1_vG78M4goG/user-group ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/cognito_user_in_group.html.markdown b/website/docs/cdktf/typescript/r/cognito_user_in_group.html.markdown index 45c3d2ee4e63..5ec9fc8aeae7 100644 --- a/website/docs/cdktf/typescript/r/cognito_user_in_group.html.markdown +++ b/website/docs/cdktf/typescript/r/cognito_user_in_group.html.markdown @@ -69,8 +69,9 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -The following arguments are required: +This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `userPoolId` - (Required) The user pool ID of the user and group. * `groupName` - (Required) The name of the group to which the user is to be added. * `username` - (Required) The username of the user to be added to the group. @@ -79,4 +80,36 @@ The following arguments are required: This resource exports no additional attributes. - \ No newline at end of file +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import a Cognito Group User using a comma-delimited string concatenating the `userPoolId`, `groupName`, and `username` arguments. For example: + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. 
+ * See https://cdk.tf/provider-generation for more details. + */ +import { CognitoUserInGroup } from "./.gen/providers/aws/cognito-user-in-group"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + CognitoUserInGroup.generateConfigForImport( + this, + "example", + "us-east-1_vG78M4goG,example-group,example-user" + ); + } +} + +``` + +Using `terraform import`, import a Cognito Group User using a comma-delimited string concatenating the `userPoolId`, `groupName`, and `username` arguments. For example: + +```console +% terraform import aws_cognito_user_in_group.example us-east-1_vG78M4goG,example-group,example-user +``` + + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/cognito_user_pool.html.markdown b/website/docs/cdktf/typescript/r/cognito_user_pool.html.markdown index 6348611a89e1..378a65426c0f 100644 --- a/website/docs/cdktf/typescript/r/cognito_user_pool.html.markdown +++ b/website/docs/cdktf/typescript/r/cognito_user_pool.html.markdown @@ -109,6 +109,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the user pool. * `accountRecoverySetting` - (Optional) Configuration block to define which verified available method a user can use to recover their forgotten password. [Detailed below](#account_recovery_setting). * `adminCreateUserConfig` - (Optional) Configuration block for creating a new user profile. [Detailed below](#admin_create_user_config). 
@@ -117,18 +118,18 @@ This resource supports the following arguments: * `deletionProtection` - (Optional) When active, DeletionProtection prevents accidental deletion of your user pool. Before you can delete a user pool that you have protected against deletion, you must deactivate this feature. Valid values are `ACTIVE` and `INACTIVE`, Default value is `INACTIVE`. * `deviceConfiguration` - (Optional) Configuration block for the user pool's device tracking. [Detailed below](#device_configuration). * `emailConfiguration` - (Optional) Configuration block for configuring email. [Detailed below](#email_configuration). -* `emailMfaConfiguration` - (Optional) Configuration block for configuring email Multi-Factor Authentication (MFA); requires at least 2 `accountRecoverySetting` entries; requires an `emailConfiguration` configuration block. [Detailed below](#email_mfa_configuration). +* `emailMfaConfiguration` - (Optional) Configuration block for configuring email Multi-Factor Authentication (MFA); requires at least 2 `accountRecoverySetting` entries; requires an `emailConfiguration` configuration block. Effective only when `mfaConfiguration` is `ON` or `OPTIONAL`. [Detailed below](#email_mfa_configuration). * `emailVerificationMessage` - (Optional) String representing the email verification message. Conflicts with `verificationMessageTemplate` configuration block `emailMessage` argument. * `emailVerificationSubject` - (Optional) String representing the email verification subject. Conflicts with `verificationMessageTemplate` configuration block `emailSubject` argument. * `lambdaConfig` - (Optional) Configuration block for the AWS Lambda triggers associated with the user pool. [Detailed below](#lambda_config). -* `mfaConfiguration` - (Optional) Multi-Factor Authentication (MFA) configuration for the User Pool. Defaults of `OFF`. 
Valid values are `OFF` (MFA Tokens are not required), `ON` (MFA is required for all users to sign in; requires at least one of `smsConfiguration` or `softwareTokenMfaConfiguration` to be configured), or `OPTIONAL` (MFA Will be required only for individual users who have MFA Enabled; requires at least one of `smsConfiguration` or `softwareTokenMfaConfiguration` to be configured). +* `mfaConfiguration` - (Optional) Multi-Factor Authentication (MFA) configuration for the User Pool. Defaults of `OFF`. Valid values are `OFF` (MFA Tokens are not required), `ON` (MFA is required for all users to sign in; requires at least one of `emailMfaConfiguration`, `smsConfiguration` or `softwareTokenMfaConfiguration` to be configured), or `OPTIONAL` (MFA Will be required only for individual users who have MFA Enabled; requires at least one of `emailMfaConfiguration`, `smsConfiguration` or `softwareTokenMfaConfiguration` to be configured). * `passwordPolicy` - (Optional) Configuration block for information about the user pool password policy. [Detailed below](#password_policy). * `schema` - (Optional) Configuration block for the schema attributes of a user pool. [Detailed below](#schema). Schema attributes from the [standard attribute set](https://docs.aws.amazon.com/cognito/latest/developerguide/user-pool-settings-attributes.html#cognito-user-pools-standard-attributes) only need to be specified if they are different from the default configuration. Attributes can be added, but not modified or removed. Maximum of 50 attributes. * `signInPolicy` - (Optional) Configuration block for information about the user pool sign in policy. [Detailed below](#sign_in_policy). * `smsAuthenticationMessage` - (Optional) String representing the SMS authentication message. The Message must contain the `{####}` placeholder, which will be replaced with the code. -* `smsConfiguration` - (Optional) Configuration block for Short Message Service (SMS) settings. [Detailed below](#sms_configuration). 
These settings apply to SMS user verification and SMS Multi-Factor Authentication (MFA). Due to Cognito API restrictions, the SMS configuration cannot be removed without recreating the Cognito User Pool. For user data safety, this resource will ignore the removal of this configuration by disabling drift detection. To force resource recreation after this configuration has been applied, see the [`taint` command](https://www.terraform.io/docs/commands/taint.html). +* `smsConfiguration` - (Optional) Configuration block for Short Message Service (SMS) settings. [Detailed below](#sms_configuration). These settings apply to SMS user verification and SMS Multi-Factor Authentication (MFA). SMS MFA is activated only when `mfaConfiguration` is set to `ON` or `OPTIONAL` along with this block. Due to Cognito API restrictions, the SMS configuration cannot be removed without recreating the Cognito User Pool. For user data safety, this resource will ignore the removal of this configuration by disabling drift detection. To force resource recreation after this configuration has been applied, see the [`taint` command](https://www.terraform.io/docs/commands/taint.html). +* `smsVerificationMessage` - (Optional) String representing the SMS verification message. Conflicts with `verificationMessageTemplate` configuration block `smsMessage` argument. -* `softwareTokenMfaConfiguration` - (Optional) Configuration block for software token Mult-Factor Authentication (MFA) settings. [Detailed below](#software_token_mfa_configuration). +* `softwareTokenMfaConfiguration` - (Optional) Configuration block for software token Multi-Factor Authentication (MFA) settings. Effective only when `mfaConfiguration` is `ON` or `OPTIONAL`. [Detailed below](#software_token_mfa_configuration). +* `tags` - (Optional) Map of tags to assign to the User Pool. 
If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. * `userAttributeUpdateSettings` - (Optional) Configuration block for user attribute update settings. [Detailed below](#user_attribute_update_settings). * `userPoolAddOns` - (Optional) Configuration block for user pool add-ons to enable user pool advanced security mode features. [Detailed below](#user_pool_add_ons). @@ -368,4 +369,4 @@ Using `terraform import`, import Cognito User Pools using the `id`. For example: % terraform import aws_cognito_user_pool.pool us-west-2_abc123 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/cognito_user_pool_client.html.markdown b/website/docs/cdktf/typescript/r/cognito_user_pool_client.html.markdown index 69e86a46d7d0..ae6bc053d85d 100644 --- a/website/docs/cdktf/typescript/r/cognito_user_pool_client.html.markdown +++ b/website/docs/cdktf/typescript/r/cognito_user_pool_client.html.markdown @@ -229,10 +229,10 @@ class MyConvertedCode extends TerraformStack { new CognitoUserPoolClient(this, "userpool_client", { explicitAuthFlows: ["ADMIN_NO_SRP_AUTH"], name: "client", - refresh_token_rotation: [ + refreshTokenRotation: [ { feature: "ENABLED", - retry_grace_period_seconds: 10, + retryGracePeriodSeconds: 10, }, ], userPoolId: pool.id, @@ -251,6 +251,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
* `accessTokenValidity` - (Optional) Time limit, between 5 minutes and 1 day, after which the access token is no longer valid and cannot be used. By default, the unit is hours. The unit can be overridden by a value in `token_validity_units.access_token`. * `allowedOauthFlowsUserPoolClient` - (Optional) Whether the client is allowed to use OAuth 2.0 features. `allowedOauthFlowsUserPoolClient` must be set to `true` before you can configure the following arguments: `callbackUrls`, `logoutUrls`, `allowedOauthScopes` and `allowedOauthFlows`. * `allowedOauthFlows` - (Optional) List of allowed OAuth flows, including `code`, `implicit`, and `client_credentials`. `allowedOauthFlowsUserPoolClient` must be set to `true` before you can configure this option. @@ -267,7 +268,7 @@ The following arguments are optional: * `logoutUrls` - (Optional) List of allowed logout URLs for the identity providers. `allowedOauthFlowsUserPoolClient` must be set to `true` before you can configure this option. * `preventUserExistenceErrors` - (Optional) Setting determines the errors and responses returned by Cognito APIs when a user does not exist in the user pool during authentication, account confirmation, and password recovery. * `readAttributes` - (Optional) List of user pool attributes that the application client can read from. -* `refresh_token_rotation` - (Optional) A block that specifies the configuration of refresh token rotation. [Detailed below](#refresh_token_rotation). +* `refreshTokenRotation` - (Optional) A block that specifies the configuration of refresh token rotation. [Detailed below](#refresh_token_rotation). * `refreshTokenValidity` - (Optional) Time limit, between 60 minutes and 10 years, after which the refresh token is no longer valid and cannot be used. By default, the unit is days. The unit can be overridden by a value in `token_validity_units.refresh_token`. 
* `supportedIdentityProviders` - (Optional) List of provider names for the identity providers that are supported on this client. It uses the `providerName` attribute of the `aws_cognito_identity_provider` resource(s), or the equivalent string(s). * `tokenValidityUnits` - (Optional) Configuration block for representing the validity times in units. See details below. [Detailed below](#token_validity_units). @@ -286,7 +287,7 @@ Either `applicationArn` or `applicationId` is required. ### refresh_token_rotation * `feature` - (Required) The state of refresh token rotation for the current app client. Valid values are `ENABLED` or `DISABLED`. -* `retry_grace_period_seconds` - (Optional) A period of time in seconds that the user has to use the old refresh token before it is invalidated. Valid values are between `0` and `60`. +* `retryGracePeriodSeconds` - (Optional) A period of time in seconds that the user has to use the old refresh token before it is invalidated. Valid values are between `0` and `60`. ### token_validity_units @@ -335,4 +336,4 @@ Using `terraform import`, import Cognito User Pool Clients using the `id` of the % terraform import aws_cognito_user_pool_client.client us-west-2_abc123/3ho4ek12345678909nh3fmhpko ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/cognito_user_pool_domain.html.markdown b/website/docs/cdktf/typescript/r/cognito_user_pool_domain.html.markdown index 88a64e2c073f..9a97d3f1b06d 100644 --- a/website/docs/cdktf/typescript/r/cognito_user_pool_domain.html.markdown +++ b/website/docs/cdktf/typescript/r/cognito_user_pool_domain.html.markdown @@ -94,6 +94,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `domain` - (Required) For custom domains, this is the fully-qualified domain name, such as auth.example.com. For Amazon Cognito prefix domains, this is the prefix alone, such as auth. * `userPoolId` - (Required) The user pool ID. * `certificateArn` - (Optional) The ARN of an ISSUED ACM certificate in us-east-1 for a custom domain. @@ -142,4 +143,4 @@ Using `terraform import`, import Cognito User Pool Domains using the `domain`. F % terraform import aws_cognito_user_pool_domain.main auth.example.org ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/cognito_user_pool_ui_customization.html.markdown b/website/docs/cdktf/typescript/r/cognito_user_pool_ui_customization.html.markdown index 6e60f34eead9..12374d82c8e6 100644 --- a/website/docs/cdktf/typescript/r/cognito_user_pool_ui_customization.html.markdown +++ b/website/docs/cdktf/typescript/r/cognito_user_pool_ui_customization.html.markdown @@ -116,6 +116,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `clientId` (Optional) The client ID for the client app. Defaults to `ALL`. If `ALL` is specified, the `css` and/or `imageFile` settings will be used for every client that has no UI customization set previously. * `css` (Optional) - The CSS values in the UI customization, provided as a String. At least one of `css` or `imageFile` is required. * `imageFile` (Optional) - The uploaded logo image for the UI customization, provided as a base64-encoded String. 
Drift detection is not possible for this argument. At least one of `css` or `imageFile` is required. @@ -162,4 +163,4 @@ Using `terraform import`, import Cognito User Pool UI Customizations using the ` % terraform import aws_cognito_user_pool_ui_customization.example us-west-2_ZCTarbt5C,12bu4fuk3mlgqa2rtrujgp6egq ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/comprehend_document_classifier.html.markdown b/website/docs/cdktf/typescript/r/comprehend_document_classifier.html.markdown index c2ac6fdd9707..9b41e3e8a633 100644 --- a/website/docs/cdktf/typescript/r/comprehend_document_classifier.html.markdown +++ b/website/docs/cdktf/typescript/r/comprehend_document_classifier.html.markdown @@ -47,7 +47,7 @@ class MyConvertedCode extends TerraformStack { dataAccessRoleArn: Token.asString(awsIamRoleExample.arn), dependsOn: [awsIamRolePolicyExample], inputDataConfig: { - s3Uri: "s3://${" + test.bucket + "}/${" + documents.id + "}", + s3Uri: "s3://${" + test.bucket + "}/${" + documents.key + "}", }, languageCode: "en", name: "example", @@ -72,6 +72,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `mode` - (Optional, Default: `MULTI_CLASS`) The document classification mode. One of `MULTI_CLASS` or `MULTI_LABEL`. `MULTI_CLASS` is also known as "Single Label" in the AWS Console. @@ -151,6 +152,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. 
For example: + +```terraform +import { + to = aws_comprehend_document_classifier.example + identity = { + "arn" = "arn:aws:comprehend:us-west-2:123456789012:document-classifier/example" + } +} + +resource "aws_comprehend_document_classifier" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the Comprehend document classifier. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Comprehend Document Classifier using the ARN. For example: ```typescript @@ -181,4 +203,4 @@ Using `terraform import`, import Comprehend Document Classifier using the ARN. F % terraform import aws_comprehend_document_classifier.example arn:aws:comprehend:us-west-2:123456789012:document_classifier/example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/comprehend_entity_recognizer.html.markdown b/website/docs/cdktf/typescript/r/comprehend_entity_recognizer.html.markdown index e1f14b11e30e..dc0c00648ae0 100644 --- a/website/docs/cdktf/typescript/r/comprehend_entity_recognizer.html.markdown +++ b/website/docs/cdktf/typescript/r/comprehend_entity_recognizer.html.markdown @@ -52,12 +52,16 @@ class MyConvertedCode extends TerraformStack { "s3://${" + awsS3BucketDocuments.bucket + "}/${" + - documents.id + + documents.key + "}", }, entityList: { s3Uri: - "s3://${" + awsS3BucketEntities.bucket + "}/${" + entities.id + "}", + "s3://${" + + awsS3BucketEntities.bucket + + "}/${" + + entities.key + + "}", }, entityTypes: [ { @@ -91,6 +95,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `modelKmsKeyId` - (Optional) The ID or ARN of a KMS Key used to encrypt trained Entity Recognizers. * `tags` - (Optional) A map of tags to assign to the resource. If configured with a provider [`defaultTags` Configuration Block](/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. * `versionName` - (Optional) Name for the version of the Entity Recognizer. @@ -182,6 +187,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_comprehend_entity_recognizer.example + identity = { + "arn" = "arn:aws:comprehend:us-west-2:123456789012:entity-recognizer/example" + } +} + +resource "aws_comprehend_entity_recognizer" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the Comprehend entity recognizer. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Comprehend Entity Recognizer using the ARN. For example: ```typescript @@ -212,4 +238,4 @@ Using `terraform import`, import Comprehend Entity Recognizer using the ARN. 
For % terraform import aws_comprehend_entity_recognizer.example arn:aws:comprehend:us-west-2:123456789012:entity-recognizer/example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/computeoptimizer_enrollment_status.html.markdown b/website/docs/cdktf/typescript/r/computeoptimizer_enrollment_status.html.markdown index 37de9cdcb29c..5e78aa8bb7ad 100644 --- a/website/docs/cdktf/typescript/r/computeoptimizer_enrollment_status.html.markdown +++ b/website/docs/cdktf/typescript/r/computeoptimizer_enrollment_status.html.markdown @@ -38,6 +38,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `includeMemberAccounts` - (Optional) Whether to enroll member accounts of the organization if the account is the management account of an organization. Default is `false`. * `status` - (Required) The enrollment status of the account. Valid values: `Active`, `Inactive`. @@ -86,4 +87,4 @@ Using `terraform import`, import enrollment status using the account ID. 
For exa % terraform import aws_computeoptimizer_enrollment_status.example 123456789012 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/computeoptimizer_recommendation_preferences.html.markdown b/website/docs/cdktf/typescript/r/computeoptimizer_recommendation_preferences.html.markdown index 409c7bfdaa88..51300541a3f7 100644 --- a/website/docs/cdktf/typescript/r/computeoptimizer_recommendation_preferences.html.markdown +++ b/website/docs/cdktf/typescript/r/computeoptimizer_recommendation_preferences.html.markdown @@ -87,12 +87,13 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `enhancedInfrastructureMetrics` - (Optional) The status of the enhanced infrastructure metrics recommendation preference. Valid values: `Active`, `Inactive`. * `externalMetricsPreference` - (Optional) The provider of the external metrics recommendation preference. See [External Metrics Preference](#external-metrics-preference) below. * `inferredWorkloadTypes` - (Optional) The status of the inferred workload types recommendation preference. Valid values: `Active`, `Inactive`. * `lookBackPeriod` - (Optional) The preference to control the number of days the utilization metrics of the AWS resource are analyzed. Valid values: `DAYS_14`, `DAYS_32`, `DAYS_93`. * `preferredResource` - (Optional) The preference to control which resource type values are considered when generating rightsizing recommendations. See [Preferred Resources](#preferred-resources) below. -* `resourceType` - (Required) The target resource type of the recommendation preferences. 
Valid values: `Ec2Instance`, `AutoScalingGroup`, `RdsDBInstance`. +* `resourceType` - (Required) The target resource type of the recommendation preferences. Valid values: `Ec2Instance`, `AutoScalingGroup`, `RdsDBInstance`, `AuroraDBClusterStorage`. * `savingsEstimationMode` - (Optional) The status of the savings estimation mode preference. Valid values: `AfterDiscounts`, `BeforeDiscounts`. * `scope` - (Required) The scope of the recommendation preferences. See [Scope](#scope) below. * `utilizationPreference` - (Optional) The preference to control the resource’s CPU utilization threshold, CPU utilization headroom, and memory utilization headroom. See [Utilization Preferences](#utilization-preferences) below. @@ -158,4 +159,4 @@ Using `terraform import`, import recommendation preferences using the resource t % terraform import aws_computeoptimizer_recommendation_preferences.example Ec2Instance,AccountId,123456789012 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/config_aggregate_authorization.html.markdown b/website/docs/cdktf/typescript/r/config_aggregate_authorization.html.markdown index 8163269dda3a..8ab8180a768e 100644 --- a/website/docs/cdktf/typescript/r/config_aggregate_authorization.html.markdown +++ b/website/docs/cdktf/typescript/r/config_aggregate_authorization.html.markdown @@ -28,7 +28,7 @@ class MyConvertedCode extends TerraformStack { super(scope, name); new ConfigAggregateAuthorization(this, "example", { accountId: "123456789012", - region: "eu-west-2", + authorizedAwsRegion: "eu-west-2", }); } } @@ -39,8 +39,9 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: -* `accountId` - (Required) Account ID -* `region` - (Required) Region +* `accountId` - (Required) Account ID. +* `authorizedAwsRegion` - (Optional) The region authorized to collect aggregated data. 
+* `region` - (Optional, **Deprecated**) The region authorized to collect aggregated data. Use `authorizedAwsRegion` instead. * `tags` - (Optional) A map of tags to assign to the resource. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. ## Attribute Reference @@ -52,7 +53,7 @@ This resource exports the following attributes in addition to the arguments abov ## Import -In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Config aggregate authorizations using `account_id:region`. For example: +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Config aggregate authorizations using `account_id:authorized_aws_region`. For example: ```typescript // DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug @@ -76,10 +77,10 @@ class MyConvertedCode extends TerraformStack { ``` -Using `terraform import`, import Config aggregate authorizations using `account_id:region`. For example: +Using `terraform import`, import Config aggregate authorizations using `account_id:authorized_aws_region`. 
For example: ```console % terraform import aws_config_aggregate_authorization.example 123456789012:us-east-1 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/config_config_rule.html.markdown b/website/docs/cdktf/typescript/r/config_config_rule.html.markdown index 0f02426242e9..b9c3d4935e33 100644 --- a/website/docs/cdktf/typescript/r/config_config_rule.html.markdown +++ b/website/docs/cdktf/typescript/r/config_config_rule.html.markdown @@ -185,6 +185,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The name of the rule * `description` - (Optional) Description of the rule * `evaluationMode` - (Optional) The modes the Config rule can be evaluated in. See [Evaluation Mode](#evaluation-mode) for more details. @@ -269,4 +270,4 @@ Using `terraform import`, import Config Rule using the name. 
For example: % terraform import aws_config_config_rule.foo example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/config_configuration_aggregator.html.markdown b/website/docs/cdktf/typescript/r/config_configuration_aggregator.html.markdown index c14961821174..7d62f465da97 100644 --- a/website/docs/cdktf/typescript/r/config_configuration_aggregator.html.markdown +++ b/website/docs/cdktf/typescript/r/config_configuration_aggregator.html.markdown @@ -108,6 +108,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The name of the configuration aggregator. * `accountAggregationSource` - (Optional) The account(s) to aggregate config data from as documented below. * `organizationAggregationSource` - (Optional) The organization to aggregate config data from as documented below. @@ -172,4 +173,4 @@ Using `terraform import`, import Configuration Aggregators using the name. 
For e % terraform import aws_config_configuration_aggregator.example foo ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/config_configuration_recorder.html.markdown b/website/docs/cdktf/typescript/r/config_configuration_recorder.html.markdown index e9f507c11145..1b1323adcbc2 100644 --- a/website/docs/cdktf/typescript/r/config_configuration_recorder.html.markdown +++ b/website/docs/cdktf/typescript/r/config_configuration_recorder.html.markdown @@ -135,6 +135,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Optional) The name of the recorder. Defaults to `default`. Changing it recreates the resource. * `roleArn` - (Required) Amazon Resource Name (ARN) of the IAM role. Used to make read or write requests to the delivery channel and to describe the AWS resources associated with the account. See [AWS Docs](http://docs.aws.amazon.com/config/latest/developerguide/iamrole-permissions.html) for more details. * `recordingGroup` - (Optional) Recording group - see below. @@ -201,4 +202,4 @@ Using `terraform import`, import Configuration Recorder using the name. 
For exam % terraform import aws_config_configuration_recorder.foo example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/config_configuration_recorder_status.html.markdown b/website/docs/cdktf/typescript/r/config_configuration_recorder_status.html.markdown index fdf851599153..249544ea5adc 100644 --- a/website/docs/cdktf/typescript/r/config_configuration_recorder_status.html.markdown +++ b/website/docs/cdktf/typescript/r/config_configuration_recorder_status.html.markdown @@ -107,6 +107,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The name of the recorder * `isEnabled` - (Required) Whether the configuration recorder should be enabled or disabled. @@ -146,4 +147,4 @@ Using `terraform import`, import Configuration Recorder Status using the name of % terraform import aws_config_configuration_recorder_status.foo example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/config_conformance_pack.html.markdown b/website/docs/cdktf/typescript/r/config_conformance_pack.html.markdown index ff37f24a9342..1723d2fbdb3b 100644 --- a/website/docs/cdktf/typescript/r/config_conformance_pack.html.markdown +++ b/website/docs/cdktf/typescript/r/config_conformance_pack.html.markdown @@ -100,6 +100,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required, Forces new resource) The name of the conformance pack. Must begin with a letter and contain from 1 to 256 alphanumeric characters and hyphens. * `deliveryS3Bucket` - (Optional) Amazon S3 bucket where AWS Config stores conformance pack templates. Maximum length of 63. * `deliveryS3KeyPrefix` - (Optional) The prefix for the Amazon S3 bucket. Maximum length of 1024. @@ -150,4 +151,4 @@ Using `terraform import`, import Config Conformance Packs using the `name`. For % terraform import aws_config_conformance_pack.example example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/config_delivery_channel.html.markdown b/website/docs/cdktf/typescript/r/config_delivery_channel.html.markdown index 41526a426c7c..40daf1fbac0c 100644 --- a/website/docs/cdktf/typescript/r/config_delivery_channel.html.markdown +++ b/website/docs/cdktf/typescript/r/config_delivery_channel.html.markdown @@ -95,6 +95,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Optional) The name of the delivery channel. Defaults to `default`. Changing it recreates the resource. * `s3BucketName` - (Required) The name of the S3 bucket used to store the configuration history. * `s3KeyPrefix` - (Optional) The prefix for the specified S3 bucket. @@ -140,4 +141,4 @@ Using `terraform import`, import Delivery Channel using the name. 
For example: % terraform import aws_config_delivery_channel.foo example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/config_organization_conformance_pack.html.markdown b/website/docs/cdktf/typescript/r/config_organization_conformance_pack.html.markdown index 3e02a50df533..03fabdc34b23 100644 --- a/website/docs/cdktf/typescript/r/config_organization_conformance_pack.html.markdown +++ b/website/docs/cdktf/typescript/r/config_organization_conformance_pack.html.markdown @@ -111,6 +111,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required, Forces new resource) The name of the organization conformance pack. Must begin with a letter and contain from 1 to 128 alphanumeric characters and hyphens. * `deliveryS3Bucket` - (Optional) Amazon S3 bucket where AWS Config stores conformance pack templates. Delivery bucket must begin with `awsconfigconforms` prefix. Maximum length of 63. * `deliveryS3KeyPrefix` - (Optional) The prefix for the Amazon S3 bucket. Maximum length of 1024. 
@@ -173,4 +174,4 @@ Using `terraform import`, import Config Organization Conformance Packs using the % terraform import aws_config_organization_conformance_pack.example example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/config_organization_custom_policy_rule.html.markdown b/website/docs/cdktf/typescript/r/config_organization_custom_policy_rule.html.markdown index 4e97d381e0b2..e1e7d1c4c354 100644 --- a/website/docs/cdktf/typescript/r/config_organization_custom_policy_rule.html.markdown +++ b/website/docs/cdktf/typescript/r/config_organization_custom_policy_rule.html.markdown @@ -50,28 +50,29 @@ class MyConvertedCode extends TerraformStack { The following arguments are required: -* `name` - (Required) name of the rule -* `policyText` - (Required) policy definition containing the logic for your organization AWS Config Custom Policy rule -* `policyRuntime` - (Required) runtime system for your organization AWS Config Custom Policy rules -* `triggerTypes` - (Required) List of notification types that trigger AWS Config to run an evaluation for the rule. Valid values: `ConfigurationItemChangeNotification`, `OversizedConfigurationItemChangeNotification` +* `name` - (Required) Name of the rule. +* `policyText` - (Required) Policy definition containing the rule logic. +* `policyRuntime` - (Required) Runtime system for policy rules. +* `triggerTypes` - (Required) List of notification types that trigger AWS Config to run an evaluation for the rule. Valid values: `ConfigurationItemChangeNotification`, `OversizedConfigurationItemChangeNotification`. 
The following arguments are optional: -* `description` - (Optional) Description of the rule -* `debugLogDeliveryAccounts` - (Optional) List of AWS account identifiers to exclude from the rule -* `excludedAccounts` - (Optional) List of AWS account identifiers to exclude from the rule -* `inputParameters` - (Optional) A string in JSON format that is passed to the AWS Config Rule Lambda Function +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `description` - (Optional) Description of the rule. +* `debugLogDeliveryAccounts` - (Optional) List of accounts that you can enable debug logging for. The list is null when debug logging is enabled for all accounts. +* `excludedAccounts` - (Optional) List of AWS account identifiers to exclude from the rule. +* `inputParameters` - (Optional) A string in JSON format that is passed to the AWS Config Rule Lambda Function. * `maximumExecutionFrequency` - (Optional) Maximum frequency with which AWS Config runs evaluations for a rule, if the rule is triggered at a periodic frequency. Defaults to `TwentyFour_Hours` for periodic frequency triggered rules. Valid values: `One_Hour`, `Three_Hours`, `Six_Hours`, `Twelve_Hours`, or `TwentyFour_Hours`. -* `resourceIdScope` - (Optional) Identifier of the AWS resource to evaluate -* `resourceTypesScope` - (Optional) List of types of AWS resources to evaluate -* `tagKeyScope` - (Optional, Required if `tagValueScope` is configured) Tag key of AWS resources to evaluate -* `tagValueScope` - (Optional) Tag value of AWS resources to evaluate +* `resourceIdScope` - (Optional) Identifier of the AWS resource to evaluate. +* `resourceTypesScope` - (Optional) List of types of AWS resources to evaluate. 
+* `tagKeyScope` - (Optional, Required if `tagValueScope` is configured) Tag key of AWS resources to evaluate. +* `tagValueScope` - (Optional) Tag value of AWS resources to evaluate. ## Attribute Reference This resource exports the following attributes in addition to the arguments above: -* `arn` - Amazon Resource Name (ARN) of the rule +* `arn` - Amazon Resource Name (ARN) of the rule. ## Timeouts @@ -113,4 +114,4 @@ Using `terraform import`, import a Config Organization Custom Policy Rule using % terraform import aws_config_organization_custom_policy_rule.example example_rule_name ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/config_organization_custom_rule.html.markdown b/website/docs/cdktf/typescript/r/config_organization_custom_rule.html.markdown index 3e3fa3bfcc05..7c445206ac96 100644 --- a/website/docs/cdktf/typescript/r/config_organization_custom_rule.html.markdown +++ b/website/docs/cdktf/typescript/r/config_organization_custom_rule.html.markdown @@ -66,6 +66,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `lambdaFunctionArn` - (Required) Amazon Resource Name (ARN) of the rule Lambda Function * `name` - (Required) The name of the rule * `triggerTypes` - (Required) List of notification types that trigger AWS Config to run an evaluation for the rule. 
Valid values: `ConfigurationItemChangeNotification`, `OversizedConfigurationItemChangeNotification`, and `ScheduledNotification` @@ -124,4 +125,4 @@ Using `terraform import`, import Config Organization Custom Rules using the name % terraform import aws_config_organization_custom_rule.example example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/config_organization_managed_rule.html.markdown b/website/docs/cdktf/typescript/r/config_organization_managed_rule.html.markdown index a8b59cae18b8..ada4d1f64184 100644 --- a/website/docs/cdktf/typescript/r/config_organization_managed_rule.html.markdown +++ b/website/docs/cdktf/typescript/r/config_organization_managed_rule.html.markdown @@ -52,6 +52,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The name of the rule * `ruleIdentifier` - (Required) Identifier of an available AWS Config Managed Rule to call. 
For available values, see the [List of AWS Config Managed Rules](https://docs.aws.amazon.com/config/latest/developerguide/managed-rules-by-aws-config.html) documentation * `description` - (Optional) Description of the rule @@ -109,4 +110,4 @@ Using `terraform import`, import Config Organization Managed Rules using the nam % terraform import aws_config_organization_managed_rule.example example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/config_remediation_configuration.html.markdown b/website/docs/cdktf/typescript/r/config_remediation_configuration.html.markdown index 2e7896cc4155..43cf132cc2bb 100644 --- a/website/docs/cdktf/typescript/r/config_remediation_configuration.html.markdown +++ b/website/docs/cdktf/typescript/r/config_remediation_configuration.html.markdown @@ -86,6 +86,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `automatic` - (Optional) Remediation is triggered automatically if `true`. * `executionControls` - (Optional) Configuration block for execution controls. See below. * `maximumAutomaticAttempts` - (Optional) Maximum number of failed attempts for auto-remediation. If you do not select a number, the default is 5. 
@@ -152,4 +153,4 @@ Using `terraform import`, import Remediation Configurations using the name confi % terraform import aws_config_remediation_configuration.this example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/config_retention_configuration.html.markdown b/website/docs/cdktf/typescript/r/config_retention_configuration.html.markdown index c7aaa0c5912f..ccb9410c1382 100644 --- a/website/docs/cdktf/typescript/r/config_retention_configuration.html.markdown +++ b/website/docs/cdktf/typescript/r/config_retention_configuration.html.markdown @@ -39,6 +39,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `retentionPeriodInDays` - (Required) The number of days AWS Config stores historical information. 
## Attribute Reference @@ -79,4 +80,4 @@ Using `terraform import`, import the AWS Config retention configuration using th % terraform import aws_config_retention_configuration.example default ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/connect_bot_association.html.markdown b/website/docs/cdktf/typescript/r/connect_bot_association.html.markdown index 2a2acf1a1b25..1dc285a527fc 100644 --- a/website/docs/cdktf/typescript/r/connect_bot_association.html.markdown +++ b/website/docs/cdktf/typescript/r/connect_bot_association.html.markdown @@ -105,7 +105,7 @@ class MyConvertedCode extends TerraformStack { { instanceId: Token.asString(awsConnectInstanceExample.id), lexBot: { - lexRegion: Token.asString(current.name), + lexRegion: Token.asString(current.region), name: Token.asString(awsLexBotExample.name), }, } @@ -121,6 +121,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `instanceId` - (Required) The identifier of the Amazon Connect instance. You can find the instanceId in the ARN of the instance. * `lexBot` - (Required) Configuration information of an Amazon Lex (V1) bot. Detailed below. 
@@ -169,4 +170,4 @@ Using `terraform import`, import `aws_connect_bot_association` using the Amazon % terraform import aws_connect_bot_association.example aaaaaaaa-bbbb-cccc-dddd-111111111111:Example:us-west-2 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/connect_contact_flow.html.markdown b/website/docs/cdktf/typescript/r/connect_contact_flow.html.markdown index bd26203fb68c..6f2cbcf0f840 100644 --- a/website/docs/cdktf/typescript/r/connect_contact_flow.html.markdown +++ b/website/docs/cdktf/typescript/r/connect_contact_flow.html.markdown @@ -121,6 +121,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `content` - (Optional) Specifies the content of the Contact Flow, provided as a JSON string, written in Amazon Connect Contact Flow Language. If defined, the `filename` argument cannot be used. * `contentHash` - (Optional) Used to trigger updates. Must be set to a base64-encoded SHA256 hash of the Contact Flow source specified with `filename`. The usual way to set this is filebase64sha256("mycontact_flow.json") (Terraform 0.11.12 and later) or base64sha256(file("mycontact_flow.json")) (Terraform 0.11.11 and earlier), where "mycontact_flow.json" is the local filename of the Contact Flow source. * `description` - (Optional) Specifies the description of the Contact Flow. 
@@ -171,4 +172,4 @@ Using `terraform import`, import Amazon Connect Contact Flows using the `instanc % terraform import aws_connect_contact_flow.example f1288a1f-6193-445a-b47e-af739b2:c1d4e5f6-1b3c-1b3c-1b3c-c1d4e5f6c1d4e5 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/connect_contact_flow_module.html.markdown b/website/docs/cdktf/typescript/r/connect_contact_flow_module.html.markdown index 9af1685cab1b..9ec81b7f71f0 100644 --- a/website/docs/cdktf/typescript/r/connect_contact_flow_module.html.markdown +++ b/website/docs/cdktf/typescript/r/connect_contact_flow_module.html.markdown @@ -137,6 +137,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `content` - (Optional) Specifies the content of the Contact Flow Module, provided as a JSON string, written in Amazon Connect Contact Flow Language. If defined, the `filename` argument cannot be used. * `contentHash` - (Optional) Used to trigger updates. Must be set to a base64-encoded SHA256 hash of the Contact Flow Module source specified with `filename`. The usual way to set this is filebase64sha256("contact_flow_module.json") (Terraform 0.11.12 and later) or base64sha256(file("contact_flow_module.json")) (Terraform 0.11.11 and earlier), where "contact_flow_module.json" is the local filename of the Contact Flow Module source. * `description` - (Optional) Specifies the description of the Contact Flow Module. 
@@ -186,4 +187,4 @@ Using `terraform import`, import Amazon Connect Contact Flow Modules using the ` % terraform import aws_connect_contact_flow_module.example f1288a1f-6193-445a-b47e-af739b2:c1d4e5f6-1b3c-1b3c-1b3c-c1d4e5f6c1d4e5 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/connect_hours_of_operation.html.markdown b/website/docs/cdktf/typescript/r/connect_hours_of_operation.html.markdown index dbd268838252..31f327f3f8d0 100644 --- a/website/docs/cdktf/typescript/r/connect_hours_of_operation.html.markdown +++ b/website/docs/cdktf/typescript/r/connect_hours_of_operation.html.markdown @@ -69,6 +69,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `config` - (Required) One or more config blocks which define the configuration information for the hours of operation: day, start time, and end time . Config blocks are documented below. * `description` - (Optional) Specifies the description of the Hours of Operation. * `instanceId` - (Required) Specifies the identifier of the hosting Amazon Connect Instance. 
@@ -133,4 +134,4 @@ Using `terraform import`, import Amazon Connect Hours of Operations using the `i % terraform import aws_connect_hours_of_operation.example f1288a1f-6193-445a-b47e-af739b2:c1d4e5f6-1b3c-1b3c-1b3c-c1d4e5f6c1d4e5 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/connect_instance.html.markdown b/website/docs/cdktf/typescript/r/connect_instance.html.markdown index 45275b0b476f..1aca4a905424 100644 --- a/website/docs/cdktf/typescript/r/connect_instance.html.markdown +++ b/website/docs/cdktf/typescript/r/connect_instance.html.markdown @@ -98,6 +98,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `autoResolveBestVoicesEnabled` - (Optional) Specifies whether auto resolve best voices is enabled. Defaults to `true`. * `contactFlowLogsEnabled` - (Optional) Specifies whether contact flow logs are enabled. Defaults to `false`. * `contactLensEnabled` - (Optional) Specifies whether contact lens is enabled. Defaults to `true`. @@ -131,6 +132,32 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_connect_instance.example + identity = { + id = "f1288a1f-6193-445a-b47e-af739b2" + } +} + +resource "aws_connect_instance" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `id` - (String) ID of the connect instance. 
+ +#### Optional + +- `accountId` (String) AWS Account where this resource is managed. +- `region` (String) Region where this resource is managed. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Connect instances using the `id`. For example: ```typescript @@ -161,4 +188,4 @@ Using `terraform import`, import Connect instances using the `id`. For example: % terraform import aws_connect_instance.example f1288a1f-6193-445a-b47e-af739b2 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/connect_instance_storage_config.html.markdown b/website/docs/cdktf/typescript/r/connect_instance_storage_config.html.markdown index cd0fec29814b..b75e8f4d8eb8 100644 --- a/website/docs/cdktf/typescript/r/connect_instance_storage_config.html.markdown +++ b/website/docs/cdktf/typescript/r/connect_instance_storage_config.html.markdown @@ -177,6 +177,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `instanceId` - (Required) Specifies the identifier of the hosting Amazon Connect Instance. * `resourceType` - (Required) A valid resource type. Valid Values: `AGENT_EVENTS` | `ATTACHMENTS` | `CALL_RECORDINGS` | `CHAT_TRANSCRIPTS` | `CONTACT_EVALUATIONS` | `CONTACT_TRACE_RECORDS` | `EMAIL_MESSAGES` | `MEDIA_STREAMS` | `REAL_TIME_CONTACT_ANALYSIS_CHAT_SEGMENTS` | `REAL_TIME_CONTACT_ANALYSIS_SEGMENTS` | `REAL_TIME_CONTACT_ANALYSIS_VOICE_SEGMENTS` | `SCHEDULED_REPORTS` | `SCREEN_RECORDINGS`. * `storageConfig` - (Required) Specifies the storage configuration options for the Connect Instance. 
[Documented below](#storage_config). @@ -265,4 +266,4 @@ Using `terraform import`, import Amazon Connect Instance Storage Configs using t % terraform import aws_connect_instance_storage_config.example f1288a1f-6193-445a-b47e-af739b2:c1d4e5f6-1b3c-1b3c-1b3c-c1d4e5f6c1d4e5:CHAT_TRANSCRIPTS ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/connect_lambda_function_association.html.markdown b/website/docs/cdktf/typescript/r/connect_lambda_function_association.html.markdown index 3579a3708252..6c6934969c49 100644 --- a/website/docs/cdktf/typescript/r/connect_lambda_function_association.html.markdown +++ b/website/docs/cdktf/typescript/r/connect_lambda_function_association.html.markdown @@ -40,6 +40,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `functionArn` - (Required) Amazon Resource Name (ARN) of the Lambda Function, omitting any version or alias qualifier. * `instanceId` - (Required) The identifier of the Amazon Connect instance. You can find the instanceId in the ARN of the instance. 
@@ -81,4 +82,4 @@ Using `terraform import`, import `aws_connect_lambda_function_association` using % terraform import aws_connect_lambda_function_association.example aaaaaaaa-bbbb-cccc-dddd-111111111111,arn:aws:lambda:us-west-2:123456789123:function:example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/connect_phone_number.html.markdown b/website/docs/cdktf/typescript/r/connect_phone_number.html.markdown index ce5b1bc8cfe4..3b4cf5f21c94 100644 --- a/website/docs/cdktf/typescript/r/connect_phone_number.html.markdown +++ b/website/docs/cdktf/typescript/r/connect_phone_number.html.markdown @@ -96,6 +96,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `countryCode` - (Required, Forces new resource) The ISO country code. For a list of Valid values, refer to [PhoneNumberCountryCode](https://docs.aws.amazon.com/connect/latest/APIReference/API_SearchAvailablePhoneNumbers.html#connect-SearchAvailablePhoneNumbers-request-PhoneNumberCountryCode). * `description` - (Optional, Forces new resource) The description of the phone number. * `prefix` - (Optional, Forces new resource) The prefix of the phone number that is used to filter available phone numbers. If provided, it must contain `+` as part of the country code. Do not specify this argument when importing the resource. @@ -130,6 +131,31 @@ The `status` configuration block supports the following attributes: ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. 
For example: + +```terraform +import { + to = aws_connect_phone_number.example + identity = { + id = "1234abcd-12ab-34cd-56ef-1234567890ab" + } +} +resource "aws_connect_phone_number" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `id` - (String) ID of the connect phone number. + +#### Optional + +- `accountId` (String) AWS Account where this resource is managed. +- `region` (String) Region where this resource is managed. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Amazon Connect Phone Numbers using its `id`. For example: ```typescript @@ -160,4 +186,4 @@ Using `terraform import`, import Amazon Connect Phone Numbers using its `id`. Fo % terraform import aws_connect_phone_number.example 12345678-abcd-1234-efgh-9876543210ab ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/connect_phone_number_contact_flow_association.html.markdown b/website/docs/cdktf/typescript/r/connect_phone_number_contact_flow_association.html.markdown new file mode 100644 index 000000000000..73061c83babc --- /dev/null +++ b/website/docs/cdktf/typescript/r/connect_phone_number_contact_flow_association.html.markdown @@ -0,0 +1,84 @@ +--- +subcategory: "Connect" +layout: "aws" +page_title: "AWS: aws_connect_phone_number_contact_flow_association" +description: |- + Associates a flow with a phone number claimed to an Amazon Connect instance. +--- + + + +# Resource: aws_connect_phone_number_contact_flow_association + +Associates a flow with a phone number claimed to an Amazon Connect instance. + +## Example Usage + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. 
+ * See https://cdk.tf/provider-generation for more details. + */ +import { ConnectPhoneNumberContactFlowAssociation } from "./.gen/providers/aws/"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + new ConnectPhoneNumberContactFlowAssociation(this, "example", { + contact_flow_id: awsConnectContactFlowExample.contactFlowId, + instance_id: awsConnectInstanceExample.id, + phone_number_id: awsConnectPhoneNumberExample.id, + }); + } +} + +``` + +## Argument Reference + +This resource supports the following arguments: + +* `contactFlowId` - (Required) Contact flow ID. +* `instanceId` - (Required) Amazon Connect instance ID. +* `phoneNumberId` - (Required) Phone number ID. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). + +## Attribute Reference + +This resource exports no additional attributes. + +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import `aws_connect_phone_number_contact_flow_association` using the `phoneNumberId`, `instanceId` and `contactFlowId` separated by a comma (`,`). For example: + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. 
+ */ +import { ConnectPhoneNumberContactFlowAssociation } from "./.gen/providers/aws/"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + ConnectPhoneNumberContactFlowAssociation.generateConfigForImport( + this, + "example", + "36727a4c-4683-4e49-880c-3347c61110a4,fa6c1691-e2eb-4487-bdb9-1aaed6268ebd,c4acdc79-395e-4280-a294-9062f56b07bb" + ); + } +} + +``` + +Using `terraform import`, import `aws_connect_phone_number_contact_flow_association` using the `phoneNumberId`, `instanceId` and `contactFlowId` separated by a comma (`,`). For example: + +```console +% terraform import aws_connect_phone_number_contact_flow_association.example 36727a4c-4683-4e49-880c-3347c61110a4,fa6c1691-e2eb-4487-bdb9-1aaed6268ebd,c4acdc79-395e-4280-a294-9062f56b07bb +``` + + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/connect_queue.html.markdown b/website/docs/cdktf/typescript/r/connect_queue.html.markdown index 65cdd28fd9cb..7bdf6973834a 100644 --- a/website/docs/cdktf/typescript/r/connect_queue.html.markdown +++ b/website/docs/cdktf/typescript/r/connect_queue.html.markdown @@ -109,6 +109,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` - (Optional) Specifies the description of the Queue. * `hoursOfOperationId` - (Required) Specifies the identifier of the Hours of Operation. * `instanceId` - (Required) Specifies the identifier of the hosting Amazon Connect Instance. 
@@ -166,4 +167,4 @@ Using `terraform import`, import Amazon Connect Queues using the `instanceId` an % terraform import aws_connect_queue.example f1288a1f-6193-445a-b47e-af739b2:c1d4e5f6-1b3c-1b3c-1b3c-c1d4e5f6c1d4e5 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/connect_quick_connect.html.markdown b/website/docs/cdktf/typescript/r/connect_quick_connect.html.markdown index a6f7ed552b13..82145ac24903 100644 --- a/website/docs/cdktf/typescript/r/connect_quick_connect.html.markdown +++ b/website/docs/cdktf/typescript/r/connect_quick_connect.html.markdown @@ -52,6 +52,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` - (Optional) Specifies the description of the Quick Connect. * `instanceId` - (Required) Specifies the identifier of the hosting Amazon Connect Instance. * `name` - (Required) Specifies the name of the Quick Connect. 
@@ -120,4 +121,4 @@ Using `terraform import`, import Amazon Connect Quick Connects using the `instan % terraform import aws_connect_quick_connect.example f1288a1f-6193-445a-b47e-af739b2:c1d4e5f6-1b3c-1b3c-1b3c-c1d4e5f6c1d4e5 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/connect_routing_profile.html.markdown b/website/docs/cdktf/typescript/r/connect_routing_profile.html.markdown index 7c337c1b6a2b..aa0568aed030 100644 --- a/website/docs/cdktf/typescript/r/connect_routing_profile.html.markdown +++ b/website/docs/cdktf/typescript/r/connect_routing_profile.html.markdown @@ -59,6 +59,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `defaultOutboundQueueId` - (Required) Specifies the default outbound queue for the Routing Profile. * `description` - (Required) Specifies the description of the Routing Profile. * `instanceId` - (Required) Specifies the identifier of the hosting Amazon Connect Instance. 
@@ -127,4 +128,4 @@ Using `terraform import`, import Amazon Connect Routing Profiles using the `inst % terraform import aws_connect_routing_profile.example f1288a1f-6193-445a-b47e-af739b2:c1d4e5f6-1b3c-1b3c-1b3c-c1d4e5f6c1d4e5 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/connect_security_profile.html.markdown b/website/docs/cdktf/typescript/r/connect_security_profile.html.markdown index 1a06fb7017b1..5472217a190f 100644 --- a/website/docs/cdktf/typescript/r/connect_security_profile.html.markdown +++ b/website/docs/cdktf/typescript/r/connect_security_profile.html.markdown @@ -45,6 +45,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` - (Optional) Specifies the description of the Security Profile. * `instanceId` - (Required) Specifies the identifier of the hosting Amazon Connect Instance. * `name` - (Required) Specifies the name of the Security Profile. 
@@ -94,4 +95,4 @@ Using `terraform import`, import Amazon Connect Security Profiles using the `ins % terraform import aws_connect_security_profile.example f1288a1f-6193-445a-b47e-af739b2:c1d4e5f6-1b3c-1b3c-1b3c-c1d4e5f6c1d4e5 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/connect_user.html.markdown b/website/docs/cdktf/typescript/r/connect_user.html.markdown index cc5ba9a6487b..257a4a43fd92 100644 --- a/website/docs/cdktf/typescript/r/connect_user.html.markdown +++ b/website/docs/cdktf/typescript/r/connect_user.html.markdown @@ -113,7 +113,7 @@ class MyConvertedCode extends TerraformStack { email: "example@example.com", firstName: "example", lastName: "example2", - secondary_email: "secondary@example.com", + secondaryEmail: "secondary@example.com", }, instanceId: Token.asString(awsConnectInstanceExample.id), name: "example", @@ -209,6 +209,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `directoryUserId` - (Optional) The identifier of the user account in the directory used for identity management. If Amazon Connect cannot access the directory, you can specify this identifier to authenticate users. If you include the identifier, we assume that Amazon Connect cannot access the directory. Otherwise, the identity information is used to authenticate users from your directory. This parameter is required if you are using an existing directory for identity management in Amazon Connect when Amazon Connect cannot access your directory to authenticate users. If you are using SAML for identity management and include this parameter, an error is returned. 
* `hierarchyGroupId` - (Optional) The identifier of the hierarchy group for the user. * `identityInfo` - (Optional) A block that contains information about the identity of the user. Documented below. @@ -226,7 +227,7 @@ A `identityInfo` block supports the following arguments: * `email` - (Optional) The email address. If you are using SAML for identity management and include this parameter, an error is returned. Note that updates to the `email` is supported. From the [UpdateUserIdentityInfo API documentation](https://docs.aws.amazon.com/connect/latest/APIReference/API_UpdateUserIdentityInfo.html) it is strongly recommended to limit who has the ability to invoke `UpdateUserIdentityInfo`. Someone with that ability can change the login credentials of other users by changing their email address. This poses a security risk to your organization. They can change the email address of a user to the attacker's email address, and then reset the password through email. For more information, see [Best Practices for Security Profiles](https://docs.aws.amazon.com/connect/latest/adminguide/security-profile-best-practices.html) in the Amazon Connect Administrator Guide. * `firstName` - (Optional) The first name. This is required if you are using Amazon Connect or SAML for identity management. Minimum length of 1. Maximum length of 100. * `lastName` - (Optional) The last name. This is required if you are using Amazon Connect or SAML for identity management. Minimum length of 1. Maximum length of 100. -* `secondary_email` - (Optional) The secondary email address. If present, email notifications will be sent to this email address instead of the primary one. +* `secondaryEmail` - (Optional) The secondary email address. If present, email notifications will be sent to this email address instead of the primary one. 
A `phoneConfig` block supports the following arguments: @@ -277,4 +278,4 @@ Using `terraform import`, import Amazon Connect Users using the `instanceId` and % terraform import aws_connect_user.example f1288a1f-6193-445a-b47e-af739b2:c1d4e5f6-1b3c-1b3c-1b3c-c1d4e5f6c1d4e5 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/connect_user_hierarchy_group.html.markdown b/website/docs/cdktf/typescript/r/connect_user_hierarchy_group.html.markdown index 149bed1bf807..1082002c22c1 100644 --- a/website/docs/cdktf/typescript/r/connect_user_hierarchy_group.html.markdown +++ b/website/docs/cdktf/typescript/r/connect_user_hierarchy_group.html.markdown @@ -81,6 +81,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `instanceId` - (Required) Specifies the identifier of the hosting Amazon Connect Instance. * `name` - (Required) The name of the user hierarchy group. Must not be more than 100 characters. * `parentGroupId` - (Optional) The identifier for the parent hierarchy group. The user hierarchy is created at level one if the parent group ID is null. 
@@ -145,4 +146,4 @@ Using `terraform import`, import Amazon Connect User Hierarchy Groups using the % terraform import aws_connect_user_hierarchy_group.example f1288a1f-6193-445a-b47e-af739b2:c1d4e5f6-1b3c-1b3c-1b3c-c1d4e5f6c1d4e5 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/connect_user_hierarchy_structure.html.markdown b/website/docs/cdktf/typescript/r/connect_user_hierarchy_structure.html.markdown index 3f3d7d0078bd..3ec2151be6b6 100644 --- a/website/docs/cdktf/typescript/r/connect_user_hierarchy_structure.html.markdown +++ b/website/docs/cdktf/typescript/r/connect_user_hierarchy_structure.html.markdown @@ -85,6 +85,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `hierarchyStructure` - (Required) A block that defines the hierarchy structure's levels. The `hierarchyStructure` block is documented below. * `instanceId` - (Required) Specifies the identifier of the hosting Amazon Connect Instance. 
@@ -144,4 +145,4 @@ Using `terraform import`, import Amazon Connect User Hierarchy Structures using % terraform import aws_connect_user_hierarchy_structure.example f1288a1f-6193-445a-b47e-af739b2 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/connect_vocabulary.html.markdown b/website/docs/cdktf/typescript/r/connect_vocabulary.html.markdown index 279c0566c6e3..f7fca8d0667a 100644 --- a/website/docs/cdktf/typescript/r/connect_vocabulary.html.markdown +++ b/website/docs/cdktf/typescript/r/connect_vocabulary.html.markdown @@ -46,6 +46,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `content` - (Required) The content of the custom vocabulary in plain-text format with a table of values. Each row in the table represents a word or a phrase, described with Phrase, IPA, SoundsLike, and DisplayAs fields. Separate the fields with TAB characters. For more information, see [Create a custom vocabulary using a table](https://docs.aws.amazon.com/transcribe/latest/dg/custom-vocabulary.html#create-vocabulary-table). Minimum length of `1`. Maximum length of `60000`. * `instanceId` - (Required) Specifies the identifier of the hosting Amazon Connect Instance. * `languageCode` - (Required) The language code of the vocabulary entries. For a list of languages and their corresponding language codes, see [What is Amazon Transcribe?](https://docs.aws.amazon.com/transcribe/latest/dg/transcribe-whatis.html). 
Valid Values are `ar-AE`, `de-CH`, `de-DE`, `en-AB`, `en-AU`, `en-GB`, `en-IE`, `en-IN`, `en-US`, `en-WL`, `es-ES`, `es-US`, `fr-CA`, `fr-FR`, `hi-IN`, `it-IT`, `ja-JP`, `ko-KR`, `pt-BR`, `pt-PT`, `zh-CN`. @@ -105,4 +106,4 @@ Using `terraform import`, import Amazon Connect Vocabularies using the `instance % terraform import aws_connect_vocabulary.example f1288a1f-6193-445a-b47e-af739b2:c1d4e5f6-1b3c-1b3c-1b3c-c1d4e5f6c1d4e5 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/controltower_baseline.html.markdown b/website/docs/cdktf/typescript/r/controltower_baseline.html.markdown new file mode 100644 index 000000000000..0b11002aa1e1 --- /dev/null +++ b/website/docs/cdktf/typescript/r/controltower_baseline.html.markdown @@ -0,0 +1,116 @@ +--- +subcategory: "Control Tower" +layout: "aws" +page_title: "AWS: aws_controltower_baseline" +description: |- + Terraform resource for managing an AWS Control Tower Baseline. +--- + + + +# Resource: aws_controltower_baseline + +Terraform resource for managing an AWS Control Tower Baseline. + +## Example Usage + +### Basic Usage + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. 
+ */ +import { ControltowerBaseline } from "./.gen/providers/aws/"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + new ControltowerBaseline(this, "example", { + baseline_identifier: + "arn:aws:controltower:us-east-1::baseline/17BSJV3IGJ2QSGA2", + baseline_version: "4.0", + parameters: [ + { + key: "IdentityCenterEnabledBaselineArn", + value: + "arn:aws:controltower:us-east-1:664418989480:enabledbaseline/XALULM96QHI525UOC", + }, + ], + target_identifier: test.arn, + }); + } +} + +``` + +## Argument Reference + +The following arguments are required: + +* `baseline_identifier` - (Required) The ARN of the baseline to be enabled. +* `baseline_version` - (Required) The version of the baseline to be enabled. +* `targetIdentifier` - (Required) The ARN of the target on which the baseline will be enabled. Only OUs are supported as targets. + +The following arguments are optional: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `parameters` - (Optional) A list of key-value objects that specify enablement parameters, where key is a string and value is a document of any type. See [Parameter](#parameters) below for details. +* `tags` - (Optional) Tags to apply to the landing zone. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. + +### parameters + +* `key` - (Required) The key of the parameter. +* `value` - (Required) The value of the parameter. 
+ +## Attribute Reference + +This resource exports the following attributes in addition to the arguments above: + +* `arn` - ARN of the Baseline. +* `operaton_identifier` - The ID (in UUID format) of the asynchronous operation. +* `tagsAll` - A map of tags assigned to the landing zone, including those inherited from the provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). + +## Timeouts + +[Configuration options](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts): + +* `create` - (Default `30m`) +* `update` - (Default `30m`) +* `delete` - (Default `30m`) + +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Control Tower Baseline using the `arn`. For example: + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. + */ +import { ControltowerBaseline } from "./.gen/providers/aws/"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + ControltowerBaseline.generateConfigForImport( + this, + "example", + "arn:aws:controltower:us-east-1:012345678912:enabledbaseline/XALULM96QHI525UOC" + ); + } +} + +``` + +Using `terraform import`, import Control Tower Baseline using the `arn`. 
For example: + +```console +% terraform import aws_controltower_baseline.example arn:aws:controltower:us-east-1:012345678912:enabledbaseline/XALULM96QHI525UOC +``` + + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/controltower_control.html.markdown b/website/docs/cdktf/typescript/r/controltower_control.html.markdown index 579831bf1dac..562e24356de6 100644 --- a/website/docs/cdktf/typescript/r/controltower_control.html.markdown +++ b/website/docs/cdktf/typescript/r/controltower_control.html.markdown @@ -44,7 +44,7 @@ class MyConvertedCode extends TerraformStack { { controlIdentifier: "arn:aws:controltower:${" + - current.name + + current.region + "}::control/AWS-GR_EC2_VOLUME_INUSE_CHECK", parameters: [ { @@ -78,6 +78,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `parameters` - (Optional) Parameter values which are specified to configure the control when you enable it. See [Parameters](#parameters) for more details. 
### Parameters @@ -124,4 +125,4 @@ Using `terraform import`, import Control Tower Controls using their `organizatio % terraform import aws_controltower_control.example arn:aws:organizations::123456789101:ou/o-qqaejywet/ou-qg5o-ufbhdtv3,arn:aws:controltower:us-east-1::control/WTDSMKDKDNLE ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/controltower_landing_zone.html.markdown b/website/docs/cdktf/typescript/r/controltower_landing_zone.html.markdown index 648f2f578f36..1d3966888685 100644 --- a/website/docs/cdktf/typescript/r/controltower_landing_zone.html.markdown +++ b/website/docs/cdktf/typescript/r/controltower_landing_zone.html.markdown @@ -42,6 +42,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `manifestJson` - (Required) The manifest JSON file is a text file that describes your AWS resources. For examples, review [Launch your landing zone](https://docs.aws.amazon.com/controltower/latest/userguide/lz-api-launch). * `version` - (Required) The landing zone version. * `tags` - (Optional) Tags to apply to the landing zone. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. @@ -97,4 +98,4 @@ Using `terraform import`, import a Control Tower Landing Zone using the `id`. 
Fo % terraform import aws_controltower_landing_zone.example 1A2B3C4D5E6F7G8H ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/cur_report_definition.html.markdown b/website/docs/cdktf/typescript/r/cur_report_definition.html.markdown index cae6cb8922b1..8eb120eca73f 100644 --- a/website/docs/cdktf/typescript/r/cur_report_definition.html.markdown +++ b/website/docs/cdktf/typescript/r/cur_report_definition.html.markdown @@ -35,6 +35,7 @@ class MyConvertedCode extends TerraformStack { format: "textORcsv", reportName: "example-cur-report-definition", s3Bucket: "example-bucket-name", + s3Prefix: "example-cur-report", s3Region: "us-east-1", timeUnit: "HOURLY", }); @@ -51,9 +52,9 @@ This resource supports the following arguments: * `timeUnit` - (Required) The frequency on which report data are measured and displayed. Valid values are: `DAILY`, `HOURLY`, `MONTHLY`. * `format` - (Required) Format for report. Valid values are: `textORcsv`, `Parquet`. If `Parquet` is used, then Compression must also be `Parquet`. * `compression` - (Required) Compression format for report. Valid values are: `GZIP`, `ZIP`, `Parquet`. If `Parquet` is used, then format must also be `Parquet`. -* `additionalSchemaElements` - (Required) A list of schema elements. Valid values are: `RESOURCES`, `SPLIT_COST_ALLOCATION_DATA`. +* `additionalSchemaElements` - (Required) A list of schema elements. Valid values are: `RESOURCES`, `SPLIT_COST_ALLOCATION_DATA`, `MANUAL_DISCOUNT_COMPATIBILITY`. * `s3Bucket` - (Required) Name of the existing S3 bucket to hold generated reports. -* `s3Prefix` - (Optional) Report path prefix. Limited to 256 characters. +* `s3Prefix` - (Required) Report path prefix. Limited to 256 characters. May be empty (`""`) but the resource can then not be modified via the AWS Console. * `s3Region` - (Required) Region of the existing S3 bucket to hold generated reports. 
* `additionalArtifacts` - (Required) A list of additional artifacts. Valid values are: `REDSHIFT`, `QUICKSIGHT`, `ATHENA`. When ATHENA exists within additional_artifacts, no other artifact type can be declared and report_versioning must be `OVERWRITE_REPORT`. * `refreshClosedReports` - (Optional) Set to true to update your reports after they have been finalized if AWS detects charges related to previous months. @@ -99,4 +100,4 @@ Using `terraform import`, import Report Definitions using the `reportName`. For % terraform import aws_cur_report_definition.example_cur_report_definition example-cur-report-definition ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/customer_gateway.html.markdown b/website/docs/cdktf/typescript/r/customer_gateway.html.markdown index 648d46d2007e..5349cbd5a175 100644 --- a/website/docs/cdktf/typescript/r/customer_gateway.html.markdown +++ b/website/docs/cdktf/typescript/r/customer_gateway.html.markdown @@ -45,6 +45,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `bgpAsn` - (Optional, Forces new resource) The gateway's Border Gateway Protocol (BGP) Autonomous System Number (ASN). Valid values are from `1` to `2147483647`. Conflicts with `bgpAsnExtended`. * `bgpAsnExtended` - (Optional, Forces new resource) The gateway's Border Gateway Protocol (BGP) Autonomous System Number (ASN). Valid values are from `2147483648` to `4294967295` Conflicts with `bgpAsn`. * `certificateArn` - (Optional) The Amazon Resource Name (ARN) for the customer gateway certificate. 
@@ -90,4 +91,4 @@ Using `terraform import`, import Customer Gateways using the `id`. For example: % terraform import aws_customer_gateway.main cgw-b4dc3961 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/customerprofiles_domain.html.markdown b/website/docs/cdktf/typescript/r/customerprofiles_domain.html.markdown index a186aa4cdaf7..a2acc81c866e 100644 --- a/website/docs/cdktf/typescript/r/customerprofiles_domain.html.markdown +++ b/website/docs/cdktf/typescript/r/customerprofiles_domain.html.markdown @@ -132,6 +132,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `deadLetterQueueUrl` - The URL of the SQS dead letter queue, which is used for reporting errors associated with ingesting data from third party applications. * `defaultEncryptionKey` - The default encryption key, which is an AWS managed key, is used when no specific type of encryption key is specified. It is used to encrypt all data before it is placed in permanent or semi-permanent storage. * `matching` - A block that specifies the process of matching duplicate profiles. [Documented below](#matching). 
@@ -264,4 +265,4 @@ Using `terraform import`, import Amazon Customer Profiles Domain using the resou % terraform import aws_customerprofiles_domain.example e6f777be-22d0-4b40-b307-5d2720ef16b2 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/customerprofiles_profile.html.markdown b/website/docs/cdktf/typescript/r/customerprofiles_profile.html.markdown index 160ee81e58ed..2eed43f1881d 100644 --- a/website/docs/cdktf/typescript/r/customerprofiles_profile.html.markdown +++ b/website/docs/cdktf/typescript/r/customerprofiles_profile.html.markdown @@ -57,6 +57,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `accountNumber` - A unique account number that you have given to the customer. * `additionalInformation` - Any additional information relevant to the customer’s profile. * `address` - A block that specifies a generic address associated with the customer that is not mailing, shipping, or billing. [Documented below](#address). 
@@ -140,4 +141,4 @@ Using `terraform import`, import Amazon Customer Profiles Profile using the reso % terraform import aws_customerprofiles_profile.example domain-name/5f2f473dfbe841eb8d05cfc2a4c926df ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/dataexchange_data_set.html.markdown b/website/docs/cdktf/typescript/r/dataexchange_data_set.html.markdown index cfa07048d9aa..4388f3359b93 100644 --- a/website/docs/cdktf/typescript/r/dataexchange_data_set.html.markdown +++ b/website/docs/cdktf/typescript/r/dataexchange_data_set.html.markdown @@ -40,6 +40,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `assetType` - (Required) The type of asset that is added to a data set. Valid values include `API_GATEWAY_API`, `LAKE_FORMATION_DATA_PERMISSION`, `REDSHIFT_DATA_SHARE`, `S3_DATA_ACCESS`, `S3_SNAPSHOT`. * `description` - (Required) A description for the data set. * `name` - (Required) The name of the data set. @@ -85,4 +86,4 @@ Using `terraform import`, import DataExchange DataSets using their `id`. 
For exa % terraform import aws_dataexchange_data_set.example 4fa784c7-ccb4-4dbf-ba4f-02198320daa1 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/dataexchange_event_action.html.markdown b/website/docs/cdktf/typescript/r/dataexchange_event_action.html.markdown index 41160c6c09eb..aa711b1cc30f 100644 --- a/website/docs/cdktf/typescript/r/dataexchange_event_action.html.markdown +++ b/website/docs/cdktf/typescript/r/dataexchange_event_action.html.markdown @@ -66,6 +66,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `action` - (Required) Describes the action to take. Described in [`action` Configuration Block](#action-configuration-block) below. * `event` - (Required) Describes the event that triggers the `action`. @@ -146,4 +147,4 @@ Using `terraform import`, import Data Exchange Event Action using the id. For ex % terraform import aws_dataexchange_event_action.example example-event-action-id ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/dataexchange_revision.html.markdown b/website/docs/cdktf/typescript/r/dataexchange_revision.html.markdown index 20edb24176bd..5a4a16ffe322 100644 --- a/website/docs/cdktf/typescript/r/dataexchange_revision.html.markdown +++ b/website/docs/cdktf/typescript/r/dataexchange_revision.html.markdown @@ -38,6 +38,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `dataSetId` - (Required) The dataset id. * `comment` - (Required) An optional comment about the revision. * `tags` - (Optional) A map of tags to assign to the resource. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. @@ -83,4 +84,4 @@ Using `terraform import`, import DataExchange Revisions using their `data-set-id % terraform import aws_dataexchange_revision.example 4fa784c7-ccb4-4dbf-ba4f-02198320daa1:4fa784c7-ccb4-4dbf-ba4f-02198320daa1 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/dataexchange_revision_assets.html.markdown b/website/docs/cdktf/typescript/r/dataexchange_revision_assets.html.markdown index 734b8c6e2b69..30157afb137a 100644 --- a/website/docs/cdktf/typescript/r/dataexchange_revision_assets.html.markdown +++ b/website/docs/cdktf/typescript/r/dataexchange_revision_assets.html.markdown @@ -63,6 +63,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `comment` - (Optional) A comment for the revision. Maximum length is 16,348 characters. * `finalize` - (Optional) Finalized a revision. Defaults to `false`. * `force_destoy` - (Optional) Force destroy the revision. Defaults to `false`. @@ -109,4 +110,4 @@ Configuration options: * `create` - (Default 30m) Time to create the revision. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/datapipeline_pipeline.html.markdown b/website/docs/cdktf/typescript/r/datapipeline_pipeline.html.markdown index e46130f5bfca..b8050d383ad5 100644 --- a/website/docs/cdktf/typescript/r/datapipeline_pipeline.html.markdown +++ b/website/docs/cdktf/typescript/r/datapipeline_pipeline.html.markdown @@ -38,6 +38,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The name of Pipeline. * `description` - (Optional) The description of Pipeline. * `tags` - (Optional) A map of tags to assign to the resource. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. 
@@ -81,4 +82,4 @@ Using `terraform import`, import `aws_datapipeline_pipeline` using the id (Pipel % terraform import aws_datapipeline_pipeline.default df-1234567890 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/datapipeline_pipeline_definition.html.markdown b/website/docs/cdktf/typescript/r/datapipeline_pipeline_definition.html.markdown index 03c414f98738..fcdcefe6c079 100644 --- a/website/docs/cdktf/typescript/r/datapipeline_pipeline_definition.html.markdown +++ b/website/docs/cdktf/typescript/r/datapipeline_pipeline_definition.html.markdown @@ -103,6 +103,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `parameterObject` - (Optional) Configuration block for the parameter objects used in the pipeline definition. See below * `parameterValue` - (Optional) Configuration block for the parameter values used in the pipeline definition. 
See below @@ -171,4 +172,4 @@ Using `terraform import`, import `aws_datapipeline_pipeline_definition` using th % terraform import aws_datapipeline_pipeline_definition.example df-1234567890 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/datasync_agent.html.markdown b/website/docs/cdktf/typescript/r/datasync_agent.html.markdown index f9994aba9725..72b26651abd0 100644 --- a/website/docs/cdktf/typescript/r/datasync_agent.html.markdown +++ b/website/docs/cdktf/typescript/r/datasync_agent.html.markdown @@ -57,7 +57,7 @@ class MyConvertedCode extends TerraformStack { const current = new DataAwsRegion(this, "current", {}); const example = new VpcEndpoint(this, "example", { securityGroupIds: [Token.asString(awsSecurityGroupExample.id)], - serviceName: "com.amazonaws.${" + current.name + "}.datasync", + serviceName: "com.amazonaws.${" + current.region + "}.datasync", subnetIds: [Token.asString(awsSubnetExample.id)], vpcEndpointType: "Interface", vpcId: Token.asString(awsVpcExample.id), @@ -94,6 +94,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the DataSync Agent. * `activationKey` - (Optional) DataSync Agent activation key during resource creation. Conflicts with `ipAddress`. If an `ipAddress` is provided instead, Terraform will retrieve the `activationKey` as part of the resource creation. * `ipAddress` - (Optional) DataSync Agent IP address to retrieve activation key during resource creation. Conflicts with `activationKey`. DataSync Agent must be accessible on port 80 from where Terraform is running. 
@@ -119,6 +120,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_datasync_agent.example + identity = { + "arn" = "arn:aws:datasync:us-west-2:123456789012:agent/agent-12345678901234567" + } +} + +resource "aws_datasync_agent" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the DataSync agent. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import `aws_datasync_agent` using the DataSync Agent Amazon Resource Name (ARN). For example: ```typescript @@ -149,4 +171,4 @@ Using `terraform import`, import `aws_datasync_agent` using the DataSync Agent A % terraform import aws_datasync_agent.example arn:aws:datasync:us-east-1:123456789012:agent/agent-12345678901234567 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/datasync_location_azure_blob.html.markdown b/website/docs/cdktf/typescript/r/datasync_location_azure_blob.html.markdown index edbb101aaf33..439f37891be4 100644 --- a/website/docs/cdktf/typescript/r/datasync_location_azure_blob.html.markdown +++ b/website/docs/cdktf/typescript/r/datasync_location_azure_blob.html.markdown @@ -46,6 +46,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
* `accessTier` - (Optional) The access tier that you want your objects or files transferred into. Valid values: `HOT`, `COOL` and `ARCHIVE`. Default: `HOT`. * `agentArns` - (Required) A list of DataSync Agent ARNs with which this location will be associated. * `authenticationType` - (Required) The authentication method DataSync uses to access your Azure Blob Storage. Valid values: `SAS`. @@ -68,6 +69,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_datasync_location_azure_blob.example + identity = { + "arn" = "arn:aws:datasync:us-west-2:123456789012:location/loc-12345678901234567" + } +} + +resource "aws_datasync_location_azure_blob" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the DataSync Azure Blob location. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import `aws_datasync_location_azure_blob` using the Amazon Resource Name (ARN). 
For example: ```typescript @@ -98,4 +120,4 @@ Using `terraform import`, import `aws_datasync_location_azure_blob` using the Am % terraform import aws_datasync_location_azure_blob.example arn:aws:datasync:us-east-1:123456789012:location/loc-12345678901234567 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/datasync_location_efs.html.markdown b/website/docs/cdktf/typescript/r/datasync_location_efs.html.markdown index 5aa490791e71..a3fc327a7bd4 100644 --- a/website/docs/cdktf/typescript/r/datasync_location_efs.html.markdown +++ b/website/docs/cdktf/typescript/r/datasync_location_efs.html.markdown @@ -44,6 +44,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `accessPointArn` - (Optional) Specifies the Amazon Resource Name (ARN) of the access point that DataSync uses to access the Amazon EFS file system. * `ec2Config` - (Required) Configuration block containing EC2 configurations for connecting to the EFS File System. * `efsFileSystemArn` - (Required) Amazon Resource Name (ARN) of EFS File System. @@ -69,6 +70,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. 
For example: + +```terraform +import { + to = aws_datasync_location_efs.example + identity = { + "arn" = "arn:aws:datasync:us-west-2:123456789012:location/loc-12345678901234567" + } +} + +resource "aws_datasync_location_efs" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the DataSync EFS location. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import `aws_datasync_location_efs` using the DataSync Task Amazon Resource Name (ARN). For example: ```typescript @@ -99,4 +121,4 @@ Using `terraform import`, import `aws_datasync_location_efs` using the DataSync % terraform import aws_datasync_location_efs.example arn:aws:datasync:us-east-1:123456789012:location/loc-12345678901234567 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/datasync_location_fsx_lustre_file_system.html.markdown b/website/docs/cdktf/typescript/r/datasync_location_fsx_lustre_file_system.html.markdown index 7f8a726f2c51..6d3d840dcf54 100644 --- a/website/docs/cdktf/typescript/r/datasync_location_fsx_lustre_file_system.html.markdown +++ b/website/docs/cdktf/typescript/r/datasync_location_fsx_lustre_file_system.html.markdown @@ -39,6 +39,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `fsxFilesystemArn` - (Required) The Amazon Resource Name (ARN) for the FSx for Lustre file system. 
* `securityGroupArns` - (Optional) The Amazon Resource Names (ARNs) of the security groups that are used to configure the FSx for Lustre file system. * `subdirectory` - (Optional) Subdirectory to perform actions as source or destination. @@ -86,4 +87,4 @@ Using `terraform import`, import `aws_datasync_location_fsx_lustre_file_system` % terraform import aws_datasync_location_fsx_lustre_file_system.example arn:aws:datasync:us-west-2:123456789012:location/loc-12345678901234567#arn:aws:fsx:us-west-2:476956259333:file-system/fs-08e04cd442c1bb94a ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/datasync_location_fsx_ontap_file_system.html.markdown b/website/docs/cdktf/typescript/r/datasync_location_fsx_ontap_file_system.html.markdown index 71282dc66930..60b90ff9c603 100644 --- a/website/docs/cdktf/typescript/r/datasync_location_fsx_ontap_file_system.html.markdown +++ b/website/docs/cdktf/typescript/r/datasync_location_fsx_ontap_file_system.html.markdown @@ -59,6 +59,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `subdirectory` - (Optional) Path to the file share in the SVM where you'll copy your data. You can specify a junction path (also known as a mount point), qtree path (for NFS file shares), or share name (for SMB file shares) (e.g. `/vol1`, `/vol1/tree1`, `share1`). * `tags` - (Optional) Key-value pairs of resource tags to assign to the DataSync Location.
If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. @@ -126,4 +127,4 @@ Using `terraform import`, import `aws_datasync_location_fsx_ontap_file_system` u % terraform import aws_datasync_location_fsx_ontap_file_system.example arn:aws:datasync:us-west-2:123456789012:location/loc-12345678901234567#arn:aws:fsx:us-west-2:123456789012:storage-virtual-machine/svm-12345678abcdef123 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/datasync_location_fsx_openzfs_file_system.html.markdown b/website/docs/cdktf/typescript/r/datasync_location_fsx_openzfs_file_system.html.markdown index b97fa58a3f2a..3a42970588e9 100644 --- a/website/docs/cdktf/typescript/r/datasync_location_fsx_openzfs_file_system.html.markdown +++ b/website/docs/cdktf/typescript/r/datasync_location_fsx_openzfs_file_system.html.markdown @@ -46,6 +46,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `fsxFilesystemArn` - (Required) The Amazon Resource Name (ARN) for the FSx for OpenZFS file system. * `protocol` - (Required) The type of protocol that DataSync uses to access your file system. See below. * `securityGroupArns` - (Optional) The Amazon Resource Names (ARNs) of the security groups that are used to configure the FSx for OpenZFS file system.
@@ -106,4 +107,4 @@ Using `terraform import`, import `aws_datasync_location_fsx_openzfs_file_system` % terraform import aws_datasync_location_fsx_openzfs_file_system.example arn:aws:datasync:us-west-2:123456789012:location/loc-12345678901234567#arn:aws:fsx:us-west-2:123456789012:file-system/fs-08e04cd442c1bb94a ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/datasync_location_fsx_windows_file_system.html.markdown b/website/docs/cdktf/typescript/r/datasync_location_fsx_windows_file_system.html.markdown index 5a5758ed8990..6a9657f1ee6f 100644 --- a/website/docs/cdktf/typescript/r/datasync_location_fsx_windows_file_system.html.markdown +++ b/website/docs/cdktf/typescript/r/datasync_location_fsx_windows_file_system.html.markdown @@ -41,6 +41,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `fsxFilesystemArn` - (Required) The Amazon Resource Name (ARN) for the FSx for Windows file system. * `password` - (Required) The password of the user who has the permissions to access files and folders in the FSx for Windows file system. * `user` - (Required) The user who has the permissions to access files and folders in the FSx for Windows file system. 
@@ -91,4 +92,4 @@ Using `terraform import`, import `aws_datasync_location_fsx_windows_file_system` % terraform import aws_datasync_location_fsx_windows_file_system.example arn:aws:datasync:us-west-2:123456789012:location/loc-12345678901234567#arn:aws:fsx:us-west-2:476956259333:file-system/fs-08e04cd442c1bb94a ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/datasync_location_hdfs.html.markdown b/website/docs/cdktf/typescript/r/datasync_location_hdfs.html.markdown index 3fb676c106cb..b9b11fda8171 100644 --- a/website/docs/cdktf/typescript/r/datasync_location_hdfs.html.markdown +++ b/website/docs/cdktf/typescript/r/datasync_location_hdfs.html.markdown @@ -80,6 +80,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `agentArns` - (Required) A list of DataSync Agent ARNs with which this location will be associated. * `authenticationType` - (Required) The type of authentication used to determine the identity of the user. Valid values are `SIMPLE` and `KERBEROS`. * `blockSize` - (Optional) The size of data blocks to write into the HDFS cluster. The block size must be a multiple of 512 bytes. The default block size is 128 mebibytes (MiB). @@ -115,6 +116,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. 
For example: + +```terraform +import { + to = aws_datasync_location_hdfs.example + identity = { + "arn" = "arn:aws:datasync:us-west-2:123456789012:location/loc-12345678901234567" + } +} + +resource "aws_datasync_location_hdfs" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the DataSync HDFS location. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import `aws_datasync_location_hdfs` using the Amazon Resource Name (ARN). For example: ```typescript @@ -145,4 +167,4 @@ Using `terraform import`, import `aws_datasync_location_hdfs` using the Amazon R % terraform import aws_datasync_location_hdfs.example arn:aws:datasync:us-east-1:123456789012:location/loc-12345678901234567 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/datasync_location_nfs.html.markdown b/website/docs/cdktf/typescript/r/datasync_location_nfs.html.markdown index 66fc7a0f3ee3..3ad66962e825 100644 --- a/website/docs/cdktf/typescript/r/datasync_location_nfs.html.markdown +++ b/website/docs/cdktf/typescript/r/datasync_location_nfs.html.markdown @@ -44,6 +44,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `mountOptions` - (Optional) Configuration block containing mount options used by DataSync to access the NFS Server. * `onPremConfig` - (Required) Configuration block containing information for connecting to the NFS File System. * `serverHostname` - (Required) Specifies the IP address or DNS name of the NFS server. 
The DataSync Agent(s) use this to mount the NFS server. @@ -72,6 +73,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_datasync_location_nfs.example + identity = { + "arn" = "arn:aws:datasync:us-west-2:123456789012:location/loc-12345678901234567" + } +} + +resource "aws_datasync_location_nfs" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the DataSync NFS location. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import `aws_datasync_location_nfs` using the DataSync Location Amazon Resource Name (ARN). For example: ```typescript @@ -102,4 +124,4 @@ Using `terraform import`, import `aws_datasync_location_nfs` using the DataSync % terraform import aws_datasync_location_nfs.example arn:aws:datasync:us-east-1:123456789012:location/loc-12345678901234567 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/datasync_location_object_storage.html.markdown b/website/docs/cdktf/typescript/r/datasync_location_object_storage.html.markdown index ba2fc2d3bd18..6bde6268ce4e 100644 --- a/website/docs/cdktf/typescript/r/datasync_location_object_storage.html.markdown +++ b/website/docs/cdktf/typescript/r/datasync_location_object_storage.html.markdown @@ -42,7 +42,8 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: -* `agentArns` - (Required) A list of DataSync Agent ARNs with which this location will be associated. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints).
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `agentArns` - (Optional) A list of DataSync Agent ARNs with which this location will be associated. For agentless cross-cloud transfers, this parameter does not need to be specified. * `accessKey` - (Optional) The access key is used if credentials are required to access the self-managed object storage server. If your object storage requires a user name and password to authenticate, use `accessKey` and `secretKey` to provide the user name and password, respectively. * `bucketName` - (Required) The bucket on the self-managed object storage server that is used to read data from. * `secretKey` - (Optional) The secret key is used if credentials are required to access the self-managed object storage server. If your object storage requires a user name and password to authenticate, use `accessKey` and `secretKey` to provide the user name and password, respectively. @@ -63,6 +64,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_datasync_location_object_storage.example + identity = { + "arn" = "arn:aws:datasync:us-west-2:123456789012:location/loc-12345678901234567" + } +} + +resource "aws_datasync_location_object_storage" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the DataSync object storage location. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import `aws_datasync_location_object_storage` using the Amazon Resource Name (ARN). 
For example: ```typescript @@ -93,4 +115,4 @@ Using `terraform import`, import `aws_datasync_location_object_storage` using th % terraform import aws_datasync_location_object_storage.example arn:aws:datasync:us-east-1:123456789012:location/loc-12345678901234567 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/datasync_location_s3.html.markdown b/website/docs/cdktf/typescript/r/datasync_location_s3.html.markdown index e8ee61e23ba2..641e12f72350 100644 --- a/website/docs/cdktf/typescript/r/datasync_location_s3.html.markdown +++ b/website/docs/cdktf/typescript/r/datasync_location_s3.html.markdown @@ -72,6 +72,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `agentArns` - (Optional) (Amazon S3 on Outposts only) Amazon Resource Name (ARN) of the DataSync agent on the Outpost. * `s3BucketArn` - (Required) Amazon Resource Name (ARN) of the S3 bucket, or the Amazon S3 access point if the S3 bucket is located on an AWS Outposts resource. * `s3Config` - (Required) Configuration block containing information for connecting to S3. @@ -95,6 +96,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. 
For example: + +```terraform +import { + to = aws_datasync_location_s3.example + identity = { + "arn" = "arn:aws:datasync:us-west-2:123456789012:location/loc-12345678901234567" + } +} + +resource "aws_datasync_location_s3" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the DataSync S3 location. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import `aws_datasync_location_s3` using the DataSync Task Amazon Resource Name (ARN). For example: ```typescript @@ -125,4 +147,4 @@ Using `terraform import`, import `aws_datasync_location_s3` using the DataSync T % terraform import aws_datasync_location_s3.example arn:aws:datasync:us-east-1:123456789012:location/loc-12345678901234567 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/datasync_location_smb.html.markdown b/website/docs/cdktf/typescript/r/datasync_location_smb.html.markdown index 2f6473d78cf8..5c7104d344ba 100644 --- a/website/docs/cdktf/typescript/r/datasync_location_smb.html.markdown +++ b/website/docs/cdktf/typescript/r/datasync_location_smb.html.markdown @@ -44,6 +44,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `agentArns` - (Required) A list of DataSync Agent ARNs with which this location will be associated. * `domain` - (Optional) The name of the Windows domain the SMB server belongs to. * `mountOptions` - (Optional) Configuration block containing mount options used by DataSync to access the SMB Server. 
Can be `AUTOMATIC`, `SMB2`, or `SMB3`. @@ -68,6 +69,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_datasync_location_smb.example + identity = { + "arn" = "arn:aws:datasync:us-west-2:123456789012:location/loc-12345678901234567" + } +} + +resource "aws_datasync_location_smb" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the DataSync SMB location. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import `aws_datasync_location_smb` using the Amazon Resource Name (ARN). For example: ```typescript @@ -98,4 +120,4 @@ Using `terraform import`, import `aws_datasync_location_smb` using the Amazon Re % terraform import aws_datasync_location_smb.example arn:aws:datasync:us-east-1:123456789012:location/loc-12345678901234567 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/datasync_task.html.markdown b/website/docs/cdktf/typescript/r/datasync_task.html.markdown index 70f644351ca2..9cd1ec52ea08 100644 --- a/website/docs/cdktf/typescript/r/datasync_task.html.markdown +++ b/website/docs/cdktf/typescript/r/datasync_task.html.markdown @@ -133,6 +133,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
* `destinationLocationArn` - (Required) Amazon Resource Name (ARN) of destination DataSync Location. * `sourceLocationArn` - (Required) Amazon Resource Name (ARN) of source DataSync Location. * `cloudwatchLogGroupArn` - (Optional) Amazon Resource Name (ARN) of the CloudWatch Log Group that is used to monitor and log events in the sync task. @@ -228,6 +229,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_datasync_task.example + identity = { + "arn" = "arn:aws:datasync:us-west-2:123456789012:task/task-12345678901234567" + } +} + +resource "aws_datasync_task" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the DataSync task. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import `aws_datasync_task` using the DataSync Task Amazon Resource Name (ARN). 
For example: ```typescript @@ -258,4 +280,4 @@ Using `terraform import`, import `aws_datasync_task` using the DataSync Task Ama % terraform import aws_datasync_task.example arn:aws:datasync:us-east-1:123456789012:task/task-12345678901234567 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/datazone_asset_type.html.markdown b/website/docs/cdktf/typescript/r/datazone_asset_type.html.markdown index 55162ddcfd7b..58784e061fec 100644 --- a/website/docs/cdktf/typescript/r/datazone_asset_type.html.markdown +++ b/website/docs/cdktf/typescript/r/datazone_asset_type.html.markdown @@ -49,6 +49,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` - (Optional) The description of the custom asset type. * `formsInput` - (Optional) The metadata forms that are to be attached to the custom asset type. 
@@ -98,4 +99,4 @@ Using `terraform import`, import DataZone Asset Type using the `domain_identifie % terraform import aws_datazone_asset_type.example domain-id-12345678,example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/datazone_domain.html.markdown b/website/docs/cdktf/typescript/r/datazone_domain.html.markdown index 96f65b8fd5f2..c8281abddc6d 100644 --- a/website/docs/cdktf/typescript/r/datazone_domain.html.markdown +++ b/website/docs/cdktf/typescript/r/datazone_domain.html.markdown @@ -26,6 +26,7 @@ import { Fn, Token, TerraformStack } from "cdktf"; */ import { DatazoneDomain } from "./.gen/providers/aws/datazone-domain"; import { IamRole } from "./.gen/providers/aws/iam-role"; +import { IamRolePolicy } from "./.gen/providers/aws/iam-role-policy"; class MyConvertedCode extends TerraformStack { constructor(scope: Construct, name: string) { super(scope, name); @@ -51,25 +52,31 @@ class MyConvertedCode extends TerraformStack { Version: "2012-10-17", }) ), - inlinePolicy: [ - { - name: "domain_execution_policy", - policy: Token.asString( - Fn.jsonencode({ - Statement: [ - { - Action: ["datazone:*", "ram:*", "sso:*", "kms:*"], - Effect: "Allow", - Resource: "*", - }, - ], - Version: "2012-10-17", - }) - ), - }, - ], name: "my_domain_execution_role", }); + const awsIamRolePolicyDomainExecutionRole = new IamRolePolicy( + this, + "domain_execution_role_1", + { + policy: Token.asString( + Fn.jsonencode({ + Statement: [ + { + Action: ["datazone:*", "ram:*", "sso:*", "kms:*"], + Effect: "Allow", + Resource: "*", + }, + ], + Version: "2012-10-17", + }) + ), + role: domainExecutionRole.name, + } + ); + /*This allows the Terraform resource name to match the original name. 
You can remove the call if you don't need them to match.*/ + awsIamRolePolicyDomainExecutionRole.overrideLogicalId( + "domain_execution_role" + ); new DatazoneDomain(this, "example", { domainExecutionRole: domainExecutionRole.arn, name: "example", @@ -79,6 +86,131 @@ class MyConvertedCode extends TerraformStack { ``` +### V2 Domain + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { Token, TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. + */ +import { DataAwsCallerIdentity } from "./.gen/providers/aws/data-aws-caller-identity"; +import { DataAwsIamPolicy } from "./.gen/providers/aws/data-aws-iam-policy"; +import { DataAwsIamPolicyDocument } from "./.gen/providers/aws/data-aws-iam-policy-document"; +import { DatazoneDomain } from "./.gen/providers/aws/datazone-domain"; +import { IamRole } from "./.gen/providers/aws/iam-role"; +import { IamRolePolicyAttachment } from "./.gen/providers/aws/iam-role-policy-attachment"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + const current = new DataAwsCallerIdentity(this, "current", {}); + const domainExecutionRole = new DataAwsIamPolicy( + this, + "domain_execution_role", + { + name: "SageMakerStudioDomainExecutionRolePolicy", + } + ); + const domainServiceRole = new DataAwsIamPolicy( + this, + "domain_service_role", + { + name: "SageMakerStudioDomainServiceRolePolicy", + } + ); + const assumeRoleDomainExecution = new DataAwsIamPolicyDocument( + this, + "assume_role_domain_execution", + { + statement: [ + { + actions: ["sts:AssumeRole", "sts:TagSession", "sts:SetContext"], + condition: [ + { + test: "StringEquals", + values: [Token.asString(current.accountId)], + variable: "aws:SourceAccount", + }, + { + test: "ForAllValues:StringLike", + 
values: ["datazone*"], + variable: "aws:TagKeys", + }, + ], + principals: [ + { + identifiers: ["datazone.amazonaws.com"], + type: "Service", + }, + ], + }, + ], + } + ); + const assumeRoleDomainService = new DataAwsIamPolicyDocument( + this, + "assume_role_domain_service", + { + statement: [ + { + actions: ["sts:AssumeRole"], + condition: [ + { + test: "StringEquals", + values: [Token.asString(current.accountId)], + variable: "aws:SourceAccount", + }, + ], + principals: [ + { + identifiers: ["datazone.amazonaws.com"], + type: "Service", + }, + ], + }, + ], + } + ); + const domainExecution = new IamRole(this, "domain_execution", { + assumeRolePolicy: Token.asString(assumeRoleDomainExecution.json), + name: "example-domain-execution-role", + }); + const domainService = new IamRole(this, "domain_service", { + assumeRolePolicy: Token.asString(assumeRoleDomainService.json), + name: "example-domain-service-role", + }); + const awsIamRolePolicyAttachmentDomainExecution = + new IamRolePolicyAttachment(this, "domain_execution_7", { + policyArn: Token.asString(domainExecutionRole.arn), + role: domainExecution.name, + }); + /*This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match.*/ + awsIamRolePolicyAttachmentDomainExecution.overrideLogicalId( + "domain_execution" + ); + const awsIamRolePolicyAttachmentDomainService = new IamRolePolicyAttachment( + this, + "domain_service_8", + { + policyArn: Token.asString(domainServiceRole.arn), + role: domainService.name, + } + ); + /*This allows the Terraform resource name to match the original name. 
You can remove the call if you don't need them to match.*/ + awsIamRolePolicyAttachmentDomainService.overrideLogicalId("domain_service"); + new DatazoneDomain(this, "example", { + domainExecutionRole: domainExecution.arn, + domainVersion: "V2", + name: "example-domain", + serviceRole: domainService.arn, + }); + } +} + +``` + ## Argument Reference The following arguments are required: @@ -88,8 +220,11 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` - (Optional) Description of the Domain. +* `domainVersion` - (Optional) Version of the Domain. Valid values are `V1` and `V2`. Defaults to `V1`. * `kmsKeyIdentifier` - (Optional) ARN of the KMS key used to encrypt the Amazon DataZone domain, metadata and reporting data. +* `serviceRole` - (Optional) ARN of the service role used by DataZone. Required when `domainVersion` is set to `V2`. * `singleSignOn` - (Optional) Single sign on options, used to [enable AWS IAM Identity Center](https://docs.aws.amazon.com/datazone/latest/userguide/enable-IAM-identity-center-for-datazone.html) for DataZone. * `skipDeletionCheck` - (Optional) Whether to skip the deletion check for the Domain. @@ -141,4 +276,4 @@ Using `terraform import`, import DataZone Domain using the `domainId`. 
For examp % terraform import aws_datazone_domain.example domain-id-12345678 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/datazone_environment.html.markdown b/website/docs/cdktf/typescript/r/datazone_environment.html.markdown index e0f27360ce8d..ccdc8f7dc1eb 100644 --- a/website/docs/cdktf/typescript/r/datazone_environment.html.markdown +++ b/website/docs/cdktf/typescript/r/datazone_environment.html.markdown @@ -69,6 +69,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `accountIdentifier` - (Optional) The ID of the Amazon Web Services account where the environment exists * `accountRegion` - (Optional) The Amazon Web Services region where the environment exists. * `blueprintIdentifier` - (Optional) The blueprint with which the environment is created. 
@@ -132,4 +133,4 @@ Using `terraform import`, import DataZone Environment using the `domain_idntifie % terraform import aws_datazone_environment.example dzd_d2i7tzk3tnjjf4,5vpywijpwryec0 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/datazone_environment_blueprint_configuration.html.markdown b/website/docs/cdktf/typescript/r/datazone_environment_blueprint_configuration.html.markdown index 3df4d2797873..c259ca4eaf3b 100644 --- a/website/docs/cdktf/typescript/r/datazone_environment_blueprint_configuration.html.markdown +++ b/website/docs/cdktf/typescript/r/datazone_environment_blueprint_configuration.html.markdown @@ -73,6 +73,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `manageAccessRoleArn` - (Optional) ARN of the manage access role with which this blueprint is created. * `provisioningRoleArn` - (Optional) ARN of the provisioning role with which this blueprint is created. 
* `regionalParameters` - (Optional) Parameters for each region in which the blueprint is enabled @@ -113,4 +114,4 @@ Using `terraform import`, import DataZone Environment Blueprint Configuration us % terraform import aws_datazone_environment_blueprint_configuration.example domain-id-12345/environment-blueprint-id-54321 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/datazone_environment_profile.html.markdown b/website/docs/cdktf/typescript/r/datazone_environment_profile.html.markdown index 6f0e5e1921b7..fe1f148ccdff 100644 --- a/website/docs/cdktf/typescript/r/datazone_environment_profile.html.markdown +++ b/website/docs/cdktf/typescript/r/datazone_environment_profile.html.markdown @@ -156,6 +156,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `awsAccountId` - (Required) - Id of the AWS account being used. * `awsAccountRegion` - (Required) - Desired region for environment profile. * `domainIdentifier` - (Required) - Domain Identifier for environment profile. 
@@ -208,4 +209,4 @@ Using `terraform import`, import DataZone Environment Profile using a comma-deli % terraform import aws_datazone_environment_profile.example environment_profile-id-12345678,domain-id-12345678 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/datazone_form_type.html.markdown b/website/docs/cdktf/typescript/r/datazone_form_type.html.markdown index 263832536013..8b229578561d 100644 --- a/website/docs/cdktf/typescript/r/datazone_form_type.html.markdown +++ b/website/docs/cdktf/typescript/r/datazone_form_type.html.markdown @@ -123,6 +123,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` - (Optional) Description of form type. Must have a length of between 1 and 2048 characters. * `status` - (Optional) Status of form type. Must be "ENABLED" or "DISABLED". If status is set to "ENABLED", Terraform cannot delete the resource until it is manually changed in the AWS console.
@@ -169,4 +170,4 @@ Using `terraform import`, import DataZone Form Type using a comma separated valu % terraform import aws_datazone_form_type.example domain_identifier,name,revision ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/datazone_glossary.html.markdown b/website/docs/cdktf/typescript/r/datazone_glossary.html.markdown index 83d75b996c75..bedf5850cc69 100644 --- a/website/docs/cdktf/typescript/r/datazone_glossary.html.markdown +++ b/website/docs/cdktf/typescript/r/datazone_glossary.html.markdown @@ -137,6 +137,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` - (Optional) Description of the glossary. Must have a length between 0 and 4096. * `status` - (Optional) Status of business glossary. Valid values are DISABLED and ENABLED. @@ -148,7 +149,7 @@ This resource exports the following attributes in addition to the arguments abov ## Import -In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import DataZone Glossary using the `example_id_arg`. For example: +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import DataZone Glossary using a comma-delimited string combining the domain id, glossary id, and the id of the project it's under. For example: ```typescript // DO NOT EDIT. 
Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug @@ -178,4 +179,4 @@ Using `terraform import`, import DataZone Glossary using the import Datazone Glo % terraform import aws_datazone_glossary.example domain-id,glossary-id,owning-project-identifier ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/datazone_glossary_term.html.markdown b/website/docs/cdktf/typescript/r/datazone_glossary_term.html.markdown index 29c3e7e6b316..0f9f8e34bcf7 100644 --- a/website/docs/cdktf/typescript/r/datazone_glossary_term.html.markdown +++ b/website/docs/cdktf/typescript/r/datazone_glossary_term.html.markdown @@ -128,6 +128,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `longDescription` - (Optional) Long description of entry. * `shortDescription` - (Optional) Short description of entry. * `status` - (Optional) If glossary term is ENABLED or DISABLED. 
@@ -181,4 +182,4 @@ Using `terraform import`, import DataZone Glossary Term using a comma-delimited % terraform import aws_datazone_glossary_term.example domain-id,glossary-term-id,glossary-id ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/datazone_project.html.markdown b/website/docs/cdktf/typescript/r/datazone_project.html.markdown index d998da3ea400..d1096798d49a 100644 --- a/website/docs/cdktf/typescript/r/datazone_project.html.markdown +++ b/website/docs/cdktf/typescript/r/datazone_project.html.markdown @@ -73,6 +73,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `skipDeletionCheck` - (Optional) Optional flag to delete all child entities within the project. * `description` - (Optional) Description of project. * `glossaryTerms` - (Optional) List of glossary terms that can be used in the project. The list cannot be empty or include over 20 values. Each value must follow the regex of `[a-zA-Z0-9_-]{1,36}$`. 
@@ -131,4 +132,4 @@ Using `terraform import`, import DataZone Project using a colon-delimited string % terraform import aws_datazone_project.example domain-1234:project-1234 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/datazone_user_profile.html.markdown b/website/docs/cdktf/typescript/r/datazone_user_profile.html.markdown index 98f84d96c5f2..dea68fe859c5 100644 --- a/website/docs/cdktf/typescript/r/datazone_user_profile.html.markdown +++ b/website/docs/cdktf/typescript/r/datazone_user_profile.html.markdown @@ -47,6 +47,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `status` - (Optional) The user profile status. * `userType` - (Optional) The user type. @@ -97,4 +98,4 @@ Using `terraform import`, import DataZone User Profile using the `user_identifie % terraform import aws_datazone_user_profile.example arn:aws:iam::123456789012:user/example,dzd_54nakfrg9k6suo,IAM ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/dax_cluster.html.markdown b/website/docs/cdktf/typescript/r/dax_cluster.html.markdown index 24bfe6eb4d78..5aa3d922b4b9 100644 --- a/website/docs/cdktf/typescript/r/dax_cluster.html.markdown +++ b/website/docs/cdktf/typescript/r/dax_cluster.html.markdown @@ -41,49 +41,37 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: -* `clusterEndpointEncryptionType` – (Optional) The type of encryption the +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `clusterEndpointEncryptionType` - (Optional) The type of encryption the cluster's endpoint should support. Valid values are: `NONE` and `TLS`. Default value is `NONE`. - -* `clusterName` – (Required) Group identifier. DAX converts this name to +* `clusterName` - (Required) Group identifier. DAX converts this name to lowercase - * `iamRoleArn` - (Required) A valid Amazon Resource Name (ARN) that identifies an IAM role. At runtime, DAX will assume this role and use the role's permissions to access DynamoDB on your behalf - -* `nodeType` – (Required) The compute and memory capacity of the nodes. See +* `nodeType` - (Required) The compute and memory capacity of the nodes. See [Nodes][1] for supported node types - -* `replicationFactor` – (Required) The number of nodes in the DAX cluster. A +* `replicationFactor` - (Required) The number of nodes in the DAX cluster. A replication factor of 1 will create a single-node cluster, without any read replicas - * `availabilityZones` - (Optional) List of Availability Zones in which the nodes will be created - -* `description` – (Optional) Description for the cluster - -* `notificationTopicArn` – (Optional) An Amazon Resource Name (ARN) of an +* `description` - (Optional) Description for the cluster +* `notificationTopicArn` - (Optional) An Amazon Resource Name (ARN) of an SNS topic to send DAX notifications to. Example: `arn:aws:sns:us-east-1:012345678999:my_sns_topic` - -* `parameterGroupName` – (Optional) Name of the parameter group to associate +* `parameterGroupName` - (Optional) Name of the parameter group to associate with this DAX cluster - -* `maintenanceWindow` – (Optional) Specifies the weekly time range for when +* `maintenanceWindow` - (Optional) Specifies the weekly time range for when maintenance on the cluster is performed. 
The format is `ddd:hh24:mi-ddd:hh24:mi` (24H Clock UTC). The minimum maintenance window is a 60 minute period. Example: `sun:05:00-sun:09:00` - -* `securityGroupIds` – (Optional) One or more VPC security groups associated +* `securityGroupIds` - (Optional) One or more VPC security groups associated with the cluster - * `serverSideEncryption` - (Optional) Encrypt at rest options - -* `subnetGroupName` – (Optional) Name of the subnet group to be used for the +* `subnetGroupName` - (Optional) Name of the subnet group to be used for the cluster - * `tags` - (Optional) A map of tags to assign to the resource. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. The `serverSideEncryption` object supports the following: @@ -147,4 +135,4 @@ Using `terraform import`, import DAX Clusters using the `clusterName`. For examp [1]: http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/DAX.concepts.cluster.html#DAX.concepts.nodes - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/dax_parameter_group.html.markdown b/website/docs/cdktf/typescript/r/dax_parameter_group.html.markdown index 6aceb51460d1..2907ab71ac1a 100644 --- a/website/docs/cdktf/typescript/r/dax_parameter_group.html.markdown +++ b/website/docs/cdktf/typescript/r/dax_parameter_group.html.markdown @@ -48,11 +48,10 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: -* `name` – (Required) The name of the parameter group. - +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `name` - (Required) The name of the parameter group. * `description` - (Optional, ForceNew) A description of the parameter group. - -* `parameters` – (Optional) The parameters of the parameter group. +* `parameters` - (Optional) The parameters of the parameter group. ## parameters @@ -95,4 +94,4 @@ Using `terraform import`, import DAX Parameter Group using the `name`. For examp % terraform import aws_dax_parameter_group.example my_dax_pg ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/dax_subnet_group.html.markdown b/website/docs/cdktf/typescript/r/dax_subnet_group.html.markdown index e964065ad681..9f09384cb7a6 100644 --- a/website/docs/cdktf/typescript/r/dax_subnet_group.html.markdown +++ b/website/docs/cdktf/typescript/r/dax_subnet_group.html.markdown @@ -39,16 +39,17 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: -* `name` – (Required) The name of the subnet group. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `name` - (Required) The name of the subnet group. * `description` - (Optional) A description of the subnet group. -* `subnetIds` – (Required) A list of VPC subnet IDs for the subnet group. +* `subnetIds` - (Required) A list of VPC subnet IDs for the subnet group. ## Attribute Reference This resource exports the following attributes in addition to the arguments above: * `id` - The name of the subnet group. -* `vpcId` – VPC ID of the subnet group. +* `vpcId` - VPC ID of the subnet group. 
## Import @@ -78,4 +79,4 @@ Using `terraform import`, import DAX Subnet Group using the `name`. For example: % terraform import aws_dax_subnet_group.example my_dax_sg ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/db_cluster_snapshot.html.markdown b/website/docs/cdktf/typescript/r/db_cluster_snapshot.html.markdown index bd87cfcb8353..3c17a9d9ad1a 100644 --- a/website/docs/cdktf/typescript/r/db_cluster_snapshot.html.markdown +++ b/website/docs/cdktf/typescript/r/db_cluster_snapshot.html.markdown @@ -39,6 +39,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `dbClusterIdentifier` - (Required) The DB Cluster Identifier from which to take the snapshot. * `dbClusterSnapshotIdentifier` - (Required) The Identifier for the snapshot. * `sharedAccounts` - (Optional) List of AWS Account IDs to share the snapshot with. Use `all` to make the snapshot public. @@ -57,7 +58,7 @@ This resource exports the following attributes in addition to the arguments abov * `licenseModel` - License model information for the restored DB cluster. * `port` - Port that the DB cluster was listening on at the time of the snapshot. -* `source_db_cluster_snapshot_identifier` - DB Cluster Snapshot ARN that the DB Cluster Snapshot was copied from. It only has value in case of cross customer or cross region copy. +* `sourceDbClusterSnapshotIdentifier` - DB Cluster Snapshot ARN that the DB Cluster Snapshot was copied from. It only has value in case of cross customer or cross region copy. * `storageEncrypted` - Whether the DB cluster snapshot is encrypted. 
* `status` - The status of this DB Cluster Snapshot. * `tagsAll` - A map of tags assigned to the resource, including those inherited from the provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). @@ -101,4 +102,4 @@ Using `terraform import`, import `aws_db_cluster_snapshot` using the cluster sna % terraform import aws_db_cluster_snapshot.example my-cluster-snapshot ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/db_event_subscription.html.markdown b/website/docs/cdktf/typescript/r/db_event_subscription.html.markdown index 30bcca9c235a..42717279fe3d 100644 --- a/website/docs/cdktf/typescript/r/db_event_subscription.html.markdown +++ b/website/docs/cdktf/typescript/r/db_event_subscription.html.markdown @@ -77,6 +77,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Optional) The name of the DB event subscription. By default generated by Terraform. * `namePrefix` - (Optional) The name of the DB event subscription. Conflicts with `name`. * `snsTopic` - (Required) The SNS topic to send events to. @@ -135,4 +136,4 @@ Using `terraform import`, import DB Event Subscriptions using the `name`. 
For ex % terraform import aws_db_event_subscription.default rds-event-sub ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/db_instance.html.markdown b/website/docs/cdktf/typescript/r/db_instance.html.markdown index 8c7fea7849da..c11a727a4c24 100644 --- a/website/docs/cdktf/typescript/r/db_instance.html.markdown +++ b/website/docs/cdktf/typescript/r/db_instance.html.markdown @@ -29,7 +29,7 @@ See the AWS Docs on [RDS Instance Maintenance][instance-maintenance] for more in ~> **Note:** All arguments including the username and password will be stored in the raw state as plain-text. [Read more about sensitive data instate](https://www.terraform.io/docs/state/sensitive-data.html). --> **Note:** Write-Only argument `passwordWo` is available to use in place of `password`. Write-Only arguments are supported in HashiCorp Terraform 1.11.0 and later. [Learn more](https://developer.hashicorp.com/terraform/language/v1.11.x/resources/ephemeral#write-only-arguments). +-> **Note:** Write-Only argument `passwordWo` is available to use in place of `password`. Write-Only arguments are supported in HashiCorp Terraform 1.11.0 and later. [Learn more](https://developer.hashicorp.com/terraform/language/resources/ephemeral#write-only-arguments). > **Hands-on:** Try the [Manage AWS RDS Instances](https://learn.hashicorp.com/tutorials/terraform/aws-rds) tutorial on HashiCorp Learn. @@ -395,6 +395,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `allocatedStorage` - (Required unless a `snapshotIdentifier` or `replicateSourceDb` is provided) The allocated storage in gibibytes. 
If `maxAllocatedStorage` is configured, this argument represents the initial storage allocation and differences from the configuration will be ignored automatically when Storage Autoscaling occurs. If `replicateSourceDb` is set, the value is ignored during the creation of the instance. * `allowMajorVersionUpgrade` - (Optional) Indicates that major version upgrades are allowed. Changing this parameter does not result in an outage and @@ -424,7 +425,7 @@ Defaults to true. See [Oracle Character Sets Supported in Amazon RDS](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Appendix.OracleCharacterSets.html) or [Server-Level Collation for Microsoft SQL Server](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Appendix.SQLServer.CommonDBATasks.Collation.html) for more information. Cannot be set with `replicateSourceDb`, `restoreToPointInTime`, `s3Import`, or `snapshotIdentifier`. -* `copyTagsToSnapshot` – (Optional, boolean) Copy all Instance `tags` to snapshots. Default is `false`. +* `copyTagsToSnapshot` - (Optional, boolean) Copy all Instance `tags` to snapshots. Default is `false`. * `customIamInstanceProfile` - (Optional) The instance profile associated with the underlying Amazon EC2 instance of an RDS Custom DB instance. * `databaseInsightsMode` - (Optional) The mode of Database Insights that is enabled for the instance. Valid values: `standard`, `advanced` . * `dbName` - (Optional) The name of the database to create when the DB instance is created. If this parameter is not specified, no database is created in the DB instance. Note that this does not apply for Oracle or SQL Server engines. See the [AWS documentation](https://awscli.amazonaws.com/v2/documentation/api/latest/reference/rds/create-db-instance.html) for more details on what applies for those engines. If you are providing an Oracle db name, it needs to be in all upper case. Cannot be specified for a replica. 
@@ -709,4 +710,4 @@ Using `terraform import`, import DB Instances using the `identifier`. For exampl % terraform import aws_db_instance.default mydb-rds-instance ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/db_instance_automated_backups_replication.html.markdown b/website/docs/cdktf/typescript/r/db_instance_automated_backups_replication.html.markdown index 8f3b4991f3e3..a7a9db14f05e 100644 --- a/website/docs/cdktf/typescript/r/db_instance_automated_backups_replication.html.markdown +++ b/website/docs/cdktf/typescript/r/db_instance_automated_backups_replication.html.markdown @@ -125,6 +125,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `kmsKeyId` - (Optional, Forces new resource) The AWS KMS key identifier for encryption of the replicated automated backups. The KMS key ID is the Amazon Resource Name (ARN) for the KMS encryption key in the destination AWS Region, for example, `arn:aws:kms:us-east-1:123456789012:key/AKIAIOSFODNN7EXAMPLE`. * `preSignedUrl` - (Optional, Forces new resource) A URL that contains a [Signature Version 4](https://docs.aws.amazon.com/general/latest/gr/signature-version-4.html) signed request for the [`StartDBInstanceAutomatedBackupsReplication`](https://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_StartDBInstanceAutomatedBackupsReplication.html) action to be called in the AWS Region of the source DB instance. * `retentionPeriod` - (Optional, Forces new resource) The retention period for the replicated automated backups, defaults to `7`. 
@@ -175,4 +176,4 @@ Using `terraform import`, import RDS instance automated backups replication usin % terraform import aws_db_instance_automated_backups_replication.default arn:aws:rds:us-east-1:123456789012:auto-backup:ab-faaa2mgdj1vmp4xflr7yhsrmtbtob7ltrzzz2my ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/db_instance_role_association.html.markdown b/website/docs/cdktf/typescript/r/db_instance_role_association.html.markdown index 660cffbc1fa5..e25f216feaa1 100644 --- a/website/docs/cdktf/typescript/r/db_instance_role_association.html.markdown +++ b/website/docs/cdktf/typescript/r/db_instance_role_association.html.markdown @@ -48,6 +48,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `dbInstanceIdentifier` - (Required) DB Instance Identifier to associate with the IAM Role. * `featureName` - (Required) Name of the feature for association. This can be found in the AWS documentation relevant to the integration or a full list is available in the `SupportedFeatureNames` list returned by [AWS CLI rds describe-db-engine-versions](https://docs.aws.amazon.com/cli/latest/reference/rds/describe-db-engine-versions.html). * `roleArn` - (Required) Amazon Resource Name (ARN) of the IAM Role to associate with the DB Instance. 
@@ -97,4 +98,4 @@ Using `terraform import`, import `aws_db_instance_role_association` using the DB % terraform import aws_db_instance_role_association.example my-db-instance,arn:aws:iam::123456789012:role/my-role ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/db_option_group.html.markdown b/website/docs/cdktf/typescript/r/db_option_group.html.markdown index c771777820ed..6404ba86653f 100644 --- a/website/docs/cdktf/typescript/r/db_option_group.html.markdown +++ b/website/docs/cdktf/typescript/r/db_option_group.html.markdown @@ -79,6 +79,7 @@ More information about this can be found [here](https://docs.aws.amazon.com/Amaz This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Optional, Forces new resource) Name of the option group. If omitted, Terraform will assign a random, unique name. Must be lowercase, to match as it is stored in AWS. * `namePrefix` - (Optional, Forces new resource) Creates a unique name beginning with the specified prefix. Conflicts with `name`. Must be lowercase, to match as it is stored in AWS. * `optionGroupDescription` - (Optional) Description of the option group. Defaults to "Managed by Terraform". @@ -152,4 +153,4 @@ Using `terraform import`, import DB option groups using the `name`. 
For example: % terraform import aws_db_option_group.example mysql-option-group ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/db_parameter_group.html.markdown b/website/docs/cdktf/typescript/r/db_parameter_group.html.markdown index 850235449e43..558ff2151dc0 100644 --- a/website/docs/cdktf/typescript/r/db_parameter_group.html.markdown +++ b/website/docs/cdktf/typescript/r/db_parameter_group.html.markdown @@ -247,6 +247,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Optional, Forces new resource) The name of the DB parameter group. If omitted, Terraform will assign a random, unique name. * `namePrefix` - (Optional, Forces new resource) Creates a unique name beginning with the specified prefix. Conflicts with `name`. * `family` - (Required, Forces new resource) The family of the DB parameter group. @@ -301,4 +302,4 @@ Using `terraform import`, import DB Parameter groups using the `name`. 
For examp % terraform import aws_db_parameter_group.rds_pg rds-pg ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/db_proxy.html.markdown b/website/docs/cdktf/typescript/r/db_proxy.html.markdown index 7c913d98587f..99b444c1e3dc 100644 --- a/website/docs/cdktf/typescript/r/db_proxy.html.markdown +++ b/website/docs/cdktf/typescript/r/db_proxy.html.markdown @@ -165,9 +165,11 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The identifier for the proxy. This name must be unique for all proxies owned by your AWS account in the specified AWS Region. An identifier must begin with a letter and must contain only ASCII letters, digits, and hyphens; it can't end with a hyphen or contain two consecutive hyphens. -* `auth` - (Required) Configuration block(s) with authorization mechanisms to connect to the associated instances or clusters. Described below. +* `auth` - (Optional) Configuration block(s) with authorization mechanisms to connect to the associated instances or clusters. Required when `default_auth_scheme` is `NONE` or unspecified. Described below. * `debugLogging` - (Optional) Whether the proxy includes detailed information about SQL statements in its logs. This information helps you to debug issues involving SQL behavior or the performance and scalability of the proxy connections. The debug information includes the text of SQL statements that you submit through the proxy. 
Thus, only enable this setting when needed for debugging, and only when you have security measures in place to safeguard any sensitive information that appears in the logs. +* `default_auth_scheme` - (Optional) Default authentication scheme that the proxy uses for client connections to the proxy and connections from the proxy to the underlying database. Valid values are `NONE` and `IAM_AUTH`. Defaults to `NONE`. * `engineFamily` - (Required, Forces new resource) The kinds of databases that the proxy can connect to. This value determines which database network protocol the proxy recognizes when it interprets network traffic to and from the database. For Aurora MySQL, RDS for MariaDB, and RDS for MySQL databases, specify `MYSQL`. For Aurora PostgreSQL and RDS for PostgreSQL databases, specify `POSTGRESQL`. For RDS for Microsoft SQL Server, specify `SQLSERVER`. Valid values are `MYSQL`, `POSTGRESQL`, and `SQLSERVER`. * `idleClientTimeout` - (Optional) The number of seconds that a connection to the proxy can be inactive before the proxy disconnects it. You can set this value higher or lower than the connection timeout limit for the associated database. * `requireTls` - (Optional) A Boolean parameter that specifies whether Transport Layer Security (TLS) encryption is required for connections to the proxy. By enabling this setting, you can enforce encrypted TLS connections to the proxy. @@ -230,4 +232,4 @@ Using `terraform import`, import DB proxies using the `name`. 
For example: % terraform import aws_db_proxy.example example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/db_proxy_default_target_group.html.markdown b/website/docs/cdktf/typescript/r/db_proxy_default_target_group.html.markdown index 12413429004a..39790b8bec6e 100644 --- a/website/docs/cdktf/typescript/r/db_proxy_default_target_group.html.markdown +++ b/website/docs/cdktf/typescript/r/db_proxy_default_target_group.html.markdown @@ -76,6 +76,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `dbProxyName` - (Required) Name of the RDS DB Proxy. * `connectionPoolConfig` - (Optional) The settings that determine the size and behavior of the connection pool for the target group. @@ -134,4 +135,4 @@ Using `terraform import`, import DB proxy default target groups using the `dbPro % terraform import aws_db_proxy_default_target_group.example example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/db_proxy_endpoint.html.markdown b/website/docs/cdktf/typescript/r/db_proxy_endpoint.html.markdown index b71f3e56f48c..9276c7e17e4a 100644 --- a/website/docs/cdktf/typescript/r/db_proxy_endpoint.html.markdown +++ b/website/docs/cdktf/typescript/r/db_proxy_endpoint.html.markdown @@ -41,6 +41,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `dbProxyEndpointName` - (Required) The identifier for the proxy endpoint. An identifier must begin with a letter and must contain only ASCII letters, digits, and hyphens; it can't end with a hyphen or contain two consecutive hyphens. * `dbProxyName` - (Required) The name of the DB proxy associated with the DB proxy endpoint that you create. * `vpcSubnetIds` - (Required) One or more VPC subnet IDs to associate with the new proxy. @@ -94,4 +95,4 @@ Using `terraform import`, import DB proxy endpoints using the `DB-PROXY-NAME/DB- % terraform import aws_db_proxy_endpoint.example example/example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/db_proxy_target.html.markdown b/website/docs/cdktf/typescript/r/db_proxy_target.html.markdown index ad548acc3c91..be470b2a98be 100644 --- a/website/docs/cdktf/typescript/r/db_proxy_target.html.markdown +++ b/website/docs/cdktf/typescript/r/db_proxy_target.html.markdown @@ -82,6 +82,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `dbProxyName` - (Required, Forces new resource) The name of the DB proxy. * `targetGroupName` - (Required, Forces new resource) The name of the target group. * `dbInstanceIdentifier` - (Optional, Forces new resource) DB instance identifier. 
@@ -167,4 +168,4 @@ Provisioned Clusters: % terraform import aws_db_proxy_target.example example-proxy/default/TRACKED_CLUSTER/example-cluster ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/db_snapshot.html.markdown b/website/docs/cdktf/typescript/r/db_snapshot.html.markdown index 04f106666962..d0cb3fcc6d29 100644 --- a/website/docs/cdktf/typescript/r/db_snapshot.html.markdown +++ b/website/docs/cdktf/typescript/r/db_snapshot.html.markdown @@ -52,6 +52,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `dbInstanceIdentifier` - (Required) The DB Instance Identifier from which to take the snapshot. * `dbSnapshotIdentifier` - (Required) The Identifier for the snapshot. * `sharedAccounts` - (Optional) List of AWS Account IDs to share the snapshot with. Use `all` to make the snapshot public. 
@@ -112,4 +113,4 @@ Using `terraform import`, import `aws_db_snapshot` using the snapshot identifier % terraform import aws_db_snapshot.example my-snapshot ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/db_snapshot_copy.html.markdown b/website/docs/cdktf/typescript/r/db_snapshot_copy.html.markdown index fa0a2ba10bd2..526c3e733a1a 100644 --- a/website/docs/cdktf/typescript/r/db_snapshot_copy.html.markdown +++ b/website/docs/cdktf/typescript/r/db_snapshot_copy.html.markdown @@ -63,6 +63,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `copyTags` - (Optional) Whether to copy existing tags. Defaults to `false`. * `destinationRegion` - (Optional) The Destination region to place snapshot copy. * `kmsKeyId` - (Optional) KMS key ID. @@ -130,4 +131,4 @@ Using `terraform import`, import `aws_db_snapshot_copy` using the snapshot ident % terraform import aws_db_snapshot_copy.example my-snapshot ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/db_subnet_group.html.markdown b/website/docs/cdktf/typescript/r/db_subnet_group.html.markdown index 5721a00391bf..6d9d3b948052 100644 --- a/website/docs/cdktf/typescript/r/db_subnet_group.html.markdown +++ b/website/docs/cdktf/typescript/r/db_subnet_group.html.markdown @@ -44,6 +44,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Optional, Forces new resource) The name of the DB subnet group. If omitted, Terraform will assign a random, unique name. * `namePrefix` - (Optional, Forces new resource) Creates a unique name beginning with the specified prefix. Conflicts with `name`. * `description` - (Optional) The description of the DB subnet group. Defaults to "Managed by Terraform". @@ -92,4 +93,4 @@ Using `terraform import`, import DB Subnet groups using the `name`. For example: % terraform import aws_db_subnet_group.default production-subnet-group ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/default_network_acl.html.markdown b/website/docs/cdktf/typescript/r/default_network_acl.html.markdown index 68b114cfae83..269c00206b9d 100644 --- a/website/docs/cdktf/typescript/r/default_network_acl.html.markdown +++ b/website/docs/cdktf/typescript/r/default_network_acl.html.markdown @@ -184,6 +184,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `egress` - (Optional) Configuration block for an egress rule. Detailed below. * `ingress` - (Optional) Configuration block for an ingress rule. Detailed below. * `subnetIds` - (Optional) List of Subnet IDs to apply the ACL to. 
See the notes above on Managing Subnets in the Default Network ACL @@ -203,6 +204,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `cidrBlock` - (Optional) The CIDR block to match. This must be a valid network mask. * `icmpCode` - (Optional) The ICMP type code to be used. Default 0. * `icmpType` - (Optional) The ICMP type to be used. Default 0. @@ -250,4 +252,4 @@ Using `terraform import`, import Default Network ACLs using the `id`. For exampl % terraform import aws_default_network_acl.sample acl-7aaabd18 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/default_route_table.html.markdown b/website/docs/cdktf/typescript/r/default_route_table.html.markdown index fe2ffbabfeaa..9be9af517f0a 100644 --- a/website/docs/cdktf/typescript/r/default_route_table.html.markdown +++ b/website/docs/cdktf/typescript/r/default_route_table.html.markdown @@ -89,6 +89,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `propagatingVgws` - (Optional) List of virtual gateways for propagation. * `route` - (Optional) Configuration block of routes. Detailed below. This argument is processed in [attribute-as-blocks mode](https://www.terraform.io/docs/configuration/attr-as-blocks.html). This means that omitting this argument is interpreted as ignoring any existing routes. 
To remove all managed routes an empty list should be specified. See the example above. * `tags` - (Optional) Map of tags to assign to the resource. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. @@ -164,4 +165,4 @@ Using `terraform import`, import Default VPC route tables using the `vpcId`. For [tf-main-route-table-association]: /docs/providers/aws/r/main_route_table_association.html - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/default_security_group.html.markdown b/website/docs/cdktf/typescript/r/default_security_group.html.markdown index 1156a76f0448..890ba98127e2 100644 --- a/website/docs/cdktf/typescript/r/default_security_group.html.markdown +++ b/website/docs/cdktf/typescript/r/default_security_group.html.markdown @@ -108,6 +108,7 @@ Removing this resource from your configuration will remove it from your statefil The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `egress` - (Optional, VPC only) Configuration block. Detailed below. * `ingress` - (Optional) Configuration block. Detailed below. * `tags` - (Optional) Map of tags to assign to the resource. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. @@ -174,4 +175,4 @@ Using `terraform import`, import Security Groups using the security group `id`. 
% terraform import aws_default_security_group.default_sg sg-903004f8 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/detective_graph.html.markdown b/website/docs/cdktf/typescript/r/detective_graph.html.markdown index d66569864322..b3af2d8595a1 100644 --- a/website/docs/cdktf/typescript/r/detective_graph.html.markdown +++ b/website/docs/cdktf/typescript/r/detective_graph.html.markdown @@ -40,6 +40,7 @@ class MyConvertedCode extends TerraformStack { The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `tags` - (Optional) A map of tags to assign to the instance. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. ## Attribute Reference @@ -81,4 +82,4 @@ Using `terraform import`, import `aws_detective_graph` using the ARN. 
For exampl % terraform import aws_detective_graph.example arn:aws:detective:us-east-1:123456789101:graph:231684d34gh74g4bae1dbc7bd807d02d ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/detective_invitation_accepter.html.markdown b/website/docs/cdktf/typescript/r/detective_invitation_accepter.html.markdown index 6eb2c5cafe16..f7638399d1b1 100644 --- a/website/docs/cdktf/typescript/r/detective_invitation_accepter.html.markdown +++ b/website/docs/cdktf/typescript/r/detective_invitation_accepter.html.markdown @@ -51,6 +51,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `graphArn` - (Required) ARN of the behavior graph that the member account is accepting the invitation for. 
## Attribute Reference @@ -91,4 +92,4 @@ Using `terraform import`, import `aws_detective_invitation_accepter` using the g % terraform import aws_detective_invitation_accepter.example arn:aws:detective:us-east-1:123456789101:graph:231684d34gh74g4bae1dbc7bd807d02d ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/detective_member.html.markdown b/website/docs/cdktf/typescript/r/detective_member.html.markdown index 0f35be2add69..7e5bec8c5d85 100644 --- a/website/docs/cdktf/typescript/r/detective_member.html.markdown +++ b/website/docs/cdktf/typescript/r/detective_member.html.markdown @@ -46,6 +46,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `accountId` - (Required) AWS account ID for the account. * `emailAddress` - (Required) Email address for the account. * `graphArn` - (Required) ARN of the behavior graph to invite the member accounts to contribute their data to. 
@@ -95,4 +96,4 @@ Using `terraform import`, import `aws_detective_member` using the ARN of the gra % terraform import aws_detective_member.example arn:aws:detective:us-east-1:123456789101:graph:231684d34gh74g4bae1dbc7bd807d02d/123456789012 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/detective_organization_admin_account.html.markdown b/website/docs/cdktf/typescript/r/detective_organization_admin_account.html.markdown index 3c896c132eed..f2268351a842 100644 --- a/website/docs/cdktf/typescript/r/detective_organization_admin_account.html.markdown +++ b/website/docs/cdktf/typescript/r/detective_organization_admin_account.html.markdown @@ -47,6 +47,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `accountId` - (Required) AWS account identifier to designate as a delegated administrator for Detective. 
## Attribute Reference @@ -74,4 +75,4 @@ Using `terraform import`, import `aws_detective_organization_admin_account` usin % terraform import aws_detective_organization_admin_account.example 123456789012 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/detective_organization_configuration.html.markdown b/website/docs/cdktf/typescript/r/detective_organization_configuration.html.markdown index 75d41c7eb803..576417ac5f46 100644 --- a/website/docs/cdktf/typescript/r/detective_organization_configuration.html.markdown +++ b/website/docs/cdktf/typescript/r/detective_organization_configuration.html.markdown @@ -48,6 +48,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `autoEnable` - (Required) When this setting is enabled, all new accounts that are created in, or added to, the organization are added as a member accounts of the organization’s Detective delegated administrator and Detective is enabled in that AWS Region. * `graphArn` - (Required) ARN of the behavior graph. 
@@ -89,4 +90,4 @@ Using `terraform import`, import `aws_detective_organization_admin_account` usin % terraform import aws_detective_organization_configuration.example arn:aws:detective:us-east-1:123456789012:graph:00b00fd5aecc0ab60a708659477e9617 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/devicefarm_device_pool.html.markdown b/website/docs/cdktf/typescript/r/devicefarm_device_pool.html.markdown index c9404602dfdb..cb2eb8d3d9c9 100644 --- a/website/docs/cdktf/typescript/r/devicefarm_device_pool.html.markdown +++ b/website/docs/cdktf/typescript/r/devicefarm_device_pool.html.markdown @@ -46,6 +46,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The name of the Device Pool * `projectArn` - (Required) The ARN of the project for the device pool. * `rule` - (Required) The device pool's rules. See [Rule](#rule). @@ -68,6 +69,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_devicefarm_device_pool.example + identity = { + "arn" = "arn:aws:devicefarm:us-west-2:123456789012:devicepool:4e7e7e7e-7e7e-7e7e-7e7e-7e7e7e7e7e7e/a1b2c3d4-5678-90ab-cdef-EXAMPLE11111" + } +} + +resource "aws_devicefarm_device_pool" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the Device Farm device pool. 
+ In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import DeviceFarm Device Pools using their ARN. For example: ```typescript @@ -98,4 +120,4 @@ Using `terraform import`, import DeviceFarm Device Pools using their ARN. For ex % terraform import aws_devicefarm_device_pool.example arn:aws:devicefarm:us-west-2:123456789012:devicepool:4fa784c7-ccb4-4dbf-ba4f-02198320daa1/4fa784c7-ccb4-4dbf-ba4f-02198320daa1 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/devicefarm_instance_profile.html.markdown b/website/docs/cdktf/typescript/r/devicefarm_instance_profile.html.markdown index 5318a911fe72..417db00e6b96 100644 --- a/website/docs/cdktf/typescript/r/devicefarm_instance_profile.html.markdown +++ b/website/docs/cdktf/typescript/r/devicefarm_instance_profile.html.markdown @@ -40,6 +40,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` - (Optional) The description of the instance profile. * `excludeAppPackagesFromCleanup` - (Optional) An array of strings that specifies the list of app packages that should not be cleaned up from the device after a test run. * `name` - (Required) The name for the instance profile. @@ -56,6 +57,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. 
For example: + +```terraform +import { + to = aws_devicefarm_instance_profile.example + identity = { + "arn" = "arn:aws:devicefarm:us-west-2:123456789012:instanceprofile:4e7e7e7e-7e7e-7e7e-7e7e-7e7e7e7e7e7e" + } +} + +resource "aws_devicefarm_instance_profile" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the Device Farm instance profile. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import DeviceFarm Instance Profiles using their ARN. For example: ```typescript @@ -86,4 +108,4 @@ Using `terraform import`, import DeviceFarm Instance Profiles using their ARN. F % terraform import aws_devicefarm_instance_profile.example arn:aws:devicefarm:us-west-2:123456789012:instanceprofile:4fa784c7-ccb4-4dbf-ba4f-02198320daa1 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/devicefarm_network_profile.html.markdown b/website/docs/cdktf/typescript/r/devicefarm_network_profile.html.markdown index 6e19ea23e778..f4b524e8c477 100644 --- a/website/docs/cdktf/typescript/r/devicefarm_network_profile.html.markdown +++ b/website/docs/cdktf/typescript/r/devicefarm_network_profile.html.markdown @@ -51,6 +51,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` - (Optional) The description of the network profile. * `downlinkBandwidthBits` - (Optional) The data throughput rate in bits per second, as an integer from `0` to `104857600`. Default value is `104857600`. 
* `downlinkDelayMs` - (Optional) Delay time for all packets to destination in milliseconds as an integer from `0` to `2000`. @@ -74,6 +75,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_devicefarm_network_profile.example + identity = { + "arn" = "arn:aws:devicefarm:us-west-2:123456789012:networkprofile:4e7e7e7e-7e7e-7e7e-7e7e-7e7e7e7e7e7e" + } +} + +resource "aws_devicefarm_network_profile" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the Device Farm network profile. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import DeviceFarm Network Profiles using their ARN. For example: ```typescript @@ -104,4 +126,4 @@ Using `terraform import`, import DeviceFarm Network Profiles using their ARN. Fo % terraform import aws_devicefarm_network_profile.example arn:aws:devicefarm:us-west-2:123456789012:networkprofile:4fa784c7-ccb4-4dbf-ba4f-02198320daa1 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/devicefarm_project.html.markdown b/website/docs/cdktf/typescript/r/devicefarm_project.html.markdown index 222e50210924..0c699bf0afc0 100644 --- a/website/docs/cdktf/typescript/r/devicefarm_project.html.markdown +++ b/website/docs/cdktf/typescript/r/devicefarm_project.html.markdown @@ -43,6 +43,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The name of the project * `defaultJobTimeoutMinutes` - (Optional) Sets the execution timeout value (in minutes) for a project. All test runs in this project use the specified execution timeout value unless overridden when scheduling a run. * `tags` - (Optional) A map of tags to assign to the resource. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. @@ -58,6 +59,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_devicefarm_project.example + identity = { + "arn" = "arn:aws:devicefarm:us-west-2:123456789012:project:4e7e7e7e-7e7e-7e7e-7e7e-7e7e7e7e7e7e" + } +} + +resource "aws_devicefarm_project" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the Device Farm project. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import DeviceFarm Projects using their ARN. For example: ```typescript @@ -88,4 +110,4 @@ Using `terraform import`, import DeviceFarm Projects using their ARN. 
For exampl % terraform import aws_devicefarm_project.example arn:aws:devicefarm:us-west-2:123456789012:project:4fa784c7-ccb4-4dbf-ba4f-02198320daa1 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/devicefarm_test_grid_project.html.markdown b/website/docs/cdktf/typescript/r/devicefarm_test_grid_project.html.markdown index 5b0e959e94fe..2d19d9c6e083 100644 --- a/website/docs/cdktf/typescript/r/devicefarm_test_grid_project.html.markdown +++ b/website/docs/cdktf/typescript/r/devicefarm_test_grid_project.html.markdown @@ -47,6 +47,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The name of the Selenium testing project. * `description` - (Optional) Human-readable description of the project. * `vpcConfig` - (Required) The VPC security groups and subnets that are attached to a project. See [VPC Config](#vpc-config) below. @@ -67,6 +68,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_devicefarm_test_grid_project.example + identity = { + "arn" = "arn:aws:devicefarm:us-west-2:123456789012:testgrid-project:4e7e7e7e-7e7e-7e7e-7e7e-7e7e7e7e7e7e" + } +} + +resource "aws_devicefarm_test_grid_project" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the Device Farm test grid project. 
+ In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import DeviceFarm Test Grid Projects using their ARN. For example: ```typescript @@ -97,4 +119,4 @@ Using `terraform import`, import DeviceFarm Test Grid Projects using their ARN. % terraform import aws_devicefarm_test_grid_project.example arn:aws:devicefarm:us-west-2:123456789012:testgrid-project:4fa784c7-ccb4-4dbf-ba4f-02198320daa1 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/devicefarm_upload.html.markdown b/website/docs/cdktf/typescript/r/devicefarm_upload.html.markdown index 55235a4ceecd..50804462d194 100644 --- a/website/docs/cdktf/typescript/r/devicefarm_upload.html.markdown +++ b/website/docs/cdktf/typescript/r/devicefarm_upload.html.markdown @@ -48,6 +48,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `contentType` - (Optional) The upload's content type (for example, application/octet-stream). * `name` - (Required) The upload's file name. The name should not contain any forward slashes (/). If you are uploading an iOS app, the file name must end with the .ipa extension. If you are uploading an Android app, the file name must end with the .apk extension. For all others, the file name must end with the .zip file extension. * `projectArn` - (Required) The ARN of the project for the upload. 
@@ -64,6 +65,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_devicefarm_upload.example + identity = { + "arn" = "arn:aws:devicefarm:us-west-2:123456789012:upload:4e7e7e7e-7e7e-7e7e-7e7e-7e7e7e7e7e7e/a1b2c3d4-5678-90ab-cdef-EXAMPLE11111" + } +} + +resource "aws_devicefarm_upload" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the Device Farm upload. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import DeviceFarm Uploads using their ARN. For example: ```typescript @@ -94,4 +116,4 @@ Using `terraform import`, import DeviceFarm Uploads using their ARN. For example % terraform import aws_devicefarm_upload.example arn:aws:devicefarm:us-west-2:123456789012:upload:4fa784c7-ccb4-4dbf-ba4f-02198320daa1 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/devopsguru_event_sources_config.html.markdown b/website/docs/cdktf/typescript/r/devopsguru_event_sources_config.html.markdown index 60876ed4b4e5..c02d9539fee6 100644 --- a/website/docs/cdktf/typescript/r/devopsguru_event_sources_config.html.markdown +++ b/website/docs/cdktf/typescript/r/devopsguru_event_sources_config.html.markdown @@ -49,8 +49,9 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -The following arguments are required: +This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `eventSources` - (Required) Configuration information about the integration of DevOps Guru as the Consumer via EventBridge with another AWS Service. See [`eventSources`](#event_sources-argument-reference) below. ### `eventSources` Argument Reference @@ -69,7 +70,7 @@ This resource exports the following attributes in addition to the arguments abov ## Import -In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import DevOps Guru Event Sources Config using the `id`. For example: +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import DevOps Guru Event Sources Config using the region. For example: ```typescript // DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug @@ -93,10 +94,10 @@ class MyConvertedCode extends TerraformStack { ``` -Using `terraform import`, import DevOps Guru Event Sources Config using the `id`. For example: +Using `terraform import`, import DevOps Guru Event Sources Config using the region. 
For example: ```console % terraform import aws_devopsguru_event_sources_config.example us-east-1 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/devopsguru_notification_channel.html.markdown b/website/docs/cdktf/typescript/r/devopsguru_notification_channel.html.markdown index 1209464d84a3..9c509514c5d6 100644 --- a/website/docs/cdktf/typescript/r/devopsguru_notification_channel.html.markdown +++ b/website/docs/cdktf/typescript/r/devopsguru_notification_channel.html.markdown @@ -79,6 +79,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `filters` - (Optional) Filter configurations for the Amazon SNS notification topic. See the [`filters` argument reference](#filters-argument-reference) below. ### `sns` Argument Reference @@ -128,4 +129,4 @@ Using `terraform import`, import DevOps Guru Notification Channel using the `id` % terraform import aws_devopsguru_notification_channel.example id-12345678 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/devopsguru_resource_collection.html.markdown b/website/docs/cdktf/typescript/r/devopsguru_resource_collection.html.markdown index 1457d9c37084..e0fa8234a08b 100644 --- a/website/docs/cdktf/typescript/r/devopsguru_resource_collection.html.markdown +++ b/website/docs/cdktf/typescript/r/devopsguru_resource_collection.html.markdown @@ -135,6 +135,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `cloudformation` - (Optional) A collection of AWS CloudFormation stacks. See [`cloudformation`](#cloudformation-argument-reference) below for additional details. * `tags` - (Optional) AWS tags used to filter the resources in the resource collection. See [`tags`](#tags-argument-reference) below for additional details. @@ -185,4 +186,4 @@ Using `terraform import`, import DevOps Guru Resource Collection using the `id`. % terraform import aws_devopsguru_resource_collection.example AWS_CLOUD_FORMATION ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/devopsguru_service_integration.html.markdown b/website/docs/cdktf/typescript/r/devopsguru_service_integration.html.markdown index 9f9d091f2169..59ee01e3934b 100644 --- a/website/docs/cdktf/typescript/r/devopsguru_service_integration.html.markdown +++ b/website/docs/cdktf/typescript/r/devopsguru_service_integration.html.markdown @@ -96,8 +96,9 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -The following arguments are required: +This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `kmsServerSideEncryption` - (Required) Information about whether DevOps Guru is configured to encrypt server-side data using KMS. See [`kmsServerSideEncryption`](#kms_server_side_encryption-argument-reference) below. * `logsAnomalyDetection` - (Required) Information about whether DevOps Guru is configured to perform log anomaly detection on Amazon CloudWatch log groups. 
See [`logsAnomalyDetection`](#logs_anomaly_detection-argument-reference) below. * `opsCenter` - (Required) Information about whether DevOps Guru is configured to create an OpsItem in AWS Systems Manager OpsCenter for each created insight. See [`opsCenter`](#ops_center-argument-reference) below. @@ -124,7 +125,7 @@ This resource exports the following attributes in addition to the arguments abov ## Import -In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import DevOps Guru Service Integration using the `id`. For example: +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import DevOps Guru Service Integration using the region. For example: ```typescript // DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug @@ -148,10 +149,10 @@ class MyConvertedCode extends TerraformStack { ``` -Using `terraform import`, import DevOps Guru Service Integration using the `id`. For example: +Using `terraform import`, import DevOps Guru Service Integration using the region. For example: ```console % terraform import aws_devopsguru_service_integration.example us-east-1 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/directory_service_conditional_forwarder.html.markdown b/website/docs/cdktf/typescript/r/directory_service_conditional_forwarder.html.markdown index d84b4c525137..4e37eeb88aac 100644 --- a/website/docs/cdktf/typescript/r/directory_service_conditional_forwarder.html.markdown +++ b/website/docs/cdktf/typescript/r/directory_service_conditional_forwarder.html.markdown @@ -40,6 +40,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `directoryId` - (Required) ID of directory. * `dnsIps` - (Required) A list of forwarder IP addresses. * `remoteDomainName` - (Required) The fully qualified domain name of the remote domain for which forwarders will be used. @@ -80,4 +81,4 @@ Using `terraform import`, import conditional forwarders using the directory id a % terraform import aws_directory_service_conditional_forwarder.example d-1234567890:example.com ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/directory_service_directory.html.markdown b/website/docs/cdktf/typescript/r/directory_service_directory.html.markdown index d26c5bca9733..08fba50e0106 100644 --- a/website/docs/cdktf/typescript/r/directory_service_directory.html.markdown +++ b/website/docs/cdktf/typescript/r/directory_service_directory.html.markdown @@ -172,6 +172,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The fully qualified name for the directory, such as `corp.example.com` * `password` - (Required) The password for the directory administrator or connector user. * `size` - (Optional) (For `SimpleAD` and `ADConnector` types) The size of the directory (`Small` or `Large` are accepted values). `Large` by default. 
@@ -252,4 +253,4 @@ Using `terraform import`, import DirectoryService directories using the director % terraform import aws_directory_service_directory.sample d-926724cf57 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/directory_service_log_subscription.html.markdown b/website/docs/cdktf/typescript/r/directory_service_log_subscription.html.markdown index 8b941c530094..26342ca8d577 100644 --- a/website/docs/cdktf/typescript/r/directory_service_log_subscription.html.markdown +++ b/website/docs/cdktf/typescript/r/directory_service_log_subscription.html.markdown @@ -76,6 +76,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `directoryId` - (Required) ID of directory. * `logGroupName` - (Required) Name of the cloudwatch log group to which the logs should be published. The log group should be already created and the directory service principal should be provided with required permission to create stream and publish logs. Changing this value would delete the current subscription and create a new one. A directory can only have one log subscription at a time. 
@@ -115,4 +116,4 @@ Using `terraform import`, import Directory Service Log Subscriptions using the d % terraform import aws_directory_service_log_subscription.msad d-1234567890 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/directory_service_radius_settings.html.markdown b/website/docs/cdktf/typescript/r/directory_service_radius_settings.html.markdown index 93f58f796563..aeb9938ae51e 100644 --- a/website/docs/cdktf/typescript/r/directory_service_radius_settings.html.markdown +++ b/website/docs/cdktf/typescript/r/directory_service_radius_settings.html.markdown @@ -45,6 +45,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `authenticationProtocol` - (Optional) The protocol specified for your RADIUS endpoints. Valid values: `PAP`, `CHAP`, `MS-CHAPv1`, `MS-CHAPv2`. * `directoryId` - (Required) The identifier of the directory for which you want to manager RADIUS settings. * `displayLabel` - (Required) Display label. @@ -100,4 +101,4 @@ Using `terraform import`, import RADIUS settings using the directory ID. 
For exa % terraform import aws_directory_service_radius_settings.example d-926724cf57 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/directory_service_region.html.markdown b/website/docs/cdktf/typescript/r/directory_service_region.html.markdown index 9f2a74b346ed..50368470cc37 100644 --- a/website/docs/cdktf/typescript/r/directory_service_region.html.markdown +++ b/website/docs/cdktf/typescript/r/directory_service_region.html.markdown @@ -171,6 +171,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `desiredNumberOfDomainControllers` - (Optional) The number of domain controllers desired in the replicated directory. Minimum value of `2`. * `directoryId` - (Required) The identifier of the directory to which you want to add Region replication. * `regionName` - (Required) The name of the Region where you want to add domain controllers for replication. 
@@ -228,4 +229,4 @@ Using `terraform import`, import Replicated Regions using directory ID,Region na % terraform import aws_directory_service_region.example d-9267651497,us-east-2 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/directory_service_shared_directory.html.markdown b/website/docs/cdktf/typescript/r/directory_service_shared_directory.html.markdown index 2d09caf26716..ffab2d42c8e4 100644 --- a/website/docs/cdktf/typescript/r/directory_service_shared_directory.html.markdown +++ b/website/docs/cdktf/typescript/r/directory_service_shared_directory.html.markdown @@ -61,6 +61,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `method` - (Optional) Method used when sharing a directory. Valid values are `ORGANIZATIONS` and `HANDSHAKE`. Default is `HANDSHAKE`. * `notes` - (Optional, Sensitive) Message sent by the directory owner to the directory consumer to help the directory consumer administrator determine whether to approve or reject the share invitation. 
@@ -114,4 +115,4 @@ Using `terraform import`, import Directory Service Shared Directories using the % terraform import aws_directory_service_shared_directory.example d-1234567890/d-9267633ece ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/directory_service_shared_directory_accepter.html.markdown b/website/docs/cdktf/typescript/r/directory_service_shared_directory_accepter.html.markdown index 495dc5c72cbe..00058287b1f6 100644 --- a/website/docs/cdktf/typescript/r/directory_service_shared_directory_accepter.html.markdown +++ b/website/docs/cdktf/typescript/r/directory_service_shared_directory_accepter.html.markdown @@ -52,8 +52,9 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -The following arguments are required: +This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `sharedDirectoryId` - (Required) Identifier of the directory that is stored in the directory consumer account that corresponds to the shared directory in the owner account. 
## Attribute Reference @@ -105,4 +106,4 @@ Using `terraform import`, import Directory Service Shared Directories using the % terraform import aws_directory_service_shared_directory_accepter.example d-9267633ece ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/directory_service_trust.html.markdown b/website/docs/cdktf/typescript/r/directory_service_trust.html.markdown index d92645e9f885..9bad123746f4 100644 --- a/website/docs/cdktf/typescript/r/directory_service_trust.html.markdown +++ b/website/docs/cdktf/typescript/r/directory_service_trust.html.markdown @@ -144,6 +144,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `conditionalForwarderIpAddrs` - (Optional) Set of IPv4 addresses for the DNS server associated with the remote Directory. Can contain between 1 and 4 values. * `deleteAssociatedConditionalForwarder` - (Optional) Whether to delete the conditional forwarder when deleting the Trust relationship. 
@@ -206,4 +207,4 @@ Using `terraform import`, import the Trust relationship using the directory ID a % terraform import aws_directory_service_trust.example d-926724cf57/directory.example.com ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/dlm_lifecycle_policy.html.markdown b/website/docs/cdktf/typescript/r/dlm_lifecycle_policy.html.markdown index 57e49da60832..4a4ff8e87db9 100644 --- a/website/docs/cdktf/typescript/r/dlm_lifecycle_policy.html.markdown +++ b/website/docs/cdktf/typescript/r/dlm_lifecycle_policy.html.markdown @@ -114,6 +114,42 @@ class MyConvertedCode extends TerraformStack { ``` +### Example Default Policy + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { Token, TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. + */ +import { DlmLifecyclePolicy } from "./.gen/providers/aws/dlm-lifecycle-policy"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + new DlmLifecyclePolicy(this, "example", { + defaultPolicy: "VOLUME", + description: "tf-acc-basic", + executionRoleArn: Token.asString(awsIamRoleExample.arn), + policyDetails: { + createInterval: 5, + exclusions: { + excludeBootVolumes: false, + excludeTags: { + test: "exclude", + }, + excludeVolumeTypes: ["gp2"], + }, + policyLanguage: "SIMPLIFIED", + resourceType: "VOLUME", + }, + }); + } +} + +``` + ### Example Cross-Region Snapshot Copy Usage ```typescript @@ -273,12 +309,74 @@ class MyConvertedCode extends TerraformStack { ``` +### Example Post/Pre Scripts + +```typescript +// DO NOT EDIT. 
Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { Token, TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. + */ +import { DataAwsIamPolicy } from "./.gen/providers/aws/data-aws-iam-policy"; +import { DlmLifecyclePolicy } from "./.gen/providers/aws/dlm-lifecycle-policy"; +import { IamRolePolicyAttachment } from "./.gen/providers/aws/iam-role-policy-attachment"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + new DlmLifecyclePolicy(this, "example", { + description: "tf-acc-basic", + executionRoleArn: Token.asString(awsIamRoleExample.arn), + policyDetails: { + resourceTypes: ["INSTANCE"], + schedule: [ + { + createRule: { + interval: 12, + scripts: { + executeOperationOnScriptFailure: false, + executionHandler: "AWS_VSS_BACKUP", + maximumRetryCount: 2, + }, + }, + name: "Windows VSS", + retainRule: { + count: 10, + }, + }, + ], + targetTags: { + tag1: "Windows", + }, + }, + }); + const awsIamRolePolicyAttachmentExample = new IamRolePolicyAttachment( + this, + "example_1", + { + policyArn: Token.asString(dataAwsIamPolicyExample.arn), + role: test.id, + } + ); + /*This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match.*/ + awsIamRolePolicyAttachmentExample.overrideLogicalId("example"); + new DataAwsIamPolicy(this, "test", { + name: "AWSDataLifecycleManagerSSMFullAccess", + }); + } +} + +``` + ## Argument Reference This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` - (Required) A description for the DLM lifecycle policy. * `executionRoleArn` - (Required) The ARN of an IAM role that is able to be assumed by the DLM service. +* `defaultPolicy` - (Required) Specify the type of default policy to create. valid values are `VOLUME` or `INSTANCE`. * `policyDetails` - (Required) See the [`policyDetails` configuration](#policy-details-arguments) block. Max of 1. * `state` - (Optional) Whether the lifecycle policy should be enabled or disabled. `ENABLED` or `DISABLED` are valid values. Defaults to `ENABLED`. * `tags` - (Optional) Key-value map of resource tags. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. @@ -286,13 +384,20 @@ This resource supports the following arguments: #### Policy Details arguments * `action` - (Optional) The actions to be performed when the event-based policy is triggered. You can specify only one action per policy. This parameter is required for event-based policies only. If you are creating a snapshot or AMI policy, omit this parameter. See the [`action` configuration](#action-arguments) block. +* `copyTags` - (Optional, Default policies only) Indicates whether the policy should copy tags from the source resource to the snapshot or AMI. Default value is `false`. +* `createInterval` - (Optional, Default policies only) How often the policy should run and create snapshots or AMIs. valid values range from `1` to `7`. Default value is `1`. +* `exclusions` - (Optional, Default policies only) Specifies exclusion parameters for volumes or instances for which you do not want to create snapshots or AMIs. 
See the [`exclusions` configuration](#exclusions-arguments) block. +* `extendDeletion` - (Optional, Default policies only) snapshot or AMI retention behavior for the policy if the source volume or instance is deleted, or if the policy enters the error, disabled, or deleted state. Default value is `false`. +* `retainInterval` - (Optional, Default policies only) Specifies how long the policy should retain snapshots or AMIs before deleting them. valid values range from `2` to `14`. Default value is `7`. * `eventSource` - (Optional) The event that triggers the event-based policy. This parameter is required for event-based policies only. If you are creating a snapshot or AMI policy, omit this parameter. See the [`eventSource` configuration](#event-source-arguments) block. +* `resourceType` - (Optional, Default policies only) Type of default policy to create. Valid values are `VOLUME` and `INSTANCE`. * `resourceTypes` - (Optional) A list of resource types that should be targeted by the lifecycle policy. Valid values are `VOLUME` and `INSTANCE`. -* `resourceLocations` - (Optional) The location of the resources to backup. If the source resources are located in an AWS Region, specify `CLOUD`. If the source resources are located on an Outpost in your account, specify `OUTPOST`. If you specify `OUTPOST`, Amazon Data Lifecycle Manager backs up all resources of the specified type with matching target tags across all of the Outposts in your account. Valid values are `CLOUD` and `OUTPOST`. +* `resourceLocations` - (Optional) The location of the resources to backup. If the source resources are located in an AWS Region, specify `CLOUD`. If the source resources are located on an Outpost in your account, specify `OUTPOST`. If the source resources are located in a Local Zone, specify `LOCAL_ZONE`. Valid values are `CLOUD`, `LOCAL_ZONE`, and `OUTPOST`. +* `policyLanguage` - (Optional) Type of policy to create. `SIMPLIFIED` To create a default policy. 
`STANDARD` To create a custom policy. * `policyType` - (Optional) The valid target resource types and actions a policy can manage. Specify `EBS_SNAPSHOT_MANAGEMENT` to create a lifecycle policy that manages the lifecycle of Amazon EBS snapshots. Specify `IMAGE_MANAGEMENT` to create a lifecycle policy that manages the lifecycle of EBS-backed AMIs. Specify `EVENT_BASED_POLICY` to create an event-based policy that performs specific actions when a defined event occurs in your AWS account. Default value is `EBS_SNAPSHOT_MANAGEMENT`. * `parameters` - (Optional) A set of optional parameters for snapshot and AMI lifecycle policies. See the [`parameters` configuration](#parameters-arguments) block. * `schedule` - (Optional) See the [`schedule` configuration](#schedule-arguments) block. -* `targetTags` (Optional) A map of tag keys and their values. Any resources that match the `resourceTypes` and are tagged with _any_ of these tags will be targeted. +* `targetTags` (Optional) A map of tag keys and their values. Any resources that match the `resourceTypes` and are tagged with _any_ of these tags will be targeted. Required when `policyType` is `EBS_SNAPSHOT_MANAGEMENT` or `IMAGE_MANAGEMENT`. Must not be specified when `policyType` is `EVENT_BASED_POLICY`. ~> Note: You cannot have overlapping lifecycle policies that share the same `targetTags`. Terraform is unable to detect this at plan time but it will fail during apply. @@ -323,6 +428,12 @@ This resource supports the following arguments: * `eventType` - (Required) The type of event. Currently, only `shareSnapshot` events are supported. * `snapshotOwner` - (Required) The IDs of the AWS accounts that can trigger policy by sharing snapshots with your account. The policy only runs if one of the specified AWS accounts shares a snapshot with your account. +#### Exclusions arguments + +* `excludeBootVolumes` - (Optional) Indicates whether to exclude volumes that are attached to instances as the boot volume. 
To exclude boot volumes, specify `true`. +* `excludeTags` - (Optional) Map specifies whether to exclude volumes that have specific tags. +* `excludeVolumeTypes` - (Optional) List specifies the volume types to exclude. + #### Parameters arguments * `excludeBootVolume` - (Optional) Indicates whether to exclude the root volume from snapshots created using CreateSnapshots. The default is `false`. @@ -330,6 +441,7 @@ This resource supports the following arguments: #### Schedule arguments +* `archiveRule` - (Optional) Specifies a snapshot archiving rule for a schedule. See [`archiveRule`](#archive-rule-arguments) block. * `copyTags` - (Optional) Copy all user-defined tags on a source volume to snapshots of the volume created by this policy. * `createRule` - (Required) See the [`createRule`](#create-rule-arguments) block. Max of 1 per schedule. * `crossRegionCopyRule` (Optional) - See the [`crossRegionCopyRule`](#cross-region-copy-rule-arguments) block. Max of 3 per schedule. @@ -341,12 +453,21 @@ This resource supports the following arguments: * `tagsToAdd` - (Optional) A map of tag keys and their values. DLM lifecycle policies will already tag the snapshot with the tags on the volume. This configuration adds extra tags on top of these. * `variableTags` - (Optional) A map of tag keys and variable values, where the values are determined when the policy is executed. Only `$(instance-id)` or `$(timestamp)` are valid values. Can only be used when `resourceTypes` is `INSTANCE`. +#### Archive Rule Arguments + +* `archiveRetainRule` - (Required) Information about the retention period for the snapshot archiving rule. See the [`archiveRetainRule`](#archive-retain-rule-arguments) block. + +#### Archive Retain Rule Arguments + +* `retentionArchiveTier` - (Required) Information about retention period in the Amazon EBS Snapshots Archive. See the [`retentionArchiveTier`](#retention-archive-tier-arguments) block. 
+ #### Create Rule arguments * `cronExpression` - (Optional) The schedule, as a Cron expression. The schedule interval must be between 1 hour and 1 year. Conflicts with `interval`, `intervalUnit`, and `times`. * `interval` - (Optional) How often this lifecycle policy should be evaluated. `1`, `2`,`3`,`4`,`6`,`8`,`12` or `24` are valid values. Conflicts with `cronExpression`. If set, `intervalUnit` and `times` must also be set. * `intervalUnit` - (Optional) The unit for how often the lifecycle policy should be evaluated. `HOURS` is currently the only allowed value and also the default value. Conflicts with `cronExpression`. Must be set if `interval` is set. * `location` - (Optional) Specifies the destination for snapshots created by the policy. To create snapshots in the same Region as the source resource, specify `CLOUD`. To create snapshots on the same Outpost as the source resource, specify `OUTPOST_LOCAL`. If you omit this parameter, `CLOUD` is used by default. If the policy targets resources in an AWS Region, then you must create snapshots in the same Region as the source resource. If the policy targets resources on an Outpost, then you can create snapshots on the same Outpost as the source resource, or in the Region of that Outpost. Valid values are `CLOUD` and `OUTPOST_LOCAL`. +* `scripts` - (Optional) Specifies pre and/or post scripts for a snapshot lifecycle policy that targets instances. Valid only when `resourceType` is INSTANCE. See the [`scripts` configuration](#scripts-rule-arguments) block. * `times` - (Optional) A list of times in 24 hour clock format that sets when the lifecycle policy should be evaluated. Max of 1. Conflicts with `cronExpression`. Must be set if `interval` is set. #### Deprecate Rule arguments @@ -381,7 +502,8 @@ This resource supports the following arguments: * `deprecateRule` - (Optional) The AMI deprecation rule for cross-Region AMI copies created by the rule. 
See the [`deprecateRule`](#cross-region-copy-rule-deprecate-rule-arguments) block. * `encrypted` - (Required) To encrypt a copy of an unencrypted snapshot if encryption by default is not enabled, enable encryption using this parameter. Copies of encrypted snapshots are encrypted, even if this parameter is false or if encryption by default is not enabled. * `retainRule` - (Required) The retention rule that indicates how long snapshot copies are to be retained in the destination Region. See the [`retainRule`](#cross-region-copy-rule-retain-rule-arguments) block. Max of 1 per schedule. -* `target` - (Required) The target Region or the Amazon Resource Name (ARN) of the target Outpost for the snapshot copies. +* `target` - Use only for DLM policies of `policy_type=EBS_SNAPSHOT_MANAGEMENT`. The target Region or the Amazon Resource Name (ARN) of the target Outpost for the snapshot copies. +* `targetRegion` - Use only for DLM policies of `policy_type=IMAGE_MANAGEMENT`. The target Region or the Amazon Resource Name (ARN) of the target Outpost for the snapshot copies. #### Cross Region Copy Rule Deprecate Rule arguments @@ -393,6 +515,26 @@ This resource supports the following arguments: * `interval` - (Required) The amount of time to retain each snapshot. The maximum is 100 years. This is equivalent to 1200 months, 5200 weeks, or 36500 days. * `intervalUnit` - (Required) The unit of time for time-based retention. Valid values: `DAYS`, `WEEKS`, `MONTHS`, or `YEARS`. +#### Scripts Rule arguments + +* `executeOperationOnScriptFailure` - (Optional) Indicates whether Amazon Data Lifecycle Manager should default to crash-consistent snapshots if the pre script fails. The default is `true`. + +* `executionHandler` - (Required) The SSM document that includes the pre and/or post scripts to run. In case automating VSS backups, specify `AWS_VSS_BACKUP`. 
In case automating application-consistent snapshots for SAP HANA workloads, specify `AWSSystemsManagerSAP-CreateDLMSnapshotForSAPHANA`. If you are using a custom SSM document that you own, specify either the name or ARN of the SSM document. + +* `executionHandlerService` - (Optional) Indicates the service used to execute the pre and/or post scripts. If using custom SSM documents or automating application-consistent snapshots of SAP HANA workloads, specify `AWS_SYSTEMS_MANAGER`. In case automating VSS Backups, omit this parameter. The default is `AWS_SYSTEMS_MANAGER`. + +* `executionTimeout` - (Optional) Specifies a timeout period, in seconds, after which Amazon Data Lifecycle Manager fails the script run attempt if it has not completed. In case automating VSS Backups, omit this parameter. The default is `10`. + +* `maximumRetryCount` - (Optional) Specifies the number of times Amazon Data Lifecycle Manager should retry scripts that fail. Must be an integer between `0` and `3`. The default is `0`. + +* `stages` - (Optional) List to indicate which scripts Amazon Data Lifecycle Manager should run on target instances. Pre scripts run before Amazon Data Lifecycle Manager initiates snapshot creation. Post scripts run after Amazon Data Lifecycle Manager initiates snapshot creation. Valid values: `PRE` and `POST`. The default is `PRE` and `POST`. + +#### Retention Archive Tier Arguments + +* `count` - (Optional) The maximum number of snapshots to retain in the archive storage tier for each volume. Must be an integer between `1` and `1000`. Conflicts with `interval` and `intervalUnit`. +* `interval` - (Optional) Specifies the period of time to retain snapshots in the archive tier. After this period expires, the snapshot is permanently deleted. Conflicts with `count`. If set, `intervalUnit` must also be set. +* `intervalUnit` - (Optional) The unit of time for time-based retention. Valid values are `DAYS`, `WEEKS`, `MONTHS`, `YEARS`. Conflicts with `count`. 
Must be set if `interval` is set. + ## Attribute Reference This resource exports the following attributes in addition to the arguments above: @@ -433,4 +575,4 @@ Using `terraform import`, import DLM lifecycle policies using their policy ID. F % terraform import aws_dlm_lifecycle_policy.example policy-abcdef12345678901 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/dms_certificate.html.markdown b/website/docs/cdktf/typescript/r/dms_certificate.html.markdown index 7792ceb66532..111068818f68 100644 --- a/website/docs/cdktf/typescript/r/dms_certificate.html.markdown +++ b/website/docs/cdktf/typescript/r/dms_certificate.html.markdown @@ -45,10 +45,8 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `certificateId` - (Required) The certificate identifier. - - - Must contain from 1 to 255 alphanumeric characters and hyphens. - * `certificatePem` - (Optional) The contents of the .pem X.509 certificate file for the certificate. Either `certificatePem` or `certificateWallet` must be set. * `certificateWallet` - (Optional) The contents of the Oracle Wallet certificate for use with SSL, provided as a base64-encoded String. Either `certificatePem` or `certificateWallet` must be set. * `tags` - (Optional) A map of tags to assign to the resource. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. 
@@ -92,4 +90,4 @@ Using `terraform import`, import certificates using the `certificateId`. For exa % terraform import aws_dms_certificate.test test-dms-certificate-tf ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/dms_endpoint.html.markdown b/website/docs/cdktf/typescript/r/dms_endpoint.html.markdown index 3f3bdc91cf23..daf616c30970 100644 --- a/website/docs/cdktf/typescript/r/dms_endpoint.html.markdown +++ b/website/docs/cdktf/typescript/r/dms_endpoint.html.markdown @@ -14,8 +14,6 @@ Provides a DMS (Data Migration Service) endpoint resource. DMS endpoints can be ~> **Note:** All arguments including the password will be stored in the raw state as plain-text. [Read more about sensitive data in state](https://www.terraform.io/docs/state/sensitive-data.html). -~> **Note:** The `s3Settings` argument is deprecated, may not be maintained, and will be removed in a future version. Use the [`aws_dms_s3_endpoint`](/docs/providers/aws/r/dms_s3_endpoint.html) resource instead. - ## Example Usage ```typescript @@ -56,15 +54,17 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -The following arguments are required: +This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `endpointId` - (Required) Database endpoint identifier. Identifiers must contain from 1 to 255 alphanumeric characters or hyphens, begin with a letter, contain only ASCII letters, digits, and hyphens, not end with a hyphen, and not contain two consecutive hyphens. * `endpointType` - (Required) Type of endpoint. Valid values are `source`, `target`. * `engineName` - (Required) Type of engine for the endpoint. 
Valid values are `aurora`, `aurora-postgresql`, `aurora-serverless`, `aurora-postgresql-serverless`,`azuredb`, `azure-sql-managed-instance`, `babelfish`, `db2`, `db2-zos`, `docdb`, `dynamodb`, `elasticsearch`, `kafka`, `kinesis`, `mariadb`, `mongodb`, `mysql`, `opensearch`, `oracle`, `postgres`, `redshift`,`redshift-serverless`, `s3`, `sqlserver`, `neptune` ,`sybase`. Please note that some of engine names are available only for `target` endpoint type (e.g. `redshift`). -* `kmsKeyArn` - (Required when `engineName` is `mongodb`, cannot be set when `engineName` is `s3`, optional otherwise) ARN for the KMS key that will be used to encrypt the connection parameters. If you do not specify a value for `kmsKeyArn`, then AWS DMS will use your default encryption key. AWS KMS creates the default encryption key for your AWS account. Your AWS account has a different default encryption key for each AWS region. To encrypt an S3 target with a KMS Key, use the parameter `s3_settings.server_side_encryption_kms_key_id`. When `engineName` is `redshift`, `kmsKeyArn` is the KMS Key for the Redshift target and the parameter `redshift_settings.server_side_encryption_kms_key_id` encrypts the S3 intermediate storage. +* `kmsKeyArn` - (Required when `engineName` is `mongodb`, optional otherwise) ARN for the KMS key that will be used to encrypt the connection parameters. If you do not specify a value for `kmsKeyArn`, then AWS DMS will use your default encryption key. AWS KMS creates the default encryption key for your AWS account. Your AWS account has a different default encryption key for each AWS region. When `engineName` is `redshift`, `kmsKeyArn` is the KMS Key for the Redshift target and the parameter `redshift_settings.server_side_encryption_kms_key_id` encrypts the S3 intermediate storage. The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `certificateArn` - (Optional, Default: empty string) ARN for the certificate. * `databaseName` - (Optional) Name of the endpoint database. * `elasticsearchSettings` - (Optional) Configuration block for OpenSearch settings. See below. @@ -72,12 +72,12 @@ The following arguments are optional: * `kafkaSettings` - (Optional) Configuration block for Kafka settings. See below. * `kinesisSettings` - (Optional) Configuration block for Kinesis settings. See below. * `mongodbSettings` - (Optional) Configuration block for MongoDB settings. See below. +* `oracleSettings` - (Optional) Configuration block for Oracle settings. See below. * `password` - (Optional) Password to be used to login to the endpoint database. * `postgresSettings` - (Optional) Configuration block for Postgres settings. See below. * `pauseReplicationTasks` - (Optional) Whether to pause associated running replication tasks, regardless if they are managed by Terraform, prior to modifying the endpoint. Only tasks paused by the resource will be restarted after the modification completes. Default is `false`. * `port` - (Optional) Port used by the endpoint database. * `redshiftSettings` - (Optional) Configuration block for Redshift settings. See below. -* `s3Settings` - (Optional) (**Deprecated**, use the [`aws_dms_s3_endpoint`](/docs/providers/aws/r/dms_s3_endpoint.html) resource instead) Configuration block for S3 settings. See below. * `secretsManagerAccessRoleArn` - (Optional) ARN of the IAM role that specifies AWS DMS as the trusted entity and has the required permissions to access the value in the Secrets Manager secret referred to by `secretsManagerArn`. The role must allow the `iam:PassRole` action. ~> **Note:** You can specify one of two sets of values for these permissions. You can specify the values for this setting and `secretsManagerArn`. 
Or you can specify clear-text values for `username`, `password` , `serverName`, and `port`. You can't specify both. @@ -149,11 +149,18 @@ The following arguments are optional: * `extractDocId` - (Optional) Document ID. Use this setting when `nestingLevel` is set to `none`. Default is `false`. * `nestingLevel` - (Optional) Specifies either document or table mode. Default is `none`. Valid values are `one` (table mode) and `none` (document mode). +### oracle_settings + +-> Additional information can be found in the [Using Oracle as a Source for AWS DMS documentation](https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Source.Oracle.html). + +* `authenticationMethod` - (Optional) Authentication mechanism to access the Oracle source endpoint. Default is `password`. Valid values are `password` and `kerberos`. + ### postgres_settings -> Additional information can be found in the [Using PostgreSQL as a Source for AWS DMS documentation](https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Source.PostgreSQL.html). * `afterConnectScript` - (Optional) For use with change data capture (CDC) only, this attribute has AWS DMS bypass foreign keys and user triggers to reduce the time it takes to bulk load data. +* `authenticationMethod` - (Optional) Specifies the authentication method. Valid values: `password`, `iam`. * `babelfishDatabaseName` - (Optional) The Babelfish for Aurora PostgreSQL database name for the endpoint. * `captureDdls` - (Optional) To capture DDL events, AWS DMS creates various artifacts in the PostgreSQL database when the task starts. * `databaseMode` - (Optional) Specifies the default behavior of the replication's handling of PostgreSQL- compatible endpoints that require some additional configuration, such as Babelfish endpoints. @@ -168,6 +175,7 @@ The following arguments are optional: * `mapLongVarcharAs` - Optional When true, DMS migrates LONG values as VARCHAR. 
* `maxFileSize` - (Optional) Specifies the maximum size (in KB) of any .csv file used to transfer data to PostgreSQL. Default is `32,768 KB`. * `pluginName` - (Optional) Specifies the plugin to use to create a replication slot. Valid values: `pglogical`, `test_decoding`. +* `serviceAccessRoleArn` - (Optional) Specifies the IAM role to use to authenticate the connection. * `slotName` - (Optional) Sets the name of a previously created logical replication slot for a CDC load of the PostgreSQL source instance. ### redis_settings @@ -192,51 +200,6 @@ The following arguments are optional: * `serverSideEncryptionKmsKeyId` - (Required when `encryptionMode` is `SSE_KMS`, must not be set otherwise) ARN or Id of KMS Key to use when `encryptionMode` is `SSE_KMS`. * `serviceAccessRoleArn` - (Optional) Amazon Resource Name (ARN) of the IAM Role with permissions to read from or write to the S3 Bucket for intermediate storage. -### s3_settings - -~> **Deprecated:** This argument is deprecated, may not be maintained, and will be removed in a future version. Use the [`aws_dms_s3_endpoint`](/docs/providers/aws/r/dms_s3_endpoint.html) resource instead. - --> Additional information can be found in the [Using Amazon S3 as a Source for AWS Database Migration Service documentation](https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Source.S3.html) and [Using Amazon S3 as a Target for AWS Database Migration Service documentation](https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.S3.html). - -* `addColumnName` - (Optional) Whether to add column name information to the .csv output file. Default is `false`. -* `bucketFolder` - (Optional) S3 object prefix. -* `bucketName` - (Optional) S3 bucket name. -* `cannedAclForObjects` - (Optional) Predefined (canned) access control list for objects created in an S3 bucket. Valid values include `none`, `private`, `public-read`, `public-read-write`, `authenticated-read`, `aws-exec-read`, `bucket-owner-read`, and `bucket-owner-full-control`. 
Default is `none`. -* `cdcInsertsAndUpdates` - (Optional) Whether to write insert and update operations to .csv or .parquet output files. Default is `false`. -* `cdcInsertsOnly` - (Optional) Whether to write insert operations to .csv or .parquet output files. Default is `false`. -* `cdcMaxBatchInterval` - (Optional) Maximum length of the interval, defined in seconds, after which to output a file to Amazon S3. Default is `60`. -* `cdcMinFileSize` - (Optional) Minimum file size condition as defined in kilobytes to output a file to Amazon S3. Default is `32000`. **NOTE:** Previously, this setting was measured in megabytes but now represents kilobytes. Update configurations accordingly. -* `cdcPath` - (Optional) Folder path of CDC files. For an S3 source, this setting is required if a task captures change data; otherwise, it's optional. If `cdcPath` is set, AWS DMS reads CDC files from this path and replicates the data changes to the target endpoint. Supported in AWS DMS versions 3.4.2 and later. -* `compressionType` - (Optional) Set to compress target files. Default is `NONE`. Valid values are `GZIP` and `NONE`. -* `csvDelimiter` - (Optional) Delimiter used to separate columns in the source files. Default is `,`. -* `csvNoSupValue` - (Optional) String to use for all columns not included in the supplemental log. -* `csvNullValue` - (Optional) String to as null when writing to the target. -* `csvRowDelimiter` - (Optional) Delimiter used to separate rows in the source files. Default is `\n`. -* `dataFormat` - (Optional) Output format for the files that AWS DMS uses to create S3 objects. Valid values are `csv` and `parquet`. Default is `csv`. -* `dataPageSize` - (Optional) Size of one data page in bytes. Default is `1048576` (1 MiB). -* `datePartitionDelimiter` - (Optional) Date separating delimiter to use during folder partitioning. Valid values are `SLASH`, `UNDERSCORE`, `DASH`, and `NONE`. Default is `SLASH`. 
-* `datePartitionEnabled` - (Optional) Partition S3 bucket folders based on transaction commit dates. Default is `false`. -* `datePartitionSequence` - (Optional) Date format to use during folder partitioning. Use this parameter when `datePartitionEnabled` is set to true. Valid values are `YYYYMMDD`, `YYYYMMDDHH`, `YYYYMM`, `MMYYYYDD`, and `DDMMYYYY`. Default is `YYYYMMDD`. -* `dictPageSizeLimit` - (Optional) Maximum size in bytes of an encoded dictionary page of a column. Default is `1048576` (1 MiB). -* `enableStatistics` - (Optional) Whether to enable statistics for Parquet pages and row groups. Default is `true`. -* `encodingType` - (Optional) Type of encoding to use. Value values are `rle_dictionary`, `plain`, and `plain_dictionary`. Default is `rle_dictionary`. -* `encryptionMode` - (Optional) Server-side encryption mode that you want to encrypt your .csv or .parquet object files copied to S3. Valid values are `SSE_S3` and `SSE_KMS`. Default is `SSE_S3`. -* `externalTableDefinition` - (Optional) JSON document that describes how AWS DMS should interpret the data. -* `glueCatalogGeneration` - (Optional) Whether to integrate AWS Glue Data Catalog with an Amazon S3 target. See [Using AWS Glue Data Catalog with an Amazon S3 target for AWS DMS](https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.S3.html#CHAP_Target.S3.GlueCatalog) for more information. Default is `false`. -* `ignoreHeaderRows` - (Optional) When this value is set to `1`, DMS ignores the first row header in a .csv file. Default is `0`. -* `includeOpForFullLoad` - (Optional) Whether to enable a full load to write INSERT operations to the .csv output files only to indicate how the rows were added to the source database. Default is `false`. -* `maxFileSize` - (Optional) Maximum size (in KB) of any .csv file to be created while migrating to an S3 target during full load. Valid values are from `1` to `1048576`. Default is `1048576` (1 GB). 
-* `parquetTimestampInMillisecond` - (Optional) - Specifies the precision of any TIMESTAMP column values written to an S3 object file in .parquet format. Default is `false`. -* `parquetVersion` - (Optional) Version of the .parquet file format. Default is `parquet-1-0`. Valid values are `parquet-1-0` and `parquet-2-0`. -* `preserveTransactions` - (Optional) Whether DMS saves the transaction order for a CDC load on the S3 target specified by `cdcPath`. Default is `false`. -* `rfc4180` - (Optional) For an S3 source, whether each leading double quotation mark has to be followed by an ending double quotation mark. Default is `true`. -* `rowGroupLength` - (Optional) Number of rows in a row group. Default is `10000`. -* `serverSideEncryptionKmsKeyId` - (Required when `encryptionMode` is `SSE_KMS`, must not be set otherwise) ARN or Id of KMS Key to use when `encryptionMode` is `SSE_KMS`. -* `serviceAccessRoleArn` - (Optional) ARN of the IAM Role with permissions to read from or write to the S3 Bucket. -* `timestampColumnName` - (Optional) Column to add with timestamp information to the endpoint data for an Amazon S3 target. -* `useCsvNoSupValue` - (Optional) Whether to use `csvNoSupValue` for columns not included in the supplemental log. -* `useTaskStartTimeForFullLoadTimestamp` - (Optional) When set to true, uses the task start time as the timestamp column value instead of the time data is written to target. For full load, when set to true, each row of the timestamp column contains the task start time. For CDC loads, each row of the timestamp column contains the transaction commit time. When set to false, the full load timestamp in the timestamp column increments with the time data arrives at the target. Default is `false`. - ## Attribute Reference This resource exports the following attributes in addition to the arguments above: @@ -279,4 +242,4 @@ Using `terraform import`, import endpoints using the `endpointId`. 
For example: % terraform import aws_dms_endpoint.test test-dms-endpoint-tf ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/dms_event_subscription.html.markdown b/website/docs/cdktf/typescript/r/dms_event_subscription.html.markdown index 05a76f7e7483..c901e5c8ba37 100644 --- a/website/docs/cdktf/typescript/r/dms_event_subscription.html.markdown +++ b/website/docs/cdktf/typescript/r/dms_event_subscription.html.markdown @@ -48,6 +48,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of event subscription. * `enabled` - (Optional, Default: true) Whether the event subscription should be enabled. * `eventCategories` - (Optional) List of event categories to listen for, see `DescribeEventCategories` for a canonical list. @@ -103,4 +104,4 @@ Using `terraform import`, import event subscriptions using the `name`. 
For examp % terraform import aws_dms_event_subscription.test my-awesome-event-subscription ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/dms_replication_config.html.markdown b/website/docs/cdktf/typescript/r/dms_replication_config.html.markdown index 6dfde317e856..909e1a85a78b 100644 --- a/website/docs/cdktf/typescript/r/dms_replication_config.html.markdown +++ b/website/docs/cdktf/typescript/r/dms_replication_config.html.markdown @@ -53,6 +53,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `computeConfig` - (Required) Configuration block for provisioning an DMS Serverless replication. * `startReplication` - (Optional) Whether to run or stop the serverless replication, default is false. * `replicationConfigIdentifier` - (Required) Unique identifier that you want to use to create the config. @@ -100,6 +101,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_dms_replication_config.example + identity = { + "arn" = "arn:aws:dms:us-east-1:123456789012:replication-config:example-config" + } +} + +resource "aws_dms_replication_config" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the DMS replication configuration. 
+ In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import replication configs using the `arn`. For example: ```typescript @@ -130,4 +152,4 @@ Using `terraform import`, import a replication config using the `arn`. For examp % terraform import aws_dms_replication_config.example arn:aws:dms:us-east-1:123456789012:replication-config:UX6OL6MHMMJKFFOXE3H7LLJCMEKBDUG4ZV7DRSI ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/dms_replication_instance.html.markdown b/website/docs/cdktf/typescript/r/dms_replication_instance.html.markdown index aaabf46dbdc5..4ef597279158 100644 --- a/website/docs/cdktf/typescript/r/dms_replication_instance.html.markdown +++ b/website/docs/cdktf/typescript/r/dms_replication_instance.html.markdown @@ -126,35 +126,34 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `allocatedStorage` - (Optional, Default: 50, Min: 5, Max: 6144) The amount of storage (in gigabytes) to be initially allocated for the replication instance. * `allowMajorVersionUpgrade` - (Optional, Default: false) Indicates that major version upgrades are allowed. * `applyImmediately` - (Optional, Default: false) Indicates whether the changes should be applied immediately or during the next maintenance window. Only used when updating an existing resource. * `autoMinorVersionUpgrade` - (Optional, Default: false) Indicates that minor engine upgrades will be applied automatically to the replication instance during the maintenance window. 
* `availabilityZone` - (Optional) The EC2 Availability Zone that the replication instance will be created in. +* `dnsNameServers` - (Optional) A list of custom DNS name servers supported for the replication instance to access your on-premise source or target database. This list overrides the default name servers supported by the replication instance. You can specify a comma-separated list of internet addresses for up to four on-premise DNS name servers. * `engineVersion` - (Optional) The engine version number of the replication instance. +* `kerberosAuthenticationSettings` - (Optional) Configuration block for settings required for Kerberos authentication. See below. * `kmsKeyArn` - (Optional) The Amazon Resource Name (ARN) for the KMS key that will be used to encrypt the connection parameters. If you do not specify a value for `kmsKeyArn`, then AWS DMS will use your default encryption key. AWS KMS creates the default encryption key for your AWS account. Your AWS account has a different default encryption key for each AWS region. * `multiAz` - (Optional) Specifies if the replication instance is a multi-az deployment. You cannot set the `availabilityZone` parameter if the `multiAz` parameter is set to `true`. * `networkType` - (Optional) The type of IP address protocol used by a replication instance. Valid values: `IPV4`, `DUAL`. * `preferredMaintenanceWindow` - (Optional) The weekly time range during which system maintenance can occur, in Universal Coordinated Time (UTC). - - - Default: A 30-minute window selected at random from an 8-hour block of time per region, occurring on a random day of the week. - - Format: `ddd:hh24:mi-ddd:hh24:mi` - - Valid Days: `mon, tue, wed, thu, fri, sat, sun` - - Constraints: Minimum 30-minute window. - * `publiclyAccessible` - (Optional, Default: false) Specifies the accessibility options for the replication instance. A value of true represents an instance with a public IP address. 
A value of false represents an instance with a private IP address. * `replicationInstanceClass` - (Required) The compute and memory capacity of the replication instance as specified by the replication instance class. See [AWS DMS User Guide](https://docs.aws.amazon.com/dms/latest/userguide/CHAP_ReplicationInstance.Types.html) for available instance sizes and advice on which one to choose. * `replicationInstanceId` - (Required) The replication instance identifier. This parameter is stored as a lowercase string. - - - Must contain from 1 to 63 alphanumeric characters or hyphens. - - First character must be a letter. - - Cannot end with a hyphen - - Cannot contain two consecutive hyphens. - * `replicationSubnetGroupId` - (Optional) A subnet group to associate with the replication instance. * `tags` - (Optional) A map of tags to assign to the resource. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. * `vpcSecurityGroupIds` - (Optional) A list of VPC security group IDs to be used with the replication instance. The VPC security groups must work with the VPC containing the replication instance. +## kerberos_authentication_settings + +-> Additional information can be found in the [Using Kerberos Authentication with AWS Database Migration Service documentation](https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Security.Kerberos.html). + +* `keyCacheSecretIamArn` - (Required) ARN of the IAM role that grants AWS DMS access to the secret containing key cache file for the Kerberos authentication. +* `keyCacheSecretId` - (Required) Secret ID that stores the key cache file required for Kerberos authentication. +* `krb5FileContents` - (Required) Contents of krb5 configuration file required for Kerberos authentication. 
+ ## Attribute Reference This resource exports the following attributes in addition to the arguments above: @@ -204,4 +203,4 @@ Using `terraform import`, import replication instances using the `replicationIns % terraform import aws_dms_replication_instance.test test-dms-replication-instance-tf ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/dms_replication_subnet_group.html.markdown b/website/docs/cdktf/typescript/r/dms_replication_subnet_group.html.markdown index c2f05eeab353..1592827d875e 100644 --- a/website/docs/cdktf/typescript/r/dms_replication_subnet_group.html.markdown +++ b/website/docs/cdktf/typescript/r/dms_replication_subnet_group.html.markdown @@ -108,6 +108,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `replicationSubnetGroupDescription` - (Required) Description for the subnet group. * `replicationSubnetGroupId` - (Required) Name for the replication subnet group. This value is stored as a lowercase string. It must contain no more than 255 alphanumeric characters, periods, spaces, underscores, or hyphens and cannot be `default`. * `subnetIds` - (Required) List of at least 2 EC2 subnet IDs for the subnet group. The subnets must cover at least 2 availability zones. 
@@ -160,4 +161,4 @@ Using `terraform import`, import replication subnet groups using the `replicatio % terraform import aws_dms_replication_subnet_group.test test-dms-replication-subnet-group-tf ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/dms_replication_task.html.markdown b/website/docs/cdktf/typescript/r/dms_replication_task.html.markdown index 554cf5105bb3..4ccc88ef5b0d 100644 --- a/website/docs/cdktf/typescript/r/dms_replication_task.html.markdown +++ b/website/docs/cdktf/typescript/r/dms_replication_task.html.markdown @@ -52,6 +52,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `cdcStartPosition` - (Optional, Conflicts with `cdcStartTime`) Indicates when you want a change data capture (CDC) operation to start. The value can be a RFC3339 formatted date, a checkpoint, or a LSN/SCN format depending on the source engine. For more information see [Determining a CDC native start point](https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Task.CDC.html#CHAP_Task.CDC.StartPoint.Native). * `cdcStartTime` - (Optional, Conflicts with `cdcStartPosition`) RFC3339 formatted date string or UNIX timestamp for the start of the Change Data Capture (CDC) operation. * `migrationType` - (Required) Migration type. Can be one of `full-load | cdc | full-load-and-cdc`. 
@@ -105,4 +106,4 @@ Using `terraform import`, import replication tasks using the `replicationTaskId` % terraform import aws_dms_replication_task.test test-dms-replication-task-tf ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/dms_s3_endpoint.html.markdown b/website/docs/cdktf/typescript/r/dms_s3_endpoint.html.markdown index 6936f8f5e63f..af750d91e7b1 100644 --- a/website/docs/cdktf/typescript/r/dms_s3_endpoint.html.markdown +++ b/website/docs/cdktf/typescript/r/dms_s3_endpoint.html.markdown @@ -128,6 +128,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `addColumnName` - (Optional) Whether to add column name information to the .csv output file. Default is `false`. * `addTrailingPaddingCharacter` - (Optional) Whether to add padding. Default is `false`. (Ignored for source endpoints.) * `bucketFolder` - (Optional) S3 object prefix. @@ -222,4 +223,4 @@ Using `terraform import`, import endpoints using the `endpointId`. For example: % terraform import aws_dms_s3_endpoint.example example-dms-endpoint-tf ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/docdb_cluster.html.markdown b/website/docs/cdktf/typescript/r/docdb_cluster.html.markdown index 932babf5d024..7a7341925e9a 100644 --- a/website/docs/cdktf/typescript/r/docdb_cluster.html.markdown +++ b/website/docs/cdktf/typescript/r/docdb_cluster.html.markdown @@ -23,7 +23,7 @@ phase because a modification has not yet taken place. You can use the ~> **Note:** All arguments including the username and password will be stored in the raw state as plain-text. 
[Read more about sensitive data in state](https://www.terraform.io/docs/state/sensitive-data.html). --> **Note:** Write-Only argument `masterPasswordWo` is available to use in place of `masterPassword`. Write-Only arguments are supported in HashiCorp Terraform 1.11.0 and later. [Learn more](https://developer.hashicorp.com/terraform/language/v1.11.x/resources/ephemeral#write-only-arguments). +-> **Note:** Write-Only argument `masterPasswordWo` is available to use in place of `masterPassword`. Write-Only arguments are supported in HashiCorp Terraform 1.11.0 and later. [Learn more](https://developer.hashicorp.com/terraform/language/resources/ephemeral#write-only-arguments). ## Example Usage @@ -57,12 +57,14 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `allowMajorVersionUpgrade` - (Optional) A value that indicates whether major version upgrades are allowed. Constraints: You must allow major version upgrades when specifying a value for the EngineVersion parameter that is a different major version than the DB cluster's current version. * `applyImmediately` - (Optional) Specifies whether any cluster modifications are applied immediately, or during the next maintenance window. Default is `false`. -* `availabilityZones` - (Optional) A list of EC2 Availability Zones that - instances in the DB cluster can be created in. +* `availabilityZones` - (Optional) A list of EC2 Availability Zones that instances in the DB cluster can be created in. + DocumentDB automatically assigns 3 AZs if less than 3 AZs are configured, which will show as a difference requiring resource recreation next Terraform apply. 
+ We recommend specifying 3 AZs or using [the `lifecycle` configuration block `ignore_changes` argument](https://www.terraform.io/docs/configuration/meta-arguments/lifecycle.html#ignore_changes) if necessary. * `backupRetentionPeriod` - (Optional) The days to retain backups for. Default `1` * `clusterIdentifierPrefix` - (Optional, Forces new resource) Creates a unique cluster identifier beginning with the specified prefix. Conflicts with `clusterIdentifier`. * `clusterIdentifier` - (Optional, Forces new resources) The cluster identifier. If omitted, Terraform will assign a random, unique identifier. @@ -90,6 +92,7 @@ This resource supports the following arguments: Default: A 30-minute window selected at random from an 8-hour block of time per regionE.g., 04:00-09:00 * `preferredMaintenanceWindow` - (Optional) The weekly time range during which system maintenance can occur, in (UTC) e.g., wed:04:00-wed:04:30 * `restoreToPointInTime` - (Optional, Forces new resource) A configuration block for restoring a DB instance to an arbitrary point in time. Requires the `identifier` argument to be set with the name of the new DB instance to be created. See [Restore To Point In Time](#restore-to-point-in-time) below for details. +* `serverlessV2ScalingConfiguration` - (Optional) Scaling configuration of an Amazon DocumentDB Serverless cluster. See [Serverless V2 Scaling Configuration](#serverless-v2-scaling-configuration) below for details. * `skipFinalSnapshot` - (Optional) Determines whether a final DB snapshot is created before the DB cluster is deleted. If true is specified, no DB snapshot is created. If false is specified, a DB snapshot is created before the DB cluster is deleted, using the value from `finalSnapshotIdentifier`. Default is `false`. * `snapshotIdentifier` - (Optional) Specifies whether or not to create this cluster from a snapshot. You can use either the name or ARN when specifying a DB cluster snapshot, or the ARN when specifying a DB snapshot. 
Automated snapshots **should not** be used for this attribute, unless from a different cluster. Automated snapshots are deleted as part of cluster destruction when the resource is replaced. * `storageEncrypted` - (Optional) Specifies whether the DB cluster is encrypted. The default is `false`. @@ -110,16 +113,24 @@ The `restoreToPointInTime` block supports the following arguments: * `sourceClusterIdentifier` - (Required) The identifier of the source DB cluster from which to restore. Must match the identifier of an existing DB cluster. * `useLatestRestorableTime` - (Optional) A boolean value that indicates whether the DB cluster is restored from the latest backup time. Defaults to `false`. Cannot be specified with `restoreToTime`. +### Serverless V2 Scaling Configuration + +The `serverlessV2ScalingConfiguration` block supports the following arguments. +Adding this block (i.e. switching to serverless) or removing it (i.e. switching from serverless) will trigger cluster replacement. + +* `maxCapacity` - (Required) Maximum number of Amazon DocumentDB capacity units (DCUs) for an instance in an Amazon DocumentDB Serverless cluster. Valid values are multiples of 0.5 between 1 and 256. +* `minCapacity` - (Required) Minimum number of Amazon DocumentDB capacity units (DCUs) for an instance in an Amazon DocumentDB Serverless cluster. Valid values are multiples of 0.5 between 0.5 and 256. 
+ ## Attribute Reference This resource exports the following attributes in addition to the arguments above: * `arn` - Amazon Resource Name (ARN) of cluster -* `clusterMembers` – List of DocumentDB Instances that are a part of this cluster +* `clusterMembers` - List of DocumentDB Instances that are a part of this cluster * `clusterResourceId` - The DocumentDB Cluster Resource ID * `endpoint` - The DNS address of the DocumentDB instance * `hostedZoneId` - The Route53 Hosted Zone ID of the endpoint -* `id` - The DocumentDB Cluster Identifier +* `id` - (**Deprecated**) Amazon Resource Name (ARN) of cluster * `readerEndpoint` - A read-only endpoint for the DocumentDB cluster, automatically load-balanced across replicas * `tagsAll` - A map of tags assigned to the resource, including those inherited from the provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). @@ -164,4 +175,4 @@ Using `terraform import`, import DocumentDB Clusters using the `clusterIdentifie % terraform import aws_docdb_cluster.docdb_cluster docdb-prod-cluster ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/docdb_cluster_instance.html.markdown b/website/docs/cdktf/typescript/r/docdb_cluster_instance.html.markdown index 6d0563bb4d3c..30bcffea3995 100644 --- a/website/docs/cdktf/typescript/r/docdb_cluster_instance.html.markdown +++ b/website/docs/cdktf/typescript/r/docdb_cluster_instance.html.markdown @@ -60,13 +60,14 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
* `applyImmediately` - (Optional) Specifies whether any database modifications are applied immediately, or during the next maintenance window. Default is`false`. * `autoMinorVersionUpgrade` - (Optional) This parameter does not apply to Amazon DocumentDB. Amazon DocumentDB does not perform minor version upgrades regardless of the value set (see [docs](https://docs.aws.amazon.com/documentdb/latest/developerguide/API_DBInstance.html)). Default `true`. * `availabilityZone` - (Optional, Computed) The EC2 Availability Zone that the DB instance is created in. See [docs](https://docs.aws.amazon.com/documentdb/latest/developerguide/API_CreateDBInstance.html) about the details. * `caCertIdentifier` - (Optional) The identifier of the certificate authority (CA) certificate for the DB instance. * `clusterIdentifier` - (Required) The identifier of the [`aws_docdb_cluster`](/docs/providers/aws/r/docdb_cluster.html) in which to launch this instance. -* `copyTagsToSnapshot` – (Optional, boolean) Copy all DB instance `tags` to snapshots. Default is `false`. +* `copyTagsToSnapshot` - (Optional, boolean) Copy all DB instance `tags` to snapshots. Default is `false`. * `enablePerformanceInsights` - (Optional) A value that indicates whether to enable Performance Insights for the DB Instance. Default `false`. See [docs] (https://docs.aws.amazon.com/documentdb/latest/developerguide/performance-insights.html) about the details. * `engine` - (Optional) The name of the database engine to be used for the DocumentDB instance. Defaults to `docdb`. Valid Values: `docdb`. * `identifier` - (Optional, Forces new resource) The identifier for the DocumentDB instance, if omitted, Terraform will assign a random, unique identifier. @@ -115,7 +116,7 @@ This resource exports the following attributes in addition to the arguments abov * `preferredBackupWindow` - The daily time range during which automated backups are created if automated backups are enabled.
* `storageEncrypted` - Specifies whether the DB cluster is encrypted. * `tagsAll` - A map of tags assigned to the resource, including those inherited from the provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). -* `writer` – Boolean indicating if this instance is writable. `False` indicates this instance is a read replica. +* `writer` - Boolean indicating if this instance is writable. `False` indicates this instance is a read replica. For more detailed documentation about each argument, refer to the [AWS official documentation](https://docs.aws.amazon.com/cli/latest/reference/docdb/create-db-instance.html). @@ -167,4 +171,4 @@ Using `terraform import`, import DocumentDB Cluster Instances using the `identif % terraform import aws_docdb_cluster_instance.prod_instance_1 aurora-cluster-instance-1 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/docdb_cluster_parameter_group.html.markdown b/website/docs/cdktf/typescript/r/docdb_cluster_parameter_group.html.markdown index 35e4c97d7b66..cffe92efffae 100644 --- a/website/docs/cdktf/typescript/r/docdb_cluster_parameter_group.html.markdown +++ b/website/docs/cdktf/typescript/r/docdb_cluster_parameter_group.html.markdown @@ -46,6 +46,7 @@ This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference).
* `name` - (Optional, Forces new resource) The name of the DocumentDB cluster parameter group. If omitted, Terraform will assign a random, unique name. * `namePrefix` - (Optional, Forces new resource) Creates a unique name beginning with the specified prefix. Conflicts with `name`. * `family` - (Required, Forces new resource) The family of the DocumentDB cluster parameter group. @@ -101,4 +102,4 @@ Using `terraform import`, import DocumentDB Cluster Parameter Groups using the ` % terraform import aws_docdb_cluster_parameter_group.cluster_pg production-pg-1 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/docdb_cluster_snapshot.html.markdown b/website/docs/cdktf/typescript/r/docdb_cluster_snapshot.html.markdown index 534553b740fc..8dfdb05576eb 100644 --- a/website/docs/cdktf/typescript/r/docdb_cluster_snapshot.html.markdown +++ b/website/docs/cdktf/typescript/r/docdb_cluster_snapshot.html.markdown @@ -39,6 +39,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `dbClusterIdentifier` - (Required) The DocumentDB Cluster Identifier from which to take the snapshot. * `dbClusterSnapshotIdentifier` - (Required) The Identifier for the snapshot. @@ -52,7 +53,7 @@ This resource exports the following attributes in addition to the arguments abov * `engineVersion` - Version of the database engine for this DocumentDB cluster snapshot. * `kmsKeyId` - If storage_encrypted is true, the AWS KMS key identifier for the encrypted DocumentDB cluster snapshot. * `port` - Port that the DocumentDB cluster was listening on at the time of the snapshot. 
-* `source_db_cluster_snapshot_identifier` - The DocumentDB Cluster Snapshot Arn that the DocumentDB Cluster Snapshot was copied from. It only has value in case of cross customer or cross region copy. +* `sourceDbClusterSnapshotIdentifier` - The DocumentDB Cluster Snapshot Arn that the DocumentDB Cluster Snapshot was copied from. It only has value in case of cross customer or cross region copy. * `storageEncrypted` - Specifies whether the DocumentDB cluster snapshot is encrypted. * `status` - The status of this DocumentDB Cluster Snapshot. * `vpcId` - The VPC ID associated with the DocumentDB cluster snapshot. @@ -95,4 +96,4 @@ Using `terraform import`, import `aws_docdb_cluster_snapshot` using the cluster % terraform import aws_docdb_cluster_snapshot.example my-cluster-snapshot ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/docdb_event_subscription.html.markdown b/website/docs/cdktf/typescript/r/docdb_event_subscription.html.markdown index 38e068b994b8..464db1558778 100644 --- a/website/docs/cdktf/typescript/r/docdb_event_subscription.html.markdown +++ b/website/docs/cdktf/typescript/r/docdb_event_subscription.html.markdown @@ -67,6 +67,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Optional) The name of the DocumentDB event subscription. By default generated by Terraform. * `namePrefix` - (Optional) The name of the DocumentDB event subscription. Conflicts with `name`. * `snsTopic` - (Required) The SNS topic to send events to. 
@@ -125,4 +126,4 @@ Using `terraform import`, import DocumentDB Event Subscriptions using the `name` % terraform import aws_docdb_event_subscription.example event-sub ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/docdb_global_cluster.html.markdown b/website/docs/cdktf/typescript/r/docdb_global_cluster.html.markdown index 466ed9f21fd3..033266d2f374 100644 --- a/website/docs/cdktf/typescript/r/docdb_global_cluster.html.markdown +++ b/website/docs/cdktf/typescript/r/docdb_global_cluster.html.markdown @@ -136,6 +136,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `globalClusterIdentifier` - (Required, Forces new resources) The global cluster identifier. * `databaseName` - (Optional, Forces new resources) Name for an automatically created database on cluster creation. * `deletionProtection` - (Optional) If the Global Cluster should have deletion protection enabled. The database can't be deleted when this value is set to `true`. The default is `false`. 
@@ -220,4 +221,4 @@ class MyConvertedCode extends TerraformStack { ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/docdb_subnet_group.html.markdown b/website/docs/cdktf/typescript/r/docdb_subnet_group.html.markdown index 79e2baebae29..9a0ac195870f 100644 --- a/website/docs/cdktf/typescript/r/docdb_subnet_group.html.markdown +++ b/website/docs/cdktf/typescript/r/docdb_subnet_group.html.markdown @@ -42,6 +42,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Optional, Forces new resource) The name of the docDB subnet group. If omitted, Terraform will assign a random, unique name. * `namePrefix` - (Optional, Forces new resource) Creates a unique name beginning with the specified prefix. Conflicts with `name`. * `description` - (Optional) The description of the docDB subnet group. Defaults to "Managed by Terraform". @@ -88,4 +89,4 @@ Using `terraform import`, import DocumentDB Subnet groups using the `name`. 
For % terraform import aws_docdb_subnet_group.default production-subnet-group ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/docdbelastic_cluster.html.markdown b/website/docs/cdktf/typescript/r/docdbelastic_cluster.html.markdown index 3145d8593fe7..7efda4d3f8ff 100644 --- a/website/docs/cdktf/typescript/r/docdbelastic_cluster.html.markdown +++ b/website/docs/cdktf/typescript/r/docdbelastic_cluster.html.markdown @@ -54,6 +54,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `backupRetentionPeriod` - (Optional) The number of days for which automatic snapshots are retained. It should be in between 1 and 35. If not specified, the default value of 1 is set. * `kmsKeyId` - (Optional) ARN of a KMS key that is used to encrypt the Elastic DocumentDB cluster. If not specified, the default encryption key that KMS creates for your account is used. * `preferredBackupWindow` - (Optional) The daily time range during which automated backups are created if automated backups are enabled, as determined by the `backupRetentionPeriod`. @@ -82,7 +83,28 @@ This resource exports the following attributes in addition to the arguments abov ## Import -In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import OpenSearchServerless Access Policy using the `name` and `type` arguments separated by a slash (`/`). For example: +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. 
For example: + +```terraform +import { + to = aws_docdbelastic_cluster.example + identity = { + "arn" = "arn:aws:docdb-elastic:us-east-1:000011112222:cluster/12345678-7abc-def0-1234-56789abcdef" + } +} + +resource "aws_docdbelastic_cluster" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the DocDB Elastic cluster. + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import DocDB Elastic Cluster using the `arn`. For example: ```typescript // DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug @@ -112,4 +134,4 @@ Using `terraform import`, import DocDB (DocumentDB) Elastic Cluster using the `a % terraform import aws_docdbelastic_cluster.example arn:aws:docdb-elastic:us-east-1:000011112222:cluster/12345678-7abc-def0-1234-56789abcdef ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/drs_replication_configuration_template.html.markdown b/website/docs/cdktf/typescript/r/drs_replication_configuration_template.html.markdown index 3187434cd7b3..51067aa22ad0 100644 --- a/website/docs/cdktf/typescript/r/drs_replication_configuration_template.html.markdown +++ b/website/docs/cdktf/typescript/r/drs_replication_configuration_template.html.markdown @@ -98,6 +98,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `autoReplicateNewDisks` - (Optional) Whether to allow the AWS replication agent to automatically replicate newly added disks. 
* `tags` - (Optional) Set of tags to be associated with the Replication Configuration Template resource. @@ -159,4 +160,4 @@ Using `terraform import`, import DRS Replication Configuration Template using th % terraform import aws_drs_replication_configuration_template.example templateid ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/dsql_cluster.html.markdown b/website/docs/cdktf/typescript/r/dsql_cluster.html.markdown index 6348b58fa614..50c27fba46c7 100644 --- a/website/docs/cdktf/typescript/r/dsql_cluster.html.markdown +++ b/website/docs/cdktf/typescript/r/dsql_cluster.html.markdown @@ -24,12 +24,12 @@ import { TerraformStack } from "cdktf"; * Provider bindings are generated by running `cdktf get`. * See https://cdk.tf/provider-generation for more details. */ -import { DsqlCluster } from "./.gen/providers/aws/"; +import { DsqlCluster } from "./.gen/providers/aws/dsql-cluster"; class MyConvertedCode extends TerraformStack { constructor(scope: Construct, name: string) { super(scope, name); new DsqlCluster(this, "example", { - deletion_protection_enabled: true, + deletionProtectionEnabled: true, tags: { Name: "TestCluster", }, @@ -43,10 +43,14 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: -* `deletionProtectionEnabled` - (Required) Whether deletion protection is enabled in this cluster. -* `kms_encryption_key` - (Optional) The ARN of the AWS KMS key that encrypts data in the DSQL Cluster, or `"AWS_OWNED_KMS_KEY"`. -* `multi_region_properties` - (Optional) Multi-region properties of the DSQL Cluster. - * `witness_region` - (Required) Witness region for the multi-region clusters. Setting this makes this cluster a multi-region cluster. Changing it recreates the resource. +* `deletionProtectionEnabled` - (Optional) Whether deletion protection is enabled in this cluster. + Default value is `false`. 
+* `forceDestroy` - (Optional) Destroys cluster even if `deletionProtectionEnabled` is set to `true`. + Default value is `false`. +* `kmsEncryptionKey` - (Optional) The ARN of the AWS KMS key that encrypts data in the DSQL Cluster, or `"AWS_OWNED_KMS_KEY"`. +* `multiRegionProperties` - (Optional) Multi-region properties of the DSQL Cluster. + * `witnessRegion` - (Required) Witness region for the multi-region clusters. Setting this makes this cluster a multi-region cluster. Changing it recreates the resource. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `tags` - (Optional) Set of tags to be associated with the AWS DSQL Cluster resource. ## Attribute Reference @@ -54,14 +58,14 @@ This resource supports the following arguments: This resource exports the following attributes in addition to the arguments above: * `arn` - ARN of the Cluster. -* `encryption_details` - Encryption configuration details for the DSQL Cluster. +* `encryptionDetails` - Encryption configuration details for the DSQL Cluster. * `encryption_status` - The status of encryption for the DSQL Cluster. * `encryptionType` - The type of encryption that protects the data on the DSQL Cluster. * `identifier` - Cluster Identifier. -* `multi_region_properties` - Multi-region properties of the DSQL Cluster. +* `multiRegionProperties` - Multi-region properties of the DSQL Cluster. * `clusters` - List of DSQL Cluster ARNs peered to this cluster. * `tagsAll` - Map of tags assigned to the resource, including those inherited from the provider [`defaultTags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block). -* `vpc_endpoint_service_name` - The DSQL Cluster's VPC endpoint service name. 
+* `vpcEndpointServiceName` - The DSQL Cluster's VPC endpoint service name. ## Timeouts @@ -83,7 +87,7 @@ import { TerraformStack } from "cdktf"; * Provider bindings are generated by running `cdktf get`. * See https://cdk.tf/provider-generation for more details. */ -import { DsqlCluster } from "./.gen/providers/aws/"; +import { DsqlCluster } from "./.gen/providers/aws/dsql-cluster"; class MyConvertedCode extends TerraformStack { constructor(scope: Construct, name: string) { super(scope, name); @@ -103,4 +107,4 @@ Using `terraform import`, import DSQL Cluster using the `identifier`. For exampl % terraform import aws_dsql_cluster.example abcde1f234ghijklmnop5qr6st ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/dsql_cluster_peering.html.markdown b/website/docs/cdktf/typescript/r/dsql_cluster_peering.html.markdown index 01411f23a6e7..ee1c7eb5635b 100644 --- a/website/docs/cdktf/typescript/r/dsql_cluster_peering.html.markdown +++ b/website/docs/cdktf/typescript/r/dsql_cluster_peering.html.markdown @@ -19,26 +19,27 @@ Terraform resource for managing an Amazon Aurora DSQL Cluster Peering. ```typescript // DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug import { Construct } from "constructs"; -import { Fn, TerraformStack } from "cdktf"; +import { Fn, Token, TerraformStack } from "cdktf"; /* * Provider bindings are generated by running `cdktf get`. * See https://cdk.tf/provider-generation for more details. 
*/ -import { DsqlCluster, DsqlClusterPeering } from "./.gen/providers/aws/"; +import { DsqlCluster } from "./.gen/providers/aws/dsql-cluster"; +import { DsqlClusterPeering } from "./.gen/providers/aws/dsql-cluster-peering"; class MyConvertedCode extends TerraformStack { constructor(scope: Construct, name: string) { super(scope, name); const example1 = new DsqlCluster(this, "example_1", { - multi_region_properties: [ + multiRegionProperties: [ { - witness_region: "us-west-2", + witnessRegion: "us-west-2", }, ], }); const example2 = new DsqlCluster(this, "example_2", { - multi_region_properties: [ + multiRegionProperties: [ { - witness_region: "us-west-2", + witnessRegion: "us-west-2", }, ], provider: alternate, @@ -49,10 +50,12 @@ class MyConvertedCode extends TerraformStack { { clusters: [example2.arn], identifier: example1.identifier, - witness_region: Fn.lookupNested(example1.multiRegionProperties, [ - "0", - "witness_region", - ]), + witnessRegion: Token.asString( + Fn.lookupNested(example1.multiRegionProperties, [ + "0", + "witness_region", + ]) + ), } ); /*This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match.*/ @@ -64,10 +67,12 @@ class MyConvertedCode extends TerraformStack { clusters: [example1.arn], identifier: example2.identifier, provider: alternate, - witness_region: Fn.lookupNested(example2.multiRegionProperties, [ - "0", - "witness_region", - ]), + witnessRegion: Token.asString( + Fn.lookupNested(example2.multiRegionProperties, [ + "0", + "witness_region", + ]) + ), } ); /*This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match.*/ @@ -83,7 +88,8 @@ This resource supports the following arguments: * `clusters` - (Required) List of DSQL Cluster ARNs to be peered to this cluster. * `identifier` - (Required) DSQL Cluster Identifier. -* `witness_region` - (Required) Witness region for a multi-region cluster. 
+* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `witnessRegion` - (Required) Witness region for a multi-region cluster. ## Attribute Reference @@ -107,7 +113,7 @@ import { TerraformStack } from "cdktf"; * Provider bindings are generated by running `cdktf get`. * See https://cdk.tf/provider-generation for more details. */ -import { DsqlClusterPeering } from "./.gen/providers/aws/"; +import { DsqlClusterPeering } from "./.gen/providers/aws/dsql-cluster-peering"; class MyConvertedCode extends TerraformStack { constructor(scope: Construct, name: string) { super(scope, name); @@ -127,4 +133,4 @@ Using `terraform import`, import DSQL Cluster Peering using the `identifier`. Fo % terraform import aws_dsql_cluster_peering.example cluster-id-12345678 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/dx_bgp_peer.html.markdown b/website/docs/cdktf/typescript/r/dx_bgp_peer.html.markdown index 9a1d663fccd6..3f55bdd87284 100644 --- a/website/docs/cdktf/typescript/r/dx_bgp_peer.html.markdown +++ b/website/docs/cdktf/typescript/r/dx_bgp_peer.html.markdown @@ -40,6 +40,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `addressFamily` - (Required) The address family for the BGP peer. `ipv4 ` or `ipv6`. * `bgpAsn` - (Required) The autonomous system (AS) number for Border Gateway Protocol (BGP) configuration. 
* `virtualInterfaceId` - (Required) The ID of the Direct Connect virtual interface on which to create the BGP peer. @@ -65,4 +66,4 @@ This resource exports the following attributes in addition to the arguments abov - `create` - (Default `10m`) - `delete` - (Default `10m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/dx_connection.html.markdown b/website/docs/cdktf/typescript/r/dx_connection.html.markdown index 0588178381b4..1dcc2af947c9 100644 --- a/website/docs/cdktf/typescript/r/dx_connection.html.markdown +++ b/website/docs/cdktf/typescript/r/dx_connection.html.markdown @@ -95,6 +95,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `bandwidth` - (Required) The bandwidth of the connection. Valid values for dedicated connections: 1Gbps, 10Gbps, 100Gbps, and 400Gbps. Valid values for hosted connections: 50Mbps, 100Mbps, 200Mbps, 300Mbps, 400Mbps, 500Mbps, 1Gbps, 2Gbps, 5Gbps, 10Gbps, and 25Gbps. Case sensitive. Refer to the AWS Direct Connection supported bandwidths for [Dedicated Connections](https://docs.aws.amazon.com/directconnect/latest/UserGuide/dedicated_connection.html) and [Hosted Connections](https://docs.aws.amazon.com/directconnect/latest/UserGuide/hosted_connection.html). * `encryptionMode` - (Optional) The connection MAC Security (MACsec) encryption mode. MAC Security (MACsec) is only available on dedicated connections. Valid values are `no_encrypt`, `should_encrypt`, and `must_encrypt`. * `location` - (Required) The AWS Direct Connect location where the connection is located. 
See [DescribeLocations](https://docs.aws.amazon.com/directconnect/latest/APIReference/API_DescribeLocations.html) for the list of AWS Direct Connect locations. Use `locationCode`. @@ -155,4 +156,4 @@ Using `terraform import`, import Direct Connect connections using the connection % terraform import aws_dx_connection.test_connection dxcon-ffre0ec3 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/dx_connection_association.html.markdown b/website/docs/cdktf/typescript/r/dx_connection_association.html.markdown index f7232578899f..cbdd9ff9a8c0 100644 --- a/website/docs/cdktf/typescript/r/dx_connection_association.html.markdown +++ b/website/docs/cdktf/typescript/r/dx_connection_association.html.markdown @@ -59,6 +59,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `connectionId` - (Required) The ID of the connection. * `lagId` - (Required) The ID of the LAG with which to associate the connection. @@ -66,4 +67,4 @@ This resource supports the following arguments: This resource exports no additional attributes. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/dx_connection_confirmation.html.markdown b/website/docs/cdktf/typescript/r/dx_connection_confirmation.html.markdown index 00d1037aacd2..ca5ed841fdb0 100644 --- a/website/docs/cdktf/typescript/r/dx_connection_confirmation.html.markdown +++ b/website/docs/cdktf/typescript/r/dx_connection_confirmation.html.markdown @@ -38,6 +38,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `connectionId` - (Required) The ID of the hosted connection. ### Removing `aws_dx_connection_confirmation` from your configuration @@ -51,4 +52,4 @@ This resource exports the following attributes in addition to the arguments abov * `id` - The ID of the connection. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/dx_gateway.html.markdown b/website/docs/cdktf/typescript/r/dx_gateway.html.markdown index 6c890bc843ce..e6446225d176 100644 --- a/website/docs/cdktf/typescript/r/dx_gateway.html.markdown +++ b/website/docs/cdktf/typescript/r/dx_gateway.html.markdown @@ -59,6 +59,32 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. 
For example: + +```terraform +import { + to = aws_dx_gateway.example + identity = { + id = "abcd1234-dcba-5678-be23-cdef9876ab45" + } +} + +resource "aws_dx_gateway" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `id` (String) ID of the Direct Connect Gateway. + +#### Optional + +* `accountId` (String) AWS Account where this resource is managed. +* `region` (String) Region where this resource is managed. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Direct Connect Gateways using the gateway `id`. For example: ```typescript @@ -75,7 +101,7 @@ class MyConvertedCode extends TerraformStack { super(scope, name); DxGateway.generateConfigForImport( this, - "test", + "example", "abcd1234-dcba-5678-be23-cdef9876ab45" ); } @@ -86,7 +112,7 @@ class MyConvertedCode extends TerraformStack { Using `terraform import`, import Direct Connect Gateways using the gateway `id`. For example: ```console -% terraform import aws_dx_gateway.test abcd1234-dcba-5678-be23-cdef9876ab45 +% terraform import aws_dx_gateway.example abcd1234-dcba-5678-be23-cdef9876ab45 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/dx_gateway_association.html.markdown b/website/docs/cdktf/typescript/r/dx_gateway_association.html.markdown index 84d70e2e50df..78c33503ffe0 100644 --- a/website/docs/cdktf/typescript/r/dx_gateway_association.html.markdown +++ b/website/docs/cdktf/typescript/r/dx_gateway_association.html.markdown @@ -160,6 +160,7 @@ A full example of how to create a VPN Gateway in one AWS account, create a Direc This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `dxGatewayId` - (Required) The ID of the Direct Connect gateway. * `associatedGatewayId` - (Optional) The ID of the VGW or transit gateway with which to associate the Direct Connect gateway. Used for single account Direct Connect gateway associations. @@ -177,10 +178,10 @@ Used for cross-account Direct Connect gateway associations. This resource exports the following attributes in addition to the arguments above: -* `id` - The ID of the Direct Connect gateway association resource. * `associatedGatewayType` - The type of the associated gateway, `transitGateway` or `virtualPrivateGateway`. * `dxGatewayAssociationId` - The ID of the Direct Connect gateway association. * `dxGatewayOwnerAccountId` - The ID of the AWS account that owns the Direct Connect gateway. +* `transitGatewayAttachmentId` - The ID of the Transit Gateway Attachment when the type is `transitGateway`. ## Timeouts @@ -222,4 +223,4 @@ Using `terraform import`, import Direct Connect gateway associations using `dxGa % terraform import aws_dx_gateway_association.example 345508c3-7215-4aef-9832-07c125d5bd0f/vgw-98765432 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/dx_gateway_association_proposal.html.markdown b/website/docs/cdktf/typescript/r/dx_gateway_association_proposal.html.markdown index 9eb8b29b4cc0..7b11be1ef39a 100644 --- a/website/docs/cdktf/typescript/r/dx_gateway_association_proposal.html.markdown +++ b/website/docs/cdktf/typescript/r/dx_gateway_association_proposal.html.markdown @@ -44,6 +44,7 @@ A full example of how to create a VPN Gateway in one AWS account, create a Direc This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `associatedGatewayId` - (Required) The ID of the VGW or transit gateway with which to associate the Direct Connect gateway. * `dxGatewayId` - (Required) Direct Connect Gateway identifier. * `dxGatewayOwnerAccountId` - (Required) AWS Account identifier of the Direct Connect Gateway's owner. @@ -126,4 +127,4 @@ Using a proposal ID, Direct Connect Gateway ID and associated gateway ID separat The latter case is useful when a previous proposal has been accepted and deleted by AWS. The `aws_dx_gateway_association_proposal` resource will then represent a pseudo-proposal for the same Direct Connect Gateway and associated gateway. If no previous proposal is available, use a tool like [`uuidgen`](http://manpages.ubuntu.com/manpages/bionic/man1/uuidgen.1.html) to generate a new random pseudo-proposal ID. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/dx_hosted_connection.html.markdown b/website/docs/cdktf/typescript/r/dx_hosted_connection.html.markdown index a836b96dc6a3..cbb473f63d11 100644 --- a/website/docs/cdktf/typescript/r/dx_hosted_connection.html.markdown +++ b/website/docs/cdktf/typescript/r/dx_hosted_connection.html.markdown @@ -52,16 +52,17 @@ This resource supports the following arguments: This resource exports the following attributes in addition to the arguments above: -* `id` - The ID of the connection. -* `jumboFrameCapable` - Boolean value representing if jumbo frames have been enabled for this connection. -* `hasLogicalRedundancy` - Indicates whether the connection supports a secondary BGP peer in the same address family (IPv4/IPv6). * `awsDevice` - The Direct Connect endpoint on which the physical connection terminates. -* `state` - The state of the connection. 
Possible values include: ordering, requested, pending, available, down, deleting, deleted, rejected, unknown. See [AllocateHostedConnection](https://docs.aws.amazon.com/directconnect/latest/APIReference/API_AllocateHostedConnection.html) for a description of each connection state. +* `connectionRegion` - The AWS Region where the connection is located. +* `hasLogicalRedundancy` - Indicates whether the connection supports a secondary BGP peer in the same address family (IPv4/IPv6). +* `id` - The ID of the hosted connection. +* `jumboFrameCapable` - Boolean value representing if jumbo frames have been enabled for this connection. * `lagId` - The ID of the LAG. * `loaIssueTime` - The time of the most recent call to [DescribeLoa](https://docs.aws.amazon.com/directconnect/latest/APIReference/API_DescribeLoa.html) for this connection. * `location` - The location of the connection. * `partnerName` - The name of the AWS Direct Connect service provider associated with the connection. * `providerName` - The name of the service provider associated with the connection. -* `region` - The AWS Region where the connection is located. +* `region` - (**Deprecated**) The AWS Region where the connection is located. Use `connectionRegion` instead. +* `state` - The state of the connection. Possible values include: ordering, requested, pending, available, down, deleting, deleted, rejected, unknown. See [AllocateHostedConnection](https://docs.aws.amazon.com/directconnect/latest/APIReference/API_AllocateHostedConnection.html) for a description of each connection state. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/dx_hosted_private_virtual_interface.html.markdown b/website/docs/cdktf/typescript/r/dx_hosted_private_virtual_interface.html.markdown index fa9abfce1de8..bd1d63dd2cdb 100644 --- a/website/docs/cdktf/typescript/r/dx_hosted_private_virtual_interface.html.markdown +++ b/website/docs/cdktf/typescript/r/dx_hosted_private_virtual_interface.html.markdown @@ -47,6 +47,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `addressFamily` - (Required) The address family for the BGP peer. `ipv4 ` or `ipv6`. * `bgpAsn` - (Required) The autonomous system (AS) number for Border Gateway Protocol (BGP) configuration. * `connectionId` - (Required) The ID of the Direct Connect connection (or LAG) on which to create the virtual interface. 
@@ -107,4 +108,4 @@ Using `terraform import`, import Direct Connect hosted private virtual interface % terraform import aws_dx_hosted_private_virtual_interface.test dxvif-33cc44dd ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/dx_hosted_private_virtual_interface_accepter.html.markdown b/website/docs/cdktf/typescript/r/dx_hosted_private_virtual_interface_accepter.html.markdown index 5c2ab08f8ddf..c09b859723fd 100644 --- a/website/docs/cdktf/typescript/r/dx_hosted_private_virtual_interface_accepter.html.markdown +++ b/website/docs/cdktf/typescript/r/dx_hosted_private_virtual_interface_accepter.html.markdown @@ -76,6 +76,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `virtualInterfaceId` - (Required) The ID of the Direct Connect virtual interface to accept. * `dxGatewayId` - (Optional) The ID of the Direct Connect gateway to which to connect the virtual interface. * `tags` - (Optional) A map of tags to assign to the resource. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. 
@@ -136,4 +137,4 @@ Using `terraform import`, import Direct Connect hosted private virtual interface % terraform import aws_dx_hosted_private_virtual_interface_accepter.test dxvif-33cc44dd ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/dx_hosted_public_virtual_interface.html.markdown b/website/docs/cdktf/typescript/r/dx_hosted_public_virtual_interface.html.markdown index a274deb1cec5..6bc42759dfdc 100644 --- a/website/docs/cdktf/typescript/r/dx_hosted_public_virtual_interface.html.markdown +++ b/website/docs/cdktf/typescript/r/dx_hosted_public_virtual_interface.html.markdown @@ -50,6 +50,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `addressFamily` - (Required) The address family for the BGP peer. `ipv4 ` or `ipv6`. * `bgpAsn` - (Required) The autonomous system (AS) number for Border Gateway Protocol (BGP) configuration. * `connectionId` - (Required) The ID of the Direct Connect connection (or LAG) on which to create the virtual interface. 
@@ -108,4 +109,4 @@ Using `terraform import`, import Direct Connect hosted public virtual interfaces % terraform import aws_dx_hosted_public_virtual_interface.test dxvif-33cc44dd ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/dx_hosted_public_virtual_interface_accepter.html.markdown b/website/docs/cdktf/typescript/r/dx_hosted_public_virtual_interface_accepter.html.markdown index a7f83c2919bb..f110436d50d8 100644 --- a/website/docs/cdktf/typescript/r/dx_hosted_public_virtual_interface_accepter.html.markdown +++ b/website/docs/cdktf/typescript/r/dx_hosted_public_virtual_interface_accepter.html.markdown @@ -73,6 +73,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `virtualInterfaceId` - (Required) The ID of the Direct Connect virtual interface to accept. * `tags` - (Optional) A map of tags to assign to the resource. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. 
@@ -131,4 +132,4 @@ Using `terraform import`, import Direct Connect hosted public virtual interfaces % terraform import aws_dx_hosted_public_virtual_interface_accepter.test dxvif-33cc44dd ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/dx_hosted_transit_virtual_interface.html.markdown b/website/docs/cdktf/typescript/r/dx_hosted_transit_virtual_interface.html.markdown index bc1eb2acf87c..282695b92c27 100644 --- a/website/docs/cdktf/typescript/r/dx_hosted_transit_virtual_interface.html.markdown +++ b/website/docs/cdktf/typescript/r/dx_hosted_transit_virtual_interface.html.markdown @@ -48,6 +48,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `addressFamily` - (Required) The address family for the BGP peer. `ipv4 ` or `ipv6`. * `bgpAsn` - (Required) The autonomous system (AS) number for Border Gateway Protocol (BGP) configuration. * `connectionId` - (Required) The ID of the Direct Connect connection (or LAG) on which to create the virtual interface. 
@@ -108,4 +109,4 @@ Using `terraform import`, import Direct Connect hosted transit virtual interface % terraform import aws_dx_hosted_transit_virtual_interface.test dxvif-33cc44dd ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/dx_hosted_transit_virtual_interface_accepter.html.markdown b/website/docs/cdktf/typescript/r/dx_hosted_transit_virtual_interface_accepter.html.markdown index 2405b849f43e..bcb328fdf729 100644 --- a/website/docs/cdktf/typescript/r/dx_hosted_transit_virtual_interface_accepter.html.markdown +++ b/website/docs/cdktf/typescript/r/dx_hosted_transit_virtual_interface_accepter.html.markdown @@ -80,6 +80,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `dxGatewayId` - (Required) The ID of the [Direct Connect gateway](dx_gateway.html) to which to connect the virtual interface. * `virtualInterfaceId` - (Required) The ID of the Direct Connect virtual interface to accept. * `tags` - (Optional) A map of tags to assign to the resource. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. 
@@ -131,4 +132,4 @@ Using `terraform import`, import Direct Connect hosted transit virtual interface % terraform import aws_dx_hosted_transit_virtual_interface_accepter.test dxvif-33cc44dd ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/dx_lag.html.markdown b/website/docs/cdktf/typescript/r/dx_lag.html.markdown index 15c8ad570cd7..8d0d7f8344b8 100644 --- a/website/docs/cdktf/typescript/r/dx_lag.html.markdown +++ b/website/docs/cdktf/typescript/r/dx_lag.html.markdown @@ -43,6 +43,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The name of the LAG. * `connectionsBandwidth` - (Required) The bandwidth of the individual dedicated connections bundled by the LAG. Valid values: 1Gbps, 10Gbps, 100Gbps, and 400Gbps. Case sensitive. Refer to the AWS Direct Connection supported bandwidths for [Dedicated Connections](https://docs.aws.amazon.com/directconnect/latest/UserGuide/dedicated_connection.html). * `location` - (Required) The AWS Direct Connect location in which the LAG should be allocated. See [DescribeLocations](https://docs.aws.amazon.com/directconnect/latest/APIReference/API_DescribeLocations.html) for the list of AWS Direct Connect locations. Use `locationCode`. @@ -90,4 +91,4 @@ Using `terraform import`, import Direct Connect LAGs using the LAG `id`. 
For exa % terraform import aws_dx_lag.test_lag dxlag-fgnsp5rq ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/dx_macsec_key_association.html.markdown b/website/docs/cdktf/typescript/r/dx_macsec_key_association.html.markdown index 06d04115fcae..a6f5905363bb 100644 --- a/website/docs/cdktf/typescript/r/dx_macsec_key_association.html.markdown +++ b/website/docs/cdktf/typescript/r/dx_macsec_key_association.html.markdown @@ -90,6 +90,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `cak` - (Optional) The MAC Security (MACsec) CAK to associate with the dedicated connection. The valid values are 64 hexadecimal characters (0-9, A-E). Required if using `ckn`. * `ckn` - (Optional) The MAC Security (MACsec) CKN to associate with the dedicated connection. The valid values are 64 hexadecimal characters (0-9, A-E). Required if using `cak`. * `connectionId` - (Required) The ID of the dedicated Direct Connect connection. The connection must be a dedicated connection in the `AVAILABLE` state. @@ -105,4 +106,4 @@ This resource exports the following attributes in addition to the arguments abov * `startOn` - The date in UTC format that the MAC Security (MACsec) secret key takes effect. * `state` - The state of the MAC Security (MACsec) secret key. The possible values are: associating, associated, disassociating, disassociated. See [MacSecKey](https://docs.aws.amazon.com/directconnect/latest/APIReference/API_MacSecKey.html#DX-Type-MacSecKey-state) for descriptions of each state. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/dx_private_virtual_interface.html.markdown b/website/docs/cdktf/typescript/r/dx_private_virtual_interface.html.markdown index cb2d977cf1fc..991386bae01d 100644 --- a/website/docs/cdktf/typescript/r/dx_private_virtual_interface.html.markdown +++ b/website/docs/cdktf/typescript/r/dx_private_virtual_interface.html.markdown @@ -42,6 +42,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `addressFamily` - (Required) The address family for the BGP peer. `ipv4 ` or `ipv6`. * `bgpAsn` - (Required) The autonomous system (AS) number for Border Gateway Protocol (BGP) configuration. * `connectionId` - (Required) The ID of the Direct Connect connection (or LAG) on which to create the virtual interface. 
@@ -107,4 +108,4 @@ Using `terraform import`, import Direct Connect private virtual interfaces using % terraform import aws_dx_private_virtual_interface.test dxvif-33cc44dd ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/dx_public_virtual_interface.html.markdown b/website/docs/cdktf/typescript/r/dx_public_virtual_interface.html.markdown index ab4b770357ea..24d692436c84 100644 --- a/website/docs/cdktf/typescript/r/dx_public_virtual_interface.html.markdown +++ b/website/docs/cdktf/typescript/r/dx_public_virtual_interface.html.markdown @@ -45,6 +45,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `addressFamily` - (Required) The address family for the BGP peer. `ipv4 ` or `ipv6`. * `bgpAsn` - (Required) The autonomous system (AS) number for Border Gateway Protocol (BGP) configuration. * `connectionId` - (Required) The ID of the Direct Connect connection (or LAG) on which to create the virtual interface. 
@@ -104,4 +105,4 @@ Using `terraform import`, import Direct Connect public virtual interfaces using % terraform import aws_dx_public_virtual_interface.test dxvif-33cc44dd ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/dx_transit_virtual_interface.html.markdown b/website/docs/cdktf/typescript/r/dx_transit_virtual_interface.html.markdown index 031127a13766..cd09e7773309 100644 --- a/website/docs/cdktf/typescript/r/dx_transit_virtual_interface.html.markdown +++ b/website/docs/cdktf/typescript/r/dx_transit_virtual_interface.html.markdown @@ -55,6 +55,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `addressFamily` - (Required) The address family for the BGP peer. `ipv4 ` or `ipv6`. * `bgpAsn` - (Required) The autonomous system (AS) number for Border Gateway Protocol (BGP) configuration. * `connectionId` - (Required) The ID of the Direct Connect connection (or LAG) on which to create the virtual interface. 
@@ -119,4 +120,4 @@ Using `terraform import`, import Direct Connect transit virtual interfaces using % terraform import aws_dx_transit_virtual_interface.test dxvif-33cc44dd ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/dynamodb_contributor_insights.html.markdown b/website/docs/cdktf/typescript/r/dynamodb_contributor_insights.html.markdown index ec473c25aed0..459748e553ed 100644 --- a/website/docs/cdktf/typescript/r/dynamodb_contributor_insights.html.markdown +++ b/website/docs/cdktf/typescript/r/dynamodb_contributor_insights.html.markdown @@ -38,8 +38,10 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
* `tableName` - (Required) The name of the table to enable contributor insights * `indexName` - (Optional) The global secondary index name +* `mode` - (Optional) Specifies the [CloudWatch contributor insights mode](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/contributorinsights_HowItWorks.html#contributorinsights_HowItWorks.Modes) ## Attribute Reference @@ -77,4 +79,4 @@ Using `terraform import`, import `aws_dynamodb_contributor_insights` using the f % terraform import aws_dynamodb_contributor_insights.test name:ExampleTableName/index:ExampleIndexName/123456789012 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/dynamodb_global_table.html.markdown b/website/docs/cdktf/typescript/r/dynamodb_global_table.html.markdown index 5f85933e4379..d3f854c597c6 100644 --- a/website/docs/cdktf/typescript/r/dynamodb_global_table.html.markdown +++ b/website/docs/cdktf/typescript/r/dynamodb_global_table.html.markdown @@ -92,6 +92,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The name of the global table. Must match underlying DynamoDB Table names in all regions. * `replica` - (Required) Underlying DynamoDB Table. At least 1 replica must be defined. See below.
@@ -136,4 +137,4 @@ Using `terraform import`, import DynamoDB Global Tables using the global table n % terraform import aws_dynamodb_global_table.MyTable MyTable ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/dynamodb_kinesis_streaming_destination.html.markdown b/website/docs/cdktf/typescript/r/dynamodb_kinesis_streaming_destination.html.markdown index c473fffff482..cb6ac874a302 100644 --- a/website/docs/cdktf/typescript/r/dynamodb_kinesis_streaming_destination.html.markdown +++ b/website/docs/cdktf/typescript/r/dynamodb_kinesis_streaming_destination.html.markdown @@ -61,6 +61,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `approximateCreationDateTimePrecision` - (Optional) Toggle for the precision of Kinesis data stream timestamp. Valid values: `MILLISECOND` and `MICROSECOND`. * `streamArn` - (Required) The ARN for a Kinesis data stream. This must exist in the same account and region as the DynamoDB table. * `tableName` - (Required) The name of the DynamoDB table. There can only be one Kinesis streaming destination for a given DynamoDB table. 
@@ -103,4 +104,4 @@ Using `terraform import`, import DynamoDB Kinesis Streaming Destinations using t % terraform import aws_dynamodb_kinesis_streaming_destination.example example,arn:aws:kinesis:us-east-1:111122223333:exampleStreamName ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/dynamodb_resource_policy.html.markdown b/website/docs/cdktf/typescript/r/dynamodb_resource_policy.html.markdown index 2dfe24878841..a1b85191a7ef 100644 --- a/website/docs/cdktf/typescript/r/dynamodb_resource_policy.html.markdown +++ b/website/docs/cdktf/typescript/r/dynamodb_resource_policy.html.markdown @@ -47,6 +47,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `confirmRemoveSelfResourceAccess` - (Optional) Set this parameter to true to confirm that you want to remove your permissions to change the policy of this resource in the future. ## Attribute Reference @@ -57,7 +58,28 @@ This resource exports the following attributes in addition to the arguments abov ## Import -In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import DynamoDB Resource Policy using the `example_id_arg`. For example: +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. 
For example: + +```terraform +import { + to = aws_dynamodb_resource_policy.example + identity = { + "arn" = "arn:aws:dynamodb:us-west-2:123456789012:table/example-table" + } +} + +resource "aws_dynamodb_resource_policy" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the DynamoDB table. + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import DynamoDB Resource Policy using the `resourceArn`. For example: ```typescript // DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug @@ -81,10 +103,10 @@ class MyConvertedCode extends TerraformStack { ``` -Using `terraform import`, import DynamoDB Resource Policy using the `example_id_arg`. For example: +Using `terraform import`, import DynamoDB Resource Policy using the `resourceArn`. For example: ```console % terraform import aws_dynamodb_resource_policy.example arn:aws:dynamodb:us-east-1:1234567890:table/my-table ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/dynamodb_table.html.markdown b/website/docs/cdktf/typescript/r/dynamodb_table.html.markdown index afb9489a2753..77b69c0bf77a 100644 --- a/website/docs/cdktf/typescript/r/dynamodb_table.html.markdown +++ b/website/docs/cdktf/typescript/r/dynamodb_table.html.markdown @@ -133,6 +133,54 @@ class MyConvertedCode extends TerraformStack { ``` +#### Global Tables with Multi-Region Strong Consistency + +A global table configured for Multi-Region strong consistency (MRSC) provides the ability to perform a strongly consistent read with multi-Region scope. Performing a strongly consistent read on an MRSC table ensures you're always reading the latest version of an item, irrespective of the Region in which you're performing the read. 
+ +**Note** Please see detailed information, restrictions, caveats etc on the [AWS Support Page](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/multi-region-strong-consistency-gt.html). + +Consistency Mode (`consistencyMode`) is a new argument on the embedded `replica` that allows you to configure consistency mode for Global Tables. + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. + */ +import { DynamodbTable } from "./.gen/providers/aws/dynamodb-table"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + new DynamodbTable(this, "example", { + attribute: [ + { + name: "TestTableHashKey", + type: "S", + }, + ], + billingMode: "PAY_PER_REQUEST", + hashKey: "TestTableHashKey", + name: "example", + replica: [ + { + consistencyMode: "STRONG", + regionName: "us-east-2", + }, + { + consistencyMode: "STRONG", + regionName: "us-west-2", + }, + ], + streamEnabled: true, + streamViewType: "NEW_AND_OLD_IMAGES", + }); + } +} + +``` + ### Replica Tagging You can manage global table replicas' tags in various ways. This example shows using `replica.*.propagate_tags` for the first replica and the `aws_dynamodb_tag` resource for the other. @@ -203,7 +251,7 @@ class MyConvertedCode extends TerraformStack { resourceArn: Token.asString( Fn.replace( example.arn, - Token.asString(current.name), + Token.asString(current.region), Token.asString(alternate.name) ) ), @@ -226,6 +274,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `billingMode` - (Optional) Controls how you are charged for read and write throughput and how you manage capacity. The valid values are `PROVISIONED` and `PAY_PER_REQUEST`. Defaults to `PROVISIONED`. * `deletionProtectionEnabled` - (Optional) Enables deletion protection for table. Defaults to `false`. * `importTable` - (Optional) Import Amazon S3 data into a new table. See below. @@ -248,6 +297,7 @@ The following arguments are optional: Default value is `STANDARD`. * `tags` - (Optional) A map of tags to populate on the created table. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. * `ttl` - (Optional) Configuration block for TTL. See below. +* `warmThroughput` - (Optional) Sets the number of warm read and write units for the specified table. See below. * `writeCapacity` - (Optional) Number of write units for this table. If the `billingMode` is `PROVISIONED`, this field is required. ### `attribute` @@ -284,10 +334,11 @@ The following arguments are optional: * `hashKey` - (Required) Name of the hash key in the index; must be defined as an attribute in the resource. * `name` - (Required) Name of the index. * `nonKeyAttributes` - (Optional) Only required with `INCLUDE` as a projection type; a list of attributes to project into the index. These do not need to be defined as attributes on the table. -* `onDemandThroughput` - (Optional) Sets the maximum number of read and write units for the specified on-demand table. See below. +* `onDemandThroughput` - (Optional) Sets the maximum number of read and write units for the specified on-demand index. See below. 
* `projectionType` - (Required) One of `ALL`, `INCLUDE` or `KEYS_ONLY` where `ALL` projects every attribute into the index, `KEYS_ONLY` projects into the index only the table and index hash_key and sort_key attributes , `INCLUDE` projects into the index all of the attributes that are defined in `nonKeyAttributes` in addition to the attributes that that`KEYS_ONLY` project. * `rangeKey` - (Optional) Name of the range key; must be defined * `readCapacity` - (Optional) Number of read units for this index. Must be set if billing_mode is set to PROVISIONED. +* `warmThroughput` - (Optional) Sets the number of warm read and write units for this index. See below. * `writeCapacity` - (Optional) Number of write units for this index. Must be set if billing_mode is set to PROVISIONED. ### `localSecondaryIndex` @@ -314,6 +365,7 @@ The following arguments are optional: **Note:** This attribute will _not_ be populated with the ARN of _default_ keys. **Note:** Changing this value will recreate the replica. * `pointInTimeRecovery` - (Optional) Whether to enable Point In Time Recovery for the replica. Default is `false`. +* `deletionProtectionEnabled` - (Optional) Whether deletion protection is enabled (true) or disabled (false) on the replica. Default is `false`. * `propagateTags` - (Optional) Whether to propagate the global table's tags to a replica. Default is `false`. Changes to tags only move in one direction: from global (source) to replica. @@ -321,6 +373,7 @@ The following arguments are optional: Tag changes on the global table are propagated to replicas. Changing from `true` to `false` on a subsequent `apply` leaves replica tags as-is and no longer manages them. * `regionName` - (Required) Region name of the replica. +* `consistencyMode` - (Optional) Whether this global table will be using `STRONG` consistency mode or `EVENTUAL` consistency mode. Default value is `EVENTUAL`. 
### `serverSideEncryption` @@ -334,6 +387,13 @@ The following arguments are optional: * `enabled` - (Optional) Whether TTL is enabled. Default value is `false`. +### `warmThroughput` + +~> **Note:** Explicitly configuring both `readUnitsPerSecond` and `writeUnitsPerSecond` to the default/minimum values will cause Terraform to report differences. + +* `readUnitsPerSecond` - (Optional) Number of read operations a table or index can instantaneously support. For the base table, decreasing this value will force a new resource. For a global secondary index, this value can be increased or decreased without recreation. Minimum value of `12000` (default). +* `writeUnitsPerSecond` - (Optional) Number of write operations a table or index can instantaneously support. For the base table, decreasing this value will force a new resource. For a global secondary index, this value can be increased or decreased without recreation. Minimum value of `4000` (default). + ## Attribute Reference This resource exports the following attributes in addition to the arguments above: @@ -389,4 +449,4 @@ Using `terraform import`, import DynamoDB tables using the `name`. For example: % terraform import aws_dynamodb_table.basic-dynamodb-table GameScores ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/dynamodb_table_export.html.markdown b/website/docs/cdktf/typescript/r/dynamodb_table_export.html.markdown index 15ba62639186..a8244600757a 100644 --- a/website/docs/cdktf/typescript/r/dynamodb_table_export.html.markdown +++ b/website/docs/cdktf/typescript/r/dynamodb_table_export.html.markdown @@ -132,6 +132,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `exportFormat` - (Optional, Forces new resource) Format for the exported data. Valid values are: `DYNAMODB_JSON`, `ION`. See the [AWS Documentation](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/S3DataExport.Output.html#S3DataExport.Output_Data) for more information on these export formats. Default is `DYNAMODB_JSON`. * `exportTime` - (Optional, Forces new resource) Time in RFC3339 format from which to export table data. The table export will be a snapshot of the table's state at this point in time. Omitting this value will result in a snapshot from the current time. * `exportType` - (Optional, Forces new resource) Whether to execute as a full export or incremental export. Valid values are: `FULL_EXPORT`, `INCREMENTAL_EXPORT`. Defaults to `FULL_EXPORT`. If `INCREMENTAL_EXPORT` is provided, the `incrementalExportSpecification` argument must also be provided. @@ -170,6 +171,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_dynamodb_table_export.example + identity = { + "arn" = "arn:aws:dynamodb:us-west-2:123456789012:table/example-table/export/01234567890123-a1b2c3d4" + } +} + +resource "aws_dynamodb_table_export" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the DynamoDB table export. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import DynamoDB table exports using the `arn`. 
For example: ```typescript @@ -200,4 +222,4 @@ Using `terraform import`, import DynamoDB table exports using the `arn`. For exa % terraform import aws_dynamodb_table_export.example arn:aws:dynamodb:us-west-2:12345678911:table/my-table-1/export/01580735656614-2c2f422e ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/dynamodb_table_item.html.markdown b/website/docs/cdktf/typescript/r/dynamodb_table_item.html.markdown index d2864e24d5b3..ce051163304b 100644 --- a/website/docs/cdktf/typescript/r/dynamodb_table_item.html.markdown +++ b/website/docs/cdktf/typescript/r/dynamodb_table_item.html.markdown @@ -62,6 +62,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `hashKey` - (Required) Hash key to use for lookups and identification of the item * `item` - (Required) JSON representation of a map of attribute name/value pairs, one for each attribute. Only the primary key attributes are required; you can optionally provide other attribute name-value pairs for the item. * `rangeKey` - (Optional) Range key to use for lookups and identification of the item. Required if there is range key defined in the table. @@ -77,4 +78,4 @@ This resource exports the following attributes in addition to the arguments abov You cannot import DynamoDB table items. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/dynamodb_table_replica.html.markdown b/website/docs/cdktf/typescript/r/dynamodb_table_replica.html.markdown index 2e3632c66fae..8a91d5d77633 100644 --- a/website/docs/cdktf/typescript/r/dynamodb_table_replica.html.markdown +++ b/website/docs/cdktf/typescript/r/dynamodb_table_replica.html.markdown @@ -86,6 +86,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `kmsKeyArn` - (Optional, Forces new resource) ARN of the CMK that should be used for the AWS KMS encryption. This argument should only be used if the key is different from the default KMS-managed DynamoDB key, `alias/aws/dynamodb`. **Note:** This attribute will _not_ be populated with the ARN of _default_ keys. * `deletionProtectionEnabled` - (Optional) Whether deletion protection is enabled (true) or disabled (false) on the table replica. * `pointInTimeRecovery` - (Optional) Whether to enable Point In Time Recovery for the table replica. Default is `false`. 
@@ -144,4 +145,4 @@ Using `terraform import`, import DynamoDB table replicas using the `table-name:m % terraform import aws_dynamodb_table_replica.example TestTable:us-west-2 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/dynamodb_tag.html.markdown b/website/docs/cdktf/typescript/r/dynamodb_tag.html.markdown index bf2c8546e82c..1817491b0447 100644 --- a/website/docs/cdktf/typescript/r/dynamodb_tag.html.markdown +++ b/website/docs/cdktf/typescript/r/dynamodb_tag.html.markdown @@ -61,7 +61,7 @@ class MyConvertedCode extends TerraformStack { resourceArn: Token.asString( Fn.replace( example.arn, - Token.asString(current.name), + Token.asString(current.region), Token.asString(dataAwsRegionReplica.name) ) ), @@ -76,6 +76,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `resourceArn` - (Required) Amazon Resource Name (ARN) of the DynamoDB resource to tag. * `key` - (Required) Tag name. * `value` - (Required) Tag value. 
@@ -118,4 +119,4 @@ Using `terraform import`, import `aws_dynamodb_tag` using the DynamoDB resource % terraform import aws_dynamodb_tag.example arn:aws:dynamodb:us-east-1:123456789012:table/example,Name ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/ebs_default_kms_key.html.markdown b/website/docs/cdktf/typescript/r/ebs_default_kms_key.html.markdown index 63cc195d09ec..50037dd57948 100644 --- a/website/docs/cdktf/typescript/r/ebs_default_kms_key.html.markdown +++ b/website/docs/cdktf/typescript/r/ebs_default_kms_key.html.markdown @@ -45,6 +45,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `keyArn` - (Required, ForceNew) The ARN of the AWS Key Management Service (AWS KMS) customer master key (CMK) to use to encrypt the EBS volume. ## Attribute Reference @@ -83,4 +84,4 @@ Using `terraform import`, import the EBS default KMS CMK using the KMS key ARN. 
% terraform import aws_ebs_default_kms_key.example arn:aws:kms:us-east-1:123456789012:key/abcd-1234 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/ebs_encryption_by_default.html.markdown b/website/docs/cdktf/typescript/r/ebs_encryption_by_default.html.markdown index 10a3561e6a20..4f95f2031c1e 100644 --- a/website/docs/cdktf/typescript/r/ebs_encryption_by_default.html.markdown +++ b/website/docs/cdktf/typescript/r/ebs_encryption_by_default.html.markdown @@ -40,6 +40,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `enabled` - (Optional) Whether or not default EBS encryption is enabled. Valid values are `true` or `false`. Defaults to `true`. ## Attribute Reference @@ -74,4 +75,4 @@ Using `terraform import`, import the default EBS encryption state. For example: % terraform import aws_ebs_encryption_by_default.example default ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/ebs_fast_snapshot_restore.html.markdown b/website/docs/cdktf/typescript/r/ebs_fast_snapshot_restore.html.markdown index 47d2b700f779..e01fa6b5d6b1 100644 --- a/website/docs/cdktf/typescript/r/ebs_fast_snapshot_restore.html.markdown +++ b/website/docs/cdktf/typescript/r/ebs_fast_snapshot_restore.html.markdown @@ -39,8 +39,9 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -The following arguments are required: +This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `availabilityZone` - (Required) Availability zone in which to enable fast snapshot restores. * `snapshotId` - (Required) ID of the snapshot. @@ -60,7 +61,7 @@ This resource exports the following attributes in addition to the arguments abov ## Import -In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import EC2 (Elastic Compute Cloud) EBS Fast Snapshot Restore using the `example_id_arg`. For example: +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import EC2 (Elastic Compute Cloud) EBS Fast Snapshot Restore using the `availabilityZone` and `snapshotId` separated by `,`. For example: ```typescript // DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug @@ -84,10 +85,10 @@ class MyConvertedCode extends TerraformStack { ``` -Using `terraform import`, import EC2 (Elastic Compute Cloud) EBS Fast Snapshot Restore using the `id`. For example: +Using `terraform import`, import EC2 (Elastic Compute Cloud) EBS Fast Snapshot Restore using the `availabilityZone` and `snapshotId` separated by `,`. 
For example: ```console % terraform import aws_ebs_fast_snapshot_restore.example us-west-2a,snap-abcdef123456 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/ebs_snapshot.html.markdown b/website/docs/cdktf/typescript/r/ebs_snapshot.html.markdown index 591ddf756fb3..18b45d880487 100644 --- a/website/docs/cdktf/typescript/r/ebs_snapshot.html.markdown +++ b/website/docs/cdktf/typescript/r/ebs_snapshot.html.markdown @@ -49,6 +49,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `volumeId` - (Required) The Volume ID of which to make a snapshot. * `description` - (Optional) A description of what the snapshot is. * `outpostArn` - (Optional) The Amazon Resource Name (ARN) of the Outpost on which to create a local snapshot. @@ -106,4 +107,4 @@ Using `terraform import`, import EBS Snapshot using the `id`. 
For example: % terraform import aws_ebs_snapshot.id snap-049df61146c4d7901 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/ebs_snapshot_block_public_access.html.markdown b/website/docs/cdktf/typescript/r/ebs_snapshot_block_public_access.html.markdown index c1acbf4bba48..6f6199054e39 100644 --- a/website/docs/cdktf/typescript/r/ebs_snapshot_block_public_access.html.markdown +++ b/website/docs/cdktf/typescript/r/ebs_snapshot_block_public_access.html.markdown @@ -40,6 +40,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `state` - (Required) The mode in which to enable "Block public access for snapshots" for the region. Allowed values are `block-all-sharing`, `block-new-sharing`, `unblocked`. ## Attribute Reference @@ -78,4 +79,4 @@ Using `terraform import`, import the state. For example: % terraform import aws_ebs_snapshot_block_public_access.example default ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/ebs_snapshot_copy.html.markdown b/website/docs/cdktf/typescript/r/ebs_snapshot_copy.html.markdown index bfe713de10d6..5351e220da72 100644 --- a/website/docs/cdktf/typescript/r/ebs_snapshot_copy.html.markdown +++ b/website/docs/cdktf/typescript/r/ebs_snapshot_copy.html.markdown @@ -57,6 +57,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` - (Optional) A description of what the snapshot is. * `encrypted` - Whether the snapshot is encrypted. * `kmsKeyId` - The ARN for the KMS encryption key. @@ -65,7 +66,7 @@ This resource supports the following arguments: * `storageTier` - (Optional) The name of the storage tier. Valid values are `archive` and `standard`. Default value is `standard`. * `permanentRestore` - (Optional) Indicates whether to permanently restore an archived snapshot. * `temporaryRestoreDays` - (Optional) Specifies the number of days for which to temporarily restore an archived snapshot. Required for temporary restores only. The snapshot will be automatically re-archived after this period. -* `completion_duration_minutes` - (Optional) Specifies a completion duration to initiate a time-based snapshot copy. Time-based snapshot copy operations complete within the specified duration. Value must be between 15 and 2880 minutes, in 15 minute increments only. +* `completionDurationMinutes` - (Optional) Specifies a completion duration to initiate a time-based snapshot copy. Time-based snapshot copy operations complete within the specified duration. Value must be between 15 and 2880 minutes, in 15 minute increments only. * `tags` - A map of tags for the snapshot. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. 
## Attribute Reference @@ -87,4 +88,4 @@ This resource exports the following attributes in addition to the arguments abov - `create` - (Default `10m`) - `delete` - (Default `10m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/ebs_snapshot_import.html.markdown b/website/docs/cdktf/typescript/r/ebs_snapshot_import.html.markdown index 50f0e85f5bf4..ac2b4744a917 100644 --- a/website/docs/cdktf/typescript/r/ebs_snapshot_import.html.markdown +++ b/website/docs/cdktf/typescript/r/ebs_snapshot_import.html.markdown @@ -48,6 +48,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `clientData` - (Optional) The client-specific data. Detailed below. * `description` - (Optional) The description string for the import snapshot task. * `diskContainer` - (Required) Information about the disk container. Detailed below. @@ -97,4 +98,4 @@ This resource exports the following attributes in addition to the arguments abov * `dataEncryptionKeyId` - The data encryption key identifier for the snapshot. * `tagsAll` - A map of tags assigned to the resource, including those inherited from the provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/ebs_volume.html.markdown b/website/docs/cdktf/typescript/r/ebs_volume.html.markdown index 117ea5bbfef7..344111e51c60 100644 --- a/website/docs/cdktf/typescript/r/ebs_volume.html.markdown +++ b/website/docs/cdktf/typescript/r/ebs_volume.html.markdown @@ -42,6 +42,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `availabilityZone` - (Required) Availability zone where the EBS volume will exist. * `encrypted` - (Optional) If true, the disk will be encrypted. * `finalSnapshot` - (Optional) If true, snapshot will be created before volume deletion. Any tags on the volume will be migrated to the snapshot. By default set to false @@ -54,6 +55,7 @@ This resource supports the following arguments: * `tags` - (Optional) A map of tags to assign to the resource. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. * `throughput` - (Optional) Throughput that the volume supports, in MiB/s. Only valid for `type` of `gp3`. * `type` - (Optional) Type of EBS volume. Can be `standard`, `gp2`, `gp3`, `io1`, `io2`, `sc1` or `st1` (Default: `gp2`). +* `volumeInitializationRate` - (Optional) EBS provisioned rate for volume initialization, in MiB/s, at which to download the snapshot blocks from Amazon S3 to the volume. This argument can only be set if `snapshotId` is specified. ~> **NOTE:** At least one of `size` or `snapshotId` is required. 
@@ -104,4 +106,4 @@ Using `terraform import`, import EBS Volumes using the `id`. For example: % terraform import aws_ebs_volume.id vol-049df61146c4d7901 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/ec2_availability_zone_group.html.markdown b/website/docs/cdktf/typescript/r/ec2_availability_zone_group.html.markdown index dbd27d0c235b..e463fa61bae2 100644 --- a/website/docs/cdktf/typescript/r/ec2_availability_zone_group.html.markdown +++ b/website/docs/cdktf/typescript/r/ec2_availability_zone_group.html.markdown @@ -39,8 +39,9 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -The following arguments are required: +This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `groupName` - (Required) Name of the Availability Zone Group. * `optInStatus` - (Required) Indicates whether to enable or disable Availability Zone Group. Valid values: `opted-in` or `not-opted-in`. 
@@ -82,4 +83,4 @@ Using `terraform import`, import EC2 Availability Zone Groups using the group na % terraform import aws_ec2_availability_zone_group.example us-west-2-lax-1 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/ec2_capacity_block_reservation.html.markdown b/website/docs/cdktf/typescript/r/ec2_capacity_block_reservation.html.markdown index 1888438564f1..f5d6a119ae2c 100644 --- a/website/docs/cdktf/typescript/r/ec2_capacity_block_reservation.html.markdown +++ b/website/docs/cdktf/typescript/r/ec2_capacity_block_reservation.html.markdown @@ -54,6 +54,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `capacityBlockOfferingId` - (Required) The Capacity Block Reservation ID. * `instancePlatform` - (Required) The type of operating system for which to reserve capacity. Valid options are `Linux/UNIX`, `Red Hat Enterprise Linux`, `SUSE Linux`, `Windows`, `Windows with SQL Server`, `Windows with SQL Server Enterprise`, `Windows with SQL Server Standard` or `Windows with SQL Server Web`. * `tags` - (Optional) A map of tags to assign to the resource. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. @@ -78,4 +79,4 @@ This resource exports the following attributes in addition to the arguments abov * `tenancy` - Indicates the tenancy of the Capacity Block Reservation. Specify either `default` or `dedicated`. 
* `tagsAll` - A map of tags assigned to the resource, including those inherited from the provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/ec2_capacity_reservation.html.markdown b/website/docs/cdktf/typescript/r/ec2_capacity_reservation.html.markdown index 2ca2d3b82f46..f59fbb70a71f 100644 --- a/website/docs/cdktf/typescript/r/ec2_capacity_reservation.html.markdown +++ b/website/docs/cdktf/typescript/r/ec2_capacity_reservation.html.markdown @@ -41,6 +41,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `availabilityZone` - (Required) The Availability Zone in which to create the Capacity Reservation. * `ebsOptimized` - (Optional) Indicates whether the Capacity Reservation supports EBS-optimized instances. * `endDate` - (Optional) The date and time at which the Capacity Reservation expires. When a Capacity Reservation expires, the reserved capacity is released and you can no longer launch instances into it. Valid values: [RFC3339 time string](https://tools.ietf.org/html/rfc3339#section-5.8) (`YYYY-MM-DDTHH:MM:SSZ`) @@ -104,4 +105,4 @@ Using `terraform import`, import Capacity Reservations using the `id`. 
For examp % terraform import aws_ec2_capacity_reservation.web cr-0123456789abcdef0 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/ec2_carrier_gateway.html.markdown b/website/docs/cdktf/typescript/r/ec2_carrier_gateway.html.markdown index c56c60470a36..c4b9538e5127 100644 --- a/website/docs/cdktf/typescript/r/ec2_carrier_gateway.html.markdown +++ b/website/docs/cdktf/typescript/r/ec2_carrier_gateway.html.markdown @@ -41,6 +41,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `tags` - (Optional) A map of tags to assign to the resource. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. * `vpcId` - (Required) The ID of the VPC to associate with the carrier gateway. 
@@ -81,4 +82,4 @@ Using `terraform import`, import `aws_ec2_carrier_gateway` using the carrier gat % terraform import aws_ec2_carrier_gateway.example cgw-12345 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/ec2_client_vpn_authorization_rule.html.markdown b/website/docs/cdktf/typescript/r/ec2_client_vpn_authorization_rule.html.markdown index 2321ed8c629a..83e08d05fa2f 100644 --- a/website/docs/cdktf/typescript/r/ec2_client_vpn_authorization_rule.html.markdown +++ b/website/docs/cdktf/typescript/r/ec2_client_vpn_authorization_rule.html.markdown @@ -41,6 +41,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `clientVpnEndpointId` - (Required) The ID of the Client VPN endpoint. * `targetNetworkCidr` - (Required) The IPv4 address range, in CIDR notation, of the network to which the authorization rule applies. * `accessGroupId` - (Optional) The ID of the group to which the authorization rule grants access. One of `accessGroupId` or `authorizeAllGroups` must be set. 
@@ -124,4 +125,4 @@ Using the endpoint ID, target network CIDR, and group name: % terraform import aws_ec2_client_vpn_authorization_rule.example cvpn-endpoint-0ac3a1abbccddd666,10.1.0.0/24,team-a ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/ec2_client_vpn_endpoint.html.markdown b/website/docs/cdktf/typescript/r/ec2_client_vpn_endpoint.html.markdown index b8066cdfe2cc..17cb549e13b2 100644 --- a/website/docs/cdktf/typescript/r/ec2_client_vpn_endpoint.html.markdown +++ b/website/docs/cdktf/typescript/r/ec2_client_vpn_endpoint.html.markdown @@ -52,21 +52,24 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `authenticationOptions` - (Required) Information about the authentication method to be used to authenticate clients. -* `clientCidrBlock` - (Required) The IPv4 address range, in CIDR notation, from which to assign client IP addresses. The address range cannot overlap with the local CIDR of the VPC in which the associated subnet is located, or the routes that you add manually. The address range cannot be changed after the Client VPN endpoint has been created. The CIDR block should be /22 or greater. +* `clientCidrBlock` - (Optional) The IPv4 address range, in CIDR notation, from which to assign client IP addresses. The address range cannot overlap with the local CIDR of the VPC in which the associated subnet is located, or the routes that you add manually. The address range cannot be changed after the Client VPN endpoint has been created. The CIDR block should be /22 or greater. When `trafficIpAddressType` is set to `ipv6`, it must not be specified. 
Otherwise, it is required. * `clientConnectOptions` - (Optional) The options for managing connection authorization for new client connections. * `clientLoginBannerOptions` - (Optional) Options for enabling a customizable text banner that will be displayed on AWS provided clients when a VPN session is established. -* `client_route_enforcement_options` - (Optional) Options for enforce administrator defined routes on devices connected through the VPN. +* `clientRouteEnforcementOptions` - (Optional) Options to enforce administrator-defined routes on devices connected through the VPN. * `connectionLogOptions` - (Required) Information about the client connection logging options. * `description` - (Optional) A brief description of the Client VPN endpoint. * `disconnectOnSessionTimeout` - (Optional) Indicates whether the client VPN session is disconnected after the maximum `sessionTimeoutHours` is reached. If `true`, users are prompted to reconnect client VPN. If `false`, client VPN attempts to reconnect automatically. The default value is `false`. * `dnsServers` - (Optional) Information about the DNS servers to be used for DNS resolution. A Client VPN endpoint can have up to two DNS servers. If no DNS server is specified, the DNS address of the connecting device is used. +* `endpointIpAddressType` - (Optional) IP address type for the Client VPN endpoint. Valid values are `ipv4`, `ipv6`, or `dual-stack`. Defaults to `ipv4`. * `securityGroupIds` - (Optional) The IDs of one or more security groups to apply to the target network. You must also specify the ID of the VPC that contains the security groups. * `selfServicePortal` - (Optional) Specify whether to enable the self-service portal for the Client VPN endpoint. Values can be `enabled` or `disabled`. Default value is `disabled`. * `serverCertificateArn` - (Required) The ARN of the ACM server certificate.
* `sessionTimeoutHours` - (Optional) The maximum session duration is a trigger by which end-users are required to re-authenticate prior to establishing a VPN session. Default value is `24` - Valid values: `8 | 10 | 12 | 24` * `splitTunnel` - (Optional) Indicates whether split-tunnel is enabled on VPN endpoint. Default value is `false`. * `tags` - (Optional) A mapping of tags to assign to the resource. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. +* `trafficIpAddressType` - (Optional) IP address type for traffic within the Client VPN tunnel. Valid values are `ipv4`, `ipv6`, or `dual-stack`. Defaults to `ipv4`. When it is set to `ipv6`, `clientCidrBlock` must not be specified. * `transportProtocol` - (Optional) The transport protocol to be used by the VPN session. Default value is `udp`. * `vpcId` - (Optional) The ID of the VPC to associate with the Client VPN endpoint. If no security group IDs are specified in the request, the default security group for the VPC is applied. * `vpnPort` - (Optional) The port number for the Client VPN endpoint. Valid values are `443` and `1194`. Default value is `443`. @@ -91,7 +94,7 @@ One of the following arguments must be supplied: * `bannerText` - (Optional) Customizable text that will be displayed in a banner on AWS provided clients when a VPN session is established. UTF-8 encoded characters only. Maximum of 1400 characters. * `enabled` - (Optional) Enable or disable a customizable text banner that will be displayed on AWS provided clients when a VPN session is established. The default is `false` (not enabled). -### `client_route_enforcement_options` Argument reference +### `clientRouteEnforcementOptions` Argument reference * `enforced` - (Optional) Enable or disable Client Route Enforcement. 
The default is `false` (not enabled). @@ -145,4 +148,4 @@ Using `terraform import`, import AWS Client VPN endpoints using the `id` value f % terraform import aws_ec2_client_vpn_endpoint.example cvpn-endpoint-0ac3a1abbccddd666 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/ec2_client_vpn_network_association.html.markdown b/website/docs/cdktf/typescript/r/ec2_client_vpn_network_association.html.markdown index 2debd8dd3511..f3d4e49e93de 100644 --- a/website/docs/cdktf/typescript/r/ec2_client_vpn_network_association.html.markdown +++ b/website/docs/cdktf/typescript/r/ec2_client_vpn_network_association.html.markdown @@ -40,6 +40,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `clientVpnEndpointId` - (Required) The ID of the Client VPN endpoint. * `subnetId` - (Required) The ID of the subnet to associate with the Client VPN endpoint. 
@@ -90,4 +91,4 @@ Using `terraform import`, import AWS Client VPN network associations using the e % terraform import aws_ec2_client_vpn_network_association.example cvpn-endpoint-0ac3a1abbccddd666,cvpn-assoc-0b8db902465d069ad ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/ec2_client_vpn_route.html.markdown b/website/docs/cdktf/typescript/r/ec2_client_vpn_route.html.markdown index 856e2b4d5dc0..68808a5a3290 100644 --- a/website/docs/cdktf/typescript/r/ec2_client_vpn_route.html.markdown +++ b/website/docs/cdktf/typescript/r/ec2_client_vpn_route.html.markdown @@ -72,6 +72,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `clientVpnEndpointId` - (Required) The ID of the Client VPN endpoint. * `destinationCidrBlock` - (Required) The IPv4 address range, in CIDR notation, of the route destination. * `description` - (Optional) A brief description of the route. 
@@ -124,4 +125,4 @@ Using `terraform import`, import AWS Client VPN routes using the endpoint ID, ta % terraform import aws_ec2_client_vpn_route.example cvpn-endpoint-1234567890abcdef,subnet-9876543210fedcba,10.1.0.0/24 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/ec2_default_credit_specification.html.markdown b/website/docs/cdktf/typescript/r/ec2_default_credit_specification.html.markdown index 5ef055a7306d..07135273d4ab 100644 --- a/website/docs/cdktf/typescript/r/ec2_default_credit_specification.html.markdown +++ b/website/docs/cdktf/typescript/r/ec2_default_credit_specification.html.markdown @@ -23,13 +23,13 @@ import { TerraformStack } from "cdktf"; * Provider bindings are generated by running `cdktf get`. * See https://cdk.tf/provider-generation for more details. */ -import { Ec2DefaultCreditSpecification } from "./.gen/providers/aws/"; +import { Ec2DefaultCreditSpecification } from "./.gen/providers/aws/ec2-default-credit-specification"; class MyConvertedCode extends TerraformStack { constructor(scope: Construct, name: string) { super(scope, name); new Ec2DefaultCreditSpecification(this, "example", { - cpu_credits: "standard", - instance_family: "t2", + cpuCredits: "standard", + instanceFamily: "t2", }); } } @@ -38,8 +38,9 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -The following arguments are required: +This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `cpuCredits` - (Required) Credit option for CPU usage of the instance family. Valid values: `standard`, `unlimited`. * `instanceFamily` - (Required) Instance family. Valid values are `t2`, `t3`, `t3a`, `t4g`. 
@@ -66,7 +67,7 @@ import { TerraformStack } from "cdktf"; * Provider bindings are generated by running `cdktf get`. * See https://cdk.tf/provider-generation for more details. */ -import { Ec2DefaultCreditSpecification } from "./.gen/providers/aws/"; +import { Ec2DefaultCreditSpecification } from "./.gen/providers/aws/ec2-default-credit-specification"; class MyConvertedCode extends TerraformStack { constructor(scope: Construct, name: string) { super(scope, name); @@ -85,4 +86,4 @@ Using `terraform import`, import EC2 (Elastic Compute Cloud) Default Credit Spec ```console % terraform import aws_ec2_default_credit_specification.example t2 - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/ec2_fleet.html.markdown b/website/docs/cdktf/typescript/r/ec2_fleet.html.markdown index 392b453d7ffc..5d6187efee15 100644 --- a/website/docs/cdktf/typescript/r/ec2_fleet.html.markdown +++ b/website/docs/cdktf/typescript/r/ec2_fleet.html.markdown @@ -49,6 +49,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `context` - (Optional) Reserved. * `excessCapacityTerminationPolicy` - (Optional) Whether running instances should be terminated if the total target capacity of the EC2 Fleet is decreased below the current size of the EC2. Valid values: `no-termination`, `termination`. Defaults to `termination`. Supported only for fleets of type `maintain`. * `launchTemplateConfig` - (Required) Nested argument containing EC2 Launch Template configurations. Defined below. @@ -290,4 +291,4 @@ Using `terraform import`, import `aws_ec2_fleet` using the Fleet identifier. 
For % terraform import aws_ec2_fleet.example fleet-b9b55d27-c5fc-41ac-a6f3-48fcc91f080c ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/ec2_host.html.markdown b/website/docs/cdktf/typescript/r/ec2_host.html.markdown index d616a208c743..5c118209122b 100644 --- a/website/docs/cdktf/typescript/r/ec2_host.html.markdown +++ b/website/docs/cdktf/typescript/r/ec2_host.html.markdown @@ -41,6 +41,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `assetId` - (Optional) The ID of the Outpost hardware asset on which to allocate the Dedicated Hosts. This parameter is supported only if you specify OutpostArn. If you are allocating the Dedicated Hosts in a Region, omit this parameter. * `autoPlacement` - (Optional) Indicates whether the host accepts any untargeted instance launches that match its instance type configuration, or if it only accepts Host tenancy instance launches that specify its unique host ID. Valid values: `on`, `off`. Default: `on`. * `availabilityZone` - (Required) The Availability Zone in which to allocate the Dedicated Host. @@ -95,4 +96,4 @@ Using `terraform import`, import hosts using the host `id`. 
For example: % terraform import aws_ec2_host.example h-0385a99d0e4b20cbb ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/ec2_instance_connect_endpoint.html.markdown b/website/docs/cdktf/typescript/r/ec2_instance_connect_endpoint.html.markdown index ec30d68ec29c..8d0ffab573c2 100644 --- a/website/docs/cdktf/typescript/r/ec2_instance_connect_endpoint.html.markdown +++ b/website/docs/cdktf/typescript/r/ec2_instance_connect_endpoint.html.markdown @@ -38,6 +38,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `preserveClientIp` - (Optional) Indicates whether your client's IP address is preserved as the source. Default: `true`. * `securityGroupIds` - (Optional) One or more security groups to associate with the endpoint. If you don't specify a security group, the default security group for the VPC will be associated with the endpoint. * `subnetId` - (Required) The ID of the subnet in which to create the EC2 Instance Connect Endpoint. @@ -95,4 +96,4 @@ Using `terraform import`, import EC2 Instance Connect Endpoints using the `id`. 
% terraform import aws_ec2_instance_connect_endpoint.example eice-012345678 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/ec2_instance_metadata_defaults.html.markdown b/website/docs/cdktf/typescript/r/ec2_instance_metadata_defaults.html.markdown index dbecc0455417..548e4a2cc3b3 100644 --- a/website/docs/cdktf/typescript/r/ec2_instance_metadata_defaults.html.markdown +++ b/website/docs/cdktf/typescript/r/ec2_instance_metadata_defaults.html.markdown @@ -40,6 +40,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `httpEndpoint` - (Optional) Whether the metadata service is available. Can be `"enabled"`, `"disabled"`, or `"no-preference"`. Default: `"no-preference"`. * `httpTokens` - (Optional) Whether the metadata service requires session tokens, also referred to as _Instance Metadata Service Version 2 (IMDSv2)_. Can be `"optional"`, `"required"`, or `"no-preference"`. Default: `"no-preference"`. * `httpPutResponseHopLimit` - (Optional) The desired HTTP PUT response hop limit for instance metadata requests. The larger the number, the further instance metadata requests can travel. Can be an integer from `1` to `64`, or `-1` to indicate no preference. Default: `-1`. @@ -53,4 +54,4 @@ This data source exports no additional attributes. You cannot import this resource. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/ec2_instance_state.html.markdown b/website/docs/cdktf/typescript/r/ec2_instance_state.html.markdown index 5faee798065e..493e606fe5eb 100644 --- a/website/docs/cdktf/typescript/r/ec2_instance_state.html.markdown +++ b/website/docs/cdktf/typescript/r/ec2_instance_state.html.markdown @@ -71,6 +71,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `force` - (Optional) Whether to request a forced stop when `state` is `stopped`. Otherwise (_i.e._, `state` is `running`), ignored. When an instance is forced to stop, it does not flush file system caches or file system metadata, and you must subsequently perform file system check and repair. Not recommended for Windows instances. Defaults to `false`. 
## Attribute Reference @@ -119,4 +120,4 @@ Using `terraform import`, import `aws_ec2_instance_state` using the `instanceId` % terraform import aws_ec2_instance_state.test i-02cae6557dfcf2f96 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/ec2_local_gateway_route.html.markdown b/website/docs/cdktf/typescript/r/ec2_local_gateway_route.html.markdown index c8186c1b0025..2b7c89abe572 100644 --- a/website/docs/cdktf/typescript/r/ec2_local_gateway_route.html.markdown +++ b/website/docs/cdktf/typescript/r/ec2_local_gateway_route.html.markdown @@ -42,8 +42,9 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -The following arguments are required: +This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `destinationCidrBlock` - (Required) IPv4 CIDR range used for destination matches. Routing decisions are based on the most specific match. * `localGatewayRouteTableId` - (Required) Identifier of EC2 Local Gateway Route Table. * `localGatewayVirtualInterfaceGroupId` - (Required) Identifier of EC2 Local Gateway Virtual Interface Group. 
@@ -86,4 +87,4 @@ Using `terraform import`, import `aws_ec2_local_gateway_route` using the EC2 Loc % terraform import aws_ec2_local_gateway_route.example lgw-rtb-12345678_172.16.0.0/16 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/ec2_local_gateway_route_table_vpc_association.html.markdown b/website/docs/cdktf/typescript/r/ec2_local_gateway_route_table_vpc_association.html.markdown index c7bae8d17e04..89b08bbaf376 100644 --- a/website/docs/cdktf/typescript/r/ec2_local_gateway_route_table_vpc_association.html.markdown +++ b/website/docs/cdktf/typescript/r/ec2_local_gateway_route_table_vpc_association.html.markdown @@ -63,6 +63,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `tags` - (Optional) Key-value map of resource tags. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. 
## Attribute Reference @@ -104,4 +105,4 @@ Using `terraform import`, import `aws_ec2_local_gateway_route_table_vpc_associat % terraform import aws_ec2_local_gateway_route_table_vpc_association.example lgw-vpc-assoc-1234567890abcdef ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/ec2_managed_prefix_list.html.markdown b/website/docs/cdktf/typescript/r/ec2_managed_prefix_list.html.markdown index 01e560dc2d7b..7c40d8e0512f 100644 --- a/website/docs/cdktf/typescript/r/ec2_managed_prefix_list.html.markdown +++ b/website/docs/cdktf/typescript/r/ec2_managed_prefix_list.html.markdown @@ -67,6 +67,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `addressFamily` - (Required, Forces new resource) Address family (`IPv4` or `IPv6`) of this prefix list. * `entry` - (Optional) Configuration block for prefix list entry. Detailed below. Different entries may have overlapping CIDR blocks, but a particular CIDR should not be duplicated. * `maxEntries` - (Required) Maximum number of entries that this prefix list can contain. @@ -120,4 +121,4 @@ Using `terraform import`, import Prefix Lists using the `id`. 
For example: % terraform import aws_ec2_managed_prefix_list.default pl-0570a1d2d725c16be ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/ec2_managed_prefix_list_entry.html.markdown b/website/docs/cdktf/typescript/r/ec2_managed_prefix_list_entry.html.markdown index d2511ccde9c2..93dc9a43606f 100644 --- a/website/docs/cdktf/typescript/r/ec2_managed_prefix_list_entry.html.markdown +++ b/website/docs/cdktf/typescript/r/ec2_managed_prefix_list_entry.html.markdown @@ -55,6 +55,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `cidr` - (Required) CIDR block of this entry. * `description` - (Optional) Description of this entry. Please note that due to API limitations, updating only the description of an entry will require recreating the entry. * `prefixListId` - (Required) The ID of the prefix list. 
@@ -97,4 +98,4 @@ Using `terraform import`, import prefix list entries using `prefixListId` and `c % terraform import aws_ec2_managed_prefix_list_entry.default pl-0570a1d2d725c16be,10.0.3.0/24 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/ec2_network_insights_analysis.html.markdown b/website/docs/cdktf/typescript/r/ec2_network_insights_analysis.html.markdown index 61b693447e39..2a885042be6b 100644 --- a/website/docs/cdktf/typescript/r/ec2_network_insights_analysis.html.markdown +++ b/website/docs/cdktf/typescript/r/ec2_network_insights_analysis.html.markdown @@ -48,6 +48,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `filterInArns` - (Optional) A list of ARNs for resources the path must traverse. * `waitForCompletion` - (Optional) If enabled, the resource will wait for the Network Insights Analysis status to change to `succeeded` or `failed`. Setting this to `false` will skip the process. Default: `true`. * `tags` - (Optional) Map of tags to assign to the resource. If configured with a provider [`defaultTags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. @@ -106,4 +107,4 @@ Using `terraform import`, import Network Insights Analyzes using the `id`. 
For e % terraform import aws_ec2_network_insights_analysis.test nia-0462085c957f11a55 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/ec2_network_insights_path.html.markdown b/website/docs/cdktf/typescript/r/ec2_network_insights_path.html.markdown index 339ea50aa308..b0d625e8d6f9 100644 --- a/website/docs/cdktf/typescript/r/ec2_network_insights_path.html.markdown +++ b/website/docs/cdktf/typescript/r/ec2_network_insights_path.html.markdown @@ -45,6 +45,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `sourceIp` - (Optional) IP address of the source resource. * `destination` - (Optional) ID or ARN of the resource which is the destination of the path. Can be an Instance, Internet Gateway, Network Interface, Transit Gateway, VPC Endpoint, VPC Peering Connection or VPN Gateway. If the resource is in another account, you must specify an ARN. Either the `destination` argument or the `destinationAddress` argument in the `filterAtSource` block must be specified. * `destinationIp` - (Optional) IP address of the destination resource. @@ -107,4 +108,4 @@ Using `terraform import`, import Network Insights Paths using the `id`. 
For exam % terraform import aws_ec2_network_insights_path.test nip-00edfba169923aefd ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/ec2_serial_console_access.html.markdown b/website/docs/cdktf/typescript/r/ec2_serial_console_access.html.markdown index c539cbc3f028..2cc8695e5f39 100644 --- a/website/docs/cdktf/typescript/r/ec2_serial_console_access.html.markdown +++ b/website/docs/cdktf/typescript/r/ec2_serial_console_access.html.markdown @@ -40,6 +40,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `enabled` - (Optional) Whether or not serial console access is enabled. Valid values are `true` or `false`. Defaults to `true`. ## Attribute Reference @@ -74,4 +75,4 @@ Using `terraform import`, import serial console access state. For example: % terraform import aws_ec2_serial_console_access.example default ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/ec2_subnet_cidr_reservation.html.markdown b/website/docs/cdktf/typescript/r/ec2_subnet_cidr_reservation.html.markdown index 84208ccefcc8..e80a809f3784 100644 --- a/website/docs/cdktf/typescript/r/ec2_subnet_cidr_reservation.html.markdown +++ b/website/docs/cdktf/typescript/r/ec2_subnet_cidr_reservation.html.markdown @@ -40,6 +40,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `cidrBlock` - (Required) The CIDR block for the reservation. * `reservationType` - (Required) The type of reservation to create. Valid values: `explicit`, `prefix` * `subnetId` - (Required) The ID of the subnet to create the reservation for. @@ -84,4 +85,4 @@ Using `terraform import`, import Existing CIDR reservations using `SUBNET_ID:RES % terraform import aws_ec2_subnet_cidr_reservation.example subnet-01llsxvsxabqiymcz:scr-4mnvz6wb7otksjcs9 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/ec2_tag.html.markdown b/website/docs/cdktf/typescript/r/ec2_tag.html.markdown index a05b2069d1f5..ec214f42c465 100644 --- a/website/docs/cdktf/typescript/r/ec2_tag.html.markdown +++ b/website/docs/cdktf/typescript/r/ec2_tag.html.markdown @@ -70,6 +70,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `resourceId` - (Required) The ID of the EC2 resource to manage the tag for. * `key` - (Required) The tag name. * `value` - (Required) The value of the tag. 
@@ -112,4 +113,4 @@ Using `terraform import`, import `aws_ec2_tag` using the EC2 resource identifier % terraform import aws_ec2_tag.example tgw-attach-1234567890abcdef,Name ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/ec2_traffic_mirror_filter.html.markdown b/website/docs/cdktf/typescript/r/ec2_traffic_mirror_filter.html.markdown index c0b67362cb17..6379350b878d 100644 --- a/website/docs/cdktf/typescript/r/ec2_traffic_mirror_filter.html.markdown +++ b/website/docs/cdktf/typescript/r/ec2_traffic_mirror_filter.html.markdown @@ -42,6 +42,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` - (Optional, Forces new resource) A description of the filter. * `networkServices` - (Optional) List of amazon network services that should be mirrored. Valid values: `amazon-dns`. * `tags` - (Optional) Key-value map of resource tags. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. @@ -86,4 +87,4 @@ Using `terraform import`, import traffic mirror filter using the `id`. 
For examp % terraform import aws_ec2_traffic_mirror_filter.foo tmf-0fbb93ddf38198f64 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/ec2_traffic_mirror_filter_rule.html.markdown b/website/docs/cdktf/typescript/r/ec2_traffic_mirror_filter_rule.html.markdown index 1fd52abea770..bc7892456eaa 100644 --- a/website/docs/cdktf/typescript/r/ec2_traffic_mirror_filter_rule.html.markdown +++ b/website/docs/cdktf/typescript/r/ec2_traffic_mirror_filter_rule.html.markdown @@ -70,6 +70,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` - (Optional) Description of the traffic mirror filter rule. * `trafficMirrorFilterId` - (Required) ID of the traffic mirror filter to which this rule should be added * `destinationCidrBlock` - (Required) Destination CIDR block to assign to the Traffic Mirror rule. 
@@ -125,4 +126,4 @@ Using `terraform import`, import traffic mirror rules using the `trafficMirrorFi % terraform import aws_ec2_traffic_mirror_filter_rule.rule tmf-0fbb93ddf38198f64:tmfr-05a458f06445d0aee ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/ec2_traffic_mirror_session.html.markdown b/website/docs/cdktf/typescript/r/ec2_traffic_mirror_session.html.markdown index 6422c9ca9cf5..8b3d16e6860d 100644 --- a/website/docs/cdktf/typescript/r/ec2_traffic_mirror_session.html.markdown +++ b/website/docs/cdktf/typescript/r/ec2_traffic_mirror_session.html.markdown @@ -54,6 +54,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` - (Optional) A description of the traffic mirror session. * `networkInterfaceId` - (Required, Forces new) ID of the source network interface. Not all network interfaces are eligible as mirror sources. On EC2 instances only nitro based instances support mirroring. * `trafficMirrorFilterId` - (Required) ID of the traffic mirror filter to be used @@ -104,4 +105,4 @@ Using `terraform import`, import traffic mirror sessions using the `id`. 
For exa % terraform import aws_ec2_traffic_mirror_session.session tms-0d8aa3ca35897b82e ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/ec2_traffic_mirror_target.html.markdown b/website/docs/cdktf/typescript/r/ec2_traffic_mirror_target.html.markdown index 2bdc130fc1bf..842c79babf63 100644 --- a/website/docs/cdktf/typescript/r/ec2_traffic_mirror_target.html.markdown +++ b/website/docs/cdktf/typescript/r/ec2_traffic_mirror_target.html.markdown @@ -50,6 +50,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` - (Optional, Forces new) A description of the traffic mirror session. * `networkInterfaceId` - (Optional, Forces new) The network interface ID that is associated with the target. * `networkLoadBalancerArn` - (Optional, Forces new) The Amazon Resource Name (ARN) of the Network Load Balancer that is associated with the target. @@ -99,4 +100,4 @@ Using `terraform import`, import traffic mirror targets using the `id`. 
For exam % terraform import aws_ec2_traffic_mirror_target.target tmt-0c13a005422b86606 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/ec2_transit_gateway.html.markdown b/website/docs/cdktf/typescript/r/ec2_transit_gateway.html.markdown index ccbeef69f6ca..dfc96fe4f010 100644 --- a/website/docs/cdktf/typescript/r/ec2_transit_gateway.html.markdown +++ b/website/docs/cdktf/typescript/r/ec2_transit_gateway.html.markdown @@ -38,6 +38,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `amazonSideAsn` - (Optional) Private Autonomous System Number (ASN) for the Amazon side of a BGP session. The range is `64512` to `65534` for 16-bit ASNs and `4200000000` to `4294967294` for 32-bit ASNs. Default value: `64512`. -> **NOTE:** Modifying `amazonSideAsn` on a Transit Gateway with active BGP sessions is [not allowed](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_ModifyTransitGatewayOptions.html). You must first delete all Transit Gateway attachments that have BGP configured prior to modifying `amazonSideAsn`. 
@@ -100,4 +101,4 @@ Using `terraform import`, import `aws_ec2_transit_gateway` using the EC2 Transit % terraform import aws_ec2_transit_gateway.example tgw-12345678 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/ec2_transit_gateway_connect.html.markdown b/website/docs/cdktf/typescript/r/ec2_transit_gateway_connect.html.markdown index dae24a198b4e..eae646a35634 100644 --- a/website/docs/cdktf/typescript/r/ec2_transit_gateway_connect.html.markdown +++ b/website/docs/cdktf/typescript/r/ec2_transit_gateway_connect.html.markdown @@ -45,6 +45,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `protocol` - (Optional) The tunnel protocol. Valid values: `gre`. Default is `gre`. * `tags` - (Optional) Key-value tags for the EC2 Transit Gateway Connect. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. * `transitGatewayDefaultRouteTableAssociation` - (Optional) Boolean whether the Connect should be associated with the EC2 Transit Gateway association default route table. This cannot be configured or perform drift detection with Resource Access Manager shared EC2 Transit Gateways. Default value: `true`. 
@@ -99,4 +100,4 @@ Using `terraform import`, import `aws_ec2_transit_gateway_connect` using the EC2 % terraform import aws_ec2_transit_gateway_connect.example tgw-attach-12345678 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/ec2_transit_gateway_connect_peer.html.markdown b/website/docs/cdktf/typescript/r/ec2_transit_gateway_connect_peer.html.markdown index c8332f7205df..7508bb337f54 100644 --- a/website/docs/cdktf/typescript/r/ec2_transit_gateway_connect_peer.html.markdown +++ b/website/docs/cdktf/typescript/r/ec2_transit_gateway_connect_peer.html.markdown @@ -50,6 +50,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `bgpAsn` - (Optional) The BGP ASN number assigned customer device. If not provided, it will use the same BGP ASN as is associated with Transit Gateway. * `insideCidrBlocks` - (Required) The CIDR block that will be used for addressing within the tunnel. It must contain exactly one IPv4 CIDR block and up to one IPv6 CIDR block. The IPv4 CIDR block must be /29 size and must be within 169.254.0.0/16 range, with exception of: 169.254.0.0/29, 169.254.1.0/29, 169.254.2.0/29, 169.254.3.0/29, 169.254.4.0/29, 169.254.5.0/29, 169.254.169.248/29. The IPv6 CIDR block must be /125 size and must be within fd00::/8. 
The first IP from each CIDR block is assigned for customer gateway, the second and third is for Transit Gateway (An example: from range 169.254.100.0/29, .1 is assigned to customer gateway and .2 and .3 are assigned to Transit Gateway) * `peerAddress` - (Required) The IP addressed assigned to customer device, which will be used as tunnel endpoint. It can be IPv4 or IPv6 address, but must be the same address family as `transitGatewayAddress` @@ -106,4 +107,4 @@ Using `terraform import`, import `aws_ec2_transit_gateway_connect_peer` using th % terraform import aws_ec2_transit_gateway_connect_peer.example tgw-connect-peer-12345678 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/ec2_transit_gateway_default_route_table_association.html.markdown b/website/docs/cdktf/typescript/r/ec2_transit_gateway_default_route_table_association.html.markdown index e85dd9e8ae12..b6d371de81e2 100644 --- a/website/docs/cdktf/typescript/r/ec2_transit_gateway_default_route_table_association.html.markdown +++ b/website/docs/cdktf/typescript/r/ec2_transit_gateway_default_route_table_association.html.markdown @@ -40,8 +40,9 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -The following arguments are required: +This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `transitGatewayId` - (Required) ID of the Transit Gateway to change the default association route table on. * `transitGatewayRouteTableId` - (Required) ID of the Transit Gateway Route Table to be made the default association route table. @@ -57,4 +58,4 @@ This resource exports no additional attributes. 
* `update` - (Default `5m`) * `delete` - (Default `5m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/ec2_transit_gateway_default_route_table_propagation.html.markdown b/website/docs/cdktf/typescript/r/ec2_transit_gateway_default_route_table_propagation.html.markdown index 293f34c850b2..e5dbdda6fc4a 100644 --- a/website/docs/cdktf/typescript/r/ec2_transit_gateway_default_route_table_propagation.html.markdown +++ b/website/docs/cdktf/typescript/r/ec2_transit_gateway_default_route_table_propagation.html.markdown @@ -40,8 +40,9 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -The following arguments are required: +This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `transitGatewayId` - (Required) ID of the Transit Gateway to change the default association route table on. * `transitGatewayRouteTableId` - (Required) ID of the Transit Gateway Route Table to be made the default association route table. @@ -57,4 +58,4 @@ This resource exports no additional attributes. 
* `update` - (Default `5m`) * `delete` - (Default `5m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/ec2_transit_gateway_multicast_domain.html.markdown b/website/docs/cdktf/typescript/r/ec2_transit_gateway_multicast_domain.html.markdown index 204eb115a992..85d463a96093 100644 --- a/website/docs/cdktf/typescript/r/ec2_transit_gateway_multicast_domain.html.markdown +++ b/website/docs/cdktf/typescript/r/ec2_transit_gateway_multicast_domain.html.markdown @@ -167,6 +167,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `transitGatewayId` - (Required) EC2 Transit Gateway identifier. The EC2 Transit Gateway must have `multicastSupport` enabled. * `autoAcceptSharedAssociations` - (Optional) Whether to automatically accept cross-account subnet associations that are associated with the EC2 Transit Gateway Multicast Domain. Valid values: `disable`, `enable`. Default value: `disable`. * `igmpv2Support` - (Optional) Whether to enable Internet Group Management Protocol (IGMP) version 2 for the EC2 Transit Gateway Multicast Domain. Valid values: `disable`, `enable`. Default value: `disable`. 
@@ -221,4 +222,4 @@ Using `terraform import`, import `aws_ec2_transit_gateway_multicast_domain` usin % terraform import aws_ec2_transit_gateway_multicast_domain.example tgw-mcast-domain-12345 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/ec2_transit_gateway_multicast_domain_association.html.markdown b/website/docs/cdktf/typescript/r/ec2_transit_gateway_multicast_domain_association.html.markdown index 66554522bdfc..09a58f31b89b 100644 --- a/website/docs/cdktf/typescript/r/ec2_transit_gateway_multicast_domain_association.html.markdown +++ b/website/docs/cdktf/typescript/r/ec2_transit_gateway_multicast_domain_association.html.markdown @@ -69,6 +69,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `subnetId` - (Required) The ID of the subnet to associate with the transit gateway multicast domain. * `transitGatewayAttachmentId` - (Required) The ID of the transit gateway attachment. * `transitGatewayMulticastDomainId` - (Required) The ID of the transit gateway multicast domain. 
@@ -86,4 +87,4 @@ This resource exports the following attributes in addition to the arguments abov - `create` - (Default `10m`) - `delete` - (Default `10m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/ec2_transit_gateway_multicast_group_member.html.markdown b/website/docs/cdktf/typescript/r/ec2_transit_gateway_multicast_group_member.html.markdown index cf0ceda246e4..71093d53ad16 100644 --- a/website/docs/cdktf/typescript/r/ec2_transit_gateway_multicast_group_member.html.markdown +++ b/website/docs/cdktf/typescript/r/ec2_transit_gateway_multicast_group_member.html.markdown @@ -43,6 +43,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `groupIpAddress` - (Required) The IP address assigned to the transit gateway multicast group. * `networkInterfaceId` - (Required) The group members' network interface ID to register with the transit gateway multicast group. * `transitGatewayMulticastDomainId` - (Required) The ID of the transit gateway multicast domain. @@ -53,4 +54,4 @@ This resource exports the following attributes in addition to the arguments abov * `id` - EC2 Transit Gateway Multicast Group Member identifier. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/ec2_transit_gateway_multicast_group_source.html.markdown b/website/docs/cdktf/typescript/r/ec2_transit_gateway_multicast_group_source.html.markdown index b96cd36bbf26..21cde8f66751 100644 --- a/website/docs/cdktf/typescript/r/ec2_transit_gateway_multicast_group_source.html.markdown +++ b/website/docs/cdktf/typescript/r/ec2_transit_gateway_multicast_group_source.html.markdown @@ -43,6 +43,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `groupIpAddress` - (Required) The IP address assigned to the transit gateway multicast group. * `networkInterfaceId` - (Required) The group members' network interface ID to register with the transit gateway multicast group. * `transitGatewayMulticastDomainId` - (Required) The ID of the transit gateway multicast domain. @@ -53,4 +54,4 @@ This resource exports the following attributes in addition to the arguments abov * `id` - EC2 Transit Gateway Multicast Group Member identifier. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/ec2_transit_gateway_peering_attachment.html.markdown b/website/docs/cdktf/typescript/r/ec2_transit_gateway_peering_attachment.html.markdown index 71c7f5610615..6ca9151af13b 100644 --- a/website/docs/cdktf/typescript/r/ec2_transit_gateway_peering_attachment.html.markdown +++ b/website/docs/cdktf/typescript/r/ec2_transit_gateway_peering_attachment.html.markdown @@ -75,6 +75,7 @@ A full example of how to create a Transit Gateway in one AWS account, share it w This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `peerAccountId` - (Optional) Account ID of EC2 Transit Gateway to peer with. Defaults to the account ID the [AWS provider][1] is currently connected to. * `peerRegion` - (Required) Region of EC2 Transit Gateway to peer with. * `peerTransitGatewayId` - (Required) Identifier of EC2 Transit Gateway to peer with. 
@@ -130,4 +131,4 @@ Using `terraform import`, import `aws_ec2_transit_gateway_peering_attachment` us [1]: /docs/providers/aws/index.html - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/ec2_transit_gateway_peering_attachment_accepter.html.markdown b/website/docs/cdktf/typescript/r/ec2_transit_gateway_peering_attachment_accepter.html.markdown index f96751abffd1..a0a107361536 100644 --- a/website/docs/cdktf/typescript/r/ec2_transit_gateway_peering_attachment_accepter.html.markdown +++ b/website/docs/cdktf/typescript/r/ec2_transit_gateway_peering_attachment_accepter.html.markdown @@ -45,6 +45,7 @@ A full example of how to create a Transit Gateway in one AWS account, share it w This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `transitGatewayAttachmentId` - (Required) The ID of the EC2 Transit Gateway Peering Attachment to manage. * `tags` - (Optional) Key-value tags for the EC2 Transit Gateway Peering Attachment. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. 
@@ -90,4 +91,4 @@ Using `terraform import`, import `aws_ec2_transit_gateway_peering_attachment_acc % terraform import aws_ec2_transit_gateway_peering_attachment_accepter.example tgw-attach-12345678 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/ec2_transit_gateway_policy_table.html.markdown b/website/docs/cdktf/typescript/r/ec2_transit_gateway_policy_table.html.markdown index 13654a173356..805503f528a5 100644 --- a/website/docs/cdktf/typescript/r/ec2_transit_gateway_policy_table.html.markdown +++ b/website/docs/cdktf/typescript/r/ec2_transit_gateway_policy_table.html.markdown @@ -41,6 +41,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `transitGatewayId` - (Required) EC2 Transit Gateway identifier. * `tags` - (Optional) Key-value tags for the EC2 Transit Gateway Policy Table. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. 
@@ -85,4 +86,4 @@ Using `terraform import`, import `aws_ec2_transit_gateway_policy_table` using th % terraform import aws_ec2_transit_gateway_policy_table.example tgw-rtb-12345678 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/ec2_transit_gateway_policy_table_association.html.markdown b/website/docs/cdktf/typescript/r/ec2_transit_gateway_policy_table_association.html.markdown index 4f80b03da28c..eeb004426610 100644 --- a/website/docs/cdktf/typescript/r/ec2_transit_gateway_policy_table_association.html.markdown +++ b/website/docs/cdktf/typescript/r/ec2_transit_gateway_policy_table_association.html.markdown @@ -43,6 +43,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `transitGatewayAttachmentId` - (Required) Identifier of EC2 Transit Gateway Attachment. * `transitGatewayPolicyTableId` - (Required) Identifier of EC2 Transit Gateway Policy Table. 
@@ -86,4 +87,4 @@ Using `terraform import`, import `aws_ec2_transit_gateway_policy_table_associati % terraform import aws_ec2_transit_gateway_policy_table_association.example tgw-rtb-12345678_tgw-attach-87654321 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/ec2_transit_gateway_prefix_list_reference.html.markdown b/website/docs/cdktf/typescript/r/ec2_transit_gateway_prefix_list_reference.html.markdown index 4c6055b56016..e7ce96dcee1b 100644 --- a/website/docs/cdktf/typescript/r/ec2_transit_gateway_prefix_list_reference.html.markdown +++ b/website/docs/cdktf/typescript/r/ec2_transit_gateway_prefix_list_reference.html.markdown @@ -77,6 +77,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `blackhole` - (Optional) Indicates whether to drop traffic that matches the Prefix List. Defaults to `false`. * `transitGatewayAttachmentId` - (Optional) Identifier of EC2 Transit Gateway Attachment. 
@@ -118,4 +119,4 @@ Using `terraform import`, import `aws_ec2_transit_gateway_prefix_list_reference` % terraform import aws_ec2_transit_gateway_prefix_list_reference.example tgw-rtb-12345678_pl-12345678 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/ec2_transit_gateway_route.html.markdown b/website/docs/cdktf/typescript/r/ec2_transit_gateway_route.html.markdown index 7c6d917cdba5..099258015857 100644 --- a/website/docs/cdktf/typescript/r/ec2_transit_gateway_route.html.markdown +++ b/website/docs/cdktf/typescript/r/ec2_transit_gateway_route.html.markdown @@ -72,6 +72,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `destinationCidrBlock` - (Required) IPv4 or IPv6 RFC1924 CIDR used for destination matches. Routing decisions are based on the most specific match. * `transitGatewayAttachmentId` - (Optional) Identifier of EC2 Transit Gateway Attachment (required if `blackhole` is set to false). * `blackhole` - (Optional) Indicates whether to drop traffic that matches this route (default to `false`). 
@@ -115,4 +116,4 @@ Using `terraform import`, import `aws_ec2_transit_gateway_route` using the EC2 T % terraform import aws_ec2_transit_gateway_route.example tgw-rtb-12345678_0.0.0.0/0 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/ec2_transit_gateway_route_table.html.markdown b/website/docs/cdktf/typescript/r/ec2_transit_gateway_route_table.html.markdown index 0cee01d7a198..b80f8e0fecae 100644 --- a/website/docs/cdktf/typescript/r/ec2_transit_gateway_route_table.html.markdown +++ b/website/docs/cdktf/typescript/r/ec2_transit_gateway_route_table.html.markdown @@ -38,6 +38,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `transitGatewayId` - (Required) Identifier of EC2 Transit Gateway. * `tags` - (Optional) Key-value tags for the EC2 Transit Gateway Route Table. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. 
@@ -83,4 +84,4 @@ Using `terraform import`, import `aws_ec2_transit_gateway_route_table` using the % terraform import aws_ec2_transit_gateway_route_table.example tgw-rtb-12345678 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/ec2_transit_gateway_route_table_association.html.markdown b/website/docs/cdktf/typescript/r/ec2_transit_gateway_route_table_association.html.markdown index 0a9cde346637..2b4d3d9a1f4f 100644 --- a/website/docs/cdktf/typescript/r/ec2_transit_gateway_route_table_association.html.markdown +++ b/website/docs/cdktf/typescript/r/ec2_transit_gateway_route_table_association.html.markdown @@ -43,6 +43,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `transitGatewayAttachmentId` - (Required) Identifier of EC2 Transit Gateway Attachment. * `transitGatewayRouteTableId` - (Required) Identifier of EC2 Transit Gateway Route Table. * `replaceExistingAssociation` - (Optional) Boolean whether the Gateway Attachment should remove any current Route Table association before associating with the specified Route Table. Default value: `false`. This argument is intended for use with EC2 Transit Gateways shared into the current account, otherwise the `transitGatewayDefaultRouteTableAssociation` argument of the `aws_ec2_transit_gateway_vpc_attachment` resource should be used. 
@@ -87,4 +88,4 @@ Using `terraform import`, import `aws_ec2_transit_gateway_route_table_associatio % terraform import aws_ec2_transit_gateway_route_table_association.example tgw-rtb-12345678_tgw-attach-87654321 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/ec2_transit_gateway_route_table_propagation.html.markdown b/website/docs/cdktf/typescript/r/ec2_transit_gateway_route_table_propagation.html.markdown index 0a514f736073..43011f87178e 100644 --- a/website/docs/cdktf/typescript/r/ec2_transit_gateway_route_table_propagation.html.markdown +++ b/website/docs/cdktf/typescript/r/ec2_transit_gateway_route_table_propagation.html.markdown @@ -43,6 +43,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `transitGatewayAttachmentId` - (Required) Identifier of EC2 Transit Gateway Attachment. * `transitGatewayRouteTableId` - (Required) Identifier of EC2 Transit Gateway Route Table. 
@@ -86,4 +87,4 @@ Using `terraform import`, import `aws_ec2_transit_gateway_route_table_propagatio % terraform import aws_ec2_transit_gateway_route_table_propagation.example tgw-rtb-12345678_tgw-attach-87654321 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/ec2_transit_gateway_vpc_attachment.html.markdown b/website/docs/cdktf/typescript/r/ec2_transit_gateway_vpc_attachment.html.markdown index c0d62d4a0e62..be00a2b9cd5a 100644 --- a/website/docs/cdktf/typescript/r/ec2_transit_gateway_vpc_attachment.html.markdown +++ b/website/docs/cdktf/typescript/r/ec2_transit_gateway_vpc_attachment.html.markdown @@ -42,6 +42,7 @@ A full example of how to create a Transit Gateway in one AWS account, share it w This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `subnetIds` - (Required) Identifiers of EC2 Subnets. * `transitGatewayId` - (Required) Identifier of EC2 Transit Gateway. * `vpcId` - (Required) Identifier of EC2 VPC. 
@@ -94,4 +95,4 @@ Using `terraform import`, import `aws_ec2_transit_gateway_vpc_attachment` using % terraform import aws_ec2_transit_gateway_vpc_attachment.example tgw-attach-12345678 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/ec2_transit_gateway_vpc_attachment_accepter.html.markdown b/website/docs/cdktf/typescript/r/ec2_transit_gateway_vpc_attachment_accepter.html.markdown index 6f52d66e84dc..a2b4f0b10eba 100644 --- a/website/docs/cdktf/typescript/r/ec2_transit_gateway_vpc_attachment_accepter.html.markdown +++ b/website/docs/cdktf/typescript/r/ec2_transit_gateway_vpc_attachment_accepter.html.markdown @@ -51,6 +51,7 @@ A full example of how to create a Transit Gateway in one AWS account, share it w This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `transitGatewayAttachmentId` - (Required) The ID of the EC2 Transit Gateway Attachment to manage. * `transitGatewayDefaultRouteTableAssociation` - (Optional) Boolean whether the VPC Attachment should be associated with the EC2 Transit Gateway association default route table. Default value: `true`. * `transitGatewayDefaultRouteTablePropagation` - (Optional) Boolean whether the VPC Attachment should propagate routes with the EC2 Transit Gateway propagation default route table. Default value: `true`. 
@@ -103,4 +104,4 @@ Using `terraform import`, import `aws_ec2_transit_gateway_vpc_attachment_accepte % terraform import aws_ec2_transit_gateway_vpc_attachment_accepter.example tgw-attach-12345678 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/ecr_account_setting.html.markdown b/website/docs/cdktf/typescript/r/ecr_account_setting.html.markdown index 447944326c0d..3056e75c79ba 100644 --- a/website/docs/cdktf/typescript/r/ecr_account_setting.html.markdown +++ b/website/docs/cdktf/typescript/r/ecr_account_setting.html.markdown @@ -64,6 +64,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the account setting. One of: `BASIC_SCAN_TYPE_VERSION`, `REGISTRY_POLICY_SCOPE`. * `value` - (Required) Setting value that is specified. Valid values are: * If `name` is specified as `BASIC_SCAN_TYPE_VERSION`, one of: `AWS_NATIVE`, `CLAIR`. 
@@ -107,4 +108,4 @@ Using `terraform import`, import EMR Security Configurations using the account s % terraform import aws_ecr_account_setting.foo BASIC_SCAN_TYPE_VERSION ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/ecr_lifecycle_policy.html.markdown b/website/docs/cdktf/typescript/r/ecr_lifecycle_policy.html.markdown index d0512ec4fc2e..9e0db95c5c6a 100644 --- a/website/docs/cdktf/typescript/r/ecr_lifecycle_policy.html.markdown +++ b/website/docs/cdktf/typescript/r/ecr_lifecycle_policy.html.markdown @@ -90,6 +90,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `repository` - (Required) Name of the repository to apply the policy. * `policy` - (Required) The policy document. This is a JSON formatted string. See more details about [Policy Parameters](http://docs.aws.amazon.com/AmazonECR/latest/userguide/LifecyclePolicies.html#lifecycle_policy_parameters) in the official AWS docs. Consider using the [`aws_ecr_lifecycle_policy_document` data_source](/docs/providers/aws/d/ecr_lifecycle_policy_document.html) to generate/manage the JSON document used for the `policy` argument. @@ -102,6 +103,32 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. 
For example: + +```terraform +import { + to = aws_ecr_lifecycle_policy.example + identity = { + repository = "tf-example" + } +} + +resource "aws_ecr_lifecycle_policy" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `repository` - (String) Name of the ECR repository. + +#### Optional + +* `accountId` (String) AWS Account where this resource is managed. +* `region` (String) Region where this resource is managed. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import ECR Lifecycle Policy using the name of the repository. For example: ```typescript @@ -128,4 +155,4 @@ Using `terraform import`, import ECR Lifecycle Policy using the name of the repo % terraform import aws_ecr_lifecycle_policy.example tf-example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/ecr_pull_through_cache_rule.html.markdown b/website/docs/cdktf/typescript/r/ecr_pull_through_cache_rule.html.markdown index 4c584f9355c6..6be693823fca 100644 --- a/website/docs/cdktf/typescript/r/ecr_pull_through_cache_rule.html.markdown +++ b/website/docs/cdktf/typescript/r/ecr_pull_through_cache_rule.html.markdown @@ -44,6 +44,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `credentialArn` - (Optional) ARN of the Secret which will be used to authenticate against the registry. * `customRoleArn` - (Optional) The ARN of the IAM role associated with the pull through cache rule. Must be specified if the upstream registry is a cross-account ECR private registry. 
See [AWS Document - Setting up permissions for cross-account ECR to ECR PTC](https://docs.aws.amazon.com/AmazonECR/latest/userguide/pull-through-cache-private.html). * `ecrRepositoryPrefix` - (Required, Forces new resource) The repository name prefix to use when caching images from the source registry. Use `ROOT` as the prefix to apply a template to all repositories in your registry that don't have an associated pull through cache rule. @@ -88,4 +89,4 @@ Using `terraform import`, import a pull-through cache rule using the `ecrReposit % terraform import aws_ecr_pull_through_cache_rule.example ecr-public ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/ecr_registry_policy.html.markdown b/website/docs/cdktf/typescript/r/ecr_registry_policy.html.markdown index 3a8541a593e4..530edafd4a92 100644 --- a/website/docs/cdktf/typescript/r/ecr_registry_policy.html.markdown +++ b/website/docs/cdktf/typescript/r/ecr_registry_policy.html.markdown @@ -57,7 +57,7 @@ class MyConvertedCode extends TerraformStack { "arn:${" + dataAwsPartitionCurrent.partition + "}:ecr:${" + - dataAwsRegionCurrent.name + + dataAwsRegionCurrent.region + "}:${" + current.accountId + "}:repository/*", @@ -78,6 +78,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `policy` - (Required) The policy document. This is a JSON formatted string. 
For more information about building IAM policy documents with Terraform, see the [AWS IAM Policy Document Guide](https://learn.hashicorp.com/terraform/aws/iam-policy) ## Attribute Reference @@ -114,4 +115,4 @@ Using `terraform import`, import ECR Registry Policy using the registry id. For % terraform import aws_ecr_registry_policy.example 123456789012 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/ecr_registry_scanning_configuration.html.markdown b/website/docs/cdktf/typescript/r/ecr_registry_scanning_configuration.html.markdown index 63aa58fe2535..8e65a0401015 100644 --- a/website/docs/cdktf/typescript/r/ecr_registry_scanning_configuration.html.markdown +++ b/website/docs/cdktf/typescript/r/ecr_registry_scanning_configuration.html.markdown @@ -93,6 +93,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +- `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). - `scanType` - (Required) the scanning type to set for the registry. Can be either `ENHANCED` or `BASIC`. - `rule` - (Optional) One or multiple blocks specifying scanning rules to determine which repository filters are used and at what frequency scanning will occur. See [below for schema](#rule). 
@@ -139,4 +140,4 @@ Using `terraform import`, import ECR Scanning Configurations using the `registry % terraform import aws_ecr_registry_scanning_configuration.example 123456789012 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/ecr_replication_configuration.html.markdown b/website/docs/cdktf/typescript/r/ecr_replication_configuration.html.markdown index b4c1ca352cee..310ba8a70ee9 100644 --- a/website/docs/cdktf/typescript/r/ecr_replication_configuration.html.markdown +++ b/website/docs/cdktf/typescript/r/ecr_replication_configuration.html.markdown @@ -146,6 +146,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `replicationConfiguration` - (Required) Replication configuration for a registry. See [Replication Configuration](#replication-configuration). ### Replication Configuration @@ -205,4 +206,4 @@ Using `terraform import`, import ECR Replication Configuration using the `regist % terraform import aws_ecr_replication_configuration.service 012345678912 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/ecr_repository.html.markdown b/website/docs/cdktf/typescript/r/ecr_repository.html.markdown index 698149833826..b6db45d5b1b1 100644 --- a/website/docs/cdktf/typescript/r/ecr_repository.html.markdown +++ b/website/docs/cdktf/typescript/r/ecr_repository.html.markdown @@ -38,15 +38,50 @@ class MyConvertedCode extends TerraformStack { ``` +### With Image Tag Mutability Exclusion + +```typescript +// DO NOT EDIT. 
Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. + */ +import { EcrRepository } from "./.gen/providers/aws/ecr-repository"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + new EcrRepository(this, "example", { + imageTagMutability: "IMMUTABLE_WITH_EXCLUSION", + imageTagMutabilityExclusionFilter: [ + { + filter: "latest*", + filterType: "WILDCARD", + }, + { + filter: "dev-*", + filterType: "WILDCARD", + }, + ], + name: "example-repo", + }); + } +} + +``` + ## Argument Reference This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the repository. * `encryptionConfiguration` - (Optional) Encryption configuration for the repository. See [below for schema](#encryption_configuration). * `forceDelete` - (Optional) If `true`, will delete the repository even if it contains images. Defaults to `false`. -* `imageTagMutability` - (Optional) The tag mutability setting for the repository. Must be one of: `MUTABLE` or `IMMUTABLE`. Defaults to `MUTABLE`. +* `imageTagMutability` - (Optional) The tag mutability setting for the repository. Must be one of: `MUTABLE`, `IMMUTABLE`, `IMMUTABLE_WITH_EXCLUSION`, or `MUTABLE_WITH_EXCLUSION`. Defaults to `MUTABLE`. +* `imageTagMutabilityExclusionFilter` - (Optional) Configuration block that defines filters to specify which image tags can override the default tag mutability setting. 
Only applicable when `imageTagMutability` is set to `IMMUTABLE_WITH_EXCLUSION` or `MUTABLE_WITH_EXCLUSION`. See [below for schema](#image_tag_mutability_exclusion_filter). * `imageScanningConfiguration` - (Optional) Configuration block that defines image scanning configuration for the repository. By default, image scanning must be manually triggered. See the [ECR User Guide](https://docs.aws.amazon.com/AmazonECR/latest/userguide/image-scanning.html) for more information about image scanning. * `scanOnPush` - (Required) Indicates whether images are scanned after being pushed to the repository (true) or not scanned (false). * `tags` - (Optional) A map of tags to assign to the resource. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. @@ -56,6 +91,11 @@ This resource supports the following arguments: * `encryptionType` - (Optional) The encryption type to use for the repository. Valid values are `AES256` or `KMS`. Defaults to `AES256`. * `kmsKey` - (Optional) The ARN of the KMS key to use when `encryptionType` is `KMS`. If not specified, uses the default AWS managed key for ECR. +### image_tag_mutability_exclusion_filter + +* `filter` - (Required) The filter pattern to use for excluding image tags from the mutability setting. Must contain only letters, numbers, and special characters (._*-). Each filter can be up to 128 characters long and can contain a maximum of 2 wildcards (*). +* `filterType` - (Required) The type of filter to use. Must be `WILDCARD`. 
+ ## Attribute Reference This resource exports the following attributes in addition to the arguments above: @@ -73,6 +113,32 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_ecr_repository.service + identity = { + name = "test-service" + } +} + +resource "aws_ecr_repository" "service" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `name` - (String) Name of the ECR repository. + +#### Optional + +* `accountId` (String) AWS Account where this resource is managed. +* `region` (String) Region where this resource is managed. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import ECR Repositories using the `name`. For example: ```typescript @@ -99,4 +165,4 @@ Using `terraform import`, import ECR Repositories using the `name`. For example: % terraform import aws_ecr_repository.service test-service ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/ecr_repository_creation_template.html.markdown b/website/docs/cdktf/typescript/r/ecr_repository_creation_template.html.markdown index 5f010f27ed66..80dea352b46d 100644 --- a/website/docs/cdktf/typescript/r/ecr_repository_creation_template.html.markdown +++ b/website/docs/cdktf/typescript/r/ecr_repository_creation_template.html.markdown @@ -87,12 +87,14 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `prefix` - (Required, Forces new resource) The repository name prefix to match against. Use `ROOT` to match any prefix that doesn't explicitly match another template. * `appliedFor` - (Required) Which features this template applies to. Must contain one or more of `PULL_THROUGH_CACHE` or `REPLICATION`. * `customRoleArn` - (Optional) A custom IAM role to use for repository creation. Required if using repository tags or KMS encryption. * `description` - (Optional) The description for this template. * `encryptionConfiguration` - (Optional) Encryption configuration for any created repositories. See [below for schema](#encryption_configuration). * `imageTagMutability` - (Optional) The tag mutability setting for any created repositories. Must be one of: `MUTABLE` or `IMMUTABLE`. Defaults to `MUTABLE`. +* `imageTagMutabilityExclusionFilter` - (Optional) Configuration block that defines filters to specify which image tags can override the default tag mutability setting. Only applicable when `imageTagMutability` is set to `IMMUTABLE_WITH_EXCLUSION` or `MUTABLE_WITH_EXCLUSION`. See [below for schema](#image_tag_mutability_exclusion_filter). * `lifecyclePolicy` - (Optional) The lifecycle policy document to apply to any created repositories. See more details about [Policy Parameters](http://docs.aws.amazon.com/AmazonECR/latest/userguide/LifecyclePolicies.html#lifecycle_policy_parameters) in the official AWS docs. Consider using the [`aws_ecr_lifecycle_policy_document` data_source](/docs/providers/aws/d/ecr_lifecycle_policy_document.html) to generate/manage the JSON document used for the `lifecyclePolicy` argument. * `repositoryPolicy` - (Optional) The registry policy document to apply to any created repositories. This is a JSON formatted string. 
For more information about building IAM policy documents with Terraform, see the [AWS IAM Policy Document Guide](https://learn.hashicorp.com/terraform/aws/iam-policy). * `resourceTags` - (Optional) A map of tags to assign to any created repositories. @@ -102,6 +104,11 @@ This resource supports the following arguments: * `encryptionType` - (Optional) The encryption type to use for any created repositories. Valid values are `AES256` or `KMS`. Defaults to `AES256`. * `kmsKey` - (Optional) The ARN of the KMS key to use when `encryptionType` is `KMS`. If not specified, uses the default AWS managed key for ECR. +### image_tag_mutability_exclusion_filter + +* `filter` - (Required) The filter pattern to use for excluding image tags from the mutability setting. Must contain only letters, numbers, and special characters (._*-). Each filter can be up to 128 characters long and can contain a maximum of 2 wildcards (*). +* `filterType` - (Required) The type of filter to use. Must be `WILDCARD`. + ## Attribute Reference This resource exports the following attributes in addition to the arguments above: @@ -140,4 +147,4 @@ Using `terraform import`, import the ECR Repository Creating Templates using the % terraform import aws_ecr_repository_creation_template.example example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/ecr_repository_policy.html.markdown b/website/docs/cdktf/typescript/r/ecr_repository_policy.html.markdown index 0fc1d2d0f973..0876a0bebdee 100644 --- a/website/docs/cdktf/typescript/r/ecr_repository_policy.html.markdown +++ b/website/docs/cdktf/typescript/r/ecr_repository_policy.html.markdown @@ -88,6 +88,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `repository` - (Required) Name of the repository to apply the policy. * `policy` - (Required) The policy document. This is a JSON formatted string. For more information about building IAM policy documents with Terraform, see the [AWS IAM Policy Document Guide](https://learn.hashicorp.com/terraform/aws/iam-policy) @@ -100,6 +101,32 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_ecr_repository_policy.example + identity = { + repository = "example" + } +} + +resource "aws_ecr_repository_policy" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `repository` - (String) Name of the ECR repository. + +#### Optional + +* `accountId` (String) AWS Account where this resource is managed. +* `region` (String) Region where this resource is managed. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import ECR Repository Policy using the repository name. 
For example: ```typescript @@ -126,4 +153,4 @@ Using `terraform import`, import ECR Repository Policy using the repository name % terraform import aws_ecr_repository_policy.example example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/ecrpublic_repository.html.markdown b/website/docs/cdktf/typescript/r/ecrpublic_repository.html.markdown index 49b83dd08024..42ef1510ae40 100644 --- a/website/docs/cdktf/typescript/r/ecrpublic_repository.html.markdown +++ b/website/docs/cdktf/typescript/r/ecrpublic_repository.html.markdown @@ -57,6 +57,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `repositoryName` - (Required) Name of the repository. * `catalogData` - (Optional) Catalog data configuration for the repository. See [below for schema](#catalog_data). * `tags` - (Optional) Key-value mapping of resource tags. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. 
@@ -114,4 +115,4 @@ Using `terraform import`, import ECR Public Repositories using the `repositoryNa % terraform import aws_ecrpublic_repository.example example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/ecrpublic_repository_policy.html.markdown b/website/docs/cdktf/typescript/r/ecrpublic_repository_policy.html.markdown index b224120cba82..3b71aab291fc 100644 --- a/website/docs/cdktf/typescript/r/ecrpublic_repository_policy.html.markdown +++ b/website/docs/cdktf/typescript/r/ecrpublic_repository_policy.html.markdown @@ -90,6 +90,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `repositoryName` - (Required) Name of the repository to apply the policy. * `policy` - (Required) The policy document. This is a JSON formatted string. 
For more information about building IAM policy documents with Terraform, see the [AWS IAM Policy Document Guide](https://learn.hashicorp.com/terraform/aws/iam-policy) @@ -131,4 +132,4 @@ Using `terraform import`, import ECR Public Repository Policy using the reposito % terraform import aws_ecrpublic_repository_policy.example example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/ecs_account_setting_default.html.markdown b/website/docs/cdktf/typescript/r/ecs_account_setting_default.html.markdown index b00246ffc295..49d30e0d2b0e 100644 --- a/website/docs/cdktf/typescript/r/ecs_account_setting_default.html.markdown +++ b/website/docs/cdktf/typescript/r/ecs_account_setting_default.html.markdown @@ -68,6 +68,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the account setting to set. * `value` - (Required) State of the setting. @@ -75,7 +76,6 @@ This resource supports the following arguments: This resource exports the following attributes in addition to the arguments above: -* `id` - ARN that identifies the account setting. * `prinicpal_arn` - ARN that identifies the account setting. ## Import @@ -110,4 +110,4 @@ Using `terraform import`, import ECS Account Setting defaults using the `name`. 
% terraform import aws_ecs_account_setting_default.example taskLongArnFormat ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/ecs_capacity_provider.html.markdown b/website/docs/cdktf/typescript/r/ecs_capacity_provider.html.markdown index 27f9daddab3c..a20d63e31d3e 100644 --- a/website/docs/cdktf/typescript/r/ecs_capacity_provider.html.markdown +++ b/website/docs/cdktf/typescript/r/ecs_capacity_provider.html.markdown @@ -33,7 +33,7 @@ interface MyConfig { class MyConvertedCode extends TerraformStack { constructor(scope: Construct, name: string, config: MyConfig) { super(scope, name); - const test = new AutoscalingGroup(this, "test", { + const example = new AutoscalingGroup(this, "example", { tag: [ { key: "AmazonECSManaged", @@ -44,21 +44,25 @@ class MyConvertedCode extends TerraformStack { maxSize: config.maxSize, minSize: config.minSize, }); - const awsEcsCapacityProviderTest = new EcsCapacityProvider(this, "test_1", { - autoScalingGroupProvider: { - autoScalingGroupArn: test.arn, - managedScaling: { - maximumScalingStepSize: 1000, - minimumScalingStepSize: 1, - status: "ENABLED", - targetCapacity: 10, + const awsEcsCapacityProviderExample = new EcsCapacityProvider( + this, + "example_1", + { + autoScalingGroupProvider: { + autoScalingGroupArn: example.arn, + managedScaling: { + maximumScalingStepSize: 1000, + minimumScalingStepSize: 1, + status: "ENABLED", + targetCapacity: 10, + }, + managedTerminationProtection: "ENABLED", }, - managedTerminationProtection: "ENABLED", - }, - name: "test", - }); + name: "example", + } + ); /*This allows the Terraform resource name to match the original name. 
You can remove the call if you don't need them to match.*/ - awsEcsCapacityProviderTest.overrideLogicalId("test"); + awsEcsCapacityProviderExample.overrideLogicalId("example"); } } @@ -68,6 +72,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `autoScalingGroupProvider` - (Required) Configuration block for the provider for the ECS auto scaling group. Detailed below. * `name` - (Required) Name of the capacity provider. * `tags` - (Optional) Key-value map of resource tags. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. @@ -94,12 +99,32 @@ This resource supports the following arguments: This resource exports the following attributes in addition to the arguments above: * `arn` - ARN that identifies the capacity provider. -* `id` - ARN that identifies the capacity provider. * `tagsAll` - Map of tags assigned to the resource, including those inherited from the provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). ## Import -In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import ECS Capacity Providers using the `name`. For example: +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. 
For example: + +```terraform +import { + to = aws_ecs_capacity_provider.example + identity = { + "arn" = "arn:aws:ecs:us-west-2:123456789012:capacity-provider/example" + } +} + +resource "aws_ecs_capacity_provider" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the ECS capacity provider. + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import ECS Capacity Providers using the `arn`. For example: ```typescript // DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug @@ -113,16 +138,20 @@ import { EcsCapacityProvider } from "./.gen/providers/aws/ecs-capacity-provider" class MyConvertedCode extends TerraformStack { constructor(scope: Construct, name: string) { super(scope, name); - EcsCapacityProvider.generateConfigForImport(this, "example", "example"); + EcsCapacityProvider.generateConfigForImport( + this, + "example", + "arn:aws:ecs:us-west-2:123456789012:capacity-provider/example" + ); } } ``` -Using `terraform import`, import ECS Capacity Providers using the `name`. For example: +Using `terraform import`, import ECS Capacity Providers using the `arn`. 
For example: ```console -% terraform import aws_ecs_capacity_provider.example example +% terraform import aws_ecs_capacity_provider.example arn:aws:ecs:us-west-2:123456789012:capacity-provider/example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/ecs_cluster.html.markdown b/website/docs/cdktf/typescript/r/ecs_cluster.html.markdown index cc4dc147a09e..f9dad5558d6f 100644 --- a/website/docs/cdktf/typescript/r/ecs_cluster.html.markdown +++ b/website/docs/cdktf/typescript/r/ecs_cluster.html.markdown @@ -186,6 +186,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `configuration` - (Optional) Execute command configuration for the cluster. See [`configuration` Block](#configuration-block) for details. * `serviceConnectDefaults` - (Optional) Default Service Connect namespace. See [`serviceConnectDefaults` Block](#service_connect_defaults-block) for details. * `setting` - (Optional) Configuration block(s) with cluster settings. For example, this can be used to enable CloudWatch Container Insights for a cluster. See [`setting` Block](#setting-block) for details. @@ -241,7 +242,6 @@ The `setting` configuration block supports the following arguments: This resource exports the following attributes in addition to the arguments above: * `arn` - ARN that identifies the cluster. -* `id` - ARN that identifies the cluster. * `tagsAll` - Map of tags assigned to the resource, including those inherited from the provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). 
## Import @@ -272,4 +272,4 @@ Using `terraform import`, import ECS clusters using the cluster name. For exampl % terraform import aws_ecs_cluster.stateless stateless-app ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/ecs_cluster_capacity_providers.html.markdown b/website/docs/cdktf/typescript/r/ecs_cluster_capacity_providers.html.markdown index 89542d2c008d..d3482b6afb37 100644 --- a/website/docs/cdktf/typescript/r/ecs_cluster_capacity_providers.html.markdown +++ b/website/docs/cdktf/typescript/r/ecs_cluster_capacity_providers.html.markdown @@ -55,6 +55,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `capacityProviders` - (Optional) Set of names of one or more capacity providers to associate with the cluster. Valid values also include `FARGATE` and `FARGATE_SPOT`. * `clusterName` - (Required, Forces new resource) Name of the ECS cluster to manage capacity providers for. * `defaultCapacityProviderStrategy` - (Optional) Set of capacity provider strategies to use by default for the cluster. Detailed below. 
@@ -103,4 +104,4 @@ Using `terraform import`, import ECS cluster capacity providers using the `clust % terraform import aws_ecs_cluster_capacity_providers.example my-cluster ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/ecs_service.html.markdown b/website/docs/cdktf/typescript/r/ecs_service.html.markdown index 2c06ef40bf63..9ddda9798702 100644 --- a/website/docs/cdktf/typescript/r/ecs_service.html.markdown +++ b/website/docs/cdktf/typescript/r/ecs_service.html.markdown @@ -173,6 +173,34 @@ class MyConvertedCode extends TerraformStack { ``` +### Blue/Green Deployment with SIGINT Rollback + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { Token, TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. + */ +import { EcsService } from "./.gen/providers/aws/ecs-service"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + new EcsService(this, "example", { + cluster: Token.asString(awsEcsClusterExample.id), + deploymentConfiguration: { + strategy: "BLUE_GREEN", + }, + name: "example", + sigintRollback: true, + waitForSteadyState: true, + }); + } +} + +``` + ### Redeploy Service On Every Apply The key used with `triggers` is arbitrary. @@ -212,11 +240,13 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `alarms` - (Optional) Information about the CloudWatch alarms. [See below](#alarms). 
-* `availabilityZoneRebalancing` - (Optional) ECS automatically redistributes tasks within a service across Availability Zones (AZs) to mitigate the risk of impaired application availability due to underlying infrastructure failures and task lifecycle activities. The valid values are `ENABLED` and `DISABLED`. Defaults to `DISABLED`. -* `capacityProviderStrategy` - (Optional) Capacity provider strategies to use for the service. Can be one or more. These can be updated without destroying and recreating the service only if `force_new_deployment = true` and not changing from 0 `capacityProviderStrategy` blocks to greater than 0, or vice versa. [See below](#capacity_provider_strategy). Conflicts with `launchType`. +* `availabilityZoneRebalancing` - (Optional) ECS automatically redistributes tasks within a service across Availability Zones (AZs) to mitigate the risk of impaired application availability due to underlying infrastructure failures and task lifecycle activities. The valid values are `ENABLED` and `DISABLED`. When creating a new service, if no value is specified, it defaults to `ENABLED` if the service is compatible with AvailabilityZoneRebalancing. When updating an existing service, if no value is specified it defaults to the existing service's AvailabilityZoneRebalancing value. If the service never had an AvailabilityZoneRebalancing value set, Amazon ECS treats this as `DISABLED`. +* `capacityProviderStrategy` - (Optional) Capacity provider strategies to use for the service. Can be one or more. Updating this argument requires `force_new_deployment = true`. [See below](#capacity_provider_strategy). Conflicts with `launchType`. * `cluster` - (Optional) ARN of an ECS cluster. * `deploymentCircuitBreaker` - (Optional) Configuration block for deployment circuit breaker. [See below](#deployment_circuit_breaker). +* `deploymentConfiguration` - (Optional) Configuration block for deployment settings. [See below](#deployment_configuration). 
* `deploymentController` - (Optional) Configuration block for deployment controller configuration. [See below](#deployment_controller). * `deploymentMaximumPercent` - (Optional) Upper limit (as a percentage of the service's desiredCount) of the number of running tasks that can be running in a service during a deployment. Not valid when using the `DAEMON` scheduling strategy. * `deploymentMinimumHealthyPercent` - (Optional) Lower limit (as a percentage of the service's desiredCount) of the number of running tasks that must remain running and healthy in a service during a deployment. @@ -237,6 +267,7 @@ The following arguments are optional: * `schedulingStrategy` - (Optional) Scheduling strategy to use for the service. The valid values are `REPLICA` and `DAEMON`. Defaults to `REPLICA`. Note that [*Tasks using the Fargate launch type or the `CODE_DEPLOY` or `EXTERNAL` deployment controller types don't support the `DAEMON` scheduling strategy*](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_CreateService.html). * `serviceConnectConfiguration` - (Optional) ECS Service Connect configuration for this service to discover and connect to services, and be discovered by, and connected from, other services within a namespace. [See below](#service_connect_configuration). * `serviceRegistries` - (Optional) Service discovery registries for the service. The maximum number of `serviceRegistries` blocks is `1`. [See below](#service_registries). +* `sigintRollback` - (Optional) Whether to enable graceful termination of deployments using SIGINT signals. When enabled, allows customers to safely cancel an in-progress deployment and automatically trigger a rollback to the previous stable state. Defaults to `false`. Only applicable when using `ECS` deployment controller and requires `wait_for_steady_state = true`. * `tags` - (Optional) Key-value map of resource tags. 
If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. * `taskDefinition` - (Optional) Family and revision (`family:revision`) or full ARN of the task definition that you want to run in your service. Required unless using the `EXTERNAL` deployment controller. If a revision is not specified, the latest `ACTIVE` revision is used. * `triggers` - (Optional) Map of arbitrary keys and values that, when changed, will trigger an in-place update (redeployment). Useful with `plantimestamp()`. See example above. @@ -291,6 +322,23 @@ The `capacityProviderStrategy` configuration block supports the following: * `capacityProvider` - (Required) Short name of the capacity provider. * `weight` - (Required) Relative percentage of the total number of launched tasks that should use the specified capacity provider. +### deployment_configuration + +The `deploymentConfiguration` configuration block supports the following: + +* `strategy` - (Optional) Type of deployment strategy. Valid values: `ROLLING`, `BLUE_GREEN`. Default: `ROLLING`. +* `bakeTimeInMinutes` - (Optional) Number of minutes to wait after a new deployment is fully provisioned before terminating the old deployment. Only used when `strategy` is set to `BLUE_GREEN`. +* `lifecycleHook` - (Optional) Configuration block for lifecycle hooks that are invoked during deployments. [See below](#lifecycle_hook). + +### lifecycle_hook + +The `lifecycleHook` configuration block supports the following: + +* `hookTargetArn` - (Required) ARN of the Lambda function to invoke for the lifecycle hook. +* `roleArn` - (Required) ARN of the IAM role that grants the service permission to invoke the Lambda function. +* `lifecycleStages` - (Required) Stages during the deployment when the hook should be invoked. 
Valid values: `RECONCILE_SERVICE`, `PRE_SCALE_UP`, `POST_SCALE_UP`, `TEST_TRAFFIC_SHIFT`, `POST_TEST_TRAFFIC_SHIFT`, `PRODUCTION_TRAFFIC_SHIFT`, `POST_PRODUCTION_TRAFFIC_SHIFT`. +* `hookDetails` - (Optional) Custom parameters that Amazon ECS will pass to the hook target invocations (such as a Lambda function). + ### deployment_circuit_breaker The `deploymentCircuitBreaker` configuration block supports the following: @@ -312,9 +360,19 @@ The `deploymentController` configuration block supports the following: * `targetGroupArn` - (Required for ALB/NLB) ARN of the Load Balancer target group to associate with the service. * `containerName` - (Required) Name of the container to associate with the load balancer (as it appears in a container definition). * `containerPort` - (Required) Port on the container to associate with the load balancer. +* `advancedConfiguration` - (Optional) Configuration block for Blue/Green deployment settings. Required when using `BLUE_GREEN` deployment strategy. [See below](#advanced_configuration). -> **Version note:** Multiple `loadBalancer` configuration block support was added in Terraform AWS Provider version 2.22.0. This allows configuration of [ECS service support for multiple target groups](https://aws.amazon.com/about-aws/whats-new/2019/07/amazon-ecs-services-now-support-multiple-load-balancer-target-groups/). +### advanced_configuration + +The `advancedConfiguration` configuration block supports the following: + +* `alternateTargetGroupArn` - (Required) ARN of the alternate target group to use for Blue/Green deployments. +* `productionListenerRule` - (Required) ARN of the listener rule that routes production traffic. +* `roleArn` - (Required) ARN of the IAM role that allows ECS to manage the target groups. +* `testListenerRule` - (Optional) ARN of the listener rule that routes test traffic. 
+ ### network_configuration `networkConfiguration` support the following: @@ -381,7 +439,7 @@ For more information, see [Task Networking](https://docs.aws.amazon.com/AmazonEC `service` supports the following: -* `clientAlias` - (Optional) List of client aliases for this Service Connect service. You use these to assign names that can be used by client applications. The maximum number of client aliases that you can have in this list is 1. [See below](#client_alias). +* `clientAlias` - (Optional) List of client aliases for this Service Connect service. You use these to assign names that can be used by client applications. For each service block where enabled is true, exactly one `clientAlias` with one `port` should be specified. [See below](#client_alias). * `discoveryName` - (Optional) Name of the new AWS Cloud Map service that Amazon ECS creates for this Amazon ECS service. * `ingressPortOverride` - (Optional) Port number for the Service Connect proxy to listen on. * `portName` - (Required) Name of one of the `portMappings` from all the containers in the task definition of this Amazon ECS service. @@ -415,6 +473,26 @@ For more information, see [Task Networking](https://docs.aws.amazon.com/AmazonEC * `dnsName` - (Optional) Name that you use in the applications of client tasks to connect to this service. * `port` - (Required) Listening port number for the Service Connect proxy. This port is available inside of all of the tasks within the same namespace. +* `testTrafficRules` - (Optional) Configuration block for test traffic routing rules. [See below](#test_traffic_rules). + +### test_traffic_rules + +The `testTrafficRules` configuration block supports the following: + +* `header` - (Optional) Configuration block for header-based routing rules. [See below](#header). + +### header + +The `header` configuration block supports the following: + +* `name` - (Required) Name of the HTTP header to match. 
+* `value` - (Required) Configuration block for header value matching criteria. [See below](#value). + +### value + +The `value` configuration block supports the following: + +* `exact` - (Required) Exact string value to match in the header. ### tag_specifications @@ -428,7 +506,7 @@ For more information, see [Task Networking](https://docs.aws.amazon.com/AmazonEC This resource exports the following attributes in addition to the arguments above: -* `id` - ARN that identifies the service. +* `arn` - ARN that identifies the service. * `tagsAll` - A map of tags assigned to the resource, including those inherited from the provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). ## Timeouts @@ -471,4 +549,4 @@ Using `terraform import`, import ECS services using the `name` together with ecs % terraform import aws_ecs_service.imported cluster-name/service-name ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/ecs_tag.html.markdown b/website/docs/cdktf/typescript/r/ecs_tag.html.markdown index 93cb565f929b..e3c850dc23ae 100644 --- a/website/docs/cdktf/typescript/r/ecs_tag.html.markdown +++ b/website/docs/cdktf/typescript/r/ecs_tag.html.markdown @@ -32,7 +32,7 @@ class MyConvertedCode extends TerraformStack { constructor(scope: Construct, name: string) { super(scope, name); const example = new BatchComputeEnvironment(this, "example", { - computeEnvironmentName: "example", + name: "example", serviceRole: Token.asString(awsIamRoleExample.arn), type: "UNMANAGED", }); @@ -52,6 +52,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `resourceArn` - (Required) Amazon Resource Name (ARN) of the ECS resource to tag. * `key` - (Required) Tag name. * `value` - (Required) Tag value. @@ -94,4 +95,4 @@ Using `terraform import`, import `aws_ecs_tag` using the ECS resource identifier % terraform import aws_ecs_tag.example arn:aws:ecs:us-east-1:123456789012:cluster/example,Name ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/ecs_task_definition.html.markdown b/website/docs/cdktf/typescript/r/ecs_task_definition.html.markdown index 10357afb00ee..e08425d4653e 100644 --- a/website/docs/cdktf/typescript/r/ecs_task_definition.html.markdown +++ b/website/docs/cdktf/typescript/r/ecs_task_definition.html.markdown @@ -226,7 +226,7 @@ resource "aws_secretsmanager_secret_version" "test" { } ``` -### Example Using `containerDefinitions` and `inferenceAccelerator` +### Example Using `containerDefinitions` ```typescript // DO NOT EDIT. 
Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug @@ -242,14 +242,8 @@ class MyConvertedCode extends TerraformStack { super(scope, name); new EcsTaskDefinition(this, "test", { containerDefinitions: - '[\n {\n "cpu": 10,\n "command": ["sleep", "10"],\n "entryPoint": ["/"],\n "environment": [\n {"name": "VARNAME", "value": "VARVAL"}\n ],\n "essential": true,\n "image": "jenkins",\n "memory": 128,\n "name": "jenkins",\n "portMappings": [\n {\n "containerPort": 80,\n "hostPort": 8080\n }\n ],\n "resourceRequirements":[\n {\n "type":"InferenceAccelerator",\n "value":"device_1"\n }\n ]\n }\n]\n\n', + '[\n {\n "cpu": 10,\n "command": ["sleep", "10"],\n "entryPoint": ["/"],\n "environment": [\n {"name": "VARNAME", "value": "VARVAL"}\n ],\n "essential": true,\n "image": "jenkins",\n "memory": 128,\n "name": "jenkins",\n "portMappings": [\n {\n "containerPort": 80,\n "hostPort": 8080\n }\n ]\n }\n]\n\n', family: "test", - inferenceAccelerator: [ - { - deviceName: "device_1", - deviceType: "eia1.medium", - }, - ], }); } } @@ -297,12 +291,10 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `cpu` - (Optional) Number of cpu units used by the task. If the `requiresCompatibilities` is `FARGATE` this field is required. * `enableFaultInjection` - (Optional) Enables fault injection and allows for fault injection requests to be accepted from the task's containers. Default is `false`. - - **Note:** Fault injection only works with tasks using the `awsvpc` or `host` network modes. Fault injection isn't available on Windows. 
* `executionRoleArn` - (Optional) ARN of the task execution role that the Amazon ECS container agent and the Docker daemon can assume. -* `inferenceAccelerator` - (Optional) Configuration block(s) with Inference Accelerators settings. [Detailed below.](#inference_accelerator) * `ipcMode` - (Optional) IPC resource namespace to be used for the containers in the task The valid values are `host`, `task`, and `none`. * `memory` - (Optional) Amount (in MiB) of memory used by the task. If the `requiresCompatibilities` is `FARGATE` this field is required. * `networkMode` - (Optional) Docker networking mode to use for the containers in the task. Valid values are `none`, `bridge`, `awsvpc`, and `host`. @@ -320,6 +312,8 @@ The following arguments are optional: ~> **NOTE:** Proper escaping is required for JSON field values containing quotes (`"`) such as `environment` values. If directly setting the JSON, they should be escaped as `\"` in the JSON, e.g., `"value": "I \"love\" escaped quotes"`. If using a Terraform variable value, they should be escaped as `\\\"` in the variable, e.g., `value = "I \\\"love\\\" escaped quotes"` in the variable and `"value": "${var.myvariable}"` in the JSON. +~> **Note:** Fault injection only works with tasks using the `awsvpc` or `host` network modes. Fault injection isn't available on Windows. + ### volume * `dockerVolumeConfiguration` - (Optional) Configuration block to configure a [docker volume](#docker_volume_configuration). Detailed below. @@ -388,11 +382,6 @@ For more information, see [Specifying an FSX Windows File Server volume in your * `sizeInGib` - (Required) The total amount, in GiB, of ephemeral storage to set for the task. The minimum supported value is `21` GiB and the maximum supported value is `200` GiB. -### inference_accelerator - -* `deviceName` - (Required) Elastic Inference accelerator device name. The deviceName must also be referenced in a container definition as a ResourceRequirement. 
-* `deviceType` - (Required) Elastic Inference accelerator type to use. - ## Attribute Reference This resource exports the following attributes in addition to the arguments above: @@ -434,4 +423,4 @@ Using `terraform import`, import ECS Task Definitions using their ARNs. For exam % terraform import aws_ecs_task_definition.example arn:aws:ecs:us-east-1:012345678910:task-definition/mytaskfamily:123 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/ecs_task_set.html.markdown b/website/docs/cdktf/typescript/r/ecs_task_set.html.markdown index adb2f64c7e08..1480ecf72bdd 100644 --- a/website/docs/cdktf/typescript/r/ecs_task_set.html.markdown +++ b/website/docs/cdktf/typescript/r/ecs_task_set.html.markdown @@ -92,6 +92,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `capacityProviderStrategy` - (Optional) The capacity provider strategy to use for the service. Can be one or more. [Defined below](#capacity_provider_strategy). * `externalId` - (Optional) The external ID associated with the task set. * `forceDelete` - (Optional) Whether to allow deleting the task set without waiting for scaling down to 0. You can force a task set to delete even if it's in the process of scaling a resource. Normally, Terraform drains all the tasks before deleting the task set. This bypasses that behavior and potentially leaves resources dangling. 
@@ -193,4 +194,4 @@ Using `terraform import`, import ECS Task Sets using the `taskSetId`, `service`, % terraform import aws_ecs_task_set.example ecs-svc/7177320696926227436,arn:aws:ecs:us-west-2:123456789101:service/example/example-1234567890,arn:aws:ecs:us-west-2:123456789101:cluster/example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/efs_access_point.html.markdown b/website/docs/cdktf/typescript/r/efs_access_point.html.markdown index 247aa48540fa..86106bae5651 100644 --- a/website/docs/cdktf/typescript/r/efs_access_point.html.markdown +++ b/website/docs/cdktf/typescript/r/efs_access_point.html.markdown @@ -38,6 +38,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `fileSystemId` - (Required) ID of the file system for which the access point is intended. * `posixUser` - (Optional) Operating system user and group applied to all file system requests made using the access point. [Detailed](#posix_user) below. * `rootDirectory`- (Optional) Directory on the Amazon EFS file system that the access point provides access to. [Detailed](#root_directory) below. @@ -101,4 +102,4 @@ Using `terraform import`, import the EFS access points using the `id`. 
For examp % terraform import aws_efs_access_point.test fsap-52a643fb ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/efs_backup_policy.html.markdown b/website/docs/cdktf/typescript/r/efs_backup_policy.html.markdown index 52450ddadc97..5c23d50e7804 100644 --- a/website/docs/cdktf/typescript/r/efs_backup_policy.html.markdown +++ b/website/docs/cdktf/typescript/r/efs_backup_policy.html.markdown @@ -46,6 +46,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `fileSystemId` - (Required) The ID of the EFS file system. * `backupPolicy` - (Required) A backup_policy object (documented below). @@ -89,4 +90,4 @@ Using `terraform import`, import the EFS backup policies using the `id`. For exa % terraform import aws_efs_backup_policy.example fs-6fa144c6 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/efs_file_system.html.markdown b/website/docs/cdktf/typescript/r/efs_file_system.html.markdown index 057dc51d0a05..09bf463f98d3 100644 --- a/website/docs/cdktf/typescript/r/efs_file_system.html.markdown +++ b/website/docs/cdktf/typescript/r/efs_file_system.html.markdown @@ -70,6 +70,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
* `availabilityZoneName` - (Optional) the AWS Availability Zone in which to create the file system. Used to create a file system that uses One Zone storage classes. See [user guide](https://docs.aws.amazon.com/efs/latest/ug/availability-durability.html) for more information. * `creationToken` - (Optional) A unique name (a maximum of 64 characters are allowed) used as reference when creating the Elastic File System to ensure idempotent file @@ -150,4 +151,4 @@ Using `terraform import`, import the EFS file systems using the `id`. For exampl % terraform import aws_efs_file_system.foo fs-6fa144c6 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/efs_file_system_policy.html.markdown b/website/docs/cdktf/typescript/r/efs_file_system_policy.html.markdown index b52cd6d36bd5..fa3fc4dff2fa 100644 --- a/website/docs/cdktf/typescript/r/efs_file_system_policy.html.markdown +++ b/website/docs/cdktf/typescript/r/efs_file_system_policy.html.markdown @@ -81,6 +81,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `bypassPolicyLockoutSafetyCheck` - (Optional) A flag to indicate whether to bypass the `aws_efs_file_system_policy` lockout safety check. The policy lockout safety check determines whether the policy in the request will prevent the principal making the request will be locked out from making future `PutFileSystemPolicy` requests on the file system. Set `bypassPolicyLockoutSafetyCheck` to `true` only when you intend to prevent the principal that is making the request from making a subsequent `PutFileSystemPolicy` request on the file system. The default value is `false`. 
## Attribute Reference @@ -117,4 +118,4 @@ Using `terraform import`, import the EFS file system policies using the `id`. Fo % terraform import aws_efs_file_system_policy.foo fs-6fa144c6 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/efs_mount_target.html.markdown b/website/docs/cdktf/typescript/r/efs_mount_target.html.markdown index f2d6318619d7..140a52a28e30 100644 --- a/website/docs/cdktf/typescript/r/efs_mount_target.html.markdown +++ b/website/docs/cdktf/typescript/r/efs_mount_target.html.markdown @@ -51,10 +51,13 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `fileSystemId` - (Required) The ID of the file system for which the mount target is intended. * `subnetId` - (Required) The ID of the subnet to add the mount target in. * `ipAddress` - (Optional) The address (within the address range of the specified subnet) at which the file system may be mounted via the mount target. +* `ipAddressType` - (Optional) IP address type for the mount target. Valid values are `IPV4_ONLY` (only IPv4 addresses), `IPV6_ONLY` (only IPv6 addresses), and `DUAL_STACK` (dual-stack, both IPv4 and IPv6 addresses). Defaults to `IPV4_ONLY`. +* `ipv6Address` - (Optional) IPv6 address to use. Valid only when `ipAddressType` is set to `IPV6_ONLY` or `DUAL_STACK`. * `securityGroups` - (Optional) A list of up to 5 VPC security group IDs (that must be for the same VPC as subnet specified) in effect for the mount target. @@ -110,4 +113,4 @@ Using `terraform import`, import the EFS mount targets using the `id`. 
For examp % terraform import aws_efs_mount_target.alpha fsmt-52a643fb ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/efs_replication_configuration.html.markdown b/website/docs/cdktf/typescript/r/efs_replication_configuration.html.markdown index 52414b4fcf8d..bfa23d2d1fd5 100644 --- a/website/docs/cdktf/typescript/r/efs_replication_configuration.html.markdown +++ b/website/docs/cdktf/typescript/r/efs_replication_configuration.html.markdown @@ -111,6 +111,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `destination` - (Required) A destination configuration block (documented below). * `sourceFileSystemId` - (Required) The ID of the file system that is to be replicated. @@ -173,4 +174,4 @@ Using `terraform import`, import EFS Replication Configurations using the file s % terraform import aws_efs_replication_configuration.example fs-id ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/egress_only_internet_gateway.html.markdown b/website/docs/cdktf/typescript/r/egress_only_internet_gateway.html.markdown index c1d3addadca0..5eb740a80744 100644 --- a/website/docs/cdktf/typescript/r/egress_only_internet_gateway.html.markdown +++ b/website/docs/cdktf/typescript/r/egress_only_internet_gateway.html.markdown @@ -55,6 +55,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `vpcId` - (Required) The VPC ID to create in. * `tags` - (Optional) A map of tags to assign to the resource. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. @@ -97,4 +98,4 @@ Using `terraform import`, import Egress-only Internet gateways using the `id`. F % terraform import aws_egress_only_internet_gateway.example eigw-015e0e244e24dfe8a ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/eip.html.markdown b/website/docs/cdktf/typescript/r/eip.html.markdown index 0fc5ca3b2427..aeb102e524ae 100644 --- a/website/docs/cdktf/typescript/r/eip.html.markdown +++ b/website/docs/cdktf/typescript/r/eip.html.markdown @@ -173,6 +173,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `address` - (Optional) IP address from an EC2 BYOIP pool. This option is only available for VPC EIPs. * `associateWithPrivateIp` - (Optional) User-specified primary or secondary private IP address to associate with the Elastic IP address. If no private IP address is specified, the Elastic IP address is associated with the primary private IP address. * `customerOwnedIpv4Pool` - (Optional) ID of a customer-owned address pool. 
For more on customer owned IP addressed check out [Customer-owned IP addresses guide](https://docs.aws.amazon.com/outposts/latest/userguide/outposts-networking-components.html#ip-addressing). @@ -184,13 +185,12 @@ This resource supports the following arguments: * `publicIpv4Pool` - (Optional) EC2 IPv4 address pool identifier or `amazon`. This option is only available for VPC EIPs. * `tags` - (Optional) Map of tags to assign to the resource. Tags can only be applied to EIPs in a VPC. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. -* `vpc` - (Optional **Deprecated**) Boolean if the EIP is in a VPC or not. Use `domain` instead. - Defaults to `true` unless the region supports EC2-Classic. -~> **NOTE:** You can specify either the `instance` ID or the `networkInterface` ID, but not both. Including both will **not** return an error from the AWS API, but will have undefined behavior. See the relevant [AssociateAddress API Call][1] for more information. +~> **NOTE:** You can specify either the `instance` ID or the `networkInterface` ID, but not both. +Including both will **not** return an error from the AWS API, but will have undefined behavior. +See the relevant [AssociateAddress API Call][1] for more information. -~> **NOTE:** Specifying both `publicIpv4Pool` and `address` won't cause an error but `address` will be used in the -case both options are defined as the api only requires one or the other. +~> **NOTE:** Specifying both `publicIpv4Pool` and `address` won't cause an error, however, only `address` will be used if both options are defined as the API only requires one of the two. ## Attribute Reference @@ -248,4 +248,4 @@ Using `terraform import`, import EIPs in a VPC using their Allocation ID. 
For ex [1]: https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_AssociateAddress.html - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/eip_association.html.markdown b/website/docs/cdktf/typescript/r/eip_association.html.markdown index ce278dd0cef1..1d0621af1fd6 100644 --- a/website/docs/cdktf/typescript/r/eip_association.html.markdown +++ b/website/docs/cdktf/typescript/r/eip_association.html.markdown @@ -56,6 +56,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `allocationId` - (Optional, Forces new resource) ID of the associated Elastic IP. This argument is required despite being optional at the resource level due to legacy support for EC2-Classic networking. * `allowReassociation` - (Optional, Forces new resource) Whether to allow an Elastic IP address to be re-associated. @@ -104,4 +105,4 @@ Using `terraform import`, import EIP Assocations using their association IDs. 
Fo % terraform import aws_eip_association.test eipassoc-ab12c345 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/eip_domain_name.html.markdown b/website/docs/cdktf/typescript/r/eip_domain_name.html.markdown index 04f17971d43a..a445da99a4e3 100644 --- a/website/docs/cdktf/typescript/r/eip_domain_name.html.markdown +++ b/website/docs/cdktf/typescript/r/eip_domain_name.html.markdown @@ -54,6 +54,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `allocationId` - (Required) The allocation ID. * `domainName` - (Required) The domain name to modify for the IP address. @@ -71,4 +72,4 @@ This resource exports the following attributes in addition to the arguments abov - `update` - (Default `10m`) - `delete` - (Default `10m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/eks_access_entry.html.markdown b/website/docs/cdktf/typescript/r/eks_access_entry.html.markdown index 3a192fea9419..625ef30bb1c2 100644 --- a/website/docs/cdktf/typescript/r/eks_access_entry.html.markdown +++ b/website/docs/cdktf/typescript/r/eks_access_entry.html.markdown @@ -41,12 +41,13 @@ class MyConvertedCode extends TerraformStack { The following arguments are required: -* `clusterName` – (Required) Name of the EKS Cluster. -* `principalArn` – (Required) The IAM Principal ARN which requires Authentication access to the EKS cluster. +* `clusterName` - (Required) Name of the EKS Cluster. +* `principalArn` - (Required) The IAM Principal ARN which requires Authentication access to the EKS cluster. 
The following arguments are optional: -* `kubernetesGroups` – (Optional) List of string which can optionally specify the Kubernetes groups the user would belong to when creating an access entry. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `kubernetesGroups` - (Optional) List of string which can optionally specify the Kubernetes groups the user would belong to when creating an access entry. * `tags` - (Optional) Key-value map of resource tags. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. * `type` - (Optional) Defaults to STANDARD which provides the standard workflow. EC2_LINUX, EC2_WINDOWS, FARGATE_LINUX types disallow users to input a username or groups, and prevent associations. * `userName` - (Optional) Defaults to principal ARN if user is principal else defaults to assume-role/session-name is role is used. 
@@ -100,4 +101,4 @@ Using `terraform import`, import EKS access entry using the `clusterName` and `p % terraform import aws_eks_access_entry.my_eks_access_entry my_cluster_name:my_principal_arn ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/eks_access_policy_association.html.markdown b/website/docs/cdktf/typescript/r/eks_access_policy_association.html.markdown index a6702d11df87..fb83daea83d4 100644 --- a/website/docs/cdktf/typescript/r/eks_access_policy_association.html.markdown +++ b/website/docs/cdktf/typescript/r/eks_access_policy_association.html.markdown @@ -42,12 +42,13 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -The following arguments are required: +This resource supports the following arguments: -* `clusterName` – (Required) Name of the EKS Cluster. -* `policyArn` – (Required) The ARN of the access policy that you're associating. -* `principalArn` – (Required) The IAM Principal ARN which requires Authentication access to the EKS cluster. -* `accessScope` – (Required) The configuration block to determine the scope of the access. See [`accessScope` Block](#access_scope-block) below. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `clusterName` - (Required) Name of the EKS Cluster. +* `policyArn` - (Required) The ARN of the access policy that you're associating. +* `principalArn` - (Required) The IAM Principal ARN which requires Authentication access to the EKS cluster. +* `accessScope` - (Required) The configuration block to determine the scope of the access. See [`accessScope` Block](#access_scope-block) below. 
### `accessScope` Block @@ -109,4 +110,4 @@ Using `terraform import`, import EKS access entry using the `clusterName` `princ % terraform import aws_eks_access_policy_association.my_eks_access_entry my_cluster_name#my_principal_arn#my_policy_arn ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/eks_addon.html.markdown b/website/docs/cdktf/typescript/r/eks_addon.html.markdown index 8eb95d014fe7..8f7e4e536815 100644 --- a/website/docs/cdktf/typescript/r/eks_addon.html.markdown +++ b/website/docs/cdktf/typescript/r/eks_addon.html.markdown @@ -68,13 +68,14 @@ Custom add-on configuration can be passed using `configurationValues` as a singl ~> **Note:** `configurationValues` is a single JSON string should match the valid JSON schema for each add-on with specific version. -To find the correct JSON schema for each add-on can be extracted using [describe-addon-configuration](https://docs.aws.amazon.com/cli/latest/reference/eks/describe-addon-configuration.html) call. -This below is an example for extracting the `configurationValues` schema for `coredns`. +You can use [describe-addon-configuration](https://docs.aws.amazon.com/cli/latest/reference/eks/describe-addon-configuration.html) to extract each add-on's JSON schema. +Here's an example command to extract the `configurationValues` schema for `coredns`. ```bash - aws eks describe-addon-configuration \ - --addon-name coredns \ - --addon-version v1.10.1-eksbuild.1 +aws eks describe-addon-configuration \ + --addon-name coredns \ + --addon-version v1.10.1-eksbuild.1 \ + | jq -r .configurationSchema | jq . ``` Example to create a `coredns` managed addon with custom `configurationValues`. @@ -234,20 +235,21 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -The following arguments are required: +This resource supports the following arguments: -* `addonName` – (Required) Name of the EKS add-on. 
The name must match one of +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `addonName` - (Required) Name of the EKS add-on. The name must match one of the names returned by [describe-addon-versions](https://docs.aws.amazon.com/cli/latest/reference/eks/describe-addon-versions.html). -* `clusterName` – (Required) Name of the EKS Cluster. +* `clusterName` - (Required) Name of the EKS Cluster. The following arguments are optional: -* `addonVersion` – (Optional) The version of the EKS add-on. The version must +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `addonVersion` - (Optional) The version of the EKS add-on. The version must match one of the versions returned by [describe-addon-versions](https://docs.aws.amazon.com/cli/latest/reference/eks/describe-addon-versions.html). * `configurationValues` - (Optional) custom configuration values for addons with single JSON string. This JSON string value must match the JSON schema derived from [describe-addon-configuration](https://docs.aws.amazon.com/cli/latest/reference/eks/describe-addon-configuration.html). -* `resolveConflictsOnCreate` - (Optional) How to resolve field value conflicts when migrating a self-managed add-on to an Amazon EKS add-on. Valid values are `NONE` and `OVERWRITE`. For more details see the [CreateAddon](https://docs.aws.amazon.com/eks/latest/APIReference/API_CreateAddon.html) API Docs. 
-* `resolveConflictsOnUpdate` - (Optional) How to resolve field value conflicts for an Amazon EKS add-on if you've changed a value from the Amazon EKS default value. Valid values are `NONE`, `OVERWRITE`, and `PRESERVE`. For more details see the [UpdateAddon](https://docs.aws.amazon.com/eks/latest/APIReference/API_UpdateAddon.html) API Docs. -* `resolveConflicts` - (**Deprecated** use the `resolveConflictsOnCreate` and `resolveConflictsOnUpdate` attributes instead) Define how to resolve parameter value conflicts when migrating an existing add-on to an Amazon EKS add-on or when applying version updates to the add-on. Valid values are `NONE`, `OVERWRITE` and `PRESERVE`. Note that `PRESERVE` is only valid on addon update, not for initial addon creation. If you need to set this to `PRESERVE`, use the `resolveConflictsOnCreate` and `resolveConflictsOnUpdate` attributes instead. For more details check [UpdateAddon](https://docs.aws.amazon.com/eks/latest/APIReference/API_UpdateAddon.html) API Docs. +* `resolveConflictsOnCreate` - (Optional) How to resolve field value conflicts when migrating a self-managed add-on to an Amazon EKS add-on. Valid values are `NONE` and `OVERWRITE`. For more details see the [CreateAddon](https://docs.aws.amazon.com/eks/latest/APIReference/API_CreateAddon.html) API Documentation. +* `resolveConflictsOnUpdate` - (Optional) How to resolve field value conflicts for an Amazon EKS add-on if you've changed a value from the Amazon EKS default value. Valid values are `NONE`, `OVERWRITE`, and `PRESERVE`. For more details see the [UpdateAddon](https://docs.aws.amazon.com/eks/latest/APIReference/API_UpdateAddon.html) API Documentation. * `tags` - (Optional) Key-value map of resource tags. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. 
* `podIdentityAssociation` - (Optional) Configuration block with EKS Pod Identity association settings. See [`podIdentityAssociation`](#pod-identity-association) below for details. * `preserve` - (Optional) Indicates if you want to preserve the created resources when deleting the EKS add-on. @@ -319,4 +321,4 @@ Using `terraform import`, import EKS add-on using the `clusterName` and `addonNa % terraform import aws_eks_addon.my_eks_addon my_cluster_name:my_addon_name ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/eks_cluster.html.markdown b/website/docs/cdktf/typescript/r/eks_cluster.html.markdown index 4200c2bec346..e935649904e7 100644 --- a/website/docs/cdktf/typescript/r/eks_cluster.html.markdown +++ b/website/docs/cdktf/typescript/r/eks_cluster.html.markdown @@ -365,7 +365,7 @@ class MyConvertedCode extends TerraformStack { The following arguments are required: -* `name` – (Required) Name of the cluster. Must be between 1-100 characters in length. Must begin with an alphanumeric character, and must only contain alphanumeric characters, dashes and underscores (`^[0-9A-Za-z][A-Za-z0-9\-_]*$`). +* `name` - (Required) Name of the cluster. Must be between 1-100 characters in length. Must begin with an alphanumeric character, and must only contain alphanumeric characters, dashes and underscores (`^[0-9A-Za-z][A-Za-z0-9\-_]*$`). * `roleArn` - (Required) ARN of the IAM role that provides permissions for the Kubernetes control plane to make calls to AWS API operations on your behalf. 
Ensure the resource configuration includes explicit dependencies on the IAM Role permissions by adding [`dependsOn`](https://www.terraform.io/docs/configuration/meta-arguments/depends_on.html) if using the [`aws_iam_role_policy` resource](/docs/providers/aws/r/iam_role_policy.html) or [`aws_iam_role_policy_attachment` resource](/docs/providers/aws/r/iam_role_policy_attachment.html), otherwise EKS cannot delete EKS managed EC2 infrastructure such as Security Groups on EKS Cluster deletion. * `vpcConfig` - (Required) Configuration block for the VPC associated with your cluster. Amazon EKS VPC resources have specific requirements to work properly with Kubernetes. For more information, see [Cluster VPC Considerations](https://docs.aws.amazon.com/eks/latest/userguide/network_reqs.html) and [Cluster Security Group Considerations](https://docs.aws.amazon.com/eks/latest/userguide/sec-group-reqs.html) in the Amazon EKS User Guide. Detailed below. Also contains attributes detailed in the Attributes section. @@ -374,16 +374,18 @@ The following arguments are optional: * `accessConfig` - (Optional) Configuration block for the access config associated with your cluster, see [Amazon EKS Access Entries](https://docs.aws.amazon.com/eks/latest/userguide/access-entries.html). [Detailed](#access_config) below. * `bootstrapSelfManagedAddons` - (Optional) Install default unmanaged add-ons, such as `aws-cni`, `kube-proxy`, and CoreDNS during cluster creation. If `false`, you must manually install desired add-ons. Changing this value will force a new cluster to be created. Defaults to `true`. * `computeConfig` - (Optional) Configuration block with compute configuration for EKS Auto Mode. [Detailed](#compute_config) below. +* `deletionProtection` - (Optional) Whether to enable deletion protection for the cluster. When enabled, the cluster cannot be deleted unless deletion protection is first disabled. Default: `false`. 
* `enabledClusterLogTypes` - (Optional) List of the desired control plane logging to enable. For more information, see [Amazon EKS Control Plane Logging](https://docs.aws.amazon.com/eks/latest/userguide/control-plane-logs.html). * `encryptionConfig` - (Optional) Configuration block with encryption configuration for the cluster. [Detailed](#encryption_config) below. * `forceUpdateVersion` - (Optional) Force version update by overriding upgrade-blocking readiness checks when updating a cluster. * `kubernetesNetworkConfig` - (Optional) Configuration block with kubernetes network configuration for the cluster. [Detailed](#kubernetes_network_config) below. If removed, Terraform will only perform drift detection if a configuration value is provided. * `outpostConfig` - (Optional) Configuration block representing the configuration of your local Amazon EKS cluster on an AWS Outpost. This block isn't available for creating Amazon EKS clusters on the AWS cloud. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `remoteNetworkConfig` - (Optional) Configuration block with remote network configuration for EKS Hybrid Nodes. [Detailed](#remote_network_config) below. * `storageConfig` - (Optional) Configuration block with storage configuration for EKS Auto Mode. [Detailed](#storage_config) below. * `tags` - (Optional) Key-value map of resource tags. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. * `upgradePolicy` - (Optional) Configuration block for the support policy to use for the cluster. 
See [upgrade_policy](#upgrade_policy) for details. -* `version` – (Optional) Desired Kubernetes master version. If you do not specify a value, the latest available version at resource creation is used and no upgrades will occur except those automatically triggered by EKS. The value must be configured and increased to upgrade the version when desired. Downgrades are not supported by EKS. +* `version` - (Optional) Desired Kubernetes master version. If you do not specify a value, the latest available version at resource creation is used and no upgrades will occur except those automatically triggered by EKS. The value must be configured and increased to upgrade the version when desired. Downgrades are not supported by EKS. * `zonalShiftConfig` - (Optional) Configuration block with zonal shift configuration for the cluster. [Detailed](#zonal_shift_config) below. ### access_config @@ -391,7 +393,7 @@ The following arguments are optional: The `accessConfig` configuration block supports the following arguments: * `authenticationMode` - (Optional) The authentication mode for the cluster. Valid values are `CONFIG_MAP`, `API` or `API_AND_CONFIG_MAP` -* `bootstrapClusterCreatorAdminPermissions` - (Optional) Whether or not to bootstrap the access config values to the cluster. Default is `false`. +* `bootstrapClusterCreatorAdminPermissions` - (Optional) Whether or not to bootstrap the access config values to the cluster. Default is `true`. ### compute_config @@ -439,8 +441,8 @@ The `remotePodNetworks` configuration block supports the following arguments: * `endpointPrivateAccess` - (Optional) Whether the Amazon EKS private API server endpoint is enabled. Default is `false`. * `endpointPublicAccess` - (Optional) Whether the Amazon EKS public API server endpoint is enabled. Default is `true`. * `publicAccessCidrs` - (Optional) List of CIDR blocks. Indicates which CIDR blocks can access the Amazon EKS public API server endpoint when enabled. 
EKS defaults this to a list with `0.0.0.0/0`. Terraform will only perform drift detection of its value when present in a configuration. -* `securityGroupIds` – (Optional) List of security group IDs for the cross-account elastic network interfaces that Amazon EKS creates to use to allow communication between your worker nodes and the Kubernetes control plane. -* `subnetIds` – (Required) List of subnet IDs. Must be in at least two different availability zones. Amazon EKS creates cross-account elastic network interfaces in these subnets to allow communication between your worker nodes and the Kubernetes control plane. +* `securityGroupIds` - (Optional) List of security group IDs for the cross-account elastic network interfaces that Amazon EKS creates to use to allow communication between your worker nodes and the Kubernetes control plane. +* `subnetIds` - (Required) List of subnet IDs. Must be in at least two different availability zones. Amazon EKS creates cross-account elastic network interfaces in these subnets to allow communication between your worker nodes and the Kubernetes control plane. * `vpcId` - (Computed) ID of the VPC associated with your cluster. ### kubernetes_network_config @@ -574,4 +576,4 @@ Using `terraform import`, import EKS Clusters using the `name`. For example: % terraform import aws_eks_cluster.my_cluster my_cluster ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/eks_fargate_profile.html.markdown b/website/docs/cdktf/typescript/r/eks_fargate_profile.html.markdown index 438d42bdcb01..cab234a86c67 100644 --- a/website/docs/cdktf/typescript/r/eks_fargate_profile.html.markdown +++ b/website/docs/cdktf/typescript/r/eks_fargate_profile.html.markdown @@ -92,14 +92,15 @@ class MyConvertedCode extends TerraformStack { The following arguments are required: -* `clusterName` – (Required) Name of the EKS Cluster. -* `fargateProfileName` – (Required) Name of the EKS Fargate Profile. 
-* `podExecutionRoleArn` – (Required) Amazon Resource Name (ARN) of the IAM Role that provides permissions for the EKS Fargate Profile. +* `clusterName` - (Required) Name of the EKS Cluster. +* `fargateProfileName` - (Required) Name of the EKS Fargate Profile. +* `podExecutionRoleArn` - (Required) Amazon Resource Name (ARN) of the IAM Role that provides permissions for the EKS Fargate Profile. * `selector` - (Required) Configuration block(s) for selecting Kubernetes Pods to execute with this EKS Fargate Profile. Detailed below. -* `subnetIds` – (Required) Identifiers of private EC2 Subnets to associate with the EKS Fargate Profile. These subnets must have the following resource tag: `kubernetes.io/cluster/CLUSTER_NAME` (where `CLUSTER_NAME` is replaced with the name of the EKS Cluster). +* `subnetIds` - (Required) Identifiers of private EC2 Subnets to associate with the EKS Fargate Profile. These subnets must have the following resource tag: `kubernetes.io/cluster/CLUSTER_NAME` (where `CLUSTER_NAME` is replaced with the name of the EKS Cluster). The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `tags` - (Optional) Key-value map of resource tags. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. ### selector Configuration Block @@ -110,6 +111,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `labels` - (Optional) Key-value map of Kubernetes labels for selection. ## Attribute Reference @@ -160,4 +162,4 @@ Using `terraform import`, import EKS Fargate Profiles using the `clusterName` an % terraform import aws_eks_fargate_profile.my_fargate_profile my_cluster:my_fargate_profile ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/eks_identity_provider_config.html.markdown b/website/docs/cdktf/typescript/r/eks_identity_provider_config.html.markdown index f538d26e0a0b..554d5349c48c 100644 --- a/website/docs/cdktf/typescript/r/eks_identity_provider_config.html.markdown +++ b/website/docs/cdktf/typescript/r/eks_identity_provider_config.html.markdown @@ -43,16 +43,17 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: -* `clusterName` – (Required) Name of the EKS Cluster. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `clusterName` - (Required) Name of the EKS Cluster. * `oidc` - (Required) Nested attribute containing [OpenID Connect](https://openid.net/connect/) identity provider information for the cluster. Detailed below. * `tags` - (Optional) Key-value map of resource tags. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. ### oidc Configuration Block -* `clientId` – (Required) Client ID for the OpenID Connect identity provider. 
+* `clientId` - (Required) Client ID for the OpenID Connect identity provider. * `groupsClaim` - (Optional) The JWT claim that the provider will use to return groups. * `groupsPrefix` - (Optional) A prefix that is prepended to group claims e.g., `oidc:`. -* `identityProviderConfigName` – (Required) The name of the identity provider config. +* `identityProviderConfigName` - (Required) The name of the identity provider config. * `issuerUrl` - (Required) Issuer URL for the OpenID Connect identity provider. * `requiredClaims` - (Optional) The key value pairs that describe required claims in the identity token. * `usernameClaim` - (Optional) The JWT claim that the provider will use as the username. @@ -106,4 +107,4 @@ Using `terraform import`, import EKS Identity Provider Configurations using the % terraform import aws_eks_identity_provider_config.my_identity_provider_config my_cluster:my_identity_provider_config ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/eks_node_group.html.markdown b/website/docs/cdktf/typescript/r/eks_node_group.html.markdown index 884726be8d6d..cd57844e6999 100644 --- a/website/docs/cdktf/typescript/r/eks_node_group.html.markdown +++ b/website/docs/cdktf/typescript/r/eks_node_group.html.markdown @@ -236,13 +236,14 @@ class MyConvertedCode extends TerraformStack { The following arguments are required: -* `clusterName` – (Required) Name of the EKS Cluster. -* `nodeRoleArn` – (Required) Amazon Resource Name (ARN) of the IAM Role that provides permissions for the EKS Node Group. +* `clusterName` - (Required) Name of the EKS Cluster. +* `nodeRoleArn` - (Required) Amazon Resource Name (ARN) of the IAM Role that provides permissions for the EKS Node Group. * `scalingConfig` - (Required) Configuration block with scaling settings. See [`scalingConfig`](#scaling_config-configuration-block) below for details. 
-* `subnetIds` – (Required) Identifiers of EC2 Subnets to associate with the EKS Node Group. +* `subnetIds` - (Required) Identifiers of EC2 Subnets to associate with the EKS Node Group. The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `amiType` - (Optional) Type of Amazon Machine Image (AMI) associated with the EKS Node Group. See the [AWS documentation](https://docs.aws.amazon.com/eks/latest/APIReference/API_Nodegroup.html#AmazonEKS-Type-Nodegroup-amiType) for valid values. Terraform will only perform drift detection if a configuration value is provided. * `capacityType` - (Optional) Type of capacity associated with the EKS Node Group. Valid values: `ON_DEMAND`, `SPOT`. Terraform will only perform drift detection if a configuration value is provided. * `diskSize` - (Optional) Disk size in GiB for worker nodes. Defaults to `50` for Windows, `20` all other node groups. Terraform will only perform drift detection if a configuration value is provided. @@ -250,15 +251,15 @@ The following arguments are optional: * `instanceTypes` - (Optional) List of instance types associated with the EKS Node Group. Defaults to `["t3.medium"]`. Terraform will only perform drift detection if a configuration value is provided. * `labels` - (Optional) Key-value map of Kubernetes labels. Only labels that are applied with the EKS API are managed by this argument. Other Kubernetes labels applied to the EKS Node Group will not be managed. * `launchTemplate` - (Optional) Configuration block with Launch Template settings. See [`launchTemplate`](#launch_template-configuration-block) below for details. Conflicts with `remoteAccess`. -* `nodeGroupName` – (Optional) Name of the EKS Node Group. 
If omitted, Terraform will assign a random, unique name. Conflicts with `nodeGroupNamePrefix`. The node group name can't be longer than 63 characters. It must start with a letter or digit, but can also include hyphens and underscores for the remaining characters. -* `nodeGroupNamePrefix` – (Optional) Creates a unique name beginning with the specified prefix. Conflicts with `nodeGroupName`. -* `node_repair_config` - (Optional) The node auto repair configuration for the node group. See [`node_repair_config`](#node_repair_config-configuration-block) below for details. -* `releaseVersion` – (Optional) AMI version of the EKS Node Group. Defaults to latest version for Kubernetes version. +* `nodeGroupName` - (Optional) Name of the EKS Node Group. If omitted, Terraform will assign a random, unique name. Conflicts with `nodeGroupNamePrefix`. The node group name can't be longer than 63 characters. It must start with a letter or digit, but can also include hyphens and underscores for the remaining characters. +* `nodeGroupNamePrefix` - (Optional) Creates a unique name beginning with the specified prefix. Conflicts with `nodeGroupName`. +* `nodeRepairConfig` - (Optional) The node auto repair configuration for the node group. See [`nodeRepairConfig`](#node_repair_config-configuration-block) below for details. +* `releaseVersion` - (Optional) AMI version of the EKS Node Group. Defaults to latest version for Kubernetes version. * `remoteAccess` - (Optional) Configuration block with remote access settings. See [`remoteAccess`](#remote_access-configuration-block) below for details. Conflicts with `launchTemplate`. * `tags` - (Optional) Key-value map of resource tags. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. 
* `taint` - (Optional) The Kubernetes taints to be applied to the nodes in the node group. Maximum of 50 taints per node group. See [taint](#taint-configuration-block) below for details. * `updateConfig` - (Optional) Configuration block with update settings. See [`updateConfig`](#update_config-configuration-block) below for details. -* `version` – (Optional) Kubernetes version. Defaults to EKS Cluster Kubernetes version. Terraform will only perform drift detection if a configuration value is provided. +* `version` - (Optional) Kubernetes version. Defaults to EKS Cluster Kubernetes version. Terraform will only perform drift detection if a configuration value is provided. ### launch_template Configuration Block @@ -349,4 +350,4 @@ Using `terraform import`, import EKS Node Groups using the `clusterName` and `no % terraform import aws_eks_node_group.my_node_group my_cluster:my_node_group ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/eks_pod_identity_association.html.markdown b/website/docs/cdktf/typescript/r/eks_pod_identity_association.html.markdown index f72fedc81578..9f823b253c6f 100644 --- a/website/docs/cdktf/typescript/r/eks_pod_identity_association.html.markdown +++ b/website/docs/cdktf/typescript/r/eks_pod_identity_association.html.markdown @@ -89,7 +89,10 @@ The following arguments are required: The following arguments are optional: +* `disableSessionTags` - (Optional) Disable the tags that are automatically added to role session by Amazon EKS. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `tags` - (Optional) Key-value map of resource tags. 
If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. +* `targetRoleArn` - (Optional) The Amazon Resource Name (ARN) of the IAM role to be chained to the IAM role specified as `roleArn`. ## Attribute Reference @@ -97,6 +100,7 @@ This resource exports the following attributes in addition to the arguments abov * `associationArn` - The Amazon Resource Name (ARN) of the association. * `associationId` - The ID of the association. +* `externalId` - The unique identifier for this association for a target IAM role. You put this value in the trust policy of the target role, in a Condition to match the sts.ExternalId. * `tagsAll` - A map of tags assigned to the resource, including those inherited from the provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). ## Import @@ -131,4 +135,4 @@ Using `terraform import`, import EKS (Elastic Kubernetes) Pod Identity Associati % terraform import aws_eks_pod_identity_association.example example,a-12345678 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/elastic_beanstalk_application.html.markdown b/website/docs/cdktf/typescript/r/elastic_beanstalk_application.html.markdown index 32d4eff7d05f..bb3e3393d07c 100644 --- a/website/docs/cdktf/typescript/r/elastic_beanstalk_application.html.markdown +++ b/website/docs/cdktf/typescript/r/elastic_beanstalk_application.html.markdown @@ -49,6 +49,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints).
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The name of the application, must be unique within your account * `description` - (Optional) Short description of the application * `tags` - (Optional) Key-value map of tags for the Elastic Beanstalk Application. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. @@ -99,4 +100,4 @@ Using `terraform import`, import Elastic Beanstalk Applications using the `name` % terraform import aws_elastic_beanstalk_application.tf_test tf-test-name ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/elastic_beanstalk_application_version.html.markdown b/website/docs/cdktf/typescript/r/elastic_beanstalk_application_version.html.markdown index 09799dab757c..a11f5b0675f8 100644 --- a/website/docs/cdktf/typescript/r/elastic_beanstalk_application_version.html.markdown +++ b/website/docs/cdktf/typescript/r/elastic_beanstalk_application_version.html.markdown @@ -60,7 +60,7 @@ class MyConvertedCode extends TerraformStack { application: "tf-test-name", bucket: Token.asString(awsS3BucketDefault.id), description: "application version created by terraform", - key: Token.asString(awsS3ObjectDefault.id), + key: Token.asString(awsS3ObjectDefault.key), name: "tf-test-version-label", }); /*This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match.*/ @@ -81,6 +81,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` - (Optional) Short description of the Application Version. * `forceDelete` - (Optional) On delete, force an Application Version to be deleted when it may be in use by multiple Elastic Beanstalk Environments. * `process` - (Optional) Pre-processes and validates the environment manifest (env.yaml ) and configuration files (*.config files in the .ebextensions folder) in the source bundle. Validating configuration files can identify issues prior to deploying the application version to an environment. You must turn processing on for application versions that you create using AWS CodeBuild or AWS CodeCommit. For application versions built from a source bundle in Amazon S3, processing is optional. It validates Elastic Beanstalk configuration files. It doesn’t validate your application’s configuration files, like proxy server or Docker configuration. @@ -93,4 +94,4 @@ This resource exports the following attributes in addition to the arguments abov * `arn` - ARN assigned by AWS for this Elastic Beanstalk Application. * `tagsAll` - Map of tags assigned to the resource, including those inherited from the provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/elastic_beanstalk_configuration_template.html.markdown b/website/docs/cdktf/typescript/r/elastic_beanstalk_configuration_template.html.markdown index 5b164f0a3503..f968989a65ff 100644 --- a/website/docs/cdktf/typescript/r/elastic_beanstalk_configuration_template.html.markdown +++ b/website/docs/cdktf/typescript/r/elastic_beanstalk_configuration_template.html.markdown @@ -47,14 +47,15 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) A unique name for this Template. -* `application` – (Required) name of the application to associate with this configuration template +* `application` - (Required) name of the application to associate with this configuration template * `description` - (Optional) Short description of the Template -* `environmentId` – (Optional) The ID of the environment used with this configuration template -* `setting` – (Optional) Option settings to configure the new Environment. These +* `environmentId` - (Optional) The ID of the environment used with this configuration template +* `setting` - (Optional) Option settings to configure the new Environment. These override specific values that are set as defaults. The format is detailed below in [Option Settings](#option-settings) -* `solutionStackName` – (Optional) A solution stack to base your Template +* `solutionStackName` - (Optional) A solution stack to base your Template off of. 
Example stacks can be found in the [Amazon API documentation][1] ## Option Settings @@ -79,4 +80,4 @@ This resource exports the following attributes in addition to the arguments abov [1]: https://docs.aws.amazon.com/elasticbeanstalk/latest/dg/concepts.platforms.html - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/elastic_beanstalk_environment.html.markdown b/website/docs/cdktf/typescript/r/elastic_beanstalk_environment.html.markdown index c35799eac960..22b5e3360aa1 100644 --- a/website/docs/cdktf/typescript/r/elastic_beanstalk_environment.html.markdown +++ b/website/docs/cdktf/typescript/r/elastic_beanstalk_environment.html.markdown @@ -50,29 +50,30 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) A unique name for this Environment. This name is used in the application URL -* `application` – (Required) Name of the application that contains the version +* `application` - (Required) Name of the application that contains the version to be deployed * `cnamePrefix` - (Optional) Prefix to use for the fully qualified DNS name of the Environment. * `description` - (Optional) Short description of the Environment * `tier` - (Optional) Elastic Beanstalk Environment tier. Valid values are `Worker` or `WebServer`. If tier is left blank `WebServer` will be used. -* `setting` – (Optional) Option settings to configure the new Environment. These +* `setting` - (Optional) Option settings to configure the new Environment. These override specific values that are set as defaults. 
The format is detailed below in [Option Settings](#option-settings) -* `solutionStackName` – (Optional) A solution stack to base your environment +* `solutionStackName` - (Optional) A solution stack to base your environment off of. Example stacks can be found in the [Amazon API documentation][1] -* `templateName` – (Optional) The name of the Elastic Beanstalk Configuration +* `templateName` - (Optional) The name of the Elastic Beanstalk Configuration template to use in deployment -* `platformArn` – (Optional) The [ARN][2] of the Elastic Beanstalk [Platform][3] +* `platformArn` - (Optional) The [ARN][2] of the Elastic Beanstalk [Platform][3] to use in deployment * `waitForReadyTimeout` - (Default `20m`) The maximum [duration](https://golang.org/pkg/time/#ParseDuration) that Terraform should wait for an Elastic Beanstalk Environment to be in a ready state before timing out. -* `pollInterval` – The time between polling the AWS API to +* `pollInterval` - The time between polling the AWS API to check if changes have been applied. Use this to adjust the rate of API calls for any `create` or `update` action. Minimum `10s`, maximum `180s`. Omit this to use the default behavior, which is an exponential backoff @@ -142,9 +143,9 @@ This resource exports the following attributes in addition to the arguments abov * `description` - Description of the Elastic Beanstalk Environment. * `tier` - The environment tier specified. * `tagsAll` - A map of tags assigned to the resource, including those inherited from the provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). -* `application` – The Elastic Beanstalk Application specified for this environment. -* `setting` – Settings specifically set for this Environment. -* `allSettings` – List of all option settings configured in this Environment. These +* `application` - The Elastic Beanstalk Application specified for this environment. 
+* `setting` - Settings specifically set for this Environment. +* `allSettings` - List of all option settings configured in this Environment. These are a combination of default settings and their overrides from `setting` in the configuration. * `cname` - Fully qualified DNS name for this Environment. @@ -192,4 +193,4 @@ Using `terraform import`, import Elastic Beanstalk Environments using the `id`. % terraform import aws_elastic_beanstalk_environment.prodenv e-rpqsewtp2j ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/elasticache_cluster.html.markdown b/website/docs/cdktf/typescript/r/elasticache_cluster.html.markdown index 3735e946660f..209d42dd46e0 100644 --- a/website/docs/cdktf/typescript/r/elasticache_cluster.html.markdown +++ b/website/docs/cdktf/typescript/r/elasticache_cluster.html.markdown @@ -237,26 +237,24 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -The following arguments are required: +This resource supports the following arguments: -* `clusterId` – (Required) Group identifier. ElastiCache converts this name to lowercase. Changing this value will re-create the resource. -* `engine` – (Optional, Required if `replicationGroupId` is not specified) Name of the cache engine to be used for this cache cluster. Valid values are `memcached`, `redis` and `valkey`. -* `nodeType` – (Required unless `replicationGroupId` is provided) The instance class used. +* `clusterId` - (Required) Group identifier. ElastiCache converts this name to lowercase. Changing this value will re-create the resource. +* `engine` - (Optional, Required if `replicationGroupId` is not specified) Name of the cache engine to be used for this cache cluster. Valid values are `memcached`, `redis` and `valkey`. +* `nodeType` - (Required unless `replicationGroupId` is provided) The instance class used. 
See AWS documentation for information on [supported node types for Valkey or Redis OSS](https://docs.aws.amazon.com/AmazonElastiCache/latest/dg/CacheNodes.SupportedTypes.html#CacheNodes.CurrentGen) and [guidance on selecting node types for Valkey or Redis OSS](https://docs.aws.amazon.com/AmazonElastiCache/latest/dg/CacheNodes.SelectSize.html#CacheNodes.SelectSize.redis). See AWS documentation for information on [supported node types for Memcached](https://docs.aws.amazon.com/AmazonElastiCache/latest/dg/CacheNodes.SupportedTypes.html#CacheNodes.CurrentGen-Memcached) and [guidance on selecting node types for Memcached](https://docs.aws.amazon.com/AmazonElastiCache/latest/dg/CacheNodes.SelectSize.html#CacheNodes.SelectSize.Mem). For Memcached, changing this value will re-create the resource. -* `numCacheNodes` – (Required unless `replicationGroupId` is provided) The initial number of cache nodes that the cache cluster will have. For Redis, this value must be 1. For Memcached, this value must be between 1 and 40. If this number is reduced on subsequent runs, the highest numbered nodes will be removed. -* `parameterGroupName` – (Required unless `replicationGroupId` is provided) The name of the parameter group to associate with this cache cluster. - -The following arguments are optional: - +* `numCacheNodes` - (Required unless `replicationGroupId` is provided) The initial number of cache nodes that the cache cluster will have. For Redis, this value must be 1. For Memcached, this value must be between 1 and 40. If this number is reduced on subsequent runs, the highest numbered nodes will be removed. +* `parameterGroupName` - (Required unless `replicationGroupId` is provided) The name of the parameter group to associate with this cache cluster. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `applyImmediately` - (Optional) Whether any database modifications are applied immediately, or during the next maintenance window. Default is `false`. See [Amazon ElastiCache Documentation for more information](https://docs.aws.amazon.com/AmazonElastiCache/latest/APIReference/API_ModifyCacheCluster.html). * `autoMinorVersionUpgrade` - (Optional) Specifies whether minor version engine upgrades will be applied automatically to the underlying Cache Cluster instances during the maintenance window. Only supported for engine type `"redis"` and if the engine version is 6 or higher. Defaults to `true`. * `availabilityZone` - (Optional) Availability Zone for the cache cluster. If you want to create cache nodes in multi-az, use `preferredAvailabilityZones` instead. Default: System chosen Availability Zone. Changing this value will re-create the resource. * `azMode` - (Optional, Memcached only) Whether the nodes in this Memcached node group are created in a single Availability Zone or created across multiple Availability Zones in the cluster's region. Valid values for this parameter are `single-az` or `cross-az`, default is `single-az`. If you want to choose `cross-az`, `numCacheNodes` must be greater than `1`. -* `engineVersion` – (Optional) Version number of the cache engine to be used. +* `engineVersion` - (Optional) Version number of the cache engine to be used. If not set, defaults to the latest version. See [Describe Cache Engine Versions](https://docs.aws.amazon.com/cli/latest/reference/elasticache/describe-cache-engine-versions.html) in the AWS Documentation for supported versions. When `engine` is `redis` and the version is 7 or higher, the major and minor version should be set, e.g., `7.2`. 
@@ -267,22 +265,22 @@ The following arguments are optional: * `finalSnapshotIdentifier` - (Optional, Redis only) Name of your final cluster snapshot. If omitted, no final snapshot will be made. * `ipDiscovery` - (Optional) The IP version to advertise in the discovery protocol. Valid values are `ipv4` or `ipv6`. * `logDeliveryConfiguration` - (Optional, Redis only) Specifies the destination and format of Redis [SLOWLOG](https://redis.io/commands/slowlog) or Redis [Engine Log](https://docs.aws.amazon.com/AmazonElastiCache/latest/dg/Log_Delivery.html#Log_contents-engine-log). See the documentation on [Amazon ElastiCache](https://docs.aws.amazon.com/AmazonElastiCache/latest/dg/Log_Delivery.html). See [Log Delivery Configuration](#log-delivery-configuration) below for more details. -* `maintenanceWindow` – (Optional) Specifies the weekly time range for when maintenance +* `maintenanceWindow` - (Optional) Specifies the weekly time range for when maintenance on the cache cluster is performed. The format is `ddd:hh24:mi-ddd:hh24:mi` (24H Clock UTC). The minimum maintenance window is a 60 minute period. Example: `sun:05:00-sun:09:00`. * `networkType` - (Optional) The IP versions for cache cluster connections. IPv6 is supported with Redis engine `6.2` onword or Memcached version `1.6.6` for all [Nitro system](https://aws.amazon.com/ec2/nitro/) instances. Valid values are `ipv4`, `ipv6` or `dual_stack`. -* `notificationTopicArn` – (Optional) ARN of an SNS topic to send ElastiCache notifications to. Example: `arn:aws:sns:us-east-1:012345678999:my_sns_topic`. +* `notificationTopicArn` - (Optional) ARN of an SNS topic to send ElastiCache notifications to. Example: `arn:aws:sns:us-east-1:012345678999:my_sns_topic`. * `outpostMode` - (Optional) Specify the outpost mode that will apply to the cache cluster creation. Valid values are `"single-outpost"` and `"cross-outpost"`, however AWS currently only supports `"single-outpost"` mode. 
-* `port` – (Optional) The port number on which each of the cache nodes will accept connections. For Memcached the default is 11211, and for Redis the default port is 6379. Cannot be provided with `replicationGroupId`. Changing this value will re-create the resource. +* `port` - (Optional) The port number on which each of the cache nodes will accept connections. For Memcached the default is 11211, and for Redis the default port is 6379. Cannot be provided with `replicationGroupId`. Changing this value will re-create the resource. * `preferredAvailabilityZones` - (Optional, Memcached only) List of the Availability Zones in which cache nodes are created. If you are creating your cluster in an Amazon VPC you can only locate nodes in Availability Zones that are associated with the subnets in the selected subnet group. The number of Availability Zones listed must equal the value of `numCacheNodes`. If you want all the nodes in the same Availability Zone, use `availabilityZone` instead, or repeat the Availability Zone multiple times in the list. Default: System chosen Availability Zones. Detecting drift of existing node availability zone is not currently supported. Updating this argument by itself to migrate existing node availability zones is not currently supported and will show a perpetual difference. * `preferredOutpostArn` - (Optional, Required if `outpostMode` is specified) The outpost ARN in which the cache cluster will be created. * `replicationGroupId` - (Optional, Required if `engine` is not specified) ID of the replication group to which this cluster should belong. If this parameter is specified, the cluster is added to the specified replication group as a read replica; otherwise, the cluster is a standalone primary that is not part of any replication group. -* `securityGroupIds` – (Optional, VPC only) One or more VPC security groups associated with the cache cluster. 
Cannot be provided with `replication_group_id.` -* `snapshotArns` – (Optional, Redis only) Single-element string list containing an Amazon Resource Name (ARN) of a Redis RDB snapshot file stored in Amazon S3. The object name cannot contain any commas. Changing `snapshotArns` forces a new resource. +* `securityGroupIds` - (Optional, VPC only) One or more VPC security groups associated with the cache cluster. Cannot be provided with `replication_group_id.` +* `snapshotArns` - (Optional, Redis only) Single-element string list containing an Amazon Resource Name (ARN) of a Redis RDB snapshot file stored in Amazon S3. The object name cannot contain any commas. Changing `snapshotArns` forces a new resource. * `snapshotName` - (Optional, Redis only) Name of a snapshot from which to restore data into the new node group. Changing `snapshotName` forces a new resource. * `snapshotRetentionLimit` - (Optional, Redis only) Number of days for which ElastiCache will retain automatic cache cluster snapshots before deleting them. For example, if you set SnapshotRetentionLimit to 5, then a snapshot that was taken today will be retained for 5 days before being deleted. If the value of SnapshotRetentionLimit is set to zero (0), backups are turned off. Please note that setting a `snapshotRetentionLimit` is not supported on cache.t1.micro cache nodes * `snapshotWindow` - (Optional, Redis only) Daily time range (in UTC) during which ElastiCache will begin taking a daily snapshot of your cache cluster. Example: 05:00-09:00 -* `subnetGroupName` – (Optional, VPC only) Name of the subnet group to be used for the cache cluster. Changing this value will re-create the resource. Cannot be provided with `replication_group_id.` +* `subnetGroupName` - (Optional, VPC only) Name of the subnet group to be used for the cache cluster. Changing this value will re-create the resource. Cannot be provided with `replication_group_id.` * `tags` - (Optional) Map of tags to assign to the resource. 
If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. * `transitEncryptionEnabled` - (Optional) Enable encryption in-transit. Supported with Memcached versions `1.6.12` and later, Valkey `7.2` and later, Redis OSS versions `3.2.6`, `4.0.10` and later, running in a VPC. See the [ElastiCache in-transit encryption documentation](https://docs.aws.amazon.com/AmazonElastiCache/latest/dg/in-transit-encryption.html#in-transit-encryption-constraints) for more details. @@ -342,4 +340,4 @@ Using `terraform import`, import ElastiCache Clusters using the `clusterId`. For % terraform import aws_elasticache_cluster.my_cluster my_cluster ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/elasticache_global_replication_group.html.markdown b/website/docs/cdktf/typescript/r/elasticache_global_replication_group.html.markdown index 06a7ca680636..bdc6d5dacf09 100644 --- a/website/docs/cdktf/typescript/r/elasticache_global_replication_group.html.markdown +++ b/website/docs/cdktf/typescript/r/elasticache_global_replication_group.html.markdown @@ -61,8 +61,7 @@ The initial Redis version is determined by the version set on the primary replic However, once it is part of a Global Replication Group, the Global Replication Group manages the version of all member replication groups. -The member replication groups must have [`lifecycle.ignore_changes[engine_version]`](https://www.terraform.io/language/meta-arguments/lifecycle) set, -or Terraform will always return a diff. +The provider is configured to ignore changes to `engine`, `engineVersion` and `parameterGroupName` inside `aws_elasticache_replication_group` resources if they belong to a global replication group. 
In this example, the primary replication group will be created with Redis 6.0, @@ -86,9 +85,6 @@ class MyConvertedCode extends TerraformStack { description: "primary replication group", engine: "redis", engineVersion: "6.0", - lifecycle: { - ignoreChanges: [engineVersion], - }, nodeType: "cache.m5.large", numCacheClusters: 1, replicationGroupId: "example-primary", @@ -101,9 +97,6 @@ class MyConvertedCode extends TerraformStack { new ElasticacheReplicationGroup(this, "secondary", { description: "secondary replication group", globalReplicationGroupId: example.globalReplicationGroupId, - lifecycle: { - ignoreChanges: [engineVersion], - }, numCacheClusters: 1, provider: otherRegion, replicationGroupId: "example-secondary", @@ -117,13 +110,19 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `automaticFailoverEnabled` - (Optional) Specifies whether read-only replicas will be automatically promoted to read/write primary if the existing primary fails. When creating, by default the Global Replication Group inherits the automatic failover setting of the primary replication group. * `cacheNodeType` - (Optional) The instance class used. See AWS documentation for information on [supported node types](https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/CacheNodes.SupportedTypes.html) and [guidance on selecting node types](https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/nodes-select-size.html). When creating, by default the Global Replication Group inherits the node type of the primary replication group. -* `engineVersion` - (Optional) Redis version to use for the Global Replication Group. 
+* `engine` - (Optional) The name of the cache engine to be used for the clusters in this global replication group. + When creating, by default the Global Replication Group inherits the engine of the primary replication group. + If an engine is specified, the Global Replication Group and all member replication groups will be upgraded to this engine. + Valid values are `redis` or `valkey`. + Default is `redis` if `engineVersion` is specified. +* `engineVersion` - (Optional) Engine version to use for the Global Replication Group. When creating, by default the Global Replication Group inherits the version of the primary replication group. If a version is specified, the Global Replication Group and all member replication groups will be upgraded to this version. Cannot be downgraded without replacing the Global Replication Group and all member replication groups. @@ -131,12 +130,12 @@ This resource supports the following arguments: When the version is 6, the major and minor version can be set, e.g., `6.2`, or the minor version can be unspecified which will use the latest version at creation time, e.g., `6.x`. The actual engine version used is returned in the attribute `engineVersionActual`, see [Attribute Reference](#attribute-reference) below. -* `globalReplicationGroupIdSuffix` – (Required) The suffix name of a Global Datastore. If `globalReplicationGroupIdSuffix` is changed, creates a new resource. -* `primaryReplicationGroupId` – (Required) The ID of the primary cluster that accepts writes and will replicate updates to the secondary cluster. If `primaryReplicationGroupId` is changed, creates a new resource. -* `globalReplicationGroupDescription` – (Optional) A user-created description for the global replication group. +* `globalReplicationGroupIdSuffix` - (Required) The suffix name of a Global Datastore. If `globalReplicationGroupIdSuffix` is changed, creates a new resource. 
+* `primaryReplicationGroupId` - (Required) The ID of the primary cluster that accepts writes and will replicate updates to the secondary cluster. If `primaryReplicationGroupId` is changed, creates a new resource. +* `globalReplicationGroupDescription` - (Optional) A user-created description for the global replication group. * `numNodeGroups` - (Optional) The number of node groups (shards) on the global replication group. * `parameterGroupName` - (Optional) An ElastiCache Parameter Group to use for the Global Replication Group. - Required when upgrading a major engine version, but will be ignored if left configured after the upgrade is complete. + Required when upgrading an engine or major engine version, but will be ignored if left configured after the upgrade is complete. Specifying without a major version upgrade will fail. Note that ElastiCache creates a copy of this parameter group for each member replication group. @@ -150,7 +149,6 @@ This resource exports the following attributes in addition to the arguments abov * `atRestEncryptionEnabled` - A flag that indicate whether the encryption at rest is enabled. * `authTokenEnabled` - A flag that indicate whether AuthToken (password) is enabled. * `clusterEnabled` - Indicates whether the Global Datastore is cluster enabled. -* `engine` - The name of the cache engine to be used for the clusters in this global replication group. * `globalReplicationGroupId` - The full ID of the global replication group. * `globalNodeGroups` - Set of node groups (shards) on the global replication group. 
Has the values: @@ -198,4 +196,4 @@ Using `terraform import`, import ElastiCache Global Replication Groups using the % terraform import aws_elasticache_global_replication_group.my_global_replication_group okuqm-global-replication-group-1 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/elasticache_parameter_group.html.markdown b/website/docs/cdktf/typescript/r/elasticache_parameter_group.html.markdown index e1bef8e2473f..e6c461ca9ca6 100644 --- a/website/docs/cdktf/typescript/r/elasticache_parameter_group.html.markdown +++ b/website/docs/cdktf/typescript/r/elasticache_parameter_group.html.markdown @@ -51,6 +51,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The name of the ElastiCache parameter group. * `family` - (Required) The family of the ElastiCache parameter group. * `description` - (Optional) The description of the ElastiCache parameter group. Defaults to "Managed by Terraform". @@ -102,4 +103,4 @@ Using `terraform import`, import ElastiCache Parameter Groups using the `name`. 
% terraform import aws_elasticache_parameter_group.default redis-params ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/elasticache_replication_group.html.markdown b/website/docs/cdktf/typescript/r/elasticache_replication_group.html.markdown index 8fb6884299ed..92ac2f482f8e 100644 --- a/website/docs/cdktf/typescript/r/elasticache_replication_group.html.markdown +++ b/website/docs/cdktf/typescript/r/elasticache_replication_group.html.markdown @@ -273,17 +273,18 @@ class MyConvertedCode extends TerraformStack { The following arguments are required: -* `description` – (Required) User-created description for the replication group. Must not be empty. -* `replicationGroupId` – (Required) Replication group identifier. This parameter is stored as a lowercase string. +* `description` - (Required) User-created description for the replication group. Must not be empty. +* `replicationGroupId` - (Required) Replication group identifier. This parameter is stored as a lowercase string. The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `applyImmediately` - (Optional) Specifies whether any modifications are applied immediately, or during the next maintenance window. Default is `false`. * `atRestEncryptionEnabled` - (Optional) Whether to enable encryption at rest. When `engine` is `redis`, default is `false`. When `engine` is `valkey`, default is `true`. * `authToken` - (Optional) Password used to access a password protected server. Can be specified only if `transit_encryption_enabled = true`. -* `authTokenUpdateStrategy` - (Optional) Strategy to use when updating the `authToken`. Valid values are `SET`, `ROTATE`, and `DELETE`. 
Defaults to `ROTATE`. +* `authTokenUpdateStrategy` - (Optional) Strategy to use when updating the `authToken`. Valid values are `SET`, `ROTATE`, and `DELETE`. Required if `authToken` is set. * `autoMinorVersionUpgrade` - (Optional) Specifies whether minor version engine upgrades will be applied automatically to the underlying Cache Cluster instances during the maintenance window. Only supported for engine types `"redis"` and `"valkey"` and if the engine version is 6 or higher. Defaults to `true`. @@ -304,7 +305,7 @@ The following arguments are optional: * `ipDiscovery` - (Optional) The IP version to advertise in the discovery protocol. Valid values are `ipv4` or `ipv6`. * `kmsKeyId` - (Optional) The ARN of the key that you wish to use if encrypting at rest. If not supplied, uses service managed encryption. Can be specified only if `at_rest_encryption_enabled = true`. * `logDeliveryConfiguration` - (Optional, Redis only) Specifies the destination and format of Redis OSS/Valkey [SLOWLOG](https://redis.io/commands/slowlog) or Redis OSS/Valkey [Engine Log](https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/Log_Delivery.html#Log_contents-engine-log). See the documentation on [Amazon ElastiCache](https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/Log_Delivery.html#Log_contents-engine-log). See [Log Delivery Configuration](#log-delivery-configuration) below for more details. -* `maintenanceWindow` – (Optional) Specifies the weekly time range for when maintenance on the cache cluster is performed. The format is `ddd:hh24:mi-ddd:hh24:mi` (24H Clock UTC). The minimum maintenance window is a 60 minute period. Example: `sun:05:00-sun:09:00` +* `maintenanceWindow` - (Optional) Specifies the weekly time range for when maintenance on the cache cluster is performed. The format is `ddd:hh24:mi-ddd:hh24:mi` (24H Clock UTC). The minimum maintenance window is a 60 minute period. 
Example: `sun:05:00-sun:09:00` * `multiAzEnabled` - (Optional) Specifies whether to enable Multi-AZ Support for the replication group. If `true`, `automaticFailoverEnabled` must also be enabled. Defaults to `false`. @@ -313,7 +314,7 @@ The following arguments are optional: See AWS documentation for information on [supported node types](https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/CacheNodes.SupportedTypes.html) and [guidance on selecting node types](https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/nodes-select-size.html). Required unless `globalReplicationGroupId` is set. Cannot be set if `globalReplicationGroupId` is set. -* `notificationTopicArn` – (Optional) ARN of an SNS topic to send ElastiCache notifications to. Example: `arn:aws:sns:us-east-1:012345678999:my_sns_topic` +* `notificationTopicArn` - (Optional) ARN of an SNS topic to send ElastiCache notifications to. Example: `arn:aws:sns:us-east-1:012345678999:my_sns_topic` * `numCacheClusters` - (Optional) Number of cache clusters (primary and replicas) this replication group will have. If `automaticFailoverEnabled` or `multiAzEnabled` are `true`, must be at least 2. Updates will occur before other modifications. @@ -323,7 +324,7 @@ The following arguments are optional: Changing this number will trigger a resizing operation before other settings modifications. Conflicts with `numCacheClusters`. * `parameterGroupName` - (Optional) Name of the parameter group to associate with this replication group. If this argument is omitted, the default cache parameter group for the specified engine is used. To enable "cluster mode", i.e., data sharding, use a parameter group that has the parameter `cluster-enabled` set to true. -* `port` – (Optional) Port number on which each of the cache nodes will accept connections. For Memcache the default is 11211, and for Redis the default port is 6379. +* `port` - (Optional) Port number on which each of the cache nodes will accept connections. 
For Memcache the default is 11211, and for Redis the default port is 6379. * `preferredCacheClusterAzs` - (Optional) List of EC2 availability zones in which the replication group's cache clusters will be created. The order of the availability zones in the list is considered. The first item in the list will be the primary node. Ignored when updating. * `replicasPerNodeGroup` - (Optional) Number of replica nodes in each node group. Changing this number will trigger a resizing operation before other settings modifications. @@ -332,7 +333,7 @@ The following arguments are optional: Can only be set if `numNodeGroups` is set. * `securityGroupIds` - (Optional) IDs of one or more Amazon VPC security groups associated with this replication group. Use this parameter only when you are creating a replication group in an Amazon Virtual Private Cloud. * `securityGroupNames` - (Optional) Names of one or more Amazon VPC security groups associated with this replication group. Use this parameter only when you are creating a replication group in an Amazon Virtual Private Cloud. -* `snapshotArns` – (Optional) List of ARNs that identify Redis RDB snapshot files stored in Amazon S3. The names object names cannot contain any commas. +* `snapshotArns` - (Optional) List of ARNs that identify Redis RDB snapshot files stored in Amazon S3. The names object names cannot contain any commas. * `snapshotName` - (Optional) Name of a snapshot from which to restore data into the new node group. Changing the `snapshotName` forces a new resource. * `snapshotRetentionLimit` - (Optional, Redis only) Number of days for which ElastiCache will retain automatic cache cluster snapshots before deleting them. For example, if you set SnapshotRetentionLimit to 5, then a snapshot that was taken today will be retained for 5 days before being deleted. If the value of `snapshotRetentionLimit` is set to zero (0), backups are turned off. 
Please note that setting a `snapshotRetentionLimit` is not supported on cache.t1.micro cache nodes * `snapshotWindow` - (Optional, Redis only) Daily time range (in UTC) during which ElastiCache will begin taking a daily snapshot of your cache cluster. The minimum snapshot window is a 60 minute period. Example: `05:00-09:00` @@ -410,4 +411,4 @@ Using `terraform import`, import ElastiCache Replication Groups using the `repli % terraform import aws_elasticache_replication_group.my_replication_group replication-group-1 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/elasticache_reserved_cache_node.html.markdown b/website/docs/cdktf/typescript/r/elasticache_reserved_cache_node.html.markdown index e8c497dd6a9b..961c7642b8d8 100644 --- a/website/docs/cdktf/typescript/r/elasticache_reserved_cache_node.html.markdown +++ b/website/docs/cdktf/typescript/r/elasticache_reserved_cache_node.html.markdown @@ -63,6 +63,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `cacheNodeCount` - (Optional) Number of cache node instances to reserve. Default value is `1`. * `id` - (Optional) Customer-specified identifier to track this reservation. @@ -75,7 +76,7 @@ This resource exports the following attributes in addition to the arguments abov * `arn` - ARN for the reserved cache node. * `duration` - Duration of the reservation as an RFC3339 duration. -* `fixedPrice` – Fixed price charged for this reserved cache node. +* `fixedPrice` - Fixed price charged for this reserved cache node. * `cacheNodeType` - Node type for the reserved cache nodes. 
* `offeringType` - Offering type of this reserved cache node. * `productDescription` - Engine type for the reserved cache node. @@ -125,4 +126,4 @@ Using `terraform import`, import ElastiCache Reserved Cache Node using the `id`. % terraform import aws_elasticache_reserved_cache_node.example CustomReservationID ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/elasticache_serverless_cache.html.markdown b/website/docs/cdktf/typescript/r/elasticache_serverless_cache.html.markdown index 970ad83f23d7..116ab7da6b3d 100644 --- a/website/docs/cdktf/typescript/r/elasticache_serverless_cache.html.markdown +++ b/website/docs/cdktf/typescript/r/elasticache_serverless_cache.html.markdown @@ -151,21 +151,22 @@ class MyConvertedCode extends TerraformStack { The following arguments are required: -* `engine` – (Required) Name of the cache engine to be used for this cache cluster. Valid values are `memcached`, `redis` or `valkey`. -* `name` – (Required) The Cluster name which serves as a unique identifier to the serverless cache +* `engine` - (Required) Name of the cache engine to be used for this cache cluster. Valid values are `memcached`, `redis` or `valkey`. +* `name` - (Required) The Cluster name which serves as a unique identifier to the serverless cache The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `cacheUsageLimits` - (Optional) Sets the cache usage limits for storage and ElastiCache Processing Units for the cache. See [`cacheUsageLimits` Block](#cache_usage_limits-block) for details. * `dailySnapshotTime` - (Optional) The daily time that snapshots will be created from the new serverless cache. 
Only supported for engine types `"redis"` or `"valkey"`. Defaults to `0`. * `description` - (Optional) User-provided description for the serverless cache. The default is NULL. * `kmsKeyId` - (Optional) ARN of the customer managed key for encrypting the data at rest. If no KMS key is provided, a default service key is used. -* `majorEngineVersion` – (Optional) The version of the cache engine that will be used to create the serverless cache. +* `majorEngineVersion` - (Optional) The version of the cache engine that will be used to create the serverless cache. See [Describe Cache Engine Versions](https://docs.aws.amazon.com/cli/latest/reference/elasticache/describe-cache-engine-versions.html) in the AWS Documentation for supported versions. * `securityGroupIds` - (Optional) A list of the one or more VPC security groups to be associated with the serverless cache. The security group will authorize traffic access for the VPC end-point (private-link). If no other information is given this will be the VPC’s Default Security Group that is associated with the cluster VPC end-point. * `snapshotArnsToRestore` - (Optional, Redis only) The list of ARN(s) of the snapshot that the new serverless cache will be created from. Available for Redis only. * `snapshotRetentionLimit` - (Optional, Redis only) The number of snapshots that will be retained for the serverless cache that is being created. As new snapshots beyond this limit are added, the oldest snapshots will be deleted on a rolling basis. Available for Redis only. -* `subnetIds` – (Optional) A list of the identifiers of the subnets where the VPC endpoint for the serverless cache will be deployed. All the subnetIds must belong to the same VPC. +* `subnetIds` - (Optional) A list of the identifiers of the subnets where the VPC endpoint for the serverless cache will be deployed. All the subnetIds must belong to the same VPC. * `tags` - (Optional) Map of tags to assign to the resource. 
If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. * `userGroupId` - (Optional) The identifier of the UserGroup to be associated with the serverless cache. Available for Redis only. Default is NULL. @@ -257,4 +258,4 @@ Using `terraform import`, import ElastiCache Serverless Cache using the `name`. % terraform import aws_elasticache_serverless_cache.my_cluster my_cluster ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/elasticache_subnet_group.html.markdown b/website/docs/cdktf/typescript/r/elasticache_subnet_group.html.markdown index 6cb8466fe2b6..1a5a85dd9ce0 100644 --- a/website/docs/cdktf/typescript/r/elasticache_subnet_group.html.markdown +++ b/website/docs/cdktf/typescript/r/elasticache_subnet_group.html.markdown @@ -57,9 +57,10 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: -* `name` – (Required) Name for the cache subnet group. ElastiCache converts this name to lowercase. -* `description` – (Optional) Description for the cache subnet group. Defaults to "Managed by Terraform". -* `subnetIds` – (Required) List of VPC Subnet IDs for the cache subnet group +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `name` - (Required) Name for the cache subnet group. ElastiCache converts this name to lowercase. +* `description` - (Optional) Description for the cache subnet group. Defaults to "Managed by Terraform". 
+* `subnetIds` - (Required) List of VPC Subnet IDs for the cache subnet group * `tags` - (Optional) Key-value map of resource tags. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. ## Attribute Reference @@ -101,4 +102,4 @@ Using `terraform import`, import ElastiCache Subnet Groups using the `name`. For % terraform import aws_elasticache_subnet_group.bar tf-test-cache-subnet ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/elasticache_user.html.markdown b/website/docs/cdktf/typescript/r/elasticache_user.html.markdown index 08e4f195de55..bae3f546da61 100644 --- a/website/docs/cdktf/typescript/r/elasticache_user.html.markdown +++ b/website/docs/cdktf/typescript/r/elasticache_user.html.markdown @@ -106,6 +106,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `authenticationMode` - (Optional) Denotes the user's authentication properties. Detailed below. * `noPasswordRequired` - (Optional) Indicates a password is not required for this user. * `passwords` - (Optional) Passwords used for this user. You can create up to two passwords for each user. @@ -159,4 +160,4 @@ Using `terraform import`, import ElastiCache users using the `userId`. 
For examp % terraform import aws_elasticache_user.my_user userId1 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/elasticache_user_group.html.markdown b/website/docs/cdktf/typescript/r/elasticache_user_group.html.markdown index 7ba15924af71..07538d3994e1 100644 --- a/website/docs/cdktf/typescript/r/elasticache_user_group.html.markdown +++ b/website/docs/cdktf/typescript/r/elasticache_user_group.html.markdown @@ -60,6 +60,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `userIds` - (Optional) The list of user IDs that belong to the user group. * `tags` - (Optional) Key-value map of resource tags. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. 
@@ -103,4 +104,4 @@ Using `terraform import`, import ElastiCache user groups using the `userGroupId` % terraform import aws_elasticache_user_group.my_user_group userGroupId1 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/elasticache_user_group_association.html.markdown b/website/docs/cdktf/typescript/r/elasticache_user_group_association.html.markdown index 0ece3367ce38..2204eec53d0f 100644 --- a/website/docs/cdktf/typescript/r/elasticache_user_group_association.html.markdown +++ b/website/docs/cdktf/typescript/r/elasticache_user_group_association.html.markdown @@ -74,8 +74,9 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -The following arguments are required: +This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `userGroupId` - (Required) ID of the user group. * `userId` - (Required) ID of the user to associate with the user group. 
@@ -122,4 +123,4 @@ Using `terraform import`, import ElastiCache user group associations using the ` % terraform import aws_elasticache_user_group_association.example userGroupId1,userId ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/elasticsearch_domain.html.markdown b/website/docs/cdktf/typescript/r/elasticsearch_domain.html.markdown index 7f6237641f3a..97d26695727e 100644 --- a/website/docs/cdktf/typescript/r/elasticsearch_domain.html.markdown +++ b/website/docs/cdktf/typescript/r/elasticsearch_domain.html.markdown @@ -73,7 +73,7 @@ class MyConvertedCode extends TerraformStack { new ElasticsearchDomain(this, "example", { accessPolicies: '{\n "Version": "2012-10-17",\n "Statement": [\n {\n "Action": "es:*",\n "Principal": "*",\n "Effect": "Allow",\n "Resource": "arn:aws:es:${' + - dataAwsRegionCurrent.name + + dataAwsRegionCurrent.region + "}:${" + current.accountId + "}:domain/${" + @@ -230,7 +230,7 @@ class MyConvertedCode extends TerraformStack { const awsElasticsearchDomainEs = new ElasticsearchDomain(this, "es_8", { accessPolicies: '{\n\t"Version": "2012-10-17",\n\t"Statement": [\n\t\t{\n\t\t\t"Action": "es:*",\n\t\t\t"Principal": "*",\n\t\t\t"Effect": "Allow",\n\t\t\t"Resource": "arn:aws:es:${' + - dataAwsRegionCurrent.name + + dataAwsRegionCurrent.region + "}:${" + current.accountId + "}:domain/${" + @@ -272,6 +272,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `accessPolicies` - (Optional) IAM policy document specifying the access policies for the domain. * `advancedOptions` - (Optional) Key-value string pairs to specify advanced configuration options. 
Note that the values for these configuration options must be strings (wrapped in quotes) or they may be wrong and cause a perpetual diff, causing Terraform to want to recreate your Elasticsearch domain on every apply. * `advancedSecurityOptions` - (Optional) Configuration block for [fine-grained access control](https://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/fgac.html). Detailed below. @@ -404,7 +405,6 @@ This resource exports the following attributes in addition to the arguments abov * `arn` - ARN of the domain. * `domainId` - Unique identifier for the domain. -* `domainName` - Name of the Elasticsearch domain. * `endpoint` - Domain-specific endpoint used to submit index, search, and data upload requests. * `kibanaEndpoint` - Domain-specific endpoint for kibana without https scheme. * `tagsAll` - Map of tags assigned to the resource, including those inherited from the provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). @@ -447,4 +447,4 @@ Using `terraform import`, import Elasticsearch domains using the `domainName`. F % terraform import aws_elasticsearch_domain.example domain_name ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/elasticsearch_domain_policy.html.markdown b/website/docs/cdktf/typescript/r/elasticsearch_domain_policy.html.markdown index ec5d86f49c38..e012baeed50a 100644 --- a/website/docs/cdktf/typescript/r/elasticsearch_domain_policy.html.markdown +++ b/website/docs/cdktf/typescript/r/elasticsearch_domain_policy.html.markdown @@ -47,6 +47,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `domainName` - (Required) Name of the domain. * `accessPolicies` - (Optional) IAM policy document specifying the access policies for the domain @@ -54,4 +55,4 @@ This resource supports the following arguments: This resource exports no additional attributes. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/elasticsearch_domain_saml_options.html.markdown b/website/docs/cdktf/typescript/r/elasticsearch_domain_saml_options.html.markdown index 9524d15b7dd4..895721a16a90 100644 --- a/website/docs/cdktf/typescript/r/elasticsearch_domain_saml_options.html.markdown +++ b/website/docs/cdktf/typescript/r/elasticsearch_domain_saml_options.html.markdown @@ -68,6 +68,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `samlOptions` - (Optional) The SAML authentication options for an AWS Elasticsearch Domain. ### saml_options @@ -123,4 +124,4 @@ Using `terraform import`, import Elasticsearch domains using the `domainName`. 
F % terraform import aws_elasticsearch_domain_saml_options.example domain_name ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/elasticsearch_vpc_endpoint.html.markdown b/website/docs/cdktf/typescript/r/elasticsearch_vpc_endpoint.html.markdown index 41eadc4a56c0..eaea90a25370 100644 --- a/website/docs/cdktf/typescript/r/elasticsearch_vpc_endpoint.html.markdown +++ b/website/docs/cdktf/typescript/r/elasticsearch_vpc_endpoint.html.markdown @@ -47,6 +47,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `domainArn` - (Required, Forces new resource) Specifies the Amazon Resource Name (ARN) of the domain to create the endpoint for * `vpcOptions` - (Required) Options to specify the subnets and security groups for the endpoint. @@ -102,4 +103,4 @@ Using `terraform import`, import elasticsearch VPC endpoint connections using th % terraform import aws_elasticsearch_vpc_endpoint_connection.example endpoint-id ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/elastictranscoder_pipeline.html.markdown b/website/docs/cdktf/typescript/r/elastictranscoder_pipeline.html.markdown index 409c73aa4df9..e5be522da8dc 100644 --- a/website/docs/cdktf/typescript/r/elastictranscoder_pipeline.html.markdown +++ b/website/docs/cdktf/typescript/r/elastictranscoder_pipeline.html.markdown @@ -12,6 +12,8 @@ description: |- Provides an Elastic Transcoder pipeline resource. +~> **Warning:** This resource is deprecated. 
Use [AWS Elemental MediaConvert](https://aws.amazon.com/blogs/media/migrating-workflows-from-amazon-elastic-transcoder-to-aws-elemental-mediaconvert/) instead. AWS will [discontinue support for Amazon Elastic Transcoder](https://aws.amazon.com/blogs/media/support-for-amazon-elastic-transcoder-ending-soon/), effective November 13, 2025. + ## Example Usage ```typescript @@ -48,6 +50,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `awsKmsKeyArn` - (Optional) The AWS Key Management Service (AWS KMS) key that you want to use with this pipeline. * `contentConfig` - (Optional) The ContentConfig object specifies information about the Amazon S3 bucket in which you want Elastic Transcoder to save transcoded files and playlists. (documented below) * `contentConfigPermissions` - (Optional) The permissions for the `contentConfig` object. (documented below) @@ -146,4 +149,4 @@ Using `terraform import`, import Elastic Transcoder pipelines using the `id`. Fo % terraform import aws_elastictranscoder_pipeline.basic_pipeline 1407981661351-cttk8b ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/elastictranscoder_preset.html.markdown b/website/docs/cdktf/typescript/r/elastictranscoder_preset.html.markdown index 7902ab885e97..6e3f8d871a5c 100644 --- a/website/docs/cdktf/typescript/r/elastictranscoder_preset.html.markdown +++ b/website/docs/cdktf/typescript/r/elastictranscoder_preset.html.markdown @@ -12,6 +12,8 @@ description: |- Provides an Elastic Transcoder preset resource. +~> **Warning:** This resource is deprecated. 
Use [AWS Elemental MediaConvert](https://aws.amazon.com/blogs/media/migrating-workflows-from-amazon-elastic-transcoder-to-aws-elemental-mediaconvert/) instead. AWS will [discontinue support for Amazon Elastic Transcoder](https://aws.amazon.com/blogs/media/support-for-amazon-elastic-transcoder-ending-soon/), effective November 13, 2025. + ## Example Usage ```typescript @@ -92,6 +94,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `audio` - (Optional, Forces new resource) Audio parameters object (documented below). * `audioCodecOptions` - (Optional, Forces new resource) Codec options for the audio parameters (documented below) * `container` - (Required, Forces new resource) The container type for the output file. Valid values are `flac`, `flv`, `fmp4`, `gif`, `mp3`, `mp4`, `mpg`, `mxf`, `oga`, `ogg`, `ts`, and `webm`. @@ -209,4 +212,4 @@ Using `terraform import`, import Elastic Transcoder presets using the `id`. For % terraform import aws_elastictranscoder_preset.basic_preset 1407981661351-cttk8b ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/elb.html.markdown b/website/docs/cdktf/typescript/r/elb.html.markdown index 81f0c64ed07b..1030370d114c 100644 --- a/website/docs/cdktf/typescript/r/elb.html.markdown +++ b/website/docs/cdktf/typescript/r/elb.html.markdown @@ -84,6 +84,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Optional) The name of the ELB. By default generated by Terraform. * `namePrefix` - (Optional, Forces new resource) Creates a unique name beginning with the specified prefix. Conflicts with `name`. @@ -189,4 +190,4 @@ Using `terraform import`, import ELBs using the `name`. For example: % terraform import aws_elb.bar elb-production-12345 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/elb_attachment.html.markdown b/website/docs/cdktf/typescript/r/elb_attachment.html.markdown index dff08fb643f4..6871d8b75679 100644 --- a/website/docs/cdktf/typescript/r/elb_attachment.html.markdown +++ b/website/docs/cdktf/typescript/r/elb_attachment.html.markdown @@ -46,6 +46,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `elb` - (Required) The name of the ELB. * `instance` - (Required) Instance ID to place in the ELB pool. @@ -53,4 +54,4 @@ This resource supports the following arguments: This resource exports no additional attributes. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/emr_block_public_access_configuration.html.markdown b/website/docs/cdktf/typescript/r/emr_block_public_access_configuration.html.markdown index e46a459484d4..c97db031accb 100644 --- a/website/docs/cdktf/typescript/r/emr_block_public_access_configuration.html.markdown +++ b/website/docs/cdktf/typescript/r/emr_block_public_access_configuration.html.markdown @@ -134,6 +134,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `permittedPublicSecurityGroupRuleRange` - (Optional) Configuration block for defining permitted public security group rule port ranges. Can be defined multiple times per resource. Only valid if `blockPublicSecurityGroupRules` is set to `true`. ### `permittedPublicSecurityGroupRuleRange` @@ -179,4 +180,4 @@ Using `terraform import`, import the current EMR Block Public Access Configurati % terraform import aws_emr_block_public_access_configuration.example current ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/emr_cluster.html.markdown b/website/docs/cdktf/typescript/r/emr_cluster.html.markdown index 9e40de2f1729..8d2f6b575035 100644 --- a/website/docs/cdktf/typescript/r/emr_cluster.html.markdown +++ b/website/docs/cdktf/typescript/r/emr_cluster.html.markdown @@ -628,6 +628,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `additionalInfo` - (Optional) JSON string for selecting additional features such as adding proxy information. Note: Currently there is no API to retrieve the value of this argument after EMR cluster creation from provider, therefore Terraform cannot detect drift from the actual EMR cluster if its value is changed outside Terraform. * `applications` - (Optional) A case-insensitive list of applications for Amazon EMR to install and configure when launching the cluster. For a list of applications available for each Amazon EMR release version, see the [Amazon EMR Release Guide](https://docs.aws.amazon.com/emr/latest/ReleaseGuide/emr-release-components.html). * `autoscalingRole` - (Optional) IAM role for automatic scaling policies. The IAM role provides permissions that the automatic scaling feature requires to launch and terminate EC2 instances in an instance group. @@ -679,6 +680,7 @@ class MyConvertedCode extends TerraformStack { * `logUri` - (Optional) S3 bucket to write the log files of the job flow. If a value is not provided, logs are not created. * `masterInstanceFleet` - (Optional) Configuration block to use an [Instance Fleet](https://docs.aws.amazon.com/emr/latest/ManagementGuide/emr-instance-fleet.html) for the master node type. Cannot be specified if any `masterInstanceGroup` configuration blocks are set. Detailed below. * `masterInstanceGroup` - (Optional) Configuration block to use an [Instance Group](https://docs.aws.amazon.com/emr/latest/ManagementGuide/emr-instance-group-configuration.html#emr-plan-instance-groups) for the [master node type](https://docs.aws.amazon.com/emr/latest/ManagementGuide/emr-master-core-task-nodes.html#emr-plan-master). +* `osReleaseLabel` - (Optional) Amazon Linux release for all nodes in a cluster launch RunJobFlow request. 
If not specified, Amazon EMR uses the latest validated Amazon Linux release for cluster launch. * `placementGroupConfig` - (Optional) The specified placement group configuration for an Amazon EMR cluster. * `scaleDownBehavior` - (Optional) Way that individual Amazon EC2 instances terminate when an automatic scale-in activity occurs or an `instance group` is resized. * `securityConfiguration` - (Optional) Security configuration name to attach to the EMR cluster. Only valid for EMR clusters with `releaseLabel` 4.8.0 or greater. @@ -689,6 +691,8 @@ class MyConvertedCode extends TerraformStack { * `unhealthyNodeReplacement` - (Optional) Whether Amazon EMR should gracefully replace core nodes that have degraded within the cluster. Default value is `false`. * `visibleToAllUsers` - (Optional) Whether the job flow is visible to all IAM users of the AWS account associated with the job flow. Default value is `true`. + **NOTE:** As per the [Amazon EMR API Reference](https://docs.aws.amazon.com/emr/latest/APIReference/API_RunJobFlow.html#EMR-RunJobFlow-request-VisibleToAllUsers), this argument is no longer supported. Do not set this argument, particularly to `false`, as it would lead to perpetual differences. + ### bootstrap_action * `args` - (Optional) List of command line arguments to pass to the bootstrap action script. @@ -855,7 +859,6 @@ This resource exports the following attributes in addition to the arguments abov * `releaseLabel` - Release label for the Amazon EMR release. * `serviceRole` - IAM role that will be assumed by the Amazon EMR service to access AWS resources on your behalf. * `tagsAll` - Map of tags assigned to the resource, including those inherited from the provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). -* `visibleToAllUsers` - Indicates whether the job flow is visible to all IAM users of the AWS account associated with the job flow. 
## Import @@ -917,4 +920,4 @@ class MyConvertedCode extends TerraformStack { ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/emr_instance_fleet.html.markdown b/website/docs/cdktf/typescript/r/emr_instance_fleet.html.markdown index 8d770c460f93..a02a8f175f0b 100644 --- a/website/docs/cdktf/typescript/r/emr_instance_fleet.html.markdown +++ b/website/docs/cdktf/typescript/r/emr_instance_fleet.html.markdown @@ -82,6 +82,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `clusterId` - (Required) ID of the EMR Cluster to attach to. Changing this forces a new resource to be created. * `instanceTypeConfigs` - (Optional) Configuration block for instance fleet * `launchSpecifications` - (Optional) Configuration block for launch specification @@ -181,4 +182,4 @@ Using `terraform import`, import EMR Instance Fleet using the EMR Cluster identi % terraform import aws_emr_instance_fleet.example j-123456ABCDEF/if-15EK4O09RZLNR ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/emr_instance_group.html.markdown b/website/docs/cdktf/typescript/r/emr_instance_group.html.markdown index a6c3124308f6..3ccaf5c3a125 100644 --- a/website/docs/cdktf/typescript/r/emr_instance_group.html.markdown +++ b/website/docs/cdktf/typescript/r/emr_instance_group.html.markdown @@ -46,6 +46,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` (Required) Human friendly name given to the instance group. Changing this forces a new resource to be created. * `clusterId` (Required) ID of the EMR Cluster to attach to. Changing this forces a new resource to be created. * `instanceType` (Required) The EC2 instance type for all instances in the instance group. Changing this forces a new resource to be created. @@ -130,4 +131,4 @@ Using `terraform import`, import EMR task instance group using their EMR Cluster % terraform import aws_emr_instance_group.task_group j-123456ABCDEF/ig-15EK4O09RZLNR ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/emr_managed_scaling_policy.html.markdown b/website/docs/cdktf/typescript/r/emr_managed_scaling_policy.html.markdown index e5709a043239..4b4ea1b648b8 100644 --- a/website/docs/cdktf/typescript/r/emr_managed_scaling_policy.html.markdown +++ b/website/docs/cdktf/typescript/r/emr_managed_scaling_policy.html.markdown @@ -62,6 +62,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `clusterId` - (Required) ID of the EMR cluster * `computeLimits` - (Required) Configuration block with compute limit settings. Described below. 
@@ -109,4 +110,4 @@ Using `terraform import`, import EMR Managed Scaling Policies using the EMR Clus % terraform import aws_emr_managed_scaling_policy.example j-123456ABCDEF ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/emr_security_configuration.html.markdown b/website/docs/cdktf/typescript/r/emr_security_configuration.html.markdown index 0a1a8e0c6649..52acbcc123c7 100644 --- a/website/docs/cdktf/typescript/r/emr_security_configuration.html.markdown +++ b/website/docs/cdktf/typescript/r/emr_security_configuration.html.markdown @@ -40,6 +40,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Optional) The name of the EMR Security Configuration. By default generated by Terraform. * `namePrefix` - (Optional) Creates a unique name beginning with the specified prefix. Conflicts with `name`. @@ -86,4 +87,4 @@ Using `terraform import`, import EMR Security Configurations using the `name`. F % terraform import aws_emr_security_configuration.sc example-sc-name ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/emr_studio.html.markdown b/website/docs/cdktf/typescript/r/emr_studio.html.markdown index 0d2e8b3962fa..f2f1747e6c47 100644 --- a/website/docs/cdktf/typescript/r/emr_studio.html.markdown +++ b/website/docs/cdktf/typescript/r/emr_studio.html.markdown @@ -57,6 +57,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` - (Optional) A detailed description of the Amazon EMR Studio. * `encryptionKeyArn` - (Optional) The AWS KMS key identifier (ARN) used to encrypt Amazon EMR Studio workspace and notebook files when backed up to Amazon S3. * `idpAuthUrl` - (Optional) The authentication endpoint of your identity provider (IdP). Specify this value when you use IAM authentication and want to let federated users log in to a Studio with the Studio URL and credentials from your IdP. Amazon EMR Studio redirects users to this endpoint to enter credentials. @@ -99,4 +100,4 @@ Using `terraform import`, import EMR studios using the `id`. For example: % terraform import aws_emr_studio.studio es-123456ABCDEF ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/emr_studio_session_mapping.html.markdown b/website/docs/cdktf/typescript/r/emr_studio_session_mapping.html.markdown index 68fd77495ce8..cc132b5966a4 100644 --- a/website/docs/cdktf/typescript/r/emr_studio_session_mapping.html.markdown +++ b/website/docs/cdktf/typescript/r/emr_studio_session_mapping.html.markdown @@ -41,6 +41,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `identityId`- (Optional) The globally unique identifier (GUID) of the user or group from the Amazon Web Services SSO Identity Store. * `identityName` - (Optional) The name of the user or group from the Amazon Web Services SSO Identity Store. 
* `identityType` - (Required) Specifies whether the identity to map to the Amazon EMR Studio is a `USER` or a `GROUP`. @@ -85,4 +86,4 @@ Using `terraform import`, import EMR studio session mappings using `studio-id:id % terraform import aws_emr_studio_session_mapping.example es-xxxxx:USER:xxxxx-xxx-xxx ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/emrcontainers_job_template.html.markdown b/website/docs/cdktf/typescript/r/emrcontainers_job_template.html.markdown index b2a91ef0103d..7657a9b0cd38 100644 --- a/website/docs/cdktf/typescript/r/emrcontainers_job_template.html.markdown +++ b/website/docs/cdktf/typescript/r/emrcontainers_job_template.html.markdown @@ -49,9 +49,10 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `jobTemplateData` - (Required) The job template data which holds values of StartJobRun API request. * `kmsKeyArn` - (Optional) The KMS key ARN used to encrypt the job template. -* `name` – (Required) The specified name of the job template. +* `name` - (Required) The specified name of the job template. * `tags` - (Optional) Key-value mapping of resource tags. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. ### job_template_data Arguments @@ -144,4 +145,4 @@ Using `terraform import`, import EKS job templates using the `id`. 
For example: % terraform import aws_emrcontainers_job_template.example a1b2c3d4e5f6g7h8i9j10k11l ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/emrcontainers_virtual_cluster.html.markdown b/website/docs/cdktf/typescript/r/emrcontainers_virtual_cluster.html.markdown index 067bce9e1e6b..8c4d1deee822 100644 --- a/website/docs/cdktf/typescript/r/emrcontainers_virtual_cluster.html.markdown +++ b/website/docs/cdktf/typescript/r/emrcontainers_virtual_cluster.html.markdown @@ -49,8 +49,9 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `containerProvider` - (Required) Configuration block for the container provider associated with your cluster. -* `name` – (Required) Name of the virtual cluster. +* `name` - (Required) Name of the virtual cluster. * `tags` - (Optional) Key-value mapping of resource tags. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. ### container_provider Arguments @@ -101,4 +102,4 @@ Using `terraform import`, import EKS Clusters using the `id`. 
For example: % terraform import aws_emrcontainers_virtual_cluster.example a1b2c3d4e5f6g7h8i9j10k11l ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/emrserverless_application.html.markdown b/website/docs/cdktf/typescript/r/emrserverless_application.html.markdown index 15ebc5907102..4f3f346e1a48 100644 --- a/website/docs/cdktf/typescript/r/emrserverless_application.html.markdown +++ b/website/docs/cdktf/typescript/r/emrserverless_application.html.markdown @@ -106,17 +106,18 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: -* `architecture` – (Optional) The CPU architecture of an application. Valid values are `ARM64` or `X86_64`. Default value is `X86_64`. -* `autoStartConfiguration` – (Optional) The configuration for an application to automatically start on job submission. -* `autoStopConfiguration` – (Optional) The configuration for an application to automatically stop after a certain amount of time being idle. -* `imageConfiguration` – (Optional) The image configuration applied to all worker types. -* `initialCapacity` – (Optional) The capacity to initialize when the application is created. -* `interactiveConfiguration` – (Optional) Enables the interactive use cases to use when running an application. -* `maximumCapacity` – (Optional) The maximum capacity to allocate when the application is created. This is cumulative across all workers at any given point in time, not just when an application is created. No new resources will be created once any one of the defined limits is hit. -* `name` – (Required) The name of the application. -* `networkConfiguration` – (Optional) The network configuration for customer VPC connectivity. -* `releaseLabel` – (Required) The EMR release version associated with the application. -* `type` – (Required) The type of application you want to start, such as `spark` or `hive`. 
+* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `architecture` - (Optional) The CPU architecture of an application. Valid values are `ARM64` or `X86_64`. Default value is `X86_64`. +* `autoStartConfiguration` - (Optional) The configuration for an application to automatically start on job submission. +* `autoStopConfiguration` - (Optional) The configuration for an application to automatically stop after a certain amount of time being idle. +* `imageConfiguration` - (Optional) The image configuration applied to all worker types. +* `initialCapacity` - (Optional) The capacity to initialize when the application is created. +* `interactiveConfiguration` - (Optional) Enables the interactive use cases to use when running an application. +* `maximumCapacity` - (Optional) The maximum capacity to allocate when the application is created. This is cumulative across all workers at any given point in time, not just when an application is created. No new resources will be created once any one of the defined limits is hit. +* `name` - (Required) The name of the application. +* `networkConfiguration` - (Optional) The network configuration for customer VPC connectivity. +* `releaseLabel` - (Required) The EMR release version associated with the application. +* `type` - (Required) The type of application you want to start, such as `spark` or `hive`. * `tags` - (Optional) Key-value mapping of resource tags. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. 
### auto_start_configuration Arguments @@ -200,4 +201,4 @@ Using `terraform import`, import EMR Severless applications using the `id`. For % terraform import aws_emrserverless_application.example id ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/evidently_feature.html.markdown b/website/docs/cdktf/typescript/r/evidently_feature.html.markdown index d6ebd422a59d..828bbef4b238 100644 --- a/website/docs/cdktf/typescript/r/evidently_feature.html.markdown +++ b/website/docs/cdktf/typescript/r/evidently_feature.html.markdown @@ -12,6 +12,8 @@ description: |- Provides a CloudWatch Evidently Feature resource. +~> **Warning:** This resource is deprecated. Use [AWS AppConfig feature flags](https://aws.amazon.com/blogs/mt/using-aws-appconfig-feature-flags/) instead. + ## Example Usage ### Basic @@ -166,6 +168,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `defaultVariation` - (Optional) The name of the variation to use as the default variation. The default variation is served to users who are not allocated to any ongoing launches or experiments of this feature. This variation must also be listed in the `variations` structure. If you omit `defaultVariation`, the first variation listed in the `variations` structure is used as the default variation. * `description` - (Optional) Specifies the description of the feature. * `entityOverrides` - (Optional) Specify users that should always be served a specific variation of a feature. Each user is specified by a key-value pair . For each key, specify a user by entering their user ID, account ID, or some other identifier. 
For the value, specify the name of the variation that they are to be served. @@ -253,4 +256,4 @@ Using `terraform import`, import CloudWatch Evidently Feature using the feature % terraform import aws_evidently_feature.example exampleFeatureName:arn:aws:evidently:us-east-1:123456789012:project/example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/evidently_launch.html.markdown b/website/docs/cdktf/typescript/r/evidently_launch.html.markdown index 564a2417b311..d797fb479ff5 100644 --- a/website/docs/cdktf/typescript/r/evidently_launch.html.markdown +++ b/website/docs/cdktf/typescript/r/evidently_launch.html.markdown @@ -12,6 +12,8 @@ description: |- Provides a CloudWatch Evidently Launch resource. +~> **Warning:** This resource is deprecated. Use [AWS AppConfig feature flags](https://aws.amazon.com/blogs/mt/using-aws-appconfig-feature-flags/) instead. + ## Example Usage ### Basic @@ -366,6 +368,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` - (Optional) Specifies the description of the launch. * `groups` - (Required) One or up to five blocks that contain the feature and variations that are to be used for the launch. [Detailed below](#groups). * `metricMonitors` - (Optional) One or up to three blocks that define the metrics that will be used to monitor the launch performance. [Detailed below](#metric_monitors). 
@@ -515,4 +518,4 @@ Import using the `name` of the launch and `arn` of the project separated by a `: % terraform import aws_evidently_launch.example exampleLaunchName:arn:aws:evidently:us-east-1:123456789012:project/exampleProjectName ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/evidently_project.html.markdown b/website/docs/cdktf/typescript/r/evidently_project.html.markdown index d3510e616782..4b5decac50d7 100644 --- a/website/docs/cdktf/typescript/r/evidently_project.html.markdown +++ b/website/docs/cdktf/typescript/r/evidently_project.html.markdown @@ -12,6 +12,8 @@ description: |- Provides a CloudWatch Evidently Project resource. +~> **Warning:** This resource is deprecated. Use [AWS AppConfig feature flags](https://aws.amazon.com/blogs/mt/using-aws-appconfig-feature-flags/) instead. + ## Example Usage ### Basic @@ -107,6 +109,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `dataDelivery` - (Optional) A block that contains information about where Evidently is to store evaluation events for longer term storage, if you choose to do so. If you choose not to store these events, Evidently deletes them after using them to produce metrics and other experiment results that you can view. See below. * `description` - (Optional) Specifies the description of the project. * `name` - (Required) A name for the project. @@ -184,4 +187,4 @@ Using `terraform import`, import CloudWatch Evidently Project using the `arn`. 
F % terraform import aws_evidently_project.example arn:aws:evidently:us-east-1:123456789012:segment/example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/evidently_segment.html.markdown b/website/docs/cdktf/typescript/r/evidently_segment.html.markdown index a368c2a4df3e..a93bb499be10 100644 --- a/website/docs/cdktf/typescript/r/evidently_segment.html.markdown +++ b/website/docs/cdktf/typescript/r/evidently_segment.html.markdown @@ -12,6 +12,8 @@ description: |- Provides a CloudWatch Evidently Segment resource. +~> **Warning:** This resource is deprecated. Use [AWS AppConfig feature flags](https://aws.amazon.com/blogs/mt/using-aws-appconfig-feature-flags/) instead. + ## Example Usage ### Basic @@ -95,6 +97,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` - (Optional, Forces new resource) Specifies the description of the segment. * `name` - (Required, Forces new resource) A name for the segment. * `pattern` - (Required, Forces new resource) The pattern to use for the segment. For more information about pattern syntax, see [Segment rule pattern syntax](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch-Evidently-segments.html#CloudWatch-Evidently-segments-syntax.html). @@ -144,4 +147,4 @@ Using `terraform import`, import CloudWatch Evidently Segment using the `arn`. 
F % terraform import aws_evidently_segment.example arn:aws:evidently:us-west-2:123456789012:segment/example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/finspace_kx_cluster.html.markdown b/website/docs/cdktf/typescript/r/finspace_kx_cluster.html.markdown index fee38d1c1fe4..091355f6b8d2 100644 --- a/website/docs/cdktf/typescript/r/finspace_kx_cluster.html.markdown +++ b/website/docs/cdktf/typescript/r/finspace_kx_cluster.html.markdown @@ -94,11 +94,12 @@ The following arguments are required: * RDB - Realtime Database. This type of database captures all the data from a ticker plant and stores it in memory until the end of day, after which it writes all of its data to a disk and reloads the HDB. This cluster type requires local storage for temporary storage of data during the savedown process. If you specify this field in your request, you must provide the `savedownStorageConfiguration` parameter. * GATEWAY - A gateway cluster allows you to access data across processes in kdb systems. It allows you to create your own routing logic using the initialization scripts and custom code. This type of cluster does not require a writable local storage. * GP - A general purpose cluster allows you to quickly iterate on code during development by granting greater access to system commands and enabling a fast reload of custom code. This cluster type can optionally mount databases including cache and savedown storage. For this cluster type, the node count is fixed at 1. It does not support autoscaling and supports only `SINGLE` AZ mode. - * Tickerplant – A tickerplant cluster allows you to subscribe to feed handlers based on IAM permissions. It can publish to RDBs, other Tickerplants, and real-time subscribers (RTS). Tickerplants can persist messages to log, which is readable by any RDB environment. It supports only single-node that is only one kdb process. 
+ * Tickerplant - A tickerplant cluster allows you to subscribe to feed handlers based on IAM permissions. It can publish to RDBs, other Tickerplants, and real-time subscribers (RTS). Tickerplants can persist messages to log, which is readable by any RDB environment. It supports only single-node that is only one kdb process. * `vpcConfiguration` - (Required) Configuration details about the network where the Privatelink endpoint of the cluster resides. See [vpc_configuration](#vpc_configuration). The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `autoScalingConfiguration` - (Optional) Configuration based on which FinSpace will scale in or scale out nodes in your cluster. See [auto_scaling_configuration](#auto_scaling_configuration). * `availabilityZoneId` - (Optional) The availability zone identifiers for the requested regions. Required when `azMode` is set to SINGLE. * `cacheStorageConfigurations` - (Optional) Configurations for a read only cache storage associated with a cluster. This cache will be stored as an FSx Lustre that reads from the S3 store. See [cache_storage_configuration](#cache_storage_configuration). @@ -131,13 +132,13 @@ The capacity_configuration block supports the following arguments: * `nodeType` - (Required) Determines the hardware of the host computer used for your cluster instance. Each node type offers different memory and storage capabilities. Choose a node type based on the requirements of the application or software that you plan to run on your instance. You can only specify one of the following values: - * kx.s.large – The node type with a configuration of 12 GiB memory and 2 vCPUs. 
- * kx.s.xlarge – The node type with a configuration of 27 GiB memory and 4 vCPUs. - * kx.s.2xlarge – The node type with a configuration of 54 GiB memory and 8 vCPUs. - * kx.s.4xlarge – The node type with a configuration of 108 GiB memory and 16 vCPUs. - * kx.s.8xlarge – The node type with a configuration of 216 GiB memory and 32 vCPUs. - * kx.s.16xlarge – The node type with a configuration of 432 GiB memory and 64 vCPUs. - * kx.s.32xlarge – The node type with a configuration of 864 GiB memory and 128 vCPUs. + * kx.s.large - The node type with a configuration of 12 GiB memory and 2 vCPUs. + * kx.s.xlarge - The node type with a configuration of 27 GiB memory and 4 vCPUs. + * kx.s.2xlarge - The node type with a configuration of 54 GiB memory and 8 vCPUs. + * kx.s.4xlarge - The node type with a configuration of 108 GiB memory and 16 vCPUs. + * kx.s.8xlarge - The node type with a configuration of 216 GiB memory and 32 vCPUs. + * kx.s.16xlarge - The node type with a configuration of 432 GiB memory and 64 vCPUs. + * kx.s.32xlarge - The node type with a configuration of 864 GiB memory and 128 vCPUs. * `nodeCount` - (Required) Number of instances running in a cluster. Must be at least 1 and at most 5. 
### cache_storage_configuration @@ -261,4 +262,4 @@ Using `terraform import`, import an AWS FinSpace Kx Cluster using the `id` (envi % terraform import aws_finspace_kx_cluster.example n3ceo7wqxoxcti5tujqwzs,my-tf-kx-cluster ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/finspace_kx_database.html.markdown b/website/docs/cdktf/typescript/r/finspace_kx_database.html.markdown index 8f9bc9063841..1af859ca72f5 100644 --- a/website/docs/cdktf/typescript/r/finspace_kx_database.html.markdown +++ b/website/docs/cdktf/typescript/r/finspace_kx_database.html.markdown @@ -69,6 +69,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` - (Optional) Description of the KX database. * `tags` - (Optional) Key-value mapping of resource tags. If configured with a provider [`defaultTags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. 
@@ -122,4 +123,4 @@ Using `terraform import`, import an AWS FinSpace Kx Database using the `id` (env % terraform import aws_finspace_kx_database.example n3ceo7wqxoxcti5tujqwzs,my-tf-kx-database ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/finspace_kx_dataview.html.markdown b/website/docs/cdktf/typescript/r/finspace_kx_dataview.html.markdown index c2b9f98df88a..827ecdae1381 100644 --- a/website/docs/cdktf/typescript/r/finspace_kx_dataview.html.markdown +++ b/website/docs/cdktf/typescript/r/finspace_kx_dataview.html.markdown @@ -68,6 +68,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `autoUpdate` - (Optional) The option to specify whether you want to apply all the future additions and corrections automatically to the dataview, when you ingest new changesets. The default value is false. * `availabilityZoneId` - (Optional) The identifier of the availability zones. If attaching a volume, the volume must be in the same availability zone as the dataview that you are attaching to. * `changesetId` - (Optional) A unique identifier of the changeset of the database that you want to use to ingest data. 
@@ -135,4 +136,4 @@ Using `terraform import`, import an AWS FinSpace Kx Cluster using the `id` (envi % terraform import aws_finspace_kx_dataview.example n3ceo7wqxoxcti5tujqwzs,my-tf-kx-database,my-tf-kx-dataview ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/finspace_kx_environment.html.markdown b/website/docs/cdktf/typescript/r/finspace_kx_environment.html.markdown index 99280d6c83f1..4d47e005043f 100644 --- a/website/docs/cdktf/typescript/r/finspace_kx_environment.html.markdown +++ b/website/docs/cdktf/typescript/r/finspace_kx_environment.html.markdown @@ -163,6 +163,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `customDnsConfiguration` - (Optional) List of DNS server name and server IP. This is used to set up Route-53 outbound resolvers. Defined below. * `description` - (Optional) Description for the KX environment. * `tags` - (Optional) Key-value mapping of resource tags. If configured with a provider [`defaultTags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. @@ -263,4 +264,4 @@ Using `terraform import`, import an AWS FinSpace Kx Environment using the `id`. 
% terraform import aws_finspace_kx_environment.example n3ceo7wqxoxcti5tujqwzs ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/finspace_kx_scaling_group.html.markdown b/website/docs/cdktf/typescript/r/finspace_kx_scaling_group.html.markdown index 68d26b3ee6d3..cd5fc808bc1f 100644 --- a/website/docs/cdktf/typescript/r/finspace_kx_scaling_group.html.markdown +++ b/website/docs/cdktf/typescript/r/finspace_kx_scaling_group.html.markdown @@ -50,6 +50,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `tags` - (Optional) Key-value mapping of resource tags. If configured with a provider [`defaultTags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. You can add up to 50 tags to a scaling group. ## Attribute Reference @@ -61,14 +62,14 @@ This resource exports the following attributes in addition to the arguments abov * `createdTimestamp` - The timestamp at which the scaling group was created in FinSpace. The value is determined as epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000. * `lastModifiedTimestamp` - Last timestamp at which the scaling group was updated in FinSpace. Value determined as epoch time in seconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000. * `status` - The status of scaling group. - * `CREATING` – The scaling group creation is in progress. - * `CREATE_FAILED` – The scaling group creation has failed. 
- * `ACTIVE` – The scaling group is active. - * `UPDATING` – The scaling group is in the process of being updated. - * `UPDATE_FAILED` – The update action failed. - * `DELETING` – The scaling group is in the process of being deleted. - * `DELETE_FAILED` – The system failed to delete the scaling group. - * `DELETED` – The scaling group is successfully deleted. + * `CREATING` - The scaling group creation is in progress. + * `CREATE_FAILED` - The scaling group creation has failed. + * `ACTIVE` - The scaling group is active. + * `UPDATING` - The scaling group is in the process of being updated. + * `UPDATE_FAILED` - The update action failed. + * `DELETING` - The scaling group is in the process of being deleted. + * `DELETE_FAILED` - The system failed to delete the scaling group. + * `DELETED` - The scaling group is successfully deleted. * `statusReason` - The error message when a failed state occurs. * `tagsAll` - Map of tags assigned to the resource, including those inherited from the provider [`defaultTags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block). @@ -112,4 +113,4 @@ Using `terraform import`, import an AWS FinSpace Kx Scaling Group using the `id` % terraform import aws_finspace_kx_scaling_group.example n3ceo7wqxoxcti5tujqwzs,my-tf-kx-scalinggroup ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/finspace_kx_user.html.markdown b/website/docs/cdktf/typescript/r/finspace_kx_user.html.markdown index f250a1782a7d..f57a29f9b7c8 100644 --- a/website/docs/cdktf/typescript/r/finspace_kx_user.html.markdown +++ b/website/docs/cdktf/typescript/r/finspace_kx_user.html.markdown @@ -87,6 +87,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `tags` - (Optional) Key-value mapping of resource tags. If configured with a provider [`defaultTags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. ## Attribute Reference @@ -137,4 +138,4 @@ Using `terraform import`, import an AWS FinSpace Kx User using the `id` (environ % terraform import aws_finspace_kx_user.example n3ceo7wqxoxcti5tujqwzs,my-tf-kx-user ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/finspace_kx_volume.html.markdown b/website/docs/cdktf/typescript/r/finspace_kx_volume.html.markdown index ce68cee9d6fe..470be7e18ee0 100644 --- a/website/docs/cdktf/typescript/r/finspace_kx_volume.html.markdown +++ b/website/docs/cdktf/typescript/r/finspace_kx_volume.html.markdown @@ -29,7 +29,7 @@ class MyConvertedCode extends TerraformStack { constructor(scope: Construct, name: string) { super(scope, name); new FinspaceKxVolume(this, "example", { - availabilityZones: Token.asList("use1-az2"), + availabilityZones: ["use1-az2"], azMode: "SINGLE", environmentId: Token.asString(awsFinspaceKxEnvironmentExample.id), name: "my-tf-kx-volume", @@ -59,6 +59,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `nas1Configuration` - (Optional) Specifies the configuration for the Network attached storage (`NAS_1`) file system volume. This parameter is required when `volumeType` is `NAS_1`. 
See [`nas1Configuration` Argument Reference](#nas1_configuration-argument-reference) below. * `description` - (Optional) Description of the volume. * `tags` - (Optional) A list of key-value pairs to label the volume. You can add up to 50 tags to a volume @@ -77,15 +78,15 @@ This resource exports the following attributes in addition to the arguments abov * `arn` - Amazon Resource Name (ARN) identifier of the KX volume. * `createdTimestamp` - The timestamp at which the volume was created in FinSpace. The value is determined as epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000. * `status` - The status of volume creation. - * `CREATING` – The volume creation is in progress. - * `CREATE_FAILED` – The volume creation has failed. - * `ACTIVE` – The volume is active. - * `UPDATING` – The volume is in the process of being updated. - * `UPDATE_FAILED` – The update action failed. - * `UPDATED` – The volume is successfully updated. - * `DELETING` – The volume is in the process of being deleted. - * `DELETE_FAILED` – The system failed to delete the volume. - * `DELETED` – The volume is successfully deleted. + * `CREATING` - The volume creation is in progress. + * `CREATE_FAILED` - The volume creation has failed. + * `ACTIVE` - The volume is active. + * `UPDATING` - The volume is in the process of being updated. + * `UPDATE_FAILED` - The update action failed. + * `UPDATED` - The volume is successfully updated. + * `DELETING` - The volume is in the process of being deleted. + * `DELETE_FAILED` - The system failed to delete the volume. + * `DELETED` - The volume is successfully deleted. * `statusReason` - The error message when a failed state occurs. * `lastModifiedTimestamp` - Last timestamp at which the volume was updated in FinSpace. Value determined as epoch time in seconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000. 
@@ -129,4 +130,4 @@ Using `terraform import`, import an AWS FinSpace Kx Volume using the `id` (envir % terraform import aws_finspace_kx_volume.example n3ceo7wqxoxcti5tujqwzs,my-tf-kx-volume ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/fis_experiment_template.html.markdown b/website/docs/cdktf/typescript/r/fis_experiment_template.html.markdown index 3c30a74eccfa..9df359ee96aa 100644 --- a/website/docs/cdktf/typescript/r/fis_experiment_template.html.markdown +++ b/website/docs/cdktf/typescript/r/fis_experiment_template.html.markdown @@ -225,6 +225,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `experimentOptions` - (Optional) The experiment options for the experiment template. See [experiment_options](#experiment_options) below for more details! * `tags` - (Optional) Key-value mapping of tags. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. * `target` - (Optional) Target of an action. See below. @@ -365,4 +366,4 @@ Using `terraform import`, import FIS Experiment Templates using the `id`. 
For ex % terraform import aws_fis_experiment_template.template EXT123AbCdEfGhIjK ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/flow_log.html.markdown b/website/docs/cdktf/typescript/r/flow_log.html.markdown index 5cc699ffecda..2d411acac349 100644 --- a/website/docs/cdktf/typescript/r/flow_log.html.markdown +++ b/website/docs/cdktf/typescript/r/flow_log.html.markdown @@ -11,7 +11,7 @@ description: |- # Resource: aws_flow_log Provides a VPC/Subnet/ENI/Transit Gateway/Transit Gateway Attachment Flow Log to capture IP traffic for a specific network -interface, subnet, or VPC. Logs are sent to a CloudWatch Log Group, a S3 Bucket, or Amazon Kinesis Data Firehose +interface, subnet, or VPC. Logs are sent to a CloudWatch Log Group, a S3 Bucket, or Amazon Data Firehose ## Example Usage @@ -97,7 +97,7 @@ class MyConvertedCode extends TerraformStack { ``` -### Amazon Kinesis Data Firehose logging +### Amazon Data Firehose logging ```typescript // DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug @@ -265,26 +265,201 @@ class MyConvertedCode extends TerraformStack { ``` +### Cross-Account Amazon Data Firehose Logging + +The following example shows how to set up a flow log in one AWS account (source) that sends logs to an Amazon Data Firehose delivery stream in another AWS account (destination). +See the [AWS Documentation](https://docs.aws.amazon.com/vpc/latest/userguide/flow-logs-firehose.html). + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { Token, TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. 
+ */ +import { DataAwsIamPolicyDocument } from "./.gen/providers/aws/data-aws-iam-policy-document"; +import { FlowLog } from "./.gen/providers/aws/flow-log"; +import { IamRole } from "./.gen/providers/aws/iam-role"; +import { IamRolePolicy } from "./.gen/providers/aws/iam-role-policy"; +import { KinesisFirehoseDeliveryStream } from "./.gen/providers/aws/kinesis-firehose-delivery-stream"; +import { AwsProvider } from "./.gen/providers/aws/provider"; +import { Vpc } from "./.gen/providers/aws/vpc"; +interface MyConfig { + destination: any; + name: any; +} +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string, config: MyConfig) { + super(scope, name); + new AwsProvider(this, "aws", { + profile: "admin-src", + }); + const destinationAccount = new AwsProvider(this, "aws_1", { + alias: "destination_account", + profile: "admin-dst", + }); + const dst = new KinesisFirehoseDeliveryStream(this, "dst", { + provider: destinationAccount, + tags: { + LogDeliveryEnabled: "true", + }, + destination: config.destination, + name: config.name, + }); + const src = new Vpc(this, "src", {}); + const dstRolePolicy = new DataAwsIamPolicyDocument( + this, + "dst_role_policy", + { + statement: [ + { + actions: [ + "iam:CreateServiceLinkedRole", + "firehose:TagDeliveryStream", + ], + effect: "Allow", + resources: ["*"], + }, + ], + } + ); + const srcAssumeRolePolicy = new DataAwsIamPolicyDocument( + this, + "src_assume_role_policy", + { + statement: [ + { + actions: ["sts:AssumeRole"], + effect: "Allow", + principals: [ + { + identifiers: ["delivery.logs.amazonaws.com"], + type: "Service", + }, + ], + }, + ], + } + ); + const awsIamRoleSrc = new IamRole(this, "src_6", { + assumeRolePolicy: Token.asString(srcAssumeRolePolicy.json), + name: "tf-example-mySourceRole", + }); + /*This allows the Terraform resource name to match the original name. 
You can remove the call if you don't need them to match.*/ + awsIamRoleSrc.overrideLogicalId("src"); + const dstAssumeRolePolicy = new DataAwsIamPolicyDocument( + this, + "dst_assume_role_policy", + { + statement: [ + { + actions: ["sts:AssumeRole"], + effect: "Allow", + principals: [ + { + identifiers: [Token.asString(awsIamRoleSrc.arn)], + type: "AWS", + }, + ], + }, + ], + } + ); + const awsIamRoleDst = new IamRole(this, "dst_8", { + assumeRolePolicy: Token.asString(dstAssumeRolePolicy.json), + name: "AWSLogDeliveryFirehoseCrossAccountRole", + provider: destinationAccount, + }); + /*This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match.*/ + awsIamRoleDst.overrideLogicalId("dst"); + const awsIamRolePolicyDst = new IamRolePolicy(this, "dst_9", { + name: "AWSLogDeliveryFirehoseCrossAccountRolePolicy", + policy: Token.asString(dstRolePolicy.json), + provider: destinationAccount, + role: Token.asString(awsIamRoleDst.name), + }); + /*This allows the Terraform resource name to match the original name. 
You can remove the call if you don't need them to match.*/ + awsIamRolePolicyDst.overrideLogicalId("dst"); + const srcRolePolicy = new DataAwsIamPolicyDocument( + this, + "src_role_policy", + { + statement: [ + { + actions: ["iam:PassRole"], + condition: [ + { + test: "StringEquals", + values: ["delivery.logs.amazonaws.com"], + variable: "iam:PassedToService", + }, + { + test: "StringLike", + values: [src.arn], + variable: "iam:AssociatedResourceARN", + }, + ], + effect: "Allow", + resources: [Token.asString(awsIamRoleSrc.arn)], + }, + { + actions: [ + "logs:CreateLogDelivery", + "logs:DeleteLogDelivery", + "logs:ListLogDeliveries", + "logs:GetLogDelivery", + ], + effect: "Allow", + resources: ["*"], + }, + { + actions: ["sts:AssumeRole"], + effect: "Allow", + resources: [Token.asString(awsIamRoleDst.arn)], + }, + ], + } + ); + const awsFlowLogSrc = new FlowLog(this, "src_11", { + deliverCrossAccountRole: Token.asString(awsIamRoleDst.arn), + iamRoleArn: Token.asString(awsIamRoleSrc.arn), + logDestination: dst.arn, + logDestinationType: "kinesis-data-firehose", + trafficType: "ALL", + vpcId: src.id, + }); + /*This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match.*/ + awsFlowLogSrc.overrideLogicalId("src"); + new IamRolePolicy(this, "src_policy", { + name: "tf-example-mySourceRolePolicy", + policy: Token.asString(srcRolePolicy.json), + role: Token.asString(awsIamRoleSrc.name), + }); + } +} + +``` + ## Argument Reference This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `trafficType` - (Required) The type of traffic to capture. Valid values: `ACCEPT`,`REJECT`, `ALL`. 
-* `deliverCrossAccountRole` - (Optional) ARN of the IAM role that allows Amazon EC2 to publish flow logs across accounts. -* `eniId` - (Optional) Elastic Network Interface ID to attach to -* `iamRoleArn` - (Optional) The ARN for the IAM role that's used to post flow logs to a CloudWatch Logs log group -* `logDestinationType` - (Optional) The type of the logging destination. Valid values: `cloud-watch-logs`, `s3`, `kinesis-data-firehose`. Default: `cloud-watch-logs`. -* `logDestination` - (Optional) The ARN of the logging destination. Either `logDestination` or `logGroupName` must be set. -* `logGroupName` - (Optional) **Deprecated:** Use `logDestination` instead. The name of the CloudWatch log group. Either `logGroupName` or `logDestination` must be set. -* `subnetId` - (Optional) Subnet ID to attach to -* `transitGatewayId` - (Optional) Transit Gateway ID to attach to -* `transitGatewayAttachmentId` - (Optional) Transit Gateway Attachment ID to attach to -* `vpcId` - (Optional) VPC ID to attach to +* `deliverCrossAccountRole` - (Optional) ARN of the IAM role in the destination account used for cross-account delivery of flow logs. +* `eniId` - (Optional) Elastic Network Interface ID to attach to. +* `iamRoleArn` - (Optional) ARN of the IAM role used to post flow logs. Corresponds to `DeliverLogsPermissionArn` in the [AWS API](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateFlowLogs.html). +* `logDestinationType` - (Optional) Logging destination type. Valid values: `cloud-watch-logs`, `s3`, `kinesis-data-firehose`. Default: `cloud-watch-logs`. +* `logDestination` - (Optional) ARN of the logging destination. +* `subnetId` - (Optional) Subnet ID to attach to. +* `transitGatewayId` - (Optional) Transit Gateway ID to attach to. +* `transitGatewayAttachmentId` - (Optional) Transit Gateway Attachment ID to attach to. +* `vpcId` - (Optional) VPC ID to attach to. * `logFormat` - (Optional) The fields to include in the flow log record. 
Accepted format example: `"$${interface-id} $${srcaddr} $${dstaddr} $${srcport} $${dstport}"`. -* `maxAggregationInterval` - (Optional) The maximum interval of time - during which a flow of packets is captured and aggregated into a flow - log record. Valid Values: `60` seconds (1 minute) or `600` seconds (10 - minutes). Default: `600`. When `transitGatewayId` or `transitGatewayAttachmentId` is specified, `maxAggregationInterval` *must* be 60 seconds (1 minute). +* `maxAggregationInterval` - (Optional) The maximum interval of time during which a flow of packets is captured and aggregated into a flow log record. + Valid Values: `60` seconds (1 minute) or `600` seconds (10 minutes). Default: `600`. + When `transitGatewayId` or `transitGatewayAttachmentId` is specified, `maxAggregationInterval` *must* be 60 seconds (1 minute). * `destinationOptions` - (Optional) Describes the destination options for a flow log. More details below. * `tags` - (Optional) Key-value map of resource tags. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. @@ -294,7 +469,7 @@ This resource supports the following arguments: Describes the destination options for a flow log. -* `fileFormat` - (Optional) The format for the flow log. Default value: `plain-text`. Valid values: `plain-text`, `parquet`. +* `fileFormat` - (Optional) File format for the flow log. Default value: `plain-text`. Valid values: `plain-text`, `parquet`. * `hiveCompatiblePartitions` - (Optional) Indicates whether to use Hive-compatible prefixes for flow logs stored in Amazon S3. Default value: `false`. * `perHourPartition` - (Optional) Indicates whether to partition the flow log per hour. This reduces the cost and response time for queries. Default value: `false`. 
@@ -302,8 +477,8 @@ Describes the destination options for a flow log. This resource exports the following attributes in addition to the arguments above: -* `id` - The Flow Log ID -* `arn` - The ARN of the Flow Log. +* `id` - Flow Log ID. +* `arn` - ARN of the Flow Log. * `tagsAll` - A map of tags assigned to the resource, including those inherited from the provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). ## Import @@ -334,4 +509,4 @@ Using `terraform import`, import Flow Logs using the `id`. For example: % terraform import aws_flow_log.test_flow_log fl-1a2b3c4d ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/fms_policy.html.markdown b/website/docs/cdktf/typescript/r/fms_policy.html.markdown index 87f411836b60..cb1c4dfb52f6 100644 --- a/website/docs/cdktf/typescript/r/fms_policy.html.markdown +++ b/website/docs/cdktf/typescript/r/fms_policy.html.markdown @@ -73,6 +73,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required, Forces new resource) The friendly name of the AWS Firewall Manager Policy. * `deleteAllPolicyResources` - (Optional) If true, the request will also perform a clean-up process. Defaults to `true`. More information can be found here [AWS Firewall Manager delete policy](https://docs.aws.amazon.com/fms/2018-01-01/APIReference/API_DeletePolicy.html) * `deleteUnusedFmManagedResources` - (Optional) If true, Firewall Manager will automatically remove protections from resources that leave the policy scope. Defaults to `false`. 
More information can be found here [AWS Firewall Manager policy contents](https://docs.aws.amazon.com/fms/2018-01-01/APIReference/API_Policy.html) @@ -81,6 +82,7 @@ This resource supports the following arguments: * `excludeResourceTags` - (Required, Forces new resource) A boolean value, if true the tags that are specified in the `resourceTags` are not protected by this policy. If set to false and resource_tags are populated, resources that contain tags will be protected by this policy. * `includeMap` - (Optional) A map of lists of accounts and OU's to include in the policy. See the [`includeMap`](#include_map-configuration-block) block. * `remediationEnabled` - (Required) A boolean value, indicates if the policy should automatically applied to resources that already exist in the account. +* `resourceTagLogicalOperator` - (Optional) Controls how multiple resource tags are combined: with AND, so that a resource must have all tags to be included or excluded, or OR, so that a resource must have at least one tag. The valid values are `AND` and `OR`. * `resourceTags` - (Optional) A map of resource tags, that if present will filter protections on resources based on the exclude_resource_tags. * `resourceType` - (Optional) A resource type to protect. Conflicts with `resourceTypeList`. See the [FMS API Reference](https://docs.aws.amazon.com/fms/2018-01-01/APIReference/API_Policy.html#fms-Type-Policy-ResourceType) for more information about supported values. * `resourceTypeList` - (Optional) A list of resource types to protect. Conflicts with `resourceType`. See the [FMS API Reference](https://docs.aws.amazon.com/fms/2018-01-01/APIReference/API_Policy.html#fms-Type-Policy-ResourceType) for more information about supported values. Lists with only one element are not supported, instead use `resourceType`. @@ -202,4 +204,4 @@ Using `terraform import`, import Firewall Manager policies using the policy ID. 
% terraform import aws_fms_policy.example 5be49585-a7e3-4c49-dde1-a179fe4a619a ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/fms_resource_set.html.markdown b/website/docs/cdktf/typescript/r/fms_resource_set.html.markdown index 146721a1a68d..f4b544a9ebdd 100644 --- a/website/docs/cdktf/typescript/r/fms_resource_set.html.markdown +++ b/website/docs/cdktf/typescript/r/fms_resource_set.html.markdown @@ -43,8 +43,9 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -The following arguments are required: +This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `resourceSet` - (Required) Details about the resource set to be created or updated. See [`resourceSet` Attribute Reference](#resource_set-attribute-reference) below. ### `resourceSet` Attribute Reference @@ -102,4 +103,4 @@ Using `terraform import`, import FMS (Firewall Manager) Resource Set using the ` % terraform import aws_fms_resource_set.example resource_set-id-12345678 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/fsx_backup.html.markdown b/website/docs/cdktf/typescript/r/fsx_backup.html.markdown index 8203228fc369..84f081afe0e0 100644 --- a/website/docs/cdktf/typescript/r/fsx_backup.html.markdown +++ b/website/docs/cdktf/typescript/r/fsx_backup.html.markdown @@ -144,12 +144,13 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: -Note - Only file_system_id or volume_id can be specified. file_system_id is used for Lustre and Windows, volume_id is used for ONTAP. 
- + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `fileSystemId` - (Optional) The ID of the file system to back up. Required if backing up Lustre or Windows file systems. * `tags` - (Optional) A map of tags to assign to the file system. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. If you have set `copyTagsToBackups` to true, and you specify one or more tags, no existing file system tags are copied from the file system to the backup. * `volumeId` - (Optional) The ID of the volume to back up. Required if backing up a ONTAP Volume. +Note - Only one of `fileSystemId` or `volumeId` can be specified. `fileSystemId` is used for Lustre and Windows, `volumeId` is used for ONTAP. + ## Attribute Reference This resource exports the following attributes in addition to the arguments above: @@ -196,4 +197,4 @@ Using `terraform import`, import FSx Backups using the `id`.
For example: % terraform import aws_fsx_backup.example fs-543ab12b1ca672f33 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/fsx_data_repository_association.html.markdown b/website/docs/cdktf/typescript/r/fsx_data_repository_association.html.markdown index c84c9e8cc338..096a99b1239d 100644 --- a/website/docs/cdktf/typescript/r/fsx_data_repository_association.html.markdown +++ b/website/docs/cdktf/typescript/r/fsx_data_repository_association.html.markdown @@ -73,6 +73,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `batchImportMetaDataOnCreate` - (Optional) Set to true to run an import data repository task to import metadata from the data repository to the file system after the data repository association is created. Defaults to `false`. * `dataRepositoryPath` - (Required) The path to the Amazon S3 data repository that will be linked to the file system. The path must be an S3 bucket s3://myBucket/myPrefix/. This path specifies where in the S3 data repository files will be imported from or exported to. The same S3 bucket cannot be linked more than once to the same file system. * `fileSystemId` - (Required) The ID of the Amazon FSx file system to on which to create a data repository association. 
@@ -140,4 +141,4 @@ Using `terraform import`, import FSx Data Repository Associations using the `id` % terraform import aws_fsx_data_repository_association.example dra-0b1cfaeca11088b10 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/fsx_file_cache.html.markdown b/website/docs/cdktf/typescript/r/fsx_file_cache.html.markdown index 0c0504365155..4f7fb60ee910 100644 --- a/website/docs/cdktf/typescript/r/fsx_file_cache.html.markdown +++ b/website/docs/cdktf/typescript/r/fsx_file_cache.html.markdown @@ -74,6 +74,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `copyTagsToDataRepositoryAssociations` - A boolean flag indicating whether tags for the cache should be copied to data repository associations. This value defaults to false. * `dataRepositoryAssociation` - See the [`dataRepositoryAssociation` configuration](#data-repository-association-arguments) block. Max of 8. A list of up to 8 configurations for data repository associations (DRAs) to be created during the cache creation. The DRAs link the cache to either an Amazon S3 data repository or a Network File System (NFS) data repository that supports the NFSv3 protocol. The DRA configurations must meet the following requirements: 1) All configurations on the list must be of the same data repository type, either all S3 or all NFS. A cache can't link to different data repository types at the same time. 2) An NFS DRA must link to an NFS file system that supports the NFSv3 protocol. DRA automatic import and automatic export is not supported. 
@@ -161,4 +162,4 @@ Using `terraform import`, import Amazon File Cache cache using the resource `id` % terraform import aws_fsx_file_cache.example fc-8012925589 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/fsx_lustre_file_system.html.markdown b/website/docs/cdktf/typescript/r/fsx_lustre_file_system.html.markdown index ab70bd9dbac8..220692cfdbda 100644 --- a/website/docs/cdktf/typescript/r/fsx_lustre_file_system.html.markdown +++ b/website/docs/cdktf/typescript/r/fsx_lustre_file_system.html.markdown @@ -40,12 +40,13 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -The following arguments are required: +This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `subnetIds` - (Required) A list of IDs for the subnets that the file system will be accessible from. File systems currently support only one subnet. The file server is also launched in that subnet's Availability Zone. The following arguments are optional: * `autoImportPolicy` - (Optional) How Amazon FSx keeps your file and directory listings up to date as you add or modify objects in your linked S3 bucket. see [Auto Import Data Repo](https://docs.aws.amazon.com/fsx/latest/LustreGuide/autoimport-data-repo.html) for more details. Only supported on `PERSISTENT_1` deployment types.
* `automaticBackupRetentionDays` - (Optional) The number of days to retain automatic backups. Setting this to 0 disables automatic backups. You can retain automatic backups for a maximum of 90 days. only valid for `PERSISTENT_1` and `PERSISTENT_2` deployment_type. * `backupId` - (Optional) The ID of the source backup to create the filesystem from. @@ -72,7 +74,7 @@ The following arguments are optional: **Note:** If the filesystem uses a Scratch deployment type, final backup during delete will always be skipped and this argument will not be used even when set. * `storageCapacity` - (Optional) The storage capacity (GiB) of the file system. Minimum of `1200`. See more details at [Allowed values for Fsx storage capacity](https://docs.aws.amazon.com/fsx/latest/APIReference/API_CreateFileSystem.html#FSx-CreateFileSystem-request-StorageCapacity). Update is allowed only for `SCRATCH_2`, `PERSISTENT_1` and `PERSISTENT_2` deployment types, See more details at [Fsx Storage Capacity Update](https://docs.aws.amazon.com/fsx/latest/APIReference/API_UpdateFileSystem.html#FSx-UpdateFileSystem-request-StorageCapacity). Required when not creating filesystem for a backup. -* `storageType` - (Optional) - The filesystem storage type. One of `SSD`, `HDD` or `INTELLIGENT_TIERING`, defaults to `SSD`. `HDD` is only supported on `PERSISTENT_1` deployment types. `INTELLIGENT_TIERING` requires `data_read_cache_configuration` and `metadataConfiguration` to be set and is only supported for `PERSISTENT_2` deployment types. +* `storageType` - (Optional) - The filesystem storage type. One of `SSD`, `HDD` or `INTELLIGENT_TIERING`, defaults to `SSD`. `HDD` is only supported on `PERSISTENT_1` deployment types. `INTELLIGENT_TIERING` requires `dataReadCacheConfiguration` and `metadataConfiguration` to be set and is only supported for `PERSISTENT_2` deployment types. * `tags` - (Optional) A map of tags to assign to the file system. 
If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. * `throughputCapacity` - (Optional) Throughput in MBps required for the `INTELLIGENT_TIERING` storage type. Must be 4000 or multiples of 4000. * `weeklyMaintenanceStartTime` - (Optional) The preferred start time (in `d:HH:MM` format) to perform weekly maintenance, in the UTC time zone. @@ -100,12 +102,12 @@ The `rootSquashConfiguration` configuration block supports the following argumen * `noSquashNids` - (Optional) When root squash is enabled, you can optionally specify an array of NIDs of clients for which root squash does not apply. A client NID is a Lustre Network Identifier used to uniquely identify a client. You can specify the NID as either a single address or a range of addresses: 1. A single address is described in standard Lustre NID format by specifying the client’s IP address followed by the Lustre network ID (for example, 10.0.1.6@tcp). 2. An address range is described using a dash to separate the range (for example, 10.0.[2-10].[1-255]@tcp). * `rootSquash` - (Optional) You enable root squash by setting a user ID (UID) and group ID (GID) for the file system in the format UID:GID (for example, 365534:65534). The UID and GID values can range from 0 to 4294967294. -### `data_read_cache_configuration` Block +### `dataReadCacheConfiguration` Block -The `data_read_cache_configuration` configuration block supports the following arguments: +The `dataReadCacheConfiguration` configuration block supports the following arguments: -* `size` - (Optional) Size of the file system's SSD read cache, in gibibytes (GiB). Required when the `sizing_mode` is `USER_PROVISIONED`. -* `sizing_mode` - (Required) Sizing mode for the cache. Valud values are `NO_CACHE`, `USER_PROVISIONED`, and `PROPORTIONAL_TO_THROUGHPUT_CAPACITY`. 
+* `size` - (Optional) Size of the file system's SSD read cache, in gibibytes (GiB). Required when the `sizingMode` is `USER_PROVISIONED`. +* `sizingMode` - (Required) Sizing mode for the cache. Valid values are `NO_CACHE`, `USER_PROVISIONED`, and `PROPORTIONAL_TO_THROUGHPUT_CAPACITY`. ## Attribute Reference @@ -189,4 +191,4 @@ class MyConvertedCode extends TerraformStack { ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/fsx_ontap_file_system.html.markdown b/website/docs/cdktf/typescript/r/fsx_ontap_file_system.html.markdown index af84f72912c2..23e7d1bfd7f9 100644 --- a/website/docs/cdktf/typescript/r/fsx_ontap_file_system.html.markdown +++ b/website/docs/cdktf/typescript/r/fsx_ontap_file_system.html.markdown @@ -118,6 +118,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `storageCapacity` - (Required) The storage capacity (GiB) of the file system. Valid values between `1024` and `196608` for file systems with deployment_type `SINGLE_AZ_1` and `MULTI_AZ_1`. Valid values are between `1024` and `524288` for `MULTI_AZ_2`. Valid values between `1024` (`1024` per ha pair) and `1048576` for file systems with deployment_type `SINGLE_AZ_2`. For `SINGLE_AZ_2`, the `1048576` (1PB) maximum is only supported when using 2 or more ha_pairs, the maximum is `524288` (512TB) when using 1 ha_pair. * `subnetIds` - (Required) A list of IDs for the subnets that the file system will be accessible from. Up to 2 subnets can be provided. * `preferredSubnetId` - (Required) The ID for a subnet. A subnet is a range of IP addresses in your virtual private cloud (VPC).
@@ -242,4 +243,4 @@ class MyConvertedCode extends TerraformStack { ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/fsx_ontap_storage_virtual_machine.html.markdown b/website/docs/cdktf/typescript/r/fsx_ontap_storage_virtual_machine.html.markdown index c3afbd452a75..efc1acbaf630 100644 --- a/website/docs/cdktf/typescript/r/fsx_ontap_storage_virtual_machine.html.markdown +++ b/website/docs/cdktf/typescript/r/fsx_ontap_storage_virtual_machine.html.markdown @@ -76,6 +76,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `activeDirectoryConfiguration` - (Optional) Configuration block that Amazon FSx uses to join the FSx ONTAP Storage Virtual Machine(SVM) to your Microsoft Active Directory (AD) directory. Detailed below. * `fileSystemId` - (Required) The ID of the Amazon FSx ONTAP File System that this SVM will be created on. * `name` - (Required) The name of the SVM. You can use a maximum of 47 alphanumeric characters, plus the underscore (_) special character. 
@@ -195,4 +196,4 @@ class MyConvertedCode extends TerraformStack { ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/fsx_ontap_volume.html.markdown b/website/docs/cdktf/typescript/r/fsx_ontap_volume.html.markdown index 8712401bd85f..598732d77c45 100644 --- a/website/docs/cdktf/typescript/r/fsx_ontap_volume.html.markdown +++ b/website/docs/cdktf/typescript/r/fsx_ontap_volume.html.markdown @@ -86,6 +86,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `aggregateConfiguration` - (Optional) The Aggregate configuration only applies to `FLEXGROUP` volumes. See [`aggregateConfiguration` Block] for details. * `bypassSnaplockEnterpriseRetention` - (Optional) Setting this to `true` allows a SnapLock administrator to delete an FSx for ONTAP SnapLock Enterprise volume with unexpired write once, read many (WORM) files. This configuration must be applied separately before attempting to delete the resource to have the desired behavior. Defaults to `false`. * `copyTagsToBackups` - (Optional) A boolean flag indicating whether tags for the volume should be copied to backups. This value defaults to `false`. @@ -217,4 +218,4 @@ Using `terraform import`, import FSx ONTAP volume using the `id`. 
For example: % terraform import aws_fsx_ontap_volume.example fsvol-12345678abcdef123 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/fsx_openzfs_file_system.html.markdown b/website/docs/cdktf/typescript/r/fsx_openzfs_file_system.html.markdown index 340e24f520d2..fae4cda72ad5 100644 --- a/website/docs/cdktf/typescript/r/fsx_openzfs_file_system.html.markdown +++ b/website/docs/cdktf/typescript/r/fsx_openzfs_file_system.html.markdown @@ -49,6 +49,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `automaticBackupRetentionDays` - (Optional) The number of days to retain automatic backups. Setting this to 0 disables automatic backups. You can retain automatic backups for a maximum of 90 days. * `backupId` - (Optional) The ID of the source backup to create the filesystem from. * `copyTagsToBackups` - (Optional) A boolean flag indicating whether tags for the file system should be copied to backups. The default value is false. @@ -65,6 +66,7 @@ The following arguments are optional: * `securityGroupIds` - (Optional) A list of IDs for the security groups that apply to the specified network interfaces created for file system access. These security groups will apply to all network interfaces. * `skipFinalBackup` - (Optional) When enabled, will skip the default final backup taken when the file system is deleted. This configuration must be applied separately before attempting to delete the resource to have the desired behavior. Defaults to `false`. * `storageType` - (Optional) The filesystem storage type. Only `SSD` is supported. 
+* `userAndGroupQuotas` - (Optional) - Specify how much storage users or groups can use on the filesystem. Maximum number of items defined by [FSx for OpenZFS Resource quota](https://docs.aws.amazon.com/fsx/latest/OpenZFSGuide/limits.html#limits-openzfs-resources-file-system). See [`userAndGroupQuotas` Block](#user_and_group_quotas-block) Below. * `tags` - (Optional) A map of tags to assign to the file system. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. * `weeklyMaintenanceStartTime` - (Optional) The preferred start time (in `d:HH:MM` format) to perform weekly maintenance, in the UTC time zone. @@ -194,4 +196,4 @@ class MyConvertedCode extends TerraformStack { ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/fsx_openzfs_snapshot.html.markdown b/website/docs/cdktf/typescript/r/fsx_openzfs_snapshot.html.markdown index 6290d1680847..8459c68a994e 100644 --- a/website/docs/cdktf/typescript/r/fsx_openzfs_snapshot.html.markdown +++ b/website/docs/cdktf/typescript/r/fsx_openzfs_snapshot.html.markdown @@ -98,6 +98,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The name of the Snapshot. You can use a maximum of 203 alphanumeric characters plus either _ or - or : or . for the name. * `tags` - (Optional) A map of tags to assign to the file system. 
If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. If you have set `copyTagsToBackups` to true, and you specify one or more tags, no existing file system tags are copied from the file system to the backup. * `volumeId` - (Optional) The ID of the volume to snapshot. This can be the root volume or a child volume. @@ -150,4 +151,4 @@ Using `terraform import`, import FSx OpenZFS snapshot using the `id`. For exampl % terraform import aws_fsx_openzfs_snapshot.example fs-543ab12b1ca672f33 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/fsx_openzfs_volume.html.markdown b/website/docs/cdktf/typescript/r/fsx_openzfs_volume.html.markdown index ba3587540cc8..eb6f3a0b3869 100644 --- a/website/docs/cdktf/typescript/r/fsx_openzfs_volume.html.markdown +++ b/website/docs/cdktf/typescript/r/fsx_openzfs_volume.html.markdown @@ -40,6 +40,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The name of the Volume. You can use a maximum of 203 alphanumeric characters, plus the underscore (_) special character. * `parentVolumeId` - (Required) The volume id of volume that will be the parent volume for the volume being created, this could be the root volume created from the `aws_fsx_openzfs_file_system` resource with the `rootVolumeId` or the `id` property of another `aws_fsx_openzfs_volume`. 
* `copyTagsToSnapshots` - (Optional) A boolean flag indicating whether tags for the file system should be copied to snapshots. The default value is false. @@ -51,7 +52,7 @@ This resource supports the following arguments: * `originSnapshot` - (Optional) Specifies the configuration to use when creating the OpenZFS volume. See [`originSnapshot` Block](#origin_snapshot-block) below for details. * `storageCapacityQuotaGib` - (Optional) The maximum amount of storage in gibibytes (GiB) that the volume can use from its parent. * `storageCapacityReservationGib` - (Optional) The amount of storage in gibibytes (GiB) to reserve from the parent volume. -* `userAndGroupQuotas` - (Optional) - Specify how much storage users or groups can use on the volume. Maximum of 100 items. See [`userAndGroupQuotas` Block](#user_and_group_quotas-block) Below. +* `userAndGroupQuotas` - (Optional) - Specify how much storage users or groups can use on the volume. Maximum number of items defined by [FSx for OpenZFS Resource quota](https://docs.aws.amazon.com/fsx/latest/OpenZFSGuide/limits.html#limits-openzfs-resources-file-system). See [`userAndGroupQuotas` Block](#user_and_group_quotas-block) Below. * `tags` - (Optional) A map of tags to assign to the file system. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. ### `nfsExports` Block @@ -130,4 +131,4 @@ Using `terraform import`, import FSx Volumes using the `id`. 
For example: % terraform import aws_fsx_openzfs_volume.example fsvol-543ab12b1ca672f33 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/fsx_s3_access_point_attachment.html.markdown b/website/docs/cdktf/typescript/r/fsx_s3_access_point_attachment.html.markdown new file mode 100644 index 000000000000..73e6fd6aa467 --- /dev/null +++ b/website/docs/cdktf/typescript/r/fsx_s3_access_point_attachment.html.markdown @@ -0,0 +1,148 @@ +--- +subcategory: "FSx" +layout: "aws" +page_title: "AWS: aws_fsx_s3_access_point_attachment" +description: |- + Manages an Amazon FSx S3 Access Point attachment. +--- + + + +# Resource: aws_fsx_s3_access_point_attachment + +Manages an Amazon FSx S3 Access Point attachment. + +## Example Usage + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { Token, TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. + */ +import { FsxS3AccessPointAttachment } from "./.gen/providers/aws/fsx-s3-access-point-attachment"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + new FsxS3AccessPointAttachment(this, "example", { + name: "example-attachment", + openzfsConfiguration: [ + { + fileSystemIdentity: [ + { + posixUser: [ + { + gid: 1001, + uid: 1001, + }, + ], + type: "POSIX", + }, + ], + volumeId: Token.asString(awsFsxOpenzfsVolumeExample.id), + }, + ], + type: "OPENZFS", + }); + } +} + +``` + +## Argument Reference + +The following arguments are required: + +* `name` - (Required) Name of the S3 access point. +* `openzfsConfiguration` - (Required) Configuration to use when creating and attaching an S3 access point to an FSx for OpenZFS volume. See [`openzfsConfiguration` Block](#openzfs_configuration-block) for details. 
+* `type` - (Required) Type of S3 access point. Valid values: `OPENZFS`. + +The following arguments are optional: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `s3AccessPoint` - (Optional) S3 access point configuration. See [`s3AccessPoint` Block](#s3_access_point-block) for details. + +### `openzfsConfiguration` Block + +The `openzfsConfiguration` configuration block supports the following arguments: + +* `fileSystemIdentity` - (Required) File system user identity to use for authorizing file read and write requests that are made using the S3 access point. See [`fileSystemIdentity` Block](#file_system_identity-block) for details. +* `volumeId` - (Required) ID of the FSx for OpenZFS volume to which the S3 access point is attached. + +### `fileSystemIdentity` Block + +The `fileSystemIdentity` configuration block supports the following arguments: + +* `posixUser` - (Required) UID and GIDs of the file system POSIX user. See [`posixUser` Block](#posix_user-block) for details. +* `type` - (Required) FSx for OpenZFS user identity type. Valid values: `POSIX`. + +### `posixUser` Block + +The `posixUser` configuration block supports the following arguments: + +* `gid` - (Required) GID of the file system user. +* `secondaryGids` - (Optional) List of secondary GIDs for the file system user. +* `uid` - (Required) UID of the file system user. + +### `s3AccessPoint` Block + +The `s3AccessPoint` configuration block supports the following arguments: + +* `policy` - (Required) Access policy associated with the S3 access point configuration. +* `vpcConfiguration` - (Optional) Amazon S3 restricts access to the S3 access point to requests made from the specified VPC. 
See [`vpcConfiguration` Block](#vpc_configuration-block) for details. + +### `vpcConfiguration` Block + +The `vpcConfiguration` configuration block supports the following arguments: + +* `vpcId` - (Required) VPC ID. + +## Attribute Reference + +This resource exports the following attributes in addition to the arguments above: + +* `s3AccessPointAlias` - S3 access point's alias. +* `s3AccessPointArn` - S3 access point's ARN. + +## Timeouts + +[Configuration options](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts): + +* `create` - (Default `15m`) +* `delete` - (Default `15m`) + +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import FSx S3 Access Point attachments using the `name`. For example: + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. + */ +import { FsxS3AccessPointAttachment } from "./.gen/providers/aws/fsx-s3-access-point-attachment"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + FsxS3AccessPointAttachment.generateConfigForImport( + this, + "example", + "example-attachment" + ); + } +} + +``` + +Using `terraform import`, import FSx S3 Access Point attachments using the `name`. 
For example: + +```console +% terraform import aws_fsx_s3_access_point_attachment.example example-attachment +``` + + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/fsx_windows_file_system.html.markdown b/website/docs/cdktf/typescript/r/fsx_windows_file_system.html.markdown index a25f31e75a2d..340a6c4d19e2 100644 --- a/website/docs/cdktf/typescript/r/fsx_windows_file_system.html.markdown +++ b/website/docs/cdktf/typescript/r/fsx_windows_file_system.html.markdown @@ -86,6 +86,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `activeDirectoryId` - (Optional) The ID for an existing Microsoft Active Directory instance that the file system should join when it's created. Cannot be specified with `selfManagedActiveDirectory`. * `aliases` - (Optional) An array DNS alias names that you want to associate with the Amazon FSx file system. For more information, see [Working with DNS Aliases](https://docs.aws.amazon.com/fsx/latest/WindowsGuide/managing-dns-aliases.html) * `auditLogConfiguration` - (Optional) The configuration that Amazon FSx for Windows File Server uses to audit and log user accesses of files, folders, and file shares on the Amazon FSx for Windows File Server file system. See [`auditLogConfiguration` Block](#audit_log_configuration-block) for details. 
@@ -217,4 +218,4 @@ class MyConvertedCode extends TerraformStack { ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/gamelift_alias.html.markdown b/website/docs/cdktf/typescript/r/gamelift_alias.html.markdown index 16367cb2b32c..57f358042eb8 100644 --- a/website/docs/cdktf/typescript/r/gamelift_alias.html.markdown +++ b/website/docs/cdktf/typescript/r/gamelift_alias.html.markdown @@ -43,6 +43,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the alias. * `description` - (Optional) Description of the alias. * `routingStrategy` - (Required) Specifies the fleet and/or routing type to use for the alias. @@ -92,4 +93,4 @@ Using `terraform import`, import GameLift Aliases using the ID. For example: % terraform import aws_gamelift_alias.example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/gamelift_build.html.markdown b/website/docs/cdktf/typescript/r/gamelift_build.html.markdown index 8fb9fd30090a..91d9b8d98cfe 100644 --- a/website/docs/cdktf/typescript/r/gamelift_build.html.markdown +++ b/website/docs/cdktf/typescript/r/gamelift_build.html.markdown @@ -44,6 +44,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
* `name` - (Required) Name of the build * `operatingSystem` - (Required) Operating system that the game server binaries are built to run on. Valid values: `WINDOWS_2012`, `AMAZON_LINUX`, `AMAZON_LINUX_2`, `WINDOWS_2016`, `AMAZON_LINUX_2023`. * `storageLocation` - (Required) Information indicating where your game build files are stored. See below. @@ -95,4 +96,4 @@ Using `terraform import`, import GameLift Builds using the ID. For example: % terraform import aws_gamelift_build.example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/gamelift_fleet.html.markdown b/website/docs/cdktf/typescript/r/gamelift_fleet.html.markdown index b9a6981933db..2c28319de85a 100644 --- a/website/docs/cdktf/typescript/r/gamelift_fleet.html.markdown +++ b/website/docs/cdktf/typescript/r/gamelift_fleet.html.markdown @@ -34,7 +34,8 @@ resource "aws_gamelift_fleet" "example" { This resource supports the following arguments: -* `buildId` - (Optional) ID of the GameLift Build to be deployed on the fleet. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `buildId` - (Optional) ID of the GameLift Build to be deployed on the fleet. Conflicts with `scriptId`. * `certificateConfiguration` - (Optional) Prompts GameLift to generate a TLS/SSL certificate for the fleet. See [certificate_configuration](#certificate_configuration). * `description` - (Optional) Human-readable description of the fleet. * `ec2InboundPermission` - (Optional) Range of IP addresses and port settings that permit inbound traffic to access server processes running on the fleet. See below. 
@@ -46,7 +47,7 @@ This resource supports the following arguments: * `newGameSessionProtectionPolicy` - (Optional) Game session protection policy to apply to all instances in this fleetE.g., `FullProtection`. Defaults to `NoProtection`. * `resourceCreationLimitPolicy` - (Optional) Policy that limits the number of game sessions an individual player can create over a span of time for this fleet. See below. * `runtimeConfiguration` - (Optional) Instructions for launching server processes on each instance in the fleet. See below. -* `scriptId` - (Optional) ID of the GameLift Script to be deployed on the fleet. +* `scriptId` - (Optional) ID of the GameLift Script to be deployed on the fleet. Conflicts with `buildId`. * `tags` - (Optional) Key-value map of resource tags. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. ### Nested Fields @@ -125,4 +126,4 @@ Using `terraform import`, import GameLift Fleets using the ID. For example: % terraform import aws_gamelift_fleet.example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/gamelift_game_server_group.html.markdown b/website/docs/cdktf/typescript/r/gamelift_game_server_group.html.markdown index c8bd9be84174..5e71c668bda9 100644 --- a/website/docs/cdktf/typescript/r/gamelift_game_server_group.html.markdown +++ b/website/docs/cdktf/typescript/r/gamelift_game_server_group.html.markdown @@ -162,6 +162,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `balancingStrategy` - (Optional) Indicates how GameLift FleetIQ balances the use of Spot Instances and On-Demand Instances. Valid values: `SPOT_ONLY`, `SPOT_PREFERRED`, `ON_DEMAND_ONLY`. Defaults to `SPOT_PREFERRED`. * `gameServerGroupName` - (Required) Name of the game server group. @@ -263,4 +264,4 @@ Using `terraform import`, import GameLift Game Server Group using the `name`. Fo % terraform import aws_gamelift_game_server_group.example example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/gamelift_game_session_queue.html.markdown b/website/docs/cdktf/typescript/r/gamelift_game_session_queue.html.markdown index 33bcd27a8abc..70b4e45ce692 100644 --- a/website/docs/cdktf/typescript/r/gamelift_game_session_queue.html.markdown +++ b/website/docs/cdktf/typescript/r/gamelift_game_session_queue.html.markdown @@ -50,6 +50,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the session queue. * `timeoutInSeconds` - (Required) Maximum time a game session request can remain in the queue. * `customEventData` - (Optional) Information to be added to all events that are related to this game session queue. 
@@ -104,4 +105,4 @@ Using `terraform import`, import GameLift Game Session Queues using their `name` % terraform import aws_gamelift_game_session_queue.example example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/gamelift_script.html.markdown b/website/docs/cdktf/typescript/r/gamelift_script.html.markdown index f638e68e70e4..dc3a690310f3 100644 --- a/website/docs/cdktf/typescript/r/gamelift_script.html.markdown +++ b/website/docs/cdktf/typescript/r/gamelift_script.html.markdown @@ -43,6 +43,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the script * `storageLocation` - (Optional) Information indicating where your game script files are stored. See below. * `version` - (Optional) Version that is associated with this script. @@ -94,4 +95,4 @@ Using `terraform import`, import GameLift Scripts using the ID. For example: % terraform import aws_gamelift_script.example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/glacier_vault.html.markdown b/website/docs/cdktf/typescript/r/glacier_vault.html.markdown index 66b940be5dc0..88c6707dde68 100644 --- a/website/docs/cdktf/typescript/r/glacier_vault.html.markdown +++ b/website/docs/cdktf/typescript/r/glacier_vault.html.markdown @@ -73,6 +73,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The name of the Vault. Names can be between 1 and 255 characters long and the valid characters are a-z, A-Z, 0-9, '_' (underscore), '-' (hyphen), and '.' (period). * `accessPolicy` - (Optional) The policy document. This is a JSON formatted string. The heredoc syntax or `file` function is helpful here. Use the [Glacier Developer Guide](https://docs.aws.amazon.com/amazonglacier/latest/dev/vault-access-policy.html) for more information on Glacier Vault Policy @@ -120,4 +121,4 @@ Using `terraform import`, import Glacier Vaults using the `name`. For example: % terraform import aws_glacier_vault.archive my_archive ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/glacier_vault_lock.html.markdown b/website/docs/cdktf/typescript/r/glacier_vault_lock.html.markdown index 45a6b76c7c86..cfaa65dedddb 100644 --- a/website/docs/cdktf/typescript/r/glacier_vault_lock.html.markdown +++ b/website/docs/cdktf/typescript/r/glacier_vault_lock.html.markdown @@ -101,6 +101,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `completeLock` - (Required) Boolean whether to permanently apply this Glacier Lock Policy. Once completed, this cannot be undone. If set to `false`, the Glacier Lock Policy remains in a testing mode for 24 hours. After that time, the Glacier Lock Policy is automatically removed by Glacier and the Terraform resource will show as needing recreation. 
Changing this from `false` to `true` will show as resource recreation, which is expected. Changing this from `true` to `false` is not possible unless the Glacier Vault is recreated at the same time. * `policy` - (Required) JSON string containing the IAM policy to apply as the Glacier Vault Lock policy. * `vaultName` - (Required) The name of the Glacier Vault. @@ -140,4 +141,4 @@ Using `terraform import`, import Glacier Vault Locks using the Glacier Vault nam % terraform import aws_glacier_vault_lock.example example-vault ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/globalaccelerator_accelerator.html.markdown b/website/docs/cdktf/typescript/r/globalaccelerator_accelerator.html.markdown index bbca35fe44b5..ef3b6c3e5ab2 100644 --- a/website/docs/cdktf/typescript/r/globalaccelerator_accelerator.html.markdown +++ b/website/docs/cdktf/typescript/r/globalaccelerator_accelerator.html.markdown @@ -89,6 +89,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_globalaccelerator_accelerator.example + identity = { + "arn" = "arn:aws:globalaccelerator::123456789012:accelerator/1234abcd-abcd-1234-abcd-1234abcdefgh" + } +} + +resource "aws_globalaccelerator_accelerator" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the Global Accelerator accelerator. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Global Accelerator accelerators using the `arn`. 
For example: ```typescript @@ -119,4 +140,4 @@ Using `terraform import`, import Global Accelerator accelerators using the `arn` % terraform import aws_globalaccelerator_accelerator.example arn:aws:globalaccelerator::111111111111:accelerator/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/globalaccelerator_cross_account_attachment.html.markdown b/website/docs/cdktf/typescript/r/globalaccelerator_cross_account_attachment.html.markdown index ebadc0919943..114e15933415 100644 --- a/website/docs/cdktf/typescript/r/globalaccelerator_cross_account_attachment.html.markdown +++ b/website/docs/cdktf/typescript/r/globalaccelerator_cross_account_attachment.html.markdown @@ -101,7 +101,28 @@ This resource exports the following attributes in addition to the arguments abov ## Import -In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Global Accelerator Cross Account Attachment using the `example_id_arg`. For example: +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_globalaccelerator_cross_account_attachment.example + identity = { + "arn" = "arn:aws:globalaccelerator::123456789012:attachment/1234abcd-abcd-1234-abcd-1234abcdefgh" + } +} + +resource "aws_globalaccelerator_cross_account_attachment" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the Global Accelerator cross-account attachment. + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Global Accelerator Cross Account Attachment using the `arn`. For example: ```typescript // DO NOT EDIT. 
Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug @@ -125,10 +146,10 @@ class MyConvertedCode extends TerraformStack { ``` -Using `terraform import`, import Global Accelerator Cross Account Attachment using the `example_id_arg`. For example: +Using `terraform import`, import Global Accelerator Cross Account Attachment using the `arn`. For example: ```console % terraform import aws_globalaccelerator_cross_account_attachment.example arn:aws:globalaccelerator::012345678910:attachment/01234567-abcd-8910-efgh-123456789012 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/globalaccelerator_custom_routing_accelerator.html.markdown b/website/docs/cdktf/typescript/r/globalaccelerator_custom_routing_accelerator.html.markdown index 18b9b6f3202c..14089b3d3fc4 100644 --- a/website/docs/cdktf/typescript/r/globalaccelerator_custom_routing_accelerator.html.markdown +++ b/website/docs/cdktf/typescript/r/globalaccelerator_custom_routing_accelerator.html.markdown @@ -88,6 +88,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_globalaccelerator_custom_routing_accelerator.example + identity = { + "arn" = "arn:aws:globalaccelerator::123456789012:accelerator/1234abcd-abcd-1234-abcd-1234abcdefgh" + } +} + +resource "aws_globalaccelerator_custom_routing_accelerator" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the Global Accelerator custom routing accelerator. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Global Accelerator custom routing accelerators using the `arn`. 
For example: ```typescript @@ -118,4 +139,4 @@ Using `terraform import`, import Global Accelerator custom routing accelerators % terraform import aws_globalaccelerator_custom_routing_accelerator.example arn:aws:globalaccelerator::111111111111:accelerator/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/globalaccelerator_custom_routing_endpoint_group.html.markdown b/website/docs/cdktf/typescript/r/globalaccelerator_custom_routing_endpoint_group.html.markdown index 767f72fbfdbc..6db97be274cb 100644 --- a/website/docs/cdktf/typescript/r/globalaccelerator_custom_routing_endpoint_group.html.markdown +++ b/website/docs/cdktf/typescript/r/globalaccelerator_custom_routing_endpoint_group.html.markdown @@ -84,6 +84,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_globalaccelerator_custom_routing_endpoint_group.example + identity = { + "arn" = "arn:aws:globalaccelerator::123456789012:accelerator/1234abcd-abcd-1234-abcd-1234abcdefgh/listener/0123vxyz/endpoint-group/098765zyxwvu" + } +} + +resource "aws_globalaccelerator_custom_routing_endpoint_group" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the Global Accelerator custom routing endpoint group. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Global Accelerator custom routing endpoint groups using the `id`. 
For example: ```typescript @@ -114,4 +135,4 @@ Using `terraform import`, import Global Accelerator custom routing endpoint grou % terraform import aws_globalaccelerator_custom_routing_endpoint_group.example arn:aws:globalaccelerator::111111111111:accelerator/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/listener/xxxxxxx/endpoint-group/xxxxxxxx ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/globalaccelerator_custom_routing_listener.html.markdown b/website/docs/cdktf/typescript/r/globalaccelerator_custom_routing_listener.html.markdown index 87ff8303d7e9..f13a349d4cdf 100644 --- a/website/docs/cdktf/typescript/r/globalaccelerator_custom_routing_listener.html.markdown +++ b/website/docs/cdktf/typescript/r/globalaccelerator_custom_routing_listener.html.markdown @@ -88,6 +88,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_globalaccelerator_custom_routing_listener.example + identity = { + "arn" = "arn:aws:globalaccelerator::123456789012:accelerator/1234abcd-abcd-1234-abcd-1234abcdefgh/listener/0123vxyz" + } +} + +resource "aws_globalaccelerator_custom_routing_listener" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the Global Accelerator custom routing listener. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Global Accelerator custom routing listeners using the `id`. 
For example: ```typescript @@ -118,4 +139,4 @@ Using `terraform import`, import Global Accelerator custom routing listeners usi % terraform import aws_globalaccelerator_custom_routing_listener.example arn:aws:globalaccelerator::111111111111:accelerator/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/listener/xxxxxxxx ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/globalaccelerator_endpoint_group.html.markdown b/website/docs/cdktf/typescript/r/globalaccelerator_endpoint_group.html.markdown index c88df045292a..653226fc3dc8 100644 --- a/website/docs/cdktf/typescript/r/globalaccelerator_endpoint_group.html.markdown +++ b/website/docs/cdktf/typescript/r/globalaccelerator_endpoint_group.html.markdown @@ -86,6 +86,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_globalaccelerator_endpoint_group.example + identity = { + "arn" = "arn:aws:globalaccelerator::123456789012:accelerator/1234abcd-abcd-1234-abcd-1234abcdefgh/listener/0123vxyz/endpoint-group/098765zyxwvu" + } +} + +resource "aws_globalaccelerator_endpoint_group" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the Global Accelerator endpoint group. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Global Accelerator endpoint groups using the `id`. 
For example: ```typescript @@ -116,4 +137,4 @@ Using `terraform import`, import Global Accelerator endpoint groups using the `i % terraform import aws_globalaccelerator_endpoint_group.example arn:aws:globalaccelerator::111111111111:accelerator/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/listener/xxxxxxx/endpoint-group/xxxxxxxx ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/globalaccelerator_listener.html.markdown b/website/docs/cdktf/typescript/r/globalaccelerator_listener.html.markdown index 0d124316c7b6..a2603bf6bb85 100644 --- a/website/docs/cdktf/typescript/r/globalaccelerator_listener.html.markdown +++ b/website/docs/cdktf/typescript/r/globalaccelerator_listener.html.markdown @@ -90,6 +90,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_globalaccelerator_listener.example + identity = { + "arn" = "arn:aws:globalaccelerator::123456789012:accelerator/1234abcd-abcd-1234-abcd-1234abcdefgh/listener/0123vxyz" + } +} + +resource "aws_globalaccelerator_listener" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the Global Accelerator listener. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Global Accelerator listeners using the `id`. For example: ```typescript @@ -120,4 +141,4 @@ Using `terraform import`, import Global Accelerator listeners using the `id`. 
Fo % terraform import aws_globalaccelerator_listener.example arn:aws:globalaccelerator::111111111111:accelerator/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/listener/xxxxxxxx ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/glue_catalog_database.html.markdown b/website/docs/cdktf/typescript/r/glue_catalog_database.html.markdown index 00888fca91dc..99e6148c9033 100644 --- a/website/docs/cdktf/typescript/r/glue_catalog_database.html.markdown +++ b/website/docs/cdktf/typescript/r/glue_catalog_database.html.markdown @@ -68,6 +68,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `catalogId` - (Optional) ID of the Glue Catalog to create the database in. If omitted, this defaults to the AWS Account ID. * `createTableDefaultPermission` - (Optional) Creates a set of default permissions on the table for principals. See [`createTableDefaultPermission`](#create_table_default_permission) below. * `description` - (Optional) Description of the database. 
@@ -138,4 +139,4 @@ Using `terraform import`, import Glue Catalog Databases using the `catalog_id:na % terraform import aws_glue_catalog_database.database 123456789012:my_database ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/glue_catalog_table.html.markdown b/website/docs/cdktf/typescript/r/glue_catalog_table.html.markdown index 472e2c5bf64d..2a013c945053 100644 --- a/website/docs/cdktf/typescript/r/glue_catalog_table.html.markdown +++ b/website/docs/cdktf/typescript/r/glue_catalog_table.html.markdown @@ -114,6 +114,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `catalogId` - (Optional) ID of the Glue Catalog and database to create the table in. If omitted, this defaults to the AWS Account ID plus the database name. * `description` - (Optional) Description of the table. * `owner` - (Optional) Owner of the table. @@ -156,6 +157,7 @@ To add an index to an existing table, see the [`glue_partition_index` resource]( * `comment` - (Optional) Free-form text comment. * `name` - (Required) Name of the Partition Key. +* `parameters` - (Optional) Map of key-value pairs. * `type` - (Optional) Datatype of data in the Partition Key. 
### storage_descriptor @@ -257,4 +259,4 @@ Using `terraform import`, import Glue Tables using the catalog ID (usually AWS a % terraform import aws_glue_catalog_table.MyTable 123456789012:MyDatabase:MyTable ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/glue_catalog_table_optimizer.html.markdown b/website/docs/cdktf/typescript/r/glue_catalog_table_optimizer.html.markdown index ff4cee599498..88559db1cd0a 100644 --- a/website/docs/cdktf/typescript/r/glue_catalog_table_optimizer.html.markdown +++ b/website/docs/cdktf/typescript/r/glue_catalog_table_optimizer.html.markdown @@ -64,13 +64,13 @@ class MyConvertedCode extends TerraformStack { configuration: [ { enabled: true, - retention_configuration: [ + retentionConfiguration: [ { - iceberg_configuration: [ + icebergConfiguration: [ { - clean_expired_files: true, - number_of_snapshots_to_retain: 3, - snapshot_retention_period_in_days: 7, + cleanExpiredFiles: true, + numberOfSnapshotsToRetain: 3, + snapshotRetentionPeriodInDays: 7, }, ], }, @@ -106,12 +106,12 @@ class MyConvertedCode extends TerraformStack { configuration: [ { enabled: true, - orphan_file_deletion_configuration: [ + orphanFileDeletionConfiguration: [ { - iceberg_configuration: [ + icebergConfiguration: [ { location: "s3://example-bucket/example_table/", - orphan_file_retention_period_in_days: 7, + orphanFileRetentionPeriodInDays: 7, }, ], }, @@ -130,8 +130,9 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -The following arguments are required: +This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `catalogId` - (Required) The Catalog ID of the table. 
* `configuration` - (Required) A configuration block that defines the table optimizer settings. See [Configuration](#configuration) for additional details. * `databaseName` - (Required) The name of the database in the catalog in which the table resides. @@ -141,22 +142,24 @@ The following arguments are required: ### Configuration * `enabled` - (Required) Indicates whether the table optimizer is enabled. -* `orphan_file_deletion_configuration` (Optional) - The configuration block for an orphan file deletion optimizer. See [Orphan File Deletion Configuration](#orphan-file-deletion-configuration) for additional details. -* `retention_configuration` (Optional) - The configuration block for a snapshot retention optimizer. See [Retention Configuration](#retention-configuration) for additional details. +* `orphanFileDeletionConfiguration` (Optional) - The configuration block for an orphan file deletion optimizer. See [Orphan File Deletion Configuration](#orphan-file-deletion-configuration) for additional details. +* `retentionConfiguration` (Optional) - The configuration block for a snapshot retention optimizer. See [Retention Configuration](#retention-configuration) for additional details. * `roleArn` - (Required) The ARN of the IAM role to use for the table optimizer. ### Orphan File Deletion Configuration * `icebergConfiguration` (Optional) - The configuration for an Iceberg orphan file deletion optimizer. - * `orphan_file_retention_period_in_days` (Optional) - The number of days that orphan files should be retained before file deletion. Defaults to `3`. * `location` (Optional) - Specifies a directory in which to look for files. You may choose a sub-directory rather than the top-level table location. Defaults to the table's location. - + * `orphanFileRetentionPeriodInDays` (Optional) - The number of days that orphan files should be retained before file deletion. Defaults to `3`. + * `runRateInHours` (Optional) - interval in hours between orphan file deletion job runs. 
Defaults to `24`. + ### Retention Configuration * `icebergConfiguration` (Optional) - The configuration for an Iceberg snapshot retention optimizer. - * `snapshot_retention_period_in_days` (Optional) - The number of days to retain the Iceberg snapshots. Defaults to `5`, or the corresponding Iceberg table configuration field if it exists. - * `number_of_snapshots_to_retain` (Optional) - The number of Iceberg snapshots to retain within the retention period. Defaults to `1` or the corresponding Iceberg table configuration field if it exists. - * `clean_expired_files` (Optional) - If set to `false`, snapshots are only deleted from table metadata, and the underlying data and metadata files are not deleted. Defaults to `false`. + * `cleanExpiredFiles` (Optional) - If set to `false`, snapshots are only deleted from table metadata, and the underlying data and metadata files are not deleted. Defaults to `false`. + * `numberOfSnapshotsToRetain` (Optional) - The number of Iceberg snapshots to retain within the retention period. Defaults to `1` or the corresponding Iceberg table configuration field if it exists. + * `runRateInHours` (Optional) - Interval in hours between retention job runs. Defaults to `24`. + * `snapshotRetentionPeriodInDays` (Optional) - The number of days to retain the Iceberg snapshots. Defaults to `5`, or the corresponding Iceberg table configuration field if it exists. 
## Attribute Reference @@ -194,4 +197,4 @@ Using `terraform import`, import Glue Catalog Table Optimizer using the `catalog % terraform import aws_glue_catalog_table_optimizer.example 123456789012,example_database,example_table,compaction ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/glue_classifier.html.markdown b/website/docs/cdktf/typescript/r/glue_classifier.html.markdown index 9e1a38c186cc..31bbe06b0a16 100644 --- a/website/docs/cdktf/typescript/r/glue_classifier.html.markdown +++ b/website/docs/cdktf/typescript/r/glue_classifier.html.markdown @@ -127,11 +127,12 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `csvClassifier` - (Optional) A classifier for CSV content. Defined below. -* `grokClassifier` – (Optional) A classifier that uses grok patterns. Defined below. -* `jsonClassifier` – (Optional) A classifier for JSON content. Defined below. -* `name` – (Required) The name of the classifier. -* `xmlClassifier` – (Optional) A classifier for XML content. Defined below. +* `grokClassifier` - (Optional) A classifier that uses grok patterns. Defined below. +* `jsonClassifier` - (Optional) A classifier for JSON content. Defined below. +* `name` - (Required) The name of the classifier. +* `xmlClassifier` - (Optional) A classifier for XML content. Defined below. ### csv_classifier @@ -143,7 +144,7 @@ This resource supports the following arguments: * `disableValueTrimming` - (Optional) Specifies whether to trim column values. * `header` - (Optional) A list of strings representing column names. 
* `quoteSymbol` - (Optional) A custom symbol to denote what combines content into a single column value. It must be different from the column delimiter. -* `serde` – (Optional) The SerDe for processing CSV. Valid values are `OpenCSVSerDe`, `LazySimpleSerDe`, `None`. +* `serde` - (Optional) The SerDe for processing CSV. Valid values are `OpenCSVSerDe`, `LazySimpleSerDe`, `None`. ### grok_classifier @@ -198,4 +199,4 @@ Using `terraform import`, import Glue Classifiers using their name. For example: % terraform import aws_glue_classifier.MyClassifier MyClassifier ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/glue_connection.html.markdown b/website/docs/cdktf/typescript/r/glue_connection.html.markdown index 7e19c514ae44..010c6a5e38be 100644 --- a/website/docs/cdktf/typescript/r/glue_connection.html.markdown +++ b/website/docs/cdktf/typescript/r/glue_connection.html.markdown @@ -456,20 +456,22 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -The following arguments are required: +This resource supports the following arguments: -* `name` – (Required) Name of the connection. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `name` - (Required) Name of the connection. The following arguments are optional: -* `catalogId` – (Optional) ID of the Data Catalog in which to create the connection. If none is supplied, the AWS account ID is used by default. -* `athenaProperties` – (Optional) Map of key-value pairs used as connection properties specific to the Athena compute environment. -* `connectionProperties` – (Optional) Map of key-value pairs used as parameters for this connection. 
For more information, see the [AWS Documentation](https://docs.aws.amazon.com/glue/latest/dg/connection-properties.html). +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `catalogId` - (Optional) ID of the Data Catalog in which to create the connection. If none is supplied, the AWS account ID is used by default. +* `athenaProperties` - (Optional) Map of key-value pairs used as connection properties specific to the Athena compute environment. +* `connectionProperties` - (Optional) Map of key-value pairs used as parameters for this connection. For more information, see the [AWS Documentation](https://docs.aws.amazon.com/glue/latest/dg/connection-properties.html). **Note:** Some connection types require the `SparkProperties` property with a JSON document that contains the actual connection properties. For specific examples, refer to [Example Usage](#example-usage). -* `connectionType` – (Optional) Type of the connection. Valid values: `AZURECOSMOS`, `AZURESQL`, `BIGQUERY`, `CUSTOM`, `DYNAMODB`, `JDBC`, `KAFKA`, `MARKETPLACE`, `MONGODB`, `NETWORK`, `OPENSEARCH`, `SNOWFLAKE`. Defaults to `JDBC`. -* `description` – (Optional) Description of the connection. -* `matchCriteria` – (Optional) List of criteria that can be used in selecting this connection. +* `connectionType` - (Optional) Type of the connection. Valid values: `AZURECOSMOS`, `AZURESQL`, `BIGQUERY`, `CUSTOM`, `DYNAMODB`, `JDBC`, `KAFKA`, `MARKETPLACE`, `MONGODB`, `NETWORK`, `OPENSEARCH`, `SNOWFLAKE`. Defaults to `JDBC`. +* `description` - (Optional) Description of the connection. +* `matchCriteria` - (Optional) List of criteria that can be used in selecting this connection. 
* `physicalConnectionRequirements` - (Optional) Map of physical connection requirements, such as VPC and SecurityGroup. See [`physicalConnectionRequirements` Block](#physical_connection_requirements-block) for details. * `tags` - (Optional) Key-value map of resource tags. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. @@ -521,4 +523,4 @@ Using `terraform import`, import Glue Connections using the `CATALOG-ID` (AWS ac % terraform import aws_glue_connection.MyConnection 123456789012:MyConnection ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/glue_crawler.html.markdown b/website/docs/cdktf/typescript/r/glue_crawler.html.markdown index b74a57731dd2..daad08b0e059 100644 --- a/website/docs/cdktf/typescript/r/glue_crawler.html.markdown +++ b/website/docs/cdktf/typescript/r/glue_crawler.html.markdown @@ -215,6 +215,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `databaseName` (Required) Glue database where results are written. * `name` (Required) Name of the crawler. * `role` (Required) The IAM role friendly name (including path without leading slash), or ARN of an IAM role, used by the crawler to access other resources. @@ -355,4 +356,4 @@ Using `terraform import`, import Glue Crawlers using `name`. 
For example: % terraform import aws_glue_crawler.MyJob MyJob ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/glue_data_catalog_encryption_settings.html.markdown b/website/docs/cdktf/typescript/r/glue_data_catalog_encryption_settings.html.markdown index e4b9def504bf..25cc783afda9 100644 --- a/website/docs/cdktf/typescript/r/glue_data_catalog_encryption_settings.html.markdown +++ b/website/docs/cdktf/typescript/r/glue_data_catalog_encryption_settings.html.markdown @@ -48,8 +48,9 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: -* `dataCatalogEncryptionSettings` – (Required) The security configuration to set. see [Data Catalog Encryption Settings](#data_catalog_encryption_settings). -* `catalogId` – (Optional) The ID of the Data Catalog to set the security configuration for. If none is provided, the AWS account ID is used by default. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `dataCatalogEncryptionSettings` - (Required) The security configuration to set. see [Data Catalog Encryption Settings](#data_catalog_encryption_settings). +* `catalogId` - (Optional) The ID of the Data Catalog to set the security configuration for. If none is provided, the AWS account ID is used by default. 
### data_catalog_encryption_settings @@ -105,4 +106,4 @@ Using `terraform import`, import Glue Data Catalog Encryption Settings using `CA % terraform import aws_glue_data_catalog_encryption_settings.example 123456789012 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/glue_data_quality_ruleset.html.markdown b/website/docs/cdktf/typescript/r/glue_data_quality_ruleset.html.markdown index 651a132e1bda..fde0f95cfb29 100644 --- a/website/docs/cdktf/typescript/r/glue_data_quality_ruleset.html.markdown +++ b/website/docs/cdktf/typescript/r/glue_data_quality_ruleset.html.markdown @@ -118,6 +118,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` - (Optional) Description of the data quality ruleset. * `name` - (Required, Forces new resource) Name of the data quality ruleset. * `ruleset` - (Optional) A Data Quality Definition Language (DQDL) ruleset. For more information, see the AWS Glue developer guide. @@ -172,4 +173,4 @@ Using `terraform import`, import Glue Data Quality Ruleset using the `name`. 
For % terraform import aws_glue_data_quality_ruleset.example exampleName ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/glue_dev_endpoint.html.markdown b/website/docs/cdktf/typescript/r/glue_dev_endpoint.html.markdown index 25e141efeb9c..2552d7394417 100644 --- a/website/docs/cdktf/typescript/r/glue_dev_endpoint.html.markdown +++ b/website/docs/cdktf/typescript/r/glue_dev_endpoint.html.markdown @@ -69,6 +69,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `arguments` - (Optional) A map of arguments used to configure the endpoint. * `extraJarsS3Path` - (Optional) Path to one or more Java Jars in an S3 bucket that should be loaded in this endpoint. * `extraPythonLibsS3Path` - (Optional) Path(s) to one or more Python libraries in an S3 bucket that should be loaded in this endpoint. Multiple values must be complete paths separated by a comma. @@ -129,4 +130,4 @@ Using `terraform import`, import a Glue Development Endpoint using the `name`. F % terraform import aws_glue_dev_endpoint.example foo ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/glue_job.html.markdown b/website/docs/cdktf/typescript/r/glue_job.html.markdown index d9bb942c71f2..ac12ac73c193 100644 --- a/website/docs/cdktf/typescript/r/glue_job.html.markdown +++ b/website/docs/cdktf/typescript/r/glue_job.html.markdown @@ -297,34 +297,29 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: -* `command` – (Required) The command of the job. Defined below. 
-* `connections` – (Optional) The list of connections used for this job. -* `defaultArguments` – (Optional) The map of default arguments for this job. You can specify arguments here that your own job-execution script consumes, as well as arguments that AWS Glue itself consumes. For information about how to specify and consume your own Job arguments, see the [Calling AWS Glue APIs in Python](http://docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-python-calling.html) topic in the developer guide. For information about the key-value pairs that AWS Glue consumes to set up your job, see the [Special Parameters Used by AWS Glue](http://docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-python-glue-arguments.html) topic in the developer guide. -* `nonOverridableArguments` – (Optional) Non-overridable arguments for this job, specified as name-value pairs. -* `description` – (Optional) Description of the job. -* `executionProperty` – (Optional) Execution property of the job. Defined below. +* `command` - (Required) The command of the job. Defined below. +* `connections` - (Optional) The list of connections used for this job. +* `defaultArguments` - (Optional) The map of default arguments for this job. You can specify arguments here that your own job-execution script consumes, as well as arguments that AWS Glue itself consumes. For information about how to specify and consume your own Job arguments, see the [Calling AWS Glue APIs in Python](http://docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-python-calling.html) topic in the developer guide. For information about the key-value pairs that AWS Glue consumes to set up your job, see the [Special Parameters Used by AWS Glue](http://docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-python-glue-arguments.html) topic in the developer guide. +* `description` - (Optional) Description of the job. +* `executionClass` - (Optional) Indicates whether the job is run with a standard or flexible execution class. 
The standard execution class is ideal for time-sensitive workloads that require fast job startup and dedicated resources. Valid value: `FLEX`, `STANDARD`. +* `executionProperty` - (Optional) Execution property of the job. Defined below. * `glueVersion` - (Optional) The version of glue to use, for example "1.0". Ray jobs should set this to 4.0 or greater. For information about available versions, see the [AWS Glue Release Notes](https://docs.aws.amazon.com/glue/latest/dg/release-notes.html). +* `jobMode` - (Optional) Describes how a job was created. Valid values are `SCRIPT`, `NOTEBOOK` and `VISUAL`. * `jobRunQueuingEnabled` - (Optional) Specifies whether job run queuing is enabled for the job runs for this job. A value of true means job run queuing is enabled for the job runs. If false or not populated, the job runs will not be considered for queueing. -* `executionClass` - (Optional) Indicates whether the job is run with a standard or flexible execution class. The standard execution class is ideal for time-sensitive workloads that require fast job startup and dedicated resources. Valid value: `FLEX`, `STANDARD`. -* `maintenanceWindow` – (Optional) Specifies the day of the week and hour for the maintenance window for streaming jobs. -* `maxCapacity` – (Optional) The maximum number of AWS Glue data processing units (DPUs) that can be allocated when this job runs. `Required` when `pythonshell` is set, accept either `0.0625` or `1.0`. Use `numberOfWorkers` and `workerType` arguments instead with `glueVersion` `2.0` and above. -* `maxRetries` – (Optional) The maximum number of times to retry this job if it fails. -* `name` – (Required) The name you assign to this job. It must be unique in your account. +* `maintenanceWindow` - (Optional) Specifies the day of the week and hour for the maintenance window for streaming jobs. +* `maxCapacity` - (Optional) The maximum number of AWS Glue data processing units (DPUs) that can be allocated when this job runs. 
`Required` when `pythonshell` is set, accept either `0.0625` or `1.0`. Use `numberOfWorkers` and `workerType` arguments instead with `glueVersion` `2.0` and above. +* `maxRetries` - (Optional) The maximum number of times to retry this job if it fails. +* `name` - (Required) The name you assign to this job. It must be unique in your account. +* `nonOverridableArguments` - (Optional) Non-overridable arguments for this job, specified as name-value pairs. * `notificationProperty` - (Optional) Notification property of the job. Defined below. -* `roleArn` – (Required) The ARN of the IAM role associated with this job. +* `numberOfWorkers` - (Optional) The number of workers of a defined workerType that are allocated when a job runs. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `roleArn` - (Required) The ARN of the IAM role associated with this job. * `tags` - (Optional) Key-value map of resource tags. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. -* `timeout` – (Optional) The job timeout in minutes. The default is 2880 minutes (48 hours) for `glueetl` and `pythonshell` jobs, and null (unlimited) for `gluestreaming` jobs. +* `timeout` - (Optional) The job timeout in minutes. The default is 2880 minutes (48 hours) for `glueetl` and `pythonshell` jobs, and null (unlimited) for `gluestreaming` jobs. * `securityConfiguration` - (Optional) The name of the Security Configuration to be associated with the job. 
* `sourceControlDetails` - (Optional) The details for a source control configuration for a job, allowing synchronization of job artifacts to or from a remote repository. Defined below. -* `workerType` - (Optional) The type of predefined worker that is allocated when a job runs. Accepts a value of Standard, G.1X, G.2X, or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs. - * For the Standard worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker. - * For the G.1X worker type, each worker maps to 1 DPU (4 vCPU, 16 GB of memory, 64 GB disk), and provides 1 executor per worker. Recommended for memory-intensive jobs. - * For the G.2X worker type, each worker maps to 2 DPU (8 vCPU, 32 GB of memory, 128 GB disk), and provides 1 executor per worker. Recommended for memory-intensive jobs. - * For the G.4X worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of memory) with 256GB disk (approximately 235GB free), and provides 1 executor per worker. Recommended for memory-intensive jobs. Only available for Glue version 3.0. Available AWS Regions: US East (Ohio), US East (N. Virginia), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm). - * For the G.8X worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of memory) with 512GB disk (approximately 487GB free), and provides 1 executor per worker. Recommended for memory-intensive jobs. Only available for Glue version 3.0. Available AWS Regions: US East (Ohio), US East (N. Virginia), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm). - * For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPU, 4GB of memory, 64 GB disk), and provides 1 executor per worker. Recommended for low volume streaming jobs. 
Only available for Glue version 3.0. - * For the Z.2X worker type, each worker maps to 2 M-DPU (8vCPU, 64 GB of m emory, 128 GB disk), and provides up to 8 Ray workers based on the autoscaler. -* `numberOfWorkers` - (Optional) The number of workers of a defined workerType that are allocated when a job runs. +* `workerType` - (Optional) The type of predefined worker that is allocated when a job runs. Valid values: `Standard`, `G.1X`, `G.2X`, `G.025X`, `G.4X`, `G.8X`, `G.12X`, `G.16X`, `R.1X`, `R.2X`, `R.4X`, `R.8X`, `Z.2X` (Ray jobs). See the [AWS documentation](https://docs.aws.amazon.com/glue/latest/dg/worker-types.html) for details. ### command Argument Reference @@ -388,4 +383,4 @@ Using `terraform import`, import Glue Jobs using `name`. For example: % terraform import aws_glue_job.MyJob MyJob ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/glue_ml_transform.html.markdown b/website/docs/cdktf/typescript/r/glue_ml_transform.html.markdown index c7ced2be29e2..f44612d51518 100644 --- a/website/docs/cdktf/typescript/r/glue_ml_transform.html.markdown +++ b/website/docs/cdktf/typescript/r/glue_ml_transform.html.markdown @@ -130,16 +130,17 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: -* `name` – (Required) The name you assign to this ML Transform. It must be unique in your account. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `name` - (Required) The name you assign to this ML Transform. It must be unique in your account. * `inputRecordTables` - (Required) A list of AWS Glue table definitions used by the transform. see [Input Record Tables](#input_record_tables). 
* `parameters` - (Required) The algorithmic parameters that are specific to the transform type used. Conditionally dependent on the transform type. see [Parameters](#parameters). -* `roleArn` – (Required) The ARN of the IAM role associated with this ML Transform. -* `description` – (Optional) Description of the ML Transform. +* `roleArn` - (Required) The ARN of the IAM role associated with this ML Transform. +* `description` - (Optional) Description of the ML Transform. * `glueVersion` - (Optional) The version of glue to use, for example "1.0". For information about available versions, see the [AWS Glue Release Notes](https://docs.aws.amazon.com/glue/latest/dg/release-notes.html). -* `maxCapacity` – (Optional) The number of AWS Glue data processing units (DPUs) that are allocated to task runs for this transform. You can allocate from `2` to `100` DPUs; the default is `10`. `maxCapacity` is a mutually exclusive option with `numberOfWorkers` and `workerType`. -* `maxRetries` – (Optional) The maximum number of times to retry this ML Transform if it fails. +* `maxCapacity` - (Optional) The number of AWS Glue data processing units (DPUs) that are allocated to task runs for this transform. You can allocate from `2` to `100` DPUs; the default is `10`. `maxCapacity` is a mutually exclusive option with `numberOfWorkers` and `workerType`. +* `maxRetries` - (Optional) The maximum number of times to retry this ML Transform if it fails. * `tags` - (Optional) Key-value map of resource tags. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. -* `timeout` – (Optional) The ML Transform timeout in minutes. The default is 2880 minutes (48 hours). +* `timeout` - (Optional) The ML Transform timeout in minutes. The default is 2880 minutes (48 hours). 
* `workerType` - (Optional) The type of predefined worker that is allocated when an ML Transform runs. Accepts a value of `Standard`, `G.1X`, or `G.2X`. Required with `numberOfWorkers`. * `numberOfWorkers` - (Optional) The number of workers of a defined `workerType` that are allocated when an ML Transform runs. Required with `workerType`. @@ -209,4 +210,4 @@ Using `terraform import`, import Glue ML Transforms using `id`. For example: % terraform import aws_glue_ml_transform.example tfm-c2cafbe83b1c575f49eaca9939220e2fcd58e2d5 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/glue_partition.html.markdown b/website/docs/cdktf/typescript/r/glue_partition.html.markdown index cc2b469a506d..ec424c2712b4 100644 --- a/website/docs/cdktf/typescript/r/glue_partition.html.markdown +++ b/website/docs/cdktf/typescript/r/glue_partition.html.markdown @@ -40,6 +40,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `databaseName` - (Required) Name of the metadata database where the table metadata resides. For Hive compatibility, this must be all lowercase. * `partitionValues` - (Required) The values that define the partition. * `catalogId` - (Optional) ID of the Glue Catalog and database to create the table in. If omitted, this defaults to the AWS Account ID plus the database name. 
@@ -126,4 +127,4 @@ Using `terraform import`, import Glue Partitions using the catalog ID (usually A % terraform import aws_glue_partition.part 123456789012:MyDatabase:MyTable:val1#val2 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/glue_partition_index.html.markdown b/website/docs/cdktf/typescript/r/glue_partition_index.html.markdown index 82f479b04dc2..87b98db52ea0 100644 --- a/website/docs/cdktf/typescript/r/glue_partition_index.html.markdown +++ b/website/docs/cdktf/typescript/r/glue_partition_index.html.markdown @@ -123,6 +123,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `tableName` - (Required) Name of the table. For Hive compatibility, this must be entirely lowercase. * `databaseName` - (Required) Name of the metadata database where the table metadata resides. For Hive compatibility, this must be all lowercase. * `partitionIndex` - (Required) Configuration block for a partition index. See [`partitionIndex`](#partition_index) below. 
@@ -178,4 +179,4 @@ Using `terraform import`, import Glue Partition Indexes using the catalog ID (us % terraform import aws_glue_partition_index.example 123456789012:MyDatabase:MyTable:index-name ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/glue_registry.html.markdown b/website/docs/cdktf/typescript/r/glue_registry.html.markdown index e2f5da609362..ed65338a262a 100644 --- a/website/docs/cdktf/typescript/r/glue_registry.html.markdown +++ b/website/docs/cdktf/typescript/r/glue_registry.html.markdown @@ -38,8 +38,9 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: -* `registryName` – (Required) The Name of the registry. -* `description` – (Optional) A description of the registry. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `registryName` - (Required) The Name of the registry. +* `description` - (Optional) A description of the registry. * `tags` - (Optional) Key-value map of resource tags. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. ## Attribute Reference @@ -52,6 +53,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. 
For example: + +```terraform +import { + to = aws_glue_registry.example + identity = { + "arn" = "arn:aws:glue:us-west-2:123456789012:registry/example" + } +} + +resource "aws_glue_registry" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the Glue registry. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Glue Registries using `arn`. For example: ```typescript @@ -82,4 +104,4 @@ Using `terraform import`, import Glue Registries using `arn`. For example: % terraform import aws_glue_registry.example arn:aws:glue:us-west-2:123456789012:registry/example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/glue_resource_policy.html.markdown b/website/docs/cdktf/typescript/r/glue_resource_policy.html.markdown index fbfb203908e5..72cfbd17167c 100644 --- a/website/docs/cdktf/typescript/r/glue_resource_policy.html.markdown +++ b/website/docs/cdktf/typescript/r/glue_resource_policy.html.markdown @@ -54,7 +54,7 @@ class MyConvertedCode extends TerraformStack { "arn:${" + dataAwsPartitionCurrent.partition + "}:glue:${" + - dataAwsRegionCurrent.name + + dataAwsRegionCurrent.region + "}:${" + current.accountId + "}:*", @@ -75,7 +75,8 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: -* `policy` – (Required) The policy to be applied to the aws glue data catalog. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `policy` - (Required) The policy to be applied to the aws glue data catalog. 
* `enableHybrid` - (Optional) Indicates that you are using both methods to grant cross-account. Valid values are `TRUE` and `FALSE`. Note the terraform will not perform drift detetction on this field as its not return on read. ## Attribute Reference @@ -110,4 +111,4 @@ Using `terraform import`, import Glue Resource Policy using the account ID. For % terraform import aws_glue_resource_policy.Test 12356789012 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/glue_schema.html.markdown b/website/docs/cdktf/typescript/r/glue_schema.html.markdown index 0fefdd2f5dd3..c530bfab6e89 100644 --- a/website/docs/cdktf/typescript/r/glue_schema.html.markdown +++ b/website/docs/cdktf/typescript/r/glue_schema.html.markdown @@ -43,12 +43,13 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: -* `schemaName` – (Required) The Name of the schema. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `schemaName` - (Required) The Name of the schema. * `registryArn` - (Required) The ARN of the Glue Registry to create the schema in. * `dataFormat` - (Required) The data format of the schema definition. Valid values are `AVRO`, `JSON` and `PROTOBUF`. * `compatibility` - (Required) The compatibility mode of the schema. Values values are: `NONE`, `DISABLED`, `BACKWARD`, `BACKWARD_ALL`, `FORWARD`, `FORWARD_ALL`, `FULL`, and `FULL_ALL`. * `schemaDefinition` - (Required) The schema definition using the `dataFormat` setting for `schemaName`. -* `description` – (Optional) A description of the schema. +* `description` - (Optional) A description of the schema. * `tags` - (Optional) Key-value map of resource tags. 
If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. ## Attribute Reference @@ -65,6 +66,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_glue_schema.example + identity = { + "arn" = "arn:aws:glue:us-west-2:123456789012:schema/example-registry/example-schema" + } +} + +resource "aws_glue_schema" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the Glue schema. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Glue Registries using `arn`. For example: ```typescript @@ -95,4 +117,4 @@ Using `terraform import`, import Glue Registries using `arn`. For example: % terraform import aws_glue_schema.example arn:aws:glue:us-west-2:123456789012:schema/example/example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/glue_security_configuration.html.markdown b/website/docs/cdktf/typescript/r/glue_security_configuration.html.markdown index 89ce2aa8d48a..7500e7a2bfba 100644 --- a/website/docs/cdktf/typescript/r/glue_security_configuration.html.markdown +++ b/website/docs/cdktf/typescript/r/glue_security_configuration.html.markdown @@ -50,8 +50,9 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: -* `encryptionConfiguration` – (Required) Configuration block containing encryption configuration. Detailed below. 
-* `name` – (Required) Name of the security configuration. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `encryptionConfiguration` - (Required) Configuration block containing encryption configuration. Detailed below. +* `name` - (Required) Name of the security configuration. ### encryption_configuration Argument Reference @@ -112,4 +113,4 @@ Using `terraform import`, import Glue Security Configurations using `name`. For % terraform import aws_glue_security_configuration.example example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/glue_trigger.html.markdown b/website/docs/cdktf/typescript/r/glue_trigger.html.markdown index a16f8781d056..3c12b33b324e 100644 --- a/website/docs/cdktf/typescript/r/glue_trigger.html.markdown +++ b/website/docs/cdktf/typescript/r/glue_trigger.html.markdown @@ -187,15 +187,16 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: -* `actions` – (Required) List of actions initiated by this trigger when it fires. See [Actions](#actions) Below. -* `description` – (Optional) A description of the new trigger. -* `enabled` – (Optional) Start the trigger. Defaults to `true`. -* `name` – (Required) The name of the trigger. -* `predicate` – (Optional) A predicate to specify when the new trigger should fire. Required when trigger type is `CONDITIONAL`. See [Predicate](#predicate) Below. -* `schedule` – (Optional) A cron expression used to specify the schedule. 
[Time-Based Schedules for Jobs and Crawlers](https://docs.aws.amazon.com/glue/latest/dg/monitor-data-warehouse-schedule.html) +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `actions` - (Required) List of actions initiated by this trigger when it fires. See [Actions](#actions) Below. +* `description` - (Optional) A description of the new trigger. +* `enabled` - (Optional) Start the trigger. Defaults to `true`. +* `name` - (Required) The name of the trigger. +* `predicate` - (Optional) A predicate to specify when the new trigger should fire. Required when trigger type is `CONDITIONAL`. See [Predicate](#predicate) Below. +* `schedule` - (Optional) A cron expression used to specify the schedule. [Time-Based Schedules for Jobs and Crawlers](https://docs.aws.amazon.com/glue/latest/dg/monitor-data-warehouse-schedule.html) * `tags` - (Optional) Key-value map of resource tags. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. -* `startOnCreation` – (Optional) Set to true to start `SCHEDULED` and `CONDITIONAL` triggers when created. True is not supported for `ON_DEMAND` triggers. -* `type` – (Required) The type of trigger. Valid values are `CONDITIONAL`, `EVENT`, `ON_DEMAND`, and `SCHEDULED`. +* `startOnCreation` - (Optional) Set to true to start `SCHEDULED` and `CONDITIONAL` triggers when created. True is not supported for `ON_DEMAND` triggers. +* `type` - (Required) The type of trigger. Valid values are `CONDITIONAL`, `EVENT`, `ON_DEMAND`, and `SCHEDULED`. 
* `workflowName` - (Optional) A workflow to which the trigger should be associated to. Every workflow graph (DAG) needs a starting trigger (`ON_DEMAND` or `SCHEDULED` type) and can contain multiple additional `CONDITIONAL` triggers. * `eventBatchingCondition` - (Optional) Batch condition that must be met (specified number of events received or batch time window expired) before EventBridge event trigger fires. See [Event Batching Condition](#event-batching-condition). @@ -275,4 +276,4 @@ Using `terraform import`, import Glue Triggers using `name`. For example: % terraform import aws_glue_trigger.MyTrigger MyTrigger ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/glue_user_defined_function.html.markdown b/website/docs/cdktf/typescript/r/glue_user_defined_function.html.markdown index 8d126d4e5f75..b5f9481c1443 100644 --- a/website/docs/cdktf/typescript/r/glue_user_defined_function.html.markdown +++ b/website/docs/cdktf/typescript/r/glue_user_defined_function.html.markdown @@ -59,6 +59,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The name of the function. * `catalogId` - (Optional) ID of the Glue Catalog to create the function in. If omitted, this defaults to the AWS Account ID. * `databaseName` - (Required) The name of the Database to create the Function. 
@@ -112,4 +113,4 @@ Using `terraform import`, import Glue User Defined Functions using the `catalog_ % terraform import aws_glue_user_defined_function.func 123456789012:my_database:my_func ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/glue_workflow.html.markdown b/website/docs/cdktf/typescript/r/glue_workflow.html.markdown index 0f2440146daa..30bb0d8110d4 100644 --- a/website/docs/cdktf/typescript/r/glue_workflow.html.markdown +++ b/website/docs/cdktf/typescript/r/glue_workflow.html.markdown @@ -69,9 +69,10 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: -* `name` – (Required) The name you assign to this workflow. -* `defaultRunProperties` – (Optional) A map of default run properties for this workflow. These properties are passed to all jobs associated to the workflow. -* `description` – (Optional) Description of the workflow. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `name` - (Required) The name you assign to this workflow. +* `defaultRunProperties` - (Optional) A map of default run properties for this workflow. These properties are passed to all jobs associated to the workflow. +* `description` - (Optional) Description of the workflow. * `maxConcurrentRuns` - (Optional) Prevents exceeding the maximum number of concurrent runs of any of the component jobs. If you leave this parameter blank, there is no limit to the number of concurrent workflow runs. * `tags` - (Optional) Key-value map of resource tags. 
If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. @@ -111,4 +112,4 @@ Using `terraform import`, import Glue Workflows using `name`. For example: % terraform import aws_glue_workflow.MyWorkflow MyWorkflow ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/grafana_license_association.html.markdown b/website/docs/cdktf/typescript/r/grafana_license_association.html.markdown index 207d97365e68..d1ed81e87518 100644 --- a/website/docs/cdktf/typescript/r/grafana_license_association.html.markdown +++ b/website/docs/cdktf/typescript/r/grafana_license_association.html.markdown @@ -73,6 +73,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `grafanaToken` - (Optional) A token from Grafana Labs that ties your AWS account with a Grafana Labs account. * `licenseType` - (Required) The type of license for the workspace license association. Valid values are `ENTERPRISE` and `ENTERPRISE_FREE_TRIAL`. * `workspaceId` - (Required) The workspace id. 
@@ -116,4 +117,4 @@ Using `terraform import`, import Grafana workspace license association using the % terraform import aws_grafana_license_association.example g-2054c75a02 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/grafana_role_association.html.markdown b/website/docs/cdktf/typescript/r/grafana_role_association.html.markdown index d11b622b6db5..ed769240f566 100644 --- a/website/docs/cdktf/typescript/r/grafana_role_association.html.markdown +++ b/website/docs/cdktf/typescript/r/grafana_role_association.html.markdown @@ -79,6 +79,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `groupIds` - (Optional) The AWS SSO group ids to be assigned the role given in `role`. * `userIds` - (Optional) The AWS SSO user ids to be assigned the role given in `role`. @@ -86,4 +87,4 @@ The following arguments are optional: This resource exports no additional attributes. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/grafana_workspace.html.markdown b/website/docs/cdktf/typescript/r/grafana_workspace.html.markdown index 79d0346301be..7d7730d71899 100644 --- a/website/docs/cdktf/typescript/r/grafana_workspace.html.markdown +++ b/website/docs/cdktf/typescript/r/grafana_workspace.html.markdown @@ -93,7 +93,7 @@ class MyConvertedCode extends TerraformStack { ``` -The optional argument `configuration` is a JSON string that enables the unified `Grafana Alerting` (Grafana version 10 or newer) and `Plugins Management` (Grafana version 9 or newer) on the Grafana Workspaces. 
+The optional argument `configuration` is a JSON string that disables the unified `Grafana Alerting` (Grafana version 10 or newer) and enables `Plugin Management` (Grafana version 9 or newer) on the Grafana Workspaces. For more information about using Grafana alerting, and the effects of turning it on or off, see [Alerts in Grafana version 10](https://docs.aws.amazon.com/grafana/latest/userguide/v10-alerts.html). @@ -107,8 +107,9 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `configuration` - (Optional) The configuration string for the workspace that you create. For more information about the format and configuration options available, see [Working in your Grafana workspace](https://docs.aws.amazon.com/grafana/latest/userguide/AMG-configure-workspace.html). -* `dataSources` - (Optional) The data sources for the workspace. Valid values are `AMAZON_OPENSEARCH_SERVICE`, `ATHENA`, `CLOUDWATCH`, `PROMETHEUS`, `REDSHIFT`, `SITEWISE`, `TIMESTREAM`, `XRAY` +* `dataSources` - (Optional) The data sources for the workspace. Valid values are `AMAZON_OPENSEARCH_SERVICE`, `ATHENA`, `CLOUDWATCH`, `PROMETHEUS`, `REDSHIFT`, `SITEWISE`, `TIMESTREAM`, `TWINMAKER`, `XRAY` * `description` - (Optional) The workspace description. * `grafanaVersion` - (Optional) Specifies the version of Grafana to support in the new workspace. Supported values are `8.4`, `9.4` and `10.4`. If not specified, defaults to the latest version. * `name` - (Optional) The Grafana workspace name. @@ -168,4 +169,4 @@ Using `terraform import`, import Grafana Workspace using the workspace's `id`. 
F % terraform import aws_grafana_workspace.example g-2054c75a02 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/grafana_workspace_api_key.html.markdown b/website/docs/cdktf/typescript/r/grafana_workspace_api_key.html.markdown index 8a2970422a87..ec2f43b67176 100644 --- a/website/docs/cdktf/typescript/r/grafana_workspace_api_key.html.markdown +++ b/website/docs/cdktf/typescript/r/grafana_workspace_api_key.html.markdown @@ -41,8 +41,9 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -The following arguments are required: +This resource supports the following arguments: +- `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). - `keyName` - (Required) Specifies the name of the API key. Key names must be unique to the workspace. - `keyRole` - (Required) Specifies the permission level of the API key. Valid values are `VIEWER`, `EDITOR`, or `ADMIN`. - `secondsToLive` - (Required) Specifies the time in seconds until the API key expires. Keys can be valid for up to 30 days. @@ -54,4 +55,4 @@ This resource exports the following attributes in addition to the arguments abov * `key` - The key token in JSON format. Use this value as a bearer token to authenticate HTTP requests to the workspace. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/grafana_workspace_saml_configuration.html.markdown b/website/docs/cdktf/typescript/r/grafana_workspace_saml_configuration.html.markdown index f4ac84f81479..53c715ab6c85 100644 --- a/website/docs/cdktf/typescript/r/grafana_workspace_saml_configuration.html.markdown +++ b/website/docs/cdktf/typescript/r/grafana_workspace_saml_configuration.html.markdown @@ -76,6 +76,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `adminRoleValues` - (Optional) The admin role values. * `allowedOrganizations` - (Optional) The allowed organizations. * `emailAssertion` - (Optional) The email assertion. @@ -126,4 +127,4 @@ Using `terraform import`, import Grafana Workspace SAML configuration using the % terraform import aws_grafana_workspace_saml_configuration.example g-2054c75a02 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/grafana_workspace_service_account.html.markdown b/website/docs/cdktf/typescript/r/grafana_workspace_service_account.html.markdown index 995087abea8b..49d783d10d19 100644 --- a/website/docs/cdktf/typescript/r/grafana_workspace_service_account.html.markdown +++ b/website/docs/cdktf/typescript/r/grafana_workspace_service_account.html.markdown @@ -43,8 +43,9 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -The following arguments are required: +This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) A name for the service account. The name must be unique within the workspace, as it determines the ID associated with the service account. * `grafanaRole` - (Required) The permission level to use for this service account. For more information about the roles and the permissions each has, see the [User roles](https://docs.aws.amazon.com/grafana/latest/userguide/Grafana-user-roles.html) documentation. * `workspaceId` - (Required) The Grafana workspace with which the service account is associated. @@ -87,4 +88,4 @@ Using `terraform import`, import Managed Grafana Workspace Service Account using % terraform import aws_grafana_workspace_service_account.example g-abc12345,1 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/grafana_workspace_service_account_token.html.markdown b/website/docs/cdktf/typescript/r/grafana_workspace_service_account_token.html.markdown index 501f5d3f27d3..b2705c70e1e3 100644 --- a/website/docs/cdktf/typescript/r/grafana_workspace_service_account_token.html.markdown +++ b/website/docs/cdktf/typescript/r/grafana_workspace_service_account_token.html.markdown @@ -53,8 +53,9 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -The following arguments are required: +This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) A name for the token to create. The name must be unique within the workspace. * `secondsToLive` - (Required) Sets how long the token will be valid, in seconds. 
You can set the time up to 30 days in the future. * `serviceAccountId` - (Required) The ID of the service account for which to create a token. @@ -69,4 +70,4 @@ This resource exports the following attributes in addition to the arguments abov * `expiresAt` - Specifies when the service account token will expire. * `key` - The key for the service account token. Used when making calls to the Grafana HTTP APIs to authenticate and authorize the requests. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/guardduty_detector.html.markdown b/website/docs/cdktf/typescript/r/guardduty_detector.html.markdown index 510b1c85a76e..2078f9f93408 100644 --- a/website/docs/cdktf/typescript/r/guardduty_detector.html.markdown +++ b/website/docs/cdktf/typescript/r/guardduty_detector.html.markdown @@ -57,9 +57,10 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `enable` - (Optional) Enable monitoring and feedback reporting. Setting to `false` is equivalent to "suspending" GuardDuty. Defaults to `true`. * `findingPublishingFrequency` - (Optional) Specifies the frequency of notifications sent for subsequent finding occurrences. If the detector is a GuardDuty member account, the value is determined by the GuardDuty primary account and cannot be modified, otherwise defaults to `SIX_HOURS`. For standalone and GuardDuty primary accounts, it must be configured in Terraform to enable drift detection. Valid values for standalone and primary accounts: `FIFTEEN_MINUTES`, `ONE_HOUR`, `SIX_HOURS`. 
See [AWS Documentation](https://docs.aws.amazon.com/guardduty/latest/ug/guardduty_findings_cloudwatch.html#guardduty_findings_cloudwatch_notification_frequency) for more information. -* `datasources` - (Optional) Describes which data sources will be enabled for the detector. See [Data Sources](#data-sources) below for more details. [Deprecated](https://docs.aws.amazon.com/guardduty/latest/ug/guardduty-feature-object-api-changes-march2023.html) in favor of [`aws_guardduty_detector_feature` resources](guardduty_detector_feature.html). +* `datasources` - (Optional, **Deprecated** use `aws_guardduty_detector_feature` resources instead) Describes which data sources will be enabled for the detector. See [Data Sources](#data-sources) below for more details. [Deprecated](https://docs.aws.amazon.com/guardduty/latest/ug/guardduty-feature-object-api-changes-march2023.html) in favor of [`aws_guardduty_detector_feature` resources](guardduty_detector_feature.html). * `tags` - (Optional) Key-value map of resource tags. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. ### Data Sources @@ -160,4 +161,4 @@ Using `terraform import`, import GuardDuty detectors using the detector ID. For The ID of the detector can be retrieved via the [AWS CLI](https://awscli.amazonaws.com/v2/documentation/api/latest/reference/guardduty/list-detectors.html) using `aws guardduty list-detectors`. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/guardduty_detector_feature.html.markdown b/website/docs/cdktf/typescript/r/guardduty_detector_feature.html.markdown index 387144a2f40d..03cc294a73d5 100644 --- a/website/docs/cdktf/typescript/r/guardduty_detector_feature.html.markdown +++ b/website/docs/cdktf/typescript/r/guardduty_detector_feature.html.markdown @@ -32,6 +32,41 @@ class MyConvertedCode extends TerraformStack { const example = new GuarddutyDetector(this, "example", { enable: true, }); + new GuarddutyDetectorFeature(this, "s3_protection", { + detectorId: example.id, + name: "S3_DATA_EVENTS", + status: "ENABLED", + }); + } +} + +``` + +## Extended Threat Detection for EKS + +To enable GuardDuty [Extended Threat Detection](https://docs.aws.amazon.com/guardduty/latest/ug/guardduty-extended-threat-detection.html) for EKS, you need at least one of these features enabled: [EKS Protection](https://docs.aws.amazon.com/guardduty/latest/ug/kubernetes-protection.html) or [Runtime Monitoring](https://docs.aws.amazon.com/guardduty/latest/ug/runtime-monitoring-configuration.html). For maximum detection coverage, enabling both is recommended to enhance detection capabilities. + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. 
+ */ +import { GuarddutyDetector } from "./.gen/providers/aws/guardduty-detector"; +import { GuarddutyDetectorFeature } from "./.gen/providers/aws/guardduty-detector-feature"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + const example = new GuarddutyDetector(this, "example", { + enable: true, + }); + new GuarddutyDetectorFeature(this, "eks_protection", { + detectorId: example.id, + name: "EKS_AUDIT_LOGS", + status: "ENABLED", + }); new GuarddutyDetectorFeature(this, "eks_runtime_monitoring", { additionalConfiguration: [ { @@ -52,6 +87,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `detectorId` - (Required) Amazon GuardDuty detector ID. * `name` - (Required) The name of the detector feature. Valid values: `S3_DATA_EVENTS`, `EKS_AUDIT_LOGS`, `EBS_MALWARE_PROTECTION`, `RDS_LOGIN_EVENTS`, `EKS_RUNTIME_MONITORING`, `LAMBDA_NETWORK_LOGS`, `RUNTIME_MONITORING`. Only one of two features `EKS_RUNTIME_MONITORING` or `RUNTIME_MONITORING` can be added, adding both features will cause an error. Refer to the [AWS Documentation](https://docs.aws.amazon.com/guardduty/latest/APIReference/API_DetectorFeatureConfiguration.html) for the current list of supported values. * `status` - (Required) The status of the detector feature. Valid values: `ENABLED`, `DISABLED`. @@ -68,4 +104,4 @@ The `additionalConfiguration` block supports the following: This resource exports no additional attributes. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/guardduty_filter.html.markdown b/website/docs/cdktf/typescript/r/guardduty_filter.html.markdown index 59b05d83229b..cd0dcf402440 100644 --- a/website/docs/cdktf/typescript/r/guardduty_filter.html.markdown +++ b/website/docs/cdktf/typescript/r/guardduty_filter.html.markdown @@ -62,6 +62,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `detectorId` - (Required) ID of a GuardDuty detector, attached to your account. * `name` - (Required) The name of your filter. * `description` - (Optional) Description of the filter. @@ -87,7 +88,6 @@ The `criterion` block suports the following: This resource exports the following attributes in addition to the arguments above: * `arn` - The ARN of the GuardDuty filter. -* `id` - A compound field, consisting of the ID of the GuardDuty detector and the name of the filter. * `tagsAll` - A map of tags assigned to the resource, including those inherited from the provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). 
## Import @@ -122,4 +122,4 @@ Using `terraform import`, import GuardDuty filters using the detector ID and fil % terraform import aws_guardduty_filter.MyFilter 00b00fd5aecc0ab60a708659477e9617:MyFilter ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/guardduty_invite_accepter.html.markdown b/website/docs/cdktf/typescript/r/guardduty_invite_accepter.html.markdown index f2dc24b72b45..b5b30623abc5 100644 --- a/website/docs/cdktf/typescript/r/guardduty_invite_accepter.html.markdown +++ b/website/docs/cdktf/typescript/r/guardduty_invite_accepter.html.markdown @@ -71,14 +71,13 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `detectorId` - (Required) The detector ID of the member GuardDuty account. * `masterAccountId` - (Required) AWS account ID for primary account. ## Attribute Reference -This resource exports the following attributes in addition to the arguments above: - -* `id` - GuardDuty member detector ID +This resource exports no additional attributes. 
## Timeouts @@ -118,4 +117,4 @@ Using `terraform import`, import `aws_guardduty_invite_accepter` using the membe % terraform import aws_guardduty_invite_accepter.member 00b00fd5aecc0ab60a708659477e9617 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/guardduty_ipset.html.markdown b/website/docs/cdktf/typescript/r/guardduty_ipset.html.markdown index 8be8aacfafe4..c6c03a61f0e6 100644 --- a/website/docs/cdktf/typescript/r/guardduty_ipset.html.markdown +++ b/website/docs/cdktf/typescript/r/guardduty_ipset.html.markdown @@ -65,6 +65,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `activate` - (Required) Specifies whether GuardDuty is to start using the uploaded IPSet. * `detectorId` - (Required) The detector ID of the GuardDuty. * `format` - (Required) The format of the file that contains the IPSet. Valid values: `TXT` | `STIX` | `OTX_CSV` | `ALIEN_VAULT` | `PROOF_POINT` | `FIRE_EYE` @@ -77,7 +78,6 @@ This resource supports the following arguments: This resource exports the following attributes in addition to the arguments above: * `arn` - Amazon Resource Name (ARN) of the GuardDuty IPSet. -* `id` - The ID of the GuardDuty IPSet. * `tagsAll` - A map of tags assigned to the resource, including those inherited from the provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). 
## Import @@ -112,4 +112,4 @@ Using `terraform import`, import GuardDuty IPSet using the primary GuardDuty det % terraform import aws_guardduty_ipset.MyIPSet 00b00fd5aecc0ab60a708659477e9617:123456789012 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/guardduty_malware_protection_plan.html.markdown b/website/docs/cdktf/typescript/r/guardduty_malware_protection_plan.html.markdown index 52804d971afb..d1faaf9da806 100644 --- a/website/docs/cdktf/typescript/r/guardduty_malware_protection_plan.html.markdown +++ b/website/docs/cdktf/typescript/r/guardduty_malware_protection_plan.html.markdown @@ -60,6 +60,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `actions` - (Optional) Information about whether the tags will be added to the S3 object after scanning. See [`actions`](#actions-argument-reference) below. * `protectedResource` - (Required) Information about the protected resource that is associated with the created Malware Protection plan. Presently, S3Bucket is the only supported protected resource. See [`protectedResource`](#protected_resource-argument-reference) below. * `role` - (Required) ARN of IAM role that includes the permissions required to scan and add tags to the associated protected resource. 
@@ -123,4 +124,4 @@ Using `terraform import`, import GuardDuty malware protection plans using their % terraform import aws_guardduty_malware_protection_plan.example 1234567890abcdef0123 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/guardduty_member.html.markdown b/website/docs/cdktf/typescript/r/guardduty_member.html.markdown index 2491e6379dc4..7bf86c78f183 100644 --- a/website/docs/cdktf/typescript/r/guardduty_member.html.markdown +++ b/website/docs/cdktf/typescript/r/guardduty_member.html.markdown @@ -52,6 +52,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `accountId` - (Required) AWS account ID for member account. * `detectorId` - (Required) The detector ID of the GuardDuty account where you want to create member accounts. * `email` - (Required) Email address for member account. @@ -63,7 +64,6 @@ This resource supports the following arguments: This resource exports the following attributes in addition to the arguments above: -* `id` - The ID of the GuardDuty member * `relationshipStatus` - The status of the relationship between the member account and its primary account. More information can be found in [Amazon GuardDuty API Reference](https://docs.aws.amazon.com/guardduty/latest/ug/get-members.html). 
## Timeouts @@ -105,4 +105,4 @@ Using `terraform import`, import GuardDuty members using the primary GuardDuty d % terraform import aws_guardduty_member.MyMember 00b00fd5aecc0ab60a708659477e9617:123456789012 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/guardduty_member_detector_feature.html.markdown b/website/docs/cdktf/typescript/r/guardduty_member_detector_feature.html.markdown index b5d375b6b5d9..6c5cb74c091b 100644 --- a/website/docs/cdktf/typescript/r/guardduty_member_detector_feature.html.markdown +++ b/website/docs/cdktf/typescript/r/guardduty_member_detector_feature.html.markdown @@ -24,8 +24,8 @@ import { TerraformStack } from "cdktf"; * Provider bindings are generated by running `cdktf get`. * See https://cdk.tf/provider-generation for more details. */ -import { GuarddutyMemberDetectorFeature } from "./.gen/providers/aws/"; import { GuarddutyDetector } from "./.gen/providers/aws/guardduty-detector"; +import { GuarddutyMemberDetectorFeature } from "./.gen/providers/aws/guardduty-member-detector-feature"; class MyConvertedCode extends TerraformStack { constructor(scope: Construct, name: string) { super(scope, name); @@ -33,19 +33,52 @@ class MyConvertedCode extends TerraformStack { enable: true, }); new GuarddutyMemberDetectorFeature(this, "runtime_monitoring", { + accountId: "123456789012", + detectorId: example.id, + name: "S3_DATA_EVENTS", + status: "ENABLED", + }); + } +} + +``` + +## Extended Threat Detection for EKS + +To enable GuardDuty [Extended Threat Detection](https://docs.aws.amazon.com/guardduty/latest/ug/guardduty-extended-threat-detection.html) for EKS, you need at least one of these features enabled: [EKS Protection](https://docs.aws.amazon.com/guardduty/latest/ug/kubernetes-protection.html) or [Runtime Monitoring](https://docs.aws.amazon.com/guardduty/latest/ug/runtime-monitoring-configuration.html). 
For maximum detection coverage, enabling both is recommended to enhance detection capabilities. + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. + */ +import { GuarddutyDetector } from "./.gen/providers/aws/guardduty-detector"; +import { GuarddutyDetectorFeature } from "./.gen/providers/aws/guardduty-detector-feature"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + const example = new GuarddutyDetector(this, "example", { + enable: true, + }); + new GuarddutyDetectorFeature(this, "eks_protection", { + account_id: "123456789012", + detectorId: example.id, + name: "EKS_AUDIT_LOGS", + status: "ENABLED", + }); + new GuarddutyDetectorFeature(this, "eks_runtime_monitoring", { account_id: "123456789012", - additional_configuration: [ + additionalConfiguration: [ { name: "EKS_ADDON_MANAGEMENT", status: "ENABLED", }, - { - name: "ECS_FARGATE_AGENT_MANAGEMENT", - status: "ENABLED", - }, ], - detector_id: example.id, - name: "RUNTIME_MONITORING", + detectorId: example.id, + name: "EKS_RUNTIME_MONITORING", status: "ENABLED", }); } @@ -57,6 +90,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `detectorId` - (Required) Amazon GuardDuty detector ID. * `accountId` - (Required) Member account ID to be updated. * `name` - (Required) The name of the detector feature. 
Valid values: `S3_DATA_EVENTS`, `EKS_AUDIT_LOGS`, `EBS_MALWARE_PROTECTION`, `RDS_LOGIN_EVENTS`, `EKS_RUNTIME_MONITORING`,`RUNTIME_MONITORING`, `LAMBDA_NETWORK_LOGS`. @@ -74,4 +108,4 @@ The `additionalConfiguration` block supports the following: This resource exports no additional attributes. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/guardduty_organization_admin_account.html.markdown b/website/docs/cdktf/typescript/r/guardduty_organization_admin_account.html.markdown index c1b9b2940bf5..52c9083a6ffc 100644 --- a/website/docs/cdktf/typescript/r/guardduty_organization_admin_account.html.markdown +++ b/website/docs/cdktf/typescript/r/guardduty_organization_admin_account.html.markdown @@ -55,13 +55,12 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `adminAccountId` - (Required) AWS account identifier to designate as a delegated administrator for GuardDuty. ## Attribute Reference -This resource exports the following attributes in addition to the arguments above: - -* `id` - AWS account identifier. +This resource exports no additional attributes. 
## Import @@ -95,4 +94,4 @@ Using `terraform import`, import GuardDuty Organization Admin Account using the % terraform import aws_guardduty_organization_admin_account.example 123456789012 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/guardduty_organization_configuration.html.markdown b/website/docs/cdktf/typescript/r/guardduty_organization_configuration.html.markdown index 421640ba1c4f..5e86e3245420 100644 --- a/website/docs/cdktf/typescript/r/guardduty_organization_configuration.html.markdown +++ b/website/docs/cdktf/typescript/r/guardduty_organization_configuration.html.markdown @@ -65,8 +65,9 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: -* `autoEnable` - (Optional) *Deprecated:* Use `autoEnableOrganizationMembers` instead. When this setting is enabled, all new accounts that are created in, or added to, the organization are added as a member accounts of the organization’s GuardDuty delegated administrator and GuardDuty is enabled in that AWS Region. -* `autoEnableOrganizationMembers` - (Optional) Indicates the auto-enablement configuration of GuardDuty for the member accounts in the organization. Valid values are `ALL`, `NEW`, `NONE`. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `autoEnableOrganizationMembers` - (Required) Indicates the auto-enablement configuration of GuardDuty for the member accounts in the organization. + Valid values are `ALL`, `NEW`, `NONE`. * `detectorId` - (Required) The detector ID of the GuardDuty account. * `datasources` - (Optional) Configuration for the collected datasources. 
[Deprecated](https://docs.aws.amazon.com/guardduty/latest/ug/guardduty-feature-object-api-changes-march2023.html) in favor of [`aws_guardduty_organization_configuration_feature` resources](guardduty_organization_configuration_feature.html). @@ -121,9 +122,7 @@ The `ebsVolumes` block supports the following: ## Attribute Reference -This resource exports the following attributes in addition to the arguments above: - -* `id` - Identifier of the GuardDuty Detector. +This resource exports no additional attributes. ## Import @@ -157,4 +156,4 @@ Using `terraform import`, import GuardDuty Organization Configurations using the % terraform import aws_guardduty_organization_configuration.example 00b00fd5aecc0ab60a708659477e9617 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/guardduty_organization_configuration_feature.html.markdown b/website/docs/cdktf/typescript/r/guardduty_organization_configuration_feature.html.markdown index e55abc10b499..56d3f1b2389d 100644 --- a/website/docs/cdktf/typescript/r/guardduty_organization_configuration_feature.html.markdown +++ b/website/docs/cdktf/typescript/r/guardduty_organization_configuration_feature.html.markdown @@ -12,7 +12,7 @@ description: |- Provides a resource to manage a single Amazon GuardDuty [organization configuration feature](https://docs.aws.amazon.com/guardduty/latest/ug/guardduty-features-activation-model.html#guardduty-features). -~> **NOTE:** Deleting this resource does not disable the organization configuration feature, the resource in simply removed from state instead. +~> **NOTE:** Deleting this resource does not disable the organization configuration feature, the resource is simply removed from state instead. 
## Example Usage @@ -56,6 +56,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `autoEnable` - (Required) The status of the feature that is configured for the member accounts within the organization. Valid values: `NEW`, `ALL`, `NONE`. * `detectorId` - (Required) The ID of the detector that configures the delegated administrator. * `name` - (Required) The name of the feature that will be configured for the organization. Valid values: `S3_DATA_EVENTS`, `EKS_AUDIT_LOGS`, `EBS_MALWARE_PROTECTION`, `RDS_LOGIN_EVENTS`, `EKS_RUNTIME_MONITORING`, `LAMBDA_NETWORK_LOGS`, `RUNTIME_MONITORING`. Only one of two features `EKS_RUNTIME_MONITORING` or `RUNTIME_MONITORING` can be added, adding both features will cause an error. Refer to the [AWS Documentation](https://docs.aws.amazon.com/guardduty/latest/APIReference/API_DetectorFeatureConfiguration.html) for the current list of supported values. @@ -72,4 +73,4 @@ The `additionalConfiguration` block supports the following: This resource exports no additional attributes. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/guardduty_publishing_destination.html.markdown b/website/docs/cdktf/typescript/r/guardduty_publishing_destination.html.markdown index b8ec1f803581..7a357ab9d7f2 100644 --- a/website/docs/cdktf/typescript/r/guardduty_publishing_destination.html.markdown +++ b/website/docs/cdktf/typescript/r/guardduty_publishing_destination.html.markdown @@ -90,7 +90,7 @@ class MyConvertedCode extends TerraformStack { ], resources: [ "arn:aws:kms:${" + - dataAwsRegionCurrent.name + + dataAwsRegionCurrent.region + "}:${" + current.accountId + "}:key/*", @@ -107,7 +107,7 @@ class MyConvertedCode extends TerraformStack { ], resources: [ "arn:aws:kms:${" + - dataAwsRegionCurrent.name + + dataAwsRegionCurrent.region + "}:${" + current.accountId + "}:key/*", @@ -138,6 +138,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `detectorId` - (Required) The detector ID of the GuardDuty. * `destinationArn` - (Required) The bucket arn and prefix under which the findings get exported. Bucket-ARN is required, the prefix is optional and will be `AWSLogs/[Account-ID]/GuardDuty/[Region]/` if not provided * `kmsKeyArn` - (Required) The ARN of the KMS key used to encrypt GuardDuty findings. GuardDuty enforces this to be encrypted. @@ -147,9 +148,7 @@ This resource supports the following arguments: ## Attribute Reference -This resource exports the following attributes in addition to the arguments above: - -* `id` - The ID of the GuardDuty PublishingDestination and the detector ID. Format: `:` +This resource exports no additional attributes. 
## Import @@ -183,4 +182,4 @@ Using `terraform import`, import GuardDuty PublishingDestination using the maste % terraform import aws_guardduty_publishing_destination.test a4b86f26fa42e7e7cf0d1c333ea77777:a4b86f27a0e464e4a7e0516d242f1234 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/guardduty_threatintelset.html.markdown b/website/docs/cdktf/typescript/r/guardduty_threatintelset.html.markdown index d8d3652c3ee6..b50da0727a52 100644 --- a/website/docs/cdktf/typescript/r/guardduty_threatintelset.html.markdown +++ b/website/docs/cdktf/typescript/r/guardduty_threatintelset.html.markdown @@ -71,6 +71,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `activate` - (Required) Specifies whether GuardDuty is to start using the uploaded ThreatIntelSet. * `detectorId` - (Required) The detector ID of the GuardDuty. * `format` - (Required) The format of the file that contains the ThreatIntelSet. Valid values: `TXT` | `STIX` | `OTX_CSV` | `ALIEN_VAULT` | `PROOF_POINT` | `FIRE_EYE` @@ -83,7 +84,6 @@ This resource supports the following arguments: This resource exports the following attributes in addition to the arguments above: * `arn` - Amazon Resource Name (ARN) of the GuardDuty ThreatIntelSet. -* `id` - The ID of the GuardDuty ThreatIntelSet and the detector ID. Format: `:` * `tagsAll` - A map of tags assigned to the resource, including those inherited from the provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). 
## Import @@ -118,4 +118,4 @@ Using `terraform import`, import GuardDuty ThreatIntelSet using the primary Guar % terraform import aws_guardduty_threatintelset.MyThreatIntelSet 00b00fd5aecc0ab60a708659477e9617:123456789012 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/iam_group_membership.html.markdown b/website/docs/cdktf/typescript/r/iam_group_membership.html.markdown index 960b3ae168ed..36608e89f562 100644 --- a/website/docs/cdktf/typescript/r/iam_group_membership.html.markdown +++ b/website/docs/cdktf/typescript/r/iam_group_membership.html.markdown @@ -60,7 +60,7 @@ This resource supports the following arguments: * `name` - (Required) The name to identify the Group Membership * `users` - (Required) A list of IAM User names to associate with the Group -* `group` – (Required) The IAM Group name to attach the list of `users` to +* `group` - (Required) The IAM Group name to attach the list of `users` to ## Attribute Reference @@ -68,10 +68,10 @@ This resource exports the following attributes in addition to the arguments abov * `name` - The name to identify the Group Membership * `users` - list of IAM User names -* `group` – IAM Group name +* `group` - IAM Group name [1]: /docs/providers/aws/r/iam_group.html [2]: /docs/providers/aws/r/iam_user.html [3]: /docs/providers/aws/r/iam_user_group_membership.html - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/iam_openid_connect_provider.html.markdown b/website/docs/cdktf/typescript/r/iam_openid_connect_provider.html.markdown index f4bdb7309bfc..8fd1a2b583d3 100644 --- a/website/docs/cdktf/typescript/r/iam_openid_connect_provider.html.markdown +++ b/website/docs/cdktf/typescript/r/iam_openid_connect_provider.html.markdown @@ -83,6 +83,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` 
block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_iam_openid_connect_provider.example + identity = { + "arn" = "arn:aws:iam::123456789012:oidc-provider/example.com" + } +} + +resource "aws_iam_openid_connect_provider" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the IAM OpenID Connect provider. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import IAM OpenID Connect Providers using the `arn`. For example: ```typescript @@ -113,4 +134,4 @@ Using `terraform import`, import IAM OpenID Connect Providers using the `arn`. F % terraform import aws_iam_openid_connect_provider.default arn:aws:iam::123456789012:oidc-provider/accounts.google.com ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/iam_policy.html.markdown b/website/docs/cdktf/typescript/r/iam_policy.html.markdown index cc0a49e7a641..398f3629d666 100644 --- a/website/docs/cdktf/typescript/r/iam_policy.html.markdown +++ b/website/docs/cdktf/typescript/r/iam_policy.html.markdown @@ -73,6 +73,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_iam_policy.example + identity = { + "arn" = "arn:aws:iam::123456789012:policy/UsersManageOwnCredentials" + } +} + +resource "aws_iam_policy" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the IAM policy. 
+ In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import IAM Policies using the `arn`. For example: ```typescript @@ -103,4 +124,4 @@ Using `terraform import`, import IAM Policies using the `arn`. For example: % terraform import aws_iam_policy.administrator arn:aws:iam::123456789012:policy/UsersManageOwnCredentials ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/iam_role.html.markdown b/website/docs/cdktf/typescript/r/iam_role.html.markdown index d1197ca97394..21bf8b865f7a 100644 --- a/website/docs/cdktf/typescript/r/iam_role.html.markdown +++ b/website/docs/cdktf/typescript/r/iam_role.html.markdown @@ -317,6 +317,31 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_iam_role.example + identity = { + name = "developer_name" + } +} + +resource "aws_iam_role" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `name` (String) Name of the IAM role. + +#### Optional + +* `accountId` (String) AWS Account where this resource is managed. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import IAM Roles using the `name`. 
For example: ```typescript @@ -331,7 +356,7 @@ import { IamRole } from "./.gen/providers/aws/iam-role"; class MyConvertedCode extends TerraformStack { constructor(scope: Construct, name: string) { super(scope, name); - IamRole.generateConfigForImport(this, "developer", "developer_name"); + IamRole.generateConfigForImport(this, "example", "developer_name"); } } @@ -340,7 +365,7 @@ class MyConvertedCode extends TerraformStack { Using `terraform import`, import IAM Roles using the `name`. For example: ```console -% terraform import aws_iam_role.developer developer_name +% terraform import aws_iam_role.example developer_name ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/iam_role_policy.html.markdown b/website/docs/cdktf/typescript/r/iam_role_policy.html.markdown index e1c6f9206567..6a0f8c568f63 100644 --- a/website/docs/cdktf/typescript/r/iam_role_policy.html.markdown +++ b/website/docs/cdktf/typescript/r/iam_role_policy.html.markdown @@ -74,24 +74,48 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: -* `name` - (Optional) The name of the role policy. If omitted, Terraform will -assign a random, unique name. -* `namePrefix` - (Optional) Creates a unique name beginning with the specified - prefix. Conflicts with `name`. -* `policy` - (Required) The inline policy document. This is a JSON formatted string. For more information about building IAM policy documents with Terraform, see the [AWS IAM Policy Document Guide](https://learn.hashicorp.com/terraform/aws/iam-policy) +* `name` - (Optional) The name of the role policy. + If omitted, Terraform will assign a random, unique name. +* `namePrefix` - (Optional) Creates a unique name beginning with the specified prefix. + Conflicts with `name`. +* `policy` - (Required) The inline policy document. + This is a JSON formatted string. 
+ For more information about building IAM policy documents with Terraform, see the [AWS IAM Policy Document Guide](https://learn.hashicorp.com/terraform/aws/iam-policy) * `role` - (Required) The name of the IAM role to attach to the policy. ## Attribute Reference -This resource exports the following attributes in addition to the arguments above: - -* `id` - The role policy ID, in the form of `role_name:role_policy_name`. -* `name` - The name of the policy. -* `policy` - The policy document attached to the role. -* `role` - The name of the role associated with the policy. +This resource exports no additional attributes. ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_iam_role_policy.example + identity = { + role = "role_of_mypolicy_name" + name = "mypolicy_name" + } +} + +resource "aws_iam_role_policy" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `role` (String) Name of the IAM role. +* `name` (String) Name of the role policy. + +#### Optional + +* `accountId` (String) AWS Account where this resource is managed. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import IAM Role Policies using the `role_name:role_policy_name`. For example: ```typescript @@ -108,7 +132,7 @@ class MyConvertedCode extends TerraformStack { super(scope, name); IamRolePolicy.generateConfigForImport( this, - "mypolicy", + "example", "role_of_mypolicy_name:mypolicy_name" ); } @@ -119,7 +143,7 @@ class MyConvertedCode extends TerraformStack { Using `terraform import`, import IAM Role Policies using the `role_name:role_policy_name`. 
For example: ```console -% terraform import aws_iam_role_policy.mypolicy role_of_mypolicy_name:mypolicy_name +% terraform import aws_iam_role_policy.example role_of_mypolicy_name:mypolicy_name ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/iam_role_policy_attachment.html.markdown b/website/docs/cdktf/typescript/r/iam_role_policy_attachment.html.markdown index 1ecb373266cb..9025052b6778 100644 --- a/website/docs/cdktf/typescript/r/iam_role_policy_attachment.html.markdown +++ b/website/docs/cdktf/typescript/r/iam_role_policy_attachment.html.markdown @@ -89,6 +89,33 @@ This resource exports no additional attributes. ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_iam_role_policy_attachment.example + identity = { + role = "test-role" + policy_arn = "arn:aws:iam::xxxxxxxxxxxx:policy/test-policy" + } +} + +resource "aws_iam_role_policy_attachment" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `role` (String) Name of the IAM role. +* `policyArn` (String) ARN of the IAM policy. + +#### Optional + +* `accountId` (String) AWS Account where this resource is managed. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import IAM role policy attachments using the role name and policy arn separated by `/`. 
For example: ```typescript @@ -105,7 +132,7 @@ class MyConvertedCode extends TerraformStack { super(scope, name); IamRolePolicyAttachment.generateConfigForImport( this, - "testAttach", + "example", "test-role/arn:aws:iam::xxxxxxxxxxxx:policy/test-policy" ); } @@ -116,7 +143,7 @@ class MyConvertedCode extends TerraformStack { Using `terraform import`, import IAM role policy attachments using the role name and policy arn separated by `/`. For example: ```console -% terraform import aws_iam_role_policy_attachment.test-attach test-role/arn:aws:iam::xxxxxxxxxxxx:policy/test-policy +% terraform import aws_iam_role_policy_attachment.example test-role/arn:aws:iam::xxxxxxxxxxxx:policy/test-policy ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/iam_saml_provider.html.markdown b/website/docs/cdktf/typescript/r/iam_saml_provider.html.markdown index cc2b00bb0aec..b29be9893f2d 100644 --- a/website/docs/cdktf/typescript/r/iam_saml_provider.html.markdown +++ b/website/docs/cdktf/typescript/r/iam_saml_provider.html.markdown @@ -53,6 +53,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_iam_saml_provider.example + identity = { + "arn" = "arn:aws:iam::123456789012:saml-provider/ExampleProvider" + } +} + +resource "aws_iam_saml_provider" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the IAM SAML provider. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import IAM SAML Providers using the `arn`. 
For example: ```typescript @@ -83,4 +104,4 @@ Using `terraform import`, import IAM SAML Providers using the `arn`. For example % terraform import aws_iam_saml_provider.default arn:aws:iam::123456789012:saml-provider/SAMLADFS ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/iam_server_certificate.html.markdown b/website/docs/cdktf/typescript/r/iam_server_certificate.html.markdown index 545bc90f3220..eb813960d6dc 100644 --- a/website/docs/cdktf/typescript/r/iam_server_certificate.html.markdown +++ b/website/docs/cdktf/typescript/r/iam_server_certificate.html.markdown @@ -129,9 +129,9 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: -* `certificateBody` – (Required, Forces new resource) The contents of the public key certificate in +* `certificateBody` - (Required, Forces new resource) The contents of the public key certificate in PEM-encoded format. -* `certificateChain` – (Optional, Forces new resource) The contents of the certificate chain. +* `certificateChain` - (Optional, Forces new resource) The contents of the certificate chain. This is typically a concatenation of the PEM-encoded public key certificates of the chain. * `name` - (Optional) The name of the Server Certificate. Do not include the path in this value. If omitted, Terraform will assign a random, unique name. @@ -141,7 +141,7 @@ This resource supports the following arguments: included, it defaults to a slash (/). If this certificate is for use with AWS CloudFront, the path must be in format `/cloudfront/your_path_here`. See [IAM Identifiers][1] for more details on IAM Paths. -* `privateKey` – (Required, Forces new resource) The contents of the private key in PEM-encoded format. +* `privateKey` - (Required, Forces new resource) The contents of the private key in PEM-encoded format. * `tags` - (Optional) Map of resource tags for the server certificate. 
If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. ~> **NOTE:** AWS performs behind-the-scenes modifications to some certificate files if they do not adhere to a specific format. These modifications will result in terraform forever believing that it needs to update the resources since the local and AWS file contents will not match after theses modifications occur. In order to prevent this from happening you must ensure that all your PEM-encoded files use UNIX line-breaks and that `certificateBody` contains only one certificate. All other certificates should go in `certificateChain`. It is common for some Certificate Authorities to issue certificate files that have DOS line-breaks and that are actually multiple certificates concatenated together in order to form a full certificate chain. @@ -198,4 +198,4 @@ Using `terraform import`, import IAM Server Certificates using the `name`. For e [2]: https://docs.aws.amazon.com/IAM/latest/UserGuide/ManagingServerCerts.html [lifecycle]: /docs/configuration/resources.html - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/iam_service_linked_role.html.markdown b/website/docs/cdktf/typescript/r/iam_service_linked_role.html.markdown index 7a208146dd1d..76345f1fb44f 100644 --- a/website/docs/cdktf/typescript/r/iam_service_linked_role.html.markdown +++ b/website/docs/cdktf/typescript/r/iam_service_linked_role.html.markdown @@ -57,6 +57,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. 
For example: + +```terraform +import { + to = aws_iam_service_linked_role.example + identity = { + "arn" = "arn:aws:iam::123456789012:role/aws-service-role/elasticbeanstalk.amazonaws.com/AWSServiceRoleForElasticBeanstalk" + } +} + +resource "aws_iam_service_linked_role" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the IAM service-linked role. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import IAM service-linked roles using role ARN. For example: ```typescript @@ -87,4 +108,4 @@ Using `terraform import`, import IAM service-linked roles using role ARN. For ex % terraform import aws_iam_service_linked_role.elasticbeanstalk arn:aws:iam::123456789012:role/aws-service-role/elasticbeanstalk.amazonaws.com/AWSServiceRoleForElasticBeanstalk ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/iam_signing_certificate.html.markdown b/website/docs/cdktf/typescript/r/iam_signing_certificate.html.markdown index cde0c023f553..3f7691376995 100644 --- a/website/docs/cdktf/typescript/r/iam_signing_certificate.html.markdown +++ b/website/docs/cdktf/typescript/r/iam_signing_certificate.html.markdown @@ -76,9 +76,9 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: -* `certificateBody` – (Required) The contents of the signing certificate in PEM-encoded format. -* `status` – (Optional) The status you want to assign to the certificate. `Active` means that the certificate can be used for programmatic calls to Amazon Web Services `Inactive` means that the certificate cannot be used. -* `userName` – (Required) The name of the user the signing certificate is for. +* `certificateBody` - (Required) The contents of the signing certificate in PEM-encoded format. 
+* `status` - (Optional) The status you want to assign to the certificate. `Active` means that the certificate can be used for programmatic calls to Amazon Web Services. `Inactive` means that the certificate cannot be used. +* `userName` - (Required) The name of the user the signing certificate is for. ## Attribute Reference @@ -119,4 +119,4 @@ Using `terraform import`, import IAM Signing Certificates using the `id`. For ex % terraform import aws_iam_signing_certificate.certificate IDIDIDIDID:user-name ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/iam_virtual_mfa_device.html.markdown b/website/docs/cdktf/typescript/r/iam_virtual_mfa_device.html.markdown index ea263f83b71b..ad564e305747 100644 --- a/website/docs/cdktf/typescript/r/iam_virtual_mfa_device.html.markdown +++ b/website/docs/cdktf/typescript/r/iam_virtual_mfa_device.html.markdown @@ -48,7 +48,7 @@ This resource supports the following arguments: * `virtualMfaDeviceName` - (Required) The name of the virtual MFA device. Use with path to uniquely identify a virtual MFA device. -* `path` – (Optional) The path for the virtual MFA device. +* `path` - (Optional) The path for the virtual MFA device. * `tags` - (Optional) Map of resource tags for the virtual mfa device. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. ## Attribute Reference @@ -94,4 +94,4 @@ Using `terraform import`, import IAM Virtual MFA Devices using the `arn`. 
For ex % terraform import aws_iam_virtual_mfa_device.example arn:aws:iam::123456789012:mfa/example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/identitystore_group.html.markdown b/website/docs/cdktf/typescript/r/identitystore_group.html.markdown index a148a4c5ecf3..f751eb4f6f14 100644 --- a/website/docs/cdktf/typescript/r/identitystore_group.html.markdown +++ b/website/docs/cdktf/typescript/r/identitystore_group.html.markdown @@ -48,6 +48,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `displayName` - (Optional) A string containing the name of the group. This value is commonly displayed when the group is referenced. * `description` - (Optional) A string containing the description of the group. 
@@ -103,4 +104,4 @@ Using `terraform import`, import an Identity Store Group using the combination ` % terraform import aws_identitystore_group.example d-9c6705e95c/b8a1c340-8031-7071-a2fb-7dc540320c30 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/identitystore_group_membership.html.markdown b/website/docs/cdktf/typescript/r/identitystore_group_membership.html.markdown index 78300f500ba4..eddb12f87561 100644 --- a/website/docs/cdktf/typescript/r/identitystore_group_membership.html.markdown +++ b/website/docs/cdktf/typescript/r/identitystore_group_membership.html.markdown @@ -79,6 +79,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `memberId` - (Required) The identifier for a user in the Identity Store. * `groupId` - (Required) The identifier for a group in the Identity Store. * `identityStoreId` - (Required) Identity Store ID associated with the Single Sign-On Instance. 
@@ -121,4 +122,4 @@ Using `terraform import`, import `aws_identitystore_group_membership` using the % terraform import aws_identitystore_group_membership.example d-0000000000/00000000-0000-0000-0000-000000000000 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/identitystore_user.html.markdown b/website/docs/cdktf/typescript/r/identitystore_user.html.markdown index 7bd19c954735..01ae3667660f 100644 --- a/website/docs/cdktf/typescript/r/identitystore_user.html.markdown +++ b/website/docs/cdktf/typescript/r/identitystore_user.html.markdown @@ -66,6 +66,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `addresses` - (Optional) Details about the user's address. At most 1 address is allowed. Detailed below. * `emails` - (Optional) Details about the user's email. At most 1 email is allowed. Detailed below. * `locale` - (Optional) The user's geographical region or location. @@ -105,6 +106,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `formatted` - (Optional) The name that is typically displayed when the name is shown for display. * `honorificPrefix` - (Optional) The honorific prefix of the user. * `honorificSuffix` - (Optional) The honorific suffix of the user. 
@@ -157,4 +159,4 @@ Using `terraform import`, import an Identity Store User using the combination `i % terraform import aws_identitystore_user.example d-9c6705e95c/065212b4-9061-703b-5876-13a517ae2a7c ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/imagebuilder_component.html.markdown b/website/docs/cdktf/typescript/r/imagebuilder_component.html.markdown index f31cb6a589a5..3929c6495694 100644 --- a/website/docs/cdktf/typescript/r/imagebuilder_component.html.markdown +++ b/website/docs/cdktf/typescript/r/imagebuilder_component.html.markdown @@ -98,6 +98,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `changeDescription` - (Optional) Change description of the component. * `data` - (Optional) Inline YAML string with data of the component. Exactly one of `data` and `uri` can be specified. Terraform will only perform drift detection of its value when present in a configuration. * `description` - (Optional) Description of the component. @@ -154,4 +155,4 @@ Using `terraform import`, import `aws_imagebuilder_components` resources using t Certain resource arguments, such as `uri`, cannot be read via the API and imported into Terraform. Terraform will display a difference for these arguments the first run after import if declared in the Terraform configuration for an imported resource. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/imagebuilder_container_recipe.html.markdown b/website/docs/cdktf/typescript/r/imagebuilder_container_recipe.html.markdown index 8f9ad318dcd0..e98e0bb57c34 100644 --- a/website/docs/cdktf/typescript/r/imagebuilder_container_recipe.html.markdown +++ b/website/docs/cdktf/typescript/r/imagebuilder_container_recipe.html.markdown @@ -72,6 +72,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` - (Optional) The description of the container recipe. * `dockerfileTemplateData` - (Optional) The Dockerfile template used to build the image as an inline data blob. * `dockerfileTemplateUri` - (Optional) The Amazon S3 URI for the Dockerfile that will be used to build the container image. @@ -106,6 +107,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `blockDeviceMapping` - (Optional) Configuration block(s) with block device mappings for the container recipe. Detailed below. * `image` - (Optional) The AMI ID to use as the base image for a container build and test instance. If not specified, Image Builder will use the appropriate ECS-optimized AMI as a base image. 
@@ -113,6 +115,7 @@ The following arguments are optional: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `deviceName` - (Optional) Name of the device. For example, `/dev/sda` or `/dev/xvdb`. * `ebs` - (Optional) Configuration block with Elastic Block Storage (EBS) block device mapping settings. Detailed below. * `noDevice` - (Optional) Set to `true` to remove a mapping from the parent image. @@ -122,6 +125,7 @@ The following arguments are optional: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `deleteOnTermination` - (Optional) Whether to delete the volume on termination. Defaults to unset, which is the value inherited from the parent image. * `encrypted` - (Optional) Whether to encrypt the volume. Defaults to unset, which is the value inherited from the parent image. * `iops` - (Optional) Number of Input/Output (I/O) operations per second to provision for an `io1` or `io2` volume. @@ -144,6 +148,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. 
For example: + +```terraform +import { + to = aws_imagebuilder_container_recipe.example + identity = { + "arn" = "arn:aws:imagebuilder:us-east-1:123456789012:container-recipe/example/1.0.0" + } +} + +resource "aws_imagebuilder_container_recipe" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the Image Builder container recipe. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import `aws_imagebuilder_container_recipe` resources using the Amazon Resource Name (ARN). For example: ```typescript @@ -174,4 +199,4 @@ Using `terraform import`, import `aws_imagebuilder_container_recipe` resources u % terraform import aws_imagebuilder_container_recipe.example arn:aws:imagebuilder:us-east-1:123456789012:container-recipe/example/1.0.0 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/imagebuilder_distribution_configuration.html.markdown b/website/docs/cdktf/typescript/r/imagebuilder_distribution_configuration.html.markdown index b9cb35a8e78b..4bfd5300ca35 100644 --- a/website/docs/cdktf/typescript/r/imagebuilder_distribution_configuration.html.markdown +++ b/website/docs/cdktf/typescript/r/imagebuilder_distribution_configuration.html.markdown @@ -62,6 +62,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` - (Optional) Description of the distribution configuration. * `tags` - (Optional) Key-value map of resource tags for the distribution configuration. 
If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. @@ -73,6 +74,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `amiDistributionConfiguration` - (Optional) Configuration block with Amazon Machine Image (AMI) distribution settings. Detailed below. * `containerDistributionConfiguration` - (Optional) Configuration block with container distribution settings. Detailed below. * `fastLaunchConfiguration` - (Optional) Set of Windows faster-launching configurations to use for AMI distribution. Detailed below. @@ -85,6 +87,7 @@ The following arguments are optional: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `amiTags` - (Optional) Key-value map of tags to apply to the distributed AMI. * `description` - (Optional) Description to apply to the distributed AMI. * `kmsKeyId` - (Optional) Amazon Resource Name (ARN) of the Key Management Service (KMS) Key to encrypt the distributed AMI. @@ -96,6 +99,7 @@ The following arguments are optional: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `organizationArns` - (Optional) Set of AWS Organization ARNs to assign. * `organizationalUnitArns` - (Optional) Set of AWS Organizational Unit ARNs to assign. * `userGroups` - (Optional) Set of EC2 launch permission user groups to assign. Use `all` to distribute a public AMI. @@ -160,6 +164,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_imagebuilder_distribution_configuration.example + identity = { + "arn" = "arn:aws:imagebuilder:us-east-1:123456789012:distribution-configuration/example" + } +} + +resource "aws_imagebuilder_distribution_configuration" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the Image Builder distribution configuration. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import `aws_imagebuilder_distribution_configurations` resources using the Amazon Resource Name (ARN). 
For example: ```typescript @@ -190,4 +215,4 @@ Using `terraform import`, import `aws_imagebuilder_distribution_configurations` % terraform import aws_imagebuilder_distribution_configuration.example arn:aws:imagebuilder:us-east-1:123456789012:distribution-configuration/example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/imagebuilder_image.html.markdown b/website/docs/cdktf/typescript/r/imagebuilder_image.html.markdown index f74162b4a612..3efa2d53cbc4 100644 --- a/website/docs/cdktf/typescript/r/imagebuilder_image.html.markdown +++ b/website/docs/cdktf/typescript/r/imagebuilder_image.html.markdown @@ -48,6 +48,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `containerRecipeArn` - (Optional) - Amazon Resource Name (ARN) of the container recipe. * `distributionConfigurationArn` - (Optional) Amazon Resource Name (ARN) of the Image Builder Distribution Configuration. * `enhancedImageMetadataEnabled` - (Optional) Whether additional information about the image being created is collected. Defaults to `true`. @@ -62,6 +63,7 @@ The following arguments are optional: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `imageTestsEnabled` - (Optional) Whether image tests are enabled. Defaults to `true`. * `timeoutMinutes` - (Optional) Number of minutes before image tests time out. 
Valid values are between `60` and `1440`. Defaults to `720`. @@ -69,6 +71,7 @@ The following arguments are optional: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `imageScanningEnabled` - (Optional) Indicates whether Image Builder keeps a snapshot of the vulnerability scans that Amazon Inspector runs against the build instance when you create a new image. Defaults to `false`. * `ecrConfiguration` - (Optional) Configuration block with ECR configuration. Detailed below. @@ -76,6 +79,7 @@ The following arguments are optional: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `repositoryName` - (Optional) The name of the container repository that Amazon Inspector scans to identify findings for your container images. * `containerTags` - (Optional) Set of tags for Image Builder to apply to the output container image that Amazon Inspector scans. @@ -87,6 +91,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `onFailure` - (Optional) The action to take if the workflow fails. Must be one of `CONTINUE` or `ABORT`. 
* `parallelGroup` - (Optional) The parallel group in which to run a test Workflow. * `parameter` - (Optional) Configuration block for the workflow parameters. Detailed below. @@ -127,6 +132,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_imagebuilder_image.example + identity = { + "arn" = "arn:aws:imagebuilder:us-east-1:123456789012:image/example/1.0.0/1" + } +} + +resource "aws_imagebuilder_image" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the Image Builder image. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import `aws_imagebuilder_image` resources using the Amazon Resource Name (ARN). 
For example: ```typescript @@ -157,4 +183,4 @@ Using `terraform import`, import `aws_imagebuilder_image` resources using the Am % terraform import aws_imagebuilder_image.example arn:aws:imagebuilder:us-east-1:123456789012:image/example/1.0.0/1 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/imagebuilder_image_pipeline.html.markdown b/website/docs/cdktf/typescript/r/imagebuilder_image_pipeline.html.markdown index 262c51cbf3db..25d489883ad1 100644 --- a/website/docs/cdktf/typescript/r/imagebuilder_image_pipeline.html.markdown +++ b/website/docs/cdktf/typescript/r/imagebuilder_image_pipeline.html.markdown @@ -60,7 +60,7 @@ class MyConvertedCode extends TerraformStack { "arn:${" + current.partition + "}:imagebuilder:${" + - dataAwsRegionCurrent.name + + dataAwsRegionCurrent.region + "}:aws:image/amazon-linux-2-x86/x.x.x", version: "1.0.0", }); @@ -97,6 +97,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `containerRecipeArn` - (Optional) Amazon Resource Name (ARN) of the container recipe. * `description` - (Optional) Description of the image pipeline. * `distributionConfigurationArn` - (Optional) Amazon Resource Name (ARN) of the Image Builder Distribution Configuration. @@ -114,6 +115,7 @@ The following arguments are optional: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
* `imageScanningEnabled` - (Optional) Whether image scans are enabled. Defaults to `false`. * `ecrConfiguration` - (Optional) Configuration block with ECR configuration for image scanning. Detailed below. @@ -121,6 +123,7 @@ The following arguments are optional: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `containerTags` - (Optional) List of tags to apply to scanned images * `repositoryName` - (Optional) The name of the repository to scan @@ -128,6 +131,7 @@ The following arguments are optional: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `imageTestsEnabled` - (Optional) Whether image tests are enabled. Defaults to `true`. * `timeoutMinutes` - (Optional) Number of minutes before image tests time out. Valid values are between `60` and `1440`. Defaults to `720`. @@ -139,6 +143,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `pipelineExecutionStartCondition` - (Optional) Condition when the pipeline should trigger a new image build. Valid values are `EXPRESSION_MATCH_AND_DEPENDENCY_UPDATES_AVAILABLE` and `EXPRESSION_MATCH_ONLY`. 
Defaults to `EXPRESSION_MATCH_AND_DEPENDENCY_UPDATES_AVAILABLE`. * `timezone` - (Optional) The timezone that applies to the scheduling expression. For example, "Etc/UTC", "America/Los_Angeles" in the [IANA timezone format](https://www.joda.org/joda-time/timezones.html). If not specified this defaults to UTC. @@ -151,6 +156,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `onFailure` - (Optional) The action to take if the workflow fails. Must be one of `CONTINUE` or `ABORT`. * `parallelGroup` - (Optional) The parallel group in which to run a test Workflow. * `parameter` - (Optional) Configuration block for the workflow parameters. Detailed below. @@ -176,6 +182,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_imagebuilder_image_pipeline.example + identity = { + "arn" = "arn:aws:imagebuilder:us-east-1:123456789012:image-pipeline/example" + } +} + +resource "aws_imagebuilder_image_pipeline" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the Image Builder image pipeline. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import `aws_imagebuilder_image_pipeline` resources using the Amazon Resource Name (ARN). 
For example: ```typescript @@ -206,4 +233,4 @@ Using `terraform import`, import `aws_imagebuilder_image_pipeline` resources usi % terraform import aws_imagebuilder_image_pipeline.example arn:aws:imagebuilder:us-east-1:123456789012:image-pipeline/example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/imagebuilder_image_recipe.html.markdown b/website/docs/cdktf/typescript/r/imagebuilder_image_recipe.html.markdown index 07b7b874ec27..c0ec9da9d1c4 100644 --- a/website/docs/cdktf/typescript/r/imagebuilder_image_recipe.html.markdown +++ b/website/docs/cdktf/typescript/r/imagebuilder_image_recipe.html.markdown @@ -57,7 +57,7 @@ class MyConvertedCode extends TerraformStack { "arn:${" + current.partition + "}:imagebuilder:${" + - dataAwsRegionCurrent.name + + dataAwsRegionCurrent.region + "}:aws:image/amazon-linux-2-x86/x.x.x", version: "1.0.0", }); @@ -72,11 +72,12 @@ The following arguments are required: * `component` - (Required) Ordered configuration block(s) with components for the image recipe. Detailed below. * `name` - (Required) Name of the image recipe. -* `parentImage` - (Required) The image recipe uses this image as a base from which to build your customized image. The value can be the base image ARN or an AMI ID. +* `parentImage` - (Required) The image recipe uses this image as a base from which to build your customized image. The value can be the base image ARN, an AMI ID, or an SSM Parameter referencing the AMI. For an SSM Parameter, enter the prefix `ssm:`, followed by the parameter name or ARN. * `version` - (Required) The semantic version of the image recipe, which specifies the version in the following format, with numeric values in each position to indicate a specific version: major.minor.patch. For example: 1.0.0. 
The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `blockDeviceMapping` - (Optional) Configuration block(s) with block device mappings for the image recipe. Detailed below. * `description` - (Optional) Description of the image recipe. * `systemsManagerAgent` - (Optional) Configuration block for the Systems Manager Agent installed by default by Image Builder. Detailed below. @@ -128,6 +129,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_imagebuilder_image_recipe.example + identity = { + "arn" = "arn:aws:imagebuilder:us-east-1:123456789012:image-recipe/example/1.0.0" + } +} + +resource "aws_imagebuilder_image_recipe" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the Image Builder image recipe. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import `aws_imagebuilder_image_recipe` resources using the Amazon Resource Name (ARN). 
For example: ```typescript @@ -158,4 +180,4 @@ Using `terraform import`, import `aws_imagebuilder_image_recipe` resources using % terraform import aws_imagebuilder_image_recipe.example arn:aws:imagebuilder:us-east-1:123456789012:image-recipe/example/1.0.0 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/imagebuilder_infrastructure_configuration.html.markdown b/website/docs/cdktf/typescript/r/imagebuilder_infrastructure_configuration.html.markdown index a30e58b1948b..8a9522cf6717 100644 --- a/website/docs/cdktf/typescript/r/imagebuilder_infrastructure_configuration.html.markdown +++ b/website/docs/cdktf/typescript/r/imagebuilder_infrastructure_configuration.html.markdown @@ -60,6 +60,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` - (Optional) Description for the configuration. * `instanceMetadataOptions` - (Optional) Configuration block with instance metadata options for the HTTP requests that pipeline builds use to launch EC2 build and test instances. Detailed below. * `instanceTypes` - (Optional) Set of EC2 Instance Types. @@ -77,6 +78,7 @@ The following arguments are optional: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `httpPutResponseHopLimit` - The number of hops that an instance can traverse to reach its destination. 
* `httpTokens` - Whether a signed token is required for instance metadata retrieval requests. Valid values: `required`, `optional`. @@ -94,12 +96,14 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `s3KeyPrefix` - (Optional) Prefix to use for S3 logs. Defaults to `/`. ### placement The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `availabilityZone` - (Optional) Availability Zone where your build and test instances will launch. * `hostId` - (Optional) ID of the Dedicated Host on which build and test instances run. Conflicts with `hostResourceGroupArn`. * `hostResourceGroupArn` - (Optional) ARN of the host resource group in which to launch build and test instances. Conflicts with `hostId`. @@ -117,6 +121,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. 
For example: + +```terraform +import { + to = aws_imagebuilder_infrastructure_configuration.example + identity = { + "arn" = "arn:aws:imagebuilder:us-east-1:123456789012:infrastructure-configuration/example" + } +} + +resource "aws_imagebuilder_infrastructure_configuration" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the Image Builder infrastructure configuration. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import `aws_imagebuilder_infrastructure_configuration` using the Amazon Resource Name (ARN). For example: ```typescript @@ -147,4 +172,4 @@ Using `terraform import`, import `aws_imagebuilder_infrastructure_configuration` % terraform import aws_imagebuilder_infrastructure_configuration.example arn:aws:imagebuilder:us-east-1:123456789012:infrastructure-configuration/example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/imagebuilder_lifecycle_policy.html.markdown b/website/docs/cdktf/typescript/r/imagebuilder_lifecycle_policy.html.markdown index 2154e12c794e..695a709ce2c6 100644 --- a/website/docs/cdktf/typescript/r/imagebuilder_lifecycle_policy.html.markdown +++ b/website/docs/cdktf/typescript/r/imagebuilder_lifecycle_policy.html.markdown @@ -22,11 +22,11 @@ import { Fn, Token, TerraformStack } from "cdktf"; * Provider bindings are generated by running `cdktf get`. * See https://cdk.tf/provider-generation for more details. 
*/ -import { ImagebuilderLifecyclePolicy } from "./.gen/providers/aws/"; import { DataAwsPartition } from "./.gen/providers/aws/data-aws-partition"; import { DataAwsRegion } from "./.gen/providers/aws/data-aws-region"; import { IamRole } from "./.gen/providers/aws/iam-role"; import { IamRolePolicyAttachment } from "./.gen/providers/aws/iam-role-policy-attachment"; +import { ImagebuilderLifecyclePolicy } from "./.gen/providers/aws/imagebuilder-lifecycle-policy"; class MyConvertedCode extends TerraformStack { constructor(scope: Construct, name: string) { super(scope, name); @@ -89,12 +89,10 @@ class MyConvertedCode extends TerraformStack { ], resourceSelection: [ { - tagMap: [ - { - key1: "value1", - key2: "value2", - }, - ], + tagMap: { + key1: "value1", + key2: "value2", + }, }, ], resourceType: "AMI_IMAGE", @@ -113,11 +111,12 @@ The following arguments are required: * `name` - (Required) The name of the lifecycle policy to create. * `resourceType` - (Required) The type of Image Builder resource that the lifecycle policy applies to. Valid values: `AMI_IMAGE` or `CONTAINER_IMAGE`. * `executionRole` - (Required) The Amazon Resource Name (ARN) for the IAM role you create that grants Image Builder access to run lifecycle actions. More information about this role can be found [`here`](https://docs.aws.amazon.com/imagebuilder/latest/userguide/image-lifecycle-prerequisites.html#image-lifecycle-prereq-role). -* `policy_detail` - (Required) Configuration block with policy details. Detailed below. -* `resource_selection` - (Required) Selection criteria for the resources that the lifecycle policy applies to. Detailed below. +* `policyDetail` - (Required) Configuration block with policy details. Detailed below. +* `resourceSelection` - (Required) Selection criteria for the resources that the lifecycle policy applies to. Detailed below. 
The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` - (Optional) description for the lifecycle policy. * `tags` - (Optional) Key-value map of resource tags for the Image Builder Lifecycle Policy. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. @@ -130,7 +129,8 @@ The following arguments are required: The following arguments are optional: -* `exclusion_rules` - (Optional) Additional rules to specify resources that should be exempt from policy actions. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `exclusionRules` - (Optional) Additional rules to specify resources that should be exempt from policy actions. ### action @@ -140,12 +140,14 @@ The following arguments are required: The following arguments are optional: -* `include_resources` - (Optional) Specifies the resources that the lifecycle policy applies to. Detailed below. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
+* `includeResources` - (Optional) Specifies the resources that the lifecycle policy applies to. Detailed below. ### include_resources The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `amis` - (Optional) Specifies whether the lifecycle action should apply to distributed AMIs. * `containers` - (Optional) Specifies whether the lifecycle action should apply to distributed containers. * `snapshots` - (Optional) Specifies whether the lifecycle action should apply to snapshots associated with distributed AMIs. @@ -159,25 +161,28 @@ The following arguments are required: The following arguments are optional: -* `retain_at_least` - (Optional) For age-based filters, this is the number of resources to keep on hand after the lifecycle DELETE action is applied. Impacted resources are only deleted if you have more than this number of resources. If you have fewer resources than this number, the impacted resource is not deleted. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `retainAtLeast` - (Optional) For age-based filters, this is the number of resources to keep on hand after the lifecycle DELETE action is applied. Impacted resources are only deleted if you have more than this number of resources. If you have fewer resources than this number, the impacted resource is not deleted. * `unit` - (Optional) Defines the unit of time that the lifecycle policy uses to determine impacted resources. This is required for age-based rules. 
Valid values: `DAYS`, `WEEKS`, `MONTHS` or `YEARS`. ### exclusion_rules The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `amis` - (Optional) Lists configuration values that apply to AMIs that Image Builder should exclude from the lifecycle action. Detailed below. -* `tag_map` - (Optional) Contains a list of tags that Image Builder uses to skip lifecycle actions for Image Builder image resources that have them. +* `tagMap` - (Optional) Contains a list of tags that Image Builder uses to skip lifecycle actions for Image Builder image resources that have them. ### amis The following arguments are optional: -* `is_public` - (Optional) Configures whether public AMIs are excluded from the lifecycle action. -* `last_launched` - (Optional) Specifies configuration details for Image Builder to exclude the most recent resources from lifecycle actions. Detailed below. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `isPublic` - (Optional) Configures whether public AMIs are excluded from the lifecycle action. +* `lastLaunched` - (Optional) Specifies configuration details for Image Builder to exclude the most recent resources from lifecycle actions. Detailed below. * `regions` - (Optional) Configures AWS Regions that are excluded from the lifecycle action. * `sharedAccounts` - Specifies AWS accounts whose resources are excluded from the lifecycle action. 
-* `tag_map` - (Optional) Lists tags that should be excluded from lifecycle actions for the AMIs that have them. +* `tagMap` - (Optional) Lists tags that should be excluded from lifecycle actions for the AMIs that have them. ### last_launched @@ -190,8 +195,9 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `recipe` - (Optional) A list of recipe that are used as selection criteria for the output images that the lifecycle policy applies to. Detailed below. -* `tag_map` - (Optional) A list of tags that are used as selection criteria for the Image Builder image resources that the lifecycle policy applies to. +* `tagMap` - (Optional) A list of tags that are used as selection criteria for the Image Builder image resources that the lifecycle policy applies to. ### recipe @@ -211,6 +217,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_imagebuilder_lifecycle_policy.example + identity = { + "arn" = "arn:aws:imagebuilder:us-east-1:123456789012:lifecycle-policy/example" + } +} + +resource "aws_imagebuilder_lifecycle_policy" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the Image Builder lifecycle policy. 
+ In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import `aws_imagebuilder_lifecycle_policy` using the Amazon Resource Name (ARN). For example: ```typescript @@ -221,7 +248,7 @@ import { TerraformStack } from "cdktf"; * Provider bindings are generated by running `cdktf get`. * See https://cdk.tf/provider-generation for more details. */ -import { ImagebuilderLifecyclePolicy } from "./.gen/providers/aws/"; +import { ImagebuilderLifecyclePolicy } from "./.gen/providers/aws/imagebuilder-lifecycle-policy"; class MyConvertedCode extends TerraformStack { constructor(scope: Construct, name: string) { super(scope, name); @@ -241,4 +268,4 @@ Using `terraform import`, import `aws_imagebuilder_lifecycle_policy` using the A % terraform import aws_imagebuilder_lifecycle_policy.example arn:aws:imagebuilder:us-east-1:123456789012:lifecycle-policy/example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/imagebuilder_workflow.html.markdown b/website/docs/cdktf/typescript/r/imagebuilder_workflow.html.markdown index b13447cf6fcf..ae31d83555b1 100644 --- a/website/docs/cdktf/typescript/r/imagebuilder_workflow.html.markdown +++ b/website/docs/cdktf/typescript/r/imagebuilder_workflow.html.markdown @@ -50,6 +50,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `changeDescription` - (Optional) Change description of the workflow. * `data` - (Optional) Inline YAML string with data of the workflow. Exactly one of `data` and `uri` can be specified. * `description` - (Optional) Description of the workflow. 
@@ -67,7 +68,28 @@ This resource exports the following attributes in addition to the arguments abov ## Import -In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import EC2 Image Builder Workflow using the `example_id_arg`. For example: +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_imagebuilder_workflow.example + identity = { + "arn" = "arn:aws:imagebuilder:us-east-1:123456789012:workflow/build/example/1.0.0" + } +} + +resource "aws_imagebuilder_workflow" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the Image Builder workflow. + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import EC2 Image Builder Workflow using the `arn`. For example: ```typescript // DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug @@ -91,7 +113,7 @@ class MyConvertedCode extends TerraformStack { ``` -Using `terraform import`, import EC2 Image Builder Workflow using the `example_id_arg`. For example: +Using `terraform import`, import EC2 Image Builder Workflow using the `arn`. For example: ```console % terraform import aws_imagebuilder_workflow.example arn:aws:imagebuilder:us-east-1:aws:workflow/test/example/1.0.1/1 @@ -99,4 +121,4 @@ Using `terraform import`, import EC2 Image Builder Workflow using the `example_i Certain resource arguments, such as `uri`, cannot be read via the API and imported into Terraform. Terraform will display a difference for these arguments the first run after import if declared in the Terraform configuration for an imported resource. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/inspector2_delegated_admin_account.html.markdown b/website/docs/cdktf/typescript/r/inspector2_delegated_admin_account.html.markdown index fd47f6920ebc..93092a62d6f8 100644 --- a/website/docs/cdktf/typescript/r/inspector2_delegated_admin_account.html.markdown +++ b/website/docs/cdktf/typescript/r/inspector2_delegated_admin_account.html.markdown @@ -40,8 +40,9 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -The following arguments are required: +This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `accountId` - (Required) Account to enable as delegated admin account. ## Attribute Reference @@ -89,4 +90,4 @@ Using `terraform import`, import Inspector Delegated Admin Account using the `ac % terraform import aws_inspector2_delegated_admin_account.example 123456789012 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/inspector2_enabler.html.markdown b/website/docs/cdktf/typescript/r/inspector2_enabler.html.markdown index 2b3904cc4877..878f5f489e87 100644 --- a/website/docs/cdktf/typescript/r/inspector2_enabler.html.markdown +++ b/website/docs/cdktf/typescript/r/inspector2_enabler.html.markdown @@ -66,12 +66,13 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -The following arguments are required: +This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `accountIds` - (Required) Set of account IDs. Can contain one of: the Organization's Administrator Account, or one or more Member Accounts. * `resourceTypes` - (Required) Type of resources to scan. - Valid values are `EC2`, `ECR`, `LAMBDA` and `LAMBDA_CODE`. + Valid values are `EC2`, `ECR`, `LAMBDA`, `LAMBDA_CODE` and `CODE_REPOSITORY`. At least one item is required. ## Attribute Reference @@ -86,4 +87,36 @@ This resource exports no additional attributes. * `update` - (Default `5m`) * `delete` - (Default `5m`) - \ No newline at end of file +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Inspector Enabler using `accountIds` and `resourceTypes` formatted as `[account_id1]:[account_id2]:...-[resource_type1]:[resource_type2]:...`, where `accountIds` are sorted in ascending order and `resourceTypes` are sorted in alphabetical order. For example: + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. 
+ */ +import { Inspector2Enabler } from "./.gen/providers/aws/inspector2-enabler"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + Inspector2Enabler.generateConfigForImport( + this, + "example", + "123456789012:234567890123-EC2:ECR" + ); + } +} + +``` + +Using `terraform import`, import Inspector Enabler using `accountIds` and `resourceTypes` formatted as `[account_id1]:[account_id2]:...-[resource_type1]:[resource_type2]:...`, where `accountIds` are sorted in ascending order and `resourceTypes` are sorted in alphabetical order. For example: + +```console +% terraform import aws_inspector2_enabler.example 123456789012:234567890123-EC2:ECR +``` + + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/inspector2_filter.html.markdown b/website/docs/cdktf/typescript/r/inspector2_filter.html.markdown index 82c2d5647cab..fc864835e3d7 100644 --- a/website/docs/cdktf/typescript/r/inspector2_filter.html.markdown +++ b/website/docs/cdktf/typescript/r/inspector2_filter.html.markdown @@ -57,6 +57,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` - (Optional) Description * `reason` - (Optional) Reason for creating the filter * `tags` - (Optional) Map of tags assigned to the resource. If configured with a provider [`defaultTags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. 
@@ -73,6 +74,8 @@ This resource exports the following attributes in addition to the arguments abov The `filterCriteria` configuration block supports the following attributes: * `awsAccountId` - (Optional) The AWS account ID in which the finding was generated. [Documented below](#string-filter). +* `codeRepositoryProjectName` - (Optional) The project name in a code repository. [Documented below](#string-filter). +* `codeRepositoryProviderType` - (Optional) The repository provider type (such as GitHub, GitLab, etc.) [Documented below](#string-filter). * `codeVulnerabilityDetectorName` - (Optional) The ID of the component. [Documented below](#string-filter). * `codeVulnerabilityDetectorTags` - (Optional) The ID of the component. [Documented below](#string-filter). * `codeVulnerabilityFilePath` - (Optional) The ID of the component. [Documented below](#string-filter). @@ -82,6 +85,8 @@ The `filterCriteria` configuration block supports the following attributes: * `ec2InstanceSubnetId` - (Optional) The ID of the subnet. [Documented below](#string-filter). * `ec2InstanceVpcId` - (Optional) The ID of the VPC. [Documented below](#string-filter). * `ecrImageArchitecture` - (Optional) The architecture of the ECR image. [Documented below](#string-filter). +* `ecrImageInUseCount` - (Optional) The number of the ECR images in use. [Documented below](#number-filter). +* `ecrImageLastInUseAt` - (Optional) The date range when an ECR image was last used in an ECS cluster task or EKS cluster pod. [Documented below](#date-filter). * `ecrImageHash` - (Optional) The SHA256 hash of the ECR image. [Documented below](#string-filter). * `ecrImagePushedAt` - (Optional) The date range when the image was pushed. [Documented below](#date-filter). * `ecrImageRegistry` - (Optional) The registry of the ECR image. [Documented below](#string-filter). 
@@ -190,10 +195,10 @@ class MyConvertedCode extends TerraformStack { ``` -Using `terraform import`, import Inspector Filter using the `example_id_arg`. For example: +Using `terraform import`, import Inspector Filter using the `arn`. For example: ```console % terraform import aws_inspector2_filter.example "arn:aws:inspector2:us-east-1:111222333444:owner/111222333444/filter/abcdefgh12345678" ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/inspector2_member_association.html.markdown b/website/docs/cdktf/typescript/r/inspector2_member_association.html.markdown index bdf0a2af17cd..c6001f4ce2a2 100644 --- a/website/docs/cdktf/typescript/r/inspector2_member_association.html.markdown +++ b/website/docs/cdktf/typescript/r/inspector2_member_association.html.markdown @@ -38,8 +38,9 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -The following arguments are required: +This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
* `accountId` - (Required) ID of the account to associate ## Attribute Reference @@ -82,4 +83,4 @@ Using `terraform import`, import Amazon Inspector Member Association using the ` % terraform import aws_inspector2_member_association.example 123456789012 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/inspector2_organization_configuration.html.markdown b/website/docs/cdktf/typescript/r/inspector2_organization_configuration.html.markdown index 1d6d5b6493d2..421b9ddd38b9 100644 --- a/website/docs/cdktf/typescript/r/inspector2_organization_configuration.html.markdown +++ b/website/docs/cdktf/typescript/r/inspector2_organization_configuration.html.markdown @@ -34,6 +34,7 @@ class MyConvertedCode extends TerraformStack { super(scope, name); new Inspector2OrganizationConfiguration(this, "example", { autoEnable: { + codeRepository: false, ec2: true, ecr: false, lambda: true, @@ -47,14 +48,16 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -The following arguments are required: +This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `autoEnable` - (Required) Configuration block for auto enabling. See below. ### `autoEnable` * `ec2` - (Required) Whether Amazon EC2 scans are automatically enabled for new members of your Amazon Inspector organization. * `ecr` - (Required) Whether Amazon ECR scans are automatically enabled for new members of your Amazon Inspector organization. +* `codeRepository` - (Optional) Whether code repository scans are automatically enabled for new members of your Amazon Inspector organization. 
* `lambda` - (Optional) Whether Lambda Function scans are automatically enabled for new members of your Amazon Inspector organization. * `lambdaCode` - (Optional) Whether AWS Lambda code scans are automatically enabled for new members of your Amazon Inspector organization. **Note:** Lambda code scanning requires Lambda standard scanning to be activated. Consequently, if you are setting this argument to `true`, you must also set the `lambda` argument to `true`. See [Scanning AWS Lambda functions with Amazon Inspector](https://docs.aws.amazon.com/inspector/latest/user/scanning-lambda.html#lambda-code-scans) for more information. @@ -72,4 +75,4 @@ This resource exports the following attributes in addition to the arguments abov * `update` - (Default `5m`) * `delete` - (Default `5m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/inspector_assessment_target.html.markdown b/website/docs/cdktf/typescript/r/inspector_assessment_target.html.markdown index eee06537c763..845b721713ae 100644 --- a/website/docs/cdktf/typescript/r/inspector_assessment_target.html.markdown +++ b/website/docs/cdktf/typescript/r/inspector_assessment_target.html.markdown @@ -46,6 +46,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The name of the assessment target. * `resourceGroupArn` (Optional) Inspector Resource Group Amazon Resource Name (ARN) stating tags for instance matching. If not specified, all EC2 instances in the current AWS account and region are included in the assessment target. 
@@ -57,6 +58,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_inspector_assessment_target.example + identity = { + "arn" = "arn:aws:inspector:us-west-2:123456789012:target/0-12345678" + } +} + +resource "aws_inspector_assessment_target" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the Inspector assessment target. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Inspector Classic Assessment Targets using their Amazon Resource Name (ARN). For example: ```typescript @@ -87,4 +109,4 @@ Using `terraform import`, import Inspector Classic Assessment Targets using thei % terraform import aws_inspector_assessment_target.example arn:aws:inspector:us-east-1:123456789012:target/0-xxxxxxx ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/inspector_assessment_template.html.markdown b/website/docs/cdktf/typescript/r/inspector_assessment_template.html.markdown index 94121fd0de3d..84998726fa33 100644 --- a/website/docs/cdktf/typescript/r/inspector_assessment_template.html.markdown +++ b/website/docs/cdktf/typescript/r/inspector_assessment_template.html.markdown @@ -52,6 +52,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
* `name` - (Required) The name of the assessment template. * `targetArn` - (Required) The assessment target ARN to attach the template to. * `duration` - (Required) The duration of the inspector run. @@ -75,6 +76,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_inspector_assessment_template.example + identity = { + "arn" = "arn:aws:inspector:us-west-2:123456789012:target/0-12345678/template/0-87654321" + } +} + +resource "aws_inspector_assessment_template" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the Inspector assessment template. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import `aws_inspector_assessment_template` using the template assessment ARN. 
For example: ```typescript @@ -105,4 +127,4 @@ Using `terraform import`, import `aws_inspector_assessment_template` using the t % terraform import aws_inspector_assessment_template.example arn:aws:inspector:us-west-2:123456789012:target/0-9IaAzhGR/template/0-WEcjR8CH ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/inspector_resource_group.html.markdown b/website/docs/cdktf/typescript/r/inspector_resource_group.html.markdown index ada336c01196..5b7ff5e6ae99 100644 --- a/website/docs/cdktf/typescript/r/inspector_resource_group.html.markdown +++ b/website/docs/cdktf/typescript/r/inspector_resource_group.html.markdown @@ -41,6 +41,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `tags` - (Required) Key-value map of tags that are used to select the EC2 instances to be included in an [Amazon Inspector assessment target](/docs/providers/aws/r/inspector_assessment_target.html). ## Attribute Reference @@ -49,4 +50,4 @@ This resource exports the following attributes in addition to the arguments abov * `arn` - The resource group ARN. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/instance.html.markdown b/website/docs/cdktf/typescript/r/instance.html.markdown index ecae3b21a0e4..fc1356658586 100644 --- a/website/docs/cdktf/typescript/r/instance.html.markdown +++ b/website/docs/cdktf/typescript/r/instance.html.markdown @@ -45,7 +45,7 @@ class MyConvertedCode extends TerraformStack { mostRecent: true, owners: ["099720109477"], }); - new Instance(this, "web", { + new Instance(this, "example", { ami: Token.asString(ubuntu.id), instanceType: "t3.micro", tags: { @@ -71,7 +71,7 @@ import { Instance } from "./.gen/providers/aws/instance"; class MyConvertedCode extends TerraformStack { constructor(scope: Construct, name: string) { super(scope, name); - new Instance(this, "web", { + new Instance(this, "example", { ami: "resolve:ssm:/aws/service/ami-amazon-linux-latest/al2023-ami-kernel-default-x86_64", instanceType: "t3.micro", tags: { @@ -98,7 +98,7 @@ import { Instance } from "./.gen/providers/aws/instance"; class MyConvertedCode extends TerraformStack { constructor(scope: Construct, name: string) { super(scope, name); - const thisVar = new DataAwsAmi(this, "this", { + const example = new DataAwsAmi(this, "example", { filter: [ { name: "architecture", @@ -112,8 +112,8 @@ class MyConvertedCode extends TerraformStack { mostRecent: true, owners: ["amazon"], }); - const awsInstanceThis = new Instance(this, "this_1", { - ami: Token.asString(thisVar.id), + const awsInstanceExample = new Instance(this, "example_1", { + ami: Token.asString(example.id), instanceMarketOptions: { marketType: "spot", spotOptions: { @@ -126,7 +126,7 @@ class MyConvertedCode extends TerraformStack { }, }); /*This allows the Terraform resource name to match the original name. 
You can remove the call if you don't need them to match.*/ - awsInstanceThis.overrideLogicalId("this"); + awsInstanceExample.overrideLogicalId("example"); } } @@ -163,28 +163,25 @@ class MyConvertedCode extends TerraformStack { }, vpcId: myVpc.id, }); - const foo = new NetworkInterface(this, "foo", { + const example = new NetworkInterface(this, "example", { privateIps: ["172.16.10.100"], subnetId: mySubnet.id, tags: { Name: "primary_network_interface", }, }); - const awsInstanceFoo = new Instance(this, "foo_3", { + const awsInstanceExample = new Instance(this, "example_3", { ami: "ami-005e54dee72cc1d00", creditSpecification: { cpuCredits: "unlimited", }, instanceType: "t2.micro", - networkInterface: [ - { - deviceIndex: 0, - networkInterfaceId: foo.id, - }, - ], + primaryNetworkInterface: { + networkInterfaceId: example.id, + }, }); /*This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match.*/ - awsInstanceFoo.overrideLogicalId("foo"); + awsInstanceExample.overrideLogicalId("example"); } } @@ -298,17 +295,12 @@ Do not use `volumeTags` if you plan to manage block device tags outside the `aws This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `ami` - (Optional) AMI to use for the instance. Required unless `launchTemplate` is specified and the Launch Template specifes an AMI. If an AMI is specified in the Launch Template, setting `ami` will override the AMI specified in the Launch Template. * `associatePublicIpAddress` - (Optional) Whether to associate a public IP address with an instance in a VPC. * `availabilityZone` - (Optional) AZ to start the instance in. 
- * `capacityReservationSpecification` - (Optional) Describes an instance's Capacity Reservation targeting option. See [Capacity Reservation Specification](#capacity-reservation-specification) below for more details. - --> **NOTE:** Changing `cpuCoreCount` and/or `cpuThreadsPerCore` will cause the resource to be destroyed and re-created. - -* `cpuCoreCount` - (Optional, **Deprecated** use the `cpuOptions` argument instead) Sets the number of CPU cores for an instance. This option is only supported on creation of instance type that support CPU Options [CPU Cores and Threads Per CPU Core Per Instance Type](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-optimize-cpu.html#cpu-options-supported-instances-values) - specifying this option for unsupported instance types will return an error from the EC2 API. * `cpuOptions` - (Optional) The CPU options for the instance. See [CPU Options](#cpu-options) below for more details. -* `cpuThreadsPerCore` - (Optional - has no effect unless `cpuCoreCount` is also set, **Deprecated** use the `cpuOptions` argument instead) If set to 1, hyperthreading is disabled on the launched instance. Defaults to 2 if not set. See [Optimizing CPU Options](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-optimize-cpu.html) for more information. * `creditSpecification` - (Optional) Configuration block for customizing the credit specification of the instance. See [Credit Specification](#credit-specification) below for more details. Terraform will only perform drift detection of its value when present in a configuration. Removing this configuration on existing instances will only stop managing it. It will not change the configuration back to the default for the instance type. * `disableApiStop` - (Optional) If true, enables [EC2 Instance Stop Protection](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Stop_Start.html#Using_StopProtection). 
* `disableApiTermination` - (Optional) If true, enables [EC2 Instance Termination Protection](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/terminating-instances.html#Using_ChangingDisableAPITermination). @@ -317,6 +309,7 @@ This resource supports the following arguments: * `enablePrimaryIpv6` - (Optional) Whether to assign a primary IPv6 Global Unicast Address (GUA) to the instance when launched in a dual-stack or IPv6-only subnet. A primary IPv6 address ensures a consistent IPv6 address for the instance and is automatically assigned by AWS to the ENI. Once enabled, the first IPv6 GUA becomes the primary IPv6 address and cannot be disabled. The primary IPv6 address remains until the instance is terminated or the ENI is detached. Disabling `enablePrimaryIpv6` after it has been enabled forces recreation of the instance. * `enclaveOptions` - (Optional) Enable Nitro Enclaves on launched instances. See [Enclave Options](#enclave-options) below for more details. * `ephemeralBlockDevice` - (Optional) One or more configuration blocks to customize Ephemeral (also known as "Instance Store") volumes on the instance. See [Block Devices](#ebs-ephemeral-and-root-block-devices) below for details. When accessing this as an attribute reference, it is a set of objects. +* `forceDestroy` - (Optional) Destroys instance even if `disableApiTermination` or `disableApiStop` is set to `true`. Defaults to `false`. Once this parameter is set to `true`, a successful `terraform apply` run before a destroy is required to update this value in the resource state. Without a successful `terraform apply` after this parameter is set, this flag will have no effect. If setting this field in the same operation that would require replacing the instance or destroying the instance, this flag will not work. Additionally when importing an instance, a successful `terraform apply` is required to set this value in state before it will take effect on a destroy operation. 
* `getPasswordData` - (Optional) If true, wait for password data to become available and retrieve it. Useful for getting the administrator password for instances running Microsoft Windows. The password data is exported to the `passwordData` attribute. See [GetPasswordData](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetPasswordData.html) for more information. * `hibernation` - (Optional) If true, the launched EC2 instance will support hibernation. * `hostId` - (Optional) ID of a dedicated host that the instance will be assigned to. Use when an instance is to be launched on a specific dedicated host. @@ -332,9 +325,11 @@ This resource supports the following arguments: * `maintenanceOptions` - (Optional) Maintenance and recovery options for the instance. See [Maintenance Options](#maintenance-options) below for more details. * `metadataOptions` - (Optional) Customize the metadata options of the instance. See [Metadata Options](#metadata-options) below for more details. * `monitoring` - (Optional) If true, the launched EC2 instance will have detailed monitoring enabled. (Available since v0.6.0) -* `networkInterface` - (Optional) Customize network interfaces to be attached at instance boot time. See [Network Interfaces](#network-interfaces) below for more details. -* `placementGroup` - (Optional) Placement Group to start the instance in. +* `networkInterface` - (Optional, **Deprecated** to specify the primary network interface, use `primaryNetworkInterface`, to attach additional network interfaces, use `aws_network_interface_attachment` resources) Customize network interfaces to be attached at instance boot time. See [Network Interfaces](#network-interfaces) below for more details. +* `placementGroup` - (Optional) Placement Group to start the instance in. Conflicts with `placementGroupId`. +* `placementGroupId` - (Optional) Placement Group ID to start the instance in. Conflicts with `placementGroup`. 
* `placementPartitionNumber` - (Optional) Number of the partition the instance is in. Valid only if [the `aws_placement_group` resource's](placement_group.html) `strategy` argument is set to `"partition"`. +* `primaryNetworkInterface` - (Optional) The primary network interface. See [Primary Network Interface](#primary-network-interface) below. * `privateDnsNameOptions` - (Optional) Options for the instance hostname. The default values are inherited from the subnet. See [Private DNS Name Options](#private-dns-name-options) below for more details. * `privateIp` - (Optional) Private IP address to associate with the instance in a VPC. * `rootBlockDevice` - (Optional) Configuration block to customize details about the root block device of the instance. See [Block Devices](#ebs-ephemeral-and-root-block-devices) below for details. When accessing this as an attribute reference, it is a list containing one object. @@ -481,7 +476,11 @@ For more information, see the documentation on the [Instance Metadata Service](h ### Network Interfaces -Each of the `networkInterface` blocks attach a network interface to an EC2 Instance during boot time. However, because the network interface is attached at boot-time, replacing/modifying the network interface **WILL** trigger a recreation of the EC2 Instance. If you should need at any point to detach/modify/re-attach a network interface to the instance, use the `aws_network_interface` or `aws_network_interface_attachment` resources instead. +`networkInterface` is **deprecated**. +Use `primaryNetworkInterface` to specify the primary network interface. +To attach additional network interfaces, use [`aws_network_interface_attachment`](docs/r/network_interface_attachment.html.markdown) resources. + +Each of the `networkInterface` blocks attach a network interface to an EC2 Instance during boot time. 
However, because the network interface is attached at boot-time, replacing/modifying the network interface **WILL** trigger a recreation of the EC2 Instance. If you should need at any point to detach/modify/re-attach a network interface to the instance, use [`aws_network_interface_attachment`](docs/r/network_interface_attachment.html.markdown) resources instead. The `networkInterface` configuration block _does_, however, allow users to supply their own network interface to be used as the default network interface on an EC2 Instance, attached at `eth0`. @@ -492,6 +491,16 @@ Each `networkInterface` block supports the following: * `networkCardIndex` - (Optional) Integer index of the network card. Limited by instance type. The default index is `0`. * `networkInterfaceId` - (Required) ID of the network interface to attach. +### Primary Network Interface + +Represents the primary network interface on the EC2 Instance. +To manage additional network interfaces, use [`aws_network_interface_attachment`](docs/r/network_interface_attachment.html.markdown) resources. + +Each `primaryNetworkInterface` block supports the following: + +* `deleteOnTermination` - (Read-Only) Whether the network interface will be deleted when the instance terminates. +* `networkInterfaceId` - (Required) ID of the network interface to attach. + ### Private DNS Name Options The `privateDnsNameOptions` block supports the following: @@ -565,6 +574,32 @@ For `instanceMarketOptions`, in addition to the arguments above, the following a ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_instance.example + identity = { + id = "i-12345678" + } +} + +resource "aws_instance" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `id` - (String) ID of the instance. 
+ +#### Optional + +* `accountId` (String) AWS Account where this resource is managed. +* `region` (String) Region where this resource is managed. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import instances using the `id`. For example: ```typescript @@ -591,4 +626,4 @@ Using `terraform import`, import instances using the `id`. For example: % terraform import aws_instance.web i-12345678 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/internet_gateway.html.markdown b/website/docs/cdktf/typescript/r/internet_gateway.html.markdown index 63c2fdd7501a..47147cb927ed 100644 --- a/website/docs/cdktf/typescript/r/internet_gateway.html.markdown +++ b/website/docs/cdktf/typescript/r/internet_gateway.html.markdown @@ -41,6 +41,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `vpcId` - (Optional) The VPC ID to create in. See the [aws_internet_gateway_attachment](internet_gateway_attachment.html) resource for an alternate way to attach an Internet Gateway to a VPC. * `tags` - (Optional) A map of tags to assign to the resource. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. @@ -115,4 +116,4 @@ Using `terraform import`, import Internet Gateways using the `id`. 
For example: % terraform import aws_internet_gateway.gw igw-c0a643a9 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/internet_gateway_attachment.html.markdown b/website/docs/cdktf/typescript/r/internet_gateway_attachment.html.markdown index bab31fa8c617..ad0266174f00 100644 --- a/website/docs/cdktf/typescript/r/internet_gateway_attachment.html.markdown +++ b/website/docs/cdktf/typescript/r/internet_gateway_attachment.html.markdown @@ -53,6 +53,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `internetGatewayId` - (Required) The ID of the internet gateway. * `vpcId` - (Required) The ID of the VPC. @@ -101,4 +102,4 @@ Using `terraform import`, import Internet Gateway Attachments using the `id`. Fo % terraform import aws_internet_gateway_attachment.example igw-c0a643a9:vpc-123456 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/internetmonitor_monitor.html.markdown b/website/docs/cdktf/typescript/r/internetmonitor_monitor.html.markdown index abb29f514e26..b02073578370 100644 --- a/website/docs/cdktf/typescript/r/internetmonitor_monitor.html.markdown +++ b/website/docs/cdktf/typescript/r/internetmonitor_monitor.html.markdown @@ -42,6 +42,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `healthEventsConfig` - (Optional) Health event thresholds. A health event threshold percentage, for performance and availability, determines when Internet Monitor creates a health event when there's an internet issue that affects your application end users. See [Health Events Config](#health-events-config) below. * `internetMeasurementsLogDelivery` - (Optional) Publish internet measurements for Internet Monitor to an Amazon S3 bucket in addition to CloudWatch Logs. * `maxCityNetworksToMonitor` - (Optional) The maximum number of city-networks to monitor for your resources. A city-network is the location (city) where clients access your application resources from and the network or ASN, such as an internet service provider (ISP), that clients access the resources through. This limit helps control billing costs. @@ -97,4 +98,4 @@ Using `terraform import`, import Internet Monitor Monitors using the `monitorNam % terraform import aws_internetmonitor_monitor.some some-monitor ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/iot_authorizer.html.markdown b/website/docs/cdktf/typescript/r/iot_authorizer.html.markdown index 5c6fdaac0fcb..61c2843cc16e 100644 --- a/website/docs/cdktf/typescript/r/iot_authorizer.html.markdown +++ b/website/docs/cdktf/typescript/r/iot_authorizer.html.markdown @@ -50,6 +50,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
* `authorizerFunctionArn` - (Required) The ARN of the authorizer's Lambda function. * `enableCachingForHttp` - (Optional) Specifies whether the HTTP caching is enabled or not. Default: `false`. * `name` - (Required) The name of the authorizer. @@ -94,4 +95,4 @@ Using `terraform import`, import IOT Authorizers using the name. For example: % terraform import aws_iot_authorizer.example example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/iot_billing_group.html.markdown b/website/docs/cdktf/typescript/r/iot_billing_group.html.markdown index 0478647f63c4..99390a5083ea 100644 --- a/website/docs/cdktf/typescript/r/iot_billing_group.html.markdown +++ b/website/docs/cdktf/typescript/r/iot_billing_group.html.markdown @@ -46,6 +46,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The name of the Billing Group. * `properties` - (Optional) The Billing Group properties. Defined below. * `tags` - (Optional) Key-value mapping of resource tags @@ -90,4 +91,4 @@ Using `terraform import`, import IoT Billing Groups using the name. 
For example: % terraform import aws_iot_billing_group.example example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/iot_ca_certificate.html.markdown b/website/docs/cdktf/typescript/r/iot_ca_certificate.html.markdown index 85a1bb54f0a0..098e5b205664 100644 --- a/website/docs/cdktf/typescript/r/iot_ca_certificate.html.markdown +++ b/website/docs/cdktf/typescript/r/iot_ca_certificate.html.markdown @@ -96,6 +96,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `active` - (Required) Boolean flag to indicate if the certificate should be active for device authentication. * `allowAutoRegistration` - (Required) Boolean flag to indicate if the certificate should be active for device regisration. * `caCertificatePem` - (Required) PEM encoded CA certificate. @@ -124,4 +125,4 @@ This resource exports the following attributes in addition to the arguments abov * `notAfter` - The certificate is not valid after this date. * `notBefore` - The certificate is not valid before this date. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/iot_certificate.html.markdown b/website/docs/cdktf/typescript/r/iot_certificate.html.markdown index 92223e0e21a5..482526122c35 100644 --- a/website/docs/cdktf/typescript/r/iot_certificate.html.markdown +++ b/website/docs/cdktf/typescript/r/iot_certificate.html.markdown @@ -86,6 +86,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `active` - (Required) Boolean flag to indicate if the certificate should be active * `csr` - (Optional) The certificate signing request. Review [CreateCertificateFromCsr](https://docs.aws.amazon.com/iot/latest/apireference/API_CreateCertificateFromCsr.html) @@ -110,4 +111,4 @@ This resource exports the following attributes in addition to the arguments abov * `publicKey` - When neither CSR nor certificate is provided, the public key. * `privateKey` - When neither CSR nor certificate is provided, the private key. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/iot_domain_configuration.html.markdown b/website/docs/cdktf/typescript/r/iot_domain_configuration.html.markdown index f01e3f38ca18..4cb8a026251f 100644 --- a/website/docs/cdktf/typescript/r/iot_domain_configuration.html.markdown +++ b/website/docs/cdktf/typescript/r/iot_domain_configuration.html.markdown @@ -41,6 +41,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `applicationProtocol` - (Optional) An enumerated string that specifies the application-layer protocol. Valid values are `SECURE_MQTT`, `MQTT_WSS`, `HTTPS` or `DEFAULT`. * `authenticationType` - (Optional) An enumerated string that specifies the authentication type. Valid values are `CUSTOM_AUTH_X509`, `CUSTOM_AUTH`, `AWS_X509`, `AWS_SIGV4` or `DEFAULT`. * `authorizerConfig` - (Optional) An object that specifies the authorization service for a domain. See the [`authorizerConfig` Block](#authorizer_config-block) below for details. @@ -103,4 +104,4 @@ Using `terraform import`, import domain configurations using the name. For examp % terraform import aws_iot_domain_configuration.example example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/iot_event_configurations.html.markdown b/website/docs/cdktf/typescript/r/iot_event_configurations.html.markdown index 6023c7539fa4..2ed5bb7c3ac7 100644 --- a/website/docs/cdktf/typescript/r/iot_event_configurations.html.markdown +++ b/website/docs/cdktf/typescript/r/iot_event_configurations.html.markdown @@ -52,6 +52,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `eventConfigurations` - (Required) Map. The new event configuration values. You can use only these strings as keys: `THING_GROUP_HIERARCHY`, `THING_GROUP_MEMBERSHIP`, `THING_TYPE`, `THING_TYPE_ASSOCIATION`, `THING_GROUP`, `THING`, `POLICY`, `CA_CERTIFICATE`, `JOB_EXECUTION`, `CERTIFICATE`, `JOB`. 
Use boolean for values of mapping. ## Attribute Reference @@ -90,4 +91,4 @@ Using `terraform import`, import IoT Event Configurations using the AWS Region. % terraform import aws_iot_event_configurations.example us-west-2 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/iot_indexing_configuration.html.markdown b/website/docs/cdktf/typescript/r/iot_indexing_configuration.html.markdown index d6fc40318771..d0029cd1bd41 100644 --- a/website/docs/cdktf/typescript/r/iot_indexing_configuration.html.markdown +++ b/website/docs/cdktf/typescript/r/iot_indexing_configuration.html.markdown @@ -64,6 +64,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `thingGroupIndexingConfiguration` - (Optional) Thing group indexing configuration. See below. * `thingIndexingConfiguration` - (Optional) Thing indexing configuration. See below. @@ -104,4 +105,4 @@ The `filter` configuration block supports the following: This resource exports no additional attributes. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/iot_logging_options.html.markdown b/website/docs/cdktf/typescript/r/iot_logging_options.html.markdown index 0f4b888d09c6..309d77332f4f 100644 --- a/website/docs/cdktf/typescript/r/iot_logging_options.html.markdown +++ b/website/docs/cdktf/typescript/r/iot_logging_options.html.markdown @@ -39,6 +39,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `defaultLogLevel` - (Optional) The default logging level. Valid Values: `"DEBUG"`, `"INFO"`, `"ERROR"`, `"WARN"`, `"DISABLED"`. * `disableAllLogs` - (Optional) If `true` all logs are disabled. The default is `false`. * `roleArn` - (Required) The ARN of the role that allows IoT to write to Cloudwatch logs. @@ -47,4 +48,4 @@ This resource supports the following arguments: This resource exports no additional attributes. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/iot_policy.html.markdown b/website/docs/cdktf/typescript/r/iot_policy.html.markdown index b0b1fd849fac..8f023ec0ae3c 100644 --- a/website/docs/cdktf/typescript/r/iot_policy.html.markdown +++ b/website/docs/cdktf/typescript/r/iot_policy.html.markdown @@ -52,6 +52,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The name of the policy. * `policy` - (Required) The policy document. This is a JSON formatted string. Use the [IoT Developer Guide](http://docs.aws.amazon.com/iot/latest/developerguide/iot-policies.html) for more information on IoT Policies. For more information about building AWS IAM policy documents with Terraform, see the [AWS IAM Policy Document Guide](https://learn.hashicorp.com/terraform/aws/iam-policy). * `tags` - (Optional) Key-value mapping of resource tags. If configured with a provider [`defaultTags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. @@ -101,4 +102,4 @@ Using `terraform import`, import IoT policies using the `name`. For example: % terraform import aws_iot_policy.pubsub PubSubToAnyTopic ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/iot_policy_attachment.html.markdown b/website/docs/cdktf/typescript/r/iot_policy_attachment.html.markdown index c7e0a053f470..5b54ac4fe1d6 100644 --- a/website/docs/cdktf/typescript/r/iot_policy_attachment.html.markdown +++ b/website/docs/cdktf/typescript/r/iot_policy_attachment.html.markdown @@ -61,6 +61,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `policy` - (Required) The name of the policy to attach. * `target` - (Required) The identity to which the policy is attached. 
@@ -68,4 +69,4 @@ This resource supports the following arguments: This resource exports no additional attributes. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/iot_provisioning_template.html.markdown b/website/docs/cdktf/typescript/r/iot_provisioning_template.html.markdown index 92c564407294..d3b965e3be70 100644 --- a/website/docs/cdktf/typescript/r/iot_provisioning_template.html.markdown +++ b/website/docs/cdktf/typescript/r/iot_provisioning_template.html.markdown @@ -112,6 +112,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The name of the fleet provisioning template. * `description` - (Optional) The description of the fleet provisioning template. * `enabled` - (Optional) True to enable the fleet provisioning template, otherwise false. 
@@ -168,4 +169,4 @@ Using `terraform import`, import IoT fleet provisioning templates using the `nam % terraform import aws_iot_provisioning_template.fleet FleetProvisioningTemplate ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/iot_role_alias.html.markdown b/website/docs/cdktf/typescript/r/iot_role_alias.html.markdown index 05785f7d01e6..3d37a8cbca0f 100644 --- a/website/docs/cdktf/typescript/r/iot_role_alias.html.markdown +++ b/website/docs/cdktf/typescript/r/iot_role_alias.html.markdown @@ -55,6 +55,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `alias` - (Required) The name of the role alias. * `roleArn` - (Required) The identity of the role to which the alias refers. * `credentialDuration` - (Optional) The duration of the credential, in seconds. If you do not specify a value for this setting, the default maximum of one hour is applied. This setting can have a value from 900 seconds (15 minutes) to 43200 seconds (12 hours). @@ -95,4 +96,4 @@ Using `terraform import`, import IOT Role Alias using the alias. 
For example: % terraform import aws_iot_role_alias.example myalias ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/iot_thing.html.markdown b/website/docs/cdktf/typescript/r/iot_thing.html.markdown index f8da409ec185..80c1308dba02 100644 --- a/website/docs/cdktf/typescript/r/iot_thing.html.markdown +++ b/website/docs/cdktf/typescript/r/iot_thing.html.markdown @@ -41,6 +41,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The name of the thing. * `attributes` - (Optional) Map of attributes of the thing. * `thingTypeName` - (Optional) The thing type name. @@ -81,4 +82,4 @@ Using `terraform import`, import IOT Things using the name. For example: % terraform import aws_iot_thing.example example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/iot_thing_group.html.markdown b/website/docs/cdktf/typescript/r/iot_thing_group.html.markdown index b289d3d3ebb6..2879cc8013e2 100644 --- a/website/docs/cdktf/typescript/r/iot_thing_group.html.markdown +++ b/website/docs/cdktf/typescript/r/iot_thing_group.html.markdown @@ -54,6 +54,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The name of the Thing Group. 
* `parentGroupName` - (Optional) The name of the parent Thing Group. * `properties` - (Optional) The Thing Group properties. Defined below. @@ -104,4 +105,4 @@ Using `terraform import`, import IoT Things Groups using the name. For example: % terraform import aws_iot_thing_group.example example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/iot_thing_group_membership.html.markdown b/website/docs/cdktf/typescript/r/iot_thing_group_membership.html.markdown index ad0eb5c26ab3..8a7b8281d5ac 100644 --- a/website/docs/cdktf/typescript/r/iot_thing_group_membership.html.markdown +++ b/website/docs/cdktf/typescript/r/iot_thing_group_membership.html.markdown @@ -40,6 +40,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `thingName` - (Required) The name of the thing to add to a group. * `thingGroupName` - (Required) The name of the group to which you are adding a thing. * `overrideDynamicGroup` - (Optional) Override dynamic thing groups with static thing groups when 10-group limit is reached. If a thing belongs to 10 thing groups, and one or more of those groups are dynamic thing groups, adding a thing to a static group removes the thing from the last dynamic group. 
@@ -82,4 +83,4 @@ Using `terraform import`, import IoT Thing Group Membership using the thing grou % terraform import aws_iot_thing_group_membership.example thing_group_name/thing_name ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/iot_thing_principal_attachment.html.markdown b/website/docs/cdktf/typescript/r/iot_thing_principal_attachment.html.markdown index e19b6f9ad834..5f2f6fd3568a 100644 --- a/website/docs/cdktf/typescript/r/iot_thing_principal_attachment.html.markdown +++ b/website/docs/cdktf/typescript/r/iot_thing_principal_attachment.html.markdown @@ -48,11 +48,13 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `principal` - (Required) The AWS IoT Certificate ARN or Amazon Cognito Identity ID. * `thing` - (Required) The name of the thing. +* `thingPrincipalType` - (Optional) The type of relationship to specify when attaching a principal to a thing. Valid values are `EXCLUSIVE_THING` (the thing will be the only one attached to the principal) or `NON_EXCLUSIVE_THING` (multiple things can be attached to the principal). Defaults to `NON_EXCLUSIVE_THING`. ## Attribute Reference This resource exports no additional attributes. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/iot_thing_type.html.markdown b/website/docs/cdktf/typescript/r/iot_thing_type.html.markdown index 525b205a0b25..62d04abb350e 100644 --- a/website/docs/cdktf/typescript/r/iot_thing_type.html.markdown +++ b/website/docs/cdktf/typescript/r/iot_thing_type.html.markdown @@ -38,6 +38,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required, Forces New Resource) The name of the thing type. * `deprecated` - (Optional, Defaults to false) Whether the thing type is deprecated. If true, no new things could be associated with this type. * `properties` - (Optional), Configuration block that can contain the following properties of the thing type: @@ -80,4 +81,4 @@ Using `terraform import`, import IOT Thing Types using the name. For example: % terraform import aws_iot_thing_type.example example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/iot_topic_rule.html.markdown b/website/docs/cdktf/typescript/r/iot_topic_rule.html.markdown index c719cb39a1d6..a66972e9f74f 100644 --- a/website/docs/cdktf/typescript/r/iot_topic_rule.html.markdown +++ b/website/docs/cdktf/typescript/r/iot_topic_rule.html.markdown @@ -100,6 +100,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The name of the rule. * `description` - (Optional) The description of the rule. * `enabled` - (Required) Specifies whether the rule is enabled. @@ -288,4 +289,4 @@ Using `terraform import`, import IoT Topic Rules using the `name`. For example: % terraform import aws_iot_topic_rule.rule ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/iot_topic_rule_destination.html.markdown b/website/docs/cdktf/typescript/r/iot_topic_rule_destination.html.markdown index 800b362a1af7..a0d3aaea7b65 100644 --- a/website/docs/cdktf/typescript/r/iot_topic_rule_destination.html.markdown +++ b/website/docs/cdktf/typescript/r/iot_topic_rule_destination.html.markdown @@ -41,6 +41,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `enabled` - (Optional) Whether or not to enable the destination. Default: `true`. * `vpcConfiguration` - (Required) Configuration of the virtual private cloud (VPC) connection. For more info, see the [AWS documentation](https://docs.aws.amazon.com/iot/latest/developerguide/vpc-rule-action.html). @@ -89,4 +90,4 @@ Using `terraform import`, import IoT topic rule destinations using the `arn`. 
Fo % terraform import aws_iot_topic_rule_destination.example arn:aws:iot:us-west-2:123456789012:ruledestination/vpc/2ce781c8-68a6-4c52-9c62-63fe489ecc60 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/ivs_channel.html.markdown b/website/docs/cdktf/typescript/r/ivs_channel.html.markdown index c567634e79cf..ecf0debb4b53 100644 --- a/website/docs/cdktf/typescript/r/ivs_channel.html.markdown +++ b/website/docs/cdktf/typescript/r/ivs_channel.html.markdown @@ -40,6 +40,7 @@ class MyConvertedCode extends TerraformStack { The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `authorized` - (Optional) If `true`, channel is private (enabled for playback authorization). * `latencyMode` - (Optional) Channel latency mode. Valid values: `NORMAL`, `LOW`. * `name` - (Optional) Channel name. @@ -66,6 +67,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_ivs_channel.example + identity = { + "arn" = "arn:aws:ivs:us-west-2:123456789012:channel/abcdABCDefgh" + } +} + +resource "aws_ivs_channel" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the IVS channel. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import IVS (Interactive Video) Channel using the ARN. 
For example: ```typescript @@ -96,4 +118,4 @@ Using `terraform import`, import IVS (Interactive Video) Channel using the ARN. % terraform import aws_ivs_channel.example arn:aws:ivs:us-west-2:326937407773:channel/0Y1lcs4U7jk5 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/ivs_playback_key_pair.html.markdown b/website/docs/cdktf/typescript/r/ivs_playback_key_pair.html.markdown index fd7be044431d..30fd5938b2d1 100644 --- a/website/docs/cdktf/typescript/r/ivs_playback_key_pair.html.markdown +++ b/website/docs/cdktf/typescript/r/ivs_playback_key_pair.html.markdown @@ -44,6 +44,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Optional) Playback Key Pair name. * `tags` - (Optional) A map of tags to assign to the resource. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. @@ -64,6 +65,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. 
For example: + +```terraform +import { + to = aws_ivs_playback_key_pair.example + identity = { + "arn" = "arn:aws:ivs:us-west-2:123456789012:playback-key/abcdABCDefgh" + } +} + +resource "aws_ivs_playback_key_pair" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the IVS playback key pair. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import IVS (Interactive Video) Playback Key Pair using the ARN. For example: ```typescript @@ -94,4 +116,4 @@ Using `terraform import`, import IVS (Interactive Video) Playback Key Pair using % terraform import aws_ivs_playback_key_pair.example arn:aws:ivs:us-west-2:326937407773:playback-key/KDJRJNQhiQzA ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/ivs_recording_configuration.html.markdown b/website/docs/cdktf/typescript/r/ivs_recording_configuration.html.markdown index 613bb05c682b..fac29adacead 100644 --- a/website/docs/cdktf/typescript/r/ivs_recording_configuration.html.markdown +++ b/website/docs/cdktf/typescript/r/ivs_recording_configuration.html.markdown @@ -51,6 +51,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Optional) Recording Configuration name. * `recordingReconnectWindowSeconds` - (Optional) If a broadcast disconnects and then reconnects within the specified interval, the multiple streams will be considered a single broadcast and merged together. * `tags` - (Optional) A map of tags to assign to the resource. 
If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. @@ -75,6 +76,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_ivs_recording_configuration.example + identity = { + "arn" = "arn:aws:ivs:us-west-2:123456789012:recording-configuration/abcdABCDefgh" + } +} + +resource "aws_ivs_recording_configuration" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the IVS recording configuration. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import IVS (Interactive Video) Recording Configuration using the ARN. 
For example: ```typescript @@ -105,4 +127,4 @@ Using `terraform import`, import IVS (Interactive Video) Recording Configuration % terraform import aws_ivs_recording_configuration.example arn:aws:ivs:us-west-2:326937407773:recording-configuration/KAk1sHBl2L47 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/ivschat_logging_configuration.html.markdown b/website/docs/cdktf/typescript/r/ivschat_logging_configuration.html.markdown index 61230122c42e..1c608114d7f6 100644 --- a/website/docs/cdktf/typescript/r/ivschat_logging_configuration.html.markdown +++ b/website/docs/cdktf/typescript/r/ivschat_logging_configuration.html.markdown @@ -172,6 +172,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Optional) Logging Configuration name. * `tags` - (Optional) A map of tags to assign to the resource. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. @@ -194,6 +195,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. 
For example: + +```terraform +import { + to = aws_ivschat_logging_configuration.example + identity = { + "arn" = "arn:aws:ivschat:us-west-2:123456789012:logging-configuration/abcdABCDefgh" + } +} + +resource "aws_ivschat_logging_configuration" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the IVS Chat logging configuration. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import IVS (Interactive Video) Chat Logging Configuration using the ARN. For example: ```typescript @@ -224,4 +246,4 @@ Using `terraform import`, import IVS (Interactive Video) Chat Logging Configurat % terraform import aws_ivschat_logging_configuration.example arn:aws:ivschat:us-west-2:326937407773:logging-configuration/MMUQc8wcqZmC ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/ivschat_room.html.markdown b/website/docs/cdktf/typescript/r/ivschat_room.html.markdown index 04a42df0d70f..c4440bb888f5 100644 --- a/website/docs/cdktf/typescript/r/ivschat_room.html.markdown +++ b/website/docs/cdktf/typescript/r/ivschat_room.html.markdown @@ -87,6 +87,7 @@ class MyConvertedCode extends TerraformStack { The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `loggingConfigurationIdentifiers` - (Optional) List of Logging Configuration ARNs to attach to the room. 
* `maximumMessageLength` - (Optional) Maximum number of characters in a single @@ -121,6 +122,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_ivschat_room.example + identity = { + "arn" = "arn:aws:ivschat:us-west-2:123456789012:room/g1H2I3j4k5L6" + } +} + +resource "aws_ivschat_room" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the IVS Chat room. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import IVS (Interactive Video) Chat Room using the ARN. For example: ```typescript @@ -151,4 +173,4 @@ Using `terraform import`, import IVS (Interactive Video) Chat Room using the ARN % terraform import aws_ivschat_room.example arn:aws:ivschat:us-west-2:326937407773:room/GoXEXyB4VwHb ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/kendra_data_source.html.markdown b/website/docs/cdktf/typescript/r/kendra_data_source.html.markdown index c3fcdb3d4b7c..c2411670c644 100644 --- a/website/docs/cdktf/typescript/r/kendra_data_source.html.markdown +++ b/website/docs/cdktf/typescript/r/kendra_data_source.html.markdown @@ -537,6 +537,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
* `configuration` - (Optional) A block with the configuration information to connect to your Data Source repository. You can't specify the `configuration` block when the `type` parameter is set to `CUSTOM`. [Detailed below](#configuration-block). * `customDocumentEnrichmentConfiguration` - (Optional) A block with the configuration information for altering document metadata and content during the document ingestion process. For more information on how to create, modify and delete document metadata, or make other content alterations when you ingest documents into Amazon Kendra, see [Customizing document metadata during the ingestion process](https://docs.aws.amazon.com/kendra/latest/dg/custom-document-enrichment.html). [Detailed below](#custom_document_enrichment_configuration-block). * `description` - (Optional) A description for the Data Source connector. @@ -584,7 +585,7 @@ The `documentsMetadataConfiguration` configuration block supports the following The `webCrawlerConfiguration` configuration block supports the following arguments: * `authenticationConfiguration` - (Optional) A block with the configuration information required to connect to websites using authentication. You can connect to websites using basic authentication of user name and password. You use a secret in AWS Secrets Manager to store your authentication credentials. You must provide the website host name and port number. For example, the host name of `https://a.example.com/page1.html` is `"a.example.com"` and the port is `443`, the standard port for HTTPS. [Detailed below](#authentication_configuration-block). -* `crawlDepth` - (Optional) Specifies the number of levels in a website that you want to crawl. The first level begins from the website seed or starting point URL. For example, if a website has 3 levels – index level (i.e. seed in this example), sections level, and subsections level – and you are only interested in crawling information up to the sections level (i.e. 
levels 0-1), you can set your depth to 1. The default crawl depth is set to `2`. Minimum value of `0`. Maximum value of `10`. +* `crawlDepth` - (Optional) Specifies the number of levels in a website that you want to crawl. The first level begins from the website seed or starting point URL. For example, if a website has 3 levels - index level (i.e. seed in this example), sections level, and subsections level - and you are only interested in crawling information up to the sections level (i.e. levels 0-1), you can set your depth to 1. The default crawl depth is set to `2`. Minimum value of `0`. Maximum value of `10`. * `maxContentSizePerPageInMegaBytes` - (Optional) The maximum size (in MB) of a webpage or attachment to crawl. Files larger than this size (in MB) are skipped/not crawled. The default maximum size of a webpage or attachment is set to `50` MB. Minimum value of `1.0e-06`. Maximum value of `50`. * `maxLinksPerPage` - (Optional) The maximum number of URLs on a webpage to include when crawling a website. This number is per webpage. As a website’s webpages are crawled, any URLs the webpages link to are also crawled. URLs on a webpage are crawled in order of appearance. The default maximum links per page is `100`. Minimum value of `1`. Maximum value of `1000`. * `maxUrlsPerMinuteCrawlRate` - (Optional) The maximum number of URLs crawled per website host per minute. The default maximum number of URLs crawled per website host per minute is `300`. Minimum value of `1`. Maximum value of `300`. @@ -634,9 +635,9 @@ The `seedUrlConfiguration` configuration block supports the following arguments: * `seedUrls` - (Required) The list of seed or starting point URLs of the websites you want to crawl. The list can include a maximum of `100` seed URLs. Array Members: Minimum number of `0` items. Maximum number of `100` items. Length Constraints: Minimum length of `1`. Maximum length of `2048`. * `webCrawlerMode` - (Optional) The default mode is set to `HOST_ONLY`. 
You can choose one of the following modes: - * `HOST_ONLY` – crawl only the website host names. For example, if the seed URL is `"abc.example.com"`, then only URLs with host name `"abc.example.com"` are crawled. - * `SUBDOMAINS` – crawl the website host names with subdomains. For example, if the seed URL is `"abc.example.com"`, then `"a.abc.example.com"` and `"b.abc.example.com"` are also crawled. - * `EVERYTHING` – crawl the website host names with subdomains and other domains that the webpages link to. + * `HOST_ONLY` - crawl only the website host names. For example, if the seed URL is `"abc.example.com"`, then only URLs with host name `"abc.example.com"` are crawled. + * `SUBDOMAINS` - crawl the website host names with subdomains. For example, if the seed URL is `"abc.example.com"`, then `"a.abc.example.com"` and `"b.abc.example.com"` are also crawled. + * `EVERYTHING` - crawl the website host names with subdomains and other domains that the webpages link to. ### site_maps_configuration Block @@ -764,4 +765,4 @@ Using `terraform import`, import Kendra Data Source using the unique identifiers % terraform import aws_kendra_data_source.example 1045d08d-66ef-4882-b3ed-dfb7df183e90/b34dfdf7-1f2b-4704-9581-79e00296845f ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/kendra_experience.html.markdown b/website/docs/cdktf/typescript/r/kendra_experience.html.markdown index 179e707b1ccc..23cfcc86fcc7 100644 --- a/website/docs/cdktf/typescript/r/kendra_experience.html.markdown +++ b/website/docs/cdktf/typescript/r/kendra_experience.html.markdown @@ -59,6 +59,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` - (Optional, Forces new resource if removed) A description for your Amazon Kendra experience. * `configuration` - (Optional) Configuration information for your Amazon Kendra experience. Terraform will only perform drift detection of its value when present in a configuration. [Detailed below](#configuration). @@ -139,4 +140,4 @@ Using `terraform import`, import Kendra Experience using the unique identifiers % terraform import aws_kendra_experience.example 1045d08d-66ef-4882-b3ed-dfb7df183e90/b34dfdf7-1f2b-4704-9581-79e00296845f ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/kendra_faq.html.markdown b/website/docs/cdktf/typescript/r/kendra_faq.html.markdown index 306a62a50a26..e0268c2e6343 100644 --- a/website/docs/cdktf/typescript/r/kendra_faq.html.markdown +++ b/website/docs/cdktf/typescript/r/kendra_faq.html.markdown @@ -119,6 +119,7 @@ The `s3Path` configuration block supports the following arguments: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` - (Optional, Forces new resource) The description for a FAQ. * `fileFormat` - (Optional, Forces new resource) The file format used by the input files for the FAQ. Valid Values are `CSV`, `CSV_WITH_HEADER`, `JSON`. * `languageCode` - (Optional, Forces new resource) The code for a language. This shows a supported language for the FAQ document. English is supported by default. 
For more information on supported languages, including their codes, see [Adding documents in languages other than English](https://docs.aws.amazon.com/kendra/latest/dg/in-adding-languages.html). @@ -176,4 +177,4 @@ Using `terraform import`, import `aws_kendra_faq` using the unique identifiers o % terraform import aws_kendra_faq.example faq-123456780/idx-8012925589 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/kendra_index.html.markdown b/website/docs/cdktf/typescript/r/kendra_index.html.markdown index d2d72c202767..3aa2b6fce38d 100644 --- a/website/docs/cdktf/typescript/r/kendra_index.html.markdown +++ b/website/docs/cdktf/typescript/r/kendra_index.html.markdown @@ -666,6 +666,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `capacityUnits` - (Optional) A block that sets the number of additional document storage and query capacity units that should be used by the index. [Detailed below](#capacity_units). * `description` - (Optional) The description of the Index. * `documentMetadataConfigurationUpdates` - (Optional) One or more blocks that specify the configuration settings for any metadata applied to the documents in the index. Minimum number of 0 items. Maximum number of 500 items. If specified, you must define all elements, including those that are provided by default. These index fields are documented at [Amazon Kendra Index documentation](https://docs.aws.amazon.com/kendra/latest/dg/hiw-index.html). For an example resource that defines these default index fields, refer to the [default example above](#specifying-the-predefined-elements). 
For an example resource that appends additional index fields, refer to the [append example above](#appending-additional-elements). All arguments for each block must be specified. Note that blocks cannot be removed since index fields cannot be deleted. This argument is [detailed below](#document_metadata_configuration_updates). @@ -825,4 +826,4 @@ Using `terraform import`, import Amazon Kendra Indexes using its `id`. For examp % terraform import aws_kendra_index.example 12345678-1234-5678-9123-123456789123 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/kendra_query_suggestions_block_list.html.markdown b/website/docs/cdktf/typescript/r/kendra_query_suggestions_block_list.html.markdown index 3c0e618191c5..a3a7ec7cc28c 100644 --- a/website/docs/cdktf/typescript/r/kendra_query_suggestions_block_list.html.markdown +++ b/website/docs/cdktf/typescript/r/kendra_query_suggestions_block_list.html.markdown @@ -61,6 +61,7 @@ The `sourceS3Path` configuration block supports the following arguments: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` - (Optional) Description for a block list. * `tags` - (Optional) Key-value map of resource tags. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block), tags with matching keys will overwrite those defined at the provider-level. 
@@ -112,4 +113,4 @@ Using `terraform import`, import the `aws_kendra_query_suggestions_block_list` r % terraform import aws_kendra_query_suggestions_block_list.example blocklist-123456780/idx-8012925589 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/kendra_thesaurus.html.markdown b/website/docs/cdktf/typescript/r/kendra_thesaurus.html.markdown index da3934857e49..20293eecda37 100644 --- a/website/docs/cdktf/typescript/r/kendra_thesaurus.html.markdown +++ b/website/docs/cdktf/typescript/r/kendra_thesaurus.html.markdown @@ -59,6 +59,7 @@ The `sourceS3Path` configuration block supports the following arguments: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` - (Optional) The description for a thesaurus. * `tags` - (Optional) Key-value map of resource tags. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. 
@@ -111,4 +112,4 @@ Using `terraform import`, import `aws_kendra_thesaurus` using the unique identif % terraform import aws_kendra_thesaurus.example thesaurus-123456780/idx-8012925589 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/key_pair.html.markdown b/website/docs/cdktf/typescript/r/key_pair.html.markdown index c87565611b94..6923683233f4 100644 --- a/website/docs/cdktf/typescript/r/key_pair.html.markdown +++ b/website/docs/cdktf/typescript/r/key_pair.html.markdown @@ -48,6 +48,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `keyName` - (Optional) The name for the key pair. If neither `keyName` nor `keyNamePrefix` is provided, Terraform will create a unique key name using the prefix `terraform-`. * `keyNamePrefix` - (Optional) Creates a unique name beginning with the specified prefix. Conflicts with `keyName`. If neither `keyName` nor `keyNamePrefix` is provided, Terraform will create a unique key name using the prefix `terraform-`. * `publicKey` - (Required) The public key material. @@ -95,4 +96,4 @@ Using `terraform import`, import Key Pairs using the `keyName`. For example: ~> **NOTE:** The AWS API does not include the public key in the response, so `terraform apply` will attempt to replace the key pair. There is currently no supported workaround for this limitation. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/keyspaces_keyspace.html.markdown b/website/docs/cdktf/typescript/r/keyspaces_keyspace.html.markdown index e9ff2a4f50fb..b57c681b8299 100644 --- a/website/docs/cdktf/typescript/r/keyspaces_keyspace.html.markdown +++ b/website/docs/cdktf/typescript/r/keyspaces_keyspace.html.markdown @@ -40,6 +40,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required, Forces new resource) The name of the keyspace to be created. * `replicationSpecification` - (Optional) The replication specification of the keyspace. * `regionList` - (Optional) Replication regions. If `replicationStrategy` is `MULTI_REGION`, `regionList` requires the current Region and at least one additional AWS Region where the keyspace is going to be replicated in. @@ -89,4 +90,4 @@ Using `terraform import`, import a keyspace using the `name`. For example: % terraform import aws_keyspaces_keyspace.example my_keyspace ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/keyspaces_table.html.markdown b/website/docs/cdktf/typescript/r/keyspaces_table.html.markdown index 3eedf689f222..1aeb35aa67ce 100644 --- a/website/docs/cdktf/typescript/r/keyspaces_table.html.markdown +++ b/website/docs/cdktf/typescript/r/keyspaces_table.html.markdown @@ -59,6 +59,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `capacitySpecification` - (Optional) Specifies the read/write throughput capacity mode for the table. * `clientSideTimestamps` - (Optional) Enables client-side timestamps for the table. By default, the setting is disabled. * `comment` - (Optional) A description of the table. @@ -168,4 +169,4 @@ Using `terraform import`, import a table using the `keyspaceName` and `tableName % terraform import aws_keyspaces_table.example my_keyspace/my_table ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/kinesis_analytics_application.html.markdown b/website/docs/cdktf/typescript/r/kinesis_analytics_application.html.markdown index b30b3d49fd11..299c646397cf 100644 --- a/website/docs/cdktf/typescript/r/kinesis_analytics_application.html.markdown +++ b/website/docs/cdktf/typescript/r/kinesis_analytics_application.html.markdown @@ -15,6 +15,8 @@ allows processing and analyzing streaming data using standard SQL. For more details, see the [Amazon Kinesis Analytics Documentation][1]. +!> **WARNING:** _This resource is deprecated and will be removed in a future version._ [Effective January 27, 2026](https://aws.amazon.com/blogs/big-data/migrate-from-amazon-kinesis-data-analytics-for-sql-to-amazon-managed-service-for-apache-flink-and-amazon-managed-service-for-apache-flink-studio/), AWS will [no longer support](https://docs.aws.amazon.com/kinesisanalytics/latest/dev/discontinuation.html) Amazon Kinesis Data Analytics for SQL. Use the `aws_kinesisanalyticsv2_application` resource instead to manage Amazon Kinesis Data Analytics for Apache Flink applications. 
AWS provides guidance for migrating from [Amazon Kinesis Data Analytics for SQL Applications to Amazon Managed Service for Apache Flink Studio](https://aws.amazon.com/blogs/big-data/migrate-from-amazon-kinesis-data-analytics-for-sql-applications-to-amazon-managed-service-for-apache-flink-studio/) including [examples](https://docs.aws.amazon.com/kinesisanalytics/latest/dev/migrating-to-kda-studio-overview.html). + -> **Note:** To manage Amazon Kinesis Data Analytics for Apache Flink applications, use the [`aws_kinesisanalyticsv2_application`](/docs/providers/aws/r/kinesisanalyticsv2_application.html) resource. ## Example Usage @@ -180,6 +182,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the Kinesis Analytics Application. * `code` - (Optional) SQL Code to transform input data, and generate output. * `description` - (Optional) Description of the application. @@ -420,4 +423,4 @@ Using `terraform import`, import Kinesis Analytics Application using ARN. 
For ex % terraform import aws_kinesis_analytics_application.example arn:aws:kinesisanalytics:us-west-2:1234567890:application/example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/kinesis_firehose_delivery_stream.html.markdown b/website/docs/cdktf/typescript/r/kinesis_firehose_delivery_stream.html.markdown index f5cb94c6c637..1870fabe5ce1 100644 --- a/website/docs/cdktf/typescript/r/kinesis_firehose_delivery_stream.html.markdown +++ b/website/docs/cdktf/typescript/r/kinesis_firehose_delivery_stream.html.markdown @@ -700,7 +700,7 @@ class MyConvertedCode extends TerraformStack { "arn:${" + dataAwsPartitionCurrent.partition + "}:glue:${" + - dataAwsRegionCurrent.name + + dataAwsRegionCurrent.region + "}:${" + current.accountId + "}:catalog", @@ -871,14 +871,13 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) A name to identify the stream. This is unique to the AWS account and region the Stream is created in. When using for WAF logging, name must be prefixed with `aws-waf-logs-`. See [AWS Documentation](https://docs.aws.amazon.com/waf/latest/developerguide/waf-policies.html#waf-policies-logging-config) for more details. * `tags` - (Optional) A map of tags to assign to the resource. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. 
* `kinesisSourceConfiguration` - (Optional) The stream and role Amazon Resource Names (ARNs) for a Kinesis data stream used as the source for a delivery stream. See [`kinesisSourceConfiguration` block](#kinesis_source_configuration-block) below for details. * `mskSourceConfiguration` - (Optional) The configuration for the Amazon MSK cluster to be used as the source for a delivery stream. See [`mskSourceConfiguration` block](#msk_source_configuration-block) below for details. * `serverSideEncryption` - (Optional) Encrypt at rest options. See [`serverSideEncryption` block](#server_side_encryption-block) below for details. - - **NOTE:** Server-side encryption should not be enabled when a kinesis stream is configured as the source of the firehose delivery stream. -* `destination` – (Required) This is the destination to where the data is delivered. The only options are `s3` (Deprecated, use `extended_s3` instead), `extended_s3`, `redshift`, `elasticsearch`, `splunk`, `httpEndpoint`, `opensearch`, `opensearchserverless` and `snowflake`. +* `destination` - (Required) This is the destination to where the data is delivered. The only options are `s3` (Deprecated, use `extended_s3` instead), `extended_s3`, `redshift`, `elasticsearch`, `splunk`, `httpEndpoint`, `opensearch`, `opensearchserverless` and `snowflake`. * `elasticsearchConfiguration` - (Optional) Configuration options when `destination` is `elasticsearch`. See [`elasticsearchConfiguration` block](#elasticsearch_configuration-block) below for details. * `extendedS3Configuration` - (Optional, only Required when `destination` is `extended_s3`) Enhanced configuration options for the s3 destination. See [`extendedS3Configuration` block](#extended_s3_configuration-block) below for details. * `httpEndpointConfiguration` - (Optional) Configuration options when `destination` is `httpEndpoint`. Requires the user to also specify an `s3Configuration` block. 
See [`httpEndpointConfiguration` block](#http_endpoint_configuration-block) below for details. @@ -889,6 +888,8 @@ This resource supports the following arguments: * `snowflakeConfiguration` - (Optional) Configuration options when `destination` is `snowflake`. See [`snowflakeConfiguration` block](#snowflake_configuration-block) below for details. * `splunkConfiguration` - (Optional) Configuration options when `destination` is `splunk`. See [`splunkConfiguration` block](#splunk_configuration-block) below for details. +**NOTE:** Server-side encryption should not be enabled when a kinesis stream is configured as the source of the firehose delivery stream. + ### `kinesisSourceConfiguration` block The `kinesisSourceConfiguration` configuration block supports the following arguments: @@ -1381,4 +1382,4 @@ Using `terraform import`, import Kinesis Firehose Delivery streams using the str Note: Import does not work for stream destination `s3`. Consider using `extended_s3` since `s3` destination is deprecated. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/kinesis_resource_policy.html.markdown b/website/docs/cdktf/typescript/r/kinesis_resource_policy.html.markdown index 13756263f8b3..d6e1bd296f53 100644 --- a/website/docs/cdktf/typescript/r/kinesis_resource_policy.html.markdown +++ b/website/docs/cdktf/typescript/r/kinesis_resource_policy.html.markdown @@ -43,6 +43,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `policy` - (Required) The policy document. * `resourceArn` - (Required) The Amazon Resource Name (ARN) of the data stream or consumer. 
@@ -52,6 +53,27 @@ This resource exports no additional attributes. ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_kinesis_resource_policy.example + identity = { + "arn" = "arn:aws:kinesis:us-east-1:123456789012:stream/example-stream" + } +} + +resource "aws_kinesis_resource_policy" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the Kinesis stream. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Kinesis resource policies using the `resourceArn`. For example: ```typescript @@ -82,4 +104,4 @@ Using `terraform import`, import Kinesis resource policies using the `resourceAr % terraform import aws_kinesis_resource_policy.example arn:aws:kinesis:us-west-2:123456789012:stream/example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/kinesis_stream.html.markdown b/website/docs/cdktf/typescript/r/kinesis_stream.html.markdown index f375ef33de13..b3c8397ba4d4 100644 --- a/website/docs/cdktf/typescript/r/kinesis_stream.html.markdown +++ b/website/docs/cdktf/typescript/r/kinesis_stream.html.markdown @@ -50,8 +50,9 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) A name to identify the stream. This is unique to the AWS account and region the Stream is created in. 
-* `shardCount` – (Optional) The number of shards that the stream will use. If the `streamMode` is `PROVISIONED`, this field is required. +* `shardCount` - (Optional) The number of shards that the stream will use. If the `streamMode` is `PROVISIONED`, this field is required. Amazon has guidelines for specifying the Stream size that should be referenced when creating a Kinesis stream. See [Amazon Kinesis Streams][2] for more. * `retentionPeriod` - (Optional) Length of time data records are accessible after they are added to the stream. The maximum value of a stream's retention period is 8760 hours. Minimum value is 24. Default is 24. * `shardLevelMetrics` - (Optional) A list of shard-level CloudWatch metrics which can be enabled for the stream. See [Monitoring with CloudWatch][3] for more. Note that the value ALL should not be used; instead you should provide an explicit list of metrics you wish to enable. @@ -119,4 +120,4 @@ Using `terraform import`, import Kinesis Streams using the `name`. For example: [2]: https://docs.aws.amazon.com/kinesis/latest/dev/amazon-kinesis-streams.html [3]: https://docs.aws.amazon.com/streams/latest/dev/monitoring-with-cloudwatch.html - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/kinesis_stream_consumer.html.markdown b/website/docs/cdktf/typescript/r/kinesis_stream_consumer.html.markdown index db9457014d21..0627a306acea 100644 --- a/website/docs/cdktf/typescript/r/kinesis_stream_consumer.html.markdown +++ b/website/docs/cdktf/typescript/r/kinesis_stream_consumer.html.markdown @@ -54,8 +54,9 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required, Forces new resource) Name of the stream consumer. -* `streamArn` – (Required, Forces new resource) Amazon Resource Name (ARN) of the data stream the consumer is registered with. +* `streamArn` - (Required, Forces new resource) Amazon Resource Name (ARN) of the data stream the consumer is registered with. ## Attribute Reference @@ -99,4 +100,4 @@ Using `terraform import`, import Kinesis Stream Consumers using the Amazon Resou [1]: https://docs.aws.amazon.com/streams/latest/dev/amazon-kinesis-consumers.html - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/kinesis_video_stream.html.markdown b/website/docs/cdktf/typescript/r/kinesis_video_stream.html.markdown index a33a461bf9c3..774dadda9afd 100644 --- a/website/docs/cdktf/typescript/r/kinesis_video_stream.html.markdown +++ b/website/docs/cdktf/typescript/r/kinesis_video_stream.html.markdown @@ -46,9 +46,10 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) A name to identify the stream. This is unique to the AWS account and region the Stream is created in. -* `dataRetentionInHours` – (Optional) The number of hours that you want to retain the data in the stream. Kinesis Video Streams retains the data in a data store that is associated with the stream. The default value is `0`, indicating that the stream does not persist data. 
+* `dataRetentionInHours` - (Optional) The number of hours that you want to retain the data in the stream. Kinesis Video Streams retains the data in a data store that is associated with the stream. The default value is `0`, indicating that the stream does not persist data. * `deviceName` - (Optional) The name of the device that is writing to the stream. **In the current implementation, Kinesis Video Streams does not use this name.** * `kmsKeyId` - (Optional) The ID of the AWS Key Management Service (AWS KMS) key that you want Kinesis Video Streams to use to encrypt stream data. If no key ID is specified, the default, Kinesis Video-managed key (`aws/kinesisvideo`) is used. * `mediaType` - (Optional) The media type of the stream. Consumers of the stream can use this information when processing the stream. For more information about media types, see [Media Types][2]. If you choose to specify the MediaType, see [Naming Requirements][3] for guidelines. @@ -108,4 +109,4 @@ Using `terraform import`, import Kinesis Streams using the `arn`. For example: [2]: http://www.iana.org/assignments/media-types/media-types.xhtml [3]: https://tools.ietf.org/html/rfc6838#section-4.2 - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/kinesisanalyticsv2_application.html.markdown b/website/docs/cdktf/typescript/r/kinesisanalyticsv2_application.html.markdown index ab2604a294e1..defad8337d71 100644 --- a/website/docs/cdktf/typescript/r/kinesisanalyticsv2_application.html.markdown +++ b/website/docs/cdktf/typescript/r/kinesisanalyticsv2_application.html.markdown @@ -300,6 +300,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The name of the application. * `runtimeEnvironment` - (Required) The runtime environment for the application. Valid values: `SQL-1_0`, `FLINK-1_6`, `FLINK-1_8`, `FLINK-1_11`, `FLINK-1_13`, `FLINK-1_15`, `FLINK-1_18`, `FLINK-1_19`. * `serviceExecutionRole` - (Required) The ARN of the [IAM role](/docs/providers/aws/r/iam_role.html) used by the application to access Kinesis data streams, Kinesis Data Firehose delivery streams, Amazon S3 objects, and other external resources. @@ -569,4 +570,4 @@ Using `terraform import`, import `aws_kinesisanalyticsv2_application` using the % terraform import aws_kinesisanalyticsv2_application.example arn:aws:kinesisanalytics:us-west-2:123456789012:application/example-sql-application ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/kinesisanalyticsv2_application_snapshot.html.markdown b/website/docs/cdktf/typescript/r/kinesisanalyticsv2_application_snapshot.html.markdown index 5d20aca169ca..02a76a91a1f8 100644 --- a/website/docs/cdktf/typescript/r/kinesisanalyticsv2_application_snapshot.html.markdown +++ b/website/docs/cdktf/typescript/r/kinesisanalyticsv2_application_snapshot.html.markdown @@ -42,6 +42,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `applicationName` - (Required) The name of an existing [Kinesis Analytics v2 Application](/docs/providers/aws/r/kinesisanalyticsv2_application.html). 
Note that the application must be running for a snapshot to be created. * `snapshotName` - (Required) The name of the application snapshot. @@ -92,4 +93,4 @@ Using `terraform import`, import `aws_kinesisanalyticsv2_application` using `app % terraform import aws_kinesisanalyticsv2_application_snapshot.example example-application/example-snapshot ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/kms_alias.html.markdown b/website/docs/cdktf/typescript/r/kms_alias.html.markdown index ea5df0681a58..25135c9c3fac 100644 --- a/website/docs/cdktf/typescript/r/kms_alias.html.markdown +++ b/website/docs/cdktf/typescript/r/kms_alias.html.markdown @@ -45,6 +45,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Optional) The display name of the alias. The name must start with the word "alias" followed by a forward slash (alias/) * `namePrefix` - (Optional) Creates an unique alias beginning with the specified prefix. The name must start with the word "alias" followed by a forward slash (alias/). Conflicts with `name`. @@ -59,6 +60,32 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. 
For example: + +```terraform +import { + to = aws_kms_alias.example + identity = { + name = "alias/my-key-alias" + } +} + +resource "aws_kms_alias" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `name` - (String) Name of the KMS key alias. + +#### Optional + +* `accountId` (String) AWS Account where this resource is managed. +* `region` (String) Region where this resource is managed. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import KMS aliases using the `name`. For example: ```typescript @@ -85,4 +112,4 @@ Using `terraform import`, import KMS aliases using the `name`. For example: % terraform import aws_kms_alias.a alias/my-key-alias ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/kms_ciphertext.html.markdown b/website/docs/cdktf/typescript/r/kms_ciphertext.html.markdown index 1911cdf65d8e..ed1e26381ee4 100644 --- a/website/docs/cdktf/typescript/r/kms_ciphertext.html.markdown +++ b/website/docs/cdktf/typescript/r/kms_ciphertext.html.markdown @@ -51,6 +51,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `plaintext` - (Required) Data to be encrypted. Note that this may show up in logs, and it will be stored in the state file. * `keyId` - (Required) Globally unique key ID for the customer master key. * `context` - (Optional) An optional mapping that makes up the encryption context. 
@@ -61,4 +62,4 @@ This resource exports the following attributes in addition to the arguments abov * `ciphertextBlob` - Base64 encoded ciphertext - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/kms_custom_key_store.html.markdown b/website/docs/cdktf/typescript/r/kms_custom_key_store.html.markdown index f144db0c5b48..fafb2bf92b38 100644 --- a/website/docs/cdktf/typescript/r/kms_custom_key_store.html.markdown +++ b/website/docs/cdktf/typescript/r/kms_custom_key_store.html.markdown @@ -109,6 +109,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `customKeyStoreType` - (Optional, ForceNew) Specifies the type of key store to create. Valid values are `AWS_CLOUDHSM` and `EXTERNAL_KEY_STORE`. If omitted, AWS will default the value to `AWS_CLOUDHSM`. If `customKeyStoreType` is `AWS_CLOUDHSM`, the following optional arguments must be set: @@ -176,4 +177,4 @@ Using `terraform import`, import KMS (Key Management) Custom Key Store using the % terraform import aws_kms_custom_key_store.example cks-5ebd4ef395a96288e ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/kms_external_key.html.markdown b/website/docs/cdktf/typescript/r/kms_external_key.html.markdown index a100d5e8c92f..693d231883d2 100644 --- a/website/docs/cdktf/typescript/r/kms_external_key.html.markdown +++ b/website/docs/cdktf/typescript/r/kms_external_key.html.markdown @@ -46,8 +46,11 @@ This resource supports the following arguments: * `description` - (Optional) Description of the key. * `enabled` - (Optional) Specifies whether the key is enabled. 
Keys pending import can only be `false`. Imported keys default to `true` unless expired. * `keyMaterialBase64` - (Optional) Base64 encoded 256-bit symmetric encryption key material to import. The CMK is permanently associated with this key material. The same key material can be reimported, but you cannot import different key material. +* `keySpec` - (Optional) Specifies whether the key contains a symmetric key or an asymmetric key pair and the encryption algorithms or signing algorithms that the key supports. Valid values: `SYMMETRIC_DEFAULT`, `RSA_2048`, `RSA_3072`, `RSA_4096`, `HMAC_224`, `HMAC_256`, `HMAC_384`, `HMAC_512`, `ECC_NIST_P256`, `ECC_NIST_P384`, `ECC_NIST_P521`, `ECC_SECG_P256K1`, `ML_DSA_44`, `ML_DSA_65`, `ML_DSA_87`, or `SM2` (China Regions only). Defaults to `SYMMETRIC_DEFAULT`. For help with choosing a key spec, see the [AWS KMS Developer Guide](https://docs.aws.amazon.com/kms/latest/developerguide/symm-asymm-choose.html). +* `keyUsage` - (Optional) Specifies the intended use of the key. Valid values: `ENCRYPT_DECRYPT`, `SIGN_VERIFY`, or `GENERATE_VERIFY_MAC`. Defaults to `ENCRYPT_DECRYPT`. * `multiRegion` - (Optional) Indicates whether the KMS key is a multi-Region (`true`) or regional (`false`) key. Defaults to `false`. * `policy` - (Optional) A key policy JSON document. If you do not provide a key policy, AWS KMS attaches a default key policy to the CMK. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `tags` - (Optional) A key-value map of tags to assign to the key. 
If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. * `validTo` - (Optional) Time at which the imported key material expires. When the key material expires, AWS KMS deletes the key material and the CMK becomes unusable. If not specified, key material does not expire. Valid values: [RFC3339 time string](https://tools.ietf.org/html/rfc3339#section-5.8) (`YYYY-MM-DDTHH:MM:SSZ`) @@ -59,7 +62,6 @@ This resource exports the following attributes in addition to the arguments abov * `expirationModel` - Whether the key material expires. Empty when pending key material import, otherwise `KEY_MATERIAL_EXPIRES` or `KEY_MATERIAL_DOES_NOT_EXPIRE`. * `id` - The unique identifier for the key. * `keyState` - The state of the CMK. -* `keyUsage` - The cryptographic operations for which you can use the CMK. * `tagsAll` - A map of tags assigned to the resource, including those inherited from the provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). ## Import @@ -94,4 +96,4 @@ Using `terraform import`, import KMS External Keys using the `id`. 
For example: % terraform import aws_kms_external_key.a arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/kms_grant.html.markdown b/website/docs/cdktf/typescript/r/kms_grant.html.markdown index a9922865be77..3bf28085d050 100644 --- a/website/docs/cdktf/typescript/r/kms_grant.html.markdown +++ b/website/docs/cdktf/typescript/r/kms_grant.html.markdown @@ -77,6 +77,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Optional, Forces new resources) A friendly name for identifying the grant. * `keyId` - (Required, Forces new resources) The unique identifier for the customer master key (CMK) that the grant applies to. Specify the key ID or the Amazon Resource Name (ARN) of the CMK. To specify a CMK in a different AWS account, you must use the key ARN. * `granteePrincipal` - (Required, Forces new resources) The principal that is given permission to perform the operations that the grant permits in ARN format. Note that due to eventual consistency issues around IAM principals, terraform's state may not always be refreshed to reflect what is true in AWS. 
@@ -131,4 +132,4 @@ Using `terraform import`, import KMS Grants using the Key ID and Grant ID separa % terraform import aws_kms_grant.test 1234abcd-12ab-34cd-56ef-1234567890ab:abcde1237f76e4ba7987489ac329fbfba6ad343d6f7075dbd1ef191f0120514 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/kms_key.html.markdown b/website/docs/cdktf/typescript/r/kms_key.html.markdown index a187db07715e..9c328001ed47 100644 --- a/website/docs/cdktf/typescript/r/kms_key.html.markdown +++ b/website/docs/cdktf/typescript/r/kms_key.html.markdown @@ -386,12 +386,13 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` - (Optional) The description of the key as viewed in AWS console. * `keyUsage` - (Optional) Specifies the intended use of the key. Valid values: `ENCRYPT_DECRYPT`, `SIGN_VERIFY`, or `GENERATE_VERIFY_MAC`. Defaults to `ENCRYPT_DECRYPT`. * `customKeyStoreId` - (Optional) ID of the KMS [Custom Key Store](https://docs.aws.amazon.com/kms/latest/developerguide/create-cmk-keystore.html) where the key will be stored instead of KMS (eg CloudHSM). * `customerMasterKeySpec` - (Optional) Specifies whether the key contains a symmetric key or an asymmetric key pair and the encryption algorithms or signing algorithms that the key supports. -Valid values: `SYMMETRIC_DEFAULT`, `RSA_2048`, `RSA_3072`, `RSA_4096`, `HMAC_256`, `ECC_NIST_P256`, `ECC_NIST_P384`, `ECC_NIST_P521`, or `ECC_SECG_P256K1`. Defaults to `SYMMETRIC_DEFAULT`. For help with choosing a key spec, see the [AWS KMS Developer Guide](https://docs.aws.amazon.com/kms/latest/developerguide/symm-asymm-choose.html). 
+Valid values: `SYMMETRIC_DEFAULT`, `RSA_2048`, `RSA_3072`, `RSA_4096`, `HMAC_224`, `HMAC_256`, `HMAC_384`, `HMAC_512`, `ECC_NIST_P256`, `ECC_NIST_P384`, `ECC_NIST_P521`, `ECC_SECG_P256K1`, `ML_DSA_44`, `ML_DSA_65`, `ML_DSA_87`, or `SM2` (China Regions only). Defaults to `SYMMETRIC_DEFAULT`. For help with choosing a key spec, see the [AWS KMS Developer Guide](https://docs.aws.amazon.com/kms/latest/developerguide/symm-asymm-choose.html). * `policy` - (Optional) A valid policy JSON document. Although this is a key policy, not an IAM policy, an [`aws_iam_policy_document`](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document), in the form that designates a principal, can be used. For more information about building policy documents with Terraform, see the [AWS IAM Policy Document Guide](https://learn.hashicorp.com/terraform/aws/iam-policy). ~> **NOTE:** Note: All KMS keys must have a key policy. If a key policy is not specified, AWS gives the KMS key a [default key policy](https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html#key-policy-default) that gives all principals in the owning account unlimited access to all KMS operations for the key. This default key policy effectively delegates all access control to IAM policies and KMS grants. @@ -428,6 +429,32 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_kms_key.example + identity = { + id = "1234abcd-12ab-34cd-56ef-1234567890ab" + } +} + +resource "aws_kms_key" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `id` - (String) ID of the KMS key. + +#### Optional + +* `accountId` (String) AWS Account where this resource is managed. 
+* `region` (String) Region where this resource is managed. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import KMS Keys using the `id`. For example: ```typescript @@ -458,4 +485,4 @@ Using `terraform import`, import KMS Keys using the `id`. For example: % terraform import aws_kms_key.a 1234abcd-12ab-34cd-56ef-1234567890ab ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/kms_key_policy.html.markdown b/website/docs/cdktf/typescript/r/kms_key_policy.html.markdown index c426d80e46b5..19542810cc2c 100644 --- a/website/docs/cdktf/typescript/r/kms_key_policy.html.markdown +++ b/website/docs/cdktf/typescript/r/kms_key_policy.html.markdown @@ -61,6 +61,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `keyId` - (Required) The ID of the KMS Key to attach the policy. * `policy` - (Required) A valid policy JSON document. Although this is a key policy, not an IAM policy, an [`aws_iam_policy_document`](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document), in the form that designates a principal, can be used. For more information about building policy documents with Terraform, see the [AWS IAM Policy Document Guide](https://learn.hashicorp.com/terraform/aws/iam-policy). @@ -106,4 +107,4 @@ Using `terraform import`, import KMS Key Policies using the `keyId`. 
For example % terraform import aws_kms_key_policy.a 1234abcd-12ab-34cd-56ef-1234567890ab ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/kms_replica_external_key.html.markdown b/website/docs/cdktf/typescript/r/kms_replica_external_key.html.markdown index f9452c4ba604..37a34b31cd46 100644 --- a/website/docs/cdktf/typescript/r/kms_replica_external_key.html.markdown +++ b/website/docs/cdktf/typescript/r/kms_replica_external_key.html.markdown @@ -59,6 +59,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `bypassPolicyLockoutSafetyCheck` - (Optional) A flag to indicate whether to bypass the key policy lockout safety check. Setting this value to true increases the risk that the KMS key becomes unmanageable. Do not set this value to true indiscriminately. For more information, refer to the scenario in the [Default Key Policy](https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html#key-policy-default-allow-root-enable-iam) section in the _AWS Key Management Service Developer Guide_. @@ -117,4 +118,4 @@ Using `terraform import`, import KMS multi-Region replica keys using the `id`. 
F % terraform import aws_kms_replica_external_key.example 1234abcd-12ab-34cd-56ef-1234567890ab ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/kms_replica_key.html.markdown b/website/docs/cdktf/typescript/r/kms_replica_key.html.markdown index 37bdee7d3ab4..afbd18b6f460 100644 --- a/website/docs/cdktf/typescript/r/kms_replica_key.html.markdown +++ b/website/docs/cdktf/typescript/r/kms_replica_key.html.markdown @@ -14,6 +14,8 @@ Manages a KMS multi-Region replica key. ## Example Usage +### Terraform AWS Provider v5 (and below) + ```typescript // DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug import { Construct } from "constructs"; @@ -51,10 +53,46 @@ class MyConvertedCode extends TerraformStack { ``` +### Terraform AWS Provider v6 (and above) + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. 
+ */ +import { KmsKey } from "./.gen/providers/aws/kms-key"; +import { KmsReplicaKey } from "./.gen/providers/aws/kms-replica-key"; +import { AwsProvider } from "./.gen/providers/aws/provider"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + new AwsProvider(this, "aws", { + region: "us-west-2", + }); + const primary = new KmsKey(this, "primary", { + deletionWindowInDays: 30, + description: "Multi-Region primary key", + multiRegion: true, + region: "us-east-1", + }); + new KmsReplicaKey(this, "replica", { + deletionWindowInDays: 7, + description: "Multi-Region replica key", + primaryKeyArn: primary.arn, + }); + } +} + +``` + ## Argument Reference This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `bypassPolicyLockoutSafetyCheck` - (Optional) A flag to indicate whether to bypass the key policy lockout safety check. Setting this value to true increases the risk that the KMS key becomes unmanageable. Do not set this value to true indiscriminately. For more information, refer to the scenario in the [Default Key Policy](https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html#key-policy-default-allow-root-enable-iam) section in the _AWS Key Management Service Developer Guide_. @@ -111,4 +149,4 @@ Using `terraform import`, import KMS multi-Region replica keys using the `id`. 
F % terraform import aws_kms_replica_key.example 1234abcd-12ab-34cd-56ef-1234567890ab ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/lakeformation_data_cells_filter.html.markdown b/website/docs/cdktf/typescript/r/lakeformation_data_cells_filter.html.markdown index a501deb6a556..b810a6c61b5e 100644 --- a/website/docs/cdktf/typescript/r/lakeformation_data_cells_filter.html.markdown +++ b/website/docs/cdktf/typescript/r/lakeformation_data_cells_filter.html.markdown @@ -50,8 +50,9 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -The following arguments are required: +This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `tableData` - (Required) Information about the data cells filter. See [Table Data](#table-data) below for details. ### Table Data @@ -88,7 +89,7 @@ This resource exports the following attributes in addition to the arguments abov ## Import -In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Lake Formation Data Cells Filter using the `example_id_arg`. For example: +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Lake Formation Data Cells Filter using the `databaseName`, `name`, `tableCatalogId`, and `tableName` separated by `,`. For example: ```typescript // DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug @@ -112,10 +113,10 @@ class MyConvertedCode extends TerraformStack { ``` -Using `terraform import`, import Lake Formation Data Cells Filter using the `id`. 
For example: +Using `terraform import`, import Lake Formation Data Cells Filter using the `databaseName`, `name`, `tableCatalogId`, and `tableName` separated by `,`. For example: ```console % terraform import aws_lakeformation_data_cells_filter.example database_name,name,table_catalog_id,table_name ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/lakeformation_data_lake_settings.html.markdown b/website/docs/cdktf/typescript/r/lakeformation_data_lake_settings.html.markdown index a217c044cb88..fc1ebfa8da6e 100644 --- a/website/docs/cdktf/typescript/r/lakeformation_data_lake_settings.html.markdown +++ b/website/docs/cdktf/typescript/r/lakeformation_data_lake_settings.html.markdown @@ -141,17 +141,18 @@ class MyConvertedCode extends TerraformStack { The following arguments are optional: -* `admins` – (Optional) Set of ARNs of AWS Lake Formation principals (IAM users or roles). +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `admins` - (Optional) Set of ARNs of AWS Lake Formation principals (IAM users or roles). * `allowExternalDataFiltering` - (Optional) Whether to allow Amazon EMR clusters to access data managed by Lake Formation. * `allowFullTableExternalDataAccess` - (Optional) Whether to allow a third-party query engine to get data access credentials without session tags when a caller has full data access permissions. * `authorizedSessionTagValueList` - (Optional) Lake Formation relies on a privileged process secured by Amazon EMR or the third party integrator to tag the user's role while assuming it. -* `catalogId` – (Optional) Identifier for the Data Catalog. By default, the account ID. +* `catalogId` - (Optional) Identifier for the Data Catalog. 
By default, the account ID. * `createDatabaseDefaultPermissions` - (Optional) Up to three configuration blocks of principal permissions for default create database permissions. Detailed below. * `createTableDefaultPermissions` - (Optional) Up to three configuration blocks of principal permissions for default create table permissions. Detailed below. * `externalDataFilteringAllowList` - (Optional) A list of the account IDs of Amazon Web Services accounts with Amazon EMR clusters that are to perform data filtering. * `parameters` - Key-value map of additional configuration. Valid values for the `CROSS_ACCOUNT_VERSION` key are `"1"`, `"2"`, `"3"`, or `"4"`. `SET_CONTEXT` is also returned with a value of `TRUE`. In a fresh account, prior to configuring, `CROSS_ACCOUNT_VERSION` is `"1"`. Destroying this resource sets the `CROSS_ACCOUNT_VERSION` to `"1"`. -* `readOnlyAdmins` – (Optional) Set of ARNs of AWS Lake Formation principals (IAM users or roles) with only view access to the resources. -* `trustedResourceOwners` – (Optional) List of the resource-owning account IDs that the caller's account can use to share their user access details (user ARNs). +* `readOnlyAdmins` - (Optional) Set of ARNs of AWS Lake Formation principals (IAM users or roles) with only view access to the resources. +* `trustedResourceOwners` - (Optional) List of the resource-owning account IDs that the caller's account can use to share their user access details (user ARNs). ~> **NOTE:** Although optional, not including `admins`, `createDatabaseDefaultPermissions`, `createTableDefaultPermissions`, `parameters`, and/or `trustedResourceOwners` results in the setting being cleared. @@ -159,6 +160,7 @@ The following arguments are optional: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `permissions` - (Optional) List of permissions that are granted to the principal. Valid values may include `ALL`, `SELECT`, `ALTER`, `DROP`, `DELETE`, `INSERT`, `DESCRIBE`, and `CREATE_TABLE`. For more details, see [Lake Formation Permissions Reference](https://docs.aws.amazon.com/lake-formation/latest/dg/lf-permissions-reference.html). * `principal` - (Optional) Principal who is granted permissions. To enforce metadata and underlying data access control only by IAM on new databases and tables set `principal` to `IAM_ALLOWED_PRINCIPALS` and `permissions` to `["ALL"]`. @@ -166,6 +168,7 @@ The following arguments are optional: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `permissions` - (Optional) List of permissions that are granted to the principal. Valid values may include `ALL`, `SELECT`, `ALTER`, `DROP`, `DELETE`, `INSERT`, and `DESCRIBE`. For more details, see [Lake Formation Permissions Reference](https://docs.aws.amazon.com/lake-formation/latest/dg/lf-permissions-reference.html). * `principal` - (Optional) Principal who is granted permissions. To enforce metadata and underlying data access control only by IAM on new databases and tables set `principal` to `IAM_ALLOWED_PRINCIPALS` and `permissions` to `["ALL"]`. @@ -173,4 +176,4 @@ The following arguments are optional: This resource exports no additional attributes. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/lakeformation_lf_tag.html.markdown b/website/docs/cdktf/typescript/r/lakeformation_lf_tag.html.markdown index 3efe9cf504f0..074f31f4799a 100644 --- a/website/docs/cdktf/typescript/r/lakeformation_lf_tag.html.markdown +++ b/website/docs/cdktf/typescript/r/lakeformation_lf_tag.html.markdown @@ -39,6 +39,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `catalogId` - (Optional) ID of the Data Catalog to create the tag in. If omitted, this defaults to the AWS Account ID. * `key` - (Required) Key-name for the tag. * `values` - (Required) List of possible values an attribute can take. @@ -81,4 +82,4 @@ Using `terraform import`, import Lake Formation LF-Tags using the `catalog_id:ke % terraform import aws_lakeformation_lf_tag.example 123456789012:some_key ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/lakeformation_lf_tag_expression.html.markdown b/website/docs/cdktf/typescript/r/lakeformation_lf_tag_expression.html.markdown new file mode 100644 index 000000000000..d1f379ee4707 --- /dev/null +++ b/website/docs/cdktf/typescript/r/lakeformation_lf_tag_expression.html.markdown @@ -0,0 +1,106 @@ +--- +subcategory: "Lake Formation" +layout: "aws" +page_title: "AWS: aws_lakeformation_lf_tag_expression" +description: |- + Terraform resource for managing an AWS Lake Formation LF Tag Expression. +--- + + +# Resource: aws_lakeformation_lf_tag_expression + +Terraform resource for managing an AWS Lake Formation LF Tag Expression. 
+ +## Example Usage + +### Basic Usage + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. + */ +import { LakeformationLfTagExpression } from "./.gen/providers/aws/"; +import { LakeformationLfTag } from "./.gen/providers/aws/lakeformation-lf-tag"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + const example = new LakeformationLfTag(this, "example", { + key: "example", + values: ["value"], + }); + const awsLakeformationLfTagExpressionExample = + new LakeformationLfTagExpression(this, "example_1", { + expression: [ + { + tag_key: example.key, + tag_values: example.values, + }, + ], + name: "example", + }); + /*This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match.*/ + awsLakeformationLfTagExpressionExample.overrideLogicalId("example"); + } +} + +``` + +## Argument Reference + +The following arguments are required: + +* `name` - (Required) Name of the LF-Tag Expression. +* `expression` - (Required) A list of LF-Tag conditions (key-value pairs). See [expression](#expression) for more details. + +The following arguments are optional: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `catalogId` - (Optional) ID of the Data Catalog. Defaults to the account ID if not specified. +* `description` - (Optional) Description of the LF-Tag Expression. + +### expression + +* `tagKey` - (Required) The key-name for the LF-Tag. 
+* `tagValues` - (Required) A list of possible values for the LF-Tag + +## Attribute Reference + +This resource exports no additional attributes. + +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Lake Formation LF Tag Expression using the `name,catalog_id`. For example: + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. + */ +import { LakeformationLfTagExpression } from "./.gen/providers/aws/"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + LakeformationLfTagExpression.generateConfigForImport( + this, + "example", + "example-tag-expression,123456789012" + ); + } +} + +``` + +Using `terraform import`, import Lake Formation LF Tag Expression using the `name,catalog_id`. For example: + +```console +% terraform import aws_lakeformation_lf_tag_expression.example example-tag-expression,123456789012 +``` + + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/lakeformation_opt_in.html.markdown b/website/docs/cdktf/typescript/r/lakeformation_opt_in.html.markdown index e7fc1b7a689e..cf37686972fc 100644 --- a/website/docs/cdktf/typescript/r/lakeformation_opt_in.html.markdown +++ b/website/docs/cdktf/typescript/r/lakeformation_opt_in.html.markdown @@ -36,8 +36,9 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -The following arguments are required: +This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `principal` - (Required) Lake Formation principal. Supported principals are IAM users or IAM roles. See [Principal](#principal) for more details. * `resourceData` - (Required) Structure for the resource. See [Resource](#resource) for more details. @@ -121,4 +122,4 @@ This resource exports the following attributes in addition to the arguments abov * `update` - (Default `180m`) * `delete` - (Default `90m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/lakeformation_permissions.html.markdown b/website/docs/cdktf/typescript/r/lakeformation_permissions.html.markdown index c7b1eb0e91b5..dfed46ccc351 100644 --- a/website/docs/cdktf/typescript/r/lakeformation_permissions.html.markdown +++ b/website/docs/cdktf/typescript/r/lakeformation_permissions.html.markdown @@ -249,8 +249,8 @@ class MyConvertedCode extends TerraformStack { The following arguments are required: -* `permissions` – (Required) List of permissions granted to the principal. Valid values may include `ALL`, `ALTER`, `ASSOCIATE`, `CREATE_DATABASE`, `CREATE_TABLE`, `DATA_LOCATION_ACCESS`, `DELETE`, `DESCRIBE`, `DROP`, `INSERT`, and `SELECT`. For details on each permission, see [Lake Formation Permissions Reference](https://docs.aws.amazon.com/lake-formation/latest/dg/lf-permissions-reference.html). -* `principal` – (Required) Principal to be granted the permissions on the resource. Supported principals include `IAM_ALLOWED_PRINCIPALS` (see [Default Behavior and `IAMAllowedPrincipals`](#default-behavior-and-iamallowedprincipals) above), IAM roles, users, groups, Federated Users, SAML groups and users, QuickSight groups, OUs, and organizations as well as AWS account IDs for cross-account permissions. 
For more information, see [Lake Formation Permissions Reference](https://docs.aws.amazon.com/lake-formation/latest/dg/lf-permissions-reference.html). +* `permissions` - (Required) List of permissions granted to the principal. Valid values may include `ALL`, `ALTER`, `ASSOCIATE`, `CREATE_DATABASE`, `CREATE_TABLE`, `DATA_LOCATION_ACCESS`, `DELETE`, `DESCRIBE`, `DROP`, `INSERT`, and `SELECT`. For details on each permission, see [Lake Formation Permissions Reference](https://docs.aws.amazon.com/lake-formation/latest/dg/lf-permissions-reference.html). +* `principal` - (Required) Principal to be granted the permissions on the resource. Supported principals include `IAM_ALLOWED_PRINCIPALS` (see [Default Behavior and `IAMAllowedPrincipals`](#default-behavior-and-iamallowedprincipals) above), IAM roles, users, groups, Federated Users, SAML groups and users, QuickSight groups, OUs, and organizations as well as AWS account IDs for cross-account permissions. For more information, see [Lake Formation Permissions Reference](https://docs.aws.amazon.com/lake-formation/latest/dg/lf-permissions-reference.html). ~> **NOTE:** We highly recommend that the `principal` _NOT_ be a Lake Formation administrator (granted using `aws_lakeformation_data_lake_settings`). The entity (e.g., IAM role) running Terraform will most likely need to be a Lake Formation administrator. As such, the entity will have implicit permissions and does not need permissions granted through this resource. @@ -267,7 +267,8 @@ One of the following is required: The following arguments are optional: -* `catalogId` – (Optional) Identifier for the Data Catalog. By default, the account ID. The Data Catalog is the persistent metadata store. It contains database definitions, table definitions, and other control information to manage your Lake Formation environment. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `catalogId` - (Optional) Identifier for the Data Catalog. By default, the account ID. The Data Catalog is the persistent metadata store. It contains database definitions, table definitions, and other control information to manage your Lake Formation environment. * `permissionsWithGrantOption` - (Optional) Subset of `permissions` which the principal can pass. ### data_cells_filter @@ -281,7 +282,7 @@ The following arguments are optional: The following argument is required: -* `arn` – (Required) Amazon Resource Name (ARN) that uniquely identifies the data location resource. +* `arn` - (Required) Amazon Resource Name (ARN) that uniquely identifies the data location resource. The following argument is optional: @@ -291,7 +292,7 @@ The following argument is optional: The following argument is required: -* `name` – (Required) Name of the database resource. Unique to the Data Catalog. +* `name` - (Required) Name of the database resource. Unique to the Data Catalog. The following argument is optional: @@ -301,7 +302,7 @@ The following argument is optional: The following arguments are required: -* `key` – (Required) The key-name for the tag. +* `key` - (Required) The key-name for the tag. * `values` - (Required) A list of possible values an attribute can take. The following argument is optional: @@ -312,7 +313,7 @@ The following argument is optional: The following arguments are required: -* `resourceType` – (Required) The resource type for which the tag policy applies. Valid values are `DATABASE` and `TABLE`. +* `resourceType` - (Required) The resource type for which the tag policy applies. Valid values are `DATABASE` and `TABLE`. * `expression` - (Required) A list of tag conditions that apply to the resource's tag policy. Configuration block for tag conditions that apply to the policy. 
See [`expression`](#expression) below. The following argument is optional: @@ -321,19 +322,20 @@ The following argument is optional: #### expression -* `key` – (Required) The key-name of an LF-Tag. +* `key` - (Required) The key-name of an LF-Tag. * `values` - (Required) A list of possible values of an LF-Tag. ### table The following argument is required: -* `databaseName` – (Required) Name of the database for the table. Unique to a Data Catalog. +* `databaseName` - (Required) Name of the database for the table. Unique to a Data Catalog. * `name` - (Required, at least one of `name` or `wildcard`) Name of the table. * `wildcard` - (Required, at least one of `name` or `wildcard`) Whether to use a wildcard representing every table under a database. Defaults to `false`. The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `catalogId` - (Optional) Identifier for the Data Catalog. By default, it is the account ID of the caller. ### table_with_columns @@ -341,12 +343,13 @@ The following arguments are optional: The following arguments are required: * `columnNames` - (Required, at least one of `columnNames` or `wildcard`) Set of column names for the table. -* `databaseName` – (Required) Name of the database for the table with columns resource. Unique to the Data Catalog. -* `name` – (Required) Name of the table resource. +* `databaseName` - (Required) Name of the database for the table with columns resource. Unique to the Data Catalog. +* `name` - (Required) Name of the table resource. * `wildcard` - (Required, at least one of `columnNames` or `wildcard`) Whether to use a column wildcard. 
If `excludedColumnNames` is included, `wildcard` must be set to `true` to avoid Terraform reporting a difference. The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `catalogId` - (Optional) Identifier for the Data Catalog. By default, it is the account ID of the caller. * `excludedColumnNames` - (Optional) Set of column names for the table to exclude. If `excludedColumnNames` is included, `wildcard` must be set to `true` to avoid Terraform reporting a difference. @@ -354,4 +357,4 @@ The following arguments are optional: This resource exports no additional attributes. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/lakeformation_resource.html.markdown b/website/docs/cdktf/typescript/r/lakeformation_resource.html.markdown index 15f415597df1..23c6001c3b78 100644 --- a/website/docs/cdktf/typescript/r/lakeformation_resource.html.markdown +++ b/website/docs/cdktf/typescript/r/lakeformation_resource.html.markdown @@ -53,13 +53,16 @@ class MyConvertedCode extends TerraformStack { The following arguments are required: -* `arn` – (Required) Amazon Resource Name (ARN) of the resource. +* `arn` - (Required) Amazon Resource Name (ARN) of the resource. The following arguments are optional: -* `roleArn` – (Optional) Role that has read/write access to the resource. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `roleArn` - (Optional) Role that has read/write access to the resource. 
* `useServiceLinkedRole` - (Optional) Designates an AWS Identity and Access Management (IAM) service-linked role by registering this role with the Data Catalog. * `hybridAccessEnabled` - (Optional) Flag to enable AWS LakeFormation hybrid access permission mode. +* `withFederation` - (Optional) Whether or not the resource is a federated resource. Set to true when registering AWS Glue connections for federated catalog functionality. +* `withPrivilegedAccess` - (Optional) Boolean to grant the calling principal the permissions to perform all supported Lake Formation operations on the registered data location. ~> **NOTE:** AWS does not support registering an S3 location with an IAM role and subsequently updating the S3 location registration to a service-linked role. @@ -69,4 +72,4 @@ This resource exports the following attributes in addition to the arguments abov * `lastModified` - Date and time the resource was last modified in [RFC 3339 format](https://tools.ietf.org/html/rfc3339#section-5.8). - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/lakeformation_resource_lf_tag.html.markdown b/website/docs/cdktf/typescript/r/lakeformation_resource_lf_tag.html.markdown index 99e75ef47080..403955b19ea4 100644 --- a/website/docs/cdktf/typescript/r/lakeformation_resource_lf_tag.html.markdown +++ b/website/docs/cdktf/typescript/r/lakeformation_resource_lf_tag.html.markdown @@ -49,7 +49,7 @@ class MyConvertedCode extends TerraformStack { The following arguments are required: -* `lfTag` – (Required) Set of LF-tags to attach to the resource. See [LF Tag](#lf-tag) for more details. +* `lfTag` - (Required) Set of LF-tags to attach to the resource. See [LF Tag](#lf-tag) for more details. Exactly one of the following is required: @@ -59,13 +59,14 @@ Exactly one of the following is required: The following arguments are optional: -* `catalogId` – (Optional) Identifier for the Data Catalog. By default, the account ID.
The Data Catalog is the persistent metadata store. It contains database definitions, table definitions, and other control information to manage your Lake Formation environment. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `catalogId` - (Optional) Identifier for the Data Catalog. By default, the account ID. The Data Catalog is the persistent metadata store. It contains database definitions, table definitions, and other control information to manage your Lake Formation environment. ### LF Tag The following arguments are required: -* `key` – (Required) Key name for an existing LF-tag. +* `key` - (Required) Key name for an existing LF-tag. * `value` - (Required) Value from the possible values for the LF-tag. The following argument is optional: @@ -76,7 +77,7 @@ The following argument is optional: The following argument is required: -* `name` – (Required) Name of the database resource. Unique to the Data Catalog. +* `name` - (Required) Name of the database resource. Unique to the Data Catalog. The following argument is optional: @@ -86,12 +87,13 @@ The following argument is optional: The following argument is required: -* `databaseName` – (Required) Name of the database for the table. Unique to a Data Catalog. +* `databaseName` - (Required) Name of the database for the table. Unique to a Data Catalog. * `name` - (Required, at least one of `name` or `wildcard`) Name of the table. * `wildcard` - (Required, at least one of `name` or `wildcard`) Whether to use a wildcard representing every table under a database. Defaults to `false`. The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `catalogId` - (Optional) Identifier for the Data Catalog. By default, it is the account ID of the caller. ### Table With Columns @@ -99,11 +101,12 @@ The following arguments are optional: The following arguments are required: * `columnNames` - (Required, at least one of `columnNames` or `wildcard`) Set of column names for the table. -* `databaseName` – (Required) Name of the database for the table with columns resource. Unique to the Data Catalog. -* `name` – (Required) Name of the table resource. +* `databaseName` - (Required) Name of the database for the table with columns resource. Unique to the Data Catalog. +* `name` - (Required) Name of the table resource. The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `catalogId` - (Optional) Identifier for the Data Catalog. By default, it is the account ID of the caller. * `columnWildcard` - (Optional) Option to add column wildcard. See [Column Wildcard](#column-wildcard) for more details. @@ -126,4 +129,4 @@ This resource exports no additional attributes. You cannot import this resource. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/lakeformation_resource_lf_tags.html.markdown b/website/docs/cdktf/typescript/r/lakeformation_resource_lf_tags.html.markdown index 926afee34b5b..baeb0be6ee1b 100644 --- a/website/docs/cdktf/typescript/r/lakeformation_resource_lf_tags.html.markdown +++ b/website/docs/cdktf/typescript/r/lakeformation_resource_lf_tags.html.markdown @@ -128,7 +128,7 @@ class MyConvertedCode extends TerraformStack { The following arguments are required: -* `lfTag` – (Required) Set of LF-tags to attach to the resource. See below. +* `lfTag` - (Required) Set of LF-tags to attach to the resource. See below. Exactly one of the following is required: @@ -138,13 +138,14 @@ Exactly one of the following is required: The following arguments are optional: -* `catalogId` – (Optional) Identifier for the Data Catalog. By default, the account ID. The Data Catalog is the persistent metadata store. It contains database definitions, table definitions, and other control information to manage your Lake Formation environment. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `catalogId` - (Optional) Identifier for the Data Catalog. By default, the account ID. The Data Catalog is the persistent metadata store. It contains database definitions, table definitions, and other control information to manage your Lake Formation environment. ### lf_tag The following arguments are required: -* `key` – (Required) Key name for an existing LF-tag. +* `key` - (Required) Key name for an existing LF-tag. * `value` - (Required) Value from the possible values for the LF-tag. 
The following argument is optional: @@ -155,7 +156,7 @@ The following argument is optional: The following argument is required: -* `name` – (Required) Name of the database resource. Unique to the Data Catalog. +* `name` - (Required) Name of the database resource. Unique to the Data Catalog. The following argument is optional: @@ -165,12 +166,13 @@ The following argument is optional: The following argument is required: -* `databaseName` – (Required) Name of the database for the table. Unique to a Data Catalog. +* `databaseName` - (Required) Name of the database for the table. Unique to a Data Catalog. * `name` - (Required, at least one of `name` or `wildcard`) Name of the table. * `wildcard` - (Required, at least one of `name` or `wildcard`) Whether to use a wildcard representing every table under a database. Defaults to `false`. The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `catalogId` - (Optional) Identifier for the Data Catalog. By default, it is the account ID of the caller. ### table_with_columns @@ -178,12 +180,13 @@ The following arguments are optional: The following arguments are required: * `columnNames` - (Required, at least one of `columnNames` or `wildcard`) Set of column names for the table. -* `databaseName` – (Required) Name of the database for the table with columns resource. Unique to the Data Catalog. -* `name` – (Required) Name of the table resource. +* `databaseName` - (Required) Name of the database for the table with columns resource. Unique to the Data Catalog. +* `name` - (Required) Name of the table resource. * `wildcard` - (Required, at least one of `columnNames` or `wildcard`) Whether to use a column wildcard. 
If `excludedColumnNames` is included, `wildcard` must be set to `true` to avoid Terraform reporting a difference. The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `catalogId` - (Optional) Identifier for the Data Catalog. By default, it is the account ID of the caller. * `excludedColumnNames` - (Optional) Set of column names for the table to exclude. If `excludedColumnNames` is included, `wildcard` must be set to `true` to avoid Terraform reporting a difference. @@ -191,4 +194,4 @@ The following arguments are optional: This resource exports no additional attributes. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/lambda_alias.html.markdown b/website/docs/cdktf/typescript/r/lambda_alias.html.markdown index 1087e443310f..fc492b50bcd7 100644 --- a/website/docs/cdktf/typescript/r/lambda_alias.html.markdown +++ b/website/docs/cdktf/typescript/r/lambda_alias.html.markdown @@ -3,24 +3,25 @@ subcategory: "Lambda" layout: "aws" page_title: "AWS: aws_lambda_alias" description: |- - Creates a Lambda function alias. + Manages an AWS Lambda Alias. --- # Resource: aws_lambda_alias -Creates a Lambda function alias. Creates an alias that points to the specified Lambda function version. +Manages an AWS Lambda Alias. Use this resource to create an alias that points to a specific Lambda function version for traffic management and deployment strategies. -For information about Lambda and how to use it, see [What is AWS Lambda?][1] -For information about function aliases, see [CreateAlias][2] and [AliasRoutingConfiguration][3] in the API docs. 
+For information about Lambda and how to use it, see [What is AWS Lambda?](http://docs.aws.amazon.com/lambda/latest/dg/welcome.html). For information about function aliases, see [CreateAlias](http://docs.aws.amazon.com/lambda/latest/dg/API_CreateAlias.html) and [AliasRoutingConfiguration](https://docs.aws.amazon.com/lambda/latest/dg/API_AliasRoutingConfiguration.html) in the API docs. ## Example Usage +### Basic Alias + ```typescript // DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug import { Construct } from "constructs"; -import { TerraformStack } from "cdktf"; +import { Token, TerraformStack } from "cdktf"; /* * Provider bindings are generated by running `cdktf get`. * See https://cdk.tf/provider-generation for more details. @@ -29,14 +30,40 @@ import { LambdaAlias } from "./.gen/providers/aws/lambda-alias"; class MyConvertedCode extends TerraformStack { constructor(scope: Construct, name: string) { super(scope, name); - new LambdaAlias(this, "test_lambda_alias", { - description: "a sample description", - functionName: lambdaFunctionTest.arn, + new LambdaAlias(this, "example", { + description: "Production environment alias", + functionName: Token.asString(awsLambdaFunctionExample.arn), functionVersion: "1", - name: "my_alias", + name: "production", + }); + } +} + +``` + +### Alias with Traffic Splitting + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { Token, TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. 
+ */ +import { LambdaAlias } from "./.gen/providers/aws/lambda-alias"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + new LambdaAlias(this, "example", { + description: "Staging environment with traffic splitting", + functionName: Token.asString(awsLambdaFunctionExample.functionName), + functionVersion: "2", + name: "staging", routingConfig: { additionalVersionWeights: { - 2: 0.5, + 1: 0.1, + 3: 0.2, }, }, }); @@ -45,30 +72,85 @@ class MyConvertedCode extends TerraformStack { ``` +### Blue-Green Deployment Alias + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { Token, TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. + */ +import { LambdaAlias } from "./.gen/providers/aws/lambda-alias"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + new LambdaAlias(this, "example", { + description: "Live traffic with gradual rollout to new version", + functionName: Token.asString(awsLambdaFunctionExample.functionName), + functionVersion: "5", + name: "live", + routingConfig: { + additionalVersionWeights: { + 6: 0.05, + }, + }, + }); + } +} + +``` + +### Development Alias + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { Token, TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. 
+ */ +import { LambdaAlias } from "./.gen/providers/aws/lambda-alias"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + new LambdaAlias(this, "example", { + description: "Development environment - always points to latest", + functionName: Token.asString(awsLambdaFunctionExample.functionName), + functionVersion: "$LATEST", + name: "dev", + }); + } +} + +``` + ## Argument Reference -This resource supports the following arguments: +The following arguments are required: -* `name` - (Required) Name for the alias you are creating. Pattern: `(?!^[0-9]+$)([a-zA-Z0-9-_]+)` -* `description` - (Optional) Description of the alias. -* `functionName` - (Required) Lambda Function name or ARN. +* `functionName` - (Required) Name or ARN of the Lambda function. * `functionVersion` - (Required) Lambda function version for which you are creating the alias. Pattern: `(\$LATEST|[0-9]+)`. -* `routingConfig` - (Optional) The Lambda alias' route configuration settings. Fields documented below +* `name` - (Required) Name for the alias. Pattern: `(?!^[0-9]+$)([a-zA-Z0-9-_]+)`. -`routingConfig` supports the following arguments: +The following arguments are optional: -* `additionalVersionWeights` - (Optional) A map that defines the proportion of events that should be sent to different versions of a lambda function. +* `description` - (Optional) Description of the alias. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `routingConfig` - (Optional) Lambda alias' route configuration settings. [See below](#routing_config-configuration-block). 
+ +### routing_config Configuration Block + +* `additionalVersionWeights` - (Optional) Map that defines the proportion of events that should be sent to different versions of a Lambda function. ## Attribute Reference This resource exports the following attributes in addition to the arguments above: -* `arn` - The Amazon Resource Name (ARN) identifying your Lambda function alias. -* `invokeArn` - The ARN to be used for invoking Lambda Function from API Gateway - to be used in [`aws_api_gateway_integration`](/docs/providers/aws/r/api_gateway_integration.html)'s `uri` - -[1]: http://docs.aws.amazon.com/lambda/latest/dg/welcome.html -[2]: http://docs.aws.amazon.com/lambda/latest/dg/API_CreateAlias.html -[3]: https://docs.aws.amazon.com/lambda/latest/dg/API_AliasRoutingConfiguration.html +* `arn` - ARN identifying your Lambda function alias. +* `invokeArn` - ARN to be used for invoking Lambda Function from API Gateway - to be used in [`aws_api_gateway_integration`](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/api_gateway_integration)'s `uri`. ## Import @@ -86,20 +168,16 @@ import { LambdaAlias } from "./.gen/providers/aws/lambda-alias"; class MyConvertedCode extends TerraformStack { constructor(scope: Construct, name: string) { super(scope, name); - LambdaAlias.generateConfigForImport( - this, - "testLambdaAlias", - "my_test_lambda_function/my_alias" - ); + LambdaAlias.generateConfigForImport(this, "example", "example/production"); } } ``` -Using `terraform import`, import Lambda Function Aliases using the `function_name/alias`. 
For example: +For backwards compatibility, the following legacy `terraform import` command is also supported: ```console -% terraform import aws_lambda_alias.test_lambda_alias my_test_lambda_function/my_alias +% terraform import aws_lambda_alias.example example/production ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/lambda_code_signing_config.html.markdown b/website/docs/cdktf/typescript/r/lambda_code_signing_config.html.markdown index 838707064baf..9baca5afbb29 100644 --- a/website/docs/cdktf/typescript/r/lambda_code_signing_config.html.markdown +++ b/website/docs/cdktf/typescript/r/lambda_code_signing_config.html.markdown @@ -3,19 +3,68 @@ subcategory: "Lambda" layout: "aws" page_title: "AWS: aws_lambda_code_signing_config" description: |- - Provides a Lambda Code Signing Config resource. + Manages an AWS Lambda Code Signing Config. --- # Resource: aws_lambda_code_signing_config -Provides a Lambda Code Signing Config resource. A code signing configuration defines a list of allowed signing profiles and defines the code-signing validation policy (action to be taken if deployment validation checks fail). +Manages an AWS Lambda Code Signing Config. Use this resource to define allowed signing profiles and code-signing validation policies for Lambda functions to ensure code integrity and authenticity. -For information about Lambda code signing configurations and how to use them, see [configuring code signing for Lambda functions][1] +For information about Lambda code signing configurations and how to use them, see [configuring code signing for Lambda functions](https://docs.aws.amazon.com/lambda/latest/dg/configuration-codesigning.html). ## Example Usage +### Basic Usage + +```typescript +// DO NOT EDIT. 
Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. + */ +import { LambdaCodeSigningConfig } from "./.gen/providers/aws/lambda-code-signing-config"; +import { SignerSigningProfile } from "./.gen/providers/aws/signer-signing-profile"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + const dev = new SignerSigningProfile(this, "dev", { + namePrefix: "dev_lambda_", + platformId: "AWSLambda-SHA384-ECDSA", + tags: { + Environment: "development", + }, + }); + const prod = new SignerSigningProfile(this, "prod", { + namePrefix: "prod_lambda_", + platformId: "AWSLambda-SHA384-ECDSA", + tags: { + Environment: "production", + }, + }); + new LambdaCodeSigningConfig(this, "example", { + allowedPublishers: { + signingProfileVersionArns: [prod.versionArn, dev.versionArn], + }, + description: "Code signing configuration for Lambda functions", + policies: { + untrustedArtifactOnDeployment: "Enforce", + }, + tags: { + Environment: "production", + Purpose: "code-signing", + }, + }); + } +} + +``` + +### Warning Only Configuration + ```typescript // DO NOT EDIT. 
Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug import { Construct } from "constructs"; @@ -28,16 +77,68 @@ import { LambdaCodeSigningConfig } from "./.gen/providers/aws/lambda-code-signin class MyConvertedCode extends TerraformStack { constructor(scope: Construct, name: string) { super(scope, name); - new LambdaCodeSigningConfig(this, "new_csc", { + new LambdaCodeSigningConfig(this, "example", { allowedPublishers: { - signingProfileVersionArns: [example1.versionArn, example2.versionArn], + signingProfileVersionArns: [dev.versionArn], }, - description: "My awesome code signing config.", + description: "Development code signing configuration", policies: { untrustedArtifactOnDeployment: "Warn", }, tags: { - Name: "dynamodb", + Environment: "development", + Purpose: "code-signing", + }, + }); + } +} + +``` + +### Multiple Environment Configuration + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { Token, TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. 
+ */ +import { LambdaCodeSigningConfig } from "./.gen/providers/aws/lambda-code-signing-config"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + new LambdaCodeSigningConfig(this, "dev", { + allowedPublishers: { + signingProfileVersionArns: [ + Token.asString(awsSignerSigningProfileDev.versionArn), + test.versionArn, + ], + }, + description: "Development code signing configuration with warnings", + policies: { + untrustedArtifactOnDeployment: "Warn", + }, + tags: { + Environment: "development", + Security: "flexible", + }, + }); + new LambdaCodeSigningConfig(this, "prod", { + allowedPublishers: { + signingProfileVersionArns: [ + Token.asString(awsSignerSigningProfileProd.versionArn), + ], + }, + description: + "Production code signing configuration with strict enforcement", + policies: { + untrustedArtifactOnDeployment: "Enforce", + }, + tags: { + Environment: "production", + Security: "strict", }, }); } @@ -47,31 +148,33 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -This resource supports the following arguments: +The following arguments are required: + +* `allowedPublishers` - (Required) Configuration block of allowed publishers as signing profiles for this code signing configuration. [See below](#allowed_publishers-configuration-block). + +The following arguments are optional: -* `allowedPublishers` (Required) A configuration block of allowed publishers as signing profiles for this code signing configuration. Detailed below. -* `policies` (Optional) A configuration block of code signing policies that define the actions to take if the validation checks fail. Detailed below. * `description` - (Optional) Descriptive name for this code signing configuration. +* `policies` - (Optional) Configuration block of code signing policies that define the actions to take if the validation checks fail. [See below](#policies-configuration-block). 
+* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `tags` - (Optional) Map of tags to assign to the object. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. -The `allowedPublishers` block supports the following argument: +### allowed_publishers Configuration Block -* `signingProfileVersionArns` - (Required) The Amazon Resource Name (ARN) for each of the signing profiles. A signing profile defines a trusted user who can sign a code package. +* `signingProfileVersionArns` - (Required) Set of ARNs for each of the signing profiles. A signing profile defines a trusted user who can sign a code package. Maximum of 20 signing profiles. -The `policies` block supports the following argument: +### policies Configuration Block -* `untrustedArtifactOnDeployment` - (Required) Code signing configuration policy for deployment validation failure. If you set the policy to Enforce, Lambda blocks the deployment request if code-signing validation checks fail. If you set the policy to Warn, Lambda allows the deployment and creates a CloudWatch log. Valid values: `Warn`, `Enforce`. Default value: `Warn`. +* `untrustedArtifactOnDeployment` - (Required) Code signing configuration policy for deployment validation failure. If you set the policy to `Enforce`, Lambda blocks the deployment request if code-signing validation checks fail. If you set the policy to `Warn`, Lambda allows the deployment and creates a CloudWatch log. Valid values: `Warn`, `Enforce`. Default value: `Warn`. 
## Attribute Reference This resource exports the following attributes in addition to the arguments above: -* `arn` - The Amazon Resource Name (ARN) of the code signing configuration. +* `arn` - ARN of the code signing configuration. * `configId` - Unique identifier for the code signing configuration. -* `lastModified` - The date and time that the code signing configuration was last modified. -* `tagsAll` - A map of tags assigned to the resource, including those inherited from the provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). - -[1]: https://docs.aws.amazon.com/lambda/latest/dg/configuration-codesigning.html +* `lastModified` - Date and time that the code signing configuration was last modified. +* `tagsAll` - Map of tags assigned to the resource, including those inherited from the provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). ## Import @@ -91,7 +194,7 @@ class MyConvertedCode extends TerraformStack { super(scope, name); LambdaCodeSigningConfig.generateConfigForImport( this, - "importedCsc", + "example", "arn:aws:lambda:us-west-2:123456789012:code-signing-config:csc-0f6c334abcdea4d8b" ); } @@ -99,10 +202,10 @@ class MyConvertedCode extends TerraformStack { ``` -Using `terraform import`, import Code Signing Configs using their ARN. 
For example: +For backwards compatibility, the following legacy `terraform import` command is also supported: ```console -% terraform import aws_lambda_code_signing_config.imported_csc arn:aws:lambda:us-west-2:123456789012:code-signing-config:csc-0f6c334abcdea4d8b +% terraform import aws_lambda_code_signing_config.example arn:aws:lambda:us-west-2:123456789012:code-signing-config:csc-0f6c334abcdea4d8b ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/lambda_event_source_mapping.html.markdown b/website/docs/cdktf/typescript/r/lambda_event_source_mapping.html.markdown index d0f6188cd528..896d85fd64f0 100644 --- a/website/docs/cdktf/typescript/r/lambda_event_source_mapping.html.markdown +++ b/website/docs/cdktf/typescript/r/lambda_event_source_mapping.html.markdown @@ -3,21 +3,20 @@ subcategory: "Lambda" layout: "aws" page_title: "AWS: aws_lambda_event_source_mapping" description: |- - Provides a Lambda event source mapping. This allows Lambda functions to get events from Kinesis, DynamoDB, SQS, Amazon MQ and Managed Streaming for Apache Kafka (MSK). + Manages an AWS Lambda Event Source Mapping. --- # Resource: aws_lambda_event_source_mapping -Provides a Lambda event source mapping. This allows Lambda functions to get events from Kinesis, DynamoDB, SQS, Amazon MQ and Managed Streaming for Apache Kafka (MSK). +Manages an AWS Lambda Event Source Mapping. Use this resource to connect Lambda functions to event sources like Kinesis, DynamoDB, SQS, Amazon MQ, and Managed Streaming for Apache Kafka (MSK). -For information about Lambda and how to use it, see [What is AWS Lambda?][1]. -For information about event source mappings, see [CreateEventSourceMapping][2] in the API docs. +For information about Lambda and how to use it, see [What is AWS Lambda?](http://docs.aws.amazon.com/lambda/latest/dg/welcome.html). 
For information about event source mappings, see [CreateEventSourceMapping](http://docs.aws.amazon.com/lambda/latest/dg/API_CreateEventSourceMapping.html) in the API docs. ## Example Usage -### DynamoDB +### DynamoDB Stream ```typescript // DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug @@ -36,7 +35,7 @@ class MyConvertedCode extends TerraformStack { functionName: Token.asString(awsLambdaFunctionExample.arn), startingPosition: "LATEST", tags: { - Name: "dynamodb", + Name: "dynamodb-stream-mapping", }, }); } @@ -44,7 +43,7 @@ class MyConvertedCode extends TerraformStack { ``` -### Kinesis +### Kinesis Stream ```typescript // DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug @@ -59,8 +58,16 @@ class MyConvertedCode extends TerraformStack { constructor(scope: Construct, name: string) { super(scope, name); new LambdaEventSourceMapping(this, "example", { + batchSize: 100, + destinationConfig: { + onFailure: { + destinationArn: dlq.arn, + }, + }, eventSourceArn: Token.asString(awsKinesisStreamExample.arn), functionName: Token.asString(awsLambdaFunctionExample.arn), + maximumBatchingWindowInSeconds: 5, + parallelizationFactor: 2, startingPosition: "LATEST", }); } @@ -68,7 +75,7 @@ class MyConvertedCode extends TerraformStack { ``` -### Managed Streaming for Apache Kafka (MSK) +### SQS Queue ```typescript // DO NOT EDIT. 
Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug @@ -83,22 +90,24 @@ class MyConvertedCode extends TerraformStack { constructor(scope: Construct, name: string) { super(scope, name); new LambdaEventSourceMapping(this, "example", { - eventSourceArn: Token.asString(awsMskClusterExample.arn), + batchSize: 10, + eventSourceArn: Token.asString(awsSqsQueueExample.arn), functionName: Token.asString(awsLambdaFunctionExample.arn), - startingPosition: "TRIM_HORIZON", - topics: ["Example"], + scalingConfig: { + maximumConcurrency: 100, + }, }); } } ``` -### Self Managed Apache Kafka +### SQS with Event Filtering ```typescript // DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug import { Construct } from "constructs"; -import { Token, TerraformStack } from "cdktf"; +import { Token, Fn, TerraformStack } from "cdktf"; /* * Provider bindings are generated by running `cdktf get`. * See https://cdk.tf/provider-generation for more details. 
@@ -108,40 +117,33 @@ class MyConvertedCode extends TerraformStack { constructor(scope: Construct, name: string) { super(scope, name); new LambdaEventSourceMapping(this, "example", { - functionName: Token.asString(awsLambdaFunctionExample.arn), - provisionedPollerConfig: { - maximumPollers: 80, - minimumPollers: 10, - }, - selfManagedEventSource: { - endpoints: { - KAFKA_BOOTSTRAP_SERVERS: - "kafka1.example.com:9092,kafka2.example.com:9092", - }, + eventSourceArn: Token.asString(awsSqsQueueExample.arn), + filterCriteria: { + filter: [ + { + pattern: Token.asString( + Fn.jsonencode({ + body: { + Location: ["New York"], + Temperature: [ + { + numeric: [">", 0, "<=", 100], + }, + ], + }, + }) + ), + }, + ], }, - sourceAccessConfiguration: [ - { - type: "VPC_SUBNET", - uri: "subnet:subnet-example1", - }, - { - type: "VPC_SUBNET", - uri: "subnet:subnet-example2", - }, - { - type: "VPC_SECURITY_GROUP", - uri: "security_group:sg-example", - }, - ], - startingPosition: "TRIM_HORIZON", - topics: ["Example"], + functionName: Token.asString(awsLambdaFunctionExample.arn), }); } } ``` -### SQS +### Amazon MSK ```typescript // DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug @@ -156,20 +158,26 @@ class MyConvertedCode extends TerraformStack { constructor(scope: Construct, name: string) { super(scope, name); new LambdaEventSourceMapping(this, "example", { - eventSourceArn: sqsQueueTest.arn, + amazonManagedKafkaEventSourceConfig: { + consumerGroupId: "lambda-consumer-group", + }, + batchSize: 100, + eventSourceArn: Token.asString(awsMskClusterExample.arn), functionName: Token.asString(awsLambdaFunctionExample.arn), + startingPosition: "TRIM_HORIZON", + topics: ["orders", "inventory"], }); } } ``` -### SQS with event filter +### Self-Managed Apache Kafka ```typescript // DO NOT EDIT. 
Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug import { Construct } from "constructs"; -import { Fn, Token, TerraformStack } from "cdktf"; +import { Token, TerraformStack } from "cdktf"; /* * Provider bindings are generated by running `cdktf get`. * See https://cdk.tf/provider-generation for more details. @@ -179,26 +187,36 @@ class MyConvertedCode extends TerraformStack { constructor(scope: Construct, name: string) { super(scope, name); new LambdaEventSourceMapping(this, "example", { - eventSourceArn: sqsQueueTest.arn, - filterCriteria: { - filter: [ - { - pattern: Token.asString( - Fn.jsonencode({ - body: { - Location: ["New York"], - Temperature: [ - { - numeric: [">", 0, "<=", 100], - }, - ], - }, - }) - ), - }, - ], - }, functionName: Token.asString(awsLambdaFunctionExample.arn), + provisionedPollerConfig: { + maximumPollers: 100, + minimumPollers: 10, + }, + selfManagedEventSource: { + endpoints: { + KAFKA_BOOTSTRAP_SERVERS: + "kafka1.example.com:9092,kafka2.example.com:9092", + }, + }, + selfManagedKafkaEventSourceConfig: { + consumerGroupId: "lambda-consumer-group", + }, + sourceAccessConfiguration: [ + { + type: "VPC_SUBNET", + uri: "subnet:${" + example1.id + "}", + }, + { + type: "VPC_SUBNET", + uri: "subnet:${" + example2.id + "}", + }, + { + type: "VPC_SECURITY_GROUP", + uri: "security_group:${" + awsSecurityGroupExample.id + "}", + }, + ], + startingPosition: "TRIM_HORIZON", + topics: ["orders"], }); } } @@ -221,10 +239,9 @@ class MyConvertedCode extends TerraformStack { super(scope, name); new LambdaEventSourceMapping(this, "example", { batchSize: 10, - enabled: true, eventSourceArn: Token.asString(awsMqBrokerExample.arn), functionName: Token.asString(awsLambdaFunctionExample.arn), - queues: ["example"], + queues: ["orders"], sourceAccessConfiguration: [ { type: "BASIC_AUTH", @@ -253,14 +270,13 @@ class MyConvertedCode extends TerraformStack { super(scope, name); new LambdaEventSourceMapping(this, "example", { batchSize: 
1, - enabled: true, eventSourceArn: Token.asString(awsMqBrokerExample.arn), functionName: Token.asString(awsLambdaFunctionExample.arn), - queues: ["example"], + queues: ["orders"], sourceAccessConfiguration: [ { type: "VIRTUAL_HOST", - uri: "/example", + uri: "/production", }, { type: "BASIC_AUTH", @@ -273,105 +289,141 @@ class MyConvertedCode extends TerraformStack { ``` +### DocumentDB Change Stream + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { Token, TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. + */ +import { LambdaEventSourceMapping } from "./.gen/providers/aws/lambda-event-source-mapping"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + new LambdaEventSourceMapping(this, "example", { + documentDbEventSourceConfig: { + collectionName: "transactions", + databaseName: "orders", + fullDocument: "UpdateLookup", + }, + eventSourceArn: Token.asString(awsDocdbClusterExample.arn), + functionName: Token.asString(awsLambdaFunctionExample.arn), + sourceAccessConfiguration: [ + { + type: "BASIC_AUTH", + uri: Token.asString(awsSecretsmanagerSecretVersionExample.arn), + }, + ], + startingPosition: "LATEST", + }); + } +} + +``` + ## Argument Reference -This resource supports the following arguments: - -* `amazonManagedKafkaEventSourceConfig` - (Optional) Additional configuration block for Amazon Managed Kafka sources. Incompatible with "self_managed_event_source" and "self_managed_kafka_event_source_config". Detailed below. -* `batchSize` - (Optional) The largest number of records that Lambda will retrieve from your event source at the time of invocation. Defaults to `100` for DynamoDB, Kinesis, MQ and MSK, `10` for SQS. 
-* `bisectBatchOnFunctionError`: - (Optional) If the function returns an error, split the batch in two and retry. Only available for stream sources (DynamoDB and Kinesis). Defaults to `false`. -* `destinationConfig`: - (Optional) An Amazon SQS queue, Amazon SNS topic or Amazon S3 bucket (only available for Kafka sources) destination for failed records. Only available for stream sources (DynamoDB and Kinesis) and Kafka sources (Amazon MSK and Self-managed Apache Kafka). Detailed below. -* `documentDbEventSourceConfig`: - (Optional) Configuration settings for a DocumentDB event source. Detailed below. -* `enabled` - (Optional) Determines if the mapping is enabled. This parameter can be used to enable or disable the mapping, both during resource creation and for already created resources. Defaults to `true`. -* `eventSourceArn` - (Optional) The event source ARN - this is required for Kinesis stream, DynamoDB stream, SQS queue, MQ broker, MSK cluster or DocumentDB change stream. It is incompatible with a Self Managed Kafka source. -* `filterCriteria` - (Optional) The criteria to use for [event filtering](https://docs.aws.amazon.com/lambda/latest/dg/invocation-eventfiltering.html) Kinesis stream, DynamoDB stream, SQS queue event sources. Detailed below. -* `functionName` - (Required) The name or the ARN of the Lambda function that will be subscribing to events. -* `functionResponseTypes` - (Optional) A list of current response type enums applied to the event source mapping for [AWS Lambda checkpointing](https://docs.aws.amazon.com/lambda/latest/dg/with-ddb.html#services-ddb-batchfailurereporting). Only available for SQS and stream sources (DynamoDB and Kinesis). Valid values: `ReportBatchItemFailures`. -* `kmsKeyArn` - (Optional) The ARN of the Key Management Service (KMS) customer managed key that Lambda uses to encrypt your function's filter criteria. 
-* `maximumBatchingWindowInSeconds` - (Optional) The maximum amount of time to gather records before invoking the function, in seconds (between 0 and 300). Records will continue to buffer (or accumulate in the case of an SQS queue event source) until either `maximumBatchingWindowInSeconds` expires or `batchSize` has been met. For streaming event sources, defaults to as soon as records are available in the stream. If the batch it reads from the stream/queue only has one record in it, Lambda only sends one record to the function. Only available for stream sources (DynamoDB and Kinesis) and SQS standard queues. -* `maximumRecordAgeInSeconds`: - (Optional) The maximum age of a record that Lambda sends to a function for processing. Only available for stream sources (DynamoDB and Kinesis). Must be either -1 (forever, and the default value) or between 60 and 604800 (inclusive). -* `maximumRetryAttempts`: - (Optional) The maximum number of times to retry when the function returns an error. Only available for stream sources (DynamoDB and Kinesis). Minimum and default of -1 (forever), maximum of 10000. -* `metricsConfig`: - (Optional) CloudWatch metrics configuration of the event source. Only available for stream sources (DynamoDB and Kinesis) and SQS queues. Detailed below. -* `parallelizationFactor`: - (Optional) The number of batches to process from each shard concurrently. Only available for stream sources (DynamoDB and Kinesis). Minimum and default of 1, maximum of 10. -* `provisionedPollerConfig`: - (Optional) Event poller configuration for the event source. Only valid for Amazon MSK or self-managed Apache Kafka sources. Detailed below. -* `queues` - (Optional) The name of the Amazon MQ broker destination queue to consume. Only available for MQ sources. The list must contain exactly one queue name. -* `scalingConfig` - (Optional) Scaling configuration of the event source. Only available for SQS queues. Detailed below. 
-* `selfManagedEventSource`: - (Optional) For Self Managed Kafka sources, the location of the self managed cluster. If set, configuration must also include `sourceAccessConfiguration`. Detailed below. -* `selfManagedKafkaEventSourceConfig` - (Optional) Additional configuration block for Self Managed Kafka sources. Incompatible with "event_source_arn" and "amazon_managed_kafka_event_source_config". Detailed below. -* `sourceAccessConfiguration`: (Optional) For Self Managed Kafka sources, the access configuration for the source. If set, configuration must also include `selfManagedEventSource`. Detailed below. -* `startingPosition` - (Optional) The position in the stream where AWS Lambda should start reading. Must be one of `AT_TIMESTAMP` (Kinesis only), `LATEST` or `TRIM_HORIZON` if getting events from Kinesis, DynamoDB, MSK or Self Managed Apache Kafka. Must not be provided if getting events from SQS. More information about these positions can be found in the [AWS DynamoDB Streams API Reference](https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_streams_GetShardIterator.html) and [AWS Kinesis API Reference](https://docs.aws.amazon.com/kinesis/latest/APIReference/API_GetShardIterator.html#Kinesis-GetShardIterator-request-ShardIteratorType). -* `startingPositionTimestamp` - (Optional) A timestamp in [RFC3339 format](https://tools.ietf.org/html/rfc3339#section-5.8) of the data record which to start reading when using `startingPosition` set to `AT_TIMESTAMP`. If a record with this exact timestamp does not exist, the next later record is chosen. If the timestamp is older than the current trim horizon, the oldest available record is chosen. +The following arguments are required: + +* `functionName` - (Required) Name or ARN of the Lambda function that will be subscribing to events. + +The following arguments are optional: + +* `amazonManagedKafkaEventSourceConfig` - (Optional) Additional configuration block for Amazon Managed Kafka sources. 
Incompatible with `selfManagedEventSource` and `selfManagedKafkaEventSourceConfig`. [See below](#amazon_managed_kafka_event_source_config-configuration-block). +* `batchSize` - (Optional) Largest number of records that Lambda will retrieve from your event source at the time of invocation. Defaults to `100` for DynamoDB, Kinesis, MQ and MSK, `10` for SQS. +* `bisectBatchOnFunctionError` - (Optional) Whether to split the batch in two and retry if the function returns an error. Only available for stream sources (DynamoDB and Kinesis). Defaults to `false`. +* `destinationConfig` - (Optional) Amazon SQS queue, Amazon SNS topic or Amazon S3 bucket (only available for Kafka sources) destination for failed records. Only available for stream sources (DynamoDB and Kinesis) and Kafka sources (Amazon MSK and Self-managed Apache Kafka). [See below](#destination_config-configuration-block). +* `documentDbEventSourceConfig` - (Optional) Configuration settings for a DocumentDB event source. [See below](#document_db_event_source_config-configuration-block). +* `enabled` - (Optional) Whether the mapping is enabled. Defaults to `true`. +* `eventSourceArn` - (Optional) Event source ARN - required for Kinesis stream, DynamoDB stream, SQS queue, MQ broker, MSK cluster or DocumentDB change stream. Incompatible with Self Managed Kafka source. +* `filterCriteria` - (Optional) Criteria to use for [event filtering](https://docs.aws.amazon.com/lambda/latest/dg/invocation-eventfiltering.html) Kinesis stream, DynamoDB stream, SQS queue event sources. [See below](#filter_criteria-configuration-block). +* `functionResponseTypes` - (Optional) List of current response type enums applied to the event source mapping for [AWS Lambda checkpointing](https://docs.aws.amazon.com/lambda/latest/dg/with-ddb.html#services-ddb-batchfailurereporting). Only available for SQS and stream sources (DynamoDB and Kinesis). Valid values: `ReportBatchItemFailures`. 
+* `kmsKeyArn` - (Optional) ARN of the Key Management Service (KMS) customer managed key that Lambda uses to encrypt your function's filter criteria. +* `maximumBatchingWindowInSeconds` - (Optional) Maximum amount of time to gather records before invoking the function, in seconds (between 0 and 300). Records will continue to buffer until either `maximumBatchingWindowInSeconds` expires or `batchSize` has been met. For streaming event sources, defaults to as soon as records are available in the stream. Only available for stream sources (DynamoDB and Kinesis) and SQS standard queues. +* `maximumRecordAgeInSeconds` - (Optional) Maximum age of a record that Lambda sends to a function for processing. Only available for stream sources (DynamoDB and Kinesis). Must be either -1 (forever, and the default value) or between 60 and 604800 (inclusive). +* `maximumRetryAttempts` - (Optional) Maximum number of times to retry when the function returns an error. Only available for stream sources (DynamoDB and Kinesis). Minimum and default of -1 (forever), maximum of 10000. +* `metricsConfig` - (Optional) CloudWatch metrics configuration of the event source. Only available for stream sources (DynamoDB and Kinesis) and SQS queues. [See below](#metrics_config-configuration-block). +* `parallelizationFactor` - (Optional) Number of batches to process from each shard concurrently. Only available for stream sources (DynamoDB and Kinesis). Minimum and default of 1, maximum of 10. +* `provisionedPollerConfig` - (Optional) Event poller configuration for the event source. Only valid for Amazon MSK or self-managed Apache Kafka sources. [See below](#provisioned_poller_config-configuration-block). +* `queues` - (Optional) Name of the Amazon MQ broker destination queue to consume. Only available for MQ sources. The list must contain exactly one queue name. 
+* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `scalingConfig` - (Optional) Scaling configuration of the event source. Only available for SQS queues. [See below](#scaling_config-configuration-block). +* `selfManagedEventSource` - (Optional) For Self Managed Kafka sources, the location of the self managed cluster. If set, configuration must also include `sourceAccessConfiguration`. [See below](#self_managed_event_source-configuration-block). +* `selfManagedKafkaEventSourceConfig` - (Optional) Additional configuration block for Self Managed Kafka sources. Incompatible with `eventSourceArn` and `amazonManagedKafkaEventSourceConfig`. [See below](#self_managed_kafka_event_source_config-configuration-block). +* `sourceAccessConfiguration` - (Optional) For Self Managed Kafka sources, the access configuration for the source. If set, configuration must also include `selfManagedEventSource`. [See below](#source_access_configuration-configuration-block). +* `startingPosition` - (Optional) Position in the stream where AWS Lambda should start reading. Must be one of `AT_TIMESTAMP` (Kinesis only), `LATEST` or `TRIM_HORIZON` if getting events from Kinesis, DynamoDB, MSK or Self Managed Apache Kafka. Must not be provided if getting events from SQS. More information about these positions can be found in the [AWS DynamoDB Streams API Reference](https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_streams_GetShardIterator.html) and [AWS Kinesis API Reference](https://docs.aws.amazon.com/kinesis/latest/APIReference/API_GetShardIterator.html#Kinesis-GetShardIterator-request-ShardIteratorType). 
+* `startingPositionTimestamp` - (Optional) Timestamp in [RFC3339 format](https://tools.ietf.org/html/rfc3339#section-5.8) of the data record from which to start reading when using `startingPosition` set to `AT_TIMESTAMP`. If a record with this exact timestamp does not exist, the next later record is chosen. If the timestamp is older than the current trim horizon, the oldest available record is chosen. * `tags` - (Optional) Map of tags to assign to the object. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. -* `topics` - (Optional) The name of the Kafka topics. Only available for MSK sources. A single topic name must be specified. -* `tumblingWindowInSeconds` - (Optional) The duration in seconds of a processing window for [AWS Lambda streaming analytics](https://docs.aws.amazon.com/lambda/latest/dg/with-kinesis.html#services-kinesis-windows). The range is between 1 second up to 900 seconds. Only available for stream sources (DynamoDB and Kinesis). +* `topics` - (Optional) Name of the Kafka topics. Only available for MSK sources. A single topic name must be specified. +* `tumblingWindowInSeconds` - (Optional) Duration in seconds of a processing window for [AWS Lambda streaming analytics](https://docs.aws.amazon.com/lambda/latest/dg/with-kinesis.html#services-kinesis-windows). The range is between 1 second up to 900 seconds. Only available for stream sources (DynamoDB and Kinesis). ### amazon_managed_kafka_event_source_config Configuration Block -* `consumerGroupId` - (Optional) A Kafka consumer group ID between 1 and 200 characters for use when creating this event source mapping. If one is not specified, this value will be automatically generated. 
See [AmazonManagedKafkaEventSourceConfig Syntax](https://docs.aws.amazon.com/lambda/latest/dg/API_AmazonManagedKafkaEventSourceConfig.html). +* `consumerGroupId` - (Optional) Kafka consumer group ID between 1 and 200 characters for use when creating this event source mapping. If one is not specified, this value will be automatically generated. See [AmazonManagedKafkaEventSourceConfig Syntax](https://docs.aws.amazon.com/lambda/latest/dg/API_AmazonManagedKafkaEventSourceConfig.html). ### destination_config Configuration Block -* `onFailure` - (Optional) The destination configuration for failed invocations. Detailed below. +* `onFailure` - (Optional) Destination configuration for failed invocations. [See below](#destination_config-on_failure-configuration-block). #### destination_config on_failure Configuration Block -* `destinationArn` - (Required) The Amazon Resource Name (ARN) of the destination resource. +* `destinationArn` - (Required) ARN of the destination resource. ### document_db_event_source_config Configuration Block -* `collectionName` - (Optional) The name of the collection to consume within the database. If you do not specify a collection, Lambda consumes all collections. -* `databaseName` - (Required) The name of the database to consume within the DocumentDB cluster. +* `collectionName` - (Optional) Name of the collection to consume within the database. If you do not specify a collection, Lambda consumes all collections. +* `databaseName` - (Required) Name of the database to consume within the DocumentDB cluster. * `fullDocument` - (Optional) Determines what DocumentDB sends to your event stream during document update operations. If set to `UpdateLookup`, DocumentDB sends a delta describing the changes, along with a copy of the entire document. Otherwise, DocumentDB sends only a partial document that contains the changes. Valid values: `UpdateLookup`, `Default`. ### filter_criteria Configuration Block -* `filter` - (Optional) A set of up to 5 filter. 
If an event satisfies at least one, Lambda sends the event to the function or adds it to the next batch. Detailed below. +* `filter` - (Optional) Set of up to 5 filters. If an event satisfies at least one, Lambda sends the event to the function or adds it to the next batch. [See below](#filter_criteria-filter-configuration-block). #### filter_criteria filter Configuration Block -* `pattern` - (Optional) A filter pattern up to 4096 characters. See [Filter Rule Syntax](https://docs.aws.amazon.com/lambda/latest/dg/invocation-eventfiltering.html#filtering-syntax). +* `pattern` - (Optional) Filter pattern up to 4096 characters. See [Filter Rule Syntax](https://docs.aws.amazon.com/lambda/latest/dg/invocation-eventfiltering.html#filtering-syntax). ### metrics_config Configuration Block -* `metrics` - (Required) A list containing the metrics to be produced by the event source mapping. Valid values: `EventCount`. +* `metrics` - (Required) List containing the metrics to be produced by the event source mapping. Valid values: `EventCount`. ### provisioned_poller_config Configuration Block -* `maximumPollers` - (Optional) The maximum number of event pollers this event source can scale up to. The range is between 1 and 2000. -* `minimumPollers` - (Optional) The minimum number of event pollers this event source can scale down to. The range is between 1 and 200. +* `maximumPollers` - (Optional) Maximum number of event pollers this event source can scale up to. The range is between 1 and 2000. +* `minimumPollers` - (Optional) Minimum number of event pollers this event source can scale down to. The range is between 1 and 200. ### scaling_config Configuration Block -* `maximumConcurrency` - (Optional) Limits the number of concurrent instances that the Amazon SQS event source can invoke. Must be greater than or equal to `2`. See [Configuring maximum concurrency for Amazon SQS event sources](https://docs.aws.amazon.com/lambda/latest/dg/with-sqs.html#events-sqs-max-concurrency). 
You need to raise a [Service Quota Ticket](https://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html) to increase the concurrency beyond 1000. +* `maximumConcurrency` - (Optional) Limits the number of concurrent instances that the Amazon SQS event source can invoke. Must be greater than or equal to 2. See [Configuring maximum concurrency for Amazon SQS event sources](https://docs.aws.amazon.com/lambda/latest/dg/with-sqs.html#events-sqs-max-concurrency). You need to raise a [Service Quota Ticket](https://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html) to increase the concurrency beyond 1000. ### self_managed_event_source Configuration Block -* `endpoints` - (Required) A map of endpoints for the self managed source. For Kafka self-managed sources, the key should be `KAFKA_BOOTSTRAP_SERVERS` and the value should be a string with a comma separated list of broker endpoints. +* `endpoints` - (Required) Map of endpoints for the self managed source. For Kafka self-managed sources, the key should be `KAFKA_BOOTSTRAP_SERVERS` and the value should be a string with a comma separated list of broker endpoints. ### self_managed_kafka_event_source_config Configuration Block -* `consumerGroupId` - (Optional) A Kafka consumer group ID between 1 and 200 characters for use when creating this event source mapping. If one is not specified, this value will be automatically generated. See [SelfManagedKafkaEventSourceConfig Syntax](https://docs.aws.amazon.com/lambda/latest/dg/API_SelfManagedKafkaEventSourceConfig.html). +* `consumerGroupId` - (Optional) Kafka consumer group ID between 1 and 200 characters for use when creating this event source mapping. If one is not specified, this value will be automatically generated. See [SelfManagedKafkaEventSourceConfig Syntax](https://docs.aws.amazon.com/lambda/latest/dg/API_SelfManagedKafkaEventSourceConfig.html). 
### source_access_configuration Configuration Block -* `type` - (Required) The type of authentication protocol, VPC components, or virtual host for your event source. For valid values, refer to the [AWS documentation](https://docs.aws.amazon.com/lambda/latest/api/API_SourceAccessConfiguration.html). -* `uri` - (Required) The URI for this configuration. For type `VPC_SUBNET` the value should be `subnet:subnet_id` where `subnetId` is the value you would find in an aws_subnet resource's id attribute. For type `VPC_SECURITY_GROUP` the value should be `security_group:security_group_id` where `securityGroupId` is the value you would find in an aws_security_group resource's id attribute. +* `type` - (Required) Type of authentication protocol, VPC components, or virtual host for your event source. For valid values, refer to the [AWS documentation](https://docs.aws.amazon.com/lambda/latest/api/API_SourceAccessConfiguration.html). +* `uri` - (Required) URI for this configuration. For type `VPC_SUBNET` the value should be `subnet:subnet_id` where `subnetId` is the value you would find in an aws_subnet resource's id attribute. For type `VPC_SECURITY_GROUP` the value should be `security_group:security_group_id` where `securityGroupId` is the value you would find in an aws_security_group resource's id attribute. ## Attribute Reference This resource exports the following attributes in addition to the arguments above: -* `arn` - The event source mapping ARN. -* `functionArn` - The ARN of the Lambda function the event source mapping is sending events to. (Note: this is a computed value that differs from `functionName` above.) -* `lastModified` - The date this resource was last modified. -* `lastProcessingResult` - The result of the last AWS Lambda invocation of your Lambda function. -* `state` - The state of the event source mapping. -* `stateTransitionReason` - The reason the event source mapping is in its current state. 
-* `tagsAll` - A map of tags assigned to the resource, including those inherited from the provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). -* `uuid` - The UUID of the created event source mapping. - -[1]: http://docs.aws.amazon.com/lambda/latest/dg/welcome.html -[2]: http://docs.aws.amazon.com/lambda/latest/dg/API_CreateEventSourceMapping.html +* `arn` - Event source mapping ARN. +* `functionArn` - ARN of the Lambda function the event source mapping is sending events to. (Note: this is a computed value that differs from `functionName` above.) +* `lastModified` - Date this resource was last modified. +* `lastProcessingResult` - Result of the last AWS Lambda invocation of your Lambda function. +* `state` - State of the event source mapping. +* `stateTransitionReason` - Reason the event source mapping is in its current state. +* `tagsAll` - Map of tags assigned to the resource, including those inherited from the provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). +* `uuid` - UUID of the created event source mapping. ## Import @@ -391,7 +443,7 @@ class MyConvertedCode extends TerraformStack { super(scope, name); LambdaEventSourceMapping.generateConfigForImport( this, - "eventSourceMapping", + "example", "12345kxodurf3443" ); } @@ -402,7 +454,7 @@ class MyConvertedCode extends TerraformStack { Using `terraform import`, import Lambda event source mappings using the `UUID` (event source mapping identifier). 
For example: ```console -% terraform import aws_lambda_event_source_mapping.event_source_mapping 12345kxodurf3443 +% terraform import aws_lambda_event_source_mapping.example 12345kxodurf3443 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/lambda_function.html.markdown b/website/docs/cdktf/typescript/r/lambda_function.html.markdown index c3b176f93878..0e702b91fea1 100644 --- a/website/docs/cdktf/typescript/r/lambda_function.html.markdown +++ b/website/docs/cdktf/typescript/r/lambda_function.html.markdown @@ -3,28 +3,26 @@ subcategory: "Lambda" layout: "aws" page_title: "AWS: aws_lambda_function" description: |- - Provides a Lambda Function resource. Lambda allows you to trigger execution of code in response to events in AWS, enabling serverless backend solutions. The Lambda Function itself includes source code and runtime configuration. + Manages an AWS Lambda Function. --- # Resource: aws_lambda_function -Provides a Lambda Function resource. Lambda allows you to trigger execution of code in response to events in AWS, enabling serverless backend solutions. The Lambda Function itself includes source code and runtime configuration. +Manages an AWS Lambda Function. Use this resource to create serverless functions that run code in response to events without provisioning or managing servers. -For information about Lambda and how to use it, see [What is AWS Lambda?][1] +For information about Lambda and how to use it, see [What is AWS Lambda?](https://docs.aws.amazon.com/lambda/latest/dg/welcome.html). For a detailed example of setting up Lambda and API Gateway, see [Serverless Applications with AWS Lambda and API Gateway](https://learn.hashicorp.com/terraform/aws/lambda-api-gateway). 
-For a detailed example of setting up Lambda and API Gateway, see [Serverless Applications with AWS Lambda and API Gateway.][11] +~> **Note:** Due to [AWS Lambda improved VPC networking changes that began deploying in September 2019](https://aws.amazon.com/blogs/compute/announcing-improved-vpc-networking-for-aws-lambda-functions/), EC2 subnets and security groups associated with Lambda Functions can take up to 45 minutes to successfully delete. Terraform AWS Provider version 2.31.0 and later automatically handles this increased timeout, however prior versions require setting the customizable deletion timeouts of those Terraform resources to 45 minutes (`delete = "45m"`). AWS and HashiCorp are working together to reduce the amount of time required for resource deletion and updates can be tracked in this [GitHub issue](https://github.com/hashicorp/terraform-provider-aws/issues/10329). -~> **NOTE:** Due to [AWS Lambda improved VPC networking changes that began deploying in September 2019](https://aws.amazon.com/blogs/compute/announcing-improved-vpc-networking-for-aws-lambda-functions/), EC2 subnets and security groups associated with Lambda Functions can take up to 45 minutes to successfully delete. Terraform AWS Provider version 2.31.0 and later automatically handles this increased timeout, however prior versions require setting the customizable deletion timeouts of those Terraform resources to 45 minutes (`delete = "45m"`). AWS and HashiCorp are working together to reduce the amount of time required for resource deletion and updates can be tracked in this [GitHub issue](https://github.com/hashicorp/terraform-provider-aws/issues/10329). +~> **Note:** If you get a `KMSAccessDeniedException: Lambda was unable to decrypt the environment variables because KMS access was denied` error when invoking an `aws_lambda_function` with environment variables, the IAM role associated with the function may have been deleted and recreated after the function was created. 
You can fix the problem two ways: 1) updating the function's role to another role and then updating it back again to the recreated role, or 2) by using Terraform to `taint` the function and `apply` your configuration again to recreate the function. (When you create a function, Lambda grants permissions on the KMS key to the function's IAM role. If the IAM role is recreated, the grant is no longer valid. Changing the function's role or recreating the function causes Lambda to update the grant.) -~> **NOTE:** If you get a `KMSAccessDeniedException: Lambda was unable to decrypt the environment variables because KMS access was denied` error when invoking an [`aws_lambda_function`](/docs/providers/aws/r/lambda_function.html) with environment variables, the IAM role associated with the function may have been deleted and recreated _after_ the function was created. You can fix the problem two ways: 1) updating the function's role to another role and then updating it back again to the recreated role, or 2) by using Terraform to `taint` the function and `apply` your configuration again to recreate the function. (When you create a function, Lambda grants permissions on the KMS key to the function's IAM role. If the IAM role is recreated, the grant is no longer valid. Changing the function's role or recreating the function causes Lambda to update the grant.) - --> To give an external source (like an EventBridge Rule, SNS, or S3) permission to access the Lambda function, use the [`aws_lambda_permission`](lambda_permission.html) resource. See [Lambda Permission Model][4] for more details. On the other hand, the `role` argument of this resource is the function's execution role for identity and access to AWS services and resources. +-> **Tip:** To give an external source (like an EventBridge Rule, SNS, or S3) permission to access the Lambda function, use the [`aws_lambda_permission`](lambda_permission.html) resource. 
See [Lambda Permission Model](https://docs.aws.amazon.com/lambda/latest/dg/intro-permission-model.html) for more details. On the other hand, the `role` argument of this resource is the function's execution role for identity and access to AWS services and resources. ## Example Usage -### Basic Example +### Basic Function with Node.js ```typescript // DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug @@ -43,9 +41,9 @@ class MyConvertedCode extends TerraformStack { super(scope, name); /*The following providers are missing schema information and might need manual adjustments to synthesize correctly: archive. For a more precise conversion please use the --provider flag in convert.*/ - const lambda = new DataArchiveFile(this, "lambda", { - output_path: "lambda_function_payload.zip", - source_file: "lambda.js", + const example = new DataArchiveFile(this, "example", { + output_path: "${path.module}/lambda/function.zip", + source_file: "${path.module}/lambda/index.js", type: "zip", }); const assumeRole = new DataAwsIamPolicyDocument(this, "assume_role", { @@ -62,57 +60,103 @@ class MyConvertedCode extends TerraformStack { }, ], }); - const iamForLambda = new IamRole(this, "iam_for_lambda", { + const awsIamRoleExample = new IamRole(this, "example_2", { assumeRolePolicy: Token.asString(assumeRole.json), - name: "iam_for_lambda", + name: "lambda_execution_role", }); - new LambdaFunction(this, "test_lambda", { + /*This allows the Terraform resource name to match the original name. 
You can remove the call if you don't need them to match.*/ + awsIamRoleExample.overrideLogicalId("example"); + const awsLambdaFunctionExample = new LambdaFunction(this, "example_3", { environment: { variables: { - foo: "bar", + ENVIRONMENT: "production", + LOG_LEVEL: "info", }, }, - filename: "lambda_function_payload.zip", - functionName: "lambda_function_name", - handler: "index.test", - role: iamForLambda.arn, - runtime: "nodejs18.x", - sourceCodeHash: Token.asString(lambda.outputBase64Sha256), + filename: Token.asString(example.outputPath), + functionName: "example_lambda_function", + handler: "index.handler", + role: Token.asString(awsIamRoleExample.arn), + runtime: "nodejs20.x", + sourceCodeHash: Token.asString(example.outputBase64Sha256), + tags: { + Application: "example", + Environment: "production", + }, }); + /*This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match.*/ + awsLambdaFunctionExample.overrideLogicalId("example"); } } ``` -### Lambda Layers +### Container Image Function -~> **NOTE:** The `aws_lambda_layer_version` attribute values for `arn` and `layerArn` were swapped in version 2.0.0 of the Terraform AWS Provider. For version 1.x, use `layerArn` references. For version 2.x, use `arn` references. +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { Token, TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. 
+ */ +import { LambdaFunction } from "./.gen/providers/aws/lambda-function"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + new LambdaFunction(this, "example", { + architectures: ["arm64"], + functionName: "example_container_function", + imageConfig: { + command: ["app.handler"], + entryPoint: ["/lambda-entrypoint.sh"], + }, + imageUri: "${" + awsEcrRepositoryExample.repositoryUrl + "}:latest", + memorySize: 512, + packageType: "Image", + role: Token.asString(awsIamRoleExample.arn), + timeout: 30, + }); + } +} + +``` + +### Function with Lambda Layers + +~> **Note:** The `aws_lambda_layer_version` attribute values for `arn` and `layerArn` were swapped in version 2.0.0 of the Terraform AWS Provider. For version 2.x, use `arn` references. ```typescript // DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug import { Construct } from "constructs"; -import { TerraformStack } from "cdktf"; +import { Token, TerraformStack } from "cdktf"; /* * Provider bindings are generated by running `cdktf get`. * See https://cdk.tf/provider-generation for more details. 
*/ import { LambdaFunction } from "./.gen/providers/aws/lambda-function"; import { LambdaLayerVersion } from "./.gen/providers/aws/lambda-layer-version"; -interface MyConfig { - layerName: any; - functionName: any; - role: any; -} class MyConvertedCode extends TerraformStack { - constructor(scope: Construct, name: string, config: MyConfig) { + constructor(scope: Construct, name: string) { super(scope, name); const example = new LambdaLayerVersion(this, "example", { - layerName: config.layerName, + compatibleArchitectures: ["x86_64", "arm64"], + compatibleRuntimes: ["nodejs20.x", "python3.12"], + description: "Common dependencies for Lambda functions", + filename: "layer.zip", + layerName: "example_dependencies_layer", }); const awsLambdaFunctionExample = new LambdaFunction(this, "example_1", { + filename: "function.zip", + functionName: "example_layered_function", + handler: "index.handler", layers: [example.arn], - functionName: config.functionName, - role: config.role, + role: Token.asString(awsIamRoleExample.arn), + runtime: "nodejs20.x", + tracingConfig: { + mode: "Active", + }, }); /*This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match.*/ awsLambdaFunctionExample.overrideLogicalId("example"); @@ -121,9 +165,7 @@ class MyConvertedCode extends TerraformStack { ``` -### Lambda Ephemeral Storage - -Lambda Function Ephemeral Storage(`/tmp`) allows you to configure the storage upto `10` GB. The default value set to `512` MB. +### VPC Function with Enhanced Networking ```typescript // DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug @@ -133,53 +175,48 @@ import { Token, TerraformStack } from "cdktf"; * Provider bindings are generated by running `cdktf get`. * See https://cdk.tf/provider-generation for more details. 
*/ -import { DataAwsIamPolicyDocument } from "./.gen/providers/aws/data-aws-iam-policy-document"; -import { IamRole } from "./.gen/providers/aws/iam-role"; import { LambdaFunction } from "./.gen/providers/aws/lambda-function"; class MyConvertedCode extends TerraformStack { constructor(scope: Construct, name: string) { super(scope, name); - const assumeRole = new DataAwsIamPolicyDocument(this, "assume_role", { - statement: [ - { - actions: ["sts:AssumeRole"], - effect: "Allow", - principals: [ - { - identifiers: ["lambda.amazonaws.com"], - type: "Service", - }, - ], - }, - ], - }); - const iamForLambda = new IamRole(this, "iam_for_lambda", { - assumeRolePolicy: Token.asString(assumeRole.json), - name: "iam_for_lambda", - }); - new LambdaFunction(this, "test_lambda", { + new LambdaFunction(this, "example", { ephemeralStorage: { - size: 10240, + size: 5120, + }, + filename: "function.zip", + functionName: "example_vpc_function", + handler: "app.handler", + memorySize: 1024, + role: Token.asString(awsIamRoleExample.arn), + runtime: "python3.12", + snapStart: { + applyOn: "PublishedVersions", + }, + timeout: 30, + vpcConfig: { + ipv6AllowedForDualStack: true, + securityGroupIds: [exampleLambda.id], + subnetIds: [examplePrivate1.id, examplePrivate2.id], }, - filename: "lambda_function_payload.zip", - functionName: "lambda_function_name", - handler: "index.test", - role: iamForLambda.arn, - runtime: "nodejs18.x", }); } } ``` -### Lambda File Systems - -Lambda File Systems allow you to connect an Amazon Elastic File System (EFS) file system to a Lambda function to share data across function invocations, access existing data including large files, and save function state. +### Function with EFS Integration ```typescript // DO NOT EDIT. 
Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug import { Construct } from "constructs"; -import { TerraformStack } from "cdktf"; +import { + VariableType, + TerraformVariable, + Fn, + Token, + TerraformCount, + TerraformStack, +} from "cdktf"; /* * Provider bindings are generated by running `cdktf get`. * See https://cdk.tf/provider-generation for more details. @@ -188,133 +225,391 @@ import { EfsAccessPoint } from "./.gen/providers/aws/efs-access-point"; import { EfsFileSystem } from "./.gen/providers/aws/efs-file-system"; import { EfsMountTarget } from "./.gen/providers/aws/efs-mount-target"; import { LambdaFunction } from "./.gen/providers/aws/lambda-function"; -interface MyConfig { - functionName: any; - role: any; -} class MyConvertedCode extends TerraformStack { - constructor(scope: Construct, name: string, config: MyConfig) { + constructor(scope: Construct, name: string) { super(scope, name); - const efsForLambda = new EfsFileSystem(this, "efs_for_lambda", { + /*Terraform Variables are not always the best fit for getting inputs in the context of Terraform CDK. + You can read more about this at https://cdk.tf/variables*/ + const subnetIds = new TerraformVariable(this, "subnet_ids", { + default: ["subnet-12345678", "subnet-87654321"], + description: "List of subnet IDs for EFS mount targets", + type: VariableType.list(VariableType.STRING), + }); + const example = new EfsFileSystem(this, "example", { + encrypted: true, tags: { - Name: "efs_for_lambda", + Name: "lambda-efs", }, }); - const alpha = new EfsMountTarget(this, "alpha", { - fileSystemId: efsForLambda.id, - securityGroups: [sgForLambda.id], - subnetId: subnetForLambda.id, + /*In most cases loops should be handled in the programming language context and + not inside of the Terraform context. If you are looping over something external, e.g. a variable or a file input + you should consider using a for loop. If you are looping over something only known to Terraform, e.g. 
a result of a data source + you need to keep this like it is.*/ + const exampleCount = TerraformCount.of( + Token.asNumber(Fn.lengthOf(subnetIds.value)) + ); + const awsEfsMountTargetExample = new EfsMountTarget(this, "example_2", { + fileSystemId: example.id, + securityGroups: [efs.id], + subnetId: Token.asString( + Fn.lookupNested(subnetIds.value, [exampleCount.index]) + ), + count: exampleCount, }); - const accessPointForLambda = new EfsAccessPoint( - this, - "access_point_for_lambda", - { - fileSystemId: efsForLambda.id, - posixUser: { - gid: 1000, - uid: 1000, - }, - rootDirectory: { - creationInfo: { - ownerGid: 1000, - ownerUid: 1000, - permissions: "777", - }, - path: "/lambda", + /*This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match.*/ + awsEfsMountTargetExample.overrideLogicalId("example"); + const awsEfsAccessPointExample = new EfsAccessPoint(this, "example_3", { + fileSystemId: example.id, + posixUser: { + gid: 1000, + uid: 1000, + }, + rootDirectory: { + creationInfo: { + ownerGid: 1000, + ownerUid: 1000, + permissions: "755", }, - } - ); - new LambdaFunction(this, "example", { - dependsOn: [alpha], + path: "/lambda", + }, + }); + /*This allows the Terraform resource name to match the original name. 
You can remove the call if you don't need them to match.*/ + awsEfsAccessPointExample.overrideLogicalId("example"); + const awsLambdaFunctionExample = new LambdaFunction(this, "example_4", { + dependsOn: [awsEfsMountTargetExample], fileSystemConfig: { - arn: accessPointForLambda.arn, - localMountPath: "/mnt/efs", + arn: Token.asString(awsEfsAccessPointExample.arn), + localMountPath: "/mnt/data", }, + filename: "function.zip", + functionName: "example_efs_function", + handler: "index.handler", + role: Token.asString(awsIamRoleExample.arn), + runtime: "nodejs20.x", vpcConfig: { - securityGroupIds: [sgForLambda.id], - subnetIds: [subnetForLambda.id], + securityGroupIds: [lambda.id], + subnetIds: subnetIds.listValue, }, - functionName: config.functionName, - role: config.role, }); + /*This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match.*/ + awsLambdaFunctionExample.overrideLogicalId("example"); } } ``` -### Lambda retries +### Function with Advanced Logging -Lambda Functions allow you to configure error handling for asynchronous invocation. The settings that it supports are `Maximum age of event` and `Retry attempts` as stated in [Lambda documentation for Configuring error handling for asynchronous invocation](https://docs.aws.amazon.com/lambda/latest/dg/invocation-async.html#invocation-async-errors). To configure these settings, refer to the [aws_lambda_function_event_invoke_config resource](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/lambda_function_event_invoke_config). +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { Token, TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. 
+ */ +import { CloudwatchLogGroup } from "./.gen/providers/aws/cloudwatch-log-group"; +import { LambdaFunction } from "./.gen/providers/aws/lambda-function"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + const example = new CloudwatchLogGroup(this, "example", { + name: "/aws/lambda/example_function", + retentionInDays: 14, + tags: { + Application: "example", + Environment: "production", + }, + }); + const awsLambdaFunctionExample = new LambdaFunction(this, "example_1", { + dependsOn: [example], + filename: "function.zip", + functionName: "example_function", + handler: "index.handler", + loggingConfig: { + applicationLogLevel: "INFO", + logFormat: "JSON", + systemLogLevel: "WARN", + }, + role: Token.asString(awsIamRoleExample.arn), + runtime: "nodejs20.x", + }); + /*This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match.*/ + awsLambdaFunctionExample.overrideLogicalId("example"); + } +} + +``` + +### Function with logging to S3 or Data Firehose + +#### Required Resources -## CloudWatch Logging and Permissions +* An S3 bucket or Data Firehose delivery stream to store the logs. +* A CloudWatch Log Group with: -For more information about CloudWatch Logs for Lambda, see the [Lambda User Guide](https://docs.aws.amazon.com/lambda/latest/dg/monitoring-functions-logs.html). + * `log_group_class = "DELIVERY"` + * A subscription filter whose `destinationArn` points to the S3 bucket or the Data Firehose delivery stream. + +* IAM roles: + + * Assumed by the `logs.amazonaws.com` service to deliver logs to the S3 bucket or Data Firehose delivery stream. 
+ * Assumed by the `lambda.amazonaws.com` service to send logs to CloudWatch Logs + +* A Lambda function: + + * In the `loggingConfiguration`, specify the name of the Log Group created above using the `logGroup` field + * No special configuration is required to use S3 or Firehose as the log destination + +For more details, see [Sending Lambda function logs to Amazon S3](https://docs.aws.amazon.com/lambda/latest/dg/logging-with-s3.html). + +#### Example: Exporting Lambda Logs to S3 Bucket ```typescript // DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug import { Construct } from "constructs"; -import { TerraformVariable, Token, TerraformStack } from "cdktf"; +import { Token, TerraformStack } from "cdktf"; /* * Provider bindings are generated by running `cdktf get`. * See https://cdk.tf/provider-generation for more details. */ import { CloudwatchLogGroup } from "./.gen/providers/aws/cloudwatch-log-group"; +import { CloudwatchLogSubscriptionFilter } from "./.gen/providers/aws/cloudwatch-log-subscription-filter"; import { DataAwsIamPolicyDocument } from "./.gen/providers/aws/data-aws-iam-policy-document"; -import { IamPolicy } from "./.gen/providers/aws/iam-policy"; -import { IamRolePolicyAttachment } from "./.gen/providers/aws/iam-role-policy-attachment"; +import { IamRole } from "./.gen/providers/aws/iam-role"; +import { IamRolePolicy } from "./.gen/providers/aws/iam-role-policy"; import { LambdaFunction } from "./.gen/providers/aws/lambda-function"; -interface MyConfig { - role: any; -} +import { S3Bucket } from "./.gen/providers/aws/s3-bucket"; class MyConvertedCode extends TerraformStack { - constructor(scope: Construct, name: string, config: MyConfig) { + constructor(scope: Construct, name: string) { super(scope, name); - /*Terraform Variables are not always the best fit for getting inputs in the context of Terraform CDK. 
- You can read more about this at https://cdk.tf/variables*/ - const lambdaFunctionName = new TerraformVariable( + const lambdaFunctionName = "lambda-log-export-example"; + const exportVar = new CloudwatchLogGroup(this, "export", { + logGroupClass: "DELIVERY", + name: "/aws/lambda/${" + lambdaFunctionName + "}", + }); + new LambdaFunction(this, "log_export", { + dependsOn: [exportVar], + filename: "function.zip", + functionName: lambdaFunctionName, + handler: "index.lambda_handler", + loggingConfig: { + logFormat: "Text", + logGroup: exportVar.name, + }, + role: example.arn, + runtime: "python3.13", + }); + const lambdaLogExport = new S3Bucket(this, "lambda_log_export", { + bucket: "${" + lambdaFunctionName + "}-bucket", + }); + const dataAwsIamPolicyDocumentLambdaLogExport = + new DataAwsIamPolicyDocument(this, "lambda_log_export_3", { + statement: [ + { + actions: ["s3:PutObject"], + effect: "Allow", + resources: ["${" + lambdaLogExport.arn + "}/*"], + }, + ], + }); + /*This allows the Terraform resource name to match the original name. 
You can remove the call if you don't need them to match.*/ + dataAwsIamPolicyDocumentLambdaLogExport.overrideLogicalId( + "lambda_log_export" + ); + const logsAssumeRole = new DataAwsIamPolicyDocument( this, - "lambda_function_name", + "logs_assume_role", { - default: "lambda_function_name", + statement: [ + { + actions: ["sts:AssumeRole"], + effect: "Allow", + principals: [ + { + identifiers: ["logs.amazonaws.com"], + type: "Service", + }, + ], + }, + ], } ); - const example = new CloudwatchLogGroup(this, "example", { - name: "/aws/lambda/${" + lambdaFunctionName.value + "}", - retentionInDays: 14, + const logsLogExport = new IamRole(this, "logs_log_export", { + assumeRolePolicy: Token.asString(logsAssumeRole.json), + name: "${" + lambdaFunctionName + "}-lambda-log-export-role", }); - const lambdaLogging = new DataAwsIamPolicyDocument(this, "lambda_logging", { - statement: [ - { - actions: [ - "logs:CreateLogGroup", - "logs:CreateLogStream", - "logs:PutLogEvents", - ], - effect: "Allow", - resources: ["arn:aws:logs:*:*:*"], + const awsIamRolePolicyLambdaLogExport = new IamRolePolicy( + this, + "lambda_log_export_6", + { + policy: Token.asString(dataAwsIamPolicyDocumentLambdaLogExport.json), + role: logsLogExport.name, + } + ); + /*This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match.*/ + awsIamRolePolicyLambdaLogExport.overrideLogicalId("lambda_log_export"); + const awsCloudwatchLogSubscriptionFilterLambdaLogExport = + new CloudwatchLogSubscriptionFilter(this, "lambda_log_export_7", { + destinationArn: lambdaLogExport.arn, + filterPattern: "", + logGroupName: exportVar.name, + name: "${" + lambdaFunctionName + "}-filter", + roleArn: logsLogExport.arn, + }); + /*This allows the Terraform resource name to match the original name. 
You can remove the call if you don't need them to match.*/ + awsCloudwatchLogSubscriptionFilterLambdaLogExport.overrideLogicalId( + "lambda_log_export" + ); + } +} + +``` + +### Function with Error Handling + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { Token, TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. + */ +import { LambdaFunction } from "./.gen/providers/aws/lambda-function"; +import { LambdaFunctionEventInvokeConfig } from "./.gen/providers/aws/lambda-function-event-invoke-config"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + const example = new LambdaFunction(this, "example", { + deadLetterConfig: { + targetArn: dlq.arn, + }, + filename: "function.zip", + functionName: "example_function", + handler: "index.handler", + role: Token.asString(awsIamRoleExample.arn), + runtime: "nodejs20.x", + }); + const awsLambdaFunctionEventInvokeConfigExample = + new LambdaFunctionEventInvokeConfig(this, "example_1", { + destinationConfig: { + onFailure: { + destination: dlq.arn, + }, + onSuccess: { + destination: success.arn, + }, }, - ], + functionName: example.functionName, + maximumEventAgeInSeconds: 60, + maximumRetryAttempts: 2, + }); + /*This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match.*/ + awsLambdaFunctionEventInvokeConfigExample.overrideLogicalId("example"); + } +} + +``` + +### CloudWatch Logging and Permissions + +```typescript +// DO NOT EDIT. 
Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { + VariableType, + TerraformVariable, + Fn, + Token, + TerraformStack, +} from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. + */ +import { CloudwatchLogGroup } from "./.gen/providers/aws/cloudwatch-log-group"; +import { IamPolicy } from "./.gen/providers/aws/iam-policy"; +import { IamRole } from "./.gen/providers/aws/iam-role"; +import { IamRolePolicyAttachment } from "./.gen/providers/aws/iam-role-policy-attachment"; +import { LambdaFunction } from "./.gen/providers/aws/lambda-function"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + /*Terraform Variables are not always the best fit for getting inputs in the context of Terraform CDK. + You can read more about this at https://cdk.tf/variables*/ + const functionName = new TerraformVariable(this, "function_name", { + default: "example_function", + description: "Name of the Lambda function", + type: VariableType.STRING, + }); + const example = new CloudwatchLogGroup(this, "example", { + name: "/aws/lambda/${" + functionName.value + "}", + retentionInDays: 14, + tags: { + Environment: "production", + Function: functionName.stringValue, + }, }); - const awsIamPolicyLambdaLogging = new IamPolicy(this, "lambda_logging_3", { - description: "IAM policy for logging from a lambda", + const lambdaLogging = new IamPolicy(this, "lambda_logging", { + description: "IAM policy for logging from Lambda", name: "lambda_logging", path: "/", - policy: Token.asString(lambdaLogging.json), + policy: Token.asString( + Fn.jsonencode({ + Statement: [ + { + Action: [ + "logs:CreateLogGroup", + "logs:CreateLogStream", + "logs:PutLogEvents", + ], + Effect: "Allow", + Resource: ["arn:aws:logs:*:*:*"], + }, + ], + Version: "2012-10-17", + }) + ), + }); + const 
awsIamRoleExample = new IamRole(this, "example_3", { + assumeRolePolicy: Token.asString( + Fn.jsonencode({ + Statement: [ + { + Action: "sts:AssumeRole", + Effect: "Allow", + Principal: { + Service: "lambda.amazonaws.com", + }, + }, + ], + Version: "2012-10-17", + }) + ), + name: "lambda_execution_role", }); /*This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match.*/ - awsIamPolicyLambdaLogging.overrideLogicalId("lambda_logging"); + awsIamRoleExample.overrideLogicalId("example"); const lambdaLogs = new IamRolePolicyAttachment(this, "lambda_logs", { - policyArn: Token.asString(awsIamPolicyLambdaLogging.arn), - role: iamForLambda.name, + policyArn: lambdaLogging.arn, + role: Token.asString(awsIamRoleExample.name), }); - new LambdaFunction(this, "test_lambda", { + const awsLambdaFunctionExample = new LambdaFunction(this, "example_5", { dependsOn: [lambdaLogs, example], - functionName: lambdaFunctionName.stringValue, + filename: "function.zip", + functionName: functionName.stringValue, + handler: "index.handler", loggingConfig: { - logFormat: "Text", + applicationLogLevel: "INFO", + logFormat: "JSON", + systemLogLevel: "WARN", }, - role: config.role, + role: Token.asString(awsIamRoleExample.arn), + runtime: "nodejs20.x", }); + /*This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match.*/ + awsLambdaFunctionExample.overrideLogicalId("example"); } } @@ -322,7 +617,7 @@ class MyConvertedCode extends TerraformStack { ## Specifying the Deployment Package -AWS Lambda expects source code to be provided as a deployment package whose structure varies depending on which `runtime` is in use. See [Runtimes][6] for the valid values of `runtime`. The expected structure of the deployment package can be found in [the AWS Lambda documentation for each runtime][8]. 
+AWS Lambda expects source code to be provided as a deployment package whose structure varies depending on which `runtime` is in use. See [Runtimes](https://docs.aws.amazon.com/lambda/latest/dg/API_CreateFunction.html#SSS-CreateFunction-request-Runtime) for the valid values of `runtime`. The expected structure of the deployment package can be found in [the AWS Lambda documentation for each runtime](https://docs.aws.amazon.com/lambda/latest/dg/deployment-package-v2.html). Once you have created your deployment package you can specify it either directly as a local file (using the `filename` argument) or indirectly via Amazon S3 (using the `s3Bucket`, `s3Key` and `s3ObjectVersion` arguments). When providing the deployment package via S3 it may be useful to use [the `aws_s3_object` resource](s3_object.html) to upload it. @@ -333,102 +628,87 @@ For larger deployment packages it is recommended by Amazon to upload via S3, sin The following arguments are required: * `functionName` - (Required) Unique name for your Lambda Function. -* `role` - (Required) Amazon Resource Name (ARN) of the function's execution role. The role provides the function's identity and access to AWS services and resources. +* `role` - (Required) ARN of the function's execution role. The role provides the function's identity and access to AWS services and resources. The following arguments are optional: -* `architectures` - (Optional) Instruction set architecture for your Lambda function. Valid values are `["x86_64"]` and `["arm64"]`. Default is `["x86_64"]`. Removing this attribute, function's architecture stay the same. -* `codeSigningConfigArn` - (Optional) To enable code signing for this function, specify the ARN of a code-signing configuration. A code-signing configuration includes a set of signing profiles, which define the trusted publishers for this function. -* `deadLetterConfig` - (Optional) Configuration block. Detailed below. 
+* `architectures` - (Optional) Instruction set architecture for your Lambda function. Valid values are `["x86_64"]` and `["arm64"]`. Default is `["x86_64"]`. Removing this attribute, function's architecture stays the same. +* `codeSigningConfigArn` - (Optional) ARN of a code-signing configuration to enable code signing for this function. +* `deadLetterConfig` - (Optional) Configuration block for dead letter queue. [See below](#dead_letter_config-configuration-block). * `description` - (Optional) Description of what your Lambda Function does. -* `environment` - (Optional) Configuration block. Detailed below. -* `ephemeralStorage` - (Optional) The amount of Ephemeral storage(`/tmp`) to allocate for the Lambda Function in MB. This parameter is used to expand the total amount of Ephemeral storage available, beyond the default amount of `512`MB. Detailed below. -* `fileSystemConfig` - (Optional) Configuration block. Detailed below. -* `filename` - (Optional) Path to the function's deployment package within the local filesystem. Exactly one of `filename`, `imageUri`, or `s3Bucket` must be specified. -* `handler` - (Optional) Function [entrypoint][3] in your code. -* `imageConfig` - (Optional) Configuration block. Detailed below. -* `imageUri` - (Optional) ECR image URI containing the function's deployment package. Exactly one of `filename`, `imageUri`, or `s3Bucket` must be specified. -* `kmsKeyArn` - (Optional) Amazon Resource Name (ARN) of the AWS Key Management Service (KMS) key that is used to encrypt environment variables. If this configuration is not provided when environment variables are in use, AWS Lambda uses a default service key. If this configuration is provided when environment variables are not in use, the AWS Lambda API does not save this configuration and Terraform will show a perpetual difference of adding the key. To fix the perpetual difference, remove this configuration. 
-* `layers` - (Optional) List of Lambda Layer Version ARNs (maximum of 5) to attach to your Lambda Function. See [Lambda Layers][10] -* `loggingConfig` - (Optional) Configuration block used to specify advanced logging settings. Detailed below. -* `memorySize` - (Optional) Amount of memory in MB your Lambda Function can use at runtime. Defaults to `128`. See [Limits][5] +* `environment` - (Optional) Configuration block for environment variables. [See below](#environment-configuration-block). +* `ephemeralStorage` - (Optional) Amount of ephemeral storage (`/tmp`) to allocate for the Lambda Function. [See below](#ephemeral_storage-configuration-block). +* `fileSystemConfig` - (Optional) Configuration block for EFS file system. [See below](#file_system_config-configuration-block). +* `filename` - (Optional) Path to the function's deployment package within the local filesystem. Conflicts with `imageUri` and `s3Bucket`. One of `filename`, `imageUri`, or `s3Bucket` must be specified. +* `handler` - (Optional) Function entry point in your code. Required if `packageType` is `Zip`. +* `imageConfig` - (Optional) Container image configuration values. [See below](#image_config-configuration-block). +* `imageUri` - (Optional) ECR image URI containing the function's deployment package. Conflicts with `filename` and `s3Bucket`. One of `filename`, `imageUri`, or `s3Bucket` must be specified. +* `kmsKeyArn` - (Optional) ARN of the AWS Key Management Service key used to encrypt environment variables. If not provided when environment variables are in use, AWS Lambda uses a default service key. If provided when environment variables are not in use, the AWS Lambda API does not save this configuration. +* `layers` - (Optional) List of Lambda Layer Version ARNs (maximum of 5) to attach to your Lambda Function. +* `loggingConfig` - (Optional) Configuration block for advanced logging settings. [See below](#logging_config-configuration-block). 
+* `memorySize` - (Optional) Amount of memory in MB your Lambda Function can use at runtime. Valid value between 128 MB to 10,240 MB (10 GB), in 1 MB increments. Defaults to 128. * `packageType` - (Optional) Lambda deployment package type. Valid values are `Zip` and `Image`. Defaults to `Zip`. * `publish` - (Optional) Whether to publish creation/change as new Lambda Function Version. Defaults to `false`. -* `reservedConcurrentExecutions` - (Optional) Amount of reserved concurrent executions for this lambda function. A value of `0` disables lambda from being triggered and `-1` removes any concurrency limitations. Defaults to Unreserved Concurrency Limits `-1`. See [Managing Concurrency][9] -* `replaceSecurityGroupsOnDestroy` - (Optional) Whether to replace the security groups on the function's VPC configuration prior to destruction. -Removing these security group associations prior to function destruction can speed up security group deletion times of AWS's internal cleanup operations. -By default, the security groups will be replaced with the `default` security group in the function's configured VPC. -Set the `replacementSecurityGroupIds` attribute to use a custom list of security groups for replacement. -* `replacementSecurityGroupIds` - (Optional) List of security group IDs to assign to the function's VPC configuration prior to destruction. -`replaceSecurityGroupsOnDestroy` must be set to `true` to use this attribute. -* `runtime` - (Optional) Identifier of the function's runtime. See [Runtimes][6] for valid values. -* `s3Bucket` - (Optional) S3 bucket location containing the function's deployment package. This bucket must reside in the same AWS region where you are creating the Lambda function. Exactly one of `filename`, `imageUri`, or `s3Bucket` must be specified. When `s3Bucket` is set, `s3Key` is required. -* `s3Key` - (Optional) S3 key of an object containing the function's deployment package. When `s3Bucket` is set, `s3Key` is required. 
+* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `replaceSecurityGroupsOnDestroy` - (Optional) Whether to replace the security groups on the function's VPC configuration prior to destruction. Default is `false`. +* `replacementSecurityGroupIds` - (Optional) List of security group IDs to assign to the function's VPC configuration prior to destruction. Required if `replaceSecurityGroupsOnDestroy` is `true`. +* `reservedConcurrentExecutions` - (Optional) Amount of reserved concurrent executions for this lambda function. A value of `0` disables lambda from being triggered and `-1` removes any concurrency limitations. Defaults to Unreserved Concurrency Limits `-1`. +* `runtime` - (Optional) Identifier of the function's runtime. Required if `packageType` is `Zip`. See [Runtimes](https://docs.aws.amazon.com/lambda/latest/dg/API_CreateFunction.html#SSS-CreateFunction-request-Runtime) for valid values. +* `s3Bucket` - (Optional) S3 bucket location containing the function's deployment package. Conflicts with `filename` and `imageUri`. One of `filename`, `imageUri`, or `s3Bucket` must be specified. +* `s3Key` - (Optional) S3 key of an object containing the function's deployment package. Required if `s3Bucket` is set. * `s3ObjectVersion` - (Optional) Object version containing the function's deployment package. Conflicts with `filename` and `imageUri`. -* `skipDestroy` - (Optional) Set to true if you do not wish the function to be deleted at destroy time, and instead just remove the function from the Terraform state. -* `sourceCodeHash` - (Optional) Virtual attribute used to trigger replacement when source code changes. 
Must be set to a base64-encoded SHA256 hash of the package file specified with either `filename` or `s3Key`. The usual way to set this is `filebase64sha256("file.zip")` (Terraform 0.11.12 and later) or `base64sha256(file("file.zip"))` (Terraform 0.11.11 and earlier), where "file.zip" is the local filename of the lambda function source archive. -* `snapStart` - (Optional) Snap start settings block. Detailed below. -* `tags` - (Optional) Map of tags to assign to the object. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. -* `timeout` - (Optional) Amount of time your Lambda Function has to run in seconds. Defaults to `3`. See [Limits][5]. -* `tracingConfig` - (Optional) Configuration block. Detailed below. -* `vpcConfig` - (Optional) Configuration block. Detailed below. +* `skipDestroy` - (Optional) Whether to retain the old version of a previously deployed Lambda Layer. Default is `false`. +* `snapStart` - (Optional) Configuration block for snap start settings. [See below](#snap_start-configuration-block). +* `sourceCodeHash` - (Optional) Base64-encoded SHA256 hash of the package file. Used to trigger updates when source code changes. +* `sourceKmsKeyArn` - (Optional) ARN of the AWS Key Management Service key used to encrypt the function's `.zip` deployment package. Conflicts with `imageUri`. +* `tags` - (Optional) Key-value map of tags for the Lambda function. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. +* `timeout` - (Optional) Amount of time your Lambda Function has to run in seconds. Defaults to 3. Valid between 1 and 900. 
+* `tracingConfig` - (Optional) Configuration block for X-Ray tracing. [See below](#tracing_config-configuration-block). +* `vpcConfig` - (Optional) Configuration block for VPC. [See below](#vpc_config-configuration-block). -### dead_letter_config +### dead_letter_config Configuration Block -Dead letter queue configuration that specifies the queue or topic where Lambda sends asynchronous events when they fail processing. For more information, see [Dead Letter Queues](https://docs.aws.amazon.com/lambda/latest/dg/invocation-async.html#dlq). +* `targetArn` - (Required) ARN of an SNS topic or SQS queue to notify when an invocation fails. -* `targetArn` - (Required) ARN of an SNS topic or SQS queue to notify when an invocation fails. If this option is used, the function's IAM role must be granted suitable access to write to the target object, which means allowing either the `sns:Publish` or `sqs:SendMessage` action on this ARN, depending on which service is targeted. +### environment Configuration Block -### environment +* `variables` - (Optional) Map of environment variables available to your Lambda function during execution. -* `variables` - (Optional) Map of environment variables that are accessible from the function code during execution. If provided at least one key must be present. +### ephemeral_storage Configuration Block -### ephemeral_storage +* `size` - (Required) Amount of ephemeral storage (`/tmp`) in MB. Valid between 512 MB and 10,240 MB (10 GB). -* `size` - (Required) The size of the Lambda function Ephemeral storage(`/tmp`) represented in MB. The minimum supported `ephemeralStorage` value defaults to `512`MB and the maximum supported value is `10240`MB. +### file_system_config Configuration Block -### file_system_config +* `arn` - (Required) ARN of the Amazon EFS Access Point. +* `localMountPath` - (Required) Path where the function can access the file system. Must start with `/mnt/`. -Connection settings for an EFS file system. 
Before creating or updating Lambda functions with `fileSystemConfig`, EFS mount targets must be in available lifecycle state. Use `dependsOn` to explicitly declare this dependency. See [Using Amazon EFS with Lambda][12]. +### image_config Configuration Block -* `arn` - (Required) Amazon Resource Name (ARN) of the Amazon EFS Access Point that provides access to the file system. -* `localMountPath` - (Required) Path where the function can access the file system, starting with /mnt/. +* `command` - (Optional) Parameters to pass to the container image. +* `entryPoint` - (Optional) Entry point to your application. +* `workingDirectory` - (Optional) Working directory for the container image. -### image_config +### logging_config Configuration Block -Container image configuration values that override the values in the container image Dockerfile. +* `applicationLogLevel` - (Optional) Detail level of application logs. Valid values: `TRACE`, `DEBUG`, `INFO`, `WARN`, `ERROR`, `FATAL`. +* `logFormat` - (Required) Log format. Valid values: `Text`, `JSON`. +* `logGroup` - (Optional) CloudWatch log group where logs are sent. +* `systemLogLevel` - (Optional) Detail level of Lambda platform logs. Valid values: `DEBUG`, `INFO`, `WARN`. -* `command` - (Optional) Parameters that you want to pass in with `entryPoint`. -* `entryPoint` - (Optional) Entry point to your application, which is typically the location of the runtime executable. -* `workingDirectory` - (Optional) Working directory. +### snap_start Configuration Block -### logging_config +* `applyOn` - (Required) When to apply snap start optimization. Valid value: `PublishedVersions`. -Advanced logging settings. See [Configuring advanced logging controls for your Lambda function][13]. +### tracing_config Configuration Block -* `applicationLogLevel` - (Optional) for JSON structured logs, choose the detail level of the logs your application sends to CloudWatch when using supported logging libraries. 
-* `logFormat` - (Required) select between `Text` and structured `JSON` format for your function's logs. -* `logGroup` - (Optional) the CloudWatch log group your function sends logs to. -* `systemLogLevel` - (optional) for JSON structured logs, choose the detail level of the Lambda platform event logs sent to CloudWatch, such as `WARN`, `DEBUG`, or `INFO`. +* `mode` - (Required) X-Ray tracing mode. Valid values: `Active`, `PassThrough`. -### snap_start - -Snap start settings for low-latency startups. This feature is currently only supported for specific runtimes, see [Supported features and limitations][14]. -Remove this block to delete the associated settings (rather than setting `apply_on = "None"`). - -* `applyOn` - (Required) Conditions where snap start is enabled. Valid values are `PublishedVersions`. - -### tracing_config - -* `mode` - (Required) Whether to sample and trace a subset of incoming requests with AWS X-Ray. Valid values are `PassThrough` and `Active`. If `PassThrough`, Lambda will only trace the request from an upstream service if it contains a tracing header with "sampled=1". If `Active`, Lambda will respect any tracing header it receives from an upstream service. If no tracing header is received, Lambda will call X-Ray for a tracing decision. - -### vpc_config - -For network connectivity to AWS resources in a VPC, specify a list of security groups and subnets in the VPC. When you connect a function to a VPC, it can only access resources and the internet through that VPC. See [VPC Settings][7]. +### vpc_config Configuration Block ~> **NOTE:** If `subnetIds`, `securityGroupIds` and `ipv6AllowedForDualStack` are empty then `vpcConfig` is considered to be empty or unset. -* `ipv6AllowedForDualStack` - (Optional) Allows outbound IPv6 traffic on VPC functions that are connected to dual-stack subnets. Default is `false`. +* `ipv6AllowedForDualStack` - (Optional) Whether to allow outbound IPv6 traffic on VPC functions connected to dual-stack subnets. 
Default: `false`. * `securityGroupIds` - (Required) List of security group IDs associated with the Lambda function. * `subnetIds` - (Required) List of subnet IDs associated with the Lambda function. @@ -436,34 +716,20 @@ For network connectivity to AWS resources in a VPC, specify a list of security g This resource exports the following attributes in addition to the arguments above: -* `arn` - Amazon Resource Name (ARN) identifying your Lambda Function. +* `arn` - ARN identifying your Lambda Function. * `codeSha256` - Base64-encoded representation of raw SHA-256 sum of the zip file. -* `invokeArn` - ARN to be used for invoking Lambda Function from API Gateway - to be used in [`aws_api_gateway_integration`](/docs/providers/aws/r/api_gateway_integration.html)'s `uri`. +* `invokeArn` - ARN to be used for invoking Lambda Function from API Gateway - to be used in [`aws_api_gateway_integration`](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/api_gateway_integration)'s `uri`. * `lastModified` - Date this resource was last modified. * `qualifiedArn` - ARN identifying your Lambda Function Version (if versioning is enabled via `publish = true`). -* `qualifiedInvokeArn` - Qualified ARN (ARN with lambda version number) to be used for invoking Lambda Function from API Gateway - to be used in [`aws_api_gateway_integration`](/docs/providers/aws/r/api_gateway_integration.html)'s `uri`. +* `qualifiedInvokeArn` - Qualified ARN (ARN with lambda version number) to be used for invoking Lambda Function from API Gateway - to be used in [`aws_api_gateway_integration`](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/api_gateway_integration)'s `uri`. * `signingJobArn` - ARN of the signing job. * `signingProfileVersionArn` - ARN of the signing profile version. * `snap_start.optimization_status` - Optimization status of the snap start configuration. Valid values are `On` and `Off`. 
* `sourceCodeSize` - Size in bytes of the function .zip file. -* `tagsAll` - A map of tags assigned to the resource, including those inherited from the provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). +* `tagsAll` - Map of tags assigned to the resource, including those inherited from the provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). * `version` - Latest published version of your Lambda Function. * `vpc_config.vpc_id` - ID of the VPC. -[1]: https://docs.aws.amazon.com/lambda/latest/dg/welcome.html -[3]: https://docs.aws.amazon.com/lambda/latest/dg/walkthrough-custom-events-create-test-function.html -[4]: https://docs.aws.amazon.com/lambda/latest/dg/intro-permission-model.html -[5]: https://docs.aws.amazon.com/lambda/latest/dg/limits.html -[6]: https://docs.aws.amazon.com/lambda/latest/dg/API_CreateFunction.html#SSS-CreateFunction-request-Runtime -[7]: https://docs.aws.amazon.com/lambda/latest/dg/configuration-vpc.html -[8]: https://docs.aws.amazon.com/lambda/latest/dg/deployment-package-v2.html -[9]: https://docs.aws.amazon.com/lambda/latest/dg/concurrent-executions.html -[10]: https://docs.aws.amazon.com/lambda/latest/dg/configuration-layers.html -[11]: https://learn.hashicorp.com/terraform/aws/lambda-api-gateway -[12]: https://docs.aws.amazon.com/lambda/latest/dg/services-efs.html -[13]: https://docs.aws.amazon.com/lambda/latest/dg/monitoring-cloudwatchlogs.html#monitoring-cloudwatchlogs-advanced -[14]: https://docs.aws.amazon.com/lambda/latest/dg/snapstart.html#snapstart-runtimes - ## Timeouts [Configuration options](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts): @@ -474,6 +740,32 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` 
block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_lambda_function.example + identity = { + function_name = "example" + } +} + +resource "aws_lambda_function" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `functionName` (String) Name of the Lambda function. + +#### Optional + +* `accountId` (String) AWS Account where this resource is managed. +* `region` (String) Region where this resource is managed. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Lambda Functions using the `functionName`. For example: ```typescript @@ -488,11 +780,7 @@ import { LambdaFunction } from "./.gen/providers/aws/lambda-function"; class MyConvertedCode extends TerraformStack { constructor(scope: Construct, name: string) { super(scope, name); - LambdaFunction.generateConfigForImport( - this, - "testLambda", - "my_test_lambda_function" - ); + LambdaFunction.generateConfigForImport(this, "example", "example"); } } @@ -501,7 +789,7 @@ class MyConvertedCode extends TerraformStack { Using `terraform import`, import Lambda Functions using the `functionName`. 
For example: ```console -% terraform import aws_lambda_function.test_lambda my_test_lambda_function +% terraform import aws_lambda_function.example example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/lambda_function_event_invoke_config.html.markdown b/website/docs/cdktf/typescript/r/lambda_function_event_invoke_config.html.markdown index 4560e6e493fb..666775f8df03 100644 --- a/website/docs/cdktf/typescript/r/lambda_function_event_invoke_config.html.markdown +++ b/website/docs/cdktf/typescript/r/lambda_function_event_invoke_config.html.markdown @@ -3,20 +3,22 @@ subcategory: "Lambda" layout: "aws" page_title: "AWS: aws_lambda_function_event_invoke_config" description: |- - Manages an asynchronous invocation configuration for a Lambda Function or Alias. + Manages an AWS Lambda Function Event Invoke Config. --- # Resource: aws_lambda_function_event_invoke_config -Manages an asynchronous invocation configuration for a Lambda Function or Alias. More information about asynchronous invocations and the configurable values can be found in the [Lambda Developer Guide](https://docs.aws.amazon.com/lambda/latest/dg/invocation-async.html). +Manages an AWS Lambda Function Event Invoke Config. Use this resource to configure error handling and destinations for asynchronous Lambda function invocations. + +More information about asynchronous invocations and the configurable values can be found in the [Lambda Developer Guide](https://docs.aws.amazon.com/lambda/latest/dg/invocation-async.html). ## Example Usage -### Destination Configuration +### Complete Error Handling and Destinations -~> **NOTE:** Ensure the Lambda Function IAM Role has necessary permissions for the destination, such as `sqs:SendMessage` or `sns:Publish`, otherwise the API will return a generic `InvalidParameterValueException: The destination ARN arn:PARTITION:SERVICE:REGION:ACCOUNT:RESOURCE is invalid.` error. 
+~> **Note:** Ensure the Lambda Function IAM Role has necessary permissions for the destination, such as `sqs:SendMessage` or `sns:Publish`, otherwise the API will return a generic `InvalidParameterValueException: The destination ARN arn:PARTITION:SERVICE:REGION:ACCOUNT:RESOURCE is invalid.` error. ```typescript // DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug @@ -27,26 +29,44 @@ import { Token, TerraformStack } from "cdktf"; * See https://cdk.tf/provider-generation for more details. */ import { LambdaFunctionEventInvokeConfig } from "./.gen/providers/aws/lambda-function-event-invoke-config"; +import { SnsTopic } from "./.gen/providers/aws/sns-topic"; +import { SqsQueue } from "./.gen/providers/aws/sqs-queue"; class MyConvertedCode extends TerraformStack { constructor(scope: Construct, name: string) { super(scope, name); + const success = new SnsTopic(this, "success", { + name: "lambda-success-notifications", + tags: { + Environment: "production", + Purpose: "lambda-success-notifications", + }, + }); + const dlq = new SqsQueue(this, "dlq", { + name: "lambda-dlq", + tags: { + Environment: "production", + Purpose: "lambda-error-handling", + }, + }); new LambdaFunctionEventInvokeConfig(this, "example", { destinationConfig: { onFailure: { - destination: Token.asString(awsSqsQueueExample.arn), + destination: dlq.arn, }, onSuccess: { - destination: Token.asString(awsSnsTopicExample.arn), + destination: success.arn, }, }, - functionName: Token.asString(awsLambdaAliasExample.functionName), + functionName: Token.asString(awsLambdaFunctionExample.functionName), + maximumEventAgeInSeconds: 300, + maximumRetryAttempts: 1, }); } } ``` -### Error Handling Configuration +### Error Handling Only ```typescript // DO NOT EDIT. 
Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug @@ -61,7 +81,7 @@ class MyConvertedCode extends TerraformStack { constructor(scope: Construct, name: string) { super(scope, name); new LambdaFunctionEventInvokeConfig(this, "example", { - functionName: Token.asString(awsLambdaAliasExample.functionName), + functionName: Token.asString(awsLambdaFunctionExample.functionName), maximumEventAgeInSeconds: 60, maximumRetryAttempts: 0, }); @@ -70,7 +90,47 @@ class MyConvertedCode extends TerraformStack { ``` -### Configuration for Alias Name +### Configuration for Lambda Alias + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { Token, TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. + */ +import { LambdaAlias } from "./.gen/providers/aws/lambda-alias"; +import { LambdaFunctionEventInvokeConfig } from "./.gen/providers/aws/lambda-function-event-invoke-config"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + const example = new LambdaAlias(this, "example", { + description: "Production alias", + functionName: Token.asString(awsLambdaFunctionExample.functionName), + functionVersion: Token.asString(awsLambdaFunctionExample.version), + name: "production", + }); + const awsLambdaFunctionEventInvokeConfigExample = + new LambdaFunctionEventInvokeConfig(this, "example_1", { + destinationConfig: { + onFailure: { + destination: productionDlq.arn, + }, + }, + functionName: Token.asString(awsLambdaFunctionExample.functionName), + maximumEventAgeInSeconds: 1800, + maximumRetryAttempts: 2, + qualifier: example.name, + }); + /*This allows the Terraform resource name to match the original name. 
You can remove the call if you don't need them to match.*/ + awsLambdaFunctionEventInvokeConfigExample.overrideLogicalId("example"); + } +} + +``` + +### Configuration for Published Version ```typescript // DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug @@ -85,15 +145,25 @@ class MyConvertedCode extends TerraformStack { constructor(scope: Construct, name: string) { super(scope, name); new LambdaFunctionEventInvokeConfig(this, "example", { - functionName: Token.asString(awsLambdaAliasExample.functionName), - qualifier: Token.asString(awsLambdaAliasExample.name), + destinationConfig: { + onFailure: { + destination: versionDlq.arn, + }, + onSuccess: { + destination: versionSuccess.arn, + }, + }, + functionName: Token.asString(awsLambdaFunctionExample.functionName), + maximumEventAgeInSeconds: 21600, + maximumRetryAttempts: 2, + qualifier: Token.asString(awsLambdaFunctionExample.version), }); } } ``` -### Configuration for Function Latest Unpublished Version +### Configuration for Latest Version ```typescript // DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug @@ -108,7 +178,14 @@ class MyConvertedCode extends TerraformStack { constructor(scope: Construct, name: string) { super(scope, name); new LambdaFunctionEventInvokeConfig(this, "example", { + destinationConfig: { + onFailure: { + destination: devDlq.arn, + }, + }, functionName: Token.asString(awsLambdaFunctionExample.functionName), + maximumEventAgeInSeconds: 120, + maximumRetryAttempts: 0, qualifier: "$LATEST", }); } @@ -116,7 +193,7 @@ class MyConvertedCode extends TerraformStack { ``` -### Configuration for Function Published Version +### Multiple Destination Types ```typescript // DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug @@ -126,13 +203,28 @@ import { Token, TerraformStack } from "cdktf"; * Provider bindings are generated by running `cdktf get`. 
* See https://cdk.tf/provider-generation for more details. */ +import { CloudwatchEventBus } from "./.gen/providers/aws/cloudwatch-event-bus"; import { LambdaFunctionEventInvokeConfig } from "./.gen/providers/aws/lambda-function-event-invoke-config"; +import { S3Bucket } from "./.gen/providers/aws/s3-bucket"; class MyConvertedCode extends TerraformStack { constructor(scope: Construct, name: string) { super(scope, name); + const lambdaFailures = new CloudwatchEventBus(this, "lambda_failures", { + name: "lambda-failure-events", + }); + const lambdaSuccessArchive = new S3Bucket(this, "lambda_success_archive", { + bucket: "lambda-success-archive-${" + bucketSuffix.hex + "}", + }); new LambdaFunctionEventInvokeConfig(this, "example", { + destinationConfig: { + onFailure: { + destination: lambdaFailures.arn, + }, + onSuccess: { + destination: lambdaSuccessArchive.arn, + }, + }, functionName: Token.asString(awsLambdaFunctionExample.functionName), - qualifier: Token.asString(awsLambdaFunctionExample.version), }); } } @@ -143,45 +235,40 @@ class MyConvertedCode extends TerraformStack { The following arguments are required: -* `functionName` - (Required) Name or Amazon Resource Name (ARN) of the Lambda Function, omitting any version or alias qualifier. +* `functionName` - (Required) Name or ARN of the Lambda Function, omitting any version or alias qualifier. The following arguments are optional: -* `destinationConfig` - (Optional) Configuration block with destination configuration. See below for details. +* `destinationConfig` - (Optional) Configuration block with destination configuration. [See below](#destination_config-configuration-block). * `maximumEventAgeInSeconds` - (Optional) Maximum age of a request that Lambda sends to a function for processing in seconds. Valid values between 60 and 21600. * `maximumRetryAttempts` - (Optional) Maximum number of times to retry when the function returns an error. Valid values between 0 and 2. Defaults to 2. 
* `qualifier` - (Optional) Lambda Function published version, `$LATEST`, or Lambda Alias name. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). ### destination_config Configuration Block -~> **NOTE:** At least one of `onFailure` or `onSuccess` must be configured when using this configuration block, otherwise remove it completely to prevent perpetual differences in Terraform runs. - -The following arguments are optional: +~> **Note:** At least one of `onFailure` or `onSuccess` must be configured when using this configuration block, otherwise remove it completely to prevent perpetual differences in Terraform runs. -* `onFailure` - (Optional) Configuration block with destination configuration for failed asynchronous invocations. See below for details. -* `onSuccess` - (Optional) Configuration block with destination configuration for successful asynchronous invocations. See below for details. +* `onFailure` - (Optional) Configuration block with destination configuration for failed asynchronous invocations. [See below](#destination_config-on_failure-configuration-block). +* `onSuccess` - (Optional) Configuration block with destination configuration for successful asynchronous invocations. [See below](#destination_config-on_success-configuration-block). #### destination_config on_failure Configuration Block -The following arguments are required: - -* `destination` - (Required) Amazon Resource Name (ARN) of the destination resource. See the [Lambda Developer Guide](https://docs.aws.amazon.com/lambda/latest/dg/invocation-async.html#invocation-async-destinations) for acceptable resource types and associated IAM permissions. +* `destination` - (Required) ARN of the destination resource. 
See the [Lambda Developer Guide](https://docs.aws.amazon.com/lambda/latest/dg/invocation-async.html#invocation-async-destinations) for acceptable resource types and associated IAM permissions. #### destination_config on_success Configuration Block -The following arguments are required: - -* `destination` - (Required) Amazon Resource Name (ARN) of the destination resource. See the [Lambda Developer Guide](https://docs.aws.amazon.com/lambda/latest/dg/invocation-async.html#invocation-async-destinations) for acceptable resource types and associated IAM permissions. +* `destination` - (Required) ARN of the destination resource. See the [Lambda Developer Guide](https://docs.aws.amazon.com/lambda/latest/dg/invocation-async.html#invocation-async-destinations) for acceptable resource types and associated IAM permissions. ## Attribute Reference This resource exports the following attributes in addition to the arguments above: -* `id` - Fully qualified Lambda Function name or Amazon Resource Name (ARN) +* `id` - Fully qualified Lambda Function name or ARN. ## Import -In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Lambda Function Event Invoke Configs using the fully qualified Function name or Amazon Resource Name (ARN). For example: +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Lambda Function Event Invoke Configs using the fully qualified Function name or ARN. 
For example: ARN without qualifier (all versions and aliases): @@ -200,7 +287,7 @@ class MyConvertedCode extends TerraformStack { LambdaFunctionEventInvokeConfig.generateConfigForImport( this, "example", - "arn:aws:us-east-1:123456789012:function:my_function" + "arn:aws:lambda:us-east-1:123456789012:function:example" ); } } @@ -224,7 +311,7 @@ class MyConvertedCode extends TerraformStack { LambdaFunctionEventInvokeConfig.generateConfigForImport( this, "example", - "arn:aws:us-east-1:123456789012:function:my_function:production" + "arn:aws:lambda:us-east-1:123456789012:function:example:production" ); } } @@ -248,7 +335,7 @@ class MyConvertedCode extends TerraformStack { LambdaFunctionEventInvokeConfig.generateConfigForImport( this, "example", - "my_function" + "example" ); } } @@ -272,37 +359,37 @@ class MyConvertedCode extends TerraformStack { LambdaFunctionEventInvokeConfig.generateConfigForImport( this, "example", - "my_function:production" + "example:production" ); } } ``` -**Using `terraform import` to import** Lambda Function Event Invoke Configs using the fully qualified Function name or Amazon Resource Name (ARN). 
For example: +For backwards compatibility, the following legacy `terraform import` commands are also supported: -ARN without qualifier (all versions and aliases): +Using ARN without qualifier: ```console -% terraform import aws_lambda_function_event_invoke_config.example arn:aws:us-east-1:123456789012:function:my_function +% terraform import aws_lambda_function_event_invoke_config.example arn:aws:lambda:us-east-1:123456789012:function:example ``` -ARN with qualifier: +Using ARN with qualifier: ```console -% terraform import aws_lambda_function_event_invoke_config.example arn:aws:us-east-1:123456789012:function:my_function:production +% terraform import aws_lambda_function_event_invoke_config.example arn:aws:lambda:us-east-1:123456789012:function:example:production ``` Name without qualifier (all versions and aliases): ```console -% terraform import aws_lambda_function_event_invoke_config.example my_function +% terraform import aws_lambda_function_event_invoke_config.example example ``` Name with qualifier: ```console -% terraform import aws_lambda_function_event_invoke_config.example my_function:production +% terraform import aws_lambda_function_event_invoke_config.example example:production ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/lambda_function_recursion_config.html.markdown b/website/docs/cdktf/typescript/r/lambda_function_recursion_config.html.markdown index 41d2b64061b6..48e2e97b4144 100644 --- a/website/docs/cdktf/typescript/r/lambda_function_recursion_config.html.markdown +++ b/website/docs/cdktf/typescript/r/lambda_function_recursion_config.html.markdown @@ -3,19 +3,55 @@ subcategory: "Lambda" layout: "aws" page_title: "AWS: aws_lambda_function_recursion_config" description: |- - Terraform resource for managing an AWS Lambda Function Recursion Config. + Manages an AWS Lambda Function Recursion Config. 
--- # Resource: aws_lambda_function_recursion_config -Terraform resource for managing an AWS Lambda Function Recursion Config. +Manages an AWS Lambda Function Recursion Config. Use this resource to control how Lambda handles recursive function invocations to prevent infinite loops. -~> Destruction of this resource will return the `recursiveLoop` configuration back to the default value of `Terminate`. +~> **Note:** Destruction of this resource will return the `recursiveLoop` configuration back to the default value of `Terminate`. ## Example Usage +### Allow Recursive Invocations + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. + */ +import { LambdaFunction } from "./.gen/providers/aws/lambda-function"; +import { LambdaFunctionRecursionConfig } from "./.gen/providers/aws/lambda-function-recursion-config"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + const example = new LambdaFunction(this, "example", { + filename: "function.zip", + functionName: "recursive_processor", + handler: "index.handler", + role: lambdaRole.arn, + runtime: "python3.12", + }); + const awsLambdaFunctionRecursionConfigExample = + new LambdaFunctionRecursionConfig(this, "example_1", { + functionName: example.functionName, + recursiveLoop: "Allow", + }); + /*This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match.*/ + awsLambdaFunctionRecursionConfigExample.overrideLogicalId("example"); + } +} + +``` + +### Production Safety Configuration + ```typescript // DO NOT EDIT. 
Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug import { Construct } from "constructs"; @@ -24,13 +60,29 @@ import { TerraformStack } from "cdktf"; * Provider bindings are generated by running `cdktf get`. * See https://cdk.tf/provider-generation for more details. */ +import { LambdaFunction } from "./.gen/providers/aws/lambda-function"; import { LambdaFunctionRecursionConfig } from "./.gen/providers/aws/lambda-function-recursion-config"; class MyConvertedCode extends TerraformStack { constructor(scope: Construct, name: string) { super(scope, name); + const productionProcessor = new LambdaFunction( + this, + "production_processor", + { + filename: "processor.zip", + functionName: "production-data-processor", + handler: "app.handler", + role: lambdaRole.arn, + runtime: "nodejs20.x", + tags: { + Environment: "production", + Purpose: "data-processing", + }, + } + ); new LambdaFunctionRecursionConfig(this, "example", { - functionName: "SomeFunction", - recursiveLoop: "Allow", + functionName: productionProcessor.functionName, + recursiveLoop: "Terminate", }); } } @@ -41,16 +93,20 @@ class MyConvertedCode extends TerraformStack { The following arguments are required: -* `functionName` - (Required) Lambda function name. +* `functionName` - (Required) Name of the Lambda function. * `recursiveLoop` - (Required) Lambda function recursion configuration. Valid values are `Allow` or `Terminate`. +The following arguments are optional: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). + ## Attribute Reference This resource exports no additional attributes. 
## Import -In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import AWS Lambda Function Recursion Config using the `functionName`. For example: +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Lambda Function Recursion Config using the `functionName`. For example: ```typescript // DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug @@ -67,17 +123,17 @@ class MyConvertedCode extends TerraformStack { LambdaFunctionRecursionConfig.generateConfigForImport( this, "example", - "SomeFunction" + "recursive_processor" ); } } ``` -Using `terraform import`, import AWS Lambda Function Recursion Config using the `functionName`. For example: +For backwards compatibility, the following legacy `terraform import` command is also supported: ```console -% terraform import aws_lambda_function_recursion_config.example SomeFunction +% terraform import aws_lambda_function_recursion_config.example recursive_processor ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/lambda_function_url.html.markdown b/website/docs/cdktf/typescript/r/lambda_function_url.html.markdown index 2887ef763de0..7c86dc47fe99 100644 --- a/website/docs/cdktf/typescript/r/lambda_function_url.html.markdown +++ b/website/docs/cdktf/typescript/r/lambda_function_url.html.markdown @@ -2,24 +2,23 @@ subcategory: "Lambda" layout: "aws" page_title: "AWS: aws_lambda_function_url" -description: |- - Provides a Lambda function URL resource. +description: Manages a Lambda function URL. --- # Resource: aws_lambda_function_url -Provides a Lambda function URL resource. A function URL is a dedicated HTTP(S) endpoint for a Lambda function. - -See the [AWS Lambda documentation](https://docs.aws.amazon.com/lambda/latest/dg/lambda-urls.html) for more information. +Manages a Lambda function URL. 
Creates a dedicated HTTP(S) endpoint for a Lambda function to enable direct invocation via HTTP requests. ## Example Usage +### Basic Function URL with No Authentication + ```typescript // DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug import { Construct } from "constructs"; -import { TerraformStack } from "cdktf"; +import { Token, TerraformStack } from "cdktf"; /* * Provider bindings are generated by running `cdktf get`. * See https://cdk.tf/provider-generation for more details. @@ -28,21 +27,41 @@ import { LambdaFunctionUrl } from "./.gen/providers/aws/lambda-function-url"; class MyConvertedCode extends TerraformStack { constructor(scope: Construct, name: string) { super(scope, name); - new LambdaFunctionUrl(this, "test_latest", { + new LambdaFunctionUrl(this, "example", { authorizationType: "NONE", - functionName: test.functionName, + functionName: Token.asString(awsLambdaFunctionExample.functionName), }); - new LambdaFunctionUrl(this, "test_live", { + } +} + +``` + +### Function URL with IAM Authentication and CORS Configuration + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { Token, TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. 
+ */ +import { LambdaFunctionUrl } from "./.gen/providers/aws/lambda-function-url"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + new LambdaFunctionUrl(this, "example", { authorizationType: "AWS_IAM", cors: { allowCredentials: true, allowHeaders: ["date", "keep-alive"], - allowMethods: ["*"], - allowOrigins: ["*"], + allowMethods: ["GET", "POST"], + allowOrigins: ["https://example.com"], exposeHeaders: ["keep-alive", "date"], maxAge: 86400, }, - functionName: test.functionName, + functionName: Token.asString(awsLambdaFunctionExample.functionName), + invokeMode: "RESPONSE_STREAM", qualifier: "my_alias", }); } @@ -52,32 +71,40 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -This resource supports the following arguments: +The following arguments are required: + +* `authorizationType` - (Required) Type of authentication that the function URL uses. Valid values are `AWS_IAM` and `NONE`. +* `functionName` - (Required) Name or ARN of the Lambda function. -* `authorizationType` - (Required) The type of authentication that the function URL uses. Set to `"AWS_IAM"` to restrict access to authenticated IAM users only. Set to `"NONE"` to bypass IAM authentication and create a public endpoint. See the [AWS documentation](https://docs.aws.amazon.com/lambda/latest/dg/urls-auth.html) for more details. -* `cors` - (Optional) The [cross-origin resource sharing (CORS)](https://developer.mozilla.org/en-US/docs/Web/HTTP/CORS) settings for the function URL. Documented below. -* `functionName` - (Required) The name (or ARN) of the Lambda function. -* `invokeMode` - (Optional) Determines how the Lambda function responds to an invocation. Valid values are `BUFFERED` (default) and `RESPONSE_STREAM`. See more in [Configuring a Lambda function to stream responses](https://docs.aws.amazon.com/lambda/latest/dg/configuration-response-streaming.html). 
-* `qualifier` - (Optional) The alias name or `"$LATEST"`. +The following arguments are optional: -### cors +* `cors` - (Optional) Cross-origin resource sharing (CORS) settings for the function URL. [See below](#cors). +* `invokeMode` - (Optional) How the Lambda function responds to an invocation. Valid values are `BUFFERED` (default) and `RESPONSE_STREAM`. +* `qualifier` - (Optional) Alias name or `$LATEST`. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). -This configuration block supports the following attributes: +### CORS -* `allowCredentials` - (Optional) Whether to allow cookies or other credentials in requests to the function URL. The default is `false`. -* `allowHeaders` - (Optional) The HTTP headers that origins can include in requests to the function URL. For example: `["date", "keep-alive", "x-custom-header"]`. -* `allowMethods` - (Optional) The HTTP methods that are allowed when calling the function URL. For example: `["GET", "POST", "DELETE"]`, or the wildcard character (`["*"]`). -* `allowOrigins` - (Optional) The origins that can access the function URL. You can list any number of specific origins (or the wildcard character (`"*"`)), separated by a comma. For example: `["https://www.example.com", "http://localhost:60905"]`. -* `exposeHeaders` - (Optional) The HTTP headers in your function response that you want to expose to origins that call the function URL. -* `maxAge` - (Optional) The maximum amount of time, in seconds, that web browsers can cache results of a preflight request. By default, this is set to `0`, which means that the browser doesn't cache results. The maximum value is `86400`. 
+* `allowCredentials` - (Optional) Whether to allow cookies or other credentials in requests to the function URL. +* `allowHeaders` - (Optional) HTTP headers that origins can include in requests to the function URL. +* `allowMethods` - (Optional) HTTP methods that are allowed when calling the function URL. +* `allowOrigins` - (Optional) Origins that can access the function URL. +* `exposeHeaders` - (Optional) HTTP headers in your function response that you want to expose to origins that call the function URL. +* `maxAge` - (Optional) Maximum amount of time, in seconds, that web browsers can cache results of a preflight request. Maximum value is `86400`. ## Attribute Reference This resource exports the following attributes in addition to the arguments above: -* `functionArn` - The Amazon Resource Name (ARN) of the function. -* `functionUrl` - The HTTP URL endpoint for the function in the format `https://.lambda-url..on.aws/`. -* `urlId` - A generated ID for the endpoint. +* `functionArn` - ARN of the Lambda function. +* `functionUrl` - HTTP URL endpoint for the function in the format `https://.lambda-url..on.aws/`. +* `urlId` - Generated ID for the endpoint. + +## Timeouts + +[Configuration options](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts): + +* `create` - (Default `10m`) ## Import @@ -95,11 +122,7 @@ import { LambdaFunctionUrl } from "./.gen/providers/aws/lambda-function-url"; class MyConvertedCode extends TerraformStack { constructor(scope: Construct, name: string) { super(scope, name); - LambdaFunctionUrl.generateConfigForImport( - this, - "testLambdaUrl", - "my_test_lambda_function" - ); + LambdaFunctionUrl.generateConfigForImport(this, "example", "example"); } } @@ -108,7 +131,7 @@ class MyConvertedCode extends TerraformStack { Using `terraform import`, import Lambda function URLs using the `functionName` or `function_name/qualifier`. 
For example: ```console -% terraform import aws_lambda_function_url.test_lambda_url my_test_lambda_function +% terraform import aws_lambda_function_url.example example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/lambda_invocation.html.markdown b/website/docs/cdktf/typescript/r/lambda_invocation.html.markdown index 009c47718ccc..7666f6eeed18 100644 --- a/website/docs/cdktf/typescript/r/lambda_invocation.html.markdown +++ b/website/docs/cdktf/typescript/r/lambda_invocation.html.markdown @@ -3,22 +3,22 @@ subcategory: "Lambda" layout: "aws" page_title: "AWS: aws_lambda_invocation" description: |- - Invoke AWS Lambda Function + Manages an AWS Lambda Function invocation. --- # Resource: aws_lambda_invocation -Use this resource to invoke a lambda function. The lambda function is invoked with the [RequestResponse](https://docs.aws.amazon.com/lambda/latest/dg/API_Invoke.html#API_Invoke_RequestSyntax) invocation type. +Manages an AWS Lambda Function invocation. Use this resource to invoke a Lambda function with the [RequestResponse](https://docs.aws.amazon.com/lambda/latest/dg/API_Invoke.html#API_Invoke_RequestSyntax) invocation type. -~> **NOTE:** By default this resource _only_ invokes the function when the arguments call for a create or replace. In other words, after an initial invocation on _apply_, if the arguments do not change, a subsequent _apply_ does not invoke the function again. To dynamically invoke the function, see the `triggers` example below. To always invoke a function on each _apply_, see the [`aws_lambda_invocation`](/docs/providers/aws/d/lambda_invocation.html) data source. To invoke the lambda function when the terraform resource is updated and deleted, see the [CRUD Lifecycle Scope](#crud-lifecycle-scope) example below. +~> **Note:** By default this resource _only_ invokes the function when the arguments call for a create or replace. 
After an initial invocation on _apply_, if the arguments do not change, a subsequent _apply_ does not invoke the function again. To dynamically invoke the function, see the `triggers` example below. To always invoke a function on each _apply_, see the [`aws_lambda_invocation` data source](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/lambda_invocation). To invoke the Lambda function when the Terraform resource is updated and deleted, see the [CRUD Lifecycle Management](#crud-lifecycle-management) example below. -~> **NOTE:** If you get a `KMSAccessDeniedException: Lambda was unable to decrypt the environment variables because KMS access was denied` error when invoking an [`aws_lambda_function`](/docs/providers/aws/r/lambda_function.html) with environment variables, the IAM role associated with the function may have been deleted and recreated _after_ the function was created. You can fix the problem two ways: 1) updating the function's role to another role and then updating it back again to the recreated role, or 2) by using Terraform to `taint` the function and `apply` your configuration again to recreate the function. (When you create a function, Lambda grants permissions on the KMS key to the function's IAM role. If the IAM role is recreated, the grant is no longer valid. Changing the function's role or recreating the function causes Lambda to update the grant.) +~> **Note:** If you get a `KMSAccessDeniedException: Lambda was unable to decrypt the environment variables because KMS access was denied` error when invoking a Lambda function with environment variables, the IAM role associated with the function may have been deleted and recreated after the function was created. You can fix the problem two ways: 1) updating the function's role to another role and then updating it back again to the recreated role, or 2) by using Terraform to `taint` the function and `apply` your configuration again to recreate the function. 
(When you create a function, Lambda grants permissions on the KMS key to the function's IAM role. If the IAM role is recreated, the grant is no longer valid. Changing the function's role or recreating the function causes Lambda to update the grant.) ## Example Usage -### Basic Example +### Basic Invocation ```typescript // DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug @@ -28,33 +28,49 @@ import { Fn, Token, TerraformOutput, TerraformStack } from "cdktf"; * Provider bindings are generated by running `cdktf get`. * See https://cdk.tf/provider-generation for more details. */ +import { LambdaFunction } from "./.gen/providers/aws/lambda-function"; import { LambdaInvocation } from "./.gen/providers/aws/lambda-invocation"; class MyConvertedCode extends TerraformStack { constructor(scope: Construct, name: string) { super(scope, name); - const example = new LambdaInvocation(this, "example", { - functionName: lambdaFunctionTest.functionName, + const example = new LambdaFunction(this, "example", { + filename: "function.zip", + functionName: "data_processor", + handler: "index.handler", + role: lambdaRole.arn, + runtime: "python3.12", + }); + const awsLambdaInvocationExample = new LambdaInvocation(this, "example_1", { + functionName: example.functionName, input: Token.asString( Fn.jsonencode({ - key1: "value1", - key2: "value2", + config: { + debug: false, + environment: "production", + }, + operation: "initialize", }) ), }); - new TerraformOutput(this, "result_entry", { - value: Fn.lookupNested(Fn.jsondecode(example.result), ['"key1"']), + /*This allows the Terraform resource name to match the original name. 
You can remove the call if you don't need them to match.*/ + awsLambdaInvocationExample.overrideLogicalId("example"); + new TerraformOutput(this, "initialization_result", { + value: Fn.lookupNested( + Fn.jsondecode(Token.asString(awsLambdaInvocationExample.result)), + ['"status"'] + ), }); } } ``` -### Dynamic Invocation Example Using Triggers +### Dynamic Invocation with Triggers ```typescript // DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug import { Construct } from "constructs"; -import { Fn, Token, TerraformStack } from "cdktf"; +import { Token, Fn, TerraformStack } from "cdktf"; /* * Provider bindings are generated by running `cdktf get`. * See https://cdk.tf/provider-generation for more details. @@ -64,21 +80,26 @@ class MyConvertedCode extends TerraformStack { constructor(scope: Construct, name: string) { super(scope, name); new LambdaInvocation(this, "example", { - functionName: lambdaFunctionTest.functionName, + functionName: Token.asString(awsLambdaFunctionExample.functionName), input: Token.asString( Fn.jsonencode({ - key1: "value1", - key2: "value2", + batch_id: batchId.result, + environment: environment.value, + operation: "process_data", }) ), triggers: { - redeployment: Token.asString( - Fn.sha1( + config_hash: Token.asString( + Fn.sha256( Token.asString( - Fn.jsonencode([awsLambdaFunctionExample.environment]) + Fn.jsonencode({ + environment: environment.value, + timestamp: Fn.timestamp(), + }) ) ) ), + function_version: Token.asString(awsLambdaFunctionExample.version), }, }); } @@ -86,12 +107,12 @@ class MyConvertedCode extends TerraformStack { ``` -### CRUD Lifecycle Scope +### CRUD Lifecycle Management ```typescript // DO NOT EDIT. 
Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug import { Construct } from "constructs"; -import { Fn, Token, TerraformStack } from "cdktf"; +import { Token, Fn, TerraformStack } from "cdktf"; /* * Provider bindings are generated by running `cdktf get`. * See https://cdk.tf/provider-generation for more details. @@ -101,11 +122,15 @@ class MyConvertedCode extends TerraformStack { constructor(scope: Construct, name: string) { super(scope, name); new LambdaInvocation(this, "example", { - functionName: lambdaFunctionTest.functionName, + functionName: Token.asString(awsLambdaFunctionExample.functionName), input: Token.asString( Fn.jsonencode({ - key1: "value1", - key2: "value2", + credentials: { + password: dbPassword.value, + username: dbUsername.value, + }, + database_url: awsDbInstanceExample.endpoint, + resource_name: "database_setup", }) ), lifecycleScope: "CRUD", @@ -115,19 +140,23 @@ class MyConvertedCode extends TerraformStack { ``` -~> **NOTE:** `lifecycle_scope = "CRUD"` will inject a key `tf` in the input event to pass lifecycle information! This allows the lambda function to handle different lifecycle transitions uniquely. If you need to use a key `tf` in your own input JSON, the default key name can be overridden with the `terraformKey` argument. +~> **Note:** `lifecycle_scope = "CRUD"` will inject a key `tf` in the input event to pass lifecycle information! This allows the Lambda function to handle different lifecycle transitions uniquely. If you need to use a key `tf` in your own input JSON, the default key name can be overridden with the `terraformKey` argument. -The key `tf` gets added with subkeys: +The lifecycle key gets added with subkeys: * `action` - Action Terraform performs on the resource. Values are `create`, `update`, or `delete`. * `prev_input` - Input JSON payload from the previous invocation. This can be used to handle update and delete events. 
-When the resource from the example above is created, the Lambda will get following JSON payload: +When the resource from the CRUD example above is created, the Lambda will receive the following JSON payload: ```json { - "key1": "value1", - "key2": "value2", + "resource_name": "database_setup", + "database_url": "mydb.cluster-xyz.us-west-2.rds.amazonaws.com:5432", + "credentials": { + "username": "admin", + "password": "secret123" + }, "tf": { "action": "create", "prev_input": null @@ -135,33 +164,49 @@ When the resource from the example above is created, the Lambda will get followi } ``` -If the input value of `key1` changes to "valueB", then the lambda will be invoked again with the following JSON payload: +If the `databaseUrl` changes, the Lambda will be invoked again with: ```json { - "key1": "valueB", - "key2": "value2", + "resource_name": "database_setup", + "database_url": "mydb-new.cluster-abc.us-west-2.rds.amazonaws.com:5432", + "credentials": { + "username": "admin", + "password": "secret123" + }, "tf": { "action": "update", "prev_input": { - "key1": "value1", - "key2": "value2" + "resource_name": "database_setup", + "database_url": "mydb.cluster-xyz.us-west-2.rds.amazonaws.com:5432", + "credentials": { + "username": "admin", + "password": "secret123" + } } } } ``` -When the invocation resource is removed, the final invocation will have the following JSON payload: +When the invocation resource is removed, the final invocation will have: ```json { - "key1": "valueB", - "key2": "value2", + "resource_name": "database_setup", + "database_url": "mydb-new.cluster-abc.us-west-2.rds.amazonaws.com:5432", + "credentials": { + "username": "admin", + "password": "secret123" + }, "tf": { "action": "delete", "prev_input": { - "key1": "valueB", - "key2": "value2" + "resource_name": "database_setup", + "database_url": "mydb-new.cluster-abc.us-west-2.rds.amazonaws.com:5432", + "credentials": { + "username": "admin", + "password": "secret123" + } } } } @@ -171,20 +216,21 
@@ When the invocation resource is removed, the final invocation will have the foll The following arguments are required: -* `functionName` - (Required) Name of the lambda function. -* `input` - (Required) JSON payload to the lambda function. +* `functionName` - (Required) Name of the Lambda function. +* `input` - (Required) JSON payload to the Lambda function. The following arguments are optional: * `lifecycleScope` - (Optional) Lifecycle scope of the resource to manage. Valid values are `CREATE_ONLY` and `CRUD`. Defaults to `CREATE_ONLY`. `CREATE_ONLY` will invoke the function only on creation or replacement. `CRUD` will invoke the function on each lifecycle event, and augment the input JSON payload with additional lifecycle information. -* `qualifier` - (Optional) Qualifier (i.e., version) of the lambda function. Defaults to `$LATEST`. -* `terraformKey` - (Optional) The JSON key used to store lifecycle information in the input JSON payload. Defaults to `tf`. This additional key is only included when `lifecycleScope` is set to `CRUD`. -* `triggers` - (Optional) Map of arbitrary keys and values that, when changed, will trigger a re-invocation. To force a re-invocation without changing these keys/values, use the [`terraform taint` command](https://www.terraform.io/docs/commands/taint.html). +* `qualifier` - (Optional) Qualifier (i.e., version) of the Lambda function. Defaults to `$LATEST`. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `terraformKey` - (Optional) JSON key used to store lifecycle information in the input JSON payload. Defaults to `tf`. This additional key is only included when `lifecycleScope` is set to `CRUD`. 
+* `triggers` - (Optional) Map of arbitrary keys and values that, when changed, will trigger a re-invocation. To force a re-invocation without changing these keys/values, use the [`terraform taint` command](https://developer.hashicorp.com/terraform/cli/commands/taint). ## Attribute Reference This resource exports the following attributes in addition to the arguments above: -* `result` - String result of the lambda function invocation. +* `result` - String result of the Lambda function invocation. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/lambda_layer_version.html.markdown b/website/docs/cdktf/typescript/r/lambda_layer_version.html.markdown index 3fb0631360a3..a2f2611aea2d 100644 --- a/website/docs/cdktf/typescript/r/lambda_layer_version.html.markdown +++ b/website/docs/cdktf/typescript/r/lambda_layer_version.html.markdown @@ -3,21 +3,23 @@ subcategory: "Lambda" layout: "aws" page_title: "AWS: aws_lambda_layer_version" description: |- - Provides a Lambda Layer Version resource. Lambda Layers allow you to reuse shared bits of code across multiple lambda functions. + Manages an AWS Lambda Layer Version. --- # Resource: aws_lambda_layer_version -Provides a Lambda Layer Version resource. Lambda Layers allow you to reuse shared bits of code across multiple lambda functions. +Manages an AWS Lambda Layer Version. Use this resource to share code and dependencies across multiple Lambda functions. -For information about Lambda Layers and how to use them, see [AWS Lambda Layers][1]. +For information about Lambda Layers and how to use them, see [AWS Lambda Layers](https://docs.aws.amazon.com/lambda/latest/dg/configuration-layers.html). -~> **NOTE:** Setting `skipDestroy` to `true` means that the AWS Provider will _not_ destroy any layer version, even when running `terraform destroy`. 
Layer versions are thus intentional dangling resources that are _not_ managed by Terraform and may incur extra expense in your AWS account. +~> **Note:** Setting `skipDestroy` to `true` means that the AWS Provider will not destroy any layer version, even when running `terraform destroy`. Layer versions are thus intentional dangling resources that are not managed by Terraform and may incur extra expense in your AWS account. ## Example Usage +### Basic Layer + ```typescript // DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug import { Construct } from "constructs"; @@ -30,7 +32,7 @@ import { LambdaLayerVersion } from "./.gen/providers/aws/lambda-layer-version"; class MyConvertedCode extends TerraformStack { constructor(scope: Construct, name: string) { super(scope, name); - new LambdaLayerVersion(this, "lambda_layer", { + new LambdaLayerVersion(this, "example", { compatibleRuntimes: ["nodejs20.x"], filename: "lambda_layer_payload.zip", layerName: "lambda_layer_name", @@ -40,14 +42,72 @@ class MyConvertedCode extends TerraformStack { ``` +### Layer with S3 Source + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. + */ +import { LambdaLayerVersion } from "./.gen/providers/aws/lambda-layer-version"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + new LambdaLayerVersion(this, "example", { + compatibleArchitectures: ["x86_64", "arm64"], + compatibleRuntimes: ["nodejs20.x", "python3.12"], + layerName: "lambda_layer_name", + s3Bucket: lambdaLayerZip.bucket, + s3Key: lambdaLayerZip.key, + }); + } +} + +``` + +### Layer with Multiple Runtimes and Architectures + +```typescript +// DO NOT EDIT. 
Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { Fn, Token, TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. + */ +import { LambdaLayerVersion } from "./.gen/providers/aws/lambda-layer-version"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + new LambdaLayerVersion(this, "example", { + compatibleArchitectures: ["x86_64", "arm64"], + compatibleRuntimes: [ + "nodejs18.x", + "nodejs20.x", + "python3.11", + "python3.12", + ], + description: "Shared utilities for Lambda functions", + filename: "lambda_layer_payload.zip", + layerName: "multi_runtime_layer", + licenseInfo: "MIT", + sourceCodeHash: Token.asString( + Fn.filebase64sha256("lambda_layer_payload.zip") + ), + }); + } +} + +``` + ## Specifying the Deployment Package -AWS Lambda Layers expect source code to be provided as a deployment package whose structure varies depending on which `compatibleRuntimes` this layer specifies. -See [Runtimes][2] for the valid values of `compatibleRuntimes`. +AWS Lambda Layers expect source code to be provided as a deployment package whose structure varies depending on which `compatibleRuntimes` this layer specifies. See [Runtimes](https://docs.aws.amazon.com/lambda/latest/dg/API_PublishLayerVersion.html#SSS-PublishLayerVersion-request-CompatibleRuntimes) for the valid values of `compatibleRuntimes`. -Once you have created your deployment package you can specify it either directly as a local file (using the `filename` argument) or -indirectly via Amazon S3 (using the `s3Bucket`, `s3Key` and `s3ObjectVersion` arguments). When providing the deployment -package via S3 it may be useful to use [the `aws_s3_object` resource](s3_object.html) to upload it. 
+Once you have created your deployment package you can specify it either directly as a local file (using the `filename` argument) or indirectly via Amazon S3 (using the `s3Bucket`, `s3Key` and `s3ObjectVersion` arguments). When providing the deployment package via S3 it may be useful to use [the `aws_s3_object` resource](s3_object.html) to upload it. For larger deployment packages it is recommended by Amazon to upload via S3, since the S3 API has better support for uploading large files efficiently. @@ -55,20 +115,21 @@ For larger deployment packages it is recommended by Amazon to upload via S3, sin The following arguments are required: -* `layerName` - (Required) Unique name for your Lambda Layer +* `layerName` - (Required) Unique name for your Lambda Layer. The following arguments are optional: -* `compatibleArchitectures` - (Optional) List of [Architectures][4] this layer is compatible with. Currently `x86_64` and `arm64` can be specified. -* `compatibleRuntimes` - (Optional) List of [Runtimes][2] this layer is compatible with. Up to 15 runtimes can be specified. +* `compatibleArchitectures` - (Optional) List of [Architectures](https://docs.aws.amazon.com/lambda/latest/dg/API_PublishLayerVersion.html#SSS-PublishLayerVersion-request-CompatibleArchitectures) this layer is compatible with. Currently `x86_64` and `arm64` can be specified. +* `compatibleRuntimes` - (Optional) List of [Runtimes](https://docs.aws.amazon.com/lambda/latest/dg/API_PublishLayerVersion.html#SSS-PublishLayerVersion-request-CompatibleRuntimes) this layer is compatible with. Up to 15 runtimes can be specified. * `description` - (Optional) Description of what your Lambda Layer does. -* `filename` (Optional) Path to the function's deployment package within the local filesystem. If defined, The `s3_`-prefixed options cannot be used. -* `licenseInfo` - (Optional) License info for your Lambda Layer. See [License Info][3]. 
+* `filename` - (Optional) Path to the function's deployment package within the local filesystem. If defined, The `s3_`-prefixed options cannot be used. +* `licenseInfo` - (Optional) License info for your Lambda Layer. See [License Info](https://docs.aws.amazon.com/lambda/latest/dg/API_PublishLayerVersion.html#SSS-PublishLayerVersion-request-LicenseInfo). +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `s3Bucket` - (Optional) S3 bucket location containing the function's deployment package. Conflicts with `filename`. This bucket must reside in the same AWS region where you are creating the Lambda function. * `s3Key` - (Optional) S3 key of an object containing the function's deployment package. Conflicts with `filename`. * `s3ObjectVersion` - (Optional) Object version containing the function's deployment package. Conflicts with `filename`. * `skipDestroy` - (Optional) Whether to retain the old version of a previously deployed Lambda Layer. Default is `false`. When this is not set to `true`, changing any of `compatibleArchitectures`, `compatibleRuntimes`, `description`, `filename`, `layerName`, `licenseInfo`, `s3Bucket`, `s3Key`, `s3ObjectVersion`, or `sourceCodeHash` forces deletion of the existing layer version and creation of a new layer version. -* `sourceCodeHash` - (Optional) Virtual attribute used to trigger replacement when source code changes. Must be set to a base64-encoded SHA256 hash of the package file specified with either `filename` or `s3Key`. The usual way to set this is `${filebase64sha256("file.zip")}` (Terraform 0.11.12 or later) or `${base64sha256(file("file.zip"))}` (Terraform 0.11.11 and earlier), where "file.zip" is the local filename of the lambda layer source archive. 
+* `sourceCodeHash` - (Optional) Virtual attribute used to trigger replacement when source code changes. Must be set to a base64-encoded SHA256 hash of the package file specified with either `filename` or `s3Key`. The usual way to set this is `filebase64sha256("file.zip")` (Terraform 0.11.12 or later) or `base64sha256(file("file.zip"))` (Terraform 0.11.11 and earlier), where "file.zip" is the local filename of the lambda layer source archive. ## Attribute Reference @@ -83,11 +144,6 @@ This resource exports the following attributes in addition to the arguments abov * `sourceCodeSize` - Size in bytes of the function .zip file. * `version` - Lambda Layer version. -[1]: https://docs.aws.amazon.com/lambda/latest/dg/configuration-layers.html -[2]: https://docs.aws.amazon.com/lambda/latest/dg/API_PublishLayerVersion.html#SSS-PublishLayerVersion-request-CompatibleRuntimes -[3]: https://docs.aws.amazon.com/lambda/latest/dg/API_PublishLayerVersion.html#SSS-PublishLayerVersion-request-LicenseInfo -[4]: https://docs.aws.amazon.com/lambda/latest/dg/API_PublishLayerVersion.html#SSS-PublishLayerVersion-request-CompatibleArchitectures - ## Import In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Lambda Layers using `arn`. For example: @@ -106,8 +162,8 @@ class MyConvertedCode extends TerraformStack { super(scope, name); LambdaLayerVersion.generateConfigForImport( this, - "testLayer", - "arn:aws:lambda:_REGION_:_ACCOUNT_ID_:layer:_LAYER_NAME_:_LAYER_VERSION_" + "example", + "arn:aws:lambda:us-west-2:123456789012:layer:example:1" ); } } @@ -117,9 +173,7 @@ class MyConvertedCode extends TerraformStack { Using `terraform import`, import Lambda Layers using `arn`. 
For example: ```console -% terraform import \ - aws_lambda_layer_version.test_layer \ - arn:aws:lambda:_REGION_:_ACCOUNT_ID_:layer:_LAYER_NAME_:_LAYER_VERSION_ +% terraform import aws_lambda_layer_version.example arn:aws:lambda:us-west-2:123456789012:layer:example:1 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/lambda_layer_version_permission.html.markdown b/website/docs/cdktf/typescript/r/lambda_layer_version_permission.html.markdown index 35a2d1e118e3..4753fa6e3517 100644 --- a/website/docs/cdktf/typescript/r/lambda_layer_version_permission.html.markdown +++ b/website/docs/cdktf/typescript/r/lambda_layer_version_permission.html.markdown @@ -3,39 +3,144 @@ subcategory: "Lambda" layout: "aws" page_title: "AWS: aws_lambda_layer_version_permission" description: |- - Provides a Lambda Layer Version Permission resource. + Manages an AWS Lambda Layer Version Permission. --- # Resource: aws_lambda_layer_version_permission -Provides a Lambda Layer Version Permission resource. It allows you to share you own Lambda Layers to another account by account ID, to all accounts in AWS organization or even to all AWS accounts. +Manages an AWS Lambda Layer Version Permission. Use this resource to share Lambda Layers with other AWS accounts, organizations, or make them publicly accessible. -For information about Lambda Layer Permissions and how to use them, see [Using Resource-based Policies for AWS Lambda][1] +For information about Lambda Layer Permissions and how to use them, see [Using Resource-based Policies for AWS Lambda](https://docs.aws.amazon.com/lambda/latest/dg/access-control-resource-based.html#permissions-resource-xaccountlayer). -~> **NOTE:** Setting `skipDestroy` to `true` means that the AWS Provider will _not_ destroy any layer version permission, even when running `terraform destroy`. 
Layer version permissions are thus intentional dangling resources that are _not_ managed by Terraform and may incur extra expense in your AWS account. +~> **Note:** Setting `skipDestroy` to `true` means that the AWS Provider will not destroy any layer version permission, even when running `terraform destroy`. Layer version permissions are thus intentional dangling resources that are not managed by Terraform and may incur extra expense in your AWS account. ## Example Usage +### Share Layer with Specific Account + ```typescript // DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug import { Construct } from "constructs"; -import { TerraformStack } from "cdktf"; +import { Token, TerraformStack } from "cdktf"; /* * Provider bindings are generated by running `cdktf get`. * See https://cdk.tf/provider-generation for more details. */ +import { LambdaLayerVersion } from "./.gen/providers/aws/lambda-layer-version"; import { LambdaLayerVersionPermission } from "./.gen/providers/aws/lambda-layer-version-permission"; class MyConvertedCode extends TerraformStack { constructor(scope: Construct, name: string) { super(scope, name); - new LambdaLayerVersionPermission(this, "lambda_layer_permission", { + const example = new LambdaLayerVersion(this, "example", { + compatibleRuntimes: ["nodejs20.x", "python3.12"], + description: "Common utilities for Lambda functions", + filename: "layer.zip", + layerName: "shared_utilities", + }); + const awsLambdaLayerVersionPermissionExample = + new LambdaLayerVersionPermission(this, "example_1", { + action: "lambda:GetLayerVersion", + layerName: example.layerName, + principal: "123456789012", + statementId: "dev-account-access", + versionNumber: Token.asNumber(example.version), + }); + /*This allows the Terraform resource name to match the original name. 
You can remove the call if you don't need them to match.*/ + awsLambdaLayerVersionPermissionExample.overrideLogicalId("example"); + } +} + +``` + +### Share Layer with Organization + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { Token, TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. + */ +import { LambdaLayerVersionPermission } from "./.gen/providers/aws/lambda-layer-version-permission"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + new LambdaLayerVersionPermission(this, "example", { action: "lambda:GetLayerVersion", - layerName: "arn:aws:lambda:us-west-2:123456654321:layer:test_layer1", + layerName: Token.asString(awsLambdaLayerVersionExample.layerName), + organizationId: "o-1234567890", + principal: "*", + statementId: "org-wide-access", + versionNumber: Token.asNumber(awsLambdaLayerVersionExample.version), + }); + } +} + +``` + +### Share Layer Publicly + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { Token, TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. 
+ */ +import { LambdaLayerVersionPermission } from "./.gen/providers/aws/lambda-layer-version-permission"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + new LambdaLayerVersionPermission(this, "example", { + action: "lambda:GetLayerVersion", + layerName: Token.asString(awsLambdaLayerVersionExample.layerName), + principal: "*", + statementId: "public-access", + versionNumber: Token.asNumber(awsLambdaLayerVersionExample.version), + }); + } +} + +``` + +### Multiple Account Access + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { Token, TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. + */ +import { LambdaLayerVersionPermission } from "./.gen/providers/aws/lambda-layer-version-permission"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + new LambdaLayerVersionPermission(this, "dev_account", { + action: "lambda:GetLayerVersion", + layerName: example.layerName, principal: "111111111111", statementId: "dev-account", - versionNumber: 1, + versionNumber: Token.asNumber(example.version), + }); + new LambdaLayerVersionPermission(this, "prod_account", { + action: "lambda:GetLayerVersion", + layerName: example.layerName, + principal: "333333333333", + statementId: "prod-account", + versionNumber: Token.asNumber(example.version), + }); + new LambdaLayerVersionPermission(this, "staging_account", { + action: "lambda:GetLayerVersion", + layerName: example.layerName, + principal: "222222222222", + statementId: "staging-account", + versionNumber: Token.asNumber(example.version), }); } } @@ -44,23 +149,27 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -This resource supports the following arguments: 
+The following arguments are required: + +* `action` - (Required) Action that will be allowed. `lambda:GetLayerVersion` is the standard value for layer access. +* `layerName` - (Required) Name or ARN of the Lambda Layer. +* `principal` - (Required) AWS account ID that should be able to use your Lambda Layer. Use `*` to share with all AWS accounts. +* `statementId` - (Required) Unique identifier for the permission statement. +* `versionNumber` - (Required) Version of Lambda Layer to grant access to. Note: permissions only apply to a single version of a layer. -* `action` - (Required) Action, which will be allowed. `lambda:GetLayerVersion` value is suggested by AWS documantation. -* `layerName` (Required) The name or ARN of the Lambda Layer, which you want to grant access to. -* `organizationId` - (Optional) An identifier of AWS Organization, which should be able to use your Lambda Layer. `principal` should be equal to `*` if `organizationId` provided. -* `principal` - (Required) AWS account ID which should be able to use your Lambda Layer. `*` can be used here, if you want to share your Lambda Layer widely. -* `statementId` - (Required) The name of Lambda Layer Permission, for example `dev-account` - human readable note about what is this permission for. -* `versionNumber` (Required) Version of Lambda Layer, which you want to grant access to. Note: permissions only apply to a single version of a layer. -* `skipDestroy` - (Optional) Whether to retain the old version of a previously deployed Lambda Layer. Default is `false`. When this is not set to `true`, changing any of `compatibleArchitectures`, `compatibleRuntimes`, `description`, `filename`, `layerName`, `licenseInfo`, `s3Bucket`, `s3Key`, `s3ObjectVersion`, or `sourceCodeHash` forces deletion of the existing layer version and creation of a new layer version. +The following arguments are optional: + +* `organizationId` - (Optional) AWS Organization ID that should be able to use your Lambda Layer. 
`principal` should be set to `*` when `organizationId` is provided. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `skipDestroy` - (Optional) Whether to retain the permission when the resource is destroyed. Default is `false`. ## Attribute Reference This resource exports the following attributes in addition to the arguments above: -* `id` - The `layerName` and `versionNumber`, separated by a comma (`,`). -* `revisionId` - A unique identifier for the current revision of the policy. +* `id` - Layer name and version number, separated by a comma (`,`). * `policy` - Full Lambda Layer Permission policy. +* `revisionId` - Unique identifier for the current revision of the policy. ## Import @@ -81,19 +190,17 @@ class MyConvertedCode extends TerraformStack { LambdaLayerVersionPermission.generateConfigForImport( this, "example", - "arn:aws:lambda:us-west-2:123456654321:layer:test_layer1,1" + "arn:aws:lambda:us-west-2:123456789012:layer:shared_utilities,1" ); } } ``` -Using `terraform import`, import Lambda Layer Permissions using `layerName` and `versionNumber`, separated by a comma (`,`). 
For example: +For backwards compatibility, the following legacy `terraform import` command is also supported: ```console -% terraform import aws_lambda_layer_version_permission.example arn:aws:lambda:us-west-2:123456654321:layer:test_layer1,1 +% terraform import aws_lambda_layer_version_permission.example arn:aws:lambda:us-west-2:123456789012:layer:shared_utilities,1 ``` -[1]: https://docs.aws.amazon.com/lambda/latest/dg/access-control-resource-based.html#permissions-resource-xaccountlayer - - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/lambda_permission.html.markdown b/website/docs/cdktf/typescript/r/lambda_permission.html.markdown index 55b3d3fc0069..1478d706e238 100644 --- a/website/docs/cdktf/typescript/r/lambda_permission.html.markdown +++ b/website/docs/cdktf/typescript/r/lambda_permission.html.markdown @@ -3,18 +3,18 @@ subcategory: "Lambda" layout: "aws" page_title: "AWS: aws_lambda_permission" description: |- - Creates a Lambda function permission. + Manages an AWS Lambda permission. --- # Resource: aws_lambda_permission -Gives an external source (like an EventBridge Rule, SNS, or S3) permission to access the Lambda function. +Manages an AWS Lambda permission. Use this resource to grant external sources (e.g., EventBridge Rules, SNS, or S3) permission to invoke Lambda functions. ## Example Usage -### Basic Usage +### Basic Usage with EventBridge ```typescript // DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug @@ -75,7 +75,7 @@ class MyConvertedCode extends TerraformStack { ``` -### With SNS +### SNS Integration ```typescript // DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug @@ -140,7 +140,7 @@ class MyConvertedCode extends TerraformStack { ``` -### With API Gateway REST API +### API Gateway REST API Integration ```typescript // DO NOT EDIT. 
Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug @@ -171,7 +171,7 @@ class MyConvertedCode extends TerraformStack { ``` -### With CloudWatch Log Group +### CloudWatch Log Group Integration ```typescript // DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug @@ -243,7 +243,7 @@ class MyConvertedCode extends TerraformStack { ``` -### With Cross-Account Invocation Policy +### Cross-Account Function URL Access ```typescript // DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug @@ -276,9 +276,7 @@ class MyConvertedCode extends TerraformStack { ``` -### With `replace_triggered_by` Lifecycle Configuration - -If omitting the `qualifier` argument (which forces re-creation each time a function version is published), a `lifecycle` block can be used to ensure permissions are re-applied on any change to the underlying function. +### Automatic Permission Updates with Function Changes ```typescript // DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug @@ -308,27 +306,23 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -This resource supports the following arguments: - -* `action` - (Required) The AWS Lambda action you want to allow in this statement. (e.g., `lambda:InvokeFunction`) -* `eventSourceToken` - (Optional) The Event Source Token to validate. Used with [Alexa Skills][1]. -* `functionName` - (Required) Name of the Lambda function whose resource policy you are updating -* `functionUrlAuthType` - (Optional) Lambda Function URLs [authentication type][3]. Valid values are: `AWS_IAM` or `NONE`. Only supported for `lambda:InvokeFunctionUrl` action. -* `principal` - (Required) The principal who is getting this permission e.g., `s3.amazonaws.com`, an AWS account ID, or AWS IAM principal, or AWS service principal such as `events.amazonaws.com` or `sns.amazonaws.com`. 
-* `qualifier` - (Optional) Query parameter to specify function version or alias name. The permission will then apply to the specific qualified ARN e.g., `arn:aws:lambda:aws-region:acct-id:function:function-name:2` -* `sourceAccount` - (Optional) This parameter is used when allowing cross-account access, or for S3 and SES. The AWS account ID (without a hyphen) of the source owner. -* `sourceArn` - (Optional) When the principal is an AWS service, the ARN of the specific resource within that service to grant permission to. - Without this, any resource from `principal` will be granted permission – even if that resource is from another account. - For S3, this should be the ARN of the S3 Bucket. - For EventBridge events, this should be the ARN of the EventBridge Rule. - For API Gateway, this should be the ARN of the API, as described [here][2]. -* `statementId` - (Optional) A unique statement identifier. By default generated by Terraform. -* `statementIdPrefix` - (Optional) A statement identifier prefix. Terraform will generate a unique suffix. Conflicts with `statementId`. -* `principalOrgId` - (Optional) The identifier for your organization in AWS Organizations. Use this to grant permissions to all the AWS accounts under this organization. 
- -[1]: https://developer.amazon.com/docs/custom-skills/host-a-custom-skill-as-an-aws-lambda-function.html#use-aws-cli -[2]: https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-control-access-using-iam-policies-to-invoke-api.html -[3]: https://docs.aws.amazon.com/lambda/latest/dg/urls-auth.html +The following arguments are required: + +* `action` - (Required) Lambda action to allow in this statement (e.g., `lambda:InvokeFunction`) +* `functionName` - (Required) Name or ARN of the Lambda function +* `principal` - (Required) AWS service or account that invokes the function (e.g., `s3.amazonaws.com`, `sns.amazonaws.com`, AWS account ID, or AWS IAM principal) + +The following arguments are optional: + +* `eventSourceToken` - (Optional) Event Source Token for Alexa Skills +* `functionUrlAuthType` - (Optional) Lambda Function URL authentication type. Valid values: `AWS_IAM` or `NONE`. Only valid with `lambda:InvokeFunctionUrl` action +* `principalOrgId` - (Optional) AWS Organizations ID to grant permission to all accounts under this organization +* `qualifier` - (Optional) Lambda function version or alias name +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference) +* `sourceAccount` - (Optional) AWS account ID of the source owner for cross-account access, S3, or SES +* `sourceArn` - (Optional) ARN of the source resource granting permission to invoke the Lambda function +* `statementId` - (Optional) Statement identifier. Generated by Terraform if not provided +* `statementIdPrefix` - (Optional) Statement identifier prefix. Conflicts with `statementId` ## Attribute Reference @@ -336,6 +330,35 @@ This resource exports no additional attributes. 
## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_lambda_permission.example + identity = { + function_name = "my_test_lambda_function" + statement_id = "AllowExecutionFromCloudWatch" + } +} + +resource "aws_lambda_permission" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `functionName` (String) Lambda function name. +* `statementId` (String) Statement ID for the permission. + +#### Optional + +* `accountId` (String) AWS Account where this resource is managed. +* `qualifier` (String) Qualifier for the function version or alias. +* `region` (String) Region where this resource is managed. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Lambda permission statements using function_name/statement_id with an optional qualifier. For example: ```typescript @@ -352,7 +375,7 @@ class MyConvertedCode extends TerraformStack { super(scope, name); LambdaPermission.generateConfigForImport( this, - "testLambdaPermission", + "example", "my_test_lambda_function/AllowExecutionFromCloudWatch" ); } @@ -360,6 +383,8 @@ class MyConvertedCode extends TerraformStack { ``` +Using `qualifier`: + ```typescript // DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug import { Construct } from "constructs"; @@ -374,7 +399,7 @@ class MyConvertedCode extends TerraformStack { super(scope, name); LambdaPermission.generateConfigForImport( this, - "testLambdaPermission", + "example", "my_test_lambda_function:qualifier_name/AllowExecutionFromCloudWatch" ); } @@ -382,14 +407,11 @@ class MyConvertedCode extends TerraformStack { ``` -Using `terraform import`, import Lambda permission statements using function_name/statement_id with an optional qualifier. 
For example: - -```console -% terraform import aws_lambda_permission.test_lambda_permission my_test_lambda_function/AllowExecutionFromCloudWatch -``` +For backwards compatibility, the following legacy `terraform import` commands are also supported: ```console -% terraform import aws_lambda_permission.test_lambda_permission my_test_lambda_function:qualifier_name/AllowExecutionFromCloudWatch +% terraform import aws_lambda_permission.example my_test_lambda_function/AllowExecutionFromCloudWatch +% terraform import aws_lambda_permission.example my_test_lambda_function:qualifier_name/AllowExecutionFromCloudWatch ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/lambda_provisioned_concurrency_config.html.markdown b/website/docs/cdktf/typescript/r/lambda_provisioned_concurrency_config.html.markdown index 89612af96ff6..4d3280b02e34 100644 --- a/website/docs/cdktf/typescript/r/lambda_provisioned_concurrency_config.html.markdown +++ b/website/docs/cdktf/typescript/r/lambda_provisioned_concurrency_config.html.markdown @@ -3,16 +3,16 @@ subcategory: "Lambda" layout: "aws" page_title: "AWS: aws_lambda_provisioned_concurrency_config" description: |- - Manages a Lambda Provisioned Concurrency Configuration + Manages an AWS Lambda Provisioned Concurrency Configuration. --- # Resource: aws_lambda_provisioned_concurrency_config -Manages a Lambda Provisioned Concurrency Configuration. +Manages an AWS Lambda Provisioned Concurrency Configuration. Use this resource to configure provisioned concurrency for Lambda functions. -~> **NOTE:** Setting `skipDestroy` to `true` means that the AWS Provider will _not_ destroy a provisioned concurrency configuration, even when running `terraform destroy`. The configuration is thus an intentional dangling resource that is _not_ managed by Terraform and may incur extra expense in your AWS account. 
+~> **Note:** Setting `skipDestroy` to `true` means that the AWS Provider will not destroy a provisioned concurrency configuration, even when running `terraform destroy`. The configuration is thus an intentional dangling resource that is not managed by Terraform and may incur extra expense in your AWS account. ## Example Usage @@ -69,12 +69,13 @@ class MyConvertedCode extends TerraformStack { The following arguments are required: * `functionName` - (Required) Name or Amazon Resource Name (ARN) of the Lambda Function. -* `provisionedConcurrentExecutions` - (Required) Amount of capacity to allocate. Must be greater than or equal to `1`. +* `provisionedConcurrentExecutions` - (Required) Amount of capacity to allocate. Must be greater than or equal to 1. * `qualifier` - (Required) Lambda Function version or Lambda Alias name. The following arguments are optional: -* `skipDestroy` - (Optional) Whether to retain the provisoned concurrency configuration upon destruction. Defaults to `false`. If set to `true`, the resource in simply removed from state instead. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `skipDestroy` - (Optional) Whether to retain the provisioned concurrency configuration upon destruction. Defaults to `false`. If set to `true`, the resource is simply removed from state instead. 
## Attribute Reference @@ -108,7 +109,7 @@ class MyConvertedCode extends TerraformStack { LambdaProvisionedConcurrencyConfig.generateConfigForImport( this, "example", - "my_function,production" + "example,production" ); } } @@ -118,7 +119,7 @@ class MyConvertedCode extends TerraformStack { Using `terraform import`, import a Lambda Provisioned Concurrency Configuration using the `functionName` and `qualifier` separated by a comma (`,`). For example: ```console -% terraform import aws_lambda_provisioned_concurrency_config.example my_function,production +% terraform import aws_lambda_provisioned_concurrency_config.example example,production ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/lambda_runtime_management_config.html.markdown b/website/docs/cdktf/typescript/r/lambda_runtime_management_config.html.markdown index bba2ab336d98..584a929657df 100644 --- a/website/docs/cdktf/typescript/r/lambda_runtime_management_config.html.markdown +++ b/website/docs/cdktf/typescript/r/lambda_runtime_management_config.html.markdown @@ -3,18 +3,17 @@ subcategory: "Lambda" layout: "aws" page_title: "AWS: aws_lambda_runtime_management_config" description: |- - Terraform resource for managing an AWS Lambda Runtime Management Config. + Manages an AWS Lambda Runtime Management Config. --- # Resource: aws_lambda_runtime_management_config -Terraform resource for managing an AWS Lambda Runtime Management Config. +Manages an AWS Lambda Runtime Management Config. Use this resource to control how Lambda updates the runtime for your function. Refer to the [AWS Lambda documentation](https://docs.aws.amazon.com/lambda/latest/dg/lambda-runtimes.html) for supported runtimes. -~> Deletion of this resource returns the runtime update mode to `Auto` (the default behavior). 
-To leave the configured runtime management options in-place, use a [`removed` block](https://developer.hashicorp.com/terraform/language/resources/syntax#removing-resources) with the destroy lifecycle set to `false`. +~> **Note:** Deletion of this resource returns the runtime update mode to `Auto` (the default behavior). To leave the configured runtime management options in-place, use a [`removed` block](https://developer.hashicorp.com/terraform/language/resources/syntax#removing-resources) with the destroy lifecycle set to `false`. ## Example Usage @@ -23,7 +22,7 @@ To leave the configured runtime management options in-place, use a [`removed` bl ```typescript // DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug import { Construct } from "constructs"; -import { TerraformStack } from "cdktf"; +import { Token, TerraformStack } from "cdktf"; /* * Provider bindings are generated by running `cdktf get`. * See https://cdk.tf/provider-generation for more details. @@ -33,7 +32,7 @@ class MyConvertedCode extends TerraformStack { constructor(scope: Construct, name: string) { super(scope, name); new LambdaRuntimeManagementConfig(this, "example", { - functionName: test.functionName, + functionName: Token.asString(awsLambdaFunctionExample.functionName), updateRuntimeOn: "FunctionUpdate", }); } @@ -41,12 +40,12 @@ class MyConvertedCode extends TerraformStack { ``` -### `Manual` Update +### Manual Update ```typescript // DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug import { Construct } from "constructs"; -import { TerraformStack } from "cdktf"; +import { Token, TerraformStack } from "cdktf"; /* * Provider bindings are generated by running `cdktf get`. * See https://cdk.tf/provider-generation for more details. 
@@ -56,7 +55,7 @@ class MyConvertedCode extends TerraformStack { constructor(scope: Construct, name: string) { super(scope, name); new LambdaRuntimeManagementConfig(this, "example", { - functionName: test.functionName, + functionName: Token.asString(awsLambdaFunctionExample.functionName), runtimeVersionArn: "arn:aws:lambda:us-east-1::runtime:abcd1234", updateRuntimeOn: "Manual", }); @@ -65,7 +64,7 @@ class MyConvertedCode extends TerraformStack { ``` -~> Once the runtime update mode is set to `Manual`, the `aws_lambda_function` `runtime` cannot be updated. To upgrade a runtime, the `updateRuntimeOn` argument must be set to `Auto` or `FunctionUpdate` prior to changing the function's `runtime` argument. +~> **Note:** Once the runtime update mode is set to `Manual`, the `aws_lambda_function` `runtime` cannot be updated. To upgrade a runtime, the `updateRuntimeOn` argument must be set to `Auto` or `FunctionUpdate` prior to changing the function's `runtime` argument. ## Argument Reference @@ -76,6 +75,7 @@ The following arguments are required: The following arguments are optional: * `qualifier` - (Optional) Version of the function. This can be `$LATEST` or a published version number. If omitted, this resource will manage the runtime configuration for `$LATEST`. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `runtimeVersionArn` - (Optional) ARN of the runtime version. Only required when `updateRuntimeOn` is `Manual`. * `updateRuntimeOn` - (Optional) Runtime update mode. Valid values are `Auto`, `FunctionUpdate`, and `Manual`. When a function is created, the default mode is `Auto`. 
@@ -104,7 +104,7 @@ class MyConvertedCode extends TerraformStack { LambdaRuntimeManagementConfig.generateConfigForImport( this, "example", - "my-function,$LATEST" + "example,$LATEST" ); } } @@ -114,7 +114,7 @@ class MyConvertedCode extends TerraformStack { Using `terraform import`, import Lambda Runtime Management Config using a comma-delimited string combining `functionName` and `qualifier`. For example: ```console -% terraform import aws_lambda_runtime_management_config.example my-function,$LATEST +% terraform import aws_lambda_runtime_management_config.example example,$LATEST ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/launch_configuration.html.markdown b/website/docs/cdktf/typescript/r/launch_configuration.html.markdown index 34559f90d2b1..98f469cd158b 100644 --- a/website/docs/cdktf/typescript/r/launch_configuration.html.markdown +++ b/website/docs/cdktf/typescript/r/launch_configuration.html.markdown @@ -188,6 +188,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `associatePublicIpAddress` - (Optional) Associate a public ip address with an instance in a VPC. * `ebsBlockDevice` - (Optional) Additional EBS block devices to attach to the instance. See [Block Devices](#block-devices) below for details. * `ebsOptimized` - (Optional) If true, the launched EC2 instance will be EBS-optimized. @@ -304,4 +305,4 @@ Using `terraform import`, import launch configurations using the `name`. 
For exa % terraform import aws_launch_configuration.as_conf terraform-lg-123456 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/launch_template.html.markdown b/website/docs/cdktf/typescript/r/launch_template.html.markdown index 27490c24fb1b..010174898736 100644 --- a/website/docs/cdktf/typescript/r/launch_template.html.markdown +++ b/website/docs/cdktf/typescript/r/launch_template.html.markdown @@ -48,14 +48,6 @@ class MyConvertedCode extends TerraformStack { disableApiStop: true, disableApiTermination: true, ebsOptimized: Token.asString(true), - elasticGpuSpecifications: [ - { - type: "test", - }, - ], - elasticInferenceAccelerator: { - type: "eia1.medium", - }, iamInstanceProfile: { name: "test", }, @@ -112,6 +104,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `blockDeviceMappings` - (Optional) Specify volumes to attach to the instance besides the volumes specified by the AMI. See [Block Devices](#block-devices) below for details. * `capacityReservationSpecification` - (Optional) Targeting for EC2 capacity reservations. See [Capacity Reservation Specification](#capacity-reservation-specification) below for more details. @@ -124,9 +117,6 @@ This resource supports the following arguments: * `disableApiTermination` - (Optional) If `true`, enables [EC2 Instance Termination Protection](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_ChangingDisableAPITermination.html) * `ebsOptimized` - (Optional) If `true`, the launched EC2 instance will be EBS-optimized. 
-* `elasticGpuSpecifications` - (Optional) **DEPRECATED** The elastic GPU to attach to the instance. See [Elastic GPU](#elastic-gpu) - below for more details. -* `elasticInferenceAccelerator` - (Optional) **DEPRECATED** Configuration block containing an Elastic Inference Accelerator to attach to the instance. See [Elastic Inference Accelerator](#elastic-inference-accelerator) below for more details. * `enclaveOptions` - (Optional) Enable Nitro Enclaves on launched instances. See [Enclave Options](#enclave-options) below for more details. * `hibernationOptions` - (Optional) The hibernation options for the instance. See [Hibernation Options](#hibernation-options) below for more details. * `iamInstanceProfile` - (Optional) The IAM Instance Profile to launch the instance with. See [Instance Profile](#instance-profile) @@ -197,7 +187,7 @@ The `ebs` block supports the following: The `capacityReservationSpecification` block supports the following: -* `capacityReservationPreference` - Indicates the instance's Capacity Reservation preferences. Can be `open` or `none`. (Default `none`). +* `capacityReservationPreference` - Indicates the instance's Capacity Reservation preferences. Can be `capacity-reservations-only`, `open` or `none`. If `capacityReservationId` or `capacityReservationResourceGroupArn` is specified in `capacityReservationTarget` block, either omit `capacityReservationPreference` or set it to `capacity-reservations-only`. * `capacityReservationTarget` - Used to target a specific Capacity Reservation: The `capacityReservationTarget` block supports the following: @@ -228,22 +218,6 @@ The `creditSpecification` block supports the following: T3 instances are launched as `unlimited` by default. T2 instances are launched as `standard` by default. -### Elastic GPU - -Attach an elastic GPU the instance. 
- -The `elasticGpuSpecifications` block supports the following: - -* `type` - The [Elastic GPU Type](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/elastic-graphics.html#elastic-graphics-basics) - -### Elastic Inference Accelerator - -**DEPRECATED** Attach an Elastic Inference Accelerator to the instance. Additional information about Elastic Inference in EC2 can be found in the [EC2 User Guide](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/elastic-inference.html). - -The `elasticInferenceAccelerator` configuration block supports the following: - -* `type` - (Required) Accelerator type. - ### Enclave Options The `enclaveOptions` block supports the following: @@ -488,7 +462,8 @@ The `placement` block supports the following: * `affinity` - (Optional) The affinity setting for an instance on a Dedicated Host. * `availabilityZone` - (Optional) The Availability Zone for the instance. -* `groupName` - (Optional) The name of the placement group for the instance. +* `groupId` - (Optional) The ID of the placement group for the instance. Conflicts with `groupName`. +* `groupName` - (Optional) The name of the placement group for the instance. Conflicts with `groupId`. * `hostId` - (Optional) The ID of the Dedicated Host for the instance. * `hostResourceGroupArn` - (Optional) The ARN of the Host Resource Group in which to launch instances. * `spreadDomain` - (Optional) Reserved for future use. @@ -549,4 +524,4 @@ Using `terraform import`, import Launch Templates using the `id`. 
For example: % terraform import aws_launch_template.web lt-12345678 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/lb.html.markdown b/website/docs/cdktf/typescript/r/lb.html.markdown index e57b0681c771..a7971610b53b 100644 --- a/website/docs/cdktf/typescript/r/lb.html.markdown +++ b/website/docs/cdktf/typescript/r/lb.html.markdown @@ -154,6 +154,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `accessLogs` - (Optional) Access Logs block. See below. * `connectionLogs` - (Optional) Connection Logs block. See below. Only valid for Load Balancers of type `application`. * `clientKeepAlive` - (Optional) Client keep alive value in seconds. The valid range is 60-604800 seconds. The default is 3600 seconds. @@ -174,11 +175,12 @@ This resource supports the following arguments: * `ipAddressType` - (Optional) Type of IP addresses used by the subnets for your load balancer. The possible values depend upon the load balancer type: `ipv4` (all load balancer types), `dualstack` (all load balancer types), and `dualstack-without-public-ipv4` (type `application` only). * `ipamPools` (Optional). The IPAM pools to use with the load balancer. Only valid for Load Balancers of type `application`. See [ipam_pools](#ipam_pools) for more information. * `loadBalancerType` - (Optional) Type of load balancer to create. Possible values are `application`, `gateway`, or `network`. The default value is `application`. -* `minimum_load_balancer_capacity` - (Optional) Minimum capacity for a load balancer. Only valid for Load Balancers of type `application` or `network`. 
+* `minimumLoadBalancerCapacity` - (Optional) Minimum capacity for a load balancer. Only valid for Load Balancers of type `application` or `network`. * `name` - (Optional) Name of the LB. This name must be unique within your AWS account, can have a maximum of 32 characters, must contain only alphanumeric characters or hyphens, and must not begin or end with a hyphen. If not specified, Terraform will autogenerate a name beginning with `tf-lb`. * `namePrefix` - (Optional) Creates a unique name beginning with the specified prefix. Conflicts with `name`. * `securityGroups` - (Optional) List of security group IDs to assign to the LB. Only valid for Load Balancers of type `application` or `network`. For load balancers of type `network` security groups cannot be added if none are currently present, and cannot all be removed once added. If either of these conditions are met, this will force a recreation of the resource. * `preserveHostHeader` - (Optional) Whether the Application Load Balancer should preserve the Host header in the HTTP request and send it to the target without any change. Defaults to `false`. +* `secondaryIpsAutoAssignedPerSubnet` - (Optional) The number of secondary IP addresses to configure for your load balancer nodes. Only valid for Load Balancers of type `network`. The valid range is 0-7. When decreased, this will force a recreation of the resource. Default: `0`. * `subnetMapping` - (Optional) Subnet mapping block. See below. For Load Balancers of type `network` subnet mappings can only be added. * `subnets` - (Optional) List of subnet IDs to attach to the LB. For Load Balancers of type `network` subnets can only be added (see [Availability Zones](https://docs.aws.amazon.com/elasticloadbalancing/latest/network/network-load-balancers.html#availability-zones)), deleting a subnet for load balancers of type `network` will force a recreation of the resource. * `tags` - (Optional) Map of tags to assign to the resource. 
If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. @@ -219,10 +221,9 @@ This resource supports the following arguments: This resource exports the following attributes in addition to the arguments above: -* `arn` - ARN of the load balancer (matches `id`). +* `arn` - ARN of the load balancer. * `arnSuffix` - ARN suffix for use with CloudWatch Metrics. * `dnsName` - DNS name of the load balancer. -* `id` - ARN of the load balancer (matches `arn`). * `subnet_mapping.*.outpost_id` - ID of the Outpost containing the load balancer. * `tagsAll` - Map of tags assigned to the resource, including those inherited from the provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). * `zoneId` - Canonical hosted zone ID of the load balancer (to be used in a Route 53 Alias record). @@ -237,6 +238,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_lb.example + identity = { + "arn" = "arn:aws:elasticloadbalancing:us-west-2:123456789012:loadbalancer/app/my-load-balancer/50dc6c495c0c9188" + } +} + +resource "aws_lb" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the load balancer. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import LBs using their ARN. For example: ```typescript @@ -267,4 +289,4 @@ Using `terraform import`, import LBs using their ARN. 
For example: % terraform import aws_lb.bar arn:aws:elasticloadbalancing:us-west-2:123456789012:loadbalancer/app/my-load-balancer/50dc6c495c0c9188 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/lb_cookie_stickiness_policy.html.markdown b/website/docs/cdktf/typescript/r/lb_cookie_stickiness_policy.html.markdown index 36d9680ee5ea..aaa090779fef 100644 --- a/website/docs/cdktf/typescript/r/lb_cookie_stickiness_policy.html.markdown +++ b/website/docs/cdktf/typescript/r/lb_cookie_stickiness_policy.html.markdown @@ -54,6 +54,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The name of the stickiness policy. * `loadBalancer` - (Required) The load balancer to which the policy should be attached. @@ -73,4 +74,4 @@ This resource exports the following attributes in addition to the arguments abov * `lbPort` - The load balancer port to which the policy is applied. * `cookieExpirationPeriod` - The time period after which the session cookie is considered stale, expressed in seconds. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/lb_listener.html.markdown b/website/docs/cdktf/typescript/r/lb_listener.html.markdown index 6d3c9cf306ed..0c287e8d5d66 100644 --- a/website/docs/cdktf/typescript/r/lb_listener.html.markdown +++ b/website/docs/cdktf/typescript/r/lb_listener.html.markdown @@ -57,6 +57,57 @@ class MyConvertedCode extends TerraformStack { ``` +With weighted target groups: + +```typescript +// DO NOT EDIT. 
Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { Token, TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. + */ +import { Lb } from "./.gen/providers/aws/lb"; +import { LbListener } from "./.gen/providers/aws/lb-listener"; +import { LbTargetGroup } from "./.gen/providers/aws/lb-target-group"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + const frontEnd = new Lb(this, "front_end", {}); + const frontEndBlue = new LbTargetGroup(this, "front_end_blue", {}); + const frontEndGreen = new LbTargetGroup(this, "front_end_green", {}); + const awsLbListenerFrontEnd = new LbListener(this, "front_end_3", { + certificateArn: + "arn:aws:iam::187416307283:server-certificate/test_cert_rab3wuqwgja25ct3n4jdj2tzu4", + defaultAction: [ + { + forward: { + targetGroup: [ + { + arn: frontEndBlue.arn, + weight: 100, + }, + { + arn: frontEndGreen.arn, + weight: 0, + }, + ], + }, + type: "forward", + }, + ], + loadBalancerArn: frontEnd.arn, + port: Token.asNumber("443"), + protocol: "HTTPS", + sslPolicy: "ELBSecurityPolicy-2016-08", + }); + /*This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match.*/ + awsLbListenerFrontEnd.overrideLogicalId("front_end"); + } +} + +``` + To a NLB: ```typescript @@ -387,6 +438,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
* `alpnPolicy` - (Optional) Name of the Application-Layer Protocol Negotiation (ALPN) policy. Can be set if `protocol` is `TLS`. Valid values are `HTTP1Only`, `HTTP2Only`, `HTTP2Optional`, `HTTP2Preferred`, and `None`. * `certificateArn` - (Optional) ARN of the default SSL server certificate. Exactly one certificate is required if the protocol is HTTPS. For adding additional SSL certificates, see the [`aws_lb_listener_certificate` resource](/docs/providers/aws/r/lb_listener_certificate.html). * `mutualAuthentication` - (Optional) The mutual authentication configuration information. See below. @@ -425,6 +477,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `authenticateCognito` - (Optional) Configuration block for using Amazon Cognito to authenticate users. Specify only when `type` is `authenticate-cognito`. See below. * `authenticateOidc` - (Optional) Configuration block for an identity provider that is compliant with OpenID Connect (OIDC). Specify only when `type` is `authenticate-oidc`. See below. * `fixedResponse` - (Optional) Information for creating an action that returns a custom HTTP response. Required if `type` is `fixed-response`. @@ -443,6 +496,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
* `authenticationRequestExtraParams` - (Optional) Query parameters to include in the redirect request to the authorization endpoint. Max: 10. See below. * `onUnauthenticatedRequest` - (Optional) Behavior if the user is not authenticated. Valid values are `deny`, `allow` and `authenticate`. * `scope` - (Optional) Set of user claims to be requested from the IdP. @@ -467,6 +521,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `authenticationRequestExtraParams` - (Optional) Query parameters to include in the redirect request to the authorization endpoint. Max: 10. * `onUnauthenticatedRequest` - (Optional) Behavior if the user is not authenticated. Valid values: `deny`, `allow` and `authenticate` * `scope` - (Optional) Set of user claims to be requested from the IdP. @@ -481,6 +536,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `messageBody` - (Optional) Message body. * `statusCode` - (Optional) HTTP response code. Valid values are `2XX`, `4XX`, or `5XX`. @@ -492,6 +548,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `stickiness` - (Optional) Configuration block for target group stickiness for the rule. See below. ##### target_group @@ -502,6 +559,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `weight` - (Optional) Weight. The range is 0 to 999. ##### stickiness @@ -512,6 +570,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `enabled` - (Optional) Whether target group stickiness is enabled. Default is `false`. #### redirect @@ -524,6 +583,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `host` - (Optional) Hostname. This component is not percent-encoded. The hostname can contain `#{host}`. Defaults to `#{host}`. * `path` - (Optional) Absolute path, starting with the leading "/". This component is not percent-encoded. The path can contain #{host}, #{path}, and #{port}. Defaults to `/#{path}`. * `port` - (Optional) Port. 
Specify a value from `1` to `65535` or `#{port}`. Defaults to `#{port}`. @@ -532,23 +592,44 @@ The following arguments are optional: ### mutual_authentication -* `advertiseTrustStoreCaNames` - (Optional) Valid values are `off` and `on`. -* `ignoreClientCertificateExpiry` - (Optional) Whether client certificate expiry is ignored. Default is `false`. -* `mode` - (Required) Valid values are `off`, `verify` and `passthrough`. -* `trustStoreArn` - (Required) ARN of the elbv2 Trust Store. +* `advertiseTrustStoreCaNames` - (Optional when `mode` is `verify`, invalid otherwise) Valid values are `off` and `on`. +* `ignoreClientCertificateExpiry` - (Optional when `mode` is `verify`, invalid otherwise) Whether client certificate expiry is ignored. + Default is `false`. +* `mode` - (Required) Valid values are `off`, `passthrough`, and `verify`. +* `trustStoreArn` - (Required when `mode` is `verify`, invalid otherwise) ARN of the elbv2 Trust Store. ## Attribute Reference This resource exports the following attributes in addition to the arguments above: -* `arn` - ARN of the listener (matches `id`). -* `id` - ARN of the listener (matches `arn`). +* `arn` - ARN of the listener. * `tagsAll` - A map of tags assigned to the resource, including those inherited from the provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). ~> **Note:** When importing a listener with a forward-type default action, you must include both a top-level target group ARN and a `forward` block with a `targetGroup` and `arn` to avoid import differences. ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. 
For example: + +```terraform +import { + to = aws_lb_listener.example + identity = { + "arn" = "arn:aws:elasticloadbalancing:us-west-2:187416307283:listener/app/front-end-alb/8e4497da625e2d8a/9ab28ade35828f96" + } +} + +resource "aws_lb_listener" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the load balancer listener. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import listeners using their ARN. For example: ```typescript @@ -579,4 +660,4 @@ Using `terraform import`, import listeners using their ARN. For example: % terraform import aws_lb_listener.front_end arn:aws:elasticloadbalancing:us-west-2:187416307283:listener/app/front-end-alb/8e4497da625e2d8a/9ab28ade35828f96 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/lb_listener_certificate.html.markdown b/website/docs/cdktf/typescript/r/lb_listener_certificate.html.markdown index 1d5e807a4c64..3efb39e118f1 100644 --- a/website/docs/cdktf/typescript/r/lb_listener_certificate.html.markdown +++ b/website/docs/cdktf/typescript/r/lb_listener_certificate.html.markdown @@ -64,6 +64,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `listenerArn` - (Required, Forces New Resource) The ARN of the listener to which to attach the certificate. * `certificateArn` - (Required, Forces New Resource) The ARN of the certificate to attach to the listener. 
@@ -105,4 +106,4 @@ Using `terraform import`, import Listener Certificates using the listener arn an % terraform import aws_lb_listener_certificate.example arn:aws:elasticloadbalancing:us-west-2:123456789012:listener/app/test/8e4497da625e2d8a/9ab28ade35828f96/67b3d2d36dd7c26b_arn:aws:iam::123456789012:server-certificate/tf-acc-test-6453083910015726063 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/lb_listener_rule.html.markdown b/website/docs/cdktf/typescript/r/lb_listener_rule.html.markdown index 3a09d2cc59b1..0f7c74ac8522 100644 --- a/website/docs/cdktf/typescript/r/lb_listener_rule.html.markdown +++ b/website/docs/cdktf/typescript/r/lb_listener_rule.html.markdown @@ -228,6 +228,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `listenerArn` - (Required, Forces New Resource) The ARN of the listener to which to attach the rule. * `priority` - (Optional) The priority for the rule between `1` and `50000`. Leaving it unset will automatically set the rule with next available priority after currently existing highest rule. A listener can't have multiple rules with the same priority. * `action` - (Required) An Action block. Action blocks are documented below. @@ -360,6 +361,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. 
For example: + +```terraform +import { + to = aws_lb_listener_rule.example + identity = { + "arn" = "arn:aws:elasticloadbalancing:us-west-2:123456789012:listener-rule/app/my-load-balancer/50dc6c495c0c9188/f2f7dc8efc522ab2/9683b2d02a6cabee" + } +} + +resource "aws_lb_listener_rule" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the load balancer listener rule. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import rules using their ARN. For example: ```typescript @@ -390,4 +412,4 @@ Using `terraform import`, import rules using their ARN. For example: % terraform import aws_lb_listener_rule.front_end arn:aws:elasticloadbalancing:us-west-2:187416307283:listener-rule/app/test/8e4497da625e2d8a/9ab28ade35828f96/67b3d2d36dd7c26b ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/lb_ssl_negotiation_policy.html.markdown b/website/docs/cdktf/typescript/r/lb_ssl_negotiation_policy.html.markdown index 2595c92c9fbe..5b3aa263c5c6 100644 --- a/website/docs/cdktf/typescript/r/lb_ssl_negotiation_policy.html.markdown +++ b/website/docs/cdktf/typescript/r/lb_ssl_negotiation_policy.html.markdown @@ -85,6 +85,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The name of the SSL negotiation policy. * `loadBalancer` - (Required) The load balancer to which the policy should be attached. 
@@ -110,4 +111,4 @@ This resource exports the following attributes in addition to the arguments abov * `lbPort` - The load balancer port to which the policy is applied. * `attribute` - The SSL Negotiation policy attributes. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/lb_target_group.html.markdown b/website/docs/cdktf/typescript/r/lb_target_group.html.markdown index bb6c226b8c79..781fdc921cdd 100644 --- a/website/docs/cdktf/typescript/r/lb_target_group.html.markdown +++ b/website/docs/cdktf/typescript/r/lb_target_group.html.markdown @@ -193,6 +193,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `connectionTermination` - (Optional) Whether to terminate connections at the end of the deregistration timeout on Network Load Balancers. See [doc](https://docs.aws.amazon.com/elasticloadbalancing/latest/network/load-balancer-target-groups.html#deregistration-delay) for more information. Default is `false`. * `deregistrationDelay` - (Optional) Amount time for Elastic Load Balancing to wait before changing the state of a deregistering target from draining to unused. The range is 0-3600 seconds. The default value is 300 seconds. * `healthCheck` - (Optional, Maximum of 1) Health Check configuration block. Detailed below. @@ -317,6 +318,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. 
For example: + +```terraform +import { + to = aws_lb_target_group.example + identity = { + "arn" = "arn:aws:elasticloadbalancing:us-west-2:123456789012:targetgroup/my-targets/73e2d6bc24d8a067" + } +} + +resource "aws_lb_target_group" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the target group. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Target Groups using their ARN. For example: ```typescript @@ -347,4 +369,4 @@ Using `terraform import`, import Target Groups using their ARN. For example: % terraform import aws_lb_target_group.app_front_end arn:aws:elasticloadbalancing:us-west-2:187416307283:targetgroup/app-front-end/20cfe21448b66314 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/lb_target_group_attachment.html.markdown b/website/docs/cdktf/typescript/r/lb_target_group_attachment.html.markdown index 5212e5c0b7f1..ae6cb407f19c 100644 --- a/website/docs/cdktf/typescript/r/lb_target_group_attachment.html.markdown +++ b/website/docs/cdktf/typescript/r/lb_target_group_attachment.html.markdown @@ -175,6 +175,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `availabilityZone` - (Optional) The Availability Zone where the IP address of the target is to be registered. If the private IP address is outside of the VPC scope, this value must be set to `all`. * `port` - (Optional) The port on which targets receive traffic. 
@@ -188,4 +189,4 @@ This resource exports the following attributes in addition to the arguments abov You cannot import Target Group Attachments. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/lb_trust_store.html.markdown b/website/docs/cdktf/typescript/r/lb_trust_store.html.markdown index bdf55d4a874a..fb3eeb733bc1 100644 --- a/website/docs/cdktf/typescript/r/lb_trust_store.html.markdown +++ b/website/docs/cdktf/typescript/r/lb_trust_store.html.markdown @@ -56,10 +56,10 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `caCertificatesBundleS3Bucket` - (Required) S3 Bucket name holding the client certificate CA bundle. * `caCertificatesBundleS3Key` - (Required) S3 object key holding the client certificate CA bundle. * `caCertificatesBundleS3ObjectVersion` - (Optional) Version Id of CA bundle S3 bucket object, if versioned, defaults to latest if omitted. - * `namePrefix` - (Optional, Forces new resource) Creates a unique name beginning with the specified prefix. Conflicts with `name`. Cannot be longer than 6 characters. * `name` - (Optional, Forces new resource) Name of the Trust Store. If omitted, Terraform will assign a random, unique name. This name must be unique per region per account, can have a maximum of 32 characters, must contain only alphanumeric characters or hyphens, and must not begin or end with a hyphen. * `tags` - (Optional) Map of tags to assign to the resource. 
If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. @@ -76,6 +76,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_lb_trust_store.example + identity = { + "arn" = "arn:aws:elasticloadbalancing:us-west-2:123456789012:truststore/my-trust-store/73e2d6bc24d8a067" + } +} + +resource "aws_lb_trust_store" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the trust store. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Trust Stores using their ARN. For example: ```typescript @@ -106,4 +127,4 @@ Using `terraform import`, import Target Groups using their ARN. 
For example: % terraform import aws_lb_trust_store.example arn:aws:elasticloadbalancing:us-west-2:187416307283:truststore/my-trust-store/20cfe21448b66314 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/lb_trust_store_revocation.html.markdown b/website/docs/cdktf/typescript/r/lb_trust_store_revocation.html.markdown index fe13ef856b34..6713bb185e0e 100644 --- a/website/docs/cdktf/typescript/r/lb_trust_store_revocation.html.markdown +++ b/website/docs/cdktf/typescript/r/lb_trust_store_revocation.html.markdown @@ -54,6 +54,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `trustStoreArn` - (Required) Trust Store ARN. * `revocationsS3Bucket` - (Required) S3 Bucket name holding the client certificate CA bundle. * `revocationsS3Key` - (Required) S3 object key holding the client certificate CA bundle. @@ -98,4 +99,4 @@ Using `terraform import`, import Trust Store Revocations using their ARN. 
For ex % terraform import aws_lb_trust_store_revocation.example arn:aws:elasticloadbalancing:us-west-2:187416307283:truststore/my-trust-store/20cfe21448b66314,6 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/lex_bot.html.markdown b/website/docs/cdktf/typescript/r/lex_bot.html.markdown index 97411aa198af..8b4e9cf515ec 100644 --- a/website/docs/cdktf/typescript/r/lex_bot.html.markdown +++ b/website/docs/cdktf/typescript/r/lex_bot.html.markdown @@ -69,6 +69,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `abortStatement` - (Required) The message that Amazon Lex uses to abort a conversation. Attributes are documented under [statement](#statement). * `childDirected` - (Required) By specifying true, you confirm that your use of Amazon Lex is related to a website, program, or other application that is directed or targeted, in whole or in part, to children under age 13 and subject to COPPA. For more information see the [Amazon Lex FAQ](https://aws.amazon.com/lex/faqs#data-security) and the [Amazon Lex PutBot API Docs](https://docs.aws.amazon.com/lex/latest/dg/API_PutBot.html#lex-PutBot-request-childDirected). * `clarificationPrompt` - (Required) The message that Amazon Lex uses when it doesn't understand the user's request. Attributes are documented under [prompt](#prompt). @@ -177,4 +178,4 @@ Using `terraform import`, import bots using their name. 
For example: % terraform import aws_lex_bot.order_flowers_bot OrderFlowers ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/lex_bot_alias.html.markdown b/website/docs/cdktf/typescript/r/lex_bot_alias.html.markdown index 6dbf990864fa..975734f5a20b 100644 --- a/website/docs/cdktf/typescript/r/lex_bot_alias.html.markdown +++ b/website/docs/cdktf/typescript/r/lex_bot_alias.html.markdown @@ -42,6 +42,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `botName` - (Required) The name of the bot. * `botVersion` - (Required) The version of the bot. * `conversationLogs` - (Optional) The settings that determine how Amazon Lex uses conversation logs for the alias. Attributes are documented under [conversation_logs](#conversation_logs). @@ -114,4 +115,4 @@ Using `terraform import`, import bot aliases using an ID with the format `bot_na % terraform import aws_lex_bot_alias.order_flowers_prod OrderFlowers:OrderFlowersProd ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/lex_intent.html.markdown b/website/docs/cdktf/typescript/r/lex_intent.html.markdown index 46a78506b1a1..b2d8ff3ac0ca 100644 --- a/website/docs/cdktf/typescript/r/lex_intent.html.markdown +++ b/website/docs/cdktf/typescript/r/lex_intent.html.markdown @@ -124,6 +124,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `conclusionStatement` - (Optional) The statement that you want Amazon Lex to convey to the user after the intent is successfully fulfilled by the Lambda function. This element is relevant only if you provide a Lambda function in the `fulfillmentActivity`. If you return the intent to the client @@ -296,4 +297,4 @@ Using `terraform import`, import intents using their name. For example: % terraform import aws_lex_intent.order_flowers_intent OrderFlowers ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/lex_slot_type.html.markdown b/website/docs/cdktf/typescript/r/lex_slot_type.html.markdown index 8042db1d492a..104f204023a4 100644 --- a/website/docs/cdktf/typescript/r/lex_slot_type.html.markdown +++ b/website/docs/cdktf/typescript/r/lex_slot_type.html.markdown @@ -52,6 +52,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `enumerationValue` - (Required) A list of EnumerationValue objects that defines the values that the slot type can take. Each value can have a list of synonyms, which are additional values that help train the machine learning model about the values that it resolves for a slot. Attributes are @@ -123,4 +124,4 @@ Using `terraform import`, import slot types using their name. 
For example: % terraform import aws_lex_slot_type.flower_types FlowerTypes ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/lexv2models_bot.html.markdown b/website/docs/cdktf/typescript/r/lexv2models_bot.html.markdown index 597166682a8e..7aafc984cb28 100644 --- a/website/docs/cdktf/typescript/r/lexv2models_bot.html.markdown +++ b/website/docs/cdktf/typescript/r/lexv2models_bot.html.markdown @@ -83,6 +83,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `members` - List of bot members in a network to be created. See [`bot_members`](#bot-members). * `tags` - List of tags to add to the bot. You can only add tags when you create a bot. * `type` - Type of a bot to create. Possible values are `"Bot"` and `"BotNetwork"`. @@ -143,4 +144,4 @@ Using `terraform import`, import Lex V2 Models Bot using the `id`. For example: % terraform import aws_lexv2models_bot.example bot-id-12345678 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/lexv2models_bot_locale.html.markdown b/website/docs/cdktf/typescript/r/lexv2models_bot_locale.html.markdown index 18c8e624b1d7..f08a7b6bf9ef 100644 --- a/website/docs/cdktf/typescript/r/lexv2models_bot_locale.html.markdown +++ b/website/docs/cdktf/typescript/r/lexv2models_bot_locale.html.markdown @@ -81,6 +81,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` - Description of the bot locale. Use this to help identify the bot locale in lists. * `voiceSettings` - Amazon Polly voice ID that Amazon Lex uses for voice interaction with the user. See [`voiceSettings`](#voice-settings). @@ -136,4 +137,4 @@ Using `terraform import`, import Lex V2 Models Bot Locale using the `id`. For ex % terraform import aws_lexv2models_bot_locale.example en_US,abcd-12345678,1 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/lexv2models_bot_version.html.markdown b/website/docs/cdktf/typescript/r/lexv2models_bot_version.html.markdown index a3b5c3777c55..0a25ce80f76e 100644 --- a/website/docs/cdktf/typescript/r/lexv2models_bot_version.html.markdown +++ b/website/docs/cdktf/typescript/r/lexv2models_bot_version.html.markdown @@ -47,12 +47,13 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `botId` - (Required) Identifier of the bot to create the version for. * `localeSpecification` - (Required) Specifies the locales that Amazon Lex adds to this version. You can choose the draft version or any other previously published version for each locale. When you specify a source version, the locale data is copied from the source version to the new version.
- - The attribute value is a map with one or more entries, each of which has a locale name as the key and an object with the following attribute as the value: - * `sourceBotVersion` - (Required) The version of a bot used for a bot locale. Valid values: `DRAFT`, a numeric version. * `description` - (Optional) A description of the version. Use the description to help identify the version in lists. +* `sourceBotVersion` - (Required) The version of a bot used for a bot locale. Valid values: `DRAFT`, a numeric version. + +The `localeSpecification` attribute value is a map with one or more entries, each of which has a locale name as the key and an object with the `sourceBotVersion` attribute described above as the value. ## Attribute Reference @@ -100,4 +101,4 @@ Using `terraform import`, import Lex V2 Models Bot Version using the `id`. For e % terraform import aws_lexv2models_bot_version.example id-12345678,1 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/lexv2models_intent.html.markdown b/website/docs/cdktf/typescript/r/lexv2models_intent.html.markdown index 62225cc1079b..9d5004f94011 100644 --- a/website/docs/cdktf/typescript/r/lexv2models_intent.html.markdown +++ b/website/docs/cdktf/typescript/r/lexv2models_intent.html.markdown @@ -195,6 +195,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `closingSetting` - (Optional) Configuration block for the response that Amazon Lex sends to the user when the intent is closed. See [`closingSetting`](#closing_setting). * `confirmationSetting` - (Optional) Configuration block for prompts that Amazon Lex sends to the user to confirm the completion of an intent.
If the user answers "no," the settings contain a statement that is sent to the user to end the intent. If you configure this block without `prompt_specification.*.prompt_attempts_specification`, AWS will provide default configurations for `Initial` and `Retry1` `promptAttemptsSpecification`s. This will cause Terraform to report differences. Use the `confirmationSetting` configuration above in the [Basic Usage](#basic-usage) example to avoid differences resulting from AWS default configuration. See [`confirmationSetting`](#confirmation_setting). * `description` - (Optional) Description of the intent. Use the description to help identify the intent in lists. @@ -606,4 +607,4 @@ Using `terraform import`, import Lex V2 Models Intent using the `intent_id:bot_i % terraform import aws_lexv2models_intent.example intent-42874:bot-11376:DRAFT:en_US ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/lexv2models_slot.html.markdown b/website/docs/cdktf/typescript/r/lexv2models_slot.html.markdown index 50b00211f963..0d433d1b5e31 100644 --- a/website/docs/cdktf/typescript/r/lexv2models_slot.html.markdown +++ b/website/docs/cdktf/typescript/r/lexv2models_slot.html.markdown @@ -182,6 +182,7 @@ See the [`valueElicitationSetting` argument reference](#value_elicitation_settin The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` - (Optional) Description of the slot. * `multipleValuesSetting` - (Optional) Whether the slot returns multiple values in one response. See the [`multipleValuesSetting` argument reference](#multiple_values_setting-argument-reference) below. 
@@ -335,4 +336,4 @@ Using `terraform import`, import Lex V2 Models Slot using the `id`. For example: % terraform import aws_lexv2models_slot.example bot-1234,1,intent-5678,en-US,slot-9012 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/lexv2models_slot_type.html.markdown b/website/docs/cdktf/typescript/r/lexv2models_slot_type.html.markdown index a575a5f2d41f..8797c87dc2d4 100644 --- a/website/docs/cdktf/typescript/r/lexv2models_slot_type.html.markdown +++ b/website/docs/cdktf/typescript/r/lexv2models_slot_type.html.markdown @@ -107,6 +107,7 @@ All of the bots, slot types, and slots used by the intent must have the same loc The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` - (Optional) Description of the slot type. * `compositeSlotTypeSetting` - (Optional) Specifications for a composite slot type. See [`compositeSlotTypeSetting` argument reference](#composite_slot_type_setting-argument-reference) below. 
@@ -229,4 +230,4 @@ Using `terraform import`, import Lex V2 Models Slot Type using using a comma-del % terraform import aws_lexv2models_slot_type.example bot-1234,DRAFT,en_US,slot_type-id-12345678 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/licensemanager_association.html.markdown b/website/docs/cdktf/typescript/r/licensemanager_association.html.markdown index 826a5efe1aaf..a6a14be79061 100644 --- a/website/docs/cdktf/typescript/r/licensemanager_association.html.markdown +++ b/website/docs/cdktf/typescript/r/licensemanager_association.html.markdown @@ -72,6 +72,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `licenseConfigurationArn` - (Required) ARN of the license configuration. * `resourceArn` - (Required) ARN of the resource associated with the license configuration. 
@@ -113,4 +114,4 @@ Using `terraform import`, import license configurations using `resource_arn,lice % terraform import aws_licensemanager_association.example arn:aws:ec2:eu-west-1:123456789012:image/ami-123456789abcdef01,arn:aws:license-manager:eu-west-1:123456789012:license-configuration:lic-0123456789abcdef0123456789abcdef ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/licensemanager_grant.html.markdown b/website/docs/cdktf/typescript/r/licensemanager_grant.html.markdown index 4c0bd17a0f08..d9d7cd97498b 100644 --- a/website/docs/cdktf/typescript/r/licensemanager_grant.html.markdown +++ b/website/docs/cdktf/typescript/r/licensemanager_grant.html.markdown @@ -48,6 +48,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The Name of the grant. * `allowedOperations` - (Required) A list of the allowed operations for the grant. This is a subset of the allowed operations on the license. * `licenseArn` - (Required) The ARN of the license to grant. @@ -96,4 +97,4 @@ Using `terraform import`, import `aws_licensemanager_grant` using the grant arn. 
% terraform import aws_licensemanager_grant.test arn:aws:license-manager::123456789011:grant:g-01d313393d9e443d8664cc054db1e089 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/licensemanager_grant_accepter.html.markdown b/website/docs/cdktf/typescript/r/licensemanager_grant_accepter.html.markdown index 4c7ba8f18b47..66f688de3d82 100644 --- a/website/docs/cdktf/typescript/r/licensemanager_grant_accepter.html.markdown +++ b/website/docs/cdktf/typescript/r/licensemanager_grant_accepter.html.markdown @@ -39,6 +39,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `grantArn` - (Required) The ARN of the grant to accept. 
## Attribute Reference @@ -88,4 +89,4 @@ Using `terraform import`, import `aws_licensemanager_grant_accepter` using the g % terraform import aws_licensemanager_grant_accepter.test arn:aws:license-manager::123456789012:grant:g-1cf9fba4ba2f42dcab11c686c4b4d329 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/licensemanager_license_configuration.html.markdown b/website/docs/cdktf/typescript/r/licensemanager_license_configuration.html.markdown index 8280c3687c4c..9c12f26c0dbe 100644 --- a/website/docs/cdktf/typescript/r/licensemanager_license_configuration.html.markdown +++ b/website/docs/cdktf/typescript/r/licensemanager_license_configuration.html.markdown @@ -48,6 +48,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the license configuration. * `description` - (Optional) Description of the license configuration. * `licenseCount` - (Optional) Number of licenses managed by the license configuration. @@ -109,4 +110,4 @@ Using `terraform import`, import license configurations using the `id`. 
For exam % terraform import aws_licensemanager_license_configuration.example arn:aws:license-manager:eu-west-1:123456789012:license-configuration:lic-0123456789abcdef0123456789abcdef ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/lightsail_bucket.html.markdown b/website/docs/cdktf/typescript/r/lightsail_bucket.html.markdown index e99e154c352c..5228ad921169 100644 --- a/website/docs/cdktf/typescript/r/lightsail_bucket.html.markdown +++ b/website/docs/cdktf/typescript/r/lightsail_bucket.html.markdown @@ -45,6 +45,7 @@ The following arguments are required: The following arguments are optional: * `forceDelete` - (Optional) Whether to force delete non-empty buckets using `terraform destroy`. AWS by default will not delete a bucket which is not empty, to prevent losing bucket data and affecting other resources in Lightsail. If `forceDelete` is set to `true` the bucket will be deleted even when not empty. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `tags` - (Optional) Map of tags to assign to the resource. To create a key-only tag, use an empty string as the value. If configured with a provider `defaultTags` configuration block present, tags with matching keys will overwrite those defined at the provider-level. ## Attribute Reference @@ -55,7 +56,6 @@ This resource exports the following attributes in addition to the arguments abov * `availabilityZone` - Availability Zone. Follows the format us-east-2a (case-sensitive). * `createdAt` - Date and time when the bucket was created. * `id` - Name used for this bucket (matches `name`). -* `region` - AWS Region name. * `supportCode` - Support code for the resource. 
Include this code in your email to support when you have questions about a resource in Lightsail. This code enables our support team to look up your Lightsail information more easily. * `tagsAll` - Map of tags assigned to the resource, including those inherited from the provider `defaultTags` configuration block. * `url` - URL of the bucket. @@ -88,4 +88,4 @@ Using `terraform import`, import `aws_lightsail_bucket` using the `name` attribu % terraform import aws_lightsail_bucket.example example-bucket ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/lightsail_bucket_access_key.html.markdown b/website/docs/cdktf/typescript/r/lightsail_bucket_access_key.html.markdown index 2fb118b2666e..3d12537c5d14 100644 --- a/website/docs/cdktf/typescript/r/lightsail_bucket_access_key.html.markdown +++ b/website/docs/cdktf/typescript/r/lightsail_bucket_access_key.html.markdown @@ -50,6 +50,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: * `bucketName` - (Required) Name of the bucket that the access key will belong to and grant access to. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
## Attribute Reference @@ -93,4 +94,4 @@ Using `terraform import`, import `aws_lightsail_bucket_access_key` using the `id % terraform import aws_lightsail_bucket_access_key.example example-bucket,AKIAIOSFODNN7EXAMPLE ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/lightsail_bucket_resource_access.html.markdown b/website/docs/cdktf/typescript/r/lightsail_bucket_resource_access.html.markdown index a7bb852c3aeb..594ae3968de5 100644 --- a/website/docs/cdktf/typescript/r/lightsail_bucket_resource_access.html.markdown +++ b/website/docs/cdktf/typescript/r/lightsail_bucket_resource_access.html.markdown @@ -61,6 +61,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: * `bucketName` - (Required) Name of the bucket to grant access to. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `resourceName` - (Required) Name of the resource to grant bucket access. 
## Attribute Reference @@ -101,4 +102,4 @@ Using `terraform import`, import `aws_lightsail_bucket_resource_access` using th % terraform import aws_lightsail_bucket_resource_access.example example-bucket,example-instance ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/lightsail_certificate.html.markdown b/website/docs/cdktf/typescript/r/lightsail_certificate.html.markdown index 376d12de7dda..fe6d704cfcf7 100644 --- a/website/docs/cdktf/typescript/r/lightsail_certificate.html.markdown +++ b/website/docs/cdktf/typescript/r/lightsail_certificate.html.markdown @@ -45,6 +45,7 @@ The following arguments are required: The following arguments are optional: * `domainName` - (Optional) Domain name for which the certificate should be issued. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `subjectAlternativeNames` - (Optional) Set of domains that should be SANs in the issued certificate. `domainName` attribute is automatically added as a Subject Alternative Name. * `tags` - (Optional) Map of tags to assign to the resource. To create a key-only tag, use an empty string as the value. If configured with a provider `defaultTags` configuration block present, tags with matching keys will overwrite those defined at the provider-level. 
@@ -94,4 +95,4 @@ Using `terraform import`, import `aws_lightsail_certificate` using the certifica % terraform import aws_lightsail_certificate.example example-certificate ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/lightsail_container_service.html.markdown b/website/docs/cdktf/typescript/r/lightsail_container_service.html.markdown index 5f70707c0c30..d3a54989901e 100644 --- a/website/docs/cdktf/typescript/r/lightsail_container_service.html.markdown +++ b/website/docs/cdktf/typescript/r/lightsail_container_service.html.markdown @@ -172,6 +172,7 @@ The following arguments are optional: * `isDisabled` - (Optional) Whether to disable the container service. Defaults to `false`. * `privateRegistryAccess` - (Optional) Configuration for the container service to access private container image repositories, such as Amazon Elastic Container Registry (Amazon ECR) private repositories. [See below](#private-registry-access). * `publicDomainNames` - (Optional) Public domain names to use with the container service, such as example.com and www.example.com. You can specify up to four public domain names for a container service. The domain names that you specify are used when you create a deployment with a container configured as the public endpoint of your container service. If you don't specify public domain names, then you can use the default domain of the container service. [See below](#public-domain-names). +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `tags` - (Optional) Map of tags to assign to the resource. To create a key-only tag, use an empty string as the value. 
If configured with a provider `defaultTags` configuration block present, tags with matching keys will overwrite those defined at the provider-level. ### Private Registry Access @@ -254,4 +255,4 @@ Using `terraform import`, import Lightsail Container Service using the `name`. F % terraform import aws_lightsail_container_service.example container-service-1 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/lightsail_container_service_deployment_version.html.markdown b/website/docs/cdktf/typescript/r/lightsail_container_service_deployment_version.html.markdown index 05f20fa0b9be..002ae7a0c69f 100644 --- a/website/docs/cdktf/typescript/r/lightsail_container_service_deployment_version.html.markdown +++ b/website/docs/cdktf/typescript/r/lightsail_container_service_deployment_version.html.markdown @@ -75,6 +75,7 @@ The following arguments are required: The following arguments are optional: * `publicEndpoint` - (Optional) Configuration block that describes the settings of the public endpoint for the container service. [See below](#public_endpoint). +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
### `container` @@ -152,4 +153,4 @@ Using `terraform import`, import Lightsail Container Service Deployment Version % terraform import aws_lightsail_container_service_deployment_version.example container-service-1/1 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/lightsail_database.html.markdown b/website/docs/cdktf/typescript/r/lightsail_database.html.markdown index 3b9fad69259d..fdacc78d3132 100644 --- a/website/docs/cdktf/typescript/r/lightsail_database.html.markdown +++ b/website/docs/cdktf/typescript/r/lightsail_database.html.markdown @@ -188,6 +188,7 @@ The following arguments are optional: * `preferredBackupWindow` - (Optional) Daily time range during which automated backups are created for your database if automated backups are enabled. Must be in the hh24:mi-hh24:mi format. Example: `16:00-16:30`. Specified in Coordinated Universal Time (UTC). * `preferredMaintenanceWindow` - (Optional) Weekly time range during which system maintenance can occur on your database. Must be in the ddd:hh24:mi-ddd:hh24:mi format. Specified in Coordinated Universal Time (UTC). Example: `Tue:17:00-Tue:17:30` * `publiclyAccessible` - (Optional) Whether the database is accessible to resources outside of your Lightsail account. A value of true specifies a database that is available to resources outside of your Lightsail account. A value of false specifies a database that is available only to your Lightsail resources in the same region as your database. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `skipFinalSnapshot` - (Optional) Whether a final database snapshot is created before your database is deleted. If true is specified, no database snapshot is created. 
If false is specified, a database snapshot is created before your database is deleted. You must specify the final relational database snapshot name parameter if the skip final snapshot parameter is false. * `tags` - (Optional) Map of tags to assign to the resource. To create a key-only tag, use an empty string as the value. If configured with a provider `defaultTags` configuration block present, tags with matching keys will overwrite those defined at the provider-level. @@ -289,4 +290,4 @@ Using `terraform import`, import Lightsail Databases using their name. For examp % terraform import aws_lightsail_database.example example-database ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/lightsail_disk.html.markdown b/website/docs/cdktf/typescript/r/lightsail_disk.html.markdown index a11e8bbc4e23..79c6a35801ed 100644 --- a/website/docs/cdktf/typescript/r/lightsail_disk.html.markdown +++ b/website/docs/cdktf/typescript/r/lightsail_disk.html.markdown @@ -56,6 +56,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `tags` - (Optional) Map of tags to assign to the resource. To create a key-only tag, use an empty string as the value. If configured with a provider `defaultTags` configuration block present, tags with matching keys will overwrite those defined at the provider-level. ## Attribute Reference @@ -96,4 +97,4 @@ Using `terraform import`, import `aws_lightsail_disk` using the name attribute. 
% terraform import aws_lightsail_disk.example example-disk ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/lightsail_disk_attachment.html.markdown b/website/docs/cdktf/typescript/r/lightsail_disk_attachment.html.markdown index d22e96ce1714..404c135f865c 100644 --- a/website/docs/cdktf/typescript/r/lightsail_disk_attachment.html.markdown +++ b/website/docs/cdktf/typescript/r/lightsail_disk_attachment.html.markdown @@ -80,6 +80,7 @@ This resource supports the following arguments: * `diskName` - (Required) Name of the Lightsail disk. * `diskPath` - (Required) Disk path to expose to the instance. * `instanceName` - (Required) Name of the Lightsail instance to attach to. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). ## Attribute Reference @@ -119,4 +120,4 @@ Using `terraform import`, import `aws_lightsail_disk_attachment` using the id at % terraform import aws_lightsail_disk_attachment.example example-disk,example-instance ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/lightsail_distribution.html.markdown b/website/docs/cdktf/typescript/r/lightsail_distribution.html.markdown index 9107580c9584..d2b64792af35 100644 --- a/website/docs/cdktf/typescript/r/lightsail_distribution.html.markdown +++ b/website/docs/cdktf/typescript/r/lightsail_distribution.html.markdown @@ -243,6 +243,7 @@ The following arguments are optional: * `certificateName` - (Optional) Name of the SSL/TLS certificate attached to the distribution. * `ipAddressType` - (Optional) IP address type of the distribution. Valid values: `dualstack`, `ipv4`. Default: `dualstack`. * `isEnabled` - (Optional) Whether the distribution is enabled. 
Default: `true`. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `tags` - (Optional) Map of tags for the Lightsail Distribution. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. ### cache_behavior @@ -347,4 +348,4 @@ Using `terraform import`, import Lightsail Distribution using the `name`. For ex % terraform import aws_lightsail_distribution.example example-distribution ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/lightsail_domain.html.markdown b/website/docs/cdktf/typescript/r/lightsail_domain.html.markdown index 0f4018b87381..40dedcf639d4 100644 --- a/website/docs/cdktf/typescript/r/lightsail_domain.html.markdown +++ b/website/docs/cdktf/typescript/r/lightsail_domain.html.markdown @@ -44,6 +44,10 @@ The following arguments are required: * `domainName` - (Required) Name of the Lightsail domain to manage. +The following arguments are optional: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). + ## Attribute Reference This resource exports the following attributes in addition to the arguments above: @@ -51,4 +55,4 @@ This resource exports the following attributes in addition to the arguments abov * `arn` - ARN of the Lightsail domain. * `id` - Name used for this domain. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/lightsail_domain_entry.html.markdown b/website/docs/cdktf/typescript/r/lightsail_domain_entry.html.markdown index c0636cbfdcbb..4207b328e380 100644 --- a/website/docs/cdktf/typescript/r/lightsail_domain_entry.html.markdown +++ b/website/docs/cdktf/typescript/r/lightsail_domain_entry.html.markdown @@ -61,6 +61,7 @@ The following arguments are required: The following arguments are optional: * `isAlias` - (Optional) Whether the entry should be an alias. Default: `false`. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). ## Attribute Reference @@ -100,4 +101,4 @@ Using `terraform import`, import Lightsail Domain Entry using the id attribute. % terraform import aws_lightsail_domain_entry.example www,example.com,A,127.0.0.1 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/lightsail_instance.html.markdown b/website/docs/cdktf/typescript/r/lightsail_instance.html.markdown index 61ad4dc8f60e..e70d979d38a6 100644 --- a/website/docs/cdktf/typescript/r/lightsail_instance.html.markdown +++ b/website/docs/cdktf/typescript/r/lightsail_instance.html.markdown @@ -121,6 +121,7 @@ The following arguments are optional: * `addOn` - (Optional) Add-on configuration for the instance. [See below](#add_on). * `ipAddressType` - (Optional) IP address type of the Lightsail Instance. Valid values: `dualstack`, `ipv4`, `ipv6`. Default: `dualstack`. * `keyPairName` - (Optional) Name of your key pair. Created in the Lightsail console (cannot use `aws_key_pair` at this time). 
+* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `tags` - (Optional) Map of tags to assign to the resource. To create a key-only tag, use an empty string as the value. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. * `userData` - (Optional) Single lined launch script as a string to configure server with additional user data. @@ -176,4 +177,4 @@ Using `terraform import`, import Lightsail Instances using their name. For examp % terraform import aws_lightsail_instance.example 'example' ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/lightsail_instance_public_ports.html.markdown b/website/docs/cdktf/typescript/r/lightsail_instance_public_ports.html.markdown index 1f302813f054..25f09859d68f 100644 --- a/website/docs/cdktf/typescript/r/lightsail_instance_public_ports.html.markdown +++ b/website/docs/cdktf/typescript/r/lightsail_instance_public_ports.html.markdown @@ -75,22 +75,23 @@ class MyConvertedCode extends TerraformStack { The following arguments are required: -* `instanceName` - (Required) Name of the Lightsail Instance. -* `portInfo` - (Required) Configuration block with port information. AWS closes all currently open ports that are not included in the `portInfo`. [See below](#port_info). +* `instanceName` - (Required) Name of the instance for which to open ports. +* `portInfo` - (Required) Descriptor of the ports to open for the specified instance. AWS closes all currently open ports that are not included in this argument. 
See [`portInfo` Block](#port_info-block) for details. -### port_info +The following arguments are optional: -The following arguments are required: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). -* `fromPort` - (Required) First port in a range of open ports on an instance. -* `protocol` - (Required) IP protocol name. Valid values: `tcp`, `all`, `udp`, `icmp`. -* `toPort` - (Required) Last port in a range of open ports on an instance. +### `portInfo` Block -The following arguments are optional: +The `portInfo` configuration block supports the following arguments: +* `fromPort` - (Required) First port in a range of open ports on an instance. See [PortInfo](https://docs.aws.amazon.com/lightsail/2016-11-28/api-reference/API_PortInfo.html) for details. +* `protocol` - (Required) IP protocol name. Valid values: `tcp`, `all`, `udp`, `icmp`, `icmpv6`. See [PortInfo](https://docs.aws.amazon.com/lightsail/2016-11-28/api-reference/API_PortInfo.html) for details. +* `toPort` - (Required) Last port in a range of open ports on an instance. See [PortInfo](https://docs.aws.amazon.com/lightsail/2016-11-28/api-reference/API_PortInfo.html) for details. * `cidrListAliases` - (Optional) Set of CIDR aliases that define access for a preconfigured range of IP addresses. -* `cidrs` - (Optional) Set of CIDR blocks. -* `ipv6Cidrs` - (Optional) Set of IPv6 CIDR blocks. +* `cidrs` - (Optional) Set of IPv4 addresses or ranges of IPv4 addresses (in CIDR notation) that are allowed to connect to an instance through the ports, and the protocol. +* `ipv6Cidrs` - (Optional) Set of IPv6 addresses or ranges of IPv6 addresses (in CIDR notation) that are allowed to connect to an instance through the ports, and the protocol. 
## Attribute Reference @@ -98,4 +99,4 @@ This resource exports the following attributes in addition to the arguments abov * `id` - ID of the resource. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/lightsail_key_pair.html.markdown b/website/docs/cdktf/typescript/r/lightsail_key_pair.html.markdown index 3ccb6941345d..eda566755a24 100644 --- a/website/docs/cdktf/typescript/r/lightsail_key_pair.html.markdown +++ b/website/docs/cdktf/typescript/r/lightsail_key_pair.html.markdown @@ -92,6 +92,7 @@ The following arguments are optional: * `namePrefix` - (Optional) Creates a unique name beginning with the specified prefix. Conflicts with `name`. * `pgpKey` - (Optional) PGP key to encrypt the resulting private key material. Only used when creating a new key pair. * `publicKey` - (Optional) Public key material. This public key will be imported into Lightsail. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `tags` - (Optional) Map of tags to assign to the resource. To create a key-only tag, use an empty string as the value. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. ~> **Note:** A PGP key is not required, however it is strongly encouraged. Without a PGP key, the private key material will be stored in state unencrypted. `pgpKey` is ignored if `publicKey` is supplied. 
@@ -113,4 +114,4 @@ This resource exports the following attributes in addition to the arguments abov You cannot import Lightsail Key Pairs because the private and public key are only available on initial creation. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/lightsail_lb.html.markdown b/website/docs/cdktf/typescript/r/lightsail_lb.html.markdown index e28c3c84fcfc..e40315efa047 100644 --- a/website/docs/cdktf/typescript/r/lightsail_lb.html.markdown +++ b/website/docs/cdktf/typescript/r/lightsail_lb.html.markdown @@ -52,6 +52,7 @@ The following arguments are optional: * `healthCheckPath` - (Optional) Health check path of the load balancer. Default value `/`. * `ipAddressType` - (Optional) IP address type of the load balancer. Valid values: `dualstack`, `ipv4`. Default value `dualstack`. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `tags` - (Optional) Map of tags to assign to the resource. To create a key-only tag, use an empty string as the value. If configured with a provider `defaultTags` configuration block present, tags with matching keys will overwrite those defined at the provider-level. ## Attribute Reference @@ -99,4 +100,4 @@ Using `terraform import`, import `aws_lightsail_lb` using the name attribute. 
Fo % terraform import aws_lightsail_lb.example example-load-balancer ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/lightsail_lb_attachment.html.markdown b/website/docs/cdktf/typescript/r/lightsail_lb_attachment.html.markdown index fe44b7ef75f5..15d5dc0a6dbf 100644 --- a/website/docs/cdktf/typescript/r/lightsail_lb_attachment.html.markdown +++ b/website/docs/cdktf/typescript/r/lightsail_lb_attachment.html.markdown @@ -82,6 +82,10 @@ The following arguments are required: * `instanceName` - (Required) Name of the instance to attach to the load balancer. * `lbName` - (Required) Name of the Lightsail load balancer. +The following arguments are optional: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). + ## Attribute Reference This resource exports the following attributes in addition to the arguments above: @@ -120,4 +124,4 @@ Using `terraform import`, import `aws_lightsail_lb_attachment` using the name at % terraform import aws_lightsail_lb_attachment.example example-load-balancer,example-instance ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/lightsail_lb_certificate.html.markdown b/website/docs/cdktf/typescript/r/lightsail_lb_certificate.html.markdown index 54de1b66e97b..2d2e88edbcc8 100644 --- a/website/docs/cdktf/typescript/r/lightsail_lb_certificate.html.markdown +++ b/website/docs/cdktf/typescript/r/lightsail_lb_certificate.html.markdown @@ -63,6 +63,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `subjectAlternativeNames` - (Optional) Set of domains that should be SANs in the issued certificate. `domainName` attribute is automatically added as a Subject Alternative Name. ## Attribute Reference @@ -107,4 +108,4 @@ Using `terraform import`, import `aws_lightsail_lb_certificate` using the id att % terraform import aws_lightsail_lb_certificate.example example-load-balancer,example-load-balancer-certificate ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/lightsail_lb_certificate_attachment.html.markdown b/website/docs/cdktf/typescript/r/lightsail_lb_certificate_attachment.html.markdown index d5afda7c4465..ac21de77fdfa 100644 --- a/website/docs/cdktf/typescript/r/lightsail_lb_certificate_attachment.html.markdown +++ b/website/docs/cdktf/typescript/r/lightsail_lb_certificate_attachment.html.markdown @@ -68,6 +68,10 @@ The following arguments are required: * `certificateName` - (Required) Name of your SSL/TLS certificate. * `lbName` - (Required) Name of the load balancer to which you want to associate the SSL/TLS certificate. +The following arguments are optional: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
+ ## Attribute Reference This resource exports the following attributes in addition to the arguments above: @@ -106,4 +110,4 @@ Using `terraform import`, import `aws_lightsail_lb_certificate_attachment` using % terraform import aws_lightsail_lb_certificate_attachment.example example-load-balancer,example-certificate ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/lightsail_lb_https_redirection_policy.html.markdown b/website/docs/cdktf/typescript/r/lightsail_lb_https_redirection_policy.html.markdown index f99b17c6aa78..e49d70248961 100644 --- a/website/docs/cdktf/typescript/r/lightsail_lb_https_redirection_policy.html.markdown +++ b/website/docs/cdktf/typescript/r/lightsail_lb_https_redirection_policy.html.markdown @@ -76,6 +76,10 @@ The following arguments are required: * `enabled` - (Required) Whether to enable HTTP to HTTPS redirection. `true` to activate HTTP to HTTPS redirection or `false` to deactivate HTTP to HTTPS redirection. * `lbName` - (Required) Name of the load balancer to which you want to enable HTTP to HTTPS redirection. +The following arguments are optional: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
+ ## Attribute Reference This resource exports the following attributes in addition to the arguments above: @@ -114,4 +118,4 @@ Using `terraform import`, import `aws_lightsail_lb_https_redirection_policy` usi % terraform import aws_lightsail_lb_https_redirection_policy.example example-load-balancer ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/lightsail_lb_stickiness_policy.html.markdown b/website/docs/cdktf/typescript/r/lightsail_lb_stickiness_policy.html.markdown index 99f554a2e8dd..77e9562b1f5e 100644 --- a/website/docs/cdktf/typescript/r/lightsail_lb_stickiness_policy.html.markdown +++ b/website/docs/cdktf/typescript/r/lightsail_lb_stickiness_policy.html.markdown @@ -58,6 +58,10 @@ The following arguments are required: * `enabled` - (Required) Whether to enable session stickiness for the load balancer. * `lbName` - (Required) Name of the load balancer to which you want to enable session stickiness. +The following arguments are optional: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
+ ## Attribute Reference This resource exports the following attributes in addition to the arguments above: @@ -96,4 +100,4 @@ Using `terraform import`, import `aws_lightsail_lb_stickiness_policy` using the % terraform import aws_lightsail_lb_stickiness_policy.example example-load-balancer ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/lightsail_static_ip.html.markdown b/website/docs/cdktf/typescript/r/lightsail_static_ip.html.markdown index 0318f47870e9..92f08793b6ae 100644 --- a/website/docs/cdktf/typescript/r/lightsail_static_ip.html.markdown +++ b/website/docs/cdktf/typescript/r/lightsail_static_ip.html.markdown @@ -44,6 +44,10 @@ The following arguments are required: * `name` - (Required) Name for the allocated static IP. +The following arguments are optional: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). + ## Attribute Reference This resource exports the following attributes in addition to the arguments above: @@ -80,4 +84,4 @@ Using `terraform import`, import `aws_lightsail_static_ip` using the name attrib % terraform import aws_lightsail_static_ip.example example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/lightsail_static_ip_attachment.html.markdown b/website/docs/cdktf/typescript/r/lightsail_static_ip_attachment.html.markdown index 531dc5e03de7..4c40bcc83659 100644 --- a/website/docs/cdktf/typescript/r/lightsail_static_ip_attachment.html.markdown +++ b/website/docs/cdktf/typescript/r/lightsail_static_ip_attachment.html.markdown @@ -66,6 +66,10 @@ The following arguments are required: * `instanceName` - (Required) Name of the Lightsail instance to attach the IP to. 
* `staticIpName` - (Required) Name of the allocated static IP. +The following arguments are optional: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). + ## Attribute Reference This resource exports the following attributes in addition to the arguments above: @@ -104,4 +108,4 @@ Using `terraform import`, import `aws_lightsail_static_ip_attachment` using the % terraform import aws_lightsail_static_ip_attachment.example example-static-ip ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/load_balancer_backend_server_policy.html.markdown b/website/docs/cdktf/typescript/r/load_balancer_backend_server_policy.html.markdown index e35e09e86fdd..c1ccaffb8e30 100644 --- a/website/docs/cdktf/typescript/r/load_balancer_backend_server_policy.html.markdown +++ b/website/docs/cdktf/typescript/r/load_balancer_backend_server_policy.html.markdown @@ -89,6 +89,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `loadBalancerName` - (Required) The load balancer to attach the policy to. * `policyNames` - (Required) List of Policy Names to apply to the backend server. * `instancePort` - (Required) The instance port to apply the policy to. @@ -101,4 +102,4 @@ This resource exports the following attributes in addition to the arguments abov * `loadBalancerName` - The load balancer on which the policy is defined. 
* `instancePort` - The backend port the policies are applied to - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/load_balancer_listener_policy.html.markdown b/website/docs/cdktf/typescript/r/load_balancer_listener_policy.html.markdown index f92bd5922962..0f73e0bab617 100644 --- a/website/docs/cdktf/typescript/r/load_balancer_listener_policy.html.markdown +++ b/website/docs/cdktf/typescript/r/load_balancer_listener_policy.html.markdown @@ -134,6 +134,7 @@ This example shows how to add a [Predefined Security Policy for ELBs](https://do This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `loadBalancerName` - (Required) The load balancer to attach the policy to. * `loadBalancerPort` - (Required) The load balancer listener port to apply the policy to. * `policyNames` - (Required) List of Policy Names to apply to the backend server. @@ -147,4 +148,4 @@ This resource exports the following attributes in addition to the arguments abov * `loadBalancerName` - The load balancer on which the policy is defined. 
* `loadBalancerPort` - The load balancer listener port the policies are applied to - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/load_balancer_policy.html.markdown b/website/docs/cdktf/typescript/r/load_balancer_policy.html.markdown index 313a8bd02267..bfdd2cb5488a 100644 --- a/website/docs/cdktf/typescript/r/load_balancer_policy.html.markdown +++ b/website/docs/cdktf/typescript/r/load_balancer_policy.html.markdown @@ -121,6 +121,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `loadBalancerName` - (Required) The load balancer on which the policy is defined. * `policyName` - (Required) The name of the load balancer policy. * `policyTypeName` - (Required) The policy type. @@ -135,4 +136,4 @@ This resource exports the following attributes in addition to the arguments abov * `policyTypeName` - The policy type of the policy. * `loadBalancerName` - The load balancer on which the policy is defined. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/location_geofence_collection.html.markdown b/website/docs/cdktf/typescript/r/location_geofence_collection.html.markdown index d91cdc5e77a8..5d125dff33d6 100644 --- a/website/docs/cdktf/typescript/r/location_geofence_collection.html.markdown +++ b/website/docs/cdktf/typescript/r/location_geofence_collection.html.markdown @@ -42,6 +42,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` - (Optional) The optional description for the geofence collection. * `kmsKeyId` - (Optional) A key identifier for an AWS KMS customer managed key assigned to the Amazon Location resource. * `tags` - (Optional) Key-value tags for the geofence collection. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. @@ -94,4 +95,4 @@ Using `terraform import`, import Location Geofence Collection using the `collect % terraform import aws_location_geofence_collection.example example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/location_map.html.markdown b/website/docs/cdktf/typescript/r/location_map.html.markdown index 28ce8558aca7..b2d59f3e9cac 100644 --- a/website/docs/cdktf/typescript/r/location_map.html.markdown +++ b/website/docs/cdktf/typescript/r/location_map.html.markdown @@ -46,6 +46,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` - (Optional) An optional description for the map resource. * `tags` - (Optional) Key-value tags for the map. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. 
@@ -92,4 +93,4 @@ Using `terraform import`, import `aws_location_map` resources using the map name % terraform import aws_location_map.example example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/location_place_index.html.markdown b/website/docs/cdktf/typescript/r/location_place_index.html.markdown index 8b800071c9b7..bd4ae2f3090a 100644 --- a/website/docs/cdktf/typescript/r/location_place_index.html.markdown +++ b/website/docs/cdktf/typescript/r/location_place_index.html.markdown @@ -44,6 +44,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `dataSourceConfiguration` - (Optional) Configuration block with the data storage option chosen for requesting Places. Detailed below. * `description` - (Optional) The optional description for the place index resource. * `tags` - (Optional) Key-value tags for the place index. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. @@ -52,6 +53,7 @@ The following arguments are optional: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `intendedUse` - (Optional) Specifies how the results of an operation will be stored by the caller. 
Valid values: `SingleUse`, `Storage`. Default: `SingleUse`. ## Attribute Reference @@ -91,4 +93,4 @@ Using `terraform import`, import `aws_location_place_index` resources using the % terraform import aws_location_place_index.example example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/location_route_calculator.html.markdown b/website/docs/cdktf/typescript/r/location_route_calculator.html.markdown index c8473d552937..c6be9731cdc1 100644 --- a/website/docs/cdktf/typescript/r/location_route_calculator.html.markdown +++ b/website/docs/cdktf/typescript/r/location_route_calculator.html.markdown @@ -44,6 +44,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` - (Optional) The optional description for the route calculator resource. * `tags` - (Optional) Key-value tags for the route calculator. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. 
@@ -92,4 +93,4 @@ Using `terraform import`, import `aws_location_route_calculator` using the route % terraform import aws_location_route_calculator.example example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/location_tracker.html.markdown b/website/docs/cdktf/typescript/r/location_tracker.html.markdown index 0cca7a4e29a5..e616cf8f336e 100644 --- a/website/docs/cdktf/typescript/r/location_tracker.html.markdown +++ b/website/docs/cdktf/typescript/r/location_tracker.html.markdown @@ -42,6 +42,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` - (Optional) The optional description for the tracker resource. * `kmsKeyId` - (Optional) A key identifier for an AWS KMS customer managed key assigned to the Amazon Location resource. * `positionFiltering` - (Optional) The position filtering method of the tracker resource. Valid values: `TimeBased`, `DistanceBased`, `AccuracyBased`. Default: `TimeBased`. 
@@ -84,4 +85,4 @@ Using `terraform import`, import `aws_location_tracker` resources using the trac % terraform import aws_location_tracker.example example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/location_tracker_association.html.markdown b/website/docs/cdktf/typescript/r/location_tracker_association.html.markdown index b0ade03c9012..70730c775a49 100644 --- a/website/docs/cdktf/typescript/r/location_tracker_association.html.markdown +++ b/website/docs/cdktf/typescript/r/location_tracker_association.html.markdown @@ -53,8 +53,9 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -The following arguments are required: +This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `consumerArn` - (Required) The Amazon Resource Name (ARN) for the geofence collection to be associated to tracker resource. Used when you need to specify a resource across all AWS. * `trackerName` - (Required) The name of the tracker resource to be associated with a geofence collection. 
@@ -101,4 +102,4 @@ Using `terraform import`, import Location Tracker Association using the `tracker % terraform import aws_location_tracker_association.example "tracker_name|consumer_arn" ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/m2_application.html.markdown b/website/docs/cdktf/typescript/r/m2_application.html.markdown index f704d0fba107..527018dc5a2b 100644 --- a/website/docs/cdktf/typescript/r/m2_application.html.markdown +++ b/website/docs/cdktf/typescript/r/m2_application.html.markdown @@ -54,6 +54,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `definition` - (Optional) The application definition for this application. You can specify either inline JSON or an S3 bucket location. * `kmsKeyId` - (Optional) KMS Key to use for the Application. * `roleArn` - (Optional) ARN of role for application to use to access AWS resources. @@ -65,6 +66,7 @@ This argument is processed in [attribute-as-blocks mode](https://www.terraform.i The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `content` - (Optional) JSON application definition. Either this or `s3Location` must be specified. * `s3Location` - (Optional) Location of the application definition in S3. Either this or `content` must be specified. 
@@ -117,4 +119,4 @@ Using `terraform import`, import Mainframe Modernization Application using the ` % terraform import aws_m2_application.example 01234567890abcdef012345678 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/m2_deployment.html.markdown b/website/docs/cdktf/typescript/r/m2_deployment.html.markdown index 7061738bd7ed..d264c583e8f8 100644 --- a/website/docs/cdktf/typescript/r/m2_deployment.html.markdown +++ b/website/docs/cdktf/typescript/r/m2_deployment.html.markdown @@ -40,8 +40,9 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -The following arguments are required: +This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `environmentId` - (Required) Environment to deploy application to. * `applicationId` - (Required) Application to deploy. 
* `applicationVersion` - (Required) Version to application to deploy @@ -91,4 +92,4 @@ Using `terraform import`, import Mainframe Modernization Deployment using the `A % terraform import aws_m2_deployment.example APPLICATION-ID,DEPLOYMENT-ID ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/m2_environment.html.markdown b/website/docs/cdktf/typescript/r/m2_environment.html.markdown index 73a5a5f12c8a..e2542c3ff389 100644 --- a/website/docs/cdktf/typescript/r/m2_environment.html.markdown +++ b/website/docs/cdktf/typescript/r/m2_environment.html.markdown @@ -152,6 +152,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `engineVersion` - (Optional) The specific version of the engine for the Environment. * `forceUpdate` - (Optional) Force update the environment even if applications are running. * `kmsKeyId` - (Optional) ARN of the KMS key to use for the Environment. 
@@ -238,4 +239,4 @@ Using `terraform import`, import Mainframe Modernization Environment using the ` % terraform import aws_m2_environment.example 01234567890abcdef012345678 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/macie2_account.html.markdown b/website/docs/cdktf/typescript/r/macie2_account.html.markdown index b7e974c3ace2..31a7644f183e 100644 --- a/website/docs/cdktf/typescript/r/macie2_account.html.markdown +++ b/website/docs/cdktf/typescript/r/macie2_account.html.markdown @@ -39,6 +39,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `findingPublishingFrequency` - (Optional) Specifies how often to publish updates to policy findings for the account. This includes publishing updates to AWS Security Hub and Amazon EventBridge (formerly called Amazon CloudWatch Events). Valid values are `FIFTEEN_MINUTES`, `ONE_HOUR` or `SIX_HOURS`. * `status` - (Optional) Specifies the status for the account. To enable Amazon Macie and start all Macie activities for the account, set this value to `ENABLED`. Valid values are `ENABLED` or `PAUSED`. @@ -79,4 +80,4 @@ Using `terraform import`, import `aws_macie2_account` using the id. 
For example: % terraform import aws_macie2_account.example abcd1 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/macie2_classification_export_configuration.html.markdown b/website/docs/cdktf/typescript/r/macie2_classification_export_configuration.html.markdown index 011f262c14e1..d2de233fd84b 100644 --- a/website/docs/cdktf/typescript/r/macie2_classification_export_configuration.html.markdown +++ b/website/docs/cdktf/typescript/r/macie2_classification_export_configuration.html.markdown @@ -50,6 +50,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `s3Destination` - (Required) Configuration block for a S3 Destination. Defined below ### s3_destination Configuration Block @@ -70,7 +71,7 @@ This resource exports the following attributes in addition to the arguments abov ## Import -In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import `aws_macie2_classification_export_configuration` using the account ID and region. For example: +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import `aws_macie2_classification_export_configuration` using the region. For example: ```typescript // DO NOT EDIT. 
Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug @@ -87,17 +88,17 @@ class MyConvertedCode extends TerraformStack { Macie2ClassificationExportConfiguration.generateConfigForImport( this, "example", - "123456789012:us-west-2" + "us-west-2" ); } } ``` -Using `terraform import`, import `aws_macie2_classification_export_configuration` using the account ID and region. For example: +Using `terraform import`, import `aws_macie2_classification_export_configuration` using the region. For example: ```console -% terraform import aws_macie2_classification_export_configuration.example 123456789012:us-west-2 +% terraform import aws_macie2_classification_export_configuration.example us-west-2 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/macie2_classification_job.html.markdown b/website/docs/cdktf/typescript/r/macie2_classification_job.html.markdown index 156892a3fcdc..c0e93d1a9b20 100644 --- a/website/docs/cdktf/typescript/r/macie2_classification_job.html.markdown +++ b/website/docs/cdktf/typescript/r/macie2_classification_job.html.markdown @@ -56,6 +56,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `scheduleFrequency` - (Optional) The recurrence pattern for running the job. To run the job only once, don't specify a value for this property and set the value for the `jobType` property to `ONE_TIME`. (documented below) * `customDataIdentifierIds` - (Optional) The custom data identifiers to use for data analysis and classification. * `samplingPercentage` - (Optional) The sampling depth, as a percentage, to apply when processing objects. 
This value determines the percentage of eligible objects that the job analyzes. If this value is less than 100, Amazon Macie selects the objects to analyze at random, up to the specified percentage, and analyzes all the data in those objects. @@ -185,4 +186,4 @@ Using `terraform import`, import `aws_macie2_classification_job` using the id. F % terraform import aws_macie2_classification_job.example abcd1 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/macie2_custom_data_identifier.html.markdown b/website/docs/cdktf/typescript/r/macie2_custom_data_identifier.html.markdown index 31601987712b..4636b7602b8b 100644 --- a/website/docs/cdktf/typescript/r/macie2_custom_data_identifier.html.markdown +++ b/website/docs/cdktf/typescript/r/macie2_custom_data_identifier.html.markdown @@ -52,6 +52,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `regex` - (Optional) The regular expression (regex) that defines the pattern to match. The expression can contain as many as 512 characters. * `keywords` - (Optional) An array that lists specific character sequences (keywords), one of which must be within proximity (`maximumMatchDistance`) of the regular expression to match. The array can contain as many as 50 keywords. Each keyword can contain 3 - 90 characters. Keywords aren't case sensitive. * `ignoreWords` - (Optional) An array that lists specific character sequences (ignore words) to exclude from the results. If the text matched by the regular expression is the same as any string in this array, Amazon Macie ignores it. The array can contain as many as 10 ignore words. 
Each ignore word can contain 4 - 90 characters. Ignore words are case sensitive. @@ -103,4 +104,4 @@ Using `terraform import`, import `aws_macie2_custom_data_identifier` using the i % terraform import aws_macie2_custom_data_identifier.example abcd1 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/macie2_findings_filter.html.markdown b/website/docs/cdktf/typescript/r/macie2_findings_filter.html.markdown index 13566e25f97e..bb2f50d2a418 100644 --- a/website/docs/cdktf/typescript/r/macie2_findings_filter.html.markdown +++ b/website/docs/cdktf/typescript/r/macie2_findings_filter.html.markdown @@ -35,7 +35,7 @@ class MyConvertedCode extends TerraformStack { findingCriteria: { criterion: [ { - eq: [Token.asString(current.name)], + eq: [Token.asString(current.region)], field: "region", }, ], @@ -52,6 +52,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `findingCriteria` - (Required) The criteria to use to filter findings. * `name` - (Optional) A custom name for the filter. The name must contain at least 3 characters and can contain as many as 64 characters. If omitted, Terraform will assign a random, unique name. Conflicts with `namePrefix`. * `namePrefix` - (Optional) Creates a unique name beginning with the specified prefix. Conflicts with `name`. @@ -111,4 +112,4 @@ Using `terraform import`, import `aws_macie2_findings_filter` using the id. 
For % terraform import aws_macie2_findings_filter.example abcd1 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/macie2_invitation_accepter.html.markdown b/website/docs/cdktf/typescript/r/macie2_invitation_accepter.html.markdown index b53c9bdfa9f7..c7e0b7e309ee 100644 --- a/website/docs/cdktf/typescript/r/macie2_invitation_accepter.html.markdown +++ b/website/docs/cdktf/typescript/r/macie2_invitation_accepter.html.markdown @@ -61,6 +61,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `administratorAccountId` - (Required) The AWS account ID for the account that sent the invitation. ## Attribute Reference @@ -102,4 +103,4 @@ Using `terraform import`, import `aws_macie2_invitation_accepter` using the admi % terraform import aws_macie2_invitation_accepter.example 123456789012 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/macie2_member.html.markdown b/website/docs/cdktf/typescript/r/macie2_member.html.markdown index 17132f8dbd32..b52d02c644d3 100644 --- a/website/docs/cdktf/typescript/r/macie2_member.html.markdown +++ b/website/docs/cdktf/typescript/r/macie2_member.html.markdown @@ -47,9 +47,9 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
* `accountId` - (Required) The AWS account ID for the account. * `email` - (Required) The email address for the account. -* `tags` - (Optional) A map of key-value pairs that specifies the tags to associate with the account in Amazon Macie. * `status` - (Optional) Specifies the status for the account. To enable Amazon Macie and start all Macie activities for the account, set this value to `ENABLED`. Valid values are `ENABLED` or `PAUSED`. * `invite` - (Optional) Send an invitation to a member * `invitationMessage` - (Optional) A custom message to include in the invitation. Amazon Macie adds this message to the standard content that it sends for an invitation. @@ -96,4 +96,4 @@ Using `terraform import`, import `aws_macie2_member` using the account ID of the % terraform import aws_macie2_member.example 123456789012 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/macie2_organization_admin_account.html.markdown b/website/docs/cdktf/typescript/r/macie2_organization_admin_account.html.markdown index 7407fd466555..166001a38200 100644 --- a/website/docs/cdktf/typescript/r/macie2_organization_admin_account.html.markdown +++ b/website/docs/cdktf/typescript/r/macie2_organization_admin_account.html.markdown @@ -44,6 +44,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `adminAccountId` - (Required) The AWS account ID for the account to designate as the delegated Amazon Macie administrator account for the organization. 
## Attribute Reference @@ -84,4 +85,4 @@ Using `terraform import`, import `aws_macie2_organization_admin_account` using t % terraform import aws_macie2_organization_admin_account.example abcd1 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/macie2_organization_configuration.html.markdown b/website/docs/cdktf/typescript/r/macie2_organization_configuration.html.markdown index d2ceb484a74f..2d06889b49d1 100644 --- a/website/docs/cdktf/typescript/r/macie2_organization_configuration.html.markdown +++ b/website/docs/cdktf/typescript/r/macie2_organization_configuration.html.markdown @@ -38,10 +38,11 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `autoEnable` - (Required) Whether to enable Amazon Macie automatically for accounts that are added to the organization in AWS Organizations. ## Attribute Reference This resource exports no additional attributes. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/main_route_table_association.html.markdown b/website/docs/cdktf/typescript/r/main_route_table_association.html.markdown index 43a989700cba..814e8faae0d5 100644 --- a/website/docs/cdktf/typescript/r/main_route_table_association.html.markdown +++ b/website/docs/cdktf/typescript/r/main_route_table_association.html.markdown @@ -42,6 +42,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `vpcId` - (Required) The ID of the VPC whose main route table should be set * `routeTableId` - (Required) The ID of the Route Table to set as the new main route table for the target VPC @@ -74,4 +75,4 @@ the `main_route_table_association` delete to work properly. [tf-route-tables]: /docs/providers/aws/r/route_table.html [tf-default-route-table]: /docs/providers/aws/r/default_route_table.html - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/media_convert_queue.html.markdown b/website/docs/cdktf/typescript/r/media_convert_queue.html.markdown index 3e01664e8587..631b88a277f2 100644 --- a/website/docs/cdktf/typescript/r/media_convert_queue.html.markdown +++ b/website/docs/cdktf/typescript/r/media_convert_queue.html.markdown @@ -38,8 +38,9 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) A unique identifier describing the queue -* `concurrent_jobs` - (Optional) The maximum number of jobs your queue can process concurrently. For on-demand queues, the value you enter is constrained by your service quotas for Maximum concurrent jobs, per on-demand queue and Maximum concurrent jobs, per account. For reserved queues, specify the number of jobs you can process concurrently in your reservation plan instead. +* `concurrentJobs` - (Optional) The maximum number of jobs your queue can process concurrently. 
For on-demand queues, the value you enter is constrained by your service quotas for Maximum concurrent jobs, per on-demand queue and Maximum concurrent jobs, per account. For reserved queues, specify the number of jobs you can process concurrently in your reservation plan instead. * `description` - (Optional) A description of the queue * `pricingPlan` - (Optional) Specifies whether the pricing plan for the queue is on-demand or reserved. Valid values are `ON_DEMAND` or `RESERVED`. Default to `ON_DEMAND`. * `reservationPlanSettings` - (Optional) A detail pricing plan of the reserved queue. See below. @@ -90,4 +91,4 @@ Using `terraform import`, import Media Convert Queue using the queue name. For e % terraform import aws_media_convert_queue.test tf-test-queue ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/media_package_channel.html.markdown b/website/docs/cdktf/typescript/r/media_package_channel.html.markdown index 13bbe8bc76ba..4f7802c12e3f 100644 --- a/website/docs/cdktf/typescript/r/media_package_channel.html.markdown +++ b/website/docs/cdktf/typescript/r/media_package_channel.html.markdown @@ -39,6 +39,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `channelId` - (Required) A unique identifier describing the channel * `description` - (Optional) A description of the channel * `tags` - (Optional) A map of tags to assign to the resource. 
If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. @@ -88,4 +89,4 @@ Using `terraform import`, import Media Package Channels using the channel ID. Fo % terraform import aws_media_package_channel.kittens kittens-channel ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/media_packagev2_channel_group.html.markdown b/website/docs/cdktf/typescript/r/media_packagev2_channel_group.html.markdown index e4f029fcfc10..c03763a1124b 100644 --- a/website/docs/cdktf/typescript/r/media_packagev2_channel_group.html.markdown +++ b/website/docs/cdktf/typescript/r/media_packagev2_channel_group.html.markdown @@ -22,7 +22,7 @@ import { TerraformStack } from "cdktf"; * Provider bindings are generated by running `cdktf get`. * See https://cdk.tf/provider-generation for more details. */ -import { MediaPackagev2ChannelGroup } from "./.gen/providers/aws/"; +import { MediaPackagev2ChannelGroup } from "./.gen/providers/aws/media-packagev2-channel-group"; class MyConvertedCode extends TerraformStack { constructor(scope: Construct, name: string) { super(scope, name); @@ -39,6 +39,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) A unique identifier naming the channel group * `description` - (Optional) A description of the channel group * `tags` - (Optional) A map of tags to assign to the resource. 
If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. @@ -49,7 +50,7 @@ This resource exports the following attributes in addition to the arguments abov * `arn` - The ARN of the channel * `description` - The same as `description` -* `egress_domain` - The egress domain of the channel group +* `egressDomain` - The egress domain of the channel group * `tagsAll` - A map of tags assigned to the resource, including those inherited from the provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). ## Import @@ -64,7 +65,7 @@ import { TerraformStack } from "cdktf"; * Provider bindings are generated by running `cdktf get`. * See https://cdk.tf/provider-generation for more details. */ -import { MediaPackagev2ChannelGroup } from "./.gen/providers/aws/"; +import { MediaPackagev2ChannelGroup } from "./.gen/providers/aws/media-packagev2-channel-group"; class MyConvertedCode extends TerraformStack { constructor(scope: Construct, name: string) { super(scope, name); @@ -84,4 +85,4 @@ Using `terraform import`, import Elemental MediaPackage Version 2 Channel Group % terraform import aws_media_packagev2_channel_group.example example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/media_store_container.html.markdown b/website/docs/cdktf/typescript/r/media_store_container.html.markdown index 54ff64828fe1..c44b7b9041dc 100644 --- a/website/docs/cdktf/typescript/r/media_store_container.html.markdown +++ b/website/docs/cdktf/typescript/r/media_store_container.html.markdown @@ -12,6 +12,8 @@ description: |- Provides a MediaStore Container. 
+!> **WARNING:** _This resource is deprecated and will be removed in a future version._ AWS has [announced](https://aws.amazon.com/blogs/media/support-for-aws-elemental-mediastore-ending-soon/) the discontinuation of AWS Elemental MediaStore, effective **November 13, 2025**. Users should begin transitioning to alternative solutions as soon as possible. For **simple live streaming workflows**, AWS recommends migrating to **Amazon S3**. For **advanced use cases** that require features such as packaging, DRM, or cross-region redundancy, consider using **AWS Elemental MediaPackage**. + ## Example Usage ```typescript @@ -38,6 +40,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The name of the container. Must contain alphanumeric characters or underscores. * `tags` - (Optional) A map of tags to assign to the resource. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. 
@@ -77,4 +80,4 @@ Using `terraform import`, import MediaStore Container using the MediaStore Conta % terraform import aws_media_store_container.example example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/media_store_container_policy.html.markdown b/website/docs/cdktf/typescript/r/media_store_container_policy.html.markdown index 21a1e8eedf47..1ac452e38df2 100644 --- a/website/docs/cdktf/typescript/r/media_store_container_policy.html.markdown +++ b/website/docs/cdktf/typescript/r/media_store_container_policy.html.markdown @@ -12,6 +12,8 @@ description: |- Provides a MediaStore Container Policy. +!> **WARNING:** _This resource is deprecated and will be removed in a future version._ AWS has [announced](https://aws.amazon.com/blogs/media/support-for-aws-elemental-mediastore-ending-soon/) the discontinuation of AWS Elemental MediaStore, effective **November 13, 2025**. Users should begin transitioning to alternative solutions as soon as possible. For **simple live streaming workflows**, AWS recommends migrating to **Amazon S3**. For **advanced use cases** that require features such as packaging, DRM, or cross-region redundancy, consider using **AWS Elemental MediaPackage**. + ~> **NOTE:** We suggest using [`jsonencode()`](https://developer.hashicorp.com/terraform/language/functions/jsonencode) or [`aws_iam_policy_document`](/docs/providers/aws/d/iam_policy_document.html) when assigning a value to `policy`. They seamlessly translate Terraform language into JSON, enabling you to maintain consistency within your configuration without the need for context switches. Also, you can sidestep potential complications arising from formatting discrepancies, whitespace inconsistencies, and other nuances inherent to JSON. 
## Example Usage @@ -62,7 +64,7 @@ class MyConvertedCode extends TerraformStack { ], resources: [ "arn:aws:mediastore:${" + - dataAwsRegionCurrent.name + + dataAwsRegionCurrent.region + "}:${" + current.accountId + "}:container/${" + @@ -95,6 +97,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `containerName` - (Required) The name of the container. * `policy` - (Required) The contents of the policy. For more information about building AWS IAM policy documents with Terraform, see the [AWS IAM Policy Document Guide](https://learn.hashicorp.com/terraform/aws/iam-policy). @@ -134,4 +137,4 @@ Using `terraform import`, import MediaStore Container Policy using the MediaStor % terraform import aws_media_store_container_policy.example example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/medialive_channel.html.markdown b/website/docs/cdktf/typescript/r/medialive_channel.html.markdown index 018db8c08010..a63bf8813797 100644 --- a/website/docs/cdktf/typescript/r/medialive_channel.html.markdown +++ b/website/docs/cdktf/typescript/r/medialive_channel.html.markdown @@ -123,6 +123,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `cdiInputSpecification` - (Optional) Specification of CDI inputs for this channel. 
See [CDI Input Specification](#cdi-input-specification) for more details. * `inputAttachments` - (Optional) Input attachments for the channel. See [Input Attachments](#input-attachments) for more details. * `logLevel` - (Optional) The log level to write to Cloudwatch logs. @@ -247,7 +248,7 @@ The following arguments are optional: ### SCTE 20 Source Settings -* `convert608To708` – (Optional) If upconvert, 608 data is both passed through via the “608 compatibility bytes” fields of the 708 wrapper as well as translated into 708. 708 data present in the source content will be discarded. +* `convert608To708` - (Optional) If upconvert, 608 data is both passed through via the “608 compatibility bytes” fields of the 708 wrapper as well as translated into 708. 708 data present in the source content will be discarded. * `source608ChannelNumber` - (Optional) Specifies the 608/708 channel number within the video track from which to extract captions. Unused for passthrough. ### SCTE 27 Source Settings @@ -596,62 +597,62 @@ The following arguments are optional: * `embeddedPlusScte20DestinationSettings` - (Optional) Embedded Plus SCTE20 Destination Settings. * `rtmpCaptionInfoDestinationSettings` - (Optional) RTMP Caption Info Destination Settings. * `scte20PlusEmbeddedDestinationSettings` - (Optional) SCTE20 Plus Embedded Destination Settings. -* `scte27DestinationSettings` – (Optional) SCTE27 Destination Settings. -* `smpteTtDestinationSettings` – (Optional) SMPTE TT Destination Settings. -* `teletextDestinationSettings` – (Optional) Teletext Destination Settings. -* `ttmlDestinationSettings` – (Optional) TTML Destination Settings. See [TTML Destination Settings](#ttml-destination-settings) for more details. +* `scte27DestinationSettings` - (Optional) SCTE27 Destination Settings. +* `smpteTtDestinationSettings` - (Optional) SMPTE TT Destination Settings. +* `teletextDestinationSettings` - (Optional) Teletext Destination Settings. 
+* `ttmlDestinationSettings` - (Optional) TTML Destination Settings. See [TTML Destination Settings](#ttml-destination-settings) for more details. * `webvttDestinationSettings` - (Optional) WebVTT Destination Settings. See [WebVTT Destination Settings](#webvtt-destination-settings) for more details. ### Burn In Destination Settings -* `alignment` – (Optional) If no explicit xPosition or yPosition is provided, setting alignment to centered will place the captions at the bottom center of the output. Similarly, setting a left alignment will align captions to the bottom left of the output. If x and y positions are given in conjunction with the alignment parameter, the font will be justified (either left or centered) relative to those coordinates. Selecting “smart” justification will left-justify live subtitles and center-justify pre-recorded subtitles. All burn-in and DVB-Sub font settings must match. -* `backgroundColor` – (Optional) Specifies the color of the rectangle behind the captions. All burn-in and DVB-Sub font settings must match. -* `backgroundOpacity` – (Optional) Specifies the opacity of the background rectangle. 255 is opaque; 0 is transparent. Leaving this parameter out is equivalent to setting it to 0 (transparent). All burn-in and DVB-Sub font settings must match. -* `font` – (Optional) External font file used for caption burn-in. File extension must be ‘ttf’ or ‘tte’. Although the user can select output fonts for many different types of input captions, embedded, STL and teletext sources use a strict grid system. Using external fonts with these caption sources could cause unexpected display of proportional fonts. All burn-in and DVB-Sub font settings must match. See [Font](#font) for more details. -* `fontColor` – (Optional) Specifies the color of the burned-in captions. This option is not valid for source captions that are STL, 608/embedded or teletext. These source settings are already pre-defined by the caption stream. 
All burn-in and DVB-Sub font settings must match. -* `fontOpacity` – (Optional) Specifies the opacity of the burned-in captions. 255 is opaque; 0 is transparent. All burn-in and DVB-Sub font settings must match. -* `fontResolution` – (Optional) Font resolution in DPI (dots per inch); default is 96 dpi. All burn-in and DVB-Sub font settings must match. -* `fontSize` – (Optional) When set to ‘auto’ fontSize will scale depending on the size of the output. Giving a positive integer will specify the exact font size in points. All burn-in and DVB-Sub font settings must match. -* `outlineColor` – (Optional) Specifies font outline color. This option is not valid for source captions that are either 608/embedded or teletext. These source settings are already pre-defined by the caption stream. All burn-in and DVB-Sub font settings must match. -* `outlineSize` – (Optional) Specifies font outline size in pixels. This option is not valid for source captions that are either 608/embedded or teletext. These source settings are already pre-defined by the caption stream. All burn-in and DVB-Sub font settings must match. -* `shadowColor` – (Optional) Specifies the color of the shadow cast by the captions. All burn-in and DVB-Sub font settings must match. -* `shadowOpacity` – (Optional) Specifies the opacity of the shadow. 255 is opaque; 0 is transparent. Leaving this parameter out is equivalent to setting it to 0 (transparent). All burn-in and DVB-Sub font settings must match. -* `shadowXOffset` – (Optional) Specifies the horizontal offset of the shadow relative to the captions in pixels. A value of -2 would result in a shadow offset 2 pixels to the left. All burn-in and DVB-Sub font settings must match. -* `shadowYOffset` – (Optional) Specifies the vertical offset of the shadow relative to the captions in pixels. A value of -2 would result in a shadow offset 2 pixels above the text. All burn-in and DVB-Sub font settings must match. 
-* `teletextGridControl` – (Optional) Controls whether a fixed grid size will be used to generate the output subtitles bitmap. Only applicable for Teletext inputs and DVB-Sub/Burn-in outputs. -* `xPosition` – (Optional) Specifies the horizontal position of the caption relative to the left side of the output in pixels. A value of 10 would result in the captions starting 10 pixels from the left of the output. If no explicit xPosition is provided, the horizontal caption position will be determined by the alignment parameter. All burn-in and DVB-Sub font settings must match. -* `yPosition` – (Optional) Specifies the vertical position of the caption relative to the top of the output in pixels. A value of 10 would result in the captions starting 10 pixels from the top of the output. If no explicit yPosition is provided, the caption will be positioned towards the bottom of the output. All burn-in and DVB-Sub font settings must match. +* `alignment` - (Optional) If no explicit xPosition or yPosition is provided, setting alignment to centered will place the captions at the bottom center of the output. Similarly, setting a left alignment will align captions to the bottom left of the output. If x and y positions are given in conjunction with the alignment parameter, the font will be justified (either left or centered) relative to those coordinates. Selecting “smart” justification will left-justify live subtitles and center-justify pre-recorded subtitles. All burn-in and DVB-Sub font settings must match. +* `backgroundColor` - (Optional) Specifies the color of the rectangle behind the captions. All burn-in and DVB-Sub font settings must match. +* `backgroundOpacity` - (Optional) Specifies the opacity of the background rectangle. 255 is opaque; 0 is transparent. Leaving this parameter out is equivalent to setting it to 0 (transparent). All burn-in and DVB-Sub font settings must match. +* `font` - (Optional) External font file used for caption burn-in. 
File extension must be ‘ttf’ or ‘tte’. Although the user can select output fonts for many different types of input captions, embedded, STL and teletext sources use a strict grid system. Using external fonts with these caption sources could cause unexpected display of proportional fonts. All burn-in and DVB-Sub font settings must match. See [Font](#font) for more details. +* `fontColor` - (Optional) Specifies the color of the burned-in captions. This option is not valid for source captions that are STL, 608/embedded or teletext. These source settings are already pre-defined by the caption stream. All burn-in and DVB-Sub font settings must match. +* `fontOpacity` - (Optional) Specifies the opacity of the burned-in captions. 255 is opaque; 0 is transparent. All burn-in and DVB-Sub font settings must match. +* `fontResolution` - (Optional) Font resolution in DPI (dots per inch); default is 96 dpi. All burn-in and DVB-Sub font settings must match. +* `fontSize` - (Optional) When set to ‘auto’ fontSize will scale depending on the size of the output. Giving a positive integer will specify the exact font size in points. All burn-in and DVB-Sub font settings must match. +* `outlineColor` - (Optional) Specifies font outline color. This option is not valid for source captions that are either 608/embedded or teletext. These source settings are already pre-defined by the caption stream. All burn-in and DVB-Sub font settings must match. +* `outlineSize` - (Optional) Specifies font outline size in pixels. This option is not valid for source captions that are either 608/embedded or teletext. These source settings are already pre-defined by the caption stream. All burn-in and DVB-Sub font settings must match. +* `shadowColor` - (Optional) Specifies the color of the shadow cast by the captions. All burn-in and DVB-Sub font settings must match. +* `shadowOpacity` - (Optional) Specifies the opacity of the shadow. 255 is opaque; 0 is transparent. 
Leaving this parameter out is equivalent to setting it to 0 (transparent). All burn-in and DVB-Sub font settings must match. +* `shadowXOffset` - (Optional) Specifies the horizontal offset of the shadow relative to the captions in pixels. A value of -2 would result in a shadow offset 2 pixels to the left. All burn-in and DVB-Sub font settings must match. +* `shadowYOffset` - (Optional) Specifies the vertical offset of the shadow relative to the captions in pixels. A value of -2 would result in a shadow offset 2 pixels above the text. All burn-in and DVB-Sub font settings must match. +* `teletextGridControl` - (Optional) Controls whether a fixed grid size will be used to generate the output subtitles bitmap. Only applicable for Teletext inputs and DVB-Sub/Burn-in outputs. +* `xPosition` - (Optional) Specifies the horizontal position of the caption relative to the left side of the output in pixels. A value of 10 would result in the captions starting 10 pixels from the left of the output. If no explicit xPosition is provided, the horizontal caption position will be determined by the alignment parameter. All burn-in and DVB-Sub font settings must match. +* `yPosition` - (Optional) Specifies the vertical position of the caption relative to the top of the output in pixels. A value of 10 would result in the captions starting 10 pixels from the top of the output. If no explicit yPosition is provided, the caption will be positioned towards the bottom of the output. All burn-in and DVB-Sub font settings must match. ### DVB Sub Destination Settings -* `alignment` – (Optional) If no explicit xPosition or yPosition is provided, setting alignment to centered will place the captions at the bottom center of the output. Similarly, setting a left alignment will align captions to the bottom left of the output. If x and y positions are given in conjunction with the alignment parameter, the font will be justified (either left or centered) relative to those coordinates. 
Selecting “smart” justification will left-justify live subtitles and center-justify pre-recorded subtitles. This option is not valid for source captions that are STL or 608/embedded. These source settings are already pre-defined by the caption stream. All burn-in and DVB-Sub font settings must match. -* `backgroundColor` – (Optional) Specifies the color of the rectangle behind the captions. All burn-in and DVB-Sub font settings must match. -* `backgroundOpacity` – (Optional) Specifies the opacity of the background rectangle. 255 is opaque; 0 is transparent. Leaving this parameter blank is equivalent to setting it to 0 (transparent). All burn-in and DVB-Sub font settings must match. -* `font` – (Optional) External font file used for caption burn-in. File extension must be ‘ttf’ or ‘tte’. Although the user can select output fonts for many different types of input captions, embedded, STL and teletext sources use a strict grid system. Using external fonts with these caption sources could cause unexpected display of proportional fonts. All burn-in and DVB-Sub font settings must match. See [Font](#font) for more details. -* `fontColor` – (Optional) Specifies the color of the burned-in captions. This option is not valid for source captions that are STL, 608/embedded or teletext. These source settings are already pre-defined by the caption stream. All burn-in and DVB-Sub font settings must match. -* `fontOpacity` – (Optional) Specifies the opacity of the burned-in captions. 255 is opaque; 0 is transparent. All burn-in and DVB-Sub font settings must match. -* `fontResolution` – (Optional) Font resolution in DPI (dots per inch); default is 96 dpi. All burn-in and DVB-Sub font settings must match. -* `fontSize` – (Optional) When set to auto fontSize will scale depending on the size of the output. Giving a positive integer will specify the exact font size in points. All burn-in and DVB-Sub font settings must match. -* `outlineColor` – (Optional) Specifies font outline color. 
This option is not valid for source captions that are either 608/embedded or teletext. These source settings are already pre-defined by the caption stream. All burn-in and DVB-Sub font settings must match. -* `outlineSize` – (Optional) Specifies font outline size in pixels. This option is not valid for source captions that are either 608/embedded or teletext. These source settings are already pre-defined by the caption stream. All burn-in and DVB-Sub font settings must match. -* `shadowColor` – (Optional) Specifies the color of the shadow cast by the captions. All burn-in and DVB-Sub font settings must match. -* `shadowOpacity` – (Optional) Specifies the opacity of the shadow. 255 is opaque; 0 is transparent. Leaving this parameter blank is equivalent to setting it to 0 (transparent). All burn-in and DVB-Sub font settings must match. -* `shadowXOffset` – (Optional) Specifies the horizontal offset of the shadow relative to the captions in pixels. A value of -2 would result in a shadow offset 2 pixels to the left. All burn-in and DVB-Sub font settings must match. -* `shadowYOffset` – (Optional) Specifies the vertical offset of the shadow relative to the captions in pixels. A value of -2 would result in a shadow offset 2 pixels above the text. All burn-in and DVB-Sub font settings must match. -* `teletextGridControl` – (Optional) Controls whether a fixed grid size will be used to generate the output subtitles bitmap. Only applicable for Teletext inputs and DVB-Sub/Burn-in outputs. -* `xPosition` – (Optional) Specifies the horizontal position of the caption relative to the left side of the output in pixels. A value of 10 would result in the captions starting 10 pixels from the left of the output. If no explicit xPosition is provided, the horizontal caption position will be determined by the alignment parameter. This option is not valid for source captions that are STL, 608/embedded or teletext. These source settings are already pre-defined by the caption stream. 
All burn-in and DVB-Sub font settings must match. -* `yPosition` – (Optional) Specifies the vertical position of the caption relative to the top of the output in pixels. A value of 10 would result in the captions starting 10 pixels from the top of the output. If no explicit yPosition is provided, the caption will be positioned towards the bottom of the output. This option is not valid for source captions that are STL, 608/embedded or teletext. These source settings are already pre-defined by the caption stream. All burn-in and DVB-Sub font settings must match. +* `alignment` - (Optional) If no explicit xPosition or yPosition is provided, setting alignment to centered will place the captions at the bottom center of the output. Similarly, setting a left alignment will align captions to the bottom left of the output. If x and y positions are given in conjunction with the alignment parameter, the font will be justified (either left or centered) relative to those coordinates. Selecting “smart” justification will left-justify live subtitles and center-justify pre-recorded subtitles. This option is not valid for source captions that are STL or 608/embedded. These source settings are already pre-defined by the caption stream. All burn-in and DVB-Sub font settings must match. +* `backgroundColor` - (Optional) Specifies the color of the rectangle behind the captions. All burn-in and DVB-Sub font settings must match. +* `backgroundOpacity` - (Optional) Specifies the opacity of the background rectangle. 255 is opaque; 0 is transparent. Leaving this parameter blank is equivalent to setting it to 0 (transparent). All burn-in and DVB-Sub font settings must match. +* `font` - (Optional) External font file used for caption burn-in. File extension must be ‘ttf’ or ‘tte’. Although the user can select output fonts for many different types of input captions, embedded, STL and teletext sources use a strict grid system. 
Using external fonts with these caption sources could cause unexpected display of proportional fonts. All burn-in and DVB-Sub font settings must match. See [Font](#font) for more details. +* `fontColor` - (Optional) Specifies the color of the burned-in captions. This option is not valid for source captions that are STL, 608/embedded or teletext. These source settings are already pre-defined by the caption stream. All burn-in and DVB-Sub font settings must match. +* `fontOpacity` - (Optional) Specifies the opacity of the burned-in captions. 255 is opaque; 0 is transparent. All burn-in and DVB-Sub font settings must match. +* `fontResolution` - (Optional) Font resolution in DPI (dots per inch); default is 96 dpi. All burn-in and DVB-Sub font settings must match. +* `fontSize` - (Optional) When set to auto fontSize will scale depending on the size of the output. Giving a positive integer will specify the exact font size in points. All burn-in and DVB-Sub font settings must match. +* `outlineColor` - (Optional) Specifies font outline color. This option is not valid for source captions that are either 608/embedded or teletext. These source settings are already pre-defined by the caption stream. All burn-in and DVB-Sub font settings must match. +* `outlineSize` - (Optional) Specifies font outline size in pixels. This option is not valid for source captions that are either 608/embedded or teletext. These source settings are already pre-defined by the caption stream. All burn-in and DVB-Sub font settings must match. +* `shadowColor` - (Optional) Specifies the color of the shadow cast by the captions. All burn-in and DVB-Sub font settings must match. +* `shadowOpacity` - (Optional) Specifies the opacity of the shadow. 255 is opaque; 0 is transparent. Leaving this parameter blank is equivalent to setting it to 0 (transparent). All burn-in and DVB-Sub font settings must match. 
+* `shadowXOffset` - (Optional) Specifies the horizontal offset of the shadow relative to the captions in pixels. A value of -2 would result in a shadow offset 2 pixels to the left. All burn-in and DVB-Sub font settings must match. +* `shadowYOffset` - (Optional) Specifies the vertical offset of the shadow relative to the captions in pixels. A value of -2 would result in a shadow offset 2 pixels above the text. All burn-in and DVB-Sub font settings must match. +* `teletextGridControl` - (Optional) Controls whether a fixed grid size will be used to generate the output subtitles bitmap. Only applicable for Teletext inputs and DVB-Sub/Burn-in outputs. +* `xPosition` - (Optional) Specifies the horizontal position of the caption relative to the left side of the output in pixels. A value of 10 would result in the captions starting 10 pixels from the left of the output. If no explicit xPosition is provided, the horizontal caption position will be determined by the alignment parameter. This option is not valid for source captions that are STL, 608/embedded or teletext. These source settings are already pre-defined by the caption stream. All burn-in and DVB-Sub font settings must match. +* `yPosition` - (Optional) Specifies the vertical position of the caption relative to the top of the output in pixels. A value of 10 would result in the captions starting 10 pixels from the top of the output. If no explicit yPosition is provided, the caption will be positioned towards the bottom of the output. This option is not valid for source captions that are STL, 608/embedded or teletext. These source settings are already pre-defined by the caption stream. All burn-in and DVB-Sub font settings must match. ### EBU TT D Destination Settings -* `copyrightHolder` – (Optional) Complete this field if you want to include the name of the copyright holder in the copyright tag in the captions metadata. 
-* `fillLineGap` – (Optional) Specifies how to handle the gap between the lines (in multi-line captions). - enabled: Fill with the captions background color (as specified in the input captions). - disabled: Leave the gap unfilled. -* `fontFamily` – (Optional) Specifies the font family to include in the font data attached to the EBU-TT captions. Valid only if styleControl is set to include. If you leave this field empty, the font family is set to “monospaced”. (If styleControl is set to exclude, the font family is always set to “monospaced”.) You specify only the font family. All other style information (color, bold, position and so on) is copied from the input captions. The size is always set to 100% to allow the downstream player to choose the size. - Enter a list of font families, as a comma-separated list of font names, in order of preference. The name can be a font family (such as “Arial”), or a generic font family (such as “serif”), or “default” (to let the downstream player choose the font). - Leave blank to set the family to “monospace”. -* `styleControl` – (Optional) Specifies the style information (font color, font position, and so on) to include in the font data that is attached to the EBU-TT captions. - include: Take the style information (font color, font position, and so on) from the source captions and include that information in the font data attached to the EBU-TT captions. This option is valid only if the source captions are Embedded or Teletext. - exclude: In the font data attached to the EBU-TT captions, set the font family to “monospaced”. Do not include any other style information. +* `copyrightHolder` - (Optional) Complete this field if you want to include the name of the copyright holder in the copyright tag in the captions metadata. +* `fillLineGap` - (Optional) Specifies how to handle the gap between the lines (in multi-line captions). - enabled: Fill with the captions background color (as specified in the input captions). 
- disabled: Leave the gap unfilled. +* `fontFamily` - (Optional) Specifies the font family to include in the font data attached to the EBU-TT captions. Valid only if styleControl is set to include. If you leave this field empty, the font family is set to “monospaced”. (If styleControl is set to exclude, the font family is always set to “monospaced”.) You specify only the font family. All other style information (color, bold, position and so on) is copied from the input captions. The size is always set to 100% to allow the downstream player to choose the size. - Enter a list of font families, as a comma-separated list of font names, in order of preference. The name can be a font family (such as “Arial”), or a generic font family (such as “serif”), or “default” (to let the downstream player choose the font). - Leave blank to set the family to “monospace”. +* `styleControl` - (Optional) Specifies the style information (font color, font position, and so on) to include in the font data that is attached to the EBU-TT captions. - include: Take the style information (font color, font position, and so on) from the source captions and include that information in the font data attached to the EBU-TT captions. This option is valid only if the source captions are Embedded or Teletext. - exclude: In the font data attached to the EBU-TT captions, set the font family to “monospaced”. Do not include any other style information. ### TTML Destination Settings -* `styleControl` – (Optional) This field is not currently supported and will not affect the output styling. Leave the default value. +* `styleControl` - (Optional) This field is not currently supported and will not affect the output styling. Leave the default value. ### WebVTT Destination Settings @@ -659,38 +660,38 @@ The following arguments are optional: ### Font -* `passwordParam` – (Optional) Key used to extract the password from EC2 Parameter store. -* `uri` – (Required) Path to a file accessible to the live stream. 
-* `username` – (Optional) Username to be used. +* `passwordParam` - (Optional) Key used to extract the password from EC2 Parameter store. +* `uri` - (Required) Path to a file accessible to the live stream. +* `username` - (Optional) Username to be used. ### Global Configuration -* `initialAudioGain` – (Optional) Value to set the initial audio gain for the Live Event. -* `inputEndAction` – (Optional) Indicates the action to take when the current input completes (e.g. end-of-file). When switchAndLoopInputs is configured the encoder will restart at the beginning of the first input. When “none” is configured the encoder will transcode either black, a solid color, or a user specified slate images per the “Input Loss Behavior” configuration until the next input switch occurs (which is controlled through the Channel Schedule API). +* `initialAudioGain` - (Optional) Value to set the initial audio gain for the Live Event. +* `inputEndAction` - (Optional) Indicates the action to take when the current input completes (e.g. end-of-file). When switchAndLoopInputs is configured the encoder will restart at the beginning of the first input. When “none” is configured the encoder will transcode either black, a solid color, or a user specified slate images per the “Input Loss Behavior” configuration until the next input switch occurs (which is controlled through the Channel Schedule API). * `inputLossBehavior` - (Optional) Settings for system actions when input is lost. See [Input Loss Behavior](#input-loss-behavior) for more details. -* `outputLockingMode` – (Optional) Indicates how MediaLive pipelines are synchronized. PIPELINE\_LOCKING - MediaLive will attempt to synchronize the output of each pipeline to the other. EPOCH\_LOCKING - MediaLive will attempt to synchronize the output of each pipeline to the Unix epoch. 
-* `outputTimingSource` – (Optional) Indicates whether the rate of frames emitted by the Live encoder should be paced by its system clock (which optionally may be locked to another source via NTP) or should be locked to the clock of the source that is providing the input stream. -* `supportLowFramerateInputs` – (Optional) Adjusts video input buffer for streams with very low video framerates. This is commonly set to enabled for music channels with less than one video frame per second. +* `outputLockingMode` - (Optional) Indicates how MediaLive pipelines are synchronized. PIPELINE\_LOCKING - MediaLive will attempt to synchronize the output of each pipeline to the other. EPOCH\_LOCKING - MediaLive will attempt to synchronize the output of each pipeline to the Unix epoch. +* `outputTimingSource` - (Optional) Indicates whether the rate of frames emitted by the Live encoder should be paced by its system clock (which optionally may be locked to another source via NTP) or should be locked to the clock of the source that is providing the input stream. +* `supportLowFramerateInputs` - (Optional) Adjusts video input buffer for streams with very low video framerates. This is commonly set to enabled for music channels with less than one video frame per second. ### Input Loss Behavior -* `passwordParam` – (Optional) Key used to extract the password from EC2 Parameter store. -* `uri` – (Required) Path to a file accessible to the live stream. -* `username` – (Optional) Username to be used. +* `passwordParam` - (Optional) Key used to extract the password from EC2 Parameter store. +* `uri` - (Required) Path to a file accessible to the live stream. +* `username` - (Optional) Username to be used. ### Motion Graphics Configuration -* `motionGraphicsInsertion` – (Optional) Motion Graphics Insertion. +* `motionGraphicsInsertion` - (Optional) Motion Graphics Insertion. * `motionGraphicsSettings` - (Required) Motion Graphics Settings. 
See [Motion Graphics Settings](#motion-graphics-settings) for more details. ### Motion Graphics Settings -* `htmlMotionGraphicsSettings` – (Optional) Html Motion Graphics Settings. +* `htmlMotionGraphicsSettings` - (Optional) Html Motion Graphics Settings. ### Nielsen Configuration -* `distributorId` – (Optional) Enter the Distributor ID assigned to your organization by Nielsen. -* `nielsenPcmToId3Tagging` – (Optional) Enables Nielsen PCM to ID3 tagging. +* `distributorId` - (Optional) Enter the Distributor ID assigned to your organization by Nielsen. +* `nielsenPcmToId3Tagging` - (Optional) Enables Nielsen PCM to ID3 tagging. ### Avail Blanking @@ -833,4 +834,4 @@ Using `terraform import`, import MediaLive Channel using the `channelId`. For ex % terraform import aws_medialive_channel.example 1234567 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/medialive_input.html.markdown b/website/docs/cdktf/typescript/r/medialive_input.html.markdown index ca1bfcffb2b4..f1019fcbd9b5 100644 --- a/website/docs/cdktf/typescript/r/medialive_input.html.markdown +++ b/website/docs/cdktf/typescript/r/medialive_input.html.markdown @@ -64,6 +64,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `destinations` - (Optional) Destination settings for PUSH type inputs. See [Destinations](#destinations) for more details. * `inputDevices` - (Optional) Settings for the devices. See [Input Devices](#input-devices) for more details. * `mediaConnectFlows` - (Optional) A list of the MediaConnect Flows. See [Media Connect Flows](#media-connect-flows) for more details. 
@@ -141,4 +142,4 @@ Using `terraform import`, import MediaLive Input using the `id`. For example: % terraform import aws_medialive_input.example 12345678 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/medialive_input_security_group.html.markdown b/website/docs/cdktf/typescript/r/medialive_input_security_group.html.markdown index 6be8f0f4dc7e..759b5d87ef2b 100644 --- a/website/docs/cdktf/typescript/r/medialive_input_security_group.html.markdown +++ b/website/docs/cdktf/typescript/r/medialive_input_security_group.html.markdown @@ -51,6 +51,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `tags` - (Optional) A map of tags to assign to the InputSecurityGroup. If configured with a provider [`defaultTags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. ### Whitelist Rules @@ -105,4 +106,4 @@ Using `terraform import`, import MediaLive InputSecurityGroup using the `id`. 
Fo % terraform import aws_medialive_input_security_group.example 123456 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/medialive_multiplex.html.markdown b/website/docs/cdktf/typescript/r/medialive_multiplex.html.markdown index c348038c3fca..46eac8a03a1f 100644 --- a/website/docs/cdktf/typescript/r/medialive_multiplex.html.markdown +++ b/website/docs/cdktf/typescript/r/medialive_multiplex.html.markdown @@ -64,6 +64,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `startMultiplex` - (Optional) Whether to start the Multiplex. Defaults to `false`. * `tags` - (Optional) A map of tags to assign to the Multiplex. If configured with a provider [`defaultTags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. @@ -116,4 +117,4 @@ Using `terraform import`, import MediaLive Multiplex using the `id`. 
For example % terraform import aws_medialive_multiplex.example 12345678 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/medialive_multiplex_program.html.markdown b/website/docs/cdktf/typescript/r/medialive_multiplex_program.html.markdown index a3538b86e5f4..921e99bd170c 100644 --- a/website/docs/cdktf/typescript/r/medialive_multiplex_program.html.markdown +++ b/website/docs/cdktf/typescript/r/medialive_multiplex_program.html.markdown @@ -86,6 +86,8 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). + ### Multiple Program Settings * `programNumber` - (Required) Unique program number. @@ -154,4 +156,4 @@ Using `terraform import`, import MediaLive MultiplexProgram using the `id`, or a % terraform import aws_medialive_multiplex_program.example example_program/1234567 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/memorydb_acl.html.markdown b/website/docs/cdktf/typescript/r/memorydb_acl.html.markdown index 0cfa3e61282b..e263cb24bfdb 100644 --- a/website/docs/cdktf/typescript/r/memorydb_acl.html.markdown +++ b/website/docs/cdktf/typescript/r/memorydb_acl.html.markdown @@ -41,6 +41,7 @@ class MyConvertedCode extends TerraformStack { The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Optional, Forces new resource) Name of the ACL. 
If omitted, Terraform will assign a random, unique name. Conflicts with `namePrefix`. * `namePrefix` - (Optional, Forces new resource) Creates a unique name beginning with the specified prefix. Conflicts with `name`. * `userNames` - (Optional) Set of MemoryDB user names to be included in this ACL. @@ -83,4 +84,4 @@ Using `terraform import`, import an ACL using the `name`. For example: % terraform import aws_memorydb_acl.example my-acl ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/memorydb_cluster.html.markdown b/website/docs/cdktf/typescript/r/memorydb_cluster.html.markdown index 5647df9736f8..eba4e0694528 100644 --- a/website/docs/cdktf/typescript/r/memorydb_cluster.html.markdown +++ b/website/docs/cdktf/typescript/r/memorydb_cluster.html.markdown @@ -53,6 +53,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `autoMinorVersionUpgrade` - (Optional, Forces new resource) When set to `true`, the cluster will automatically receive minor engine version upgrades after launch. Defaults to `true`. * `dataTiering` - (Optional, Forces new resource) Enables data tiering. This option is not supported by all instance types. For more information, see [Data tiering](https://docs.aws.amazon.com/memorydb/latest/devguide/data-tiering.html). * `description` - (Optional) Description for the cluster. Defaults to `"Managed by Terraform"`. @@ -137,4 +138,4 @@ Using `terraform import`, import a cluster using the `name`. 
For example: % terraform import aws_memorydb_cluster.example my-cluster ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/memorydb_multi_region_cluster.html.markdown b/website/docs/cdktf/typescript/r/memorydb_multi_region_cluster.html.markdown index 824a763a3030..d55d3f648d24 100644 --- a/website/docs/cdktf/typescript/r/memorydb_multi_region_cluster.html.markdown +++ b/website/docs/cdktf/typescript/r/memorydb_multi_region_cluster.html.markdown @@ -60,6 +60,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` - (Optional) description for the multi-region cluster. * `engine` - (Optional) The name of the engine to be used for the multi-region cluster. Valid values are `redis` and `valkey`. * `engineVersion` - (Optional) The version of the engine to be used for the multi-region cluster. Downgrades are not supported. @@ -116,4 +117,4 @@ Using `terraform import`, import a cluster using the `multiRegionClusterName`. 
F % terraform import aws_memorydb_multi_region_cluster.example virxk-example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/memorydb_parameter_group.html.markdown b/website/docs/cdktf/typescript/r/memorydb_parameter_group.html.markdown index e0b23c3d1c83..f663fce500d5 100644 --- a/website/docs/cdktf/typescript/r/memorydb_parameter_group.html.markdown +++ b/website/docs/cdktf/typescript/r/memorydb_parameter_group.html.markdown @@ -51,6 +51,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Optional, Forces new resource) Name of the parameter group. If omitted, Terraform will assign a random, unique name. Conflicts with `namePrefix`. * `namePrefix` - (Optional, Forces new resource) Creates a unique name beginning with the specified prefix. Conflicts with `name`. * `description` - (Optional, Forces new resource) Description for the parameter group. Defaults to `"Managed by Terraform"`. @@ -102,4 +103,4 @@ Using `terraform import`, import a parameter group using the `name`. 
For example % terraform import aws_memorydb_parameter_group.example my-parameter-group ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/memorydb_snapshot.html.markdown b/website/docs/cdktf/typescript/r/memorydb_snapshot.html.markdown index a698a5ba8784..71a432594c8c 100644 --- a/website/docs/cdktf/typescript/r/memorydb_snapshot.html.markdown +++ b/website/docs/cdktf/typescript/r/memorydb_snapshot.html.markdown @@ -41,6 +41,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `clusterName` - (Required, Forces new resource) Name of the MemoryDB cluster to take a snapshot of. * `name` - (Optional, Forces new resource) Name of the snapshot. If omitted, Terraform will assign a random, unique name. Conflicts with `namePrefix`. * `namePrefix` - (Optional, Forces new resource) Creates a unique name beginning with the specified prefix. Conflicts with `name`. @@ -106,4 +107,4 @@ Using `terraform import`, import a snapshot using the `name`. 
For example: % terraform import aws_memorydb_snapshot.example my-snapshot ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/memorydb_subnet_group.html.markdown b/website/docs/cdktf/typescript/r/memorydb_subnet_group.html.markdown index 74278d58d020..5f7bd6490ed6 100644 --- a/website/docs/cdktf/typescript/r/memorydb_subnet_group.html.markdown +++ b/website/docs/cdktf/typescript/r/memorydb_subnet_group.html.markdown @@ -63,6 +63,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Optional, Forces new resource) Name of the subnet group. If omitted, Terraform will assign a random, unique name. Conflicts with `namePrefix`. * `namePrefix` - (Optional, Forces new resource) Creates a unique name beginning with the specified prefix. Conflicts with `name`. * `description` - (Optional) Description for the subnet group. Defaults to `"Managed by Terraform"`. @@ -109,4 +110,4 @@ Using `terraform import`, import a subnet group using its `name`. 
For example: % terraform import aws_memorydb_subnet_group.example my-subnet-group ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/memorydb_user.html.markdown b/website/docs/cdktf/typescript/r/memorydb_user.html.markdown index f3c9510392e3..f295bdcb216d 100644 --- a/website/docs/cdktf/typescript/r/memorydb_user.html.markdown +++ b/website/docs/cdktf/typescript/r/memorydb_user.html.markdown @@ -60,6 +60,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `tags` - (Optional) A map of tags to assign to the resource. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. ### authentication_mode Configuration Block @@ -108,4 +109,4 @@ Using `terraform import`, import a user using the `userName`. For example: The `passwords` are not available for imported resources, as this information cannot be read back from the MemoryDB API. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/mq_broker.html.markdown b/website/docs/cdktf/typescript/r/mq_broker.html.markdown index 9e59b8b7c06d..23a7f535559e 100644 --- a/website/docs/cdktf/typescript/r/mq_broker.html.markdown +++ b/website/docs/cdktf/typescript/r/mq_broker.html.markdown @@ -3,22 +3,22 @@ subcategory: "MQ" layout: "aws" page_title: "AWS: aws_mq_broker" description: |- - Provides an MQ Broker Resource + Manages an AWS MQ broker --- # Resource: aws_mq_broker -Provides an Amazon MQ broker resource. This resources also manages users for the broker. +Manages an AWS MQ broker. Use to create and manage message brokers for ActiveMQ and RabbitMQ engines. -> For more information on Amazon MQ, see [Amazon MQ documentation](https://docs.aws.amazon.com/amazon-mq/latest/developer-guide/welcome.html). -~> **NOTE:** Amazon MQ currently places limits on **RabbitMQ** brokers. For example, a RabbitMQ broker cannot have: instances with an associated IP address of an ENI attached to the broker, an associated LDAP server to authenticate and authorize broker connections, storage type `EFS`, or audit logging. Although this resource allows you to create RabbitMQ users, RabbitMQ users cannot have console access or groups. Also, Amazon MQ does not return information about RabbitMQ users so drift detection is not possible. +!> **Warning:** Amazon MQ currently places limits on **RabbitMQ** brokers. For example, a RabbitMQ broker cannot have: instances with an associated IP address of an ENI attached to the broker, an associated LDAP server to authenticate and authorize broker connections, storage type `EFS`, or audit logging. Although this resource allows you to create RabbitMQ users, RabbitMQ users cannot have console access or groups. Also, Amazon MQ does not return information about RabbitMQ users so drift detection is not possible. 
-~> **NOTE:** Changes to an MQ Broker can occur when you change a parameter, such as `configuration` or `user`, and are reflected in the next maintenance window. Because of this, Terraform may report a difference in its planning phase because a modification has not yet taken place. You can use the `applyImmediately` flag to instruct the service to apply the change immediately (see documentation below). Using `applyImmediately` can result in a brief downtime as the broker reboots. +!> **Warning:** All arguments including the username and password will be stored in the raw state as plain-text. [Read more about sensitive data in state](https://www.terraform.io/docs/state/sensitive-data.html). -~> **NOTE:** All arguments including the username and password will be stored in the raw state as plain-text. [Read more about sensitive data in state](https://www.terraform.io/docs/state/sensitive-data.html). +~> **Note:** Changes to an MQ Broker can occur when you change a parameter, such as `configuration` or `user`, and are reflected in the next maintenance window. Because of this, Terraform may report a difference in its planning phase because a modification has not yet taken place. You can use the `applyImmediately` flag to instruct the service to apply the change immediately (see documentation below). Using `applyImmediately` can result in a brief downtime as the broker reboots. ## Example Usage @@ -48,8 +48,8 @@ class MyConvertedCode extends TerraformStack { securityGroups: [Token.asString(awsSecurityGroupTest.id)], user: [ { - password: "MindTheGap", - username: "ExampleUser", + password: "", + username: "example_user", }, ], }); @@ -60,8 +60,6 @@ class MyConvertedCode extends TerraformStack { ### High-throughput Optimized Example -This example shows the use of EBS storage for high-throughput optimized performance. - ```typescript // DO NOT EDIT. 
Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug import { Construct } from "constructs"; @@ -87,8 +85,8 @@ class MyConvertedCode extends TerraformStack { storageType: "ebs", user: [ { - password: "MindTheGap", - username: "ExampleUser", + password: "", + username: "example_user", }, ], }); @@ -123,13 +121,13 @@ class MyConvertedCode extends TerraformStack { securityGroups: [Token.asString(awsSecurityGroupExample.id)], user: [ { - password: "MindTheGap", - username: "ExampleUser", + password: "", + username: "example_user", }, { - password: "Example12345", + password: "", replicationUser: true, - username: "ExampleReplicationUser", + username: "example_replication_user", }, ], }); @@ -144,13 +142,13 @@ class MyConvertedCode extends TerraformStack { securityGroups: [Token.asString(awsSecurityGroupExamplePrimary.id)], user: [ { - password: "MindTheGap", - username: "ExampleUser", + password: "", + username: "example_user", }, { - password: "Example12345", + password: "", replicationUser: true, - username: "ExampleReplicationUser", + username: "example_replication_user", }, ], }); @@ -167,26 +165,27 @@ The following arguments are required: * `brokerName` - (Required) Name of the broker. * `engineType` - (Required) Type of broker engine. Valid values are `ActiveMQ` and `RabbitMQ`. -* `engineVersion` - (Required) Version of the broker engine. See the [AmazonMQ Broker Engine docs](https://docs.aws.amazon.com/amazon-mq/latest/developer-guide/broker-engine.html) for supported versions. For example, `5.17.6`. +* `engineVersion` - (Required) Version of the broker engine. * `hostInstanceType` - (Required) Broker's instance type. For example, `mq.t3.micro`, `mq.m5.large`. * `user` - (Required) Configuration block for broker users. For `engineType` of `RabbitMQ`, Amazon MQ does not return broker users preventing this resource from making user updates and drift detection. Detailed below. 
The following arguments are optional: -* `applyImmediately` - (Optional) Specifies whether any broker modifications are applied immediately, or during the next maintenance window. Default is `false`. +* `applyImmediately` - (Optional) Whether to apply broker modifications immediately. Default is `false`. * `authenticationStrategy` - (Optional) Authentication strategy used to secure the broker. Valid values are `simple` and `ldap`. `ldap` is not supported for `engineType` `RabbitMQ`. * `autoMinorVersionUpgrade` - (Optional) Whether to automatically upgrade to new minor versions of brokers as Amazon MQ makes releases available. * `configuration` - (Optional) Configuration block for broker configuration. Applies to `engineType` of `ActiveMQ` and `RabbitMQ` only. Detailed below. -* `dataReplicationMode` - (Optional) Defines whether this broker is a part of a data replication pair. Valid values are `CRDR` and `NONE`. -* `dataReplicationPrimaryBrokerArn` - (Optional) The Amazon Resource Name (ARN) of the primary broker that is used to replicate data from in a data replication pair, and is applied to the replica broker. Must be set when `dataReplicationMode` is `CRDR`. +* `dataReplicationMode` - (Optional) Whether this broker is part of a data replication pair. Valid values are `CRDR` and `NONE`. +* `dataReplicationPrimaryBrokerArn` - (Optional) ARN of the primary broker used to replicate data in a data replication pair. Required when `dataReplicationMode` is `CRDR`. * `deploymentMode` - (Optional) Deployment mode of the broker. Valid values are `SINGLE_INSTANCE`, `ACTIVE_STANDBY_MULTI_AZ`, and `CLUSTER_MULTI_AZ`. Default is `SINGLE_INSTANCE`. * `encryptionOptions` - (Optional) Configuration block containing encryption options. Detailed below. -* `ldapServerMetadata` - (Optional) Configuration block for the LDAP server used to authenticate and authorize connections to the broker. Not supported for `engineType` `RabbitMQ`. Detailed below. 
(Currently, AWS may not process changes to LDAP server metadata.) -* `logs` - (Optional) Configuration block for the logging configuration of the broker. Detailed below. +* `ldapServerMetadata` - (Optional) Configuration block for the LDAP server used to authenticate and authorize connections. Not supported for `engineType` `RabbitMQ`. Detailed below. +* `logs` - (Optional) Configuration block for the logging configuration. Detailed below. * `maintenanceWindowStartTime` - (Optional) Configuration block for the maintenance window start time. Detailed below. * `publiclyAccessible` - (Optional) Whether to enable connections from applications outside of the VPC that hosts the broker's subnets. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `securityGroups` - (Optional) List of security group IDs assigned to the broker. -* `storageType` - (Optional) Storage type of the broker. For `engineType` `ActiveMQ`, the valid values are `efs` and `ebs`, and the AWS-default is `efs`. For `engineType` `RabbitMQ`, only `ebs` is supported. When using `ebs`, only the `mq.m5` broker instance type family is supported. +* `storageType` - (Optional) Storage type of the broker. For `engineType` `ActiveMQ`, valid values are `efs` and `ebs` (AWS-default is `efs`). For `engineType` `RabbitMQ`, only `ebs` is supported. When using `ebs`, only the `mq.m5` broker instance type family is supported. * `subnetIds` - (Optional) List of subnet IDs in which to launch the broker. A `SINGLE_INSTANCE` deployment requires one subnet. An `ACTIVE_STANDBY_MULTI_AZ` deployment requires multiple subnets. * `tags` - (Optional) Map of tags to assign to the broker. 
If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. @@ -194,29 +193,29 @@ The following arguments are optional: The following arguments are optional: -* `id` - (Optional) The Configuration ID. +* `id` - (Optional) Configuration ID. * `revision` - (Optional) Revision of the Configuration. ### encryption_options The following arguments are optional: -* `kmsKeyId` - (Optional) Amazon Resource Name (ARN) of Key Management Service (KMS) Customer Master Key (CMK) to use for encryption at rest. Requires setting `useAwsOwnedKey` to `false`. To perform drift detection when AWS-managed CMKs or customer-managed CMKs are in use, this value must be configured. -* `useAwsOwnedKey` - (Optional) Whether to enable an AWS-owned KMS CMK that is not in your account. Defaults to `true`. Setting to `false` without configuring `kmsKeyId` will create an AWS-managed CMK aliased to `aws/mq` in your account. +* `kmsKeyId` - (Optional) ARN of KMS CMK to use for encryption at rest. Requires setting `useAwsOwnedKey` to `false`. To perform drift detection when AWS-managed CMKs or customer-managed CMKs are in use, this value must be configured. +* `useAwsOwnedKey` - (Optional) Whether to enable an AWS-owned KMS CMK not in your account. Defaults to `true`. Setting to `false` without configuring `kmsKeyId` creates an AWS-managed CMK aliased to `aws/mq` in your account. ### ldap_server_metadata The following arguments are optional: -* `hosts` - (Optional) List of a fully qualified domain name of the LDAP server and an optional failover server. -* `roleBase` - (Optional) Fully qualified name of the directory to search for a user’s groups. -* `roleName` - (Optional) Specifies the LDAP attribute that identifies the group name attribute in the object returned from the group membership query. 
+* `hosts` - (Optional) List of fully qualified domain names of the LDAP server and optional failover server. +* `roleBase` - (Optional) Fully qualified name of the directory to search for a user's groups. +* `roleName` - (Optional) LDAP attribute that identifies the group name attribute in the object returned from the group membership query. * `roleSearchMatching` - (Optional) Search criteria for groups. * `roleSearchSubtree` - (Optional) Whether the directory search scope is the entire sub-tree. * `serviceAccountPassword` - (Optional) Service account password. * `serviceAccountUsername` - (Optional) Service account username. * `userBase` - (Optional) Fully qualified name of the directory where you want to search for users. -* `userRoleName` - (Optional) Specifies the name of the LDAP attribute for the user group membership. +* `userRoleName` - (Optional) Name of the LDAP attribute for the user group membership. * `userSearchMatching` - (Optional) Search criteria for users. * `userSearchSubtree` - (Optional) Whether the directory search scope is the entire sub-tree. @@ -224,8 +223,8 @@ The following arguments are optional: The following arguments are optional: -* `audit` - (Optional) Enables audit logging. Auditing is only possible for `engineType` of `ActiveMQ`. User management action made using JMX or the ActiveMQ Web Console is logged. Defaults to `false`. -* `general` - (Optional) Enables general logging via CloudWatch. Defaults to `false`. +* `audit` - (Optional) Whether to enable audit logging. Only possible for `engineType` of `ActiveMQ`. Logs user management actions via JMX or ActiveMQ Web Console. Defaults to `false`. +* `general` - (Optional) Whether to enable general logging via CloudWatch. Defaults to `false`. ### maintenance_window_start_time @@ -237,11 +236,16 @@ The following arguments are required: ### user +The following arguments are required: + +* `password` - (Required) Password of the user. 
Must be 12 to 250 characters long, contain at least 4 unique characters, and must not contain commas. +* `username` - (Required) Username of the user. + +The following arguments are optional: + * `consoleAccess` - (Optional) Whether to enable access to the [ActiveMQ Web Console](http://activemq.apache.org/web-console.html) for the user. Applies to `engineType` of `ActiveMQ` only. * `groups` - (Optional) List of groups (20 maximum) to which the ActiveMQ user belongs. Applies to `engineType` of `ActiveMQ` only. -* `password` - (Required) Password of the user. It must be 12 to 250 characters long, at least 4 unique characters, and must not contain commas. -* `replicationUser` - (Optional) Whether to set set replication user. Defaults to `false`. -* `username` - (Required) Username of the user. +* `replicationUser` - (Optional) Whether to set replication user. Defaults to `false`. ~> **NOTE:** AWS currently does not support updating RabbitMQ users. Updates to users can only be in the RabbitMQ UI. @@ -252,7 +256,7 @@ This resource exports the following attributes in addition to the arguments abov * `arn` - ARN of the broker. * `id` - Unique ID that Amazon MQ generates for the broker. * `instances` - List of information about allocated brokers (both active & standby). - * `instances.0.console_url` - The URL of the [ActiveMQ Web Console](http://activemq.apache.org/web-console.html) or the [RabbitMQ Management UI](https://www.rabbitmq.com/management.html#external-monitoring) depending on `engineType`. + * `instances.0.console_url` - URL of the [ActiveMQ Web Console](http://activemq.apache.org/web-console.html) or the [RabbitMQ Management UI](https://www.rabbitmq.com/management.html#external-monitoring) depending on `engineType`. * `instances.0.ip_address` - IP Address of the broker. 
* `instances.0.endpoints` - Broker's wire-level protocol endpoints in the following order & format referenceable e.g., as `instances.0.endpoints.0` (SSL): * For `ActiveMQ`: @@ -263,8 +267,8 @@ This resource exports the following attributes in addition to the arguments abov * `wss://broker-id.mq.us-west-2.amazonaws.com:61619` * For `RabbitMQ`: * `amqps://broker-id.mq.us-west-2.amazonaws.com:5671` -* `pendingDataReplicationMode` - (Optional) The data replication mode that will be applied after reboot. -* `tagsAll` - A map of tags assigned to the resource, including those inherited from the provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). +* `pendingDataReplicationMode` - Data replication mode that will be applied after reboot. +* `tagsAll` - Map of tags assigned to the resource, including those inherited from the provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). ## Timeouts @@ -306,4 +310,4 @@ Using `terraform import`, import MQ Brokers using their broker id. For example: % terraform import aws_mq_broker.example a1b2c3d4-d5f6-7777-8888-9999aaaabbbbcccc ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/mq_configuration.html.markdown b/website/docs/cdktf/typescript/r/mq_configuration.html.markdown index ccc85435122c..f127ea4e517a 100644 --- a/website/docs/cdktf/typescript/r/mq_configuration.html.markdown +++ b/website/docs/cdktf/typescript/r/mq_configuration.html.markdown @@ -2,17 +2,14 @@ subcategory: "MQ" layout: "aws" page_title: "AWS: aws_mq_configuration" -description: |- - Provides an MQ configuration Resource +description: "Manages an Amazon MQ configuration" --- # Resource: aws_mq_configuration -Provides an MQ Configuration Resource. 
- -For more information on Amazon MQ, see [Amazon MQ documentation](https://docs.aws.amazon.com/amazon-mq/latest/developer-guide/welcome.html). +Manages an Amazon MQ configuration. Use this resource to create and manage broker configurations for ActiveMQ and RabbitMQ brokers. ## Example Usage @@ -72,16 +69,17 @@ class MyConvertedCode extends TerraformStack { The following arguments are required: -* `data` - (Required) Broker configuration in XML format for `ActiveMQ` or [Cuttlefish](https://github.com/Kyorai/cuttlefish) format for `RabbitMQ`. See [official docs](https://docs.aws.amazon.com/amazon-mq/latest/developer-guide/amazon-mq-broker-configuration-parameters.html) for supported parameters and format of the XML. +* `data` - (Required) Broker configuration in XML format for ActiveMQ or Cuttlefish format for RabbitMQ. See [AWS documentation](https://docs.aws.amazon.com/amazon-mq/latest/developer-guide/amazon-mq-broker-configuration-parameters.html) for supported parameters and format of the XML. * `engineType` - (Required) Type of broker engine. Valid values are `ActiveMQ` and `RabbitMQ`. * `engineVersion` - (Required) Version of the broker engine. * `name` - (Required) Name of the configuration. The following arguments are optional: -* `authenticationStrategy` - (Optional) Authentication strategy associated with the configuration. Valid values are `simple` and `ldap`. `ldap` is not supported for `engineType` `RabbitMQ`. +* `authenticationStrategy` - (Optional) Authentication strategy associated with the configuration. Valid values are `simple` and `ldap`. `ldap` is not supported for RabbitMQ engine type. * `description` - (Optional) Description of the configuration. -* `tags` - (Optional) Map of tags to assign to the resource. 
If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `tags` - (Optional) Key-value map of tags to assign to the resource. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. ## Attribute Reference @@ -90,7 +88,7 @@ This resource exports the following attributes in addition to the arguments abov * `arn` - ARN of the configuration. * `id` - Unique ID that Amazon MQ generates for the configuration. * `latestRevision` - Latest revision of the configuration. -* `tagsAll` - A map of tags assigned to the resource, including those inherited from the provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). +* `tagsAll` - Map of tags assigned to the resource, including those inherited from the provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). ## Import @@ -124,4 +122,4 @@ Using `terraform import`, import MQ Configurations using the configuration ID. 
F % terraform import aws_mq_configuration.example c-0187d1eb-88c8-475a-9b79-16ef5a10c94f ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/msk_cluster.html.markdown b/website/docs/cdktf/typescript/r/msk_cluster.html.markdown index 736b65ce40de..c6ce36d58e7e 100644 --- a/website/docs/cdktf/typescript/r/msk_cluster.html.markdown +++ b/website/docs/cdktf/typescript/r/msk_cluster.html.markdown @@ -212,16 +212,17 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: -* `brokerNodeGroupInfo` - (Required) Configuration block for the broker nodes of the Kafka cluster. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `brokerNodeGroupInfo` - (Required) Configuration block for the broker nodes of the Kafka cluster. See [broker_node_group_info Argument Reference](#broker_node_group_info-argument-reference) below. * `clusterName` - (Required) Name of the MSK cluster. * `kafkaVersion` - (Required) Specify the desired Kafka software version. * `numberOfBrokerNodes` - (Required) The desired total number of broker nodes in the kafka cluster. It must be a multiple of the number of specified client subnets. -* `clientAuthentication` - (Optional) Configuration block for specifying a client authentication. See below. -* `configurationInfo` - (Optional) Configuration block for specifying a MSK Configuration to attach to Kafka brokers. See below. -* `encryptionInfo` - (Optional) Configuration block for specifying encryption. See below. +* `clientAuthentication` - (Optional) Configuration block for specifying a client authentication. See [client_authentication Argument Reference](#client_authentication-argument-reference) below. 
+* `configurationInfo` - (Optional) Configuration block for specifying an MSK Configuration to attach to Kafka brokers. See [configuration_info Argument Reference](#configuration_info-argument-reference) below. +* `encryptionInfo` - (Optional) Configuration block for specifying encryption. See [encryption_info Argument Reference](#encryption_info-argument-reference) below. * `enhancedMonitoring` - (Optional) Specify the desired enhanced MSK CloudWatch monitoring level. See [Monitoring Amazon MSK with Amazon CloudWatch](https://docs.aws.amazon.com/msk/latest/developerguide/monitoring.html) -* `openMonitoring` - (Optional) Configuration block for JMX and Node monitoring for the MSK cluster. See below. -* `loggingInfo` - (Optional) Configuration block for streaming broker logs to Cloudwatch/S3/Kinesis Firehose. See below. +* `openMonitoring` - (Optional) Configuration block for JMX and Node monitoring for the MSK cluster. See [open_monitoring Argument Reference](#open_monitoring-argument-reference) below. +* `loggingInfo` - (Optional) Configuration block for streaming broker logs to Cloudwatch/S3/Kinesis Firehose. See [logging_info Argument Reference](#logging_info-argument-reference) below. * `storageMode` - (Optional) Controls storage mode for supported storage tiers. Valid values are: `LOCAL` or `TIERED`. * `tags` - (Optional) A map of tags to assign to the resource. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. @@ -230,14 +231,14 @@ This resource supports the following arguments: * `clientSubnets` - (Required) A list of subnets to connect to in client VPC ([documentation](https://docs.aws.amazon.com/msk/1.0/apireference/clusters.html#clusters-prop-brokernodegroupinfo-clientsubnets)). 
* `instanceType` - (Required) Specify the instance type to use for the kafka brokers. E.g., kafka.m5.large. ([Pricing info](https://aws.amazon.com/msk/pricing/)) * `securityGroups` - (Required) A list of the security groups to associate with the elastic network interfaces to control who can communicate with the cluster. -* `azDistribution` - (Optional) The distribution of broker nodes across availability zones ([documentation](https://docs.aws.amazon.com/msk/1.0/apireference/clusters.html#clusters-model-brokerazdistribution)). Currently the only valid value is `DEFAULT`. -* `connectivityInfo` - (Optional) Information about the cluster access configuration. See below. For security reasons, you can't turn on public access while creating an MSK cluster. However, you can update an existing cluster to make it publicly accessible. You can also create a new cluster and then update it to make it publicly accessible ([documentation](https://docs.aws.amazon.com/msk/latest/developerguide/public-access.html)). -* `storageInfo` - (Optional) A block that contains information about storage volumes attached to MSK broker nodes. See below. +* `azDistribution` - (Optional) The distribution of broker nodes across availability zones ([documentation](https://docs.aws.amazon.com/msk/1.0/apireference/clusters.html#clusters-model-brokerazdistribution)). Currently, the only valid value is `DEFAULT`. +* `connectivityInfo` - (Optional) Information about the cluster access configuration. See [broker_node_group_info connectivity_info Argument Reference](#broker_node_group_info-connectivity_info-argument-reference) below. For security reasons, you can't turn on public access while creating an MSK cluster. However, you can update an existing cluster to make it publicly accessible. You can also create a new cluster and then update it to make it publicly accessible ([documentation](https://docs.aws.amazon.com/msk/latest/developerguide/public-access.html)). 
+* `storageInfo` - (Optional) A block that contains information about storage volumes attached to MSK broker nodes. See [broker_node_group_info storage_info Argument Reference](#broker_node_group_info-storage_info-argument-reference) below. ### broker_node_group_info connectivity_info Argument Reference -* `publicAccess` - (Optional) Access control settings for brokers. See below. -* `vpcConnectivity` - (Optional) VPC connectivity access control for brokers. See below. +* `publicAccess` - (Optional) Access control settings for brokers. See [connectivity_info public_access Argument Reference](#connectivity_info-public_access-argument-reference) below. +* `vpcConnectivity` - (Optional) VPC connectivity access control for brokers. See [connectivity_info vpc_connectivity Argument Reference](#connectivity_info-vpc_connectivity-argument-reference) below. ### connectivity_info public_access Argument Reference @@ -245,11 +246,11 @@ This resource supports the following arguments: ### connectivity_info vpc_connectivity Argument Reference -* `clientAuthentication` - (Optional) Includes all client authentication information for VPC connectivity. See below. +* `clientAuthentication` - (Optional) Includes all client authentication information for VPC connectivity. See [vpc_connectivity client_authentication Argument Reference](#vpc_connectivity-client_authentication-argument-reference) below. ### vpc_connectivity client_authentication Argument Reference -* `sasl` - (Optional) SASL authentication type details for VPC connectivity. See below. +* `sasl` - (Optional) SASL authentication type details for VPC connectivity. See [vpc_connectivity client_authentication sasl Argument Reference](#vpc_connectivity-client_authentication-sasl-argument-reference) below. * `tls` - (Optional) Enables TLS authentication for VPC connectivity. 
### vpc_connectivity client_authentication sasl Argument Reference @@ -259,11 +260,11 @@ This resource supports the following arguments: ### broker_node_group_info storage_info Argument Reference -* `ebsStorageInfo` - (Optional) A block that contains EBS volume information. See below. +* `ebsStorageInfo` - (Optional) A block that contains EBS volume information. See [storage_info ebs_storage_info Argument Reference](#storage_info-ebs_storage_info-argument-reference) below. ### storage_info ebs_storage_info Argument Reference -* `provisionedThroughput` - (Optional) A block that contains EBS volume provisioned throughput information. To provision storage throughput, you must choose broker type kafka.m5.4xlarge or larger. See below. +* `provisionedThroughput` - (Optional) A block that contains EBS volume provisioned throughput information. To provision storage throughput, you must choose broker type kafka.m5.4xlarge or larger. See [ebs_storage_info provisioned_throughput Argument Reference](#ebs_storage_info-provisioned_throughput-argument-reference) below. * `volumeSize` - (Optional) The size in GiB of the EBS volume for the data drive on each broker node. Minimum value of `1` and maximum value of `16384`. ### ebs_storage_info provisioned_throughput Argument Reference @@ -273,8 +274,8 @@ This resource supports the following arguments: ### client_authentication Argument Reference -* `sasl` - (Optional) Configuration block for specifying SASL client authentication. See below. -* `tls` - (Optional) Configuration block for specifying TLS client authentication. See below. +* `sasl` - (Optional) Configuration block for specifying SASL client authentication. See [client_authentication sasl Argument Reference](#client_authentication-sasl-argument-reference) below. +* `tls` - (Optional) Configuration block for specifying TLS client authentication. See [client_authentication tls Argument Reference](#client_authentication-tls-argument-reference) below. 
* `unauthenticated` - (Optional) Enables unauthenticated access. #### client_authentication sasl Argument Reference @@ -293,7 +294,7 @@ This resource supports the following arguments: ### encryption_info Argument Reference -* `encryptionInTransit` - (Optional) Configuration block to specify encryption in transit. See below. +* `encryptionInTransit` - (Optional) Configuration block to specify encryption in transit. See [encryption_info encryption_in_transit Argument Reference](#encryption_info-encryption_in_transit-argument-reference) below. * `encryptionAtRestKmsKeyArn` - (Optional) You may specify a KMS key short ID or ARN (it will always output an ARN) to use for encrypting your data at rest. If no key is specified, an AWS managed KMS ('aws/msk' managed service) key will be used for encrypting the data at rest. #### encryption_info encryption_in_transit Argument Reference @@ -303,12 +304,12 @@ This resource supports the following arguments: #### open_monitoring Argument Reference -* `prometheus` - (Required) Configuration block for Prometheus settings for open monitoring. See below. +* `prometheus` - (Required) Configuration block for Prometheus settings for open monitoring. See [open_monitoring prometheus Argument Reference](#open_monitoring-prometheus-argument-reference) below. #### open_monitoring prometheus Argument Reference -* `jmxExporter` - (Optional) Configuration block for JMX Exporter. See below. -* `nodeExporter` - (Optional) Configuration block for Node Exporter. See below. +* `jmxExporter` - (Optional) Configuration block for JMX Exporter. See [open_monitoring prometheus jmx_exporter Argument Reference](#open_monitoring-prometheus-jmx_exporter-argument-reference) below. +* `nodeExporter` - (Optional) Configuration block for Node Exporter. See [open_monitoring prometheus node_exporter Argument Reference](#open_monitoring-prometheus-node_exporter-argument-reference) below. 
#### open_monitoring prometheus jmx_exporter Argument Reference @@ -320,7 +321,13 @@ This resource supports the following arguments: #### logging_info Argument Reference -* `brokerLogs` - (Required) Configuration block for Broker Logs settings for logging info. See below. +* `brokerLogs` - (Required) Configuration block for Broker Logs settings for logging info. See [logging_info broker_logs Argument Reference](#logging_info-broker_logs-argument-reference) below. + +#### logging_info broker_logs Argument Reference + +* `cloudwatchLogs` - (Optional) Configuration block for Cloudwatch Logs settings. See [logging_info broker_logs cloudwatch_logs Argument Reference](#logging_info-broker_logs-cloudwatch_logs-argument-reference) below. +* `firehose` - (Optional) Configuration block for Kinesis Data Firehose settings. See [logging_info broker_logs firehose Argument Reference](#logging_info-broker_logs-firehose-argument-reference) below. +* `s3` - (Optional) Configuration block for S3 settings. See [logging_info broker_logs s3 Argument Reference](#logging_info-broker_logs-s3-argument-reference) below. #### logging_info broker_logs cloudwatch_logs Argument Reference @@ -401,4 +408,4 @@ Using `terraform import`, import MSK clusters using the cluster `arn`. 
For examp % terraform import aws_msk_cluster.example arn:aws:kafka:us-west-2:123456789012:cluster/example/279c0212-d057-4dba-9aa9-1c4e5a25bfc7-3 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/msk_cluster_policy.html.markdown b/website/docs/cdktf/typescript/r/msk_cluster_policy.html.markdown index c5e37ddcad7b..2da579192d80 100644 --- a/website/docs/cdktf/typescript/r/msk_cluster_policy.html.markdown +++ b/website/docs/cdktf/typescript/r/msk_cluster_policy.html.markdown @@ -69,8 +69,9 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -The following arguments are required: +This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `clusterArn` - (Required) The Amazon Resource Name (ARN) that uniquely identifies the cluster. * `policy` - (Required) Resource policy for cluster. 
@@ -112,4 +113,4 @@ Using `terraform import`, import Managed Streaming for Kafka Cluster Policy usin % terraform import aws_msk_cluster_policy.example arn:aws:kafka:us-west-2:123456789012:cluster/example/279c0212-d057-4dba-9aa9-1c4e5a25bfc7-3 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/msk_configuration.html.markdown b/website/docs/cdktf/typescript/r/msk_configuration.html.markdown index 84e3f6617100..b3177722d21a 100644 --- a/website/docs/cdktf/typescript/r/msk_configuration.html.markdown +++ b/website/docs/cdktf/typescript/r/msk_configuration.html.markdown @@ -41,6 +41,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `serverProperties` - (Required) Contents of the server.properties file. Supported properties are documented in the [MSK Developer Guide](https://docs.aws.amazon.com/msk/latest/developerguide/msk-configuration-properties.html). * `kafkaVersions` - (Optional) List of Apache Kafka versions which can use this configuration. * `name` - (Required) Name of the configuration. @@ -85,4 +86,4 @@ Using `terraform import`, import MSK configurations using the configuration ARN. 
% terraform import aws_msk_configuration.example arn:aws:kafka:us-west-2:123456789012:configuration/example/279c0212-d057-4dba-9aa9-1c4e5a25bfc7-3 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/msk_replicator.html.markdown b/website/docs/cdktf/typescript/r/msk_replicator.html.markdown index 9a463877fd50..4646e5adc30b 100644 --- a/website/docs/cdktf/typescript/r/msk_replicator.html.markdown +++ b/website/docs/cdktf/typescript/r/msk_replicator.html.markdown @@ -87,6 +87,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `replicatorName` - (Required) The name of the replicator. * `kafkaCluster` - (Required) A list of Kafka clusters which are targets of the replicator. * `serviceExecutionRoleArn` - (Required) The ARN of the IAM role used by the replicator to access resources in the customer's account (e.g source and target clusters). @@ -188,4 +189,4 @@ Using `terraform import`, import MSK replicators using the replicator ARN. 
For e % terraform import aws_msk_replicator.example arn:aws:kafka:us-west-2:123456789012:configuration/example/279c0212-d057-4dba-9aa9-1c4e5a25bfc7-3 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/msk_scram_secret_association.html.markdown b/website/docs/cdktf/typescript/r/msk_scram_secret_association.html.markdown index da8c32992a3b..60f6521b51d7 100644 --- a/website/docs/cdktf/typescript/r/msk_scram_secret_association.html.markdown +++ b/website/docs/cdktf/typescript/r/msk_scram_secret_association.html.markdown @@ -139,6 +139,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `clusterArn` - (Required, Forces new resource) Amazon Resource Name (ARN) of the MSK cluster. * `secretArnList` - (Required) List of AWS Secrets Manager secret ARNs. @@ -180,4 +181,4 @@ Using `terraform import`, import MSK SCRAM Secret Associations using the `id`. 
F % terraform import aws_msk_scram_secret_association.example arn:aws:kafka:us-west-2:123456789012:cluster/example/279c0212-d057-4dba-9aa9-1c4e5a25bfc7-3 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/msk_serverless_cluster.html.markdown b/website/docs/cdktf/typescript/r/msk_serverless_cluster.html.markdown index cdb780074ee5..39c3fbbd43ae 100644 --- a/website/docs/cdktf/typescript/r/msk_serverless_cluster.html.markdown +++ b/website/docs/cdktf/typescript/r/msk_serverless_cluster.html.markdown @@ -55,6 +55,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `clientAuthentication` - (Required) Specifies client authentication information for the serverless cluster. See below. * `clusterName` - (Required) The name of the serverless cluster. * `tags` - (Optional) A map of tags to assign to the resource. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. 
@@ -125,4 +126,4 @@ Using `terraform import`, import MSK serverless clusters using the cluster `arn` % terraform import aws_msk_serverless_cluster.example arn:aws:kafka:us-west-2:123456789012:cluster/example/279c0212-d057-4dba-9aa9-1c4e5a25bfc7-3 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/msk_single_scram_secret_association.html.markdown b/website/docs/cdktf/typescript/r/msk_single_scram_secret_association.html.markdown index c74fb1509c06..9252f6bfc761 100644 --- a/website/docs/cdktf/typescript/r/msk_single_scram_secret_association.html.markdown +++ b/website/docs/cdktf/typescript/r/msk_single_scram_secret_association.html.markdown @@ -39,6 +39,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `clusterArn` - (Required, Forces new resource) Amazon Resource Name (ARN) of the MSK cluster. * `secretArn` - (Required, Forces new resource) AWS Secrets Manager secret ARN. 
@@ -78,4 +79,4 @@ Using `terraform import`, import an MSK SCRAM Secret Association using the `clus % terraform import aws_msk_single_scram_secret_association.example arn:aws:kafka:us-west-2:123456789012:cluster/example/279c0212-d057-4dba-9aa9-1c4e5a25bfc7-3,arn:aws:secretsmanager:us-east-1:123456789012:secret:example-123456 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/msk_vpc_connection.html.markdown b/website/docs/cdktf/typescript/r/msk_vpc_connection.html.markdown index 6f63c7882208..f690bca54ece 100644 --- a/website/docs/cdktf/typescript/r/msk_vpc_connection.html.markdown +++ b/website/docs/cdktf/typescript/r/msk_vpc_connection.html.markdown @@ -41,6 +41,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `authentication` - (Required) The authentication type for the client VPC connection. Specify one of these auth type strings: SASL_IAM, SASL_SCRAM, or TLS. * `clientSubnets` - (Required) The list of subnets in the client VPC to connect to. * `securityGroups` - (Required) The security groups to attach to the ENIs for the broker nodes. @@ -95,4 +96,4 @@ Using `terraform import`, import MSK configurations using the configuration ARN. 
% terraform import aws_msk_vpc_connection.example arn:aws:kafka:eu-west-2:123456789012:vpc-connection/123456789012/example/38173259-79cd-4ee8-87f3-682ea6023f48-2 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/mskconnect_connector.html.markdown b/website/docs/cdktf/typescript/r/mskconnect_connector.html.markdown index caca95caa28a..14570fbd3bd6 100644 --- a/website/docs/cdktf/typescript/r/mskconnect_connector.html.markdown +++ b/website/docs/cdktf/typescript/r/mskconnect_connector.html.markdown @@ -100,6 +100,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` - (Optional) A summary description of the connector. * `logDelivery` - (Optional) Details about log delivery. See [`logDelivery` Block](#log_delivery-block) for details. * `tags` - (Optional) A map of tags to assign to the resource. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. 
@@ -277,4 +278,4 @@ Using `terraform import`, import MSK Connect Connector using the connector's `ar % terraform import aws_mskconnect_connector.example 'arn:aws:kafkaconnect:eu-central-1:123456789012:connector/example/264edee4-17a3-412e-bd76-6681cfc93805-3' ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/mskconnect_custom_plugin.html.markdown b/website/docs/cdktf/typescript/r/mskconnect_custom_plugin.html.markdown index 6cc6aff7b15a..e41cf9929ab2 100644 --- a/website/docs/cdktf/typescript/r/mskconnect_custom_plugin.html.markdown +++ b/website/docs/cdktf/typescript/r/mskconnect_custom_plugin.html.markdown @@ -65,6 +65,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required, Forces new resource) The name of the custom plugin.. * `contentType` - (Required, Forces new resource) The type of the plugin file. Allowed values are `ZIP` and `JAR`. * `description` - (Optional, Forces new resource) A summary description of the custom plugin. 
@@ -133,4 +134,4 @@ Using `terraform import`, import MSK Connect Custom Plugin using the plugin's `a % terraform import aws_mskconnect_custom_plugin.example 'arn:aws:kafkaconnect:eu-central-1:123456789012:custom-plugin/debezium-example/abcdefgh-1234-5678-9abc-defghijklmno-4' ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/mskconnect_worker_configuration.html.markdown b/website/docs/cdktf/typescript/r/mskconnect_worker_configuration.html.markdown index 8dc6fc4e4e05..a75e5703b188 100644 --- a/website/docs/cdktf/typescript/r/mskconnect_worker_configuration.html.markdown +++ b/website/docs/cdktf/typescript/r/mskconnect_worker_configuration.html.markdown @@ -47,6 +47,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` - (Optional, Forces new resource) A summary description of the worker configuration. * `tags` - (Optional) A map of tags to assign to the resource. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. 
@@ -96,4 +97,4 @@ Using `terraform import`, import MSK Connect Worker Configuration using the plug % terraform import aws_mskconnect_worker_configuration.example 'arn:aws:kafkaconnect:eu-central-1:123456789012:worker-configuration/example/8848493b-7fcc-478c-a646-4a52634e3378-4' ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/mwaa_environment.html.markdown b/website/docs/cdktf/typescript/r/mwaa_environment.html.markdown index 0322bae5b18d..667ca0502d9b 100644 --- a/website/docs/cdktf/typescript/r/mwaa_environment.html.markdown +++ b/website/docs/cdktf/typescript/r/mwaa_environment.html.markdown @@ -184,15 +184,17 @@ This resource supports the following arguments: * `networkConfiguration` - (Required) Specifies the network configuration for your Apache Airflow Environment. This includes two private subnets as well as security groups for the Airflow environment. Each subnet requires internet connection, otherwise the deployment will fail. See [`networkConfiguration` Block](#network_configuration-block) for details. * `pluginsS3ObjectVersion` - (Optional) The plugins.zip file version you want to use. * `pluginsS3Path` - (Optional) The relative path to the plugins.zip file on your Amazon S3 storage bucket. For example, plugins.zip. If a relative path is provided in the request, then plugins_s3_object_version is required. For more information, see [Importing DAGs on Amazon MWAA](https://docs.aws.amazon.com/mwaa/latest/userguide/configuring-dag-import.html). +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `requirementsS3ObjectVersion` - (Optional) The requirements.txt file version you want to use. 
* `requirementsS3Path` - (Optional) The relative path to the requirements.txt file on your Amazon S3 storage bucket. For example, requirements.txt. If a relative path is provided in the request, then requirements_s3_object_version is required. For more information, see [Importing DAGs on Amazon MWAA](https://docs.aws.amazon.com/mwaa/latest/userguide/configuring-dag-import.html). * `schedulers` - (Optional) The number of schedulers that you want to run in your environment. v2.0.2 and above accepts `2` - `5`, default `2`. v1.10.12 accepts `1`. * `sourceBucketArn` - (Required) The Amazon Resource Name (ARN) of your Amazon S3 storage bucket. For example, arn:aws:s3:::airflow-mybucketname. * `startupScriptS3ObjectVersion` - (Optional) The version of the startup shell script you want to use. You must specify the version ID that Amazon S3 assigns to the file every time you update the script. * `startupScriptS3Path` - (Optional) The relative path to the script hosted in your bucket. The script runs as your environment starts before starting the Apache Airflow process. Use this script to install dependencies, modify configuration options, and set environment variables. See [Using a startup script](https://docs.aws.amazon.com/mwaa/latest/userguide/using-startup-script.html). Supported for environment versions 2.x and later. +* `tags` - (Optional) A map of resource tags to associate with the resource. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. * `webserverAccessMode` - (Optional) Specifies whether the webserver should be accessible over the internet or via your specified VPC. Possible options: `PRIVATE_ONLY` (default) and `PUBLIC_ONLY`. * `weeklyMaintenanceWindowStart` - (Optional) Specifies the start date for the weekly maintenance window. 
-* `tags` - (Optional) A map of resource tags to associate with the resource. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. +* `workerReplacementStrategy` - (Optional) Worker replacement strategy. Valid values: `FORCED`, `GRACEFUL`. @@ -272,4 +274,4 @@ Using `terraform import`, import MWAA Environment using `Name`. For example: % terraform import aws_mwaa_environment.example MyAirflowEnvironment ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/nat_gateway.html.markdown b/website/docs/cdktf/typescript/r/nat_gateway.html.markdown index 7ea18e5883f7..f7c8a78b2457 100644 --- a/website/docs/cdktf/typescript/r/nat_gateway.html.markdown +++ b/website/docs/cdktf/typescript/r/nat_gateway.html.markdown @@ -12,6 +12,8 @@ description: |- Provides a resource to create a VPC NAT Gateway. +!> **WARNING:** You should not use the `aws_nat_gateway` resource that has `secondaryAllocationIds` in conjunction with an [`aws_nat_gateway_eip_association`](nat_gateway_eip_association.html) resource. Doing so may cause perpetual differences, and result in associations being overwritten. + ## Example Usage ### Public NAT @@ -120,10 +122,11 @@ This resource supports the following arguments: * `allocationId` - (Optional) The Allocation ID of the Elastic IP address for the NAT Gateway. Required for `connectivityType` of `public`. * `connectivityType` - (Optional) Connectivity type for the NAT Gateway. Valid values are `private` and `public`. Defaults to `public`. * `privateIp` - (Optional) The private IPv4 address to assign to the NAT Gateway. If you don't provide an address, a private IPv4 address will be automatically assigned.
+* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `subnetId` - (Required) The Subnet ID of the subnet in which to place the NAT Gateway. -* `secondaryAllocationIds` - (Optional) A list of secondary allocation EIP IDs for this NAT Gateway. +* `secondaryAllocationIds` - (Optional) A list of secondary allocation EIP IDs for this NAT Gateway. To remove all secondary allocations an empty list should be specified. * `secondaryPrivateIpAddressCount` - (Optional) [Private NAT Gateway only] The number of secondary private IPv4 addresses you want to assign to the NAT Gateway. -* `secondaryPrivateIpAddresses` - (Optional) A list of secondary private IPv4 addresses to assign to the NAT Gateway. +* `secondaryPrivateIpAddresses` - (Optional) A list of secondary private IPv4 addresses to assign to the NAT Gateway. To remove all secondary private addresses an empty list should be specified. * `tags` - (Optional) A map of tags to assign to the resource. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. ## Attribute Reference @@ -176,4 +179,4 @@ Using `terraform import`, import NAT Gateways using the `id`. 
For example: % terraform import aws_nat_gateway.private_gw nat-05dba92075d71c408 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/nat_gateway_eip_association.html.markdown b/website/docs/cdktf/typescript/r/nat_gateway_eip_association.html.markdown new file mode 100644 index 000000000000..69b619ccca49 --- /dev/null +++ b/website/docs/cdktf/typescript/r/nat_gateway_eip_association.html.markdown @@ -0,0 +1,95 @@ +--- +subcategory: "VPC (Virtual Private Cloud)" +layout: "aws" +page_title: "AWS: aws_nat_gateway_eip_association" +description: |- + Terraform resource for managing an AWS VPC NAT Gateway EIP Association. +--- + + +# Resource: aws_nat_gateway_eip_association + +Terraform resource for managing an AWS VPC NAT Gateway EIP Association. + +!> **WARNING:** You should not use the `aws_nat_gateway_eip_association` resource in conjunction with an [`aws_nat_gateway`](aws_nat_gateway.html) resource that has `secondaryAllocationIds` configured. Doing so may cause perpetual differences, and result in associations being overwritten. + +## Example Usage + +### Basic Usage + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. + */ +import { NatGatewayEipAssociation } from "./.gen/providers/aws/"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + new NatGatewayEipAssociation(this, "example", { + allocation_id: awsEipExample.id, + nat_gateway_id: awsNatGatewayExample.id, + }); + } +} + +``` + +## Argument Reference + +The following arguments are required: + +* `allocationId` - (Required) The ID of the Elastic IP Allocation to associate with the NAT Gateway. 
+* `natGatewayId` - (Required) The ID of the NAT Gateway to associate the Elastic IP Allocation to. + +The following arguments are optional: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). + +## Attribute Reference + +This resource exports no additional attributes. + +## Timeouts + +[Configuration options](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts): + +* `create` - (Default `10m`) +* `delete` - (Default `30m`) + +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import VPC NAT Gateway EIP Association using the `nat_gateway_id,allocation_id`. For example: + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. + */ +import { NatGatewayEipAssociation } from "./.gen/providers/aws/"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + NatGatewayEipAssociation.generateConfigForImport( + this, + "example", + "nat-1234567890abcdef1,eipalloc-1234567890abcdef1" + ); + } +} + +``` + +Using `terraform import`, import VPC NAT Gateway EIP Association using the `nat_gateway_id,allocation_id`. 
For example: + +```console +% terraform import aws_nat_gateway_eip_association.example nat-1234567890abcdef1,eipalloc-1234567890abcdef1 +``` + + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/neptune_cluster.html.markdown b/website/docs/cdktf/typescript/r/neptune_cluster.html.markdown index e7aef195783e..fc461224c961 100644 --- a/website/docs/cdktf/typescript/r/neptune_cluster.html.markdown +++ b/website/docs/cdktf/typescript/r/neptune_cluster.html.markdown @@ -62,6 +62,7 @@ This resource supports the following arguments: * `clusterIdentifier` - (Optional, Forces new resources) Cluster identifier. If omitted, Terraform will assign a random, unique identifier. * `clusterIdentifierPrefix` - (Optional, Forces new resource) Creates a unique cluster identifier beginning with the specified prefix. Conflicts with `clusterIdentifier`. * `copyTagsToSnapshot` - (Optional) If set to true, tags are copied to any snapshot of the DB cluster that is created. +* `deletionProtection` - (Optional) Value that indicates whether the DB cluster has deletion protection enabled.The database can't be deleted when deletion protection is enabled. By default, deletion protection is disabled. * `enableCloudwatchLogsExports` - (Optional) List of the log types this DB cluster is configured to export to Cloudwatch Logs. Currently only supports `audit` and `slowquery`. * `engine` - (Optional) Name of the database engine to be used for this Neptune cluster. Defaults to `neptune`. * `engineVersion` - (Optional) Database engine version. @@ -70,21 +71,21 @@ This resource supports the following arguments: * `iamRoles` - (Optional) List of ARNs for the IAM roles to associate to the Neptune Cluster. * `iamDatabaseAuthenticationEnabled` - (Optional) Whether or not mappings of AWS Identity and Access Management (IAM) accounts to database accounts is enabled. * `kmsKeyArn` - (Optional) ARN for the KMS encryption key. 
When specifying `kmsKeyArn`, `storageEncrypted` needs to be set to true. -* `neptuneSubnetGroupName` - (Optional) Neptune subnet group to associate with this Neptune instance. * `neptuneClusterParameterGroupName` - (Optional) Cluster parameter group to associate with the cluster. * `neptuneInstanceParameterGroupName` – (Optional) Name of DB parameter group to apply to all instances in the cluster. When upgrading, AWS does not return this value, so do not reference it in other arguments—either leave it unset, configure each instance directly, or ensure it matches the `engineVersion`. -* `storageType` - (Optional) Storage type associated with the cluster `standard/iopt1`. Default: `standard` +* `neptuneSubnetGroupName` - (Optional) Neptune subnet group to associate with this Neptune instance. +* `port` - (Optional) Port on which the Neptune accepts connections. Default is `8182`. * `preferredBackupWindow` - (Optional) Daily time range during which automated backups are created if automated backups are enabled using the BackupRetentionPeriod parameter. Time in UTC. Default: A 30-minute window selected at random from an 8-hour block of time per regionE.g., 04:00-09:00 * `preferredMaintenanceWindow` - (Optional) Weekly time range during which system maintenance can occur, in (UTC) e.g., wed:04:00-wed:04:30 -* `port` - (Optional) Port on which the Neptune accepts connections. Default is `8182`. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `replicationSourceIdentifier` - (Optional) ARN of a source Neptune cluster or Neptune instance if this Neptune cluster is to be created as a Read Replica. +* `serverlessV2ScalingConfiguration` - (Optional) If set, create the Neptune cluster as a serverless one. 
See [Serverless](#serverless) for example block attributes. * `skipFinalSnapshot` - (Optional) Whether a final Neptune snapshot is created before the Neptune cluster is deleted. If true is specified, no Neptune snapshot is created. If false is specified, a Neptune snapshot is created before the Neptune cluster is deleted, using the value from `finalSnapshotIdentifier`. Default is `false`. * `snapshotIdentifier` - (Optional) Whether or not to create this cluster from a snapshot. You can use either the name or ARN when specifying a Neptune cluster snapshot, or the ARN when specifying a Neptune snapshot. Automated snapshots **should not** be used for this attribute, unless from a different cluster. Automated snapshots are deleted as part of cluster destruction when the resource is replaced. * `storageEncrypted` - (Optional) Whether the Neptune cluster is encrypted. The default is `false` if not specified. +* `storageType` - (Optional) Storage type associated with the cluster `standard/iopt1`. Default: `standard`. * `tags` - (Optional) Map of tags to assign to the Neptune cluster. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. * `vpcSecurityGroupIds` - (Optional) List of VPC security groups to associate with the Cluster -* `deletionProtection` - (Optional) Value that indicates whether the DB cluster has deletion protection enabled.The database can't be deleted when deletion protection is enabled. By default, deletion protection is disabled. -* `serverlessV2ScalingConfiguration` - (Optional) If set, create the Neptune cluster as a serverless one. See [Serverless](#serverless) for example block attributes. 
### Serverless @@ -183,4 +184,4 @@ Using `terraform import`, import `aws_neptune_cluster` using the cluster identif % terraform import aws_neptune_cluster.example my-cluster ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/neptune_cluster_endpoint.html.markdown b/website/docs/cdktf/typescript/r/neptune_cluster_endpoint.html.markdown index 2a730a7f15b1..e160c318532b 100644 --- a/website/docs/cdktf/typescript/r/neptune_cluster_endpoint.html.markdown +++ b/website/docs/cdktf/typescript/r/neptune_cluster_endpoint.html.markdown @@ -40,6 +40,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `clusterIdentifier` - (Required, Forces new resources) The DB cluster identifier of the DB cluster associated with the endpoint. * `clusterEndpointIdentifier` - (Required, Forces new resources) The identifier of the endpoint. * `endpointType` - (Required) The type of the endpoint. One of: `READER`, `WRITER`, `ANY`. 
@@ -88,4 +89,4 @@ Using `terraform import`, import `aws_neptune_cluster_endpoint` using the `clust % terraform import aws_neptune_cluster_endpoint.example my-cluster:my-endpoint ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/neptune_cluster_instance.html.markdown b/website/docs/cdktf/typescript/r/neptune_cluster_instance.html.markdown index caf308dbc258..d41776cc8faf 100644 --- a/website/docs/cdktf/typescript/r/neptune_cluster_instance.html.markdown +++ b/website/docs/cdktf/typescript/r/neptune_cluster_instance.html.markdown @@ -62,6 +62,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `applyImmediately` - (Optional) Specifies whether any instance modifications are applied immediately, or during the next maintenance window. Default is`false`. * `autoMinorVersionUpgrade` - (Optional) Indicates that minor engine upgrades will be applied automatically to the instance during the maintenance window. Default is `true`. @@ -96,7 +97,7 @@ This resource exports the following attributes in addition to the arguments abov * `storageEncrypted` - Specifies whether the neptune cluster is encrypted. * `storageType` - Storage type associated with the cluster `standard/iopt1`. * `tagsAll` - A map of tags assigned to the resource, including those inherited from the provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). -* `writer` – Boolean indicating if this instance is writable. `False` indicates this instance is a read replica. 
+* `writer` - Boolean indicating if this instance is writable. `False` indicates this instance is a read replica. [1]: https://www.terraform.io/docs/configuration/meta-arguments/count.html @@ -140,4 +141,4 @@ Using `terraform import`, import `aws_neptune_cluster_instance` using the instan % terraform import aws_neptune_cluster_instance.example my-instance ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/neptune_cluster_parameter_group.html.markdown b/website/docs/cdktf/typescript/r/neptune_cluster_parameter_group.html.markdown index 5ae608eaf142..d83d951e065d 100644 --- a/website/docs/cdktf/typescript/r/neptune_cluster_parameter_group.html.markdown +++ b/website/docs/cdktf/typescript/r/neptune_cluster_parameter_group.html.markdown @@ -46,6 +46,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Optional, Forces new resource) The name of the neptune cluster parameter group. If omitted, Terraform will assign a random, unique name. * `namePrefix` - (Optional, Forces new resource) Creates a unique name beginning with the specified prefix. Conflicts with `name`. * `family` - (Required) The family of the neptune cluster parameter group. 
@@ -99,4 +100,4 @@ Using `terraform import`, import Neptune Cluster Parameter Groups using the `nam % terraform import aws_neptune_cluster_parameter_group.cluster_pg production-pg-1 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/neptune_cluster_snapshot.html.markdown b/website/docs/cdktf/typescript/r/neptune_cluster_snapshot.html.markdown index 40245a3c2ba0..91bb648a615b 100644 --- a/website/docs/cdktf/typescript/r/neptune_cluster_snapshot.html.markdown +++ b/website/docs/cdktf/typescript/r/neptune_cluster_snapshot.html.markdown @@ -39,6 +39,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `dbClusterIdentifier` - (Required) The DB Cluster Identifier from which to take the snapshot. * `dbClusterSnapshotIdentifier` - (Required) The Identifier for the snapshot. @@ -54,7 +55,7 @@ This resource exports the following attributes in addition to the arguments abov * `kmsKeyId` - If storage_encrypted is true, the AWS KMS key identifier for the encrypted DB cluster snapshot. * `licenseModel` - License model information for the restored DB cluster. * `port` - Port that the DB cluster was listening on at the time of the snapshot. -* `source_db_cluster_snapshot_identifier` - The DB Cluster Snapshot Arn that the DB Cluster Snapshot was copied from. It only has value in case of cross customer or cross region copy. +* `sourceDbClusterSnapshotIdentifier` - The DB Cluster Snapshot Arn that the DB Cluster Snapshot was copied from. It only has value in case of cross customer or cross region copy. * `storageEncrypted` - Specifies whether the DB cluster snapshot is encrypted. 
* `status` - The status of this DB Cluster Snapshot. * `vpcId` - The VPC ID associated with the DB cluster snapshot. @@ -97,4 +98,4 @@ Using `terraform import`, import `aws_neptune_cluster_snapshot` using the cluste % terraform import aws_neptune_cluster_snapshot.example my-cluster-snapshot ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/neptune_event_subscription.html.markdown b/website/docs/cdktf/typescript/r/neptune_event_subscription.html.markdown index b480fcf3a88a..4ab272e08390 100644 --- a/website/docs/cdktf/typescript/r/neptune_event_subscription.html.markdown +++ b/website/docs/cdktf/typescript/r/neptune_event_subscription.html.markdown @@ -85,6 +85,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `enabled` - (Optional) A boolean flag to enable/disable the subscription. Defaults to true. * `eventCategories` - (Optional) A list of event categories for a `sourceType` that you want to subscribe to. Run `aws neptune describe-event-categories` to find all the event categories. * `name` - (Optional) The name of the Neptune event subscription. By default generated by Terraform. 
@@ -143,4 +144,4 @@ Using `terraform import`, import `aws_neptune_event_subscription` using the even % terraform import aws_neptune_event_subscription.example my-event-subscription ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/neptune_global_cluster.html.markdown b/website/docs/cdktf/typescript/r/neptune_global_cluster.html.markdown index e86cd3c8d7ac..b7c3d87a06fb 100644 --- a/website/docs/cdktf/typescript/r/neptune_global_cluster.html.markdown +++ b/website/docs/cdktf/typescript/r/neptune_global_cluster.html.markdown @@ -141,6 +141,7 @@ This resource supports the following arguments: * `deletionProtection` - (Optional) If the Global Cluster should have deletion protection enabled. The database can't be deleted when this value is set to `true`. The default is `false`. * `engine` - (Optional, Forces new resources) Name of the database engine to be used for this DB cluster. Terraform will only perform drift detection if a configuration value is provided. Current Valid values: `neptune`. Conflicts with `sourceDbClusterIdentifier`. * `engineVersion` - (Optional) Engine version of the global database. Upgrading the engine version will result in all cluster members being immediately updated and will. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `sourceDbClusterIdentifier` - (Optional) ARN to use as the primary DB Cluster of the Global Cluster on creation. Terraform cannot perform drift detection of this value. * `storageEncrypted` - (Optional, Forces new resources) Whether the DB cluster is encrypted. The default is `false` unless `sourceDbClusterIdentifier` is specified and encrypted. 
Terraform will only perform drift detection if a configuration value is provided. @@ -219,4 +220,4 @@ class MyConvertedCode extends TerraformStack { ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/neptune_parameter_group.html.markdown b/website/docs/cdktf/typescript/r/neptune_parameter_group.html.markdown index ff4e05bf5cca..c172c20db087 100644 --- a/website/docs/cdktf/typescript/r/neptune_parameter_group.html.markdown +++ b/website/docs/cdktf/typescript/r/neptune_parameter_group.html.markdown @@ -45,6 +45,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Optional, Forces new resource) The name of the Neptune parameter group. * `namePrefix` - (Optional, Forces new resource) Creates a unique name beginning with the specified prefix. Conflicts with `name`. * `family` - (Required) The family of the Neptune parameter group. @@ -94,4 +95,4 @@ Using `terraform import`, import Neptune Parameter Groups using the `name`. 
For % terraform import aws_neptune_parameter_group.some_pg some-pg ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/neptune_subnet_group.html.markdown b/website/docs/cdktf/typescript/r/neptune_subnet_group.html.markdown index 1a7e55fcf651..8995605ac376 100644 --- a/website/docs/cdktf/typescript/r/neptune_subnet_group.html.markdown +++ b/website/docs/cdktf/typescript/r/neptune_subnet_group.html.markdown @@ -42,6 +42,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Optional, Forces new resource) The name of the neptune subnet group. If omitted, Terraform will assign a random, unique name. * `namePrefix` - (Optional, Forces new resource) Creates a unique name beginning with the specified prefix. Conflicts with `name`. * `description` - (Optional) The description of the neptune subnet group. Defaults to "Managed by Terraform". @@ -88,4 +89,4 @@ Using `terraform import`, import Neptune Subnet groups using the `name`. For exa % terraform import aws_neptune_subnet_group.default production-subnet-group ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/neptunegraph_graph.html.markdown b/website/docs/cdktf/typescript/r/neptunegraph_graph.html.markdown index c0b53e808a49..8da8e8ce3d96 100644 --- a/website/docs/cdktf/typescript/r/neptunegraph_graph.html.markdown +++ b/website/docs/cdktf/typescript/r/neptunegraph_graph.html.markdown @@ -10,7 +10,7 @@ description: |- # Resource: aws_neptunegraph_graph -The aws_neptunegraph_graph resource creates an Amazon Analytics Graph. 
+The `aws_neptunegraph_graph` resource creates an Amazon Analytics Graph. ## Example Usage @@ -61,19 +61,14 @@ The following arguments are required: The following arguments are optional: +- `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). - `deletionProtection` (Boolean, Default: `true`) Value that indicates whether the Graph has deletion protection enabled. The graph can't be deleted when deletion protection is enabled. - - `graphName` (String, Forces new resource) Contains a user-supplied name for the Graph. If omitted, Terraform will assign a random, unique identifier. - - `publicConnectivity` (Boolean, Default: `false`) Specifies whether the Graph can be reached over the internet. Access to all graphs requires IAM authentication. When the Graph is publicly reachable, its Domain Name System (DNS) endpoint resolves to the public IP address from the internet. When the Graph isn't publicly reachable, you need to create a PrivateGraphEndpoint in a given VPC to ensure the DNS name resolves to a private IP address that is reachable from the VPC. - - `replicaCount` (Number, Default: `1`, Forces new resource) Specifies the number of replicas you want when finished. All replicas will be provisioned in different availability zones. Replica Count should always be less than or equal to 2. - - `kmsKeyIdentifier` (String) The ARN for the KMS encryption key. By Default, Neptune Analytics will use an AWS provided key ("AWS_OWNED_KEY"). This parameter is used if you want to encrypt the graph using a KMS Customer Managed Key (CMK). 
- - `vectorSearchConfiguration` (Block, Forces new resource) Vector Search Configuration (see below for nested schema of vector_search_configuration) - -- `tags` (Attributes Set) The tags associated with this graph. (see below for nested schema of tags) +- `tags` - (Optional) Key-value tags for the graph. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. ## Attribute Reference @@ -82,6 +77,7 @@ This resource exports the following attributes in addition to the arguments abov - `endpoint` (String) The connection endpoint for the graph. For example: `g-12a3bcdef4.us-east-1.neptune-graph.amazonaws.com` - `arn` (String) Graph resource ARN - `id` (String) The auto-generated id assigned by the service. +- `tagsAll` - A map of tags assigned to the resource, including those inherited from the provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). ## Timeouts @@ -132,4 +128,4 @@ Using `terraform import`, import `aws_neptunegraph_graph` using the graph identi % terraform import aws_neptunegraph_graph.example "graph_id" ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/network_acl.html.markdown b/website/docs/cdktf/typescript/r/network_acl.html.markdown index c05465223a72..9b30e7251fb5 100644 --- a/website/docs/cdktf/typescript/r/network_acl.html.markdown +++ b/website/docs/cdktf/typescript/r/network_acl.html.markdown @@ -72,6 +72,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `vpcId` - (Required) The ID of the associated VPC. * `subnetIds` - (Optional) A list of Subnet IDs to apply the ACL to * `ingress` - (Optional) Specifies an ingress rule. Parameters defined below. @@ -137,4 +138,4 @@ Using `terraform import`, import Network ACLs using the `id`. For example: % terraform import aws_network_acl.main acl-7aaabd18 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/network_acl_association.html.markdown b/website/docs/cdktf/typescript/r/network_acl_association.html.markdown index c5e333f10c7c..8ebef2af2f1d 100644 --- a/website/docs/cdktf/typescript/r/network_acl_association.html.markdown +++ b/website/docs/cdktf/typescript/r/network_acl_association.html.markdown @@ -43,6 +43,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `networkAclId` - (Required) The ID of the network ACL. * `subnetId` - (Required) The ID of the associated Subnet. @@ -84,4 +85,4 @@ Using `terraform import`, import Network ACL associations using the `id`. 
For ex % terraform import aws_network_acl_association.main aclassoc-02baf37f20966b3e6 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/network_acl_rule.html.markdown b/website/docs/cdktf/typescript/r/network_acl_rule.html.markdown index 8679c81d1062..c962ba6d44ed 100644 --- a/website/docs/cdktf/typescript/r/network_acl_rule.html.markdown +++ b/website/docs/cdktf/typescript/r/network_acl_rule.html.markdown @@ -59,6 +59,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `networkAclId` - (Required) The ID of the network ACL. * `ruleNumber` - (Required) The rule number for the entry (for example, 100). ACL entries are processed in ascending order by rule number. * `egress` - (Optional, bool) Indicates whether this is an egress rule (rule is applied to traffic leaving the subnet). Default `false`. @@ -151,4 +152,4 @@ Using the procotol's decimal value: % terraform import aws_network_acl_rule.my_rule acl-7aaabd18:100:6:false ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/network_interface.html.markdown b/website/docs/cdktf/typescript/r/network_interface.html.markdown index 0138cca2ca32..ff6ab7d9c685 100644 --- a/website/docs/cdktf/typescript/r/network_interface.html.markdown +++ b/website/docs/cdktf/typescript/r/network_interface.html.markdown @@ -67,6 +67,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `attachment` - (Optional) Configuration block to define the attachment of the ENI. See [Attachment](#attachment) below for more details! * `description` - (Optional) Description for the network interface. * `enablePrimaryIpv6` - (Optional) Enables assigning a primary IPv6 Global Unicast Address (GUA) to the network interface (ENI) in dual-stack or IPv6-only subnets. This ensures the instance attached to the ENI retains a consistent IPv6 address. Once enabled, the first IPv6 GUA becomes the primary IPv6 address and cannot be disabled. The primary IPv6 address remains assigned until the instance is terminated or the ENI is detached. Enabling and subsequent disabling forces recreation of the ENI. @@ -93,6 +94,7 @@ The `attachment` block supports the following: * `instance` - (Required) ID of the instance to attach to. * `deviceIndex` - (Required) Integer to define the devices index. +* `networkCardIndex` - (Optional) Index of the network card. Specify a value greater than 0 when using multiple network cards, which are supported by [some instance types](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-eni.html#network-cards). The default is 0. ## Attribute Reference @@ -133,4 +135,4 @@ Using `terraform import`, import Network Interfaces using the `id`. 
For example: % terraform import aws_network_interface.test eni-e5aa89a3 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/network_interface_attachment.html.markdown b/website/docs/cdktf/typescript/r/network_interface_attachment.html.markdown index 6b7ef9f56138..19c92cd866bb 100644 --- a/website/docs/cdktf/typescript/r/network_interface_attachment.html.markdown +++ b/website/docs/cdktf/typescript/r/network_interface_attachment.html.markdown @@ -40,9 +40,11 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `instanceId` - (Required) Instance ID to attach. * `networkInterfaceId` - (Required) ENI ID to attach. * `deviceIndex` - (Required) Network interface index (int). +* `networkCardIndex` - (Optional) Index of the network card. Specify a value greater than 0 when using multiple network cards, which are supported by [some instance types](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-eni.html#network-cards). The default is 0. 
## Attribute Reference @@ -85,4 +87,4 @@ Using `terraform import`, import Elastic network interface (ENI) Attachments usi % terraform import aws_network_interface_attachment.secondary_nic eni-attach-0a33842b4ec347c4c ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/network_interface_permission.html.markdown b/website/docs/cdktf/typescript/r/network_interface_permission.html.markdown index 2826a367ca0f..e74792e9346e 100644 --- a/website/docs/cdktf/typescript/r/network_interface_permission.html.markdown +++ b/website/docs/cdktf/typescript/r/network_interface_permission.html.markdown @@ -56,8 +56,9 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -The following arguments are required: +This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `networkInterfaceId` - (Required) The ID of the network interface. * `awsAccountId` - (Required) The Amazon Web Services account ID. * `permission` - (Required) The type of permission to grant. Valid values are `INSTANCE-ATTACH` or `EIP-ASSOCIATE`. 
@@ -100,4 +101,4 @@ Using `terraform import`, import Network Interface Permissions using the `networ % terraform import aws_network_interface_permission.example eni-perm-056ad97ce2ac377ed ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/network_interface_sg_attachment.html.markdown b/website/docs/cdktf/typescript/r/network_interface_sg_attachment.html.markdown index 854ac9a58102..58306b01a0de 100644 --- a/website/docs/cdktf/typescript/r/network_interface_sg_attachment.html.markdown +++ b/website/docs/cdktf/typescript/r/network_interface_sg_attachment.html.markdown @@ -117,6 +117,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `securityGroupId` - (Required) The ID of the security group. * `networkInterfaceId` - (Required) The ID of the network interface to attach to. @@ -164,4 +165,4 @@ Using `terraform import`, import Network Interface Security Group attachments us % terraform import aws_network_interface_sg_attachment.sg_attachment eni-1234567890abcdef0_sg-1234567890abcdef0 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/networkfirewall_firewall.html.markdown b/website/docs/cdktf/typescript/r/networkfirewall_firewall.html.markdown index 352089676818..65dff97b9c4a 100644 --- a/website/docs/cdktf/typescript/r/networkfirewall_firewall.html.markdown +++ b/website/docs/cdktf/typescript/r/networkfirewall_firewall.html.markdown @@ -55,10 +55,65 @@ class MyConvertedCode extends TerraformStack { ``` +### Transit Gateway Attached Firewall + +```typescript +// DO NOT EDIT. 
Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { Fn, Token, TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. + */ +import { DataAwsAvailabilityZones } from "./.gen/providers/aws/data-aws-availability-zones"; +import { NetworkfirewallFirewall } from "./.gen/providers/aws/networkfirewall-firewall"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + const example = new DataAwsAvailabilityZones(this, "example", { + state: "available", + }); + const awsNetworkfirewallFirewallExample = new NetworkfirewallFirewall( + this, + "example_1", + { + availabilityZoneMapping: [ + { + availabilityZoneId: Token.asString( + Fn.lookupNested(example.zoneIds, ["0"]) + ), + }, + { + availabilityZoneId: Token.asString( + Fn.lookupNested(example.zoneIds, ["1"]) + ), + }, + ], + firewallPolicyArn: Token.asString( + awsNetworkfirewallFirewallPolicyExample.arn + ), + name: "example", + transitGatewayId: Token.asString(awsEc2TransitGatewayExample.id), + } + ); + /*This allows the Terraform resource name to match the original name. 
You can remove the call if you don't need them to match.*/ + awsNetworkfirewallFirewallExample.overrideLogicalId("example"); + } +} + +``` + +### Transit Gateway Attached Firewall (Cross Account) + +A full example of how to create a Transit Gateway in one AWS account, share it with a second AWS account, and create Network Firewall in the second account to the Transit Gateway via the `aws_networkfirewall_firewall` and [`aws_networkfirewall_network_firewall_transit_gateway_attachment_accepter`](/docs/providers/aws/r/networkfirewall_network_firewall_transit_gateway_attachment_accepter.html) resources can be found in [the `./examples/network-firewall-cross-account-transit-gateway` directory within the Github Repository](https://github.com/hashicorp/terraform-provider-aws/tree/main/examples/network-firewall-cross-account-transit-gateway) + +## Argument Reference + +This resource supports the following arguments: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `availabilityZoneChangeProtection` - (Optional) A setting indicating whether the firewall is protected against changes to its Availability Zone configuration. When set to `true`, you must first disable this protection before adding or removing Availability Zones. +* `availabilityZoneMapping` - (Optional) Required when creating a transit gateway-attached firewall. Set of configuration blocks describing the Availability Zones where you want to create firewall endpoints for a transit gateway-attached firewall. * `deleteProtection` - (Optional) A flag indicating whether the firewall is protected against deletion. Use this setting to protect against accidentally deleting a firewall that is in use. Defaults to `false`.
* `description` - (Optional) A friendly description of the firewall. * `enabledAnalysisTypes` - (Optional) Set of types for which to collect analysis metrics. See [Reporting on network traffic in Network Firewall](https://docs.aws.amazon.com/network-firewall/latest/developerguide/reporting.html) for details on how to use the data. Valid values: `TLS_SNI`, `HTTP_HOST`. Defaults to `[]`. @@ -67,9 +122,16 @@ This resource supports the following arguments: * `firewallPolicyChangeProtection` - (Optional) A flag indicating whether the firewall is protected against a change to the firewall policy association. Use this setting to protect against accidentally modifying the firewall policy for a firewall that is in use. Defaults to `false`. * `name` - (Required, Forces new resource) A friendly name of the firewall. * `subnetChangeProtection` - (Optional) A flag indicating whether the firewall is protected against changes to the subnet associations. Use this setting to protect against accidentally modifying the subnet associations for a firewall that is in use. Defaults to `false`. -* `subnetMapping` - (Required) Set of configuration blocks describing the public subnets. Each subnet must belong to a different Availability Zone in the VPC. AWS Network Firewall creates a firewall endpoint in each subnet. See [Subnet Mapping](#subnet-mapping) below for details. +* `subnetMapping` - (Optional) Required when creating a VPC attached firewall. Set of configuration blocks describing the public subnets. Each subnet must belong to a different Availability Zone in the VPC. AWS Network Firewall creates a firewall endpoint in each subnet. See [Subnet Mapping](#subnet-mapping) below for details. * `tags` - (Optional) Map of resource tags to associate with the resource. 
If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. -* `vpcId` - (Required, Forces new resource) The unique identifier of the VPC where AWS Network Firewall should create the firewall. +* `transitGatewayId` - (Optional, Forces new resource) Required when creating a transit gateway-attached firewall. The unique identifier of the transit gateway to attach to this firewall. You can provide either a transit gateway from your account or one that has been shared with you through AWS Resource Access Manager. +* `vpcId` - (Optional, Forces new resource) Required when creating a VPC attached firewall. The unique identifier of the VPC where AWS Network Firewall should create the firewall. + +### Availability Zone Mapping + +The `availabilityZoneMapping` block supports the following arguments: + +* `availabilityZoneId` - (Required) The ID of the Availability Zone where the firewall endpoint is located. ### Encryption Configuration @@ -97,16 +159,19 @@ This resource exports the following attributes in addition to the arguments abov * `endpointId` - The identifier of the firewall endpoint that AWS Network Firewall has instantiated in the subnet. You use this to identify the firewall endpoint in the VPC route tables, when you redirect the VPC traffic through the endpoint. * `subnetId` - The unique identifier of the subnet that you've specified to be used for a firewall endpoint. * `availabilityZone` - The Availability Zone where the subnet is configured. + * `transit_gateway_attachment_sync_states` - Set of transit gateways configured for use by the firewall. + * `attachmentId` - The unique identifier of the transit gateway attachment.
* `tagsAll` - A map of tags assigned to the resource, including those inherited from the provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). +* `transitGatewayOwnerAccountId` - The AWS account ID that owns the transit gateway. * `updateToken` - A string token used when updating a firewall. ## Timeouts [Configuration options](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts): -- `create` - (Default `30m`) -- `update` - (Default `30m`) -- `delete` - (Default `30m`) +- `create` - (Default `60m`) +- `update` - (Default `60m`) +- `delete` - (Default `60m`) ## Import @@ -140,4 +205,4 @@ Using `terraform import`, import Network Firewall Firewalls using their `arn`. F % terraform import aws_networkfirewall_firewall.example arn:aws:network-firewall:us-west-1:123456789012:firewall/example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/networkfirewall_firewall_policy.html.markdown b/website/docs/cdktf/typescript/r/networkfirewall_firewall_policy.html.markdown index 09c2dc4d347d..0537eeee8f75 100644 --- a/website/docs/cdktf/typescript/r/networkfirewall_firewall_policy.html.markdown +++ b/website/docs/cdktf/typescript/r/networkfirewall_firewall_policy.html.markdown @@ -22,10 +22,20 @@ import { Token, TerraformStack } from "cdktf"; * Provider bindings are generated by running `cdktf get`. * See https://cdk.tf/provider-generation for more details. 
*/ +import { DataAwsCallerIdentity } from "./.gen/providers/aws/data-aws-caller-identity"; +import { DataAwsPartition } from "./.gen/providers/aws/data-aws-partition"; +import { DataAwsRegion } from "./.gen/providers/aws/data-aws-region"; import { NetworkfirewallFirewallPolicy } from "./.gen/providers/aws/networkfirewall-firewall-policy"; class MyConvertedCode extends TerraformStack { constructor(scope: Construct, name: string) { super(scope, name); + const current = new DataAwsCallerIdentity(this, "current", {}); + const dataAwsPartitionCurrent = new DataAwsPartition(this, "current_1", {}); + /*This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match.*/ + dataAwsPartitionCurrent.overrideLogicalId("current"); + const dataAwsRegionCurrent = new DataAwsRegion(this, "current_2", {}); + /*This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match.*/ + dataAwsRegionCurrent.overrideLogicalId("current"); new NetworkfirewallFirewallPolicy(this, "example", { firewallPolicy: { statelessDefaultActions: ["aws:pass"], @@ -37,7 +47,13 @@ class MyConvertedCode extends TerraformStack { }, ], tlsInspectionConfigurationArn: - "arn:aws:network-firewall:REGION:ACCT:tls-configuration/example", + "arn:${" + + dataAwsPartitionCurrent.partition + + "}:network-firewall:${" + + dataAwsRegionCurrent.region + + "}:${" + + current.accountId + + "}:tls-configuration/example", }, name: "example", tags: { @@ -110,7 +126,7 @@ import { NetworkfirewallFirewallPolicy } from "./.gen/providers/aws/networkfirew class MyConvertedCode extends TerraformStack { constructor(scope: Construct, name: string) { super(scope, name); - new NetworkfirewallFirewallPolicy(this, "test", { + new NetworkfirewallFirewallPolicy(this, "example", { firewallPolicy: { statelessCustomAction: [ { @@ -136,18 +152,105 @@ class MyConvertedCode extends TerraformStack { ``` +## Policy with Active Threat 
Defense in Action Order + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { Token, TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. + */ +import { DataAwsPartition } from "./.gen/providers/aws/data-aws-partition"; +import { DataAwsRegion } from "./.gen/providers/aws/data-aws-region"; +import { NetworkfirewallFirewallPolicy } from "./.gen/providers/aws/networkfirewall-firewall-policy"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + const current = new DataAwsPartition(this, "current", {}); + const dataAwsRegionCurrent = new DataAwsRegion(this, "current_1", {}); + /*This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match.*/ + dataAwsRegionCurrent.overrideLogicalId("current"); + new NetworkfirewallFirewallPolicy(this, "example", { + firewallPolicy: { + statefulRuleGroupReference: [ + { + deepThreatInspection: Token.asString(true), + resourceArn: + "arn:${" + + current.partition + + "}:network-firewall:${" + + dataAwsRegionCurrent.region + + "}:aws-managed:stateful-rulegroup/AttackInfrastructureActionOrder", + }, + ], + statelessDefaultActions: ["aws:pass"], + statelessFragmentDefaultActions: ["aws:drop"], + }, + name: "example", + }); + } +} + +``` + +## Policy with Active Threat Defense in Strict Order + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { Token, TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. 
+ */ +import { DataAwsPartition } from "./.gen/providers/aws/data-aws-partition"; +import { DataAwsRegion } from "./.gen/providers/aws/data-aws-region"; +import { NetworkfirewallFirewallPolicy } from "./.gen/providers/aws/networkfirewall-firewall-policy"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + const current = new DataAwsPartition(this, "current", {}); + const dataAwsRegionCurrent = new DataAwsRegion(this, "current_1", {}); + /*This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match.*/ + dataAwsRegionCurrent.overrideLogicalId("current"); + new NetworkfirewallFirewallPolicy(this, "example", { + firewallPolicy: { + statefulEngineOptions: { + ruleOrder: "STRICT_ORDER", + }, + statefulRuleGroupReference: [ + { + deepThreatInspection: Token.asString(false), + priority: 1, + resourceArn: + "arn:${" + + current.partition + + "}:network-firewall:${" + + dataAwsRegionCurrent.region + + "}:aws-managed:stateful-rulegroup/AttackInfrastructureStrictOrder", + }, + ], + statelessDefaultActions: ["aws:pass"], + statelessFragmentDefaultActions: ["aws:drop"], + }, + name: "example", + }); + } +} + +``` + ## Argument Reference This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` - (Optional) A friendly description of the firewall policy. - * `encryptionConfiguration` - (Optional) KMS encryption configuration settings. See [Encryption Configuration](#encryption-configuration) below for details. - * `firewallPolicy` - (Required) A configuration block describing the rule groups and policy actions to use in the firewall policy. 
See [Firewall Policy](#firewall-policy) below for details. - * `name` - (Required, Forces new resource) A friendly name of the firewall policy. - * `tags` - (Optional) Map of resource tags to associate with the resource. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. ### Encryption Configuration @@ -201,7 +304,7 @@ The `statefulEngineOptions` block supports the following argument: ~> **NOTE:** If the `STRICT_ORDER` rule order is specified, this firewall policy can only reference stateful rule groups that utilize `STRICT_ORDER`. -* `flow_timeouts` - (Optional) Amount of time that can pass without any traffic sent through the firewall before the firewall determines that the connection is idle. +* `flowTimeouts` - (Optional) Amount of time that can pass without any traffic sent through the firewall before the firewall determines that the connection is idle. * `ruleOrder` - Indicates how to manage the order of stateful rule evaluation for the policy. Default value: `DEFAULT_ACTION_ORDER`. Valid values: `DEFAULT_ACTION_ORDER`, `STRICT_ORDER`. @@ -209,7 +312,7 @@ The `statefulEngineOptions` block supports the following argument: ### Flow Timeouts -The `flow_timeouts` block supports the following argument: +The `flowTimeouts` block supports the following argument: * `tcpIdleTimeoutSeconds` - Number of seconds that can pass without any TCP traffic sent through the firewall before the firewall determines that the connection is idle. After the idle timeout passes, data packets are dropped, however, the next TCP SYN packet is considered a new flow and is processed by the firewall. Clients or targets can use TCP keepalive packets to reset the idle timeout. Default value: `350`. 
@@ -217,6 +320,9 @@ The `flow_timeouts` block supports the following argument: The `statefulRuleGroupReference` block supports the following arguments: +* `deepThreatInspection` - (Optional) Whether to enable deep threat inspection, which allows AWS to analyze service logs of network traffic processed by these rule groups to identify threat indicators across customers. AWS will use these threat indicators to improve the active threat defense managed rule groups and protect the security of AWS customers and services. This only applies to active threat defense managed rule groups. + + For details, refer to [AWS active threat defense for AWS Network Firewall](https://docs.aws.amazon.com/network-firewall/latest/developerguide/aws-managed-rule-groups-atd.html) in the AWS Network Firewall Developer Guide. * `priority` - (Optional) An integer setting that indicates the order in which to apply the stateful rule groups in a single policy. This argument must be specified if the policy has a `statefulEngineOptions` block with a `ruleOrder` value of `STRICT_ORDER`. AWS Network Firewall applies each stateful rule group to a packet starting with the group that has the lowest priority setting. * `resourceArn` - (Required) The Amazon Resource Name (ARN) of the stateful rule group. @@ -305,4 +411,4 @@ Using `terraform import`, import Network Firewall Policies using their `arn`.
Fo % terraform import aws_networkfirewall_firewall_policy.example arn:aws:network-firewall:us-west-1:123456789012:firewall-policy/example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/networkfirewall_firewall_transit_gateway_attachment_accepter.html.markdown b/website/docs/cdktf/typescript/r/networkfirewall_firewall_transit_gateway_attachment_accepter.html.markdown new file mode 100644 index 000000000000..42e0efefcc83 --- /dev/null +++ b/website/docs/cdktf/typescript/r/networkfirewall_firewall_transit_gateway_attachment_accepter.html.markdown @@ -0,0 +1,107 @@ +--- +subcategory: "Network Firewall" +layout: "aws" +page_title: "AWS: aws_networkfirewall_firewall_transit_gateway_attachment_accepter" +description: |- + Manages an AWS Network Firewall Firewall Transit Gateway Attachment Accepter. +--- + + + +# Resource: aws_networkfirewall_firewall_transit_gateway_attachment_accepter + +Manages an AWS Network Firewall Firewall Transit Gateway Attachment Accepter. + +When a cross-account (requester's AWS account differs from the accepter's AWS account) requester creates a Network Firewall with Transit Gateway ID using `aws_networkfirewall_firewall`. Then an EC2 Transit Gateway VPC Attachment resource is automatically created in the accepter's account. +The accepter can use the `aws_networkfirewall_firewall_transit_gateway_attachment_accepter` resource to "adopt" its side of the connection into management. + +~> **NOTE:** If the `transitGatewayId` argument in the `aws_networkfirewall_firewall` resource is used to attach a firewall to a transit gateway in a cross-account setup (where **Auto accept shared attachments** is disabled), the resource will be considered created when the transit gateway attachment is in the *Pending Acceptance* state and the firewall is in the *Provisioning* status. 
At this point, you can use the `aws_networkfirewall_firewall_transit_gateway_attachment_accepter` resource to finalize the network firewall deployment. Once the transit gateway attachment reaches the *Available* state, the firewall status *Ready*. + +## Example Usage + +### Basic Usage + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { Fn, Token, TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. + */ +import { NetworkfirewallFirewallTransitGatewayAttachmentAccepter } from "./.gen/providers/aws/networkfirewall-firewall-transit-gateway-attachment-accepter"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + new NetworkfirewallFirewallTransitGatewayAttachmentAccepter( + this, + "example", + { + transitGatewayAttachmentId: Token.asString( + Fn.lookupNested(awsNetworkfirewallFirewallExample.firewallStatus, [ + "0", + "transit_gateway_attachment_sync_state", + "0", + "attachment_id", + ]) + ), + } + ); + } +} + +``` + +A full example of how to create a Transit Gateway in one AWS account, share it with a second AWS account, and create Network Firewall in the second account to the Transit Gateway via the `aws_networkfirewall_firewall` and `aws_networkfirewall_firewall_transit_gateway_attachment_accepter` resources can be found in [the `./examples/network-firewall-cross-account-transit-gateway` directory within the Github Repository](https://github.com/hashicorp/terraform-provider-aws/tree/main/examples/network-firewall-cross-account-transit-gateway) + +## Argument Reference + +This resource supports the following arguments: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `transitGatewayAttachmentId` - (Required) The unique identifier of the transit gateway attachment to accept. This ID is returned in the response when creating a transit gateway-attached firewall. + +## Attribute Reference + +This resource exports no additional attributes. + +## Timeouts + +[Configuration options](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts): + +* `create` - (Default `60m`) +* `delete` - (Default `60m`) + +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Network Firewall Firewall Transit Gateway Attachment Accepter using the `transitGatewayAttachmentId`. For example: + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. + */ +import { NetworkfirewallFirewallTransitGatewayAttachmentAccepter } from "./.gen/providers/aws/networkfirewall-firewall-transit-gateway-attachment-accepter"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + NetworkfirewallFirewallTransitGatewayAttachmentAccepter.generateConfigForImport( + this, + "example", + "tgw-attach-0c3b7e9570eee089c" + ); + } +} + +``` + +Using `terraform import`, import Network Firewall Firewall Transit Gateway Attachment Accepter using the `transitGatewayAttachmentId`. 
For example: + +```console +% terraform import aws_networkfirewall_firewall_transit_gateway_attachment_accepter.example tgw-attach-0c3b7e9570eee089c +``` + + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/networkfirewall_logging_configuration.html.markdown b/website/docs/cdktf/typescript/r/networkfirewall_logging_configuration.html.markdown index 7c693459d44a..b867d467a49a 100644 --- a/website/docs/cdktf/typescript/r/networkfirewall_logging_configuration.html.markdown +++ b/website/docs/cdktf/typescript/r/networkfirewall_logging_configuration.html.markdown @@ -120,8 +120,8 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `firewallArn` - (Required, Forces new resource) The Amazon Resource Name (ARN) of the Network Firewall firewall. - * `loggingConfiguration` - (Required) A configuration block describing how AWS Network Firewall performs logging for a firewall. See [Logging Configuration](#logging-configuration) below for details. 
### Logging Configuration @@ -181,4 +181,4 @@ Using `terraform import`, import Network Firewall Logging Configurations using t % terraform import aws_networkfirewall_logging_configuration.example arn:aws:network-firewall:us-west-1:123456789012:firewall/example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/networkfirewall_resource_policy.html.markdown b/website/docs/cdktf/typescript/r/networkfirewall_resource_policy.html.markdown index 290a3c58e21a..db44cc6f0a7a 100644 --- a/website/docs/cdktf/typescript/r/networkfirewall_resource_policy.html.markdown +++ b/website/docs/cdktf/typescript/r/networkfirewall_resource_policy.html.markdown @@ -101,8 +101,8 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `policy` - (Required) JSON formatted policy document that controls access to the Network Firewall resource. The policy must be provided **without whitespaces**. We recommend using [jsonencode](https://www.terraform.io/docs/configuration/functions/jsonencode.html) for formatting as seen in the examples above. For more details, including available policy statement Actions, see the [Policy](https://docs.aws.amazon.com/network-firewall/latest/APIReference/API_PutResourcePolicy.html#API_PutResourcePolicy_RequestSyntax) parameter in the AWS API documentation. - * `resourceArn` - (Required, Forces new resource) The Amazon Resource Name (ARN) of the rule group or firewall policy. 
## Attribute Reference @@ -143,4 +143,4 @@ Using `terraform import`, import Network Firewall Resource Policies using the `r % terraform import aws_networkfirewall_resource_policy.example aws_networkfirewall_rule_group.example arn:aws:network-firewall:us-west-1:123456789012:stateful-rulegroup/example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/networkfirewall_rule_group.html.markdown b/website/docs/cdktf/typescript/r/networkfirewall_rule_group.html.markdown index 8fdf40b8359b..162b027e8194 100644 --- a/website/docs/cdktf/typescript/r/networkfirewall_rule_group.html.markdown +++ b/website/docs/cdktf/typescript/r/networkfirewall_rule_group.html.markdown @@ -443,20 +443,14 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `capacity` - (Required, Forces new resource) The maximum number of operating resources that this rule group can use. For a stateless rule group, the capacity required is the sum of the capacity requirements of the individual rules. For a stateful rule group, the minimum capacity required is the number of individual rules. - * `description` - (Optional) A friendly description of the rule group. - * `encryptionConfiguration` - (Optional) KMS encryption configuration settings. See [Encryption Configuration](#encryption-configuration) below for details. - * `name` - (Required, Forces new resource) A friendly name of the rule group. - * `ruleGroup` - (Optional) A configuration block that defines the rule group rules. Required unless `rules` is specified. See [Rule Group](#rule-group) below for details. 
- * `rules` - (Optional) The stateful rule group rules specifications in Suricata file format, with one rule per line. Use this to import your existing Suricata compatible rule groups. Required unless `ruleGroup` is specified. - * `tags` - (Optional) A map of key:value pairs to associate with the resource. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. - * `type` - (Required) Whether the rule group is stateless (containing stateless rules) or stateful (containing stateful rules). Valid values include: `STATEFUL` or `STATELESS`. ### Encryption Configuration @@ -664,7 +658,7 @@ The `dimension` block supports the following argument: The `destination` block supports the following argument: -* `addressDefinition` - (Required) An IP address or a block of IP addresses in CIDR notation. AWS Network Firewall supports all address ranges for IPv4. +* `addressDefinition` - (Required) An IP address or a block of IP addresses in CIDR notation. AWS Network Firewall supports all address ranges for IPv4 and IPv6. ### Destination Port @@ -678,7 +672,7 @@ The `destinationPort` block supports the following arguments: The `source` block supports the following argument: -* `addressDefinition` - (Required) An IP address or a block of IP addresses in CIDR notation. AWS Network Firewall supports all address ranges for IPv4. +* `addressDefinition` - (Required) An IP address or a block of IP addresses in CIDR notation. AWS Network Firewall supports all address ranges for IPv4 and IPv6. ### Source Port @@ -742,4 +736,4 @@ Using `terraform import`, import Network Firewall Rule Groups using their `arn`. 
% terraform import aws_networkfirewall_rule_group.example arn:aws:network-firewall:us-west-1:123456789012:stateful-rulegroup/example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/networkfirewall_tls_inspection_configuration.html.markdown b/website/docs/cdktf/typescript/r/networkfirewall_tls_inspection_configuration.html.markdown index ee63d75975a9..ed473e8c9f05 100644 --- a/website/docs/cdktf/typescript/r/networkfirewall_tls_inspection_configuration.html.markdown +++ b/website/docs/cdktf/typescript/r/networkfirewall_tls_inspection_configuration.html.markdown @@ -401,6 +401,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` - (Optional) Description of the TLS inspection configuration. * `encryptionConfiguration` - (Optional) Encryption configuration block. Detailed below. @@ -512,6 +513,27 @@ The `certificates` block exports the following attributes: ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_networkfirewall_tls_inspection_configuration.example + identity = { + "arn" = "arn:aws:network-firewall:us-west-2:123456789012:tls-configuration/example" + } +} + +resource "aws_networkfirewall_tls_inspection_configuration" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the Network Firewall TLS inspection configuration. 
+ In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Network Firewall TLS Inspection Configuration using the `arn`. For example: ```typescript @@ -542,4 +564,4 @@ Using `terraform import`, import Network Firewall TLS Inspection Configuration u % terraform import aws_networkfirewall_tls_inspection_configuration.example arn:aws:network-firewall::::tls-configuration/example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/networkfirewall_vpc_endpoint_association.html.markdown b/website/docs/cdktf/typescript/r/networkfirewall_vpc_endpoint_association.html.markdown new file mode 100644 index 000000000000..1daf95a944a7 --- /dev/null +++ b/website/docs/cdktf/typescript/r/networkfirewall_vpc_endpoint_association.html.markdown @@ -0,0 +1,124 @@ +--- +subcategory: "Network Firewall" +layout: "aws" +page_title: "AWS: aws_networkfirewall_vpc_endpoint_association" +description: |- + Manages a firewall endpoint for an AWS Network Firewall firewall. +--- + + + +# Resource: aws_networkfirewall_vpc_endpoint_association + +Manages a firewall endpoint for an AWS Network Firewall firewall. + +Use `aws_networkfirewall_vpc_endpoint_association` to establish new firewall endpoints in any Availability Zone where the firewall is already being used. The first use of a firewall in an Availability Zone must be defined by `aws_networkfirewall_firewall` resource and `subnetMapping` argument. + +## Example Usage + +### Basic Usage + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { Token, TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. 
+ */ +import { NetworkfirewallVpcEndpointAssociation } from "./.gen/providers/aws/networkfirewall-vpc-endpoint-association"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + new NetworkfirewallVpcEndpointAssociation(this, "example", { + firewallArn: Token.asString(awsNetworkfirewallFirewallExample.arn), + subnetMapping: [ + { + subnetId: Token.asString(awsSubnetExample.id), + }, + { + subnetId: exampleTwo.id, + }, + ], + tags: { + Name: "example endpoint", + }, + vpcId: Token.asString(awsVpcExample.id), + }); + } +} + +``` + +## Argument Reference + +This resource supports the following arguments: + +* `description` - (Optional) A description of the VPC endpoint association. +* `firewallArn` - (Required) The Amazon Resource Name (ARN) that identifies the firewall. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `subnetMapping` - (Required) The ID for a subnet that's used in an association with a firewall. See [Subnet Mapping](#subnet-mapping) below for details. +* `tags` - (Optional) Map of resource tags to associate with the resource. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. +* `vpcId` - (Required) The unique identifier of the VPC for the endpoint association. + +### Subnet Mapping + +The `subnetMapping` block supports the following arguments: + +* `ipAddressType` - (Optional) The subnet's IP address type. Valid values: `"DUALSTACK"`, `"IPV4"`. +* `subnetId` - (Required) The unique identifier for the subnet. 
+ +## Attribute Reference + +This resource exports the following attributes in addition to the arguments above: + +* `tagsAll` - A map of tags assigned to the resource, including those inherited from the provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). +* `vpcEndpointAssociationArn` - ARN of the VPC Endpoint Association. +* `vpcEndpointAssociationId` - The unique identifier of the VPC endpoint association. +* `vpcEndpointAssociationStatus` - Nested list of information about the current status of the VPC Endpoint Association. + * `associationSyncStates` - Set of subnets configured for use by the VPC Endpoint Association. + * `attachment` - Nested list describing the attachment status of the firewall's VPC Endpoint Association with a single VPC subnet. + * `endpointId` - The identifier of the VPC endpoint that AWS Network Firewall has instantiated in the subnet. You use this to identify the firewall endpoint in the VPC route tables, when you redirect the VPC traffic through the endpoint. + * `subnetId` - The unique identifier of the subnet that you've specified to be used for a VPC Endpoint Association endpoint. + * `availabilityZone` - The Availability Zone where the subnet is configured. + +## Timeouts + +[Configuration options](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts): + +* `create` - (Default `30m`) +* `delete` - (Default `30m`) + +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Network Firewall VPC Endpoint Association using the `vpcEndpointAssociationArn`. For example: + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. 
+ * See https://cdk.tf/provider-generation for more details. + */ +import { NetworkfirewallVpcEndpointAssociation } from "./.gen/providers/aws/networkfirewall-vpc-endpoint-association"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + NetworkfirewallVpcEndpointAssociation.generateConfigForImport( + this, + "example", + "arn:aws:network-firewall:us-west-1:123456789012:vpc-endpoint-association/example" + ); + } +} + +``` + +Using `terraform import`, import Network Firewall VPC Endpoint Association using the `vpcEndpointAssociationArn`. For example: + +```console +% terraform import aws_networkfirewall_vpc_endpoint_association.example arn:aws:network-firewall:us-west-1:123456789012:vpc-endpoint-association/example +``` + + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/networkmanager_attachment_accepter.html.markdown b/website/docs/cdktf/typescript/r/networkmanager_attachment_accepter.html.markdown index 4fe219fd240d..5241d350fa13 100644 --- a/website/docs/cdktf/typescript/r/networkmanager_attachment_accepter.html.markdown +++ b/website/docs/cdktf/typescript/r/networkmanager_attachment_accepter.html.markdown @@ -3,23 +3,50 @@ subcategory: "Network Manager" layout: "aws" page_title: "AWS: aws_networkmanager_attachment_accepter" description: |- - Terraform resource for managing an AWS Network Manager Attachment Accepter. + Manages an AWS Network Manager Attachment Accepter. --- # Resource: aws_networkmanager_attachment_accepter -Terraform resource for managing an AWS Network Manager Attachment Accepter. +Manages an AWS Network Manager Attachment Accepter. + +Use this resource to accept cross-account attachments in AWS Network Manager. When an attachment is created in one account and needs to be accepted by another account that owns the core network, this resource handles the acceptance process. 
## Example Usage -### Example with VPC attachment +### VPC Attachment + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { Token, TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. + */ +import { NetworkmanagerAttachmentAccepter } from "./.gen/providers/aws/networkmanager-attachment-accepter"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + new NetworkmanagerAttachmentAccepter(this, "example", { + attachmentId: Token.asString(awsNetworkmanagerVpcAttachmentExample.id), + attachmentType: Token.asString( + awsNetworkmanagerVpcAttachmentExample.attachmentType + ), + }); + } +} + +``` + +### Site-to-Site VPN Attachment ```typescript // DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug import { Construct } from "constructs"; -import { TerraformStack } from "cdktf"; +import { Token, TerraformStack } from "cdktf"; /* * Provider bindings are generated by running `cdktf get`. * See https://cdk.tf/provider-generation for more details. @@ -28,21 +55,25 @@ import { NetworkmanagerAttachmentAccepter } from "./.gen/providers/aws/networkma class MyConvertedCode extends TerraformStack { constructor(scope: Construct, name: string) { super(scope, name); - new NetworkmanagerAttachmentAccepter(this, "test", { - attachmentId: vpc.id, - attachmentType: vpc.attachmentType, + new NetworkmanagerAttachmentAccepter(this, "example", { + attachmentId: Token.asString( + awsNetworkmanagerSiteToSiteVpnAttachmentExample.id + ), + attachmentType: Token.asString( + awsNetworkmanagerSiteToSiteVpnAttachmentExample.attachmentType + ), }); } } ``` -### Example with site-to-site VPN attachment +### Connect Attachment ```typescript // DO NOT EDIT. 
Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug import { Construct } from "constructs"; -import { TerraformStack } from "cdktf"; +import { Token, TerraformStack } from "cdktf"; /* * Provider bindings are generated by running `cdktf get`. * See https://cdk.tf/provider-generation for more details. @@ -51,9 +82,67 @@ import { NetworkmanagerAttachmentAccepter } from "./.gen/providers/aws/networkma class MyConvertedCode extends TerraformStack { constructor(scope: Construct, name: string) { super(scope, name); - new NetworkmanagerAttachmentAccepter(this, "test", { - attachmentId: vpn.id, - attachmentType: vpn.attachmentType, + new NetworkmanagerAttachmentAccepter(this, "example", { + attachmentId: Token.asString( + awsNetworkmanagerConnectAttachmentExample.id + ), + attachmentType: Token.asString( + awsNetworkmanagerConnectAttachmentExample.attachmentType + ), + }); + } +} + +``` + +### Transit Gateway Route Table Attachment + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { Token, TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. + */ +import { NetworkmanagerAttachmentAccepter } from "./.gen/providers/aws/networkmanager-attachment-accepter"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + new NetworkmanagerAttachmentAccepter(this, "example", { + attachmentId: Token.asString( + awsNetworkmanagerTransitGatewayRouteTableAttachmentExample.id + ), + attachmentType: Token.asString( + awsNetworkmanagerTransitGatewayRouteTableAttachmentExample.attachmentType + ), + }); + } +} + +``` + +### Direct Connect Gateway Attachment + +```typescript +// DO NOT EDIT. 
Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { Token, TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. + */ +import { NetworkmanagerAttachmentAccepter } from "./.gen/providers/aws/networkmanager-attachment-accepter"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + new NetworkmanagerAttachmentAccepter(this, "example", { + attachmentId: Token.asString( + awsNetworkmanagerDxGatewayAttachmentExample.id + ), + attachmentType: Token.asString( + awsNetworkmanagerDxGatewayAttachmentExample.attachmentType + ), }); } } @@ -64,21 +153,27 @@ class MyConvertedCode extends TerraformStack { The following arguments are required: -- `attachmentId` - (Required) The ID of the attachment. -- `attachmentType` - (Required) The type of attachment. Valid values can be found in the [AWS Documentation](https://docs.aws.amazon.com/networkmanager/latest/APIReference/API_ListAttachments.html#API_ListAttachments_RequestSyntax) +* `attachmentId` - (Required) ID of the attachment. +* `attachmentType` - (Required) Type of attachment. Valid values: `CONNECT`, `DIRECT_CONNECT_GATEWAY`, `SITE_TO_SITE_VPN`, `TRANSIT_GATEWAY_ROUTE_TABLE`, `VPC`. ## Attribute Reference This resource exports the following attributes in addition to the arguments above: -- `attachmentPolicyRuleNumber` - The policy rule number associated with the attachment. -- `coreNetworkArn` - The ARN of a core network. -- `coreNetworkId` - The id of a core network. -- `edgeLocation` - The Region where the edge is located. This is returned for all attachment types except a Direct Connect gateway attachment, which instead returns `edgeLocations`. -- `edgeLocations` - The edge locations that the Direct Connect gateway is associated with. 
This is returned only for Direct Connect gateway attachments. All other attachment types return `edgeLocation` -- `ownerAccountId` - The ID of the attachment account owner. -- `resourceArn` - The attachment resource ARN. -- `segmentName` - The name of the segment attachment. -- `state` - The state of the attachment. - - \ No newline at end of file +* `attachmentPolicyRuleNumber` - Policy rule number associated with the attachment. +* `coreNetworkArn` - ARN of the core network. +* `coreNetworkId` - ID of the core network. +* `edgeLocation` - Region where the edge is located. This is returned for all attachment types except Direct Connect gateway attachments, which instead return `edgeLocations`. +* `edgeLocations` - Edge locations that the Direct Connect gateway is associated with. This is returned only for Direct Connect gateway attachments. All other attachment types return `edgeLocation`. +* `ownerAccountId` - ID of the attachment account owner. +* `resourceArn` - Attachment resource ARN. +* `segmentName` - Name of the segment attachment. +* `state` - State of the attachment. + +## Timeouts + +[Configuration options](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts): + +* `create` - (Default `15m`) + + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/networkmanager_connect_attachment.html.markdown b/website/docs/cdktf/typescript/r/networkmanager_connect_attachment.html.markdown index ba8914a289d3..47fcf16eccaf 100644 --- a/website/docs/cdktf/typescript/r/networkmanager_connect_attachment.html.markdown +++ b/website/docs/cdktf/typescript/r/networkmanager_connect_attachment.html.markdown @@ -3,14 +3,16 @@ subcategory: "Network Manager" layout: "aws" page_title: "AWS: aws_networkmanager_connect_attachment" description: |- - Terraform resource for managing an AWS Network Manager ConnectAttachment. + Manages an AWS Network Manager Connect Attachment. 
--- # Resource: aws_networkmanager_connect_attachment -Terraform resource for managing an AWS Network Manager ConnectAttachment. +Manages an AWS Network Manager Connect Attachment. + +Use this resource to create a Connect attachment in AWS Network Manager. Connect attachments enable you to connect your on-premises networks to your core network through a VPC or Transit Gateway attachment. ## Example Usage @@ -81,7 +83,7 @@ class MyConvertedCode extends TerraformStack { const awsNetworkmanagerConnectAttachmentExample = new NetworkmanagerConnectAttachment(this, "example_2", { coreNetworkId: Token.asString(awsccNetworkmanagerCoreNetworkExample.id), - dependsOn: ["aws_networkmanager_attachment_accepter.test"], + dependsOn: [awsNetworkmanagerAttachmentAccepterExample], edgeLocation: example.edgeLocation, options: { protocol: "GRE", @@ -107,35 +109,40 @@ class MyConvertedCode extends TerraformStack { The following arguments are required: -- `coreNetworkId` - (Required) The ID of a core network where you want to create the attachment. -- `transportAttachmentId` - (Required) The ID of the attachment between the two connections. -- `edgeLocation` - (Required) The Region where the edge is located. -- `options` - (Required) Options block. See [options](#options) for more information. +* `coreNetworkId` - (Required) ID of a core network where you want to create the attachment. +* `edgeLocation` - (Required) Region where the edge is located. +* `options` - (Required) Options block. See [options](#options) for more information. +* `transportAttachmentId` - (Required) ID of the attachment between the two connections. The following arguments are optional: -- `tags` - (Optional) Key-value tags for the attachment. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. 
+* `tags` - (Optional) Key-value tags for the attachment. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. ### options -* `protocol` - (Required) The protocol used for the attachment connection. Possible values are `GRE` and `NO_ENCAP`. +* `protocol` - (Optional) Protocol used for the attachment connection. Valid values: `GRE`, `NO_ENCAP`. ## Attribute Reference This resource exports the following attributes in addition to the arguments above: -- `arn` - The ARN of the attachment. -- `attachmentPolicyRuleNumber` - The policy rule number associated with the attachment. -- `attachmentType` - The type of attachment. -- `coreNetworkArn` - The ARN of a core network. -- `coreNetworkId` - The ID of a core network -- `edgeLocation` - The Region where the edge is located. -- `id` - The ID of the attachment. -- `ownerAccountId` - The ID of the attachment account owner. -- `resourceArn` - The attachment resource ARN. -- `segmentName` - The name of the segment attachment. -- `state` - The state of the attachment. -- `tagsAll` - A map of tags assigned to the resource, including those inherited from the provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). +* `arn` - ARN of the attachment. +* `attachmentId` - ID of the attachment. +* `attachmentPolicyRuleNumber` - Policy rule number associated with the attachment. +* `attachmentType` - Type of attachment. +* `coreNetworkArn` - ARN of a core network. +* `ownerAccountId` - ID of the attachment account owner. +* `resourceArn` - Attachment resource ARN. +* `segmentName` - Name of the segment attachment. +* `state` - State of the attachment. 
+* `tagsAll` - Map of tags assigned to the resource, including those inherited from the provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). + +## Timeouts + +[Configuration options](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts): + +* `create` - (Default `10m`) +* `delete` - (Default `10m`) ## Import @@ -169,4 +176,4 @@ Using `terraform import`, import `aws_networkmanager_connect_attachment` using t % terraform import aws_networkmanager_connect_attachment.example attachment-0f8fa60d2238d1bd8 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/networkmanager_connect_peer.html.markdown b/website/docs/cdktf/typescript/r/networkmanager_connect_peer.html.markdown index 93ebe01fc9b6..13b46720bad6 100644 --- a/website/docs/cdktf/typescript/r/networkmanager_connect_peer.html.markdown +++ b/website/docs/cdktf/typescript/r/networkmanager_connect_peer.html.markdown @@ -3,14 +3,16 @@ subcategory: "Network Manager" layout: "aws" page_title: "AWS: aws_networkmanager_connect_peer" description: |- - Terraform resource for managing an AWS Network Manager Connect Peer. + Manages an AWS Network Manager Connect Peer. --- # Resource: aws_networkmanager_connect_peer -Terraform resource for managing an AWS Network Manager Connect Peer. +Manages an AWS Network Manager Connect Peer. + +Use this resource to create a Connect peer in AWS Network Manager. Connect peers establish BGP sessions with your on-premises networks through Connect attachments, enabling dynamic routing between your core network and external networks. 
## Example Usage @@ -99,7 +101,7 @@ class MyConvertedCode extends TerraformStack { const awsNetworkmanagerConnectAttachmentExample = new NetworkmanagerConnectAttachment(this, "example_2", { coreNetworkId: Token.asString(awsccNetworkmanagerCoreNetworkExample.id), - dependsOn: ["aws_networkmanager_attachment_accepter.test"], + dependsOn: [awsNetworkmanagerAttachmentAccepterExample], edgeLocation: example.edgeLocation, options: { protocol: "GRE", @@ -108,9 +110,17 @@ class MyConvertedCode extends TerraformStack { }); /*This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match.*/ awsNetworkmanagerConnectAttachmentExample.overrideLogicalId("example"); + const example2 = new NetworkmanagerAttachmentAccepter(this, "example2", { + attachmentId: Token.asString( + awsNetworkmanagerConnectAttachmentExample.id + ), + attachmentType: Token.asString( + awsNetworkmanagerConnectAttachmentExample.attachmentType + ), + }); const awsNetworkmanagerConnectPeerExample = new NetworkmanagerConnectPeer( this, - "example_3", + "example_4", { bgpOptions: { peerAsn: 65500, @@ -118,21 +128,13 @@ class MyConvertedCode extends TerraformStack { connectAttachmentId: Token.asString( awsNetworkmanagerConnectAttachmentExample.id ), - dependsOn: ["aws_networkmanager_attachment_accepter.example2"], + dependsOn: [example2], insideCidrBlocks: ["172.16.0.0/16"], peerAddress: "127.0.0.1", } ); /*This allows the Terraform resource name to match the original name. 
You can remove the call if you don't need them to match.*/ awsNetworkmanagerConnectPeerExample.overrideLogicalId("example"); - new NetworkmanagerAttachmentAccepter(this, "example2", { - attachmentId: Token.asString( - awsNetworkmanagerConnectAttachmentExample.id - ), - attachmentType: Token.asString( - awsNetworkmanagerConnectAttachmentExample.attachmentType - ), - }); } } @@ -181,7 +183,7 @@ class MyConvertedCode extends TerraformStack { awsNetworkmanagerConnectAttachmentExample.id ), peerAddress: "127.0.0.1", - subnetArn: test2.arn, + subnetArn: example2.arn, } ); /*This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match.*/ @@ -195,28 +197,40 @@ class MyConvertedCode extends TerraformStack { The following arguments are required: -- `connectAttachmentId` - (Required) The ID of the connection attachment. -- `peerAddress` - (Required) The Connect peer address. +* `connectAttachmentId` - (Required) ID of the connection attachment. +* `peerAddress` - (Required) Connect peer address. The following arguments are optional: -- `bgpOptions` (Optional) The Connect peer BGP options. -- `coreNetworkAddress` (Optional) A Connect peer core network address. -- `insideCidrBlocks` - (Optional) The inside IP addresses used for BGP peering. Required when the Connect attachment protocol is `GRE`. See [`aws_networkmanager_connect_attachment`](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/networkmanager_connect_attachment) for details. -- `subnetArn` - (Optional) The subnet ARN for the Connect peer. Required when the Connect attachment protocol is `NO_ENCAP`. See [`aws_networkmanager_connect_attachment`](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/networkmanager_connect_attachment) for details. -- `tags` - (Optional) Key-value tags for the attachment. 
If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. +* `bgpOptions` - (Optional) Connect peer BGP options. See [bgp_options](#bgp_options) for more information. +* `coreNetworkAddress` - (Optional) Connect peer core network address. +* `insideCidrBlocks` - (Optional) Inside IP addresses used for BGP peering. Required when the Connect attachment protocol is `GRE`. See [`aws_networkmanager_connect_attachment`](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/networkmanager_connect_attachment) for details. +* `subnetArn` - (Optional) Subnet ARN for the Connect peer. Required when the Connect attachment protocol is `NO_ENCAP`. See [`aws_networkmanager_connect_attachment`](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/networkmanager_connect_attachment) for details. +* `tags` - (Optional) Key-value tags for the attachment. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. + +### bgp_options + +* `peerAsn` - (Optional) Peer ASN. ## Attribute Reference This resource exports the following attributes in addition to the arguments above: -- `arn` - The ARN of the attachment. -- `configuration` - The configuration of the Connect peer. -- `coreNetworkId` - The ID of a core network. -- `edgeLocation` - The Region where the peer is located. -- `id` - The ID of the Connect peer. -- `state` - The state of the Connect peer. 
-- `tagsAll` - A map of tags assigned to the resource, including those inherited from the provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). +* `arn` - ARN of the Connect peer. +* `configuration` - Configuration of the Connect peer. +* `connectPeerId` - ID of the Connect peer. +* `coreNetworkId` - ID of a core network. +* `createdAt` - Timestamp when the Connect peer was created. +* `edgeLocation` - Region where the peer is located. +* `state` - State of the Connect peer. +* `tagsAll` - Map of tags assigned to the resource, including those inherited from the provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). + +## Timeouts + +[Configuration options](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts): + +* `create` - (Default `10m`) +* `delete` - (Default `15m`) ## Import @@ -250,4 +264,4 @@ Using `terraform import`, import `aws_networkmanager_connect_peer` using the con % terraform import aws_networkmanager_connect_peer.example connect-peer-061f3e96275db1acc ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/networkmanager_connection.html.markdown b/website/docs/cdktf/typescript/r/networkmanager_connection.html.markdown index baeda9d40ae4..29de416e9e04 100644 --- a/website/docs/cdktf/typescript/r/networkmanager_connection.html.markdown +++ b/website/docs/cdktf/typescript/r/networkmanager_connection.html.markdown @@ -3,15 +3,16 @@ subcategory: "Network Manager" layout: "aws" page_title: "AWS: aws_networkmanager_connection" description: |- - Creates a connection between two devices. + Manages a Network Manager Connection. --- # Resource: aws_networkmanager_connection -Creates a connection between two devices. 
-The devices can be a physical or virtual appliance that connects to a third-party appliance in a VPC, or a physical appliance that connects to another physical appliance in an on-premises network. +Manages a Network Manager Connection. + +Use this resource to create a connection between two devices in your global network. ## Example Usage @@ -39,22 +40,33 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -This resource supports the following arguments: +The following arguments are required: + +* `connectedDeviceId` - (Required) ID of the second device in the connection. +* `deviceId` - (Required) ID of the first device in the connection. +* `globalNetworkId` - (Required) ID of the global network. + +The following arguments are optional: -* `connectedDeviceId` - (Required) The ID of the second device in the connection. -* `connectedLinkId` - (Optional) The ID of the link for the second device. -* `description` - (Optional) A description of the connection. -* `deviceId` - (Required) The ID of the first device in the connection. -* `globalNetworkId` - (Required) The ID of the global network. -* `linkId` - (Optional) The ID of the link for the first device. +* `connectedLinkId` - (Optional) ID of the link for the second device. +* `description` - (Optional) Description of the connection. +* `linkId` - (Optional) ID of the link for the first device. * `tags` - (Optional) Key-value tags for the connection. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. ## Attribute Reference This resource exports the following attributes in addition to the arguments above: -* `arn` - The Amazon Resource Name (ARN) of the connection. 
-* `tagsAll` - A map of tags assigned to the resource, including those inherited from the provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). +* `arn` - ARN of the connection. +* `tagsAll` - Map of tags assigned to the resource, including those inherited from the provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). + +## Timeouts + +[Configuration options](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts): + +* `create` - (Default `10m`) +* `delete` - (Default `10m`) +* `update` - (Default `10m`) ## Import @@ -88,4 +100,4 @@ Using `terraform import`, import `aws_networkmanager_connection` using the conne % terraform import aws_networkmanager_connection.example arn:aws:networkmanager::123456789012:device/global-network-0d47f6t230mz46dy4/connection-07f6fd08867abc123 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/networkmanager_core_network.html.markdown b/website/docs/cdktf/typescript/r/networkmanager_core_network.html.markdown index eac2b17ee894..22effd7ea872 100644 --- a/website/docs/cdktf/typescript/r/networkmanager_core_network.html.markdown +++ b/website/docs/cdktf/typescript/r/networkmanager_core_network.html.markdown @@ -3,14 +3,16 @@ subcategory: "Network Manager" layout: "aws" page_title: "AWS: aws_networkmanager_core_network" description: |- - Provides a core network resource. + Manages a Network Manager Core Network. --- # Resource: aws_networkmanager_core_network -Provides a core network resource. +Manages a Network Manager Core Network. + +Use this resource to create and manage a core network within a global network. 
## Example Usage @@ -550,13 +552,15 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -This resource supports the following arguments: +The following arguments are required: -* `description` - (Optional) Description of the Core Network. -* `basePolicyDocument` - (Optional, conflicts with `basePolicyRegion`, `basePolicyRegions`) Sets the base policy document for the core network. Refer to the [Core network policies documentation](https://docs.aws.amazon.com/network-manager/latest/cloudwan/cloudwan-policy-change-sets.html) for more information. -* `basePolicyRegion` - (Optional, **Deprecated** use the `basePolicyRegions` or `basePolicyDocument` argument instead) The base policy created by setting the `createBasePolicy` argument to `true` requires a region to be set in the `edge-locations`, `location` key. If `basePolicyRegion` is not specified, the region used in the base policy defaults to the region specified in the `provider` block. -* `basePolicyRegions` - (Optional, conflicts with `basePolicyRegion`, `basePolicyDocument`) A list of regions to add to the base policy. The base policy created by setting the `createBasePolicy` argument to `true` requires one or more regions to be set in the `edge-locations`, `location` key. If `basePolicyRegions` is not specified, the region used in the base policy defaults to the region specified in the `provider` block. -* `createBasePolicy` - (Optional) Specifies whether to create a base policy when a core network is created or updated. A base policy is created and set to `LIVE` to allow attachments to the core network (e.g. VPC Attachments) before applying a policy document provided using the [`aws_networkmanager_core_network_policy_attachment` resource](/docs/providers/aws/r/networkmanager_core_network_policy_attachment.html). 
This base policy is needed if your core network does not have any `LIVE` policies and your policy document has static routes pointing to VPC attachments and you want to attach your VPCs to the core network before applying the desired policy document. Valid values are `true` or `false`. An example of this Terraform snippet can be found above [for VPC Attachment in a single region](#with-vpc-attachment-single-region) and [for VPC Attachment multi-region](#with-vpc-attachment-multi-region). An example base policy is shown below. This base policy is overridden with the policy that you specify in the [`aws_networkmanager_core_network_policy_attachment` resource](/docs/providers/aws/r/networkmanager_core_network_policy_attachment.html). +* `globalNetworkId` - (Required) ID of the global network that a core network will be a part of. + +The following arguments are optional: + +* `basePolicyDocument` - (Optional, conflicts with `basePolicyRegions`) Sets the base policy document for the core network. Refer to the [Core network policies documentation](https://docs.aws.amazon.com/network-manager/latest/cloudwan/cloudwan-policy-change-sets.html) for more information. +* `basePolicyRegions` - (Optional, conflicts with `basePolicyDocument`) List of regions to add to the base policy. The base policy created by setting the `createBasePolicy` argument to `true` requires one or more regions to be set in the `edge-locations`, `location` key. If `basePolicyRegions` is not specified, the region used in the base policy defaults to the region specified in the `provider` block. +* `createBasePolicy` - (Optional) Whether to create a base policy when a core network is created or updated. A base policy is created and set to `LIVE` to allow attachments to the core network (e.g. VPC Attachments) before applying a policy document provided using the [`aws_networkmanager_core_network_policy_attachment` resource](/docs/providers/aws/r/networkmanager_core_network_policy_attachment.html). 
This base policy is needed if your core network does not have any `LIVE` policies and your policy document has static routes pointing to VPC attachments and you want to attach your VPCs to the core network before applying the desired policy document. Valid values are `true` or `false`. An example of this Terraform snippet can be found above [for VPC Attachment in a single region](#with-vpc-attachment-single-region) and [for VPC Attachment multi-region](#with-vpc-attachment-multi-region). An example base policy is shown below. This base policy is overridden with the policy that you specify in the [`aws_networkmanager_core_network_policy_attachment` resource](/docs/providers/aws/r/networkmanager_core_network_policy_attachment.html). ```json { @@ -583,28 +587,20 @@ This resource supports the following arguments: } ``` -* `globalNetworkId` - (Required) The ID of the global network that a core network will be a part of. +* `description` - (Optional) Description of the Core Network. * `tags` - (Optional) Key-value tags for the Core Network. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. -## Timeouts - -[Configuration options](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts): - -* `create` - (Default `30m`) -* `delete` - (Default `30m`) -* `update` - (Default `30m`) - ## Attribute Reference This resource exports the following attributes in addition to the arguments above: -* `arn` - Core Network Amazon Resource Name (ARN). +* `arn` - Core Network ARN. * `createdAt` - Timestamp when a core network was created. * `edges` - One or more blocks detailing the edges within a core network. [Detailed below](#edges). * `id` - Core Network ID. * `segments` - One or more blocks detailing the segments within a core network. 
[Detailed below](#segments). * `state` - Current state of a core network. -* `tagsAll` - A map of tags assigned to the resource, including those inherited from the provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). +* `tagsAll` - Map of tags assigned to the resource, including those inherited from the provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). ### `edges` @@ -622,6 +618,14 @@ The `segments` configuration block supports the following arguments: * `name` - Name of a core network segment. * `shared_segments` - Shared segments of a core network. +## Timeouts + +[Configuration options](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts): + +* `create` - (Default `30m`) +* `delete` - (Default `30m`) +* `update` - (Default `30m`) + ## Import In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import `aws_networkmanager_core_network` using the core network ID. 
For example: @@ -654,4 +658,4 @@ Using `terraform import`, import `aws_networkmanager_core_network` using the cor % terraform import aws_networkmanager_core_network.example core-network-0d47f6t230mz46dy4 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/networkmanager_core_network_policy_attachment.html.markdown b/website/docs/cdktf/typescript/r/networkmanager_core_network_policy_attachment.html.markdown index 8c3762590e60..daab57dda2d7 100644 --- a/website/docs/cdktf/typescript/r/networkmanager_core_network_policy_attachment.html.markdown +++ b/website/docs/cdktf/typescript/r/networkmanager_core_network_policy_attachment.html.markdown @@ -3,14 +3,16 @@ subcategory: "Network Manager" layout: "aws" page_title: "AWS: aws_networkmanager_core_network_policy_attachment" description: |- - Provides a Core Network Policy Attachment resource. + Manages a Network Manager Core Network Policy Attachment. --- # Resource: aws_networkmanager_core_network_policy_attachment -Provides a Core Network Policy Attachment resource. This puts a Core Network Policy to an existing Core Network and executes the change set, which deploys changes globally based on the policy submitted (Sets the policy to `LIVE`). +Manages a Network Manager Core Network Policy Attachment. + +Use this resource to attach a Core Network Policy to an existing Core Network and execute the change set, which deploys changes globally based on the policy submitted (sets the policy to `LIVE`). ~> **NOTE:** Deleting this resource will not delete the current policy defined in this resource. Deleting this resource will also not revert the current `LIVE` policy to the previous version. 
@@ -514,23 +516,23 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -This resource supports the following arguments: +The following arguments are required: -* `coreNetworkId` - (Required) The ID of the core network that a policy will be attached to and made `LIVE`. +* `coreNetworkId` - (Required) ID of the core network that a policy will be attached to and made `LIVE`. * `policyDocument` - (Required) Policy document for creating a core network. Note that updating this argument will result in the new policy document version being set as the `LATEST` and `LIVE` policy document. Refer to the [Core network policies documentation](https://docs.aws.amazon.com/network-manager/latest/cloudwan/cloudwan-policy-change-sets.html) for more information. -## Timeouts - -[Configuration options](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts): - -* `update` - (Default `30m`). If this is the first time attaching a policy to a core network then this timeout value is also used as the `create` timeout value. - ## Attribute Reference This resource exports the following attributes in addition to the arguments above: * `state` - Current state of a core network. +## Timeouts + +[Configuration options](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts): + +* `update` - (Default `30m`). If this is the first time attaching a policy to a core network then this timeout value is also used as the `create` timeout value. + ## Import In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import `aws_networkmanager_core_network_policy_attachment` using the core network ID. 
For example: @@ -563,4 +565,4 @@ Using `terraform import`, import `aws_networkmanager_core_network_policy_attachm % terraform import aws_networkmanager_core_network_policy_attachment.example core-network-0d47f6t230mz46dy4 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/networkmanager_customer_gateway_association.html.markdown b/website/docs/cdktf/typescript/r/networkmanager_customer_gateway_association.html.markdown index 1d34d927559e..e179f8c21983 100644 --- a/website/docs/cdktf/typescript/r/networkmanager_customer_gateway_association.html.markdown +++ b/website/docs/cdktf/typescript/r/networkmanager_customer_gateway_association.html.markdown @@ -3,15 +3,16 @@ subcategory: "Network Manager" layout: "aws" page_title: "AWS: aws_networkmanager_customer_gateway_association" description: |- - Associates a customer gateway with a device and optionally, with a link. + Manages a Network Manager Customer Gateway Association. --- # Resource: aws_networkmanager_customer_gateway_association -Associates a customer gateway with a device and optionally, with a link. -If you specify a link, it must be associated with the specified device. +Manages a Network Manager Customer Gateway Association. + +Use this resource to associate a customer gateway with a device and optionally, with a link. If you specify a link, it must be associated with the specified device. ## Example Usage @@ -115,17 +116,27 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -This resource supports the following arguments: +The following arguments are required: + +* `customerGatewayArn` - (Required) ARN of the customer gateway. +* `deviceId` - (Required) ID of the device. +* `globalNetworkId` - (Required) ID of the global network. + +The following arguments are optional: -* `customerGatewayArn` - (Required) The Amazon Resource Name (ARN) of the customer gateway. -* `deviceId` - (Required) The ID of the device. 
-* `globalNetworkId` - (Required) The ID of the global network. -* `linkId` - (Optional) The ID of the link. +* `linkId` - (Optional) ID of the link. ## Attribute Reference This resource exports no additional attributes. +## Timeouts + +[Configuration options](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts): + +* `create` - (Default `10m`) +* `delete` - (Default `10m`) + ## Import In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import `aws_networkmanager_customer_gateway_association` using the global network ID and customer gateway ARN. For example: @@ -158,4 +169,4 @@ Using `terraform import`, import `aws_networkmanager_customer_gateway_associatio % terraform import aws_networkmanager_customer_gateway_association.example global-network-0d47f6t230mz46dy4,arn:aws:ec2:us-west-2:123456789012:customer-gateway/cgw-123abc05e04123abc ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/networkmanager_device.html.markdown b/website/docs/cdktf/typescript/r/networkmanager_device.html.markdown index 63929fca89d5..c3c5c2470785 100644 --- a/website/docs/cdktf/typescript/r/networkmanager_device.html.markdown +++ b/website/docs/cdktf/typescript/r/networkmanager_device.html.markdown @@ -3,15 +3,16 @@ subcategory: "Network Manager" layout: "aws" page_title: "AWS: aws_networkmanager_device" description: |- - Creates a device in a global network. + Manages a Network Manager Device. --- # Resource: aws_networkmanager_device -Creates a device in a global network. If you specify both a site ID and a location, -the location of the site is used for visualization in the Network Manager console. +Manages a Network Manager Device. + +Use this resource to create a device in a global network. If you specify both a site ID and a location, the location of the site is used for visualization in the Network Manager console. 
## Example Usage @@ -38,36 +39,47 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -This resource supports the following arguments: +The following arguments are required: + +* `globalNetworkId` - (Required) ID of the global network. + +The following arguments are optional: -* `awsLocation` - (Optional) The AWS location of the device. Documented below. -* `description` - (Optional) A description of the device. -* `globalNetworkId` - (Required) The ID of the global network. -* `location` - (Optional) The location of the device. Documented below. -* `model` - (Optional) The model of device. -* `serialNumber` - (Optional) The serial number of the device. -* `siteId` - (Optional) The ID of the site. +* `awsLocation` - (Optional) AWS location of the device. Documented below. +* `description` - (Optional) Description of the device. +* `location` - (Optional) Location of the device. Documented below. +* `model` - (Optional) Model of device. +* `serialNumber` - (Optional) Serial number of the device. +* `siteId` - (Optional) ID of the site. * `tags` - (Optional) Key-value tags for the device. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. -* `type` - (Optional) The type of device. -* `vendor` - (Optional) The vendor of the device. +* `type` - (Optional) Type of device. +* `vendor` - (Optional) Vendor of the device. The `awsLocation` object supports the following: -* `subnetArn` - (Optional) The Amazon Resource Name (ARN) of the subnet that the device is located in. -* `zone` - (Optional) The Zone that the device is located in. Specify the ID of an Availability Zone, Local Zone, Wavelength Zone, or an Outpost. +* `subnetArn` - (Optional) ARN of the subnet that the device is located in. +* `zone` - (Optional) Zone that the device is located in. 
Specify the ID of an Availability Zone, Local Zone, Wavelength Zone, or an Outpost. The `location` object supports the following: -* `address` - (Optional) The physical address. -* `latitude` - (Optional) The latitude. -* `longitude` - (Optional) The longitude. +* `address` - (Optional) Physical address. +* `latitude` - (Optional) Latitude. +* `longitude` - (Optional) Longitude. ## Attribute Reference This resource exports the following attributes in addition to the arguments above: -* `arn` - The Amazon Resource Name (ARN) of the device. -* `tagsAll` - A map of tags assigned to the resource, including those inherited from the provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). +* `arn` - ARN of the device. +* `tagsAll` - Map of tags assigned to the resource, including those inherited from the provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). 
+ +## Timeouts + +[Configuration options](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts): + +* `create` - (Default `10m`) +* `delete` - (Default `10m`) +* `update` - (Default `10m`) ## Import @@ -101,4 +113,4 @@ Using `terraform import`, import `aws_networkmanager_device` using the device AR % terraform import aws_networkmanager_device.example arn:aws:networkmanager::123456789012:device/global-network-0d47f6t230mz46dy4/device-07f6fd08867abc123 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/networkmanager_dx_gateway_attachment.html.markdown b/website/docs/cdktf/typescript/r/networkmanager_dx_gateway_attachment.html.markdown index b54b7b296f2f..1ae168479dd0 100644 --- a/website/docs/cdktf/typescript/r/networkmanager_dx_gateway_attachment.html.markdown +++ b/website/docs/cdktf/typescript/r/networkmanager_dx_gateway_attachment.html.markdown @@ -3,13 +3,15 @@ subcategory: "Network Manager" layout: "aws" page_title: "AWS: aws_networkmanager_dx_gateway_attachment" description: |- - Terraform resource for managing an AWS Network Manager Direct Connect Gateway Attachment. + Manages a Network Manager Direct Connect Gateway Attachment. --- # Resource: aws_networkmanager_dx_gateway_attachment -Terraform resource for managing an AWS Network Manager Direct Connect (DX) Gateway Attachment. +Manages a Network Manager Direct Connect Gateway Attachment. + +Use this resource to create and manage a Direct Connect Gateway attachment to a Cloud WAN core network. 
## Example Usage @@ -37,7 +39,7 @@ class MyConvertedCode extends TerraformStack { "}:dx-gateway/${" + awsDxGatewayTest.id + "}", - edgeLocations: [Token.asString(dataAwsRegionCurrent.name)], + edgeLocations: [Token.asString(dataAwsRegionCurrent.region)], }); } } @@ -60,14 +62,15 @@ The following arguments are optional: This resource exports the following attributes in addition to the arguments above: +* `arn` - ARN of the attachment. * `attachmentPolicyRuleNumber` - Policy rule number associated with the attachment. * `attachmentType` - Type of attachment. * `coreNetworkArn` - ARN of the core network for the attachment. -* `id` - The ID of the attachment. +* `id` - ID of the attachment. * `ownerAccountId` - ID of the attachment account owner. * `segmentName` - Name of the segment attachment. * `state` - State of the attachment. -* `tagsAll` - A map of tags assigned to the resource, including those inherited from the provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). +* `tagsAll` - Map of tags assigned to the resource, including those inherited from the provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). 
## Timeouts @@ -109,4 +112,4 @@ Using `terraform import`, import Network Manager DX Gateway Attachment using the % terraform import aws_networkmanager_dx_gateway_attachment.example attachment-1a2b3c4d5e6f7g ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/networkmanager_global_network.html.markdown b/website/docs/cdktf/typescript/r/networkmanager_global_network.html.markdown index 036088de455d..90e3b315ca37 100644 --- a/website/docs/cdktf/typescript/r/networkmanager_global_network.html.markdown +++ b/website/docs/cdktf/typescript/r/networkmanager_global_network.html.markdown @@ -3,14 +3,16 @@ subcategory: "Network Manager" layout: "aws" page_title: "AWS: aws_networkmanager_global_network" description: |- - Provides a global network resource. + Manages a Network Manager Global Network. --- # Resource: aws_networkmanager_global_network -Provides a global network resource. +Manages a Network Manager Global Network. + +Use this resource to create and manage a global network, which is a single private network that acts as the high-level container for your network objects. ## Example Usage @@ -36,7 +38,7 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -This resource supports the following arguments: +The following arguments are optional: * `description` - (Optional) Description of the Global Network. * `tags` - (Optional) Key-value tags for the Global Network. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. 
@@ -45,8 +47,16 @@ This resource supports the following arguments: This resource exports the following attributes in addition to the arguments above: -* `arn` - Global Network Amazon Resource Name (ARN) -* `tagsAll` - A map of tags assigned to the resource, including those inherited from the provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). +* `arn` - Global Network ARN. +* `tagsAll` - Map of tags assigned to the resource, including those inherited from the provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). + +## Timeouts + +[Configuration options](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts): + +* `create` - (Default `10m`) +* `delete` - (Default `10m`) +* `update` - (Default `10m`) ## Import @@ -80,4 +90,4 @@ Using `terraform import`, import `aws_networkmanager_global_network` using the g % terraform import aws_networkmanager_global_network.example global-network-0d47f6t230mz46dy4 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/networkmanager_link.html.markdown b/website/docs/cdktf/typescript/r/networkmanager_link.html.markdown index 03e93283265f..afa6f81742f4 100644 --- a/website/docs/cdktf/typescript/r/networkmanager_link.html.markdown +++ b/website/docs/cdktf/typescript/r/networkmanager_link.html.markdown @@ -3,14 +3,14 @@ subcategory: "Network Manager" layout: "aws" page_title: "AWS: aws_networkmanager_link" description: |- - Creates a link for a site. + Manages a Network Manager link. --- # Resource: aws_networkmanager_link -Creates a link for a site. +Manages a Network Manager link. Use this resource to create a link for a site. 
## Example Usage @@ -42,17 +42,20 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -This resource supports the following arguments: +The following arguments are required: -* `bandwidth` - (Required) The upload speed and download speed in Mbps. Documented below. -* `description` - (Optional) A description of the link. -* `globalNetworkId` - (Required) The ID of the global network. -* `providerName` - (Optional) The provider of the link. -* `siteId` - (Required) The ID of the site. +* `bandwidth` - (Required) Upload speed and download speed in Mbps. [See below](#bandwidth). +* `globalNetworkId` - (Required) ID of the global network. +* `siteId` - (Required) ID of the site. + +The following arguments are optional: + +* `description` - (Optional) Description of the link. +* `providerName` - (Optional) Provider of the link. * `tags` - (Optional) Key-value tags for the link. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. -* `type` - (Optional) The type of the link. +* `type` - (Optional) Type of the link. -The `bandwidth` object supports the following: +### bandwidth * `downloadSpeed` - (Optional) Download speed in Mbps. * `uploadSpeed` - (Optional) Upload speed in Mbps. @@ -61,8 +64,16 @@ The `bandwidth` object supports the following: This resource exports the following attributes in addition to the arguments above: -* `arn` - Link Amazon Resource Name (ARN). -* `tagsAll` - A map of tags assigned to the resource, including those inherited from the provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). +* `arn` - Link ARN. 
+* `tagsAll` - Map of tags assigned to the resource, including those inherited from the provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). + +## Timeouts + +[Configuration options](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts): + +* `create` - (Default `10m`) +* `delete` - (Default `10m`) +* `update` - (Default `10m`) ## Import @@ -96,4 +107,4 @@ Using `terraform import`, import `aws_networkmanager_link` using the link ARN. F % terraform import aws_networkmanager_link.example arn:aws:networkmanager::123456789012:link/global-network-0d47f6t230mz46dy4/link-444555aaabbb11223 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/networkmanager_link_association.html.markdown b/website/docs/cdktf/typescript/r/networkmanager_link_association.html.markdown index 702394dc2e7d..9cfe68a766c7 100644 --- a/website/docs/cdktf/typescript/r/networkmanager_link_association.html.markdown +++ b/website/docs/cdktf/typescript/r/networkmanager_link_association.html.markdown @@ -3,16 +3,14 @@ subcategory: "Network Manager" layout: "aws" page_title: "AWS: aws_networkmanager_link_association" description: |- - Associates a link to a device. + Manages a Network Manager link association. --- # Resource: aws_networkmanager_link_association -Associates a link to a device. -A device can be associated to multiple links and a link can be associated to multiple devices. -The device and link must be in the same global network and the same site. +Manages a Network Manager link association. Associates a link to a device. A device can be associated with multiple links and a link can be associated with multiple devices. The device and link must be in the same global network and the same site.
## Example Usage @@ -40,16 +38,23 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -This resource supports the following arguments: +The following arguments are required: -* `deviceId` - (Required) The ID of the device. -* `globalNetworkId` - (Required) The ID of the global network. -* `linkId` - (Required) The ID of the link. +* `deviceId` - (Required) ID of the device. +* `globalNetworkId` - (Required) ID of the global network. +* `linkId` - (Required) ID of the link. ## Attribute Reference This resource exports no additional attributes. +## Timeouts + +[Configuration options](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts): + +* `create` - (Default `10m`) +* `delete` - (Default `10m`) + ## Import In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import `aws_networkmanager_link_association` using the global network ID, link ID and device ID. For example: @@ -82,4 +87,4 @@ Using `terraform import`, import `aws_networkmanager_link_association` using the % terraform import aws_networkmanager_link_association.example global-network-0d47f6t230mz46dy4,link-444555aaabbb11223,device-07f6fd08867abc123 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/networkmanager_site.html.markdown b/website/docs/cdktf/typescript/r/networkmanager_site.html.markdown index f1a780295fa9..6955c3c3c472 100644 --- a/website/docs/cdktf/typescript/r/networkmanager_site.html.markdown +++ b/website/docs/cdktf/typescript/r/networkmanager_site.html.markdown @@ -3,14 +3,14 @@ subcategory: "Network Manager" layout: "aws" page_title: "AWS: aws_networkmanager_site" description: |- - Creates a site in a global network. + Manages a Network Manager site. --- # Resource: aws_networkmanager_site -Creates a site in a global network. +Manages a Network Manager site. Use this resource to create a site in a global network. 
## Example Usage @@ -44,14 +44,17 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -This resource supports the following arguments: +The following arguments are required: + +* `globalNetworkId` - (Required) ID of the Global Network to create the site in. + +The following arguments are optional: -* `globalNetworkId` - (Required) The ID of the Global Network to create the site in. * `description` - (Optional) Description of the Site. -* `location` - (Optional) The site location as documented below. +* `location` - (Optional) Site location. [See below](#location). * `tags` - (Optional) Key-value tags for the Site. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. -The `location` object supports the following: +### location * `address` - (Optional) Address of the location. * `latitude` - (Optional) Latitude of the location. @@ -61,8 +64,16 @@ The `location` object supports the following: This resource exports the following attributes in addition to the arguments above: -* `arn` - Site Amazon Resource Name (ARN) -* `tagsAll` - A map of tags assigned to the resource, including those inherited from the provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). +* `arn` - Site ARN. +* `tagsAll` - Map of tags assigned to the resource, including those inherited from the provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). 
+ +## Timeouts + +[Configuration options](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts): + +* `create` - (Default `10m`) +* `delete` - (Default `10m`) +* `update` - (Default `10m`) ## Import @@ -96,4 +107,4 @@ Using `terraform import`, import `aws_networkmanager_site` using the site ARN. F % terraform import aws_networkmanager_site.example arn:aws:networkmanager::123456789012:site/global-network-0d47f6t230mz46dy4/site-444555aaabbb11223 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/networkmanager_site_to_site_vpn_attachment.html.markdown b/website/docs/cdktf/typescript/r/networkmanager_site_to_site_vpn_attachment.html.markdown index ac2227d555fe..42af97f5dcd3 100644 --- a/website/docs/cdktf/typescript/r/networkmanager_site_to_site_vpn_attachment.html.markdown +++ b/website/docs/cdktf/typescript/r/networkmanager_site_to_site_vpn_attachment.html.markdown @@ -3,14 +3,14 @@ subcategory: "Network Manager" layout: "aws" page_title: "AWS: aws_networkmanager_site_to_site_vpn_attachment" description: |- - Terraform resource for managing an AWS Network Manager SiteToSiteAttachment. + Manages a Network Manager site-to-site VPN attachment. --- # Resource: aws_networkmanager_site_to_site_vpn_attachment -Terraform resource for managing an AWS Network Manager SiteToSiteAttachment. +Manages a Network Manager site-to-site VPN attachment. ## Example Usage @@ -110,7 +110,7 @@ class MyConvertedCode extends TerraformStack { edgeLocations: [ { asn: Token.asString(64512), - location: Token.asString(current.name), + location: Token.asString(current.region), }, ], vpnEcmpSupport: false, @@ -182,29 +182,36 @@ class MyConvertedCode extends TerraformStack { The following arguments are required: -- `coreNetworkId` - (Required) The ID of a core network for the VPN attachment. -- `vpnConnectionArn` - (Required) The ARN of the site-to-site VPN connection. 
+* `coreNetworkId` - (Required) ID of a core network for the VPN attachment. +* `vpnConnectionArn` - (Required) ARN of the site-to-site VPN connection. The following arguments are optional: -- `tags` - (Optional) Key-value tags for the attachment. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. +* `tags` - (Optional) Key-value tags for the attachment. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. ## Attribute Reference This resource exports the following attributes in addition to the arguments above: -- `arn` - The ARN of the attachment. -- `attachmentPolicyRuleNumber` - The policy rule number associated with the attachment. -- `attachmentType` - The type of attachment. -- `coreNetworkArn` - The ARN of a core network. -- `coreNetworkId` - The ID of a core network -- `edgeLocation` - The Region where the edge is located. -- `id` - The ID of the attachment. -- `ownerAccountId` - The ID of the attachment account owner. -- `resourceArn` - The attachment resource ARN. -- `segmentName` - The name of the segment attachment. -- `state` - The state of the attachment. -- `tagsAll` - A map of tags assigned to the resource, including those inherited from the provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). +* `arn` - ARN of the attachment. +* `attachmentPolicyRuleNumber` - Policy rule number associated with the attachment. +* `attachmentType` - Type of attachment. +* `coreNetworkArn` - ARN of a core network. +* `edgeLocation` - Region where the edge is located. 
+* `id` - ID of the attachment. +* `ownerAccountId` - ID of the attachment account owner. +* `resourceArn` - Attachment resource ARN. +* `segmentName` - Name of the segment attachment. +* `state` - State of the attachment. +* `tagsAll` - Map of tags assigned to the resource, including those inherited from the provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). + +## Timeouts + +[Configuration options](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts): + +* `create` - (Default `10m`) +* `delete` - (Default `10m`) +* `update` - (Default `10m`) ## Import @@ -238,4 +245,4 @@ Using `terraform import`, import `aws_networkmanager_site_to_site_vpn_attachment % terraform import aws_networkmanager_site_to_site_vpn_attachment.example attachment-0f8fa60d2238d1bd8 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/networkmanager_transit_gateway_connect_peer_association.html.markdown b/website/docs/cdktf/typescript/r/networkmanager_transit_gateway_connect_peer_association.html.markdown index e8c0a11517e5..c87b9091911a 100644 --- a/website/docs/cdktf/typescript/r/networkmanager_transit_gateway_connect_peer_association.html.markdown +++ b/website/docs/cdktf/typescript/r/networkmanager_transit_gateway_connect_peer_association.html.markdown @@ -3,15 +3,14 @@ subcategory: "Network Manager" layout: "aws" page_title: "AWS: aws_networkmanager_transit_gateway_connect_peer_association" description: |- - Associates a transit gateway Connect peer with a device, and optionally, with a link. + Manages a Network Manager transit gateway Connect peer association. --- # Resource: aws_networkmanager_transit_gateway_connect_peer_association -Associates a transit gateway Connect peer with a device, and optionally, with a link. -If you specify a link, it must be associated with the specified device. 
+Manages a Network Manager transit gateway Connect peer association. Associates a transit gateway Connect peer with a device, and optionally, with a link. If you specify a link, it must be associated with the specified device. ## Example Usage @@ -41,20 +40,30 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -This resource supports the following arguments: +The following arguments are required: -* `deviceId` - (Required) The ID of the device. -* `globalNetworkId` - (Required) The ID of the global network. -* `linkId` - (Optional) The ID of the link. -* `transitGatewayConnectPeerArn` - (Required) The Amazon Resource Name (ARN) of the Connect peer. +* `deviceId` - (Required) ID of the device. +* `globalNetworkId` - (Required) ID of the global network. +* `transitGatewayConnectPeerArn` - (Required) ARN of the Connect peer. + +The following arguments are optional: + +* `linkId` - (Optional) ID of the link. ## Attribute Reference This resource exports no additional attributes. +## Timeouts + +[Configuration options](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts): + +* `create` - (Default `10m`) +* `delete` - (Default `10m`) + ## Import -In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import `aws_networkmanager_transit_gateway_connect_peer_association` using the global network ID and customer gateway ARN. For example: +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import `aws_networkmanager_transit_gateway_connect_peer_association` using the global network ID and Connect peer ARN. For example: ```typescript // DO NOT EDIT. 
Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug @@ -78,10 +87,10 @@ class MyConvertedCode extends TerraformStack { ``` -Using `terraform import`, import `aws_networkmanager_transit_gateway_connect_peer_association` using the global network ID and customer gateway ARN. For example: +Using `terraform import`, import `aws_networkmanager_transit_gateway_connect_peer_association` using the global network ID and Connect peer ARN. For example: ```console % terraform import aws_networkmanager_transit_gateway_connect_peer_association.example global-network-0d47f6t230mz46dy4,arn:aws:ec2:us-west-2:123456789012:transit-gateway-connect-peer/tgw-connect-peer-12345678 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/networkmanager_transit_gateway_peering.html.markdown b/website/docs/cdktf/typescript/r/networkmanager_transit_gateway_peering.html.markdown index 541a9f6fce01..b59e967f898f 100644 --- a/website/docs/cdktf/typescript/r/networkmanager_transit_gateway_peering.html.markdown +++ b/website/docs/cdktf/typescript/r/networkmanager_transit_gateway_peering.html.markdown @@ -3,14 +3,14 @@ subcategory: "Network Manager" layout: "aws" page_title: "AWS: aws_networkmanager_transit_gateway_peering" description: |- - Creates a peering connection between an AWS Cloud WAN core network and an AWS Transit Gateway. + Manages a Network Manager transit gateway peering connection. --- # Resource: aws_networkmanager_transit_gateway_peering -Creates a peering connection between an AWS Cloud WAN core network and an AWS Transit Gateway. +Manages a Network Manager transit gateway peering connection. Creates a peering connection between an AWS Cloud WAN core network and an AWS Transit Gateway. 
## Example Usage @@ -28,6 +28,10 @@ class MyConvertedCode extends TerraformStack { super(scope, name); new NetworkmanagerTransitGatewayPeering(this, "example", { coreNetworkId: Token.asString(awsccNetworkmanagerCoreNetworkExample.id), + dependsOn: [ + awsEc2TransitGatewayPolicyTableExample, + awsNetworkmanagerCoreNetworkPolicyAttachmentExample, + ], transitGatewayArn: Token.asString(awsEc2TransitGatewayExample.arn), }); } @@ -37,25 +41,35 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -This resource supports the following arguments: +The following arguments are required: + +* `coreNetworkId` - (Required) ID of a core network. +* `transitGatewayArn` - (Required) ARN of the transit gateway for the peering request. + +The following arguments are optional: -* `coreNetworkId` - (Required) The ID of a core network. * `tags` - (Optional) Key-value tags for the peering. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. -* `transitGatewayArn` - (Required) The ARN of the transit gateway for the peering request. ## Attribute Reference This resource exports the following attributes in addition to the arguments above: -* `arn` - Peering Amazon Resource Name (ARN). -* `coreNetworkArn` - The ARN of the core network. -* `edgeLocation` - The edge location for the peer. +* `arn` - Peering ARN. +* `coreNetworkArn` - ARN of the core network. +* `edgeLocation` - Edge location for the peer. * `id` - Peering ID. -* `ownerAccountId` - The ID of the account owner. -* `peeringType` - The type of peering. This will be `TRANSIT_GATEWAY`. -* `resourceArn` - The resource ARN of the peer. 
-* `tagsAll` - A map of tags assigned to the resource, including those inherited from the provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). -* `transitGatewayPeeringAttachmentId` - The ID of the transit gateway peering attachment. +* `ownerAccountId` - ID of the account owner. +* `peeringType` - Type of peering. This will be `TRANSIT_GATEWAY`. +* `resourceArn` - Resource ARN of the peer. +* `tagsAll` - Map of tags assigned to the resource, including those inherited from the provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). +* `transitGatewayPeeringAttachmentId` - ID of the transit gateway peering attachment. + +## Timeouts + +[Configuration options](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts): + +* `create` - (Default `20m`) +* `delete` - (Default `20m`) ## Import @@ -89,4 +103,4 @@ Using `terraform import`, import `aws_networkmanager_transit_gateway_peering` us % terraform import aws_networkmanager_transit_gateway_peering.example peering-444555aaabbb11223 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/networkmanager_transit_gateway_registration.html.markdown b/website/docs/cdktf/typescript/r/networkmanager_transit_gateway_registration.html.markdown index 741a8f14d9b1..491dc53cecb7 100644 --- a/website/docs/cdktf/typescript/r/networkmanager_transit_gateway_registration.html.markdown +++ b/website/docs/cdktf/typescript/r/networkmanager_transit_gateway_registration.html.markdown @@ -3,16 +3,14 @@ subcategory: "Network Manager" layout: "aws" page_title: "AWS: aws_networkmanager_transit_gateway_registration" description: |- - Registers a transit gateway to a global network. + Manages a Network Manager transit gateway registration. 
--- # Resource: aws_networkmanager_transit_gateway_registration -Registers a transit gateway to a global network. The transit gateway can be in any AWS Region, -but it must be owned by the same AWS account that owns the global network. -You cannot register a transit gateway in more than one global network. +Manages a Network Manager transit gateway registration. Registers a transit gateway to a global network. The transit gateway can be in any AWS Region, but it must be owned by the same AWS account that owns the global network. You cannot register a transit gateway in more than one global network. ## Example Usage @@ -55,15 +53,22 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -This resource supports the following arguments: +The following arguments are required: -* `globalNetworkId` - (Required) The ID of the Global Network to register to. -* `transitGatewayArn` - (Required) The ARN of the Transit Gateway to register. +* `globalNetworkId` - (Required) ID of the Global Network to register to. +* `transitGatewayArn` - (Required) ARN of the Transit Gateway to register. ## Attribute Reference This resource exports no additional attributes. +## Timeouts + +[Configuration options](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts): + +* `create` - (Default `10m`) +* `delete` - (Default `10m`) + ## Import In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import `aws_networkmanager_transit_gateway_registration` using the global network ID and transit gateway ARN. 
For example: @@ -96,4 +101,4 @@ Using `terraform import`, import `aws_networkmanager_transit_gateway_registratio % terraform import aws_networkmanager_transit_gateway_registration.example global-network-0d47f6t230mz46dy4,arn:aws:ec2:us-west-2:123456789012:transit-gateway/tgw-123abc05e04123abc ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/networkmanager_transit_gateway_route_table_attachment.html.markdown b/website/docs/cdktf/typescript/r/networkmanager_transit_gateway_route_table_attachment.html.markdown index 0f2b026130c9..df2359cf445d 100644 --- a/website/docs/cdktf/typescript/r/networkmanager_transit_gateway_route_table_attachment.html.markdown +++ b/website/docs/cdktf/typescript/r/networkmanager_transit_gateway_route_table_attachment.html.markdown @@ -3,14 +3,14 @@ subcategory: "Network Manager" layout: "aws" page_title: "AWS: aws_networkmanager_transit_gateway_route_table_attachment" description: |- - Creates a transit gateway route table attachment. + Manages a Network Manager transit gateway route table attachment. --- # Resource: aws_networkmanager_transit_gateway_route_table_attachment -Creates a transit gateway route table attachment. +Manages a Network Manager transit gateway route table attachment. ## Example Usage @@ -41,28 +41,38 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -This resource supports the following arguments: +The following arguments are required: + +* `peeringId` - (Required) ID of the peer for the attachment. +* `transitGatewayRouteTableArn` - (Required) ARN of the transit gateway route table for the attachment. + +The following arguments are optional: -* `peeringId` - (Required) The ID of the peer for the attachment. * `tags` - (Optional) Key-value tags for the attachment. 
If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. -* `transitGatewayRouteTableArn` - (Required) The ARN of the transit gateway route table for the attachment. ## Attribute Reference This resource exports the following attributes in addition to the arguments above: -* `arn` - Attachment Amazon Resource Name (ARN). -* `attachmentPolicyRuleNumber` - The policy rule number associated with the attachment. -* `attachmentType` - The type of attachment. -* `coreNetworkArn` - The ARN of the core network. -* `coreNetworkId` - The ID of the core network. -* `edgeLocation` - The edge location for the peer. -* `id` - The ID of the attachment. -* `ownerAccountId` - The ID of the attachment account owner. -* `resourceArn` - The attachment resource ARN. -* `segmentName` - The name of the segment attachment. -* `state` - The state of the attachment. -* `tagsAll` - A map of tags assigned to the resource, including those inherited from the provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). +* `arn` - Attachment ARN. +* `attachmentPolicyRuleNumber` - Policy rule number associated with the attachment. +* `attachmentType` - Type of attachment. +* `coreNetworkArn` - ARN of the core network. +* `coreNetworkId` - ID of the core network. +* `edgeLocation` - Edge location for the peer. +* `id` - ID of the attachment. +* `ownerAccountId` - ID of the attachment account owner. +* `resourceArn` - Attachment resource ARN. +* `segmentName` - Name of the segment attachment. +* `state` - State of the attachment. 
+* `tagsAll` - Map of tags assigned to the resource, including those inherited from the provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). + +## Timeouts + +[Configuration options](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts): + +* `create` - (Default `10m`) +* `delete` - (Default `10m`) ## Import @@ -96,4 +106,4 @@ Using `terraform import`, import `aws_networkmanager_transit_gateway_route_table % terraform import aws_networkmanager_transit_gateway_route_table_attachment.example attachment-0f8fa60d2238d1bd8 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/networkmanager_vpc_attachment.html.markdown b/website/docs/cdktf/typescript/r/networkmanager_vpc_attachment.html.markdown index ee8a80cf0f22..17f509e71827 100644 --- a/website/docs/cdktf/typescript/r/networkmanager_vpc_attachment.html.markdown +++ b/website/docs/cdktf/typescript/r/networkmanager_vpc_attachment.html.markdown @@ -3,14 +3,14 @@ subcategory: "Network Manager" layout: "aws" page_title: "AWS: aws_networkmanager_vpc_attachment" description: |- - Terraform resource for managing an AWS Network Manager VPC Attachment. + Manages a Network Manager VPC attachment. --- # Resource: aws_networkmanager_vpc_attachment -Terraform resource for managing an AWS Network Manager VPC Attachment. +Manages a Network Manager VPC attachment. ## Example Usage @@ -38,42 +38,79 @@ class MyConvertedCode extends TerraformStack { ``` +### Usage with Options + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { Token, TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. 
+ */ +import { NetworkmanagerVpcAttachment } from "./.gen/providers/aws/networkmanager-vpc-attachment"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + new NetworkmanagerVpcAttachment(this, "example", { + coreNetworkId: Token.asString(awsccNetworkmanagerCoreNetworkExample.id), + options: { + applianceModeSupport: false, + dnsSupport: true, + ipv6Support: false, + securityGroupReferencingSupport: true, + }, + subnetArns: [Token.asString(awsSubnetExample.arn)], + vpcArn: Token.asString(awsVpcExample.arn), + }); + } +} + +``` + ## Argument Reference The following arguments are required: -* `coreNetworkId` - (Required) The ID of a core network for the VPC attachment. -* `subnetArns` - (Required) The subnet ARN of the VPC attachment. -* `vpcArn` - (Required) The ARN of the VPC. +* `coreNetworkId` - (Required) ID of a core network for the VPC attachment. +* `subnetArns` - (Required) Subnet ARNs of the VPC attachment. +* `vpcArn` - (Required) ARN of the VPC. The following arguments are optional: -* `options` - (Optional) Options for the VPC attachment. +* `options` - (Optional) Options for the VPC attachment. [See below](#options). * `tags` - (Optional) Key-value tags for the attachment. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. ### options -* `applianceModeSupport` - (Optional) Indicates whether appliance mode is supported. - If enabled, traffic flow between a source and destination use the same Availability Zone for the VPC attachment for the lifetime of that flow. - If the VPC attachment is pending acceptance, changing this value will recreate the resource. -* `ipv6Support` - (Optional) Indicates whether IPv6 is supported. 
- If the VPC attachment is pending acceptance, changing this value will recreate the resource. +* `applianceModeSupport` - (Optional) Whether to enable appliance mode support. If enabled, traffic flow between a source and destination use the same Availability Zone for the VPC attachment for the lifetime of that flow. If the VPC attachment is pending acceptance, changing this value will recreate the resource. +* `dnsSupport` - (Optional) Whether to enable DNS support. If the VPC attachment is pending acceptance, changing this value will recreate the resource. +* `ipv6Support` - (Optional) Whether to enable IPv6 support. If the VPC attachment is pending acceptance, changing this value will recreate the resource. +* `securityGroupReferencingSupport` - (Optional) Whether to enable security group referencing support for this VPC attachment. The default is `true`. However, at the core network policy-level the default is set to `false`. If the VPC attachment is pending acceptance, changing this value will recreate the resource. ## Attribute Reference This resource exports the following attributes in addition to the arguments above: -* `arn` - The ARN of the attachment. -* `attachmentPolicyRuleNumber` - The policy rule number associated with the attachment. -* `attachmentType` - The type of attachment. -* `coreNetworkArn` - The ARN of a core network. -* `edgeLocation` - The Region where the edge is located. -* `id` - The ID of the attachment. -* `ownerAccountId` - The ID of the attachment account owner. -* `resourceArn` - The attachment resource ARN. -* `segmentName` - The name of the segment attachment. -* `state` - The state of the attachment. -* `tagsAll` - A map of tags assigned to the resource, including those inherited from the provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). +* `arn` - ARN of the attachment. 
+* `attachmentPolicyRuleNumber` - Policy rule number associated with the attachment. +* `attachmentType` - Type of attachment. +* `coreNetworkArn` - ARN of a core network. +* `edgeLocation` - Region where the edge is located. +* `id` - ID of the attachment. +* `ownerAccountId` - ID of the attachment account owner. +* `resourceArn` - Attachment resource ARN. +* `segmentName` - Name of the segment attachment. +* `state` - State of the attachment. +* `tagsAll` - Map of tags assigned to the resource, including those inherited from the provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). + +## Timeouts + +[Configuration options](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts): + +* `create` - (Default `15m`) +* `delete` - (Default `10m`) +* `update` - (Default `10m`) ## Import @@ -107,4 +144,4 @@ Using `terraform import`, import `aws_networkmanager_vpc_attachment` using the a % terraform import aws_networkmanager_vpc_attachment.example attachment-0f8fa60d2238d1bd8 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/networkmonitor_monitor.html.markdown b/website/docs/cdktf/typescript/r/networkmonitor_monitor.html.markdown index 0e5ec409a978..4fd9da276bf6 100644 --- a/website/docs/cdktf/typescript/r/networkmonitor_monitor.html.markdown +++ b/website/docs/cdktf/typescript/r/networkmonitor_monitor.html.markdown @@ -45,6 +45,7 @@ The following arguments are required: The following arguments are optional: +- `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
- `aggregationPeriod` - (Optional) The time, in seconds, that metrics are aggregated and sent to Amazon CloudWatch. Valid values are either 30 or 60. - `tags` - (Optional) Key-value tags for the monitor. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. @@ -87,4 +88,4 @@ Using `terraform import`, import `aws_networkmonitor_monitor` using the monitor % terraform import aws_networkmonitor_monitor.example monitor-7786087912324693644 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/networkmonitor_probe.html.markdown b/website/docs/cdktf/typescript/r/networkmonitor_probe.html.markdown index e1b56ab176c8..efa83df1beb3 100644 --- a/website/docs/cdktf/typescript/r/networkmonitor_probe.html.markdown +++ b/website/docs/cdktf/typescript/r/networkmonitor_probe.html.markdown @@ -56,6 +56,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +- `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). - `destination` - (Required) The destination IP address. This must be either IPV4 or IPV6. - `destinationPort` - (Optional) The port associated with the destination. This is required only if the protocol is TCP and must be a number between 1 and 65536. - `monitorName` - (Required) The name of the monitor. 
@@ -104,4 +105,4 @@ Using `terraform import`, import `aws_networkmonitor_probe` using the monitor na % terraform import aws_networkmonitor_probe.example monitor-7786087912324693644,probe-3qm8p693i4fi1h8lqylzkbp42e ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/oam_link.html.markdown b/website/docs/cdktf/typescript/r/oam_link.html.markdown index f2027b6827f0..bc8d76ac0f26 100644 --- a/website/docs/cdktf/typescript/r/oam_link.html.markdown +++ b/website/docs/cdktf/typescript/r/oam_link.html.markdown @@ -132,6 +132,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `linkConfiguration` - (Optional) Configuration for creating filters that specify that only some metric namespaces or log groups are to be shared from the source account to the monitoring account. See [`linkConfiguration` Block](#link_configuration-block) for details. * `tags` - (Optional) A map of tags to assign to the resource. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. 
@@ -204,4 +205,4 @@ Using `terraform import`, import CloudWatch Observability Access Manager Link us % terraform import aws_oam_link.example arn:aws:oam:us-west-2:123456789012:link/link-id ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/oam_sink.html.markdown b/website/docs/cdktf/typescript/r/oam_sink.html.markdown index 4fc32545b777..64fe1d15643b 100644 --- a/website/docs/cdktf/typescript/r/oam_sink.html.markdown +++ b/website/docs/cdktf/typescript/r/oam_sink.html.markdown @@ -47,6 +47,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `tags` - (Optional) A map of tags to assign to the resource. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. 
## Attribute Reference @@ -97,4 +98,4 @@ Using `terraform import`, import CloudWatch Observability Access Manager Sink us % terraform import aws_oam_sink.example arn:aws:oam:us-west-2:123456789012:sink/sink-id ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/oam_sink_policy.html.markdown b/website/docs/cdktf/typescript/r/oam_sink_policy.html.markdown index 2198236de769..d1c7f8cb81d9 100644 --- a/website/docs/cdktf/typescript/r/oam_sink_policy.html.markdown +++ b/website/docs/cdktf/typescript/r/oam_sink_policy.html.markdown @@ -67,8 +67,9 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -The following arguments are required: +This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `sinkIdentifier` - (Required) ARN of the sink to attach this policy to. * `policy` - (Required) JSON policy to use. If you are updating an existing policy, the entire existing policy is replaced by what you specify here. 
@@ -118,4 +119,4 @@ Using `terraform import`, import CloudWatch Observability Access Manager Sink Po % terraform import aws_oam_sink_policy.example arn:aws:oam:us-west-2:123456789012:sink/sink-id ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/odb_cloud_autonomous_vm_cluster.html.markdown b/website/docs/cdktf/typescript/r/odb_cloud_autonomous_vm_cluster.html.markdown new file mode 100644 index 000000000000..8984536d5ca6 --- /dev/null +++ b/website/docs/cdktf/typescript/r/odb_cloud_autonomous_vm_cluster.html.markdown @@ -0,0 +1,220 @@ +--- +subcategory: "Oracle Database@AWS" +layout: "AWS: aws_odb_cloud_autonomous_vm_cluster" +page_title: "AWS: aws_odb_cloud_autonomous_vm_cluster" +description: |- + Terraform resource managing cloud autonomous vm cluster in AWS for Oracle Database@AWS. +--- + + + +# Resource: aws_odb_cloud_autonomous_vm_cluster + +Terraform resource managing cloud autonomous vm cluster in AWS for Oracle Database@AWS. + +You can find out more about Oracle Database@AWS from [User Guide](https://docs.aws.amazon.com/odb/latest/UserGuide/what-is-odb.html). + +## Example Usage + +### Basic Usage + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. 
+ */ +import { OdbCloudAutonomousVmCluster } from "./.gen/providers/aws/odb-cloud-autonomous-vm-cluster"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + new OdbCloudAutonomousVmCluster(this, "avmc_with_all_params", { + autonomousDataStorageSizeInTbs: 5, + cloudExadataInfrastructureId: + "", + cpuCoreCountPerNode: 40, + dbServers: ["", ""], + description: "my first avmc", + displayName: "Ofake_my avmc", + licenseModel: "LICENSE_INCLUDED", + maintenanceWindow: [ + { + daysOfWeek: [ + { + name: "MONDAY", + }, + { + name: "TUESDAY", + }, + ], + hoursOfDay: [4, 16], + leadTimeInWeeks: 3, + months: [ + { + name: "FEBRUARY", + }, + { + name: "MAY", + }, + { + name: "AUGUST", + }, + { + name: "NOVEMBER", + }, + ], + preference: "CUSTOM_PREFERENCE", + weeksOfMonth: [2, 4], + }, + ], + memoryPerOracleComputeUnitInGbs: 2, + odbNetworkId: "", + scanListenerPortNonTls: 1024, + scanListenerPortTls: 8561, + tags: { + env: "dev", + }, + timeZone: "UTC", + totalContainerDatabases: 1, + }); + new OdbCloudAutonomousVmCluster(this, "avmc_with_minimum_parameters", { + autonomousDataStorageSizeInTbs: 5, + cloudExadataInfrastructureId: "", + cpuCoreCountPerNode: 40, + dbServers: [""], + displayName: "Ofake-avmc-my_avmc", + licenseModel: "LICENSE_INCLUDED", + maintenanceWindow: [ + { + preference: "NO_PREFERENCE", + }, + ], + memoryPerOracleComputeUnitInGbs: 2, + odbNetworkId: "", + scanListenerPortNonTls: 1024, + scanListenerPortTls: 8561, + totalContainerDatabases: 1, + }); + } +} + +``` + +## Argument Reference + +The following arguments are required: + +* `cloudExadataInfrastructureId` - (Required) Exadata infrastructure id. Changing this will force terraform to create new resource. +* `autonomousDataStorageSizeInTbs` - (Required) The data storage size allocated for Autonomous Databases in the Autonomous VM cluster, in TB. Changing this will force terraform to create new resource. 
+* `cpuCoreCountPerNode` - (Required) The number of CPU cores enabled per node in the Autonomous VM cluster. Changing this will force terraform to create new resource. +* `dbServers` - (Required) The database servers in the Autonomous VM cluster. Changing this will force terraform to create new resource. +* `displayName` - (Required) The display name of the Autonomous VM cluster. Changing this will force terraform to create new resource. +* `memoryPerOracleComputeUnitInGbs` - (Required) The amount of memory allocated per Oracle Compute Unit, in GB. Changing this will force terraform to create new resource. +* `odbNetworkId` - (Required) The unique identifier of the ODB network associated with this Autonomous VM Cluster. Changing this will force terraform to create new resource. +* `scanListenerPortNonTls` - (Required) The SCAN listener port for non-TLS (TCP) protocol. The default is 1521. Changing this will force terraform to create new resource. +* `scanListenerPortTls` - (Required) The SCAN listener port for TLS (TCP) protocol. The default is 2484. Changing this will force terraform to create new resource. +* `totalContainerDatabases` - (Required) The total number of Autonomous Container Databases that can be created with the allocated local storage. Changing this will force terraform to create new resource. +* `maintenanceWindow` - (Required) The maintenance window of the Autonomous VM cluster. Changing this will force terraform to create new resource. + +The following arguments are optional: + +* `description` - (Optional) The description of the Autonomous VM cluster. +* `isMtlsEnabledVmCluster` - (Optional) Indicates whether mutual TLS (mTLS) authentication is enabled for the Autonomous VM cluster. Changing this will force terraform to create new resource. +* `licenseModel` - (Optional) The license model for the Autonomous VM cluster. Valid values are LICENSE_INCLUDED or BRING_YOUR_OWN_LICENSE. Changing this will force terraform to create new resource. 
+* `timeZone` - (Optional) The time zone of the Autonomous VM cluster. Changing this will force terraform to create new resource. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `tags` - (Optional) A map of tags to assign to the Autonomous VM cluster. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. + +### maintenance_window + +* `preference` - (Required) The preference for the maintenance window scheduling. Changing this will force terraform to create new resource. +* `daysOfWeek` - (Optional) The days of the week when maintenance can be performed. Changing this will force terraform to create new resource. +* `hoursOfDay` - (Optional) The hours of the day when maintenance can be performed. Changing this will force terraform to create new resource. +* `leadTimeInWeeks` - (Optional) The lead time in weeks before the maintenance window. Changing this will force terraform to create new resource. +* `months` - (Optional) The months when maintenance can be performed. Changing this will force terraform to create new resource. +* `weeksOfMonth` - (Optional) The weeks of the month when maintenance can be performed. Changing this will force terraform to create new resource. + +## Attribute Reference + +This resource exports the following attributes in addition to the arguments above: + +* `arn` - The Amazon Resource Name (ARN) of the Autonomous VM cluster. +* `autonomousDataStoragePercentage` - The percentage of data storage currently in use for Autonomous Databases in the Autonomous VM cluster.
+* `availableAutonomousDataStorageSizeInTbs` - The available data storage space for Autonomous Databases in the Autonomous VM cluster, in TB. +* `availableContainerDatabases` - The number of Autonomous CDBs that you can create with the currently available storage. +* `availableCpus` - The number of CPU cores available for allocation to Autonomous Databases. +* `computeModel` - The compute model of the Autonomous VM cluster: ECPU or OCPU. +* `cpuCoreCount` - The total number of CPU cores in the Autonomous VM cluster. +* `cpuPercentage` - The percentage of total CPU cores currently in use in the Autonomous VM cluster. +* `createdAt` - The date and time when the Autonomous VM cluster was created. +* `dataStorageSizeInGbs` - The total data storage allocated to the Autonomous VM cluster, in GB. +* `dataStorageSizeInTbs` - The total data storage allocated to the Autonomous VM cluster, in TB. +* `odbNodeStorageSizeInGbs` - The local node storage allocated to the Autonomous VM cluster, in gigabytes (GB). +* `domain` - The domain name of the Autonomous VM cluster. +* `exadataStorageInTbsLowestScaledValue` - The minimum value to which you can scale down the Exadata storage, in TB. +* `hostname` - The hostname of the Autonomous VM cluster. +* `licenseModel` - The license model for the Autonomous VM cluster. Valid values are LICENSE_INCLUDED or BRING_YOUR_OWN_LICENSE. +* `maxAcdsLowestScaledValue` - The minimum value to which you can scale down the maximum number of Autonomous CDBs. +* `memorySizeInGbs` - The total amount of memory allocated to the Autonomous VM cluster, in gigabytes(GB). +* `nodeCount` - The number of database server nodes in the Autonomous VM cluster. +* `nonProvisionableAutonomousContainerDatabases` - The number of Autonomous CDBs that can't be provisioned because of resource constraints. +* `ociResourceAnchorName` - The name of the OCI resource anchor associated with this Autonomous VM cluster. 
+* `ociUrl` - The URL for accessing the OCI console page for this Autonomous VM cluster. +* `ocid` - The Oracle Cloud Identifier (OCID) of the Autonomous VM cluster. +* `percentProgress` - The progress of the current operation on the Autonomous VM cluster, as a percentage. +* `provisionableAutonomousContainerDatabases` - The number of Autonomous CDBs that can be provisioned in the Autonomous VM cluster. +* `provisionedAutonomousContainerDatabases` - The number of Autonomous CDBs currently provisioned in the Autonomous VM cluster. +* `provisionedCpus` - The number of CPUs provisioned in the Autonomous VM cluster. +* `reclaimableCpus` - The number of CPU cores that can be reclaimed from terminated or scaled-down Autonomous Databases. +* `reservedCpus` - The number of CPU cores reserved for system operations and redundancy. +* `shape` - The shape of the Exadata infrastructure for the Autonomous VM cluster. +* `status` - The status of the Autonomous VM cluster. Possible values include CREATING, AVAILABLE, UPDATING, DELETING, DELETED, FAILED. +* `statusReason` - Additional information about the current status of the Autonomous VM cluster. +* `timeZone` - The time zone of the Autonomous VM cluster. +* `timeOrdsCertificateExpires` - The expiration date and time of the ORDS certificate. +* `timeDatabaseSslCertificateExpires` - The expiration date and time of the database SSL certificate. +* `tagsAll` - The combined set of user-defined and provider-defined tags. + +## Timeouts + +[Configuration options](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts): + +* `create` - (Default `24h`) +* `update` - (Default `24h`) +* `delete` - (Default `24h`) + +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import OpenSearch Ingestion Pipeline using the `id`. For example: + +```typescript +// DO NOT EDIT. 
Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. + */ +import { OdbCloudAutonomousVmCluster } from "./.gen/providers/aws/odb-cloud-autonomous-vm-cluster"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + OdbCloudAutonomousVmCluster.generateConfigForImport( + this, + "example", + "example" + ); + } +} + +``` + +Using `terraform import`, import cloud autonomous vm cluster `id`. For example: + +```console +% terraform import aws_odb_cloud_autonomous_vm_cluster.example example +``` + + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/odb_cloud_exadata_infrastructure.html.markdown b/website/docs/cdktf/typescript/r/odb_cloud_exadata_infrastructure.html.markdown new file mode 100644 index 000000000000..f03c3077f636 --- /dev/null +++ b/website/docs/cdktf/typescript/r/odb_cloud_exadata_infrastructure.html.markdown @@ -0,0 +1,191 @@ +--- +subcategory: "Oracle Database@AWS" +layout: "aws" +page_title: "AWS: aws_odb_cloud_exadata_infrastructure" +description: |- + Terraform resource for managing exadata infrastructure resource for Oracle Database@AWS. +--- + + + +# Resource: aws_odb_cloud_exadata_infrastructure + +Terraform resource for managing exadata infrastructure resource in AWS for Oracle Database@AWS. + +## Example Usage + +### Basic Usage + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. 
+ */ +import { OdbCloudExadataInfrastructure } from "./.gen/providers/aws/odb-cloud-exadata-infrastructure"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + new OdbCloudExadataInfrastructure(this, "example", { + availabilityZoneId: "use1-az6", + computeCount: 2, + customerContactsToSendToOci: [ + { + email: "abc@example.com", + }, + { + email: "def@example.com", + }, + ], + databaseServerType: "X11M", + displayName: "my-exa-infra", + maintenanceWindow: [ + { + customActionTimeoutInMins: 16, + daysOfWeek: [ + { + name: "MONDAY", + }, + { + name: "TUESDAY", + }, + ], + hoursOfDay: [11, 16], + isCustomActionTimeoutEnabled: true, + leadTimeInWeeks: 3, + months: [ + { + name: "FEBRUARY", + }, + { + name: "MAY", + }, + { + name: "AUGUST", + }, + { + name: "NOVEMBER", + }, + ], + patchingMode: "ROLLING", + preference: "CUSTOM_PREFERENCE", + weeksOfMonth: [2, 4], + }, + ], + shape: "Exadata.X11M", + storageCount: 3, + storageServerType: "X11M-HC", + tags: { + env: "dev", + }, + }); + } +} + +``` + +## Argument Reference + +The following arguments are required: + +* `displayName` - (Required) The user-friendly name for the Exadata infrastructure. Changing this will force terraform to create a new resource. +* `shape` - (Required) The model name of the Exadata infrastructure. Changing this will force terraform to create new resource. +* `storageCount` - (Required) The number of storage servers that are activated for the Exadata infrastructure. Changing this will force terraform to create new resource. +* `computeCount` - (Required) The number of compute instances that the Exadata infrastructure is located. Changing this will force terraform to create new resource. +* `availabilityZoneId` - (Required) The AZ ID of the AZ where the Exadata infrastructure is located. Changing this will force terraform to create new resource. 
+ +The following arguments are optional: + +* `customerContactsToSendToOci` - (Optional) The email addresses of contacts to receive notification from Oracle about maintenance updates for the Exadata infrastructure. Changing this will force terraform to create new resource. +* `availabilityZone` - (Optional) The name of the Availability Zone (AZ) where the Exadata infrastructure is located. Changing this will force terraform to create new resource. +* `databaseServerType` - (Optional) The database server model type of the Exadata infrastructure. For the list of valid model names, use the ListDbSystemShapes operation. This is a mandatory parameter for Exadata.X11M system shape. Changing this will force terraform to create new resource. +* `storageServerType` - (Optional) The storage server model type of the Exadata infrastructure. For the list of valid model names, use the ListDbSystemShapes operation. This is a mandatory parameter for Exadata.X11M system shape. Changing this will force terraform to create new resource. +* `tags` - (Optional) A map of tags to assign to the exadata infrastructure. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). + +### maintenance_window + +* `customActionTimeoutInMins` - (Required) The custom action timeout in minutes for the maintenance window. +* `isCustomActionTimeoutEnabled` - (Required) Indicates whether custom action timeout is enabled for the maintenance window.
+* `patchingMode` - (Required) The patching mode for the maintenance window. +* `preference` - (Required) The preference for the maintenance window scheduling. +* `daysOfWeek` - (Optional) The days of the week when maintenance can be performed. +* `hoursOfDay` - (Optional) The hours of the day when maintenance can be performed. +* `leadTimeInWeeks` - (Optional) The lead time in weeks before the maintenance window. +* `months` - (Optional) The months when maintenance can be performed. +* `weeksOfMonth` - (Optional) The weeks of the month when maintenance can be performed. + +## Attribute Reference + +This resource exports the following attributes in addition to the arguments above: + +* `id` - Unique identifier of the Exadata infrastructure. +* `arn` - Amazon Resource Name (ARN) of the Exadata infrastructure. +* `activatedStorageCount` - The number of storage servers that are activated for the Exadata infrastructure. +* `additionalStorageCount` - The number of additional storage servers requested for the Exadata infrastructure. +* `availableStorageSizeInGbs` - The amount of available storage, in gigabytes (GB), for the Exadata infrastructure. +* `cpuCount` - The total number of CPU cores that are allocated to the Exadata infrastructure. +* `dataStorageSizeInTbs` - The size of the Exadata infrastructure's data disk group, in terabytes (TB). +* `dbNodeStorageSizeInGbs` - The size of the Exadata infrastructure's local node storage, in gigabytes (GB). +* `dbServerVersion` - The software version of the database servers (dom0) in the Exadata infrastructure. +* `lastMaintenanceRunId` - The Oracle Cloud Identifier (OCID) of the last maintenance run for the Exadata infrastructure. +* `maxCpuCount` - The total number of CPU cores available on the Exadata infrastructure. +* `maxDataStorageInTbs` - The total amount of data disk group storage, in terabytes (TB), that's available on the Exadata infrastructure.
+* `maxDbNodeStorageSizeInGbs` - The total amount of local node storage, in gigabytes (GB), that's available on the Exadata infrastructure. +* `maxMemoryInGbs` - The total amount of memory in gigabytes (GB) available on the Exadata infrastructure. +* `monthlyDbServerVersion` - The monthly software version of the database servers in the Exadata infrastructure. +* `monthlyStorageServerVersion` - The monthly software version of the storage servers installed on the Exadata infrastructure. +* `nextMaintenanceRunId` - The OCID of the next maintenance run for the Exadata infrastructure. +* `ocid` - The OCID of the Exadata infrastructure. +* `ociResourceAnchorName` - The name of the OCI resource anchor for the Exadata infrastructure. +* `percentProgress` - The amount of progress made on the current operation on the Exadata infrastructure, expressed as a percentage. +* `status` - The current status of the Exadata infrastructure. +* `statusReason` - Additional information about the status of the Exadata infrastructure. +* `storageServerVersion` - The software version of the storage servers on the Exadata infrastructure. +* `totalStorageSizeInGbs` - The total amount of storage, in gigabytes (GB), on the Exadata infrastructure. +* `createdAt` - The time when the Exadata infrastructure was created. +* `computeModel` - The OCI model compute model used when you create or clone an instance: ECPU or OCPU. + +## Timeouts + +[Configuration options](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts): + +* `create` - (Default `24h`) +* `update` - (Default `24h`) +* `delete` - (Default `24h`) + +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import OpenSearch Ingestion Pipeline using the `id`. For example: + +```typescript +// DO NOT EDIT. 
Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. + */ +import { OdbCloudExadataInfrastructure } from "./.gen/providers/aws/odb-cloud-exadata-infrastructure"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + OdbCloudExadataInfrastructure.generateConfigForImport( + this, + "example", + "example" + ); + } +} + +``` + +Using `terraform import`, import Exadata Infrastructure using the `id`. For example: + +```console +% terraform import aws_odb_cloud_exadata_infrastructure.example example +``` + + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/odb_cloud_vm_cluster.html.markdown b/website/docs/cdktf/typescript/r/odb_cloud_vm_cluster.html.markdown new file mode 100644 index 000000000000..d42a620f7b26 --- /dev/null +++ b/website/docs/cdktf/typescript/r/odb_cloud_vm_cluster.html.markdown @@ -0,0 +1,183 @@ +--- +subcategory: "Oracle Database@AWS" +layout: "AWS: aws_odb_cloud_vm_cluster" +page_title: "AWS: aws_odb_cloud_vm_cluster" +description: |- + Terraform resource for managing cloud vm cluster resource in AWS for Oracle Database@AWS. +--- + + + +# Resource: aws_odb_cloud_vm_cluster + +Terraform data source for Exadata Infrastructure resource in AWS for Oracle Database@AWS. + +You can find out more about Oracle Database@AWS from [User Guide](https://docs.aws.amazon.com/odb/latest/UserGuide/what-is-odb.html). + +## Example Usage + +### Basic Usage + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. 
+ * See https://cdk.tf/provider-generation for more details. + */ +import { OdbCloudVmCluster } from "./.gen/providers/aws/odb-cloud-vm-cluster"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + new OdbCloudVmCluster(this, "with_all_parameters", { + cloudExadataInfrastructureId: "exa_gjrmtxl4qk", + clusterName: "julia-13", + cpuCoreCount: 6, + dataCollectionOptions: [ + { + isDiagnosticsEventsEnabled: true, + isHealthMonitoringEnabled: true, + isIncidentLogsEnabled: true, + }, + ], + dataStorageSizeInTbs: 20, + dbNodeStorageSizeInGbs: 120, + dbServers: ["my-dbserver-1", "my-db-server-2"], + displayName: "my-vmc", + giVersion: "23.0.0.0", + hostnamePrefix: "apollo12", + isLocalBackupEnabled: true, + isSparseDiskgroupEnabled: true, + licenseModel: "LICENSE_INCLUDED", + memorySizeInGbs: 60, + odbNetworkId: "odbnet_3l9st3litg", + scanListenerPortTcp: 1521, + sshPublicKeys: ["my-ssh-key"], + tags: { + env: "dev", + }, + timezone: "UTC", + }); + new OdbCloudVmCluster(this, "with_minimum_parameter", { + cloudExadataInfrastructureId: "exa_gjrmtxl4qk", + cpuCoreCount: 6, + dataCollectionOptions: [ + { + isDiagnosticsEventsEnabled: false, + isHealthMonitoringEnabled: false, + isIncidentLogsEnabled: false, + }, + ], + dataStorageSizeInTbs: 20, + dbNodeStorageSizeInGbs: 120, + dbServers: ["db-server-1", "db-server-2"], + displayName: "my-exa-infra", + giVersion: "23.0.0.0", + hostnamePrefix: "apollo12", + isLocalBackupEnabled: true, + isSparseDiskgroupEnabled: true, + licenseModel: "LICENSE_INCLUDED", + memorySizeInGbs: 60, + odbNetworkId: "odbnet_3l9st3litg", + sshPublicKeys: ["public-ssh-key"], + }); + } +} + +``` + +## Argument Reference + +The following arguments are required: + +* `cloudExadataInfrastructureId` - (Required) The unique identifier of the Exadata infrastructure for this VM cluster. Changing this will create a new resource. 
+* `cpuCoreCount` - (Required) The number of CPU cores to enable on the VM cluster. Changing this will create a new resource. +* `dbServers` - (Required) The list of database servers for the VM cluster. Changing this will create a new resource. +* `displayName` - (Required) A user-friendly name for the VM cluster. Changing this will create a new resource. +* `giVersion` - (Required) A valid software version of Oracle Grid Infrastructure (GI). To get the list of valid values, use the ListGiVersions operation and specify the shape of the Exadata infrastructure. Example: 19.0.0.0 Changing this will create a new resource. +* `hostnamePrefix` - (Required) The host name prefix for the VM cluster. Constraints: - Can't be "localhost" or "hostname". - Can't contain "-version". - The maximum length of the combined hostname and domain is 63 characters. - The hostname must be unique within the subnet. Changing this will create a new resource. +* `odbNetworkId` - (Required) The unique identifier of the ODB network for the VM cluster. Changing this will create a new resource. +* `sshPublicKeys` - (Required) The public key portion of one or more key pairs used for SSH access to the VM cluster. Changing this will create a new resource. +* `dataCollectionOptions` - (Required) The set of preferences for the various diagnostic collection options for the VM cluster. + +The following arguments are optional: + +* `clusterName` - (Optional) The name of the Grid Infrastructure (GI) cluster. Changing this will create a new resource. +* `dataStorageSizeInTbs` - (Optional) The size of the data disk group, in terabytes (TBs), to allocate for the VM cluster. Changing this will create a new resource. +* `dbNodeStorageSizeInGbs` - (Optional) The amount of local node storage, in gigabytes (GBs), to allocate for the VM cluster. Changing this will create a new resource. +* `isLocalBackupEnabled` - (Optional) Specifies whether to enable database backups to local Exadata storage for the VM cluster. 
Changing this will create a new resource. +* `isSparseDiskgroupEnabled` - (Optional) Specifies whether to create a sparse disk group for the VM cluster. Changing this will create a new resource. +* `licenseModel` - (Optional) The Oracle license model to apply to the VM cluster. Default: LICENSE_INCLUDED. Changing this will create a new resource. +* `memorySizeInGbs` - (Optional) The amount of memory, in gigabytes (GBs), to allocate for the VM cluster. Changing this will create a new resource. +* `scanListenerPortTcp` - (Optional) The port number for TCP connections to the single client access name (SCAN) listener. Valid values: 1024–8999, except 2484, 6100, 6200, 7060, 7070, 7085, and 7879. Default: 1521. Changing this will create a new resource. +* `timezone` - (Optional) The configured time zone of the VM cluster. Changing this will create a new resource. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `tags` - (Optional) A map of tags to assign to the cloud vm cluster. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. + +## Attribute Reference + +This resource exports the following attributes in addition to the arguments above: + +* `arn` - The Amazon Resource Name (ARN) for the cloud vm cluster. +* `diskRedundancy` - The type of redundancy for the VM cluster: NORMAL (2-way) or HIGH (3-way). +* `domain` - The domain name associated with the VM cluster. +* `hostnamePrefixComputed` - The host name for the VM cluster. Constraints: - Can't be "localhost" or "hostname". - Can't contain "-version".
- The maximum length of the combined hostname and domain is 63 characters. - The hostname must be unique within the subnet. This member is required. Changing this will create a new resource. +* `iormConfigCache` - The Exadata IORM (I/O Resource Manager) configuration cache details for the VM cluster. +* `lastUpdateHistoryEntryId` - The OCID of the most recent maintenance update history entry. +* `listenerPort` - The listener port number configured on the VM cluster. +* `nodeCount` - The total number of nodes in the VM cluster. +* `ocid` - The OCID (Oracle Cloud Identifier) of the VM cluster. +* `ociResourceAnchorName` - The name of the OCI resource anchor associated with the VM cluster. +* `ociUrl` - The HTTPS link to the VM cluster resource in OCI. +* `percentProgress` - The percentage of progress made on the current operation for the VM cluster. +* `scanDnsName` - The fully qualified domain name (FQDN) for the SCAN IP addresses associated with the VM cluster. +* `scanDnsRecordId` - The OCID of the DNS record for the SCAN IPs linked to the VM cluster. +* `scanIpIds` - The list of OCIDs for SCAN IP addresses associated with the VM cluster. +* `shape` - The hardware model name of the Exadata infrastructure running the VM cluster. +* `status` - The current lifecycle status of the VM cluster. +* `statusReason` - Additional information regarding the current status of the VM cluster. +* `storageSizeInGbs` - The local node storage allocated to the VM cluster, in gigabytes (GB). +* `systemVersion` - The operating system version of the image chosen for the VM cluster. +* `vipIds` - The virtual IP (VIP) addresses assigned to the VM cluster. CRS assigns one VIP per node for failover support. +* `createdAt` - The timestamp when the VM cluster was created. +* `computeModel` - The compute model used when the instance is created or cloned — either ECPU or OCPU. ECPU is a virtualized compute unit; OCPU is a physical processor core with hyper-threading. 
+* `tagsAll` - The combined set of user-defined and provider-defined tags. + +## Timeouts + +[Configuration options](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts): + +* `create` - (Default `24h`) +* `update` - (Default `24h`) +* `delete` - (Default `24h`) + +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import OpenSearch Ingestion Pipeline using the `id`. For example: + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. + */ +import { OdbCloudVmCluster } from "./.gen/providers/aws/odb-cloud-vm-cluster"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + OdbCloudVmCluster.generateConfigForImport(this, "example", "example"); + } +} + +``` + +Using `terraform import`, import cloud vm cluster using the `id`. For example: + +```console +% terraform import aws_odb_cloud_vm_cluster.example example +``` + + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/odb_network.html.markdown b/website/docs/cdktf/typescript/r/odb_network.html.markdown new file mode 100644 index 000000000000..a384dc5164bf --- /dev/null +++ b/website/docs/cdktf/typescript/r/odb_network.html.markdown @@ -0,0 +1,122 @@ +--- +subcategory: "Oracle Database@AWS" +layout: "AWS: aws_odb_network" +page_title: "AWS: aws_odb_network" +description: |- + Terraform resource for managing odb network of an Oracle Database@AWS. +--- + + + +# Resource: aws_odb_network + +Terraform resource for managing odb Network resource in AWS for Oracle Database@AWS. + +## Example Usage + +### Basic Usage + +```typescript +// DO NOT EDIT. 
Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. + */ +import { OdbNetwork } from "./.gen/providers/aws/odb-network"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + new OdbNetwork(this, "example", { + availabilityZoneId: "use1-az6", + backupSubnetCidr: "10.2.1.0/24", + clientSubnetCidr: "10.2.0.0/24", + displayName: "odb-my-net", + s3Access: "DISABLED", + tags: { + env: "dev", + }, + zeroEtlAccess: "DISABLED", + }); + } +} + +``` + +## Argument Reference + +The following arguments are required: + +* `displayName` - (Required) The user-friendly name for the odb network. Changing this will force terraform to create a new resource. +* `availabilityZoneId` - (Required) The AZ ID of the AZ where the ODB network is located. Changing this will force terraform to create new resource. +* `clientSubnetCidr` - (Required) The CIDR notation for the network resource. Changing this will force terraform to create new resource. +* `backupSubnetCidr` - (Required) The CIDR range of the backup subnet for the ODB network. Changing this will force terraform to create new resource. +* `s3Access` - (Required) Specifies the configuration for Amazon S3 access from the ODB network. +* `zeroEtlAccess` - (Required) Specifies the configuration for Zero-ETL access from the ODB network. + +The following arguments are optional: + +* `customDomainName` - (Optional) The name of the custom domain that the network is located. Custom_domain_name and default_dns_prefix both can't be given. Changing this will force terraform to create new resource. +* `availabilityZone` - (Optional) The name of the Availability Zone (AZ) where the odb network is located. 
Changing this will force terraform to create new resource. Make sure availability_zone maps correctly with availability_zone_id. +* `s3PolicyDocument` - (Optional) Specifies the endpoint policy for Amazon S3 access from the ODB network. +* `defaultDnsPrefix` - (Optional) The default DNS prefix for the network resource. Changing this will force terraform to create new resource. +* `tags` - (Optional) A map of tags to assign to the odb network. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). + +## Attribute Reference + +This resource exports the following attributes in addition to the arguments above: + +* `id` - Unique identifier of the odb network resource. +* `arn` - Amazon Resource Name (ARN) of the odb network resource. +* `ociDnsForwardingConfigs` - The DNS forwarding configuration for the ODB network. +* `peeredCidrs` - The list of CIDR ranges from the peered VPC that are allowed access to the ODB network. Refer to the odb network peering documentation. +* `ociNetworkAnchorId` - The unique identifier of the OCI network anchor for the ODB network. +* `ociNetworkAnchorUrl` - The URL of the OCI network anchor for the ODB network. +* `ociResourceAnchorName` - The name of the OCI resource anchor for the ODB network. +* `ociVcnId` - The unique Oracle Cloud Identifier (OCID) of the OCI VCN for the ODB network. +* `ociVcnUrl` - The URL of the OCI VCN for the ODB network. 
+* `percentProgress` - The amount of progress made on the current operation on the ODB network, expressed as a percentage. +* `managedServices` - The managed services configuration for the ODB network. +* `status` - The status of the network resource. +* `statusReason` - Additional information about the current status of the ODB network. +* `createdAt` - The date and time when the ODB network was created. + +## Timeouts + +[Configuration options](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts): + +* `create` - (Default `24h`) +* `update` - (Default `24h`) +* `delete` - (Default `24h`) + +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Odb Network using the `id`. For example: + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. + */ +import { OdbNetwork } from "./.gen/providers/aws/odb-network"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + OdbNetwork.generateConfigForImport(this, "example", "example"); + } +} + +``` + +Using `terraform import`, import Odb Network using the `id`. 
For example: + +```console +% terraform import aws_odb_network.example example +``` + + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/odb_network_peering_connection.html.markdown b/website/docs/cdktf/typescript/r/odb_network_peering_connection.html.markdown new file mode 100644 index 000000000000..4f96703add82 --- /dev/null +++ b/website/docs/cdktf/typescript/r/odb_network_peering_connection.html.markdown @@ -0,0 +1,112 @@ +--- +subcategory: "Oracle Database@AWS" +layout: "AWS: aws_odb_network_peering_connection" +page_title: "AWS: aws_odb_network_peering_connection" +description: |- + Terraform resource for managing oracle database network peering resource in AWS. +--- + + + +# Resource: aws_odb_network_peering_connection + +Terraform resource for managing oracle database network peering resource in AWS. + +You can find out more about Oracle Database@AWS from [User Guide](https://docs.aws.amazon.com/odb/latest/UserGuide/what-is-odb.html). + +## Example Usage + +### Basic Usage + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. + */ +import { OdbNetworkPeeringConnection } from "./.gen/providers/aws/odb-network-peering-connection"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + new OdbNetworkPeeringConnection(this, "example", { + displayName: "example", + odbNetworkId: "my-odb-network-id", + peerNetworkId: "my-vpc-id", + tags: { + env: "dev", + }, + }); + } +} + +``` + +## Argument Reference + +The following arguments are required: + +* `odbNetworkId` - (Required) The unique identifier of the ODB network that initiates the peering connection. A sample ID is `odbpcx-abcdefgh12345678`. 
Changing this will force Terraform to create a new resource. +* `peerNetworkId` - (Required) The unique identifier of the peer network (for example, a VPC ID). Changing this will force Terraform to create a new resource. +* `displayName` - (Required) Display name of the ODB network peering connection. Changing this will force Terraform to create a new resource. + +The following arguments are optional: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `tags` - (Optional) A map of tags to assign to the resource. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. + +## Attribute Reference + +This resource exports the following attributes in addition to the arguments above: + +* `status` - Status of the ODB network peering connection. +* `statusReason` - The reason for the current status of the ODB peering connection. +* `odbNetworkArn` - ARN of the ODB network. +* `peerNetworkArn` - ARN of the peer network. +* `odbPeeringConnectionType` - Type of the ODB peering connection. +* `createdAt` - Created time of the ODB network peering connection. +* `percentProgress` - Progress of the ODB network peering connection. +* `tagsAll` - A map of tags assigned to the resource, including inherited tags. 
+ +## Timeouts + +[Configuration options](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts): + +* `create` - (Default `24h`) +* `update` - (Default `24h`) +* `delete` - (Default `24h`) + +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import ODB Network Peering Connection using the `id`. For example: + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. + */ +import { OdbNetworkPeeringConnection } from "./.gen/providers/aws/odb-network-peering-connection"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + OdbNetworkPeeringConnection.generateConfigForImport( + this, + "example", + "example" + ); + } +} + +``` + +Using `terraform import`, import odb network peering using the `id`. 
For example: + +```console +% terraform import aws_odb_network_peering_connection.example example +``` + + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/opensearch_authorize_vpc_endpoint_access.html.markdown b/website/docs/cdktf/typescript/r/opensearch_authorize_vpc_endpoint_access.html.markdown index 187afcb63929..8820d6c36761 100644 --- a/website/docs/cdktf/typescript/r/opensearch_authorize_vpc_endpoint_access.html.markdown +++ b/website/docs/cdktf/typescript/r/opensearch_authorize_vpc_endpoint_access.html.markdown @@ -41,8 +41,9 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -The following arguments are required: +This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `account` - (Required) AWS account ID to grant access to. * `domainName` - (Required) Name of OpenSearch Service domain to provide access to. @@ -59,7 +60,7 @@ This resource exports the following attributes in addition to the arguments abov ## Import -In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import OpenSearch Authorize Vpc Endpoint Access using the `example_id_arg`. For example: +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import OpenSearch Authorize Vpc Endpoint Access using the `domainName`. For example: ```typescript // DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug @@ -83,10 +84,10 @@ class MyConvertedCode extends TerraformStack { ``` -Using `terraform import`, import OpenSearch Authorize Vpc Endpoint Access using the `example_id_arg`. 
For example: +Using `terraform import`, import OpenSearch Authorize Vpc Endpoint Access using the `domainName`. For example: ```console % terraform import aws_opensearch_authorize_vpc_endpoint_access.example authorize_vpc_endpoint_access-id-12345678 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/opensearch_domain.html.markdown b/website/docs/cdktf/typescript/r/opensearch_domain.html.markdown index 3f59ea372f74..d40324d6bca5 100644 --- a/website/docs/cdktf/typescript/r/opensearch_domain.html.markdown +++ b/website/docs/cdktf/typescript/r/opensearch_domain.html.markdown @@ -108,7 +108,7 @@ class MyConvertedCode extends TerraformStack { ], resources: [ "arn:aws:es:${" + - dataAwsRegionCurrent.name + + dataAwsRegionCurrent.region + "}:${" + current.accountId + "}:domain/${" + @@ -272,7 +272,7 @@ class MyConvertedCode extends TerraformStack { ], resources: [ "arn:aws:es:${" + - dataAwsRegionCurrent.name + + dataAwsRegionCurrent.region + "}:${" + current.accountId + "}:domain/${" + @@ -440,6 +440,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `accessPolicies` - (Optional) IAM policy document specifying the access policies for the domain. * `advancedOptions` - (Optional) Key-value string pairs to specify advanced configuration options. Note that the values for these configuration options must be strings (wrapped in quotes) or they may be wrong and cause a perpetual diff, causing Terraform to want to recreate your OpenSearch domain on every apply. 
* `advancedSecurityOptions` - (Optional) Configuration block for [fine-grained access control](https://docs.aws.amazon.com/opensearch-service/latest/developerguide/fgac.html). Detailed below. @@ -616,7 +617,6 @@ This resource exports the following attributes in addition to the arguments abov * `endpointV2` - V2 domain endpoint that works with both IPv4 and IPv6 addresses, used to submit index, search, and data upload requests. * `dashboardEndpoint` - Domain-specific endpoint for Dashboard without https scheme. * `dashboardEndpointV2` - V2 domain endpoint for Dashboard that works with both IPv4 and IPv6 addresses, without https scheme. -* `kibanaEndpoint` - (**Deprecated**) Domain-specific endpoint for kibana without https scheme. Use the `dashboardEndpoint` attribute instead. * `tagsAll` - Map of tags assigned to the resource, including those inherited from the provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). * `vpc_options.0.availability_zones` - If the domain was created inside a VPC, the names of the availability zones the configured `subnetIds` were created inside. * `vpc_options.0.vpc_id` - If the domain was created inside a VPC, the ID of the VPC. @@ -657,4 +657,4 @@ Using `terraform import`, import OpenSearch domains using the `domainName`. 
For % terraform import aws_opensearch_domain.example domain_name ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/opensearch_domain_policy.html.markdown b/website/docs/cdktf/typescript/r/opensearch_domain_policy.html.markdown index 3fc1bbde729e..ee4addad1e42 100644 --- a/website/docs/cdktf/typescript/r/opensearch_domain_policy.html.markdown +++ b/website/docs/cdktf/typescript/r/opensearch_domain_policy.html.markdown @@ -73,6 +73,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `accessPolicies` - (Optional) IAM policy document specifying the access policies for the domain * `domainName` - (Required) Name of the domain. @@ -87,4 +88,36 @@ This resource exports no additional attributes. * `update` - (Default `180m`) * `delete` - (Default `90m`) - \ No newline at end of file +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import OpenSearch Domain Policy using `domainName` prefixed with `esd-policy-`. For example: + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. 
+ */ +import { OpensearchDomainPolicy } from "./.gen/providers/aws/opensearch-domain-policy"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + OpensearchDomainPolicy.generateConfigForImport( + this, + "example", + "esd-policy-tf-test" + ); + } +} + +``` + +Using `terraform import`, import OpenSearch Domain Policy using `domainName` prefixed with `esd-policy-`. For example: + +```console +% terraform import aws_opensearch_domain_policy.example esd-policy-tf-test +``` + + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/opensearch_domain_saml_options.html.markdown b/website/docs/cdktf/typescript/r/opensearch_domain_saml_options.html.markdown index 2df67d7d695e..736357b851cd 100644 --- a/website/docs/cdktf/typescript/r/opensearch_domain_saml_options.html.markdown +++ b/website/docs/cdktf/typescript/r/opensearch_domain_saml_options.html.markdown @@ -68,6 +68,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `samlOptions` - (Optional) SAML authentication options for an AWS OpenSearch Domain. ### saml_options @@ -130,4 +131,4 @@ Using `terraform import`, import OpenSearch domains using the `domainName`. 
For % terraform import aws_opensearch_domain_saml_options.example domain_name ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/opensearch_inbound_connection_accepter.html.markdown b/website/docs/cdktf/typescript/r/opensearch_inbound_connection_accepter.html.markdown index 459a1d9d14c0..d89094327717 100644 --- a/website/docs/cdktf/typescript/r/opensearch_inbound_connection_accepter.html.markdown +++ b/website/docs/cdktf/typescript/r/opensearch_inbound_connection_accepter.html.markdown @@ -40,12 +40,12 @@ class MyConvertedCode extends TerraformStack { localDomainInfo: { domainName: localDomain.domainName, ownerId: Token.asString(current.accountId), - region: Token.asString(dataAwsRegionCurrent.name), + region: Token.asString(dataAwsRegionCurrent.region), }, remoteDomainInfo: { domainName: remoteDomain.domainName, ownerId: Token.asString(current.accountId), - region: Token.asString(dataAwsRegionCurrent.name), + region: Token.asString(dataAwsRegionCurrent.region), }, }); const awsOpensearchInboundConnectionAccepterFoo = @@ -63,6 +63,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `connectionId` - (Required, Forces new resource) Specifies the ID of the connection to accept. 
## Attribute Reference @@ -111,4 +112,4 @@ Using `terraform import`, import AWS Opensearch Inbound Connection Accepters usi % terraform import aws_opensearch_inbound_connection_accepter.foo connection-id ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/opensearch_outbound_connection.html.markdown b/website/docs/cdktf/typescript/r/opensearch_outbound_connection.html.markdown index cdfa5a902181..02067561cc58 100644 --- a/website/docs/cdktf/typescript/r/opensearch_outbound_connection.html.markdown +++ b/website/docs/cdktf/typescript/r/opensearch_outbound_connection.html.markdown @@ -40,12 +40,12 @@ class MyConvertedCode extends TerraformStack { localDomainInfo: { domainName: localDomain.domainName, ownerId: Token.asString(current.accountId), - region: Token.asString(dataAwsRegionCurrent.name), + region: Token.asString(dataAwsRegionCurrent.region), }, remoteDomainInfo: { domainName: remoteDomain.domainName, ownerId: Token.asString(current.accountId), - region: Token.asString(dataAwsRegionCurrent.name), + region: Token.asString(dataAwsRegionCurrent.region), }, }); } @@ -57,6 +57,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `connectionAlias` - (Required, Forces new resource) Specifies the connection alias that will be used by the customer for this connection. * `connectionMode` - (Required, Forces new resource) Specifies the connection mode. Accepted values are `DIRECT` or `VPC_ENDPOINT`. * `acceptConnection` - (Optional, Forces new resource) Accepts the connection. 
@@ -134,4 +135,4 @@ Using `terraform import`, import AWS Opensearch Outbound Connections using the O % terraform import aws_opensearch_outbound_connection.foo connection-id ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/opensearch_package.html.markdown b/website/docs/cdktf/typescript/r/opensearch_package.html.markdown index cd4793bab4c4..3d3e8218689e 100644 --- a/website/docs/cdktf/typescript/r/opensearch_package.html.markdown +++ b/website/docs/cdktf/typescript/r/opensearch_package.html.markdown @@ -62,8 +62,10 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `engineVersion` - (Optional, Forces new resources) Engine version that the package is compatible with. This argument is required and only valid when `packageType` is `ZIP-PLUGIN`. Format: `OpenSearch_X.Y` or `Elasticsearch_X.Y`, where `X` and `Y` are the major and minor version numbers, respectively. * `packageName` - (Required, Forces new resource) Unique name for the package. -* `packageType` - (Required, Forces new resource) The type of package. +* `packageType` - (Required, Forces new resource) The type of package. Valid values are `TXT-DICTIONARY`, `ZIP-PLUGIN`, `PACKAGE-LICENSE` and `PACKAGE-CONFIG`. * `packageSource` - (Required, Forces new resource) Configuration block for the package source options. * `packageDescription` - (Optional, Forces new resource) Description of the package. @@ -107,4 +109,4 @@ Using `terraform import`, import AWS Opensearch Packages using the Package ID. 
F % terraform import aws_opensearch_package.example package-id ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/opensearch_package_association.html.markdown b/website/docs/cdktf/typescript/r/opensearch_package_association.html.markdown index 013748ed86c5..03f579d23928 100644 --- a/website/docs/cdktf/typescript/r/opensearch_package_association.html.markdown +++ b/website/docs/cdktf/typescript/r/opensearch_package_association.html.markdown @@ -61,6 +61,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `packageId` - (Required, Forces new resource) Internal ID of the package to associate with a domain. * `domainName` - (Required, Forces new resource) Name of the domain to associate the package with. @@ -77,4 +78,4 @@ This resource exports the following attributes in addition to the arguments abov * `create` - (Default `10m`) * `delete` - (Default `10m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/opensearch_vpc_endpoint.html.markdown b/website/docs/cdktf/typescript/r/opensearch_vpc_endpoint.html.markdown index eb8f330870cc..b093c33d4b31 100644 --- a/website/docs/cdktf/typescript/r/opensearch_vpc_endpoint.html.markdown +++ b/website/docs/cdktf/typescript/r/opensearch_vpc_endpoint.html.markdown @@ -47,6 +47,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `domainArn` - (Required, Forces new resource) Specifies the Amazon Resource Name (ARN) of the domain to create the endpoint for * `vpcOptions` - (Required) Options to specify the subnets and security groups for the endpoint. @@ -102,4 +103,4 @@ Using `terraform import`, import OpenSearch VPC endpoint connections using the ` % terraform import aws_opensearch_vpc_endpoint_connection.example endpoint-id ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/opensearchserverless_access_policy.html.markdown b/website/docs/cdktf/typescript/r/opensearchserverless_access_policy.html.markdown index 55afb353f767..22d1cce5761e 100644 --- a/website/docs/cdktf/typescript/r/opensearchserverless_access_policy.html.markdown +++ b/website/docs/cdktf/typescript/r/opensearchserverless_access_policy.html.markdown @@ -160,6 +160,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` - (Optional) Description of the policy. Typically used to store information about the permissions defined in the policy. 
## Attribute Reference @@ -200,4 +201,4 @@ Using `terraform import`, import OpenSearchServerless Access Policy using the `n % terraform import aws_opensearchserverless_access_policy.example example/data ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/opensearchserverless_collection.html.markdown b/website/docs/cdktf/typescript/r/opensearchserverless_collection.html.markdown index e84ade8324af..e42ecc5f3515 100644 --- a/website/docs/cdktf/typescript/r/opensearchserverless_collection.html.markdown +++ b/website/docs/cdktf/typescript/r/opensearchserverless_collection.html.markdown @@ -68,6 +68,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` - (Optional) Description of the collection. * `standbyReplicas` - (Optional) Indicates whether standby replicas should be used for a collection. One of `ENABLED` or `DISABLED`. Defaults to `ENABLED`. * `tags` - (Optional) A map of tags to assign to the collection. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. @@ -122,4 +123,4 @@ Using `terraform import`, import OpenSearchServerless Collection using the `id`. 
% terraform import aws_opensearchserverless_collection.example example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/opensearchserverless_lifecycle_policy.html.markdown b/website/docs/cdktf/typescript/r/opensearchserverless_lifecycle_policy.html.markdown index 534430f4255e..ad8c5cfd24ee 100644 --- a/website/docs/cdktf/typescript/r/opensearchserverless_lifecycle_policy.html.markdown +++ b/website/docs/cdktf/typescript/r/opensearchserverless_lifecycle_policy.html.markdown @@ -63,6 +63,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` - (Optional) Description of the policy. ## Attribute Reference @@ -103,4 +104,4 @@ Using `terraform import`, import OpenSearch Serverless Lifecycle Policy using th % terraform import aws_opensearchserverless_lifecycle_policy.example example/retention ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/opensearchserverless_security_config.html.markdown b/website/docs/cdktf/typescript/r/opensearchserverless_security_config.html.markdown index 2ab1e037ce39..b00d5f243b77 100644 --- a/website/docs/cdktf/typescript/r/opensearchserverless_security_config.html.markdown +++ b/website/docs/cdktf/typescript/r/opensearchserverless_security_config.html.markdown @@ -52,6 +52,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` - (Optional) Description of the security configuration. ### saml_options @@ -99,4 +100,4 @@ Using `terraform import`, import OpenSearchServerless Access Policy using the `n % terraform import aws_opensearchserverless_security_config.example saml/123456789012/example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/opensearchserverless_security_policy.html.markdown b/website/docs/cdktf/typescript/r/opensearchserverless_security_policy.html.markdown index a873d28a3b86..ce8c7dbf1766 100644 --- a/website/docs/cdktf/typescript/r/opensearchserverless_security_policy.html.markdown +++ b/website/docs/cdktf/typescript/r/opensearchserverless_security_policy.html.markdown @@ -276,6 +276,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` - (Optional) Description of the policy. Typically used to store information about the permissions defined in the policy. 
## Attribute Reference @@ -316,4 +317,4 @@ Using `terraform import`, import OpenSearchServerless Security Policy using the % terraform import aws_opensearchserverless_security_policy.example example/encryption ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/opensearchserverless_vpc_endpoint.html.markdown b/website/docs/cdktf/typescript/r/opensearchserverless_vpc_endpoint.html.markdown index e844f1440a95..948c60733bd3 100644 --- a/website/docs/cdktf/typescript/r/opensearchserverless_vpc_endpoint.html.markdown +++ b/website/docs/cdktf/typescript/r/opensearchserverless_vpc_endpoint.html.markdown @@ -48,6 +48,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `securityGroupIds` - (Optional) One or more security groups that define the ports, protocols, and sources for inbound traffic that you are authorizing into your endpoint. Up to 5 security groups can be provided. 
## Attribute Reference @@ -96,4 +97,4 @@ Using `terraform import`, import OpenSearchServerless Vpc Endpointa using the `i % terraform import aws_opensearchserverless_vpc_endpoint.example vpce-8012925589 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/organizations_account.html.markdown b/website/docs/cdktf/typescript/r/organizations_account.html.markdown index 858f762a34fb..fb0e4c57db9b 100644 --- a/website/docs/cdktf/typescript/r/organizations_account.html.markdown +++ b/website/docs/cdktf/typescript/r/organizations_account.html.markdown @@ -75,6 +75,31 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_organizations_account.example + identity = { + id = "111111111111" + } +} + +resource "aws_organizations_account" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `id` (String) ID of the AWS Organizations account. + +#### Optional + +* `accountId` (String) AWS Account where this resource is managed. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import the AWS member account using the `accountId`. For example: ```typescript @@ -91,7 +116,7 @@ class MyConvertedCode extends TerraformStack { super(scope, name); OrganizationsAccount.generateConfigForImport( this, - "myAccount", + "example", "111111111111" ); } @@ -102,13 +127,13 @@ class MyConvertedCode extends TerraformStack { Using `terraform import`, import the AWS member account using the `accountId`. 
For example: ```console -% terraform import aws_organizations_account.my_account 111111111111 +% terraform import aws_organizations_account.example 111111111111 ``` To import accounts that have set iam_user_access_to_billing, use the following: ```console -% terraform import aws_organizations_account.my_account 111111111111_ALLOW +% terraform import aws_organizations_account.example 111111111111_ALLOW ``` Certain resource arguments, like `roleName`, do not have an Organizations API method for reading the information after account creation. If the argument is set in the Terraform configuration on an imported resource, Terraform will always show a difference. To workaround this behavior, either omit the argument from the Terraform configuration or use [`ignore_changes`](https://www.terraform.io/docs/configuration/meta-arguments/lifecycle.html#ignore_changes) to hide the difference. For example: @@ -138,4 +163,4 @@ class MyConvertedCode extends TerraformStack { ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/organizations_delegated_administrator.html.markdown b/website/docs/cdktf/typescript/r/organizations_delegated_administrator.html.markdown index 77cac943ea3d..88ad8448dce2 100644 --- a/website/docs/cdktf/typescript/r/organizations_delegated_administrator.html.markdown +++ b/website/docs/cdktf/typescript/r/organizations_delegated_administrator.html.markdown @@ -57,6 +57,33 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. 
For example: + +```terraform +import { + to = aws_organizations_delegated_administrator.example + identity = { + service_principal = "config.amazonaws.com" + delegated_account_id = "123456789012" + } +} + +resource "aws_organizations_delegated_administrator" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `servicePrincipal` (String) Service principal for the AWS service. +* `delegatedAccountId` (String) Account ID to be designated as a delegated administrator. + +#### Optional + +* `accountId` (String) AWS Account where this resource is managed. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import `aws_organizations_delegated_administrator` using the account ID and its service principal. For example: ```typescript @@ -87,4 +114,4 @@ Using `terraform import`, import `aws_organizations_delegated_administrator` usi % terraform import aws_organizations_delegated_administrator.example 123456789012/config.amazonaws.com ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/organizations_organization.html.markdown b/website/docs/cdktf/typescript/r/organizations_organization.html.markdown index 1edf8eca5981..e951beb5a7aa 100644 --- a/website/docs/cdktf/typescript/r/organizations_organization.html.markdown +++ b/website/docs/cdktf/typescript/r/organizations_organization.html.markdown @@ -82,6 +82,31 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute.
For example: + +```terraform +import { + to = aws_organizations_organization.example + identity = { + id = "o-1234567" + } +} + +resource "aws_organizations_organization" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `id` (String) ID of the AWS Organizations organization. + +#### Optional + +* `accountId` (String) AWS Account where this resource is managed. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import the AWS organization using the `id`. For example: ```typescript @@ -98,7 +123,7 @@ class MyConvertedCode extends TerraformStack { super(scope, name); OrganizationsOrganization.generateConfigForImport( this, - "myOrg", + "example", "o-1234567" ); } @@ -109,7 +134,7 @@ class MyConvertedCode extends TerraformStack { Using `terraform import`, import the AWS organization using the `id`. For example: ```console -% terraform import aws_organizations_organization.my_org o-1234567 +% terraform import aws_organizations_organization.example o-1234567 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/organizations_organizational_unit.html.markdown b/website/docs/cdktf/typescript/r/organizations_organizational_unit.html.markdown index 44465d85a87f..39537a189bd2 100644 --- a/website/docs/cdktf/typescript/r/organizations_organizational_unit.html.markdown +++ b/website/docs/cdktf/typescript/r/organizations_organizational_unit.html.markdown @@ -60,6 +60,31 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. 
For example: + +```terraform +import { + to = aws_organizations_organizational_unit.example + identity = { + id = "ou-1234567" + } +} + +resource "aws_organizations_organizational_unit" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `id` (String) ID of the organizational unit. + +#### Optional + +* `accountId` (String) AWS Account where this resource is managed. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import AWS Organizations Organizational Units using the `id`. For example: ```typescript @@ -90,4 +115,4 @@ Using `terraform import`, import AWS Organizations Organizational Units using th % terraform import aws_organizations_organizational_unit.example ou-1234567 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/organizations_policy_attachment.html.markdown b/website/docs/cdktf/typescript/r/organizations_policy_attachment.html.markdown index eee898aa4dfd..0a06afcd2a1c 100644 --- a/website/docs/cdktf/typescript/r/organizations_policy_attachment.html.markdown +++ b/website/docs/cdktf/typescript/r/organizations_policy_attachment.html.markdown @@ -99,6 +99,33 @@ This resource exports no additional attributes. ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_organizations_policy_attachment.example + identity = { + policy_id = "p-12345678" + target_id = "123456789012" + } +} + +resource "aws_organizations_policy_attachment" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `policyId` (String) Organizations policy ID. +* `targetId` (String) Organizations target ID (account, OU, or root). 
+ +#### Optional + +* `accountId` (String) AWS Account where this resource is managed. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import `aws_organizations_policy_attachment` using the target ID and policy ID. For example: With an account target: @@ -117,7 +144,7 @@ class MyConvertedCode extends TerraformStack { super(scope, name); OrganizationsPolicyAttachment.generateConfigForImport( this, - "account", + "example", "123456789012:p-12345678" ); } @@ -130,7 +157,7 @@ Using `terraform import`, import `aws_organizations_policy_attachment` using the With an account target: ```console -% terraform import aws_organizations_policy_attachment.account 123456789012:p-12345678 +% terraform import aws_organizations_policy_attachment.example 123456789012:p-12345678 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/osis_pipeline.html.markdown b/website/docs/cdktf/typescript/r/osis_pipeline.html.markdown index 15fcb9181cd6..b53ada9c389d 100644 --- a/website/docs/cdktf/typescript/r/osis_pipeline.html.markdown +++ b/website/docs/cdktf/typescript/r/osis_pipeline.html.markdown @@ -55,7 +55,7 @@ class MyConvertedCode extends TerraformStack { 'version: "2"\nexample-pipeline:\n source:\n http:\n path: "/example"\n sink:\n - s3:\n aws:\n sts_role_arn: "${' + example.arn + '}"\n region: "${' + - current.name + + current.region + '}"\n bucket: "example"\n threshold:\n event_collect_timeout: "60s"\n codec:\n ndjson:\n\n', pipelineName: "example", }); @@ -102,6 +102,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
* `bufferOptions` - (Optional) Key-value pairs to configure persistent buffering for the pipeline. See [`bufferOptions`](#buffer_options) below. * `encryptionAtRestOptions` - (Optional) Key-value pairs to configure encryption for data that is written to a persistent buffer. See [`encryptionAtRestOptions`](#encryption_at_rest_options) below. * `logPublishingOptions` - (Optional) Key-value pairs to configure log publishing. See [`logPublishingOptions`](#log_publishing_options) below. @@ -175,4 +176,4 @@ Using `terraform import`, import OpenSearch Ingestion Pipeline using the `id`. F % terraform import aws_osis_pipeline.example example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/paymentcryptography_key.html.markdown b/website/docs/cdktf/typescript/r/paymentcryptography_key.html.markdown index 52cf2d8d8317..fbe3aeeea10c 100644 --- a/website/docs/cdktf/typescript/r/paymentcryptography_key.html.markdown +++ b/website/docs/cdktf/typescript/r/paymentcryptography_key.html.markdown @@ -59,6 +59,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `enabled` - (Optional) Whether to enable the key. * `keyCheckValueAlgorithm` - (Optional) Algorithm that AWS Payment Cryptography uses to calculate the key check value (KCV). * `tags` - (Optional) Map of tags assigned to the WorkSpaces Connection Alias. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. 
@@ -76,6 +77,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `decrypt` - (Optional) Whether an AWS Payment Cryptography key can be used to decrypt data. * `deriveKey` - (Optional) Whether an AWS Payment Cryptography key can be used to derive new keys. * `encrypt` - (Optional) Whether an AWS Payment Cryptography key can be used to encrypt data. @@ -106,6 +108,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_paymentcryptography_key.example + identity = { + "arn" = "arn:aws:payment-cryptography:us-east-1:123456789012:key/1234abcd-12ab-34cd-56ef-1234567890ab" + } +} + +resource "aws_paymentcryptography_key" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the Payment Cryptography key. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Payment Cryptography Control Plane Key using the `arn:aws:payment-cryptography:us-east-1:123456789012:key/qtbojf64yshyvyzf`. 
For example: ```typescript @@ -136,4 +159,4 @@ Using `terraform import`, import Payment Cryptography Control Plane Key using th % terraform import aws_paymentcryptography_key.example arn:aws:payment-cryptography:us-east-1:123456789012:key/qtbojf64yshyvyzf ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/paymentcryptography_key_alias.html.markdown b/website/docs/cdktf/typescript/r/paymentcryptography_key_alias.html.markdown index 1c0f4b2aa2c1..c00b9714bd28 100644 --- a/website/docs/cdktf/typescript/r/paymentcryptography_key_alias.html.markdown +++ b/website/docs/cdktf/typescript/r/paymentcryptography_key_alias.html.markdown @@ -69,6 +69,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `keyArn` - (Optional) ARN of the key. ## Attribute Reference @@ -107,4 +108,4 @@ Using `terraform import`, import Payment Cryptography Control Plane Key Alias us % terraform import aws_paymentcryptography_key_alias.example alias/4681482429376900170 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/pinpoint_adm_channel.html.markdown b/website/docs/cdktf/typescript/r/pinpoint_adm_channel.html.markdown index 1bef135d9321..777d2e30c1e4 100644 --- a/website/docs/cdktf/typescript/r/pinpoint_adm_channel.html.markdown +++ b/website/docs/cdktf/typescript/r/pinpoint_adm_channel.html.markdown @@ -46,6 +46,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `applicationId` - (Required) The application ID. * `clientId` - (Required) Client ID (part of OAuth Credentials) obtained via Amazon Developer Account. * `clientSecret` - (Required) Client Secret (part of OAuth Credentials) obtained via Amazon Developer Account. @@ -87,4 +88,4 @@ Using `terraform import`, import Pinpoint ADM Channel using the `application-id` % terraform import aws_pinpoint_adm_channel.channel application-id ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/pinpoint_apns_channel.html.markdown b/website/docs/cdktf/typescript/r/pinpoint_apns_channel.html.markdown index 01418725d7ed..7f503c5c4969 100644 --- a/website/docs/cdktf/typescript/r/pinpoint_apns_channel.html.markdown +++ b/website/docs/cdktf/typescript/r/pinpoint_apns_channel.html.markdown @@ -45,6 +45,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `applicationId` - (Required) The application ID. * `enabled` - (Optional) Whether the channel is enabled or disabled. Defaults to `true`. * `defaultAuthenticationMethod` - (Optional) The default authentication method used for APNs. 
@@ -98,4 +99,4 @@ Using `terraform import`, import Pinpoint APNs Channel using the `application-id % terraform import aws_pinpoint_apns_channel.apns application-id ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/pinpoint_apns_sandbox_channel.html.markdown b/website/docs/cdktf/typescript/r/pinpoint_apns_sandbox_channel.html.markdown index ddd730a31b3d..a0fbbe3263db 100644 --- a/website/docs/cdktf/typescript/r/pinpoint_apns_sandbox_channel.html.markdown +++ b/website/docs/cdktf/typescript/r/pinpoint_apns_sandbox_channel.html.markdown @@ -45,6 +45,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `applicationId` - (Required) The application ID. * `enabled` - (Optional) Whether the channel is enabled or disabled. Defaults to `true`. * `defaultAuthenticationMethod` - (Optional) The default authentication method used for APNs Sandbox. 
@@ -102,4 +103,4 @@ Using `terraform import`, import Pinpoint APNs Sandbox Channel using the `applic % terraform import aws_pinpoint_apns_sandbox_channel.apns_sandbox application-id ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/pinpoint_apns_voip_channel.html.markdown b/website/docs/cdktf/typescript/r/pinpoint_apns_voip_channel.html.markdown index 44a7e1423685..29ca21cfdb42 100644 --- a/website/docs/cdktf/typescript/r/pinpoint_apns_voip_channel.html.markdown +++ b/website/docs/cdktf/typescript/r/pinpoint_apns_voip_channel.html.markdown @@ -45,6 +45,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `applicationId` - (Required) The application ID. * `enabled` - (Optional) Whether the channel is enabled or disabled. Defaults to `true`. * `defaultAuthenticationMethod` - (Optional) The default authentication method used for APNs. 
@@ -102,4 +103,4 @@ Using `terraform import`, import Pinpoint APNs VoIP Channel using the `applicati % terraform import aws_pinpoint_apns_voip_channel.apns_voip application-id ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/pinpoint_apns_voip_sandbox_channel.html.markdown b/website/docs/cdktf/typescript/r/pinpoint_apns_voip_sandbox_channel.html.markdown index f8623afe41ae..0cbdef0359aa 100644 --- a/website/docs/cdktf/typescript/r/pinpoint_apns_voip_sandbox_channel.html.markdown +++ b/website/docs/cdktf/typescript/r/pinpoint_apns_voip_sandbox_channel.html.markdown @@ -45,6 +45,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `applicationId` - (Required) The application ID. * `enabled` - (Optional) Whether the channel is enabled or disabled. Defaults to `true`. * `defaultAuthenticationMethod` - (Optional) The default authentication method used for APNs. 
@@ -102,4 +103,4 @@ Using `terraform import`, import Pinpoint APNs VoIP Sandbox Channel using the `a % terraform import aws_pinpoint_apns_voip_sandbox_channel.apns_voip_sandbox application-id ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/pinpoint_app.html.markdown b/website/docs/cdktf/typescript/r/pinpoint_app.html.markdown index 06b758b9751a..9f51e92d2215 100644 --- a/website/docs/cdktf/typescript/r/pinpoint_app.html.markdown +++ b/website/docs/cdktf/typescript/r/pinpoint_app.html.markdown @@ -45,6 +45,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Optional) The application name. By default generated by Terraform * `namePrefix` - (Optional) The name of the Pinpoint application. Conflicts with `name` * `campaignHook` - (Optional) Specifies settings for invoking an AWS Lambda function that customizes a segment for a campaign @@ -106,4 +107,4 @@ Using `terraform import`, import Pinpoint App using the `application-id`. 
For ex % terraform import aws_pinpoint_app.name application-id ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/pinpoint_baidu_channel.html.markdown b/website/docs/cdktf/typescript/r/pinpoint_baidu_channel.html.markdown index c1385fbc8581..92251732fce0 100644 --- a/website/docs/cdktf/typescript/r/pinpoint_baidu_channel.html.markdown +++ b/website/docs/cdktf/typescript/r/pinpoint_baidu_channel.html.markdown @@ -45,6 +45,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `applicationId` - (Required) The application ID. * `enabled` - (Optional) Specifies whether to enable the channel. Defaults to `true`. * `apiKey` - (Required) Platform credential API key from Baidu. @@ -86,4 +87,4 @@ Using `terraform import`, import Pinpoint Baidu Channel using the `application-i % terraform import aws_pinpoint_baidu_channel.channel application-id ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/pinpoint_email_channel.html.markdown b/website/docs/cdktf/typescript/r/pinpoint_email_channel.html.markdown index 43fe66bcbfe2..0bf43df7abec 100644 --- a/website/docs/cdktf/typescript/r/pinpoint_email_channel.html.markdown +++ b/website/docs/cdktf/typescript/r/pinpoint_email_channel.html.markdown @@ -90,12 +90,13 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `applicationId` - (Required) The application ID. * `enabled` - (Optional) Whether the channel is enabled or disabled. Defaults to `true`. * `configurationSet` - (Optional) The ARN of the Amazon SES configuration set that you want to apply to messages that you send through the channel. * `fromAddress` - (Required) The email address used to send emails from. You can use email only (`user@example.com`) or friendly address (`User `). This field comply with [RFC 5322](https://www.ietf.org/rfc/rfc5322.txt). * `identity` - (Required) The ARN of an identity verified with SES. -* `orchestration_sending_role_arn` - (Optional) The ARN of an IAM role for Amazon Pinpoint to use to send email from your campaigns or journeys through Amazon SES. +* `orchestrationSendingRoleArn` - (Optional) The ARN of an IAM role for Amazon Pinpoint to use to send email from your campaigns or journeys through Amazon SES. * `roleArn` - (Optional) *Deprecated* The ARN of an IAM Role used to submit events to Mobile Analytics' event ingestion service. 
## Attribute Reference @@ -136,4 +137,4 @@ Using `terraform import`, import Pinpoint Email Channel using the `application-i % terraform import aws_pinpoint_email_channel.email application-id ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/pinpoint_email_template.markdown b/website/docs/cdktf/typescript/r/pinpoint_email_template.markdown index e6deedd89024..4aa828ba464d 100644 --- a/website/docs/cdktf/typescript/r/pinpoint_email_template.markdown +++ b/website/docs/cdktf/typescript/r/pinpoint_email_template.markdown @@ -48,8 +48,9 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -The following arguments are required: +This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `templateName` - (Required) name of the message template. A template name must start with an alphanumeric character and can contain a maximum of 128 characters. The characters can be alphanumeric characters, underscores (_), or hyphens (-). Template names are case sensitive. * `emailTemplate` - (Required) Specifies the content and settings for a message template that can be used in messages that are sent through the email channel. 
See [Email Template](#email-template) @@ -107,4 +108,4 @@ Using `terraform import`, import Pinpoint Email Template using the `templateName % terraform import aws_pinpoint_email_template.reset template_name ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/pinpoint_event_stream.html.markdown b/website/docs/cdktf/typescript/r/pinpoint_event_stream.html.markdown index d691c1e8d39c..470587307094 100644 --- a/website/docs/cdktf/typescript/r/pinpoint_event_stream.html.markdown +++ b/website/docs/cdktf/typescript/r/pinpoint_event_stream.html.markdown @@ -91,6 +91,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `applicationId` - (Required) The application ID. * `destinationStreamArn` - (Required) The Amazon Resource Name (ARN) of the Amazon Kinesis stream or Firehose delivery stream to which you want to publish events. * `roleArn` - (Required) The IAM role that authorizes Amazon Pinpoint to publish events to the stream in your account. 
@@ -131,4 +132,4 @@ Using `terraform import`, import Pinpoint Event Stream using the `application-id % terraform import aws_pinpoint_event_stream.stream application-id ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/pinpoint_gcm_channel.html.markdown b/website/docs/cdktf/typescript/r/pinpoint_gcm_channel.html.markdown index ff587950d0e8..2002e4c32da2 100644 --- a/website/docs/cdktf/typescript/r/pinpoint_gcm_channel.html.markdown +++ b/website/docs/cdktf/typescript/r/pinpoint_gcm_channel.html.markdown @@ -45,6 +45,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `applicationId` - (Required) The application ID. * `apiKey` - (Required) Platform credential API key from Google. * `enabled` - (Optional) Whether the channel is enabled or disabled. Defaults to `true`. 
@@ -81,4 +82,4 @@ Using `terraform import`, import Pinpoint GCM Channel using the `application-id` % terraform import aws_pinpoint_gcm_channel.gcm application-id ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/pinpoint_sms_channel.html.markdown b/website/docs/cdktf/typescript/r/pinpoint_sms_channel.html.markdown index 4f12430270e1..ff2cadc0b3d4 100644 --- a/website/docs/cdktf/typescript/r/pinpoint_sms_channel.html.markdown +++ b/website/docs/cdktf/typescript/r/pinpoint_sms_channel.html.markdown @@ -40,6 +40,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `applicationId` - (Required) ID of the application. * `enabled` - (Optional) Whether the channel is enabled or disabled. By default, it is set to `true`. * `senderId` - (Optional) Identifier of the sender for your messages. 
@@ -80,4 +81,4 @@ Using `terraform import`, import the Pinpoint SMS Channel using the `application % terraform import aws_pinpoint_sms_channel.sms application-id ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/pinpointsmsvoicev2_configuration_set.html.markdown b/website/docs/cdktf/typescript/r/pinpointsmsvoicev2_configuration_set.html.markdown index 9f6ad507fbf0..0228e5c3f847 100644 --- a/website/docs/cdktf/typescript/r/pinpointsmsvoicev2_configuration_set.html.markdown +++ b/website/docs/cdktf/typescript/r/pinpointsmsvoicev2_configuration_set.html.markdown @@ -40,6 +40,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The name of the configuration set. * `defaultSenderId` - (Optional) The default sender ID to use for this configuration set. * `defaultMessageType` - (Optional) The default message type. Must either be "TRANSACTIONAL" or "PROMOTIONAL" @@ -84,4 +85,4 @@ Using `terraform import`, import configuration sets using the `name`. 
For exampl % terraform import aws_pinpointsmsvoicev2_configuration_set.example example-configuration-set ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/pinpointsmsvoicev2_opt_out_list.html.markdown b/website/docs/cdktf/typescript/r/pinpointsmsvoicev2_opt_out_list.html.markdown index 5f9c2002968f..3de7fb60abe2 100644 --- a/website/docs/cdktf/typescript/r/pinpointsmsvoicev2_opt_out_list.html.markdown +++ b/website/docs/cdktf/typescript/r/pinpointsmsvoicev2_opt_out_list.html.markdown @@ -38,6 +38,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The name of the opt-out list. * `tags` - (Optional) Key-value map of resource tags. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. @@ -80,4 +81,4 @@ Using `terraform import`, import opt-out lists using the `name`. 
For example: % terraform import aws_pinpointsmsvoicev2_opt_out_list.example example-opt-out-list ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/pinpointsmsvoicev2_phone_number.html.markdown b/website/docs/cdktf/typescript/r/pinpointsmsvoicev2_phone_number.html.markdown index 9b0fc4904342..e13413cb6b3c 100644 --- a/website/docs/cdktf/typescript/r/pinpointsmsvoicev2_phone_number.html.markdown +++ b/website/docs/cdktf/typescript/r/pinpointsmsvoicev2_phone_number.html.markdown @@ -41,6 +41,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `deletionProtectionEnabled` - (Optional) By default this is set to `false`. When set to true the phone number can’t be deleted. * `isoCountryCode` - (Required) The two-character code, in ISO 3166-1 alpha-2 format, for the country or region. * `messageType` - (Required) The type of message. Valid values are `TRANSACTIONAL` for messages that are critical or time-sensitive and `PROMOTIONAL` for messages that aren’t critical or time-sensitive. @@ -49,7 +50,7 @@ This resource supports the following arguments: * `optOutListName` - (Optional) The name of the opt-out list to associate with the phone number. * `registrationId` - (Optional) Use this field to attach your phone number for an external registration process. * `selfManagedOptOutsEnabled` - (Optional) When set to `false` an end recipient sends a message that begins with HELP or STOP to one of your dedicated numbers, AWS End User Messaging SMS and Voice automatically replies with a customizable message and adds the end recipient to the opt-out list. 
When set to true you’re responsible for responding to HELP and STOP requests. You’re also responsible for tracking and honoring opt-out request. -* `twoWayChannelArn` - (Optional) The Amazon Resource Name (ARN) of the two way channel. +* `twoWayChannelArn` - (Optional) Configuration for two-way SMS. Specify an ARN to receive incoming SMS messages, or `connect.[region].amazonaws.com` (with `[region]` replaced by the AWS Region of the Amazon Connect instance) to set Amazon Connect as the inbound destination. * `twoWayChannelEnabled` - (Optional) By default this is set to `false`. When set to `true` you can receive incoming text messages from your end recipients. * `twoWayChannelRole` - (Optional) IAM Role ARN for a service to assume, to be able to post inbound SMS messages. @@ -95,4 +96,4 @@ Using `terraform import`, import phone numbers using the `id`. For example: % terraform import aws_pinpointsmsvoicev2_phone_number.example phone-abcdef0123456789abcdef0123456789 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/pipes_pipe.html.markdown b/website/docs/cdktf/typescript/r/pipes_pipe.html.markdown index 07bd90239bf1..d6a5f6299b52 100644 --- a/website/docs/cdktf/typescript/r/pipes_pipe.html.markdown +++ b/website/docs/cdktf/typescript/r/pipes_pipe.html.markdown @@ -275,6 +275,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` - (Optional) A description of the pipe. At most 512 characters. * `desiredState` - (Optional) The state the pipe should be in. One of: `RUNNING`, `STOPPED`. * `enrichment` - (Optional) Enrichment resource of the pipe (typically an ARN). 
Read more about enrichment in the [User Guide](https://docs.aws.amazon.com/eventbridge/latest/userguide/eb-pipes.html#pipes-enrichment). @@ -687,4 +688,4 @@ Using `terraform import`, import pipes using the `name`. For example: % terraform import aws_pipes_pipe.example my-pipe ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/placement_group.html.markdown b/website/docs/cdktf/typescript/r/placement_group.html.markdown index b64bd1843613..f426cc779eb6 100644 --- a/website/docs/cdktf/typescript/r/placement_group.html.markdown +++ b/website/docs/cdktf/typescript/r/placement_group.html.markdown @@ -40,6 +40,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The name of the placement group. * `partitionCount` - (Optional) The number of partitions to create in the placement group. Can only be specified when the `strategy` is set to @@ -90,4 +91,4 @@ Using `terraform import`, import placement groups using the `name`. 
For example: % terraform import aws_placement_group.prod_pg production-placement-group ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/prometheus_alert_manager_definition.html.markdown b/website/docs/cdktf/typescript/r/prometheus_alert_manager_definition.html.markdown index 98e9ee3921d5..4243ad5126f0 100644 --- a/website/docs/cdktf/typescript/r/prometheus_alert_manager_definition.html.markdown +++ b/website/docs/cdktf/typescript/r/prometheus_alert_manager_definition.html.markdown @@ -45,6 +45,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `workspaceId` - (Required) ID of the prometheus workspace the alert manager definition should be linked to * `definition` - (Required) the alert manager definition that you want to be applied. See more [in AWS Docs](https://docs.aws.amazon.com/prometheus/latest/userguide/AMP-alert-manager.html). 
@@ -84,4 +85,4 @@ Using `terraform import`, import the prometheus alert manager definition using t % terraform import aws_prometheus_alert_manager_definition.demo ws-C6DCB907-F2D7-4D96-957B-66691F865D8B ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/prometheus_query_logging_configuration.html.markdown b/website/docs/cdktf/typescript/r/prometheus_query_logging_configuration.html.markdown new file mode 100644 index 000000000000..ee4450269594 --- /dev/null +++ b/website/docs/cdktf/typescript/r/prometheus_query_logging_configuration.html.markdown @@ -0,0 +1,130 @@ +--- +subcategory: "AMP (Managed Prometheus)" +layout: "aws" +page_title: "AWS: aws_prometheus_query_logging_configuration" +description: |- + Manages an Amazon Managed Service for Prometheus (AMP) Query Logging Configuration. +--- + + + +# Resource: aws_prometheus_query_logging_configuration + +Manages an Amazon Managed Service for Prometheus (AMP) Query Logging Configuration. + +## Example Usage + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { Token, TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. 
+ */ +import { CloudwatchLogGroup } from "./.gen/providers/aws/cloudwatch-log-group"; +import { PrometheusQueryLoggingConfiguration } from "./.gen/providers/aws/prometheus-query-logging-configuration"; +import { PrometheusWorkspace } from "./.gen/providers/aws/prometheus-workspace"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + const example = new CloudwatchLogGroup(this, "example", { + name: "/aws/prometheus/query-logs/example", + }); + const awsPrometheusWorkspaceExample = new PrometheusWorkspace( + this, + "example_1", + { + alias: "example", + } + ); + /*This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match.*/ + awsPrometheusWorkspaceExample.overrideLogicalId("example"); + const awsPrometheusQueryLoggingConfigurationExample = + new PrometheusQueryLoggingConfiguration(this, "example_2", { + destination: [ + { + cloudwatchLogs: [ + { + logGroupArn: "${" + example.arn + "}:*", + }, + ], + filters: [ + { + qspThreshold: 1000, + }, + ], + }, + ], + workspaceId: Token.asString(awsPrometheusWorkspaceExample.id), + }); + /*This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match.*/ + awsPrometheusQueryLoggingConfigurationExample.overrideLogicalId("example"); + } +} + +``` + +## Argument Reference + +This resource supports the following arguments: + +* `destination` - (Required) Configuration block for the logging destinations. See [`destinations`](#destinations). +* `workspaceId` - (Required) The ID of the AMP workspace for which to configure query logging. + +The following arguments are optional: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). + +### `destination` + +* `cloudwatchLogs` - (Required) Configuration block for CloudWatch Logs destination. See [`cloudwatchLogs`](#cloudwatch_logs). +* `filters` - (Required) A list of filter configurations that specify which logs should be sent to the destination. See [`filters`](#filters). + +#### `cloudwatchLogs` + +* `logGroupArn` - (Required) The ARN of the CloudWatch log group to which query logs will be sent. + +#### `filters` + +* `qspThreshold` - (Required) The Query Samples Processed (QSP) threshold above which queries will be logged. Queries processing more samples than this threshold will be captured in logs. + +## Attribute Reference + +This resource exports no additional attributes. + +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import the Query Logging Configuration using the workspace ID. For example: + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. 
+ */ +import { PrometheusQueryLoggingConfiguration } from "./.gen/providers/aws/prometheus-query-logging-configuration"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + PrometheusQueryLoggingConfiguration.generateConfigForImport( + this, + "example", + "ws-12345678-90ab-cdef-1234-567890abcdef" + ); + } +} + +``` + +## Timeouts + +[Configuration options](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts): + +- `create` - (Default `5m`) +- `update` - (Default `5m`) +- `delete` - (Default `5m`) + + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/prometheus_rule_group_namespace.html.markdown b/website/docs/cdktf/typescript/r/prometheus_rule_group_namespace.html.markdown index 29819cb05d20..60de529f8d57 100644 --- a/website/docs/cdktf/typescript/r/prometheus_rule_group_namespace.html.markdown +++ b/website/docs/cdktf/typescript/r/prometheus_rule_group_namespace.html.markdown @@ -45,6 +45,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `data` - (Required) the rule group namespace data that you want to be applied. See more [in AWS Docs](https://docs.aws.amazon.com/prometheus/latest/userguide/AMP-Ruler.html). * `name` - (Required) The name of the rule group namespace. * `tags` - (Optional) Map of tags assigned to the resource. If configured with a provider [`defaultTags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. 
@@ -89,4 +90,4 @@ Using `terraform import`, import the prometheus rule group namespace using the a % terraform import aws_prometheus_rule_group_namespace.demo arn:aws:aps:us-west-2:123456789012:rulegroupsnamespace/IDstring/namespace_name ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/prometheus_scraper.html.markdown b/website/docs/cdktf/typescript/r/prometheus_scraper.html.markdown index 33d7e04072fa..5c7e99020c01 100644 --- a/website/docs/cdktf/typescript/r/prometheus_scraper.html.markdown +++ b/website/docs/cdktf/typescript/r/prometheus_scraper.html.markdown @@ -259,14 +259,15 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference The following arguments are required: * `destination` - (Required) Configuration block for the managed scraper to send metrics to. See [`destination`](#destination). * `scrapeConfiguration` - (Required) The configuration file to use in the new scraper. For more information, see [Scraper configuration](https://docs.aws.amazon.com/prometheus/latest/userguide/AMP-collector-how-to.html#AMP-collector-configuration). * `source` - (Required) Configuration block to specify where the managed scraper will collect metrics from. See [`source`](#source). The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
* `alias` - (Optional) a name to associate with the managed scraper. This is for your use, and does not need to be unique. * `roleConfiguration` - (Optional) Configuration block to enable writing to an Amazon Managed Service for Prometheus workspace in a different account. See [`roleConfiguration`](#role_configuration) below. @@ -344,4 +346,4 @@ For example: % terraform import aws_prometheus_scraper.example s-0123abc-0000-0123-a000-000000000000 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/prometheus_workspace.html.markdown b/website/docs/cdktf/typescript/r/prometheus_workspace.html.markdown index ec7997e4cd4f..531c0d27e19a 100644 --- a/website/docs/cdktf/typescript/r/prometheus_workspace.html.markdown +++ b/website/docs/cdktf/typescript/r/prometheus_workspace.html.markdown @@ -109,6 +109,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `alias` - (Optional) The alias of the prometheus workspace. See more [in AWS Docs](https://docs.aws.amazon.com/prometheus/latest/userguide/AMP-onboard-create-workspace.html). * `kmsKeyArn` - (Optional) The ARN for the KMS encryption key. If this argument is not provided, then the AWS owned encryption key will be used to encrypt the data in the workspace. See more [in AWS Docs](https://docs.aws.amazon.com/prometheus/latest/userguide/encryption-at-rest-Amazon-Service-Prometheus.html) * `loggingConfiguration` - (Optional) Logging configuration for the workspace. See [Logging Configuration](#logging-configuration) below for details. @@ -161,4 +162,4 @@ Using `terraform import`, import AMP Workspaces using the identifier. 
For exampl % terraform import aws_prometheus_workspace.demo ws-C6DCB907-F2D7-4D96-957B-66691F865D8B ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/prometheus_workspace_configuration.html.markdown b/website/docs/cdktf/typescript/r/prometheus_workspace_configuration.html.markdown index 522e53dccd8b..67b8005b9a93 100644 --- a/website/docs/cdktf/typescript/r/prometheus_workspace_configuration.html.markdown +++ b/website/docs/cdktf/typescript/r/prometheus_workspace_configuration.html.markdown @@ -23,42 +23,38 @@ import { TerraformStack } from "cdktf"; * Provider bindings are generated by running `cdktf get`. * See https://cdk.tf/provider-generation for more details. */ -import { PrometheusWorkspaceConfiguration } from "./.gen/providers/aws/"; import { PrometheusWorkspace } from "./.gen/providers/aws/prometheus-workspace"; +import { PrometheusWorkspaceConfiguration } from "./.gen/providers/aws/prometheus-workspace-configuration"; class MyConvertedCode extends TerraformStack { constructor(scope: Construct, name: string) { super(scope, name); const example = new PrometheusWorkspace(this, "example", {}); const awsPrometheusWorkspaceConfigurationExample = new PrometheusWorkspaceConfiguration(this, "example_1", { - limits_per_label_set: [ + limitsPerLabelSet: [ { - label_set: [ - { - env: "dev", - }, - ], + labelSet: { + env: "dev", + }, limits: [ { - max_series: 100000, + maxSeries: 100000, }, ], }, { - label_set: [ - { - env: "prod", - }, - ], + labelSet: { + env: "prod", + }, limits: [ { - max_series: 400000, + maxSeries: 400000, }, ], }, ], - retention_period_in_days: 60, - workspace_id: example.id, + retentionPeriodInDays: 60, + workspaceId: example.id, }); /*This allows the Terraform resource name to match the original name. 
You can remove the call if you don't need them to match.*/ awsPrometheusWorkspaceConfigurationExample.overrideLogicalId("example"); @@ -81,25 +77,25 @@ import { TerraformStack } from "cdktf"; * Provider bindings are generated by running `cdktf get`. * See https://cdk.tf/provider-generation for more details. */ -import { PrometheusWorkspaceConfiguration } from "./.gen/providers/aws/"; import { PrometheusWorkspace } from "./.gen/providers/aws/prometheus-workspace"; +import { PrometheusWorkspaceConfiguration } from "./.gen/providers/aws/prometheus-workspace-configuration"; class MyConvertedCode extends TerraformStack { constructor(scope: Construct, name: string) { super(scope, name); const example = new PrometheusWorkspace(this, "example", {}); const awsPrometheusWorkspaceConfigurationExample = new PrometheusWorkspaceConfiguration(this, "example_1", { - limits_per_label_set: [ + limitsPerLabelSet: [ { - label_set: [{}], + labelSet: {}, limits: [ { - max_series: 50000, + maxSeries: 50000, }, ], }, ], - workspace_id: example.id, + workspaceId: example.id, }); /*This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match.*/ awsPrometheusWorkspaceConfigurationExample.overrideLogicalId("example"); @@ -116,22 +112,22 @@ The following arguments are required: The following arguments are optional: +* `limitsPerLabelSet` - (Optional) Configuration block for setting limits on metrics with specific label sets. Detailed below. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `retentionPeriodInDays` - (Optional) Number of days to retain metric data in the workspace. 
-* `limits_per_label_set` - (Optional) Configuration block for setting limits on metrics with specific label sets. Detailed below. - -### `limits_per_label_set` -The `limits_per_label_set` configuration block supports the following arguments: +### `limitsPerLabelSet` -* `label_set` - (Required) Map of label key-value pairs that identify the metrics to which the limits apply. An empty map represents the default bucket for metrics that don't match any other label set. +The `limitsPerLabelSet` configuration block supports the following arguments: +* `labelSet` - (Required) Map of label key-value pairs that identify the metrics to which the limits apply. An empty map represents the default bucket for metrics that don't match any other label set. * `limits` - (Required) Configuration block for the limits to apply to the specified label set. Detailed below. #### `limits` The `limits` configuration block supports the following arguments: -* `max_series` - (Required) Maximum number of active time series that can be ingested for metrics matching the label set. +* `maxSeries` - (Required) Maximum number of active time series that can be ingested for metrics matching the label set. ## Attribute Reference @@ -156,7 +152,7 @@ import { TerraformStack } from "cdktf"; * Provider bindings are generated by running `cdktf get`. * See https://cdk.tf/provider-generation for more details. 
*/ -import { PrometheusWorkspaceConfiguration } from "./.gen/providers/aws/"; +import { PrometheusWorkspaceConfiguration } from "./.gen/providers/aws/prometheus-workspace-configuration"; class MyConvertedCode extends TerraformStack { constructor(scope: Construct, name: string) { super(scope, name); @@ -176,4 +172,4 @@ Using `terraform import`, import AMP (Managed Prometheus) Workspace Configuratio % terraform import aws_prometheus_workspace_configuration.example ws-12345678-abcd-1234-abcd-123456789012 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/proxy_protocol_policy.html.markdown b/website/docs/cdktf/typescript/r/proxy_protocol_policy.html.markdown index 73abf47b731f..f4f05e3de819 100644 --- a/website/docs/cdktf/typescript/r/proxy_protocol_policy.html.markdown +++ b/website/docs/cdktf/typescript/r/proxy_protocol_policy.html.markdown @@ -58,6 +58,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `loadBalancer` - (Required) The load balancer to which the policy should be attached. * `instancePorts` - (Required) List of instance ports to which the policy @@ -70,4 +71,4 @@ This resource exports the following attributes in addition to the arguments abov * `id` - The ID of the policy. * `loadBalancer` - The load balancer to which the policy is attached. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/qbusiness_application.html.markdown b/website/docs/cdktf/typescript/r/qbusiness_application.html.markdown index 6dbcbf3f3902..3a0d09ffc47c 100644 --- a/website/docs/cdktf/typescript/r/qbusiness_application.html.markdown +++ b/website/docs/cdktf/typescript/r/qbusiness_application.html.markdown @@ -54,6 +54,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` - (Optional) Description of the Amazon Q application. * `encryptionConfiguration` - (Optional) Information about encryption configuration. See [`encryptionConfiguration`](#encryption_configuration) below. @@ -114,4 +115,4 @@ Using `terraform import`, import a Q Business Application using the `id`. For ex % terraform import aws_qbusiness_application.example id-12345678 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/qldb_ledger.html.markdown b/website/docs/cdktf/typescript/r/qldb_ledger.html.markdown index 11c7acdec4fa..f62c999b6693 100644 --- a/website/docs/cdktf/typescript/r/qldb_ledger.html.markdown +++ b/website/docs/cdktf/typescript/r/qldb_ledger.html.markdown @@ -41,6 +41,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
* `deletionProtection` - (Optional) The deletion protection for the QLDB Ledger instance. By default it is `true`. To delete this resource via Terraform, this value must be configured to `false` and applied first before attempting deletion. * `kmsKey` - (Optional) The key in AWS Key Management Service (AWS KMS) to use for encryption of data at rest in the ledger. For more information, see the [AWS documentation](https://docs.aws.amazon.com/qldb/latest/developerguide/encryption-at-rest.html). Valid values are `"AWS_OWNED_KMS_KEY"` to use an AWS KMS key that is owned and managed by AWS on your behalf, or the ARN of a valid symmetric customer managed KMS key. * `name` - (Optional) The friendly name for the QLDB Ledger instance. By default generated by Terraform. @@ -90,4 +91,4 @@ Using `terraform import`, import QLDB Ledgers using the `name`. For example: % terraform import aws_qldb_ledger.sample-ledger sample-ledger ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/qldb_stream.html.markdown b/website/docs/cdktf/typescript/r/qldb_stream.html.markdown index 4c93061ba334..c9a216663ba2 100644 --- a/website/docs/cdktf/typescript/r/qldb_stream.html.markdown +++ b/website/docs/cdktf/typescript/r/qldb_stream.html.markdown @@ -49,6 +49,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `exclusiveEndTime` - (Optional) The exclusive date and time that specifies when the stream ends. If you don't define this parameter, the stream runs indefinitely until you cancel it. It must be in ISO 8601 date and time format and in Universal Coordinated Time (UTC). 
For example: `"2019-06-13T21:36:34Z"`. * `inclusiveStartTime` - (Required) The inclusive start date and time from which to start streaming journal data. This parameter must be in ISO 8601 date and time format and in Universal Coordinated Time (UTC). For example: `"2019-06-13T21:36:34Z"`. This cannot be in the future and must be before `exclusiveEndTime`. If you provide a value that is before the ledger's `CreationDateTime`, QLDB effectively defaults it to the ledger's `CreationDateTime`. * `kinesisConfiguration` - (Required) The configuration settings of the Kinesis Data Streams destination for your stream request. Documented below. @@ -79,4 +80,4 @@ This resource exports the following attributes in addition to the arguments abov - `create` - (Default `8m`) - `delete` - (Default `5m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/quicksight_account_settings.html.markdown b/website/docs/cdktf/typescript/r/quicksight_account_settings.html.markdown index 9625e1ebd5c7..d4ea70bd8518 100644 --- a/website/docs/cdktf/typescript/r/quicksight_account_settings.html.markdown +++ b/website/docs/cdktf/typescript/r/quicksight_account_settings.html.markdown @@ -24,7 +24,7 @@ import { TerraformStack } from "cdktf"; * Provider bindings are generated by running `cdktf get`. * See https://cdk.tf/provider-generation for more details. 
*/ -import { QuicksightAccountSettings } from "./.gen/providers/aws/"; +import { QuicksightAccountSettings } from "./.gen/providers/aws/quicksight-account-settings"; import { QuicksightAccountSubscription } from "./.gen/providers/aws/quicksight-account-subscription"; class MyConvertedCode extends TerraformStack { constructor(scope: Construct, name: string) { @@ -41,7 +41,7 @@ class MyConvertedCode extends TerraformStack { ); new QuicksightAccountSettings(this, "example", { dependsOn: [subscription], - termination_protection_enabled: false, + terminationProtectionEnabled: false, }); } } @@ -52,14 +52,13 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: -* `default_namespace` - (Optional) The default namespace for this Amazon Web Services account. Currently, the default is `default`. +* `awsAccountId` - (Optional, Forces new resource) AWS account ID. Defaults to automatically determined account ID of the Terraform AWS provider. +* `defaultNamespace` - (Optional) The default namespace for this Amazon Web Services account. Currently, the default is `default`. * `terminationProtectionEnabled` - (Optional) A boolean value that determines whether or not an Amazon QuickSight account can be deleted. If `true`, it does not allow the account to be deleted and results in an error message if a user tries to make a DeleteAccountSubscription request. If `false`, it will allow the account to be deleted. ## Attribute Reference -This resource exports the following attributes in addition to the arguments above: - -* `awsAccountId` - The ID for the AWS account that contains the settings. +This resource exports no additional attributes. ## Import @@ -73,7 +72,7 @@ import { TerraformStack } from "cdktf"; * Provider bindings are generated by running `cdktf get`. * See https://cdk.tf/provider-generation for more details. 
*/ -import { QuicksightAccountSettings } from "./.gen/providers/aws/"; +import { QuicksightAccountSettings } from "./.gen/providers/aws/quicksight-account-settings"; class MyConvertedCode extends TerraformStack { constructor(scope: Construct, name: string) { super(scope, name); @@ -93,4 +92,4 @@ Using `terraform import`, import QuickSight Account Settings using the AWS accou % terraform import aws_quicksight_account_settings.example "012345678901" ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/quicksight_account_subscription.html.markdown b/website/docs/cdktf/typescript/r/quicksight_account_subscription.html.markdown index 9a66e092fb86..cc1f28264d41 100644 --- a/website/docs/cdktf/typescript/r/quicksight_account_subscription.html.markdown +++ b/website/docs/cdktf/typescript/r/quicksight_account_subscription.html.markdown @@ -53,7 +53,7 @@ The following arguments are optional: * `activeDirectoryName` - (Optional) Name of your Active Directory. This field is required if `ACTIVE_DIRECTORY` is the selected authentication method of the new Amazon QuickSight account. * `adminGroup` - (Optional) Admin group associated with your Active Directory or IAM Identity Center account. This field is required if `ACTIVE_DIRECTORY` or `IAM_IDENTITY_CENTER` is the selected authentication method of the new Amazon QuickSight account. * `authorGroup` - (Optional) Author group associated with your Active Directory or IAM Identity Center account. -* `awsAccountId` - (Optional) AWS account ID hosting the QuickSight account. Default to provider account. +* `awsAccountId` - (Optional, Forces new resource) AWS account ID. Defaults to automatically determined account ID of the Terraform AWS provider. * `contactNumber` - (Optional) A 10-digit phone number for the author of the Amazon QuickSight account to use for future communications. 
This field is required if `ENTERPPRISE_AND_Q` is the selected edition of the new Amazon QuickSight account. * `directoryId` - (Optional) Active Directory ID that is associated with your Amazon QuickSight account. * `emailAddress` - (Optional) Email address of the author of the Amazon QuickSight account to use for future communications. This field is required if `ENTERPPRISE_AND_Q` is the selected edition of the new Amazon QuickSight account. @@ -62,6 +62,7 @@ The following arguments are optional: * `lastName` - (Optional) Last name of the author of the Amazon QuickSight account to use for future communications. This field is required if `ENTERPPRISE_AND_Q` is the selected edition of the new Amazon QuickSight account. * `readerGroup` - (Optional) Reader group associated with your Active Directory or IAM Identity Center account. * `realm` - (Optional) Realm of the Active Directory that is associated with your Amazon QuickSight account. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). ## Attribute Reference @@ -78,6 +79,36 @@ This resource exports the following attributes in addition to the arguments abov ## Import -You cannot import this resource. +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import a QuickSight Account Subscription using `awsAccountId`. For example: - \ No newline at end of file +~> Due to the absence of required arguments in the [`DescribeAccountSettings`](https://docs.aws.amazon.com/quicksight/latest/APIReference/API_DescribeAccountSettings.html) API response, importing an existing account subscription will result in a planned replacement on the subsequent `apply` operation. 
Until the Describe API response is extended to include all configurable arguments, an [`ignore_changes` lifecycle argument](https://developer.hashicorp.com/terraform/language/meta-arguments/lifecycle#ignore_changes) can be used to suppress differences on arguments not read into state. + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. + */ +import { QuicksightAccountSubscription } from "./.gen/providers/aws/quicksight-account-subscription"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + QuicksightAccountSubscription.generateConfigForImport( + this, + "example", + "012345678901" + ); + } +} + +``` + +Using `terraform import`, import a QuickSight Account Subscription using `awsAccountId`. For example: + +```console +% terraform import aws_quicksight_account_subscription.example "012345678901" +``` + + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/quicksight_analysis.html.markdown b/website/docs/cdktf/typescript/r/quicksight_analysis.html.markdown index eff9453baca5..e078145c6ee2 100644 --- a/website/docs/cdktf/typescript/r/quicksight_analysis.html.markdown +++ b/website/docs/cdktf/typescript/r/quicksight_analysis.html.markdown @@ -135,11 +135,12 @@ The following arguments are required: The following arguments are optional: -* `awsAccountId` - (Optional, Forces new resource) AWS account ID. +* `awsAccountId` - (Optional, Forces new resource) AWS account ID. Defaults to automatically determined account ID of the Terraform AWS provider. * `definition` - (Optional) A detailed analysis definition. Only one of `definition` or `sourceEntity` should be configured. See [definition](#definition).
* `parameters` - (Optional) The parameters for the creation of the analysis, which you want to use to override the default settings. An analysis can have any type of parameters, and some parameters might accept multiple values. See [parameters](#parameters). * `permissions` - (Optional) A set of resource permissions on the analysis. Maximum of 64 items. See [permissions](#permissions). * `recoveryWindowInDays` - (Optional) A value that specifies the number of days that Amazon QuickSight waits before it deletes the analysis. Use `0` to force deletion without recovery. Minimum value of `7`. Maximum value of `30`. Default to `30`. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `sourceEntity` - (Optional) The entity that you are using as a source when you create the analysis (template). Only one of `definition` or `sourceEntity` should be configured. See [source_entity](#source_entity). * `tags` - (Optional) Key-value map of resource tags. If configured with a provider [`defaultTags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. * `themeArn` - (Optional) The Amazon Resource Name (ARN) of the theme that is being used for this analysis. The theme ARN must exist in the same AWS account where you create the analysis. 
@@ -231,4 +232,4 @@ Using `terraform import`, import a QuickSight Analysis using the AWS account ID % terraform import aws_quicksight_analysis.example 123456789012,example-id ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/quicksight_custom_permissions.html.markdown b/website/docs/cdktf/typescript/r/quicksight_custom_permissions.html.markdown new file mode 100644 index 000000000000..e69a0bcfd91e --- /dev/null +++ b/website/docs/cdktf/typescript/r/quicksight_custom_permissions.html.markdown @@ -0,0 +1,106 @@ +--- +subcategory: "QuickSight" +layout: "aws" +page_title: "AWS: aws_quicksight_custom_permissions" +description: |- + Manages a QuickSight custom permissions profile. +--- + + + +# Resource: aws_quicksight_custom_permissions + +Manages a QuickSight custom permissions profile. + +## Example Usage + +```terraform +resource "aws_quicksight_custom_permissions" "example" { + custom_permissions_name = "example-permissions" + + capabilities { + print_reports = "DENY" + share_dashboards = "DENY" + } +} +``` + +## Argument Reference + +The following arguments are required: + +* `capabilities` - (Required) Actions to include in the custom permissions profile. See [capabilities](#capabilities). +* `customPermissionsName` - (Required, Forces new resource) Custom permissions profile name. + +The following arguments are optional: + +* `awsAccountId` - (Optional, Forces new resource) AWS account ID. Defaults to automatically determined account ID of the Terraform AWS provider. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `tags` - (Optional) Key-value map of resource tags.
If configured with a provider [`defaultTags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. + +### capabilities + +* `addOrRunAnomalyDetectionForAnalyses` - (Optional) The ability to add or run anomaly detection. Valid values: `DENY`. +* `createAndUpdateDashboardEmailReports` - (Optional) The ability to create and update email reports. Valid values: `DENY`. +* `createAndUpdateDatasets` - (Optional) The ability to create and update datasets. Valid values: `DENY`. +* `createAndUpdateDataSources` - (Optional) The ability to create and update data sources. Valid values: `DENY`. +* `createAndUpdateThemes` - (Optional) The ability to create and update themes. Valid values: `DENY`. +* `createAndUpdateThresholdAlerts` - (Optional) The ability to create and update threshold alerts. Valid values: `DENY`. +* `createSharedFolders` - (Optional) The ability to create shared folders. Valid values: `DENY`. +* `createSpiceDataset` - (Optional) The ability to create a SPICE dataset. Valid values: `DENY`. +* `exportToCsv` - (Optional) The ability to export to CSV files from the UI. Valid values: `DENY`. +* `exportToCsvInScheduledReports` - (Optional) The ability to export to CSV files in scheduled email reports. Valid values: `DENY`. +* `exportToExcel` - (Optional) The ability to export to Excel files from the UI. Valid values: `DENY`. +* `exportToExcelInScheduledReports` - (Optional) The ability to export to Excel files in scheduled email reports. Valid values: `DENY`. +* `exportToPdf` - (Optional) The ability to export to PDF files from the UI. Valid values: `DENY`. +* `exportToPdfInScheduledReports` - (Optional) The ability to export to PDF files in scheduled email reports. Valid values: `DENY`. +* `includeContentInScheduledReportsEmail` - (Optional) The ability to include content in scheduled email reports. Valid values: `DENY`.
+* `printReports` - (Optional) The ability to print reports. Valid values: `DENY`. +* `renameSharedFolders` - (Optional) The ability to rename shared folders. Valid values: `DENY`. +* `shareAnalyses` - (Optional) The ability to share analyses. Valid values: `DENY`. +* `shareDashboards` - (Optional) The ability to share dashboards. Valid values: `DENY`. +* `shareDatasets` - (Optional) The ability to share datasets. Valid values: `DENY`. +* `shareDataSources` - (Optional) The ability to share data sources. Valid values: `DENY`. +* `subscribeDashboardEmailReports` - (Optional) The ability to subscribe to email reports. Valid values: `DENY`. +* `viewAccountSpiceCapacity` - (Optional) The ability to view account SPICE capacity. Valid values: `DENY`. + +## Attribute Reference + +This resource exports the following attributes in addition to the arguments above: + +* `arn` - ARN of the custom permissions profile. +* `tagsAll` - A map of tags assigned to the resource, including those inherited from the provider [`defaultTags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block). + +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import a QuickSight custom permissions profile using the AWS account ID and custom permissions profile name separated by a comma (`,`). For example: + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. 
+ */ +import { QuicksightCustomPermissions } from "./.gen/providers/aws/quicksight-custom-permissions"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + QuicksightCustomPermissions.generateConfigForImport( + this, + "example", + "123456789012,example-permissions" + ); + } +} + +``` + +Using `terraform import`, import a QuickSight custom permissions profile using the AWS account ID and custom permissions profile name separated by a comma (`,`). For example: + +```console +% terraform import aws_quicksight_custom_permissions.example 123456789012,example-permissions +``` + + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/quicksight_dashboard.html.markdown b/website/docs/cdktf/typescript/r/quicksight_dashboard.html.markdown index 0a54c7419b24..6acb55a676c2 100644 --- a/website/docs/cdktf/typescript/r/quicksight_dashboard.html.markdown +++ b/website/docs/cdktf/typescript/r/quicksight_dashboard.html.markdown @@ -138,11 +138,12 @@ The following arguments are required: The following arguments are optional: -* `awsAccountId` - (Optional, Forces new resource) AWS account ID. +* `awsAccountId` - (Optional, Forces new resource) AWS account ID. Defaults to automatically determined account ID of the Terraform AWS provider. * `dashboardPublishOptions` - (Optional) Options for publishing the dashboard. See [dashboard_publish_options](#dashboard_publish_options). * `definition` - (Optional) A detailed dashboard definition. Only one of `definition` or `sourceEntity` should be configured. See [definition](#definition). * `parameters` - (Optional) The parameters for the creation of the dashboard, which you want to use to override the default settings. A dashboard can have any type of parameters, and some parameters might accept multiple values. See [parameters](#parameters). * `permissions` - (Optional) A set of resource permissions on the dashboard. Maximum of 64 items. 
See [permissions](#permissions). +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `sourceEntity` - (Optional) The entity that you are using as a source when you create the dashboard (template). Only one of `definition` or `sourceEntity` should be configured. See [source_entity](#source_entity). * `tags` - (Optional) Key-value map of resource tags. If configured with a provider [`defaultTags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. * `themeArn` - (Optional) The Amazon Resource Name (ARN) of the theme that is being used for this dashboard. The theme ARN must exist in the same AWS account where you create the dashboard. @@ -289,4 +290,4 @@ Using `terraform import`, import a QuickSight Dashboard using the AWS account ID % terraform import aws_quicksight_dashboard.example 123456789012,example-id ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/quicksight_data_set.html.markdown b/website/docs/cdktf/typescript/r/quicksight_data_set.html.markdown index 38b76f5d4669..dbd0a75c2a44 100644 --- a/website/docs/cdktf/typescript/r/quicksight_data_set.html.markdown +++ b/website/docs/cdktf/typescript/r/quicksight_data_set.html.markdown @@ -265,7 +265,7 @@ The following arguments are required: The following arguments are optional: -* `awsAccountId` - (Optional, Forces new resource) AWS account ID. +* `awsAccountId` - (Optional, Forces new resource) AWS account ID. Defaults to automatically determined account ID of the Terraform AWS provider. 
* `columnGroups` - (Optional) Groupings of columns that work together in certain Amazon QuickSight features. Currently, only geospatial hierarchy is supported. See [column_groups](#column_groups). * `columnLevelPermissionRules` - (Optional) A set of 1 or more definitions of a [ColumnLevelPermissionRule](https://docs.aws.amazon.com/quicksight/latest/APIReference/API_ColumnLevelPermissionRule.html). See [column_level_permission_rules](#column_level_permission_rules). * `dataSetUsageConfiguration` - (Optional) The usage configuration to apply to child datasets that reference this dataset as a source. See [data_set_usage_configuration](#data_set_usage_configuration). @@ -273,6 +273,7 @@ The following arguments are optional: * `logicalTableMap` - (Optional) Configures the combination and transformation of the data from the physical tables. Maximum of 1 entry. See [logical_table_map](#logical_table_map). * `permissions` - (Optional) A set of resource permissions on the data source. Maximum of 64 items. See [permissions](#permissions). * `physicalTableMap` - (Optional) Declares the physical tables that are available in the underlying data sources. See [physical_table_map](#physical_table_map). +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `rowLevelPermissionDataSet` - (Optional) The row-level security configuration for the data that you want to create. See [row_level_permission_data_set](#row_level_permission_data_set). * `rowLevelPermissionTagConfiguration` - (Optional) The configuration of tags on a dataset to set row-level security. Row-level security tags are currently supported for anonymous embedding only. See [row_level_permission_tag_configuration](#row_level_permission_tag_configuration). 
* `refreshProperties` - (Optional) The refresh properties for the data set. **NOTE**: Only valid when `importMode` is set to `SPICE`. See [refresh_properties](#refresh_properties). @@ -489,8 +490,17 @@ This resource exports the following attributes in addition to the arguments abov * `arn` - Amazon Resource Name (ARN) of the data set. * `id` - A comma-delimited string joining AWS account ID and data set ID. +* `outputColumns` - The final set of columns available for use in analyses and dashboards after all data preparation and transformation steps have been applied within the data set. See [`outputColumns` Block](#output_columns-block) below. * `tagsAll` - A map of tags assigned to the resource, including those inherited from the provider [`defaultTags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block). +### `outputColumns` Block + +The `outputColumns` block has the following attributes. + +* `name` - The name of the column. +* `description` - The description of the column. +* `type` - The data type of the column. + ## Import In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import a QuickSight Data Set using the AWS account ID and data set ID separated by a comma (`,`). 
For example: @@ -523,4 +533,4 @@ Using `terraform import`, import a QuickSight Data Set using the AWS account ID % terraform import aws_quicksight_data_set.example 123456789012,example-id ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/quicksight_data_source.html.markdown b/website/docs/cdktf/typescript/r/quicksight_data_source.html.markdown index fb60cce66d4a..0382e2c84f84 100644 --- a/website/docs/cdktf/typescript/r/quicksight_data_source.html.markdown +++ b/website/docs/cdktf/typescript/r/quicksight_data_source.html.markdown @@ -110,7 +110,7 @@ class MyConvertedCode extends TerraformStack { "https://${" + example.id + "}.s3-${" + - dataAwsRegionCurrent.name + + dataAwsRegionCurrent.region + "}.${" + dataAwsPartitionCurrent.dnsSuffix + "}", @@ -199,9 +199,10 @@ The following arguments are required: The following arguments are optional: -* `awsAccountId` - (Optional, Forces new resource) The ID for the AWS account that the data source is in. Currently, you use the ID for the AWS account that contains your Amazon QuickSight account. +* `awsAccountId` - (Optional, Forces new resource) AWS account ID. Defaults to automatically determined account ID of the Terraform AWS provider. * `credentials` - (Optional) The credentials Amazon QuickSight uses to connect to your underlying source. See [Credentials](#credentials-argument-reference) below for more details. * `permission` - (Optional) A set of resource permissions on the data source. Maximum of 64 items. See [Permission](#permission-argument-reference) below for more details. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
* `sslProperties` - (Optional) Secure Socket Layer (SSL) properties that apply when Amazon QuickSight connects to your underlying source. See [SSL Properties](#ssl_properties-argument-reference) below for more details. * `tags` - (Optional) Key-value map of resource tags. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. * `vpcConnectionProperties`- (Optional) Use this parameter only when you want Amazon QuickSight to use a VPC connection when connecting to your underlying source. See [VPC Connection Properties](#vpc_connection_properties-argument-reference) below for more details. @@ -414,4 +415,4 @@ Using `terraform import`, import a QuickSight data source using the AWS account % terraform import aws_quicksight_data_source.example 123456789123/my-data-source-id ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/quicksight_folder.html.markdown b/website/docs/cdktf/typescript/r/quicksight_folder.html.markdown index 1da8b321b4a3..c815d9044e9c 100644 --- a/website/docs/cdktf/typescript/r/quicksight_folder.html.markdown +++ b/website/docs/cdktf/typescript/r/quicksight_folder.html.markdown @@ -112,10 +112,11 @@ The following arguments are required: The following arguments are optional: -* `awsAccountId` - (Optional, Forces new resource) AWS account ID. +* `awsAccountId` - (Optional, Forces new resource) AWS account ID. Defaults to automatically determined account ID of the Terraform AWS provider. * `folderType` - (Optional) The type of folder. By default, it is `SHARED`. Valid values are: `SHARED`. * `parentFolderArn` - (Optional) The Amazon Resource Name (ARN) for the parent folder. If not set, creates a root-level folder. * `permissions` - (Optional) A set of resource permissions on the folder. 
Maximum of 64 items. See [permissions](#permissions). +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `tags` - (Optional) Key-value map of resource tags. If configured with a provider [`defaultTags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. ### permissions @@ -175,4 +176,4 @@ Using `terraform import`, import a QuickSight folder using the AWS account ID an % terraform import aws_quicksight_folder.example 123456789012,example-id ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/quicksight_folder_membership.html.markdown b/website/docs/cdktf/typescript/r/quicksight_folder_membership.html.markdown index 7a681cb552f9..611d7891138a 100644 --- a/website/docs/cdktf/typescript/r/quicksight_folder_membership.html.markdown +++ b/website/docs/cdktf/typescript/r/quicksight_folder_membership.html.markdown @@ -48,7 +48,8 @@ The following arguments are required: The following arguments are optional: -* `awsAccountId` - (Optional, Forces new resource) AWS account ID. +* `awsAccountId` - (Optional, Forces new resource) AWS account ID. Defaults to automatically determined account ID of the Terraform AWS provider. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
## Attribute Reference @@ -88,4 +89,4 @@ Using `terraform import`, import QuickSight Folder Membership using the AWS acco % terraform import aws_quicksight_folder_membership.example 123456789012,example-folder,DATASET,example-dataset ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/quicksight_group.html.markdown b/website/docs/cdktf/typescript/r/quicksight_group.html.markdown index 3cd7e65f94e9..b45cdf4d9dc0 100644 --- a/website/docs/cdktf/typescript/r/quicksight_group.html.markdown +++ b/website/docs/cdktf/typescript/r/quicksight_group.html.markdown @@ -38,10 +38,11 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: -* `groupName` - (Required) A name for the group. -* `awsAccountId` - (Optional) The ID for the AWS account that the group is in. Currently, you use the ID for the AWS account that contains your Amazon QuickSight account. +* `awsAccountId` - (Optional, Forces new resource) AWS account ID. Defaults to automatically determined account ID of the Terraform AWS provider. * `description` - (Optional) A description for the group. +* `groupName` - (Required) A name for the group. * `namespace` - (Optional) The namespace. Currently, you should set this to `default`. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
## Attribute Reference @@ -81,4 +82,4 @@ Using `terraform import`, import QuickSight Group using the aws account id, name % terraform import aws_quicksight_group.example 123456789123/default/tf-example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/quicksight_group_membership.html.markdown b/website/docs/cdktf/typescript/r/quicksight_group_membership.html.markdown index bb85c00f49aa..d1345e38d438 100644 --- a/website/docs/cdktf/typescript/r/quicksight_group_membership.html.markdown +++ b/website/docs/cdktf/typescript/r/quicksight_group_membership.html.markdown @@ -39,10 +39,11 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `awsAccountId` - (Optional, Forces new resource) AWS account ID. Defaults to automatically determined account ID of the Terraform AWS provider. * `groupName` - (Required) The name of the group in which the member will be added. * `memberName` - (Required) The name of the member to add to the group. -* `awsAccountId` - (Optional) The ID for the AWS account that the group is in. Currently, you use the ID for the AWS account that contains your Amazon QuickSight account. -* `namespace` - (Required) The namespace that you want the user to be a part of. Defaults to `default`. +* `namespace` - (Optional) The namespace that you want the user to be a part of. Defaults to `default`. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
## Attribute Reference @@ -80,4 +81,4 @@ Using `terraform import`, import QuickSight Group membership using the AWS accou % terraform import aws_quicksight_group_membership.example 123456789123/default/all-access-users/john_smith ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/quicksight_iam_policy_assignment.html.markdown b/website/docs/cdktf/typescript/r/quicksight_iam_policy_assignment.html.markdown index 92aaf4725427..79c76a1551a6 100644 --- a/website/docs/cdktf/typescript/r/quicksight_iam_policy_assignment.html.markdown +++ b/website/docs/cdktf/typescript/r/quicksight_iam_policy_assignment.html.markdown @@ -52,10 +52,11 @@ The following arguments are required: The following arguments are optional: -* `awsAccountId` - (Optional) AWS account ID. +* `awsAccountId` - (Optional, Forces new resource) AWS account ID. Defaults to automatically determined account ID of the Terraform AWS provider. * `identities` - (Optional) Amazon QuickSight users, groups, or both to assign the policy to. See [`identities` block](#identities-block). * `namespace` - (Optional) Namespace that contains the assignment. Defaults to `default`. * `policyArn` - (Optional) ARN of the IAM policy to apply to the Amazon QuickSight users and groups specified in this assignment. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
### `identities` block @@ -101,4 +102,4 @@ Using `terraform import`, import QuickSight IAM Policy Assignment using the AWS % terraform import aws_quicksight_iam_policy_assignment.example 123456789012,default,example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/quicksight_ingestion.html.markdown b/website/docs/cdktf/typescript/r/quicksight_ingestion.html.markdown index 717fa5dbf94f..f652375979f4 100644 --- a/website/docs/cdktf/typescript/r/quicksight_ingestion.html.markdown +++ b/website/docs/cdktf/typescript/r/quicksight_ingestion.html.markdown @@ -48,7 +48,8 @@ The following arguments are required: The following arguments are optional: -* `awsAccountId` - (Optional) AWS account ID. +* `awsAccountId` - (Optional, Forces new resource) AWS account ID. Defaults to automatically determined account ID of the Terraform AWS provider. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). ## Attribute Reference @@ -90,4 +91,4 @@ Using `terraform import`, import QuickSight Ingestion using the AWS account ID, % terraform import aws_quicksight_ingestion.example 123456789012,example-dataset-id,example-ingestion-id ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/quicksight_ip_restriction.html.markdown b/website/docs/cdktf/typescript/r/quicksight_ip_restriction.html.markdown new file mode 100644 index 000000000000..01016f088299 --- /dev/null +++ b/website/docs/cdktf/typescript/r/quicksight_ip_restriction.html.markdown @@ -0,0 +1,92 @@ +--- +subcategory: "QuickSight" +layout: "aws" +page_title: "AWS: aws_quicksight_ip_restriction" +description: |- + Manages the content and status of IP rules. 
+--- + + + +# Resource: aws_quicksight_ip_restriction + +Manages the content and status of IP rules. + +~> Deletion of this resource clears all IP restrictions from a QuickSight account. + +## Example Usage + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. + */ +import { QuicksightIpRestriction } from "./.gen/providers/aws/quicksight-ip-restriction"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + new QuicksightIpRestriction(this, "example", { + enabled: true, + ipRestrictionRuleMap: { + "108.56.166.202/32": "Allow self", + }, + vpcIdRestrictionRuleMap: { + "${(aws_vpc.example.id)}": "Main VPC", + }, + }); + } +} + +``` + +## Argument Reference + +This resource supports the following arguments: + +* `awsAccountId` - (Optional, Forces new resource) AWS account ID. Defaults to automatically determined account ID of the Terraform AWS provider. +* `enabled` - (Required) Whether IP rules are turned on. +* `ipRestrictionRuleMap` - (Optional) Map of allowed IPv4 CIDR ranges and descriptions. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `vpcEndpointIdRestrictionRuleMap` - (Optional) Map of allowed VPC endpoint IDs and descriptions. +* `vpcIdRestrictionRuleMap` - (Optional) Map of VPC IDs and descriptions. Traffic from all VPC endpoints that are present in the specified VPC is allowed. + +## Attribute Reference + +This resource exports no additional attributes. 
+ +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import QuickSight IP restriction using the AWS account ID. For example: + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. + */ +import { QuicksightIpRestriction } from "./.gen/providers/aws/quicksight-ip-restriction"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + QuicksightIpRestriction.generateConfigForImport( + this, + "example", + "012345678901" + ); + } +} + +``` + +Using `terraform import`, import QuickSight IP restriction using the AWS account ID. For example: + +```console +% terraform import aws_quicksight_ip_restriction.example "012345678901" +``` + + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/quicksight_key_registration.html.markdown b/website/docs/cdktf/typescript/r/quicksight_key_registration.html.markdown new file mode 100644 index 000000000000..592b4aa3ec0d --- /dev/null +++ b/website/docs/cdktf/typescript/r/quicksight_key_registration.html.markdown @@ -0,0 +1,96 @@ +--- +subcategory: "QuickSight" +layout: "aws" +page_title: "AWS: aws_quicksight_key_registration" +description: |- + Registers customer managed keys in an Amazon QuickSight account. +--- + + + +# Resource: aws_quicksight_key_registration + +Registers customer managed keys in an Amazon QuickSight account. + +~> Deletion of this resource clears all CMK registrations from a QuickSight account. QuickSight then uses AWS owned keys to encrypt your resources. + +## Example Usage + +```typescript +// DO NOT EDIT. 
Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. + */ +import { QuicksightKeyRegistration } from "./.gen/providers/aws/quicksight-key-registration"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + new QuicksightKeyRegistration(this, "example", { + keyRegistration: [ + { + keyArn: example1.arn, + }, + { + defaultKey: true, + keyArn: example2.arn, + }, + ], + }); + } +} + +``` + +## Argument Reference + +This resource supports the following arguments: + +* `awsAccountId` - (Optional, Forces new resource) AWS account ID. Defaults to automatically determined account ID of the Terraform AWS provider. +* `keyRegistration` - (Required) Registered keys. See [key_registration](#key_registration). +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). + +### key_registration + +* `defaultKey` - (Optional) Whether the key is set as the default key for encryption and decryption use. +* `keyArn` - (Required) ARN of the AWS KMS key that is registered for encryption and decryption use. + +## Attribute Reference + +This resource exports no additional attributes. + +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import QuickSight key registration using the AWS account ID. For example: + +```typescript +// DO NOT EDIT. 
Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. + */ +import { QuicksightKeyRegistration } from "./.gen/providers/aws/quicksight-key-registration"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + QuicksightKeyRegistration.generateConfigForImport( + this, + "example", + "012345678901" + ); + } +} + +``` + +Using `terraform import`, import QuickSight key registration using the AWS account ID. For example: + +```console +% terraform import aws_quicksight_key_registration.example "012345678901" +``` + + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/quicksight_namespace.html.markdown b/website/docs/cdktf/typescript/r/quicksight_namespace.html.markdown index 58aad988531c..c46712570be9 100644 --- a/website/docs/cdktf/typescript/r/quicksight_namespace.html.markdown +++ b/website/docs/cdktf/typescript/r/quicksight_namespace.html.markdown @@ -44,8 +44,9 @@ The following arguments are required: The following arguments are optional: -* `awsAccountId` - (Optional) AWS account ID. +* `awsAccountId` - (Optional, Forces new resource) AWS account ID. Defaults to automatically determined account ID of the Terraform AWS provider. * `identityStore` - (Optional) User identity directory type. Defaults to `QUICKSIGHT`, the only current valid value. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `tags` - (Optional) Key-value map of resource tags. 
If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. ## Attribute Reference @@ -97,4 +98,4 @@ Using `terraform import`, import QuickSight Namespace using the AWS account ID a % terraform import aws_quicksight_namespace.example 123456789012,example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/quicksight_refresh_schedule.html.markdown b/website/docs/cdktf/typescript/r/quicksight_refresh_schedule.html.markdown index 0c47d49a1ff8..101f64f2b465 100644 --- a/website/docs/cdktf/typescript/r/quicksight_refresh_schedule.html.markdown +++ b/website/docs/cdktf/typescript/r/quicksight_refresh_schedule.html.markdown @@ -137,7 +137,8 @@ The following arguments are required: The following arguments are optional: -* `awsAccountId` - (Optional, Forces new resource) AWS account ID. +* `awsAccountId` - (Optional, Forces new resource) AWS account ID. Defaults to automatically determined account ID of the Terraform AWS provider. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
### schedule @@ -196,4 +197,4 @@ Using `terraform import`, import a QuickSight Refresh Schedule using the AWS acc % terraform import aws_quicksight_refresh_schedule.example 123456789012,dataset-id,schedule-id ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/quicksight_role_custom_permission.html.markdown b/website/docs/cdktf/typescript/r/quicksight_role_custom_permission.html.markdown new file mode 100644 index 000000000000..d729bdd4aef2 --- /dev/null +++ b/website/docs/cdktf/typescript/r/quicksight_role_custom_permission.html.markdown @@ -0,0 +1,89 @@ +--- +subcategory: "QuickSight" +layout: "aws" +page_title: "AWS: aws_quicksight_role_custom_permission" +description: |- + Manages the custom permissions that are associated with a role. +--- + + + +# Resource: aws_quicksight_role_custom_permission + +Manages the custom permissions that are associated with a role. + +## Example Usage + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { Token, TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. + */ +import { QuicksightRoleCustomPermission } from "./.gen/providers/aws/quicksight-role-custom-permission"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + new QuicksightRoleCustomPermission(this, "example", { + customPermissionsName: Token.asString( + awsQuicksightCustomPermissionsExample.customPermissionsName + ), + role: "READER", + }); + } +} + +``` + +## Argument Reference + +The following arguments are required: + +* `customPermissionsName` - (Required, Forces new resource) Custom permissions profile name. +* `role` - (Required, Forces new resource) Role. 
Valid values are `ADMIN`, `AUTHOR`, `READER`, `ADMIN_PRO`, `AUTHOR_PRO`, and `READER_PRO`. + +The following arguments are optional: + +* `awsAccountId` - (Optional, Forces new resource) AWS account ID. Defaults to automatically determined account ID of the Terraform AWS provider. +* `namespace` - (Optional, Forces new resource) Namespace containing the role. Defaults to `default`. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). + +## Attribute Reference + +This resource exports no additional attributes. + +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import QuickSight role custom permissions using a comma-delimited string combining the `awsAccountId`, `namespace` and `role`. For example: + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. + */ +import { QuicksightRoleCustomPermission } from "./.gen/providers/aws/quicksight-role-custom-permission"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + QuicksightRoleCustomPermission.generateConfigForImport( + this, + "example", + "012345678901,default,READER" + ); + } +} + +``` + +Using `terraform import`, import QuickSight role custom permissions using a comma-delimited string combining the `awsAccountId`, `namespace`, and `role`. 
For example: + +```console +% terraform import aws_quicksight_role_custom_permission.example 012345678901,default,READER +``` + + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/quicksight_role_membership.html.markdown b/website/docs/cdktf/typescript/r/quicksight_role_membership.html.markdown index b854f38fd0e0..b64639de339e 100644 --- a/website/docs/cdktf/typescript/r/quicksight_role_membership.html.markdown +++ b/website/docs/cdktf/typescript/r/quicksight_role_membership.html.markdown @@ -47,8 +47,9 @@ The following arguments are required: The following arguments are optional: -* `awsAccountId` - (Optional) AWS account ID. Defaults to the account of the caller identity if not configured. +* `awsAccountId` - (Optional, Forces new resource) AWS account ID. Defaults to automatically determined account ID of the Terraform AWS provider. * `namespace` - (Optional) Name of the namespace. Defaults to `default`. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
## Attribute Reference @@ -86,4 +87,4 @@ Using `terraform import`, import QuickSight Role Membership using a comma-delimi % terraform import aws_quicksight_role_membership.example 012345678901,default,READER,example-group ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/quicksight_template.html.markdown b/website/docs/cdktf/typescript/r/quicksight_template.html.markdown index 31c491920c9e..844b887971b6 100644 --- a/website/docs/cdktf/typescript/r/quicksight_template.html.markdown +++ b/website/docs/cdktf/typescript/r/quicksight_template.html.markdown @@ -140,9 +140,10 @@ The following arguments are required: The following arguments are optional: -* `awsAccountId` - (Optional, Forces new resource) AWS account ID. +* `awsAccountId` - (Optional, Forces new resource) AWS account ID. Defaults to automatically determined account ID of the Terraform AWS provider. * `definition` - (Optional) A detailed template definition. Only one of `definition` or `sourceEntity` should be configured. See [definition](#definition). * `permissions` - (Optional) A set of resource permissions on the template. Maximum of 64 items. See [permissions](#permissions). +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `sourceEntity` - (Optional) The entity that you are using as a source when you create the template (analysis or template). Only one of `definition` or `sourceEntity` should be configured. See [source_entity](#source_entity). * `tags` - (Optional) Key-value map of resource tags. 
If configured with a provider [`defaultTags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. @@ -233,4 +234,4 @@ Using `terraform import`, import a QuickSight Template using the AWS account ID % terraform import aws_quicksight_template.example 123456789012,example-id ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/quicksight_template_alias.html.markdown b/website/docs/cdktf/typescript/r/quicksight_template_alias.html.markdown index 42e71d59419d..93f9c98525b7 100644 --- a/website/docs/cdktf/typescript/r/quicksight_template_alias.html.markdown +++ b/website/docs/cdktf/typescript/r/quicksight_template_alias.html.markdown @@ -48,7 +48,8 @@ The following arguments are required: The following arguments are optional: -* `awsAccountId` - (Optional, Forces new resource) AWS account ID. +* `awsAccountId` - (Optional, Forces new resource) AWS account ID. Defaults to automatically determined account ID of the Terraform AWS provider. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
## Attribute Reference @@ -89,4 +90,4 @@ Using `terraform import`, import QuickSight Template Alias using the AWS account % terraform import aws_quicksight_template_alias.example 123456789012,example-id,example-alias ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/quicksight_theme.html.markdown b/website/docs/cdktf/typescript/r/quicksight_theme.html.markdown index 40066844b508..4cf6cb64315b 100644 --- a/website/docs/cdktf/typescript/r/quicksight_theme.html.markdown +++ b/website/docs/cdktf/typescript/r/quicksight_theme.html.markdown @@ -60,15 +60,16 @@ class MyConvertedCode extends TerraformStack { The following arguments are required: -* `themeId` - (Required, Forces new resource) Identifier of the theme. * `baseThemeId` - (Required) The ID of the theme that a custom theme will inherit from. All themes inherit from one of the starting themes defined by Amazon QuickSight. For a list of the starting themes, use ListThemes or choose Themes from within an analysis. -* `name` - (Required) Display name of the theme. * `configuration` - (Required) The theme configuration, which contains the theme display properties. See [configuration](#configuration). +* `name` - (Required) Display name of the theme. +* `themeId` - (Required, Forces new resource) Identifier of the theme. The following arguments are optional: -* `awsAccountId` - (Optional, Forces new resource) AWS account ID. +* `awsAccountId` - (Optional, Forces new resource) AWS account ID. Defaults to automatically determined account ID of the Terraform AWS provider. * `permissions` - (Optional) A set of resource permissions on the theme. Maximum of 64 items. See [permissions](#permissions). +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `tags` - (Optional) Key-value map of resource tags. If configured with a provider [`defaultTags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. * `versionDescription` - (Optional) A description of the current theme version being created/updated. @@ -195,4 +196,4 @@ Using `terraform import`, import a QuickSight Theme using the AWS account ID and % terraform import aws_quicksight_theme.example 123456789012,example-id ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/quicksight_user.html.markdown b/website/docs/cdktf/typescript/r/quicksight_user.html.markdown index 332609b7826e..347e44e774e3 100644 --- a/website/docs/cdktf/typescript/r/quicksight_user.html.markdown +++ b/website/docs/cdktf/typescript/r/quicksight_user.html.markdown @@ -96,14 +96,15 @@ class MyConvertedCode extends TerraformStack { The following arguments are required: * `email` - (Required) Email address of the user that you want to register. -* `identityType` - (Required) Identity type that your Amazon QuickSight account uses to manage the identity of users. Valid values: `IAM`, `QUICKSIGHT`. -* `userRole` - (Required) Amazon QuickSight role for the user. Value values: `READER`, `AUTHOR`, `ADMIN`, `READER_PRO`, `AUTHOR_PRO`, `ADMIN_PRO`. +* `identityType` - (Required) Identity type that your Amazon QuickSight account uses to manage the identity of users. Valid values: `IAM`, `QUICKSIGHT`, `IAM_IDENTITY_CENTER`. +* `userRole` - (Required) Amazon QuickSight role for the user. Valid values: `READER`, `AUTHOR`, `ADMIN`, `READER_PRO`, `AUTHOR_PRO`, `ADMIN_PRO`, `RESTRICTED_AUTHOR`, `RESTRICTED_READER`. 
The following arguments are optional: -* `awsAccountId` - (Optional) ID for the AWS account that the user is in. Use the ID for the AWS account that contains your Amazon QuickSight account. +* `awsAccountId` - (Optional, Forces new resource) AWS account ID. Defaults to automatically determined account ID of the Terraform AWS provider. * `iamArn` - (Optional) ARN of the IAM user or role that you are registering with Amazon QuickSight. Required only for users with an identity type of `IAM`. * `namespace` - (Optional) The Amazon Quicksight namespace to create the user in. Defaults to `default`. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `sessionName` - (Optional) Name of the IAM session to use when assuming roles that can embed QuickSight dashboards. Only valid for registering users using an assumed IAM role. Additionally, if registering multiple users using the same IAM role, each user needs to have a unique session name. * `userName` - (Optional) Amazon QuickSight user name that you want to create for the user you are registering. Required only for users with an identity type of `QUICKSIGHT`. @@ -119,4 +120,4 @@ This resource exports the following attributes in addition to the arguments abov You cannot import this resource. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/quicksight_user_custom_permission.html.markdown b/website/docs/cdktf/typescript/r/quicksight_user_custom_permission.html.markdown new file mode 100644 index 000000000000..938f502a9957 --- /dev/null +++ b/website/docs/cdktf/typescript/r/quicksight_user_custom_permission.html.markdown @@ -0,0 +1,89 @@ +--- +subcategory: "QuickSight" +layout: "aws" +page_title: "AWS: aws_quicksight_user_custom_permission" +description: |- + Manages the custom permissions profile for a user. +--- + + + +# Resource: aws_quicksight_user_custom_permission + +Manages the custom permissions profile for a user. + +## Example Usage + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { Token, TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. + */ +import { QuicksightUserCustomPermission } from "./.gen/providers/aws/quicksight-user-custom-permission"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + new QuicksightUserCustomPermission(this, "example", { + customPermissionsName: Token.asString( + awsQuicksightCustomPermissionsExample.customPermissionsName + ), + userName: Token.asString(awsQuicksightUserExample.userName), + }); + } +} + +``` + +## Argument Reference + +The following arguments are required: + +* `customPermissionsName` - (Required, Forces new resource) Custom permissions profile name. +* `userName` - (Required, Forces new resource) Username of the user. + +The following arguments are optional: + +* `awsAccountId` - (Optional, Forces new resource) AWS account ID. Defaults to automatically determined account ID of the Terraform AWS provider. 
+* `namespace` - (Optional, Forces new resource) Namespace that the user belongs to. Defaults to `default`. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). + +## Attribute Reference + +This resource exports no additional attributes. + +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import QuickSight user custom permissions using a comma-delimited string combining the `awsAccountId`, `namespace` and `userName`. For example: + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. + */ +import { QuicksightUserCustomPermission } from "./.gen/providers/aws/quicksight-user-custom-permission"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + QuicksightUserCustomPermission.generateConfigForImport( + this, + "example", + "012345678901,default,user1" + ); + } +} + +``` + +Using `terraform import`, import QuickSight user custom permissions using a comma-delimited string combining the `awsAccountId`, `namespace`, and `userName`. 
For example: + +```console +% terraform import aws_quicksight_user_custom_permission.example 012345678901,default,user1 +``` + + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/quicksight_vpc_connection.html.markdown b/website/docs/cdktf/typescript/r/quicksight_vpc_connection.html.markdown index 651c0fe78e1e..7b97708ab26a 100644 --- a/website/docs/cdktf/typescript/r/quicksight_vpc_connection.html.markdown +++ b/website/docs/cdktf/typescript/r/quicksight_vpc_connection.html.markdown @@ -92,8 +92,9 @@ The following arguments are required: The following arguments are optional: -* `awsAccountId` - (Optional) AWS account ID. +* `awsAccountId` - (Optional, Forces new resource) AWS account ID. Defaults to automatically determined account ID of the Terraform AWS provider. * `dnsResolvers` - (Optional) A list of IP addresses of DNS resolver endpoints for the VPC connection. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `tags` - (Optional) Key-value map of resource tags. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. 
## Attribute Reference @@ -145,4 +146,4 @@ Using `terraform import`, import QuickSight VPC connection using the AWS account % terraform import aws_quicksight_vpc_connection.example 123456789012,example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/ram_principal_association.html.markdown b/website/docs/cdktf/typescript/r/ram_principal_association.html.markdown index 790dfeea6bd7..3a4068ce4e15 100644 --- a/website/docs/cdktf/typescript/r/ram_principal_association.html.markdown +++ b/website/docs/cdktf/typescript/r/ram_principal_association.html.markdown @@ -88,6 +88,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `principal` - (Required) The principal to associate with the resource share. Possible values are an AWS account ID, an AWS Organizations Organization ARN, or an AWS Organizations Organization Unit ARN. * `resourceShareArn` - (Required) The Amazon Resource Name (ARN) of the resource share. 
@@ -129,4 +130,4 @@ Using `terraform import`, import RAM Principal Associations using their Resource % terraform import aws_ram_principal_association.example arn:aws:ram:eu-west-1:123456789012:resource-share/73da1ab9-b94a-4ba3-8eb4-45917f7f4b12,123456789012 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/ram_resource_association.html.markdown b/website/docs/cdktf/typescript/r/ram_resource_association.html.markdown index 62afb971763c..d31edcff1b0e 100644 --- a/website/docs/cdktf/typescript/r/ram_resource_association.html.markdown +++ b/website/docs/cdktf/typescript/r/ram_resource_association.html.markdown @@ -41,6 +41,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `resourceArn` - (Required) Amazon Resource Name (ARN) of the resource to associate with the RAM Resource Share. * `resourceShareArn` - (Required) Amazon Resource Name (ARN) of the RAM Resource Share. 
@@ -82,4 +83,4 @@ Using `terraform import`, import RAM Resource Associations using their Resource % terraform import aws_ram_resource_association.example arn:aws:ram:eu-west-1:123456789012:resource-share/73da1ab9-b94a-4ba3-8eb4-45917f7f4b12,arn:aws:ec2:eu-west-1:123456789012:subnet/subnet-12345678 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/ram_resource_share.html.markdown b/website/docs/cdktf/typescript/r/ram_resource_share.html.markdown index 3c899e898487..fb7b3be880ef 100644 --- a/website/docs/cdktf/typescript/r/ram_resource_share.html.markdown +++ b/website/docs/cdktf/typescript/r/ram_resource_share.html.markdown @@ -42,6 +42,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The name of the resource share. * `allowExternalPrincipals` - (Optional) Indicates whether principals outside your organization can be associated with a resource share. * `permissionArns` - (Optional) Specifies the Amazon Resource Names (ARNs) of the RAM permission to associate with the resource share. If you do not specify an ARN for the permission, RAM automatically attaches the default version of the permission for each resource type. You can associate only one permission with each resource type included in the resource share. 
@@ -87,4 +88,4 @@ Using `terraform import`, import resource shares using the `arn` of the resource % terraform import aws_ram_resource_share.example arn:aws:ram:eu-west-1:123456789012:resource-share/73da1ab9-b94a-4ba3-8eb4-45917f7f4b12 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/ram_resource_share_accepter.html.markdown b/website/docs/cdktf/typescript/r/ram_resource_share_accepter.html.markdown index 9b48cd9c19b3..ffc7c1b616d4 100644 --- a/website/docs/cdktf/typescript/r/ram_resource_share_accepter.html.markdown +++ b/website/docs/cdktf/typescript/r/ram_resource_share_accepter.html.markdown @@ -67,6 +67,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `shareArn` - (Required) The ARN of the resource share. 
## Attribute Reference @@ -113,4 +114,4 @@ Using `terraform import`, import resource share accepters using the resource sha % terraform import aws_ram_resource_share_accepter.example arn:aws:ram:us-east-1:123456789012:resource-share/c4b56393-e8d9-89d9-6dc9-883752de4767 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/rbin_rule.html.markdown b/website/docs/cdktf/typescript/r/rbin_rule.html.markdown index baf43519c027..a721c9e5433c 100644 --- a/website/docs/cdktf/typescript/r/rbin_rule.html.markdown +++ b/website/docs/cdktf/typescript/r/rbin_rule.html.markdown @@ -29,7 +29,7 @@ class MyConvertedCode extends TerraformStack { constructor(scope: Construct, name: string) { super(scope, name); new RbinRule(this, "example", { - description: "example_rule", + description: "Example tag-level retention rule", resourceTags: [ { resourceTagKey: "tag_key", @@ -50,35 +50,73 @@ class MyConvertedCode extends TerraformStack { ``` +### Region-Level Retention Rule + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. 
+ */ +import { RbinRule } from "./.gen/providers/aws/rbin-rule"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + new RbinRule(this, "example", { + description: "Example region-level retention rule with exclusion tags", + excludeResourceTags: [ + { + resourceTagKey: "tag_key", + resourceTagValue: "tag_value", + }, + ], + resourceType: "EC2_IMAGE", + retentionPeriod: { + retentionPeriodUnit: "DAYS", + retentionPeriodValue: 10, + }, + tags: { + test_tag_key: "test_tag_value", + }, + }); + } +} + +``` + ## Argument Reference The following arguments are required: -* `resourceType` - (Required) The resource type to be retained by the retention rule. Valid values are `EBS_SNAPSHOT` and `EC2_IMAGE`. +* `resourceType` - (Required) Resource type to be retained by the retention rule. Valid values are `EBS_SNAPSHOT` and `EC2_IMAGE`. * `retentionPeriod` - (Required) Information about the retention period for which the retention rule is to retain resources. See [`retentionPeriod`](#retention_period) below. The following arguments are optional: -* `description` - (Optional) The retention rule description. -* `resourceTags` - (Optional) Specifies the resource tags to use to identify resources that are to be retained by a tag-level retention rule. See [`resourceTags`](#resource_tags) below. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `description` - (Optional) Retention rule description. +* `excludeResourceTags` - (Optional) Exclusion tags to use to identify resources that are to be excluded, or ignored, by a Region-level retention rule. See [`excludeResourceTags`](#exclude_resource_tags) below. 
* `lockConfiguration` - (Optional) Information about the retention rule lock configuration. See [`lockConfiguration`](#lock_configuration) below. +* `resourceTags` - (Optional) Resource tags to use to identify resources that are to be retained by a tag-level retention rule. See [`resourceTags`](#resource_tags) below. ### retention_period The following arguments are required: -* `retentionPeriodUnit` - (Required) The unit of time in which the retention period is measured. Currently, only DAYS is supported. -* `retentionPeriodValue` - (Required) The period value for which the retention rule is to retain resources. The period is measured using the unit specified for RetentionPeriodUnit. +* `retentionPeriodUnit` - (Required) Unit of time in which the retention period is measured. Currently, only DAYS is supported. +* `retentionPeriodValue` - (Required) Period value for which the retention rule is to retain resources. The period is measured using the unit specified for RetentionPeriodUnit. -### resource_tags +### exclude_resource_tags The following argument is required: -* `resourceTagKey` - (Required) The tag key. +* `resourceTagKey` - (Required) Tag key. The following argument is optional: -* `resourceTagValue` - (Optional) The tag value. +* `resourceTagValue` - (Optional) Tag value. ### lock_configuration @@ -90,17 +128,27 @@ The following argument is required: The following arguments are required: -* `unlockDelayUnit` - (Required) The unit of time in which to measure the unlock delay. Currently, the unlock delay can be measure only in days. -* `unlockDelayValue` - (Required) The unlock delay period, measured in the unit specified for UnlockDelayUnit. +* `unlockDelayUnit` - (Required) Unit of time in which to measure the unlock delay. Currently, the unlock delay can be measured only in days. +* `unlockDelayValue` - (Required) Unlock delay period, measured in the unit specified for UnlockDelayUnit. 
+ +### resource_tags + +The following argument is required: + +* `resourceTagKey` - (Required) Tag key. + +The following argument is optional: + +* `resourceTagValue` - (Optional) Tag value. ## Attribute Reference This resource exports the following attributes in addition to the arguments above: * `id` - (String) ID of the Rule. -* `lockEndTime` - (Timestamp) The date and time at which the unlock delay is set to expire. Only returned for retention rules that have been unlocked and that are still within the unlock delay period. -* `lockState` - (Optional) The lock state of the retention rules to list. Only retention rules with the specified lock state are returned. Valid values are `locked`, `pending_unlock`, `unlocked`. -* `status` - (String) The state of the retention rule. Only retention rules that are in the `available` state retain resources. Valid values include `pending` and `available`. +* `lockEndTime` - (Timestamp) Date and time at which the unlock delay is set to expire. Only returned for retention rules that have been unlocked and that are still within the unlock delay period. +* `lockState` - (Optional) Lock state of the retention rules to list. Only retention rules with the specified lock state are returned. Valid values are `locked`, `pending_unlock`, `unlocked`. +* `status` - (String) State of the retention rule. Only retention rules that are in the `available` state retain resources. Valid values include `pending` and `available`. ## Import @@ -130,4 +178,4 @@ Using `terraform import`, import RBin Rule using the `id`. 
For example: % terraform import aws_rbin_rule.example examplerule ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/rds_certificate.html.markdown b/website/docs/cdktf/typescript/r/rds_certificate.html.markdown index 4994af788d98..79f149732332 100644 --- a/website/docs/cdktf/typescript/r/rds_certificate.html.markdown +++ b/website/docs/cdktf/typescript/r/rds_certificate.html.markdown @@ -38,8 +38,9 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -The following arguments are required: +This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `certificateIdentifier` - (Required) Certificate identifier. For example, `rds-ca-rsa4096-g1`. Refer to [AWS RDS (Relational Database) Certificate Identifier](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/UsingWithRDS.SSL.html#UsingWithRDS.SSL.CertificateIdentifier) for more information. 
## Attribute Reference @@ -74,4 +75,4 @@ Using `terraform import`, import the RDS certificate override using the `region` % terraform import aws_rds_certificate.example us-west-2 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/rds_cluster.html.markdown b/website/docs/cdktf/typescript/r/rds_cluster.html.markdown index 8caea45b2056..8e95c580b63f 100644 --- a/website/docs/cdktf/typescript/r/rds_cluster.html.markdown +++ b/website/docs/cdktf/typescript/r/rds_cluster.html.markdown @@ -27,7 +27,7 @@ Changes to an RDS Cluster can occur when you manually change a parameter, such a ~> **NOTE on RDS Clusters and RDS Cluster Role Associations:** Terraform provides both a standalone [RDS Cluster Role Association](rds_cluster_role_association.html) - (an association between an RDS Cluster and a single IAM Role) and an RDS Cluster resource with `iamRoles` attributes. Use one resource or the other to associate IAM Roles and RDS Clusters. Not doing so will cause a conflict of associations and will result in the association being overwritten. --> **Note:** Write-Only argument `masterPasswordWo` is available to use in place of `masterPassword`. Write-Only arguments are supported in HashiCorp Terraform 1.11.0 and later. [Learn more](https://developer.hashicorp.com/terraform/language/v1.11.x/resources/ephemeral#write-only-arguments). +-> **Note:** Write-Only argument `masterPasswordWo` is available to use in place of `masterPassword`. Write-Only arguments are supported in HashiCorp Terraform 1.11.0 and later. [Learn more](https://developer.hashicorp.com/terraform/language/resources/ephemeral#write-only-arguments). ## Example Usage @@ -328,6 +328,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `allocatedStorage` - (Optional, Required for Multi-AZ DB cluster) The amount of storage in gibibytes (GiB) to allocate to each DB instance in the Multi-AZ DB cluster. * `allowMajorVersionUpgrade` - (Optional) Enable to allow major engine version upgrades when changing engine versions. Defaults to `false`. * `applyImmediately` - (Optional) Specifies whether any cluster modifications are applied immediately, or during the next maintenance window. Default is `false`. See [Amazon RDS Documentation for more information.](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Overview.DBInstance.Modifying.html) @@ -341,7 +342,7 @@ This resource supports the following arguments: * `clusterIdentifier` - (Optional, Forces new resources) The cluster identifier. If omitted, Terraform will assign a random, unique identifier. * `clusterIdentifierPrefix` - (Optional, Forces new resource) Creates a unique cluster identifier beginning with the specified prefix. Conflicts with `clusterIdentifier`. * `clusterScalabilityType` - (Optional, Forces new resources) Specifies the scalability mode of the Aurora DB cluster. When set to `limitless`, the cluster operates as an Aurora Limitless Database. When set to `standard` (the default), the cluster uses normal DB instance creation. Valid values: `limitless`, `standard`. -* `copyTagsToSnapshot` – (Optional, boolean) Copy all Cluster `tags` to snapshots. Default is `false`. +* `copyTagsToSnapshot` - (Optional, boolean) Copy all Cluster `tags` to snapshots. Default is `false`. * `databaseInsightsMode` - (Optional) The mode of Database Insights to enable for the DB cluster. Valid values: `standard`, `advanced`. * `databaseName` - (Optional) Name for an automatically created database on cluster creation. 
There are different naming restrictions per database engine: [RDS Naming Constraints][5] * `dbClusterInstanceClass` - (Optional, Required for Multi-AZ DB cluster) The compute and memory capacity of each DB instance in the Multi-AZ DB cluster, for example `db.m6g.xlarge`. Not all DB instance classes are available in all AWS Regions, or for all database engines. For the full list of DB instance classes and availability for your engine, see [DB instance class](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.DBInstanceClass.html) in the Amazon RDS User Guide. @@ -359,7 +360,7 @@ This resource supports the following arguments: * `enableGlobalWriteForwarding` - (Optional) Whether cluster should forward writes to an associated global cluster. Applied to secondary clusters to enable them to forward writes to an [`aws_rds_global_cluster`](/docs/providers/aws/r/rds_global_cluster.html)'s primary cluster. See the [User Guide for Aurora](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-global-database-write-forwarding.html) for more information. * `enableHttpEndpoint` - (Optional) Enable HTTP endpoint (data API). Only valid for some combinations of `engineMode`, `engine` and `engineVersion` and only available in some regions. See the [Region and version availability](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/data-api.html#data-api.regions) section of the documentation. This option also does not work with any of these options specified: `snapshotIdentifier`, `replicationSourceIdentifier`, `s3Import`. * `enableLocalWriteForwarding` - (Optional) Whether read replicas can forward write operations to the writer DB instance in the DB cluster. By default, write operations aren't allowed on reader DB instances.. See the [User Guide for Aurora](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-mysql-write-forwarding.html) for more information. 
**NOTE:** Local write forwarding requires Aurora MySQL version 3.04 or higher. -* `enabledCloudwatchLogsExports` - (Optional) Set of log types to export to cloudwatch. If omitted, no logs will be exported. The following log types are supported: `audit`, `error`, `general`, `slowquery`, `iam-db-auth-error`, `postgresql` (PostgreSQL). +* `enabledCloudwatchLogsExports` - (Optional) Set of log types to export to cloudwatch. If omitted, no logs will be exported. The following log types are supported: `audit`, `error`, `general`, `iam-db-auth-error`, `instance`, `postgresql` (PostgreSQL), `slowquery`. * `engineMode` - (Optional) Database engine mode. Valid values: `global` (only valid for Aurora MySQL 1.21 and earlier), `parallelquery`, `provisioned`, `serverless`. Defaults to: `provisioned`. Specify an empty value (`""`) for no engine mode. See the [RDS User Guide](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-serverless.html) for limitations when using `serverless`. * `engineLifecycleSupport` - (Optional) The life cycle type for this DB instance. This setting is valid for cluster types Aurora DB clusters and Multi-AZ DB clusters. Valid values are `open-source-rds-extended-support`, `open-source-rds-extended-support-disabled`. Default value is `open-source-rds-extended-support`. [Using Amazon RDS Extended Support]: https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/extended-support.html * `engineVersion` - (Optional) Database engine version. Updating this argument results in an outage. See the [Aurora MySQL](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/AuroraMySQL.Updates.html) and [Aurora Postgres](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/AuroraPostgreSQL.Updates.html) documentation for your configured engine to determine this value, or by running `aws rds describe-db-engine-versions`. For example with Aurora MySQL 2, a potential value for this argument is `5.7.mysql_aurora.2.03.2`. 
The value can contain a partial version where supported by the API. The actual engine version used is returned in the attribute `engineVersionActual`, , see [Attribute Reference](#attribute-reference) below. @@ -579,7 +580,7 @@ This resource exports the following attributes in addition to the arguments abov * `id` - RDS Cluster Identifier * `clusterIdentifier` - RDS Cluster Identifier * `clusterResourceId` - RDS Cluster Resource ID -* `clusterMembers` – List of RDS Instances that are a part of this cluster +* `clusterMembers` - List of RDS Instances that are a part of this cluster * `availabilityZones` - Availability zone of the instance * `backupRetentionPeriod` - Backup retention period * `caCertificateIdentifier` - CA identifier of the CA certificate used for the DB instance's server certificate @@ -658,4 +659,4 @@ Using `terraform import`, import RDS Clusters using the `clusterIdentifier`. For % terraform import aws_rds_cluster.aurora_cluster aurora-prod-cluster ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/rds_cluster_activity_stream.html.markdown b/website/docs/cdktf/typescript/r/rds_cluster_activity_stream.html.markdown index b2f35bfa5708..f1d528823f0b 100644 --- a/website/docs/cdktf/typescript/r/rds_cluster_activity_stream.html.markdown +++ b/website/docs/cdktf/typescript/r/rds_cluster_activity_stream.html.markdown @@ -86,6 +86,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `resourceArn` - (Required, Forces new resources) The Amazon Resource Name (ARN) of the DB cluster. 
* `mode` - (Required, Forces new resources) Specifies the mode of the database activity stream. Database events such as a change or access generate an activity stream event. The database session can handle these events either synchronously or asynchronously. One of: `sync`, `async`. * `kmsKeyId` - (Required, Forces new resources) The AWS KMS key identifier for encrypting messages in the database activity stream. The AWS KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key. @@ -137,4 +138,4 @@ Using `terraform import`, import RDS Aurora Cluster Database Activity Streams us [2]: https://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_StartActivityStream.html [3]: https://docs.aws.amazon.com/cli/latest/reference/rds/start-activity-stream.html - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/rds_cluster_endpoint.html.markdown b/website/docs/cdktf/typescript/r/rds_cluster_endpoint.html.markdown index 02024174010b..676a17add15a 100644 --- a/website/docs/cdktf/typescript/r/rds_cluster_endpoint.html.markdown +++ b/website/docs/cdktf/typescript/r/rds_cluster_endpoint.html.markdown @@ -87,6 +87,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `clusterIdentifier` - (Required, Forces new resources) The cluster identifier. * `clusterEndpointIdentifier` - (Required, Forces new resources) The identifier to use for the new endpoint. This parameter is stored as a lowercase string. * `customEndpointType` - (Required) The type of the endpoint. One of: READER , ANY . 
@@ -140,4 +141,4 @@ Using `terraform import`, import RDS Clusters Endpoint using the `clusterEndpoin [1]: https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/Aurora.Overview.Endpoints.html#Aurora.Endpoints.Cluster - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/rds_cluster_instance.html.markdown b/website/docs/cdktf/typescript/r/rds_cluster_instance.html.markdown index 4f2fec687240..a70425888f8e 100644 --- a/website/docs/cdktf/typescript/r/rds_cluster_instance.html.markdown +++ b/website/docs/cdktf/typescript/r/rds_cluster_instance.html.markdown @@ -75,12 +75,13 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `applyImmediately` - (Optional) Specifies whether any database modifications are applied immediately, or during the next maintenance window. Default is`false`. * `autoMinorVersionUpgrade` - (Optional) Indicates that minor engine upgrades will be applied automatically to the DB instance during the maintenance window. Default `true`. * `availabilityZone` - (Optional, Computed, Forces new resource) EC2 Availability Zone that the DB instance is created in. See [docs](https://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_CreateDBInstance.html) about the details. * `caCertIdentifier` - (Optional) Identifier of the CA certificate for the DB instance. * `clusterIdentifier` - (Required, Forces new resource) Identifier of the [`aws_rds_cluster`](/docs/providers/aws/r/rds_cluster.html) in which to launch this instance. 
-* `copyTagsToSnapshot` – (Optional, boolean) Indicates whether to copy all of the user-defined tags from the DB instance to snapshots of the DB instance. Default `false`. +* `copyTagsToSnapshot` - (Optional, boolean) Indicates whether to copy all of the user-defined tags from the DB instance to snapshots of the DB instance. Default `false`. * `customIamInstanceProfile` - (Optional) Instance profile associated with the underlying Amazon EC2 instance of an RDS Custom DB instance. * `dbParameterGroupName` - (Optional) Name of the DB parameter group to associate with this instance. * `dbSubnetGroupName` - (Optional, Forces new resource) Specifies the DB subnet group to associate with this DB instance. The default behavior varies depending on whether `dbSubnetGroupName` is specified. Please refer to official [AWS documentation](https://docs.aws.amazon.com/cli/latest/reference/rds/create-db-instance.html) to understand how `dbSubnetGroupName` and `publiclyAccessible` parameters affect DB instance behaviour. **NOTE:** This must match the `dbSubnetGroupName` of the attached [`aws_rds_cluster`](/docs/providers/aws/r/rds_cluster.html). @@ -113,7 +114,7 @@ This resource exports the following attributes in addition to the arguments abov * `clusterIdentifier` - RDS Cluster Identifier * `identifier` - Instance identifier * `id` - Instance identifier -* `writer` – Boolean indicating if this instance is writable. `False` indicates this instance is a read replica. +* `writer` - Boolean indicating if this instance is writable. `False` indicates this instance is a read replica. * `availabilityZone` - Availability zone of the instance * `endpoint` - DNS address for this instance. May not be writable * `engine` - Database engine @@ -174,4 +175,4 @@ Using `terraform import`, import RDS Cluster Instances using the `identifier`. 
F % terraform import aws_rds_cluster_instance.prod_instance_1 aurora-cluster-instance-1 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/rds_cluster_parameter_group.html.markdown b/website/docs/cdktf/typescript/r/rds_cluster_parameter_group.html.markdown index 39b8958c8873..5d01b9ff240c 100644 --- a/website/docs/cdktf/typescript/r/rds_cluster_parameter_group.html.markdown +++ b/website/docs/cdktf/typescript/r/rds_cluster_parameter_group.html.markdown @@ -53,6 +53,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Optional, Forces new resource) The name of the DB cluster parameter group. If omitted, Terraform will assign a random, unique name. * `namePrefix` - (Optional, Forces new resource) Creates a unique name beginning with the specified prefix. Conflicts with `name`. * `family` - (Required) The family of the DB cluster parameter group. @@ -108,4 +109,4 @@ Using `terraform import`, import RDS Cluster Parameter Groups using the `name`. 
% terraform import aws_rds_cluster_parameter_group.cluster_pg production-pg-1 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/rds_cluster_role_association.html.markdown b/website/docs/cdktf/typescript/r/rds_cluster_role_association.html.markdown index 4bc2aeaacb20..7ef2312c842c 100644 --- a/website/docs/cdktf/typescript/r/rds_cluster_role_association.html.markdown +++ b/website/docs/cdktf/typescript/r/rds_cluster_role_association.html.markdown @@ -43,8 +43,9 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `dbClusterIdentifier` - (Required) DB Cluster Identifier to associate with the IAM Role. -* `featureName` - (Required) Name of the feature for association. This can be found in the AWS documentation relevant to the integration or a full list is available in the `SupportedFeatureNames` list returned by [AWS CLI rds describe-db-engine-versions](https://docs.aws.amazon.com/cli/latest/reference/rds/describe-db-engine-versions.html). +* `featureName` - (Optional) Name of the feature for association. This can be found in the AWS documentation relevant to the integration or a full list is available in the `SupportedFeatureNames` list returned by [AWS CLI rds describe-db-engine-versions](https://docs.aws.amazon.com/cli/latest/reference/rds/describe-db-engine-versions.html). * `roleArn` - (Required) Amazon Resource Name (ARN) of the IAM Role to associate with the DB Cluster. 
## Attribute Reference @@ -92,4 +93,4 @@ Using `terraform import`, import `aws_rds_cluster_role_association` using the DB % terraform import aws_rds_cluster_role_association.example my-db-cluster,arn:aws:iam::123456789012:role/my-role ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/rds_cluster_snapshot_copy.html.markdown b/website/docs/cdktf/typescript/r/rds_cluster_snapshot_copy.html.markdown index 4720b03dca07..ac16a44a6be0 100644 --- a/website/docs/cdktf/typescript/r/rds_cluster_snapshot_copy.html.markdown +++ b/website/docs/cdktf/typescript/r/rds_cluster_snapshot_copy.html.markdown @@ -17,14 +17,14 @@ Manages an RDS database cluster snapshot copy. For managing RDS database instanc ```typescript // DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug import { Construct } from "constructs"; -import { TerraformStack } from "cdktf"; +import { Token, TerraformStack } from "cdktf"; /* * Provider bindings are generated by running `cdktf get`. * See https://cdk.tf/provider-generation for more details. */ -import { RdsClusterSnapshotCopy } from "./.gen/providers/aws/"; import { DbClusterSnapshot } from "./.gen/providers/aws/db-cluster-snapshot"; import { RdsCluster } from "./.gen/providers/aws/rds-cluster"; +import { RdsClusterSnapshotCopy } from "./.gen/providers/aws/rds-cluster-snapshot-copy"; class MyConvertedCode extends TerraformStack { constructor(scope: Construct, name: string) { super(scope, name); @@ -50,9 +50,10 @@ class MyConvertedCode extends TerraformStack { this, "example_2", { - source_db_cluster_snapshot_identifier: - awsDbClusterSnapshotExample.dbClusterSnapshotArn, - target_db_cluster_snapshot_identifier: "example-copy", + sourceDbClusterSnapshotIdentifier: Token.asString( + awsDbClusterSnapshotExample.dbClusterSnapshotArn + ), + targetDbClusterSnapshotIdentifier: "example-copy", } ); /*This allows the Terraform resource name to match the original name. 
You can remove the call if you don't need them to match.*/ @@ -66,11 +67,12 @@ class MyConvertedCode extends TerraformStack { The following arguments are required: -* `source_db_cluster_snapshot_identifier` - (Required) Identifier of the source snapshot. -* `target_db_cluster_snapshot_identifier` - (Required) Identifier for the snapshot. +* `sourceDbClusterSnapshotIdentifier` - (Required) Identifier of the source snapshot. +* `targetDbClusterSnapshotIdentifier` - (Required) Identifier for the snapshot. The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `copyTags` - (Optional) Whether to copy existing tags. Defaults to `false`. * `destinationRegion` - (Optional) The Destination region to place snapshot copy. * `kmsKeyId` - (Optional) KMS key ID. @@ -91,7 +93,7 @@ This resource exports the following attributes in addition to the arguments abov * `kmsKeyId` - ARN for the KMS encryption key. * `licenseModel` - License model information for the restored DB instance. * `sharedAccounts` - (Optional) List of AWS Account IDs to share the snapshot with. Use `all` to make the snapshot public. -* `source_db_cluster_snapshot_identifier` - DB snapshot ARN that the DB cluster snapshot was copied from. It only has value in case of cross customer or cross region copy. +* `sourceDbClusterSnapshotIdentifier` - DB snapshot ARN that the DB cluster snapshot was copied from. It only has value in case of cross customer or cross region copy. * `storageEncrypted` - Specifies whether the DB cluster snapshot is encrypted. * `storageType` - Specifies the storage type associated with DB cluster snapshot. 
* `tagsAll` - A map of tags assigned to the resource, including those inherited from the provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). @@ -115,7 +117,7 @@ import { TerraformStack } from "cdktf"; * Provider bindings are generated by running `cdktf get`. * See https://cdk.tf/provider-generation for more details. */ -import { RdsClusterSnapshotCopy } from "./.gen/providers/aws/"; +import { RdsClusterSnapshotCopy } from "./.gen/providers/aws/rds-cluster-snapshot-copy"; class MyConvertedCode extends TerraformStack { constructor(scope: Construct, name: string) { super(scope, name); @@ -135,4 +137,4 @@ Using `terraform import`, import `aws_rds_cluster_snapshot_copy` using the `id`. % terraform import aws_rds_cluster_snapshot_copy.example my-snapshot ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/rds_custom_db_engine_version.markdown b/website/docs/cdktf/typescript/r/rds_custom_db_engine_version.markdown index f40926006028..8e0282cbad82 100644 --- a/website/docs/cdktf/typescript/r/rds_custom_db_engine_version.markdown +++ b/website/docs/cdktf/typescript/r/rds_custom_db_engine_version.markdown @@ -157,6 +157,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `databaseInstallationFilesS3BucketName` - (Required) The name of the Amazon S3 bucket that contains the database installation files. * `databaseInstallationFilesS3Prefix` - (Required) The prefix for the Amazon S3 bucket that contains the database installation files. 
* `description` - (Optional) The description of the CEV. @@ -222,4 +223,4 @@ Using `terraform import`, import custom engine versions for Amazon RDS custom us % terraform import aws_rds_custom_db_engine_version.example custom-oracle-ee-cdb:19.cdb_cev1 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/rds_export_task.html.markdown b/website/docs/cdktf/typescript/r/rds_export_task.html.markdown index b436c4fe2dce..8ac310cb7037 100644 --- a/website/docs/cdktf/typescript/r/rds_export_task.html.markdown +++ b/website/docs/cdktf/typescript/r/rds_export_task.html.markdown @@ -184,6 +184,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `exportOnly` - (Optional) Data to be exported from the snapshot. If this parameter is not provided, all the snapshot data is exported. Valid values are documented in the [AWS StartExportTask API documentation](https://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_StartExportTask.html#API_StartExportTask_RequestParameters). * `s3Prefix` - (Optional) Amazon S3 bucket prefix to use as the file name and path of the exported snapshot. 
@@ -229,4 +230,4 @@ Using `terraform import`, import a RDS (Relational Database) Export Task using t % terraform import aws_rds_export_task.example example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/rds_global_cluster.html.markdown b/website/docs/cdktf/typescript/r/rds_global_cluster.html.markdown index c89d9167b87b..9b24516f0015 100644 --- a/website/docs/cdktf/typescript/r/rds_global_cluster.html.markdown +++ b/website/docs/cdktf/typescript/r/rds_global_cluster.html.markdown @@ -281,19 +281,25 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -This resource supports the following arguments: +The following arguments are required: * `globalClusterIdentifier` - (Required, Forces new resources) Global cluster identifier. + +The following arguments are optional: + * `databaseName` - (Optional, Forces new resources) Name for an automatically created database on cluster creation. Terraform will only perform drift detection if a configuration value is provided. * `deletionProtection` - (Optional) If the Global Cluster should have deletion protection enabled. The database can't be deleted when this value is set to `true`. The default is `false`. * `engine` - (Optional, Forces new resources) Name of the database engine to be used for this DB cluster. Terraform will only perform drift detection if a configuration value is provided. Valid values: `aurora`, `aurora-mysql`, `aurora-postgresql`. Defaults to `aurora`. Conflicts with `sourceDbClusterIdentifier`. * `engineLifecycleSupport` - (Optional) The life cycle type for this DB instance. This setting applies only to Aurora PostgreSQL-based global databases. Valid values are `open-source-rds-extended-support`, `open-source-rds-extended-support-disabled`. Default value is `open-source-rds-extended-support`. 
[Using Amazon RDS Extended Support]: https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/extended-support.html * `engineVersion` - (Optional) Engine version of the Aurora global database. The `engine`, `engineVersion`, and `instanceClass` (on the `aws_rds_cluster_instance`) must together support global databases. See [Using Amazon Aurora global databases](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-global-database.html) for more information. By upgrading the engine version, Terraform will upgrade cluster members. **NOTE:** To avoid an `inconsistent final plan` error while upgrading, use the `lifecycle` `ignore_changes` for `engineVersion` meta argument on the associated `aws_rds_cluster` resource as shown above in [Upgrading Engine Versions](#upgrading-engine-versions) example. * `forceDestroy` - (Optional) Enable to remove DB Cluster members from Global Cluster on destroy. Required with `sourceDbClusterIdentifier`. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `sourceDbClusterIdentifier` - (Optional) Amazon Resource Name (ARN) to use as the primary DB Cluster of the Global Cluster on creation. Terraform cannot perform drift detection of this value. **NOTE:** After initial creation, this argument can be removed and replaced with `engine` and `engineVersion`. This allows upgrading the engine version of the Global Cluster. * `storageEncrypted` - (Optional, Forces new resources) Specifies whether the DB cluster is encrypted. The default is `false` unless `sourceDbClusterIdentifier` is specified and encrypted. Terraform will only perform drift detection if a configuration value is provided. * `tags` - (Optional) A map of tags to assign to the DB cluster. 
If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. +~> When both `sourceDbClusterIdentifier` and `engine`/`engineVersion` are set, all engine related values will be ignored during creation. The global cluster will inherit the `engine` and `engineVersion` values from the source cluster. After the first apply, any differences between the inherited and configured values will trigger an in-place update. + ## Attribute Reference This resource exports the following attributes in addition to the arguments above: @@ -373,4 +379,4 @@ class MyConvertedCode extends TerraformStack { ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/rds_instance_state.html.markdown b/website/docs/cdktf/typescript/r/rds_instance_state.html.markdown index cc68f4f2b475..2bd7f59cc907 100644 --- a/website/docs/cdktf/typescript/r/rds_instance_state.html.markdown +++ b/website/docs/cdktf/typescript/r/rds_instance_state.html.markdown @@ -41,16 +41,15 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -The following arguments are required: +This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `identifier` - (Required) DB Instance Identifier * `state` - (Required) Configured state of the DB Instance. Valid values are `available` and `stopped`. 
## Attribute Reference -This resource exports the following attributes in addition to the arguments above: - -* `identifier` - DB Instance Identifier +This resource exports no additional attributes. ## Timeouts @@ -61,7 +60,7 @@ This resource exports the following attributes in addition to the arguments abov ## Import -In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import RDS (Relational Database) RDS Instance State using the `example_id_arg`. For example: +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import RDS (Relational Database) RDS Instance State using the `identifier`. For example: ```typescript // DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug @@ -85,10 +84,10 @@ class MyConvertedCode extends TerraformStack { ``` -Using `terraform import`, import RDS (Relational Database) RDS Instance State using the `example_id_arg`. For example: +Using `terraform import`, import RDS (Relational Database) RDS Instance State using the `identifier`. For example: ```console % terraform import aws_rds_instance_state.example rds_instance_state-id-12345678 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/rds_integration.html.markdown b/website/docs/cdktf/typescript/r/rds_integration.html.markdown index aeb8b172dac3..133f712053b6 100644 --- a/website/docs/cdktf/typescript/r/rds_integration.html.markdown +++ b/website/docs/cdktf/typescript/r/rds_integration.html.markdown @@ -136,6 +136,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `additionalEncryptionContext` - (Optional, Forces new resources) Set of non-secret key–value pairs that contains additional contextual information about the data. For more information, see the [User Guide](https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context). You can only include this parameter if you specify the `kmsKeyId` parameter. @@ -156,7 +157,7 @@ For more detailed documentation about each argument, refer to the [AWS official This resource exports the following attributes in addition to the arguments above: * `arn` - ARN of the Integration. -* `id` - ID of the Integration. +* `id` - (**Deprecated**, use `arn` instead) ARN of the Integration. * `tagsAll` - A map of tags assigned to the resource, including those inherited from the provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). ## Timeouts @@ -169,6 +170,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_rds_integration.example + identity = { + "arn" = "arn:aws:rds:us-east-1:123456789012:integration:12345678-1234-1234-1234-123456789012" + } +} + +resource "aws_rds_integration" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the RDS integration. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import RDS (Relational Database) Integration using the `arn`. 
For example: ```typescript @@ -199,4 +221,4 @@ Using `terraform import`, import RDS (Relational Database) Integration using the % terraform import aws_rds_integration.example arn:aws:rds:us-west-2:123456789012:integration:abcdefgh-0000-1111-2222-123456789012 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/rds_reserved_instance.html.markdown b/website/docs/cdktf/typescript/r/rds_reserved_instance.html.markdown index 70be4344ac62..77d777d6d9d7 100644 --- a/website/docs/cdktf/typescript/r/rds_reserved_instance.html.markdown +++ b/website/docs/cdktf/typescript/r/rds_reserved_instance.html.markdown @@ -56,6 +56,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `instanceCount` - (Optional) Number of instances to reserve. Default value is `1`. * `reservationId` - (Optional) Customer-specified identifier to track this reservation. * `tags` - (Optional) Map of tags to assign to the DB reservation. If configured with a provider [`defaultTags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. @@ -68,7 +69,7 @@ This resource exports the following attributes in addition to the arguments abov * `id` - Unique identifier for the reservation. same as `reservationId`. * `currencyCode` - Currency code for the reserved DB instance. * `duration` - Duration of the reservation in seconds. -* `fixedPrice` – Fixed price charged for this reserved DB instance. +* `fixedPrice` - Fixed price charged for this reserved DB instance. 
* `dbInstanceClass` - DB instance class for the reserved DB instance. * `leaseId` - Unique identifier for the lease associated with the reserved DB instance. Amazon Web Services Support might request the lease ID for an issue related to a reserved DB instance. * `multiAz` - Whether the reservation applies to Multi-AZ deployments. @@ -120,4 +121,4 @@ Using `terraform import`, import RDS DB Instance Reservations using the `instanc % terraform import aws_rds_reserved_instance.reservation_instance CustomReservationID ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/rds_shard_group.html.markdown b/website/docs/cdktf/typescript/r/rds_shard_group.html.markdown index a6f8b085b0b1..50c702764041 100644 --- a/website/docs/cdktf/typescript/r/rds_shard_group.html.markdown +++ b/website/docs/cdktf/typescript/r/rds_shard_group.html.markdown @@ -60,6 +60,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `computeRedundancy` - (Optional) Specifies whether to create standby DB shard groups for the DB shard group. Valid values are: * `0` - Creates a DB shard group without a standby DB shard group. This is the default value. * `1` - Creates a DB shard group with a standby DB shard group in a different Availability Zone (AZ). @@ -122,4 +123,4 @@ Using `terraform import`, import shard group using the `dbShardGroupIdentifier`. 
% terraform import aws_rds_shard_group.example example-shard-group ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/redshift_authentication_profile.html.markdown b/website/docs/cdktf/typescript/r/redshift_authentication_profile.html.markdown index 3d9764292a18..0fd4168a6688 100644 --- a/website/docs/cdktf/typescript/r/redshift_authentication_profile.html.markdown +++ b/website/docs/cdktf/typescript/r/redshift_authentication_profile.html.markdown @@ -45,6 +45,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `authenticationProfileName` - (Required, Forces new resource) The name of the authentication profile. * `authenticationProfileContent` - (Required) The content of the authentication profile in JSON format. The maximum length of the JSON string is determined by a quota for your account. @@ -86,4 +87,4 @@ Using `terraform import`, import Redshift Authentication by `authenticationProfi % terraform import aws_redshift_authentication_profile.test example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/redshift_cluster.html.markdown b/website/docs/cdktf/typescript/r/redshift_cluster.html.markdown index ebea869c8010..e350df7eca22 100644 --- a/website/docs/cdktf/typescript/r/redshift_cluster.html.markdown +++ b/website/docs/cdktf/typescript/r/redshift_cluster.html.markdown @@ -17,7 +17,7 @@ Provides a Redshift Cluster Resource. 
~> **NOTE:** A Redshift cluster's default IAM role can be managed both by this resource's `defaultIamRoleArn` argument and the [`aws_redshift_cluster_iam_roles`](redshift_cluster_iam_roles.html) resource's `defaultIamRoleArn` argument. Do not configure different values for both arguments. Doing so will cause a conflict of default IAM roles. --> **Note:** Write-Only argument `masterPasswordWo` is available to use in place of `masterPassword`. Write-Only arguments are supported in HashiCorp Terraform 1.11.0 and later. [Learn more](https://developer.hashicorp.com/terraform/language/v1.11.x/resources/ephemeral#write-only-arguments). +-> **Note:** Write-Only argument `masterPasswordWo` is available to use in place of `masterPassword`. Write-Only arguments are supported in HashiCorp Terraform 1.11.0 and later. [Learn more](https://developer.hashicorp.com/terraform/language/resources/ephemeral#write-only-arguments). ## Example Usage @@ -79,6 +79,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `clusterIdentifier` - (Required) The Cluster Identifier. Must be a lower case string. * `databaseName` - (Optional) The name of the first database to be created when the cluster is created. If you do not provide a name, Amazon Redshift will create a default database called `dev`. @@ -122,8 +123,9 @@ This resource supports the following arguments: No longer supported by the AWS API. Always returns `auto`. * `numberOfNodes` - (Optional) The number of compute nodes in the cluster. This parameter is required when the ClusterType parameter is specified as multi-node. Default is 1. 
-* `publiclyAccessible` - (Optional) If true, the cluster can be accessed from a public network. Default is `true`. +* `publiclyAccessible` - (Optional) If true, the cluster can be accessed from a public network. Default is `false`. * `encrypted` - (Optional) If true , the data in the cluster is encrypted at rest. + Default is `true`. * `enhancedVpcRouting` - (Optional) If true , enhanced VPC routing is enabled. * `kmsKeyId` - (Optional) The ARN for the KMS encryption key. When specifying `kmsKeyId`, `encrypted` needs to be set to true. * `elasticIp` - (Optional) The Elastic IP (EIP) address for the cluster. @@ -134,36 +136,13 @@ This resource supports the following arguments: * `snapshotClusterIdentifier` - (Optional) The name of the cluster the source snapshot was created from. * `ownerAccount` - (Optional) The AWS customer account used to create or copy the snapshot. Required if you are restoring a snapshot you do not own, optional if you own the snapshot. * `iamRoles` - (Optional) A list of IAM Role ARNs to associate with the cluster. A Maximum of 10 can be associated to the cluster at any time. -* `logging` - (Optional, **Deprecated**) Logging, documented below. * `maintenanceTrackName` - (Optional) The name of the maintenance track for the restored cluster. When you take a snapshot, the snapshot inherits the MaintenanceTrack value from the cluster. The snapshot might be on a different track than the cluster that was the source for the snapshot. For example, suppose that you take a snapshot of a cluster that is on the current track and then change the cluster to be on the trailing track. In this case, the snapshot and the source cluster are on different tracks. Default value is `current`. * `manualSnapshotRetentionPeriod` - (Optional) The default number of days to retain a manual snapshot. If the value is -1, the snapshot is retained indefinitely. This setting doesn't change the retention period of existing snapshots. Valid values are between `-1` and `3653`. 
Default value is `-1`. -* `snapshotCopy` - (Optional, **Deprecated**) Configuration of automatic copy of snapshots from one region to another. Documented below. * `tags` - (Optional) A map of tags to assign to the resource. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. For more detailed documentation about each argument, refer to the [AWS official documentation](http://docs.aws.amazon.com/cli/latest/reference/redshift/index.html#cli-aws-redshift). -### Nested Blocks - -#### `logging` - -~> The `logging` argument is deprecated. Use the [`aws_redshift_logging`](./redshift_logging.html.markdown) resource instead. This argument will be removed in a future major version. - -* `enable` - (Required) Enables logging information such as queries and connection attempts, for the specified Amazon Redshift cluster. -* `bucketName` - (Optional, required when `enable` is `true` and `logDestinationType` is `s3`) The name of an existing S3 bucket where the log files are to be stored. Must be in the same region as the cluster and the cluster must have read bucket and put object permissions. -For more information on the permissions required for the bucket, please read the AWS [documentation](http://docs.aws.amazon.com/redshift/latest/mgmt/db-auditing.html#db-auditing-enable-logging) -* `s3KeyPrefix` - (Optional) The prefix applied to the log file names. -* `logDestinationType` - (Optional) The log destination type. An enum with possible values of `s3` and `cloudwatch`. -* `logExports` - (Optional) The collection of exported log types. Log types include the connection log, user log and user activity log. Required when `logDestinationType` is `cloudwatch`. Valid log types are `connectionlog`, `userlog`, and `useractivitylog`. 
- -#### `snapshotCopy` - -~> The `snapshotCopy` argument is deprecated. Use the [`aws_redshift_snapshot_copy`](./redshift_snapshot_copy.html.markdown) resource instead. This argument will be removed in a future major version. - -* `destinationRegion` - (Required) The destination region that you want to copy snapshots to. -* `retentionPeriod` - (Optional) The number of days to retain automated snapshots in the destination region after they are copied from the source region. Defaults to `7`. -* `grantName` - (Optional) The name of the snapshot copy grant to use when snapshots of an AWS KMS-encrypted cluster are copied to the destination region. - ## Attribute Reference This resource exports the following attributes in addition to the arguments above: @@ -238,4 +217,4 @@ Using `terraform import`, import Redshift Clusters using the `clusterIdentifier` % terraform import aws_redshift_cluster.myprodcluster tf-redshift-cluster-12345 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/redshift_cluster_iam_roles.html.markdown b/website/docs/cdktf/typescript/r/redshift_cluster_iam_roles.html.markdown index 5d3d7cf17f23..5867f2dcde23 100644 --- a/website/docs/cdktf/typescript/r/redshift_cluster_iam_roles.html.markdown +++ b/website/docs/cdktf/typescript/r/redshift_cluster_iam_roles.html.markdown @@ -43,6 +43,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `clusterIdentifier` - (Required) The name of the Redshift Cluster IAM Roles. * `iamRoleArns` - (Optional) A list of IAM Role ARNs to associate with the cluster. 
A Maximum of 10 can be associated to the cluster at any time. * `defaultIamRoleArn` - (Optional) The Amazon Resource Name (ARN) for the IAM role that was set as default for the cluster when the cluster was created. @@ -85,4 +86,4 @@ Using `terraform import`, import Redshift Cluster IAM Roless using the `clusterI % terraform import aws_redshift_cluster_iam_roles.examplegroup1 example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/redshift_cluster_snapshot.html.markdown b/website/docs/cdktf/typescript/r/redshift_cluster_snapshot.html.markdown index ab3905d89fa0..e52556bacf25 100644 --- a/website/docs/cdktf/typescript/r/redshift_cluster_snapshot.html.markdown +++ b/website/docs/cdktf/typescript/r/redshift_cluster_snapshot.html.markdown @@ -49,6 +49,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `clusterIdentifier` - (Required, Forces new resource) The cluster identifier for which you want a snapshot. * `snapshotIdentifier` - (Required, Forces new resource) A unique identifier for the snapshot that you are requesting. This identifier must be unique for all snapshots within the Amazon Web Services account. * `manualSnapshotRetentionPeriod` - (Optional) The number of days that a manual snapshot is retained. If the value is `-1`, the manual snapshot is retained indefinitely. Valid values are -1 and between `1` and `3653`. 
@@ -92,4 +93,4 @@ Using `terraform import`, import Redshift Cluster Snapshots using `snapshotIdent % terraform import aws_redshift_cluster_snapshot.test example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/redshift_data_share_authorization.html.markdown b/website/docs/cdktf/typescript/r/redshift_data_share_authorization.html.markdown index ca7337748244..cc521705e802 100644 --- a/website/docs/cdktf/typescript/r/redshift_data_share_authorization.html.markdown +++ b/website/docs/cdktf/typescript/r/redshift_data_share_authorization.html.markdown @@ -46,6 +46,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `allowWrites` - (Optional) Whether to allow write operations for a datashare. 
## Attribute Reference @@ -88,4 +89,4 @@ Using `terraform import`, import Redshift Data Share Authorization using the `id % terraform import aws_redshift_data_share_authorization.example arn:aws:redshift:us-west-2:123456789012:datashare:3072dae5-022b-4d45-9cd3-01f010aae4b2/example_share,123456789012 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/redshift_data_share_consumer_association.html.markdown b/website/docs/cdktf/typescript/r/redshift_data_share_consumer_association.html.markdown index 63c6497ba9c4..8c5819a022c2 100644 --- a/website/docs/cdktf/typescript/r/redshift_data_share_consumer_association.html.markdown +++ b/website/docs/cdktf/typescript/r/redshift_data_share_consumer_association.html.markdown @@ -69,6 +69,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `allowWrites` - (Optional) Whether to allow write operations for a datashare. * `associateEntireAccount` - (Optional) Whether the datashare is associated with the entire account. Conflicts with `consumerArn` and `consumerRegion`. * `consumerArn` - (Optional) Amazon Resource Name (ARN) of the consumer that is associated with the datashare. Conflicts with `associateEntireAccount` and `consumerRegion`. 
@@ -114,4 +115,4 @@ Using `terraform import`, import Redshift Data Share Consumer Association using % terraform import aws_redshift_data_share_consumer_association.example arn:aws:redshift:us-west-2:123456789012:datashare:b3bfde75-73fd-408b-9086-d6fccfd6d588/example,,,us-west-2 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/redshift_endpoint_access.html.markdown b/website/docs/cdktf/typescript/r/redshift_endpoint_access.html.markdown index fe912d5eea04..4520ff968ef0 100644 --- a/website/docs/cdktf/typescript/r/redshift_endpoint_access.html.markdown +++ b/website/docs/cdktf/typescript/r/redshift_endpoint_access.html.markdown @@ -42,6 +42,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `clusterIdentifier` - (Required) The cluster identifier of the cluster to access. * `endpointName` - (Required) The Redshift-managed VPC endpoint name. * `resourceOwner` - (Optional) The Amazon Web Services account ID of the owner of the cluster. This is only required if the cluster is in another Amazon Web Services account. @@ -98,4 +99,4 @@ Using `terraform import`, import Redshift endpoint access using the `name`. 
For % terraform import aws_redshift_endpoint_access.example example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/redshift_endpoint_authorization.html.markdown b/website/docs/cdktf/typescript/r/redshift_endpoint_authorization.html.markdown index 5abbd8f4d67f..4cd9bc60df7b 100644 --- a/website/docs/cdktf/typescript/r/redshift_endpoint_authorization.html.markdown +++ b/website/docs/cdktf/typescript/r/redshift_endpoint_authorization.html.markdown @@ -41,6 +41,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `account` - (Required) The Amazon Web Services account ID to grant access to. * `clusterIdentifier` - (Required) The cluster identifier of the cluster to grant access to. * `forceDelete` - (Optional) Indicates whether to force the revoke action. If true, the Redshift-managed VPC endpoints associated with the endpoint authorization are also deleted. Default value is `false`. @@ -88,4 +89,4 @@ Using `terraform import`, import Redshift endpoint authorization using the `id`. 
% terraform import aws_redshift_endpoint_authorization.example 01234567910:cluster-example-id ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/redshift_event_subscription.html.markdown b/website/docs/cdktf/typescript/r/redshift_event_subscription.html.markdown index adb14cbe00f2..4e96a45377f0 100644 --- a/website/docs/cdktf/typescript/r/redshift_event_subscription.html.markdown +++ b/website/docs/cdktf/typescript/r/redshift_event_subscription.html.markdown @@ -72,6 +72,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The name of the Redshift event subscription. * `snsTopicArn` - (Required) The ARN of the SNS topic to send events to. * `sourceIds` - (Optional) A list of identifiers of the event sources for which events will be returned. If not specified, then all sources are included in the response. If specified, a `sourceType` must also be specified. @@ -122,4 +123,4 @@ Using `terraform import`, import Redshift Event Subscriptions using the `name`. 
% terraform import aws_redshift_event_subscription.default redshift-event-sub ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/redshift_hsm_client_certificate.html.markdown b/website/docs/cdktf/typescript/r/redshift_hsm_client_certificate.html.markdown index 3f051d800ad5..a3e5b71e3303 100644 --- a/website/docs/cdktf/typescript/r/redshift_hsm_client_certificate.html.markdown +++ b/website/docs/cdktf/typescript/r/redshift_hsm_client_certificate.html.markdown @@ -38,6 +38,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `hsmClientCertificateIdentifier` - (Required, Forces new resource) The identifier of the HSM client certificate. * `tags` - (Optional) A map of tags to assign to the resource. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. 
@@ -81,4 +82,4 @@ Using `terraform import`, import Redshift HSM Client Certificates using `hsmClie % terraform import aws_redshift_hsm_client_certificate.test example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/redshift_hsm_configuration.html.markdown b/website/docs/cdktf/typescript/r/redshift_hsm_configuration.html.markdown index c7bda39cecf4..52bb8dcc439b 100644 --- a/website/docs/cdktf/typescript/r/redshift_hsm_configuration.html.markdown +++ b/website/docs/cdktf/typescript/r/redshift_hsm_configuration.html.markdown @@ -43,6 +43,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` - (Required, Forces new resource) A text description of the HSM configuration to be created. * `hsmConfigurationIdentifier` - (Required, Forces new resource) The identifier to be assigned to the new Amazon Redshift HSM configuration. * `hsmIpAddress` - (Required, Forces new resource) The IP address that the Amazon Redshift cluster must use to access the HSM. 
@@ -87,4 +88,4 @@ Using `terraform import`, import Redshift HSM Client Certificates using `hsmConf % terraform import aws_redshift_hsm_configuration.example example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/redshift_integration.html.markdown b/website/docs/cdktf/typescript/r/redshift_integration.html.markdown index 3b286d82f366..e3ad759f91a9 100644 --- a/website/docs/cdktf/typescript/r/redshift_integration.html.markdown +++ b/website/docs/cdktf/typescript/r/redshift_integration.html.markdown @@ -177,6 +177,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `additionalEncryptionContext` - (Optional, Forces new resources) Set of non-secret key–value pairs that contains additional contextual information about the data. For more information, see the [User Guide](https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context). You can only include this parameter if you specify the `kmsKeyId` parameter. @@ -235,4 +236,4 @@ Using `terraform import`, import Redshift Integration using the `arn`. 
For examp % terraform import aws_redshift_integration.example arn:aws:redshift:us-west-2:123456789012:integration:abcdefgh-0000-1111-2222-123456789012 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/redshift_logging.html.markdown b/website/docs/cdktf/typescript/r/redshift_logging.html.markdown index 4b436171a74d..ddcfbaf5fe6e 100644 --- a/website/docs/cdktf/typescript/r/redshift_logging.html.markdown +++ b/website/docs/cdktf/typescript/r/redshift_logging.html.markdown @@ -70,6 +70,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `bucketName` - (Optional) Name of an existing S3 bucket where the log files are to be stored. Required when `logDestinationType` is `s3`. Must be in the same region as the cluster and the cluster must have read bucket and put object permissions. For more information on the permissions required for the bucket, please read the AWS [documentation](http://docs.aws.amazon.com/redshift/latest/mgmt/db-auditing.html#db-auditing-enable-logging) * `logDestinationType` - (Optional) Log destination type. Valid values are `s3` and `cloudwatch`. * `logExports` - (Optional) Collection of exported log types. Required when `logDestinationType` is `cloudwatch`. Valid values are `connectionlog`, `useractivitylog`, and `userlog`. @@ -79,7 +80,7 @@ The following arguments are optional: This resource exports the following attributes in addition to the arguments above: -* `id` - Identifier of the source cluster. +* `id` - (**Deprecated**, use `clusterIdentifier` instead) Identifier of the source cluster. 
## Import @@ -113,4 +114,4 @@ Using `terraform import`, import Redshift Logging using the `id`. For example: % terraform import aws_redshift_logging.example cluster-id-12345678 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/redshift_parameter_group.html.markdown b/website/docs/cdktf/typescript/r/redshift_parameter_group.html.markdown index b04b6d96e92c..f526e0f9bbc3 100644 --- a/website/docs/cdktf/typescript/r/redshift_parameter_group.html.markdown +++ b/website/docs/cdktf/typescript/r/redshift_parameter_group.html.markdown @@ -53,6 +53,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The name of the Redshift parameter group. * `family` - (Required) The family of the Redshift parameter group. * `description` - (Optional) The description of the Redshift parameter group. Defaults to "Managed by Terraform". @@ -106,4 +107,4 @@ Using `terraform import`, import Redshift Parameter Groups using the `name`. 
For % terraform import aws_redshift_parameter_group.paramgroup1 parameter-group-test-terraform ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/redshift_partner.html.markdown b/website/docs/cdktf/typescript/r/redshift_partner.html.markdown index 37c79ef5a025..3d158ada262a 100644 --- a/website/docs/cdktf/typescript/r/redshift_partner.html.markdown +++ b/website/docs/cdktf/typescript/r/redshift_partner.html.markdown @@ -41,6 +41,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `accountId` - (Required) The Amazon Web Services account ID that owns the cluster. * `clusterIdentifier` - (Required) The cluster identifier of the cluster that receives data from the partner. * `databaseName` - (Required) The name of the database that receives data from the partner. @@ -86,4 +87,4 @@ Using `terraform import`, import Redshift usage limits using the `id`. 
For examp % terraform import aws_redshift_partner.example 01234567910:cluster-example-id:example:example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/redshift_resource_policy.html.markdown b/website/docs/cdktf/typescript/r/redshift_resource_policy.html.markdown index 62fb5b04e2e8..0c77ad4c8bbc 100644 --- a/website/docs/cdktf/typescript/r/redshift_resource_policy.html.markdown +++ b/website/docs/cdktf/typescript/r/redshift_resource_policy.html.markdown @@ -56,6 +56,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `resourceArn` - (Required) The Amazon Resource Name (ARN) of the account to create or update a resource policy for. * `policy` - (Required) The content of the resource policy being updated. @@ -93,4 +94,4 @@ Using `terraform import`, import Redshift Resource Policies using the `resourceA % terraform import aws_redshift_resource_policy.example example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/redshift_scheduled_action.html.markdown b/website/docs/cdktf/typescript/r/redshift_scheduled_action.html.markdown index 66a9a0b7d06f..614a7d343a33 100644 --- a/website/docs/cdktf/typescript/r/redshift_scheduled_action.html.markdown +++ b/website/docs/cdktf/typescript/r/redshift_scheduled_action.html.markdown @@ -136,6 +136,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The scheduled action name. * `description` - (Optional) The description of the scheduled action. * `enable` - (Optional) Whether to enable the scheduled action. Default is `true` . @@ -207,4 +208,4 @@ Using `terraform import`, import Redshift Scheduled Action using the `name`. For % terraform import aws_redshift_scheduled_action.example tf-redshift-scheduled-action ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/redshift_snapshot_copy.html.markdown b/website/docs/cdktf/typescript/r/redshift_snapshot_copy.html.markdown index 710cc72cbc84..e514e31ae981 100644 --- a/website/docs/cdktf/typescript/r/redshift_snapshot_copy.html.markdown +++ b/website/docs/cdktf/typescript/r/redshift_snapshot_copy.html.markdown @@ -45,6 +45,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `manualSnapshotRetentionPeriod` - (Optional) Number of days to retain newly copied snapshots in the destination AWS Region after they are copied from the source AWS Region. If the value is `-1`, the manual snapshot is retained indefinitely. * `retentionPeriod` - (Optional) Number of days to retain automated snapshots in the destination region after they are copied from the source region. * `snapshotCopyGrantName` - (Optional) Name of the snapshot copy grant to use when snapshots of an AWS KMS-encrypted cluster are copied to the destination region. @@ -87,4 +88,4 @@ Using `terraform import`, import Redshift Snapshot Copy using the `id`. 
For exam % terraform import aws_redshift_snapshot_copy.example cluster-id-12345678 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/redshift_snapshot_copy_grant.html.markdown b/website/docs/cdktf/typescript/r/redshift_snapshot_copy_grant.html.markdown index 80246a8e8cd4..9284c58139e0 100644 --- a/website/docs/cdktf/typescript/r/redshift_snapshot_copy_grant.html.markdown +++ b/website/docs/cdktf/typescript/r/redshift_snapshot_copy_grant.html.markdown @@ -37,10 +37,12 @@ class MyConvertedCode extends TerraformStack { snapshotCopyGrantName: "my-grant", }); const awsRedshiftClusterTest = new RedshiftCluster(this, "test_1", { - snapshotCopy: { - destinationRegion: "us-east-2", - grantName: test.snapshotCopyGrantName, - }, + snapshot_copy: [ + { + destination_region: "us-east-2", + grant_name: test.snapshotCopyGrantName, + }, + ], clusterIdentifier: config.clusterIdentifier, nodeType: config.nodeType, }); @@ -55,6 +57,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `snapshotCopyGrantName` - (Required, Forces new resource) A friendly name for identifying the grant. * `kmsKeyId` - (Optional, Forces new resource) The unique identifier for the customer master key (CMK) that the grant applies to. Specify the key ID or the Amazon Resource Name (ARN) of the CMK. To specify a CMK in a different AWS account, you must use the key ARN. If not specified, the default key is used. * `tags` - (Optional) A map of tags to assign to the resource. 
If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. @@ -94,4 +97,4 @@ Using `terraform import`, import Redshift Snapshot Copy Grants by name. For exam % terraform import aws_redshift_snapshot_copy_grant.test my-grant ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/redshift_snapshot_schedule.html.markdown b/website/docs/cdktf/typescript/r/redshift_snapshot_schedule.html.markdown index 273477ebaff9..e172b72b4ec1 100644 --- a/website/docs/cdktf/typescript/r/redshift_snapshot_schedule.html.markdown +++ b/website/docs/cdktf/typescript/r/redshift_snapshot_schedule.html.markdown @@ -37,6 +37,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `identifier` - (Optional, Forces new resource) The snapshot schedule identifier. If omitted, Terraform will assign a random, unique identifier. * `identifierPrefix` - (Optional, Forces new resource) Creates a unique identifier beginning with the specified prefix. Conflicts with `identifier`. 
@@ -84,4 +85,4 @@ Using `terraform import`, import Redshift Snapshot Schedule using the `identifie % terraform import aws_redshift_snapshot_schedule.default tf-redshift-snapshot-schedule ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/redshift_snapshot_schedule_association.html.markdown b/website/docs/cdktf/typescript/r/redshift_snapshot_schedule_association.html.markdown index 37039ec064bd..66cec7d90846 100644 --- a/website/docs/cdktf/typescript/r/redshift_snapshot_schedule_association.html.markdown +++ b/website/docs/cdktf/typescript/r/redshift_snapshot_schedule_association.html.markdown @@ -62,6 +62,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `clusterIdentifier` - (Required, Forces new resource) The cluster identifier. * `scheduleIdentifier` - (Required, Forces new resource) The snapshot schedule identifier. 
@@ -101,4 +102,4 @@ Using `terraform import`, import Redshift Snapshot Schedule Association using th % terraform import aws_redshift_snapshot_schedule_association.default tf-redshift-cluster/tf-redshift-snapshot-schedule ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/redshift_subnet_group.html.markdown b/website/docs/cdktf/typescript/r/redshift_subnet_group.html.markdown index fd3bb8d8ddc1..70bb7b9c47b1 100644 --- a/website/docs/cdktf/typescript/r/redshift_subnet_group.html.markdown +++ b/website/docs/cdktf/typescript/r/redshift_subnet_group.html.markdown @@ -67,6 +67,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The name of the Redshift Subnet group. * `description` - (Optional) The description of the Redshift Subnet group. Defaults to "Managed by Terraform". * `subnetIds` - (Required) An array of VPC subnet IDs. @@ -112,4 +113,4 @@ Using `terraform import`, import Redshift subnet groups using the `name`. 
For ex % terraform import aws_redshift_subnet_group.testgroup1 test-cluster-subnet-group ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/redshift_usage_limit.html.markdown b/website/docs/cdktf/typescript/r/redshift_usage_limit.html.markdown index 62fb089076ea..24b3edac722d 100644 --- a/website/docs/cdktf/typescript/r/redshift_usage_limit.html.markdown +++ b/website/docs/cdktf/typescript/r/redshift_usage_limit.html.markdown @@ -41,6 +41,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `amount` - (Required) The limit amount. If time-based, this amount is in minutes. If data-based, this amount is in terabytes (TB). The value must be a positive number. * `breachAction` - (Optional) The action that Amazon Redshift takes when the limit is reached. The default is `log`. Valid values are `log`, `emit-metric`, and `disable`. * `clusterIdentifier` - (Required) The identifier of the cluster that you want to limit usage. @@ -85,4 +86,4 @@ Using `terraform import`, import Redshift usage limits using the `id`. 
For examp % terraform import aws_redshift_usage_limit.example example-id ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/redshiftdata_statement.html.markdown b/website/docs/cdktf/typescript/r/redshiftdata_statement.html.markdown index 94f0f22175c7..ae6b849468d5 100644 --- a/website/docs/cdktf/typescript/r/redshiftdata_statement.html.markdown +++ b/website/docs/cdktf/typescript/r/redshiftdata_statement.html.markdown @@ -76,6 +76,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `clusterIdentifier` - (Optional) The cluster identifier. This parameter is required when connecting to a cluster and authenticating using either Secrets Manager or temporary credentials. * `dbUser` - (Optional) The database user name. * `secretArn` - (Optional) The name or ARN of the secret that enables access to the database. @@ -117,4 +118,4 @@ Using `terraform import`, import Redshift Data Statements using the `id`. 
For ex % terraform import aws_redshiftdata_statement.example example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/redshiftserverless_custom_domain_association.html.markdown b/website/docs/cdktf/typescript/r/redshiftserverless_custom_domain_association.html.markdown index 2d2a8bad4aa5..e211efc6b621 100644 --- a/website/docs/cdktf/typescript/r/redshiftserverless_custom_domain_association.html.markdown +++ b/website/docs/cdktf/typescript/r/redshiftserverless_custom_domain_association.html.markdown @@ -65,8 +65,9 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -The following arguments are required: +This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `workgroupName` - (Required) Name of the workgroup. * `customDomainName` - (Required) Custom domain to associate with the workgroup. * `customDomainCertificateArn` - (Required) ARN of the certificate for the custom domain association. 
@@ -109,4 +110,4 @@ Using `terraform import`, import Redshift Serverless Custom Domain Association u % terraform import aws_redshiftserverless_custom_domain_association.example example-workgroup,example.com ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/redshiftserverless_endpoint_access.html.markdown b/website/docs/cdktf/typescript/r/redshiftserverless_endpoint_access.html.markdown index e8a481cd1fbf..48441c454a13 100644 --- a/website/docs/cdktf/typescript/r/redshiftserverless_endpoint_access.html.markdown +++ b/website/docs/cdktf/typescript/r/redshiftserverless_endpoint_access.html.markdown @@ -43,6 +43,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `endpointName` - (Required) The name of the endpoint. * `ownerAccount` - (Optional) The owner Amazon Web Services account for the Amazon Redshift Serverless workgroup. * `subnetIds` - (Required) An array of VPC subnet IDs to associate with the endpoint. 
@@ -104,4 +105,4 @@ Using `terraform import`, import Redshift Serverless Endpoint Access using the ` % terraform import aws_redshiftserverless_endpoint_access.example example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/redshiftserverless_namespace.html.markdown b/website/docs/cdktf/typescript/r/redshiftserverless_namespace.html.markdown index f0ec36d6a812..0ea3c99eb387 100644 --- a/website/docs/cdktf/typescript/r/redshiftserverless_namespace.html.markdown +++ b/website/docs/cdktf/typescript/r/redshiftserverless_namespace.html.markdown @@ -12,7 +12,7 @@ description: |- Creates a new Amazon Redshift Serverless Namespace. --> **Note:** Write-Only argument `admin_password_wo` is available to use in place of `admin_password`. Write-Only arguments are supported in HashiCorp Terraform 1.11.0 and later. [Learn more](https://developer.hashicorp.com/terraform/language/v1.11.x/resources/ephemeral#write-only-arguments). +-> **Note:** Write-Only argument `admin_password_wo` is available to use in place of `admin_password`. Write-Only arguments are supported in HashiCorp Terraform 1.11.0 and later. [Learn more](https://developer.hashicorp.com/terraform/language/resources/ephemeral#write-only-arguments). ## Example Usage @@ -40,6 +40,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `adminPasswordSecretKmsKeyId` - (Optional) ID of the KMS key used to encrypt the namespace's admin credentials secret. * `adminUserPassword` - (Optional) The password of the administrator for the first database created in the namespace. 
Conflicts with `manageAdminPassword` and `adminUserPasswordWo`. @@ -99,4 +100,4 @@ Using `terraform import`, import Redshift Serverless Namespaces using the `names % terraform import aws_redshiftserverless_namespace.example example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/redshiftserverless_resource_policy.html.markdown b/website/docs/cdktf/typescript/r/redshiftserverless_resource_policy.html.markdown index 14e2ef9dd358..96da022bb449 100644 --- a/website/docs/cdktf/typescript/r/redshiftserverless_resource_policy.html.markdown +++ b/website/docs/cdktf/typescript/r/redshiftserverless_resource_policy.html.markdown @@ -53,6 +53,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `resourceArn` - (Required) The Amazon Resource Name (ARN) of the account to create or update a resource policy for. * `policy` - (Required) The policy to create or update. For example, the following policy grants a user authorization to restore a snapshot. 
@@ -94,4 +95,4 @@ Using `terraform import`, import Redshift Serverless Resource Policies using the % terraform import aws_redshiftserverless_resource_policy.example example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/redshiftserverless_snapshot.html.markdown b/website/docs/cdktf/typescript/r/redshiftserverless_snapshot.html.markdown index 9b5b21196327..f062224cad24 100644 --- a/website/docs/cdktf/typescript/r/redshiftserverless_snapshot.html.markdown +++ b/website/docs/cdktf/typescript/r/redshiftserverless_snapshot.html.markdown @@ -41,6 +41,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `namespaceName` - (Required) The namespace to create a snapshot for. * `snapshotName` - (Required) The name of the snapshot. * `retentionPeriod` - (Optional) How long to retain the created snapshot. Default value is `-1`. 
@@ -90,4 +91,4 @@ Using `terraform import`, import Redshift Serverless Snapshots using the `snapsh % terraform import aws_redshiftserverless_snapshot.example example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/redshiftserverless_usage_limit.html.markdown b/website/docs/cdktf/typescript/r/redshiftserverless_usage_limit.html.markdown index 99d827efde69..b87c8e65a17b 100644 --- a/website/docs/cdktf/typescript/r/redshiftserverless_usage_limit.html.markdown +++ b/website/docs/cdktf/typescript/r/redshiftserverless_usage_limit.html.markdown @@ -50,6 +50,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `amount` - (Required) The limit amount. If time-based, this amount is in Redshift Processing Units (RPU) consumed per hour. If data-based, this amount is in terabytes (TB) of data transferred between Regions in cross-account sharing. The value must be a positive number. * `breachAction` - (Optional) The action that Amazon Redshift Serverless takes when the limit is reached. Valid values are `log`, `emit-metric`, and `deactivate`. The default is `log`. * `period` - (Optional) The time period that the amount applies to. A weekly period begins on Sunday. Valid values are `daily`, `weekly`, and `monthly`. The default is `monthly`. 
@@ -95,4 +96,4 @@ Using `terraform import`, import Redshift Serverless Usage Limits using the `id` % terraform import aws_redshiftserverless_usage_limit.example example-id ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/redshiftserverless_workgroup.html.markdown b/website/docs/cdktf/typescript/r/redshiftserverless_workgroup.html.markdown index 5f793edcbd62..430c84e24715 100644 --- a/website/docs/cdktf/typescript/r/redshiftserverless_workgroup.html.markdown +++ b/website/docs/cdktf/typescript/r/redshiftserverless_workgroup.html.markdown @@ -44,6 +44,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `baseCapacity` - (Optional) The base data warehouse capacity of the workgroup in Redshift Processing Units (RPUs). * `pricePerformanceTarget` - (Optional) Price-performance scaling for the workgroup. See `Price Performance Target` below. * `configParameter` - (Optional) An array of parameters to set for more control over a serverless database. See `Config Parameter` below. @@ -53,7 +54,7 @@ The following arguments are optional: * `publiclyAccessible` - (Optional) A value that specifies whether the workgroup can be accessed from a public network. * `securityGroupIds` - (Optional) An array of security group IDs to associate with the workgroup. * `subnetIds` - (Optional) An array of VPC subnet IDs to associate with the workgroup. When set, must contain at least three subnets spanning three Availability Zones. A minimum number of IP addresses is required and scales with the Base Capacity. 
For more information, see the following [AWS document](https://docs.aws.amazon.com/redshift/latest/mgmt/serverless-known-issues.html). -* `track_name` - (Optional) The name of the track for the workgroup. If it is `current`, you get the most up-to-date certified release version with the latest features, security updates, and performance enhancements. If it is `trailing`, you will be on the previous certified release. For more information, see the following [AWS document](https://docs.aws.amazon.com/redshift/latest/mgmt/tracks.html). +* `trackName` - (Optional) The name of the track for the workgroup. If it is `current`, you get the most up-to-date certified release version with the latest features, security updates, and performance enhancements. If it is `trailing`, you will be on the previous certified release. For more information, see the following [AWS document](https://docs.aws.amazon.com/redshift/latest/mgmt/tracks.html). * `tags` - (Optional) A map of tags to assign to the resource. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. 
### Price Performance Target @@ -135,4 +136,4 @@ Using `terraform import`, import Redshift Serverless Workgroups using the `workg % terraform import aws_redshiftserverless_workgroup.example example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/rekognition_collection.html.markdown b/website/docs/cdktf/typescript/r/rekognition_collection.html.markdown index e4eea22b4b86..88d7c8b4883a 100644 --- a/website/docs/cdktf/typescript/r/rekognition_collection.html.markdown +++ b/website/docs/cdktf/typescript/r/rekognition_collection.html.markdown @@ -45,6 +45,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `tags` - (Optional) Map of tags assigned to the resource. If configured with a provider [`defaultTags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. ## Attribute Reference @@ -63,7 +64,7 @@ This resource exports the following attributes in addition to the arguments abov ## Import -In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Rekognition Collection using the `example_id_arg`. For example: +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Rekognition Collection using the `collectionId`. For example: ```typescript // DO NOT EDIT. 
Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug @@ -87,10 +88,10 @@ class MyConvertedCode extends TerraformStack { ``` -Using `terraform import`, import Rekognition Collection using the `example_id_arg`. For example: +Using `terraform import`, import Rekognition Collection using the `collectionId`. For example: ```console % terraform import aws_rekognition_collection.example collection-id-12345678 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/rekognition_project.html.markdown b/website/docs/cdktf/typescript/r/rekognition_project.html.markdown index d092b2e452fa..701b068462d5 100644 --- a/website/docs/cdktf/typescript/r/rekognition_project.html.markdown +++ b/website/docs/cdktf/typescript/r/rekognition_project.html.markdown @@ -14,6 +14,8 @@ Terraform resource for managing an AWS Rekognition Project. ## Example Usage +### Content Moderation + ```typescript // DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug import { Construct } from "constructs"; @@ -36,6 +38,29 @@ class MyConvertedCode extends TerraformStack { ``` +### Custom Labels + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. 
+ */ +import { RekognitionProject } from "./.gen/providers/aws/rekognition-project"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + new RekognitionProject(this, "example", { + feature: "CUSTOM_LABELS", + name: "example-project", + }); + } +} + +``` + ## Argument Reference The following arguments are required: @@ -44,7 +69,8 @@ The following arguments are required: The following arguments are optional: -* `autoUpdate` - (Optional) Specify if automatic retraining should occur. Valid values are `ENABLED` or `DISABLED`. Defaults to `DISABLED`. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `autoUpdate` - (Optional) Specify if automatic retraining should occur. Valid values are `ENABLED` or `DISABLED`. Must be set when `feature` is `CONTENT_MODERATION`, but do not set otherwise. * `feature` - (Optional) Specify the feature being customized. Valid values are `CONTENT_MODERATION` or `CUSTOM_LABELS`. Defaults to `CUSTOM_LABELS`. * `tags` - (Optional) Map of tags assigned to the resource. If configured with a provider [`defaultTags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. @@ -64,7 +90,7 @@ This resource exports the following attributes in addition to the arguments abov ## Import -In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Rekognition Project using the `example_id_arg`. 
For example: +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Rekognition Project using the `name`. For example: ```typescript // DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug @@ -94,4 +120,4 @@ Using `terraform import`, import Rekognition Project using the `name`. For examp % terraform import aws_rekognition_project.example project-id-12345678 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/rekognition_stream_processor.html.markdown b/website/docs/cdktf/typescript/r/rekognition_stream_processor.html.markdown index c6329ae6b544..9a50e939177c 100644 --- a/website/docs/cdktf/typescript/r/rekognition_stream_processor.html.markdown +++ b/website/docs/cdktf/typescript/r/rekognition_stream_processor.html.markdown @@ -313,6 +313,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `dataSharingPreference` - (Optional) See [`dataSharingPreference`](#data_sharing_preference). * `kmsKeyId` - (Optional) Optional parameter for label detection stream processors. * `notificationChannel` - (Optional) The Amazon Simple Notification Service topic to which Amazon Rekognition publishes the completion status. See [`notificationChannel`](#notification_channel). @@ -434,4 +435,4 @@ Using `terraform import`, import Rekognition Stream Processor using the `name`. 
% terraform import aws_rekognition_stream_processor.example my-stream ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/resiliencehub_resiliency_policy.html.markdown b/website/docs/cdktf/typescript/r/resiliencehub_resiliency_policy.html.markdown index 5e65b5bfeb6f..2114a64c6583 100644 --- a/website/docs/cdktf/typescript/r/resiliencehub_resiliency_policy.html.markdown +++ b/website/docs/cdktf/typescript/r/resiliencehub_resiliency_policy.html.markdown @@ -78,6 +78,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` (String) Description of Resiliency Policy. * `dataLocationConstraint` (String) Data Location Constraint of the Policy. Valid values are `AnyLocation`, `SameContinent`, and `SameCountry`. @@ -93,6 +94,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `region` - (Attributes) Specifies Region failure policy. 
[`policy.region`](#policyregion) ### `policy.az` @@ -179,4 +181,4 @@ Using `terraform import`, import Resilience Hub Resiliency Policy using the `arn % terraform import aws_resiliencehub_resiliency_policy.example arn:aws:resiliencehub:us-east-1:123456789012:resiliency-policy/8c1cfa29-d1dd-4421-aa68-c9f64cced4c2 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/resourceexplorer2_index.html.markdown b/website/docs/cdktf/typescript/r/resourceexplorer2_index.html.markdown index 154c3f7c7a38..0443927faaf1 100644 --- a/website/docs/cdktf/typescript/r/resourceexplorer2_index.html.markdown +++ b/website/docs/cdktf/typescript/r/resourceexplorer2_index.html.markdown @@ -38,6 +38,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `type` - (Required) The type of the index. Valid values: `AGGREGATOR`, `LOCAL`. To understand the difference between `LOCAL` and `AGGREGATOR`, see the [_AWS Resource Explorer User Guide_](https://docs.aws.amazon.com/resource-explorer/latest/userguide/manage-aggregator-region.html). * `tags` - (Optional) Key-value map of resource tags. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. @@ -58,6 +59,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. 
For example: + +```terraform +import { + to = aws_resourceexplorer2_index.example + identity = { + "arn" = "arn:aws:resource-explorer-2:us-east-1:123456789012:index/example-index-id" + } +} + +resource "aws_resourceexplorer2_index" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the Resource Explorer index. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Resource Explorer indexes using the `arn`. For example: ```typescript @@ -88,4 +110,4 @@ Using `terraform import`, import Resource Explorer indexes using the `arn`. For % terraform import aws_resourceexplorer2_index.example arn:aws:resource-explorer-2:us-east-1:123456789012:index/6047ac4e-207e-4487-9bcf-cb53bb0ff5cc ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/resourceexplorer2_view.html.markdown b/website/docs/cdktf/typescript/r/resourceexplorer2_view.html.markdown index 70339b17dd4d..96f06a8bc625 100644 --- a/website/docs/cdktf/typescript/r/resourceexplorer2_view.html.markdown +++ b/website/docs/cdktf/typescript/r/resourceexplorer2_view.html.markdown @@ -59,6 +59,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `defaultView` - (Optional) Specifies whether the view is the [_default view_](https://docs.aws.amazon.com/resource-explorer/latest/userguide/manage-views-about.html#manage-views-about-default) for the AWS Region. Default: `false`. 
* `filters` - (Optional) Specifies which resources are included in the results of queries made using this view. See [Filters](#filters) below for more details. * `includedProperty` - (Optional) Optional fields to be included in search results from this view. See [Included Properties](#included-properties) below for more details. @@ -87,6 +88,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_resourceexplorer2_view.example + identity = { + "arn" = "arn:aws:resource-explorer-2:us-east-1:123456789012:view/example-view/a1b2c3d4-5678-90ab-cdef-EXAMPLE11111" + } +} + +resource "aws_resourceexplorer2_view" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the Resource Explorer view. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Resource Explorer views using the `arn`. For example: ```typescript @@ -117,4 +139,4 @@ Using `terraform import`, import Resource Explorer views using the `arn`. 
For ex % terraform import aws_resourceexplorer2_view.example arn:aws:resource-explorer-2:us-west-2:123456789012:view/exampleview/e0914f6c-6c27-4b47-b5d4-6b28381a2421 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/resourcegroups_group.html.markdown b/website/docs/cdktf/typescript/r/resourcegroups_group.html.markdown index 1c13bf519c88..2dca403491c3 100644 --- a/website/docs/cdktf/typescript/r/resourcegroups_group.html.markdown +++ b/website/docs/cdktf/typescript/r/resourcegroups_group.html.markdown @@ -42,6 +42,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The resource group's name. A resource group name can have a maximum of 127 characters, including letters, numbers, hyphens, dots, and underscores. The name cannot start with `AWS` or `aws`. * `configuration` - (Optional) A configuration associates the resource group with an AWS service and specifies how the service can interact with the resources in the group. See below for details. * `description` - (Optional) A description of the resource group. @@ -102,4 +103,4 @@ Using `terraform import`, import resource groups using the `name`. 
For example: % terraform import aws_resourcegroups_group.foo resource-group-name ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/resourcegroups_resource.html.markdown b/website/docs/cdktf/typescript/r/resourcegroups_resource.html.markdown index 6298fdb9a2f2..b2b702ca8d7c 100644 --- a/website/docs/cdktf/typescript/r/resourcegroups_resource.html.markdown +++ b/website/docs/cdktf/typescript/r/resourcegroups_resource.html.markdown @@ -62,8 +62,9 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -The following arguments are required: +This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `groupArn` - (Required) Name or ARN of the resource group to add resources to. * `resourceArn` - (Required) ARN of the resource to be added to the group. @@ -113,4 +114,4 @@ Using `terraform import`, import an AWS Resource Groups Resource using `groupArn % terraform import aws_resourcegroups_resource.example arn:aws:resource-groups:us-west-2:012345678901:group/example,arn:aws:lambda:us-west-2:012345678901:function:example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/route.html.markdown b/website/docs/cdktf/typescript/r/route.html.markdown index 20f092e7cc39..7248bbac1a20 100644 --- a/website/docs/cdktf/typescript/r/route.html.markdown +++ b/website/docs/cdktf/typescript/r/route.html.markdown @@ -79,6 +79,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `routeTableId` - (Required) The ID of the routing table. One of the following destination arguments must be supplied: @@ -124,6 +125,46 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_route.example + identity = { + route_table_id = "rtb-656C65616E6F72" + destination_cidr_block = "10.42.0.0/16" + + ### OR by IPv6 CIDR block + # destination_ipv6_cidr_block = "2001:db8::/56" + + ### OR by prefix list ID + # destination_prefix_list_id = "pl-0570a1d2d725c16be" + } +} + +resource "aws_route" "example" { + route_table_id = "rtb-656C65616E6F72" + destination_cidr_block = "10.42.0.0/16" + vpc_peering_connection_id = "pcx-45ff3dc1" +} +``` + +### Identity Schema + +#### Required + +* `routeTableId` - (String) ID of the route table. + +#### Optional + +~> Exactly one of `destinationCidrBlock`, `destinationIpv6CidrBlock`, or `destinationPrefixListId` is required. + +* `accountId` (String) AWS Account where this resource is managed. +* `destinationCidrBlock` - (String) Destination IPv4 CIDR block. +* `destinationIpv6CidrBlock` - (String) Destination IPv6 CIDR block. +* `destinationPrefixListId` - (String) Destination prefix list ID. +* `region` (String) Region where this resource is managed. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import individual routes using `ROUTETABLEID_DESTINATION`. Import [local routes](https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Route_Tables.html#RouteTables) using the VPC's IPv4 or IPv6 CIDR blocks.
For example: Import a route in route table `rtb-656C65616E6F72` with an IPv4 destination CIDR of `10.42.0.0/16`: @@ -218,4 +259,4 @@ Import a route in route table `rtb-656C65616E6F72` with a managed prefix list de % terraform import aws_route.my_route rtb-656C65616E6F72_pl-0570a1d2d725c16be ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/route53_record.html.markdown b/website/docs/cdktf/typescript/r/route53_record.html.markdown index 2ccfa756121b..deac707342e4 100644 --- a/website/docs/cdktf/typescript/r/route53_record.html.markdown +++ b/website/docs/cdktf/typescript/r/route53_record.html.markdown @@ -332,6 +332,36 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_route53_record.example + identity = { + zone_id = "Z4KAPRWWNC7JR" + name = "dev.example.com" + type = "NS" + } +} + +resource "aws_route53_record" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `zoneId` (String) Hosted zone ID for the record. +* `name` (String) Name of the record. +* `type` (String) Record type. + +#### Optional + +* `accountId` (String) AWS Account where this resource is managed. +* `setIdentifier` (String) Set identifier for the record. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Route53 Records using the ID of the record, record name, record type, and set identifier. 
For example: Using the ID of the record, which is the zone identifier, record name, and record type, separated by underscores (`_`): @@ -350,7 +380,7 @@ class MyConvertedCode extends TerraformStack { super(scope, name); Route53Record.generateConfigForImport( this, - "myrecord", + "example", "Z4KAPRWWNC7JR_dev.example.com_NS" ); } @@ -374,7 +404,7 @@ class MyConvertedCode extends TerraformStack { super(scope, name); Route53Record.generateConfigForImport( this, - "myrecord", + "example", "Z4KAPRWWNC7JR_dev.example.com_NS_dev" ); } @@ -396,11 +426,7 @@ import { Route53Record } from "./.gen/providers/aws/route53-record"; class MyConvertedCode extends TerraformStack { constructor(scope: Construct, name: string) { super(scope, name); - Route53Record.generateConfigForImport( - this, - "myrecord", - "Z4KAPRWWNC7JR__NS" - ); + Route53Record.generateConfigForImport(this, "example", "Z4KAPRWWNC7JR__NS"); } } @@ -411,13 +437,13 @@ class MyConvertedCode extends TerraformStack { Using the ID of the record, which is the zone identifier, record name, and record type, separated by underscores (`_`): ```console -% terraform import aws_route53_record.myrecord Z4KAPRWWNC7JR_dev_NS +% terraform import aws_route53_record.example Z4KAPRWWNC7JR_dev_NS ``` If the record also contains a set identifier, append it: ```console -% terraform import aws_route53_record.myrecord Z4KAPRWWNC7JR_dev_NS_dev +% terraform import aws_route53_record.example Z4KAPRWWNC7JR_dev_NS_dev ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/route53_resolver_config.html.markdown b/website/docs/cdktf/typescript/r/route53_resolver_config.html.markdown index 19e16138527f..7e278e0aa848 100644 --- a/website/docs/cdktf/typescript/r/route53_resolver_config.html.markdown +++ b/website/docs/cdktf/typescript/r/route53_resolver_config.html.markdown @@ -51,6 +51,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* 
`region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `resourceId` - (Required) The ID of the VPC that the configuration is for. * `autodefinedReverseFlag` - (Required) Indicates whether or not the Resolver will create autodefined rules for reverse DNS lookups. Valid values: `ENABLE`, `DISABLE`. @@ -93,4 +94,4 @@ Using `terraform import`, import Route 53 Resolver configs using the Route 53 Re % terraform import aws_route53_resolver_config.example rslvr-rc-715aa20c73a23da7 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/route53_resolver_dnssec_config.html.markdown b/website/docs/cdktf/typescript/r/route53_resolver_dnssec_config.html.markdown index b7e17923ad52..2bb60cdb09ed 100644 --- a/website/docs/cdktf/typescript/r/route53_resolver_dnssec_config.html.markdown +++ b/website/docs/cdktf/typescript/r/route53_resolver_dnssec_config.html.markdown @@ -47,6 +47,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `resourceId` - (Required) The ID of the virtual private cloud (VPC) that you're updating the DNSSEC validation status for. 
## Attribute Reference @@ -90,4 +91,4 @@ Using `terraform import`, import Route 53 Resolver DNSSEC configs using the Rou % terraform import aws_route53_resolver_dnssec_config.example rdsc-be1866ecc1683e95 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/route53_resolver_endpoint.html.markdown b/website/docs/cdktf/typescript/r/route53_resolver_endpoint.html.markdown index 96cc0a82bbc4..6d116454f9ad 100644 --- a/website/docs/cdktf/typescript/r/route53_resolver_endpoint.html.markdown +++ b/website/docs/cdktf/typescript/r/route53_resolver_endpoint.html.markdown @@ -54,9 +54,9 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `direction` - (Required) Direction of DNS queries to or from the Route 53 Resolver endpoint. -Valid values are `INBOUND` (resolver forwards DNS queries to the DNS service for a VPC from your network or another VPC) -or `OUTBOUND` (resolver forwards DNS queries from the DNS service for a VPC to your network or another VPC). +Valid values are `INBOUND` (resolver forwards DNS queries to the DNS service for a VPC from your network or another VPC), `OUTBOUND` (resolver forwards DNS queries from the DNS service for a VPC to your network or another VPC) or `INBOUND_DELEGATION` (resolver delegates queries to Route 53 private hosted zones from your network). * `ipAddress` - (Required) Subnets and IP addresses in your VPC that you want DNS queries to pass through on the way from your VPCs to your network (for outbound endpoints) or on the way from your network to your VPCs (for inbound endpoints). Described below. 
* `name` - (Optional) Friendly name of the Route 53 Resolver endpoint. @@ -122,4 +122,4 @@ Using `terraform import`, import Route 53 Resolver endpoints using the Route 53 % terraform import aws_route53_resolver_endpoint.foo rslvr-in-abcdef01234567890 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/route53_resolver_firewall_config.html.markdown b/website/docs/cdktf/typescript/r/route53_resolver_firewall_config.html.markdown index 85adf5f7f503..491f1210ca1c 100644 --- a/website/docs/cdktf/typescript/r/route53_resolver_firewall_config.html.markdown +++ b/website/docs/cdktf/typescript/r/route53_resolver_firewall_config.html.markdown @@ -48,6 +48,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `resourceId` - (Required) The ID of the VPC that the configuration is for. * `firewallFailOpen` - (Required) Determines how Route 53 Resolver handles queries during failures, for example when all traffic that is sent to DNS Firewall fails to receive a reply. By default, fail open is disabled, which means the failure mode is closed. This approach favors security over availability. DNS Firewall blocks queries that it is unable to evaluate properly. If you enable this option, the failure mode is open. This approach favors availability over security. DNS Firewall allows queries to proceed if it is unable to properly evaluate them. Valid values: `ENABLED`, `DISABLED`. 
@@ -90,4 +91,4 @@ Using `terraform import`, import Route 53 Resolver DNS Firewall configs using th % terraform import aws_route53_resolver_firewall_config.example rdsc-be1866ecc1683e95 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/route53_resolver_firewall_domain_list.html.markdown b/website/docs/cdktf/typescript/r/route53_resolver_firewall_domain_list.html.markdown index af80547764e3..c77375efdaa1 100644 --- a/website/docs/cdktf/typescript/r/route53_resolver_firewall_domain_list.html.markdown +++ b/website/docs/cdktf/typescript/r/route53_resolver_firewall_domain_list.html.markdown @@ -38,6 +38,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) A name that lets you identify the domain list, to manage and use it. * `domains` - (Optional) An array of domains for the firewall domain list. * `tags` - (Optional) A map of tags to assign to the resource. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. 
@@ -82,4 +83,4 @@ Using `terraform import`, import Route 53 Resolver DNS Firewall domain lists us % terraform import aws_route53_resolver_firewall_domain_list.example rslvr-fdl-0123456789abcdef ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/route53_resolver_firewall_rule.html.markdown b/website/docs/cdktf/typescript/r/route53_resolver_firewall_rule.html.markdown index 37b91e528acb..9351f4cf4571 100644 --- a/website/docs/cdktf/typescript/r/route53_resolver_firewall_rule.html.markdown +++ b/website/docs/cdktf/typescript/r/route53_resolver_firewall_rule.html.markdown @@ -65,6 +65,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) A name that lets you identify the rule, to manage and use it. * `action` - (Required) The action that DNS Firewall should take on a DNS query when it matches one of the domains in the rule's domain list. Valid values: `ALLOW`, `BLOCK`, `ALERT`. * `blockOverrideDnsType` - (Required if `blockResponse` is `OVERRIDE`) The DNS record's type. This determines the format of the record value that you provided in BlockOverrideDomain. Valid values: `CNAME`. 
@@ -115,4 +116,4 @@ Using `terraform import`, import Route 53 Resolver DNS Firewall rules using the % terraform import aws_route53_resolver_firewall_rule.example rslvr-frg-0123456789abcdef:rslvr-fdl-0123456789abcdef ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/route53_resolver_firewall_rule_group.html.markdown b/website/docs/cdktf/typescript/r/route53_resolver_firewall_rule_group.html.markdown index 2e47e71faa72..0d7dfa126717 100644 --- a/website/docs/cdktf/typescript/r/route53_resolver_firewall_rule_group.html.markdown +++ b/website/docs/cdktf/typescript/r/route53_resolver_firewall_rule_group.html.markdown @@ -38,6 +38,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) A name that lets you identify the rule group, to manage and use it. * `tags` - (Optional) A map of tags to assign to the resource. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. 
@@ -83,4 +84,4 @@ Using `terraform import`, import Route 53 Resolver DNS Firewall rule groups usi % terraform import aws_route53_resolver_firewall_rule_group.example rslvr-frg-0123456789abcdef ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/route53_resolver_firewall_rule_group_association.html.markdown b/website/docs/cdktf/typescript/r/route53_resolver_firewall_rule_group_association.html.markdown index d6212a1255a6..dd5963c9b157 100644 --- a/website/docs/cdktf/typescript/r/route53_resolver_firewall_rule_group_association.html.markdown +++ b/website/docs/cdktf/typescript/r/route53_resolver_firewall_rule_group_association.html.markdown @@ -50,6 +50,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) A name that lets you identify the rule group association, to manage and use it. * `firewallRuleGroupId` - (Required) The unique identifier of the firewall rule group. * `mutationProtection` - (Optional) If enabled, this setting disallows modification or removal of the association, to help prevent against accidentally altering DNS firewall protections. Valid values: `ENABLED`, `DISABLED`. 
@@ -97,4 +98,4 @@ Using `terraform import`, import Route 53 Resolver DNS Firewall rule group assoc % terraform import aws_route53_resolver_firewall_rule_group_association.example rslvr-frgassoc-0123456789abcdef ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/route53_resolver_query_log_config.html.markdown b/website/docs/cdktf/typescript/r/route53_resolver_query_log_config.html.markdown index 423c8baeae61..81323f65c375 100644 --- a/website/docs/cdktf/typescript/r/route53_resolver_query_log_config.html.markdown +++ b/website/docs/cdktf/typescript/r/route53_resolver_query_log_config.html.markdown @@ -42,6 +42,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `destinationArn` - (Required) The ARN of the resource that you want Route 53 Resolver to send query logs. You can send query logs to an [S3 bucket](s3_bucket.html), a [CloudWatch Logs log group](cloudwatch_log_group.html), or a [Kinesis Data Firehose delivery stream](kinesis_firehose_delivery_stream.html). * `name` - (Required) The name of the Route 53 Resolver query logging configuration. 
@@ -91,4 +92,4 @@ Using `terraform import`, import Route 53 Resolver query logging configurations % terraform import aws_route53_resolver_query_log_config.example rqlc-92edc3b1838248bf ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/route53_resolver_query_log_config_association.html.markdown b/website/docs/cdktf/typescript/r/route53_resolver_query_log_config_association.html.markdown index 6e1d4a7d5533..f8d4e6ddfab4 100644 --- a/website/docs/cdktf/typescript/r/route53_resolver_query_log_config_association.html.markdown +++ b/website/docs/cdktf/typescript/r/route53_resolver_query_log_config_association.html.markdown @@ -41,6 +41,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `resolverQueryLogConfigId` - (Required) The ID of the [Route 53 Resolver query logging configuration](route53_resolver_query_log_config.html) that you want to associate a VPC with. * `resourceId` - (Required) The ID of a VPC that you want this query logging configuration to log queries for. 
@@ -82,4 +83,4 @@ Using `terraform import`, import Route 53 Resolver query logging configuration % terraform import aws_route53_resolver_query_log_config_association.example rqlca-b320624fef3c4d70 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/route53_resolver_rule.html.markdown b/website/docs/cdktf/typescript/r/route53_resolver_rule.html.markdown index 2ac5230661ee..0014e91686c9 100644 --- a/website/docs/cdktf/typescript/r/route53_resolver_rule.html.markdown +++ b/website/docs/cdktf/typescript/r/route53_resolver_rule.html.markdown @@ -107,6 +107,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `domainName` - (Required) DNS queries for this domain name are forwarded to the IP addresses that are specified using `targetIp`. * `ruleType` - (Required) Rule type. Valid values are `FORWARD`, `SYSTEM` and `RECURSIVE`. * `name` - (Optional) Friendly name that lets you easily find a rule in the Resolver dashboard in the Route 53 console. @@ -136,6 +137,32 @@ Values are `NOT_SHARED`, `SHARED_BY_ME` or `SHARED_WITH_ME` ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_route53_resolver_rule.example + identity = { + id = "rslvr-rr-0123456789abcdef0" + } +} + +resource "aws_route53_resolver_rule" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `id` - (String) ID of the Route53 Resolver rule. 
+ +#### Optional + +* `accountId` (String) AWS Account where this resource is managed. +* `region` (String) Region where this resource is managed. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Route53 Resolver rules using the `id`. For example: ```typescript @@ -152,7 +179,7 @@ class MyConvertedCode extends TerraformStack { super(scope, name); Route53ResolverRule.generateConfigForImport( this, - "sys", + "example", "rslvr-rr-0123456789abcdef0" ); } @@ -163,7 +190,7 @@ class MyConvertedCode extends TerraformStack { Using `terraform import`, import Route53 Resolver rules using the `id`. For example: ```console -% terraform import aws_route53_resolver_rule.sys rslvr-rr-0123456789abcdef0 +% terraform import aws_route53_resolver_rule.example rslvr-rr-0123456789abcdef0 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/route53_resolver_rule_association.html.markdown b/website/docs/cdktf/typescript/r/route53_resolver_rule_association.html.markdown index 2c542a0d6823..52cff3ecb8e9 100644 --- a/website/docs/cdktf/typescript/r/route53_resolver_rule_association.html.markdown +++ b/website/docs/cdktf/typescript/r/route53_resolver_rule_association.html.markdown @@ -39,6 +39,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `resolverRuleId` - (Required) The ID of the resolver rule that you want to associate with the VPC. * `vpcId` - (Required) The ID of the VPC that you want to associate the resolver rule with. 
* `name` - (Optional) A name for the association that you're creating between a resolver rule and a VPC. @@ -51,6 +52,32 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_route53_resolver_rule_association.example + identity = { + id = "rslvr-rrassoc-97242eaf88example" + } +} + +resource "aws_route53_resolver_rule_association" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `id` - (String) ID of the Route53 Resolver rule association. + +#### Optional + +* `accountId` (String) AWS Account where this resource is managed. +* `region` (String) Region where this resource is managed. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Route53 Resolver rule associations using the `id`. For example: ```typescript @@ -81,4 +108,4 @@ Using `terraform import`, import Route53 Resolver rule associations using the `i % terraform import aws_route53_resolver_rule_association.example rslvr-rrassoc-97242eaf88example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/route53_zone.html.markdown b/website/docs/cdktf/typescript/r/route53_zone.html.markdown index 793e6be6ed4f..88480df0509b 100644 --- a/website/docs/cdktf/typescript/r/route53_zone.html.markdown +++ b/website/docs/cdktf/typescript/r/route53_zone.html.markdown @@ -91,14 +91,28 @@ import { TerraformStack } from "cdktf"; * See https://cdk.tf/provider-generation for more details. 
*/ import { Route53Zone } from "./.gen/providers/aws/route53-zone"; +import { Vpc } from "./.gen/providers/aws/vpc"; class MyConvertedCode extends TerraformStack { constructor(scope: Construct, name: string) { super(scope, name); + const primary = new Vpc(this, "primary", { + cidrBlock: "10.6.0.0/16", + enableDnsHostnames: true, + enableDnsSupport: true, + }); + const secondary = new Vpc(this, "secondary", { + cidrBlock: "10.7.0.0/16", + enableDnsHostnames: true, + enableDnsSupport: true, + }); new Route53Zone(this, "private", { name: "example.com", vpc: [ { - vpcId: example.id, + vpcId: primary.id, + }, + { + vpcId: secondary.id, }, ], }); @@ -170,4 +184,4 @@ Using `terraform import`, import Route53 Zones using the zone `id`. For example: % terraform import aws_route53_zone.myzone Z1D633PJN98FT9 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/route53profiles_association.html.markdown b/website/docs/cdktf/typescript/r/route53profiles_association.html.markdown index 3d3421b807bf..c18dd50d261e 100644 --- a/website/docs/cdktf/typescript/r/route53profiles_association.html.markdown +++ b/website/docs/cdktf/typescript/r/route53profiles_association.html.markdown @@ -61,6 +61,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the Profile Association. Must match a regex of `(?!^[0-9]+$)([a-zA-Z0-9\\-_' ']+)`. * `profileId` - (Required) ID of the profile associated with the VPC. * `resourceId` - (Required) Resource ID of the VPC the profile to be associated with. 
@@ -109,10 +110,10 @@ class MyConvertedCode extends TerraformStack { ``` -Using `terraform import`, import Route 53 Profiles Association using the `example_id_arg`. For example: +Using `terraform import`, import Route 53 Profiles Association using the `id`. For example: ```console % terraform import aws_route53profiles_association.example rpa-id-12345678 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/route53profiles_profile.html.markdown b/website/docs/cdktf/typescript/r/route53profiles_profile.html.markdown index fb5b1aeeb9bc..49fe3fcd54a1 100644 --- a/website/docs/cdktf/typescript/r/route53profiles_profile.html.markdown +++ b/website/docs/cdktf/typescript/r/route53profiles_profile.html.markdown @@ -43,6 +43,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the Profile. * `tags` - (Optional) Map of tags to assign to the resource. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. @@ -69,7 +70,7 @@ This resource exports the following attributes in addition to the arguments abov ## Import -In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Route 53 Profiles Profile using the `example_id_arg`. 
For example: +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Route 53 Profiles Profile using the `id`. For example: ```typescript // DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug @@ -93,10 +94,10 @@ class MyConvertedCode extends TerraformStack { ``` -Using `terraform import`, import Route 53 Profiles Profile using the `example`. For example: +Using `terraform import`, import Route 53 Profiles Profile using the `id`. For example: ```console % terraform import aws_route53profiles_profile.example rp-12345678 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/route53profiles_resource_association.html.markdown b/website/docs/cdktf/typescript/r/route53profiles_resource_association.html.markdown index 5e00ad174255..c84bd2282851 100644 --- a/website/docs/cdktf/typescript/r/route53profiles_resource_association.html.markdown +++ b/website/docs/cdktf/typescript/r/route53profiles_resource_association.html.markdown @@ -66,6 +66,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the Profile Resource Association. * `profileId` - (Required) ID of the profile associated with the VPC. * `resourceArn` - (Required) Resource ID of the resource to be associated with the profile. @@ -115,10 +116,10 @@ class MyConvertedCode extends TerraformStack { ``` -Using `terraform import`, import Route 53 Profiles Resource Association using the `example_id_arg`. 
For example: +Using `terraform import`, import Route 53 Profiles Resource Association using the `id`. For example: ```console % terraform import aws_route53profiles_resource_association.example rpa-id-12345678 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/route53recoverycontrolconfig_cluster.html.markdown b/website/docs/cdktf/typescript/r/route53recoverycontrolconfig_cluster.html.markdown index 461d37ecaa4d..93ef1dddd247 100644 --- a/website/docs/cdktf/typescript/r/route53recoverycontrolconfig_cluster.html.markdown +++ b/website/docs/cdktf/typescript/r/route53recoverycontrolconfig_cluster.html.markdown @@ -36,9 +36,10 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -The following arguments are required: +This resource supports the following arguments: * `name` - (Required) Unique name describing the cluster. +* `networkType` - (Optional) Network type of cluster. Valid values are `IPV4` and `DUALSTACK`. Defaults to `IPV4`. ## Attribute Reference @@ -85,4 +86,4 @@ Using `terraform import`, import Route53 Recovery Control Config cluster using t % terraform import aws_route53recoverycontrolconfig_cluster.mycluster arn:aws:route53-recovery-control::313517334327:cluster/f9ae13be-a11e-4ec7-8522-94a70468e6ea ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/route_table.html.markdown b/website/docs/cdktf/typescript/r/route_table.html.markdown index d25fa477f385..1e254fdd7de9 100644 --- a/website/docs/cdktf/typescript/r/route_table.html.markdown +++ b/website/docs/cdktf/typescript/r/route_table.html.markdown @@ -187,6 +187,7 @@ The target could then be updated again back to `local`. This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `vpcId` - (Required) The VPC ID. * `route` - (Optional) A list of route objects. Their keys are documented below. This argument is processed in [attribute-as-blocks mode](https://www.terraform.io/docs/configuration/attr-as-blocks.html). This means that omitting this argument is interpreted as ignoring any existing routes. To remove all managed routes an empty list should be specified. See the example above. @@ -240,6 +241,32 @@ attribute once the route resource is created. ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_route_table.example + identity = { + id = "rtb-4e616f6d69" + } +} + +resource "aws_route_table" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `id` - (String) ID of the routing table. + +#### Optional + +* `accountId` (String) AWS Account where this resource is managed. +* `region` (String) Region where this resource is managed. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Route Tables using the route table `id`. For example: ```typescript @@ -266,4 +293,4 @@ Using `terraform import`, import Route Tables using the route table `id`. 
For ex % terraform import aws_route_table.public_rt rtb-4e616f6d69 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/route_table_association.html.markdown b/website/docs/cdktf/typescript/r/route_table_association.html.markdown index b0f95a4c58bd..7c6985ea4acd 100644 --- a/website/docs/cdktf/typescript/r/route_table_association.html.markdown +++ b/website/docs/cdktf/typescript/r/route_table_association.html.markdown @@ -61,6 +61,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `subnetId` - (Optional) The subnet ID to create an association. Conflicts with `gatewayId`. * `gatewayId` - (Optional) The gateway ID to create an association. Conflicts with `subnetId`. * `routeTableId` - (Required) The ID of the routing table to associate with. @@ -149,4 +150,4 @@ With EC2 Internet Gateways: % terraform import aws_route_table_association.assoc igw-01b3a60780f8d034a/rtb-656c65616e6f72 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/rum_app_monitor.html.markdown b/website/docs/cdktf/typescript/r/rum_app_monitor.html.markdown index 020c870c6ca7..f207d4c427ea 100644 --- a/website/docs/cdktf/typescript/r/rum_app_monitor.html.markdown +++ b/website/docs/cdktf/typescript/r/rum_app_monitor.html.markdown @@ -39,6 +39,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The name of the log stream. * `appMonitorConfiguration` - (Optional) configuration data for the app monitor. See [app_monitor_configuration](#app_monitor_configuration) below. * `cwLogEnabled` - (Optional) Data collected by RUM is kept by RUM for 30 days and then deleted. This parameter specifies whether RUM sends a copy of this telemetry data to Amazon CloudWatch Logs in your account. This enables you to keep the telemetry data for more than 30 days, but it does incur Amazon CloudWatch Logs charges. Default value is `false`. @@ -48,8 +49,8 @@ This resource supports the following arguments: ### app_monitor_configuration * `allowCookies` - (Optional) If you set this to `true`, RUM web client sets two cookies, a session cookie and a user cookie. The cookies allow the RUM web client to collect data relating to the number of users an application has and the behavior of the application across a sequence of events. Cookies are stored in the top-level domain of the current page. -* `domain` - (Optional) The top-level internet domain name for which your application has administrative authority. Exactly one of `domain` or `domain_list` must be specified. -* `domain_list` - (Optional) A list of internet domain names for which your application has administrative authority. Exactly one of `domain` or `domain_list` must be specified. +* `domain` - (Optional) The top-level internet domain name for which your application has administrative authority. Exactly one of `domain` or `domainList` must be specified. +* `domainList` - (Optional) A list of internet domain names for which your application has administrative authority. Exactly one of `domain` or `domainList` must be specified. * `enableXray` - (Optional) If you set this to `true`, RUM enables X-Ray tracing for the user sessions that RUM samples. 
RUM adds an X-Ray trace header to allowed HTTP requests. It also records an X-Ray segment for allowed HTTP requests. * `excludedPages` - (Optional) A list of URLs in your website or application to exclude from RUM data collection. * `favoritePages` - (Optional) A list of pages in the CloudWatch RUM console that are to be displayed with a "favorite" icon. @@ -101,4 +102,4 @@ Using `terraform import`, import Cloudwatch RUM App Monitor using the `name`. Fo % terraform import aws_rum_app_monitor.example example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/rum_metrics_destination.html.markdown b/website/docs/cdktf/typescript/r/rum_metrics_destination.html.markdown index ddafdfe8faa5..88afed1a97b4 100644 --- a/website/docs/cdktf/typescript/r/rum_metrics_destination.html.markdown +++ b/website/docs/cdktf/typescript/r/rum_metrics_destination.html.markdown @@ -39,6 +39,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `appMonitorName` - (Required) The name of the CloudWatch RUM app monitor that will send the metrics. * `destination` - (Required) Defines the destination to send the metrics to. Valid values are `CloudWatch` and `Evidently`. If you specify `Evidently`, you must also specify the ARN of the CloudWatchEvidently experiment that is to be the destination and an IAM role that has permission to write to the experiment. * `destinationArn` - (Optional) Use this parameter only if Destination is Evidently. This parameter specifies the ARN of the Evidently experiment that will receive the extended metrics. 
@@ -78,4 +79,4 @@ Using `terraform import`, import Cloudwatch RUM Metrics Destination using the `i % terraform import aws_rum_metrics_destination.example example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/s3_access_point.html.markdown b/website/docs/cdktf/typescript/r/s3_access_point.html.markdown index b5932dc50e90..69dedafdebf9 100644 --- a/website/docs/cdktf/typescript/r/s3_access_point.html.markdown +++ b/website/docs/cdktf/typescript/r/s3_access_point.html.markdown @@ -146,6 +146,8 @@ The following arguments are optional: * `bucketAccountId` - (Optional) AWS account ID associated with the S3 bucket associated with this access point. * `policy` - (Optional) Valid JSON document that specifies the policy that you want to apply to this access point. Removing `policy` from your configuration or setting `policy` to null or an empty string (i.e., `policy = ""`) _will not_ delete the policy since it could have been set by `aws_s3control_access_point_policy`. To remove the `policy`, set it to `"{}"` (an empty JSON document). * `publicAccessBlockConfiguration` - (Optional) Configuration block to manage the `PublicAccessBlock` configuration that you want to apply to this Amazon S3 bucket. You can enable the configuration options in any combination. Detailed below. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `tags` - (Optional) Map of tags to assign to the access point. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level.
* `vpcConfiguration` - (Optional) Configuration block to restrict access to this access point to requests from the specified Virtual Private Cloud (VPC). Required for S3 on Outposts. Detailed below. ### public_access_block_configuration Configuration Block @@ -181,6 +183,7 @@ Note: S3 access points only support secure access by HTTPS. HTTP isn't supported * `hasPublicAccessPolicy` - Indicates whether this access point currently has a policy that allows public access. * `id` - For Access Point of an AWS Partition S3 Bucket, the AWS account ID and access point name separated by a colon (`:`). For S3 on Outposts Bucket, the ARN of the Access Point. * `networkOrigin` - Indicates whether this access point allows access from the public Internet. Values are `VPC` (the access point doesn't allow access from the public Internet) and `Internet` (the access point allows access from the public Internet, subject to the access point and bucket access policies). +* `tagsAll` - Map of tags assigned to the resource, including those inherited from the provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). 
## Import @@ -248,4 +251,4 @@ Import using the ARN for Access Points associated with an S3 on Outposts Bucket: % terraform import aws_s3_access_point.example arn:aws:s3-outposts:us-east-1:123456789012:outpost/op-1234567890123456/accesspoint/example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/s3_bucket.html.markdown b/website/docs/cdktf/typescript/r/s3_bucket.html.markdown index 2fd3d5c33c3b..b276d2edca96 100644 --- a/website/docs/cdktf/typescript/r/s3_bucket.html.markdown +++ b/website/docs/cdktf/typescript/r/s3_bucket.html.markdown @@ -48,6 +48,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `bucket` - (Optional, Forces new resource) Name of the bucket. If omitted, Terraform will assign a random, unique name. Must be lowercase and less than or equal to 63 characters in length. A full list of bucket naming rules [may be found here](https://docs.aws.amazon.com/AmazonS3/latest/userguide/bucketnamingrules.html). The name must not be in the format `[bucket_name]--[azid]--x-s3`. Use the [`aws_s3_directory_bucket`](s3_directory_bucket.html) resource to manage S3 Express buckets. * `bucketPrefix` - (Optional, Forces new resource) Creates a unique bucket name beginning with the specified prefix. Conflicts with `bucket`. Must be lowercase and less than or equal to 37 characters in length. A full list of bucket naming rules [may be found here](https://docs.aws.amazon.com/AmazonS3/latest/userguide/bucketnamingrules.html). 
* `forceDestroy` - (Optional, Default:`false`) Boolean that indicates all objects (including any [locked objects](https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html)) should be deleted from the bucket *when the bucket is destroyed* so that the bucket can be destroyed without error. These objects are *not* recoverable. This only deletes objects when the bucket is destroyed, *not* when setting this parameter to `true`. Once this parameter is set to `true`, there must be a successful `terraform apply` run before a destroy is required to update this value in the resource state. Without a successful `terraform apply` after this parameter is set, this flag will have no effect. If setting this field in the same operation that would require replacing the bucket or destroying the bucket, this flag will not work. Additionally when importing a bucket, a successful `terraform apply` is required to set this value in state before it will take effect on a destroy operation. @@ -321,9 +322,9 @@ This resource exports the following attributes in addition to the arguments abov * `id` - Name of the bucket. * `arn` - ARN of the bucket. Will be of format `arn:aws:s3:::bucketname`. * `bucketDomainName` - Bucket domain name. Will be of format `bucketname.s3.amazonaws.com`. +* `bucketRegion` - AWS region this bucket resides in. * `bucketRegionalDomainName` - The bucket region-specific domain name. The bucket domain name including the region name. Please refer to the [S3 endpoints reference](https://docs.aws.amazon.com/general/latest/gr/s3.html#s3_region) for format. Note: AWS CloudFront allows specifying an S3 region-specific endpoint when creating an S3 origin. This will prevent redirect issues from CloudFront to the S3 Origin URL. For more information, see the [Virtual Hosted-Style Requests for Other Regions](https://docs.aws.amazon.com/AmazonS3/latest/userguide/VirtualHosting.html#deprecated-global-endpoint) section in the AWS S3 User Guide. 
* `hostedZoneId` - [Route 53 Hosted Zone ID](https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_website_region_endpoints) for this bucket's region. -* `region` - AWS region this bucket resides in. * `tagsAll` - Map of tags assigned to the resource, including those inherited from the provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). * `websiteEndpoint` - (**Deprecated**) Website endpoint, if the bucket is configured with a website. If not, this will be an empty string. Use the resource [`aws_s3_bucket_website_configuration`](s3_bucket_website_configuration.html.markdown) instead. * `websiteDomain` - (**Deprecated**) Domain of the website endpoint, if the bucket is configured with a website. If not, this will be an empty string. This is used to create Route 53 alias records. Use the resource [`aws_s3_bucket_website_configuration`](s3_bucket_website_configuration.html.markdown) instead. @@ -339,6 +340,32 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_s3_bucket.example + identity = { + bucket = "bucket-name" + } +} + +resource "aws_s3_bucket" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `bucket` (String) Name of the S3 bucket. + +#### Optional + +* `accountId` (String) AWS Account where this resource is managed. +* `region` (String) Region where this resource is managed. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import S3 bucket using the `bucket`. 
For example: ```typescript @@ -353,7 +380,7 @@ import { S3Bucket } from "./.gen/providers/aws/s3-bucket"; class MyConvertedCode extends TerraformStack { constructor(scope: Construct, name: string) { super(scope, name); - S3Bucket.generateConfigForImport(this, "bucket", "bucket-name"); + S3Bucket.generateConfigForImport(this, "example", "bucket-name"); } } @@ -362,7 +389,7 @@ class MyConvertedCode extends TerraformStack { Using `terraform import`, import S3 bucket using the `bucket`. For example: ```console -% terraform import aws_s3_bucket.bucket bucket-name +% terraform import aws_s3_bucket.example bucket-name ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/s3_bucket_accelerate_configuration.html.markdown b/website/docs/cdktf/typescript/r/s3_bucket_accelerate_configuration.html.markdown index 690ce240b24a..7ddfa440bdbf 100644 --- a/website/docs/cdktf/typescript/r/s3_bucket_accelerate_configuration.html.markdown +++ b/website/docs/cdktf/typescript/r/s3_bucket_accelerate_configuration.html.markdown @@ -45,6 +45,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `bucket` - (Required, Forces new resource) Name of the bucket. * `expectedBucketOwner` - (Optional, Forces new resource) Account ID of the expected bucket owner. * `status` - (Required) Transfer acceleration state of the bucket. Valid values: `Enabled`, `Suspended`. 
@@ -121,4 +122,4 @@ If the owner (account ID) of the source bucket differs from the account used to % terraform import aws_s3_bucket_accelerate_configuration.example bucket-name,123456789012 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/s3_bucket_acl.html.markdown b/website/docs/cdktf/typescript/r/s3_bucket_acl.html.markdown index 97966f4bc7c7..6b576dc511fc 100644 --- a/website/docs/cdktf/typescript/r/s3_bucket_acl.html.markdown +++ b/website/docs/cdktf/typescript/r/s3_bucket_acl.html.markdown @@ -193,6 +193,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `acl` - (Optional, either `accessControlPolicy` or `acl` is required) Specifies the Canned ACL to apply to the bucket. Valid values: `private`, `public-read`, `public-read-write`, `aws-exec-read`, `authenticated-read`, `bucket-owner-read`, `bucket-owner-full-control`, `log-delivery-write`. Full details are available on the [AWS documentation](https://docs.aws.amazon.com/AmazonS3/latest/userguide/acl-overview.html#canned-acl). * `accessControlPolicy` - (Optional, either `accessControlPolicy` or `acl` is required) Configuration block that sets the ACL permissions for an object per grantee. [See below](#access_control_policy). * `bucket` - (Required, Forces new resource) Bucket to which to apply the ACL. @@ -236,6 +237,34 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. 
For example: + +```terraform +import { + to = aws_s3_bucket_acl.example + identity = { + bucket = "bucket-name" + } +} + +resource "aws_s3_bucket_acl" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `bucket` (String) S3 bucket name. + +#### Optional + +* `accountId` (String) AWS Account where this resource is managed. +* `acl` (String) Canned ACL to apply to the bucket. +* `expectedBucketOwner` (String) Account ID of the expected bucket owner. +* `region` (String) Region where this resource is managed. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import S3 bucket ACL using `bucket`, `expectedBucketOwner`, and/or `acl`, depending on your situation. For example: If the owner (account ID) of the source bucket is the _same_ account used to configure the Terraform AWS Provider, and the source bucket is **not configured** with a @@ -358,4 +387,4 @@ If the owner (account ID) of the source bucket _differs_ from the account used t [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/acl-overview.html#canned-acl - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/s3_bucket_analytics_configuration.html.markdown b/website/docs/cdktf/typescript/r/s3_bucket_analytics_configuration.html.markdown index 987f6116b14a..62e6a6848b27 100644 --- a/website/docs/cdktf/typescript/r/s3_bucket_analytics_configuration.html.markdown +++ b/website/docs/cdktf/typescript/r/s3_bucket_analytics_configuration.html.markdown @@ -93,6 +93,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `bucket` - (Required) Name of the bucket this analytics configuration is associated with. * `name` - (Required) Unique identifier of the analytics configuration for the bucket. * `filter` - (Optional) Object filtering that accepts a prefix, tags, or a logical AND of prefix and tags (documented below). @@ -159,4 +160,4 @@ Using `terraform import`, import S3 bucket analytics configurations using `bucke % terraform import aws_s3_bucket_analytics_configuration.my-bucket-entire-bucket my-bucket:EntireBucket ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/s3_bucket_cors_configuration.html.markdown b/website/docs/cdktf/typescript/r/s3_bucket_cors_configuration.html.markdown index 901803d57c85..cfd80c1f2649 100644 --- a/website/docs/cdktf/typescript/r/s3_bucket_cors_configuration.html.markdown +++ b/website/docs/cdktf/typescript/r/s3_bucket_cors_configuration.html.markdown @@ -65,6 +65,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `bucket` - (Required, Forces new resource) Name of the bucket. * `expectedBucketOwner` - (Optional, Forces new resource) Account ID of the expected bucket owner. * `corsRule` - (Required) Set of origins and methods (cross-origin access that you want to allow). [See below](#cors_rule). You can configure up to 100 rules. 
@@ -88,6 +89,33 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_s3_bucket_cors_configuration.example + identity = { + bucket = "bucket-name" + } +} + +resource "aws_s3_bucket_cors_configuration" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `bucket` (String) S3 bucket name. + +#### Optional + +* `accountId` (String) AWS Account where this resource is managed. +* `expectedBucketOwner` (String) Account ID of the expected bucket owner. +* `region` (String) Region where this resource is managed. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import S3 bucket CORS configuration using the `bucket` or using the `bucket` and `expectedBucketOwner` separated by a comma (`,`). 
For example: If the owner (account ID) of the source bucket is the same account used to configure the Terraform AWS Provider, import using the `bucket`: @@ -152,4 +180,4 @@ If the owner (account ID) of the source bucket differs from the account used to % terraform import aws_s3_bucket_cors_configuration.example bucket-name,123456789012 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/s3_bucket_intelligent_tiering_configuration.html.markdown b/website/docs/cdktf/typescript/r/s3_bucket_intelligent_tiering_configuration.html.markdown index ad982f6dd904..ef9eda5bae23 100644 --- a/website/docs/cdktf/typescript/r/s3_bucket_intelligent_tiering_configuration.html.markdown +++ b/website/docs/cdktf/typescript/r/s3_bucket_intelligent_tiering_configuration.html.markdown @@ -98,6 +98,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `bucket` - (Required) Name of the bucket this intelligent tiering configuration is associated with. * `name` - (Required) Unique name used to identify the S3 Intelligent-Tiering configuration for the bucket. * `status` - (Optional) Specifies the status of the configuration. Valid values: `Enabled`, `Disabled`. 
@@ -150,4 +151,4 @@ Using `terraform import`, import S3 bucket intelligent tiering configurations us % terraform import aws_s3_bucket_intelligent_tiering_configuration.my-bucket-entire-bucket my-bucket:EntireBucket ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/s3_bucket_inventory.html.markdown b/website/docs/cdktf/typescript/r/s3_bucket_inventory.html.markdown index a7f439b769f0..e9edc74e1df8 100644 --- a/website/docs/cdktf/typescript/r/s3_bucket_inventory.html.markdown +++ b/website/docs/cdktf/typescript/r/s3_bucket_inventory.html.markdown @@ -106,6 +106,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `bucket` - (Required) Name of the source bucket that inventory lists the objects for. * `name` - (Required) Unique identifier of the inventory configuration for the bucket. * `includedObjectVersions` - (Required) Object versions to include in the inventory list. Valid values: `All`, `Current`. 
@@ -180,4 +181,4 @@ Using `terraform import`, import S3 bucket inventory configurations using `bucke % terraform import aws_s3_bucket_inventory.my-bucket-entire-bucket my-bucket:EntireBucket ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/s3_bucket_lifecycle_configuration.html.markdown b/website/docs/cdktf/typescript/r/s3_bucket_lifecycle_configuration.html.markdown index 20957dcd5b1e..d7425ebce823 100644 --- a/website/docs/cdktf/typescript/r/s3_bucket_lifecycle_configuration.html.markdown +++ b/website/docs/cdktf/typescript/r/s3_bucket_lifecycle_configuration.html.markdown @@ -503,6 +503,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `bucket` - (Required) Name of the source S3 bucket you want Amazon S3 to monitor. * `expectedBucketOwner` - (Optional) Account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error. * `rule` - (Required) List of configuration blocks describing the rules managing the replication. [See below](#rule). 
@@ -675,4 +676,4 @@ If the owner (account ID) of the source bucket differs from the account used to % terraform import aws_s3_bucket_lifecycle_configuration.example bucket-name,123456789012 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/s3_bucket_logging.html.markdown b/website/docs/cdktf/typescript/r/s3_bucket_logging.html.markdown index cf19bcc336e4..f4062a1e775f 100644 --- a/website/docs/cdktf/typescript/r/s3_bucket_logging.html.markdown +++ b/website/docs/cdktf/typescript/r/s3_bucket_logging.html.markdown @@ -20,6 +20,83 @@ to decide which method meets your requirements. ## Example Usage +### Grant permission by using bucket policy + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { Token, TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. 
+ */ +import { DataAwsCallerIdentity } from "./.gen/providers/aws/data-aws-caller-identity"; +import { DataAwsIamPolicyDocument } from "./.gen/providers/aws/data-aws-iam-policy-document"; +import { S3Bucket } from "./.gen/providers/aws/s3-bucket"; +import { S3BucketLoggingA } from "./.gen/providers/aws/s3-bucket-logging"; +import { S3BucketPolicy } from "./.gen/providers/aws/s3-bucket-policy"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + const example = new S3Bucket(this, "example", { + bucket: "example-bucket", + }); + const logging = new S3Bucket(this, "logging", { + bucket: "access-logging-bucket", + }); + const awsS3BucketLoggingExample = new S3BucketLoggingA(this, "example_2", { + bucket: example.bucket, + targetBucket: logging.bucket, + targetObjectKeyFormat: { + partitionedPrefix: { + partitionDateSource: "EventTime", + }, + }, + targetPrefix: "log/", + }); + /*This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match.*/ + awsS3BucketLoggingExample.overrideLogicalId("example"); + const current = new DataAwsCallerIdentity(this, "current", {}); + const loggingBucketPolicy = new DataAwsIamPolicyDocument( + this, + "logging_bucket_policy", + { + statement: [ + { + actions: ["s3:PutObject"], + condition: [ + { + test: "StringEquals", + values: [Token.asString(current.accountId)], + variable: "aws:SourceAccount", + }, + ], + principals: [ + { + identifiers: ["logging.s3.amazonaws.com"], + type: "Service", + }, + ], + resources: ["${" + logging.arn + "}/*"], + }, + ], + } + ); + const awsS3BucketPolicyLogging = new S3BucketPolicy(this, "logging_5", { + bucket: logging.bucket, + policy: Token.asString(loggingBucketPolicy.json), + }); + /*This allows the Terraform resource name to match the original name. 
You can remove the call if you don't need them to match.*/ + awsS3BucketPolicyLogging.overrideLogicalId("logging"); + } +} + +``` + +### Grant permission by using bucket ACL + +The [AWS Documentation](https://docs.aws.amazon.com/AmazonS3/latest/userguide/enable-server-access-logging.html) does not recommend using the ACL. + ```typescript // DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug import { Construct } from "constructs"; @@ -66,6 +143,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `bucket` - (Required, Forces new resource) Name of the bucket. * `expectedBucketOwner` - (Optional, Forces new resource) Account ID of the expected bucket owner. * `targetBucket` - (Required) Name of the bucket where you want Amazon S3 to store server access logs. @@ -93,8 +171,8 @@ The `grantee` configuration block supports the following arguments: The `targetObjectKeyFormat` configuration block supports the following arguments: -* `partitionedPrefix` - (Optional) Partitioned S3 key for log objects. [See below](#partitioned_prefix). -* `simplePrefix` - (Optional) Use the simple format for S3 keys for log objects. To use, set `simple_prefix {}`. +* `partitionedPrefix` - (Optional) Partitioned S3 key for log objects, in the form `[target_prefix][SourceAccountId]/[SourceRegion]/[SourceBucket]/[YYYY]/[MM]/[DD]/[YYYY]-[MM]-[DD]-[hh]-[mm]-[ss]-[UniqueString]`. Conflicts with `simplePrefix`. [See below](#partitioned_prefix). +* `simplePrefix` - (Optional) Use the simple format for S3 keys for log objects, in the form `[target_prefix][YYYY]-[MM]-[DD]-[hh]-[mm]-[ss]-[UniqueString]`. 
To use, set `simplePrefix: {}`. Conflicts with `partitionedPrefix`. ### partitioned_prefix @@ -110,6 +188,33 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_s3_bucket_logging.example + identity = { + bucket = "bucket-name" + } +} + +resource "aws_s3_bucket_logging" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `bucket` (String) S3 bucket name. + +#### Optional + +* `accountId` (String) AWS Account where this resource is managed. +* `expectedBucketOwner` (String) Account ID of the expected bucket owner. +* `region` (String) Region where this resource is managed. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import S3 bucket logging using the `bucket` or using the `bucket` and `expectedBucketOwner` separated by a comma (`,`).
For example: If the owner (account ID) of the source bucket is the same account used to configure the Terraform AWS Provider, import using the `bucket`: @@ -170,4 +275,4 @@ If the owner (account ID) of the source bucket differs from the account used to % terraform import aws_s3_bucket_logging.example bucket-name,123456789012 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/s3_bucket_metadata_configuration.html.markdown b/website/docs/cdktf/typescript/r/s3_bucket_metadata_configuration.html.markdown new file mode 100644 index 000000000000..a9dbed965f85 --- /dev/null +++ b/website/docs/cdktf/typescript/r/s3_bucket_metadata_configuration.html.markdown @@ -0,0 +1,189 @@ +--- +subcategory: "S3 (Simple Storage)" +layout: "aws" +page_title: "AWS: aws_s3_bucket_metadata_configuration" +description: |- + Manages Amazon S3 Metadata for a bucket. +--- + + + +# Resource: aws_s3_bucket_metadata_configuration + +Manages Amazon S3 Metadata for a bucket. + +## Example Usage + +### Basic Usage + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { Token, TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. 
+ */ +import { S3BucketMetadataConfiguration } from "./.gen/providers/aws/s3-bucket-metadata-configuration"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + new S3BucketMetadataConfiguration(this, "example", { + bucket: Token.asString(awsS3BucketExample.bucket), + metadataConfiguration: [ + { + inventoryTableConfiguration: [ + { + configurationState: "ENABLED", + }, + ], + journalTableConfiguration: [ + { + recordExpiration: [ + { + days: 7, + expiration: "ENABLED", + }, + ], + }, + ], + }, + ], + }); + } +} + +``` + +## Argument Reference + +The following arguments are required: + +* `bucket` - (Required) General purpose bucket that you want to create the metadata configuration for. +* `metadataConfiguration` - (Required) Metadata configuration. See [`metadataConfiguration` Block](#metadata_configuration-block) for details. + +The following arguments are optional: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). + +### `metadataConfiguration` Block + +The `metadataConfiguration` configuration block supports the following arguments: + +* `inventoryTableConfiguration` - (Required) Inventory table configuration. See [`inventoryTableConfiguration` Block](#inventory_table_configuration-block) for details. +* `journalTableConfiguration` - (Required) Journal table configuration. See [`journalTableConfiguration` Block](#journal_table_configuration-block) for details. + +### `inventoryTableConfiguration` Block + +The `inventoryTableConfiguration` configuration block supports the following arguments: + +* `configurationState` - (Required) Configuration state of the inventory table, indicating whether the inventory table is enabled or disabled. 
Valid values: `ENABLED`, `DISABLED`. +* `encryptionConfiguration` - (Optional) Encryption configuration for the inventory table. See [`encryptionConfiguration` Block](#encryption_configuration-block) for details. + +### `journalTableConfiguration` Block + +The `journalTableConfiguration` configuration block supports the following arguments: + +* `encryptionConfiguration` - (Optional) Encryption configuration for the journal table. See [`encryptionConfiguration` Block](#encryption_configuration-block) for details. +* `recordExpiration` - (Required) Journal table record expiration settings. See [`recordExpiration` Block](#record_expiration-block) for details. + +### `encryptionConfiguration` Block + +The `encryptionConfiguration` configuration block supports the following arguments: + +* `kmsKeyArn` - (Optional) KMS key ARN when `sseAlgorithm` is `aws:kms`. +* `sseAlgorithm` - (Required) Encryption type for the metadata table. Valid values: `aws:kms`, `AES256`. + +### `recordExpiration` Block + +The `recordExpiration` configuration block supports the following arguments: + +* `days` - (Optional) Number of days to retain journal table records. +* `expiration` - (Required) Whether journal table record expiration is enabled or disabled. Valid values: `ENABLED`, `DISABLED`. + +## Attribute Reference + +This resource exports the following attributes in addition to the arguments above: + +* `metadata_configuration.0.destination` - Destination information for the S3 Metadata configuration. + * `tableBucketArn` - ARN of the table bucket where the metadata configuration is stored. + * `tableBucketType` - Type of the table bucket where the metadata configuration is stored. + * `tableNamespace` - Namespace in the table bucket where the metadata tables for the metadata configuration are stored. +* `metadata_configuration.0.inventory_table_configuration.0.table_arn` - Inventory table ARN.
+* `metadata_configuration.0.inventory_table_configuration.0.table_name` - Inventory table name. +* `metadata_configuration.0.journal_table_configuration.0.table_arn` - Journal table ARN. +* `metadata_configuration.0.journal_table_configuration.0.table_name` - Journal table name. + +## Timeouts + +[Configuration options](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts): + +* `create` - (Default `30m`) + +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import S3 bucket metadata configuration using the `bucket` or using the `bucket` and `expectedBucketOwner` separated by a comma (`,`). For example: + +If the owner (account ID) of the source bucket is the same account used to configure the Terraform AWS Provider, import using the `bucket`: + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. + */ +import { S3BucketMetadataConfiguration } from "./.gen/providers/aws/s3-bucket-metadata-configuration"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + S3BucketMetadataConfiguration.generateConfigForImport( + this, + "example", + "bucket-name" + ); + } +} + +``` + +If the owner (account ID) of the source bucket differs from the account used to configure the Terraform AWS Provider, import using the `bucket` and `expectedBucketOwner` separated by a comma (`,`): + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. 
+ * See https://cdk.tf/provider-generation for more details. + */ +import { S3BucketMetadataConfiguration } from "./.gen/providers/aws/s3-bucket-metadata-configuration"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + S3BucketMetadataConfiguration.generateConfigForImport( + this, + "example", + "bucket-name,123456789012" + ); + } +} + +``` + +Using `terraform import`, import S3 bucket metadata configuration using the `bucket` or using the `bucket` and `expectedBucketOwner` separated by a comma (`,`). For example: + +If the owner (account ID) of the source bucket is the same account used to configure the Terraform AWS Provider, import using the `bucket`: + +```console +% terraform import aws_s3_bucket_metadata_configuration.example bucket-name +``` + +If the owner (account ID) of the source bucket differs from the account used to configure the Terraform AWS Provider, import using the `bucket` and `expectedBucketOwner` separated by a comma (`,`): + +```console +% terraform import aws_s3_bucket_metadata_configuration.example bucket-name,123456789012 +``` + + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/s3_bucket_metric.html.markdown b/website/docs/cdktf/typescript/r/s3_bucket_metric.html.markdown index f60cdb18da17..8cc6cdf5b99a 100644 --- a/website/docs/cdktf/typescript/r/s3_bucket_metric.html.markdown +++ b/website/docs/cdktf/typescript/r/s3_bucket_metric.html.markdown @@ -120,6 +120,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `bucket` - (Required) Name of the bucket to put metric configuration. 
* `name` - (Required) Unique identifier of the metrics configuration for the bucket. Must be less than or equal to 64 characters in length. * `filter` - (Optional) [Object filtering](http://docs.aws.amazon.com/AmazonS3/latest/dev/metrics-configurations.html#metrics-configurations-filter) that accepts a prefix, tags, or a logical AND of prefix and tags (documented below). @@ -168,4 +169,4 @@ Using `terraform import`, import S3 bucket metric configurations using `bucket:m % terraform import aws_s3_bucket_metric.my-bucket-entire-bucket my-bucket:EntireBucket ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/s3_bucket_notification.html.markdown b/website/docs/cdktf/typescript/r/s3_bucket_notification.html.markdown index 72681113ef72..ca814820baf2 100644 --- a/website/docs/cdktf/typescript/r/s3_bucket_notification.html.markdown +++ b/website/docs/cdktf/typescript/r/s3_bucket_notification.html.markdown @@ -432,6 +432,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `eventbridge` - (Optional) Whether to enable Amazon EventBridge notifications. Defaults to `false`. * `lambdaFunction` - (Optional, Multiple) Used to configure notifications to a Lambda Function. See below. * `queue` - (Optional) Notification configuration to SQS Queue. See below. @@ -497,4 +498,4 @@ Using `terraform import`, import S3 bucket notification using the `bucket`. 
For % terraform import aws_s3_bucket_notification.bucket_notification bucket-name ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/s3_bucket_object.html.markdown b/website/docs/cdktf/typescript/r/s3_bucket_object.html.markdown index d76c7c6e4368..dba922977cfb 100644 --- a/website/docs/cdktf/typescript/r/s3_bucket_object.html.markdown +++ b/website/docs/cdktf/typescript/r/s3_bucket_object.html.markdown @@ -217,6 +217,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `acl` - (Optional) [Canned ACL](https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl) to apply. Valid values are `private`, `public-read`, `public-read-write`, `aws-exec-read`, `authenticated-read`, `bucket-owner-read`, and `bucket-owner-full-control`. Defaults to `private`. * `bucketKeyEnabled` - (Optional) Whether or not to use [Amazon S3 Bucket Keys](https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html) for SSE-KMS. * `cacheControl` - (Optional) Caching behavior along the request/reply chain Read [w3c cache_control](http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9) for further details. @@ -257,6 +258,34 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. 
For example: + +```terraform +import { + to = aws_s3_bucket_object.example + identity = { + bucket = "some-bucket-name" + key = "some/key.txt" + } +} + +resource "aws_s3_bucket_object" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `bucket` (String) S3 bucket name. +* `key` (String) Object key. + +#### Optional + +* `accountId` (String) AWS Account where this resource is managed. +* `region` (String) Region where this resource is managed. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import objects using the `id` or S3 URL. For example: Import using the `id`, which is the bucket name and the key together: @@ -321,4 +350,4 @@ Import using S3 URL syntax: % terraform import aws_s3_bucket_object.example s3://some-bucket-name/some/key.txt ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/s3_bucket_object_lock_configuration.html.markdown b/website/docs/cdktf/typescript/r/s3_bucket_object_lock_configuration.html.markdown index 0e93f6154c06..48a8501f848f 100644 --- a/website/docs/cdktf/typescript/r/s3_bucket_object_lock_configuration.html.markdown +++ b/website/docs/cdktf/typescript/r/s3_bucket_object_lock_configuration.html.markdown @@ -70,6 +70,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `bucket` - (Required, Forces new resource) Name of the bucket. * `expectedBucketOwner` - (Optional, Forces new resource) Account ID of the expected bucket owner. 
* `objectLockEnabled` - (Optional, Forces new resource) Indicates whether this bucket has an Object Lock configuration enabled. Defaults to `Enabled`. Valid values: `Enabled`. @@ -159,4 +160,4 @@ If the owner (account ID) of the source bucket differs from the account used to % terraform import aws_s3_bucket_object_lock_configuration.example bucket-name,123456789012 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/s3_bucket_ownership_controls.html.markdown b/website/docs/cdktf/typescript/r/s3_bucket_ownership_controls.html.markdown index c85d43b0899d..2505c384f0ef 100644 --- a/website/docs/cdktf/typescript/r/s3_bucket_ownership_controls.html.markdown +++ b/website/docs/cdktf/typescript/r/s3_bucket_ownership_controls.html.markdown @@ -51,15 +51,17 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -The following arguments are required: +This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `bucket` - (Required) Name of the bucket that you want to associate this access point with. * `rule` - (Required) Configuration block(s) with Ownership Controls rules. Detailed below. ### rule Configuration Block -The following arguments are required: +The `rule` configuration block supports the following arguments: + + * `objectOwnership` - (Required) Object ownership. 
Valid values: `BucketOwnerPreferred`, `ObjectWriter` or `BucketOwnerEnforced` * `BucketOwnerPreferred` - Objects uploaded to the bucket change ownership to the bucket owner if the objects are uploaded with the `bucket-owner-full-control` canned ACL. * `ObjectWriter` - Uploading account will own the object if the object is uploaded with the `bucket-owner-full-control` canned ACL. @@ -103,4 +105,4 @@ Using `terraform import`, import S3 Bucket Ownership Controls using S3 Bucket na % terraform import aws_s3_bucket_ownership_controls.example my-bucket ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/s3_bucket_policy.html.markdown b/website/docs/cdktf/typescript/r/s3_bucket_policy.html.markdown index 109ff95ed719..6cc6a8ca1105 100644 --- a/website/docs/cdktf/typescript/r/s3_bucket_policy.html.markdown +++ b/website/docs/cdktf/typescript/r/s3_bucket_policy.html.markdown @@ -70,10 +70,13 @@ class MyConvertedCode extends TerraformStack { ``` +-> Only one `aws_s3_bucket_policy` resource should be defined per S3 bucket. Defining multiple `aws_s3_bucket_policy` resources with different Terraform names but the same `bucket` value may result in unexpected policy overwrites. Each resource uses the `PutBucketPolicy` API, which replaces the entire existing policy without error or warning. Because Terraform treats each resource independently, the policy applied last will silently override any previously applied policy. + ## Argument Reference This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `bucket` - (Required) Name of the bucket to which to apply the policy. * `policy` - (Required) Text of the policy. 
Although this is a bucket policy rather than an IAM policy, the [`aws_iam_policy_document`](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) data source may be used, so long as it specifies a principal. For more information about building AWS IAM policy documents with Terraform, see the [AWS IAM Policy Document Guide](https://learn.hashicorp.com/terraform/aws/iam-policy). Note: Bucket policies are limited to 20 KB in size. @@ -83,6 +86,32 @@ This resource exports no additional attributes. ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_s3_bucket_policy.example + identity = { + bucket = "my-tf-test-bucket" + } +} + +resource "aws_s3_bucket_policy" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `bucket` (String) Name of the S3 bucket. + +#### Optional + +* `accountId` (String) AWS Account where this resource is managed. +* `region` (String) Region where this resource is managed. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import S3 bucket policies using the bucket name. For example: ```typescript @@ -99,7 +128,7 @@ class MyConvertedCode extends TerraformStack { super(scope, name); S3BucketPolicy.generateConfigForImport( this, - "allowAccessFromAnotherAccount", + "example", "my-tf-test-bucket" ); } @@ -110,7 +139,7 @@ class MyConvertedCode extends TerraformStack { Using `terraform import`, import S3 bucket policies using the bucket name. 
For example: ```console -% terraform import aws_s3_bucket_policy.allow_access_from_another_account my-tf-test-bucket +% terraform import aws_s3_bucket_policy.example my-tf-test-bucket ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/s3_bucket_public_access_block.html.markdown b/website/docs/cdktf/typescript/r/s3_bucket_public_access_block.html.markdown index 5000c0a8ec02..91b90b445c1b 100644 --- a/website/docs/cdktf/typescript/r/s3_bucket_public_access_block.html.markdown +++ b/website/docs/cdktf/typescript/r/s3_bucket_public_access_block.html.markdown @@ -14,6 +14,8 @@ Manages S3 bucket-level Public Access Block configuration. For more information -> This resource cannot be used with S3 directory buckets. +~> Setting `skipDestroy` to `true` means that the AWS Provider will not destroy a public access block, even when running `terraform destroy`. The configuration is thus an intentional dangling resource that is not managed by Terraform and will remain in-place in your AWS account. + ## Example Usage ```typescript @@ -54,9 +56,10 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `bucket` - (Required) S3 Bucket to which this Public Access Block configuration should be applied. * `blockPublicAcls` - (Optional) Whether Amazon S3 should block public ACLs for this bucket. Defaults to `false`. Enabling this setting does not affect existing policies or ACLs. When set to `true` causes the following behavior: - * PUT Bucket acl and PUT Object acl calls will fail if the specified ACL allows public access. 
+ * PUT Bucket ACL and PUT Object ACL calls will fail if the specified ACL allows public access. * PUT Object calls will fail if the request includes an object ACL. * `blockPublicPolicy` - (Optional) Whether Amazon S3 should block public bucket policies for this bucket. Defaults to `false`. Enabling this setting does not affect the existing bucket policy. When set to `true` causes Amazon S3 to: * Reject calls to PUT Bucket policy if the specified bucket policy allows public access. @@ -64,6 +67,7 @@ This resource supports the following arguments: * Ignore public ACLs on this bucket and any objects that it contains. * `restrictPublicBuckets` - (Optional) Whether Amazon S3 should restrict public bucket policies for this bucket. Defaults to `false`. Enabling this setting does not affect the previously stored bucket policy, except that public and cross-account access within the public bucket policy, including non-public delegation to specific accounts, is blocked. When set to `true`: * Only the bucket owner and AWS Services can access this buckets if it has a public policy. +* `skipDestroy` - (Optional) Whether to retain the public access block upon destruction. If set to `true`, the resource is simply removed from state instead. This may be desirable in certain scenarios to prevent the removal of a public access block before deletion of the associated bucket. 
## Attribute Reference @@ -103,4 +107,4 @@ Using `terraform import`, import `aws_s3_bucket_public_access_block` using the b % terraform import aws_s3_bucket_public_access_block.example my-bucket ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/s3_bucket_replication_configuration.html.markdown b/website/docs/cdktf/typescript/r/s3_bucket_replication_configuration.html.markdown index fdcb80642d7c..0382b38c7221 100644 --- a/website/docs/cdktf/typescript/r/s3_bucket_replication_configuration.html.markdown +++ b/website/docs/cdktf/typescript/r/s3_bucket_replication_configuration.html.markdown @@ -20,6 +20,8 @@ Provides an independent configuration resource for S3 bucket [replication config ### Using replication configuration +#### Terraform AWS Provider v5 (and below) + ```typescript // DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug import { Construct } from "constructs"; @@ -160,9 +162,163 @@ class MyConvertedCode extends TerraformStack { storageClass: "STANDARD", }, filter: { - prefix: "foo", + prefix: "example", + }, + id: "examplerule", + status: "Enabled", + }, + ], + }); + /*This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match.*/ + awsS3BucketReplicationConfigurationReplication.overrideLogicalId( + "replication" + ); + } +} + +``` + +#### Terraform AWS Provider v6 (and above) + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { Token, TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. 
+ */ +import { DataAwsIamPolicyDocument } from "./.gen/providers/aws/data-aws-iam-policy-document"; +import { IamPolicy } from "./.gen/providers/aws/iam-policy"; +import { IamRole } from "./.gen/providers/aws/iam-role"; +import { IamRolePolicyAttachment } from "./.gen/providers/aws/iam-role-policy-attachment"; +import { AwsProvider } from "./.gen/providers/aws/provider"; +import { S3Bucket } from "./.gen/providers/aws/s3-bucket"; +import { S3BucketAcl } from "./.gen/providers/aws/s3-bucket-acl"; +import { S3BucketReplicationConfigurationA } from "./.gen/providers/aws/s3-bucket-replication-configuration"; +import { S3BucketVersioningA } from "./.gen/providers/aws/s3-bucket-versioning"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + new AwsProvider(this, "aws", { + region: "eu-west-1", + }); + const destination = new S3Bucket(this, "destination", { + bucket: "tf-test-bucket-destination-12345", + }); + const source = new S3Bucket(this, "source", { + bucket: "tf-test-bucket-source-12345", + region: "eu-central-1", + }); + new S3BucketAcl(this, "source_bucket_acl", { + acl: "private", + bucket: source.id, + region: "eu-central-1", + }); + const awsS3BucketVersioningDestination = new S3BucketVersioningA( + this, + "destination_4", + { + bucket: destination.id, + versioningConfiguration: { + status: "Enabled", + }, + } + ); + /*This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match.*/ + awsS3BucketVersioningDestination.overrideLogicalId("destination"); + const awsS3BucketVersioningSource = new S3BucketVersioningA( + this, + "source_5", + { + bucket: source.id, + region: "eu-central-1", + versioningConfiguration: { + status: "Enabled", + }, + } + ); + /*This allows the Terraform resource name to match the original name. 
You can remove the call if you don't need them to match.*/ + awsS3BucketVersioningSource.overrideLogicalId("source"); + const assumeRole = new DataAwsIamPolicyDocument(this, "assume_role", { + statement: [ + { + actions: ["sts:AssumeRole"], + effect: "Allow", + principals: [ + { + identifiers: ["s3.amazonaws.com"], + type: "Service", + }, + ], + }, + ], + }); + const replication = new DataAwsIamPolicyDocument(this, "replication", { + statement: [ + { + actions: ["s3:GetReplicationConfiguration", "s3:ListBucket"], + effect: "Allow", + resources: [source.arn], + }, + { + actions: [ + "s3:GetObjectVersionForReplication", + "s3:GetObjectVersionAcl", + "s3:GetObjectVersionTagging", + ], + effect: "Allow", + resources: ["${" + source.arn + "}/*"], + }, + { + actions: [ + "s3:ReplicateObject", + "s3:ReplicateDelete", + "s3:ReplicateTags", + ], + effect: "Allow", + resources: ["${" + destination.arn + "}/*"], + }, + ], + }); + const awsIamPolicyReplication = new IamPolicy(this, "replication_8", { + name: "tf-iam-role-policy-replication-12345", + policy: Token.asString(replication.json), + }); + /*This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match.*/ + awsIamPolicyReplication.overrideLogicalId("replication"); + const awsIamRoleReplication = new IamRole(this, "replication_9", { + assumeRolePolicy: Token.asString(assumeRole.json), + name: "tf-iam-role-replication-12345", + }); + /*This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match.*/ + awsIamRoleReplication.overrideLogicalId("replication"); + const awsIamRolePolicyAttachmentReplication = new IamRolePolicyAttachment( + this, + "replication_10", + { + policyArn: Token.asString(awsIamPolicyReplication.arn), + role: Token.asString(awsIamRoleReplication.name), + } + ); + /*This allows the Terraform resource name to match the original name. 
You can remove the call if you don't need them to match.*/ + awsIamRolePolicyAttachmentReplication.overrideLogicalId("replication"); + const awsS3BucketReplicationConfigurationReplication = + new S3BucketReplicationConfigurationA(this, "replication_11", { + bucket: source.id, + dependsOn: [awsS3BucketVersioningSource], + region: "eu-central-1", + role: Token.asString(awsIamRoleReplication.arn), + rule: [ + { + destination: { + bucket: destination.arn, + storageClass: "STANDARD", + }, + filter: { + prefix: "example", }, - id: "foobar", + id: "examplerule", status: "Enabled", }, ], @@ -262,6 +418,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `bucket` - (Required) Name of the source S3 bucket you want Amazon S3 to monitor. * `role` - (Required) ARN of the IAM role for Amazon S3 to assume when replicating the objects. * `rule` - (Required) List of configuration blocks describing the rules managing the replication. [See below](#rule). 
@@ -522,4 +679,4 @@ Using `terraform import`, import S3 bucket replication configuration using the ` % terraform import aws_s3_bucket_replication_configuration.replication bucket-name ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/s3_bucket_request_payment_configuration.html.markdown b/website/docs/cdktf/typescript/r/s3_bucket_request_payment_configuration.html.markdown index e65a5d014309..ec3dc96d83bb 100644 --- a/website/docs/cdktf/typescript/r/s3_bucket_request_payment_configuration.html.markdown +++ b/website/docs/cdktf/typescript/r/s3_bucket_request_payment_configuration.html.markdown @@ -43,6 +43,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `bucket` - (Required, Forces new resource) Name of the bucket. * `expectedBucketOwner` - (Optional, Forces new resource) Account ID of the expected bucket owner. * `payer` - (Required) Specifies who pays for the download and request fees. Valid values: `BucketOwner`, `Requester`. 
@@ -119,4 +120,4 @@ If the owner (account ID) of the source bucket differs from the account used to % terraform import aws_s3_bucket_request_payment_configuration.example bucket-name,123456789012 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/s3_bucket_server_side_encryption_configuration.html.markdown b/website/docs/cdktf/typescript/r/s3_bucket_server_side_encryption_configuration.html.markdown index 9511b8c633e1..7d7a1439e1f2 100644 --- a/website/docs/cdktf/typescript/r/s3_bucket_server_side_encryption_configuration.html.markdown +++ b/website/docs/cdktf/typescript/r/s3_bucket_server_side_encryption_configuration.html.markdown @@ -57,6 +57,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `bucket` - (Required, Forces new resource) ID (name) of the bucket. * `expectedBucketOwner` - (Optional, Forces new resource) Account ID of the expected bucket owner. * `rule` - (Required) Set of server-side encryption configuration rules. [See below](#rule). Currently, only a single rule is supported. @@ -83,6 +84,33 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. 
For example: + +```terraform +import { + to = aws_s3_bucket_server_side_encryption_configuration.example + identity = { + bucket = "bucket-name" + } +} + +resource "aws_s3_bucket_server_side_encryption_configuration" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `bucket` (String) S3 bucket name. + +#### Optional + +* `accountId` (String) AWS Account where this resource is managed. +* `expectedBucketOwner` (String) Account ID of the expected bucket owner. +* `region` (String) Region where this resource is managed. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import S3 bucket server-side encryption configuration using the `bucket` or using the `bucket` and `expectedBucketOwner` separated by a comma (`,`). For example: If the owner (account ID) of the source bucket is the same account used to configure the Terraform AWS Provider, import using the `bucket`: @@ -147,4 +175,4 @@ If the owner (account ID) of the source bucket differs from the account used to % terraform import aws_s3_bucket_server_side_encryption_configuration.example bucket-name,123456789012 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/s3_bucket_versioning.html.markdown b/website/docs/cdktf/typescript/r/s3_bucket_versioning.html.markdown index b9278168ba4c..804f932c9225 100644 --- a/website/docs/cdktf/typescript/r/s3_bucket_versioning.html.markdown +++ b/website/docs/cdktf/typescript/r/s3_bucket_versioning.html.markdown @@ -147,6 +147,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `bucket` - (Required, Forces new resource) Name of the S3 bucket. * `versioningConfiguration` - (Required) Configuration block for the versioning parameters. [See below](#versioning_configuration). * `expectedBucketOwner` - (Optional, Forces new resource) Account ID of the expected bucket owner. @@ -170,6 +171,33 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_s3_bucket_versioning.example + identity = { + bucket = "bucket-name" + } +} + +resource "aws_s3_bucket_versioning" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `bucket` (String) S3 bucket name. + +#### Optional + +* `accountId` (String) AWS Account where this resource is managed. +* `expectedBucketOwner` (String) Account ID of the expected bucket owner. +* `region` (String) Region where this resource is managed. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import S3 bucket versioning using the `bucket` or using the `bucket` and `expectedBucketOwner` separated by a comma (`,`). 
For example: If the owner (account ID) of the source bucket is the same account used to configure the Terraform AWS Provider, import using the `bucket`: @@ -230,4 +258,4 @@ If the owner (account ID) of the source bucket differs from the account used to % terraform import aws_s3_bucket_versioning.example bucket-name,123456789012 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/s3_bucket_website_configuration.html.markdown b/website/docs/cdktf/typescript/r/s3_bucket_website_configuration.html.markdown index d525a0d3cb43..df63ea000265 100644 --- a/website/docs/cdktf/typescript/r/s3_bucket_website_configuration.html.markdown +++ b/website/docs/cdktf/typescript/r/s3_bucket_website_configuration.html.markdown @@ -88,6 +88,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `bucket` - (Required, Forces new resource) Name of the bucket. * `errorDocument` - (Optional, Conflicts with `redirectAllRequestsTo`) Name of the error document for the website. [See below](#error_document). * `expectedBucketOwner` - (Optional, Forces new resource) Account ID of the expected bucket owner. @@ -152,6 +153,33 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. 
For example: + +```terraform +import { + to = aws_s3_bucket_website_configuration.example + identity = { + bucket = "bucket-name" + } +} + +resource "aws_s3_bucket_website_configuration" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `bucket` (String) S3 bucket name. + +#### Optional + +* `accountId` (String) AWS Account where this resource is managed. +* `expectedBucketOwner` (String) Account ID of the expected bucket owner. +* `region` (String) Region where this resource is managed. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import S3 bucket website configuration using the `bucket` or using the `bucket` and `expectedBucketOwner` separated by a comma (`,`). For example: If the owner (account ID) of the source bucket is the same account used to configure the Terraform AWS Provider, import using the `bucket`: @@ -216,4 +244,4 @@ If the owner (account ID) of the source bucket differs from the account used to % terraform import aws_s3_bucket_website_configuration.example bucket-name,123456789012 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/s3_directory_bucket.html.markdown b/website/docs/cdktf/typescript/r/s3_directory_bucket.html.markdown index e55143743af4..662251843aa9 100644 --- a/website/docs/cdktf/typescript/r/s3_directory_bucket.html.markdown +++ b/website/docs/cdktf/typescript/r/s3_directory_bucket.html.markdown @@ -73,10 +73,12 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
* `bucket` - (Required) Name of the bucket. The name must be in the format `[bucket_name]--[azid]--x-s3`. Use the [`aws_s3_bucket`](s3_bucket.html) resource to manage general purpose buckets. * `dataRedundancy` - (Optional) Data redundancy. Valid values: `SingleAvailabilityZone`, `SingleLocalZone`. The default value depends on the value of the `location.type` attribute. * `forceDestroy` - (Optional, Default:`false`) Boolean that indicates all objects should be deleted from the bucket *when the bucket is destroyed* so that the bucket can be destroyed without error. These objects are *not* recoverable. This only deletes objects when the bucket is destroyed, *not* when setting this parameter to `true`. Once this parameter is set to `true`, there must be a successful `terraform apply` run before a destroy is required to update this value in the resource state. Without a successful `terraform apply` after this parameter is set, this flag will have no effect. If setting this field in the same operation that would require replacing the bucket or destroying the bucket, this flag will not work. Additionally when importing a bucket, a successful `terraform apply` is required to set this value in state before it will take effect on a destroy operation. * `location` - (Required) Bucket location. See [Location](#location) below for more details. +* `tags` - (Optional) Map of tags to assign to the bucket. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. * `type` - (Optional, Default:`Directory`) Bucket type. Valid values: `Directory`. ### Location @@ -92,6 +94,7 @@ This resource exports the following attributes in addition to the arguments abov * `id` - (**Deprecated**, use `bucket` instead) Name of the bucket. * `arn` - ARN of the bucket. 
+* `tagsAll` - Map of tags assigned to the resource, including those inherited from the provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). ## Import @@ -125,4 +128,4 @@ Using `terraform import`, import S3 bucket using `bucket`. For example: % terraform import aws_s3_directory_bucket.example example--usw2-az1--x-s3 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/s3_object.html.markdown b/website/docs/cdktf/typescript/r/s3_object.html.markdown index 2b81f23ec30c..626a8e582ed1 100644 --- a/website/docs/cdktf/typescript/r/s3_object.html.markdown +++ b/website/docs/cdktf/typescript/r/s3_object.html.markdown @@ -272,10 +272,11 @@ The following arguments are optional: * `objectLockMode` - (Optional) Object lock [retention mode](https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html#object-lock-retention-modes) that you want to apply to this object. Valid values are `GOVERNANCE` and `COMPLIANCE`. * `objectLockRetainUntilDate` - (Optional) Date and time, in [RFC3339 format](https://tools.ietf.org/html/rfc3339#section-5.8), when this object's object lock will [expire](https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html#object-lock-retention-periods). * `overrideProvider` - (Optional) Override provider-level configuration options. See [Override Provider](#override-provider) below for more details. -* `serverSideEncryption` - (Optional) Server-side encryption of the object in S3. Valid values are "`AES256`" and "`aws:kms`". +* `serverSideEncryption` - (Optional) Server-side encryption of the object in S3. Valid values are `"AES256"`, `"aws:kms"`, `"aws:kms:dsse"`, and `"aws:fsx"`. * `sourceHash` - (Optional) Triggers updates like `etag` but useful to address `etag` encryption limitations. Set using `filemd5("path/to/source")` (Terraform 0.11.12 or later). 
(The value is only stored in state and not saved by AWS.) * `source` - (Optional, conflicts with `content` and `contentBase64`) Path to a file that will be read and uploaded as raw bytes for the object content. * `storageClass` - (Optional) [Storage Class](https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html#AmazonS3-PutObject-request-header-StorageClass) for the object. Defaults to "`STANDARD`". +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `tags` - (Optional) Map of tags to assign to the object. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. * `websiteRedirect` - (Optional) Target URL for [website redirect](http://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html). @@ -307,6 +308,34 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_s3_object.example + identity = { + bucket = "some-bucket-name" + key = "some/key.txt" + } +} + +resource "aws_s3_object" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `bucket` (String) S3 bucket name. +* `key` (String) Object key. + +#### Optional + +* `accountId` (String) AWS Account where this resource is managed. +* `region` (String) Region where this resource is managed. 
+ In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import objects using the `id` or S3 URL. For example: Import using the `id`, which is the bucket name and the key together: @@ -371,4 +400,4 @@ Import using S3 URL syntax: % terraform import aws_s3_object.example s3://some-bucket-name/some/key.txt ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/s3_object_copy.html.markdown b/website/docs/cdktf/typescript/r/s3_object_copy.html.markdown index 8a1969a48e58..2477836646f2 100644 --- a/website/docs/cdktf/typescript/r/s3_object_copy.html.markdown +++ b/website/docs/cdktf/typescript/r/s3_object_copy.html.markdown @@ -85,6 +85,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `acl` - (Optional) [Canned ACL](https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl) to apply. Valid values are `private`, `public-read`, `public-read-write`, `authenticated-read`, `aws-exec-read`, `bucket-owner-read`, and `bucket-owner-full-control`. Conflicts with `grant`. * `cacheControl` - (Optional) Specifies caching behavior along the request/reply chain. Read [w3c cache_control](http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9) for further details. * `checksumAlgorithm` - (Optional) Indicates the algorithm used to create the checksum for the object. If a value is specified and the object is encrypted with KMS, you must have permission to use the `kms:Decrypt` action. Valid values: `CRC32`, `CRC32C`, `CRC64NVME`, `SHA1`, `SHA256`. 
@@ -162,4 +163,4 @@ This resource exports the following attributes in addition to the arguments abov * `tagsAll` - Map of tags assigned to the resource, including those inherited from the provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). * `versionId` - Version ID of the newly created copy. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/s3control_access_grant.html.markdown b/website/docs/cdktf/typescript/r/s3control_access_grant.html.markdown index 2d5e62764d77..9ec8007a10d7 100644 --- a/website/docs/cdktf/typescript/r/s3control_access_grant.html.markdown +++ b/website/docs/cdktf/typescript/r/s3control_access_grant.html.markdown @@ -71,6 +71,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `accessGrantsLocationConfiguration` - (Optional) See [Location Configuration](#location-configuration) below for more details. * `accessGrantsLocationId` - (Required) The ID of the S3 Access Grants location to which the access grant is giving access. * `accountId` - (Optional) The AWS account ID for the S3 Access Grants location. Defaults to automatically determined account ID of the Terraform AWS provider. 
@@ -133,4 +134,4 @@ Using `terraform import`, import S3 Access Grants using the `accountId` and `acc % terraform import aws_s3control_access_grants_location.example 123456789012,04549c5e-2f3c-4a07-824d-2cafe720aa22 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/s3control_access_grants_instance.html.markdown b/website/docs/cdktf/typescript/r/s3control_access_grants_instance.html.markdown index ac4ff1cf9e44..e84d5f7f0b92 100644 --- a/website/docs/cdktf/typescript/r/s3control_access_grants_instance.html.markdown +++ b/website/docs/cdktf/typescript/r/s3control_access_grants_instance.html.markdown @@ -61,6 +61,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `accountId` - (Optional) The AWS account ID for the S3 Access Grants instance. Defaults to automatically determined account ID of the Terraform AWS provider. * `identityCenterArn` - (Optional) The ARN of the AWS IAM Identity Center instance associated with the S3 Access Grants instance. * `tags` - (Optional) Key-value map of resource tags. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. 
@@ -106,4 +107,4 @@ Using `terraform import`, import S3 Access Grants instances using the `accountId % terraform import aws_s3control_access_grants_instance.example 123456789012 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/s3control_access_grants_instance_resource_policy.html.markdown b/website/docs/cdktf/typescript/r/s3control_access_grants_instance_resource_policy.html.markdown index 95d055a76958..be8f6e58e03c 100644 --- a/website/docs/cdktf/typescript/r/s3control_access_grants_instance_resource_policy.html.markdown +++ b/website/docs/cdktf/typescript/r/s3control_access_grants_instance_resource_policy.html.markdown @@ -49,6 +49,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `accountId` - (Optional) The AWS account ID for the S3 Access Grants instance. Defaults to automatically determined account ID of the Terraform AWS provider. * `policy` - (Optional) The policy document. 
@@ -88,4 +89,4 @@ Using `terraform import`, import S3 Access Grants instance resource policies usi % terraform import aws_s3control_access_grants_instance_resource_policy.example 123456789012 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/s3control_access_grants_location.html.markdown b/website/docs/cdktf/typescript/r/s3control_access_grants_location.html.markdown index 08ee54a1b431..9fe38a36c4c1 100644 --- a/website/docs/cdktf/typescript/r/s3control_access_grants_location.html.markdown +++ b/website/docs/cdktf/typescript/r/s3control_access_grants_location.html.markdown @@ -48,6 +48,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `accountId` - (Optional) The AWS account ID for the S3 Access Grants location. Defaults to automatically determined account ID of the Terraform AWS provider. * `iamRoleArn` - (Required) The ARN of the IAM role that S3 Access Grants should use when fulfilling runtime access requests to the location. 
@@ -94,4 +95,4 @@ Using `terraform import`, import S3 Access Grants locations using the `accountId % terraform import aws_s3control_access_grants_location.example 123456789012,default ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/s3control_access_point_policy.html.markdown b/website/docs/cdktf/typescript/r/s3control_access_point_policy.html.markdown index 5a2b85b3c50f..25a617448d64 100644 --- a/website/docs/cdktf/typescript/r/s3control_access_point_policy.html.markdown +++ b/website/docs/cdktf/typescript/r/s3control_access_point_policy.html.markdown @@ -81,6 +81,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `accessPointArn` - (Required) The ARN of the access point that you want to associate with the specified policy. * `policy` - (Required) The policy that you want to apply to the specified access point. 
@@ -123,4 +124,4 @@ Using `terraform import`, import Access Point policies using the `accessPointArn % terraform import aws_s3control_access_point_policy.example arn:aws:s3:us-west-2:123456789012:accesspoint/example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/s3control_bucket.html.markdown b/website/docs/cdktf/typescript/r/s3control_bucket.html.markdown index 2537eb369ea8..f0be7a847d5b 100644 --- a/website/docs/cdktf/typescript/r/s3control_bucket.html.markdown +++ b/website/docs/cdktf/typescript/r/s3control_bucket.html.markdown @@ -41,6 +41,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `bucket` - (Required) Name of the bucket. * `outpostId` - (Required) Identifier of the Outpost to contain this bucket. * `tags` - (Optional) Key-value map of resource tags. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. @@ -57,6 +58,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. 
For example: + +```terraform +import { + to = aws_s3control_bucket.example + identity = { + "arn" = "arn:aws:s3-outposts:us-east-1:123456789012:outpost/op-12345678/bucket/example" + } +} + +resource "aws_s3control_bucket" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) ARN of the bucket. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import S3 Control Buckets using Amazon Resource Name (ARN). For example: ```typescript @@ -87,4 +109,4 @@ Using `terraform import`, import S3 Control Buckets using Amazon Resource Name ( % terraform import aws_s3control_bucket.example arn:aws:s3-outposts:us-east-1:123456789012:outpost/op-12345678/bucket/example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/s3control_bucket_lifecycle_configuration.html.markdown b/website/docs/cdktf/typescript/r/s3control_bucket_lifecycle_configuration.html.markdown index e0d9d2b93175..4a739260a7f8 100644 --- a/website/docs/cdktf/typescript/r/s3control_bucket_lifecycle_configuration.html.markdown +++ b/website/docs/cdktf/typescript/r/s3control_bucket_lifecycle_configuration.html.markdown @@ -62,6 +62,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `bucket` - (Required) Amazon Resource Name (ARN) of the bucket. * `rule` - (Required) Configuration block(s) containing lifecycle rules for the bucket. * `abortIncompleteMultipartUpload` - (Optional) Configuration block containing settings for abort incomplete multipart upload. 
@@ -114,4 +115,4 @@ Using `terraform import`, import S3 Control Bucket Lifecycle Configurations usin % terraform import aws_s3control_bucket_lifecycle_configuration.example arn:aws:s3-outposts:us-east-1:123456789012:outpost/op-12345678/bucket/example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/s3control_bucket_policy.html.markdown b/website/docs/cdktf/typescript/r/s3control_bucket_policy.html.markdown index f1d9f1466ba4..66f11de226bb 100644 --- a/website/docs/cdktf/typescript/r/s3control_bucket_policy.html.markdown +++ b/website/docs/cdktf/typescript/r/s3control_bucket_policy.html.markdown @@ -55,8 +55,9 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -The following arguments are required: +This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `bucket` - (Required) Amazon Resource Name (ARN) of the bucket. * `policy` - (Required) JSON string of the resource policy. For more information about building AWS IAM policy documents with Terraform, see the [AWS IAM Policy Document Guide](https://learn.hashicorp.com/terraform/aws/iam-policy). 
@@ -98,4 +99,4 @@ Using `terraform import`, import S3 Control Bucket Policies using the Amazon Res % terraform import aws_s3control_bucket_policy.example arn:aws:s3-outposts:us-east-1:123456789012:outpost/op-12345678/bucket/example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/s3control_directory_bucket_access_point_scope.html.markdown b/website/docs/cdktf/typescript/r/s3control_directory_bucket_access_point_scope.html.markdown index cf76b01d59f4..0576963f8f14 100644 --- a/website/docs/cdktf/typescript/r/s3control_directory_bucket_access_point_scope.html.markdown +++ b/website/docs/cdktf/typescript/r/s3control_directory_bucket_access_point_scope.html.markdown @@ -30,15 +30,15 @@ import { Fn, Token, TerraformStack } from "cdktf"; * Provider bindings are generated by running `cdktf get`. * See https://cdk.tf/provider-generation for more details. */ -import { S3ControlDirectoryBucketAccessPointScope } from "./.gen/providers/aws/"; import { DataAwsAvailabilityZones } from "./.gen/providers/aws/data-aws-availability-zones"; import { S3AccessPoint } from "./.gen/providers/aws/s3-access-point"; +import { S3ControlDirectoryBucketAccessPointScope } from "./.gen/providers/aws/s3-control-directory-bucket-access-point-scope"; import { S3DirectoryBucket } from "./.gen/providers/aws/s3-directory-bucket"; class MyConvertedCode extends TerraformStack { constructor(scope: Construct, name: string) { super(scope, name); new S3ControlDirectoryBucketAccessPointScope(this, "example", { - account_id: "123456789012", + accountId: "123456789012", name: "example--zoneId--xa-s3", scope: [ { @@ -79,8 +79,9 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: -* `name` - (Required) The name of the access point that you want to apply the scope to. * `accountId` - (Required) The AWS account ID that owns the specified access point. 
+* `name` - (Required) The name of the access point that you want to apply the scope to. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `scope` - (Optional). Scope is used to restrict access to specific prefixes, API operations, or a combination of both. To remove the `scope`, set it to `{permissions=[] prefixes=[]}`. The default scope is `{permissions=[] prefixes=[]}`. ### Scope Configuration block @@ -108,7 +109,7 @@ import { TerraformStack } from "cdktf"; * Provider bindings are generated by running `cdktf get`. * See https://cdk.tf/provider-generation for more details. */ -import { S3ControlDirectoryBucketAccessPointScope } from "./.gen/providers/aws/"; +import { S3ControlDirectoryBucketAccessPointScope } from "./.gen/providers/aws/s3-control-directory-bucket-access-point-scope"; class MyConvertedCode extends TerraformStack { constructor(scope: Construct, name: string) { super(scope, name); @@ -128,4 +129,4 @@ Using `terraform import`, import Access Point Scope using access point name and % terraform import aws_s3control_directory_bucket_access_point_scope.example example--zoneid--xa-s3,123456789012 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/s3control_multi_region_access_point.html.markdown b/website/docs/cdktf/typescript/r/s3control_multi_region_access_point.html.markdown index 685a69a901ce..bbee11ab2e20 100644 --- a/website/docs/cdktf/typescript/r/s3control_multi_region_access_point.html.markdown +++ b/website/docs/cdktf/typescript/r/s3control_multi_region_access_point.html.markdown @@ -70,6 +70,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this 
resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `accountId` - (Optional) The AWS account ID for the owner of the buckets for which you want to create a Multi-Region Access Point. Defaults to automatically determined account ID of the Terraform AWS provider. * `details` - (Required) A configuration block containing details about the Multi-Region Access Point. See [Details Configuration Block](#details-configuration) below for more details @@ -154,4 +155,4 @@ Using `terraform import`, import Multi-Region Access Points using the `accountId % terraform import aws_s3control_multi_region_access_point.example 123456789012:example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/s3control_multi_region_access_point_policy.html.markdown b/website/docs/cdktf/typescript/r/s3control_multi_region_access_point_policy.html.markdown index c2d388fe6ac9..e13d3579de08 100644 --- a/website/docs/cdktf/typescript/r/s3control_multi_region_access_point_policy.html.markdown +++ b/website/docs/cdktf/typescript/r/s3control_multi_region_access_point_policy.html.markdown @@ -91,6 +91,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `accountId` - (Optional) The AWS account ID for the owner of the Multi-Region Access Point. Defaults to automatically determined account ID of the Terraform AWS provider. 
* `details` - (Required) A configuration block containing details about the policy for the Multi-Region Access Point. See [Details Configuration Block](#details-configuration) below for more details @@ -150,4 +151,4 @@ Using `terraform import`, import Multi-Region Access Point Policies using the `a % terraform import aws_s3control_multi_region_access_point_policy.example 123456789012:example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/s3control_object_lambda_access_point.html.markdown b/website/docs/cdktf/typescript/r/s3control_object_lambda_access_point.html.markdown index 311d8e911b3f..f2272a78ed3a 100644 --- a/website/docs/cdktf/typescript/r/s3control_object_lambda_access_point.html.markdown +++ b/website/docs/cdktf/typescript/r/s3control_object_lambda_access_point.html.markdown @@ -66,6 +66,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `accountId` - (Optional) The AWS account ID for the owner of the bucket for which you want to create an Object Lambda Access Point. Defaults to automatically determined account ID of the Terraform AWS provider. * `configuration` - (Required) A configuration block containing details about the Object Lambda Access Point. See [Configuration](#configuration) below for more details. * `name` - (Required) The name for this Object Lambda Access Point. 
@@ -139,4 +140,4 @@ Using `terraform import`, import Object Lambda Access Points using the `accountI % terraform import aws_s3control_object_lambda_access_point.example 123456789012:example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/s3control_object_lambda_access_point_policy.html.markdown b/website/docs/cdktf/typescript/r/s3control_object_lambda_access_point_policy.html.markdown index dcb844e95795..0013ed31621d 100644 --- a/website/docs/cdktf/typescript/r/s3control_object_lambda_access_point_policy.html.markdown +++ b/website/docs/cdktf/typescript/r/s3control_object_lambda_access_point_policy.html.markdown @@ -89,6 +89,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `accountId` - (Optional) The AWS account ID for the account that owns the Object Lambda Access Point. Defaults to automatically determined account ID of the Terraform AWS provider. * `name` - (Required) The name of the Object Lambda Access Point. * `policy` - (Required) The Object Lambda Access Point resource policy document. 
@@ -132,4 +133,4 @@ Using `terraform import`, import Object Lambda Access Point policies using the ` % terraform import aws_s3control_object_lambda_access_point_policy.example 123456789012:example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/s3control_storage_lens_configuration.html.markdown b/website/docs/cdktf/typescript/r/s3control_storage_lens_configuration.html.markdown index 2f8283eb098b..1bb32d63b0e3 100644 --- a/website/docs/cdktf/typescript/r/s3control_storage_lens_configuration.html.markdown +++ b/website/docs/cdktf/typescript/r/s3control_storage_lens_configuration.html.markdown @@ -71,6 +71,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `accountId` - (Optional) The AWS account ID for the S3 Storage Lens configuration. Defaults to automatically determined account ID of the Terraform AWS provider. * `configId` - (Required) The ID of the S3 Storage Lens configuration. * `storageLensConfiguration` - (Required) The S3 Storage Lens configuration. See [Storage Lens Configuration](#storage-lens-configuration) below for more details. 
@@ -248,4 +249,4 @@ Using `terraform import`, import S3 Storage Lens configurations using the `accou % terraform import aws_s3control_storage_lens_configuration.example 123456789012:example-1 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/s3outposts_endpoint.html.markdown b/website/docs/cdktf/typescript/r/s3outposts_endpoint.html.markdown index 1a5e718498dc..70d9259d9f2e 100644 --- a/website/docs/cdktf/typescript/r/s3outposts_endpoint.html.markdown +++ b/website/docs/cdktf/typescript/r/s3outposts_endpoint.html.markdown @@ -40,6 +40,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `outpostId` - (Required) Identifier of the Outpost to contain this endpoint. * `securityGroupId` - (Required) Identifier of the EC2 Security Group. * `subnetId` - (Required) Identifier of the EC2 Subnet. 
@@ -89,4 +90,4 @@ Using `terraform import`, import S3 Outposts Endpoints using Amazon Resource Nam % terraform import aws_s3outposts_endpoint.example arn:aws:s3-outposts:us-east-1:123456789012:outpost/op-12345678/endpoint/0123456789abcdef,sg-12345678,subnet-12345678 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/s3tables_namespace.html.markdown b/website/docs/cdktf/typescript/r/s3tables_namespace.html.markdown index 0cb2c46f4e09..ce00ebedadd3 100644 --- a/website/docs/cdktf/typescript/r/s3tables_namespace.html.markdown +++ b/website/docs/cdktf/typescript/r/s3tables_namespace.html.markdown @@ -49,8 +49,9 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -The following arguments are required: +This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `namespace` - (Required, Forces new resource) Name of the namespace. Must be between 1 and 255 characters in length. Can consist of lowercase letters, numbers, and underscores, and must begin and end with a lowercase letter or number. 
@@ -96,4 +97,4 @@ Using `terraform import`, import S3 Tables Namespace using the `tableBucketArn` % terraform import aws_s3tables_namespace.example 'arn:aws:s3tables:us-west-2:123456789012:bucket/example-bucket;example-namespace' ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/s3tables_table.html.markdown b/website/docs/cdktf/typescript/r/s3tables_table.html.markdown index 36d7b9ad632e..89af35e1e437 100644 --- a/website/docs/cdktf/typescript/r/s3tables_table.html.markdown +++ b/website/docs/cdktf/typescript/r/s3tables_table.html.markdown @@ -58,6 +58,84 @@ class MyConvertedCode extends TerraformStack { ``` +### With Metadata Schema + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { Token, TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. + */ +import { S3TablesNamespace } from "./.gen/providers/aws/s3-tables-namespace"; +import { S3TablesTable } from "./.gen/providers/aws/s3-tables-table"; +import { S3TablesTableBucket } from "./.gen/providers/aws/s3-tables-table-bucket"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + const example = new S3TablesTableBucket(this, "example", { + name: "example-bucket", + }); + const awsS3TablesNamespaceExample = new S3TablesNamespace( + this, + "example_1", + { + namespace: "example_namespace", + tableBucketArn: example.arn, + } + ); + /*This allows the Terraform resource name to match the original name. 
You can remove the call if you don't need them to match.*/ + awsS3TablesNamespaceExample.overrideLogicalId("example"); + const awsS3TablesTableExample = new S3TablesTable(this, "example_2", { + format: "ICEBERG", + metadata: [ + { + iceberg: [ + { + schema: [ + { + field: [ + { + name: "id", + required: true, + type: "long", + }, + { + name: "name", + required: true, + type: "string", + }, + { + name: "created_at", + required: false, + type: "timestamp", + }, + { + name: "price", + required: false, + type: "decimal(10,2)", + }, + ], + }, + ], + }, + ], + }, + ], + name: "example_table", + namespace: Token.asString(awsS3TablesNamespaceExample.namespace), + tableBucketArn: Token.asString( + awsS3TablesNamespaceExample.tableBucketArn + ), + }); + /*This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match.*/ + awsS3TablesTableExample.overrideLogicalId("example"); + } +} + +``` + ## Argument Reference The following arguments are required: @@ -75,10 +153,13 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `encryptionConfiguration` - (Optional) A single table bucket encryption configuration object. [See `encryptionConfiguration` below](#encryption_configuration). * `maintenanceConfiguration` - (Optional) A single table bucket maintenance configuration object. [See `maintenanceConfiguration` below](#maintenance_configuration). +* `metadata` - (Optional) Contains details about the table metadata. This configuration specifies the metadata format and schema for the table. Currently only supports Iceberg format. + [See `metadata` below](#metadata). 
### `encryptionConfiguration` @@ -130,6 +211,35 @@ The `iceberg_snapshot_management.settings` object supports the following argumen * `min_snapshots_to_keep` - (Required) Minimum number of snapshots to keep. Must be at least `1`. +### `metadata` + +The `metadata` configuration block supports the following argument: + +* `iceberg` - (Optional) Contains details about the metadata for an Iceberg table. This block defines the schema structure for the Apache Iceberg table format. + [See `iceberg` below](#iceberg). + +### `iceberg` + +The `iceberg` configuration block supports the following argument: + +* `schema` - (Required) Schema configuration for the Iceberg table. + [See `schema` below](#schema). + +### `schema` + +The `schema` configuration block supports the following argument: + +* `field` - (Required) List of schema fields for the Iceberg table. Each field defines a column in the table schema. + [See `field` below](#field). + +### `field` + +The `field` configuration block supports the following arguments: + +* `name` - (Required) The name of the field. +* `type` - (Required) The field type. S3 Tables supports all Apache Iceberg primitive types including: `boolean`, `int`, `long`, `float`, `double`, `decimal(precision,scale)`, `date`, `time`, `timestamp`, `timestamptz`, `string`, `uuid`, `fixed(length)`, `binary`. +* `required` - (Optional) A Boolean value that specifies whether values are required for each row in this field. Defaults to `false`. 
+ ## Attribute Reference This resource exports the following attributes in addition to the arguments above: @@ -178,4 +288,4 @@ Using `terraform import`, import S3 Tables Table using the `tableBucketArn`, the % terraform import aws_s3tables_table.example 'arn:aws:s3tables:us-west-2:123456789012:bucket/example-bucket;example-namespace;example-table' ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/s3tables_table_bucket.html.markdown b/website/docs/cdktf/typescript/r/s3tables_table_bucket.html.markdown index a00a8cd6890b..9cdff4920e4d 100644 --- a/website/docs/cdktf/typescript/r/s3tables_table_bucket.html.markdown +++ b/website/docs/cdktf/typescript/r/s3tables_table_bucket.html.markdown @@ -49,8 +49,10 @@ The following arguments are optional: * `encryptionConfiguration` - (Optional) A single table bucket encryption configuration object. [See `encryptionConfiguration` below](#encryption_configuration). +* `forceDestroy` - (Optional, Default:`false`) Whether all tables and namespaces within the table bucket should be deleted *when the table bucket is destroyed* so that the table bucket can be destroyed without error. These tables and namespaces are *not* recoverable. This only deletes tables and namespaces when the table bucket is destroyed, *not* when setting this parameter to `true`. Once this parameter is set to `true`, there must be a successful `terraform apply` run before a destroy is required to update this value in the resource state. Without a successful `terraform apply` after this parameter is set, this flag will have no effect. If setting this field in the same operation that would require replacing the table bucket or destroying the table bucket, this flag will not work. Additionally when importing a table bucket, a successful `terraform apply` is required to set this value in state before it will take effect on a destroy operation. 
* `maintenanceConfiguration` - (Optional) A single table bucket maintenance configuration object. [See `maintenanceConfiguration` below](#maintenance_configuration). +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). ### `encryptionConfiguration` @@ -124,4 +126,4 @@ Using `terraform import`, import S3 Tables Table Bucket using the `arn`. For exa % terraform import aws_s3tables_table_bucket.example arn:aws:s3tables:us-west-2:123456789012:bucket/example-bucket ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/s3tables_table_bucket_policy.html.markdown b/website/docs/cdktf/typescript/r/s3tables_table_bucket_policy.html.markdown index fed23f56eef8..875afb818705 100644 --- a/website/docs/cdktf/typescript/r/s3tables_table_bucket_policy.html.markdown +++ b/website/docs/cdktf/typescript/r/s3tables_table_bucket_policy.html.markdown @@ -53,8 +53,9 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -The following arguments are required: +This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `resourcePolicy` - (Required) Amazon Web Services resource-based policy document in JSON format. * `tableBucketArn` - (Required, Forces new resource) ARN referencing the Table Bucket that owns this policy. 
@@ -94,4 +95,4 @@ Using `terraform import`, import S3 Tables Table Bucket Policy using the `tableB % terraform import aws_s3tables_table_bucket_policy.example 'arn:aws:s3tables:us-west-2:123456789012:bucket/example-bucket;example-namespace' ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/s3tables_table_policy.html.markdown b/website/docs/cdktf/typescript/r/s3tables_table_policy.html.markdown index a41f0c655a45..e294ddec65bc 100644 --- a/website/docs/cdktf/typescript/r/s3tables_table_policy.html.markdown +++ b/website/docs/cdktf/typescript/r/s3tables_table_policy.html.markdown @@ -83,8 +83,9 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -The following arguments are required: +This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `resourcePolicy` - (Required) Amazon Web Services resource-based policy document in JSON format. * `name` - (Required, Forces new resource) Name of the table. Must be between 1 and 255 characters in length. 
@@ -130,4 +131,4 @@ Using `terraform import`, import S3 Tables Table Policy using the `tableBucketAr % terraform import aws_s3tables_table_policy.example 'arn:aws:s3tables:us-west-2:123456789012:bucket/example-bucket;example-namespace;example-table' ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/sagemaker_app.html.markdown b/website/docs/cdktf/typescript/r/sagemaker_app.html.markdown index 397120dedb17..e5324737ec86 100644 --- a/website/docs/cdktf/typescript/r/sagemaker_app.html.markdown +++ b/website/docs/cdktf/typescript/r/sagemaker_app.html.markdown @@ -45,6 +45,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `appName` - (Required) The name of the app. * `appType` - (Required) The type of app. Valid values are `JupyterServer`, `KernelGateway`, `RStudioServerPro`, `RSessionGateway`, `TensorBoard`, `CodeEditor`, `JupyterLab`, `DetailedProfiler`, and `Canvas`. * `domainId` - (Required) The domain ID. @@ -101,4 +102,4 @@ Using `terraform import`, import SageMaker AI Apps using the `id`. 
For example: % terraform import aws_sagemaker_app.example arn:aws:sagemaker:us-west-2:012345678912:app/domain-id/user-profile-name/app-type/app-name ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/sagemaker_app_image_config.html.markdown b/website/docs/cdktf/typescript/r/sagemaker_app_image_config.html.markdown index 7e970d39fc6d..3bc854950f4c 100644 --- a/website/docs/cdktf/typescript/r/sagemaker_app_image_config.html.markdown +++ b/website/docs/cdktf/typescript/r/sagemaker_app_image_config.html.markdown @@ -43,6 +43,29 @@ class MyConvertedCode extends TerraformStack { ``` +### Using Code Editor with empty configuration + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. + */ +import { SagemakerAppImageConfig } from "./.gen/providers/aws/sagemaker-app-image-config"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + new SagemakerAppImageConfig(this, "test", { + appImageConfigName: "example", + codeEditorAppImageConfig: {}, + }); + } +} + +``` + ### Default File System Config ```typescript @@ -77,12 +100,15 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `appImageConfigName` - (Required) The name of the App Image Config. -* `codeEditorAppImageConfig` - (Optional) The CodeEditorAppImageConfig. 
You can only specify one image kernel in the AppImageConfig API. This kernel is shown to users before the image starts. After the image runs, all kernels are visible in Code Editor. See [Code Editor App Image Config](#code-editor-app-image-config) details below. -* `jupyterLabImageConfig` - (Optional) The JupyterLabAppImageConfig. You can only specify one image kernel in the AppImageConfig API. This kernel is shown to users before the image starts. After the image runs, all kernels are visible in JupyterLab. See [Jupyter Lab Image Config](#jupyter-lab-image-config) details below. +* `codeEditorAppImageConfig` - (Optional) The CodeEditorAppImageConfig. See [Code Editor App Image Config](#code-editor-app-image-config) details below. +* `jupyterLabImageConfig` - (Optional) The JupyterLabAppImageConfig. See [Jupyter Lab Image Config](#jupyter-lab-image-config) details below. * `kernelGatewayImageConfig` - (Optional) The configuration for the file system and kernels in a SageMaker AI image running as a KernelGateway app. See [Kernel Gateway Image Config](#kernel-gateway-image-config) details below. * `tags` - (Optional) A map of tags to assign to the resource. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. +~> **NOTE:** Exactly one of `codeEditorAppImageConfig`, `jupyterLabImageConfig`, or `kernelGatewayImageConfig` must be configured. Empty blocks (e.g., `code_editor_app_image_config {}`) are valid configurations. + ### Code Editor App Image Config * `containerConfig` - (Optional) The configuration used to run the application image container. See [Container Config](#container-config) details below. 
@@ -153,4 +179,4 @@ Using `terraform import`, import SageMaker AI App Image Configs using the `name` % terraform import aws_sagemaker_app_image_config.example example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/sagemaker_code_repository.html.markdown b/website/docs/cdktf/typescript/r/sagemaker_code_repository.html.markdown index e842fbd332ee..ef1b988c1985 100644 --- a/website/docs/cdktf/typescript/r/sagemaker_code_repository.html.markdown +++ b/website/docs/cdktf/typescript/r/sagemaker_code_repository.html.markdown @@ -95,6 +95,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `codeRepositoryName` - (Required) The name of the Code Repository (must be unique). * `gitConfig` - (Required) Specifies details about the repository. see [Git Config](#git-config) details below. * `tags` - (Optional) A map of tags to assign to the resource. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. 
@@ -145,4 +146,4 @@ Using `terraform import`, import SageMaker AI Code Repositories using the `name` % terraform import aws_sagemaker_code_repository.test_code_repository my-code-repo ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/sagemaker_data_quality_job_definition.html.markdown b/website/docs/cdktf/typescript/r/sagemaker_data_quality_job_definition.html.markdown index 473efd827ca2..266e609ff979 100644 --- a/website/docs/cdktf/typescript/r/sagemaker_data_quality_job_definition.html.markdown +++ b/website/docs/cdktf/typescript/r/sagemaker_data_quality_job_definition.html.markdown @@ -64,6 +64,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `dataQualityAppSpecification` - (Required) Specifies the container that runs the monitoring job. Fields are documented below. * `dataQualityBaselineConfig` - (Optional) Configures the constraints and baselines for the monitoring job. Fields are documented below. * `dataQualityJobInput` - (Required) A list of inputs for the monitoring job. Fields are documented below. @@ -209,4 +210,4 @@ Using `terraform import`, import data quality job definitions using the `name`. 
% terraform import aws_sagemaker_data_quality_job_definition.test_data_quality_job_definition data-quality-job-definition-foo ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/sagemaker_device.html.markdown b/website/docs/cdktf/typescript/r/sagemaker_device.html.markdown index bb165cb7e0dd..4a2f0d229ed7 100644 --- a/website/docs/cdktf/typescript/r/sagemaker_device.html.markdown +++ b/website/docs/cdktf/typescript/r/sagemaker_device.html.markdown @@ -45,6 +45,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `deviceFleetName` - (Required) The name of the Device Fleet. * `device` - (Required) The device to register with SageMaker AI Edge Manager. See [Device](#device) details below. @@ -93,4 +94,4 @@ Using `terraform import`, import SageMaker AI Devices using the `device-fleet-na % terraform import aws_sagemaker_device.example my-fleet/my-device ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/sagemaker_device_fleet.html.markdown b/website/docs/cdktf/typescript/r/sagemaker_device_fleet.html.markdown index 620fbac8590e..88e978550aba 100644 --- a/website/docs/cdktf/typescript/r/sagemaker_device_fleet.html.markdown +++ b/website/docs/cdktf/typescript/r/sagemaker_device_fleet.html.markdown @@ -44,6 +44,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `deviceFleetName` - (Required) The name of the Device Fleet (must be unique). * `roleArn` - (Required) The Amazon Resource Name (ARN) that has access to AWS Internet of Things (IoT). * `outputConfig` - (Required) Specifies details about the repository. see [Output Config](#output-config) details below. @@ -92,4 +93,4 @@ Using `terraform import`, import SageMaker AI Device Fleets using the `name`. Fo % terraform import aws_sagemaker_device_fleet.example my-fleet ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/sagemaker_domain.html.markdown b/website/docs/cdktf/typescript/r/sagemaker_domain.html.markdown index 6d144505ea79..c147d68e1ffd 100644 --- a/website/docs/cdktf/typescript/r/sagemaker_domain.html.markdown +++ b/website/docs/cdktf/typescript/r/sagemaker_domain.html.markdown @@ -148,6 +148,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `appNetworkAccessType` - (Optional) Specifies the VPC used for non-EFS traffic. The default value is `PublicInternetOnly`. Valid values are `PublicInternetOnly` and `VpcOnly`. * `appSecurityGroupManagement` - (Optional) The entity that creates and manages the required security groups for inter-app communication in `VPCOnly` mode. Valid values are `Service` and `Customer`. * `domainSettings` - (Optional) The domain settings. See [`domainSettings` Block](#domain_settings-block) below. @@ -413,4 +414,4 @@ Using `terraform import`, import SageMaker AI Domains using the `id`. 
For exampl % terraform import aws_sagemaker_domain.test_domain d-8jgsjtilstu8 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/sagemaker_endpoint.html.markdown b/website/docs/cdktf/typescript/r/sagemaker_endpoint.html.markdown index 9a74680b60d8..ec48625410aa 100644 --- a/website/docs/cdktf/typescript/r/sagemaker_endpoint.html.markdown +++ b/website/docs/cdktf/typescript/r/sagemaker_endpoint.html.markdown @@ -44,6 +44,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `endpointConfigName` - (Required) The name of the endpoint configuration to use. * `deploymentConfig` - (Optional) The deployment configuration for an endpoint, which contains the desired deployment strategy and rollback configurations. See [Deployment Config](#deployment-config). * `name` - (Optional) The name of the endpoint. If omitted, Terraform will assign a random, unique name. @@ -143,4 +144,4 @@ Using `terraform import`, import endpoints using the `name`. 
For example: % terraform import aws_sagemaker_endpoint.test_endpoint my-endpoint ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/sagemaker_endpoint_configuration.html.markdown b/website/docs/cdktf/typescript/r/sagemaker_endpoint_configuration.html.markdown index 856f5555ef1f..6346921010b8 100644 --- a/website/docs/cdktf/typescript/r/sagemaker_endpoint_configuration.html.markdown +++ b/website/docs/cdktf/typescript/r/sagemaker_endpoint_configuration.html.markdown @@ -51,6 +51,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `productionVariants` - (Required) An list of ProductionVariant objects, one for each model that you want to host at this endpoint. Fields are documented below. * `kmsKeyArn` - (Optional) Amazon Resource Name (ARN) of a AWS Key Management Service key that Amazon SageMaker AI uses to encrypt data on the storage volume attached to the ML compute instance that hosts the endpoint. * `name` - (Optional) The name of the endpoint configuration. If omitted, Terraform will assign a random, unique name. Conflicts with `namePrefix`. @@ -182,4 +183,4 @@ Using `terraform import`, import endpoint configurations using the `name`. 
For e % terraform import aws_sagemaker_endpoint_configuration.test_endpoint_config endpoint-config-foo ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/sagemaker_feature_group.html.markdown b/website/docs/cdktf/typescript/r/sagemaker_feature_group.html.markdown index 87390070dec0..d45992759ffb 100644 --- a/website/docs/cdktf/typescript/r/sagemaker_feature_group.html.markdown +++ b/website/docs/cdktf/typescript/r/sagemaker_feature_group.html.markdown @@ -52,6 +52,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `featureGroupName` - (Required) The name of the Feature Group. The name must be unique within an AWS Region in an AWS account. * `recordIdentifierFeatureName` - (Required) The name of the Feature whose value uniquely identifies a Record defined in the Feature Store. Only the latest record per identifier value will be stored in the Online Store. * `eventTimeFeatureName` - (Required) The name of the feature that stores the EventTime of a Record in a Feature Group. @@ -142,4 +143,4 @@ Using `terraform import`, import Feature Groups using the `name`. 
For example: % terraform import aws_sagemaker_feature_group.test_feature_group feature_group-foo ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/sagemaker_flow_definition.html.markdown b/website/docs/cdktf/typescript/r/sagemaker_flow_definition.html.markdown index d4c802e07f62..d3d2298a4a00 100644 --- a/website/docs/cdktf/typescript/r/sagemaker_flow_definition.html.markdown +++ b/website/docs/cdktf/typescript/r/sagemaker_flow_definition.html.markdown @@ -78,7 +78,7 @@ class MyConvertedCode extends TerraformStack { taskTitle: "example", workteamArn: "arn:aws:sagemaker:${" + - current.name + + current.region + "}:394669845002:workteam/public-crowd/default", }, outputConfig: { @@ -139,6 +139,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `flowDefinitionName` - (Required) The name of your flow definition. * `humanLoopConfig` - (Required) An object containing information about the tasks the human reviewers will perform. See [Human Loop Config](#human-loop-config) details below. * `roleArn` - (Required) The Amazon Resource Name (ARN) of the role needed to call other services on your behalf. 
@@ -222,4 +223,4 @@ Using `terraform import`, import SageMaker AI Flow Definitions using the `flowDe % terraform import aws_sagemaker_flow_definition.example example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/sagemaker_hub.html.markdown b/website/docs/cdktf/typescript/r/sagemaker_hub.html.markdown index 8d36f1b0b7f2..5ef4c5616724 100644 --- a/website/docs/cdktf/typescript/r/sagemaker_hub.html.markdown +++ b/website/docs/cdktf/typescript/r/sagemaker_hub.html.markdown @@ -41,6 +41,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `hubName` - (Required) The name of the hub. * `hubDescription` - (Required) A description of the hub. * `hubDisplayName` - (Optional) The display name of the hub. @@ -88,4 +89,4 @@ Using `terraform import`, import SageMaker AI Hubs using the `name`. For example % terraform import aws_sagemaker_hub.test_hub my-code-repo ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/sagemaker_human_task_ui.html.markdown b/website/docs/cdktf/typescript/r/sagemaker_human_task_ui.html.markdown index 6babf4eafdca..e329ded08b96 100644 --- a/website/docs/cdktf/typescript/r/sagemaker_human_task_ui.html.markdown +++ b/website/docs/cdktf/typescript/r/sagemaker_human_task_ui.html.markdown @@ -43,6 +43,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `humanTaskUiName` - (Required) The name of the Human Task UI. * `tags` - (Optional) A map of tags to assign to the resource. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. * `uiTemplate` - (Required) The Liquid template for the worker user interface. See [UI Template](#ui-template) below. @@ -93,4 +94,4 @@ Using `terraform import`, import SageMaker AI Human Task UIs using the `humanTas % terraform import aws_sagemaker_human_task_ui.example example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/sagemaker_image.html.markdown b/website/docs/cdktf/typescript/r/sagemaker_image.html.markdown index 65a067953fe7..beb3dc89d3bb 100644 --- a/website/docs/cdktf/typescript/r/sagemaker_image.html.markdown +++ b/website/docs/cdktf/typescript/r/sagemaker_image.html.markdown @@ -41,6 +41,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `imageName` - (Required) The name of the image. Must be unique to your account. * `roleArn` - (Required) The Amazon Resource Name (ARN) of an IAM role that enables Amazon SageMaker AI to perform tasks on your behalf. * `displayName` - (Optional) The display name of the image. When the image is added to a domain (must be unique to the domain). 
@@ -83,4 +84,4 @@ Using `terraform import`, import SageMaker AI Code Images using the `name`. For % terraform import aws_sagemaker_image.test_image my-code-repo ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/sagemaker_image_version.html.markdown b/website/docs/cdktf/typescript/r/sagemaker_image_version.html.markdown index b4ad81a3d90a..2c47e4127d15 100644 --- a/website/docs/cdktf/typescript/r/sagemaker_image_version.html.markdown +++ b/website/docs/cdktf/typescript/r/sagemaker_image_version.html.markdown @@ -16,6 +16,29 @@ Provides a SageMaker AI Image Version resource. ### Basic usage +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. + */ +import { SagemakerImageVersion } from "./.gen/providers/aws/sagemaker-image-version"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + new SagemakerImageVersion(this, "example", { + baseImage: "012345678912.dkr.ecr.us-west-2.amazonaws.com/image:latest", + imageName: test.id, + }); + } +} + +``` + +### With Aliases + ```typescript // DO NOT EDIT. 
Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug import { Construct } from "constructs"; @@ -29,6 +52,7 @@ class MyConvertedCode extends TerraformStack { constructor(scope: Construct, name: string) { super(scope, name); new SagemakerImageVersion(this, "test", { + aliases: ["latest", "stable"], baseImage: "012345678912.dkr.ecr.us-west-2.amazonaws.com/image:latest", imageName: Token.asString(awsSagemakerImageTest.id), }); @@ -41,28 +65,29 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `imageName` - (Required) The name of the image. Must be unique to your account. * `baseImage` - (Required) The registry path of the container image on which this image version is based. +* `aliases` - (Optional) A list of aliases for the image version. * `horovod` - (Optional) Indicates Horovod compatibility. * `jobType` - (Optional) Indicates SageMaker AI job type compatibility. Valid values are: `TRAINING`, `INFERENCE`, and `NOTEBOOK_KERNEL`. -* `ml_framework` - (Optional) The machine learning framework vended in the image version. +* `mlFramework` - (Optional) The machine learning framework vended in the image version. * `processor` - (Optional) Indicates CPU or GPU compatibility. Valid values are: `CPU` and `GPU`. -* `programming_lang` - (Optional) The supported programming language and its version. -* `release_notes` - (Optional) The maintainer description of the image version. -* `vendor_guidance` - (Optional) The stability of the image version, specified by the maintainer. Valid values are: `NOT_PROVIDED`, `STABLE`, `TO_BE_ARCHIVED`, and `ARCHIVED`. 
+* `programmingLang` - (Optional) The supported programming language and its version. +* `releaseNotes` - (Optional) The maintainer description of the image version. +* `vendorGuidance` - (Optional) The stability of the image version, specified by the maintainer. Valid values are: `NOT_PROVIDED`, `STABLE`, `TO_BE_ARCHIVED`, and `ARCHIVED`. ## Attribute Reference This resource exports the following attributes in addition to the arguments above: -* `id` - The name of the Image. * `arn` - The Amazon Resource Name (ARN) assigned by AWS to this Image Version. * `version`- The version of the image. If not specified, the latest version is described. * `containerImage` - The registry path of the container image that contains this image version. ## Import -In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import SageMaker AI Image Versions using the `name`. For example: +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import SageMaker AI Image Versions using a comma-delimited string concatenating `imageName` and `version`. For example: ```typescript // DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug @@ -78,18 +103,18 @@ class MyConvertedCode extends TerraformStack { super(scope, name); SagemakerImageVersion.generateConfigForImport( this, - "testImage", - "my-code-repo" + "example", + "example-name,1" ); } } ``` -Using `terraform import`, import SageMaker AI Image Versions using the `name`. For example: +Using `terraform import`, import SageMaker AI Image Versions using a comma-delimited string concatenating `imageName` and `version`. 
For example: ```console -% terraform import aws_sagemaker_image_version.test_image my-code-repo +% terraform import aws_sagemaker_image_version.example example-name,1 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/sagemaker_mlflow_tracking_server.html.markdown b/website/docs/cdktf/typescript/r/sagemaker_mlflow_tracking_server.html.markdown index d278afb0fb4d..d76e69f35634 100644 --- a/website/docs/cdktf/typescript/r/sagemaker_mlflow_tracking_server.html.markdown +++ b/website/docs/cdktf/typescript/r/sagemaker_mlflow_tracking_server.html.markdown @@ -42,6 +42,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `artifactStoreUri` - (Required) The S3 URI for a general purpose bucket to use as the MLflow Tracking Server artifact store. * `roleArn` - (Required) The Amazon Resource Name (ARN) for an IAM role in your account that the MLflow Tracking Server uses to access the artifact store in Amazon S3. The role should have AmazonS3FullAccess permissions. For more information on IAM permissions for tracking server creation, see [Set up IAM permissions for MLflow](https://docs.aws.amazon.com/sagemaker/latest/dg/mlflow-create-tracking-server-iam.html). * `trackingServerName` - (Required) A unique string identifying the tracking server name. This string is part of the tracking server ARN. 
@@ -92,4 +93,4 @@ Using `terraform import`, import SageMaker AI MLFlow Tracking Servers using the % terraform import aws_sagemaker_mlflow_tracking_server.example example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/sagemaker_model.html.markdown b/website/docs/cdktf/typescript/r/sagemaker_model.html.markdown index d2aab574588a..93cf3770faec 100644 --- a/website/docs/cdktf/typescript/r/sagemaker_model.html.markdown +++ b/website/docs/cdktf/typescript/r/sagemaker_model.html.markdown @@ -68,6 +68,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Optional) The name of the model (must be unique). If omitted, Terraform will assign a random, unique name. * `primaryContainer` - (Optional) The primary docker image containing inference code that is used when the model is deployed for predictions. If not specified, the `container` argument is required. Fields are documented below. * `executionRoleArn` - (Required) A role that SageMaker AI can assume to access model artifacts and docker images for deployment. @@ -159,4 +160,4 @@ Using `terraform import`, import models using the `name`. 
For example: % terraform import aws_sagemaker_model.test_model model-foo ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/sagemaker_model_package_group.html.markdown b/website/docs/cdktf/typescript/r/sagemaker_model_package_group.html.markdown index 52e26204e966..a4e3e9a0c0d7 100644 --- a/website/docs/cdktf/typescript/r/sagemaker_model_package_group.html.markdown +++ b/website/docs/cdktf/typescript/r/sagemaker_model_package_group.html.markdown @@ -40,6 +40,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `modelPackageGroupName` - (Required) The name of the model group. * `modelPackageGroupDescription` - (Optional) A description for the model group. * `tags` - (Optional) A map of tags to assign to the resource. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. 
@@ -84,4 +85,4 @@ Using `terraform import`, import SageMaker AI Model Package Groups using the `na % terraform import aws_sagemaker_model_package_group.test_model_package_group my-code-repo ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/sagemaker_model_package_group_policy.html.markdown b/website/docs/cdktf/typescript/r/sagemaker_model_package_group_policy.html.markdown index 614fe106280c..61432239fc2d 100644 --- a/website/docs/cdktf/typescript/r/sagemaker_model_package_group_policy.html.markdown +++ b/website/docs/cdktf/typescript/r/sagemaker_model_package_group_policy.html.markdown @@ -79,6 +79,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `modelPackageGroupName` - (Required) The name of the model package group. 
## Attribute Reference @@ -119,4 +120,4 @@ Using `terraform import`, import SageMaker AI Model Package Groups using the `na % terraform import aws_sagemaker_model_package_group_policy.example example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/sagemaker_monitoring_schedule.html.markdown b/website/docs/cdktf/typescript/r/sagemaker_monitoring_schedule.html.markdown index b594823fc060..f31013dd0829 100644 --- a/website/docs/cdktf/typescript/r/sagemaker_monitoring_schedule.html.markdown +++ b/website/docs/cdktf/typescript/r/sagemaker_monitoring_schedule.html.markdown @@ -46,6 +46,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `monitoringScheduleConfig` - (Required) The configuration object that specifies the monitoring schedule and defines the monitoring job. Fields are documented below. * `name` - (Optional) The name of the monitoring schedule. The name must be unique within an AWS Region within an AWS account. If omitted, Terraform will assign a random, unique name. * `tags` - (Optional) A mapping of tags to assign to the resource. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. @@ -100,4 +101,4 @@ Using `terraform import`, import monitoring schedules using the `name`. 
For exam % terraform import aws_sagemaker_monitoring_schedule.test_monitoring_schedule monitoring-schedule-foo ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/sagemaker_notebook_instance.html.markdown b/website/docs/cdktf/typescript/r/sagemaker_notebook_instance.html.markdown index 258c93865da6..af8327f72a40 100644 --- a/website/docs/cdktf/typescript/r/sagemaker_notebook_instance.html.markdown +++ b/website/docs/cdktf/typescript/r/sagemaker_notebook_instance.html.markdown @@ -81,6 +81,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The name of the notebook instance (must be unique). * `roleArn` - (Required) The ARN of the IAM role to be used by the notebook instance which allows SageMaker AI to call other services on your behalf. * `instanceType` - (Required) The name of ML compute instance type. @@ -88,7 +89,6 @@ This resource supports the following arguments: * `volumeSize` - (Optional) The size, in GB, of the ML storage volume to attach to the notebook instance. The default value is 5 GB. * `subnetId` - (Optional) The VPC subnet ID. * `securityGroups` - (Optional) The associated security groups. -* `acceleratorTypes` - (Optional, Deprecated) A list of Elastic Inference (EI) instance types to associate with this notebook instance. See [Elastic Inference Accelerator](https://docs.aws.amazon.com/sagemaker/latest/dg/ei.html) for more details. Valid values: `ml.eia1.medium`, `ml.eia1.large`, `ml.eia1.xlarge`, `ml.eia2.medium`, `ml.eia2.large`, `ml.eia2.xlarge`. 
* `additionalCodeRepositories` - (Optional) An array of up to three Git repositories to associate with the notebook instance. These can be either the names of Git repositories stored as resources in your account, or the URL of Git repositories in [AWS CodeCommit](https://docs.aws.amazon.com/codecommit/latest/userguide/welcome.html) or in any other Git repository. These repositories are cloned at the same level as the default repository of your notebook instance. * `defaultCodeRepository` - (Optional) The Git repository associated with the notebook instance as its default code repository. This can be either the name of a Git repository stored as a resource in your account, or the URL of a Git repository in [AWS CodeCommit](https://docs.aws.amazon.com/codecommit/latest/userguide/welcome.html) or in any other Git repository. @@ -145,4 +145,4 @@ Using `terraform import`, import SageMaker AI Notebook Instances using the `name % terraform import aws_sagemaker_notebook_instance.test_notebook_instance my-notebook-instance ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/sagemaker_notebook_instance_lifecycle_configuration.html.markdown b/website/docs/cdktf/typescript/r/sagemaker_notebook_instance_lifecycle_configuration.html.markdown index 37f68ac76da3..9971d807a9a3 100644 --- a/website/docs/cdktf/typescript/r/sagemaker_notebook_instance_lifecycle_configuration.html.markdown +++ b/website/docs/cdktf/typescript/r/sagemaker_notebook_instance_lifecycle_configuration.html.markdown @@ -42,6 +42,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
* `name` - (Optional) The name of the lifecycle configuration (must be unique). If omitted, Terraform will assign a random, unique name. * `onCreate` - (Optional) A shell script (base64-encoded) that runs only once when the SageMaker AI Notebook Instance is created. * `onStart` - (Optional) A shell script (base64-encoded) that runs every time the SageMaker AI Notebook Instance is started including the time it's created. @@ -86,4 +87,4 @@ Using `terraform import`, import models using the `name`. For example: % terraform import aws_sagemaker_notebook_instance_lifecycle_configuration.lc foo ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/sagemaker_pipeline.html.markdown b/website/docs/cdktf/typescript/r/sagemaker_pipeline.html.markdown index f5f462dfc82d..fa3b41fb3bd0 100644 --- a/website/docs/cdktf/typescript/r/sagemaker_pipeline.html.markdown +++ b/website/docs/cdktf/typescript/r/sagemaker_pipeline.html.markdown @@ -56,6 +56,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `pipelineName` - (Required) The name of the pipeline. * `pipelineDescription` - (Optional) A description of the pipeline. * `pipelineDisplayName` - (Required) The display name of the pipeline. @@ -111,4 +112,4 @@ Using `terraform import`, import pipelines using the `pipelineName`. 
For example % terraform import aws_sagemaker_pipeline.test_pipeline pipeline ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/sagemaker_project.html.markdown b/website/docs/cdktf/typescript/r/sagemaker_project.html.markdown index 2bfbc8eb7d79..7109c211b832 100644 --- a/website/docs/cdktf/typescript/r/sagemaker_project.html.markdown +++ b/website/docs/cdktf/typescript/r/sagemaker_project.html.markdown @@ -43,6 +43,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `projectName` - (Required) The name of the Project. * `projectDescription` - (Optional) A description for the project. * `serviceCatalogProvisioningDetails` - (Required) The product ID and provisioning artifact ID to provision a service catalog. See [Service Catalog Provisioning Details](#service-catalog-provisioning-details) below. @@ -97,4 +98,4 @@ Using `terraform import`, import SageMaker AI Projects using the `projectName`. 
% terraform import aws_sagemaker_project.example example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/sagemaker_servicecatalog_portfolio_status.html.markdown b/website/docs/cdktf/typescript/r/sagemaker_servicecatalog_portfolio_status.html.markdown index 9108e90d82d2..1091fe2fa937 100644 --- a/website/docs/cdktf/typescript/r/sagemaker_servicecatalog_portfolio_status.html.markdown +++ b/website/docs/cdktf/typescript/r/sagemaker_servicecatalog_portfolio_status.html.markdown @@ -40,6 +40,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `status` - (Required) Whether Service Catalog is enabled or disabled in SageMaker. Valid values are `Enabled` and `Disabled`. ## Attribute Reference @@ -80,4 +81,4 @@ Using `terraform import`, import models using the `id`. For example: % terraform import aws_sagemaker_servicecatalog_portfolio_status.example us-east-1 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/sagemaker_space.html.markdown b/website/docs/cdktf/typescript/r/sagemaker_space.html.markdown index 2a8878f0140b..552a46c3ad2d 100644 --- a/website/docs/cdktf/typescript/r/sagemaker_space.html.markdown +++ b/website/docs/cdktf/typescript/r/sagemaker_space.html.markdown @@ -41,6 +41,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `domainId` - (Required) The ID of the associated Domain. * `ownershipSettings` - (Optional) A collection of ownership settings. Required if `spaceSharingSettings` is set. See [`ownershipSettings` Block](#ownership_settings-block) below. * `spaceDisplayName` - (Optional) The name of the space that appears in the SageMaker AI Studio UI. @@ -206,4 +207,4 @@ Using `terraform import`, import SageMaker AI Spaces using the `id`. For example % terraform import aws_sagemaker_space.test_space arn:aws:sagemaker:us-west-2:123456789012:space/domain-id/space-name ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/sagemaker_studio_lifecycle_config.html.markdown b/website/docs/cdktf/typescript/r/sagemaker_studio_lifecycle_config.html.markdown index 4bad4addef6f..8d1b412a965b 100644 --- a/website/docs/cdktf/typescript/r/sagemaker_studio_lifecycle_config.html.markdown +++ b/website/docs/cdktf/typescript/r/sagemaker_studio_lifecycle_config.html.markdown @@ -44,6 +44,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +- `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). - `studioLifecycleConfigName` - (Required) The name of the Studio Lifecycle Configuration to create. - `studioLifecycleConfigAppType` - (Required) The App type that the Lifecycle Configuration is attached to. Valid values are `JupyterServer`, `JupyterLab`, `CodeEditor` and `KernelGateway`. - `studioLifecycleConfigContent` - (Required) The content of your Studio Lifecycle Configuration script. 
This content must be base64 encoded. @@ -89,4 +90,4 @@ Using `terraform import`, import SageMaker AI Studio Lifecycle Configs using the % terraform import aws_sagemaker_studio_lifecycle_config.example example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/sagemaker_user_profile.html.markdown b/website/docs/cdktf/typescript/r/sagemaker_user_profile.html.markdown index c76ab135a2bd..60f3daafec6d 100644 --- a/website/docs/cdktf/typescript/r/sagemaker_user_profile.html.markdown +++ b/website/docs/cdktf/typescript/r/sagemaker_user_profile.html.markdown @@ -41,6 +41,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `domainId` - (Required) The ID of the associated Domain. * `singleSignOnUserIdentifier` - (Optional) A specifier for the type of value specified in `singleSignOnUserValue`. Currently, the only supported value is `UserName`. If the Domain's AuthMode is SSO, this field is required. If the Domain's AuthMode is not SSO, this field cannot be specified. * `singleSignOnUserValue` - (Required) The username of the associated AWS Single Sign-On User for this User Profile. If the Domain's AuthMode is SSO, this field is required, and must match a valid username of a user in your directory. If the Domain's AuthMode is not SSO, this field cannot be specified. @@ -239,13 +240,40 @@ This resource supports the following arguments: This resource exports the following attributes in addition to the arguments above: -* `id` - The user profile Amazon Resource Name (ARN). * `arn` - The user profile Amazon Resource Name (ARN). 
* `homeEfsFileSystemUid` - The ID of the user's profile in the Amazon Elastic File System (EFS) volume. * `tagsAll` - A map of tags assigned to the resource, including those inherited from the provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_sagemaker_user_profile.example + identity = { + domain_id = "domain-id" + user_profile_name = "profile-name" + } +} + +resource "aws_sagemaker_user_profile" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `domainId` (String) SageMaker domain ID. +* `userProfileName` (String) Name of the user profile. + +#### Optional + +* `accountId` (String) AWS Account where this resource is managed. +* `region` (String) Region where this resource is managed. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import SageMaker AI User Profiles using the `arn`. For example: ```typescript @@ -262,7 +290,7 @@ class MyConvertedCode extends TerraformStack { super(scope, name); SagemakerUserProfile.generateConfigForImport( this, - "testUserProfile", + "example", "arn:aws:sagemaker:us-west-2:123456789012:user-profile/domain-id/profile-name" ); } @@ -273,7 +301,7 @@ class MyConvertedCode extends TerraformStack { Using `terraform import`, import SageMaker AI User Profiles using the `arn`. 
For example: ```console -% terraform import aws_sagemaker_user_profile.test_user_profile arn:aws:sagemaker:us-west-2:123456789012:user-profile/domain-id/profile-name +% terraform import aws_sagemaker_user_profile.example arn:aws:sagemaker:us-west-2:123456789012:user-profile/domain-id/profile-name ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/sagemaker_workforce.html.markdown b/website/docs/cdktf/typescript/r/sagemaker_workforce.html.markdown index cfd32bc055dd..b7e08a32f03d 100644 --- a/website/docs/cdktf/typescript/r/sagemaker_workforce.html.markdown +++ b/website/docs/cdktf/typescript/r/sagemaker_workforce.html.markdown @@ -109,6 +109,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `workforceName` - (Required) The name of the Workforce (must be unique). * `cognitoConfig` - (Optional) Use this parameter to configure an Amazon Cognito private workforce. A single Cognito workforce is created using and corresponds to a single Amazon Cognito user pool. Conflicts with `oidcConfig`. see [Cognito Config](#cognito-config) details below. * `oidcConfig` - (Optional) Use this parameter to configure a private workforce using your own OIDC Identity Provider. Conflicts with `cognitoConfig`. see [OIDC Config](#oidc-config) details below. 
@@ -180,4 +181,4 @@ Using `terraform import`, import SageMaker AI Workforces using the `workforceNam % terraform import aws_sagemaker_workforce.example example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/sagemaker_workteam.html.markdown b/website/docs/cdktf/typescript/r/sagemaker_workteam.html.markdown index b77d5fcd2f85..29747f032619 100644 --- a/website/docs/cdktf/typescript/r/sagemaker_workteam.html.markdown +++ b/website/docs/cdktf/typescript/r/sagemaker_workteam.html.markdown @@ -84,6 +84,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` - (Required) A description of the work team. * `workforceName` - (Optional) The name of the workforce. * `workteamName` - (Required) The name of the Workteam (must be unique). @@ -161,4 +162,4 @@ Using `terraform import`, import SageMaker AI Workteams using the `workteamName` % terraform import aws_sagemaker_workteam.example example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/scheduler_schedule.html.markdown b/website/docs/cdktf/typescript/r/scheduler_schedule.html.markdown index 436020ac31f5..cec269f267b6 100644 --- a/website/docs/cdktf/typescript/r/scheduler_schedule.html.markdown +++ b/website/docs/cdktf/typescript/r/scheduler_schedule.html.markdown @@ -103,12 +103,14 @@ The following arguments are required: The following arguments are optional: +* `actionAfterCompletion` - (Optional) Action that applies to the schedule after completing invocation of the target. Valid values are `NONE` and `DELETE`. Defaults to `NONE`.
* `description` - (Optional) Brief description of the schedule. * `endDate` - (Optional) The date, in UTC, before which the schedule can invoke its target. Depending on the schedule's recurrence expression, invocations might stop on, or before, the end date you specify. EventBridge Scheduler ignores the end date for one-time schedules. Example: `2030-01-01T01:00:00Z`. * `groupName` - (Optional, Forces new resource) Name of the schedule group to associate with this schedule. When omitted, the `default` schedule group is used. * `kmsKeyArn` - (Optional) ARN for the customer managed KMS key that EventBridge Scheduler will use to encrypt and decrypt your data. * `name` - (Optional, Forces new resource) Name of the schedule. If omitted, Terraform will assign a random, unique name. Conflicts with `namePrefix`. * `namePrefix` - (Optional, Forces new resource) Creates a unique name beginning with the specified prefix. Conflicts with `name`. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `scheduleExpressionTimezone` - (Optional) Timezone in which the scheduling expression is evaluated. Defaults to `UTC`. Example: `Australia/Sydney`. * `startDate` - (Optional) The date, in UTC, after which the schedule can begin invoking its target. Depending on the schedule's recurrence expression, invocations might occur on, or after, the start date you specify. EventBridge Scheduler ignores the start date for one-time schedules. Example: `2030-01-01T01:00:00Z`. * `state` - (Optional) Specifies whether the schedule is enabled or disabled. One of: `ENABLED` (default), `DISABLED`. 
@@ -127,6 +129,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `deadLetterConfig` - (Optional) Information about an Amazon SQS queue that EventBridge Scheduler uses as a dead-letter queue for your schedule. If specified, EventBridge Scheduler delivers failed events that could not be successfully delivered to a target to the queue. Detailed below. * `ecsParameters` - (Optional) Templated target type for the Amazon ECS [`RunTask`](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_RunTask.html) API operation. Detailed below. * `eventbridgeParameters` - (Optional) Templated target type for the EventBridge [`PutEvents`](https://docs.aws.amazon.com/eventbridge/latest/APIReference/API_PutEvents.html) API operation. Detailed below. @@ -148,6 +151,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `capacityProviderStrategy` - (Optional) Up to `6` capacity provider strategies to use for the task. Detailed below. * `enableEcsManagedTags` - (Optional) Specifies whether to enable Amazon ECS managed tags for the task. For more information, see [Tagging Your Amazon ECS Resources](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-using-tags.html) in the Amazon ECS Developer Guide. 
* `enableExecuteCommand` - (Optional) Specifies whether to enable the execute command functionality for the containers in this task. @@ -250,4 +254,4 @@ Using `terraform import`, import schedules using the combination `group_name/nam % terraform import aws_scheduler_schedule.example my-schedule-group/my-schedule ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/scheduler_schedule_group.html.markdown b/website/docs/cdktf/typescript/r/scheduler_schedule_group.html.markdown index 1af58c98b8c7..fd345ede33c2 100644 --- a/website/docs/cdktf/typescript/r/scheduler_schedule_group.html.markdown +++ b/website/docs/cdktf/typescript/r/scheduler_schedule_group.html.markdown @@ -42,6 +42,7 @@ class MyConvertedCode extends TerraformStack { The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Optional, Forces new resource) Name of the schedule group. If omitted, Terraform will assign a random, unique name. Conflicts with `namePrefix`. * `namePrefix` - (Optional, Forces new resource) Creates a unique name beginning with the specified prefix. Conflicts with `name`. * `tags` - (Optional) Key-value mapping of resource tags. If configured with a provider [`defaultTags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. @@ -96,4 +97,4 @@ Using `terraform import`, import schedule groups using the `name`. 
For example: % terraform import aws_scheduler_schedule_group.example my-schedule-group ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/schemas_discoverer.html.markdown b/website/docs/cdktf/typescript/r/schemas_discoverer.html.markdown index 95282c8b84b1..8ae96bcc5a66 100644 --- a/website/docs/cdktf/typescript/r/schemas_discoverer.html.markdown +++ b/website/docs/cdktf/typescript/r/schemas_discoverer.html.markdown @@ -45,6 +45,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `sourceArn` - (Required) The ARN of the event bus to discover event schemas on. * `description` - (Optional) The description of the discoverer. Maximum of 256 characters. * `tags` - (Optional) A map of tags to assign to the resource. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. @@ -85,4 +86,4 @@ Using `terraform import`, import EventBridge discoverers using the `id`. 
For exa % terraform import aws_schemas_discoverer.test 123 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/schemas_registry.html.markdown b/website/docs/cdktf/typescript/r/schemas_registry.html.markdown index 923609c64e33..0697576110d8 100644 --- a/website/docs/cdktf/typescript/r/schemas_registry.html.markdown +++ b/website/docs/cdktf/typescript/r/schemas_registry.html.markdown @@ -41,6 +41,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The name of the custom event schema registry. Maximum of 64 characters consisting of lower case letters, upper case letters, 0-9, ., -, _. * `description` - (Optional) The description of the discoverer. Maximum of 256 characters. * `tags` - (Optional) A map of tags to assign to the resource. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. @@ -80,4 +81,4 @@ Using `terraform import`, import EventBridge schema registries using the `name`. 
% terraform import aws_schemas_registry.test my_own_registry ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/schemas_registry_policy.html.markdown b/website/docs/cdktf/typescript/r/schemas_registry_policy.html.markdown index 5ba98c36b6c7..c962fcc33f87 100644 --- a/website/docs/cdktf/typescript/r/schemas_registry_policy.html.markdown +++ b/website/docs/cdktf/typescript/r/schemas_registry_policy.html.markdown @@ -65,8 +65,9 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -The following arguments are required: +This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `registryName` - (Required) Name of EventBridge Schema Registry * `policy` - (Required) Resource Policy for EventBridge Schema Registry @@ -110,4 +111,4 @@ Using `terraform import`, import EventBridge Schema Registry Policy using the `r % terraform import aws_schemas_registry_policy.example example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/schemas_schema.html.markdown b/website/docs/cdktf/typescript/r/schemas_schema.html.markdown index 14b25d2cbfb5..dc6f2eed9ff4 100644 --- a/website/docs/cdktf/typescript/r/schemas_schema.html.markdown +++ b/website/docs/cdktf/typescript/r/schemas_schema.html.markdown @@ -71,6 +71,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The name of the schema. Maximum of 385 characters consisting of lower case letters, upper case letters, ., -, _, @. * `content` - (Required) The schema specification. Must be a valid Open API 3.0 spec. * `registryName` - (Required) The name of the registry in which this schema belongs. @@ -116,4 +117,4 @@ Using `terraform import`, import EventBridge schema using the `name` and `regist % terraform import aws_schemas_schema.test name/registry ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/secretsmanager_secret.html.markdown b/website/docs/cdktf/typescript/r/secretsmanager_secret.html.markdown index 62c2b2c025b4..a3ec1836a354 100644 --- a/website/docs/cdktf/typescript/r/secretsmanager_secret.html.markdown +++ b/website/docs/cdktf/typescript/r/secretsmanager_secret.html.markdown @@ -40,6 +40,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` - (Optional) Description of the secret. * `kmsKeyId` - (Optional) ARN or Id of the AWS KMS key to be used to encrypt the secret values in the versions stored in this secret. If you need to reference a CMK in a different account, you can use only the key ARN. If you don't specify this value, then Secrets Manager defaults to using the AWS account's default KMS key (the one named `aws/secretsmanager`). If the default KMS key with that name doesn't yet exist, then AWS Secrets Manager creates it for you automatically the first time. 
* `namePrefix` - (Optional) Creates a unique name beginning with the specified prefix. Conflicts with `name`. @@ -72,6 +73,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_secretsmanager_secret.example + identity = { + "arn" = "arn:aws:secretsmanager:us-east-1:123456789012:secret:example-123456" + } +} + +resource "aws_secretsmanager_secret" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the Secrets Manager secret. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import `aws_secretsmanager_secret` using the secret Amazon Resource Name (ARN). For example: ```typescript @@ -102,4 +124,4 @@ Using `terraform import`, import `aws_secretsmanager_secret` using the secret Am % terraform import aws_secretsmanager_secret.example arn:aws:secretsmanager:us-east-1:123456789012:secret:example-123456 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/secretsmanager_secret_policy.html.markdown b/website/docs/cdktf/typescript/r/secretsmanager_secret_policy.html.markdown index 35412b46032b..43b0da817ece 100644 --- a/website/docs/cdktf/typescript/r/secretsmanager_secret_policy.html.markdown +++ b/website/docs/cdktf/typescript/r/secretsmanager_secret_policy.html.markdown @@ -79,6 +79,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `blockPublicPolicy` - (Optional) Makes an optional API call to Zelkova to validate the Resource Policy to prevent broad access to your secret. ## Attribute Reference @@ -89,6 +90,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_secretsmanager_secret_policy.example + identity = { + "arn" = "arn:aws:secretsmanager:us-east-1:123456789012:secret:example-123456" + } +} + +resource "aws_secretsmanager_secret_policy" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the Secrets Manager secret. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import `aws_secretsmanager_secret_policy` using the secret Amazon Resource Name (ARN). 
For example: ```typescript @@ -119,4 +141,4 @@ Using `terraform import`, import `aws_secretsmanager_secret_policy` using the se % terraform import aws_secretsmanager_secret_policy.example arn:aws:secretsmanager:us-east-1:123456789012:secret:example-123456 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/secretsmanager_secret_rotation.html.markdown b/website/docs/cdktf/typescript/r/secretsmanager_secret_rotation.html.markdown index be7e13444051..6938da998bb5 100644 --- a/website/docs/cdktf/typescript/r/secretsmanager_secret_rotation.html.markdown +++ b/website/docs/cdktf/typescript/r/secretsmanager_secret_rotation.html.markdown @@ -52,6 +52,7 @@ To enable automatic secret rotation, the Secrets Manager service requires usage This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `secretId` - (Required) Specifies the secret to which you want to add a new version. You can specify either the Amazon Resource Name (ARN) or the friendly name of the secret. The secret must already exist. * `rotateImmediately` - (Optional) Specifies whether to rotate the secret immediately or wait until the next scheduled rotation window. The rotation schedule is defined in `rotationRules`. For secrets that use a Lambda rotation function to rotate, if you don't immediately rotate the secret, Secrets Manager tests the rotation configuration by running the testSecret step (https://docs.aws.amazon.com/secretsmanager/latest/userguide/rotate-secrets_how.html) of the Lambda rotation function. The test creates an AWSPENDING version of the secret and then removes it. Defaults to `true`. 
* `rotationLambdaArn` - (Optional) Specifies the ARN of the Lambda function that can rotate the secret. Must be supplied if the secret is not managed by AWS. @@ -73,6 +74,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_secretsmanager_secret_rotation.example + identity = { + "arn" = "arn:aws:secretsmanager:us-east-1:123456789012:secret:example-123456" + } +} + +resource "aws_secretsmanager_secret_rotation" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the Secrets Manager secret. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import `aws_secretsmanager_secret_rotation` using the secret Amazon Resource Name (ARN). 
For example: ```typescript @@ -103,4 +125,4 @@ Using `terraform import`, import `aws_secretsmanager_secret_rotation` using the % terraform import aws_secretsmanager_secret_rotation.example arn:aws:secretsmanager:us-east-1:123456789012:secret:example-123456 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/secretsmanager_secret_version.html.markdown b/website/docs/cdktf/typescript/r/secretsmanager_secret_version.html.markdown index 7c98ffcc68f0..86389d553371 100644 --- a/website/docs/cdktf/typescript/r/secretsmanager_secret_version.html.markdown +++ b/website/docs/cdktf/typescript/r/secretsmanager_secret_version.html.markdown @@ -14,7 +14,7 @@ Provides a resource to manage AWS Secrets Manager secret version including its s ~> **NOTE:** If the `AWSCURRENT` staging label is present on this version during resource deletion, that label cannot be removed and will be skipped to prevent errors when fully deleting the secret. That label will leave this secret version active even after the resource is deleted from Terraform unless the secret itself is deleted. Move the `AWSCURRENT` staging label before or after deleting this resource from Terraform to fully trigger version deprecation if necessary. --> **Note:** Write-Only argument `secretStringWo` is available to use in place of `secretString`. Write-Only argumentss are supported in HashiCorp Terraform 1.11.0 and later. [Learn more](https://developer.hashicorp.com/terraform/language/v1.11.x/resources/ephemeral#write-only-arguments). +-> **Note:** Write-Only argument `secretStringWo` is available to use in place of `secretString`. Write-Only arguments are supported in HashiCorp Terraform 1.11.0 and later. [Learn more](https://developer.hashicorp.com/terraform/language/resources/ephemeral#write-only-arguments).
## Example Usage @@ -109,6 +109,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `secretId` - (Required) Specifies the secret to which you want to add a new version. You can specify either the Amazon Resource Name (ARN) or the friendly name of the secret. The secret must already exist. * `secretString` - (Optional) Specifies text data that you want to encrypt and store in this version of the secret. This is required if `secretBinary` or `secretStringWo` is not set. * `secretStringWo` - (Optional) Specifies text data that you want to encrypt and store in this version of the secret. This is required if `secretBinary` or `secretString` is not set. @@ -128,6 +129,34 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_secretsmanager_secret_version.example + identity = { + secret_id = "arn:aws:secretsmanager:us-east-1:123456789012:secret:example-123456" + version_id = "xxxxx-xxxxxxx-xxxxxxx-xxxxx" + } +} + +resource "aws_secretsmanager_secret_version" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `secretId` - (String) ID of the secret. +* `versionId` - (String) ID of the secret version. + +#### Optional + +* `accountId` (String) AWS Account where this resource is managed. +* `region` (String) Region where this resource is managed. 
+ In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import `aws_secretsmanager_secret_version` using the secret ID and version ID. For example: ```typescript @@ -158,4 +187,4 @@ Using `terraform import`, import `aws_secretsmanager_secret_version` using the s % terraform import aws_secretsmanager_secret_version.example 'arn:aws:secretsmanager:us-east-1:123456789012:secret:example-123456|xxxxx-xxxxxxx-xxxxxxx-xxxxx' ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/security_group.html.markdown b/website/docs/cdktf/typescript/r/security_group.html.markdown index b4ed6bb24f75..54ea6c6c6d13 100644 --- a/website/docs/cdktf/typescript/r/security_group.html.markdown +++ b/website/docs/cdktf/typescript/r/security_group.html.markdown @@ -125,14 +125,12 @@ import { TerraformStack } from "cdktf"; import { SecurityGroup } from "./.gen/providers/aws/security-group"; import { VpcEndpoint } from "./.gen/providers/aws/vpc-endpoint"; interface MyConfig { - serviceName: any; vpcId: any; } class MyConvertedCode extends TerraformStack { constructor(scope: Construct, name: string, config: MyConfig) { super(scope, name); const myEndpoint = new VpcEndpoint(this, "my_endpoint", { - serviceName: config.serviceName, vpcId: config.vpcId, }); new SecurityGroup(this, "example", { @@ -338,6 +336,7 @@ resource "null_resource" "example" { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` - (Optional, Forces new resource) Security group description. Defaults to `Managed by Terraform`. Cannot be `""`. 
**NOTE**: This field maps to the AWS `GroupDescription` attribute, for which there is no Update API. If you'd like to classify your security groups in a way that can be updated, use `tags`. * `egress` - (Optional, VPC only) Configuration block for egress rules. Can be specified multiple times for each egress rule. Each egress block supports fields documented below. This argument is processed in [attribute-as-blocks mode](https://www.terraform.io/docs/configuration/attr-as-blocks.html). * `ingress` - (Optional) Configuration block for ingress rules. Can be specified multiple times for each ingress rule. Each ingress block supports fields documented below. This argument is processed in [attribute-as-blocks mode](https://www.terraform.io/docs/configuration/attr-as-blocks.html). @@ -407,6 +406,32 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_security_group.example + identity = { + id = "sg-903004f8" + } +} + +resource "aws_security_group" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `id` (String) ID of the security group. + +#### Optional + +* `accountId` (String) AWS Account where this resource is managed. +* `region` (String) Region where this resource is managed. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Security Groups using the security group `id`. 
For example: ```typescript @@ -421,7 +446,7 @@ import { SecurityGroup } from "./.gen/providers/aws/security-group"; class MyConvertedCode extends TerraformStack { constructor(scope: Construct, name: string) { super(scope, name); - SecurityGroup.generateConfigForImport(this, "elbSg", "sg-903004f8"); + SecurityGroup.generateConfigForImport(this, "example", "sg-903004f8"); } } @@ -430,7 +455,7 @@ class MyConvertedCode extends TerraformStack { Using `terraform import`, import Security Groups using the security group `id`. For example: ```console -% terraform import aws_security_group.elb_sg sg-903004f8 +% terraform import aws_security_group.example sg-903004f8 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/security_group_rule.html.markdown b/website/docs/cdktf/typescript/r/security_group_rule.html.markdown index 5b128e248e97..986269ee517b 100644 --- a/website/docs/cdktf/typescript/r/security_group_rule.html.markdown +++ b/website/docs/cdktf/typescript/r/security_group_rule.html.markdown @@ -69,14 +69,12 @@ import { TerraformStack } from "cdktf"; import { SecurityGroupRule } from "./.gen/providers/aws/security-group-rule"; import { VpcEndpoint } from "./.gen/providers/aws/vpc-endpoint"; interface MyConfig { - serviceName: any; vpcId: any; } class MyConvertedCode extends TerraformStack { constructor(scope: Construct, name: string, config: MyConfig) { super(scope, name); const myEndpoint = new VpcEndpoint(this, "my_endpoint", { - serviceName: config.serviceName, vpcId: config.vpcId, }); new SecurityGroupRule(this, "allow_all", { @@ -111,7 +109,7 @@ class MyConvertedCode extends TerraformStack { super(scope, name); const current = new DataAwsRegion(this, "current", {}); const s3 = new DataAwsPrefixList(this, "s3", { - name: "com.amazonaws.${" + current.name + "}.s3", + name: "com.amazonaws.${" + current.region + "}.s3", }); new SecurityGroupRule(this, "s3_gateway_egress", { description: "S3 Gateway Egress", @@ 
-129,8 +127,9 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -The following arguments are required: +This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `fromPort` - (Required) Start port (or ICMP type number if protocol is "icmp" or "icmpv6"). * `protocol` - (Required) Protocol. If not icmp, icmpv6, tcp, udp, or all use the [protocol number](https://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml) * `securityGroupId` - (Required) Security group to apply this rule to. @@ -140,6 +139,7 @@ or `egress` (outbound). The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). ~> **Note** Although `cidrBlocks`, `ipv6CidrBlocks`, `prefixListIds`, and `sourceSecurityGroupId` are all marked as optional, you _must_ provide one of them in order to configure the source of the traffic. * `cidrBlocks` - (Optional) List of CIDR blocks. Cannot be specified with `sourceSecurityGroupId` or `self`. 
@@ -382,4 +382,4 @@ Import a rule that has itself and an IPv6 CIDR block as sources: % terraform import aws_security_group_rule.rule_name sg-656c65616e6f72_ingress_tcp_80_80_self_2001:db8::/48 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/securityhub_account.html.markdown b/website/docs/cdktf/typescript/r/securityhub_account.html.markdown index 95466c8b9e41..c9e590433461 100644 --- a/website/docs/cdktf/typescript/r/securityhub_account.html.markdown +++ b/website/docs/cdktf/typescript/r/securityhub_account.html.markdown @@ -38,6 +38,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `enableDefaultStandards` - (Optional) Whether to enable the security standards that Security Hub has designated as automatically enabled including: ` AWS Foundational Security Best Practices v1.0.0` and `CIS AWS Foundations Benchmark v1.2.0`. Defaults to `true`. * `controlFindingGenerator` - (Optional) Updates whether the calling account has consolidated control findings turned on. If the value for this field is set to `SECURITY_CONTROL`, Security Hub generates a single finding for a control check even when the check applies to multiple enabled standards. If the value for this field is set to `STANDARD_CONTROL`, Security Hub generates separate findings for a control check when the check applies to multiple enabled standards. For accounts that are part of an organization, this value can only be updated in the administrator account. * `autoEnableControls` - (Optional) Whether to automatically enable new controls when they are added to standards that are enabled. 
By default, this is set to true, and new controls are enabled automatically. To not automatically enable new controls, set this to false. @@ -77,4 +78,4 @@ Using `terraform import`, import an existing Security Hub enabled account using % terraform import aws_securityhub_account.example 123456789012 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/securityhub_action_target.html.markdown b/website/docs/cdktf/typescript/r/securityhub_action_target.html.markdown index bf3da5cb9672..23468d3233e0 100644 --- a/website/docs/cdktf/typescript/r/securityhub_action_target.html.markdown +++ b/website/docs/cdktf/typescript/r/securityhub_action_target.html.markdown @@ -49,6 +49,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The description for the custom action target. * `identifier` - (Required) The ID for the custom action target. * `description` - (Required) The name of the custom action target. 
@@ -91,4 +92,4 @@ Using `terraform import`, import Security Hub custom action using the action tar % terraform import aws_securityhub_action_target.example arn:aws:securityhub:eu-west-1:312940875350:action/custom/a ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/securityhub_automation_rule.html.markdown b/website/docs/cdktf/typescript/r/securityhub_automation_rule.html.markdown index 62fe04dae2d7..929543df3a01 100644 --- a/website/docs/cdktf/typescript/r/securityhub_automation_rule.html.markdown +++ b/website/docs/cdktf/typescript/r/securityhub_automation_rule.html.markdown @@ -81,6 +81,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `actions` - (Required) A block that specifies one or more actions to update finding fields if a finding matches the conditions specified in `Criteria`. [Documented below](#actions). * `criteria` - (Required) A block that specifies a set of ASFF finding field attributes and corresponding expected values that Security Hub uses to filter findings. [Documented below](#criteria). * `description` - (Required) The description of the rule. @@ -229,6 +230,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. 
For example: + +```terraform +import { + to = aws_securityhub_automation_rule.example + identity = { + "arn" = "arn:aws:securityhub:us-east-1:123456789012:automation-rule/a1b2c3d4-5678-90ab-cdef-EXAMPLE11111" + } +} + +resource "aws_securityhub_automation_rule" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the Security Hub automation rule. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Security Hub Automation Rule using their ARN. For example: ```typescript @@ -259,4 +281,4 @@ Using `terraform import`, import Security Hub automation rule using their ARN. F % terraform import aws_securityhub_automation_rule.example arn:aws:securityhub:us-west-2:123456789012:automation-rule/473eddde-f5c4-4ae5-85c7-e922f271fffc ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/securityhub_configuration_policy.html.markdown b/website/docs/cdktf/typescript/r/securityhub_configuration_policy.html.markdown index 20b07987a320..081a41f0bfb2 100644 --- a/website/docs/cdktf/typescript/r/securityhub_configuration_policy.html.markdown +++ b/website/docs/cdktf/typescript/r/securityhub_configuration_policy.html.markdown @@ -168,6 +168,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `configurationPolicy` - (Required) Defines how Security Hub is configured. See [below](#configuration_policy). * `description` - (Optional) The description of the configuration policy. 
* `name` - (Required) The name of the configuration policy. @@ -250,4 +251,4 @@ Using `terraform import`, import an existing Security Hub enabled account using % terraform import aws_securityhub_configuration_policy.example "00000000-1111-2222-3333-444444444444" ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/securityhub_configuration_policy_association.markdown b/website/docs/cdktf/typescript/r/securityhub_configuration_policy_association.markdown index 49c949d319d6..2de4be4d37c5 100644 --- a/website/docs/cdktf/typescript/r/securityhub_configuration_policy_association.markdown +++ b/website/docs/cdktf/typescript/r/securityhub_configuration_policy_association.markdown @@ -84,6 +84,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `policyId` - (Required) The universally unique identifier (UUID) of the configuration policy. * `targetId` - (Required, Forces new resource) The identifier of the target account, organizational unit, or the root to associate with the specified configuration. 
@@ -132,4 +133,4 @@ Using `terraform import`, import an existing Security Hub enabled account using % terraform import aws_securityhub_configuration_policy_association.example_account_association 123456789012 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/securityhub_finding_aggregator.html.markdown b/website/docs/cdktf/typescript/r/securityhub_finding_aggregator.html.markdown index c5b6d035d397..22cfeaee8c71 100644 --- a/website/docs/cdktf/typescript/r/securityhub_finding_aggregator.html.markdown +++ b/website/docs/cdktf/typescript/r/securityhub_finding_aggregator.html.markdown @@ -140,6 +140,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +- `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). - `linkingMode` - (Required) Indicates whether to aggregate findings from all of the available Regions or from a specified list. The options are `ALL_REGIONS`, `ALL_REGIONS_EXCEPT_SPECIFIED`, `SPECIFIED_REGIONS` or `NO_REGIONS`. When `ALL_REGIONS` or `ALL_REGIONS_EXCEPT_SPECIFIED` are used, Security Hub will automatically aggregate findings from new Regions as Security Hub supports them and you opt into them. 
- `specifiedRegions` - (Optional) List of regions to include or exclude (required if `linkingMode` is set to `ALL_REGIONS_EXCEPT_SPECIFIED` or `SPECIFIED_REGIONS`) @@ -181,4 +182,4 @@ Using `terraform import`, import an existing Security Hub finding aggregator usi % terraform import aws_securityhub_finding_aggregator.example arn:aws:securityhub:eu-west-1:123456789098:finding-aggregator/abcd1234-abcd-1234-1234-abcdef123456 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/securityhub_insight.html.markdown b/website/docs/cdktf/typescript/r/securityhub_insight.html.markdown index 111398b81899..590556723f55 100644 --- a/website/docs/cdktf/typescript/r/securityhub_insight.html.markdown +++ b/website/docs/cdktf/typescript/r/securityhub_insight.html.markdown @@ -221,8 +221,9 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -The following arguments are required: +This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `filters` - (Required) A configuration block including one or more (up to 10 distinct) attributes used to filter the findings included in the insight. The insight only includes findings that match criteria defined in the filters. See [filters](#filters) below for more details. * `groupByAttribute` - (Required) The attribute used to group the findings for the insight e.g., if an insight is grouped by `ResourceId`, then the insight produces a list of resource identifiers. * `name` - (Required) The name of the custom insight. @@ -420,4 +421,4 @@ Using `terraform import`, import Security Hub insights using the ARN. 
For exampl % terraform import aws_securityhub_insight.example arn:aws:securityhub:us-west-2:1234567890:insight/1234567890/custom/91299ed7-abd0-4e44-a858-d0b15e37141a ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/securityhub_invite_accepter.html.markdown b/website/docs/cdktf/typescript/r/securityhub_invite_accepter.html.markdown index 039b6e351d18..738cc03dc843 100644 --- a/website/docs/cdktf/typescript/r/securityhub_invite_accepter.html.markdown +++ b/website/docs/cdktf/typescript/r/securityhub_invite_accepter.html.markdown @@ -65,6 +65,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `masterId` - (Required) The account ID of the master Security Hub account whose invitation you're accepting. ## Attribute Reference @@ -105,4 +106,4 @@ Using `terraform import`, import Security Hub invite acceptance using the accoun % terraform import aws_securityhub_invite_accepter.example 123456789012 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/securityhub_member.html.markdown b/website/docs/cdktf/typescript/r/securityhub_member.html.markdown index 5214905a015e..43656176875f 100644 --- a/website/docs/cdktf/typescript/r/securityhub_member.html.markdown +++ b/website/docs/cdktf/typescript/r/securityhub_member.html.markdown @@ -49,6 +49,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `accountId` - (Required) The ID of the member AWS account. * `email` - (Optional) The email of the member AWS account. * `invite` - (Optional) Boolean whether to invite the account to Security Hub as a member. Defaults to `false`. @@ -89,4 +90,4 @@ Using `terraform import`, import Security Hub members using their account ID. Fo % terraform import aws_securityhub_member.example 123456789012 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/securityhub_organization_admin_account.html.markdown b/website/docs/cdktf/typescript/r/securityhub_organization_admin_account.html.markdown index 59b33e032992..8b22aeb20d00 100644 --- a/website/docs/cdktf/typescript/r/securityhub_organization_admin_account.html.markdown +++ b/website/docs/cdktf/typescript/r/securityhub_organization_admin_account.html.markdown @@ -62,6 +62,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `adminAccountId` - (Required) The AWS account identifier of the account to designate as the Security Hub administrator account. 
## Attribute Reference @@ -102,4 +103,4 @@ Using `terraform import`, import Security Hub Organization Admin Accounts using % terraform import aws_securityhub_organization_admin_account.example 123456789012 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/securityhub_organization_configuration.html.markdown b/website/docs/cdktf/typescript/r/securityhub_organization_configuration.html.markdown index b0cf9f18cc8b..07247d49c492 100644 --- a/website/docs/cdktf/typescript/r/securityhub_organization_configuration.html.markdown +++ b/website/docs/cdktf/typescript/r/securityhub_organization_configuration.html.markdown @@ -107,6 +107,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `autoEnable` - (Required) Whether to automatically enable Security Hub for new accounts in the organization. * `autoEnableStandards` - (Optional) Whether to automatically enable Security Hub default standards for new member accounts in the organization. By default, this parameter is equal to `DEFAULT`, and new member accounts are automatically enabled with default Security Hub standards. To opt out of enabling default standards for new member accounts, set this parameter equal to `NONE`. * `organizationConfiguration` - (Optional) Provides information about the way an organization is configured in Security Hub. 
@@ -161,4 +162,4 @@ Using `terraform import`, import an existing Security Hub enabled account using % terraform import aws_securityhub_organization_configuration.example 123456789012 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/securityhub_product_subscription.html.markdown b/website/docs/cdktf/typescript/r/securityhub_product_subscription.html.markdown index 414c5149059d..0fdc2f3d43b3 100644 --- a/website/docs/cdktf/typescript/r/securityhub_product_subscription.html.markdown +++ b/website/docs/cdktf/typescript/r/securityhub_product_subscription.html.markdown @@ -35,7 +35,7 @@ class MyConvertedCode extends TerraformStack { dependsOn: [example], productArn: "arn:aws:securityhub:${" + - current.name + + current.region + "}:733251395267:product/alertlogic/althreatmanagement", }); /*This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match.*/ @@ -49,6 +49,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `productArn` - (Required) The ARN of the product that generates findings that you want to import into Security Hub - see below. Amazon maintains a list of [Product integrations in AWS Security Hub](https://docs.aws.amazon.com/securityhub/latest/userguide/securityhub-findings-providers.html) that changes over time. 
Any of the products on the linked [Available AWS service integrations](https://docs.aws.amazon.com/securityhub/latest/userguide/securityhub-internal-providers.html) or [Available third-party partner product integrations](https://docs.aws.amazon.com/securityhub/latest/userguide/securityhub-partner-providers.html) can be configured using `aws_securityhub_product_subscription`. @@ -127,4 +128,4 @@ Using `terraform import`, import Security Hub product subscriptions using `produ % terraform import aws_securityhub_product_subscription.example arn:aws:securityhub:eu-west-1:733251395267:product/alertlogic/althreatmanagement,arn:aws:securityhub:eu-west-1:123456789012:product-subscription/alertlogic/althreatmanagement ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/securityhub_standards_control.html.markdown b/website/docs/cdktf/typescript/r/securityhub_standards_control.html.markdown index 8ddc8905df61..2a22c3f28aaa 100644 --- a/website/docs/cdktf/typescript/r/securityhub_standards_control.html.markdown +++ b/website/docs/cdktf/typescript/r/securityhub_standards_control.html.markdown @@ -62,21 +62,22 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `standardsControlArn` - (Required) The standards control ARN. See the AWS documentation for how to list existing controls using [`get-enabled-standards`](https://awscli.amazonaws.com/v2/documentation/api/latest/reference/securityhub/get-enabled-standards.html) and [`describe-standards-controls`](https://awscli.amazonaws.com/v2/documentation/api/latest/reference/securityhub/describe-standards-controls.html). 
-* `controlStatus` – (Required) The control status could be `ENABLED` or `DISABLED`. You have to specify `disabledReason` argument for `DISABLED` control status. -* `disabledReason` – (Optional) A description of the reason why you are disabling a security standard control. If you specify this attribute, `controlStatus` will be set to `DISABLED` automatically. +* `controlStatus` - (Required) The control status could be `ENABLED` or `DISABLED`. You have to specify `disabledReason` argument for `DISABLED` control status. +* `disabledReason` - (Optional) A description of the reason why you are disabling a security standard control. If you specify this attribute, `controlStatus` will be set to `DISABLED` automatically. ## Attribute Reference This resource exports the following attributes in addition to the arguments above: * `id` - The standard control ARN. -* `controlId` – The identifier of the security standard control. -* `controlStatusUpdatedAt` – The date and time that the status of the security standard control was most recently updated. -* `description` – The standard control longer description. Provides information about what the control is checking for. -* `relatedRequirements` – The list of requirements that are related to this control. -* `remediationUrl` – A link to remediation information for the control in the Security Hub user documentation. -* `severityRating` – The severity of findings generated from this security standard control. -* `title` – The standard control title. +* `controlId` - The identifier of the security standard control. +* `controlStatusUpdatedAt` - The date and time that the status of the security standard control was most recently updated. +* `description` - The standard control longer description. Provides information about what the control is checking for. +* `relatedRequirements` - The list of requirements that are related to this control. 
+* `remediationUrl` - A link to remediation information for the control in the Security Hub user documentation. +* `severityRating` - The severity of findings generated from this security standard control. +* `title` - The standard control title. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/securityhub_standards_control_association.html.markdown b/website/docs/cdktf/typescript/r/securityhub_standards_control_association.html.markdown index 774f3e6ee8f7..7386cf9d1ecb 100644 --- a/website/docs/cdktf/typescript/r/securityhub_standards_control_association.html.markdown +++ b/website/docs/cdktf/typescript/r/securityhub_standards_control_association.html.markdown @@ -125,10 +125,11 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `updatedReason` - (Optional) The reason for updating the control's enablement status in the standard. Required when `associationStatus` is `DISABLED`. ## Attribute Reference This resource exports no additional attributes. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/securityhub_standards_subscription.html.markdown b/website/docs/cdktf/typescript/r/securityhub_standards_subscription.html.markdown index c2217ee8e8c4..3ec8f533dcfe 100644 --- a/website/docs/cdktf/typescript/r/securityhub_standards_subscription.html.markdown +++ b/website/docs/cdktf/typescript/r/securityhub_standards_subscription.html.markdown @@ -39,7 +39,7 @@ class MyConvertedCode extends TerraformStack { dependsOn: [example], standardsArn: "arn:aws:securityhub:${" + - current.name + + current.region + "}::standards/pci-dss/v/3.2.1", }); } @@ -51,6 +51,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `standardsArn` - (Required) The ARN of a standard - see below. Currently available standards (remember to replace `${var.partition}` and `${var.region}` as appropriate): @@ -63,7 +64,9 @@ Currently available standards (remember to replace `${var.partition}` and `${var | CIS AWS Foundations Benchmark v1.4.0 | `arn:${var.partition}:securityhub:${var.region}::standards/cis-aws-foundations-benchmark/v/1.4.0` | | CIS AWS Foundations Benchmark v3.0.0 | `arn:${var.partition}:securityhub:${var.region}::standards/cis-aws-foundations-benchmark/v/3.0.0` | | NIST SP 800-53 Rev. 5 | `arn:${var.partition}:securityhub:${var.region}::standards/nist-800-53/v/5.0.0` | -| PCI DSS | `arn:${var.partition}:securityhub:${var.region}::standards/pci-dss/v/3.2.1` | +| NIST SP 800-171 Rev. 
2 | `arn:${var.partition}:securityhub:${var.region}::standards/nist-800-171/v/2.0.0` | +| PCI DSS v3.2.1 | `arn:${var.partition}:securityhub:${var.region}::standards/pci-dss/v/3.2.1` | +| PCI DSS v4.0.1 | `arn:${var.partition}:securityhub:${var.region}::standards/pci-dss/v/4.0.1` | ## Attribute Reference @@ -162,4 +165,4 @@ Using `terraform import`, import Security Hub standards subscriptions using the % terraform import aws_securityhub_standards_subscription.nist_800_53_rev_5 arn:aws:securityhub:eu-west-1:123456789012:subscription/nist-800-53/v/5.0.0 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/securitylake_aws_log_source.html.markdown b/website/docs/cdktf/typescript/r/securitylake_aws_log_source.html.markdown index 1c94f55de038..928d410ed240 100644 --- a/website/docs/cdktf/typescript/r/securitylake_aws_log_source.html.markdown +++ b/website/docs/cdktf/typescript/r/securitylake_aws_log_source.html.markdown @@ -51,6 +51,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `source` - (Required) Specify the natively-supported AWS service to add as a source in Security Lake. `source` supports the following: @@ -99,4 +100,4 @@ Using `terraform import`, import AWS log sources using the source name. 
For exam % terraform import aws_securitylake_aws_log_source.example ROUTE53 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/securitylake_custom_log_source.html.markdown b/website/docs/cdktf/typescript/r/securitylake_custom_log_source.html.markdown index d0c86258a6ae..ee55c7a13ded 100644 --- a/website/docs/cdktf/typescript/r/securitylake_custom_log_source.html.markdown +++ b/website/docs/cdktf/typescript/r/securitylake_custom_log_source.html.markdown @@ -60,6 +60,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `configuration` - (Required) The configuration for the third-party custom source. * `crawlerConfiguration` - (Required) The configuration for the Glue Crawler for the third-party custom source. * `roleArn` - (Required) The Amazon Resource Name (ARN) of the AWS Identity and Access Management (IAM) role to be used by the AWS Glue crawler. @@ -116,4 +117,4 @@ Using `terraform import`, import Custom log sources using the source name. 
For e % terraform import aws_securitylake_custom_log_source.example example-name ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/securitylake_data_lake.html.markdown b/website/docs/cdktf/typescript/r/securitylake_data_lake.html.markdown index e2271a334c17..4a35c08f9cdc 100644 --- a/website/docs/cdktf/typescript/r/securitylake_data_lake.html.markdown +++ b/website/docs/cdktf/typescript/r/securitylake_data_lake.html.markdown @@ -101,6 +101,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `metaStoreManagerRoleArn` - (Required) The Amazon Resource Name (ARN) used to create and update the AWS Glue table. This table contains partitions generated by the ingestion and normalization of AWS log sources and custom sources. * `configuration` - (Required) Specify the Region or Regions that will contribute data to the rollup region. * `tags` - (Optional) Key-value map of resource tags. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. @@ -153,6 +154,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. 
For example: + +```terraform +import { + to = aws_securitylake_data_lake.example + identity = { + "arn" = "arn:aws:securitylake:us-east-1:123456789012:data-lake/default" + } +} + +resource "aws_securitylake_data_lake" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the Security Lake data lake. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Security Hub standards subscriptions using the standards subscription ARN. For example: ```typescript @@ -183,4 +205,4 @@ Using `terraform import`, import Security Hub standards subscriptions using the % terraform import aws_securitylake_data_lake.example arn:aws:securitylake:eu-west-1:123456789012:data-lake/default ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/securitylake_subscriber.html.markdown b/website/docs/cdktf/typescript/r/securitylake_subscriber.html.markdown index 34fa2ea0f25e..2500764af5f4 100644 --- a/website/docs/cdktf/typescript/r/securitylake_subscriber.html.markdown +++ b/website/docs/cdktf/typescript/r/securitylake_subscriber.html.markdown @@ -16,6 +16,8 @@ Terraform resource for managing an AWS Security Lake Subscriber. ## Example Usage +### Basic Usage + ```typescript // DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug import { Construct } from "constructs"; @@ -54,10 +56,59 @@ class MyConvertedCode extends TerraformStack { ``` +### Multiple Log Sources + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. 
+ */ +import { SecuritylakeSubscriber } from "./.gen/providers/aws/securitylake-subscriber"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + new SecuritylakeSubscriber(this, "example", { + accessType: "S3", + dependsOn: [awsSecuritylakeDataLakeExample], + source: [ + { + awsLogSourceResource: [ + { + sourceName: "SH_FINDINGS", + sourceVersion: "2.0", + }, + ], + }, + { + awsLogSourceResource: [ + { + sourceName: "ROUTE53", + sourceVersion: "2.0", + }, + ], + }, + ], + subscriberIdentity: [ + { + externalId: "example", + principal: "1234567890", + }, + ], + subscriberName: "example-name", + }); + } +} + +``` + ## Argument Reference This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `accessType` - (Optional) The Amazon S3 or Lake Formation access type. * `source` - (Required) The supported AWS services from which logs and events are collected. Security Lake supports log and event collection for natively supported AWS services. See [`source` Blocks](#source-blocks) below. * `subscriberIdentity` - (Required) The AWS identity used to access your data. See [`subscriberIdentity` Block](#subscriber_identity-block) below. @@ -83,8 +134,8 @@ The `subscriberIdentity` block supports the following arguments: The `awsLogSourceResource` block supports the following arguments: -* `sourceName` - (Required) Provides data expiration details of Amazon Security Lake object. -* `sourceVersion` - (Optional) Provides data storage transition details of Amazon Security Lake object. +* `sourceName` - (Required) The name for an AWS source. This must be a Regionally unique value.
Valid values: `ROUTE53`, `VPC_FLOW`, `SH_FINDINGS`, `CLOUD_TRAIL_MGMT`, `LAMBDA_EXECUTION`, `S3_DATA`, `EKS_AUDIT` and `WAF`. +* `sourceVersion` - (Optional) The version for an AWS source. This must be a Regionally unique value. + ### `customLogSourceResource` Block @@ -169,4 +220,4 @@ Using `terraform import`, import Security Lake subscriber using the subscriber I % terraform import aws_securitylake_subscriber.example 9f3bfe79-d543-474d-a93c-f3846805d208 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/securitylake_subscriber_notification.html.markdown b/website/docs/cdktf/typescript/r/securitylake_subscriber_notification.html.markdown index f1c3568b192d..70cf9d8013fd 100644 --- a/website/docs/cdktf/typescript/r/securitylake_subscriber_notification.html.markdown +++ b/website/docs/cdktf/typescript/r/securitylake_subscriber_notification.html.markdown @@ -77,6 +77,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `subscriberId` - (Required) The subscriber ID for the notification subscription. * `configuration` - (Required) Specify the configuration with which you want to create the subscriber notification.
@@ -112,4 +113,4 @@ This resource exports the following attributes in addition to the arguments abov * `update` - (Default `180m`) * `delete` - (Default `90m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/serverlessapplicationrepository_cloudformation_stack.html.markdown b/website/docs/cdktf/typescript/r/serverlessapplicationrepository_cloudformation_stack.html.markdown index 22f89ee32185..97e67bc66b7a 100644 --- a/website/docs/cdktf/typescript/r/serverlessapplicationrepository_cloudformation_stack.html.markdown +++ b/website/docs/cdktf/typescript/r/serverlessapplicationrepository_cloudformation_stack.html.markdown @@ -43,7 +43,7 @@ class MyConvertedCode extends TerraformStack { parameters: { endpoint: "secretsmanager.${" + - dataAwsRegionCurrent.name + + dataAwsRegionCurrent.region + "}.${" + current.dnsSuffix + "}", @@ -60,6 +60,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The name of the stack to create. The resource deployed in AWS will be prefixed with `serverlessrepo-` * `applicationId` - (Required) The ARN of the application from the Serverless Application Repository. * `capabilities` - (Required) A list of capabilities. 
Valid values are `CAPABILITY_IAM`, `CAPABILITY_NAMED_IAM`, `CAPABILITY_RESOURCE_POLICY`, or `CAPABILITY_AUTO_EXPAND` @@ -107,4 +108,4 @@ Using `terraform import`, import Serverless Application Repository Stack using t % terraform import aws_serverlessapplicationrepository_cloudformation_stack.example serverlessrepo-postgres-rotator ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/service_discovery_http_namespace.html.markdown b/website/docs/cdktf/typescript/r/service_discovery_http_namespace.html.markdown index 7418d4ffe019..f2bc2e76e56f 100644 --- a/website/docs/cdktf/typescript/r/service_discovery_http_namespace.html.markdown +++ b/website/docs/cdktf/typescript/r/service_discovery_http_namespace.html.markdown @@ -37,6 +37,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The name of the http namespace. * `description` - (Optional) The description that you specify for the namespace when you create it. * `tags` - (Optional) A map of tags to assign to the namespace. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. 
@@ -82,4 +83,4 @@ Using `terraform import`, import Service Discovery HTTP Namespace using the name % terraform import aws_service_discovery_http_namespace.example ns-1234567890 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/service_discovery_instance.html.markdown b/website/docs/cdktf/typescript/r/service_discovery_instance.html.markdown index b774de7ff6e5..ef4cb6e8118b 100644 --- a/website/docs/cdktf/typescript/r/service_discovery_instance.html.markdown +++ b/website/docs/cdktf/typescript/r/service_discovery_instance.html.markdown @@ -135,6 +135,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `instanceId` - (Required, ForceNew) The ID of the service instance. * `serviceId` - (Required, ForceNew) The ID of the service that you want to use to create the instance. * `attributes` - (Required) A map contains the attributes of the instance. Check the [doc](https://docs.aws.amazon.com/cloud-map/latest/api/API_RegisterInstance.html#API_RegisterInstance_RequestSyntax) for the supported attributes and syntax. 
@@ -177,4 +178,4 @@ Using `terraform import`, import Service Discovery Instance using the service ID % terraform import aws_service_discovery_instance.example 0123456789/i-0123 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/service_discovery_private_dns_namespace.html.markdown b/website/docs/cdktf/typescript/r/service_discovery_private_dns_namespace.html.markdown index 256f20b03361..3594cb1609d4 100644 --- a/website/docs/cdktf/typescript/r/service_discovery_private_dns_namespace.html.markdown +++ b/website/docs/cdktf/typescript/r/service_discovery_private_dns_namespace.html.markdown @@ -47,6 +47,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The name of the namespace. * `vpc` - (Required) The ID of VPC that you want to associate the namespace with. * `description` - (Optional) The description that you specify for the namespace when you create it. 
@@ -93,4 +94,4 @@ Using `terraform import`, import Service Discovery Private DNS Namespace using t % terraform import aws_service_discovery_private_dns_namespace.example 0123456789:vpc-123345 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/service_discovery_public_dns_namespace.html.markdown b/website/docs/cdktf/typescript/r/service_discovery_public_dns_namespace.html.markdown index 3bdb54d7fac5..470b8536e978 100644 --- a/website/docs/cdktf/typescript/r/service_discovery_public_dns_namespace.html.markdown +++ b/website/docs/cdktf/typescript/r/service_discovery_public_dns_namespace.html.markdown @@ -39,6 +39,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The name of the namespace. * `description` - (Optional) The description that you specify for the namespace when you create it. * `tags` - (Optional) A map of tags to assign to the namespace. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. 
@@ -84,4 +85,4 @@ Using `terraform import`, import Service Discovery Public DNS Namespace using th % terraform import aws_service_discovery_public_dns_namespace.example 0123456789 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/service_discovery_service.html.markdown b/website/docs/cdktf/typescript/r/service_discovery_service.html.markdown index c36a40923687..60507434c6b6 100644 --- a/website/docs/cdktf/typescript/r/service_discovery_service.html.markdown +++ b/website/docs/cdktf/typescript/r/service_discovery_service.html.markdown @@ -119,6 +119,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required, Forces new resource) The name of the service. * `description` - (Optional) The description of the service. * `dnsConfig` - (Optional) A complex type that contains information about the resource record sets that you want Amazon Route 53 to create when you register an instance. See [`dnsConfig` Block](#dns_config-block) for details. @@ -156,7 +157,7 @@ The `healthCheckConfig` configuration block supports the following arguments: The `healthCheckCustomConfig` configuration block supports the following arguments: -* `failureThreshold` - (Optional, Forces new resource) The number of 30-second intervals that you want service discovery to wait before it changes the health status of a service instance. Maximum value of 10. +* `failureThreshold` - (Optional, **Deprecated** Forces new resource) The number of 30-second intervals that you want service discovery to wait before it changes the health status of a service instance. Value is always set to 1. 
## Attribute Reference @@ -164,7 +165,6 @@ This resource exports the following attributes in addition to the arguments abov * `id` - The ID of the service. * `arn` - The ARN of the service. -* `tagsAll` - A map of tags assigned to the resource, including those inherited from the provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). ## Import @@ -198,4 +198,4 @@ Using `terraform import`, import Service Discovery Service using the service ID. % terraform import aws_service_discovery_service.example 0123456789 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/servicecatalog_budget_resource_association.html.markdown b/website/docs/cdktf/typescript/r/servicecatalog_budget_resource_association.html.markdown index 0b72e1138878..785986e9d565 100644 --- a/website/docs/cdktf/typescript/r/servicecatalog_budget_resource_association.html.markdown +++ b/website/docs/cdktf/typescript/r/servicecatalog_budget_resource_association.html.markdown @@ -41,8 +41,9 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -The following arguments are required: +This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `budgetName` - (Required) Budget name. * `resourceId` - (Required) Resource identifier. 
@@ -92,4 +93,4 @@ Using `terraform import`, import `aws_servicecatalog_budget_resource_association % terraform import aws_servicecatalog_budget_resource_association.example budget-pjtvyakdlyo3m:prod-dnigbtea24ste ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/servicecatalog_constraint.html.markdown b/website/docs/cdktf/typescript/r/servicecatalog_constraint.html.markdown index d14ca2af8ed3..7e9884fc3a8c 100644 --- a/website/docs/cdktf/typescript/r/servicecatalog_constraint.html.markdown +++ b/website/docs/cdktf/typescript/r/servicecatalog_constraint.html.markdown @@ -57,6 +57,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `acceptLanguage` - (Optional) Language code. Valid values: `en` (English), `jp` (Japanese), `zh` (Chinese). Default value is `en`. * `description` - (Optional) Description of the constraint. 
@@ -142,4 +143,4 @@ Using `terraform import`, import `aws_servicecatalog_constraint` using the const % terraform import aws_servicecatalog_constraint.example cons-nmdkb6cgxfcrs ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/servicecatalog_portfolio.html.markdown b/website/docs/cdktf/typescript/r/servicecatalog_portfolio.html.markdown index 58fe9400bddd..80bc4fabd515 100644 --- a/website/docs/cdktf/typescript/r/servicecatalog_portfolio.html.markdown +++ b/website/docs/cdktf/typescript/r/servicecatalog_portfolio.html.markdown @@ -40,6 +40,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The name of the portfolio. * `description` - (Required) Description of the portfolio * `providerName` - (Required) Name of the person or organization who owns the portfolio. 
@@ -93,4 +94,4 @@ Using `terraform import`, import Service Catalog Portfolios using the Service Ca % terraform import aws_servicecatalog_portfolio.testfolio port-12344321 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/servicecatalog_portfolio_share.html.markdown b/website/docs/cdktf/typescript/r/servicecatalog_portfolio_share.html.markdown index 825f7e410d05..f73762248bb3 100644 --- a/website/docs/cdktf/typescript/r/servicecatalog_portfolio_share.html.markdown +++ b/website/docs/cdktf/typescript/r/servicecatalog_portfolio_share.html.markdown @@ -56,6 +56,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `acceptLanguage` - (Optional) Language code. Valid values: `en` (English), `jp` (Japanese), `zh` (Chinese). Default value is `en`. * `sharePrincipals` - (Optional) Enables or disables Principal sharing when creating the portfolio share. If this flag is not provided, principal sharing is disabled. * `shareTagOptions` - (Optional) Whether to enable sharing of `aws_servicecatalog_tag_option` resources when creating the portfolio share. 
@@ -108,4 +109,4 @@ Using `terraform import`, import `aws_servicecatalog_portfolio_share` using the % terraform import aws_servicecatalog_portfolio_share.example port-12344321:ACCOUNT:123456789012 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/servicecatalog_principal_portfolio_association.html.markdown b/website/docs/cdktf/typescript/r/servicecatalog_principal_portfolio_association.html.markdown index 23de99525afd..fd69fb02392e 100644 --- a/website/docs/cdktf/typescript/r/servicecatalog_principal_portfolio_association.html.markdown +++ b/website/docs/cdktf/typescript/r/servicecatalog_principal_portfolio_association.html.markdown @@ -46,6 +46,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `acceptLanguage` - (Optional) Language code. Valid values: `en` (English), `jp` (Japanese), `zh` (Chinese). Default value is `en`. * `principalType` - (Optional) Principal type. Setting this argument empty (e.g., `principal_type = ""`) will result in an error. Valid values are `IAM` and `IAM_PATTERN`. Default is `IAM`. 
@@ -95,4 +96,4 @@ Using `terraform import`, import `aws_servicecatalog_principal_portfolio_associa % terraform import aws_servicecatalog_principal_portfolio_association.example en,arn:aws:iam::123456789012:user/Eleanor,port-68656c6c6f,IAM ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/servicecatalog_product.html.markdown b/website/docs/cdktf/typescript/r/servicecatalog_product.html.markdown index 754cf8534c65..20d697d6e5d9 100644 --- a/website/docs/cdktf/typescript/r/servicecatalog_product.html.markdown +++ b/website/docs/cdktf/typescript/r/servicecatalog_product.html.markdown @@ -60,6 +60,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `acceptLanguage` - (Optional) Language code. Valid values: `en` (English), `jp` (Japanese), `zh` (Chinese). Default value is `en`. * `description` - (Optional) Description of the product. * `distributor` - (Optional) Distributor (i.e., vendor) of the product. 
@@ -131,4 +132,4 @@ Using `terraform import`, import `aws_servicecatalog_product` using the product % terraform import aws_servicecatalog_product.example prod-dnigbtea24ste ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/servicecatalog_product_portfolio_association.html.markdown b/website/docs/cdktf/typescript/r/servicecatalog_product_portfolio_association.html.markdown index ff4f409c243b..292cc8c641ed 100644 --- a/website/docs/cdktf/typescript/r/servicecatalog_product_portfolio_association.html.markdown +++ b/website/docs/cdktf/typescript/r/servicecatalog_product_portfolio_association.html.markdown @@ -46,6 +46,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `acceptLanguage` - (Optional) Language code. Valid values: `en` (English), `jp` (Japanese), `zh` (Chinese). Default value is `en`. * `sourcePortfolioId` - (Optional) Identifier of the source portfolio. 
@@ -93,4 +94,4 @@ Using `terraform import`, import `aws_servicecatalog_product_portfolio_associati % terraform import aws_servicecatalog_product_portfolio_association.example en:port-68656c6c6f:prod-dnigbtea24ste ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/servicecatalog_provisioned_product.html.markdown b/website/docs/cdktf/typescript/r/servicecatalog_provisioned_product.html.markdown index eae9f68d505d..82394e065cef 100644 --- a/website/docs/cdktf/typescript/r/servicecatalog_provisioned_product.html.markdown +++ b/website/docs/cdktf/typescript/r/servicecatalog_provisioned_product.html.markdown @@ -63,6 +63,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `acceptLanguage` - (Optional) Language code. Valid values: `en` (English), `jp` (Japanese), `zh` (Chinese). Default value is `en`. * `ignoreErrors` - (Optional) _Only applies to deleting._ If set to `true`, AWS Service Catalog stops managing the specified provisioned product even if it cannot delete the underlying resources. The default value is `false`. * `notificationArns` - (Optional) Passed to CloudFormation. The SNS topic ARNs to which to publish stack-related events. 
@@ -168,4 +169,4 @@ Using `terraform import`, import `aws_servicecatalog_provisioned_product` using % terraform import aws_servicecatalog_provisioned_product.example pp-dnigbtea24ste ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/servicecatalog_provisioning_artifact.html.markdown b/website/docs/cdktf/typescript/r/servicecatalog_provisioning_artifact.html.markdown index d936bb516b0c..3dd4d73ca7bf 100644 --- a/website/docs/cdktf/typescript/r/servicecatalog_provisioning_artifact.html.markdown +++ b/website/docs/cdktf/typescript/r/servicecatalog_provisioning_artifact.html.markdown @@ -60,6 +60,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `acceptLanguage` - (Optional) Language code. Valid values: `en` (English), `jp` (Japanese), `zh` (Chinese). The default value is `en`. * `active` - (Optional) Whether the product version is active. Inactive provisioning artifacts are invisible to end users. End users cannot launch or update a provisioned product from an inactive provisioning artifact. Default is `true`. * `description` - (Optional) Description of the provisioning artifact (i.e., version), including how it differs from the previous provisioning artifact. 
@@ -118,4 +119,4 @@ Using `terraform import`, import `aws_servicecatalog_provisioning_artifact` usin % terraform import aws_servicecatalog_provisioning_artifact.example pa-ij2b6lusy6dec:prod-el3an0rma3 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/servicecatalog_service_action.html.markdown b/website/docs/cdktf/typescript/r/servicecatalog_service_action.html.markdown index 2e4a3173a2c9..b5fe73122c8a 100644 --- a/website/docs/cdktf/typescript/r/servicecatalog_service_action.html.markdown +++ b/website/docs/cdktf/typescript/r/servicecatalog_service_action.html.markdown @@ -53,6 +53,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `acceptLanguage` - (Optional) Language code. Valid values are `en` (English), `jp` (Japanese), and `zh` (Chinese). Default is `en`. * `description` - (Optional) Self-service action description. 
@@ -113,4 +114,4 @@ Using `terraform import`, import `aws_servicecatalog_service_action` using the s % terraform import aws_servicecatalog_service_action.example act-f1w12eperfslh ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/servicecatalog_tag_option.html.markdown b/website/docs/cdktf/typescript/r/servicecatalog_tag_option.html.markdown index fe1d3c3f6232..a3d4fe30ce90 100644 --- a/website/docs/cdktf/typescript/r/servicecatalog_tag_option.html.markdown +++ b/website/docs/cdktf/typescript/r/servicecatalog_tag_option.html.markdown @@ -46,6 +46,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `active` - (Optional) Whether tag option is active. Default is `true`. 
## Attribute Reference @@ -96,4 +97,4 @@ Using `terraform import`, import `aws_servicecatalog_tag_option` using the tag o % terraform import aws_servicecatalog_tag_option.example tag-pjtvagohlyo3m ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/servicecatalog_tag_option_resource_association.html.markdown b/website/docs/cdktf/typescript/r/servicecatalog_tag_option_resource_association.html.markdown index 0daf24cc5797..2e80613ccd8f 100644 --- a/website/docs/cdktf/typescript/r/servicecatalog_tag_option_resource_association.html.markdown +++ b/website/docs/cdktf/typescript/r/servicecatalog_tag_option_resource_association.html.markdown @@ -41,8 +41,9 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -The following arguments are required: +This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `resourceId` - (Required) Resource identifier. * `tagOptionId` - (Required) Tag Option identifier. 
@@ -96,4 +97,4 @@ Using `terraform import`, import `aws_servicecatalog_tag_option_resource_associa % terraform import aws_servicecatalog_tag_option_resource_association.example tag-pjtvyakdlyo3m:prod-dnigbtea24ste ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/servicecatalogappregistry_application.html.markdown b/website/docs/cdktf/typescript/r/servicecatalogappregistry_application.html.markdown index 85802f6778e2..b4b36b50d9d3 100644 --- a/website/docs/cdktf/typescript/r/servicecatalogappregistry_application.html.markdown +++ b/website/docs/cdktf/typescript/r/servicecatalogappregistry_application.html.markdown @@ -72,6 +72,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` - (Optional) Description of the application. * `tags` - (Optional) A map of tags assigned to the Application. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. 
@@ -116,4 +117,4 @@ Using `terraform import`, import AWS Service Catalog AppRegistry Application usi % terraform import aws_servicecatalogappregistry_application.example application-id-12345678 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/servicecatalogappregistry_attribute_group.html.markdown b/website/docs/cdktf/typescript/r/servicecatalogappregistry_attribute_group.html.markdown index d90248ea5388..13cc432a149e 100644 --- a/website/docs/cdktf/typescript/r/servicecatalogappregistry_attribute_group.html.markdown +++ b/website/docs/cdktf/typescript/r/servicecatalogappregistry_attribute_group.html.markdown @@ -18,20 +18,22 @@ Terraform resource for managing an AWS Service Catalog AppRegistry Attribute Gro ```typescript // DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug import { Construct } from "constructs"; -import { Fn, TerraformStack } from "cdktf"; +import { Fn, Token, TerraformStack } from "cdktf"; /* * Provider bindings are generated by running `cdktf get`. * See https://cdk.tf/provider-generation for more details. 
*/ -import { ServicecatalogappregistryAttributeGroup } from "./.gen/providers/aws/"; +import { ServicecatalogappregistryAttributeGroup } from "./.gen/providers/aws/servicecatalogappregistry-attribute-group"; class MyConvertedCode extends TerraformStack { constructor(scope: Construct, name: string) { super(scope, name); new ServicecatalogappregistryAttributeGroup(this, "example", { - attributes: Fn.jsonencode({ - app: "exampleapp", - group: "examplegroup", - }), + attributes: Token.asString( + Fn.jsonencode({ + app: "exampleapp", + group: "examplegroup", + }) + ), description: "example description", name: "example", }); @@ -49,6 +51,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` - (Optional) Description of the Attribute Group. * `tags` - (Optional) A map of tags assigned to the Attribute Group. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. @@ -72,7 +75,7 @@ import { TerraformStack } from "cdktf"; * Provider bindings are generated by running `cdktf get`. * See https://cdk.tf/provider-generation for more details. 
*/ -import { ServicecatalogappregistryAttributeGroup } from "./.gen/providers/aws/"; +import { ServicecatalogappregistryAttributeGroup } from "./.gen/providers/aws/servicecatalogappregistry-attribute-group"; class MyConvertedCode extends TerraformStack { constructor(scope: Construct, name: string) { super(scope, name); @@ -92,4 +95,4 @@ Using `terraform import`, import Service Catalog AppRegistry Attribute Group usi % terraform import aws_servicecatalogappregistry_attribute_group.example 1234567890abcfedhijk09876s ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/servicecatalogappregistry_attribute_group_association.html.markdown b/website/docs/cdktf/typescript/r/servicecatalogappregistry_attribute_group_association.html.markdown index 07988fcdd4cf..0acf327b8492 100644 --- a/website/docs/cdktf/typescript/r/servicecatalogappregistry_attribute_group_association.html.markdown +++ b/website/docs/cdktf/typescript/r/servicecatalogappregistry_attribute_group_association.html.markdown @@ -69,8 +69,9 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -The following arguments are required: +This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `applicationId` - (Required) ID of the application. * `attributeGroupId` - (Required) ID of the attribute group to associate with the application. 
@@ -110,4 +111,4 @@ Using `terraform import`, import Service Catalog AppRegistry Attribute Group Ass % terraform import aws_servicecatalogappregistry_attribute_group_association.example 12456778723424sdffsdfsdq34,12234t3564dsfsdf34asff4ww3 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/servicequotas_service_quota.html.markdown b/website/docs/cdktf/typescript/r/servicequotas_service_quota.html.markdown index 19a1483eac82..e3459b5552f8 100644 --- a/website/docs/cdktf/typescript/r/servicequotas_service_quota.html.markdown +++ b/website/docs/cdktf/typescript/r/servicequotas_service_quota.html.markdown @@ -42,6 +42,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `quotaCode` - (Required) Code of the service quota to track. For example: `L-F678F1CE`. Available values can be found with the [AWS CLI service-quotas list-service-quotas command](https://docs.aws.amazon.com/cli/latest/reference/service-quotas/list-service-quotas.html). * `serviceCode` - (Required) Code of the service to track. For example: `vpc`. Available values can be found with the [AWS CLI service-quotas list-services command](https://docs.aws.amazon.com/cli/latest/reference/service-quotas/list-services.html). * `value` - (Required) Float specifying the desired value for the service quota. If the desired value is higher than the current value, a quota increase request is submitted. When a known request is submitted and pending, the value reflects the desired value of the pending request. 
@@ -102,4 +103,4 @@ Using `terraform import`, import `aws_servicequotas_service_quota` using the ser % terraform import aws_servicequotas_service_quota.example vpc/L-F678F1CE ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/servicequotas_template.html.markdown b/website/docs/cdktf/typescript/r/servicequotas_template.html.markdown index e1271a83c21c..a02e63e29ecf 100644 --- a/website/docs/cdktf/typescript/r/servicequotas_template.html.markdown +++ b/website/docs/cdktf/typescript/r/servicequotas_template.html.markdown @@ -6,6 +6,7 @@ description: |- Terraform resource for managing an AWS Service Quotas Template. --- + # Resource: aws_servicequotas_template @@ -30,8 +31,8 @@ class MyConvertedCode extends TerraformStack { constructor(scope: Construct, name: string) { super(scope, name); new ServicequotasTemplate(this, "example", { + awsRegion: "us-east-1", quotaCode: "L-2ACBD22F", - region: "us-east-1", serviceCode: "lambda", value: Token.asNumber("80"), }); @@ -42,9 +43,10 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -The following arguments are required: +This resource supports the following arguments: -* `region` - (Required) AWS Region to which the template applies. +* `awsRegion` - (Optional) AWS Region to which the template applies. +* `region` - (Optional, **Deprecated**) AWS Region to which the template applies. Use `awsRegion` instead. * `quotaCode` - (Required) Quota identifier. To find the quota code for a specific quota, use the [aws_servicequotas_service_quota](../d/servicequotas_service_quota.html.markdown) data source. * `serviceCode` - (Required) Service identifier. To find the service code value for an AWS service, use the [aws_servicequotas_service](../d/servicequotas_service.html.markdown) data source. * `value` - (Required) The new, increased value for the quota. @@ -91,4 +93,4 @@ Using `terraform import`, import Service Quotas Template using the `id`. 
For exa % terraform import aws_servicequotas_template.example us-east-1,L-2ACBD22F,lambda ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/servicequotas_template_association.html.markdown b/website/docs/cdktf/typescript/r/servicequotas_template_association.html.markdown index d8ea3080759b..7a2a813672d7 100644 --- a/website/docs/cdktf/typescript/r/servicequotas_template_association.html.markdown +++ b/website/docs/cdktf/typescript/r/servicequotas_template_association.html.markdown @@ -39,6 +39,7 @@ class MyConvertedCode extends TerraformStack { The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `skipDestroy` - (Optional) Skip disassociating the quota increase template upon destruction. This will remove the resource from Terraform state, but leave the remote association in place. 
## Attribute Reference @@ -80,4 +81,4 @@ Using `terraform import`, import Service Quotas Template Association using the ` % terraform import aws_servicequotas_template_association.example 123456789012 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/ses_active_receipt_rule_set.html.markdown b/website/docs/cdktf/typescript/r/ses_active_receipt_rule_set.html.markdown index fb7d38f39ee9..49c0c1258ac0 100644 --- a/website/docs/cdktf/typescript/r/ses_active_receipt_rule_set.html.markdown +++ b/website/docs/cdktf/typescript/r/ses_active_receipt_rule_set.html.markdown @@ -38,6 +38,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `ruleSetName` - (Required) The name of the rule set ## Attribute Reference @@ -79,4 +80,4 @@ Using `terraform import`, import active SES receipt rule sets using the rule set % terraform import aws_ses_active_receipt_rule_set.my_rule_set my_rule_set_name ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/ses_configuration_set.html.markdown b/website/docs/cdktf/typescript/r/ses_configuration_set.html.markdown index cc136657e441..e428cdbbc2a1 100644 --- a/website/docs/cdktf/typescript/r/ses_configuration_set.html.markdown +++ b/website/docs/cdktf/typescript/r/ses_configuration_set.html.markdown @@ -94,6 +94,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `deliveryOptions` - (Optional) Whether messages that use the configuration set are required to use TLS. See below. * `reputationMetricsEnabled` - (Optional) Whether or not Amazon SES publishes reputation metrics for the configuration set, such as bounce and complaint rates, to Amazon CloudWatch. The default value is `false`. * `sendingEnabled` - (Optional) Whether email sending is enabled or disabled for the configuration set. The default value is `true`. @@ -147,4 +148,4 @@ Using `terraform import`, import SES Configuration Sets using their `name`. For % terraform import aws_ses_configuration_set.test some-configuration-set-test ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/ses_domain_dkim.html.markdown b/website/docs/cdktf/typescript/r/ses_domain_dkim.html.markdown index f86ff5786d60..ad3cc2f2cca7 100644 --- a/website/docs/cdktf/typescript/r/ses_domain_dkim.html.markdown +++ b/website/docs/cdktf/typescript/r/ses_domain_dkim.html.markdown @@ -18,6 +18,7 @@ Domain ownership needs to be confirmed first using [ses_domain_identity Resource This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `domain` - (Required) Verified domain name to generate DKIM tokens for. ## Attribute Reference @@ -114,4 +115,4 @@ Using `terraform import`, import DKIM tokens using the `domain` attribute. 
For e % terraform import aws_ses_domain_dkim.example example.com ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/ses_domain_identity.html.markdown b/website/docs/cdktf/typescript/r/ses_domain_identity.html.markdown index 551c811759c3..beb6b417a7c5 100644 --- a/website/docs/cdktf/typescript/r/ses_domain_identity.html.markdown +++ b/website/docs/cdktf/typescript/r/ses_domain_identity.html.markdown @@ -70,6 +70,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `domain` - (Required) The domain name to assign to SES ## Attribute Reference @@ -107,4 +108,4 @@ Using `terraform import`, import SES domain identities using the domain name. Fo % terraform import aws_ses_domain_identity.example example.com ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/ses_domain_identity_verification.html.markdown b/website/docs/cdktf/typescript/r/ses_domain_identity_verification.html.markdown index 0ffb2314b1df..9e07485832b0 100644 --- a/website/docs/cdktf/typescript/r/ses_domain_identity_verification.html.markdown +++ b/website/docs/cdktf/typescript/r/ses_domain_identity_verification.html.markdown @@ -61,6 +61,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
* `domain` - (Required) The domain name of the SES domain identity to verify. ## Attribute Reference @@ -76,4 +77,4 @@ This resource exports the following attributes in addition to the arguments abov - `create` - (Default `45m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/ses_domain_mail_from.html.markdown b/website/docs/cdktf/typescript/r/ses_domain_mail_from.html.markdown index 71ca596a0c7f..dfbaf8bcd855 100644 --- a/website/docs/cdktf/typescript/r/ses_domain_mail_from.html.markdown +++ b/website/docs/cdktf/typescript/r/ses_domain_mail_from.html.markdown @@ -106,6 +106,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `behaviorOnMxFailure` - (Optional) The action that you want Amazon SES to take if it cannot successfully read the required MX record when you send an email. Defaults to `UseDefaultValue`. See the [SES API documentation](https://docs.aws.amazon.com/ses/latest/APIReference/API_SetIdentityMailFromDomain.html) for more information. ## Attribute Reference @@ -142,4 +143,4 @@ Using `terraform import`, import MAIL FROM domain using the `domain` attribute. 
% terraform import aws_ses_domain_mail_from.example example.com ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/ses_email_identity.html.markdown b/website/docs/cdktf/typescript/r/ses_email_identity.html.markdown index 0e050588029b..d02e4d634fdf 100644 --- a/website/docs/cdktf/typescript/r/ses_email_identity.html.markdown +++ b/website/docs/cdktf/typescript/r/ses_email_identity.html.markdown @@ -16,6 +16,7 @@ Provides an SES email identity resource This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `email` - (Required) The email address to assign to SES. ## Attribute Reference @@ -78,4 +79,4 @@ Using `terraform import`, import SES email identities using the email address. F % terraform import aws_ses_email_identity.example email@example.com ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/ses_event_destination.html.markdown b/website/docs/cdktf/typescript/r/ses_event_destination.html.markdown index 85fc8c9589ad..e1f3898b4b31 100644 --- a/website/docs/cdktf/typescript/r/ses_event_destination.html.markdown +++ b/website/docs/cdktf/typescript/r/ses_event_destination.html.markdown @@ -107,6 +107,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
* `name` - (Required) The name of the event destination * `configurationSetName` - (Required) The name of the configuration set * `enabled` - (Optional) If true, the event destination will be enabled @@ -171,4 +172,4 @@ Using `terraform import`, import SES event destinations using `configurationSetN % terraform import aws_ses_event_destination.sns some-configuration-set-test/event-destination-sns ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/ses_identity_notification_topic.html.markdown b/website/docs/cdktf/typescript/r/ses_identity_notification_topic.html.markdown index bbfe04f0249d..ddfe2b88bdbd 100644 --- a/website/docs/cdktf/typescript/r/ses_identity_notification_topic.html.markdown +++ b/website/docs/cdktf/typescript/r/ses_identity_notification_topic.html.markdown @@ -41,6 +41,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `topicArn` - (Optional) The Amazon Resource Name (ARN) of the Amazon SNS topic. Can be set to `""` (an empty string) to disable publishing. * `notificationType` - (Required) The type of notifications that will be published to the specified Amazon SNS topic. Valid Values: `Bounce`, `Complaint` or `Delivery`. * `identity` - (Required) The identity for which the Amazon SNS topic will be set. You can specify an identity by using its name or by using its Amazon Resource Name (ARN). 
@@ -82,4 +83,4 @@ Using `terraform import`, import Identity Notification Topics using the ID of th % terraform import aws_ses_identity_notification_topic.test 'example.com|Bounce' ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/ses_identity_policy.html.markdown b/website/docs/cdktf/typescript/r/ses_identity_policy.html.markdown index 56a872d934e0..3a970aa58b90 100644 --- a/website/docs/cdktf/typescript/r/ses_identity_policy.html.markdown +++ b/website/docs/cdktf/typescript/r/ses_identity_policy.html.markdown @@ -71,6 +71,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `identity` - (Required) Name or Amazon Resource Name (ARN) of the SES Identity. * `name` - (Required) Name of the policy. * `policy` - (Required) JSON string of the policy. For more information about building AWS IAM policy documents with Terraform, see the [AWS IAM Policy Document Guide](https://learn.hashicorp.com/terraform/aws/iam-policy). 
@@ -111,4 +112,4 @@ Using `terraform import`, import SES Identity Policies using the identity and po % terraform import aws_ses_identity_policy.example 'example.com|example' ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/ses_receipt_filter.html.markdown b/website/docs/cdktf/typescript/r/ses_receipt_filter.html.markdown index 1916da35e658..d8544292af55 100644 --- a/website/docs/cdktf/typescript/r/ses_receipt_filter.html.markdown +++ b/website/docs/cdktf/typescript/r/ses_receipt_filter.html.markdown @@ -40,6 +40,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The name of the filter * `cidr` - (Required) The IP address or address range to filter, in CIDR notation * `policy` - (Required) Block or Allow @@ -79,4 +80,4 @@ Using `terraform import`, import SES Receipt Filter using their `name`. For exam % terraform import aws_ses_receipt_filter.test some-filter ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/ses_receipt_rule.html.markdown b/website/docs/cdktf/typescript/r/ses_receipt_rule.html.markdown index 6f6023a193f8..4da9ae75358f 100644 --- a/website/docs/cdktf/typescript/r/ses_receipt_rule.html.markdown +++ b/website/docs/cdktf/typescript/r/ses_receipt_rule.html.markdown @@ -55,6 +55,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The name of the rule * `ruleSetName` - (Required) The name of the rule set * `after` - (Optional) The name of the rule to place this rule after @@ -158,4 +159,4 @@ Using `terraform import`, import SES receipt rules using the ruleset name and ru % terraform import aws_ses_receipt_rule.my_rule my_rule_set:my_rule ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/ses_receipt_rule_set.html.markdown b/website/docs/cdktf/typescript/r/ses_receipt_rule_set.html.markdown index 8a4403239c0d..3b87c76043af 100644 --- a/website/docs/cdktf/typescript/r/ses_receipt_rule_set.html.markdown +++ b/website/docs/cdktf/typescript/r/ses_receipt_rule_set.html.markdown @@ -38,6 +38,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `ruleSetName` - (Required) Name of the rule set. ## Attribute Reference @@ -79,4 +80,4 @@ Using `terraform import`, import SES receipt rule sets using the rule set name. 
% terraform import aws_ses_receipt_rule_set.my_rule_set my_rule_set_name ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/ses_template.html.markdown b/website/docs/cdktf/typescript/r/ses_template.html.markdown index 813e14f40c80..f6bde4884279 100644 --- a/website/docs/cdktf/typescript/r/ses_template.html.markdown +++ b/website/docs/cdktf/typescript/r/ses_template.html.markdown @@ -41,6 +41,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The name of the template. Cannot exceed 64 characters. You will refer to this name when you send email. * `html` - (Optional) The HTML body of the email. Must be less than 500KB in size, including both the text and HTML parts. * `subject` - (Optional) The subject line of the email. @@ -81,4 +82,4 @@ Using `terraform import`, import SES templates using the template name. 
For exam % terraform import aws_ses_template.MyTemplate MyTemplate ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/sesv2_account_suppression_attributes.html.markdown b/website/docs/cdktf/typescript/r/sesv2_account_suppression_attributes.html.markdown index 41db2e5ab239..d5cf5d29081f 100644 --- a/website/docs/cdktf/typescript/r/sesv2_account_suppression_attributes.html.markdown +++ b/website/docs/cdktf/typescript/r/sesv2_account_suppression_attributes.html.markdown @@ -36,8 +36,9 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -The following arguments are required: +This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `suppressedReasons` - (Required) A list that contains the reasons that email addresses will be automatically added to the suppression list for your account. Valid values: `COMPLAINT`, `BOUNCE`. 
## Attribute Reference @@ -76,4 +77,4 @@ Using `terraform import`, import account-level suppression attributes using the % terraform import aws_sesv2_account_suppression_attributes.example 123456789012 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/sesv2_account_vdm_attributes.html.markdown b/website/docs/cdktf/typescript/r/sesv2_account_vdm_attributes.html.markdown index 6a8ffad05209..70a9f2a6e60c 100644 --- a/website/docs/cdktf/typescript/r/sesv2_account_vdm_attributes.html.markdown +++ b/website/docs/cdktf/typescript/r/sesv2_account_vdm_attributes.html.markdown @@ -50,6 +50,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `dashboardAttributes` - (Optional) Specifies additional settings for your VDM configuration as applicable to the Dashboard. * `guardianAttributes` - (Optional) Specifies additional settings for your VDM configuration as applicable to the Guardian. 
@@ -97,4 +98,4 @@ Using `terraform import`, import SESv2 (Simple Email V2) Account VDM Attributes % terraform import aws_sesv2_account_vdm_attributes.example ses-account-vdm-attributes ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/sesv2_configuration_set.html.markdown b/website/docs/cdktf/typescript/r/sesv2_configuration_set.html.markdown index bf167638ff95..05ff88b8d2b8 100644 --- a/website/docs/cdktf/typescript/r/sesv2_configuration_set.html.markdown +++ b/website/docs/cdktf/typescript/r/sesv2_configuration_set.html.markdown @@ -57,6 +57,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `configurationSetName` - (Required) The name of the configuration set. * `deliveryOptions` - (Optional) An object that defines the dedicated IP pool that is used to send emails that you send using the configuration set. See [`deliveryOptions` Block](#delivery_options-block) for details. * `reputationOptions` - (Optional) An object that defines whether or not Amazon SES collects reputation metrics for the emails that you send that use the configuration set. See [`reputationOptions` Block](#reputation_options-block) for details. 
@@ -154,4 +155,4 @@ Using `terraform import`, import SESv2 (Simple Email V2) Configuration Set using % terraform import aws_sesv2_configuration_set.example example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/sesv2_configuration_set_event_destination.html.markdown b/website/docs/cdktf/typescript/r/sesv2_configuration_set_event_destination.html.markdown index 45ac055eb7eb..d5d5239c3199 100644 --- a/website/docs/cdktf/typescript/r/sesv2_configuration_set_event_destination.html.markdown +++ b/website/docs/cdktf/typescript/r/sesv2_configuration_set_event_destination.html.markdown @@ -217,8 +217,9 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -The following arguments are required: +This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `configurationSetName` - (Required) The name of the configuration set. * `eventDestination` - (Required) A name that identifies the event destination within the configuration set. * `eventDestinationName` - (Required) An object that defines the event destination. See [`eventDestination` Block](#event_destination-block) for details. @@ -230,7 +231,7 @@ The `eventDestination` configuration block supports the following arguments: * `matchingEventTypes` - (Required) - An array that specifies which events the Amazon SES API v2 should send to the destinations. Valid values: `SEND`, `REJECT`, `BOUNCE`, `COMPLAINT`, `DELIVERY`, `OPEN`, `CLICK`, `RENDERING_FAILURE`, `DELIVERY_DELAY`, `SUBSCRIPTION`. * `cloudWatchDestination` - (Optional) An object that defines an Amazon CloudWatch destination for email events. 
See [`cloudWatchDestination` Block](#cloud_watch_destination-block) for details. * `enabled` - (Optional) When the event destination is enabled, the specified event types are sent to the destinations. Default: `false`. -* `event_bridge_configuration` - (Optional) An object that defines an Amazon EventBridge destination for email events. You can use Amazon EventBridge to send notifications when certain email events occur. See [`event_bridge_configuration` Block](#event_bridge_configuration-block) for details. +* `eventBridgeDestination` - (Optional) An object that defines an Amazon EventBridge destination for email events. You can use Amazon EventBridge to send notifications when certain email events occur. See [`eventBridgeDestination` Block](#event_bridge_destination-block) for details. * `kinesisFirehoseDestination` - (Optional) An object that defines an Amazon Kinesis Data Firehose destination for email events. See [`kinesisFirehoseDestination` Block](#kinesis_firehose_destination-block) for details. * `pinpointDestination` - (Optional) An object that defines an Amazon Pinpoint project destination for email events. See [`pinpointDestination` Block](#pinpoint_destination-block) for details. * `snsDestination` - (Optional) An object that defines an Amazon SNS destination for email events. See [`snsDestination` Block](#sns_destination-block) for details. @@ -249,9 +250,9 @@ The `dimensionConfiguration` configuration block supports the following argument * `dimensionName` - (Required) The name of an Amazon CloudWatch dimension associated with an email sending metric. * `dimensionValueSource` - (Required) The location where the Amazon SES API v2 finds the value of a dimension to publish to Amazon CloudWatch. Valid values: `MESSAGE_TAG`, `EMAIL_HEADER`, `LINK_TAG`. 
-### `event_bridge_configuration` Block +### `eventBridgeDestination` Block -The `event_bridge_configuration` configuration block supports the following arguments: +The `eventBridgeDestination` configuration block supports the following arguments: * `eventBusArn` - (Required) The Amazon Resource Name (ARN) of the Amazon EventBridge bus to publish email events to. Only the default bus is supported. @@ -312,4 +313,4 @@ Using `terraform import`, import SESv2 (Simple Email V2) Configuration Set Event % terraform import aws_sesv2_configuration_set_event_destination.example example_configuration_set|example_event_destination ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/sesv2_contact_list.html.markdown b/website/docs/cdktf/typescript/r/sesv2_contact_list.html.markdown index cfad306a3918..472d345d4b20 100644 --- a/website/docs/cdktf/typescript/r/sesv2_contact_list.html.markdown +++ b/website/docs/cdktf/typescript/r/sesv2_contact_list.html.markdown @@ -75,6 +75,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` - (Optional) Description of what the contact list is about. * `tags` - (Optional) Key-value map of resource tags for the contact list. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. * `topic` - (Optional) Configuration block(s) with topic for the contact list. Detailed below. 
@@ -89,6 +90,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` - (Optional) Description of what the topic is about, which the contact will see. ## Attribute Reference @@ -127,4 +129,4 @@ Using `terraform import`, import SESv2 (Simple Email V2) Contact List using the % terraform import aws_sesv2_contact_list.example example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/sesv2_dedicated_ip_assignment.html.markdown b/website/docs/cdktf/typescript/r/sesv2_dedicated_ip_assignment.html.markdown index 6978b0894460..bf97c9fd70ea 100644 --- a/website/docs/cdktf/typescript/r/sesv2_dedicated_ip_assignment.html.markdown +++ b/website/docs/cdktf/typescript/r/sesv2_dedicated_ip_assignment.html.markdown @@ -41,8 +41,9 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -The following arguments are required: +This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `ip` - (Required) Dedicated IP address. * `destinationPoolName` - (Required) Dedicated IP address. 
@@ -84,4 +85,4 @@ Using `terraform import`, import SESv2 (Simple Email V2) Dedicated IP Assignment % terraform import aws_sesv2_dedicated_ip_assignment.example "0.0.0.0,my-pool" ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/sesv2_dedicated_ip_pool.html.markdown b/website/docs/cdktf/typescript/r/sesv2_dedicated_ip_pool.html.markdown index ecb173763229..ea6734a9379a 100644 --- a/website/docs/cdktf/typescript/r/sesv2_dedicated_ip_pool.html.markdown +++ b/website/docs/cdktf/typescript/r/sesv2_dedicated_ip_pool.html.markdown @@ -67,6 +67,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `scalingMode` - (Optional) IP pool scaling mode. Valid values: `STANDARD`, `MANAGED`. If omitted, the AWS API will default to a standard pool. * `tags` - (Optional) A map of tags to assign to the pool. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. 
@@ -104,4 +105,4 @@ Using `terraform import`, import SESv2 (Simple Email V2) Dedicated IP Pool using % terraform import aws_sesv2_dedicated_ip_pool.example my-pool ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/sesv2_email_identity.html.markdown b/website/docs/cdktf/typescript/r/sesv2_email_identity.html.markdown index 1425b9723afd..93863be49f20 100644 --- a/website/docs/cdktf/typescript/r/sesv2_email_identity.html.markdown +++ b/website/docs/cdktf/typescript/r/sesv2_email_identity.html.markdown @@ -128,6 +128,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `configurationSetName` - (Optional) The configuration set to use by default when sending from this identity. Note that any configuration set defined in the email sending request takes precedence. * `dkimSigningAttributes` - (Optional) The configuration of the DKIM authentication settings for an email domain identity. * `tags` - (Optional) Key-value mapping of resource tags. If configured with a provider [`defaultTags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. @@ -155,6 +156,7 @@ This resource exports the following attributes in addition to the arguments abov * `tokens` - If you used Easy DKIM to configure DKIM authentication for the domain, then this object contains a set of unique strings that you use to create a set of CNAME records that you add to the DNS configuration for your domain. 
When Amazon SES detects these records in the DNS configuration for your domain, the DKIM authentication process is complete. If you configured DKIM authentication for the domain by providing your own public-private key pair, then this object contains the selector for the public key. * `identityType` - The email identity type. Valid values: `EMAIL_ADDRESS`, `DOMAIN`. * `tagsAll` - Map of tags assigned to the resource, including those inherited from the provider [`defaultTags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block). +* `verificationStatus` - The verification status of the identity. The status can be one of the following: `PENDING`, `SUCCESS`, `FAILED`, `TEMPORARY_FAILURE`, and `NOT_STARTED`. * `verifiedForSendingStatus` - Specifies whether or not the identity is verified. ## Import @@ -185,4 +187,4 @@ Using `terraform import`, import SESv2 (Simple Email V2) Email Identity using th % terraform import aws_sesv2_email_identity.example example.com ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/sesv2_email_identity_feedback_attributes.html.markdown b/website/docs/cdktf/typescript/r/sesv2_email_identity_feedback_attributes.html.markdown index 17ee3f2614a9..fe0233e6edae 100644 --- a/website/docs/cdktf/typescript/r/sesv2_email_identity_feedback_attributes.html.markdown +++ b/website/docs/cdktf/typescript/r/sesv2_email_identity_feedback_attributes.html.markdown @@ -48,6 +48,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `emailIdentity` - (Required) The email identity. 
* `emailForwardingEnabled` - (Optional) Sets the feedback forwarding configuration for the identity. @@ -87,4 +88,4 @@ Using `terraform import`, import SESv2 (Simple Email V2) Email Identity Feedback % terraform import aws_sesv2_email_identity_feedback_attributes.example example.com ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/sesv2_email_identity_mail_from_attributes.html.markdown b/website/docs/cdktf/typescript/r/sesv2_email_identity_mail_from_attributes.html.markdown index 42bc5663a580..9366d622dd1d 100644 --- a/website/docs/cdktf/typescript/r/sesv2_email_identity_mail_from_attributes.html.markdown +++ b/website/docs/cdktf/typescript/r/sesv2_email_identity_mail_from_attributes.html.markdown @@ -49,6 +49,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `emailIdentity` - (Required) The verified email identity. * `behaviorOnMxFailure` - (Optional) The action to take if the required MX record isn't found when you send an email. Valid values: `USE_DEFAULT_VALUE`, `REJECT_MESSAGE`. * `mailFromDomain` - (Optional) The custom MAIL FROM domain that you want the verified identity to use. Required if `behaviorOnMxFailure` is `REJECT_MESSAGE`. 
@@ -89,4 +90,4 @@ Using `terraform import`, import SESv2 (Simple Email V2) Email Identity Mail Fro % terraform import aws_sesv2_email_identity_mail_from_attributes.example example.com ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/sesv2_email_identity_policy.html.markdown b/website/docs/cdktf/typescript/r/sesv2_email_identity_policy.html.markdown index d5a3faa37ed6..85a90970c88a 100644 --- a/website/docs/cdktf/typescript/r/sesv2_email_identity_policy.html.markdown +++ b/website/docs/cdktf/typescript/r/sesv2_email_identity_policy.html.markdown @@ -52,8 +52,9 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -The following arguments are required: +This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `emailIdentity` - (Required) The email identity. * `policyName` - (Required) - The name of the policy. * `policy` - (Required) - The text of the policy in JSON format. @@ -64,7 +65,7 @@ This resource exports no additional attributes. ## Import -In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import SESv2 (Simple Email V2) Email Identity Policy using the `id` (`email_identity|policy_name`). For example: +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import SESv2 (Simple Email V2) Email Identity Policy using the `emailIdentity` and `policyName` separated by `|`. For example: ```typescript // DO NOT EDIT. 
Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug @@ -88,10 +89,10 @@ class MyConvertedCode extends TerraformStack { ``` -Using `terraform import`, import SESv2 (Simple Email V2) Email Identity Policy using the `example_id_arg`. For example: +Using `terraform import`, import SESv2 (Simple Email V2) Email Identity Policy using the `emailIdentity` and `policyName` separated by `|`. For example: ```console % terraform import aws_sesv2_email_identity_policy.example example_email_identity|example_policy_name ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/sfn_activity.html.markdown b/website/docs/cdktf/typescript/r/sfn_activity.html.markdown index 7c24af9b756d..5f7f43450f1b 100644 --- a/website/docs/cdktf/typescript/r/sfn_activity.html.markdown +++ b/website/docs/cdktf/typescript/r/sfn_activity.html.markdown @@ -69,6 +69,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `encryptionConfiguration` - (Optional) Defines what encryption configuration is used to encrypt data in the Activity. For more information see the section [Data at rest encyption](https://docs.aws.amazon.com/step-functions/latest/dg/encryption-at-rest.html) in the AWS Step Functions User Guide. * `name` - (Required) The name of the activity to create. * `tags` - (Optional) Key-value map of resource tags. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. 
@@ -83,13 +84,29 @@ This resource supports the following arguments: This resource exports the following attributes in addition to the arguments above: -* `id` - The Amazon Resource Name (ARN) that identifies the created activity. -* `name` - The name of the activity. -* `creationDate` - The date the activity was created. +* `id` - Amazon Resource Name (ARN) of the activity. +* `arn` - Amazon Resource Name (ARN) of the activity. +* `name` - Name of the activity. +* `creationDate` - Date the activity was created. * `tagsAll` - A map of tags assigned to the resource, including those inherited from the provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_sfn_activity.example + identity = { + "arn" = "arn:aws:states:eu-west-1:123456789098:activity:bar" + } +} + +resource "aws_sfn_activity" "example" { + ### Configuration omitted for brevity ### +} +``` + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import activities using the `arn`. For example: ```typescript @@ -106,7 +123,7 @@ class MyConvertedCode extends TerraformStack { super(scope, name); SfnActivity.generateConfigForImport( this, - "foo", + "example", "arn:aws:states:eu-west-1:123456789098:activity:bar" ); } @@ -117,7 +134,7 @@ class MyConvertedCode extends TerraformStack { Using `terraform import`, import activities using the `arn`. 
For example: ```console -% terraform import aws_sfn_activity.foo arn:aws:states:eu-west-1:123456789098:activity:bar +% terraform import aws_sfn_activity.example arn:aws:states:eu-west-1:123456789098:activity:bar ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/sfn_alias.html.markdown b/website/docs/cdktf/typescript/r/sfn_alias.html.markdown index 92d08766684c..79ea3700b1f9 100644 --- a/website/docs/cdktf/typescript/r/sfn_alias.html.markdown +++ b/website/docs/cdktf/typescript/r/sfn_alias.html.markdown @@ -61,6 +61,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name for the alias you are creating. * `description` - (Optional) Description of the alias. * `routingConfiguration` - (Required) The StateMachine alias' route configuration settings. Fields documented below @@ -79,6 +80,21 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_sfn_alias.example + identity = { + "arn" = "arn:aws:states:us-east-1:123456789098:stateMachine:myStateMachine:foo" + } +} + +resource "aws_sfn_alias" "example" { + ### Configuration omitted for brevity ### +} +``` + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import SFN (Step Functions) Alias using the `arn`. 
For example: ```typescript @@ -109,4 +125,4 @@ Using `terraform import`, import SFN (Step Functions) Alias using the `arn`. For % terraform import aws_sfn_alias.foo arn:aws:states:us-east-1:123456789098:stateMachine:myStateMachine:foo ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/sfn_state_machine.html.markdown b/website/docs/cdktf/typescript/r/sfn_state_machine.html.markdown index 99ec5fa80044..f266c11c5f5c 100644 --- a/website/docs/cdktf/typescript/r/sfn_state_machine.html.markdown +++ b/website/docs/cdktf/typescript/r/sfn_state_machine.html.markdown @@ -170,6 +170,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `definition` - (Required) The [Amazon States Language](https://docs.aws.amazon.com/step-functions/latest/dg/concepts-amazon-states-language.html) definition of the state machine. * `encryptionConfiguration` - (Optional) Defines what encryption configuration is used to encrypt data in the State Machine. For more information see [TBD] in the AWS Step Functions User Guide. * `loggingConfiguration` - (Optional) Defines what execution history events are logged and where they are logged. The `loggingConfiguration` parameter is valid when `type` is set to `STANDARD` or `EXPRESS`. Defaults to `OFF`. 
For more information see [Logging Express Workflows](https://docs.aws.amazon.com/step-functions/latest/dg/cw-logs.html), [Log Levels](https://docs.aws.amazon.com/step-functions/latest/dg/cloudwatch-log-level.html) and [Logging Configuration](https://docs.aws.amazon.com/step-functions/latest/apireference/API_CreateStateMachine.html) in the AWS Step Functions User Guide. @@ -218,6 +219,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_sfn_state_machine.example + identity = { + "arn" = "arn:aws:states:eu-west-1:123456789098:stateMachine:bar" + } +} + +resource "aws_sfn_state_machine" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) ARN of the state machine. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import State Machines using the `arn`. For example: ```typescript @@ -248,4 +270,4 @@ Using `terraform import`, import State Machines using the `arn`. 
For example: % terraform import aws_sfn_state_machine.foo arn:aws:states:eu-west-1:123456789098:stateMachine:bar ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/shield_drt_access_log_bucket_association.html.markdown b/website/docs/cdktf/typescript/r/shield_drt_access_log_bucket_association.html.markdown index d07cd20bc76f..579248961f41 100644 --- a/website/docs/cdktf/typescript/r/shield_drt_access_log_bucket_association.html.markdown +++ b/website/docs/cdktf/typescript/r/shield_drt_access_log_bucket_association.html.markdown @@ -33,7 +33,7 @@ class MyConvertedCode extends TerraformStack { const test = new ShieldDrtAccessRoleArnAssociation(this, "test", { roleArn: "arn:aws:iam:${" + - current.name + + current.region + "}:${" + dataAwsCallerIdentityCurrent.accountId + "}:${" + @@ -102,4 +102,4 @@ Using `terraform import`, import Shield DRT access log bucket associations using % terraform import aws_shield_drt_access_log_bucket_association.example example-bucket ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/shield_protection.html.markdown b/website/docs/cdktf/typescript/r/shield_protection.html.markdown index 883dccc59fb0..e72a5965992b 100644 --- a/website/docs/cdktf/typescript/r/shield_protection.html.markdown +++ b/website/docs/cdktf/typescript/r/shield_protection.html.markdown @@ -45,7 +45,7 @@ class MyConvertedCode extends TerraformStack { name: "example", resourceArn: "arn:aws:ec2:${" + - dataAwsRegionCurrent.name + + dataAwsRegionCurrent.region + "}:${" + current.accountId + "}:eip-allocation/${" + @@ -110,4 +110,4 @@ Using `terraform import`, import Shield protection resources using specifying th % terraform import aws_shield_protection.example ff9592dc-22f3-4e88-afa1-7b29fde9669a ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/shield_protection_group.html.markdown 
b/website/docs/cdktf/typescript/r/shield_protection_group.html.markdown index d56da1c0a178..f4794cc3dd3d 100644 --- a/website/docs/cdktf/typescript/r/shield_protection_group.html.markdown +++ b/website/docs/cdktf/typescript/r/shield_protection_group.html.markdown @@ -69,7 +69,7 @@ class MyConvertedCode extends TerraformStack { name: "example", resourceArn: "arn:aws:ec2:${" + - dataAwsRegionCurrent.name + + dataAwsRegionCurrent.region + "}:${" + current.accountId + "}:eip-allocation/${" + @@ -86,7 +86,7 @@ class MyConvertedCode extends TerraformStack { dependsOn: [awsShieldProtectionExample], members: [ "arn:aws:ec2:${" + - dataAwsRegionCurrent.name + + dataAwsRegionCurrent.region + "}:${" + current.accountId + "}:eip-allocation/${" + @@ -175,4 +175,4 @@ Using `terraform import`, import Shield protection group resources using their p % terraform import aws_shield_protection_group.example example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/shield_protection_health_check_association.html.markdown b/website/docs/cdktf/typescript/r/shield_protection_health_check_association.html.markdown index 9d811206f1ff..57fd8a9c4d38 100644 --- a/website/docs/cdktf/typescript/r/shield_protection_health_check_association.html.markdown +++ b/website/docs/cdktf/typescript/r/shield_protection_health_check_association.html.markdown @@ -73,7 +73,7 @@ class MyConvertedCode extends TerraformStack { "arn:${" + dataAwsPartitionCurrent.partition + "}:ec2:${" + - dataAwsRegionCurrent.name + + dataAwsRegionCurrent.region + "}:${" + current.accountId + "}:eip-allocation/${" + @@ -141,4 +141,4 @@ Using `terraform import`, import Shield protection health check association reso % terraform import aws_shield_protection_health_check_association.example ff9592dc-22f3-4e88-afa1-7b29fde9669a+arn:aws:route53:::healthcheck/3742b175-edb9-46bc-9359-f53e3b794b1b ``` - \ No newline at end of file + \ No newline at end of file diff --git 
a/website/docs/cdktf/typescript/r/signer_signing_job.html.markdown b/website/docs/cdktf/typescript/r/signer_signing_job.html.markdown index 0a53f3240ead..6c45f683db4c 100644 --- a/website/docs/cdktf/typescript/r/signer_signing_job.html.markdown +++ b/website/docs/cdktf/typescript/r/signer_signing_job.html.markdown @@ -56,6 +56,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `profileName` - (Required) The name of the profile to initiate the signing operation. * `source` - (Required) The S3 bucket that contains the object to sign. See [Source](#source) below for details. * `destination` - (Required) The S3 bucket in which to save your signed object. See [Destination](#destination) below for details. @@ -139,4 +140,4 @@ Using `terraform import`, import Signer signing jobs using the `jobId`. For exam % terraform import aws_signer_signing_job.test_signer_signing_job 9ed7e5c3-b8d4-4da0-8459-44e0b068f7ee ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/signer_signing_profile.html.markdown b/website/docs/cdktf/typescript/r/signer_signing_profile.html.markdown index 34f84ef3c3dd..ee7e736a971c 100644 --- a/website/docs/cdktf/typescript/r/signer_signing_profile.html.markdown +++ b/website/docs/cdktf/typescript/r/signer_signing_profile.html.markdown @@ -50,11 +50,13 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `platformId` - (Required, Forces new resource) The ID of the platform that is used by the target signing profile. * `name` - (Optional, Forces new resource) A unique signing profile name. By default generated by Terraform. Signing profile names are immutable and cannot be reused after canceled. * `namePrefix` - (Optional, Forces new resource) A signing profile name prefix. Terraform will generate a unique suffix. Conflicts with `name`. * `signatureValidityPeriod` - (Optional, Forces new resource) The validity period for a signing job. See [`signatureValidityPeriod` Block](#signature_validity_period-block) below for details. * `signingMaterial` - (Optional, Forces new resource) The AWS Certificate Manager certificate that will be used to sign code with the new signing profile. See [`signingMaterial` Block](#signing_material-block) below for details. +* `signingParameters` - (Optional, Forces new resource) Map of key-value pairs for signing. These can include any information that you want to use during signing. * `tags` - (Optional) A list of tags associated with the signing profile. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. ### `signatureValidityPeriod` Block @@ -123,4 +125,4 @@ Using `terraform import`, import Signer signing profiles using the `name`. 
For e % terraform import aws_signer_signing_profile.test_signer_signing_profile test_sp_DdW3Mk1foYL88fajut4mTVFGpuwfd4ACO6ANL0D1uIj7lrn8adK ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/signer_signing_profile_permission.html.markdown b/website/docs/cdktf/typescript/r/signer_signing_profile_permission.html.markdown index 4900e3721caa..6c40b51c7df6 100644 --- a/website/docs/cdktf/typescript/r/signer_signing_profile_permission.html.markdown +++ b/website/docs/cdktf/typescript/r/signer_signing_profile_permission.html.markdown @@ -66,6 +66,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `profileName` - (Required) Name of the signing profile to add the cross-account permissions. * `action` - (Required) An AWS Signer action permitted as part of cross-account permissions. Valid values: `signer:StartSigningJob`, `signer:GetSigningProfile`, `signer:RevokeSignature`, or `signer:SignPayload`. * `principal` - (Required) The AWS principal to be granted a cross-account permission. 
@@ -109,4 +110,4 @@ Using `terraform import`, import Signer signing profile permission statements us % terraform import aws_signer_signing_profile_permission.test_signer_signing_profile_permission prod_profile_DdW3Mk1foYL88fajut4mTVFGpuwfd4ACO6ANL0D1uIj7lrn8adK/ProdAccountStartSigningJobStatementId ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/snapshot_create_volume_permission.html.markdown b/website/docs/cdktf/typescript/r/snapshot_create_volume_permission.html.markdown index 6ec31f8bbb76..97ff166c5b91 100644 --- a/website/docs/cdktf/typescript/r/snapshot_create_volume_permission.html.markdown +++ b/website/docs/cdktf/typescript/r/snapshot_create_volume_permission.html.markdown @@ -48,6 +48,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `snapshotId` - (Required) A snapshot ID * `accountId` - (Required) An AWS Account ID to add create volume permissions. The AWS Account cannot be the snapshot's owner @@ -57,4 +58,4 @@ This resource exports the following attributes in addition to the arguments abov * `id` - A combination of "`snapshotId`-`accountId`". 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/sns_platform_application.html.markdown b/website/docs/cdktf/typescript/r/sns_platform_application.html.markdown index 4c8610b9b501..0886dcf7fb98 100644 --- a/website/docs/cdktf/typescript/r/sns_platform_application.html.markdown +++ b/website/docs/cdktf/typescript/r/sns_platform_application.html.markdown @@ -94,6 +94,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The friendly name for the SNS platform application * `platform` - (Required) The platform that the app is registered with. See [Platform][1] for supported platforms. * `platformCredential` - (Required) Application Platform credential. See [Credential][1] for type of credential required for platform. The value of this attribute when stored into the Terraform state is only a hash of the real value, so therefore it is not practical to use this as an attribute for other resources. @@ -153,4 +154,4 @@ Using `terraform import`, import SNS platform applications using the ARN. 
For ex % terraform import aws_sns_platform_application.gcm_application arn:aws:sns:us-west-2:123456789012:app/GCM/gcm_application ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/sns_sms_preferences.html.markdown b/website/docs/cdktf/typescript/r/sns_sms_preferences.html.markdown index 14ed8f82560e..27bc4d205509 100644 --- a/website/docs/cdktf/typescript/r/sns_sms_preferences.html.markdown +++ b/website/docs/cdktf/typescript/r/sns_sms_preferences.html.markdown @@ -36,6 +36,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `monthlySpendLimit` - (Optional) The maximum amount in USD that you are willing to spend each month to send SMS messages. * `deliveryStatusIamRoleArn` - (Optional) The ARN of the IAM role that allows Amazon SNS to write logs about SMS deliveries in CloudWatch Logs. * `deliveryStatusSuccessSamplingRate` - (Optional) The percentage of successful SMS deliveries for which Amazon SNS will write logs in CloudWatch Logs. The value must be between 0 and 100. @@ -51,4 +52,4 @@ This resource exports no additional attributes. You cannot import the SMS preferences. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/sns_topic.html.markdown b/website/docs/cdktf/typescript/r/sns_topic.html.markdown index a123275bdd5e..17bdd5ad1830 100644 --- a/website/docs/cdktf/typescript/r/sns_topic.html.markdown +++ b/website/docs/cdktf/typescript/r/sns_topic.html.markdown @@ -113,6 +113,7 @@ The `_success_feedback_role_arn` and `_failure_feedback_role This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Optional) The name of the topic. Topic names must be made up of only uppercase and lowercase ASCII letters, numbers, underscores, and hyphens, and must be between 1 and 256 characters long. For a FIFO (first-in-first-out) topic, the name must end with the `.fifo` suffix. If omitted, Terraform will assign a random, unique name. Conflicts with `namePrefix` * `namePrefix` - (Optional) Creates a unique name beginning with the specified prefix. Conflicts with `name` * `displayName` - (Optional) The display name for the topic @@ -127,7 +128,7 @@ This resource supports the following arguments: * `kmsMasterKeyId` - (Optional) The ID of an AWS-managed customer master key (CMK) for Amazon SNS or a custom CMK. For more information, see [Key Terms](https://docs.aws.amazon.com/sns/latest/dg/sns-server-side-encryption.html#sse-key-terms) * `signatureVersion` - (Optional) If `SignatureVersion` should be [1 (SHA1) or 2 (SHA256)](https://docs.aws.amazon.com/sns/latest/dg/sns-verify-signature-of-message.html). 
The signature version corresponds to the hashing algorithm used while creating the signature of the notifications, subscription confirmations, or unsubscribe confirmation messages sent by Amazon SNS. * `tracingConfig` - (Optional) Tracing mode of an Amazon SNS topic. Valid values: `"PassThrough"`, `"Active"`. -* `fifo_throughput_scope` - (Optional) Enables higher throughput for FIFO topics by adjusting the scope of deduplication. This attribute has two possible values, `Topic` and `MessageGroup`. For more information, see the [related documentation](https://docs.aws.amazon.com/sns/latest/dg/fifo-high-throughput.html#enable-high-throughput-on-fifo-topic). +* `fifoThroughputScope` - (Optional) Enables higher throughput for FIFO topics by adjusting the scope of deduplication. This attribute has two possible values, `Topic` and `MessageGroup`. For more information, see the [related documentation](https://docs.aws.amazon.com/sns/latest/dg/fifo-high-throughput.html#enable-high-throughput-on-fifo-topic). * `fifoTopic` - (Optional) Boolean indicating whether or not to create a FIFO (first-in-first-out) topic. FIFO topics can't deliver messages to customer managed endpoints, such as email addresses, mobile apps, SMS, or HTTP(S) endpoints. These endpoint types aren't guaranteed to preserve strict message ordering. Default is `false`. * `archivePolicy` - (Optional) The message archive policy for FIFO topics. More details in the [AWS documentation](https://docs.aws.amazon.com/sns/latest/dg/message-archiving-and-replay-topic-owner.html). * `contentBasedDeduplication` - (Optional) Enables content-based deduplication for FIFO topics. 
For more information, see the [related documentation](https://docs.aws.amazon.com/sns/latest/dg/fifo-message-dedup.html) @@ -154,6 +155,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_sns_topic.example + identity = { + "arn" = "arn:aws:sns:us-west-2:123456789012:my-topic" + } +} + +resource "aws_sns_topic" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the SNS topic. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import SNS Topics using the topic `arn`. For example: ```typescript @@ -184,4 +206,4 @@ Using `terraform import`, import SNS Topics using the topic `arn`. For example: % terraform import aws_sns_topic.user_updates arn:aws:sns:us-west-2:123456789012:my-topic ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/sns_topic_data_protection_policy.html.markdown b/website/docs/cdktf/typescript/r/sns_topic_data_protection_policy.html.markdown index dd155b2b7d77..686c44d484a9 100644 --- a/website/docs/cdktf/typescript/r/sns_topic_data_protection_policy.html.markdown +++ b/website/docs/cdktf/typescript/r/sns_topic_data_protection_policy.html.markdown @@ -65,6 +65,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
* `arn` - (Required) The ARN of the SNS topic * `policy` - (Required) The fully-formed AWS policy as JSON. For more information about building AWS IAM policy documents with Terraform, see the [AWS IAM Policy Document Guide](https://learn.hashicorp.com/terraform/aws/iam-policy). @@ -74,6 +75,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_sns_topic_data_protection_policy.example + identity = { + "arn" = "arn:aws:sns:us-west-2:123456789012:example" + } +} + +resource "aws_sns_topic_data_protection_policy" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the SNS topic. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import SNS Data Protection Topic Policy using the topic ARN. For example: ```typescript @@ -104,4 +126,4 @@ Using `terraform import`, import SNS Data Protection Topic Policy using the topi % terraform import aws_sns_topic_data_protection_policy.example arn:aws:sns:us-west-2:123456789012:example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/sns_topic_policy.html.markdown b/website/docs/cdktf/typescript/r/sns_topic_policy.html.markdown index 83020f084540..1b1ab9826bdc 100644 --- a/website/docs/cdktf/typescript/r/sns_topic_policy.html.markdown +++ b/website/docs/cdktf/typescript/r/sns_topic_policy.html.markdown @@ -84,6 +84,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `arn` - (Required) The ARN of the SNS topic * `policy` - (Required) The fully-formed AWS policy as JSON. For more information about building AWS IAM policy documents with Terraform, see the [AWS IAM Policy Document Guide](https://learn.hashicorp.com/terraform/aws/iam-policy). @@ -95,6 +96,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_sns_topic_policy.example + identity = { + "arn" = "arn:aws:sns:us-west-2:123456789012:my-topic" + } +} + +resource "aws_sns_topic_policy" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the SNS topic. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import SNS Topic Policy using the topic ARN. For example: ```typescript @@ -125,4 +147,4 @@ Using `terraform import`, import SNS Topic Policy using the topic ARN. 
For examp % terraform import aws_sns_topic_policy.user_updates arn:aws:sns:us-west-2:123456789012:my-topic ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/sns_topic_subscription.html.markdown b/website/docs/cdktf/typescript/r/sns_topic_subscription.html.markdown index c15a35308e30..3b1364e5acf1 100644 --- a/website/docs/cdktf/typescript/r/sns_topic_subscription.html.markdown +++ b/website/docs/cdktf/typescript/r/sns_topic_subscription.html.markdown @@ -380,6 +380,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `confirmationTimeoutInMinutes` - (Optional) Integer indicating number of minutes to wait in retrying mode for fetching subscription arn before marking it as failure. Only applicable for http and https protocols. Default is `1`. * `deliveryPolicy` - (Optional) JSON String with the delivery policy (retries, backoff, etc.) that will be used in the subscription - this only applies to HTTP/S subscriptions. Refer to the [SNS docs](https://docs.aws.amazon.com/sns/latest/dg/DeliveryPolicies.html) for more details. * `endpointAutoConfirms` - (Optional) Whether the endpoint is capable of [auto confirming subscription](http://docs.aws.amazon.com/sns/latest/dg/SendMessageToHttp.html#SendMessageToHttp.prepare) (e.g., PagerDuty). Default is `false`. @@ -421,6 +422,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. 
For example: + +```terraform +import { + to = aws_sns_topic_subscription.example + identity = { + "arn" = "arn:aws:sns:us-west-2:123456789012:my-topic:8a21d249-4329-4871-acc6-7be709c6ea7f" + } +} + +resource "aws_sns_topic_subscription" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the SNS topic subscription. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import SNS Topic Subscriptions using the subscription `arn`. For example: ```typescript @@ -451,4 +473,4 @@ Using `terraform import`, import SNS Topic Subscriptions using the subscription % terraform import aws_sns_topic_subscription.user_updates_sqs_target arn:aws:sns:us-west-2:123456789012:my-topic:8a21d249-4329-4871-acc6-7be709c6ea7f ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/spot_datafeed_subscription.html.markdown b/website/docs/cdktf/typescript/r/spot_datafeed_subscription.html.markdown index c1b22408a1be..bc74f9fd8280 100644 --- a/website/docs/cdktf/typescript/r/spot_datafeed_subscription.html.markdown +++ b/website/docs/cdktf/typescript/r/spot_datafeed_subscription.html.markdown @@ -52,6 +52,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `bucket` - (Required) The Amazon S3 bucket in which to store the Spot instance data feed. * `prefix` - (Optional) Path of folder inside bucket to place spot pricing data. 
@@ -91,4 +92,4 @@ Using `terraform import`, import a Spot Datafeed Subscription using the word `sp % terraform import aws_spot_datafeed_subscription.mysubscription spot-datafeed-subscription ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/spot_fleet_request.html.markdown b/website/docs/cdktf/typescript/r/spot_fleet_request.html.markdown index 77cf41961d9d..a51e897de78c 100644 --- a/website/docs/cdktf/typescript/r/spot_fleet_request.html.markdown +++ b/website/docs/cdktf/typescript/r/spot_fleet_request.html.markdown @@ -291,6 +291,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `iamFleetRole` - (Required) Grants the Spot fleet permission to terminate Spot instances on your behalf when you cancel its Spot fleet request using CancelSpotFleetRequests or when the Spot fleet request expires, if you set @@ -538,4 +539,4 @@ Using `terraform import`, import Spot Fleet Requests using `id`. For example: % terraform import aws_spot_fleet_request.fleet sfr-005e9ec8-5546-4c31-b317-31a62325411e ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/spot_instance_request.html.markdown b/website/docs/cdktf/typescript/r/spot_instance_request.html.markdown index 855493596846..19a3570d3c03 100644 --- a/website/docs/cdktf/typescript/r/spot_instance_request.html.markdown +++ b/website/docs/cdktf/typescript/r/spot_instance_request.html.markdown @@ -27,8 +27,8 @@ price availability or by a user. 
~> **NOTE:** Because their behavior depends on the live status of the spot market, Spot Instance Requests have a unique lifecycle that makes them behave -differently than other Terraform resources. Most importantly: there is __no -guarantee__ that a Spot Instance exists to fulfill the request at any given +differently than other Terraform resources. Most importantly: there is **no +guarantee** that a Spot Instance exists to fulfill the request at any given point in time. See the [AWS Spot Instance documentation](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-spot-instances.html) for more information. @@ -67,6 +67,8 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). + Spot Instance Requests support all the same arguments as [`aws_instance`](instance.html), with the addition of: * `spotPrice` - (Optional; Default: On-demand price) The maximum price to request on the spot market. @@ -77,9 +79,6 @@ Spot Instance Requests support all the same arguments as [`aws_instance`](instan the instance is terminated, the spot request will be closed. * `launchGroup` - (Optional) A launch group is a group of spot instances that launch together and terminate together. If left empty instances are launched and terminated individually. -* `blockDurationMinutes` - (Optional) The required duration for the Spot instances, in minutes. This value must be a multiple of 60 (60, 120, 180, 240, 300, or 360). - The duration period starts as soon as your Spot instance receives its instance ID. 
At the end of the duration period, Amazon EC2 marks the Spot instance for termination and provides a Spot instance termination notice, which gives the instance a two-minute warning before it terminates. - Note that you can't specify an Availability Zone group or a launch group if you specify a duration. * `instanceInterruptionBehavior` - (Optional) Indicates Spot instance behavior when it is interrupted. Valid values are `terminate`, `stop`, or `hibernate`. Default value is `terminate`. * `validUntil` - (Optional) The end date and time of the request, in UTC [RFC3339](https://tools.ietf.org/html/rfc3339#section-5.8) format(for example, YYYY-MM-DDTHH:MM:SSZ). At this point, no new Spot instance requests are placed or enabled to fulfill the request. The default end date is 7 days from the current date. * `validFrom` - (Optional) The start date and time of the request, in UTC [RFC3339](https://tools.ietf.org/html/rfc3339#section-5.8) format(for example, YYYY-MM-DDTHH:MM:SSZ). The default is to start fulfilling the request immediately. @@ -90,9 +89,9 @@ Spot Instance Requests support all the same arguments as [`aws_instance`](instan This resource exports the following attributes in addition to the arguments above: * `id` - The Spot Instance Request ID. +* `tagsAll` - A map of tags assigned to the resource, including those inherited from the provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). 
-These attributes are exported, but they are expected to change over time and so -should only be used for informational purposes, not for resource dependencies: +The following attributes are exported, but they are expected to change over time and so should only be used for informational purposes, not for resource dependencies: * `spotBidStatus` - The current [bid status](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-bid-status.html) @@ -109,7 +108,6 @@ should only be used for informational purposes, not for resource dependencies: used inside the Amazon EC2, and only available if you've enabled DNS hostnames for your VPC * `privateIp` - The private IP address assigned to the instance -* `tagsAll` - A map of tags assigned to the resource, including those inherited from the provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). ## Timeouts @@ -119,4 +117,4 @@ should only be used for informational purposes, not for resource dependencies: * `read` - (Default `15m`) * `delete` - (Default `20m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/sqs_queue.html.markdown b/website/docs/cdktf/typescript/r/sqs_queue.html.markdown index 23bb0623c3be..32696081beef 100644 --- a/website/docs/cdktf/typescript/r/sqs_queue.html.markdown +++ b/website/docs/cdktf/typescript/r/sqs_queue.html.markdown @@ -200,6 +200,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `contentBasedDeduplication` - (Optional) Enables content-based deduplication for FIFO queues. 
For more information, see the [related documentation](http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues.html#FIFO-queues-exactly-once-processing). * `deduplicationScope` - (Optional) Specifies whether message deduplication occurs at the message group or queue level. Valid values are `messageGroup` and `queue` (default). * `delaySeconds` - (Optional) Time in seconds that the delivery of all messages in the queue will be delayed. An integer from 0 to 900 (15 minutes). The default for this attribute is 0 seconds. @@ -207,7 +208,7 @@ This resource supports the following arguments: * `fifoThroughputLimit` - (Optional) Specifies whether the FIFO queue throughput quota applies to the entire queue or per message group. Valid values are `perQueue` (default) and `perMessageGroupId`. * `kmsDataKeyReusePeriodSeconds` - (Optional) Length of time, in seconds, for which Amazon SQS can reuse a data key to encrypt or decrypt messages before calling AWS KMS again. An integer representing seconds, between 60 seconds (1 minute) and 86,400 seconds (24 hours). The default is 300 (5 minutes). * `kmsMasterKeyId` - (Optional) ID of an AWS-managed customer master key (CMK) for Amazon SQS or a custom CMK. For more information, see [Key Terms](http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-server-side-encryption.html#sqs-sse-key-terms). -* `maxMessageSize` - (Optional) Limit of how many bytes a message can contain before Amazon SQS rejects it. An integer from 1024 bytes (1 KiB) up to 262144 bytes (256 KiB). The default for this attribute is 262144 (256 KiB). +* `maxMessageSize` - (Optional) Limit of how many bytes a message can contain before Amazon SQS rejects it. An integer from 1024 bytes (1 KiB) up to 1048576 bytes (1024 KiB). The default for this attribute is 262144 (256 KiB). * `messageRetentionSeconds` - (Optional) Number of seconds Amazon SQS retains a message. 
Integer representing seconds, from 60 (1 minute) to 1209600 (14 days). The default for this attribute is 345600 (4 days). * `name` - (Optional) Name of the queue. Queue names must be made up of only uppercase and lowercase ASCII letters, numbers, underscores, and hyphens, and must be between 1 and 80 characters long. For a FIFO (first-in-first-out) queue, the name must end with the `.fifo` suffix. If omitted, Terraform will assign a random, unique name. Conflicts with `namePrefix`. * `namePrefix` - (Optional) Creates a unique name beginning with the specified prefix. Conflicts with `name`. @@ -238,6 +239,32 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_sqs_queue.example + identity = { + url = "https://queue.amazonaws.com/80398EXAMPLE/MyQueue" + } +} + +resource "aws_sqs_queue" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `url` (String) URL of the SQS queue. + +#### Optional + +* `accountId` (String) AWS Account where this resource is managed. +* `region` (String) Region where this resource is managed. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import SQS Queues using the queue `url`. For example: ```typescript @@ -254,7 +281,7 @@ class MyConvertedCode extends TerraformStack { super(scope, name); SqsQueue.generateConfigForImport( this, - "publicQueue", + "example", "https://queue.amazonaws.com/80398EXAMPLE/MyQueue" ); } @@ -265,7 +292,7 @@ class MyConvertedCode extends TerraformStack { Using `terraform import`, import SQS Queues using the queue `url`. 
For example: ```console -% terraform import aws_sqs_queue.public_queue https://queue.amazonaws.com/80398EXAMPLE/MyQueue +% terraform import aws_sqs_queue.example https://queue.amazonaws.com/80398EXAMPLE/MyQueue ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/sqs_queue_policy.html.markdown b/website/docs/cdktf/typescript/r/sqs_queue_policy.html.markdown index 2cbcc1fd5900..7ec663821ba6 100644 --- a/website/docs/cdktf/typescript/r/sqs_queue_policy.html.markdown +++ b/website/docs/cdktf/typescript/r/sqs_queue_policy.html.markdown @@ -130,6 +130,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `policy` - (Required) JSON policy for the SQS queue. For more information about building AWS IAM policy documents with Terraform, see the [AWS IAM Policy Document Guide](https://learn.hashicorp.com/terraform/aws/iam-policy). Ensure that `Version = "2012-10-17"` is set in the policy or AWS may hang in creating the queue. * `queueUrl` - (Required) URL of the SQS Queue to which to attach the policy. @@ -169,4 +170,4 @@ Using `terraform import`, import SQS Queue Policies using the queue URL. 
For exa % terraform import aws_sqs_queue_policy.test https://queue.amazonaws.com/123456789012/myqueue ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/sqs_queue_redrive_allow_policy.html.markdown b/website/docs/cdktf/typescript/r/sqs_queue_redrive_allow_policy.html.markdown index 85bbdd48c120..907233070b78 100644 --- a/website/docs/cdktf/typescript/r/sqs_queue_redrive_allow_policy.html.markdown +++ b/website/docs/cdktf/typescript/r/sqs_queue_redrive_allow_policy.html.markdown @@ -63,6 +63,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `queueUrl` - (Required) The URL of the SQS Queue to which to attach the policy * `redriveAllowPolicy` - (Required) The JSON redrive allow policy for the SQS queue. Learn more in the [Amazon SQS dead-letter queues documentation](https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-dead-letter-queues.html). 
@@ -102,4 +103,4 @@ Using `terraform import`, import SQS Queue Redrive Allow Policies using the queu % terraform import aws_sqs_queue_redrive_allow_policy.test https://queue.amazonaws.com/123456789012/myqueue ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/sqs_queue_redrive_policy.html.markdown b/website/docs/cdktf/typescript/r/sqs_queue_redrive_policy.html.markdown index 1234f58690c0..9b6f4314228b 100644 --- a/website/docs/cdktf/typescript/r/sqs_queue_redrive_policy.html.markdown +++ b/website/docs/cdktf/typescript/r/sqs_queue_redrive_policy.html.markdown @@ -64,6 +64,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `queueUrl` - (Required) The URL of the SQS Queue to which to attach the policy * `redrivePolicy` - (Required) The JSON redrive policy for the SQS queue. Accepts two key/val pairs: `deadLetterTargetArn` and `maxReceiveCount`. Learn more in the [Amazon SQS dead-letter queues documentation](https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-dead-letter-queues.html). @@ -103,4 +104,4 @@ Using `terraform import`, import SQS Queue Redrive Policies using the queue URL. 
% terraform import aws_sqs_queue_redrive_policy.test https://queue.amazonaws.com/123456789012/myqueue ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/ssm_activation.html.markdown b/website/docs/cdktf/typescript/r/ssm_activation.html.markdown index d6cc465d4415..004795a08d3c 100644 --- a/website/docs/cdktf/typescript/r/ssm_activation.html.markdown +++ b/website/docs/cdktf/typescript/r/ssm_activation.html.markdown @@ -67,6 +67,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Optional) The default name of the registered managed instance. * `description` - (Optional) The description of the resource that you want to register. * `expirationDate` - (Optional) UTC timestamp in [RFC3339 format](https://tools.ietf.org/html/rfc3339#section-5.8) by which this activation request should expire. The default value is 24 hours from resource creation time. Terraform will only perform drift detection of its value when present in a configuration. @@ -123,4 +124,4 @@ Using `terraform import`, import AWS SSM Activation using the `id`. For example: -> **Note:** The `activationCode` attribute cannot be imported. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/ssm_association.html.markdown b/website/docs/cdktf/typescript/r/ssm_association.html.markdown index d9da775391ac..37c87f076508 100644 --- a/website/docs/cdktf/typescript/r/ssm_association.html.markdown +++ b/website/docs/cdktf/typescript/r/ssm_association.html.markdown @@ -227,7 +227,7 @@ class MyConvertedCode extends TerraformStack { new Instance(this, "database_server", { ami: Token.asString(amazonLinux.id), iamInstanceProfile: ec2SsmProfile.name, - instanceType: instanceType.stringValue, + instanceType: "t3.micro", subnetId: Token.asString(defaultVar.id), tags: { Environment: environment.stringValue, @@ -245,7 +245,7 @@ class MyConvertedCode extends TerraformStack { new Instance(this, "web_server", { ami: Token.asString(amazonLinux.id), iamInstanceProfile: ec2SsmProfile.name, - instanceType: instanceType.stringValue, + instanceType: "t3.micro", subnetId: Token.asString(defaultVar.id), tags: { Environment: environment.stringValue, @@ -285,13 +285,13 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The name of the SSM document to apply. * `applyOnlyAtCronInterval` - (Optional) By default, when you create a new or update associations, the system runs it immediately and then according to the schedule you specified. Enable this option if you do not want an association to run immediately after you create or update it. This parameter is not supported for rate expressions. Default: `false`. * `associationName` - (Optional) The descriptive name for the association. 
* `automationTargetParameterName` - (Optional) Specify the target for the association. This target is required for associations that use an `Automation` document and target resources by using rate controls. This should be set to the SSM document `parameter` that will define how your automation will branch out. * `complianceSeverity` - (Optional) The compliance severity for the association. Can be one of the following: `UNSPECIFIED`, `LOW`, `MEDIUM`, `HIGH` or `CRITICAL` * `documentVersion` - (Optional) The document version you want to associate with the target(s). Can be a specific version or the default version. -* `instanceId` - (Optional, **Deprecated**) The instance ID to apply an SSM document to. Use `targets` with key `InstanceIds` for document schema versions 2.0 and above. Use the `targets` attribute instead. * `maxConcurrency` - (Optional) The maximum number of targets allowed to run the association at the same time. You can specify a number, for example 10, or a percentage of the target set, for example 10%. * `maxErrors` - (Optional) The number of errors that are allowed before the system stops sending requests to run the association on additional targets. You can specify a number, for example 10, or a percentage of the target set, for example 10%. If you specify a threshold of 3, the stop command is sent when the fourth error is returned. If you specify a threshold of 10% for 50 associations, the stop command is sent when the sixth error is returned. * `outputLocation` - (Optional) An output location block. Output Location is documented below. @@ -319,13 +319,38 @@ This resource exports the following attributes in addition to the arguments abov * `arn` - The ARN of the SSM association * `associationId` - The ID of the SSM association. -* `instanceId` - The instance id that the SSM document was applied to. * `name` - The name of the SSM document to apply. * `parameters` - Additional parameters passed to the SSM document. 
* `tagsAll` - A map of tags assigned to the resource, including those inherited from the provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_ssm_association.example + identity = { + association_id = "10abcdef-0abc-1234-5678-90abcdef123456" + } +} + +resource "aws_ssm_association" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `associationId` - (String) ID of the SSM association. + +#### Optional + +* `accountId` (String) AWS Account where this resource is managed. +* `region` (String) Region where this resource is managed. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import SSM associations using the `associationId`. For example: ```typescript @@ -342,7 +367,7 @@ class MyConvertedCode extends TerraformStack { super(scope, name); SsmAssociation.generateConfigForImport( this, - "testAssociation", + "example", "10abcdef-0abc-1234-5678-90abcdef123456" ); } @@ -353,7 +378,7 @@ class MyConvertedCode extends TerraformStack { Using `terraform import`, import SSM associations using the `associationId`. 
For example: ```console -% terraform import aws_ssm_association.test-association 10abcdef-0abc-1234-5678-90abcdef123456 +% terraform import aws_ssm_association.example 10abcdef-0abc-1234-5678-90abcdef123456 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/ssm_default_patch_baseline.html.markdown b/website/docs/cdktf/typescript/r/ssm_default_patch_baseline.html.markdown index 9591fad197b9..d3974091dc41 100644 --- a/website/docs/cdktf/typescript/r/ssm_default_patch_baseline.html.markdown +++ b/website/docs/cdktf/typescript/r/ssm_default_patch_baseline.html.markdown @@ -50,8 +50,9 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -The following arguments are required: +This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `baselineId` - (Required) ID of the patch baseline. Can be an ID or an ARN. When specifying an AWS-provided patch baseline, must be the ARN. @@ -167,4 +168,4 @@ Using the operating system value: % terraform import aws_ssm_default_patch_baseline.example CENTOS ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/ssm_document.html.markdown b/website/docs/cdktf/typescript/r/ssm_document.html.markdown index fd00a8173702..cd25ff0b7e1b 100644 --- a/website/docs/cdktf/typescript/r/ssm_document.html.markdown +++ b/website/docs/cdktf/typescript/r/ssm_document.html.markdown @@ -72,6 +72,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The name of the document. * `attachmentsSource` - (Optional) One or more configuration blocks describing attachments sources to a version of a document. See [`attachmentsSource` block](#attachments_source-block) below for details. * `content` - (Required) The content for the SSM document in JSON or YAML format. The content of the document must not exceed 64KB. This quota also includes the content specified for input parameters at runtime. We recommend storing the contents for your new document in an external JSON or YAML file and referencing the file in a command. @@ -133,6 +134,32 @@ The `parameter` configuration block provides the following attributes: ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_ssm_document.example + identity = { + name = "example" + } +} + +resource "aws_ssm_document" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `name` - (String) Name of the SSM document. + +#### Optional + +* `accountId` (String) AWS Account where this resource is managed. +* `region` (String) Region where this resource is managed. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import SSM Documents using the name. 
For example: ```typescript @@ -195,4 +222,4 @@ class MyConvertedCode extends TerraformStack { ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/ssm_maintenance_window.html.markdown b/website/docs/cdktf/typescript/r/ssm_maintenance_window.html.markdown index e2ef1296e6aa..e02d3fcf96f7 100644 --- a/website/docs/cdktf/typescript/r/ssm_maintenance_window.html.markdown +++ b/website/docs/cdktf/typescript/r/ssm_maintenance_window.html.markdown @@ -41,6 +41,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The name of the maintenance window. * `schedule` - (Required) The schedule of the Maintenance Window in the form of a [cron or rate expression](https://docs.aws.amazon.com/systems-manager/latest/userguide/reference-cron-and-rate-expressions.html). * `cutoff` - (Required) The number of hours before the end of the Maintenance Window that Systems Manager stops scheduling new tasks for execution. @@ -63,6 +64,32 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_ssm_maintenance_window.example + identity = { + id = "mw-0123456789" + } +} + +resource "aws_ssm_maintenance_window" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `id` - (String) ID of the maintenance window. 
+ +#### Optional + +* `accountId` (String) AWS Account where this resource is managed. +* `region` (String) Region where this resource is managed. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import SSM Maintenance Windows using the maintenance window `id`. For example: ```typescript @@ -79,7 +106,7 @@ class MyConvertedCode extends TerraformStack { super(scope, name); SsmMaintenanceWindow.generateConfigForImport( this, - "importedWindow", + "example", "mw-0123456789" ); } @@ -90,7 +117,7 @@ class MyConvertedCode extends TerraformStack { Using `terraform import`, import SSM Maintenance Windows using the maintenance window `id`. For example: ```console -% terraform import aws_ssm_maintenance_window.imported-window mw-0123456789 +% terraform import aws_ssm_maintenance_window.example mw-0123456789 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/ssm_maintenance_window_target.html.markdown b/website/docs/cdktf/typescript/r/ssm_maintenance_window_target.html.markdown index 2377b825eb9c..dd76eaeddbe3 100644 --- a/website/docs/cdktf/typescript/r/ssm_maintenance_window_target.html.markdown +++ b/website/docs/cdktf/typescript/r/ssm_maintenance_window_target.html.markdown @@ -94,6 +94,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `windowId` - (Required) The Id of the maintenance window to register the target with. * `name` - (Optional) The name of the maintenance window target. * `description` - (Optional) The description of the maintenance window target. 
@@ -110,6 +111,34 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_ssm_maintenance_window_target.example + identity = { + window_id = "mw-0c50858d01EXAMPLE" + id = "23639a0b-ddbc-4bca-9e72-78d96EXAMPLE" + } +} + +resource "aws_ssm_maintenance_window_target" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `windowId` - (String) ID of the maintenance window. +* `id` - (String) ID of the maintenance window target. + +#### Optional + +* `accountId` (String) AWS Account where this resource is managed. +* `region` (String) Region where this resource is managed. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import SSM Maintenance Window targets using `WINDOW_ID/WINDOW_TARGET_ID`. 
For example: ```typescript @@ -140,4 +169,4 @@ Using `terraform import`, import SSM Maintenance Window targets using `WINDOW_ID % terraform import aws_ssm_maintenance_window_target.example mw-0c50858d01EXAMPLE/23639a0b-ddbc-4bca-9e72-78d96EXAMPLE ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/ssm_maintenance_window_task.html.markdown b/website/docs/cdktf/typescript/r/ssm_maintenance_window_task.html.markdown index 5f5ab78abcdb..e09997809885 100644 --- a/website/docs/cdktf/typescript/r/ssm_maintenance_window_task.html.markdown +++ b/website/docs/cdktf/typescript/r/ssm_maintenance_window_task.html.markdown @@ -194,6 +194,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `windowId` - (Required) The Id of the maintenance window to register the task with. * `maxConcurrency` - (Optional) The maximum number of targets this task can be run for in parallel. * `maxErrors` - (Optional) The maximum number of errors allowed before this task stops being scheduled. @@ -269,6 +270,34 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. 
For example: + +```terraform +import { + to = aws_ssm_maintenance_window_task.example + identity = { + window_id = "mw-0c50858d01EXAMPLE" + id = "4f7ca192-7e9a-40fe-9192-5cb15EXAMPLE" + } +} + +resource "aws_ssm_maintenance_window_task" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `windowId` - (String) ID of the maintenance window. +* `id` - (String) ID of the maintenance window task. + +#### Optional + +* `accountId` (String) AWS Account where this resource is managed. +* `region` (String) Region where this resource is managed. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import AWS Maintenance Window Task using the `windowId` and `windowTaskId` separated by `/`. For example: ```typescript @@ -285,7 +314,7 @@ class MyConvertedCode extends TerraformStack { super(scope, name); SsmMaintenanceWindowTask.generateConfigForImport( this, - "task", + "example", "/" ); } @@ -296,7 +325,7 @@ class MyConvertedCode extends TerraformStack { Using `terraform import`, import AWS Maintenance Window Task using the `windowId` and `windowTaskId` separated by `/`. For example: ```console -% terraform import aws_ssm_maintenance_window_task.task / +% terraform import aws_ssm_maintenance_window_task.example / ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/ssm_parameter.html.markdown b/website/docs/cdktf/typescript/r/ssm_parameter.html.markdown index 373124ef7922..460d6651bf01 100644 --- a/website/docs/cdktf/typescript/r/ssm_parameter.html.markdown +++ b/website/docs/cdktf/typescript/r/ssm_parameter.html.markdown @@ -14,7 +14,7 @@ Provides an SSM Parameter resource. ~> **Note:** The `overwrite` argument makes it possible to overwrite an existing SSM Parameter created outside of Terraform. --> **Note:** Write-Only argument `valueWo` is available to use in place of `value`. 
Write-Only arguments are supported in HashiCorp Terraform 1.11.0 and later. [Learn more](https://developer.hashicorp.com/terraform/language/v1.11.x/resources/ephemeral#write-only-arguments). +-> **Note:** Write-Only argument `valueWo` is available to use in place of `value`. Write-Only arguments are supported in HashiCorp Terraform 1.11.0 and later. [Learn more](https://developer.hashicorp.com/terraform/language/resources/ephemeral#write-only-arguments). ## Example Usage @@ -95,6 +95,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `allowedPattern` - (Optional) Regular expression used to validate the parameter value. * `dataType` - (Optional) Data type of the parameter. Valid values: `text`, `aws:ssm:integration` and `aws:ec2:image` for AMI format, see the [Native parameter support for Amazon Machine Image IDs](https://docs.aws.amazon.com/systems-manager/latest/userguide/parameter-store-ec2-aliases.html). * `description` - (Optional) Description of the parameter. @@ -120,6 +121,32 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_ssm_parameter.example + identity = { + name = "/my_path/my_paramname" + } +} + +resource "aws_ssm_parameter" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `name` - (String) Name of the parameter. + +#### Optional + +* `accountId` (String) AWS Account where this resource is managed. 
+* `region` (String) Region where this resource is managed. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import SSM Parameters using the parameter store `name`. For example: ```typescript @@ -136,7 +163,7 @@ class MyConvertedCode extends TerraformStack { super(scope, name); SsmParameter.generateConfigForImport( this, - "myParam", + "example", "/my_path/my_paramname" ); } @@ -147,7 +174,7 @@ class MyConvertedCode extends TerraformStack { Using `terraform import`, import SSM Parameters using the parameter store `name`. For example: ```console -% terraform import aws_ssm_parameter.my_param /my_path/my_paramname +% terraform import aws_ssm_parameter.example /my_path/my_paramname ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/ssm_patch_baseline.html.markdown b/website/docs/cdktf/typescript/r/ssm_patch_baseline.html.markdown index cdbf9c849272..d33ef7a9fe20 100644 --- a/website/docs/cdktf/typescript/r/ssm_patch_baseline.html.markdown +++ b/website/docs/cdktf/typescript/r/ssm_patch_baseline.html.markdown @@ -211,10 +211,12 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `approvalRule` - (Optional) Set of rules used to include patches in the baseline. Up to 10 approval rules can be specified. See [`approvalRule`](#approval_rule-block) below. * `approvedPatchesComplianceLevel` - (Optional) Compliance level for approved patches. This means that if an approved patch is reported as missing, this is the severity of the compliance violation. 
Valid values are `CRITICAL`, `HIGH`, `MEDIUM`, `LOW`, `INFORMATIONAL`, `UNSPECIFIED`. The default value is `UNSPECIFIED`. * `approvedPatchesEnableNonSecurity` - (Optional) Whether the list of approved patches includes non-security updates that should be applied to the instances. Applies to Linux instances only. * `approvedPatches` - (Optional) List of explicitly approved patches for the baseline. Cannot be specified with `approvalRule`. +* `availableSecurityUpdatesComplianceStatus` - (Optional) Indicates the compliance status of managed nodes for which security-related patches are available but were not approved. Supported for Windows Server managed nodes only. Valid values are `COMPLIANT`, `NON_COMPLIANT`. * `description` - (Optional) Description of the patch baseline. * `globalFilter` - (Optional) Set of global filters used to exclude patches from the baseline. Up to 4 global filters can be specified using Key/Value pairs. Valid Keys are `PRODUCT`, `CLASSIFICATION`, `MSRC_SEVERITY`, and `PATCH_ID`. * `operatingSystem` - (Optional) Operating system the patch baseline applies to. Valid values are `ALMA_LINUX`, `AMAZON_LINUX`, `AMAZON_LINUX_2`, `AMAZON_LINUX_2022`, `AMAZON_LINUX_2023`, `CENTOS`, `DEBIAN`, `MACOS`, `ORACLE_LINUX`, `RASPBIAN`, `REDHAT_ENTERPRISE_LINUX`, `ROCKY_LINUX`, `SUSE`, `UBUNTU`, and `WINDOWS`. The default value is `WINDOWS`. @@ -252,6 +254,32 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_ssm_patch_baseline.example + identity = { + id = "pb-12345678" + } +} + +resource "aws_ssm_patch_baseline" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `id` - (String) ID of the patch baseline. 
+ +#### Optional + +* `accountId` (String) AWS Account where this resource is managed. +* `region` (String) Region where this resource is managed. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import SSM Patch Baselines using their baseline ID. For example: ```typescript @@ -278,4 +306,4 @@ Using `terraform import`, import SSM Patch Baselines using their baseline ID. Fo % terraform import aws_ssm_patch_baseline.example pb-12345678 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/ssm_patch_group.html.markdown b/website/docs/cdktf/typescript/r/ssm_patch_group.html.markdown index a355b4d615bc..dcd29a103d25 100644 --- a/website/docs/cdktf/typescript/r/ssm_patch_group.html.markdown +++ b/website/docs/cdktf/typescript/r/ssm_patch_group.html.markdown @@ -44,6 +44,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `baselineId` - (Required) The ID of the patch baseline to register the patch group with. * `patchGroup` - (Required) The name of the patch group that should be registered with the patch baseline. @@ -53,4 +54,4 @@ This resource exports the following attributes in addition to the arguments abov * `id` - The name of the patch group and ID of the patch baseline separated by a comma (`,`). 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/ssm_resource_data_sync.html.markdown b/website/docs/cdktf/typescript/r/ssm_resource_data_sync.html.markdown index 2a576f2c74f1..1bf18237fdf7 100644 --- a/website/docs/cdktf/typescript/r/ssm_resource_data_sync.html.markdown +++ b/website/docs/cdktf/typescript/r/ssm_resource_data_sync.html.markdown @@ -95,6 +95,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name for the configuration. * `s3Destination` - (Required) Amazon S3 configuration details for the sync. @@ -144,4 +145,4 @@ Using `terraform import`, import SSM resource data sync using the `name`. For ex % terraform import aws_ssm_resource_data_sync.example example-name ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/ssm_service_setting.html.markdown b/website/docs/cdktf/typescript/r/ssm_service_setting.html.markdown index eb2a67fcf967..ba56bbb07c02 100644 --- a/website/docs/cdktf/typescript/r/ssm_service_setting.html.markdown +++ b/website/docs/cdktf/typescript/r/ssm_service_setting.html.markdown @@ -40,7 +40,8 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: -* `settingId` - (Required) ID of the service setting. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
+* `settingId` - (Required) ID of the service setting. Valid values are shown in the [AWS documentation](https://docs.aws.amazon.com/systems-manager/latest/APIReference/API_GetServiceSetting.html#API_GetServiceSetting_RequestSyntax). * `settingValue` - (Required) Value of the service setting. ## Attribute Reference @@ -82,4 +83,4 @@ Using `terraform import`, import AWS SSM Service Setting using the `settingId`. % terraform import aws_ssm_service_setting.example arn:aws:ssm:us-east-1:123456789012:servicesetting/ssm/parameter-store/high-throughput-enabled ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/ssmcontacts_contact.html.markdown b/website/docs/cdktf/typescript/r/ssmcontacts_contact.html.markdown index d6d720b4fb09..743fe6ea316c 100644 --- a/website/docs/cdktf/typescript/r/ssmcontacts_contact.html.markdown +++ b/website/docs/cdktf/typescript/r/ssmcontacts_contact.html.markdown @@ -78,8 +78,9 @@ The following arguments are required: The following arguments are optional: +- `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). - `displayName` - (Optional) Full friendly name of the contact or escalation plan. If set, must be between 1 and 255 characters, and may contain alphanumerics, underscores (`_`), hyphens (`-`), periods (`.`), and spaces. -- `tags` - (Optional) Map of tags to assign to the resource. +- `tags` - (Optional) Key-value tags for the contact. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level.
## Attribute Reference @@ -116,4 +117,4 @@ Using `terraform import`, import SSM Contact using the `ARN`. For example: % terraform import aws_ssmcontacts_contact.example {ARNValue} ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/ssmcontacts_contact_channel.html.markdown b/website/docs/cdktf/typescript/r/ssmcontacts_contact_channel.html.markdown index 6f953af61586..9f1c4603fce4 100644 --- a/website/docs/cdktf/typescript/r/ssmcontacts_contact_channel.html.markdown +++ b/website/docs/cdktf/typescript/r/ssmcontacts_contact_channel.html.markdown @@ -78,8 +78,9 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -The following arguments are required: +This resource supports the following arguments: +- `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). - `contactId` - (Required) Amazon Resource Name (ARN) of the AWS SSM Contact that the contact channel belongs to. - `deliveryAddress` - (Required) Block that contains contact engagement details. See details below. - `name` - (Required) Name of the contact channel. Must be between 1 and 255 characters, and may contain alphanumerics, underscores (`_`), hyphens (`-`), periods (`.`), and spaces. @@ -98,7 +99,28 @@ This resource exports the following attributes in addition to the arguments abov ## Import -In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import SSM Contact Channel using the `ARN`. For example: +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. 
For example: + +```terraform +import { + to = aws_ssmcontacts_contact_channel.example + identity = { + "arn" = "arn:aws:ssm-contacts:us-west-2:123456789012:contact-channel/example" + } +} + +resource "aws_ssmcontacts_contact_channel" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the contact channel. + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import SSM Contact Channel using the `arn`. For example: ```typescript // DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug @@ -122,10 +144,10 @@ class MyConvertedCode extends TerraformStack { ``` -Using `terraform import`, import SSM Contact Channel using the `ARN`. For example: +Using `terraform import`, import SSM Contact Channel using the `arn`. For example: ```console % terraform import aws_ssmcontacts_contact_channel.example arn:aws:ssm-contacts:us-west-2:123456789012:contact-channel/example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/ssmcontacts_plan.html.markdown b/website/docs/cdktf/typescript/r/ssmcontacts_plan.html.markdown index 0c061c15febd..84d62b673be7 100644 --- a/website/docs/cdktf/typescript/r/ssmcontacts_plan.html.markdown +++ b/website/docs/cdktf/typescript/r/ssmcontacts_plan.html.markdown @@ -136,8 +136,9 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -The following arguments are required: +This resource supports the following arguments: +- `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
- `contactId` - (Required) The Amazon Resource Name (ARN) of the contact or escalation plan. - `stage` - (Required) One or more configuration blocks for specifying a list of stages that the escalation plan or engagement plan uses to engage contacts and contact methods. See [Stage](#stage) below for more details. @@ -209,4 +210,4 @@ Using `terraform import`, import SSM Contact Plan using the Contact ARN. For exa % terraform import aws_ssmcontacts_plan.example {ARNValue} ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/ssmcontacts_rotation.html.markdown b/website/docs/cdktf/typescript/r/ssmcontacts_rotation.html.markdown index 22b0e6c0edec..3d3bcb9d3202 100644 --- a/website/docs/cdktf/typescript/r/ssmcontacts_rotation.html.markdown +++ b/website/docs/cdktf/typescript/r/ssmcontacts_rotation.html.markdown @@ -192,6 +192,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `startTime` - (Optional) The date and time, in RFC 3339 format, that the rotation goes into effect. * `tags` - (Optional) A map of tags to assign to the resource. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. @@ -243,6 +244,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. 
For example: + +```terraform +import { + to = aws_ssmcontacts_rotation.example + identity = { + "arn" = "arn:aws:ssm-contacts:us-east-1:123456789012:rotation/example-rotation" + } +} + +resource "aws_ssmcontacts_rotation" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the SSM Contacts rotation. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import SSMContacts Rotation using the `arn`. For example: ```typescript @@ -273,4 +295,4 @@ Using `terraform import`, import CodeGuru Profiler Profiling Group using the `ar % terraform import aws_ssmcontacts_rotation.example arn:aws:ssm-contacts:us-east-1:012345678910:rotation/example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/ssmincidents_replication_set.html.markdown b/website/docs/cdktf/typescript/r/ssmincidents_replication_set.html.markdown index 3edae00919fc..8b8565875956 100644 --- a/website/docs/cdktf/typescript/r/ssmincidents_replication_set.html.markdown +++ b/website/docs/cdktf/typescript/r/ssmincidents_replication_set.html.markdown @@ -33,7 +33,7 @@ class MyConvertedCode extends TerraformStack { constructor(scope: Construct, name: string) { super(scope, name); new SsmincidentsReplicationSet(this, "replicationSetName", { - region: [ + regions: [ { name: "us-west-2", }, @@ -62,7 +62,7 @@ class MyConvertedCode extends TerraformStack { constructor(scope: Construct, name: string) { super(scope, name); new SsmincidentsReplicationSet(this, "replicationSetName", { - region: [ + regions: [ { name: "us-west-2", }, @@ -91,7 +91,7 @@ class MyConvertedCode extends TerraformStack { constructor(scope: Construct, name: string) { super(scope, name); new SsmincidentsReplicationSet(this, "replicationSetName", { - region: [ + regions: [ { name: "us-west-2", }, @@ -121,7 +121,7 @@ class 
MyConvertedCode extends TerraformStack { super(scope, name); const exampleKey = new KmsKey(this, "example_key", {}); new SsmincidentsReplicationSet(this, "replicationSetName", { - region: [ + regions: [ { kmsKeyArn: exampleKey.arn, name: "us-west-2", @@ -140,7 +140,8 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: -* `region` - (Required) The Regions that Incident Manager replicates your data to. You can have up to three Regions in your replication set. +* `region` - (Optional, **Deprecated**) The replication set's Regions. Use `regions` instead. +* `regions` - (Optional) The replication set's Regions. * `tags` - (Optional) Key-value map of resource tags. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. For information about the maximum allowed number of Regions and tag value constraints, see [CreateReplicationSet in the *AWS Systems Manager Incident Manager API Reference*](https://docs.aws.amazon.com/incident-manager/latest/APIReference/API_CreateReplicationSet.html). @@ -155,7 +156,7 @@ For information about the maximum allowed number of Regions and tag value constr ~> **NOTE:** If possible, create all the customer managed keys you need (using the `terraform apply` command) before you create the replication set, or create the keys and replication set in the same `terraform apply` command. Otherwise, to delete a replication set, you must run one `terraform apply` command to delete the replication set and another to delete the AWS KMS keys used by the replication set. Deleting the AWS KMS keys before deleting the replication set results in an error. In that case, you must manually reenable the deleted key using the AWS Management Console before you can delete the replication set. 
-The `region` configuration block supports the following arguments: +The `regions` configuration block supports the following arguments: * `name` - (Required) The name of the Region, such as `ap-southeast-2`. * `kmsKeyArn` - (Optional) The Amazon Resource name (ARN) of the customer managed key. If omitted, AWS manages the AWS KMS keys for you, using an AWS owned key, as indicated by a default value of `DefaultKey`. @@ -174,7 +175,7 @@ This resource exports the following attributes in addition to the arguments abov * `status` - The overall status of a replication set. * Valid Values: `ACTIVE` | `CREATING` | `UPDATING` | `DELETING` | `FAILED` -In addition to the preceding arguments, the `region` configuration block exports the following attributes for each Region: +In addition to the preceding arguments, the `regions` configuration block exports the following attributes for each Region: * `status` - The current status of the Region. * Valid Values: `ACTIVE` | `CREATING` | `UPDATING` | `DELETING` | `FAILED` @@ -225,4 +226,4 @@ Using `terraform import`, import an Incident Manager replication. For example: % terraform import aws_ssmincidents_replication_set.replicationSetName import ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/ssmincidents_response_plan.html.markdown b/website/docs/cdktf/typescript/r/ssmincidents_response_plan.html.markdown index 937f9c80ae6d..7a91fe10e6d3 100644 --- a/website/docs/cdktf/typescript/r/ssmincidents_response_plan.html.markdown +++ b/website/docs/cdktf/typescript/r/ssmincidents_response_plan.html.markdown @@ -131,6 +131,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The name of the response plan. * `incidentTemplate` - (Required) The `incidentTemplate` configuration block is required and supports the following arguments: * `title` - (Required) The title of a generated incident. @@ -206,4 +207,4 @@ Using `terraform import`, import an Incident Manager response plan using the res % terraform import aws_ssmincidents_response_plan.responsePlanName ARNValue ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/ssmquicksetup_configuration_manager.html.markdown b/website/docs/cdktf/typescript/r/ssmquicksetup_configuration_manager.html.markdown index 1d0696c44620..92cf0b4ab15c 100644 --- a/website/docs/cdktf/typescript/r/ssmquicksetup_configuration_manager.html.markdown +++ b/website/docs/cdktf/typescript/r/ssmquicksetup_configuration_manager.html.markdown @@ -64,14 +64,14 @@ class MyConvertedCode extends TerraformStack { ConfigurationOptionsScanValue: "cron(0 1 * * ? *)", IsPolicyAttachAllowed: "false", OutputLogEnableS3: "false", - PatchBaselineRegion: Token.asString(dataAwsRegionCurrent.name), + PatchBaselineRegion: Token.asString(dataAwsRegionCurrent.region), PatchBaselineUseDefault: "default", PatchPolicyName: "example", RateControlConcurrency: "10%", RateControlErrorThreshold: "2%", SelectedPatchBaselines: selectedPatchBaselines, TargetAccounts: Token.asString(current.accountId), - TargetRegions: Token.asString(dataAwsRegionCurrent.name), + TargetRegions: Token.asString(dataAwsRegionCurrent.region), TargetType: "*", }, type: "AWSQuickSetupType-PatchPolicy", @@ -95,6 +95,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` - (Optional) Description of the configuration manager. * `tags` - (Optional) Map of tags assigned to the resource. If configured with a provider [`defaultTags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. @@ -160,4 +161,4 @@ Using `terraform import`, import SSM Quick Setup Configuration Manager using the % terraform import aws_ssmquicksetup_configuration_manager.example arn:aws:ssm-quicksetup:us-east-1:012345678901:configuration-manager/abcd-1234 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/ssoadmin_account_assignment.html.markdown b/website/docs/cdktf/typescript/r/ssoadmin_account_assignment.html.markdown index 59b086cd470c..96232a211b9e 100644 --- a/website/docs/cdktf/typescript/r/ssoadmin_account_assignment.html.markdown +++ b/website/docs/cdktf/typescript/r/ssoadmin_account_assignment.html.markdown @@ -157,6 +157,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `instanceArn` - (Required, Forces new resource) The Amazon Resource Name (ARN) of the SSO Instance. * `permissionSetArn` - (Required, Forces new resource) The Amazon Resource Name (ARN) of the Permission Set that the admin wants to grant the principal access to. * `principalId` - (Required, Forces new resource) An identifier for an object in SSO, such as a user or group. 
PrincipalIds are GUIDs (For example, `f81d4fae-7dec-11d0-a765-00a0c91e6bf6`). @@ -209,4 +210,4 @@ Using `terraform import`, import SSO Account Assignments using the `principalId` % terraform import aws_ssoadmin_account_assignment.example f81d4fae-7dec-11d0-a765-00a0c91e6bf6,GROUP,1234567890,AWS_ACCOUNT,arn:aws:sso:::permissionSet/ssoins-0123456789abcdef/ps-0123456789abcdef,arn:aws:sso:::instance/ssoins-0123456789abcdef ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/ssoadmin_application.html.markdown b/website/docs/cdktf/typescript/r/ssoadmin_application.html.markdown index c95ea888b031..e18251c3d62d 100644 --- a/website/docs/cdktf/typescript/r/ssoadmin_application.html.markdown +++ b/website/docs/cdktf/typescript/r/ssoadmin_application.html.markdown @@ -106,6 +106,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `clientToken` - (Optional) A unique, case-sensitive ID that you provide to ensure the idempotency of the request. AWS generates a random value when not provided. * `description` - (Optional) Description of the application. * `portalOptions` - (Optional) Options for the portal associated with an application. See [`portalOptions`](#portal_options-argument-reference) below. @@ -130,12 +131,34 @@ If `IDENTITY_CENTER` is set, IAM Identity Center uses SAML identity-provider ini This resource exports the following attributes in addition to the arguments above: * `applicationAccount` - AWS account ID. -* `applicationArn` - ARN of the application. -* `id` - ARN of the application. +* `applicationArn` - (**Deprecated** Reference `arn` instead) ARN of the application. 
+* `arn` - ARN of the application. +* `id` - (**Deprecated** Reference `arn` instead) ARN of the application. * `tagsAll` - Map of tags assigned to the resource, including those inherited from the provider [`defaultTags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block). ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_ssoadmin_application.example + identity = { + "arn" = "arn:aws:sso::123456789012:application/ssoins-1234567890abcdef/apl-1234567890abcdef" + } +} + +resource "aws_ssoadmin_application" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the SSO application. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import SSO Admin Application using the `id`. For example: ```typescript @@ -166,4 +189,4 @@ Using `terraform import`, import SSO Admin Application using the `id`. 
For examp % terraform import aws_ssoadmin_application.example arn:aws:sso::123456789012:application/id-12345678 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/ssoadmin_application_access_scope.html.markdown b/website/docs/cdktf/typescript/r/ssoadmin_application_access_scope.html.markdown index cede520ae640..2aa4e85e28ae 100644 --- a/website/docs/cdktf/typescript/r/ssoadmin_application_access_scope.html.markdown +++ b/website/docs/cdktf/typescript/r/ssoadmin_application_access_scope.html.markdown @@ -45,9 +45,7 @@ class MyConvertedCode extends TerraformStack { awsSsoadminApplicationExample.overrideLogicalId("example"); const awsSsoadminApplicationAccessScopeExample = new SsoadminApplicationAccessScope(this, "example_2", { - applicationArn: Token.asString( - awsSsoadminApplicationExample.applicationArn - ), + applicationArn: Token.asString(awsSsoadminApplicationExample.arn), authorizedTargets: [ "arn:aws:sso::123456789012:application/ssoins-123456789012/apl-123456789012", ], @@ -69,6 +67,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `authorizedTargets` - (Optional) Specifies an array list of ARNs that represent the authorized targets for this access scope. 
## Attribute Reference @@ -109,4 +108,4 @@ Using `terraform import`, import SSO Admin Application Access Scope using the `i % terraform import aws_ssoadmin_application_access_scope.example arn:aws:sso::123456789012:application/ssoins-123456789012/apl-123456789012,sso:account:access ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/ssoadmin_application_assignment.html.markdown b/website/docs/cdktf/typescript/r/ssoadmin_application_assignment.html.markdown index f4f328640fe7..af9899ee31f7 100644 --- a/website/docs/cdktf/typescript/r/ssoadmin_application_assignment.html.markdown +++ b/website/docs/cdktf/typescript/r/ssoadmin_application_assignment.html.markdown @@ -28,9 +28,7 @@ class MyConvertedCode extends TerraformStack { constructor(scope: Construct, name: string) { super(scope, name); new SsoadminApplicationAssignment(this, "example", { - applicationArn: Token.asString( - awsSsoadminApplicationExample.applicationArn - ), + applicationArn: Token.asString(awsSsoadminApplicationExample.arn), principalId: Token.asString(awsIdentitystoreUserExample.userId), principalType: "USER", }); @@ -54,9 +52,7 @@ class MyConvertedCode extends TerraformStack { constructor(scope: Construct, name: string) { super(scope, name); new SsoadminApplicationAssignment(this, "example", { - applicationArn: Token.asString( - awsSsoadminApplicationExample.applicationArn - ), + applicationArn: Token.asString(awsSsoadminApplicationExample.arn), principalId: Token.asString(awsIdentitystoreGroupExample.groupId), principalType: "GROUP", }); @@ -67,8 +63,9 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -The following arguments are required: +This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `applicationArn` - (Required) ARN of the application. * `principalId` - (Required) An identifier for an object in IAM Identity Center, such as a user or group. * `principalType` - (Required) Entity type for which the assignment will be created. Valid values are `USER` or `GROUP`. @@ -111,4 +108,4 @@ Using `terraform import`, import SSO Admin Application Assignment using the `id` % terraform import aws_ssoadmin_application_assignment.example arn:aws:sso::123456789012:application/id-12345678,abcd1234,USER ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/ssoadmin_application_assignment_configuration.html.markdown b/website/docs/cdktf/typescript/r/ssoadmin_application_assignment_configuration.html.markdown index 07192d33638a..7bce0368074b 100644 --- a/website/docs/cdktf/typescript/r/ssoadmin_application_assignment_configuration.html.markdown +++ b/website/docs/cdktf/typescript/r/ssoadmin_application_assignment_configuration.html.markdown @@ -33,9 +33,7 @@ class MyConvertedCode extends TerraformStack { constructor(scope: Construct, name: string) { super(scope, name); new SsoadminApplicationAssignmentConfiguration(this, "example", { - applicationArn: Token.asString( - awsSsoadminApplicationExample.applicationArn - ), + applicationArn: Token.asString(awsSsoadminApplicationExample.arn), assignmentRequired: true, }); } @@ -45,8 +43,9 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -The following arguments are required: +This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `applicationArn` - (Required) ARN of the application. * `assignmentRequired` - (Required) Indicates whether users must have an explicit assignment to access the application. If `false`, all users have access to the application. @@ -58,6 +57,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_ssoadmin_application_assignment_configuration.example + identity = { + "arn" = "arn:aws:sso::123456789012:application/ssoins-1234567890abcdef/apl-1234567890abcdef" + } +} + +resource "aws_ssoadmin_application_assignment_configuration" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the SSO application. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import SSO Admin Application Assignment Configuration using the `id`. 
For example: ```typescript @@ -88,4 +108,4 @@ Using `terraform import`, import SSO Admin Application Assignment Configuration % terraform import aws_ssoadmin_application_assignment_configuration.example arn:aws:sso::123456789012:application/id-12345678 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/ssoadmin_customer_managed_policy_attachment.html.markdown b/website/docs/cdktf/typescript/r/ssoadmin_customer_managed_policy_attachment.html.markdown index 555b7541405e..0fa37fc4eb80 100644 --- a/website/docs/cdktf/typescript/r/ssoadmin_customer_managed_policy_attachment.html.markdown +++ b/website/docs/cdktf/typescript/r/ssoadmin_customer_managed_policy_attachment.html.markdown @@ -92,6 +92,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `instanceArn` - (Required, Forces new resource) The Amazon Resource Name (ARN) of the SSO Instance under which the operation will be executed. * `permissionSetArn` - (Required, Forces new resource) The Amazon Resource Name (ARN) of the Permission Set. * `customerManagedPolicyReference` - (Required, Forces new resource) Specifies the name and path of a customer managed policy. See below. 
@@ -148,4 +149,4 @@ Using `terraform import`, import SSO Managed Policy Attachments using the `name` % terraform import aws_ssoadmin_customer_managed_policy_attachment.example TestPolicy,/,arn:aws:sso:::permissionSet/ssoins-2938j0x8920sbj72/ps-80383020jr9302rk,arn:aws:sso:::instance/ssoins-2938j0x8920sbj72 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/ssoadmin_instance_access_control_attributes.html.markdown b/website/docs/cdktf/typescript/r/ssoadmin_instance_access_control_attributes.html.markdown index b3aa9cc2dff9..6001b44a3406 100644 --- a/website/docs/cdktf/typescript/r/ssoadmin_instance_access_control_attributes.html.markdown +++ b/website/docs/cdktf/typescript/r/ssoadmin_instance_access_control_attributes.html.markdown @@ -65,6 +65,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `instanceArn` - (Required, Forces new resource) The Amazon Resource Name (ARN) of the SSO Instance. * `attribute` - (Required) See [AccessControlAttribute](#accesscontrolattribute) for more details. 
@@ -115,4 +116,4 @@ Using `terraform import`, import SSO Account Assignments using the `instanceArn` % terraform import aws_ssoadmin_instance_access_control_attributes.example arn:aws:sso:::instance/ssoins-0123456789abcdef ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/ssoadmin_managed_policy_attachment.html.markdown b/website/docs/cdktf/typescript/r/ssoadmin_managed_policy_attachment.html.markdown index bc94e068d13b..6dc23d821a24 100644 --- a/website/docs/cdktf/typescript/r/ssoadmin_managed_policy_attachment.html.markdown +++ b/website/docs/cdktf/typescript/r/ssoadmin_managed_policy_attachment.html.markdown @@ -142,6 +142,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `instanceArn` - (Required, Forces new resource) The Amazon Resource Name (ARN) of the SSO Instance under which the operation will be executed. * `managedPolicyArn` - (Required, Forces new resource) The IAM managed policy Amazon Resource Name (ARN) to be attached to the Permission Set. * `permissionSetArn` - (Required, Forces new resource) The Amazon Resource Name (ARN) of the Permission Set. 
@@ -192,4 +193,4 @@ Using `terraform import`, import SSO Managed Policy Attachments using the `manag % terraform import aws_ssoadmin_managed_policy_attachment.example arn:aws:iam::aws:policy/AlexaForBusinessDeviceSetup,arn:aws:sso:::permissionSet/ssoins-2938j0x8920sbj72/ps-80383020jr9302rk,arn:aws:sso:::instance/ssoins-2938j0x8920sbj72 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/ssoadmin_permission_set.html.markdown b/website/docs/cdktf/typescript/r/ssoadmin_permission_set.html.markdown index a0c106523117..bbed4de0a38a 100644 --- a/website/docs/cdktf/typescript/r/ssoadmin_permission_set.html.markdown +++ b/website/docs/cdktf/typescript/r/ssoadmin_permission_set.html.markdown @@ -55,6 +55,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` - (Optional) The description of the Permission Set. * `instanceArn` - (Required, Forces new resource) The Amazon Resource Name (ARN) of the SSO Instance under which the operation will be executed. * `name` - (Required, Forces new resource) The name of the Permission Set. 
@@ -109,4 +110,4 @@ Using `terraform import`, import SSO Permission Sets using the `arn` and `instan % terraform import aws_ssoadmin_permission_set.example arn:aws:sso:::permissionSet/ssoins-2938j0x8920sbj72/ps-80383020jr9302rk,arn:aws:sso:::instance/ssoins-2938j0x8920sbj72 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/ssoadmin_permission_set_inline_policy.html.markdown b/website/docs/cdktf/typescript/r/ssoadmin_permission_set_inline_policy.html.markdown index 310afb6b234e..eb0beadcd511 100644 --- a/website/docs/cdktf/typescript/r/ssoadmin_permission_set_inline_policy.html.markdown +++ b/website/docs/cdktf/typescript/r/ssoadmin_permission_set_inline_policy.html.markdown @@ -85,6 +85,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `inlinePolicy` - (Required) The IAM inline policy to attach to a Permission Set. * `instanceArn` - (Required, Forces new resource) The Amazon Resource Name (ARN) of the SSO Instance under which the operation will be executed. * `permissionSetArn` - (Required, Forces new resource) The Amazon Resource Name (ARN) of the Permission Set. 
@@ -134,4 +135,4 @@ Using `terraform import`, import SSO Permission Set Inline Policies using the `p % terraform import aws_ssoadmin_permission_set_inline_policy.example arn:aws:sso:::permissionSet/ssoins-2938j0x8920sbj72/ps-80383020jr9302rk,arn:aws:sso:::instance/ssoins-2938j0x8920sbj72 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/ssoadmin_permissions_boundary_attachment.html.markdown b/website/docs/cdktf/typescript/r/ssoadmin_permissions_boundary_attachment.html.markdown index 2c971742f637..970ff0c955fb 100644 --- a/website/docs/cdktf/typescript/r/ssoadmin_permissions_boundary_attachment.html.markdown +++ b/website/docs/cdktf/typescript/r/ssoadmin_permissions_boundary_attachment.html.markdown @@ -120,8 +120,9 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -The following arguments are required: +This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `instanceArn` - (Required, Forces new resource) The Amazon Resource Name (ARN) of the SSO Instance under which the operation will be executed. * `permissionSetArn` - (Required, Forces new resource) The Amazon Resource Name (ARN) of the Permission Set. * `permissionsBoundary` - (Required, Forces new resource) The permissions boundary policy. See below. 
@@ -185,4 +186,4 @@ Using `terraform import`, import SSO Admin Permissions Boundary Attachments usin % terraform import aws_ssoadmin_permissions_boundary_attachment.example arn:aws:sso:::permissionSet/ssoins-2938j0x8920sbj72/ps-80383020jr9302rk,arn:aws:sso:::instance/ssoins-2938j0x8920sbj72 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/ssoadmin_trusted_token_issuer.html.markdown b/website/docs/cdktf/typescript/r/ssoadmin_trusted_token_issuer.html.markdown index 353dc599528b..bf2e4466f3ae 100644 --- a/website/docs/cdktf/typescript/r/ssoadmin_trusted_token_issuer.html.markdown +++ b/website/docs/cdktf/typescript/r/ssoadmin_trusted_token_issuer.html.markdown @@ -70,6 +70,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `clientToken` - (Optional) A unique, case-sensitive ID that you provide to ensure the idempotency of the request. AWS generates a random value when not provided. * `tags` - (Optional) Key-value mapping of resource tags. If configured with a provider [`defaultTags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. @@ -124,4 +125,4 @@ Using `terraform import`, import SSO Admin Trusted Token Issuer using the `id`. 
% terraform import aws_ssoadmin_trusted_token_issuer.example arn:aws:sso::123456789012:trustedTokenIssuer/ssoins-lu1ye3gew4mbc7ju/tti-2657c556-9707-11ee-b9d1-0242ac120002 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/storagegateway_cache.html.markdown b/website/docs/cdktf/typescript/r/storagegateway_cache.html.markdown index 15c0c50e25e4..2a95cd476189 100644 --- a/website/docs/cdktf/typescript/r/storagegateway_cache.html.markdown +++ b/website/docs/cdktf/typescript/r/storagegateway_cache.html.markdown @@ -41,6 +41,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `diskId` - (Required) Local disk identifier. For example, `pci-0000:03:00.0-scsi-0:0:0:0`. * `gatewayArn` - (Required) The Amazon Resource Name (ARN) of the gateway. 
@@ -82,4 +83,4 @@ Using `terraform import`, import `aws_storagegateway_cache` using the gateway Am % terraform import aws_storagegateway_cache.example arn:aws:storagegateway:us-east-1:123456789012:gateway/sgw-12345678:pci-0000:03:00.0-scsi-0:0:0:0 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/storagegateway_cached_iscsi_volume.html.markdown b/website/docs/cdktf/typescript/r/storagegateway_cached_iscsi_volume.html.markdown index 116808244987..8b72d144aeff 100644 --- a/website/docs/cdktf/typescript/r/storagegateway_cached_iscsi_volume.html.markdown +++ b/website/docs/cdktf/typescript/r/storagegateway_cached_iscsi_volume.html.markdown @@ -106,6 +106,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `gatewayArn` - (Required) The Amazon Resource Name (ARN) of the gateway. * `networkInterfaceId` - (Required) The network interface of the gateway on which to expose the iSCSI target. Only IPv4 addresses are accepted. * `targetName` - (Required) The name of the iSCSI target used by initiators to connect to the target and as a suffix for the target ARN. The target name must be unique across all volumes of a gateway. 
@@ -162,4 +163,4 @@ Using `terraform import`, import `aws_storagegateway_cached_iscsi_volume` using % terraform import aws_storagegateway_cached_iscsi_volume.example arn:aws:storagegateway:us-east-1:123456789012:gateway/sgw-12345678/volume/vol-12345678 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/storagegateway_file_system_association.html.markdown b/website/docs/cdktf/typescript/r/storagegateway_file_system_association.html.markdown index 0bb9d5b2ce68..ef0955939f97 100644 --- a/website/docs/cdktf/typescript/r/storagegateway_file_system_association.html.markdown +++ b/website/docs/cdktf/typescript/r/storagegateway_file_system_association.html.markdown @@ -119,6 +119,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `gatewayArn` - (Required) The Amazon Resource Name (ARN) of the gateway. * `locationArn` - (Required) The Amazon Resource Name (ARN) of the Amazon FSx file system to associate with the FSx File Gateway. * `username` - (Required) The user name of the user credential that has permission to access the root share of the Amazon FSx file system. The user account must belong to the Amazon FSx delegated admin user group. 
@@ -181,4 +182,4 @@ Using `terraform import`, import `aws_storagegateway_file_system_association` us % terraform import aws_storagegateway_file_system_association.example arn:aws:storagegateway:us-east-1:123456789012:fs-association/fsa-0DA347732FDB40125 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/storagegateway_gateway.html.markdown b/website/docs/cdktf/typescript/r/storagegateway_gateway.html.markdown index 7ddab12005b2..0f571bbbe1ac 100644 --- a/website/docs/cdktf/typescript/r/storagegateway_gateway.html.markdown +++ b/website/docs/cdktf/typescript/r/storagegateway_gateway.html.markdown @@ -191,6 +191,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `gatewayName` - (Required) Name of the gateway. * `gatewayTimezone` - (Required) Time zone for the gateway. The time zone is of the format "GMT", "GMT-hr:mm", or "GMT+hr:mm". For example, `GMT-4:00` indicates the time is 4 hours behind GMT. The time zone is used, for example, for scheduling snapshots and your gateway's maintenance schedule. * `activationKey` - (Optional) Gateway activation key during resource creation. Conflicts with `gatewayIpAddress`. Additional information is available in the [Storage Gateway User Guide](https://docs.aws.amazon.com/storagegateway/latest/userguide/get-activation-key.html). 
@@ -321,4 +322,4 @@ class MyConvertedCode extends TerraformStack { ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/storagegateway_nfs_file_share.html.markdown b/website/docs/cdktf/typescript/r/storagegateway_nfs_file_share.html.markdown index f98b36f512b6..2233d4515138 100644 --- a/website/docs/cdktf/typescript/r/storagegateway_nfs_file_share.html.markdown +++ b/website/docs/cdktf/typescript/r/storagegateway_nfs_file_share.html.markdown @@ -41,6 +41,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `clientList` - (Required) The list of clients that are allowed to access the file gateway. The list must contain either valid IP addresses or valid CIDR blocks. Set to `["0.0.0.0/0"]` to not limit access. Minimum 1 item. Maximum 100 items. * `gatewayArn` - (Required) Amazon Resource Name (ARN) of the file gateway. * `locationArn` - (Required) The ARN of the backed storage used for storing file data. 
@@ -127,4 +128,4 @@ Using `terraform import`, import `aws_storagegateway_nfs_file_share` using the N % terraform import aws_storagegateway_nfs_file_share.example arn:aws:storagegateway:us-east-1:123456789012:share/share-12345678 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/storagegateway_smb_file_share.html.markdown b/website/docs/cdktf/typescript/r/storagegateway_smb_file_share.html.markdown index 31a7344ddbf0..484a2445a80d 100644 --- a/website/docs/cdktf/typescript/r/storagegateway_smb_file_share.html.markdown +++ b/website/docs/cdktf/typescript/r/storagegateway_smb_file_share.html.markdown @@ -72,6 +72,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `gatewayArn` - (Required) Amazon Resource Name (ARN) of the file gateway. * `locationArn` - (Required) The ARN of the backed storage used for storing file data. * `vpcEndpointDnsName` - (Optional) The DNS name of the VPC endpoint for S3 private link. @@ -89,8 +90,6 @@ This resource supports the following arguments: * `objectAcl` - (Optional) Access Control List permission for S3 objects. Defaults to `private`. * `oplocksEnabled` - (Optional) Boolean to indicate Opportunistic lock (oplock) status. Defaults to `true`. * `cacheAttributes` - (Optional) Refresh cache information. see [`cacheAttributes` Block](#cache_attributes-block) for more details. - - **Note:** If you have previously included a `cacheAttributes` block in your configuration, removing it will not reset the refresh cache value and the previous value will remain. You must explicitly set a new value to change it. 
* `readOnly` - (Optional) Boolean to indicate write status of file share. File share does not accept writes if `true`. Defaults to `false`. * `requesterPays` - (Optional) Boolean who pays the cost of the request and the data download from the Amazon S3 bucket. Set this value to `true` if you want the requester to pay instead of the bucket owner. Defaults to `false`. * `smbAclEnabled` - (Optional) Set this value to `true` to enable ACL (access control list) on the SMB fileshare. Set it to `false` to map file and directory permissions to the POSIX permissions. This setting applies only to `ActiveDirectory` authentication type. @@ -100,6 +99,8 @@ This resource supports the following arguments: * `notificationPolicy` - (Optional) The notification policy of the file share. For more information see the [AWS Documentation](https://docs.aws.amazon.com/storagegateway/latest/APIReference/API_CreateNFSFileShare.html#StorageGateway-CreateNFSFileShare-request-NotificationPolicy). Default value is `{}`. * `tags` - (Optional) Key-value map of resource tags. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. +**Note:** If you have previously included a `cacheAttributes` block in your configuration, removing it will not reset the refresh cache value and the previous value will remain. You must explicitly set a new value to change it. 
+ ### `cacheAttributes` Block The `cacheAttributes` configuration block supports the following arguments: @@ -158,4 +159,4 @@ Using `terraform import`, import `aws_storagegateway_smb_file_share` using the S % terraform import aws_storagegateway_smb_file_share.example arn:aws:storagegateway:us-east-1:123456789012:share/share-12345678 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/storagegateway_stored_iscsi_volume.html.markdown b/website/docs/cdktf/typescript/r/storagegateway_stored_iscsi_volume.html.markdown index 41129cb559e0..9d07127270c5 100644 --- a/website/docs/cdktf/typescript/r/storagegateway_stored_iscsi_volume.html.markdown +++ b/website/docs/cdktf/typescript/r/storagegateway_stored_iscsi_volume.html.markdown @@ -73,6 +73,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `gatewayArn` - (Required) The Amazon Resource Name (ARN) of the gateway. * `networkInterfaceId` - (Required) The network interface of the gateway on which to expose the iSCSI target. Only IPv4 addresses are accepted. * `targetName` - (Required) The name of the iSCSI target used by initiators to connect to the target and as a suffix for the target ARN. The target name must be unique across all volumes of a gateway. 
@@ -133,4 +134,4 @@ Using `terraform import`, import `aws_storagegateway_stored_iscsi_volume` using % terraform import aws_storagegateway_stored_iscsi_volume.example arn:aws:storagegateway:us-east-1:123456789012:gateway/sgw-12345678/volume/vol-12345678 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/storagegateway_tape_pool.html.markdown b/website/docs/cdktf/typescript/r/storagegateway_tape_pool.html.markdown index 895f794eae4c..4f237add56a5 100644 --- a/website/docs/cdktf/typescript/r/storagegateway_tape_pool.html.markdown +++ b/website/docs/cdktf/typescript/r/storagegateway_tape_pool.html.markdown @@ -39,6 +39,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `poolName` - (Required) The name of the new custom tape pool. * `storageClass` - (Required) The storage class that is associated with the new custom pool. When you use your backup application to eject the tape, the tape is archived directly into the storage class that corresponds to the pool. Possible values are `DEEP_ARCHIVE` or `GLACIER`. * `retentionLockType` - (Required) Tape retention lock can be configured in two modes. When configured in governance mode, AWS accounts with specific IAM permissions are authorized to remove the tape retention lock from archived virtual tapes. When configured in compliance mode, the tape retention lock cannot be removed by any user, including the root AWS account. Possible values are `COMPLIANCE`, `GOVERNANCE`, and `NONE`. Default value is `NONE`. 
@@ -84,4 +85,4 @@ Using `terraform import`, import `aws_storagegateway_tape_pool` using the volume % terraform import aws_storagegateway_tape_pool.example arn:aws:storagegateway:us-east-1:123456789012:tapepool/pool-12345678 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/storagegateway_upload_buffer.html.markdown b/website/docs/cdktf/typescript/r/storagegateway_upload_buffer.html.markdown index 4214e7409e6f..b111a7bae12f 100644 --- a/website/docs/cdktf/typescript/r/storagegateway_upload_buffer.html.markdown +++ b/website/docs/cdktf/typescript/r/storagegateway_upload_buffer.html.markdown @@ -82,6 +82,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `diskId` - (Optional) Local disk identifier. For example, `pci-0000:03:00.0-scsi-0:0:0:0`. * `diskPath` - (Optional) Local disk path. For example, `/dev/nvme1n1`. * `gatewayArn` - (Required) The Amazon Resource Name (ARN) of the gateway. 
@@ -124,4 +125,4 @@ Using `terraform import`, import `aws_storagegateway_upload_buffer` using the ga % terraform import aws_storagegateway_upload_buffer.example arn:aws:storagegateway:us-east-1:123456789012:gateway/sgw-12345678:pci-0000:03:00.0-scsi-0:0:0:0 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/storagegateway_working_storage.html.markdown b/website/docs/cdktf/typescript/r/storagegateway_working_storage.html.markdown index 3d00cf6433d6..89a109c3f7c2 100644 --- a/website/docs/cdktf/typescript/r/storagegateway_working_storage.html.markdown +++ b/website/docs/cdktf/typescript/r/storagegateway_working_storage.html.markdown @@ -41,6 +41,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `diskId` - (Required) Local disk identifier. For example, `pci-0000:03:00.0-scsi-0:0:0:0`. * `gatewayArn` - (Required) The Amazon Resource Name (ARN) of the gateway. 
@@ -82,4 +83,4 @@ Using `terraform import`, import `aws_storagegateway_working_storage` using the % terraform import aws_storagegateway_working_storage.example arn:aws:storagegateway:us-east-1:123456789012:gateway/sgw-12345678:pci-0000:03:00.0-scsi-0:0:0:0 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/subnet.html.markdown b/website/docs/cdktf/typescript/r/subnet.html.markdown index 795a55bb1d9a..bb822a2d96d3 100644 --- a/website/docs/cdktf/typescript/r/subnet.html.markdown +++ b/website/docs/cdktf/typescript/r/subnet.html.markdown @@ -81,6 +81,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `assignIpv6AddressOnCreation` - (Optional) Specify true to indicate that network interfaces created in the specified subnet should be assigned an IPv6 address. Default is `false` @@ -123,6 +124,32 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_subnet.example + identity = { + id = "subnet-9d4a7b6c" + } +} + +resource "aws_subnet" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `id` (String) ID of the subnet. + +#### Optional + +* `accountId` (String) AWS Account where this resource is managed. +* `region` (String) Region where this resource is managed. 
+ In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import subnets using the subnet `id`. For example: ```typescript @@ -137,7 +164,7 @@ import { Subnet } from "./.gen/providers/aws/subnet"; class MyConvertedCode extends TerraformStack { constructor(scope: Construct, name: string) { super(scope, name); - Subnet.generateConfigForImport(this, "publicSubnet", "subnet-9d4a7b6c"); + Subnet.generateConfigForImport(this, "example", "subnet-9d4a7b6c"); } } @@ -146,7 +173,7 @@ class MyConvertedCode extends TerraformStack { Using `terraform import`, import subnets using the subnet `id`. For example: ```console -% terraform import aws_subnet.public_subnet subnet-9d4a7b6c +% terraform import aws_subnet.example subnet-9d4a7b6c ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/swf_domain.html.markdown b/website/docs/cdktf/typescript/r/swf_domain.html.markdown index eafc2452a214..cb317b4fe231 100644 --- a/website/docs/cdktf/typescript/r/swf_domain.html.markdown +++ b/website/docs/cdktf/typescript/r/swf_domain.html.markdown @@ -42,6 +42,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Optional, Forces new resource) The name of the domain. If omitted, Terraform will assign a random, unique name. * `namePrefix` - (Optional, Forces new resource) Creates a unique name beginning with the specified prefix. Conflicts with `name`. * `description` - (Optional, Forces new resource) The domain description. @@ -84,4 +85,4 @@ Using `terraform import`, import SWF Domains using the `name`. 
For example: % terraform import aws_swf_domain.foo test-domain ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/synthetics_canary.html.markdown b/website/docs/cdktf/typescript/r/synthetics_canary.html.markdown index 788372e6f1a4..848d8b83b56d 100644 --- a/website/docs/cdktf/typescript/r/synthetics_canary.html.markdown +++ b/website/docs/cdktf/typescript/r/synthetics_canary.html.markdown @@ -51,23 +51,24 @@ The following arguments are required: * `artifactS3Location` - (Required) Location in Amazon S3 where Synthetics stores artifacts from the test runs of this canary. * `executionRoleArn` - (Required) ARN of the IAM role to be used to run the canary. see [AWS Docs](https://docs.aws.amazon.com/AmazonSynthetics/latest/APIReference/API_CreateCanary.html#API_CreateCanary_RequestSyntax) for permissions needs for IAM Role. * `handler` - (Required) Entry point to use for the source code when running the canary. This value must end with the string `.handler` . -* `name` - (Required) Name for this canary. Has a maximum length of 21 characters. Valid characters are lowercase alphanumeric, hyphen, or underscore. +* `name` - (Required) Name for this canary. Has a maximum length of 255 characters. Valid characters are lowercase alphanumeric, hyphen, or underscore. * `runtimeVersion` - (Required) Runtime version to use for the canary. Versions change often so consult the [Amazon CloudWatch documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch_Synthetics_Canaries_Library.html) for the latest valid versions. Values include `syn-python-selenium-1.0`, `syn-nodejs-puppeteer-3.0`, `syn-nodejs-2.2`, `syn-nodejs-2.1`, `syn-nodejs-2.0`, and `syn-1.0`. -* `schedule` - (Required) Configuration block providing how often the canary is to run and when these test runs are to stop. Detailed below. 
+* `schedule` - (Required) Configuration block providing how often the canary is to run and when these test runs are to stop. Detailed [below](#schedule). The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `artifactConfig` - (Optional) configuration for canary artifacts, including the encryption-at-rest settings for artifacts that the canary uploads to Amazon S3. See [Artifact Config](#artifact_config). * `deleteLambda` - (Optional) Specifies whether to also delete the Lambda functions and layers used by this canary. The default is `false`. -* `vpcConfig` - (Optional) Configuration block. Detailed below. * `failureRetentionPeriod` - (Optional) Number of days to retain data about failed runs of this canary. If you omit this field, the default of 31 days is used. The valid range is 1 to 455 days. -* `runConfig` - (Optional) Configuration block for individual canary runs. Detailed below. +* `runConfig` - (Optional) Configuration block for individual canary runs. Detailed [below](#run_config). * `s3Bucket` - (Optional) Full bucket name which is used if your canary script is located in S3. The bucket must already exist. **Conflicts with `zipFile`.** * `s3Key` - (Optional) S3 key of your script. **Conflicts with `zipFile`.** * `s3Version` - (Optional) S3 version ID of your script. **Conflicts with `zipFile`.** * `startCanary` - (Optional) Whether to run or stop the canary. * `successRetentionPeriod` - (Optional) Number of days to retain data about successful runs of this canary. If you omit this field, the default of 31 days is used. The valid range is 1 to 455 days. * `tags` - (Optional) Key-value map of resource tags. 
If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. -* `artifactConfig` - (Optional) configuration for canary artifacts, including the encryption-at-rest settings for artifacts that the canary uploads to Amazon S3. See [Artifact Config](#artifact_config). +* `vpcConfig` - (Optional) Configuration block. Detailed [below](#vpc_config). * `zipFile` - (Optional) ZIP file that contains the script, if you input your canary script directly into the canary instead of referring to an S3 location. It can be up to 225KB. **Conflicts with `s3Bucket`, `s3Key`, and `s3Version`.** ### artifact_config @@ -83,6 +84,11 @@ The following arguments are optional: * `expression` - (Required) Rate expression or cron expression that defines how often the canary is to run. For rate expression, the syntax is `rate(number unit)`. _unit_ can be `minute`, `minutes`, or `hour`. For cron expression, the syntax is `cron(expression)`. For more information about the syntax for cron expressions, see [Scheduling canary runs using cron](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch_Synthetics_Canaries_cron.html). * `durationInSeconds` - (Optional) Duration in seconds, for the canary to continue making regular runs according to the schedule in the Expression value. +* `retryConfig` - (Optional) Configuration block for canary retries. Detailed [below](#retry_config). + +### retry_config + +* `maxRetries` - (Required) Maximum number of retries. The value must be less than or equal to `2`. If `maxRetries` is `2`, `runConfig.timeoutInSeconds` should be less than 600 seconds. Defaults to `0`.
The value you specify must be a multiple of 64. * `activeTracing` - (Optional) Whether this canary is to use active AWS X-Ray tracing when it runs. You can enable active tracing only for canaries that use version syn-nodejs-2.0 or later for their canary runtime. * `environmentVariables` - (Optional) Map of environment variables that are accessible from the canary during execution. Please see [AWS Docs](https://docs.aws.amazon.com/lambda/latest/dg/configuration-envvars.html#configuration-envvars-runtime) for variables reserved for Lambda. +* `ephemeralStorage` - (Optional) Amount of ephemeral storage (in MB) allocated for the canary run during execution. Defaults to 1024. ### vpc_config @@ -97,6 +104,7 @@ If this canary tests an endpoint in a VPC, this structure contains information a * `subnetIds` - (Required) IDs of the subnets where this canary is to run. * `securityGroupIds` - (Required) IDs of the security groups for this canary. +* `ipv6AllowedForDualStack` - (Optional) If `true`, allow outbound IPv6 traffic on VPC canaries that are connected to dual-stack subnets. The default is `false`. ## Attribute Reference @@ -149,4 +157,4 @@ Using `terraform import`, import Synthetics Canaries using the `name`. For examp % terraform import aws_synthetics_canary.some some-canary ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/synthetics_group.html.markdown b/website/docs/cdktf/typescript/r/synthetics_group.html.markdown index 38d47ad3e94f..99a1ee207683 100644 --- a/website/docs/cdktf/typescript/r/synthetics_group.html.markdown +++ b/website/docs/cdktf/typescript/r/synthetics_group.html.markdown @@ -44,6 +44,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `tags` - (Optional) Key-value mapping of resource tags. If configured with a provider [`defaultTags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. ## Attribute Reference @@ -82,4 +83,4 @@ Using `terraform import`, import CloudWatch Synthetics Group using the `name`. F % terraform import aws_synthetics_group.example example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/synthetics_group_association.html.markdown b/website/docs/cdktf/typescript/r/synthetics_group_association.html.markdown index 1eb79b1e410b..83b91d656c6f 100644 --- a/website/docs/cdktf/typescript/r/synthetics_group_association.html.markdown +++ b/website/docs/cdktf/typescript/r/synthetics_group_association.html.markdown @@ -39,8 +39,9 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -The following arguments are required: +This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `groupName` - (Required) Name of the group that the canary will be associated with. * `canaryArn` - (Required) ARN of the canary. 
@@ -83,4 +84,4 @@ Using `terraform import`, import CloudWatch Synthetics Group Association using t % terraform import aws_synthetics_group_association.example arn:aws:synthetics:us-west-2:123456789012:canary:tf-acc-test-abcd1234,examplename ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/timestreaminfluxdb_db_cluster.html.markdown b/website/docs/cdktf/typescript/r/timestreaminfluxdb_db_cluster.html.markdown new file mode 100644 index 000000000000..378b53ad0403 --- /dev/null +++ b/website/docs/cdktf/typescript/r/timestreaminfluxdb_db_cluster.html.markdown @@ -0,0 +1,364 @@ +--- +subcategory: "Timestream for InfluxDB" +layout: "aws" +page_title: "AWS: aws_timestreaminfluxdb_db_cluster" +description: |- + Terraform resource for managing an Amazon Timestream for InfluxDB read-replica cluster. +--- + + + +# Resource: aws_timestreaminfluxdb_db_cluster + +Terraform resource for managing an Amazon Timestream for InfluxDB read-replica cluster. + +~> **NOTE:** This resource requires a subscription to [Timestream for InfluxDB Read Replicas (Add-On) on the AWS Marketplace](https://aws.amazon.com/marketplace/pp/prodview-lftzfxtb5xlv4?applicationId=AWS-Marketplace-Console&ref_=beagle&sr=0-2). + +## Example Usage + +### Basic Usage + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { Token, TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. 
+ */ +import { TimestreaminfluxdbDbCluster } from "./.gen/providers/aws/timestreaminfluxdb-db-cluster"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + new TimestreaminfluxdbDbCluster(this, "example", { + allocatedStorage: 20, + bucket: "example-bucket-name", + dbInstanceType: "db.influx.medium", + failoverMode: "AUTOMATIC", + name: "example-db-cluster", + organization: "organization", + password: "example-password", + port: 8086, + username: "admin", + vpcSecurityGroupIds: [Token.asString(awsSecurityGroupExample.id)], + vpcSubnetIds: [example1.id, example2.id], + }); + } +} + +``` + +### Usage with Prerequisite Resources + +All Timestream for InfluxDB clusters require a VPC, at least two subnets, and a security group. The following example shows how these prerequisite resources can be created and used with `aws_timestreaminfluxdb_db_cluster`. + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { Token, TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. + */ +import { SecurityGroup } from "./.gen/providers/aws/security-group"; +import { Subnet } from "./.gen/providers/aws/subnet"; +import { TimestreaminfluxdbDbCluster } from "./.gen/providers/aws/timestreaminfluxdb-db-cluster"; +import { Vpc } from "./.gen/providers/aws/vpc"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + const example = new Vpc(this, "example", { + cidrBlock: "10.0.0.0/16", + }); + const awsSecurityGroupExample = new SecurityGroup(this, "example_1", { + name: "example", + vpcId: example.id, + }); + /*This allows the Terraform resource name to match the original name. 
You can remove the call if you don't need them to match.*/ + awsSecurityGroupExample.overrideLogicalId("example"); + const example1 = new Subnet(this, "example_1_2", { + cidrBlock: "10.0.1.0/24", + vpcId: example.id, + }); + /*This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match.*/ + example1.overrideLogicalId("example_1"); + const example2 = new Subnet(this, "example_2", { + cidrBlock: "10.0.2.0/24", + vpcId: example.id, + }); + const awsTimestreaminfluxdbDbClusterExample = + new TimestreaminfluxdbDbCluster(this, "example_4", { + allocatedStorage: 20, + bucket: "example-bucket-name", + dbInstanceType: "db.influx.medium", + name: "example-db-cluster", + organization: "organization", + password: "example-password", + username: "admin", + vpcSecurityGroupIds: [Token.asString(awsSecurityGroupExample.id)], + vpcSubnetIds: [example1.id, example2.id], + }); + /*This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match.*/ + awsTimestreaminfluxdbDbClusterExample.overrideLogicalId("example"); + } +} + +``` + +### Usage with Public Internet Access Enabled + +The following configuration shows how to define the necessary resources and arguments to allow public internet access on your Timestream for InfluxDB read-replica cluster's primary endpoint (simply referred to as "endpoint") and read endpoint on port `8086`. After applying this configuration, the cluster's InfluxDB UI can be accessed by visiting your cluster's primary endpoint at port `8086`. + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { Token, Op, TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. 
+ */ +import { InternetGateway } from "./.gen/providers/aws/internet-gateway"; +import { Route } from "./.gen/providers/aws/route"; +import { RouteTableAssociation } from "./.gen/providers/aws/route-table-association"; +import { SecurityGroup } from "./.gen/providers/aws/security-group"; +import { Subnet } from "./.gen/providers/aws/subnet"; +import { TimestreaminfluxdbDbCluster } from "./.gen/providers/aws/timestreaminfluxdb-db-cluster"; +import { Vpc } from "./.gen/providers/aws/vpc"; +import { VpcSecurityGroupIngressRule } from "./.gen/providers/aws/vpc-security-group-ingress-rule"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + const example = new Vpc(this, "example", { + cidrBlock: "10.0.0.0/16", + }); + const awsInternetGatewayExample = new InternetGateway(this, "example_1", { + tags: { + Name: "example", + }, + vpcId: example.id, + }); + /*This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match.*/ + awsInternetGatewayExample.overrideLogicalId("example"); + new Route(this, "test_route", { + destinationCidrBlock: "0.0.0.0/0", + gatewayId: Token.asString(awsInternetGatewayExample.id), + routeTableId: example.mainRouteTableId, + }); + new RouteTableAssociation(this, "test_route_table_association", { + routeTableId: example.mainRouteTableId, + subnetId: testSubnet.id, + }); + const awsSecurityGroupExample = new SecurityGroup(this, "example_4", { + name: "example", + vpcId: example.id, + }); + /*This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match.*/ + awsSecurityGroupExample.overrideLogicalId("example"); + const example1 = new Subnet(this, "example_1_5", { + cidrBlock: "10.0.1.0/24", + vpcId: example.id, + }); + /*This allows the Terraform resource name to match the original name. 
You can remove the call if you don't need them to match.*/ + example1.overrideLogicalId("example_1"); + const example2 = new Subnet(this, "example_2", { + cidrBlock: "10.0.2.0/24", + vpcId: example.id, + }); + const awsTimestreaminfluxdbDbClusterExample = + new TimestreaminfluxdbDbCluster(this, "example_7", { + allocatedStorage: 20, + bucket: "example-bucket-name", + dbInstanceType: "db.influx.medium", + name: "example-db-cluster", + organization: "organization", + password: "example-password", + publiclyAccessible: true, + username: "admin", + vpcSecurityGroupIds: [Token.asString(awsSecurityGroupExample.id)], + vpcSubnetIds: [example1.id, example2.id], + }); + /*This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match.*/ + awsTimestreaminfluxdbDbClusterExample.overrideLogicalId("example"); + const awsVpcSecurityGroupIngressRuleExample = + new VpcSecurityGroupIngressRule(this, "example_8", { + ipProtocol: Token.asString(Op.negate(1)), + referencedSecurityGroupId: Token.asString(awsSecurityGroupExample.id), + securityGroupId: Token.asString(awsSecurityGroupExample.id), + }); + /*This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match.*/ + awsVpcSecurityGroupIngressRuleExample.overrideLogicalId("example"); + } +} + +``` + +### Usage with S3 Log Delivery Enabled + +You can use an S3 bucket to store logs generated by your Timestream for InfluxDB cluster. The following example shows what resources and arguments are required to configure an S3 bucket for logging, including the IAM policy that needs to be set in order to allow Timestream for InfluxDB to place logs in your S3 bucket. The configuration of the required VPC, security group, and subnets have been left out of the example for brevity. + +```typescript +// DO NOT EDIT. 
Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { Token, TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. + */ +import { DataAwsIamPolicyDocument } from "./.gen/providers/aws/data-aws-iam-policy-document"; +import { S3Bucket } from "./.gen/providers/aws/s3-bucket"; +import { S3BucketPolicy } from "./.gen/providers/aws/s3-bucket-policy"; +import { TimestreaminfluxdbDbCluster } from "./.gen/providers/aws/timestreaminfluxdb-db-cluster"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + const example = new S3Bucket(this, "example", { + bucket: "example-s3-bucket", + forceDestroy: true, + }); + const awsTimestreaminfluxdbDbClusterExample = + new TimestreaminfluxdbDbCluster(this, "example_1", { + allocatedStorage: 20, + bucket: "example-bucket-name", + dbInstanceType: "db.influx.medium", + logDeliveryConfiguration: [ + { + s3Configuration: [ + { + bucketName: example.bucket, + enabled: true, + }, + ], + }, + ], + name: "example-db-cluster", + organization: "organization", + password: "example-password", + username: "admin", + vpcSecurityGroupIds: [Token.asString(awsSecurityGroupExample.id)], + vpcSubnetIds: [example1.id, example2.id], + }); + /*This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match.*/ + awsTimestreaminfluxdbDbClusterExample.overrideLogicalId("example"); + const dataAwsIamPolicyDocumentExample = new DataAwsIamPolicyDocument( + this, + "example_2", + { + statement: [ + { + actions: ["s3:PutObject"], + principals: [ + { + identifiers: ["timestream-influxdb.amazonaws.com"], + type: "Service", + }, + ], + resources: ["${" + example.arn + "}/*"], + }, + ], + } + ); + /*This allows the Terraform resource name to match the original name. 
You can remove the call if you don't need them to match.*/ + dataAwsIamPolicyDocumentExample.overrideLogicalId("example"); + const awsS3BucketPolicyExample = new S3BucketPolicy(this, "example_3", { + bucket: example.id, + policy: Token.asString(dataAwsIamPolicyDocumentExample.json), + }); + /*This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match.*/ + awsS3BucketPolicyExample.overrideLogicalId("example"); + } +} + +``` + +## Argument Reference + +The following arguments are required: + +* `allocatedStorage` - (Required) Amount of storage in GiB (gibibytes). The minimum value is `20`, the maximum value is `16384`. The argument `dbStorageType` places restrictions on this argument's minimum value. The following is a list of `dbStorageType` values and the corresponding minimum value for `allocatedStorage`: `"InfluxIOIncludedT1"`: `20`, `"InfluxIOIncludedT2"` and `"InfluxIOIncludedT3"`: `400`. +* `bucket` - (Required) Name of the initial InfluxDB bucket. All InfluxDB data is stored in a bucket. A bucket combines the concept of a database and a retention period (the duration of time that each data point persists). A bucket belongs to an organization. Along with `organization`, `username`, and `password`, this argument will be stored in the secret referred to by the `influxAuthParametersSecretArn` attribute. +* `dbInstanceType` - (Required) Timestream for InfluxDB DB instance type to run InfluxDB on. Valid options are: `"db.influx.medium"`, `"db.influx.large"`, `"db.influx.xlarge"`, `"db.influx.2xlarge"`, `"db.influx.4xlarge"`, `"db.influx.8xlarge"`, `"db.influx.12xlarge"`, and `"db.influx.16xlarge"`. This argument is updatable. +* `name` - (Required) Name that uniquely identifies the DB cluster when interacting with the Amazon Timestream for InfluxDB API and CLI commands. This name will also be a prefix included in the endpoint. Cluster names must be unique per customer and per region. 
The argument must start with a letter, cannot contain consecutive hyphens (`-`) and cannot end with a hyphen. +* `password` - (Required) Password of the initial admin user created in InfluxDB. This password will allow you to access the InfluxDB UI to perform various administrative tasks and also use the InfluxDB CLI to create an operator token. Along with `bucket`, `username`, and `organization`, this argument will be stored in the secret referred to by the `influxAuthParametersSecretArn` attribute. +* `organization` - (Required) Name of the initial organization for the initial admin user in InfluxDB. An InfluxDB organization is a workspace for a group of users. Along with `bucket`, `username`, and `password`, this argument will be stored in the secret referred to by the `influxAuthParametersSecretArn` attribute. +* `username` - (Required) Username of the initial admin user created in InfluxDB. Must start with a letter and can't end with a hyphen or contain two consecutive hyphens. This username will allow you to access the InfluxDB UI to perform various administrative tasks and also use the InfluxDB CLI to create an operator token. Along with `bucket`, `organization`, and `password`, this argument will be stored in the secret referred to by the `influxAuthParametersSecretArn` attribute. +* `vpcSecurityGroupIds` - (Required) List of VPC security group IDs to associate with the cluster. +* `vpcSubnetIds` - (Required) List of VPC subnet IDs to associate with the cluster. Provide at least two VPC subnet IDs in different availability zones when deploying with a Multi-AZ standby. + +The following arguments are optional: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
+* `dbParameterGroupIdentifier` - (Optional) ID of the DB parameter group assigned to your cluster. This argument is updatable. If added to an existing Timestream for InfluxDB cluster or given a new value, will cause an in-place update to the cluster. However, if a cluster already has a value for `dbParameterGroupIdentifier`, removing `dbParameterGroupIdentifier` will cause the cluster to be destroyed and recreated. +* `dbStorageType` - (Default `"InfluxIOIncludedT1"`) Timestream for InfluxDB DB storage type to read and write InfluxDB data. You can choose between 3 different types of provisioned Influx IOPS included storage according to your workloads requirements: Influx IO Included 3000 IOPS, Influx IO Included 12000 IOPS, Influx IO Included 16000 IOPS. Valid options are: `"InfluxIOIncludedT1"`, `"InfluxIOIncludedT2"`, and `"InfluxIOIncludedT3"`. If you use `"InfluxIOIncludedT2"` or `"InfluxIOIncludedT3"`, the minimum value for `allocatedStorage` is 400. +* `deploymentType` - (Default `"MULTI_NODE_READ_REPLICAS"`) Specifies the type of cluster to create. Valid options are: `"MULTI_NODE_READ_REPLICAS"`. +* `failoverMode` - (Default `"AUTOMATIC"`) Specifies the behavior of failure recovery when the primary node of the cluster fails. Valid options are: `"AUTOMATIC"` and `"NO_FAILOVER"`. +* `logDeliveryConfiguration` - (Optional) Configuration for sending InfluxDB engine logs to a specified S3 bucket. This argument is updatable. +* `networkType` - (Optional) Specifies whether the network type of the Timestream for InfluxDB cluster is IPV4, which can communicate over IPv4 protocol only, or DUAL, which can communicate over both IPv4 and IPv6 protocols. +* `port` - (Default `8086`) The port on which the cluster accepts connections. Valid values: `1024`-`65535`. Cannot be `2375`-`2376`, `7788`-`7799`, `8090`, or `51678`-`51680`. This argument is updatable. +* `publiclyAccessible` - (Default `false`) Configures the DB cluster with a public IP to facilitate access. 
Other resources, such as a VPC, a subnet, an internet gateway, and a route table with routes, are also required to enable public access, in addition to this argument. See "[Usage with Public Internet Access Enabled](#usage-with-public-internet-access-enabled)" for an example configuration with all required resources for public internet access. +* `tags` - (Optional) Map of tags assigned to the resource. If configured with a provider [`defaultTags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. + +### Nested Fields + +#### `logDeliveryConfiguration` + +* `s3Configuration` - (Required) Configuration for S3 bucket log delivery. + +#### `s3Configuration` + +* `bucketName` - (Required) Name of the S3 bucket to deliver logs to. +* `enabled` - (Required) Indicates whether log delivery to the S3 bucket is enabled. + +**Note**: The following arguments can be updated in-place: `dbParameterGroupIdentifier`, `logDeliveryConfiguration`, `port`, `dbInstanceType`, `failoverMode`, and `tags`. Changes to any other argument after a cluster has been deployed will cause destruction and re-creation of the cluster. Additionally, when `dbParameterGroupIdentifier` is added to a cluster or modified, the cluster will be updated in-place but if `dbParameterGroupIdentifier` is removed from a cluster, the cluster will be destroyed and re-created. + +## Attribute Reference + +This resource exports the following attributes in addition to the arguments above: + +* `arn` - ARN of the Timestream for InfluxDB cluster. +* `endpoint` - Endpoint used to connect to InfluxDB. The default InfluxDB port is 8086. +* `id` - ID of the Timestream for InfluxDB cluster. +* `influxAuthParametersSecretArn` - ARN of the AWS Secrets Manager secret containing the initial InfluxDB authorization parameters. 
The secret value is a JSON formatted key-value pair holding InfluxDB authorization values: organization, bucket, username, and password. +* `readerEndpoint` - The endpoint used to connect to the Timestream for InfluxDB cluster for read-only operations. +* `tagsAll` - Map of tags assigned to the resource, including those inherited from the provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). + +## Timeouts + +[Configuration options](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts): + +* `create` - (Default `30m`) +* `update` - (Default `30m`) +* `delete` - (Default `30m`) + +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Timestream for InfluxDB cluster using its identifier. For example: + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. + */ +import { TimestreaminfluxdbDbCluster } from "./.gen/providers/aws/timestreaminfluxdb-db-cluster"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + TimestreaminfluxdbDbCluster.generateConfigForImport( + this, + "example", + "12345abcde" + ); + } +} + +``` + +Using `terraform import`, import Timestream for InfluxDB cluster using its identifier. 
For example: + +```console +% terraform import aws_timestreaminfluxdb_db_cluster.example 12345abcde +``` + + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/timestreaminfluxdb_db_instance.html.markdown b/website/docs/cdktf/typescript/r/timestreaminfluxdb_db_instance.html.markdown index 63996f8d4a84..b44468542083 100644 --- a/website/docs/cdktf/typescript/r/timestreaminfluxdb_db_instance.html.markdown +++ b/website/docs/cdktf/typescript/r/timestreaminfluxdb_db_instance.html.markdown @@ -319,6 +319,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `dbParameterGroupIdentifier` - (Optional) ID of the DB parameter group assigned to your DB instance. This argument is updatable. If added to an existing Timestream for InfluxDB instance or given a new value, will cause an in-place update to the instance. However, if an instance already has a value for `dbParameterGroupIdentifier`, removing `dbParameterGroupIdentifier` will cause the instance to be destroyed and recreated. * `dbStorageType` - (Default `"InfluxIOIncludedT1"`) Timestream for InfluxDB DB storage type to read and write InfluxDB data. You can choose between 3 different types of provisioned Influx IOPS included storage according to your workloads requirements: Influx IO Included 3000 IOPS, Influx IO Included 12000 IOPS, Influx IO Included 16000 IOPS. Valid options are: `"InfluxIOIncludedT1"`, `"InfluxIOIncludedT2"`, and `"InfluxIOIncludedT3"`. If you use `"InfluxIOIncludedT2" or "InfluxIOIncludedT3", the minimum value for `allocated_storage` is 400. This argument is updatable. 
For a single instance, after this argument has been updated once, it can only be updated again after 6 hours have passed. * `deploymentType` - (Default `"SINGLE_AZ"`) Specifies whether the DB instance will be deployed as a standalone instance or with a Multi-AZ standby for high availability. Valid options are: `"SINGLE_AZ"`, `"WITH_MULTIAZ_STANDBY"`. This argument is updatable. @@ -349,7 +350,7 @@ This resource exports the following attributes in addition to the arguments abov * `availabilityZone` - Availability Zone in which the DB instance resides. * `endpoint` - Endpoint used to connect to InfluxDB. The default InfluxDB port is 8086. * `id` - ID of the Timestream for InfluxDB instance. -* `influxAuthParametersSecretArn` - ARN of the AWS Secrets Manager secret containing the initial InfluxDB authorization parameters. The secret value is a JSON formatted key-value pair holding InfluxDB authorization values: organization, bucket, username, and password. This secret will be read by the `aws_timestreaminfluxdb_db_instance` resource in order to support importing: deleting the secret or secret values can cause errors. +* `influxAuthParametersSecretArn` - ARN of the AWS Secrets Manager secret containing the initial InfluxDB authorization parameters. The secret value is a JSON formatted key-value pair holding InfluxDB authorization values: organization, bucket, username, and password. * `secondaryAvailabilityZone` - Availability Zone in which the standby instance is located when deploying with a MultiAZ standby instance. * `tagsAll` - Map of tags assigned to the resource, including those inherited from the provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). 
@@ -393,4 +394,4 @@ Using `terraform import`, import Timestream for InfluxDB Db Instance using its i % terraform import aws_timestreaminfluxdb_db_instance.example 12345abcde ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/timestreamquery_scheduled_query.html.markdown b/website/docs/cdktf/typescript/r/timestreamquery_scheduled_query.html.markdown index 857658e6119b..1d9a8b641d83 100644 --- a/website/docs/cdktf/typescript/r/timestreamquery_scheduled_query.html.markdown +++ b/website/docs/cdktf/typescript/r/timestreamquery_scheduled_query.html.markdown @@ -23,87 +23,89 @@ If your infrastructure is already set up—including the source database and tab ```typescript // DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug import { Construct } from "constructs"; -import { TerraformStack } from "cdktf"; +import { Token, TerraformStack } from "cdktf"; /* * Provider bindings are generated by running `cdktf get`. * See https://cdk.tf/provider-generation for more details. 
*/ -import { TimestreamqueryScheduledQuery } from "./.gen/providers/aws/"; +import { TimestreamqueryScheduledQuery } from "./.gen/providers/aws/timestreamquery-scheduled-query"; class MyConvertedCode extends TerraformStack { constructor(scope: Construct, name: string) { super(scope, name); new TimestreamqueryScheduledQuery(this, "example", { - error_report_configuration: [ + errorReportConfiguration: [ { - s3_configuration: [ + s3Configuration: [ { - bucket_name: awsS3BucketExample.bucket, + bucketName: Token.asString(awsS3BucketExample.bucket), }, ], }, ], - execution_role_arn: awsIamRoleExample.arn, - name: awsTimestreamwriteTableExample.tableName, - notification_configuration: [ + executionRoleArn: Token.asString(awsIamRoleExample.arn), + name: Token.asString(awsTimestreamwriteTableExample.tableName), + notificationConfiguration: [ { - sns_configuration: [ + snsConfiguration: [ { - topic_arn: awsSnsTopicExample.arn, + topicArn: Token.asString(awsSnsTopicExample.arn), }, ], }, ], - query_string: + queryString: "SELECT region, az, hostname, BIN(time, 15s) AS binned_timestamp,\n\tROUND(AVG(cpu_utilization), 2) AS avg_cpu_utilization,\n\tROUND(APPROX_PERCENTILE(cpu_utilization, 0.9), 2) AS p90_cpu_utilization,\n\tROUND(APPROX_PERCENTILE(cpu_utilization, 0.95), 2) AS p95_cpu_utilization,\n\tROUND(APPROX_PERCENTILE(cpu_utilization, 0.99), 2) AS p99_cpu_utilization\nFROM exampledatabase.exampletable\nWHERE measure_name = 'metrics' AND time > ago(2h)\nGROUP BY region, hostname, az, BIN(time, 15s)\nORDER BY binned_timestamp ASC\nLIMIT 5\n\n", - schedule_configuration: [ + scheduleConfiguration: [ { - schedule_expression: "rate(1 hour)", + scheduleExpression: "rate(1 hour)", }, ], - target_configuration: [ + targetConfiguration: [ { - timestream_configuration: [ + timestreamConfiguration: [ { - database_name: results.databaseName, - dimension_mapping: [ + databaseName: results.databaseName, + dimensionMapping: [ { - dimension_value_type: "VARCHAR", + dimensionValueType: 
"VARCHAR", name: "az", }, { - dimension_value_type: "VARCHAR", + dimensionValueType: "VARCHAR", name: "region", }, { - dimension_value_type: "VARCHAR", + dimensionValueType: "VARCHAR", name: "hostname", }, ], - multi_measure_mappings: [ + multiMeasureMappings: [ { - multi_measure_attribute_mapping: [ + multiMeasureAttributeMapping: [ { - measure_value_type: "DOUBLE", - source_column: "avg_cpu_utilization", + measureValueType: "DOUBLE", + sourceColumn: "avg_cpu_utilization", }, { - measure_value_type: "DOUBLE", - source_column: "p90_cpu_utilization", + measureValueType: "DOUBLE", + sourceColumn: "p90_cpu_utilization", }, { - measure_value_type: "DOUBLE", - source_column: "p95_cpu_utilization", + measureValueType: "DOUBLE", + sourceColumn: "p95_cpu_utilization", }, { - measure_value_type: "DOUBLE", - source_column: "p99_cpu_utilization", + measureValueType: "DOUBLE", + sourceColumn: "p99_cpu_utilization", }, ], - target_multi_measure_name: "multi-metrics", + targetMultiMeasureName: "multi-metrics", }, ], - table_name: awsTimestreamwriteTableResults.tableName, - time_column: "binned_timestamp", + tableName: Token.asString( + awsTimestreamwriteTableResults.tableName + ), + timeColumn: "binned_timestamp", }, ], }, @@ -305,87 +307,89 @@ This is done with Amazon Timestream Write [WriteRecords](https://docs.aws.amazon ```typescript // DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug import { Construct } from "constructs"; -import { TerraformStack } from "cdktf"; +import { Token, TerraformStack } from "cdktf"; /* * Provider bindings are generated by running `cdktf get`. * See https://cdk.tf/provider-generation for more details. 
*/ -import { TimestreamqueryScheduledQuery } from "./.gen/providers/aws/"; +import { TimestreamqueryScheduledQuery } from "./.gen/providers/aws/timestreamquery-scheduled-query"; class MyConvertedCode extends TerraformStack { constructor(scope: Construct, name: string) { super(scope, name); new TimestreamqueryScheduledQuery(this, "example", { - error_report_configuration: [ + errorReportConfiguration: [ { - s3_configuration: [ + s3Configuration: [ { - bucket_name: awsS3BucketExample.bucket, + bucketName: Token.asString(awsS3BucketExample.bucket), }, ], }, ], - execution_role_arn: awsIamRoleExample.arn, - name: awsTimestreamwriteTableExample.tableName, - notification_configuration: [ + executionRoleArn: Token.asString(awsIamRoleExample.arn), + name: Token.asString(awsTimestreamwriteTableExample.tableName), + notificationConfiguration: [ { - sns_configuration: [ + snsConfiguration: [ { - topic_arn: awsSnsTopicExample.arn, + topicArn: Token.asString(awsSnsTopicExample.arn), }, ], }, ], - query_string: + queryString: "SELECT region, az, hostname, BIN(time, 15s) AS binned_timestamp,\n\tROUND(AVG(cpu_utilization), 2) AS avg_cpu_utilization,\n\tROUND(APPROX_PERCENTILE(cpu_utilization, 0.9), 2) AS p90_cpu_utilization,\n\tROUND(APPROX_PERCENTILE(cpu_utilization, 0.95), 2) AS p95_cpu_utilization,\n\tROUND(APPROX_PERCENTILE(cpu_utilization, 0.99), 2) AS p99_cpu_utilization\nFROM exampledatabase.exampletable\nWHERE measure_name = 'metrics' AND time > ago(2h)\nGROUP BY region, hostname, az, BIN(time, 15s)\nORDER BY binned_timestamp ASC\nLIMIT 5\n\n", - schedule_configuration: [ + scheduleConfiguration: [ { - schedule_expression: "rate(1 hour)", + scheduleExpression: "rate(1 hour)", }, ], - target_configuration: [ + targetConfiguration: [ { - timestream_configuration: [ + timestreamConfiguration: [ { - database_name: results.databaseName, - dimension_mapping: [ + databaseName: results.databaseName, + dimensionMapping: [ { - dimension_value_type: "VARCHAR", + dimensionValueType: 
"VARCHAR", name: "az", }, { - dimension_value_type: "VARCHAR", + dimensionValueType: "VARCHAR", name: "region", }, { - dimension_value_type: "VARCHAR", + dimensionValueType: "VARCHAR", name: "hostname", }, ], - multi_measure_mappings: [ + multiMeasureMappings: [ { - multi_measure_attribute_mapping: [ + multiMeasureAttributeMapping: [ { - measure_value_type: "DOUBLE", - source_column: "avg_cpu_utilization", + measureValueType: "DOUBLE", + sourceColumn: "avg_cpu_utilization", }, { - measure_value_type: "DOUBLE", - source_column: "p90_cpu_utilization", + measureValueType: "DOUBLE", + sourceColumn: "p90_cpu_utilization", }, { - measure_value_type: "DOUBLE", - source_column: "p95_cpu_utilization", + measureValueType: "DOUBLE", + sourceColumn: "p95_cpu_utilization", }, { - measure_value_type: "DOUBLE", - source_column: "p99_cpu_utilization", + measureValueType: "DOUBLE", + sourceColumn: "p99_cpu_utilization", }, ], - target_multi_measure_name: "multi-metrics", + targetMultiMeasureName: "multi-metrics", }, ], - table_name: awsTimestreamwriteTableResults.tableName, - time_column: "binned_timestamp", + tableName: Token.asString( + awsTimestreamwriteTableResults.tableName + ), + timeColumn: "binned_timestamp", }, ], }, @@ -400,20 +404,21 @@ class MyConvertedCode extends TerraformStack { The following arguments are required: -* `error_report_configuration` - (Required) Configuration block for error reporting configuration. [See below.](#error_report_configuration) +* `errorReportConfiguration` - (Required) Configuration block for error reporting configuration. [See below.](#error_report_configuration) * `executionRoleArn` - (Required) ARN for the IAM role that Timestream will assume when running the scheduled query. * `name` - (Required) Name of the scheduled query. * `notificationConfiguration` - (Required) Configuration block for notification configuration for a scheduled query. 
A notification is sent by Timestream when a scheduled query is created, its state is updated, or when it is deleted. [See below.](#notification_configuration) -* `queryString` - (Required) Query string to run. Parameter names can be specified in the query string using the `@` character followed by an identifier. The named parameter `@scheduled_runtime` is reserved and can be used in the query to get the time at which the query is scheduled to run. The timestamp calculated according to the `schedule_configuration` parameter, will be the value of `@scheduled_runtime` paramater for each query run. For example, consider an instance of a scheduled query executing on 2021-12-01 00:00:00. For this instance, the `@scheduled_runtime` parameter is initialized to the timestamp 2021-12-01 00:00:00 when invoking the query. -* `schedule_configuration` - (Required) Configuration block for schedule configuration for the query. [See below.](#schedule_configuration) -* `target_configuration` - (Required) Configuration block for writing the result of a query. [See below.](#target_configuration) +* `queryString` - (Required) Query string to run. Parameter names can be specified in the query string using the `@` character followed by an identifier. The named parameter `@scheduled_runtime` is reserved and can be used in the query to get the time at which the query is scheduled to run. The timestamp calculated according to the `scheduleConfiguration` parameter, will be the value of `@scheduled_runtime` parameter for each query run. For example, consider an instance of a scheduled query executing on 2021-12-01 00:00:00. For this instance, the `@scheduled_runtime` parameter is initialized to the timestamp 2021-12-01 00:00:00 when invoking the query. +* `scheduleConfiguration` - (Required) Configuration block for schedule configuration for the query. [See below.](#schedule_configuration) +* `targetConfiguration` - (Required) Configuration block for writing the result of a query. 
[See below.](#target_configuration) The following arguments are optional: -* `kmsKeyId` - (Optional) Amazon KMS key used to encrypt the scheduled query resource, at-rest. If not specified, the scheduled query resource will be encrypted with a Timestream owned Amazon KMS key. To specify a KMS key, use the key ID, key ARN, alias name, or alias ARN. When using an alias name, prefix the name with "alias/". If `error_report_configuration` uses `SSE_KMS` as the encryption type, the same `kmsKeyId` is used to encrypt the error report at rest. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `kmsKeyId` - (Optional) Amazon KMS key used to encrypt the scheduled query resource, at-rest. If not specified, the scheduled query resource will be encrypted with a Timestream owned Amazon KMS key. To specify a KMS key, use the key ID, key ARN, alias name, or alias ARN. When using an alias name, prefix the name with "alias/". If `errorReportConfiguration` uses `SSE_KMS` as the encryption type, the same `kmsKeyId` is used to encrypt the error report at rest. * `tags` - (Optional) Map of tags assigned to the resource. If configured with a provider [`defaultTags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. -### `error_report_configuration` +### `errorReportConfiguration` * `s3Configuration` - (Required) Configuration block for the S3 configuration for the error reports. 
[See below.](#s3_configuration) @@ -425,53 +430,53 @@ The following arguments are optional: ### `notificationConfiguration` -* `sns_configuration` - (Required) Configuration block for details about the Amazon Simple Notification Service (SNS) configuration. [See below.](#sns_configuration) +* `snsConfiguration` - (Required) Configuration block for details about the Amazon Simple Notification Service (SNS) configuration. [See below.](#sns_configuration) -#### `sns_configuration` +#### `snsConfiguration` * `topicArn` - (Required) SNS topic ARN that the scheduled query status notifications will be sent to. -### `schedule_configuration` +### `scheduleConfiguration` * `scheduleExpression` - (Required) When to trigger the scheduled query run. This can be a cron expression or a rate expression. -### `target_configuration` +### `targetConfiguration` -* `timestream_configuration` - (Required) Configuration block for information needed to write data into the Timestream database and table. [See below.](#timestream_configuration) +* `timestreamConfiguration` - (Required) Configuration block for information needed to write data into the Timestream database and table. [See below.](#timestream_configuration) -#### `timestream_configuration` +#### `timestreamConfiguration` * `databaseName` - (Required) Name of Timestream database to which the query result will be written. -* `dimension_mapping` - (Required) Configuration block for mapping of column(s) from the query result to the dimension in the destination table. [See below.](#dimension_mapping) +* `dimensionMapping` - (Required) Configuration block for mapping of column(s) from the query result to the dimension in the destination table. [See below.](#dimension_mapping) * `tableName` - (Required) Name of Timestream table that the query result will be written to. The table should be within the same database that is provided in Timestream configuration. 
-* `time_column` - (Required) Column from query result that should be used as the time column in destination table. Column type for this should be TIMESTAMP. -* `measure_name_column` - (Optional) Name of the measure column. -* `mixed_measure_mapping` - (Optional) Configuration block for how to map measures to multi-measure records. [See below.](#mixed_measure_mapping) -* `multi_measure_mappings` - (Optional) Configuration block for multi-measure mappings. Only one of `mixed_measure_mappings` or `multi_measure_mappings` can be provided. `multi_measure_mappings` can be used to ingest data as multi measures in the derived table. [See below.](#multi_measure_mappings) +* `timeColumn` - (Required) Column from query result that should be used as the time column in destination table. Column type for this should be TIMESTAMP. +* `measureNameColumn` - (Optional) Name of the measure column. +* `mixedMeasureMapping` - (Optional) Configuration block for how to map measures to multi-measure records. [See below.](#mixed_measure_mapping) +* `multiMeasureMappings` - (Optional) Configuration block for multi-measure mappings. Only one of `mixed_measure_mappings` or `multiMeasureMappings` can be provided. `multiMeasureMappings` can be used to ingest data as multi measures in the derived table. [See below.](#multi_measure_mappings) -##### `dimension_mapping` +##### `dimensionMapping` -* `dimension_value_type` - (Required) Type for the dimension. Valid value: `VARCHAR`. +* `dimensionValueType` - (Required) Type for the dimension. Valid value: `VARCHAR`. * `name` - (Required) Column name from query result. -##### `mixed_measure_mapping` +##### `mixedMeasureMapping` -* `measure_name` - (Optional) Refers to the value of measure_name in a result row. This field is required if `measure_name_column` is provided. -* `multi_measure_attribute_mapping` - (Optional) Configuration block for attribute mappings for `MULTI` value measures. Required when `measure_value_type` is `MULTI`. 
[See below.](#multi_measure_attribute_mapping) -* `measure_value_type` - (Required) Type of the value that is to be read from `source_column`. Valid values are `BIGINT`, `BOOLEAN`, `DOUBLE`, `VARCHAR`, `MULTI`. -* `source_column` - (Optional) Source column from which measure-value is to be read for result materialization. -* `target_measure_name` - (Optional) Target measure name to be used. If not provided, the target measure name by default is `measure_name`, if provided, or `source_column` otherwise. +* `measureName` - (Optional) Refers to the value of measure_name in a result row. This field is required if `measureNameColumn` is provided. +* `multiMeasureAttributeMapping` - (Optional) Configuration block for attribute mappings for `MULTI` value measures. Required when `measureValueType` is `MULTI`. [See below.](#multi_measure_attribute_mapping) +* `measureValueType` - (Required) Type of the value that is to be read from `sourceColumn`. Valid values are `BIGINT`, `BOOLEAN`, `DOUBLE`, `VARCHAR`, `MULTI`. +* `sourceColumn` - (Optional) Source column from which measure-value is to be read for result materialization. +* `targetMeasureName` - (Optional) Target measure name to be used. If not provided, the target measure name by default is `measureName`, if provided, or `sourceColumn` otherwise. -##### `multi_measure_attribute_mapping` +##### `multiMeasureAttributeMapping` -* `measure_value_type` - (Required) Type of the attribute to be read from the source column. Valid values are `BIGINT`, `BOOLEAN`, `DOUBLE`, `VARCHAR`, `TIMESTAMP`. -* `source_column` - (Required) Source column from where the attribute value is to be read. -* `target_multi_measure_attribute_name` - (Optional) Custom name to be used for attribute name in derived table. If not provided, `source_column` is used. +* `measureValueType` - (Required) Type of the attribute to be read from the source column. Valid values are `BIGINT`, `BOOLEAN`, `DOUBLE`, `VARCHAR`, `TIMESTAMP`. 
+* `sourceColumn` - (Required) Source column from where the attribute value is to be read. +* `targetMultiMeasureAttributeName` - (Optional) Custom name to be used for attribute name in derived table. If not provided, `sourceColumn` is used. -##### `multi_measure_mappings` +##### `multiMeasureMappings` -* `multi_measure_attribute_mapping` - (Required) Attribute mappings to be used for mapping query results to ingest data for multi-measure attributes. [See above.](#multi_measure_attribute_mapping) -* `target_multi_measure_name` - (Optional) Name of the target multi-measure name in the derived table. This input is required when `measure_name_column` is not provided. If `measure_name_column` is provided, then the value from that column will be used as the multi-measure name. +* `multiMeasureAttributeMapping` - (Required) Attribute mappings to be used for mapping query results to ingest data for multi-measure attributes. [See above.](#multi_measure_attribute_mapping) +* `targetMultiMeasureName` - (Optional) Name of the target multi-measure name in the derived table. This input is required when `measureNameColumn` is not provided. If `measureNameColumn` is provided, then the value from that column will be used as the multi-measure name. ## Attribute Reference @@ -479,67 +484,67 @@ This resource exports the following attributes in addition to the arguments abov * `arn` - ARN of the Scheduled Query. * `creationTime` - Creation time for the scheduled query. -* `next_invocation_time` - Next time the scheduled query is scheduled to run. -* `previous_invocation_time` - Last time the scheduled query was run. +* `nextInvocationTime` - Next time the scheduled query is scheduled to run. +* `previousInvocationTime` - Last time the scheduled query was run. * `state` - State of the scheduled query, either `ENABLED` or `DISABLED`. -* `last_run_summary` - Runtime summary for the last scheduled query run. 
- * `error_report_location` - Contains the location of the error report for a single scheduled query call. - * `s3_report_location` - S3 report location for the scheduled query run. +* `lastRunSummary` - Runtime summary for the last scheduled query run. + * `errorReportLocation` - Contains the location of the error report for a single scheduled query call. + * `s3ReportLocation` - S3 report location for the scheduled query run. * `bucketName` - S3 bucket name. * `objectKey` - S3 key. - * `execution_stats` - Statistics for a single scheduled query run. - * `bytes_metered` - Bytes metered for a single scheduled query run. - * `cumulative_bytes_scanned` - Bytes scanned for a single scheduled query run. - * `data_writes` - Data writes metered for records ingested in a single scheduled query run. - * `execution_time_in_millis` - Total time, measured in milliseconds, that was needed for the scheduled query run to complete. - * `query_result_rows` - Number of rows present in the output from running a query before ingestion to destination data source. - * `records_ingested` - Number of records ingested for a single scheduled query run. + * `executionStats` - Statistics for a single scheduled query run. + * `bytesMetered` - Bytes metered for a single scheduled query run. + * `cumulativeBytesScanned` - Bytes scanned for a single scheduled query run. + * `dataWrites` - Data writes metered for records ingested in a single scheduled query run. + * `executionTimeInMillis` - Total time, measured in milliseconds, that was needed for the scheduled query run to complete. + * `queryResultRows` - Number of rows present in the output from running a query before ingestion to destination data source. + * `recordsIngested` - Number of records ingested for a single scheduled query run. * `failureReason` - Error message for the scheduled query in case of failure. You might have to look at the error report to get more detailed error reasons. 
- * `invocation_time` - InvocationTime for this run. This is the time at which the query is scheduled to run. Parameter `@scheduled_runtime` can be used in the query to get the value. - * `query_insights_response` - Provides various insights and metrics related to the run summary of the scheduled query. - * `output_bytes` - Size of query result set in bytes. You can use this data to validate if the result set has changed as part of the query tuning exercise. - * `output_rows` - Total number of rows returned as part of the query result set. You can use this data to validate if the number of rows in the result set have changed as part of the query tuning exercise. - * `query_spatial_coverage` - Insights into the spatial coverage of the query, including the table with sub-optimal (max) spatial pruning. This information can help you identify areas for improvement in your partitioning strategy to enhance spatial pruning. + * `invocationTime` - InvocationTime for this run. This is the time at which the query is scheduled to run. Parameter `@scheduled_runtime` can be used in the query to get the value. + * `queryInsightsResponse` - Provides various insights and metrics related to the run summary of the scheduled query. + * `outputBytes` - Size of query result set in bytes. You can use this data to validate if the result set has changed as part of the query tuning exercise. + * `outputRows` - Total number of rows returned as part of the query result set. You can use this data to validate if the number of rows in the result set have changed as part of the query tuning exercise. + * `querySpatialCoverage` - Insights into the spatial coverage of the query, including the table with sub-optimal (max) spatial pruning. This information can help you identify areas for improvement in your partitioning strategy to enhance spatial pruning. * `max` - Insights into the spatial coverage of the executed query and the table with the most inefficient spatial pruning. 
* `partitionKey` - Partition key used for partitioning, which can be a default measure_name or a customer defined partition key. * `tableArn` - ARN of the table with the most sub-optimal spatial pruning. * `value` - Maximum ratio of spatial coverage. - * `query_table_count` - Number of tables in the query. - * `query_temporal_range` - Insights into the temporal range of the query, including the table with the largest (max) time range. Following are some of the potential options for optimizing time-based pruning: add missing time-predicates, remove functions around the time predicates, add time predicates to all the sub-queries. + * `queryTableCount` - Number of tables in the query. + * `queryTemporalRange` - Insights into the temporal range of the query, including the table with the largest (max) time range. Following are some of the potential options for optimizing time-based pruning: add missing time-predicates, remove functions around the time predicates, add time predicates to all the sub-queries. * `max` - Insights into the temporal range of the query, including the table with the largest (max) time range. * `tableArn` - ARN of the table table which is queried with the largest time range. * `value` - Maximum duration in nanoseconds between the start and end of the query. - * `run_status` - Status of a scheduled query run. Valid values: `AUTO_TRIGGER_SUCCESS`, `AUTO_TRIGGER_FAILURE`, `MANUAL_TRIGGER_SUCCESS`, `MANUAL_TRIGGER_FAILURE`. - * `trigger_time` - Actual time when the query was run. -* `recently_failed_runs` - Runtime summary for the last five failed scheduled query runs. - * `error_report_location` - S3 location for error report. - * `s3_report_location` - S3 location where error reports are written. + * `runStatus` - Status of a scheduled query run. Valid values: `AUTO_TRIGGER_SUCCESS`, `AUTO_TRIGGER_FAILURE`, `MANUAL_TRIGGER_SUCCESS`, `MANUAL_TRIGGER_FAILURE`. + * `triggerTime` - Actual time when the query was run. 
+* `recentlyFailedRuns` - Runtime summary for the last five failed scheduled query runs. + * `errorReportLocation` - S3 location for error report. + * `s3ReportLocation` - S3 location where error reports are written. * `bucketName` - S3 bucket name. * `objectKey` - S3 key. - * `execution_stats` - Statistics for a single scheduled query run. - * `bytes_metered` - Bytes metered for a single scheduled query run. - * `cumulative_bytes_scanned` - Bytes scanned for a single scheduled query run. - * `data_writes` - Data writes metered for records ingested in a single scheduled query run. - * `execution_time_in_millis` - Total time, measured in milliseconds, that was needed for the scheduled query run to complete. - * `query_result_rows` - Number of rows present in the output from running a query before ingestion to destination data source. - * `records_ingested` - Number of records ingested for a single scheduled query run. + * `executionStats` - Statistics for a single scheduled query run. + * `bytesMetered` - Bytes metered for a single scheduled query run. + * `cumulativeBytesScanned` - Bytes scanned for a single scheduled query run. + * `dataWrites` - Data writes metered for records ingested in a single scheduled query run. + * `executionTimeInMillis` - Total time, measured in milliseconds, that was needed for the scheduled query run to complete. + * `queryResultRows` - Number of rows present in the output from running a query before ingestion to destination data source. + * `recordsIngested` - Number of records ingested for a single scheduled query run. * `failureReason` - Error message for the scheduled query in case of failure. You might have to look at the error report to get more detailed error reasons. - * `invocation_time` - InvocationTime for this run. This is the time at which the query is scheduled to run. Parameter `@scheduled_runtime` can be used in the query to get the value. 
- * `query_insights_response` - Various insights and metrics related to the run summary of the scheduled query. - * `output_bytes` - Size of query result set in bytes. You can use this data to validate if the result set has changed as part of the query tuning exercise. - * `output_rows` - Total number of rows returned as part of the query result set. You can use this data to validate if the number of rows in the result set have changed as part of the query tuning exercise. - * `query_spatial_coverage` - Insights into the spatial coverage of the query, including the table with sub-optimal (max) spatial pruning. This information can help you identify areas for improvement in your partitioning strategy to enhance spatial pruning. + * `invocationTime` - InvocationTime for this run. This is the time at which the query is scheduled to run. Parameter `@scheduled_runtime` can be used in the query to get the value. + * `queryInsightsResponse` - Various insights and metrics related to the run summary of the scheduled query. + * `outputBytes` - Size of query result set in bytes. You can use this data to validate if the result set has changed as part of the query tuning exercise. + * `outputRows` - Total number of rows returned as part of the query result set. You can use this data to validate if the number of rows in the result set have changed as part of the query tuning exercise. + * `querySpatialCoverage` - Insights into the spatial coverage of the query, including the table with sub-optimal (max) spatial pruning. This information can help you identify areas for improvement in your partitioning strategy to enhance spatial pruning. * `max` - Insights into the spatial coverage of the executed query and the table with the most inefficient spatial pruning. * `partitionKey` - Partition key used for partitioning, which can be a default measure_name or a customer defined partition key. * `tableArn` - ARN of the table with the most sub-optimal spatial pruning. 
* `value` - Maximum ratio of spatial coverage. - * `query_table_count` - Number of tables in the query. - * `query_temporal_range` - Insights into the temporal range of the query, including the table with the largest (max) time range. Following are some of the potential options for optimizing time-based pruning: add missing time-predicates, remove functions around the time predicates, add time predicates to all the sub-queries. + * `queryTableCount` - Number of tables in the query. + * `queryTemporalRange` - Insights into the temporal range of the query, including the table with the largest (max) time range. Following are some of the potential options for optimizing time-based pruning: add missing time-predicates, remove functions around the time predicates, add time predicates to all the sub-queries. * `max` - Insights into the most sub-optimal performing table on the temporal axis: * `tableArn` - ARN of the table which is queried with the largest time range. * `value` - Maximum duration in nanoseconds between the start and end of the query. - * `run_status` - Status of a scheduled query run. Valid values: `AUTO_TRIGGER_SUCCESS`, `AUTO_TRIGGER_FAILURE`, `MANUAL_TRIGGER_SUCCESS`, `MANUAL_TRIGGER_FAILURE`. - * `trigger_time` - Actual time when the query was run. + * `runStatus` - Status of a scheduled query run. Valid values: `AUTO_TRIGGER_SUCCESS`, `AUTO_TRIGGER_FAILURE`, `MANUAL_TRIGGER_SUCCESS`, `MANUAL_TRIGGER_FAILURE`. + * `triggerTime` - Actual time when the query was run. * `tagsAll` - Map of tags assigned to the resource, including those inherited from the provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). ## Timeouts @@ -562,7 +567,7 @@ import { TerraformStack } from "cdktf"; * Provider bindings are generated by running `cdktf get`. * See https://cdk.tf/provider-generation for more details. 
*/ -import { TimestreamqueryScheduledQuery } from "./.gen/providers/aws/"; +import { TimestreamqueryScheduledQuery } from "./.gen/providers/aws/timestreamquery-scheduled-query"; class MyConvertedCode extends TerraformStack { constructor(scope: Construct, name: string) { super(scope, name); @@ -582,4 +587,4 @@ Using `terraform import`, import Timestream Query Scheduled Query using the `arn % terraform import aws_timestreamquery_scheduled_query.example arn:aws:timestream:us-west-2:012345678901:scheduled-query/tf-acc-test-7774188528604787105-e13659544fe66c8d ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/timestreamwrite_database.html.markdown b/website/docs/cdktf/typescript/r/timestreamwrite_database.html.markdown index 1a292113092a..d7b0e5cb2b15 100644 --- a/website/docs/cdktf/typescript/r/timestreamwrite_database.html.markdown +++ b/website/docs/cdktf/typescript/r/timestreamwrite_database.html.markdown @@ -66,7 +66,8 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: -* `databaseName` – (Required) The name of the Timestream database. Minimum length of 3. Maximum length of 64. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `databaseName` - (Required) The name of the Timestream database. Minimum length of 3. Maximum length of 64. * `kmsKeyId` - (Optional) The ARN (not Alias ARN) of the KMS key to be used to encrypt the data stored in the database. If the KMS key is not specified, the database will be encrypted with a Timestream managed KMS key located in your account. Refer to [AWS managed KMS keys](https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-managed-cmk) for more info. 
* `tags` - (Optional) Map of tags to assign to this resource. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. @@ -108,4 +109,4 @@ Using `terraform import`, import Timestream databases using the `databaseName`. % terraform import aws_timestreamwrite_database.example example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/timestreamwrite_table.html.markdown b/website/docs/cdktf/typescript/r/timestreamwrite_table.html.markdown index 90dfcf3957d9..eeb05d7bd120 100644 --- a/website/docs/cdktf/typescript/r/timestreamwrite_table.html.markdown +++ b/website/docs/cdktf/typescript/r/timestreamwrite_table.html.markdown @@ -107,7 +107,8 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: -* `databaseName` – (Required) The name of the Timestream database. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `databaseName` - (Required) The name of the Timestream database. * `magneticStoreWriteProperties` - (Optional) Contains properties to set on the table when enabling magnetic store writes. See [Magnetic Store Write Properties](#magnetic-store-write-properties) below for more details. * `retentionProperties` - (Optional) The retention duration for the memory store and magnetic store. See [Retention Properties](#retention-properties) below for more details. If not provided, `magneticStoreRetentionPeriodInDays` default to 73000 and `memoryStoreRetentionPeriodInHours` defaults to 6. 
* `schema` - (Optional) The schema of the table. See [Schema](#schema) below for more details. @@ -197,4 +198,4 @@ Using `terraform import`, import Timestream tables using the `tableName` and `da % terraform import aws_timestreamwrite_table.example ExampleTable:ExampleDatabase ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/transcribe_language_model.html.markdown b/website/docs/cdktf/typescript/r/transcribe_language_model.html.markdown index 7310f16d27a3..598e525df72c 100644 --- a/website/docs/cdktf/typescript/r/transcribe_language_model.html.markdown +++ b/website/docs/cdktf/typescript/r/transcribe_language_model.html.markdown @@ -110,8 +110,9 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -The following arguments are required: +This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `baseModelName` - (Required) Name of reference base model. * `inputDataConfig` - (Required) The input data config for the LanguageModel. See [Input Data Config](#input-data-config) for more details. * `languageCode` - (Required) The language code you selected for your language model. Refer to the [supported languages](https://docs.aws.amazon.com/transcribe/latest/dg/supported-languages.html) page for accepted codes. @@ -172,4 +173,4 @@ Using `terraform import`, import Transcribe LanguageModel using the `modelName`. 
% terraform import aws_transcribe_language_model.example example-name ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/transcribe_medical_vocabulary.html.markdown b/website/docs/cdktf/typescript/r/transcribe_medical_vocabulary.html.markdown index 1170810f5004..de4344aed11e 100644 --- a/website/docs/cdktf/typescript/r/transcribe_medical_vocabulary.html.markdown +++ b/website/docs/cdktf/typescript/r/transcribe_medical_vocabulary.html.markdown @@ -67,6 +67,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `tags` - (Optional) A map of tags to assign to the MedicalVocabulary. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. 
## Attribute Reference @@ -117,4 +118,4 @@ Using `terraform import`, import Transcribe MedicalVocabulary using the `vocabul % terraform import aws_transcribe_medical_vocabulary.example example-name ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/transcribe_vocabulary.html.markdown b/website/docs/cdktf/typescript/r/transcribe_vocabulary.html.markdown index 1f4269e36a98..03dd0e6a2f1e 100644 --- a/website/docs/cdktf/typescript/r/transcribe_vocabulary.html.markdown +++ b/website/docs/cdktf/typescript/r/transcribe_vocabulary.html.markdown @@ -65,11 +65,11 @@ class MyConvertedCode extends TerraformStack { The following arguments are required: * `languageCode` - (Required) The language code you selected for your vocabulary. -* `vocabularyFileUri` - (Required) The Amazon S3 location (URI) of the text file that contains your custom vocabulary. * `vocabularyName` - (Required) The name of the Vocabulary. The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `phrases` - (Optional) - A list of terms to include in the vocabulary. Conflicts with `vocabularyFileUri` * `vocabularyFileUri` - (Optional) The Amazon S3 location (URI) of the text file that contains your custom vocabulary. Conflicts wth `phrases`. * `tags` - (Optional) A map of tags to assign to the Vocabulary. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. 
@@ -122,4 +122,4 @@ Using `terraform import`, import Transcribe Vocabulary using the `vocabularyName % terraform import aws_transcribe_vocabulary.example example-name ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/transcribe_vocabulary_filter.html.markdown b/website/docs/cdktf/typescript/r/transcribe_vocabulary_filter.html.markdown index efa284575ca6..c2759970e1fb 100644 --- a/website/docs/cdktf/typescript/r/transcribe_vocabulary_filter.html.markdown +++ b/website/docs/cdktf/typescript/r/transcribe_vocabulary_filter.html.markdown @@ -51,6 +51,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `vocabularyFilterFileUri` - (Optional) The Amazon S3 location (URI) of the text file that contains your custom VocabularyFilter. Conflicts with `words` argument. * `tags` - (Optional) A map of tags to assign to the VocabularyFilter. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. * `words` - (Optional) - A list of terms to include in the vocabulary. Conflicts with `vocabularyFilterFileUri` argument. 
@@ -95,4 +96,4 @@ Using `terraform import`, import Transcribe VocabularyFilter using the `vocabula % terraform import aws_transcribe_vocabulary_filter.example example-name ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/transfer_access.html.markdown b/website/docs/cdktf/typescript/r/transfer_access.html.markdown index 667ed93ab55a..71220337c10c 100644 --- a/website/docs/cdktf/typescript/r/transfer_access.html.markdown +++ b/website/docs/cdktf/typescript/r/transfer_access.html.markdown @@ -74,6 +74,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `externalId` - (Required) The SID of a group in the directory connected to the Transfer Server (e.g., `S-1-1-12-1234567890-123456789-1234567890-1234`) * `serverId` - (Required) The Server ID of the Transfer Server (e.g., `s-12345678`) * `homeDirectory` - (Optional) The landing directory (folder) for a user when they log in to the server using their SFTP client. It should begin with a `/`. The first item in the path is the name of the home bucket (accessible as `${Transfer:HomeBucket}` in the policy) and the rest is the home directory (accessible as `${Transfer:HomeDirectory}` in the policy). For example, `/example-bucket-1234/username` would set the home bucket to `example-bucket-1234` and the home directory to `username`. 
@@ -132,4 +133,4 @@ Using `terraform import`, import Transfer Accesses using the `serverId` and `ext % terraform import aws_transfer_access.example s-12345678/S-1-1-12-1234567890-123456789-1234567890-1234 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/transfer_agreement.html.markdown b/website/docs/cdktf/typescript/r/transfer_agreement.html.markdown index dce8add5f27c..2e6561fb234f 100644 --- a/website/docs/cdktf/typescript/r/transfer_agreement.html.markdown +++ b/website/docs/cdktf/typescript/r/transfer_agreement.html.markdown @@ -45,6 +45,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `accessRole` - (Required) The IAM Role which provides read and write access to the parent directory of the file location mentioned in the StartFileTransfer request. * `baseDirectory` - (Required) The landing directory for the files transferred by using the AS2 protocol. * `description` - (Optional) The Optional description of the transdfer. 
@@ -93,4 +94,4 @@ Using `terraform import`, import Transfer AS2 Agreement using the `server_id/agr % terraform import aws_transfer_agreement.example s-4221a88afd5f4362a/a-4221a88afd5f4362a ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/transfer_certificate.html.markdown b/website/docs/cdktf/typescript/r/transfer_certificate.html.markdown index 99705d00a1d5..cedf7df8b849 100644 --- a/website/docs/cdktf/typescript/r/transfer_certificate.html.markdown +++ b/website/docs/cdktf/typescript/r/transfer_certificate.html.markdown @@ -50,6 +50,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `certificate` - (Required) The valid certificate file required for the transfer. * `certificateChain` - (Optional) The optional list of certificate that make up the chain for the certificate that is being imported. * `description` - (Optional) A short description that helps identify the certificate. 
@@ -98,4 +99,4 @@ Using `terraform import`, import Transfer AS2 Certificate using the `certificate % terraform import aws_transfer_certificate.example c-4221a88afd5f4362a ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/transfer_connector.html.markdown b/website/docs/cdktf/typescript/r/transfer_connector.html.markdown index 93145bfb70c2..7c295cda25d3 100644 --- a/website/docs/cdktf/typescript/r/transfer_connector.html.markdown +++ b/website/docs/cdktf/typescript/r/transfer_connector.html.markdown @@ -78,6 +78,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `accessRole` - (Required) The IAM Role which provides read and write access to the parent directory of the file location mentioned in the StartFileTransfer request. * `as2Config` - (Optional) Either SFTP or AS2 is configured.The parameters to configure for the connector object. Fields documented below. * `loggingRole` - (Optional) The IAM Role which is required for allowing the connector to turn on CloudWatch logging for Amazon S3 events. @@ -141,4 +142,4 @@ Using `terraform import`, import Transfer AS2 Connector using the `connectorId`. 
% terraform import aws_transfer_connector.example c-4221a88afd5f4362a ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/transfer_profile.html.markdown b/website/docs/cdktf/typescript/r/transfer_profile.html.markdown index 7ced30289f0a..bdfabbbd8003 100644 --- a/website/docs/cdktf/typescript/r/transfer_profile.html.markdown +++ b/website/docs/cdktf/typescript/r/transfer_profile.html.markdown @@ -48,6 +48,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `as2Id` - (Required) The As2Id is the AS2 name as defined in the RFC 4130. For inbound ttransfers this is the AS2 From Header for the AS2 messages sent from the partner. For Outbound messages this is the AS2 To Header for the AS2 messages sent to the partner. his ID cannot include spaces. * `certificateIds` - (Optional) The list of certificate Ids from the imported certificate operation. * `profileType` - (Required) The profile type should be LOCAL or PARTNER. @@ -92,4 +93,4 @@ Using `terraform import`, import Transfer AS2 Profile using the `profileId`. 
For % terraform import aws_transfer_profile.example p-4221a88afd5f4362a ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/transfer_server.html.markdown b/website/docs/cdktf/typescript/r/transfer_server.html.markdown index ac35ebe01708..27f250dbf03e 100644 --- a/website/docs/cdktf/typescript/r/transfer_server.html.markdown +++ b/website/docs/cdktf/typescript/r/transfer_server.html.markdown @@ -234,6 +234,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `certificate` - (Optional) The Amazon Resource Name (ARN) of the AWS Certificate Manager (ACM) certificate. This is required when `protocols` is set to `FTPS` * `domain` - (Optional) The domain of the storage system that is used for file transfers. Valid values are: `S3` and `EFS`. The default value is `S3`. * `protocols` - (Optional) Specifies the file transfer protocol or protocols over which your file transfer protocol client can connect to your server's endpoint. This defaults to `SFTP` . The available protocols are: @@ -368,4 +369,4 @@ Using `terraform import`, import Transfer Servers using the server `id`. For exa Certain resource arguments, such as `hostKey`, cannot be read via the API and imported into Terraform. Terraform will display a difference for these arguments the first run after import if declared in the Terraform configuration for an imported resource. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/transfer_ssh_key.html.markdown b/website/docs/cdktf/typescript/r/transfer_ssh_key.html.markdown index af2203c3f5f0..77b160a93f0c 100644 --- a/website/docs/cdktf/typescript/r/transfer_ssh_key.html.markdown +++ b/website/docs/cdktf/typescript/r/transfer_ssh_key.html.markdown @@ -117,6 +117,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `serverId` - (Requirement) The Server ID of the Transfer Server (e.g., `s-12345678`) * `userName` - (Requirement) The name of the user account that is assigned to one or more servers. * `body` - (Requirement) The public key portion of an SSH key pair. @@ -157,4 +158,4 @@ Using `terraform import`, import Transfer SSH Public Key using the `serverId` an % terraform import aws_transfer_ssh_key.bar s-12345678/test-username/key-12345 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/transfer_tag.html.markdown b/website/docs/cdktf/typescript/r/transfer_tag.html.markdown index 79212658dc42..b04d76fe1a3f 100644 --- a/website/docs/cdktf/typescript/r/transfer_tag.html.markdown +++ b/website/docs/cdktf/typescript/r/transfer_tag.html.markdown @@ -53,6 +53,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `resourceArn` - (Required) Amazon Resource Name (ARN) of the Transfer Family resource to tag. * `key` - (Required) Tag name. * `value` - (Required) Tag value. @@ -95,4 +96,4 @@ Using `terraform import`, import `aws_transfer_tag` using the Transfer Family re % terraform import aws_transfer_tag.example arn:aws:transfer:us-east-1:123456789012:server/s-1234567890abcdef0,Name ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/transfer_user.html.markdown b/website/docs/cdktf/typescript/r/transfer_user.html.markdown index fd874316ede0..7aae2dc970ff 100644 --- a/website/docs/cdktf/typescript/r/transfer_user.html.markdown +++ b/website/docs/cdktf/typescript/r/transfer_user.html.markdown @@ -104,6 +104,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `serverId` - (Required) The Server ID of the Transfer Server (e.g., `s-12345678`) * `userName` - (Required) The name used for log in to your SFTP server. * `homeDirectory` - (Optional) The landing directory (folder) for a user when they log in to the server using their SFTP client. It should begin with a `/`. The first item in the path is the name of the home bucket (accessible as `${Transfer:HomeBucket}` in the policy) and the rest is the home directory (accessible as `${Transfer:HomeDirectory}` in the policy). For example, `/example-bucket-1234/username` would set the home bucket to `example-bucket-1234` and the home directory to `username`. 
@@ -184,4 +185,4 @@ Using `terraform import`, import Transfer Users using the `serverId` and `userNa % terraform import aws_transfer_user.bar s-12345678/test-username ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/transfer_workflow.html.markdown b/website/docs/cdktf/typescript/r/transfer_workflow.html.markdown index cfac42e8d9b8..790c2a3e62ac 100644 --- a/website/docs/cdktf/typescript/r/transfer_workflow.html.markdown +++ b/website/docs/cdktf/typescript/r/transfer_workflow.html.markdown @@ -93,6 +93,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` - (Optional) A textual description for the workflow. * `onExceptionSteps` - (Optional) Specifies the steps (actions) to take if errors are encountered during execution of the workflow. See Workflow Steps below. * `steps` - (Required) Specifies the details for the steps that are in the specified workflow. See Workflow Steps below. @@ -196,4 +197,4 @@ Using `terraform import`, import Transfer Workflows using the `worflow_id`. 
For % terraform import aws_transfer_workflow.example example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/verifiedaccess_endpoint.html.markdown b/website/docs/cdktf/typescript/r/verifiedaccess_endpoint.html.markdown index ab8834fe80ba..0348d8b8d9d9 100644 --- a/website/docs/cdktf/typescript/r/verifiedaccess_endpoint.html.markdown +++ b/website/docs/cdktf/typescript/r/verifiedaccess_endpoint.html.markdown @@ -135,6 +135,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `applicationDomain` - (Optional) The DNS name for users to reach your application. This parameter is required if the endpoint type is `load-balancer` or `network-interface`. * `description` - (Optional) A description for the Verified Access endpoint. * `domainCertificateArn` - (Optional) - The ARN of the public TLS/SSL certificate in AWS Certificate Manager to associate with the endpoint. The CN in the certificate must match the DNS name your end users will use to reach your application. This parameter is required if the endpoint type is `load-balancer` or `network-interface`. @@ -194,4 +195,4 @@ Using `terraform import`, import Verified Access Instances using the `id`. 
For % terraform import aws_verifiedaccess_endpoint.example vae-8012925589 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/verifiedaccess_group.html.markdown b/website/docs/cdktf/typescript/r/verifiedaccess_group.html.markdown index 3fe7947a3300..621a6903771e 100644 --- a/website/docs/cdktf/typescript/r/verifiedaccess_group.html.markdown +++ b/website/docs/cdktf/typescript/r/verifiedaccess_group.html.markdown @@ -77,6 +77,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` - (Optional) Description of the verified access group. * `policyDocument` - (Optional) The policy document that is associated with this resource. * `sseConfiguration` - (Optional) Configuration block to use KMS keys for server-side encryption. @@ -103,4 +104,4 @@ This resource exports the following attributes in addition to the arguments abov * `update` - (Default `180m`) * `delete` - (Default `90m`) - + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/verifiedaccess_instance.html.markdown b/website/docs/cdktf/typescript/r/verifiedaccess_instance.html.markdown index e2c6643e3003..5b57fc85bf31 100644 --- a/website/docs/cdktf/typescript/r/verifiedaccess_instance.html.markdown +++ b/website/docs/cdktf/typescript/r/verifiedaccess_instance.html.markdown @@ -87,6 +87,7 @@ class MyConvertedCode extends TerraformStack { The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` - (Optional) A description for the AWS Verified Access Instance. * `fipsEnabled` - (Optional, Forces new resource) Enable or disable support for Federal Information Processing Standards (FIPS) on the AWS Verified Access Instance. * `cidrEndpointsCustomSubdomain` - (Optional) The custom subdomain for the CIDR endpoints. @@ -143,4 +144,4 @@ Using `terraform import`, import Verified Access Instances using the `id`. For % terraform import aws_verifiedaccess_instance.example vai-1234567890abcdef0 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/verifiedaccess_instance_logging_configuration.html.markdown b/website/docs/cdktf/typescript/r/verifiedaccess_instance_logging_configuration.html.markdown index eba821c06e9d..53eb19e8139b 100644 --- a/website/docs/cdktf/typescript/r/verifiedaccess_instance_logging_configuration.html.markdown +++ b/website/docs/cdktf/typescript/r/verifiedaccess_instance_logging_configuration.html.markdown @@ -205,6 +205,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `accessLogs` - (Required) A block that specifies the configuration options for Verified Access instances. [Detailed below](#access_logs). * `verifiedaccessInstanceId` - (Required - Forces New resource) The ID of the Verified Access instance. 
@@ -277,4 +278,4 @@ Using `terraform import`, import Verified Access Logging Configuration using the % terraform import aws_verifiedaccess_instance_logging_configuration.example vai-1234567890abcdef0 ``` - + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/verifiedaccess_instance_trust_provider_attachment.html.markdown b/website/docs/cdktf/typescript/r/verifiedaccess_instance_trust_provider_attachment.html.markdown index 184d8b1e9d6c..1c863615ffc4 100644 --- a/website/docs/cdktf/typescript/r/verifiedaccess_instance_trust_provider_attachment.html.markdown +++ b/website/docs/cdktf/typescript/r/verifiedaccess_instance_trust_provider_attachment.html.markdown @@ -58,8 +58,9 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -The following arguments are required: +This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `verifiedaccessInstanceId` - (Required) The ID of the Verified Access instance to attach the Trust Provider to. * `verifiedaccessTrustProviderId` - (Required) The ID of the Verified Access trust provider. 
@@ -101,4 +102,4 @@ Using `terraform import`, import Verified Access Instance Trust Provider Attachm % terraform import aws_verifiedaccess_instance_trust_provider_attachment.example vai-1234567890abcdef0/vatp-8012925589 ``` - + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/verifiedaccess_trust_provider.html.markdown b/website/docs/cdktf/typescript/r/verifiedaccess_trust_provider.html.markdown index 6af5fb52b888..c78640e92412 100644 --- a/website/docs/cdktf/typescript/r/verifiedaccess_trust_provider.html.markdown +++ b/website/docs/cdktf/typescript/r/verifiedaccess_trust_provider.html.markdown @@ -45,6 +45,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` - (Optional) A description for the AWS Verified Access trust provider. * `deviceOptions` - (Optional) A block of options for device identity based trust providers. * `deviceTrustProviderType` (Optional) The type of device-based trust provider. @@ -99,4 +100,4 @@ Using `terraform import`, import Transfer Workflows using the `id`. 
For example % terraform import aws_verifiedaccess_trust_provider.example vatp-8012925589 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/verifiedpermissions_identity_source.html.markdown b/website/docs/cdktf/typescript/r/verifiedpermissions_identity_source.html.markdown index e3ea82d309a7..97c3a4449043 100644 --- a/website/docs/cdktf/typescript/r/verifiedpermissions_identity_source.html.markdown +++ b/website/docs/cdktf/typescript/r/verifiedpermissions_identity_source.html.markdown @@ -142,6 +142,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `policyStoreId` - (Required) Specifies the ID of the policy store in which you want to store this identity source. * `configuration`- (Required) Specifies the details required to communicate with the identity provider (IdP) associated with this identity source. See [Configuration](#configuration) below. * `principalEntityType`- (Optional) Specifies the namespace and data type of the principals generated for identities authenticated by the new identity source. 
@@ -226,4 +227,4 @@ Using `terraform import`, import Verified Permissions Identity Source using the % terraform import aws_verifiedpermissions_identity_source.example policy-store-id-12345678:identity-source-id-12345678 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/verifiedpermissions_policy.html.markdown b/website/docs/cdktf/typescript/r/verifiedpermissions_policy.html.markdown index 36917b2d1fdb..f0a235fe3a23 100644 --- a/website/docs/cdktf/typescript/r/verifiedpermissions_policy.html.markdown +++ b/website/docs/cdktf/typescript/r/verifiedpermissions_policy.html.markdown @@ -48,8 +48,9 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -The following arguments are required: +This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `policyStoreId` - (Required) The Policy Store ID of the policy store. * `definition`- (Required) The definition of the policy. See [Definition](#definition) below. 
@@ -112,4 +113,4 @@ Using `terraform import`, import Verified Permissions Policy using the `policy_i % terraform import aws_verifiedpermissions_policy.example policy-id-12345678,policy-store-id-12345678 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/verifiedpermissions_policy_store.html.markdown b/website/docs/cdktf/typescript/r/verifiedpermissions_policy_store.html.markdown index ebcbe0c6e3ce..33bb4b927697 100644 --- a/website/docs/cdktf/typescript/r/verifiedpermissions_policy_store.html.markdown +++ b/website/docs/cdktf/typescript/r/verifiedpermissions_policy_store.html.markdown @@ -49,6 +49,8 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `deletionProtection` - (Optional) Specifies whether the policy store can be deleted. If enabled, the policy store can't be deleted. Valid Values: `ENABLED`, `DISABLED`. Default value: `DISABLED`. * `description` - (Optional) A description of the Policy Store. * `tags` - (Optional) Key-value mapping of resource tags. If configured with a provider [`defaultTags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. 
@@ -92,4 +94,4 @@ Using `terraform import`, import Verified Permissions Policy Store using the `po % terraform import aws_verifiedpermissions_policy_store.example DxQg2j8xvXJQ1tQCYNWj9T ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/verifiedpermissions_policy_template.html.markdown b/website/docs/cdktf/typescript/r/verifiedpermissions_policy_template.html.markdown index 2f59b06d3776..8fa6287a92a6 100644 --- a/website/docs/cdktf/typescript/r/verifiedpermissions_policy_template.html.markdown +++ b/website/docs/cdktf/typescript/r/verifiedpermissions_policy_template.html.markdown @@ -48,6 +48,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` - (Optional) Provides a description for the policy template. 
## Attribute Reference @@ -89,4 +90,4 @@ Using `terraform import`, import Verified Permissions Policy Store using the `po % terraform import aws_verifiedpermissions_policy_template.example policyStoreId:policyTemplateId ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/verifiedpermissions_schema.html.markdown b/website/docs/cdktf/typescript/r/verifiedpermissions_schema.html.markdown index 7ff85b68c9b6..0cc40625829f 100644 --- a/website/docs/cdktf/typescript/r/verifiedpermissions_schema.html.markdown +++ b/website/docs/cdktf/typescript/r/verifiedpermissions_schema.html.markdown @@ -52,8 +52,9 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -The following arguments are required: +This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `policyStoreId` - (Required) The ID of the Policy Store. * `definition` - (Required) The definition of the schema. * `value` - (Required) A JSON string representation of the schema. 
@@ -96,4 +97,4 @@ Using `terraform import`, import Verified Permissions Policy Store Schema using % terraform import aws_verifiedpermissions_schema.example DxQg2j8xvXJQ1tQCYNWj9T ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/volume_attachment.html.markdown b/website/docs/cdktf/typescript/r/volume_attachment.html.markdown index beb83515a32f..98cbef13832d 100644 --- a/website/docs/cdktf/typescript/r/volume_attachment.html.markdown +++ b/website/docs/cdktf/typescript/r/volume_attachment.html.markdown @@ -57,6 +57,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `deviceName` - (Required) The device name to expose to the instance (for example, `/dev/sdh` or `xvdh`). See [Device Naming on Linux Instances][1] and [Device Naming on Windows Instances][2] for more information. 
* `instanceId` - (Required) ID of the Instance to attach to @@ -117,4 +118,4 @@ Using `terraform import`, import EBS Volume Attachments using `DEVICE_NAME:VOLUM [2]: https://docs.aws.amazon.com/AWSEC2/latest/WindowsGuide/device_naming.html#available-ec2-device-names [3]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-detaching-volume.html - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/vpc.html.markdown b/website/docs/cdktf/typescript/r/vpc.html.markdown index fc72daccc8dc..96a5319843f3 100644 --- a/website/docs/cdktf/typescript/r/vpc.html.markdown +++ b/website/docs/cdktf/typescript/r/vpc.html.markdown @@ -84,14 +84,14 @@ class MyConvertedCode extends TerraformStack { const test = new VpcIpam(this, "test", { operatingRegions: [ { - regionName: Token.asString(current.name), + regionName: Token.asString(current.region), }, ], }); const awsVpcIpamPoolTest = new VpcIpamPool(this, "test_2", { addressFamily: "ipv4", ipamScopeId: test.privateDefaultScopeId, - locale: Token.asString(current.name), + locale: Token.asString(current.region), }); /*This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match.*/ awsVpcIpamPoolTest.overrideLogicalId("test"); @@ -117,6 +117,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `cidrBlock` - (Optional) The IPv4 CIDR block for the VPC. CIDR can be explicitly set or it can be derived from IPAM using `ipv4NetmaskLength`. * `instanceTenancy` - (Optional) A tenancy option for instances launched into the VPC. 
Default is `default`, which ensures that EC2 instances launched in this VPC use the EC2 instance tenancy attribute specified when the EC2 instance is launched. The only other option is `dedicated`, which ensures that EC2 instances launched in this VPC are run on dedicated tenancy instances regardless of the tenancy attribute specified at launch. This has a dedicated per region fee of $2 per hour, plus an hourly per instance usage fee. * `ipv4IpamPoolId` - (Optional) The ID of an IPv4 IPAM pool you want to use for allocating this VPC's CIDR. IPAM is a VPC feature that you can use to automate your IP address management workflows including assigning, tracking, troubleshooting, and auditing IP addresses across AWS Regions and accounts. Using IPAM you can monitor IP address usage throughout your AWS Organization. @@ -181,4 +182,4 @@ Using `terraform import`, import VPCs using the VPC `id`. For example: % terraform import aws_vpc.test_vpc vpc-a01106c2 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/vpc_block_public_access_exclusion.html.markdown b/website/docs/cdktf/typescript/r/vpc_block_public_access_exclusion.html.markdown index 4d4dd369cc88..8038bb839e2d 100644 --- a/website/docs/cdktf/typescript/r/vpc_block_public_access_exclusion.html.markdown +++ b/website/docs/cdktf/typescript/r/vpc_block_public_access_exclusion.html.markdown @@ -24,8 +24,8 @@ import { TerraformStack } from "cdktf"; * Provider bindings are generated by running `cdktf get`. * See https://cdk.tf/provider-generation for more details. 
*/ -import { VpcBlockPublicAccessExclusion } from "./.gen/providers/aws/"; import { Vpc } from "./.gen/providers/aws/vpc"; +import { VpcBlockPublicAccessExclusion } from "./.gen/providers/aws/vpc-block-public-access-exclusion"; class MyConvertedCode extends TerraformStack { constructor(scope: Construct, name: string) { super(scope, name); @@ -34,8 +34,8 @@ class MyConvertedCode extends TerraformStack { }); const awsVpcBlockPublicAccessExclusionTest = new VpcBlockPublicAccessExclusion(this, "test_1", { - internet_gateway_exclusion_mode: "allow-bidirectional", - vpc_id: test.id, + internetGatewayExclusionMode: "allow-bidirectional", + vpcId: test.id, }); /*This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match.*/ awsVpcBlockPublicAccessExclusionTest.overrideLogicalId("test"); @@ -49,14 +49,14 @@ class MyConvertedCode extends TerraformStack { ```typescript // DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug import { Construct } from "constructs"; -import { TerraformStack } from "cdktf"; +import { Token, TerraformStack } from "cdktf"; /* * Provider bindings are generated by running `cdktf get`. * See https://cdk.tf/provider-generation for more details. 
*/ -import { VpcBlockPublicAccessExclusion } from "./.gen/providers/aws/"; import { Subnet } from "./.gen/providers/aws/subnet"; import { Vpc } from "./.gen/providers/aws/vpc"; +import { VpcBlockPublicAccessExclusion } from "./.gen/providers/aws/vpc-block-public-access-exclusion"; class MyConvertedCode extends TerraformStack { constructor(scope: Construct, name: string) { super(scope, name); @@ -71,8 +71,8 @@ class MyConvertedCode extends TerraformStack { awsSubnetTest.overrideLogicalId("test"); const awsVpcBlockPublicAccessExclusionTest = new VpcBlockPublicAccessExclusion(this, "test_2", { - internet_gateway_exclusion_mode: "allow-egress", - subnet_id: awsSubnetTest.id, + internetGatewayExclusionMode: "allow-egress", + subnetId: Token.asString(awsSubnetTest.id), }); /*This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match.*/ awsVpcBlockPublicAccessExclusionTest.overrideLogicalId("test"); @@ -85,10 +85,11 @@ class MyConvertedCode extends TerraformStack { The following arguments are required: -* `internet_gateway_exclusion_mode` - (Required) Mode of exclusion from Block Public Access. The allowed values are `allow-egress` and `allow-bidirectional`. +* `internetGatewayExclusionMode` - (Required) Mode of exclusion from Block Public Access. The allowed values are `allow-egress` and `allow-bidirectional`. The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `vpcId` - (Optional) Id of the VPC to which this exclusion applies. Either this or the subnet_id needs to be provided. * `subnetId` - (Optional) Id of the subnet to which this exclusion applies. Either this or the vpc_id needs to be provided. 
* `tags` - (Optional) A map of tags to assign to the exclusion. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. @@ -121,7 +122,7 @@ import { TerraformStack } from "cdktf"; * Provider bindings are generated by running `cdktf get`. * See https://cdk.tf/provider-generation for more details. */ -import { VpcBlockPublicAccessExclusion } from "./.gen/providers/aws/"; +import { VpcBlockPublicAccessExclusion } from "./.gen/providers/aws/vpc-block-public-access-exclusion"; class MyConvertedCode extends TerraformStack { constructor(scope: Construct, name: string) { super(scope, name); @@ -141,4 +142,4 @@ Using `terraform import`, import EC2 (Elastic Compute Cloud) VPC Block Public Ac % terraform import aws_vpc_block_public_access_exclusion.example vpcbpa-exclude-1234abcd ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/vpc_block_public_access_options.html.markdown b/website/docs/cdktf/typescript/r/vpc_block_public_access_options.html.markdown index 684ea54225ee..03dcf9e80fb5 100644 --- a/website/docs/cdktf/typescript/r/vpc_block_public_access_options.html.markdown +++ b/website/docs/cdktf/typescript/r/vpc_block_public_access_options.html.markdown @@ -24,12 +24,12 @@ import { TerraformStack } from "cdktf"; * Provider bindings are generated by running `cdktf get`. * See https://cdk.tf/provider-generation for more details. 
*/ -import { VpcBlockPublicAccessOptions } from "./.gen/providers/aws/"; +import { VpcBlockPublicAccessOptions } from "./.gen/providers/aws/vpc-block-public-access-options"; class MyConvertedCode extends TerraformStack { constructor(scope: Construct, name: string) { super(scope, name); new VpcBlockPublicAccessOptions(this, "example", { - internet_gateway_block_mode: "block-bidirectional", + internetGatewayBlockMode: "block-bidirectional", }); } } @@ -38,9 +38,10 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -The following arguments are required: +This resource supports the following arguments: -* `internet_gateway_block_mode` - (Required) Block mode. Needs to be one of `block-bidirectional`, `block-ingress`, `off`. If this resource is deleted, then this value will be set to `off` in the AWS account and region. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `internetGatewayBlockMode` - (Required) Block mode. Needs to be one of `block-bidirectional`, `block-ingress`, `off`. If this resource is deleted, then this value will be set to `off` in the AWS account and region. ## Attribute Reference @@ -69,7 +70,7 @@ import { TerraformStack } from "cdktf"; * Provider bindings are generated by running `cdktf get`. * See https://cdk.tf/provider-generation for more details. 
*/ -import { VpcBlockPublicAccessOptions } from "./.gen/providers/aws/"; +import { VpcBlockPublicAccessOptions } from "./.gen/providers/aws/vpc-block-public-access-options"; class MyConvertedCode extends TerraformStack { constructor(scope: Construct, name: string) { super(scope, name); @@ -89,4 +90,4 @@ Using `terraform import`, import VPC Block Public Access Options using the `awsR % terraform import aws_vpc_block_public_access_options.example us-east-1 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/vpc_dhcp_options.html.markdown b/website/docs/cdktf/typescript/r/vpc_dhcp_options.html.markdown index f6442a39f933..4d9069052744 100644 --- a/website/docs/cdktf/typescript/r/vpc_dhcp_options.html.markdown +++ b/website/docs/cdktf/typescript/r/vpc_dhcp_options.html.markdown @@ -70,6 +70,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `domainName` - (Optional) the suffix domain name to use by default when resolving non Fully Qualified Domain Names. In other words, this is what ends up being the `search` value in the `/etc/resolv.conf` file. * `domainNameServers` - (Optional) List of name servers to configure in `/etc/resolv.conf`. If you want to use the default AWS nameservers you should set this to `AmazonProvidedDNS`. * `ipv6AddressPreferredLeaseTime` - (Optional) How frequently, in seconds, a running instance with an IPv6 assigned to it goes through DHCPv6 lease renewal. Acceptable values are between 140 and 2147483647 (approximately 68 years). If no value is entered, the default lease time is 140 seconds. 
If you use long-term addressing for EC2 instances, you can increase the lease time and avoid frequent lease renewal requests. Lease renewal typically occurs when half of the lease time has elapsed. @@ -126,4 +127,4 @@ Using `terraform import`, import VPC DHCP Options using the DHCP Options `id`. F % terraform import aws_vpc_dhcp_options.my_options dopt-d9070ebb ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/vpc_dhcp_options_association.html.markdown b/website/docs/cdktf/typescript/r/vpc_dhcp_options_association.html.markdown index e40385522b52..6edec271efb5 100644 --- a/website/docs/cdktf/typescript/r/vpc_dhcp_options_association.html.markdown +++ b/website/docs/cdktf/typescript/r/vpc_dhcp_options_association.html.markdown @@ -39,6 +39,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `vpcId` - (Required) The ID of the VPC to which we would like to associate a DHCP Options Set. * `dhcpOptionsId` - (Required) The ID of the DHCP Options Set to associate to the VPC. 
@@ -85,4 +86,4 @@ Using `terraform import`, import DHCP associations using the VPC ID associated w % terraform import aws_vpc_dhcp_options_association.imported vpc-0f001273ec18911b1 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/vpc_endpoint.html.markdown b/website/docs/cdktf/typescript/r/vpc_endpoint.html.markdown index f93fc6bac2fe..d0279cd4559f 100644 --- a/website/docs/cdktf/typescript/r/vpc_endpoint.html.markdown +++ b/website/docs/cdktf/typescript/r/vpc_endpoint.html.markdown @@ -276,6 +276,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `vpcId` - (Required) The ID of the VPC in which the endpoint will be used. * `autoAccept` - (Optional) Accept the VPC endpoint (the VPC endpoint and service need to be in the same AWS account). * `policy` - (Optional) A policy to attach to the endpoint that controls access to the service. This is a JSON formatted string. Defaults to full access. All `Gateway` and some `Interface` endpoints support policies - see the [relevant AWS documentation](https://docs.aws.amazon.com/vpc/latest/userguide/vpc-endpoints-access.html) for more details. For more information about building AWS IAM policy documents with Terraform, see the [AWS IAM Policy Document Guide](https://learn.hashicorp.com/terraform/aws/iam-policy). @@ -304,7 +305,7 @@ If no security groups are specified, the VPC's [default security group](https:// * `ipv4` - (Optional) The IPv4 address to assign to the endpoint network interface in the subnet. You must provide an IPv4 address if the VPC endpoint supports IPv4. 
* `ipv6` - (Optional) The IPv6 address to assign to the endpoint network interface in the subnet. You must provide an IPv6 address if the VPC endpoint supports IPv6. -* `subnet` - (Optional) The ID of the subnet. Must have a corresponding subnet in the `subnetIds` argument. +* `subnetId` - (Optional) The ID of the subnet. Must have a corresponding subnet in the `subnetIds` argument. ## Timeouts @@ -336,6 +337,32 @@ DNS blocks (for `dnsEntry`) support the following attributes: ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_vpc_endpoint.example + identity = { + id = "vpce-3ecf2a57" + } +} + +resource "aws_vpc_endpoint" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `id` - (String) ID of the VPC endpoint. + +#### Optional + +* `accountId` (String) AWS Account where this resource is managed. +* `region` (String) Region where this resource is managed. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import VPC Endpoints using the VPC endpoint `id`. For example: ```typescript @@ -350,7 +377,7 @@ import { VpcEndpoint } from "./.gen/providers/aws/vpc-endpoint"; class MyConvertedCode extends TerraformStack { constructor(scope: Construct, name: string) { super(scope, name); - VpcEndpoint.generateConfigForImport(this, "endpoint1", "vpce-3ecf2a57"); + VpcEndpoint.generateConfigForImport(this, "example", "vpce-3ecf2a57"); } } @@ -359,7 +386,7 @@ class MyConvertedCode extends TerraformStack { Using `terraform import`, import VPC Endpoints using the VPC endpoint `id`. 
For example: ```console -% terraform import aws_vpc_endpoint.endpoint1 vpce-3ecf2a57 +% terraform import aws_vpc_endpoint.example vpce-3ecf2a57 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/vpc_endpoint_connection_accepter.html.markdown b/website/docs/cdktf/typescript/r/vpc_endpoint_connection_accepter.html.markdown index c8c6dde79b02..cbb6eebdb83a 100644 --- a/website/docs/cdktf/typescript/r/vpc_endpoint_connection_accepter.html.markdown +++ b/website/docs/cdktf/typescript/r/vpc_endpoint_connection_accepter.html.markdown @@ -64,6 +64,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `vpcEndpointId` - (Required) AWS VPC Endpoint ID. * `vpcEndpointServiceId` - (Required) AWS VPC Endpoint Service ID. 
@@ -106,4 +107,4 @@ Using `terraform import`, import VPC Endpoint Services using ID of the connectio % terraform import aws_vpc_endpoint_connection_accepter.foo vpce-svc-0f97a19d3fa8220bc_vpce-010601a6db371e263 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/vpc_endpoint_connection_notification.html.markdown b/website/docs/cdktf/typescript/r/vpc_endpoint_connection_notification.html.markdown index 31bb57612398..5c1b805d3dea 100644 --- a/website/docs/cdktf/typescript/r/vpc_endpoint_connection_notification.html.markdown +++ b/website/docs/cdktf/typescript/r/vpc_endpoint_connection_notification.html.markdown @@ -72,6 +72,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `vpcEndpointServiceId` - (Optional) The ID of the VPC Endpoint Service to receive notifications for. * `vpcEndpointId` - (Optional) The ID of the VPC Endpoint to receive notifications for. * `connectionNotificationArn` - (Required) The ARN of the SNS topic for the notifications. 
@@ -119,4 +120,4 @@ Using `terraform import`, import VPC Endpoint connection notifications using the % terraform import aws_vpc_endpoint_connection_notification.foo vpce-nfn-09e6ed3b4efba2263 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/vpc_endpoint_policy.html.markdown b/website/docs/cdktf/typescript/r/vpc_endpoint_policy.html.markdown index 1b23f58e4970..660a0923db40 100644 --- a/website/docs/cdktf/typescript/r/vpc_endpoint_policy.html.markdown +++ b/website/docs/cdktf/typescript/r/vpc_endpoint_policy.html.markdown @@ -81,6 +81,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `vpcEndpointId` - (Required) The VPC Endpoint ID. * `policy` - (Optional) A policy to attach to the endpoint that controls access to the service. Defaults to full access. All `Gateway` and some `Interface` endpoints support policies - see the [relevant AWS documentation](https://docs.aws.amazon.com/vpc/latest/userguide/vpc-endpoints-access.html) for more details. For more information about building AWS IAM policy documents with Terraform, see the [AWS IAM Policy Document Guide](https://learn.hashicorp.com/terraform/aws/iam-policy). @@ -118,4 +119,4 @@ Using `terraform import`, import VPC Endpoint Policies using the `id`. 
For examp % terraform import aws_vpc_endpoint_policy.example vpce-3ecf2a57 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/vpc_endpoint_private_dns.html.markdown b/website/docs/cdktf/typescript/r/vpc_endpoint_private_dns.html.markdown index 28c494e79338..492285224d37 100644 --- a/website/docs/cdktf/typescript/r/vpc_endpoint_private_dns.html.markdown +++ b/website/docs/cdktf/typescript/r/vpc_endpoint_private_dns.html.markdown @@ -41,8 +41,9 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -The following arguments are required: +This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `privateDnsEnabled` - (Required) Indicates whether a private hosted zone is associated with the VPC. Only applicable for `Interface` endpoints. * `vpcEndpointId` - (Required) VPC endpoint identifier. 
@@ -82,4 +83,4 @@ Using `terraform import`, import a VPC (Virtual Private Cloud) Endpoint Private % terraform import aws_vpc_endpoint_private_dns.example vpce-abcd-1234 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/vpc_endpoint_route_table_association.html.markdown b/website/docs/cdktf/typescript/r/vpc_endpoint_route_table_association.html.markdown index 160a445a8f85..f39f894c251e 100644 --- a/website/docs/cdktf/typescript/r/vpc_endpoint_route_table_association.html.markdown +++ b/website/docs/cdktf/typescript/r/vpc_endpoint_route_table_association.html.markdown @@ -39,6 +39,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `routeTableId` - (Required) Identifier of the EC2 Route Table to be associated with the VPC Endpoint. * `vpcEndpointId` - (Required) Identifier of the VPC Endpoint with which the EC2 Route Table will be associated. 
@@ -80,4 +81,4 @@ Using `terraform import`, import VPC Endpoint Route Table Associations using `vp % terraform import aws_vpc_endpoint_route_table_association.example vpce-aaaaaaaa/rtb-bbbbbbbb ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/vpc_endpoint_security_group_association.html.markdown b/website/docs/cdktf/typescript/r/vpc_endpoint_security_group_association.html.markdown index f3649b4a5281..0a4a78660288 100644 --- a/website/docs/cdktf/typescript/r/vpc_endpoint_security_group_association.html.markdown +++ b/website/docs/cdktf/typescript/r/vpc_endpoint_security_group_association.html.markdown @@ -47,6 +47,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `securityGroupId` - (Required) The ID of the security group to be associated with the VPC endpoint. * `vpcEndpointId` - (Required) The ID of the VPC endpoint with which the security group will be associated. * `replaceDefaultAssociation` - (Optional) Whether this association should replace the association with the VPC's default security group that is created when no security groups are specified during VPC endpoint creation. At most 1 association per-VPC endpoint should be configured with `replace_default_association = true`. `false` should be used when importing resources. 
@@ -89,4 +90,4 @@ Using `terraform import`, import VPC Endpoint Security Group Associations using % terraform import aws_vpc_endpoint_security_group_association.example vpce-aaaaaaaa/sg-bbbbbbbbbbbbbbbbb ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/vpc_endpoint_service.html.markdown b/website/docs/cdktf/typescript/r/vpc_endpoint_service.html.markdown index fb7a197da5fa..afe7132600fa 100644 --- a/website/docs/cdktf/typescript/r/vpc_endpoint_service.html.markdown +++ b/website/docs/cdktf/typescript/r/vpc_endpoint_service.html.markdown @@ -71,6 +71,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `acceptanceRequired` - (Required) Whether or not VPC endpoint connection requests to the service must be accepted by the service owner - `true` or `false`. * `allowedPrincipals` - (Optional) The ARNs of one or more principals allowed to discover the endpoint service. * `gatewayLoadBalancerArns` - (Optional) Amazon Resource Names (ARNs) of one or more Gateway Load Balancers for the endpoint service. 
@@ -131,4 +132,4 @@ Using `terraform import`, import VPC Endpoint Services using the VPC endpoint se % terraform import aws_vpc_endpoint_service.foo vpce-svc-0f97a19d3fa8220bc ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/vpc_endpoint_service_allowed_principal.html.markdown b/website/docs/cdktf/typescript/r/vpc_endpoint_service_allowed_principal.html.markdown index 8e5060ee6d95..c531d85cd9a9 100644 --- a/website/docs/cdktf/typescript/r/vpc_endpoint_service_allowed_principal.html.markdown +++ b/website/docs/cdktf/typescript/r/vpc_endpoint_service_allowed_principal.html.markdown @@ -49,6 +49,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `vpcEndpointServiceId` - (Required) The ID of the VPC endpoint service to allow permission. * `principalArn` - (Required) The ARN of the principal to allow permissions. @@ -58,4 +59,4 @@ This resource exports the following attributes in addition to the arguments abov * `id` - The ID of the association. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/vpc_endpoint_service_private_dns_verification.html.markdown b/website/docs/cdktf/typescript/r/vpc_endpoint_service_private_dns_verification.html.markdown index 5cbc967fac84..cd4b1d79267b 100644 --- a/website/docs/cdktf/typescript/r/vpc_endpoint_service_private_dns_verification.html.markdown +++ b/website/docs/cdktf/typescript/r/vpc_endpoint_service_private_dns_verification.html.markdown @@ -49,6 +49,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `waitForVerification` - (Optional) Whether to wait until the endpoint service returns a `Verified` status for the configured private DNS name. ## Attribute Reference @@ -65,4 +66,4 @@ This resource exports no additional attributes. You cannot import this resource. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/vpc_endpoint_subnet_association.html.markdown b/website/docs/cdktf/typescript/r/vpc_endpoint_subnet_association.html.markdown index 4a6d94b8b48a..278b8e8c8e3f 100644 --- a/website/docs/cdktf/typescript/r/vpc_endpoint_subnet_association.html.markdown +++ b/website/docs/cdktf/typescript/r/vpc_endpoint_subnet_association.html.markdown @@ -47,6 +47,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `vpcEndpointId` - (Required) The ID of the VPC endpoint with which the subnet will be associated. * `subnetId` - (Required) The ID of the subnet to be associated with the VPC endpoint. @@ -95,4 +96,4 @@ Using `terraform import`, import VPC Endpoint Subnet Associations using `vpcEndp % terraform import aws_vpc_endpoint_subnet_association.example vpce-aaaaaaaa/subnet-bbbbbbbbbbbbbbbbb ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/vpc_ipam.html.markdown b/website/docs/cdktf/typescript/r/vpc_ipam.html.markdown index 043d3cdb3956..4c11c3bcfa54 100644 --- a/website/docs/cdktf/typescript/r/vpc_ipam.html.markdown +++ b/website/docs/cdktf/typescript/r/vpc_ipam.html.markdown @@ -34,7 +34,7 @@ class MyConvertedCode extends TerraformStack { description: "My IPAM", operatingRegions: [ { - regionName: Token.asString(current.name), + regionName: Token.asString(current.region), }, ], tags: { @@ -76,7 +76,7 @@ class MyConvertedCode extends TerraformStack { }); const current = new DataAwsRegion(this, "current", {}); const allIpamRegions = Fn.distinct( - Token.asAny(Fn.concat([[current.name], ipamRegions.value])) + Token.asAny(Fn.concat([[current.region], ipamRegions.value])) ); /*In most cases loops should be handled in the programming language context and not inside of the Terraform context. If you are looping over something external, e.g. a variable or a file input @@ -100,9 +100,11 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `cascade` - (Optional) Enables you to quickly delete an IPAM, private scopes, pools in private scopes, and any allocations in the pools in private scopes. * `description` - (Optional) A description for the IPAM. * `enablePrivateGua` - (Optional) Enable this option to use your own GUA ranges as private IPv6 addresses. Default: `false`. +* `metered_account` - (Optional) AWS account that is charged for active IP addresses managed in IPAM. Valid values are `ipam-owner` (default) and `resource-owner`. * `operatingRegions` - (Required) Determines which locales can be chosen when you create pools. Locale is the Region where you want to make an IPAM pool available for allocations. You can only create pools with locales that match the operating Regions of the IPAM. You can only create VPCs from a pool whose locale matches the VPC's Region. You specify a region using the [region_name](#operating_regions) parameter. You **must** set your provider block region as an operating_region. * `tier` - (Optional) specifies the IPAM tier. Valid options include `free` and `advanced`. Default is `advanced`. * `tags` - (Optional) A map of tags to assign to the resource. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. @@ -153,4 +155,4 @@ Using `terraform import`, import IPAMs using the IPAM `id`. 
For example: % terraform import aws_vpc_ipam.example ipam-0178368ad2146a492 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/vpc_ipam_pool.html.markdown b/website/docs/cdktf/typescript/r/vpc_ipam_pool.html.markdown index 3d0c456d2145..17448ebfb55a 100644 --- a/website/docs/cdktf/typescript/r/vpc_ipam_pool.html.markdown +++ b/website/docs/cdktf/typescript/r/vpc_ipam_pool.html.markdown @@ -34,14 +34,14 @@ class MyConvertedCode extends TerraformStack { const example = new VpcIpam(this, "example", { operatingRegions: [ { - regionName: Token.asString(current.name), + regionName: Token.asString(current.region), }, ], }); const awsVpcIpamPoolExample = new VpcIpamPool(this, "example_2", { addressFamily: "ipv4", ipamScopeId: example.privateDefaultScopeId, - locale: Token.asString(current.name), + locale: Token.asString(current.region), }); /*This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match.*/ awsVpcIpamPoolExample.overrideLogicalId("example"); @@ -71,7 +71,7 @@ class MyConvertedCode extends TerraformStack { const example = new VpcIpam(this, "example", { operatingRegions: [ { - regionName: Token.asString(current.name), + regionName: Token.asString(current.region), }, ], }); @@ -86,7 +86,7 @@ class MyConvertedCode extends TerraformStack { const child = new VpcIpamPool(this, "child", { addressFamily: "ipv4", ipamScopeId: example.privateDefaultScopeId, - locale: Token.asString(current.name), + locale: Token.asString(current.region), sourceIpamPoolId: parent.id, }); new VpcIpamPoolCidr(this, "child_test", { @@ -102,6 +102,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `addressFamily` - (Required) The IP protocol assigned to this pool. You must choose either IPv4 or IPv6 protocol for a pool. * `allocationDefaultNetmaskLength` - (Optional) A default netmask length for allocations added to this pool. If, for example, the CIDR assigned to this pool is 10.0.0.0/8 and you enter 16 here, new allocations will default to 10.0.0.0/16 (unless you provide a different netmask value when you create the new allocation). * `allocationMaxNetmaskLength` - (Optional) The maximum netmask length that will be required for CIDR allocations in this pool. @@ -160,4 +161,4 @@ Using `terraform import`, import IPAMs using the IPAM pool `id`. For example: % terraform import aws_vpc_ipam_pool.example ipam-pool-0958f95207d978e1e ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/vpc_ipam_pool_cidr.html.markdown b/website/docs/cdktf/typescript/r/vpc_ipam_pool_cidr.html.markdown index 5eb290309c70..20c2fe001284 100644 --- a/website/docs/cdktf/typescript/r/vpc_ipam_pool_cidr.html.markdown +++ b/website/docs/cdktf/typescript/r/vpc_ipam_pool_cidr.html.markdown @@ -40,14 +40,14 @@ class MyConvertedCode extends TerraformStack { const example = new VpcIpam(this, "example", { operatingRegions: [ { - regionName: Token.asString(current.name), + regionName: Token.asString(current.region), }, ], }); const awsVpcIpamPoolExample = new VpcIpamPool(this, "example_2", { addressFamily: "ipv4", ipamScopeId: example.privateDefaultScopeId, - locale: Token.asString(current.name), + locale: Token.asString(current.region), }); /*This allows the Terraform resource name to match the original name. 
You can remove the call if you don't need them to match.*/ awsVpcIpamPoolExample.overrideLogicalId("example"); @@ -83,7 +83,7 @@ class MyConvertedCode extends TerraformStack { const example = new VpcIpam(this, "example", { operatingRegions: [ { - regionName: Token.asString(current.name), + regionName: Token.asString(current.region), }, ], }); @@ -115,6 +115,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `cidr` - (Optional) The CIDR you want to assign to the pool. Conflicts with `netmaskLength`. * `cidrAuthorizationContext` - (Optional) A signed document that proves that you are authorized to bring the specified IP address range to Amazon using BYOIP. This is not stored in the state file. See [cidr_authorization_context](#cidr_authorization_context) for more information. * `ipamPoolId` - (Required) The ID of the pool to which you want to assign a CIDR. @@ -168,4 +169,4 @@ Using `terraform import`, import IPAMs using the `_`. 
For ex % terraform import aws_vpc_ipam_pool_cidr.example 172.20.0.0/24_ipam-pool-0e634f5a1517cccdc ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/vpc_ipam_pool_cidr_allocation.html.markdown b/website/docs/cdktf/typescript/r/vpc_ipam_pool_cidr_allocation.html.markdown index 1b10b339fd3e..d45cdb0b2bbf 100644 --- a/website/docs/cdktf/typescript/r/vpc_ipam_pool_cidr_allocation.html.markdown +++ b/website/docs/cdktf/typescript/r/vpc_ipam_pool_cidr_allocation.html.markdown @@ -36,14 +36,14 @@ class MyConvertedCode extends TerraformStack { const example = new VpcIpam(this, "example", { operatingRegions: [ { - regionName: Token.asString(current.name), + regionName: Token.asString(current.region), }, ], }); const awsVpcIpamPoolExample = new VpcIpamPool(this, "example_2", { addressFamily: "ipv4", ipamScopeId: example.privateDefaultScopeId, - locale: Token.asString(current.name), + locale: Token.asString(current.region), }); /*This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match.*/ awsVpcIpamPoolExample.overrideLogicalId("example"); @@ -91,14 +91,14 @@ class MyConvertedCode extends TerraformStack { const example = new VpcIpam(this, "example", { operatingRegions: [ { - regionName: Token.asString(current.name), + regionName: Token.asString(current.region), }, ], }); const awsVpcIpamPoolExample = new VpcIpamPool(this, "example_2", { addressFamily: "ipv4", ipamScopeId: example.privateDefaultScopeId, - locale: Token.asString(current.name), + locale: Token.asString(current.region), }); /*This allows the Terraform resource name to match the original name. 
You can remove the call if you don't need them to match.*/ awsVpcIpamPoolExample.overrideLogicalId("example"); @@ -129,6 +129,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `cidr` - (Optional, Forces new resource) The CIDR you want to assign to the pool. * `description` - (Optional, Forces new resource) The description for the allocation. * `disallowedCidrs` - (Optional, Forces new resource) Exclude a particular CIDR range from being returned by the pool. @@ -176,4 +177,4 @@ Using `terraform import`, import IPAM allocations using the allocation `id` and % terraform import aws_vpc_ipam_pool_cidr_allocation.example ipam-pool-alloc-0dc6d196509c049ba8b549ff99f639736_ipam-pool-07cfb559e0921fcbe ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/vpc_ipam_preview_next_cidr.html.markdown b/website/docs/cdktf/typescript/r/vpc_ipam_preview_next_cidr.html.markdown index 9ce999287c46..60c90c663909 100644 --- a/website/docs/cdktf/typescript/r/vpc_ipam_preview_next_cidr.html.markdown +++ b/website/docs/cdktf/typescript/r/vpc_ipam_preview_next_cidr.html.markdown @@ -36,14 +36,14 @@ class MyConvertedCode extends TerraformStack { const example = new VpcIpam(this, "example", { operatingRegions: [ { - regionName: Token.asString(current.name), + regionName: Token.asString(current.region), }, ], }); const awsVpcIpamPoolExample = new VpcIpamPool(this, "example_2", { addressFamily: "ipv4", ipamScopeId: example.privateDefaultScopeId, - locale: Token.asString(current.name), + locale: Token.asString(current.region), }); /*This allows the Terraform resource name to match the original 
name. You can remove the call if you don't need them to match.*/ awsVpcIpamPoolExample.overrideLogicalId("example"); @@ -74,6 +74,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `disallowedCidrs` - (Optional) Exclude a particular CIDR range from being returned by the pool. * `ipamPoolId` - (Required) The ID of the pool to which you want to assign a CIDR. * `netmaskLength` - (Optional) The netmask length of the CIDR you would like to preview from the IPAM pool. @@ -85,4 +86,4 @@ This resource exports the following attributes in addition to the arguments abov * `cidr` - The previewed CIDR from the pool. * `id` - The ID of the preview. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/vpc_ipam_resource_discovery.html.markdown b/website/docs/cdktf/typescript/r/vpc_ipam_resource_discovery.html.markdown index 39a053f163f7..5224e002f966 100644 --- a/website/docs/cdktf/typescript/r/vpc_ipam_resource_discovery.html.markdown +++ b/website/docs/cdktf/typescript/r/vpc_ipam_resource_discovery.html.markdown @@ -34,7 +34,7 @@ class MyConvertedCode extends TerraformStack { description: "My IPAM Resource Discovery", operatingRegions: [ { - regionName: Token.asString(current.name), + regionName: Token.asString(current.region), }, ], tags: { @@ -50,6 +50,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` - (Optional) A description for the IPAM Resource Discovery. * `operatingRegions` - (Required) Determines which regions the Resource Discovery will enable IPAM features for usage and monitoring. Locale is the Region where you want to make an IPAM pool available for allocations. You can only create pools with locales that match the operating Regions of the IPAM Resource Discovery. You can only create VPCs from a pool whose locale matches the VPC's Region. You specify a region using the [region_name](#operating_regions) parameter. **You must set your provider block region as an operating_region.** * `tags` - (Optional) A map of tags to assign to the resource. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. @@ -101,4 +102,4 @@ Using `terraform import`, import IPAMs using the IPAM resource discovery `id`. 
F % terraform import aws_vpc_ipam_resource_discovery.example ipam-res-disco-0178368ad2146a492 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/vpc_ipam_resource_discovery_association.html.markdown b/website/docs/cdktf/typescript/r/vpc_ipam_resource_discovery_association.html.markdown index 552a3b1e6222..7556b4b2da3e 100644 --- a/website/docs/cdktf/typescript/r/vpc_ipam_resource_discovery_association.html.markdown +++ b/website/docs/cdktf/typescript/r/vpc_ipam_resource_discovery_association.html.markdown @@ -48,6 +48,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `ipamId` - (Required) The ID of the IPAM to associate. * `ipamResourceDiscoveryId` - (Required) The ID of the Resource Discovery to associate. * `tags` - (Optional) A map of tags to add to the IPAM resource discovery association resource. 
@@ -97,4 +98,4 @@ Using `terraform import`, import IPAMs using the IPAM resource discovery associa % terraform import aws_vpc_ipam_resource_discovery_association.example ipam-res-disco-assoc-0178368ad2146a492 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/vpc_ipam_scope.html.markdown b/website/docs/cdktf/typescript/r/vpc_ipam_scope.html.markdown index 4627b72fd656..f1f056a3ba00 100644 --- a/website/docs/cdktf/typescript/r/vpc_ipam_scope.html.markdown +++ b/website/docs/cdktf/typescript/r/vpc_ipam_scope.html.markdown @@ -34,7 +34,7 @@ class MyConvertedCode extends TerraformStack { const example = new VpcIpam(this, "example", { operatingRegions: [ { - regionName: Token.asString(current.name), + regionName: Token.asString(current.region), }, ], }); @@ -53,6 +53,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `ipamId` - The ID of the IPAM for which you're creating this scope. * `description` - (Optional) A description for the scope you're creating. * `tags` - (Optional) Key-value mapping of resource tags. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. @@ -100,4 +101,4 @@ Using `terraform import`, import IPAMs using the `scope_id`. 
For example: % terraform import aws_vpc_ipam_scope.example ipam-scope-0513c69f283d11dfb ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/vpc_ipv4_cidr_block_association.html.markdown b/website/docs/cdktf/typescript/r/vpc_ipv4_cidr_block_association.html.markdown index a9593411f749..87b87fdeb8ea 100644 --- a/website/docs/cdktf/typescript/r/vpc_ipv4_cidr_block_association.html.markdown +++ b/website/docs/cdktf/typescript/r/vpc_ipv4_cidr_block_association.html.markdown @@ -46,6 +46,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `cidrBlock` - (Optional) The IPv4 CIDR block for the VPC. CIDR can be explicitly set or it can be derived from IPAM using `ipv4NetmaskLength`. * `ipv4IpamPoolId` - (Optional) The ID of an IPv4 IPAM pool you want to use for allocating this VPC's CIDR. IPAM is a VPC feature that you can use to automate your IP address management workflows including assigning, tracking, troubleshooting, and auditing IP addresses across AWS Regions and accounts. Using IPAM you can monitor IP address usage throughout your AWS Organization. * `ipv4NetmaskLength` - (Optional) The netmask length of the IPv4 CIDR you want to allocate to this VPC. Requires specifying a `ipv4IpamPoolId`. 
@@ -156,4 +157,4 @@ or % terraform import aws_vpc_ipv4_cidr_block_association.example vpc-cidr-assoc-021e8461d70ed08be,ipam-pool-0a07c432810393463,28 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/vpc_ipv6_cidr_block_association.html.markdown b/website/docs/cdktf/typescript/r/vpc_ipv6_cidr_block_association.html.markdown index a06c18c3f8fb..775621a635a8 100644 --- a/website/docs/cdktf/typescript/r/vpc_ipv6_cidr_block_association.html.markdown +++ b/website/docs/cdktf/typescript/r/vpc_ipv6_cidr_block_association.html.markdown @@ -51,6 +51,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `assignGeneratedIpv6CidrBlock` - (Optional) Requests an Amazon-provided IPv6 CIDR block with a /56 prefix length for the VPC. You cannot specify the range of IPv6 addresses, or the size of the CIDR block. Default is `false`. Conflicts with `ipv6IpamPoolId`, `ipv6Pool`, `ipv6CidrBlock` and `ipv6NetmaskLength`. * `ipv6CidrBlock` - (Optional) The IPv6 CIDR block for the VPC. CIDR can be explicitly set or it can be derived from IPAM using `ipv6NetmaskLength`. This parameter is required if `ipv6NetmaskLength` is not set and the IPAM pool does not have `allocation_default_netmask` set. Conflicts with `assignGeneratedIpv6CidrBlock`. * `ipv6IpamPoolId` - (Optional) The ID of an IPv6 IPAM pool you want to use for allocating this VPC's CIDR. IPAM is a VPC feature that you can use to automate your IP address management workflows including assigning, tracking, troubleshooting, and auditing IP addresses across AWS Regions and accounts. 
Conflict with `assignGeneratedIpv6CidrBlock` and `ipv6Pool`. @@ -165,4 +166,4 @@ or % terraform import aws_vpc_ipv6_cidr_block_association.example vpc-cidr-assoc-0754129087e149dcd,ipam-pool-0611d1d6bbc05ce60,56 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/vpc_network_performance_metric_subscription.html.markdown b/website/docs/cdktf/typescript/r/vpc_network_performance_metric_subscription.html.markdown index 67526eb764e4..d70301d4c0a6 100644 --- a/website/docs/cdktf/typescript/r/vpc_network_performance_metric_subscription.html.markdown +++ b/website/docs/cdktf/typescript/r/vpc_network_performance_metric_subscription.html.markdown @@ -39,6 +39,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `destination` - (Required) The target Region or Availability Zone that the metric subscription is enabled for. For example, `eu-west-1`. * `metric` - (Optional) The metric used for the enabled subscription. Valid values: `aggregate-latency`. Default: `aggregate-latency`. * `source` - (Required) The source Region or Availability Zone that the metric subscription is enabled for. For example, `us-east-1`. @@ -50,4 +51,4 @@ This resource exports the following attributes in addition to the arguments abov * `period` - The data aggregation time for the subscription. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/vpc_peering_connection.html.markdown b/website/docs/cdktf/typescript/r/vpc_peering_connection.html.markdown index d8328d857052..0fd6d08d6e8e 100644 --- a/website/docs/cdktf/typescript/r/vpc_peering_connection.html.markdown +++ b/website/docs/cdktf/typescript/r/vpc_peering_connection.html.markdown @@ -162,6 +162,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `peerOwnerId` - (Optional) The AWS account ID of the target peer VPC. Defaults to the account ID the [AWS provider][1] is currently connected to, so must be managed if connecting cross-account. * `peerVpcId` - (Required) The ID of the target VPC with which you are creating the VPC Peering Connection. @@ -239,4 +240,4 @@ Using `terraform import`, import VPC Peering resources using the VPC peering `id [1]: /docs/providers/aws/index.html - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/vpc_peering_connection_accepter.html.markdown b/website/docs/cdktf/typescript/r/vpc_peering_connection_accepter.html.markdown index 7dd96330373a..b2bbc9ab761f 100644 --- a/website/docs/cdktf/typescript/r/vpc_peering_connection_accepter.html.markdown +++ b/website/docs/cdktf/typescript/r/vpc_peering_connection_accepter.html.markdown @@ -21,6 +21,8 @@ connection into management. ## Example Usage +### Cross-Account Peering Or Cross-Region Peering Terraform AWS Provider v5 (and below) + ```typescript // DO NOT EDIT. 
Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug import { Construct } from "constructs"; @@ -92,10 +94,69 @@ class MyConvertedCode extends TerraformStack { ``` +### Cross-Region Peering (Same Account) Terraform AWS Provider v6 (and above) + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { Token, TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. + */ +import { AwsProvider } from "./.gen/providers/aws/provider"; +import { Vpc } from "./.gen/providers/aws/vpc"; +import { VpcPeeringConnection } from "./.gen/providers/aws/vpc-peering-connection"; +import { VpcPeeringConnectionAccepterA } from "./.gen/providers/aws/vpc-peering-connection-accepter"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + new AwsProvider(this, "aws", { + region: "us-east-1", + }); + const main = new Vpc(this, "main", { + cidrBlock: "10.0.0.0/16", + }); + const peer = new Vpc(this, "peer", { + cidrBlock: "10.1.0.0/16", + region: "us-west-2", + }); + const awsVpcPeeringConnectionPeer = new VpcPeeringConnection( + this, + "peer_3", + { + autoAccept: false, + peerRegion: "us-west-2", + peerVpcId: peer.id, + tags: { + Side: "Requester", + }, + vpcId: main.id, + } + ); + /*This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match.*/ + awsVpcPeeringConnectionPeer.overrideLogicalId("peer"); + const awsVpcPeeringConnectionAccepterPeer = + new VpcPeeringConnectionAccepterA(this, "peer_4", { + autoAccept: true, + region: "us-west-2", + tags: { + Side: "Accepter", + }, + vpcPeeringConnectionId: Token.asString(awsVpcPeeringConnectionPeer.id), + }); + /*This allows the Terraform resource name to match the original name. 
You can remove the call if you don't need them to match.*/ + awsVpcPeeringConnectionAccepterPeer.overrideLogicalId("peer"); + } +} + +``` + ## Argument Reference This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `vpcPeeringConnectionId` - (Required) The VPC Peering Connection ID to manage. * `autoAccept` - (Optional) Whether or not to accept the peering request. Defaults to `false`. * `tags` - (Optional) A map of tags to assign to the resource. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. @@ -189,4 +250,4 @@ class MyConvertedCode extends TerraformStack { ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/vpc_peering_connection_options.html.markdown b/website/docs/cdktf/typescript/r/vpc_peering_connection_options.html.markdown index 0a4b3db43442..316c39f1e649 100644 --- a/website/docs/cdktf/typescript/r/vpc_peering_connection_options.html.markdown +++ b/website/docs/cdktf/typescript/r/vpc_peering_connection_options.html.markdown @@ -169,6 +169,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
* `vpcPeeringConnectionId` - (Required) The ID of the requester VPC peering connection. * `accepter` (Optional) - An optional configuration block that allows for [VPC Peering Connection](https://docs.aws.amazon.com/vpc/latest/peering/what-is-vpc-peering.html) options to be set for the VPC that accepts the peering connection (a maximum of one). * `requester` (Optional) - A optional configuration block that allows for [VPC Peering Connection](https://docs.aws.amazon.com/vpc/latest/peering/what-is-vpc-peering.html) options to be set for the VPC that requests the peering connection (a maximum of one). @@ -217,4 +218,4 @@ Using `terraform import`, import VPC Peering Connection Options using the VPC pe % terraform import aws_vpc_peering_connection_options.foo pcx-111aaa111 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/vpc_route_server.html.markdown b/website/docs/cdktf/typescript/r/vpc_route_server.html.markdown index 0c93e893551e..30db235333a1 100644 --- a/website/docs/cdktf/typescript/r/vpc_route_server.html.markdown +++ b/website/docs/cdktf/typescript/r/vpc_route_server.html.markdown @@ -23,12 +23,12 @@ import { TerraformStack } from "cdktf"; * Provider bindings are generated by running `cdktf get`. * See https://cdk.tf/provider-generation for more details. */ -import { VpcRouteServer } from "./.gen/providers/aws/"; +import { VpcRouteServer } from "./.gen/providers/aws/vpc-route-server"; class MyConvertedCode extends TerraformStack { constructor(scope: Construct, name: string) { super(scope, name); new VpcRouteServer(this, "test", { - amazon_side_asn: 65534, + amazonSideAsn: 65534, tags: { Name: "Test", }, @@ -48,15 +48,15 @@ import { TerraformStack } from "cdktf"; * Provider bindings are generated by running `cdktf get`. * See https://cdk.tf/provider-generation for more details. 
*/ -import { VpcRouteServer } from "./.gen/providers/aws/"; +import { VpcRouteServer } from "./.gen/providers/aws/vpc-route-server"; class MyConvertedCode extends TerraformStack { constructor(scope: Construct, name: string) { super(scope, name); new VpcRouteServer(this, "test", { - amazon_side_asn: 65534, - persist_routes: "enable", - persist_routes_duration: 2, - sns_notifications_enabled: true, + amazonSideAsn: 65534, + persistRoutes: "enable", + persistRoutesDuration: 2, + snsNotificationsEnabled: true, tags: { Name: "Main Route Server", }, @@ -74,9 +74,10 @@ The following arguments are required: The following arguments are optional: -* `persist_routes` - (Optional) Indicates whether routes should be persisted after all BGP sessions are terminated. Valid values are `enable`, `disable`, `reset` -* `persist_routes_duration` - (Optional) The number of minutes a route server will wait after BGP is re-established to unpersist the routes in the FIB and RIB. Value must be in the range of 1-5. Required if `persist_routes` is enabled. -* `sns_notifications_enabled` - (Optional) Indicates whether SNS notifications should be enabled for route server events. Enabling SNS notifications persists BGP status changes to an SNS topic provisioned by AWS`. +* `persistRoutes` - (Optional) Indicates whether routes should be persisted after all BGP sessions are terminated. Valid values are `enable`, `disable`, `reset` +* `persistRoutesDuration` - (Optional) The number of minutes a route server will wait after BGP is re-established to unpersist the routes in the FIB and RIB. Value must be in the range of 1-5. Required if `persistRoutes` is enabled. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
+* `snsNotificationsEnabled` - (Optional) Indicates whether SNS notifications should be enabled for route server events. Enabling SNS notifications persists BGP status changes to an SNS topic provisioned by AWS. * `tags` - (Optional) A map of tags to assign to the resource. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. ## Attribute Reference @@ -84,7 +85,7 @@ The following arguments are optional: This resource exports the following attributes in addition to the arguments above: * `arn` - The ARN of the route server. -* `route_server_id` - The unique identifier of the route server. +* `routeServerId` - The unique identifier of the route server. * `snsTopicArn` - The ARN of the SNS topic where notifications are published. * `tagsAll` - A map of tags assigned to the resource, including those inherited from the provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). @@ -98,7 +99,7 @@ This resource exports the following attributes in addition to the arguments abov ## Import -In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import VPC (Virtual Private Cloud) Route Server using the `route_server_id`. For example: +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import VPC (Virtual Private Cloud) Route Server using the `routeServerId`. For example: ```typescript // DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug @@ -108,7 +109,7 @@ import { TerraformStack } from "cdktf"; * Provider bindings are generated by running `cdktf get`. * See https://cdk.tf/provider-generation for more details. 
*/ -import { VpcRouteServer } from "./.gen/providers/aws/"; +import { VpcRouteServer } from "./.gen/providers/aws/vpc-route-server"; class MyConvertedCode extends TerraformStack { constructor(scope: Construct, name: string) { super(scope, name); @@ -118,10 +119,10 @@ class MyConvertedCode extends TerraformStack { ``` -Using `terraform import`, import VPC (Virtual Private Cloud) Route Server using the `route_server_id`. For example: +Using `terraform import`, import VPC (Virtual Private Cloud) Route Server using the `routeServerId`. For example: ```console % terraform import aws_vpc_route_server.example rs-12345678 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/vpc_route_server_endpoint.html.markdown b/website/docs/cdktf/typescript/r/vpc_route_server_endpoint.html.markdown index 28d0da4233a5..e58b9327efa7 100644 --- a/website/docs/cdktf/typescript/r/vpc_route_server_endpoint.html.markdown +++ b/website/docs/cdktf/typescript/r/vpc_route_server_endpoint.html.markdown @@ -23,13 +23,13 @@ import { TerraformStack } from "cdktf"; * Provider bindings are generated by running `cdktf get`. * See https://cdk.tf/provider-generation for more details. */ -import { VpcRouteServerEndpoint } from "./.gen/providers/aws/"; +import { VpcRouteServerEndpoint } from "./.gen/providers/aws/vpc-route-server-endpoint"; class MyConvertedCode extends TerraformStack { constructor(scope: Construct, name: string) { super(scope, name); new VpcRouteServerEndpoint(this, "test", { - route_server_id: example.routeServerId, - subnet_id: main.id, + routeServerId: example.routeServerId, + subnetId: main.id, tags: { Name: "Endpoint A", }, @@ -43,11 +43,12 @@ class MyConvertedCode extends TerraformStack { The following arguments are required: -* `route_server_id` - (Required) The ID of the route server for which to create an endpoint. +* `routeServerId` - (Required) The ID of the route server for which to create an endpoint. 
* `subnetId` - (Required) The ID of the subnet in which to create the route server endpoint. The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `tags` - (Optional) A map of tags to assign to the resource. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. ## Attribute Reference @@ -55,9 +56,9 @@ The following arguments are optional: This resource exports the following attributes in addition to the arguments above: * `arn` - The ARN of the route server endpoint. -* `route_server_endpoint_id` - The unique identifier of the route server endpoint. +* `routeServerEndpointId` - The unique identifier of the route server endpoint. * `eniId` - The ID of the Elastic network interface for the endpoint. -* `eni_address` - The IP address of the Elastic network interface for the endpoint. +* `eniAddress` - The IP address of the Elastic network interface for the endpoint. * `vpcId` - The ID of the VPC containing the endpoint. * `tagsAll` - A map of tags assigned to the resource, including those inherited from the provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). @@ -70,7 +71,7 @@ This resource exports the following attributes in addition to the arguments abov ## Import -In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import VPC (Virtual Private Cloud) Route Server Endpoint using the `route_server_endpoint_id`. 
For example: +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import VPC (Virtual Private Cloud) Route Server Endpoint using the `routeServerEndpointId`. For example: ```typescript // DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug @@ -80,7 +81,7 @@ import { TerraformStack } from "cdktf"; * Provider bindings are generated by running `cdktf get`. * See https://cdk.tf/provider-generation for more details. */ -import { VpcRouteServerEndpoint } from "./.gen/providers/aws/"; +import { VpcRouteServerEndpoint } from "./.gen/providers/aws/vpc-route-server-endpoint"; class MyConvertedCode extends TerraformStack { constructor(scope: Construct, name: string) { super(scope, name); @@ -94,10 +95,10 @@ class MyConvertedCode extends TerraformStack { ``` -Using `terraform import`, import VPC (Virtual Private Cloud) Route Server Endpoint using the `route_server_endpoint_id`. For example: +Using `terraform import`, import VPC (Virtual Private Cloud) Route Server Endpoint using the `routeServerEndpointId`. For example: ```console % terraform import aws_vpc_route_server_endpoint.example rse-12345678 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/vpc_route_server_peer.html.markdown b/website/docs/cdktf/typescript/r/vpc_route_server_peer.html.markdown index d7aa211c2aca..32460c0d6af4 100644 --- a/website/docs/cdktf/typescript/r/vpc_route_server_peer.html.markdown +++ b/website/docs/cdktf/typescript/r/vpc_route_server_peer.html.markdown @@ -23,18 +23,18 @@ import { TerraformStack } from "cdktf"; * Provider bindings are generated by running `cdktf get`. * See https://cdk.tf/provider-generation for more details. 
*/ -import { VpcRouteServerPeer } from "./.gen/providers/aws/"; +import { VpcRouteServerPeer } from "./.gen/providers/aws/vpc-route-server-peer"; class MyConvertedCode extends TerraformStack { constructor(scope: Construct, name: string) { super(scope, name); new VpcRouteServerPeer(this, "test", { - bgp_options: [ + bgpOptions: [ { - peer_asn: 65200, + peerAsn: 65200, }, ], - peer_address: "10.0.1.250", - route_server_endpoint_id: example.routeServerEndpointId, + peerAddress: "10.0.1.250", + routeServerEndpointId: example.routeServerEndpointId, tags: { Name: "Appliance 1", }, @@ -49,23 +49,21 @@ class MyConvertedCode extends TerraformStack { ```typescript // DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug import { Construct } from "constructs"; -import { TerraformStack } from "cdktf"; +import { Token, TerraformStack } from "cdktf"; /* * Provider bindings are generated by running `cdktf get`. * See https://cdk.tf/provider-generation for more details. 
*/ -import { - VpcRouteServer, - VpcRouteServerAssociation, - VpcRouteServerEndpoint, - VpcRouteServerPeer, - VpcRouteServerPropagation, -} from "./.gen/providers/aws/"; +import { VpcRouteServerAssociation } from "./.gen/providers/aws/"; +import { VpcRouteServer } from "./.gen/providers/aws/vpc-route-server"; +import { VpcRouteServerEndpoint } from "./.gen/providers/aws/vpc-route-server-endpoint"; +import { VpcRouteServerPeer } from "./.gen/providers/aws/vpc-route-server-peer"; +import { VpcRouteServerPropagation } from "./.gen/providers/aws/vpc-route-server-propagation"; class MyConvertedCode extends TerraformStack { constructor(scope: Construct, name: string) { super(scope, name); const test = new VpcRouteServer(this, "test", { - amazon_side_asn: 4294967294, + amazonSideAsn: 4294967294, tags: { Name: "Test", }, @@ -85,8 +83,8 @@ class MyConvertedCode extends TerraformStack { "test_2", { dependsOn: [awsVpcRouteServerAssociationTest], - route_server_id: test.routeServerId, - subnet_id: awsSubnetTest.id, + routeServerId: test.routeServerId, + subnetId: Token.asString(awsSubnetTest.id), tags: { Name: "Test Endpoint", }, @@ -95,15 +93,16 @@ class MyConvertedCode extends TerraformStack { /*This allows the Terraform resource name to match the original name. 
You can remove the call if you don't need them to match.*/ awsVpcRouteServerEndpointTest.overrideLogicalId("test"); const awsVpcRouteServerPeerTest = new VpcRouteServerPeer(this, "test_3", { - bgp_options: [ + bgpOptions: [ { - peer_asn: 65000, - peer_liveness_detection: "bgp-keepalive", + peerAsn: 65000, + peerLivenessDetection: "bgp-keepalive", }, ], - peer_address: "10.0.1.250", - route_server_endpoint_id: - awsVpcRouteServerEndpointTest.routeServerEndpointId, + peerAddress: "10.0.1.250", + routeServerEndpointId: Token.asString( + awsVpcRouteServerEndpointTest.routeServerEndpointId + ), tags: { Name: "Test Appliance", }, @@ -115,8 +114,8 @@ class MyConvertedCode extends TerraformStack { "test_4", { dependsOn: [awsVpcRouteServerAssociationTest], - route_server_id: test.routeServerId, - route_table_id: awsRouteTableTest.id, + routeServerId: test.routeServerId, + routeTableId: Token.asString(awsRouteTableTest.id), } ); /*This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match.*/ @@ -130,28 +129,29 @@ class MyConvertedCode extends TerraformStack { The following arguments are required: -* `route_server_endpoint_id` - (Required) The ID of the route server endpoint for which to create a peer. +* `bgpOptions` - (Required) The BGP options for the peer, including ASN (Autonomous System Number) and BFD (Bidirectional Forwarding Detection) settings. Configuration block with BGP Options configuration. Detailed below * `peerAddress` - (Required) The IPv4 address of the peer device. -* `bgpOptions` - The BGP options for the peer, including ASN (Autonomous System Number) and BFD (Bidrectional Forwarding Detection) settings. Configuration block with BGP Options configuration Detailed below - -### bgp_options - -* `peerAsn` - (Required) The Border Gateway Protocol (BGP) Autonomous System Number (ASN) for the appliance. Valid values are from 1 to 4294967295. 
We recommend using a private ASN in the 64512–65534 (16-bit ASN) or 4200000000–4294967294 (32-bit ASN) range. -* `peer_liveness_detection` (Optional) The requested liveness detection protocol for the BGP peer. Valid values are `bgp-keepalive` and `bfd`. Default value is `bgp-keepalive`. +* `routeServerEndpointId` - (Required) The ID of the route server endpoint for which to create a peer. The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `tags` - (Optional) A map of tags to assign to the resource. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. +### bgp_options + +* `peerAsn` - (Required) The Border Gateway Protocol (BGP) Autonomous System Number (ASN) for the appliance. Valid values are from 1 to 4294967295. We recommend using a private ASN in the 64512–65534 (16-bit ASN) or 4200000000–4294967294 (32-bit ASN) range. +* `peerLivenessDetection` (Optional) The requested liveness detection protocol for the BGP peer. Valid values are `bgp-keepalive` and `bfd`. Default value is `bgp-keepalive`. + ## Attribute Reference This resource exports the following attributes in addition to the arguments above: * `arn` - The ARN of the route server peer. -* `route_server_peer_id` - The unique identifier of the route server peer. -* `route_server_id` - The ID of the route server associated with this peer. -* `endpoint_eni_address` - The IP address of the Elastic network interface for the route server endpoint. 
-* `endpoint_eni_id` - The ID of the Elastic network interface for the route server endpoint. +* `routeServerPeerId` - The unique identifier of the route server peer. +* `routeServerId` - The ID of the route server associated with this peer. +* `endpointEniAddress` - The IP address of the Elastic network interface for the route server endpoint. +* `endpointEniId` - The ID of the Elastic network interface for the route server endpoint. * `subnetId` - The ID of the subnet containing the route server peer. * `vpcId` - The ID of the VPC containing the route server peer. * `tagsAll` - A map of tags assigned to the resource, including those inherited from the provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). @@ -165,7 +165,7 @@ This resource exports the following attributes in addition to the arguments abov ## Import -In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import VPC (Virtual Private Cloud) Route Server using the `route_server_peer_id`. For example: +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import VPC (Virtual Private Cloud) Route Server using the `routeServerPeerId`. For example: ```typescript // DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug @@ -175,7 +175,7 @@ import { TerraformStack } from "cdktf"; * Provider bindings are generated by running `cdktf get`. * See https://cdk.tf/provider-generation for more details. 
*/ -import { VpcRouteServerPeer } from "./.gen/providers/aws/"; +import { VpcRouteServerPeer } from "./.gen/providers/aws/vpc-route-server-peer"; class MyConvertedCode extends TerraformStack { constructor(scope: Construct, name: string) { super(scope, name); @@ -185,10 +185,10 @@ class MyConvertedCode extends TerraformStack { ``` -Using `terraform import`, import VPC (Virtual Private Cloud) Route Server using the `route_server_peer_id`. For example: +Using `terraform import`, import VPC (Virtual Private Cloud) Route Server using the `routeServerPeerId`. For example: ```console % terraform import aws_vpc_route_server_peer.example rsp-12345678 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/vpc_route_server_propagation.html.markdown b/website/docs/cdktf/typescript/r/vpc_route_server_propagation.html.markdown index b76bf222b759..8f2ae2ae29f0 100644 --- a/website/docs/cdktf/typescript/r/vpc_route_server_propagation.html.markdown +++ b/website/docs/cdktf/typescript/r/vpc_route_server_propagation.html.markdown @@ -18,18 +18,18 @@ description: |- ```typescript // DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug import { Construct } from "constructs"; -import { TerraformStack } from "cdktf"; +import { Token, TerraformStack } from "cdktf"; /* * Provider bindings are generated by running `cdktf get`. * See https://cdk.tf/provider-generation for more details. 
*/ -import { VpcRouteServerPropagation } from "./.gen/providers/aws/"; +import { VpcRouteServerPropagation } from "./.gen/providers/aws/vpc-route-server-propagation"; class MyConvertedCode extends TerraformStack { constructor(scope: Construct, name: string) { super(scope, name); new VpcRouteServerPropagation(this, "example", { - route_server_id: awsVpcRouteServerExample.routeServerId, - route_table_id: awsRouteTableExample.id, + routeServerId: Token.asString(awsVpcRouteServerExample.routeServerId), + routeTableId: Token.asString(awsRouteTableExample.id), }); } } @@ -40,9 +40,13 @@ class MyConvertedCode extends TerraformStack { The following arguments are required: -* `route_server_id` - (Required) The unique identifier for the route server to be associated. +* `routeServerId` - (Required) The unique identifier for the route server to be associated. * `routeTableId` - (Required) The ID of the route table to which route server will propagate routes. +The following arguments are optional: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). + ## Attribute Reference This resource exports no additional attributes. @@ -66,7 +70,7 @@ import { TerraformStack } from "cdktf"; * Provider bindings are generated by running `cdktf get`. * See https://cdk.tf/provider-generation for more details. 
*/ -import { VpcRouteServerPropagation } from "./.gen/providers/aws/"; +import { VpcRouteServerPropagation } from "./.gen/providers/aws/vpc-route-server-propagation"; class MyConvertedCode extends TerraformStack { constructor(scope: Construct, name: string) { super(scope, name); @@ -86,4 +90,4 @@ Using `terraform import`, to to import VPC (Virtual Private Cloud) Route Server % terraform import aws_vpc_route_server_propagation.example rs-12345678,rtb-656c65616e6f72 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/vpc_route_server_vpc_association.html.markdown b/website/docs/cdktf/typescript/r/vpc_route_server_vpc_association.html.markdown index b2e601bff9f3..b167e4dd9cf2 100644 --- a/website/docs/cdktf/typescript/r/vpc_route_server_vpc_association.html.markdown +++ b/website/docs/cdktf/typescript/r/vpc_route_server_vpc_association.html.markdown @@ -18,18 +18,18 @@ description: |- ```typescript // DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug import { Construct } from "constructs"; -import { TerraformStack } from "cdktf"; +import { Token, TerraformStack } from "cdktf"; /* * Provider bindings are generated by running `cdktf get`. * See https://cdk.tf/provider-generation for more details. 
*/ -import { VpcRouteServerVpcAssociation } from "./.gen/providers/aws/"; +import { VpcRouteServerVpcAssociation } from "./.gen/providers/aws/vpc-route-server-vpc-association"; class MyConvertedCode extends TerraformStack { constructor(scope: Construct, name: string) { super(scope, name); new VpcRouteServerVpcAssociation(this, "example", { - route_server_id: awsVpcRouteServerExample.routeServerId, - vpc_id: awsVpcExample.id, + routeServerId: Token.asString(awsVpcRouteServerExample.routeServerId), + vpcId: Token.asString(awsVpcExample.id), }); } } @@ -40,9 +40,13 @@ class MyConvertedCode extends TerraformStack { The following arguments are required: -* `route_server_id` - (Required) The unique identifier for the route server to be associated. +* `routeServerId` - (Required) The unique identifier for the route server to be associated. * `vpcId` - (Required) The ID of the VPC to associate with the route server. +The following arguments are optional: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). + ## Attribute Reference This resource exports no additional attributes. @@ -66,7 +70,7 @@ import { TerraformStack } from "cdktf"; * Provider bindings are generated by running `cdktf get`. * See https://cdk.tf/provider-generation for more details. 
*/ -import { VpcRouteServerVpcAssociation } from "./.gen/providers/aws/"; +import { VpcRouteServerVpcAssociation } from "./.gen/providers/aws/vpc-route-server-vpc-association"; class MyConvertedCode extends TerraformStack { constructor(scope: Construct, name: string) { super(scope, name); @@ -86,4 +90,4 @@ Using `terraform import`, to to import VPC (Virtual Private Cloud) Route Server % terraform import aws_vpc_route_server_vpc_association.example rs-12345678,vpc-0f001273ec18911b1 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/vpc_security_group_egress_rule.html.markdown b/website/docs/cdktf/typescript/r/vpc_security_group_egress_rule.html.markdown index de9e16e7b3f0..f0a4e3981007 100644 --- a/website/docs/cdktf/typescript/r/vpc_security_group_egress_rule.html.markdown +++ b/website/docs/cdktf/typescript/r/vpc_security_group_egress_rule.html.markdown @@ -48,6 +48,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `cidrIpv4` - (Optional) The destination IPv4 CIDR range. * `cidrIpv6` - (Optional) The destination IPv6 CIDR range. * `description` - (Optional) The security group rule description. @@ -71,6 +72,32 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. 
For example: + +```terraform +import { + to = aws_vpc_security_group_egress_rule.example + identity = { + id = "sgr-02108b27edd666983" + } +} + +resource "aws_vpc_security_group_egress_rule" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `id` - (String) ID of the security group rule. + +#### Optional + +* `accountId` (String) AWS Account where this resource is managed. +* `region` (String) Region where this resource is managed. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import security group egress rules using the `securityGroupRuleId`. For example: ```typescript @@ -101,4 +128,4 @@ Using `terraform import`, import security group egress rules using the `security % terraform import aws_vpc_security_group_egress_rule.example sgr-02108b27edd666983 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/vpc_security_group_ingress_rule.html.markdown b/website/docs/cdktf/typescript/r/vpc_security_group_ingress_rule.html.markdown index d6c9abcb9b5c..c31f7cf40920 100644 --- a/website/docs/cdktf/typescript/r/vpc_security_group_ingress_rule.html.markdown +++ b/website/docs/cdktf/typescript/r/vpc_security_group_ingress_rule.html.markdown @@ -60,8 +60,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: -~> **Note** Although `cidrIpv4`, `cidrIpv6`, `prefixListId`, and `referencedSecurityGroupId` are all marked as optional, you *must* provide one of them in order to configure the destination of the traffic. The `fromPort` and `toPort` arguments are required unless `ipProtocol` is set to `-1` or `icmpv6`. - +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `cidrIpv4` - (Optional) The source IPv4 CIDR range. * `cidrIpv6` - (Optional) The source IPv6 CIDR range. * `description` - (Optional) The security group rule description. @@ -73,6 +72,8 @@ This resource supports the following arguments: * `tags` - (Optional) A map of tags to assign to the resource. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. * `toPort` - (Optional) The end of port range for the TCP and UDP protocols, or an ICMP/ICMPv6 code. +~> **Note** Although `cidrIpv4`, `cidrIpv6`, `prefixListId`, and `referencedSecurityGroupId` are all marked as optional, you *must* provide one of them in order to configure the source of the traffic. The `fromPort` and `toPort` arguments are required unless `ipProtocol` is set to `-1` or `icmpv6`. + ## Attribute Reference This resource exports the following attributes in addition to the arguments above: @@ -83,6 +84,32 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_vpc_security_group_ingress_rule.example + identity = { + id = "sgr-02108b27edd666983" + } +} + +resource "aws_vpc_security_group_ingress_rule" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `id` - (String) ID of the security group rule. + +#### Optional + +* `accountId` (String) AWS Account where this resource is managed. +* `region` (String) Region where this resource is managed.
+ In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import security group ingress rules using the `securityGroupRuleId`. For example: ```typescript @@ -113,4 +140,4 @@ Using `terraform import`, import security group ingress rules using the `securit % terraform import aws_vpc_security_group_ingress_rule.example sgr-02108b27edd666983 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/vpc_security_group_vpc_association.html.markdown b/website/docs/cdktf/typescript/r/vpc_security_group_vpc_association.html.markdown index 0360b6a75056..d1e1b29f84ad 100644 --- a/website/docs/cdktf/typescript/r/vpc_security_group_vpc_association.html.markdown +++ b/website/docs/cdktf/typescript/r/vpc_security_group_vpc_association.html.markdown @@ -37,8 +37,9 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -The following arguments are required: +This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `securityGroupId` - (Required) The ID of the security group. * `vpcId` - (Required) The ID of the VPC to make the association with. @@ -57,6 +58,34 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. 
For example: + +```terraform +import { + to = aws_vpc_security_group_vpc_association.example + identity = { + vpc_id = "vpc-67890" + security_group_id = "sg-12345" + } +} + +resource "aws_vpc_security_group_vpc_association" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `vpcId` (String) VPC ID. +* `securityGroupId` (String) Security Group ID. + +#### Optional + +* `accountId` (String) AWS Account where this resource is managed. +* `region` (String) Region where this resource is managed. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import a Security Group VPC Association using the `securityGroupId` and `vpcId` arguments, separated by a comma (`,`). For example: ```typescript @@ -87,4 +116,4 @@ Using `terraform import`, import a Security Group VPC Association using the `sec % terraform import aws_vpc_security_group_vpc_association.example sg-12345,vpc-67890 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/vpclattice_access_log_subscription.html.markdown b/website/docs/cdktf/typescript/r/vpclattice_access_log_subscription.html.markdown index fe6681179d9d..3b2deb73c3d1 100644 --- a/website/docs/cdktf/typescript/r/vpclattice_access_log_subscription.html.markdown +++ b/website/docs/cdktf/typescript/r/vpclattice_access_log_subscription.html.markdown @@ -46,6 +46,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `serviceNetworkLogType` - (Optional, Forces new resource) Type of log that monitors your Amazon VPC Lattice service networks. 
Valid values are: `SERVICE`, `RESOURCE`. Defaults to `SERVICE`. ## Attribute Reference @@ -88,4 +89,4 @@ Using `terraform import`, import VPC Lattice Access Log Subscription using the a % terraform import aws_vpclattice_access_log_subscription.example rft-8012925589 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/vpclattice_auth_policy.html.markdown b/website/docs/cdktf/typescript/r/vpclattice_auth_policy.html.markdown index 571e66bc78be..4246d2f4bdee 100644 --- a/website/docs/cdktf/typescript/r/vpclattice_auth_policy.html.markdown +++ b/website/docs/cdktf/typescript/r/vpclattice_auth_policy.html.markdown @@ -68,8 +68,9 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -The following arguments are required: +This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `resourceIdentifier` - (Required) The ID or Amazon Resource Name (ARN) of the service network or service for which the policy is created. * `policy` - (Required) The auth policy. The policy string in JSON must not contain newlines or blank lines. @@ -121,4 +122,4 @@ Using `terraform import`, import VPC Lattice Auth Policy using the `id`. 
For exa % terraform import aws_vpclattice_auth_policy.example abcd-12345678 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/vpclattice_listener.html.markdown b/website/docs/cdktf/typescript/r/vpclattice_listener.html.markdown index 794ad58d43e9..e5cf609d19ad 100644 --- a/website/docs/cdktf/typescript/r/vpclattice_listener.html.markdown +++ b/website/docs/cdktf/typescript/r/vpclattice_listener.html.markdown @@ -189,6 +189,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `defaultAction` - (Required) Default action block for the default listener rule. Default action blocks are defined below. * `name` - (Required, Forces new resource) Name of the listener. A listener name must be unique within a service. Valid characters are a-z, 0-9, and hyphens (-). You can't use a hyphen as the first or last character, or immediately after another hyphen. * `port` - (Optional, Forces new resource) Listener port. You can specify a value from 1 to 65535. If `port` is not specified and `protocol` is HTTP, the value will default to 80. If `port` is not specified and `protocol` is HTTPS, the value will default to 443. 
@@ -268,4 +269,4 @@ Using `terraform import`, import VPC Lattice Listener using the `listenerId` of % terraform import aws_vpclattice_listener.example svc-1a2b3c4d/listener-987654321 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/vpclattice_listener_rule.html.markdown b/website/docs/cdktf/typescript/r/vpclattice_listener_rule.html.markdown index 5a9800ed820d..bd9ab7d557f3 100644 --- a/website/docs/cdktf/typescript/r/vpclattice_listener_rule.html.markdown +++ b/website/docs/cdktf/typescript/r/vpclattice_listener_rule.html.markdown @@ -131,6 +131,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `tags` - (Optional) Key-value mapping of resource tags. If configured with a provider [`defaultTags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. ### `action` Block @@ -266,4 +267,4 @@ Using `terraform import`, import VPC Lattice Listener Rule using the `id`. 
For e % terraform import aws_vpclattice_listener_rule.example service123/listener456/rule789 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/vpclattice_resource_configuration.html.markdown b/website/docs/cdktf/typescript/r/vpclattice_resource_configuration.html.markdown index e6a1e5a0421e..9a5a3157517d 100644 --- a/website/docs/cdktf/typescript/r/vpclattice_resource_configuration.html.markdown +++ b/website/docs/cdktf/typescript/r/vpclattice_resource_configuration.html.markdown @@ -137,6 +137,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `allowAssociationToShareableServiceNetwork` (Optional) Allow or Deny the association of this resource to a shareable service network. * `protocol` - (Optional) Protocol for the Resource `TCP` is currently the only supported value. MUST be specified if `resourceConfigurationGroupId` is not. * `resourceConfigurationGroupId` (Optional) ID of Resource Configuration where `type` is `CHILD`. @@ -150,6 +151,7 @@ One of `dnsResource`, `ipResource`, `arnResource` must be specified. The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `arnResource` - (Optional) Resource DNS Configuration. See [`arnResource` Block](#arn_resource-block) for details. * `dnsResource` - (Optional) Resource DNS Configuration. See [`dnsResource` Block](#dns_resource-block) for details. 
* `ipResource` - (Optional) Resource DNS Configuration. See [`ipResource` Block](#ip_resource-block) for details. @@ -221,4 +223,4 @@ Using `terraform import`, import VPC Lattice Resource Configuration using the `i % terraform import aws_vpclattice_resource_configuration.example rcfg-1234567890abcdef1 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/vpclattice_resource_gateway.html.markdown b/website/docs/cdktf/typescript/r/vpclattice_resource_gateway.html.markdown index 3286ce69cd15..60572a862dbf 100644 --- a/website/docs/cdktf/typescript/r/vpclattice_resource_gateway.html.markdown +++ b/website/docs/cdktf/typescript/r/vpclattice_resource_gateway.html.markdown @@ -103,6 +103,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `ipAddressType` - (Optional) IP address type used by the resource gateway. Valid values are `IPV4`, `IPV6`, and `DUALSTACK`. The IP address type of a resource gateway must be compatible with the subnets of the resource gateway and the IP address type of the resource. * `securityGroupIds` - (Optional) Security group IDs associated with the resource gateway. The security groups must be in the same VPC. * `tags` - (Optional) Key-value mapping of resource tags. If configured with a provider [`defaultTags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. @@ -148,4 +149,4 @@ Using `terraform import`, import VPC Lattice Resource Gateway using the `id`. 
Fo % terraform import aws_vpclattice_resource_gateway.example rgw-0a1b2c3d4e5f ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/vpclattice_resource_policy.html.markdown b/website/docs/cdktf/typescript/r/vpclattice_resource_policy.html.markdown index d5ea78f6b56a..bf134fcf9a6b 100644 --- a/website/docs/cdktf/typescript/r/vpclattice_resource_policy.html.markdown +++ b/website/docs/cdktf/typescript/r/vpclattice_resource_policy.html.markdown @@ -79,8 +79,9 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -The following arguments are required: +This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `resourceArn` - (Required) The ID or Amazon Resource Name (ARN) of the service network or service for which the policy is created. * `policy` - (Required) An IAM policy. The policy string in JSON must not contain newlines or blank lines. 
@@ -120,4 +121,4 @@ Using `terraform import`, import VPC Lattice Resource Policy using the `resource % terraform import aws_vpclattice_resource_policy.example rft-8012925589 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/vpclattice_service.html.markdown b/website/docs/cdktf/typescript/r/vpclattice_service.html.markdown index 43e7b5932356..dd26da96078a 100644 --- a/website/docs/cdktf/typescript/r/vpclattice_service.html.markdown +++ b/website/docs/cdktf/typescript/r/vpclattice_service.html.markdown @@ -46,6 +46,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `authType` - (Optional) Type of IAM policy. Either `NONE` or `AWS_IAM`. * `certificateArn` - (Optional) Amazon Resource Name (ARN) of the certificate. * `customDomainName` - (Optional) Custom domain name of the service. @@ -100,4 +101,4 @@ Using `terraform import`, import VPC Lattice Service using the `id`. 
For example % terraform import aws_vpclattice_service.example svc-06728e2357ea55f8a ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/vpclattice_service_network.html.markdown b/website/docs/cdktf/typescript/r/vpclattice_service_network.html.markdown index e125935f0c16..5eca00942c41 100644 --- a/website/docs/cdktf/typescript/r/vpclattice_service_network.html.markdown +++ b/website/docs/cdktf/typescript/r/vpclattice_service_network.html.markdown @@ -45,6 +45,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `authType` - (Optional) Type of IAM policy. Either `NONE` or `AWS_IAM`. * `tags` - (Optional) Key-value mapping of resource tags. If configured with a provider [`defaultTags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. @@ -87,4 +88,4 @@ Using `terraform import`, import VPC Lattice Service Network using the `id`. 
For % terraform import aws_vpclattice_service_network.example sn-0158f91c1e3358dba ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/vpclattice_service_network_resource_association.html.markdown b/website/docs/cdktf/typescript/r/vpclattice_service_network_resource_association.html.markdown index 5fd79426f7d3..3d0407e32a51 100644 --- a/website/docs/cdktf/typescript/r/vpclattice_service_network_resource_association.html.markdown +++ b/website/docs/cdktf/typescript/r/vpclattice_service_network_resource_association.html.markdown @@ -18,19 +18,22 @@ Terraform resource for managing an AWS VPC Lattice Service Network Resource Asso ```typescript // DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug import { Construct } from "constructs"; -import { TerraformStack } from "cdktf"; +import { Token, TerraformStack } from "cdktf"; /* * Provider bindings are generated by running `cdktf get`. * See https://cdk.tf/provider-generation for more details. 
*/ -import { VpclatticeServiceNetworkResourceAssociation } from "./.gen/providers/aws/"; +import { VpclatticeServiceNetworkResourceAssociation } from "./.gen/providers/aws/vpclattice-service-network-resource-association"; class MyConvertedCode extends TerraformStack { constructor(scope: Construct, name: string) { super(scope, name); new VpclatticeServiceNetworkResourceAssociation(this, "example", { - resource_configuration_identifier: - awsVpclatticeResourceConfigurationExample.id, - service_network_identifier: awsVpclatticeServiceNetworkExample.id, + resourceConfigurationIdentifier: Token.asString( + awsVpclatticeResourceConfigurationExample.id + ), + serviceNetworkIdentifier: Token.asString( + awsVpclatticeServiceNetworkExample.id + ), tags: { Name: "Example", }, @@ -44,11 +47,12 @@ class MyConvertedCode extends TerraformStack { The following arguments are required: -* `resource_configuration_identifier` - (Required) Identifier of Resource Configuration to associate to the Service Network. +* `resourceConfigurationIdentifier` - (Required) Identifier of Resource Configuration to associate to the Service Network. * `serviceNetworkIdentifier` - (Required) Identifier of the Service Network to associate the Resource to. The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `tags` - (Optional) Map of tags assigned to the resource. If configured with a provider [`defaultTags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. ## Attribute Reference @@ -81,7 +85,7 @@ import { TerraformStack } from "cdktf"; * Provider bindings are generated by running `cdktf get`. 
* See https://cdk.tf/provider-generation for more details. */ -import { VpclatticeServiceNetworkResourceAssociation } from "./.gen/providers/aws/"; +import { VpclatticeServiceNetworkResourceAssociation } from "./.gen/providers/aws/vpclattice-service-network-resource-association"; class MyConvertedCode extends TerraformStack { constructor(scope: Construct, name: string) { super(scope, name); @@ -101,4 +105,4 @@ Using `terraform import`, import VPC Lattice Service Network Resource Associatio % terraform import aws_vpclattice_service_network_resource_association.example snra-1234567890abcef12 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/vpclattice_service_network_service_association.html.markdown b/website/docs/cdktf/typescript/r/vpclattice_service_network_service_association.html.markdown index 3150b11b0657..2d308c658933 100644 --- a/website/docs/cdktf/typescript/r/vpclattice_service_network_service_association.html.markdown +++ b/website/docs/cdktf/typescript/r/vpclattice_service_network_service_association.html.markdown @@ -41,12 +41,11 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -The following arguments are required: +This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `serviceIdentifier` - (Required) The ID or Amazon Resource Identifier (ARN) of the service. * `serviceNetworkIdentifier` - (Required) The ID or Amazon Resource Identifier (ARN) of the service network. You must use the ARN if the resources specified in the operation are in different accounts. -The following arguments are optional: - * `tags` - (Optional) Key-value mapping of resource tags. 
If configured with a provider [`defaultTags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. ## Attribute Reference @@ -102,4 +101,4 @@ Using `terraform import`, import VPC Lattice Service Network Service Association % terraform import aws_vpclattice_service_network_service_association.example snsa-05e2474658a88f6ba ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/vpclattice_service_network_vpc_association.html.markdown b/website/docs/cdktf/typescript/r/vpclattice_service_network_vpc_association.html.markdown index 29e7da5dae13..3c3999b27e2c 100644 --- a/website/docs/cdktf/typescript/r/vpclattice_service_network_vpc_association.html.markdown +++ b/website/docs/cdktf/typescript/r/vpclattice_service_network_vpc_association.html.markdown @@ -42,12 +42,12 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -The following arguments are required: +This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `vpcIdentifier` - (Required) The ID of the VPC. * `serviceNetworkIdentifier` - (Required) The ID or Amazon Resource Identifier (ARN) of the service network. You must use the ARN if the resources specified in the operation are in different accounts. The following arguments are optional: - * `tags` - (Optional) Key-value mapping of resource tags. If configured with a provider [`defaultTags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. 
* `securityGroupIds` - (Optional) The IDs of the security groups. @@ -100,4 +100,4 @@ Using `terraform import`, import VPC Lattice Service Network VPC Association usi % terraform import aws_vpclattice_service_network_vpc_association.example snsa-05e2474658a88f6ba ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/vpclattice_target_group.html.markdown b/website/docs/cdktf/typescript/r/vpclattice_target_group.html.markdown index 998363fe8efe..afad6e09d60d 100644 --- a/website/docs/cdktf/typescript/r/vpclattice_target_group.html.markdown +++ b/website/docs/cdktf/typescript/r/vpclattice_target_group.html.markdown @@ -151,6 +151,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `config` - (Optional) The target group configuration. * `tags` - (Optional) Key-value mapping of resource tags. If configured with a provider [`defaultTags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. @@ -226,4 +227,4 @@ Using `terraform import`, import VPC Lattice Target Group using the `id`. 
For ex % terraform import aws_vpclattice_target_group.example tg-0c11d4dc16ed96bdb ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/vpclattice_target_group_attachment.html.markdown b/website/docs/cdktf/typescript/r/vpclattice_target_group_attachment.html.markdown index bdbdb734f83e..978d4c26a5ad 100644 --- a/website/docs/cdktf/typescript/r/vpclattice_target_group_attachment.html.markdown +++ b/website/docs/cdktf/typescript/r/vpclattice_target_group_attachment.html.markdown @@ -44,6 +44,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +- `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). - `targetGroupIdentifier` - (Required) The ID or Amazon Resource Name (ARN) of the target group. - `target` - (Required) The target. @@ -56,4 +57,4 @@ This resource supports the following arguments: This resource exports no additional attributes. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/vpn_connection.html.markdown b/website/docs/cdktf/typescript/r/vpn_connection.html.markdown index 27cdaae44d19..d8be02653a14 100644 --- a/website/docs/cdktf/typescript/r/vpn_connection.html.markdown +++ b/website/docs/cdktf/typescript/r/vpn_connection.html.markdown @@ -188,13 +188,14 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
* `customerGatewayId` - (Required) The ID of the customer gateway. * `type` - (Required) The type of VPN connection. The only type AWS supports at this time is "ipsec.1". * `transitGatewayId` - (Optional) The ID of the EC2 Transit Gateway. * `vpnGatewayId` - (Optional) The ID of the Virtual Private Gateway. * `staticRoutesOnly` - (Optional, Default `false`) Whether the VPN connection uses static routes exclusively. Static routes must be used for devices that don't support BGP. * `enableAcceleration` - (Optional, Default `false`) Indicate whether to enable acceleration for the VPN connection. Supports only EC2 Transit Gateway. -* `preshared_key_storage` - (Optional) Storage mode for the pre-shared key (PSK). Valid values are `Standard` (stored in the Site-to-Site VPN service) or `SecretsManager` (stored in AWS Secrets Manager). +* `presharedKeyStorage` - (Optional) Storage mode for the pre-shared key (PSK). Valid values are `Standard` (stored in the Site-to-Site VPN service) or `SecretsManager` (stored in AWS Secrets Manager). * `tags` - (Optional) Tags to apply to the connection. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. * `localIpv4NetworkCidr` - (Optional, Default `0.0.0.0/0`) The IPv4 CIDR on the customer gateway (on-premises) side of the VPN connection. * `localIpv6NetworkCidr` - (Optional, Default `::/0`) The IPv6 CIDR on the customer gateway (on-premises) side of the VPN connection. @@ -269,20 +270,20 @@ This resource exports the following attributes in addition to the arguments abov * `customerGatewayConfiguration` - The configuration information for the VPN connection's customer gateway (in the native XML format). * `customerGatewayId` - The ID of the customer gateway to which the connection is attached. 
* `routes` - The static routes associated with the VPN connection. Detailed below. -* `preshared_key_arn` - ARN of the Secrets Manager secret storing the pre-shared key(s) for the VPN connection. Note that even if it returns a valid Secrets Manager ARN, the pre-shared key(s) will not be stored in Secrets Manager unless the `preshared_key_storage` argument is set to `SecretsManager`. +* `presharedKeyArn` - ARN of the Secrets Manager secret storing the pre-shared key(s) for the VPN connection. Note that even if it returns a valid Secrets Manager ARN, the pre-shared key(s) will not be stored in Secrets Manager unless the `presharedKeyStorage` argument is set to `SecretsManager`. * `staticRoutesOnly` - Whether the VPN connection uses static routes exclusively. * `tagsAll` - A map of tags assigned to the resource, including those inherited from the provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). * `transitGatewayAttachmentId` - When associated with an EC2 Transit Gateway (`transitGatewayId` argument), the attachment ID. See also the [`aws_ec2_tag` resource](/docs/providers/aws/r/ec2_tag.html) for tagging the EC2 Transit Gateway VPN Attachment. * `tunnel1Address` - The public IP address of the first VPN tunnel. * `tunnel1CgwInsideAddress` - The RFC 6890 link-local address of the first VPN tunnel (Customer Gateway Side). * `tunnel1VgwInsideAddress` - The RFC 6890 link-local address of the first VPN tunnel (VPN Gateway Side). -* `tunnel1PresharedKey` - The preshared key of the first VPN tunnel. If `preshared_key_storage` is set to `SecretsManager`, it returns strings indicating the keys are redacted and the actual values are stored in Secrets Manager. +* `tunnel1PresharedKey` - The preshared key of the first VPN tunnel. If `presharedKeyStorage` is set to `SecretsManager`, it returns strings indicating the keys are redacted and the actual values are stored in Secrets Manager. 
* `tunnel1BgpAsn` - The bgp asn number of the first VPN tunnel. * `tunnel1BgpHoldtime` - The bgp holdtime of the first VPN tunnel. * `tunnel2Address` - The public IP address of the second VPN tunnel. * `tunnel2CgwInsideAddress` - The RFC 6890 link-local address of the second VPN tunnel (Customer Gateway Side). * `tunnel2VgwInsideAddress` - The RFC 6890 link-local address of the second VPN tunnel (VPN Gateway Side). -* `tunnel2PresharedKey` - The preshared key of the second VPN tunnel. If `preshared_key_storage` is set to `SecretsManager`, it returns strings indicating the keys are redacted and the actual values are stored in Secrets Manager. +* `tunnel2PresharedKey` - The preshared key of the second VPN tunnel. If `presharedKeyStorage` is set to `SecretsManager`, it returns strings indicating the keys are redacted and the actual values are stored in Secrets Manager. * `tunnel2BgpAsn` - The bgp asn number of the second VPN tunnel. * `tunnel2BgpHoldtime` - The bgp holdtime of the second VPN tunnel. * `vgwTelemetry` - Telemetry for the VPN tunnels. Detailed below. @@ -335,4 +336,4 @@ Using `terraform import`, import VPN Connections using the VPN connection `id`. % terraform import aws_vpn_connection.testvpnconnection vpn-40f41529 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/vpn_connection_route.html.markdown b/website/docs/cdktf/typescript/r/vpn_connection_route.html.markdown index 7764590de974..fb0ff06544f3 100644 --- a/website/docs/cdktf/typescript/r/vpn_connection_route.html.markdown +++ b/website/docs/cdktf/typescript/r/vpn_connection_route.html.markdown @@ -60,6 +60,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `destinationCidrBlock` - (Required) The CIDR block associated with the local subnet of the customer network. * `vpnConnectionId` - (Required) The ID of the VPN connection. @@ -70,4 +71,4 @@ This resource exports the following attributes in addition to the arguments abov * `destinationCidrBlock` - The CIDR block associated with the local subnet of the customer network. * `vpnConnectionId` - The ID of the VPN connection. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/vpn_gateway.html.markdown b/website/docs/cdktf/typescript/r/vpn_gateway.html.markdown index 5662fb66818f..da56b64fe6d4 100644 --- a/website/docs/cdktf/typescript/r/vpn_gateway.html.markdown +++ b/website/docs/cdktf/typescript/r/vpn_gateway.html.markdown @@ -41,6 +41,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `vpcId` - (Optional) The VPC ID to create in. * `availabilityZone` - (Optional) The Availability Zone for the virtual private gateway. * `tags` - (Optional) A map of tags to assign to the resource. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. @@ -82,4 +83,4 @@ Using `terraform import`, import VPN Gateways using the VPN gateway `id`. 
For ex % terraform import aws_vpn_gateway.testvpngateway vgw-9a4cacf3 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/vpn_gateway_attachment.html.markdown b/website/docs/cdktf/typescript/r/vpn_gateway_attachment.html.markdown index 65553fca3936..495ec8545528 100644 --- a/website/docs/cdktf/typescript/r/vpn_gateway_attachment.html.markdown +++ b/website/docs/cdktf/typescript/r/vpn_gateway_attachment.html.markdown @@ -58,6 +58,7 @@ guides for more information. This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `vpcId` - (Required) The ID of the VPC. * `vpnGatewayId` - (Required) The ID of the Virtual Private Gateway. @@ -72,4 +73,4 @@ This resource exports the following attributes in addition to the arguments abov You cannot import this resource. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/vpn_gateway_route_propagation.html.markdown b/website/docs/cdktf/typescript/r/vpn_gateway_route_propagation.html.markdown index d954040c75c5..3aaa26f764f9 100644 --- a/website/docs/cdktf/typescript/r/vpn_gateway_route_propagation.html.markdown +++ b/website/docs/cdktf/typescript/r/vpn_gateway_route_propagation.html.markdown @@ -41,8 +41,9 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -The following arguments are required: +This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `vpnGatewayId` - The id of the `aws_vpn_gateway` to propagate routes from. * `routeTableId` - The id of the `aws_route_table` to propagate routes into. @@ -57,4 +58,4 @@ This resource exports no additional attributes. - `create` - (Default `2m`) - `delete` - (Default `2m`) - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/wafregional_byte_match_set.html.markdown b/website/docs/cdktf/typescript/r/wafregional_byte_match_set.html.markdown index 9c085c01f15e..a0debc51d838 100644 --- a/website/docs/cdktf/typescript/r/wafregional_byte_match_set.html.markdown +++ b/website/docs/cdktf/typescript/r/wafregional_byte_match_set.html.markdown @@ -49,6 +49,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The name or description of the ByteMatchSet. * `byteMatchTuples` - (Optional)Settings for the ByteMatchSet, such as the bytes (typically a string that corresponds with ASCII characters) that you want AWS WAF to search for in web requests. ByteMatchTuple documented below. @@ -104,4 +105,4 @@ Using `terraform import`, import WAF Regional Byte Match Set using the id. 
For e % terraform import aws_wafregional_byte_match_set.byte_set a1b2c3d4-d5f6-7777-8888-9999aaaabbbbcccc ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/wafregional_geo_match_set.html.markdown b/website/docs/cdktf/typescript/r/wafregional_geo_match_set.html.markdown index 8304a9034a05..cdc40d0c8f8b 100644 --- a/website/docs/cdktf/typescript/r/wafregional_geo_match_set.html.markdown +++ b/website/docs/cdktf/typescript/r/wafregional_geo_match_set.html.markdown @@ -48,6 +48,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The name or description of the Geo Match Set. * `geoMatchConstraint` - (Optional) The Geo Match Constraint objects which contain the country that you want AWS WAF to search for. @@ -100,4 +101,4 @@ Using `terraform import`, import WAF Regional Geo Match Set using the id. For ex % terraform import aws_wafregional_geo_match_set.geo_match_set a1b2c3d4-d5f6-7777-8888-9999aaaabbbbcccc ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/wafregional_ipset.html.markdown b/website/docs/cdktf/typescript/r/wafregional_ipset.html.markdown index 31efcae4bdc8..0d9f43ca1935 100644 --- a/website/docs/cdktf/typescript/r/wafregional_ipset.html.markdown +++ b/website/docs/cdktf/typescript/r/wafregional_ipset.html.markdown @@ -48,6 +48,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The name or description of the IPSet. * `ipSetDescriptor` - (Optional) One or more pairs specifying the IP address type (IPV4 or IPV6) and the IP address range (in CIDR notation) from which web requests originate. @@ -101,4 +102,4 @@ Using `terraform import`, import WAF Regional IPSets using their ID. For example % terraform import aws_wafregional_ipset.example a1b2c3d4-d5f6-7777-8888-9999aaaabbbbcccc ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/wafregional_rate_based_rule.html.markdown b/website/docs/cdktf/typescript/r/wafregional_rate_based_rule.html.markdown index 5c68d6a41b31..40416ce60d23 100644 --- a/website/docs/cdktf/typescript/r/wafregional_rate_based_rule.html.markdown +++ b/website/docs/cdktf/typescript/r/wafregional_rate_based_rule.html.markdown @@ -59,6 +59,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `metricName` - (Required) The name or description for the Amazon CloudWatch metric of this rule. * `name` - (Required) The name or description of the rule. * `rateKey` - (Required) Valid value is IP. @@ -121,4 +122,4 @@ Using `terraform import`, import WAF Regional Rate Based Rule using the id. 
For % terraform import aws_wafregional_rate_based_rule.wafrule a1b2c3d4-d5f6-7777-8888-9999aaaabbbbcccc ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/wafregional_regex_match_set.html.markdown b/website/docs/cdktf/typescript/r/wafregional_regex_match_set.html.markdown index a77641fcb3ff..625cb45c24dd 100644 --- a/website/docs/cdktf/typescript/r/wafregional_regex_match_set.html.markdown +++ b/website/docs/cdktf/typescript/r/wafregional_regex_match_set.html.markdown @@ -59,6 +59,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The name or description of the Regex Match Set. * `regexMatchTuple` - (Required) The regular expression pattern that you want AWS WAF to search for in web requests, the location in requests that you want AWS WAF to search, and other settings. See below. @@ -120,4 +121,4 @@ Using `terraform import`, import WAF Regional Regex Match Set using the id. 
For % terraform import aws_wafregional_regex_match_set.example a1b2c3d4-d5f6-7777-8888-9999aaaabbbbcccc ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/wafregional_regex_pattern_set.html.markdown b/website/docs/cdktf/typescript/r/wafregional_regex_pattern_set.html.markdown index 8689dc18a9f2..980b754a24d8 100644 --- a/website/docs/cdktf/typescript/r/wafregional_regex_pattern_set.html.markdown +++ b/website/docs/cdktf/typescript/r/wafregional_regex_pattern_set.html.markdown @@ -39,6 +39,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The name or description of the Regex Pattern Set. * `regexPatternStrings` - (Optional) A list of regular expression (regex) patterns that you want AWS WAF to search for, such as `B[a@]dB[o0]t`. @@ -80,4 +81,4 @@ Using `terraform import`, import WAF Regional Regex Pattern Set using the id. 
Fo % terraform import aws_wafregional_regex_pattern_set.example a1b2c3d4-d5f6-7777-8888-9999aaaabbbbcccc ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/wafregional_rule.html.markdown b/website/docs/cdktf/typescript/r/wafregional_rule.html.markdown index c81c01d931bd..ed4bb52fd901 100644 --- a/website/docs/cdktf/typescript/r/wafregional_rule.html.markdown +++ b/website/docs/cdktf/typescript/r/wafregional_rule.html.markdown @@ -56,6 +56,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The name or description of the rule. * `metricName` - (Required) The name or description for the Amazon CloudWatch metric of this rule. * `predicate` - (Optional) The objects to include in a rule (documented below). @@ -115,4 +116,4 @@ Using `terraform import`, import WAF Regional Rule using the id. For example: % terraform import aws_wafregional_rule.wafrule a1b2c3d4-d5f6-7777-8888-9999aaaabbbbcccc ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/wafregional_rule_group.html.markdown b/website/docs/cdktf/typescript/r/wafregional_rule_group.html.markdown index 527d6704e4c7..916b83dfdb02 100644 --- a/website/docs/cdktf/typescript/r/wafregional_rule_group.html.markdown +++ b/website/docs/cdktf/typescript/r/wafregional_rule_group.html.markdown @@ -59,6 +59,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) A friendly name of the rule group * `metricName` - (Required) A friendly name for the metrics from the rule group * `activatedRule` - (Optional) A list of activated rules, see below @@ -116,4 +117,4 @@ Using `terraform import`, import WAF Regional Rule Group using the id. For examp % terraform import aws_wafregional_rule_group.example a1b2c3d4-d5f6-7777-8888-9999aaaabbbbcccc ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/wafregional_size_constraint_set.html.markdown b/website/docs/cdktf/typescript/r/wafregional_size_constraint_set.html.markdown index b695cd29a346..3056fcb18cf9 100644 --- a/website/docs/cdktf/typescript/r/wafregional_size_constraint_set.html.markdown +++ b/website/docs/cdktf/typescript/r/wafregional_size_constraint_set.html.markdown @@ -48,6 +48,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The name or description of the Size Constraint Set. * `sizeConstraints` - (Optional) Specifies the parts of web requests that you want to inspect the size of. @@ -119,4 +120,4 @@ Using `terraform import`, import WAF Size Constraint Set using the id. 
For examp % terraform import aws_wafregional_size_constraint_set.size_constraint_set a1b2c3d4-d5f6-7777-8888-9999aaaabbbbcccc ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/wafregional_sql_injection_match_set.html.markdown b/website/docs/cdktf/typescript/r/wafregional_sql_injection_match_set.html.markdown index 408219ab0572..52885ba3fd02 100644 --- a/website/docs/cdktf/typescript/r/wafregional_sql_injection_match_set.html.markdown +++ b/website/docs/cdktf/typescript/r/wafregional_sql_injection_match_set.html.markdown @@ -46,6 +46,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The name or description of the SizeConstraintSet. * `sqlInjectionMatchTuple` - (Optional) The parts of web requests that you want AWS WAF to inspect for malicious SQL code and, if you want AWS WAF to inspect a header, the name of the header. 
@@ -107,4 +108,4 @@ Using `terraform import`, import WAF Regional Sql Injection Match Set using the % terraform import aws_wafregional_sql_injection_match_set.sql_injection_match_set a1b2c3d4-d5f6-7777-8888-9999aaaabbbbcccc ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/wafregional_web_acl.html.markdown b/website/docs/cdktf/typescript/r/wafregional_web_acl.html.markdown index 13d0705379e0..9b216a2d3584 100644 --- a/website/docs/cdktf/typescript/r/wafregional_web_acl.html.markdown +++ b/website/docs/cdktf/typescript/r/wafregional_web_acl.html.markdown @@ -159,6 +159,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `defaultAction` - (Required) The action that you want AWS WAF Regional to take when a request doesn't match the criteria in any of the rules that are associated with the web ACL. * `metricName` - (Required) The name or description for the Amazon CloudWatch metric of this web ACL. * `name` - (Required) The name or description of the web ACL. @@ -241,4 +242,4 @@ Using `terraform import`, import WAF Regional Web ACL using the id. 
For example: % terraform import aws_wafregional_web_acl.wafacl a1b2c3d4-d5f6-7777-8888-9999aaaabbbbcccc ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/wafregional_web_acl_association.html.markdown b/website/docs/cdktf/typescript/r/wafregional_web_acl_association.html.markdown index 522e6c4ba56d..c10e2c17b860 100644 --- a/website/docs/cdktf/typescript/r/wafregional_web_acl_association.html.markdown +++ b/website/docs/cdktf/typescript/r/wafregional_web_acl_association.html.markdown @@ -207,6 +207,7 @@ resource "aws_wafregional_web_acl_association" "association" { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `webAclId` - (Required) The ID of the WAF Regional WebACL to create an association. * `resourceArn` - (Required) ARN of the resource to associate with. For example, an Application Load Balancer or API Gateway Stage. 
@@ -254,4 +255,4 @@ Using `terraform import`, import WAF Regional Web ACL Association using their `w % terraform import aws_wafregional_web_acl_association.foo web_acl_id:resource_arn ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/wafregional_xss_match_set.html.markdown b/website/docs/cdktf/typescript/r/wafregional_xss_match_set.html.markdown index 5ba30178f82a..fe34b8407bd5 100644 --- a/website/docs/cdktf/typescript/r/wafregional_xss_match_set.html.markdown +++ b/website/docs/cdktf/typescript/r/wafregional_xss_match_set.html.markdown @@ -52,6 +52,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The name of the set * `xssMatchTuple` - (Optional) The parts of web requests that you want to inspect for cross-site scripting attacks. @@ -105,4 +106,4 @@ Using `terraform import`, import AWS WAF Regional XSS Match using the `id`. For % terraform import aws_wafregional_xss_match_set.example 12345abcde ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/wafv2_api_key.html.markdown b/website/docs/cdktf/typescript/r/wafv2_api_key.html.markdown index 4ac62861ffc1..07fa23d3e9a8 100644 --- a/website/docs/cdktf/typescript/r/wafv2_api_key.html.markdown +++ b/website/docs/cdktf/typescript/r/wafv2_api_key.html.markdown @@ -39,6 +39,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +- `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). - `scope` - (Required) Specifies whether this is for an AWS CloudFront distribution or for a regional application. Valid values are `CLOUDFRONT` or `REGIONAL`. Changing this forces a new resource to be created. **NOTE:** WAFv2 API Keys deployed for `CLOUDFRONT` must be created within the `us-east-1` region. - `tokenDomains` - (Required) The domains that you want to be able to use the API key with, for example `example.com`. You can specify up to 5 domains. Changing this forces a new resource to be created. @@ -80,4 +81,4 @@ Using `terraform import`, import WAFv2 API Key using `api_key,scope`. For exampl % terraform import aws_wafv2_api_key.example a1b2c3d4-5678-90ab-cdef-EXAMPLE11111,REGIONAL ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/wafv2_ip_set.html.markdown b/website/docs/cdktf/typescript/r/wafv2_ip_set.html.markdown index 0716244302c1..e691426b02c5 100644 --- a/website/docs/cdktf/typescript/r/wafv2_ip_set.html.markdown +++ b/website/docs/cdktf/typescript/r/wafv2_ip_set.html.markdown @@ -46,6 +46,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Optional, Forces new resource) A friendly name of the IP set. If omitted, Terraform will assign a random, unique name. Conflicts with `namePrefix`. * `namePrefix` - (Optional) Creates a unique name beginning with the specified prefix. Conflicts with `name`. * `description` - (Optional) A friendly description of the IP set. 
@@ -94,4 +95,4 @@ Using `terraform import`, import WAFv2 IP Sets using `ID/name/scope`. For exampl % terraform import aws_wafv2_ip_set.example a1b2c3d4-d5f6-7777-8888-9999aaaabbbbcccc/example/REGIONAL ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/wafv2_regex_pattern_set.html.markdown b/website/docs/cdktf/typescript/r/wafv2_regex_pattern_set.html.markdown index 3f40bc570509..4656a8043d08 100644 --- a/website/docs/cdktf/typescript/r/wafv2_regex_pattern_set.html.markdown +++ b/website/docs/cdktf/typescript/r/wafv2_regex_pattern_set.html.markdown @@ -52,11 +52,12 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Optional, Forces new resource) A friendly name of the regular expression pattern set. If omitted, Terraform will assign a random, unique name. Conflicts with `namePrefix`. * `namePrefix` - (Optional) Creates a unique name beginning with the specified prefix. Conflicts with `name`. * `description` - (Optional) A friendly description of the regular expression pattern set. * `scope` - (Required) Specifies whether this is for an AWS CloudFront distribution or for a regional application. Valid values are `CLOUDFRONT` or `REGIONAL`. To work with CloudFront, you must also specify the region `us-east-1` (N. Virginia) on the AWS provider. -* `regularExpression` - (Optional) One or more blocks of regular expression patterns that you want AWS WAF to search for, such as `B[a@]dB[o0]t`. See [Regular Expression](#regular-expression) below for details. A maximum of 10 `regularExpression` blocks may be specified. 
+* `regularExpression` - (Optional) One or more blocks of regular expression patterns that you want AWS WAF to search for, such as `B[a@]dB[o0]t`. See [Regular Expression](#regular-expression) below for details. * `tags` - (Optional) An array of key:value pairs to associate with the resource. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. ### Regular Expression @@ -103,4 +104,4 @@ Using `terraform import`, import WAFv2 Regex Pattern Sets using `ID/name/scope`. % terraform import aws_wafv2_regex_pattern_set.example a1b2c3d4-d5f6-7777-8888-9999aaaabbbbcccc/example/REGIONAL ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/wafv2_rule_group.html.markdown b/website/docs/cdktf/typescript/r/wafv2_rule_group.html.markdown index 6a95fcecffa8..17223cfa0da4 100644 --- a/website/docs/cdktf/typescript/r/wafv2_rule_group.html.markdown +++ b/website/docs/cdktf/typescript/r/wafv2_rule_group.html.markdown @@ -319,16 +319,78 @@ class MyConvertedCode extends TerraformStack { ``` +### Using rules_json + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { Fn, Token, TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. 
+ */ +import { Wafv2RuleGroup } from "./.gen/providers/aws/wafv2-rule-group"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + new Wafv2RuleGroup(this, "example", { + capacity: 100, + name: "example-rule-group", + rulesJson: Token.asString( + Fn.jsonencode([ + { + Action: { + Count: {}, + }, + Name: "rule-1", + Priority: 1, + Statement: { + ByteMatchStatement: { + FieldToMatch: { + UriPath: {}, + }, + PositionalConstraint: "CONTAINS", + SearchString: "badbot", + TextTransformations: [ + { + Priority: 1, + Type: "NONE", + }, + ], + }, + }, + VisibilityConfig: { + CloudwatchMetricsEnabled: false, + MetricName: "friendly-rule-metric-name", + SampledRequestsEnabled: false, + }, + }, + ]) + ), + scope: "REGIONAL", + visibilityConfig: { + cloudwatchMetricsEnabled: false, + metricName: "friendly-metric-name", + sampledRequestsEnabled: false, + }, + }); + } +} + +``` + ## Argument Reference This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `capacity` - (Required, Forces new resource) The web ACL capacity units (WCUs) required for this rule group. See [here](https://docs.aws.amazon.com/waf/latest/APIReference/API_CreateRuleGroup.html#API_CreateRuleGroup_RequestSyntax) for general information and [here](https://docs.aws.amazon.com/waf/latest/developerguide/waf-rule-statements-list.html) for capacity specific information. * `customResponseBody` - (Optional) Defines custom response bodies that can be referenced by `customResponse` actions. See [Custom Response Body](#custom-response-body) below for details. * `description` - (Optional) A friendly description of the rule group. 
* `name` - (Required, Forces new resource) A friendly name of the rule group. * `namePrefix` - (Optional) Creates a unique name beginning with the specified prefix. Conflicts with `name`. * `rule` - (Optional) The rule blocks used to identify the web requests that you want to `allow`, `block`, or `count`. See [Rules](#rules) below for details. +* `rulesJson` - (Optional) Raw JSON string to allow more than three nested statements. Conflicts with `rule` attribute. This is for advanced use cases where more than 3 levels of nested statements are required. **There is no drift detection at this time**. If you use this attribute instead of `rule`, you will be foregoing drift detection. Additionally, importing an existing rule group into a configuration with `rulesJson` set will result in a one time in-place update as the remote rule configuration is initially written to the `rule` attribute. See the AWS [documentation](https://docs.aws.amazon.com/waf/latest/APIReference/API_CreateRuleGroup.html) for the JSON structure. * `scope` - (Required, Forces new resource) Specifies whether this is for an AWS CloudFront distribution or for a regional application. Valid values are `CLOUDFRONT` or `REGIONAL`. To work with CloudFront, you must also specify the region `us-east-1` (N. Virginia) on the AWS provider. * `tags` - (Optional) An array of key:value pairs to associate with the resource. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. * `visibilityConfig` - (Required) Defines and enables Amazon CloudWatch metrics and web request sample collection. See [Visibility Configuration](#visibility-configuration) below for details. @@ -884,4 +946,4 @@ Using `terraform import`, import WAFv2 Rule Group using `ID/name/scope`. 
For exa % terraform import aws_wafv2_rule_group.example a1b2c3d4-d5f6-7777-8888-9999aaaabbbbcccc/example/REGIONAL ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/wafv2_web_acl.html.markdown b/website/docs/cdktf/typescript/r/wafv2_web_acl.html.markdown index ba4552504444..e91e3a8a8b34 100644 --- a/website/docs/cdktf/typescript/r/wafv2_web_acl.html.markdown +++ b/website/docs/cdktf/typescript/r/wafv2_web_acl.html.markdown @@ -14,6 +14,8 @@ Creates a WAFv2 Web ACL resource. ~> **Note** In `fieldToMatch` blocks, _e.g._, in `byteMatchStatement`, the `body` block includes an optional argument `oversizeHandling`. AWS indicates this argument will be required starting February 2023. To avoid configurations breaking when that change happens, treat the `oversizeHandling` argument as **required** as soon as possible. +!> **Warning:** If you use the `aws_wafv2_web_acl_rule_group_association` resource to associate rule groups with this Web ACL, you must add `lifecycle { ignore_changes = [rule] }` to this resource to prevent configuration drift. The association resource modifies the Web ACL's rules outside of this resource's direct management. + ## Example Usage This resource is based on `aws_wafv2_rule_group`, check the documentation of the `aws_wafv2_rule_group` resource to see examples of the various available statements. @@ -494,6 +496,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `associationConfig` - (Optional) Specifies custom configurations for the associations between the web ACL and protected resources. 
See [`associationConfig`](#association_config-block) below for details. * `captchaConfig` - (Optional) Specifies how AWS WAF should handle CAPTCHA evaluations on the ACL level (used by [AWS Bot Control](https://docs.aws.amazon.com/waf/latest/developerguide/aws-managed-rule-groups-bot.html)). See [`captchaConfig`](#captcha_config-block) below for details. * `challengeConfig` - (Optional) Specifies how AWS WAF should handle Challenge evaluations on the ACL level (used by [AWS Bot Control](https://docs.aws.amazon.com/waf/latest/developerguide/aws-managed-rule-groups-bot.html)). See [`challengeConfig`](#challenge_config-block) below for details. @@ -855,6 +858,7 @@ The `managedRuleGroupConfigs` block support the following arguments: * `awsManagedRulesBotControlRuleSet` - (Optional) Additional configuration for using the Bot Control managed rule group. Use this to specify the inspection level that you want to use. See [`awsManagedRulesBotControlRuleSet`](#aws_managed_rules_bot_control_rule_set-block) for more details * `awsManagedRulesAcfpRuleSet` - (Optional) Additional configuration for using the Account Creation Fraud Prevention managed rule group. Use this to specify information such as the registration page of your application and the type of content to accept or reject from the client. +* `awsManagedRulesAntiDdosRuleSet` - (Optional) Configuration for using the anti-DDoS managed rule group. See [`awsManagedRulesAntiDdosRuleSet`](#aws_managed_rules_anti_ddos_rule_set-block) for more details. * `awsManagedRulesAtpRuleSet` - (Optional) Additional configuration for using the Account Takeover Protection managed rule group. Use this to specify information such as the sign-in page of your application and the type of content to accept or reject from the client. * `loginPath` - (Optional, **Deprecated**) The path of the login endpoint for your application. * `passwordField` - (Optional, **Deprecated**) Details about your login page password field. 
See [`passwordField`](#password_field-block) for more details. @@ -871,9 +875,31 @@ The `managedRuleGroupConfigs` block support the following arguments: * `creationPath` - (Required) The path of the account creation endpoint for your application. This is the page on your website that accepts the completed registration form for a new user. This page must accept POST requests. * `enableRegexInPath` - (Optional) Whether or not to allow the use of regular expressions in the login page path. * `registrationPagePath` - (Required) The path of the account registration endpoint for your application. This is the page on your website that presents the registration form to new users. This page must accept GET text/html requests. -* `requestInspection` - (Optional) The criteria for inspecting login requests, used by the ATP rule group to validate credentials usage. See [`requestInspection`](#request_inspection-block) for more details. +* `requestInspection` - (Optional) The criteria for inspecting login requests, used by the ATP rule group to validate credentials usage. See [`requestInspection`](#request_inspection-block-acfp) for more details. * `responseInspection` - (Optional) The criteria for inspecting responses to login requests, used by the ATP rule group to track login failure rates. Note that Response Inspection is available only on web ACLs that protect CloudFront distributions. See [`responseInspection`](#response_inspection-block) for more details. +### `requestInspection` Block (ACFP) + +* `addressFields` (Optional) The names of the fields in the request payload that contain your customer's primary physical address. See [`addressFields`](#address_fields-block) for more details. +* `emailField` (Optional) The name of the field in the request payload that contains your customer's email. See [`emailField`](#email_field-block) for more details. +* `passwordField` (Optional) Details about your login page password field. 
See [`passwordField`](#password_field-block) for more details. +* `payloadType` (Required) The payload type for your login endpoint, either JSON or form encoded. +* `phoneNumberFields` (Optional) The names of the fields in the request payload that contain your customer's primary phone number. See [`phoneNumberFields`](#phone_number_fields-block) for more details. +* `usernameField` (Optional) Details about your login page username field. See [`usernameField`](#username_field-block) for more details. + +### `awsManagedRulesAntiDdosRuleSet` Block + +* `clientSideActionConfig` - (Required) Configuration for the request handling that's applied by the managed rule group rules `ChallengeAllDuringEvent` and `ChallengeDDoSRequests` during a distributed denial of service (DDoS) attack. See [`clientSideActionConfig`](#client_side_action_config-block) for more details. +* `sensitivityToBlock` - (Optional) Sensitivity that the rule group rule DDoSRequests uses when matching against the DDoS suspicion labeling on a request. Valid values are `LOW` (Default), `MEDIUM`, and `HIGH`. + +### `clientSideActionConfig` Block + +* `challenge` - (Required) Configuration for the use of the `AWSManagedRulesAntiDDoSRuleSet` rules `ChallengeAllDuringEvent` and `ChallengeDDoSRequests`. + * `exemptUriRegularExpression` - (Optional) Block for the list of the regular expressions to match against the web request URI, used to identify requests that can't handle a silent browser challenge. + * `regexString` - (Optional) Regular expression string. + * `sensitivity` - (Optional) Sensitivity that the rule group rule ChallengeDDoSRequests uses when matching against the DDoS suspicion labeling on a request. Valid values are `LOW`, `MEDIUM` and `HIGH` (Default). + * `usageOfAction` - (Required) Configuration whether to use the `AWSManagedRulesAntiDDoSRuleSet` rules `ChallengeAllDuringEvent` and `ChallengeDDoSRequests` in the rule group evaluation. Valid values are `ENABLED` and `DISABLED`. 
+ ### `awsManagedRulesAtpRuleSet` Block * `enableRegexInPath` - (Optional) Whether or not to allow the use of regular expressions in the login page path. @@ -883,11 +909,8 @@ The `managedRuleGroupConfigs` block support the following arguments: ### `requestInspection` Block -* `addressFields` (Optional) The names of the fields in the request payload that contain your customer's primary physical address. See [`addressFields`](#address_fields-block) for more details. -* `emailField` (Optional) The name of the field in the request payload that contains your customer's email. See [`emailField`](#email_field-block) for more details. * `passwordField` (Optional) Details about your login page password field. See [`passwordField`](#password_field-block) for more details. * `payloadType` (Required) The payload type for your login endpoint, either JSON or form encoded. -* `phoneNumberFields` (Optional) The names of the fields in the request payload that contain your customer's primary phone number. See [`phoneNumberFields`](#phone_number_fields-block) for more details. * `usernameField` (Optional) Details about your login page username field. See [`usernameField`](#username_field-block) for more details. ### `addressFields` Block @@ -1142,6 +1165,7 @@ Aggregate the request counts using one or more web request components as the agg The `customKey` block supports the following arguments: +* `asn` - (Optional) Use an Autonomous System Number (ASN) derived from the request's originating or forwarded IP address as an aggregate key. See [RateLimit `asn`](#ratelimit-asn-block) below for details. * `cookie` - (Optional) Use the value of a cookie in the request as an aggregate key. See [RateLimit `cookie`](#ratelimit-cookie-block) below for details. * `forwardedIp` - (Optional) Use the first IP address in an HTTP header as an aggregate key. See [`forwardedIp`](#ratelimit-forwarded_ip-block) below for details. 
* `httpMethod` - (Optional) Use the request's HTTP method as an aggregate key. See [RateLimit `httpMethod`](#ratelimit-http_method-block) below for details. @@ -1154,6 +1178,12 @@ The `customKey` block supports the following arguments: * `queryString` - (Optional) Use the request's query string as an aggregate key. See [RateLimit `queryString`](#ratelimit-query_string-block) below for details. * `uriPath` - (Optional) Use the request's URI path as an aggregate key. See [RateLimit `uriPath`](#ratelimit-uri_path-block) below for details. +### RateLimit `asn` Block + +Use an Autonomous System Number (ASN) derived from the request's originating or forwarded IP address as an aggregate key. Each distinct ASN contributes to the aggregation instance. + +The `asn` block is configured as an empty block `{}`. + ### RateLimit `cookie` Block Use the value of a cookie in the request as an aggregate key. Each distinct value in the cookie contributes to the aggregation instance. If you use a single cookie as your custom key, then each value fully defines an aggregation instance. @@ -1281,4 +1311,4 @@ Using `terraform import`, import WAFv2 Web ACLs using `ID/Name/Scope`. For examp % terraform import aws_wafv2_web_acl.example a1b2c3d4-d5f6-7777-8888-9999aaaabbbbcccc/example/REGIONAL ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/wafv2_web_acl_association.html.markdown b/website/docs/cdktf/typescript/r/wafv2_web_acl_association.html.markdown index cd214108d937..ea687251d39d 100644 --- a/website/docs/cdktf/typescript/r/wafv2_web_acl_association.html.markdown +++ b/website/docs/cdktf/typescript/r/wafv2_web_acl_association.html.markdown @@ -116,6 +116,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `resourceArn` - (Required) The Amazon Resource Name (ARN) of the resource to associate with the web ACL. This must be an ARN of an Application Load Balancer, an Amazon API Gateway stage (REST only, HTTP is unsupported), an Amazon Cognito User Pool, an Amazon AppSync GraphQL API, an Amazon App Runner service, or an Amazon Verified Access instance. * `webAclArn` - (Required) The Amazon Resource Name (ARN) of the Web ACL that you want to associate with the resource. @@ -161,4 +162,4 @@ Using `terraform import`, import WAFv2 Web ACL Association using `WEB_ACL_ARN,RE % terraform import aws_wafv2_web_acl_association.example arn:aws:wafv2:...7ce849ea,arn:aws:apigateway:...ages/name ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/wafv2_web_acl_logging_configuration.html.markdown b/website/docs/cdktf/typescript/r/wafv2_web_acl_logging_configuration.html.markdown index bb4d926ec772..d56df2537f17 100644 --- a/website/docs/cdktf/typescript/r/wafv2_web_acl_logging_configuration.html.markdown +++ b/website/docs/cdktf/typescript/r/wafv2_web_acl_logging_configuration.html.markdown @@ -151,7 +151,7 @@ class MyConvertedCode extends TerraformStack { test: "ArnLike", values: [ "arn:aws:logs:${" + - dataAwsRegionCurrent.name + + dataAwsRegionCurrent.region + "}:${" + current.accountId + "}:*", @@ -195,6 +195,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
* `logDestinationConfigs` - (Required) Configuration block that allows you to associate Amazon Kinesis Data Firehose, Cloudwatch Log log group, or S3 bucket Amazon Resource Names (ARNs) with the web ACL. **Note:** data firehose, log group, or bucket name **must** be prefixed with `aws-waf-logs-`, e.g. `aws-waf-logs-example-firehose`, `aws-waf-logs-example-log-group`, or `aws-waf-logs-example-bucket`. * `loggingFilter` - (Optional) Configuration block that specifies which web requests are kept in the logs and which are dropped. It allows filtering based on the rule action and the web request labels applied by matching rules during web ACL evaluation. For more details, refer to the [Logging Filter](#logging-filter) section below. * `redactedFields` - (Optional) Configuration for parts of the request that you want to keep out of the logs. Up to 100 `redactedFields` blocks are supported. See [Redacted Fields](#redacted-fields) below for more details. @@ -293,4 +294,4 @@ Using `terraform import`, import WAFv2 Web ACL Logging Configurations using the % terraform import aws_wafv2_web_acl_logging_configuration.example arn:aws:wafv2:us-west-2:123456789012:regional/webacl/test-logs/a1b2c3d4-5678-90ab-cdef ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/wafv2_web_acl_rule_group_association.html.markdown b/website/docs/cdktf/typescript/r/wafv2_web_acl_rule_group_association.html.markdown new file mode 100644 index 000000000000..7598cc13ce33 --- /dev/null +++ b/website/docs/cdktf/typescript/r/wafv2_web_acl_rule_group_association.html.markdown @@ -0,0 +1,663 @@ +--- +subcategory: "WAF" +layout: "aws" +page_title: "AWS: aws_wafv2_web_acl_rule_group_association" +description: |- + Associates a WAFv2 Rule Group with a Web ACL by adding a rule that references the Rule Group. 
+--- + + + +# Resource: aws_wafv2_web_acl_rule_group_association + +Associates a WAFv2 Rule Group (custom or managed) with a Web ACL by adding a rule that references the Rule Group. Use this resource to apply the rules defined in a Rule Group to a Web ACL without duplicating rule definitions. + +This resource supports both: + +- **Custom Rule Groups**: User-created rule groups that you manage within your AWS account +- **Managed Rule Groups**: Pre-configured rule groups provided by AWS or third-party vendors + +!> **Warning:** Verify the rule names in your `ruleActionOverride`s carefully. With managed rule groups, WAF silently ignores any override that uses an invalid rule name. With customer-owned rule groups, invalid rule names in your overrides will cause web ACL updates to fail. An invalid rule name is any name that doesn't exactly match the case-sensitive name of an existing rule in the rule group. + +!> **Warning:** Using this resource will cause the associated Web ACL resource to show configuration drift in the `rule` argument unless you add `lifecycle { ignore_changes = [rule] }` to the Web ACL resource configuration. This is because this resource modifies the Web ACL's rules outside of the Web ACL resource's direct management. + +~> **Note:** This resource creates a rule within the Web ACL that references the entire Rule Group. The rule group's individual rules are evaluated as a unit when requests are processed by the Web ACL. + +## Example Usage + +### Custom Rule Group - Basic Usage + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { Token, TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. 
+ */ +import { Wafv2RuleGroup } from "./.gen/providers/aws/wafv2-rule-group"; +import { Wafv2WebAcl } from "./.gen/providers/aws/wafv2-web-acl"; +import { Wafv2WebAclRuleGroupAssociation } from "./.gen/providers/aws/wafv2-web-acl-rule-group-association"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + const example = new Wafv2RuleGroup(this, "example", { + capacity: 10, + name: "example-rule-group", + rule: [ + { + action: { + block: {}, + }, + name: "block-suspicious-requests", + priority: 1, + statement: { + geoMatchStatement: { + countryCodes: ["CN", "RU"], + }, + }, + visibilityConfig: { + cloudwatchMetricsEnabled: true, + metricName: "block-suspicious-requests", + sampledRequestsEnabled: true, + }, + }, + ], + scope: "REGIONAL", + visibilityConfig: { + cloudwatchMetricsEnabled: true, + metricName: "example-rule-group", + sampledRequestsEnabled: true, + }, + }); + const awsWafv2WebAclExample = new Wafv2WebAcl(this, "example_1", { + defaultAction: { + allow: {}, + }, + lifecycle: { + ignoreChanges: [rule], + }, + name: "example-web-acl", + scope: "REGIONAL", + visibilityConfig: { + cloudwatchMetricsEnabled: true, + metricName: "example-web-acl", + sampledRequestsEnabled: true, + }, + }); + /*This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match.*/ + awsWafv2WebAclExample.overrideLogicalId("example"); + const awsWafv2WebAclRuleGroupAssociationExample = + new Wafv2WebAclRuleGroupAssociation(this, "example_2", { + priority: 100, + ruleGroupReference: [ + { + arn: example.arn, + }, + ], + ruleName: "example-rule-group-rule", + webAclArn: Token.asString(awsWafv2WebAclExample.arn), + }); + /*This allows the Terraform resource name to match the original name. 
You can remove the call if you don't need them to match.*/ + awsWafv2WebAclRuleGroupAssociationExample.overrideLogicalId("example"); + } +} + +``` + +### Managed Rule Group - Basic Usage + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. + */ +import { Wafv2WebAcl } from "./.gen/providers/aws/wafv2-web-acl"; +import { Wafv2WebAclRuleGroupAssociation } from "./.gen/providers/aws/wafv2-web-acl-rule-group-association"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + const example = new Wafv2WebAcl(this, "example", { + defaultAction: { + allow: {}, + }, + lifecycle: { + ignoreChanges: [rule], + }, + name: "example-web-acl", + scope: "REGIONAL", + visibilityConfig: { + cloudwatchMetricsEnabled: true, + metricName: "example-web-acl", + sampledRequestsEnabled: true, + }, + }); + new Wafv2WebAclRuleGroupAssociation(this, "managed_example", { + managedRuleGroup: [ + { + name: "AWSManagedRulesCommonRuleSet", + vendorName: "AWS", + }, + ], + priority: 50, + ruleName: "aws-common-rule-set", + webAclArn: example.arn, + }); + } +} + +``` + +### Managed Rule Group - With Version + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. 
+ */ +import { Wafv2WebAclRuleGroupAssociation } from "./.gen/providers/aws/wafv2-web-acl-rule-group-association"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + new Wafv2WebAclRuleGroupAssociation(this, "managed_versioned", { + managedRuleGroup: [ + { + name: "AWSManagedRulesCommonRuleSet", + vendorName: "AWS", + version: "Version_1.0", + }, + ], + priority: 60, + ruleName: "aws-common-rule-set-versioned", + webAclArn: example.arn, + }); + } +} + +``` + +### Managed Rule Group - With Rule Action Overrides + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. + */ +import { Wafv2WebAclRuleGroupAssociation } from "./.gen/providers/aws/wafv2-web-acl-rule-group-association"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + new Wafv2WebAclRuleGroupAssociation(this, "managed_with_overrides", { + managedRuleGroup: [ + { + name: "AWSManagedRulesCommonRuleSet", + ruleActionOverride: [ + { + actionToUse: [ + { + count: [ + { + customRequestHandling: [ + { + insertHeader: [ + { + name: "X-RFI-Override", + value: "counted", + }, + ], + }, + ], + }, + ], + }, + ], + name: "GenericRFI_BODY", + }, + { + actionToUse: [ + { + captcha: [{}], + }, + ], + name: "SizeRestrictions_BODY", + }, + ], + vendorName: "AWS", + }, + ], + priority: 70, + ruleName: "aws-common-rule-set-with-overrides", + webAclArn: example.arn, + }); + } +} + +``` + +### Custom Rule Group - With Override Action + +```typescript +// DO NOT EDIT. 
Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { Token, TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. + */ +import { Wafv2WebAclRuleGroupAssociation } from "./.gen/providers/aws/wafv2-web-acl-rule-group-association"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + new Wafv2WebAclRuleGroupAssociation(this, "example", { + overrideAction: "count", + priority: 100, + ruleGroupReference: [ + { + arn: Token.asString(awsWafv2RuleGroupExample.arn), + }, + ], + ruleName: "example-rule-group-rule", + webAclArn: Token.asString(awsWafv2WebAclExample.arn), + }); + } +} + +``` + +### Custom Rule Group - With Rule Action Overrides + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { Token, TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. 
+ */ +import { Wafv2RuleGroup } from "./.gen/providers/aws/wafv2-rule-group"; +import { Wafv2WebAcl } from "./.gen/providers/aws/wafv2-web-acl"; +import { Wafv2WebAclRuleGroupAssociation } from "./.gen/providers/aws/wafv2-web-acl-rule-group-association"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + const example = new Wafv2RuleGroup(this, "example", { + capacity: 10, + name: "example-rule-group", + rule: [ + { + action: { + block: {}, + }, + name: "geo-block-rule", + priority: 1, + statement: { + geoMatchStatement: { + countryCodes: ["CN", "RU"], + }, + }, + visibilityConfig: { + cloudwatchMetricsEnabled: true, + metricName: "geo-block-rule", + sampledRequestsEnabled: true, + }, + }, + { + action: { + block: {}, + }, + name: "rate-limit-rule", + priority: 2, + statement: { + rateBasedStatement: { + aggregateKeyType: "IP", + limit: 1000, + }, + }, + visibilityConfig: { + cloudwatchMetricsEnabled: true, + metricName: "rate-limit-rule", + sampledRequestsEnabled: true, + }, + }, + ], + scope: "REGIONAL", + visibilityConfig: { + cloudwatchMetricsEnabled: true, + metricName: "example-rule-group", + sampledRequestsEnabled: true, + }, + }); + const awsWafv2WebAclExample = new Wafv2WebAcl(this, "example_1", { + defaultAction: { + allow: {}, + }, + lifecycle: { + ignoreChanges: [rule], + }, + name: "example-web-acl", + scope: "REGIONAL", + visibilityConfig: { + cloudwatchMetricsEnabled: true, + metricName: "example-web-acl", + sampledRequestsEnabled: true, + }, + }); + /*This allows the Terraform resource name to match the original name. 
You can remove the call if you don't need them to match.*/ + awsWafv2WebAclExample.overrideLogicalId("example"); + const awsWafv2WebAclRuleGroupAssociationExample = + new Wafv2WebAclRuleGroupAssociation(this, "example_2", { + priority: 100, + ruleGroupReference: [ + { + arn: example.arn, + ruleActionOverride: [ + { + actionToUse: [ + { + count: [ + { + customRequestHandling: [ + { + insertHeader: [ + { + name: "X-Geo-Block-Override", + value: "counted", + }, + ], + }, + ], + }, + ], + }, + ], + name: "geo-block-rule", + }, + { + actionToUse: [ + { + captcha: [ + { + customRequestHandling: [ + { + insertHeader: [ + { + name: "X-Rate-Limit-Override", + value: "captcha-required", + }, + ], + }, + ], + }, + ], + }, + ], + name: "rate-limit-rule", + }, + ], + }, + ], + ruleName: "example-rule-group-rule", + webAclArn: Token.asString(awsWafv2WebAclExample.arn), + }); + /*This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match.*/ + awsWafv2WebAclRuleGroupAssociationExample.overrideLogicalId("example"); + } +} + +``` + +### Custom Rule Group - CloudFront Web ACL + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { Token, TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. 
+ */ +import { Wafv2RuleGroup } from "./.gen/providers/aws/wafv2-rule-group"; +import { Wafv2WebAcl } from "./.gen/providers/aws/wafv2-web-acl"; +import { Wafv2WebAclRuleGroupAssociation } from "./.gen/providers/aws/wafv2-web-acl-rule-group-association"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + const cloudfrontExample = new Wafv2RuleGroup(this, "cloudfront_example", { + capacity: 10, + name: "cloudfront-rule-group", + rule: [ + { + action: { + block: {}, + }, + name: "rate-limit", + priority: 1, + statement: { + rateBasedStatement: { + aggregateKeyType: "IP", + limit: 2000, + }, + }, + visibilityConfig: { + cloudwatchMetricsEnabled: true, + metricName: "rate-limit", + sampledRequestsEnabled: true, + }, + }, + ], + scope: "CLOUDFRONT", + visibilityConfig: { + cloudwatchMetricsEnabled: true, + metricName: "cloudfront-rule-group", + sampledRequestsEnabled: true, + }, + }); + const awsWafv2WebAclCloudfrontExample = new Wafv2WebAcl( + this, + "cloudfront_example_1", + { + defaultAction: { + allow: {}, + }, + lifecycle: { + ignoreChanges: [rule], + }, + name: "cloudfront-web-acl", + scope: "CLOUDFRONT", + visibilityConfig: { + cloudwatchMetricsEnabled: true, + metricName: "cloudfront-web-acl", + sampledRequestsEnabled: true, + }, + } + ); + /*This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match.*/ + awsWafv2WebAclCloudfrontExample.overrideLogicalId("cloudfront_example"); + const awsWafv2WebAclRuleGroupAssociationCloudfrontExample = + new Wafv2WebAclRuleGroupAssociation(this, "cloudfront_example_2", { + priority: 50, + ruleGroupReference: [ + { + arn: cloudfrontExample.arn, + }, + ], + ruleName: "cloudfront-rule-group-rule", + webAclArn: Token.asString(awsWafv2WebAclCloudfrontExample.arn), + }); + /*This allows the Terraform resource name to match the original name. 
You can remove the call if you don't need them to match.*/ + awsWafv2WebAclRuleGroupAssociationCloudfrontExample.overrideLogicalId( + "cloudfront_example" + ); + } +} + +``` + +## Argument Reference + +The following arguments are required: + +* `ruleName` - (Required) Name of the rule to create in the Web ACL that references the rule group. Must be between 1 and 128 characters. +* `priority` - (Required) Priority of the rule within the Web ACL. Rules are evaluated in order of priority, with lower numbers evaluated first. +* `webAclArn` - (Required) ARN of the Web ACL to associate the Rule Group with. + +The following arguments are optional: + +* `managedRuleGroup` - (Optional) Managed Rule Group configuration. One of `ruleGroupReference` or `managedRuleGroup` is required. Conflicts with `ruleGroupReference`. [See below](#managed_rule_group). +* `overrideAction` - (Optional) Override action for the rule group. Valid values are `none` and `count`. Defaults to `none`. When set to `count`, the actions defined in the rule group rules are overridden to count matches instead of blocking or allowing requests. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `ruleGroupReference` - (Optional) Custom Rule Group reference configuration. One of `ruleGroupReference` or `managedRuleGroup` is required. Conflicts with `managedRuleGroup`. [See below](#rule_group_reference). + +### rule_group_reference + +* `arn` - (Required) ARN of the Rule Group to associate with the Web ACL. +* `ruleActionOverride` - (Optional) Override actions for specific rules within the rule group. [See below](#rule_action_override). + +### managed_rule_group + +* `name` - (Required) Name of the managed rule group. 
+* `vendorName` - (Required) Name of the managed rule group vendor. For AWS managed rule groups, this is `AWS`. +* `version` - (Optional) Version of the managed rule group. If not specified, the default version is used. +* `ruleActionOverride` - (Optional) Override actions for specific rules within the rule group. [See below](#rule_action_override). + +### rule_action_override + +* `name` - (Required) Name of the rule to override within the rule group. Verify the name carefully. With managed rule groups, WAF silently ignores any override that uses an invalid rule name. With customer-owned rule groups, invalid rule names in your overrides will cause web ACL updates to fail. An invalid rule name is any name that doesn't exactly match the case-sensitive name of an existing rule in the rule group. +* `actionToUse` - (Required) Action to use instead of the rule's original action. [See below](#action_to_use). + +### action_to_use + +Exactly one of the following action blocks must be specified: + +* `allow` - (Optional) Allow the request. [See below](#allow). +* `block` - (Optional) Block the request. [See below](#block). +* `captcha` - (Optional) Require CAPTCHA verification. [See below](#captcha). +* `challenge` - (Optional) Require challenge verification. [See below](#challenge). +* `count` - (Optional) Count the request without taking action. [See below](#count). + +### allow + +* `customRequestHandling` - (Optional) Custom handling for allowed requests. [See below](#custom_request_handling). + +### block + +* `customResponse` - (Optional) Custom response for blocked requests. [See below](#custom_response). + +### captcha + +* `customRequestHandling` - (Optional) Custom handling for CAPTCHA requests. [See below](#custom_request_handling). + +### challenge + +* `customRequestHandling` - (Optional) Custom handling for challenge requests. [See below](#custom_request_handling). + +### count + +* `customRequestHandling` - (Optional) Custom handling for counted requests. 
[See below](#custom_request_handling). + +### custom_request_handling + +* `insertHeader` - (Required) Headers to insert into the request. [See below](#insert_header). + +### custom_response + +* `customResponseBodyKey` - (Optional) Key of a custom response body to use. +* `responseCode` - (Required) HTTP response code to return (200-599). +* `responseHeader` - (Optional) Headers to include in the response. [See below](#response_header). + +### insert_header + +* `name` - (Required) Name of the header to insert. +* `value` - (Required) Value of the header to insert. + +### response_header + +* `name` - (Required) Name of the response header. +* `value` - (Required) Value of the response header. + +## Attribute Reference + +This resource exports the following attributes in addition to the arguments above: + +None. + +## Timeouts + +[Configuration options](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts): + +* `create` - (Default `30m`) +* `update` - (Default `30m`) +* `delete` - (Default `30m`) + +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import WAFv2 web ACL custom rule group associations using `WebACLARN,RuleGroupARN,RuleName`. For example: + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. 
+ */ +import { Wafv2WebAclRuleGroupAssociation } from "./.gen/providers/aws/wafv2-web-acl-rule-group-association"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + Wafv2WebAclRuleGroupAssociation.generateConfigForImport( + this, + "example", + "arn:aws:wafv2:us-east-1:123456789012:regional/webacl/example-web-acl/12345678-1234-1234-1234-123456789012,arn:aws:wafv2:us-east-1:123456789012:regional/rulegroup/example-rule-group/87654321-4321-4321-4321-210987654321,example-rule-group-rule" + ); + } +} + +``` + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import WAFv2 web ACL managed rule group associations using `WebACLARN,VendorName:RuleGroupName[:Version],RuleName`. For example: + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. + */ +import { Wafv2WebAclRuleGroupAssociation } from "./.gen/providers/aws/wafv2-web-acl-rule-group-association"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + Wafv2WebAclRuleGroupAssociation.generateConfigForImport( + this, + "managedExample", + "arn:aws:wafv2:us-east-1:123456789012:regional/webacl/example-web-acl/12345678-1234-1234-1234-123456789012,AWS:AWSManagedRulesCommonRuleSet,aws-common-rule-set" + ); + } +} + +``` + +Using `terraform import`, import WAFv2 web ACL custom rule group associations using `WebACLARN,RuleGroupARN,RuleName`. 
For example: + +```console +% terraform import aws_wafv2_web_acl_rule_group_association.example "arn:aws:wafv2:us-east-1:123456789012:regional/webacl/example-web-acl/12345678-1234-1234-1234-123456789012,arn:aws:wafv2:us-east-1:123456789012:regional/rulegroup/example-rule-group/87654321-4321-4321-4321-210987654321,example-rule-group-rule" +``` + +Using `terraform import`, import WAFv2 web ACL managed rule group associations using `WebACLARN,VendorName:RuleGroupName[:Version],RuleName`. For example: + +```console +% terraform import aws_wafv2_web_acl_rule_group_association.managed_example "arn:aws:wafv2:us-east-1:123456789012:regional/webacl/example-web-acl/12345678-1234-1234-1234-123456789012,AWS:AWSManagedRulesCommonRuleSet,aws-common-rule-set" +``` + + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/workspaces_connection_alias.html.markdown b/website/docs/cdktf/typescript/r/workspaces_connection_alias.html.markdown index 1239a8aae868..2ca13e874ab9 100644 --- a/website/docs/cdktf/typescript/r/workspaces_connection_alias.html.markdown +++ b/website/docs/cdktf/typescript/r/workspaces_connection_alias.html.markdown @@ -38,10 +38,11 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -The following arguments are required: +This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `connectionString` - (Required) The connection string specified for the connection alias. The connection string must be in the form of a fully qualified domain name (FQDN), such as www.example.com. -* `tags` – (Optional) A map of tags assigned to the WorkSpaces Connection Alias. 
If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. +* `tags` - (Optional) A map of tags assigned to the WorkSpaces Connection Alias. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. ## Attribute Reference @@ -92,4 +93,4 @@ Using `terraform import`, import WorkSpaces Connection Alias using the connectio % terraform import aws_workspaces_connection_alias.example rft-8012925589 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/workspaces_directory.html.markdown b/website/docs/cdktf/typescript/r/workspaces_directory.html.markdown index af2b4ef3fb41..d948c220338c 100644 --- a/website/docs/cdktf/typescript/r/workspaces_directory.html.markdown +++ b/website/docs/cdktf/typescript/r/workspaces_directory.html.markdown @@ -171,26 +171,23 @@ import { Token, TerraformStack } from "cdktf"; * See https://cdk.tf/provider-generation for more details. 
*/ import { WorkspacesDirectory } from "./.gen/providers/aws/workspaces-directory"; -interface MyConfig { - directoryId: any; -} class MyConvertedCode extends TerraformStack { - constructor(scope: Construct, name: string, config: MyConfig) { + constructor(scope: Construct, name: string) { super(scope, name); new WorkspacesDirectory(this, "example", { - active_directory_config: [ - { - domain_name: "example.internal", - service_account_secret_arn: awsSecretsmanagerSecretExample.arn, - }, - ], + activeDirectoryConfig: { + domainName: "example.internal", + serviceAccountSecretArn: Token.asString( + awsSecretsmanagerSecretExample.arn + ), + }, samlProperties: { relayStateParameterName: "RelayState", status: "ENABLED", userAccessUrl: "https://sso.example.com/", }, subnetIds: [exampleC.id, exampleD.id], - user_identity_type: "CUSTOMER_MANAGED", + userIdentityType: "CUSTOMER_MANAGED", workspaceAccessProperties: { deviceTypeAndroid: "ALLOW", deviceTypeChromeos: "ALLOW", @@ -206,10 +203,9 @@ class MyConvertedCode extends TerraformStack { defaultOu: "OU=AWS,DC=Workgroup,DC=Example,DC=com", enableInternetAccess: true, }, - workspace_directory_description: "WorkSpaces Pools directory", - workspace_directory_name: "Pool directory", - workspace_type: "POOLS", - directoryId: config.directoryId, + workspaceDirectoryDescription: "WorkSpaces Pools directory", + workspaceDirectoryName: "Pool directory", + workspaceType: "POOLS", }); } } @@ -253,22 +249,23 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `directoryId` - (Optional) The directory identifier for registration in WorkSpaces service. 
* `subnetIds` - (Optional) The identifiers of the subnets where the directory resides. -* `ipGroupIds` – (Optional) The identifiers of the IP access control groups associated with the directory. -* `tags` – (Optional) A map of tags assigned to the WorkSpaces directory. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. +* `ipGroupIds` - (Optional) The identifiers of the IP access control groups associated with the directory. +* `tags` - (Optional) A map of tags assigned to the WorkSpaces directory. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. * `certificateBasedAuthProperties` - (Optional) Configuration of certificate-based authentication (CBA) integration. Requires SAML authentication to be enabled. Defined below. -* `samlProperties` – (Optional) Configuration of SAML authentication integration. Defined below. -* `selfServicePermissions` – (Optional) Permissions to enable or disable self-service capabilities when `workspace_type` is set to `PERSONAL`.. Defined below. -* `workspaceAccessProperties` – (Optional) Specifies which devices and operating systems users can use to access their WorkSpaces. Defined below. -* `workspaceCreationProperties` – (Optional) Default properties that are used for creating WorkSpaces. Defined below. -* `workspace_type` - (Optional) Specifies the type of WorkSpaces directory. Valid values are `PERSONAL` and `POOLS`. Default is `PERSONAL`. -* `active_directory_config` – (Optional) Configuration for Active Directory integration when `workspace_type` is set to `POOLS`. Defined below. 
-* `workspace_directory_name` - (Required for `POOLS`) The name of the WorkSpaces directory when `workspace_type` is set to `POOLS`. -* `workspace_directory_description` - (Required for `POOLS`) The description of the WorkSpaces directory when `workspace_type` is set to `POOLS`. -* `user_identity_type` - (Required for `POOLS`) Specifies the user identity type for the WorkSpaces directory. Valid values are `CUSTOMER_MANAGED`, `AWS_DIRECTORY_SERVICE`, `AWS_IAM_IDENTITY_CENTER`. - --> **Note:** When `workspace_type` is set to `POOLS`, the `directoryId` is automatically generated and cannot be manually set. +* `samlProperties` - (Optional) Configuration of SAML authentication integration. Defined below. +* `selfServicePermissions` - (Optional) Permissions to enable or disable self-service capabilities when `workspaceType` is set to `PERSONAL`. Defined below. +* `workspaceAccessProperties` - (Optional) Specifies which devices and operating systems users can use to access their WorkSpaces. Defined below. +* `workspaceCreationProperties` - (Optional) Default properties that are used for creating WorkSpaces. Defined below. +* `workspaceType` - (Optional) Specifies the type of WorkSpaces directory. Valid values are `PERSONAL` and `POOLS`. Default is `PERSONAL`. +* `activeDirectoryConfig` - (Optional) Configuration for Active Directory integration when `workspaceType` is set to `POOLS`. Defined below. +* `workspaceDirectoryName` - (Required for `POOLS`) The name of the WorkSpaces directory when `workspaceType` is set to `POOLS`. +* `workspaceDirectoryDescription` - (Required for `POOLS`) The description of the WorkSpaces directory when `workspaceType` is set to `POOLS`. +* `userIdentityType` - (Required for `POOLS`) Specifies the user identity type for the WorkSpaces directory. Valid values are `CUSTOMER_MANAGED`, `AWS_DIRECTORY_SERVICE`, `AWS_IAM_IDENTITY_CENTER`. 
+ +-> **Note:** When `workspaceType` is set to `POOLS`, the `directoryId` is automatically generated and cannot be manually set. ### certificate_based_auth_properties @@ -283,39 +280,39 @@ This resource supports the following arguments: ### self_service_permissions -* `changeComputeType` – (Optional) Whether WorkSpaces directory users can change the compute type (bundle) for their workspace. Default `false`. -* `increaseVolumeSize` – (Optional) Whether WorkSpaces directory users can increase the volume size of the drives on their workspace. Default `false`. -* `rebuildWorkspace` – (Optional) Whether WorkSpaces directory users can rebuild the operating system of a workspace to its original state. Default `false`. -* `restartWorkspace` – (Optional) Whether WorkSpaces directory users can restart their workspace. Default `true`. -* `switchRunningMode` – (Optional) Whether WorkSpaces directory users can switch the running mode of their workspace. Default `false`. +* `changeComputeType` - (Optional) Whether WorkSpaces directory users can change the compute type (bundle) for their workspace. Default `false`. +* `increaseVolumeSize` - (Optional) Whether WorkSpaces directory users can increase the volume size of the drives on their workspace. Default `false`. +* `rebuildWorkspace` - (Optional) Whether WorkSpaces directory users can rebuild the operating system of a workspace to its original state. Default `false`. +* `restartWorkspace` - (Optional) Whether WorkSpaces directory users can restart their workspace. Default `true`. +* `switchRunningMode` - (Optional) Whether WorkSpaces directory users can switch the running mode of their workspace. Default `false`. ### workspace_access_properties -* `deviceTypeAndroid` – (Optional) Indicates whether users can use Android devices to access their WorkSpaces. -* `deviceTypeChromeos` – (Optional) Indicates whether users can use Chromebooks to access their WorkSpaces. 
-* `deviceTypeIos` – (Optional) Indicates whether users can use iOS devices to access their WorkSpaces. -* `deviceTypeLinux` – (Optional) Indicates whether users can use Linux clients to access their WorkSpaces. -* `deviceTypeOsx` – (Optional) Indicates whether users can use macOS clients to access their WorkSpaces. -* `deviceTypeWeb` – (Optional) Indicates whether users can access their WorkSpaces through a web browser. -* `deviceTypeWindows` – (Optional) Indicates whether users can use Windows clients to access their WorkSpaces. -* `deviceTypeZeroclient` – (Optional) Indicates whether users can use zero client devices to access their WorkSpaces. +* `deviceTypeAndroid` - (Optional) Indicates whether users can use Android devices to access their WorkSpaces. +* `deviceTypeChromeos` - (Optional) Indicates whether users can use Chromebooks to access their WorkSpaces. +* `deviceTypeIos` - (Optional) Indicates whether users can use iOS devices to access their WorkSpaces. +* `deviceTypeLinux` - (Optional) Indicates whether users can use Linux clients to access their WorkSpaces. +* `deviceTypeOsx` - (Optional) Indicates whether users can use macOS clients to access their WorkSpaces. +* `deviceTypeWeb` - (Optional) Indicates whether users can access their WorkSpaces through a web browser. +* `deviceTypeWindows` - (Optional) Indicates whether users can use Windows clients to access their WorkSpaces. +* `deviceTypeZeroclient` - (Optional) Indicates whether users can use zero client devices to access their WorkSpaces. ### workspace_creation_properties -> **Note:** Once you specified `customSecurityGroupId` or `defaultOu`, there is no way to delete these attributes. If you cleanup them from the configuration, they still be present in state. -* `customSecurityGroupId` – (Optional) The identifier of your custom security group. Should relate to the same VPC, where workspaces reside in. 
-* `defaultOu` – (Optional) The default organizational unit (OU) for your WorkSpace directories. Should conform `"OU=,DC=,...,DC="` pattern. -* `enableInternetAccess` – (Optional) Indicates whether internet access is enabled for your WorkSpaces. -* `enableMaintenanceMode` – (Optional) Indicates whether maintenance mode is enabled for your WorkSpaces. Valid only if `workspace_type` is set to `PERSONAL`. -* `userEnabledAsLocalAdministrator` – (Optional) Indicates whether users are local administrators of their WorkSpaces. Valid only if `workspace_type` is set to `PERSONAL`. +* `customSecurityGroupId` - (Optional) The identifier of your custom security group. Should relate to the same VPC, where workspaces reside in. +* `defaultOu` - (Optional) The default organizational unit (OU) for your WorkSpace directories. Should conform `"OU=,DC=,...,DC="` pattern. +* `enableInternetAccess` - (Optional) Indicates whether internet access is enabled for your WorkSpaces. +* `enableMaintenanceMode` - (Optional) Indicates whether maintenance mode is enabled for your WorkSpaces. Valid only if `workspaceType` is set to `PERSONAL`. +* `userEnabledAsLocalAdministrator` - (Optional) Indicates whether users are local administrators of their WorkSpaces. Valid only if `workspaceType` is set to `PERSONAL`. ### active_directory_config --> **Note:** `active_directory_config` is only valid if `workspaces_type` is set to `POOLS`. +-> **Note:** `activeDirectoryConfig` is only valid if `workspaceType` is set to `POOLS`. -* `domainName` – Fully qualified domain name of the AWS Directory Service directory. 
+* `serviceAccountSecretArn` - ARN of the Secrets Manager secret that contains the credentials for the service account. For more information, see [Service Account Details](https://docs.aws.amazon.com/workspaces/latest/adminguide/pools-service-account-details.html). ## Attribute Reference @@ -361,4 +358,4 @@ Using `terraform import`, import Workspaces directory using the directory ID. Fo % terraform import aws_workspaces_directory.main d-4444444444 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/workspaces_ip_group.html.markdown b/website/docs/cdktf/typescript/r/workspaces_ip_group.html.markdown index 91e0bda8176c..98af26cf1164 100644 --- a/website/docs/cdktf/typescript/r/workspaces_ip_group.html.markdown +++ b/website/docs/cdktf/typescript/r/workspaces_ip_group.html.markdown @@ -53,10 +53,11 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The name of the IP group. * `description` - (Optional) The description of the IP group. * `rules` - (Optional) One or more pairs specifying the IP group rule (in CIDR format) from which web requests originate. -* `tags` – (Optional) A map of tags assigned to the WorkSpaces directory. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. +* `tags` - (Optional) A map of tags assigned to the WorkSpaces directory. 
If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. ## Nested Blocks @@ -106,4 +107,4 @@ Using `terraform import`, import WorkSpaces IP groups using their GroupID. For e % terraform import aws_workspaces_ip_group.example wsipg-488lrtl3k ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/workspaces_workspace.html.markdown b/website/docs/cdktf/typescript/r/workspaces_workspace.html.markdown index 04e7440b2dd7..85ceb37aa557 100644 --- a/website/docs/cdktf/typescript/r/workspaces_workspace.html.markdown +++ b/website/docs/cdktf/typescript/r/workspaces_workspace.html.markdown @@ -67,22 +67,23 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `directoryId` - (Required) The ID of the directory for the WorkSpace. * `bundleId` - (Required) The ID of the bundle for the WorkSpace. -* `userName` – (Required) The user name of the user for the WorkSpace. This user name must exist in the directory for the WorkSpace. +* `userName` - (Required) The user name of the user for the WorkSpace. This user name must exist in the directory for the WorkSpace. * `rootVolumeEncryptionEnabled` - (Optional) Indicates whether the data stored on the root volume is encrypted. -* `userVolumeEncryptionEnabled` – (Optional) Indicates whether the data stored on the user volume is encrypted. 
-* `volumeEncryptionKey` – (Optional) The ARN of a symmetric AWS KMS customer master key (CMK) used to encrypt data stored on your WorkSpace. Amazon WorkSpaces does not support asymmetric CMKs. +* `userVolumeEncryptionEnabled` - (Optional) Indicates whether the data stored on the user volume is encrypted. +* `volumeEncryptionKey` - (Optional) The ARN of a symmetric AWS KMS customer master key (CMK) used to encrypt data stored on your WorkSpace. Amazon WorkSpaces does not support asymmetric CMKs. * `tags` - (Optional) The tags for the WorkSpace. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. -* `workspaceProperties` – (Optional) The WorkSpace properties. +* `workspaceProperties` - (Optional) The WorkSpace properties. `workspaceProperties` supports the following: -* `computeTypeName` – (Optional) The compute type. For more information, see [Amazon WorkSpaces Bundles](http://aws.amazon.com/workspaces/details/#Amazon_WorkSpaces_Bundles). Valid values are `VALUE`, `STANDARD`, `PERFORMANCE`, `POWER`, `GRAPHICS`, `POWERPRO`, `GRAPHICSPRO`, `GRAPHICS_G4DN`, and `GRAPHICSPRO_G4DN`. -* `rootVolumeSizeGib` – (Optional) The size of the root volume. -* `runningMode` – (Optional) The running mode. For more information, see [Manage the WorkSpace Running Mode](https://docs.aws.amazon.com/workspaces/latest/adminguide/running-mode.html). Valid values are `AUTO_STOP` and `ALWAYS_ON`. -* `runningModeAutoStopTimeoutInMinutes` – (Optional) The time after a user logs off when WorkSpaces are automatically stopped. Configured in 60-minute intervals. -* `userVolumeSizeGib` – (Optional) The size of the user storage. +* `computeTypeName` - (Optional) The compute type. For more information, see [Amazon WorkSpaces Bundles](http://aws.amazon.com/workspaces/details/#Amazon_WorkSpaces_Bundles). 
Valid values are `VALUE`, `STANDARD`, `PERFORMANCE`, `POWER`, `GRAPHICS`, `POWERPRO`, `GRAPHICSPRO`, `GRAPHICS_G4DN`, and `GRAPHICSPRO_G4DN`. +* `rootVolumeSizeGib` - (Optional) The size of the root volume. +* `runningMode` - (Optional) The running mode. For more information, see [Manage the WorkSpace Running Mode](https://docs.aws.amazon.com/workspaces/latest/adminguide/running-mode.html). Valid values are `AUTO_STOP` and `ALWAYS_ON`. +* `runningModeAutoStopTimeoutInMinutes` - (Optional) The time after a user logs off when WorkSpaces are automatically stopped. Configured in 60-minute intervals. +* `userVolumeSizeGib` - (Optional) The size of the user storage. ## Attribute Reference @@ -134,4 +135,4 @@ Using `terraform import`, import Workspaces using their ID. For example: % terraform import aws_workspaces_workspace.example ws-9z9zmbkhv ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/workspacesweb_browser_settings.html.markdown b/website/docs/cdktf/typescript/r/workspacesweb_browser_settings.html.markdown index 78c3f079e0c8..ff4ab8f216f3 100644 --- a/website/docs/cdktf/typescript/r/workspacesweb_browser_settings.html.markdown +++ b/website/docs/cdktf/typescript/r/workspacesweb_browser_settings.html.markdown @@ -97,6 +97,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `additionalEncryptionContext` - (Optional) Additional encryption context for the browser settings. * `customerManagedKey` - (Optional) ARN of the customer managed KMS key. * `tags` - (Optional) Map of tags assigned to the resource. 
If configured with a provider [`defaultTags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. @@ -141,4 +142,4 @@ Using `terraform import`, import WorkSpaces Web Browser Settings using the `brow % terraform import aws_workspacesweb_browser_settings.example arn:aws:workspacesweb:us-west-2:123456789012:browsersettings/abcdef12345 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/workspacesweb_browser_settings_association.html.markdown b/website/docs/cdktf/typescript/r/workspacesweb_browser_settings_association.html.markdown new file mode 100644 index 000000000000..86cccd9e828e --- /dev/null +++ b/website/docs/cdktf/typescript/r/workspacesweb_browser_settings_association.html.markdown @@ -0,0 +1,114 @@ +--- +subcategory: "WorkSpaces Web" +layout: "aws" +page_title: "AWS: aws_workspacesweb_browser_settings_association" +description: |- + Terraform resource for managing an AWS WorkSpaces Web Browser Settings Association. +--- + + + +# Resource: aws_workspacesweb_browser_settings_association + +Terraform resource for managing an AWS WorkSpaces Web Browser Settings Association. + +## Example Usage + +### Basic Usage + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { Fn, Token, TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. 
+ */ +import { WorkspaceswebBrowserSettings } from "./.gen/providers/aws/workspacesweb-browser-settings"; +import { WorkspaceswebBrowserSettingsAssociation } from "./.gen/providers/aws/workspacesweb-browser-settings-association"; +import { WorkspaceswebPortal } from "./.gen/providers/aws/workspacesweb-portal"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + const example = new WorkspaceswebBrowserSettings(this, "example", { + browserPolicy: Token.asString( + Fn.jsonencode({ + chromePolicies: { + DefaultDownloadDirectory: { + value: "/home/as2-streaming-user/MyFiles/TemporaryFiles1", + }, + }, + }) + ), + }); + const awsWorkspaceswebPortalExample = new WorkspaceswebPortal( + this, + "example_1", + { + displayName: "example", + } + ); + /*This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match.*/ + awsWorkspaceswebPortalExample.overrideLogicalId("example"); + const awsWorkspaceswebBrowserSettingsAssociationExample = + new WorkspaceswebBrowserSettingsAssociation(this, "example_2", { + browserSettingsArn: example.browserSettingsArn, + portalArn: Token.asString(awsWorkspaceswebPortalExample.portalArn), + }); + /*This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match.*/ + awsWorkspaceswebBrowserSettingsAssociationExample.overrideLogicalId( + "example" + ); + } +} + +``` + +## Argument Reference + +The following arguments are required: + +* `browserSettingsArn` - (Required) ARN of the browser settings to associate with the portal. Forces replacement if changed. +* `portalArn` - (Required) ARN of the portal to associate with the browser settings. Forces replacement if changed. + +The following arguments are optional: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). + +## Attribute Reference + +This resource exports no additional attributes. + +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import WorkSpaces Web Browser Settings Association using the `browser_settings_arn,portal_arn`. For example: + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. + */ +import { WorkspaceswebBrowserSettingsAssociation } from "./.gen/providers/aws/workspacesweb-browser-settings-association"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + WorkspaceswebBrowserSettingsAssociation.generateConfigForImport( + this, + "example", + "arn:aws:workspaces-web:us-west-2:123456789012:browserSettings/browser_settings-id-12345678,arn:aws:workspaces-web:us-west-2:123456789012:portal/portal-id-12345678" + ); + } +} + +``` + +Using `terraform import`, import WorkSpaces Web Browser Settings Association using the `browser_settings_arn,portal_arn`. 
For example: + +```console +% terraform import aws_workspacesweb_browser_settings_association.example arn:aws:workspaces-web:us-west-2:123456789012:browserSettings/browser_settings-id-12345678,arn:aws:workspaces-web:us-west-2:123456789012:portal/portal-id-12345678 +``` + + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/workspacesweb_data_protection_settings.html.markdown b/website/docs/cdktf/typescript/r/workspacesweb_data_protection_settings.html.markdown index 75259faf786e..b368908ca319 100644 --- a/website/docs/cdktf/typescript/r/workspacesweb_data_protection_settings.html.markdown +++ b/website/docs/cdktf/typescript/r/workspacesweb_data_protection_settings.html.markdown @@ -24,12 +24,12 @@ import { TerraformStack } from "cdktf"; * Provider bindings are generated by running `cdktf get`. * See https://cdk.tf/provider-generation for more details. */ -import { WorkspaceswebDataProtectionSettings } from "./.gen/providers/aws/"; +import { WorkspaceswebDataProtectionSettings } from "./.gen/providers/aws/workspacesweb-data-protection-settings"; class MyConvertedCode extends TerraformStack { constructor(scope: Construct, name: string) { super(scope, name); new WorkspaceswebDataProtectionSettings(this, "example", { - display_name: "example", + displayName: "example", }); } } @@ -46,25 +46,25 @@ import { TerraformStack } from "cdktf"; * Provider bindings are generated by running `cdktf get`. * See https://cdk.tf/provider-generation for more details. 
*/ -import { WorkspaceswebDataProtectionSettings } from "./.gen/providers/aws/"; +import { WorkspaceswebDataProtectionSettings } from "./.gen/providers/aws/workspacesweb-data-protection-settings"; class MyConvertedCode extends TerraformStack { constructor(scope: Construct, name: string) { super(scope, name); new WorkspaceswebDataProtectionSettings(this, "example", { description: "Example data protection settings", - display_name: "example", - inline_redaction_configuration: [ + displayName: "example", + inlineRedactionConfiguration: [ { - global_confidence_level: 2, - global_enforced_urls: ["https://example.com"], - inline_redaction_pattern: [ + globalConfidenceLevel: 2, + globalEnforcedUrls: ["https://example.com"], + inlineRedactionPattern: [ { - built_in_pattern_id: "ssn", - confidence_level: 3, - redaction_place_holder: [ + builtInPatternId: "ssn", + confidenceLevel: 3, + redactionPlaceHolder: [ { - redaction_place_holder_text: "REDACTED", - redaction_place_holder_type: "CustomText", + redactionPlaceHolderText: "REDACTED", + redactionPlaceHolderType: "CustomText", }, ], }, @@ -141,48 +141,49 @@ The following arguments are optional: * `additionalEncryptionContext` - (Optional) Additional encryption context for the data protection settings. * `customerManagedKey` - (Optional) ARN of the customer managed KMS key. * `description` - (Optional) The description of the data protection settings. -* `inline_redaction_configuration` - (Optional) The inline redaction configuration of the data protection settings. Detailed below. +* `inlineRedactionConfiguration` - (Optional) The inline redaction configuration of the data protection settings. Detailed below. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
* `tags` - (Optional) Map of tags assigned to the resource. If configured with a provider [`defaultTags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. ### inline_redaction_configuration -* `global_confidence_level` - (Optional) The global confidence level for the inline redaction configuration. This indicates the certainty of data type matches in the redaction process. Values range from 1 (low confidence) to 3 (high confidence). -* `global_enforced_urls` - (Optional) The global enforced URL configuration for the inline redaction configuration. -* `global_exempt_urls` - (Optional) The global exempt URL configuration for the inline redaction configuration. -* `inline_redaction_pattern` - (Optional) The inline redaction patterns to be enabled for the inline redaction configuration. Detailed below. +* `globalConfidenceLevel` - (Optional) The global confidence level for the inline redaction configuration. This indicates the certainty of data type matches in the redaction process. Values range from 1 (low confidence) to 3 (high confidence). +* `globalEnforcedUrls` - (Optional) The global enforced URL configuration for the inline redaction configuration. +* `globalExemptUrls` - (Optional) The global exempt URL configuration for the inline redaction configuration. +* `inlineRedactionPattern` - (Optional) The inline redaction patterns to be enabled for the inline redaction configuration. Detailed below. ### inline_redaction_pattern -* `built_in_pattern_id` - (Optional) The built-in pattern from the list of preconfigured patterns. Either a `custom_pattern` or `built_in_pattern_id` is required. -* `confidence_level` - (Optional) The confidence level for inline redaction pattern. This indicates the certainty of data type matches in the redaction process. Values range from 1 (low confidence) to 3 (high confidence). 
-* `custom_pattern` - (Optional) The configuration for a custom pattern. Either a `custom_pattern` or `built_in_pattern_id` is required. Detailed below. -* `enforced_urls` - (Optional) The enforced URL configuration for the inline redaction pattern. -* `exempt_urls` - (Optional) The exempt URL configuration for the inline redaction pattern. -* `redaction_place_holder` - (Required) The redaction placeholder that will replace the redacted text in session. Detailed below. +* `builtInPatternId` - (Optional) The built-in pattern from the list of preconfigured patterns. Either a `customPattern` or `builtInPatternId` is required. +* `confidenceLevel` - (Optional) The confidence level for inline redaction pattern. This indicates the certainty of data type matches in the redaction process. Values range from 1 (low confidence) to 3 (high confidence). +* `customPattern` - (Optional) The configuration for a custom pattern. Either a `customPattern` or `builtInPatternId` is required. Detailed below. +* `enforcedUrls` - (Optional) The enforced URL configuration for the inline redaction pattern. +* `exemptUrls` - (Optional) The exempt URL configuration for the inline redaction pattern. +* `redactionPlaceHolder` - (Required) The redaction placeholder that will replace the redacted text in session. Detailed below. ### custom_pattern -* `pattern_name` - (Required) The pattern name for the custom pattern. -* `pattern_regex` - (Required) The pattern regex for the customer pattern. The format must follow JavaScript regex format. -* `keyword_regex` - (Optional) The keyword regex for the customer pattern. -* `pattern_description` - (Optional) The pattern description for the customer pattern. +* `patternName` - (Required) The pattern name for the custom pattern. +* `patternRegex` - (Required) The pattern regex for the customer pattern. The format must follow JavaScript regex format. +* `keywordRegex` - (Optional) The keyword regex for the customer pattern. 
+* `patternDescription` - (Optional) The pattern description for the customer pattern. ### redaction_place_holder -* `redaction_place_holder_type` - (Required) The redaction placeholder type that will replace the redacted text in session. Currently, only `CustomText` is supported. -* `redaction_place_holder_text` - (Optional) The redaction placeholder text that will replace the redacted text in session for the custom text redaction placeholder type. +* `redactionPlaceHolderType` - (Required) The redaction placeholder type that will replace the redacted text in session. Currently, only `CustomText` is supported. +* `redactionPlaceHolderText` - (Optional) The redaction placeholder text that will replace the redacted text in session for the custom text redaction placeholder type. ## Attribute Reference This resource exports the following attributes in addition to the arguments above: -* `data_protection_settings_arn` - ARN of the data protection settings resource. +* `dataProtectionSettingsArn` - ARN of the data protection settings resource. * `associatedPortalArns` - List of web portal ARNs that this data protection settings resource is associated with. * `tagsAll` - Map of tags assigned to the resource, including those inherited from the provider [`defaultTags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block). ## Import -In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import WorkSpaces Web Data Protection Settings using the `data_protection_settings_arn`. For example: +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import WorkSpaces Web Data Protection Settings using the `dataProtectionSettingsArn`. For example: ```typescript // DO NOT EDIT. 
Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug @@ -192,7 +193,7 @@ import { TerraformStack } from "cdktf"; * Provider bindings are generated by running `cdktf get`. * See https://cdk.tf/provider-generation for more details. */ -import { WorkspaceswebDataProtectionSettings } from "./.gen/providers/aws/"; +import { WorkspaceswebDataProtectionSettings } from "./.gen/providers/aws/workspacesweb-data-protection-settings"; class MyConvertedCode extends TerraformStack { constructor(scope: Construct, name: string) { super(scope, name); @@ -206,10 +207,10 @@ class MyConvertedCode extends TerraformStack { ``` -Using `terraform import`, import WorkSpaces Web Data Protection Settings using the `data_protection_settings_arn`. For example: +Using `terraform import`, import WorkSpaces Web Data Protection Settings using the `dataProtectionSettingsArn`. For example: ```console % terraform import aws_workspacesweb_data_protection_settings.example arn:aws:workspaces-web:us-west-2:123456789012:dataprotectionsettings/abcdef12345 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/workspacesweb_data_protection_settings_association.html.markdown b/website/docs/cdktf/typescript/r/workspacesweb_data_protection_settings_association.html.markdown new file mode 100644 index 000000000000..5ad2f6955cf3 --- /dev/null +++ b/website/docs/cdktf/typescript/r/workspacesweb_data_protection_settings_association.html.markdown @@ -0,0 +1,100 @@ +--- +subcategory: "WorkSpaces Web" +layout: "aws" +page_title: "AWS: aws_workspacesweb_data_protection_settings_association" +description: |- + Terraform resource for managing an AWS WorkSpaces Web Data Protection Settings Association. +--- + + + +# Resource: aws_workspacesweb_data_protection_settings_association + +Terraform resource for managing an AWS WorkSpaces Web Data Protection Settings Association. + +## Example Usage + +### Basic Usage + +```typescript +// DO NOT EDIT. 
Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { Token, TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. + */ +import { WorkspaceswebDataProtectionSettings } from "./.gen/providers/aws/workspacesweb-data-protection-settings"; +import { WorkspaceswebDataProtectionSettingsAssociation } from "./.gen/providers/aws/workspacesweb-data-protection-settings-association"; +import { WorkspaceswebPortal } from "./.gen/providers/aws/workspacesweb-portal"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + const example = new WorkspaceswebDataProtectionSettings(this, "example", { + displayName: "example", + }); + const awsWorkspaceswebPortalExample = new WorkspaceswebPortal( + this, + "example_1", + { + displayName: "example", + } + ); + /*This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match.*/ + awsWorkspaceswebPortalExample.overrideLogicalId("example"); + const awsWorkspaceswebDataProtectionSettingsAssociationExample = + new WorkspaceswebDataProtectionSettingsAssociation(this, "example_2", { + dataProtectionSettingsArn: example.dataProtectionSettingsArn, + portalArn: Token.asString(awsWorkspaceswebPortalExample.portalArn), + }); + /*This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match.*/ + awsWorkspaceswebDataProtectionSettingsAssociationExample.overrideLogicalId( + "example" + ); + } +} + +``` + +## Argument Reference + +The following arguments are required: + +* `dataProtectionSettingsArn` - (Required) ARN of the data protection settings to associate with the portal. Forces replacement if changed. 
+* `portalArn` - (Required) ARN of the portal to associate with the data protection settings. Forces replacement if changed. + +The following arguments are optional: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). + +## Attribute Reference + +This resource exports no additional attributes. + +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import WorkSpaces Web Data Protection Settings Association using the `data_protection_settings_arn,portal_arn`. For example: + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. 
+ */ +import { WorkspaceswebDataProtectionSettingsAssociation } from "./.gen/providers/aws/workspacesweb-data-protection-settings-association"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + WorkspaceswebDataProtectionSettingsAssociation.generateConfigForImport( + this, + "example", + "arn:aws:workspaces-web:us-west-2:123456789012:dataProtectionSettings/data_protection_settings-id-12345678,arn:aws:workspaces-web:us-west-2:123456789012:portal/portal-id-12345678" + ); + } +} + +``` + + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/workspacesweb_identity_provider.html.markdown b/website/docs/cdktf/typescript/r/workspacesweb_identity_provider.html.markdown new file mode 100644 index 000000000000..3553c1002637 --- /dev/null +++ b/website/docs/cdktf/typescript/r/workspacesweb_identity_provider.html.markdown @@ -0,0 +1,173 @@ +--- +subcategory: "WorkSpaces Web" +layout: "aws" +page_title: "AWS: aws_workspacesweb_identity_provider" +description: |- + Terraform resource for managing an AWS WorkSpaces Web Identity Provider. +--- + + + +# Resource: aws_workspacesweb_identity_provider + +Terraform resource for managing an AWS WorkSpaces Web Identity Provider. + +## Example Usage + +### Basic Usage with SAML + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. 
+ */ +import { WorkspaceswebIdentityProvider } from "./.gen/providers/aws/workspacesweb-identity-provider"; +import { WorkspaceswebPortal } from "./.gen/providers/aws/workspacesweb-portal"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + const example = new WorkspaceswebPortal(this, "example", { + displayName: "example", + }); + const awsWorkspaceswebIdentityProviderExample = + new WorkspaceswebIdentityProvider(this, "example_1", { + identityProviderDetails: { + MetadataURL: "https://example.com/metadata", + }, + identityProviderName: "example-saml", + identityProviderType: "SAML", + portalArn: example.portalArn, + }); + /*This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match.*/ + awsWorkspaceswebIdentityProviderExample.overrideLogicalId("example"); + } +} + +``` + +### OIDC Identity Provider + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. 
+ */ +import { WorkspaceswebIdentityProvider } from "./.gen/providers/aws/workspacesweb-identity-provider"; +import { WorkspaceswebPortal } from "./.gen/providers/aws/workspacesweb-portal"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + const test = new WorkspaceswebPortal(this, "test", { + displayName: "test", + }); + const awsWorkspaceswebIdentityProviderTest = + new WorkspaceswebIdentityProvider(this, "test_1", { + identityProviderDetails: { + attributes_request_method: "POST", + authorize_scopes: "openid, email", + client_id: "test-client-id", + client_secret: "test-client-secret", + oidc_issuer: "https://accounts.google.com", + }, + identityProviderName: "test-updated", + identityProviderType: "OIDC", + portalArn: test.portalArn, + }); + /*This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match.*/ + awsWorkspaceswebIdentityProviderTest.overrideLogicalId("test"); + } +} + +``` + +## Argument Reference + +The following arguments are required: + +* `identityProviderDetails` - (Required) Identity provider details. 
The following list describes the provider detail keys for each identity provider type: + * For Google and Login with Amazon: + * `clientId` + * `clientSecret` + * `authorize_scopes` + * For Facebook: + * `clientId` + * `clientSecret` + * `authorize_scopes` + * `apiVersion` + * For Sign in with Apple: + * `clientId` + * `teamId` + * `keyId` + * `privateKey` + * `authorize_scopes` + * For OIDC providers: + * `clientId` + * `clientSecret` + * `attributes_request_method` + * `oidc_issuer` + * `authorize_scopes` + * `authorize_url` if not available from discovery URL specified by `oidc_issuer` key + * `tokenUrl` if not available from discovery URL specified by `oidc_issuer` key + * `attributes_url` if not available from discovery URL specified by `oidc_issuer` key + * `jwksUri` if not available from discovery URL specified by `oidc_issuer` key + * For SAML providers: + * `MetadataFile` OR `MetadataURL` + * `IDPSignout` (boolean) optional + * `IDPInit` (boolean) optional + * `RequestSigningAlgorithm` (string) optional - Only accepts rsa-sha256 + * `EncryptedResponses` (boolean) optional +* `identityProviderName` - (Required) Identity provider name. +* `identityProviderType` - (Required) Identity provider type. Valid values: `SAML`, `Facebook`, `Google`, `LoginWithAmazon`, `SignInWithApple`, `OIDC`. +* `portalArn` - (Required) ARN of the web portal. Forces replacement if changed. + +The following arguments are optional: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `tags` - (Optional) Map of tags to assign to the resource. 
If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. + +## Attribute Reference + +This resource exports the following attributes in addition to the arguments above: + +* `identityProviderArn` - ARN of the identity provider. +* `tagsAll` - Map of tags assigned to the resource, including those inherited from the provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). + +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import WorkSpaces Web Identity Provider using the `identityProviderArn`. For example: + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. + */ +import { WorkspaceswebIdentityProvider } from "./.gen/providers/aws/workspacesweb-identity-provider"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + WorkspaceswebIdentityProvider.generateConfigForImport( + this, + "example", + "arn:aws:workspaces-web:us-west-2:123456789012:identityprovider/abcdef12345678/12345678-1234-1234-1234-123456789012" + ); + } +} + +``` + +Using `terraform import`, import WorkSpaces Web Identity Provider using the `identityProviderArn`. 
For example: + +```console +% terraform import aws_workspacesweb_identity_provider.example arn:aws:workspaces-web:us-west-2:123456789012:identityprovider/abcdef12345678/12345678-1234-1234-1234-123456789012 +``` + + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/workspacesweb_ip_access_settings.html.markdown b/website/docs/cdktf/typescript/r/workspacesweb_ip_access_settings.html.markdown index b07fe90c4623..04368fd4ed62 100644 --- a/website/docs/cdktf/typescript/r/workspacesweb_ip_access_settings.html.markdown +++ b/website/docs/cdktf/typescript/r/workspacesweb_ip_access_settings.html.markdown @@ -24,15 +24,15 @@ import { TerraformStack } from "cdktf"; * Provider bindings are generated by running `cdktf get`. * See https://cdk.tf/provider-generation for more details. */ -import { WorkspaceswebIpAccessSettings } from "./.gen/providers/aws/"; +import { WorkspaceswebIpAccessSettings } from "./.gen/providers/aws/workspacesweb-ip-access-settings"; class MyConvertedCode extends TerraformStack { constructor(scope: Construct, name: string) { super(scope, name); new WorkspaceswebIpAccessSettings(this, "example", { - display_name: "example", - ip_rule: [ + displayName: "example", + ipRule: [ { - ip_range: "10.0.0.0/16", + ipRange: "10.0.0.0/16", }, ], }); @@ -51,21 +51,21 @@ import { TerraformStack } from "cdktf"; * Provider bindings are generated by running `cdktf get`. * See https://cdk.tf/provider-generation for more details. 
*/ -import { WorkspaceswebIpAccessSettings } from "./.gen/providers/aws/"; +import { WorkspaceswebIpAccessSettings } from "./.gen/providers/aws/workspacesweb-ip-access-settings"; class MyConvertedCode extends TerraformStack { constructor(scope: Construct, name: string) { super(scope, name); new WorkspaceswebIpAccessSettings(this, "example", { description: "Example IP access settings", - display_name: "example", - ip_rule: [ + displayName: "example", + ipRule: [ { description: "Main office", - ip_range: "10.0.0.0/16", + ipRange: "10.0.0.0/16", }, { description: "Branch office", - ip_range: "192.168.0.0/24", + ipRange: "192.168.0.0/24", }, ], }); @@ -84,8 +84,8 @@ import { TerraformStack } from "cdktf"; * Provider bindings are generated by running `cdktf get`. * See https://cdk.tf/provider-generation for more details. */ -import { WorkspaceswebIpAccessSettings } from "./.gen/providers/aws/"; import { KmsKey } from "./.gen/providers/aws/kms-key"; +import { WorkspaceswebIpAccessSettings } from "./.gen/providers/aws/workspacesweb-ip-access-settings"; class MyConvertedCode extends TerraformStack { constructor(scope: Construct, name: string) { super(scope, name); @@ -95,22 +95,20 @@ class MyConvertedCode extends TerraformStack { }); const awsWorkspaceswebIpAccessSettingsExample = new WorkspaceswebIpAccessSettings(this, "example_1", { - additional_encryption_context: [ - { - Environment: "Production", - }, - ], - customer_managed_key: example.arn, + additionalEncryptionContext: { + Environment: "Production", + }, + customerManagedKey: example.arn, description: "Example IP access settings", - display_name: "example", - ip_rule: [ + displayName: "example", + ipRule: [ { description: "Main office", - ip_range: "10.0.0.0/16", + ipRange: "10.0.0.0/16", }, { description: "Branch office", - ip_range: "192.168.0.0/24", + ipRange: "192.168.0.0/24", }, ], tags: { @@ -129,13 +127,14 @@ class MyConvertedCode extends TerraformStack { The following arguments are required: * 
`displayName` - (Required) The display name of the IP access settings. -* `ip_rule` - (Required) The IP rules of the IP access settings. See [IP Rule](#ip-rules) below. +* `ipRule` - (Required) The IP rules of the IP access settings. See [IP Rule](#ip-rules) below. The following arguments are optional: * `additionalEncryptionContext` - (Optional) Additional encryption context for the IP access settings. * `customerManagedKey` - (Optional) ARN of the customer managed KMS key. * `description` - (Optional) The description of the IP access settings. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `tags` - (Optional) Map of tags assigned to the resource. If configured with a provider [`defaultTags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. ### IP Rules @@ -148,12 +147,12 @@ The following arguments are optional: This resource exports the following attributes in addition to the arguments above: * `associatedPortalArns` - List of web portal ARNs that this IP access settings resource is associated with. -* `ip_access_settings_arn` - ARN of the IP access settings resource. +* `ipAccessSettingsArn` - ARN of the IP access settings resource. * `tagsAll` - Map of tags assigned to the resource, including those inherited from the provider [`defaultTags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block). ## Import -In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import WorkSpaces Web IP Access Settings using the `ip_access_settings_arn`. 
For example: +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import WorkSpaces Web IP Access Settings using the `ipAccessSettingsArn`. For example: ```typescript // DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug @@ -163,7 +162,7 @@ import { TerraformStack } from "cdktf"; * Provider bindings are generated by running `cdktf get`. * See https://cdk.tf/provider-generation for more details. */ -import { WorkspaceswebIpAccessSettings } from "./.gen/providers/aws/"; +import { WorkspaceswebIpAccessSettings } from "./.gen/providers/aws/workspacesweb-ip-access-settings"; class MyConvertedCode extends TerraformStack { constructor(scope: Construct, name: string) { super(scope, name); @@ -177,10 +176,10 @@ class MyConvertedCode extends TerraformStack { ``` -Using `terraform import`, import WorkSpaces Web IP Access Settings using the `ip_access_settings_arn`. For example: +Using `terraform import`, import WorkSpaces Web IP Access Settings using the `ipAccessSettingsArn`. For example: ```console % terraform import aws_workspacesweb_ip_access_settings.example arn:aws:workspaces-web:us-west-2:123456789012:ipAccessSettings/abcdef12345 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/workspacesweb_ip_access_settings_association.html.markdown b/website/docs/cdktf/typescript/r/workspacesweb_ip_access_settings_association.html.markdown new file mode 100644 index 000000000000..5e97cc87f4dd --- /dev/null +++ b/website/docs/cdktf/typescript/r/workspacesweb_ip_access_settings_association.html.markdown @@ -0,0 +1,105 @@ +--- +subcategory: "WorkSpaces Web" +layout: "aws" +page_title: "AWS: aws_workspacesweb_ip_access_settings_association" +description: |- + Terraform resource for managing an AWS WorkSpaces Web IP Access Settings Association. 
+--- + + + +# Resource: aws_workspacesweb_ip_access_settings_association + +Terraform resource for managing an AWS WorkSpaces Web IP Access Settings Association. + +## Example Usage + +### Basic Usage + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { Token, TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. + */ +import { WorkspaceswebIpAccessSettings } from "./.gen/providers/aws/workspacesweb-ip-access-settings"; +import { WorkspaceswebIpAccessSettingsAssociation } from "./.gen/providers/aws/workspacesweb-ip-access-settings-association"; +import { WorkspaceswebPortal } from "./.gen/providers/aws/workspacesweb-portal"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + const example = new WorkspaceswebIpAccessSettings(this, "example", { + displayName: "example", + ipRule: [ + { + ipRange: "10.0.0.0/16", + }, + ], + }); + const awsWorkspaceswebPortalExample = new WorkspaceswebPortal( + this, + "example_1", + { + displayName: "example", + } + ); + /*This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match.*/ + awsWorkspaceswebPortalExample.overrideLogicalId("example"); + const awsWorkspaceswebIpAccessSettingsAssociationExample = + new WorkspaceswebIpAccessSettingsAssociation(this, "example_2", { + ipAccessSettingsArn: example.ipAccessSettingsArn, + portalArn: Token.asString(awsWorkspaceswebPortalExample.portalArn), + }); + /*This allows the Terraform resource name to match the original name. 
You can remove the call if you don't need them to match.*/ + awsWorkspaceswebIpAccessSettingsAssociationExample.overrideLogicalId( + "example" + ); + } +} + +``` + +## Argument Reference + +The following arguments are required: + +* `ipAccessSettingsArn` - (Required) ARN of the IP access settings to associate with the portal. Forces replacement if changed. +* `portalArn` - (Required) ARN of the portal to associate with the IP access settings. Forces replacement if changed. + +The following arguments are optional: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). + +## Attribute Reference + +This resource exports no additional attributes. + +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import WorkSpaces Web IP Access Settings Association using the `ip_access_settings_arn,portal_arn`. For example: + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. 
+ */ +import { WorkspaceswebIpAccessSettingsAssociation } from "./.gen/providers/aws/workspacesweb-ip-access-settings-association"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + WorkspaceswebIpAccessSettingsAssociation.generateConfigForImport( + this, + "example", + "arn:aws:workspaces-web:us-west-2:123456789012:ipAccessSettings/ip_access_settings-id-12345678,arn:aws:workspaces-web:us-west-2:123456789012:portal/portal-id-12345678" + ); + } +} + +``` + + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/workspacesweb_network_settings.html.markdown b/website/docs/cdktf/typescript/r/workspacesweb_network_settings.html.markdown index 728e5d6ad8fb..09ca55066503 100644 --- a/website/docs/cdktf/typescript/r/workspacesweb_network_settings.html.markdown +++ b/website/docs/cdktf/typescript/r/workspacesweb_network_settings.html.markdown @@ -90,6 +90,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `tags` - (Optional) Map of tags assigned to the resource. If configured with a provider [`defaultTags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. 
## Attribute Reference @@ -132,4 +133,4 @@ Using `terraform import`, import WorkSpaces Web Network Settings using the `netw % terraform import aws_workspacesweb_network_settings.example arn:aws:workspacesweb:us-west-2:123456789012:networksettings/abcdef12345 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/workspacesweb_network_settings_association.html.markdown b/website/docs/cdktf/typescript/r/workspacesweb_network_settings_association.html.markdown new file mode 100644 index 000000000000..eab09affcb23 --- /dev/null +++ b/website/docs/cdktf/typescript/r/workspacesweb_network_settings_association.html.markdown @@ -0,0 +1,171 @@ +--- +subcategory: "WorkSpaces Web" +layout: "aws" +page_title: "AWS: aws_workspacesweb_network_settings_association" +description: |- + Terraform resource for managing an AWS WorkSpaces Web Network Settings Association. +--- + + + +# Resource: aws_workspacesweb_network_settings_association + +Terraform resource for managing an AWS WorkSpaces Web Network Settings Association. + +## Example Usage + +### Basic Usage + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { Token, TerraformCount, Fn, TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. 
+ */ +import { DataAwsAvailabilityZones } from "./.gen/providers/aws/data-aws-availability-zones"; +import { SecurityGroup } from "./.gen/providers/aws/security-group"; +import { Subnet } from "./.gen/providers/aws/subnet"; +import { Vpc } from "./.gen/providers/aws/vpc"; +import { WorkspaceswebNetworkSettings } from "./.gen/providers/aws/workspacesweb-network-settings"; +import { WorkspaceswebNetworkSettingsAssociation } from "./.gen/providers/aws/workspacesweb-network-settings-association"; +import { WorkspaceswebPortal } from "./.gen/providers/aws/workspacesweb-portal"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + const example = new Vpc(this, "example", { + cidrBlock: "10.0.0.0/16", + tags: { + Name: "example", + }, + }); + const awsWorkspaceswebPortalExample = new WorkspaceswebPortal( + this, + "example_1", + { + displayName: "example", + } + ); + /*This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match.*/ + awsWorkspaceswebPortalExample.overrideLogicalId("example"); + const available = new DataAwsAvailabilityZones(this, "available", { + filter: [ + { + name: "opt-in-status", + values: ["opt-in-not-required"], + }, + ], + state: "available", + }); + /*In most cases loops should be handled in the programming language context and + not inside of the Terraform context. If you are looping over something external, e.g. a variable or a file input + you should consider using a for loop. If you are looping over something only known to Terraform, e.g. 
a result of a data source + you need to keep this like it is.*/ + const exampleCount = TerraformCount.of(Token.asNumber("2")); + const awsSecurityGroupExample = new SecurityGroup(this, "example_3", { + name: "example-${" + exampleCount.index + "}", + tags: { + Name: "example", + }, + vpcId: example.id, + count: exampleCount, + }); + /*This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match.*/ + awsSecurityGroupExample.overrideLogicalId("example"); + /*In most cases loops should be handled in the programming language context and + not inside of the Terraform context. If you are looping over something external, e.g. a variable or a file input + you should consider using a for loop. If you are looping over something only known to Terraform, e.g. a result of a data source + you need to keep this like it is.*/ + const awsSubnetExampleCount = TerraformCount.of(Token.asNumber("2")); + const awsSubnetExample = new Subnet(this, "example_4", { + availabilityZone: Token.asString( + Fn.lookupNested(available.names, [awsSubnetExampleCount.index]) + ), + cidrBlock: Token.asString( + Fn.cidrsubnet( + example.cidrBlock, + 8, + Token.asNumber(awsSubnetExampleCount.index) + ) + ), + tags: { + Name: "example", + }, + vpcId: example.id, + count: awsSubnetExampleCount, + }); + /*This allows the Terraform resource name to match the original name. 
You can remove the call if you don't need them to match.*/ + awsSubnetExample.overrideLogicalId("example"); + const awsWorkspaceswebNetworkSettingsExample = + new WorkspaceswebNetworkSettings(this, "example_5", { + securityGroupIds: [ + Token.asString(Fn.lookupNested(awsSecurityGroupExample, ["0", "id"])), + Token.asString(Fn.lookupNested(awsSecurityGroupExample, ["1", "id"])), + ], + subnetIds: [ + Token.asString(Fn.lookupNested(awsSubnetExample, ["0", "id"])), + Token.asString(Fn.lookupNested(awsSubnetExample, ["1", "id"])), + ], + vpcId: example.id, + }); + /*This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match.*/ + awsWorkspaceswebNetworkSettingsExample.overrideLogicalId("example"); + const awsWorkspaceswebNetworkSettingsAssociationExample = + new WorkspaceswebNetworkSettingsAssociation(this, "example_6", { + networkSettingsArn: Token.asString( + awsWorkspaceswebNetworkSettingsExample.networkSettingsArn + ), + portalArn: Token.asString(awsWorkspaceswebPortalExample.portalArn), + }); + /*This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match.*/ + awsWorkspaceswebNetworkSettingsAssociationExample.overrideLogicalId( + "example" + ); + } +} + +``` + +## Argument Reference + +The following arguments are required: + +* `networkSettingsArn` - (Required) ARN of the network settings to associate with the portal. Forces replacement if changed. +* `portalArn` - (Required) ARN of the portal to associate with the network settings. Forces replacement if changed. + +The following arguments are optional: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
+ +## Attribute Reference + +This resource exports no additional attributes. + +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import WorkSpaces Web Network Settings Association using the `network_settings_arn,portal_arn`. For example: + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. + */ +import { WorkspaceswebNetworkSettingsAssociation } from "./.gen/providers/aws/workspacesweb-network-settings-association"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + WorkspaceswebNetworkSettingsAssociation.generateConfigForImport( + this, + "example", + "arn:aws:workspaces-web:us-west-2:123456789012:networkSettings/network_settings-id-12345678,arn:aws:workspaces-web:us-west-2:123456789012:portal/portal-id-12345678" + ); + } +} + +``` + + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/workspacesweb_portal.html.markdown b/website/docs/cdktf/typescript/r/workspacesweb_portal.html.markdown new file mode 100644 index 000000000000..3a6d45d94ef2 --- /dev/null +++ b/website/docs/cdktf/typescript/r/workspacesweb_portal.html.markdown @@ -0,0 +1,164 @@ +--- +subcategory: "WorkSpaces Web" +layout: "aws" +page_title: "AWS: aws_workspacesweb_portal" +description: |- + Terraform resource for managing an AWS WorkSpaces Web Portal. +--- + + + +# Resource: aws_workspacesweb_portal + +Terraform resource for managing an AWS WorkSpaces Web Portal. + +## Example Usage + +### Basic Usage + +```typescript +// DO NOT EDIT. 
Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. + */ +import { WorkspaceswebPortal } from "./.gen/providers/aws/workspacesweb-portal"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + new WorkspaceswebPortal(this, "example", { + displayName: "example-portal", + instanceType: "standard.regular", + }); + } +} + +``` + +### Complete Usage + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. + */ +import { KmsKey } from "./.gen/providers/aws/kms-key"; +import { WorkspaceswebPortal } from "./.gen/providers/aws/workspacesweb-portal"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + const example = new KmsKey(this, "example", { + deletionWindowInDays: 7, + description: "KMS key for WorkSpaces Web Portal", + }); + const awsWorkspaceswebPortalExample = new WorkspaceswebPortal( + this, + "example_1", + { + additionalEncryptionContext: { + Environment: "Production", + }, + authenticationType: "IAM_Identity_Center", + customerManagedKey: example.arn, + displayName: "example-portal", + instanceType: "standard.large", + maxConcurrentSessions: 10, + tags: { + Name: "example-portal", + }, + timeouts: [ + { + create: "10m", + delete: "10m", + update: "10m", + }, + ], + } + ); + /*This allows the Terraform resource name to match the original name. 
You can remove the call if you don't need them to match.*/ + awsWorkspaceswebPortalExample.overrideLogicalId("example"); + } +} + +``` + +## Argument Reference + +The following arguments are optional: + +* `additionalEncryptionContext` - (Optional) Additional encryption context for the customer managed key. Forces replacement if changed. +* `authenticationType` - (Optional) Authentication type for the portal. Valid values: `Standard`, `IAM_Identity_Center`. +* `browserSettingsArn` - (Optional) ARN of the browser settings to use for the portal. +* `customerManagedKey` - (Optional) ARN of the customer managed key. Forces replacement if changed. +* `displayName` - (Optional) Display name of the portal. +* `instanceType` - (Optional) Instance type for the portal. Valid values: `standard.regular`, `standard.large`. +* `maxConcurrentSessions` - (Optional) Maximum number of concurrent sessions for the portal. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `tags` - (Optional) Key-value map of resource tags. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. + +## Attribute Reference + +This resource exports the following attributes in addition to the arguments above: + +* `browserType` - Browser type of the portal. +* `creationDate` - Creation date of the portal. +* `dataProtectionSettingsArn` - ARN of the data protection settings associated with the portal. +* `ipAccessSettingsArn` - ARN of the IP access settings associated with the portal. +* `networkSettingsArn` - ARN of the network settings associated with the portal. 
+* `portalArn` - ARN of the portal. +* `portalEndpoint` - Endpoint URL of the portal. +* `portalStatus` - Status of the portal. +* `rendererType` - Renderer type of the portal. +* `sessionLoggerArn` - ARN of the session logger associated with the portal. +* `statusReason` - Reason for the current status of the portal. +* `tagsAll` - Map of tags assigned to the resource, including those inherited from the provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). +* `trustStoreArn` - ARN of the trust store associated with the portal. +* `userAccessLoggingSettingsArn` - ARN of the user access logging settings associated with the portal. +* `userSettingsArn` - ARN of the user settings associated with the portal. + +## Timeouts + +[Configuration options](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts): + +* `create` - (Default `5m`) +* `update` - (Default `5m`) +* `delete` - (Default `5m`) + +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import WorkSpaces Web Portal using the `portalArn`. For example: + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. + */ +import { WorkspaceswebPortal } from "./.gen/providers/aws/workspacesweb-portal"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + WorkspaceswebPortal.generateConfigForImport( + this, + "example", + "arn:aws:workspaces-web:us-west-2:123456789012:portal/abcdef12345678" + ); + } +} + +``` + +Using `terraform import`, import WorkSpaces Web Portal using the `portalArn`. 
For example: + +```console +% terraform import aws_workspacesweb_portal.example arn:aws:workspaces-web:us-west-2:123456789012:portal/abcdef12345678 +``` + + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/workspacesweb_session_logger.html.markdown b/website/docs/cdktf/typescript/r/workspacesweb_session_logger.html.markdown new file mode 100644 index 000000000000..3e3e182279c8 --- /dev/null +++ b/website/docs/cdktf/typescript/r/workspacesweb_session_logger.html.markdown @@ -0,0 +1,309 @@ +--- +subcategory: "WorkSpaces Web" +layout: "aws" +page_title: "AWS: aws_workspacesweb_session_logger" +description: |- + Terraform resource for managing an AWS WorkSpaces Web Session Logger. +--- + + + +# Resource: aws_workspacesweb_session_logger + +Terraform resource for managing an AWS WorkSpaces Web Session Logger. + +## Example Usage + +### Basic Usage + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { Token, TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. 
+ */ +import { DataAwsIamPolicyDocument } from "./.gen/providers/aws/data-aws-iam-policy-document"; +import { S3Bucket } from "./.gen/providers/aws/s3-bucket"; +import { S3BucketPolicy } from "./.gen/providers/aws/s3-bucket-policy"; +import { WorkspaceswebSessionLogger } from "./.gen/providers/aws/workspacesweb-session-logger"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + const example = new S3Bucket(this, "example", { + bucket: "example-session-logs", + }); + const dataAwsIamPolicyDocumentExample = new DataAwsIamPolicyDocument( + this, + "example_1", + { + statement: [ + { + actions: ["s3:PutObject"], + effect: "Allow", + principals: [ + { + identifiers: ["workspaces-web.amazonaws.com"], + type: "Service", + }, + ], + resources: ["${" + example.arn + "}/*"], + }, + ], + } + ); + /*This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match.*/ + dataAwsIamPolicyDocumentExample.overrideLogicalId("example"); + const awsS3BucketPolicyExample = new S3BucketPolicy(this, "example_2", { + bucket: example.id, + policy: Token.asString(dataAwsIamPolicyDocumentExample.json), + }); + /*This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match.*/ + awsS3BucketPolicyExample.overrideLogicalId("example"); + const awsWorkspaceswebSessionLoggerExample = new WorkspaceswebSessionLogger( + this, + "example_3", + { + dependsOn: [awsS3BucketPolicyExample], + displayName: "example-session-logger", + eventFilter: [ + { + all: [{}], + }, + ], + logConfiguration: [ + { + s3: [ + { + bucket: example.id, + folderStructure: "Flat", + logFileFormat: "Json", + }, + ], + }, + ], + } + ); + /*This allows the Terraform resource name to match the original name. 
You can remove the call if you don't need them to match.*/ + awsWorkspaceswebSessionLoggerExample.overrideLogicalId("example"); + } +} + +``` + +### Complete Configuration with KMS Encryption + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { Token, TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. + */ +import { DataAwsCallerIdentity } from "./.gen/providers/aws/data-aws-caller-identity"; +import { DataAwsIamPolicyDocument } from "./.gen/providers/aws/data-aws-iam-policy-document"; +import { DataAwsPartition } from "./.gen/providers/aws/data-aws-partition"; +import { KmsKey } from "./.gen/providers/aws/kms-key"; +import { S3Bucket } from "./.gen/providers/aws/s3-bucket"; +import { S3BucketPolicy } from "./.gen/providers/aws/s3-bucket-policy"; +import { WorkspaceswebSessionLogger } from "./.gen/providers/aws/workspacesweb-session-logger"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + const example = new S3Bucket(this, "example", { + bucket: "example-session-logs", + forceDestroy: true, + }); + const current = new DataAwsCallerIdentity(this, "current", {}); + const dataAwsIamPolicyDocumentExample = new DataAwsIamPolicyDocument( + this, + "example_2", + { + statement: [ + { + actions: ["s3:PutObject"], + effect: "Allow", + principals: [ + { + identifiers: ["workspaces-web.amazonaws.com"], + type: "Service", + }, + ], + resources: [example.arn, "${" + example.arn + "}/*"], + }, + ], + } + ); + /*This allows the Terraform resource name to match the original name. 
You can remove the call if you don't need them to match.*/ + dataAwsIamPolicyDocumentExample.overrideLogicalId("example"); + const dataAwsPartitionCurrent = new DataAwsPartition(this, "current_3", {}); + /*This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match.*/ + dataAwsPartitionCurrent.overrideLogicalId("current"); + const awsS3BucketPolicyExample = new S3BucketPolicy(this, "example_4", { + bucket: example.id, + policy: Token.asString(dataAwsIamPolicyDocumentExample.json), + }); + /*This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match.*/ + awsS3BucketPolicyExample.overrideLogicalId("example"); + const kmsKeyPolicy = new DataAwsIamPolicyDocument(this, "kms_key_policy", { + statement: [ + { + actions: ["kms:*"], + principals: [ + { + identifiers: [ + "arn:${" + + dataAwsPartitionCurrent.partition + + "}:iam::${" + + current.accountId + + "}:root", + ], + type: "AWS", + }, + ], + resources: ["*"], + }, + { + actions: [ + "kms:Encrypt", + "kms:GenerateDataKey*", + "kms:ReEncrypt*", + "kms:Decrypt", + ], + principals: [ + { + identifiers: ["workspaces-web.amazonaws.com"], + type: "Service", + }, + ], + resources: ["*"], + }, + ], + }); + const awsKmsKeyExample = new KmsKey(this, "example_6", { + description: "KMS key for WorkSpaces Web Session Logger", + policy: Token.asString(kmsKeyPolicy.json), + }); + /*This allows the Terraform resource name to match the original name. 
You can remove the call if you don't need them to match.*/ + awsKmsKeyExample.overrideLogicalId("example"); + const awsWorkspaceswebSessionLoggerExample = new WorkspaceswebSessionLogger( + this, + "example_7", + { + additionalEncryptionContext: { + Application: "WorkSpacesWeb", + Environment: "Production", + }, + customerManagedKey: Token.asString(awsKmsKeyExample.arn), + dependsOn: [awsS3BucketPolicyExample, awsKmsKeyExample], + displayName: "example-session-logger", + eventFilter: [ + { + include: ["SessionStart", "SessionEnd"], + }, + ], + logConfiguration: [ + { + s3: [ + { + bucket: example.id, + bucketOwner: Token.asString(current.accountId), + folderStructure: "NestedByDate", + keyPrefix: "workspaces-web-logs/", + logFileFormat: "JsonLines", + }, + ], + }, + ], + tags: { + Environment: "Production", + Name: "example-session-logger", + }, + } + ); + /*This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match.*/ + awsWorkspaceswebSessionLoggerExample.overrideLogicalId("example"); + } +} + +``` + +## Argument Reference + +The following arguments are required: + +* `eventFilter` - (Required) Event filter that determines which events are logged. See [Event Filter](#event-filter) below. +* `logConfiguration` - (Required) Configuration block for specifying where logs are delivered. See [Log Configuration](#log-configuration) below. + +The following arguments are optional: + +* `additionalEncryptionContext` - (Optional) Map of additional encryption context key-value pairs. +* `customerManagedKey` - (Optional) ARN of the customer managed KMS key used to encrypt sensitive information. +* `displayName` - (Optional) Human-readable display name for the session logger resource. Forces replacement if changed. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `tags` - (Optional) Map of tags to assign to the resource. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. + +### Log Configuration + +* `s3` - (Required) Configuration block for S3 log delivery. See [S3 Configuration](#s3-configuration) below. + +### Event Filter + +Exactly one of the following must be specified: + +* `all` - (Optional) Block that specifies to monitor all events. Set to `{}` to monitor all events. + +* `include` - (Optional) List of specific events to monitor. Valid values include session events like `SessionStart`, `SessionEnd`, etc. + +### S3 Configuration + +* `bucket` - (Required) S3 bucket name where logs are delivered. +* `folderStructure` - (Required) Folder structure that defines the organizational structure for log files in S3. Valid values: `Flat`, `NestedByDate`. +* `logFileFormat` - (Required) Format of the log file written to S3. Valid values: `Json`, `JsonLines`. +* `bucketOwner` - (Optional) Expected bucket owner of the target S3 bucket. +* `keyPrefix` - (Optional) S3 path prefix that determines where log files are stored. + +## Attribute Reference + +This resource exports the following attributes in addition to the arguments above: + +* `associatedPortalArns` - List of ARNs of the web portals associated with the session logger. +* `sessionLoggerArn` - ARN of the session logger. +* `tagsAll` - Map of tags assigned to the resource, including those inherited from the provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). 
+ +~> **Note:** The `additionalEncryptionContext` and `customerManagedKey` attributes are computed when not specified and will be populated with values from the AWS API response. + +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import WorkSpaces Web Session Logger using the `sessionLoggerArn`. For example: + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. + */ +import { WorkspaceswebSessionLogger } from "./.gen/providers/aws/workspacesweb-session-logger"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + WorkspaceswebSessionLogger.generateConfigForImport( + this, + "example", + "arn:aws:workspaces-web:us-west-2:123456789012:sessionLogger/session_logger-id-12345678" + ); + } +} + +``` + +Using `terraform import`, import WorkSpaces Web Session Logger using the `sessionLoggerArn`. 
For example: + +```console +% terraform import aws_workspacesweb_session_logger.example arn:aws:workspaces-web:us-west-2:123456789012:sessionLogger/session_logger-id-12345678 +``` + + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/workspacesweb_session_logger_association.html.markdown b/website/docs/cdktf/typescript/r/workspacesweb_session_logger_association.html.markdown new file mode 100644 index 000000000000..6b8d467ef3a4 --- /dev/null +++ b/website/docs/cdktf/typescript/r/workspacesweb_session_logger_association.html.markdown @@ -0,0 +1,165 @@ +--- +subcategory: "WorkSpaces Web" +layout: "aws" +page_title: "AWS: aws_workspacesweb_session_logger_association" +description: |- + Terraform resource for managing an AWS WorkSpaces Web Session Logger Association. +--- + + + +# Resource: aws_workspacesweb_session_logger_association + +Terraform resource for managing an AWS WorkSpaces Web Session Logger Association. + +## Example Usage + +### Basic Usage + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { Token, TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. 
+ */ +import { DataAwsIamPolicyDocument } from "./.gen/providers/aws/data-aws-iam-policy-document"; +import { S3Bucket } from "./.gen/providers/aws/s3-bucket"; +import { S3BucketPolicy } from "./.gen/providers/aws/s3-bucket-policy"; +import { WorkspaceswebPortal } from "./.gen/providers/aws/workspacesweb-portal"; +import { WorkspaceswebSessionLogger } from "./.gen/providers/aws/workspacesweb-session-logger"; +import { WorkspaceswebSessionLoggerAssociation } from "./.gen/providers/aws/workspacesweb-session-logger-association"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + const example = new S3Bucket(this, "example", { + bucket: "example-session-logs", + forceDestroy: true, + }); + const awsWorkspaceswebPortalExample = new WorkspaceswebPortal( + this, + "example_1", + { + displayName: "example", + } + ); + /*This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match.*/ + awsWorkspaceswebPortalExample.overrideLogicalId("example"); + const dataAwsIamPolicyDocumentExample = new DataAwsIamPolicyDocument( + this, + "example_2", + { + statement: [ + { + actions: ["s3:PutObject"], + effect: "Allow", + principals: [ + { + identifiers: ["workspaces-web.amazonaws.com"], + type: "Service", + }, + ], + resources: ["${" + example.arn + "}/*"], + }, + ], + } + ); + /*This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match.*/ + dataAwsIamPolicyDocumentExample.overrideLogicalId("example"); + const awsS3BucketPolicyExample = new S3BucketPolicy(this, "example_3", { + bucket: example.id, + policy: Token.asString(dataAwsIamPolicyDocumentExample.json), + }); + /*This allows the Terraform resource name to match the original name. 
You can remove the call if you don't need them to match.*/ + awsS3BucketPolicyExample.overrideLogicalId("example"); + const awsWorkspaceswebSessionLoggerExample = new WorkspaceswebSessionLogger( + this, + "example_4", + { + dependsOn: [awsS3BucketPolicyExample], + displayName: "example", + eventFilter: [ + { + all: [{}], + }, + ], + logConfiguration: [ + { + s3: [ + { + bucket: example.id, + folderStructure: "Flat", + logFileFormat: "Json", + }, + ], + }, + ], + } + ); + /*This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match.*/ + awsWorkspaceswebSessionLoggerExample.overrideLogicalId("example"); + const awsWorkspaceswebSessionLoggerAssociationExample = + new WorkspaceswebSessionLoggerAssociation(this, "example_5", { + portalArn: Token.asString(awsWorkspaceswebPortalExample.portalArn), + sessionLoggerArn: Token.asString( + awsWorkspaceswebSessionLoggerExample.sessionLoggerArn + ), + }); + /*This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match.*/ + awsWorkspaceswebSessionLoggerAssociationExample.overrideLogicalId( + "example" + ); + } +} + +``` + +## Argument Reference + +The following arguments are required: + +* `portalArn` - (Required) ARN of the web portal. +* `sessionLoggerArn` - (Required) ARN of the session logger. + +The following arguments are optional: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). + +## Attribute Reference + +This resource exports no additional attributes. 
+ +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import WorkSpaces Web Session Logger Association using the `session_logger_arn,portal_arn`. For example: + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. + */ +import { WorkspaceswebSessionLoggerAssociation } from "./.gen/providers/aws/workspacesweb-session-logger-association"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + WorkspaceswebSessionLoggerAssociation.generateConfigForImport( + this, + "example", + "arn:aws:workspaces-web:us-west-2:123456789012:sessionLogger/session_logger-id-12345678,arn:aws:workspaces-web:us-west-2:123456789012:portal/portal-id-12345678" + ); + } +} + +``` + +Using `terraform import`, import WorkSpaces Web Session Logger Association using the `session_logger_arn,portal_arn`. For example: + +```console +% terraform import aws_workspacesweb_session_logger_association.example arn:aws:workspaces-web:us-west-2:123456789012:sessionLogger/session_logger-id-12345678,arn:aws:workspaces-web:us-west-2:123456789012:portal/portal-id-12345678 +``` + + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/workspacesweb_trust_store.html.markdown b/website/docs/cdktf/typescript/r/workspacesweb_trust_store.html.markdown new file mode 100644 index 000000000000..517b7f0a959f --- /dev/null +++ b/website/docs/cdktf/typescript/r/workspacesweb_trust_store.html.markdown @@ -0,0 +1,135 @@ +--- +subcategory: "WorkSpaces Web" +layout: "aws" +page_title: "AWS: aws_workspacesweb_trust_store" +description: |- + Terraform resource for managing an AWS WorkSpaces Web Trust Store. 
+--- + + + +# Resource: aws_workspacesweb_trust_store + +Terraform resource for managing an AWS WorkSpaces Web Trust Store. + +## Example Usage + +### Basic Usage + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { Fn, Token, TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. + */ +import { WorkspaceswebTrustStore } from "./.gen/providers/aws/workspacesweb-trust-store"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + new WorkspaceswebTrustStore(this, "example", { + certificate: [ + { + body: Token.asString(Fn.file("certificate.pem")), + }, + ], + }); + } +} + +``` + +### Multiple Certificates + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { Fn, Token, TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. + */ +import { WorkspaceswebTrustStore } from "./.gen/providers/aws/workspacesweb-trust-store"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + new WorkspaceswebTrustStore(this, "example", { + certificate: [ + { + body: Token.asString(Fn.file("certificate1.pem")), + }, + { + body: Token.asString(Fn.file("certificate2.pem")), + }, + ], + tags: { + Name: "example-trust-store", + }, + }); + } +} + +``` + +## Argument Reference + +The following arguments are optional: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `certificate` - (Optional) Set of certificates to include in the trust store. See [Certificate](#certificate) below. +* `tags` - (Optional) Map of tags to assign to the resource. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. + +### Certificate + +* `body` - (Required) Certificate body in PEM format. + +## Attribute Reference + +This resource exports the following attributes in addition to the arguments above: + +* `associatedPortalArns` - List of ARNs of the web portals associated with the trust store. +* `trustStoreArn` - ARN of the trust store. +* `tagsAll` - Map of tags assigned to the resource, including those inherited from the provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). + +The `certificate` block exports the following additional attributes: + +* `issuer` - Certificate issuer. +* `notValidAfter` - Date and time when the certificate expires in RFC3339 format. +* `notValidBefore` - Date and time when the certificate becomes valid in RFC3339 format. +* `subject` - Certificate subject. +* `thumbprint` - Certificate thumbprint. + +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import WorkSpaces Web Trust Store using the `trustStoreArn`. For example: + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. 
+ * See https://cdk.tf/provider-generation for more details. + */ +import { WorkspaceswebTrustStore } from "./.gen/providers/aws/workspacesweb-trust-store"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + WorkspaceswebTrustStore.generateConfigForImport( + this, + "example", + "arn:aws:workspaces-web:us-west-2:123456789012:trustStore/trust_store-id-12345678" + ); + } +} + +``` + +Using `terraform import`, import WorkSpaces Web Trust Store using the `trustStoreArn`. For example: + +```console +% terraform import aws_workspacesweb_trust_store.example arn:aws:workspaces-web:us-west-2:123456789012:trustStore/trust_store-id-12345678 +``` + + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/workspacesweb_trust_store_association.html.markdown b/website/docs/cdktf/typescript/r/workspacesweb_trust_store_association.html.markdown new file mode 100644 index 000000000000..678715521d1d --- /dev/null +++ b/website/docs/cdktf/typescript/r/workspacesweb_trust_store_association.html.markdown @@ -0,0 +1,108 @@ +--- +subcategory: "WorkSpaces Web" +layout: "aws" +page_title: "AWS: aws_workspacesweb_trust_store_association" +description: |- + Terraform resource for managing an AWS WorkSpaces Web Trust Store Association. +--- + + + +# Resource: aws_workspacesweb_trust_store_association + +Terraform resource for managing an AWS WorkSpaces Web Trust Store Association. + +## Example Usage + +### Basic Usage + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { Fn, Token, TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. 
+ */ +import { WorkspaceswebPortal } from "./.gen/providers/aws/workspacesweb-portal"; +import { WorkspaceswebTrustStore } from "./.gen/providers/aws/workspacesweb-trust-store"; +import { WorkspaceswebTrustStoreAssociation } from "./.gen/providers/aws/workspacesweb-trust-store-association"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + const example = new WorkspaceswebPortal(this, "example", { + displayName: "example", + }); + const awsWorkspaceswebTrustStoreExample = new WorkspaceswebTrustStore( + this, + "example_1", + { + certificate: [ + { body: Token.asString(Fn.file("certificate.pem")) }, + ], + } + ); + /*This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match.*/ + awsWorkspaceswebTrustStoreExample.overrideLogicalId("example"); + const awsWorkspaceswebTrustStoreAssociationExample = + new WorkspaceswebTrustStoreAssociation(this, "example_2", { + portalArn: example.portalArn, + trustStoreArn: Token.asString( + awsWorkspaceswebTrustStoreExample.trustStoreArn + ), + }); + /*This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match.*/ + awsWorkspaceswebTrustStoreAssociationExample.overrideLogicalId("example"); + } +} + +``` + +## Argument Reference + +The following arguments are required: + +* `trustStoreArn` - (Required) ARN of the trust store to associate with the portal. Forces replacement if changed. +* `portalArn` - (Required) ARN of the portal to associate with the trust store. Forces replacement if changed. + +The following arguments are optional: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). + +## Attribute Reference + +This resource exports no additional attributes. + +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import WorkSpaces Web Trust Store Association using the `trust_store_arn,portal_arn`. For example: + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. + */ +import { WorkspaceswebTrustStoreAssociation } from "./.gen/providers/aws/workspacesweb-trust-store-association"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + WorkspaceswebTrustStoreAssociation.generateConfigForImport( + this, + "example", + "arn:aws:workspaces-web:us-west-2:123456789012:trustStore/trust_store-id-12345678,arn:aws:workspaces-web:us-west-2:123456789012:portal/portal-id-12345678" + ); + } +} + +``` + +Using `terraform import`, import WorkSpaces Web Trust Store Association using the `trust_store_arn,portal_arn`. 
For example: + +```console +% terraform import aws_workspacesweb_trust_store_association.example arn:aws:workspaces-web:us-west-2:123456789012:trustStore/trust_store-id-12345678,arn:aws:workspaces-web:us-west-2:123456789012:portal/portal-id-12345678 +``` + + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/workspacesweb_user_access_logging_settings.html.markdown b/website/docs/cdktf/typescript/r/workspacesweb_user_access_logging_settings.html.markdown index 5b20aa3d2d49..bf5c59fe7a1b 100644 --- a/website/docs/cdktf/typescript/r/workspacesweb_user_access_logging_settings.html.markdown +++ b/website/docs/cdktf/typescript/r/workspacesweb_user_access_logging_settings.html.markdown @@ -24,8 +24,8 @@ import { TerraformStack } from "cdktf"; * Provider bindings are generated by running `cdktf get`. * See https://cdk.tf/provider-generation for more details. */ -import { WorkspaceswebUserAccessLoggingSettings } from "./.gen/providers/aws/"; import { KinesisStream } from "./.gen/providers/aws/kinesis-stream"; +import { WorkspaceswebUserAccessLoggingSettings } from "./.gen/providers/aws/workspacesweb-user-access-logging-settings"; class MyConvertedCode extends TerraformStack { constructor(scope: Construct, name: string) { super(scope, name); @@ -35,7 +35,7 @@ class MyConvertedCode extends TerraformStack { }); const awsWorkspaceswebUserAccessLoggingSettingsExample = new WorkspaceswebUserAccessLoggingSettings(this, "example_1", { - kinesis_stream_arn: example.arn, + kinesisStreamArn: example.arn, }); /*This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match.*/ awsWorkspaceswebUserAccessLoggingSettingsExample.overrideLogicalId( @@ -56,8 +56,8 @@ import { TerraformStack } from "cdktf"; * Provider bindings are generated by running `cdktf get`. * See https://cdk.tf/provider-generation for more details. 
*/ -import { WorkspaceswebUserAccessLoggingSettings } from "./.gen/providers/aws/"; import { KinesisStream } from "./.gen/providers/aws/kinesis-stream"; +import { WorkspaceswebUserAccessLoggingSettings } from "./.gen/providers/aws/workspacesweb-user-access-logging-settings"; class MyConvertedCode extends TerraformStack { constructor(scope: Construct, name: string) { super(scope, name); @@ -67,7 +67,7 @@ class MyConvertedCode extends TerraformStack { }); const awsWorkspaceswebUserAccessLoggingSettingsExample = new WorkspaceswebUserAccessLoggingSettings(this, "example_1", { - kinesis_stream_arn: example.arn, + kinesisStreamArn: example.arn, tags: { Environment: "Production", Name: "example-user-access-logging-settings", @@ -90,6 +90,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `tags` - (Optional) Map of tags assigned to the resource. If configured with a provider [`defaultTags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. ## Attribute Reference @@ -97,12 +98,12 @@ The following arguments are optional: This resource exports the following attributes in addition to the arguments above: * `associatedPortalArns` - List of web portal ARNs that this user access logging settings resource is associated with. -* `user_access_logging_settings_arn` - ARN of the user access logging settings resource. +* `userAccessLoggingSettingsArn` - ARN of the user access logging settings resource. 
* `tagsAll` - Map of tags assigned to the resource, including those inherited from the provider [`defaultTags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block). ## Import -In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import WorkSpaces Web User Access Logging Settings using the `user_access_logging_settings_arn`. For example: +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import WorkSpaces Web User Access Logging Settings using the `userAccessLoggingSettingsArn`. For example: ```typescript // DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug @@ -112,7 +113,7 @@ import { TerraformStack } from "cdktf"; * Provider bindings are generated by running `cdktf get`. * See https://cdk.tf/provider-generation for more details. */ -import { WorkspaceswebUserAccessLoggingSettings } from "./.gen/providers/aws/"; +import { WorkspaceswebUserAccessLoggingSettings } from "./.gen/providers/aws/workspacesweb-user-access-logging-settings"; class MyConvertedCode extends TerraformStack { constructor(scope: Construct, name: string) { super(scope, name); @@ -126,10 +127,10 @@ class MyConvertedCode extends TerraformStack { ``` -Using `terraform import`, import WorkSpaces Web User Access Logging Settings using the `user_access_logging_settings_arn`. For example: +Using `terraform import`, import WorkSpaces Web User Access Logging Settings using the `userAccessLoggingSettingsArn`. 
For example: ```console % terraform import aws_workspacesweb_user_access_logging_settings.example arn:aws:workspaces-web:us-west-2:123456789012:userAccessLoggingSettings/abcdef12345 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/workspacesweb_user_access_logging_settings_association.html.markdown b/website/docs/cdktf/typescript/r/workspacesweb_user_access_logging_settings_association.html.markdown new file mode 100644 index 000000000000..5fa647393129 --- /dev/null +++ b/website/docs/cdktf/typescript/r/workspacesweb_user_access_logging_settings_association.html.markdown @@ -0,0 +1,112 @@ +--- +subcategory: "WorkSpaces Web" +layout: "aws" +page_title: "AWS: aws_workspacesweb_user_access_logging_settings_association" +description: |- + Terraform resource for managing an AWS WorkSpaces Web User Access Logging Settings Association. +--- + + + +# Resource: aws_workspacesweb_user_access_logging_settings_association + +Terraform resource for managing an AWS WorkSpaces Web User Access Logging Settings Association. + +## Example Usage + +### Basic Usage + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { Token, TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. 
+ */ +import { KinesisStream } from "./.gen/providers/aws/kinesis-stream"; +import { WorkspaceswebPortal } from "./.gen/providers/aws/workspacesweb-portal"; +import { WorkspaceswebUserAccessLoggingSettings } from "./.gen/providers/aws/workspacesweb-user-access-logging-settings"; +import { WorkspaceswebUserAccessLoggingSettingsAssociation } from "./.gen/providers/aws/workspacesweb-user-access-logging-settings-association"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + const example = new KinesisStream(this, "example", { + name: "amazon-workspaces-web-example", + shardCount: 1, + }); + const awsWorkspaceswebPortalExample = new WorkspaceswebPortal( + this, + "example_1", + { + displayName: "example", + } + ); + /*This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match.*/ + awsWorkspaceswebPortalExample.overrideLogicalId("example"); + const awsWorkspaceswebUserAccessLoggingSettingsExample = + new WorkspaceswebUserAccessLoggingSettings(this, "example_2", { + kinesisStreamArn: example.arn, + }); + /*This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match.*/ + awsWorkspaceswebUserAccessLoggingSettingsExample.overrideLogicalId( + "example" + ); + const awsWorkspaceswebUserAccessLoggingSettingsAssociationExample = + new WorkspaceswebUserAccessLoggingSettingsAssociation(this, "example_3", { + portalArn: Token.asString(awsWorkspaceswebPortalExample.portalArn), + userAccessLoggingSettingsArn: Token.asString( + awsWorkspaceswebUserAccessLoggingSettingsExample.userAccessLoggingSettingsArn + ), + }); + /*This allows the Terraform resource name to match the original name. 
You can remove the call if you don't need them to match.*/ + awsWorkspaceswebUserAccessLoggingSettingsAssociationExample.overrideLogicalId( + "example" + ); + } +} + +``` + +## Argument Reference + +The following arguments are required: + +* `userAccessLoggingSettingsArn` - (Required) ARN of the user access logging settings to associate with the portal. Forces replacement if changed. +* `portalArn` - (Required) ARN of the portal to associate with the user access logging settings. Forces replacement if changed. + +The following arguments are optional: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). + +## Attribute Reference + +This resource exports no additional attributes. + +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import WorkSpaces Web User Access Logging Settings Association using the `user_access_logging_settings_arn,portal_arn`. For example: + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. 
+ */ +import { WorkspaceswebUserAccessLoggingSettingsAssociation } from "./.gen/providers/aws/workspacesweb-user-access-logging-settings-association"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + WorkspaceswebUserAccessLoggingSettingsAssociation.generateConfigForImport( + this, + "example", + "arn:aws:workspaces-web:us-west-2:123456789012:userAccessLoggingSettings/user_access_logging_settings-id-12345678,arn:aws:workspaces-web:us-west-2:123456789012:portal/portal-id-12345678" + ); + } +} + +``` + + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/workspacesweb_user_settings.html.markdown b/website/docs/cdktf/typescript/r/workspacesweb_user_settings.html.markdown index 46ae3438a1b8..e5cd02a11f16 100644 --- a/website/docs/cdktf/typescript/r/workspacesweb_user_settings.html.markdown +++ b/website/docs/cdktf/typescript/r/workspacesweb_user_settings.html.markdown @@ -155,6 +155,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `additionalEncryptionContext` - (Optional) Additional encryption context for the user settings. * `associatedPortalArns` - (Optional) List of web portal ARNs to associate with the user settings. * `cookieSynchronizationConfiguration` - (Optional) Configuration that specifies which cookies should be synchronized from the end user's local browser to the remote browser. Detailed below. 
@@ -222,4 +223,4 @@ Using `terraform import`, import WorkSpaces Web User Settings using the `userSet % terraform import aws_workspacesweb_user_settings.example arn:aws:workspacesweb:us-west-2:123456789012:usersettings/abcdef12345 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/workspacesweb_user_settings_association.html.markdown b/website/docs/cdktf/typescript/r/workspacesweb_user_settings_association.html.markdown new file mode 100644 index 000000000000..396f6f63b0c7 --- /dev/null +++ b/website/docs/cdktf/typescript/r/workspacesweb_user_settings_association.html.markdown @@ -0,0 +1,104 @@ +--- +subcategory: "WorkSpaces Web" +layout: "aws" +page_title: "AWS: aws_workspacesweb_user_settings_association" +description: |- + Terraform resource for managing an AWS WorkSpaces Web User Settings Association. +--- + + + +# Resource: aws_workspacesweb_user_settings_association + +Terraform resource for managing an AWS WorkSpaces Web User Settings Association. + +## Example Usage + +### Basic Usage + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { Token, TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. 
+ */ +import { WorkspaceswebPortal } from "./.gen/providers/aws/workspacesweb-portal"; +import { WorkspaceswebUserSettings } from "./.gen/providers/aws/workspacesweb-user-settings"; +import { WorkspaceswebUserSettingsAssociation } from "./.gen/providers/aws/workspacesweb-user-settings-association"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + const example = new WorkspaceswebPortal(this, "example", { + displayName: "example", + }); + const awsWorkspaceswebUserSettingsExample = new WorkspaceswebUserSettings( + this, + "example_1", + { + copyAllowed: "Enabled", + downloadAllowed: "Enabled", + pasteAllowed: "Enabled", + printAllowed: "Enabled", + uploadAllowed: "Enabled", + } + ); + /*This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match.*/ + awsWorkspaceswebUserSettingsExample.overrideLogicalId("example"); + const awsWorkspaceswebUserSettingsAssociationExample = + new WorkspaceswebUserSettingsAssociation(this, "example_2", { + portalArn: example.portalArn, + userSettingsArn: Token.asString( + awsWorkspaceswebUserSettingsExample.userSettingsArn + ), + }); + /*This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match.*/ + awsWorkspaceswebUserSettingsAssociationExample.overrideLogicalId("example"); + } +} + +``` + +## Argument Reference + +The following arguments are required: + +* `userSettingsArn` - (Required) ARN of the user settings to associate with the portal. Forces replacement if changed. +* `portalArn` - (Required) ARN of the portal to associate with the user settings. Forces replacement if changed. + +The following arguments are optional: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). + +## Attribute Reference + +This resource exports no additional attributes. + +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import WorkSpaces Web User Settings Association using the `user_settings_arn,portal_arn`. For example: + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. + */ +import { WorkspaceswebUserSettingsAssociation } from "./.gen/providers/aws/workspacesweb-user-settings-association"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + WorkspaceswebUserSettingsAssociation.generateConfigForImport( + this, + "example", + "arn:aws:workspaces-web:us-west-2:123456789012:userSettings/user_settings-id-12345678,arn:aws:workspaces-web:us-west-2:123456789012:portal/portal-id-12345678" + ); + } +} + +``` + + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/xray_encryption_config.html.markdown b/website/docs/cdktf/typescript/r/xray_encryption_config.html.markdown index 42e7a608a011..115a9ff649c0 100644 --- a/website/docs/cdktf/typescript/r/xray_encryption_config.html.markdown +++ b/website/docs/cdktf/typescript/r/xray_encryption_config.html.markdown @@ -96,6 +96,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `type` - (Required) The type of encryption. Set to `KMS` to use your own key for encryption. Set to `NONE` for default encryption. * `keyId` - (Optional) An AWS KMS customer master key (CMK) ARN. @@ -133,4 +134,4 @@ Using `terraform import`, import XRay Encryption Config using the region name. F % terraform import aws_xray_encryption_config.example us-west-2 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/xray_group.html.markdown b/website/docs/cdktf/typescript/r/xray_group.html.markdown index 11f53f15a80a..cc79b79b470e 100644 --- a/website/docs/cdktf/typescript/r/xray_group.html.markdown +++ b/website/docs/cdktf/typescript/r/xray_group.html.markdown @@ -43,6 +43,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `groupName` - (Required) The name of the group. * `filterExpression` - (Required) The filter expression defining criteria by which to group traces. more info can be found in official [docs](https://docs.aws.amazon.com/xray/latest/devguide/xray-console-filters.html). * `insightsConfiguration` - (Optional) Configuration options for enabling insights. @@ -65,6 +66,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. 
For example: + +```terraform +import { + to = aws_xray_group.example + identity = { + "arn" = "arn:aws:xray:us-west-2:123456789012:group/example-group/AFAEAFE" + } +} + +resource "aws_xray_group" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the X-Ray group. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import XRay Groups using the ARN. For example: ```typescript @@ -95,4 +117,4 @@ Using `terraform import`, import XRay Groups using the ARN. For example: % terraform import aws_xray_group.example arn:aws:xray:us-west-2:1234567890:group/example-group/TNGX7SW5U6QY36T4ZMOUA3HVLBYCZTWDIOOXY3CJAXTHSS3YCWUA ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/xray_resource_policy.html.markdown b/website/docs/cdktf/typescript/r/xray_resource_policy.html.markdown index 420a99645900..09b74aa5689d 100644 --- a/website/docs/cdktf/typescript/r/xray_resource_policy.html.markdown +++ b/website/docs/cdktf/typescript/r/xray_resource_policy.html.markdown @@ -48,6 +48,7 @@ The following arguments are required: The following arguments are optional: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `policyRevisionId` - (Optional) Specifies a specific policy revision, to ensure an atomic create operation. By default the resource policy is created if it does not exist, or updated with an incremented revision id. The revision id is unique to each policy in the account. If the policy revision id does not match the latest revision id, the operation will fail with an InvalidPolicyRevisionIdException exception. 
You can also provide a PolicyRevisionId of 0. In this case, the operation will fail with an InvalidPolicyRevisionIdException exception if a resource policy with the same name already exists. * `bypassPolicyLockoutCheck` - (Optional) Flag to indicate whether to bypass the resource policy lockout safety check. Setting this value to true increases the risk that the policy becomes unmanageable. Do not set this value to true indiscriminately. Use this parameter only when you include a policy in the request and you intend to prevent the principal that is making the request from making a subsequent PutResourcePolicy request. The default value is `false`. @@ -90,4 +91,4 @@ Using `terraform import`, import X-Ray Resource Policy using the `policyName`. F % terraform import aws_xray_resource_policy.example resource_policy-name ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/xray_sampling_rule.html.markdown b/website/docs/cdktf/typescript/r/xray_sampling_rule.html.markdown index 53f62a78ce24..548731df5e8c 100644 --- a/website/docs/cdktf/typescript/r/xray_sampling_rule.html.markdown +++ b/website/docs/cdktf/typescript/r/xray_sampling_rule.html.markdown @@ -51,6 +51,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `ruleName` - (Required) The name of the sampling rule. * `resourceArn` - (Required) Matches the ARN of the AWS resource on which the service runs. * `priority` - (Required) The priority of the sampling rule. @@ -101,4 +102,4 @@ Using `terraform import`, import XRay Sampling Rules using the name. 
For example % terraform import aws_xray_sampling_rule.example example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/d/appconfig_application.html.markdown b/website/docs/d/appconfig_application.html.markdown new file mode 100644 index 000000000000..54695859ba7a --- /dev/null +++ b/website/docs/d/appconfig_application.html.markdown @@ -0,0 +1,36 @@ +--- +subcategory: "AppConfig" +layout: "aws" +page_title: "AWS: aws_appconfig_application" +description: |- + Retrieves an AWS AppConfig Application by name or ID. +--- + +# Data Source: aws_appconfig_application + +Provides details about an AWS AppConfig Application. + +## Example Usage + +### Basic Usage + +```terraform +data "aws_appconfig_application" "example" { + name = "my-appconfig-application" +} +``` + +## Argument Reference + +This data source supports the following arguments: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `id` - (Optional) ID of the Application. Either `id` or `name` must be specified. +* `name` - (Optional) AWS AppConfig Application name. Either `name` or `id` must be specified. + +## Attribute Reference + +This data source exports the following attributes in addition to the arguments above: + +* `arn` - ARN of the Application. +* `description` - Description of the Application. 
diff --git a/website/docs/d/bedrock_inference_profiles.html.markdown b/website/docs/d/bedrock_inference_profiles.html.markdown index 4f14e295c556..7a3a80b89467 100644 --- a/website/docs/d/bedrock_inference_profiles.html.markdown +++ b/website/docs/d/bedrock_inference_profiles.html.markdown @@ -8,7 +8,7 @@ description: |- # Data Source: aws_bedrock_inference_profiles -Terraform data source for managing AWS Bedrock AWS Bedrock Inference Profiles. +Terraform data source for managing AWS Bedrock Inference Profiles. ## Example Usage @@ -18,11 +18,20 @@ Terraform data source for managing AWS Bedrock AWS Bedrock Inference Profiles. data "aws_bedrock_inference_profiles" "test" {} ``` +### Filter by Type + +```terraform +data "aws_bedrock_inference_profiles" "test" { + type = "APPLICATION" +} +``` + ## Argument Reference This data source supports the following arguments: * `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `type` - (Optional) Filters for inference profiles that match the type you specify. Valid values are: `SYSTEM_DEFINED`, `APPLICATION`. ## Attribute Reference @@ -32,16 +41,16 @@ This data source exports the following attributes in addition to the arguments a ### `inference_profile_summaries` -- `created_at` - The time at which the inference profile was created. -- `description` - The description of the inference profile. -- `inference_profile_arn` - The Amazon Resource Name (ARN) of the inference profile. -- `inference_profile_id` - The unique identifier of the inference profile. -- `inference_profile_name` - The name of the inference profile. -- `models` - A list of information about each model in the inference profile. See [`models`](#models). -- `status` - The status of the inference profile. 
`ACTIVE` means that the inference profile is available to use. -- `type` - The type of the inference profile. `SYSTEM_DEFINED` means that the inference profile is defined by Amazon Bedrock. -- `updated_at` - The time at which the inference profile was last updated. +- `created_at` - Time at which the inference profile was created. +- `description` - Description of the inference profile. +- `inference_profile_arn` - Amazon Resource Name (ARN) of the inference profile. +- `inference_profile_id` - Unique identifier of the inference profile. +- `inference_profile_name` - Name of the inference profile. +- `models` - List of information about each model in the inference profile. See [`models` Block](#models). +- `status` - Status of the inference profile. `ACTIVE` means that the inference profile is available to use. +- `type` - Type of the inference profile. `SYSTEM_DEFINED` means that the inference profile is defined by Amazon Bedrock. `APPLICATION` means the inference profile was created by a user. +- `updated_at` - Time at which the inference profile was last updated. ### `models` -- `model_arn` - The Amazon Resource Name (ARN) of the model. +- `model_arn` - Amazon Resource Name (ARN) of the model. diff --git a/website/docs/d/billing_views.html.markdown b/website/docs/d/billing_views.html.markdown new file mode 100644 index 000000000000..7933857e18ef --- /dev/null +++ b/website/docs/d/billing_views.html.markdown @@ -0,0 +1,53 @@ +--- +subcategory: "Billing" +layout: "aws" +page_title: "AWS: aws_billing_views" +description: |- + Retrieve a list of AWS Billing Views. +--- + +# Data Source: aws_billing_views + +Provides details about AWS Billing Views. 
+ +## Example Usage + +### Basic Usage + +```terraform +data "aws_billing_views" "example" { + billing_view_types = ["PRIMARY"] +} + +output "primary_view_arn_by_types" { + value = data.aws_billing_views.example.billing_view[0].arn +} +``` + +```terraform +data "aws_billing_views" "example" {} + +output "view_arns" { + value = [for view in data.aws_billing_views.example.billing_view : view.arn] +} + +output "primary_view_arn_by_name" { + value = [for view in data.aws_billing_views.example.billing_view : view.arn if view.name == "Primary View"][0] +} +``` + +## Argument Reference + +The following arguments are optional: + +* `billing_view_types` - (Optional) List of billing view types to retrieve. Valid values are `PRIMARY`, `BILLING_GROUP`, `CUSTOM`. + +## Attribute Reference + +This data source exports the following attributes in addition to the arguments above: + +* `billing_view` - List of billing view objects with the following attributes: + * `arn` - ARN of the billing view. + * `description` - Description of the billing view. + * `name` - Name of the billing view. + * `owner_account_id` - Account ID of the billing view owner. diff --git a/website/docs/d/budgets_budget.html.markdown b/website/docs/d/budgets_budget.html.markdown index 239f1aa32c78..1cd53de20e18 100644 --- a/website/docs/d/budgets_budget.html.markdown +++ b/website/docs/d/budgets_budget.html.markdown @@ -36,6 +36,7 @@ The following arguments are optional: This data source exports the following attributes in addition to the arguments above: * `auto_adjust_data` - Object containing [AutoAdjustData] which determines the budget amount for an auto-adjusting budget. +* `billing_view_arn` - ARN of the billing view. * `budget_exceeded` - Boolean indicating whether this budget has been exceeded. * `budget_limit` - The total amount of cost, usage, RI utilization, RI coverage, Savings Plans utilization, or Savings Plans coverage that you want to track with your budget. Contains object [Spend](#spend). 
* `budget_type` - Whether this budget tracks monetary cost or usage. diff --git a/website/docs/d/ce_cost_category.html.markdown b/website/docs/d/ce_cost_category.html.markdown index 3d97fb2bbffe..d613a8f5f5af 100644 --- a/website/docs/d/ce_cost_category.html.markdown +++ b/website/docs/d/ce_cost_category.html.markdown @@ -6,7 +6,7 @@ description: |- Provides details about a specific CostExplorer Cost Category Definition --- -# Resource: aws_ce_cost_category +# Data Source: aws_ce_cost_category Provides details about a specific CostExplorer Cost Category. diff --git a/website/docs/d/cloudwatch_event_bus.html.markdown b/website/docs/d/cloudwatch_event_bus.html.markdown index 896c62d3c784..5b06ca96ddac 100644 --- a/website/docs/d/cloudwatch_event_bus.html.markdown +++ b/website/docs/d/cloudwatch_event_bus.html.markdown @@ -37,3 +37,6 @@ This data source exports the following attributes in addition to the arguments a * `description` - Event bus description. * `id` - Name of the event bus. * `kms_key_identifier` - Identifier of the AWS KMS customer managed key for EventBridge to use to encrypt events on this event bus, if one has been specified. +* `log_config` - Block for logging configuration settings for the event bus. + * `include_detail` - Whether EventBridge includes detailed event information in the records it generates. + * `level` - Level of logging detail to include. diff --git a/website/docs/d/codebuild_fleet.html.markdown b/website/docs/d/codebuild_fleet.html.markdown index 0b92ff3ec7d3..7a5fc9f1f264 100644 --- a/website/docs/d/codebuild_fleet.html.markdown +++ b/website/docs/d/codebuild_fleet.html.markdown @@ -59,6 +59,7 @@ This data source exports the following attributes in addition to the arguments a * `base_capacity` - Number of machines allocated to the fleet. * `compute_configuration` - Compute configuration of the compute fleet. * `disk` - Amount of disk space of the instance type included in the fleet. 
+ * `instance_type` - EC2 instance type in the fleet. * `machine_type` - Machine type of the instance type included in the fleet. * `memory` - Amount of memory of the instance type included in the fleet. * `vcpu` - Number of vCPUs of the instance type included in the fleet. diff --git a/website/docs/d/db_proxy.html.markdown b/website/docs/d/db_proxy.html.markdown index ff1a895002e9..ae297f065042 100644 --- a/website/docs/d/db_proxy.html.markdown +++ b/website/docs/d/db_proxy.html.markdown @@ -32,6 +32,7 @@ This data source exports the following attributes in addition to the arguments a * `arn` - ARN of the DB Proxy. * `auth` - Configuration(s) with authorization mechanisms to connect to the associated instance or cluster. * `debug_logging` - Whether the proxy includes detailed information about SQL statements in its logs. +* `default_auth_scheme` - Default authentication scheme that the proxy uses for client connections to the proxy and connections from the proxy to the underlying database. * `endpoint` - Endpoint that you can use to connect to the DB proxy. * `engine_family` - Kinds of databases that the proxy can connect to. * `idle_client_timeout` - Number of seconds a connection to the proxy can have no activity before the proxy drops the client connection. diff --git a/website/docs/d/ebs_volume.html.markdown b/website/docs/d/ebs_volume.html.markdown index fab12e98468e..61c72f4ed06a 100644 --- a/website/docs/d/ebs_volume.html.markdown +++ b/website/docs/d/ebs_volume.html.markdown @@ -59,6 +59,7 @@ This data source exports the following attributes in addition to the arguments a * `throughput` - Throughput that the volume supports, in MiB/s. * `volume_id` - Volume ID (e.g., vol-59fcb34e). * `volume_type` - Type of EBS volume. +* `volume_initialization_rate` - EBS provisioned rate for volume initialization, in MiB/s, at which to download the snapshot blocks from Amazon S3 to the volume. 
## Timeouts diff --git a/website/docs/d/ec2_client_vpn_endpoint.html.markdown b/website/docs/d/ec2_client_vpn_endpoint.html.markdown index 137f449b759d..41c5e318cb56 100644 --- a/website/docs/d/ec2_client_vpn_endpoint.html.markdown +++ b/website/docs/d/ec2_client_vpn_endpoint.html.markdown @@ -63,12 +63,14 @@ This data source exports the following attributes in addition to the arguments a * `description` - Brief description of the endpoint. * `dns_name` - DNS name to be used by clients when connecting to the Client VPN endpoint. * `dns_servers` - Information about the DNS servers to be used for DNS resolution. +* `endpoint_ip_address_type` - IP address type for the Client VPN endpoint. * `security_group_ids` - IDs of the security groups for the target network associated with the Client VPN endpoint. * `self_service_portal` - Whether the self-service portal for the Client VPN endpoint is enabled. * `self_service_portal_url` - The URL of the self-service portal. * `server_certificate_arn` - The ARN of the server certificate. * `session_timeout_hours` - The maximum VPN session duration time in hours. * `split_tunnel` - Whether split-tunnel is enabled in the AWS Client VPN endpoint. +* `traffic_ip_address_type` - IP address type for traffic within the Client VPN tunnel. * `transport_protocol` - Transport protocol used by the Client VPN endpoint. * `vpc_id` - ID of the VPC associated with the Client VPN endpoint. * `vpn_port` - Port number for the Client VPN endpoint. diff --git a/website/docs/d/ec2_instance_type_offering.html.markdown b/website/docs/d/ec2_instance_type_offering.html.markdown index 9150643b44b9..b5a4957feb1b 100644 --- a/website/docs/d/ec2_instance_type_offering.html.markdown +++ b/website/docs/d/ec2_instance_type_offering.html.markdown @@ -43,6 +43,7 @@ This data source exports the following attributes in addition to the arguments a * `id` - EC2 Instance Type. * `instance_type` - EC2 Instance Type. +* `location` - Identifier for the location. 
## Timeouts diff --git a/website/docs/d/ec2_transit_gateway_dx_gateway_attachment.html.markdown b/website/docs/d/ec2_transit_gateway_dx_gateway_attachment.html.markdown index afd93419e51a..e2c89e3568ed 100644 --- a/website/docs/d/ec2_transit_gateway_dx_gateway_attachment.html.markdown +++ b/website/docs/d/ec2_transit_gateway_dx_gateway_attachment.html.markdown @@ -10,6 +10,8 @@ description: |- Get information on an EC2 Transit Gateway's attachment to a Direct Connect Gateway. +!> **Warning:** Using the `aws_ec2_transit_gateway_dx_gateway_attachment` data source in combination with `aws_ec2_transit_gateway_route_table_propagation` or `aws_ec2_transit_gateway_route_table_association` may result in lost connectivity due to unnecessary resource re-creation. To avoid this, use the `transit_gateway_attachment_id` attribute directly from the `aws_dx_gateway_association` resource. For example, `transit_gateway_attachment_id = aws_dx_gateway_association.example.transit_gateway_attachment_id`. + ## Example Usage ### By Transit Gateway and Direct Connect Gateway Identifiers diff --git a/website/docs/d/ec2_transit_gateway_vpc_attachment.html.markdown b/website/docs/d/ec2_transit_gateway_vpc_attachment.html.markdown index 35852ec457b8..d9f5fc00cb1c 100644 --- a/website/docs/d/ec2_transit_gateway_vpc_attachment.html.markdown +++ b/website/docs/d/ec2_transit_gateway_vpc_attachment.html.markdown @@ -10,6 +10,8 @@ description: |- Get information on an EC2 Transit Gateway VPC Attachment. +!> **Warning:** Using the `aws_ec2_transit_gateway_vpc_attachment` data source in combination with `aws_ec2_transit_gateway_route_table_propagation` or `aws_ec2_transit_gateway_route_table_association` may result in lost connectivity due to unnecessary resource re-creation. To avoid this, use the `id` attribute directly from the `aws_ec2_transit_gateway_vpc_attachment` _resource_. For example, `transit_gateway_attachment_id = aws_ec2_transit_gateway_vpc_attachment.example.id`. 
+ ## Example Usage ### By Filter diff --git a/website/docs/d/ecr_images.html.markdown b/website/docs/d/ecr_images.html.markdown new file mode 100644 index 000000000000..d6e2db4ab312 --- /dev/null +++ b/website/docs/d/ecr_images.html.markdown @@ -0,0 +1,43 @@ +--- +subcategory: "ECR (Elastic Container Registry)" +layout: "aws" +page_title: "AWS: aws_ecr_images" +description: |- + Provides a list of images for a specified ECR Repository +--- + +# Data Source: aws_ecr_images + +The ECR Images data source allows the list of images in a specified repository to be retrieved. + +## Example Usage + +```terraform +data "aws_ecr_images" "example" { + repository_name = "my-repository" +} + +output "image_digests" { + value = [for img in data.aws_ecr_images.example.image_ids : img.image_digest if img.image_digest != null] +} + +output "image_tags" { + value = [for img in data.aws_ecr_images.example.image_ids : img.image_tag if img.image_tag != null] +} +``` + +## Argument Reference + +This data source supports the following arguments: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `registry_id` - (Optional) ID of the Registry where the repository resides. +* `repository_name` - (Required) Name of the ECR Repository. + +## Attribute Reference + +This data source exports the following attributes in addition to the arguments above: + +* `image_ids` - List of image objects containing image digest and tags. Each object has the following attributes: + * `image_digest` - The sha256 digest of the image manifest. + * `image_tag` - The tag associated with the image. 
diff --git a/website/docs/d/ecr_repository.html.markdown b/website/docs/d/ecr_repository.html.markdown index 8b42b6122ca6..e6bcc7efce5e 100644 --- a/website/docs/d/ecr_repository.html.markdown +++ b/website/docs/d/ecr_repository.html.markdown @@ -34,6 +34,7 @@ This data source exports the following attributes in addition to the arguments a * `encryption_configuration` - Encryption configuration for the repository. See [Encryption Configuration](#encryption-configuration) below. * `image_scanning_configuration` - Configuration block that defines image scanning configuration for the repository. See [Image Scanning Configuration](#image-scanning-configuration) below. * `image_tag_mutability` - The tag mutability setting for the repository. +* `image_tag_mutability_exclusion_filter` - Block that defines filters to specify which image tags can override the default tag mutability setting. * `most_recent_image_tags` - List of image tags associated with the most recently pushed image in the repository. * `repository_url` - URL of the repository (in the form `aws_account_id.dkr.ecr.region.amazonaws.com/repositoryName`). * `tags` - Map of tags assigned to the resource. @@ -43,6 +44,11 @@ This data source exports the following attributes in addition to the arguments a * `encryption_type` - Encryption type to use for the repository, either `AES256` or `KMS`. * `kms_key` - If `encryption_type` is `KMS`, the ARN of the KMS key used. +### Image Tag Mutability Exclusion Filter + +* `filter` - The filter pattern to use for excluding image tags from the mutability setting. +* `filter_type` - The type of filter to use. + ### Image Scanning Configuration * `scan_on_push` - Whether images are scanned after being pushed to the repository. 
diff --git a/website/docs/d/ecr_repository_creation_template.html.markdown b/website/docs/d/ecr_repository_creation_template.html.markdown index 7a66b35c3346..9a984d8653aa 100644 --- a/website/docs/d/ecr_repository_creation_template.html.markdown +++ b/website/docs/d/ecr_repository_creation_template.html.markdown @@ -34,6 +34,7 @@ This data source exports the following attributes in addition to the arguments a * `description` - The description for this template. * `encryption_configuration` - Encryption configuration for any created repositories. See [Encryption Configuration](#encryption-configuration) below. * `image_tag_mutability` - The tag mutability setting for any created repositories. +* `image_tag_mutability_exclusion_filter` - Block that defines filters to specify which image tags can override the default tag mutability setting. * `lifecycle_policy` - The lifecycle policy document to apply to any created repositories. * `registry_id` - The registry ID the repository creation template applies to. * `repository_policy` - The registry policy document to apply to any created repositories. @@ -43,3 +44,8 @@ This data source exports the following attributes in addition to the arguments a * `encryption_type` - Encryption type to use for any created repositories, either `AES256` or `KMS`. * `kms_key` - If `encryption_type` is `KMS`, the ARN of the KMS key used. + +### Image Tag Mutability Exclusion Filter + +* `filter` - The filter pattern to use for excluding image tags from the mutability setting. +* `filter_type` - The type of filter to use. 
diff --git a/website/docs/d/ecs_service.html.markdown b/website/docs/d/ecs_service.html.markdown index f1022035e991..e524834186ac 100644 --- a/website/docs/d/ecs_service.html.markdown +++ b/website/docs/d/ecs_service.html.markdown @@ -35,6 +35,26 @@ This data source exports the following attributes in addition to the arguments a * `arn` - ARN of the ECS Service * `desired_count` - Number of tasks for the ECS Service * `launch_type` - Launch type for the ECS Service +* `load_balancer` - Load balancers for the ECS Service. See [`load_balancer` Block](#load_balancer-block) for details. * `scheduling_strategy` - Scheduling strategy for the ECS Service * `task_definition` - Family for the latest ACTIVE revision or full ARN of the task definition. * `tags` - Resource tags. + +### `load_balancer` Block + +The `load_balancer` block exports the following attributes: + +* `advanced_configuration` - Settings for Blue/Green deployment. See [`advanced_configuration` Block](#advanced_configuration-block) for details. +* `container_name` - Name of the container to associate with the load balancer. +* `container_port` - Port on the container to associate with the load balancer. +* `elb_name` - Name of the load balancer. +* `target_group_arn` - ARN of the target group to associate with the load balancer. + +### `advanced_configuration` Block + +The `advanced_configuration` block exports the following attributes: + +* `alternate_target_group_arn` - ARN of the alternate target group to use for Blue/Green deployments. +* `production_listener_rule` - ARN of the listener rule that routes production traffic. +* `role_arn` - ARN of the IAM role that allows ECS to manage the target groups. +* `test_listener_rule` - ARN of the listener rule that routes test traffic. 
diff --git a/website/docs/d/efs_mount_target.html.markdown b/website/docs/d/efs_mount_target.html.markdown index 02028a405b30..f7104cedb91d 100644 --- a/website/docs/d/efs_mount_target.html.markdown +++ b/website/docs/d/efs_mount_target.html.markdown @@ -39,6 +39,8 @@ This data source exports the following attributes in addition to the arguments a * `file_system_arn` - Amazon Resource Name of the file system for which the mount target is intended. * `subnet_id` - ID of the mount target's subnet. * `ip_address` - Address at which the file system may be mounted via the mount target. +* `ip_address_type` - IP address type for the mount target. +* `ipv6_address` - IPv6 address at which the file system may be mounted via the mount target. * `security_groups` - List of VPC security group IDs attached to the mount target. * `dns_name` - DNS name for the EFS file system. * `mount_target_dns_name` - The DNS name for the given subnet/AZ per [documented convention](http://docs.aws.amazon.com/efs/latest/ug/mounting-fs-mount-cmd-dns-name.html). diff --git a/website/docs/d/eks_cluster.html.markdown b/website/docs/d/eks_cluster.html.markdown index f270a5e54d6a..57234661b176 100644 --- a/website/docs/d/eks_cluster.html.markdown +++ b/website/docs/d/eks_cluster.html.markdown @@ -50,6 +50,7 @@ This data source exports the following attributes in addition to the arguments a * `data` - The base64 encoded certificate data required to communicate with your cluster. Add this to the `certificate-authority-data` section of the `kubeconfig` file for your cluster. * `cluster_id` - The ID of your local Amazon EKS cluster on the AWS Outpost. This attribute isn't available for an AWS EKS cluster on AWS cloud. * `created_at` - Unix epoch time stamp in seconds for when the cluster was created. +* `deletion_protection` - Whether deletion protection for the cluster is enabled. * `enabled_cluster_log_types` - The enabled control plane logs. * `endpoint` - Endpoint for your Kubernetes API server. 
* `identity` - Nested attribute containing identity provider information for your cluster. Only available on Kubernetes version 1.13 and 1.14 clusters created or upgraded on or after September 3, 2019. For an example using this information to enable IAM Roles for Service Accounts, see the [`aws_eks_cluster` resource documentation](/docs/providers/aws/r/eks_cluster.html). diff --git a/website/docs/d/eks_cluster_versions.html.markdown b/website/docs/d/eks_cluster_versions.html.markdown index 7a9a17f39a36..f363e7c612ec 100644 --- a/website/docs/d/eks_cluster_versions.html.markdown +++ b/website/docs/d/eks_cluster_versions.html.markdown @@ -16,6 +16,18 @@ Terraform data source for managing AWS EKS (Elastic Kubernetes) Cluster Versions ```terraform data "aws_eks_cluster_versions" "example" {} + +output "eks_cluster_versions" { + value = data.aws_eks_cluster_versions.example.cluster_versions +} + +output "eks_cluster_version_filtered" { + value = [for version in data.aws_eks_cluster_versions.example.cluster_versions : version if version.cluster_version == "1.33"] +} + +output "eks_cluster_version_list" { + value = [for version in data.aws_eks_cluster_versions.example.cluster_versions : version.cluster_version] +} ``` ### Filter by Cluster Type @@ -41,7 +53,6 @@ The following arguments are optional: * `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `cluster_type` - (Optional) Type of clusters to filter by. Currently, the only valid value is `eks`. -* `cluster_versions` - (Optional) A list of Kubernetes versions that you can use to check if EKS supports it. * `default_only` - (Optional) Whether to show only the default versions of Kubernetes supported by EKS. 
* `include_all` - (Optional) Whether to include all kubernetes versions in the response. * `version_status` - (Optional) Status of the EKS cluster versions to list. @@ -51,12 +62,13 @@ Valid values are `STANDARD_SUPPORT` or `UNSUPPORTED` or `EXTENDED_SUPPORT`. This data source exports the following attributes in addition to the arguments above: -* `cluster_type` - Type of cluster that the version belongs to. -* `cluster_version` - Kubernetes version supported by EKS. -* `default_platform_version` - Default eks platform version for the cluster version. -* `default_version` - Default Kubernetes version for the cluster version. -* `end_of_extended_support_date` - End of extended support date for the cluster version. -* `end_of_standard_support_date` - End of standard support date for the cluster version. -* `kubernetes_patch_version` - Kubernetes patch version for the cluster version. -* `release_date` - Release date of the cluster version. -* `version_status` - Status of the EKS cluster version. +* `cluster_versions` - A list of Kubernetes version information. + * `cluster_type` - Type of cluster that the version belongs to. + * `cluster_version` - Kubernetes version supported by EKS. + * `default_platform_version` - Default eks platform version for the cluster version. + * `default_version` - Default Kubernetes version for the cluster version. + * `end_of_extended_support_date` - End of extended support date for the cluster version. + * `end_of_standard_support_date` - End of standard support date for the cluster version. + * `kubernetes_patch_version` - Kubernetes patch version for the cluster version. + * `release_date` - Release date of the cluster version. + * `version_status` - Status of the EKS cluster version. 
diff --git a/website/docs/d/elasticache_subnet_group.html.markdown b/website/docs/d/elasticache_subnet_group.html.markdown index a89f273ebef0..d15f392bede7 100644 --- a/website/docs/d/elasticache_subnet_group.html.markdown +++ b/website/docs/d/elasticache_subnet_group.html.markdown @@ -6,7 +6,7 @@ description: |- Provides information about a ElastiCache Subnet Group. --- -# Resource: aws_elasticache_subnet_group +# Data Source: aws_elasticache_subnet_group Provides information about a ElastiCache Subnet Group. diff --git a/website/docs/d/glue_catalog_table.html.markdown b/website/docs/d/glue_catalog_table.html.markdown index 212c212071c9..3c33b1d2b6ea 100644 --- a/website/docs/d/glue_catalog_table.html.markdown +++ b/website/docs/d/glue_catalog_table.html.markdown @@ -57,6 +57,7 @@ This data source exports the following attributes in addition to the arguments a * `comment` - Free-form text comment. * `name` - Name of the Partition Key. +* `parameters` - Map of key-value pairs. * `type` - Datatype of data in the Partition Key. ### storage_descriptor diff --git a/website/docs/d/iam_principal_policy_simulation.html.markdown b/website/docs/d/iam_principal_policy_simulation.html.markdown index 430a9299bc29..9575c826cfaa 100644 --- a/website/docs/d/iam_principal_policy_simulation.html.markdown +++ b/website/docs/d/iam_principal_policy_simulation.html.markdown @@ -49,7 +49,7 @@ data "aws_iam_principal_policy_simulation" "s3_object_access" { If you intend to use this data source to quickly raise an error when the given credentials are insufficient then you must use [`depends_on`](https://www.terraform.io/language/meta-arguments/depends_on) inside any resource which would require those credentials, to ensure that the policy check will run first: ```terraform -resource "aws_s3_bucket_object" "example" { +resource "aws_s3_object" "example" { bucket = "my-test-bucket" # ... 
diff --git a/website/docs/d/iam_server_certificate.html.markdown b/website/docs/d/iam_server_certificate.html.markdown index 589a4411e6d9..c18c07f929dc 100644 --- a/website/docs/d/iam_server_certificate.html.markdown +++ b/website/docs/d/iam_server_certificate.html.markdown @@ -51,22 +51,3 @@ This data source exports the following attributes in addition to the arguments a * `upload_date` is the date when the server certificate was uploaded * `certificate_body` is the public key certificate (PEM-encoded). This is useful when [configuring back-end instance authentication](http://docs.aws.amazon.com/elasticloadbalancing/latest/classic/elb-create-https-ssl-load-balancer.html) policy for load balancer * `certificate_chain` is the public key certificate chain (PEM-encoded) if exists, empty otherwise - -## Import - -In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import an IAM server certificate using `name`. For example: - -```terraform -import { - to = aws_iam_server_certificate.example - id = "example" -} -``` - -Using `terraform import`, import an IAM server certificate using `name`. For example: - -```console -% terraform import aws_iam_server_certificate.example example -``` - -Import will read in the certificate body, certificate chain (if it exists), ID, name, path, and ARN. It will not retrieve the private key which is not available through the AWS API. diff --git a/website/docs/d/instance.html.markdown b/website/docs/d/instance.html.markdown index 0d94af8bee0d..f00bc0569d0c 100644 --- a/website/docs/d/instance.html.markdown +++ b/website/docs/d/instance.html.markdown @@ -36,9 +36,10 @@ This data source supports the following arguments: * `instance_id` - (Optional) Specify the exact Instance ID with which to populate the data source. * `instance_tags` - (Optional) Map of tags, each pair of which must exactly match a pair on the desired Instance. 
-* `filter` - (Optional) One or more name/value pairs to use as filters. There are -several valid keys, for a full reference, check out -[describe-instances in the AWS CLI reference][1]. +* `filter` - (Optional) One or more filters to apply to the search. + If multiple `filter` blocks are provided, they all must be true. + For a full reference of filter names, see [describe-instances in the AWS CLI reference][1]. + See [`filter` Block](#filter-block) below. * `get_password_data` - (Optional) If true, wait for password data to become available and retrieve it. Useful for getting the administrator password for instances running Microsoft Windows. The password data is exported to the `password_data` attribute. See [GetPasswordData](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetPasswordData.html) for more information. * `get_user_data` - (Optional) Retrieve Base64 encoded User Data contents into the `user_data_base64` attribute. A SHA-1 hash of the User Data contents will always be present in the `user_data` attribute. Defaults to `false`. @@ -48,6 +49,14 @@ several valid keys, for a full reference, check out Terraform will fail. Ensure that your search is specific enough to return a single Instance ID only. +### `filter` Block + +The `filter` block supports the following arguments: + +* `name` - (Required) Name of the filter. + For a full reference of filter names, see [describe-instances in the AWS CLI reference][1]. +* `values` - (Required) One or more values to match. + ## Attribute Reference `id` is set to the ID of the found Instance. In addition, the following attributes @@ -101,6 +110,7 @@ interpolation. * `outpost_arn` - ARN of the Outpost. * `password_data` - Base-64 encoded encrypted password data for the instance. Useful for getting the administrator password for instances running Microsoft Windows. This attribute is only exported if `get_password_data` is true. 
See [GetPasswordData](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetPasswordData.html) for more information. * `placement_group` - Placement group of the Instance. +* `placement_group_id` - Placement group ID of the Instance. * `placement_partition_number` - Number of the partition the instance is in. * `private_dns` - Private DNS name assigned to the Instance. Can only be used inside the Amazon EC2, and only available if you've enabled DNS hostnames for your VPC. * `private_dns_name_options` - Options for the instance hostname. diff --git a/website/docs/d/instances.html.markdown b/website/docs/d/instances.html.markdown index 276a36802bd6..ebfa78f0bf7b 100644 --- a/website/docs/d/instances.html.markdown +++ b/website/docs/d/instances.html.markdown @@ -51,9 +51,18 @@ This data source supports the following arguments: * `instance_tags` - (Optional) Map of tags, each pair of which must exactly match a pair on desired instances. * `instance_state_names` - (Optional) List of instance states that should be applicable to the desired instances. The permitted values are: `pending, running, shutting-down, stopped, stopping, terminated`. The default value is `running`. -* `filter` - (Optional) One or more name/value pairs to use as filters. There are -several valid keys, for a full reference, check out -[describe-instances in the AWS CLI reference][1]. +* `filter` - (Optional) One or more filters to apply to the search. + If multiple `filter` blocks are provided, they all must be true. + For a full reference of filter names, see [describe-instances in the AWS CLI reference][1]. + See [`filter` Block](#filter-block) below. + +### `filter` Block + +The `filter` block supports the following arguments: + +* `name` - (Required) Name of the filter. + For a full reference of filter names, see [describe-instances in the AWS CLI reference][1]. +* `values` - (Required) One or more values to match. 
## Attribute Reference diff --git a/website/docs/d/lakeformation_resource.html.markdown b/website/docs/d/lakeformation_resource.html.markdown index 8ae41ca5000c..72d1cb90ad7b 100644 --- a/website/docs/d/lakeformation_resource.html.markdown +++ b/website/docs/d/lakeformation_resource.html.markdown @@ -29,5 +29,8 @@ This data source supports the following arguments: This data source exports the following attributes in addition to the arguments above: +* `hybrid_access_enabled` - Flag to enable AWS LakeFormation hybrid access permission mode. * `last_modified` - Date and time the resource was last modified in [RFC 3339 format](https://tools.ietf.org/html/rfc3339#section-5.8). * `role_arn` - Role that the resource was registered with. +* `with_federation` - Whether the resource is a federated resource. +* `with_privileged_access` - Boolean to grant the calling principal the permissions to perform all supported Lake Formation operations on the registered data location. diff --git a/website/docs/d/lambda_function.html.markdown b/website/docs/d/lambda_function.html.markdown index 61b8c1a3cbc1..c0108877d25e 100644 --- a/website/docs/d/lambda_function.html.markdown +++ b/website/docs/d/lambda_function.html.markdown @@ -142,6 +142,7 @@ This data source exports the following attributes in addition to the arguments a * `signing_profile_version_arn` - ARN for a signing profile version. * `source_code_hash` - (**Deprecated** use `code_sha256` instead) Base64-encoded representation of raw SHA-256 sum of the zip file. * `source_code_size` - Size in bytes of the function .zip file. +* `source_kms_key_arn` - ARN of the AWS Key Management Service key used to encrypt the function's `.zip` deployment package. * `tags` - Map of tags assigned to the Lambda Function. * `timeout` - Function execution time at which Lambda should terminate the function. * `tracing_config` - Tracing settings of the function. [See below](#tracing_config-attribute-reference). 
diff --git a/website/docs/d/media_convert_queue.html.markdown b/website/docs/d/media_convert_queue.html.markdown index 7be378a8b627..ea852617f32d 100644 --- a/website/docs/d/media_convert_queue.html.markdown +++ b/website/docs/d/media_convert_queue.html.markdown @@ -6,7 +6,7 @@ description: |- Retrieve information about a AWS Elemental MediaConvert Queue. --- -# Resource: aws_media_convert_queue +# Data Source: aws_media_convert_queue Retrieve information about a AWS Elemental MediaConvert Queue. diff --git a/website/docs/d/memorydb_acl.html.markdown b/website/docs/d/memorydb_acl.html.markdown index c5f25e576e0e..6ff3a37bf226 100644 --- a/website/docs/d/memorydb_acl.html.markdown +++ b/website/docs/d/memorydb_acl.html.markdown @@ -6,7 +6,7 @@ description: |- Provides information about a MemoryDB ACL. --- -# Resource: aws_memorydb_acl +# Data Source: aws_memorydb_acl Provides information about a MemoryDB ACL. diff --git a/website/docs/d/memorydb_cluster.html.markdown b/website/docs/d/memorydb_cluster.html.markdown index 6072bad7ff5e..ed8625225db2 100644 --- a/website/docs/d/memorydb_cluster.html.markdown +++ b/website/docs/d/memorydb_cluster.html.markdown @@ -6,7 +6,7 @@ description: |- Provides information about a MemoryDB Cluster. --- -# Resource: aws_memorydb_cluster +# Data Source: aws_memorydb_cluster Provides information about a MemoryDB Cluster. diff --git a/website/docs/d/memorydb_parameter_group.html.markdown b/website/docs/d/memorydb_parameter_group.html.markdown index 4baf30a72579..8bfa4cef86a1 100644 --- a/website/docs/d/memorydb_parameter_group.html.markdown +++ b/website/docs/d/memorydb_parameter_group.html.markdown @@ -6,7 +6,7 @@ description: |- Provides information about a MemoryDB Parameter Group. --- -# Resource: aws_memorydb_parameter_group +# Data Source: aws_memorydb_parameter_group Provides information about a MemoryDB Parameter Group. 
diff --git a/website/docs/d/memorydb_snapshot.html.markdown b/website/docs/d/memorydb_snapshot.html.markdown index cd7bb1163722..aef058242a8c 100644 --- a/website/docs/d/memorydb_snapshot.html.markdown +++ b/website/docs/d/memorydb_snapshot.html.markdown @@ -6,7 +6,7 @@ description: |- Provides information about a MemoryDB Snapshot. --- -# Resource: aws_memorydb_snapshot +# Data Source: aws_memorydb_snapshot Provides information about a MemoryDB Snapshot. diff --git a/website/docs/d/memorydb_subnet_group.html.markdown b/website/docs/d/memorydb_subnet_group.html.markdown index d743f1fb9a9e..84c4971e3e20 100644 --- a/website/docs/d/memorydb_subnet_group.html.markdown +++ b/website/docs/d/memorydb_subnet_group.html.markdown @@ -6,7 +6,7 @@ description: |- Provides information about a MemoryDB Subnet Group. --- -# Resource: aws_memorydb_subnet_group +# Data Source: aws_memorydb_subnet_group Provides information about a MemoryDB Subnet Group. diff --git a/website/docs/d/memorydb_user.html.markdown b/website/docs/d/memorydb_user.html.markdown index 85418ec2a082..63f6ca1406e1 100644 --- a/website/docs/d/memorydb_user.html.markdown +++ b/website/docs/d/memorydb_user.html.markdown @@ -6,7 +6,7 @@ description: |- Provides information about a MemoryDB User. --- -# Resource: aws_memorydb_user +# Data Source: aws_memorydb_user Provides information about a MemoryDB User. diff --git a/website/docs/d/network_interface.html.markdown b/website/docs/d/network_interface.html.markdown index ed611309037f..8307d52f6741 100644 --- a/website/docs/d/network_interface.html.markdown +++ b/website/docs/d/network_interface.html.markdown @@ -31,7 +31,8 @@ This data source supports the following arguments: This data source exports the following attributes in addition to the arguments above: * `arn` - ARN of the network interface. -* `association` - Association information for an Elastic IP address (IPv4) associated with the network interface. See supported fields below. 
+* `association` - Association information for an Elastic IP address (IPv4) associated with the network interface. See [association](#association) below. +* `attachment` - Attachment of the ENI. See [attachment](#attachment) below. * `availability_zone` - Availability Zone. * `description` - Description of the network interface. * `interface_type` - Type of interface. @@ -58,6 +59,14 @@ This data source exports the following attributes in addition to the arguments a * `public_dns_name` - Public DNS name. * `public_ip` - Address of the Elastic IP address bound to the network interface. +### `attachment` + +* `attachment_id` - ID of the network interface attachment. +* `device_index` - Device index of the network interface attachment on the instance. +* `instance_id` - ID of the instance. +* `instance_owner_id` - AWS account ID of the owner of the instance. +* `network_card_index` - Index of the network card. + ## Timeouts [Configuration options](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts): diff --git a/website/docs/d/networkfirewall_firewall.html.markdown b/website/docs/d/networkfirewall_firewall.html.markdown index cbc70c3df2cd..6a6cd36c0236 100644 --- a/website/docs/d/networkfirewall_firewall.html.markdown +++ b/website/docs/d/networkfirewall_firewall.html.markdown @@ -52,6 +52,9 @@ One or more of these arguments is required. This data source exports the following attributes in addition to the arguments above: * `arn` - ARN of the firewall. +* `availability_zone_change_protection` - Indicates whether the firewall is protected against changes to its Availability Zone configuration. +* `availability_zone_mapping` - Set of Availability Zones where the firewall endpoints are created for a transit gateway-attached firewall. + * `availability_zone_id` - The ID of the Availability Zone where the firewall endpoint is located. * `delete_protection` - A flag indicating whether the firewall is protected against deletion. 
* `description` - Description of the firewall. * `enabled_analysis_types` - Set of types for which to collect analysis metrics. @@ -64,6 +67,8 @@ This data source exports the following attributes in addition to the arguments a * `sync_states` - Set of subnets configured for use by the firewall. * `attachment` - Nested list describing the attachment status of the firewall's association with a single VPC subnet. * `endpoint_id` - The identifier of the firewall endpoint that AWS Network Firewall has instantiated in the subnet. You use this to identify the firewall endpoint in the VPC route tables, when you redirect the VPC traffic through the endpoint. + * `status` - The current status of the firewall endpoint instantiation in the subnet. + * `status_message` - It populates this with the reason for the error or failure and how to resolve it. A FAILED status indicates a non-recoverable state, and an ERROR status indicates an issue that you can fix. * `subnet_id` - The unique identifier of the subnet that you've specified to be used for a firewall endpoint. * `availability_zone` - The Availability Zone where the subnet is configured. * `capacity_usage_summary` - Aggregated count of all resources used by reference sets in a firewall. @@ -73,6 +78,10 @@ This data source exports the following attributes in addition to the arguments a * `resolved_cidr_count` - Total number of CIDR blocks used by the IP set references in a firewall. * `utilized_cidr_count` - Number of CIDR blocks used by the IP set references in a firewall. * `configuration_sync_state_summary` - Summary of sync states for all availability zones in which the firewall is configured. + * `transit_gateway_attachment_sync_states` - Set of transit gateway attachments configured for use by the firewall. + * `attachment_id` - The unique identifier of the transit gateway attachment. + * `status_message` - A message providing additional information about the current status. 
+ * `transit_gateway_attachment_status` - The current status of the transit gateway attachment. * `id` - ARN that identifies the firewall. * `name` - Descriptive name of the firewall. * `subnet_change_protection` - A flag indicating whether the firewall is protected against changes to the subnet associations. @@ -80,4 +89,6 @@ This data source exports the following attributes in addition to the arguments a * `subnet_id` - The unique identifier for the subnet. * `tags` - Map of resource tags to associate with the resource. If configured with a provider [`default_tags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. * `update_token` - String token used when updating a firewall. +* `transit_gateway_id` - The unique identifier of the transit gateway associated with this firewall. +* `transit_gateway_owner_account_id` - The AWS account ID that owns the transit gateway. * `vpc_id` - Unique identifier of the VPC where AWS Network Firewall should create the firewall. diff --git a/website/docs/d/odb_cloud_autonomous_vm_cluster.html.markdown b/website/docs/d/odb_cloud_autonomous_vm_cluster.html.markdown new file mode 100644 index 000000000000..93994d0b85e6 --- /dev/null +++ b/website/docs/d/odb_cloud_autonomous_vm_cluster.html.markdown @@ -0,0 +1,84 @@ +--- +subcategory: "Oracle Database@AWS" +layout: "AWS: aws_odb_cloud_autonomous_vm_cluster" +page_title: "AWS: aws_odb_cloud_autonomous_vm_cluster" +description: |- + Terraform data source for managing cloud autonomous vm cluster resource in AWS for Oracle Database@AWS. +--- + +# Data Source: aws_odb_cloud_autonomous_vm_cluster + +Terraform data source for managing cloud autonomous vm cluster resource in AWS for Oracle Database@AWS. + +You can find out more about Oracle Database@AWS from [User Guide](https://docs.aws.amazon.com/odb/latest/UserGuide/what-is-odb.html). 
+ +## Example Usage + +### Basic Usage + +```terraform +data "aws_odb_cloud_autonomous_vm_cluster" "example" { + id = "example" +} +``` + +## Argument Reference + +This data source supports the following arguments: + +* `id` - (Required) The unique identifier of the cloud autonomous vm cluster. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). + +## Attribute Reference + +This data source exports the following attributes in addition to the arguments above: + +* `arn` - The Amazon Resource Name (ARN) for the Exadata infrastructure. +* `cloud_exadata_infrastructure_id` - Cloud exadata infrastructure id associated with this cloud autonomous VM cluster. +* `autonomous_data_storage_percentage` - The percentage of data storage currently in use for Autonomous Databases in the Autonomous VM cluster. +* `autonomous_data_storage_size_in_tbs` - The data storage size allocated for Autonomous Databases in the Autonomous VM cluster, in TB. +* `available_autonomous_data_storage_size_in_tbs` - The available data storage space for Autonomous Databases in the Autonomous VM cluster, in TB. +* `available_container_databases` - The number of Autonomous CDBs that you can create with the currently available storage. +* `available_cpus` - The number of CPU cores available for allocation to Autonomous Databases. +* `compute_model` - The compute model of the Autonomous VM cluster: ECPU or OCPU. +* `cpu_core_count` - The total number of CPU cores in the Autonomous VM cluster. +* `cpu_core_count_per_node` - The number of CPU cores enabled per node in the Autonomous VM cluster. +* `cpu_percentage` - The percentage of total CPU cores currently in use in the Autonomous VM cluster. +* `created_at` - The date and time when the Autonomous VM cluster was created. 
+* `data_storage_size_in_gbs` - The total data storage allocated to the Autonomous VM cluster, in GB. +* `data_storage_size_in_tbs` - The total data storage allocated to the Autonomous VM cluster, in TB. +* `odb_node_storage_size_in_gbs` - The local node storage allocated to the Autonomous VM cluster, in gigabytes (GB). +* `db_servers` - The list of database servers associated with the Autonomous VM cluster. +* `description` - The user-provided description of the Autonomous VM cluster. +* `display_name` - The display name of the Autonomous VM cluster. +* `domain` - The domain name of the Autonomous VM cluster. +* `exadata_storage_in_tbs_lowest_scaled_value` - The minimum value to which you can scale down the Exadata storage, in TB. +* `hostname` - The hostname of the Autonomous VM cluster. +* `is_mtls_enabled_vm_cluster` - Indicates whether mutual TLS (mTLS) authentication is enabled for the Autonomous VM cluster. +* `license_model` - The Oracle license model that applies to the Autonomous VM cluster. Valid values are LICENSE_INCLUDED or BRING_YOUR_OWN_LICENSE. +* `max_acds_lowest_scaled_value` - The minimum value to which you can scale down the maximum number of Autonomous CDBs. +* `memory_per_oracle_compute_unit_in_gbs` - The amount of memory allocated per Oracle Compute Unit, in GB. +* `memory_size_in_gbs` - The total amount of memory allocated to the Autonomous VM cluster, in gigabytes (GB). +* `node_count` - The number of database server nodes in the Autonomous VM cluster. +* `non_provisionable_autonomous_container_databases` - The number of Autonomous CDBs that can't be provisioned because of resource constraints. +* `oci_resource_anchor_name` - The name of the OCI resource anchor associated with this Autonomous VM cluster. +* `oci_url` - The URL for accessing the OCI console page for this Autonomous VM cluster. +* `ocid` - The Oracle Cloud Identifier (OCID) of the Autonomous VM cluster. 
+* `odb_network_id` - The unique identifier of the ODB network associated with this Autonomous VM cluster. +* `percent_progress` - The progress of the current operation on the Autonomous VM cluster, as a percentage. +* `provisionable_autonomous_container_databases` - The number of Autonomous CDBs that can be provisioned in the Autonomous VM cluster. +* `provisioned_autonomous_container_databases` - The number of Autonomous CDBs currently provisioned in the Autonomous VM cluster. +* `provisioned_cpus` - The number of CPU cores currently provisioned in the Autonomous VM cluster. +* `reclaimable_cpus` - The number of CPU cores that can be reclaimed from terminated or scaled-down Autonomous Databases. +* `reserved_cpus` - The number of CPU cores reserved for system operations and redundancy. +* `scan_listener_port_non_tls` - The SCAN listener port for non-TLS (TCP) protocol. The default is 1521. +* `scan_listener_port_tls` - The SCAN listener port for TLS (TCP) protocol. The default is 2484. +* `shape` - The shape of the Exadata infrastructure for the Autonomous VM cluster. +* `status` - The status of the Autonomous VM cluster. +* `status_reason` - Additional information about the current status of the Autonomous VM cluster. +* `time_database_ssl_certificate_expires` - The expiration date and time of the database SSL certificate. +* `time_ords_certificate_expires` - The expiration date and time of the Oracle REST Data Services (ORDS) certificate. +* `time_zone` - The time zone of the Autonomous VM cluster. +* `total_container_databases` - The total number of Autonomous Container Databases that can be created with the allocated local storage. +* `tags` - A map of tags assigned to the Autonomous VM cluster. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. 
+* `maintenance_window` - The maintenance window for the Autonomous VM cluster. diff --git a/website/docs/d/odb_cloud_autonomous_vm_clusters.html.markdown b/website/docs/d/odb_cloud_autonomous_vm_clusters.html.markdown new file mode 100644 index 000000000000..6ae3d386630b --- /dev/null +++ b/website/docs/d/odb_cloud_autonomous_vm_clusters.html.markdown @@ -0,0 +1,44 @@ +--- +subcategory: "Oracle Database@AWS" +layout: "AWS: aws_odb_cloud_autonomous_vm_clusters" +page_title: "AWS: aws_odb_cloud_autonomous_vm_clusters" +description: |- + Terraform data source for managing cloud autonomous vm clusters in AWS for Oracle Database@AWS. +--- + +# Data Source: aws_odb_cloud_autonomous_vm_clusters + +Terraform data source for managing cloud autonomous vm clusters in AWS for Oracle Database@AWS. + +You can find out more about Oracle Database@AWS from [User Guide](https://docs.aws.amazon.com/odb/latest/UserGuide/what-is-odb.html). + +## Example Usage + +### Basic Usage + +```terraform +data "aws_odb_cloud_autonomous_vm_clusters" "example" {} +``` + +## Argument Reference + +The following arguments are optional: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). + +## Attribute Reference + +This data source exports the following attributes in addition to the arguments above: + +* `cloud_autonomous_vm_clusters` - List of Cloud Autonomous VM Clusters. The list contains basic information about the cloud autonomous VM clusters. + +### cloud_autonomous_vm_clusters + +* `id` - The unique identifier of the cloud autonomous vm cluster. +* `arn` - The Amazon Resource Name (ARN) for the Exadata infrastructure. +* `cloud_exadata_infrastructure_id` - Cloud exadata infrastructure id associated with this cloud autonomous VM cluster. 
+* `odb_network_id` - The unique identifier of the ODB network associated with this Autonomous VM cluster. +* `oci_resource_anchor_name` - The name of the OCI resource anchor associated with this Autonomous VM cluster. +* `oci_url` - The URL for accessing the OCI console page for this Autonomous VM cluster. +* `ocid` - The Oracle Cloud Identifier (OCID) of the Autonomous VM cluster. +* `display_name` - The display name of the Autonomous VM cluster. diff --git a/website/docs/d/odb_cloud_exadata_infrastructure.html.markdown b/website/docs/d/odb_cloud_exadata_infrastructure.html.markdown new file mode 100644 index 000000000000..1c1652a2c800 --- /dev/null +++ b/website/docs/d/odb_cloud_exadata_infrastructure.html.markdown @@ -0,0 +1,75 @@ +--- +subcategory: "Oracle Database@AWS" +layout: "AWS: aws_odb_cloud_exadata_infrastructure" +page_title: "AWS: aws_odb_cloud_exadata_infrastructure" +description: |- + Terraform data source for managing exadata infrastructure resource in AWS for Oracle Database@AWS. +--- + +# Data Source: aws_odb_cloud_exadata_infrastructure + +Terraform data source for exadata infrastructure resource in AWS for Oracle Database@AWS. + +You can find out more about Oracle Database@AWS from [User Guide](https://docs.aws.amazon.com/odb/latest/UserGuide/what-is-odb.html). + +## Example Usage + +### Basic Usage + +```terraform +data "aws_odb_cloud_exadata_infrastructure" "example" { + id = "example" +} +``` + +## Argument Reference + +The following arguments are required: + +* `id` - (Required) The unique identifier of the Exadata infrastructure. + +The following arguments are optional: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
+ +## Attribute Reference + +This data source exports the following attributes in addition to the arguments above: + +* `activated_storage_count` - The number of storage servers requested for the Exadata infrastructure. +* `additional_storage_count` - The number of storage servers requested for the Exadata infrastructure. +* `availability_zone` - The name of the Availability Zone (AZ) where the Exadata infrastructure is located. +* `availability_zone_id` - The AZ ID of the AZ where the Exadata infrastructure is located. +* `arn` - The Amazon Resource Name (ARN) for the Exadata infrastructure. +* `id` - The unique identifier of the Exadata infrastructure. +* `compute_count` - The number of database servers for the Exadata infrastructure. +* `cpu_count` - The total number of CPU cores that are allocated to the Exadata infrastructure. +* `data_storage_size_in_tbs` - The size of the Exadata infrastructure's data disk group, in terabytes (TB). +* `db_node_storage_size_in_gbs` - The size of the storage available on each database node, in gigabytes (GB). +* `db_server_version` - The version of the Exadata infrastructure. +* `display_name` - The display name of the Exadata infrastructure. +* `last_maintenance_run_id` - The Oracle Cloud Identifier (OCID) of the last maintenance run for the Exadata infrastructure. +* `max_cpu_count` - The total number of CPU cores available on the Exadata infrastructure. +* `max_data_storage_in_tbs` - The total amount of data disk group storage, in terabytes (TB), that's available on the Exadata infrastructure. +* `max_db_node_storage_size_in_gbs` - The total amount of local node storage, in gigabytes (GB), that's available on the Exadata infrastructure. +* `max_memory_in_gbs` - The total amount of memory, in gigabytes (GB), that's available on the Exadata infrastructure. +* `memory_size_in_gbs` - The amount of memory, in gigabytes (GB), that's allocated on the Exadata infrastructure. 
+* `monthly_db_server_version` - The monthly software version of the database servers installed on the Exadata infrastructure. +* `monthly_storage_server_version` - The monthly software version of the storage servers installed on the Exadata infrastructure. +* `next_maintenance_run_id` - The OCID of the next maintenance run for the Exadata infrastructure. +* `oci_resource_anchor_name` - The name of the OCI resource anchor for the Exadata infrastructure. +* `oci_url` - The HTTPS link to the Exadata infrastructure in OCI. +* `ocid` - The OCID of the Exadata infrastructure in OCI. +* `percent_progress` - The amount of progress made on the current operation on the Exadata infrastructure expressed as a percentage. +* `shape` - The model name of the Exadata infrastructure. +* `status` - The status of the Exadata infrastructure. +* `status_reason` - Additional information about the status of the Exadata infrastructure. +* `storage_count` - The number of storage servers that are activated for the Exadata infrastructure. +* `storage_server_version` - The software version of the storage servers on the Exadata infrastructure. +* `total_storage_size_in_gbs` - The total amount of storage, in gigabytes (GB), on the Exadata infrastructure. +* `compute_model` - The OCI compute model used when you create or clone an instance: ECPU or OCPU. An ECPU is an abstracted measure of compute resources. ECPUs are based on the number of cores elastically allocated from a pool of compute and storage servers. An OCPU is a legacy physical measure of compute resources. OCPUs are based on the physical core of a processor with hyper-threading enabled. +* `created_at` - The time when the Exadata infrastructure was created. +* `database_server_type` - The database server model type of the Exadata infrastructure. For the list of valid model names, use the ListDbSystemShapes operation. +* `storage_server_type` - The storage server model type of the Exadata infrastructure. 
For the list of valid model names, use the ListDbSystemShapes operation. +* `maintenance_window` - The scheduling details of the maintenance window. Patching and system updates take place during the maintenance window. +* `tags` - (Optional) A map of tags to assign to the exadata infrastructure. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. diff --git a/website/docs/d/odb_cloud_exadata_infrastructures.html.markdown b/website/docs/d/odb_cloud_exadata_infrastructures.html.markdown new file mode 100644 index 000000000000..809874ccc73b --- /dev/null +++ b/website/docs/d/odb_cloud_exadata_infrastructures.html.markdown @@ -0,0 +1,42 @@ +--- +subcategory: "Oracle Database@AWS" +layout: "AWS: aws_odb_cloud_exadata_infrastructures" +page_title: "AWS: aws_odb_cloud_exadata_infrastructures" +description: |- + Terraform data source for managing exadata infrastructures in AWS for Oracle Database@AWS. +--- + +# Data Source: aws_odb_cloud_exadata_infrastructures + +Terraform data source for exadata infrastructures in AWS for Oracle Database@AWS. + +You can find out more about Oracle Database@AWS from [User Guide](https://docs.aws.amazon.com/odb/latest/UserGuide/what-is-odb.html). + +## Example Usage + +### Basic Usage + +```terraform +data "aws_odb_cloud_exadata_infrastructures" "example" {} +``` + +## Argument Reference + +The following arguments are optional: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
+ +## Attribute Reference + +This data source exports the following attributes in addition to the arguments above: + +* `cloud_exadata_infrastructures` - List of Cloud Exadata Infrastructures. Returns basic information about the Cloud Exadata Infrastructures. + +### cloud_exadata_infrastructures + +* `arn` - The Amazon Resource Name (ARN) for the Exadata infrastructure. +* `id` - The unique identifier of the Exadata infrastructure. +* `oci_resource_anchor_name` - The name of the OCI resource anchor for the Exadata infrastructure. +* `oci_url` - The HTTPS link to the Exadata infrastructure in OCI. +* `ocid` - The OCID of the Exadata infrastructure in OCI. +* `display_name` - The display name of the Exadata infrastructure. diff --git a/website/docs/d/odb_cloud_vm_cluster.html.markdown b/website/docs/d/odb_cloud_vm_cluster.html.markdown new file mode 100644 index 000000000000..55a0b2e77402 --- /dev/null +++ b/website/docs/d/odb_cloud_vm_cluster.html.markdown @@ -0,0 +1,77 @@ +--- +subcategory: "Oracle Database@AWS" +layout: "AWS: aws_odb_cloud_vm_cluster" +page_title: "AWS: aws_odb_cloud_vm_cluster" +description: |- + Terraform data source for managing cloud vm cluster resource in AWS for Oracle Database@AWS. +--- + +# Data Source: aws_odb_cloud_vm_cluster + +Terraform data source for cloud vm cluster in AWS for Oracle Database@AWS. + +You can find out more about Oracle Database@AWS from [User Guide](https://docs.aws.amazon.com/odb/latest/UserGuide/what-is-odb.html). + +## Example Usage + +### Basic Usage + +```terraform +data "aws_odb_cloud_vm_cluster" "example" { + id = "example-id" +} +``` + +## Argument Reference + +The following arguments are required: + +* `id` - (Required) The unique identifier of the cloud vm cluster. + +The following arguments are optional: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). + +## Attribute Reference + +This data source exports the following attributes in addition to the arguments above: + +* `arn` - The Amazon Resource Name (ARN) for the cloud vm cluster. +* `cloud_exadata_infrastructure_id` - The ID of the Cloud Exadata Infrastructure. +* `cluster_name` - The name of the Grid Infrastructure (GI) cluster. +* `cpu_core_count` - The number of CPU cores enabled on the VM cluster. +* `data_storage_size_in_tbs` - The size of the data disk group, in terabytes (TB), that's allocated for the VM cluster. +* `db_node_storage_size_in_gbs` - The amount of local node storage, in gigabytes (GB), that's allocated for the VM cluster. +* `db_servers` - The list of database servers for the VM cluster. +* `disk_redundancy` - The type of redundancy configured for the VM cluster. NORMAL is 2-way redundancy. HIGH is 3-way redundancy. +* `display_name` - The display name of the VM cluster. +* `domain` - The domain name of the VM cluster. +* `gi_version` - The software version of the Oracle Grid Infrastructure (GI) for the VM cluster. +* `hostname_prefix_computed` - The computed hostname prefix for the VM cluster. +* `is_local_backup_enabled` - Indicates whether database backups to local Exadata storage is enabled for the VM cluster. +* `is_sparse_disk_group_enabled` - Indicates whether the VM cluster is configured with a sparse disk group. +* `last_update_history_entry_id` - The Oracle Cloud ID (OCID) of the last maintenance update history entry. +* `license_model` - The Oracle license model applied to the VM cluster. +* `listener_port` - The port number configured for the listener on the VM cluster. +* `memory_size_in_gbs` - The amount of memory, in gigabytes (GB), that's allocated for the VM cluster. +* `node_count` - The number of nodes in the VM cluster. +* `ocid` - The OCID of the VM cluster. 
+* `oci_resource_anchor_name` - The name of the OCI Resource Anchor. +* `oci_url` - The HTTPS link to the VM cluster in OCI. +* `odb_network_id` - The ID of the ODB network. +* `percent_progress` - The amount of progress made on the current operation on the VM cluster, expressed as a percentage. +* `scan_dns_name` - The FQDN of the DNS record for the Single Client Access Name (SCAN) IP addresses that are associated with the VM cluster. +* `scan_dns_record_id` - The OCID of the DNS record for the SCAN IP addresses that are associated with the VM cluster. +* `scan_ip_ids` - The OCID of the SCAN IP addresses that are associated with the VM cluster. +* `shape` - The hardware model name of the Exadata infrastructure that's running the VM cluster. +* `ssh_public_keys` - The public key portion of one or more key pairs used for SSH access to the VM cluster. +* `status` - The status of the VM cluster. +* `status_reason` - Additional information about the status of the VM cluster. +* `storage_size_in_gbs` - The amount of local node storage, in gigabytes (GB), that's allocated to the VM cluster. +* `system_version` - The operating system version of the image chosen for the VM cluster. +* `timezone` - The time zone of the VM cluster. +* `vip_ids` - The virtual IP (VIP) addresses that are associated with the VM cluster. Oracle's Cluster Ready Services (CRS) creates and maintains one VIP address for each node in the VM cluster to enable failover. If one node fails, the VIP is reassigned to another active node in the cluster. +* `created_at` - The time when the VM cluster was created. +* `compute_model` - The OCI model compute model used when you create or clone an instance: ECPU or OCPU. An ECPU is an abstracted measure of compute resources. ECPUs are based on the number of cores elastically allocated from a pool of compute and storage servers. An OCPU is a legacy physical measure of compute resources. 
OCPUs are based on the physical core of a processor with hyper-threading enabled. +* `data_collection_options` - The set of diagnostic collection options enabled for the VM cluster. +* `iorm_config_cache` - The ExadataIormConfig cache details for the VM cluster. diff --git a/website/docs/d/odb_cloud_vm_clusters.html.markdown b/website/docs/d/odb_cloud_vm_clusters.html.markdown new file mode 100644 index 000000000000..d4bdbd8176c6 --- /dev/null +++ b/website/docs/d/odb_cloud_vm_clusters.html.markdown @@ -0,0 +1,44 @@ +--- +subcategory: "Oracle Database@AWS" +layout: "AWS: aws_odb_cloud_vm_clusters" +page_title: "AWS: aws_odb_cloud_vm_clusters" +description: |- + Terraform data source for retrieving all cloud vm clusters resource in AWS for Oracle Database@AWS. +--- + +# Data Source: aws_odb_cloud_vm_clusters + +Terraform data source for retrieving all cloud vm clusters AWS for Oracle Database@AWS. + +You can find out more about Oracle Database@AWS from [User Guide](https://docs.aws.amazon.com/odb/latest/UserGuide/what-is-odb.html). + +## Example Usage + +### Basic Usage + +```terraform +data "aws_odb_cloud_vm_clusters" "example" {} +``` + +## Argument Reference + +The following arguments are optional: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). + +## Attribute Reference + +This data source exports the following attributes in addition to the arguments above: + +* `cloud_vm_clusters` - List of Cloud VM Clusters. It returns only basic information about the cloud VM clusters. + +### cloud_vm_clusters + +* `id` - The unique identifier of the cloud vm cluster. +* `arn` - The Amazon Resource Name (ARN) for the cloud vm cluster. +* `cloud_exadata_infrastructure_id` - The ID of the Cloud Exadata Infrastructure. 
+* `oci_resource_anchor_name` - The name of the OCI Resource Anchor. +* `odb_network_id` - The ID of the ODB network. +* `oci_url` - The HTTPS link to the VM cluster in OCI. +* `ocid` - The OCID of the VM cluster. +* `display_name` - The display name of the VM cluster. diff --git a/website/docs/d/odb_db_node.html.markdown b/website/docs/d/odb_db_node.html.markdown new file mode 100644 index 000000000000..b1b4758bd0a6 --- /dev/null +++ b/website/docs/d/odb_db_node.html.markdown @@ -0,0 +1,67 @@ +--- +subcategory: "Oracle Database@AWS" +layout: "AWS: aws_odb_db_node" +page_title: "AWS: aws_odb_db_node" +description: |- + Terraform data source for managing db node linked to cloud vm cluster of Oracle Database@AWS. +--- + +# Data Source: aws_odb_db_node + +Terraform data source for manging db nodes linked to cloud vm cluster of Oracle Database@AWS. + +You can find out more about Oracle Database@AWS from [User Guide](https://docs.aws.amazon.com/odb/latest/UserGuide/what-is-odb.html). + +## Example Usage + +### Basic Usage + +```terraform +data "aws_odb_db_node" "example" { + cloud_vm_cluster_id = "cloud_vm_cluster_id" + id = "db_node_id" +} +``` + +## Argument Reference + +The following arguments are required: + +* `cloud_vm_cluster_id` - (Required) The unique identifier of the cloud vm cluster. +* `id` - (Required) The unique identifier of db node associated with vm cluster. + +The following arguments are optional: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). + +## Attribute Reference + +This data source exports the following attributes in addition to the arguments above: + +* `cloud_vm_cluster_id` - The ID of the cloud VM cluster. +* `status` - The current status of the DB node. 
+* `status_reason` - Additional information about the status of the DB node. +* `additional_details` - Additional information about the planned maintenance. +* `backup_ip_id` - The Oracle Cloud ID (OCID) of the backup IP address that's associated with the DB node. +* `backup_vnic2_id` - The OCID of the second backup VNIC. +* `backup_vnic_id` - The OCID of the backup VNIC. +* `cpu_core_count` - The number of CPU cores enabled on the DB node. +* `db_storage_size_in_gbs` - The amount of local node storage, in gigabytes (GB), allocated on the DB node. +* `db_server_id` - The unique identifier of the DB server that is associated with the DB node. +* `db_system_id` - The OCID of the DB system. +* `fault_domain` - The name of the fault domain the instance is contained in. +* `host_ip_id` - The OCID of the host IP address that's associated with the DB node. +* `hostname` - The host name for the DB node. +* `ocid` - The OCID of the DB node. +* `oci_resource_anchor_name` - The name of the OCI resource anchor for the DB node. +* `maintenance_type` - The type of database node maintenance. Either VMDB_REBOOT_MIGRATION or EXADBXS_REBOOT_MIGRATION. +* `memory_size_in_gbs` - The allocated memory in GBs on the DB node. +* `software_storage_size_in_gbs` - The size (in GB) of the block storage volume allocation for the DB system. +* `created_at` - The date and time when the DB node was created. +* `time_maintenance_window_end` - The end date and time of the maintenance window. +* `time_maintenance_window_start` - The start date and time of the maintenance window. +* `total_cpu_core_count` - The total number of CPU cores reserved on the DB node. +* `vnic2_id` - The OCID of the second VNIC. +* `vnic_id` - The OCID of the VNIC. +* `private_ip_address` - The private IP address assigned to the DB node. +* `floating_ip_address` - The floating IP address assigned to the DB node. 
diff --git a/website/docs/d/odb_db_nodes.html.markdown b/website/docs/d/odb_db_nodes.html.markdown new file mode 100644 index 000000000000..b045513deaac --- /dev/null +++ b/website/docs/d/odb_db_nodes.html.markdown @@ -0,0 +1,68 @@ +--- +subcategory: "Oracle Database@AWS" +layout: "AWS: aws_odb_db_nodes" +page_title: "AWS: aws_odb_db_nodes" +description: |- + Terraform data source for managing db nodes linked to cloud vm cluster of Oracle Database@AWS. +--- + +# Data Source: aws_odb_db_nodes + +Terraform data source for manging db nodes linked to cloud vm cluster of Oracle Database@AWS. + +You can find out more about Oracle Database@AWS from [User Guide](https://docs.aws.amazon.com/odb/latest/UserGuide/what-is-odb.html). + +## Example Usage + +### Basic Usage + +```terraform +data "aws_odb_db_nodes" "example" { + cloud_vm_cluster_id = "example" +} +``` + +## Argument Reference + +The following arguments are required: + +* `cloud_vm_cluster_id` - (Required) The unique identifier of the cloud vm cluster. + +The following arguments are optional: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). + +## Attribute Reference + +This data source exports the following attributes in addition to the arguments above: + +* `db_nodes` - The list of DB nodes along with their properties. + +### db_nodes + +* `additional_details` - Additional information about the planned maintenance. +* `backup_ip_id` - The Oracle Cloud ID (OCID) of the backup IP address that's associated with the DB node. +* `backup_vnic_2_id` - The OCID of the second backup virtual network interface card (VNIC) for the DB node. +* `backup_vnic_id` - The OCID of the backup VNIC for the DB node. +* `cpu_core_count` - The number of CPU cores enabled on the DB node. 
+* `created_at` - The date and time when the DB node was created. +* `db_node_arn` - The Amazon Resource Name (ARN) of the DB node. +* `db_node_id` - The unique identifier of the DB node. +* `db_node_storage_size_in_gbs` - The amount of local node storage, in gigabytes (GB), that's allocated on the DB node. +* `db_server_id` - The unique identifier of the database server that's associated with the DB node. +* `db_system_id` - The OCID of the DB system. +* `fault_domain` - The name of the fault domain where the DB node is located. +* `host_ip_id` - The OCID of the host IP address that's associated with the DB node. +* `hostname` - The host name for the DB node. +* `maintenance_type` - The type of maintenance the DB node is undergoing. +* `memory_size_in_gbs` - The amount of memory, in gigabytes (GB), that's allocated on the DB node. +* `oci_resource_anchor_name` - The name of the OCI resource anchor for the DB node. +* `ocid` - The OCID of the DB node. +* `software_storage_size_in_gb` - The size of the block storage volume, in gigabytes (GB), that's allocated for the DB system. This attribute applies only for virtual machine DB systems. +* `status` - The current status of the DB node. +* `status_reason` - Additional information about the status of the DB node. +* `time_maintenance_window_end` - The end date and time of the maintenance window. +* `time_maintenance_window_start` - The start date and time of the maintenance window. +* `total_cpu_core_count` - The total number of CPU cores reserved on the DB node. +* `vnic_2_id` - The OCID of the second VNIC. +* `vnic_id` - The OCID of the VNIC. 
diff --git a/website/docs/d/odb_db_server.html.markdown b/website/docs/d/odb_db_server.html.markdown new file mode 100644 index 000000000000..8fa61615f447 --- /dev/null +++ b/website/docs/d/odb_db_server.html.markdown @@ -0,0 +1,59 @@ +--- +subcategory: "Oracle Database@AWS" +layout: "AWS: aws_odb_db_server" +page_title: "AWS: aws_odb_db_server" +description: |- + Terraform data source for managing db server linked to exadata infrastructure of Oracle Database@AWS. +--- + +# Data Source: aws_odb_db_server + +Terraform data source for manging db server linked to exadata infrastructure of Oracle Database@AWS. + +You can find out more about Oracle Database@AWS from [User Guide](https://docs.aws.amazon.com/odb/latest/UserGuide/what-is-odb.html). + +## Example Usage + +### Basic Usage + +```terraform +data "aws_odb_db_server" "example" { + cloud_exadata_infrastructure_id = "exadata_infra_id" + id = "db_server_id" +} +``` + +## Argument Reference + +The following arguments are required: + +* `cloud_exadata_infrastructure_id` - (Required) The unique identifier of the cloud vm cluster. +* `id` - (Required) The unique identifier of db node associated with vm cluster. + +The following arguments are optional: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). + +## Attribute Reference + +This data source exports the following attributes in addition to the arguments above: + +* `autonomous_virtual_machine_ids` - The list of unique identifiers for the Autonomous VMs associated with this database server. +* `autonomous_vm_cluster_ids` - The OCID of the autonomous VM clusters that are associated with the database server. +* `compute_model` - The compute model of the database server. +* `status` - The status of the database server. 
+* `status_reason` - Additional information about the current status of the database server. +* `cpu_core_count` - The number of CPU cores enabled on the database server. +* `db_node_storage_size_in_gbs` - The allocated local node storage in GBs on the database server. +* `db_server_patching_details` - The scheduling details for the quarterly maintenance window. Patching and system updates take place during the maintenance window. +* `display_name` - The display name of the database server. +* `exadata_infrastructure_id` - The exadata infrastructure ID of the database server. +* `ocid` - The OCID of the database server to retrieve information about. +* `oci_resource_anchor_name` - The name of the OCI resource anchor. +* `max_cpu_count` - The total number of CPU cores available. +* `max_db_node_storage_in_gbs` - The total local node storage available in GBs. +* `max_memory_in_gbs` - The total memory available in GBs. +* `memory_size_in_gbs` - The allocated memory in GBs on the database server. +* `shape` - The shape of the database server. The shape determines the amount of CPU, storage, and memory resources available. +* `created_at` - The date and time when the database server was created. +* `vm_cluster_ids` - The OCID of the VM clusters that are associated with the database server. diff --git a/website/docs/d/odb_db_servers.html.markdown b/website/docs/d/odb_db_servers.html.markdown new file mode 100644 index 000000000000..66067f0ba465 --- /dev/null +++ b/website/docs/d/odb_db_servers.html.markdown @@ -0,0 +1,62 @@ +--- +subcategory: "Oracle Database@AWS" +layout: "AWS: aws_odb_db_servers" +page_title: "AWS: aws_odb_db_servers" +description: |- + Terraform data source for managing db servers linked to exadata infrastructure of Oracle Database@AWS. +--- + +# Data Source: aws_odb_db_servers + +Terraform data source for manging db servers linked to exadata infrastructure of Oracle Database@AWS. 
+ +You can find out more about Oracle Database@AWS from [User Guide](https://docs.aws.amazon.com/odb/latest/UserGuide/what-is-odb.html). + +## Example Usage + +### Basic Usage + +```terraform +data "aws_odb_db_servers" "example" { + cloud_exadata_infrastructure_id = "exadata_infra_id" +} +``` + +## Argument Reference + +The following arguments are required: + +* `cloud_exadata_infrastructure_id` - (Required) The unique identifier of the cloud vm cluster. + +The following arguments are optional: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). + +## Attribute Reference + +This data source exports the following attributes in addition to the arguments above: + +* `db_servers` - the list of DB servers along with their properties. + +### db_servers + +* `autonomous_virtual_machine_ids` - A list of unique identifiers for the Autonomous VMs. +* `autonomous_vm_cluster_ids` - A list of identifiers for the Autonomous VM clusters. +* `compute_model` - The OCI compute model used when you create or clone an instance: **ECPU** or **OCPU**. ECPUs are based on the number of cores elastically allocated from a pool of compute and storage servers, while OCPUs are based on the physical core of a processor with hyper-threading enabled. +* `cpu_core_count` - The number of CPU cores enabled on the database server. +* `created_at` - The date and time when the database server was created. +* `db_node_storage_size_in_gbs` - The amount of local node storage, in gigabytes (GB), that's allocated on the database server. +* `db_server_id` - The unique identifier of the database server. +* `db_server_patching_details` - The scheduling details for the quarterly maintenance window. 
Patching and system updates take place during the maintenance window. +* `display_name` - The user-friendly name of the database server. The name doesn't need to be unique. +* `exadata_infrastructure_id` - The ID of the Exadata infrastructure that hosts the database server. +* `max_cpu_count` - The total number of CPU cores available on the database server. +* `max_db_node_storage_in_gbs` - The total amount of local node storage, in gigabytes (GB), that's available on the database server. +* `max_memory_in_gbs` - The total amount of memory, in gigabytes (GB), that's available on the database server. +* `memory_size_in_gbs` - The amount of memory, in gigabytes (GB), that's allocated on the database server. +* `oci_resource_anchor_name` - The name of the OCI resource anchor for the database server. +* `ocid` - The OCID of the database server. +* `shape` - The hardware system model of the Exadata infrastructure that the database server is hosted on. The shape determines the amount of CPU, storage, and memory resources available. +* `status` - The current status of the database server. +* `status_reason` - Additional information about the status of the database server. +* `vm_cluster_ids` - The IDs of the VM clusters that are associated with the database server. diff --git a/website/docs/d/odb_db_system_shapes.html.markdown b/website/docs/d/odb_db_system_shapes.html.markdown new file mode 100644 index 000000000000..06309456783d --- /dev/null +++ b/website/docs/d/odb_db_system_shapes.html.markdown @@ -0,0 +1,61 @@ +--- +subcategory: "Oracle Database@AWS" +layout: "AWS: aws_odb_db_system_shapes" +page_title: "AWS: aws_odb_db_system_shapes" +description: |- + Terraform data source to retrieve available system shapes Oracle Database@AWS. +--- + +# Data Source: aws_odb_db_system_shapes + +Terraform data source to retrieve available system shapes Oracle Database@AWS. 
+ +You can find out more about Oracle Database@AWS from [User Guide](https://docs.aws.amazon.com/odb/latest/UserGuide/what-is-odb.html). + +## Example Usage + +### Basic Usage + +```terraform +data "aws_odb_db_system_shapes" "example" {} +``` + +## Argument Reference + +The following arguments are optional: + +* `availability_zone_id` - (Optional) The physical ID of the AZ, for example, use1-az4. This ID persists across accounts. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). + +## Attribute Reference + +This data source exports the following attributes in addition to the arguments above: + +* `db_system_shapes` - The list of shapes and their properties. Information about a hardware system model (shape) that's available for an Exadata infrastructure. The shape determines resources, such as CPU cores, memory, and storage, to allocate to the Exadata infrastructure. + +### db_system_shapes + +* `are_server_types_supported` - Indicates whether the hardware system model supports configurable database and server storage types. +* `available_core_count` - The maximum number of CPU cores that can be enabled for the shape. +* `available_core_count_per_node` - The maximum number of CPU cores per DB node that can be enabled for the shape. +* `available_data_storage_in_tbs` - The maximum amount of data storage, in terabytes (TB), that can be enabled for the shape. +* `available_data_storage_per_server_in_tbs` - The maximum amount of data storage, in terabytes (TB), that's available per storage server for the shape. +* `available_db_node_per_node_in_gbs` - The maximum amount of DB node storage, in gigabytes (GB), that's available per DB node for the shape. 
+* `available_db_node_storage_in_gbs` - The maximum amount of DB node storage, in gigabytes (GB), that can be enabled for the shape. +* `available_memory_in_gbs` - The maximum amount of memory, in gigabytes (GB), that can be enabled for the shape. +* `available_memory_per_node_in_gbs` - The maximum amount of memory, in gigabytes (GB), that's available per DB node for the shape. +* `compute_model` - The OCI compute model used when creating or cloning an instance: ECPU or OCPU. +* `core_count_increment` - The discrete number by which the CPU core count for the shape can be increased or decreased. +* `max_storage_count` - The maximum number of Exadata storage servers available for the shape. +* `maximum_node_count` - The maximum number of compute servers available for the shape. +* `min_core_count_per_node` - The minimum number of CPU cores that can be enabled per node for the shape. +* `min_data_storage_in_tbs` - The minimum amount of data storage, in terabytes (TB), that must be allocated for the shape. +* `min_db_node_storage_per_node_in_gbs` - The minimum amount of DB node storage, in gigabytes (GB), that must be allocated per DB node for the shape. +* `min_memory_per_node_in_gbs` - The minimum amount of memory, in gigabytes (GB), that must be allocated per DB node for the shape. +* `min_storage_count` - The minimum number of Exadata storage servers available for the shape. +* `minimum_core_count` - The minimum number of CPU cores that can be enabled for the shape. +* `minimum_node_count` - The minimum number of compute servers available for the shape. +* `name` - The name of the shape. +* `runtime_minimum_core_count` - The runtime minimum number of CPU cores that can be enabled for the shape. +* `shape_family` - The family of the shape. +* `shape_type` - The shape type, determined by the CPU hardware. 
diff --git a/website/docs/d/odb_gi_versions.html.markdown b/website/docs/d/odb_gi_versions.html.markdown new file mode 100644 index 000000000000..188a4c2c7ba4 --- /dev/null +++ b/website/docs/d/odb_gi_versions.html.markdown @@ -0,0 +1,46 @@ +--- +subcategory: "Oracle Database@AWS" +layout: "AWS: aws_odb_gi_versions" +page_title: "AWS: aws_odb_gi_versions" +description: |- + Terraform data source to retrieve available Grid Infrastructure versions of Oracle Database@AWS. +--- + +# Data Source: aws_odb_gi_versions + +Terraform data source to retrieve available Grid Infrastructure versions of Oracle Database@AWS. + +You can find out more about Oracle Database@AWS from [User Guide](https://docs.aws.amazon.com/odb/latest/UserGuide/what-is-odb.html). + +## Example Usage + +### Basic Usage + +```terraform +data "aws_odb_gi_versions" "example" {} + +data "aws_odb_gi_versions" "example_x11m" { + shape = "Exadata.X11M" +} + +data "aws_odb_gi_versions" "example_x9m" { + shape = "Exadata.X9M" +} +``` + +## Argument Reference + +The following arguments are optional: + +* `shape` - (Optional) The system shape. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). + +## Attribute Reference + +This data source exports the following attributes in addition to the arguments above: + +* `gi_versions` - Information about a specific version of Oracle Grid Infrastructure (GI) software that can be installed on a VM cluster. + +### gi_versions + +* `version` - The GI software version. 
diff --git a/website/docs/d/odb_network.html.markdown b/website/docs/d/odb_network.html.markdown new file mode 100644 index 000000000000..2b68850ed5a5 --- /dev/null +++ b/website/docs/d/odb_network.html.markdown @@ -0,0 +1,57 @@ +--- +subcategory: "Oracle Database@AWS" +layout: "AWS: aws_odb_network" +page_title: "AWS: aws_odb_network" +description: |- + Terraform data source to retrieve odb network for Oracle Database@AWS. +--- + +# Data Source: aws_odb_network + +Terraform data source to retrieve a network resource in AWS for Oracle Database@AWS. + +## Example Usage + +### Basic Usage + +```terraform + +data "aws_odb_network" "example" { + id = "example" +} +``` + +## Argument Reference + +The following arguments are required: + +* `id` - (Required) Unique identifier of the odb network resource. + +The following arguments are optional: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). + +## Attribute Reference + +This data source exports the following attributes in addition to the arguments above: + +* `id` - Unique identifier of the odb network resource. +* `arn` - Amazon Resource Name (ARN) of the odb network resource. +* `display_name` - Display name for the network resource. +* `availability_zone_id` - The AZ ID of the AZ where the ODB network is located. +* `availability_zone` - The availability zone where the ODB network is located. +* `backup_subnet_cidr` - The CIDR range of the backup subnet for the ODB network. +* `client_subnet_cidr` - The CIDR notation for the network resource. +* `custom_domain_name` - The name of the custom domain that the network is located in. +* `default_dns_prefix` - The default DNS prefix for the network resource. 
+* `oci_network_anchor_id` - The unique identifier of the OCI network anchor for the ODB network. +* `oci_network_anchor_url` - The URL of the OCI network anchor for the ODB network. +* `oci_resource_anchor_name` - The name of the OCI resource anchor for the ODB network. +* `oci_vcn_id` - The unique identifier Oracle Cloud ID (OCID) of the OCI VCN for the ODB network. +* `oci_vcn_url` - The URL of the OCI VCN for the ODB network. +* `percent_progress` - The amount of progress made on the current operation on the ODB network, expressed as a percentage. +* `peered_cidrs` - The list of CIDR ranges from the peered VPC that are allowed access to the ODB network. Please refer odb network peering documentation. +* `status` - The status of the network resource. +* `status_reason` - Additional information about the current status of the ODB network. +* `created_at` - The date and time when the ODB network was created. +* `managed_services` - The managed services configuration for the ODB network. diff --git a/website/docs/d/odb_network_peering_connection.html.markdown b/website/docs/d/odb_network_peering_connection.html.markdown new file mode 100644 index 000000000000..f22da9b49f32 --- /dev/null +++ b/website/docs/d/odb_network_peering_connection.html.markdown @@ -0,0 +1,48 @@ +--- +subcategory: "Oracle Database@AWS" +layout: "AWS: aws_odb_network_peering_connection" +page_title: "AWS: aws_odb_network_peering_connection" +description: |- + Terraform data source for managing oracle database network peering resource in AWS. +--- + +# Data Source: aws_odb_network_peering_connection + +Terraform data source for managing oracle database network peering resource in AWS. + +You can find out more about Oracle Database@AWS from [User Guide](https://docs.aws.amazon.com/odb/latest/UserGuide/what-is-odb.html). 
+ +## Example Usage + +### Basic Usage + +```terraform +data "aws_odb_network_peering_connection" "example" { + id = "example" +} +``` + +## Argument Reference + +The following arguments are required: + +* `id` - (Required) The unique identifier of the ODB network peering connection. + +The following arguments are optional: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). + +## Attribute Reference + +This data source exports the following attributes in addition to the arguments above: + +* `display_name` - Display name of the ODB network peering connection. +* `status` - Status of the ODB network peering connection. +* `status_reason` - The reason for the current status of the ODB network peering connection. +* `odb_network_arn` - ARN of the ODB network that is peered. +* `arn` - The Amazon Resource Name (ARN) for the ODB network peering connection. +* `peer_network_arn` - ARN of the peer network. +* `odb_peering_connection_type` - Type of the ODB peering connection. +* `created_at` - Created time of the ODB network peering connection. +* `percent_progress` - Progress of the ODB network peering connection. +* `tags` - Tags applied to the resource. diff --git a/website/docs/d/odb_network_peering_connections.html.markdown b/website/docs/d/odb_network_peering_connections.html.markdown new file mode 100644 index 000000000000..dff163e01ed1 --- /dev/null +++ b/website/docs/d/odb_network_peering_connections.html.markdown @@ -0,0 +1,41 @@ +--- +subcategory: "Oracle Database@AWS" +layout: "AWS: aws_odb_network_peering_connections" +page_title: "AWS: aws_odb_network_peering_connections" +description: |- + Terraform data source for retrieving all database network peering connections in Oracle Database@AWS. 
+--- + +# Data Source: aws_odb_network_peering_connections + +Terraform data source for retrieving all Oracle Database network peering resources in Oracle Database@AWS. + +You can find out more about Oracle Database@AWS from [User Guide](https://docs.aws.amazon.com/odb/latest/UserGuide/what-is-odb.html). + +## Example Usage + +### Basic Usage + +```terraform +data "aws_odb_network_peering_connections" "example" {} +``` + +## Argument Reference + +The following arguments are optional: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). + +## Attribute Reference + +This data source exports the following attributes in addition to the arguments above: + +* `odb_peering_connections` - The list of ODB peering connections. Each entry is a summary of an ODB peering connection. + +### odb_peering_connections + +* `id` - The unique identifier of the ODB network peering connection. +* `arn` - The Amazon Resource Name (ARN) for the ODB network peering connection. +* `display_name` - Display name of the ODB network peering connection. +* `odb_network_arn` - ARN of the ODB network that is peered. +* `peer_network_arn` - ARN of the peer network. diff --git a/website/docs/d/odb_networks.html.markdown b/website/docs/d/odb_networks.html.markdown new file mode 100644 index 000000000000..9b24a273e2c0 --- /dev/null +++ b/website/docs/d/odb_networks.html.markdown @@ -0,0 +1,41 @@ +--- +subcategory: "Oracle Database@AWS" +layout: "AWS: aws_odb_networks" +page_title: "AWS: aws_odb_networks" +description: |- + Terraform data source to retrieve ODB networks for Oracle Database@AWS. +--- + +# Data Source: aws_odb_networks + +Terraform data source to retrieve networks from AWS for Oracle Database@AWS. 
+ +## Example Usage + +### Basic Usage + +```terraform + +data "aws_odb_networks" "example" {} +``` + +## Argument Reference + +The following arguments are optional: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). + +## Attribute Reference + +This resource exports the following attributes in addition to the arguments above: + +* `odb_networks` - List of odb networks returns basic information about odb networks. + +### odb_networks + +* `id` - Unique identifier of the odb network resource. +* `arn` - Amazon Resource Name (ARN) of the odb network resource. +* `oci_network_anchor_id` - The unique identifier of the OCI network anchor for the ODB network. +* `oci_vcn_url` - The URL of the OCI VCN for the ODB network. +* `oci_vcn_id` - The unique identifier Oracle Cloud ID (OCID) of the OCI VCN for the ODB network. +* `display_name` - Display name for the network resource. diff --git a/website/docs/d/quicksight_analysis.html.markdown b/website/docs/d/quicksight_analysis.html.markdown index 0ba1c342ed68..19ded3ac070c 100644 --- a/website/docs/d/quicksight_analysis.html.markdown +++ b/website/docs/d/quicksight_analysis.html.markdown @@ -24,9 +24,9 @@ data "aws_quicksight_analysis" "example" { This data source supports the following arguments: -* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `analysis_id` - (Required) Identifier for the analysis. -* `aws_account_id` - (Optional) AWS account ID. +* `aws_account_id` - (Optional) AWS account ID. 
Defaults to automatically determined account ID of the Terraform AWS provider. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). ## Attribute Reference diff --git a/website/docs/d/quicksight_data_set.html.markdown b/website/docs/d/quicksight_data_set.html.markdown index fce943b46428..81f870982808 100644 --- a/website/docs/d/quicksight_data_set.html.markdown +++ b/website/docs/d/quicksight_data_set.html.markdown @@ -24,9 +24,9 @@ data "aws_quicksight_data_set" "example" { This data source supports the following arguments: -* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `aws_account_id` - (Optional) AWS account ID. Defaults to automatically determined account ID of the Terraform AWS provider. * `data_set_id` - (Required) Identifier for the data set. -* `aws_account_id` - (Optional) AWS account ID. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
## Attribute Reference diff --git a/website/docs/d/quicksight_group.html.markdown b/website/docs/d/quicksight_group.html.markdown index 5be1cc4e9bf3..27f064df37ae 100644 --- a/website/docs/d/quicksight_group.html.markdown +++ b/website/docs/d/quicksight_group.html.markdown @@ -30,9 +30,9 @@ The following arguments are required: The following arguments are optional: -* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). -* `aws_account_id` - (Optional) AWS account ID. +* `aws_account_id` - (Optional) AWS account ID. Defaults to automatically determined account ID of the Terraform AWS provider. * `namespace` - (Optional) QuickSight namespace. Defaults to `default`. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). ## Attribute Reference diff --git a/website/docs/d/quicksight_theme.html.markdown b/website/docs/d/quicksight_theme.html.markdown index 732b934e848f..75b4d736b389 100644 --- a/website/docs/d/quicksight_theme.html.markdown +++ b/website/docs/d/quicksight_theme.html.markdown @@ -28,8 +28,8 @@ The following arguments are required: The following arguments are optional: +* `aws_account_id` - AWS account ID. Defaults to automatically determined account ID of the Terraform AWS provider. * `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). -* `aws_account_id` - AWS account ID. ## Attribute Reference diff --git a/website/docs/d/quicksight_user.html.markdown b/website/docs/d/quicksight_user.html.markdown index 526f8d56bcdb..aec9e82c9579 100644 --- a/website/docs/d/quicksight_user.html.markdown +++ b/website/docs/d/quicksight_user.html.markdown @@ -30,9 +30,9 @@ The following arguments are required: The following arguments are optional: -* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). -* `aws_account_id` - (Optional) AWS account ID. +* `aws_account_id` - (Optional) AWS account ID. Defaults to automatically determined account ID of the Terraform AWS provider. * `namespace` - (Optional) QuickSight namespace. Defaults to `default`. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). ## Attribute Reference @@ -40,6 +40,7 @@ This data source exports the following attributes in addition to the arguments a * `active` - The active status of user. When you create an Amazon QuickSight user that’s not an IAM user or an Active Directory user, that user is inactive until they sign in and provide a password. * `arn` - The Amazon Resource Name (ARN) for the user. +* `custom_permissions_name` - The custom permissions profile associated with this user. * `email` - The user's email address. * `identity_type` - The type of identity authentication used by the user. 
* `principal_id` - The principal ID of the user. diff --git a/website/docs/d/ram_resource_share.html.markdown b/website/docs/d/ram_resource_share.html.markdown index 8a310e31297c..b00590488668 100644 --- a/website/docs/d/ram_resource_share.html.markdown +++ b/website/docs/d/ram_resource_share.html.markdown @@ -39,7 +39,7 @@ This data source supports the following arguments: * `name` - (Optional) Name of the resource share to retrieve. * `resource_owner` (Required) Owner of the resource share. Valid values are `SELF` or `OTHER-ACCOUNTS`. * `resource_share_status` (Optional) Specifies that you want to retrieve details of only those resource shares that have this status. Valid values are `PENDING`, `ACTIVE`, `FAILED`, `DELETING`, and `DELETED`. -* `filter` - (Optional) Filter used to scope the list e.g., by tags. See [related docs] (https://docs.aws.amazon.com/ram/latest/APIReference/API_TagFilter.html). +* `filter` - (Optional) Filter used to scope the list of owned shares e.g., by tags. See [related docs] (https://docs.aws.amazon.com/ram/latest/APIReference/API_TagFilter.html). * `name` - (Required) Name of the tag key to filter on. * `values` - (Required) Value of the tag key. diff --git a/website/docs/d/rds_global_cluster.html.markdown b/website/docs/d/rds_global_cluster.html.markdown new file mode 100644 index 000000000000..2a89a4e14deb --- /dev/null +++ b/website/docs/d/rds_global_cluster.html.markdown @@ -0,0 +1,49 @@ +--- +subcategory: "RDS (Relational Database)" +layout: "aws" +page_title: "AWS: aws_rds_global_cluster" +description: |- + Terraform data source for managing an AWS RDS (Relational Database) Global Cluster. +--- + +# Data Source: aws_rds_global_cluster + +Terraform data source for managing an AWS RDS (Relational Database) Global Cluster. 
+ +## Example Usage + +### Basic Usage + +```terraform +data "aws_rds_global_cluster" "example" { + identifier = aws_rds_global_cluster.test.global_cluster_identifier +} +``` + +## Argument Reference + +The following arguments are required: + +* `identifier` - (Required) The global cluster identifier of the RDS global cluster. + +The following arguments are optional: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). + +## Attribute Reference + +This data source exports the following attributes in addition to the arguments above: + +* `arn` - RDS Global Cluster Amazon Resource Name (ARN). +* `database_name` - Name of the automatically created database on cluster creation. +* `deletion_protection` - If the Global Cluster should have deletion protection enabled. The database can't be deleted when this value is set to `true`. +* `endpoint` - The endpoint for the Global Cluster. +* `engine` - Name of the database engine. +* `engine_lifecycle_support` - The current lifecycle support status of the database engine for this Global Cluster. +* `engine_version` - Version of the database engine for this Global Cluster. +* `storage_encrypted` - Whether the DB cluster is encrypted. +* `members` - Set of objects containing Global Cluster members. + * `db_cluster_arn` - Amazon Resource Name (ARN) of member DB Cluster. + * `is_writer` - Whether the member is the primary DB Cluster. +* `resource_id` - AWS Region-unique, immutable identifier for the global database cluster. +* `tags` - A map of tags assigned to the Global Cluster. 
diff --git a/website/docs/d/rds_reserved_instance_offering.html.markdown b/website/docs/d/rds_reserved_instance_offering.html.markdown index aadde1b6727d..983d2ed94270 100644 --- a/website/docs/d/rds_reserved_instance_offering.html.markdown +++ b/website/docs/d/rds_reserved_instance_offering.html.markdown @@ -31,7 +31,7 @@ This data source supports the following arguments: * `duration` - (Required) Duration of the reservation in years or seconds. Valid values are `1`, `3`, `31536000`, `94608000` * `multi_az` - (Required) Whether the reservation applies to Multi-AZ deployments. * `offering_type` - (Required) Offering type of this reserved DB instance. Valid values are `No Upfront`, `Partial Upfront`, `All Upfront`. -* `product_description` - (Required) Description of the reserved DB instance. +* `product_description` - (Required) Description of the reserved DB instance. Example values are `postgresql`, `aurora-postgresql`, `mysql`, `aurora-mysql`, `mariadb`. ## Attribute Reference diff --git a/website/docs/d/s3_access_point.html.markdown b/website/docs/d/s3_access_point.html.markdown new file mode 100644 index 000000000000..a0c4c98e3925 --- /dev/null +++ b/website/docs/d/s3_access_point.html.markdown @@ -0,0 +1,48 @@ +--- +subcategory: "S3 Control" +layout: "aws" +page_title: "AWS: aws_s3_access_point" +description: |- + Provides details about a specific S3 access point +--- + +# Data Source: aws_s3_access_point + +Provides details about a specific S3 access point. + +## Example Usage + +```terraform +data "aws_s3_access_point" "example" { + name = "example-access-point" +} +``` + +## Argument Reference + +This data source supports the following arguments: + +* `account_id` - (Optional) AWS account ID for the account that owns the specified access point. +* `name` - (Required) Name of the access point. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). + +## Attribute Reference + +This data source exports the following attributes in addition to the arguments above: + +* `alias` - Access point alias. +* `arn` - Access point ARN. +* `bucket` - Name of the bucket associated with the access point. +* `bucket_account_id` - AWS account ID associated with the S3 bucket associated with the access point. +* `data_source_id` - Unique identifier for the data source of the access point. +* `data_source_type` - Type of the data source that the access point is attached to. +* `endpoints` - VPC endpoint for the access point. +* `network_origin` - Indicates whether the access point allows access from the public Internet. +* `public_access_block_configuration` - `PublicAccessBlock` configuration for the access point. + * `block_public_acls` - Whether Amazon S3 blocks public ACLs for buckets in this account. + * `block_public_policy` - Whether Amazon S3 blocks public bucket policies for buckets in this account. + * `ignore_public_acls` - Whether Amazon S3 ignores public ACLs for buckets in this account. + * `restrict_public_buckets` - Whether Amazon S3 restricts public bucket policies for buckets in this account. +* `tags` - Tags assigned to the access point. +* `vpc_configuration` - VPC configuration for the access point. + * `vpc_id` - Access point will only allow connections from this VPC. 
diff --git a/website/docs/d/secretsmanager_secret_rotation.html.markdown b/website/docs/d/secretsmanager_secret_rotation.html.markdown index 5525ed0b175f..86da4794170a 100644 --- a/website/docs/d/secretsmanager_secret_rotation.html.markdown +++ b/website/docs/d/secretsmanager_secret_rotation.html.markdown @@ -31,6 +31,12 @@ This data source supports the following arguments: This data source exports the following attributes in addition to the arguments above: -* `rotation_enabled` - ARN of the secret. -* `rotation_lambda_arn` - Decrypted part of the protected secret information that was originally provided as a string. -* `rotation_rules` - Decrypted part of the protected secret information that was originally provided as a binary. Base64 encoded. +* `rotation_enabled` - Specifies whether automatic rotation is enabled for this secret. +* `rotation_lambda_arn` - Amazon Resource Name (ARN) of the lambda function used for rotation. +* `rotation_rules` - Configuration block for rotation rules. See [`rotation_rules`](#rotation_rules) below. + +### rotation_rules + +* `automatically_after_days` - Number of days between automatic scheduled rotations of the secret. +* `duration` - Length of the rotation window in hours. +* `schedule_expression` - A `cron()` or `rate()` expression that defines the schedule for rotating the secret. diff --git a/website/docs/d/securityhub_standards_control_associations.html.markdown b/website/docs/d/securityhub_standards_control_associations.html.markdown index 7439e1232168..d7b6baf90562 100644 --- a/website/docs/d/securityhub_standards_control_associations.html.markdown +++ b/website/docs/d/securityhub_standards_control_associations.html.markdown @@ -6,7 +6,7 @@ description: |- Terraform data source for managing an AWS Security Hub Standards Control Associations. 
--- -# Resource: aws_securityhub_standards_control_associations +# Data Source: aws_securityhub_standards_control_associations Terraform data source for managing an AWS Security Hub Standards Control Associations. diff --git a/website/docs/d/sesv2_email_identity.html.markdown b/website/docs/d/sesv2_email_identity.html.markdown index c751b2992a88..61a995bbe7db 100644 --- a/website/docs/d/sesv2_email_identity.html.markdown +++ b/website/docs/d/sesv2_email_identity.html.markdown @@ -41,4 +41,5 @@ This data source exports the following attributes in addition to the arguments a * `tokens` - If you used Easy DKIM to configure DKIM authentication for the domain, then this object contains a set of unique strings that you use to create a set of CNAME records that you add to the DNS configuration for your domain. When Amazon SES detects these records in the DNS configuration for your domain, the DKIM authentication process is complete. If you configured DKIM authentication for the domain by providing your own public-private key pair, then this object contains the selector for the public key. * `identity_type` - The email identity type. Valid values: `EMAIL_ADDRESS`, `DOMAIN`. * `tags` - Key-value mapping of resource tags. +* `verification_status` - The verification status of the identity. The status can be one of the following: `PENDING`, `SUCCESS`, `FAILED`, `TEMPORARY_FAILURE`, and `NOT_STARTED`. * `verified_for_sending_status` - Specifies whether or not the identity is verified. diff --git a/website/docs/d/signer_signing_profile.html.markdown b/website/docs/d/signer_signing_profile.html.markdown index bc6ae22f6259..897dae9e71bf 100644 --- a/website/docs/d/signer_signing_profile.html.markdown +++ b/website/docs/d/signer_signing_profile.html.markdown @@ -34,6 +34,9 @@ This data source exports the following attributes in addition to the arguments a * `platform_id` - ID of the platform that is used by the target signing profile. 
* `revocation_record` - Revocation information for a signing profile. * `signature_validity_period` - The validity period for a signing job. +* `signing_material` - AWS Certificate Manager certificate that will be used to sign code with the new signing profile. + * `certificate_arn` - ARN of the certificate used for signing. +* `signing_parameters` - Map of key-value pairs for signing. * `status` - Status of the target signing profile. * `tags` - List of tags associated with the signing profile. * `version` - Current version of the signing profile. diff --git a/website/docs/d/ssm_parameter.html.markdown b/website/docs/d/ssm_parameter.html.markdown index 70f1f187d07c..eed18304c737 100644 --- a/website/docs/d/ssm_parameter.html.markdown +++ b/website/docs/d/ssm_parameter.html.markdown @@ -12,12 +12,22 @@ Provides an SSM Parameter data source. ## Example Usage +### Default + ```terraform data "aws_ssm_parameter" "foo" { name = "foo" } ``` +### With version + +```terraform +data "aws_ssm_parameter" "foo" { + name = "foo:3" +} +``` + ~> **Note:** The unencrypted value of a SecureString will be stored in the raw state as plain-text. [Read more about sensitive data in state](https://www.terraform.io/docs/state/sensitive-data.html). @@ -28,7 +38,7 @@ data "aws_ssm_parameter" "foo" { This data source supports the following arguments: * `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). -* `name` - (Required) Name of the parameter. +* `name` - (Required) Name of the parameter. To query by parameter version use `name:version` (e.g., `foo:3`). * `with_decryption` - (Optional) Whether to return decrypted `SecureString` value. Defaults to `true`. 
## Attribute Reference diff --git a/website/docs/d/ssm_patch_baseline.html.markdown b/website/docs/d/ssm_patch_baseline.html.markdown index 01f32b663e6b..77e528816d50 100644 --- a/website/docs/d/ssm_patch_baseline.html.markdown +++ b/website/docs/d/ssm_patch_baseline.html.markdown @@ -61,6 +61,7 @@ This data source exports the following attributes in addition to the arguments a * `patch_filter` - Patch filter group that defines the criteria for the rule. * `key` - Key for the filter. * `values` - Value for the filter. +* `available_security_updates_compliance_status` - Indicates the compliance status of managed nodes for which security-related patches are available but were not approved. Supported for Windows Server managed nodes only. * `global_filter` - Set of global filters used to exclude patches from the baseline. * `key` - Key for the filter. * `values` - Value for the filter. diff --git a/website/docs/d/verifiedpermissions_policy_store.html.markdown b/website/docs/d/verifiedpermissions_policy_store.html.markdown index 969fd41898c4..54e916770bb1 100644 --- a/website/docs/d/verifiedpermissions_policy_store.html.markdown +++ b/website/docs/d/verifiedpermissions_policy_store.html.markdown @@ -33,6 +33,7 @@ This data source exports the following attributes in addition to the arguments a * `arn` - The ARN of the Policy Store. * `created_date` - The date the Policy Store was created. +* `deletion_protection` - Whether the policy store can be deleted. * `last_updated_date` - The date the Policy Store was last updated. * `tags` - Map of key-value pairs associated with the policy store. * `validation_settings` - Validation settings for the policy store. 
diff --git a/website/docs/d/vpc_ipam.html.markdown b/website/docs/d/vpc_ipam.html.markdown index 028d4f3e91c7..157e1bbf37a5 100644 --- a/website/docs/d/vpc_ipam.html.markdown +++ b/website/docs/d/vpc_ipam.html.markdown @@ -38,6 +38,7 @@ This data source exports the following attributes in addition to the arguments a * `enable_private_gua` - If private GUA is enabled. * `id` - ID of the IPAM resource. * `ipam_region` - Region that the IPAM exists in. +* `metered_account` - AWS account that is charged for active IP addresses managed in IPAM. * `operating_regions` - Regions that the IPAM is configured to operate in. * `owner_id` - ID of the account that owns this IPAM. * `private_default_scope_id` - ID of the default private scope. diff --git a/website/docs/d/vpn_connection.html.markdown b/website/docs/d/vpn_connection.html.markdown new file mode 100644 index 000000000000..62c8f77c3d68 --- /dev/null +++ b/website/docs/d/vpn_connection.html.markdown @@ -0,0 +1,74 @@ +--- +subcategory: "VPN (Site-to-Site)" +layout: "aws" +page_title: "AWS: aws_vpn_connection" +description: |- + Fetches details of a Site-to-Site VPN connection. A Site-to-Site VPN connection is an Internet Protocol security (IPsec) VPN connection between a VPC and an on-premises network. +--- + +# Data Source: aws_vpn_connection + +Fetches details of a Site-to-Site VPN connection. A Site-to-Site VPN connection is an Internet Protocol security (IPsec) VPN connection between a VPC and an on-premises network. 
+ +## Example Usage + +### Basic Usage + +```terraform +data "aws_vpn_connection" "example" { + filter { + name = "customer-gateway-id" + values = ["cgw-1234567890"] + } +} + +output "vpn_connection_id" { + value = data.aws_vpn_connection.example.vpn_connection_id +} +``` + +### Find by VPN Connection ID + +```terraform +data "aws_vpn_connection" "example" { + vpn_connection_id = "vpn-abcd1234567890" +} + +output "gateway_association_state" { + value = data.aws_vpn_connection.example.gateway_association_state +} +``` + +## Argument Reference + +This data source supports the following arguments: + +* `vpn_connection_id` - (Optional) Identifier of the EC2 VPN Connection. +* `filter` - (Optional) Configuration block(s) for filtering. Detailed below. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). + +### Filter Configuration Block + +The `filter` configuration block supports the following arguments: + +* `name` - (Required) Name of the filter field. Valid values can be found in the [EC2 `DescribeVPNConnections` API Reference](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeVpnConnections.html). +* `values` - (Required) Set of values that are accepted for the given filter field. Results will be selected if any given value matches. + +## Attribute Reference + +This data source exports the following attributes in addition to the arguments above: + +* `category` - Category of the VPN connection. A value of VPN indicates an AWS VPN connection. A value of VPN-Classic indicates an AWS Classic VPN connection. +* `core_network_arn` - ARN of the core network. +* `core_network_attachment_arn` - ARN of the core network attachment. 
+* `customer_gateway_configuration` - Configuration information for the VPN connection's customer gateway (in the native XML format). +* `customer_gateway_id` - ID of the customer gateway at your end of the VPN connection. +* `gateway_association_state` - Current state of the gateway association. +* `pre_shared_key_arn` - ARN of the Secrets Manager secret storing the pre-shared key(s) for the VPN connection. +* `routes` - List of static routes associated with the VPN connection. +* `state` - Current state of the VPN connection. +* `tags` - Tags associated with the VPN Connection. +* `transit_gateway_id` - ID of a transit gateway associated with the VPN connection. +* `type` - Type of VPN connection. Currently the only supported type is ipsec.1. +* `vgw_telemetries` - List of objects containing information about the VPN tunnel. +* `vpn_gateway_id` - ID of a virtual private gateway associated with the VPN connection. diff --git a/website/docs/d/wafv2_web_acl.html.markdown b/website/docs/d/wafv2_web_acl.html.markdown index e7818cad9a91..563337705f42 100644 --- a/website/docs/d/wafv2_web_acl.html.markdown +++ b/website/docs/d/wafv2_web_acl.html.markdown @@ -12,6 +12,8 @@ Retrieves the summary of a WAFv2 Web ACL. ## Example Usage +### Lookup by name + ```terraform data "aws_wafv2_web_acl" "example" { name = "some-web-acl" @@ -19,12 +21,27 @@ data "aws_wafv2_web_acl" "example" { } ``` +### Lookup by associated resource + +```terraform +data "aws_wafv2_web_acl" "alb_example" { + resource_arn = "arn:aws:elasticloadbalancing:us-east-1:123456789012:loadbalancer/app/my-alb/xxxxx" + scope = "REGIONAL" +} + +data "aws_wafv2_web_acl" "cloudfront_example" { + resource_arn = "arn:aws:cloudfront::123456789012:distribution/XXX" + scope = "CLOUDFRONT" +} +``` + ## Argument Reference This data source supports the following arguments: +* `name` - (Optional) Name of the WAFv2 Web ACL. Exactly one of `name` or `resource_arn` must be specified. 
* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). -* `name` - (Required) Name of the WAFv2 Web ACL. +* `resource_arn` - (Optional) ARN of the AWS resource associated with the Web ACL. This can be an ARN of an Application Load Balancer, Amazon API Gateway REST API, AWS AppSync GraphQL API, Amazon Cognito user pool, AWS App Runner service, AWS Verified Access instance, or AWS Amplify application. Exactly one of `name` or `resource_arn` must be specified. * `scope` - (Required) Specifies whether this is for an AWS CloudFront distribution or for a regional application. Valid values are `CLOUDFRONT` or `REGIONAL`. To work with CloudFront, you must also specify the region `us-east-1` (N. Virginia) on the AWS provider. ## Attribute Reference diff --git a/website/docs/d/workspaces_workspace.html.markdown b/website/docs/d/workspaces_workspace.html.markdown index 0677b374d607..3d66b3105fe3 100644 --- a/website/docs/d/workspaces_workspace.html.markdown +++ b/website/docs/d/workspaces_workspace.html.markdown @@ -6,7 +6,7 @@ description: |- Get information about a WorkSpace in AWS Workspaces Service. --- -# Resource: aws_workspaces_workspace +# Data Source: aws_workspaces_workspace Use this data source to get information about a workspace in [AWS Workspaces](https://docs.aws.amazon.com/workspaces/latest/adminguide/amazon-workspaces.html) Service. 
diff --git a/website/docs/ephemeral-resources/cognito_identity_openid_token_for_developer_identity.markdown b/website/docs/ephemeral-resources/cognito_identity_openid_token_for_developer_identity.markdown index 64a13f45f055..f7c321734c32 100644 --- a/website/docs/ephemeral-resources/cognito_identity_openid_token_for_developer_identity.markdown +++ b/website/docs/ephemeral-resources/cognito_identity_openid_token_for_developer_identity.markdown @@ -6,7 +6,6 @@ description: |- Terraform ephemeral resource for managing an AWS Cognito Identity Open ID Token for Developer Identity. --- - # Ephemeral: aws_cognito_identity_openid_token_for_developer_identity Terraform ephemeral resource for managing an AWS Cognito Identity Open ID Token for Developer Identity. diff --git a/website/docs/guides/custom-service-endpoints.html.markdown b/website/docs/guides/custom-service-endpoints.html.markdown index 1bc7580337e4..9d363d48e057 100644 --- a/website/docs/guides/custom-service-endpoints.html.markdown +++ b/website/docs/guides/custom-service-endpoints.html.markdown @@ -102,6 +102,7 @@ provider "aws" { |App Runner|`apprunner`|`AWS_ENDPOINT_URL_APPRUNNER`|`apprunner`| |AppStream 2.0|`appstream`|`AWS_ENDPOINT_URL_APPSTREAM`|`appstream`| |AppSync|`appsync`|`AWS_ENDPOINT_URL_APPSYNC`|`appsync`| +|Application Resilience Controller Region Switch|`arcregionswitch`|`AWS_ENDPOINT_URL_ARC_REGION_SWITCH`|`arc_region_switch`| |Athena|`athena`|`AWS_ENDPOINT_URL_ATHENA`|`athena`| |Audit Manager|`auditmanager`|`AWS_ENDPOINT_URL_AUDITMANAGER`|`auditmanager`| |Auto Scaling|`autoscaling`|`AWS_ENDPOINT_URL_AUTO_SCALING`|`auto_scaling`| @@ -111,6 +112,7 @@ provider "aws" { |BCM Data Exports|`bcmdataexports`|`AWS_ENDPOINT_URL_BCM_DATA_EXPORTS`|`bcm_data_exports`| |Bedrock|`bedrock`|`AWS_ENDPOINT_URL_BEDROCK`|`bedrock`| |Bedrock Agents|`bedrockagent`|`AWS_ENDPOINT_URL_BEDROCK_AGENT`|`bedrock_agent`| +|Bedrock 
AgentCore|`bedrockagentcore`|`AWS_ENDPOINT_URL_BEDROCK_AGENTCORE_CONTROL`|`bedrock_agentcore_control`| |Billing|`billing`|`AWS_ENDPOINT_URL_BILLING`|`billing`| |Web Services Budgets|`budgets`|`AWS_ENDPOINT_URL_BUDGETS`|`budgets`| |CE (Cost Explorer)|`ce`(or `costexplorer`)|`AWS_ENDPOINT_URL_COST_EXPLORER`|`cost_explorer`| @@ -250,6 +252,7 @@ provider "aws" { |User Notifications|`notifications`|`AWS_ENDPOINT_URL_NOTIFICATIONS`|`notifications`| |User Notifications Contacts|`notificationscontacts`|`AWS_ENDPOINT_URL_NOTIFICATIONSCONTACTS`|`notificationscontacts`| |CloudWatch Observability Access Manager|`oam`(or `cloudwatchobservabilityaccessmanager`)|`AWS_ENDPOINT_URL_OAM`|`oam`| +|Oracle Database@AWS|`odb`|`AWS_ENDPOINT_URL_ODB`|`odb`| |OpenSearch|`opensearch`(or `opensearchservice`)|`AWS_ENDPOINT_URL_OPENSEARCH`|`opensearch`| |OpenSearch Serverless|`opensearchserverless`|`AWS_ENDPOINT_URL_OPENSEARCHSERVERLESS`|`opensearchserverless`| |Organizations|`organizations`|`AWS_ENDPOINT_URL_ORGANIZATIONS`|`organizations`| @@ -289,6 +292,7 @@ provider "aws" { |S3 Control|`s3control`|`AWS_ENDPOINT_URL_S3_CONTROL`|`s3_control`| |S3 on Outposts|`s3outposts`|`AWS_ENDPOINT_URL_S3OUTPOSTS`|`s3outposts`| |S3 Tables|`s3tables`|`AWS_ENDPOINT_URL_S3TABLES`|`s3tables`| +|S3 Vectors|`s3vectors`|`AWS_ENDPOINT_URL_S3VECTORS`|`s3vectors`| |SageMaker AI|`sagemaker`|`AWS_ENDPOINT_URL_SAGEMAKER`|`sagemaker`| |EventBridge Scheduler|`scheduler`|`AWS_ENDPOINT_URL_SCHEDULER`|`scheduler`| |EventBridge Schemas|`schemas`|`AWS_ENDPOINT_URL_SCHEMAS`|`schemas`| @@ -330,6 +334,7 @@ provider "aws" { |WAF Classic Regional|`wafregional`|`AWS_ENDPOINT_URL_WAF_REGIONAL`|`waf_regional`| |WAF|`wafv2`|`AWS_ENDPOINT_URL_WAFV2`|`wafv2`| |Well-Architected Tool|`wellarchitected`|`AWS_ENDPOINT_URL_WELLARCHITECTED`|`wellarchitected`| +|WorkMail|`workmail`|`AWS_ENDPOINT_URL_WORKMAIL`|`workmail`| |WorkSpaces|`workspaces`|`AWS_ENDPOINT_URL_WORKSPACES`|`workspaces`| |WorkSpaces 
Web|`workspacesweb`|`AWS_ENDPOINT_URL_WORKSPACES_WEB`|`workspaces_web`| |X-Ray|`xray`|`AWS_ENDPOINT_URL_XRAY`|`xray`| diff --git a/website/docs/index.html.markdown b/website/docs/index.html.markdown index 16961f1e80d8..cb3ec0f24c46 100644 --- a/website/docs/index.html.markdown +++ b/website/docs/index.html.markdown @@ -7,18 +7,13 @@ description: |- # AWS Provider -Use the Amazon Web Services (AWS) provider to interact with the -many resources supported by AWS. You must configure the provider -with the proper credentials before you can use it. +The Amazon Web Services (AWS) provider is Terraform’s most widely-used provider and the industry-standard way to manage AWS infrastructure as code. It is an indispensable part of how leading technology companies, global banks, government agencies, and some of the largest enterprises in the world build and operate in the cloud. Every day, it provisions and orchestrates billions of dollars of AWS infrastructure across thousands of organizations. -Use the navigation to the left to read about the available resources. There are currently 1506 resources and 607 data sources available in the provider. +With 1,548 resources and 627 data sources, the AWS provider spans the full breadth of AWS services—from foundational capabilities like compute, storage, networking, and identity management to advanced services for AI, analytics, and event-driven architectures, including Lambda, RDS, SageMaker, and Bedrock. Whether automating a single S3 bucket or orchestrating a multi-region, enterprise-scale environment, the provider delivers consistent, reliable workflows that scale with your needs. -To learn the basics of Terraform using this provider, follow the -hands-on [get started tutorials](https://developer.hashicorp.com/terraform/tutorials/aws-get-started/infrastructure-as-code?in=terraform/aws-get-started&utm_source=WEBSITE&utm_medium=WEB_IO&utm_offer=ARTICLE_PAGE&utm_content=DOCS). 
Interact with AWS services, -including Lambda, RDS, and IAM by following the [AWS services -tutorials](https://developer.hashicorp.com/terraform/tutorials/aws?utm_source=WEBSITE&utm_medium=WEB_IO&utm_offer=ARTICLE_PAGE&utm_content=DOCS). +Configure the provider with your AWS credentials, and you can immediately begin creating and managing infrastructure in a safe, repeatable way. Use the navigation on the left to explore the available resources, or start with our [Get Started tutorials](https://developer.hashicorp.com/terraform/tutorials/aws-get-started/infrastructure-as-code?in=terraform/aws-get-started&utm_source=WEBSITE&utm_medium=WEB_IO&utm_offer=ARTICLE_PAGE&utm_content=DOCS) to learn the fundamentals. For deeper guidance on specific AWS services, visit the [AWS services tutorials](https://developer.hashicorp.com/terraform/tutorials/aws?utm_source=WEBSITE&utm_medium=WEB_IO&utm_offer=ARTICLE_PAGE&utm_content=DOCS). -Some AWS services do not support IPv6. As a result, the provider may not be able to interact with AWS APIs using IPv6 addresses. +Note: Some AWS services do not yet support IPv6. In those cases, the provider may not be able to connect to AWS APIs over IPv6 addresses. ## Example Usage diff --git a/website/docs/list-resources/batch_job_queue.html.markdown b/website/docs/list-resources/batch_job_queue.html.markdown new file mode 100644 index 000000000000..30c4f93573b6 --- /dev/null +++ b/website/docs/list-resources/batch_job_queue.html.markdown @@ -0,0 +1,28 @@ +--- +subcategory: "Batch" +layout: "aws" +page_title: "AWS: aws_batch_job_queue" +description: |- + Lists Batch Job Queue resources. +--- + +# List Resource: aws_batch_job_queue + +~> **Note:** The `aws_batch_job_queue` List Resource is in beta. Its interface and behavior may change as the feature evolves, and breaking changes are possible. It is offered as a technical preview without compatibility guarantees until Terraform 1.14 is generally available. + +Lists Batch Job Queue resources. 
+ +## Example Usage + +```terraform +list "aws_batch_job_queue" "example" { + provider = aws +} +``` + +## Argument Reference + +This list resource supports the following arguments: + +* `region` - (Optional) [Region](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints) to query. + Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). diff --git a/website/docs/list-resources/cloudwatch_log_group.html.markdown b/website/docs/list-resources/cloudwatch_log_group.html.markdown new file mode 100644 index 000000000000..a51300c5edd9 --- /dev/null +++ b/website/docs/list-resources/cloudwatch_log_group.html.markdown @@ -0,0 +1,28 @@ +--- +subcategory: "CloudWatch Logs" +layout: "aws" +page_title: "AWS: aws_cloudwatch_log_group" +description: |- + Lists CloudWatch Logs Log Group resources. +--- + +# List Resource: aws_cloudwatch_log_group + +~> **Note:** The `aws_cloudwatch_log_group` List Resource is in beta. Its interface and behavior may change as the feature evolves, and breaking changes are possible. It is offered as a technical preview without compatibility guarantees until Terraform 1.14 is generally available. + +Lists CloudWatch Logs Log Group resources. + +## Example Usage + +```terraform +list "aws_cloudwatch_log_group" "example" { + provider = aws +} +``` + +## Argument Reference + +This list resource supports the following arguments: + +* `region` - (Optional) [Region](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints) to query. + Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
diff --git a/website/docs/list-resources/iam_role.html.markdown b/website/docs/list-resources/iam_role.html.markdown new file mode 100644 index 000000000000..c1c6205556ad --- /dev/null +++ b/website/docs/list-resources/iam_role.html.markdown @@ -0,0 +1,27 @@ +--- +subcategory: "IAM (Identity & Access Management)" +layout: "aws" +page_title: "AWS: aws_iam_role" +description: |- + Lists IAM Role resources. +--- + +# List Resource: aws_iam_role + +~> **Note:** The `aws_iam_role` List Resource is in beta. Its interface and behavior may change as the feature evolves, and breaking changes are possible. It is offered as a technical preview without compatibility guarantees until Terraform 1.14 is generally available. + +Lists IAM Role resources. + +Excludes Service-Linked Roles (see "AWS service-linked role" in [IAM Roles Terms and Concepts documentation](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles.html#id_roles_terms-and-concepts)). + +## Example Usage + +```terraform +list "aws_iam_role" "example" { + provider = aws +} +``` + +## Argument Reference + +This list resource does not support any arguments. diff --git a/website/docs/list-resources/instance.html.markdown b/website/docs/list-resources/instance.html.markdown new file mode 100644 index 000000000000..689e97c2c91f --- /dev/null +++ b/website/docs/list-resources/instance.html.markdown @@ -0,0 +1,65 @@ +--- +subcategory: "EC2 (Elastic Compute Cloud)" +layout: "aws" +page_title: "AWS: aws_instance" +description: |- + Lists EC2 Instance resources. +--- + +# List Resource: aws_instance + +~> **Note:** The `aws_instance` List Resource is in beta. Its interface and behavior may change as the feature evolves, and breaking changes are possible. It is offered as a technical preview without compatibility guarantees until Terraform 1.14 is generally available. + +Lists EC2 Instance resources. 
+ +By default, EC2 Instances managed by an Auto Scaling Group and EC2 Instances in either the `terminated` or `shutting-down` state are excluded. + +## Example Usage + +### Basic Usage + +```terraform +list "aws_instance" "example" { + provider = aws +} +``` + +### Filter Usage + +This example will return instances in the `stopped` state. + +```terraform +list "aws_instance" "example" { + provider = aws + + config { + filter { + name = "instance-state-name" + values = ["stopped"] + } + } +} +``` + +## Argument Reference + +This list resource supports the following arguments: + +* `filter` - (Optional) One or more filters to apply to the search. + If multiple `filter` blocks are provided, they all must be true. + For a full reference of filter names, see [describe-instances in the AWS CLI reference][1]. + See [`filter` Block](#filter-block) below. +* `include_auto_scaled` - (Optional) Whether to include EC2 instances that are managed by an Auto Scaling Group. + Default value is `false`. +* `region` - (Optional) [Region](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints) to query. + Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). + +### `filter` Block + +The `filter` block supports the following arguments: + +* `name` - (Required) Name of the filter. + For a full reference of filter names, see [describe-instances in the AWS CLI reference][1]. +* `values` - (Required) One or more values to match. 
+ +[1]: http://docs.aws.amazon.com/cli/latest/reference/ec2/describe-instances.html diff --git a/website/docs/list-resources/subnet.html.markdown b/website/docs/list-resources/subnet.html.markdown new file mode 100644 index 000000000000..1a81d070056b --- /dev/null +++ b/website/docs/list-resources/subnet.html.markdown @@ -0,0 +1,63 @@ +--- +subcategory: "VPC (Virtual Private Cloud)" +layout: "aws" +page_title: "AWS: aws_subnet" +description: |- + Lists VPC Subnet resources. +--- + +# List Resource: aws_subnet + +Lists VPC Subnet resources. + +Note: Default VPCs are not included. + +## Example Usage + +### Basic Usage + +```terraform +list "aws_subnet" "example" { + provider = aws +} +``` + +### Filter Usage + +This example will return VPC Subnets with the tag `Project` with the value `example`. + +```terraform +list "aws_subnet" "example" { + provider = aws + + config { + filter { + name = "tag:Project" + values = ["example"] + } + } +} +``` + +## Argument Reference + +This list resource supports the following arguments: + +* `filter` - (Optional) One or more filters to apply to the search. + If multiple `filter` blocks are provided, they all must be true. + For a full reference of filter names, see [describe-subnets in the AWS CLI reference][describe-subnets]. + See [`filter` Block](#filter-block) below. +* `region` - (Optional) [Region](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints) to query. + Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `subnet_ids` - (Optional) List of VPC Subnets IDs to query. + +### `filter` Block + +The `filter` block supports the following arguments: + +* `name` - (Required) Name of the filter. + For a full reference of filter names, see [describe-subnets in the AWS CLI reference][describe-subnets]. + `default-for-az` is not supported. +* `values` - (Required) One or more values to match. 
+ +[describe-subnets]: http://docs.aws.amazon.com/cli/latest/reference/ec2/describe-subnets.html diff --git a/website/docs/list-resources/vpc.html.markdown b/website/docs/list-resources/vpc.html.markdown new file mode 100644 index 000000000000..219f189878b0 --- /dev/null +++ b/website/docs/list-resources/vpc.html.markdown @@ -0,0 +1,63 @@ +--- +subcategory: "VPC (Virtual Private Cloud)" +layout: "aws" +page_title: "AWS: aws_vpc" +description: |- + Lists VPC resources. +--- + +# List Resource: aws_vpc + +Lists VPC resources. + +Note: The default VPC is not included. + +## Example Usage + +### Basic Usage + +```terraform +list "aws_vpc" "example" { + provider = aws +} +``` + +### Filter Usage + +This example will return VPCs with the tag `Project` with the value `example`. + +```terraform +list "aws_vpc" "example" { + provider = aws + + config { + filter { + name = "tag:Project" + values = ["example"] + } + } +} +``` + +## Argument Reference + +This list resource supports the following arguments: + +* `filter` - (Optional) One or more filters to apply to the search. + If multiple `filter` blocks are provided, they all must be true. + For a full reference of filter names, see [describe-vpcs in the AWS CLI reference][describe-vpcs]. + See [`filter` Block](#filter-block) below. +* `region` - (Optional) [Region](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints) to query. + Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `vpc_ids` - (Optional) List of VPC IDs to query. + +### `filter` Block + +The `filter` block supports the following arguments: + +* `name` - (Required) Name of the filter. + For a full reference of filter names, see [describe-vpcs in the AWS CLI reference][describe-vpcs]. + `is-default` is not supported. +* `values` - (Required) One or more values to match. 
+ +[describe-vpcs]: http://docs.aws.amazon.com/cli/latest/reference/ec2/describe-vpcs.html diff --git a/website/docs/r/acm_certificate.html.markdown b/website/docs/r/acm_certificate.html.markdown index c8676bbf09c8..401ade570540 100644 --- a/website/docs/r/acm_certificate.html.markdown +++ b/website/docs/r/acm_certificate.html.markdown @@ -168,6 +168,7 @@ This resource supports the following arguments: Supported nested arguments for the `options` configuration block: * `certificate_transparency_logging_preference` - (Optional) Whether certificate details should be added to a certificate transparency log. Valid values are `ENABLED` or `DISABLED`. See https://docs.aws.amazon.com/acm/latest/userguide/acm-concepts.html#concept-transparency for more details. +* `export` - (Optional) Whether the certificate can be exported. Valid values are `ENABLED` or `DISABLED` (default). **Note** Issuing an exportable certificate is subject to additional charges. See [AWS Certificate Manager pricing](https://aws.amazon.com/certificate-manager/pricing/) for more details. ## validation_option Configuration Block @@ -212,11 +213,32 @@ Renewal summary objects export the following attributes: ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_acm_certificate.example + identity = { + "arn" = "arn:aws:acm:eu-central-1:123456789012:certificate/7e7a28d2-163f-4b8f-b9cd-822f96c08d6a" + } +} + +resource "aws_acm_certificate" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) ARN of the certificate. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import certificates using their ARN. 
For example: ```terraform import { - to = aws_acm_certificate.cert + to = aws_acm_certificate.example id = "arn:aws:acm:eu-central-1:123456789012:certificate/7e7a28d2-163f-4b8f-b9cd-822f96c08d6a" } ``` @@ -224,5 +246,5 @@ import { Using `terraform import`, import certificates using their ARN. For example: ```console -% terraform import aws_acm_certificate.cert arn:aws:acm:eu-central-1:123456789012:certificate/7e7a28d2-163f-4b8f-b9cd-822f96c08d6a +% terraform import aws_acm_certificate.example arn:aws:acm:eu-central-1:123456789012:certificate/7e7a28d2-163f-4b8f-b9cd-822f96c08d6a ``` diff --git a/website/docs/r/acmpca_certificate.html.markdown b/website/docs/r/acmpca_certificate.html.markdown index d6ebab9dd285..7f1ef51e57ed 100644 --- a/website/docs/r/acmpca_certificate.html.markdown +++ b/website/docs/r/acmpca_certificate.html.markdown @@ -84,6 +84,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_acmpca_certificate.example + identity = { + "arn" = "arn:aws:acm-pca:us-east-1:123456789012:certificate-authority/12345678-1234-1234-1234-123456789012/certificate/286535153982981100925020015808220737245" + } +} + +resource "aws_acmpca_certificate" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the ACM PCA certificate. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import ACM PCA Certificates using their ARN. 
For example: ```terraform diff --git a/website/docs/r/acmpca_certificate_authority.html.markdown b/website/docs/r/acmpca_certificate_authority.html.markdown index c3bb44f2ef5f..0d161ce725ac 100644 --- a/website/docs/r/acmpca_certificate_authority.html.markdown +++ b/website/docs/r/acmpca_certificate_authority.html.markdown @@ -184,6 +184,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_acmpca_certificate_authority.example + identity = { + "arn" = "arn:aws:acm-pca:us-east-1:123456789012:certificate-authority/12345678-1234-1234-1234-123456789012" + } +} + +resource "aws_acmpca_certificate_authority" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the ACM PCA certificate authority. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import `aws_acmpca_certificate_authority` using the certificate authority ARN. For example: ```terraform diff --git a/website/docs/r/acmpca_policy.html.markdown b/website/docs/r/acmpca_policy.html.markdown index 577dd23b630d..385421bc67f0 100644 --- a/website/docs/r/acmpca_policy.html.markdown +++ b/website/docs/r/acmpca_policy.html.markdown @@ -76,6 +76,27 @@ This resource exports no additional attributes. ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. 
For example: + +```terraform +import { + to = aws_acmpca_policy.example + identity = { + "arn" = "arn:aws:acm-pca:us-east-1:123456789012:certificate-authority/12345678-1234-1234-1234-123456789012" + } +} + +resource "aws_acmpca_policy" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the ACM PCA certificate authority. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import `aws_acmpca_policy` using the `resource_arn` value. For example: ```terraform diff --git a/website/docs/r/amplify_app.html.markdown b/website/docs/r/amplify_app.html.markdown index 01da90432542..d4ffd08b0b13 100644 --- a/website/docs/r/amplify_app.html.markdown +++ b/website/docs/r/amplify_app.html.markdown @@ -194,7 +194,7 @@ This resource supports the following arguments: * `enable_branch_auto_deletion` - (Optional) Automatically disconnects a branch in the Amplify Console when you delete a branch from your Git repository. * `environment_variables` - (Optional) Environment variables map for an Amplify app. * `iam_service_role_arn` - (Optional) AWS Identity and Access Management (IAM) service role for an Amplify app. -* `job_config` - (Optional) Used to configure the [Amplify Application build settings](https://docs.aws.amazon.com/amplify/latest/userguide/build-settings.html). See [`job_config` Block](#job_config-block) for details. +* `job_config` - (Optional) Used to configure the [Amplify Application build instance compute type](https://docs.aws.amazon.com/amplify/latest/APIReference/API_JobConfig.html#amplify-Type-JobConfig-buildComputeType). See [`job_config` Block](#job_config-block) for details. * `oauth_token` - (Optional) OAuth token for a third-party source control system for an Amplify app. The OAuth token is used to create a webhook and a read-only deploy key. The OAuth token is not stored. 
* `platform` - (Optional) Platform or framework for an Amplify app. Valid values: `WEB`, `WEB_COMPUTE`. Default value: `WEB`. * `repository` - (Optional) Repository for an Amplify app. diff --git a/website/docs/r/api_gateway_domain_name_access_association.html.markdown b/website/docs/r/api_gateway_domain_name_access_association.html.markdown index 143f448e98aa..ce6f20691ce6 100644 --- a/website/docs/r/api_gateway_domain_name_access_association.html.markdown +++ b/website/docs/r/api_gateway_domain_name_access_association.html.markdown @@ -40,6 +40,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_api_gateway_domain_name_access_association.example + identity = { + "arn" = "arn:aws:apigateway:us-east-1::/domainnames/example.com/accessassociation" + } +} + +resource "aws_api_gateway_domain_name_access_association" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the API Gateway domain name access association. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import API Gateway domain name access associations using their `arn`. 
For example: ```terraform diff --git a/website/docs/r/api_gateway_gateway_response.html.markdown b/website/docs/r/api_gateway_gateway_response.html.markdown index 94bdcedde45f..a6cf7d03c7be 100644 --- a/website/docs/r/api_gateway_gateway_response.html.markdown +++ b/website/docs/r/api_gateway_gateway_response.html.markdown @@ -36,9 +36,9 @@ resource "aws_api_gateway_gateway_response" "test" { This resource supports the following arguments: -* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `region` - (Optional) Region where this resource will be managed. See the [AWS Documentation](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints) for supported values. Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `rest_api_id` - (Required) String identifier of the associated REST API. -* `response_type` - (Required) Response type of the associated GatewayResponse. +* `response_type` - (Required) Response type of the associated GatewayResponse. See the [AWS Documentation](https://docs.aws.amazon.com/apigateway/latest/developerguide/supported-gateway-response-types.html) for supported values. * `status_code` - (Optional) HTTP status code of the Gateway Response. * `response_templates` - (Optional) Map of templates used to transform the response body. * `response_parameters` - (Optional) Map of parameters (paths, query strings and headers) of the Gateway Response. 
diff --git a/website/docs/r/appautoscaling_policy.html.markdown b/website/docs/r/appautoscaling_policy.html.markdown index e7942ce1bcd7..512f14f19d6f 100644 --- a/website/docs/r/appautoscaling_policy.html.markdown +++ b/website/docs/r/appautoscaling_policy.html.markdown @@ -196,18 +196,123 @@ resource "aws_appautoscaling_policy" "example" { } ``` +### Predictive Scaling + +```terraform +resource "aws_appautoscaling_policy" "example" { + name = "example-policy" + resource_id = aws_appautoscaling_target.example.resource_id + scalable_dimension = aws_appautoscaling_target.example.scalable_dimension + service_namespace = aws_appautoscaling_target.example.service_namespace + policy_type = "PredictiveScaling" + + predictive_scaling_policy_configuration { + metric_specification { + target_value = 40 + + predefined_metric_pair_specification { + predefined_metric_type = "ECSServiceMemoryUtilization" + } + } + } +} +``` + ## Argument Reference This resource supports the following arguments: -* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) Name of the policy. Must be between 1 and 255 characters in length. -* `policy_type` - (Optional) Policy type. Valid values are `StepScaling` and `TargetTrackingScaling`. Defaults to `StepScaling`. Certain services only support only one policy type. For more information see the [Target Tracking Scaling Policies](https://docs.aws.amazon.com/autoscaling/application/userguide/application-auto-scaling-target-tracking.html) and [Step Scaling Policies](https://docs.aws.amazon.com/autoscaling/application/userguide/application-auto-scaling-step-scaling-policies.html) documentation. +* `policy_type` - (Optional) Policy type. 
Valid values are `StepScaling`, `TargetTrackingScaling`, and `PredictiveScaling`. Defaults to `StepScaling`. Certain services support only one policy type. For more information see the [Target Tracking Scaling Policies](https://docs.aws.amazon.com/autoscaling/application/userguide/application-auto-scaling-target-tracking.html), [Step Scaling Policies](https://docs.aws.amazon.com/autoscaling/application/userguide/application-auto-scaling-step-scaling-policies.html), and [Predictive Scaling](https://docs.aws.amazon.com/autoscaling/application/userguide/application-auto-scaling-predictive-scaling.html) documentation. +* `predictive_scaling_policy_configuration` - (Optional) Predictive scaling policy configuration, requires `policy_type = "PredictiveScaling"`. See supported fields below. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `resource_id` - (Required) Resource type and unique identifier string for the resource associated with the scaling policy. Documentation can be found in the `ResourceId` parameter at: [AWS Application Auto Scaling API Reference](https://docs.aws.amazon.com/autoscaling/application/APIReference/API_RegisterScalableTarget.html) * `scalable_dimension` - (Required) Scalable dimension of the scalable target. Documentation can be found in the `ScalableDimension` parameter at: [AWS Application Auto Scaling API Reference](https://docs.aws.amazon.com/autoscaling/application/APIReference/API_RegisterScalableTarget.html) * `service_namespace` - (Required) AWS service namespace of the scalable target. 
Documentation can be found in the `ServiceNamespace` parameter at: [AWS Application Auto Scaling API Reference](https://docs.aws.amazon.com/autoscaling/application/APIReference/API_RegisterScalableTarget.html) * `step_scaling_policy_configuration` - (Optional) Step scaling policy configuration, requires `policy_type = "StepScaling"` (default). See supported fields below. -* `target_tracking_scaling_policy_configuration` - (Optional) Target tracking policy, requires `policy_type = "TargetTrackingScaling"`. See supported fields below. +* `target_tracking_scaling_policy_configuration` - (Optional) Target tracking policy configuration, requires `policy_type = "TargetTrackingScaling"`. See supported fields below. + +### predictive_scaling_policy_configuration + +The `predictive_scaling_policy_configuration` configuration block supports the following arguments: + +* `max_capacity_breach_behavior` - (Optional) The behavior that should be applied if the forecast capacity approaches or exceeds the maximum capacity. Valid values are `HonorMaxCapacity` and `IncreaseMaxCapacity`. +* `max_capacity_buffer` - (Optional) Size of the capacity buffer to use when the forecast capacity is close to or exceeds the maximum capacity. The value is specified as a percentage relative to the forecast capacity. Required if the `max_capacity_breach_behavior` argument is set to `IncreaseMaxCapacity`, and cannot be used otherwise. +* `metric_specification` - (Required) Metrics and target utilization to use for predictive scaling. See supported fields below. +* `mode` - (Optional) Predictive scaling mode. Valid values are `ForecastOnly` and `ForecastAndScale`. +* `scheduling_buffer_time` - (Optional) Amount of time, in seconds, that the start time can be advanced. 
+ +### predictive_scaling_policy_configuration metric_specification + +The `predictive_scaling_policy_configuration` `metric_specification` configuration block supports the following arguments: + +* `customized_capacity_metric_specification` - (Optional) Customized capacity metric specification. See supported fields below. +* `customized_load_metric_specification` - (Optional) Customized load metric specification. See supported fields below. +* `customized_scaling_metric_specification` - (Optional) Customized scaling metric specification. See supported fields below. +* `predefined_load_metric_specification` - (Optional) Predefined load metric specification. See supported fields below. +* `predefined_metric_pair_specification` - (Optional) Predefined metric pair specification that determines the appropriate scaling metric and load metric to use. See supported fields below. +* `predefined_scaling_metric_specification` - (Optional) Predefined scaling metric specification. See supported fields below. +* `target_value` - (Required) Target utilization. + +### predictive_scaling_policy_configuration metric_specification customized_capacity_metric_specification, customized_load_metric_specification and customized_scaling_metric_specification + +The `predictive_scaling_policy_configuration` `metric_specification` `customized_capacity_metric_specification`, `customized_load_metric_specification`, and `customized_scaling_metric_specification` configuration blocks support the following arguments: + +* `metric_data_query` - (Required) One or more metric data queries to provide data points for a metric specification. See supported fields below. 
+ +### predictive_scaling_policy_configuration metric_specification customized_capacity_metric_specification metric_data_query + +The `predictive_scaling_policy_configuration` `metric_specification` `customized_capacity_metric_specification` `metric_data_query` configuration block supports the following arguments: + +* `expression` - (Optional) Math expression to perform on the returned data, if this object is performing a math expression. +* `id` - (Required) Short name that identifies the object's results in the response. +* `label` - (Optional) Human-readable label for this metric or expression. +* `metric_stat` - (Optional) Information about the metric data to return. See supported fields below. +* `return_data` - (Optional) Whether to return the timestamps and raw data values of this metric. + +### predictive_scaling_policy_configuration metric_specification customized_capacity_metric_specification metric_data_query metric_stat + +The `predictive_scaling_policy_configuration` `metric_specification` `customized_capacity_metric_specification` `metric_data_query` `metric_stat` configuration block supports the following arguments: + +* `metric` - (Required) CloudWatch metric to return, including the metric name, namespace, and dimensions. See supported fields below. +* `stat` - (Required) Statistic to return. +* `unit` - (Optional) Unit to use for the returned data points. + +### predictive_scaling_policy_configuration metric_specification customized_capacity_metric_specification metric_data_query metric_stat metric + +The `predictive_scaling_policy_configuration` `metric_specification` `customized_capacity_metric_specification` `metric_data_query` `metric_stat` `metric` configuration block supports the following arguments: + +* `dimension` - (Optional) Dimensions of the metric. See supported fields below. +* `metric_name` - (Optional) Name of the metric. +* `namespace` - (Optional) Namespace of the metric. 
+ +### predictive_scaling_policy_configuration metric_specification customized_capacity_metric_specification metric_data_query metric_stat metric dimension + +The `predictive_scaling_policy_configuration` `metric_specification` `customized_capacity_metric_specification` `metric_data_query` `metric_stat` `metric` `dimension` configuration block supports the following arguments: + +* `name` - (Optional) Name of the dimension. +* `value` - (Optional) Value of the dimension. + +### predictive_scaling_policy_configuration metric_specification predefined_load_metric_specification + +The `predictive_scaling_policy_configuration` `metric_specification` `predefined_load_metric_specification` configuration block supports the following arguments: + +* `predefined_metric_type` - (Required) Metric type. +* `resource_label` - (Optional) Label that uniquely identifies a target group. + +### predictive_scaling_policy_configuration metric_specification predefined_metric_pair_specification + +The `predictive_scaling_policy_configuration` `metric_specification` `predefined_metric_pair_specification` configuration block supports the following arguments: + +* `predefined_metric_type` - (Required) Which metrics to use. There are two different types of metrics for each metric type: one is a load metric and one is a scaling metric. +* `resource_label` - (Optional) Label that uniquely identifies a specific target group from which to determine the total and average request count. + +### predictive_scaling_policy_configuration metric_specification predefined_scaling_metric_specification + +The `predictive_scaling_policy_configuration` `metric_specification` `predefined_scaling_metric_specification` configuration block supports the following arguments: + +* `predefined_metric_type` - (Required) Metric type. +* `resource_label` - (Optional) Label that uniquely identifies a specific target group from which to determine the average request count. 
### step_scaling_policy_configuration diff --git a/website/docs/r/appconfig_hosted_configuration_version.html.markdown b/website/docs/r/appconfig_hosted_configuration_version.html.markdown index b56be04fdb8f..18a7ffab23c1 100644 --- a/website/docs/r/appconfig_hosted_configuration_version.html.markdown +++ b/website/docs/r/appconfig_hosted_configuration_version.html.markdown @@ -79,6 +79,42 @@ resource "aws_appconfig_hosted_configuration_version" "example" { } ``` +### Multi-variant Feature Flags + +```terraform +resource "aws_appconfig_hosted_configuration_version" "example" { + application_id = aws_appconfig_application.example.id + configuration_profile_id = aws_appconfig_configuration_profile.example.configuration_profile_id + description = "Example Multi-variant Feature Flag Configuration Version" + content_type = "application/json" + + content = jsonencode({ + flags = { + loggingenabled = { + name = "loggingEnabled" + } + }, + values = { + loggingenabled = { + _variants = concat([ + for user_id in var.appcfg_enableLogging_userIds : { # Flat list of userIds + enabled = true, + name = "usersWithLoggingEnabled_${user_id}", + rule = "(or (eq $userId \"${user_id}\"))" + } + ], [ + { + enabled = false, + name = "Default" + } + ]) + } + }, + version = "1" + }) +} +``` + ## Argument Reference This resource supports the following arguments: diff --git a/website/docs/r/appfabric_app_bundle.html.markdown b/website/docs/r/appfabric_app_bundle.html.markdown index 5b385c141926..3a4b24c417f4 100644 --- a/website/docs/r/appfabric_app_bundle.html.markdown +++ b/website/docs/r/appfabric_app_bundle.html.markdown @@ -40,6 +40,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. 
For example: + +```terraform +import { + to = aws_appfabric_app_bundle.example + identity = { + "arn" = "arn:aws:appfabric:us-east-1:123456789012:appbundle/a1b2c3d4-5678-90ab-cdef-EXAMPLE11111" + } +} + +resource "aws_appfabric_app_bundle" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the AppFabric app bundle. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import AppFabric AppBundle using the `arn`. For example: ```terraform diff --git a/website/docs/r/appflow_connector_profile.html.markdown b/website/docs/r/appflow_connector_profile.html.markdown index 7c33890882c4..f508dba5b441 100644 --- a/website/docs/r/appflow_connector_profile.html.markdown +++ b/website/docs/r/appflow_connector_profile.html.markdown @@ -324,6 +324,32 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_appflow_connector_profile.example + identity = { + name = "example_profile" + } +} + +resource "aws_appflow_connector_profile" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `name` (String) Name of the Appflow connector profile. + +#### Optional + +* `account_id` (String) AWS Account where this resource is managed. +* `region` (String) Region where this resource is managed. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import AppFlow Connector Profile using the connector profile `name`. 
For example: ```terraform diff --git a/website/docs/r/appflow_flow.html.markdown b/website/docs/r/appflow_flow.html.markdown index cc75cc04ad95..459777bd61d3 100644 --- a/website/docs/r/appflow_flow.html.markdown +++ b/website/docs/r/appflow_flow.html.markdown @@ -418,6 +418,32 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_appflow_flow.example + identity = { + name = "example-flow" + } +} + +resource "aws_appflow_flow" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `name` (String) Name of the AppFlow flow. + +#### Optional + +* `account_id` (String) AWS Account where this resource is managed. +* `region` (String) Region where this resource is managed. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import AppFlow flows using the `name`. For example: ```terraform diff --git a/website/docs/r/apprunner_auto_scaling_configuration_version.html.markdown b/website/docs/r/apprunner_auto_scaling_configuration_version.html.markdown index 4bb453ee7c67..d03d26eba389 100644 --- a/website/docs/r/apprunner_auto_scaling_configuration_version.html.markdown +++ b/website/docs/r/apprunner_auto_scaling_configuration_version.html.markdown @@ -49,6 +49,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. 
For example: + +```terraform +import { + to = aws_apprunner_auto_scaling_configuration_version.example + identity = { + "arn" = "arn:aws:apprunner:us-east-1:123456789012:autoscalingconfiguration/example-auto-scaling-config/1/a1b2c3d4567890ab" + } +} + +resource "aws_apprunner_auto_scaling_configuration_version" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the App Runner auto scaling configuration version. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import App Runner AutoScaling Configuration Versions using the `arn`. For example: ```terraform diff --git a/website/docs/r/apprunner_observability_configuration.html.markdown b/website/docs/r/apprunner_observability_configuration.html.markdown index cdecc27f9025..b93af4fb9665 100644 --- a/website/docs/r/apprunner_observability_configuration.html.markdown +++ b/website/docs/r/apprunner_observability_configuration.html.markdown @@ -53,6 +53,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_apprunner_observability_configuration.example + identity = { + "arn" = "arn:aws:apprunner:us-east-1:123456789012:observabilityconfiguration/example-observability-config/1/a1b2c3d4567890ab" + } +} + +resource "aws_apprunner_observability_configuration" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the App Runner observability configuration. 
+ In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import App Runner Observability Configuration using the `arn`. For example: ```terraform diff --git a/website/docs/r/apprunner_service.html.markdown b/website/docs/r/apprunner_service.html.markdown index 86f29278b0c1..a13448cf4d83 100644 --- a/website/docs/r/apprunner_service.html.markdown +++ b/website/docs/r/apprunner_service.html.markdown @@ -273,6 +273,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_apprunner_service.example + identity = { + "arn" = "arn:aws:apprunner:us-east-1:123456789012:service/example-app-service/8fe1e10304f84fd2b0df550fe98a71fa" + } +} + +resource "aws_apprunner_service" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the App Runner service. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import App Runner Services using the `arn`. For example: ```terraform diff --git a/website/docs/r/apprunner_vpc_connector.html.markdown b/website/docs/r/apprunner_vpc_connector.html.markdown index 4e8c24e86abe..d81523735e0f 100644 --- a/website/docs/r/apprunner_vpc_connector.html.markdown +++ b/website/docs/r/apprunner_vpc_connector.html.markdown @@ -41,6 +41,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. 
For example: + +```terraform +import { + to = aws_apprunner_vpc_connector.example + identity = { + "arn" = "arn:aws:apprunner:us-east-1:123456789012:vpcconnector/example-vpc-connector/1/a1b2c3d4567890ab" + } +} + +resource "aws_apprunner_vpc_connector" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the App Runner VPC connector. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import App Runner vpc connector using the `arn`. For example: ```terraform diff --git a/website/docs/r/apprunner_vpc_ingress_connection.html.markdown b/website/docs/r/apprunner_vpc_ingress_connection.html.markdown index e9cc347f66e5..8c42c530f33c 100644 --- a/website/docs/r/apprunner_vpc_ingress_connection.html.markdown +++ b/website/docs/r/apprunner_vpc_ingress_connection.html.markdown @@ -57,6 +57,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_apprunner_vpc_ingress_connection.example + identity = { + "arn" = "arn:aws:apprunner:us-east-1:123456789012:vpcingressconnection/example-vpc-ingress-connection/a1b2c3d4567890ab" + } +} + +resource "aws_apprunner_vpc_ingress_connection" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the App Runner VPC ingress connection. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import App Runner VPC Ingress Connection using the `arn`. 
For example: ```terraform diff --git a/website/docs/r/appsync_api.html.markdown b/website/docs/r/appsync_api.html.markdown new file mode 100644 index 000000000000..0aa0ab26dca7 --- /dev/null +++ b/website/docs/r/appsync_api.html.markdown @@ -0,0 +1,202 @@ +--- +subcategory: "AppSync" +layout: "aws" +page_title: "AWS: aws_appsync_api" +description: |- + Manages an AWS AppSync Event API. +--- + +# Resource: aws_appsync_api + +Manages an [AWS AppSync Event API](https://docs.aws.amazon.com/appsync/latest/eventapi/event-api-concepts.html#API). Event APIs enable real-time subscriptions and event-driven communication in AppSync applications. + +## Example Usage + +### Basic Usage + +```terraform +resource "aws_appsync_api" "example" { + name = "example-event-api" + + event_config { + auth_provider { + auth_type = "API_KEY" + } + + connection_auth_mode { + auth_type = "API_KEY" + } + + default_publish_auth_mode { + auth_type = "API_KEY" + } + + default_subscribe_auth_mode { + auth_type = "API_KEY" + } + } +} +``` + +### With Cognito Authentication + +```terraform +resource "aws_cognito_user_pool" "example" { + name = "example-user-pool" +} + +resource "aws_appsync_api" "example" { + name = "example-event-api" + + event_config { + auth_provider { + auth_type = "AMAZON_COGNITO_USER_POOLS" + cognito_config { + user_pool_id = aws_cognito_user_pool.example.id + aws_region = data.aws_region.current.name + } + } + + connection_auth_mode { + auth_type = "AMAZON_COGNITO_USER_POOLS" + } + + default_publish_auth_mode { + auth_type = "AMAZON_COGNITO_USER_POOLS" + } + + default_subscribe_auth_mode { + auth_type = "AMAZON_COGNITO_USER_POOLS" + } + } +} + +data "aws_region" "current" {} +``` + +### With Lambda Authorizer + +```terraform +resource "aws_appsync_api" "example" { + name = "example-event-api" + + event_config { + auth_provider { + auth_type = "AWS_LAMBDA" + lambda_authorizer_config { + authorizer_uri = aws_lambda_function.example.arn + authorizer_result_ttl_in_seconds = 300 + 
} + } + + connection_auth_mode { + auth_type = "AWS_LAMBDA" + } + + default_publish_auth_mode { + auth_type = "AWS_LAMBDA" + } + + default_subscribe_auth_mode { + auth_type = "AWS_LAMBDA" + } + } +} +``` + +## Argument Reference + +The following arguments are required: + +* `event_config` - (Required) Configuration for the Event API. See [Event Config](#event-config) below. +* `name` - (Required) Name of the Event API. + +The following arguments are optional: + +* `owner_contact` - (Optional) Contact information for the owner of the Event API. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `tags` - (Optional) Map of tags to assign to the resource. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. + +### Event Config + +The `event_config` block supports the following: + +* `auth_provider` - (Required) List of authentication providers. See [Auth Providers](#auth-providers) below. +* `connection_auth_mode` - (Required) List of authentication modes for connections. See [Auth Modes](#auth-modes) below. +* `default_publish_auth_mode` - (Required) List of default authentication modes for publishing. See [Auth Modes](#auth-modes) below. +* `default_subscribe_auth_mode` - (Required) List of default authentication modes for subscribing. See [Auth Modes](#auth-modes) below. +* `log_config` - (Optional) Logging configuration. See [Log Config](#log-config) below. + +### Auth Providers + +The `auth_provider` block supports the following: + +* `auth_type` - (Required) Type of authentication provider. 
Valid values: `API_KEY`, `AWS_IAM`, `AMAZON_COGNITO_USER_POOLS`, `OPENID_CONNECT`, `AWS_LAMBDA`. +* `cognito_config` - (Optional) Configuration for Cognito user pool authentication. Required when `auth_type` is `AMAZON_COGNITO_USER_POOLS`. See [Cognito Config](#cognito-config) below. +* `lambda_authorizer_config` - (Optional) Configuration for Lambda authorization. Required when `auth_type` is `AWS_LAMBDA`. See [Lambda Authorizer Config](#lambda-authorizer-config) below. +* `openid_connect_config` - (Optional) Configuration for OpenID Connect. Required when `auth_type` is `OPENID_CONNECT`. See [OpenID Connect Config](#openid-connect-config) below. + +### Cognito Config + +The `cognito_config` block supports the following: + +* `app_id_client_regex` - (Optional) Regular expression for matching the client ID. +* `aws_region` - (Required) AWS region where the user pool is located. +* `user_pool_id` - (Required) ID of the Cognito user pool. + +### Lambda Authorizer Config + +The `lambda_authorizer_config` block supports the following: + +* `authorizer_result_ttl_in_seconds` - (Optional) TTL in seconds for the authorization result cache. +* `authorizer_uri` - (Required) URI of the Lambda function for authorization. +* `identity_validation_expression` - (Optional) Regular expression for identity validation. + +### OpenID Connect Config + +The `openid_connect_config` block supports the following: + +* `auth_ttl` - (Optional) TTL in seconds for the authentication token. +* `client_id` - (Optional) Client ID for the OpenID Connect provider. +* `iat_ttl` - (Optional) TTL in seconds for the issued at time. +* `issuer` - (Required) Issuer URL for the OpenID Connect provider. + +### Auth Modes + +The `connection_auth_mode`, `default_publish_auth_mode`, and `default_subscribe_auth_mode` blocks support the following: + +* `auth_type` - (Required) Type of authentication. Valid values: `API_KEY`, `AWS_IAM`, `AMAZON_COGNITO_USER_POOLS`, `OPENID_CONNECT`, `AWS_LAMBDA`. 
+ +### Log Config + +The `log_config` block supports the following: + +* `cloudwatch_logs_role_arn` - (Required) ARN of the IAM role for CloudWatch logs. +* `log_level` - (Required) Log level. Valid values: `NONE`, `ERROR`, `ALL`, `INFO`, `DEBUG`. + +## Attribute Reference + +This resource exports the following attributes in addition to the arguments above: + +* `api_id` - ID of the Event API. +* `api_arn` - ARN of the Event API. +* `dns` - DNS configuration for the Event API. +* `tags_all` - Map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). +* `waf_web_acl_arn` - ARN of the associated WAF web ACL. + +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import AppSync Event API using the `api_id`. For example: + +```terraform +import { + to = aws_appsync_api.example + id = "example-api-id" +} +``` + +Using `terraform import`, import AppSync Event API using the `api_id`. For example: + +```console +% terraform import aws_appsync_api.example example-api-id +``` diff --git a/website/docs/r/appsync_channel_namespace.html.markdown b/website/docs/r/appsync_channel_namespace.html.markdown new file mode 100644 index 000000000000..6b8ad380738f --- /dev/null +++ b/website/docs/r/appsync_channel_namespace.html.markdown @@ -0,0 +1,95 @@ +--- +subcategory: "AppSync" +layout: "aws" +page_title: "AWS: aws_appsync_channel_namespace" +description: |- + Manages an AWS AppSync Channel Namespace. +--- + +# Resource: aws_appsync_channel_namespace + +Manages an [AWS AppSync Channel Namespace](https://docs.aws.amazon.com/appsync/latest/eventapi/event-api-concepts.html#namespace). 
+ +## Example Usage + +### Basic Usage + +```terraform +resource "aws_appsync_channel_namespace" "example" { + name = "example-channel-namespace" + api_id = aws_appsync_api.example.api_id +} +``` + +## Argument Reference + +The following arguments are required: + +* `api_id` - (Required) Event API ID. +* `name` - (Required) Name of the channel namespace. + +The following arguments are optional: + +* `code_handlers` - (Optional) Event handler functions that run custom business logic to process published events and subscribe requests. +* `handler_configs` - (Optional) Configuration for the `on_publish` and `on_subscribe` handlers. See [Handler Configs](#handler-configs) below. +* `publish_auth_mode` - (Optional) Authorization modes to use for publishing messages on the channel namespace. This configuration overrides the default API authorization configuration. See [Auth Modes](#auth-modes) below. +* `subscribe_auth_mode` - (Optional) Authorization modes to use for subscribing to messages on the channel namespace. This configuration overrides the default API authorization configuration. See [Auth Modes](#auth-modes) below. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `tags` - (Optional) Map of tags to assign to the resource. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. + +### Auth Modes + +The `publish_auth_mode`, and `subscribe_auth_mode` blocks support the following: + +* `auth_type` - (Required) Type of authentication. 
Valid values: `API_KEY`, `AWS_IAM`, `AMAZON_COGNITO_USER_POOLS`, `OPENID_CONNECT`, `AWS_LAMBDA`. + +### Handler Configs + +The `handler_configs` block supports the following: + +* `on_publish` - (Optional) Handler configuration. See [Handler Config](#handler-config) below. +* `on_subscribe` - (Optional) Handler configuration. See [Handler Config](#handler-config) below. + +### Handler Config + +The `on_publish` and `on_subscribe` blocks support the following: + +* `behavior` - (Required) Behavior for the handler. Valid values: `CODE`, `DIRECT`. +* `integration` - (Required) Integration data source configuration for the handler. See [Integration](#integration) below. + +### Integration + +The `integration` block supports the following: + +* `data_source_name` - (Required) Unique name of the data source that has been configured on the API. +* `lambda_config` - (Optional) Configuration for a Lambda data source. See [Lambda Config](#lambda-config) below. + +### Lambda Config + +The `lambda_config` block supports the following: + +* `invoke_type` - (Optional) Invocation type for a Lambda data source. Valid values: `REQUEST_RESPONSE`, `EVENT`. + +## Attribute Reference + +This resource exports the following attributes in addition to the arguments above: + +* `channel_namespace_arn` - ARN of the channel namespace. +* `tags_all` - Map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). + +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import AppSync Channel Namespace using the `api_id` and `name` separated by a comma (`,`). 
For example: + +```terraform +import { + to = aws_appsync_channel_namespace.example + id = "example-api-id,example-channel-namespace" +} +``` + +Using `terraform import`, import AppSync Channel Namespace using the `api_id` and `name` separated by a comma (`,`). For example: + +```console +% terraform import aws_appsync_channel_namespace.example example-api-id,example-channel-namespace +``` diff --git a/website/docs/r/appsync_source_api_association.html.markdown b/website/docs/r/appsync_source_api_association.html.markdown index 802f6e4f5869..48847f954096 100644 --- a/website/docs/r/appsync_source_api_association.html.markdown +++ b/website/docs/r/appsync_source_api_association.html.markdown @@ -3,11 +3,11 @@ subcategory: "AppSync" layout: "aws" page_title: "AWS: aws_appsync_source_api_association" description: |- - Terraform resource for managing an AWS AppSync Source Api Association. + Terraform resource for managing an AWS AppSync Source API Association. --- # Resource: aws_appsync_source_api_association -Terraform resource for managing an AWS AppSync Source Api Association. +Terraform resource for managing an AWS AppSync Source API Association. ## Example Usage @@ -42,9 +42,9 @@ The `source_api_association_config` configuration block supports the following a This resource exports the following attributes in addition to the arguments above: -* `arn` - ARN of the Source Api Association. -* `association_id` - ID of the Source Api Association. -* `id` - Combined ID of the Source Api Association and Merge Api. +* `arn` - ARN of the Source API Association. +* `association_id` - ID of the Source API Association. +* `id` - Combined ID of the Source API Association and Merge API. 
## Timeouts @@ -56,7 +56,7 @@ This resource exports the following attributes in addition to the arguments abov ## Import -In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import AppSync Source Api Association using the `example_id_arg`. For example: +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import AppSync Source API Association using the `association_id` and `merged_api_id` separated by `,`. For example: ```terraform import { @@ -65,7 +65,7 @@ import { } ``` -Using `terraform import`, import AppSync Source Api Association using the `gzos6bteufdunffzzifiowisoe,243685a0-9347-4a1a-89c1-9b57dea01e31`. For example: +Using `terraform import`, import AppSync Source API Association using the `association_id` and `merged_api_id` separated by `,`. For example: ```console % terraform import aws_appsync_source_api_association.example gzos6bteufdunffzzifiowisoe,243685a0-9347-4a1a-89c1-9b57dea01e31 diff --git a/website/docs/r/athena_database.html.markdown b/website/docs/r/athena_database.html.markdown index 6ad5bbbedc84..e3541b013021 100644 --- a/website/docs/r/athena_database.html.markdown +++ b/website/docs/r/athena_database.html.markdown @@ -36,6 +36,7 @@ This resource supports the following arguments: * `expected_bucket_owner` - (Optional) AWS account ID that you expect to be the owner of the Amazon S3 bucket. * `force_destroy` - (Optional, Default: false) Boolean that indicates all tables should be deleted from the database so that the database can be destroyed without error. The tables are *not* recoverable. * `properties` - (Optional) Key-value map of custom metadata properties for the database definition. +* `workgroup` - (Optional) Name of the workgroup. 
### ACL Configuration diff --git a/website/docs/r/athena_workgroup.html.markdown b/website/docs/r/athena_workgroup.html.markdown index 814c2f4f7e90..cbd782a8911c 100644 --- a/website/docs/r/athena_workgroup.html.markdown +++ b/website/docs/r/athena_workgroup.html.markdown @@ -49,19 +49,25 @@ This resource supports the following arguments: * `bytes_scanned_cutoff_per_query` - (Optional) Integer for the upper data usage limit (cutoff) for the amount of bytes a single query in a workgroup is allowed to scan. Must be at least `10485760`. * `enforce_workgroup_configuration` - (Optional) Boolean whether the settings for the workgroup override client-side settings. For more information, see [Workgroup Settings Override Client-Side Settings](https://docs.aws.amazon.com/athena/latest/ug/workgroups-settings-override.html). Defaults to `true`. * `engine_version` - (Optional) Configuration block for the Athena Engine Versioning. For more information, see [Athena Engine Versioning](https://docs.aws.amazon.com/athena/latest/ug/engine-versions.html). See [Engine Version](#engine-version) below. -* `execution_role` - (Optional) Role used in a notebook session for accessing the user's resources. +* `execution_role` - (Optional) Role used to access user resources in notebook sessions and IAM Identity Center enabled workgroups. The property is required for IAM Identity Center enabled workgroups. +* `identity_center_configuration` - (Optional) Configuration block to set up an IAM Identity Center enabled workgroup. See [Identity Center Configuration](#identity-center-configuration) below. * `publish_cloudwatch_metrics_enabled` - (Optional) Boolean whether Amazon CloudWatch metrics are enabled for the workgroup. Defaults to `true`. -* `result_configuration` - (Optional) Configuration block with result settings. See [Result Configuration](#result-configuration) below. 
* `requester_pays_enabled` - (Optional) If set to true , allows members assigned to a workgroup to reference Amazon S3 Requester Pays buckets in queries. If set to false , workgroup members cannot query data from Requester Pays buckets, and queries that retrieve data from Requester Pays buckets cause an error. The default is false . For more information about Requester Pays buckets, see [Requester Pays Buckets](https://docs.aws.amazon.com/AmazonS3/latest/dev/RequesterPaysBuckets.html) in the Amazon Simple Storage Service Developer Guide. +* `result_configuration` - (Optional) Configuration block with result settings. See [Result Configuration](#result-configuration) below. #### Engine Version * `selected_engine_version` - (Optional) Requested engine version. Defaults to `AUTO`. +#### Identity Center Configuration + +* `enable_identity_center` - (Optional) Specifies whether the workgroup is IAM Identity Center supported. +* `identity_center_instance_arn` - (Optional) The IAM Identity Center instance ARN that the workgroup associates to. + #### Result Configuration -* `encryption_configuration` - (Optional) Configuration block with encryption settings. See [Encryption Configuration](#encryption-configuration) below. * `acl_configuration` - (Optional) That an Amazon S3 canned ACL should be set to control ownership of stored query results. See [ACL Configuration](#acl-configuration) below. +* `encryption_configuration` - (Optional) Configuration block with encryption settings. See [Encryption Configuration](#encryption-configuration) below. * `expected_bucket_owner` - (Optional) AWS account ID that you expect to be the owner of the Amazon S3 bucket. * `output_location` - (Optional) Location in Amazon S3 where your query results are stored, such as `s3://path/to/query/bucket/`. For more information, see [Queries and Query Result Files](https://docs.aws.amazon.com/athena/latest/ug/querying.html). 
diff --git a/website/docs/r/autoscaling_group.html.markdown b/website/docs/r/autoscaling_group.html.markdown index bbe33e98dc50..e62672f5fcee 100644 --- a/website/docs/r/autoscaling_group.html.markdown +++ b/website/docs/r/autoscaling_group.html.markdown @@ -684,7 +684,7 @@ This configuration block supports the following: - `instance_warmup` - (Optional) Number of seconds until a newly launched instance is configured and ready to use. Default behavior is to use the Auto Scaling Group's health check grace period. - `max_healthy_percentage` - (Optional) Amount of capacity in the Auto Scaling group that can be in service and healthy, or pending, to support your workload when an instance refresh is in place, as a percentage of the desired capacity of the Auto Scaling group. Values must be between `100` and `200`, defaults to `100`. - `min_healthy_percentage` - (Optional) Amount of capacity in the Auto Scaling group that must remain healthy during an instance refresh to allow the operation to continue, as a percentage of the desired capacity of the Auto Scaling group. Defaults to `90`. - - `skip_matching` - (Optional) Replace instances that already have your desired configuration. Defaults to `false`. + - `skip_matching` - (Optional) Skip replacing instances that already have your desired configuration. Defaults to `false`. - `auto_rollback` - (Optional) Automatically rollback if instance refresh fails. Defaults to `false`. This option may only be set to `true` when specifying a `launch_template` or `mixed_instances_policy`. - `alarm_specification` - (Optional) Alarm Specification for Instance Refresh. - `alarms` - (Required) List of Cloudwatch alarms. If any of these alarms goes into ALARM state, Instance Refresh is failed. 
diff --git a/website/docs/r/autoscaling_lifecycle_hook.html.markdown b/website/docs/r/autoscaling_lifecycle_hook.html.markdown index 3d950744e831..361b67466c53 100644 --- a/website/docs/r/autoscaling_lifecycle_hook.html.markdown +++ b/website/docs/r/autoscaling_lifecycle_hook.html.markdown @@ -65,7 +65,7 @@ This resource supports the following arguments: * `heartbeat_timeout` - (Optional) Defines the amount of time, in seconds, that can elapse before the lifecycle hook times out. When the lifecycle hook times out, Auto Scaling performs the action defined in the DefaultResult parameter * `lifecycle_transition` - (Required) Instance state to which you want to attach the lifecycle hook. For a list of lifecycle hook types, see [describe-lifecycle-hook-types](https://docs.aws.amazon.com/cli/latest/reference/autoscaling/describe-lifecycle-hook-types.html#examples) * `notification_metadata` - (Optional) Contains additional information that you want to include any time Auto Scaling sends a message to the notification target. -* `notification_target_arn` - (Optional) ARN of the notification target that Auto Scaling will use to notify you when an instance is in the transition state for the lifecycle hook. This ARN target can be either an SQS queue or an SNS topic. +* `notification_target_arn` - (Optional) ARN of the notification target that Auto Scaling will use to notify you when an instance is in the transition state for the lifecycle hook. This ARN target can be either an SQS queue, an SNS topic, or a Lambda function. * `role_arn` - (Optional) ARN of the IAM role that allows the Auto Scaling group to publish to the specified notification target. 
## Attribute Reference diff --git a/website/docs/r/batch_compute_environment.html.markdown b/website/docs/r/batch_compute_environment.html.markdown index 2d4a0bc3daea..4ea9fed89ffd 100644 --- a/website/docs/r/batch_compute_environment.html.markdown +++ b/website/docs/r/batch_compute_environment.html.markdown @@ -225,6 +225,7 @@ This resource supports the following arguments: `ec2_configuration` supports the following: * `image_id_override` - (Optional) The AMI ID used for instances launched in the compute environment that match the image type. This setting overrides the `image_id` argument in the [`compute_resources`](#compute_resources) block. +* `image_kubernetes_version` - (Optional) The Kubernetes version for the compute environment. If you don't specify a value, the latest version that AWS Batch supports is used. See [Supported Kubernetes versions](https://docs.aws.amazon.com/batch/latest/userguide/supported_kubernetes_version.html) for the list of Kubernetes versions supported by AWS Batch on Amazon EKS. * `image_type` - (Optional) The image type to match with the instance type to select an AMI. If the `image_id_override` parameter isn't specified, then a recent [Amazon ECS-optimized Amazon Linux 2 AMI](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-optimized_AMI.html#al2ami) (`ECS_AL2`) is used. ### launch_template @@ -247,7 +248,7 @@ This resource supports the following arguments: `update_policy` supports the following: * `job_execution_timeout_minutes` - (Required) Specifies the job timeout (in minutes) when the compute environment infrastructure is updated. -* `terminate_jobs_on_update` - (Required) Specifies whether jobs are automatically terminated when the computer environment infrastructure is updated. +* `terminate_jobs_on_update` - (Required) Specifies whether jobs are automatically terminated when the compute environment infrastructure is updated. 
## Attribute Reference @@ -261,6 +262,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_batch_compute_environment.example + identity = { + "arn" = "arn:aws:batch:us-east-1:123456789012:compute-environment/sample" + } +} + +resource "aws_batch_compute_environment" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the compute environment. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import AWS Batch compute using the `name`. For example: ```terraform diff --git a/website/docs/r/batch_job_definition.html.markdown b/website/docs/r/batch_job_definition.html.markdown index 7a3e03875298..3abfbfb22b36 100644 --- a/website/docs/r/batch_job_definition.html.markdown +++ b/website/docs/r/batch_job_definition.html.markdown @@ -344,7 +344,7 @@ The following arguments are optional: #### eks_metadata -* `labels` - Key-value pairs used to identify, sort, and organize cube resources. +* `labels` - Key-value pairs used to identify, sort, and organize kubernetes resources. #### `eks_secret` @@ -378,6 +378,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. 
For example: + +```terraform +import { + to = aws_batch_job_definition.example + identity = { + "arn" = "arn:aws:batch:us-east-1:123456789012:job-definition/sample:1" + } +} + +resource "aws_batch_job_definition" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the job definition. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Batch Job Definition using the `arn`. For example: ```terraform diff --git a/website/docs/r/batch_job_queue.html.markdown b/website/docs/r/batch_job_queue.html.markdown index 20e6a536c297..5a05a0ad248b 100644 --- a/website/docs/r/batch_job_queue.html.markdown +++ b/website/docs/r/batch_job_queue.html.markdown @@ -111,6 +111,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_batch_job_queue.example + identity = { + "arn" = "arn:aws:batch:us-east-1:123456789012:job-queue/sample" + } +} + +resource "aws_batch_job_queue" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the job queue. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Batch Job Queue using the `arn`. For example: ```terraform diff --git a/website/docs/r/bcmdataexports_export.html.markdown b/website/docs/r/bcmdataexports_export.html.markdown index 90a2e5cca33a..768262af4647 100644 --- a/website/docs/r/bcmdataexports_export.html.markdown +++ b/website/docs/r/bcmdataexports_export.html.markdown @@ -15,6 +15,9 @@ Terraform resource for managing an AWS BCM Data Exports Export. 
### Basic Usage ```terraform +data "aws_caller_identity" "current" {} +data "aws_partition" "current" {} + resource "aws_bcmdataexports_export" "test" { export { name = "testexample" @@ -22,6 +25,7 @@ resource "aws_bcmdataexports_export" "test" { query_statement = "SELECT identity_line_item_id, identity_time_interval, line_item_product_code,line_item_unblended_cost FROM COST_AND_USAGE_REPORT" table_configurations = { COST_AND_USAGE_REPORT = { + BILLING_VIEW_ARN = "arn:${data.aws_partition.current.partition}:billing::${data.aws_caller_identity.current.account_id}:billingview/primary" TIME_GRANULARITY = "HOURLY", INCLUDE_RESOURCES = "FALSE", INCLUDE_MANUAL_DISCOUNT_COMPATIBILITY = "FALSE", @@ -66,8 +70,8 @@ The following arguments are required: ### `data_query` Argument Reference -* `query_statement` - (Required) Query statement. -* `table_configurations` - (Optional) Table configuration. +* `query_statement` - (Required) Query statement. The SQL table name for CUR 2.0 is `COST_AND_USAGE_REPORT`. See the [AWS documentation](https://docs.aws.amazon.com/cur/latest/userguide/table-dictionary-cur2.html) for a list of available columns. +* `table_configurations` - (Optional) Table configuration. See the [AWS documentation](https://docs.aws.amazon.com/cur/latest/userguide/table-dictionary-cur2.html#cur2-table-configurations) for the available configurations. In addition to those listed in the documentation, `BILLING_VIEW_ARN` must also be included, as shown in the example above. ### `destination_configurations` Argument Reference @@ -107,6 +111,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. 
For example: + +```terraform +import { + to = aws_bcmdataexports_export.example + identity = { + "arn" = "arn:aws:bcm-data-exports:us-east-1:123456789012:export/example-export" + } +} + +resource "aws_bcmdataexports_export" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the BCM Data Exports export. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import BCM Data Exports Export using the export ARN. For example: ```terraform diff --git a/website/docs/r/bedrock_custom_model.html.markdown b/website/docs/r/bedrock_custom_model.html.markdown index 589c52c55d24..8177d81b84b6 100644 --- a/website/docs/r/bedrock_custom_model.html.markdown +++ b/website/docs/r/bedrock_custom_model.html.markdown @@ -98,6 +98,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_bedrock_custom_model.example + identity = { + "arn" = "arn:aws:bedrock:us-west-2:123456789012:custom-model/amazon.titan-text-lite-v1:0:4k/example-model" + } +} + +resource "aws_bedrock_custom_model" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the Bedrock custom model. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Bedrock Custom Model using the `job_arn`. 
For example: ```terraform diff --git a/website/docs/r/bedrock_guardrail.html.markdown b/website/docs/r/bedrock_guardrail.html.markdown index b5a7b08b8754..8cfe96ca2e59 100644 --- a/website/docs/r/bedrock_guardrail.html.markdown +++ b/website/docs/r/bedrock_guardrail.html.markdown @@ -27,19 +27,30 @@ resource "aws_bedrock_guardrail" "example" { output_strength = "MEDIUM" type = "HATE" } + tier_config { + tier_name = "STANDARD" + } } sensitive_information_policy_config { pii_entities_config { - action = "BLOCK" - type = "NAME" + action = "BLOCK" + input_action = "BLOCK" + output_action = "ANONYMIZE" + input_enabled = true + output_enabled = true + type = "NAME" } regexes_config { - action = "BLOCK" - description = "example regex" - name = "regex_example" - pattern = "^\\d{3}-\\d{2}-\\d{4}$" + action = "BLOCK" + input_action = "BLOCK" + output_action = "BLOCK" + input_enabled = true + output_enabled = false + description = "example regex" + name = "regex_example" + pattern = "^\\d{3}-\\d{2}-\\d{4}$" } } @@ -50,6 +61,9 @@ resource "aws_bedrock_guardrail" "example" { type = "DENY" definition = "Investment advice refers to inquiries, guidance, or recommendations regarding the management or allocation of funds or assets with the goal of generating returns ." } + tier_config { + tier_name = "CLASSIC" + } } word_policy_config { @@ -89,6 +103,7 @@ The `content_policy_config` configuration block supports the following arguments * `filters_config` - (Optional) Set of content filter configs in content policy. See [Filters Config](#content-filters-config) for more information. +* `tier_config` - (Optional) Configuration block for the content policy tier. See [Tier Config](#content-tier-config) for more information. #### Content Filters Config @@ -98,9 +113,15 @@ The `filters_config` configuration block supports the following arguments: * `output_strength` - (Optional) Strength for filters. * `type` - (Optional) Type of filter in content policy. 
+#### Content Tier Config + +The `tier_config` configuration block supports the following arguments: + +* `tier_name` - (Required) The name of the content policy tier. Valid values include STANDARD or CLASSIC. + ### Contextual Grounding Policy Config -* `filters_config` (Required) List of contextual grounding filter configs. See [Contextual Grounding Filters Config](#contextual-grounding-filters-config) for more information. +* `filters_config` (Required) One or more blocks defining contextual grounding filter configs. See [Contextual Grounding Filters Config](#contextual-grounding-filters-config) for more information. #### Contextual Grounding Filters Config @@ -109,8 +130,17 @@ The `filters_config` configuration block supports the following arguments: * `threshold` - (Required) The threshold for this filter. * `type` - (Required) Type of contextual grounding filter. +### Cross Region Inference + +* `cross_region_config` (Optional) Configuration block to enable cross-region routing for Bedrock guardrails. See [Cross Region Config](#cross-region-config) for more information. See the list of [available regions](https://docs.aws.amazon.com/bedrock/latest/userguide/guardrails-cross-region.html) where this feature is supported. + +#### Cross Region Config + +* `guardrail_profile_identifier` (Required) Guardrail profile ARN. + ### Topic Policy Config +* `tier_config` - (Optional) Configuration block for the topic policy tier. See [Tier Config](#topics-tier-config) for more information. * `topics_config` (Required) List of topic configs in topic policy. See [Topics Config](#topics-config) for more information. #### Topics Config @@ -120,6 +150,12 @@ The `filters_config` configuration block supports the following arguments: * `type` (Required) Type of topic in a policy. * `examples` (Optional) List of text examples. +#### Topics Tier Config + +The `tier_config` configuration block supports the following arguments: + +* `tier_name` - (Required) The name of the topic policy tier. 
Valid values include STANDARD or CLASSIC. + ### Sensitive Information Policy Config * `pii_entities_config` (Optional) List of entities. See [PII Entities Config](#pii-entities-config) for more information. @@ -127,13 +163,21 @@ The `filters_config` configuration block supports the following arguments: #### PII Entities Config -* `action` (Required) Options for sensitive information action. +* `action` (Required) Options for sensitive information action. Valid values: `BLOCK`, `ANONYMIZE`, `NONE`. +* `input_action` (Optional) Action to take when harmful content is detected in the input. Valid values: `BLOCK`, `ANONYMIZE`, `NONE`. +* `input_enabled` (Optional) Whether to enable guardrail evaluation on the input. When disabled, you aren't charged for the evaluation. +* `output_action` (Optional) Action to take when harmful content is detected in the output. Valid values: `BLOCK`, `ANONYMIZE`, `NONE`. +* `output_enabled` (Optional) Whether to enable guardrail evaluation on the output. When disabled, you aren't charged for the evaluation. * `type` (Required) The currently supported PII entities. #### Regexes Config -* `action` (Required) Options for sensitive information action. +* `action` (Required) Options for sensitive information action. Valid values: `BLOCK`, `ANONYMIZE`, `NONE`. +* `input_action` (Optional) Action to take when harmful content is detected in the input. Valid values: `BLOCK`, `ANONYMIZE`, `NONE`. +* `input_enabled` (Optional) Whether to enable guardrail evaluation on the input. When disabled, you aren't charged for the evaluation. * `name` (Required) The regex name. +* `output_action` (Optional) Action to take when harmful content is detected in the output. Valid values: `BLOCK`, `ANONYMIZE`, `NONE`. +* `output_enabled` (Optional) Whether to enable guardrail evaluation on the output. When disabled, you aren't charged for the evaluation. * `pattern` (Required) The regex pattern. * `description` (Optional) The regex description. 
@@ -145,10 +189,18 @@ The `filters_config` configuration block supports the following arguments: #### Managed Word Lists Config * `type` (Required) Options for managed words. +* `input_action` (Optional) Action to take when harmful content is detected in the input. Valid values: `BLOCK`, `NONE`. +* `input_enabled` (Optional) Whether to enable guardrail evaluation on the input. When disabled, you aren't charged for the evaluation. +* `output_action` (Optional) Action to take when harmful content is detected in the output. Valid values: `BLOCK`, `NONE`. +* `output_enabled` (Optional) Whether to enable guardrail evaluation on the output. When disabled, you aren't charged for the evaluation. #### Words Config * `text` (Required) The custom word text. +* `input_action` (Optional) Action to take when harmful content is detected in the input. Valid values: `BLOCK`, `NONE`. +* `input_enabled` (Optional) Whether to enable guardrail evaluation on the input. When disabled, you aren't charged for the evaluation. +* `output_action` (Optional) Action to take when harmful content is detected in the output. Valid values: `BLOCK`, `NONE`. +* `output_enabled` (Optional) Whether to enable guardrail evaluation on the output. When disabled, you aren't charged for the evaluation. ## Attribute Reference diff --git a/website/docs/r/bedrock_inference_profile.html.markdown b/website/docs/r/bedrock_inference_profile.html.markdown index c7fe84deea70..5ab1bdc9e701 100644 --- a/website/docs/r/bedrock_inference_profile.html.markdown +++ b/website/docs/r/bedrock_inference_profile.html.markdown @@ -79,7 +79,7 @@ This resource exports the following attributes in addition to the arguments abov ## Import -In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Bedrock Inference Profile using the `example_id_arg`. 
For example: +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Bedrock Inference Profile using the `name`. For example: ```terraform import { @@ -88,7 +88,7 @@ import { } ``` -Using `terraform import`, import Bedrock Inference Profile using the `example_id_arg`. For example: +Using `terraform import`, import Bedrock Inference Profile using the `name`. For example: ```console % terraform import aws_bedrock_inference_profile.example inference_profile-id-12345678 diff --git a/website/docs/r/bedrock_provisioned_model_throughput.html.markdown b/website/docs/r/bedrock_provisioned_model_throughput.html.markdown index 6f6ff75c996e..59668b1e2383 100644 --- a/website/docs/r/bedrock_provisioned_model_throughput.html.markdown +++ b/website/docs/r/bedrock_provisioned_model_throughput.html.markdown @@ -47,6 +47,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_bedrock_provisioned_model_throughput.example + identity = { + "arn" = "arn:aws:bedrock:us-west-2:123456789012:provisioned-model/a1b2c3d4567890ab" + } +} + +resource "aws_bedrock_provisioned_model_throughput" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the Bedrock provisioned model throughput. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Provisioned Throughput using the `provisioned_model_arn`. 
For example: ```terraform diff --git a/website/docs/r/bedrockagent_agent_collaborator.html.markdown b/website/docs/r/bedrockagent_agent_collaborator.html.markdown index 7698f25c5201..d0b27f058014 100644 --- a/website/docs/r/bedrockagent_agent_collaborator.html.markdown +++ b/website/docs/r/bedrockagent_agent_collaborator.html.markdown @@ -108,7 +108,7 @@ The following arguments are required: * `agent_id` - (Required) ID if the agent to associate the collaborator. * `collaboration_instruction` - (Required) Instruction to give the collaborator. -* `collbaorator_name` - (Required) Name of this collaborator. +* `collaborator_name` - (Required) Name of this collaborator. The following arguments are optional: diff --git a/website/docs/r/bedrockagent_flow.html.markdown b/website/docs/r/bedrockagent_flow.html.markdown new file mode 100644 index 000000000000..200c760cbb81 --- /dev/null +++ b/website/docs/r/bedrockagent_flow.html.markdown @@ -0,0 +1,408 @@ +--- +subcategory: "Bedrock Agents" +layout: "aws" +page_title: "AWS: aws_bedrockagent_flow" +description: |- + Terraform resource for managing an AWS Bedrock Agents Flow. +--- + +# Resource: aws_bedrockagent_flow + +Terraform resource for managing an AWS Bedrock Agents Flow. 
+ +### Basic Usage + +```terraform +resource "aws_bedrockagent_flow" "example" { + name = "example-flow" + execution_role_arn = aws_iam_role.example.arn +} +``` + +## Example Usage + +The default definition: + +```terraform +resource "aws_bedrockagent_flow" "example" { + name = "example" + execution_role_arn = aws_iam_role.example.arn + + definition { + connection { + name = "FlowInputNodeFlowInputNode0ToPrompt_1PromptsNode0" + source = "FlowInputNode" + target = "Prompt_1" + type = "Data" + + configuration { + data { + source_output = "document" + target_input = "topic" + } + } + } + connection { + name = "Prompt_1PromptsNode0ToFlowOutputNodeFlowOutputNode0" + source = "Prompt_1" + target = "FlowOutputNode" + type = "Data" + + configuration { + data { + source_output = "modelCompletion" + target_input = "document" + } + } + } + node { + name = "FlowInputNode" + type = "Input" + + configuration { + input {} + } + + output { + name = "document" + type = "String" + } + } + node { + name = "Prompt_1" + type = "Prompt" + + configuration { + prompt { + source_configuration { + inline { + model_id = "amazon.titan-text-express-v1" + template_type = "TEXT" + + inference_configuration { + text { + max_tokens = 2048 + stop_sequences = ["User:"] + temperature = 0 + top_p = 0.8999999761581421 + } + } + + template_configuration { + text { + text = "Write a paragraph about {{topic}}." + + input_variable { + name = "topic" + } + } + } + } + } + } + } + + input { + expression = "$.data" + name = "topic" + type = "String" + } + + output { + name = "modelCompletion" + type = "String" + } + } + node { + name = "FlowOutputNode" + type = "Output" + + configuration { + output {} + } + + input { + expression = "$.data" + name = "document" + type = "String" + } + } + } +} +``` + +## Argument Reference + +The following arguments are required: + +* `name` - (Required) A name for the flow. 
+* `execution_role_arn` - (Required) The Amazon Resource Name (ARN) of the service role with permissions to create and manage a flow. For more information, see [Create a service role for flows in Amazon Bedrock](https://docs.aws.amazon.com/bedrock/latest/userguide/flows-permissions.html) in the Amazon Bedrock User Guide. + +The following arguments are optional: + +* `description` - (Optional) A description for the flow. +* `customer_encryption_key_arn` - (Optional) The Amazon Resource Name (ARN) of the KMS key to encrypt the flow. +* `definition` - (Optional) A definition of the nodes and connections between nodes in the flow. See [Definition](#definition) for more information. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `tags` (Optional) Key-value map of resource tags. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. + +### Definition + +* `connection` - (Optional) A list of connection definitions in the flow. See [Connection](#connection) for more information. +* `node` - (Optional) A list of node definitions in the flow. See [Node](#node) for more information. + +### Connection + +* `name` - (Required) A name for the connection that you can reference. +* `source` - (Required) The node that the connection starts at. +* `target` - (Required) The node that the connection ends at. +* `type` - (Required) Whether the source node that the connection begins from is a condition node `Conditional` or not `Data`. +* `configuration` - (Required) Configuration of the connection. 
See [Connection Configuration](#connection-configuration) for more information. + +### Connection Configuration + +* `data` - (Optional) The configuration of a connection originating from a node that isn’t a Condition node. See [Data Connection Configuration](#data-connection-configuration) for more information. +* `conditional` - (Optional) The configuration of a connection originating from a Condition node. See [Conditional Connection Configuration](#conditional-connection-configuration) for more information. + +#### Data Connection Configuration + +* `source_output` - (Required) The name of the output in the source node that the connection begins from. +* `target_input` - (Required) The name of the input in the target node that the connection ends at. + +#### Conditional Connection Configuration + +* `condition` - (Required) The condition that triggers this connection. For more information about how to write conditions, see the Condition node type in the [Node types](https://docs.aws.amazon.com/bedrock/latest/userguide/node-types.html) topic in the Amazon Bedrock User Guide. + +### Node + +* `name` - (Required) A name for the node. +* `type` - (Required) The type of node. This value must match the name of the key that you provide in the configuration. Valid values: `Agent`, `Collector`, `Condition`, `Input`, `Iterator`, `KnowledgeBase`, `LambdaFunction`, `Lex`, `Output`, `Prompt`, `Retrieval`, `Storage` +* `configuration` - (Required) Contains configurations for the node. See [Node Configuration](#node-configuration) for more information. +* `input` - (Optional) A list of objects containing information about an input into the node. See [Node Input](#node-input) for more information. +* `output` - (Optional) A list of objects containing information about an output from the node. See [Node Output](#node-output) for more information. + +### Node Input + +* `name` - (Required) A name for the input that you can reference. 
+* `type` - (Required) The data type of the input. If the input doesn’t match this type at runtime, a validation error will be thrown. +* `expression` - (Required) An expression that formats the input for the node. For an explanation of how to create expressions, see [Expressions in Prompt flows in Amazon Bedrock](https://docs.aws.amazon.com/bedrock/latest/userguide/flows-expressions.html). +* `category` - (Optional) How input data flows between iterations in a DoWhile loop. + +### Node Output + +* `name` - (Required) A name for the output that you can reference. +* `type` - (Required) The data type of the output. If the output doesn’t match this type at runtime, a validation error will be thrown. + +### Node Configuration + +* `agent` - (Optional) Contains configurations for an agent node in your flow. Invokes an alias of an agent and returns the response. See [Agent Node Configuration](#agent-node-configuration) for more information. +* `collector` - (Optional) Contains configurations for a collector node in your flow. Collects an iteration of inputs and consolidates them into an array of outputs. This object has no fields. +* `condition` - (Optional) Contains configurations for a Condition node in your flow. Defines conditions that lead to different branches of the flow. See [Condition Node Configuration](#condition-node-configuration) for more information. +* `inline_code` - (Optional) Contains configurations for an inline code node in your flow. See [Inline Code Node Configuration](#inline-code-node-configuration) for more information. +* `input` - (Optional) Contains configurations for an input flow node in your flow. The node `inputs` can’t be specified for this node. This object has no fields. +* `iterator` - (Optional) Contains configurations for an iterator node in your flow. Takes an input that is an array and iteratively sends each item of the array as an output to the following node. The size of the array is also returned in the output. 
The output flow node at the end of the flow iteration will return a response for each member of the array. To return only one response, you can include a collector node downstream from the iterator node. This object has no fields. +* `knowledge_base` - (Optional) Contains configurations for a knowledge base node in your flow. Queries a knowledge base and returns the retrieved results or generated response. See [Knowledge Base Node Configuration](#knowledge-base-node-configuration) for more information. +* `lambda_function` - (Optional) Contains configurations for a Lambda function node in your flow. Invokes a Lambda function. See [Lambda Function Node Configuration](#lambda-function-node-configuration) for more information. +* `lex` - (Optional) Contains configurations for a Lex node in your flow. Invokes an Amazon Lex bot to identify the intent of the input and return the intent as the output. See [Lex Node Configuration](#lex-node-configuration) for more information. +* `output` - (Optional) Contains configurations for an output flow node in your flow. The node `outputs` can’t be specified for this node. This object has no fields. +* `prompt` - (Optional) Contains configurations for a prompt node in your flow. Runs a prompt and generates the model response as the output. You can use a prompt from Prompt management or you can configure one in this node. See [Prompt Node Configuration](#prompt-node-configuration) for more information. +* `retrieval` - (Optional) Contains configurations for a Retrieval node in your flow. Retrieves data from an Amazon S3 location and returns it as the output. See [Retrieval Node Configuration](#retrieval-node-configuration) for more information. +* `storage` - (Optional) Contains configurations for a Storage node in your flow. Stores an input in an Amazon S3 location. See [Storage Node Configuration](#storage-node-configuration) for more information. 
+ +### Agent Node Configuration + +* `agent_alias_arn` - (Required) The Amazon Resource Name (ARN) of the alias of the agent to invoke. + +### Condition Node Configuration + +* `condition` - (Optional) A list of conditions. See [Condition Config](#condition-config) for more information. + +#### Condition Config + +* `name` - (Required) A name for the condition that you can reference. +* `expression` - (Optional) Defines the condition. You must refer to at least one of the inputs in the condition. For more information, expand the Condition node section in [Node types in prompt flows](https://docs.aws.amazon.com/bedrock/latest/userguide/flows-how-it-works.html#flows-nodes). + +### Inline Code Node Configuration + +* `code` - (Required) The code that's executed in your inline code node. +* `language` - (Required) The programming language used by your inline code node. + +### Knowledge Base Node Configuration + +* `knowledge_base_id` - (Required) The unique identifier of the knowledge base to query. +* `model_id` - (Required) The unique identifier of the model or inference profile to use to generate a response from the query results. Omit this field if you want to return the retrieved results as an array. +* `guardrail_configuration` - (Required) Contains configurations for a guardrail to apply during query and response generation for the knowledge base in this configuration. See [Guardrail Configuration](#guardrail-configuration) for more information. + +#### Guardrail Configuration + +* `guardrail_identifier` - (Required) The unique identifier of the guardrail. +* `guardrail_version` - (Required) The version of the guardrail. + +### Lambda Function Node Configuration + +* `lambda_arn` - (Required) The Amazon Resource Name (ARN) of the Lambda function to invoke. + +### Lex Node Configuration + +* `bot_alias_arn` - (Required) The Amazon Resource Name (ARN) of the Amazon Lex bot alias to invoke. 
+* `locale_id` - (Required) The Region to invoke the Amazon Lex bot in + +### Prompt Node Configuration + +* `resource` - (Optional) Contains configurations for a prompt from Prompt management. See [Prompt Resource Configuration](#prompt-resource-configuration) for more information. +* `inline` - (Optional) Contains configurations for a prompt that is defined inline. See [Prompt Inline Configuration](#prompt-inline-configuration) for more information. + +#### Prompt Resource Configuration + +* `prompt_arn` - (Required) The Amazon Resource Name (ARN) of the prompt from Prompt management. + +#### Prompt Inline Configuration + +* `additional_model_request_fields` - (Optional) Additional fields to be included in the model request for the Prompt node. +* `inference_configuration` - (Optional) Contains inference configurations for the prompt. See [Prompt Inference Configuration](#prompt-inference-configuration) for more information. +* `model_id` - (Required) The unique identifier of the model or [inference profile](https://docs.aws.amazon.com/bedrock/latest/userguide/cross-region-inference.html) to run inference with. +* `template_type` - (Required) The type of prompt template. Valid values: `TEXT`, `CHAT`. +* `template_configuration` - (Required) Contains a prompt and variables in the prompt that can be replaced with values at runtime. See [Prompt Template Configuration](#prompt-template-configuration) for more information. + +#### Prompt Inference Configuration + +* `text` - (Optional) Contains inference configurations for a text prompt. See [Text Inference Configuration](#text-inference-configuration) for more information. + +#### Text Inference Configuration + +* `max_tokens` - (Optional) Maximum number of tokens to return in the response. +* `stop_sequences` - (Optional) List of strings that define sequences after which the model will stop generating. +* `temperature` - (Optional) Controls the randomness of the response. 
Choose a lower value for more predictable outputs and a higher value for more surprising outputs. +* `top_p` - (Optional) Percentage of most-likely candidates that the model considers for the next token. + +#### Prompt Template Configuration + +* `text` - (Optional) Contains configurations for the text in a message for a prompt. See [Text Template Configuration](#text-template-configuration) for more information. +* `chat` - (Optional) Contains configurations to use the prompt in a conversational format. See [Chat Template Configuration](#chat-template-configuration) for more information. + +#### Text Template Configuration + +* `text` - (Required) The message for the prompt. +* `input_variable` - (Optional) A list of variables in the prompt template. See [Input Variable](#input-variable) for more information. +* `cache_point` - (Optional) A cache checkpoint within a template configuration. See [Cache Point](#cache-point) for more information. + +#### Chat Template Configuration + +* `input_variable` - (Optional) A list of variables in the prompt template. See [Input Variable](#input-variable) for more information. +* `message` - (Optional) A list of messages in the chat for the prompt. See [Message](#message) for more information. +* `system` - (Optional) A list of system prompts to provide context to the model or to describe how it should behave. See [System](#system) for more information. +* `tool_configuration` - (Optional) Configuration information for the tools that the model can use when generating a response. See [Tool Configuration](#tool-configuration) for more information. + +#### Message + +* `role` - (Required) The role that the message belongs to. +* `content` - (Required) Contains the content for the message you pass to, or receive from a model. See [Message Content](#message-content) for more information. + +#### Message Content + +* `cache_point` - (Optional) Creates a cache checkpoint within a message. See [Cache Point](#cache-point) for more information. 
+* `text` - (Optional) The text in the message. + +#### System + +* `cache_point` - (Optional) Creates a cache checkpoint within a tool designation. See [Cache Point](#cache-point) for more information. +* `text` - (Optional) The text in the system prompt. + +#### Tool Configuration + +* `tool_choice` - (Optional) Defines which tools the model should request when invoked. See [Tool Choice](#tool-choice) for more information. +* `tool` - (Optional) A list of tools to pass to a model. See [Tool](#tool) for more information. + +#### Tool Choice + +* `any` - (Optional) Defines tools, at least one of which must be requested by the model. No text is generated but the results of tool use are sent back to the model to help generate a response. This object has no fields. +* `auto` - (Optional) Defines tools. The model automatically decides whether to call a tool or to generate text instead. This object has no fields. +* `tool` - (Optional) Defines a specific tool that the model must request. No text is generated but the results of tool use are sent back to the model to help generate a response. See [Named Tool](#named-tool) for more information. + +#### Named Tool + +* `name` - (Required) The name of the tool. + +#### Tool + +* `cache_point` - (Optional) Creates a cache checkpoint within a tool designation. See [Cache Point](#cache-point) for more information. +* `tool_spec` - (Optional) The specification for the tool. See [Tool Specification](#tool-specification) for more information. + +#### Tool Specification + +* `name` - (Required) The name of the tool. +* `description` - (Optional) The description of the tool. +* `input_schema` - (Optional) The input schema of the tool. See [Tool Input Schema](#tool-input-schema) for more information. + +#### Tool Input Schema + +* `json` - (Optional) A JSON object defining the input schema for the tool. + +#### Input Variable + +* `name` - (Required) The name of the variable. 
+ +#### Cache Point + +* `type` - (Required) Indicates that the CachePointBlock is of the default type. Valid values: `default`. + +### Retrieval Node Configuration + +* `service_configuration` - (Required) Contains configurations for the service to use for retrieving data to return as the output from the node. See [Retrieval Service Configuration](#retrieval-service-configuration) for more information. + +#### Retrieval Service Configuration + +* `s3` - (Optional) Contains configurations for the Amazon S3 location from which to retrieve data to return as the output from the node. See [Retrieval S3 Service Configuration](#retrieval-s3-service-configuration) for more information. + +#### Retrieval S3 Service Configuration + +* `bucket_name` - (Required) The name of the Amazon S3 bucket from which to retrieve data. + +### Storage Node Configuration + +* `service_configuration` - (Required) Contains configurations for a Storage node in your flow. Stores an input in an Amazon S3 location. See [Storage Service Configuration](#storage-service-configuration) for more information. + +#### Storage Service Configuration + +* `s3` - (Optional) Contains configurations for the service to use for storing the input into the node. See [Storage S3 Service Configuration](#storage-s3-service-configuration) for more information. + +#### Storage S3 Service Configuration + +* `bucket_name` - (Required) The name of the Amazon S3 bucket in which to store the input into the node. + +## Attribute Reference + +This resource exports the following attributes in addition to the arguments above: + +* `arn` - The Amazon Resource Name (ARN) of the flow. +* `id` - The unique identifier of the flow. +* `created_at` - The time at which the flow was created. +* `updated_at` - The time at which the flow was last updated. +* `version` - The version of the flow. +* `status` - The status of the flow. 
+* `tags_all` - A map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). + +## Timeouts + +[Configuration options](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts): + +* `create` - (Default `5m`) +* `update` - (Default `5m`) +* `delete` - (Default `5m`) + +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Bedrock Agents Flow using the `id`. For example: + +```terraform +import { + to = aws_bedrockagent_flow.example + id = "ABCDEFGHIJ" +} +``` + +Using `terraform import`, import Bedrock Agents Flow using the `id`. For example: + +```console +% terraform import aws_bedrockagent_flow.example ABCDEFGHIJ +``` diff --git a/website/docs/r/bedrockagentcore_agent_runtime.html.markdown b/website/docs/r/bedrockagentcore_agent_runtime.html.markdown new file mode 100644 index 000000000000..b0fd21a24f55 --- /dev/null +++ b/website/docs/r/bedrockagentcore_agent_runtime.html.markdown @@ -0,0 +1,227 @@ +--- +subcategory: "Bedrock AgentCore" +layout: "aws" +page_title: "AWS: aws_bedrockagentcore_agent_runtime" +description: |- + Manages an AWS Bedrock AgentCore Agent Runtime. +--- + +# Resource: aws_bedrockagentcore_agent_runtime + +Manages an AWS Bedrock AgentCore Agent Runtime. Agent Runtime provides a containerized execution environment for AI agents. 
+ +## Example Usage + +### Basic Usage + +```terraform +data "aws_iam_policy_document" "assume_role" { + statement { + effect = "Allow" + actions = ["sts:AssumeRole"] + principals { + type = "Service" + identifiers = ["bedrock-agentcore.amazonaws.com"] + } + } +} + +data "aws_iam_policy_document" "ecr_permissions" { + statement { + actions = ["ecr:GetAuthorizationToken"] + effect = "Allow" + resources = ["*"] + } + + statement { + actions = [ + "ecr:BatchGetImage", + "ecr:GetDownloadUrlForLayer" + ] + effect = "Allow" + resources = [aws_ecr_repository.example.arn] + } +} + +resource "aws_iam_role" "example" { + name = "bedrock-agentcore-runtime-role" + assume_role_policy = data.aws_iam_policy_document.assume_role.json +} + +resource "aws_iam_role_policy" "example" { + role = aws_iam_role.example.id + policy = data.aws_iam_policy_document.ecr_permissions.json +} + +resource "aws_bedrockagentcore_agent_runtime" "example" { + agent_runtime_name = "example-agent-runtime" + role_arn = aws_iam_role.example.arn + + agent_runtime_artifact { + container_configuration { + container_uri = "${aws_ecr_repository.example.repository_url}:latest" + } + } + + network_configuration { + network_mode = "PUBLIC" + } +} +``` + +### MCP Server With Custom JWT Authorizer + +```terraform +resource "aws_bedrockagentcore_agent_runtime" "example" { + agent_runtime_name = "example-agent-runtime" + description = "Agent runtime with JWT authorization" + role_arn = aws_iam_role.example.arn + + agent_runtime_artifact { + container_configuration { + container_uri = "${aws_ecr_repository.example.repository_url}:v1.0" + } + } + + environment_variables = { + LOG_LEVEL = "INFO" + ENV = "production" + } + + authorizer_configuration { + custom_jwt_authorizer { + discovery_url = "https://accounts.google.com/.well-known/openid-configuration" + allowed_audience = ["my-app", "mobile-app"] + allowed_clients = ["client-123", "client-456"] + } + } + + network_configuration { + network_mode = "PUBLIC" + } + + 
protocol_configuration { + server_protocol = "MCP" + } +} +``` + +## Argument Reference + +The following arguments are required: + +* `agent_runtime_name` - (Required) Name of the agent runtime. +* `role_arn` - (Required) ARN of the IAM role that the agent runtime assumes to access AWS services. +* `agent_runtime_artifact` - (Required) Container artifact configuration. See [`agent_runtime_artifact`](#agent_runtime_artifact) below. +* `network_configuration` - (Required) Network configuration for the agent runtime. See [`network_configuration`](#network_configuration) below. + +The following arguments are optional: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `description` - (Optional) Description of the agent runtime. +* `environment_variables` - (Optional) Map of environment variables to pass to the container. +* `authorizer_configuration` - (Optional) Authorization configuration for authenticating incoming requests. See [`authorizer_configuration`](#authorizer_configuration) below. +* `lifecycle_configuration` - (Optional) Runtime session and resource lifecycle configuration for the agent runtime. See [`lifecycle_configuration`](#lifecycle_configuration) below. +* `protocol_configuration` - (Optional) Protocol configuration for the agent runtime. See [`protocol_configuration`](#protocol_configuration) below. +* `request_header_configuration` - (Optional) Configuration for HTTP request headers that will be passed through to the runtime. See [`request_header_configuration`](#request_header_configuration) below. +* `tags` - (Optional) Key-value map of resource tags. 
If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. + +### `agent_runtime_artifact` + +The `agent_runtime_artifact` block supports the following: + +* `container_configuration` - (Required) Container configuration block. See [`container_configuration`](#container_configuration) below. + +### `container_configuration` + +The `container_configuration` block supports the following: + +* `container_uri` - (Required) URI of the container image in Amazon ECR. + +### `authorizer_configuration` + +The `authorizer_configuration` block supports the following: + +* `custom_jwt_authorizer` - (Optional) JWT-based authorization configuration block. See [`custom_jwt_authorizer`](#custom_jwt_authorizer) below. + +### `custom_jwt_authorizer` + +The `custom_jwt_authorizer` block supports the following: + +* `discovery_url` - (Required) URL used to fetch OpenID Connect configuration or authorization server metadata. Must end with `.well-known/openid-configuration`. +* `allowed_audience` - (Optional) Set of allowed audience values for JWT token validation. +* `allowed_clients` - (Optional) Set of allowed client IDs for JWT token validation. + +### `lifecycle_configuration` + +The `lifecycle_configuration` block supports the following: + +* `idle_runtime_session_timeout` - (Optional) Timeout in seconds for idle runtime sessions. +* `max_lifetime` - (Optional) Maximum lifetime for the instance in seconds. + +### `network_configuration` + +The `network_configuration` block supports the following: + +* `network_mode` - (Required) Network mode for the agent runtime. Valid values: `PUBLIC`, `VPC`. +* `network_mode_config` - (Optional) Network mode configuration. See [`network_mode_config`](#network_mode_config) below. 
+ +### `network_mode_config` + +The `network_mode_config` block supports the following: + +* `security_groups` - (Required) Security groups associated with the VPC configuration. +* `subnets` - (Required) Subnets associated with the VPC configuration. + +### `protocol_configuration` + +The `protocol_configuration` block supports the following: + +* `server_protocol` - (Optional) Server protocol for the agent runtime. Valid values: `HTTP`, `MCP`, `A2A`. + +### `request_header_configuration` + +The `request_header_configuration` block supports the following: + +* `request_header_allowlist` - (Optional) A list of HTTP request headers that are allowed to be passed through to the runtime. + +## Attribute Reference + +This resource exports the following attributes in addition to the arguments above: + +* `agent_runtime_arn` - ARN of the Agent Runtime. +* `agent_runtime_id` - Unique identifier of the Agent Runtime. +* `agent_runtime_version` - Version of the Agent Runtime. +* `tags_all` - A map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). +* `workload_identity_details` - Workload identity details for the agent runtime. See [`workload_identity_details`](#workload_identity_details) below. + +### `workload_identity_details` + +The `workload_identity_details` block contains the following: + +* `workload_identity_arn` - ARN of the workload identity. + +## Timeouts + +[Configuration options](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts): + +* `create` - (Default `30m`) +* `update` - (Default `30m`) +* `delete` - (Default `30m`) + +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Bedrock AgentCore Agent Runtime using `agent_runtime_id`. 
For example: + +```terraform +import { + to = aws_bedrockagentcore_agent_runtime.example + id = "agent-runtime-12345" +} +``` + +Using `terraform import`, import Bedrock AgentCore Agent Runtime using `agent_runtime_id`. For example: + +```console +% terraform import aws_bedrockagentcore_agent_runtime.example agent-runtime-12345 +``` diff --git a/website/docs/r/bedrockagentcore_agent_runtime_endpoint.html.markdown b/website/docs/r/bedrockagentcore_agent_runtime_endpoint.html.markdown new file mode 100644 index 000000000000..e50e67be2a03 --- /dev/null +++ b/website/docs/r/bedrockagentcore_agent_runtime_endpoint.html.markdown @@ -0,0 +1,70 @@ +--- +subcategory: "Bedrock AgentCore" +layout: "aws" +page_title: "AWS: aws_bedrockagentcore_agent_runtime_endpoint" +description: |- + Manages an AWS Bedrock AgentCore Agent Runtime Endpoint. +--- + +# Resource: aws_bedrockagentcore_agent_runtime_endpoint + +Manages an AWS Bedrock AgentCore Agent Runtime Endpoint. Agent Runtime Endpoints provide a network-accessible interface for interacting with agent runtimes, enabling external systems to communicate with and invoke agent capabilities. + +## Example Usage + +### Basic Usage + +```terraform +resource "aws_bedrockagentcore_agent_runtime_endpoint" "example" { + name = "example-endpoint" + agent_runtime_id = aws_bedrockagentcore_agent_runtime.example.agent_runtime_id + description = "Endpoint for agent runtime communication" +} +``` + +## Argument Reference + +The following arguments are required: + +* `name` - (Required) Name of the agent runtime endpoint. +* `agent_runtime_id` - (Required) ID of the agent runtime this endpoint belongs to. + +The following arguments are optional: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
+* `agent_runtime_version` - (Optional) Version of the agent runtime to use for this endpoint. +* `description` - (Optional) Description of the agent runtime endpoint. +* `tags` - (Optional) Key-value map of resource tags. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. + +## Attribute Reference + +This resource exports the following attributes in addition to the arguments above: + +* `agent_runtime_endpoint_arn` - ARN of the Agent Runtime Endpoint. +* `agent_runtime_arn` - ARN of the associated Agent Runtime. +* `tags_all` - A map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). + +## Timeouts + +[Configuration options](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts): + +* `create` - (Default `30m`) +* `update` - (Default `30m`) +* `delete` - (Default `30m`) + +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Bedrock AgentCore Agent Runtime Endpoint using the `agent_runtime_id` and `name` separated by a comma. For example: + +```terraform +import { + to = aws_bedrockagentcore_agent_runtime_endpoint.example + id = "AGENTRUNTIME1234567890,example-endpoint" +} +``` + +Using `terraform import`, import Bedrock AgentCore Agent Runtime Endpoint using the `agent_runtime_id` and `name` separated by a comma. 
For example: + +```console +% terraform import aws_bedrockagentcore_agent_runtime_endpoint.example AGENTRUNTIME1234567890,example-endpoint +``` diff --git a/website/docs/r/bedrockagentcore_api_key_credential_provider.html.markdown b/website/docs/r/bedrockagentcore_api_key_credential_provider.html.markdown new file mode 100644 index 000000000000..826cd83c05e6 --- /dev/null +++ b/website/docs/r/bedrockagentcore_api_key_credential_provider.html.markdown @@ -0,0 +1,78 @@ +--- +subcategory: "Bedrock AgentCore" +layout: "aws" +page_title: "AWS: aws_bedrockagentcore_api_key_credential_provider" +description: |- + Manages an AWS Bedrock AgentCore API Key Credential Provider. +--- + +# Resource: aws_bedrockagentcore_api_key_credential_provider + +Manages an AWS Bedrock AgentCore API Key Credential Provider. API Key credential providers enable secure authentication with external services that use API key-based authentication for agent runtimes. + +-> **Note:** Write-Only argument `api_key_wo` is available to use in place of `api_key`. Write-Only arguments are supported in HashiCorp Terraform 1.11.0 and later. [Learn more](https://developer.hashicorp.com/terraform/language/resources/ephemeral#write-only-arguments). + +## Example Usage + +### Basic Usage + +```terraform +resource "aws_bedrockagentcore_api_key_credential_provider" "example" { + name = "example-api-key-provider" + api_key = "your-api-key-here" +} +``` + +### Write-Only API Key (Recommended for Production) + +```terraform +resource "aws_bedrockagentcore_api_key_credential_provider" "example" { + name = "example-api-key-provider" + api_key_wo = "your-api-key-here" + api_key_wo_version = 1 +} +``` + +## Argument Reference + +The following arguments are required: + +* `name` - (Required) Name of the API Key credential provider. Forces replacement when changed. 
+ +The following arguments are optional: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). + +**Standard API Key (choose one approach):** + +* `api_key` - (Optional) API key value. Cannot be used with `api_key_wo`. This value will be visible in Terraform plan outputs and logs. + +**Write-Only API Key (choose one approach):** + +* `api_key_wo` - (Optional) Write-only API key value. Cannot be used with `api_key`. Must be used together with `api_key_wo_version`. +* `api_key_wo_version` - (Optional) Used together with `api_key_wo` to trigger an update. Increment this value when an update to `api_key_wo` is required. + +## Attribute Reference + +This resource exports the following attributes in addition to the arguments above: + +* `credential_provider_arn` - ARN of the API Key credential provider. +* `api_key_secret_arn` - Block containing information about the AWS Secrets Manager secret that stores the API key. + * `secret_arn` - ARN of the secret in AWS Secrets Manager. + +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Bedrock AgentCore API Key Credential Provider using the provider name. For example: + +```terraform +import { + to = aws_bedrockagentcore_api_key_credential_provider.example + id = "example-api-key-provider" +} +``` + +Using `terraform import`, import Bedrock AgentCore API Key Credential Provider using the provider name. 
For example: + +```console +% terraform import aws_bedrockagentcore_api_key_credential_provider.example example-api-key-provider +``` diff --git a/website/docs/r/bedrockagentcore_browser.html.markdown b/website/docs/r/bedrockagentcore_browser.html.markdown new file mode 100644 index 000000000000..782c32760616 --- /dev/null +++ b/website/docs/r/bedrockagentcore_browser.html.markdown @@ -0,0 +1,136 @@ +--- +subcategory: "Bedrock AgentCore" +layout: "aws" +page_title: "AWS: aws_bedrockagentcore_browser" +description: |- + Manages an AWS Bedrock AgentCore Browser. +--- + +# Resource: aws_bedrockagentcore_browser + +Manages an AWS Bedrock AgentCore Browser. Browser provides AI agents with web browsing capabilities, allowing them to navigate websites, extract information, and interact with web content in a controlled environment. + +## Example Usage + +### Basic Usage + +```terraform +resource "aws_bedrockagentcore_browser" "example" { + name = "example-browser" + description = "Browser for web data extraction" + + network_configuration { + network_mode = "PUBLIC" + } +} +``` + +### Browser with Execution Role and Recording + +```terraform +data "aws_iam_policy_document" "assume_role" { + statement { + effect = "Allow" + actions = ["sts:AssumeRole"] + principals { + type = "Service" + identifiers = ["bedrock-agentcore.amazonaws.com"] + } + } +} + +resource "aws_iam_role" "example" { + name = "bedrock-agentcore-browser-role" + assume_role_policy = data.aws_iam_policy_document.assume_role.json +} + +resource "aws_s3_bucket" "recording" { + bucket = "browser-recording-bucket" +} + +resource "aws_bedrockagentcore_browser" "example" { + name = "example-browser" + description = "Browser with recording enabled" + execution_role_arn = aws_iam_role.example.arn + + network_configuration { + network_mode = "PUBLIC" + } + + recording { + enabled = true + s3_location { + bucket = aws_s3_bucket.recording.bucket + prefix = "browser-sessions/" + } + } +} +``` + +## Argument Reference + 
+The following arguments are required: + +* `name` - (Required) Name of the browser. +* `network_configuration` - (Required) Network configuration for the browser. See [`network_configuration`](#network_configuration) below. + +The following arguments are optional: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `description` - (Optional) Description of the browser. +* `execution_role_arn` - (Optional) ARN of the IAM role that the browser assumes for execution. +* `recording` - (Optional) Recording configuration for browser sessions. See [`recording`](#recording) below. +* `client_token` - (Optional) Unique identifier for request idempotency. If not provided, one will be generated automatically. +* `tags` - (Optional) Key-value map of resource tags. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. + +### `network_configuration` + +The `network_configuration` object supports the following: + +* `network_mode` - (Required) Network mode for the browser. Valid values: `PUBLIC`, `SANDBOX`. + +### `recording` + +The `recording` object supports the following: + +* `enabled` - (Optional) Whether to enable recording for browser sessions. Defaults to `false`. +* `s3_location` - (Optional) S3 location where browser session recordings are stored. See [`s3_location`](#s3_location) below. + +### `s3_location` + +The `s3_location` object supports the following: + +* `bucket` - (Required) Name of the S3 bucket where recordings are stored. +* `prefix` - (Optional) S3 key prefix for recording files. 
+ +## Attribute Reference + +This resource exports the following attributes in addition to the arguments above: + +* `browser_arn` - ARN of the Browser. +* `browser_id` - Unique identifier of the Browser. +* `tags_all` - A map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). + +## Timeouts + +[Configuration options](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts): + +* `create` - (Default `30m`) +* `delete` - (Default `30m`) + +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Bedrock AgentCore Browser using the browser ID. For example: + +```terraform +import { + to = aws_bedrockagentcore_browser.example + id = "BROWSER1234567890" +} +``` + +Using `terraform import`, import Bedrock AgentCore Browser using the browser ID. For example: + +```console +% terraform import aws_bedrockagentcore_browser.example BROWSER1234567890 +``` diff --git a/website/docs/r/bedrockagentcore_code_interpreter.html.markdown b/website/docs/r/bedrockagentcore_code_interpreter.html.markdown new file mode 100644 index 000000000000..423934c154a5 --- /dev/null +++ b/website/docs/r/bedrockagentcore_code_interpreter.html.markdown @@ -0,0 +1,117 @@ +--- +subcategory: "Bedrock AgentCore" +layout: "aws" +page_title: "AWS: aws_bedrockagentcore_code_interpreter" +description: |- + Manages an AWS Bedrock AgentCore Code Interpreter. +--- + +# Resource: aws_bedrockagentcore_code_interpreter + +Manages an AWS Bedrock AgentCore Code Interpreter. Code Interpreter provides a secure environment for AI agents to execute Python code, enabling data analysis, calculations, and file processing capabilities. 
+ +## Example Usage + +### Basic Usage + +```terraform +resource "aws_bedrockagentcore_code_interpreter" "example" { + name = "example-code-interpreter" + description = "Code interpreter for data analysis" + + network_configuration { + network_mode = "PUBLIC" + } +} +``` + +### Code Interpreter with Execution Role + +```terraform +data "aws_iam_policy_document" "assume_role" { + statement { + effect = "Allow" + actions = ["sts:AssumeRole"] + principals { + type = "Service" + identifiers = ["bedrock-agentcore.amazonaws.com"] + } + } +} + +resource "aws_iam_role" "example" { + name = "bedrock-agentcore-code-interpreter-role" + assume_role_policy = data.aws_iam_policy_document.assume_role.json +} + +resource "aws_bedrockagentcore_code_interpreter" "example" { + name = "example-code-interpreter" + description = "Code interpreter with custom execution role" + execution_role_arn = aws_iam_role.example.arn + + network_configuration { + network_mode = "SANDBOX" + } +} +``` + +## Argument Reference + +The following arguments are required: + +* `name` - (Required) Name of the code interpreter. +* `network_configuration` - (Required) Network configuration for the code interpreter. See [`network_configuration`](#network_configuration) below. + +The following arguments are optional: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `description` - (Optional) Description of the code interpreter. +* `execution_role_arn` - (Optional) ARN of the IAM role that the code interpreter assumes for execution. Required when using `SANDBOX` network mode. +* `client_token` - (Optional) Unique identifier for request idempotency. If not provided, one will be generated automatically. +* `tags` - (Optional) Key-value map of resource tags. 
If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. + +### `network_configuration` + +The `network_configuration` object supports the following: + +* `network_mode` - (Required) Network mode for the code interpreter. Valid values: `PUBLIC`, `SANDBOX`, `VPC`. +* `vpc_config` - (Optional) VPC configuration. See [`vpc_config`](#vpc_config) below. + +### `vpc_config` + +The `vpc_config` block supports the following: + +* `security_groups` - (Required) Security groups associated with the VPC configuration. +* `subnets` - (Required) Subnets associated with the VPC configuration. + +## Attribute Reference + +This resource exports the following attributes in addition to the arguments above: + +* `code_interpreter_arn` - ARN of the Code Interpreter. +* `code_interpreter_id` - Unique identifier of the Code Interpreter. +* `tags_all` - A map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). + +## Timeouts + +[Configuration options](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts): + +* `create` - (Default `30m`) +* `delete` - (Default `30m`) + +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Bedrock AgentCore Code Interpreter using the code interpreter ID. For example: + +```terraform +import { + to = aws_bedrockagentcore_code_interpreter.example + id = "CODEINTERPRETER1234567890" +} +``` + +Using `terraform import`, import Bedrock AgentCore Code Interpreter using the code interpreter ID. 
For example: + +```console +% terraform import aws_bedrockagentcore_code_interpreter.example CODEINTERPRETER1234567890 +``` diff --git a/website/docs/r/bedrockagentcore_gateway.html.markdown b/website/docs/r/bedrockagentcore_gateway.html.markdown new file mode 100644 index 000000000000..b6689953db9f --- /dev/null +++ b/website/docs/r/bedrockagentcore_gateway.html.markdown @@ -0,0 +1,164 @@ +--- +subcategory: "Bedrock AgentCore" +layout: "aws" +page_title: "AWS: aws_bedrockagentcore_gateway" +description: |- + Manages an AWS Bedrock AgentCore Gateway. +--- + +# Resource: aws_bedrockagentcore_gateway + +Manages an AWS Bedrock AgentCore Gateway. With Gateway, developers can convert APIs, Lambda functions, and existing services into Model Context Protocol (MCP)-compatible tools. + +## Example Usage + +### Gateway with JWT Authorization + +```terraform +data "aws_iam_policy_document" "assume_role" { + statement { + effect = "Allow" + actions = ["sts:AssumeRole"] + principals { + type = "Service" + identifiers = ["bedrock-agentcore.amazonaws.com"] + } + } +} + +resource "aws_iam_role" "example" { + name = "bedrock-agentcore-gateway-role" + assume_role_policy = data.aws_iam_policy_document.assume_role.json +} + +resource "aws_bedrockagentcore_gateway" "example" { + name = "example-gateway" + role_arn = aws_iam_role.example.arn + + authorizer_type = "CUSTOM_JWT" + authorizer_configuration { + custom_jwt_authorizer { + discovery_url = "https://accounts.google.com/.well-known/openid-configuration" + allowed_audience = ["test1", "test2"] + } + } + + protocol_type = "MCP" +} +``` + +### Gateway with advanced JWT Authorization and MCP Configuration + +```terraform +resource "aws_bedrockagentcore_gateway" "example" { + name = "mcp-gateway" + description = "Gateway for MCP communication" + role_arn = aws_iam_role.example.arn + + authorizer_type = "CUSTOM_JWT" + authorizer_configuration { + custom_jwt_authorizer { + discovery_url = 
"https://auth.example.com/.well-known/openid-configuration" + allowed_audience = ["app-client", "web-client"] + allowed_clients = ["client-123", "client-456"] + } + } + + protocol_type = "MCP" + protocol_configuration { + mcp { + instructions = "Gateway for handling MCP requests" + search_type = "HYBRID" + supported_versions = ["2025-03-26", "2025-06-18"] + } + } +} +``` + +## Argument Reference + +The following arguments are required: + +* `authorizer_configuration` - (Required) Configuration for request authorization. See [`authorizer_configuration`](#authorizer_configuration) below. +* `authorizer_type` - (Required) Type of authorizer to use. Valid values: `CUSTOM_JWT`, `AWS_IAM`. +* `name` - (Required) Name of the gateway. +* `protocol_type` - (Required) Protocol type for the gateway. Valid values: `MCP`. +* `role_arn` - (Required) ARN of the IAM role that the gateway assumes to access AWS services. + +The following arguments are optional: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `description` - (Optional) Description of the gateway. +* `exception_level` - (Optional) Exception level for the gateway. Valid values: `INFO`, `WARN`, `ERROR`. +* `kms_key_arn` - (Optional) ARN of the KMS key used to encrypt the gateway data. +* `protocol_configuration` - (Optional) Protocol-specific configuration for the gateway. See [`protocol_configuration`](#protocol_configuration) below. +* `tags` - (Optional) Key-value map of resource tags. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. 
+ +### `authorizer_configuration` + +The `authorizer_configuration` block supports the following: + +* `custom_jwt_authorizer` - (Required) JWT-based authorization configuration block. See [`custom_jwt_authorizer`](#custom_jwt_authorizer) below. + +### `custom_jwt_authorizer` + +The `custom_jwt_authorizer` block supports the following: + +* `discovery_url` - (Required) URL used to fetch OpenID Connect configuration or authorization server metadata. Must end with `.well-known/openid-configuration`. +* `allowed_audience` - (Optional) Set of allowed audience values for JWT token validation. +* `allowed_clients` - (Optional) Set of allowed client IDs for JWT token validation. + +### `protocol_configuration` + +The `protocol_configuration` block supports the following: + +* `mcp` - (Optional) Model Context Protocol (MCP) configuration block. See [`mcp`](#mcp) below. + +### `mcp` + +The `mcp` block supports the following: + +* `instructions` - (Optional) Instructions for the MCP protocol configuration. +* `search_type` - (Optional) Search type for MCP. Valid values: `SEMANTIC`, `HYBRID`. +* `supported_versions` - (Optional) Set of supported MCP protocol versions. + +## Attribute Reference + +This resource exports the following attributes in addition to the arguments above: + +* `gateway_arn` - ARN of the Gateway. +* `gateway_id` - Unique identifier of the Gateway. +* `gateway_url` - URL endpoint for the gateway. +* `tags_all` - A map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). +* `workload_identity_details` - Workload identity details for the gateway. See [`workload_identity_details`](#workload_identity_details) below. + +### `workload_identity_details` + +The `workload_identity_details` block contains the following: + +* `workload_identity_arn` - ARN of the workload identity. 
+ +## Timeouts + +[Configuration options](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts): + +* `create` - (Default `30m`) +* `update` - (Default `30m`) +* `delete` - (Default `30m`) + +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Bedrock AgentCore Gateway using the gateway ID. For example: + +```terraform +import { + to = aws_bedrockagentcore_gateway.example + id = "GATEWAY1234567890" +} +``` + +Using `terraform import`, import Bedrock AgentCore Gateway using the gateway ID. For example: + +```console +% terraform import aws_bedrockagentcore_gateway.example GATEWAY1234567890 +``` diff --git a/website/docs/r/bedrockagentcore_gateway_target.html.markdown b/website/docs/r/bedrockagentcore_gateway_target.html.markdown new file mode 100644 index 000000000000..dcbdc39de9d2 --- /dev/null +++ b/website/docs/r/bedrockagentcore_gateway_target.html.markdown @@ -0,0 +1,454 @@ +--- +subcategory: "Bedrock AgentCore" +layout: "aws" +page_title: "AWS: aws_bedrockagentcore_gateway_target" +description: |- + Manages an AWS Bedrock AgentCore Gateway Target. +--- + +# Resource: aws_bedrockagentcore_gateway_target + +Manages an AWS Bedrock AgentCore Gateway Target. Gateway targets define the endpoints and configurations that a gateway can invoke, such as Lambda functions or APIs, allowing agents to interact with external services through the Model Context Protocol (MCP). 
+ +## Example Usage + +### Lambda Target with Gateway IAM Role + +```terraform +data "aws_iam_policy_document" "gateway_assume" { + statement { + effect = "Allow" + actions = ["sts:AssumeRole"] + principals { + type = "Service" + identifiers = ["bedrock-agentcore.amazonaws.com"] + } + } +} + +resource "aws_iam_role" "gateway_role" { + name = "bedrock-gateway-role" + assume_role_policy = data.aws_iam_policy_document.gateway_assume.json +} + +data "aws_iam_policy_document" "lambda_assume" { + statement { + effect = "Allow" + actions = ["sts:AssumeRole"] + principals { + type = "Service" + identifiers = ["lambda.amazonaws.com"] + } + } +} + +resource "aws_iam_role" "lambda_role" { + name = "example-lambda-role" + assume_role_policy = data.aws_iam_policy_document.lambda_assume.json +} + +resource "aws_lambda_function" "example" { + filename = "example.zip" + function_name = "example-function" + role = aws_iam_role.lambda_role.arn + handler = "index.handler" + runtime = "nodejs20.x" +} + +resource "aws_bedrockagentcore_gateway" "example" { + name = "example-gateway" + role_arn = aws_iam_role.gateway_role.arn + + authorizer_configuration { + custom_jwt_authorizer { + discovery_url = "https://accounts.google.com/.well-known/openid-configuration" + } + } +} + +resource "aws_bedrockagentcore_gateway_target" "example" { + name = "example-target" + gateway_identifier = aws_bedrockagentcore_gateway.example.gateway_id + description = "Lambda function target for processing requests" + + credential_provider_configuration { + gateway_iam_role {} + } + + target_configuration { + mcp { + lambda { + lambda_arn = aws_lambda_function.example.arn + + tool_schema { + inline_payload { + name = "process_request" + description = "Process incoming requests" + + input_schema { + type = "object" + description = "Request processing schema" + + property { + name = "message" + type = "string" + description = "Message to process" + required = true + } + + property { + name = "options" + type = 
"object" + + property { + name = "priority" + type = "string" + } + + property { + name = "tags" + type = "array" + + items { + type = "string" + } + } + } + } + + output_schema { + type = "object" + + property { + name = "status" + type = "string" + required = true + } + + property { + name = "result" + type = "string" + } + } + } + } + } + } + } +} +``` + +### Target with API Key Authentication + +```terraform +resource "aws_bedrockagentcore_gateway_target" "api_key_example" { + name = "api-target" + gateway_identifier = aws_bedrockagentcore_gateway.example.gateway_id + description = "External API target with API key authentication" + + credential_provider_configuration { + api_key { + provider_arn = "arn:aws:iam::123456789012:oidc-provider/example.com" + credential_location = "HEADER" + credential_parameter_name = "X-API-Key" + credential_prefix = "Bearer" + } + } + + target_configuration { + mcp { + lambda { + lambda_arn = aws_lambda_function.example.arn + + tool_schema { + inline_payload { + name = "api_tool" + description = "External API integration tool" + + input_schema { + type = "string" + description = "Simple string input for API calls" + } + } + } + } + } + } +} +``` + +### Target with OAuth Authentication + +```terraform +resource "aws_bedrockagentcore_gateway_target" "oauth_example" { + name = "oauth-target" + gateway_identifier = aws_bedrockagentcore_gateway.example.gateway_id + + credential_provider_configuration { + oauth { + provider_arn = "arn:aws:iam::123456789012:oidc-provider/oauth.example.com" + scopes = ["read", "write"] + custom_parameters = { + "client_type" = "confidential" + "grant_type" = "authorization_code" + } + } + } + + target_configuration { + mcp { + lambda { + lambda_arn = aws_lambda_function.example.arn + + tool_schema { + inline_payload { + name = "oauth_tool" + description = "OAuth-authenticated service" + + input_schema { + type = "array" + + items { + type = "object" + + property { + name = "id" + type = "string" + 
required = true + } + + property { + name = "value" + type = "number" + } + } + } + } + } + } + } + } +} +``` + +### Complex Schema with JSON Serialization + +```terraform +resource "aws_bedrockagentcore_gateway_target" "complex_schema" { + name = "complex-target" + gateway_identifier = aws_bedrockagentcore_gateway.example.gateway_id + + credential_provider_configuration { + gateway_iam_role {} + } + + target_configuration { + mcp { + lambda { + lambda_arn = aws_lambda_function.example.arn + + tool_schema { + inline_payload { + name = "complex_tool" + description = "Tool with complex nested schema" + + input_schema { + type = "object" + + property { + name = "profile" + type = "object" + + property { + name = "nested_tags" + type = "array" + items_json = jsonencode({ + type = "string" + }) + } + + property { + name = "metadata" + type = "object" + properties_json = jsonencode({ + properties = { + "created_at" = { type = "string" } + "version" = { type = "number" } + } + required = ["created_at"] + }) + } + } + } + } + } + } + } + } +} +``` + +## Argument Reference + +The following arguments are required: + +* `name` - (Required) Name of the gateway target. +* `gateway_identifier` - (Required) Identifier of the gateway that this target belongs to. +* `credential_provider_configuration` - (Required) Configuration for authenticating requests to the target. See [`credential_provider_configuration`](#credential_provider_configuration) below. +* `target_configuration` - (Required) Configuration for the target endpoint. See [`target_configuration`](#target_configuration) below. + +The following arguments are optional: + +* `description` - (Optional) Description of the gateway target. +* `region` - (Optional) AWS region where the resource will be created. If not provided, the region from the provider configuration will be used. 
+ +### `credential_provider_configuration` + +The `credential_provider_configuration` block supports exactly one of the following: + +* `gateway_iam_role` - (Optional) Use the gateway's IAM role for authentication. This is an empty configuration block. +* `api_key` - (Optional) API key-based authentication configuration. See [`api_key`](#api_key) below. +* `oauth` - (Optional) OAuth-based authentication configuration. See [`oauth`](#oauth) below. + +### `api_key` + +The `api_key` block supports the following: + +* `provider_arn` - (Required) ARN of the API key credential provider to use for authentication. +* `credential_location` - (Optional) Location where the API key credential is provided. Valid values: `HEADER`, `QUERY_PARAMETER`. +* `credential_parameter_name` - (Optional) Name of the parameter containing the API key credential. +* `credential_prefix` - (Optional) Prefix to add to the API key credential value. + +### `oauth` + +The `oauth` block supports the following: + +* `provider_arn` - (Required) ARN of the OAuth credential provider to use for authentication. +* `scopes` - (Optional) Set of OAuth scopes to request. +* `custom_parameters` - (Optional) Map of custom parameters to include in OAuth requests. + +### `target_configuration` + +The `target_configuration` block supports the following: + +* `mcp` - (Optional) Model Context Protocol (MCP) configuration. See [`mcp`](#mcp) below. + +### `mcp` + +The `mcp` block supports exactly one of the following: + +* `lambda` - (Optional) Lambda function target configuration. See [`lambda`](#lambda) below. +* `open_api_schema` - (Optional) OpenAPI schema-based target configuration. See [`api_schema_configuration`](#api_schema_configuration) below. +* `smithy_model` - (Optional) Smithy model-based target configuration. See [`api_schema_configuration`](#api_schema_configuration) below. + +### `lambda` + +The `lambda` block supports the following: + +* `lambda_arn` - (Required) ARN of the Lambda function to invoke. 
+* `tool_schema` - (Required) Schema definition for the tool. See [`tool_schema`](#tool_schema) below. + +### `tool_schema` + +The `tool_schema` block supports exactly one of the following: + +* `inline_payload` - (Optional) Inline tool definition. See [`inline_payload`](#inline_payload) below. +* `s3` - (Optional) S3-based tool definition. See [`s3`](#s3) below. + +### `inline_payload` + +The `inline_payload` block supports the following: + +* `name` - (Required) Name of the tool. +* `description` - (Required) Description of what the tool does. +* `input_schema` - (Required) Schema for the tool's input. See [`schema_definition`](#schema_definition) below. +* `output_schema` - (Optional) Schema for the tool's output. See [`schema_definition`](#schema_definition) below. + +### `s3` + +The `s3` block supports the following: + +* `uri` - (Optional) S3 URI where the tool schema is stored. +* `bucket_owner_account_id` - (Optional) Account ID of the S3 bucket owner. + +### `api_schema_configuration` + +The `api_schema_configuration` block supports exactly one of the following: + +* `inline_payload` - (Optional) Inline schema payload. See [`inline_payload`](#inline_payload) below. +* `s3` - (Optional) S3-based schema configuration. See [`s3`](#s3) below. + +### `inline_payload` (API Schema) + +The `inline_payload` block for API schemas supports the following: + +* `payload` - (Required) The inline schema payload content. + +### `s3` (API Schema) + +The `s3` block for API schemas supports the following: + +* `uri` - (Optional) S3 URI where the schema is stored. +* `bucket_owner_account_id` - (Optional) Account ID of the S3 bucket owner. + +### `schema_definition` + +The `schema_definition` block supports the following: + +* `type` - (Required) Data type of the schema. Valid values: `string`, `number`, `integer`, `boolean`, `array`, `object`. +* `description` - (Optional) Description of the schema element. +* `items` - (Optional) Schema definition for array items. 
Can only be used when `type` is `array`. See [`items`](#items) below. +* `property` - (Optional) Set of property definitions for object types. Can only be used when `type` is `object`. See [`property`](#property) below. + +### `items` + +The `items` block supports the following: + +* `type` - (Required) Data type of the array items. +* `description` - (Optional) Description of the array items. +* `items` - (Optional) Nested items definition for arrays of arrays. +* `property` - (Optional) Set of property definitions for arrays of objects. See [`property`](#property) below. + +### `property` + +The `property` block supports the following: + +* `name` - (Required) Name of the property. +* `type` - (Required) Data type of the property. +* `description` - (Optional) Description of the property. +* `required` - (Optional) Whether this property is required. Defaults to `false`. +* `items_json` - (Optional) JSON-encoded schema definition for array items. Used for complex nested structures. Cannot be used with `properties_json`. +* `properties_json` - (Optional) JSON-encoded schema definition for object properties. Used for complex nested structures. Cannot be used with `items_json`. +* `items` - (Optional) Items definition for array properties. See [`items`](#items) above. +* `property` - (Optional) Set of nested property definitions for object properties. + +## Attribute Reference + +This resource exports the following attributes in addition to the arguments above: + +* `target_id` - Unique identifier of the gateway target. 
+ +## Timeouts + +[Configuration options](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts): + +* `create` - (Default `30m`) +* `update` - (Default `30m`) +* `delete` - (Default `30m`) + +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Bedrock AgentCore Gateway Target using the gateway identifier and target ID separated by a comma. For example: + +```terraform +import { + to = aws_bedrockagentcore_gateway_target.example + id = "GATEWAY1234567890,TARGET0987654321" +} +``` + +Using `terraform import`, import Bedrock AgentCore Gateway Target using the gateway identifier and target ID separated by a comma. For example: + +```console +% terraform import aws_bedrockagentcore_gateway_target.example GATEWAY1234567890,TARGET0987654321 +``` diff --git a/website/docs/r/budgets_budget.html.markdown b/website/docs/r/budgets_budget.html.markdown index 32f9808f3b0b..8f32a2e363ef 100644 --- a/website/docs/r/budgets_budget.html.markdown +++ b/website/docs/r/budgets_budget.html.markdown @@ -184,6 +184,7 @@ The following arguments are optional: * `account_id` - (Optional) The ID of the target account for budget. Will use current user's account_id by default if omitted. * `auto_adjust_data` - (Optional) Object containing [AutoAdjustData](#auto-adjust-data) which determines the budget amount for an auto-adjusting budget. +* `billing_view_arn` - (Optional) ARN of the billing view. * `cost_filter` - (Optional) A list of [CostFilter](#cost-filter) name/values pair to apply to budget. * `cost_types` - (Optional) Object containing [CostTypes](#cost-types) The types of cost included in a budget, such as tax and subscriptions. * `limit_amount` - (Optional) The amount of cost or usage being measured for a budget. 
diff --git a/website/docs/r/ce_anomaly_monitor.html.markdown b/website/docs/r/ce_anomaly_monitor.html.markdown index fb196ce241f8..63e00e32a732 100644 --- a/website/docs/r/ce_anomaly_monitor.html.markdown +++ b/website/docs/r/ce_anomaly_monitor.html.markdown @@ -69,6 +69,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_ce_anomaly_monitor.example + identity = { + "arn" = "arn:aws:ce::123456789012:anomalymonitor/12345678-1234-1234-1234-123456789012" + } +} + +resource "aws_ce_anomaly_monitor" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the Cost Explorer anomaly monitor. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import `aws_ce_anomaly_monitor` using the `id`. For example: ```terraform diff --git a/website/docs/r/ce_anomaly_subscription.html.markdown b/website/docs/r/ce_anomaly_subscription.html.markdown index 572869322c89..16da0ed5137b 100644 --- a/website/docs/r/ce_anomaly_subscription.html.markdown +++ b/website/docs/r/ce_anomaly_subscription.html.markdown @@ -255,6 +255,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. 
For example: + +```terraform +import { + to = aws_ce_anomaly_subscription.example + identity = { + "arn" = "arn:aws:ce::123456789012:anomalysubscription/12345678-1234-1234-1234-123456789012" + } +} + +resource "aws_ce_anomaly_subscription" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the Cost Explorer anomaly subscription. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import `aws_ce_anomaly_subscription` using the `id`. For example: ```terraform diff --git a/website/docs/r/ce_cost_category.html.markdown b/website/docs/r/ce_cost_category.html.markdown index 229ebf6bfccc..e0536e714a48 100644 --- a/website/docs/r/ce_cost_category.html.markdown +++ b/website/docs/r/ce_cost_category.html.markdown @@ -126,6 +126,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_ce_cost_category.example + identity = { + "arn" = "arn:aws:ce::123456789012:costcategory/12345678-1234-1234-1234-123456789012" + } +} + +resource "aws_ce_cost_category" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the Cost Explorer cost category. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import `aws_ce_cost_category` using the id. 
For example: ```terraform diff --git a/website/docs/r/chimesdkmediapipelines_media_insights_pipeline_configuration.html.markdown b/website/docs/r/chimesdkmediapipelines_media_insights_pipeline_configuration.html.markdown index 5ef9664328dd..c377e26567d3 100644 --- a/website/docs/r/chimesdkmediapipelines_media_insights_pipeline_configuration.html.markdown +++ b/website/docs/r/chimesdkmediapipelines_media_insights_pipeline_configuration.html.markdown @@ -358,6 +358,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_chimesdkmediapipelines_media_insights_pipeline_configuration.example + identity = { + "arn" = "arn:aws:chime:us-east-1:123456789012:media-insights-pipeline-configuration/example-config" + } +} + +resource "aws_chimesdkmediapipelines_media_insights_pipeline_configuration" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the Chime SDK media insights pipeline configuration. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Chime SDK Media Pipelines Media Insights Pipeline Configuration using the `id`. For example: ```terraform diff --git a/website/docs/r/cleanrooms_collaboration.html.markdown b/website/docs/r/cleanrooms_collaboration.html.markdown index 4f6c3b9b80ff..03b07b4a6233 100644 --- a/website/docs/r/cleanrooms_collaboration.html.markdown +++ b/website/docs/r/cleanrooms_collaboration.html.markdown @@ -8,13 +8,11 @@ description: |- # Resource: aws_cleanrooms_collaboration -Provides a AWS Clean Rooms collaboration. All members included in the definition will be invited to -join the collaboration and can create memberships. 
+Provides a AWS Clean Rooms collaboration. +All members included in the definition will be invited to join the collaboration and can create memberships. ## Example Usage -### Collaboration with tags - ```terraform resource "aws_cleanrooms_collaboration" "test_collaboration" { name = "terraform-example-collaboration" @@ -22,6 +20,7 @@ resource "aws_cleanrooms_collaboration" "test_collaboration" { creator_display_name = "Creator " description = "I made this collaboration with terraform!" query_log_status = "DISABLED" + analytics_engine = "SPARK" data_encryption_metadata { allow_clear_text = true @@ -39,21 +38,23 @@ resource "aws_cleanrooms_collaboration" "test_collaboration" { tags = { Project = "Terraform" } - } ``` ## Argument Reference -This resource supports the following arguments: +The following arguments are required: -* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) - The name of the collaboration. Collaboration names do not need to be unique. * `description` - (Required) - A description for a collaboration. * `creator_member_abilities` - (Required - Forces new resource) - The list of member abilities for the creator of the collaboration. Valid values [may be found here](https://docs.aws.amazon.com/clean-rooms/latest/apireference/API_CreateCollaboration.html#API-CreateCollaboration-request-creatorMemberAbilities). * `creator_display_name` - (Required - Forces new resource) - The name for the member record for the collaboration creator. * `query_log_status` - (Required - Forces new resource) - Determines if members of the collaboration can enable query logs within their own. emberships. 
Valid values [may be found here](https://docs.aws.amazon.com/clean-rooms/latest/apireference/API_CreateCollaboration.html#API-CreateCollaboration-request-queryLogStatus). + +The following arguments are optional: + +* `analytics_engine` - (Optional) Analytics engine used by the collaboration. Valid values are `CLEAN_ROOMS_SQL` (deprecated) and `SPARK`. * `data_encryption_metadata` - (Required - Forces new resource) - a collection of settings which determine how the [c3r client](https://docs.aws.amazon.com/clean-rooms/latest/userguide/crypto-computing.html) will encrypt data for use within this collaboration. * `data_encryption_metadata.allow_clear_text` - (Required - Forces new resource) - Indicates whether encrypted tables can contain cleartext data. This is a boolea field. @@ -67,17 +68,18 @@ or cryptographically processed (false). * `member.account_id` - (Required - Forces new resource) - The account id for the invited member. * `member.display_name` - (Required - Forces new resource) - The display name for the invited member. * `member.member_abilities` - (Required - Forces new resource) - The list of abilities for the invited member. Valid values [may be found here](https://docs.aws.amazon.com/clean-rooms/latest/apireference/API_CreateCollaboration.html#API-CreateCollaboration-request-creatorMemberAbilities). +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `tags` - (Optional) - Key value pairs which tag the collaboration. ## Attribute Reference This resource exports the following attributes in addition to the arguments above: -* `arn` - The arn of the collaboration. -* `id` - The id of the collaboration. -* `create_time` - The date and time the collaboration was created. +* `arn` - ARN of the collaboration. 
+* `id` - ID of the collaboration. +* `create_time` - Date and time the collaboration was created. * `member status` - For each member included in the collaboration an additional computed attribute of status is added. These values [may be found here](https://docs.aws.amazon.com/clean-rooms/latest/apireference/API_MemberSummary.html#API-Type-MemberSummary-status). -* `updated_time` - The date and time the collaboration was last updated. +* `updated_time` - Date and time the collaboration was last updated. ## Timeouts diff --git a/website/docs/r/cleanrooms_configured_table.html.markdown b/website/docs/r/cleanrooms_configured_table.html.markdown index 26c01bae7a8a..b3a63d08993f 100644 --- a/website/docs/r/cleanrooms_configured_table.html.markdown +++ b/website/docs/r/cleanrooms_configured_table.html.markdown @@ -69,6 +69,32 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_cleanrooms_configured_table.example + identity = { + id = "1234abcd-12ab-34cd-56ef-1234567890ab" + } +} + +resource "aws_cleanrooms_configured_table" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `id` - (String) ID of the cleanrooms configured table. + +#### Optional + +* `account_id` (String) AWS Account where this resource is managed. +* `region` (String) Region where this resource is managed. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import `aws_cleanrooms_configured_table` using the `id`. 
For example: ```terraform diff --git a/website/docs/r/cloudfront_distribution.html.markdown b/website/docs/r/cloudfront_distribution.html.markdown index b6c0dd3be379..9503b407d910 100644 --- a/website/docs/r/cloudfront_distribution.html.markdown +++ b/website/docs/r/cloudfront_distribution.html.markdown @@ -29,13 +29,55 @@ resource "aws_s3_bucket" "b" { } } -resource "aws_s3_bucket_acl" "b_acl" { - bucket = aws_s3_bucket.b.id - acl = "private" +# See https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/private-content-restricting-access-to-s3.html +data "aws_iam_policy_document" "origin_bucket_policy" { + statement { + sid = "AllowCloudFrontServicePrincipalReadWrite" + effect = "Allow" + + principals { + type = "Service" + identifiers = ["cloudfront.amazonaws.com"] + } + + actions = [ + "s3:GetObject", + "s3:PutObject", + ] + + resources = [ + "${aws_s3_bucket.b.arn}/*", + ] + + condition { + test = "StringEquals" + variable = "AWS:SourceArn" + values = [aws_cloudfront_distribution.s3_distribution.arn] + } + } +} + +resource "aws_s3_bucket_policy" "b" { + bucket = aws_s3_bucket.b.bucket + policy = data.aws_iam_policy_document.origin_bucket_policy.json } locals { s3_origin_id = "myS3Origin" + my_domain = "mydomain.com" +} + +data "aws_acm_certificate" "my_domain" { + region = "us-east-1" + domain = "*.${local.my_domain}" + statuses = ["ISSUED"] +} + +resource "aws_cloudfront_origin_access_control" "default" { + name = "default-oac" + origin_access_control_origin_type = "s3" + signing_behavior = "always" + signing_protocol = "sigv4" } resource "aws_cloudfront_distribution" "s3_distribution" { @@ -50,13 +92,7 @@ resource "aws_cloudfront_distribution" "s3_distribution" { comment = "Some comment" default_root_object = "index.html" - logging_config { - include_cookies = false - bucket = "mylogs.s3.amazonaws.com" - prefix = "myprefix" - } - - aliases = ["mysite.example.com", "yoursite.example.com"] + aliases = ["mysite.${local.my_domain}", 
"yoursite.${local.my_domain}"] default_cache_behavior { allowed_methods = ["DELETE", "GET", "HEAD", "OPTIONS", "PATCH", "POST", "PUT"] @@ -136,7 +172,26 @@ resource "aws_cloudfront_distribution" "s3_distribution" { } viewer_certificate { - cloudfront_default_certificate = true + acm_certificate_arn = data.aws_acm_certificate.my_domain.arn + ssl_support_method = "sni-only" + } +} + +# Create Route53 records for the CloudFront distribution aliases +data "aws_route53_zone" "my_domain" { + name = local.my_domain +} + +resource "aws_route53_record" "cloudfront" { + for_each = aws_cloudfront_distribution.s3_distribution.aliases + zone_id = data.aws_route53_zone.my_domain.zone_id + name = each.value + type = "A" + + alias { + name = aws_cloudfront_distribution.s3_distribution.domain_name + zone_id = aws_cloudfront_distribution.s3_distribution.hosted_zone_id + evaluate_target_health = false } } ``` @@ -216,9 +271,11 @@ resource "aws_cloudfront_distribution" "s3_distribution" { # AWS Managed Caching Policy (CachingDisabled) default_cache_behavior { # Using the CachingDisabled managed policy ID: - cache_policy_id = "4135ea2d-6df8-44a3-9df3-4b5a84be39ad" - allowed_methods = ["GET", "HEAD", "OPTIONS"] - target_origin_id = local.s3_origin_id + cache_policy_id = "4135ea2d-6df8-44a3-9df3-4b5a84be39ad" + allowed_methods = ["GET", "HEAD", "OPTIONS"] + cached_methods = ["GET", "HEAD"] + target_origin_id = local.s3_origin_id + viewer_protocol_policy = "allow-all" } restrictions { @@ -241,23 +298,12 @@ resource "aws_cloudfront_distribution" "s3_distribution" { The example below creates a CloudFront distribution with [standard logging V2 to S3](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/standard-logging.html#enable-access-logging-api). ```terraform -provider "aws" { - region = var.region -} - -provider "aws" { - region = "us-east-1" - alias = "us_east_1" -} - resource "aws_cloudfront_distribution" "example" { - provider = aws.us_east_1 - # other config... 
} resource "aws_cloudwatch_log_delivery_source" "example" { - provider = aws.us_east_1 + region = "us-east-1" name = "example" log_type = "ACCESS_LOGS" @@ -270,7 +316,7 @@ resource "aws_s3_bucket" "example" { } resource "aws_cloudwatch_log_delivery_destination" "example" { - provider = aws.us_east_1 + region = "us-east-1" name = "s3-destination" output_format = "parquet" @@ -281,7 +327,7 @@ resource "aws_cloudwatch_log_delivery_destination" "example" { } resource "aws_cloudwatch_log_delivery" "example" { - provider = aws.us_east_1 + region = "us-east-1" delivery_source_name = aws_cloudwatch_log_delivery_source.example.name delivery_destination_arn = aws_cloudwatch_log_delivery_destination.example.arn @@ -292,6 +338,52 @@ resource "aws_cloudwatch_log_delivery" "example" { } ``` +### With V2 logging to Data Firehose + +The example below creates a CloudFront distribution with [standard logging V2 to Data Firehose](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/standard-logging.html#enable-access-logging-api). + +```terraform +resource "aws_cloudfront_distribution" "example" { + # other config +} + +resource "aws_kinesis_firehose_delivery_stream" "cloudfront_logs" { + region = "us-east-1" + # The tag named "LogDeliveryEnabled" must be set to "true" to allow the service-linked role "AWSServiceRoleForLogDelivery" + # to perform permitted actions on your behalf. 
+ # See: https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/AWS-logs-and-resource-policy.html#AWS-logs-infrastructure-Firehose + tags = { + LogDeliveryEnabled = "true" + } + + # other config +} + +resource "aws_cloudwatch_log_delivery_source" "example" { + region = "us-east-1" + + name = "cloudfront-logs-source" + log_type = "ACCESS_LOGS" + resource_arn = aws_cloudfront_distribution.example.arn +} + +resource "aws_cloudwatch_log_delivery_destination" "example" { + region = "us-east-1" + + name = "firehose-destination" + output_format = "json" + delivery_destination_configuration { + destination_resource_arn = aws_kinesis_firehose_delivery_stream.cloudfront_logs.arn + } +} +resource "aws_cloudwatch_log_delivery" "example" { + region = "us-east-1" + + delivery_source_name = aws_cloudwatch_log_delivery_source.example.name + delivery_destination_arn = aws_cloudwatch_log_delivery_destination.example.arn +} +``` + ## Argument Reference This resource supports the following arguments: @@ -419,6 +511,8 @@ resource "aws_cloudfront_distribution" "example" { #### Custom Error Response Arguments +~> **NOTE:** When specifying either `response_page_path` or `response_code`, **both** must be set. + * `error_caching_min_ttl` (Optional) - Minimum amount of time you want HTTP error codes to stay in CloudFront caches before CloudFront queries your origin to see whether the object has been updated. * `error_code` (Required) - 4xx or 5xx HTTP status code that you want to customize. * `response_code` (Optional) - HTTP status code that you want CloudFront to return with the custom error page to the viewer. @@ -447,13 +541,15 @@ argument should not be specified. * `origin_id` (Required) - Unique identifier for the origin. * `origin_path` (Optional) - Optional element that causes CloudFront to request your content from a directory in your Amazon S3 bucket or your custom origin. 
* `origin_shield` - (Optional) [CloudFront Origin Shield](#origin-shield-arguments) configuration information. Using Origin Shield can help reduce the load on your origin. For more information, see [Using Origin Shield](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/origin-shield.html) in the Amazon CloudFront Developer Guide. +* `response_completion_timeout` - (Optional) Time (in seconds) that a request from CloudFront to the origin can stay open and wait for a response. Must be integer greater than or equal to the value of `origin_read_timeout`. If omitted or explicitly set to `0`, no maximum value is enforced. * `s3_origin_config` - (Optional) [CloudFront S3 origin](#s3-origin-config-arguments) configuration information. If a custom origin is required, use `custom_origin_config` instead. -* `vpc_origin_config` - (Optional) The VPC origin configuration. +* `vpc_origin_config` - (Optional) The [VPC origin configuration](#vpc-origin-config-arguments). ##### Custom Origin Config Arguments * `http_port` (Required) - HTTP port the custom origin listens on. * `https_port` (Required) - HTTPS port the custom origin listens on. +* `ip_address_type` (Optional) - IP protocol CloudFront uses when connecting to your origin. Valid values: `ipv4`, `ipv6`, `dualstack`. * `origin_protocol_policy` (Required) - Origin protocol policy to apply to your origin. One of `http-only`, `https-only`, or `match-viewer`. * `origin_ssl_protocols` (Required) - List of SSL/TLS protocols that CloudFront can use when connecting to your origin over HTTPS. Valid values: `SSLv3`, `TLSv1`, `TLSv1.1`, `TLSv1.2`. For more information, see [Minimum Origin SSL Protocol](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/distribution-web-values-specify.html#DownloadDistValuesOriginSSLProtocols) in the Amazon CloudFront Developer Guide. * `origin_keepalive_timeout` - (Optional) The Custom KeepAlive timeout, in seconds. By default, AWS enforces an upper limit of `60`. 
But you can request an [increase](http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/RequestAndResponseBehaviorCustomOrigin.html#request-custom-request-timeout). Defaults to `5`. diff --git a/website/docs/r/cloudfront_function.html.markdown b/website/docs/r/cloudfront_function.html.markdown index f51aa9c0383e..57ae88b8c086 100644 --- a/website/docs/r/cloudfront_function.html.markdown +++ b/website/docs/r/cloudfront_function.html.markdown @@ -40,7 +40,7 @@ The following arguments are optional: * `comment` - (Optional) Comment. * `publish` - (Optional) Whether to publish creation/change as Live CloudFront Function Version. Defaults to `true`. -* `key_value_store_associations` - (Optional) List of `aws_cloudfront_key_value_store` ARNs to be associated to the function. AWS limits associations to on key value store per function. +* `key_value_store_associations` - (Optional) List of `aws_cloudfront_key_value_store` ARNs to be associated to the function. AWS limits associations to one key value store per function. ## Attribute Reference diff --git a/website/docs/r/cloudfront_key_value_store.html.markdown b/website/docs/r/cloudfront_key_value_store.html.markdown index 6524a6a64adc..a3a940321e57 100644 --- a/website/docs/r/cloudfront_key_value_store.html.markdown +++ b/website/docs/r/cloudfront_key_value_store.html.markdown @@ -47,6 +47,31 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_cloudfront_key_value_store.example + identity = { + name = "example_store" + } +} + +resource "aws_cloudfront_key_value_store" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `name` (String) Name of the CloudFront Key Value Store. 
+ +#### Optional + +* `account_id` (String) AWS Account where this resource is managed. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import CloudFront Key Value Store using the `name`. For example: ```terraform diff --git a/website/docs/r/cloudfront_realtime_log_config.html.markdown b/website/docs/r/cloudfront_realtime_log_config.html.markdown index c7036249a8f4..2e44a3e1b305 100644 --- a/website/docs/r/cloudfront_realtime_log_config.html.markdown +++ b/website/docs/r/cloudfront_realtime_log_config.html.markdown @@ -99,6 +99,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_cloudfront_realtime_log_config.example + identity = { + "arn" = "arn:aws:cloudfront::123456789012:realtime-log-config/ExampleNameForRealtimeLogConfig" + } +} + +resource "aws_cloudfront_realtime_log_config" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the CloudFront real-time log configuration. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import CloudFront real-time log configurations using the ARN. 
For example: ```terraform diff --git a/website/docs/r/cloudfrontkeyvaluestore_key.html.markdown b/website/docs/r/cloudfrontkeyvaluestore_key.html.markdown index cfc452fe6f91..eadaafe5de95 100644 --- a/website/docs/r/cloudfrontkeyvaluestore_key.html.markdown +++ b/website/docs/r/cloudfrontkeyvaluestore_key.html.markdown @@ -46,7 +46,34 @@ This resource exports the following attributes in addition to the arguments abov ## Import -In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import CloudFront KeyValueStore Key using the `example_id_arg`. For example: +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_cloudfrontkeyvaluestore_key.example + identity = { + key_value_store_arn = "arn:aws:cloudfront::111111111111:key-value-store/8562g61f-caba-2845-9d99-b97diwae5d3c" + key = "someKey" + } +} + +resource "aws_cloudfrontkeyvaluestore_key" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `key_value_store_arn` (String) ARN of the CloudFront Key Value Store. +* `key` (String) Key name. + +#### Optional + +* `account_id` (String) AWS Account where this resource is managed. + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import CloudFront KeyValueStore Key using the `key_value_store_arn` and 'key' separated by `,`. For example: ```terraform import { @@ -55,7 +82,7 @@ import { } ``` -Using `terraform import`, import CloudFront KeyValueStore Key using the `id`. For example: +Using `terraform import`, import CloudFront KeyValueStore Key using the `key_value_store_arn` and 'key' separated by `,`. 
For example: ```console % terraform import aws_cloudfrontkeyvaluestore_key.example arn:aws:cloudfront::111111111111:key-value-store/8562g61f-caba-2845-9d99-b97diwae5d3c,someKey diff --git a/website/docs/r/cloudtrail_event_data_store.html.markdown b/website/docs/r/cloudtrail_event_data_store.html.markdown index 3c2191f98128..69fd64cdb6b9 100644 --- a/website/docs/r/cloudtrail_event_data_store.html.markdown +++ b/website/docs/r/cloudtrail_event_data_store.html.markdown @@ -119,6 +119,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_cloudtrail_event_data_store.example + identity = { + "arn" = "arn:aws:cloudtrail:us-east-1:123456789012:eventdatastore/example-event-data-store-id" + } +} + +resource "aws_cloudtrail_event_data_store" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the CloudTrail event data store. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import event data stores using their `arn`. For example: ```terraform diff --git a/website/docs/r/cloudwatch_event_bus.html.markdown b/website/docs/r/cloudwatch_event_bus.html.markdown index 0ac4961c3b05..db7c01e4d169 100644 --- a/website/docs/r/cloudwatch_event_bus.html.markdown +++ b/website/docs/r/cloudwatch_event_bus.html.markdown @@ -14,6 +14,8 @@ Provides an EventBridge event bus resource. 
## Example Usage +### Basic Usage + ```terraform resource "aws_cloudwatch_event_bus" "messenger" { name = "chat-messages" @@ -32,6 +34,257 @@ resource "aws_cloudwatch_event_bus" "examplepartner" { } ``` +### Logging to CloudWatch Logs, S3, and Data Firehose + +See [Configuring logs for Amazon EventBridge event buses](https://docs.aws.amazon.com/eventbridge/latest/userguide/eb-event-bus-logs.html) for more details. + +#### Required Resources + +* EventBridge Event Bus with `log_config` configured +* Log destinations: + +  * CloudWatch Logs log group +  * S3 bucket +  * Data Firehose delivery stream + +* Resource-based policy or tagging for the service-linked role: + +  * CloudWatch Logs log group - `aws_cloudwatch_log_resource_policy` to allow `delivery.logs.amazonaws.com` to put logs into the log group +  * S3 bucket - `aws_s3_bucket_policy` to allow `delivery.logs.amazonaws.com` to put logs into the bucket +  * Data Firehose delivery stream - tagging the delivery stream with `LogDeliveryEnabled = "true"` to allow the service-linked role `AWSServiceRoleForLogDelivery` to deliver logs + +* CloudWatch Logs Delivery: + +  * `aws_cloudwatch_log_delivery_source` for each log type (INFO, ERROR, TRACE) +  * `aws_cloudwatch_log_delivery_destination` for the log destination (S3 bucket, CloudWatch Logs log group, or Data Firehose delivery stream) +  * `aws_cloudwatch_log_delivery` to link each log type’s delivery source to the delivery destination + +#### Example Usage + +The following example demonstrates how to set up logging for an EventBridge event bus to all three destinations: CloudWatch Logs, S3, and Data Firehose. 
+ +```terraform +data "aws_caller_identity" "current" {} + +resource "aws_cloudwatch_event_bus" "example" { + name = "example-event-bus" + log_config { + include_detail = "FULL" + level = "TRACE" + } +} + +# CloudWatch Log Delivery Sources for INFO, ERROR, and TRACE logs +resource "aws_cloudwatch_log_delivery_source" "info_logs" { + name = "EventBusSource-${aws_cloudwatch_event_bus.example.name}-INFO_LOGS" + log_type = "INFO_LOGS" + resource_arn = aws_cloudwatch_event_bus.example.arn +} + +resource "aws_cloudwatch_log_delivery_source" "error_logs" { + name = "EventBusSource-${aws_cloudwatch_event_bus.example.name}-ERROR_LOGS" + log_type = "ERROR_LOGS" + resource_arn = aws_cloudwatch_event_bus.example.arn +} + +resource "aws_cloudwatch_log_delivery_source" "trace_logs" { + name = "EventBusSource-${aws_cloudwatch_event_bus.example.name}-TRACE_LOGS" + log_type = "TRACE_LOGS" + resource_arn = aws_cloudwatch_event_bus.example.arn +} + +# Logging to S3 Bucket +resource "aws_s3_bucket" "example" { + bucket = "example-event-bus-logs" +} + +data "aws_iam_policy_document" "bucket" { + statement { + effect = "Allow" + principals { + type = "Service" + identifiers = ["delivery.logs.amazonaws.com"] + } + actions = [ + "s3:PutObject" + ] + resources = [ + "${aws_s3_bucket.example.arn}/AWSLogs/${data.aws_caller_identity.current.account_id}/EventBusLogs/*" + ] + condition { + test = "StringEquals" + variable = "s3:x-amz-acl" + values = ["bucket-owner-full-control"] + } + condition { + test = "StringEquals" + variable = "aws:SourceAccount" + values = [data.aws_caller_identity.current.account_id] + } + condition { + test = "ArnLike" + variable = "aws:SourceArn" + values = [ + aws_cloudwatch_log_delivery_source.info_logs.arn, + aws_cloudwatch_log_delivery_source.error_logs.arn, + aws_cloudwatch_log_delivery_source.trace_logs.arn + ] + } + } +} + +resource "aws_s3_bucket_policy" "example" { + bucket = aws_s3_bucket.example.bucket + policy = data.aws_iam_policy_document.bucket.json +} 
+ +resource "aws_cloudwatch_log_delivery_destination" "s3" { + name = "EventsDeliveryDestination-${aws_cloudwatch_event_bus.example.name}-S3" + delivery_destination_configuration { + destination_resource_arn = aws_s3_bucket.example.arn + } +} + +resource "aws_cloudwatch_log_delivery" "s3_info_logs" { + delivery_destination_arn = aws_cloudwatch_log_delivery_destination.s3.arn + delivery_source_name = aws_cloudwatch_log_delivery_source.info_logs.name +} +resource "aws_cloudwatch_log_delivery" "s3_error_logs" { + delivery_destination_arn = aws_cloudwatch_log_delivery_destination.s3.arn + delivery_source_name = aws_cloudwatch_log_delivery_source.error_logs.name + # to avoid operation conflict for the same delivery_destination_arn + depends_on = [ + aws_cloudwatch_log_delivery.s3_info_logs + ] +} +resource "aws_cloudwatch_log_delivery" "s3_trace_logs" { + delivery_destination_arn = aws_cloudwatch_log_delivery_destination.s3.arn + delivery_source_name = aws_cloudwatch_log_delivery_source.trace_logs.name + depends_on = [ + aws_cloudwatch_log_delivery.s3_error_logs + ] +} + +# Logging to CloudWatch Log Group +resource "aws_cloudwatch_log_group" "event_bus_logs" { + name = "/aws/vendedlogs/events/event-bus/${aws_cloudwatch_event_bus.example.name}" +} + +data "aws_iam_policy_document" "cwlogs" { + statement { + effect = "Allow" + principals { + type = "Service" + identifiers = ["delivery.logs.amazonaws.com"] + } + actions = [ + "logs:CreateLogStream", + "logs:PutLogEvents" + ] + resources = [ + "${aws_cloudwatch_log_group.event_bus_logs.arn}:log-stream:*" + ] + condition { + test = "StringEquals" + variable = "aws:SourceAccount" + values = [data.aws_caller_identity.current.account_id] + } + condition { + test = "ArnLike" + variable = "aws:SourceArn" + values = [ + aws_cloudwatch_log_delivery_source.info_logs.arn, + aws_cloudwatch_log_delivery_source.error_logs.arn, + aws_cloudwatch_log_delivery_source.trace_logs.arn + ] + } + } +} + +resource 
"aws_cloudwatch_log_resource_policy" "example" { + policy_document = data.aws_iam_policy_document.cwlogs.json + policy_name = "AWSLogDeliveryWrite-${aws_cloudwatch_event_bus.example.name}" +} + +resource "aws_cloudwatch_log_delivery_destination" "cwlogs" { + name = "EventsDeliveryDestination-${aws_cloudwatch_event_bus.example.name}-CWLogs" + delivery_destination_configuration { + destination_resource_arn = aws_cloudwatch_log_group.event_bus_logs.arn + } +} + +resource "aws_cloudwatch_log_delivery" "cwlogs_info_logs" { + delivery_destination_arn = aws_cloudwatch_log_delivery_destination.cwlogs.arn + delivery_source_name = aws_cloudwatch_log_delivery_source.info_logs.name + depends_on = [ + aws_cloudwatch_log_delivery.s3_info_logs + ] +} + +resource "aws_cloudwatch_log_delivery" "cwlogs_error_logs" { + delivery_destination_arn = aws_cloudwatch_log_delivery_destination.cwlogs.arn + delivery_source_name = aws_cloudwatch_log_delivery_source.error_logs.name + depends_on = [ + aws_cloudwatch_log_delivery.s3_error_logs, + aws_cloudwatch_log_delivery.cwlogs_info_logs + ] +} + +resource "aws_cloudwatch_log_delivery" "cwlogs_trace_logs" { + delivery_destination_arn = aws_cloudwatch_log_delivery_destination.cwlogs.arn + delivery_source_name = aws_cloudwatch_log_delivery_source.trace_logs.name + depends_on = [ + aws_cloudwatch_log_delivery.s3_trace_logs, + aws_cloudwatch_log_delivery.cwlogs_error_logs + ] +} + +# Logging to Data Firehose +resource "aws_kinesis_firehose_delivery_stream" "cloudfront_logs" { + # The tag named "LogDeliveryEnabled" must be set to "true" to allow the service-linked role "AWSServiceRoleForLogDelivery" + # to perform permitted actions on your behalf. + # See: https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/AWS-logs-and-resource-policy.html#AWS-logs-infrastructure-V2-Firehose + tags = { + LogDeliveryEnabled = "true" + } + + # other config... 
+} + +resource "aws_cloudwatch_log_delivery_destination" "firehose" { + name = "EventsDeliveryDestination-${aws_cloudwatch_event_bus.example.name}-Firehose" + delivery_destination_configuration { + destination_resource_arn = aws_kinesis_firehose_delivery_stream.cloudfront_logs.arn + } +} + +resource "aws_cloudwatch_log_delivery" "firehose_info_logs" { + delivery_destination_arn = aws_cloudwatch_log_delivery_destination.firehose.arn + delivery_source_name = aws_cloudwatch_log_delivery_source.info_logs.name + depends_on = [ + aws_cloudwatch_log_delivery.cwlogs_info_logs + ] +} + +resource "aws_cloudwatch_log_delivery" "firehose_error_logs" { + delivery_destination_arn = aws_cloudwatch_log_delivery_destination.firehose.arn + delivery_source_name = aws_cloudwatch_log_delivery_source.error_logs.name + depends_on = [ + aws_cloudwatch_log_delivery.cwlogs_error_logs, + aws_cloudwatch_log_delivery.firehose_info_logs + ] +} + +resource "aws_cloudwatch_log_delivery" "firehose_trace_logs" { + delivery_destination_arn = aws_cloudwatch_log_delivery_destination.firehose.arn + delivery_source_name = aws_cloudwatch_log_delivery_source.trace_logs.name + depends_on = [ + aws_cloudwatch_log_delivery.cwlogs_trace_logs, + aws_cloudwatch_log_delivery.firehose_error_logs + ] +} +``` + ## Argument Reference This resource supports the following arguments: @@ -49,6 +302,9 @@ The following arguments are optional: * `description` - (Optional) Event bus description. * `event_source_name` - (Optional) Partner event source that the new event bus will be matched with. Must match `name`. * `kms_key_identifier` - (Optional) Identifier of the AWS KMS customer managed key for EventBridge to use, if you choose to use a customer managed key to encrypt events on this event bus. The identifier can be the key Amazon Resource Name (ARN), KeyId, key alias, or key alias ARN. +* `log_config` - (Optional) Block for logging configuration settings for the event bus. 
+  * `include_detail` - (Optional) Whether EventBridge includes detailed event information in the records it generates. Valid values are `NONE` and `FULL`. +  * `level` - (Optional) Level of logging detail to include. Valid values are `OFF`, `ERROR`, `INFO`, and `TRACE`. * `tags` - (Optional) Map of tags assigned to the resource. If configured with a provider [`default_tags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. ## Attribute Reference diff --git a/website/docs/r/cloudwatch_event_rule.html.markdown b/website/docs/r/cloudwatch_event_rule.html.markdown index 6cdd2323e464..ba41ae8a7688 100644 --- a/website/docs/r/cloudwatch_event_rule.html.markdown +++ b/website/docs/r/cloudwatch_event_rule.html.markdown @@ -85,11 +85,39 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { +  to = aws_cloudwatch_event_rule.example +  identity = { +    name           = "capture-console-sign-in" +    event_bus_name = "example-event-bus" +  } +} + +resource "aws_cloudwatch_event_rule" "example" { +  ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `name` (String) Name of the EventBridge rule. + +#### Optional + +* `account_id` (String) AWS Account where this resource is managed. +* `event_bus_name` (String) Name of the event bus. +* `region` (String) Region where this resource is managed. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import EventBridge Rules using the `event_bus_name/rule_name` (if you omit `event_bus_name`, the `default` event bus will be used). 
For example: ```terraform import { - to = aws_cloudwatch_event_rule.console + to = aws_cloudwatch_event_rule.example id = "example-event-bus/capture-console-sign-in" } ``` @@ -97,5 +125,5 @@ import { Using `terraform import`, import EventBridge Rules using the `event_bus_name/rule_name` (if you omit `event_bus_name`, the `default` event bus will be used). For example: ```console -% terraform import aws_cloudwatch_event_rule.console example-event-bus/capture-console-sign-in +% terraform import aws_cloudwatch_event_rule.example example-event-bus/capture-console-sign-in ``` diff --git a/website/docs/r/cloudwatch_event_target.html.markdown b/website/docs/r/cloudwatch_event_target.html.markdown index aa7e0ff53d61..860e0fcb963d 100644 --- a/website/docs/r/cloudwatch_event_target.html.markdown +++ b/website/docs/r/cloudwatch_event_target.html.markdown @@ -690,11 +690,41 @@ This resource exports no additional attributes. ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_cloudwatch_event_target.example + identity = { + event_bus_name = "default" + rule = "rule-name" + target_id = "target-id" + } +} + +resource "aws_cloudwatch_event_target" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `event_bus_name` (String) Event bus name for the target. +* `rule` (String) Rule name for the target. +* `target_id` (String) Target ID. + +#### Optional + +* `account_id` (String) AWS Account where this resource is managed. +* `region` (String) Region where this resource is managed. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import EventBridge Targets using `event_bus_name/rule-name/target-id` (if you omit `event_bus_name`, the `default` event bus will be used). 
For example: ```terraform import { - to = aws_cloudwatch_event_target.test-event-target + to = aws_cloudwatch_event_target.example id = "rule-name/target-id" } ``` @@ -702,5 +732,5 @@ import { Using `terraform import`, import EventBridge Targets using `event_bus_name/rule-name/target-id` (if you omit `event_bus_name`, the `default` event bus will be used). For example: ```console -% terraform import aws_cloudwatch_event_target.test-event-target rule-name/target-id +% terraform import aws_cloudwatch_event_target.example rule-name/target-id ``` diff --git a/website/docs/r/cloudwatch_log_anomaly_detector.html.markdown b/website/docs/r/cloudwatch_log_anomaly_detector.html.markdown index fe702ac22b2d..5b80038b8f6c 100644 --- a/website/docs/r/cloudwatch_log_anomaly_detector.html.markdown +++ b/website/docs/r/cloudwatch_log_anomaly_detector.html.markdown @@ -66,7 +66,7 @@ import { } ``` -Using `terraform import`, import CloudWatch Log Anomaly Detector using the `example_id_arg`. For example: +Using `terraform import`, import CloudWatch Log Anomaly Detector using the `arn`. For example: ```console % terraform import aws_cloudwatch_log_anomaly_detector.example log_anomaly_detector-arn-12345678 diff --git a/website/docs/r/cloudwatch_log_group.html.markdown b/website/docs/r/cloudwatch_log_group.html.markdown index a626d60b284c..4b2592b5f8ba 100644 --- a/website/docs/r/cloudwatch_log_group.html.markdown +++ b/website/docs/r/cloudwatch_log_group.html.markdown @@ -49,11 +49,37 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. 
For example: + +```terraform +import { + to = aws_cloudwatch_log_group.example + identity = { + name = "yada" + } +} + +resource "aws_cloudwatch_log_group" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `name` (String) Name of the CloudWatch log group. + +#### Optional + +* `account_id` (String) AWS Account where this resource is managed. +* `region` (String) Region where this resource is managed. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Cloudwatch Log Groups using the `name`. For example: ```terraform import { - to = aws_cloudwatch_log_group.test_group + to = aws_cloudwatch_log_group.example id = "yada" } ``` @@ -61,5 +87,5 @@ import { Using `terraform import`, import Cloudwatch Log Groups using the `name`. For example: ```console -% terraform import aws_cloudwatch_log_group.test_group yada +% terraform import aws_cloudwatch_log_group.example yada ``` diff --git a/website/docs/r/cloudwatch_log_metric_filter.html.markdown b/website/docs/r/cloudwatch_log_metric_filter.html.markdown index e8a819de8dec..083f25310b79 100644 --- a/website/docs/r/cloudwatch_log_metric_filter.html.markdown +++ b/website/docs/r/cloudwatch_log_metric_filter.html.markdown @@ -40,6 +40,7 @@ This resource supports the following arguments: for extracting metric data out of ingested log events. * `log_group_name` - (Required) The name of the log group to associate the metric filter with. * `metric_transformation` - (Required) A block defining collection of information needed to define how metric data gets emitted. See below. +* `apply_on_transformed_logs` - (Optional) Whether the metric filter will be applied on the transformed version of the log events instead of the original ingested log events. Defaults to `false`. Valid only for log groups that have an active log transformer. 
The `metric_transformation` block supports the following arguments: diff --git a/website/docs/r/cloudwatch_metric_alarm.html.markdown b/website/docs/r/cloudwatch_metric_alarm.html.markdown index 9585ff9b6aba..9350ecf16c92 100644 --- a/website/docs/r/cloudwatch_metric_alarm.html.markdown +++ b/website/docs/r/cloudwatch_metric_alarm.html.markdown @@ -12,6 +12,8 @@ Provides a CloudWatch Metric Alarm resource. ## Example Usage +### Basic Usage + ```terraform resource "aws_cloudwatch_metric_alarm" "foobar" { alarm_name = "terraform-test-foobar5" @@ -27,7 +29,7 @@ resource "aws_cloudwatch_metric_alarm" "foobar" { } ``` -## Example in Conjunction with Scaling Policies +### With Scaling Policies ```terraform resource "aws_autoscaling_policy" "bat" { @@ -57,7 +59,7 @@ resource "aws_cloudwatch_metric_alarm" "bat" { } ``` -## Example with an Expression +### With a Metrics Math Expression ```terraform resource "aws_cloudwatch_metric_alarm" "foobar" { @@ -143,7 +145,36 @@ resource "aws_cloudwatch_metric_alarm" "xx_anomaly_detection" { } ``` -## Example of monitoring Healthy Hosts on NLB using Target Group and NLB +### With a Metrics Insights Query + +```terraform +resource "aws_cloudwatch_metric_alarm" "example" { + alarm_name = "example-alarm" + alarm_description = "Triggers if the smallest per-instance maximum load during the evaluation period exceeds the threshold" + comparison_operator = "GreaterThanThreshold" + evaluation_periods = 1 + threshold = 0.6 + treat_missing_data = "notBreaching" + + metric_query { + id = "q1" + expression = <<-EOT + SELECT + MAX(DBLoadRelativeToNumVCPUs) + FROM SCHEMA("AWS/RDS", DBInstanceIdentifier) + WHERE DBInstanceIdentifier != 'example-rds-instance' + GROUP BY DBInstanceIdentifier + ORDER BY MIN() ASC + LIMIT 1 + EOT + period = 60 + return_data = true + label = "Max DB Load of the Least-Loaded RDS Instance" + } +} +``` + +### Monitoring Healthy NLB Hosts with Target Group and NLB ```terraform resource "aws_cloudwatch_metric_alarm" 
"nlb_healthyhosts" { @@ -167,7 +198,7 @@ resource "aws_cloudwatch_metric_alarm" "nlb_healthyhosts" { ``` ~> **NOTE:** You cannot create a metric alarm consisting of both `statistic` and `extended_statistic` parameters. -You must choose one or the other +You must choose one or the other. ## Argument Reference @@ -190,7 +221,7 @@ This resource supports the following arguments: * `actions_enabled` - (Optional) Indicates whether or not actions should be executed during any changes to the alarm's state. Defaults to `true`. * `alarm_actions` - (Optional) The list of actions to execute when this alarm transitions into an ALARM state from any other state. Each action is specified as an Amazon Resource Name (ARN). * `alarm_description` - (Optional) The description for the alarm. -* `datapoints_to_alarm` - (Optional) The number of datapoints that must be breaching to trigger the alarm. +* `datapoints_to_alarm` - (Optional) The number of data points that must be breaching to trigger the alarm. * `dimensions` - (Optional) The dimensions for the alarm's associated metric. For the list of available dimensions see the AWS documentation [here](http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/CW_Support_For_AWS.html). * `insufficient_data_actions` - (Optional) The list of actions to execute when this alarm transitions into an INSUFFICIENT_DATA state from any other state. Each action is specified as an Amazon Resource Name (ARN). * `ok_actions` - (Optional) The list of actions to execute when this alarm transitions into an OK state from any other state. Each action is specified as an Amazon Resource Name (ARN). @@ -215,7 +246,9 @@ for details about valid values. * `id` - (Required) A short name used to tie this object to the results in the response. If you are performing math expressions on this set of data, this name represents that data and can serve as a variable in the mathematical expression. The valid characters are letters, numbers, and underscore. 
The first character must be a lowercase letter. * `account_id` - (Optional) The ID of the account where the metrics are located, if this is a cross-account alarm. -* `expression` - (Optional) The math expression to be performed on the returned data, if this object is performing a math expression. This expression can use the id of the other metrics to refer to those metrics, and can also use the id of other expressions to use the result of those expressions. For more information about metric math expressions, see Metric Math Syntax and Functions in the [Amazon CloudWatch User Guide](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/using-metric-math.html#metric-math-syntax). +* `expression` - (Optional) A Metrics Insights query or a metric math expression to be evaluated on the returned data. + For details about Metrics Insights queries, see [Metrics Insights query components and syntax](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/cloudwatch-metrics-insights-querylanguage) in the AWS documentation. + For details about metric math expressions, see [Metric Math Syntax and Functions](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/using-metric-math.html#metric-math-syntax) in the AWS documentation. * `label` - (Optional) A human-readable label for this metric or expression. This is especially useful if this is an expression, so that you know what the value represents. * `metric` - (Optional) The metric to be returned, along with statistics, period, and units. Use this parameter only if this object is retrieving a metric and not performing a math expression on returned data. * `period` - (Optional) Granularity in seconds of returned data points. @@ -249,11 +282,37 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. 
For example: + +```terraform +import { + to = aws_cloudwatch_metric_alarm.example + identity = { + alarm_name = "alarm-12345" + } +} + +resource "aws_cloudwatch_metric_alarm" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `alarm_name` (String) Name of the CloudWatch metric alarm. + +#### Optional + +* `account_id` (String) AWS Account where this resource is managed. +* `region` (String) Region where this resource is managed. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import CloudWatch Metric Alarm using the `alarm_name`. For example: ```terraform import { - to = aws_cloudwatch_metric_alarm.test + to = aws_cloudwatch_metric_alarm.example id = "alarm-12345" } ``` @@ -261,5 +320,5 @@ import { Using `terraform import`, import CloudWatch Metric Alarm using the `alarm_name`. For example: ```console -% terraform import aws_cloudwatch_metric_alarm.test alarm-12345 +% terraform import aws_cloudwatch_metric_alarm.example alarm-12345 ``` diff --git a/website/docs/r/codeartifact_domain.html.markdown b/website/docs/r/codeartifact_domain.html.markdown index 048dc5fb4dd6..7b7d7d1f2cd6 100644 --- a/website/docs/r/codeartifact_domain.html.markdown +++ b/website/docs/r/codeartifact_domain.html.markdown @@ -42,6 +42,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_codeartifact_domain.example + identity = { + "arn" = "arn:aws:codeartifact:us-west-2:123456789012:domain/example" + } +} + +resource "aws_codeartifact_domain" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the CodeArtifact domain. 
+ In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import CodeArtifact Domain using the CodeArtifact Domain arn. For example: ```terraform diff --git a/website/docs/r/codeartifact_domain_permissions_policy.html.markdown b/website/docs/r/codeartifact_domain_permissions_policy.html.markdown index d8ce2df19fba..6d7c15ed0bf7 100644 --- a/website/docs/r/codeartifact_domain_permissions_policy.html.markdown +++ b/website/docs/r/codeartifact_domain_permissions_policy.html.markdown @@ -60,6 +60,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_codeartifact_domain_permissions_policy.example + identity = { + "arn" = "arn:aws:codeartifact:us-west-2:123456789012:domain/example" + } +} + +resource "aws_codeartifact_domain_permissions_policy" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the CodeArtifact domain. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import CodeArtifact Domain Permissions Policies using the CodeArtifact Domain ARN. 
For example: ```terraform diff --git a/website/docs/r/codeartifact_repository.html.markdown b/website/docs/r/codeartifact_repository.html.markdown index 15ca5ea8eefa..c153c6c4bb55 100644 --- a/website/docs/r/codeartifact_repository.html.markdown +++ b/website/docs/r/codeartifact_repository.html.markdown @@ -96,6 +96,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_codeartifact_repository.example + identity = { + "arn" = "arn:aws:codeartifact:us-west-2:123456789012:repository/example-domain/example-repo" + } +} + +resource "aws_codeartifact_repository" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the CodeArtifact repository. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import CodeArtifact Repository using the CodeArtifact Repository ARN. For example: ```terraform diff --git a/website/docs/r/codeartifact_repository_permissions_policy.html.markdown b/website/docs/r/codeartifact_repository_permissions_policy.html.markdown index 82f1fa5d4900..e0007ae829bc 100644 --- a/website/docs/r/codeartifact_repository_permissions_policy.html.markdown +++ b/website/docs/r/codeartifact_repository_permissions_policy.html.markdown @@ -67,6 +67,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. 
For example: + +```terraform +import { + to = aws_codeartifact_repository_permissions_policy.example + identity = { + "arn" = "arn:aws:codeartifact:us-west-2:123456789012:repository/example-domain/example-repo" + } +} + +resource "aws_codeartifact_repository_permissions_policy" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the CodeArtifact repository. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import CodeArtifact Repository Permissions Policies using the CodeArtifact Repository ARN. For example: ```terraform diff --git a/website/docs/r/codebuild_fleet.html.markdown b/website/docs/r/codebuild_fleet.html.markdown index acf66bd39c72..4d093173aa5f 100644 --- a/website/docs/r/codebuild_fleet.html.markdown +++ b/website/docs/r/codebuild_fleet.html.markdown @@ -52,7 +52,7 @@ The following arguments are required: The following arguments are optional: * `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). -* `compute_configuration` - (Optional) The compute configuration of the compute fleet. This is only required if `compute_type` is set to `ATTRIBUTE_BASED_COMPUTE`. See [`compute_configuration`](#compute_configuration) below. +* `compute_configuration` - (Optional) The compute configuration of the compute fleet. This is only required if `compute_type` is set to `ATTRIBUTE_BASED_COMPUTE` or `CUSTOM_INSTANCE_TYPE`. See [`compute_configuration`](#compute_configuration) below. * `fleet_service_role` - (Optional) The service role associated with the compute fleet. * `image_id` - (Optional) The Amazon Machine Image (AMI) of the compute fleet. 
* `overflow_behavior` - (Optional) Overflow behavior for compute fleet. Valid values: `ON_DEMAND`, `QUEUE`. @@ -63,9 +63,10 @@ The following arguments are optional: ### compute_configuration * `disk` - (Optional) Amount of disk space of the instance type included in the fleet. -* `machine_type` - (Optional) Machine type of the instance type included in the fleet. Valid values: `GENERAL`, `NVME`. -* `memory` - (Optional) Amount of memory of the instance type included in the fleet. -* `vcpu` - (Optional) Number of vCPUs of the instance type included in the fleet. +* `instance_type` - (Optional) EC2 instance type to be launched in the fleet. Specify only if `compute_type` is set to `CUSTOM_INSTANCE_TYPE`. See [Supported instance families](https://docs.aws.amazon.com/codebuild/latest/userguide/build-env-ref-compute-types.html#environment-reserved-capacity.instance-types). +* `machine_type` - (Optional) Machine type of the instance type included in the fleet. Valid values: `GENERAL`, `NVME`. Specify only if `compute_type` is set to `ATTRIBUTE_BASED_COMPUTE`. +* `memory` - (Optional) Amount of memory of the instance type included in the fleet. Specify only if `compute_type` is set to `ATTRIBUTE_BASED_COMPUTE`. +* `vcpu` - (Optional) Number of vCPUs of the instance type included in the fleet. Specify only if `compute_type` is set to `ATTRIBUTE_BASED_COMPUTE`. ### scaling_configuration @@ -99,6 +100,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. 
For example: + +```terraform +import { + to = aws_codebuild_fleet.example + identity = { + "arn" = "arn:aws:codebuild:us-west-2:123456789012:fleet/example-fleet" + } +} + +resource "aws_codebuild_fleet" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the CodeBuild fleet. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import CodeBuild Fleet using the `name` or the `arn`. For example: ```terraform diff --git a/website/docs/r/codebuild_project.html.markdown b/website/docs/r/codebuild_project.html.markdown index 68023663b338..eb135521db63 100755 --- a/website/docs/r/codebuild_project.html.markdown +++ b/website/docs/r/codebuild_project.html.markdown @@ -14,6 +14,8 @@ source (e.g., the "rebuild every time a code change is pushed" option in the Cod ## Example Usage +### Basic Usage + ```terraform resource "aws_s3_bucket" "example" { bucket = "example" @@ -260,6 +262,11 @@ resource "aws_codebuild_project" "project-using-github-app" { } ``` +### Runner Project + +While no special configuration is required for `aws_codebuild_project` to create a project as a Runner Project, an `aws_codebuild_webhook` resource with an appropriate `filter_group` is required. +See the [`aws_codebuild_webhook` resource documentation example](/docs/providers/aws/r/codebuild_webhook.html#for-codebuild-runner-project) for more details. + ## Argument Reference The following arguments are required: @@ -274,6 +281,8 @@ The following arguments are required: The following arguments are optional: * `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
+* `auto_retry_limit` - (Optional) Specify a maximum number of additional automatic retries after a failed build. + The default is 0. * `badge_enabled` - (Optional) Generates a publicly-accessible URL for the projects build badge. Available as `badge_url` attribute when enabled. * `build_batch_config` - (Optional) Defines the batch build options for the project. @@ -584,6 +593,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_codebuild_project.example + identity = { + "arn" = "arn:aws:codebuild:us-west-2:123456789012:project/project-name" + } +} + +resource "aws_codebuild_project" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the CodeBuild project. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import CodeBuild Project using the `name`. For example: diff --git a/website/docs/r/codebuild_report_group.html.markdown b/website/docs/r/codebuild_report_group.html.markdown index a5ee39ff729b..28bba164d66c 100644 --- a/website/docs/r/codebuild_report_group.html.markdown +++ b/website/docs/r/codebuild_report_group.html.markdown @@ -93,6 +93,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. 
For example: + +```terraform +import { + to = aws_codebuild_report_group.example + identity = { + "arn" = "arn:aws:codebuild:us-west-2:123456789012:report-group/report-group-name" + } +} + +resource "aws_codebuild_report_group" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the CodeBuild report group. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import CodeBuild Report Group using the CodeBuild Report Group arn. For example: ```terraform diff --git a/website/docs/r/codebuild_resource_policy.html.markdown b/website/docs/r/codebuild_resource_policy.html.markdown index ced81c1a62fe..52cbeec6d107 100644 --- a/website/docs/r/codebuild_resource_policy.html.markdown +++ b/website/docs/r/codebuild_resource_policy.html.markdown @@ -65,6 +65,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_codebuild_resource_policy.example + identity = { + "arn" = "arn:aws:codebuild:us-west-2:123456789012:report-group/report-group-name" + } +} + +resource "aws_codebuild_resource_policy" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the CodeBuild resource. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import CodeBuild Resource Policy using the CodeBuild Resource Policy arn. 
For example: ```terraform diff --git a/website/docs/r/codebuild_source_credential.html.markdown b/website/docs/r/codebuild_source_credential.html.markdown index 92156231d775..2887083cc54d 100644 --- a/website/docs/r/codebuild_source_credential.html.markdown +++ b/website/docs/r/codebuild_source_credential.html.markdown @@ -70,6 +70,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_codebuild_source_credential.example + identity = { + "arn" = "arn:aws:codebuild:us-west-2:123456789012:token/github" + } +} + +resource "aws_codebuild_source_credential" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the CodeBuild source credential. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import CodeBuild Source Credential using the CodeBuild Source Credential arn. For example: diff --git a/website/docs/r/codebuild_webhook.html.markdown b/website/docs/r/codebuild_webhook.html.markdown index 79da76f115bf..f375bb31495f 100644 --- a/website/docs/r/codebuild_webhook.html.markdown +++ b/website/docs/r/codebuild_webhook.html.markdown @@ -64,6 +64,24 @@ resource "github_repository_webhook" "example" { } ``` +### For CodeBuild Runner Project + +To create a CodeBuild project as a Runner Project, the following `aws_codebuild_webhook` resource is required for the project. +See the [AWS Documentation](https://docs.aws.amazon.com/codebuild/latest/userguide/action-runner.html) for more information about CodeBuild Runner Projects.
+ +```terraform +resource "aws_codebuild_webhook" "example" { + project_name = aws_codebuild_project.example.name + build_type = "BUILD" + filter_group { + filter { + type = "EVENT" + pattern = "WORKFLOW_JOB_QUEUED" + } + } +} +``` + ## Argument Reference This resource supports the following arguments: @@ -73,25 +91,31 @@ This resource supports the following arguments: * `build_type` - (Optional) The type of build this webhook will trigger. Valid values for this parameter are: `BUILD`, `BUILD_BATCH`. * `manual_creation` - (Optional) If true, CodeBuild doesn't create a webhook in GitHub and instead returns `payload_url` and `secret` values for the webhook. The `payload_url` and `secret` values in the output can be used to manually create a webhook within GitHub. * `branch_filter` - (Optional) A regular expression used to determine which branches get built. Default is all branches are built. We recommend using `filter_group` over `branch_filter`. -* `filter_group` - (Optional) Information about the webhook's trigger. Filter group blocks are documented below. -* `scope_configuration` - (Optional) Scope configuration for global or organization webhooks. Scope configuration blocks are documented below. +* `filter_group` - (Optional) Information about the webhook's trigger. See [filter_group](#filter_group) for details. +* `scope_configuration` - (Optional) Scope configuration for global or organization webhooks. See [scope_configuration](#scope_configuration) for details. +* `pull_request_build_policy` - (Optional) Defines comment-based approval requirements for triggering builds on pull requests. See [pull_request_build_policy](#pull_request_build_policy) for details. -`filter_group` supports the following: +### filter_group -* `filter` - (Required) A webhook filter for the group. Filter blocks are documented below. +* `filter` - (Required) A webhook filter for the group. See [filter](#filter) for details. 
-`filter` supports the following: +### filter * `type` - (Required) The webhook filter group's type. Valid values for this parameter are: `EVENT`, `BASE_REF`, `HEAD_REF`, `ACTOR_ACCOUNT_ID`, `FILE_PATH`, `COMMIT_MESSAGE`, `WORKFLOW_NAME`, `TAG_NAME`, `RELEASE_NAME`. At least one filter group must specify `EVENT` as its type. * `pattern` - (Required) For a filter that uses `EVENT` type, a comma-separated string that specifies one event: `PUSH`, `PULL_REQUEST_CREATED`, `PULL_REQUEST_UPDATED`, `PULL_REQUEST_REOPENED`. `PULL_REQUEST_MERGED`, `WORKFLOW_JOB_QUEUED` works with GitHub & GitHub Enterprise only. For a filter that uses any of the other filter types, a regular expression. * `exclude_matched_pattern` - (Optional) If set to `true`, the specified filter does *not* trigger a build. Defaults to `false`. -`scope_configuration` supports the following: +### scope_configuration * `name` - (Required) The name of either the enterprise or organization. * `scope` - (Required) The type of scope for a GitHub webhook. Valid values for this parameter are: `GITHUB_ORGANIZATION`, `GITHUB_GLOBAL`. * `domain` - (Optional) The domain of the GitHub Enterprise organization. Required if your project's source type is GITHUB_ENTERPRISE. +### pull_request_build_policy + +* `requires_comment_approval` - (Required) Specifies when comment-based approval is required before triggering a build on pull requests. Valid values are: `DISABLED`, `ALL_PULL_REQUESTS`, and `FORK_PULL_REQUESTS`. +* `approver_roles` - (Optional) List of repository roles that have approval privileges for pull request builds when comment approval is required. This argument must be specified only when `requires_comment_approval` is not `DISABLED`. See the [AWS documentation](https://docs.aws.amazon.com/codebuild/latest/userguide/pull-request-build-policy.html#pull-request-build-policy.configuration) for valid values and defaults. 
+ ## Attribute Reference This resource exports the following attributes in addition to the arguments above: diff --git a/website/docs/r/codeconnections_connection.html.markdown b/website/docs/r/codeconnections_connection.html.markdown index 87f1c1d3a96d..d6e72d818f10 100644 --- a/website/docs/r/codeconnections_connection.html.markdown +++ b/website/docs/r/codeconnections_connection.html.markdown @@ -44,6 +44,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_codeconnections_connection.example + identity = { + "arn" = "arn:aws:codeconnections:us-west-2:123456789012:connection/example-connection-id" + } +} + +resource "aws_codeconnections_connection" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the CodeConnections connection. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import CodeConnections connection using the ARN. For example: ```terraform diff --git a/website/docs/r/codeconnections_host.html.markdown b/website/docs/r/codeconnections_host.html.markdown index 824f1f17761d..46274a2e1b43 100644 --- a/website/docs/r/codeconnections_host.html.markdown +++ b/website/docs/r/codeconnections_host.html.markdown @@ -51,6 +51,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. 
For example: + +```terraform +import { + to = aws_codeconnections_host.example + identity = { + "arn" = "arn:aws:codeconnections:us-west-2:123456789012:host/example-host-id" + } +} + +resource "aws_codeconnections_host" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the CodeConnections host. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import CodeConnections Host using the ARN. For example: ```terraform diff --git a/website/docs/r/codepipeline_webhook.html.markdown b/website/docs/r/codepipeline_webhook.html.markdown index c335469d9d70..9492d0da929c 100644 --- a/website/docs/r/codepipeline_webhook.html.markdown +++ b/website/docs/r/codepipeline_webhook.html.markdown @@ -139,6 +139,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_codepipeline_webhook.example + identity = { + "arn" = "arn:aws:codepipeline:us-west-2:123456789012:webhook:example-webhook" + } +} + +resource "aws_codepipeline_webhook" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the CodePipeline webhook. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import CodePipeline Webhooks using their ARN. 
For example: ```terraform diff --git a/website/docs/r/codestarconnections_connection.html.markdown b/website/docs/r/codestarconnections_connection.html.markdown index a3fa9504a5a6..d904fcb32a04 100644 --- a/website/docs/r/codestarconnections_connection.html.markdown +++ b/website/docs/r/codestarconnections_connection.html.markdown @@ -82,6 +82,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_codestarconnections_connection.example + identity = { + "arn" = "arn:aws:codestar-connections:us-west-2:123456789012:connection/example-connection-id" + } +} + +resource "aws_codestarconnections_connection" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the CodeStar connection. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import CodeStar connections using the ARN. For example: ```terraform diff --git a/website/docs/r/codestarconnections_host.html.markdown b/website/docs/r/codestarconnections_host.html.markdown index f550f02b69bd..8fe10e6bf6f3 100644 --- a/website/docs/r/codestarconnections_host.html.markdown +++ b/website/docs/r/codestarconnections_host.html.markdown @@ -49,6 +49,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. 
For example: + +```terraform +import { + to = aws_codestarconnections_host.example + identity = { + "arn" = "arn:aws:codestar-connections:us-west-2:123456789012:host/example-host-id" + } +} + +resource "aws_codestarconnections_host" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the CodeStar connections host. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import CodeStar Host using the ARN. For example: ```terraform diff --git a/website/docs/r/codestarnotifications_notification_rule.html.markdown b/website/docs/r/codestarnotifications_notification_rule.html.markdown index 4e5336af9de7..60e47fa855ea 100644 --- a/website/docs/r/codestarnotifications_notification_rule.html.markdown +++ b/website/docs/r/codestarnotifications_notification_rule.html.markdown @@ -81,6 +81,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_codestarnotifications_notification_rule.example + identity = { + "arn" = "arn:aws:codestar-notifications:us-west-2:123456789012:notificationrule/dc82df7a-9435-44d4-a696-78f67EXAMPLE" + } +} + +resource "aws_codestarnotifications_notification_rule" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the CodeStar notification rule. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import CodeStar notification rule using the ARN. 
For example: ```terraform diff --git a/website/docs/r/cognito_log_delivery_configuration.html.markdown b/website/docs/r/cognito_log_delivery_configuration.html.markdown new file mode 100644 index 000000000000..165b0d4bf8b1 --- /dev/null +++ b/website/docs/r/cognito_log_delivery_configuration.html.markdown @@ -0,0 +1,244 @@ +--- +subcategory: "Cognito IDP (Identity Provider)" +layout: "aws" +page_title: "AWS: aws_cognito_log_delivery_configuration" +description: |- + Manages an AWS Cognito IDP (Identity Provider) Log Delivery Configuration. +--- + +# Resource: aws_cognito_log_delivery_configuration + +Manages an AWS Cognito IDP (Identity Provider) Log Delivery Configuration. + +## Example Usage + +### Basic Usage with CloudWatch Logs + +```terraform +resource "aws_cognito_user_pool" "example" { + name = "example" +} + +resource "aws_cloudwatch_log_group" "example" { + name = "example" +} + +resource "aws_cognito_log_delivery_configuration" "example" { + user_pool_id = aws_cognito_user_pool.example.id + + log_configurations { + event_source = "userNotification" + log_level = "ERROR" + + cloud_watch_logs_configuration { + log_group_arn = aws_cloudwatch_log_group.example.arn + } + } +} +``` + +### Multiple Log Configurations with Different Destinations + +```terraform +resource "aws_cognito_user_pool" "example" { + name = "example" +} + +resource "aws_cloudwatch_log_group" "example" { + name = "example" +} + +resource "aws_s3_bucket" "example" { + bucket = "example-bucket" + force_destroy = true +} + +resource "aws_iam_role" "firehose" { + name = "firehose-role" + + assume_role_policy = jsonencode({ + Version = "2012-10-17" + Statement = [ + { + Action = "sts:AssumeRole" + Effect = "Allow" + Principal = { + Service = "firehose.amazonaws.com" + } + } + ] + }) +} + +resource "aws_iam_role_policy" "firehose" { + name = "firehose-policy" + role = aws_iam_role.firehose.id + + policy = jsonencode({ + Version = "2012-10-17" + Statement = [ + { + Effect = "Allow" + Action = [ 
+ "s3:AbortMultipartUpload", + "s3:GetBucketLocation", + "s3:GetObject", + "s3:ListBucket", + "s3:ListBucketMultipartUploads", + "s3:PutObject" + ] + Resource = [ + aws_s3_bucket.example.arn, + "${aws_s3_bucket.example.arn}/*" + ] + } + ] + }) +} + +resource "aws_kinesis_firehose_delivery_stream" "example" { + name = "example-stream" + destination = "extended_s3" + + extended_s3_configuration { + role_arn = aws_iam_role.firehose.arn + bucket_arn = aws_s3_bucket.example.arn + } +} + +resource "aws_cognito_log_delivery_configuration" "example" { + user_pool_id = aws_cognito_user_pool.example.id + + log_configurations { + event_source = "userNotification" + log_level = "INFO" + + cloud_watch_logs_configuration { + log_group_arn = aws_cloudwatch_log_group.example.arn + } + } + + log_configurations { + event_source = "userAuthEvents" + log_level = "ERROR" + + firehose_configuration { + stream_arn = aws_kinesis_firehose_delivery_stream.example.arn + } + } +} +``` + +### S3 Configuration + +```terraform +resource "aws_cognito_user_pool" "example" { + name = "example" +} + +resource "aws_s3_bucket" "example" { + bucket = "example-bucket" + force_destroy = true +} + +resource "aws_cognito_log_delivery_configuration" "example" { + user_pool_id = aws_cognito_user_pool.example.id + + log_configurations { + event_source = "userNotification" + log_level = "ERROR" + + s3_configuration { + bucket_arn = aws_s3_bucket.example.arn + } + } +} +``` + +## Argument Reference + +The following arguments are required: + +* `user_pool_id` - (Required) The ID of the user pool for which to configure log delivery. + +The following arguments are optional: + +* `log_configurations` - (Optional) Configuration block for log delivery. At least one configuration block is required. See [Log Configurations](#log-configurations) below. +* `region` - (Optional) The AWS region. 
+ +### Log Configurations + +The `log_configurations` block supports the following: + +* `event_source` - (Required) The event source to configure logging for. Valid values are `userNotification` and `userAuthEvents`. +* `log_level` - (Required) The log level to set for the event source. Valid values are `ERROR` and `INFO`. +* `cloud_watch_logs_configuration` - (Optional) Configuration for CloudWatch Logs delivery. See [CloudWatch Logs Configuration](#cloudwatch-logs-configuration) below. +* `firehose_configuration` - (Optional) Configuration for Kinesis Data Firehose delivery. See [Firehose Configuration](#firehose-configuration) below. +* `s3_configuration` - (Optional) Configuration for S3 delivery. See [S3 Configuration](#s3-configuration) below. + +~> **Note:** At least one destination configuration (`cloud_watch_logs_configuration`, `firehose_configuration`, or `s3_configuration`) must be specified for each log configuration. + +#### CloudWatch Logs Configuration + +The `cloud_watch_logs_configuration` block supports the following: + +* `log_group_arn` - (Optional) The ARN of the CloudWatch Logs log group to which the logs should be delivered. + +#### Firehose Configuration + +The `firehose_configuration` block supports the following: + +* `stream_arn` - (Optional) The ARN of the Kinesis Data Firehose delivery stream to which the logs should be delivered. + +#### S3 Configuration + +The `s3_configuration` block supports the following: + +* `bucket_arn` - (Optional) The ARN of the S3 bucket to which the logs should be delivered. + +## Attribute Reference + +This resource exports the following attributes in addition to the arguments above: + +## Import + +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. 
For example: + +```terraform +import { + to = aws_cognito_log_delivery_configuration.example + identity = { + user_pool_id = "us-west-2_example123" + } +} + +resource "aws_cognito_log_delivery_configuration" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `user_pool_id` (String) ID of the Cognito User Pool. + +#### Optional + +* `account_id` (String) AWS Account where this resource is managed. +* `region` (String) Region where this resource is managed. + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Cognito IDP (Identity Provider) Log Delivery Configuration using the `user_pool_id`. For example: + +```terraform +import { + to = aws_cognito_log_delivery_configuration.example + id = "us-west-2_example123" +} +``` + +Using `terraform import`, import Cognito IDP (Identity Provider) Log Delivery Configuration using the `user_pool_id`. For example: + +```console +% terraform import aws_cognito_log_delivery_configuration.example us-west-2_example123 +``` diff --git a/website/docs/r/cognito_managed_login_branding.html.markdown b/website/docs/r/cognito_managed_login_branding.html.markdown new file mode 100644 index 000000000000..ca6e6a9ca987 --- /dev/null +++ b/website/docs/r/cognito_managed_login_branding.html.markdown @@ -0,0 +1,90 @@ +--- +subcategory: "Cognito IDP (Identity Provider)" +layout: "aws" +page_title: "AWS: aws_cognito_managed_login_branding" +description: |- + Manages branding settings for a user pool style and associates it with an app client. +--- + +# Resource: aws_cognito_managed_login_branding + +Manages branding settings for a user pool style and associates it with an app client. 
+ +## Example Usage + +### Default Branding Style + +```terraform +resource "aws_cognito_managed_login_branding" "client" { + client_id = aws_cognito_user_pool_client.example.id + user_pool_id = aws_cognito_user_pool.example.id + + use_cognito_provided_values = true +} +``` + +### Custom Branding Style + +```terraform +resource "aws_cognito_managed_login_branding" "client" { + client_id = aws_cognito_user_pool_client.example.id + user_pool_id = aws_cognito_user_pool.example.id + + asset { + bytes = filebase64("login_branding_asset.svg") + category = "PAGE_HEADER_BACKGROUND" + color_mode = "DARK" + extension = "SVG" + } + + settings = jsonencode({ + # Your settings here. + }) +} +``` + +## Argument Reference + +The following arguments are required: + +* `client_id` - (Required) App client that the branding style is for. +* `user_pool_id` - (Required) User pool the client belongs to. + +The following arguments are optional: + +* `asset` - (Optional) Image files to apply to roles like backgrounds, logos, and icons. See [details below](#asset). +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `settings` - (Optional) JSON document with the settings to apply to the style. +* `use_cognito_provided_values` - (Optional) When `true`, applies the default branding style options. + +### asset + +* `bytes` - (Optional) Image file, in Base64-encoded binary. +* `category` - (Required) Category that the image corresponds to. See [AWS documentation](https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_AssetType.html#CognitoUserPools-Type-AssetType-Category) for valid values. +* `color_mode` - (Required) Display-mode target of the asset. Valid values: `LIGHT`, `DARK`, `DYNAMIC`.
+* `extension` - (Required) File type of the image file. See [AWS documentation](https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_AssetType.html#CognitoUserPools-Type-AssetType-Extension) for valid values. +* `resource_id` - (Optional) Asset ID. + +## Attribute Reference + +This resource exports the following attributes in addition to the arguments above: + +* `managed_login_branding_id` - ID of the managed login branding style. +* `settings_all` - Settings including Amazon Cognito defaults. + +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Cognito branding settings using `user_pool_id` and `managed_login_branding_id` separated by `,`. For example: + +```terraform +import { + to = aws_cognito_managed_login_branding.example + id = "us-west-2_rSss9Zltr,06c6ae7b-1e66-46d2-87a9-1203ea3307bd" +} +``` + +Using `terraform import`, import Cognito branding settings using `user_pool_id` and `managed_login_branding_id` separated by `,`. For example: + +```console +% terraform import aws_cognito_managed_login_branding.example us-west-2_rSss9Zltr,06c6ae7b-1e66-46d2-87a9-1203ea3307bd +``` diff --git a/website/docs/r/cognito_user_pool.html.markdown b/website/docs/r/cognito_user_pool.html.markdown index 9a195017a094..d41a179d3923 100644 --- a/website/docs/r/cognito_user_pool.html.markdown +++ b/website/docs/r/cognito_user_pool.html.markdown @@ -74,18 +74,18 @@ This resource supports the following arguments: * `deletion_protection` - (Optional) When active, DeletionProtection prevents accidental deletion of your user pool. Before you can delete a user pool that you have protected against deletion, you must deactivate this feature. Valid values are `ACTIVE` and `INACTIVE`, Default value is `INACTIVE`. * `device_configuration` - (Optional) Configuration block for the user pool's device tracking. [Detailed below](#device_configuration).
* `email_configuration` - (Optional) Configuration block for configuring email. [Detailed below](#email_configuration). -* `email_mfa_configuration` - (Optional) Configuration block for configuring email Multi-Factor Authentication (MFA); requires at least 2 `account_recovery_setting` entries; requires an `email_configuration` configuration block. [Detailed below](#email_mfa_configuration). +* `email_mfa_configuration` - (Optional) Configuration block for configuring email Multi-Factor Authentication (MFA); requires at least 2 `account_recovery_setting` entries; requires an `email_configuration` configuration block. Effective only when `mfa_configuration` is `ON` or `OPTIONAL`. [Detailed below](#email_mfa_configuration). * `email_verification_message` - (Optional) String representing the email verification message. Conflicts with `verification_message_template` configuration block `email_message` argument. * `email_verification_subject` - (Optional) String representing the email verification subject. Conflicts with `verification_message_template` configuration block `email_subject` argument. * `lambda_config` - (Optional) Configuration block for the AWS Lambda triggers associated with the user pool. [Detailed below](#lambda_config). -* `mfa_configuration` - (Optional) Multi-Factor Authentication (MFA) configuration for the User Pool. Defaults of `OFF`. Valid values are `OFF` (MFA Tokens are not required), `ON` (MFA is required for all users to sign in; requires at least one of `sms_configuration` or `software_token_mfa_configuration` to be configured), or `OPTIONAL` (MFA Will be required only for individual users who have MFA Enabled; requires at least one of `sms_configuration` or `software_token_mfa_configuration` to be configured). +* `mfa_configuration` - (Optional) Multi-Factor Authentication (MFA) configuration for the User Pool. Defaults of `OFF`. 
Valid values are `OFF` (MFA Tokens are not required), `ON` (MFA is required for all users to sign in; requires at least one of `email_mfa_configuration`, `sms_configuration` or `software_token_mfa_configuration` to be configured), or `OPTIONAL` (MFA Will be required only for individual users who have MFA Enabled; requires at least one of `email_mfa_configuration`, `sms_configuration` or `software_token_mfa_configuration` to be configured). * `password_policy` - (Optional) Configuration block for information about the user pool password policy. [Detailed below](#password_policy). * `schema` - (Optional) Configuration block for the schema attributes of a user pool. [Detailed below](#schema). Schema attributes from the [standard attribute set](https://docs.aws.amazon.com/cognito/latest/developerguide/user-pool-settings-attributes.html#cognito-user-pools-standard-attributes) only need to be specified if they are different from the default configuration. Attributes can be added, but not modified or removed. Maximum of 50 attributes. * `sign_in_policy` - (Optional) Configuration block for information about the user pool sign in policy. [Detailed below](#sign_in_policy). * `sms_authentication_message` - (Optional) String representing the SMS authentication message. The Message must contain the `{####}` placeholder, which will be replaced with the code. -* `sms_configuration` - (Optional) Configuration block for Short Message Service (SMS) settings. [Detailed below](#sms_configuration). These settings apply to SMS user verification and SMS Multi-Factor Authentication (MFA). Due to Cognito API restrictions, the SMS configuration cannot be removed without recreating the Cognito User Pool. For user data safety, this resource will ignore the removal of this configuration by disabling drift detection. To force resource recreation after this configuration has been applied, see the [`taint` command](https://www.terraform.io/docs/commands/taint.html). 
+* `sms_configuration` - (Optional) Configuration block for Short Message Service (SMS) settings. [Detailed below](#sms_configuration). These settings apply to SMS user verification and SMS Multi-Factor Authentication (MFA). SMS MFA is activated only when `mfa_configuration` is set to `ON` or `OPTIONAL` along with this block. Due to Cognito API restrictions, the SMS configuration cannot be removed without recreating the Cognito User Pool. For user data safety, this resource will ignore the removal of this configuration by disabling drift detection. To force resource recreation after this configuration has been applied, see the [`taint` command](https://www.terraform.io/docs/commands/taint.html). * `sms_verification_message` - (Optional) String representing the SMS verification message. Conflicts with `verification_message_template` configuration block `sms_message` argument. -* `software_token_mfa_configuration` - (Optional) Configuration block for software token Mult-Factor Authentication (MFA) settings. [Detailed below](#software_token_mfa_configuration). +* `software_token_mfa_configuration` - (Optional) Configuration block for software token Multi-Factor Authentication (MFA) settings. Effective only when `mfa_configuration` is `ON` or `OPTIONAL`. [Detailed below](#software_token_mfa_configuration). * `tags` - (Optional) Map of tags to assign to the User Pool. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. * `user_attribute_update_settings` - (Optional) Configuration block for user attribute update settings. [Detailed below](#user_attribute_update_settings). * `user_pool_add_ons` - (Optional) Configuration block for user pool add-ons to enable user pool advanced security mode features. [Detailed below](#user_pool_add_ons).
diff --git a/website/docs/r/comprehend_document_classifier.html.markdown b/website/docs/r/comprehend_document_classifier.html.markdown index c3d031067f29..b0b7bab6be54 100644 --- a/website/docs/r/comprehend_document_classifier.html.markdown +++ b/website/docs/r/comprehend_document_classifier.html.markdown @@ -22,7 +22,7 @@ resource "aws_comprehend_document_classifier" "example" { language_code = "en" input_data_config { - s3_uri = "s3://${aws_s3_bucket.test.bucket}/${aws_s3_object.documents.id}" + s3_uri = "s3://${aws_s3_bucket.test.bucket}/${aws_s3_object.documents.key}" } depends_on = [ @@ -134,6 +134,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_comprehend_document_classifier.example + identity = { + "arn" = "arn:aws:comprehend:us-west-2:123456789012:document-classifier/example" + } +} + +resource "aws_comprehend_document_classifier" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the Comprehend document classifier. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Comprehend Document Classifier using the ARN. 
For example: ```terraform diff --git a/website/docs/r/comprehend_entity_recognizer.html.markdown b/website/docs/r/comprehend_entity_recognizer.html.markdown index 6434224eb6ae..3a0723a25b65 100644 --- a/website/docs/r/comprehend_entity_recognizer.html.markdown +++ b/website/docs/r/comprehend_entity_recognizer.html.markdown @@ -30,11 +30,11 @@ resource "aws_comprehend_entity_recognizer" "example" { } documents { - s3_uri = "s3://${aws_s3_bucket.documents.bucket}/${aws_s3_object.documents.id}" + s3_uri = "s3://${aws_s3_bucket.documents.bucket}/${aws_s3_object.documents.key}" } entity_list { - s3_uri = "s3://${aws_s3_bucket.entities.bucket}/${aws_s3_object.entities.id}" + s3_uri = "s3://${aws_s3_bucket.entities.bucket}/${aws_s3_object.entities.key}" } } @@ -159,6 +159,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_comprehend_entity_recognizer.example + identity = { + "arn" = "arn:aws:comprehend:us-west-2:123456789012:entity-recognizer/example" + } +} + +resource "aws_comprehend_entity_recognizer" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the Comprehend entity recognizer. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Comprehend Entity Recognizer using the ARN. 
For example: ```terraform diff --git a/website/docs/r/computeoptimizer_recommendation_preferences.html.markdown b/website/docs/r/computeoptimizer_recommendation_preferences.html.markdown index 7d36ebfc58b9..2d5a166f658e 100644 --- a/website/docs/r/computeoptimizer_recommendation_preferences.html.markdown +++ b/website/docs/r/computeoptimizer_recommendation_preferences.html.markdown @@ -59,7 +59,7 @@ This resource supports the following arguments: * `inferred_workload_types` - (Optional) The status of the inferred workload types recommendation preference. Valid values: `Active`, `Inactive`. * `look_back_period` - (Optional) The preference to control the number of days the utilization metrics of the AWS resource are analyzed. Valid values: `DAYS_14`, `DAYS_32`, `DAYS_93`. * `preferred_resource` - (Optional) The preference to control which resource type values are considered when generating rightsizing recommendations. See [Preferred Resources](#preferred-resources) below. -* `resource_type` - (Required) The target resource type of the recommendation preferences. Valid values: `Ec2Instance`, `AutoScalingGroup`, `RdsDBInstance`. +* `resource_type` - (Required) The target resource type of the recommendation preferences. Valid values: `Ec2Instance`, `AutoScalingGroup`, `RdsDBInstance`, `AuroraDBClusterStorage`. * `savings_estimation_mode` - (Optional) The status of the savings estimation mode preference. Valid values: `AfterDiscounts`, `BeforeDiscounts`. * `scope` - (Required) The scope of the recommendation preferences. See [Scope](#scope) below. * `utilization_preference` - (Optional) The preference to control the resource’s CPU utilization threshold, CPU utilization headroom, and memory utilization headroom. See [Utilization Preferences](#utilization-preferences) below. 
diff --git a/website/docs/r/config_organization_custom_policy_rule.html.markdown b/website/docs/r/config_organization_custom_policy_rule.html.markdown index 00b2f79be7cf..084d7fea2d24 100644 --- a/website/docs/r/config_organization_custom_policy_rule.html.markdown +++ b/website/docs/r/config_organization_custom_policy_rule.html.markdown @@ -45,29 +45,29 @@ resource "aws_config_organization_custom_policy_rule" "example" { The following arguments are required: -* `name` - (Required) name of the rule -* `policy_text` - (Required) policy definition containing the logic for your organization AWS Config Custom Policy rule -* `policy_runtime` - (Required) runtime system for your organization AWS Config Custom Policy rules -* `trigger_types` - (Required) List of notification types that trigger AWS Config to run an evaluation for the rule. Valid values: `ConfigurationItemChangeNotification`, `OversizedConfigurationItemChangeNotification` +* `name` - (Required) Name of the rule. +* `policy_text` - (Required) Policy definition containing the rule logic. +* `policy_runtime` - (Required) Runtime system for policy rules. +* `trigger_types` - (Required) List of notification types that trigger AWS Config to run an evaluation for the rule. Valid values: `ConfigurationItemChangeNotification`, `OversizedConfigurationItemChangeNotification`. The following arguments are optional: * `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
-* `description` - (Optional) Description of the rule -* `debug_log_delivery_accounts` - (Optional) List of AWS account identifiers to exclude from the rule -* `excluded_accounts` - (Optional) List of AWS account identifiers to exclude from the rule -* `input_parameters` - (Optional) A string in JSON format that is passed to the AWS Config Rule Lambda Function +* `description` - (Optional) Description of the rule. +* `debug_log_delivery_accounts` - (Optional) List of accounts that you can enable debug logging for. The list is null when debug logging is enabled for all accounts. +* `excluded_accounts` - (Optional) List of AWS account identifiers to exclude from the rule. +* `input_parameters` - (Optional) A string in JSON format that is passed to the AWS Config Rule Lambda Function. * `maximum_execution_frequency` - (Optional) Maximum frequency with which AWS Config runs evaluations for a rule, if the rule is triggered at a periodic frequency. Defaults to `TwentyFour_Hours` for periodic frequency triggered rules. Valid values: `One_Hour`, `Three_Hours`, `Six_Hours`, `Twelve_Hours`, or `TwentyFour_Hours`. -* `resource_id_scope` - (Optional) Identifier of the AWS resource to evaluate -* `resource_types_scope` - (Optional) List of types of AWS resources to evaluate -* `tag_key_scope` - (Optional, Required if `tag_value_scope` is configured) Tag key of AWS resources to evaluate -* `tag_value_scope` - (Optional) Tag value of AWS resources to evaluate +* `resource_id_scope` - (Optional) Identifier of the AWS resource to evaluate. +* `resource_types_scope` - (Optional) List of types of AWS resources to evaluate. +* `tag_key_scope` - (Optional, Required if `tag_value_scope` is configured) Tag key of AWS resources to evaluate. +* `tag_value_scope` - (Optional) Tag value of AWS resources to evaluate. 
## Attribute Reference This resource exports the following attributes in addition to the arguments above: -* `arn` - Amazon Resource Name (ARN) of the rule +* `arn` - Amazon Resource Name (ARN) of the rule. ## Timeouts diff --git a/website/docs/r/connect_instance.html.markdown b/website/docs/r/connect_instance.html.markdown index 1bdb5797e35f..c499f723b125 100644 --- a/website/docs/r/connect_instance.html.markdown +++ b/website/docs/r/connect_instance.html.markdown @@ -89,6 +89,32 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_connect_instance.example + identity = { + id = "f1288a1f-6193-445a-b47e-af739b2" + } +} + +resource "aws_connect_instance" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `id` - (String) ID of the connect instance. + +#### Optional + +- `account_id` (String) AWS Account where this resource is managed. +- `region` (String) Region where this resource is managed. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Connect instances using the `id`. For example: ```terraform diff --git a/website/docs/r/connect_phone_number.html.markdown b/website/docs/r/connect_phone_number.html.markdown index 07d0ef048c8b..4d5fd5c31e88 100644 --- a/website/docs/r/connect_phone_number.html.markdown +++ b/website/docs/r/connect_phone_number.html.markdown @@ -88,6 +88,31 @@ The `status` configuration block supports the following attributes: ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. 
For example: + +```terraform +import { + to = aws_connect_phone_number.example + identity = { + id = "1234abcd-12ab-34cd-56ef-1234567890ab" + } +} +resource "aws_connect_phone_number" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `id` - (String) ID of the connect phone number. + +#### Optional + +- `account_id` (String) AWS Account where this resource is managed. +- `region` (String) Region where this resource is managed. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Amazon Connect Phone Numbers using its `id`. For example: ```terraform diff --git a/website/docs/r/connect_phone_number_contact_flow_association.html.markdown b/website/docs/r/connect_phone_number_contact_flow_association.html.markdown new file mode 100644 index 000000000000..ba63b0640e94 --- /dev/null +++ b/website/docs/r/connect_phone_number_contact_flow_association.html.markdown @@ -0,0 +1,51 @@ +--- +subcategory: "Connect" +layout: "aws" +page_title: "AWS: aws_connect_phone_number_contact_flow_association" +description: |- + Associates a flow with a phone number claimed to an Amazon Connect instance. +--- + +# Resource: aws_connect_phone_number_contact_flow_association + +Associates a flow with a phone number claimed to an Amazon Connect instance. + +## Example Usage + +```terraform +resource "aws_connect_phone_number_contact_flow_association" "example" { + phone_number_id = aws_connect_phone_number.example.id + instance_id = aws_connect_instance.example.id + contact_flow_id = aws_connect_contact_flow.example.contact_flow_id +} +``` + +## Argument Reference + +This resource supports the following arguments: + +* `contact_flow_id` - (Required) Contact flow ID. +* `instance_id` - (Required) Amazon Connect instance ID. +* `phone_number_id` - (Required) Phone number ID. 
+* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). + +## Attribute Reference + +This resource exports no additional attributes. + +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import `aws_connect_phone_number_contact_flow_association` using the `phone_number_id`, `instance_id` and `contact_flow_id` separated by a comma (`,`). For example: + +```terraform +import { + to = aws_connect_phone_number_contact_flow_association.example + id = "36727a4c-4683-4e49-880c-3347c61110a4,fa6c1691-e2eb-4487-bdb9-1aaed6268ebd,c4acdc79-395e-4280-a294-9062f56b07bb" +} +``` + +Using `terraform import`, import `aws_connect_phone_number_contact_flow_association` using the `phone_number_id`, `instance_id` and `contact_flow_id` separated by a comma (`,`). For example: + +```console +% terraform import aws_connect_phone_number_contact_flow_association.example 36727a4c-4683-4e49-880c-3347c61110a4,fa6c1691-e2eb-4487-bdb9-1aaed6268ebd,c4acdc79-395e-4280-a294-9062f56b07bb +``` diff --git a/website/docs/r/controltower_baseline.html.markdown b/website/docs/r/controltower_baseline.html.markdown new file mode 100644 index 000000000000..0e9938857a69 --- /dev/null +++ b/website/docs/r/controltower_baseline.html.markdown @@ -0,0 +1,79 @@ +--- +subcategory: "Control Tower" +layout: "aws" +page_title: "AWS: aws_controltower_baseline" +description: |- + Terraform resource for managing an AWS Control Tower Baseline. +--- + +# Resource: aws_controltower_baseline + +Terraform resource for managing an AWS Control Tower Baseline. 
+ +## Example Usage + +### Basic Usage + +```terraform +resource "aws_controltower_baseline" "example" { + baseline_identifier = "arn:aws:controltower:us-east-1::baseline/17BSJV3IGJ2QSGA2" + baseline_version = "4.0" + target_identifier = aws_organizations_organizational_unit.test.arn + parameters { + key = "IdentityCenterEnabledBaselineArn" + value = "arn:aws:controltower:us-east-1:664418989480:enabledbaseline/XALULM96QHI525UOC" + } +} +``` + +## Argument Reference + +The following arguments are required: + +* `baseline_identifier` - (Required) The ARN of the baseline to be enabled. +* `baseline_version` - (Required) The version of the baseline to be enabled. +* `target_identifier` - (Required) The ARN of the target on which the baseline will be enabled. Only OUs are supported as targets. + +The following arguments are optional: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `parameters` - (Optional) A list of key-value objects that specify enablement parameters, where key is a string and value is a document of any type. See [Parameter](#parameters) below for details. +* `tags` - (Optional) Tags to apply to the Baseline. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. + +### parameters + +* `key` - (Required) The key of the parameter. +* `value` - (Required) The value of the parameter. + +## Attribute Reference + +This resource exports the following attributes in addition to the arguments above: + +* `arn` - ARN of the Baseline. 
+* `operaton_identifier` - The ID (in UUID format) of the asynchronous operation. +* `tags_all` - A map of tags assigned to the Baseline, including those inherited from the provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). + +## Timeouts + +[Configuration options](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts): + +* `create` - (Default `30m`) +* `update` - (Default `30m`) +* `delete` - (Default `30m`) + +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Control Tower Baseline using the `arn`. For example: + +```terraform +import { + to = aws_controltower_baseline.example + id = "arn:aws:controltower:us-east-1:012345678912:enabledbaseline/XALULM96QHI525UOC" +} +``` + +Using `terraform import`, import Control Tower Baseline using the `arn`. For example: + +```console +% terraform import aws_controltower_baseline.example arn:aws:controltower:us-east-1:012345678912:enabledbaseline/XALULM96QHI525UOC +``` diff --git a/website/docs/r/datasync_agent.html.markdown b/website/docs/r/datasync_agent.html.markdown index 5d1d81782d7e..a66ee44cc342 100644 --- a/website/docs/r/datasync_agent.html.markdown +++ b/website/docs/r/datasync_agent.html.markdown @@ -78,6 +78,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. 
For example: + +```terraform +import { + to = aws_datasync_agent.example + identity = { + "arn" = "arn:aws:datasync:us-west-2:123456789012:agent/agent-12345678901234567" + } +} + +resource "aws_datasync_agent" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the DataSync agent. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import `aws_datasync_agent` using the DataSync Agent Amazon Resource Name (ARN). For example: ```terraform diff --git a/website/docs/r/datasync_location_azure_blob.html.markdown b/website/docs/r/datasync_location_azure_blob.html.markdown index 099012adaf01..563e15c4ab8c 100644 --- a/website/docs/r/datasync_location_azure_blob.html.markdown +++ b/website/docs/r/datasync_location_azure_blob.html.markdown @@ -53,6 +53,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_datasync_location_azure_blob.example + identity = { + "arn" = "arn:aws:datasync:us-west-2:123456789012:location/loc-12345678901234567" + } +} + +resource "aws_datasync_location_azure_blob" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the DataSync Azure Blob location. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import `aws_datasync_location_azure_blob` using the Amazon Resource Name (ARN). 
For example: ```terraform diff --git a/website/docs/r/datasync_location_efs.html.markdown b/website/docs/r/datasync_location_efs.html.markdown index e13733ba24fa..e82902f8f21b 100644 --- a/website/docs/r/datasync_location_efs.html.markdown +++ b/website/docs/r/datasync_location_efs.html.markdown @@ -57,6 +57,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_datasync_location_efs.example + identity = { + "arn" = "arn:aws:datasync:us-west-2:123456789012:location/loc-12345678901234567" + } +} + +resource "aws_datasync_location_efs" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the DataSync EFS location. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import `aws_datasync_location_efs` using the DataSync Task Amazon Resource Name (ARN). For example: ```terraform diff --git a/website/docs/r/datasync_location_hdfs.html.markdown b/website/docs/r/datasync_location_hdfs.html.markdown index 04f82c4396c9..db8e47579199 100644 --- a/website/docs/r/datasync_location_hdfs.html.markdown +++ b/website/docs/r/datasync_location_hdfs.html.markdown @@ -85,6 +85,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. 
For example: + +```terraform +import { + to = aws_datasync_location_hdfs.example + identity = { + "arn" = "arn:aws:datasync:us-west-2:123456789012:location/loc-12345678901234567" + } +} + +resource "aws_datasync_location_hdfs" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the DataSync HDFS location. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import `aws_datasync_location_hdfs` using the Amazon Resource Name (ARN). For example: ```terraform diff --git a/website/docs/r/datasync_location_nfs.html.markdown b/website/docs/r/datasync_location_nfs.html.markdown index 0c507101d82d..2015fe3cf6a7 100644 --- a/website/docs/r/datasync_location_nfs.html.markdown +++ b/website/docs/r/datasync_location_nfs.html.markdown @@ -58,6 +58,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_datasync_location_nfs.example + identity = { + "arn" = "arn:aws:datasync:us-west-2:123456789012:location/loc-12345678901234567" + } +} + +resource "aws_datasync_location_nfs" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the DataSync NFS location. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import `aws_datasync_location_nfs` using the DataSync Task Amazon Resource Name (ARN). 
For example: ```terraform diff --git a/website/docs/r/datasync_location_object_storage.html.markdown b/website/docs/r/datasync_location_object_storage.html.markdown index 579e4ec3bb73..72051d0f8391 100644 --- a/website/docs/r/datasync_location_object_storage.html.markdown +++ b/website/docs/r/datasync_location_object_storage.html.markdown @@ -27,7 +27,7 @@ resource "aws_datasync_location_object_storage" "example" { This resource supports the following arguments: * `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). -* `agent_arns` - (Required) A list of DataSync Agent ARNs with which this location will be associated. +* `agent_arns` - (Optional) A list of DataSync Agent ARNs with which this location will be associated. For agentless cross-cloud transfers, this parameter does not need to be specified. * `access_key` - (Optional) The access key is used if credentials are required to access the self-managed object storage server. If your object storage requires a user name and password to authenticate, use `access_key` and `secret_key` to provide the user name and password, respectively. * `bucket_name` - (Required) The bucket on the self-managed object storage server that is used to read data from. * `secret_key` - (Optional) The secret key is used if credentials are required to access the self-managed object storage server. If your object storage requires a user name and password to authenticate, use `access_key` and `secret_key` to provide the user name and password, respectively. 
@@ -48,6 +48,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_datasync_location_object_storage.example + identity = { + "arn" = "arn:aws:datasync:us-west-2:123456789012:location/loc-12345678901234567" + } +} + +resource "aws_datasync_location_object_storage" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the DataSync object storage location. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import `aws_datasync_location_object_storage` using the Amazon Resource Name (ARN). For example: ```terraform diff --git a/website/docs/r/datasync_location_s3.html.markdown b/website/docs/r/datasync_location_s3.html.markdown index 546f6b3b2d3e..9fa7e652737f 100644 --- a/website/docs/r/datasync_location_s3.html.markdown +++ b/website/docs/r/datasync_location_s3.html.markdown @@ -68,6 +68,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_datasync_location_s3.example + identity = { + "arn" = "arn:aws:datasync:us-west-2:123456789012:location/loc-12345678901234567" + } +} + +resource "aws_datasync_location_s3" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the DataSync S3 location. 
+ In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import `aws_datasync_location_s3` using the DataSync Task Amazon Resource Name (ARN). For example: ```terraform diff --git a/website/docs/r/datasync_location_smb.html.markdown b/website/docs/r/datasync_location_smb.html.markdown index 69578194ebf0..3d169079b423 100644 --- a/website/docs/r/datasync_location_smb.html.markdown +++ b/website/docs/r/datasync_location_smb.html.markdown @@ -55,6 +55,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_datasync_location_smb.example + identity = { + "arn" = "arn:aws:datasync:us-west-2:123456789012:location/loc-12345678901234567" + } +} + +resource "aws_datasync_location_smb" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the DataSync SMB location. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import `aws_datasync_location_smb` using the Amazon Resource Name (ARN). For example: ```terraform diff --git a/website/docs/r/datasync_task.html.markdown b/website/docs/r/datasync_task.html.markdown index b4978881b732..327a9cf3b791 100644 --- a/website/docs/r/datasync_task.html.markdown +++ b/website/docs/r/datasync_task.html.markdown @@ -176,6 +176,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. 
For example: + +```terraform +import { + to = aws_datasync_task.example + identity = { + "arn" = "arn:aws:datasync:us-west-2:123456789012:task/task-12345678901234567" + } +} + +resource "aws_datasync_task" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the DataSync task. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import `aws_datasync_task` using the DataSync Task Amazon Resource Name (ARN). For example: ```terraform diff --git a/website/docs/r/datazone_domain.html.markdown b/website/docs/r/datazone_domain.html.markdown index 1f22ee998d47..a0ddac37bc99 100644 --- a/website/docs/r/datazone_domain.html.markdown +++ b/website/docs/r/datazone_domain.html.markdown @@ -36,26 +36,26 @@ resource "aws_iam_role" "domain_execution_role" { }, ] }) +} - inline_policy { - name = "domain_execution_policy" - policy = jsonencode({ - Version = "2012-10-17" - Statement = [ - { - # Consider scoping down - Action = [ - "datazone:*", - "ram:*", - "sso:*", - "kms:*", - ] - Effect = "Allow" - Resource = "*" - }, - ] - }) - } +resource "aws_iam_role_policy" "domain_execution_role" { + role = aws_iam_role.domain_execution_role.name + policy = jsonencode({ + Version = "2012-10-17" + Statement = [ + { + # Consider scoping down + Action = [ + "datazone:*", + "ram:*", + "sso:*", + "kms:*", + ] + Effect = "Allow" + Resource = "*" + }, + ] + }) } resource "aws_datazone_domain" "example" { @@ -64,6 +64,90 @@ resource "aws_datazone_domain" "example" { } ``` +### V2 Domain + +```terraform +data "aws_caller_identity" "current" {} + +# IAM role for Domain Execution +data "aws_iam_policy_document" "assume_role_domain_execution" { + statement { + actions = [ + "sts:AssumeRole", + "sts:TagSession", + "sts:SetContext" + ] + principals { + type = "Service" + identifiers = ["datazone.amazonaws.com"] + } + condition { + test = 
"StringEquals" + values = [data.aws_caller_identity.current.account_id] + variable = "aws:SourceAccount" + } + condition { + test = "ForAllValues:StringLike" + values = ["datazone*"] + variable = "aws:TagKeys" + } + } +} + +resource "aws_iam_role" "domain_execution" { + assume_role_policy = data.aws_iam_policy_document.assume_role_domain_execution.json + name = "example-domain-execution-role" +} + +data "aws_iam_policy" "domain_execution_role" { + name = "SageMakerStudioDomainExecutionRolePolicy" +} + +resource "aws_iam_role_policy_attachment" "domain_execution" { + policy_arn = data.aws_iam_policy.domain_execution_role.arn + role = aws_iam_role.domain_execution.name +} + +# IAM role for Domain Service +data "aws_iam_policy_document" "assume_role_domain_service" { + statement { + actions = ["sts:AssumeRole"] + + principals { + type = "Service" + identifiers = ["datazone.amazonaws.com"] + } + condition { + test = "StringEquals" + values = [data.aws_caller_identity.current.account_id] + variable = "aws:SourceAccount" + } + } +} + +resource "aws_iam_role" "domain_service" { + assume_role_policy = data.aws_iam_policy_document.assume_role_domain_service.json + name = "example-domain-service-role" +} + +data "aws_iam_policy" "domain_service_role" { + name = "SageMakerStudioDomainServiceRolePolicy" +} + +resource "aws_iam_role_policy_attachment" "domain_service" { + policy_arn = data.aws_iam_policy.domain_service_role.arn + role = aws_iam_role.domain_service.name +} + +# DataZone Domain V2 +resource "aws_datazone_domain" "example" { + name = "example-domain" + domain_execution_role = aws_iam_role.domain_execution.arn + domain_version = "V2" + service_role = aws_iam_role.domain_service.arn +} +``` + ## Argument Reference The following arguments are required: @@ -75,7 +159,9 @@ The following arguments are optional: * `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` - (Optional) Description of the Domain. +* `domain_version` - (Optional) Version of the Domain. Valid values are `V1` and `V2`. Defaults to `V1`. * `kms_key_identifier` - (Optional) ARN of the KMS key used to encrypt the Amazon DataZone domain, metadata and reporting data. +* `service_role` - (Optional) ARN of the service role used by DataZone. Required when `domain_version` is set to `V2`. * `single_sign_on` - (Optional) Single sign on options, used to [enable AWS IAM Identity Center](https://docs.aws.amazon.com/datazone/latest/userguide/enable-IAM-identity-center-for-datazone.html) for DataZone. * `skip_deletion_check` - (Optional) Whether to skip the deletion check for the Domain. diff --git a/website/docs/r/datazone_environment.html.markdown b/website/docs/r/datazone_environment.html.markdown index 28142e89e308..3edb417362be 100644 --- a/website/docs/r/datazone_environment.html.markdown +++ b/website/docs/r/datazone_environment.html.markdown @@ -17,8 +17,6 @@ Terraform resource for managing an AWS DataZone Environment. ```terraform resource "aws_datazone_environment" "example" { name = "example" - account_identifier = data.aws_caller_identity.test.account_id - account_region = data.aws_region.test.name blueprint_identifier = aws_datazone_environment_blueprint_configuration.test.environment_blueprint_id profile_identifier = aws_datazone_environment_profile.test.id project_identifier = aws_datazone_project.test.id @@ -58,7 +56,9 @@ The following arguments are optional: * `blueprint_identifier` - (Optional) The blueprint with which the environment is created. * `description` - (Optional) The description of the environment. * `glossary_terms` - (Optional) The business glossary terms that can be used in this environment. 
-* `user_parameters` - (Optional) The user parameters that are used in the environment. See [User Parameters](#user-parameters) for more information. +* `user_parameters` - (Optional) The user parameters that are used in the environment. + See [User Parameters](#user-parameters) for more information. + Changing these values recreates the resource. ### User Parameters diff --git a/website/docs/r/datazone_glossary.html.markdown b/website/docs/r/datazone_glossary.html.markdown index a517396c8b30..0bd7f7cd1e78 100644 --- a/website/docs/r/datazone_glossary.html.markdown +++ b/website/docs/r/datazone_glossary.html.markdown @@ -115,7 +115,7 @@ This resource exports the following attributes in addition to the arguments abov ## Import -In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import DataZone Glossary using the `example_id_arg`. For example: +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import DataZone Glossary using a comma-delimited string combining the domain id, glossary id, and the id of the project it's under. For example: ```terraform import { diff --git a/website/docs/r/db_proxy.html.markdown b/website/docs/r/db_proxy.html.markdown index 57421f723152..b9ea137b9b2f 100644 --- a/website/docs/r/db_proxy.html.markdown +++ b/website/docs/r/db_proxy.html.markdown @@ -98,8 +98,9 @@ This resource supports the following arguments: * `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `name` - (Required) The identifier for the proxy. This name must be unique for all proxies owned by your AWS account in the specified AWS Region. 
An identifier must begin with a letter and must contain only ASCII letters, digits, and hyphens; it can't end with a hyphen or contain two consecutive hyphens. -* `auth` - (Required) Configuration block(s) with authorization mechanisms to connect to the associated instances or clusters. Described below. +* `auth` - (Optional) Configuration block(s) with authorization mechanisms to connect to the associated instances or clusters. Required when `default_auth_scheme` is `NONE` or unspecified. Described below. * `debug_logging` - (Optional) Whether the proxy includes detailed information about SQL statements in its logs. This information helps you to debug issues involving SQL behavior or the performance and scalability of the proxy connections. The debug information includes the text of SQL statements that you submit through the proxy. Thus, only enable this setting when needed for debugging, and only when you have security measures in place to safeguard any sensitive information that appears in the logs. +* `default_auth_scheme` - (Optional) Default authentication scheme that the proxy uses for client connections to the proxy and connections from the proxy to the underlying database. Valid values are `NONE` and `IAM_AUTH`. Defaults to `NONE`. * `engine_family` - (Required, Forces new resource) The kinds of databases that the proxy can connect to. This value determines which database network protocol the proxy recognizes when it interprets network traffic to and from the database. For Aurora MySQL, RDS for MariaDB, and RDS for MySQL databases, specify `MYSQL`. For Aurora PostgreSQL and RDS for PostgreSQL databases, specify `POSTGRESQL`. For RDS for Microsoft SQL Server, specify `SQLSERVER`. Valid values are `MYSQL`, `POSTGRESQL`, and `SQLSERVER`. * `idle_client_timeout` - (Optional) The number of seconds that a connection to the proxy can be inactive before the proxy disconnects it. 
You can set this value higher or lower than the connection timeout limit for the associated database. * `require_tls` - (Optional) A Boolean parameter that specifies whether Transport Layer Security (TLS) encryption is required for connections to the proxy. By enabling this setting, you can enforce encrypted TLS connections to the proxy. diff --git a/website/docs/r/devicefarm_device_pool.html.markdown b/website/docs/r/devicefarm_device_pool.html.markdown index 291ccc86f4d4..202076cd3e9e 100644 --- a/website/docs/r/devicefarm_device_pool.html.markdown +++ b/website/docs/r/devicefarm_device_pool.html.markdown @@ -51,6 +51,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_devicefarm_device_pool.example + identity = { + "arn" = "arn:aws:devicefarm:us-west-2:123456789012:devicepool:4e7e7e7e-7e7e-7e7e-7e7e-7e7e7e7e7e7e/a1b2c3d4-5678-90ab-cdef-EXAMPLE11111" + } +} + +resource "aws_devicefarm_device_pool" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the Device Farm device pool. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import DeviceFarm Device Pools using their ARN. 
For example: ```terraform diff --git a/website/docs/r/devicefarm_instance_profile.html.markdown b/website/docs/r/devicefarm_instance_profile.html.markdown index d0555bddd65a..7669bb64f178 100644 --- a/website/docs/r/devicefarm_instance_profile.html.markdown +++ b/website/docs/r/devicefarm_instance_profile.html.markdown @@ -41,6 +41,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_devicefarm_instance_profile.example + identity = { + "arn" = "arn:aws:devicefarm:us-west-2:123456789012:instanceprofile:4e7e7e7e-7e7e-7e7e-7e7e-7e7e7e7e7e7e" + } +} + +resource "aws_devicefarm_instance_profile" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the Device Farm instance profile. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import DeviceFarm Instance Profiles using their ARN. For example: ```terraform diff --git a/website/docs/r/devicefarm_network_profile.html.markdown b/website/docs/r/devicefarm_network_profile.html.markdown index 2808980e2dd8..373feefad700 100644 --- a/website/docs/r/devicefarm_network_profile.html.markdown +++ b/website/docs/r/devicefarm_network_profile.html.markdown @@ -53,6 +53,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. 
For example: + +```terraform +import { + to = aws_devicefarm_network_profile.example + identity = { + "arn" = "arn:aws:devicefarm:us-west-2:123456789012:networkprofile:4e7e7e7e-7e7e-7e7e-7e7e-7e7e7e7e7e7e" + } +} + +resource "aws_devicefarm_network_profile" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the Device Farm network profile. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import DeviceFarm Network Profiles using their ARN. For example: ```terraform diff --git a/website/docs/r/devicefarm_project.html.markdown b/website/docs/r/devicefarm_project.html.markdown index cedf35871eda..d08f30db9637 100644 --- a/website/docs/r/devicefarm_project.html.markdown +++ b/website/docs/r/devicefarm_project.html.markdown @@ -43,6 +43,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_devicefarm_project.example + identity = { + "arn" = "arn:aws:devicefarm:us-west-2:123456789012:project:4e7e7e7e-7e7e-7e7e-7e7e-7e7e7e7e7e7e" + } +} + +resource "aws_devicefarm_project" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the Device Farm project. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import DeviceFarm Projects using their ARN. 
For example: ```terraform diff --git a/website/docs/r/devicefarm_test_grid_project.html.markdown b/website/docs/r/devicefarm_test_grid_project.html.markdown index 9123148bd26c..40c16f1230f6 100644 --- a/website/docs/r/devicefarm_test_grid_project.html.markdown +++ b/website/docs/r/devicefarm_test_grid_project.html.markdown @@ -51,6 +51,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_devicefarm_test_grid_project.example + identity = { + "arn" = "arn:aws:devicefarm:us-west-2:123456789012:testgrid-project:4e7e7e7e-7e7e-7e7e-7e7e-7e7e7e7e7e7e" + } +} + +resource "aws_devicefarm_test_grid_project" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the Device Farm test grid project. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import DeviceFarm Test Grid Projects using their ARN. For example: ```terraform diff --git a/website/docs/r/devicefarm_upload.html.markdown b/website/docs/r/devicefarm_upload.html.markdown index 4bd1baa9b5cd..95c52156a8ab 100644 --- a/website/docs/r/devicefarm_upload.html.markdown +++ b/website/docs/r/devicefarm_upload.html.markdown @@ -47,6 +47,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. 
For example: + +```terraform +import { + to = aws_devicefarm_upload.example + identity = { + "arn" = "arn:aws:devicefarm:us-west-2:123456789012:upload:4e7e7e7e-7e7e-7e7e-7e7e-7e7e7e7e7e7e/a1b2c3d4-5678-90ab-cdef-EXAMPLE11111" + } +} + +resource "aws_devicefarm_upload" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the Device Farm upload. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import DeviceFarm Uploads using their ARN. For example: ```terraform diff --git a/website/docs/r/dlm_lifecycle_policy.html.markdown b/website/docs/r/dlm_lifecycle_policy.html.markdown index b9e439506634..378c445731d0 100644 --- a/website/docs/r/dlm_lifecycle_policy.html.markdown +++ b/website/docs/r/dlm_lifecycle_policy.html.markdown @@ -97,6 +97,30 @@ resource "aws_dlm_lifecycle_policy" "example" { } ``` +### Example Default Policy + +``` +resource "aws_dlm_lifecycle_policy" "example" { + description = "tf-acc-basic" + execution_role_arn = aws_iam_role.example.arn + default_policy = "VOLUME" + + policy_details { + create_interval = 5 + resource_type = "VOLUME" + policy_language = "SIMPLIFIED" + + exclusions { + exclude_boot_volumes = false + exclude_tags = { + test = "exclude" + } + exclude_volume_types = ["gp2"] + } + } +} +``` + ### Example Cross-Region Snapshot Copy Usage ```terraform @@ -216,6 +240,49 @@ resource "aws_iam_role_policy_attachment" "example" { } ``` +### Example Post/Pre Scripts + +``` +data "aws_iam_policy" "test" { + name = "AWSDataLifecycleManagerSSMFullAccess" +} + +resource "aws_iam_role_policy_attachment" "example" { + role = aws_iam_role.test.id + policy_arn = data.aws_iam_policy.example.arn +} + +resource "aws_dlm_lifecycle_policy" "example" { + description = "tf-acc-basic" + execution_role_arn = aws_iam_role.example.arn + + policy_details { + resource_types = ["INSTANCE"] + + 
schedule { + name = "Windows VSS" + + create_rule { + interval = 12 + scripts { + execute_operation_on_script_failure = false + execution_handler = "AWS_VSS_BACKUP" + maximum_retry_count = 2 + } + } + + retain_rule { + count = 10 + } + } + + target_tags = { + tag1 = "Windows" + } + } +} +``` + ## Argument Reference This resource supports the following arguments: @@ -223,6 +290,7 @@ This resource supports the following arguments: * `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `description` - (Required) A description for the DLM lifecycle policy. * `execution_role_arn` - (Required) The ARN of an IAM role that is able to be assumed by the DLM service. +* `default_policy` - (Required) Specify the type of default policy to create. valid values are `VOLUME` or `INSTANCE`. * `policy_details` - (Required) See the [`policy_details` configuration](#policy-details-arguments) block. Max of 1. * `state` - (Optional) Whether the lifecycle policy should be enabled or disabled. `ENABLED` or `DISABLED` are valid values. Defaults to `ENABLED`. * `tags` - (Optional) Key-value map of resource tags. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. @@ -230,13 +298,20 @@ This resource supports the following arguments: #### Policy Details arguments * `action` - (Optional) The actions to be performed when the event-based policy is triggered. You can specify only one action per policy. This parameter is required for event-based policies only. If you are creating a snapshot or AMI policy, omit this parameter. 
See the [`action` configuration](#action-arguments) block. +* `copy_tags` - (Optional, Default policies only) Indicates whether the policy should copy tags from the source resource to the snapshot or AMI. Default value is `false`. +* `create_interval` - (Optional, Default policies only) How often the policy should run and create snapshots or AMIs. valid values range from `1` to `7`. Default value is `1`. +* `exclusions` - (Optional, Default policies only) Specifies exclusion parameters for volumes or instances for which you do not want to create snapshots or AMIs. See the [`exclusions` configuration](#exclusions-arguments) block. +* `extend_deletion` - (Optional, Default policies only) snapshot or AMI retention behavior for the policy if the source volume or instance is deleted, or if the policy enters the error, disabled, or deleted state. Default value is `false`. +* `retain_interval` - (Optional, Default policies only) Specifies how long the policy should retain snapshots or AMIs before deleting them. valid values range from `2` to `14`. Default value is `7`. * `event_source` - (Optional) The event that triggers the event-based policy. This parameter is required for event-based policies only. If you are creating a snapshot or AMI policy, omit this parameter. See the [`event_source` configuration](#event-source-arguments) block. +* `resource_type` - (Optional, Default policies only) Type of default policy to create. Valid values are `VOLUME` and `INSTANCE`. * `resource_types` - (Optional) A list of resource types that should be targeted by the lifecycle policy. Valid values are `VOLUME` and `INSTANCE`. -* `resource_locations` - (Optional) The location of the resources to backup. If the source resources are located in an AWS Region, specify `CLOUD`. If the source resources are located on an Outpost in your account, specify `OUTPOST`. 
If you specify `OUTPOST`, Amazon Data Lifecycle Manager backs up all resources of the specified type with matching target tags across all of the Outposts in your account. Valid values are `CLOUD` and `OUTPOST`. +* `resource_locations` - (Optional) The location of the resources to backup. If the source resources are located in an AWS Region, specify `CLOUD`. If the source resources are located on an Outpost in your account, specify `OUTPOST`. If the source resources are located in a Local Zone, specify `LOCAL_ZONE`. Valid values are `CLOUD`, `LOCAL_ZONE`, and `OUTPOST`. +* `policy_language` - (Optional) Type of policy to create. `SIMPLIFIED` To create a default policy. `STANDARD` To create a custom policy. * `policy_type` - (Optional) The valid target resource types and actions a policy can manage. Specify `EBS_SNAPSHOT_MANAGEMENT` to create a lifecycle policy that manages the lifecycle of Amazon EBS snapshots. Specify `IMAGE_MANAGEMENT` to create a lifecycle policy that manages the lifecycle of EBS-backed AMIs. Specify `EVENT_BASED_POLICY` to create an event-based policy that performs specific actions when a defined event occurs in your AWS account. Default value is `EBS_SNAPSHOT_MANAGEMENT`. * `parameters` - (Optional) A set of optional parameters for snapshot and AMI lifecycle policies. See the [`parameters` configuration](#parameters-arguments) block. * `schedule` - (Optional) See the [`schedule` configuration](#schedule-arguments) block. -* `target_tags` (Optional) A map of tag keys and their values. Any resources that match the `resource_types` and are tagged with _any_ of these tags will be targeted. +* `target_tags` (Optional) A map of tag keys and their values. Any resources that match the `resource_types` and are tagged with _any_ of these tags will be targeted. Required when `policy_type` is `EBS_SNAPSHOT_MANAGEMENT` or `IMAGE_MANAGEMENT`. Must not be specified when `policy_type` is `EVENT_BASED_POLICY`. 
~> Note: You cannot have overlapping lifecycle policies that share the same `target_tags`. Terraform is unable to detect this at plan time but it will fail during apply. @@ -267,6 +342,12 @@ This resource supports the following arguments: * `event_type` - (Required) The type of event. Currently, only `shareSnapshot` events are supported. * `snapshot_owner` - (Required) The IDs of the AWS accounts that can trigger policy by sharing snapshots with your account. The policy only runs if one of the specified AWS accounts shares a snapshot with your account. +#### Exclusions arguments + +* `exclude_boot_volumes` - (Optional) Indicates whether to exclude volumes that are attached to instances as the boot volume. To exclude boot volumes, specify `true`. +* `exclude_tags` - (Optional) Map specifies whether to exclude volumes that have specific tags. +* `exclude_volume_types` - (Optional) List specifies the volume types to exclude. + #### Parameters arguments * `exclude_boot_volume` - (Optional) Indicates whether to exclude the root volume from snapshots created using CreateSnapshots. The default is `false`. @@ -274,6 +355,7 @@ This resource supports the following arguments: #### Schedule arguments +* `archive_rule` - (Optional) Specifies a snapshot archiving rule for a schedule. See [`archive_rule`](#archive-rule-arguments) block. * `copy_tags` - (Optional) Copy all user-defined tags on a source volume to snapshots of the volume created by this policy. * `create_rule` - (Required) See the [`create_rule`](#create-rule-arguments) block. Max of 1 per schedule. * `cross_region_copy_rule` (Optional) - See the [`cross_region_copy_rule`](#cross-region-copy-rule-arguments) block. Max of 3 per schedule. @@ -285,12 +367,21 @@ This resource supports the following arguments: * `tags_to_add` - (Optional) A map of tag keys and their values. DLM lifecycle policies will already tag the snapshot with the tags on the volume. This configuration adds extra tags on top of these. 
* `variable_tags` - (Optional) A map of tag keys and variable values, where the values are determined when the policy is executed. Only `$(instance-id)` or `$(timestamp)` are valid values. Can only be used when `resource_types` is `INSTANCE`. +#### Archive Rule Arguments + +* `archive_retain_rule` - (Required) Information about the retention period for the snapshot archiving rule. See the [`archive_retain_rule`](#archive-retain-rule-arguments) block. + +#### Archive Retain Rule Arguments + +* `retention_archive_tier` - (Required) Information about retention period in the Amazon EBS Snapshots Archive. See the [`retention_archive_tier`](#retention-archive-tier-arguments) block. + #### Create Rule arguments * `cron_expression` - (Optional) The schedule, as a Cron expression. The schedule interval must be between 1 hour and 1 year. Conflicts with `interval`, `interval_unit`, and `times`. * `interval` - (Optional) How often this lifecycle policy should be evaluated. `1`, `2`,`3`,`4`,`6`,`8`,`12` or `24` are valid values. Conflicts with `cron_expression`. If set, `interval_unit` and `times` must also be set. * `interval_unit` - (Optional) The unit for how often the lifecycle policy should be evaluated. `HOURS` is currently the only allowed value and also the default value. Conflicts with `cron_expression`. Must be set if `interval` is set. * `location` - (Optional) Specifies the destination for snapshots created by the policy. To create snapshots in the same Region as the source resource, specify `CLOUD`. To create snapshots on the same Outpost as the source resource, specify `OUTPOST_LOCAL`. If you omit this parameter, `CLOUD` is used by default. If the policy targets resources in an AWS Region, then you must create snapshots in the same Region as the source resource. If the policy targets resources on an Outpost, then you can create snapshots on the same Outpost as the source resource, or in the Region of that Outpost. Valid values are `CLOUD` and `OUTPOST_LOCAL`. 
+* `scripts` - (Optional) Specifies pre and/or post scripts for a snapshot lifecycle policy that targets instances. Valid only when `resource_type` is INSTANCE. See the [`scripts` configuration](#scripts-rule-arguments) block. * `times` - (Optional) A list of times in 24 hour clock format that sets when the lifecycle policy should be evaluated. Max of 1. Conflicts with `cron_expression`. Must be set if `interval` is set. #### Deprecate Rule arguments @@ -325,7 +416,8 @@ This resource supports the following arguments: * `deprecate_rule` - (Optional) The AMI deprecation rule for cross-Region AMI copies created by the rule. See the [`deprecate_rule`](#cross-region-copy-rule-deprecate-rule-arguments) block. * `encrypted` - (Required) To encrypt a copy of an unencrypted snapshot if encryption by default is not enabled, enable encryption using this parameter. Copies of encrypted snapshots are encrypted, even if this parameter is false or if encryption by default is not enabled. * `retain_rule` - (Required) The retention rule that indicates how long snapshot copies are to be retained in the destination Region. See the [`retain_rule`](#cross-region-copy-rule-retain-rule-arguments) block. Max of 1 per schedule. -* `target` - (Required) The target Region or the Amazon Resource Name (ARN) of the target Outpost for the snapshot copies. +* `target` - Use only for DLM policies of `policy_type=EBS_SNAPSHOT_MANAGEMENT`. The target Region or the Amazon Resource Name (ARN) of the target Outpost for the snapshot copies. +* `target_region` - Use only for DLM policies of `policy_type=IMAGE_MANAGEMENT`. The target Region or the Amazon Resource Name (ARN) of the target Outpost for the snapshot copies. #### Cross Region Copy Rule Deprecate Rule arguments @@ -337,6 +429,26 @@ This resource supports the following arguments: * `interval` - (Required) The amount of time to retain each snapshot. The maximum is 100 years. This is equivalent to 1200 months, 5200 weeks, or 36500 days. 
* `interval_unit` - (Required) The unit of time for time-based retention. Valid values: `DAYS`, `WEEKS`, `MONTHS`, or `YEARS`. +#### Scripts Rule arguments + +* `execute_operation_on_script_failure` - (Optional) Indicates whether Amazon Data Lifecycle Manager should default to crash-consistent snapshots if the pre script fails. The default is `true`. + +* `execution_handler` - (Required) The SSM document that includes the pre and/or post scripts to run. In case automating VSS backups, specify `AWS_VSS_BACKUP`. In case automating application-consistent snapshots for SAP HANA workloads, specify `AWSSystemsManagerSAP-CreateDLMSnapshotForSAPHANA`. If you are using a custom SSM document that you own, specify either the name or ARN of the SSM document. + +* `execution_handler_service` - (Optional) Indicates the service used to execute the pre and/or post scripts. If using custom SSM documents or automating application-consistent snapshots of SAP HANA workloads, specify `AWS_SYSTEMS_MANAGER`. In case automating VSS Backups, omit this parameter. The default is `AWS_SYSTEMS_MANAGER`. + +* `execution_timeout` - (Optional) Specifies a timeout period, in seconds, after which Amazon Data Lifecycle Manager fails the script run attempt if it has not completed. In case automating VSS Backups, omit this parameter. The default is `10`. + +* `maximum_retry_count` - (Optional) Specifies the number of times Amazon Data Lifecycle Manager should retry scripts that fail. Must be an integer between `0` and `3`. The default is `0`. + +* `stages` - (Optional) List to indicate which scripts Amazon Data Lifecycle Manager should run on target instances. Pre scripts run before Amazon Data Lifecycle Manager initiates snapshot creation. Post scripts run after Amazon Data Lifecycle Manager initiates snapshot creation. Valid values: `PRE` and `POST`. 
The default is `PRE` and `POST`. + +#### Retention Archive Tier Arguments + +* `count` - (Optional) The maximum number of snapshots to retain in the archive storage tier for each volume. Must be an integer between `1` and `1000`. Conflicts with `interval` and `interval_unit`. +* `interval` - (Optional) Specifies the period of time to retain snapshots in the archive tier. After this period expires, the snapshot is permanently deleted. Conflicts with `count`. If set, `interval_unit` must also be set. +* `interval_unit` - (Optional) The unit of time for time-based retention. Valid values are `DAYS`, `WEEKS`, `MONTHS`, `YEARS`. Conflicts with `count`. Must be set if `interval` is set. + ## Attribute Reference This resource exports the following attributes in addition to the arguments above: diff --git a/website/docs/r/dms_endpoint.html.markdown b/website/docs/r/dms_endpoint.html.markdown index 127af1968b03..90dc359051bc 100644 --- a/website/docs/r/dms_endpoint.html.markdown +++ b/website/docs/r/dms_endpoint.html.markdown @@ -44,7 +44,7 @@ This resource supports the following arguments: * `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `endpoint_id` - (Required) Database endpoint identifier. Identifiers must contain from 1 to 255 alphanumeric characters or hyphens, begin with a letter, contain only ASCII letters, digits, and hyphens, not end with a hyphen, and not contain two consecutive hyphens. * `endpoint_type` - (Required) Type of endpoint. Valid values are `source`, `target`. -* `engine_name` - (Required) Type of engine for the endpoint. 
Valid values are `aurora`, `aurora-postgresql`, `aurora-serverless`, `aurora-postgresql-serverless`,`azuredb`, `azure-sql-managed-instance`, `babelfish`, `db2`, `db2-zos`, `docdb`, `dynamodb`, `elasticsearch`, `kafka`, `kinesis`, `mariadb`, `mongodb`, `mysql`, `opensearch`, `oracle`, `postgres`, `redshift`,`redshift-serverless`, `s3`, `sqlserver`, `neptune` ,`sybase`. Please note that some of engine names are available only for `target` endpoint type (e.g. `redshift`). +* `engine_name` - (Required) Type of engine for the endpoint. Valid values are `aurora`, `aurora-postgresql`, `aurora-serverless`, `aurora-postgresql-serverless`,`azuredb`, `azure-sql-managed-instance`, `babelfish`, `db2`, `db2-zos`, `docdb`, `dynamodb`, `elasticsearch`, `kafka`, `kinesis`, `mariadb`, `mongodb`, `mysql`, `opensearch`, `oracle`, `postgres`, `redshift`,`redshift-serverless`, `sqlserver`, `neptune` ,`sybase`. Please note that some of engine names are available only for `target` endpoint type (e.g. `redshift`). * `kms_key_arn` - (Required when `engine_name` is `mongodb`, optional otherwise) ARN for the KMS key that will be used to encrypt the connection parameters. If you do not specify a value for `kms_key_arn`, then AWS DMS will use your default encryption key. AWS KMS creates the default encryption key for your AWS account. Your AWS account has a different default encryption key for each AWS region. When `engine_name` is `redshift`, `kms_key_arn` is the KMS Key for the Redshift target and the parameter `redshift_settings.server_side_encryption_kms_key_id` encrypts the S3 intermediate storage. The following arguments are optional: @@ -57,6 +57,8 @@ The following arguments are optional: * `kafka_settings` - (Optional) Configuration block for Kafka settings. See below. * `kinesis_settings` - (Optional) Configuration block for Kinesis settings. See below. * `mongodb_settings` - (Optional) Configuration block for MongoDB settings. See below. 
+* `mysql_settings` - (Optional) Configuration block for MySQL settings. See below. +* `oracle_settings` - (Optional) Configuration block for Oracle settings. See below. * `password` - (Optional) Password to be used to login to the endpoint database. * `postgres_settings` - (Optional) Configuration block for Postgres settings. See below. * `pause_replication_tasks` - (Optional) Whether to pause associated running replication tasks, regardless if they are managed by Terraform, prior to modifying the endpoint. Only tasks paused by the resource will be restarted after the modification completes. Default is `false`. @@ -133,11 +135,33 @@ The following arguments are optional: * `extract_doc_id` - (Optional) Document ID. Use this setting when `nesting_level` is set to `none`. Default is `false`. * `nesting_level` - (Optional) Specifies either document or table mode. Default is `none`. Valid values are `one` (table mode) and `none` (document mode). +### mysql_settings + +-> Additional information can be found in the [Using MySQL as a Source for AWS DMS documentation](https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Source.MySQL.html). + +* `after_connect_script` - (Optional) Script to run immediately after AWS DMS connects to the endpoint. +* `authentication_method` - (Optional) Authentication method to use. Valid values: `password`, `iam`. +* `clean_source_metadata_on_mismatch` - (Optional) Whether to clean and recreate table metadata information on the replication instance when a mismatch occurs. +* `events_poll_interval` - (Optional) Time interval to check the binary log for new changes/events when the database is idle. Default is `5`. +* `execute_timeout` - (Optional) Client statement timeout (in seconds) for a MySQL source endpoint. +* `max_file_size` - (Optional) Maximum size (in KB) of any .csv file used to transfer data to a MySQL-compatible database. 
+* `parallel_load_threads` - (Optional) Number of threads to use to load the data into the MySQL-compatible target database. +* `server_timezone` - (Optional) Time zone for the source MySQL database. +* `service_access_role_arn` - (Optional) ARN of the IAM role to authenticate when connecting to the endpoint. +* `target_db_type` - (Optional) Where to migrate source tables on the target. Valid values are `specific-database` and `multiple-databases`. + +### oracle_settings + +-> Additional information can be found in the [Using Oracle as a Source for AWS DMS documentation](https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Source.Oracle.html). + +* `authentication_method` - (Optional) Authentication mechanism to access the Oracle source endpoint. Default is `password`. Valid values are `password` and `kerberos`. + ### postgres_settings -> Additional information can be found in the [Using PostgreSQL as a Source for AWS DMS documentation](https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Source.PostgreSQL.html). * `after_connect_script` - (Optional) For use with change data capture (CDC) only, this attribute has AWS DMS bypass foreign keys and user triggers to reduce the time it takes to bulk load data. +* `authentication_method` - (Optional) Specifies the authentication method. Valid values: `password`, `iam`. * `babelfish_database_name` - (Optional) The Babelfish for Aurora PostgreSQL database name for the endpoint. * `capture_ddls` - (Optional) To capture DDL events, AWS DMS creates various artifacts in the PostgreSQL database when the task starts. * `database_mode` - (Optional) Specifies the default behavior of the replication's handling of PostgreSQL- compatible endpoints that require some additional configuration, such as Babelfish endpoints. @@ -152,6 +176,7 @@ The following arguments are optional: * `map_long_varchar_as` - (Optional) When true, DMS migrates LONG values as VARCHAR. 
* `max_file_size` - (Optional) Specifies the maximum size (in KB) of any .csv file used to transfer data to PostgreSQL. Default is `32,768 KB`. * `plugin_name` - (Optional) Specifies the plugin to use to create a replication slot. Valid values: `pglogical`, `test_decoding`. +* `service_access_role_arn` - (Optional) Specifies the IAM role to use to authenticate the connection. * `slot_name` - (Optional) Sets the name of a previously created logical replication slot for a CDC load of the PostgreSQL source instance. ### redis_settings diff --git a/website/docs/r/dms_replication_config.html.markdown b/website/docs/r/dms_replication_config.html.markdown index 47aa32948535..948d1be63166 100644 --- a/website/docs/r/dms_replication_config.html.markdown +++ b/website/docs/r/dms_replication_config.html.markdown @@ -90,6 +90,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_dms_replication_config.example + identity = { + "arn" = "arn:aws:dms:us-east-1:123456789012:replication-config:example-config" + } +} + +resource "aws_dms_replication_config" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the DMS replication configuration. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import replication configs using the `arn`. 
For example: ```terraform diff --git a/website/docs/r/dms_replication_instance.html.markdown b/website/docs/r/dms_replication_instance.html.markdown index 45aeb25bf929..0cabad042b26 100644 --- a/website/docs/r/dms_replication_instance.html.markdown +++ b/website/docs/r/dms_replication_instance.html.markdown @@ -104,7 +104,9 @@ This resource supports the following arguments: * `apply_immediately` - (Optional, Default: false) Indicates whether the changes should be applied immediately or during the next maintenance window. Only used when updating an existing resource. * `auto_minor_version_upgrade` - (Optional, Default: false) Indicates that minor engine upgrades will be applied automatically to the replication instance during the maintenance window. * `availability_zone` - (Optional) The EC2 Availability Zone that the replication instance will be created in. +* `dns_name_servers` - (Optional) A list of custom DNS name servers supported for the replication instance to access your on-premise source or target database. This list overrides the default name servers supported by the replication instance. You can specify a comma-separated list of internet addresses for up to four on-premise DNS name servers. * `engine_version` - (Optional) The engine version number of the replication instance. +* `kerberos_authentication_settings` - (Optional) Configuration block for settings required for Kerberos authentication. See below. * `kms_key_arn` - (Optional) The Amazon Resource Name (ARN) for the KMS key that will be used to encrypt the connection parameters. If you do not specify a value for `kms_key_arn`, then AWS DMS will use your default encryption key. AWS KMS creates the default encryption key for your AWS account. Your AWS account has a different default encryption key for each AWS region. * `multi_az` - (Optional) Specifies if the replication instance is a multi-az deployment. You cannot set the `availability_zone` parameter if the `multi_az` parameter is set to `true`. 
* `network_type` - (Optional) The type of IP address protocol used by a replication instance. Valid values: `IPV4`, `DUAL`. @@ -116,6 +118,14 @@ This resource supports the following arguments: * `tags` - (Optional) A map of tags to assign to the resource. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. * `vpc_security_group_ids` - (Optional) A list of VPC security group IDs to be used with the replication instance. The VPC security groups must work with the VPC containing the replication instance. +## kerberos_authentication_settings + +-> Additional information can be found in the [Using Kerberos Authentication with AWS Database Migration Service documentation](https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Security.Kerberos.html). + +* `key_cache_secret_iam_arn` - (Required) ARN of the IAM role that grants AWS DMS access to the secret containing key cache file for the Kerberos authentication. +* `key_cache_secret_id` - (Required) Secret ID that stores the key cache file required for Kerberos authentication. +* `krb5_file_contents` - (Required) Contents of krb5 configuration file required for Kerberos authentication. + ## Attribute Reference This resource exports the following attributes in addition to the arguments above: diff --git a/website/docs/r/docdb_cluster.html.markdown b/website/docs/r/docdb_cluster.html.markdown index 63ec86bedd4b..6661eb06a7a1 100644 --- a/website/docs/r/docdb_cluster.html.markdown +++ b/website/docs/r/docdb_cluster.html.markdown @@ -46,8 +46,9 @@ This resource supports the following arguments: * `apply_immediately` - (Optional) Specifies whether any cluster modifications are applied immediately, or during the next maintenance window. Default is `false`. 
-* `availability_zones` - (Optional) A list of EC2 Availability Zones that - instances in the DB cluster can be created in. +* `availability_zones` - (Optional) A list of EC2 Availability Zones that instances in the DB cluster can be created in. + DocumentDB automatically assigns 3 AZs if less than 3 AZs are configured, which will show as a difference requiring resource recreation next Terraform apply. + We recommend specifying 3 AZs or using [the `lifecycle` configuration block `ignore_changes` argument](https://www.terraform.io/docs/configuration/meta-arguments/lifecycle.html#ignore_changes) if necessary. * `backup_retention_period` - (Optional) The days to retain backups for. Default `1` * `cluster_identifier_prefix` - (Optional, Forces new resource) Creates a unique cluster identifier beginning with the specified prefix. Conflicts with `cluster_identifier`. * `cluster_identifier` - (Optional, Forces new resources) The cluster identifier. If omitted, Terraform will assign a random, unique identifier. @@ -75,6 +76,7 @@ This resource supports the following arguments: Default: A 30-minute window selected at random from an 8-hour block of time per region. E.g., 04:00-09:00 * `preferred_maintenance_window` - (Optional) The weekly time range during which system maintenance can occur, in (UTC) e.g., wed:04:00-wed:04:30 * `restore_to_point_in_time` - (Optional, Forces new resource) A configuration block for restoring a DB instance to an arbitrary point in time. Requires the `identifier` argument to be set with the name of the new DB instance to be created. See [Restore To Point In Time](#restore-to-point-in-time) below for details. +* `serverless_v2_scaling_configuration` - (Optional) Scaling configuration of an Amazon DocumentDB Serverless cluster. See [Serverless V2 Scaling Configuration](#serverless-v2-scaling-configuration) below for details. * `skip_final_snapshot` - (Optional) Determines whether a final DB snapshot is created before the DB cluster is deleted. 
If true is specified, no DB snapshot is created. If false is specified, a DB snapshot is created before the DB cluster is deleted, using the value from `final_snapshot_identifier`. Default is `false`. * `snapshot_identifier` - (Optional) Specifies whether or not to create this cluster from a snapshot. You can use either the name or ARN when specifying a DB cluster snapshot, or the ARN when specifying a DB snapshot. Automated snapshots **should not** be used for this attribute, unless from a different cluster. Automated snapshots are deleted as part of cluster destruction when the resource is replaced. * `storage_encrypted` - (Optional) Specifies whether the DB cluster is encrypted. The default is `false`. @@ -95,6 +97,14 @@ The `restore_to_point_in_time` block supports the following arguments: * `source_cluster_identifier` - (Required) The identifier of the source DB cluster from which to restore. Must match the identifier of an existing DB cluster. * `use_latest_restorable_time` - (Optional) A boolean value that indicates whether the DB cluster is restored from the latest backup time. Defaults to `false`. Cannot be specified with `restore_to_time`. +### Serverless V2 Scaling Configuration + +The `serverless_v2_scaling_configuration` block supports the following arguments. +Adding this block (i.e. switching to serverless) or removing it (i.e. switching from serverless) will trigger cluster replacement. + +* `max_capacity` - (Required) Maximum number of Amazon DocumentDB capacity units (DCUs) for an instance in an Amazon DocumentDB Serverless cluster. Valid values are multiples of 0.5 between 1 and 256. +* `min_capacity` - (Required) Minimum number of Amazon DocumentDB capacity units (DCUs) for an instance in an Amazon DocumentDB Serverless cluster. Valid values are multiples of 0.5 between 0.5 and 256. 
+ ## Attribute Reference This resource exports the following attributes in addition to the arguments above: diff --git a/website/docs/r/docdbelastic_cluster.html.markdown b/website/docs/r/docdbelastic_cluster.html.markdown index 91c02b1afac4..4660b48c7724 100644 --- a/website/docs/r/docdbelastic_cluster.html.markdown +++ b/website/docs/r/docdbelastic_cluster.html.markdown @@ -67,7 +67,28 @@ This resource exports the following attributes in addition to the arguments abov ## Import -In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import OpenSearchServerless Access Policy using the `name` and `type` arguments separated by a slash (`/`). For example: +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_docdbelastic_cluster.example + identity = { + "arn" = "arn:aws:docdb-elastic:us-east-1:000011112222:cluster/12345678-7abc-def0-1234-56789abcdef" + } +} + +resource "aws_docdbelastic_cluster" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the DocDB Elastic cluster. + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import DocDB Elastic Cluster using the `arn`. For example: ```terraform import { diff --git a/website/docs/r/dsql_cluster.html.markdown b/website/docs/r/dsql_cluster.html.markdown index f872264a42ac..e27650487b22 100644 --- a/website/docs/r/dsql_cluster.html.markdown +++ b/website/docs/r/dsql_cluster.html.markdown @@ -28,7 +28,10 @@ resource "aws_dsql_cluster" "example" { This resource supports the following arguments: -* `deletion_protection_enabled` - (Required) Whether deletion protection is enabled in this cluster. 
+* `deletion_protection_enabled` - (Optional) Whether deletion protection is enabled in this cluster. + Default value is `false`. +* `force_destroy` - (Optional) Destroys cluster even if `deletion_protection_enabled` is set to `true`. + Default value is `false`. * `kms_encryption_key` - (Optional) The ARN of the AWS KMS key that encrypts data in the DSQL Cluster, or `"AWS_OWNED_KMS_KEY"`. * `multi_region_properties` - (Optional) Multi-region properties of the DSQL Cluster. * `witness_region` - (Required) Witness region for the multi-region clusters. Setting this makes this cluster a multi-region cluster. Changing it recreates the resource. diff --git a/website/docs/r/dx_gateway.html.markdown b/website/docs/r/dx_gateway.html.markdown index 01f1c55fce9d..5e2bb65f13e6 100644 --- a/website/docs/r/dx_gateway.html.markdown +++ b/website/docs/r/dx_gateway.html.markdown @@ -43,11 +43,37 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_dx_gateway.example + identity = { + id = "abcd1234-dcba-5678-be23-cdef9876ab45" + } +} + +resource "aws_dx_gateway" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `id` (String) ID of the Direct Connect Gateway. + +#### Optional + +* `account_id` (String) AWS Account where this resource is managed. +* `region` (String) Region where this resource is managed. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Direct Connect Gateways using the gateway `id`. 
For example: ```terraform import { - to = aws_dx_gateway.test + to = aws_dx_gateway.example id = "abcd1234-dcba-5678-be23-cdef9876ab45" } ``` @@ -55,5 +81,5 @@ import { Using `terraform import`, import Direct Connect Gateways using the gateway `id`. For example: ```console -% terraform import aws_dx_gateway.test abcd1234-dcba-5678-be23-cdef9876ab45 +% terraform import aws_dx_gateway.example abcd1234-dcba-5678-be23-cdef9876ab45 ``` diff --git a/website/docs/r/dx_gateway_association.html.markdown b/website/docs/r/dx_gateway_association.html.markdown index 7d1002427268..0317a5e9a6d1 100644 --- a/website/docs/r/dx_gateway_association.html.markdown +++ b/website/docs/r/dx_gateway_association.html.markdown @@ -114,6 +114,7 @@ This resource exports the following attributes in addition to the arguments abov * `associated_gateway_type` - The type of the associated gateway, `transitGateway` or `virtualPrivateGateway`. * `dx_gateway_association_id` - The ID of the Direct Connect gateway association. * `dx_gateway_owner_account_id` - The ID of the AWS account that owns the Direct Connect gateway. +* `transit_gateway_attachment_id` - The ID of the Transit Gateway Attachment when the type is `transitGateway`. ## Timeouts diff --git a/website/docs/r/dx_hosted_connection.html.markdown b/website/docs/r/dx_hosted_connection.html.markdown index b4b8c5ee474e..eaedb8faef50 100644 --- a/website/docs/r/dx_hosted_connection.html.markdown +++ b/website/docs/r/dx_hosted_connection.html.markdown @@ -39,7 +39,7 @@ This resource exports the following attributes in addition to the arguments abov * `aws_device` - The Direct Connect endpoint on which the physical connection terminates. * `connection_region` - The AWS Region where the connection is located. * `has_logical_redundancy` - Indicates whether the connection supports a secondary BGP peer in the same address family (IPv4/IPv6). -* `id` - The ID of the connection. +* `id` - The ID of the hosted connection. 
* `jumbo_frame_capable` - Boolean value representing if jumbo frames have been enabled for this connection. * `lag_id` - The ID of the LAG. * `loa_issue_time` - The time of the most recent call to [DescribeLoa](https://docs.aws.amazon.com/directconnect/latest/APIReference/API_DescribeLoa.html) for this connection. diff --git a/website/docs/r/dynamodb_contributor_insights.html.markdown b/website/docs/r/dynamodb_contributor_insights.html.markdown index 2055410ff2d2..8439028df109 100644 --- a/website/docs/r/dynamodb_contributor_insights.html.markdown +++ b/website/docs/r/dynamodb_contributor_insights.html.markdown @@ -25,6 +25,7 @@ This resource supports the following arguments: * `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `table_name` - (Required) The name of the table to enable contributor insights * `index_name` - (Optional) The global secondary index name +* `mode` - (Optional) argument to specify the [CloudWatch contributor insights mode](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/contributorinsights_HowItWorks.html#contributorinsights_HowItWorks.Modes) ## Attribute Reference diff --git a/website/docs/r/dynamodb_resource_policy.html.markdown b/website/docs/r/dynamodb_resource_policy.html.markdown index 58b57a130156..9648292634df 100644 --- a/website/docs/r/dynamodb_resource_policy.html.markdown +++ b/website/docs/r/dynamodb_resource_policy.html.markdown @@ -42,7 +42,28 @@ This resource exports the following attributes in addition to the arguments abov ## Import -In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import DynamoDB Resource Policy using the `example_id_arg`. 
For example: +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_dynamodb_resource_policy.example + identity = { + "arn" = "arn:aws:dynamodb:us-west-2:123456789012:table/example-table" + } +} + +resource "aws_dynamodb_resource_policy" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the DynamoDB table. + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import DynamoDB Resource Policy using the `resource_arn`. For example: ```terraform import { @@ -51,7 +72,7 @@ import { } ``` -Using `terraform import`, import DynamoDB Resource Policy using the `example_id_arg`. For example: +Using `terraform import`, import DynamoDB Resource Policy using the `resource_arn`. For example: ```console % terraform import aws_dynamodb_resource_policy.example arn:aws:dynamodb:us-east-1:1234567890:table/my-table diff --git a/website/docs/r/dynamodb_table.html.markdown b/website/docs/r/dynamodb_table.html.markdown index 9db105d0aec8..6df6124189d2 100644 --- a/website/docs/r/dynamodb_table.html.markdown +++ b/website/docs/r/dynamodb_table.html.markdown @@ -231,6 +231,7 @@ The following arguments are optional: Default value is `STANDARD`. * `tags` - (Optional) A map of tags to populate on the created table. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. * `ttl` - (Optional) Configuration block for TTL. See below. +* `warm_throughput` - (Optional) Sets the number of warm read and write units for the specified table. See below. 
* `write_capacity` - (Optional) Number of write units for this table. If the `billing_mode` is `PROVISIONED`, this field is required. ### `attribute` @@ -267,10 +268,11 @@ The following arguments are optional: * `hash_key` - (Required) Name of the hash key in the index; must be defined as an attribute in the resource. * `name` - (Required) Name of the index. * `non_key_attributes` - (Optional) Only required with `INCLUDE` as a projection type; a list of attributes to project into the index. These do not need to be defined as attributes on the table. -* `on_demand_throughput` - (Optional) Sets the maximum number of read and write units for the specified on-demand table. See below. +* `on_demand_throughput` - (Optional) Sets the maximum number of read and write units for the specified on-demand index. See below. * `projection_type` - (Required) One of `ALL`, `INCLUDE` or `KEYS_ONLY` where `ALL` projects every attribute into the index, `KEYS_ONLY` projects into the index only the table and index hash_key and sort_key attributes , `INCLUDE` projects into the index all of the attributes that are defined in `non_key_attributes` in addition to the attributes that that`KEYS_ONLY` project. * `range_key` - (Optional) Name of the range key; must be defined * `read_capacity` - (Optional) Number of read units for this index. Must be set if billing_mode is set to PROVISIONED. +* `warm_throughput` - (Optional) Sets the number of warm read and write units for this index. See below. * `write_capacity` - (Optional) Number of write units for this index. Must be set if billing_mode is set to PROVISIONED. ### `local_secondary_index` @@ -297,6 +299,7 @@ The following arguments are optional: **Note:** This attribute will _not_ be populated with the ARN of _default_ keys. **Note:** Changing this value will recreate the replica. * `point_in_time_recovery` - (Optional) Whether to enable Point In Time Recovery for the replica. Default is `false`. 
+* `deletion_protection_enabled` - (Optional) Whether deletion protection is enabled (true) or disabled (false) on the replica. Default is `false`. * `propagate_tags` - (Optional) Whether to propagate the global table's tags to a replica. Default is `false`. Changes to tags only move in one direction: from global (source) to replica. @@ -318,6 +321,13 @@ The following arguments are optional: * `enabled` - (Optional) Whether TTL is enabled. Default value is `false`. +### `warm_throughput` + +~> **Note:** Explicitly configuring both `read_units_per_second` and `write_units_per_second` to the default/minimum values will cause Terraform to report differences. + +* `read_units_per_second` - (Optional) Number of read operations a table or index can instantaneously support. For the base table, decreasing this value will force a new resource. For a global secondary index, this value can be increased or decreased without recreation. Minimum value of `12000` (default). +* `write_units_per_second` - (Optional) Number of write operations a table or index can instantaneously support. For the base table, decreasing this value will force a new resource. For a global secondary index, this value can be increased or decreased without recreation. Minimum value of `4000` (default). + ## Attribute Reference This resource exports the following attributes in addition to the arguments above: diff --git a/website/docs/r/dynamodb_table_export.html.markdown b/website/docs/r/dynamodb_table_export.html.markdown index 18af1e4c34ec..46ed37a84838 100644 --- a/website/docs/r/dynamodb_table_export.html.markdown +++ b/website/docs/r/dynamodb_table_export.html.markdown @@ -120,6 +120,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. 
For example: + +```terraform +import { + to = aws_dynamodb_table_export.example + identity = { + "arn" = "arn:aws:dynamodb:us-west-2:123456789012:table/example-table/export/01234567890123-a1b2c3d4" + } +} + +resource "aws_dynamodb_table_export" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the DynamoDB table export. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import DynamoDB table exports using the `arn`. For example: ```terraform diff --git a/website/docs/r/ebs_fast_snapshot_restore.html.markdown b/website/docs/r/ebs_fast_snapshot_restore.html.markdown index e8898ac5d37e..d92628db70b2 100644 --- a/website/docs/r/ebs_fast_snapshot_restore.html.markdown +++ b/website/docs/r/ebs_fast_snapshot_restore.html.markdown @@ -45,7 +45,7 @@ This resource exports the following attributes in addition to the arguments abov ## Import -In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import EC2 (Elastic Compute Cloud) EBS Fast Snapshot Restore using the `example_id_arg`. For example: +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import EC2 (Elastic Compute Cloud) EBS Fast Snapshot Restore using the `availability_zone` and `snapshot_id` separated by `,`. For example: ```terraform import { @@ -54,7 +54,7 @@ import { } ``` -Using `terraform import`, import EC2 (Elastic Compute Cloud) EBS Fast Snapshot Restore using the `id`. For example: +Using `terraform import`, import EC2 (Elastic Compute Cloud) EBS Fast Snapshot Restore using the `availability_zone` and `snapshot_id` separated by `,`. 
For example: ```console % terraform import aws_ebs_fast_snapshot_restore.example us-west-2a,snap-abcdef123456 diff --git a/website/docs/r/ebs_volume.html.markdown b/website/docs/r/ebs_volume.html.markdown index bd40ab063d42..eef54cefd99c 100644 --- a/website/docs/r/ebs_volume.html.markdown +++ b/website/docs/r/ebs_volume.html.markdown @@ -40,6 +40,7 @@ This resource supports the following arguments: * `tags` - (Optional) A map of tags to assign to the resource. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. * `throughput` - (Optional) Throughput that the volume supports, in MiB/s. Only valid for `type` of `gp3`. * `type` - (Optional) Type of EBS volume. Can be `standard`, `gp2`, `gp3`, `io1`, `io2`, `sc1` or `st1` (Default: `gp2`). +* `volume_initialization_rate` - (Optional) EBS provisioned rate for volume initialization, in MiB/s, at which to download the snapshot blocks from Amazon S3 to the volume. This argument can only be set if `snapshot_id` is specified. ~> **NOTE:** At least one of `size` or `snapshot_id` is required. diff --git a/website/docs/r/ec2_client_vpn_endpoint.html.markdown b/website/docs/r/ec2_client_vpn_endpoint.html.markdown index da5f9d0a4149..aaae7b1bfbc5 100644 --- a/website/docs/r/ec2_client_vpn_endpoint.html.markdown +++ b/website/docs/r/ec2_client_vpn_endpoint.html.markdown @@ -38,7 +38,7 @@ This resource supports the following arguments: * `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
* `authentication_options` - (Required) Information about the authentication method to be used to authenticate clients. -* `client_cidr_block` - (Required) The IPv4 address range, in CIDR notation, from which to assign client IP addresses. The address range cannot overlap with the local CIDR of the VPC in which the associated subnet is located, or the routes that you add manually. The address range cannot be changed after the Client VPN endpoint has been created. The CIDR block should be /22 or greater. +* `client_cidr_block` - (Optional) The IPv4 address range, in CIDR notation, from which to assign client IP addresses. The address range cannot overlap with the local CIDR of the VPC in which the associated subnet is located, or the routes that you add manually. The address range cannot be changed after the Client VPN endpoint has been created. The CIDR block should be /22 or greater. When `traffic_ip_address_type` is set to `ipv6`, it must not be specified. Otherwise, it is required. * `client_connect_options` - (Optional) The options for managing connection authorization for new client connections. * `client_login_banner_options` - (Optional) Options for enabling a customizable text banner that will be displayed on AWS provided clients when a VPN session is established. * `client_route_enforcement_options` - (Optional) Options for enforce administrator defined routes on devices connected through the VPN. @@ -46,12 +46,14 @@ This resource supports the following arguments: * `description` - (Optional) A brief description of the Client VPN endpoint. * `disconnect_on_session_timeout` - (Optional) Indicates whether the client VPN session is disconnected after the maximum `session_timeout_hours` is reached. If `true`, users are prompted to reconnect client VPN. If `false`, client VPN attempts to reconnect automatically. The default value is `false`. * `dns_servers` - (Optional) Information about the DNS servers to be used for DNS resolution. 
A Client VPN endpoint can have up to two DNS servers. If no DNS server is specified, the DNS address of the connecting device is used. +* `endpoint_ip_address_type` - (Optional) IP address type for the Client VPN endpoint. Valid values are `ipv4`, `ipv6`, or `dual-stack`. Defaults to `ipv4`. * `security_group_ids` - (Optional) The IDs of one or more security groups to apply to the target network. You must also specify the ID of the VPC that contains the security groups. * `self_service_portal` - (Optional) Specify whether to enable the self-service portal for the Client VPN endpoint. Values can be `enabled` or `disabled`. Default value is `disabled`. * `server_certificate_arn` - (Required) The ARN of the ACM server certificate. * `session_timeout_hours` - (Optional) The maximum session duration is a trigger by which end-users are required to re-authenticate prior to establishing a VPN session. Default value is `24` - Valid values: `8 | 10 | 12 | 24` * `split_tunnel` - (Optional) Indicates whether split-tunnel is enabled on VPN endpoint. Default value is `false`. * `tags` - (Optional) A mapping of tags to assign to the resource. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. +* `traffic_ip_address_type` - (Optional) IP address type for traffic within the Client VPN tunnel. Valid values are `ipv4`, `ipv6`, or `dual-stack`. Defaults to `ipv4`. When it is set to `ipv6`, `client_cidr_block` must not be specified. * `transport_protocol` - (Optional) The transport protocol to be used by the VPN session. Default value is `udp`. * `vpc_id` - (Optional) The ID of the VPC to associate with the Client VPN endpoint. If no security group IDs are specified in the request, the default security group for the VPC is applied. 
* `vpn_port` - (Optional) The port number for the Client VPN endpoint. Valid values are `443` and `1194`. Default value is `443`. diff --git a/website/docs/r/ec2_transit_gateway_route_table_association.html.markdown b/website/docs/r/ec2_transit_gateway_route_table_association.html.markdown index cdb3d5fe382a..1df9dd4e5220 100644 --- a/website/docs/r/ec2_transit_gateway_route_table_association.html.markdown +++ b/website/docs/r/ec2_transit_gateway_route_table_association.html.markdown @@ -19,6 +19,79 @@ resource "aws_ec2_transit_gateway_route_table_association" "example" { } ``` +### Direct Connect Gateway Association + +When associating a Direct Connect Gateway attachment, reference the `transit_gateway_attachment_id` attribute directly from the `aws_dx_gateway_association` resource (available in v6.5.0+): + +```terraform +resource "aws_dx_gateway" "example" { + name = "example" + amazon_side_asn = 64512 +} + +resource "aws_ec2_transit_gateway" "example" { + description = "example" +} + +resource "aws_dx_gateway_association" "example" { + dx_gateway_id = aws_dx_gateway.example.id + associated_gateway_id = aws_ec2_transit_gateway.example.id + + allowed_prefixes = [ + "10.0.0.0/16", + ] +} + +resource "aws_ec2_transit_gateway_route_table" "example" { + transit_gateway_id = aws_ec2_transit_gateway.example.id +} + +# Correct: Reference the attachment ID directly from the association resource +resource "aws_ec2_transit_gateway_route_table_association" "example" { + transit_gateway_attachment_id = aws_dx_gateway_association.example.transit_gateway_attachment_id + transit_gateway_route_table_id = aws_ec2_transit_gateway_route_table.example.id +} +``` + +~> **NOTE:** Avoid using the `aws_ec2_transit_gateway_dx_gateway_attachment` data source to retrieve the attachment ID, as this can cause unnecessary resource recreation when unrelated attributes of the Direct Connect Gateway association change (such as `allowed_prefixes`). 
Always reference the `transit_gateway_attachment_id` attribute directly from the `aws_dx_gateway_association` resource when available. + +### VPC Attachment Association + +For VPC attachments, always reference the attachment resource's `id` attribute directly. Avoid using data sources or lifecycle rules that might cause the attachment ID to become unknown during planning: + +```terraform +resource "aws_vpc" "example" { + cidr_block = "10.0.0.0/16" +} + +resource "aws_subnet" "example" { + vpc_id = aws_vpc.example.id + cidr_block = "10.0.1.0/24" +} + +resource "aws_ec2_transit_gateway" "example" { + description = "example" +} + +resource "aws_ec2_transit_gateway_vpc_attachment" "example" { + subnet_ids = [aws_subnet.example.id] + transit_gateway_id = aws_ec2_transit_gateway.example.id + vpc_id = aws_vpc.example.id +} + +resource "aws_ec2_transit_gateway_route_table" "example" { + transit_gateway_id = aws_ec2_transit_gateway.example.id +} + +# Correct: Reference the VPC attachment ID directly +resource "aws_ec2_transit_gateway_route_table_association" "example" { + transit_gateway_attachment_id = aws_ec2_transit_gateway_vpc_attachment.example.id + transit_gateway_route_table_id = aws_ec2_transit_gateway_route_table.example.id +} +``` + +~> **NOTE:** When the `transit_gateway_attachment_id` changes (for example, when a VPC attachment is replaced), this resource will be recreated. This is the correct behavior to maintain consistency between the attachment and its route table association. 
+ ## Argument Reference This resource supports the following arguments: diff --git a/website/docs/r/ec2_transit_gateway_route_table_propagation.html.markdown b/website/docs/r/ec2_transit_gateway_route_table_propagation.html.markdown index a6139698b0e9..0ecef0cce785 100644 --- a/website/docs/r/ec2_transit_gateway_route_table_propagation.html.markdown +++ b/website/docs/r/ec2_transit_gateway_route_table_propagation.html.markdown @@ -19,6 +19,79 @@ resource "aws_ec2_transit_gateway_route_table_propagation" "example" { } ``` +### Direct Connect Gateway Propagation + +When propagating routes from a Direct Connect Gateway attachment, reference the `transit_gateway_attachment_id` attribute directly from the `aws_dx_gateway_association` resource (available in v6.5.0+): + +```terraform +resource "aws_dx_gateway" "example" { + name = "example" + amazon_side_asn = 64512 +} + +resource "aws_ec2_transit_gateway" "example" { + description = "example" +} + +resource "aws_dx_gateway_association" "example" { + dx_gateway_id = aws_dx_gateway.example.id + associated_gateway_id = aws_ec2_transit_gateway.example.id + + allowed_prefixes = [ + "10.0.0.0/16", + ] +} + +resource "aws_ec2_transit_gateway_route_table" "example" { + transit_gateway_id = aws_ec2_transit_gateway.example.id +} + +# Correct: Reference the attachment ID directly from the association resource +resource "aws_ec2_transit_gateway_route_table_propagation" "example" { + transit_gateway_attachment_id = aws_dx_gateway_association.example.transit_gateway_attachment_id + transit_gateway_route_table_id = aws_ec2_transit_gateway_route_table.example.id +} +``` + +~> **NOTE:** Avoid using the `aws_ec2_transit_gateway_dx_gateway_attachment` data source to retrieve the attachment ID, as this can cause unnecessary resource recreation when unrelated attributes of the Direct Connect Gateway association change (such as `allowed_prefixes`). 
Always reference the `transit_gateway_attachment_id` attribute directly from the `aws_dx_gateway_association` resource when available. + +### VPC Attachment Propagation + +For VPC attachments, always reference the attachment resource's `id` attribute directly. Avoid using data sources or lifecycle rules that might cause the attachment ID to become unknown during planning: + +```terraform +resource "aws_vpc" "example" { + cidr_block = "10.0.0.0/16" +} + +resource "aws_subnet" "example" { + vpc_id = aws_vpc.example.id + cidr_block = "10.0.1.0/24" +} + +resource "aws_ec2_transit_gateway" "example" { + description = "example" +} + +resource "aws_ec2_transit_gateway_vpc_attachment" "example" { + subnet_ids = [aws_subnet.example.id] + transit_gateway_id = aws_ec2_transit_gateway.example.id + vpc_id = aws_vpc.example.id +} + +resource "aws_ec2_transit_gateway_route_table" "example" { + transit_gateway_id = aws_ec2_transit_gateway.example.id +} + +# Correct: Reference the VPC attachment ID directly +resource "aws_ec2_transit_gateway_route_table_propagation" "example" { + transit_gateway_attachment_id = aws_ec2_transit_gateway_vpc_attachment.example.id + transit_gateway_route_table_id = aws_ec2_transit_gateway_route_table.example.id +} +``` + +~> **NOTE:** When the `transit_gateway_attachment_id` changes (for example, when a VPC attachment is replaced), this resource will be recreated. This is the correct behavior to maintain consistency between the attachment and its route table propagation. 
+ ## Argument Reference This resource supports the following arguments: diff --git a/website/docs/r/ecr_lifecycle_policy.html.markdown b/website/docs/r/ecr_lifecycle_policy.html.markdown index 360b6d424809..114caaef3272 100644 --- a/website/docs/r/ecr_lifecycle_policy.html.markdown +++ b/website/docs/r/ecr_lifecycle_policy.html.markdown @@ -97,6 +97,32 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_ecr_lifecycle_policy.example + identity = { + repository = "tf-example" + } +} + +resource "aws_ecr_lifecycle_policy" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `repository` - (String) Name of the ECR repository. + +#### Optional + +* `account_id` (String) AWS Account where this resource is managed. +* `region` (String) Region where this resource is managed. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import ECR Lifecycle Policy using the name of the repository. 
For example: ```terraform diff --git a/website/docs/r/ecr_repository.html.markdown b/website/docs/r/ecr_repository.html.markdown index 07704c2e4538..a9adc67eb3f2 100644 --- a/website/docs/r/ecr_repository.html.markdown +++ b/website/docs/r/ecr_repository.html.markdown @@ -23,6 +23,25 @@ resource "aws_ecr_repository" "foo" { } ``` +### With Image Tag Mutability Exclusion + +```terraform +resource "aws_ecr_repository" "example" { + name = "example-repo" + image_tag_mutability = "IMMUTABLE_WITH_EXCLUSION" + + image_tag_mutability_exclusion_filter { + filter = "latest*" + filter_type = "WILDCARD" + } + + image_tag_mutability_exclusion_filter { + filter = "dev-*" + filter_type = "WILDCARD" + } +} +``` + ## Argument Reference This resource supports the following arguments: @@ -32,7 +51,8 @@ This resource supports the following arguments: * `encryption_configuration` - (Optional) Encryption configuration for the repository. See [below for schema](#encryption_configuration). * `force_delete` - (Optional) If `true`, will delete the repository even if it contains images. Defaults to `false`. -* `image_tag_mutability` - (Optional) The tag mutability setting for the repository. Must be one of: `MUTABLE` or `IMMUTABLE`. Defaults to `MUTABLE`. +* `image_tag_mutability` - (Optional) The tag mutability setting for the repository. Must be one of: `MUTABLE`, `IMMUTABLE`, `IMMUTABLE_WITH_EXCLUSION`, or `MUTABLE_WITH_EXCLUSION`. Defaults to `MUTABLE`. +* `image_tag_mutability_exclusion_filter` - (Optional) Configuration block that defines filters to specify which image tags can override the default tag mutability setting. Only applicable when `image_tag_mutability` is set to `IMMUTABLE_WITH_EXCLUSION` or `MUTABLE_WITH_EXCLUSION`. See [below for schema](#image_tag_mutability_exclusion_filter). * `image_scanning_configuration` - (Optional) Configuration block that defines image scanning configuration for the repository. By default, image scanning must be manually triggered. 
See the [ECR User Guide](https://docs.aws.amazon.com/AmazonECR/latest/userguide/image-scanning.html) for more information about image scanning. * `scan_on_push` - (Required) Indicates whether images are scanned after being pushed to the repository (true) or not scanned (false). * `tags` - (Optional) A map of tags to assign to the resource. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. @@ -42,6 +62,11 @@ This resource supports the following arguments: * `encryption_type` - (Optional) The encryption type to use for the repository. Valid values are `AES256` or `KMS`. Defaults to `AES256`. * `kms_key` - (Optional) The ARN of the KMS key to use when `encryption_type` is `KMS`. If not specified, uses the default AWS managed key for ECR. +### image_tag_mutability_exclusion_filter + +* `filter` - (Required) The filter pattern to use for excluding image tags from the mutability setting. Must contain only letters, numbers, and special characters (._*-). Each filter can be up to 128 characters long and can contain a maximum of 2 wildcards (*). +* `filter_type` - (Required) The type of filter to use. Must be `WILDCARD`. + ## Attribute Reference This resource exports the following attributes in addition to the arguments above: @@ -59,6 +84,32 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. 
For example: + +```terraform +import { + to = aws_ecr_repository.service + identity = { + name = "test-service" + } +} + +resource "aws_ecr_repository" "service" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `name` - (String) Name of the ECR repository. + +#### Optional + +* `account_id` (String) AWS Account where this resource is managed. +* `region` (String) Region where this resource is managed. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import ECR Repositories using the `name`. For example: ```terraform diff --git a/website/docs/r/ecr_repository_creation_template.html.markdown b/website/docs/r/ecr_repository_creation_template.html.markdown index 5f7508c004bb..21b059546106 100644 --- a/website/docs/r/ecr_repository_creation_template.html.markdown +++ b/website/docs/r/ecr_repository_creation_template.html.markdown @@ -95,6 +95,7 @@ This resource supports the following arguments: * `description` - (Optional) The description for this template. * `encryption_configuration` - (Optional) Encryption configuration for any created repositories. See [below for schema](#encryption_configuration). * `image_tag_mutability` - (Optional) The tag mutability setting for any created repositories. Must be one of: `MUTABLE` or `IMMUTABLE`. Defaults to `MUTABLE`. +* `image_tag_mutability_exclusion_filter` - (Optional) Configuration block that defines filters to specify which image tags can override the default tag mutability setting. Only applicable when `image_tag_mutability` is set to `IMMUTABLE_WITH_EXCLUSION` or `MUTABLE_WITH_EXCLUSION`. See [below for schema](#image_tag_mutability_exclusion_filter). * `lifecycle_policy` - (Optional) The lifecycle policy document to apply to any created repositories. 
See more details about [Policy Parameters](http://docs.aws.amazon.com/AmazonECR/latest/userguide/LifecyclePolicies.html#lifecycle_policy_parameters) in the official AWS docs. Consider using the [`aws_ecr_lifecycle_policy_document` data_source](/docs/providers/aws/d/ecr_lifecycle_policy_document.html) to generate/manage the JSON document used for the `lifecycle_policy` argument. * `repository_policy` - (Optional) The registry policy document to apply to any created repositories. This is a JSON formatted string. For more information about building IAM policy documents with Terraform, see the [AWS IAM Policy Document Guide](https://learn.hashicorp.com/terraform/aws/iam-policy). * `resource_tags` - (Optional) A map of tags to assign to any created repositories. @@ -104,6 +105,11 @@ This resource supports the following arguments: * `encryption_type` - (Optional) The encryption type to use for any created repositories. Valid values are `AES256` or `KMS`. Defaults to `AES256`. * `kms_key` - (Optional) The ARN of the KMS key to use when `encryption_type` is `KMS`. If not specified, uses the default AWS managed key for ECR. +### image_tag_mutability_exclusion_filter + +* `filter` - (Required) The filter pattern to use for excluding image tags from the mutability setting. Must contain only letters, numbers, and special characters (._*-). Each filter can be up to 128 characters long and can contain a maximum of 2 wildcards (*). +* `filter_type` - (Required) The type of filter to use. Must be `WILDCARD`. 
+ ## Attribute Reference This resource exports the following attributes in addition to the arguments above: diff --git a/website/docs/r/ecr_repository_policy.html.markdown b/website/docs/r/ecr_repository_policy.html.markdown index 7450fd70047c..65a702a7274b 100644 --- a/website/docs/r/ecr_repository_policy.html.markdown +++ b/website/docs/r/ecr_repository_policy.html.markdown @@ -71,6 +71,32 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_ecr_repository_policy.example + identity = { + repository = "example" + } +} + +resource "aws_ecr_repository_policy" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `repository` - (String) Name of the ECR repository. + +#### Optional + +* `account_id` (String) AWS Account where this resource is managed. +* `region` (String) Region where this resource is managed. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import ECR Repository Policy using the repository name. For example: ```terraform diff --git a/website/docs/r/ecs_capacity_provider.html.markdown b/website/docs/r/ecs_capacity_provider.html.markdown index f16ef3ecf3c5..13656f8df8a8 100644 --- a/website/docs/r/ecs_capacity_provider.html.markdown +++ b/website/docs/r/ecs_capacity_provider.html.markdown @@ -12,8 +12,12 @@ Provides an ECS cluster capacity provider. More information can be found on the ~> **NOTE:** Associating an ECS Capacity Provider to an Auto Scaling Group will automatically add the `AmazonECSManaged` tag to the Auto Scaling Group. 
This tag should be included in the `aws_autoscaling_group` resource configuration to prevent Terraform from removing it in subsequent executions as well as ensuring the `AmazonECSManaged` tag is propagated to all EC2 Instances in the Auto Scaling Group if `min_size` is above 0 on creation. Any EC2 Instances in the Auto Scaling Group without this tag must be manually be updated, otherwise they may cause unexpected scaling behavior and metrics. +~> **NOTE:** You must specify exactly one of `auto_scaling_group_provider` or `managed_instances_provider`. When using `managed_instances_provider`, the `cluster` parameter is required. When using `auto_scaling_group_provider`, the `cluster` parameter must not be set. + ## Example Usage +### Auto Scaling Group Provider + ```terraform resource "aws_autoscaling_group" "example" { # ... other configuration, including potentially other tags ... @@ -42,13 +46,58 @@ resource "aws_ecs_capacity_provider" "example" { } ``` +### Managed Instances Provider + +```terraform +resource "aws_ecs_capacity_provider" "example" { + name = "example" + cluster = "my-cluster" + + managed_instances_provider { + infrastructure_role_arn = aws_iam_role.ecs_infrastructure.arn + propagate_tags = "CAPACITY_PROVIDER" + + instance_launch_template { + ec2_instance_profile_arn = aws_iam_instance_profile.ecs_instance.arn + monitoring = "DETAILED" + + network_configuration { + subnets = [aws_subnet.example.id] + security_groups = [aws_security_group.example.id] + } + + storage_configuration { + storage_size_gib = 30 + } + + instance_requirements { + memory_mib { + min = 1024 + max = 8192 + } + + vcpu_count { + min = 1 + max = 4 + } + + instance_generations = ["current"] + cpu_manufacturers = ["intel", "amd"] + } + } + } +} +``` + ## Argument Reference This resource supports the following arguments: -* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints).
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). -* `auto_scaling_group_provider` - (Required) Configuration block for the provider for the ECS auto scaling group. Detailed below. +* `auto_scaling_group_provider` - (Optional) Configuration block for the provider for the ECS auto scaling group. Detailed below. Exactly one of `auto_scaling_group_provider` or `managed_instances_provider` must be specified. +* `cluster` - (Optional) Name of the ECS cluster. Required when using `managed_instances_provider`. Must not be set when using `auto_scaling_group_provider`. +* `managed_instances_provider` - (Optional) Configuration block for the managed instances provider. Detailed below. Exactly one of `auto_scaling_group_provider` or `managed_instances_provider` must be specified. * `name` - (Required) Name of the capacity provider. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `tags` - (Optional) Key-value map of resource tags. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. ### `auto_scaling_group_provider` @@ -68,6 +117,56 @@ This resource supports the following arguments: * `status` - (Optional) Whether auto scaling is managed by ECS. Valid values are `ENABLED` and `DISABLED`. * `target_capacity` - (Optional) Target utilization for the capacity provider. A number between 1 and 100. 
+### `managed_instances_provider` + +* `infrastructure_role_arn` - (Required) The Amazon Resource Name (ARN) of the infrastructure role that Amazon ECS uses to manage instances on your behalf. This role must have permissions to launch, terminate, and manage Amazon EC2 instances, as well as access to other AWS services required for Amazon ECS Managed Instances functionality. For more information, see [Amazon ECS infrastructure IAM role](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/infrastructure_IAM_role.html) in the Amazon ECS Developer Guide. +* `instance_launch_template` - (Required) The launch template configuration that specifies how Amazon ECS should launch Amazon EC2 instances. This includes the instance profile, network configuration, storage settings, and instance requirements for attribute-based instance type selection. For more information, see [Store instance launch parameters in Amazon EC2 launch templates](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-launch-templates.html) in the Amazon EC2 User Guide. Detailed below. +* `propagate_tags` - (Optional) Specifies whether to propagate tags from the capacity provider to the Amazon ECS Managed Instances. When enabled, tags applied to the capacity provider are automatically applied to all instances launched by this provider. Valid values are `CAPACITY_PROVIDER` and `NONE`. + +### `instance_launch_template` + +* `ec2_instance_profile_arn` - (Required) The Amazon Resource Name (ARN) of the instance profile that Amazon ECS applies to Amazon ECS Managed Instances. This instance profile must include the necessary permissions for your tasks to access AWS services and resources. For more information, see [Amazon ECS instance profile for Managed Instances](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/instance_IAM_role.html) in the Amazon ECS Developer Guide. +* `instance_requirements` - (Optional) The instance requirements. 
You can specify the instance types and instance requirements such as vCPU count, memory, network performance, and accelerator specifications. Amazon ECS automatically selects the instances that match the specified criteria. Detailed below. +* `monitoring` - (Optional) CloudWatch provides two categories of monitoring: basic monitoring and detailed monitoring. By default, your managed instance is configured for basic monitoring. You can optionally enable detailed monitoring to help you more quickly identify and act on operational issues. You can enable or turn off detailed monitoring at launch or when the managed instance is running or stopped. For more information, see [Detailed monitoring for Amazon ECS Managed Instances](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/cloudwatch-metrics.html) in the Amazon ECS Developer Guide. Valid values are `BASIC` and `DETAILED`. +* `network_configuration` - (Required) The network configuration for Amazon ECS Managed Instances. This specifies the subnets and security groups that instances use for network connectivity. Detailed below. +* `storage_configuration` - (Optional) The storage configuration for Amazon ECS Managed Instances. This defines the root volume size and type for the instances. Detailed below. + +### `network_configuration` + +* `security_groups` - (Optional) The list of security group IDs to apply to Amazon ECS Managed Instances. These security groups control the network traffic allowed to and from the instances. +* `subnets` - (Required) The list of subnet IDs where Amazon ECS can launch Amazon ECS Managed Instances. Instances are distributed across the specified subnets for high availability. All subnets must be in the same VPC. + +### `storage_configuration` + +* `storage_size_gib` - (Required) The size of the tasks volume in GiB. Must be at least 1. + +### `instance_requirements` + +* `accelerator_count` - (Optional) The minimum and maximum number of accelerators for the instance types. 
This is used when you need instances with specific numbers of GPUs or other accelerators. +* `accelerator_manufacturers` - (Optional) The accelerator manufacturers to include. You can specify `nvidia`, `amd`, `amazon-web-services`, `xilinx`, or `habana` depending on your accelerator requirements. Valid values are `amazon-web-services`, `amd`, `nvidia`, `xilinx`, `habana`. +* `accelerator_names` - (Optional) The specific accelerator names to include. For example, you can specify `a100`, `v100`, `k80`, or other specific accelerator models. Valid values are `a100`, `inferentia`, `k520`, `k80`, `m60`, `radeon-pro-v520`, `t4`, `vu9p`, `v100`, `a10g`, `h100`, `t4g`. +* `accelerator_total_memory_mib` - (Optional) The minimum and maximum total accelerator memory in mebibytes (MiB). This is important for GPU workloads that require specific amounts of video memory. +* `accelerator_types` - (Optional) The accelerator types to include. You can specify `gpu` for graphics processing units, `fpga` for field programmable gate arrays, or `inference` for machine learning inference accelerators. Valid values are `gpu`, `fpga`, `inference`. +* `allowed_instance_types` - (Optional) The instance types to include in the selection. When specified, Amazon ECS only considers these instance types, subject to the other requirements specified. Maximum of 400 instance types. You can specify instance type patterns using wildcards (e.g., `m5.*`). +* `bare_metal` - (Optional) Indicates whether to include bare metal instance types. Set to `included` to allow bare metal instances, `excluded` to exclude them, or `required` to use only bare metal instances. Valid values are `included`, `excluded`, `required`. +* `baseline_ebs_bandwidth_mbps` - (Optional) The minimum and maximum baseline Amazon EBS bandwidth in megabits per second (Mbps). This is important for workloads with high storage I/O requirements. 
+* `burstable_performance` - (Optional) Indicates whether to include burstable performance instance types (T2, T3, T3a, T4g). Set to `included` to allow burstable instances, `excluded` to exclude them, or `required` to use only burstable instances. Valid values are `included`, `excluded`, `required`. +* `cpu_manufacturers` - (Optional) The CPU manufacturers to include or exclude. You can specify `intel`, `amd`, or `amazon-web-services` to control which CPU types are used for your workloads. Valid values are `intel`, `amd`, `amazon-web-services`. +* `excluded_instance_types` - (Optional) The instance types to exclude from selection. Use this to prevent Amazon ECS from selecting specific instance types that may not be suitable for your workloads. Maximum of 400 instance types. +* `instance_generations` - (Optional) The instance generations to include. You can specify `current` to use the latest generation instances, or `previous` to include previous generation instances for cost optimization. Valid values are `current`, `previous`. +* `local_storage` - (Optional) Indicates whether to include instance types with local storage. Set to `included` to allow local storage, `excluded` to exclude it, or `required` to use only instances with local storage. Valid values are `included`, `excluded`, `required`. +* `local_storage_types` - (Optional) The local storage types to include. You can specify `hdd` for hard disk drives, `ssd` for solid state drives, or both. Valid values are `hdd`, `ssd`. +* `max_spot_price_as_percentage_of_optimal_on_demand_price` - (Optional) The maximum price for Spot instances as a percentage of the optimal On-Demand price. This provides more precise cost control for Spot instance selection. +* `memory_gib_per_vcpu` - (Optional) The minimum and maximum amount of memory per vCPU in gibibytes (GiB). This helps ensure that instance types have the appropriate memory-to-CPU ratio for your workloads. 
+* `memory_mib` - (Required) The minimum and maximum amount of memory in mebibytes (MiB) for the instance types. Amazon ECS selects instance types that have memory within this range. +* `network_bandwidth_gbps` - (Optional) The minimum and maximum network bandwidth in gigabits per second (Gbps). This is crucial for network-intensive workloads that require high throughput. +* `network_interface_count` - (Optional) The minimum and maximum number of network interfaces for the instance types. This is useful for workloads that require multiple network interfaces. +* `on_demand_max_price_percentage_over_lowest_price` - (Optional) The price protection threshold for On-Demand Instances, as a percentage higher than an identified On-Demand price. The identified On-Demand price is the price of the lowest priced current generation C, M, or R instance type with your specified attributes. When Amazon ECS selects instance types with your attributes, it will exclude instance types whose price exceeds your specified threshold. +* `require_hibernate_support` - (Optional) Indicates whether the instance types must support hibernation. When set to `true`, only instance types that support hibernation are selected. +* `spot_max_price_percentage_over_lowest_price` - (Optional) The maximum price for Spot instances as a percentage over the lowest priced On-Demand instance. This helps control Spot instance costs while maintaining access to capacity. +* `total_local_storage_gb` - (Optional) The minimum and maximum total local storage in gigabytes (GB) for instance types with local storage. +* `vcpu_count` - (Required) The minimum and maximum number of vCPUs for the instance types. Amazon ECS selects instance types that have vCPU counts within this range. 
+ ## Attribute Reference This resource exports the following attributes in addition to the arguments above: @@ -77,6 +176,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_ecs_capacity_provider.example + identity = { + "arn" = "arn:aws:ecs:us-west-2:123456789012:capacity-provider/example" + } +} + +resource "aws_ecs_capacity_provider" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the ECS capacity provider. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import ECS Capacity Providers using the `arn`. For example: ```terraform diff --git a/website/docs/r/ecs_service.html.markdown b/website/docs/r/ecs_service.html.markdown index 4e0af7f0f23f..e23197b8b597 100644 --- a/website/docs/r/ecs_service.html.markdown +++ b/website/docs/r/ecs_service.html.markdown @@ -102,6 +102,24 @@ resource "aws_ecs_service" "example" { } ``` +### Blue/Green Deployment with SIGINT Rollback + +```terraform +resource "aws_ecs_service" "example" { + name = "example" + cluster = aws_ecs_cluster.example.id + + # ... other configurations ... + + deployment_configuration { + strategy = "BLUE_GREEN" + } + + sigint_rollback = true + wait_for_steady_state = true +} +``` + ### Redeploy Service On Every Apply The key used with `triggers` is arbitrary. @@ -128,10 +146,11 @@ The following arguments are optional: * `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `alarms` - (Optional) Information about the CloudWatch alarms. [See below](#alarms). -* `availability_zone_rebalancing` - (Optional) ECS automatically redistributes tasks within a service across Availability Zones (AZs) to mitigate the risk of impaired application availability due to underlying infrastructure failures and task lifecycle activities. The valid values are `ENABLED` and `DISABLED`. Defaults to `DISABLED`. -* `capacity_provider_strategy` - (Optional) Capacity provider strategies to use for the service. Can be one or more. These can be updated without destroying and recreating the service only if `force_new_deployment = true` and not changing from 0 `capacity_provider_strategy` blocks to greater than 0, or vice versa. [See below](#capacity_provider_strategy). Conflicts with `launch_type`. +* `availability_zone_rebalancing` - (Optional) ECS automatically redistributes tasks within a service across Availability Zones (AZs) to mitigate the risk of impaired application availability due to underlying infrastructure failures and task lifecycle activities. The valid values are `ENABLED` and `DISABLED`. When creating a new service, if no value is specified, it defaults to `ENABLED` if the service is compatible with AvailabilityZoneRebalancing. When updating an existing service, if no value is specified it defaults to the existing service's AvailabilityZoneRebalancing value. If the service never had an AvailabilityZoneRebalancing value set, Amazon ECS treats this as `DISABLED`. +* `capacity_provider_strategy` - (Optional) Capacity provider strategies to use for the service. Can be one or more. Updating this argument requires `force_new_deployment = true`. [See below](#capacity_provider_strategy). Conflicts with `launch_type`. * `cluster` - (Optional) ARN of an ECS cluster. 
* `deployment_circuit_breaker` - (Optional) Configuration block for deployment circuit breaker. [See below](#deployment_circuit_breaker). +* `deployment_configuration` - (Optional) Configuration block for deployment settings. [See below](#deployment_configuration). * `deployment_controller` - (Optional) Configuration block for deployment controller configuration. [See below](#deployment_controller). * `deployment_maximum_percent` - (Optional) Upper limit (as a percentage of the service's desiredCount) of the number of running tasks that can be running in a service during a deployment. Not valid when using the `DAEMON` scheduling strategy. * `deployment_minimum_healthy_percent` - (Optional) Lower limit (as a percentage of the service's desiredCount) of the number of running tasks that must remain running and healthy in a service during a deployment. @@ -152,6 +171,7 @@ The following arguments are optional: * `scheduling_strategy` - (Optional) Scheduling strategy to use for the service. The valid values are `REPLICA` and `DAEMON`. Defaults to `REPLICA`. Note that [*Tasks using the Fargate launch type or the `CODE_DEPLOY` or `EXTERNAL` deployment controller types don't support the `DAEMON` scheduling strategy*](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_CreateService.html). * `service_connect_configuration` - (Optional) ECS Service Connect configuration for this service to discover and connect to services, and be discovered by, and connected from, other services within a namespace. [See below](#service_connect_configuration). * `service_registries` - (Optional) Service discovery registries for the service. The maximum number of `service_registries` blocks is `1`. [See below](#service_registries). +* `sigint_rollback` - (Optional) Whether to enable graceful termination of deployments using SIGINT signals. When enabled, allows customers to safely cancel an in-progress deployment and automatically trigger a rollback to the previous stable state. 
Defaults to `false`. Only applicable when using `ECS` deployment controller and requires `wait_for_steady_state = true`. * `tags` - (Optional) Key-value map of resource tags. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. * `task_definition` - (Optional) Family and revision (`family:revision`) or full ARN of the task definition that you want to run in your service. Required unless using the `EXTERNAL` deployment controller. If a revision is not specified, the latest `ACTIVE` revision is used. * `triggers` - (Optional) Map of arbitrary keys and values that, when changed, will trigger an in-place update (redeployment). Useful with `plantimestamp()`. See example above. @@ -206,6 +226,23 @@ The `capacity_provider_strategy` configuration block supports the following: * `capacity_provider` - (Required) Short name of the capacity provider. * `weight` - (Required) Relative percentage of the total number of launched tasks that should use the specified capacity provider. +### deployment_configuration + +The `deployment_configuration` configuration block supports the following: + +* `strategy` - (Optional) Type of deployment strategy. Valid values: `ROLLING`, `BLUE_GREEN`. Default: `ROLLING`. +* `bake_time_in_minutes` - (Optional) Number of minutes to wait after a new deployment is fully provisioned before terminating the old deployment. Only used when `strategy` is set to `BLUE_GREEN`. +* `lifecycle_hook` - (Optional) Configuration block for lifecycle hooks that are invoked during deployments. [See below](#lifecycle_hook). + +### lifecycle_hook + +The `lifecycle_hook` configuration block supports the following: + +* `hook_target_arn` - (Required) ARN of the Lambda function to invoke for the lifecycle hook. 
+* `role_arn` - (Required) ARN of the IAM role that grants the service permission to invoke the Lambda function. +* `lifecycle_stages` - (Required) Stages during the deployment when the hook should be invoked. Valid values: `RECONCILE_SERVICE`, `PRE_SCALE_UP`, `POST_SCALE_UP`, `TEST_TRAFFIC_SHIFT`, `POST_TEST_TRAFFIC_SHIFT`, `PRODUCTION_TRAFFIC_SHIFT`, `POST_PRODUCTION_TRAFFIC_SHIFT`. +* `hook_details` - (Optional) Custom parameters that Amazon ECS will pass to the hook target invocations (such as a Lambda function). + ### deployment_circuit_breaker The `deployment_circuit_breaker` configuration block supports the following: @@ -227,9 +264,19 @@ The `deployment_controller` configuration block supports the following: * `target_group_arn` - (Required for ALB/NLB) ARN of the Load Balancer target group to associate with the service. * `container_name` - (Required) Name of the container to associate with the load balancer (as it appears in a container definition). * `container_port` - (Required) Port on the container to associate with the load balancer. +* `advanced_configuration` - (Optional) Configuration block for Blue/Green deployment settings. Required when using `BLUE_GREEN` deployment strategy. [See below](#advanced_configuration). -> **Version note:** Multiple `load_balancer` configuration block support was added in Terraform AWS Provider version 2.22.0. This allows configuration of [ECS service support for multiple target groups](https://aws.amazon.com/about-aws/whats-new/2019/07/amazon-ecs-services-now-support-multiple-load-balancer-target-groups/). +### advanced_configuration + +The `advanced_configuration` configuration block supports the following: + +* `alternate_target_group_arn` - (Required) ARN of the alternate target group to use for Blue/Green deployments. +* `production_listener_rule` - (Required) ARN of the listener rule that routes production traffic. +* `role_arn` - (Required) ARN of the IAM role that allows ECS to manage the target groups. 
+* `test_listener_rule` - (Optional) ARN of the listener rule that routes test traffic. + ### network_configuration `network_configuration` support the following: @@ -296,7 +343,7 @@ For more information, see [Task Networking](https://docs.aws.amazon.com/AmazonEC `service` supports the following: -* `client_alias` - (Optional) List of client aliases for this Service Connect service. You use these to assign names that can be used by client applications. The maximum number of client aliases that you can have in this list is 1. [See below](#client_alias). +* `client_alias` - (Optional) List of client aliases for this Service Connect service. You use these to assign names that can be used by client applications. For each service block where enabled is true, exactly one `client_alias` with one `port` should be specified. [See below](#client_alias). * `discovery_name` - (Optional) Name of the new AWS Cloud Map service that Amazon ECS creates for this Amazon ECS service. * `ingress_port_override` - (Optional) Port number for the Service Connect proxy to listen on. * `port_name` - (Required) Name of one of the `portMappings` from all the containers in the task definition of this Amazon ECS service. @@ -330,6 +377,26 @@ For more information, see [Task Networking](https://docs.aws.amazon.com/AmazonEC * `dns_name` - (Optional) Name that you use in the applications of client tasks to connect to this service. * `port` - (Required) Listening port number for the Service Connect proxy. This port is available inside of all of the tasks within the same namespace. +* `test_traffic_rules` - (Optional) Configuration block for test traffic routing rules. [See below](#test_traffic_rules). + +### test_traffic_rules + +The `test_traffic_rules` configuration block supports the following: + +* `header` - (Optional) Configuration block for header-based routing rules. [See below](#header). 
+ +### header + +The `header` configuration block supports the following: + +* `name` - (Required) Name of the HTTP header to match. +* `value` - (Required) Configuration block for header value matching criteria. [See below](#value). + +### value + +The `value` configuration block supports the following: + +* `exact` - (Required) Exact string value to match in the header. ### tag_specifications diff --git a/website/docs/r/efs_mount_target.html.markdown b/website/docs/r/efs_mount_target.html.markdown index 51c03ab784ed..95d1d65048fb 100644 --- a/website/docs/r/efs_mount_target.html.markdown +++ b/website/docs/r/efs_mount_target.html.markdown @@ -38,6 +38,8 @@ This resource supports the following arguments: * `subnet_id` - (Required) The ID of the subnet to add the mount target in. * `ip_address` - (Optional) The address (within the address range of the specified subnet) at which the file system may be mounted via the mount target. +* `ip_address_type` - (Optional) IP address type for the mount target. Valid values are `IPV4_ONLY` (only IPv4 addresses), `IPV6_ONLY` (only IPv6 addresses), and `DUAL_STACK` (dual-stack, both IPv4 and IPv6 addresses). Defaults to `IPV4_ONLY`. +* `ipv6_address` - (Optional) IPv6 address to use. Valid only when `ip_address_type` is set to `IPV6_ONLY` or `DUAL_STACK`. * `security_groups` - (Optional) A list of up to 5 VPC security group IDs (that must be for the same VPC as subnet specified) in effect for the mount target. diff --git a/website/docs/r/eks_addon.html.markdown b/website/docs/r/eks_addon.html.markdown index 3f5cb3d92c2f..438f8f44b227 100644 --- a/website/docs/r/eks_addon.html.markdown +++ b/website/docs/r/eks_addon.html.markdown @@ -38,13 +38,14 @@ Custom add-on configuration can be passed using `configuration_values` as a sing ~> **Note:** `configuration_values` is a single JSON string should match the valid JSON schema for each add-on with specific version. 
-To find the correct JSON schema for each add-on can be extracted using [describe-addon-configuration](https://docs.aws.amazon.com/cli/latest/reference/eks/describe-addon-configuration.html) call. -This below is an example for extracting the `configuration_values` schema for `coredns`. +You can use [describe-addon-configuration](https://docs.aws.amazon.com/cli/latest/reference/eks/describe-addon-configuration.html) to extract each add-on's JSON schema. +Here's an example command to extract the `configuration_values` schema for `coredns`. ```bash - aws eks describe-addon-configuration \ - --addon-name coredns \ - --addon-version v1.10.1-eksbuild.1 +aws eks describe-addon-configuration \ + --addon-name coredns \ + --addon-version v1.10.1-eksbuild.1 \ + | jq -r .configurationSchema | jq . ``` Example to create a `coredns` managed addon with custom `configuration_values`. diff --git a/website/docs/r/eks_cluster.html.markdown b/website/docs/r/eks_cluster.html.markdown index 0790104dac30..389c2e193bd2 100644 --- a/website/docs/r/eks_cluster.html.markdown +++ b/website/docs/r/eks_cluster.html.markdown @@ -344,15 +344,16 @@ The following arguments are required: The following arguments are optional: -* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `access_config` - (Optional) Configuration block for the access config associated with your cluster, see [Amazon EKS Access Entries](https://docs.aws.amazon.com/eks/latest/userguide/access-entries.html). [Detailed](#access_config) below. * `bootstrap_self_managed_addons` - (Optional) Install default unmanaged add-ons, such as `aws-cni`, `kube-proxy`, and CoreDNS during cluster creation. If `false`, you must manually install desired add-ons. 
Changing this value will force a new cluster to be created. Defaults to `true`. * `compute_config` - (Optional) Configuration block with compute configuration for EKS Auto Mode. [Detailed](#compute_config) below. +* `deletion_protection` - (Optional) Whether to enable deletion protection for the cluster. When enabled, the cluster cannot be deleted unless deletion protection is first disabled. Default: `false`. * `enabled_cluster_log_types` - (Optional) List of the desired control plane logging to enable. For more information, see [Amazon EKS Control Plane Logging](https://docs.aws.amazon.com/eks/latest/userguide/control-plane-logs.html). * `encryption_config` - (Optional) Configuration block with encryption configuration for the cluster. [Detailed](#encryption_config) below. * `force_update_version` - (Optional) Force version update by overriding upgrade-blocking readiness checks when updating a cluster. * `kubernetes_network_config` - (Optional) Configuration block with kubernetes network configuration for the cluster. [Detailed](#kubernetes_network_config) below. If removed, Terraform will only perform drift detection if a configuration value is provided. * `outpost_config` - (Optional) Configuration block representing the configuration of your local Amazon EKS cluster on an AWS Outpost. This block isn't available for creating Amazon EKS clusters on the AWS cloud. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `remote_network_config` - (Optional) Configuration block with remote network configuration for EKS Hybrid Nodes. [Detailed](#remote_network_config) below. * `storage_config` - (Optional) Configuration block with storage configuration for EKS Auto Mode. [Detailed](#storage_config) below. 
* `tags` - (Optional) Key-value map of resource tags. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. @@ -365,7 +366,7 @@ The following arguments are optional: The `access_config` configuration block supports the following arguments: * `authentication_mode` - (Optional) The authentication mode for the cluster. Valid values are `CONFIG_MAP`, `API` or `API_AND_CONFIG_MAP` -* `bootstrap_cluster_creator_admin_permissions` - (Optional) Whether or not to bootstrap the access config values to the cluster. Default is `false`. +* `bootstrap_cluster_creator_admin_permissions` - (Optional) Whether or not to bootstrap the access config values to the cluster. Default is `true`. ### compute_config diff --git a/website/docs/r/elastic_beanstalk_application_version.html.markdown b/website/docs/r/elastic_beanstalk_application_version.html.markdown index 7025c44d9a0b..0cc4baeb8a7e 100644 --- a/website/docs/r/elastic_beanstalk_application_version.html.markdown +++ b/website/docs/r/elastic_beanstalk_application_version.html.markdown @@ -43,7 +43,7 @@ resource "aws_elastic_beanstalk_application_version" "default" { application = "tf-test-name" description = "application version created by terraform" bucket = aws_s3_bucket.default.id - key = aws_s3_object.default.id + key = aws_s3_object.default.key } ``` diff --git a/website/docs/r/elastic_beanstalk_configuration_template.html.markdown b/website/docs/r/elastic_beanstalk_configuration_template.html.markdown index 60ebd52353ba..7751908554b0 100644 --- a/website/docs/r/elastic_beanstalk_configuration_template.html.markdown +++ b/website/docs/r/elastic_beanstalk_configuration_template.html.markdown @@ -15,16 +15,16 @@ application with the same configuration settings. 
## Example Usage ```terraform -resource "aws_elastic_beanstalk_application" "tftest" { - name = "tf-test-name" - description = "tf-test-desc" -} - -resource "aws_elastic_beanstalk_configuration_template" "tf_template" { +resource "aws_elastic_beanstalk_configuration_template" "example" { name = "tf-test-template-config" - application = aws_elastic_beanstalk_application.tftest.name + application = aws_elastic_beanstalk_application.example.name solution_stack_name = "64bit Amazon Linux 2015.09 v2.0.8 running Go 1.4" } + +resource "aws_elastic_beanstalk_application" "example" { + name = "tf-test-name" + description = "tf-test-desc" +} ``` ## Argument Reference @@ -46,20 +46,13 @@ off of. Example stacks can be found in the [Amazon API documentation][1] The `setting` field supports the following format: -* `namespace` - unique namespace identifying the option's associated AWS resource -* `name` - name of the configuration option -* `value` - value for the configuration option +* `namespace` - (Required) Unique namespace identifying the option's associated AWS resource +* `name` - (Required) Name of the configuration option +* `value` - (Required) Value for the configuration option * `resource` - (Optional) resource name for [scheduled action](https://docs.aws.amazon.com/elasticbeanstalk/latest/dg/command-options-general.html#command-options-general-autoscalingscheduledaction) ## Attribute Reference -This resource exports the following attributes in addition to the arguments above: - -* `name` -* `application` -* `description` -* `environment_id` -* `option_settings` -* `solution_stack_name` +This resource exports no additional attributes. 
[1]: https://docs.aws.amazon.com/elasticbeanstalk/latest/dg/concepts.platforms.html diff --git a/website/docs/r/elastic_beanstalk_environment.html.markdown b/website/docs/r/elastic_beanstalk_environment.html.markdown index c0073d489324..24db1962f353 100644 --- a/website/docs/r/elastic_beanstalk_environment.html.markdown +++ b/website/docs/r/elastic_beanstalk_environment.html.markdown @@ -18,16 +18,16 @@ Environments are often things such as `development`, `integration`, or ## Example Usage ```terraform -resource "aws_elastic_beanstalk_application" "tftest" { - name = "tf-test-name" - description = "tf-test-desc" -} - -resource "aws_elastic_beanstalk_environment" "tfenvtest" { +resource "aws_elastic_beanstalk_environment" "example" { name = "tf-test-name" - application = aws_elastic_beanstalk_application.tftest.name + application = aws_elastic_beanstalk_application.example.name solution_stack_name = "64bit Amazon Linux 2015.03 v2.0.3 running Go 1.4" } + +resource "aws_elastic_beanstalk_application" "example" { + name = "tf-test-name" + description = "tf-test-desc" +} ``` ## Argument Reference @@ -72,9 +72,9 @@ for supported options and examples. 
The `setting` and `all_settings` mappings support the following format: -* `namespace` - unique namespace identifying the option's associated AWS resource -* `name` - name of the configuration option -* `value` - value for the configuration option +* `namespace` - (Required) Unique namespace identifying the option's associated AWS resource +* `name` - (Required) Name of the configuration option +* `value` - (Required) Value for the configuration option * `resource` - (Optional) resource name for [scheduled action](https://docs.aws.amazon.com/elasticbeanstalk/latest/dg/command-options-general.html#command-options-general-autoscalingscheduledaction) ### Example With Options diff --git a/website/docs/r/elasticache_global_replication_group.html.markdown b/website/docs/r/elasticache_global_replication_group.html.markdown index 5e2ddaa606c4..7e77665e01e1 100644 --- a/website/docs/r/elasticache_global_replication_group.html.markdown +++ b/website/docs/r/elasticache_global_replication_group.html.markdown @@ -50,8 +50,7 @@ The initial Redis version is determined by the version set on the primary replic However, once it is part of a Global Replication Group, the Global Replication Group manages the version of all member replication groups. -The member replication groups must have [`lifecycle.ignore_changes[engine_version]`](https://www.terraform.io/language/meta-arguments/lifecycle) set, -or Terraform will always return a diff. +The provider is configured to ignore changes to `engine`, `engine_version` and `parameter_group_name` inside `aws_elasticache_replication_group` resources if they belong to a global replication group. 
In this example, the primary replication group will be created with Redis 6.0, @@ -75,10 +74,6 @@ resource "aws_elasticache_replication_group" "primary" { node_type = "cache.m5.large" num_cache_clusters = 1 - - lifecycle { - ignore_changes = [engine_version] - } } resource "aws_elasticache_replication_group" "secondary" { @@ -89,10 +84,6 @@ resource "aws_elasticache_replication_group" "secondary" { global_replication_group_id = aws_elasticache_global_replication_group.example.global_replication_group_id num_cache_clusters = 1 - - lifecycle { - ignore_changes = [engine_version] - } } ``` @@ -107,7 +98,12 @@ This resource supports the following arguments: See AWS documentation for information on [supported node types](https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/CacheNodes.SupportedTypes.html) and [guidance on selecting node types](https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/nodes-select-size.html). When creating, by default the Global Replication Group inherits the node type of the primary replication group. -* `engine_version` - (Optional) Redis version to use for the Global Replication Group. +* `engine` - (Optional) The name of the cache engine to be used for the clusters in this global replication group. + When creating, by default the Global Replication Group inherits the engine of the primary replication group. + If an engine is specified, the Global Replication Group and all member replication groups will be upgraded to this engine. + Valid values are `redis` or `valkey`. + Default is `redis` if `engine_version` is specified. +* `engine_version` - (Optional) Engine version to use for the Global Replication Group. When creating, by default the Global Replication Group inherits the version of the primary replication group. If a version is specified, the Global Replication Group and all member replication groups will be upgraded to this version. 
Cannot be downgraded without replacing the Global Replication Group and all member replication groups. @@ -120,7 +116,7 @@ This resource supports the following arguments: * `global_replication_group_description` - (Optional) A user-created description for the global replication group. * `num_node_groups` - (Optional) The number of node groups (shards) on the global replication group. * `parameter_group_name` - (Optional) An ElastiCache Parameter Group to use for the Global Replication Group. - Required when upgrading a major engine version, but will be ignored if left configured after the upgrade is complete. + Required when upgrading an engine or major engine version, but will be ignored if left configured after the upgrade is complete. Specifying without a major version upgrade will fail. Note that ElastiCache creates a copy of this parameter group for each member replication group. @@ -134,7 +130,6 @@ This resource exports the following attributes in addition to the arguments abov * `at_rest_encryption_enabled` - A flag that indicate whether the encryption at rest is enabled. * `auth_token_enabled` - A flag that indicate whether AuthToken (password) is enabled. * `cluster_enabled` - Indicates whether the Global Datastore is cluster enabled. -* `engine` - The name of the cache engine to be used for the clusters in this global replication group. * `global_replication_group_id` - The full ID of the global replication group. * `global_node_groups` - Set of node groups (shards) on the global replication group. Has the values: diff --git a/website/docs/r/emr_cluster.html.markdown b/website/docs/r/emr_cluster.html.markdown index 61000f843276..96a7c2853fe2 100644 --- a/website/docs/r/emr_cluster.html.markdown +++ b/website/docs/r/emr_cluster.html.markdown @@ -668,6 +668,8 @@ EOF * `unhealthy_node_replacement` - (Optional) Whether whether Amazon EMR should gracefully replace core nodes that have degraded within the cluster. Default value is `false`. 
* `visible_to_all_users` - (Optional) Whether the job flow is visible to all IAM users of the AWS account associated with the job flow. Default value is `true`. + **NOTE:** As per the [Amazon EMR API Reference](https://docs.aws.amazon.com/emr/latest/APIReference/API_RunJobFlow.html#EMR-RunJobFlow-request-VisibleToAllUsers), this argument is no longer supported. Do not set this argument, particularly to `false`, as it would lead to perpetual differences. + ### bootstrap_action * `args` - (Optional) List of command line arguments to pass to the bootstrap action script. @@ -834,7 +836,6 @@ This resource exports the following attributes in addition to the arguments abov * `release_label` - Release label for the Amazon EMR release. * `service_role` - IAM role that will be assumed by the Amazon EMR service to access AWS resources on your behalf. * `tags_all` - Map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). -* `visible_to_all_users` - Indicates whether the job flow is visible to all IAM users of the AWS account associated with the job flow. ## Import diff --git a/website/docs/r/emrserverless_application.html.markdown b/website/docs/r/emrserverless_application.html.markdown index bc42fb705c06..576f1b8ba619 100644 --- a/website/docs/r/emrserverless_application.html.markdown +++ b/website/docs/r/emrserverless_application.html.markdown @@ -74,6 +74,7 @@ This resource supports the following arguments: * `name` - (Required) The name of the application. * `network_configuration` - (Optional) The network configuration for customer VPC connectivity. * `release_label` - (Required) The EMR release version associated with the application. +* `scheduler_configuration` - (Optional) Scheduler configuration for batch and streaming jobs running on this application. Supported with release labels `emr-7.0.0` and above. 
See [scheduler_configuration Arguments](#scheduler_configuration-arguments) below. * `type` - (Required) The type of application you want to start, such as `spark` or `hive`. * `tags` - (Optional) Key-value mapping of resource tags. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. @@ -122,6 +123,14 @@ This resource supports the following arguments: * `disk` - (Optional) The disk requirements for every worker instance of the worker type. * `memory` - (Required) The memory requirements for every worker instance of the worker type. +### scheduler_configuration Arguments + +When an empty `scheduler_configuration {}` block is specified, the feature is enabled with default settings. +To disable the feature after it has been enabled, remove the block from the configuration. + +* `max_concurrent_runs` - (Optional) Maximum concurrent job runs on this application. Valid range is `1` to `1000`. Defaults to `15`. +* `queue_timeout_minutes` - (Optional) Maximum duration in minutes for the job in QUEUED state. Valid range is from `15` to `720`. Defaults to `360`. + ## Attribute Reference This resource exports the following attributes in addition to the arguments above: diff --git a/website/docs/r/finspace_kx_volume.html.markdown b/website/docs/r/finspace_kx_volume.html.markdown index e96a0edb147d..50f5cc1665ff 100644 --- a/website/docs/r/finspace_kx_volume.html.markdown +++ b/website/docs/r/finspace_kx_volume.html.markdown @@ -18,7 +18,7 @@ Terraform resource for managing an AWS FinSpace Kx Volume. 
resource "aws_finspace_kx_volume" "example" { name = "my-tf-kx-volume" environment_id = aws_finspace_kx_environment.example.id - availability_zones = "use1-az2" + availability_zones = ["use1-az2"] az_mode = "SINGLE" type = "NAS_1" nas1_configuration { diff --git a/website/docs/r/flow_log.html.markdown b/website/docs/r/flow_log.html.markdown index c0ef726e4d38..1f4ee4d517f1 100644 --- a/website/docs/r/flow_log.html.markdown +++ b/website/docs/r/flow_log.html.markdown @@ -9,7 +9,7 @@ description: |- # Resource: aws_flow_log Provides a VPC/Subnet/ENI/Transit Gateway/Transit Gateway Attachment Flow Log to capture IP traffic for a specific network -interface, subnet, or VPC. Logs are sent to a CloudWatch Log Group, a S3 Bucket, or Amazon Kinesis Data Firehose +interface, subnet, or VPC. Logs are sent to a CloudWatch Log Group, a S3 Bucket, or Amazon Data Firehose ## Example Usage @@ -68,7 +68,7 @@ resource "aws_iam_role_policy" "example" { } ``` -### Amazon Kinesis Data Firehose logging +### Amazon Data Firehose logging ```terraform resource "aws_flow_log" "example" { @@ -174,15 +174,152 @@ resource "aws_s3_bucket" "example" { } ``` +### Cross-Account Amazon Data Firehose Logging + +The following example shows how to set up a flow log in one AWS account (source) that sends logs to an Amazon Data Firehose delivery stream in another AWS account (destination). +See the [AWS Documentation](https://docs.aws.amazon.com/vpc/latest/userguide/flow-logs-firehose.html). + +```terraform +# Provider configurations +provider "aws" { + profile = "admin-src" +} + +provider "aws" { + alias = "destination_account" + profile = "admin-dst" +} + +# For source account +resource "aws_vpc" "src" { + # config... 
+} + +data "aws_iam_policy_document" "src_assume_role_policy" { + statement { + actions = ["sts:AssumeRole"] + effect = "Allow" + principals { + type = "Service" + identifiers = ["delivery.logs.amazonaws.com"] + } + } +} + +resource "aws_iam_role" "src" { + name = "tf-example-mySourceRole" + assume_role_policy = data.aws_iam_policy_document.src_assume_role_policy.json +} + +data "aws_iam_policy_document" "src_role_policy" { + statement { + effect = "Allow" + actions = ["iam:PassRole"] + resources = [aws_iam_role.src.arn] + + condition { + test = "StringEquals" + variable = "iam:PassedToService" + values = ["delivery.logs.amazonaws.com"] + } + + condition { + test = "StringLike" + variable = "iam:AssociatedResourceARN" + values = [aws_vpc.src.arn] + } + } + + statement { + effect = "Allow" + actions = [ + "logs:CreateLogDelivery", + "logs:DeleteLogDelivery", + "logs:ListLogDeliveries", + "logs:GetLogDelivery" + ] + resources = ["*"] + } + + statement { + effect = "Allow" + actions = ["sts:AssumeRole"] + resources = [aws_iam_role.dst.arn] + } +} + +resource "aws_iam_role_policy" "src_policy" { + name = "tf-example-mySourceRolePolicy" + role = aws_iam_role.src.name + policy = data.aws_iam_policy_document.src_role_policy.json +} + +resource "aws_flow_log" "src" { + log_destination_type = "kinesis-data-firehose" + log_destination = aws_kinesis_firehose_delivery_stream.dst.arn + traffic_type = "ALL" + vpc_id = aws_vpc.src.id + iam_role_arn = aws_iam_role.src.arn + deliver_cross_account_role = aws_iam_role.dst.arn +} + +# For destination account +data "aws_iam_policy_document" "dst_assume_role_policy" { + statement { + actions = ["sts:AssumeRole"] + effect = "Allow" + principals { + type = "AWS" + identifiers = [aws_iam_role.src.arn] + } + } +} + +resource "aws_iam_role" "dst" { + provider = aws.destination_account + name = "AWSLogDeliveryFirehoseCrossAccountRole" # must start with "AWSLogDeliveryFirehoseCrossAccountRole" + assume_role_policy = 
data.aws_iam_policy_document.dst_assume_role_policy.json +} + +data "aws_iam_policy_document" "dst_role_policy" { + statement { + effect = "Allow" + actions = [ + "iam:CreateServiceLinkedRole", + "firehose:TagDeliveryStream" + ] + resources = ["*"] + } +} + +resource "aws_iam_role_policy" "dst" { + provider = aws.destination_account + name = "AWSLogDeliveryFirehoseCrossAccountRolePolicy" + role = aws_iam_role.dst.name + policy = data.aws_iam_policy_document.dst_role_policy.json +} + +resource "aws_kinesis_firehose_delivery_stream" "dst" { + provider = aws.destination_account + # The tag named "LogDeliveryEnabled" must be set to "true" to allow the service-linked role "AWSServiceRoleForLogDelivery" + # to perform permitted actions on your behalf. + # See: https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/AWS-logs-infrastructure-Firehose.html + tags = { + LogDeliveryEnabled = "true" + } + # other config... +} +``` + ## Argument Reference This resource supports the following arguments: * `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `traffic_type` - (Required) The type of traffic to capture. Valid values: `ACCEPT`,`REJECT`, `ALL`. -* `deliver_cross_account_role` - (Optional) ARN of the IAM role that allows Amazon EC2 to publish flow logs across accounts. +* `deliver_cross_account_role` - (Optional) ARN of the IAM role in the destination account used for cross-account delivery of flow logs. * `eni_id` - (Optional) Elastic Network Interface ID to attach to. -* `iam_role_arn` - (Optional) ARN of the IAM role that's used to post flow logs to a CloudWatch Logs log group. +* `iam_role_arn` - (Optional) ARN of the IAM role used to post flow logs. 
Corresponds to `DeliverLogsPermissionArn` in the [AWS API](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateFlowLogs.html). * `log_destination_type` - (Optional) Logging destination type. Valid values: `cloud-watch-logs`, `s3`, `kinesis-data-firehose`. Default: `cloud-watch-logs`. * `log_destination` - (Optional) ARN of the logging destination. * `subnet_id` - (Optional) Subnet ID to attach to. diff --git a/website/docs/r/fsx_openzfs_file_system.html.markdown b/website/docs/r/fsx_openzfs_file_system.html.markdown index 9bcba93aa133..6637dcb91e56 100644 --- a/website/docs/r/fsx_openzfs_file_system.html.markdown +++ b/website/docs/r/fsx_openzfs_file_system.html.markdown @@ -50,6 +50,7 @@ The following arguments are optional: * `security_group_ids` - (Optional) A list of IDs for the security groups that apply to the specified network interfaces created for file system access. These security groups will apply to all network interfaces. * `skip_final_backup` - (Optional) When enabled, will skip the default final backup taken when the file system is deleted. This configuration must be applied separately before attempting to delete the resource to have the desired behavior. Defaults to `false`. * `storage_type` - (Optional) The filesystem storage type. Only `SSD` is supported. +* `user_and_group_quotas` - (Optional) - Specify how much storage users or groups can use on the filesystem. Maximum number of items defined by [FSx for OpenZFS Resource quota](https://docs.aws.amazon.com/fsx/latest/OpenZFSGuide/limits.html#limits-openzfs-resources-file-system). See [`user_and_group_quotas` Block](#user_and_group_quotas-block) Below. * `tags` - (Optional) A map of tags to assign to the file system. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. 
* `weekly_maintenance_start_time` - (Optional) The preferred start time (in `d:HH:MM` format) to perform weekly maintenance, in the UTC time zone. diff --git a/website/docs/r/fsx_openzfs_volume.html.markdown b/website/docs/r/fsx_openzfs_volume.html.markdown index 2e10f5ab1993..71514449609c 100644 --- a/website/docs/r/fsx_openzfs_volume.html.markdown +++ b/website/docs/r/fsx_openzfs_volume.html.markdown @@ -36,7 +36,7 @@ This resource supports the following arguments: * `origin_snapshot` - (Optional) Specifies the configuration to use when creating the OpenZFS volume. See [`origin_snapshot` Block](#origin_snapshot-block) below for details. * `storage_capacity_quota_gib` - (Optional) The maximum amount of storage in gibibytes (GiB) that the volume can use from its parent. * `storage_capacity_reservation_gib` - (Optional) The amount of storage in gibibytes (GiB) to reserve from the parent volume. -* `user_and_group_quotas` - (Optional) - Specify how much storage users or groups can use on the volume. Maximum of 100 items. See [`user_and_group_quotas` Block](#user_and_group_quotas-block) Below. +* `user_and_group_quotas` - (Optional) - Specify how much storage users or groups can use on the volume. Maximum number of items defined by [FSx for OpenZFS Resource quota](https://docs.aws.amazon.com/fsx/latest/OpenZFSGuide/limits.html#limits-openzfs-resources-file-system). See [`user_and_group_quotas` Block](#user_and_group_quotas-block) Below. * `tags` - (Optional) A map of tags to assign to the file system. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. 
### `nfs_exports` Block diff --git a/website/docs/r/fsx_s3_access_point_attachment.html.markdown b/website/docs/r/fsx_s3_access_point_attachment.html.markdown new file mode 100644 index 000000000000..ed6a91d469ea --- /dev/null +++ b/website/docs/r/fsx_s3_access_point_attachment.html.markdown @@ -0,0 +1,112 @@ +--- +subcategory: "FSx" +layout: "aws" +page_title: "AWS: aws_fsx_s3_access_point_attachment" +description: |- + Manages an Amazon FSx S3 Access Point attachment. +--- + +# Resource: aws_fsx_s3_access_point_attachment + +Manages an Amazon FSx S3 Access Point attachment. + +## Example Usage + +```terraform +resource "aws_fsx_s3_access_point_attachment" "example" { + name = "example-attachment" + type = "OPENZFS" + + openzfs_configuration { + volume_id = aws_fsx_openzfs_volume.example.id + + file_system_identity { + type = "POSIX" + + posix_user { + uid = 1001 + gid = 1001 + } + } + } +} +``` + +## Argument Reference + +The following arguments are required: + +* `name` - (Required) Name of the S3 access point. +* `openzfs_configuration` - (Required) Configuration to use when creating and attaching an S3 access point to an FSx for OpenZFS volume. See [`openzfs_configuration` Block](#openzfs_configuration-block) for details. +* `type` - (Required) Type of S3 access point. Valid values: `OPENZFS`. + +The following arguments are optional: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `s3_access_point` - (Optional) S3 access point configuration. See [`s3_access_point` Block](#s3_access_point-block) for details. 
+ +### `openzfs_configuration` Block + +The `openzfs_configuration` configuration block supports the following arguments: + +* `file_system_identity` - (Required) File system user identity to use for authorizing file read and write requests that are made using the S3 access point. See [`file_system_identity` Block](#file_system_identity-block) for details. +* `volume_id` - (Required) ID of the FSx for OpenZFS volume to which the S3 access point is attached. + +### `file_system_identity` Block + +The `file_system_identity` configuration block supports the following arguments: + +* `posix_user` - (Required) UID and GIDs of the file system POSIX user. See [`posix_user` Block](#posix_user-block) for details. +* `type` - (Required) FSx for OpenZFS user identity type. Valid values: `POSIX`. + +### `posix_user` Block + +The `posix_user` configuration block supports the following arguments: + +* `gid` - (Required) GID of the file system user. +* `secondary_gids` - (Optional) List of secondary GIDs for the file system user. +* `uid` - (Required) UID of the file system user. + +### `s3_access_point` Block + +The `s3_access_point` configuration block supports the following arguments: + +* `policy` - (Required) Access policy associated with the S3 access point configuration. +* `vpc_configuration` - (Optional) Amazon S3 restricts access to the S3 access point to requests made from the specified VPC. See [`vpc_configuration` Block](#vpc_configuration-block) for details. + +### `vpc_configuration` Block + +The `vpc_configuration` configuration block supports the following arguments: + +* `vpc_id` - (Required) VPC ID. + +## Attribute Reference + +This resource exports the following attributes in addition to the arguments above: + +* `s3_access_point_alias` - S3 access point's alias. +* `s3_access_point_arn` - S3 access point's ARN. 
+ +## Timeouts + +[Configuration options](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts): + +* `create` - (Default `15m`) +* `delete` - (Default `15m`) + +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import FSx S3 Access Point attachments using the `name`. For example: + +```terraform +import { + to = aws_fsx_s3_access_point_attachment.example + id = "example-attachment" +} +``` + +Using `terraform import`, import FSx S3 Access Point attachments using the `name`. For example: + +```console +% terraform import aws_fsx_s3_access_point_attachment.example example-attachment +``` diff --git a/website/docs/r/gamelift_fleet.html.markdown b/website/docs/r/gamelift_fleet.html.markdown index 9caa1e1326ca..48c4cb65ddc7 100644 --- a/website/docs/r/gamelift_fleet.html.markdown +++ b/website/docs/r/gamelift_fleet.html.markdown @@ -33,7 +33,7 @@ resource "aws_gamelift_fleet" "example" { This resource supports the following arguments: * `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). -* `build_id` - (Optional) ID of the GameLift Build to be deployed on the fleet. +* `build_id` - (Optional) ID of the GameLift Build to be deployed on the fleet. Conflicts with `script_id`. * `certificate_configuration` - (Optional) Prompts GameLift to generate a TLS/SSL certificate for the fleet. See [certificate_configuration](#certificate_configuration). * `description` - (Optional) Human-readable description of the fleet. * `ec2_inbound_permission` - (Optional) Range of IP addresses and port settings that permit inbound traffic to access server processes running on the fleet. See below. 
@@ -45,7 +45,7 @@ This resource supports the following arguments: * `new_game_session_protection_policy` - (Optional) Game session protection policy to apply to all instances in this fleetE.g., `FullProtection`. Defaults to `NoProtection`. * `resource_creation_limit_policy` - (Optional) Policy that limits the number of game sessions an individual player can create over a span of time for this fleet. See below. * `runtime_configuration` - (Optional) Instructions for launching server processes on each instance in the fleet. See below. -* `script_id` - (Optional) ID of the GameLift Script to be deployed on the fleet. +* `script_id` - (Optional) ID of the GameLift Script to be deployed on the fleet. Conflicts with `build_id`. * `tags` - (Optional) Key-value map of resource tags. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. ### Nested Fields diff --git a/website/docs/r/globalaccelerator_accelerator.html.markdown b/website/docs/r/globalaccelerator_accelerator.html.markdown index 2c07d01ed283..307aa167ce62 100644 --- a/website/docs/r/globalaccelerator_accelerator.html.markdown +++ b/website/docs/r/globalaccelerator_accelerator.html.markdown @@ -74,6 +74,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. 
For example: + +```terraform +import { + to = aws_globalaccelerator_accelerator.example + identity = { + "arn" = "arn:aws:globalaccelerator::123456789012:accelerator/1234abcd-abcd-1234-abcd-1234abcdefgh" + } +} + +resource "aws_globalaccelerator_accelerator" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the Global Accelerator accelerator. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Global Accelerator accelerators using the `arn`. For example: ```terraform diff --git a/website/docs/r/globalaccelerator_cross_account_attachment.html.markdown b/website/docs/r/globalaccelerator_cross_account_attachment.html.markdown index 9c4bbaa89697..3f7da5fbba59 100644 --- a/website/docs/r/globalaccelerator_cross_account_attachment.html.markdown +++ b/website/docs/r/globalaccelerator_cross_account_attachment.html.markdown @@ -69,7 +69,28 @@ This resource exports the following attributes in addition to the arguments abov ## Import -In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Global Accelerator Cross Account Attachment using the `example_id_arg`. For example: +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_globalaccelerator_cross_account_attachment.example + identity = { + "arn" = "arn:aws:globalaccelerator::123456789012:attachment/1234abcd-abcd-1234-abcd-1234abcdefgh" + } +} + +resource "aws_globalaccelerator_cross_account_attachment" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the Global Accelerator cross-account attachment. 
+ +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Global Accelerator Cross Account Attachment using the `arn`. For example: ```terraform import { @@ -78,7 +99,7 @@ import { } ``` -Using `terraform import`, import Global Accelerator Cross Account Attachment using the `example_id_arg`. For example: +Using `terraform import`, import Global Accelerator Cross Account Attachment using the `arn`. For example: ```console % terraform import aws_globalaccelerator_cross_account_attachment.example arn:aws:globalaccelerator::012345678910:attachment/01234567-abcd-8910-efgh-123456789012 diff --git a/website/docs/r/globalaccelerator_custom_routing_accelerator.html.markdown b/website/docs/r/globalaccelerator_custom_routing_accelerator.html.markdown index 5ab917567c63..70e63deb186e 100644 --- a/website/docs/r/globalaccelerator_custom_routing_accelerator.html.markdown +++ b/website/docs/r/globalaccelerator_custom_routing_accelerator.html.markdown @@ -73,6 +73,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_globalaccelerator_custom_routing_accelerator.example + identity = { + "arn" = "arn:aws:globalaccelerator::123456789012:accelerator/1234abcd-abcd-1234-abcd-1234abcdefgh" + } +} + +resource "aws_globalaccelerator_custom_routing_accelerator" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the Global Accelerator custom routing accelerator. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Global Accelerator custom routing accelerators using the `arn`. 
For example: ```terraform diff --git a/website/docs/r/globalaccelerator_custom_routing_endpoint_group.html.markdown b/website/docs/r/globalaccelerator_custom_routing_endpoint_group.html.markdown index 6d7f90c62493..4c0d234fc650 100644 --- a/website/docs/r/globalaccelerator_custom_routing_endpoint_group.html.markdown +++ b/website/docs/r/globalaccelerator_custom_routing_endpoint_group.html.markdown @@ -64,6 +64,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_globalaccelerator_custom_routing_endpoint_group.example + identity = { + "arn" = "arn:aws:globalaccelerator::123456789012:accelerator/1234abcd-abcd-1234-abcd-1234abcdefgh/listener/0123vxyz/endpoint-group/098765zyxwvu" + } +} + +resource "aws_globalaccelerator_custom_routing_endpoint_group" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the Global Accelerator custom routing endpoint group. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Global Accelerator custom routing endpoint groups using the `id`. 
For example: ```terraform diff --git a/website/docs/r/globalaccelerator_custom_routing_listener.html.markdown b/website/docs/r/globalaccelerator_custom_routing_listener.html.markdown index 25d23f5ed8d7..04502a12ff07 100644 --- a/website/docs/r/globalaccelerator_custom_routing_listener.html.markdown +++ b/website/docs/r/globalaccelerator_custom_routing_listener.html.markdown @@ -63,6 +63,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_globalaccelerator_custom_routing_listener.example + identity = { + "arn" = "arn:aws:globalaccelerator::123456789012:accelerator/1234abcd-abcd-1234-abcd-1234abcdefgh/listener/0123vxyz" + } +} + +resource "aws_globalaccelerator_custom_routing_listener" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the Global Accelerator custom routing listener. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Global Accelerator custom routing listeners using the `id`. For example: ```terraform diff --git a/website/docs/r/globalaccelerator_endpoint_group.html.markdown b/website/docs/r/globalaccelerator_endpoint_group.html.markdown index ff1a7a0a5a31..40dc43708563 100644 --- a/website/docs/r/globalaccelerator_endpoint_group.html.markdown +++ b/website/docs/r/globalaccelerator_endpoint_group.html.markdown @@ -69,6 +69,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. 
For example: + +```terraform +import { + to = aws_globalaccelerator_endpoint_group.example + identity = { + "arn" = "arn:aws:globalaccelerator::123456789012:accelerator/1234abcd-abcd-1234-abcd-1234abcdefgh/listener/0123vxyz/endpoint-group/098765zyxwvu" + } +} + +resource "aws_globalaccelerator_endpoint_group" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the Global Accelerator endpoint group. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Global Accelerator endpoint groups using the `id`. For example: ```terraform diff --git a/website/docs/r/globalaccelerator_listener.html.markdown b/website/docs/r/globalaccelerator_listener.html.markdown index 3ff57eb34d0a..38e8248bc5d3 100644 --- a/website/docs/r/globalaccelerator_listener.html.markdown +++ b/website/docs/r/globalaccelerator_listener.html.markdown @@ -68,6 +68,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_globalaccelerator_listener.example + identity = { + "arn" = "arn:aws:globalaccelerator::123456789012:accelerator/1234abcd-abcd-1234-abcd-1234abcdefgh/listener/0123vxyz" + } +} + +resource "aws_globalaccelerator_listener" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the Global Accelerator listener. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Global Accelerator listeners using the `id`. 
For example: ```terraform diff --git a/website/docs/r/glue_catalog_table.html.markdown b/website/docs/r/glue_catalog_table.html.markdown index 36433bc535b3..41dd1fe3a0ec 100644 --- a/website/docs/r/glue_catalog_table.html.markdown +++ b/website/docs/r/glue_catalog_table.html.markdown @@ -132,6 +132,7 @@ To add an index to an existing table, see the [`glue_partition_index` resource]( * `comment` - (Optional) Free-form text comment. * `name` - (Required) Name of the Partition Key. +* `parameters` - (Optional) Map of key-value pairs. * `type` - (Optional) Datatype of data in the Partition Key. ### storage_descriptor diff --git a/website/docs/r/glue_catalog_table_optimizer.html.markdown b/website/docs/r/glue_catalog_table_optimizer.html.markdown index cba03a6b94e0..f92a367bbe3c 100644 --- a/website/docs/r/glue_catalog_table_optimizer.html.markdown +++ b/website/docs/r/glue_catalog_table_optimizer.html.markdown @@ -101,15 +101,17 @@ This resource supports the following arguments: ### Orphan File Deletion Configuration * `iceberg_configuration` (Optional) - The configuration for an Iceberg orphan file deletion optimizer. - * `orphan_file_retention_period_in_days` (Optional) - The number of days that orphan files should be retained before file deletion. Defaults to `3`. * `location` (Optional) - Specifies a directory in which to look for files. You may choose a sub-directory rather than the top-level table location. Defaults to the table's location. - + * `orphan_file_retention_period_in_days` (Optional) - The number of days that orphan files should be retained before file deletion. Defaults to `3`. + * `run_rate_in_hours` (Optional) - interval in hours between orphan file deletion job runs. Defaults to `24`. + ### Retention Configuration * `iceberg_configuration` (Optional) - The configuration for an Iceberg snapshot retention optimizer. - * `snapshot_retention_period_in_days` (Optional) - The number of days to retain the Iceberg snapshots. 
Defaults to `5`, or the corresponding Iceberg table configuration field if it exists. - * `number_of_snapshots_to_retain` (Optional) - The number of Iceberg snapshots to retain within the retention period. Defaults to `1` or the corresponding Iceberg table configuration field if it exists. * `clean_expired_files` (Optional) - If set to `false`, snapshots are only deleted from table metadata, and the underlying data and metadata files are not deleted. Defaults to `false`. + * `number_of_snapshots_to_retain` (Optional) - The number of Iceberg snapshots to retain within the retention period. Defaults to `1` or the corresponding Iceberg table configuration field if it exists. + * `run_rate_in_hours` (Optional) - Interval in hours between retention job runs. Defaults to `24`. + * `snapshot_retention_period_in_days` (Optional) - The number of days to retain the Iceberg snapshots. Defaults to `5`, or the corresponding Iceberg table configuration field if it exists. ## Attribute Reference diff --git a/website/docs/r/glue_job.html.markdown b/website/docs/r/glue_job.html.markdown index 8574bbd4065f..dd65cf9cf575 100644 --- a/website/docs/r/glue_job.html.markdown +++ b/website/docs/r/glue_job.html.markdown @@ -216,36 +216,29 @@ resource "aws_glue_job" "example" { This resource supports the following arguments: -* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `command` - (Required) The command of the job. Defined below. * `connections` - (Optional) The list of connections used for this job. * `default_arguments` - (Optional) The map of default arguments for this job. You can specify arguments here that your own job-execution script consumes, as well as arguments that AWS Glue itself consumes. 
For information about how to specify and consume your own Job arguments, see the [Calling AWS Glue APIs in Python](http://docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-python-calling.html) topic in the developer guide. For information about the key-value pairs that AWS Glue consumes to set up your job, see the [Special Parameters Used by AWS Glue](http://docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-python-glue-arguments.html) topic in the developer guide. -* `non_overridable_arguments` - (Optional) Non-overridable arguments for this job, specified as name-value pairs. * `description` - (Optional) Description of the job. +* `execution_class` - (Optional) Indicates whether the job is run with a standard or flexible execution class. The standard execution class is ideal for time-sensitive workloads that require fast job startup and dedicated resources. Valid value: `FLEX`, `STANDARD`. * `execution_property` - (Optional) Execution property of the job. Defined below. * `glue_version` - (Optional) The version of glue to use, for example "1.0". Ray jobs should set this to 4.0 or greater. For information about available versions, see the [AWS Glue Release Notes](https://docs.aws.amazon.com/glue/latest/dg/release-notes.html). * `job_mode` - (Optional) Describes how a job was created. Valid values are `SCRIPT`, `NOTEBOOK` and `VISUAL`. * `job_run_queuing_enabled` - (Optional) Specifies whether job run queuing is enabled for the job runs for this job. A value of true means job run queuing is enabled for the job runs. If false or not populated, the job runs will not be considered for queueing. -* `execution_class` - (Optional) Indicates whether the job is run with a standard or flexible execution class. The standard execution class is ideal for time-sensitive workloads that require fast job startup and dedicated resources. Valid value: `FLEX`, `STANDARD`. 
* `maintenance_window` - (Optional) Specifies the day of the week and hour for the maintenance window for streaming jobs. * `max_capacity` - (Optional) The maximum number of AWS Glue data processing units (DPUs) that can be allocated when this job runs. `Required` when `pythonshell` is set, accept either `0.0625` or `1.0`. Use `number_of_workers` and `worker_type` arguments instead with `glue_version` `2.0` and above. * `max_retries` - (Optional) The maximum number of times to retry this job if it fails. * `name` - (Required) The name you assign to this job. It must be unique in your account. +* `non_overridable_arguments` - (Optional) Non-overridable arguments for this job, specified as name-value pairs. * `notification_property` - (Optional) Notification property of the job. Defined below. +* `number_of_workers` - (Optional) The number of workers of a defined workerType that are allocated when a job runs. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `role_arn` - (Required) The ARN of the IAM role associated with this job. * `tags` - (Optional) Key-value map of resource tags. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. * `timeout` - (Optional) The job timeout in minutes. The default is 2880 minutes (48 hours) for `glueetl` and `pythonshell` jobs, and null (unlimited) for `gluestreaming` jobs. * `security_configuration` - (Optional) The name of the Security Configuration to be associated with the job. 
* `source_control_details` - (Optional) The details for a source control configuration for a job, allowing synchronization of job artifacts to or from a remote repository. Defined below. -* `worker_type` - (Optional) The type of predefined worker that is allocated when a job runs. Accepts a value of Standard, G.1X, G.2X, or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs. - * For the Standard worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker. - * For the G.1X worker type, each worker maps to 1 DPU (4 vCPU, 16 GB of memory, 64 GB disk), and provides 1 executor per worker. Recommended for memory-intensive jobs. - * For the G.2X worker type, each worker maps to 2 DPU (8 vCPU, 32 GB of memory, 128 GB disk), and provides 1 executor per worker. Recommended for memory-intensive jobs. - * For the G.4X worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of memory) with 256GB disk (approximately 235GB free), and provides 1 executor per worker. Recommended for memory-intensive jobs. Only available for Glue version 3.0. Available AWS Regions: US East (Ohio), US East (N. Virginia), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm). - * For the G.8X worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of memory) with 512GB disk (approximately 487GB free), and provides 1 executor per worker. Recommended for memory-intensive jobs. Only available for Glue version 3.0. Available AWS Regions: US East (Ohio), US East (N. Virginia), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm). - * For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPU, 4GB of memory, 64 GB disk), and provides 1 executor per worker. Recommended for low volume streaming jobs. 
Only available for Glue version 3.0. - * For the Z.2X worker type, each worker maps to 2 M-DPU (8vCPU, 64 GB of m emory, 128 GB disk), and provides up to 8 Ray workers based on the autoscaler. -* `number_of_workers` - (Optional) The number of workers of a defined workerType that are allocated when a job runs. +* `worker_type` - (Optional) The type of predefined worker that is allocated when a job runs. Valid values: `Standard`, `G.1X`, `G.2X`, `G.025X`, `G.4X`, `G.8X`, `G.12X`, `G.16X`, `R.1X`, `R.2X`, `R.4X`, `R.8X`, `Z.2X` (Ray jobs). See the [AWS documentation](https://docs.aws.amazon.com/glue/latest/dg/worker-types.html) for details. ### command Argument Reference diff --git a/website/docs/r/glue_registry.html.markdown b/website/docs/r/glue_registry.html.markdown index 2da74013a799..2c88b97f28e2 100644 --- a/website/docs/r/glue_registry.html.markdown +++ b/website/docs/r/glue_registry.html.markdown @@ -37,6 +37,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_glue_registry.example + identity = { + "arn" = "arn:aws:glue:us-west-2:123456789012:registry/example" + } +} + +resource "aws_glue_registry" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the Glue registry. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Glue Registries using `arn`. 
For example: ```terraform diff --git a/website/docs/r/glue_schema.html.markdown b/website/docs/r/glue_schema.html.markdown index f9313b8d0bc9..12d4dbe0f35c 100644 --- a/website/docs/r/glue_schema.html.markdown +++ b/website/docs/r/glue_schema.html.markdown @@ -49,6 +49,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_glue_schema.example + identity = { + "arn" = "arn:aws:glue:us-west-2:123456789012:schema/example-registry/example-schema" + } +} + +resource "aws_glue_schema" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the Glue schema. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Glue Registries using `arn`. For example: ```terraform diff --git a/website/docs/r/iam_openid_connect_provider.html.markdown b/website/docs/r/iam_openid_connect_provider.html.markdown index a5ac615be67c..5c3d93e335ae 100644 --- a/website/docs/r/iam_openid_connect_provider.html.markdown +++ b/website/docs/r/iam_openid_connect_provider.html.markdown @@ -56,6 +56,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. 
For example: + +```terraform +import { + to = aws_iam_openid_connect_provider.example + identity = { + "arn" = "arn:aws:iam::123456789012:oidc-provider/example.com" + } +} + +resource "aws_iam_openid_connect_provider" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the IAM OpenID Connect provider. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import IAM OpenID Connect Providers using the `arn`. For example: ```terraform diff --git a/website/docs/r/iam_policy.html.markdown b/website/docs/r/iam_policy.html.markdown index 5b8cdf13f03c..9a7289d47d5f 100644 --- a/website/docs/r/iam_policy.html.markdown +++ b/website/docs/r/iam_policy.html.markdown @@ -60,6 +60,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_iam_policy.example + identity = { + "arn" = "arn:aws:iam::123456789012:policy/UsersManageOwnCredentials" + } +} + +resource "aws_iam_policy" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the IAM policy. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import IAM Policies using the `arn`. 
For example: ```terraform diff --git a/website/docs/r/iam_role.html.markdown b/website/docs/r/iam_role.html.markdown index f80acdb44431..6893700c1cf1 100644 --- a/website/docs/r/iam_role.html.markdown +++ b/website/docs/r/iam_role.html.markdown @@ -223,11 +223,36 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_iam_role.example + identity = { + name = "developer_name" + } +} + +resource "aws_iam_role" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `name` (String) Name of the IAM role. + +#### Optional + +* `account_id` (String) AWS Account where this resource is managed. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import IAM Roles using the `name`. For example: ```terraform import { - to = aws_iam_role.developer + to = aws_iam_role.example id = "developer_name" } ``` @@ -235,5 +260,5 @@ import { Using `terraform import`, import IAM Roles using the `name`. For example: ```console -% terraform import aws_iam_role.developer developer_name +% terraform import aws_iam_role.example developer_name ``` diff --git a/website/docs/r/iam_role_policy.html.markdown b/website/docs/r/iam_role_policy.html.markdown index 6a8311948db4..80a77ad77da1 100644 --- a/website/docs/r/iam_role_policy.html.markdown +++ b/website/docs/r/iam_role_policy.html.markdown @@ -75,11 +75,38 @@ This resource exports no additional attributes. ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. 
For example: + +```terraform +import { + to = aws_iam_role_policy.example + identity = { + role = "role_of_mypolicy_name" + name = "mypolicy_name" + } +} + +resource "aws_iam_role_policy" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `role` (String) Name of the IAM role. +* `name` (String) Name of the role policy. + +#### Optional + +* `account_id` (String) AWS Account where this resource is managed. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import IAM Role Policies using the `role_name:role_policy_name`. For example: ```terraform import { - to = aws_iam_role_policy.mypolicy + to = aws_iam_role_policy.example id = "role_of_mypolicy_name:mypolicy_name" } ``` @@ -87,5 +114,5 @@ import { Using `terraform import`, import IAM Role Policies using the `role_name:role_policy_name`. For example: ```console -% terraform import aws_iam_role_policy.mypolicy role_of_mypolicy_name:mypolicy_name +% terraform import aws_iam_role_policy.example role_of_mypolicy_name:mypolicy_name ``` diff --git a/website/docs/r/iam_role_policy_attachment.html.markdown b/website/docs/r/iam_role_policy_attachment.html.markdown index 2e900f5db313..8c8281ea6359 100644 --- a/website/docs/r/iam_role_policy_attachment.html.markdown +++ b/website/docs/r/iam_role_policy_attachment.html.markdown @@ -68,11 +68,38 @@ This resource exports no additional attributes. ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. 
For example: + +```terraform +import { + to = aws_iam_role_policy_attachment.example + identity = { + role = "test-role" + policy_arn = "arn:aws:iam::xxxxxxxxxxxx:policy/test-policy" + } +} + +resource "aws_iam_role_policy_attachment" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `role` (String) Name of the IAM role. +* `policy_arn` (String) ARN of the IAM policy. + +#### Optional + +* `account_id` (String) AWS Account where this resource is managed. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import IAM role policy attachments using the role name and policy arn separated by `/`. For example: ```terraform import { - to = aws_iam_role_policy_attachment.test-attach + to = aws_iam_role_policy_attachment.example id = "test-role/arn:aws:iam::xxxxxxxxxxxx:policy/test-policy" } ``` @@ -80,5 +107,5 @@ import { Using `terraform import`, import IAM role policy attachments using the role name and policy arn separated by `/`. For example: ```console -% terraform import aws_iam_role_policy_attachment.test-attach test-role/arn:aws:iam::xxxxxxxxxxxx:policy/test-policy +% terraform import aws_iam_role_policy_attachment.example test-role/arn:aws:iam::xxxxxxxxxxxx:policy/test-policy ``` diff --git a/website/docs/r/iam_saml_provider.html.markdown b/website/docs/r/iam_saml_provider.html.markdown index 05b06017ebdd..6f83ac5a2294 100644 --- a/website/docs/r/iam_saml_provider.html.markdown +++ b/website/docs/r/iam_saml_provider.html.markdown @@ -37,6 +37,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. 
For example: + +```terraform +import { + to = aws_iam_saml_provider.example + identity = { + "arn" = "arn:aws:iam::123456789012:saml-provider/ExampleProvider" + } +} + +resource "aws_iam_saml_provider" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the IAM SAML provider. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import IAM SAML Providers using the `arn`. For example: ```terraform diff --git a/website/docs/r/iam_service_linked_role.html.markdown b/website/docs/r/iam_service_linked_role.html.markdown index d470fa01f3eb..bf5682308c14 100644 --- a/website/docs/r/iam_service_linked_role.html.markdown +++ b/website/docs/r/iam_service_linked_role.html.markdown @@ -41,6 +41,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_iam_service_linked_role.example + identity = { + "arn" = "arn:aws:iam::123456789012:role/aws-service-role/elasticbeanstalk.amazonaws.com/AWSServiceRoleForElasticBeanstalk" + } +} + +resource "aws_iam_service_linked_role" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the IAM service-linked role. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import IAM service-linked roles using role ARN. 
For example: ```terraform diff --git a/website/docs/r/iam_service_specific_credential.html.markdown b/website/docs/r/iam_service_specific_credential.html.markdown index f91fb94d227f..48446660ac81 100644 --- a/website/docs/r/iam_service_specific_credential.html.markdown +++ b/website/docs/r/iam_service_specific_credential.html.markdown @@ -12,6 +12,8 @@ Provides an IAM Service Specific Credential. ## Example Usage +### Basic Usage + ```terraform resource "aws_iam_user" "example" { name = "example" @@ -23,22 +25,41 @@ resource "aws_iam_service_specific_credential" "example" { } ``` +### Bedrock API Key with Expiration + +```terraform +resource "aws_iam_user" "example" { + name = "example" +} + +resource "aws_iam_service_specific_credential" "bedrock" { + service_name = "bedrock.amazonaws.com" + user_name = aws_iam_user.example.name + credential_age_days = 30 # API key expires after 30 days +} +``` + ## Argument Reference This resource supports the following arguments: -* `service_name` - (Required) The name of the AWS service that is to be associated with the credentials. The service you specify here is the only service that can be accessed using these credentials. -* `user_name` - (Required) The name of the IAM user that is to be associated with the credentials. The new service-specific credentials have the same permissions as the associated user except that they can be used only to access the specified service. -* `status` - (Optional) The status to be assigned to the service-specific credential. Valid values are `Active` and `Inactive`. Default value is `Active`. +- `service_name` - (Required) The name of the AWS service that is to be associated with the credentials. The service you specify here is the only service that can be accessed using these credentials. Supported services are `codecommit.amazonaws.com`, `bedrock.amazonaws.com`, and `cassandra.amazonaws.com`. +- `user_name` - (Required) The name of the IAM user that is to be associated with the credentials. 
The new service-specific credentials have the same permissions as the associated user except that they can be used only to access the specified service. +- `status` - (Optional) The status to be assigned to the service-specific credential. Valid values are `Active`, `Inactive`, and `Expired`. Default value is `Active`. Note that `Expired` is only used for read operations and cannot be set manually. +- `credential_age_days` - (Optional, Forces new resource) The number of days until the service-specific credential expires. This field is only valid for Bedrock API keys and must be between 1 and 36600 (approximately 100 years). When not specified, the credential will not expire. ## Attribute Reference This resource exports the following attributes in addition to the arguments above: -* `id` - The combination of `service_name` and `user_name` as such: `service_name:user_name:service_specific_credential_id`. -* `service_password` - The generated password for the service-specific credential. -* `service_user_name` - The generated user name for the service-specific credential. This value is generated by combining the IAM user's name combined with the ID number of the AWS account, as in `jane-at-123456789012`, for example. -* `service_specific_credential_id` - The unique identifier for the service-specific credential. +- `id` - The combination of `service_name` and `user_name` as such: `service_name:user_name:service_specific_credential_id`. +- `service_password` - The generated password for the service-specific credential. This value is only available when the credential is created. +- `service_user_name` - The generated user name for the service-specific credential. This value is generated by combining the IAM user's name with the ID number of the AWS account, as in `jane-at-123456789012`, for example. +- `service_specific_credential_id` - The unique identifier for the service-specific credential.
+- `service_credential_alias` - For Bedrock API keys, this is the public portion of the credential that includes the IAM user name and a suffix containing version and creation information. +- `service_credential_secret` - For Bedrock API keys, this is the secret portion of the credential that should be used to authenticate API calls. This value is only available when the credential is created. +- `create_date` - The date and time, in RFC3339 format, when the service-specific credential was created. +- `expiration_date` - The date and time, in RFC3339 format, when the service specific credential expires. This field is only present for Bedrock API keys that were created with an expiration period. ## Import diff --git a/website/docs/r/imagebuilder_container_recipe.html.markdown b/website/docs/r/imagebuilder_container_recipe.html.markdown index 991679410a55..f02e0a1b3053 100644 --- a/website/docs/r/imagebuilder_container_recipe.html.markdown +++ b/website/docs/r/imagebuilder_container_recipe.html.markdown @@ -136,6 +136,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_imagebuilder_container_recipe.example + identity = { + "arn" = "arn:aws:imagebuilder:us-east-1:123456789012:container-recipe/example/1.0.0" + } +} + +resource "aws_imagebuilder_container_recipe" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the Image Builder container recipe. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import `aws_imagebuilder_container_recipe` resources using the Amazon Resource Name (ARN). 
For example: ```terraform diff --git a/website/docs/r/imagebuilder_distribution_configuration.html.markdown b/website/docs/r/imagebuilder_distribution_configuration.html.markdown index 35b1d0246ab2..4bf98ef498c0 100644 --- a/website/docs/r/imagebuilder_distribution_configuration.html.markdown +++ b/website/docs/r/imagebuilder_distribution_configuration.html.markdown @@ -149,6 +149,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_imagebuilder_distribution_configuration.example + identity = { + "arn" = "arn:aws:imagebuilder:us-east-1:123456789012:distribution-configuration/example" + } +} + +resource "aws_imagebuilder_distribution_configuration" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the Image Builder distribution configuration. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import `aws_imagebuilder_distribution_configurations` resources using the Amazon Resource Name (ARN). For example: ```terraform diff --git a/website/docs/r/imagebuilder_image.html.markdown b/website/docs/r/imagebuilder_image.html.markdown index ed6f12105cd1..439047b96430 100644 --- a/website/docs/r/imagebuilder_image.html.markdown +++ b/website/docs/r/imagebuilder_image.html.markdown @@ -112,6 +112,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. 
For example: + +```terraform +import { + to = aws_imagebuilder_image.example + identity = { + "arn" = "arn:aws:imagebuilder:us-east-1:123456789012:image/example/1.0.0/1" + } +} + +resource "aws_imagebuilder_image" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the Image Builder image. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import `aws_imagebuilder_image` resources using the Amazon Resource Name (ARN). For example: ```terraform diff --git a/website/docs/r/imagebuilder_image_pipeline.html.markdown b/website/docs/r/imagebuilder_image_pipeline.html.markdown index 6c958f9e080f..86a6ae15c417 100644 --- a/website/docs/r/imagebuilder_image_pipeline.html.markdown +++ b/website/docs/r/imagebuilder_image_pipeline.html.markdown @@ -156,6 +156,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_imagebuilder_image_pipeline.example + identity = { + "arn" = "arn:aws:imagebuilder:us-east-1:123456789012:image-pipeline/example" + } +} + +resource "aws_imagebuilder_image_pipeline" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the Image Builder image pipeline. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import `aws_imagebuilder_image_pipeline` resources using the Amazon Resource Name (ARN). 
For example: ```terraform diff --git a/website/docs/r/imagebuilder_image_recipe.html.markdown b/website/docs/r/imagebuilder_image_recipe.html.markdown index 9f88df44d705..a88c20366a82 100644 --- a/website/docs/r/imagebuilder_image_recipe.html.markdown +++ b/website/docs/r/imagebuilder_image_recipe.html.markdown @@ -50,7 +50,7 @@ The following arguments are required: * `component` - (Required) Ordered configuration block(s) with components for the image recipe. Detailed below. * `name` - (Required) Name of the image recipe. -* `parent_image` - (Required) The image recipe uses this image as a base from which to build your customized image. The value can be the base image ARN or an AMI ID. +* `parent_image` - (Required) The image recipe uses this image as a base from which to build your customized image. The value can be the base image ARN, an AMI ID, or an SSM Parameter referencing the AMI. For an SSM Parameter, enter the prefix `ssm:`, followed by the parameter name or ARN. * `version` - (Required) The semantic version of the image recipe, which specifies the version in the following format, with numeric values in each position to indicate a specific version: major.minor.patch. For example: 1.0.0. The following arguments are optional: @@ -107,6 +107,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_imagebuilder_image_recipe.example + identity = { + "arn" = "arn:aws:imagebuilder:us-east-1:123456789012:image-recipe/example/1.0.0" + } +} + +resource "aws_imagebuilder_image_recipe" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the Image Builder image recipe. 
+ In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import `aws_imagebuilder_image_recipe` resources using the Amazon Resource Name (ARN). For example: ```terraform diff --git a/website/docs/r/imagebuilder_infrastructure_configuration.html.markdown b/website/docs/r/imagebuilder_infrastructure_configuration.html.markdown index e18ef1dfed74..f9eb2128b732 100644 --- a/website/docs/r/imagebuilder_infrastructure_configuration.html.markdown +++ b/website/docs/r/imagebuilder_infrastructure_configuration.html.markdown @@ -107,6 +107,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_imagebuilder_infrastructure_configuration.example + identity = { + "arn" = "arn:aws:imagebuilder:us-east-1:123456789012:infrastructure-configuration/example" + } +} + +resource "aws_imagebuilder_infrastructure_configuration" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the Image Builder infrastructure configuration. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import `aws_imagebuilder_infrastructure_configuration` using the Amazon Resource Name (ARN). 
For example: ```terraform diff --git a/website/docs/r/imagebuilder_lifecycle_policy.html.markdown b/website/docs/r/imagebuilder_lifecycle_policy.html.markdown index 066b619b1c72..f6910c31a706 100644 --- a/website/docs/r/imagebuilder_lifecycle_policy.html.markdown +++ b/website/docs/r/imagebuilder_lifecycle_policy.html.markdown @@ -176,6 +176,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_imagebuilder_lifecycle_policy.example + identity = { + "arn" = "arn:aws:imagebuilder:us-east-1:123456789012:lifecycle-policy/example" + } +} + +resource "aws_imagebuilder_lifecycle_policy" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the Image Builder lifecycle policy. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import `aws_imagebuilder_lifecycle_policy` using the Amazon Resource Name (ARN). For example: ```terraform diff --git a/website/docs/r/imagebuilder_workflow.html.markdown b/website/docs/r/imagebuilder_workflow.html.markdown index a1acbe63929b..2957b13dff40 100644 --- a/website/docs/r/imagebuilder_workflow.html.markdown +++ b/website/docs/r/imagebuilder_workflow.html.markdown @@ -80,7 +80,28 @@ This resource exports the following attributes in addition to the arguments abov ## Import -In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import EC2 Image Builder Workflow using the `example_id_arg`. For example: +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. 
For example: + +```terraform +import { + to = aws_imagebuilder_workflow.example + identity = { + "arn" = "arn:aws:imagebuilder:us-east-1:123456789012:workflow/build/example/1.0.0" + } +} + +resource "aws_imagebuilder_workflow" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the Image Builder workflow. + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import EC2 Image Builder Workflow using the `arn`. For example: ```terraform import { @@ -89,7 +110,7 @@ import { } ``` -Using `terraform import`, import EC2 Image Builder Workflow using the `example_id_arg`. For example: +Using `terraform import`, import EC2 Image Builder Workflow using the `arn`. For example: ```console % terraform import aws_imagebuilder_workflow.example arn:aws:imagebuilder:us-east-1:aws:workflow/test/example/1.0.1/1 diff --git a/website/docs/r/inspector2_enabler.html.markdown b/website/docs/r/inspector2_enabler.html.markdown index 6962dcce2d17..d2b363718df8 100644 --- a/website/docs/r/inspector2_enabler.html.markdown +++ b/website/docs/r/inspector2_enabler.html.markdown @@ -42,7 +42,7 @@ This resource supports the following arguments: * `account_ids` - (Required) Set of account IDs. Can contain one of: the Organization's Administrator Account, or one or more Member Accounts. * `resource_types` - (Required) Type of resources to scan. - Valid values are `EC2`, `ECR`, `LAMBDA` and `LAMBDA_CODE`. + Valid values are `EC2`, `ECR`, `LAMBDA`, `LAMBDA_CODE` and `CODE_REPOSITORY`. At least one item is required. ## Attribute Reference @@ -56,3 +56,20 @@ This resource exports no additional attributes. 
* `create` - (Default `5m`) * `update` - (Default `5m`) * `delete` - (Default `5m`) + +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Inspector Enabler using `account_ids` and `resource_types` formatted as `[account_id1]:[account_id2]:...-[resource_type1]:[resource_type2]:...`, where `account_ids` are sorted in ascending order and `resource_types` are sorted in alphabetical order. For example: + +```terraform +import { + to = aws_inspector2_enabler.example + id = "123456789012:234567890123-EC2:ECR" +} +``` + +Using `terraform import`, import Inspector Enabler using `account_ids` and `resource_types` formatted as `[account_id1]:[account_id2]:...-[resource_type1]:[resource_type2]:...`, where `account_ids` are sorted in ascending order and `resource_types` are sorted in alphabetical order. For example: + +```console +% terraform import aws_inspector2_enabler.example 123456789012:234567890123-EC2:ECR +``` diff --git a/website/docs/r/inspector2_filter.html.markdown b/website/docs/r/inspector2_filter.html.markdown index b2544b0f7b75..1bf2b3f53b90 100644 --- a/website/docs/r/inspector2_filter.html.markdown +++ b/website/docs/r/inspector2_filter.html.markdown @@ -54,6 +54,8 @@ This resource exports the following attributes in addition to the arguments abov The `filter_criteria` configuration block supports the following attributes: * `aws_account_id` - (Optional) The AWS account ID in which the finding was generated. [Documented below](#string-filter). +* `code_repository_project_name` - (Optional) The project name in a code repository. [Documented below](#string-filter). +* `code_repository_provider_type` - (Optional) The repository provider type (such as GitHub, GitLab, etc.) [Documented below](#string-filter). * `code_vulnerability_detector_name` - (Optional) The ID of the component. [Documented below](#string-filter).
* `code_vulnerability_detector_tags` - (Optional) The ID of the component. [Documented below](#string-filter). * `code_vulnerability_file_path` - (Optional) The ID of the component. [Documented below](#string-filter). @@ -63,6 +65,8 @@ The `filter_criteria` configuration block supports the following attributes: * `ec2_instance_subnet_id` - (Optional) The ID of the subnet. [Documented below](#string-filter). * `ec2_instance_vpc_id` - (Optional) The ID of the VPC. [Documented below](#string-filter). * `ecr_image_architecture` - (Optional) The architecture of the ECR image. [Documented below](#string-filter). +* `ecr_image_in_use_count` - (Optional) The number of the ECR images in use. [Documented below](#number-filter). +* `ecr_image_last_in_use_at` - (Optional) The date range when an ECR image was last used in an ECS cluster task or EKS cluster pod. [Documented below](#date-filter). * `ecr_image_hash` - (Optional) The SHA256 hash of the ECR image. [Documented below](#string-filter). * `ecr_image_pushed_at` - (Optional) The date range when the image was pushed. [Documented below](#date-filter). * `ecr_image_registry` - (Optional) The registry of the ECR image. [Documented below](#string-filter). @@ -156,7 +160,7 @@ import { } ``` -Using `terraform import`, import Inspector Filter using the `example_id_arg`. For example: +Using `terraform import`, import Inspector Filter using the `arn`. 
For example: ```console % terraform import aws_inspector2_filter.example "arn:aws:inspector2:us-east-1:111222333444:owner/111222333444/filter/abcdefgh12345678" diff --git a/website/docs/r/inspector2_organization_configuration.html.markdown b/website/docs/r/inspector2_organization_configuration.html.markdown index e7f2e48f1c9c..76292ec041fc 100644 --- a/website/docs/r/inspector2_organization_configuration.html.markdown +++ b/website/docs/r/inspector2_organization_configuration.html.markdown @@ -21,10 +21,11 @@ Terraform resource for managing an Amazon Inspector Organization Configuration. ```terraform resource "aws_inspector2_organization_configuration" "example" { auto_enable { - ec2 = true - ecr = false - lambda = true - lambda_code = true + ec2 = true + ecr = false + code_repository = false + lambda = true + lambda_code = true } } ``` @@ -40,6 +41,7 @@ This resource supports the following arguments: * `ec2` - (Required) Whether Amazon EC2 scans are automatically enabled for new members of your Amazon Inspector organization. * `ecr` - (Required) Whether Amazon ECR scans are automatically enabled for new members of your Amazon Inspector organization. +* `code_repository` - (Optional) Whether code repository scans are automatically enabled for new members of your Amazon Inspector organization. * `lambda` - (Optional) Whether Lambda Function scans are automatically enabled for new members of your Amazon Inspector organization. * `lambda_code` - (Optional) Whether AWS Lambda code scans are automatically enabled for new members of your Amazon Inspector organization. **Note:** Lambda code scanning requires Lambda standard scanning to be activated. Consequently, if you are setting this argument to `true`, you must also set the `lambda` argument to `true`. See [Scanning AWS Lambda functions with Amazon Inspector](https://docs.aws.amazon.com/inspector/latest/user/scanning-lambda.html#lambda-code-scans) for more information. 
diff --git a/website/docs/r/inspector_assessment_target.html.markdown b/website/docs/r/inspector_assessment_target.html.markdown index 9f9551fb2053..d126914bcc8d 100644 --- a/website/docs/r/inspector_assessment_target.html.markdown +++ b/website/docs/r/inspector_assessment_target.html.markdown @@ -42,6 +42,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_inspector_assessment_target.example + identity = { + "arn" = "arn:aws:inspector:us-west-2:123456789012:target/0-12345678" + } +} + +resource "aws_inspector_assessment_target" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the Inspector assessment target. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Inspector Classic Assessment Targets using their Amazon Resource Name (ARN). For example: ```terraform diff --git a/website/docs/r/inspector_assessment_template.html.markdown b/website/docs/r/inspector_assessment_template.html.markdown index 58618112129f..670dd89c032b 100644 --- a/website/docs/r/inspector_assessment_template.html.markdown +++ b/website/docs/r/inspector_assessment_template.html.markdown @@ -60,6 +60,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. 
For example: + +```terraform +import { + to = aws_inspector_assessment_template.example + identity = { + "arn" = "arn:aws:inspector:us-west-2:123456789012:target/0-12345678/template/0-87654321" + } +} + +resource "aws_inspector_assessment_template" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the Inspector assessment template. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import `aws_inspector_assessment_template` using the template assessment ARN. For example: ```terraform diff --git a/website/docs/r/instance.html.markdown b/website/docs/r/instance.html.markdown index d8a46ec4b08a..80e5ba598a7e 100644 --- a/website/docs/r/instance.html.markdown +++ b/website/docs/r/instance.html.markdown @@ -33,7 +33,7 @@ data "aws_ami" "ubuntu" { owners = ["099720109477"] # Canonical } -resource "aws_instance" "web" { +resource "aws_instance" "example" { ami = data.aws_ami.ubuntu.id instance_type = "t3.micro" @@ -46,7 +46,7 @@ resource "aws_instance" "web" { Using AWS Systems Manager Parameter Store ```terraform -resource "aws_instance" "web" { +resource "aws_instance" "example" { ami = "resolve:ssm:/aws/service/ami-amazon-linux-latest/al2023-ami-kernel-default-x86_64" instance_type = "t3.micro" @@ -59,7 +59,7 @@ resource "aws_instance" "web" { ### Spot instance example ```terraform -data "aws_ami" "this" { +data "aws_ami" "example" { most_recent = true owners = ["amazon"] filter { @@ -72,8 +72,8 @@ data "aws_ami" "this" { } } -resource "aws_instance" "this" { - ami = data.aws_ami.this.id +resource "aws_instance" "example" { + ami = data.aws_ami.example.id instance_market_options { market_type = "spot" spot_options { @@ -108,7 +108,7 @@ resource "aws_subnet" "my_subnet" { } } -resource "aws_network_interface" "foo" { +resource "aws_network_interface" "example" { subnet_id = aws_subnet.my_subnet.id 
private_ips = ["172.16.10.100"] @@ -117,13 +117,12 @@ resource "aws_network_interface" "foo" { } } -resource "aws_instance" "foo" { +resource "aws_instance" "example" { ami = "ami-005e54dee72cc1d00" # us-west-2 instance_type = "t2.micro" - network_interface { - network_interface_id = aws_network_interface.foo.id - device_index = 0 + primary_network_interface { + network_interface_id = aws_network_interface.example.id } credit_specification { @@ -224,6 +223,7 @@ This resource supports the following arguments: * `enable_primary_ipv6` - (Optional) Whether to assign a primary IPv6 Global Unicast Address (GUA) to the instance when launched in a dual-stack or IPv6-only subnet. A primary IPv6 address ensures a consistent IPv6 address for the instance and is automatically assigned by AWS to the ENI. Once enabled, the first IPv6 GUA becomes the primary IPv6 address and cannot be disabled. The primary IPv6 address remains until the instance is terminated or the ENI is detached. Disabling `enable_primary_ipv6` after it has been enabled forces recreation of the instance. * `enclave_options` - (Optional) Enable Nitro Enclaves on launched instances. See [Enclave Options](#enclave-options) below for more details. * `ephemeral_block_device` - (Optional) One or more configuration blocks to customize Ephemeral (also known as "Instance Store") volumes on the instance. See [Block Devices](#ebs-ephemeral-and-root-block-devices) below for details. When accessing this as an attribute reference, it is a set of objects. +* `force_destroy` - (Optional) Destroys instance even if `disable_api_termination` or `disable_api_stop` is set to `true`. Defaults to `false`. Once this parameter is set to `true`, a successful `terraform apply` run before a destroy is required to update this value in the resource state. Without a successful `terraform apply` after this parameter is set, this flag will have no effect. 
If setting this field in the same operation that would require replacing the instance or destroying the instance, this flag will not work. Additionally when importing an instance, a successful `terraform apply` is required to set this value in state before it will take effect on a destroy operation. * `get_password_data` - (Optional) If true, wait for password data to become available and retrieve it. Useful for getting the administrator password for instances running Microsoft Windows. The password data is exported to the `password_data` attribute. See [GetPasswordData](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetPasswordData.html) for more information. * `hibernation` - (Optional) If true, the launched EC2 instance will support hibernation. * `host_id` - (Optional) ID of a dedicated host that the instance will be assigned to. Use when an instance is to be launched on a specific dedicated host. @@ -239,9 +239,11 @@ This resource supports the following arguments: * `maintenance_options` - (Optional) Maintenance and recovery options for the instance. See [Maintenance Options](#maintenance-options) below for more details. * `metadata_options` - (Optional) Customize the metadata options of the instance. See [Metadata Options](#metadata-options) below for more details. * `monitoring` - (Optional) If true, the launched EC2 instance will have detailed monitoring enabled. (Available since v0.6.0) -* `network_interface` - (Optional) Customize network interfaces to be attached at instance boot time. See [Network Interfaces](#network-interfaces) below for more details. -* `placement_group` - (Optional) Placement Group to start the instance in. +* `network_interface` - (Optional, **Deprecated** to specify the primary network interface, use `primary_network_interface`, to attach additional network interfaces, use `aws_network_interface_attachment` resources) Customize network interfaces to be attached at instance boot time. 
See [Network Interfaces](#network-interfaces) below for more details. +* `placement_group` - (Optional) Placement Group to start the instance in. Conflicts with `placement_group_id`. +* `placement_group_id` - (Optional) Placement Group ID to start the instance in. Conflicts with `placement_group`. * `placement_partition_number` - (Optional) Number of the partition the instance is in. Valid only if [the `aws_placement_group` resource's](placement_group.html) `strategy` argument is set to `"partition"`. +* `primary_network_interface` - (Optional) The primary network interface. See [Primary Network Interface](#primary-network-interface) below. * `private_dns_name_options` - (Optional) Options for the instance hostname. The default values are inherited from the subnet. See [Private DNS Name Options](#private-dns-name-options) below for more details. * `private_ip` - (Optional) Private IP address to associate with the instance in a VPC. * `root_block_device` - (Optional) Configuration block to customize details about the root block device of the instance. See [Block Devices](#ebs-ephemeral-and-root-block-devices) below for details. When accessing this as an attribute reference, it is a list containing one object. @@ -388,7 +390,11 @@ For more information, see the documentation on the [Instance Metadata Service](h ### Network Interfaces -Each of the `network_interface` blocks attach a network interface to an EC2 Instance during boot time. However, because the network interface is attached at boot-time, replacing/modifying the network interface **WILL** trigger a recreation of the EC2 Instance. If you should need at any point to detach/modify/re-attach a network interface to the instance, use the `aws_network_interface` or `aws_network_interface_attachment` resources instead. +`network_interface` is **deprecated**. +Use `primary_network_interface` to specify the primary network interface. 
+To attach additional network interfaces, use [`aws_network_interface_attachment`](network_interface_attachment.html) resources. + +Each of the `network_interface` blocks attach a network interface to an EC2 Instance during boot time. However, because the network interface is attached at boot-time, replacing/modifying the network interface **WILL** trigger a recreation of the EC2 Instance. If you should need at any point to detach/modify/re-attach a network interface to the instance, use [`aws_network_interface_attachment`](network_interface_attachment.html) resources instead. The `network_interface` configuration block _does_, however, allow users to supply their own network interface to be used as the default network interface on an EC2 Instance, attached at `eth0`. @@ -399,6 +405,16 @@ Each `network_interface` block supports the following: * `network_card_index` - (Optional) Integer index of the network card. Limited by instance type. The default index is `0`. * `network_interface_id` - (Required) ID of the network interface to attach. +### Primary Network Interface + +Represents the primary network interface on the EC2 Instance. +To manage additional network interfaces, use [`aws_network_interface_attachment`](network_interface_attachment.html) resources. + +Each `primary_network_interface` block supports the following: + +* `delete_on_termination` - (Read-Only) Whether the network interface will be deleted when the instance terminates. +* `network_interface_id` - (Required) ID of the network interface to attach. + ### Private DNS Name Options The `private_dns_name_options` block supports the following: @@ -472,6 +488,32 @@ For `instance_market_options`, in addition to the arguments above, the following ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. 
For example: + +```terraform +import { + to = aws_instance.example + identity = { + id = "i-12345678" + } +} + +resource "aws_instance" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `id` - (String) ID of the instance. + +#### Optional + +* `account_id` (String) AWS Account where this resource is managed. +* `region` (String) Region where this resource is managed. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import instances using the `id`. For example: ```terraform diff --git a/website/docs/r/iot_thing_principal_attachment.html.markdown b/website/docs/r/iot_thing_principal_attachment.html.markdown index 457170b54803..9c8ff59afdaf 100644 --- a/website/docs/r/iot_thing_principal_attachment.html.markdown +++ b/website/docs/r/iot_thing_principal_attachment.html.markdown @@ -35,6 +35,7 @@ This resource supports the following arguments: * `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `principal` - (Required) The AWS IoT Certificate ARN or Amazon Cognito Identity ID. * `thing` - (Required) The name of the thing. +* `thing_principal_type` - (Optional) The type of relationship to specify when attaching a principal to a thing. Valid values are `EXCLUSIVE_THING` (the thing will be the only one attached to the principal) or `NON_EXCLUSIVE_THING` (multiple things can be attached to the principal). Defaults to `NON_EXCLUSIVE_THING`. 
## Attribute Reference diff --git a/website/docs/r/ivs_channel.html.markdown b/website/docs/r/ivs_channel.html.markdown index 2e94eef39649..0e3a16ab3540 100644 --- a/website/docs/r/ivs_channel.html.markdown +++ b/website/docs/r/ivs_channel.html.markdown @@ -51,6 +51,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_ivs_channel.example + identity = { + "arn" = "arn:aws:ivs:us-west-2:123456789012:channel/abcdABCDefgh" + } +} + +resource "aws_ivs_channel" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the IVS channel. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import IVS (Interactive Video) Channel using the ARN. For example: ```terraform diff --git a/website/docs/r/ivs_playback_key_pair.html.markdown b/website/docs/r/ivs_playback_key_pair.html.markdown index 414db3aa6732..4824d0efdda8 100644 --- a/website/docs/r/ivs_playback_key_pair.html.markdown +++ b/website/docs/r/ivs_playback_key_pair.html.markdown @@ -50,6 +50,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. 
For example: + +```terraform +import { + to = aws_ivs_playback_key_pair.example + identity = { + "arn" = "arn:aws:ivs:us-west-2:123456789012:playback-key/abcdABCDefgh" + } +} + +resource "aws_ivs_playback_key_pair" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the IVS playback key pair. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import IVS (Interactive Video) Playback Key Pair using the ARN. For example: ```terraform diff --git a/website/docs/r/ivs_recording_configuration.html.markdown b/website/docs/r/ivs_recording_configuration.html.markdown index 913cd26490fe..e9dc6280d371 100644 --- a/website/docs/r/ivs_recording_configuration.html.markdown +++ b/website/docs/r/ivs_recording_configuration.html.markdown @@ -60,6 +60,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_ivs_recording_configuration.example + identity = { + "arn" = "arn:aws:ivs:us-west-2:123456789012:recording-configuration/abcdABCDefgh" + } +} + +resource "aws_ivs_recording_configuration" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the IVS recording configuration. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import IVS (Interactive Video) Recording Configuration using the ARN. 
For example: ```terraform diff --git a/website/docs/r/ivschat_logging_configuration.html.markdown b/website/docs/r/ivschat_logging_configuration.html.markdown index 17e6b4dcdd20..52f389c48785 100644 --- a/website/docs/r/ivschat_logging_configuration.html.markdown +++ b/website/docs/r/ivschat_logging_configuration.html.markdown @@ -132,6 +132,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_ivschat_logging_configuration.example + identity = { + "arn" = "arn:aws:ivschat:us-west-2:123456789012:logging-configuration/abcdABCDefgh" + } +} + +resource "aws_ivschat_logging_configuration" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the IVS Chat logging configuration. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import IVS (Interactive Video) Chat Logging Configuration using the ARN. For example: ```terraform diff --git a/website/docs/r/ivschat_room.html.markdown b/website/docs/r/ivschat_room.html.markdown index d63c2f9f1271..d2715f7f05ff 100644 --- a/website/docs/r/ivschat_room.html.markdown +++ b/website/docs/r/ivschat_room.html.markdown @@ -87,6 +87,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. 
For example: + +```terraform +import { + to = aws_ivschat_room.example + identity = { + "arn" = "arn:aws:ivschat:us-west-2:123456789012:room/g1H2I3j4k5L6" + } +} + +resource "aws_ivschat_room" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the IVS Chat room. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import IVS (Interactive Video) Chat Room using the ARN. For example: ```terraform diff --git a/website/docs/r/kinesis_resource_policy.html.markdown b/website/docs/r/kinesis_resource_policy.html.markdown index 873e3c2d58d1..57cdc66cd6a8 100644 --- a/website/docs/r/kinesis_resource_policy.html.markdown +++ b/website/docs/r/kinesis_resource_policy.html.markdown @@ -54,6 +54,27 @@ This resource exports no additional attributes. ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_kinesis_resource_policy.example + identity = { + "arn" = "arn:aws:kinesis:us-east-1:123456789012:stream/example-stream" + } +} + +resource "aws_kinesis_resource_policy" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the Kinesis stream. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Kinesis resource policies using the `resource_arn`. 
For example: ```terraform diff --git a/website/docs/r/kms_alias.html.markdown b/website/docs/r/kms_alias.html.markdown index d5a62d006cee..9940e759417d 100644 --- a/website/docs/r/kms_alias.html.markdown +++ b/website/docs/r/kms_alias.html.markdown @@ -42,6 +42,32 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_kms_alias.example + identity = { + name = "alias/my-key-alias" + } +} + +resource "aws_kms_alias" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `name` - (String) Name of the KMS key alias. + +#### Optional + +* `account_id` (String) AWS Account where this resource is managed. +* `region` (String) Region where this resource is managed. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import KMS aliases using the `name`. For example: ```terraform diff --git a/website/docs/r/kms_external_key.html.markdown b/website/docs/r/kms_external_key.html.markdown index 9fe2e10a4f1e..902d35c51144 100644 --- a/website/docs/r/kms_external_key.html.markdown +++ b/website/docs/r/kms_external_key.html.markdown @@ -25,14 +25,16 @@ resource "aws_kms_external_key" "example" { This resource supports the following arguments: -* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `bypass_policy_lockout_safety_check` - (Optional) Specifies whether to disable the policy lockout check performed when creating or updating the key's policy. 
Setting this value to `true` increases the risk that the key becomes unmanageable. For more information, refer to the scenario in the [Default Key Policy](https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html#key-policy-default-allow-root-enable-iam) section in the AWS Key Management Service Developer Guide. Defaults to `false`. * `deletion_window_in_days` - (Optional) Duration in days after which the key is deleted after destruction of the resource. Must be between `7` and `30` days. Defaults to `30`. * `description` - (Optional) Description of the key. * `enabled` - (Optional) Specifies whether the key is enabled. Keys pending import can only be `false`. Imported keys default to `true` unless expired. * `key_material_base64` - (Optional) Base64 encoded 256-bit symmetric encryption key material to import. The CMK is permanently associated with this key material. The same key material can be reimported, but you cannot import different key material. +* `key_spec` - (Optional) Specifies whether the key contains a symmetric key or an asymmetric key pair and the encryption algorithms or signing algorithms that the key supports. Valid values: `SYMMETRIC_DEFAULT`, `RSA_2048`, `RSA_3072`, `RSA_4096`, `HMAC_224`, `HMAC_256`, `HMAC_384`, `HMAC_512`, `ECC_NIST_P256`, `ECC_NIST_P384`, `ECC_NIST_P521`, `ECC_SECG_P256K1`, `ML_DSA_44`, `ML_DSA_65`, `ML_DSA_87`, or `SM2` (China Regions only). Defaults to `SYMMETRIC_DEFAULT`. For help with choosing a key spec, see the [AWS KMS Developer Guide](https://docs.aws.amazon.com/kms/latest/developerguide/symm-asymm-choose.html). +* `key_usage` - (Optional) Specifies the intended use of the key. Valid values: `ENCRYPT_DECRYPT`, `SIGN_VERIFY`, or `GENERATE_VERIFY_MAC`. Defaults to `ENCRYPT_DECRYPT`. * `multi_region` - (Optional) Indicates whether the KMS key is a multi-Region (`true`) or regional (`false`) key. Defaults to `false`. * `policy` - (Optional) A key policy JSON document. 
If you do not provide a key policy, AWS KMS attaches a default key policy to the CMK. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `tags` - (Optional) A key-value map of tags to assign to the key. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. * `valid_to` - (Optional) Time at which the imported key material expires. When the key material expires, AWS KMS deletes the key material and the CMK becomes unusable. If not specified, key material does not expire. Valid values: [RFC3339 time string](https://tools.ietf.org/html/rfc3339#section-5.8) (`YYYY-MM-DDTHH:MM:SSZ`) @@ -44,7 +46,6 @@ This resource exports the following attributes in addition to the arguments abov * `expiration_model` - Whether the key material expires. Empty when pending key material import, otherwise `KEY_MATERIAL_EXPIRES` or `KEY_MATERIAL_DOES_NOT_EXPIRE`. * `id` - The unique identifier for the key. * `key_state` - The state of the CMK. -* `key_usage` - The cryptographic operations for which you can use the CMK. * `tags_all` - A map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). 
## Import diff --git a/website/docs/r/kms_key.html.markdown b/website/docs/r/kms_key.html.markdown index ceb047817c49..63c427dc1c1b 100644 --- a/website/docs/r/kms_key.html.markdown +++ b/website/docs/r/kms_key.html.markdown @@ -353,6 +353,32 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_kms_key.example + identity = { + id = "1234abcd-12ab-34cd-56ef-1234567890ab" + } +} + +resource "aws_kms_key" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `id` - (String) ID of the KMS key. + +#### Optional + +* `account_id` (String) AWS Account where this resource is managed. +* `region` (String) Region where this resource is managed. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import KMS Keys using the `id`. For example: ```terraform diff --git a/website/docs/r/lakeformation_data_cells_filter.html.markdown b/website/docs/r/lakeformation_data_cells_filter.html.markdown index 243db10cdb46..77536786c4f2 100644 --- a/website/docs/r/lakeformation_data_cells_filter.html.markdown +++ b/website/docs/r/lakeformation_data_cells_filter.html.markdown @@ -71,7 +71,7 @@ This resource exports the following attributes in addition to the arguments abov ## Import -In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Lake Formation Data Cells Filter using the `example_id_arg`. For example: +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Lake Formation Data Cells Filter using the `database_name`, `name`, `table_catalog_id`, and `table_name` separated by `,`. 
For example: ```terraform import { @@ -80,7 +80,7 @@ import { } ``` -Using `terraform import`, import Lake Formation Data Cells Filter using the `id`. For example: +Using `terraform import`, import Lake Formation Data Cells Filter using the `database_name`, `name`, `table_catalog_id`, and `table_name` separated by `,`. For example: ```console % terraform import aws_lakeformation_data_cells_filter.example database_name,name,table_catalog_id,table_name diff --git a/website/docs/r/lakeformation_lf_tag_expression.html.markdown b/website/docs/r/lakeformation_lf_tag_expression.html.markdown new file mode 100644 index 000000000000..1829af11e960 --- /dev/null +++ b/website/docs/r/lakeformation_lf_tag_expression.html.markdown @@ -0,0 +1,70 @@ +--- +subcategory: "Lake Formation" +layout: "aws" +page_title: "AWS: aws_lakeformation_lf_tag_expression" +description: |- + Terraform resource for managing an AWS Lake Formation LF Tag Expression. +--- +# Resource: aws_lakeformation_lf_tag_expression + +Terraform resource for managing an AWS Lake Formation LF Tag Expression. + +## Example Usage + +### Basic Usage + +```terraform +resource "aws_lakeformation_lf_tag" "example" { + key = "example" + values = ["value"] +} + +resource "aws_lakeformation_lf_tag_expression" "example" { + name = "example" + + expression { + tag_key = aws_lakeformation_lf_tag.example.key + tag_values = aws_lakeformation_lf_tag.example.values + } +} + +``` + +## Argument Reference + +The following arguments are required: + +* `name` - (Required) Name of the LF-Tag Expression. +* `expression` - (Required) A list of LF-Tag conditions (key-value pairs). See [expression](#expression) for more details. + +The following arguments are optional: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `catalog_id` - (Optional) ID of the Data Catalog. Defaults to the account ID if not specified. +* `description` - (Optional) Description of the LF-Tag Expression. + +### expression + +* `tag_key` - (Required) The key-name for the LF-Tag. +* `tag_values` - (Required) A list of possible values for the LF-Tag + +## Attribute Reference + +This resource exports no additional attributes. + +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Lake Formation LF Tag Expression using the `name,catalog_id`. For example: + +```terraform +import { + to = aws_lakeformation_lf_tag_expression.example + id = "example-tag-expression,123456789012" +} +``` + +Using `terraform import`, import Lake Formation LF Tag Expression using the `name,catalog_id`. For example: + +```console +% terraform import aws_lakeformation_lf_tag_expression.example example-tag-expression,123456789012 +``` diff --git a/website/docs/r/lakeformation_permissions.html.markdown b/website/docs/r/lakeformation_permissions.html.markdown index 4c504fb5f75d..5008a530e4eb 100644 --- a/website/docs/r/lakeformation_permissions.html.markdown +++ b/website/docs/r/lakeformation_permissions.html.markdown @@ -91,6 +91,7 @@ The resulting permissions depend on whether the table had `IAMAllowedPrincipals` AllIAMPrincipals is a pseudo-entity group that acts like a Lake Formation principal. The group includes all IAMs in the account that is defined. 
+```terraform resource "aws_lakeformation_permissions" "example" { permissions = ["SELECT"] principal = "123456789012:IAMPrincipals" @@ -101,6 +102,7 @@ resource "aws_lakeformation_permissions" "example" { column_names = ["event"] } } +``` ## Using Lake Formation Permissions diff --git a/website/docs/r/lakeformation_resource.html.markdown b/website/docs/r/lakeformation_resource.html.markdown index 5b03f9fa0569..314e95b5887a 100644 --- a/website/docs/r/lakeformation_resource.html.markdown +++ b/website/docs/r/lakeformation_resource.html.markdown @@ -40,6 +40,7 @@ The following arguments are optional: * `use_service_linked_role` - (Optional) Designates an AWS Identity and Access Management (IAM) service-linked role by registering this role with the Data Catalog. * `hybrid_access_enabled` - (Optional) Flag to enable AWS LakeFormation hybrid access permission mode. * `with_federation`- (Optional) Whether or not the resource is a federated resource. Set to true when registering AWS Glue connections for federated catalog functionality. +* `with_privileged_access` - (Optional) Boolean to grant the calling principal the permissions to perform all supported Lake Formation operations on the registered data location. ~> **NOTE:** AWS does not support registering an S3 location with an IAM role and subsequently updating the S3 location registration to a service-linked role. 
diff --git a/website/docs/r/lambda_alias.html.markdown b/website/docs/r/lambda_alias.html.markdown index 4818adf52372..b062ccf0d489 100644 --- a/website/docs/r/lambda_alias.html.markdown +++ b/website/docs/r/lambda_alias.html.markdown @@ -37,8 +37,7 @@ resource "aws_lambda_alias" "example" { routing_config { additional_version_weights = { "1" = 0.1 # Send 10% of traffic to version 1 - "3" = 0.2 # Send 20% of traffic to version 3 - # Remaining 70% goes to version 2 (the primary version) + # Remaining 90% goes to version 2 (the primary version) } } } diff --git a/website/docs/r/lambda_event_source_mapping.html.markdown b/website/docs/r/lambda_event_source_mapping.html.markdown index e07e63dcf04d..3260f1086d1f 100644 --- a/website/docs/r/lambda_event_source_mapping.html.markdown +++ b/website/docs/r/lambda_event_source_mapping.html.markdown @@ -234,6 +234,7 @@ The following arguments are optional: ### amazon_managed_kafka_event_source_config Configuration Block * `consumer_group_id` - (Optional) Kafka consumer group ID between 1 and 200 characters for use when creating this event source mapping. If one is not specified, this value will be automatically generated. See [AmazonManagedKafkaEventSourceConfig Syntax](https://docs.aws.amazon.com/lambda/latest/dg/API_AmazonManagedKafkaEventSourceConfig.html). +* `schema_registry_config` - (Optional) Block for a Kafka schema registry setting. [See below](#schema_registry_config-configuration-block). ### destination_config Configuration Block @@ -277,12 +278,23 @@ The following arguments are optional: ### self_managed_kafka_event_source_config Configuration Block * `consumer_group_id` - (Optional) Kafka consumer group ID between 1 and 200 characters for use when creating this event source mapping. If one is not specified, this value will be automatically generated. See [SelfManagedKafkaEventSourceConfig Syntax](https://docs.aws.amazon.com/lambda/latest/dg/API_SelfManagedKafkaEventSourceConfig.html). 
+* `schema_registry_config` - (Optional) Block for a Kafka schema registry setting. [See below](#schema_registry_config-configuration-block). ### source_access_configuration Configuration Block * `type` - (Required) Type of authentication protocol, VPC components, or virtual host for your event source. For valid values, refer to the [AWS documentation](https://docs.aws.amazon.com/lambda/latest/api/API_SourceAccessConfiguration.html). * `uri` - (Required) URI for this configuration. For type `VPC_SUBNET` the value should be `subnet:subnet_id` where `subnet_id` is the value you would find in an aws_subnet resource's id attribute. For type `VPC_SECURITY_GROUP` the value should be `security_group:security_group_id` where `security_group_id` is the value you would find in an aws_security_group resource's id attribute. +### schema_registry_config Configuration Block + +* `access_config` - (Optional) Configuration block for authentication Lambda uses to access the schema registry. + * `type` - (Optional) Authentication type Lambda uses to access the schema registry. + * `uri` - (Optional) URI of the secret (Secrets Manager secret ARN) used to authenticate with the schema registry. +* `event_record_format` - (Optional) Record format that Lambda delivers to the function after schema validation. Valid values: `JSON`, `SOURCE`. +* `schema_registry_uri` - (Optional) URI of the schema registry. For AWS Glue schema registries, use the ARN of the registry. For Confluent schema registries, use the registry URL. +* `schema_validation_config` - (Optional) Repeatable block that defines schema validation settings. These specify the message attributes that Lambda should validate and filter using the schema registry. + * `attribute` - (Optional) Message attribute to validate. Valid values: `KEY`, `VALUE`. 
+ ## Attribute Reference This resource exports the following attributes in addition to the arguments above: diff --git a/website/docs/r/lambda_function.html.markdown b/website/docs/r/lambda_function.html.markdown index 841ace6f5dad..f20d24b0fd80 100644 --- a/website/docs/r/lambda_function.html.markdown +++ b/website/docs/r/lambda_function.html.markdown @@ -257,6 +257,101 @@ resource "aws_lambda_function" "example" { } ``` +### Function with logging to S3 or Data Firehose + +#### Required Resources + +* An S3 bucket or Data Firehose delivery stream to store the logs. +* A CloudWatch Log Group with: + + * `log_group_class = "DELIVERY"` + * A subscription filter whose `destination_arn` points to the S3 bucket or the Data Firehose delivery stream. + +* IAM roles: + + * Assumed by the `logs.amazonaws.com` service to deliver logs to the S3 bucket or Data Firehose delivery stream. + * Assumed by the `lambda.amazonaws.com` service to send logs to CloudWatch Logs + +* A Lambda function: + + * In the `logging_configuration`, specify the name of the Log Group created above using the `log_group` field + * No special configuration is required to use S3 or Firehose as the log destination + +For more details, see [Sending Lambda function logs to Amazon S3](https://docs.aws.amazon.com/lambda/latest/dg/logging-with-s3.html). 
+ +#### Example: Exporting Lambda Logs to S3 Bucket + +```terraform +locals { + lambda_function_name = "lambda-log-export-example" +} + +resource "aws_s3_bucket" "lambda_log_export" { + bucket = "${local.lambda_function_name}-bucket" +} + +resource "aws_cloudwatch_log_group" "export" { + name = "/aws/lambda/${local.lambda_function_name}" + log_group_class = "DELIVERY" +} + +data "aws_iam_policy_document" "logs_assume_role" { + statement { + actions = ["sts:AssumeRole"] + effect = "Allow" + principals { + type = "Service" + identifiers = ["logs.amazonaws.com"] + } + } +} + +resource "aws_iam_role" "logs_log_export" { + name = "${local.lambda_function_name}-lambda-log-export-role" + assume_role_policy = data.aws_iam_policy_document.logs_assume_role.json +} + +data "aws_iam_policy_document" "lambda_log_export" { + statement { + actions = [ + "s3:PutObject", + ] + effect = "Allow" + resources = [ + "${aws_s3_bucket.lambda_log_export.arn}/*" + ] + } +} + +resource "aws_iam_role_policy" "lambda_log_export" { + policy = data.aws_iam_policy_document.lambda_log_export.json + role = aws_iam_role.logs_log_export.name +} + +resource "aws_cloudwatch_log_subscription_filter" "lambda_log_export" { + name = "${local.lambda_function_name}-filter" + log_group_name = aws_cloudwatch_log_group.export.name + filter_pattern = "" + destination_arn = aws_s3_bucket.lambda_log_export.arn + role_arn = aws_iam_role.logs_log_export.arn +} + +resource "aws_lambda_function" "log_export" { + function_name = local.lambda_function_name + handler = "index.lambda_handler" + runtime = "python3.13" + role = aws_iam_role.example.arn + filename = "function.zip" + logging_config { + log_format = "Text" + log_group = aws_cloudwatch_log_group.export.name + } + depends_on = [ + aws_cloudwatch_log_group.export + ] +} +``` + ### Function with Error Handling ```terraform @@ -426,6 +521,7 @@ The following arguments are optional: * `skip_destroy` - (Optional) Whether to retain the old version of a previously 
deployed Lambda Layer. Default is `false`. * `snap_start` - (Optional) Configuration block for snap start settings. [See below](#snap_start-configuration-block). * `source_code_hash` - (Optional) Base64-encoded SHA256 hash of the package file. Used to trigger updates when source code changes. +* `source_kms_key_arn` - (Optional) ARN of the AWS Key Management Service key used to encrypt the function's `.zip` deployment package. Conflicts with `image_uri`. * `tags` - (Optional) Key-value map of tags for the Lambda function. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. * `timeout` - (Optional) Amount of time your Lambda Function has to run in seconds. Defaults to 3. Valid between 1 and 900. * `tracing_config` - (Optional) Configuration block for X-Ray tracing. [See below](#tracing_config-configuration-block). @@ -471,6 +567,8 @@ The following arguments are optional: ### vpc_config Configuration Block +~> **NOTE:** If `subnet_ids`, `security_group_ids` and `ipv6_allowed_for_dual_stack` are empty then `vpc_config` is considered to be empty or unset. + * `ipv6_allowed_for_dual_stack` - (Optional) Whether to allow outbound IPv6 traffic on VPC functions connected to dual-stack subnets. Default: `false`. * `security_group_ids` - (Required) List of security group IDs associated with the Lambda function. * `subnet_ids` - (Required) List of subnet IDs associated with the Lambda function. @@ -503,6 +601,32 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. 
For example: + +```terraform +import { + to = aws_lambda_function.example + identity = { + function_name = "example" + } +} + +resource "aws_lambda_function" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `function_name` (String) Name of the Lambda function. + +#### Optional + +* `account_id` (String) AWS Account where this resource is managed. +* `region` (String) Region where this resource is managed. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Lambda Functions using the `function_name`. For example: ```terraform diff --git a/website/docs/r/lambda_permission.html.markdown b/website/docs/r/lambda_permission.html.markdown index fb7840db8095..e6c40308fcf2 100644 --- a/website/docs/r/lambda_permission.html.markdown +++ b/website/docs/r/lambda_permission.html.markdown @@ -215,7 +215,7 @@ resource "aws_lambda_permission" "logging" { The following arguments are required: * `action` - (Required) Lambda action to allow in this statement (e.g., `lambda:InvokeFunction`) -* `function_name` - (Required) Name of the Lambda function +* `function_name` - (Required) Name or ARN of the Lambda function * `principal` - (Required) AWS service or account that invokes the function (e.g., `s3.amazonaws.com`, `sns.amazonaws.com`, AWS account ID, or AWS IAM principal) The following arguments are optional: @@ -236,11 +236,40 @@ This resource exports no additional attributes. ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. 
For example: + +```terraform +import { + to = aws_lambda_permission.example + identity = { + function_name = "my_test_lambda_function" + statement_id = "AllowExecutionFromCloudWatch" + } +} + +resource "aws_lambda_permission" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `function_name` (String) Lambda function name. +* `statement_id` (String) Statement ID for the permission. + +#### Optional + +* `account_id` (String) AWS Account where this resource is managed. +* `qualifier` (String) Qualifier for the function version or alias. +* `region` (String) Region where this resource is managed. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Lambda permission statements using function_name/statement_id with an optional qualifier. For example: ```terraform import { - to = aws_lambda_permission.test_lambda_permission + to = aws_lambda_permission.example id = "my_test_lambda_function/AllowExecutionFromCloudWatch" } ``` @@ -249,7 +278,7 @@ Using `qualifier`: ```terraform import { - to = aws_lambda_permission.test_lambda_permission + to = aws_lambda_permission.example id = "my_test_lambda_function:qualifier_name/AllowExecutionFromCloudWatch" } ``` @@ -258,5 +287,5 @@ For backwards compatibility, the following legacy `terraform import` commands ar ```console % terraform import aws_lambda_permission.example my_test_lambda_function/AllowExecutionFromCloudWatch -% terraform import aws_lambda_permission.test_lambda_permission my_test_lambda_function:qualifier_name/AllowExecutionFromCloudWatch +% terraform import aws_lambda_permission.example my_test_lambda_function:qualifier_name/AllowExecutionFromCloudWatch ``` diff --git a/website/docs/r/launch_template.html.markdown b/website/docs/r/launch_template.html.markdown index e904ec8a1234..0f9379969907 100644 --- a/website/docs/r/launch_template.html.markdown +++ 
b/website/docs/r/launch_template.html.markdown @@ -173,7 +173,7 @@ The `ebs` block supports the following: Cannot be used with `snapshot_id`. * `iops` - (Optional) The amount of provisioned [IOPS](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-io-characteristics.html). This must be set with a `volume_type` of `"io1/io2/gp3"`. -* `kms_key_id` - (Optional) The ARN of the AWS Key Management Service (AWS KMS) customer master key (CMK) to use when creating the encrypted volume. +* `kms_key_id` - (Optional) Identifier (key ID, key alias, key ARN, or alias ARN) of the customer managed KMS key to use for EBS encryption. `encrypted` must be set to `true` when this is set. * `snapshot_id` - (Optional) The Snapshot ID to mount. * `throughput` - (Optional) The throughput to provision for a `gp3` volume in MiB/s (specified as an integer, e.g., 500), with a maximum of 1,000 MiB/s. @@ -186,7 +186,7 @@ The `ebs` block supports the following: The `capacity_reservation_specification` block supports the following: -* `capacity_reservation_preference` - Indicates the instance's Capacity Reservation preferences. Can be `open` or `none`. (Default `none`). +* `capacity_reservation_preference` - Indicates the instance's Capacity Reservation preferences. Can be `capacity-reservations-only`, `open` or `none`. If `capacity_reservation_id` or `capacity_reservation_resource_group_arn` is specified in `capacity_reservation_target` block, either omit `capacity_reservation_preference` or set it to `capacity-reservations-only`. * `capacity_reservation_target` - Used to target a specific Capacity Reservation: The `capacity_reservation_target` block supports the following: @@ -461,7 +461,8 @@ The `placement` block supports the following: * `affinity` - (Optional) The affinity setting for an instance on a Dedicated Host. * `availability_zone` - (Optional) The Availability Zone for the instance. -* `group_name` - (Optional) The name of the placement group for the instance. 
+* `group_id` - (Optional) The ID of the placement group for the instance. Conflicts with `group_name`. +* `group_name` - (Optional) The name of the placement group for the instance. Conflicts with `group_id`. * `host_id` - (Optional) The ID of the Dedicated Host for the instance. * `host_resource_group_arn` - (Optional) The ARN of the Host Resource Group in which to launch instances. * `spread_domain` - (Optional) Reserved for future use. diff --git a/website/docs/r/lb.html.markdown b/website/docs/r/lb.html.markdown index 8d740d37bf72..973308e17517 100644 --- a/website/docs/r/lb.html.markdown +++ b/website/docs/r/lb.html.markdown @@ -123,6 +123,7 @@ This resource supports the following arguments: * `name_prefix` - (Optional) Creates a unique name beginning with the specified prefix. Conflicts with `name`. * `security_groups` - (Optional) List of security group IDs to assign to the LB. Only valid for Load Balancers of type `application` or `network`. For load balancers of type `network` security groups cannot be added if none are currently present, and cannot all be removed once added. If either of these conditions are met, this will force a recreation of the resource. * `preserve_host_header` - (Optional) Whether the Application Load Balancer should preserve the Host header in the HTTP request and send it to the target without any change. Defaults to `false`. +* `secondary_ips_auto_assigned_per_subnet` - (Optional) The number of secondary IP addresses to configure for your load balancer nodes. Only valid for Load Balancers of type `network`. The valid range is 0-7. When decreased, this will force a recreation of the resource. Default: `0`. * `subnet_mapping` - (Optional) Subnet mapping block. See below. For Load Balancers of type `network` subnet mappings can only be added. * `subnets` - (Optional) List of subnet IDs to attach to the LB. 
For Load Balancers of type `network` subnets can only be added (see [Availability Zones](https://docs.aws.amazon.com/elasticloadbalancing/latest/network/network-load-balancers.html#availability-zones)), deleting a subnet for load balancers of type `network` will force a recreation of the resource. * `tags` - (Optional) Map of tags to assign to the resource. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. @@ -180,6 +181,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_lb.example + identity = { + "arn" = "arn:aws:elasticloadbalancing:us-west-2:123456789012:loadbalancer/app/my-load-balancer/50dc6c495c0c9188" + } +} + +resource "aws_lb" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the load balancer. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import LBs using their ARN. For example: ```terraform diff --git a/website/docs/r/lb_listener.html.markdown b/website/docs/r/lb_listener.html.markdown index f163ab2cc78c..f17a72c4bc30 100644 --- a/website/docs/r/lb_listener.html.markdown +++ b/website/docs/r/lb_listener.html.markdown @@ -39,6 +39,45 @@ resource "aws_lb_listener" "front_end" { } ``` +With weighted target groups: + +```terraform +resource "aws_lb" "front_end" { + # ... +} + +resource "aws_lb_target_group" "front_end_blue" { + # ... +} + +resource "aws_lb_target_group" "front_end_green" { + # ... 
+} + +resource "aws_lb_listener" "front_end" { + load_balancer_arn = aws_lb.front_end.arn + port = "443" + protocol = "HTTPS" + ssl_policy = "ELBSecurityPolicy-2016-08" + certificate_arn = "arn:aws:iam::187416307283:server-certificate/test_cert_rab3wuqwgja25ct3n4jdj2tzu4" + + default_action { + type = "forward" + + forward { + target_group { + arn = aws_lb_target_group.front_end_blue.arn + weight = 100 + } + target_group { + arn = aws_lb_target_group.front_end_green.arn + weight = 0 + } + } + } +} +``` + To a NLB: ```terraform @@ -429,6 +468,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_lb_listener.example + identity = { + "arn" = "arn:aws:elasticloadbalancing:us-west-2:187416307283:listener/app/front-end-alb/8e4497da625e2d8a/9ab28ade35828f96" + } +} + +resource "aws_lb_listener" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the load balancer listener. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import listeners using their ARN. For example: ```terraform diff --git a/website/docs/r/lb_listener_rule.html.markdown b/website/docs/r/lb_listener_rule.html.markdown index ac9da4c71dc4..34d996f0eae0 100644 --- a/website/docs/r/lb_listener_rule.html.markdown +++ b/website/docs/r/lb_listener_rule.html.markdown @@ -341,6 +341,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. 
For example: + +```terraform +import { + to = aws_lb_listener_rule.example + identity = { + "arn" = "arn:aws:elasticloadbalancing:us-west-2:123456789012:listener-rule/app/my-load-balancer/50dc6c495c0c9188/f2f7dc8efc522ab2/9683b2d02a6cabee" + } +} + +resource "aws_lb_listener_rule" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the load balancer listener rule. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import rules using their ARN. For example: ```terraform diff --git a/website/docs/r/lb_target_group.html.markdown b/website/docs/r/lb_target_group.html.markdown index 2140b8023e38..c5eeaf71a207 100644 --- a/website/docs/r/lb_target_group.html.markdown +++ b/website/docs/r/lb_target_group.html.markdown @@ -233,6 +233,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_lb_target_group.example + identity = { + "arn" = "arn:aws:elasticloadbalancing:us-west-2:123456789012:targetgroup/my-targets/73e2d6bc24d8a067" + } +} + +resource "aws_lb_target_group" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the target group. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Target Groups using their ARN. 
For example: ```terraform diff --git a/website/docs/r/lb_trust_store.html.markdown b/website/docs/r/lb_trust_store.html.markdown index 3db20580248f..642ab27a31d0 100644 --- a/website/docs/r/lb_trust_store.html.markdown +++ b/website/docs/r/lb_trust_store.html.markdown @@ -62,6 +62,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_lb_trust_store.example + identity = { + "arn" = "arn:aws:elasticloadbalancing:us-west-2:123456789012:truststore/my-trust-store/73e2d6bc24d8a067" + } +} + +resource "aws_lb_trust_store" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the trust store. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Trust Stores using their ARN. For example: ```terraform diff --git a/website/docs/r/macie2_member.html.markdown b/website/docs/r/macie2_member.html.markdown index f7f047eb0b09..58fb5ba80989 100644 --- a/website/docs/r/macie2_member.html.markdown +++ b/website/docs/r/macie2_member.html.markdown @@ -32,7 +32,6 @@ This resource supports the following arguments: * `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `account_id` - (Required) The AWS account ID for the account. * `email` - (Required) The email address for the account. -* `tags` - (Optional) A map of key-value pairs that specifies the tags to associate with the account in Amazon Macie. 
* `status` - (Optional) Specifies the status for the account. To enable Amazon Macie and start all Macie activities for the account, set this value to `ENABLED`. Valid values are `ENABLED` or `PAUSED`. * `invite` - (Optional) Send an invitation to a member * `invitation_message` - (Optional) A custom message to include in the invitation. Amazon Macie adds this message to the standard content that it sends for an invitation. diff --git a/website/docs/r/msk_cluster.html.markdown b/website/docs/r/msk_cluster.html.markdown index 3580ab60c116..35ad4c1ae792 100644 --- a/website/docs/r/msk_cluster.html.markdown +++ b/website/docs/r/msk_cluster.html.markdown @@ -204,16 +204,16 @@ resource "aws_msk_cluster" "example" { This resource supports the following arguments: * `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). -* `broker_node_group_info` - (Required) Configuration block for the broker nodes of the Kafka cluster. +* `broker_node_group_info` - (Required) Configuration block for the broker nodes of the Kafka cluster. See [broker_node_group_info Argument Reference](#broker_node_group_info-argument-reference) below. * `cluster_name` - (Required) Name of the MSK cluster. * `kafka_version` - (Required) Specify the desired Kafka software version. * `number_of_broker_nodes` - (Required) The desired total number of broker nodes in the kafka cluster. It must be a multiple of the number of specified client subnets. -* `client_authentication` - (Optional) Configuration block for specifying a client authentication. See below. -* `configuration_info` - (Optional) Configuration block for specifying a MSK Configuration to attach to Kafka brokers. See below. -* `encryption_info` - (Optional) Configuration block for specifying encryption. 
See below. +* `client_authentication` - (Optional) Configuration block for specifying a client authentication. See [client_authentication Argument Reference](#client_authentication-argument-reference) below. +* `configuration_info` - (Optional) Configuration block for specifying an MSK Configuration to attach to Kafka brokers. See [configuration_info Argument Reference](#configuration_info-argument-reference) below. +* `encryption_info` - (Optional) Configuration block for specifying encryption. See [encryption_info Argument Reference](#encryption_info-argument-reference) below. * `enhanced_monitoring` - (Optional) Specify the desired enhanced MSK CloudWatch monitoring level. See [Monitoring Amazon MSK with Amazon CloudWatch](https://docs.aws.amazon.com/msk/latest/developerguide/monitoring.html) -* `open_monitoring` - (Optional) Configuration block for JMX and Node monitoring for the MSK cluster. See below. -* `logging_info` - (Optional) Configuration block for streaming broker logs to Cloudwatch/S3/Kinesis Firehose. See below. +* `open_monitoring` - (Optional) Configuration block for JMX and Node monitoring for the MSK cluster. See [open_monitoring Argument Reference](#open_monitoring-argument-reference) below. +* `logging_info` - (Optional) Configuration block for streaming broker logs to Cloudwatch/S3/Kinesis Firehose. See [logging_info Argument Reference](#logging_info-argument-reference) below. * `storage_mode` - (Optional) Controls storage mode for supported storage tiers. Valid values are: `LOCAL` or `TIERED`. * `tags` - (Optional) A map of tags to assign to the resource. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. 
@@ -222,14 +222,14 @@ This resource supports the following arguments: * `client_subnets` - (Required) A list of subnets to connect to in client VPC ([documentation](https://docs.aws.amazon.com/msk/1.0/apireference/clusters.html#clusters-prop-brokernodegroupinfo-clientsubnets)). * `instance_type` - (Required) Specify the instance type to use for the kafka brokersE.g., kafka.m5.large. ([Pricing info](https://aws.amazon.com/msk/pricing/)) * `security_groups` - (Required) A list of the security groups to associate with the elastic network interfaces to control who can communicate with the cluster. -* `az_distribution` - (Optional) The distribution of broker nodes across availability zones ([documentation](https://docs.aws.amazon.com/msk/1.0/apireference/clusters.html#clusters-model-brokerazdistribution)). Currently the only valid value is `DEFAULT`. -* `connectivity_info` - (Optional) Information about the cluster access configuration. See below. For security reasons, you can't turn on public access while creating an MSK cluster. However, you can update an existing cluster to make it publicly accessible. You can also create a new cluster and then update it to make it publicly accessible ([documentation](https://docs.aws.amazon.com/msk/latest/developerguide/public-access.html)). -* `storage_info` - (Optional) A block that contains information about storage volumes attached to MSK broker nodes. See below. +* `az_distribution` - (Optional) The distribution of broker nodes across availability zones ([documentation](https://docs.aws.amazon.com/msk/1.0/apireference/clusters.html#clusters-model-brokerazdistribution)). Currently, the only valid value is `DEFAULT`. +* `connectivity_info` - (Optional) Information about the cluster access configuration. See [broker_node_group_info connectivity_info Argument Reference](#broker_node_group_info-connectivity_info-argument-reference) below. For security reasons, you can't turn on public access while creating an MSK cluster. 
However, you can update an existing cluster to make it publicly accessible. You can also create a new cluster and then update it to make it publicly accessible ([documentation](https://docs.aws.amazon.com/msk/latest/developerguide/public-access.html)). +* `storage_info` - (Optional) A block that contains information about storage volumes attached to MSK broker nodes. See [broker_node_group_info storage_info Argument Reference](#broker_node_group_info-storage_info-argument-reference) below. ### broker_node_group_info connectivity_info Argument Reference -* `public_access` - (Optional) Access control settings for brokers. See below. -* `vpc_connectivity` - (Optional) VPC connectivity access control for brokers. See below. +* `public_access` - (Optional) Access control settings for brokers. See [connectivity_info public_access Argument Reference](#connectivity_info-public_access-argument-reference) below. +* `vpc_connectivity` - (Optional) VPC connectivity access control for brokers. See [connectivity_info vpc_connectivity Argument Reference](#connectivity_info-vpc_connectivity-argument-reference) below. ### connectivity_info public_access Argument Reference @@ -237,11 +237,11 @@ This resource supports the following arguments: ### connectivity_info vpc_connectivity Argument Reference -* `client_authentication` - (Optional) Includes all client authentication information for VPC connectivity. See below. +* `client_authentication` - (Optional) Includes all client authentication information for VPC connectivity. See [vpc_connectivity client_authentication Argument Reference](#vpc_connectivity-client_authentication-argument-reference) below. ### vpc_connectivity client_authentication Argument Reference -* `sasl` - (Optional) SASL authentication type details for VPC connectivity. See below. +* `sasl` - (Optional) SASL authentication type details for VPC connectivity. 
See [vpc_connectivity client_authentication sasl Argument Reference](#vpc_connectivity-client_authentication-sasl-argument-reference) below. * `tls` - (Optional) Enables TLS authentication for VPC connectivity. ### vpc_connectivity client_authentication sasl Argument Reference @@ -251,11 +251,11 @@ This resource supports the following arguments: ### broker_node_group_info storage_info Argument Reference -* `ebs_storage_info` - (Optional) A block that contains EBS volume information. See below. +* `ebs_storage_info` - (Optional) A block that contains EBS volume information. See [storage_info ebs_storage_info Argument Reference](#storage_info-ebs_storage_info-argument-reference) below. ### storage_info ebs_storage_info Argument Reference -* `provisioned_throughput` - (Optional) A block that contains EBS volume provisioned throughput information. To provision storage throughput, you must choose broker type kafka.m5.4xlarge or larger. See below. +* `provisioned_throughput` - (Optional) A block that contains EBS volume provisioned throughput information. To provision storage throughput, you must choose broker type kafka.m5.4xlarge or larger. See [ebs_storage_info provisioned_throughput Argument Reference](#ebs_storage_info-provisioned_throughput-argument-reference) below. * `volume_size` - (Optional) The size in GiB of the EBS volume for the data drive on each broker node. Minimum value of `1` and maximum value of `16384`. ### ebs_storage_info provisioned_throughput Argument Reference @@ -265,8 +265,8 @@ This resource supports the following arguments: ### client_authentication Argument Reference -* `sasl` - (Optional) Configuration block for specifying SASL client authentication. See below. -* `tls` - (Optional) Configuration block for specifying TLS client authentication. See below. +* `sasl` - (Optional) Configuration block for specifying SASL client authentication. 
See [client_authentication sasl Argument Reference](#client_authentication-sasl-argument-reference) below. +* `tls` - (Optional) Configuration block for specifying TLS client authentication. See [client_authentication tls Argument Reference](#client_authentication-tls-argument-reference) below. * `unauthenticated` - (Optional) Enables unauthenticated access. #### client_authentication sasl Argument Reference @@ -285,7 +285,7 @@ This resource supports the following arguments: ### encryption_info Argument Reference -* `encryption_in_transit` - (Optional) Configuration block to specify encryption in transit. See below. +* `encryption_in_transit` - (Optional) Configuration block to specify encryption in transit. See [encryption_info encryption_in_transit Argument Reference](#encryption_info-encryption_in_transit-argument-reference) below. * `encryption_at_rest_kms_key_arn` - (Optional) You may specify a KMS key short ID or ARN (it will always output an ARN) to use for encrypting your data at rest. If no key is specified, an AWS managed KMS ('aws/msk' managed service) key will be used for encrypting the data at rest. #### encryption_info encryption_in_transit Argument Reference @@ -295,12 +295,12 @@ This resource supports the following arguments: #### open_monitoring Argument Reference -* `prometheus` - (Required) Configuration block for Prometheus settings for open monitoring. See below. +* `prometheus` - (Required) Configuration block for Prometheus settings for open monitoring. See [open_monitoring prometheus Argument Reference](#open_monitoring-prometheus-argument-reference) below. #### open_monitoring prometheus Argument Reference -* `jmx_exporter` - (Optional) Configuration block for JMX Exporter. See below. -* `node_exporter` - (Optional) Configuration block for Node Exporter. See below. +* `jmx_exporter` - (Optional) Configuration block for JMX Exporter. 
See [open_monitoring prometheus jmx_exporter Argument Reference](#open_monitoring-prometheus-jmx_exporter-argument-reference) below. +* `node_exporter` - (Optional) Configuration block for Node Exporter. See [open_monitoring prometheus node_exporter Argument Reference](#open_monitoring-prometheus-node_exporter-argument-reference) below. #### open_monitoring prometheus jmx_exporter Argument Reference @@ -312,7 +312,13 @@ This resource supports the following arguments: #### logging_info Argument Reference -* `broker_logs` - (Required) Configuration block for Broker Logs settings for logging info. See below. +* `broker_logs` - (Required) Configuration block for Broker Logs settings for logging info. See [logging_info broker_logs Argument Reference](#logging_info-broker_logs-argument-reference) below. + +#### logging_info broker_logs Argument Reference + +* `cloudwatch_logs` - (Optional) Configuration block for Cloudwatch Logs settings. See [logging_info broker_logs cloudwatch_logs Argument Reference](#logging_info-broker_logs-cloudwatch_logs-argument-reference) below. +* `firehose` - (Optional) Configuration block for Kinesis Data Firehose settings. See [logging_info broker_logs firehose Argument Reference](#logging_info-broker_logs-firehose-argument-reference) below. +* `s3` - (Optional) Configuration block for S3 settings. See [logging_info broker_logs s3 Argument Reference](#logging_info-broker_logs-s3-argument-reference) below. 
#### logging_info broker_logs cloudwatch_logs Argument Reference diff --git a/website/docs/r/mwaa_environment.html.markdown b/website/docs/r/mwaa_environment.html.markdown index fdbd254f9b6d..8c5e7ae1106d 100644 --- a/website/docs/r/mwaa_environment.html.markdown +++ b/website/docs/r/mwaa_environment.html.markdown @@ -126,7 +126,6 @@ resource "aws_mwaa_environment" "example" { This resource supports the following arguments: -* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `airflow_configuration_options` - (Optional) The `airflow_configuration_options` parameter specifies airflow override options. Check the [Official documentation](https://docs.aws.amazon.com/mwaa/latest/userguide/configuring-env-variables.html#configuring-env-variables-reference) for all possible configuration options. * `airflow_version` - (Optional) Airflow version of your environment, will be set by default to the latest version that MWAA supports. * `dag_s3_path` - (Required) The relative path to the DAG folder on your Amazon S3 storage bucket. For example, dags. For more information, see [Importing DAGs on Amazon MWAA](https://docs.aws.amazon.com/mwaa/latest/userguide/configuring-dag-import.html). @@ -143,15 +142,17 @@ This resource supports the following arguments: * `network_configuration` - (Required) Specifies the network configuration for your Apache Airflow Environment. This includes two private subnets as well as security groups for the Airflow environment. Each subnet requires internet connection, otherwise the deployment will fail. See [`network_configuration` Block](#network_configuration-block) for details. * `plugins_s3_object_version` - (Optional) The plugins.zip file version you want to use. 
* `plugins_s3_path` - (Optional) The relative path to the plugins.zip file on your Amazon S3 storage bucket. For example, plugins.zip. If a relative path is provided in the request, then plugins_s3_object_version is required. For more information, see [Importing DAGs on Amazon MWAA](https://docs.aws.amazon.com/mwaa/latest/userguide/configuring-dag-import.html). +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `requirements_s3_object_version` - (Optional) The requirements.txt file version you want to use. * `requirements_s3_path` - (Optional) The relative path to the requirements.txt file on your Amazon S3 storage bucket. For example, requirements.txt. If a relative path is provided in the request, then requirements_s3_object_version is required. For more information, see [Importing DAGs on Amazon MWAA](https://docs.aws.amazon.com/mwaa/latest/userguide/configuring-dag-import.html). * `schedulers` - (Optional) The number of schedulers that you want to run in your environment. v2.0.2 and above accepts `2` - `5`, default `2`. v1.10.12 accepts `1`. * `source_bucket_arn` - (Required) The Amazon Resource Name (ARN) of your Amazon S3 storage bucket. For example, arn:aws:s3:::airflow-mybucketname. * `startup_script_s3_object_version` - (Optional) The version of the startup shell script you want to use. You must specify the version ID that Amazon S3 assigns to the file every time you update the script. * `startup_script_s3_path` - (Optional) The relative path to the script hosted in your bucket. The script runs as your environment starts before starting the Apache Airflow process. Use this script to install dependencies, modify configuration options, and set environment variables. 
See [Using a startup script](https://docs.aws.amazon.com/mwaa/latest/userguide/using-startup-script.html). Supported for environment versions 2.x and later. +* `tags` - (Optional) A map of resource tags to associate with the resource. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. * `webserver_access_mode` - (Optional) Specifies whether the webserver should be accessible over the internet or via your specified VPC. Possible options: `PRIVATE_ONLY` (default) and `PUBLIC_ONLY`. * `weekly_maintenance_window_start` - (Optional) Specifies the start date for the weekly maintenance window. -* `tags` - (Optional) A map of resource tags to associate with the resource. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. +* `worker_replacement_strategy` - (Optional) Worker replacement strategy. Valid values: `FORCED`, `GRACEFUL`. ### `logging_configuration` Block diff --git a/website/docs/r/nat_gateway.html.markdown b/website/docs/r/nat_gateway.html.markdown index c98b5ec09ed2..c107209442b1 100644 --- a/website/docs/r/nat_gateway.html.markdown +++ b/website/docs/r/nat_gateway.html.markdown @@ -10,6 +10,8 @@ description: |- Provides a resource to create a VPC NAT Gateway. +!> **WARNING:** You should not use the `aws_nat_gateway` resource that has `secondary_allocation_ids` in conjunction with an [`aws_nat_gateway_eip_association`](nat_gateway_eip_association.html) resource. Doing so may cause perpetual differences, and result in associations being overwritten. 
+ ## Example Usage ### Public NAT @@ -63,14 +65,14 @@ resource "aws_nat_gateway" "example" { This resource supports the following arguments: -* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `allocation_id` - (Optional) The Allocation ID of the Elastic IP address for the NAT Gateway. Required for `connectivity_type` of `public`. * `connectivity_type` - (Optional) Connectivity type for the NAT Gateway. Valid values are `private` and `public`. Defaults to `public`. * `private_ip` - (Optional) The private IPv4 address to assign to the NAT Gateway. If you don't provide an address, a private IPv4 address will be automatically assigned. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `subnet_id` - (Required) The Subnet ID of the subnet in which to place the NAT Gateway. -* `secondary_allocation_ids` - (Optional) A list of secondary allocation EIP IDs for this NAT Gateway. +* `secondary_allocation_ids` - (Optional) A list of secondary allocation EIP IDs for this NAT Gateway. To remove all secondary allocations an empty list should be specified. * `secondary_private_ip_address_count` - (Optional) [Private NAT Gateway only] The number of secondary private IPv4 addresses you want to assign to the NAT Gateway. -* `secondary_private_ip_addresses` - (Optional) A list of secondary private IPv4 addresses to assign to the NAT Gateway. +* `secondary_private_ip_addresses` - (Optional) A list of secondary private IPv4 addresses to assign to the NAT Gateway. 
To remove all secondary private addresses an empty list should be specified. * `tags` - (Optional) A map of tags to assign to the resource. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. ## Attribute Reference diff --git a/website/docs/r/nat_gateway_eip_association.html.markdown b/website/docs/r/nat_gateway_eip_association.html.markdown new file mode 100644 index 000000000000..f7a7c6007bf0 --- /dev/null +++ b/website/docs/r/nat_gateway_eip_association.html.markdown @@ -0,0 +1,62 @@ +--- +subcategory: "VPC (Virtual Private Cloud)" +layout: "aws" +page_title: "AWS: aws_nat_gateway_eip_association" +description: |- + Terraform resource for managing an AWS VPC NAT Gateway EIP Association. +--- +# Resource: aws_nat_gateway_eip_association + +Terraform resource for managing an AWS VPC NAT Gateway EIP Association. + +!> **WARNING:** You should not use the `aws_nat_gateway_eip_association` resource in conjunction with an [`aws_nat_gateway`](nat_gateway.html) resource that has `secondary_allocation_ids` configured. Doing so may cause perpetual differences, and result in associations being overwritten. + +## Example Usage + +### Basic Usage + +```terraform +resource "aws_nat_gateway_eip_association" "example" { + allocation_id = aws_eip.example.id + nat_gateway_id = aws_nat_gateway.example.id +} +``` + +## Argument Reference + +The following arguments are required: + +* `allocation_id` - (Required) The ID of the Elastic IP Allocation to associate with the NAT Gateway. +* `nat_gateway_id` - (Required) The ID of the NAT Gateway to associate the Elastic IP Allocation to. + +The following arguments are optional: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). + +## Attribute Reference + +This resource exports no additional attributes. + +## Timeouts + +[Configuration options](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts): + +* `create` - (Default `10m`) +* `delete` - (Default `30m`) + +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import VPC NAT Gateway EIP Association using the `nat_gateway_id,allocation_id`. For example: + +```terraform +import { + to = aws_nat_gateway_eip_association.example + id = "nat-1234567890abcdef1,eipalloc-1234567890abcdef1" +} +``` + +Using `terraform import`, import VPC NAT Gateway EIP Association using the `nat_gateway_id,allocation_id`. For example: + +```console +% terraform import aws_nat_gateway_eip_association.example nat-1234567890abcdef1,eipalloc-1234567890abcdef1 +``` diff --git a/website/docs/r/network_interface.html.markdown b/website/docs/r/network_interface.html.markdown index f783129e37bf..a07837aaffc9 100644 --- a/website/docs/r/network_interface.html.markdown +++ b/website/docs/r/network_interface.html.markdown @@ -77,6 +77,7 @@ The `attachment` block supports the following: * `instance` - (Required) ID of the instance to attach to. * `device_index` - (Required) Integer to define the devices index. +* `network_card_index` - (Optional) Index of the network card. Specify a value greater than 0 when using multiple network cards, which are supported by [some instance types](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-eni.html#network-cards). The default is 0. 
## Attribute Reference diff --git a/website/docs/r/network_interface_attachment.html.markdown b/website/docs/r/network_interface_attachment.html.markdown index ef3750b6ee08..649d43179ea0 100644 --- a/website/docs/r/network_interface_attachment.html.markdown +++ b/website/docs/r/network_interface_attachment.html.markdown @@ -28,6 +28,7 @@ This resource supports the following arguments: * `instance_id` - (Required) Instance ID to attach. * `network_interface_id` - (Required) ENI ID to attach. * `device_index` - (Required) Network interface index (int). +* `network_card_index` - (Optional) Index of the network card. Specify a value greater than 0 when using multiple network cards, which are supported by [some instance types](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-eni.html#network-cards). The default is 0. ## Attribute Reference diff --git a/website/docs/r/networkfirewall_firewall.html.markdown b/website/docs/r/networkfirewall_firewall.html.markdown index 61031e84908c..756969b76bd2 100644 --- a/website/docs/r/networkfirewall_firewall.html.markdown +++ b/website/docs/r/networkfirewall_firewall.html.markdown @@ -35,11 +35,39 @@ resource "aws_networkfirewall_firewall" "example" { } ``` +### Transit Gateway Attached Firewall + +```terraform +data "aws_availability_zones" "example" { + state = "available" +} + +resource "aws_networkfirewall_firewall" "example" { + name = "example" + firewall_policy_arn = aws_networkfirewall_firewall_policy.example.arn + transit_gateway_id = aws_ec2_transit_gateway.example.id + + availability_zone_mapping { + availability_zone_id = data.aws_availability_zones.example.zone_ids[0] + } + + availability_zone_mapping { + availability_zone_id = data.aws_availability_zones.example.zone_ids[1] + } +} +``` + +### Transit Gateway Attached Firewall (Cross Account) + +A full example of how to create a Transit Gateway in one AWS account, share it with a second AWS account, and create Network Firewall in the second account to the 
Transit Gateway via the `aws_networkfirewall_firewall` and [`aws_networkfirewall_firewall_transit_gateway_attachment_accepter`](/docs/providers/aws/r/networkfirewall_firewall_transit_gateway_attachment_accepter.html) resources can be found in [the `./examples/network-firewall-cross-account-transit-gateway` directory within the Github Repository](https://github.com/hashicorp/terraform-provider-aws/tree/main/examples/network-firewall-cross-account-transit-gateway). + ## Argument Reference This resource supports the following arguments: * `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `availability_zone_change_protection` - (Optional) A setting indicating whether the firewall is protected against changes to its Availability Zone configuration. When set to `true`, you must first disable this protection before adding or removing Availability Zones. +* `availability_zone_mapping` - (Optional) Required when creating a transit gateway-attached firewall. Set of configuration blocks describing the Availability Zones where you want to create firewall endpoints for a transit gateway-attached firewall. * `delete_protection` - (Optional) A flag indicating whether the firewall is protected against deletion. Use this setting to protect against accidentally deleting a firewall that is in use. Defaults to `false`. * `description` - (Optional) A friendly description of the firewall. * `enabled_analysis_types` - (Optional) Set of types for which to collect analysis metrics. See [Reporting on network traffic in Network Firewall](https://docs.aws.amazon.com/network-firewall/latest/developerguide/reporting.html) for details on how to use the data. Valid values: `TLS_SNI`, `HTTP_HOST`. Defaults to `[]`. 
@@ -48,9 +76,16 @@ This resource supports the following arguments: * `firewall_policy_change_protection` - (Optional) A flag indicating whether the firewall is protected against a change to the firewall policy association. Use this setting to protect against accidentally modifying the firewall policy for a firewall that is in use. Defaults to `false`. * `name` - (Required, Forces new resource) A friendly name of the firewall. * `subnet_change_protection` - (Optional) A flag indicating whether the firewall is protected against changes to the subnet associations. Use this setting to protect against accidentally modifying the subnet associations for a firewall that is in use. Defaults to `false`. -* `subnet_mapping` - (Required) Set of configuration blocks describing the public subnets. Each subnet must belong to a different Availability Zone in the VPC. AWS Network Firewall creates a firewall endpoint in each subnet. See [Subnet Mapping](#subnet-mapping) below for details. +* `subnet_mapping` - (Optional) Required when creating a VPC attached firewall. Set of configuration blocks describing the public subnets. Each subnet must belong to a different Availability Zone in the VPC. AWS Network Firewall creates a firewall endpoint in each subnet. See [Subnet Mapping](#subnet-mapping) below for details. * `tags` - (Optional) Map of resource tags to associate with the resource. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. -* `vpc_id` - (Required, Forces new resource) The unique identifier of the VPC where AWS Network Firewall should create the firewall. +* `transit_gateway_id` - (Optional, Forces new resource) Required when creating a transit gateway-attached firewall. The unique identifier of the transit gateway to attach to this firewall. 
You can provide either a transit gateway from your account or one that has been shared with you through AWS Resource Access Manager. +* `vpc_id` - (Optional, Forces new resource) Required when creating a VPC attached firewall. The unique identifier of the VPC where AWS Network Firewall should create the firewall. + +### Availability Zone Mapping + +The `availability_zone_mapping` block supports the following arguments: + +* `availability_zone_id` - (Required) The ID of the Availability Zone where the firewall endpoint is located. ### Encryption Configuration @@ -78,16 +113,19 @@ This resource exports the following attributes in addition to the arguments abov * `endpoint_id` - The identifier of the firewall endpoint that AWS Network Firewall has instantiated in the subnet. You use this to identify the firewall endpoint in the VPC route tables, when you redirect the VPC traffic through the endpoint. * `subnet_id` - The unique identifier of the subnet that you've specified to be used for a firewall endpoint. * `availability_zone` - The Availability Zone where the subnet is configured. + * `transit_gateway_attachment_sync_states` - Set of transit gateway attachment sync states configured for use by the firewall. + * `attachment_id` - The unique identifier of the transit gateway attachment. * `tags_all` - A map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). +* `transit_gateway_owner_account_id` - The AWS account ID that owns the transit gateway. * `update_token` - A string token used when updating a firewall. 
## Timeouts [Configuration options](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts): -- `create` - (Default `30m`) -- `update` - (Default `30m`) -- `delete` - (Default `30m`) +- `create` - (Default `60m`) +- `update` - (Default `60m`) +- `delete` - (Default `60m`) ## Import diff --git a/website/docs/r/networkfirewall_firewall_transit_gateway_attachment_accepter.html.markdown b/website/docs/r/networkfirewall_firewall_transit_gateway_attachment_accepter.html.markdown new file mode 100644 index 000000000000..da51ea778fcb --- /dev/null +++ b/website/docs/r/networkfirewall_firewall_transit_gateway_attachment_accepter.html.markdown @@ -0,0 +1,63 @@ +--- +subcategory: "Network Firewall" +layout: "aws" +page_title: "AWS: aws_networkfirewall_firewall_transit_gateway_attachment_accepter" +description: |- + Manages an AWS Network Firewall Firewall Transit Gateway Attachment Accepter. +--- + +# Resource: aws_networkfirewall_firewall_transit_gateway_attachment_accepter + +Manages an AWS Network Firewall Firewall Transit Gateway Attachment Accepter. + +When a cross-account (requester's AWS account differs from the accepter's AWS account) requester creates a Network Firewall with Transit Gateway ID using `aws_networkfirewall_firewall`. Then an EC2 Transit Gateway VPC Attachment resource is automatically created in the accepter's account. +The accepter can use the `aws_networkfirewall_firewall_transit_gateway_attachment_accepter` resource to "adopt" its side of the connection into management. + +~> **NOTE:** If the `transit_gateway_id` argument in the `aws_networkfirewall_firewall` resource is used to attach a firewall to a transit gateway in a cross-account setup (where **Auto accept shared attachments** is disabled), the resource will be considered created when the transit gateway attachment is in the *Pending Acceptance* state and the firewall is in the *Provisioning* status. 
At this point, you can use the `aws_networkfirewall_firewall_transit_gateway_attachment_accepter` resource to finalize the network firewall deployment. Once the transit gateway attachment reaches the *Available* state, the firewall status changes to *Ready*. + +## Example Usage + +### Basic Usage + +```terraform +resource "aws_networkfirewall_firewall_transit_gateway_attachment_accepter" "example" { + transit_gateway_attachment_id = aws_networkfirewall_firewall.example.firewall_status[0].transit_gateway_attachment_sync_state[0].attachment_id +} +``` + +A full example of how to create a Transit Gateway in one AWS account, share it with a second AWS account, and attach a Network Firewall in the second account to the Transit Gateway via the `aws_networkfirewall_firewall` and `aws_networkfirewall_firewall_transit_gateway_attachment_accepter` resources can be found in [the `./examples/network-firewall-cross-account-transit-gateway` directory within the Github Repository](https://github.com/hashicorp/terraform-provider-aws/tree/main/examples/network-firewall-cross-account-transit-gateway). + +## Argument Reference + +This resource supports the following arguments: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `transit_gateway_attachment_id` - (Required) The unique identifier of the transit gateway attachment to accept. This ID is returned in the response when creating a transit gateway-attached firewall. + +## Attribute Reference + +This resource exports no additional attributes. 
+ +## Timeouts + +[Configuration options](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts): + +* `create` - (Default `60m`) +* `delete` - (Default `60m`) + +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Network Firewall Firewall Transit Gateway Attachment Accepter using the `transit_gateway_attachment_id`. For example: + +```terraform +import { + to = aws_networkfirewall_firewall_transit_gateway_attachment_accepter.example + id = "tgw-attach-0c3b7e9570eee089c" +} +``` + +Using `terraform import`, import Network Firewall Firewall Transit Gateway Attachment Accepter using the `transit_gateway_attachment_id`. For example: + +```console +% terraform import aws_networkfirewall_firewall_transit_gateway_attachment_accepter.example tgw-attach-0c3b7e9570eee089c +``` diff --git a/website/docs/r/networkfirewall_logging_configuration.html.markdown b/website/docs/r/networkfirewall_logging_configuration.html.markdown index 49d38bc794fb..7e15b5028393 100644 --- a/website/docs/r/networkfirewall_logging_configuration.html.markdown +++ b/website/docs/r/networkfirewall_logging_configuration.html.markdown @@ -70,6 +70,7 @@ This resource supports the following arguments: * `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `firewall_arn` - (Required, Forces new resource) The Amazon Resource Name (ARN) of the Network Firewall firewall. +* `enable_monitoring_dashboard` - (Optional) Whether to enable the detailed firewall monitoring dashboard on the firewall. Defaults to `false`. * `logging_configuration` - (Required) A configuration block describing how AWS Network Firewall performs logging for a firewall. 
See [Logging Configuration](#logging-configuration) below for details. ### Logging Configuration diff --git a/website/docs/r/networkfirewall_rule_group.html.markdown b/website/docs/r/networkfirewall_rule_group.html.markdown index 53e4b90ea974..449789fc6b47 100644 --- a/website/docs/r/networkfirewall_rule_group.html.markdown +++ b/website/docs/r/networkfirewall_rule_group.html.markdown @@ -519,7 +519,7 @@ The `dimension` block supports the following argument: The `destination` block supports the following argument: -* `address_definition` - (Required) An IP address or a block of IP addresses in CIDR notation. AWS Network Firewall supports all address ranges for IPv4. +* `address_definition` - (Required) An IP address or a block of IP addresses in CIDR notation. AWS Network Firewall supports all address ranges for IPv4 and IPv6. ### Destination Port @@ -533,7 +533,7 @@ The `destination_port` block supports the following arguments: The `source` block supports the following argument: -* `address_definition` - (Required) An IP address or a block of IP addresses in CIDR notation. AWS Network Firewall supports all address ranges for IPv4. +* `address_definition` - (Required) An IP address or a block of IP addresses in CIDR notation. AWS Network Firewall supports all address ranges for IPv4 and IPv6. ### Source Port diff --git a/website/docs/r/networkfirewall_tls_inspection_configuration.html.markdown b/website/docs/r/networkfirewall_tls_inspection_configuration.html.markdown index 9ada5afe5a14..a0122d39ab44 100644 --- a/website/docs/r/networkfirewall_tls_inspection_configuration.html.markdown +++ b/website/docs/r/networkfirewall_tls_inspection_configuration.html.markdown @@ -339,6 +339,27 @@ The `certificates` block exports the following attributes: ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. 
For example: + +```terraform +import { + to = aws_networkfirewall_tls_inspection_configuration.example + identity = { + "arn" = "arn:aws:network-firewall:us-west-2:123456789012:tls-configuration/example" + } +} + +resource "aws_networkfirewall_tls_inspection_configuration" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the Network Firewall TLS inspection configuration. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Network Firewall TLS Inspection Configuration using the `arn`. For example: ```terraform diff --git a/website/docs/r/networkfirewall_vpc_endpoint_association.html.markdown b/website/docs/r/networkfirewall_vpc_endpoint_association.html.markdown new file mode 100644 index 000000000000..8ee056c71140 --- /dev/null +++ b/website/docs/r/networkfirewall_vpc_endpoint_association.html.markdown @@ -0,0 +1,92 @@ +--- +subcategory: "Network Firewall" +layout: "aws" +page_title: "AWS: aws_networkfirewall_vpc_endpoint_association" +description: |- + Manages a firewall endpoint for an AWS Network Firewall firewall. +--- + +# Resource: aws_networkfirewall_vpc_endpoint_association + +Manages a firewall endpoint for an AWS Network Firewall firewall. + +Use `aws_networkfirewall_vpc_endpoint_association` to establish new firewall endpoints in any Availability Zone where the firewall is already being used. The first use of a firewall in an Availability Zone must be defined by `aws_networkfirewall_firewall` resource and `subnet_mapping` argument. 
+ +## Example Usage + +### Basic Usage + +```terraform +resource "aws_networkfirewall_vpc_endpoint_association" "example" { + firewall_arn = aws_networkfirewall_firewall.example.arn + vpc_id = aws_vpc.example.id + + subnet_mapping { + subnet_id = aws_subnet.example.id + } + + subnet_mapping { + subnet_id = aws_subnet.example_two.id + } + + tags = { + Name = "example endpoint" + } +} +``` + +## Argument Reference + +This resource supports the following arguments: + +* `description` (Optional) - A description of the VPC endpoint association. +* `firewall_arn` (Required) - The Amazon Resource Name (ARN) that identifies the firewall. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `subnet_mapping` (Required) - The ID for a subnet that's used in an association with a firewall. See [Subnet Mapping](#subnet-mapping) below for details. +* `tags` - (Optional) Map of resource tags to associate with the resource. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. +* `vpc_id` (Required) - The unique identifier of the VPC for the endpoint association. + +### Subnet Mapping + +The `subnet_mapping` block supports the following arguments: + +* `ip_address_type` - (Optional) The subnet's IP address type. Valid values: `"DUALSTACK"`, `"IPV4"`. +* `subnet_id` - (Required) The unique identifier for the subnet. 
+ +## Attribute Reference + +This resource exports the following attributes in addition to the arguments above: + +* `tags_all` - A map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). +* `vpc_endpoint_association_arn` - ARN of the VPC Endpoint Association. +* `vpc_endpoint_association_id` - The unique identifier of the VPC endpoint association. +* `vpc_endpoint_association_status` - Nested list of information about the current status of the VPC Endpoint Association. + * `association_sync_states` - Set of subnets configured for use by the VPC Endpoint Association. + * `attachment` - Nested list describing the attachment status of the firewall's VPC Endpoint Association with a single VPC subnet. + * `endpoint_id` - The identifier of the VPC endpoint that AWS Network Firewall has instantiated in the subnet. You use this to identify the firewall endpoint in the VPC route tables, when you redirect the VPC traffic through the endpoint. + * `subnet_id` - The unique identifier of the subnet that you've specified to be used for a VPC Endpoint Association endpoint. + * `availability_zone` - The Availability Zone where the subnet is configured. + +## Timeouts + +[Configuration options](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts): + +* `create` - (Default `30m`) +* `delete` - (Default `30m`) + +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Network Firewall VPC Endpoint Association using the `vpc_endpoint_association_arn`. 
For example: + +```terraform +import { + to = aws_networkfirewall_vpc_endpoint_association.example + id = "arn:aws:network-firewall:us-west-1:123456789012:vpc-endpoint-association/example" +} +``` + +Using `terraform import`, import Network Firewall VPC Endpoint Association using the `vpc_endpoint_association_arn`. For example: + +```console +% terraform import aws_networkfirewall_vpc_endpoint_association.example arn:aws:network-firewall:us-west-1:123456789012:vpc-endpoint-association/example +``` diff --git a/website/docs/r/networkmanager_transit_gateway_peering.html.markdown b/website/docs/r/networkmanager_transit_gateway_peering.html.markdown index 455407514f67..7f65eda9818d 100644 --- a/website/docs/r/networkmanager_transit_gateway_peering.html.markdown +++ b/website/docs/r/networkmanager_transit_gateway_peering.html.markdown @@ -16,6 +16,11 @@ Manages a Network Manager transit gateway peering connection. Creates a peering resource "aws_networkmanager_transit_gateway_peering" "example" { core_network_id = awscc_networkmanager_core_network.example.id transit_gateway_arn = aws_ec2_transit_gateway.example.arn + + depends_on = [ + aws_ec2_transit_gateway_policy_table.example, + aws_networkmanager_core_network_policy_attachment.example, + ] } ``` diff --git a/website/docs/r/networkmanager_vpc_attachment.html.markdown b/website/docs/r/networkmanager_vpc_attachment.html.markdown index 9ba65fb0428d..5dec7a754e1a 100644 --- a/website/docs/r/networkmanager_vpc_attachment.html.markdown +++ b/website/docs/r/networkmanager_vpc_attachment.html.markdown @@ -22,6 +22,23 @@ resource "aws_networkmanager_vpc_attachment" "example" { } ``` +### Usage with Options + +```terraform +resource "aws_networkmanager_vpc_attachment" "example" { + subnet_arns = [aws_subnet.example.arn] + core_network_id = awscc_networkmanager_core_network.example.id + vpc_arn = aws_vpc.example.arn + + options { + appliance_mode_support = false + dns_support = true + ipv6_support = false + 
security_group_referencing_support = true + } +} +``` + ## Argument Reference The following arguments are required: @@ -38,7 +55,9 @@ The following arguments are optional: ### options * `appliance_mode_support` - (Optional) Whether to enable appliance mode support. If enabled, traffic flow between a source and destination use the same Availability Zone for the VPC attachment for the lifetime of that flow. If the VPC attachment is pending acceptance, changing this value will recreate the resource. +* `dns_support` - (Optional) Whether to enable DNS support. If the VPC attachment is pending acceptance, changing this value will recreate the resource. * `ipv6_support` - (Optional) Whether to enable IPv6 support. If the VPC attachment is pending acceptance, changing this value will recreate the resource. +* `security_group_referencing_support` - (Optional) Whether to enable security group referencing support for this VPC attachment. The default is `true`. However, at the core network policy-level the default is set to `false`. If the VPC attachment is pending acceptance, changing this value will recreate the resource. ## Attribute Reference diff --git a/website/docs/r/odb_cloud_autonomous_vm_cluster.html.markdown b/website/docs/r/odb_cloud_autonomous_vm_cluster.html.markdown new file mode 100644 index 000000000000..53cc0ca3110d --- /dev/null +++ b/website/docs/r/odb_cloud_autonomous_vm_cluster.html.markdown @@ -0,0 +1,169 @@ +--- +subcategory: "Oracle Database@AWS" +layout: "AWS: aws_odb_cloud_autonomous_vm_cluster" +page_title: "AWS: aws_odb_cloud_autonomous_vm_cluster" +description: |- + Terraform resource managing cloud autonomous vm cluster in AWS for Oracle Database@AWS. +--- + +# Resource: aws_odb_cloud_autonomous_vm_cluster + +Terraform resource managing cloud autonomous vm cluster in AWS for Oracle Database@AWS. + +You can find out more about Oracle Database@AWS from [User Guide](https://docs.aws.amazon.com/odb/latest/UserGuide/what-is-odb.html). 
+ +## Example Usage + +### Basic Usage + +```terraform +resource "aws_odb_cloud_autonomous_vm_cluster" "avmc_with_minimum_parameters" { + cloud_exadata_infrastructure_id = "" + odb_network_id = "" + display_name = "my_autonomous_vm_cluster" + autonomous_data_storage_size_in_tbs = 5 + memory_per_oracle_compute_unit_in_gbs = 2 + total_container_databases = 1 + cpu_core_count_per_node = 40 + license_model = "LICENSE_INCLUDED" + # ids of db server. refer your exa infra. This is a manadatory fileld. Refer your cloud exadata infrastructure for db server id + db_servers = [""] + scan_listener_port_tls = 8561 + scan_listener_port_non_tls = 1024 + maintenance_window { + preference = "NO_PREFERENCE" + } + +} + + +resource "aws_odb_cloud_autonomous_vm_cluster" "avmc_with_all_params" { + description = "my first avmc" + time_zone = "UTC" + cloud_exadata_infrastructure_id = "" + odb_network_id = "" + display_name = "my_autonomous_vm_cluster" + autonomous_data_storage_size_in_tbs = 5 + memory_per_oracle_compute_unit_in_gbs = 2 + total_container_databases = 1 + cpu_core_count_per_node = 40 + license_model = "LICENSE_INCLUDED" + db_servers = ["", ""] + scan_listener_port_tls = 8561 + scan_listener_port_non_tls = 1024 + maintenance_window { + days_of_week = [{ name = "MONDAY" }, { name = "TUESDAY" }] + hours_of_day = [4, 16] + lead_time_in_weeks = 3 + months = [{ name = "FEBRUARY" }, { name = "MAY" }, { name = "AUGUST" }, { name = "NOVEMBER" }] + preference = "CUSTOM_PREFERENCE" + weeks_of_month = [2, 4] + } + tags = { + "env" = "dev" + } + +} + +``` + +## Argument Reference + +The following arguments are required: + +* `cloud_exadata_infrastructure_id` - (Required) Exadata infrastructure id. Changing this will force terraform to create new resource. +* `autonomous_data_storage_size_in_tbs` - (Required) The data storage size allocated for Autonomous Databases in the Autonomous VM cluster, in TB. Changing this will force terraform to create new resource. 
+* `cpu_core_count_per_node` - (Required) The number of CPU cores enabled per node in the Autonomous VM cluster. Changing this will force terraform to create new resource. +* `db_servers` - (Required) The database servers in the Autonomous VM cluster. Changing this will force terraform to create new resource. +* `display_name` - (Required) The display name of the Autonomous VM cluster. Changing this will force terraform to create new resource. +* `memory_per_oracle_compute_unit_in_gbs` - (Required) The amount of memory allocated per Oracle Compute Unit, in GB. Changing this will force terraform to create new resource. +* `odb_network_id` - (Required) The unique identifier of the ODB network associated with this Autonomous VM Cluster. Changing this will force terraform to create new resource. +* `scan_listener_port_non_tls` - (Required) The SCAN listener port for non-TLS (TCP) protocol. The default is 1521. Changing this will force terraform to create new resource. +* `scan_listener_port_tls` - (Required) The SCAN listener port for TLS (TCP) protocol. The default is 2484. Changing this will force terraform to create new resource. +* `total_container_databases` - (Required) The total number of Autonomous Container Databases that can be created with the allocated local storage. Changing this will force terraform to create new resource. +* `maintenance_window` - (Required) The maintenance window of the Autonomous VM cluster. Changing this will force terraform to create new resource. + +The following arguments are optional: + +* `description` - (Optional) The description of the Autonomous VM cluster. +* `is_mtls_enabled_vm_cluster` - (Optional) Indicates whether mutual TLS (mTLS) authentication is enabled for the Autonomous VM cluster. Changing this will force terraform to create new resource. +* `license_model` - (Optional) The license model for the Autonomous VM cluster. Valid values are LICENSE_INCLUDED or BRING_YOUR_OWN_LICENSE. 
Changing this will force terraform to create new resource. +* `time_zone` - (Optional) The time zone of the Autonomous VM cluster. Changing this will force terraform to create new resource. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `tags` - (Optional) A map of tags to assign to the exadata infrastructure. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. + +### maintenance_window + +* `preference` - (Required) The preference for the maintenance window scheduling. Changing this will force terraform to create new resource. +* `days_of_week` - (Optional) The days of the week when maintenance can be performed. Changing this will force terraform to create new resource. +* `hours_of_day` - (Optional) The hours of the day when maintenance can be performed. Changing this will force terraform to create new resource. +* `lead_time_in_weeks` - (Optional) The lead time in weeks before the maintenance window. Changing this will force terraform to create new resource. +* `months` - (Optional) The months when maintenance can be performed. Changing this will force terraform to create new resource. +* `weeks_of_month` - (Optional) Indicates whether to skip release updates during maintenance. Changing this will force terraform to create new resource. + +## Attribute Reference + +This resource exports the following attributes in addition to the arguments above: + +* `id` - Unique identifier of autonomous vm cluster. +* `arn` - The Amazon Resource Name (ARN) for the Exadata infrastructure. 
* `autonomous_data_storage_percentage` - The percentage of data storage currently in use for Autonomous Databases in the Autonomous VM cluster.
+* `non_provisionable_autonomous_container_databases` - The number of Autonomous CDBs that can't be provisioned because of resource constraints. +* `oci_resource_anchor_name` - The name of the OCI resource anchor associated with this Autonomous VM cluster. +* `oci_url` - The URL for accessing the OCI console page for this Autonomous VM cluster. +* `ocid` - The Oracle Cloud Identifier (OCID) of the Autonomous VM cluster. +* `percent_progress` - The progress of the current operation on the Autonomous VM cluster, as a percentage. +* `provisionable_autonomous_container_databases` - The number of Autonomous CDBs that can be provisioned in the Autonomous VM cluster. +* `provisioned_autonomous_container_databases` - The number of Autonomous CDBs currently provisioned in the Autonomous VM cluster. +* `provisioned_cpus` - The number of CPUs provisioned in the Autonomous VM cluster. +* `reclaimable_cpus` - The number of CPU cores that can be reclaimed from terminated or scaled-down Autonomous Databases. +* `reserved_cpus` - The number of CPU cores reserved for system operations and redundancy. +* `shape` - The shape of the Exadata infrastructure for the Autonomous VM cluster. +* `status` - The status of the Autonomous VM cluster. Possible values include CREATING, AVAILABLE, UPDATING, DELETING, DELETED, FAILED. +* `status_reason` - Additional information about the current status of the Autonomous VM cluster. +* `time_zone` - The time zone of the Autonomous VM cluster. +* `time_ords_certificate_expires` - The expiration date and time of the ORDS certificate. +* `time_database_ssl_certificate_expires` - The expiration date and time of the database SSL certificate. +* `tags_all` - The combined set of user-defined and provider-defined tags. 
+ +## Timeouts + +[Configuration options](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts): + +* `create` - (Default `24h`) +* `update` - (Default `24h`) +* `delete` - (Default `24h`) + +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import OpenSearch Ingestion Pipeline using the `id`. For example: + +```terraform +import { + to = aws_odb_cloud_autonomous_vm_cluster.example + id = "example" +} +``` + +Using `terraform import`, import cloud autonomous vm cluster `id`. For example: + +```console +% terraform import aws_odb_cloud_autonomous_vm_cluster.example example +``` diff --git a/website/docs/r/odb_cloud_exadata_infrastructure.html.markdown b/website/docs/r/odb_cloud_exadata_infrastructure.html.markdown new file mode 100644 index 000000000000..5bc89ddda084 --- /dev/null +++ b/website/docs/r/odb_cloud_exadata_infrastructure.html.markdown @@ -0,0 +1,145 @@ +--- +subcategory: "Oracle Database@AWS" +layout: "aws" +page_title: "AWS: aws_odb_cloud_exadata_infrastructure" +description: |- + Terraform resource for managing exadata infrastructure resource for Oracle Database@AWS. +--- + +# Resource: aws_odb_cloud_exadata_infrastructure + +Terraform resource for managing exadata infrastructure resource in AWS for Oracle Database@AWS. 
+ +## Example Usage + +### Basic Usage + +```terraform + +resource "aws_odb_cloud_exadata_infrastructure" "example" { + display_name = "my-exa-infra" + shape = "Exadata.X11M" + storage_count = 3 + compute_count = 2 + availability_zone_id = "use1-az6" + customer_contacts_to_send_to_oci = [{ email = "abc@example.com" }, { email = "def@example.com" }] + database_server_type = "X11M" + storage_server_type = "X11M-HC" + maintenance_window { + custom_action_timeout_in_mins = 16 + days_of_week = [{ name = "MONDAY" }, { name = "TUESDAY" }] + hours_of_day = [11, 16] + is_custom_action_timeout_enabled = true + lead_time_in_weeks = 3 + months = [{ name = "FEBRUARY" }, { name = "MAY" }, { name = "AUGUST" }, { name = "NOVEMBER" }] + patching_mode = "ROLLING" + preference = "CUSTOM_PREFERENCE" + weeks_of_month = [2, 4] + } + tags = { + "env" = "dev" + } + +} + +resource "aws_odb_cloud_exadata_infrastructure" "example" { + display_name = "my_exa_X9M" + shape = "Exadata.X9M" + storage_count = 3 + compute_count = 2 + availability_zone_id = "use1-az6" + maintenance_window { + custom_action_timeout_in_mins = 16 + is_custom_action_timeout_enabled = true + patching_mode = "ROLLING" + preference = "NO_PREFERENCE" + } +} +``` + +## Argument Reference + +The following arguments are required: + +* `display_name` - (Required) The user-friendly name for the Exadata infrastructure. Changing this will force terraform to create a new resource. +* `shape` - (Required) The model name of the Exadata infrastructure. Changing this will force terraform to create new resource. +* `storage_count` - (Required) The number of storage servers that are activated for the Exadata infrastructure. Changing this will force terraform to create new resource. +* `compute_count` - (Required) The number of compute instances that the Exadata infrastructure is located. Changing this will force terraform to create new resource. 
+* `availability_zone_id` - (Required) The AZ ID of the AZ where the Exadata infrastructure is located. Changing this will force terraform to create new resource. + +The following arguments are optional: + +* `customer_contacts_to_send_to_oci` - (Optional) The email addresses of contacts to receive notification from Oracle about maintenance updates for the Exadata infrastructure. Changing this will force terraform to create new resource. +* `availability_zone`: (Optional) The name of the Availability Zone (AZ) where the Exadata infrastructure is located. Changing this will force terraform to create new resource. +* `database_server_type` - (Optional) The database server model type of the Exadata infrastructure. For the list of valid model names, use the ListDbSystemShapes operation. This is a mandatory parameter for Exadata.X11M system shape. Changing this will force terraform to create new resource. +* `storage_server_type` - (Optional) The storage server model type of the Exadata infrastructure. For the list of valid model names, use the ListDbSystemShapes operation. This is a mandatory parameter for Exadata.X11M system shape. Changing this will force terraform to create new resource. +* `tags` - (Optional) A map of tags to assign to the exadata infrastructure. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). + +### maintenance_window + +* `custom_action_timeout_in_mins` - (Required) The custom action timeout in minutes for the maintenance window. 
* `is_custom_action_timeout_enabled` - (Required) Indicates whether custom action timeout is enabled for the maintenance window.
+* `max_data_storage_in_tbs` - The total amount of data disk group storage, in terabytes (TB), that's available on the Exadata infrastructure. +* `max_db_node_storage_size_in_gbs` - The total amount of local node storage, in gigabytes (GB), that's available on the Exadata infrastructure. +* `max_memory_in_gbs` - The total amount of memory in gigabytes (GB) available on the Exadata infrastructure. +* `monthly_db_server_version` - The monthly software version of the database servers in the Exadata infrastructure. +* `monthly_storage_server_version` - The monthly software version of the storage servers installed on the Exadata infrastructure. +* `next_maintenance_run_id` - The OCID of the next maintenance run for the Exadata infrastructure. +* `ocid` - The OCID of the Exadata infrastructure. +* `oci_resource_anchor_name` - The name of the OCI resource anchor for the Exadata infrastructure. +* `percent_progress` - The amount of progress made on the current operation on the Exadata infrastructure, expressed as a percentage. +* `status` - The current status of the Exadata infrastructure. +* `status_reason` - Additional information about the status of the Exadata infrastructure. +* `storage_server_version` - The software version of the storage servers on the Exadata infrastructure. +* `total_storage_size_in_gbs` - The total amount of storage, in gigabytes (GB), on the Exadata infrastructure. +* `created_at` - The time when the Exadata infrastructure was created. +* `compute_model` - The OCI model compute model used when you create or clone an instance: ECPU or OCPU. + +## Timeouts + +[Configuration options](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts): + +* `create` - (Default `24h`) +* `update` - (Default `24h`) +* `delete` - (Default `24h`) + +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import OpenSearch Ingestion Pipeline using the `id`. 
For example: + +```terraform +import { + to = aws_odb_cloud_exadata_infrastructure.example + id = "example" +} +``` + +Using `terraform import`, import Exadata Infrastructure using the `id`. For example: + +```console +% terraform import aws_odb_cloud_exadata_infrastructure.example example +``` diff --git a/website/docs/r/odb_cloud_vm_cluster.html.markdown b/website/docs/r/odb_cloud_vm_cluster.html.markdown new file mode 100644 index 000000000000..388c0313b806 --- /dev/null +++ b/website/docs/r/odb_cloud_vm_cluster.html.markdown @@ -0,0 +1,154 @@ +--- +subcategory: "Oracle Database@AWS" +layout: "AWS: aws_odb_cloud_vm_cluster" +page_title: "AWS: aws_odb_cloud_vm_cluster" +description: |- + Terraform resource for managing cloud vm cluster resource in AWS for Oracle Database@AWS. +--- + +# Resource: aws_odb_cloud_vm_cluster + +Terraform to manage cloud vm cluster resource in AWS for Oracle Database@AWS. + +You can find out more about Oracle Database@AWS from [User Guide](https://docs.aws.amazon.com/odb/latest/UserGuide/what-is-odb.html). 
+ +## Example Usage + +### Basic Usage + +```terraform +resource "aws_odb_cloud_vm_cluster" "with_minimum_parameter" { + display_name = "my_vm_cluster" + cloud_exadata_infrastructure_id = "" + cpu_core_count = 6 + gi_version = "23.0.0.0" + hostname_prefix = "apollo12" + ssh_public_keys = ["public-ssh-key"] + odb_network_id = "" + is_local_backup_enabled = true + is_sparse_diskgroup_enabled = true + license_model = "LICENSE_INCLUDED" + data_storage_size_in_tbs = 20.0 + db_servers = ["db-server-1", "db-server-2"] + db_node_storage_size_in_gbs = 120.0 + memory_size_in_gbs = 60 + data_collection_options { + is_diagnostics_events_enabled = false + is_health_monitoring_enabled = false + is_incident_logs_enabled = false + } +} + + +resource "aws_odb_cloud_vm_cluster" "with_all_parameters" { + display_name = "my_vm_cluster" + cloud_exadata_infrastructure_id = "" + cpu_core_count = 6 + gi_version = "23.0.0.0" + hostname_prefix = "apollo12" + ssh_public_keys = ["my-ssh-key"] + odb_network_id = "" + is_local_backup_enabled = true + is_sparse_diskgroup_enabled = true + license_model = "LICENSE_INCLUDED" + data_storage_size_in_tbs = 20.0 + db_servers = ["my-dbserver-1", "my-db-server-2"] + db_node_storage_size_in_gbs = 120.0 + memory_size_in_gbs = 60 + cluster_name = "julia-13" + timezone = "UTC" + scan_listener_port_tcp = 1521 + tags = { + "env" = "dev" + } + data_collection_options { + is_diagnostics_events_enabled = true + is_health_monitoring_enabled = true + is_incident_logs_enabled = true + } +} +``` + +## Argument Reference + +The following arguments are required: + +* `cloud_exadata_infrastructure_id` - (Required) The unique identifier of the Exadata infrastructure for this VM cluster. Changing this will create a new resource. +* `cpu_core_count` - (Required) The number of CPU cores to enable on the VM cluster. Changing this will create a new resource. +* `db_servers` - (Required) The list of database servers for the VM cluster. Changing this will create a new resource. 
+* `display_name` - (Required) A user-friendly name for the VM cluster. Changing this will create a new resource. +* `gi_version` - (Required) A valid software version of Oracle Grid Infrastructure (GI). To get the list of valid values, use the ListGiVersions operation and specify the shape of the Exadata infrastructure. Example: 19.0.0.0 Changing this will create a new resource. +* `hostname_prefix` - (Required) The host name prefix for the VM cluster. Constraints: - Can't be "localhost" or "hostname". - Can't contain "-version". - The maximum length of the combined hostname and domain is 63 characters. - The hostname must be unique within the subnet. Changing this will create a new resource. +* `odb_network_id` - (Required) The unique identifier of the ODB network for the VM cluster. Changing this will create a new resource. +* `ssh_public_keys` - (Required) The public key portion of one or more key pairs used for SSH access to the VM cluster. Changing this will create a new resource. +* `data_collection_options` - (Required) The set of preferences for the various diagnostic collection options for the VM cluster. +* `data_storage_size_in_tbs` - (Required) The size of the data disk group, in terabytes (TBs), to allocate for the VM cluster. Changing this will create a new resource. + +The following arguments are optional: + +* `cluster_name` - (Optional) The name of the Grid Infrastructure (GI) cluster. Changing this will create a new resource. +* `db_node_storage_size_in_gbs` - (Optional) The amount of local node storage, in gigabytes (GBs), to allocate for the VM cluster. Changing this will create a new resource. +* `is_local_backup_enabled` - (Optional) Specifies whether to enable database backups to local Exadata storage for the VM cluster. Changing this will create a new resource. +* `is_sparse_diskgroup_enabled` - (Optional) Specifies whether to create a sparse disk group for the VM cluster. Changing this will create a new resource. 
+* `license_model` - (Optional) The Oracle license model to apply to the VM cluster. Default: LICENSE_INCLUDED. Changing this will create a new resource. +* `memory_size_in_gbs` - (Optional) The amount of memory, in gigabytes (GBs), to allocate for the VM cluster. Changing this will create a new resource. +* `scan_listener_port_tcp` - (Optional) The port number for TCP connections to the single client access name (SCAN) listener. Valid values: 1024–8999, except 2484, 6100, 6200, 7060, 7070, 7085, and 7879. Default: 1521. Changing this will create a new resource. +* `timezone` - (Optional) The configured time zone of the VM cluster. Changing this will create a new resource. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `tags` - (Optional) A map of tags to assign to the VM cluster. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. + +## Attribute Reference + +This resource exports the following attributes in addition to the arguments above: + +* `id` - Unique identifier of vm cluster. +* `arn` - The Amazon Resource Name (ARN) for the cloud vm cluster. +* `disk_redundancy` - The type of redundancy for the VM cluster: NORMAL (2-way) or HIGH (3-way). +* `domain` - The domain name associated with the VM cluster. +* `hostname_prefix_computed` - The host name for the VM cluster. Constraints: - Can't be "localhost" or "hostname". - Can't contain "-version". - The maximum length of the combined hostname and domain is 63 characters. - The hostname must be unique within the subnet. 
+* `iorm_config_cache` - The Exadata IORM (I/O Resource Manager) configuration cache details for the VM cluster. +* `last_update_history_entry_id` - The OCID of the most recent maintenance update history entry. +* `listener_port` - The listener port number configured on the VM cluster. +* `node_count` - The total number of nodes in the VM cluster. +* `ocid` - The OCID (Oracle Cloud Identifier) of the VM cluster. +* `oci_resource_anchor_name` - The name of the OCI resource anchor associated with the VM cluster. +* `oci_url` - The HTTPS link to the VM cluster resource in OCI. +* `percent_progress` - The percentage of progress made on the current operation for the VM cluster. +* `scan_dns_name` - The fully qualified domain name (FQDN) for the SCAN IP addresses associated with the VM cluster. +* `scan_dns_record_id` - The OCID of the DNS record for the SCAN IPs linked to the VM cluster. +* `scan_ip_ids` - The list of OCIDs for SCAN IP addresses associated with the VM cluster. +* `shape` - The hardware model name of the Exadata infrastructure running the VM cluster. +* `status` - The current lifecycle status of the VM cluster. +* `status_reason` - Additional information regarding the current status of the VM cluster. +* `storage_size_in_gbs` - The local node storage allocated to the VM cluster, in gigabytes (GB). +* `system_version` - The operating system version of the image chosen for the VM cluster. +* `vip_ids` - The virtual IP (VIP) addresses assigned to the VM cluster. CRS assigns one VIP per node for failover support. +* `created_at` - The timestamp when the VM cluster was created. +* `gi_version_computed` - A complete software version of Oracle Grid Infrastructure (GI). +* `compute_model` - The compute model used when the instance is created or cloned — either ECPU or OCPU. ECPU is a virtualized compute unit; OCPU is a physical processor core with hyper-threading. 
+* `tags_all` - The combined set of user-defined and provider-defined tags. + +## Timeouts + +[Configuration options](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts): + +* `create` - (Default `24h`) +* `update` - (Default `24h`) +* `delete` - (Default `24h`) + +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import cloud vm cluster using the `id`. For example: + +```terraform +import { + to = aws_odb_cloud_vm_cluster.example + id = "example" +} +``` + +Using `terraform import`, import cloud vm cluster using the `id`. For example: + +```console +% terraform import aws_odb_cloud_vm_cluster.example example +``` diff --git a/website/docs/r/odb_network.html.markdown b/website/docs/r/odb_network.html.markdown new file mode 100644 index 000000000000..a6d73abcd750 --- /dev/null +++ b/website/docs/r/odb_network.html.markdown @@ -0,0 +1,106 @@ +--- +subcategory: "Oracle Database@AWS" +layout: "AWS: aws_odb_network" +page_title: "AWS: aws_odb_network" +description: |- + Terraform resource for managing odb network of an Oracle Database@AWS. +--- + +# Resource: aws_odb_network + +Terraform resource for managing odb Network resource in AWS for Oracle Database@AWS. 
+ +## Example Usage + +### Basic Usage + +```terraform + +resource "aws_odb_network" "example" { + display_name = "odb-my-net" + availability_zone_id = "use1-az6" + client_subnet_cidr = "10.2.0.0/24" + backup_subnet_cidr = "10.2.1.0/24" + s3_access = "DISABLED" + zero_etl_access = "DISABLED" + tags = { + "env" = "dev" + } +} + +resource "aws_odb_network" "example_all_enabled" { + display_name = "odb-my-net" + availability_zone_id = "use1-az6" + client_subnet_cidr = "10.2.0.0/24" + backup_subnet_cidr = "10.2.1.0/24" + s3_access = "ENABLED" + zero_etl_access = "ENABLED" + tags = { + "env" = "dev" + } +} +``` + +## Argument Reference + +The following arguments are required: + +* `display_name` - (Required) The user-friendly name for the odb network. Changing this will force terraform to create a new resource. +* `availability_zone_id` - (Required) The AZ ID of the AZ where the ODB network is located. Changing this will force terraform to create new resource. +* `client_subnet_cidr` - (Required) The CIDR notation for the network resource. Changing this will force terraform to create new resource. +* `backup_subnet_cidr` - (Required) The CIDR range of the backup subnet for the ODB network. Changing this will force terraform to create new resource. +* `s3_access` - (Required) Specifies the configuration for Amazon S3 access from the ODB network. +* `zero_etl_access` - (Required) Specifies the configuration for Zero-ETL access from the ODB network. + +The following arguments are optional: + +* `custom_domain_name` - (Optional) The name of the custom domain in which the network is located. `custom_domain_name` and `default_dns_prefix` cannot both be specified. Changing this will force terraform to create new resource. +* `availability_zone` - (Optional) The name of the Availability Zone (AZ) where the odb network is located. Changing this will force terraform to create new resource. Make sure availability_zone maps correctly with availability_zone_id. 
+* `s3_policy_document` - (Optional) Specifies the endpoint policy for Amazon S3 access from the ODB network. +* `default_dns_prefix` - (Optional) The default DNS prefix for the network resource. Changing this will force terraform to create new resource. +* `tags` - (Optional) A map of tags to assign to the ODB network. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). + +## Attribute Reference + +This resource exports the following attributes in addition to the arguments above: + +* `id` - Unique identifier of the odb network resource. +* `arn` - Amazon Resource Name (ARN) of the odb network resource. +* `oci_dns_forwarding_configs` - The list of DNS forwarding configurations in OCI for the ODB network. +* `peered_cidrs` - The list of CIDR ranges from the peered VPC that are allowed access to the ODB network. Refer to the ODB network peering documentation. +* `oci_network_anchor_id` - The unique identifier of the OCI network anchor for the ODB network. +* `oci_network_anchor_url` - The URL of the OCI network anchor for the ODB network. +* `oci_resource_anchor_name` - The name of the OCI resource anchor for the ODB network. +* `oci_vcn_id` - The unique identifier (Oracle Cloud ID, OCID) of the OCI VCN for the ODB network. +* `oci_vcn_url` - The URL of the OCI VCN for the ODB network. 
+* `percent_progress` - The amount of progress made on the current operation on the ODB network, expressed as a percentage. +* `managed_services` - The managed services configuration for the ODB network. +* `status` - The status of the network resource. +* `status_reason` - Additional information about the current status of the ODB network. +* `created_at` - The date and time when the ODB network was created. + +## Timeouts + +[Configuration options](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts): + +* `create` - (Default `24h`) +* `update` - (Default `24h`) +* `delete` - (Default `24h`) + +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Odb Network using the `id`. For example: + +```terraform +import { + to = aws_odb_network.example + id = "example" +} +``` + +Using `terraform import`, import Odb Network using the `id`. For example: + +```console +% terraform import aws_odb_network.example example +``` diff --git a/website/docs/r/odb_network_peering_connection.html.markdown b/website/docs/r/odb_network_peering_connection.html.markdown new file mode 100644 index 000000000000..8d7edc97cdeb --- /dev/null +++ b/website/docs/r/odb_network_peering_connection.html.markdown @@ -0,0 +1,80 @@ +--- +subcategory: "Oracle Database@AWS" +layout: "AWS: aws_odb_network_peering_connection" +page_title: "AWS: aws_odb_network_peering_connection" +description: |- + Terraform resource for managing oracle database network peering resource in AWS. +--- + +# Resource: aws_odb_network_peering_connection + +Terraform resource for managing oracle database network peering resource in AWS. + +You can find out more about Oracle Database@AWS from [User Guide](https://docs.aws.amazon.com/odb/latest/UserGuide/what-is-odb.html). 
+ +## Example Usage + +### Basic Usage + +```terraform +resource "aws_odb_network_peering_connection" "example" { + display_name = "example" + odb_network_id = "my-odb-network-id" + peer_network_id = "my-vpc-id" + tags = { + "env" = "dev" + } +} +``` + +## Argument Reference + +The following arguments are required: + +* `odb_network_id` - (Required) The unique identifier of the ODB network that initiates the peering connection. A sample ID is `odbpcx-abcdefgh12345678`. Changing this will force Terraform to create a new resource. +* `peer_network_id` - (Required) The unique identifier of the peer network (for example, a VPC ID). Changing this will force Terraform to create a new resource. +* `display_name` - (Required) Display name of the ODB network peering connection. Changing this will force Terraform to create a new resource. + +The following arguments are optional: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `tags` - (Optional) A map of tags to assign to the resource. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. + +## Attribute Reference + +This resource exports the following attributes in addition to the arguments above: + +* `id` - Unique identifier of odb network peering connection. +* `status` - Status of the ODB network peering connection. +* `status_reason` - The reason for the current status of the ODB peering connection. +* `odb_network_arn` - ARN of the ODB network. +* `peer_network_arn` - ARN of the peer network. 
+* `odb_peering_connection_type` - Type of the ODB peering connection. +* `created_at` - Created time of the ODB network peering connection. +* `percent_progress` - Progress of the ODB network peering connection. +* `tags_all` - A map of tags assigned to the resource, including inherited tags. + +## Timeouts + +[Configuration options](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts): + +* `create` - (Default `24h`) +* `update` - (Default `24h`) +* `delete` - (Default `24h`) + +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import odb network peering using the `id`. For example: + +```terraform +import { + to = aws_odb_network_peering_connection.example + id = "example" +} +``` + +Using `terraform import`, import odb network peering using the `id`. For example: + +```console +% terraform import aws_odb_network_peering_connection.example example +``` diff --git a/website/docs/r/opensearch_authorize_vpc_endpoint_access.html.markdown b/website/docs/r/opensearch_authorize_vpc_endpoint_access.html.markdown index c459e11c9901..0141e58c7006 --- a/website/docs/r/opensearch_authorize_vpc_endpoint_access.html.markdown +++ b/website/docs/r/opensearch_authorize_vpc_endpoint_access.html.markdown @@ -44,7 +44,7 @@ This resource exports the following attributes in addition to the arguments abov ## Import -In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import OpenSearch Authorize Vpc Endpoint Access using the `example_id_arg`. For example: +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import OpenSearch Authorize Vpc Endpoint Access using the `domain_name`. 
For example: ```terraform import { @@ -53,7 +53,7 @@ import { } ``` -Using `terraform import`, import OpenSearch Authorize Vpc Endpoint Access using the `example_id_arg`. For example: +Using `terraform import`, import OpenSearch Authorize Vpc Endpoint Access using the `domain_name`. For example: ```console % terraform import aws_opensearch_authorize_vpc_endpoint_access.example authorize_vpc_endpoint_access-id-12345678 diff --git a/website/docs/r/opensearch_domain_policy.html.markdown b/website/docs/r/opensearch_domain_policy.html.markdown index c857add58802..f3ace154298d 100644 --- a/website/docs/r/opensearch_domain_policy.html.markdown +++ b/website/docs/r/opensearch_domain_policy.html.markdown @@ -62,3 +62,20 @@ This resource exports no additional attributes. * `update` - (Default `180m`) * `delete` - (Default `90m`) + +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import OpenSearch Domain Policy using `domain_name` prefixed with `esd-policy-`. For example: + +```terraform +import { + to = aws_opensearch_domain_policy.example + id = "esd-policy-tf-test" +} +``` + +Using `terraform import`, import OpenSearch Domain Policy using `domain_name` prefixed with `esd-policy-`. For example: + +```console +% terraform import aws_opensearch_domain_policy.example esd-policy-tf-test +``` diff --git a/website/docs/r/opensearch_package.html.markdown b/website/docs/r/opensearch_package.html.markdown index fd71fe9fbbb3..625d22cdcd4a 100644 --- a/website/docs/r/opensearch_package.html.markdown +++ b/website/docs/r/opensearch_package.html.markdown @@ -41,8 +41,9 @@ resource "aws_opensearch_package" "example" { This resource supports the following arguments: * `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `engine_version` - (Optional, Forces new resources) Engine version that the package is compatible with. This argument is required and only valid when `package_type` is `ZIP-PLUGIN`. Format: `OpenSearch_X.Y` or `Elasticsearch_X.Y`, where `X` and `Y` are the major and minor version numbers, respectively. * `package_name` - (Required, Forces new resource) Unique name for the package. -* `package_type` - (Required, Forces new resource) The type of package. +* `package_type` - (Required, Forces new resource) The type of package. Valid values are `TXT-DICTIONARY`, `ZIP-PLUGIN`, `PACKAGE-LICENSE` and `PACKAGE-CONFIG`. * `package_source` - (Required, Forces new resource) Configuration block for the package source options. * `package_description` - (Optional, Forces new resource) Description of the package. diff --git a/website/docs/r/organizations_account.html.markdown b/website/docs/r/organizations_account.html.markdown index b023cd493ebe..1600adb9edfd 100644 --- a/website/docs/r/organizations_account.html.markdown +++ b/website/docs/r/organizations_account.html.markdown @@ -59,11 +59,36 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_organizations_account.example + identity = { + id = "111111111111" + } +} + +resource "aws_organizations_account" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `id` (String) ID of the AWS Organizations account. + +#### Optional + +* `account_id` (String) AWS Account where this resource is managed. 
+ In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import the AWS member account using the `account_id`. For example: ```terraform import { - to = aws_organizations_account.my_account + to = aws_organizations_account.example id = "111111111111" } ``` @@ -71,13 +96,13 @@ import { Using `terraform import`, import the AWS member account using the `account_id`. For example: ```console -% terraform import aws_organizations_account.my_account 111111111111 +% terraform import aws_organizations_account.example 111111111111 ``` To import accounts that have set iam_user_access_to_billing, use the following: ```console -% terraform import aws_organizations_account.my_account 111111111111_ALLOW +% terraform import aws_organizations_account.example 111111111111_ALLOW ``` Certain resource arguments, like `role_name`, do not have an Organizations API method for reading the information after account creation. If the argument is set in the Terraform configuration on an imported resource, Terraform will always show a difference. To workaround this behavior, either omit the argument from the Terraform configuration or use [`ignore_changes`](https://www.terraform.io/docs/configuration/meta-arguments/lifecycle.html#ignore_changes) to hide the difference. For example: diff --git a/website/docs/r/organizations_delegated_administrator.html.markdown b/website/docs/r/organizations_delegated_administrator.html.markdown index 80b8396caca2..b708fa2488a1 100644 --- a/website/docs/r/organizations_delegated_administrator.html.markdown +++ b/website/docs/r/organizations_delegated_administrator.html.markdown @@ -41,6 +41,33 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. 
For example: + +```terraform +import { + to = aws_organizations_delegated_administrator.example + identity = { + service_principal = "config.amazonaws.com" + delegated_account_id = "123456789012" + } +} + +resource "aws_organizations_delegated_administrator" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `service_principal` (String) Service principal for the AWS service. +* `delegated_account_id` (String) Account ID to be designated as a delegated administrator. + +#### Optional + +* `account_id` (String) AWS Account where this resource is managed. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import `aws_organizations_delegated_administrator` using the account ID and its service principal. For example: ```terraform diff --git a/website/docs/r/organizations_organization.html.markdown b/website/docs/r/organizations_organization.html.markdown index 05082f502f82..aaad0cb4a2db 100644 --- a/website/docs/r/organizations_organization.html.markdown +++ b/website/docs/r/organizations_organization.html.markdown @@ -67,11 +67,36 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_organizations_organization.example + identity = { + id = "o-1234567" + } +} + +resource "aws_organizations_organization" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `id` (String) ID of the AWS Organizations organization. + +#### Optional + +* `account_id` (String) AWS Account where this resource is managed. 
+ In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import the AWS organization using the `id`. For example: ```terraform import { - to = aws_organizations_organization.my_org + to = aws_organizations_organization.example id = "o-1234567" } ``` @@ -79,5 +104,5 @@ import { Using `terraform import`, import the AWS organization using the `id`. For example: ```console -% terraform import aws_organizations_organization.my_org o-1234567 +% terraform import aws_organizations_organization.example o-1234567 ``` diff --git a/website/docs/r/organizations_organizational_unit.html.markdown b/website/docs/r/organizations_organizational_unit.html.markdown index 33f5d82a8ebe..a01ac90b6ae0 100644 --- a/website/docs/r/organizations_organizational_unit.html.markdown +++ b/website/docs/r/organizations_organizational_unit.html.markdown @@ -42,6 +42,31 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_organizations_organizational_unit.example + identity = { + id = "ou-1234567" + } +} + +resource "aws_organizations_organizational_unit" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `id` (String) ID of the organizational unit. + +#### Optional + +* `account_id` (String) AWS Account where this resource is managed. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import AWS Organizations Organizational Units using the `id`. 
For example: ```terraform diff --git a/website/docs/r/organizations_policy_attachment.html.markdown b/website/docs/r/organizations_policy_attachment.html.markdown index 8b4ca98e8e2a..2534425af2a6 100644 --- a/website/docs/r/organizations_policy_attachment.html.markdown +++ b/website/docs/r/organizations_policy_attachment.html.markdown @@ -53,13 +53,40 @@ This resource exports no additional attributes. ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_organizations_policy_attachment.example + identity = { + policy_id = "p-12345678" + target_id = "123456789012" + } +} + +resource "aws_organizations_policy_attachment" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `policy_id` (String) Organizations policy ID. +* `target_id` (String) Organizations target ID (account, OU, or root). + +#### Optional + +* `account_id` (String) AWS Account where this resource is managed. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import `aws_organizations_policy_attachment` using the target ID and policy ID. 
For example: With an account target: ```terraform import { - to = aws_organizations_policy_attachment.account + to = aws_organizations_policy_attachment.example id = "123456789012:p-12345678" } ``` @@ -69,5 +96,5 @@ Using `terraform import`, import `aws_organizations_policy_attachment` using the With an account target: ```console -% terraform import aws_organizations_policy_attachment.account 123456789012:p-12345678 +% terraform import aws_organizations_policy_attachment.example 123456789012:p-12345678 ``` diff --git a/website/docs/r/paymentcryptography_key.html.markdown b/website/docs/r/paymentcryptography_key.html.markdown index e2eb6cc00373..9ef312a40434 100644 --- a/website/docs/r/paymentcryptography_key.html.markdown +++ b/website/docs/r/paymentcryptography_key.html.markdown @@ -88,6 +88,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_paymentcryptography_key.example + identity = { + "arn" = "arn:aws:payment-cryptography:us-east-1:123456789012:key/1234abcd-12ab-34cd-56ef-1234567890ab" + } +} + +resource "aws_paymentcryptography_key" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the Payment Cryptography key. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Payment Cryptography Control Plane Key using the `arn:aws:payment-cryptography:us-east-1:123456789012:key/qtbojf64yshyvyzf`. 
For example: ```terraform diff --git a/website/docs/r/pinpointsmsvoicev2_phone_number.html.markdown b/website/docs/r/pinpointsmsvoicev2_phone_number.html.markdown index 83abea4adb4c..00c4e0015ced 100644 --- a/website/docs/r/pinpointsmsvoicev2_phone_number.html.markdown +++ b/website/docs/r/pinpointsmsvoicev2_phone_number.html.markdown @@ -37,7 +37,7 @@ This resource supports the following arguments: * `opt_out_list_name` - (Optional) The name of the opt-out list to associate with the phone number. * `registration_id` - (Optional) Use this field to attach your phone number for an external registration process. * `self_managed_opt_outs_enabled` - (Optional) When set to `false` an end recipient sends a message that begins with HELP or STOP to one of your dedicated numbers, AWS End User Messaging SMS and Voice automatically replies with a customizable message and adds the end recipient to the opt-out list. When set to true you’re responsible for responding to HELP and STOP requests. You’re also responsible for tracking and honoring opt-out request. -* `two_way_channel_arn` - (Optional) The Amazon Resource Name (ARN) of the two way channel. +* `two_way_channel_arn` - (Optional) Configuration for two-way SMS. Specify an ARN to receive incoming SMS messages, or `connect.[region].amazonaws.com` (with `[region]` replaced by the AWS Region of the Amazon Connect instance) to set Amazon Connect as the inbound destination. * `two_way_channel_enabled` - (Optional) By default this is set to `false`. When set to `true` you can receive incoming text messages from your end recipients. * `two_way_channel_role` - (Optional) IAM Role ARN for a service to assume, to be able to post inbound SMS messages. 
diff --git a/website/docs/r/prometheus_resource_policy.html.markdown b/website/docs/r/prometheus_resource_policy.html.markdown new file mode 100644 index 000000000000..6e7a7f0cc5f9 --- /dev/null +++ b/website/docs/r/prometheus_resource_policy.html.markdown @@ -0,0 +1,166 @@ +--- +subcategory: "AMP (Managed Prometheus)" +layout: "aws" +page_title: "AWS: aws_prometheus_resource_policy" +description: |- + Manages an Amazon Managed Service for Prometheus (AMP) Resource Policy. +--- + +# Resource: aws_prometheus_resource_policy + +Manages an Amazon Managed Service for Prometheus (AMP) Resource Policy. + +Resource-based policies allow you to grant permissions to other AWS accounts or services to access your Prometheus workspace. This enables cross-account access and fine-grained permissions for workspace sharing. + +## Example Usage + +### Basic Resource Policy + +```terraform +resource "aws_prometheus_workspace" "example" { + alias = "example-workspace" +} + +data "aws_caller_identity" "current" {} + +data "aws_iam_policy_document" "example" { + statement { + effect = "Allow" + principals { + type = "AWS" + identifiers = [data.aws_caller_identity.current.account_id] + } + actions = [ + "aps:RemoteWrite", + "aps:QueryMetrics", + "aps:GetSeries", + "aps:GetLabels", + "aps:GetMetricMetadata" + ] + resources = [aws_prometheus_workspace.example.arn] + } +} + +resource "aws_prometheus_resource_policy" "example" { + workspace_id = aws_prometheus_workspace.example.id + policy_document = data.aws_iam_policy_document.example.json +} +``` + +### Cross-Account Access + +```terraform +resource "aws_prometheus_workspace" "example" { + alias = "example-workspace" +} + +data "aws_iam_policy_document" "cross_account" { + statement { + effect = "Allow" + principals { + type = "AWS" + identifiers = ["arn:aws:iam::123456789012:root"] + } + actions = [ + "aps:RemoteWrite", + "aps:QueryMetrics" + ] + resources = [aws_prometheus_workspace.example.arn] + } +} + +resource 
"aws_prometheus_resource_policy" "cross_account" { + workspace_id = aws_prometheus_workspace.example.id + policy_document = data.aws_iam_policy_document.cross_account.json +} +``` + +### Service-Specific Access + +```terraform +resource "aws_prometheus_workspace" "example" { + alias = "example-workspace" +} + +data "aws_iam_policy_document" "service_access" { + statement { + effect = "Allow" + principals { + type = "Service" + identifiers = ["grafana.amazonaws.com"] + } + actions = [ + "aps:QueryMetrics", + "aps:GetSeries", + "aps:GetLabels", + "aps:GetMetricMetadata" + ] + resources = [aws_prometheus_workspace.example.arn] + } +} + +resource "aws_prometheus_resource_policy" "service_access" { + workspace_id = aws_prometheus_workspace.example.id + policy_document = data.aws_iam_policy_document.service_access.json +} +``` + +## Argument Reference + +This resource supports the following arguments: + +* `workspace_id` - (Required) The ID of the workspace to attach the resource-based policy to. +* `policy_document` - (Required) The JSON policy document to use as the resource-based policy. This policy defines the permissions that other AWS accounts or services have to access your workspace. + +The following arguments are optional: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). + +## Attribute Reference + +This resource exports the following attributes in addition to the arguments above: + +* `policy_status` - The current status of the resource-based policy. Can be `CREATING`, `ACTIVE`, `UPDATING`, or `DELETING`. +* `revision_id` - The revision ID of the current resource-based policy. 
+ +## Supported Actions + +The following actions are supported in resource policies for Prometheus workspaces: + +* `aps:RemoteWrite` - Allows writing metrics to the workspace +* `aps:QueryMetrics` - Allows querying metrics from the workspace +* `aps:GetSeries` - Allows retrieving time series data +* `aps:GetLabels` - Allows retrieving label names and values +* `aps:GetMetricMetadata` - Allows retrieving metric metadata + +## Notes + +* Only Prometheus-compatible APIs can be used for workspace sharing. Non-Prometheus-compatible APIs added to the policy will be ignored. +* If your workspace uses customer-managed KMS keys for encryption, you must grant the principals in your resource-based policy access to those KMS keys through KMS grants. +* The resource ARN in the policy document must match the workspace ARN that the policy is being attached to. +* Resource policies enable cross-account access and fine-grained permissions for Prometheus workspaces. + +## Timeouts + +[Configuration options](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts): + +- `create` - (Default `5m`) +- `update` - (Default `5m`) +- `delete` - (Default `5m`) + +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import the Resource Policy using the workspace ID. For example: + +```terraform +import { + to = aws_prometheus_resource_policy.example + id = "ws-12345678-90ab-cdef-1234-567890abcdef" +} +``` + +Using `terraform import`, import AMP Resource Policies using the workspace ID. 
For example: + +```console +% terraform import aws_prometheus_resource_policy.example ws-12345678-90ab-cdef-1234-567890abcdef +``` diff --git a/website/docs/r/quicksight_account_settings.html.markdown b/website/docs/r/quicksight_account_settings.html.markdown index 24fa9d832005..0fd7fcdea3f0 100644 --- a/website/docs/r/quicksight_account_settings.html.markdown +++ b/website/docs/r/quicksight_account_settings.html.markdown @@ -33,14 +33,13 @@ resource "aws_quicksight_account_settings" "example" { This resource supports the following arguments: +* `aws_account_id` - (Optional, Forces new resource) AWS account ID. Defaults to automatically determined account ID of the Terraform AWS provider. * `default_namespace` - (Optional) The default namespace for this Amazon Web Services account. Currently, the default is `default`. * `termination_protection_enabled` - (Optional) A boolean value that determines whether or not an Amazon QuickSight account can be deleted. If `true`, it does not allow the account to be deleted and results in an error message if a user tries to make a DeleteAccountSubscription request. If `false`, it will allow the account to be deleted. ## Attribute Reference -This resource exports the following attributes in addition to the arguments above: - -* `aws_account_id` - The ID for the AWS account that contains the settings. +This resource exports no additional attributes. ## Import diff --git a/website/docs/r/quicksight_account_subscription.html.markdown b/website/docs/r/quicksight_account_subscription.html.markdown index 4571c652b11e..f22a550f0d97 100644 --- a/website/docs/r/quicksight_account_subscription.html.markdown +++ b/website/docs/r/quicksight_account_subscription.html.markdown @@ -10,7 +10,7 @@ description: |- Terraform resource for managing an AWS QuickSight Account Subscription. 
-~> Due to the absence of the `admin_group`, `author_group`, and `reader_group` fields in the [`DescribeAccountSettings`](https://docs.aws.amazon.com/quicksight/latest/APIReference/API_DescribeAccountSettings.html) API response, changes made to these groups post-subscription will not be detected by this resource. +~> Due to the absence of the `admin_group`, `author_group`, `reader_group`, `admin_pro_group`, `author_pro_group`, and `reader_pro_group` fields in the [`DescribeAccountSettings`](https://docs.aws.amazon.com/quicksight/latest/APIReference/API_DescribeAccountSettings.html) API response, changes made to these groups post-subscription will not be detected by this resource. ## Example Usage @@ -34,11 +34,12 @@ The following arguments are required: The following arguments are optional: -* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `active_directory_name` - (Optional) Name of your Active Directory. This field is required if `ACTIVE_DIRECTORY` is the selected authentication method of the new Amazon QuickSight account. * `admin_group` - (Optional) Admin group associated with your Active Directory or IAM Identity Center account. This field is required if `ACTIVE_DIRECTORY` or `IAM_IDENTITY_CENTER` is the selected authentication method of the new Amazon QuickSight account. +* `admin_pro_group` - (Optional) Admin PRO group associated with your Active Directory or IAM Identity Center account. * `author_group` - (Optional) Author group associated with your Active Directory or IAM Identity Center account. -* `aws_account_id` - (Optional) AWS account ID hosting the QuickSight account. Default to provider account. 
+* `author_pro_group` - (Optional) Author PRO group associated with your Active Directory or IAM Identity Center account. +* `aws_account_id` - (Optional, Forces new resource) AWS account ID. Defaults to automatically determined account ID of the Terraform AWS provider. * `contact_number` - (Optional) A 10-digit phone number for the author of the Amazon QuickSight account to use for future communications. This field is required if `ENTERPPRISE_AND_Q` is the selected edition of the new Amazon QuickSight account. * `directory_id` - (Optional) Active Directory ID that is associated with your Amazon QuickSight account. * `email_address` - (Optional) Email address of the author of the Amazon QuickSight account to use for future communications. This field is required if `ENTERPPRISE_AND_Q` is the selected edition of the new Amazon QuickSight account. @@ -46,7 +47,9 @@ The following arguments are optional: * `iam_identity_center_instance_arn` - (Optional) The Amazon Resource Name (ARN) for the IAM Identity Center instance. * `last_name` - (Optional) Last name of the author of the Amazon QuickSight account to use for future communications. This field is required if `ENTERPPRISE_AND_Q` is the selected edition of the new Amazon QuickSight account. * `reader_group` - (Optional) Reader group associated with your Active Directory or IAM Identity Center account. +* `reader_pro_group` - (Optional) Reader PRO group associated with your Active Directory or IAM Identity Center account. * `realm` - (Optional) Realm of the Active Directory that is associated with your Amazon QuickSight account. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
## Attribute Reference @@ -63,4 +66,19 @@ This resource exports the following attributes in addition to the arguments abov ## Import -You cannot import this resource. +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import a QuickSight Account Subscription using `aws_account_id`. For example: + +~> Due to the absence of required arguments in the [`DescribeAccountSettings`](https://docs.aws.amazon.com/quicksight/latest/APIReference/API_DescribeAccountSettings.html) API response, importing an existing account subscription will result in a planned replacement on the subsequent `apply` operation. Until the Describe API response is extended to include all configurable arguments, an [`ignore_changes` lifecycle argument](https://developer.hashicorp.com/terraform/language/meta-arguments/lifecycle#ignore_changes) can be used to suppress differences on arguments not read into state. + +```terraform +import { + to = aws_quicksight_account_subscription.example + id = "012345678901" +} +``` + +Using `terraform import`, import a QuickSight Account Subscription using `aws_account_id`. For example: + +```console +% terraform import aws_quicksight_account_subscription.example "012345678901" +``` diff --git a/website/docs/r/quicksight_analysis.html.markdown b/website/docs/r/quicksight_analysis.html.markdown index 584984f581b1..831d91e2fd81 100644 --- a/website/docs/r/quicksight_analysis.html.markdown +++ b/website/docs/r/quicksight_analysis.html.markdown @@ -93,12 +93,12 @@ The following arguments are required: The following arguments are optional: -* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). -* `aws_account_id` - (Optional, Forces new resource) AWS account ID. 
+* `aws_account_id` - (Optional, Forces new resource) AWS account ID. Defaults to automatically determined account ID of the Terraform AWS provider. * `definition` - (Optional) A detailed analysis definition. Only one of `definition` or `source_entity` should be configured. See [definition](#definition). * `parameters` - (Optional) The parameters for the creation of the analysis, which you want to use to override the default settings. An analysis can have any type of parameters, and some parameters might accept multiple values. See [parameters](#parameters). * `permissions` - (Optional) A set of resource permissions on the analysis. Maximum of 64 items. See [permissions](#permissions). * `recovery_window_in_days` - (Optional) A value that specifies the number of days that Amazon QuickSight waits before it deletes the analysis. Use `0` to force deletion without recovery. Minimum value of `7`. Maximum value of `30`. Default to `30`. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `source_entity` - (Optional) The entity that you are using as a source when you create the analysis (template). Only one of `definition` or `source_entity` should be configured. See [source_entity](#source_entity). * `tags` - (Optional) Key-value map of resource tags. If configured with a provider [`default_tags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. * `theme_arn` - (Optional) The Amazon Resource Name (ARN) of the theme that is being used for this analysis. The theme ARN must exist in the same AWS account where you create the analysis. 
diff --git a/website/docs/r/quicksight_custom_permissions.html.markdown b/website/docs/r/quicksight_custom_permissions.html.markdown new file mode 100644 index 000000000000..7ea7ab857dd8 --- /dev/null +++ b/website/docs/r/quicksight_custom_permissions.html.markdown @@ -0,0 +1,87 @@ +--- +subcategory: "QuickSight" +layout: "aws" +page_title: "AWS: aws_quicksight_custom_permissions" +description: |- + Manages a QuickSight custom permissions profile. +--- + +# Resource: aws_quicksight_custom_permissions + +Manages a QuickSight custom permissions profile. + +## Example Usage + +```terraform +resource "aws_quicksight_custom_permissions" "example" { + custom_permissions_name = "example-permissions" + + capabilities { + print_reports = "DENY" + share_dashboards = "DENY" + } +} +``` + +## Argument Reference + +The following arguments are required: + +* `capabilities` - (Required) Actions to include in the custom permissions profile. See [capabilities](#capabilities). +* `custom_permissions_name` - (Required, Forces new resource) Custom permissions profile name. + +The following arguments are optional: + +* `aws_account_id` - (Optional, Forces new resource) AWS account ID. Defaults to automatically determined account ID of the Terraform AWS provider. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `tags` - (Optional) Key-value map of resource tags. If configured with a provider [`default_tags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. + +### capabilities + +* `add_or_run_anomaly_detection_for_analyses` - (Optional) The ability to add or run anomaly detection. Valid values: `DENY`. 
+* `create_and_update_dashboard_email_reports` - (Optional) The ability to create and update email reports. Valid values: `DENY`. +* `create_and_update_datasets` - (Optional) The ability to create and update datasets. Valid values: `DENY`. +* `create_and_update_data_sources` - (Optional) The ability to create and update data sources. Valid values: `DENY`. +* `create_and_update_themes` - (Optional) The ability to create and update themes. Valid values: `DENY`. +* `create_and_update_threshold_alerts` - (Optional) The ability to create and update threshold alerts. Valid values: `DENY`. +* `create_shared_folders` - (Optional) The ability to create shared folders. Valid values: `DENY`. +* `create_spice_dataset` - (Optional) The ability to create a SPICE dataset. Valid values: `DENY`. +* `export_to_csv` - (Optional) The ability to export to CSV files from the UI. Valid values: `DENY`. +* `export_to_csv_in_scheduled_reports` - (Optional) The ability to export to CSV files in scheduled email reports. Valid values: `DENY`. +* `export_to_excel` - (Optional) The ability to export to Excel files from the UI. Valid values: `DENY`. +* `export_to_excel_in_scheduled_reports` - (Optional) The ability to export to Excel files in scheduled email reports. Valid values: `DENY`. +* `export_to_pdf` - (Optional) The ability to export to PDF files from the UI. Valid values: `DENY`. +* `export_to_pdf_in_scheduled_reports` - (Optional) The ability to export to PDF files in scheduled email reports. Valid values: `DENY`. +* `include_content_in_scheduled_reports_email` - (Optional) The ability to include content in scheduled email reports. Valid values: `DENY`. +* `print_reports` - (Optional) The ability to print reports. Valid values: `DENY`. +* `rename_shared_folders` - (Optional) The ability to rename shared folders. Valid values: `DENY`. +* `share_analyses` - (Optional) The ability to share analyses. Valid values: `DENY`. 
+* `share_dashboards` - (Optional) The ability to share dashboards. Valid values: `DENY`. +* `share_datasets` - (Optional) The ability to share datasets. Valid values: `DENY`. +* `share_data_sources` - (Optional) The ability to share data sources. Valid values: `DENY`. +* `subscribe_dashboard_email_reports` - (Optional) The ability to subscribe to email reports. Valid values: `DENY`. +* `view_account_spice_capacity` - (Optional) The ability to view account SPICE capacity. Valid values: `DENY`. + +## Attribute Reference + +This resource exports the following attributes in addition to the arguments above: + +* `arn` - ARN of the custom permissions profile. +* `tags_all` - A map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block). + +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import a QuickSight custom permissions profile using the AWS account ID and custom permissions profile name separated by a comma (`,`). For example: + +```terraform +import { + to = aws_quicksight_custom_permissions.example + id = "123456789012,example-permissions" +} +``` + +Using `terraform import`, import a QuickSight custom permissions profile using the AWS account ID and custom permissions profile name separated by a comma (`,`). 
For example: + +```console +% terraform import aws_quicksight_custom_permissions.example 123456789012,example-permissions +``` diff --git a/website/docs/r/quicksight_dashboard.html.markdown b/website/docs/r/quicksight_dashboard.html.markdown index 4a15ff87aaa0..8ac5df4020f8 100644 --- a/website/docs/r/quicksight_dashboard.html.markdown +++ b/website/docs/r/quicksight_dashboard.html.markdown @@ -96,12 +96,12 @@ The following arguments are required: The following arguments are optional: -* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). -* `aws_account_id` - (Optional, Forces new resource) AWS account ID. +* `aws_account_id` - (Optional, Forces new resource) AWS account ID. Defaults to automatically determined account ID of the Terraform AWS provider. * `dashboard_publish_options` - (Optional) Options for publishing the dashboard. See [dashboard_publish_options](#dashboard_publish_options). * `definition` - (Optional) A detailed dashboard definition. Only one of `definition` or `source_entity` should be configured. See [definition](#definition). * `parameters` - (Optional) The parameters for the creation of the dashboard, which you want to use to override the default settings. A dashboard can have any type of parameters, and some parameters might accept multiple values. See [parameters](#parameters). * `permissions` - (Optional) A set of resource permissions on the dashboard. Maximum of 64 items. See [permissions](#permissions). +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `source_entity` - (Optional) The entity that you are using as a source when you create the dashboard (template). Only one of `definition` or `source_entity` should be configured. See [source_entity](#source_entity). * `tags` - (Optional) Key-value map of resource tags. If configured with a provider [`default_tags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. * `theme_arn` - (Optional) The Amazon Resource Name (ARN) of the theme that is being used for this dashboard. The theme ARN must exist in the same AWS account where you create the dashboard. diff --git a/website/docs/r/quicksight_data_set.html.markdown b/website/docs/r/quicksight_data_set.html.markdown index e7e318654dba..27e8ded9f870 100644 --- a/website/docs/r/quicksight_data_set.html.markdown +++ b/website/docs/r/quicksight_data_set.html.markdown @@ -170,8 +170,7 @@ The following arguments are required: The following arguments are optional: -* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). -* `aws_account_id` - (Optional, Forces new resource) AWS account ID. +* `aws_account_id` - (Optional, Forces new resource) AWS account ID. Defaults to automatically determined account ID of the Terraform AWS provider. * `column_groups` - (Optional) Groupings of columns that work together in certain Amazon QuickSight features. Currently, only geospatial hierarchy is supported. See [column_groups](#column_groups). 
* `column_level_permission_rules` - (Optional) A set of 1 or more definitions of a [ColumnLevelPermissionRule](https://docs.aws.amazon.com/quicksight/latest/APIReference/API_ColumnLevelPermissionRule.html). See [column_level_permission_rules](#column_level_permission_rules). * `data_set_usage_configuration` - (Optional) The usage configuration to apply to child datasets that reference this dataset as a source. See [data_set_usage_configuration](#data_set_usage_configuration). @@ -179,6 +178,7 @@ The following arguments are optional: * `logical_table_map` - (Optional) Configures the combination and transformation of the data from the physical tables. Maximum of 1 entry. See [logical_table_map](#logical_table_map). * `permissions` - (Optional) A set of resource permissions on the data source. Maximum of 64 items. See [permissions](#permissions). * `physical_table_map` - (Optional) Declares the physical tables that are available in the underlying data sources. See [physical_table_map](#physical_table_map). +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `row_level_permission_data_set` - (Optional) The row-level security configuration for the data that you want to create. See [row_level_permission_data_set](#row_level_permission_data_set). * `row_level_permission_tag_configuration` - (Optional) The configuration of tags on a dataset to set row-level security. Row-level security tags are currently supported for anonymous embedding only. See [row_level_permission_tag_configuration](#row_level_permission_tag_configuration). * `refresh_properties` - (Optional) The refresh properties for the data set. **NOTE**: Only valid when `import_mode` is set to `SPICE`. See [refresh_properties](#refresh_properties). 
@@ -395,8 +395,17 @@ This resource exports the following attributes in addition to the arguments abov * `arn` - Amazon Resource Name (ARN) of the data set. * `id` - A comma-delimited string joining AWS account ID and data set ID. +* `output_columns` - The final set of columns available for use in analyses and dashboards after all data preparation and transformation steps have been applied within the data set. See [`output_columns` Block](#output_columns-block) below. * `tags_all` - A map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block). +### `output_columns` Block + +The `output_columns` block has the following attributes. + +* `name` - The name of the column. +* `description` - The description of the column. +* `type` - The data type of the column. + ## Import In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import a QuickSight Data Set using the AWS account ID and data set ID separated by a comma (`,`). For example: diff --git a/website/docs/r/quicksight_data_source.html.markdown b/website/docs/r/quicksight_data_source.html.markdown index f394f1dd34ba..f1607d480937 100644 --- a/website/docs/r/quicksight_data_source.html.markdown +++ b/website/docs/r/quicksight_data_source.html.markdown @@ -139,10 +139,10 @@ The following arguments are required: The following arguments are optional: -* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). -* `aws_account_id` - (Optional, Forces new resource) The ID for the AWS account that the data source is in. 
Currently, you use the ID for the AWS account that contains your Amazon QuickSight account. +* `aws_account_id` - (Optional, Forces new resource) AWS account ID. Defaults to automatically determined account ID of the Terraform AWS provider. * `credentials` - (Optional) The credentials Amazon QuickSight uses to connect to your underlying source. See [Credentials](#credentials-argument-reference) below for more details. * `permission` - (Optional) A set of resource permissions on the data source. Maximum of 64 items. See [Permission](#permission-argument-reference) below for more details. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `ssl_properties` - (Optional) Secure Socket Layer (SSL) properties that apply when Amazon QuickSight connects to your underlying source. See [SSL Properties](#ssl_properties-argument-reference) below for more details. * `tags` - (Optional) Key-value map of resource tags. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. * `vpc_connection_properties`- (Optional) Use this parameter only when you want Amazon QuickSight to use a VPC connection when connecting to your underlying source. See [VPC Connection Properties](#vpc_connection_properties-argument-reference) below for more details. 
diff --git a/website/docs/r/quicksight_folder.html.markdown b/website/docs/r/quicksight_folder.html.markdown index f612a952c620..875e938b4bad 100644 --- a/website/docs/r/quicksight_folder.html.markdown +++ b/website/docs/r/quicksight_folder.html.markdown @@ -69,11 +69,11 @@ The following arguments are required: The following arguments are optional: -* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). -* `aws_account_id` - (Optional, Forces new resource) AWS account ID. +* `aws_account_id` - (Optional, Forces new resource) AWS account ID. Defaults to automatically determined account ID of the Terraform AWS provider. * `folder_type` - (Optional) The type of folder. By default, it is `SHARED`. Valid values are: `SHARED`. * `parent_folder_arn` - (Optional) The Amazon Resource Name (ARN) for the parent folder. If not set, creates a root-level folder. * `permissions` - (Optional) A set of resource permissions on the folder. Maximum of 64 items. See [permissions](#permissions). +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `tags` - (Optional) Key-value map of resource tags. If configured with a provider [`default_tags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. 
### permissions diff --git a/website/docs/r/quicksight_folder_membership.html.markdown b/website/docs/r/quicksight_folder_membership.html.markdown index 6d9049427828..364759489305 100644 --- a/website/docs/r/quicksight_folder_membership.html.markdown +++ b/website/docs/r/quicksight_folder_membership.html.markdown @@ -32,8 +32,8 @@ The following arguments are required: The following arguments are optional: +* `aws_account_id` - (Optional, Forces new resource) AWS account ID. Defaults to automatically determined account ID of the Terraform AWS provider. * `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). -* `aws_account_id` - (Optional, Forces new resource) AWS account ID. ## Attribute Reference diff --git a/website/docs/r/quicksight_group.html.markdown b/website/docs/r/quicksight_group.html.markdown index 4d8f9829f97a..a7e0f021842f 100644 --- a/website/docs/r/quicksight_group.html.markdown +++ b/website/docs/r/quicksight_group.html.markdown @@ -22,11 +22,11 @@ resource "aws_quicksight_group" "example" { This resource supports the following arguments: -* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). -* `group_name` - (Required) A name for the group. -* `aws_account_id` - (Optional) The ID for the AWS account that the group is in. Currently, you use the ID for the AWS account that contains your Amazon QuickSight account. +* `aws_account_id` - (Optional, Forces new resource) AWS account ID. Defaults to automatically determined account ID of the Terraform AWS provider. 
* `description` - (Optional) A description for the group. +* `group_name` - (Required) A name for the group. * `namespace` - (Optional) The namespace. Currently, you should set this to `default`. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). ## Attribute Reference diff --git a/website/docs/r/quicksight_group_membership.html.markdown b/website/docs/r/quicksight_group_membership.html.markdown index 4cc1792af56c..b7d268befcc2 100644 --- a/website/docs/r/quicksight_group_membership.html.markdown +++ b/website/docs/r/quicksight_group_membership.html.markdown @@ -23,11 +23,11 @@ resource "aws_quicksight_group_membership" "example" { This resource supports the following arguments: -* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `aws_account_id` - (Optional, Forces new resource) AWS account ID. Defaults to automatically determined account ID of the Terraform AWS provider. * `group_name` - (Required) The name of the group in which the member will be added. * `member_name` - (Required) The name of the member to add to the group. -* `aws_account_id` - (Optional) The ID for the AWS account that the group is in. Currently, you use the ID for the AWS account that contains your Amazon QuickSight account. -* `namespace` - (Required) The namespace that you want the user to be a part of. Defaults to `default`. +* `namespace` - (Optional) The namespace that you want the user to be a part of. Defaults to `default`. 
+* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). ## Attribute Reference diff --git a/website/docs/r/quicksight_iam_policy_assignment.html.markdown b/website/docs/r/quicksight_iam_policy_assignment.html.markdown index a41873c94cf0..e728117c5aef 100644 --- a/website/docs/r/quicksight_iam_policy_assignment.html.markdown +++ b/website/docs/r/quicksight_iam_policy_assignment.html.markdown @@ -34,11 +34,11 @@ The following arguments are required: The following arguments are optional: -* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). -* `aws_account_id` - (Optional) AWS account ID. +* `aws_account_id` - (Optional, Forces new resource) AWS account ID. Defaults to automatically determined account ID of the Terraform AWS provider. * `identities` - (Optional) Amazon QuickSight users, groups, or both to assign the policy to. See [`identities` block](#identities-block). * `namespace` - (Optional) Namespace that contains the assignment. Defaults to `default`. * `policy_arn` - (Optional) ARN of the IAM policy to apply to the Amazon QuickSight users and groups specified in this assignment. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
### `identities` block diff --git a/website/docs/r/quicksight_ingestion.html.markdown b/website/docs/r/quicksight_ingestion.html.markdown index b1bcaa1889c6..e0163853303d 100644 --- a/website/docs/r/quicksight_ingestion.html.markdown +++ b/website/docs/r/quicksight_ingestion.html.markdown @@ -32,8 +32,8 @@ The following arguments are required: The following arguments are optional: +* `aws_account_id` - (Optional, Forces new resource) AWS account ID. Defaults to automatically determined account ID of the Terraform AWS provider. * `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). -* `aws_account_id` - (Optional) AWS account ID. ## Attribute Reference diff --git a/website/docs/r/quicksight_ip_restriction.html.markdown b/website/docs/r/quicksight_ip_restriction.html.markdown new file mode 100644 index 000000000000..3948608d845c --- /dev/null +++ b/website/docs/r/quicksight_ip_restriction.html.markdown @@ -0,0 +1,61 @@ +--- +subcategory: "QuickSight" +layout: "aws" +page_title: "AWS: aws_quicksight_ip_restriction" +description: |- + Manages the content and status of IP rules. +--- + +# Resource: aws_quicksight_ip_restriction + +Manages the content and status of IP rules. + +~> Deletion of this resource clears all IP restrictions from a QuickSight account. + +## Example Usage + +```terraform +resource "aws_quicksight_ip_restriction" "example" { + enabled = true + + ip_restriction_rule_map = { + "108.56.166.202/32" = "Allow self" + } + + vpc_id_restriction_rule_map = { + (aws_vpc.example.id) = "Main VPC" + } +} +``` + +## Argument Reference + +This resource supports the following arguments: + +* `aws_account_id` - (Optional, Forces new resource) AWS account ID. 
Defaults to automatically determined account ID of the Terraform AWS provider. +* `enabled` - (Required) Whether IP rules are turned on. +* `ip_restriction_rule_map` - (Optional) Map of allowed IPv4 CIDR ranges and descriptions. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `vpc_endpoint_id_restriction_rule_map` - (Optional) Map of allowed VPC endpoint IDs and descriptions. +* `vpc_id_restriction_rule_map` - (Optional) Map of VPC IDs and descriptions. Traffic from all VPC endpoints that are present in the specified VPC is allowed. + +## Attribute Reference + +This resource exports no additional attributes. + +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import QuickSight IP restriction using the AWS account ID. For example: + +```terraform +import { + to = aws_quicksight_ip_restriction.example + id = "012345678901" +} +``` + +Using `terraform import`, import QuickSight IP restriction using the AWS account ID. For example: + +```console +% terraform import aws_quicksight_ip_restriction.example "012345678901" +``` diff --git a/website/docs/r/quicksight_key_registration.html.markdown b/website/docs/r/quicksight_key_registration.html.markdown new file mode 100644 index 000000000000..ad835fb88fdb --- /dev/null +++ b/website/docs/r/quicksight_key_registration.html.markdown @@ -0,0 +1,62 @@ +--- +subcategory: "QuickSight" +layout: "aws" +page_title: "AWS: aws_quicksight_key_registration" +description: |- + Registers customer managed keys in an Amazon QuickSight account. +--- + +# Resource: aws_quicksight_key_registration + +Registers customer managed keys in an Amazon QuickSight account. 
+ +~> Deletion of this resource clears all CMK registrations from a QuickSight account. QuickSight then uses AWS owned keys to encrypt your resources. + +## Example Usage + +```terraform +resource "aws_quicksight_key_registration" "example" { + key_registration { + key_arn = aws_kms_key.example1.arn + } + + key_registration { + key_arn = aws_kms_key.example2.arn + default_key = true + } +} +``` + +## Argument Reference + +This resource supports the following arguments: + +* `aws_account_id` - (Optional, Forces new resource) AWS account ID. Defaults to automatically determined account ID of the Terraform AWS provider. +* `key_registration` - (Required) Registered keys. See [key_registration](#key_registration). +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). + +### key_registration + +* `default_key` - (Optional) Whether the key is set as the default key for encryption and decryption use. +* `key_arn` - (Required) ARN of the AWS KMS key that is registered for encryption and decryption use. + +## Attribute Reference + +This resource exports no additional attributes. + +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import QuickSight key registration using the AWS account ID. For example: + +```terraform +import { + to = aws_quicksight_key_registration.example + id = "012345678901" +} +``` + +Using `terraform import`, import QuickSight key registration using the AWS account ID. 
For example: + +```console +% terraform import aws_quicksight_key_registration.example "012345678901" +``` diff --git a/website/docs/r/quicksight_namespace.html.markdown b/website/docs/r/quicksight_namespace.html.markdown index 27b8e2fea11c..c2128d32efd7 100644 --- a/website/docs/r/quicksight_namespace.html.markdown +++ b/website/docs/r/quicksight_namespace.html.markdown @@ -28,9 +28,9 @@ The following arguments are required: The following arguments are optional: -* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). -* `aws_account_id` - (Optional) AWS account ID. +* `aws_account_id` - (Optional, Forces new resource) AWS account ID. Defaults to automatically determined account ID of the Terraform AWS provider. * `identity_store` - (Optional) User identity directory type. Defaults to `QUICKSIGHT`, the only current valid value. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `tags` - (Optional) Key-value map of resource tags. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. 
## Attribute Reference diff --git a/website/docs/r/quicksight_refresh_schedule.html.markdown b/website/docs/r/quicksight_refresh_schedule.html.markdown index 2b56486f3ccf..3dad1806e0ea 100644 --- a/website/docs/r/quicksight_refresh_schedule.html.markdown +++ b/website/docs/r/quicksight_refresh_schedule.html.markdown @@ -83,8 +83,8 @@ The following arguments are required: The following arguments are optional: +* `aws_account_id` - (Optional, Forces new resource) AWS account ID. Defaults to automatically determined account ID of the Terraform AWS provider. * `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). -* `aws_account_id` - (Optional, Forces new resource) AWS account ID. ### schedule diff --git a/website/docs/r/quicksight_role_custom_permission.html.markdown b/website/docs/r/quicksight_role_custom_permission.html.markdown new file mode 100644 index 000000000000..c1d1514cbdff --- /dev/null +++ b/website/docs/r/quicksight_role_custom_permission.html.markdown @@ -0,0 +1,54 @@ +--- +subcategory: "QuickSight" +layout: "aws" +page_title: "AWS: aws_quicksight_role_custom_permission" +description: |- + Manages the custom permissions that are associated with a role. +--- + +# Resource: aws_quicksight_role_custom_permission + +Manages the custom permissions that are associated with a role. + +## Example Usage + +```terraform +resource "aws_quicksight_role_custom_permission" "example" { + role = "READER" + custom_permissions_name = aws_quicksight_custom_permissions.example.custom_permissions_name +} +``` + +## Argument Reference + +The following arguments are required: + +* `custom_permissions_name` - (Required, Forces new resource) Custom permissions profile name. +* `role` - (Required, Forces new resource) Role. 
Valid values are `ADMIN`, `AUTHOR`, `READER`, `ADMIN_PRO`, `AUTHOR_PRO`, and `READER_PRO`. + +The following arguments are optional: + +* `aws_account_id` - (Optional, Forces new resource) AWS account ID. Defaults to automatically determined account ID of the Terraform AWS provider. +* `namespace` - (Optional, Forces new resource) Namespace containing the role. Defaults to `default`. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). + +## Attribute Reference + +This resource exports no additional attributes. + +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import QuickSight role custom permissions using a comma-delimited string combining the `aws_account_id`, `namespace` and `role`. For example: + +```terraform +import { + to = aws_quicksight_role_custom_permission.example + id = "012345678901,default,READER" +} +``` + +Using `terraform import`, import QuickSight role custom permissions using a comma-delimited string combining the `aws_account_id`, `namespace`, and `role`. For example: + +```console +% terraform import aws_quicksight_role_custom_permission.example 012345678901,default,READER +``` diff --git a/website/docs/r/quicksight_role_membership.html.markdown b/website/docs/r/quicksight_role_membership.html.markdown index acf0cb23268e..3b03a95ed7c3 100644 --- a/website/docs/r/quicksight_role_membership.html.markdown +++ b/website/docs/r/quicksight_role_membership.html.markdown @@ -31,9 +31,9 @@ The following arguments are required: The following arguments are optional: -* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). -* `aws_account_id` - (Optional) AWS account ID. Defaults to the account of the caller identity if not configured. +* `aws_account_id` - (Optional, Forces new resource) AWS account ID. Defaults to automatically determined account ID of the Terraform AWS provider. * `namespace` - (Optional) Name of the namespace. Defaults to `default`. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). ## Attribute Reference diff --git a/website/docs/r/quicksight_template.html.markdown b/website/docs/r/quicksight_template.html.markdown index bbf9dafe862f..1ad3aa2ae421 100644 --- a/website/docs/r/quicksight_template.html.markdown +++ b/website/docs/r/quicksight_template.html.markdown @@ -98,10 +98,10 @@ The following arguments are required: The following arguments are optional: -* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). -* `aws_account_id` - (Optional, Forces new resource) AWS account ID. +* `aws_account_id` - (Optional, Forces new resource) AWS account ID. Defaults to automatically determined account ID of the Terraform AWS provider. * `definition` - (Optional) A detailed template definition. Only one of `definition` or `source_entity` should be configured. See [definition](#definition). * `permissions` - (Optional) A set of resource permissions on the template. Maximum of 64 items. See [permissions](#permissions). 
+* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `source_entity` - (Optional) The entity that you are using as a source when you create the template (analysis or template). Only one of `definition` or `source_entity` should be configured. See [source_entity](#source_entity). * `tags` - (Optional) Key-value map of resource tags. If configured with a provider [`default_tags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. diff --git a/website/docs/r/quicksight_template_alias.html.markdown b/website/docs/r/quicksight_template_alias.html.markdown index 21141245a7c1..bf60a905e994 100644 --- a/website/docs/r/quicksight_template_alias.html.markdown +++ b/website/docs/r/quicksight_template_alias.html.markdown @@ -32,8 +32,8 @@ The following arguments are required: The following arguments are optional: +* `aws_account_id` - (Optional, Forces new resource) AWS account ID. Defaults to automatically determined account ID of the Terraform AWS provider. * `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). -* `aws_account_id` - (Optional, Forces new resource) AWS account ID. 
## Attribute Reference diff --git a/website/docs/r/quicksight_theme.html.markdown b/website/docs/r/quicksight_theme.html.markdown index 75376f861e03..b8d374344bed 100644 --- a/website/docs/r/quicksight_theme.html.markdown +++ b/website/docs/r/quicksight_theme.html.markdown @@ -49,16 +49,16 @@ resource "aws_quicksight_theme" "example" { The following arguments are required: -* `theme_id` - (Required, Forces new resource) Identifier of the theme. * `base_theme_id` - (Required) The ID of the theme that a custom theme will inherit from. All themes inherit from one of the starting themes defined by Amazon QuickSight. For a list of the starting themes, use ListThemes or choose Themes from within an analysis. -* `name` - (Required) Display name of the theme. * `configuration` - (Required) The theme configuration, which contains the theme display properties. See [configuration](#configuration). +* `name` - (Required) Display name of the theme. +* `theme_id` - (Required, Forces new resource) Identifier of the theme. The following arguments are optional: -* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). -* `aws_account_id` - (Optional, Forces new resource) AWS account ID. +* `aws_account_id` - (Optional, Forces new resource) AWS account ID. Defaults to automatically determined account ID of the Terraform AWS provider. * `permissions` - (Optional) A set of resource permissions on the theme. Maximum of 64 items. See [permissions](#permissions). +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `tags` - (Optional) Key-value map of resource tags. If configured with a provider [`default_tags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. * `version_description` - (Optional) A description of the current theme version being created/updated. diff --git a/website/docs/r/quicksight_user.html.markdown b/website/docs/r/quicksight_user.html.markdown index 50e63f087a99..566db96c8ab3 100644 --- a/website/docs/r/quicksight_user.html.markdown +++ b/website/docs/r/quicksight_user.html.markdown @@ -52,15 +52,15 @@ resource "aws_quicksight_user" "example" { The following arguments are required: * `email` - (Required) Email address of the user that you want to register. -* `identity_type` - (Required) Identity type that your Amazon QuickSight account uses to manage the identity of users. Valid values: `IAM`, `QUICKSIGHT`. -* `user_role` - (Required) Amazon QuickSight role for the user. Value values: `READER`, `AUTHOR`, `ADMIN`, `READER_PRO`, `AUTHOR_PRO`, `ADMIN_PRO`. +* `identity_type` - (Required) Identity type that your Amazon QuickSight account uses to manage the identity of users. Valid values: `IAM`, `QUICKSIGHT`, `IAM_IDENTITY_CENTER`. +* `user_role` - (Required) Amazon QuickSight role for the user. Valid values: `READER`, `AUTHOR`, `ADMIN`, `READER_PRO`, `AUTHOR_PRO`, `ADMIN_PRO`, `RESTRICTED_AUTHOR`, `RESTRICTED_READER`. The following arguments are optional: -* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
-* `aws_account_id` - (Optional) ID for the AWS account that the user is in. Use the ID for the AWS account that contains your Amazon QuickSight account. +* `aws_account_id` - (Optional, Forces new resource) AWS account ID. Defaults to automatically determined account ID of the Terraform AWS provider. * `iam_arn` - (Optional) ARN of the IAM user or role that you are registering with Amazon QuickSight. Required only for users with an identity type of `IAM`. * `namespace` - (Optional) The Amazon Quicksight namespace to create the user in. Defaults to `default`. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `session_name` - (Optional) Name of the IAM session to use when assuming roles that can embed QuickSight dashboards. Only valid for registering users using an assumed IAM role. Additionally, if registering multiple users using the same IAM role, each user needs to have a unique session name. * `user_name` - (Optional) Amazon QuickSight user name that you want to create for the user you are registering. Required only for users with an identity type of `QUICKSIGHT`. diff --git a/website/docs/r/quicksight_user_custom_permission.html.markdown b/website/docs/r/quicksight_user_custom_permission.html.markdown new file mode 100644 index 000000000000..0056be62211a --- /dev/null +++ b/website/docs/r/quicksight_user_custom_permission.html.markdown @@ -0,0 +1,54 @@ +--- +subcategory: "QuickSight" +layout: "aws" +page_title: "AWS: aws_quicksight_user_custom_permission" +description: |- + Manages the custom permissions profile for a user. +--- + +# Resource: aws_quicksight_user_custom_permission + +Manages the custom permissions profile for a user. 
+ +## Example Usage + +```terraform +resource "aws_quicksight_user_custom_permission" "example" { + user_name = aws_quicksight_user.example.user_name + custom_permissions_name = aws_quicksight_custom_permissions.example.custom_permissions_name +} +``` + +## Argument Reference + +The following arguments are required: + +* `custom_permissions_name` - (Required, Forces new resource) Custom permissions profile name. +* `user_name` - (Required, Forces new resource) Username of the user. + +The following arguments are optional: + +* `aws_account_id` - (Optional, Forces new resource) AWS account ID. Defaults to automatically determined account ID of the Terraform AWS provider. +* `namespace` - (Optional, Forces new resource) Namespace that the user belongs to. Defaults to `default`. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). + +## Attribute Reference + +This resource exports no additional attributes. + +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import QuickSight user custom permissions using a comma-delimited string combining the `aws_account_id`, `namespace` and `user_name`. For example: + +```terraform +import { + to = aws_quicksight_user_custom_permission.example + id = "012345678901,default,user1" +} +``` + +Using `terraform import`, import QuickSight user custom permissions using a comma-delimited string combining the `aws_account_id`, `namespace`, and `user_name`. 
For example: + +```console +% terraform import aws_quicksight_user_custom_permission.example 012345678901,default,user1 +``` diff --git a/website/docs/r/quicksight_vpc_connection.html.markdown b/website/docs/r/quicksight_vpc_connection.html.markdown index c6a50639d3f5..0e7d8005f0c2 100644 --- a/website/docs/r/quicksight_vpc_connection.html.markdown +++ b/website/docs/r/quicksight_vpc_connection.html.markdown @@ -74,9 +74,9 @@ The following arguments are required: The following arguments are optional: -* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). -* `aws_account_id` - (Optional) AWS account ID. +* `aws_account_id` - (Optional, Forces new resource) AWS account ID. Defaults to automatically determined account ID of the Terraform AWS provider. * `dns_resolvers` - (Optional) A list of IP addresses of DNS resolver endpoints for the VPC connection. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `tags` - (Optional) Key-value map of resource tags. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. 
## Attribute Reference diff --git a/website/docs/r/rds_cluster.html.markdown b/website/docs/r/rds_cluster.html.markdown index b22316877068..4e97073660e3 100644 --- a/website/docs/r/rds_cluster.html.markdown +++ b/website/docs/r/rds_cluster.html.markdown @@ -230,7 +230,7 @@ This resource supports the following arguments: * `enable_global_write_forwarding` - (Optional) Whether cluster should forward writes to an associated global cluster. Applied to secondary clusters to enable them to forward writes to an [`aws_rds_global_cluster`](/docs/providers/aws/r/rds_global_cluster.html)'s primary cluster. See the [User Guide for Aurora](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-global-database-write-forwarding.html) for more information. * `enable_http_endpoint` - (Optional) Enable HTTP endpoint (data API). Only valid for some combinations of `engine_mode`, `engine` and `engine_version` and only available in some regions. See the [Region and version availability](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/data-api.html#data-api.regions) section of the documentation. This option also does not work with any of these options specified: `snapshot_identifier`, `replication_source_identifier`, `s3_import`. * `enable_local_write_forwarding` - (Optional) Whether read replicas can forward write operations to the writer DB instance in the DB cluster. By default, write operations aren't allowed on reader DB instances.. See the [User Guide for Aurora](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-mysql-write-forwarding.html) for more information. **NOTE:** Local write forwarding requires Aurora MySQL version 3.04 or higher. -* `enabled_cloudwatch_logs_exports` - (Optional) Set of log types to export to cloudwatch. If omitted, no logs will be exported. The following log types are supported: `audit`, `error`, `general`, `slowquery`, `iam-db-auth-error`, `postgresql` (PostgreSQL). 
+* `enabled_cloudwatch_logs_exports` - (Optional) Set of log types to export to cloudwatch. If omitted, no logs will be exported. The following log types are supported: `audit`, `error`, `general`, `iam-db-auth-error`, `instance`, `postgresql` (PostgreSQL), `slowquery`. * `engine_mode` - (Optional) Database engine mode. Valid values: `global` (only valid for Aurora MySQL 1.21 and earlier), `parallelquery`, `provisioned`, `serverless`. Defaults to: `provisioned`. Specify an empty value (`""`) for no engine mode. See the [RDS User Guide](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-serverless.html) for limitations when using `serverless`. * `engine_lifecycle_support` - (Optional) The life cycle type for this DB instance. This setting is valid for cluster types Aurora DB clusters and Multi-AZ DB clusters. Valid values are `open-source-rds-extended-support`, `open-source-rds-extended-support-disabled`. Default value is `open-source-rds-extended-support`. [Using Amazon RDS Extended Support]: https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/extended-support.html * `engine_version` - (Optional) Database engine version. Updating this argument results in an outage. See the [Aurora MySQL](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/AuroraMySQL.Updates.html) and [Aurora Postgres](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/AuroraPostgreSQL.Updates.html) documentation for your configured engine to determine this value, or by running `aws rds describe-db-engine-versions`. For example with Aurora MySQL 2, a potential value for this argument is `5.7.mysql_aurora.2.03.2`. The value can contain a partial version where supported by the API. The actual engine version used is returned in the attribute `engine_version_actual`, , see [Attribute Reference](#attribute-reference) below. 
diff --git a/website/docs/r/rds_cluster_role_association.html.markdown b/website/docs/r/rds_cluster_role_association.html.markdown index 08703daa707e..9dc9e8742891 100644 --- a/website/docs/r/rds_cluster_role_association.html.markdown +++ b/website/docs/r/rds_cluster_role_association.html.markdown @@ -29,7 +29,7 @@ This resource supports the following arguments: * `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `db_cluster_identifier` - (Required) DB Cluster Identifier to associate with the IAM Role. -* `feature_name` - (Required) Name of the feature for association. This can be found in the AWS documentation relevant to the integration or a full list is available in the `SupportedFeatureNames` list returned by [AWS CLI rds describe-db-engine-versions](https://docs.aws.amazon.com/cli/latest/reference/rds/describe-db-engine-versions.html). +* `feature_name` - (Optional) Name of the feature for association. This can be found in the AWS documentation relevant to the integration or a full list is available in the `SupportedFeatureNames` list returned by [AWS CLI rds describe-db-engine-versions](https://docs.aws.amazon.com/cli/latest/reference/rds/describe-db-engine-versions.html). * `role_arn` - (Required) Amazon Resource Name (ARN) of the IAM Role to associate with the DB Cluster. 
## Attribute Reference diff --git a/website/docs/r/rds_global_cluster.html.markdown b/website/docs/r/rds_global_cluster.html.markdown index c11abc029976..15a2c81fec92 100644 --- a/website/docs/r/rds_global_cluster.html.markdown +++ b/website/docs/r/rds_global_cluster.html.markdown @@ -211,20 +211,25 @@ resource "aws_rds_cluster_instance" "primary" { ## Argument Reference -This resource supports the following arguments: +The following arguments are required: -* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `global_cluster_identifier` - (Required, Forces new resources) Global cluster identifier. + +The following arguments are optional: + * `database_name` - (Optional, Forces new resources) Name for an automatically created database on cluster creation. Terraform will only perform drift detection if a configuration value is provided. * `deletion_protection` - (Optional) If the Global Cluster should have deletion protection enabled. The database can't be deleted when this value is set to `true`. The default is `false`. * `engine` - (Optional, Forces new resources) Name of the database engine to be used for this DB cluster. Terraform will only perform drift detection if a configuration value is provided. Valid values: `aurora`, `aurora-mysql`, `aurora-postgresql`. Defaults to `aurora`. Conflicts with `source_db_cluster_identifier`. * `engine_lifecycle_support` - (Optional) The life cycle type for this DB instance. This setting applies only to Aurora PostgreSQL-based global databases. Valid values are `open-source-rds-extended-support`, `open-source-rds-extended-support-disabled`. Default value is `open-source-rds-extended-support`. 
[Using Amazon RDS Extended Support]: https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/extended-support.html * `engine_version` - (Optional) Engine version of the Aurora global database. The `engine`, `engine_version`, and `instance_class` (on the `aws_rds_cluster_instance`) must together support global databases. See [Using Amazon Aurora global databases](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-global-database.html) for more information. By upgrading the engine version, Terraform will upgrade cluster members. **NOTE:** To avoid an `inconsistent final plan` error while upgrading, use the `lifecycle` `ignore_changes` for `engine_version` meta argument on the associated `aws_rds_cluster` resource as shown above in [Upgrading Engine Versions](#upgrading-engine-versions) example. * `force_destroy` - (Optional) Enable to remove DB Cluster members from Global Cluster on destroy. Required with `source_db_cluster_identifier`. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `source_db_cluster_identifier` - (Optional) Amazon Resource Name (ARN) to use as the primary DB Cluster of the Global Cluster on creation. Terraform cannot perform drift detection of this value. **NOTE:** After initial creation, this argument can be removed and replaced with `engine` and `engine_version`. This allows upgrading the engine version of the Global Cluster. * `storage_encrypted` - (Optional, Forces new resources) Specifies whether the DB cluster is encrypted. The default is `false` unless `source_db_cluster_identifier` is specified and encrypted. Terraform will only perform drift detection if a configuration value is provided. * `tags` - (Optional) A map of tags to assign to the DB cluster. 
If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. +~> When both `source_db_cluster_identifier` and `engine`/`engine_version` are set, all engine related values will be ignored during creation. The global cluster will inherit the `engine` and `engine_version` values from the source cluster. After the first apply, any differences between the inherited and configured values will trigger an in-place update. + ## Attribute Reference This resource exports the following attributes in addition to the arguments above: diff --git a/website/docs/r/rds_instance_state.html.markdown b/website/docs/r/rds_instance_state.html.markdown index 3ecc6cbefece..56de232f09b7 100644 --- a/website/docs/r/rds_instance_state.html.markdown +++ b/website/docs/r/rds_instance_state.html.markdown @@ -33,9 +33,7 @@ This resource supports the following arguments: ## Attribute Reference -This resource exports the following attributes in addition to the arguments above: - -* `identifier` - DB Instance Identifier +This resource exports no additional attributes. ## Timeouts @@ -46,7 +44,7 @@ This resource exports the following attributes in addition to the arguments abov ## Import -In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import RDS (Relational Database) RDS Instance State using the `example_id_arg`. For example: +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import RDS (Relational Database) RDS Instance State using the `identifier`. For example: ```terraform import { @@ -55,7 +53,7 @@ import { } ``` -Using `terraform import`, import RDS (Relational Database) RDS Instance State using the `example_id_arg`. 
For example: +Using `terraform import`, import RDS (Relational Database) RDS Instance State using the `identifier`. For example: ```console % terraform import aws_rds_instance_state.example rds_instance_state-id-12345678 diff --git a/website/docs/r/rds_integration.html.markdown b/website/docs/r/rds_integration.html.markdown index ed53dcfb87d7..d9c47107aa30 100644 --- a/website/docs/r/rds_integration.html.markdown +++ b/website/docs/r/rds_integration.html.markdown @@ -132,6 +132,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_rds_integration.example + identity = { + "arn" = "arn:aws:rds:us-east-1:123456789012:integration:12345678-1234-1234-1234-123456789012" + } +} + +resource "aws_rds_integration" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the RDS integration. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import RDS (Relational Database) Integration using the `arn`. For example: ```terraform diff --git a/website/docs/r/rekognition_collection.html.markdown b/website/docs/r/rekognition_collection.html.markdown index cf8859799a02..d617aad1e126 100644 --- a/website/docs/r/rekognition_collection.html.markdown +++ b/website/docs/r/rekognition_collection.html.markdown @@ -49,7 +49,7 @@ This resource exports the following attributes in addition to the arguments abov ## Import -In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Rekognition Collection using the `example_id_arg`. 
For example: +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Rekognition Collection using the `collection_id`. For example: ```terraform import { @@ -58,7 +58,7 @@ import { } ``` -Using `terraform import`, import Rekognition Collection using the `example_id_arg`. For example: +Using `terraform import`, import Rekognition Collection using the `collection_id`. For example: ```console % terraform import aws_rekognition_collection.example collection-id-12345678 diff --git a/website/docs/r/rekognition_project.html.markdown b/website/docs/r/rekognition_project.html.markdown index ce0392432bcf..4b4deaa4d211 100644 --- a/website/docs/r/rekognition_project.html.markdown +++ b/website/docs/r/rekognition_project.html.markdown @@ -12,6 +12,8 @@ Terraform resource for managing an AWS Rekognition Project. ## Example Usage +### Content Moderation + ```terraform resource "aws_rekognition_project" "example" { name = "example-project" @@ -20,6 +22,16 @@ resource "aws_rekognition_project" "example" { } ``` +### Custom Labels + +```terraform +resource "aws_rekognition_project" "example" { + # Do not set auto_update when feature is "CUSTOM_LABELS" + name = "example-project" + feature = "CUSTOM_LABELS" +} +``` + ## Argument Reference The following arguments are required: @@ -29,7 +41,7 @@ The following arguments are required: The following arguments are optional: * `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). -* `auto_update` - (Optional) Specify if automatic retraining should occur. Valid values are `ENABLED` or `DISABLED`. Defaults to `DISABLED`. +* `auto_update` - (Optional) Specify if automatic retraining should occur. 
Valid values are `ENABLED` or `DISABLED`. Must be set when `feature` is `CONTENT_MODERATION`, but do not set otherwise. * `feature` - (Optional) Specify the feature being customized. Valid values are `CONTENT_MODERATION` or `CUSTOM_LABELS`. Defaults to `CUSTOM_LABELS`. * `tags` - (Optional) Map of tags assigned to the resource. If configured with a provider [`default_tags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. @@ -49,7 +61,7 @@ This resource exports the following attributes in addition to the arguments abov ## Import -In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Rekognition Project using the `example_id_arg`. For example: +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Rekognition Project using the `name`. For example: ```terraform import { diff --git a/website/docs/r/resourceexplorer2_index.html.markdown b/website/docs/r/resourceexplorer2_index.html.markdown index 6b9f1d1f5df7..9bc7b191f4dd 100644 --- a/website/docs/r/resourceexplorer2_index.html.markdown +++ b/website/docs/r/resourceexplorer2_index.html.markdown @@ -43,6 +43,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_resourceexplorer2_index.example + identity = { + "arn" = "arn:aws:resource-explorer-2:us-east-1:123456789012:index/example-index-id" + } +} + +resource "aws_resourceexplorer2_index" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the Resource Explorer index. 
+ In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Resource Explorer indexes using the `arn`. For example: ```terraform diff --git a/website/docs/r/resourceexplorer2_view.html.markdown b/website/docs/r/resourceexplorer2_view.html.markdown index 33b2ea1d9210..8aafe508b249 100644 --- a/website/docs/r/resourceexplorer2_view.html.markdown +++ b/website/docs/r/resourceexplorer2_view.html.markdown @@ -65,6 +65,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_resourceexplorer2_view.example + identity = { + "arn" = "arn:aws:resource-explorer-2:us-east-1:123456789012:view/example-view/a1b2c3d4-5678-90ab-cdef-EXAMPLE11111" + } +} + +resource "aws_resourceexplorer2_view" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the Resource Explorer view. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Resource Explorer views using the `arn`. For example: ```terraform diff --git a/website/docs/r/route.html.markdown b/website/docs/r/route.html.markdown index 3c9a3197f876..1c91388d2e14 100644 --- a/website/docs/r/route.html.markdown +++ b/website/docs/r/route.html.markdown @@ -10,7 +10,7 @@ description: |- Provides a resource to create a routing table entry (a route) in a VPC routing table. -~> **NOTE on Route Tables and Routes:** Terraform currently provides both a standalone Route resource and a [Route Table](route_table.html) resource with routes defined in-line. At this time you cannot use a Route Table with in-line routes in conjunction with any Route resources. 
Doing so will cause a conflict of rule settings and will overwrite rules. +~> **NOTE on Route Tables and Routes:** Terraform currently provides both a standalone Route resource ([`aws_route`](route.html)) and a Route Table resource with routes defined in-line ([`aws_route_table`](route_table.html)). At this time you cannot use [`aws_route_table`](route_table.html) inline `route` blocks in conjunction with any [`aws_route`](route.html) resources. Doing so will cause a conflict of rule settings and will overwrite rules. ~> **NOTE on `gateway_id` attribute:** The AWS API is very forgiving with the resource ID passed in the `gateway_id` attribute. For example an `aws_route` resource can be created with an [`aws_nat_gateway`](nat_gateway.html) or [`aws_egress_only_internet_gateway`](egress_only_internet_gateway.html) ID specified for the `gateway_id` attribute. Specifying anything other than an [`aws_internet_gateway`](internet_gateway.html) or [`aws_vpn_gateway`](vpn_gateway.html) ID will lead to Terraform reporting a permanent diff between your configuration and recorded state, as the AWS API returns the more-specific attribute. If you are experiencing constant diffs with an `aws_route` resource, the first thing to check is that the correct attribute is being specified. @@ -95,6 +95,46 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. 
For example: + +```terraform +import { + to = aws_route.example + identity = { + route_table_id = "rtb-656C65616E6F72" + destination_cidr_block = "10.42.0.0/16" + + ### OR by IPv6 CIDR block + # destination_ipv6_cidr_block = "2001:db8::/56" + + ### OR by prefix list ID + # destination_prefix_list_id = "pl-0570a1d2d725c16be" + } +} + +resource "aws_route" "example" { + route_table_id = "rtb-656C65616E6F72" + destination_cidr_block = "10.42.0.0/16" + vpc_peering_connection_id = "pcx-45ff3dc1" +} +``` + +### Identity Schema + +#### Required + +* `route_table_id` - (String) ID of the route table. + +#### Optional + +~> Exactly one of `destination_cidr_block`, `destination_ipv6_cidr_block`, or `destination_prefix_list_id` is required. + +* `account_id` (String) AWS Account where this resource is managed. +* `destination_cidr_block` - (String) Destination IPv4 CIDR block. +* `destination_ipv6_cidr_block` - (String) Destination IPv6 CIDR block. +* `destination_prefix_list_id` - (String) ID of the destination managed prefix list. +* `region` (String) Region where this resource is managed. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import individual routes using `ROUTETABLEID_DESTINATION`. Import [local routes](https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Route_Tables.html#RouteTables) using the VPC's IPv4 or IPv6 CIDR blocks. 
For example: Import a route in route table `rtb-656C65616E6F72` with an IPv4 destination CIDR of `10.42.0.0/16`: diff --git a/website/docs/r/route53_record.html.markdown b/website/docs/r/route53_record.html.markdown index e024f6edd11f..d596098490d0 100644 --- a/website/docs/r/route53_record.html.markdown +++ b/website/docs/r/route53_record.html.markdown @@ -249,13 +249,43 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_route53_record.example + identity = { + zone_id = "Z4KAPRWWNC7JR" + name = "dev.example.com" + type = "NS" + } +} + +resource "aws_route53_record" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `zone_id` (String) Hosted zone ID for the record. +* `name` (String) Name of the record. +* `type` (String) Record type. + +#### Optional + +* `account_id` (String) AWS Account where this resource is managed. +* `set_identifier` (String) Set identifier for the record. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Route53 Records using the ID of the record, record name, record type, and set identifier. 
For example: Using the ID of the record, which is the zone identifier, record name, and record type, separated by underscores (`_`): ```terraform import { - to = aws_route53_record.myrecord + to = aws_route53_record.example id = "Z4KAPRWWNC7JR_dev.example.com_NS" } ``` @@ -264,7 +294,7 @@ If the record also contains a set identifier, append it: ```terraform import { - to = aws_route53_record.myrecord + to = aws_route53_record.example id = "Z4KAPRWWNC7JR_dev.example.com_NS_dev" } ``` @@ -273,7 +303,7 @@ If the record name is the empty string, it can be omitted: ```terraform import { - to = aws_route53_record.myrecord + to = aws_route53_record.example id = "Z4KAPRWWNC7JR__NS" } ``` @@ -283,11 +313,11 @@ import { Using the ID of the record, which is the zone identifier, record name, and record type, separated by underscores (`_`): ```console -% terraform import aws_route53_record.myrecord Z4KAPRWWNC7JR_dev_NS +% terraform import aws_route53_record.example Z4KAPRWWNC7JR_dev_NS ``` If the record also contains a set identifier, append it: ```console -% terraform import aws_route53_record.myrecord Z4KAPRWWNC7JR_dev_NS_dev +% terraform import aws_route53_record.example Z4KAPRWWNC7JR_dev_NS_dev ``` diff --git a/website/docs/r/route53_resolver_endpoint.html.markdown b/website/docs/r/route53_resolver_endpoint.html.markdown index 751add9f6f38..df302b87f679 100644 --- a/website/docs/r/route53_resolver_endpoint.html.markdown +++ b/website/docs/r/route53_resolver_endpoint.html.markdown @@ -46,8 +46,7 @@ This resource supports the following arguments: * `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `direction` - (Required) Direction of DNS queries to or from the Route 53 Resolver endpoint. 
-Valid values are `INBOUND` (resolver forwards DNS queries to the DNS service for a VPC from your network or another VPC) -or `OUTBOUND` (resolver forwards DNS queries from the DNS service for a VPC to your network or another VPC). +Valid values are `INBOUND` (resolver forwards DNS queries to the DNS service for a VPC from your network or another VPC), `OUTBOUND` (resolver forwards DNS queries from the DNS service for a VPC to your network or another VPC) or `INBOUND_DELEGATION` (resolver delegates queries to Route 53 private hosted zones from your network). * `ip_address` - (Required) Subnets and IP addresses in your VPC that you want DNS queries to pass through on the way from your VPCs to your network (for outbound endpoints) or on the way from your network to your VPCs (for inbound endpoints). Described below. * `name` - (Optional) Friendly name of the Route 53 Resolver endpoint. diff --git a/website/docs/r/route53_resolver_rule.html.markdown b/website/docs/r/route53_resolver_rule.html.markdown index 1cec8dc1ebeb..807517671737 100644 --- a/website/docs/r/route53_resolver_rule.html.markdown +++ b/website/docs/r/route53_resolver_rule.html.markdown @@ -93,11 +93,37 @@ Values are `NOT_SHARED`, `SHARED_BY_ME` or `SHARED_WITH_ME` ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_route53_resolver_rule.example + identity = { + id = "rslvr-rr-0123456789abcdef0" + } +} + +resource "aws_route53_resolver_rule" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `id` - (String) ID of the Route53 Resolver rule. + +#### Optional + +* `account_id` (String) AWS Account where this resource is managed. +* `region` (String) Region where this resource is managed. 
+ In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Route53 Resolver rules using the `id`. For example: ```terraform import { - to = aws_route53_resolver_rule.sys + to = aws_route53_resolver_rule.example id = "rslvr-rr-0123456789abcdef0" } ``` @@ -105,5 +131,5 @@ import { Using `terraform import`, import Route53 Resolver rules using the `id`. For example: ```console -% terraform import aws_route53_resolver_rule.sys rslvr-rr-0123456789abcdef0 +% terraform import aws_route53_resolver_rule.example rslvr-rr-0123456789abcdef0 ``` diff --git a/website/docs/r/route53_resolver_rule_association.html.markdown b/website/docs/r/route53_resolver_rule_association.html.markdown index 70849bc6bc6a..6301bd4ab8f7 100644 --- a/website/docs/r/route53_resolver_rule_association.html.markdown +++ b/website/docs/r/route53_resolver_rule_association.html.markdown @@ -36,6 +36,32 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_route53_resolver_rule_association.example + identity = { + id = "rslvr-rrassoc-97242eaf88example" + } +} + +resource "aws_route53_resolver_rule_association" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `id` - (String) ID of the Route53 Resolver rule association. + +#### Optional + +* `account_id` (String) AWS Account where this resource is managed. +* `region` (String) Region where this resource is managed. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Route53 Resolver rule associations using the `id`. 
For example: ```terraform diff --git a/website/docs/r/route53_zone.html.markdown b/website/docs/r/route53_zone.html.markdown index 5025b2133fd4..00f8ed31c58d 100644 --- a/website/docs/r/route53_zone.html.markdown +++ b/website/docs/r/route53_zone.html.markdown @@ -55,11 +55,28 @@ resource "aws_route53_record" "dev-ns" { ~> **NOTE:** Private zones require at least one VPC association at all times. ```terraform +resource "aws_vpc" "primary" { + cidr_block = "10.6.0.0/16" + enable_dns_hostnames = true + enable_dns_support = true +} + +resource "aws_vpc" "secondary" { + cidr_block = "10.7.0.0/16" + enable_dns_hostnames = true + enable_dns_support = true +} + resource "aws_route53_zone" "private" { name = "example.com" vpc { - vpc_id = aws_vpc.example.id + vpc_id = aws_vpc.primary.id + } + + # Add multiple `vpc` blocks to associate additional VPCs + vpc { + vpc_id = aws_vpc.secondary.id } } ``` diff --git a/website/docs/r/route53profiles_association.html.markdown b/website/docs/r/route53profiles_association.html.markdown index 038a1def3c90..ed8fb921b297 100644 --- a/website/docs/r/route53profiles_association.html.markdown +++ b/website/docs/r/route53profiles_association.html.markdown @@ -72,7 +72,7 @@ import { } ``` -Using `terraform import`, import Route 53 Profiles Association using the `example_id_arg`. For example: +Using `terraform import`, import Route 53 Profiles Association using the `id`. 
For example: ```console % terraform import aws_route53profiles_association.example rpa-id-12345678 diff --git a/website/docs/r/route53profiles_profile.html.markdown b/website/docs/r/route53profiles_profile.html.markdown index 1eb323762bdc..aa5a067a226b 100644 --- a/website/docs/r/route53profiles_profile.html.markdown +++ b/website/docs/r/route53profiles_profile.html.markdown @@ -54,7 +54,7 @@ This resource exports the following attributes in addition to the arguments abov ## Import -In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Route 53 Profiles Profile using the `example_id_arg`. For example: +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Route 53 Profiles Profile using the `id`. For example: ```terraform import { @@ -63,7 +63,7 @@ import { } ``` -Using `terraform import`, import Route 53 Profiles Profile using the `example`. For example: +Using `terraform import`, import Route 53 Profiles Profile using the `id`. For example: ```console % terraform import aws_route53profiles_profile.example rp-12345678 diff --git a/website/docs/r/route53profiles_resource_association.html.markdown b/website/docs/r/route53profiles_resource_association.html.markdown index 1e2b01996900..995ba99fb3d3 100644 --- a/website/docs/r/route53profiles_resource_association.html.markdown +++ b/website/docs/r/route53profiles_resource_association.html.markdown @@ -77,7 +77,7 @@ import { } ``` -Using `terraform import`, import Route 53 Profiles Resource Association using the `example_id_arg`. For example: +Using `terraform import`, import Route 53 Profiles Resource Association using the `id`. 
For example: ```console % terraform import aws_route53profiles_resource_association.example rpa-id-12345678 diff --git a/website/docs/r/route53recoverycontrolconfig_cluster.html.markdown b/website/docs/r/route53recoverycontrolconfig_cluster.html.markdown index 6890776010c1..b6259c662d6a 100644 --- a/website/docs/r/route53recoverycontrolconfig_cluster.html.markdown +++ b/website/docs/r/route53recoverycontrolconfig_cluster.html.markdown @@ -20,9 +20,14 @@ resource "aws_route53recoverycontrolconfig_cluster" "example" { ## Argument Reference -The following arguments are required: +This resource supports the following arguments: * `name` - (Required) Unique name describing the cluster. +* `network_type` - (Optional) Network type of cluster. Valid values are `IPV4` and `DUALSTACK`. Defaults to `IPV4`. + +The following arguments are optional: + +* `tags` - (Optional) A map of tags to assign to the resource. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. ## Attribute Reference @@ -31,6 +36,7 @@ This resource exports the following attributes in addition to the arguments abov * `arn` - ARN of the cluster * `cluster_endpoints` - List of 5 endpoints in 5 regions that can be used to talk to the cluster. See below. * `status` - Status of cluster. `PENDING` when it is being created, `PENDING_DELETION` when it is being deleted and `DEPLOYED` otherwise. +* `tags_all` - A map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). 
### cluster_endpoints diff --git a/website/docs/r/route53recoverycontrolconfig_control_panel.html.markdown b/website/docs/r/route53recoverycontrolconfig_control_panel.html.markdown index c4f7b7f4a35c..acb17354d133 100644 --- a/website/docs/r/route53recoverycontrolconfig_control_panel.html.markdown +++ b/website/docs/r/route53recoverycontrolconfig_control_panel.html.markdown @@ -26,6 +26,10 @@ The following arguments are required: * `cluster_arn` - (Required) ARN of the cluster in which this control panel will reside. * `name` - (Required) Name describing the control panel. +The following arguments are optional: + +* `tags` - (Optional) A map of tags to assign to the resource. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. + ## Attribute Reference This resource exports the following attributes in addition to the arguments above: @@ -34,6 +38,7 @@ This resource exports the following attributes in addition to the arguments abov * `default_control_panel` - Whether a control panel is default. * `routing_control_count` - Number routing controls in a control panel. * `status` - Status of control panel: `PENDING` when it is being created/updated, `PENDING_DELETION` when it is being deleted, and `DEPLOYED` otherwise. +* `tags_all` - A map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). 
## Import diff --git a/website/docs/r/route53recoverycontrolconfig_safety_rule.html.markdown b/website/docs/r/route53recoverycontrolconfig_safety_rule.html.markdown index 0ce22e4340d3..0542e709a980 100644 --- a/website/docs/r/route53recoverycontrolconfig_safety_rule.html.markdown +++ b/website/docs/r/route53recoverycontrolconfig_safety_rule.html.markdown @@ -57,6 +57,7 @@ The following arguments are optional: * `asserted_controls` - (Optional) Routing controls that are part of transactions that are evaluated to determine if a request to change a routing control state is allowed. * `gating_controls` - (Optional) Gating controls for the new gating rule. That is, routing controls that are evaluated by the rule configuration that you specify. * `target_controls` - (Optional) Routing controls that can only be set or unset if the specified `rule_config` evaluates to true for the specified `gating_controls`. +* `tags` - (Optional) A map of tags to assign to the resource. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. ### rule_config @@ -70,6 +71,7 @@ This resource exports the following attributes in addition to the arguments abov * `arn` - ARN of the safety rule. * `status` - Status of the safety rule. `PENDING` when it is being created/updated, `PENDING_DELETION` when it is being deleted, and `DEPLOYED` otherwise. +* `tags_all` - A map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). 
## Import diff --git a/website/docs/r/route_table.html.markdown b/website/docs/r/route_table.html.markdown index 1e1db59ae66b..70cc25c8e2fd 100644 --- a/website/docs/r/route_table.html.markdown +++ b/website/docs/r/route_table.html.markdown @@ -10,12 +10,6 @@ description: |- Provides a resource to create a VPC routing table. -~> **NOTE on Route Tables and Routes:** Terraform currently -provides both a standalone [Route resource](route.html) and a Route Table resource with routes -defined in-line. At this time you cannot use a Route Table with in-line routes -in conjunction with any Route resources. Doing so will cause -a conflict of rule settings and will overwrite rules. - ~> **NOTE on `gateway_id` and `nat_gateway_id`:** The AWS API is very forgiving with these two attributes and the `aws_route_table` resource can be created with a NAT ID specified as a Gateway ID attribute. This _will_ lead to a permanent diff between your configuration and statefile, as the API returns the correct @@ -125,6 +119,9 @@ This resource supports the following arguments: * `vpc_id` - (Required) The VPC ID. * `route` - (Optional) A list of route objects. Their keys are documented below. This argument is processed in [attribute-as-blocks mode](https://www.terraform.io/docs/configuration/attr-as-blocks.html). This means that omitting this argument is interpreted as ignoring any existing routes. To remove all managed routes an empty list should be specified. See the example above. + +~> **NOTE on Route Tables and Routes:** Terraform currently provides both a standalone Route resource ([`aws_route`](route.html)) and a Route Table resource with routes defined in-line ([`aws_route_table`](route_table.html)). At this time you cannot use [`aws_route_table`](route_table.html) inline `route` blocks in conjunction with any [`aws_route`](route.html) resources. Doing so will cause a conflict of rule settings and will overwrite rules. 
+ * `tags` - (Optional) A map of tags to assign to the resource. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. * `propagating_vgws` - (Optional) A list of virtual gateways for propagation. @@ -175,6 +172,32 @@ attribute once the route resource is created. ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_route_table.example + identity = { + id = "rtb-4e616f6d69" + } +} + +resource "aws_route_table" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `id` - (String) ID of the routing table. + +#### Optional + +* `account_id` (String) AWS Account where this resource is managed. +* `region` (String) Region where this resource is managed. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Route Tables using the route table `id`. For example: ```terraform diff --git a/website/docs/r/s3_access_point.html.markdown b/website/docs/r/s3_access_point.html.markdown index f64bfac08817..bd8324242849 100644 --- a/website/docs/r/s3_access_point.html.markdown +++ b/website/docs/r/s3_access_point.html.markdown @@ -82,18 +82,18 @@ The following arguments are required: The following arguments are optional: -* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
* `account_id` - (Optional) AWS account ID for the owner of the bucket for which you want to create an access point. Defaults to automatically determined account ID of the Terraform AWS provider. * `bucket_account_id` - (Optional) AWS account ID associated with the S3 bucket associated with this access point. * `policy` - (Optional) Valid JSON document that specifies the policy that you want to apply to this access point. Removing `policy` from your configuration or setting `policy` to null or an empty string (i.e., `policy = ""`) _will not_ delete the policy since it could have been set by `aws_s3control_access_point_policy`. To remove the `policy`, set it to `"{}"` (an empty JSON document). * `public_access_block_configuration` - (Optional) Configuration block to manage the `PublicAccessBlock` configuration that you want to apply to this Amazon S3 bucket. You can enable the configuration options in any combination. Detailed below. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `tags` - (Optional) Map of tags to assign to the bucket. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. * `vpc_configuration` - (Optional) Configuration block to restrict access to this access point to requests from the specified Virtual Private Cloud (VPC). Required for S3 on Outposts. Detailed below. 
### public_access_block_configuration Configuration Block The following arguments are optional: -* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `block_public_acls` - (Optional) Whether Amazon S3 should block public ACLs for buckets in this account. Defaults to `true`. Enabling this setting does not affect existing policies or ACLs. When set to `true` causes the following behavior: * PUT Bucket acl and PUT Object acl calls fail if the specified ACL is public. * PUT Object calls fail if the request includes a public ACL. @@ -123,6 +123,7 @@ Note: S3 access points only support secure access by HTTPS. HTTP isn't supported * `has_public_access_policy` - Indicates whether this access point currently has a policy that allows public access. * `id` - For Access Point of an AWS Partition S3 Bucket, the AWS account ID and access point name separated by a colon (`:`). For S3 on Outposts Bucket, the ARN of the Access Point. * `network_origin` - Indicates whether this access point allows access from the public Internet. Values are `VPC` (the access point doesn't allow access from the public Internet) and `Internet` (the access point allows access from the public Internet, subject to the access point and bucket access policies). +* `tags_all` - Map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). 
## Import diff --git a/website/docs/r/s3_bucket.html.markdown b/website/docs/r/s3_bucket.html.markdown index 42159b1e6a97..e0bbca18745d 100644 --- a/website/docs/r/s3_bucket.html.markdown +++ b/website/docs/r/s3_bucket.html.markdown @@ -325,11 +325,37 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_s3_bucket.example + identity = { + bucket = "bucket-name" + } +} + +resource "aws_s3_bucket" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `bucket` (String) Name of the S3 bucket. + +#### Optional + +* `account_id` (String) AWS Account where this resource is managed. +* `region` (String) Region where this resource is managed. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import S3 bucket using the `bucket`. For example: ```terraform import { - to = aws_s3_bucket.bucket + to = aws_s3_bucket.example id = "bucket-name" } ``` @@ -337,5 +363,5 @@ import { Using `terraform import`, import S3 bucket using the `bucket`. For example: ```console -% terraform import aws_s3_bucket.bucket bucket-name +% terraform import aws_s3_bucket.example bucket-name ``` diff --git a/website/docs/r/s3_bucket_acl.html.markdown b/website/docs/r/s3_bucket_acl.html.markdown index 94d0dd827798..2f041a0d8eec 100644 --- a/website/docs/r/s3_bucket_acl.html.markdown +++ b/website/docs/r/s3_bucket_acl.html.markdown @@ -167,6 +167,34 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. 
For example: + +```terraform +import { + to = aws_s3_bucket_acl.example + identity = { + bucket = "bucket-name" + } +} + +resource "aws_s3_bucket_acl" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `bucket` (String) S3 bucket name. + +#### Optional + +* `account_id` (String) AWS Account where this resource is managed. +* `acl` (String) Canned ACL to apply to the bucket. +* `expected_bucket_owner` (String) Account ID of the expected bucket owner. +* `region` (String) Region where this resource is managed. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import S3 bucket ACL using `bucket`, `expected_bucket_owner`, and/or `acl`, depending on your situation. For example: If the owner (account ID) of the source bucket is the _same_ account used to configure the Terraform AWS Provider, and the source bucket is **not configured** with a diff --git a/website/docs/r/s3_bucket_cors_configuration.html.markdown b/website/docs/r/s3_bucket_cors_configuration.html.markdown index 065b37386301..835643c12271 100644 --- a/website/docs/r/s3_bucket_cors_configuration.html.markdown +++ b/website/docs/r/s3_bucket_cors_configuration.html.markdown @@ -67,6 +67,33 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_s3_bucket_cors_configuration.example + identity = { + bucket = "bucket-name" + } +} + +resource "aws_s3_bucket_cors_configuration" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `bucket` (String) S3 bucket name. + +#### Optional + +* `account_id` (String) AWS Account where this resource is managed. 
+* `expected_bucket_owner` (String) Account ID of the expected bucket owner. +* `region` (String) Region where this resource is managed. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import S3 bucket CORS configuration using the `bucket` or using the `bucket` and `expected_bucket_owner` separated by a comma (`,`). For example: If the owner (account ID) of the source bucket is the same account used to configure the Terraform AWS Provider, import using the `bucket`: diff --git a/website/docs/r/s3_bucket_lifecycle_configuration.html.markdown b/website/docs/r/s3_bucket_lifecycle_configuration.html.markdown index 49ff6f2af3db..55b1c46f650d 100644 --- a/website/docs/r/s3_bucket_lifecycle_configuration.html.markdown +++ b/website/docs/r/s3_bucket_lifecycle_configuration.html.markdown @@ -28,10 +28,9 @@ See the Amazon S3 User Guide on [setting lifecycle configuration on a bucket](ht ### With neither a filter nor prefix specified -The Lifecycle rule applies to a subset of objects based on the key name prefix (`""`). +When you don't specify a filter or prefix, the lifecycle rule applies to all objects in the bucket. This has the same effect as setting an empty `filter` element. -This configuration is intended to replicate the default behavior of the `lifecycle_rule` -parameter in the Terraform AWS Provider `aws_s3_bucket` resource prior to `v4.0`. +This configuration maintains compatibility with the default behavior of the `lifecycle_rule` parameter from the `aws_s3_bucket` resource in versions prior to v4.0 of the Terraform AWS Provider. ```terraform resource "aws_s3_bucket_lifecycle_configuration" "example" { @@ -384,16 +383,12 @@ The `rule` configuration block supports the following arguments: * `expiration` - (Optional) Configuration block that specifies the expiration for the lifecycle of the object in the form of date, days and, whether the object has a delete marker. [See below](#expiration). 
* `filter` - (Optional) Configuration block used to identify objects that a Lifecycle Rule applies to. [See below](#filter). - If not specified, the `rule` will default to using `prefix`. - One of `filter` or `prefix` should be specified. * `id` - (Required) Unique identifier for the rule. The value cannot be longer than 255 characters. * `noncurrent_version_expiration` - (Optional) Configuration block that specifies when noncurrent object versions expire. [See below](#noncurrent_version_expiration). * `noncurrent_version_transition` - (Optional) Set of configuration blocks that specify the transition rule for the lifecycle rule that describes when noncurrent objects transition to a specific storage class. [See below](#noncurrent_version_transition). * `prefix` - (Optional) **DEPRECATED** Use `filter` instead. This has been deprecated by Amazon S3. Prefix identifying one or more objects to which the rule applies. - Defaults to an empty string (`""`) if `filter` is not specified. - One of `prefix` or `filter` should be specified. * `status` - (Required) Whether the rule is currently being applied. Valid values: `Enabled` or `Disabled`. * `transition` - (Optional) Set of configuration blocks that specify when an Amazon S3 object transitions to a specified storage class. [See below](#transition). diff --git a/website/docs/r/s3_bucket_logging.html.markdown b/website/docs/r/s3_bucket_logging.html.markdown index 241dd10166d2..f544bbbedae4 100644 --- a/website/docs/r/s3_bucket_logging.html.markdown +++ b/website/docs/r/s3_bucket_logging.html.markdown @@ -18,6 +18,57 @@ to decide which method meets your requirements. 
## Example Usage +### Grant permission by using bucket policy + +```terraform +data "aws_caller_identity" "current" {} + +resource "aws_s3_bucket" "logging" { + bucket = "access-logging-bucket" +} + +data "aws_iam_policy_document" "logging_bucket_policy" { + statement { + principals { + identifiers = ["logging.s3.amazonaws.com"] + type = "Service" + } + actions = ["s3:PutObject"] + resources = ["${aws_s3_bucket.logging.arn}/*"] + condition { + test = "StringEquals" + variable = "aws:SourceAccount" + values = [data.aws_caller_identity.current.account_id] + } + } +} + +resource "aws_s3_bucket_policy" "logging" { + bucket = aws_s3_bucket.logging.bucket + policy = data.aws_iam_policy_document.logging_bucket_policy.json +} + +resource "aws_s3_bucket" "example" { + bucket = "example-bucket" +} + +resource "aws_s3_bucket_logging" "example" { + bucket = aws_s3_bucket.example.bucket + + target_bucket = aws_s3_bucket.logging.bucket + target_prefix = "log/" + target_object_key_format { + partitioned_prefix { + partition_date_source = "EventTime" + } + } +} +``` + +### Grant permission by using bucket ACL + +The [AWS Documentation](https://docs.aws.amazon.com/AmazonS3/latest/userguide/enable-server-access-logging.html) does not recommend using the ACL. + ```terraform resource "aws_s3_bucket" "example" { bucket = "my-tf-example-bucket" @@ -77,8 +128,8 @@ The `grantee` configuration block supports the following arguments: The `target_object_key_format` configuration block supports the following arguments: -* `partitioned_prefix` - (Optional) Partitioned S3 key for log objects. [See below](#partitioned_prefix). -* `simple_prefix` - (Optional) Use the simple format for S3 keys for log objects. To use, set `simple_prefix {}`. +* `partitioned_prefix` - (Optional) Partitioned S3 key for log objects, in the form `[target_prefix][SourceAccountId]/[SourceRegion]/[SourceBucket]/[YYYY]/[MM]/[DD]/[YYYY]-[MM]-[DD]-[hh]-[mm]-[ss]-[UniqueString]`. Conflicts with `simple_prefix`. 
[See below](#partitioned_prefix). +* `simple_prefix` - (Optional) Use the simple format for S3 keys for log objects, in the form `[target_prefix][YYYY]-[MM]-[DD]-[hh]-[mm]-[ss]-[UniqueString]`. To use, set `simple_prefix {}`. Conflicts with `partitioned_prefix`. ### partitioned_prefix @@ -94,6 +145,33 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_s3_bucket_logging.example + identity = { + bucket = "bucket-name" + } +} + +resource "aws_s3_bucket_logging" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `bucket` (String) S3 bucket name. + +#### Optional + +* `account_id` (String) AWS Account where this resource is managed. +* `expected_bucket_owner` (String) Account ID of the expected bucket owner. +* `region` (String) Region where this resource is managed. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import S3 bucket logging using the `bucket` or using the `bucket` and `expected_bucket_owner` separated by a comma (`,`). For example: If the owner (account ID) of the source bucket is the same account used to configure the Terraform AWS Provider, import using the `bucket`: diff --git a/website/docs/r/s3_bucket_metadata_configuration.html.markdown b/website/docs/r/s3_bucket_metadata_configuration.html.markdown new file mode 100644 index 000000000000..b9ef2ba3673c --- /dev/null +++ b/website/docs/r/s3_bucket_metadata_configuration.html.markdown @@ -0,0 +1,135 @@ +--- +subcategory: "S3 (Simple Storage)" +layout: "aws" +page_title: "AWS: aws_s3_bucket_metadata_configuration" +description: |- + Manages Amazon S3 Metadata for a bucket. 
+--- + +# Resource: aws_s3_bucket_metadata_configuration + +Manages Amazon S3 Metadata for a bucket. + +## Example Usage + +### Basic Usage + +```terraform +resource "aws_s3_bucket_metadata_configuration" "example" { + bucket = aws_s3_bucket.example.bucket + + metadata_configuration { + inventory_table_configuration { + configuration_state = "ENABLED" + } + + journal_table_configuration { + record_expiration { + days = 7 + expiration = "ENABLED" + } + } + } +} +``` + +## Argument Reference + +The following arguments are required: + +* `bucket` - (Required) General purpose bucket that you want to create the metadata configuration for. +* `metadata_configuration` - (Required) Metadata configuration. See [`metadata_configuration` Block](#metadata_configuration-block) for details. + +The following arguments are optional: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). + +### `metadata_configuration` Block + +The `metadata_configuration` configuration block supports the following arguments: + +* `inventory_table_configuration` - (Required) Inventory table configuration. See [`inventory_table_configuration` Block](#inventory_table_configuration-block) for details. +* `journal_table_configuration` - (Required) Journal table configuration. See [`journal_table_configuration` Block](#journal_table_configuration-block) for details. + +### `inventory_table_configuration` Block + +The `inventory_table_configuration` configuration block supports the following arguments: + +* `configuration_state` - (Required) Configuration state of the inventory table, indicating whether the inventory table is enabled or disabled. Valid values: `ENABLED`, `DISABLED`. 
+* `encryption_configuration` - (Optional) Encryption configuration for the inventory table. See [`encryption_configuration` Block](#encryption_configuration-block) for details. + +### `journal_table_configuration` Block + +The `journal_table_configuration` configuration block supports the following arguments: + +* `encryption_configuration` - (Optional) Encryption configuration for the journal table. See [`encryption_configuration` Block](#encryption_configuration-block) for details. +* `record_expiration` - (Required) Journal table record expiration settings. See [`record_expiration` Block](#record_expiration-block) for details. + +### `encryption_configuration` Block + +The `encryption_configuration` configuration block supports the following arguments: + +* `kms_key_arn` - (Optional) KMS key ARN when `sse_algorithm` is `aws:kms`. +* `sse_algorithm` - (Required) Encryption type for the metadata table. Valid values: `aws:kms`, `AES256`. + +### `record_expiration` Block + +The `record_expiration` configuration block supports the following arguments: + +* `days` - (Optional) Number of days to retain journal table records. +* `expiration` - (Required) Whether journal table record expiration is enabled or disabled. Valid values: `ENABLED`, `DISABLED`. + +## Attribute Reference + +This resource exports the following attributes in addition to the arguments above: + +* `metadata_configuration.0.destination` - Destination information for the S3 Metadata configuration. + * `table_bucket_arn` - ARN of the table bucket where the metadata configuration is stored. + * `table_bucket_type` - Type of the table bucket where the metadata configuration is stored. + * `table_namespace` - Namespace in the table bucket where the metadata tables for the metadata configuration are stored. +* `metadata_configuration.0.inventory_table_configuration.0.table_arn` - Inventory table ARN. +* `metadata_configuration.0.inventory_table_configuration.0.table_name` - Inventory table name. 
+* `metadata_configuration.0.journal_table_configuration.0.table_arn` - Journal table ARN. +* `metadata_configuration.0.journal_table_configuration.0.table_name` - Journal table name. + +## Timeouts + +[Configuration options](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts): + +* `create` - (Default `30m`) + +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import S3 bucket metadata configuration using the `bucket` or using the `bucket` and `expected_bucket_owner` separated by a comma (`,`). For example: + +If the owner (account ID) of the source bucket is the same account used to configure the Terraform AWS Provider, import using the `bucket`: + +```terraform +import { + to = aws_s3_bucket_metadata_configuration.example + id = "bucket-name" +} +``` + +If the owner (account ID) of the source bucket differs from the account used to configure the Terraform AWS Provider, import using the `bucket` and `expected_bucket_owner` separated by a comma (`,`): + +```terraform +import { + to = aws_s3_bucket_metadata_configuration.example + id = "bucket-name,123456789012" +} +``` + +Using `terraform import`, import S3 bucket metadata configuration using the `bucket` or using the `bucket` and `expected_bucket_owner` separated by a comma (`,`). 
For example: + +If the owner (account ID) of the source bucket is the same account used to configure the Terraform AWS Provider, import using the `bucket`: + +```console +% terraform import aws_s3_bucket_metadata_configuration.example bucket-name +``` + +If the owner (account ID) of the source bucket differs from the account used to configure the Terraform AWS Provider, import using the `bucket` and `expected_bucket_owner` separated by a comma (`,`): + +```console +% terraform import aws_s3_bucket_metadata_configuration.example bucket-name,123456789012 +``` diff --git a/website/docs/r/s3_bucket_object.html.markdown b/website/docs/r/s3_bucket_object.html.markdown index 9d88ddeb627c..f6179d15cb70 100644 --- a/website/docs/r/s3_bucket_object.html.markdown +++ b/website/docs/r/s3_bucket_object.html.markdown @@ -181,6 +181,34 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_s3_bucket_object.example + identity = { + bucket = "some-bucket-name" + key = "some/key.txt" + } +} + +resource "aws_s3_bucket_object" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `bucket` (String) S3 bucket name. +* `key` (String) Object key. + +#### Optional + +* `account_id` (String) AWS Account where this resource is managed. +* `region` (String) Region where this resource is managed. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import objects using the `id` or S3 URL. 
For example: Import using the `id`, which is the bucket name and the key together: diff --git a/website/docs/r/s3_bucket_policy.html.markdown b/website/docs/r/s3_bucket_policy.html.markdown index bc0a4448eafd..9716f784596a 100644 --- a/website/docs/r/s3_bucket_policy.html.markdown +++ b/website/docs/r/s3_bucket_policy.html.markdown @@ -62,11 +62,37 @@ This resource exports no additional attributes. ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_s3_bucket_policy.example + identity = { + bucket = "my-tf-test-bucket" + } +} + +resource "aws_s3_bucket_policy" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `bucket` (String) Name of the S3 bucket. + +#### Optional + +* `account_id` (String) AWS Account where this resource is managed. +* `region` (String) Region where this resource is managed. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import S3 bucket policies using the bucket name. For example: ```terraform import { - to = aws_s3_bucket_policy.allow_access_from_another_account + to = aws_s3_bucket_policy.example id = "my-tf-test-bucket" } ``` @@ -74,5 +100,5 @@ import { Using `terraform import`, import S3 bucket policies using the bucket name. 
For example: ```console -% terraform import aws_s3_bucket_policy.allow_access_from_another_account my-tf-test-bucket +% terraform import aws_s3_bucket_policy.example my-tf-test-bucket ``` diff --git a/website/docs/r/s3_bucket_public_access_block.html.markdown b/website/docs/r/s3_bucket_public_access_block.html.markdown index 8eba3d2ec02b..ce15d23dabdc 100644 --- a/website/docs/r/s3_bucket_public_access_block.html.markdown +++ b/website/docs/r/s3_bucket_public_access_block.html.markdown @@ -12,6 +12,8 @@ Manages S3 bucket-level Public Access Block configuration. For more information -> This resource cannot be used with S3 directory buckets. +~> Setting `skip_destroy` to `true` means that the AWS Provider will not destroy a public access block, even when running `terraform destroy`. The configuration is thus an intentional dangling resource that is not managed by Terraform and will remain in-place in your AWS account. + ## Example Usage ```terraform @@ -36,7 +38,7 @@ This resource supports the following arguments: * `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `bucket` - (Required) S3 Bucket to which this Public Access Block configuration should be applied. * `block_public_acls` - (Optional) Whether Amazon S3 should block public ACLs for this bucket. Defaults to `false`. Enabling this setting does not affect existing policies or ACLs. When set to `true` causes the following behavior: - * PUT Bucket acl and PUT Object acl calls will fail if the specified ACL allows public access. + * PUT Bucket ACL and PUT Object ACL calls will fail if the specified ACL allows public access. * PUT Object calls will fail if the request includes an object ACL. 
* `block_public_policy` - (Optional) Whether Amazon S3 should block public bucket policies for this bucket. Defaults to `false`. Enabling this setting does not affect the existing bucket policy. When set to `true` causes Amazon S3 to: * Reject calls to PUT Bucket policy if the specified bucket policy allows public access. @@ -44,6 +46,7 @@ This resource supports the following arguments: * Ignore public ACLs on this bucket and any objects that it contains. * `restrict_public_buckets` - (Optional) Whether Amazon S3 should restrict public bucket policies for this bucket. Defaults to `false`. Enabling this setting does not affect the previously stored bucket policy, except that public and cross-account access within the public bucket policy, including non-public delegation to specific accounts, is blocked. When set to `true`: * Only the bucket owner and AWS Services can access this buckets if it has a public policy. +* `skip_destroy` - (Optional) Whether to retain the public access block upon destruction. If set to `true`, the resource is simply removed from state instead. This may be desirable in certain scenarios to prevent the removal of a public access block before deletion of the associated bucket. ## Attribute Reference diff --git a/website/docs/r/s3_bucket_server_side_encryption_configuration.html.markdown b/website/docs/r/s3_bucket_server_side_encryption_configuration.html.markdown index 98e873c12551..d64617fd4943 100644 --- a/website/docs/r/s3_bucket_server_side_encryption_configuration.html.markdown +++ b/website/docs/r/s3_bucket_server_side_encryption_configuration.html.markdown @@ -67,6 +67,33 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. 
For example: + +```terraform +import { + to = aws_s3_bucket_server_side_encryption_configuration.example + identity = { + bucket = "bucket-name" + } +} + +resource "aws_s3_bucket_server_side_encryption_configuration" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `bucket` (String) S3 bucket name. + +#### Optional + +* `account_id` (String) AWS Account where this resource is managed. +* `expected_bucket_owner` (String) Account ID of the expected bucket owner. +* `region` (String) Region where this resource is managed. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import S3 bucket server-side encryption configuration using the `bucket` or using the `bucket` and `expected_bucket_owner` separated by a comma (`,`). For example: If the owner (account ID) of the source bucket is the same account used to configure the Terraform AWS Provider, import using the `bucket`: diff --git a/website/docs/r/s3_bucket_versioning.html.markdown b/website/docs/r/s3_bucket_versioning.html.markdown index b8dfc460e0f9..3e9aec544cec 100644 --- a/website/docs/r/s3_bucket_versioning.html.markdown +++ b/website/docs/r/s3_bucket_versioning.html.markdown @@ -116,6 +116,33 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_s3_bucket_versioning.example + identity = { + bucket = "bucket-name" + } +} + +resource "aws_s3_bucket_versioning" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `bucket` (String) S3 bucket name. + +#### Optional + +* `account_id` (String) AWS Account where this resource is managed. 
+* `expected_bucket_owner` (String) Account ID of the expected bucket owner. +* `region` (String) Region where this resource is managed. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import S3 bucket versioning using the `bucket` or using the `bucket` and `expected_bucket_owner` separated by a comma (`,`). For example: If the owner (account ID) of the source bucket is the same account used to configure the Terraform AWS Provider, import using the `bucket`: diff --git a/website/docs/r/s3_bucket_website_configuration.html.markdown b/website/docs/r/s3_bucket_website_configuration.html.markdown index 3e54c1854cc4..aa28b1988c02 100644 --- a/website/docs/r/s3_bucket_website_configuration.html.markdown +++ b/website/docs/r/s3_bucket_website_configuration.html.markdown @@ -135,6 +135,33 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_s3_bucket_website_configuration.example + identity = { + bucket = "bucket-name" + } +} + +resource "aws_s3_bucket_website_configuration" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `bucket` (String) S3 bucket name. + +#### Optional + +* `account_id` (String) AWS Account where this resource is managed. +* `expected_bucket_owner` (String) Account ID of the expected bucket owner. +* `region` (String) Region where this resource is managed. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import S3 bucket website configuration using the `bucket` or using the `bucket` and `expected_bucket_owner` separated by a comma (`,`). 
For example: If the owner (account ID) of the source bucket is the same account used to configure the Terraform AWS Provider, import using the `bucket`: diff --git a/website/docs/r/s3_object.html.markdown b/website/docs/r/s3_object.html.markdown index 2f6c142307db..5e4bb2e21247 100644 --- a/website/docs/r/s3_object.html.markdown +++ b/website/docs/r/s3_object.html.markdown @@ -167,7 +167,6 @@ The following arguments are required: The following arguments are optional: -* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `acl` - (Optional) [Canned ACL](https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl) to apply. Valid values are `private`, `public-read`, `public-read-write`, `aws-exec-read`, `authenticated-read`, `bucket-owner-read`, and `bucket-owner-full-control`. * `bucket_key_enabled` - (Optional) Whether or not to use [Amazon S3 Bucket Keys](https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html) for SSE-KMS. * `cache_control` - (Optional) Caching behavior along the request/reply chain Read [w3c cache_control](http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9) for further details. @@ -186,10 +185,11 @@ The following arguments are optional: * `object_lock_mode` - (Optional) Object lock [retention mode](https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html#object-lock-retention-modes) that you want to apply to this object. Valid values are `GOVERNANCE` and `COMPLIANCE`. * `object_lock_retain_until_date` - (Optional) Date and time, in [RFC3339 format](https://tools.ietf.org/html/rfc3339#section-5.8), when this object's object lock will [expire](https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html#object-lock-retention-periods). 
* `override_provider` - (Optional) Override provider-level configuration options. See [Override Provider](#override-provider) below for more details. -* `server_side_encryption` - (Optional) Server-side encryption of the object in S3. Valid values are "`AES256`" and "`aws:kms`". +* `server_side_encryption` - (Optional) Server-side encryption of the object in S3. Valid values are `"AES256"`, `"aws:kms"`, `"aws:kms:dsse"`, and `"aws:fsx"`. * `source_hash` - (Optional) Triggers updates like `etag` but useful to address `etag` encryption limitations. Set using `filemd5("path/to/source")` (Terraform 0.11.12 or later). (The value is only stored in state and not saved by AWS.) * `source` - (Optional, conflicts with `content` and `content_base64`) Path to a file that will be read and uploaded as raw bytes for the object content. * `storage_class` - (Optional) [Storage Class](https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html#AmazonS3-PutObject-request-header-StorageClass) for the object. Defaults to "`STANDARD`". +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `tags` - (Optional) Map of tags to assign to the object. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. * `website_redirect` - (Optional) Target URL for [website redirect](http://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html). 
@@ -221,6 +221,34 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_s3_object.example + identity = { + bucket = "some-bucket-name" + key = "some/key.txt" + } +} + +resource "aws_s3_object" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `bucket` (String) S3 bucket name. +* `key` (String) Object key. + +#### Optional + +* `account_id` (String) AWS Account where this resource is managed. +* `region` (String) Region where this resource is managed. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import objects using the `id` or S3 URL. For example: Import using the `id`, which is the bucket name and the key together: diff --git a/website/docs/r/s3control_bucket.html.markdown b/website/docs/r/s3control_bucket.html.markdown index 7f0f00dfee68..0b4d0928cb0e 100644 --- a/website/docs/r/s3control_bucket.html.markdown +++ b/website/docs/r/s3control_bucket.html.markdown @@ -42,6 +42,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_s3control_bucket.example + identity = { + "arn" = "arn:aws:s3-outposts:us-east-1:123456789012:outpost/op-12345678/bucket/example" + } +} + +resource "aws_s3control_bucket" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) ARN of the bucket. 
+ In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import S3 Control Buckets using Amazon Resource Name (ARN). For example: ```terraform diff --git a/website/docs/r/s3tables_table_bucket.html.markdown b/website/docs/r/s3tables_table_bucket.html.markdown index b2aeae573af3..e58a2b6aa233 100644 --- a/website/docs/r/s3tables_table_bucket.html.markdown +++ b/website/docs/r/s3tables_table_bucket.html.markdown @@ -31,11 +31,12 @@ The following arguments are required: The following arguments are optional: -* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `encryption_configuration` - (Optional) A single table bucket encryption configuration object. [See `encryption_configuration` below](#encryption_configuration). +* `force_destroy` - (Optional, Default: `false`) Whether all tables and namespaces within the table bucket should be deleted *when the table bucket is destroyed* so that the table bucket can be destroyed without error. These tables and namespaces are *not* recoverable. This only deletes tables and namespaces when the table bucket is destroyed, *not* when setting this parameter to `true`. Once this parameter is set to `true`, there must be a successful `terraform apply` run before a destroy is required to update this value in the resource state. Without a successful `terraform apply` after this parameter is set, this flag will have no effect. If setting this field in the same operation that would require replacing the table bucket or destroying the table bucket, this flag will not work.
Additionally when importing a table bucket, a successful `terraform apply` is required to set this value in state before it will take effect on a destroy operation. * `maintenance_configuration` - (Optional) A single table bucket maintenance configuration object. [See `maintenance_configuration` below](#maintenance_configuration). +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). ### `encryption_configuration` diff --git a/website/docs/r/sagemaker_user_profile.html.markdown b/website/docs/r/sagemaker_user_profile.html.markdown index ef0179cde891..51295b2340c7 100644 --- a/website/docs/r/sagemaker_user_profile.html.markdown +++ b/website/docs/r/sagemaker_user_profile.html.markdown @@ -230,11 +230,39 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_sagemaker_user_profile.example + identity = { + domain_id = "domain-id" + user_profile_name = "profile-name" + } +} + +resource "aws_sagemaker_user_profile" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `domain_id` (String) SageMaker domain ID. +* `user_profile_name` (String) Name of the user profile. + +#### Optional + +* `account_id` (String) AWS Account where this resource is managed. +* `region` (String) Region where this resource is managed. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import SageMaker AI User Profiles using the `arn`. 
For example: ```terraform import { - to = aws_sagemaker_user_profile.test_user_profile + to = aws_sagemaker_user_profile.example id = "arn:aws:sagemaker:us-west-2:123456789012:user-profile/domain-id/profile-name" } ``` @@ -242,5 +270,5 @@ import { Using `terraform import`, import SageMaker AI User Profiles using the `arn`. For example: ```console -% terraform import aws_sagemaker_user_profile.test_user_profile arn:aws:sagemaker:us-west-2:123456789012:user-profile/domain-id/profile-name +% terraform import aws_sagemaker_user_profile.example arn:aws:sagemaker:us-west-2:123456789012:user-profile/domain-id/profile-name ``` diff --git a/website/docs/r/scheduler_schedule.html.markdown b/website/docs/r/scheduler_schedule.html.markdown index 2853f5600e4f..a74d910ce3a3 100644 --- a/website/docs/r/scheduler_schedule.html.markdown +++ b/website/docs/r/scheduler_schedule.html.markdown @@ -72,13 +72,14 @@ The following arguments are required: The following arguments are optional: -* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `action_after_completion` - (Optional) Action that applies to the schedule after completing invocation of the target. Valid values are `NONE` and `DELETE`. Defaults to `NONE`. * `description` - (Optional) Brief description of the schedule. * `end_date` - (Optional) The date, in UTC, before which the schedule can invoke its target. Depending on the schedule's recurrence expression, invocations might stop on, or before, the end date you specify. EventBridge Scheduler ignores the end date for one-time schedules. Example: `2030-01-01T01:00:00Z`. * `group_name` - (Optional, Forces new resource) Name of the schedule group to associate with this schedule. When omitted, the `default` schedule group is used. 
* `kms_key_arn` - (Optional) ARN for the customer managed KMS key that EventBridge Scheduler will use to encrypt and decrypt your data. * `name` - (Optional, Forces new resource) Name of the schedule. If omitted, Terraform will assign a random, unique name. Conflicts with `name_prefix`. * `name_prefix` - (Optional, Forces new resource) Creates a unique name beginning with the specified prefix. Conflicts with `name`. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). * `schedule_expression_timezone` - (Optional) Timezone in which the scheduling expression is evaluated. Defaults to `UTC`. Example: `Australia/Sydney`. * `start_date` - (Optional) The date, in UTC, after which the schedule can begin invoking its target. Depending on the schedule's recurrence expression, invocations might occur on, or after, the start date you specify. EventBridge Scheduler ignores the start date for one-time schedules. Example: `2030-01-01T01:00:00Z`. * `state` - (Optional) Specifies whether the schedule is enabled or disabled. One of: `ENABLED` (default), `DISABLED`. diff --git a/website/docs/r/secretsmanager_secret.html.markdown b/website/docs/r/secretsmanager_secret.html.markdown index 96f731c57c23..46aaea445f8d 100644 --- a/website/docs/r/secretsmanager_secret.html.markdown +++ b/website/docs/r/secretsmanager_secret.html.markdown @@ -57,6 +57,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. 
For example: + +```terraform +import { + to = aws_secretsmanager_secret.example + identity = { + "arn" = "arn:aws:secretsmanager:us-east-1:123456789012:secret:example-123456" + } +} + +resource "aws_secretsmanager_secret" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the Secrets Manager secret. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import `aws_secretsmanager_secret` using the secret Amazon Resource Name (ARN). For example: ```terraform diff --git a/website/docs/r/secretsmanager_secret_policy.html.markdown b/website/docs/r/secretsmanager_secret_policy.html.markdown index fb81412e68c9..1e99f20ed5fc 100644 --- a/website/docs/r/secretsmanager_secret_policy.html.markdown +++ b/website/docs/r/secretsmanager_secret_policy.html.markdown @@ -60,6 +60,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_secretsmanager_secret_policy.example + identity = { + "arn" = "arn:aws:secretsmanager:us-east-1:123456789012:secret:example-123456" + } +} + +resource "aws_secretsmanager_secret_policy" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the Secrets Manager secret. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import `aws_secretsmanager_secret_policy` using the secret Amazon Resource Name (ARN). 
For example: ```terraform diff --git a/website/docs/r/secretsmanager_secret_rotation.html.markdown b/website/docs/r/secretsmanager_secret_rotation.html.markdown index 4d99d9d1ee54..44c78f36d33f 100644 --- a/website/docs/r/secretsmanager_secret_rotation.html.markdown +++ b/website/docs/r/secretsmanager_secret_rotation.html.markdown @@ -59,6 +59,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_secretsmanager_secret_rotation.example + identity = { + "arn" = "arn:aws:secretsmanager:us-east-1:123456789012:secret:example-123456" + } +} + +resource "aws_secretsmanager_secret_rotation" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the Secrets Manager secret. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import `aws_secretsmanager_secret_rotation` using the secret Amazon Resource Name (ARN). For example: ```terraform diff --git a/website/docs/r/secretsmanager_secret_version.html.markdown b/website/docs/r/secretsmanager_secret_version.html.markdown index 6919d7331a54..2684fdc90ef1 100644 --- a/website/docs/r/secretsmanager_secret_version.html.markdown +++ b/website/docs/r/secretsmanager_secret_version.html.markdown @@ -81,6 +81,34 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. 
For example: + +```terraform +import { + to = aws_secretsmanager_secret_version.example + identity = { + secret_id = "arn:aws:secretsmanager:us-east-1:123456789012:secret:example-123456" + version_id = "xxxxx-xxxxxxx-xxxxxxx-xxxxx" + } +} + +resource "aws_secretsmanager_secret_version" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `secret_id` - (String) ID of the secret. +* `version_id` - (String) ID of the secret version. + +#### Optional + +* `account_id` (String) AWS Account where this resource is managed. +* `region` (String) Region where this resource is managed. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import `aws_secretsmanager_secret_version` using the secret ID and version ID. For example: ```terraform diff --git a/website/docs/r/security_group.html.markdown b/website/docs/r/security_group.html.markdown index 443fa866a5ec..b85483649020 100644 --- a/website/docs/r/security_group.html.markdown +++ b/website/docs/r/security_group.html.markdown @@ -312,11 +312,37 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_security_group.example + identity = { + id = "sg-903004f8" + } +} + +resource "aws_security_group" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `id` (String) ID of the security group. + +#### Optional + +* `account_id` (String) AWS Account where this resource is managed. +* `region` (String) Region where this resource is managed. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Security Groups using the security group `id`. 
For example: ```terraform import { - to = aws_security_group.elb_sg + to = aws_security_group.example id = "sg-903004f8" } ``` @@ -324,5 +350,5 @@ import { Using `terraform import`, import Security Groups using the security group `id`. For example: ```console -% terraform import aws_security_group.elb_sg sg-903004f8 +% terraform import aws_security_group.example sg-903004f8 ``` diff --git a/website/docs/r/securityhub_automation_rule.html.markdown b/website/docs/r/securityhub_automation_rule.html.markdown index 4638e55b1ead..eb7cbb28efe8 100644 --- a/website/docs/r/securityhub_automation_rule.html.markdown +++ b/website/docs/r/securityhub_automation_rule.html.markdown @@ -202,6 +202,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_securityhub_automation_rule.example + identity = { + "arn" = "arn:aws:securityhub:us-east-1:123456789012:automation-rule/a1b2c3d4-5678-90ab-cdef-EXAMPLE11111" + } +} + +resource "aws_securityhub_automation_rule" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the Security Hub automation rule. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Security Hub Automation Rule using their ARN. 
For example: ```terraform diff --git a/website/docs/r/securityhub_standards_subscription.html.markdown b/website/docs/r/securityhub_standards_subscription.html.markdown index abea85bc7379..497bc4bdb212 100644 --- a/website/docs/r/securityhub_standards_subscription.html.markdown +++ b/website/docs/r/securityhub_standards_subscription.html.markdown @@ -45,7 +45,9 @@ Currently available standards (remember to replace `${var.partition}` and `${var | CIS AWS Foundations Benchmark v1.4.0 | `arn:${var.partition}:securityhub:${var.region}::standards/cis-aws-foundations-benchmark/v/1.4.0` | | CIS AWS Foundations Benchmark v3.0.0 | `arn:${var.partition}:securityhub:${var.region}::standards/cis-aws-foundations-benchmark/v/3.0.0` | | NIST SP 800-53 Rev. 5 | `arn:${var.partition}:securityhub:${var.region}::standards/nist-800-53/v/5.0.0` | -| PCI DSS | `arn:${var.partition}:securityhub:${var.region}::standards/pci-dss/v/3.2.1` | +| NIST SP 800-171 Rev. 2 | `arn:${var.partition}:securityhub:${var.region}::standards/nist-800-171/v/2.0.0` | +| PCI DSS v3.2.1 | `arn:${var.partition}:securityhub:${var.region}::standards/pci-dss/v/3.2.1` | +| PCI DSS v4.0.1 | `arn:${var.partition}:securityhub:${var.region}::standards/pci-dss/v/4.0.1` | ## Attribute Reference diff --git a/website/docs/r/securitylake_data_lake.html.markdown b/website/docs/r/securitylake_data_lake.html.markdown index 495fb0ad3e58..4f53f9054956 100644 --- a/website/docs/r/securitylake_data_lake.html.markdown +++ b/website/docs/r/securitylake_data_lake.html.markdown @@ -115,6 +115,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. 
For example: + +```terraform +import { + to = aws_securitylake_data_lake.example + identity = { + "arn" = "arn:aws:securitylake:us-east-1:123456789012:data-lake/default" + } +} + +resource "aws_securitylake_data_lake" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the Security Lake data lake. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Security Hub standards subscriptions using the standards subscription ARN. For example: ```terraform diff --git a/website/docs/r/securitylake_subscriber.html.markdown b/website/docs/r/securitylake_subscriber.html.markdown index d8c5990670b7..1683668256bc 100644 --- a/website/docs/r/securitylake_subscriber.html.markdown +++ b/website/docs/r/securitylake_subscriber.html.markdown @@ -14,6 +14,8 @@ Terraform resource for managing an AWS Security Lake Subscriber. ## Example Usage +### Basic Usage + ```terraform resource "aws_securitylake_subscriber" "example" { subscriber_name = "example-name" @@ -34,6 +36,36 @@ resource "aws_securitylake_subscriber" "example" { } ``` +### Multiple Log Sources + +```terraform +resource "aws_securitylake_subscriber" "example" { + subscriber_name = "example-name" + access_type = "S3" + + source { + aws_log_source_resource { + source_name = "SH_FINDINGS" + source_version = "2.0" + } + } + + source { + aws_log_source_resource { + source_name = "ROUTE53" + source_version = "2.0" + } + } + + subscriber_identity { + external_id = "example" + principal = "1234567890" + } + + depends_on = [aws_securitylake_data_lake.example] +} +``` + ## Argument Reference This resource supports the following arguments: @@ -64,8 +96,8 @@ The `subscriber_identity` block supports the following arguments: The `aws_log_source_resource` block supports the following arguments: -* `source_name` - (Required) Provides data expiration details of Amazon 
Security Lake object. -* `source_version` - (Optional) Provides data storage transition details of Amazon Security Lake object. +* `source_name` - (Required) The name for an AWS source. This must be a Regionally unique value. Valid values: `ROUTE53`, `VPC_FLOW`, `SH_FINDINGS`, `CLOUD_TRAIL_MGMT`, `LAMBDA_EXECUTION`, `S3_DATA`, `EKS_AUDIT` and `WAF`. +* `source_version` - (Optional) The version for an AWS source. This must be a Regionally unique value. ### `custom_log_source_resource` Block diff --git a/website/docs/r/service_discovery_service.html.markdown b/website/docs/r/service_discovery_service.html.markdown index 401b5482ee5f..b5650937aa3b 100644 --- a/website/docs/r/service_discovery_service.html.markdown +++ b/website/docs/r/service_discovery_service.html.markdown @@ -39,7 +39,7 @@ resource "aws_service_discovery_service" "example" { routing_policy = "MULTIVALUE" } - health_check_custom_config { + health_check_config { failure_threshold = 1 } } @@ -79,9 +79,9 @@ This resource supports the following arguments: * `name` - (Required, Forces new resource) The name of the service. * `description` - (Optional) The description of the service. * `dns_config` - (Optional) A complex type that contains information about the resource record sets that you want Amazon Route 53 to create when you register an instance. See [`dns_config` Block](#dns_config-block) for details. -* `health_check_config` - (Optional) A complex type that contains settings for an optional health check. Only for Public DNS namespaces. See [`health_check_config` Block](#health_check_config-block) for details. * `force_destroy` - (Optional) A boolean that indicates all instances should be deleted from the service so that the service can be destroyed without error. These instances are not recoverable. Defaults to `false`. -* `health_check_custom_config` - (Optional, Forces new resource) A complex type that contains settings for ECS managed health checks.
See [`health_check_custom_config` Block](#health_check_custom_config-block) for details. +* `health_check_config` - (Optional) A complex type that contains settings for an optional health check. Only for Public DNS namespaces. See [`health_check_config` Block](#health_check_config-block) for details. +* `health_check_custom_config` - (Optional, **Deprecated**, Forces new resource) Please use `health_check_config` instead. See [`health_check_custom_config` Block](#health_check_custom_config-block) for details. * `namespace_id` - (Optional) The ID of the namespace that you want to use to create the service. * `type` - (Optional) If present, specifies that the service instances are only discoverable using the `DiscoverInstances` API operation. No DNS records is registered for the service instances. The only valid value is `HTTP`. * `tags` - (Optional) A map of tags to assign to the service. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. @@ -111,6 +111,8 @@ The `health_check_config` configuration block supports the following arguments: ### `health_check_custom_config` Block +~> The `health_check_custom_config` argument is deprecated. Use [`health_check_config`](#health_check_config-block) instead, which supports additional attributes. + The `health_check_custom_config` configuration block supports the following arguments: * `failure_threshold` - (Optional, **Deprecated** Forces new resource) The number of 30-second intervals that you want service discovery to wait before it changes the health status of a service instance. Value is always set to 1. 
diff --git a/website/docs/r/sesv2_configuration_set_event_destination.html.markdown b/website/docs/r/sesv2_configuration_set_event_destination.html.markdown index 3b84931b8570..310ec564cd00 100644 --- a/website/docs/r/sesv2_configuration_set_event_destination.html.markdown +++ b/website/docs/r/sesv2_configuration_set_event_destination.html.markdown @@ -143,7 +143,7 @@ The `event_destination` configuration block supports the following arguments: * `matching_event_types` - (Required) - An array that specifies which events the Amazon SES API v2 should send to the destinations. Valid values: `SEND`, `REJECT`, `BOUNCE`, `COMPLAINT`, `DELIVERY`, `OPEN`, `CLICK`, `RENDERING_FAILURE`, `DELIVERY_DELAY`, `SUBSCRIPTION`. * `cloud_watch_destination` - (Optional) An object that defines an Amazon CloudWatch destination for email events. See [`cloud_watch_destination` Block](#cloud_watch_destination-block) for details. * `enabled` - (Optional) When the event destination is enabled, the specified event types are sent to the destinations. Default: `false`. -* `event_bridge_configuration` - (Optional) An object that defines an Amazon EventBridge destination for email events. You can use Amazon EventBridge to send notifications when certain email events occur. See [`event_bridge_configuration` Block](#event_bridge_configuration-block) for details. +* `event_bridge_destination` - (Optional) An object that defines an Amazon EventBridge destination for email events. You can use Amazon EventBridge to send notifications when certain email events occur. See [`event_bridge_destination` Block](#event_bridge_destination-block) for details. * `kinesis_firehose_destination` - (Optional) An object that defines an Amazon Kinesis Data Firehose destination for email events. See [`kinesis_firehose_destination` Block](#kinesis_firehose_destination-block) for details. * `pinpoint_destination` - (Optional) An object that defines an Amazon Pinpoint project destination for email events. 
See [`pinpoint_destination` Block](#pinpoint_destination-block) for details. * `sns_destination` - (Optional) An object that defines an Amazon SNS destination for email events. See [`sns_destination` Block](#sns_destination-block) for details. @@ -162,9 +162,9 @@ The `dimension_configuration` configuration block supports the following argumen * `dimension_name` - (Required) The name of an Amazon CloudWatch dimension associated with an email sending metric. * `dimension_value_source` - (Required) The location where the Amazon SES API v2 finds the value of a dimension to publish to Amazon CloudWatch. Valid values: `MESSAGE_TAG`, `EMAIL_HEADER`, `LINK_TAG`. -### `event_bridge_configuration` Block +### `event_bridge_destination` Block -The `event_bridge_configuration` configuration block supports the following arguments: +The `event_bridge_destination` configuration block supports the following arguments: * `event_bus_arn` - (Required) The Amazon Resource Name (ARN) of the Amazon EventBridge bus to publish email events to. Only the default bus is supported. diff --git a/website/docs/r/sesv2_email_identity.html.markdown b/website/docs/r/sesv2_email_identity.html.markdown index 02f4adeb463c..290d565509f2 100644 --- a/website/docs/r/sesv2_email_identity.html.markdown +++ b/website/docs/r/sesv2_email_identity.html.markdown @@ -92,6 +92,7 @@ This resource exports the following attributes in addition to the arguments abov * `tokens` - If you used Easy DKIM to configure DKIM authentication for the domain, then this object contains a set of unique strings that you use to create a set of CNAME records that you add to the DNS configuration for your domain. When Amazon SES detects these records in the DNS configuration for your domain, the DKIM authentication process is complete. If you configured DKIM authentication for the domain by providing your own public-private key pair, then this object contains the selector for the public key. * `identity_type` - The email identity type. 
Valid values: `EMAIL_ADDRESS`, `DOMAIN`. * `tags_all` - Map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block). +* `verification_status` - The verification status of the identity. The status can be one of the following: `PENDING`, `SUCCESS`, `FAILED`, `TEMPORARY_FAILURE`, or `NOT_STARTED`. * `verified_for_sending_status` - Specifies whether or not the identity is verified. ## Import diff --git a/website/docs/r/sesv2_email_identity_policy.html.markdown b/website/docs/r/sesv2_email_identity_policy.html.markdown index 9ca457d961f3..5b1d5c718ea0 100644 --- a/website/docs/r/sesv2_email_identity_policy.html.markdown +++ b/website/docs/r/sesv2_email_identity_policy.html.markdown @@ -63,7 +63,7 @@ This resource exports no additional attributes. ## Import -In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import SESv2 (Simple Email V2) Email Identity Policy using the `id` (`email_identity|policy_name`). For example: +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import SESv2 (Simple Email V2) Email Identity Policy using the `email_identity` and `policy_name` separated by `|`. For example: ```terraform import { @@ -72,7 +72,7 @@ import { } ``` -Using `terraform import`, import SESv2 (Simple Email V2) Email Identity Policy using the `example_id_arg`. For example: +Using `terraform import`, import SESv2 (Simple Email V2) Email Identity Policy using the `email_identity` and `policy_name` separated by `|`. 
For example: ```console % terraform import aws_sesv2_email_identity_policy.example example_email_identity|example_policy_name diff --git a/website/docs/r/sfn_activity.html.markdown b/website/docs/r/sfn_activity.html.markdown index 89c3424300e2..78fa05ccc7c7 100644 --- a/website/docs/r/sfn_activity.html.markdown +++ b/website/docs/r/sfn_activity.html.markdown @@ -55,18 +55,34 @@ This resource supports the following arguments: This resource exports the following attributes in addition to the arguments above: -* `id` - The Amazon Resource Name (ARN) that identifies the created activity. -* `name` - The name of the activity. -* `creation_date` - The date the activity was created. +* `id` - Amazon Resource Name (ARN) of the activity. +* `arn` - Amazon Resource Name (ARN) of the activity. +* `name` - Name of the activity. +* `creation_date` - Date the activity was created. * `tags_all` - A map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_sfn_activity.example + identity = { + "arn" = "arn:aws:states:eu-west-1:123456789098:activity:bar" + } +} + +resource "aws_sfn_activity" "example" { + ### Configuration omitted for brevity ### +} +``` + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import activities using the `arn`. For example: ```terraform import { - to = aws_sfn_activity.foo + to = aws_sfn_activity.example id = "arn:aws:states:eu-west-1:123456789098:activity:bar" } ``` @@ -74,5 +90,5 @@ import { Using `terraform import`, import activities using the `arn`. 
For example: ```console -% terraform import aws_sfn_activity.foo arn:aws:states:eu-west-1:123456789098:activity:bar +% terraform import aws_sfn_activity.example arn:aws:states:eu-west-1:123456789098:activity:bar ``` diff --git a/website/docs/r/sfn_alias.html.markdown b/website/docs/r/sfn_alias.html.markdown index 1e351776d3d1..f874f720df0d 100644 --- a/website/docs/r/sfn_alias.html.markdown +++ b/website/docs/r/sfn_alias.html.markdown @@ -62,6 +62,21 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_sfn_alias.example + identity = { + "arn" = "arn:aws:states:us-east-1:123456789098:stateMachine:myStateMachine:foo" + } +} + +resource "aws_sfn_alias" "example" { + ### Configuration omitted for brevity ### +} +``` + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import SFN (Step Functions) Alias using the `arn`. For example: ```terraform diff --git a/website/docs/r/sfn_state_machine.html.markdown b/website/docs/r/sfn_state_machine.html.markdown index 8662110dc4e5..0e9e3fd0543b 100644 --- a/website/docs/r/sfn_state_machine.html.markdown +++ b/website/docs/r/sfn_state_machine.html.markdown @@ -209,6 +209,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. 
For example: + +```terraform +import { + to = aws_sfn_state_machine.example + identity = { + "arn" = "arn:aws:states:eu-west-1:123456789098:stateMachine:bar" + } +} + +resource "aws_sfn_state_machine" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) ARN of the state machine. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import State Machines using the `arn`. For example: ```terraform diff --git a/website/docs/r/signer_signing_profile.html.markdown b/website/docs/r/signer_signing_profile.html.markdown index 00c1341ff5c5..11aaebd88c61 100644 --- a/website/docs/r/signer_signing_profile.html.markdown +++ b/website/docs/r/signer_signing_profile.html.markdown @@ -43,6 +43,7 @@ This resource supports the following arguments: * `name_prefix` - (Optional, Forces new resource) A signing profile name prefix. Terraform will generate a unique suffix. Conflicts with `name`. * `signature_validity_period` - (Optional, Forces new resource) The validity period for a signing job. See [`signature_validity_period` Block](#signature_validity_period-block) below for details. * `signing_material` - (Optional, Forces new resource) The AWS Certificate Manager certificate that will be used to sign code with the new signing profile. See [`signing_material` Block](#signing_material-block) below for details. +* `signing_parameters` - (Optional, Forces new resource) Map of key-value pairs for signing. These can include any information that you want to use during signing. * `tags` - (Optional) A list of tags associated with the signing profile. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. 
### `signature_validity_period` Block diff --git a/website/docs/r/sns_topic.html.markdown b/website/docs/r/sns_topic.html.markdown index e82721f28ba0..996c2cf6a037 100644 --- a/website/docs/r/sns_topic.html.markdown +++ b/website/docs/r/sns_topic.html.markdown @@ -114,6 +114,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_sns_topic.example + identity = { + "arn" = "arn:aws:sns:us-west-2:123456789012:my-topic" + } +} + +resource "aws_sns_topic" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the SNS topic. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import SNS Topics using the topic `arn`. For example: ```terraform diff --git a/website/docs/r/sns_topic_data_protection_policy.html.markdown b/website/docs/r/sns_topic_data_protection_policy.html.markdown index 95eab80c0740..3339d02be0b3 100644 --- a/website/docs/r/sns_topic_data_protection_policy.html.markdown +++ b/website/docs/r/sns_topic_data_protection_policy.html.markdown @@ -58,6 +58,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. 
For example: + +```terraform +import { + to = aws_sns_topic_data_protection_policy.example + identity = { + "arn" = "arn:aws:sns:us-west-2:123456789012:example" + } +} + +resource "aws_sns_topic_data_protection_policy" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the SNS topic. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import SNS Data Protection Topic Policy using the topic ARN. For example: ```terraform diff --git a/website/docs/r/sns_topic_policy.html.markdown b/website/docs/r/sns_topic_policy.html.markdown index b555d9fed777..b48eb47dab7c 100644 --- a/website/docs/r/sns_topic_policy.html.markdown +++ b/website/docs/r/sns_topic_policy.html.markdown @@ -82,6 +82,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_sns_topic_policy.example + identity = { + "arn" = "arn:aws:sns:us-west-2:123456789012:my-topic" + } +} + +resource "aws_sns_topic_policy" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the SNS topic. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import SNS Topic Policy using the topic ARN. 
For example: ```terraform diff --git a/website/docs/r/sns_topic_subscription.html.markdown b/website/docs/r/sns_topic_subscription.html.markdown index 9ab7a169a74e..bf6c6a0dc7e4 100644 --- a/website/docs/r/sns_topic_subscription.html.markdown +++ b/website/docs/r/sns_topic_subscription.html.markdown @@ -335,6 +335,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_sns_topic_subscription.example + identity = { + "arn" = "arn:aws:sns:us-west-2:123456789012:my-topic:8a21d249-4329-4871-acc6-7be709c6ea7f" + } +} + +resource "aws_sns_topic_subscription" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the SNS topic subscription. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import SNS Topic Subscriptions using the subscription `arn`. For example: ```terraform diff --git a/website/docs/r/spot_instance_request.html.markdown b/website/docs/r/spot_instance_request.html.markdown index 7a806200654b..5d1ba5c41118 100644 --- a/website/docs/r/spot_instance_request.html.markdown +++ b/website/docs/r/spot_instance_request.html.markdown @@ -25,8 +25,8 @@ price availability or by a user. ~> **NOTE:** Because their behavior depends on the live status of the spot market, Spot Instance Requests have a unique lifecycle that makes them behave -differently than other Terraform resources. Most importantly: there is __no -guarantee__ that a Spot Instance exists to fulfill the request at any given +differently than other Terraform resources. Most importantly: there is **no +guarantee** that a Spot Instance exists to fulfill the request at any given point in time. 
See the [AWS Spot Instance documentation](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-spot-instances.html) for more information. @@ -54,7 +54,9 @@ resource "aws_spot_instance_request" "cheap_worker" { This resource supports the following arguments: * `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). + Spot Instance Requests support all the same arguments as [`aws_instance`](instance.html), with the addition of: + * `spot_price` - (Optional; Default: On-demand price) The maximum price to request on the spot market. * `wait_for_fulfillment` - (Optional; Default: false) If set, Terraform will wait for the Spot Request to be fulfilled, and will throw an error if the @@ -73,9 +75,9 @@ Spot Instance Requests support all the same arguments as [`aws_instance`](instan This resource exports the following attributes in addition to the arguments above: * `id` - The Spot Instance Request ID. +* `tags_all` - A map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). 
-These attributes are exported, but they are expected to change over time and so -should only be used for informational purposes, not for resource dependencies: +The following attributes are exported, but they are expected to change over time and so should only be used for informational purposes, not for resource dependencies: * `spot_bid_status` - The current [bid status](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-bid-status.html) @@ -92,7 +94,6 @@ should only be used for informational purposes, not for resource dependencies: used inside the Amazon EC2, and only available if you've enabled DNS hostnames for your VPC * `private_ip` - The private IP address assigned to the instance -* `tags_all` - A map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). ## Timeouts diff --git a/website/docs/r/sqs_queue.html.markdown b/website/docs/r/sqs_queue.html.markdown index 3ed744e70025..65090ca44000 100644 --- a/website/docs/r/sqs_queue.html.markdown +++ b/website/docs/r/sqs_queue.html.markdown @@ -112,7 +112,7 @@ This resource supports the following arguments: * `fifo_throughput_limit` - (Optional) Specifies whether the FIFO queue throughput quota applies to the entire queue or per message group. Valid values are `perQueue` (default) and `perMessageGroupId`. * `kms_data_key_reuse_period_seconds` - (Optional) Length of time, in seconds, for which Amazon SQS can reuse a data key to encrypt or decrypt messages before calling AWS KMS again. An integer representing seconds, between 60 seconds (1 minute) and 86,400 seconds (24 hours). The default is 300 (5 minutes). * `kms_master_key_id` - (Optional) ID of an AWS-managed customer master key (CMK) for Amazon SQS or a custom CMK. 
For more information, see [Key Terms](http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-server-side-encryption.html#sqs-sse-key-terms). -* `max_message_size` - (Optional) Limit of how many bytes a message can contain before Amazon SQS rejects it. An integer from 1024 bytes (1 KiB) up to 262144 bytes (256 KiB). The default for this attribute is 262144 (256 KiB). +* `max_message_size` - (Optional) Limit of how many bytes a message can contain before Amazon SQS rejects it. An integer from 1024 bytes (1 KiB) up to 1048576 bytes (1024 KiB). The default for this attribute is 262144 (256 KiB). * `message_retention_seconds` - (Optional) Number of seconds Amazon SQS retains a message. Integer representing seconds, from 60 (1 minute) to 1209600 (14 days). The default for this attribute is 345600 (4 days). * `name` - (Optional) Name of the queue. Queue names must be made up of only uppercase and lowercase ASCII letters, numbers, underscores, and hyphens, and must be between 1 and 80 characters long. For a FIFO (first-in-first-out) queue, the name must end with the `.fifo` suffix. If omitted, Terraform will assign a random, unique name. Conflicts with `name_prefix`. * `name_prefix` - (Optional) Creates a unique name beginning with the specified prefix. Conflicts with `name`. @@ -143,11 +143,37 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_sqs_queue.example + identity = { + url = "https://queue.amazonaws.com/80398EXAMPLE/MyQueue" + } +} + +resource "aws_sqs_queue" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `url` (String) URL of the SQS queue. + +#### Optional + +* `account_id` (String) AWS Account where this resource is managed. 
+* `region` (String) Region where this resource is managed. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import SQS Queues using the queue `url`. For example: ```terraform import { - to = aws_sqs_queue.public_queue + to = aws_sqs_queue.example id = "https://queue.amazonaws.com/80398EXAMPLE/MyQueue" } ``` @@ -155,5 +181,5 @@ import { Using `terraform import`, import SQS Queues using the queue `url`. For example: ```console -% terraform import aws_sqs_queue.public_queue https://queue.amazonaws.com/80398EXAMPLE/MyQueue +% terraform import aws_sqs_queue.example https://queue.amazonaws.com/80398EXAMPLE/MyQueue ``` diff --git a/website/docs/r/ssm_association.html.markdown b/website/docs/r/ssm_association.html.markdown index a2836c812317..d1df3161a8b3 100644 --- a/website/docs/r/ssm_association.html.markdown +++ b/website/docs/r/ssm_association.html.markdown @@ -81,7 +81,7 @@ resource "aws_ssm_association" "example" { ### Create an association with multiple instances with their instance ids -``` +```terraform # Removed EC2 provisioning dependencies for brevity resource "aws_ssm_association" "system_update" { @@ -164,13 +164,13 @@ resource "aws_instance" "web_server_2" { ### Create an association with multiple instances with their values matching their tags -``` +```terraform # SSM Association for Webbased Servers resource "aws_ssm_association" "database_association" { name = aws_ssm_document.system_update.name # Use the name of the document as the association name targets { key = "tag:Role" - values = ["WebServer","Database"] + values = ["WebServer", "Database"] } parameters = { @@ -182,7 +182,7 @@ resource "aws_ssm_association" "database_association" { # EC2 Instance 1 - Web Server with "ServerType" tag resource "aws_instance" "web_server" { ami = data.aws_ami.amazon_linux.id - instance_type = var.instance_type + instance_type = "t3.micro" subnet_id = data.aws_subnet.default.id 
vpc_security_group_ids = [aws_security_group.ec2_sg.id] iam_instance_profile = aws_iam_instance_profile.ec2_ssm_profile.name @@ -214,7 +214,7 @@ resource "aws_instance" "web_server" { # EC2 Instance 2 - Database Server with "Role" tag resource "aws_instance" "database_server" { ami = data.aws_ami.amazon_linux.id - instance_type = var.instance_type + instance_type = "t3.micro" subnet_id = data.aws_subnet.default.id vpc_security_group_ids = [aws_security_group.ec2_sg.id] iam_instance_profile = aws_iam_instance_profile.ec2_ssm_profile.name @@ -286,11 +286,37 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_ssm_association.example + identity = { + association_id = "10abcdef-0abc-1234-5678-90abcdef123456" + } +} + +resource "aws_ssm_association" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `association_id` - (String) ID of the SSM association. + +#### Optional + +* `account_id` (String) AWS Account where this resource is managed. +* `region` (String) Region where this resource is managed. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import SSM associations using the `association_id`. For example: ```terraform import { - to = aws_ssm_association.test-association + to = aws_ssm_association.example id = "10abcdef-0abc-1234-5678-90abcdef123456" } ``` @@ -298,5 +324,5 @@ import { Using `terraform import`, import SSM associations using the `association_id`. 
For example: ```console -% terraform import aws_ssm_association.test-association 10abcdef-0abc-1234-5678-90abcdef123456 +% terraform import aws_ssm_association.example 10abcdef-0abc-1234-5678-90abcdef123456 ``` diff --git a/website/docs/r/ssm_default_patch_baseline.html.markdown b/website/docs/r/ssm_default_patch_baseline.html.markdown index 3ce7c9709fd2..a6c5f672f2db 100644 --- a/website/docs/r/ssm_default_patch_baseline.html.markdown +++ b/website/docs/r/ssm_default_patch_baseline.html.markdown @@ -39,6 +39,7 @@ This resource supports the following arguments: `AMAZON_LINUX`, `AMAZON_LINUX_2`, `AMAZON_LINUX_2022`, + `AMAZON_LINUX_2023`, `CENTOS`, `DEBIAN`, `MACOS`, diff --git a/website/docs/r/ssm_document.html.markdown b/website/docs/r/ssm_document.html.markdown index d0b6ecf61cd4..b0010d27096f 100644 --- a/website/docs/r/ssm_document.html.markdown +++ b/website/docs/r/ssm_document.html.markdown @@ -132,6 +132,32 @@ The `parameter` configuration block provides the following attributes: ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_ssm_document.example + identity = { + name = "example" + } +} + +resource "aws_ssm_document" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `name` - (String) Name of the SSM document. + +#### Optional + +* `account_id` (String) AWS Account where this resource is managed. +* `region` (String) Region where this resource is managed. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import SSM Documents using the name. 
For example: ```terraform diff --git a/website/docs/r/ssm_maintenance_window.html.markdown b/website/docs/r/ssm_maintenance_window.html.markdown index 06697e3fbd09..d62cb46efeaa 100644 --- a/website/docs/r/ssm_maintenance_window.html.markdown +++ b/website/docs/r/ssm_maintenance_window.html.markdown @@ -48,11 +48,37 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_ssm_maintenance_window.example + identity = { + id = "mw-0123456789" + } +} + +resource "aws_ssm_maintenance_window" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `id` - (String) ID of the maintenance window. + +#### Optional + +* `account_id` (String) AWS Account where this resource is managed. +* `region` (String) Region where this resource is managed. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import SSM Maintenance Windows using the maintenance window `id`. For example: ```terraform import { - to = aws_ssm_maintenance_window.imported-window + to = aws_ssm_maintenance_window.example id = "mw-0123456789" } ``` @@ -60,5 +86,5 @@ import { Using `terraform import`, import SSM Maintenance Windows using the maintenance window `id`. 
For example: ```console -% terraform import aws_ssm_maintenance_window.imported-window mw-0123456789 +% terraform import aws_ssm_maintenance_window.example mw-0123456789 ``` diff --git a/website/docs/r/ssm_maintenance_window_target.html.markdown b/website/docs/r/ssm_maintenance_window_target.html.markdown index 988df4649728..85efb6490be8 100644 --- a/website/docs/r/ssm_maintenance_window_target.html.markdown +++ b/website/docs/r/ssm_maintenance_window_target.html.markdown @@ -79,6 +79,34 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_ssm_maintenance_window_target.example + identity = { + window_id = "mw-0c50858d01EXAMPLE" + id = "23639a0b-ddbc-4bca-9e72-78d96EXAMPLE" + } +} + +resource "aws_ssm_maintenance_window_target" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `window_id` - (String) ID of the maintenance window. +* `id` - (String) ID of the maintenance window target. + +#### Optional + +* `account_id` (String) AWS Account where this resource is managed. +* `region` (String) Region where this resource is managed. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import SSM Maintenance Window targets using `WINDOW_ID/WINDOW_TARGET_ID`. 
For example: ```terraform diff --git a/website/docs/r/ssm_maintenance_window_task.html.markdown b/website/docs/r/ssm_maintenance_window_task.html.markdown index 5bce6e9c79ab..124c09dc79e0 100644 --- a/website/docs/r/ssm_maintenance_window_task.html.markdown +++ b/website/docs/r/ssm_maintenance_window_task.html.markdown @@ -209,11 +209,39 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_ssm_maintenance_window_task.example + identity = { + window_id = "mw-0c50858d01EXAMPLE" + id = "4f7ca192-7e9a-40fe-9192-5cb15EXAMPLE" + } +} + +resource "aws_ssm_maintenance_window_task" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `window_id` - (String) ID of the maintenance window. +* `id` - (String) ID of the maintenance window task. + +#### Optional + +* `account_id` (String) AWS Account where this resource is managed. +* `region` (String) Region where this resource is managed. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import AWS Maintenance Window Task using the `window_id` and `window_task_id` separated by `/`. For example: ```terraform import { - to = aws_ssm_maintenance_window_task.task + to = aws_ssm_maintenance_window_task.example id = "/" } ``` @@ -221,5 +249,5 @@ import { Using `terraform import`, import AWS Maintenance Window Task using the `window_id` and `window_task_id` separated by `/`. 
For example: ```console -% terraform import aws_ssm_maintenance_window_task.task / +% terraform import aws_ssm_maintenance_window_task.example / ``` diff --git a/website/docs/r/ssm_parameter.html.markdown b/website/docs/r/ssm_parameter.html.markdown index 788cc7dcb976..b5b3c2b7694a 100644 --- a/website/docs/r/ssm_parameter.html.markdown +++ b/website/docs/r/ssm_parameter.html.markdown @@ -92,11 +92,37 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_ssm_parameter.example + identity = { + name = "/my_path/my_paramname" + } +} + +resource "aws_ssm_parameter" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `name` - (String) Name of the parameter. + +#### Optional + +* `account_id` (String) AWS Account where this resource is managed. +* `region` (String) Region where this resource is managed. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import SSM Parameters using the parameter store `name`. For example: ```terraform import { - to = aws_ssm_parameter.my_param + to = aws_ssm_parameter.example id = "/my_path/my_paramname" } ``` @@ -104,5 +130,5 @@ import { Using `terraform import`, import SSM Parameters using the parameter store `name`. 
For example: ```console -% terraform import aws_ssm_parameter.my_param /my_path/my_paramname +% terraform import aws_ssm_parameter.example /my_path/my_paramname ``` diff --git a/website/docs/r/ssm_patch_baseline.html.markdown b/website/docs/r/ssm_patch_baseline.html.markdown index 78cb3bec2fa1..8b2089a82e7f 100644 --- a/website/docs/r/ssm_patch_baseline.html.markdown +++ b/website/docs/r/ssm_patch_baseline.html.markdown @@ -169,6 +169,7 @@ The following arguments are optional: * `approved_patches_compliance_level` - (Optional) Compliance level for approved patches. This means that if an approved patch is reported as missing, this is the severity of the compliance violation. Valid values are `CRITICAL`, `HIGH`, `MEDIUM`, `LOW`, `INFORMATIONAL`, `UNSPECIFIED`. The default value is `UNSPECIFIED`. * `approved_patches_enable_non_security` - (Optional) Whether the list of approved patches includes non-security updates that should be applied to the instances. Applies to Linux instances only. * `approved_patches` - (Optional) List of explicitly approved patches for the baseline. Cannot be specified with `approval_rule`. +* `available_security_updates_compliance_status` - (Optional) Indicates the compliance status of managed nodes for which security-related patches are available but were not approved. Supported for Windows Server managed nodes only. Valid values are `COMPLIANT`, `NON_COMPLIANT`. * `description` - (Optional) Description of the patch baseline. * `global_filter` - (Optional) Set of global filters used to exclude patches from the baseline. Up to 4 global filters can be specified using Key/Value pairs. Valid Keys are `PRODUCT`, `CLASSIFICATION`, `MSRC_SEVERITY`, and `PATCH_ID`. * `operating_system` - (Optional) Operating system the patch baseline applies to. 
Valid values are `ALMA_LINUX`, `AMAZON_LINUX`, `AMAZON_LINUX_2`, `AMAZON_LINUX_2022`, `AMAZON_LINUX_2023`, `CENTOS`, `DEBIAN`, `MACOS`, `ORACLE_LINUX`, `RASPBIAN`, `REDHAT_ENTERPRISE_LINUX`, `ROCKY_LINUX`, `SUSE`, `UBUNTU`, and `WINDOWS`. The default value is `WINDOWS`. @@ -206,6 +207,32 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_ssm_patch_baseline.example + identity = { + id = "pb-12345678" + } +} + +resource "aws_ssm_patch_baseline" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `id` - (String) ID of the patch baseline. + +#### Optional + +* `account_id` (String) AWS Account where this resource is managed. +* `region` (String) Region where this resource is managed. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import SSM Patch Baselines using their baseline ID. For example: ```terraform diff --git a/website/docs/r/ssm_service_setting.html.markdown b/website/docs/r/ssm_service_setting.html.markdown index 1e8b0ad9dd45..798f4e5ad8a4 100644 --- a/website/docs/r/ssm_service_setting.html.markdown +++ b/website/docs/r/ssm_service_setting.html.markdown @@ -24,7 +24,7 @@ resource "aws_ssm_service_setting" "test_setting" { This resource supports the following arguments: * `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). -* `setting_id` - (Required) ID of the service setting. +* `setting_id` - (Required) ID of the service setting. 
Valid values are shown in the [AWS documentation](https://docs.aws.amazon.com/systems-manager/latest/APIReference/API_GetServiceSetting.html#API_GetServiceSetting_RequestSyntax). * `setting_value` - (Required) Value of the service setting. ## Attribute Reference diff --git a/website/docs/r/ssmcontacts_contact.html.markdown b/website/docs/r/ssmcontacts_contact.html.markdown index e6c5c59c4ae4..6504aa4ba133 100644 --- a/website/docs/r/ssmcontacts_contact.html.markdown +++ b/website/docs/r/ssmcontacts_contact.html.markdown @@ -64,6 +64,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_ssmcontacts_contact.example + identity = { + "arn" = "arn:aws:ssm-contacts:us-west-2:123456789012:contact/example" + } +} + +resource "aws_ssmcontacts_contact" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the contact. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import SSM Contact using the `ARN`. For example: ```terraform diff --git a/website/docs/r/ssmcontacts_contact_channel.html.markdown b/website/docs/r/ssmcontacts_contact_channel.html.markdown index 8f9099aa36dc..1092434b7ed1 100644 --- a/website/docs/r/ssmcontacts_contact_channel.html.markdown +++ b/website/docs/r/ssmcontacts_contact_channel.html.markdown @@ -72,7 +72,28 @@ This resource exports the following attributes in addition to the arguments abov ## Import -In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import SSM Contact Channel using the `ARN`. 
For example: +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_ssmcontacts_contact_channel.example + identity = { + "arn" = "arn:aws:ssm-contacts:us-west-2:123456789012:contact-channel/example" + } +} + +resource "aws_ssmcontacts_contact_channel" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the contact channel. + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import SSM Contact Channel using the `arn`. For example: ```terraform import { @@ -81,7 +102,7 @@ import { } ``` -Using `terraform import`, import SSM Contact Channel using the `ARN`. For example: +Using `terraform import`, import SSM Contact Channel using the `arn`. For example: ```console % terraform import aws_ssmcontacts_contact_channel.example arn:aws:ssm-contacts:us-west-2:123456789012:contact-channel/example diff --git a/website/docs/r/ssmcontacts_rotation.html.markdown b/website/docs/r/ssmcontacts_rotation.html.markdown index 54fc453059d8..cbc38f7adb5e 100644 --- a/website/docs/r/ssmcontacts_rotation.html.markdown +++ b/website/docs/r/ssmcontacts_rotation.html.markdown @@ -194,6 +194,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. 
For example: + +```terraform +import { + to = aws_ssmcontacts_rotation.example + identity = { + "arn" = "arn:aws:ssm-contacts:us-east-1:123456789012:rotation/example-rotation" + } +} + +resource "aws_ssmcontacts_rotation" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the SSM Contacts rotation. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import SSMContacts Rotation using the `arn`. For example: ```terraform diff --git a/website/docs/r/ssoadmin_application.html.markdown b/website/docs/r/ssoadmin_application.html.markdown index fb769400a3a4..58e2f8f538ee 100644 --- a/website/docs/r/ssoadmin_application.html.markdown +++ b/website/docs/r/ssoadmin_application.html.markdown @@ -89,6 +89,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_ssoadmin_application.example + identity = { + "arn" = "arn:aws:sso::123456789012:application/ssoins-1234567890abcdef/apl-1234567890abcdef" + } +} + +resource "aws_ssoadmin_application" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the SSO application. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import SSO Admin Application using the `id`. 
For example: ```terraform diff --git a/website/docs/r/ssoadmin_application_assignment_configuration.html.markdown b/website/docs/r/ssoadmin_application_assignment_configuration.html.markdown index 290916c00243..cccb5ac509c8 100644 --- a/website/docs/r/ssoadmin_application_assignment_configuration.html.markdown +++ b/website/docs/r/ssoadmin_application_assignment_configuration.html.markdown @@ -41,6 +41,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_ssoadmin_application_assignment_configuration.example + identity = { + "arn" = "arn:aws:sso::123456789012:application/ssoins-1234567890abcdef/apl-1234567890abcdef" + } +} + +resource "aws_ssoadmin_application_assignment_configuration" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the SSO application. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import SSO Admin Application Assignment Configuration using the `id`. For example: ```terraform diff --git a/website/docs/r/subnet.html.markdown b/website/docs/r/subnet.html.markdown index 506a8be1e069..fcfacf844ae3 100644 --- a/website/docs/r/subnet.html.markdown +++ b/website/docs/r/subnet.html.markdown @@ -3,7 +3,7 @@ subcategory: "VPC (Virtual Private Cloud)" layout: "aws" page_title: "AWS: aws_subnet" description: |- - Provides an VPC subnet resource. + Provides an VPC Subnet resource. 
--- # Resource: aws_subnet @@ -91,11 +91,37 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_subnet.example + identity = { + id = "subnet-9d4a7b6c" + } +} + +resource "aws_subnet" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +* `id` (String) ID of the subnet. + +#### Optional + +* `account_id` (String) AWS Account where this resource is managed. +* `region` (String) Region where this resource is managed. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import subnets using the subnet `id`. For example: ```terraform import { - to = aws_subnet.public_subnet + to = aws_subnet.example id = "subnet-9d4a7b6c" } ``` @@ -103,5 +129,5 @@ import { Using `terraform import`, import subnets using the subnet `id`. For example: ```console -% terraform import aws_subnet.public_subnet subnet-9d4a7b6c +% terraform import aws_subnet.example subnet-9d4a7b6c ``` diff --git a/website/docs/r/synthetics_canary.html.markdown b/website/docs/r/synthetics_canary.html.markdown index 659ce7b50a4e..0a8ea30102f2 100644 --- a/website/docs/r/synthetics_canary.html.markdown +++ b/website/docs/r/synthetics_canary.html.markdown @@ -36,24 +36,24 @@ The following arguments are required: * `artifact_s3_location` - (Required) Location in Amazon S3 where Synthetics stores artifacts from the test runs of this canary. * `execution_role_arn` - (Required) ARN of the IAM role to be used to run the canary. see [AWS Docs](https://docs.aws.amazon.com/AmazonSynthetics/latest/APIReference/API_CreateCanary.html#API_CreateCanary_RequestSyntax) for permissions needs for IAM Role. 
* `handler` - (Required) Entry point to use for the source code when running the canary. This value must end with the string `.handler` . -* `name` - (Required) Name for this canary. Has a maximum length of 21 characters. Valid characters are lowercase alphanumeric, hyphen, or underscore. +* `name` - (Required) Name for this canary. Has a maximum length of 255 characters. Valid characters are lowercase alphanumeric, hyphen, or underscore. * `runtime_version` - (Required) Runtime version to use for the canary. Versions change often so consult the [Amazon CloudWatch documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch_Synthetics_Canaries_Library.html) for the latest valid versions. Values include `syn-python-selenium-1.0`, `syn-nodejs-puppeteer-3.0`, `syn-nodejs-2.2`, `syn-nodejs-2.1`, `syn-nodejs-2.0`, and `syn-1.0`. -* `schedule` - (Required) Configuration block providing how often the canary is to run and when these test runs are to stop. Detailed below. +* `schedule` - (Required) Configuration block providing how often the canary is to run and when these test runs are to stop. Detailed [below](#schedule). The following arguments are optional: * `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `artifact_config` - (Optional) configuration for canary artifacts, including the encryption-at-rest settings for artifacts that the canary uploads to Amazon S3. See [Artifact Config](#artifact_config). * `delete_lambda` - (Optional) Specifies whether to also delete the Lambda functions and layers used by this canary. The default is `false`. -* `vpc_config` - (Optional) Configuration block. Detailed below. 
* `failure_retention_period` - (Optional) Number of days to retain data about failed runs of this canary. If you omit this field, the default of 31 days is used. The valid range is 1 to 455 days. -* `run_config` - (Optional) Configuration block for individual canary runs. Detailed below. +* `run_config` - (Optional) Configuration block for individual canary runs. Detailed [below](#run_config). * `s3_bucket` - (Optional) Full bucket name which is used if your canary script is located in S3. The bucket must already exist. **Conflicts with `zip_file`.** * `s3_key` - (Optional) S3 key of your script. **Conflicts with `zip_file`.** * `s3_version` - (Optional) S3 version ID of your script. **Conflicts with `zip_file`.** * `start_canary` - (Optional) Whether to run or stop the canary. * `success_retention_period` - (Optional) Number of days to retain data about successful runs of this canary. If you omit this field, the default of 31 days is used. The valid range is 1 to 455 days. * `tags` - (Optional) Key-value map of resource tags. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. -* `artifact_config` - (Optional) configuration for canary artifacts, including the encryption-at-rest settings for artifacts that the canary uploads to Amazon S3. See [Artifact Config](#artifact_config). +* `vpc_config` - (Optional) Configuration block. Detailed [below](#vpc_config). * `zip_file` - (Optional) ZIP file that contains the script, if you input your canary script directly into the canary instead of referring to an S3 location. It can be up to 225KB. 
**Conflicts with `s3_bucket`, `s3_key`, and `s3_version`.** ### artifact_config @@ -69,6 +69,11 @@ The following arguments are optional: * `expression` - (Required) Rate expression or cron expression that defines how often the canary is to run. For rate expression, the syntax is `rate(number unit)`. _unit_ can be `minute`, `minutes`, or `hour`. For cron expression, the syntax is `cron(expression)`. For more information about the syntax for cron expressions, see [Scheduling canary runs using cron](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch_Synthetics_Canaries_cron.html). * `duration_in_seconds` - (Optional) Duration in seconds, for the canary to continue making regular runs according to the schedule in the Expression value. +* `retry_config` - (Optional) Configuration block for canary retries. Detailed [below](#retry_config). + +### retry_config + +* `max_retries` - (Required) Maximum number of retries. The value must be less than or equal to `2`. If `max_retries` is `2`, `run_config.timeout_in_seconds` should be less than 600 seconds. Defaults to `0`. ### run_config @@ -76,6 +81,7 @@ The following arguments are optional: * `memory_in_mb` - (Optional) Maximum amount of memory available to the canary while it is running, in MB. The value you specify must be a multiple of 64. * `active_tracing` - (Optional) Whether this canary is to use active AWS X-Ray tracing when it runs. You can enable active tracing only for canaries that use version syn-nodejs-2.0 or later for their canary runtime. * `environment_variables` - (Optional) Map of environment variables that are accessible from the canary during execution. Please see [AWS Docs](https://docs.aws.amazon.com/lambda/latest/dg/configuration-envvars.html#configuration-envvars-runtime) for variables reserved for Lambda. +* `ephemeral_storage` - (Optional) Amount of ephemeral storage (in MB) allocated for the canary run during execution. Defaults to 1024. 
### vpc_config @@ -83,6 +89,7 @@ If this canary tests an endpoint in a VPC, this structure contains information a * `subnet_ids` - (Required) IDs of the subnets where this canary is to run. * `security_group_ids` - (Required) IDs of the security groups for this canary. +* `ipv6_allowed_for_dual_stack` - (Optional) If `true`, allow outbound IPv6 traffic on VPC canaries that are connected to dual-stack subnets. The default is `false`. ## Attribute Reference diff --git a/website/docs/r/timestreaminfluxdb_db_cluster.html.markdown b/website/docs/r/timestreaminfluxdb_db_cluster.html.markdown new file mode 100644 index 000000000000..e7bdb8e8678a --- /dev/null +++ b/website/docs/r/timestreaminfluxdb_db_cluster.html.markdown @@ -0,0 +1,265 @@ +--- +subcategory: "Timestream for InfluxDB" +layout: "aws" +page_title: "AWS: aws_timestreaminfluxdb_db_cluster" +description: |- + Terraform resource for managing an Amazon Timestream for InfluxDB read-replica cluster. +--- + +# Resource: aws_timestreaminfluxdb_db_cluster + +Terraform resource for managing an Amazon Timestream for InfluxDB read-replica cluster. + +~> **NOTE:** This resource requires a subscription to [Timestream for InfluxDB Read Replicas (Add-On) on the AWS Marketplace](https://aws.amazon.com/marketplace/pp/prodview-lftzfxtb5xlv4?applicationId=AWS-Marketplace-Console&ref_=beagle&sr=0-2). 
+ +## Example Usage + +### Basic Usage + +```terraform +resource "aws_timestreaminfluxdb_db_cluster" "example" { + allocated_storage = 20 + bucket = "example-bucket-name" + db_instance_type = "db.influx.medium" + failover_mode = "AUTOMATIC" + username = "admin" + password = "example-password" + port = 8086 + organization = "organization" + vpc_subnet_ids = [aws_subnet.example_1.id, aws_subnet.example_2.id] + vpc_security_group_ids = [aws_security_group.example.id] + name = "example-db-cluster" +} +``` + +### Usage with Prerequisite Resources + +All Timestream for InfluxDB clusters require a VPC, at least two subnets, and a security group. The following example shows how these prerequisite resources can be created and used with `aws_timestreaminfluxdb_db_cluster`. + +```terraform +resource "aws_vpc" "example" { + cidr_block = "10.0.0.0/16" +} + +resource "aws_subnet" "example_1" { + vpc_id = aws_vpc.example.id + cidr_block = "10.0.1.0/24" +} + +resource "aws_subnet" "example_2" { + vpc_id = aws_vpc.example.id + cidr_block = "10.0.2.0/24" +} + +resource "aws_security_group" "example" { + name = "example" + vpc_id = aws_vpc.example.id +} + +resource "aws_timestreaminfluxdb_db_cluster" "example" { + allocated_storage = 20 + bucket = "example-bucket-name" + db_instance_type = "db.influx.medium" + username = "admin" + password = "example-password" + organization = "organization" + vpc_subnet_ids = [aws_subnet.example_1.id, aws_subnet.example_2.id] + vpc_security_group_ids = [aws_security_group.example.id] + name = "example-db-cluster" +} +``` + +### Usage with Public Internet Access Enabled + +The following configuration shows how to define the necessary resources and arguments to allow public internet access on your Timestream for InfluxDB read-replica cluster's primary endpoint (simply referred to as "endpoint") and read endpoint on port `8086`. 
After applying this configuration, the cluster's InfluxDB UI can be accessed by visiting your cluster's primary endpoint at port `8086`. + +```terraform +resource "aws_vpc" "example" { + cidr_block = "10.0.0.0/16" +} + +resource "aws_subnet" "example_1" { + vpc_id = aws_vpc.example.id + cidr_block = "10.0.1.0/24" +} + +resource "aws_subnet" "example_2" { + vpc_id = aws_vpc.example.id + cidr_block = "10.0.2.0/24" +} + +resource "aws_security_group" "example" { + name = "example" + vpc_id = aws_vpc.example.id +} + +resource "aws_internet_gateway" "example" { + vpc_id = aws_vpc.example.id + + tags = { + Name = "example" + } +} + +resource "aws_route" "test_route" { + route_table_id = aws_vpc.example.main_route_table_id + destination_cidr_block = "0.0.0.0/0" + gateway_id = aws_internet_gateway.example.id +} + +resource "aws_route_table_association" "test_route_table_association" { + subnet_id = aws_subnet.example_1.id + route_table_id = aws_vpc.example.main_route_table_id +} + +resource "aws_vpc_security_group_ingress_rule" "example" { + security_group_id = aws_security_group.example.id + referenced_security_group_id = aws_security_group.example.id + ip_protocol = -1 +} + +resource "aws_vpc_security_group_ingress_rule" "example_public" { + security_group_id = aws_security_group.example.id + cidr_ipv4 = "0.0.0.0/0" + ip_protocol = "tcp" + from_port = 8086 + to_port = 8086 +} + +resource "aws_timestreaminfluxdb_db_cluster" "example" { + allocated_storage = 20 + bucket = "example-bucket-name" + db_instance_type = "db.influx.medium" + username = "admin" + password = "example-password" + organization = "organization" + vpc_subnet_ids = [aws_subnet.example_1.id, aws_subnet.example_2.id] + vpc_security_group_ids = [aws_security_group.example.id] + name = "example-db-cluster" + publicly_accessible = true # False by default +} +``` + +### Usage with S3 Log Delivery Enabled + +You can use an S3 bucket to store logs generated by your Timestream for InfluxDB cluster.
The following example shows what resources and arguments are required to configure an S3 bucket for logging, including the IAM policy that needs to be set in order to allow Timestream for InfluxDB to place logs in your S3 bucket. The configuration of the required VPC, security group, and subnets has been left out of the example for brevity. + +```terraform +resource "aws_s3_bucket" "example" { + bucket = "example-s3-bucket" + force_destroy = true +} + +data "aws_iam_policy_document" "example" { + statement { + actions = ["s3:PutObject"] + principals { + type = "Service" + identifiers = ["timestream-influxdb.amazonaws.com"] + } + resources = [ + "${aws_s3_bucket.example.arn}/*" + ] + } +} + +resource "aws_s3_bucket_policy" "example" { + bucket = aws_s3_bucket.example.id + policy = data.aws_iam_policy_document.example.json +} + +resource "aws_timestreaminfluxdb_db_cluster" "example" { + allocated_storage = 20 + bucket = "example-bucket-name" + db_instance_type = "db.influx.medium" + username = "admin" + password = "example-password" + organization = "organization" + vpc_subnet_ids = [aws_subnet.example_1.id, aws_subnet.example_2.id] + vpc_security_group_ids = [aws_security_group.example.id] + name = "example-db-cluster" + + log_delivery_configuration { + s3_configuration { + bucket_name = aws_s3_bucket.example.bucket + enabled = true + } + } +} +``` + +## Argument Reference + +The following arguments are required: + +* `allocated_storage` - (Required) Amount of storage in GiB (gibibytes). The minimum value is `20`, the maximum value is `16384`. The argument `db_storage_type` places restrictions on this argument's minimum value. The following is a list of `db_storage_type` values and the corresponding minimum value for `allocated_storage`: `"InfluxIOIncludedT1"`: `20`, `"InfluxIOIncludedT2"` and `"InfluxIOIncludedT3"`: `400`. +* `bucket` - (Required) Name of the initial InfluxDB bucket. All InfluxDB data is stored in a bucket.
A bucket combines the concept of a database and a retention period (the duration of time that each data point persists). A bucket belongs to an organization. Along with `organization`, `username`, and `password`, this argument will be stored in the secret referred to by the `influx_auth_parameters_secret_arn` attribute. +* `db_instance_type` - (Required) Timestream for InfluxDB DB instance type to run InfluxDB on. Valid options are: `"db.influx.medium"`, `"db.influx.large"`, `"db.influx.xlarge"`, `"db.influx.2xlarge"`, `"db.influx.4xlarge"`, `"db.influx.8xlarge"`, `"db.influx.12xlarge"`, and `"db.influx.16xlarge"`. This argument is updatable. +* `name` - (Required) Name that uniquely identifies the DB cluster when interacting with the Amazon Timestream for InfluxDB API and CLI commands. This name will also be a prefix included in the endpoint. Cluster names must be unique per customer and per region. The argument must start with a letter, cannot contain consecutive hyphens (`-`) and cannot end with a hyphen. +* `password` - (Required) Password of the initial admin user created in InfluxDB. This password will allow you to access the InfluxDB UI to perform various administrative tasks and also use the InfluxDB CLI to create an operator token. Along with `bucket`, `username`, and `organization`, this argument will be stored in the secret referred to by the `influx_auth_parameters_secret_arn` attribute. +* `organization` - (Required) Name of the initial organization for the initial admin user in InfluxDB. An InfluxDB organization is a workspace for a group of users. Along with `bucket`, `username`, and `password`, this argument will be stored in the secret referred to by the `influx_auth_parameters_secret_arn` attribute. +* `username` - (Required) Username of the initial admin user created in InfluxDB. Must start with a letter and can't end with a hyphen or contain two consecutive hyphens. 
This username will allow you to access the InfluxDB UI to perform various administrative tasks and also use the InfluxDB CLI to create an operator token. Along with `bucket`, `organization`, and `password`, this argument will be stored in the secret referred to by the `influx_auth_parameters_secret_arn` attribute. +* `vpc_security_group_ids` - (Required) List of VPC security group IDs to associate with the cluster. +* `vpc_subnet_ids` - (Required) List of VPC subnet IDs to associate with the cluster. Provide at least two VPC subnet IDs in different availability zones when deploying with a Multi-AZ standby. + +The following arguments are optional: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `db_parameter_group_identifier` - (Optional) ID of the DB parameter group assigned to your cluster. This argument is updatable. If added to an existing Timestream for InfluxDB cluster or given a new value, will cause an in-place update to the cluster. However, if a cluster already has a value for `db_parameter_group_identifier`, removing `db_parameter_group_identifier` will cause the cluster to be destroyed and recreated. +* `db_storage_type` - (Default `"InfluxIOIncludedT1"`) Timestream for InfluxDB DB storage type to read and write InfluxDB data. You can choose between 3 different types of provisioned Influx IOPS included storage according to your workload's requirements: Influx IO Included 3000 IOPS, Influx IO Included 12000 IOPS, Influx IO Included 16000 IOPS. Valid options are: `"InfluxIOIncludedT1"`, `"InfluxIOIncludedT2"`, and `"InfluxIOIncludedT3"`. If you use `"InfluxIOIncludedT2"` or `"InfluxIOIncludedT3"`, the minimum value for `allocated_storage` is 400.
+* `deployment_type` - (Default `"MULTI_NODE_READ_REPLICAS"`) Specifies the type of cluster to create. Valid options are: `"MULTI_NODE_READ_REPLICAS"`. +* `failover_mode` - (Default `"AUTOMATIC"`) Specifies the behavior of failure recovery when the primary node of the cluster fails. Valid options are: `"AUTOMATIC"` and `"NO_FAILOVER"`. +* `log_delivery_configuration` - (Optional) Configuration for sending InfluxDB engine logs to a specified S3 bucket. This argument is updatable. +* `network_type` - (Optional) Specifies whether the network type of the Timestream for InfluxDB cluster is IPV4, which can communicate over IPv4 protocol only, or DUAL, which can communicate over both IPv4 and IPv6 protocols. +* `port` - (Default `8086`) The port on which the cluster accepts connections. Valid values: `1024`-`65535`. Cannot be `2375`-`2376`, `7788`-`7799`, `8090`, or `51678`-`51680`. This argument is updatable. +* `publicly_accessible` - (Default `false`) Configures the DB cluster with a public IP to facilitate access. Other resources, such as a VPC, a subnet, an internet gateway, and a route table with routes, are also required to enable public access, in addition to this argument. See "[Usage with Public Internet Access Enabled](#usage-with-public-internet-access-enabled)" for an example configuration with all required resources for public internet access. +* `tags` - (Optional) Map of tags assigned to the resource. If configured with a provider [`default_tags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level.
+ +**Note**: The following arguments do updates in-place: `db_parameter_group_identifier`, `log_delivery_configuration`, `port`, `db_instance_type`, `failover_mode`, and `tags`. Changes to any other argument after a cluster has been deployed will cause destruction and re-creation of the cluster. Additionally, when `db_parameter_group_identifier` is added to a cluster or modified, the cluster will be updated in-place but if `db_parameter_group_identifier` is removed from a cluster, the cluster will be destroyed and re-created. + +## Attribute Reference + +This resource exports the following attributes in addition to the arguments above: + +* `arn` - ARN of the Timestream for InfluxDB cluster. +* `endpoint` - Endpoint used to connect to InfluxDB. The default InfluxDB port is 8086. +* `id` - ID of the Timestream for InfluxDB cluster. +* `influx_auth_parameters_secret_arn` - ARN of the AWS Secrets Manager secret containing the initial InfluxDB authorization parameters. The secret value is a JSON formatted key-value pair holding InfluxDB authorization values: organization, bucket, username, and password. +* `reader_endpoint` - The endpoint used to connect to the Timestream for InfluxDB cluster for read-only operations. +* `tags_all` - Map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). + +## Timeouts + +[Configuration options](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts): + +* `create` - (Default `30m`) +* `update` - (Default `30m`) +* `delete` - (Default `30m`) + +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Timestream for InfluxDB cluster using its identifier. 
For example: + +```terraform +import { + to = aws_timestreaminfluxdb_db_cluster.example + id = "12345abcde" +} +``` + +Using `terraform import`, import Timestream for InfluxDB cluster using its identifier. For example: + +```console +% terraform import aws_timestreaminfluxdb_db_cluster.example 12345abcde +``` diff --git a/website/docs/r/timestreaminfluxdb_db_instance.html.markdown b/website/docs/r/timestreaminfluxdb_db_instance.html.markdown index 99a481fc9023..33af36c9201f 100644 --- a/website/docs/r/timestreaminfluxdb_db_instance.html.markdown +++ b/website/docs/r/timestreaminfluxdb_db_instance.html.markdown @@ -253,7 +253,7 @@ This resource exports the following attributes in addition to the arguments abov * `availability_zone` - Availability Zone in which the DB instance resides. * `endpoint` - Endpoint used to connect to InfluxDB. The default InfluxDB port is 8086. * `id` - ID of the Timestream for InfluxDB instance. -* `influx_auth_parameters_secret_arn` - ARN of the AWS Secrets Manager secret containing the initial InfluxDB authorization parameters. The secret value is a JSON formatted key-value pair holding InfluxDB authorization values: organization, bucket, username, and password. This secret will be read by the `aws_timestreaminfluxdb_db_instance` resource in order to support importing: deleting the secret or secret values can cause errors. +* `influx_auth_parameters_secret_arn` - ARN of the AWS Secrets Manager secret containing the initial InfluxDB authorization parameters. The secret value is a JSON formatted key-value pair holding InfluxDB authorization values: organization, bucket, username, and password. * `secondary_availability_zone` - Availability Zone in which the standby instance is located when deploying with a MultiAZ standby instance. 
* `tags_all` - Map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). diff --git a/website/docs/r/transcribe_vocabulary.html.markdown b/website/docs/r/transcribe_vocabulary.html.markdown index 16c70e9aa70b..54c1f8529623 100644 --- a/website/docs/r/transcribe_vocabulary.html.markdown +++ b/website/docs/r/transcribe_vocabulary.html.markdown @@ -47,7 +47,6 @@ resource "aws_transcribe_vocabulary" "example" { The following arguments are required: * `language_code` - (Required) The language code you selected for your vocabulary. -* `vocabulary_file_uri` - (Required) The Amazon S3 location (URI) of the text file that contains your custom vocabulary. * `vocabulary_name` - (Required) The name of the Vocabulary. The following arguments are optional: diff --git a/website/docs/r/transfer_host_key.html.markdown b/website/docs/r/transfer_host_key.html.markdown new file mode 100644 index 000000000000..6c59d588a3f1 --- /dev/null +++ b/website/docs/r/transfer_host_key.html.markdown @@ -0,0 +1,61 @@ +--- +subcategory: "Transfer Family" +layout: "aws" +page_title: "AWS: aws_transfer_host_key" +description: |- + Manages a host key for a server. +--- + +# Resource: aws_transfer_host_key + +Manages a host key for a server. This is an [_additional server host key_](https://docs.aws.amazon.com/transfer/latest/userguide/server-host-key-add.html). + +## Example Usage + +```terraform +resource "aws_transfer_host_key" "example" { + server_id = aws_transfer_server.example.id + description = "example additional host key" + + host_key_body_wo = < **Note** In `field_to_match` blocks, _e.g._, in `byte_match_statement`, the `body` block includes an optional argument `oversize_handling`. AWS indicates this argument will be required starting February 2023. 
To avoid configurations breaking when that change happens, treat the `oversize_handling` argument as **required** as soon as possible. +!> **Warning:** If you use the `aws_wafv2_web_acl_rule_group_association` resource to associate rule groups with this Web ACL, you must add `lifecycle { ignore_changes = [rule] }` to this resource to prevent configuration drift. The association resource modifies the Web ACL's rules outside of this resource's direct management. + ## Example Usage This resource is based on `aws_wafv2_rule_group`, check the documentation of the `aws_wafv2_rule_group` resource to see examples of the various available statements. @@ -836,9 +838,18 @@ The `managed_rule_group_configs` block support the following arguments: * `creation_path` - (Required) The path of the account creation endpoint for your application. This is the page on your website that accepts the completed registration form for a new user. This page must accept POST requests. * `enable_regex_in_path` - (Optional) Whether or not to allow the use of regular expressions in the login page path. * `registration_page_path` - (Required) The path of the account registration endpoint for your application. This is the page on your website that presents the registration form to new users. This page must accept GET text/html requests. -* `request_inspection` - (Optional) The criteria for inspecting login requests, used by the ATP rule group to validate credentials usage. See [`request_inspection`](#request_inspection-block) for more details. +* `request_inspection` - (Optional) The criteria for inspecting login requests, used by the ATP rule group to validate credentials usage. See [`request_inspection`](#request_inspection-block-acfp) for more details. * `response_inspection` - (Optional) The criteria for inspecting responses to login requests, used by the ATP rule group to track login failure rates. Note that Response Inspection is available only on web ACLs that protect CloudFront distributions. 
See [`response_inspection`](#response_inspection-block) for more details. +### `request_inspection` Block (ACFP) + +* `address_fields` (Optional) The names of the fields in the request payload that contain your customer's primary physical address. See [`address_fields`](#address_fields-block) for more details. +* `email_field` (Optional) The name of the field in the request payload that contains your customer's email. See [`email_field`](#email_field-block) for more details. +* `password_field` (Optional) Details about your login page password field. See [`password_field`](#password_field-block) for more details. +* `payload_type` (Required) The payload type for your login endpoint, either JSON or form encoded. +* `phone_number_fields` (Optional) The names of the fields in the request payload that contain your customer's primary phone number. See [`phone_number_fields`](#phone_number_fields-block) for more details. +* `username_field` (Optional) Details about your login page username field. See [`username_field`](#username_field-block) for more details. + +### `aws_managed_rules_anti_ddos_rule_set` Block + +* `client_side_action_config` - (Required) Configuration for the request handling that's applied by the managed rule group rules `ChallengeAllDuringEvent` and `ChallengeDDoSRequests` during a distributed denial of service (DDoS) attack. See [`client_side_action_config`](#client_side_action_config-block) for more details. @@ -861,11 +872,8 @@ The `managed_rule_group_configs` block support the following arguments: ### `request_inspection` Block -* `address_fields` (Optional) The names of the fields in the request payload that contain your customer's primary physical address. See [`address_fields`](#address_fields-block) for more details. -* `email_field` (Optional) The name of the field in the request payload that contains your customer's email. See [`email_field`](#email_field-block) for more details. * `password_field` (Optional) Details about your login page password field.
See [`password_field`](#password_field-block) for more details. * `payload_type` (Required) The payload type for your login endpoint, either JSON or form encoded. -* `phone_number_fields` (Optional) The names of the fields in the request payload that contain your customer's primary phone number. See [`phone_number_fields`](#phone_number_fields-block) for more details. * `username_field` (Optional) Details about your login page username field. See [`username_field`](#username_field-block) for more details. ### `address_fields` Block @@ -1120,6 +1128,7 @@ Aggregate the request counts using one or more web request components as the agg The `custom_key` block supports the following arguments: +* `asn` - (Optional) Use an Autonomous System Number (ASN) derived from the request's originating or forwarded IP address as an aggregate key. See [RateLimit `asn`](#ratelimit-asn-block) below for details. * `cookie` - (Optional) Use the value of a cookie in the request as an aggregate key. See [RateLimit `cookie`](#ratelimit-cookie-block) below for details. * `forwarded_ip` - (Optional) Use the first IP address in an HTTP header as an aggregate key. See [`forwarded_ip`](#ratelimit-forwarded_ip-block) below for details. * `http_method` - (Optional) Use the request's HTTP method as an aggregate key. See [RateLimit `http_method`](#ratelimit-http_method-block) below for details. @@ -1132,6 +1141,12 @@ The `custom_key` block supports the following arguments: * `query_string` - (Optional) Use the request's query string as an aggregate key. See [RateLimit `query_string`](#ratelimit-query_string-block) below for details. * `uri_path` - (Optional) Use the request's URI path as an aggregate key. See [RateLimit `uri_path`](#ratelimit-uri_path-block) below for details. +### RateLimit `asn` Block + +Use an Autonomous System Number (ASN) derived from the request's originating or forwarded IP address as an aggregate key. Each distinct ASN contributes to the aggregation instance. 
+ +The `asn` block is configured as an empty block `{}`. + ### RateLimit `cookie` Block Use the value of a cookie in the request as an aggregate key. Each distinct value in the cookie contributes to the aggregation instance. If you use a single cookie as your custom key, then each value fully defines an aggregation instance. diff --git a/website/docs/r/wafv2_web_acl_rule_group_association.html.markdown b/website/docs/r/wafv2_web_acl_rule_group_association.html.markdown new file mode 100644 index 000000000000..81e2a614ede9 --- /dev/null +++ b/website/docs/r/wafv2_web_acl_rule_group_association.html.markdown @@ -0,0 +1,504 @@ +--- +subcategory: "WAF" +layout: "aws" +page_title: "AWS: aws_wafv2_web_acl_rule_group_association" +description: |- + Associates a WAFv2 Rule Group with a Web ACL by adding a rule that references the Rule Group. +--- + +# Resource: aws_wafv2_web_acl_rule_group_association + +Associates a WAFv2 Rule Group (custom or managed) with a Web ACL by adding a rule that references the Rule Group. Use this resource to apply the rules defined in a Rule Group to a Web ACL without duplicating rule definitions. + +This resource supports both: + +- **Custom Rule Groups**: User-created rule groups that you manage within your AWS account +- **Managed Rule Groups**: Pre-configured rule groups provided by AWS or third-party vendors + +!> **Warning:** Verify the rule names in your `rule_action_override`s carefully. With managed rule groups, WAF silently ignores any override that uses an invalid rule name. With customer-owned rule groups, invalid rule names in your overrides will cause web ACL updates to fail. An invalid rule name is any name that doesn't exactly match the case-sensitive name of an existing rule in the rule group. + +!> **Warning:** Using this resource will cause the associated Web ACL resource to show configuration drift in the `rule` argument unless you add `lifecycle { ignore_changes = [rule] }` to the Web ACL resource configuration. 
This is because this resource modifies the Web ACL's rules outside of the Web ACL resource's direct management. + +~> **Note:** This resource creates a rule within the Web ACL that references the entire Rule Group. The rule group's individual rules are evaluated as a unit when requests are processed by the Web ACL. + +## Example Usage + +### Custom Rule Group - Basic Usage + +```terraform +resource "aws_wafv2_rule_group" "example" { + name = "example-rule-group" + scope = "REGIONAL" + capacity = 10 + + rule { + name = "block-suspicious-requests" + priority = 1 + + action { + block {} + } + + statement { + geo_match_statement { + country_codes = ["CN", "RU"] + } + } + + visibility_config { + cloudwatch_metrics_enabled = true + metric_name = "block-suspicious-requests" + sampled_requests_enabled = true + } + } + + visibility_config { + cloudwatch_metrics_enabled = true + metric_name = "example-rule-group" + sampled_requests_enabled = true + } +} + +resource "aws_wafv2_web_acl" "example" { + name = "example-web-acl" + scope = "REGIONAL" + + default_action { + allow {} + } + + visibility_config { + cloudwatch_metrics_enabled = true + metric_name = "example-web-acl" + sampled_requests_enabled = true + } + + lifecycle { + ignore_changes = [rule] + } +} + +resource "aws_wafv2_web_acl_rule_group_association" "example" { + rule_name = "example-rule-group-rule" + priority = 100 + web_acl_arn = aws_wafv2_web_acl.example.arn + + rule_group_reference { + arn = aws_wafv2_rule_group.example.arn + } +} +``` + +### Managed Rule Group - Basic Usage + +```terraform +resource "aws_wafv2_web_acl" "example" { + name = "example-web-acl" + scope = "REGIONAL" + + default_action { + allow {} + } + + visibility_config { + cloudwatch_metrics_enabled = true + metric_name = "example-web-acl" + sampled_requests_enabled = true + } + + lifecycle { + ignore_changes = [rule] + } +} + +resource "aws_wafv2_web_acl_rule_group_association" "managed_example" { + rule_name = "aws-common-rule-set" + 
priority = 50 + web_acl_arn = aws_wafv2_web_acl.example.arn + + managed_rule_group { + name = "AWSManagedRulesCommonRuleSet" + vendor_name = "AWS" + } +} +``` + +### Managed Rule Group - With Version + +```terraform +resource "aws_wafv2_web_acl_rule_group_association" "managed_versioned" { + rule_name = "aws-common-rule-set-versioned" + priority = 60 + web_acl_arn = aws_wafv2_web_acl.example.arn + + managed_rule_group { + name = "AWSManagedRulesCommonRuleSet" + vendor_name = "AWS" + version = "Version_1.0" + } +} +``` + +### Managed Rule Group - With Rule Action Overrides + +```terraform +resource "aws_wafv2_web_acl_rule_group_association" "managed_with_overrides" { + rule_name = "aws-common-rule-set-with-overrides" + priority = 70 + web_acl_arn = aws_wafv2_web_acl.example.arn + + managed_rule_group { + name = "AWSManagedRulesCommonRuleSet" + vendor_name = "AWS" + + # Override specific rules within the managed rule group + rule_action_override { + name = "GenericRFI_BODY" + action_to_use { + count { + custom_request_handling { + insert_header { + name = "X-RFI-Override" + value = "counted" + } + } + } + } + } + + rule_action_override { + name = "SizeRestrictions_BODY" + action_to_use { + captcha {} + } + } + } +} +``` + +### Custom Rule Group - With Override Action + +```terraform +resource "aws_wafv2_web_acl_rule_group_association" "example" { + rule_name = "example-rule-group-rule" + priority = 100 + web_acl_arn = aws_wafv2_web_acl.example.arn + override_action = "count" + + rule_group_reference { + arn = aws_wafv2_rule_group.example.arn + } +} +``` + +### Custom Rule Group - With Rule Action Overrides + +```terraform +resource "aws_wafv2_rule_group" "example" { + name = "example-rule-group" + scope = "REGIONAL" + capacity = 10 + + rule { + name = "geo-block-rule" + priority = 1 + + action { + block {} + } + + statement { + geo_match_statement { + country_codes = ["CN", "RU"] + } + } + + visibility_config { + cloudwatch_metrics_enabled = true + metric_name = 
"geo-block-rule" + sampled_requests_enabled = true + } + } + + rule { + name = "rate-limit-rule" + priority = 2 + + action { + block {} + } + + statement { + rate_based_statement { + limit = 1000 + aggregate_key_type = "IP" + } + } + + visibility_config { + cloudwatch_metrics_enabled = true + metric_name = "rate-limit-rule" + sampled_requests_enabled = true + } + } + + visibility_config { + cloudwatch_metrics_enabled = true + metric_name = "example-rule-group" + sampled_requests_enabled = true + } +} + +resource "aws_wafv2_web_acl" "example" { + name = "example-web-acl" + scope = "REGIONAL" + + default_action { + allow {} + } + + visibility_config { + cloudwatch_metrics_enabled = true + metric_name = "example-web-acl" + sampled_requests_enabled = true + } + + lifecycle { + ignore_changes = [rule] + } +} + +resource "aws_wafv2_web_acl_rule_group_association" "example" { + rule_name = "example-rule-group-rule" + priority = 100 + web_acl_arn = aws_wafv2_web_acl.example.arn + + rule_group_reference { + arn = aws_wafv2_rule_group.example.arn + + # Override specific rules within the rule group + rule_action_override { + name = "geo-block-rule" + action_to_use { + count { + custom_request_handling { + insert_header { + name = "X-Geo-Block-Override" + value = "counted" + } + } + } + } + } + + rule_action_override { + name = "rate-limit-rule" + action_to_use { + captcha { + custom_request_handling { + insert_header { + name = "X-Rate-Limit-Override" + value = "captcha-required" + } + } + } + } + } + } +} +``` + +### Custom Rule Group - CloudFront Web ACL + +```terraform +resource "aws_wafv2_rule_group" "cloudfront_example" { + name = "cloudfront-rule-group" + scope = "CLOUDFRONT" + capacity = 10 + + rule { + name = "rate-limit" + priority = 1 + + action { + block {} + } + + statement { + rate_based_statement { + limit = 2000 + aggregate_key_type = "IP" + } + } + + visibility_config { + cloudwatch_metrics_enabled = true + metric_name = "rate-limit" + sampled_requests_enabled 
= true + } + } + + visibility_config { + cloudwatch_metrics_enabled = true + metric_name = "cloudfront-rule-group" + sampled_requests_enabled = true + } +} + +resource "aws_wafv2_web_acl" "cloudfront_example" { + name = "cloudfront-web-acl" + scope = "CLOUDFRONT" + + default_action { + allow {} + } + + visibility_config { + cloudwatch_metrics_enabled = true + metric_name = "cloudfront-web-acl" + sampled_requests_enabled = true + } + + lifecycle { + ignore_changes = [rule] + } +} + +resource "aws_wafv2_web_acl_rule_group_association" "cloudfront_example" { + rule_name = "cloudfront-rule-group-rule" + priority = 50 + web_acl_arn = aws_wafv2_web_acl.cloudfront_example.arn + + rule_group_reference { + arn = aws_wafv2_rule_group.cloudfront_example.arn + } +} +``` + +## Argument Reference + +The following arguments are required: + +* `rule_name` - (Required) Name of the rule to create in the Web ACL that references the rule group. Must be between 1 and 128 characters. +* `priority` - (Required) Priority of the rule within the Web ACL. Rules are evaluated in order of priority, with lower numbers evaluated first. +* `web_acl_arn` - (Required) ARN of the Web ACL to associate the Rule Group with. + +The following arguments are optional: + +* `managed_rule_group` - (Optional) Managed Rule Group configuration. One of `rule_group_reference` or `managed_rule_group` is required. Conflicts with `rule_group_reference`. [See below](#managed_rule_group). +* `override_action` - (Optional) Override action for the rule group. Valid values are `none` and `count`. Defaults to `none`. When set to `count`, the actions defined in the rule group rules are overridden to count matches instead of blocking or allowing requests. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `rule_group_reference` - (Optional) Custom Rule Group reference configuration. One of `rule_group_reference` or `managed_rule_group` is required. Conflicts with `managed_rule_group`. [See below](#rule_group_reference). + +### rule_group_reference + +* `arn` - (Required) ARN of the Rule Group to associate with the Web ACL. +* `rule_action_override` - (Optional) Override actions for specific rules within the rule group. [See below](#rule_action_override). + +### managed_rule_group + +* `name` - (Required) Name of the managed rule group. +* `vendor_name` - (Required) Name of the managed rule group vendor. For AWS managed rule groups, this is `AWS`. +* `version` - (Optional) Version of the managed rule group. If not specified, the default version is used. +* `rule_action_override` - (Optional) Override actions for specific rules within the rule group. [See below](#rule_action_override). + +### rule_action_override + +* `name` - (Required) Name of the rule to override within the rule group. Verify the name carefully. With managed rule groups, WAF silently ignores any override that uses an invalid rule name. With customer-owned rule groups, invalid rule names in your overrides will cause web ACL updates to fail. An invalid rule name is any name that doesn't exactly match the case-sensitive name of an existing rule in the rule group. +* `action_to_use` - (Required) Action to use instead of the rule's original action. [See below](#action_to_use). + +### action_to_use + +Exactly one of the following action blocks must be specified: + +* `allow` - (Optional) Allow the request. [See below](#allow). +* `block` - (Optional) Block the request. [See below](#block). +* `captcha` - (Optional) Require CAPTCHA verification. [See below](#captcha). +* `challenge` - (Optional) Require challenge verification. 
[See below](#challenge). +* `count` - (Optional) Count the request without taking action. [See below](#count). + +### allow + +* `custom_request_handling` - (Optional) Custom handling for allowed requests. [See below](#custom_request_handling). + +### block + +* `custom_response` - (Optional) Custom response for blocked requests. [See below](#custom_response). + +### captcha + +* `custom_request_handling` - (Optional) Custom handling for CAPTCHA requests. [See below](#custom_request_handling). + +### challenge + +* `custom_request_handling` - (Optional) Custom handling for challenge requests. [See below](#custom_request_handling). + +### count + +* `custom_request_handling` - (Optional) Custom handling for counted requests. [See below](#custom_request_handling). + +### custom_request_handling + +* `insert_header` - (Required) Headers to insert into the request. [See below](#insert_header). + +### custom_response + +* `custom_response_body_key` - (Optional) Key of a custom response body to use. +* `response_code` - (Required) HTTP response code to return (200-599). +* `response_header` - (Optional) Headers to include in the response. [See below](#response_header). + +### insert_header + +* `name` - (Required) Name of the header to insert. +* `value` - (Required) Value of the header to insert. + +### response_header + +* `name` - (Required) Name of the response header. +* `value` - (Required) Value of the response header. + +## Attribute Reference + +This resource exports the following attributes in addition to the arguments above: + +None. 
+ +## Timeouts + +[Configuration options](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts): + +* `create` - (Default `30m`) +* `update` - (Default `30m`) +* `delete` - (Default `30m`) + +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import WAFv2 web ACL custom rule group associations using `WebACLARN,RuleGroupARN,RuleName`. For example: + +```terraform +import { + to = aws_wafv2_web_acl_rule_group_association.example + id = "arn:aws:wafv2:us-east-1:123456789012:regional/webacl/example-web-acl/12345678-1234-1234-1234-123456789012,arn:aws:wafv2:us-east-1:123456789012:regional/rulegroup/example-rule-group/87654321-4321-4321-4321-210987654321,example-rule-group-rule" +} +``` + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import WAFv2 web ACL managed rule group associations using `WebACLARN,VendorName:RuleGroupName[:Version],RuleName`. For example: + +```terraform +import { + to = aws_wafv2_web_acl_rule_group_association.managed_example + id = "arn:aws:wafv2:us-east-1:123456789012:regional/webacl/example-web-acl/12345678-1234-1234-1234-123456789012,AWS:AWSManagedRulesCommonRuleSet,aws-common-rule-set" +} +``` + +Using `terraform import`, import WAFv2 web ACL custom rule group associations using `WebACLARN,RuleGroupARN,RuleName`. For example: + +```console +% terraform import aws_wafv2_web_acl_rule_group_association.example "arn:aws:wafv2:us-east-1:123456789012:regional/webacl/example-web-acl/12345678-1234-1234-1234-123456789012,arn:aws:wafv2:us-east-1:123456789012:regional/rulegroup/example-rule-group/87654321-4321-4321-4321-210987654321,example-rule-group-rule" +``` + +Using `terraform import`, import WAFv2 web ACL managed rule group associations using `WebACLARN,VendorName:RuleGroupName[:Version],RuleName`. 
For example: + +```console +% terraform import aws_wafv2_web_acl_rule_group_association.managed_example "arn:aws:wafv2:us-east-1:123456789012:regional/webacl/example-web-acl/12345678-1234-1234-1234-123456789012,AWS:AWSManagedRulesCommonRuleSet,aws-common-rule-set" +``` diff --git a/website/docs/r/workspacesweb_browser_settings_association.html.markdown b/website/docs/r/workspacesweb_browser_settings_association.html.markdown new file mode 100644 index 000000000000..dd677ea79d4a --- /dev/null +++ b/website/docs/r/workspacesweb_browser_settings_association.html.markdown @@ -0,0 +1,68 @@ +--- +subcategory: "WorkSpaces Web" +layout: "aws" +page_title: "AWS: aws_workspacesweb_browser_settings_association" +description: |- + Terraform resource for managing an AWS WorkSpaces Web Browser Settings Association. +--- + +# Resource: aws_workspacesweb_browser_settings_association + +Terraform resource for managing an AWS WorkSpaces Web Browser Settings Association. + +## Example Usage + +### Basic Usage + +```terraform +resource "aws_workspacesweb_portal" "example" { + display_name = "example" +} + +resource "aws_workspacesweb_browser_settings" "example" { + browser_policy = jsonencode({ + chromePolicies = { + DefaultDownloadDirectory = { + value = "/home/as2-streaming-user/MyFiles/TemporaryFiles1" + } + } + }) +} + +resource "aws_workspacesweb_browser_settings_association" "example" { + browser_settings_arn = aws_workspacesweb_browser_settings.example.browser_settings_arn + portal_arn = aws_workspacesweb_portal.example.portal_arn +} +``` + +## Argument Reference + +The following arguments are required: + +* `browser_settings_arn` - (Required) ARN of the browser settings to associate with the portal. Forces replacement if changed. +* `portal_arn` - (Required) ARN of the portal to associate with the browser settings. Forces replacement if changed. 
+ +The following arguments are optional: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). + +## Attribute Reference + +This resource exports no additional attributes. + +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import WorkSpaces Web Browser Settings Association using the `browser_settings_arn,portal_arn`. For example: + +```terraform +import { + to = aws_workspacesweb_browser_settings_association.example + id = "arn:aws:workspaces-web:us-west-2:123456789012:browserSettings/browser_settings-id-12345678,arn:aws:workspaces-web:us-west-2:123456789012:portal/portal-id-12345678" +} +``` + +Using `terraform import`, import WorkSpaces Web Browser Settings Association using the `browser_settings_arn,portal_arn`. For example: + +```console +% terraform import aws_workspacesweb_browser_settings_association.example arn:aws:workspaces-web:us-west-2:123456789012:browserSettings/browser_settings-id-12345678,arn:aws:workspaces-web:us-west-2:123456789012:portal/portal-id-12345678 +``` diff --git a/website/docs/r/workspacesweb_data_protection_settings_association.html.markdown b/website/docs/r/workspacesweb_data_protection_settings_association.html.markdown new file mode 100644 index 000000000000..ac99b95741c8 --- /dev/null +++ b/website/docs/r/workspacesweb_data_protection_settings_association.html.markdown @@ -0,0 +1,56 @@ +--- +subcategory: "WorkSpaces Web" +layout: "aws" +page_title: "AWS: aws_workspacesweb_data_protection_settings_association" +description: |- + Terraform resource for managing an AWS WorkSpaces Web Data Protection Settings Association. 
+--- + +# Resource: aws_workspacesweb_data_protection_settings_association + +Terraform resource for managing an AWS WorkSpaces Web Data Protection Settings Association. + +## Example Usage + +### Basic Usage + +```terraform +resource "aws_workspacesweb_portal" "example" { + display_name = "example" +} + +resource "aws_workspacesweb_data_protection_settings" "example" { + display_name = "example" +} + +resource "aws_workspacesweb_data_protection_settings_association" "example" { + data_protection_settings_arn = aws_workspacesweb_data_protection_settings.example.data_protection_settings_arn + portal_arn = aws_workspacesweb_portal.example.portal_arn +} +``` + +## Argument Reference + +The following arguments are required: + +* `data_protection_settings_arn` - (Required) ARN of the data protection settings to associate with the portal. Forces replacement if changed. +* `portal_arn` - (Required) ARN of the portal to associate with the data protection settings. Forces replacement if changed. + +The following arguments are optional: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). + +## Attribute Reference + +This resource exports no additional attributes. + +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import WorkSpaces Web Data Protection Settings Association using the `data_protection_settings_arn,portal_arn`. 
For example: + +```terraform +import { + to = aws_workspacesweb_data_protection_settings_association.example + id = "arn:aws:workspaces-web:us-west-2:123456789012:dataProtectionSettings/data_protection_settings-id-12345678,arn:aws:workspaces-web:us-west-2:123456789012:portal/portal-id-12345678" +} +``` diff --git a/website/docs/r/workspacesweb_identity_provider.html.markdown b/website/docs/r/workspacesweb_identity_provider.html.markdown new file mode 100644 index 000000000000..a5a986db914b --- /dev/null +++ b/website/docs/r/workspacesweb_identity_provider.html.markdown @@ -0,0 +1,122 @@ +--- +subcategory: "WorkSpaces Web" +layout: "aws" +page_title: "AWS: aws_workspacesweb_identity_provider" +description: |- + Terraform resource for managing an AWS WorkSpaces Web Identity Provider. +--- + +# Resource: aws_workspacesweb_identity_provider + +Terraform resource for managing an AWS WorkSpaces Web Identity Provider. + +## Example Usage + +### Basic Usage with SAML + +```terraform +resource "aws_workspacesweb_portal" "example" { + display_name = "example" +} + +resource "aws_workspacesweb_identity_provider" "example" { + identity_provider_name = "example-saml" + identity_provider_type = "SAML" + portal_arn = aws_workspacesweb_portal.example.portal_arn + + identity_provider_details = { + MetadataURL = "https://example.com/metadata" + } +} +``` + +### OIDC Identity Provider + +```terraform +resource "aws_workspacesweb_portal" "test" { + display_name = "test" +} + +resource "aws_workspacesweb_identity_provider" "test" { + identity_provider_name = "test-updated" + identity_provider_type = "OIDC" + portal_arn = aws_workspacesweb_portal.test.portal_arn + + identity_provider_details = { + client_id = "test-client-id" + client_secret = "test-client-secret" + oidc_issuer = "https://accounts.google.com" + attributes_request_method = "POST" + authorize_scopes = "openid, email" + } +} +``` + +## Argument Reference + +The following arguments are required: + +* 
`identity_provider_details` - (Required) Identity provider details. The following list describes the provider detail keys for each identity provider type: + * For Google and Login with Amazon: + * `client_id` + * `client_secret` + * `authorize_scopes` + * For Facebook: + * `client_id` + * `client_secret` + * `authorize_scopes` + * `api_version` + * For Sign in with Apple: + * `client_id` + * `team_id` + * `key_id` + * `private_key` + * `authorize_scopes` + * For OIDC providers: + * `client_id` + * `client_secret` + * `attributes_request_method` + * `oidc_issuer` + * `authorize_scopes` + * `authorize_url` if not available from discovery URL specified by `oidc_issuer` key + * `token_url` if not available from discovery URL specified by `oidc_issuer` key + * `attributes_url` if not available from discovery URL specified by `oidc_issuer` key + * `jwks_uri` if not available from discovery URL specified by `oidc_issuer` key + * For SAML providers: + * `MetadataFile` OR `MetadataURL` + * `IDPSignout` (boolean) optional + * `IDPInit` (boolean) optional + * `RequestSigningAlgorithm` (string) optional - Only accepts rsa-sha256 + * `EncryptedResponses` (boolean) optional +* `identity_provider_name` - (Required) Identity provider name. +* `identity_provider_type` - (Required) Identity provider type. Valid values: `SAML`, `Facebook`, `Google`, `LoginWithAmazon`, `SignInWithApple`, `OIDC`. +* `portal_arn` - (Required) ARN of the web portal. Forces replacement if changed. + +The following arguments are optional: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `tags` - (Optional) Map of tags to assign to the resource. 
If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. + +## Attribute Reference + +This resource exports the following attributes in addition to the arguments above: + +* `identity_provider_arn` - ARN of the identity provider. +* `tags_all` - Map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). + +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import WorkSpaces Web Identity Provider using the `identity_provider_arn`. For example: + +```terraform +import { + to = aws_workspacesweb_identity_provider.example + id = "arn:aws:workspaces-web:us-west-2:123456789012:identityprovider/abcdef12345678/12345678-1234-1234-1234-123456789012" +} +``` + +Using `terraform import`, import WorkSpaces Web Identity Provider using the `identity_provider_arn`. For example: + +```console +% terraform import aws_workspacesweb_identity_provider.example arn:aws:workspaces-web:us-west-2:123456789012:identityprovider/abcdef12345678/12345678-1234-1234-1234-123456789012 +``` diff --git a/website/docs/r/workspacesweb_ip_access_settings_association.html.markdown b/website/docs/r/workspacesweb_ip_access_settings_association.html.markdown new file mode 100644 index 000000000000..acbcb0d2b8df --- /dev/null +++ b/website/docs/r/workspacesweb_ip_access_settings_association.html.markdown @@ -0,0 +1,60 @@ +--- +subcategory: "WorkSpaces Web" +layout: "aws" +page_title: "AWS: aws_workspacesweb_ip_access_settings_association" +description: |- + Terraform resource for managing an AWS WorkSpaces Web IP Access Settings Association. 
+--- + +# Resource: aws_workspacesweb_ip_access_settings_association + +Terraform resource for managing an AWS WorkSpaces Web IP Access Settings Association. + +## Example Usage + +### Basic Usage + +```terraform +resource "aws_workspacesweb_portal" "example" { + display_name = "example" +} + +resource "aws_workspacesweb_ip_access_settings" "example" { + display_name = "example" + + ip_rule { + ip_range = "10.0.0.0/16" + } +} + +resource "aws_workspacesweb_ip_access_settings_association" "example" { + ip_access_settings_arn = aws_workspacesweb_ip_access_settings.example.ip_access_settings_arn + portal_arn = aws_workspacesweb_portal.example.portal_arn +} +``` + +## Argument Reference + +The following arguments are required: + +* `ip_access_settings_arn` - (Required) ARN of the IP access settings to associate with the portal. Forces replacement if changed. +* `portal_arn` - (Required) ARN of the portal to associate with the IP access settings. Forces replacement if changed. + +The following arguments are optional: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). + +## Attribute Reference + +This resource exports no additional attributes. + +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import WorkSpaces Web IP Access Settings Association using the `ip_access_settings_arn,portal_arn`. 
For example: + +```terraform +import { + to = aws_workspacesweb_ip_access_settings_association.example + id = "arn:aws:workspaces-web:us-west-2:123456789012:ipAccessSettings/ip_access_settings-id-12345678,arn:aws:workspaces-web:us-west-2:123456789012:portal/portal-id-12345678" +} +``` diff --git a/website/docs/r/workspacesweb_network_settings_association.html.markdown b/website/docs/r/workspacesweb_network_settings_association.html.markdown new file mode 100644 index 000000000000..e90a65c1f2c5 --- /dev/null +++ b/website/docs/r/workspacesweb_network_settings_association.html.markdown @@ -0,0 +1,98 @@ +--- +subcategory: "WorkSpaces Web" +layout: "aws" +page_title: "AWS: aws_workspacesweb_network_settings_association" +description: |- + Terraform resource for managing an AWS WorkSpaces Web Network Settings Association. +--- + +# Resource: aws_workspacesweb_network_settings_association + +Terraform resource for managing an AWS WorkSpaces Web Network Settings Association. + +## Example Usage + +### Basic Usage + +```terraform +data "aws_availability_zones" "available" { + state = "available" + + filter { + name = "opt-in-status" + values = ["opt-in-not-required"] + } +} + +resource "aws_vpc" "example" { + cidr_block = "10.0.0.0/16" + + tags = { + Name = "example" + } +} + +resource "aws_subnet" "example" { + count = 2 + + vpc_id = aws_vpc.example.id + cidr_block = cidrsubnet(aws_vpc.example.cidr_block, 8, count.index) + availability_zone = data.aws_availability_zones.available.names[count.index] + + tags = { + Name = "example" + } +} + +resource "aws_security_group" "example" { + count = 2 + + vpc_id = aws_vpc.example.id + name = "example-${count.index}" + + tags = { + Name = "example" + } +} + +resource "aws_workspacesweb_portal" "example" { + display_name = "example" +} + +resource "aws_workspacesweb_network_settings" "example" { + vpc_id = aws_vpc.example.id + subnet_ids = [aws_subnet.example[0].id, aws_subnet.example[1].id] + security_group_ids = 
[aws_security_group.example[0].id, aws_security_group.example[1].id] +} + +resource "aws_workspacesweb_network_settings_association" "example" { + network_settings_arn = aws_workspacesweb_network_settings.example.network_settings_arn + portal_arn = aws_workspacesweb_portal.example.portal_arn +} +``` + +## Argument Reference + +The following arguments are required: + +* `network_settings_arn` - (Required) ARN of the network settings to associate with the portal. Forces replacement if changed. +* `portal_arn` - (Required) ARN of the portal to associate with the network settings. Forces replacement if changed. + +The following arguments are optional: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). + +## Attribute Reference + +This resource exports no additional attributes. + +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import WorkSpaces Web Network Settings Association using the `network_settings_arn,portal_arn`. For example: + +```terraform +import { + to = aws_workspacesweb_network_settings_association.example + id = "arn:aws:workspaces-web:us-west-2:123456789012:networkSettings/network_settings-id-12345678,arn:aws:workspaces-web:us-west-2:123456789012:portal/portal-id-12345678" +} +``` diff --git a/website/docs/r/workspacesweb_portal.html.markdown b/website/docs/r/workspacesweb_portal.html.markdown new file mode 100644 index 000000000000..62302cbdd205 --- /dev/null +++ b/website/docs/r/workspacesweb_portal.html.markdown @@ -0,0 +1,112 @@ +--- +subcategory: "WorkSpaces Web" +layout: "aws" +page_title: "AWS: aws_workspacesweb_portal" +description: |- + Terraform resource for managing an AWS WorkSpaces Web Portal. 
+--- + +# Resource: aws_workspacesweb_portal + +Terraform resource for managing an AWS WorkSpaces Web Portal. + +## Example Usage + +### Basic Usage + +```terraform +resource "aws_workspacesweb_portal" "example" { + display_name = "example-portal" + instance_type = "standard.regular" +} +``` + +### Complete Usage + +```terraform +resource "aws_kms_key" "example" { + description = "KMS key for WorkSpaces Web Portal" + deletion_window_in_days = 7 +} + +resource "aws_workspacesweb_portal" "example" { + display_name = "example-portal" + instance_type = "standard.large" + authentication_type = "IAM_Identity_Center" + customer_managed_key = aws_kms_key.example.arn + max_concurrent_sessions = 10 + + additional_encryption_context = { + Environment = "Production" + } + + tags = { + Name = "example-portal" + } + + timeouts { + create = "10m" + update = "10m" + delete = "10m" + } +} +``` + +## Argument Reference + +The following arguments are optional: + +* `additional_encryption_context` - (Optional) Additional encryption context for the customer managed key. Forces replacement if changed. +* `authentication_type` - (Optional) Authentication type for the portal. Valid values: `Standard`, `IAM_Identity_Center`. +* `browser_settings_arn` - (Optional) ARN of the browser settings to use for the portal. +* `customer_managed_key` - (Optional) ARN of the customer managed key. Forces replacement if changed. +* `display_name` - (Optional) Display name of the portal. +* `instance_type` - (Optional) Instance type for the portal. Valid values: `standard.regular`, `standard.large`. +* `max_concurrent_sessions` - (Optional) Maximum number of concurrent sessions for the portal. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
+* `tags` - (Optional) Key-value map of resource tags. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. + +## Attribute Reference + +This resource exports the following attributes in addition to the arguments above: + +* `browser_type` - Browser type of the portal. +* `creation_date` - Creation date of the portal. +* `data_protection_settings_arn` - ARN of the data protection settings associated with the portal. +* `ip_access_settings_arn` - ARN of the IP access settings associated with the portal. +* `network_settings_arn` - ARN of the network settings associated with the portal. +* `portal_arn` - ARN of the portal. +* `portal_endpoint` - Endpoint URL of the portal. +* `portal_status` - Status of the portal. +* `renderer_type` - Renderer type of the portal. +* `session_logger_arn` - ARN of the session logger associated with the portal. +* `status_reason` - Reason for the current status of the portal. +* `tags_all` - Map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). +* `trust_store_arn` - ARN of the trust store associated with the portal. +* `user_access_logging_settings_arn` - ARN of the user access logging settings associated with the portal. +* `user_settings_arn` - ARN of the user settings associated with the portal. 
+ +## Timeouts + +[Configuration options](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts): + +* `create` - (Default `5m`) +* `update` - (Default `5m`) +* `delete` - (Default `5m`) + +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import WorkSpaces Web Portal using the `portal_arn`. For example: + +```terraform +import { + to = aws_workspacesweb_portal.example + id = "arn:aws:workspaces-web:us-west-2:123456789012:portal/abcdef12345678" +} +``` + +Using `terraform import`, import WorkSpaces Web Portal using the `portal_arn`. For example: + +```console +% terraform import aws_workspacesweb_portal.example arn:aws:workspaces-web:us-west-2:123456789012:portal/abcdef12345678 +``` diff --git a/website/docs/r/workspacesweb_session_logger.html.markdown b/website/docs/r/workspacesweb_session_logger.html.markdown new file mode 100644 index 000000000000..b083572c2e9f --- /dev/null +++ b/website/docs/r/workspacesweb_session_logger.html.markdown @@ -0,0 +1,214 @@ +--- +subcategory: "WorkSpaces Web" +layout: "aws" +page_title: "AWS: aws_workspacesweb_session_logger" +description: |- + Terraform resource for managing an AWS WorkSpaces Web Session Logger. +--- + +# Resource: aws_workspacesweb_session_logger + +Terraform resource for managing an AWS WorkSpaces Web Session Logger. 
+ +## Example Usage + +### Basic Usage + +```terraform +resource "aws_s3_bucket" "example" { + bucket = "example-session-logs" +} + +data "aws_iam_policy_document" "example" { + statement { + effect = "Allow" + principals { + type = "Service" + identifiers = ["workspaces-web.amazonaws.com"] + } + actions = [ + "s3:PutObject" + ] + resources = ["${aws_s3_bucket.example.arn}/*"] + } +} + +resource "aws_s3_bucket_policy" "example" { + bucket = aws_s3_bucket.example.id + policy = data.aws_iam_policy_document.example.json +} + +resource "aws_workspacesweb_session_logger" "example" { + display_name = "example-session-logger" + + event_filter { + all {} + } + + log_configuration { + s3 { + bucket = aws_s3_bucket.example.id + folder_structure = "Flat" + log_file_format = "Json" + } + } + + depends_on = [aws_s3_bucket_policy.example] +} +``` + +### Complete Configuration with KMS Encryption + +```terraform +resource "aws_s3_bucket" "example" { + bucket = "example-session-logs" + force_destroy = true +} + +data "aws_iam_policy_document" "example" { + statement { + effect = "Allow" + principals { + type = "Service" + identifiers = ["workspaces-web.amazonaws.com"] + } + actions = [ + "s3:PutObject" + ] + resources = [ + aws_s3_bucket.example.arn, + "${aws_s3_bucket.example.arn}/*" + ] + } +} + +resource "aws_s3_bucket_policy" "example" { + bucket = aws_s3_bucket.example.id + policy = data.aws_iam_policy_document.example.json +} + +data "aws_partition" "current" {} + +data "aws_caller_identity" "current" {} + +data "aws_iam_policy_document" "kms_key_policy" { + statement { + principals { + type = "AWS" + identifiers = ["arn:${data.aws_partition.current.partition}:iam::${data.aws_caller_identity.current.account_id}:root"] + } + actions = ["kms:*"] + resources = ["*"] + } + + statement { + principals { + type = "Service" + identifiers = ["workspaces-web.amazonaws.com"] + } + actions = [ + "kms:Encrypt", + "kms:GenerateDataKey*", + "kms:ReEncrypt*", + "kms:Decrypt" + ] + resources 
= ["*"] + } +} + +resource "aws_kms_key" "example" { + description = "KMS key for WorkSpaces Web Session Logger" + policy = data.aws_iam_policy_document.kms_key_policy.json +} + +resource "aws_workspacesweb_session_logger" "example" { + display_name = "example-session-logger" + customer_managed_key = aws_kms_key.example.arn + additional_encryption_context = { + Environment = "Production" + Application = "WorkSpacesWeb" + } + + event_filter { + include = ["SessionStart", "SessionEnd"] + } + + log_configuration { + s3 { + bucket = aws_s3_bucket.example.id + bucket_owner = data.aws_caller_identity.current.account_id + folder_structure = "NestedByDate" + key_prefix = "workspaces-web-logs/" + log_file_format = "JsonLines" + } + } + + tags = { + Name = "example-session-logger" + Environment = "Production" + } + + depends_on = [aws_s3_bucket_policy.example, aws_kms_key.example] +} +``` + +## Argument Reference + +The following arguments are required: + +* `event_filter` - (Required) Event filter that determines which events are logged. See [Event Filter](#event-filter) below. +* `log_configuration` - (Required) Configuration block for specifying where logs are delivered. See [Log Configuration](#log-configuration) below. + +The following arguments are optional: + +* `additional_encryption_context` - (Optional) Map of additional encryption context key-value pairs. +* `customer_managed_key` - (Optional) ARN of the customer managed KMS key used to encrypt sensitive information. +* `display_name` - (Optional) Human-readable display name for the session logger resource. Forces replacement if changed. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `tags` - (Optional) Map of tags to assign to the resource. 
If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. + +### Log Configuration + +* `s3` - (Required) Configuration block for S3 log delivery. See [S3 Configuration](#s3-configuration) below. + +### Event Filter + +Exactly one of the following must be specified: + +* `all` - (Optional) Block that specifies to monitor all events. Set to `{}` to monitor all events. +* `include` - (Optional) List of specific events to monitor. Valid values include session events like `SessionStart`, `SessionEnd`, etc. + +### S3 Configuration + +* `bucket` - (Required) S3 bucket name where logs are delivered. +* `folder_structure` - (Required) Folder structure that defines the organizational structure for log files in S3. Valid values: `Flat`, `NestedByDate`. +* `log_file_format` - (Required) Format of the log file written to S3. Valid values: `Json`, `JsonLines`. +* `bucket_owner` - (Optional) Expected bucket owner of the target S3 bucket. +* `key_prefix` - (Optional) S3 path prefix that determines where log files are stored. + +## Attribute Reference + +This resource exports the following attributes in addition to the arguments above: + +* `associated_portal_arns` - List of ARNs of the web portals associated with the session logger. +* `session_logger_arn` - ARN of the session logger. +* `tags_all` - Map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). + +~> **Note:** The `additional_encryption_context` and `customer_managed_key` attributes are computed when not specified and will be populated with values from the AWS API response. 
+ +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import WorkSpaces Web Session Logger using the `session_logger_arn`. For example: + +```terraform +import { + to = aws_workspacesweb_session_logger.example + id = "arn:aws:workspaces-web:us-west-2:123456789012:sessionLogger/session_logger-id-12345678" +} +``` + +Using `terraform import`, import WorkSpaces Web Session Logger using the `session_logger_arn`. For example: + +```console +% terraform import aws_workspacesweb_session_logger.example arn:aws:workspaces-web:us-west-2:123456789012:sessionLogger/session_logger-id-12345678 +``` diff --git a/website/docs/r/workspacesweb_session_logger_association.html.markdown b/website/docs/r/workspacesweb_session_logger_association.html.markdown new file mode 100644 index 000000000000..8d6ac70a268c --- /dev/null +++ b/website/docs/r/workspacesweb_session_logger_association.html.markdown @@ -0,0 +1,102 @@ +--- +subcategory: "WorkSpaces Web" +layout: "aws" +page_title: "AWS: aws_workspacesweb_session_logger_association" +description: |- + Terraform resource for managing an AWS WorkSpaces Web Session Logger Association. +--- + +# Resource: aws_workspacesweb_session_logger_association + +Terraform resource for managing an AWS WorkSpaces Web Session Logger Association. 
+ +## Example Usage + +### Basic Usage + +```terraform +resource "aws_workspacesweb_portal" "example" { + display_name = "example" +} + +resource "aws_s3_bucket" "example" { + bucket = "example-session-logs" + force_destroy = true +} + +data "aws_iam_policy_document" "example" { + statement { + effect = "Allow" + principals { + type = "Service" + identifiers = ["workspaces-web.amazonaws.com"] + } + actions = [ + "s3:PutObject" + ] + resources = [ + "${aws_s3_bucket.example.arn}/*" + ] + } +} + +resource "aws_s3_bucket_policy" "example" { + bucket = aws_s3_bucket.example.id + policy = data.aws_iam_policy_document.example.json +} + +resource "aws_workspacesweb_session_logger" "example" { + display_name = "example" + + event_filter { + all = {} + } + + log_configuration { + s3 { + bucket = aws_s3_bucket.example.id + folder_structure = "Flat" + log_file_format = "Json" + } + } + + depends_on = [aws_s3_bucket_policy.example] +} + +resource "aws_workspacesweb_session_logger_association" "example" { + portal_arn = aws_workspacesweb_portal.example.portal_arn + session_logger_arn = aws_workspacesweb_session_logger.example.session_logger_arn +} +``` + +## Argument Reference + +The following arguments are required: + +* `portal_arn` - (Required) ARN of the web portal. +* `session_logger_arn` - (Required) ARN of the session logger. + +The following arguments are optional: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). + +## Attribute Reference + +This resource exports no additional attributes. + +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import WorkSpaces Web Session Logger Association using the `session_logger_arn,portal_arn`. 
For example: + +```terraform +import { + to = aws_workspacesweb_session_logger_association.example + id = "arn:aws:workspaces-web:us-west-2:123456789012:sessionLogger/session_logger-id-12345678,arn:aws:workspaces-web:us-west-2:123456789012:portal/portal-id-12345678" +} +``` + +Using `terraform import`, import WorkSpaces Web Session Logger Association using the `session_logger_arn,portal_arn`. For example: + +```console +% terraform import aws_workspacesweb_session_logger_association.example arn:aws:workspaces-web:us-west-2:123456789012:sessionLogger/session_logger-id-12345678,arn:aws:workspaces-web:us-west-2:123456789012:portal/portal-id-12345678 +``` diff --git a/website/docs/r/workspacesweb_trust_store.html.markdown b/website/docs/r/workspacesweb_trust_store.html.markdown new file mode 100644 index 000000000000..38a8e5f54a87 --- /dev/null +++ b/website/docs/r/workspacesweb_trust_store.html.markdown @@ -0,0 +1,86 @@ +--- +subcategory: "WorkSpaces Web" +layout: "aws" +page_title: "AWS: aws_workspacesweb_trust_store" +description: |- + Terraform resource for managing an AWS WorkSpaces Web Trust Store. +--- + +# Resource: aws_workspacesweb_trust_store + +Terraform resource for managing an AWS WorkSpaces Web Trust Store. + +## Example Usage + +### Basic Usage + +```terraform +resource "aws_workspacesweb_trust_store" "example" { + certificate { + body = file("certificate.pem") + } +} +``` + +### Multiple Certificates + +```terraform +resource "aws_workspacesweb_trust_store" "example" { + certificate { + body = file("certificate1.pem") + } + + certificate { + body = file("certificate2.pem") + } + + tags = { + Name = "example-trust-store" + } +} +``` + +## Argument Reference + +The following arguments are optional: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). +* `certificate` - (Optional) Set of certificates to include in the trust store. See [Certificate](#certificate) below. +* `tags` - (Optional) Map of tags to assign to the resource. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. + +### Certificate + +* `body` - (Required) Certificate body in PEM format. + +## Attribute Reference + +This resource exports the following attributes in addition to the arguments above: + +* `associated_portal_arns` - List of ARNs of the web portals associated with the trust store. +* `trust_store_arn` - ARN of the trust store. +* `tags_all` - Map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). + +The `certificate` block exports the following additional attributes: + +* `issuer` - Certificate issuer. +* `not_valid_after` - Date and time when the certificate expires in RFC3339 format. +* `not_valid_before` - Date and time when the certificate becomes valid in RFC3339 format. +* `subject` - Certificate subject. +* `thumbprint` - Certificate thumbprint. + +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import WorkSpaces Web Trust Store using the `trust_store_arn`. For example: + +```terraform +import { + to = aws_workspacesweb_trust_store.example + id = "arn:aws:workspaces-web:us-west-2:123456789012:trustStore/trust_store-id-12345678" +} +``` + +Using `terraform import`, import WorkSpaces Web Trust Store using the `trust_store_arn`. 
For example: + +```console +% terraform import aws_workspacesweb_trust_store.example arn:aws:workspaces-web:us-west-2:123456789012:trustStore/trust_store-id-12345678 +``` diff --git a/website/docs/r/workspacesweb_trust_store_association.html.markdown b/website/docs/r/workspacesweb_trust_store_association.html.markdown new file mode 100644 index 000000000000..93536311fa8a --- /dev/null +++ b/website/docs/r/workspacesweb_trust_store_association.html.markdown @@ -0,0 +1,64 @@ +--- +subcategory: "WorkSpaces Web" +layout: "aws" +page_title: "AWS: aws_workspacesweb_trust_store_association" +description: |- + Terraform resource for managing an AWS WorkSpaces Web Trust Store Association. +--- + +# Resource: aws_workspacesweb_trust_store_association + +Terraform resource for managing an AWS WorkSpaces Web Trust Store Association. + +## Example Usage + +### Basic Usage + +```terraform +resource "aws_workspacesweb_portal" "example" { + display_name = "example" +} + +resource "aws_workspacesweb_trust_store" "example" { + certificate { + body = file("certificate.pem") + } +} + +resource "aws_workspacesweb_trust_store_association" "example" { + trust_store_arn = aws_workspacesweb_trust_store.example.trust_store_arn + portal_arn = aws_workspacesweb_portal.example.portal_arn +} +``` + +## Argument Reference + +The following arguments are required: + +* `trust_store_arn` - (Required) ARN of the trust store to associate with the portal. Forces replacement if changed. +* `portal_arn` - (Required) ARN of the portal to associate with the trust store. Forces replacement if changed. + +The following arguments are optional: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
+ +## Attribute Reference + +This resource exports no additional attributes. + +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import WorkSpaces Web Trust Store Association using the `trust_store_arn,portal_arn`. For example: + +```terraform +import { + to = aws_workspacesweb_trust_store_association.example + id = "arn:aws:workspaces-web:us-west-2:123456789012:trustStore/trust_store-id-12345678,arn:aws:workspaces-web:us-west-2:123456789012:portal/portal-id-12345678" +} +``` + +Using `terraform import`, import WorkSpaces Web Trust Store Association using the `trust_store_arn,portal_arn`. For example: + +```console +% terraform import aws_workspacesweb_trust_store_association.example arn:aws:workspaces-web:us-west-2:123456789012:trustStore/trust_store-id-12345678,arn:aws:workspaces-web:us-west-2:123456789012:portal/portal-id-12345678 +``` diff --git a/website/docs/r/workspacesweb_user_access_logging_settings_association.html.markdown b/website/docs/r/workspacesweb_user_access_logging_settings_association.html.markdown new file mode 100644 index 000000000000..4dbaa27c57b3 --- /dev/null +++ b/website/docs/r/workspacesweb_user_access_logging_settings_association.html.markdown @@ -0,0 +1,61 @@ +--- +subcategory: "WorkSpaces Web" +layout: "aws" +page_title: "AWS: aws_workspacesweb_user_access_logging_settings_association" +description: |- + Terraform resource for managing an AWS WorkSpaces Web User Access Logging Settings Association. +--- + +# Resource: aws_workspacesweb_user_access_logging_settings_association + +Terraform resource for managing an AWS WorkSpaces Web User Access Logging Settings Association. 
+ +## Example Usage + +### Basic Usage + +```terraform +resource "aws_workspacesweb_portal" "example" { + display_name = "example" +} + +resource "aws_kinesis_stream" "example" { + name = "amazon-workspaces-web-example" + shard_count = 1 +} + +resource "aws_workspacesweb_user_access_logging_settings" "example" { + kinesis_stream_arn = aws_kinesis_stream.example.arn +} + +resource "aws_workspacesweb_user_access_logging_settings_association" "example" { + user_access_logging_settings_arn = aws_workspacesweb_user_access_logging_settings.example.user_access_logging_settings_arn + portal_arn = aws_workspacesweb_portal.example.portal_arn +} +``` + +## Argument Reference + +The following arguments are required: + +* `user_access_logging_settings_arn` - (Required) ARN of the user access logging settings to associate with the portal. Forces replacement if changed. +* `portal_arn` - (Required) ARN of the portal to associate with the user access logging settings. Forces replacement if changed. + +The following arguments are optional: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). + +## Attribute Reference + +This resource exports no additional attributes. + +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import WorkSpaces Web User Access Logging Settings Association using the `user_access_logging_settings_arn,portal_arn`. 
For example: + +```terraform +import { + to = aws_workspacesweb_user_access_logging_settings_association.example + id = "arn:aws:workspaces-web:us-west-2:123456789012:userAccessLoggingSettings/user_access_logging_settings-id-12345678,arn:aws:workspaces-web:us-west-2:123456789012:portal/portal-id-12345678" +} +``` diff --git a/website/docs/r/workspacesweb_user_settings_association.html.markdown b/website/docs/r/workspacesweb_user_settings_association.html.markdown new file mode 100644 index 000000000000..1f403cf2ecb1 --- /dev/null +++ b/website/docs/r/workspacesweb_user_settings_association.html.markdown @@ -0,0 +1,60 @@ +--- +subcategory: "WorkSpaces Web" +layout: "aws" +page_title: "AWS: aws_workspacesweb_user_settings_association" +description: |- + Terraform resource for managing an AWS WorkSpaces Web User Settings Association. +--- + +# Resource: aws_workspacesweb_user_settings_association + +Terraform resource for managing an AWS WorkSpaces Web User Settings Association. + +## Example Usage + +### Basic Usage + +```terraform +resource "aws_workspacesweb_portal" "example" { + display_name = "example" +} + +resource "aws_workspacesweb_user_settings" "example" { + copy_allowed = "Enabled" + download_allowed = "Enabled" + paste_allowed = "Enabled" + print_allowed = "Enabled" + upload_allowed = "Enabled" +} + +resource "aws_workspacesweb_user_settings_association" "example" { + user_settings_arn = aws_workspacesweb_user_settings.example.user_settings_arn + portal_arn = aws_workspacesweb_portal.example.portal_arn +} +``` + +## Argument Reference + +The following arguments are required: + +* `user_settings_arn` - (Required) ARN of the user settings to associate with the portal. Forces replacement if changed. +* `portal_arn` - (Required) ARN of the portal to associate with the user settings. Forces replacement if changed. 
+ +The following arguments are optional: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). + +## Attribute Reference + +This resource exports no additional attributes. + +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import WorkSpaces Web User Settings Association using the `user_settings_arn,portal_arn`. For example: + +```terraform +import { + to = aws_workspacesweb_user_settings_association.example + id = "arn:aws:workspaces-web:us-west-2:123456789012:userSettings/user_settings-id-12345678,arn:aws:workspaces-web:us-west-2:123456789012:portal/portal-id-12345678" +} +``` diff --git a/website/docs/r/xray_group.html.markdown b/website/docs/r/xray_group.html.markdown index 3e72c86533bd..84c94ad846d8 100644 --- a/website/docs/r/xray_group.html.markdown +++ b/website/docs/r/xray_group.html.markdown @@ -51,6 +51,27 @@ This resource exports the following attributes in addition to the arguments abov ## Import +In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example: + +```terraform +import { + to = aws_xray_group.example + identity = { + "arn" = "arn:aws:xray:us-west-2:123456789012:group/example-group/AFAEAFE" + } +} + +resource "aws_xray_group" "example" { + ### Configuration omitted for brevity ### +} +``` + +### Identity Schema + +#### Required + +- `arn` (String) Amazon Resource Name (ARN) of the X-Ray group. + In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import XRay Groups using the ARN. For example: ```terraform